author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net
download   linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.xz
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c940
-rw-r--r--drivers/net/3c501.h93
-rw-r--r--drivers/net/3c503.c747
-rw-r--r--drivers/net/3c503.h91
-rw-r--r--drivers/net/3c505.c1690
-rw-r--r--drivers/net/3c505.h293
-rw-r--r--drivers/net/3c507.c965
-rw-r--r--drivers/net/3c509.c1622
-rw-r--r--drivers/net/3c515.c1594
-rw-r--r--drivers/net/3c523.c1323
-rw-r--r--drivers/net/3c523.h355
-rw-r--r--drivers/net/3c527.c1675
-rw-r--r--drivers/net/3c527.h81
-rw-r--r--drivers/net/3c59x.c3365
-rw-r--r--drivers/net/7990.c681
-rw-r--r--drivers/net/7990.h256
-rw-r--r--drivers/net/8139cp.c1904
-rw-r--r--drivers/net/8139too.c2666
-rw-r--r--drivers/net/82596.c1618
-rw-r--r--drivers/net/8390.c1130
-rw-r--r--drivers/net/8390.h214
-rw-r--r--drivers/net/Kconfig2538
-rw-r--r--drivers/net/LICENSE.SRC15
-rw-r--r--drivers/net/Makefile196
-rw-r--r--drivers/net/Space.c412
-rw-r--r--drivers/net/a2065.c843
-rw-r--r--drivers/net/a2065.h173
-rw-r--r--drivers/net/ac3200.c424
-rw-r--r--drivers/net/acenic.c3271
-rw-r--r--drivers/net/acenic.h794
-rw-r--r--drivers/net/acenic_firmware.h9457
-rwxr-xr-xdrivers/net/amd8111e.c2167
-rwxr-xr-xdrivers/net/amd8111e.h823
-rw-r--r--drivers/net/apne.c637
-rw-r--r--drivers/net/appletalk/Kconfig98
-rw-r--r--drivers/net/appletalk/Makefile7
-rw-r--r--drivers/net/appletalk/cops.c1059
-rw-r--r--drivers/net/appletalk/cops.h60
-rw-r--r--drivers/net/appletalk/cops_ffdrv.h533
-rw-r--r--drivers/net/appletalk/cops_ltdrv.h242
-rw-r--r--drivers/net/appletalk/ipddp.c317
-rw-r--r--drivers/net/appletalk/ipddp.h27
-rw-r--r--drivers/net/appletalk/ltpc.c1313
-rw-r--r--drivers/net/appletalk/ltpc.h73
-rw-r--r--drivers/net/arcnet/Kconfig140
-rw-r--r--drivers/net/arcnet/Makefile14
-rw-r--r--drivers/net/arcnet/arc-rawmode.c204
-rw-r--r--drivers/net/arcnet/arc-rimi.c368
-rw-r--r--drivers/net/arcnet/arcnet.c1102
-rw-r--r--drivers/net/arcnet/capmode.c296
-rw-r--r--drivers/net/arcnet/com20020-isa.c219
-rw-r--r--drivers/net/arcnet/com20020-pci.c189
-rw-r--r--drivers/net/arcnet/com20020.c357
-rw-r--r--drivers/net/arcnet/com90io.c435
-rw-r--r--drivers/net/arcnet/com90xx.c646
-rw-r--r--drivers/net/arcnet/rfc1051.c253
-rw-r--r--drivers/net/arcnet/rfc1201.c549
-rw-r--r--drivers/net/ariadne.c878
-rw-r--r--drivers/net/ariadne.h415
-rw-r--r--drivers/net/arm/Kconfig46
-rw-r--r--drivers/net/arm/Makefile10
-rw-r--r--drivers/net/arm/am79c961a.c750
-rw-r--r--drivers/net/arm/am79c961a.h148
-rw-r--r--drivers/net/arm/ether00.c1017
-rw-r--r--drivers/net/arm/ether1.c1110
-rw-r--r--drivers/net/arm/ether1.h281
-rw-r--r--drivers/net/arm/ether3.c936
-rw-r--r--drivers/net/arm/ether3.h177
-rw-r--r--drivers/net/arm/etherh.c862
-rw-r--r--drivers/net/at1700.c939
-rw-r--r--drivers/net/atari_bionet.c674
-rw-r--r--drivers/net/atari_pamsnet.c895
-rw-r--r--drivers/net/atarilance.c1206
-rw-r--r--drivers/net/atp.c952
-rw-r--r--drivers/net/atp.h259
-rw-r--r--drivers/net/au1000_eth.c2273
-rw-r--r--drivers/net/au1000_eth.h236
-rw-r--r--drivers/net/b44.c1978
-rw-r--r--drivers/net/b44.h427
-rw-r--r--drivers/net/bmac.c1708
-rw-r--r--drivers/net/bmac.h164
-rw-r--r--drivers/net/bonding/Makefile8
-rw-r--r--drivers/net/bonding/bond_3ad.c2451
-rw-r--r--drivers/net/bonding/bond_3ad.h300
-rw-r--r--drivers/net/bonding/bond_alb.c1696
-rw-r--r--drivers/net/bonding/bond_alb.h141
-rw-r--r--drivers/net/bonding/bond_main.c4708
-rw-r--r--drivers/net/bonding/bonding.h252
-rw-r--r--drivers/net/bsd_comp.c1179
-rw-r--r--drivers/net/cris/Makefile1
-rw-r--r--drivers/net/cris/eth_v10.c1836
-rw-r--r--drivers/net/cs89x0.c1866
-rw-r--r--drivers/net/cs89x0.h476
-rw-r--r--drivers/net/de600.c561
-rw-r--r--drivers/net/de600.h169
-rw-r--r--drivers/net/de620.c1047
-rw-r--r--drivers/net/de620.h117
-rw-r--r--drivers/net/declance.c1320
-rw-r--r--drivers/net/defxx.c3463
-rw-r--r--drivers/net/defxx.h1778
-rw-r--r--drivers/net/depca.c2122
-rw-r--r--drivers/net/depca.h185
-rw-r--r--drivers/net/dgrs.c1617
-rw-r--r--drivers/net/dgrs.h38
-rw-r--r--drivers/net/dgrs_asstruct.h37
-rw-r--r--drivers/net/dgrs_bcomm.h148
-rw-r--r--drivers/net/dgrs_es4h.h183
-rw-r--r--drivers/net/dgrs_ether.h135
-rw-r--r--drivers/net/dgrs_firmware.c9966
-rw-r--r--drivers/net/dgrs_i82596.h473
-rw-r--r--drivers/net/dgrs_plx9060.h175
-rw-r--r--drivers/net/dl2k.c1872
-rw-r--r--drivers/net/dl2k.h711
-rw-r--r--drivers/net/dummy.c152
-rw-r--r--drivers/net/e100.c2374
-rw-r--r--drivers/net/e1000/LICENSE339
-rw-r--r--drivers/net/e1000/Makefile35
-rw-r--r--drivers/net/e1000/e1000.h261
-rw-r--r--drivers/net/e1000/e1000_ethtool.c1673
-rw-r--r--drivers/net/e1000/e1000_hw.c5405
-rw-r--r--drivers/net/e1000/e1000_hw.h2144
-rw-r--r--drivers/net/e1000/e1000_main.c3162
-rw-r--r--drivers/net/e1000/e1000_osdep.h101
-rw-r--r--drivers/net/e1000/e1000_param.c744
-rw-r--r--drivers/net/e2100.c485
-rw-r--r--drivers/net/eepro.c1865
-rw-r--r--drivers/net/eepro100.c2412
-rw-r--r--drivers/net/eexpress.c1752
-rw-r--r--drivers/net/eexpress.h179
-rw-r--r--drivers/net/epic100.c1687
-rw-r--r--drivers/net/eql.c613
-rw-r--r--drivers/net/es3210.c478
-rw-r--r--drivers/net/eth16i.c1509
-rw-r--r--drivers/net/ewrk3.c2007
-rw-r--r--drivers/net/ewrk3.h322
-rw-r--r--drivers/net/fealnx.c2005
-rw-r--r--drivers/net/fec.c2259
-rw-r--r--drivers/net/fec.h164
-rw-r--r--drivers/net/fec_8xx/Kconfig14
-rw-r--r--drivers/net/fec_8xx/Makefile12
-rw-r--r--drivers/net/fec_8xx/fec_8xx-netta.c153
-rw-r--r--drivers/net/fec_8xx/fec_8xx.h218
-rw-r--r--drivers/net/fec_8xx/fec_main.c1275
-rw-r--r--drivers/net/fec_8xx/fec_mii.c380
-rw-r--r--drivers/net/fmv18x.c689
-rw-r--r--drivers/net/forcedeth.c2232
-rw-r--r--drivers/net/gianfar.c1849
-rw-r--r--drivers/net/gianfar.h538
-rw-r--r--drivers/net/gianfar_ethtool.c527
-rw-r--r--drivers/net/gianfar_phy.c661
-rw-r--r--drivers/net/gianfar_phy.h213
-rw-r--r--drivers/net/gt64240eth.h402
-rw-r--r--drivers/net/gt96100eth.c1569
-rw-r--r--drivers/net/gt96100eth.h349
-rw-r--r--drivers/net/hamachi.c2024
-rw-r--r--drivers/net/hamradio/6pack.c1051
-rw-r--r--drivers/net/hamradio/Kconfig191
-rw-r--r--drivers/net/hamradio/Makefile22
-rw-r--r--drivers/net/hamradio/baycom_epp.c1382
-rw-r--r--drivers/net/hamradio/baycom_par.c576
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c704
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c740
-rw-r--r--drivers/net/hamradio/bpqether.c643
-rw-r--r--drivers/net/hamradio/dmascc.c1493
-rw-r--r--drivers/net/hamradio/hdlcdrv.c817
-rw-r--r--drivers/net/hamradio/mkiss.c951
-rw-r--r--drivers/net/hamradio/mkiss.h62
-rw-r--r--drivers/net/hamradio/scc.c2191
-rw-r--r--drivers/net/hamradio/yam.c1218
-rw-r--r--drivers/net/hamradio/yam1200.h343
-rw-r--r--drivers/net/hamradio/yam9600.h343
-rw-r--r--drivers/net/hamradio/z8530.h245
-rw-r--r--drivers/net/hp-plus.c495
-rw-r--r--drivers/net/hp.c464
-rw-r--r--drivers/net/hp100.c3115
-rw-r--r--drivers/net/hp100.h615
-rw-r--r--drivers/net/hplance.c231
-rw-r--r--drivers/net/hplance.h26
-rw-r--r--drivers/net/hydra.c256
-rw-r--r--drivers/net/hydra.h177
-rw-r--r--drivers/net/ibm_emac/Makefile12
-rw-r--r--drivers/net/ibm_emac/ibm_emac.h267
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2012
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.h146
-rw-r--r--drivers/net/ibm_emac/ibm_emac_debug.c224
-rw-r--r--drivers/net/ibm_emac/ibm_emac_mal.c463
-rw-r--r--drivers/net/ibm_emac/ibm_emac_mal.h131
-rw-r--r--drivers/net/ibm_emac/ibm_emac_phy.c298
-rw-r--r--drivers/net/ibm_emac/ibm_emac_phy.h137
-rw-r--r--drivers/net/ibm_emac/ibm_emac_rgmii.h65
-rw-r--r--drivers/net/ibm_emac/ibm_emac_tah.h48
-rw-r--r--drivers/net/ibm_emac/ibm_emac_zmii.h93
-rw-r--r--drivers/net/ibmlana.c1080
-rw-r--r--drivers/net/ibmlana.h279
-rw-r--r--drivers/net/ibmveth.c1175
-rw-r--r--drivers/net/ibmveth.h158
-rw-r--r--drivers/net/ioc3-eth.c1653
-rw-r--r--drivers/net/irda/Kconfig404
-rw-r--r--drivers/net/irda/Makefile47
-rw-r--r--drivers/net/irda/act200l-sir.c257
-rw-r--r--drivers/net/irda/act200l.c297
-rw-r--r--drivers/net/irda/actisys-sir.c246
-rw-r--r--drivers/net/irda/actisys.c288
-rw-r--r--drivers/net/irda/ali-ircc.c2277
-rw-r--r--drivers/net/irda/ali-ircc.h231
-rw-r--r--drivers/net/irda/au1000_ircc.h127
-rw-r--r--drivers/net/irda/au1k_ir.c851
-rw-r--r--drivers/net/irda/donauboe.c1789
-rw-r--r--drivers/net/irda/donauboe.h363
-rw-r--r--drivers/net/irda/ep7211_ir.c122
-rw-r--r--drivers/net/irda/esi-sir.c159
-rw-r--r--drivers/net/irda/esi.c149
-rw-r--r--drivers/net/irda/girbil-sir.c258
-rw-r--r--drivers/net/irda/girbil.c250
-rw-r--r--drivers/net/irda/irda-usb.c1602
-rw-r--r--drivers/net/irda/irda-usb.h163
-rw-r--r--drivers/net/irda/irport.c1146
-rw-r--r--drivers/net/irda/irport.h80
-rw-r--r--drivers/net/irda/irtty-sir.c642
-rw-r--r--drivers/net/irda/irtty-sir.h34
-rw-r--r--drivers/net/irda/litelink-sir.c209
-rw-r--r--drivers/net/irda/litelink.c179
-rw-r--r--drivers/net/irda/ma600-sir.c264
-rw-r--r--drivers/net/irda/ma600.c354
-rw-r--r--drivers/net/irda/mcp2120-sir.c230
-rw-r--r--drivers/net/irda/mcp2120.c240
-rw-r--r--drivers/net/irda/nsc-ircc.c2222
-rw-r--r--drivers/net/irda/nsc-ircc.h280
-rw-r--r--drivers/net/irda/old_belkin-sir.c156
-rw-r--r--drivers/net/irda/old_belkin.c164
-rw-r--r--drivers/net/irda/sa1100_ir.c1045
-rw-r--r--drivers/net/irda/sir-dev.h202
-rw-r--r--drivers/net/irda/sir_core.c56
-rw-r--r--drivers/net/irda/sir_dev.c677
-rw-r--r--drivers/net/irda/sir_dongle.c134
-rw-r--r--drivers/net/irda/sir_kthread.c502
-rw-r--r--drivers/net/irda/smsc-ircc2.c2396
-rw-r--r--drivers/net/irda/smsc-ircc2.h194
-rw-r--r--drivers/net/irda/smsc-sio.h100
-rw-r--r--drivers/net/irda/stir4200.c1184
-rw-r--r--drivers/net/irda/tekram-sir.c232
-rw-r--r--drivers/net/irda/tekram.c282
-rw-r--r--drivers/net/irda/via-ircc.c1676
-rw-r--r--drivers/net/irda/via-ircc.h853
-rw-r--r--drivers/net/irda/vlsi_ir.c1912
-rw-r--r--drivers/net/irda/vlsi_ir.h798
-rw-r--r--drivers/net/irda/w83977af.h53
-rw-r--r--drivers/net/irda/w83977af_ir.c1379
-rw-r--r--drivers/net/irda/w83977af_ir.h199
-rw-r--r--drivers/net/isa-skeleton.c724
-rw-r--r--drivers/net/iseries_veth.c1422
-rw-r--r--drivers/net/iseries_veth.h46
-rw-r--r--drivers/net/ixgb/Makefile35
-rw-r--r--drivers/net/ixgb/ixgb.h200
-rw-r--r--drivers/net/ixgb/ixgb_ee.c774
-rw-r--r--drivers/net/ixgb/ixgb_ee.h106
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c704
-rw-r--r--drivers/net/ixgb/ixgb_hw.c1202
-rw-r--r--drivers/net/ixgb/ixgb_hw.h847
-rw-r--r--drivers/net/ixgb/ixgb_ids.h48
-rw-r--r--drivers/net/ixgb/ixgb_main.c2166
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h96
-rw-r--r--drivers/net/ixgb/ixgb_param.c476
-rw-r--r--drivers/net/jazzsonic.c381
-rw-r--r--drivers/net/lance.c1308
-rw-r--r--drivers/net/lasi_82596.c1607
-rw-r--r--drivers/net/lne390.c458
-rw-r--r--drivers/net/loopback.c233
-rw-r--r--drivers/net/lp486e.c1352
-rw-r--r--drivers/net/mac8390.c757
-rw-r--r--drivers/net/mac89x0.c666
-rw-r--r--drivers/net/mace.c1053
-rw-r--r--drivers/net/mace.h173
-rw-r--r--drivers/net/macmace.c710
-rw-r--r--drivers/net/macsonic.c657
-rw-r--r--drivers/net/meth.c843
-rw-r--r--drivers/net/meth.h246
-rw-r--r--drivers/net/mii.c398
-rw-r--r--drivers/net/mv643xx_eth.c3033
-rw-r--r--drivers/net/mv643xx_eth.h438
-rw-r--r--drivers/net/mvme147.c203
-rw-r--r--drivers/net/myri_code.h6287
-rw-r--r--drivers/net/myri_sbus.c1174
-rw-r--r--drivers/net/myri_sbus.h313
-rw-r--r--drivers/net/natsemi.c3273
-rw-r--r--drivers/net/ne-h8300.c670
-rw-r--r--drivers/net/ne.c862
-rw-r--r--drivers/net/ne2.c829
-rw-r--r--drivers/net/ne2k-pci.c712
-rw-r--r--drivers/net/ne3210.c374
-rw-r--r--drivers/net/netconsole.c127
-rw-r--r--drivers/net/ni5010.c812
-rw-r--r--drivers/net/ni5010.h144
-rw-r--r--drivers/net/ni52.c1386
-rw-r--r--drivers/net/ni52.h310
-rw-r--r--drivers/net/ni65.c1277
-rw-r--r--drivers/net/ni65.h121
-rw-r--r--drivers/net/ns83820.c2222
-rw-r--r--drivers/net/oaknet.c665
-rw-r--r--drivers/net/pci-skeleton.c1977
-rw-r--r--drivers/net/pcmcia/3c574_cs.c1307
-rw-r--r--drivers/net/pcmcia/3c589_cs.c1081
-rw-r--r--drivers/net/pcmcia/Kconfig132
-rw-r--r--drivers/net/pcmcia/Makefile16
-rw-r--r--drivers/net/pcmcia/axnet_cs.c1864
-rw-r--r--drivers/net/pcmcia/com20020_cs.c509
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1286
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c535
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c1699
-rw-r--r--drivers/net/pcmcia/ositech.h358
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1659
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2260
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2031
-rw-r--r--drivers/net/pcnet32.c2358
-rw-r--r--drivers/net/plip.c1427
-rw-r--r--drivers/net/ppp_async.c1033
-rw-r--r--drivers/net/ppp_deflate.c659
-rw-r--r--drivers/net/ppp_generic.c2746
-rw-r--r--drivers/net/ppp_synctty.c803
-rw-r--r--drivers/net/pppoe.c1153
-rw-r--r--drivers/net/pppox.c153
-rw-r--r--drivers/net/r8169.c2523
-rw-r--r--drivers/net/rrunner.c1756
-rw-r--r--drivers/net/rrunner.h848
-rw-r--r--drivers/net/s2io-regs.h778
-rw-r--r--drivers/net/s2io.c4950
-rw-r--r--drivers/net/s2io.h760
-rw-r--r--drivers/net/saa9730.c1184
-rw-r--r--drivers/net/saa9730.h371
-rw-r--r--drivers/net/sb1000.c1202
-rw-r--r--drivers/net/sb1250-mac.c2920
-rw-r--r--drivers/net/seeq8005.c769
-rw-r--r--drivers/net/seeq8005.h156
-rw-r--r--drivers/net/sgiseeq.c773
-rw-r--r--drivers/net/sgiseeq.h103
-rw-r--r--drivers/net/shaper.c755
-rw-r--r--drivers/net/sis900.c2370
-rw-r--r--drivers/net/sis900.h279
-rw-r--r--drivers/net/sk98lin/Makefile89
-rw-r--r--drivers/net/sk98lin/h/lm80.h179
-rw-r--r--drivers/net/sk98lin/h/skaddr.h333
-rw-r--r--drivers/net/sk98lin/h/skcsum.h219
-rw-r--r--drivers/net/sk98lin/h/skdebug.h74
-rw-r--r--drivers/net/sk98lin/h/skdrv1st.h191
-rw-r--r--drivers/net/sk98lin/h/skdrv2nd.h456
-rw-r--r--drivers/net/sk98lin/h/skerror.h55
-rw-r--r--drivers/net/sk98lin/h/skgedrv.h51
-rw-r--r--drivers/net/sk98lin/h/skgehw.h2126
-rw-r--r--drivers/net/sk98lin/h/skgehwt.h48
-rw-r--r--drivers/net/sk98lin/h/skgei2c.h210
-rw-r--r--drivers/net/sk98lin/h/skgeinit.h853
-rw-r--r--drivers/net/sk98lin/h/skgepnm2.h334
-rw-r--r--drivers/net/sk98lin/h/skgepnmi.h966
-rw-r--r--drivers/net/sk98lin/h/skgesirq.h111
-rw-r--r--drivers/net/sk98lin/h/ski2c.h177
-rw-r--r--drivers/net/sk98lin/h/skqueue.h94
-rw-r--r--drivers/net/sk98lin/h/skrlmt.h438
-rw-r--r--drivers/net/sk98lin/h/sktimer.h63
-rw-r--r--drivers/net/sk98lin/h/sktypes.h69
-rw-r--r--drivers/net/sk98lin/h/skversion.h38
-rw-r--r--drivers/net/sk98lin/h/skvpd.h271
-rw-r--r--drivers/net/sk98lin/h/xmac_ii.h1579
-rw-r--r--drivers/net/sk98lin/skaddr.c1773
-rw-r--r--drivers/net/sk98lin/skcsum.c871
-rw-r--r--drivers/net/sk98lin/skdim.c742
-rw-r--r--drivers/net/sk98lin/skethtool.c552
-rw-r--r--drivers/net/sk98lin/skge.c5186
-rw-r--r--drivers/net/sk98lin/skgehwt.c171
-rw-r--r--drivers/net/sk98lin/skgeinit.c2151
-rw-r--r--drivers/net/sk98lin/skgemib.c1082
-rw-r--r--drivers/net/sk98lin/skgepnmi.c8359
-rw-r--r--drivers/net/sk98lin/skgesirq.c2251
-rw-r--r--drivers/net/sk98lin/ski2c.c1296
-rw-r--r--drivers/net/sk98lin/sklm80.c213
-rw-r--r--drivers/net/sk98lin/skproc.c265
-rw-r--r--drivers/net/sk98lin/skqueue.c179
-rw-r--r--drivers/net/sk98lin/skrlmt.c3258
-rw-r--r--drivers/net/sk98lin/sktimer.c250
-rw-r--r--drivers/net/sk98lin/skvpd.c1197
-rw-r--r--drivers/net/sk98lin/skxmac2.c4607
-rw-r--r--drivers/net/sk_g16.c2066
-rw-r--r--drivers/net/sk_g16.h165
-rw-r--r--drivers/net/sk_mca.c1217
-rw-r--r--drivers/net/sk_mca.h172
-rw-r--r--drivers/net/skfp/Makefile20
-rw-r--r--drivers/net/skfp/can.c83
-rw-r--r--drivers/net/skfp/cfm.c627
-rw-r--r--drivers/net/skfp/drvfbi.c1529
-rw-r--r--drivers/net/skfp/ecm.c536
-rw-r--r--drivers/net/skfp/ess.c720
-rw-r--r--drivers/net/skfp/fplustm.c1561
-rw-r--r--drivers/net/skfp/h/cmtdef.h763
-rw-r--r--drivers/net/skfp/h/fddi.h69
-rw-r--r--drivers/net/skfp/h/fddimib.h349
-rw-r--r--drivers/net/skfp/h/fplustm.h274
-rw-r--r--drivers/net/skfp/h/hwmtm.h424
-rw-r--r--drivers/net/skfp/h/lnkstat.h84
-rw-r--r--drivers/net/skfp/h/mbuf.h54
-rw-r--r--drivers/net/skfp/h/osdef1st.h123
-rw-r--r--drivers/net/skfp/h/sba.h142
-rw-r--r--drivers/net/skfp/h/sba_def.h76
-rw-r--r--drivers/net/skfp/h/skfbi.h1919
-rw-r--r--drivers/net/skfp/h/skfbiinc.h123
-rw-r--r--drivers/net/skfp/h/smc.h471
-rw-r--r--drivers/net/skfp/h/smt.h882
-rw-r--r--drivers/net/skfp/h/smt_p.h326
-rw-r--r--drivers/net/skfp/h/smtstate.h106
-rw-r--r--drivers/net/skfp/h/supern_2.h1059
-rw-r--r--drivers/net/skfp/h/targethw.h169
-rw-r--r--drivers/net/skfp/h/targetos.h165
-rw-r--r--drivers/net/skfp/h/types.h39
-rw-r--r--drivers/net/skfp/hwmtm.c2219
-rw-r--r--drivers/net/skfp/hwt.c305
-rw-r--r--drivers/net/skfp/lnkstat.c204
-rw-r--r--drivers/net/skfp/pcmplc.c2024
-rw-r--r--drivers/net/skfp/pmf.c1671
-rw-r--r--drivers/net/skfp/queue.c173
-rw-r--r--drivers/net/skfp/rmt.c654
-rw-r--r--drivers/net/skfp/skfddi.c2293
-rw-r--r--drivers/net/skfp/smt.c2097
-rw-r--r--drivers/net/skfp/smtdef.c360
-rw-r--r--drivers/net/skfp/smtinit.c125
-rw-r--r--drivers/net/skfp/smtparse.c467
-rw-r--r--drivers/net/skfp/smttimer.c156
-rw-r--r--drivers/net/skfp/srf.c429
-rw-r--r--drivers/net/slhc.c768
-rw-r--r--drivers/net/slip.c1522
-rw-r--r--drivers/net/slip.h121
-rw-r--r--drivers/net/smc-mca.c508
-rw-r--r--drivers/net/smc-mca.h61
-rw-r--r--drivers/net/smc-ultra.c615
-rw-r--r--drivers/net/smc-ultra32.c454
-rw-r--r--drivers/net/smc9194.c1631
-rw-r--r--drivers/net/smc9194.h241
-rw-r--r--drivers/net/smc91x.c2343
-rw-r--r--drivers/net/smc91x.h1032
-rw-r--r--drivers/net/sonic.c616
-rw-r--r--drivers/net/sonic.h483
-rw-r--r--drivers/net/starfire.c2218
-rw-r--r--drivers/net/starfire_firmware.pl31
-rw-r--r--drivers/net/stnic.c320
-rw-r--r--drivers/net/sun3_82586.c1211
-rw-r--r--drivers/net/sun3_82586.h318
-rw-r--r--drivers/net/sun3lance.c965
-rw-r--r--drivers/net/sunbmac.c1324
-rw-r--r--drivers/net/sunbmac.h356
-rw-r--r--drivers/net/sundance.c1785
-rw-r--r--drivers/net/sungem.c3204
-rw-r--r--drivers/net/sungem.h1051
-rw-r--r--drivers/net/sungem_phy.c872
-rw-r--r--drivers/net/sungem_phy.h117
-rw-r--r--drivers/net/sunhme.c3426
-rw-r--r--drivers/net/sunhme.h515
-rw-r--r--drivers/net/sunlance.c1614
-rw-r--r--drivers/net/sunqe.c1043
-rw-r--r--drivers/net/sunqe.h351
-rw-r--r--drivers/net/tc35815.c1745
-rw-r--r--drivers/net/tg3.c9083
-rw-r--r--drivers/net/tg3.h2206
-rw-r--r--drivers/net/tlan.c3304
-rw-r--r--drivers/net/tlan.h540
-rw-r--r--drivers/net/tokenring/3c359.c1830
-rw-r--r--drivers/net/tokenring/3c359.h290
-rw-r--r--drivers/net/tokenring/3c359_microcode.h1581
-rw-r--r--drivers/net/tokenring/Kconfig186
-rw-r--r--drivers/net/tokenring/Makefile15
-rw-r--r--drivers/net/tokenring/abyss.c481
-rw-r--r--drivers/net/tokenring/abyss.h58
-rw-r--r--drivers/net/tokenring/ibmtr.c1987
-rw-r--r--drivers/net/tokenring/lanstreamer.c2011
-rw-r--r--drivers/net/tokenring/lanstreamer.h358
-rw-r--r--drivers/net/tokenring/madgemc.c800
-rw-r--r--drivers/net/tokenring/madgemc.h70
-rw-r--r--drivers/net/tokenring/olympic.c1786
-rw-r--r--drivers/net/tokenring/olympic.h322
-rw-r--r--drivers/net/tokenring/proteon.c432
-rw-r--r--drivers/net/tokenring/skisa.c442
-rw-r--r--drivers/net/tokenring/smctr.c5742
-rw-r--r--drivers/net/tokenring/smctr.h1588
-rw-r--r--drivers/net/tokenring/smctr_firmware.h979
-rw-r--r--drivers/net/tokenring/tms380tr.c2410
-rw-r--r--drivers/net/tokenring/tms380tr.h1141
-rw-r--r--drivers/net/tokenring/tmspci.c267
-rw-r--r--drivers/net/tulip/21142.c245
-rw-r--r--drivers/net/tulip/Kconfig166
-rw-r--r--drivers/net/tulip/Makefile17
-rw-r--r--drivers/net/tulip/de2104x.c2187
-rw-r--r--drivers/net/tulip/de4x5.c5778
-rw-r--r--drivers/net/tulip/de4x5.h1029
-rw-r--r--drivers/net/tulip/dmfe.c2066
-rw-r--r--drivers/net/tulip/eeprom.c357
-rw-r--r--drivers/net/tulip/interrupt.c786
-rw-r--r--drivers/net/tulip/media.c562
-rw-r--r--drivers/net/tulip/pnic.c172
-rw-r--r--drivers/net/tulip/pnic2.c407
-rw-r--r--drivers/net/tulip/timer.c175
-rw-r--r--drivers/net/tulip/tulip.h493
-rw-r--r--drivers/net/tulip/tulip_core.c1861
-rw-r--r--drivers/net/tulip/winbond-840.c1716
-rw-r--r--drivers/net/tulip/xircom_cb.c1277
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c1748
-rw-r--r--drivers/net/tun.c883
-rw-r--r--drivers/net/typhoon-firmware.h3778
-rw-r--r--drivers/net/typhoon.c2673
-rw-r--r--drivers/net/typhoon.h619
-rw-r--r--drivers/net/via-rhine.c2035
-rw-r--r--drivers/net/via-velocity.c3303
-rw-r--r--drivers/net/via-velocity.h1879
-rw-r--r--drivers/net/wan/Kconfig607
-rw-r--r--drivers/net/wan/Makefile86
-rw-r--r--drivers/net/wan/c101.c446
-rw-r--r--drivers/net/wan/cosa.c2100
-rw-r--r--drivers/net/wan/cosa.h117
-rw-r--r--drivers/net/wan/cycx_drv.c586
-rw-r--r--drivers/net/wan/cycx_main.c351
-rw-r--r--drivers/net/wan/cycx_x25.c1609
-rw-r--r--drivers/net/wan/dlci.c566
-rw-r--r--drivers/net/wan/dscc4.c2074
-rw-r--r--drivers/net/wan/farsync.c2712
-rw-r--r--drivers/net/wan/farsync.h357
-rw-r--r--drivers/net/wan/hd64570.h241
-rw-r--r--drivers/net/wan/hd64572.h527
-rw-r--r--drivers/net/wan/hd6457x.c853
-rw-r--r--drivers/net/wan/hdlc_cisco.c330
-rw-r--r--drivers/net/wan/hdlc_fr.c1237
-rw-r--r--drivers/net/wan/hdlc_generic.c343
-rw-r--r--drivers/net/wan/hdlc_ppp.c115
-rw-r--r--drivers/net/wan/hdlc_raw.c90
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c107
-rw-r--r--drivers/net/wan/hdlc_x25.c219
-rw-r--r--drivers/net/wan/hostess_sv11.c420
-rw-r--r--drivers/net/wan/lapbether.c465
-rw-r--r--drivers/net/wan/lmc/Makefile17
-rw-r--r--drivers/net/wan/lmc/lmc.h33
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c85
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h52
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h257
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2201
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1246
-rw-r--r--drivers/net/wan/lmc/lmc_media.h65
-rw-r--r--drivers/net/wan/lmc/lmc_prot.h15
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c249
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h16
-rw-r--r--drivers/net/wan/lmc/lmc_var.h570
-rw-r--r--drivers/net/wan/n2.c562
-rw-r--r--drivers/net/wan/pc300-falc-lh.h1238
-rw-r--r--drivers/net/wan/pc300.h497
-rw-r--r--drivers/net/wan/pc300_drv.c3692
-rw-r--r--drivers/net/wan/pc300_tty.c1095
-rw-r--r--drivers/net/wan/pci200syn.c488
-rw-r--r--drivers/net/wan/sbni.c1735
-rw-r--r--drivers/net/wan/sbni.h141
-rw-r--r--drivers/net/wan/sdla.c1676
-rw-r--r--drivers/net/wan/sdla_chdlc.c4433
-rw-r--r--drivers/net/wan/sdla_fr.c5068
-rw-r--r--drivers/net/wan/sdla_ft1.c344
-rw-r--r--drivers/net/wan/sdla_ppp.c3429
-rw-r--r--drivers/net/wan/sdla_x25.c5496
-rw-r--r--drivers/net/wan/sdladrv.c2318
-rw-r--r--drivers/net/wan/sdlamain.c1341
-rw-r--r--drivers/net/wan/sealevel.c469
-rw-r--r--drivers/net/wan/syncppp.c1488
-rw-r--r--drivers/net/wan/wanpipe_multppp.c2357
-rw-r--r--drivers/net/wan/wanxl.c839
-rw-r--r--drivers/net/wan/wanxl.h152
-rw-r--r--drivers/net/wan/wanxlfw.S895
-rw-r--r--drivers/net/wan/wanxlfw.inc_shipped158
-rw-r--r--drivers/net/wan/x25_asy.c844
-rw-r--r--drivers/net/wan/x25_asy.h50
-rw-r--r--drivers/net/wan/z85230.c1851
-rw-r--r--drivers/net/wan/z85230.h449
-rw-r--r--drivers/net/wd.c559
-rw-r--r--drivers/net/wireless/Kconfig365
-rw-r--r--drivers/net/wireless/Makefile33
-rw-r--r--drivers/net/wireless/README25
-rw-r--r--drivers/net/wireless/airo.c7667
-rw-r--r--drivers/net/wireless/airo_cs.c622
-rw-r--r--drivers/net/wireless/airport.c304
-rw-r--r--drivers/net/wireless/arlan-main.c1896
-rw-r--r--drivers/net/wireless/arlan-proc.c1262
-rw-r--r--drivers/net/wireless/arlan.h541
-rw-r--r--drivers/net/wireless/atmel.c4272
-rw-r--r--drivers/net/wireless/atmel.h43
-rw-r--r--drivers/net/wireless/atmel_cs.c708
-rw-r--r--drivers/net/wireless/atmel_pci.c89
-rw-r--r--drivers/net/wireless/hermes.c554
-rw-r--r--drivers/net/wireless/hermes.h481
-rw-r--r--drivers/net/wireless/hermes_rid.h148
-rw-r--r--drivers/net/wireless/i82586.h413
-rw-r--r--drivers/net/wireless/i82593.h224
-rw-r--r--drivers/net/wireless/ieee802_11.h78
-rw-r--r--drivers/net/wireless/netwave_cs.c1736
-rw-r--r--drivers/net/wireless/orinoco.c4243
-rw-r--r--drivers/net/wireless/orinoco.h153
-rw-r--r--drivers/net/wireless/orinoco_cs.c636
-rw-r--r--drivers/net/wireless/orinoco_pci.c417
-rw-r--r--drivers/net/wireless/orinoco_plx.c419
-rw-r--r--drivers/net/wireless/orinoco_tmd.c276
-rw-r--r--drivers/net/wireless/prism54/Makefile8
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c260
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.h173
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2750
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h51
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h507
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c956
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h216
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c519
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h73
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c339
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c513
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h145
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c907
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.h59
-rw-r--r--drivers/net/wireless/prism54/prismcompat.h44
-rw-r--r--drivers/net/wireless/ray_cs.c2957
-rw-r--r--drivers/net/wireless/ray_cs.h78
-rw-r--r--drivers/net/wireless/rayctl.h732
-rw-r--r--drivers/net/wireless/strip.c2843
-rw-r--r--drivers/net/wireless/todo.txt15
-rw-r--r--drivers/net/wireless/wavelan.c4452
-rw-r--r--drivers/net/wireless/wavelan.h370
-rw-r--r--drivers/net/wireless/wavelan.p.h716
-rw-r--r--drivers/net/wireless/wavelan_cs.c4914
-rw-r--r--drivers/net/wireless/wavelan_cs.h386
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h813
-rw-r--r--drivers/net/wireless/wl3501.h614
-rw-r--r--drivers/net/wireless/wl3501_cs.c2270
-rw-r--r--drivers/net/yellowfin.c1499
-rw-r--r--drivers/net/znet.c948
-rw-r--r--drivers/net/zorro8390.c439
630 files changed, 617265 insertions, 0 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
new file mode 100644
index 000000000000..f6d51ce34b00
--- /dev/null
+++ b/drivers/net/3c501.c
@@ -0,0 +1,940 @@
+/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
+/*
+ Written 1992,1993,1994 Donald Becker
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This is a device driver for the 3Com Etherlink 3c501.
+ Do not purchase this card, even as a joke. Its performance is horrible,
+ and it breaks in many ways.
+
+ The original author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Fixed (again!) the missing interrupt locking on TX/RX shifting.
+ Alan Cox <Alan.Cox@linux.org>
+
+ Removed calls to init_etherdev since they are no longer needed, and
+ cleaned up modularization just a bit. The driver still allows only
+ the default address for cards when loaded as a module, but that's
+ really less braindead than anyone using a 3c501 board. :)
+ 19950208 (invid@msen.com)
+
+ Added traps for interrupts hitting the window as we clear and TX load
+ the board. Now getting 150K/second FTP with a 3c501 card. Still playing
+ with a TX-TX optimisation to see if we can touch 180-200K/second, which
+ seems to be the theoretical maximum.
+ 19950402 Alan Cox <Alan.Cox@linux.org>
+
+ Cleaned up for 2.3.x because we broke SMP now.
+ 20000208 Alan Cox <alan@redhat.com>
+
+ Check up pass for 2.5. Nothing significant changed
+ 20021009 Alan Cox <alan@redhat.com>
+
+ Fixed zero fill corner case
+ 20030104 Alan Cox <alan@redhat.com>
+
+
+ For the avoidance of doubt the "preferred form" of this code is one which
+ is in an open non patent encumbered format. Where cryptographic key signing
+ forms part of the process of creating an executable the information
+ including keys needed to generate an equivalently functional executable
+ are deemed to be part of the source code.
+
+*/
+
+
+/**
+ * DOC: 3c501 Card Notes
+ *
+ * Some notes on this thing if you have to hack it. [Alan]
+ *
+ * Some documentation is available from 3Com. Due to the board's age,
+ * standard responses when you ask for this will range from 'be serious'
+ * to 'give it to a museum'. The documentation is incomplete and mostly
+ * of historical interest anyway.
+ *
+ * The basic system is a single buffer which can be used to receive or
+ * transmit a packet. A third command mode exists when you are setting
+ * things up.
+ *
+ * If it's transmitting it's not receiving and vice versa. In fact the
+ * time to get the board back into useful state after an operation is
+ * quite large.
+ *
+ * The driver works by keeping the board in receive mode waiting for a
+ * packet to arrive. When one arrives it is copied out of the buffer
+ * and delivered to the kernel. The card is reloaded and off we go.
+ *
+ * When transmitting lp->txing is set and the card is reset (from
+ * receive mode) [possibly losing a packet just received] to command
+ * mode. A packet is loaded and transmit mode triggered. The interrupt
+ * handler runs different code for transmit interrupts and can handle
+ * returning to receive mode or retransmissions (yes you have to help
+ * out with those too).
+ *
+ * DOC: Problems
+ *
+ * There are a wide variety of undocumented error returns from the card
+ * and you basically have to kick the board and pray if they turn up. Most
+ * only occur under extreme load or if you do something the board doesn't
+ * like (eg touching a register at the wrong time).
+ *
+ * The driver is less efficient than it could be. It switches through
+ * receive mode even if more transmits are queued. If this worries you, buy
+ * a real Ethernet card.
+ *
+ * The combination of slow receive restart and no real multicast
+ * filter makes the board unusable with a kernel compiled for IP
+ * multicasting in a real multicast environment. That's down to the board,
+ * but even with no multicast programs running, a multicast IP kernel is
+ * in group 224.0.0.1 and you will therefore be listening to all multicasts.
+ * One nv conference running over that Ethernet and you can give up.
+ *
+ */
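A minimal sketch of the mode handling described above (illustrative only, not part of this patch; the sketch_* helpers are hypothetical stand-ins for the real logic in el_interrupt() and el_receive() further down):

/*
 * Sketch: the card has one buffer, so the driver is in exactly one mode
 * at a time. lp->txing records whether that buffer holds an outgoing
 * frame, and the interrupt handler dispatches on it.
 */
static irqreturn_t sketch_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	spin_lock(&lp->lock);
	if (lp->txing)
		sketch_handle_tx_status(dev);	/* hypothetical: check TX_STATUS, retransmit or wake queue */
	else
		sketch_handle_rx(dev);		/* hypothetical: pull the packet out over PIO */

	outb(AX_RX, AX_CMD);			/* whatever happened, drop back into receive mode */
	outw(0x00, RX_BUF_CLR);
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}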
+
+#define DRV_NAME "3c501"
+#define DRV_VERSION "2002/10/09"
+
+
+static const char version[] =
+ DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@redhat.com).\n";
+
+/*
+ * Braindamage remaining:
+ * The 3c501 board.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/config.h> /* for CONFIG_IP_MULTICAST */
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#include "3c501.h"
+
+/*
+ * The boilerplate probe code.
+ */
+
+static int io=0x280;
+static int irq=5;
+static int mem_start;
+
+/**
+ * el1_probe: - probe for a 3c501
+ * @dev: The device structure passed in to probe.
+ *
+ * This can be called from two places. The network layer will probe using
+ * a device structure passed in with the probe information completed. For a
+ * modular driver we use #init_module to fill in our own structure and probe
+ * for it.
+ *
+ * Returns the new device structure on success, or an ERR_PTR value: -ENXIO if
+ * asked not to probe and -ENODEV if asked to probe and nothing is found.
+ */
+
+struct net_device * __init el1_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ static unsigned ports[] = { 0x280, 0x300, 0};
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ mem_start = dev->mem_start & 7;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = el1_probe1(dev, io);
+ } else if (io != 0) {
+ err = -ENXIO; /* Don't probe at all. */
+ } else {
+ for (port = ports; *port && el1_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, EL1_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/**
+ * el1_probe1:
+ * @dev: The device structure to use
+ * @ioaddr: An I/O address to probe at.
+ *
+ * The actual probe. This is iterated over by #el1_probe in order to
+ * check all the applicable device locations.
+ *
+ * Returns 0 for a success, in which case the device is activated,
+ * -EAGAIN if the IRQ is in use by another driver, and -ENODEV if the
+ * board cannot be found.
+ */
+
+static int __init el1_probe1(struct net_device *dev, int ioaddr)
+{
+ struct net_local *lp;
+ const char *mname; /* Vendor name */
+ unsigned char station_addr[6];
+ int autoirq = 0;
+ int i;
+
+ /*
+ * Reserve I/O resource for exclusive use by this driver
+ */
+
+ if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
+ return -ENODEV;
+
+ /*
+ * Read the station address PROM data from the special port.
+ */
+
+ for (i = 0; i < 6; i++)
+ {
+ outw(i, ioaddr + EL1_DATAPTR);
+ station_addr[i] = inb(ioaddr + EL1_SAPROM);
+ }
+ /*
+ * Check the first three octets of the S.A. for 3Com's prefix, or
+ * for the Sager NP943 prefix.
+ */
+
+ if (station_addr[0] == 0x02 && station_addr[1] == 0x60
+ && station_addr[2] == 0x8c)
+ {
+ mname = "3c501";
+ } else if (station_addr[0] == 0x00 && station_addr[1] == 0x80
+ && station_addr[2] == 0xC8)
+ {
+ mname = "NP943";
+ }
+ else {
+ release_region(ioaddr, EL1_IO_EXTENT);
+ return -ENODEV;
+ }
+
+ /*
+ * We auto-IRQ by shutting off the interrupt line and letting it float
+ * high.
+ */
+
+ dev->irq = irq;
+
+ if (dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ inb(RX_STATUS); /* Clear pending interrupts. */
+ inb(TX_STATUS);
+ outb(AX_LOOP + 1, AX_CMD);
+
+ outb(0x00, AX_CMD);
+
+ mdelay(20);
+ autoirq = probe_irq_off(irq_mask);
+
+ if (autoirq == 0)
+ {
+ printk(KERN_WARNING "%s probe at %#x failed to detect IRQ line.\n",
+ mname, ioaddr);
+ release_region(ioaddr, EL1_IO_EXTENT);
+ return -EAGAIN;
+ }
+ }
+
+ outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
+ dev->base_addr = ioaddr;
+ memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+
+ if (mem_start & 0xf)
+ el_debug = mem_start & 0x7;
+ if (autoirq)
+ dev->irq = autoirq;
+
+ printk(KERN_INFO "%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr,
+ autoirq ? "auto":"assigned ", dev->irq);
+
+#ifdef CONFIG_IP_MULTICAST
+ printk(KERN_WARNING "WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+#endif
+
+ if (el_debug)
+ printk(KERN_DEBUG "%s", version);
+
+ memset(dev->priv, 0, sizeof(struct net_local));
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+
+ /*
+ * The EL1-specific entries in the device structure.
+ */
+
+ dev->open = &el_open;
+ dev->hard_start_xmit = &el_start_xmit;
+ dev->tx_timeout = &el_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->stop = &el1_close;
+ dev->get_stats = &el1_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ return 0;
+}
+
+/**
+ * el_open:
+ * @dev: device that is being opened
+ *
+ * When an ifconfig is issued which changes the device flags to include
+ * IFF_UP this function is called. It is only called when the change
+ * occurs, not when the interface remains up. #el1_close will be called
+ * when it goes down.
+ *
+ * Returns 0 for a successful open, or -EAGAIN if someone has run off
+ * with our interrupt line.
+ */
+
+static int el_open(struct net_device *dev)
+{
+ int retval;
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (el_debug > 2)
+ printk(KERN_DEBUG "%s: Doing el_open()...", dev->name);
+
+ if ((retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev)))
+ return retval;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ el_reset(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ lp->txing = 0; /* Board in RX mode */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * el_timeout:
+ * @dev: The 3c501 card that has timed out
+ *
+ * Attempt to restart the board. This is basically a mixture of extreme
+ * violence and prayer
+ *
+ */
+
+static void el_timeout(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el_debug)
+ printk (KERN_DEBUG "%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS));
+ lp->stats.tx_errors++;
+ outb(TX_NORM, TX_CMD);
+ outb(RX_NORM, RX_CMD);
+ outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ lp->txing = 0; /* Ripped back in to RX */
+ netif_wake_queue(dev);
+}
+
+
+/**
+ * el_start_xmit:
+ * @skb: The packet that is queued to be sent
+ * @dev: The 3c501 card we want to throw it down
+ *
+ * Attempt to send a packet to a 3c501 card. There are some interesting
+ * catches here because the 3c501 is an extremely old and therefore
+ * stupid piece of technology.
+ *
+ * If we are handling an interrupt on the other CPU we cannot load a packet
+ * as we may still be attempting to retrieve the last RX packet buffer.
+ *
+ * When a transmit times out we dump the card into control mode and just
+ * start again. It happens enough that it isn't worth logging.
+ *
+ * We avoid holding the spin locks when doing the packet load to the board.
+ * The device is very slow, and its DMA mode is even slower. If we held the
+ * lock while loading 1500 bytes onto the controller we would drop a lot of
+ * serial port characters. This requires we do extra locking, but we have
+ * no real choice.
+ */
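In outline, the handshake described above looks like this (an illustrative sketch, not part of the patch; sketch_pio_copy() is a hypothetical stand-in for the outsb() load done by el_start_xmit() below):

/*
 * Sketch: drop the lock around the slow PIO load; lp->loading lets the
 * interrupt handler signal that a receive disturbed the load, in which
 * case the whole load is simply repeated.
 */
static void sketch_load_and_fire(struct net_device *dev, struct sk_buff *skb)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	for (;;) {
		lp->loading = 1;			/* a buffer load is in progress */
		lp->txing = 1;
		spin_unlock_irqrestore(&lp->lock, flags);

		sketch_pio_copy(dev, skb);		/* hypothetical: slow byte-at-a-time copy */

		if (lp->loading != 2)			/* nothing disturbed the load: fire */
			break;
		spin_lock_irqsave(&lp->lock, flags);	/* a receive burped us; reload */
	}
	outb(AX_XMIT, AX_CMD);				/* trigger the transmit */
	lp->loading = 0;
}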
+
+static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ /*
+ * Avoid incoming interrupts between us flipping txing and flipping
+ * mode as the driver assumes txing is a faithful indicator of card
+ * state
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /*
+ * Avoid timer-based retransmission conflicts.
+ */
+
+ netif_stop_queue(dev);
+
+ do
+ {
+ int len = skb->len;
+ int pad = 0;
+ int gp_start;
+ unsigned char *buf = skb->data;
+
+ if (len < ETH_ZLEN)
+ pad = ETH_ZLEN - len;
+
+ gp_start = 0x800 - ( len + pad );
+
+ lp->tx_pkt_start = gp_start;
+ lp->collisions = 0;
+
+ lp->stats.tx_bytes += skb->len;
+
+ /*
+ * Command mode with status cleared should [in theory]
+ * mean no more interrupts can be pending on the card.
+ */
+
+ outb_p(AX_SYS, AX_CMD);
+ inb_p(RX_STATUS);
+ inb_p(TX_STATUS);
+
+ lp->loading = 1;
+ lp->txing = 1;
+
+ /*
+ * Turn interrupts back on while we spend a pleasant afternoon
+ * loading bytes into the board
+ */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
+ outsb(DATAPORT,buf,len); /* load buffer (usual thing each byte increments the pointer) */
+ if (pad) {
+ while(pad--) /* Zero fill buffer tail */
+ outb(0, DATAPORT);
+ }
+ outw(gp_start, GP_LOW); /* the board reuses the same register */
+
+ if(lp->loading != 2)
+ {
+ outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
+ lp->loading=0;
+ dev->trans_start = jiffies;
+ if (el_debug > 2)
+ printk(KERN_DEBUG " queued xmit.\n");
+ dev_kfree_skb (skb);
+ return 0;
+ }
+ /* A receive upset our load, despite our best efforts */
+ if(el_debug>2)
+ printk(KERN_DEBUG "%s: burped during tx load.\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ }
+ while(1);
+
+}
+
+/**
+ * el_interrupt:
+ * @irq: Interrupt number
+ * @dev_id: The 3c501 that burped
+ * @regs: Register data (surplus to our requirements)
+ *
+ * Handle the ether interface interrupts. The 3c501 needs a lot more
+ * hand holding than most cards. In particular we get a transmit interrupt
+ * with a collision error because the board firmware isn't capable of rewinding
+ * its own transmit buffer pointers. It can however count to 16 for us.
+ *
+ * On the receive side the card is also very dumb. It has no buffering to
+ * speak of. We simply pull the packet out of its PIO buffer (which is slow)
+ * and queue it for the kernel. Then we reset the card for the next packet.
+ *
+ * We sometimes get surprise interrupts late, both because the SMP IRQ delivery
+ * is message passing and because the card sometimes seems to deliver late. I
+ * think if it is part way through a receive and the mode is changed it carries
+ * on receiving and sends us an interrupt. We have to band aid all these cases
+ * to get a sensible 150kbytes/second performance. Even then you want a small
+ * TCP window.
+ */
+
+static irqreturn_t el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr;
+ int axsr; /* Aux. status reg. */
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ /*
+ * What happened ?
+ */
+
+ axsr = inb(AX_STATUS);
+
+ /*
+ * Log it
+ */
+
+ if (el_debug > 3)
+ printk(KERN_DEBUG "%s: el_interrupt() aux=%#02x", dev->name, axsr);
+
+ if(lp->loading==1 && !lp->txing)
+ printk(KERN_WARNING "%s: Inconsistent state loading while not in tx\n",
+ dev->name);
+
+ if (lp->txing)
+ {
+
+ /*
+ * Board in transmit mode. May be loading. If we are
+ * loading we shouldn't have got this.
+ */
+
+ int txsr = inb(TX_STATUS);
+
+ if(lp->loading==1)
+ {
+ if(el_debug > 2)
+ {
+ printk(KERN_DEBUG "%s: Interrupt while loading [", dev->name);
+ printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
+ }
+ lp->loading=2; /* Force a reload */
+ spin_unlock(&lp->lock);
+ goto out;
+ }
+
+ if (el_debug > 6)
+ printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
+
+ if ((axsr & 0x80) && (txsr & TX_READY) == 0)
+ {
+ /*
+ * FIXME: is there a logic to whether to keep on trying or
+ * reset immediately ?
+ */
+ if(el_debug>1)
+ printk(KERN_DEBUG "%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x"
+ " gp=%03x rp=%03x.\n", dev->name, txsr, axsr,
+ inw(ioaddr + EL1_DATAPTR), inw(ioaddr + EL1_RXPTR));
+ lp->txing = 0;
+ netif_wake_queue(dev);
+ }
+ else if (txsr & TX_16COLLISIONS)
+ {
+ /*
+ * Timed out
+ */
+ if (el_debug)
+ printk (KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n",dev->name);
+ outb(AX_SYS, AX_CMD);
+ lp->txing = 0;
+ lp->stats.tx_aborted_errors++;
+ netif_wake_queue(dev);
+ }
+ else if (txsr & TX_COLLISION)
+ {
+ /*
+ * Retrigger xmit.
+ */
+
+ if (el_debug > 6)
+ printk(KERN_DEBUG " retransmitting after a collision.\n");
+ /*
+ * Poor little chip can't reset its own start pointer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ outw(lp->tx_pkt_start, GP_LOW);
+ outb(AX_XMIT, AX_CMD);
+ lp->stats.collisions++;
+ spin_unlock(&lp->lock);
+ goto out;
+ }
+ else
+ {
+ /*
+ * It worked.. we will now fall through and receive
+ */
+ lp->stats.tx_packets++;
+ if (el_debug > 6)
+ printk(KERN_DEBUG " Tx succeeded %s\n",
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
+ /*
+ * This is safe; the interrupt is atomic WRT itself.
+ */
+
+ lp->txing = 0;
+ netif_wake_queue(dev); /* In case more to transmit */
+ }
+ }
+ else
+ {
+ /*
+ * In receive mode.
+ */
+
+ int rxsr = inb(RX_STATUS);
+ if (el_debug > 5)
+ printk(KERN_DEBUG " rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS),inw(RX_LOW));
+ /*
+ * Just reading rx_status fixes most errors.
+ */
+ if (rxsr & RX_MISSED)
+ lp->stats.rx_missed_errors++;
+ else if (rxsr & RX_RUNT)
+ { /* Handled to avoid board lock-up. */
+ lp->stats.rx_length_errors++;
+ if (el_debug > 5)
+ printk(KERN_DEBUG " runt.\n");
+ }
+ else if (rxsr & RX_GOOD)
+ {
+ /*
+ * Receive worked.
+ */
+ el_receive(dev);
+ }
+ else
+ {
+ /*
+ * Nothing? Something is broken!
+ */
+ if (el_debug > 2)
+ printk(KERN_DEBUG "%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ dev->name, rxsr);
+ el_reset(dev);
+ }
+ if (el_debug > 3)
+ printk(KERN_DEBUG ".\n");
+ }
+
+ /*
+ * Move into receive mode
+ */
+
+ outb(AX_RX, AX_CMD);
+ outw(0x00, RX_BUF_CLR);
+ inb(RX_STATUS); /* Be certain that interrupts are cleared. */
+ inb(TX_STATUS);
+ spin_unlock(&lp->lock);
+out:
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * el_receive:
+ * @dev: Device to pull the packets from
+ *
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good. In particular we occasionally
+ * get wild packet sizes from the card. If the packet seems sane we PIO it
+ * off the card and queue it for the protocol layers.
+ */
+
+static void el_receive(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ pkt_len = inw(RX_LOW);
+
+ if (el_debug > 4)
+ printk(KERN_DEBUG " el_receive %d.\n", pkt_len);
+
+ if ((pkt_len < 60) || (pkt_len > 1536))
+ {
+ if (el_debug)
+ printk(KERN_DEBUG "%s: bogus packet, length=%d\n", dev->name, pkt_len);
+ lp->stats.rx_over_errors++;
+ return;
+ }
+
+ /*
+ * Command mode so we can empty the buffer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ skb = dev_alloc_skb(pkt_len+2);
+
+ /*
+ * Start of frame
+ */
+
+ outw(0x00, GP_LOW);
+ if (skb == NULL)
+ {
+ printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* Force 16 byte alignment */
+ skb->dev = dev;
+ /*
+ * The read increments through the bytes. The interrupt
+ * handler will fix the pointer when it returns to
+ * receive mode.
+ */
+ insb(DATAPORT, skb_put(skb,pkt_len), pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+ }
+ return;
+}
+
+/**
+ * el_reset: Reset a 3c501 card
+ * @dev: The 3c501 card about to get zapped
+ *
+ * Even resetting a 3c501 isn't simple. When you activate reset, it loses all
+ * its configuration. You must hold the lock when doing this. The function
+ * cannot take the lock itself as it is callable from the irq handler.
+ */
+
+static void el_reset(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el_debug> 2)
+ printk(KERN_INFO "3c501 reset...");
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ outb(AX_LOOP, AX_CMD); /* Aux control, irq and loopback enabled */
+ {
+ int i;
+ for (i = 0; i < 6; i++) /* Set the station address. */
+ outb(dev->dev_addr[i], ioaddr + i);
+ }
+
+ outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
+ outb(RX_NORM, RX_CMD); /* Set Rx commands. */
+ inb(RX_STATUS); /* Clear status. */
+ inb(TX_STATUS);
+ lp->txing = 0;
+}
+
+/**
+ * el1_close:
+ * @dev: 3c501 card to shut down
+ *
+ * Close a 3c501 card. The IFF_UP flag has been cleared by the user via
+ * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
+ * and then disable the interrupts. Finally we reset the chip. The effects
+ * of the rest will be cleaned up by #el1_open. Always returns 0 indicating
+ * a success.
+ */
+
+static int el1_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk(KERN_INFO "%s: Shutting down Ethernet card at %#x.\n", dev->name, ioaddr);
+
+ netif_stop_queue(dev);
+
+ /*
+ * Free and disable the IRQ.
+ */
+
+ free_irq(dev->irq, dev);
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+
+ return 0;
+}
+
+/**
+ * el1_get_stats:
+ * @dev: The card to get the statistics for
+ *
+ * In smarter devices this function is needed to pull statistics off the
+ * board itself. The 3c501 has no hardware statistics. We maintain them all
+ * so they are by definition always up to date.
+ *
+ * Returns the statistics for the card from the card private data
+ */
+
+static struct net_device_stats *el1_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+/**
+ * set_multicast_list:
+ * @dev: The device to adjust
+ *
+ * Set or clear the multicast filter for this adaptor to use the best-effort
+ * filtering supported. The 3c501 supports only three modes of filtering.
+ * It always receives broadcasts and packets for itself. On top of this you
+ * can optionally receive all packets, or all multicast packets.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb(RX_PROM, RX_CMD);
+ inb(RX_STATUS);
+ }
+ else if (dev->mc_list || dev->flags&IFF_ALLMULTI)
+ {
+ outb(RX_MULT, RX_CMD); /* Multicast or all multicast is the same */
+ inb(RX_STATUS); /* Clear status. */
+ }
+ else
+ {
+ outb(RX_NORM, RX_CMD);
+ inb(RX_STATUS);
+ }
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+
+static struct net_device *dev_3c501;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "EtherLink I/O base address");
+MODULE_PARM_DESC(irq, "EtherLink IRQ number");
+
+/**
+ * init_module:
+ *
+ * When the driver is loaded as a module this function is called. We fake up
+ * a device structure with the base I/O and interrupt set as if it were being
+ * called from Space.c. This minimises the extra code that would otherwise
+ * be required.
+ *
+ * Returns 0 for success or a negative errno if a card is not found. Returning an error
+ * here also causes the module to be unloaded
+ */
+
+int init_module(void)
+{
+ dev_3c501 = el1_probe(-1);
+ if (IS_ERR(dev_3c501))
+ return PTR_ERR(dev_3c501);
+ return 0;
+}
+
+/**
+ * cleanup_module:
+ *
+ * The module is being unloaded. We unhook our network device from the system
+ * and then free up the resources we took when the card was found.
+ */
+
+void cleanup_module(void)
+{
+ struct net_device *dev = dev_3c501;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EL1_IO_EXTENT);
+ free_netdev(dev);
+}
+
+#endif /* MODULE */
+
+MODULE_AUTHOR("Donald Becker, Alan Cox");
+MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h
new file mode 100644
index 000000000000..adb0588a4d79
--- /dev/null
+++ b/drivers/net/3c501.h
@@ -0,0 +1,93 @@
+
+/*
+ * Index to functions.
+ */
+
+static int el1_probe1(struct net_device *dev, int ioaddr);
+static int el_open(struct net_device *dev);
+static void el_timeout(struct net_device *dev);
+static int el_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el_receive(struct net_device *dev);
+static void el_reset(struct net_device *dev);
+static int el1_close(struct net_device *dev);
+static struct net_device_stats *el1_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+#define EL1_IO_EXTENT 16
+
+#ifndef EL_DEBUG
+#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
+#endif /* Anything above 5 is wordy death! */
+#define debug el_debug
+static int el_debug = EL_DEBUG;
+
+/*
+ * Board-specific info in dev->priv.
+ */
+
+struct net_local
+{
+ struct net_device_stats stats;
+ int tx_pkt_start; /* Start of the current Tx packet in the buffer. */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+ int txing; /* True if card is in TX mode */
+ spinlock_t lock; /* Serializing lock */
+};
+
+
+#define RX_STATUS (ioaddr + 0x06)
+#define RX_CMD RX_STATUS
+#define TX_STATUS (ioaddr + 0x07)
+#define TX_CMD TX_STATUS
+#define GP_LOW (ioaddr + 0x08)
+#define GP_HIGH (ioaddr + 0x09)
+#define RX_BUF_CLR (ioaddr + 0x0A)
+#define RX_LOW (ioaddr + 0x0A)
+#define RX_HIGH (ioaddr + 0x0B)
+#define SAPROM (ioaddr + 0x0C)
+#define AX_STATUS (ioaddr + 0x0E)
+#define AX_CMD AX_STATUS
+#define DATAPORT (ioaddr + 0x0F)
+#define TX_RDY 0x08 /* In TX_STATUS */
+
+#define EL1_DATAPTR 0x08
+#define EL1_RXPTR 0x0A
+#define EL1_SAPROM 0x0C
+#define EL1_DATAPORT 0x0f
+
+/*
+ * Writes to the ax command register.
+ */
+
+#define AX_OFF 0x00 /* Irq off, buffer access on */
+#define AX_SYS 0x40 /* Load the buffer */
+#define AX_XMIT 0x44 /* Transmit a packet */
+#define AX_RX 0x48 /* Receive a packet */
+#define AX_LOOP 0x0C /* Loopback mode */
+#define AX_RESET 0x80
+
+/*
+ * Normal receive mode written to RX_STATUS. We must intr on short packets
+ * to avoid bogus rx lockups.
+ */
+
+#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
+#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
+#define RX_MULT 0xE8 /* Accept multicast packets. */
+#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
+
+/*
+ * TX_STATUS register.
+ */
+
+#define TX_COLLISION 0x02
+#define TX_16COLLISIONS 0x04
+#define TX_READY 0x08
+
+#define RX_RUNT 0x08
+#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
+#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
+
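One point worth noting about the header above: the register macros expand relative to a local variable named ioaddr, so every function that uses them must have one in scope. A minimal illustration (a sketch, not part of the patch):

/*
 * Sketch: RX_STATUS, TX_STATUS and friends all reference a local
 * "ioaddr", which each caller derives from dev->base_addr.
 */
static void sketch_clear_status(struct net_device *dev)
{
	int ioaddr = dev->base_addr;	/* required by the register macros */

	inb(RX_STATUS);			/* reading the status ports clears */
	inb(TX_STATUS);			/* any pending interrupt conditions */
}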
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
new file mode 100644
index 000000000000..29dfd47f41d2
--- /dev/null
+++ b/drivers/net/3c503.c
@@ -0,0 +1,747 @@
+/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This driver should work with the 3c503 and 3c503/16. It should be used
+ in shared memory mode for best performance, although it may also work
+ in programmed-I/O mode.
+
+ Sources:
+ EtherLink II Technical Reference Manual,
+ EtherLink II/16 Technical Reference Manual Supplement,
+ 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
+
+ The Crynwr 3c503 packet driver.
+
+ Changelog:
+
+ Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
+ Paul Gortmaker : multiple card support for module users.
+ rjohnson@analogic.com : Fix up PIO interface for efficient operation.
+ Jeff Garzik : ethtool support
+
+*/
+
+#define DRV_NAME "3c503"
+#define DRV_VERSION "1.10a"
+#define DRV_RELDATE "11/17/2001"
+
+
+static const char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+#include "8390.h"
+#include "3c503.h"
+#define WRD_COUNT 4
+
+static int el2_pio_probe(struct net_device *dev);
+static int el2_probe1(struct net_device *dev, int ioaddr);
+
+/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
+static unsigned int netcard_portlist[] __initdata =
+ { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
+
+#define EL2_IO_EXTENT 16
+
+static int el2_open(struct net_device *dev);
+static int el2_close(struct net_device *dev);
+static void el2_reset_8390(struct net_device *dev);
+static void el2_init_card(struct net_device *dev);
+static void el2_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset);
+static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static struct ethtool_ops netdev_ethtool_ops;
+
+
+/* This routine probes for a memory-mapped 3c503 board by looking for
+ the "location register" at the end of the jumpered boot PROM space.
+ This works even if a PROM isn't there.
+
+   If the ethercard isn't found, there is an optional probe for an
+   ethercard jumpered to programmed-I/O mode.
+ */
+static int __init do_el2_probe(struct net_device *dev)
+{
+ int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (addr = addrs; *addr; addr++) {
+ void __iomem *p = ioremap(*addr, 1);
+ unsigned base_bits;
+ int i;
+
+ if (!p)
+ continue;
+ base_bits = readb(p);
+ iounmap(p);
+ i = ffs(base_bits) - 1;
+ if (i == -1 || base_bits != (1 << i))
+ continue;
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+#if ! defined(no_probe_nonshared_memory)
+ return el2_pio_probe(dev);
+#else
+ return -ENODEV;
+#endif
+}
+
+/* Try all of the locations that aren't obviously empty. This touches
+ a lot of locations, and is much riskier than the code above. */
+static int __init
+el2_pio_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: el2_close() handles free_irq */
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ if (ei_status.mem)
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init el2_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_el2_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/* Probe for the Etherlink II card at I/O port base IOADDR, returning
+   0 on success or a negative errno on failure.  If found, set the
+   station address and memory parameters in DEVICE. */
+static int __init
+el2_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
+ static unsigned version_printed;
+ unsigned long vendor_id;
+
+ if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* Reset and/or avoid any lurking NE2000 */
+ if (inb(ioaddr + 0x408) == 0xff) {
+ mdelay(1);
+ retval = -ENODEV;
+ goto out1;
+ }
+
+ /* We verify that it's a 3C503 board by checking the first three octets
+ of its ethernet address. */
+ iobase_reg = inb(ioaddr+0x403);
+ membase_reg = inb(ioaddr+0x404);
+ /* ASIC location registers should be 0 or have only a single bit set. */
+ if ( (iobase_reg & (iobase_reg - 1))
+ || (membase_reg & (membase_reg - 1))) {
+ retval = -ENODEV;
+ goto out1;
+ }
+ saved_406 = inb_p(ioaddr + 0x406);
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
+ outb_p(ECNTRL_THIN, ioaddr + 0x406);
+ /* Map the station addr PROM into the lower I/O ports. We now check
+ for both the old and new 3Com prefix */
+ outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
+ vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
+ /* Restore the register we frobbed. */
+ outb(saved_406, ioaddr + 0x406);
+ retval = -ENODEV;
+ goto out1;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ dev->base_addr = ioaddr;
+
+ printk("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Map the 8390 back into the window. */
+ outb(ECNTRL_THIN, ioaddr + 0x406);
+
+ /* Check for EL2/16 as described in tech. man. */
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+ outb_p(0, ioaddr + EN0_DCFG);
+ outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
+ wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+
+ /* Probe for, turn on and clear the board's shared memory. */
+ if (ei_debug > 2) printk(" memory jumpers %2.2x ", membase_reg);
+ outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
+
+ /* This should be probed for (or set via an ioctl()) at run-time.
+ Right now we use a sleazy hack to pass in the interface number
+ at boot-time via the low bits of the mem_end field. That value is
+ unused, and the low bits would be discarded even if it was used. */
+#if defined(EI8390_THICK) || defined(EL2_AUI)
+ ei_status.interface_num = 1;
+#else
+ ei_status.interface_num = dev->mem_end & 0xf;
+#endif
+ printk(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+
+ if ((membase_reg & 0xf0) == 0) {
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ ei_status.mem = NULL;
+ } else {
+ dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
+ ((membase_reg & 0xA0) ? 0x4000 : 0);
+#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
+ ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE);
+
+#ifdef EL2MEMTEST
+ /* This has never found an error, but someone might care.
+ Note that it only tests the 2nd 8kB on 16kB 3c503/16
+ cards between card addr. 0x2000 and 0x3fff. */
+ { /* Check the card's memory. */
+ void __iomem *mem_base = ei_status.mem;
+ unsigned int test_val = 0xbbadf00d;
+ writel(0xba5eba5e, mem_base);
+ for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
+ writel(test_val, mem_base + i);
+ if (readl(mem_base) != 0xba5eba5e
+ || readl(mem_base + i) != test_val) {
+ printk("3c503: memory failure or memory address conflict.\n");
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ iounmap(mem_base);
+ ei_status.mem = NULL;
+ break;
+ }
+ test_val += 0x55555555;
+ writel(0, mem_base + i);
+ }
+ }
+#endif /* EL2MEMTEST */
+
+ if (dev->mem_start)
+ dev->mem_end = dev->mem_start + EL2_MEMSIZE;
+
+ if (wordlength) { /* No Tx pages to skip over to get to Rx */
+ ei_status.priv = 0;
+ ei_status.name = "3c503/16";
+ } else {
+ ei_status.priv = TX_PAGES * 256;
+ ei_status.name = "3c503";
+ }
+ }
+
+ /*
+ Divide up the memory on the card. This is the same regardless of
+ whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
+	we use the entire 8kB of bank1 for an Rx ring. We only use 3kB
+	of bank0 for 2 full-size Tx packet slots. For 8 bit cards
+	(8kB RAM), we use 3kB of bank1 for two Tx slots, and the remaining
+ 5kB for an Rx ring. */
+
+ if (wordlength) {
+ ei_status.tx_start_page = EL2_MB0_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG;
+ } else {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ }
+
+ /* Finish setting the board's parameters. */
+ ei_status.stop_page = EL2_MB1_STOP_PG;
+ ei_status.word16 = wordlength;
+ ei_status.reset_8390 = &el2_reset_8390;
+ ei_status.get_8390_hdr = &el2_get_8390_hdr;
+ ei_status.block_input = &el2_block_input;
+ ei_status.block_output = &el2_block_output;
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+ else if (dev->irq > 5 && dev->irq != 9) {
+ printk("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ dev->irq);
+ dev->irq = 0;
+ }
+
+ ei_status.saved_irq = dev->irq;
+
+ dev->open = &el2_open;
+ dev->stop = &el2_close;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ if (dev->mem_start)
+ printk("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ dev->name, ei_status.name, (wordlength+1)<<3,
+ dev->mem_start, dev->mem_end-1);
+
+ else
+ {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ printk("\n%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ dev->name, ei_status.name, (wordlength+1)<<3);
+ }
+ release_region(ioaddr + 0x400, 8);
+ return 0;
+out1:
+ release_region(ioaddr + 0x400, 8);
+out:
+ release_region(ioaddr, EL2_IO_EXTENT);
+ return retval;
+}
+
+static int
+el2_open(struct net_device *dev)
+{
+ int retval = -EAGAIN;
+
+ if (dev->irq < 2) {
+ int irqlist[] = {5, 9, 3, 4, 0};
+ int *irqp = irqlist;
+
+ outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
+ do {
+ if (request_irq (*irqp, NULL, 0, "bogus", dev) != -EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen. */
+ unsigned long cookie = probe_irq_on();
+ outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == probe_irq_off(cookie) /* It's a good IRQ line! */
+ && ((retval = request_irq(dev->irq = *irqp,
+ ei_interrupt, 0, dev->name, dev)) == 0))
+ break;
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+ return retval;
+ }
+ } else {
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
+ return retval;
+ }
+ }
+
+ el2_init_card(dev);
+ ei_open(dev);
+ return 0;
+}
+
+static int
+el2_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ dev->irq = ei_status.saved_irq;
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+
+ ei_close(dev);
+ return 0;
+}
+
+/* This is called whenever we have an unrecoverable failure:
+	transmit timeout
+	bad ring buffer packet header
+ */
+static void
+el2_reset_8390(struct net_device *dev)
+{
+ if (ei_debug > 1) {
+ printk("%s: Resetting the 3c503 board...", dev->name);
+ printk("%#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
+ }
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
+ ei_status.txing = 0;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ el2_init_card(dev);
+ if (ei_debug > 1) printk("done\n");
+}
+
+/* Initialize the 3c503 GA registers after a reset. */
+static void
+el2_init_card(struct net_device *dev)
+{
+ /* Unmap the station PROM and select the DIX or BNC connector. */
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+
+ /* Set ASIC copy of rx's first and last+1 buffer pages */
+ /* These must be the same as in the 8390. */
+ outb(ei_status.rx_start_page, E33G_STARTPG);
+ outb(ei_status.stop_page, E33G_STOPPG);
+
+ /* Point the vector pointer registers somewhere ?harmless?. */
+ outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
+ outb(0xff, E33G_VP1);
+ outb(0x00, E33G_VP0);
+ /* Turn off all interrupts until we're opened. */
+ outb_p(0x00, dev->base_addr + EN0_IMR);
+ /* Enable IRQs iff started. */
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ /* Set the interrupt line. */
+ outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
+ outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
+ outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
+ outb_p(0x00, E33G_DMAAL);
+ return; /* We always succeed */
+}
+
+/*
+ * Either use the shared memory (if enabled on the board) or put the packet
+ * out through the ASIC FIFO.
+ */
+static void
+el2_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned short int *wrd;
+ int boguscount; /* timeout counter */
+ unsigned short word; /* temporary for better machine code */
+ void __iomem *base = ei_status.mem;
+
+ if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
+ outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
+ else
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ if (base) { /* Shared memory transfer */
+ memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8),
+ buf, count);
+		outb(EGACFR_NORM, E33G_GACFR);	/* Back to bank1 in case we were on bank0 */
+ return;
+ }
+
+/*
+ * No shared memory, put the packet out the other way.
+ * Set up then start the internal memory transfer to Tx Start Page
+ */
+
+ word = (unsigned short)start_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I am going to write data to the FIFO as quickly as possible.
+ * Note that E33G_FIFOH is defined incorrectly. It is really
+ * E33G_FIFOL, the lowest port address for both the byte and
+ * word write. Variable 'count' is NOT checked. Caller must supply a
+ * valid count. Note that I may write a harmless extra byte to the
+ * 8390 if the byte-count was not even.
+ */
+ wrd = (unsigned short int *) buf;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_output.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ outsw(E33G_FIFOH, wrd, WRD_COUNT);
+ wrd += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ outsw(E33G_FIFOH, wrd, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+
+/* Read the 4 byte, page aligned 8390 specific header. */
+static void
+el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int boguscount;
+ void __iomem *base = ei_status.mem;
+ unsigned short word;
+
+ if (base) { /* Use the shared memory. */
+ void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+
+ word = (unsigned short)ring_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+static void
+el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int boguscount = 0;
+ void __iomem *base = ei_status.mem;
+ unsigned short int *buf;
+ unsigned short word;
+
+	/* Maybe enable shared memory just to be safe... nahh. */
+ if (base) { /* Use the shared memory. */
+ ring_offset -= (EL2_MB1_START_PG<<8);
+ if (ring_offset + count > EL2_MEMSIZE) {
+ /* We must wrap the input move. */
+ int semi_count = EL2_MEMSIZE - ring_offset;
+ memcpy_fromio(skb->data, base + ring_offset, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, base + ring_offset, count, 0);
+ }
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+ word = (unsigned short) ring_offset;
+ outb(word>>8, E33G_DMAAH);
+ outb(word&0xFF, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I also try to get data as fast as possible. I am betting that I
+ * can read one extra byte without clobbering anything in the kernel because
+ * this would only occur on an odd byte-count and allocation of skb->data
+ * is word-aligned. Variable 'count' is NOT checked. Caller must check
+ * for a valid count.
+ * [This is currently quite safe.... but if one day the 3c503 explodes
+ * you know where to come looking ;)]
+ */
+
+ buf = (unsigned short int *) skb->data;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_input.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ insw(E33G_FIFOH, buf, WRD_COUNT);
+ buf += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ insw(E33G_FIFOH, buf, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+#ifdef MODULE
+#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
+
+static struct net_device *dev_el2[MAX_EL2_CARDS];
+static int io[MAX_EL2_CARDS];
+static int irq[MAX_EL2_CARDS];
+static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. xcvr */
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
+MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver");
+MODULE_LICENSE("GPL");
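+
+/* Example usage (illustrative only): loading the module for two cards with
+ * explicit resources, the second forced to the external (AUI) transceiver:
+ *
+ *	modprobe 3c503 io=0x300,0x310 irq=5,9 xcvr=0,1
+ */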
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ if (do_el2_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_el2[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct net_device *dev = dev_el2[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/3c503.h b/drivers/net/3c503.h
new file mode 100644
index 000000000000..b9f8a46f89b3
--- /dev/null
+++ b/drivers/net/3c503.h
@@ -0,0 +1,91 @@
+/* Definitions for the 3Com 3c503 Etherlink 2. */
+/* This file is distributed under the GPL.
+ Many of these names and comments are directly from the Crynwr packet
+ drivers, which are released under the GPL. */
+
+#define EL2H (dev->base_addr + 0x400)
+#define EL2L (dev->base_addr)
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Shared memory management parameters. NB: The 8 bit cards have only
+ one bank (MB1) which serves both Tx and Rx packet space. The 16bit
+ cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
+ You choose which bank appears in the sh. mem window with EGACFR_MBSn */
+
+#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
+#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
+#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
+
+/* 3Com 3c503 ASIC registers */
+#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
+#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
+#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
+#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
+ /* (non-useful, but it also appears at the end of EPROM space) */
+#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
+#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
+#define E33G_CNTRL (EL2H+6) /* Board's main control register */
+#define E33G_STATUS (EL2H+7) /* Status on completions. */
+#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
+ /* (Which IRQ to assert, DMA chan to use) */
+#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
+#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
+/* "Vector pointer" - if this address matches a read, the EPROM (rather than
+ shared RAM) is mapped into memory space. */
+#define E33G_VP2 (EL2H+11)
+#define E33G_VP1 (EL2H+12)
+#define E33G_VP0 (EL2H+13)
+#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
+#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
+
+/* Bits in E33G_CNTRL register: */
+
+#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
+#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
+#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
+#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
+#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
+#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
+#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
+#define ECNTRL_START (0x80) /* Start the DMA logic */
+
+/* Bits in E33G_STATUS register: */
+
+#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
+#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
+#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
+#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
+#define ESTAT_DIP (0x08) /* DMA In Progress */
+
+/* Bits in E33G_GACFR register: */
+
+#define EGACFR_NIM (0x80) /* NIC interrupt mask */
+#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
+#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
+#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
+#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
+#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
+
+#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
+#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
+
+/*
+	MBS2	MBS1	MBS0	Sh. mem window shows card mem at:
+	----	----	----	---------------------------------
+ 0 0 0 0x0000 -- bank 0
+ 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
+ 0 1 0 0x4000 -- bank 2, not used
+ 0 1 1 0x6000 -- bank 3, not used
+
+There was going to be a 32k card that used bank 2 and 3, but it
+never got produced.
+
+*/
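+
+/* Illustration only (not used by the driver): the bank mapped into the
+ * shared-memory window is just the low three GACFR bits, so a hypothetical
+ * helper for "normal" operation on bank n could be written as
+ *
+ *	#define EGACFR_BANK(n)	(EGACFR_TCM | EGACFR_RSEL | ((n) & 0x07))
+ *
+ * giving EGACFR_BANK(1) == EGACFR_NORM (0x49) above.
+ */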
+
+
+/* End of 3C503 parameter definitions */
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
new file mode 100644
index 000000000000..76fa8cc24085
--- /dev/null
+++ b/drivers/net/3c505.c
@@ -0,0 +1,1690 @@
+/*
+ * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505)
+ * By Craig Southeren, Juha Laiho and Philip Blundell
+ *
+ * 3c505.c This module implements an interface to the 3Com
+ * Etherlink Plus (3c505) Ethernet card. Linux device
+ * driver interface reverse engineered from the Linux 3C509
+ * device drivers. Some 3C505 information gleaned from
+ * the Crynwr packet driver. Still this driver would not
+ * be here without 3C505 technical reference provided by
+ * 3Com.
+ *
+ * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $
+ *
+ * Authors: Linux 3c505 device driver by
+ * Craig Southeren, <craigs@ineluki.apana.org.au>
+ * Final debugging by
+ * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
+ * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
+ * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
+ * Linux 3C509 driver by
+ * Donald Becker, <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ * Crynwr packet driver by
+ * Krishnan Gopalan and Gregg Stefancik,
+ * Clemson University Engineering Computer Operations.
+ * Portions of the code have been adapted from the 3c505
+ * driver for NCSA Telnet by Bruce Orchard and later
+ * modified by Warren Van Houten and krus@diku.dk.
+ * 3C505 technical information provided by
+ * Terry Murphy, of 3Com Network Adapter Division
+ * Linux 1.3.0 changes by
+ * Alan Cox <Alan.Cox@linux.org>
+ * More debugging, DMA support, currently maintained by
+ * Philip Blundell <philb@gnu.org>
+ * Multicard/soft configurable dma channel/rev 2 hardware support
+ * by Christopher Collins <ccollins@pcug.org.au>
+ * Ethtool support (jgarzik), 11/17/2001
+ */
+
+#define DRV_NAME "3c505"
+#define DRV_VERSION "1.10a"
+
+
+/* Theory of operation:
+ *
+ * The 3c505 is quite an intelligent board. All communication with it is done
+ * by means of Primary Command Blocks (PCBs); these are transferred using PIO
+ * through the command register. The card has 256k of on-board RAM, which is
+ * used to buffer received packets. It might seem at first that more buffers
+ * are better, but in fact this isn't true. From my tests, it seems that
+ * more than about 10 buffers are unnecessary, and there is a noticeable
+ * performance hit in having more active on the card. So the majority of the
+ * card's memory isn't, in fact, used. Sadly, the card only has one transmit
+ * buffer and, short of loading our own firmware into it (which is what some
+ * drivers resort to), there's nothing we can do about this.
+ *
+ * We keep up to 4 "receive packet" commands active on the board at a time.
+ * When a packet comes in, so long as there is a receive command active, the
+ * board will send us a "packet received" PCB and then add the data for that
+ * packet to the DMA queue. If a DMA transfer is not already in progress, we
+ * set one up to start uploading the data. We have to maintain a list of
+ * backlogged receive packets, because the card may decide to tell us about
+ * a newly-arrived packet at any time, and we may not be able to start a DMA
+ * transfer immediately (ie one may already be going on). We can't NAK the
+ * PCB, because then it would throw the packet away.
+ *
+ * Trying to send a PCB to the card at the wrong moment seems to have bad
+ * effects. If we send it a transmit PCB while a receive DMA is happening,
+ * it will just NAK the PCB and so we will have wasted our time. Worse, it
+ * sometimes seems to interrupt the transfer. The majority of the low-level
+ * code is protected by one huge semaphore -- "busy" -- which is set whenever
+ * it probably isn't safe to do anything to the card. The receive routine
+ * must gain a lock on "busy" before it can start a DMA transfer, and the
+ * transmit routine must gain a lock before it sends the first PCB to the card.
+ * The send_pcb() routine also has an internal semaphore to protect it against
+ * being re-entered (which would be disastrous) -- this is needed because
+ * several things can happen asynchronously (re-priming the receiver and
+ * asking the card for statistics, for example). send_pcb() will also refuse
+ * to talk to the card at all if a DMA upload is happening. The higher-level
+ * networking code will reschedule a later retry if some part of the driver
+ * is blocked. In practice, this doesn't seem to happen very often.
+ */
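+
+/* A condensed sketch of the receive handshake described above (illustration
+ * only -- the real code lives in receive_pcb(), receive_packet() and
+ * elp_interrupt() below; backlog_add() is a hypothetical stand-in for the
+ * inline rx_backlog bookkeeping, and len is the buffer length from the PCB):
+ *
+ *	if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
+ *		if (test_and_set_bit(0, &adapter->busy))
+ *			backlog_add(adapter, len);	// DMA busy: queue it, never NAK
+ *		else
+ *			receive_packet(dev, len);	// start the DMA upload now
+ *	}
+ */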
+
+/* This driver may now work with revision 2.x hardware, since all the read
+ * operations on the HCR have been removed (we now keep our own softcopy).
+ * But I don't have an old card to test it on.
+ *
+ * This has had the bad effect that the autoprobe routine is now a bit
+ * less friendly to other devices. However, it was never very good.
+ * before, so I doubt it will hurt anybody.
+ */
+
+/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
+ * to make it more reliable, and secondly to add DMA mode. Many things could
+ * probably be done better; the concurrency protection is particularly awful.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#include "3c505.h"
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+static const char filename[] = __FILE__;
+
+static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
+#define TIMEOUT_MSG(lineno) \
+ printk(timeout_msg, filename,__FUNCTION__,(lineno))
+
+static const char invalid_pcb_msg[] =
+"*** invalid pcb length %d at %s:%s (line %d) ***\n";
+#define INVALID_PCB_MSG(len) \
+ printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+
+static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
+
+static char stilllooking_msg[] __initdata = "still looking...";
+
+static char found_msg[] __initdata = "found.\n";
+
+static char notfound_msg[] __initdata = "not found (reason = %d)\n";
+
+static char couldnot_msg[] __initdata = KERN_INFO "%s: 3c505 not found\n";
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static int elp_debug = ELP_DEBUG;
+#else
+static int elp_debug;
+#endif
+#define debug elp_debug
+
+/*
+ * 0 = no messages (well, some)
+ * 1 = messages when high level commands performed
+ * 2 = messages when low level commands performed
+ * 3 = messages when interrupts received
+ */
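+
+/* The level can also be changed at run time through the ethtool msglevel
+ * hooks further down (netdev_get_msglevel()/netdev_set_msglevel()), e.g.
+ * (illustrative):
+ *
+ *	ethtool -s eth0 msglvl 3
+ */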
+
+/*****************************************************************
+ *
+ * useful macros
+ *
+ *****************************************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
+
+/* Dma Memory related stuff */
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = get_order(size);
+ return __get_dma_pages(GFP_KERNEL, order);
+}
+
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
+ *
+ *****************************************************************/
+
+static inline unsigned char inb_status(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_STATUS);
+}
+
+static inline int inb_command(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_COMMAND);
+}
+
+static inline void outb_control(unsigned char val, struct net_device *dev)
+{
+ outb(val, dev->base_addr + PORT_CONTROL);
+ ((elp_device *)(dev->priv))->hcr_val = val;
+}
+
+#define HCR_VAL(x) (((elp_device *)((x)->priv))->hcr_val)
+
+static inline void outb_command(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_COMMAND);
+}
+
+static inline unsigned int backlog_next(unsigned int n)
+{
+ return (n + 1) % BACKLOG_SIZE;
+}
+
+/*****************************************************************
+ *
+ * useful functions for accessing the adapter
+ *
+ *****************************************************************/
+
+/*
+ * use this routine when accessing the ASF bits as they are
+ * changed asynchronously by the adapter
+ */
+
+/* get adapter PCB status */
+#define GET_ASF(addr) \
+ (get_status(addr)&ASF_PCB_MASK)
+
+static inline int get_status(unsigned int base_addr)
+{
+ unsigned long timeout = jiffies + 10*HZ/100;
+ register int stat1;
+ do {
+ stat1 = inb_status(base_addr);
+ } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ return stat1;
+}
+
+static inline void set_hsf(struct net_device *dev, int hsf)
+{
+ elp_device *adapter = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static int start_receive(struct net_device *, pcb_struct *);
+
+static inline void adapter_reset(struct net_device *dev)
+{
+ unsigned long timeout;
+ elp_device *adapter = dev->priv;
+ unsigned char orig_hcr = adapter->hcr_val;
+
+ outb_control(0, dev);
+
+ if (inb_status(dev->base_addr) & ACRF) {
+ do {
+ inb_command(dev->base_addr);
+ timeout = jiffies + 2*HZ/100;
+ while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
+ } while (inb_status(dev->base_addr) & ACRF);
+ set_hsf(dev, HSF_PCB_NAK);
+ }
+ outb_control(adapter->hcr_val | ATTN | DIR, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val & ~ATTN, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val | FLSH, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val & ~FLSH, dev);
+ mdelay(10);
+
+ outb_control(orig_hcr, dev);
+ if (!start_receive(dev, &adapter->tx_pcb))
+		printk(KERN_ERR "%s: start receive command failed\n", dev->name);
+}
+
+/* Check to make sure that a DMA transfer hasn't timed out. This should
+ * never happen in theory, but seems to occur occasionally if the card gets
+ * prodded at the wrong time.
+ */
+static inline void check_3c505_dma(struct net_device *dev)
+{
+ elp_device *adapter = dev->priv;
+ if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
+ unsigned long flags, f;
+ printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->dmaing = 0;
+ adapter->busy = 0;
+
+ f=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(f);
+
+ if (adapter->rx_active)
+ adapter->rx_active--;
+ outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ }
+}
+
+/* Primitive functions used by send_pcb() */
+static inline unsigned int send_pcb_slow(unsigned int base_addr, unsigned char byte)
+{
+ unsigned long timeout;
+ outb_command(byte, base_addr);
+ for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk(KERN_WARNING "3c505: send_pcb_slow timed out\n");
+ return TRUE;
+}
+
+static inline unsigned int send_pcb_fast(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = 0; timeout < 40000; timeout++) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk(KERN_WARNING "3c505: send_pcb_fast timed out\n");
+ return TRUE;
+}
+
+/* Check to see if the receiver needs restarting, and kick it if so */
+static inline void prime_rx(struct net_device *dev)
+{
+ elp_device *adapter = dev->priv;
+ while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
+ if (!start_receive(dev, &adapter->itx_pcb))
+ break;
+ }
+}
+
+/*****************************************************************
+ *
+ * send_pcb
+ * Send a PCB to the adapter.
+ *
+ * output byte to command reg --<--+
+ * wait until HCRE is non zero |
+ * loop until all bytes sent -->--+
+ * set HSF1 and HSF2 to 1
+ * output pcb length
+ * wait until ASF give ACK or NAK
+ * set HSF1 and HSF2 to 0
+ *
+ *****************************************************************/
+
+/* This can be quite slow -- the adapter is allowed to take up to 40ms
+ * to respond to the initial interrupt.
+ *
+ * We run initially with interrupts turned on, but with a semaphore set
+ * so that nobody tries to re-enter this code. Once the first byte has
+ * gone through, we turn interrupts off and then send the others (the
+ * timeout is reduced to 500us).
+ */
+
+static int send_pcb(struct net_device *dev, pcb_struct * pcb)
+{
+ int i;
+ unsigned long timeout;
+ elp_device *adapter = dev->priv;
+ unsigned long flags;
+
+ check_3c505_dma(dev);
+
+ if (adapter->dmaing && adapter->current_dma.direction == 0)
+ return FALSE;
+
+ /* Avoid contention */
+ if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
+ if (elp_debug >= 3) {
+ printk(KERN_DEBUG "%s: send_pcb entered while threaded\n", dev->name);
+ }
+ return FALSE;
+ }
+ /*
+ * load each byte into the command register and
+ * wait for the HCRE bit to indicate the adapter
+	 * has read the byte
+ */
+ set_hsf(dev, 0);
+
+ if (send_pcb_slow(dev->base_addr, pcb->command))
+ goto abort;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ if (send_pcb_fast(dev->base_addr, pcb->length))
+ goto sti_abort;
+
+ for (i = 0; i < pcb->length; i++) {
+ if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
+ goto sti_abort;
+ }
+
+ outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */
+ outb_command(2 + pcb->length, dev->base_addr);
+
+ /* now wait for the acknowledgement */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
+ switch (GET_ASF(dev->base_addr)) {
+ case ASF_PCB_ACK:
+ adapter->send_pcb_semaphore = 0;
+ return TRUE;
+
+ case ASF_PCB_NAK:
+#ifdef ELP_DEBUG
+ printk(KERN_DEBUG "%s: send_pcb got NAK\n", dev->name);
+#endif
+ goto abort;
+ }
+ }
+
+ if (elp_debug >= 1)
+ printk(KERN_DEBUG "%s: timeout waiting for PCB acknowledge (status %02x)\n", dev->name, inb_status(dev->base_addr));
+ goto abort;
+
+ sti_abort:
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ abort:
+ adapter->send_pcb_semaphore = 0;
+ return FALSE;
+}
+
+
+/*****************************************************************
+ *
+ * receive_pcb
+ * Read a PCB from the adapter
+ *
+ * wait for ACRF to be non-zero ---<---+
+ * input a byte |
+ * if ASF1 and ASF2 were not both one |
+ * before byte was read, loop --->---+
+ * set HSF1 and HSF2 for ack
+ *
+ *****************************************************************/
+
+static int receive_pcb(struct net_device *dev, pcb_struct * pcb)
+{
+ int i, j;
+ int total_length;
+ int stat;
+ unsigned long timeout;
+ unsigned long flags;
+
+ elp_device *adapter = dev->priv;
+
+ set_hsf(dev, 0);
+
+ /* get the command code */
+ timeout = jiffies + 2*HZ/100;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ pcb->command = inb_command(dev->base_addr);
+
+ /* read the data length */
+ timeout = jiffies + 3*HZ/100;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ printk(KERN_INFO "%s: status %02x\n", dev->name, stat);
+ return FALSE;
+ }
+ pcb->length = inb_command(dev->base_addr);
+
+ if (pcb->length > MAX_PCB_DATA) {
+ INVALID_PCB_MSG(pcb->length);
+ adapter_reset(dev);
+ return FALSE;
+ }
+ /* read the data */
+ spin_lock_irqsave(&adapter->lock, flags);
+ i = 0;
+ do {
+ j = 0;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
+ pcb->data.raw[i++] = inb_command(dev->base_addr);
+ if (i > MAX_PCB_DATA)
+ INVALID_PCB_MSG(i);
+ } while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ if (j >= 20000) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+	/* whoops, the last "data" byte was really the length! */
+ total_length = pcb->data.raw[--i];
+
+ /* safety check total length vs data length */
+ if (total_length != (pcb->length + 2)) {
+ if (elp_debug >= 2)
+ printk(KERN_WARNING "%s: mangled PCB received\n", dev->name);
+ set_hsf(dev, HSF_PCB_NAK);
+ return FALSE;
+ }
+
+ if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
+ if (test_and_set_bit(0, (void *) &adapter->busy)) {
+ if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
+ set_hsf(dev, HSF_PCB_NAK);
+ printk(KERN_WARNING "%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
+ pcb->command = 0;
+ return TRUE;
+ } else {
+ pcb->command = 0xff;
+ }
+ }
+ }
+ set_hsf(dev, HSF_PCB_ACK);
+ return TRUE;
+}
+
+/******************************************************
+ *
+ * queue a receive command on the adapter so we will get an
+ * interrupt when a packet is received.
+ *
+ ******************************************************/
+
+static int start_receive(struct net_device *dev, pcb_struct * tx_pcb)
+{
+ int status;
+ elp_device *adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: restarting receiver\n", dev->name);
+ tx_pcb->command = CMD_RECEIVE_PACKET;
+ tx_pcb->length = sizeof(struct Rcv_pkt);
+ tx_pcb->data.rcv_pkt.buf_seg
+ = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
+ tx_pcb->data.rcv_pkt.buf_len = 1600;
+ tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
+ status = send_pcb(dev, tx_pcb);
+ if (status)
+ adapter->rx_active++;
+ return status;
+}
+
+/******************************************************
+ *
+ * Extract a packet from the adapter. This routine is only called from
+ * within the interrupt service routine, so no cli/sti calls are needed.
+ * Note that the length is always assumed to be even.
+ *
+ ******************************************************/
+
+static void receive_packet(struct net_device *dev, int len)
+{
+ int rlen;
+ elp_device *adapter = dev->priv;
+ void *target;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ rlen = (len + 1) & ~1;
+ skb = dev_alloc_skb(rlen + 2);
+
+ if (!skb) {
+ printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
+ target = adapter->dma_buffer;
+ adapter->current_dma.target = NULL;
+ /* FIXME: stats */
+ return;
+ }
+
+ skb_reserve(skb, 2);
+ target = skb_put(skb, rlen);
+ if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
+ adapter->current_dma.target = target;
+ target = adapter->dma_buffer;
+ } else {
+ adapter->current_dma.target = NULL;
+ }
+
+ /* if this happens, we die */
+ if (test_and_set_bit(0, (void *) &adapter->dmaing))
+ printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
+
+ skb->dev = dev;
+ adapter->current_dma.direction = 0;
+ adapter->current_dma.length = rlen;
+ adapter->current_dma.skb = skb;
+ adapter->current_dma.start_time = jiffies;
+
+ outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x04); /* dma read */
+ set_dma_addr(dev->dma, isa_virt_to_bus(target));
+ set_dma_count(dev->dma, rlen);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ if (elp_debug >= 3) {
+ printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
+ }
+
+ if (adapter->rx_active)
+ adapter->rx_active--;
+
+ if (!adapter->busy)
+ printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
+}
+
+/******************************************************
+ *
+ * interrupt handler
+ *
+ ******************************************************/
+
+static irqreturn_t elp_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
+{
+ int len;
+ int dlen;
+ int icount = 0;
+ struct net_device *dev;
+ elp_device *adapter;
+ unsigned long timeout;
+
+ dev = dev_id;
+ adapter = (elp_device *) dev->priv;
+
+ spin_lock(&adapter->lock);
+
+ do {
+ /*
+ * has a DMA transfer finished?
+ */
+ if (inb_status(dev->base_addr) & DONE) {
+ if (!adapter->dmaing) {
+ printk(KERN_WARNING "%s: phantom DMA completed\n", dev->name);
+ }
+ if (elp_debug >= 3) {
+ printk(KERN_DEBUG "%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? "tx" : "rx", inb_status(dev->base_addr));
+ }
+
+ outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
+ if (adapter->current_dma.direction) {
+ dev_kfree_skb_irq(adapter->current_dma.skb);
+ } else {
+ struct sk_buff *skb = adapter->current_dma.skb;
+ if (skb) {
+ if (adapter->current_dma.target) {
+ /* have already done the skb_put() */
+ memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
+ }
+ skb->protocol = eth_type_trans(skb,dev);
+ adapter->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ }
+ adapter->dmaing = 0;
+ if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
+ int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
+ adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
+ if (elp_debug >= 2)
+ printk(KERN_DEBUG "%s: receiving backlogged packet (%d)\n", dev->name, t);
+ receive_packet(dev, t);
+ } else {
+ adapter->busy = 0;
+ }
+ } else {
+ /* has one timed out? */
+ check_3c505_dma(dev);
+ }
+
+ /*
+ * receive a PCB from the adapter
+ */
+ timeout = jiffies + 3*HZ/100;
+ while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
+ if (receive_pcb(dev, &adapter->irx_pcb)) {
+ switch (adapter->irx_pcb.command)
+ {
+ case 0:
+ break;
+ /*
+ * received a packet - this must be handled fast
+ */
+ case 0xff:
+ case CMD_RECEIVE_PACKET_COMPLETE:
+ /* if the device isn't open, don't pass packets up the stack */
+ if (!netif_running(dev))
+ break;
+ len = adapter->irx_pcb.data.rcv_resp.pkt_len;
+ dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
+ if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
+ printk(KERN_ERR "%s: interrupt - packet not received correctly\n", dev->name);
+ } else {
+ if (elp_debug >= 3) {
+ printk(KERN_DEBUG "%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
+ }
+ if (adapter->irx_pcb.command == 0xff) {
+ if (elp_debug >= 2)
+ printk(KERN_DEBUG "%s: adding packet to backlog (len = %d)\n", dev->name, dlen);
+ adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
+ adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
+ } else {
+ receive_packet(dev, dlen);
+ }
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: packet received\n", dev->name);
+ }
+ break;
+
+ /*
+ * 82586 configured correctly
+ */
+ case CMD_CONFIGURE_82586_RESPONSE:
+ adapter->got[CMD_CONFIGURE_82586] = 1;
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: interrupt - configure response received\n", dev->name);
+ break;
+
+ /*
+ * Adapter memory configuration
+ */
+ case CMD_CONFIGURE_ADAPTER_RESPONSE:
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: Adapter memory configuration %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Multicast list loading
+ */
+ case CMD_LOAD_MULTICAST_RESPONSE:
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: Multicast address list loading %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Station address setting
+ */
+ case CMD_SET_ADDRESS_RESPONSE:
+ adapter->got[CMD_SET_STATION_ADDRESS] = 1;
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: Ethernet address setting %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+
+ /*
+ * received board statistics
+ */
+ case CMD_NETWORK_STATISTICS_RESPONSE:
+ adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+ adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+ adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+ adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+ adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+ adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+ adapter->got[CMD_NETWORK_STATISTICS] = 1;
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name);
+ break;
+
+ /*
+ * sent a packet
+ */
+ case CMD_TRANSMIT_PACKET_COMPLETE:
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: interrupt - packet sent\n", dev->name);
+ if (!netif_running(dev))
+ break;
+ switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
+ case 0xffff:
+ adapter->stats.tx_aborted_errors++;
+ printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
+ break;
+ case 0xfffe:
+ adapter->stats.tx_fifo_errors++;
+ printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
+ break;
+ }
+ netif_wake_queue(dev);
+ break;
+
+ /*
+ * some unknown PCB
+ */
+ default:
+ printk(KERN_DEBUG "%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
+ break;
+ }
+ } else {
+ printk(KERN_WARNING "%s: failed to read PCB on interrupt\n", dev->name);
+ adapter_reset(dev);
+ }
+ }
+
+ } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
+
+ prime_rx(dev);
+
+ /*
+ * indicate no longer in interrupt routine
+ */
+ spin_unlock(&adapter->lock);
+ return IRQ_HANDLED;
+}
+
+
+/******************************************************
+ *
+ * open the board
+ *
+ ******************************************************/
+
+static int elp_open(struct net_device *dev)
+{
+ elp_device *adapter;
+ int retval;
+
+ adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: request to open device\n", dev->name);
+
+ /*
+ * make sure we actually found the device
+ */
+ if (adapter == NULL) {
+ printk(KERN_ERR "%s: Opening a non-existent physical device\n", dev->name);
+ return -EAGAIN;
+ }
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0, dev);
+
+ /*
+ * clear any pending interrupts
+ */
+ inb_command(dev->base_addr);
+ adapter_reset(dev);
+
+ /*
+ * no receive PCBs active
+ */
+ adapter->rx_active = 0;
+
+ adapter->busy = 0;
+ adapter->send_pcb_semaphore = 0;
+ adapter->rx_backlog.in = 0;
+ adapter->rx_backlog.out = 0;
+
+ spin_lock_init(&adapter->lock);
+
+ /*
+ * install our interrupt service routine
+ */
+ if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
+ printk(KERN_ERR "%s: could not allocate IRQ%d\n", dev->name, dev->irq);
+ return retval;
+ }
+ if ((retval = request_dma(dev->dma, dev->name))) {
+ free_irq(dev->irq, dev);
+ printk(KERN_ERR "%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
+ return retval;
+ }
+ adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
+ if (!adapter->dma_buffer) {
+ printk(KERN_ERR "%s: could not allocate DMA buffer\n", dev->name);
+ free_dma(dev->dma);
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+ adapter->dmaing = 0;
+
+ /*
+ * enable interrupts on the board
+ */
+ outb_control(CMDE, dev);
+
+ /*
+ * configure adapter memory: we need 10 multicast addresses, default==0
+ */
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: sending 3c505 memory configuration command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.data.memconf.cmd_q = 10;
+ adapter->tx_pcb.data.memconf.rcv_q = 20;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 20;
+ adapter->tx_pcb.data.memconf.rcv_b = 20;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ adapter->tx_pcb.length = sizeof(struct Memconf);
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk(KERN_ERR "%s: couldn't send memory configuration command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+
+
+ /*
+ * configure adapter to receive broadcast messages and wait for response
+ */
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk(KERN_ERR "%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+
+ /* enable burst-mode DMA */
+ /* outb(0x1, dev->base_addr + PORT_AUXDMA); */
+
+ /*
+ * queue receive commands to provide buffering
+ */
+ prime_rx(dev);
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
+
+ /*
+ * device is now officially open!
+ */
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+/******************************************************
+ *
+ * send a packet to the adapter
+ *
+ ******************************************************/
+
+static int send_packet(struct net_device *dev, struct sk_buff *skb)
+{
+ elp_device *adapter = dev->priv;
+ unsigned long target;
+ unsigned long flags;
+
+ /*
+ * make sure the length is even and no shorter than 60 bytes
+ */
+ unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
+
+ if (test_and_set_bit(0, (void *) &adapter->busy)) {
+ if (elp_debug >= 2)
+ printk(KERN_DEBUG "%s: transmit blocked\n", dev->name);
+ return FALSE;
+ }
+
+ adapter->stats.tx_bytes += nlen;
+
+ /*
+ * send the adapter a transmit packet command. Ignore segment and offset
+ * and make sure the length is even
+ */
+ adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
+ adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
+ adapter->tx_pcb.data.xmit_pkt.buf_ofs
+ = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
+ adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
+
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ adapter->busy = 0;
+ return FALSE;
+ }
+ /* if this happens, we die */
+ if (test_and_set_bit(0, (void *) &adapter->dmaing))
+ printk(KERN_DEBUG "%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
+
+ adapter->current_dma.direction = 1;
+ adapter->current_dma.start_time = jiffies;
+
+ if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
+ memcpy(adapter->dma_buffer, skb->data, nlen);
+ memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
+ target = isa_virt_to_bus(adapter->dma_buffer);
+ }
+ else {
+ target = isa_virt_to_bus(skb->data);
+ }
+ adapter->current_dma.skb = skb;
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x48); /* dma memory -> io */
+ set_dma_addr(dev->dma, target);
+ set_dma_count(dev->dma, nlen);
+ outb_control(adapter->hcr_val | DMAE | TCEN, dev);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name);
+
+ return TRUE;
+}
+
+/*
+ * The upper layer thinks we timed out
+ */
+
+static void elp_timeout(struct net_device *dev)
+{
+ elp_device *adapter = dev->priv;
+ int stat;
+
+ stat = inb_status(dev->base_addr);
+ printk(KERN_WARNING "%s: transmit timed out, lost %s?\n", dev->name, (stat & ACRF) ? "interrupt" : "command");
+ if (elp_debug >= 1)
+ printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat);
+ dev->trans_start = jiffies;
+ adapter->stats.tx_dropped++;
+ netif_wake_queue(dev);
+}
+
+/******************************************************
+ *
+ * start the transmitter
+ * return 0 if sent OK, else return 1
+ *
+ ******************************************************/
+
+static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ elp_device *adapter = dev->priv;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ check_3c505_dma(dev);
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: request to send packet of length %d\n", dev->name, (int) skb->len);
+
+ netif_stop_queue(dev);
+
+ /*
+ * send the packet at skb->data for skb->len
+ */
+ if (!send_packet(dev, skb)) {
+ if (elp_debug >= 2) {
+ printk(KERN_DEBUG "%s: failed to transmit packet\n", dev->name);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return 1;
+ }
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: packet of length %d sent\n", dev->name, (int) skb->len);
+
+ /*
+ * start the transmit timeout
+ */
+ dev->trans_start = jiffies;
+
+ prime_rx(dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/******************************************************
+ *
+ * return statistics on the board
+ *
+ ******************************************************/
+
+static struct net_device_stats *elp_get_stats(struct net_device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: request for stats\n", dev->name);
+
+	/* If the device is closed, just return the latest stats we have;
+	   we cannot query the adapter without interrupts. */
+ if (!netif_running(dev))
+ return &adapter->stats;
+
+ /* send a get statistics command to the board */
+ adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+ adapter->tx_pcb.length = 0;
+ adapter->got[CMD_NETWORK_STATISTICS] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk(KERN_ERR "%s: couldn't send get statistics command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ return &adapter->stats;
+ }
+ }
+
+ /* statistics are now up to date */
+ return &adapter->stats;
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+/******************************************************
+ *
+ * close the board
+ *
+ ******************************************************/
+
+static int elp_close(struct net_device *dev)
+{
+ elp_device *adapter;
+
+ adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: request to close device\n", dev->name);
+
+ netif_stop_queue(dev);
+
+ /* Someone may request the device statistics even when
+ * the interface is closed. The following will update the statistics
+ * structure in the driver, so we'll be able to give current statistics.
+ */
+ (void) elp_get_stats(dev);
+
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0, dev);
+
+ /*
+ * release the IRQ
+ */
+ free_irq(dev->irq, dev);
+
+ free_dma(dev->dma);
+ free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE));
+
+ return 0;
+}
+
+
+/************************************************************
+ *
+ * Set multicast list
+ * num_addrs==0: clear mc_list
+ * num_addrs==-1: set promiscuous mode
+ * num_addrs>0: set mc_list
+ *
+ ************************************************************/
+
+static void elp_set_mc_list(struct net_device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+ struct dev_mc_list *dmi = dev->mc_list;
+ int i;
+ unsigned long flags;
+
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: request to set multicast list\n", dev->name);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ /* send a "load multicast list" command to the board, max 10 addrs/cmd */
+ /* if num_addrs==0 the list will be cleared */
+ adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
+ adapter->tx_pcb.length = 6 * dev->mc_count;
+ for (i = 0; i < dev->mc_count; i++) {
+ memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
+ dmi = dmi->next;
+ }
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk(KERN_ERR "%s: couldn't send set_multicast command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ }
+ }
+ if (dev->mc_count)
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
+ else /* num_addrs == 0 */
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ } else
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
+ /*
+ * configure adapter to receive messages (as specified above)
+ * and wait for response
+ */
+ if (elp_debug >= 3)
+ printk(KERN_DEBUG "%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ {
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ printk(KERN_ERR "%s: couldn't send 82586 configure command\n", dev->name);
+ }
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+}
+
+/************************************************************
+ *
+ * A couple of tests to see if there's a 3C505 or not
+ * Called only by elp_autodetect
+ ************************************************************/
+
+static int __init elp_sense(struct net_device *dev)
+{
+ int addr = dev->base_addr;
+ const char *name = dev->name;
+ byte orig_HSR;
+
+ if (!request_region(addr, ELP_IO_EXTENT, "3c505"))
+ return -ENODEV;
+
+ orig_HSR = inb_status(addr);
+
+ if (elp_debug > 0)
+ printk(search_msg, name, addr);
+
+ if (orig_HSR == 0xff) {
+ if (elp_debug > 0)
+ printk(notfound_msg, 1);
+ goto out;
+ }
+
+ /* Wait for a while; the adapter may still be booting up */
+ if (elp_debug > 0)
+ printk(stilllooking_msg);
+
+ if (orig_HSR & DIR) {
+ /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
+ outb(0, dev->base_addr + PORT_CONTROL);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(30*HZ/100);
+ if (inb_status(addr) & DIR) {
+ if (elp_debug > 0)
+ printk(notfound_msg, 2);
+ goto out;
+ }
+ } else {
+ /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
+ outb(DIR, dev->base_addr + PORT_CONTROL);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(30*HZ/100);
+ if (!(inb_status(addr) & DIR)) {
+ if (elp_debug > 0)
+ printk(notfound_msg, 3);
+ goto out;
+ }
+ }
+ /*
+ * It certainly looks like a 3c505.
+ */
+ if (elp_debug > 0)
+ printk(found_msg);
+
+ return 0;
+out:
+ release_region(addr, ELP_IO_EXTENT);
+ return -ENODEV;
+}
+
+/*************************************************************
+ *
+ * Search through addr_list[] and try to find a 3C505
+ * Called only by elplus_probe
+ *************************************************************/
+
+static int __init elp_autodetect(struct net_device *dev)
+{
+ int idx = 0;
+
+ /* if a base address is set, check only that address;
+ otherwise, run through the table */
+ if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ } else
+ while ((dev->base_addr = addr_list[idx++])) {
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ }
+
+ /* could not find an adapter */
+ if (elp_debug > 0)
+ printk(couldnot_msg, dev->name);
+
+ return 0; /* Because of this, the layer above will return -ENODEV */
+}
+
+
+/******************************************************
+ *
+ * probe for an Etherlink Plus board at the specified address
+ *
+ ******************************************************/
+
+/* There are three situations we need to be able to detect here:
+
+ * a) the card is idle
+ * b) the card is still booting up
+ * c) the card is stuck in a strange state (some DOS drivers do this)
+ *
+ * In case (a), all is well. In case (b), we wait 10 seconds to see if the
+ * card finishes booting, and carry on if so. In case (c), we do a hard reset,
+ * loop round, and hope for the best.
+ *
+ * This is all very unpleasant, but hopefully avoids the problems with the old
+ * probe code (which had a 15-second delay if the card was idle, and didn't
+ * work at all if it was in a weird state).
+ */
+
+static int __init elplus_setup(struct net_device *dev)
+{
+ elp_device *adapter = dev->priv;
+ int i, tries, tries1, okay;
+ unsigned long timeout;
+ unsigned long cookie = 0;
+ int err = -ENODEV;
+
+ SET_MODULE_OWNER(dev);
+
+ /*
+ * setup adapter structure
+ */
+
+ dev->base_addr = elp_autodetect(dev);
+ if (!dev->base_addr)
+ return -ENODEV;
+
+ adapter->send_pcb_semaphore = 0;
+
+ for (tries1 = 0; tries1 < 3; tries1++) {
+ outb_control((adapter->hcr_val | CMDE) & ~DIR, dev);
+ /* First try to write just one byte, to see if the card is
+ * responding at all normally.
+ */
+ timeout = jiffies + 5*HZ/100;
+ okay = 0;
+ while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
+ if ((inb_status(dev->base_addr) & HCRE)) {
+ outb_command(0, dev->base_addr); /* send a spurious byte */
+ timeout = jiffies + 5*HZ/100;
+ while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
+ if (inb_status(dev->base_addr) & HCRE)
+ okay = 1;
+ }
+ if (!okay) {
+ /* Nope, it's ignoring the command register. This means that
+ * either it's still booting up, or it's died.
+ */
+ printk(KERN_ERR "%s: command register wouldn't drain, ", dev->name);
+ if ((inb_status(dev->base_addr) & 7) == 3) {
+ /* If the adapter status is 3, it *could* still be booting.
+ * Give it the benefit of the doubt for 10 seconds.
+ */
+ printk("assuming 3c505 still starting\n");
+ timeout = jiffies + 10*HZ;
+ while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
+ if (inb_status(dev->base_addr) & 7) {
+ printk(KERN_ERR "%s: 3c505 failed to start\n", dev->name);
+ } else {
+ okay = 1; /* It started */
+ }
+ } else {
+ /* Otherwise, it must just be in a strange
+ * state. We probably need to kick it.
+ */
+ printk("3c505 is sulking\n");
+ }
+ }
+ for (tries = 0; tries < 5 && okay; tries++) {
+
+ /*
+ * Ask the board for its Ethernet address, to make sure that the
+ * board is working.
+ */
+ adapter->tx_pcb.command = CMD_STATION_ADDRESS;
+ adapter->tx_pcb.length = 0;
+ cookie = probe_irq_on();
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ printk(KERN_ERR "%s: could not send first PCB\n", dev->name);
+ probe_irq_off(cookie);
+ continue;
+ }
+ if (!receive_pcb(dev, &adapter->rx_pcb)) {
+ printk(KERN_ERR "%s: could not read first PCB\n", dev->name);
+ probe_irq_off(cookie);
+ continue;
+ }
+ if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
+ (adapter->rx_pcb.length != 6)) {
+ printk(KERN_ERR "%s: first PCB wrong (%d, %d)\n", dev->name, adapter->rx_pcb.command, adapter->rx_pcb.length);
+ probe_irq_off(cookie);
+ continue;
+ }
+ goto okay;
+ }
+ /* It's broken. Do a hard reset to re-initialise the board,
+ * and try again.
+ */
+ printk(KERN_INFO "%s: resetting adapter\n", dev->name);
+ outb_control(adapter->hcr_val | FLSH | ATTN, dev);
+ outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
+ }
+ printk(KERN_ERR "%s: failed to initialise 3c505\n", dev->name);
+ goto out;
+
+ okay:
+ if (dev->irq) { /* Is there a preset IRQ? */
+ int rpt = probe_irq_off(cookie);
+ if (dev->irq != rpt) {
+ printk(KERN_WARNING "%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
+ }
+ /* if dev->irq == probe_irq_off(cookie), all is well */
+ } else /* No preset IRQ; just use what we can detect */
+ dev->irq = probe_irq_off(cookie);
+ switch (dev->irq) { /* Legal, sane? */
+ case 0:
+ printk(KERN_ERR "%s: IRQ probe failed: check 3c505 jumpers.\n",
+ dev->name);
+ goto out;
+ case 1:
+ case 6:
+ case 8:
+ case 13:
+ printk(KERN_ERR "%s: Impossible IRQ %d reported by probe_irq_off().\n",
+ dev->name, dev->irq);
+ goto out;
+ }
+ /*
+ * Now we have the IRQ number so we can disable the interrupts from
+ * the board until the board is opened.
+ */
+ outb_control(adapter->hcr_val & ~CMDE, dev);
+
+ /*
+ * copy Ethernet address into structure
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
+
+ /* find a DMA channel */
+ if (!dev->dma) {
+ if (dev->mem_start) {
+ dev->dma = dev->mem_start & 7;
+ }
+ else {
+ printk(KERN_WARNING "%s: warning, DMA channel not specified, using default\n", dev->name);
+ dev->dma = ELP_DMA;
+ }
+ }
+
+ /*
+ * print remainder of startup message
+ */
+ printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, ",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+ printk("addr %02x:%02x:%02x:%02x:%02x:%02x, ",
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /*
+ * read more information from the adapter
+ */
+
+ adapter->tx_pcb.command = CMD_ADAPTER_INFO;
+ adapter->tx_pcb.length = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
+ (adapter->rx_pcb.length != 10)) {
+ printk("not responding to second PCB\n");
+ }
+ printk("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
+
+ /*
+ * reconfigure the adapter memory to better suit our purposes
+ */
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.length = 12;
+ adapter->tx_pcb.data.memconf.cmd_q = 8;
+ adapter->tx_pcb.data.memconf.rcv_q = 8;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 10;
+ adapter->tx_pcb.data.memconf.rcv_b = 10;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
+ (adapter->rx_pcb.length != 2)) {
+ printk(KERN_ERR "%s: could not configure adapter memory\n", dev->name);
+ }
+ if (adapter->rx_pcb.data.configure) {
+ printk(KERN_ERR "%s: adapter configuration failed\n", dev->name);
+ }
+
+ dev->open = elp_open; /* local */
+ dev->stop = elp_close; /* local */
+ dev->get_stats = elp_get_stats; /* local */
+ dev->hard_start_xmit = elp_start_xmit; /* local */
+ dev->tx_timeout = elp_timeout; /* local */
+ dev->watchdog_timeo = 10*HZ;
+ dev->set_multicast_list = elp_set_mc_list; /* local */
+ dev->ethtool_ops = &netdev_ethtool_ops; /* local */
+
+ memset(&(adapter->stats), 0, sizeof(struct net_device_stats));
+ dev->mem_start = dev->mem_end = 0;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ return 0;
+out:
+ release_region(dev->base_addr, ELP_IO_EXTENT);
+ return err;
+}
+
+#ifndef MODULE
+struct net_device * __init elplus_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(elp_device));
+ int err;
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = elplus_setup(dev);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ return dev;
+}
+
+#else
+static struct net_device *dev_3c505[ELP_MAX_CARDS];
+static int io[ELP_MAX_CARDS];
+static int irq[ELP_MAX_CARDS];
+static int dma[ELP_MAX_CARDS];
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
+MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = alloc_etherdev(sizeof(elp_device));
+ if (!dev)
+ break;
+
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (dma[this_dev]) {
+ dev->dma = dma[this_dev];
+ } else {
+ dev->dma = ELP_DMA;
+ printk(KERN_WARNING "3c505.c: warning, using default DMA channel.\n");
+ }
+ if (io[this_dev] == 0) {
+ if (this_dev) {
+ free_netdev(dev);
+ break;
+ }
+ printk(KERN_NOTICE "3c505.c: module autoprobe not recommended, give io=xx.\n");
+ }
+ if (elplus_setup(dev) != 0) {
+ printk(KERN_WARNING "3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ dev_3c505[this_dev] = dev;
+ found++;
+ }
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_3c505[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ release_region(dev->base_addr, ELP_IO_EXTENT);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/3c505.h b/drivers/net/3c505.h
new file mode 100644
index 000000000000..77dfeedff815
--- /dev/null
+++ b/drivers/net/3c505.h
@@ -0,0 +1,293 @@
+/*****************************************************************
+ *
+ * defines for 3Com Etherlink Plus adapter
+ *
+ *****************************************************************/
+
+#define ELP_DMA 6
+#define ELP_RX_PCBS 4
+#define ELP_MAX_CARDS 4
+
+/*
+ * I/O register offsets
+ */
+#define PORT_COMMAND 0x00 /* read/write, 8-bit */
+#define PORT_STATUS 0x02 /* read only, 8-bit */
+#define PORT_AUXDMA 0x02 /* write only, 8-bit */
+#define PORT_DATA 0x04 /* read/write, 16-bit */
+#define PORT_CONTROL 0x06 /* read/write, 8-bit */
+
+#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
+
+/*
+ * host control registers bits
+ */
+#define ATTN 0x80 /* attention */
+#define FLSH 0x40 /* flush data register */
+#define DMAE 0x20 /* DMA enable */
+#define DIR 0x10 /* direction */
+#define TCEN 0x08 /* terminal count interrupt enable */
+#define CMDE 0x04 /* command register interrupt enable */
+#define HSF2 0x02 /* host status flag 2 */
+#define HSF1 0x01 /* host status flag 1 */
+
+/*
+ * combinations of HSF flags used for PCB transmission
+ */
+#define HSF_PCB_ACK HSF1
+#define HSF_PCB_NAK HSF2
+#define HSF_PCB_END (HSF2|HSF1)
+#define HSF_PCB_MASK (HSF2|HSF1)
+
+/*
+ * host status register bits
+ */
+#define HRDY 0x80 /* data register ready */
+#define HCRE 0x40 /* command register empty */
+#define ACRF 0x20 /* adapter command register full */
+/* #define DIR 0x10 direction - same as in control register */
+#define DONE 0x08 /* DMA done */
+#define ASF3 0x04 /* adapter status flag 3 */
+#define ASF2 0x02 /* adapter status flag 2 */
+#define ASF1 0x01 /* adapter status flag 1 */
+
+/*
+ * combinations of ASF flags used for PCB reception
+ */
+#define ASF_PCB_ACK ASF1
+#define ASF_PCB_NAK ASF2
+#define ASF_PCB_END (ASF2|ASF1)
+#define ASF_PCB_MASK (ASF2|ASF1)
+
+/*
+ * host aux DMA register bits
+ */
+#define DMA_BRST 0x01 /* DMA burst */
+
+/*
+ * maximum amount of data allowed in a PCB
+ */
+#define MAX_PCB_DATA 62
+
+/*****************************************************************
+ *
+ * timeout value
+ * this is a rough value used for loops to stop them from
+ * locking up the whole machine in the case of failure or
+ * error conditions
+ *
+ *****************************************************************/
+
+#define TIMEOUT 300
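+/*
+ * Usage note (illustration only, assuming HZ=100 as on contemporary x86):
+ * the driver busy-waits on expressions of the form
+ *        unsigned long timeout = jiffies + TIMEOUT;
+ * (see elp_get_stats in 3c505.c), so 300 jiffies bounds the wait at roughly
+ * three seconds; with other HZ settings the bound scales accordingly.
+ */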
+
+/*****************************************************************
+ *
+ * PCB commands
+ *
+ *****************************************************************/
+
+enum {
+ /*
+ * host PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
+ CMD_CONFIGURE_82586 = 0x02,
+ CMD_STATION_ADDRESS = 0x03,
+ CMD_DMA_DOWNLOAD = 0x04,
+ CMD_DMA_UPLOAD = 0x05,
+ CMD_PIO_DOWNLOAD = 0x06,
+ CMD_PIO_UPLOAD = 0x07,
+ CMD_RECEIVE_PACKET = 0x08,
+ CMD_TRANSMIT_PACKET = 0x09,
+ CMD_NETWORK_STATISTICS = 0x0a,
+ CMD_LOAD_MULTICAST_LIST = 0x0b,
+ CMD_CLEAR_PROGRAM = 0x0c,
+ CMD_DOWNLOAD_PROGRAM = 0x0d,
+ CMD_EXECUTE_PROGRAM = 0x0e,
+ CMD_SELF_TEST = 0x0f,
+ CMD_SET_STATION_ADDRESS = 0x10,
+ CMD_ADAPTER_INFO = 0x11,
+ NUM_TRANSMIT_CMDS,
+
+ /*
+ * adapter PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
+ CMD_CONFIGURE_82586_RESPONSE = 0x32,
+ CMD_ADDRESS_RESPONSE = 0x33,
+ CMD_DOWNLOAD_DATA_REQUEST = 0x34,
+ CMD_UPLOAD_DATA_REQUEST = 0x35,
+ CMD_RECEIVE_PACKET_COMPLETE = 0x38,
+ CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
+ CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
+ CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
+ CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
+ CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
+ CMD_EXECUTE_RESPONSE = 0x3e,
+ CMD_SELF_TEST_RESPONSE = 0x3f,
+ CMD_SET_ADDRESS_RESPONSE = 0x40,
+ CMD_ADAPTER_INFO_RESPONSE = 0x41
+};
+
+/* Definitions for the PCB data structure */
+
+/* Data units */
+typedef unsigned char byte;
+typedef unsigned short int word;
+typedef unsigned long int dword;
+
+/* Data structures */
+struct Memconf {
+ word cmd_q,
+ rcv_q,
+ mcast,
+ frame,
+ rcv_b,
+ progs;
+};
+
+struct Rcv_pkt {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ timeout;
+};
+
+struct Xmit_pkt {
+ word buf_ofs,
+ buf_seg,
+ pkt_len;
+};
+
+struct Rcv_resp {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ pkt_len,
+ timeout,
+ status;
+ dword timetag;
+};
+
+struct Xmit_resp {
+ word buf_ofs,
+ buf_seg,
+ c_stat,
+ status;
+};
+
+
+struct Netstat {
+ dword tot_recv,
+ tot_xmit;
+ word err_CRC,
+ err_align,
+ err_res,
+ err_ovrrun;
+};
+
+
+struct Selftest {
+ word error;
+ union {
+ word ROM_cksum;
+ struct {
+ word ofs, seg;
+ } RAM;
+ word i82586;
+ } failure;
+};
+
+struct Info {
+ byte minor_vers,
+ major_vers;
+ word ROM_cksum,
+ RAM_sz,
+ free_ofs,
+ free_seg;
+};
+
+struct Memdump {
+ word size,
+ off,
+ seg;
+};
+
+/*
+Primary Command Block. The most important data structure. All communication
+between the host and the adapter is done with these. (Except for the actual
+Ethernet data, which has different packaging.)
+*/
+typedef struct {
+ byte command;
+ byte length;
+ union {
+ struct Memconf memconf;
+ word configure;
+ struct Rcv_pkt rcv_pkt;
+ struct Xmit_pkt xmit_pkt;
+ byte multicast[10][6];
+ byte eth_addr[6];
+ byte failed;
+ struct Rcv_resp rcv_resp;
+ struct Xmit_resp xmit_resp;
+ struct Netstat netstat;
+ struct Selftest selftest;
+ struct Info info;
+ struct Memdump memdump;
+ byte raw[62];
+ } data;
+} pcb_struct;
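+
+/*
+ * Illustrative sketch only, not part of the driver: a host command is sent
+ * by filling in 'command' and 'length' and passing the pcb_struct to
+ * send_pcb(); the adapter's reply comes back as another PCB via
+ * receive_pcb(). The probe code in 3c505.c follows exactly this pattern,
+ * e.g. (variable names are illustrative):
+ *
+ *        pcb_struct pcb, reply;
+ *
+ *        pcb.command = CMD_ADAPTER_INFO;
+ *        pcb.length = 0;
+ *        if (send_pcb(dev, &pcb) && receive_pcb(dev, &reply) &&
+ *            reply.command == CMD_ADAPTER_INFO_RESPONSE)
+ *                printk("rev %d.%d\n", reply.data.info.major_vers,
+ *                       reply.data.info.minor_vers);
+ */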
+
+/* These defines are for 'configure' */
+#define RECV_STATION 0x00
+#define RECV_BROAD 0x01
+#define RECV_MULTI 0x02
+#define RECV_PROMISC 0x04
+#define NO_LOOPBACK 0x00
+#define INT_LOOPBACK 0x08
+#define EXT_LOOPBACK 0x10
+
+/*****************************************************************
+ *
+ * structure to hold context information for adapter
+ *
+ *****************************************************************/
+
+#define DMA_BUFFER_SIZE 1600
+#define BACKLOG_SIZE 4
+
+typedef struct {
+ volatile short got[NUM_TRANSMIT_CMDS]; /* flags for
+ command completion */
+ pcb_struct tx_pcb; /* PCB for foreground sending */
+ pcb_struct rx_pcb; /* PCB for foreground receiving */
+ pcb_struct itx_pcb; /* PCB for background sending */
+ pcb_struct irx_pcb; /* PCB for background receiving */
+ struct net_device_stats stats;
+
+ void *dma_buffer;
+
+ struct {
+ unsigned int length[BACKLOG_SIZE];
+ unsigned int in;
+ unsigned int out;
+ } rx_backlog;
+
+ struct {
+ unsigned int direction;
+ unsigned int length;
+ struct sk_buff *skb;
+ void *target;
+ unsigned long start_time;
+ } current_dma;
+
+ /* flags */
+ unsigned long send_pcb_semaphore;
+ unsigned long dmaing;
+ unsigned long busy;
+
+ unsigned int rx_active; /* number of receive PCBs */
+ volatile unsigned char hcr_val; /* what we think the HCR contains */
+ spinlock_t lock; /* Interrupt v tx lock */
+} elp_device;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
new file mode 100644
index 000000000000..4db82893909c
--- /dev/null
+++ b/drivers/net/3c507.c
@@ -0,0 +1,965 @@
+/* 3c507.c: An EtherLink16 device driver for Linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
+ and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
+ Mark Salazar <leslie@access.digex.net> made the changes for cards with
+ only 16K packet buffers.
+
+ Things remaining to do:
+ Verify that the tx and rx buffers don't have fencepost errors.
+ Move the theory of operation and memory map documentation.
+ The statistics need to be updated correctly.
+*/
+
+#define DRV_NAME "3c507"
+#define DRV_VERSION "1.10a"
+#define DRV_RELDATE "11/17/2001"
+
+static const char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
+
+/*
+ Sources:
+ This driver wouldn't have been written without the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586 :-<.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+#define debug net_debug
+
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+ Both are started from a list of three configuration tables, of which only
+ the last, the System Control Block (SCB), is used after reset-time. The SCB
+ has the following fields:
+ Status word
+ Command word
+ Tx/Command block addr.
+ Rx block addr.
+ The command word accepts the following controls for the Tx and Rx units:
+ */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+
+/* The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ int last_restart;
+ ushort rx_head;
+ ushort rx_tail;
+ ushort tx_head;
+ ushort tx_cmd_link;
+ ushort tx_reap;
+ ushort tx_pkts_in_ring;
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+/*
+ Details of the EtherLink16 Implementation
+ The 3c507 is a generic shared-memory i82586 implementation.
+ The host can map 16K, 32K, 48K, or 64K of the 64K memory into
+ 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
+ */
+
+/* Offsets from the base I/O address. */
+#define SA_DATA 0 /* Station address data, or 3Com signature. */
+#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
+#define RESET_IRQ 10 /* Reset the latched IRQ line. */
+#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
+#define ROM_CONFIG 13
+#define MEM_CONFIG 14
+#define IRQ_CONFIG 15
+#define EL16_IO_EXTENT 16
+
+/* The ID port is used at boot-time to locate the ethercard. */
+#define ID_PORT 0x100
+
+/* Offsets to registers in the mailbox (SCB). */
+#define iSCB_STATUS 0x8
+#define iSCB_CMD 0xA
+#define iSCB_CBL 0xC /* Command BLock offset. */
+#define iSCB_RFA 0xE /* Rx Frame Area offset. */
+
+/* Since the 3c507 maps the shared memory window so that the last byte is
+ at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
+ 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
+ We can account for this by setting the 'SCB Base' entry in the ISCP table
+ below for all the 16 bit offset addresses, and also adding the 'SCB Base'
+ value to all 24 bit physical addresses (in the SCP table and the TX and RX
+ Buffer Descriptors).
+ -Mark
+ */
+#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
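+/*
+ * Worked example (illustration only): with a 16K window, SCB_BASE is
+ * 64K - 16K = 0xC000, so host offset 0 maps to 82586 address 0xC000 and
+ * the last byte of the window lands at 82586 address 0xFFFF, exactly as
+ * the note above requires.
+ */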
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6. This is designed to be similar to the EtherExpress,
+ thus the unusual location of the SCB at 0x0008.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+ The databook is particularly useless for the first two structures; I had
+ to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA 0x56 /* A 170-byte buffer shared by the Dump and Set-MC commands. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 5
+#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
+#define RX_BUF_END (dev->mem_end - dev->mem_start)
+
+#define TX_TIMEOUT 5
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+ The Tx command chain and buffer list are set up as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+ it jumps to the NoOp, when it loops until the next Tx command changes the
+ "link offset" in the NoOp. This way the 82586 never has to go through the
+ slow restart sequence.
+
+ The Rx buffer list is set up in the obvious ring structure. We have enough
+ memory (and low enough interrupt latency) that we can avoid the complicated
+ Rx buffer linked lists by always associating a full-size Rx data buffer with
+ each Rx data frame.
+
+ I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
+ use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
+
+static unsigned short init_words[] = {
+ /* System Configuration Pointer (SCP). */
+ 0x0000, /* Set bus size to 16 bits. */
+ 0,0, /* pad words. */
+ 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
+
+ /* Intermediate System Configuration Pointer (ISCP). */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ /* System Control Block (SCB). */
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+ /* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+ 0, CmdMulticastList, IDLELOOP, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+static int el16_probe1(struct net_device *dev, int ioaddr);
+static int el16_open(struct net_device *dev);
+static int el16_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el16_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el16_rx(struct net_device *dev);
+static int el16_close(struct net_device *dev);
+static struct net_device_stats *el16_get_stats(struct net_device *dev);
+static void el16_tx_timeout (struct net_device *dev);
+
+static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
+static void init_82586_mem(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+static void init_rx_bufs(struct net_device *);
+
+static int io = 0x300;
+static int irq;
+static int mem_start;
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+
+struct net_device * __init el16_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ static unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
+ unsigned *port;
+ int err = -ENODEV;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ mem_start = dev->mem_start & 15;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) /* Check a single specified location. */
+ err = el16_probe1(dev, io);
+ else if (io != 0)
+ err = -ENXIO; /* Don't probe at all. */
+ else {
+ for (port = ports; *port; port++) {
+ err = el16_probe1(dev, *port);
+ if (!err)
+ break;
+ }
+ }
+
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ iounmap(((struct net_local *)netdev_priv(dev))->base);
+ release_region(dev->base_addr, EL16_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init el16_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned char init_ID_done, version_printed;
+ int i, irq, irqval, retval;
+ struct net_local *lp;
+
+ if (init_ID_done == 0) {
+ ushort lrs_state = 0xff;
+ /* Send the ID sequence to the ID_PORT to enable the board(s). */
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ if (lrs_state & 0x100)
+ lrs_state ^= 0xe7;
+ }
+ outb(0x00, ID_PORT);
+ init_ID_done = 1;
+ }
+
+ if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME))
+ return -ENODEV;
+
+ if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') ||
+ (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: 3c507 at %#x,", dev->name, ioaddr);
+
+ /* We should make a few more checks here, like the first three octets of
+ the S.A. for the manufacturer's code. */
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ irqval = request_irq(irq, &el16_interrupt, 0, DRV_NAME, dev);
+ if (irqval) {
+ printk(KERN_ERR "3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ dev->base_addr = ioaddr;
+
+ outb(0x01, ioaddr + MISC_CTRL);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk(" %02x", dev->dev_addr[i]);
+ }
+
+ if (mem_start)
+ net_debug = mem_start & 7;
+
+#ifdef MEM_BASE
+ dev->mem_start = MEM_BASE;
+ dev->mem_end = dev->mem_start + 0x10000;
+#else
+ {
+ int base;
+ int size;
+ char mem_config = inb(ioaddr + MEM_CONFIG);
+ if (mem_config & 0x20) {
+ size = 64*1024;
+ base = 0xf00000 + (mem_config & 0x08 ? 0x080000
+ : ((mem_config & 3) << 17));
+ } else {
+ size = ((mem_config & 3) + 1) << 14;
+ base = 0x0c0000 + ( (mem_config & 0x18) << 12);
+ }
+ dev->mem_start = base;
+ dev->mem_end = base + size;
+ }
+#endif
+
+ dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
+ dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ printk(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
+
+ if (net_debug)
+ printk(version);
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(*lp));
+ spin_lock_init(&lp->lock);
+ lp->base = ioremap(dev->mem_start, RX_BUF_END);
+ if (!lp->base) {
+ printk(KERN_ERR "3c507: unable to remap memory\n");
+ retval = -EAGAIN;
+ goto out1;
+ }
+
+ dev->open = el16_open;
+ dev->stop = el16_close;
+ dev->hard_start_xmit = el16_send_packet;
+ dev->get_stats = el16_get_stats;
+ dev->tx_timeout = el16_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, EL16_IO_EXTENT);
+ return retval;
+}
+
+static int el16_open(struct net_device *dev)
+{
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+static void el16_tx_timeout (struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ if (net_debug > 1)
+ printk ("%s: transmit timed out, %s? ", dev->name,
+ readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (net_debug > 1)
+ printk ("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem (dev);
+ lp->tx_pkts_in_ring = 0;
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1)
+ printk ("Kicking board.\n");
+ writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
+ outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+static int el16_send_packet (struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ netif_stop_queue (dev);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ lp->stats.tx_bytes += length;
+ /* Disable the 82586's input to the interrupt line. */
+ outb (0x80, ioaddr + MISC_CTRL);
+
+ hardware_send_packet (dev, buf, skb->len, length - skb->len);
+
+ dev->trans_start = jiffies;
+ /* Enable the 82586 interrupt input. */
+ outb (0x84, ioaddr + MISC_CTRL);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ dev_kfree_skb (skb);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t el16_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ ushort ack_cmd = 0;
+ void __iomem *shmem;
+
+ if (dev == NULL) {
+ printk ("el16_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+ shmem = lp->base;
+
+ spin_lock(&lp->lock);
+
+ status = readw(shmem+iSCB_STATUS);
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_pkts_in_ring) {
+ unsigned short tx_status = readw(shmem+lp->tx_reap);
+ if (!(tx_status & 0x8000)) {
+ if (net_debug > 5)
+ printk("Tx command incomplete (%#x).\n", lp->tx_reap);
+ break;
+ }
+ /* Tx unsuccessful or some interesting status bit set. */
+ if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
+ lp->stats.tx_errors++;
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ lp->stats.collisions += tx_status & 0xf;
+ }
+ lp->stats.tx_packets++;
+ if (net_debug > 5)
+ printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+
+ lp->tx_pkts_in_ring--;
+ /* There is always more space in the Tx ring buffer now. */
+ netif_wake_queue(dev);
+
+ if (++boguscount > 10)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ el16_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
+ if (net_debug)
+ printk("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we should really re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit.
+ For now we rely on the Tx timeout if the resume doesn't work. */
+ ack_cmd |= CUC_RESUME;
+ }
+
+ if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
+ /* The Rx unit is not ready; it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ if (net_debug)
+ printk("%s: Rx unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ init_rx_bufs(dev);
+ writew(RX_BUF_START,shmem+iSCB_RFA);
+ ack_cmd |= RX_START;
+ }
+
+ writew(ack_cmd,shmem+iSCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+
+ /* Clear the latched interrupt. */
+ outb(0, ioaddr + RESET_IRQ);
+
+ /* Enable the 82586's interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+static int el16_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx. */
+ writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* We always physically use the IRQ line, so we don't do free_irq(). */
+
+ /* Update the statistics here. */
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *el16_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ /* ToDo: decide if there are any useful statistics from the SCB. */
+
+ return &lp->stats;
+}
+
+/* Initialize the Rx-block list. */
+static void init_rx_bufs(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ void __iomem *write_ptr;
+ unsigned short SCB_base = SCB_BASE;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+
+ write_ptr = lp->base + cur_rxbuf;
+
+ writew(0x0000,write_ptr); /* Status */
+ writew(0x0000,write_ptr+=2); /* Command */
+ writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */
+ writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */
+ writew(0x0000,write_ptr+=2); /* Pad for dest addr. */
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2); /* Pad for source addr. */
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2); /* Pad for protocol. */
+
+ writew(0x0000,write_ptr+=2); /* Buffer: Actual count */
+ writew(-1,write_ptr+=2); /* Buffer: Next (none). */
+ writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */
+ writew(0x0000,write_ptr+=2);
+ /* Finally, the number of bytes in the buffer. */
+ writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2);
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ write_ptr = lp->base + lp->rx_tail + 2;
+ writew(0xC000,write_ptr); /* Command, mark as last. */
+ writew(lp->rx_head,write_ptr+2); /* Link */
+}
+
+static void init_82586_mem(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ /* Enable loopback to protect the wire while starting up,
+ and hold the 586 in reset during the memory initialization. */
+ outb(0x20, ioaddr + MISC_CTRL);
+
+ /* Fix the ISCP address and base. */
+ init_words[3] = SCB_BASE;
+ init_words[7] = SCB_BASE;
+
+ /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
+ memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10);
+
+ /* Write the words at 0x0000. */
+ memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
+
+ /* Fill in the station address. */
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
+ sizeof(dev->dev_addr));
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line, but leave loopback. */
+ outb(0xA0, ioaddr + MISC_CTRL);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (readw(shmem+iSCB_STATUS) == 0)
+ if (--boguscnt == 0) {
+ printk("%s: i82586 initialization timed out with status %04x,"
+ "cmd %04x.\n", dev->name,
+ readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
+ break;
+ }
+ /* Issue the second channel-attn; the 82586 won't start without it. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback and enable interrupts. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ if (net_debug > 4)
+ printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ readw(shmem+iSCB_STATUS));
+ return;
+}
+
+static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ ushort tx_block = lp->tx_head;
+ void __iomem *write_ptr = lp->base + tx_block;
+ static char padding[ETH_ZLEN];
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ writew(0x0000,write_ptr); /* Tx status */
+ writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */
+ writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */
+ writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
+ writew(-1,write_ptr+=2); /* No next data buffer. */
+ writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
+ writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ writew(0x0000,write_ptr+=2); /* Tx status */
+ writew(CmdNOp,write_ptr+=2); /* Tx command */
+ writew(tx_block+16,write_ptr+=2); /* Next is myself. */
+
+ /* Output the packet at the write pointer. */
+ memcpy_toio(write_ptr+2, buf, length);
+ if (pad)
+ memcpy_toio(write_ptr+length+2, padding, pad);
+
+ /* Set the old command link pointing to this send packet. */
+ writew(tx_block,lp->base + lp->tx_cmd_link);
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ dev->name, ioaddr, length, tx_block, lp->tx_head);
+ }
+
+ /* Grimly block further packets if there has been insufficient reaping. */
+ if (++lp->tx_pkts_in_ring < NUM_TX_BUFS)
+ netif_wake_queue(dev);
+}
+
+static void el16_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ void __iomem *shmem = lp->base;
+ ushort rx_head = lp->rx_head;
+ ushort rx_tail = lp->rx_tail;
+ ushort boguscount = 10;
+ short frame_status;
+
+ while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */
+ void __iomem *read_frame = lp->base + rx_head;
+ ushort rfd_cmd = readw(read_frame+2);
+ ushort next_rx_frame = readw(read_frame+4);
+ ushort data_buffer_addr = readw(read_frame+6);
+ void __iomem *data_frame = lp->base + data_buffer_addr;
+ ushort pkt_len = readw(data_frame);
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
+ || (pkt_len & 0xC000) != 0xC000) {
+ printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x"
+ "next %04x data-buf @%04x %04x.\n", dev->name, rx_head,
+ frame_status, rfd_cmd, next_rx_frame, data_buffer_addr,
+ pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ lp->stats.rx_errors++;
+ if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) lp->stats.rx_over_errors++;
+ if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ writew(0,read_frame);
+ writew(0xC000,read_frame+2);
+ /* Clear the end-of-list on the prev. RFD. */
+ writew(0x0000,lp->base + rx_tail + 2);
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+static struct net_device *dev_3c507;
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
+MODULE_PARM_DESC(irq, "(ignored)");
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c507: You should not use auto-probing with insmod!\n");
+ dev_3c507 = el16_probe(-1);
+ return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
+}
+
+void
+cleanup_module(void)
+{
+ struct net_device *dev = dev_3c507;
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(((struct net_local *)netdev_priv(dev))->base);
+ release_region(dev->base_addr, EL16_IO_EXTENT);
+ free_netdev(dev);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c 3c507.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
new file mode 100644
index 000000000000..e843109d4f62
--- /dev/null
+++ b/drivers/net/3c509.c
@@ -0,0 +1,1622 @@
+/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
+/*
+ Written 1993-2000 by Donald Becker.
+
+ Copyright 1994-2000 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This driver is for the 3Com EtherLinkIII series.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Known limitations:
+ Because of the way 3c509 ISA detection works it's difficult to predict
+ a priori which of several ISA-mode cards will be detected first.
+
+ This driver does not use predictive interrupt mode, resulting in higher
+ packet latency but lower overhead. If interrupts are disabled for an
+ unusually long time it could also result in missed packets, but in
+ practice this rarely happens.
+
+
+ FIXES:
+ Alan Cox: Removed the 'Unexpected interrupt' bug.
+ Michael Meskes: Upgraded to Donald Becker's version 1.07.
+ Alan Cox: Increased the eeprom delay. Regardless of
+ what the docs say some people definitely
+ get problems with lower (but in card spec)
+ delays
+ v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
+ other cleanups. -djb
+ Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
+ Rick Payne: Fixed SMP race condition
+ v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
+ v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
+ v1.15 1/31/98 Faster recovery for Tx errors. -djb
+ v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
+ v1.18 12Mar2001 Andrew Morton <andrewm@uow.edu.au>
+ - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz)
+ - Reviewed against 1.18 from scyld.com
+ v1.18a 17Nov2001 Jeff Garzik <jgarzik@pobox.com>
+ - ethtool support
+ v1.18b 1Mar2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ - Power Management support
+ v1.18c 1Mar2002 David Ruggiero <jdr@farfalle.com>
+ - Full duplex support
+ v1.19 16Oct2002 Zwane Mwaikambo <zwane@linuxpower.ca>
+ - Additional ethtool features
+ v1.19a 28Oct2002 David Ruggiero <jdr@farfalle.com>
+ - Increase *read_eeprom udelay to workaround oops with 2 cards.
+ v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org>
+ - Introduce driver model for EISA cards.
+*/
+
+#define DRV_NAME "3c509"
+#define DRV_VERSION "1.19b"
+#define DRV_RELDATE "08Nov2002"
+
+/* A few values that may be tweaked. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+
+#include <linux/config.h>
+#include <linux/module.h>
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+#include <linux/isapnp.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h> /* for udelay() */
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/device.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
+static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
+
+#ifdef EL3_DEBUG
+static int el3_debug = EL3_DEBUG;
+#else
+static int el3_debug = 2;
+#endif
+
+/* Used to do a global count of all the cards in the system. Must be
+ * a global variable so that the mca/eisa probe routines can increment
+ * it */
+static int el3_cards = 0;
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ anyway if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+#define EEPROM_READ 0x80
+
+#define EL3_IO_EXTENT 16
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
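+/* Typical usage in this driver: EL3WINDOW(0) before touching the EEPROM or
+ configuration registers, EL3WINDOW(1) to return to the normal operating
+ window (see the probe, open and stats paths below). */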
+
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11,
+ PowerDown = 28<<11, PowerAuto = 29<<11};
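+
+/* Example encoding: "SetTxThreshold + 1536" (used in el3_start_xmit below)
+ puts command 18 in the top five bits and 1536 in the 11-bit parameter. */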
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */
+#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA	0x0A		/* Window 4: Various transceiver/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */
+#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */
+
+/*
+ * Must be a power of two (we use a binary AND to wrap the
+ * circular queue index)
+ */
+#define SKB_QUEUE_SIZE 64
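+/* Illustrative only: a power-of-two size lets the queue index wrap with a
+ mask, e.g. head = (head + 1) & (SKB_QUEUE_SIZE - 1), instead of a modulo. */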
+
+struct el3_private {
+ struct net_device_stats stats;
+ struct net_device *next_dev;
+ spinlock_t lock;
+ /* skb send-queue */
+ int head, size;
+ struct sk_buff *queue[SKB_QUEUE_SIZE];
+#ifdef CONFIG_PM
+ struct pm_dev *pmdev;
+#endif
+ enum {
+ EL3_MCA,
+ EL3_PNP,
+ EL3_EISA,
+ } type; /* type of device */
+ struct device *dev;
+};
+static int id_port __initdata = 0x110; /* Start with 0x110 to avoid new sound cards.*/
+static struct net_device *el3_root_dev;
+
+static ushort id_read_eeprom(int index);
+static ushort read_eeprom(int ioaddr, int index);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void el3_tx_timeout (struct net_device *dev);
+static void el3_down(struct net_device *dev);
+static void el3_up(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+#ifdef CONFIG_PM
+static int el3_suspend(struct pm_dev *pdev);
+static int el3_resume(struct pm_dev *pdev);
+static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data);
+#endif
+/* generic device remove for all device types */
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+static int el3_device_remove (struct device *device);
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void el3_poll_controller(struct net_device *dev);
+#endif
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id el3_eisa_ids[] = {
+ { "TCM5092" },
+ { "TCM5093" },
+ { "" }
+};
+
+static int el3_eisa_probe (struct device *device);
+
+static struct eisa_driver el3_eisa_driver = {
+ .id_table = el3_eisa_ids,
+ .driver = {
+ .name = "3c509",
+ .probe = el3_eisa_probe,
+ .remove = __devexit_p (el3_device_remove)
+ }
+};
+#endif
+
+#ifdef CONFIG_MCA
+static int el3_mca_probe(struct device *dev);
+
+static short el3_mca_adapter_ids[] __initdata = {
+ 0x627c,
+ 0x627d,
+ 0x62db,
+ 0x62f6,
+ 0x62f7,
+ 0x0000
+};
+
+static char *el3_mca_adapter_names[] __initdata = {
+ "3Com 3c529 EtherLink III (10base2)",
+ "3Com 3c529 EtherLink III (10baseT)",
+ "3Com 3c529 EtherLink III (test mode)",
+ "3Com 3c529 EtherLink III (TP or coax)",
+ "3Com 3c529 EtherLink III (TP)",
+ NULL
+};
+
+static struct mca_driver el3_mca_driver = {
+ .id_table = el3_mca_adapter_ids,
+ .driver = {
+ .name = "3c529",
+ .bus = &mca_bus_type,
+ .probe = el3_mca_probe,
+ .remove = __devexit_p(el3_device_remove),
+ },
+};
+#endif /* CONFIG_MCA */
+
+#if defined(__ISAPNP__)
+static struct isapnp_device_id el3_isapnp_adapters[] __initdata = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090),
+ (long) "3Com Etherlink III (TP)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091),
+ (long) "3Com Etherlink III" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094),
+ (long) "3Com Etherlink III (combo)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095),
+ (long) "3Com Etherlink III (TPO)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098),
+ (long) "3Com Etherlink III (TPC)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f7),
+ (long) "3Com Etherlink III compatible" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8),
+ (long) "3Com Etherlink III compatible" },
+ { } /* terminate list */
+};
+
+static u16 el3_isapnp_phys_addr[8][3];
+static int nopnp;
+#endif /* __ISAPNP__ */
+
+/* With the driver model introduction for EISA devices, both init
+ * and cleanup have been split:
+ * - EISA devices probe/remove starts in el3_eisa_probe/el3_device_remove
+ * - MCA/ISA still use el3_probe
+ *
+ * Both call el3_common_init/el3_common_remove. */
+
+static int __init el3_common_init(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ short i;
+ int err;
+
+ spin_lock_init(&lp->lock);
+
+ if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */
+ dev->if_port = (dev->mem_start & 0x0f);
+ } else { /* xcvr codes 0/8 */
+ /* use eeprom value, but save user's full-duplex selection */
+ dev->if_port |= (dev->mem_start & 0x08);
+ }
+
+ /* The EL3-specific entries in the device structure. */
+ dev->open = &el3_open;
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->stop = &el3_close;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = el3_poll_controller;
+#endif
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n",
+ dev->base_addr, dev->irq);
+ release_region(dev->base_addr, EL3_IO_EXTENT);
+ return err;
+ }
+
+ {
+ const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+ printk("%s: 3c5x9 found at %#3.3lx, %s port, address ",
+ dev->name, dev->base_addr,
+ if_names[(dev->if_port & 0x03)]);
+ }
+
+ /* Read in the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+ printk(", IRQ %d.\n", dev->irq);
+
+ if (el3_debug > 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return 0;
+
+}
+
+static void el3_common_remove (struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+
+ (void) lp; /* Keep gcc quiet... */
+#ifdef CONFIG_PM
+ if (lp->pmdev)
+ pm_unregister(lp->pmdev);
+#endif
+#if defined(__ISAPNP__)
+ if (lp->type == EL3_PNP)
+ pnp_device_detach(to_pnp_dev(lp->dev));
+#endif
+
+ unregister_netdev (dev);
+ release_region(dev->base_addr, EL3_IO_EXTENT);
+ free_netdev (dev);
+}
+
+static int __init el3_probe(int card_idx)
+{
+ struct net_device *dev;
+ struct el3_private *lp;
+ short lrs_state = 0xff, i;
+ int ioaddr, irq, if_port;
+ u16 phys_addr[3];
+ static int current_tag;
+ int err = -ENODEV;
+#if defined(__ISAPNP__)
+ static int pnp_cards;
+ struct pnp_dev *idev = NULL;
+
+ if (nopnp == 1)
+ goto no_pnp;
+
+ for (i=0; el3_isapnp_adapters[i].vendor != 0; i++) {
+ int j;
+ while ((idev = pnp_find_dev(NULL,
+ el3_isapnp_adapters[i].vendor,
+ el3_isapnp_adapters[i].function,
+ idev))) {
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+__again:
+ pnp_device_detach(idev);
+ continue;
+ }
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
+ goto __again;
+ ioaddr = pnp_port_start(idev, 0);
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509 PnP")) {
+ pnp_device_detach(idev);
+ return -EBUSY;
+ }
+ irq = pnp_irq(idev, 0);
+ if (el3_debug > 3)
+ printk ("ISAPnP reports %s at i/o 0x%x, irq %d\n",
+ (char*) el3_isapnp_adapters[i].driver_data, ioaddr, irq);
+ EL3WINDOW(0);
+ for (j = 0; j < 3; j++)
+ el3_isapnp_phys_addr[pnp_cards][j] =
+ phys_addr[j] =
+ htons(read_eeprom(ioaddr, j));
+ if_port = read_eeprom(ioaddr, 8) >> 14;
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (!dev) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ pnp_device_detach(idev);
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &idev->dev);
+ pnp_cards++;
+
+ netdev_boot_setup_check(dev);
+ goto found;
+ }
+ }
+no_pnp:
+#endif /* __ISAPNP__ */
+
+ /* Select an open I/O location at 0x1*0 to do contention select. */
+ for ( ; id_port < 0x200; id_port += 0x10) {
+ if (!request_region(id_port, 1, "3c509"))
+ continue;
+ outb(0x00, id_port);
+ outb(0xff, id_port);
+ if (inb(id_port) & 0x01){
+ release_region(id_port, 1);
+ break;
+ } else
+ release_region(id_port, 1);
+ }
+ if (id_port >= 0x200) {
+ /* Rare -- do we really need a warning? */
+ printk(" WARNING: No I/O port available for 3c509 activation.\n");
+ return -ENODEV;
+ }
+
+ /* Next check for all ISA bus boards by sending the ID sequence to the
+ ID_PORT. We find cards past the first by setting the 'current_tag'
+ on cards as they are found. Cards with their tag set will not
+ respond to subsequent ID sequences. */
+
+ outb(0x00, id_port);
+ outb(0x00, id_port);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, id_port);
+ lrs_state <<= 1;
+ lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+ }
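+ /* The loop above clocks out the 255-step pseudo-random ID sequence
+ (an 8-bit LFSR seeded with 0xff, tap mask 0xcf) that EtherLink III
+ cards in their ID-wait state all follow; the two zero writes before
+ it presumably reset that state machine. */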
+
+ /* For the first probe, clear all board's tag registers. */
+ if (current_tag == 0)
+ outb(0xd0, id_port);
+ else /* Otherwise kill off already-found boards. */
+ outb(0xd8, id_port);
+
+ if (id_read_eeprom(7) != 0x6d50) {
+ return -ENODEV;
+ }
+
+ /* Read in EEPROM data, which does contention-select.
+ Only the lowest address board will stay "on-line".
+ 3Com got the byte order backwards. */
+ for (i = 0; i < 3; i++) {
+ phys_addr[i] = htons(id_read_eeprom(i));
+ }
+
+#if defined(__ISAPNP__)
+ if (nopnp == 0) {
+ /* The ISA PnP 3c509 cards respond to the ID sequence.
+ This check is needed in order not to register them twice. */
+ for (i = 0; i < pnp_cards; i++) {
+ if (phys_addr[0] == el3_isapnp_phys_addr[i][0] &&
+ phys_addr[1] == el3_isapnp_phys_addr[i][1] &&
+ phys_addr[2] == el3_isapnp_phys_addr[i][2])
+ {
+ if (el3_debug > 3)
+ printk("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
+ phys_addr[0] & 0xff, phys_addr[0] >> 8,
+ phys_addr[1] & 0xff, phys_addr[1] >> 8,
+ phys_addr[2] & 0xff, phys_addr[2] >> 8);
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+ goto no_pnp;
+ }
+ }
+ }
+#endif /* __ISAPNP__ */
+
+ {
+ unsigned int iobase = id_read_eeprom(8);
+ if_port = iobase >> 14;
+ ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+ }
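+ /* Worked example: low five bits of 0x10 in the address-config word give
+ ioaddr = 0x200 + (0x10 << 4) = 0x300. */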
+ irq = id_read_eeprom(9) >> 12;
+
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+
+ netdev_boot_setup_check(dev);
+
+ /* Set passed-in IRQ or I/O Addr. */
+ if (dev->irq > 1 && dev->irq < 16)
+ irq = dev->irq;
+
+ if (dev->base_addr) {
+ if (dev->mem_end == 0x3c509 /* Magic key */
+ && dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0)
+ ioaddr = dev->base_addr & 0x3f0;
+ else if (dev->base_addr != ioaddr)
+ goto out;
+ }
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+
+ /* Activate the adaptor at the EEPROM location. */
+ outb((ioaddr >> 4) | 0xe0, id_port);
+
+ EL3WINDOW(0);
+ if (inw(ioaddr) != 0x6d50)
+ goto out1;
+
+ /* Free the interrupt so that some other card can use it. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+#if defined(__ISAPNP__)
+ found: /* PNP jumps here... */
+#endif /* __ISAPNP__ */
+
+ memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = if_port;
+ lp = netdev_priv(dev);
+#if defined(__ISAPNP__)
+ lp->dev = &idev->dev;
+#endif
+ err = el3_common_init(dev);
+
+ if (err)
+ goto out1;
+
+#ifdef CONFIG_PM
+ /* register power management */
+ lp->pmdev = pm_register(PM_ISA_DEV, card_idx, el3_pm_callback);
+ if (lp->pmdev) {
+ struct pm_dev *p;
+ p = lp->pmdev;
+ p->data = (struct net_device *)dev;
+ }
+#endif
+
+ el3_cards++;
+ lp->next_dev = el3_root_dev;
+ el3_root_dev = dev;
+ return 0;
+
+out1:
+#if defined(__ISAPNP__)
+ if (idev)
+ pnp_device_detach(idev);
+#endif
+out:
+ free_netdev(dev);
+ return err;
+}
+
+#ifdef CONFIG_MCA
+static int __init el3_mca_probe(struct device *device)
+{
+ /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
+ * heavily modified by Chris Beauregard
+ * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA
+ * probing.
+ *
+ * redone for multi-card detection by ZP Gu (zpg@castle.net)
+ * now works as a module */
+
+ struct el3_private *lp;
+ short i;
+ int ioaddr, irq, if_port;
+ u16 phys_addr[3];
+ struct net_device *dev = NULL;
+ u_char pos4, pos5;
+ struct mca_device *mdev = to_mca_device(device);
+ int slot = mdev->slot;
+ int err;
+
+ pos4 = mca_device_read_stored_pos(mdev, 4);
+ pos5 = mca_device_read_stored_pos(mdev, 5);
+
+ ioaddr = ((short)((pos4&0xfc)|0x02)) << 8;
+ irq = pos5 & 0x0f;
+
+ printk("3c529: found %s at slot %d\n",
+ el3_mca_adapter_names[mdev->index], slot + 1);
+
+ /* claim the slot */
+ strncpy(mdev->name, el3_mca_adapter_names[mdev->index],
+ sizeof(mdev->name));
+ mca_device_set_claim(mdev, 1);
+
+ if_port = pos4 & 0x03;
+
+ irq = mca_device_transform_irq(mdev, irq);
+ ioaddr = mca_device_transform_ioport(mdev, ioaddr);
+ if (el3_debug > 2) {
+ printk("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
+ }
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++) {
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ }
+
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (dev == NULL) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ netdev_boot_setup_check(dev);
+
+ memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = if_port;
+ lp = netdev_priv(dev);
+ lp->dev = device;
+ lp->type = EL3_MCA;
+ device->driver_data = dev;
+ err = el3_common_init(dev);
+
+ if (err) {
+ device->driver_data = NULL;
+ free_netdev(dev);
+ return -ENOMEM;
+ }
+
+ el3_cards++;
+ return 0;
+}
+
+#endif /* CONFIG_MCA */
+
+#ifdef CONFIG_EISA
+static int __init el3_eisa_probe (struct device *device)
+{
+ struct el3_private *lp;
+ short i;
+ int ioaddr, irq, if_port;
+ u16 phys_addr[3];
+ struct net_device *dev = NULL;
+ struct eisa_device *edev;
+ int err;
+
+ /* Yippee, the driver framework is calling us! */
+ edev = to_eisa_device (device);
+ ioaddr = edev->base_addr;
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509"))
+ return -EBUSY;
+
+ /* Change the register set to the configuration window 0. */
+ outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
+
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ /* Restore the "Product ID" to the EEPROM read register. */
+ read_eeprom(ioaddr, 3);
+
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (dev == NULL) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ netdev_boot_setup_check(dev);
+
+ memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = if_port;
+ lp = netdev_priv(dev);
+ lp->dev = device;
+ lp->type = EL3_EISA;
+ eisa_set_drvdata (edev, dev);
+ err = el3_common_init(dev);
+
+ if (err) {
+ eisa_set_drvdata (edev, NULL);
+ free_netdev(dev);
+ return err;
+ }
+
+ el3_cards++;
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+/* This remove works for all device types.
+ *
+ * The net dev must be stored in the driver_data field */
+static int __devexit el3_device_remove (struct device *device)
+{
+ struct net_device *dev;
+
+ dev = device->driver_data;
+
+ el3_common_remove (dev);
+ return 0;
+}
+#endif
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static ushort read_eeprom(int ioaddr, int index)
+{
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Pause for at least 162 us for the read to take place.
+ Some chips seem to require much longer. */
+ mdelay(2);
+ return inw(ioaddr + 12);
+}
+
+/* Read a word from the EEPROM when in the ISA ID probe state. */
+static ushort __init id_read_eeprom(int index)
+{
+ int bit, word = 0;
+
+ /* Issue read command, and pause for at least 162 us for it to complete.
+ Assume an extra-fast 16 MHz bus. */
+ outb(EEPROM_READ + index, id_port);
+
+ /* Pause for at least 162 us for the read to take place. */
+ /* Some chips seem to require much longer. */
+ mdelay(4);
+
+ for (bit = 15; bit >= 0; bit--)
+ word = (word << 1) + (inb(id_port) & 0x01);
+
+ if (el3_debug > 3)
+ printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
+
+ return word;
+}
+
+
+static int
+el3_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ i = request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev);
+ if (i)
+ return i;
+
+ EL3WINDOW(0);
+ if (el3_debug > 3)
+ printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+ dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
+
+ el3_up(dev);
+
+ if (el3_debug > 3)
+ printk("%s: Opened 3c509 IRQ %d status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
+
+ return 0;
+}
+
+static void
+el3_tx_timeout (struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
+ "Tx FIFO room %d.\n",
+ dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
+ inw(ioaddr + TX_FREE));
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+
+static int
+el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ netif_stop_queue (dev);
+
+ lp->stats.tx_bytes += skb->len;
+
+ if (el3_debug > 4) {
+ printk("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
+ dev->name, skb->len, inw(ioaddr + EL3_STATUS));
+ }
+#if 0
+#ifndef final_version
+ { /* Error-checking code, delete someday. */
+ ushort status = inw(ioaddr + EL3_STATUS);
+ if (status & 0x0001 /* IRQ line active, missed one. */
+ && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
+ printk("%s: Missed interrupt, status then %04x now %04x"
+ " Tx %2.2x Rx %4.4x.\n", dev->name, status,
+ inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
+ inw(ioaddr + RX_STATUS));
+ /* Fake interrupt trigger by masking, acknowledge interrupts. */
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ }
+ }
+#endif
+#endif
+ /*
+ * We lock the driver against other processors. Note
+ * we don't need to lock versus the IRQ as we suspended
+ * that. This means that we lose the ability to take
+ * an RX during a TX upload. That sucks a bit with SMP
+ * on an original 3c509 (2K buffer)
+ *
+ * Using disable_irq stops us crapping on other
+ * time sensitive devices.
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+#ifdef __powerpc__
+ outsl_ns(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#else
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#endif
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) > 1536)
+ netif_start_queue(dev);
+ else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb (skb);
+
+ /* Clear the Tx status stack. */
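+ /* The TX_STATUS bits are not named symbolically here; 0x38 appears to
+ cover the abort conditions (jabber, underrun, maximum collisions),
+ 0x30 the subset that needs a TxReset, and 0x04 a status-stack
+ overflow; any of these re-enables transmit via TxEnable. */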
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t
+el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct el3_private *lp;
+ int ioaddr, status;
+ int i = max_interrupt_work;
+
+ if (dev == NULL) {
+ printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ spin_lock(&lp->lock);
+
+ ioaddr = dev->base_addr;
+
+ if (el3_debug > 4) {
+ status = inw(ioaddr + EL3_STATUS);
+ printk("%s: interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ if (el3_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue (dev);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & TxComplete) { /* Really Tx error. */
+ struct el3_private *lp = netdev_priv(dev);
+ short tx_status;
+ int i = 4;
+
+ while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ outw(SetRxFilter | RxStation | RxBroadcast
+ | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
+ | (dev->flags & IFF_PROMISC ? RxProm : 0),
+ ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ printk("%s: Infinite loop in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupts. */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
+ }
+
+ if (el3_debug > 4) {
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
+ inw(ioaddr + EL3_STATUS));
+ }
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void el3_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ el3_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct net_device_stats *
+el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /*
+ * This is fast enough not to bother with disable IRQ
+ * stuff.
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return &lp->stats;
+}
+
+/* Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+ */
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el3_debug > 5)
+ printk(" Updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ return;
+}
+
+static int
+el3_rx(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short rx_status;
+
+ if (el3_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
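+ /* RX_STATUS, as decoded below: bit 14 flags an error, bits 11-13 give
+ the error code, and the low 11 bits carry the packet length. */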
+ while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+ lp->stats.rx_bytes += pkt_len;
+ if (el3_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+#ifdef __powerpc__
+ insl_ns(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#else
+ insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#endif
+
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ continue;
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ lp->stats.rx_dropped++;
+ if (el3_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ inw(ioaddr + EL3_STATUS); /* Delay. */
+ while (inw(ioaddr + EL3_STATUS) & 0x1000)
+ printk(KERN_DEBUG " Waiting for 3c509 to discard packet, status %x.\n",
+ inw(ioaddr + EL3_STATUS) );
+ }
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el3_debug > 1) {
+ static int old;
+ if (old != dev->mc_count) {
+ old = dev->mc_count;
+ printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ }
+ }
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags&IFF_PROMISC) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ }
+ else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
+ }
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static int
+el3_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+
+ if (el3_debug > 2)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ el3_down(dev);
+
+ free_irq(dev->irq, dev);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ if (lp->type != EL3_EISA) {
+ /* But we explicitly zero the IRQ line select anyway. Don't do
+ * it on EISA cards, it prevents the module from getting an
+ * IRQ after unload+reload... */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+ }
+
+ return 0;
+}
+
+static int
+el3_link_ok(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u16 tmp;
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
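+ /* Bit 11 (0x0800) of the Window 4 media register is the link-beat-detect
+ bit; the 3c515 driver below names the same bit Media_LnkBeat. */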
+ return tmp & (1<<11);
+}
+
+static int
+el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ /* obtain current transceiver via WN4_MEDIA? */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ ecmd->transceiver = XCVR_INTERNAL;
+ switch (tmp >> 14) {
+ case 0:
+ ecmd->port = PORT_TP;
+ break;
+ case 1:
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 3:
+ ecmd->port = PORT_BNC;
+ default:
+ break;
+ }
+
+ ecmd->duplex = DUPLEX_HALF;
+ ecmd->supported = 0;
+ tmp = inw(ioaddr + WN0_CONF_CTRL);
+ if (tmp & (1<<13))
+ ecmd->supported |= SUPPORTED_AUI;
+ if (tmp & (1<<12))
+ ecmd->supported |= SUPPORTED_BNC;
+ if (tmp & (1<<9)) {
+ ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full; /* hmm... */
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (tmp & FD_ENABLE)
+ ecmd->duplex = DUPLEX_FULL;
+ }
+
+ ecmd->speed = SPEED_10;
+ EL3WINDOW(1);
+ return 0;
+}
+
+static int
+el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL))
+ return -EINVAL;
+
+ /* change XCVR type */
+ EL3WINDOW(0);
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ switch (ecmd->port) {
+ case PORT_TP:
+ tmp &= ~(3<<14);
+ dev->if_port = 0;
+ break;
+ case PORT_AUI:
+ tmp |= (1<<14);
+ dev->if_port = 1;
+ break;
+ case PORT_BNC:
+ tmp |= (3<<14);
+ dev->if_port = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ outw(tmp, ioaddr + WN0_ADDR_CONF);
+ if (dev->if_port == 3) {
+ /* fire up the DC-DC converter if BNC gets enabled */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ if (tmp & (3 << 14)) {
+ outw(StartCoax, ioaddr + EL3_CMD);
+ udelay(800);
+ } else
+ return -EIO;
+ }
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (ecmd->duplex == DUPLEX_FULL)
+ tmp |= FD_ENABLE;
+ else
+ tmp &= ~FD_ENABLE;
+ outw(tmp, ioaddr + WN4_NETDIAG);
+ EL3WINDOW(1);
+
+ return 0;
+}
+
+static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_get_ecmd(dev, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_set_ecmd(dev, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static u32 el3_get_link(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ u32 ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_link_ok(dev);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static u32 el3_get_msglevel(struct net_device *dev)
+{
+ return el3_debug;
+}
+
+static void el3_set_msglevel(struct net_device *dev, u32 v)
+{
+ el3_debug = v;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = el3_get_drvinfo,
+ .get_settings = el3_get_settings,
+ .set_settings = el3_set_settings,
+ .get_link = el3_get_link,
+ .get_msglevel = el3_get_msglevel,
+ .set_msglevel = el3_set_msglevel,
+};
+
+static void
+el3_down(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 3)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* Disable link beat and jabber, if_port may change here next open(). */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(dev);
+}
+
+static void
+el3_up(struct net_device *dev)
+{
+ int i, sw_info, net_diag;
+ int ioaddr = dev->base_addr;
+
+ /* Activating the board is required and does no harm otherwise */
+ outw(0x0001, ioaddr + 4);
+
+ /* Set the IRQ line. */
+ outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
+
+ /* Set the station address in window 2 each time opened. */
+ EL3WINDOW(2);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ if ((dev->if_port & 0x03) == 3) /* BNC interface */
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */
+ /* Combine secondary sw_info word (the adapter level) and primary
+ sw_info word (duplex setting plus other useless bits) */
+ EL3WINDOW(0);
+ sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) |
+ (read_eeprom(ioaddr, 0x0d) & 0xBff0);
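+ /* As used below: a non-zero low nibble in sw_info apparently marks a
+ 3c5x9B, and bit 15 carries the EEPROM full-duplex setting. */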
+
+ EL3WINDOW(4);
+ net_diag = inw(ioaddr + WN4_NETDIAG);
+ net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */
+ printk("%s: ", dev->name);
+ switch (dev->if_port & 0x0c) {
+ case 12:
+ /* force full-duplex mode if 3c5x9b */
+ if (sw_info & 0x000f) {
+ printk("Forcing 3c5x9b full-duplex mode");
+ break;
+ }
+ case 8:
+ /* set full-duplex mode based on eeprom config setting */
+ if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
+ printk("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
+ break;
+ }
+ default:
+ /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
+ printk("Setting 3c5x9/3c5x9B half-duplex mode");
+ net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */
+ }
+
+ outw(net_diag, ioaddr + WN4_NETDIAG);
+ printk(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
+ if (el3_debug > 3)
+ printk("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
+ /* Enable link beat and jabber check. */
+ outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept broadcast and station (physical) address only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
+ ioaddr + EL3_CMD);
+
+ netif_start_queue(dev);
+}
+
+/* Power Management support functions */
+#ifdef CONFIG_PM
+
+static int
+el3_suspend(struct pm_dev *pdev)
+{
+ unsigned long flags;
+ struct net_device *dev;
+ struct el3_private *lp;
+ int ioaddr;
+
+ if (!pdev || !pdev->data)
+ return -EINVAL;
+
+ dev = (struct net_device *)pdev->data;
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if (netif_running(dev))
+ netif_device_detach(dev);
+
+ el3_down(dev);
+ outw(PowerDown, ioaddr + EL3_CMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+static int
+el3_resume(struct pm_dev *pdev)
+{
+ unsigned long flags;
+ struct net_device *dev;
+ struct el3_private *lp;
+ int ioaddr;
+
+ if (!pdev || !pdev->data)
+ return -EINVAL;
+
+ dev = (struct net_device *)pdev->data;
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ outw(PowerUp, ioaddr + EL3_CMD);
+ el3_up(dev);
+
+ if (netif_running(dev))
+ netif_device_attach(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+static int
+el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
+{
+ switch (rqst) {
+ case PM_SUSPEND:
+ return el3_suspend(pdev);
+
+ case PM_RESUME:
+ return el3_resume(pdev);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* Parameters that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+module_param(debug,int, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0-6)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(xcvr,"transceiver(s) (0=internal, 1=external)");
+MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
+#if defined(__ISAPNP__)
+module_param(nopnp, int, 0);
+MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
+MODULE_DEVICE_TABLE(isapnp, el3_isapnp_adapters);
+#endif /* __ISAPNP__ */
+MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B) ISA/PnP ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int __init el3_init_module(void)
+{
+ el3_cards = 0;
+
+ if (debug >= 0)
+ el3_debug = debug;
+
+ el3_root_dev = NULL;
+ while (el3_probe(el3_cards) == 0) {
+ if (irq[el3_cards] > 1)
+ el3_root_dev->irq = irq[el3_cards];
+ if (xcvr[el3_cards] >= 0)
+ el3_root_dev->if_port = xcvr[el3_cards];
+ el3_cards++;
+ }
+
+#ifdef CONFIG_EISA
+ if (eisa_driver_register (&el3_eisa_driver) < 0) {
+ eisa_driver_unregister (&el3_eisa_driver);
+ }
+#endif
+#ifdef CONFIG_MCA
+ mca_register_driver(&el3_mca_driver);
+#endif
+ return 0;
+}
+
+static void __exit el3_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (el3_root_dev) {
+ struct el3_private *lp = netdev_priv(el3_root_dev);
+
+ next_dev = lp->next_dev;
+ el3_common_remove (el3_root_dev);
+ el3_root_dev = next_dev;
+ }
+
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&el3_eisa_driver);
+#endif
+#ifdef CONFIG_MCA
+ mca_unregister_driver(&el3_mca_driver);
+#endif
+}
+
+module_init (el3_init_module);
+module_exit (el3_cleanup_module);
+
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
new file mode 100644
index 000000000000..c4cf4fcd1344
--- /dev/null
+++ b/drivers/net/3c515.c
@@ -0,0 +1,1594 @@
+/*
+ Written 1997-1998 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ 2000/2/2- Added support for kernel-level ISAPnP
+ by Stephen Frost <sfrost@snowman.net> and Alessandro Zummo
+ Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox.
+
+ 2001/11/17 - Added ethtool support (jgarzik)
+
+ 2002/10/28 - Locking updates for 2.5 (alan@redhat.com)
+
+*/
+
+#define DRV_NAME "3c515"
+#define DRV_VERSION "0.99t-ac"
+#define DRV_RELDATE "28-Oct-2002"
+
+static char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n";
+
+#define CORKSCREW 1
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static int rx_copybreak = 200;
+
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Enable the automatic media selection code -- usually set. */
+#define AUTOMEDIA 1
+
+/* Allow the use of fragment bus master transfers instead of only
+ programmed-I/O for Vortex cards. Full-bus-master transfers are always
+ enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined,
+ the feature may be turned on using 'options'. */
+#define VORTEX_BUS_MASTER
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/isapnp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define NEW_MULTICAST
+#include <linux/delay.h>
+
+#define MAX_UNITS 8
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* "Knobs" for adjusting internal parameters. */
+/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
+#define DRIVER_DEBUG 1
+/* Some values here are only for performance evaluation and path-coverage
+ debugging. */
+static int rx_nocopy, rx_copy, queued_packet;
+
+/* Number of times to check to see if the Tx FIFO has space, used in some
+ limited cases. */
+#define WAIT_TX_AVAIL 200
+
+/* Operational parameters that are usually not changed. */
+#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+
+/* The size here is somewhat misleading: the Corkscrew also uses the ISA
+ aliased registers at <base>+0x400.
+ */
+#define CORKSCREW_TOTAL_SIZE 0x20
+
+#ifdef DRIVER_DEBUG
+static int corkscrew_debug = DRIVER_DEBUG;
+#else
+static int corkscrew_debug = 1;
+#endif
+
+#define CORKSCREW_ID 10
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL,
+3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout,
+it's not practical to integrate this driver with the other EtherLink drivers.
+
+II. Board-specific settings
+
+The Corkscrew has an EEPROM for configuration, but no special settings are
+needed for Linux.
+
+III. Driver operation
+
+The 3c515 series use an interface that's very similar to the 3c900 "Boomerang"
+PCI cards, with the bus master interface extensively modified to work with
+the ISA bus.
+
+The card is capable of full-bus-master transfers with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.
+
+This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate
+receive buffer. This scheme allocates full-sized skbuffs as receive
+buffers. The value RX_COPYBREAK is used as the copying breakpoint: it
+trades off the memory wasted by passing a full-sized skbuff up the stack
+for every frame against the cost of copying small frames into
+correctly-sized skbuffs.
+
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the netif
+layer. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Terry Murphy of 3Com for providing documentation and a development
+board.
+
+The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com
+project names. I use these names to eliminate confusion -- 3Com product
+numbers and names are very similar and often confused.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes!
+This driver only supports ethernet frames because of the recent MTU limit
+of 1.5K, but the changes to support 4.5K are minimal.
+*/
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chips
+ can handle FDDI-length frames (~4500 octets), so parameters now count
+ 32-bit 'Dwords' rather than octets. */
+
+enum corkscrew_cmd {
+ TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11,
+ RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11,
+ UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2,
+ DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11,
+ TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11,
+ AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11,
+ SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11,
+ SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11,
+ StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11,
+ StatsDisable = 22 << 11, StopCoax = 23 << 11,
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+/* Bits in the general status register. */
+enum corkscrew_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10,
+ DMAInProgress = 1 << 11, /* DMA controller is still busy. */
+ CmdInProgress = 1 << 12, /* EL3_CMD is still busy. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0IRQ = 0x08,
+#if defined(CORKSCREW)
+ Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */
+ Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */
+#else
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+#endif
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3,
+ EtherLink3ID = 7,
+};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
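+/* Presumably the Window 3 config dword is read into .i and the bit-fields
+ in .u (xcvr, autoselect, ram_split, ...) are then examined individually. */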
+
+enum Window4 {
+ Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+
+/* Boomerang-style bus master control registers. Note ISA aliases! */
+enum MasterCtrl {
+ PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen =
+ 0x40c,
+ TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+struct boom_rx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete = 0x00008000, RxDError = 0x4000,
+ /* See boomerang_rx() for actual error bits */
+};
+
+struct boom_tx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+struct corkscrew_private {
+ const char *product_name;
+ struct list_head list;
+ struct net_device *our_dev;
+ /* The Rx and Tx rings are here to keep them quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ struct timer_list timer; /* Media selection timer. */
+ int capabilities ; /* Adapter capabilities word. */
+ int options; /* User-settable misc. driver options. */
+ int last_rx_packets; /* For media autoselection. */
+ unsigned int available_media:8, /* From Wn3_Options */
+ media_override:3, /* Passed-in media type. */
+ default_media:3, /* Read from the EEPROM. */
+ full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do fragment bus-mastering. */
+ full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */
+ tx_full:1;
+ spinlock_t lock;
+ struct device *dev;
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config. */
+ next:8; /* The media type to try next. */
+ short wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 },
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10},
+ { "MII", 0, 0x40, XCVR_10baseT, 3 * HZ},
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
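+/* Presumably the media-selection timer walks the 'next' chain above, so
+ starting from 10baseT it would try 10base2, then AUI, then Default,
+ matching the note about deviating from 3Com's order. */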
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id corkscrew_isapnp_adapters[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051),
+ (long) "3Com Fast EtherLink ISA" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters);
+
+static int nopnp;
+#endif /* __ISAPNP__ */
+
+static struct net_device *corkscrew_scan(int unit);
+static void corkscrew_setup(struct net_device *dev, int ioaddr,
+ struct pnp_dev *idev, int card_number);
+static int corkscrew_open(struct net_device *dev);
+static void corkscrew_timer(unsigned long arg);
+static int corkscrew_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int corkscrew_rx(struct net_device *dev);
+static void corkscrew_timeout(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static irqreturn_t corkscrew_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs);
+static int corkscrew_close(struct net_device *dev);
+static void update_stats(int addr, struct net_device *dev);
+static struct net_device_stats *corkscrew_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+
+/*
+ Unfortunately maximizing the shared code between the integrated and
+ module version of the driver results in a complicated set of initialization
+ procedures.
+   init_module() -- modules / tc515_probe() -- built-in
+	The wrappers for corkscrew_scan()
+   corkscrew_scan()	The common routine that scans the ISA bus and ISAPnP
+			for cards, then registers each one found.
+   corkscrew_setup()	Fill in the device structure for a card that has
+			been found.
+*/
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Note: this is the only limit on the number of cards supported!! */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, };
+
+#ifdef MODULE
+static int debug = -1;
+
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(debug, "3c515 debug level (0-6)");
+MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering");
+MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt");
+
+/* A list of all installed Vortex devices, for removing the driver module. */
+/* we will need locking (and refcounting) if we ever use it for more */
+static LIST_HEAD(root_corkscrew_dev);
+
+int init_module(void)
+{
+ int found = 0;
+ if (debug >= 0)
+ corkscrew_debug = debug;
+ if (corkscrew_debug)
+ printk(version);
+ while (corkscrew_scan(-1))
+ found++;
+ return found ? 0 : -ENODEV;
+}
+
+#else
+struct net_device *tc515_probe(int unit)
+{
+ struct net_device *dev = corkscrew_scan(unit);
+ static int printed;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (corkscrew_debug > 0 && !printed) {
+ printed = 1;
+ printk(version);
+ }
+
+ return dev;
+}
+#endif /* not MODULE */
+
+static int check_device(unsigned ioaddr)
+{
+ int timer;
+
+ if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515"))
+ return 0;
+ /* Check the resource configuration for a matching ioaddr. */
+ if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) {
+ release_region(ioaddr, CORKSCREW_TOTAL_SIZE);
+ return 0;
+ }
+ /* Verify by reading the device ID from the EEPROM. */
+ outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
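+ /* EEPROM word 7 (EtherLink3ID) must read back 0x6d50, the same
+ EtherLink III ID word the 3c509 probe checks. */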
+ if (inw(ioaddr + Wn0EepromData) != 0x6d50) {
+ release_region(ioaddr, CORKSCREW_TOTAL_SIZE);
+ return 0;
+ }
+ return 1;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ list_del_init(&vp->list);
+ if (dev->dma)
+ free_dma(dev->dma);
+ outw(TotalReset, dev->base_addr + EL3_CMD);
+ release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE);
+ if (vp->dev)
+ pnp_device_detach(to_pnp_dev(vp->dev));
+}
+
+static struct net_device *corkscrew_scan(int unit)
+{
+ struct net_device *dev;
+ static int cards_found = 0;
+ static int ioaddr;
+ int err;
+#ifdef __ISAPNP__
+ short i;
+ static int pnp_cards;
+#endif
+
+ dev = alloc_etherdev(sizeof(struct corkscrew_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ SET_MODULE_OWNER(dev);
+
+#ifdef __ISAPNP__
+ if(nopnp == 1)
+ goto no_pnp;
+ for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+ int irq;
+ while((idev = pnp_find_dev(NULL,
+ corkscrew_isapnp_adapters[i].vendor,
+ corkscrew_isapnp_adapters[i].function,
+ idev))) {
+
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ printk("pnp activate failed (out of resources?)\n");
+ pnp_device_detach(idev);
+ continue;
+ }
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ ioaddr = pnp_port_start(idev, 0);
+ irq = pnp_irq(idev, 0);
+ if (!check_device(ioaddr)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ if(corkscrew_debug)
+ printk ("ISAPNP reports %s at i/o 0x%x, irq %d\n",
+ (char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq);
+ printk(KERN_INFO "3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+ /* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */
+ corkscrew_setup(dev, ioaddr, idev, cards_found++);
+ SET_NETDEV_DEV(dev, &idev->dev);
+ pnp_cards++;
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ cleanup_card(dev);
+ }
+ }
+no_pnp:
+#endif /* __ISAPNP__ */
+
+ /* Check all locations on the ISA bus -- evil! */
+ for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) {
+ if (!check_device(ioaddr))
+ continue;
+
+ printk(KERN_INFO "3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+ corkscrew_setup(dev, ioaddr, NULL, cards_found++);
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ return NULL;
+}
+
+static void corkscrew_setup(struct net_device *dev, int ioaddr,
+ struct pnp_dev *idev, int card_number)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int i;
+ int irq;
+
+ if (idev) {
+ irq = pnp_irq(idev, 0);
+ vp->dev = &idev->dev;
+ } else {
+ irq = inw(ioaddr + 0x2002) & 15;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->dma = inw(ioaddr + 0x2000) & 7;
+ vp->product_name = "3c515";
+ vp->options = dev->mem_start;
+ vp->our_dev = dev;
+
+ if (!vp->options) {
+ if (card_number >= MAX_UNITS)
+ vp->options = -1;
+ else
+ vp->options = options[card_number];
+ }
+
+ if (vp->options >= 0) {
+ vp->media_override = vp->options & 7;
+ if (vp->media_override == 2)
+ vp->media_override = 0;
+ vp->full_duplex = (vp->options & 8) ? 1 : 0;
+ vp->bus_master = (vp->options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+#ifdef MODULE
+ list_add(&vp->list, &root_corkscrew_dev);
+#endif
+
+ printk(KERN_INFO "%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr);
+
+ spin_lock_init(&vp->lock);
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ for (i = 0; i < 0x18; i++) {
+ short *phys_addr = (short *) dev->dev_addr;
+ int timer;
+ outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us. for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
+ eeprom[i] = inw(ioaddr + Wn0EepromData);
+ checksum ^= eeprom[i];
+ if (i < 3)
+ phys_addr[i] = htons(eeprom[i]);
+ }
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00)
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ if (eeprom[16] == 0x11c7) { /* Corkscrew */
+ if (request_dma(dev->dma, "3c515")) {
+ printk(", DMA %d allocation failed", dev->dma);
+ dev->dma = 0;
+ } else
+ printk(", DMA %d", dev->dma);
+ }
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15))
+ printk(KERN_WARNING " *** Warning: this IRQ is unlikely to work! ***\n");
+
+ {
+ char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" };
+ union wn3_config config;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ config.i = inl(ioaddr + Wn3_Config);
+ if (corkscrew_debug > 1)
+ printk(KERN_INFO " Internal config register is %4.4x, transceivers %#x.\n",
+ config.i, inw(ioaddr + Wn3_Options));
+ printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << config.u.ram_size,
+ config.u.ram_width ? "word" : "byte",
+ ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect/" : "",
+ media_tbl[config.u.xcvr].name);
+ dev->if_port = config.u.xcvr;
+ vp->default_media = config.u.xcvr;
+ vp->autoselect = config.u.autoselect;
+ }
+ if (vp->media_override != 7) {
+ printk(KERN_INFO " Media override to transceiver type %d (%s).\n",
+ vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ }
+
+ vp->capabilities = eeprom[16];
+ vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0;
+	/* Rx was reported broken at 10mbps; uncomment the next line to force bus-master Rx off. */
+ /* vp->full_bus_master_rx = 0; */
+ vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
+
+ /* The 3c51x-specific entries in the device structure. */
+ dev->open = &corkscrew_open;
+ dev->hard_start_xmit = &corkscrew_start_xmit;
+ dev->tx_timeout = &corkscrew_timeout;
+ dev->watchdog_timeo = (400 * HZ) / 1000;
+ dev->stop = &corkscrew_close;
+ dev->get_stats = &corkscrew_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+}
+
+
+static int corkscrew_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ union wn3_config config;
+ int i;
+
+ /* Before initializing select the active media port. */
+ EL3WINDOW(3);
+ if (vp->full_duplex)
+ outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */
+ config.i = inl(ioaddr + Wn3_Config);
+
+ if (vp->media_override != 7) {
+ if (corkscrew_debug > 1)
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = 4;
+ while (!(vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+
+ if (corkscrew_debug > 1)
+ printk("%s: Initial media type %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ init_timer(&vp->timer);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ vp->timer.data = (unsigned long) dev;
+ vp->timer.function = &corkscrew_timer; /* timer handler */
+ add_timer(&vp->timer);
+ } else
+ dev->if_port = vp->default_media;
+
+ config.u.xcvr = dev->if_port;
+ outl(config.i, ioaddr + Wn3_Config);
+
+ if (corkscrew_debug > 1) {
+ printk("%s: corkscrew_open() InternalConfig %8.8x.\n",
+ dev->name, config.i);
+ }
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (vp->capabilities == 0x11c7) {
+ /* Corkscrew: Cannot share ISA resources. */
+ if (dev->irq == 0
+ || dev->dma == 0
+ || request_irq(dev->irq, &corkscrew_interrupt, 0,
+ vp->product_name, dev)) return -EAGAIN;
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ } else if (request_irq(dev->irq, &corkscrew_interrupt, SA_SHIRQ,
+ vp->product_name, dev)) {
+ return -EAGAIN;
+ }
+
+ if (corkscrew_debug > 1) {
+ EL3WINDOW(4);
+ printk("%s: corkscrew_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i += 2)
+ outw(0, ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms... */
+ outw(StartCoax, ioaddr + EL3_CMD);
+ EL3WINDOW(4);
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ if (corkscrew_debug > 2)
+ printk("%s: Filling in the Rx ring.\n",
+ dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ if (i < (RX_RING_SIZE - 1))
+ vp->rx_ring[i].next =
+ isa_virt_to_bus(&vp->rx_ring[i + 1]);
+ else
+ vp->rx_ring[i].next = 0;
+ vp->rx_ring[i].status = 0; /* Clear complete bit. */
+ vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail);
+ }
+ vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
+ outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ vp->cur_tx = vp->dirty_tx = 0;
+ outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = NULL;
+ outl(0, ioaddr + DownListPtr);
+ }
+	/* Set receiver mode: presumably accept broadcast and phys addr only. */
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ netif_start_queue(dev);
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull |
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete,
+ ioaddr + EL3_CMD);
+
+ return 0;
+}
+
+static void corkscrew_timer(unsigned long data)
+{
+#ifdef AUTOMEDIA
+ struct net_device *dev = (struct net_device *) data;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int ok = 0;
+
+ if (corkscrew_debug > 1)
+ printk("%s: Media selection timer tick happened, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ spin_lock_irqsave(&vp->lock, flags);
+
+ {
+ int old_window = inw(ioaddr + EL3_CMD) >> 13;
+ int media_status;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case 0:
+ case 4:
+ case 5: /* 10baseT, 100baseTX, 100baseFX */
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+ if (corkscrew_debug > 1)
+ printk("%s: Media %s has link beat, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+ } else if (corkscrew_debug > 1)
+				printk("%s: Media %s has no link beat, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+
+ break;
+ default: /* Other media types handled by Tx timeouts. */
+ if (corkscrew_debug > 1)
+				printk("%s: Media %s has no indication, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+ ok = 1;
+ }
+ if (!ok) {
+ union wn3_config config;
+
+ do {
+ dev->if_port =
+ media_tbl[dev->if_port].next;
+ }
+ while (!(vp->available_media & media_tbl[dev->if_port].mask));
+
+ if (dev->if_port == 8) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (corkscrew_debug > 1)
+ printk("%s: Media selection failing, using default %s port.\n",
+ dev->name,
+ media_tbl[dev->if_port].name);
+ } else {
+ if (corkscrew_debug > 1)
+ printk("%s: Media selection failed, now trying %s port.\n",
+ dev->name,
+ media_tbl[dev->if_port].name);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ add_timer(&vp->timer);
+ }
+ outw((media_status & ~(Media_10TP | Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ config.u.xcvr = dev->if_port;
+ outl(config.i, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == 3 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ }
+
+ spin_unlock_irqrestore(&vp->lock, flags);
+ if (corkscrew_debug > 1)
+ printk("%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+#endif /* AUTOMEDIA */
+ return;
+}
+
+static void corkscrew_timeout(struct net_device *dev)
+{
+ int i;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING
+ "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
+ /* Slight code bloat to be user friendly. */
+	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+		printk(KERN_WARNING
+		       "%s: Transmitter encountered 16 collisions --"
+		       " network cable problem?\n", dev->name);
+#ifndef final_version
+ printk(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx,
+ vp->cur_tx);
+ printk(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
+ &vp->tx_ring[0]);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(" %d: %p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ vp->tx_ring[i].length, vp->tx_ring[i].status);
+ }
+#endif
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+ vp->stats.tx_errors++;
+ vp->stats.tx_dropped++;
+ netif_wake_queue(dev);
+}
+
+static int corkscrew_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Block a timer-based transmit from overlapping. */
+
+ netif_stop_queue(dev);
+
+ if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags, i;
+
+		if (vp->tx_full)	/* No room left in the Tx ring. */
+ return 1;
+ if (vp->cur_tx != 0)
+ prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
+ else
+ prev_entry = NULL;
+ if (corkscrew_debug > 3)
+ printk("%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ /* vp->tx_full = 1; */
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
+ vp->tx_ring[entry].length = skb->len | 0x80000000;
+ vp->tx_ring[entry].status = skb->len | 0x80000000;
+
+ spin_lock_irqsave(&vp->lock, flags);
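+		/* Stall the download engine so the descriptor list can be
+		   patched safely; the DownUnstall below restarts it. */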
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 20; i >= 0; i--)
+ if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ if (prev_entry)
+ prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]);
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(isa_virt_to_bus(&vp->tx_ring[entry]),
+ ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
+ vp->tx_full = 1;
+ else { /* Clear previous interrupt enable. */
+ if (prev_entry)
+ prev_entry->status &= ~0x80000000;
+ netif_wake_queue(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+ vp->stats.tx_bytes += skb->len;
+#ifdef VORTEX_BUS_MASTER
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ /* queue will be woken at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb(skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_wake_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536 >> 2),
+ ioaddr + EL3_CMD);
+ }
+#else
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb(skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_wake_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
+#endif /* bus master */
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (corkscrew_debug > 2)
+ printk("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04)
+ vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38)
+ vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0; j--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+
+static irqreturn_t corkscrew_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ /* Use the now-standard shared IRQ implementation. */
+ struct net_device *dev = dev_id;
+ struct corkscrew_private *lp = netdev_priv(dev);
+ int ioaddr, status;
+ int latency;
+ int i = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+
+ spin_lock(&lp->lock);
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (corkscrew_debug > 4)
+ printk("%s: interrupt, status %4.4x, timer %d.\n",
+ dev->name, status, latency);
+ if ((status & 0xE000) != 0xE000) {
+ static int donedidthis;
+ /* Some interrupt controllers store a bogus interrupt from boot-time.
+ Ignore a single early interrupt, but don't hang the machine for
+ other interrupt problems. */
+ if (donedidthis++ > 100) {
+ printk(KERN_ERR "%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ dev->name, status, netif_running(dev));
+ free_irq(dev->irq, dev);
+ dev->irq = -1;
+ }
+ }
+
+ do {
+ if (corkscrew_debug > 5)
+ printk("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ corkscrew_rx(dev);
+
+ if (status & TxAvailable) {
+ if (corkscrew_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & DownComplete) {
+ unsigned int dirty_tx = lp->dirty_tx;
+
+ while (lp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+ lp->dirty_tx = dirty_tx;
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+#ifdef VORTEX_BUS_MASTER
+ if (status & DMADone) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */
+ netif_wake_queue(dev);
+ }
+#endif
+ if (status & UpComplete) {
+ boomerang_rx(dev);
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts at once. */
+ if (status & RxEarly) { /* Rx early is unused. */
+ corkscrew_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat;
+ if (corkscrew_debug > 4)
+ printk("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ printk("%s: Updating stats failed, disabling stats as an"
+ " interrupt source.\n", dev->name);
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ printk("\n Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ printk(" %2.2x", inb(ioaddr + reg));
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | TxAvailable |
+ RxComplete | AdapterFailure |
+ UpComplete | DownComplete |
+ TxComplete, ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure,
+ ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ printk(KERN_ERR "%s: Too much work in interrupt, status %4.4x. "
+ "Disabling functions (%4.4x).\n", dev->name,
+ status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ spin_unlock(&lp->lock);
+
+ if (corkscrew_debug > 4)
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+ return IRQ_HANDLED;
+}
+
+static int corkscrew_rx(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (corkscrew_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (corkscrew_debug > 2)
+ printk(" Rx error: status %2.2x.\n",
+ rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01)
+ vp->stats.rx_over_errors++;
+ if (rx_error & 0x02)
+ vp->stats.rx_length_errors++;
+ if (rx_error & 0x04)
+ vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08)
+ vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10)
+ vp->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5 + 2);
+ if (corkscrew_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO,
+ skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ vp->stats.rx_bytes += pkt_len;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (corkscrew_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ vp->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ return 0;
+}
+
+static int boomerang_rx(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ int ioaddr = dev->base_addr;
+ int rx_status;
+
+ if (corkscrew_debug > 5)
+ printk(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
+ while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (corkscrew_debug > 2)
+ printk(" Rx error: status %2.2x.\n",
+ rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01)
+ vp->stats.rx_over_errors++;
+ if (rx_error & 0x02)
+ vp->stats.rx_length_errors++;
+ if (rx_error & 0x04)
+ vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08)
+ vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10)
+ vp->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ vp->stats.rx_bytes += pkt_len;
+ if (corkscrew_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 4)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ isa_bus_to_virt(vp->rx_ring[entry].
+ addr), pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk("%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p / %p.\n",
+ dev->name,
+ isa_bus_to_virt(vp->
+ rx_ring[entry].
+ addr), skb->head,
+ temp);
+ rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail);
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ }
+ return 0;
+}
+
+static int corkscrew_close(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (corkscrew_debug > 1) {
+ printk("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS),
+ inb(ioaddr + TxStatus));
+ printk("%s: corkscrew close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d.\n", dev->name, rx_nocopy, rx_copy,
+ queued_packet);
+ }
+
+ del_timer(&vp->timer);
+
+	/* Turn off statistics ASAP. We update vp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+ free_irq(dev->irq, dev);
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ outl(0, ioaddr + UpListPtr);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ outl(0, ioaddr + DownListPtr);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_kfree_skb(vp->tx_skbuff[i]);
+ vp->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&vp->lock, flags);
+ update_stats(dev->base_addr, dev);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(int ioaddr, struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ vp->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(7);
+ return;
+}
+
+/* This new version of set_rx_mode() supports v1.4 kernels.
+ The Vortex chip has no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ short new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (corkscrew_debug > 3)
+ printk("%s: Setting promiscuous mode.\n",
+ dev->name);
+ new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
+ } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ outw(new_mode, ioaddr + EL3_CMD);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return corkscrew_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ corkscrew_debug = level;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+
+#ifdef MODULE
+void cleanup_module(void)
+{
+ while (!list_empty(&root_corkscrew_dev)) {
+ struct net_device *dev;
+ struct corkscrew_private *vp;
+
+ vp = list_entry(root_corkscrew_dev.next,
+ struct corkscrew_private, list);
+ dev = vp->our_dev;
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c515.c"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
new file mode 100644
index 000000000000..8f6b2fa13e28
--- /dev/null
+++ b/drivers/net/3c523.c
@@ -0,0 +1,1323 @@
+/*
+ net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
+
+
+ This is an extension to the Linux operating system, and is covered by the
+ same GNU General Public License that covers that work.
+
+ Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
+
+ This is basically Michael Hipp's ni52 driver, with a new probing
+ algorithm and some minor changes to the 82586 CA and reset routines.
+ Thanks a lot Michael for a really clean i82586 implementation! Unless
+ otherwise documented in ni52.c, any bugs are mine.
+
+ Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
+ any way. The ni52 is a lot easier to modify.
+
+ sources:
+ ni52.c
+
+ Crynwr packet driver collection was a great reference for my first
+ attempt at this sucker. The 3c507 driver also helped, until I noticed
+ that ni52.c was a lot nicer.
+
+ EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
+ Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
+ stuff. Information on CardFacts is found in the Ethernet HOWTO.
+ Also see http://www.3com.com/
+
+ Microprocessor Communications Support Chips, T.J. Byers, ISBN
+ 0-444-01224-9, has a section on the i82586. It tells you just enough
+ to know that you really don't want to learn how to program the chip.
+
+ The original device probe code was stolen from ps2esdi.c
+
+ Known Problems:
+ Since most of the code was stolen from ni52.c, you'll run across the
+ same bugs in the 0.62 version of ni52.c, plus maybe a few because of
+ the 3c523 idiosyncrasies. The 3c523 has 16K of RAM though, so there
+ shouldn't be the overrun problem that the 8K ni52 has.
+
+ This driver is for a 16K adapter. It should work fine on the 64K
+ adapters, but it will only use one of the 4 banks of RAM. Modifying
+ this for the 64K version would require a lot of heinous bank
+ switching, which I'm sure I'm not interested in doing. If you try to
+ implement a bank switching version, you'll basically have to remember
+ what bank is enabled and do a switch every time you access a memory
+ location that's not current. You'll also have to remap pointers on
+ the driver side, because it only knows about 16K of the memory.
+ Anyone desperate or masochistic enough to try?
+
+ It seems to be stable now when multiple transmit buffers are used. I
+ can't see any performance difference, but then I'm working on a 386SX.
+
+ Multicast doesn't work. It doesn't even pretend to work. Don't use
+ it. Don't compile your kernel with multicast support. I don't know
+ why.
+
+ Features:
+ This driver is usable as a loadable module. If you try to specify an
+ IRQ or an IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
+ search the MCA slots until it finds a 3c523 with the specified
+ parameters.
+
+ This driver does support multiple ethernet cards when used as a module
+ (up to MAX_3C523_CARDS, the default being 4)
+
+ This has been tested with both BNC and TP versions, internal and
+ external transceivers. Haven't tested with the 64K version (that I
+ know of).
+
+ History:
+ Jan 1st, 1996
+ first public release
+ Feb 4th, 1996
+ update to 1.3.59, incorporated multicast diffs from ni52.c
+ Feb 15th, 1996
+ added shared irq support
+ Apr 1999
+ added support for multiple cards when used as a module
+	added option to disable multicast as it causes problems
+ Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk>
+ Stuart Adamson <stuart.adamson@compsoc.net>
+ Nov 2001
+ added support for ethtool (jgarzik)
+
+ $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
+ */
+
+#define DRV_NAME "3c523"
+#define DRV_VERSION "17-Nov-2001"
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mca-legacy.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+#include "3c523.h"
+
+/*************************************************************************/
+#define DEBUG /* debug on */
+#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
+#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */
+
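+/* Address conversion helpers (a sketch of the intent, judging from how they
+ are used below): the shared RAM sits at the top of the i82586's 24-bit
+ address space. make24() turns a kernel virtual address into a 24-bit bus
+ address relative to p->base, make16() into the 16-bit offset (relative to
+ p->memtop) used in SCB/command/descriptor links, and make32() maps such a
+ 16-bit offset back to a kernel virtual address. */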
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ((char *) (ptr32) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
+
+/*************************************************************************/
+/*
+ Tables to which we can map values in the configuration registers.
+ */
+static int irq_table[] __initdata = {
+ 12, 7, 3, 9
+};
+
+static int csr_table[] __initdata = {
+ 0x300, 0x1300, 0x2300, 0x3300
+};
+
+static int shm_table[] __initdata = {
+ 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
+};
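+
+/* These tables are indexed with fields decoded from POS register 2 in
+ do_elmc_probe(), e.g. irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6] and
+ shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3]. */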
+
+/******************* how to calculate the buffers *****************************
+
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode is it
+ * possible to configure the driver with 'NO_NOPCOMMANDS'.
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, it is better not to change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+
+#if (NUM_XMIT_BUFFS == 1)
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+#endif
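+
+/* Rough check of the 16K layout (a back-of-the-envelope sketch only; alignment
+ and the optional multicast setup command are ignored):
+   36 (scp+iscp+scb) + 2*8 (nop cmds) + 16 (transmit cmd)
+ + 9 * (24 + 12 + 1524) (rfd + rbd + receive buffer)
+ + 1 * (1524 + 8)       (xmit buffer + tbd)
+ = 15640 bytes, which fits in the 16384-byte shared memory. */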
+
+/**************************************************************************/
+
+#define DELAY(x) { mdelay(32 * x); }
+
+/* a much shorter delay: */
+#define DELAY_16() { udelay(16); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() { int i; \
+ for(i=0;i<1024;i++) { \
+ if(!p->scb->cmd) break; \
+ DELAY_16(); \
+ if(i == 1023) { \
+ printk(KERN_WARNING "%s:%d: scb_cmd timed out .. resetting i82586\n",\
+ dev->name,__LINE__); \
+ elmc_id_reset586(); } } }
+
+static irqreturn_t elmc_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr);
+static int elmc_open(struct net_device *dev);
+static int elmc_close(struct net_device *dev);
+static int elmc_send_packet(struct sk_buff *, struct net_device *);
+static struct net_device_stats *elmc_get_stats(struct net_device *dev);
+static void elmc_timeout(struct net_device *dev);
+#ifdef ELMC_MULTICAST
+static void set_multicast_list(struct net_device *dev);
+#endif
+static struct ethtool_ops netdev_ethtool_ops;
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev, unsigned long where, unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev, void *ptr);
+static void elmc_rcv_int(struct net_device *dev);
+static void elmc_xmt_int(struct net_device *dev);
+static void elmc_rnr_int(struct net_device *dev);
+
+struct priv {
+ struct net_device_stats stats;
+ unsigned long base;
+ char *memtop;
+ unsigned long mapped_start; /* Start of ioremap */
+ volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct transmit_cmd_struct *xmit_cmds[2];
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point, num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count, xmit_last;
+ volatile int slot;
+};
+
+#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);}
+#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);}
+
+/* with interrupts disabled - this will clear the interrupt bit in the
+ 3c523 control register, and won't put it back. This effectively
+ disables interrupts on the card. */
+#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);}
+#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);}
+
+/*************************************************************************/
+/*
+ Do a Channel Attention on the 3c523. This is extremely board dependent.
+ */
+static void elmc_do_attn586(int ioaddr, int ints)
+{
+ /* the 3c523 requires a minimum of 500 ns. The delays here might be
+ a little too large, and hence they may cut the performance of the
+ card slightly. If someone who knows a little more about Linux
+ timing would care to play with these, I'd appreciate it. */
+
+ /* this bit masking stuff is crap. I'd rather have separate
+ registers with strobe triggers for each of these functions. <sigh>
+ Ya take what ya got. */
+
+ outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL);
+ DELAY_16(); /* > 500 ns */
+ outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL);
+}
+
+/*************************************************************************/
+/*
+ Reset the 82586 on the 3c523. Also very board dependent.
+ */
+static void elmc_do_reset586(int ioaddr, int ints)
+{
+ /* toggle the RST bit low then high */
+ outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL);
+ DELAY_16(); /* > 500 ns */
+ outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL);
+
+ elmc_do_attn586(ioaddr, ints);
+}
+
+/**********************************************
+ * close device
+ */
+
+static int elmc_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ elmc_id_reset586(); /* the hard way to stop the receiver */
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+
+static int elmc_open(struct net_device *dev)
+{
+ int ret;
+
+ elmc_id_attn586(); /* disable interrupts */
+
+ ret = request_irq(dev->irq, &elmc_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM,
+ dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: couldn't get irq %d\n", dev->name, dev->irq);
+ elmc_id_reset586();
+ return ret;
+ }
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ netif_start_queue(dev);
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+
+static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ char *iscp_addrs[2];
+ int i = 0;
+
+ p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
+ p->memtop = isa_bus_to_virt((unsigned long)where) + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *) p->scp, 0, sizeof(struct scp_struct));
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+
+ iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
+ iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
+
+ for (i = 0; i < 2; i++) {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ elmc_id_reset586();
+
+ /* reset586 does an implicit CA */
+
+ /* apparently, you sometimes have to kick the 82586 twice... */
+ elmc_id_attn586();
+ DELAY(1);
+
+ if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by do_elmc_probe() and elmc_open().
+ */
+
+void alloc586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ elmc_id_reset586();
+ DELAY(2);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
+ memset((char *) p->scp, 0, sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ elmc_id_reset586();
+ elmc_id_attn586();
+
+ DELAY(2);
+
+ if (p->iscp->busy) {
+ printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
+ }
+ memset((char *) p->scb, 0, sizeof(struct scb_struct));
+}
+
+/*****************************************************************/
+
+static int elmc_getinfo(char *buf, int slot, void *d)
+{
+ int len = 0;
+ struct net_device *dev = (struct net_device *) d;
+ int i;
+
+ if (dev == NULL)
+ return len;
+
+ len += sprintf(buf + len, "Revision: 0x%x\n",
+ inb(dev->base_addr + ELMC_REVISION) & 0xf);
+ len += sprintf(buf + len, "IRQ: %d\n", dev->irq);
+ len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr,
+ dev->base_addr + ELMC_IO_EXTENT);
+ len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
+ dev->mem_end - 1);
+ len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
+ "External" : "Internal");
+ len += sprintf(buf + len, "Device: %s\n", dev->name);
+ len += sprintf(buf + len, "Hardware Address:");
+ for (i = 0; i < 6; i++) {
+ len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
+ }
+ buf[len++] = '\n';
+ buf[len] = 0;
+
+ return len;
+} /* elmc_getinfo() */
+
+/*****************************************************************/
+
+static int __init do_elmc_probe(struct net_device *dev)
+{
+ static int slot;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+ u_char status = 0;
+ u_char revision = 0;
+ int i = 0;
+ unsigned int size = 0;
+ int retval;
+ struct priv *pr = dev->priv;
+
+ SET_MODULE_OWNER(dev);
+ if (MCA_bus == 0) {
+ return -ENODEV;
+ }
+ /* search through the slots for the 3c523. */
+ slot = mca_find_adapter(ELMC_MCA_ID, 0);
+ while (slot != -1) {
+ status = mca_read_stored_pos(slot, 2);
+
+ dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
+ dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];
+
+ /*
+ If we're trying to match a specified irq or IO address,
+ we'll reject a match unless it's what we're looking for.
+ Also reject it if the card is already in use.
+ */
+
+ if ((irq && irq != dev->irq) ||
+ (base_addr && base_addr != dev->base_addr)) {
+ slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
+ continue;
+ }
+ if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) {
+ slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
+ continue;
+ }
+
+ /* found what we're looking for... */
+ break;
+ }
+
+ /* we didn't find any 3c523 in the slots we checked for */
+ if (slot == MCA_NOTFOUND)
+ return ((base_addr || irq) ? -ENXIO : -ENODEV);
+
+ mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
+
+ /* if we get this far, adapter has been found - carry on */
+ printk(KERN_INFO "%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
+
+ /* Now we extract configuration info from the card.
+ The 3c523 provides information in two of the POS registers, but
+ the second one is only needed if we want to tell the card what IRQ
+ to use. I suspect that whoever sets the thing up initially would
+ prefer we don't screw with those things.
+
+ Note that we read the status info when we found the card...
+
+ See 3c523.h for more details.
+ */
+
+ /* revision is stored in the first 4 bits of the revision register */
+ revision = inb(dev->base_addr + ELMC_REVISION) & 0xf;
+
+ /* according to docs, we read the interrupt and write it back to
+ the IRQ select register, since the POST might not configure the IRQ
+ properly. */
+ switch (dev->irq) {
+ case 3:
+ mca_write_pos(slot, 3, 0x04);
+ break;
+ case 7:
+ mca_write_pos(slot, 3, 0x02);
+ break;
+ case 9:
+ mca_write_pos(slot, 3, 0x08);
+ break;
+ case 12:
+ mca_write_pos(slot, 3, 0x01);
+ break;
+ }
+
+ memset(pr, 0, sizeof(struct priv));
+ pr->slot = slot;
+
+ printk(KERN_INFO "%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
+ dev->base_addr);
+
+ /* Determine if we're using the on-board transceiver (i.e. coax) or
+ an external one. The information is pretty much useless, but I
+ guess it's worth brownie points. */
+ dev->if_port = (status & ELMC_STATUS_DISABLE_THIN);
+
+ /* The 3c523 has a 24K chunk of memory. The first 16K is the
+ shared memory, while the last 8K is for the EtherStart BIOS ROM.
+ Which we don't care much about here. We'll just tell Linux that
+ we're using 16K. MCA won't permit address space conflicts caused
+ by not mapping the other 8K. */
+ dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];
+
+ /* We're using MCA, so it's a given that the information about memory
+ size is correct. The Crynwr drivers do something like this. */
+
+ elmc_id_reset586(); /* seems like a good idea before checking it... */
+
+ size = 0x4000; /* check for 16K mem */
+ if (!check586(dev, dev->mem_start, size)) {
+ printk(KERN_ERR "%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
+ dev->mem_start);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */
+
+ pr->memtop = isa_bus_to_virt(dev->mem_start) + size;
+ pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
+ alloc586(dev);
+
+ elmc_id_reset586(); /* make sure it doesn't generate spurious ints */
+
+ /* set number of receive-buffs according to memsize */
+ pr->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ /* dump all the assorted information */
+ printk(KERN_INFO "%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
+ dev->irq, dev->if_port ? "ex" : "in",
+ dev->mem_start, dev->mem_end - 1);
+
+ /* The hardware address for the 3c523 is stored in the first six
+ bytes of the IO address. */
+ printk(KERN_INFO "%s: hardware address ", dev->name);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(dev->base_addr + i);
+ printk(" %02x", dev->dev_addr[i]);
+ }
+ printk("\n");
+
+ dev->open = &elmc_open;
+ dev->stop = &elmc_close;
+ dev->get_stats = &elmc_get_stats;
+ dev->hard_start_xmit = &elmc_send_packet;
+ dev->tx_timeout = &elmc_timeout;
+ dev->watchdog_timeo = HZ;
+#ifdef ELMC_MULTICAST
+ dev->set_multicast_list = &set_multicast_list;
+#else
+ dev->set_multicast_list = NULL;
+#endif
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ /* note that we haven't actually requested the IRQ from the kernel.
+ That gets done in elmc_open(). I'm not sure that's such a good idea,
+ but it works, so I'll go with it. */
+
+#ifndef ELMC_MULTICAST
+ dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
+#endif
+
+ return 0;
+err_out:
+ mca_set_adapter_procfn(slot, NULL, NULL);
+ release_region(dev->base_addr, ELMC_IO_EXTENT);
+ return retval;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ mca_set_adapter_procfn(((struct priv *) (dev->priv))->slot, NULL, NULL);
+ release_region(dev->base_addr, ELMC_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init elmc_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_elmc_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/**********************************************
+ * init the chip (elmc-interrupt should be disabled?!)
+ * needs correctly 'allocated' shared memory
+ */
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ unsigned long s;
+ int i, result = 0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi = dev->mc_list;
+ int num_addrs = dev->mc_count;
+
+ ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
+ cfg_cmd->promisc = 1;
+ dev->flags |= IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_id_attn586();
+
+ s = jiffies; /* warning: only active with interrupts on !! */
+ while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
+ if (jiffies - s > 30*HZ/100)
+ break;
+ }
+
+ if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
+ printk(KERN_WARNING "%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
+ return 1;
+ }
+ /*
+ * individual address setup
+ */
+ ias_cmd = (struct iasetup_cmd_struct *) ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_id_attn586();
+
+ s = jiffies;
+ while (!(ias_cmd->cmd_status & STAT_COMPL)) {
+ if (jiffies - s > 30*HZ/100)
+ break;
+ }
+
+ if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
+ printk(KERN_WARNING "%s (elmc): individual address setup command failed: %04x\n", dev->name, ias_cmd->cmd_status);
+ return 1;
+ }
+ /*
+	 * TDR, wire check .. e.g. no termination resistor, etc.
+ */
+ tdr_cmd = (struct tdr_cmd_struct *) ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_attn586();
+
+ s = jiffies;
+ while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
+ if (jiffies - s > 30*HZ/100) {
+ printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
+ result = 1;
+ break;
+ }
+ }
+
+ if (!result) {
+ DELAY(2); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ elmc_id_attn586(); /* ack the interrupts */
+
+ if (result & TDR_LNK_OK) {
+ /* empty */
+ } else if (result & TDR_XCVR_PRB) {
+ printk(KERN_WARNING "%s: TDR: Transceiver problem!\n", dev->name);
+ } else if (result & TDR_ET_OPN) {
+ printk(KERN_WARNING "%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ } else if (result & TDR_ET_SRT) {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk(KERN_WARNING "%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ } else {
+ printk(KERN_WARNING "%s: TDR: Unknown status %04x\n", dev->name, result);
+ }
+ }
+ /*
+ * ack interrupts
+ */
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ elmc_id_attn586();
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for (i = 0; i < 2; i++) {
+ p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+ p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+#else
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /*transmit cmd/buff 0 */
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */
+
+ /*
+ * Multicast setup
+ */
+
+ if (dev->mc_count) {
+ /* I don't understand this: do we really need memory after the init? */
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if (len <= 0) {
+ printk(KERN_ERR "%s: Ooooops, no memory for MC-Setup!\n", dev->name);
+ } else {
+ if (len < num_addrs) {
+ num_addrs = len;
+ printk(KERN_WARNING "%s: Sorry, can only apply %d MC-Address(es).\n",
+ dev->name, num_addrs);
+ }
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+ for (i = 0; i < num_addrs; i++) {
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6);
+ dmi = dmi->next;
+ }
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd = CUC_START;
+ elmc_id_attn586();
+ s = jiffies;
+ while (!(mc_cmd->cmd_status & STAT_COMPL)) {
+ if (jiffies - s > 30*HZ/100)
+ break;
+ }
+ if (!(mc_cmd->cmd_status & STAT_COMPL)) {
+ printk(KERN_WARNING "%s: Can't apply multicast-address-list.\n", dev->name);
+ }
+ }
+ }
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if ((void *) ptr > (void *) p->iscp) {
+ printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", dev->name);
+ return 1;
+ }
+ memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
+ memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter' (nop-loop)
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd = CUC_START;
+ elmc_id_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = 0xffff;
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
+#endif
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for elmc_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev, void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
+ p->rfd_first = rfd;
+
+ for (i = 0; i < p->num_recv_buffs; i++) {
+ rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs);
+ }
+ rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + p->num_recv_buffs);
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs);
+
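+ /* The receive data buffers themselves live directly behind the RBD
+ array in shared memory; each RBD gets a 24-bit pointer to its own
+ buffer. */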
+ for (i = 0; i < p->num_recv_buffs; i++) {
+ rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t
+elmc_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ unsigned short stat;
+ struct priv *p;
+
+ if (dev == NULL) {
+ printk(KERN_ERR "elmc-interrupt: irq %d for unknown device.\n", (int) -(((struct pt_regs *) reg_ptr)->orig_eax + 2));
+ return IRQ_NONE;
+ } else if (!netif_running(dev)) {
+ /* The 3c523 has this habit of generating interrupts during the
+ reset. I'm not sure if the ni52 has this same problem, but it's
+ really annoying if we haven't finished initializing it. I was
+ hoping all the elmc_id_* commands would disable this, but I
+ might have missed a few. */
+
+ elmc_id_attn586(); /* ack inter. and disable any more */
+ return IRQ_HANDLED;
+ } else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
+ /* wasn't this device */
+ return IRQ_NONE;
+ }
+ /* reading ELMC_CTRL also clears the INT bit. */
+
+ p = (struct priv *) dev->priv;
+
+ while ((stat = p->scb->status & STAT_MASK))
+ {
+ p->scb->cmd = stat;
+ elmc_attn586(); /* ack inter. */
+
+ if (stat & STAT_CX) {
+ /* command with I-bit set complete */
+ elmc_xmt_int(dev);
+ }
+ if (stat & STAT_FR) {
+ /* received a frame */
+ elmc_rcv_int(dev);
+ }
+#ifndef NO_NOPCOMMANDS
+ if (stat & STAT_CNA) {
+ /* CU went 'not ready' */
+ if (netif_running(dev)) {
+ printk(KERN_WARNING "%s: oops! CU has left active state. stat: %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
+ }
+ }
+#endif
+
+ if (stat & STAT_RNR) {
+ /* RU went 'not ready' */
+
+ if (p->scb->status & RU_SUSPEND) {
+ /* special case: RU_SUSPEND */
+
+ WAIT_4_SCB_CMD();
+ p->scb->cmd = RUC_RESUME;
+ elmc_attn586();
+ } else {
+ printk(KERN_WARNING "%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
+ elmc_rnr_int(dev);
+ }
+ }
+ WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */
+ if (p->scb->cmd) { /* timed out? */
+ break;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void elmc_rcv_int(struct net_device *dev)
+{
+ int status;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ for (; (status = p->rfd_top->status) & STAT_COMPL;) {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if (status & STAT_OK) { /* frame received without error? */
+ if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte alignment */
+ skb_put(skb,totlen);
+ eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += totlen;
+ } else {
+ p->stats.rx_dropped++;
+ }
+ } else {
+ printk(KERN_WARNING "%s: received oversized frame.\n", dev->name);
+ p->stats.rx_dropped++;
+ }
+ } else { /* frame !(ok), only with 'save-bad-frames' */
+ printk(KERN_WARNING "%s: oops! rfd-error-status: %04x\n", dev->name, status);
+ p->stats.rx_errors++;
+ }
+ p->rfd_top->status = 0;
+ p->rfd_top->last = RFD_SUSP;
+ p->rfd_last->last = 0; /* delete RU_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ }
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void elmc_rnr_int(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd */
+ p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ elmc_attn586();
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. */
+
+ alloc_rfa(dev, (char *) p->rfd_first);
+ startrecv586(dev); /* restart RU */
+
+ printk(KERN_WARNING "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void elmc_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if (!(status & STAT_COMPL)) {
+ printk(KERN_WARNING "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
+ }
+ if (status & STAT_OK) {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ } else {
+ p->stats.tx_errors++;
+ if (status & TCMD_LATECOLL) {
+ printk(KERN_WARNING "%s: late collision detected.\n", dev->name);
+ p->stats.collisions++;
+ } else if (status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk(KERN_WARNING "%s: no carrier detected.\n", dev->name);
+ } else if (status & TCMD_LOSTCTS) {
+ printk(KERN_WARNING "%s: loss of CTS detected.\n", dev->name);
+ } else if (status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk(KERN_WARNING "%s: DMA underrun detected.\n", dev->name);
+ } else if (status & TCMD_MAXCOLL) {
+ printk(KERN_WARNING "%s: Max. collisions exceeded.\n", dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS != 1)
+ if ((++p->xmit_last) == NUM_XMIT_BUFFS) {
+ p->xmit_last = 0;
+ }
+#endif
+
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd = RUC_START;
+ elmc_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */
+}
+
+/******************************************************
+ * timeout
+ */
+
+static void elmc_timeout(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ /* COMMAND-UNIT active? */
+ if (p->scb->status & CU_ACTIVE) {
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n", dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name, (int) p->xmit_cmds[0]->cmd_status, (int) p->nop_cmds[0]->cmd_status, (int) p->nop_cmds[1]->cmd_status, (int) p->nop_point);
+#endif
+ p->scb->cmd = CUC_ABORT;
+ elmc_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd = CUC_START;
+ elmc_attn586();
+ WAIT_4_SCB_CMD();
+ netif_wake_queue(dev);
+ } else {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %04x\n", dev->name, p->scb->status);
+ printk("%s: command-stats: %04x %04x\n", dev->name, p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
+#endif
+ elmc_close(dev);
+ elmc_open(dev);
+ }
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len;
+ int i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+ netif_stop_queue(dev);
+
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+ if (len != skb->len)
+ memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
+ memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);
+
+#if (NUM_XMIT_BUFFS == 1)
+#ifdef NO_NOPCOMMANDS
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for (i = 0; i < 16; i++) {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd = CUC_START;
+ p->xmit_cmds[0]->cmd_status = 0;
+ elmc_attn586();
+ dev->trans_start = jiffies;
+ if (!i) {
+ dev_kfree_skb(skb);
+ }
+ WAIT_4_SCB_CMD();
+ if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */
+ break;
+ }
+ if (p->xmit_cmds[0]->cmd_status) {
+ break;
+ }
+ if (i == 15) {
+ printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
+ }
+ }
+#else
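+ /* NOP-loop chaining: make the transmit command fall through to the
+ spare NOP, then point the NOP the CU is currently spinning on at the
+ transmit command, so the command unit picks it up without a fresh
+ CUC_START. */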
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+#endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
+ next_nop = 0;
+ }
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+ if (p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ dev_kfree_skb(skb);
+#endif
+ return 0;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct net_device_stats *elmc_get_stats(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc, aln, rsc, ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs -= crc;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs -= aln;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs -= rsc;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs -= ovrn;
+
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+#ifdef ELMC_MULTICAST
+static void set_multicast_list(struct net_device *dev)
+{
+ if (!dev->start) {
+ /* without a running interface, promiscuous doesn't work */
+ return;
+ }
+ dev->start = 0;
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ dev->start = 1;
+}
+#endif
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+#ifdef MODULE
+
+/* Increase if needed ;) */
+#define MAX_3C523_CARDS 4
+
+static struct net_device *dev_elmc[MAX_3C523_CARDS];
+static int irq[MAX_3C523_CARDS];
+static int io[MAX_3C523_CARDS];
+module_param_array(irq, int, NULL, 0);
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
+
+int init_module(void)
+{
+ int this_dev,found = 0;
+
+ /* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
+ for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ if (!dev)
+ break;
+ dev->irq=irq[this_dev];
+ dev->base_addr=io[this_dev];
+ if (do_elmc_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_elmc[this_dev] = dev;
+ found++;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ if (io[this_dev]==0)
+ break;
+ printk(KERN_WARNING "3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
+ }
+
+ if(found==0) {
+ if(io[0]==0) printk(KERN_NOTICE "3c523.c: No 3c523 cards found\n");
+ return -ENXIO;
+ } else return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+ for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
+ struct net_device *dev = dev_elmc[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/3c523.h b/drivers/net/3c523.h
new file mode 100644
index 000000000000..7292f88b48e3
--- /dev/null
+++ b/drivers/net/3c523.h
@@ -0,0 +1,355 @@
+#ifndef _3c523_INCLUDE_
+#define _3c523_INCLUDE_
+/*
+ This is basically a hacked version of ni52.h, for the 3c523
+ Etherlink/MC.
+*/
+
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * Copyright 1995 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
+ *
+ * See 3c523.c for details.
+ *
+ * $Header: /home/chrisb/linux-1.2.13-3c523/drivers/net/RCS/3c523.h,v 1.6 1996/01/20 05:09:00 chrisb Exp chrisb $
+ */
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned short status; /* status word */
+ unsigned short cmd; /* command word */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+ unsigned short aln_errs; /* alignment error counter */
+ unsigned short rsc_errs; /* resource error counter */
+ unsigned short ovrn_errs; /* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x0700 /* mask for CU command */
+#define CUC_NOP 0x0000 /* NOP-command */
+#define CUC_START 0x0100 /* start execution of the first cmd on the CBL */
+#define CUC_RESUME 0x0200 /* resume after suspend */
+#define CUC_SUSPEND 0x0300 /* Suspend CU */
+#define CUC_ABORT 0x0400 /* abort command operation immediately */
+
+#define ACK_MASK 0xf000 /* mask for ACK command */
+#define ACK_CX 0x8000 /* acknowledges STAT_CX */
+#define ACK_FR 0x4000 /* ack. STAT_FR */
+#define ACK_CNA 0x2000 /* ack. STAT_CNA */
+#define ACK_RNR 0x1000 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf000 /* mask for cause of interrupt */
+#define STAT_CX 0x8000 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x4000 /* RU finished receiving a frame */
+#define STAT_CNA 0x2000 /* CU left active state */
+#define STAT_RNR 0x1000 /* RU left ready state */
+
+#define CU_STATUS 0x700 /* CU status, 0=idle */
+#define CU_SUSPEND 0x100 /* CU is suspended */
+#define CU_ACTIVE 0x200 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned short status; /* status word */
+ unsigned short last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x8000 /* last: last rfd in the list */
+#define RFD_SUSP 0x4000 /* last: suspend RU after */
+#define RFD_ERRMASK 0x0fe1 /* status: errormask */
+#define RFD_MATCHADD 0x0002 /* status: destination address doesn't match IA */
+#define RFD_RNR 0x0200 /* status: receiver out of resources */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word, number of used bytes in buffer */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Status values for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopback(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[0][6]; /* list of 6-byte entries */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointeroffset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+/*************************************************************************/
+/*
+Verbatim from the Crynwyr stuff:
+
+ The 3c523 responds with adapter code 0x6042 at slot
+registers xxx0 and xxx1. The setup register is at xxx2 and
+contains the following bits:
+
+0: card enable
+2,1: csr address select
+ 00 = 0300
+ 01 = 1300
+ 10 = 2300
+ 11 = 3300
+4,3: shared memory address select
+ 00 = 0c0000
+ 01 = 0c8000
+ 10 = 0d0000
+ 11 = 0d8000
+5: set to disable on-board thinnet
+7,6: (read-only) shows selected irq
+ 00 = 12
+ 01 = 7
+ 10 = 3
+ 11 = 9
+
+The interrupt-select register is at xxx3 and uses one bit per irq.
+
+0: int 12
+1: int 7
+2: int 3
+3: int 9
+
+ Again, the documentation stresses that the setup register
+should never be written. The interrupt-select register may be
+written with the value corresponding to bits 7,6 in
+the setup register to ensure correct setup.
+*/
+
+/* Offsets from the base I/O address. */
+#define ELMC_SA 0 /* first 6 bytes are IEEE network address */
+#define ELMC_CTRL 6 /* control & status register */
+#define ELMC_REVISION 7 /* revision register, first 4 bits only */
+#define ELMC_IO_EXTENT 8
+
+/* these are the bit selects for the port register 2 */
+#define ELMC_STATUS_ENABLED 0x01
+#define ELMC_STATUS_CSR_SELECT 0x06
+#define ELMC_STATUS_MEMORY_SELECT 0x18
+#define ELMC_STATUS_DISABLE_THIN 0x20
+#define ELMC_STATUS_IRQ_SELECT 0xc0
+
+/* this is the card id used in the detection code. You might recognize
+it from @6042.adf */
+#define ELMC_MCA_ID 0x6042
+
+/*
+ The following define the bits for the control & status register
+
+ The bank select registers can be used if more than 16K of memory is
+ on the card. For some stupid reason, bank 3 is the one for the
+ bottom 16K, and the card defaults to bank 0. So we have to set the
+ bank to 3 before the card will even think of operating. To get bank
+ 3, set BS0 and BS1 to high (of course...)
+*/
+#define ELMC_CTRL_BS0 0x01 /* RW bank select */
+#define ELMC_CTRL_BS1 0x02 /* RW bank select */
+#define ELMC_CTRL_INTE 0x04 /* RW interrupt enable, assert high */
+#define ELMC_CTRL_INT 0x08 /* R interrupt active, assert high */
+/*#define ELMC_CTRL_* 0x10*/ /* reserved */
+#define ELMC_CTRL_LBK 0x20 /* RW loopback enable, assert high */
+#define ELMC_CTRL_CA 0x40 /* RW channel attention, assert high */
+#define ELMC_CTRL_RST 0x80 /* RW 82586 reset, assert low */
+
+/* some handy compound bits */
+
+/* normal operation should have bank 3 and RST high, ints enabled */
+#define ELMC_NORMAL (ELMC_CTRL_INTE|ELMC_CTRL_RST|0x3)
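+
+/* Illustrative sketch only (not taken from the driver itself): with the
+ usual outb() accessor, normal operation would be entered roughly as
+ outb(ELMC_NORMAL, dev->base_addr + ELMC_CTRL);
+ i.e. bank 3 selected, reset deasserted, interrupts enabled. The exact
+ sequence 3c523.c uses may differ. */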
+
+#endif /* _3c523_INCLUDE_ */
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
new file mode 100644
index 000000000000..6db3301e7965
--- /dev/null
+++ b/drivers/net/3c527.c
@@ -0,0 +1,1675 @@
+/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
+ *
+ * (c) Copyright 1998 Red Hat Software Inc
+ * Written by Alan Cox.
+ * Further debugging by Carl Drougge.
+ * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
+ * Heavily modified by Richard Procter <rnp@paradise.net.nz>
+ *
+ * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
+ * (for the MCA stuff) written by Wim Dumon.
+ *
+ * Thanks to 3Com for making this possible by providing me with the
+ * documentation.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#define DRV_NAME "3c527"
+#define DRV_VERSION "0.7-SMP"
+#define DRV_RELDATE "2003/09/21"
+
+static const char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
+
+/**
+ * DOC: Traps for the unwary
+ *
+ * The diagram (Figure 1-1) and the POS summary disagree with the
+ * "Interrupt Level" section in the manual.
+ *
+ * The manual contradicts itself when describing the minimum number
+ * of buffers in the 'configure lists' command.
+ * My card accepts a buffer config of 4/4.
+ *
+ * Setting the SAV BP bit does not save bad packets, but
+ * only enables RX on-card stats collection.
+ *
+ * The documentation in places seems to miss things. In actual fact
+ * I've always eventually found everything is documented; it just
+ * requires careful study.
+ *
+ * DOC: Theory Of Operation
+ *
+ * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
+ * amount of on board intelligence that housekeeps a somewhat dumber
+ * Intel NIC. For performance we want to keep the transmit queue deep
+ * as the card can transmit packets while fetching others from main
+ * memory by bus master DMA. Transmission and reception are driven by
+ * circular buffer queues.
+ *
+ * The mailboxes can be used for controlling how the card traverses
+ * its buffer rings, but are used only for initial setup in this
+ * implementation. The exec mailbox allows a variety of commands to
+ * be executed. Each command must complete before the next is
+ * executed. Primarily we use the exec mailbox for controlling the
+ * multicast lists. We have to do a certain amount of interesting
+ * hoop jumping as the multicast list changes can occur in interrupt
+ * state when the card has an exec command pending. We defer such
+ * events until the command completion interrupt.
+ *
+ * A copy break scheme (taken from 3c59x.c) is employed whereby
+ * received frames exceeding a configurable length are passed
+ * directly to the higher networking layers without incurring a copy,
+ * in what amounts to a time/space trade-off.
+ *
+ * The card also keeps a large amount of statistical information
+ * on-board. In a perfect world, these could be used safely at no
+ * cost. However, lacking information to the contrary, processing
+ * them without races would involve so much extra complexity as to
+ * make it not worthwhile to do so. In the end, a hybrid SW/HW
+ * implementation was made necessary --- see mc32_update_stats().
+ *
+ * DOC: Notes
+ *
+ * It should be possible to use two or more cards, but at this stage
+ * only by loading two copies of the same module.
+ *
+ * The on-board 82586 NIC has trouble receiving multiple
+ * back-to-back frames and so is likely to drop packets from fast
+ * senders.
+**/
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/mca-legacy.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ethtool.h>
+#include <linux/completion.h>
+#include <linux/bitops.h>
+
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "3c527.h"
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the card. Is used for messages and in the requests for
+ * io regions, irqs and dma channels
+ */
+static const char* cardname = DRV_NAME;
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+
+#undef DEBUG_IRQ
+
+static unsigned int mc32_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define MC32_IO_EXTENT 8
+
+/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
+#define TX_RING_LEN 32 /* Typically the card supports 37 */
+#define RX_RING_LEN 8 /* " " " */
+
+/* Copy break point, see above for details.
+ * Setting to > 1512 effectively disables this feature. */
+#define RX_COPYBREAK 200 /* Value from 3c59x.c */
+
+/* Issue the 82586 workaround command - this is for "busy lans", but
+ * basically means for all lans nowadays - has a performance (latency)
+ * cost, but is best left enabled. */
+static const int WORKAROUND_82586=1;
+
+/* Pointers to buffers and their on-card records */
+struct mc32_ring_desc
+{
+ volatile struct skb_header *p;
+ struct sk_buff *skb;
+};
+
+/* Information that needs to be kept for each board. */
+struct mc32_local
+{
+ int slot;
+
+ u32 base;
+ struct net_device_stats net_stats;
+ volatile struct mc32_mailbox *rx_box;
+ volatile struct mc32_mailbox *tx_box;
+ volatile struct mc32_mailbox *exec_box;
+ volatile struct mc32_stats *stats; /* Start of on-card statistics */
+ u16 tx_chain; /* Transmit list start offset */
+ u16 rx_chain; /* Receive list start offset */
+ u16 tx_len; /* Transmit list count */
+ u16 rx_len; /* Receive list count */
+
+ u16 xceiver_desired_state; /* HALTED or RUNNING */
+ u16 cmd_nonblocking; /* Thread is uninterested in command result */
+ u16 mc_reload_wait; /* A multicast load request is pending */
+ u32 mc_list_valid; /* True when the mclist is set */
+
+ struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
+ struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
+
+ atomic_t tx_count; /* buffers left */
+ atomic_t tx_ring_head; /* index to tx en-queue end */
+ u16 tx_ring_tail; /* index to tx de-queue end */
+
+ u16 rx_ring_tail; /* index to rx de-queue end */
+
+ struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
+ struct completion execution_cmd; /* Card has completed an execute command */
+ struct completion xceiver_cmd; /* Card has completed a tx or rx command */
+};
+
+/* The station (ethernet) address prefix, used for a sanity check. */
+#define SA_ADDR0 0x02
+#define SA_ADDR1 0x60
+#define SA_ADDR2 0xAC
+
+struct mca_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+static const struct mca_adapters_t mc32_adapters[] = {
+ { 0x0041, "3COM EtherLink MC/32" },
+ { 0x8EF5, "IBM High Performance Lan Adapter" },
+ { 0x0000, NULL }
+};
+
+
+/* Macros for ring index manipulations */
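+/* The ring lengths are powers of two, so ANDing with (len - 1) wraps
+ the index cheaply. */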
+static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
+static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
+
+static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
+
+
+/* Index to functions, as function prototypes. */
+static int mc32_probe1(struct net_device *dev, int ioaddr);
+static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
+static int mc32_open(struct net_device *dev);
+static void mc32_timeout(struct net_device *dev);
+static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int mc32_close(struct net_device *dev);
+static struct net_device_stats *mc32_get_stats(struct net_device *dev);
+static void mc32_set_multicast_list(struct net_device *dev);
+static void mc32_reset_multicast_list(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ unsigned slot = lp->slot;
+ mca_mark_as_unused(slot);
+ mca_set_adapter_name(slot, NULL);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+}
+
+/**
+ * mc32_probe - Search for supported boards
+ * @unit: interface number to use
+ *
+ * Because MCA bus is a real bus and we can scan for cards we could do a
+ * single scan for all boards here. Right now we use the passed in device
+ * structure and scan for only one board. This needs fixing for modules
+ * in particular.
+ */
+
+struct net_device *__init mc32_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
+ static int current_mca_slot = -1;
+ int i;
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ SET_MODULE_OWNER(dev);
+
+ /* Do not check any supplied i/o locations.
+ POS registers usually don't fail :) */
+
+ /* MCA cards have POS registers.
+ Autodetecting MCA cards is extremely simple.
+ Just search for the card. */
+
+ for(i = 0; (mc32_adapters[i].name != NULL); i++) {
+ current_mca_slot =
+ mca_find_unused_adapter(mc32_adapters[i].id, 0);
+
+ if(current_mca_slot != MCA_NOTFOUND) {
+ if(!mc32_probe1(dev, current_mca_slot))
+ {
+ mca_set_adapter_name(current_mca_slot,
+ mc32_adapters[i].name);
+ mca_mark_as_used(current_mca_slot);
+ err = register_netdev(dev);
+ if (err) {
+ cleanup_card(dev);
+ free_netdev(dev);
+ dev = ERR_PTR(err);
+ }
+ return dev;
+ }
+
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * mc32_probe1 - Check a given slot for a board and test the card
+ * @dev: Device structure to fill in
+ * @slot: The MCA bus slot being used by this card
+ *
+ * Decode the slot data and configure the card structures. Having done this we
+ * can reset the card and configure it. The card does a full self test cycle
+ * in firmware so we have to wait for it to return and post us either a
+ * failure case or some addresses we use to find the board internals.
+ */
+
+static int __init mc32_probe1(struct net_device *dev, int slot)
+{
+ static unsigned version_printed;
+ int i, err;
+ u8 POS;
+ u32 base;
+ struct mc32_local *lp = netdev_priv(dev);
+ static u16 mca_io_bases[]={
+ 0x7280,0x7290,
+ 0x7680,0x7690,
+ 0x7A80,0x7A90,
+ 0x7E80,0x7E90
+ };
+ static u32 mca_mem_bases[]={
+ 0x00C0000,
+ 0x00C4000,
+ 0x00C8000,
+ 0x00CC000,
+ 0x00D0000,
+ 0x00D4000,
+ 0x00D8000,
+ 0x00DC000
+ };
+ static char *failures[]={
+ "Processor instruction",
+ "Processor data bus",
+ "Processor data bus",
+ "Processor data bus",
+ "Adapter bus",
+ "ROM checksum",
+ "Base RAM",
+ "Extended RAM",
+ "82586 internal loopback",
+ "82586 initialisation failure",
+ "Adapter list configuration error"
+ };
+
+ /* Time to play MCA games */
+
+ if (mc32_debug && version_printed++ == 0)
+ printk(KERN_DEBUG "%s", version);
+
+ printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
+
+ POS = mca_read_stored_pos(slot, 2);
+
+ if(!(POS&1))
+ {
+ printk(" disabled.\n");
+ return -ENODEV;
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = mca_io_bases[(POS>>1)&7];
+ dev->mem_start = mca_mem_bases[(POS>>4)&7];
+
+ POS = mca_read_stored_pos(slot, 4);
+ if(!(POS&1))
+ {
+ printk("memory window disabled.\n");
+ return -ENODEV;
+ }
+
+ POS = mca_read_stored_pos(slot, 5);
+
+ i=(POS>>4)&3;
+ if(i==3)
+ {
+ printk("invalid memory window.\n");
+ return -ENODEV;
+ }
+
+ i*=16384;
+ i+=16384;
+
+ dev->mem_end=dev->mem_start + i;
+
+ dev->irq = ((POS>>2)&3)+9;
+
+ if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
+ {
+ printk("io 0x%3lX, which is busy.\n", dev->base_addr);
+ return -EBUSY;
+ }
+
+ printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
+ dev->base_addr, dev->irq, dev->mem_start, i/1024);
+
+
+ /* We ought to set the cache line size here.. */
+
+
+ /*
+ * Go PROM browsing
+ */
+
+ printk("%s: Address ", dev->name);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ {
+ mca_write_pos(slot, 6, i+12);
+ mca_write_pos(slot, 7, 0);
+
+ printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3));
+ }
+
+ mca_write_pos(slot, 6, 0);
+ mca_write_pos(slot, 7, 0);
+
+ POS = mca_read_stored_pos(slot, 4);
+
+ if(POS&2)
+ printk(" : BNC port selected.\n");
+ else
+ printk(" : AUI port selected.\n");
+
+ POS=inb(dev->base_addr+HOST_CTRL);
+ POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
+ POS&=~HOST_CTRL_INTE;
+ outb(POS, dev->base_addr+HOST_CTRL);
+ /* Reset adapter */
+ udelay(100);
+ /* Reset off */
+ POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
+ outb(POS, dev->base_addr+HOST_CTRL);
+
+ udelay(300);
+
+ /*
+ * Grab the IRQ
+ */
+
+ err = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM, DRV_NAME, dev);
+ if (err) {
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
+ goto err_exit_ports;
+ }
+
+ memset(lp, 0, sizeof(struct mc32_local));
+ lp->slot = slot;
+
+ i=0;
+
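+ /* The firmware runs its self-test after reset and posts a status byte
+ through the I/O mailbox: 0xFF while no status is available yet, 0 on
+ success, otherwise a failure code decoded below. */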
+ base = inb(dev->base_addr);
+
+ while(base == 0xFF)
+ {
+ i++;
+ if(i == 1000)
+ {
+ printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+ udelay(1000);
+ if(inb(dev->base_addr+2)&(1<<5))
+ base = inb(dev->base_addr);
+ }
+
+ if(base>0)
+ {
+ if(base < 0x0C)
+ printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
+ base<0x0A?" test failure":"");
+ else
+ printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+
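+ /* Self-test passed: the next four mailbox bytes form the 32-bit
+ offset (relative to the shared memory window) of the exec mailbox. */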
+ base=0;
+ for(i=0;i<4;i++)
+ {
+ int n=0;
+
+ while(!(inb(dev->base_addr+2)&(1<<5)))
+ {
+ n++;
+ udelay(50);
+ if(n>100)
+ {
+ printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+ }
+
+ base|=(inb(dev->base_addr)<<(8*i));
+ }
+
+ lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
+
+ base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
+
+ lp->base = dev->mem_start+base;
+
+ lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
+ lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
+
+ lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
+
+ /*
+ * Descriptor chains (card relative)
+ */
+
+ lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
+ lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
+ lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
+ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
+
+ init_MUTEX_LOCKED(&lp->cmd_mutex);
+ init_completion(&lp->execution_cmd);
+ init_completion(&lp->xceiver_cmd);
+
+ printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
+ dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
+
+ dev->open = mc32_open;
+ dev->stop = mc32_close;
+ dev->hard_start_xmit = mc32_send_packet;
+ dev->get_stats = mc32_get_stats;
+ dev->set_multicast_list = mc32_set_multicast_list;
+ dev->tx_timeout = mc32_timeout;
+ dev->watchdog_timeo = HZ*5; /* Board does all the work */
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ return 0;
+
+err_exit_irq:
+ free_irq(dev->irq, dev);
+err_exit_ports:
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ return err;
+}
+
+
+/**
+ * mc32_ready_poll - wait until we can feed it a command
+ * @dev: The device to wait for
+ *
+ * Wait until the card becomes ready to accept a command via the
+ * command register. This tells us nothing about the completion
+ * status of any pending commands and takes very little time at all.
+ */
+
+static inline void mc32_ready_poll(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
+}
+
+
+/**
+ * mc32_command_nowait - send a command non blocking
+ * @dev: The 3c527 to issue the command to
+ * @cmd: The command word to write to the mailbox
+ * @data: A data block if the command expects one
+ * @len: Length of the data block
+ *
+ * Send a command from interrupt state. If there is a command
+ * currently being executed then we return an error of -1. It
+ * simply isn't viable to wait around as commands may be
+ * slow. This can theoretically be starved on SMP, but it's hard
+ * to see a realistic situation. We do not wait for the command
+ * to complete --- we rely on the interrupt handler to tidy up
+ * after us.
+ */
+
+static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int ret = -1;
+
+ if (down_trylock(&lp->cmd_mutex) == 0)
+ {
+ lp->cmd_nonblocking=1;
+ lp->exec_box->mbox=0;
+ lp->exec_box->mbox=cmd;
+ memcpy((void *)lp->exec_box->data, data, len);
+ barrier(); /* the memcpy forgot the volatile so be sure */
+
+ /* Send the command */
+ mc32_ready_poll(dev);
+ outb(1<<6, ioaddr+HOST_CMD);
+
+ ret = 0;
+
+ /* Interrupt handler will signal mutex on completion */
+ }
+
+ return ret;
+}
+
+
+/**
+ * mc32_command - send a command and sleep until completion
+ * @dev: The 3c527 card to issue the command to
+ * @cmd: The command word to write to the mailbox
+ * @data: A data block if the command expects one
+ * @len: Length of the data block
+ *
+ * Sends exec commands in a user context. This permits us to wait around
+ * for the replies and also to wait for the command buffer to complete
+ * from a previous command before we execute our command. After our
+ * command completes we will attempt any pending multicast reload
+ * we blocked off by hogging the exec buffer.
+ *
+ * You feed the card a command, you wait, it interrupts, you get a
+ * reply. All well and good. The complication arises because you use
+ * commands for filter list changes which come in at bh level from things
+ * like IPV6 group stuff.
+ */
+
+static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int ret = 0;
+
+ down(&lp->cmd_mutex);
+
+ /*
+ * My Turn
+ */
+
+ lp->cmd_nonblocking=0;
+ lp->exec_box->mbox=0;
+ lp->exec_box->mbox=cmd;
+ memcpy((void *)lp->exec_box->data, data, len);
+ barrier(); /* the memcpy forgot the volatile so be sure */
+
+ mc32_ready_poll(dev);
+ outb(1<<6, ioaddr+HOST_CMD);
+
+ wait_for_completion(&lp->execution_cmd);
+
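+ /* Bit 13 set in the returned mailbox word indicates the command
+ failed. */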
+ if(lp->exec_box->mbox&(1<<13))
+ ret = -1;
+
+ up(&lp->cmd_mutex);
+
+ /*
+ * A multicast set got blocked - try it now
+ */
+
+ if(lp->mc_reload_wait)
+ {
+ mc32_reset_multicast_list(dev);
+ }
+
+ return ret;
+}
+
+
+/**
+ * mc32_start_transceiver - tell board to restart tx/rx
+ * @dev: The 3c527 card to issue the command to
+ *
+ * This may be called from the interrupt state, where it is used
+ * to restart the rx ring if the card runs out of rx buffers.
+ *
+ * We must first check if it's ok to (re)start the transceiver. See
+ * mc32_close for details.
+ */
+
+static void mc32_start_transceiver(struct net_device *dev) {
+
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Ignore RX overflow on device closure */
+ if (lp->xceiver_desired_state==HALTED)
+ return;
+
+ /* Give the card the offset to the post-EOL-bit RX descriptor */
+ mc32_ready_poll(dev);
+ lp->rx_box->mbox=0;
+ lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
+ outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
+
+ mc32_ready_poll(dev);
+ lp->tx_box->mbox=0;
+ outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
+
+ /* We are not interrupted on start completion */
+}
+
+
+/**
+ * mc32_halt_transceiver - tell board to stop tx/rx
+ * @dev: The 3c527 card to issue the command to
+ *
+ * We issue the commands to halt the card's transceiver. In fact,
+ * after some experimenting we now simply tell the card to
+ * suspend. Odd things occasionally happened when issuing aborts.
+ *
+ * We then sleep until the card has notified us that both rx and
+ * tx have been suspended.
+ */
+
+static void mc32_halt_transceiver(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ mc32_ready_poll(dev);
+ lp->rx_box->mbox=0;
+ outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
+ wait_for_completion(&lp->xceiver_cmd);
+
+ mc32_ready_poll(dev);
+ lp->tx_box->mbox=0;
+ outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
+ wait_for_completion(&lp->xceiver_cmd);
+}
+
+
+/**
+ * mc32_load_rx_ring - load the ring of receive buffers
+ * @dev: 3c527 to build the ring for
+ *
+ * This initialises the on-card and driver data structures to
+ * the point where mc32_start_transceiver() can be called.
+ *
+ * The card sets up the receive ring for us. We are required to use the
+ * ring it provides, although the size of the ring is configurable.
+ *
+ * We allocate an sk_buff for each ring entry in turn and
+ * initialise its house-keeping info. At the same time, we read
+ * each 'next' pointer in our rx_ring array. This reduces slow
+ * shared-memory reads and makes it easy to access predecessor
+ * descriptors.
+ *
+ * We then set the end-of-list bit for the last entry so that the
+ * card will know when it has run out of buffers.
+ */
+
+static int mc32_load_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+ u16 rx_base;
+ volatile struct skb_header *p;
+
+ rx_base=lp->rx_chain;
+
+ for(i=0; i<RX_RING_LEN; i++) {
+ lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
+ if (lp->rx_ring[i].skb==NULL) {
+ for (i--; i>=0; i--)
+ kfree_skb(lp->rx_ring[i].skb);
+ return -ENOBUFS;
+ }
+ skb_reserve(lp->rx_ring[i].skb, 18);
+
+ p=isa_bus_to_virt(lp->base+rx_base);
+
+ p->control=0;
+ p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
+ p->status=0;
+ p->length=1532;
+
+ lp->rx_ring[i].p=p;
+ rx_base=p->next;
+ }
+
+ lp->rx_ring[i-1].p->control |= CONTROL_EOL;
+
+ lp->rx_ring_tail=0;
+
+ return 0;
+}
+
+
+/**
+ * mc32_flush_rx_ring - free the ring of receive buffers
+ * @dev: The 3c527 device whose rx ring is to be flushed
+ *
+ * Free the buffer for each ring slot. This may be called
+ * before mc32_load_rx_ring(), eg. on error in mc32_open().
+ * Requires rx skb pointers to point to a valid skb, or NULL.
+ */
+
+static void mc32_flush_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+
+ for(i=0; i < RX_RING_LEN; i++)
+ {
+ if (lp->rx_ring[i].skb) {
+ dev_kfree_skb(lp->rx_ring[i].skb);
+ lp->rx_ring[i].skb = NULL;
+ }
+ lp->rx_ring[i].p=NULL;
+ }
+}
+
+
+/**
+ * mc32_load_tx_ring - load transmit ring
+ * @dev: The 3c527 card to issue the command to
+ *
+ * This sets up the host transmit data-structures.
+ *
+ * First, we obtain from the card its current position in the tx
+ * ring, so that we will know where to begin transmitting
+ * packets.
+ *
+ * Then, we read the 'next' pointers from the on-card tx ring into
+ * our tx_ring array to reduce slow shared-mem reads. Finally, we
+ * initialise the tx house-keeping variables.
+ *
+ */
+
+static void mc32_load_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *p;
+ int i;
+ u16 tx_base;
+
+ tx_base=lp->tx_box->data[0];
+
+ for(i=0 ; i<TX_RING_LEN ; i++)
+ {
+ p=isa_bus_to_virt(lp->base+tx_base);
+ lp->tx_ring[i].p=p;
+ lp->tx_ring[i].skb=NULL;
+
+ tx_base=p->next;
+ }
+
+ /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
+ /* see mc32_tx_ring */
+
+ atomic_set(&lp->tx_count, TX_RING_LEN-1);
+ atomic_set(&lp->tx_ring_head, 0);
+ lp->tx_ring_tail=0;
+}
+
+
+/**
+ * mc32_flush_tx_ring - free transmit ring
+ * @dev: The 3c527 device whose tx ring is to be flushed
+ *
+ * If the ring is non-empty, zip over it, freeing any
+ * allocated sk_buffs. The tx ring house-keeping variables are
+ * then reset. Requires tx skb pointers to point to a valid skb,
+ * or NULL.
+ */
+
+static void mc32_flush_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i < TX_RING_LEN; i++)
+ {
+ if (lp->tx_ring[i].skb)
+ {
+ dev_kfree_skb(lp->tx_ring[i].skb);
+ lp->tx_ring[i].skb = NULL;
+ }
+ }
+
+ atomic_set(&lp->tx_count, 0);
+ atomic_set(&lp->tx_ring_head, 0);
+ lp->tx_ring_tail=0;
+}
+
+
+/**
+ * mc32_open - handle 'up' of card
+ * @dev: device to open
+ *
+ * The user is trying to bring the card into ready state. This requires
+ * a brief dialogue with the card. Firstly we enable interrupts and then
+ * 'indications'. Without these enabled the card doesn't bother telling
+ * us what it has done. This had me puzzled for a week.
+ *
+ * We configure the number of card descriptors, then load the network
+ * address and multicast filters. Turn on the workaround mode. This
+ * works around a bug in the 82586 - it asks the firmware to do
+ * so. It has a performance (latency) hit but is needed on busy
+ * [read most] lans. We load the ring with buffers then we kick it
+ * all off.
+ */
+
+static int mc32_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct mc32_local *lp = netdev_priv(dev);
+ u8 one=1;
+ u8 regs;
+ u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
+
+ /*
+ * Interrupts enabled
+ */
+
+ regs=inb(ioaddr+HOST_CTRL);
+ regs|=HOST_CTRL_INTE;
+ outb(regs, ioaddr+HOST_CTRL);
+
+ /*
+ * Allow ourselves to issue commands
+ */
+
+ up(&lp->cmd_mutex);
+
+
+ /*
+ * Send the indications on command
+ */
+
+ mc32_command(dev, 4, &one, 2);
+
+ /*
+ * Poke it to make sure it's really dead.
+ */
+
+ mc32_halt_transceiver(dev);
+ mc32_flush_tx_ring(dev);
+
+ /*
+ * Ask card to set up on-card descriptors to our spec
+ */
+
+ if(mc32_command(dev, 8, descnumbuffs, 4)) {
+ printk("%s: %s rejected our buffer configuration!\n",
+ dev->name, cardname);
+ mc32_close(dev);
+ return -ENOBUFS;
+ }
+
+ /* Report new configuration */
+ mc32_command(dev, 6, NULL, 0);
+
+ lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
+ lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
+ lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
+ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
+
+ /* Set Network Address */
+ mc32_command(dev, 1, dev->dev_addr, 6);
+
+ /* Set the filters */
+ mc32_set_multicast_list(dev);
+
+ if (WORKAROUND_82586) {
+ u16 zero_word=0;
+ mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
+ }
+
+ mc32_load_tx_ring(dev);
+
+ if(mc32_load_rx_ring(dev))
+ {
+ mc32_close(dev);
+ return -ENOBUFS;
+ }
+
+ lp->xceiver_desired_state = RUNNING;
+
+ /* And finally, set the ball rolling... */
+ mc32_start_transceiver(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+/**
+ * mc32_timeout - handle a timeout from the network layer
+ * @dev: 3c527 that timed out
+ *
+ * Handle a timeout on transmit from the 3c527. This normally means
+ * bad things as the hardware handles cable timeouts and mess for
+ * us.
+ *
+ */
+
+static void mc32_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
+ /* Try to restart the adaptor. */
+ netif_wake_queue(dev);
+}
+
+
+/**
+ * mc32_send_packet - queue a frame for transmit
+ * @skb: buffer to transmit
+ * @dev: 3c527 to send it out of
+ *
+ * Transmit a buffer. This normally means throwing the buffer onto
+ * the transmit queue as the queue is quite large. If the queue is
+ * full then we set tx_busy and return. Once the interrupt handler
+ * gets messages telling it to reclaim transmit queue entries, we will
+ * clear tx_busy and the kernel will start calling this again.
+ *
+ * We do not disable interrupts or acquire any locks; this can
+ * run concurrently with mc32_tx_ring(), and the function itself
+ * is serialised at a higher layer. However, similarly for the
+ * card itself, we must ensure that we update tx_ring_head only
+ * after we've established a valid packet on the tx ring (and
+ * before we let the card "see" it, to prevent it racing with the
+ * irq handler).
+ *
+ */
+
+static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ u32 head = atomic_read(&lp->tx_ring_head);
+
+ volatile struct skb_header *p, *np;
+
+ netif_stop_queue(dev);
+
+ if(atomic_read(&lp->tx_count)==0) {
+ return 1;
+ }
+
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL) {
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ atomic_dec(&lp->tx_count);
+
+ /* P is the last sending/sent buffer as a pointer */
+ p=lp->tx_ring[head].p;
+
+ head = next_tx(head);
+
+ /* NP is the buffer we will be loading */
+ np=lp->tx_ring[head].p;
+
+ /* We will need this to flush the buffer out */
+ lp->tx_ring[head].skb=skb;
+
+ np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ np->data = isa_virt_to_bus(skb->data);
+ np->status = 0;
+ np->control = CONTROL_EOP | CONTROL_EOL;
+ wmb();
+
+ /*
+ * The new frame has been setup; we can now
+ * let the interrupt handler and card "see" it
+ */
+
+ atomic_set(&lp->tx_ring_head, head);
+ p->control &= ~CONTROL_EOL;
+
+ netif_wake_queue(dev);
+ return 0;
+}
+
+
+/**
+ * mc32_update_stats - pull off the on board statistics
+ * @dev: 3c527 to service
+ *
+ *
+ * Query and reset the on-card stats. There's the small possibility
+ * of a race here, which would result in an underestimation of
+ * actual errors. As such, we'd prefer to keep all our stats
+ * collection in software. As a rule, we do. However, software counting
+ * can't be used for rx errors and collisions because, by default, the
+ * card discards bad rx packets.
+ *
+ * Setting the SAV BP in the rx filter command supposedly
+ * stops this behaviour. However, testing shows that it only seems to
+ * enable the collation of on-card rx statistics --- the driver
+ * never sees an RX descriptor with an error status set.
+ *
+ */
+
+static void mc32_update_stats(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct mc32_stats *st = lp->stats;
+
+ u32 rx_errors=0;
+
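+ /*
+  * Each line below folds a card counter into its cumulative
+  * net_stats counter, adds the updated total into rx_errors,
+  * and then zeroes the card counter for the next interval.
+  */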
+ rx_errors+=lp->net_stats.rx_crc_errors +=st->rx_crc_errors;
+ st->rx_crc_errors=0;
+ rx_errors+=lp->net_stats.rx_fifo_errors +=st->rx_overrun_errors;
+ st->rx_overrun_errors=0;
+ rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors;
+ st->rx_alignment_errors=0;
+ rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors;
+ st->rx_tooshort_errors=0;
+ rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
+ st->rx_outofresource_errors=0;
+ lp->net_stats.rx_errors=rx_errors;
+
+ /* Number of packets which saw one collision */
+ lp->net_stats.collisions+=st->dataC[10];
+ st->dataC[10]=0;
+
+ /* Number of packets which saw 2--15 collisions */
+ lp->net_stats.collisions+=st->dataC[11];
+ st->dataC[11]=0;
+}
+
+
+/**
+ * mc32_rx_ring - process the receive ring
+ * @dev: 3c527 that needs its receive ring processing
+ *
+ *
+ * We have received one or more indications from the card that a
+ * receive has completed. The buffer ring thus contains dirty
+ * entries. We walk the ring by iterating over the circular rx_ring
+ * array, starting at the next dirty buffer (which happens to be the
+ * one we finished up at last time around).
+ *
+ * For each completed packet, we either copy it and pass it up
+ * the stack or, if the packet is near MTU sized, allocate
+ * another buffer and flip the old one up the stack.
+ *
+ * We must succeed in keeping a buffer on the ring. If necessary we
+ * will toss a received packet rather than lose a ring entry. Once
+ * the first uncompleted descriptor is found, we move the
+ * End-Of-List bit to include the buffers just processed.
+ *
+ */
+
+static void mc32_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *p;
+ u16 rx_ring_tail;
+ u16 rx_old_tail;
+ int x=0;
+
+ rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
+
+ do
+ {
+ p=lp->rx_ring[rx_ring_tail].p;
+
+ if(!(p->status & (1<<7))) { /* Not COMPLETED */
+ break;
+ }
+ if(p->status & (1<<6)) /* COMPLETED_OK */
+ {
+
+ u16 length=p->length;
+ struct sk_buff *skb;
+ struct sk_buff *newskb;
+
+ /* Try to save time by avoiding a copy on big frames */
+
+ if ((length > RX_COPYBREAK)
+ && ((newskb=dev_alloc_skb(1532)) != NULL))
+ {
+ skb=lp->rx_ring[rx_ring_tail].skb;
+ skb_put(skb, length);
+
+ skb_reserve(newskb,18);
+ lp->rx_ring[rx_ring_tail].skb=newskb;
+ p->data=isa_virt_to_bus(newskb->data);
+ }
+ else
+ {
+ skb=dev_alloc_skb(length+2);
+
+ if(skb==NULL) {
+ lp->net_stats.rx_dropped++;
+ goto dropped;
+ }
+
+ skb_reserve(skb,2);
+ memcpy(skb_put(skb, length),
+ lp->rx_ring[rx_ring_tail].skb->data, length);
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->dev=dev;
+ dev->last_rx = jiffies;
+ lp->net_stats.rx_packets++;
+ lp->net_stats.rx_bytes += length;
+ netif_rx(skb);
+ }
+
+ dropped:
+ p->length = 1532;
+ p->status = 0;
+
+ rx_ring_tail=next_rx(rx_ring_tail);
+ }
+ while(x++<48);
+
+ /* If there was actually a frame to be processed, place the EOL bit */
+ /* at the descriptor prior to the one to be filled next */
+
+ if (rx_ring_tail != rx_old_tail)
+ {
+ lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
+ lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
+
+ lp->rx_ring_tail=rx_ring_tail;
+ }
+}
+
+
+/**
+ * mc32_tx_ring - process completed transmits
+ * @dev: 3c527 that needs its transmit ring processing
+ *
+ *
+ * This operates in a similar fashion to mc32_rx_ring. We iterate
+ * over the transmit ring. For each descriptor which has been
+ * processed by the card, we free its associated buffer and note
+ * any errors. This continues until the transmit ring is emptied
+ * or we reach a descriptor that hasn't yet been processed by the
+ * card.
+ *
+ */
+
+static void mc32_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *np;
+
+ /*
+ * We rely on head==tail to mean 'queue empty'.
+ * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
+ * tx_ring_head wrapping to tail and confusing a 'queue empty'
+ * condition with 'queue full'
+ */
+
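+ /*
+  * Purely illustrative example: with a 16-entry ring we would allow
+  * at most 15 buffers outstanding, so a full ring leaves head one
+  * slot behind tail instead of equal to it.
+  */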
+ while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
+ {
+ u16 t;
+
+ t=next_tx(lp->tx_ring_tail);
+ np=lp->tx_ring[t].p;
+
+ if(!(np->status & (1<<7)))
+ {
+ /* Not COMPLETED */
+ break;
+ }
+ lp->net_stats.tx_packets++;
+ if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
+ {
+ lp->net_stats.tx_errors++;
+
+ switch(np->status&0x0F)
+ {
+ case 1:
+ lp->net_stats.tx_aborted_errors++;
+ break; /* Max collisions */
+ case 2:
+ lp->net_stats.tx_fifo_errors++;
+ break;
+ case 3:
+ lp->net_stats.tx_carrier_errors++;
+ break;
+ case 4:
+ lp->net_stats.tx_window_errors++;
+ break; /* CTS Lost */
+ case 5:
+ lp->net_stats.tx_aborted_errors++;
+ break; /* Transmit timeout */
+ }
+ }
+ /* Packets are sent in order - this is
+ basically a FIFO queue of buffers matching
+ the card ring */
+ lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
+ dev_kfree_skb_irq(lp->tx_ring[t].skb);
+ lp->tx_ring[t].skb=NULL;
+ atomic_inc(&lp->tx_count);
+ netif_wake_queue(dev);
+
+ lp->tx_ring_tail=t;
+ }
+
+}
+
+
+/**
+ * mc32_interrupt - handle an interrupt from a 3c527
+ * @irq: Interrupt number
+ * @dev_id: 3c527 that requires servicing
+ * @regs: Registers (unused)
+ *
+ *
+ * An interrupt is raised whenever the 3c527 writes to the command
+ * register. This register contains the message it wishes to send us
+ * packed into a single byte field. We keep reading status entries
+ * until we have processed all the control items, but simply count
+ * transmit and receive reports. When all reports are in we empty the
+ * transceiver rings as appropriate. This saves the overhead of
+ * multiple command requests.
+ *
+ * Because MCA is level-triggered, we shouldn't miss indications.
+ * Therefore, we needn't ask the card to suspend interrupts within
+ * this handler. The card receives an implicit acknowledgment of the
+ * current interrupt when we read the command register.
+ *
+ */
+
+static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct mc32_local *lp;
+ int ioaddr, status, boguscount = 0;
+ int rx_event = 0;
+ int tx_event = 0;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* See what's cooking */
+
+ while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
+ {
+ status=inb(ioaddr+HOST_CMD);
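+ /*
+  * The status byte packs several reports: bits 0-2 carry the
+  * transmit indication, bits 3-5 the receive indication, bit 6
+  * flags a completed execution command and bit 7 a statistics
+  * counter that is about to overflow.
+  */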
+
+#ifdef DEBUG_IRQ
+ printk("Status TX%d RX%d EX%d OV%d BC%d\n",
+ (status&7), (status>>3)&7, (status>>6)&1,
+ (status>>7)&1, boguscount);
+#endif
+
+ switch(status&7)
+ {
+ case 0:
+ break;
+ case 6: /* TX fail */
+ case 2: /* TX ok */
+ tx_event = 1;
+ break;
+ case 3: /* Halt */
+ case 4: /* Abort */
+ complete(&lp->xceiver_cmd);
+ break;
+ default:
+ printk("%s: strange tx ack %d\n", dev->name, status&7);
+ }
+ status>>=3;
+ switch(status&7)
+ {
+ case 0:
+ break;
+ case 2: /* RX */
+ rx_event=1;
+ break;
+ case 3: /* Halt */
+ case 4: /* Abort */
+ complete(&lp->xceiver_cmd);
+ break;
+ case 6:
+ /* Out of RX buffers stat */
+ /* Must restart rx */
+ lp->net_stats.rx_dropped++;
+ mc32_rx_ring(dev);
+ mc32_start_transceiver(dev);
+ break;
+ default:
+ printk("%s: strange rx ack %d\n",
+ dev->name, status&7);
+ }
+ status>>=3;
+ if(status&1)
+ {
+ /*
+ * No thread is waiting: we need to tidy
+ * up ourselves.
+ */
+
+ if (lp->cmd_nonblocking) {
+ up(&lp->cmd_mutex);
+ if (lp->mc_reload_wait)
+ mc32_reset_multicast_list(dev);
+ }
+ else complete(&lp->execution_cmd);
+ }
+ if(status&2)
+ {
+ /*
+ * We get interrupted once per
+ * counter that is about to overflow.
+ */
+
+ mc32_update_stats(dev);
+ }
+ }
+
+
+ /*
+ * Process the transmit and receive rings
+ */
+
+ if(tx_event)
+ mc32_tx_ring(dev);
+
+ if(rx_event)
+ mc32_rx_ring(dev);
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * mc32_close - user configuring the 3c527 down
+ * @dev: 3c527 card to shut down
+ *
+ * The 3c527 is a bus mastering device. We must be careful how we
+ * shut it down. It may also be sharing an interrupt line, so we
+ * have to be sure to silence it properly.
+ *
+ * We indicate to the rest of the driver that the card is closing.
+ * Otherwise, it is possible that the card may run out
+ * of receive buffers and restart the transceiver while we're
+ * trying to close it.
+ *
+ * We abort any receives and transmits going on, and then wait until
+ * any pending exec commands have completed in other code threads.
+ * In theory we can't get here while that is true; in practice I am
+ * paranoid.
+ *
+ * We turn off the interrupt enable for the board to be sure it can't
+ * interfere with other devices.
+ */
+
+static int mc32_close(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ u8 regs;
+ u16 one=1;
+
+ lp->xceiver_desired_state = HALTED;
+ netif_stop_queue(dev);
+
+ /*
+ * Send the indications on command (handy debug check)
+ */
+
+ mc32_command(dev, 4, &one, 2);
+
+ /* Shut down the transceiver */
+
+ mc32_halt_transceiver(dev);
+
+ /* Ensure we issue no more commands beyond this point */
+
+ down(&lp->cmd_mutex);
+
+ /* Ok the card is now stopping */
+
+ regs=inb(ioaddr+HOST_CTRL);
+ regs&=~HOST_CTRL_INTE;
+ outb(regs, ioaddr+HOST_CTRL);
+
+ mc32_flush_rx_ring(dev);
+ mc32_flush_tx_ring(dev);
+
+ mc32_update_stats(dev);
+
+ return 0;
+}
+
+
+/**
+ * mc32_get_stats - hand back stats to network layer
+ * @dev: The 3c527 card to handle
+ *
+ * We've collected all the stats we can in software already. Now
+ * it's time to update those kept on-card and return the lot.
+ *
+ */
+
+static struct net_device_stats *mc32_get_stats(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+
+ mc32_update_stats(dev);
+ return &lp->net_stats;
+}
+
+
+/**
+ * do_mc32_set_multicast_list - attempt to update multicasts
+ * @dev: 3c527 device to load the list on
+ * @retry: indicates this is not the first call.
+ *
+ *
+ * Actually set or clear the multicast filter for this adaptor. The
+ * locking issues are handled by this routine. We have to track
+ * state as it may take multiple calls to get the command sequence
+ * completed. We just keep trying to schedule the loads until we
+ * manage to process them all.
+ *
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ *
+ * num_addrs == 0 Normal mode, clear multicast list
+ *
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ *
+ * See mc32_update_stats() regarding setting the SAV BP bit.
+ *
+ */
+
+static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
+
+ if (dev->flags&IFF_PROMISC)
+ /* Enable promiscuous mode */
+ filt |= 1;
+ else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
+ {
+ dev->flags|=IFF_PROMISC;
+ filt |= 1;
+ }
+ else if(dev->mc_count)
+ {
+ unsigned char block[62];
+ unsigned char *bp;
+ struct dev_mc_list *dmc=dev->mc_list;
+
+ int i;
+
+ if(retry==0)
+ lp->mc_list_valid = 0;
+ if(!lp->mc_list_valid)
+ {
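+ /*
+  * Command 2 takes a block laid out as a 16-bit address
+  * count followed by the 6-byte hardware addresses
+  * themselves.
+  */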
+ block[1]=0;
+ block[0]=dev->mc_count;
+ bp=block+2;
+
+ for(i=0;i<dev->mc_count;i++)
+ {
+ memcpy(bp, dmc->dmi_addr, 6);
+ bp+=6;
+ dmc=dmc->next;
+ }
+ if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ {
+ lp->mc_reload_wait = 1;
+ return;
+ }
+ lp->mc_list_valid=1;
+ }
+ }
+
+ if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
+ {
+ lp->mc_reload_wait = 1;
+ }
+ else {
+ lp->mc_reload_wait = 0;
+ }
+}
+
+
+/**
+ * mc32_set_multicast_list - queue multicast list update
+ * @dev: The 3c527 to use
+ *
+ * Commence loading the multicast list. This is called when the kernel
+ * changes the lists. It will override any pending list we are trying to
+ * load.
+ */
+
+static void mc32_set_multicast_list(struct net_device *dev)
+{
+ do_mc32_set_multicast_list(dev,0);
+}
+
+
+/**
+ * mc32_reset_multicast_list - reset multicast list
+ * @dev: The 3c527 to use
+ *
+ * Attempt the next step in loading the multicast lists. If this attempt
+ * fails to complete then it will be scheduled and this function called
+ * again later from elsewhere.
+ */
+
+static void mc32_reset_multicast_list(struct net_device *dev)
+{
+ do_mc32_set_multicast_list(dev,1);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return mc32_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ mc32_debug = level;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+
+static struct net_device *this_device;
+
+/**
+ * init_module - entry point
+ *
+ * Probe and locate a 3c527 card. This really should probe and locate
+ * all the 3c527 cards in the machine, not just one of them. Yes, you can
+ * insmod multiple modules for now, but it's a hack.
+ */
+
+int init_module(void)
+{
+ this_device = mc32_probe(-1);
+ if (IS_ERR(this_device))
+ return PTR_ERR(this_device);
+ return 0;
+}
+
+/**
+ * cleanup_module - free resources for an unload
+ *
+ * Unloading time. We release the MCA bus resources and the interrupt,
+ * at which point everything is ready to unload. The card must be stopped
+ * at this point or we would not have been called. When we unload, we
+ * leave the card stopped but not totally shut down. When the card is
+ * initialized again it must be rebooted, or the rings reloaded, before
+ * any transmit operations are allowed to start scribbling into memory.
+ */
+
+void cleanup_module(void)
+{
+ unregister_netdev(this_device);
+ cleanup_card(this_device);
+ free_netdev(this_device);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
new file mode 100644
index 000000000000..c10f009ce9b6
--- /dev/null
+++ b/drivers/net/3c527.h
@@ -0,0 +1,81 @@
+/*
+ * 3COM "EtherLink MC/32" Descriptions
+ */
+
+/*
+ * Registers
+ */
+
+#define HOST_CMD 0
+#define HOST_CMD_START_RX (1<<3)
+#define HOST_CMD_SUSPND_RX (3<<3)
+#define HOST_CMD_RESTRT_RX (5<<3)
+
+#define HOST_CMD_SUSPND_TX 3
+#define HOST_CMD_RESTRT_TX 5
+
+
+#define HOST_STATUS 2
+#define HOST_STATUS_CRR (1<<6)
+#define HOST_STATUS_CWR (1<<5)
+
+
+#define HOST_CTRL 6
+#define HOST_CTRL_ATTN (1<<7)
+#define HOST_CTRL_RESET (1<<6)
+#define HOST_CTRL_INTE (1<<2)
+
+#define HOST_RAMPAGE 8
+
+#define HALTED 0
+#define RUNNING 1
+
+struct mc32_mailbox
+{
+ u16 mbox __attribute((packed));
+ u16 data[1] __attribute((packed));
+};
+
+struct skb_header
+{
+ u8 status __attribute((packed));
+ u8 control __attribute((packed));
+ u16 next __attribute((packed)); /* Do not change! */
+ u16 length __attribute((packed));
+ u32 data __attribute((packed));
+};
+
+struct mc32_stats
+{
+ /* RX Errors */
+ u32 rx_crc_errors __attribute((packed));
+ u32 rx_alignment_errors __attribute((packed));
+ u32 rx_overrun_errors __attribute((packed));
+ u32 rx_tooshort_errors __attribute((packed));
+ u32 rx_toolong_errors __attribute((packed));
+ u32 rx_outofresource_errors __attribute((packed));
+
+ u32 rx_discarded __attribute((packed)); /* via card pattern match filter */
+
+ /* TX Errors */
+ u32 tx_max_collisions __attribute((packed));
+ u32 tx_carrier_errors __attribute((packed));
+ u32 tx_underrun_errors __attribute((packed));
+ u32 tx_cts_errors __attribute((packed));
+ u32 tx_timeout_errors __attribute((packed));
+
+ /* various cruft */
+ u32 dataA[6] __attribute((packed));
+ u16 dataB[5] __attribute((packed));
+ u32 dataC[14] __attribute((packed));
+};
+
+#define STATUS_MASK 0x0F
+#define COMPLETED (1<<7)
+#define COMPLETED_OK (1<<6)
+#define BUFFER_BUSY (1<<5)
+
+#define CONTROL_EOP (1<<7) /* End Of Packet */
+#define CONTROL_EOL (1<<6) /* End of List */
+
+#define MCA_MC32_ID 0x0041 /* Our MCA ident */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
new file mode 100644
index 000000000000..43e2ac532f82
--- /dev/null
+++ b/drivers/net/3c59x.c
@@ -0,0 +1,3365 @@
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
+/*
+ Written 1996-1999 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+ Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+ and the EtherLink XL 3c900 and 3c905 cards.
+
+ Problem reports and questions should be directed to
+ vortex@scyld.com
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Linux Kernel Additions:
+
+ 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
+ 0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
+ Remove compatibility defines for kernel versions < 2.2.x.
+ Update for new 2.3.x module interface
+ LK1.1.2 (March 19, 2000)
+ * New PCI interface (jgarzik)
+
+ LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+ - Merged with 3c575_cb.c
+ - Don't set RxComplete in boomerang interrupt enable reg
+ - spinlock in vortex_timer to protect mdio functions
+ - disable local interrupts around call to vortex_interrupt in
+ vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
+ - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
+ - In vortex_start_xmit(), move the lock to _after_ we've altered
+ vp->cur_tx and vp->tx_full. This defeats the race between
+ vortex_start_xmit() and vortex_interrupt which was identified
+ by Bogdan Costescu.
+ - Merged back support for six new cards from various sources
+ - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
+ insertion oops)
+ - Tell it that 3c905C has NWAY for 100bT autoneg
+ - Fix handling of SetStatusEnd in 'Too much work..' code, as
+ per 2.3.99's 3c575_cb (Dave Hinds).
+ - Split ISR into two for vortex & boomerang
+ - Fix MOD_INC/DEC races
+ - Handle resource allocation failures.
+ - Fix 3CCFE575CT LED polarity
+ - Make tx_interrupt_mitigation the default
+
+ LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+ - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
+ - Put vortex_info_tbl into __devinitdata
+ - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
+ as in the hardware.
+ - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
+
+ LK1.1.5 28 April 2000, andrewm
+ - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
+ - Some extra diagnostics
+ - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
+ chips usually get a Tx timeout.
+ - Added extra_reset module parm
+ - Replaced some inline timer manip with mod_timer
+ (François Romieu <Francois.Romieu@nic.fr>)
+ - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
+ (this came across from 3c575_cb).
+
+ LK1.1.6 06 Jun 2000, andrewm
+ - Backed out the PPC defines.
+ - Use del_timer_sync(), mod_timer().
+ - Fix wrapped ulong comparison in boomerang_rx()
+ - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
+ (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
+ - Replace union wn3_config with BFINS/BFEXT manipulation for
+ sparc64 (Pete Zaitcev, Peter Jones)
+ - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
+ do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
+ Donald Becker)
+ - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
+ - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
+
+ LK1.1.7 2 Jul 2000 andrewm
+ - Better handling of shared IRQs
+ - Reset the transmitter on a Tx reclaim error
+ - Fixed crash under OOM during vortex_open() (Mark Hemment)
+ - Fix Rx cessation problem during OOM (help from Mark Hemment)
+ - The spinlocks around the mdio access were blocking interrupts for 300uS.
+ Fix all this to use spin_lock_bh() within mdio_read/write
+ - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
+ have one.
+ - Added 802.3x MAC-layer flow control support
+
+ LK1.1.8 13 Aug 2000 andrewm
+ - Ignore request_region() return value - already reserved if Cardbus.
+ - Merged some additional Cardbus flags from Don's 0.99Qk
+ - Some fixes for 3c556 (Fred Maciel)
+ - Fix for EISA initialisation (Jan Rekorajski)
+ - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
+ - Fixed MII_XCVR_PWR for 3CCFE575CT
+ - Added INVERT_LED_PWR, used it.
+ - Backed out the extra_reset stuff
+
+ LK1.1.9 12 Sep 2000 andrewm
+ - Backed out the tx_reset_resume flags. It was a no-op.
+ - In vortex_error, don't reset the Tx on txReclaim errors
+ - In vortex_error, don't reset the Tx on maxCollisions errors.
+ Hence backed out all the DownListPtr logic here.
+ - In vortex_error, give Tornado cards a partial TxReset on
+ maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
+ - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
+ - Fixed a bug where, if vp->tx_full is set when the interface
+ is downed, it remains set when the interface is upped. Bad
+ things happen.
+
+ LK1.1.10 17 Sep 2000 andrewm
+ - Added EEPROM_8BIT for 3c555 (Fred Maciel)
+ - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
+ - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
+
+ LK1.1.11 13 Nov 2000 andrewm
+ - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
+
+ LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
+ - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
+ - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
+ - Added extended issue_and_wait for the 3c905CX.
+ - Look for an MII on PHY index 24 first (3c905CX oddity).
+ - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
+ - Don't free skbs we don't own on oom path in vortex_open().
+
+ LK1.1.13 27 Jan 2001
+ - Added explicit `medialock' flag so we can truly
+ lock the media type down with `options'.
+ - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
+ - Added and used EEPROM_NORESET for 3c556B PM resumes.
+ - Fixed leakage of vp->rx_ring.
+ - Break out separate HAS_HWCKSM device capability flag.
+ - Kill vp->tx_full (ANK)
+ - Merge zerocopy fragment handling (ANK?)
+
+ LK1.1.14 15 Feb 2001
+ - Enable WOL. Can be turned on with `enable_wol' module option.
+ - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
+ - If a device's internalconfig register reports it has NWAY,
+ use it, even if autoselect is enabled.
+
+ LK1.1.15 6 June 2001 akpm
+ - Prevent double counting of received bytes (Lars Christensen)
+ - Add ethtool support (jgarzik)
+ - Add module parm descriptions (Andrzej M. Krzysztofowicz)
+ - Implemented alloc_etherdev() API
+ - Special-case the 'Tx error 82' message.
+
+ LK1.1.16 18 July 2001 akpm
+ - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
+ - Lessen verbosity of bootup messages
+ - Fix WOL - use new PM API functions.
+ - Use netif_running() instead of vp->open in suspend/resume.
+ - Don't reset the interface logic on open/close/rmmod. It upsets
+ autonegotiation, and hence DHCP (from 0.99T).
+ - Back out EEPROM_NORESET flag because of the above (we do it for all
+ NICs).
+ - Correct 3c982 identification string
+ - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
+ clash.
+
+ LK1.1.17 18Dec01 akpm
+ - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
+ And it has NWAY.
+ - Mask our advertised modes (vp->advertising) with our capabilities
+ (MII reg5) when deciding which duplex mode to use.
+ - Add `global_options' as default for options[]. Ditto global_enable_wol,
+ global_full_duplex.
+
+ LK1.1.18 01Jul02 akpm
+ - Fix for undocumented transceiver power-up bit on some 3c566B's
+ (Donald Becker, Rahul Karnik)
+
+ - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
+ - Also see Documentation/networking/vortex.txt
+
+ LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
+ - EISA sysfs integration.
+*/
+
+/*
+ * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation
+ * as well as other drivers
+ *
+ * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
+ * due to dead code elimination. There will be some performance benefits from this due to
+ * elimination of all the tests and reduced cache footprint.
+ */
+
+
+#define DRV_NAME "3c59x"
+#define DRV_VERSION "LK1.1.19"
+#define DRV_RELDATE "10 Nov 2002"
+
+
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+#ifndef __arm__
+static int rx_copybreak = 200;
+#else
+/* ARM systems perform better by disregarding the bus-master
+ transfer capability of these cards. -- rmk */
+static int rx_copybreak = 1513;
+#endif
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+/* Tx timeout interval (millisecs) */
+static int watchdog = 5000;
+
+/* Allow aggregation of Tx interrupts. Saves CPU load at the cost
+ * of possible Tx stalls if the system is blocking interrupts
+ * somewhere else. Undefine this to disable.
+ */
+#define tx_interrupt_mitigation 1
+
+/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+#include <asm/irq.h> /* For NR_IRQS only. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+ This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+
+static char version[] __devinitdata =
+DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "
+ DRV_VERSION " " DRV_RELDATE);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
+/* Operational parameters that usually are not changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+ runtime register window, window 1, is now always mapped in.
+ The Boomerang size is twice as large as the Vortex -- it has additional
+ bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbps
+versions of the FastEtherLink cards. The supported product IDs are
+ 3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source or available from
+ cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series. The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3. The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters. On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability. There is a significant performance impact of taking
+an extra interrupt or polling for the completion of each transfer, as well
+as difficulty sharing the single transfer engine between the transmit and
+receive threads. Using DMA transfers is a win only with large blocks or
+with the flawed versions of the Intel Orion motherboard PCI controller.
+
+The Boomerang chip's full-bus-master interface is useful, and has the
+currently-unused advantages over other similar chips that queued transmit
+packets may be reordered and receive buffer groups are associated with a
+single frame.
+
+With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+Rather than a fixed intermediate receive buffer, this scheme allocates
+full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
+the copying breakpoint: it is chosen to trade off the memory wasted by
+passing the full-sized skbuff to the queue layer for all frames against
+the cost of copying a frame into a correctly-sized skbuff.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+3c590, 3c595, and 3c900 boards.
+The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+the EISA version is called "Demon". According to Terry these names come
+from rides at the local amusement park.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+This driver only supports ethernet packets because of the skbuff allocation
+limit of 4K.
+*/
+
+/* This table drives the PCI probe routines. It's mostly boilerplate in all
+ of the drivers, and will likely be provided by some future kernel.
+*/
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
+ EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
+ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
+ INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
+ EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
+ EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
+
+enum vortex_chips {
+ CH_3C590 = 0,
+ CH_3C592,
+ CH_3C597,
+ CH_3C595_1,
+ CH_3C595_2,
+
+ CH_3C595_3,
+ CH_3C900_1,
+ CH_3C900_2,
+ CH_3C900_3,
+ CH_3C900_4,
+
+ CH_3C900_5,
+ CH_3C900B_FL,
+ CH_3C905_1,
+ CH_3C905_2,
+ CH_3C905B_1,
+
+ CH_3C905B_2,
+ CH_3C905B_FX,
+ CH_3C905C,
+ CH_3C9202,
+ CH_3C980,
+ CH_3C9805,
+
+ CH_3CSOHO100_TX,
+ CH_3C555,
+ CH_3C556,
+ CH_3C556B,
+ CH_3C575,
+
+ CH_3C575_1,
+ CH_3CCFE575,
+ CH_3CCFE575CT,
+ CH_3CCFE656,
+ CH_3CCFEM656,
+
+ CH_3CCFEM656_1,
+ CH_3C450,
+ CH_3C920,
+ CH_3C982A,
+ CH_3C982B,
+
+ CH_905BT4,
+ CH_920B_EMB_WNM,
+};
+
+
+/* note: this array directly indexed by above enums, and MUST
+ * be kept in sync with both the enums above, and the PCI device
+ * table below
+ */
+static struct vortex_chip_info {
+ const char *name;
+ int flags;
+ int drv_flags;
+ int io_size;
+} vortex_info_tbl[] __devinitdata = {
+ {"3c590 Vortex 10Mbps",
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c595 Vortex 100baseTx",
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c595 Vortex 100baseT4",
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+
+ {"3c595 Vortex 100base-MII",
+ PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c900 Boomerang 10baseT",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ {"3c900 Boomerang 10Mbps Combo",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c900 Cyclone 10Mbps Combo",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+ {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c900B-FL Cyclone 10base-FL",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c905 Boomerang 100baseTx",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ {"3c905 Boomerang 100baseT4",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ {"3c905B Cyclone 100baseTx",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+
+ {"3c905B Cyclone 10/100/BNC",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c905B-FX Cyclone 100baseFx",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c905C Tornado",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
+ {"3c980 Cyclone",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+ {"3c980C Python-T",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3cSOHO100-TX Hurricane",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c555 Laptop Hurricane",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
+ {"3c556 Laptop Tornado",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+ HAS_HWCKSM, 128, },
+ {"3c556B Laptop Hurricane",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+ WNO_XCVR_PWR|HAS_HWCKSM, 128, },
+
+ {"3c575 [Megahertz] 10/100 LAN CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ {"3c575 Boomerang CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ {"3CCFE575BT Cyclone CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+ {"3CCFE575CT Tornado CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+ {"3CCFE656 Cyclone CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+
+ {"3CCFEM656B Cyclone+Winmodem CardBus",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+ {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+ {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c920 Tornado",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c982 Hydra Dual Port A",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+
+ {"3c982 Hydra Dual Port B",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+ {"3c905B-T4",
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c920B-EMB-WNM Tornado",
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+
+ {NULL,}, /* NULL terminated list. */
+};
+
+
+static struct pci_device_id vortex_pci_tbl[] = {
+ { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
+ { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
+ { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
+ { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
+ { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
+
+ { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
+ { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
+ { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
+ { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
+ { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
+
+ { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
+ { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
+ { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
+ { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+ { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
+
+ { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
+ { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
+ { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
+ { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
+ { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
+ { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
+
+ { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
+ { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
+ { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
+ { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
+ { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
+
+ { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
+ { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
+ { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
+ { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
+ { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
+
+ { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
+ { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
+ { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
+ { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
+ { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
+
+ { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
+ { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
+
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
+
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chip
+ can handle FDDI-length frames (~4500 octets), so parameters now count
+ 32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+ NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+ DriverTune=13, Checksum=15};
+
+enum Window2 { /* Window 2. */
+ Wn2_ResetOptions=12,
+};
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+#define BFEXT(value, offset, bitcount) \
+ ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
+
+#define BFINS(lhs, rhs, offset, bitcount) \
+ (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \
+ (((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
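+/* For example, XCVR(v) below expands to BFEXT(v, 20, 4): shift the
+   config dword right by 20 and mask with 0xf to recover the 4-bit
+   transceiver-select field. */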
+
+#define RAM_SIZE(v) BFEXT(v, 0, 3)
+#define RAM_WIDTH(v) BFEXT(v, 3, 1)
+#define RAM_SPEED(v) BFEXT(v, 4, 2)
+#define ROM_SIZE(v) BFEXT(v, 6, 2)
+#define RAM_SPLIT(v) BFEXT(v, 16, 2)
+#define XCVR(v) BFEXT(v, 20, 4)
+#define AUTOSELECT(v) BFEXT(v, 24, 1)
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
+ Wn7_MasterStatus = 12,
+};
+/* Boomerang bus master control registers. */
+enum MasterCtrl {
+ PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+ TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
+#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */
+struct boom_rx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status;
+ u32 addr; /* Up to 63 addr/len pairs possible. */
+ s32 length; /* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+ IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+ IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+#ifdef MAX_SKB_FRAGS
+#define DO_ZEROCOPY 1
+#else
+#define DO_ZEROCOPY 0
+#endif
+
+struct boom_tx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status; /* bits 0:12 length, others see below. */
+#if DO_ZEROCOPY
+ struct {
+ u32 addr;
+ s32 length;
+ } frag[1+MAX_SKB_FRAGS];
+#else
+ u32 addr;
+ s32 length;
+#endif
+};
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+ CRCDisable=0x2000, TxDComplete=0x8000,
+ AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+ TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
+
+struct vortex_extra_stats {
+ unsigned long tx_deferred;
+ unsigned long tx_multiple_collisions;
+ unsigned long rx_bad_ssd;
+};
+
+struct vortex_private {
+ /* The Rx and Tx rings should be quad-word-aligned. */
+ struct boom_rx_desc* rx_ring;
+ struct boom_tx_desc* tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct net_device_stats stats; /* Generic stats */
+ struct vortex_extra_stats xstats; /* NIC-specific extra stats */
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
+
+ /* PCI configuration space information. */
+ struct device *gendev;
+ char __iomem *cb_fn_base; /* CardBus function status addr space. */
+
+ /* Some values here only for performance evaluation and path-coverage */
+ int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+ int card_idx;
+
+ /* The remainder are related to chip state, mostly media selection. */
+ struct timer_list timer; /* Media selection timer. */
+ struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
+ int options; /* User-settable misc. driver options. */
+ unsigned int media_override:4, /* Passed-in media type. */
+ default_media:4, /* Read from the EEPROM/Wn3_Config. */
+ full_duplex:1, force_fd:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
+ flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
+ partner_flow_ctrl:1, /* Partner supports flow control */
+ has_nway:1,
+ enable_wol:1, /* Wake-on-LAN is enabled */
+ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
+ open:1,
+ medialock:1,
+ must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
+ large_frames:1; /* accept large frames */
+ int drv_flags;
+ u16 status_enable;
+ u16 intr_enable;
+ u16 available_media; /* From Wn3_Options. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+ u16 deferred; /* Resend these interrupts when we
+ * bale from the ISR */
+ u16 io_size; /* Size of PCI region (for release_region) */
+ spinlock_t lock; /* Serialise access to device & its vortex_private */
+ struct mii_if_info mii; /* MII lib hooks/info */
+};
+
+#ifdef CONFIG_PCI
+#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#else
+#define DEVICE_PCI(dev) NULL
+#endif
+
+#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+
+#ifdef CONFIG_EISA
+#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
+#else
+#define DEVICE_EISA(dev) NULL
+#endif
+
+#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ int wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
+ { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "tx_deferred" },
+ { "tx_multiple_collisions" },
+ { "rx_bad_ssd" },
+};
+
+/* number of ETHTOOL_GSTATS u64's */
+#define VORTEX_NUM_STATS 3
+
+static int vortex_probe1(struct device *gendev, long ioaddr, int irq,
+ int chip_idx, int card_idx);
+static void vortex_up(struct net_device *dev);
+static void vortex_down(struct net_device *dev, int final);
+static int vortex_open(struct net_device *dev);
+static void mdio_sync(long ioaddr, int bits);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
+static void vortex_timer(unsigned long arg);
+static void rx_oom_timer(unsigned long arg);
+static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int vortex_rx(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static irqreturn_t vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int vortex_close(struct net_device *dev);
+static void dump_tx_ring(struct net_device *dev);
+static void update_stats(long ioaddr, struct net_device *dev);
+static struct net_device_stats *vortex_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+#ifdef CONFIG_PCI
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static void vortex_tx_timeout(struct net_device *dev);
+static void acpi_set_WOL(struct net_device *dev);
+static struct ethtool_ops vortex_ethtool_ops;
+static void set_8021q_mode(struct net_device *dev, int enable);
+
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Option count limit only -- unlimited interfaces are supported. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int global_options = -1;
+static int global_full_duplex = -1;
+static int global_enable_wol = -1;
+
+/* #define dev_alloc_skb dev_alloc_skb_debug */
+
+/* Variables to work-around the Compaq PCI BIOS32 problem. */
+static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
+static struct net_device *compaq_net_device;
+
+static int vortex_cards_found;
+
+module_param(debug, int, 0);
+module_param(global_options, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param(global_full_duplex, int, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param_array(hw_checksums, int, NULL, 0);
+module_param_array(flow_ctrl, int, NULL, 0);
+module_param(global_enable_wol, int, 0);
+module_param_array(enable_wol, int, NULL, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(compaq_ioaddr, int, 0);
+module_param(compaq_irq, int, 0);
+module_param(compaq_device_id, int, 0);
+module_param(watchdog, int, 0);
+MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
+MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
+MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
+MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
+MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
+MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
+MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
+MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_vortex(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+ local_save_flags(flags);
+ local_irq_disable();
+ (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev,NULL);
+ local_irq_restore(flags);
+}
+#endif
+
+#ifdef CONFIG_PM
+
+static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev && dev->priv) {
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ vortex_down(dev, 1);
+ }
+ }
+ return 0;
+}
+
+static int vortex_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev && dev->priv) {
+ if (netif_running(dev)) {
+ vortex_up(dev);
+ netif_device_attach(dev);
+ }
+ }
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id vortex_eisa_ids[] = {
+ { "TCM5920", CH_3C592 },
+ { "TCM5970", CH_3C597 },
+ { "" }
+};
+
+static int vortex_eisa_probe (struct device *device);
+static int vortex_eisa_remove (struct device *device);
+
+static struct eisa_driver vortex_eisa_driver = {
+ .id_table = vortex_eisa_ids,
+ .driver = {
+ .name = "3c59x",
+ .probe = vortex_eisa_probe,
+ .remove = vortex_eisa_remove
+ }
+};
+
+static int vortex_eisa_probe (struct device *device)
+{
+ long ioaddr;
+ struct eisa_device *edev;
+
+ edev = to_eisa_device (device);
+ ioaddr = edev->base_addr;
+
+ if (!request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ if (vortex_probe1(device, ioaddr, inw(ioaddr + 0xC88) >> 12,
+ edev->id.driver_data, vortex_cards_found)) {
+ release_region (ioaddr, VORTEX_TOTAL_SIZE);
+ return -ENODEV;
+ }
+
+ vortex_cards_found++;
+
+ return 0;
+}
+
+static int vortex_eisa_remove (struct device *device)
+{
+ struct eisa_device *edev;
+ struct net_device *dev;
+ struct vortex_private *vp;
+ long ioaddr;
+
+ edev = to_eisa_device (device);
+ dev = eisa_get_drvdata (edev);
+
+ if (!dev) {
+ printk("vortex_eisa_remove called for Compaq device!\n");
+ BUG();
+ }
+
+ vp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ unregister_netdev (dev);
+ outw (TotalReset|0x14, ioaddr + EL3_CMD);
+ release_region (ioaddr, VORTEX_TOTAL_SIZE);
+
+ free_netdev (dev);
+ return 0;
+}
+#endif
+
+/* returns count found (>= 0), or negative on error */
+static int __init vortex_eisa_init (void)
+{
+ int eisa_found = 0;
+ int orig_cards_found = vortex_cards_found;
+
+#ifdef CONFIG_EISA
+ if (eisa_driver_register (&vortex_eisa_driver) >= 0) {
+		/* Because of the way the EISA bus is probed, we cannot assume
+		 * any devices have been found when we return from
+		 * eisa_driver_register (the bus root driver may not be
+		 * initialized yet). So we blindly assume something was
+		 * found, and let the sysfs magic happen... */
+
+ eisa_found = 1;
+ }
+#endif
+
+ /* Special code to work-around the Compaq PCI BIOS32 problem. */
+ if (compaq_ioaddr) {
+ vortex_probe1(NULL, compaq_ioaddr, compaq_irq,
+ compaq_device_id, vortex_cards_found++);
+ }
+
+ return vortex_cards_found - orig_cards_found + eisa_found;
+}
+
+/* returns 0 on success, or negative on error */
+static int __devinit vortex_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device (pdev);
+ if (rc < 0)
+ goto out;
+
+ rc = vortex_probe1 (&pdev->dev, pci_resource_start (pdev, 0),
+ pdev->irq, ent->driver_data, vortex_cards_found);
+ if (rc < 0) {
+ pci_disable_device (pdev);
+ goto out;
+ }
+
+ vortex_cards_found++;
+
+out:
+ return rc;
+}
+
+/*
+ * Start up the PCI/EISA device which is described by *gendev.
+ * Return 0 on success.
+ *
+ * NOTE: pdev can be NULL, for the case of a Compaq device
+ */
+static int __devinit vortex_probe1(struct device *gendev,
+ long ioaddr, int irq,
+ int chip_idx, int card_idx)
+{
+ struct vortex_private *vp;
+ int option;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int i, step;
+ struct net_device *dev;
+ static int printed_version;
+ int retval, print_info;
+ struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+ char *print_name = "3c59x";
+ struct pci_dev *pdev = NULL;
+ struct eisa_device *edev = NULL;
+
+ if (!printed_version) {
+ printk (version);
+ printed_version = 1;
+ }
+
+ if (gendev) {
+ if ((pdev = DEVICE_PCI(gendev))) {
+ print_name = pci_name(pdev);
+ }
+
+ if ((edev = DEVICE_EISA(gendev))) {
+ print_name = edev->dev.bus_id;
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(*vp));
+ retval = -ENOMEM;
+ if (!dev) {
+ printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
+ goto out;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, gendev);
+ vp = netdev_priv(dev);
+
+ option = global_options;
+
+ /* The lower four bits are the media type. */
+ if (dev->mem_start) {
+ /*
+ * The 'options' param is passed in as the third arg to the
+ * LILO 'ether=' argument for non-modular use
+ */
+ option = dev->mem_start;
+ }
+ else if (card_idx < MAX_UNITS) {
+ if (options[card_idx] >= 0)
+ option = options[card_idx];
+ }
+
+ if (option > 0) {
+ if (option & 0x8000)
+ vortex_debug = 7;
+ if (option & 0x4000)
+ vortex_debug = 2;
+ if (option & 0x0400)
+ vp->enable_wol = 1;
+ }
+
+ print_info = (vortex_debug > 1);
+ if (print_info)
+ printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
+
+ printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
+ print_name,
+ pdev ? "PCI" : "EISA",
+ vci->name,
+ ioaddr);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mtu = mtu;
+ vp->large_frames = mtu > 1500;
+ vp->drv_flags = vci->drv_flags;
+ vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
+ vp->io_size = vci->io_size;
+ vp->card_idx = card_idx;
+
+ /* module list only for Compaq device */
+ if (gendev == NULL) {
+ compaq_net_device = dev;
+ }
+
+ /* PCI-only startup logic */
+ if (pdev) {
+ /* EISA resources already marked, so only PCI needs to do this here */
+ /* Ignore return value, because Cardbus drivers already allocate for us */
+ if (request_region(ioaddr, vci->io_size, print_name) != NULL)
+ vp->must_free_region = 1;
+
+ /* enable bus-mastering if necessary */
+ if (vci->flags & PCI_USES_MASTER)
+ pci_set_master (pdev);
+
+ if (vci->drv_flags & IS_VORTEX) {
+ u8 pci_latency;
+ u8 new_latency = 248;
+
+ /* Check the PCI latency value. On the 3c590 series the latency timer
+ must be set to the maximum value to avoid data corruption that occurs
+			   when the timer expires during a transfer.  This bug exists in the Vortex
+			   chip only. */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < new_latency) {
+ printk(KERN_INFO "%s: Overriding PCI latency"
+ " timer (CFLT) setting of %d, new value is %d.\n",
+ print_name, pci_latency, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ }
+ }
+ }
+
+ spin_lock_init(&vp->lock);
+ vp->gendev = gendev;
+ vp->mii.dev = dev;
+ vp->mii.mdio_read = mdio_read;
+ vp->mii.mdio_write = mdio_write;
+ vp->mii.phy_id_mask = 0x1f;
+ vp->mii.reg_num_mask = 0x1f;
+
+	/* Make sure the rings are at least 16-byte aligned. */
+ vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ &vp->rx_ring_dma);
+ retval = -ENOMEM;
+ if (vp->rx_ring == 0)
+ goto free_region;
+
+ vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+ vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+
+ /* if we are a PCI driver, we store info in pdev->driver_data
+ * instead of a module list */
+ if (pdev)
+ pci_set_drvdata(pdev, dev);
+ if (edev)
+ eisa_set_drvdata (edev, dev);
+
+ vp->media_override = 7;
+ if (option >= 0) {
+ vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ if (vp->media_override != 7)
+ vp->medialock = 1;
+ vp->full_duplex = (option & 0x200) ? 1 : 0;
+ vp->bus_master = (option & 16) ? 1 : 0;
+ }
+
+ if (global_full_duplex > 0)
+ vp->full_duplex = 1;
+ if (global_enable_wol > 0)
+ vp->enable_wol = 1;
+
+ if (card_idx < MAX_UNITS) {
+ if (full_duplex[card_idx] > 0)
+ vp->full_duplex = 1;
+ if (flow_ctrl[card_idx] > 0)
+ vp->flow_ctrl = 1;
+ if (enable_wol[card_idx] > 0)
+ vp->enable_wol = 1;
+ }
+
+ vp->force_fd = vp->full_duplex;
+ vp->options = option;
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ {
+ int base;
+
+ if (vci->drv_flags & EEPROM_8BIT)
+ base = 0x230;
+ else if (vci->drv_flags & EEPROM_OFFSET)
+ base = EEPROM_Read + 0x30;
+ else
+ base = EEPROM_Read;
+
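+		/* Issue a read command for each EEPROM word and poll the busy
+		 * bit (0x8000 in Wn0EepromCmd) until the controller finishes. */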
+ for (i = 0; i < 0x40; i++) {
+ int timer;
+ outw(base + i, ioaddr + Wn0EepromCmd);
+			/* Pause for at least 162 us for the read to take place. */
+ for (timer = 10; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ eeprom[i] = inw(ioaddr + Wn0EepromData);
+ }
+ }
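+	/* The EEPROM checksum is the XOR of the first 0x18 words, folded into
+	 * a single byte; some chips extend the sum through word 0x20. */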
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+	if (checksum != 0x00) {		/* Grrr, needless incompatible change by 3Com. */
+ while (i < 0x21)
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
+ if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ if (print_info) {
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+ /* Unfortunately an all zero eeprom passes the checksum and this
+ gets found in the wild in failure cases. Crypto is hard 8) */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ retval = -EINVAL;
+ printk(KERN_ERR "*** EEPROM MAC address is invalid.\n");
+ goto free_ring; /* With every pack */
+ }
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+#ifdef __sparc__
+ if (print_info)
+ printk(", IRQ %s\n", __irq_itoa(dev->irq));
+#else
+ if (print_info)
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (dev->irq <= 0 || dev->irq >= NR_IRQS)
+ printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
+#endif
+
+ EL3WINDOW(4);
+ step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+ if (print_info) {
+ printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-"
+ "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+ step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+ }
+
+ if (pdev && vci->drv_flags & HAS_CB_FNS) {
+ unsigned long fn_st_addr; /* Cardbus function status space */
+ unsigned short n;
+
+ fn_st_addr = pci_resource_start (pdev, 2);
+ if (fn_st_addr) {
+ vp->cb_fn_base = ioremap(fn_st_addr, 128);
+ retval = -ENOMEM;
+ if (!vp->cb_fn_base)
+ goto free_ring;
+ }
+ if (print_info) {
+ printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
+ print_name, fn_st_addr, vp->cb_fn_base);
+ }
+ EL3WINDOW(2);
+
+ n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ if (vp->drv_flags & INVERT_LED_PWR)
+ n |= 0x10;
+ if (vp->drv_flags & INVERT_MII_PWR)
+ n |= 0x4000;
+ outw(n, ioaddr + Wn2_ResetOptions);
+ if (vp->drv_flags & WNO_XCVR_PWR) {
+ EL3WINDOW(0);
+ outw(0x0800, ioaddr);
+ }
+ }
+
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+ vp->info2 = eeprom[15];
+ vp->capabilities = eeprom[16];
+
+ if (vp->info1 & 0x8000) {
+ vp->full_duplex = 1;
+ if (print_info)
+ printk(KERN_INFO "Full duplex capable\n");
+ }
+
+ {
+ static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ unsigned int config;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
+ vp->available_media = 0x40;
+ config = inl(ioaddr + Wn3_Config);
+ if (print_info) {
+ printk(KERN_DEBUG " Internal config register is %4.4x, "
+ "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
+ printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << RAM_SIZE(config),
+ RAM_WIDTH(config) ? "word" : "byte",
+ ram_split[RAM_SPLIT(config)],
+ AUTOSELECT(config) ? "autoselect/" : "",
+ XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
+ media_tbl[XCVR(config)].name);
+ }
+ vp->default_media = XCVR(config);
+ if (vp->default_media == XCVR_NWAY)
+ vp->has_nway = 1;
+ vp->autoselect = AUTOSELECT(config);
+ }
+
+ if (vp->media_override != 7) {
+ printk(KERN_INFO "%s: Media override to transceiver type %d (%s).\n",
+ print_name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else
+ dev->if_port = vp->default_media;
+
+ if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
+ dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int phy, phy_idx = 0;
+ EL3WINDOW(4);
+ mii_preamble_required++;
+ if (vp->drv_flags & EXTRA_PREAMBLE)
+ mii_preamble_required++;
+ mdio_sync(ioaddr, 32);
+ mdio_read(dev, 24, 1);
+ for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+ int mii_status, phyx;
+
+ /*
+ * For the 3c905CX we look at index 24 first, because it bogusly
+ * reports an external PHY at all indices
+ */
+ if (phy == 0)
+ phyx = 24;
+ else if (phy <= 24)
+ phyx = phy - 1;
+ else
+ phyx = phy;
+ mii_status = mdio_read(dev, phyx, 1);
+ if (mii_status && mii_status != 0xffff) {
+ vp->phys[phy_idx++] = phyx;
+ if (print_info) {
+ printk(KERN_INFO " MII transceiver found at address %d,"
+ " status %4x.\n", phyx, mii_status);
+ }
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required++;
+ }
+ }
+ mii_preamble_required--;
+ if (phy_idx == 0) {
+ printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
+ vp->advertising = mdio_read(dev, vp->phys[0], 4);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+ vp->advertising &= ~0x02A0;
+ mdio_write(dev, vp->phys[0], 4, vp->advertising);
+ }
+ }
+ vp->mii.phy_id = vp->phys[0];
+ }
+
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+ if (print_info) {
+ printk(KERN_INFO " Enabling bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ }
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ vp->bus_master = 0; /* AKPM: vortex only */
+ }
+
+ /* The 3c59x-specific entries in the device structure. */
+ dev->open = vortex_open;
+ if (vp->full_bus_master_tx) {
+ dev->hard_start_xmit = boomerang_start_xmit;
+		/* Actually, it should still work with an IOMMU. */
+ dev->features |= NETIF_F_SG;
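+		/* Advertise IP checksum offload if the chip has hardware
+		 * checksumming (HAS_HWCKSM) and hw_checksums was left at its
+		 * default, or if it was explicitly enabled. */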
+ if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
+ (hw_checksums[card_idx] == 1)) {
+ dev->features |= NETIF_F_IP_CSUM;
+ }
+ } else {
+ dev->hard_start_xmit = vortex_start_xmit;
+ }
+
+ if (print_info) {
+ printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
+ print_name,
+ (dev->features & NETIF_F_SG) ? "en":"dis",
+ (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
+ }
+
+ dev->stop = vortex_close;
+ dev->get_stats = vortex_get_stats;
+#ifdef CONFIG_PCI
+ dev->do_ioctl = vortex_ioctl;
+#endif
+ dev->ethtool_ops = &vortex_ethtool_ops;
+ dev->set_multicast_list = set_rx_mode;
+ dev->tx_timeout = vortex_tx_timeout;
+ dev->watchdog_timeo = (watchdog * HZ) / 1000;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = poll_vortex;
+#endif
+ if (pdev) {
+ vp->pm_state_valid = 1;
+ pci_save_state(VORTEX_PCI(vp));
+ acpi_set_WOL(dev);
+ }
+ retval = register_netdev(dev);
+ if (retval == 0)
+ return 0;
+
+free_ring:
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+free_region:
+ if (vp->must_free_region)
+ release_region(ioaddr, vci->io_size);
+ free_netdev(dev);
+ printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
+out:
+ return retval;
+}
+
+static void
+issue_and_wait(struct net_device *dev, int cmd)
+{
+ int i;
+
+ outw(cmd, dev->base_addr + EL3_CMD);
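+	/* Fast path: poll CmdInProgress a few thousand times without delaying;
+	 * most commands complete almost immediately. */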
+ for (i = 0; i < 2000; i++) {
+ if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
+ return;
+ }
+
+ /* OK, that didn't work. Do it the slow way. One second */
+ for (i = 0; i < 100000; i++) {
+ if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
+ dev->name, cmd, i * 10);
+ return;
+ }
+ udelay(10);
+ }
+ printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
+ dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
+}
+
+static void
+vortex_up(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned int config;
+ int i;
+
+ if (VORTEX_PCI(vp)) {
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
+ pci_restore_state(VORTEX_PCI(vp));
+ pci_enable_device(VORTEX_PCI(vp));
+ }
+
+	/* Before initializing, select the active media port. */
+ EL3WINDOW(3);
+ config = inl(ioaddr + Wn3_Config);
+
+ if (vp->media_override != 7) {
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ if (vp->has_nway) {
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: using NWAY device table, not %d\n",
+ dev->name, dev->if_port);
+ dev->if_port = XCVR_NWAY;
+ } else {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = XCVR_100baseTx;
+ while (! (vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: first available media type: %s\n",
+ dev->name, media_tbl[dev->if_port].name);
+ }
+ } else {
+ dev->if_port = vp->default_media;
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: using default media %s\n",
+ dev->name, media_tbl[dev->if_port].name);
+ }
+
+ init_timer(&vp->timer);
+ vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+
+ init_timer(&vp->rx_oom_timer);
+ vp->rx_oom_timer.data = (unsigned long)dev;
+ vp->rx_oom_timer.function = rx_oom_timer;
+
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "%s: Initial media type %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ vp->full_duplex = vp->force_fd;
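+	/* Program the selected transceiver into the 4-bit field at bit 20 of
+	 * the InternalConfig register. */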
+ config = BFINS(config, dev->if_port, 20, 4);
+ if (vortex_debug > 6)
+ printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
+ outl(config, ioaddr + Wn3_Config);
+
+ if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int mii_reg1, mii_reg5;
+ EL3WINDOW(4);
+ /* Read BMSR (reg1) only to clear old status. */
+ mii_reg1 = mdio_read(dev, vp->phys[0], 1);
+ mii_reg5 = mdio_read(dev, vp->phys[0], 5);
+ if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
+ netif_carrier_off(dev); /* No MII device or no link partner report */
+ } else {
+ mii_reg5 &= vp->advertising;
+ if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
+ || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+ vp->full_duplex = 1;
+ netif_carrier_on(dev);
+ }
+ vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+ " info1 %04x, setting %s-duplex.\n",
+ dev->name, vp->phys[0],
+ mii_reg1, mii_reg5,
+ vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
+ EL3WINDOW(3);
+ }
+
+ /* Set the full-duplex bit. */
+ outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+ (vp->large_frames ? 0x40 : 0) |
+ ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
+ ioaddr + Wn3_MAC_Ctrl);
+
+ if (vortex_debug > 1) {
+ printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
+ dev->name, config);
+ }
+
+ issue_and_wait(dev, TxReset);
+ /*
+ * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+ */
+ issue_and_wait(dev, RxReset|0x04);
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ if (vortex_debug > 1) {
+ EL3WINDOW(4);
+ printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ if (vp->cb_fn_base) {
+ unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ if (vp->drv_flags & INVERT_LED_PWR)
+ n |= 0x10;
+ if (vp->drv_flags & INVERT_MII_PWR)
+ n |= 0x4000;
+ outw(n, ioaddr + Wn2_ResetOptions);
+ }
+
+ if (dev->if_port == XCVR_10base2)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ if (dev->if_port != XCVR_NWAY) {
+ EL3WINDOW(4);
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+ outl(vp->rx_ring_dma, ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ vp->cur_tx = vp->dirty_tx = 0;
+ if (vp->drv_flags & IS_BOOMERANG)
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ /* Clear the Rx, Tx rings. */
+ for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */
+ vp->rx_ring[i].status = 0;
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = NULL;
+ outl(0, ioaddr + DownListPtr);
+ }
+	/* Set receiver mode: presumably accept broadcast and phys addr only. */
+ set_rx_mode(dev);
+ /* enable 802.1q tagged frames */
+ set_8021q_mode(dev, 1);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+// issue_and_wait(dev, SetTxStart|0x07ff);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0);
+ vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
+ (vp->full_bus_master_rx ? 0 : RxComplete) |
+ StatsFull | HostError | TxComplete | IntReq
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+ netif_start_queue (dev);
+}
+
+static int
+vortex_open(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int i;
+ int retval;
+
+ /* Use the now-standard shared IRQ implementation. */
+ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
+ &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
+ printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ goto out;
+ }
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
+ vp->rx_ring[i].status = 0; /* Clear complete bit. */
+ vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ }
+ if (i != RX_RING_SIZE) {
+ int j;
+ printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
+ for (j = 0; j < i; j++) {
+ if (vp->rx_skbuff[j]) {
+ dev_kfree_skb(vp->rx_skbuff[j]);
+ vp->rx_skbuff[j] = NULL;
+ }
+ }
+ retval = -ENOMEM;
+ goto out_free_irq;
+ }
+ /* Wrap the ring. */
+ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
+ }
+
+ vortex_up(dev);
+ return 0;
+
+out_free_irq:
+ free_irq(dev->irq, dev);
+out:
+ if (vortex_debug > 1)
+ printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
+ return retval;
+}
+
+static void
+vortex_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int ok = 0;
+ int media_status, mii_status, old_window;
+
+ if (vortex_debug > 2) {
+ printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
+ }
+
+ if (vp->medialock)
+ goto leave_media_alone;
+ disable_irq(dev->irq);
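+	/* Remember the current register window (status bits 15:13) so it can
+	 * be restored before the IRQ is re-enabled below. */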
+ old_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
+ if (media_status & Media_LnkBeat) {
+ netif_carrier_on(dev);
+ ok = 1;
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ } else {
+ netif_carrier_off(dev);
+ if (vortex_debug > 1) {
+ printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ }
+ }
+ break;
+ case XCVR_MII: case XCVR_NWAY:
+ {
+ spin_lock_bh(&vp->lock);
+ mii_status = mdio_read(dev, vp->phys[0], 1);
+ ok = 1;
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
+ dev->name, mii_status);
+ if (mii_status & BMSR_LSTATUS) {
+ int mii_reg5 = mdio_read(dev, vp->phys[0], 5);
+ if (! vp->force_fd && mii_reg5 != 0xffff) {
+ int duplex;
+
+ mii_reg5 &= vp->advertising;
+ duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+ if (vp->full_duplex != duplex) {
+ vp->full_duplex = duplex;
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, vp->full_duplex ? "full" : "half",
+ vp->phys[0], mii_reg5);
+ /* Set the full-duplex bit. */
+ EL3WINDOW(3);
+ outw( (vp->full_duplex ? 0x20 : 0) |
+ (vp->large_frames ? 0x40 : 0) |
+ ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
+ ioaddr + Wn3_MAC_Ctrl);
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
+ /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
+ }
+ }
+ netif_carrier_on(dev);
+ } else {
+ netif_carrier_off(dev);
+ }
+ spin_unlock_bh(&vp->lock);
+ }
+ break;
+ default: /* Other media types handled by Tx timeouts. */
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+ if ( ! ok) {
+ unsigned int config;
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == XCVR_Default) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "%s: Media selection failing, using default "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "%s: Media selection failed, now trying "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ next_tick = media_tbl[dev->if_port].wait;
+ }
+ outw((media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+ config = inl(ioaddr + Wn3_Config);
+ config = BFINS(config, dev->if_port, 20, 4);
+ outl(config, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
+ /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
+ }
+ EL3WINDOW(old_window);
+ enable_irq(dev->irq);
+
+leave_media_alone:
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ mod_timer(&vp->timer, RUN_AT(next_tick));
+ if (vp->deferred)
+ outw(FakeIntr, ioaddr + EL3_CMD);
+ return;
+}
+
+static void vortex_tx_timeout(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
+ EL3WINDOW(4);
+ printk(KERN_ERR " diagnostics: net %04x media %04x dma %08x fifo %04x\n",
+ inw(ioaddr + Wn4_NetDiag),
+ inw(ioaddr + Wn4_Media),
+ inl(ioaddr + PktStatus),
+ inw(ioaddr + Wn4_FIFODiag));
+ /* Slight code bloat to be user friendly. */
+ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+ printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+ if (inw(ioaddr + EL3_STATUS) & IntLatch) {
+ printk(KERN_ERR "%s: Interrupt posted but not delivered --"
+ " IRQ blocked by another device?\n", dev->name);
+		/* Bad idea here, but we might as well handle a few events. */
+ {
+ /*
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
+ */
+ unsigned long flags;
+ local_irq_save(flags);
+ if (vp->full_bus_master_tx)
+ boomerang_interrupt(dev->irq, dev, NULL);
+ else
+ vortex_interrupt(dev->irq, dev, NULL);
+ local_irq_restore(flags);
+ }
+ }
+
+ if (vortex_debug > 0)
+ dump_tx_ring(dev);
+
+ issue_and_wait(dev, TxReset);
+
+ vp->stats.tx_errors++;
+ if (vp->full_bus_master_tx) {
+ printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
+ if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
+ outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
+ ioaddr + DownListPtr);
+ if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+ netif_wake_queue (dev);
+ if (vp->drv_flags & IS_BOOMERANG)
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ } else {
+ vp->stats.tx_dropped++;
+ netif_wake_queue(dev);
+ }
+
+ /* Issue Tx Enable */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+}
+
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct net_device *dev, int status)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int do_tx_reset = 0, reset_mask = 0;
+ unsigned char tx_status = 0;
+
+ if (vortex_debug > 2) {
+ printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
+ }
+
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ tx_status = inb(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+ if (vortex_debug > 2
+ || (tx_status != 0x88 && vortex_debug > 0)) {
+ printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status == 0x82) {
+ printk(KERN_ERR "Probably a duplex mismatch. See "
+ "Documentation/networking/vortex.txt\n");
+ }
+ dump_tx_ring(dev);
+ }
+ if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ outb(0, ioaddr + TxStatus);
+ if (tx_status & 0x30) { /* txJabber or txUnderrun */
+ do_tx_reset = 1;
+ } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
+ do_tx_reset = 1;
+ reset_mask = 0x0108; /* Reset interface logic, but not download logic */
+ } else { /* Merely re-enable the transmitter. */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat;
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+ "stats as an interrupt source.\n", dev->name);
+ EL3WINDOW(5);
+ outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+ vp->intr_enable &= ~StatsFull;
+ EL3WINDOW(7);
+ DoneDidThat++;
+ }
+ }
+ if (status & IntReq) { /* Restore all interrupt sources. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ }
+ if (status & HostError) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
+ dev->name, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
+ int bus_status = inl(ioaddr + PktStatus);
+ /* 0x80000000 PCI master abort. */
+ /* 0x40000000 PCI target abort. */
+ if (vortex_debug)
+ printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
+
+ /* In this case, blow the card away */
+ /* Must not enter D3 or we can't legally issue the reset! */
+ vortex_down(dev, 0);
+ issue_and_wait(dev, TotalReset | 0xff);
+ vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
+ } else if (fifo_diag & 0x0400)
+ do_tx_reset = 1;
+ if (fifo_diag & 0x3000) {
+ /* Reset Rx fifo and upload logic */
+ issue_and_wait(dev, RxReset|0x07);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ /* enable 802.1q VLAN tagged frames */
+ set_8021q_mode(dev, 1);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | HostError, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (do_tx_reset) {
+ issue_and_wait(dev, TxReset|reset_mask);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ if (!vp->full_bus_master_tx)
+ netif_wake_queue(dev);
+ }
+}
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ int len = (skb->len + 3) & ~3;
+ outl( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
+ ioaddr + Wn7_MasterAddr);
+ outw(len, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ /* netif_wake_queue() will be called at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_start_queue (dev); /* AKPM: redundant? */
+ } else {
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ netif_stop_queue(dev);
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+ }
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ int tx_status;
+ int i = 32;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ issue_and_wait(dev, TxReset);
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+static int
+boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+ unsigned long flags;
+
+ if (vortex_debug > 6) {
+ printk(KERN_DEBUG "boomerang_start_xmit()\n");
+ if (vortex_debug > 3)
+ printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ }
+
+ if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
+ if (vortex_debug > 0)
+ printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
+ dev->name);
+ netif_stop_queue(dev);
+ return 1;
+ }
+
+ vp->tx_skbuff[entry] = skb;
+
+ vp->tx_ring[entry].next = 0;
+#if DO_ZEROCOPY
+ if (skb->ip_summed != CHECKSUM_HW)
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
+
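+	/* Map the packet for DMA: a linear skb uses a single fragment
+	 * descriptor; otherwise the header and each page fragment are mapped
+	 * separately, with the last one flagged LAST_FRAG. */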
+ if (!skb_shinfo(skb)->nr_frags) {
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+ } else {
+ int i;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb->len-skb->data_len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ vp->tx_ring[entry].frag[i+1].addr =
+ cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
+ (void*)page_address(frag->page) + frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE));
+
+ if (i == skb_shinfo(skb)->nr_frags-1)
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ else
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ }
+ }
+#else
+ vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+#endif
+
+ spin_lock_irqsave(&vp->lock, flags);
+ /* Wait for the stall to complete. */
+ issue_and_wait(dev, DownStall);
+ prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+ vp->queued_packet++;
+ }
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
+ netif_stop_queue (dev);
+ } else { /* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+		/* Dubious. If the "faster" cyclone-only branch in boomerang_interrupt
+		 * were selected, this would corrupt DN_COMPLETE. No?
+ */
+ prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+
+/*
+ * This is the ISR for the vortex series chips.
+ * full_bus_master_tx == 0 && full_bus_master_rx == 0
+ */
+
+static irqreturn_t
+vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr;
+ int status;
+ int work_done = max_interrupt_work;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ spin_lock(&vp->lock);
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 6)
+ printk("vortex_interrupt(). status=0x%4x\n", status);
+
+ if ((status & IntLatch) == 0)
+ goto handler_exit; /* No interrupt: shared IRQs cause this */
+ handled = 1;
+
+ if (status & IntReq) {
+ status |= vp->deferred;
+ vp->deferred = 0;
+ }
+
+ if (status == 0xffff) /* h/w no longer present (hotplug)? */
+ goto handler_exit;
+
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, inb(ioaddr + Timer));
+
+ do {
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+
+ if (status & TxAvailable) {
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue (dev);
+ }
+
+ if (status & DMADone) {
+ if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
+ if (inw(ioaddr + TxFree) > 1536) {
+ /*
+ * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
+ * insufficient FIFO room, the TxAvailable test will succeed and call
+ * netif_wake_queue()
+ */
+ netif_wake_queue(dev);
+ } else { /* Interrupt when FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ netif_stop_queue(dev);
+ }
+ }
+ }
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+ if (status == 0xffff)
+ break;
+ vortex_error(dev, status);
+ }
+
+ if (--work_done < 0) {
+ printk(KERN_WARNING "%s: Too much work in interrupt, status "
+ "%4.4x.\n", dev->name, status);
+ /* Disable all pending interrupts. */
+ do {
+ vp->deferred |= status;
+ outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ ioaddr + EL3_CMD);
+ outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+ /* The timer will reenable interrupts. */
+ mod_timer(&vp->timer, jiffies + 1*HZ);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ spin_unlock(&vp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * This is the ISR for the boomerang series chips.
+ * full_bus_master_tx == 1 && full_bus_master_rx == 1
+ */
+
+static irqreturn_t
+boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr;
+ int status;
+ int work_done = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+
+ /*
+ * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
+ * and boomerang_start_xmit
+ */
+ spin_lock(&vp->lock);
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 6)
+ printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
+
+ if ((status & IntLatch) == 0)
+ goto handler_exit; /* No interrupt: shared IRQs can cause this */
+
+ if (status == 0xffff) { /* h/w no longer present (hotplug)? */
+ if (vortex_debug > 1)
+ printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
+ goto handler_exit;
+ }
+
+ if (status & IntReq) {
+ status |= vp->deferred;
+ vp->deferred = 0;
+ }
+
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, inb(ioaddr + Timer));
+ do {
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & UpComplete) {
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
+ boomerang_rx(dev);
+ }
+
+ if (status & DownComplete) {
+ unsigned int dirty_tx = vp->dirty_tx;
+
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
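+			/* Reap completed Tx descriptors: unmap and free each
+			 * transmitted skb, advancing dirty_tx until it catches up
+			 * with cur_tx or reaches a descriptor the NIC has not
+			 * finished with. */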
+ while (vp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+#if 1 /* AKPM: the latter is faster, but cyclone-only */
+ if (inl(ioaddr + DownListPtr) ==
+ vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
+ break; /* It still hasn't been processed. */
+#else
+ if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
+ break; /* It still hasn't been processed. */
+#endif
+
+ if (vp->tx_skbuff[entry]) {
+ struct sk_buff *skb = vp->tx_skbuff[entry];
+#if DO_ZEROCOPY
+ int i;
+ for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+ dev_kfree_skb_irq(skb);
+ vp->tx_skbuff[entry] = NULL;
+ } else {
+ printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
+ }
+ /* vp->stats.tx_packets++; Counted below. */
+ dirty_tx++;
+ }
+ vp->dirty_tx = dirty_tx;
+ if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
+ if (vortex_debug > 6)
+ printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
+ netif_wake_queue (dev);
+ }
+ }
+
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
+ vortex_error(dev, status);
+
+ if (--work_done < 0) {
+ printk(KERN_WARNING "%s: Too much work in interrupt, status "
+ "%4.4x.\n", dev->name, status);
+ /* Disable all pending interrupts. */
+ do {
+ vp->deferred |= status;
+ outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ ioaddr + EL3_CMD);
+ outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+ /* The timer will reenable interrupts. */
+ mod_timer(&vp->timer, jiffies + 1*HZ);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
+
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ spin_unlock(&vp->lock);
+ return IRQ_HANDLED;
+}
+
+static int vortex_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ if (vp->bus_master &&
+ ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
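+					/* Use the single-packet bus-master engine to pull the
+					 * frame out of the Rx FIFO: map the buffer, program
+					 * MasterAddr and MasterLen, start the upload, and spin
+					 * until it completes. */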
+ dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
+ pkt_len, PCI_DMA_FROMDEVICE);
+ outl(dma, ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ outw(StartDMAUp, ioaddr + EL3_CMD);
+ while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+ ;
+ pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+ } else {
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vortex_debug > 0)
+ printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+ "size %d.\n", dev->name, pkt_len);
+ }
+ vp->stats.rx_dropped++;
+ issue_and_wait(dev, RxDiscard);
+ }
+
+ return 0;
+}
+
+static int
+boomerang_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ long ioaddr = dev->base_addr;
+ int rx_status;
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+ if (vortex_debug > 5)
+ printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
+
+ while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+ if (--rx_work_limit < 0)
+ break;
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vortex_debug > 2)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+ dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
+
+ if (vortex_debug > 4)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ vp->rx_skbuff[entry]->tail,
+ pkt_len);
+ pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ vp->rx_copy++;
+ } else {
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ vp->rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int csum_bits = rx_status & 0xee000000;
+ if (csum_bits &&
+ (csum_bits == (IPChksumValid | TCPChksumValid) ||
+ csum_bits == (IPChksumValid | UDPChksumValid))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ vp->rx_csumhits++;
+ }
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL) {
+ static unsigned long last_jif;
+ if ((jiffies - last_jif) > 10 * HZ) {
+ printk(KERN_WARNING "%s: memory shortage\n", dev->name);
+ last_jif = jiffies;
+ }
+ if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
+ mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
+ break; /* Bad news! */
+ }
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ outw(UpUnstall, ioaddr + EL3_CMD);
+ }
+ return 0;
+}
+
+/*
+ * If we've hit a total OOM refilling the Rx ring, we poll once a second
+ * for some memory.  Otherwise there is no way to restart the Rx process.
+ */
+static void
+rx_oom_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ struct vortex_private *vp = netdev_priv(dev);
+
+ spin_lock_irq(&vp->lock);
+ if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
+ boomerang_rx(dev);
+ if (vortex_debug > 1) {
+ printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
+ ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
+ }
+ spin_unlock_irq(&vp->lock);
+}
+
+static void
+vortex_down(struct net_device *dev, int final_down)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ netif_stop_queue (dev);
+
+ del_timer_sync(&vp->rx_oom_timer);
+ del_timer_sync(&vp->timer);
+
+ /* Turn off statistics ASAP. We update vp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ /* Disable receiving 802.1q tagged frames */
+ set_8021q_mode(dev, 0);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx)
+ outl(0, ioaddr + UpListPtr);
+ if (vp->full_bus_master_tx)
+ outl(0, ioaddr + DownListPtr);
+
+ if (final_down && VORTEX_PCI(vp)) {
+ pci_save_state(VORTEX_PCI(vp));
+ acpi_set_WOL(dev);
+ }
+}
+
+static int
+vortex_close(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+
+ if (netif_device_present(dev))
+ vortex_down(dev, 1);
+
+ if (vortex_debug > 1) {
+ printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d Rx pre-checksummed %d.\n",
+ dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
+ }
+
+#if DO_ZEROCOPY
+ if ( vp->rx_csumhits &&
+ ((vp->drv_flags & HAS_HWCKSM) == 0) &&
+ (hw_checksums[vp->card_idx] == -1)) {
+ printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
+ }
+#endif
+
+ free_irq(dev->irq, dev);
+
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+ pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (vp->tx_skbuff[i]) {
+ struct sk_buff *skb = vp->tx_skbuff[i];
+#if DO_ZEROCOPY
+ int k;
+
+ for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+ le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
+ pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+ dev_kfree_skb(skb);
+ vp->tx_skbuff[i] = NULL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+dump_tx_ring(struct net_device *dev)
+{
+ if (vortex_debug > 0) {
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (vp->full_bus_master_tx) {
+ int i;
+			int stalled = inl(ioaddr + PktStatus) & 0x04;	/* Possibly racy, but it's only debug stuff. */
+
+ printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+ vp->full_bus_master_tx,
+ vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
+ vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
+ printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
+ inl(ioaddr + DownListPtr),
+ &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+ issue_and_wait(dev, DownStall);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+#if DO_ZEROCOPY
+ le32_to_cpu(vp->tx_ring[i].frag[0].length),
+#else
+ le32_to_cpu(vp->tx_ring[i].length),
+#endif
+ le32_to_cpu(vp->tx_ring[i].status));
+ }
+ if (!stalled)
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ }
+ }
+}
+
+static struct net_device_stats *vortex_get_stats(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
+ spin_lock_irqsave (&vp->lock, flags);
+ update_stats(dev->base_addr, dev);
+ spin_unlock_irqrestore (&vp->lock, flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(long ioaddr, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
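+	/* Note the current register window so it can be restored after the
+	 * statistics in windows 6 and 4 have been read. */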
+ int old_window = inw(ioaddr + EL3_CMD);
+
+ if (old_window == 0xffff) /* Chip suspended or ejected. */
+ return;
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+	/* Don't bother with the Rx-packet bits in register 9 (the extension of
+	   registers 6&7).  If we did use the full 6&7 values the atomic update
+	   assumption above would be invalid. */
+ vp->stats.rx_bytes += inw(ioaddr + 10);
+ vp->stats.tx_bytes += inw(ioaddr + 12);
+ /* Extra stats for get_ethtool_stats() */
+ vp->xstats.tx_multiple_collisions += inb(ioaddr + 2);
+ vp->xstats.tx_deferred += inb(ioaddr + 8);
+ EL3WINDOW(4);
+ vp->xstats.rx_bad_ssd += inb(ioaddr + 12);
+
+ {
+ u8 up = inb(ioaddr + 13);
+ vp->stats.rx_bytes += (up & 0x0f) << 16;
+ vp->stats.tx_bytes += (up & 0xf0) << 12;
+ }
+
+ EL3WINDOW(old_window >> 13);
+ return;
+}
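+/* Worked example of the byte-counter extension above (illustrative only,
+ * not driver logic): window 4, register 13 holds the upper nibbles of the
+ * Rx and Tx byte counters.  If it reads 0x25, the code adds
+ * (0x05 << 16) = 0x50000 to rx_bytes and (0x20 << 12) = 0x20000 to
+ * tx_bytes, so each nibble lands in bits 16-19 of its 20-bit counter.
+ */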
+
+static int vortex_nway_reset(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ EL3WINDOW(4);
+ rc = mii_nway_restart(&vp->mii);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return rc;
+}
+
+static u32 vortex_get_link(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ EL3WINDOW(4);
+ rc = mii_link_ok(&vp->mii);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return rc;
+}
+
+static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ EL3WINDOW(4);
+ rc = mii_ethtool_gset(&vp->mii, cmd);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return rc;
+}
+
+static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ EL3WINDOW(4);
+ rc = mii_ethtool_sset(&vp->mii, cmd);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return rc;
+}
+
+static u32 vortex_get_msglevel(struct net_device *dev)
+{
+ return vortex_debug;
+}
+
+static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
+{
+ vortex_debug = dbg;
+}
+
+static int vortex_get_stats_count(struct net_device *dev)
+{
+ return VORTEX_NUM_STATS;
+}
+
+static void vortex_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ update_stats(dev->base_addr, dev);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ data[0] = vp->xstats.tx_deferred;
+ data[1] = vp->xstats.tx_multiple_collisions;
+ data[2] = vp->xstats.rx_bad_ssd;
+}
+
+
+static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void vortex_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (VORTEX_PCI(vp)) {
+ strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ } else {
+ if (VORTEX_EISA(vp))
+			strcpy(info->bus_info, vp->gendev->bus_id);
+ else
+ sprintf(info->bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+ }
+}
+
+static struct ethtool_ops vortex_ethtool_ops = {
+ .get_drvinfo = vortex_get_drvinfo,
+ .get_strings = vortex_get_strings,
+ .get_msglevel = vortex_get_msglevel,
+ .set_msglevel = vortex_set_msglevel,
+ .get_ethtool_stats = vortex_get_ethtool_stats,
+ .get_stats_count = vortex_get_stats_count,
+ .get_settings = vortex_get_settings,
+ .set_settings = vortex_set_settings,
+ .get_link = vortex_get_link,
+ .nway_reset = vortex_nway_reset,
+};
+
+#ifdef CONFIG_PCI
+/*
+ * Must power the device up to do MDIO operations
+ */
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int err;
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int state = 0;
+
+ if(VORTEX_PCI(vp))
+ state = VORTEX_PCI(vp)->current_state;
+
+ /* The kernel core really should have pci_get_power_state() */
+
+ if(state != 0)
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
+ spin_lock_irqsave(&vp->lock, flags);
+ EL3WINDOW(4);
+ err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ if(state != 0)
+ pci_set_power_state(VORTEX_PCI(vp), state);
+
+ return err;
+}
+#endif
+
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (vortex_debug > 0)
+ printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ outw(new_mode, ioaddr + EL3_CMD);
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
+ Note that this must be done after each RxReset due to some backwards
+ compatibility logic in the Cyclone and Tornado ASICs */
+
+/* The Ethernet Type used for 802.1q tagged frames */
+#define VLAN_ETHER_TYPE 0x8100
+
+static void set_8021q_mode(struct net_device *dev, int enable)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int old_window = inw(ioaddr + EL3_CMD);
+ int mac_ctrl;
+
+ if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
+ /* cyclone and tornado chipsets can recognize 802.1q
+ * tagged frames and treat them correctly */
+
+ int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */
+ if (enable)
+ max_pkt_size += 4; /* 802.1Q VLAN tag */
+
+ EL3WINDOW(3);
+ outw(max_pkt_size, ioaddr+Wn3_MaxPktSize);
+
+ /* set VlanEtherType to let the hardware checksumming
+ treat tagged frames correctly */
+ EL3WINDOW(7);
+ outw(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
+ } else {
+ /* on older cards we have to enable large frames */
+
+ vp->large_frames = dev->mtu > 1500 || enable;
+
+ EL3WINDOW(3);
+ mac_ctrl = inw(ioaddr+Wn3_MAC_Ctrl);
+ if (vp->large_frames)
+ mac_ctrl |= 0x40;
+ else
+ mac_ctrl &= ~0x40;
+ outw(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
+ }
+
+ EL3WINDOW(old_window);
+}
+#else
+
+static void set_8021q_mode(struct net_device *dev, int enable)
+{
+}
+
+
+#endif
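+/* Size arithmetic of set_8021q_mode() by example (illustrative): with the
+ * default MTU of 1500, Wn3_MaxPktSize is programmed to 1500 + 14 = 1514
+ * bytes, and enabling 802.1q tagging raises that to 1518 to make room for
+ * the 4-byte VLAN tag.
+ */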
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define mdio_delay() inl(mdio_addr)
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long ioaddr, int bits)
+{
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ int i;
+ long ioaddr = dev->base_addr;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+	/* Read the two transition bits, the 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
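+/* Illustrative use of the bit-banged access above (a sketch, not code the
+ * driver calls here):
+ *
+ *	int bmsr = mdio_read(dev, vp->mii.phy_id, MII_BMSR);
+ *	int link_up = (bmsr != 0xffff) && (bmsr & BMSR_LSTATUS);
+ *
+ * where MII_BMSR/BMSR_LSTATUS come from <linux/mii.h> and a result of
+ * 0xffff normally means no PHY answered at that address.
+ */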
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ long ioaddr = dev->base_addr;
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (vp->enable_wol) {
+ /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+ EL3WINDOW(7);
+ outw(2, ioaddr + 0x0c);
+ /* The RxFilter must accept the WOL frames. */
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+
+ pci_enable_wake(VORTEX_PCI(vp), 0, 1);
+ }
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
+}
+
+
+static void __devexit vortex_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct vortex_private *vp;
+
+ if (!dev) {
+ printk("vortex_remove_one called for Compaq device!\n");
+ BUG();
+ }
+
+ vp = netdev_priv(dev);
+
+ /* AKPM: FIXME: we should have
+ * if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+ * here
+ */
+ unregister_netdev(dev);
+
+ if (VORTEX_PCI(vp)) {
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
+ if (vp->pm_state_valid)
+ pci_restore_state(VORTEX_PCI(vp));
+ pci_disable_device(VORTEX_PCI(vp));
+ }
+ /* Should really use issue_and_wait() here */
+ outw(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
+ dev->base_addr + EL3_CMD);
+
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+ if (vp->must_free_region)
+ release_region(dev->base_addr, vp->io_size);
+ free_netdev(dev);
+}
+
+
+static struct pci_driver vortex_driver = {
+ .name = "3c59x",
+ .probe = vortex_init_one,
+ .remove = __devexit_p(vortex_remove_one),
+ .id_table = vortex_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = vortex_suspend,
+ .resume = vortex_resume,
+#endif
+};
+
+
+static int vortex_have_pci;
+static int vortex_have_eisa;
+
+
+static int __init vortex_init (void)
+{
+ int pci_rc, eisa_rc;
+
+ pci_rc = pci_module_init(&vortex_driver);
+ eisa_rc = vortex_eisa_init();
+
+ if (pci_rc == 0)
+ vortex_have_pci = 1;
+ if (eisa_rc > 0)
+ vortex_have_eisa = 1;
+
+ return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
+}
+
+
+static void __exit vortex_eisa_cleanup (void)
+{
+ struct vortex_private *vp;
+ long ioaddr;
+
+#ifdef CONFIG_EISA
+ /* Take care of the EISA devices */
+ eisa_driver_unregister (&vortex_eisa_driver);
+#endif
+
+ if (compaq_net_device) {
+ vp = compaq_net_device->priv;
+ ioaddr = compaq_net_device->base_addr;
+
+ unregister_netdev (compaq_net_device);
+ outw (TotalReset, ioaddr + EL3_CMD);
+ release_region (ioaddr, VORTEX_TOTAL_SIZE);
+
+ free_netdev (compaq_net_device);
+ }
+}
+
+
+static void __exit vortex_cleanup (void)
+{
+ if (vortex_have_pci)
+ pci_unregister_driver (&vortex_driver);
+ if (vortex_have_eisa)
+ vortex_eisa_cleanup ();
+}
+
+
+module_init(vortex_init);
+module_exit(vortex_cleanup);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
new file mode 100644
index 000000000000..18b027e73f28
--- /dev/null
+++ b/drivers/net/7990.c
@@ -0,0 +1,681 @@
+/*
+ * 7990.c -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ * NB: this was made easy by the fact that Jes Sorensen had cleaned up
+ * most of a2065 and sunlance with the aim of merging them, so the
+ * common code was pretty obvious.
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/route.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/irq.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#ifdef CONFIG_HP300
+#include <asm/blinken.h>
+#endif
+
+#include "7990.h"
+
+#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+
+#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#include "hplance.h"
+
+#undef WRITERAP
+#undef WRITERDP
+#undef READRDP
+
+#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+
+/* Lossage Factor Nine, Mr Sulu. */
+#define WRITERAP(lp,x) (lp->writerap(lp,x))
+#define WRITERDP(lp,x) (lp->writerdp(lp,x))
+#define READRDP(lp) (lp->readrdp(lp))
+
+#else
+
+/* These inlines can be used if only CONFIG_HPLANCE is defined */
+static inline void WRITERAP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline void WRITERDP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline __u16 READRDP(struct lance_private *lp)
+{
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+#endif
+#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+
+/* debugging output macros, various flavours */
+/* #define TEST_HITS */
+#ifdef UNDEF
+#define PRINT_RINGS() \
+do { \
+ int t; \
+ for (t=0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
+ ib->brx_ring[t].length,\
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
+ }\
+ for (t=0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
+ ib->btx_ring[t].length,\
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
+ }\
+} while (0)
+#else
+#define PRINT_RINGS()
+#endif
+
+/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
+static void load_csrs (struct lance_private *lp)
+{
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR (aib);
+
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
+}
+
+/* #define to 0 or 1 appropriately */
+#define DEBUG_IRING 0
+/* Set up the Lance Rx and Tx rings and the init block */
+static void lance_init_ring (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+	ib->mode = LE_MO_PROM;	/* promiscuous mode for now; lance_set_multicast() sets the real mode */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
+#ifdef __BIG_ENDIAN
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+#else
+ for (i=0; i<6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
+#endif
+
+ if (DEBUG_IRING)
+ printk ("TX rings:\n");
+
+ lp->tx_full = 0;
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ if (DEBUG_IRING)
+ printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk ("RX rings:\n");
+ for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ if (DEBUG_IRING)
+ printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk ("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk ("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ PRINT_RINGS();
+}
+
+/* LANCE must be STOPped before we do this, too... */
+static int init_restart_lance (struct lance_private *lp)
+{
+ int i;
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
+
+ /* Need a hook here for sunlance ledma stuff */
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+
+ return 0;
+}
+
+static int lance_reset (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ load_csrs (lp);
+ lance_init_ring (dev);
+ dev->trans_start = jiffies;
+ status = init_restart_lance (lp);
+#ifdef DEBUG_DRIVER
+ printk ("Lance restart=%d\n", status);
+#endif
+ return status;
+}
+
+static int lance_rx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+ int len = 0; /* XXX shut up gcc warnings */
+ struct sk_buff *skb = 0; /* XXX shut up gcc warnings */
+#ifdef TEST_HITS
+ int i;
+#endif
+
+#ifdef TEST_HITS
+ printk ("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk ("]");
+#endif
+#ifdef CONFIG_HP300
+ blinken_leds(0x40, 0);
+#endif
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [lp->rx_new]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = dev_alloc_skb (len+2);
+
+ if (skb == 0) {
+ printk ("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put (skb, len); /* make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
+ len, 0);
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+#ifdef CONFIG_HP300
+ blinken_leds(0x80, 0);
+#endif
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring [i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ lp->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ lp->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
+}
+
+static irqreturn_t
+lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ spin_lock (&lp->devlock);
+
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
+
+ PRINT_RINGS();
+
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock (&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
+ }
+
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx (dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx (dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+int lance_open (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int res;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, 0, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+ spin_lock_init(&lp->devlock);
+ netif_start_queue (dev);
+
+ return res;
+}
+
+int lance_close (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue (dev);
+
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ free_irq(lp->irq, dev);
+
+ return 0;
+}
+
+void lance_tx_timeout(struct net_device *dev)
+{
+ printk("lance_tx_timeout\n");
+ lance_reset(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
+ unsigned long flags;
+
+ if (!TX_BUFFS_AVAIL)
+ return -1;
+
+ netif_stop_queue (dev);
+
+ skblen = skb->len;
+
+#ifdef DEBUG_DRIVER
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk ("\n");
+ printk ("%2.2x ", skb->data [i]);
+ }
+ }
+#endif
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+
+ if (skb->len < ETH_ZLEN)
+ memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
+ memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave (&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return 0;
+}
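+/* Padding example for the transmit path above (illustrative): a 42-byte ARP
+ * frame is sent with length ETH_ZLEN (60); the whole 60-byte buffer is
+ * zeroed first, the 42 real bytes are copied over it, and the descriptor
+ * length is stored as the negative (two's-complement) value with the top
+ * four bits forced to one, as the TMD format requires.
+ */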
+
+struct net_device_stats *lance_get_stats (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/* taken from the depca driver via a2065.c */
+static void lance_load_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct dev_mc_list *dmi=dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI){
+ ib->filter [0] = 0xffffffff;
+ ib->filter [1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++){
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+ return;
+}
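+/* Hash example for the filter above (illustrative): the top six bits of the
+ * little-endian CRC select one of the 64 filter bits.  If crc >> 26 == 37,
+ * the address sets bit 5 (37 & 0xf) of 16-bit filter word 2 (37 >> 4).
+ */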
+
+
+void lance_set_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int stopped;
+
+ stopped = netif_queue_stopped(dev);
+ if (!stopped)
+ netif_stop_queue (dev);
+
+ while (lp->tx_old != lp->tx_new)
+ schedule();
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast (dev);
+ }
+ load_csrs (lp);
+ init_restart_lance (lp);
+
+ if (!stopped)
+ netif_start_queue (dev);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ spin_lock (&lp->devlock);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STRT);
+ spin_unlock (&lp->devlock);
+ lance_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/7990.h b/drivers/net/7990.h
new file mode 100644
index 000000000000..31ae5099738d
--- /dev/null
+++ b/drivers/net/7990.h
@@ -0,0 +1,256 @@
+/*
+ * 7990.h -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ */
+
+#ifndef _7990_H
+#define _7990_H
+
+/* The lance only has two register locations. We communicate mostly via memory. */
+#define LANCE_RDP 0 /* Register Data Port */
+#define LANCE_RAP 2 /* Register Address Port */
+
+/* Transmit/receive ring definitions.
+ * We allow the specific drivers to override these defaults if they want to.
+ * NB: according to lance.c, increasing the number of buffers is a waste
+ * of space and reduces the chance that an upper layer will be able to
+ * reorder queued Tx packets based on priority. [Clearly there is a minimum
+ * limit too: too small and we drop rx packets and can't tx at full speed.]
+ * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
+ */
+
+/* Blast! This won't work. The problem is that we can't specify a default
+ * setting because that would cause the lance_init_block struct to be
+ * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+#endif
+
+#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
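+/* With the default logs above (1 and 3) this works out to 2 Tx and 8 Rx
+ * ring entries, ring masks of 1 and 7, and 1544-byte buffers; a purely
+ * arithmetic consequence of the macros, noted here for illustration.
+ */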
+
+/* Each receive buffer is described by a receive message descriptor (RMD) */
+struct lance_rx_desc {
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ volatile unsigned short mblength; /* Actual number of bytes received */
+};
+
+/* Ditto for TMD: */
+struct lance_tx_desc {
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short misc;
+};
+
+/* There are three memory structures accessed by the LANCE:
+ * the initialization block, the receive and transmit descriptor rings,
+ * and the data buffers themselves. In fact we might as well put the
+ * init block,the Tx and Rx rings and the buffers together in memory:
+ */
+struct lance_init_block {
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its startaddr
+ * in order to force alignment to an eight byte boundary.
+ */
+};
+
+/* This is where we keep all the stuff the driver needs to know about.
+ * I'm definitely unhappy about the mechanism for allowing specific
+ * drivers to add things...
+ */
+struct lance_private
+{
+ char *name;
+ unsigned long base;
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ struct net_device_stats stats;
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
+
+ unsigned int irq; /* IRQ to register */
+
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
+ spinlock_t devlock;
+ char tx_full;
+};
+
+/*
+ * Am7990 Control and Status Registers
+ */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
+ * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
+ */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+/* and this one is from the C-LANCE data sheet... */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+/*
+ * Receive Flags
+ */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+/*
+ * Error Flags
+ */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+/* Miscellaneous useful macros */
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
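+/* Worked example of TX_BUFFS_AVAIL (illustrative): with an 8-entry ring
+ * (tx_ring_mod_mask == 7), tx_old == 2 and tx_new == 5 give 2 + 7 - 5 = 4
+ * free slots; after wrap-around, tx_old == 6 and tx_new == 2 give
+ * 6 - 2 - 1 = 3.  One descriptor is always left unused so a full ring can
+ * be told apart from an empty one.
+ */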
+
+/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
+/* Now the prototypes we export */
+extern int lance_open(struct net_device *dev);
+extern int lance_close (struct net_device *dev);
+extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+extern struct net_device_stats *lance_get_stats (struct net_device *dev);
+extern void lance_set_multicast (struct net_device *dev);
+extern void lance_tx_timeout(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void lance_poll(struct net_device *dev);
+#endif
+
+#endif /* ndef _7990_H */
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
new file mode 100644
index 000000000000..58c6a85c3aa2
--- /dev/null
+++ b/drivers/net/8139cp.c
@@ -0,0 +1,1904 @@
+/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
+/*
+ Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
+
+ Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
+ Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
+ Copyright 2001 Manfred Spraul [natsemi.c]
+ Copyright 1999-2001 by Donald Becker. [natsemi.c]
+ Written 1997-2001 by Donald Becker. [8139too.c]
+ Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+ Contributors:
+
+ Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
+ PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
+ LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
+
+ TODO:
+ * Test Tx checksumming thoroughly
+ * Implement dev->tx_timeout
+
+ Low priority TODO:
+ * Complete reset on PciErr
+ * Consider Rx interrupt mitigation using TimerIntr
+ * Investigate using skb->priority with h/w VLAN priority
+ * Investigate using High Priority Tx Queue with skb->priority
+ * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
+ * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
+ * Implement Tx software interrupt mitigation via
+ Tx descriptor bit
+ * The real minimum of CP_MIN_MTU is 4 bytes. However,
+ for this to be supported, one must(?) turn on packet padding.
+ * Support external MII transceivers (patch available)
+
+ NOTES:
+ * TX checksumming is considered experimental. It is off by
+ default, use ethtool to turn it on.
+
+ */
+
+#define DRV_NAME "8139cp"
+#define DRV_VERSION "1.2"
+#define DRV_RELDATE "Mar 22, 2004"
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/cache.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* VLAN tagging feature enable/disable */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define CP_VLAN_TAG_USED 1
+#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
+ do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
+#else
+#define CP_VLAN_TAG_USED 0
+#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
+ do { (tx_desc)->opts2 = 0; } while (0)
+#endif
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int debug = -1;
+MODULE_PARM (debug, "i");
+MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+MODULE_PARM (multicast_filter_limit, "i");
+MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
+
+#define PFX DRV_NAME ": "
+
+#ifndef TRUE
+#define FALSE 0
+#define TRUE (!FALSE)
+#endif
+
+#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
+#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
+#define CP_REGS_SIZE (0xff + 1)
+#define CP_REGS_VER 1 /* version 1 */
+#define CP_RX_RING_SIZE 64
+#define CP_TX_RING_SIZE 64
+#define CP_RING_BYTES \
+ ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
+ (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
+ CP_STATS_SIZE)
+#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
+#define TX_BUFFS_AVAIL(CP) \
+ (((CP)->tx_tail <= (CP)->tx_head) ? \
+ (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
+ (CP)->tx_tail - (CP)->tx_head - 1)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define RX_OFFSET 2
+#define CP_INTERNAL_PHY 32
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
+#define CP_MAX_MTU 4096
+
+enum {
+ /* NIC register offsets */
+ MAC0 = 0x00, /* Ethernet hardware address. */
+ MAR0 = 0x08, /* Multicast filter. */
+ StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
+ TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
+ HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
+ Cmd = 0x37, /* Command register */
+ IntrMask = 0x3C, /* Interrupt mask */
+ IntrStatus = 0x3E, /* Interrupt status */
+ TxConfig = 0x40, /* Tx configuration */
+ ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
+ RxConfig = 0x44, /* Rx configuration */
+ RxMissed = 0x4C, /* 24 bits valid, write clears */
+ Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
+ Config1 = 0x52, /* Config1 */
+ Config3 = 0x59, /* Config3 */
+ Config4 = 0x5A, /* Config4 */
+ MultiIntr = 0x5C, /* Multiple interrupt select */
+ BasicModeCtrl = 0x62, /* MII BMCR */
+ BasicModeStatus = 0x64, /* MII BMSR */
+ NWayAdvert = 0x66, /* MII ADVERTISE */
+ NWayLPAR = 0x68, /* MII LPA */
+ NWayExpansion = 0x6A, /* MII Expansion */
+ Config5 = 0xD8, /* Config5 */
+ TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
+ RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
+ CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
+ IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
+ RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
+ TxThresh = 0xEC, /* Early Tx threshold */
+ OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
+ OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
+
+ /* Tx and Rx status descriptors */
+ DescOwn = (1 << 31), /* Descriptor is owned by NIC */
+ RingEnd = (1 << 30), /* End of descriptor ring */
+ FirstFrag = (1 << 29), /* First segment of a packet */
+ LastFrag = (1 << 28), /* Final segment of a packet */
+ TxError = (1 << 23), /* Tx error summary */
+ RxError = (1 << 20), /* Rx error summary */
+ IPCS = (1 << 18), /* Calculate IP checksum */
+ UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
+ TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+ RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
+ IPFail = (1 << 15), /* IP checksum failed */
+ UDPFail = (1 << 14), /* UDP/IP checksum failed */
+ TCPFail = (1 << 13), /* TCP/IP checksum failed */
+ NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
+ PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
+ PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
+ RxProtoTCP = 1,
+ RxProtoUDP = 2,
+ RxProtoIP = 3,
+ TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
+ TxOWC = (1 << 22), /* Tx Out-of-window collision */
+ TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
+ TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
+ TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
+ TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
+ RxErrFrame = (1 << 27), /* Rx frame alignment error */
+ RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
+ RxErrCRC = (1 << 18), /* Rx CRC error */
+ RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
+ RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
+ RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
+
+ /* StatsAddr register */
+ DumpStats = (1 << 3), /* Begin stats dump */
+
+ /* RxConfig register */
+ RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
+ RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
+ AcceptErr = 0x20, /* Accept packets with CRC errors */
+ AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
+ AcceptBroadcast = 0x08, /* Accept broadcast packets */
+ AcceptMulticast = 0x04, /* Accept multicast packets */
+ AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
+ AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
+
+ /* IntrMask / IntrStatus registers */
+ PciErr = (1 << 15), /* System error on the PCI bus */
+ TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
+ LenChg = (1 << 13), /* Cable length change */
+ SWInt = (1 << 8), /* Software-requested interrupt */
+ TxEmpty = (1 << 7), /* No Tx descriptors available */
+ RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
+ LinkChg = (1 << 5), /* Packet underrun, or link change */
+ RxEmpty = (1 << 4), /* No Rx descriptors available */
+ TxErr = (1 << 3), /* Tx error */
+ TxOK = (1 << 2), /* Tx packet sent */
+ RxErr = (1 << 1), /* Rx error */
+ RxOK = (1 << 0), /* Rx packet received */
+ IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
+ but hardware likes to raise it */
+
+ IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
+ RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
+ RxErr | RxOK | IntrResvd,
+
+ /* C mode command register */
+ CmdReset = (1 << 4), /* Enable to reset; self-clearing */
+ RxOn = (1 << 3), /* Rx mode enable */
+ TxOn = (1 << 2), /* Tx mode enable */
+
+ /* C+ mode command register */
+ RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
+ RxChkSum = (1 << 5), /* Rx checksum offload enable */
+ PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
+ PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
+ CpRxOn = (1 << 1), /* Rx mode enable */
+ CpTxOn = (1 << 0), /* Tx mode enable */
+
+	/* Cfg9346 EEPROM control register */
+ Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
+ Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
+
+ /* TxConfig register */
+ IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ /* Early Tx Threshold register */
+ TxThreshMask = 0x3f, /* Mask bits 5-0 */
+ TxThreshMax = 2048, /* Max early Tx threshold */
+
+ /* Config1 register */
+ DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
+ LWACT = (1 << 4), /* LWAKE active mode */
+ PMEnable = (1 << 0), /* Enable various PM features of chip */
+
+ /* Config3 register */
+ PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+
+ /* Config4 register */
+ LWPTN = (1 << 1), /* LWAKE Pattern */
+ LWPME = (1 << 4), /* LANWAKE vs PMEB */
+
+ /* Config5 register */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ LANWake = (1 << 1), /* Enable LANWake signal */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
+ cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
+ cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
+ cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
+};
+
+static const unsigned int cp_rx_config =
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+
+struct cp_desc {
+ u32 opts1;
+ u32 opts2;
+ u64 addr;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ unsigned frag;
+};
+
+struct cp_dma_stats {
+ u64 tx_ok;
+ u64 rx_ok;
+ u64 tx_err;
+ u32 rx_err;
+ u16 rx_fifo;
+ u16 frame_align;
+ u32 tx_ok_1col;
+ u32 tx_ok_mcol;
+ u64 rx_ok_phys;
+ u64 rx_ok_bcast;
+ u32 rx_ok_mcast;
+ u16 tx_abort;
+ u16 tx_underrun;
+} __attribute__((packed));
+
+struct cp_extra_stats {
+ unsigned long rx_frags;
+};
+
+struct cp_private {
+ void __iomem *regs;
+ struct net_device *dev;
+ spinlock_t lock;
+ u32 msg_enable;
+
+ struct pci_dev *pdev;
+ u32 rx_config;
+ u16 cpcmd;
+
+ struct net_device_stats net_stats;
+ struct cp_extra_stats cp_stats;
+ struct cp_dma_stats *nic_stats;
+ dma_addr_t nic_stats_dma;
+
+ unsigned rx_tail ____cacheline_aligned;
+ struct cp_desc *rx_ring;
+ struct ring_info rx_skb[CP_RX_RING_SIZE];
+ unsigned rx_buf_sz;
+
+ unsigned tx_head ____cacheline_aligned;
+ unsigned tx_tail;
+
+ struct cp_desc *tx_ring;
+ struct ring_info tx_skb[CP_TX_RING_SIZE];
+ dma_addr_t ring_dma;
+
+#if CP_VLAN_TAG_USED
+ struct vlan_group *vlgrp;
+#endif
+
+ unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+
+ struct mii_if_info mii_if;
+};
+
+#define cpr8(reg) readb(cp->regs + (reg))
+#define cpr16(reg) readw(cp->regs + (reg))
+#define cpr32(reg) readl(cp->regs + (reg))
+#define cpw8(reg,val) writeb((val), cp->regs + (reg))
+#define cpw16(reg,val) writew((val), cp->regs + (reg))
+#define cpw32(reg,val) writel((val), cp->regs + (reg))
+#define cpw8_f(reg,val) do { \
+ writeb((val), cp->regs + (reg)); \
+ readb(cp->regs + (reg)); \
+ } while (0)
+#define cpw16_f(reg,val) do { \
+ writew((val), cp->regs + (reg)); \
+ readw(cp->regs + (reg)); \
+ } while (0)
+#define cpw32_f(reg,val) do { \
+ writel((val), cp->regs + (reg)); \
+ readl(cp->regs + (reg)); \
+ } while (0)
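+/* The *_f variants are write-then-read-back helpers: e.g. cpw16_f(IntrMask, 0)
+ * issues the writew() and immediately reads the same register back, forcing
+ * any posted PCI write out to the chip before the caller continues.  (Usage
+ * note for illustration; the plain cpw* macros do not flush.)
+ */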
+
+
+static void __cp_set_rx_mode (struct net_device *dev);
+static void cp_tx (struct cp_private *cp);
+static void cp_clean_rings (struct cp_private *cp);
+
+static struct pci_device_id cp_pci_tbl[] = {
+ { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "tx_ok" },
+ { "rx_ok" },
+ { "tx_err" },
+ { "rx_err" },
+ { "rx_fifo" },
+ { "frame_align" },
+ { "tx_ok_1col" },
+ { "tx_ok_mcol" },
+ { "rx_ok_phys" },
+ { "rx_ok_bcast" },
+ { "rx_ok_mcast" },
+ { "tx_abort" },
+ { "tx_underrun" },
+ { "rx_frags" },
+};
+
+
+#if CP_VLAN_TAG_USED
+static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cp->vlgrp = grp;
+ cp->cpcmd |= RxVlanOn;
+ cpw16(CpCmd, cp->cpcmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cp->cpcmd &= ~RxVlanOn;
+ cpw16(CpCmd, cp->cpcmd);
+ if (cp->vlgrp)
+ cp->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+#endif /* CP_VLAN_TAG_USED */
+
+static inline void cp_set_rxbufsize (struct cp_private *cp)
+{
+ unsigned int mtu = cp->dev->mtu;
+
+ if (mtu > ETH_DATA_LEN)
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ cp->rx_buf_sz = mtu + ETH_HLEN + 8;
+ else
+ cp->rx_buf_sz = PKT_BUF_SZ;
+}
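+/* Buffer sizing above, by example (illustrative): a standard 1500-byte MTU
+ * keeps the default PKT_BUF_SZ of 1536, while an MTU of 4000 yields
+ * 4000 + 14 (Ethernet header) + 8 (FCS plus optional VLAN tag) = 4022 bytes
+ * per Rx buffer.
+ */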
+
+static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
+ struct cp_desc *desc)
+{
+ skb->protocol = eth_type_trans (skb, cp->dev);
+
+ cp->net_stats.rx_packets++;
+ cp->net_stats.rx_bytes += skb->len;
+ cp->dev->last_rx = jiffies;
+
+#if CP_VLAN_TAG_USED
+ if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
+ vlan_hwaccel_receive_skb(skb, cp->vlgrp,
+ be16_to_cpu(desc->opts2 & 0xffff));
+ } else
+#endif
+ netif_receive_skb(skb);
+}
+
+static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
+ u32 status, u32 len)
+{
+ if (netif_msg_rx_err (cp))
+ printk (KERN_DEBUG
+ "%s: rx err, slot %d status 0x%x len %d\n",
+ cp->dev->name, rx_tail, status, len);
+ cp->net_stats.rx_errors++;
+ if (status & RxErrFrame)
+ cp->net_stats.rx_frame_errors++;
+ if (status & RxErrCRC)
+ cp->net_stats.rx_crc_errors++;
+ if ((status & RxErrRunt) || (status & RxErrLong))
+ cp->net_stats.rx_length_errors++;
+ if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
+ cp->net_stats.rx_length_errors++;
+ if (status & RxErrFIFO)
+ cp->net_stats.rx_fifo_errors++;
+}
+
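+/* The RX descriptor status word carries the protocol the chip parsed
+ * (IP/TCP/UDP) plus per-protocol checksum-failure bits; a frame is
+ * considered checksum-verified only when the matching check passed.
+ */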
+static inline unsigned int cp_rx_csum_ok (u32 status)
+{
+ unsigned int protocol = (status >> 16) & 0x3;
+
+ if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ return 1;
+ else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
+ return 1;
+ else if ((protocol == RxProtoIP) && (!(status & IPFail)))
+ return 1;
+ return 0;
+}
+
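+/* NAPI receive poll: walk the RX ring until the chip still owns the
+ * next descriptor (DescOwn set) or this poll's quota is used up.
+ * Good frames are passed up the stack and their buffers replaced with
+ * freshly allocated skbs before ownership is returned to the chip.
+ */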
+static int cp_rx_poll (struct net_device *dev, int *budget)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned rx_tail = cp->rx_tail;
+ unsigned rx_work = dev->quota;
+ unsigned rx;
+
+rx_status_loop:
+ rx = 0;
+ cpw16(IntrStatus, cp_rx_intr_mask);
+
+ while (1) {
+ u32 status, len;
+ dma_addr_t mapping;
+ struct sk_buff *skb, *new_skb;
+ struct cp_desc *desc;
+ unsigned buflen;
+
+ skb = cp->rx_skb[rx_tail].skb;
+ if (!skb)
+ BUG();
+
+ desc = &cp->rx_ring[rx_tail];
+ status = le32_to_cpu(desc->opts1);
+ if (status & DescOwn)
+ break;
+
+ len = (status & 0x1fff) - 4;
+ mapping = cp->rx_skb[rx_tail].mapping;
+
+ if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
+ /* we don't support incoming fragmented frames.
+ * instead, we attempt to ensure that the
+ * pre-allocated RX skbs are properly sized such
+ * that RX fragments are never encountered
+ */
+ cp_rx_err_acct(cp, rx_tail, status, len);
+ cp->net_stats.rx_dropped++;
+ cp->cp_stats.rx_frags++;
+ goto rx_next;
+ }
+
+ if (status & (RxError | RxErrFIFO)) {
+ cp_rx_err_acct(cp, rx_tail, status, len);
+ goto rx_next;
+ }
+
+ if (netif_msg_rx_status(cp))
+ printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
+ cp->dev->name, rx_tail, status, len);
+
+ buflen = cp->rx_buf_sz + RX_OFFSET;
+ new_skb = dev_alloc_skb (buflen);
+ if (!new_skb) {
+ cp->net_stats.rx_dropped++;
+ goto rx_next;
+ }
+
+ skb_reserve(new_skb, RX_OFFSET);
+ new_skb->dev = cp->dev;
+
+ pci_unmap_single(cp->pdev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+
+ /* Handle checksum offloading for incoming packets. */
+ if (cp_rx_csum_ok(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_put(skb, len);
+
+ mapping =
+ cp->rx_skb[rx_tail].mapping =
+ pci_map_single(cp->pdev, new_skb->tail,
+ buflen, PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail].skb = new_skb;
+
+ cp_rx_skb(cp, skb, desc);
+ rx++;
+
+rx_next:
+ cp->rx_ring[rx_tail].opts2 = 0;
+ cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
+ if (rx_tail == (CP_RX_RING_SIZE - 1))
+ desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
+ cp->rx_buf_sz);
+ else
+ desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
+ rx_tail = NEXT_RX(rx_tail);
+
+ if (!rx_work--)
+ break;
+ }
+
+ cp->rx_tail = rx_tail;
+
+ dev->quota -= rx;
+ *budget -= rx;
+
+ /* if we did not reach work limit, then we're done with
+ * this round of polling
+ */
+ if (rx_work) {
+ if (cpr16(IntrStatus) & cp_rx_intr_mask)
+ goto rx_status_loop;
+
+ local_irq_disable();
+ cpw16_f(IntrMask, cp_intr_mask);
+ __netif_rx_complete(dev);
+ local_irq_enable();
+
+ return 0; /* done */
+ }
+
+ return 1; /* not done */
+}
+
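+/* Interrupt handler: non-RX events are acknowledged and handled here;
+ * RX events are masked off and deferred to the NAPI poll routine.
+ */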
+static irqreturn_t
+cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct cp_private *cp;
+ u16 status;
+
+ if (unlikely(dev == NULL))
+ return IRQ_NONE;
+ cp = netdev_priv(dev);
+
+ status = cpr16(IntrStatus);
+ if (!status || (status == 0xFFFF))
+ return IRQ_NONE;
+
+ if (netif_msg_intr(cp))
+ printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
+ dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+
+ cpw16(IntrStatus, status & ~cp_rx_intr_mask);
+
+ spin_lock(&cp->lock);
+
+ /* close possible races with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ cpw16(IntrMask, 0);
+ spin_unlock(&cp->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
+ if (netif_rx_schedule_prep(dev)) {
+ cpw16_f(IntrMask, cp_norx_intr_mask);
+ __netif_rx_schedule(dev);
+ }
+
+ if (status & (TxOK | TxErr | TxEmpty | SWInt))
+ cp_tx(cp);
+ if (status & LinkChg)
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+
+ spin_unlock(&cp->lock);
+
+ if (status & PciErr) {
+ u16 pci_status;
+
+ pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
+ printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
+ dev->name, status, pci_status);
+
+ /* TODO: reset hardware */
+ }
+
+ return IRQ_HANDLED;
+}
+
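+/* Reclaim TX descriptors the chip has completed (DescOwn cleared),
+ * account errors and collisions on the final fragment, and wake the
+ * queue once enough slots are free again.
+ */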
+static void cp_tx (struct cp_private *cp)
+{
+ unsigned tx_head = cp->tx_head;
+ unsigned tx_tail = cp->tx_tail;
+
+ while (tx_tail != tx_head) {
+ struct sk_buff *skb;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ skb = cp->tx_skb[tx_tail].skb;
+ if (!skb)
+ BUG();
+
+ pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+
+ if (status & LastFrag) {
+ if (status & (TxError | TxFIFOUnder)) {
+ if (netif_msg_tx_err(cp))
+ printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
+ cp->dev->name, status);
+ cp->net_stats.tx_errors++;
+ if (status & TxOWC)
+ cp->net_stats.tx_window_errors++;
+ if (status & TxMaxCol)
+ cp->net_stats.tx_aborted_errors++;
+ if (status & TxLinkFail)
+ cp->net_stats.tx_carrier_errors++;
+ if (status & TxFIFOUnder)
+ cp->net_stats.tx_fifo_errors++;
+ } else {
+ cp->net_stats.collisions +=
+ ((status >> TxColCntShift) & TxColCntMask);
+ cp->net_stats.tx_packets++;
+ cp->net_stats.tx_bytes += skb->len;
+ if (netif_msg_tx_done(cp))
+ printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+ }
+ dev_kfree_skb_irq(skb);
+ }
+
+ cp->tx_skb[tx_tail].skb = NULL;
+
+ tx_tail = NEXT_TX(tx_tail);
+ }
+
+ cp->tx_tail = tx_tail;
+
+ if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(cp->dev);
+}
+
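+/* Queue one frame for transmission.  Each descriptor's buffer address
+ * is written and made visible (wmb) before opts1 hands ownership to
+ * the chip via DescOwn; for scatter-gather frames the head descriptor
+ * is released last so the chip never sees a half-built chain.
+ */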
+static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned entry;
+ u32 eor;
+#if CP_VLAN_TAG_USED
+ u32 vlan_tag = 0;
+#endif
+
+ spin_lock_irq(&cp->lock);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&cp->lock);
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return 1;
+ }
+
+#if CP_VLAN_TAG_USED
+ if (cp->vlgrp && vlan_tx_tag_present(skb))
+ vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
+#endif
+
+ entry = cp->tx_head;
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ struct cp_desc *txd = &cp->tx_ring[entry];
+ u32 len;
+ dma_addr_t mapping;
+
+ len = skb->len;
+ mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ const struct iphdr *ip = skb->nh.iph;
+ if (ip->protocol == IPPROTO_TCP)
+ txd->opts1 = cpu_to_le32(eor | len | DescOwn |
+ FirstFrag | LastFrag |
+ IPCS | TCPCS);
+ else if (ip->protocol == IPPROTO_UDP)
+ txd->opts1 = cpu_to_le32(eor | len | DescOwn |
+ FirstFrag | LastFrag |
+ IPCS | UDPCS);
+ else
+ BUG();
+ } else
+ txd->opts1 = cpu_to_le32(eor | len | DescOwn |
+ FirstFrag | LastFrag);
+ wmb();
+
+ cp->tx_skb[entry].skb = skb;
+ cp->tx_skb[entry].mapping = mapping;
+ cp->tx_skb[entry].frag = 0;
+ entry = NEXT_TX(entry);
+ } else {
+ struct cp_desc *txd;
+ u32 first_len, first_eor;
+ dma_addr_t first_mapping;
+ int frag, first_entry = entry;
+ const struct iphdr *ip = skb->nh.iph;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_eor = eor;
+ first_len = skb_headlen(skb);
+ first_mapping = pci_map_single(cp->pdev, skb->data,
+ first_len, PCI_DMA_TODEVICE);
+ cp->tx_skb[entry].skb = skb;
+ cp->tx_skb[entry].mapping = first_mapping;
+ cp->tx_skb[entry].frag = 1;
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len;
+ u32 ctrl;
+ dma_addr_t mapping;
+
+ len = this_frag->size;
+ mapping = pci_map_single(cp->pdev,
+ ((void *) page_address(this_frag->page) +
+ this_frag->page_offset),
+ len, PCI_DMA_TODEVICE);
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ ctrl = eor | len | DescOwn | IPCS;
+ if (ip->protocol == IPPROTO_TCP)
+ ctrl |= TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ ctrl |= UDPCS;
+ else
+ BUG();
+ } else
+ ctrl = eor | len | DescOwn;
+
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ ctrl |= LastFrag;
+
+ txd = &cp->tx_ring[entry];
+ CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
+
+ txd->opts1 = cpu_to_le32(ctrl);
+ wmb();
+
+ cp->tx_skb[entry].skb = skb;
+ cp->tx_skb[entry].mapping = mapping;
+ cp->tx_skb[entry].frag = frag + 2;
+ entry = NEXT_TX(entry);
+ }
+
+ txd = &cp->tx_ring[first_entry];
+ CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->addr = cpu_to_le64(first_mapping);
+ wmb();
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ if (ip->protocol == IPPROTO_TCP)
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn |
+ IPCS | TCPCS);
+ else if (ip->protocol == IPPROTO_UDP)
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn |
+ IPCS | UDPCS);
+ else
+ BUG();
+ } else
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn);
+ wmb();
+ }
+ cp->tx_head = entry;
+ if (netif_msg_tx_queued(cp))
+ printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ dev->name, entry, skb->len);
+ if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&cp->lock);
+
+ cpw8(TxPoll, NormalTxPoll);
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static void __cp_set_rx_mode (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+ u32 tmp;
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
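+ /* Hash each address with the Ethernet CRC; the top six bits
+ * select one bit of the 64-bit multicast filter (MAR0..MAR7).
+ */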
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ /* We can safely update without stopping the chip. */
+ tmp = cp_rx_config | rx_mode;
+ if (cp->rx_config != tmp) {
+ cpw32_f (RxConfig, tmp);
+ cp->rx_config = tmp;
+ }
+ cpw32_f (MAR0 + 0, mc_filter[0]);
+ cpw32_f (MAR0 + 4, mc_filter[1]);
+}
+
+static void cp_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct cp_private *cp = netdev_priv(dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+ __cp_set_rx_mode(dev);
+ spin_unlock_irqrestore (&cp->lock, flags);
+}
+
+static void __cp_get_stats(struct cp_private *cp)
+{
+ /* only lower 24 bits valid; write any value to clear */
+ cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+ cpw32 (RxMissed, 0);
+}
+
+static struct net_device_stats *cp_get_stats(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Only the missed-frame counter has to be read back from the chip;
+ * all other statistics are maintained in software. */
+ spin_lock_irqsave(&cp->lock, flags);
+ if (netif_running(dev) && netif_device_present(dev))
+ __cp_get_stats(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return &cp->net_stats;
+}
+
+static void cp_stop_hw (struct cp_private *cp)
+{
+ cpw16(IntrStatus, ~(cpr16(IntrStatus)));
+ cpw16_f(IntrMask, 0);
+ cpw8(Cmd, 0);
+ cpw16_f(CpCmd, 0);
+ cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
+
+ cp->rx_tail = 0;
+ cp->tx_head = cp->tx_tail = 0;
+}
+
+static void cp_reset_hw (struct cp_private *cp)
+{
+ unsigned work = 1000;
+
+ cpw8(Cmd, CmdReset);
+
+ while (work--) {
+ if (!(cpr8(Cmd) & CmdReset))
+ return;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
+}
+
+static inline void cp_start_hw (struct cp_private *cp)
+{
+ cpw16(CpCmd, cp->cpcmd);
+ cpw8(Cmd, RxOn | TxOn);
+}
+
+static void cp_init_hw (struct cp_private *cp)
+{
+ struct net_device *dev = cp->dev;
+ dma_addr_t ring_dma;
+
+ cp_reset_hw(cp);
+
+ cpw8_f (Cfg9346, Cfg9346_Unlock);
+
+ /* Restore our idea of the MAC address. */
+ cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
+ cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+
+ cp_start_hw(cp);
+ cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
+
+ __cp_set_rx_mode(dev);
+ cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
+
+ cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
+ /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
+ cpw8(Config3, PARMEnable);
+ cp->wol_enabled = 0;
+
+ cpw8(Config5, cpr8(Config5) & PMEStatus);
+
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
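+ /* Program the 64-bit ring base addresses.  The double 16-bit
+ * shift extracts the high dword without shifting a possibly
+ * 32-bit dma_addr_t by 32 bits.
+ */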
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ cpw16(MultiIntr, 0);
+
+ cpw16_f(IntrMask, cp_intr_mask);
+
+ cpw8_f(Cfg9346, Cfg9346_Lock);
+}
+
+static int cp_refill_rx (struct cp_private *cp)
+{
+ unsigned i;
+
+ for (i = 0; i < CP_RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
+ if (!skb)
+ goto err_out;
+
+ skb->dev = cp->dev;
+ skb_reserve(skb, RX_OFFSET);
+
+ cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
+ skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ cp->rx_skb[i].skb = skb;
+ cp->rx_skb[i].frag = 0;
+
+ cp->rx_ring[i].opts2 = 0;
+ cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+ if (i == (CP_RX_RING_SIZE - 1))
+ cp->rx_ring[i].opts1 =
+ cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
+ else
+ cp->rx_ring[i].opts1 =
+ cpu_to_le32(DescOwn | cp->rx_buf_sz);
+ }
+
+ return 0;
+
+err_out:
+ cp_clean_rings(cp);
+ return -ENOMEM;
+}
+
+static int cp_init_rings (struct cp_private *cp)
+{
+ memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+ cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+
+ cp->rx_tail = 0;
+ cp->tx_head = cp->tx_tail = 0;
+
+ return cp_refill_rx (cp);
+}
+
+static int cp_alloc_rings (struct cp_private *cp)
+{
+ void *mem;
+
+ mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+ if (!mem)
+ return -ENOMEM;
+
+ cp->rx_ring = mem;
+ cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
+
+ mem += (CP_RING_BYTES - CP_STATS_SIZE);
+ cp->nic_stats = mem;
+ cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
+
+ return cp_init_rings(cp);
+}
+
+static void cp_clean_rings (struct cp_private *cp)
+{
+ unsigned i;
+
+ memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+ memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
+ for (i = 0; i < CP_RX_RING_SIZE; i++) {
+ if (cp->rx_skb[i].skb) {
+ pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(cp->rx_skb[i].skb);
+ }
+ }
+
+ for (i = 0; i < CP_TX_RING_SIZE; i++) {
+ if (cp->tx_skb[i].skb) {
+ struct sk_buff *skb = cp->tx_skb[i].skb;
+ pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ cp->net_stats.tx_dropped++;
+ }
+ }
+
+ memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
+ memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+}
+
+static void cp_free_rings (struct cp_private *cp)
+{
+ cp_clean_rings(cp);
+ pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+ cp->rx_ring = NULL;
+ cp->tx_ring = NULL;
+ cp->nic_stats = NULL;
+}
+
+static int cp_open (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+
+ if (netif_msg_ifup(cp))
+ printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+
+ rc = cp_alloc_rings(cp);
+ if (rc)
+ return rc;
+
+ cp_init_hw(cp);
+
+ rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
+ if (rc)
+ goto err_out_hw;
+
+ netif_carrier_off(dev);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
+ netif_start_queue(dev);
+
+ return 0;
+
+err_out_hw:
+ cp_stop_hw(cp);
+ cp_free_rings(cp);
+ return rc;
+}
+
+static int cp_close (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (netif_msg_ifdown(cp))
+ printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ cp_stop_hw(cp);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ cp_free_rings(cp);
+ return 0;
+}
+
+#ifdef BROKEN
+static int cp_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ /* check for invalid MTU, according to hardware limits */
+ if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
+ return -EINVAL;
+
+ /* if network interface not up, no need for complexity */
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ cp_set_rxbufsize(cp); /* set new rx buf size */
+ return 0;
+ }
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cp_stop_hw(cp); /* stop h/w and free rings */
+ cp_clean_rings(cp);
+
+ dev->mtu = new_mtu;
+ cp_set_rxbufsize(cp); /* set new rx buf size */
+
+ rc = cp_init_rings(cp); /* realloc and restart h/w */
+ cp_start_hw(cp);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+#endif /* BROKEN */
+
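+/* The internal PHY is not on a real MII bus; its standard MII
+ * registers are shadowed at fixed chip register offsets.  This table
+ * translates MII register numbers to those offsets (0 = unsupported).
+ */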
+static char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ return location < 8 && mii_2_8139_map[location] ?
+ readw(cp->regs + mii_2_8139_map[location]) : 0;
+}
+
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ if (location == 0) {
+ cpw8(Cfg9346, Cfg9346_Unlock);
+ cpw16(BasicModeCtrl, value);
+ cpw8(Cfg9346, Cfg9346_Lock);
+ } else if (location < 8 && mii_2_8139_map[location])
+ cpw16(mii_2_8139_map[location], value);
+}
+
+/* Set the ethtool Wake-on-LAN settings */
+static int netdev_set_wol (struct cp_private *cp,
+ const struct ethtool_wolinfo *wol)
+{
+ u8 options;
+
+ options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
+ /* If WOL is being disabled, no need for complexity */
+ if (wol->wolopts) {
+ if (wol->wolopts & WAKE_PHY) options |= LinkUp;
+ if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
+ }
+
+ cpw8 (Cfg9346, Cfg9346_Unlock);
+ cpw8 (Config3, options);
+ cpw8 (Cfg9346, Cfg9346_Lock);
+
+ options = 0; /* Paranoia setting */
+ options = cpr8 (Config5) & ~(UWF | MWF | BWF);
+ /* If WOL is being disabled, no need for complexity */
+ if (wol->wolopts) {
+ if (wol->wolopts & WAKE_UCAST) options |= UWF;
+ if (wol->wolopts & WAKE_BCAST) options |= BWF;
+ if (wol->wolopts & WAKE_MCAST) options |= MWF;
+ }
+
+ cpw8 (Config5, options);
+
+ cp->wol_enabled = (wol->wolopts) ? 1 : 0;
+
+ return 0;
+}
+
+/* Get the ethtool Wake-on-LAN settings */
+static void netdev_get_wol (struct cp_private *cp,
+ struct ethtool_wolinfo *wol)
+{
+ u8 options;
+
+ wol->wolopts = 0; /* Start from scratch */
+ wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MCAST | WAKE_UCAST;
+ /* We don't need to go on if WOL is disabled */
+ if (!cp->wol_enabled) return;
+
+ options = cpr8 (Config3);
+ if (options & LinkUp) wol->wolopts |= WAKE_PHY;
+ if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
+
+ options = 0; /* Paranoia setting */
+ options = cpr8 (Config5);
+ if (options & UWF) wol->wolopts |= WAKE_UCAST;
+ if (options & BWF) wol->wolopts |= WAKE_BCAST;
+ if (options & MWF) wol->wolopts |= WAKE_MCAST;
+}
+
+static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(cp->pdev));
+}
+
+static int cp_get_regs_len(struct net_device *dev)
+{
+ return CP_REGS_SIZE;
+}
+
+static int cp_get_stats_count (struct net_device *dev)
+{
+ return CP_NUM_STATS;
+}
+
+static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = mii_ethtool_gset(&cp->mii_if, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+
+static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = mii_ethtool_sset(&cp->mii_if, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+
+static int cp_nway_reset(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ return mii_nway_restart(&cp->mii_if);
+}
+
+static u32 cp_get_msglevel(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
+
+static void cp_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
+
+static u32 cp_get_rx_csum(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
+}
+
+static int cp_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ u16 cmd = cp->cpcmd, newcmd;
+
+ newcmd = cmd;
+
+ if (data)
+ newcmd |= RxChkSum;
+ else
+ newcmd &= ~RxChkSum;
+
+ if (newcmd != cmd) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cp->cpcmd = newcmd;
+ cpw16_f(CpCmd, newcmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ }
+
+ return 0;
+}
+
+static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len < CP_REGS_SIZE)
+ return /* -EINVAL */;
+
+ regs->version = CP_REGS_VER;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave (&cp->lock, flags);
+ netdev_get_wol (cp, wol);
+ spin_unlock_irqrestore (&cp->lock, flags);
+}
+
+static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave (&cp->lock, flags);
+ rc = netdev_set_wol (cp, wol);
+ spin_unlock_irqrestore (&cp->lock, flags);
+
+ return rc;
+}
+
+static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
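+/* Ask the chip to DMA its hardware counter block into nic_stats; the
+ * DumpStats bit self-clears when the dump is finished, so poll briefly
+ * before copying the counters out to ethtool.
+ */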
+static void cp_get_ethtool_stats (struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned int work = 100;
+ int i;
+
+ /* begin NIC statistics dump */
+ cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16);
+ cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats);
+ cpr32(StatsAddr);
+
+ while (work-- > 0) {
+ if ((cpr32(StatsAddr) & DumpStats) == 0)
+ break;
+ cpu_relax();
+ }
+
+ if (cpr32(StatsAddr) & DumpStats)
+ return /* -EIO */;
+
+ i = 0;
+ tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
+ tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
+ tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
+ tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
+ tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
+ tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
+ tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
+ tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
+ tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
+ tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
+ tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
+ tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
+ tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
+ tmp_stats[i++] = cp->cp_stats.rx_frags;
+ if (i != CP_NUM_STATS)
+ BUG();
+}
+
+static struct ethtool_ops cp_ethtool_ops = {
+ .get_drvinfo = cp_get_drvinfo,
+ .get_regs_len = cp_get_regs_len,
+ .get_stats_count = cp_get_stats_count,
+ .get_settings = cp_get_settings,
+ .set_settings = cp_set_settings,
+ .nway_reset = cp_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = cp_get_msglevel,
+ .set_msglevel = cp_set_msglevel,
+ .get_rx_csum = cp_get_rx_csum,
+ .set_rx_csum = cp_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_regs = cp_get_regs,
+ .get_wol = cp_get_wol,
+ .set_wol = cp_set_wol,
+ .get_strings = cp_get_strings,
+ .get_ethtool_stats = cp_get_ethtool_stats,
+};
+
+static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return rc;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
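+/* Bit-bang one read command to the serial EEPROM through the Cfg9346
+ * register: shift the command and address out MSB first, then clock
+ * the 16-bit result back in.
+ */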
+static int read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void __iomem *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ writeb (EE_ENB & ~EE_CS, ee_addr);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writeb (EE_ENB | dataval, ee_addr);
+ eeprom_delay ();
+ writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ }
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ for (i = 16; i > 0; i--) {
+ writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ retval =
+ (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+ }
+
+ /* Terminate the EEPROM access. */
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+
+ return retval;
+}
+
+/* Enable PME# generation and put the board into a low-power D3 state,
+ where it waits for a wake-up event */
+static void cp_set_d3_state (struct cp_private *cp)
+{
+ pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
+ pci_set_power_state (cp->pdev, PCI_D3hot);
+}
+
+static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct cp_private *cp;
+ int rc;
+ void __iomem *regs;
+ long pciaddr;
+ unsigned int addr_len, i, pci_using_dac;
+ u8 pci_rev;
+
+#ifndef MODULE
+ static int version_printed;
+ if (version_printed++ == 0)
+ printk("%s", version);
+#endif
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
+ printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+ pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
+ printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct cp_private));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ cp = netdev_priv(dev);
+ cp->pdev = pdev;
+ cp->dev = dev;
+ cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
+ spin_lock_init (&cp->lock);
+ cp->mii_if.dev = dev;
+ cp->mii_if.mdio_read = mdio_read;
+ cp->mii_if.mdio_write = mdio_write;
+ cp->mii_if.phy_id = CP_INTERNAL_PHY;
+ cp->mii_if.phy_id_mask = 0x1f;
+ cp->mii_if.reg_num_mask = 0x1f;
+ cp_set_rxbufsize(cp);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_free;
+
+ rc = pci_set_mwi(pdev);
+ if (rc)
+ goto err_out_disable;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_mwi;
+
+ pciaddr = pci_resource_start(pdev, 1);
+ if (!pciaddr) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
+ pci_name(pdev));
+ goto err_out_res;
+ }
+ if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
+ pci_resource_len(pdev, 1), pci_name(pdev));
+ goto err_out_res;
+ }
+
+ /* Configure DMA attributes. */
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL) &&
+ !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ pci_using_dac = 1;
+ } else {
+ pci_using_dac = 0;
+
+ rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+ if (rc) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_res;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
+ if (rc) {
+ printk(KERN_ERR PFX "No usable consistent DMA configuration, "
+ "aborting.\n");
+ goto err_out_res;
+ }
+ }
+
+ cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
+ PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
+
+ regs = ioremap(pciaddr, CP_REGS_SIZE);
+ if (!regs) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
+ pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+ goto err_out_res;
+ }
+ dev->base_addr = (unsigned long) regs;
+ cp->regs = regs;
+
+ cp_stop_hw(cp);
+
+ /* read MAC address from EEPROM */
+ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *) (dev->dev_addr))[i] =
+ le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+
+ dev->open = cp_open;
+ dev->stop = cp_close;
+ dev->set_multicast_list = cp_set_rx_mode;
+ dev->hard_start_xmit = cp_start_xmit;
+ dev->get_stats = cp_get_stats;
+ dev->do_ioctl = cp_ioctl;
+ dev->poll = cp_rx_poll;
+ dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
+#ifdef BROKEN
+ dev->change_mtu = cp_change_mtu;
+#endif
+ dev->ethtool_ops = &cp_ethtool_ops;
+#if 0
+ dev->tx_timeout = cp_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+#if CP_VLAN_TAG_USED
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = cp_vlan_rx_register;
+ dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
+#endif
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ dev->irq = pdev->irq;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_iomap;
+
+ printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
+ "%02x:%02x:%02x:%02x:%02x:%02x, "
+ "IRQ %d\n",
+ dev->name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5],
+ dev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ /* enable PCI bus mastering (MWI was enabled earlier via pci_set_mwi) */
+ pci_set_master(pdev);
+
+ if (cp->wol_enabled) cp_set_d3_state (cp);
+
+ return 0;
+
+err_out_iomap:
+ iounmap(regs);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_mwi:
+ pci_clear_mwi(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static void cp_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cp_private *cp = netdev_priv(dev);
+
+ if (!dev)
+ BUG();
+ unregister_netdev(dev);
+ iounmap(cp->regs);
+ if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+#ifdef CONFIG_PM
+static int cp_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev;
+ struct cp_private *cp;
+ unsigned long flags;
+
+ dev = pci_get_drvdata (pdev);
+ cp = netdev_priv(dev);
+
+ if (!dev || !netif_running (dev)) return 0;
+
+ netif_device_detach (dev);
+ netif_stop_queue (dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+
+ /* Disable Rx and Tx */
+ cpw16 (IntrMask, 0);
+ cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
+
+ spin_unlock_irqrestore (&cp->lock, flags);
+
+ if (cp->pdev && cp->wol_enabled) {
+ pci_save_state (cp->pdev);
+ cp_set_d3_state (cp);
+ }
+
+ return 0;
+}
+
+static int cp_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev;
+ struct cp_private *cp;
+
+ dev = pci_get_drvdata (pdev);
+ cp = netdev_priv(dev);
+
+ netif_device_attach (dev);
+
+ if (cp->pdev && cp->wol_enabled) {
+ pci_set_power_state (cp->pdev, PCI_D0);
+ pci_restore_state (cp->pdev);
+ }
+
+ cp_init_hw (cp);
+ netif_start_queue (dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cp_driver = {
+ .name = DRV_NAME,
+ .id_table = cp_pci_tbl,
+ .probe = cp_init_one,
+ .remove = cp_remove_one,
+#ifdef CONFIG_PM
+ .resume = cp_resume,
+ .suspend = cp_suspend,
+#endif
+};
+
+static int __init cp_init (void)
+{
+#ifdef MODULE
+ printk("%s", version);
+#endif
+ return pci_module_init (&cp_driver);
+}
+
+static void __exit cp_exit (void)
+{
+ pci_unregister_driver (&cp_driver);
+}
+
+module_init(cp_init);
+module_exit(cp_exit);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
new file mode 100644
index 000000000000..d4bd20c21a1f
--- /dev/null
+++ b/drivers/net/8139too.c
@@ -0,0 +1,2666 @@
+/*
+
+ 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000-2002 Jeff Garzik
+
+ Much code comes from Donald Becker's rtl8139.c driver,
+ versions 1.13 and older. This driver was originally based
+ on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
+
+ -----<snip>-----
+
+ Written 1997-2001 by Donald Becker.
+ This software may be used and distributed according to the
+ terms of the GNU General Public License (GPL), incorporated
+ herein by reference. Drivers based on or derived from this
+ code fall under the GPL and must retain the authorship,
+ copyright and license notice. This file is not a complete
+ program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139
+ PCI ethernet chips.
+
+ The author may be reached as becker@scyld.com, or C/O Scyld
+ Computing Corporation 410 Severn Ave., Suite 210 Annapolis
+ MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston
+ <shangh@realtek.com.tw>.
+
+ -----<snip>-----
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Contributors:
+
+ Donald Becker - he wrote the original driver, kudos to him!
+ (but please don't e-mail him for support, this isn't his driver)
+
+ Tigran Aivazian - bug fixes, skbuff free cleanup
+
+ Martin Mares - suggestions for PCI cleanup
+
+ David S. Miller - PCI DMA and softnet updates
+
+ Ernst Gill - fixes ported from BSD driver
+
+ Daniel Kobras - identified specific locations of
+ posted MMIO write bugginess
+
+ Gerard Sharp - bug fix, testing and feedback
+
+ David Ford - Rx ring wrap fix
+
+ Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
+ to find and fix a crucial bug on older chipsets.
+
+ Donald Becker/Chris Butterworth/Marcus Westergren -
+ Noticed various Rx packet size-related buglets.
+
+ Santiago Garcia Mantinan - testing and feedback
+
+ Jens David - 2.2.x kernel backports
+
+ Martin Dennett - incredibly helpful insight on undocumented
+ features of the 8139 chips
+
+ Jean-Jacques Michel - bug fix
+
+ Tobias Ringström - Rx interrupt status checking suggestion
+
+ Andrew Morton - Clear blocked signals, avoid
+ buffer overrun setting current->comm.
+
+ Kalle Olavi Niemitalo - Wake-on-LAN ioctls
+
+ Robert Kuebel - Save kernel thread from dying on any signal.
+
+ Submitting bug reports:
+
+ "rtl8139-diag -mmmaaavvveefN" output
+ enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
+
+*/
+
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.27"
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+/* Default Message level */
+#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+
+/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
+#ifdef CONFIG_8139TOO_PIO
+#define USE_IO_OPS 1
+#endif
+
+/* define to 1 to enable copious debugging info */
+#undef RTL8139_DEBUG
+
+/* define to 1 to disable lightweight runtime debugging checks */
+#undef RTL8139_NDEBUG
+
+
+#ifdef RTL8139_DEBUG
+/* note: prints function name for you */
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+#ifdef RTL8139_NDEBUG
+# define assert(expr) do {} while (0)
+#else
+# define assert(expr) \
+ if(unlikely(!(expr))) { \
+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
+#endif
+
+
+/* A few user-configurable values. */
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* bitmapped message enable number */
+static int debug = -1;
+
+/*
+ * Receive ring size
+ * Warning: 64K ring has hardware issues and may lock up.
+ */
+#if defined(CONFIG_SH_DREAMCAST)
+#define RX_BUF_IDX 1 /* 16K ring */
+#else
+#define RX_BUF_IDX 2 /* 32K ring */
+#endif
+#define RX_BUF_LEN (8192 << RX_BUF_IDX)
+#define RX_BUF_PAD 16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+
+#if RX_BUF_LEN == 65536
+#define RX_BUF_TOT_LEN RX_BUF_LEN
+#else
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+#endif
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE 1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+
+enum {
+ HAS_MII_XCVR = 0x010000,
+ HAS_CHIP_XCVR = 0x020000,
+ HAS_LNK_CHNG = 0x040000,
+};
+
+#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */
+#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */
+#define RTL_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define RTL8129_CAPS HAS_MII_XCVR
+#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+
+typedef enum {
+ RTL8139 = 0,
+ RTL8129,
+} board_t;
+
+
+/* indexed by board_t, above */
+static struct {
+ const char *name;
+ u32 hw_flags;
+} board_info[] __devinitdata = {
+ { "RealTek RTL8139", RTL8139_CAPS },
+ { "RealTek RTL8129", RTL8129_CAPS },
+};
+
+
+static struct pci_device_id rtl8139_pci_tbl[] = {
+ {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+ /* Bogus 8139 silicon reports 8129 without external PROM :-( */
+ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+#endif
+#ifdef CONFIG_8139TOO_8129
+ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
+#endif
+
+ /* some crazy cards report invalid vendor ids like
+ * 0x0001 here. The other ids are valid and constant,
+ * so we simply don't match on the main vendor id.
+ */
+ {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
+ {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
+ {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },
+
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "early_rx" },
+ { "tx_buf_mapped" },
+ { "tx_timeouts" },
+ { "rx_lost_in_ring" },
+};
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum RTL8139_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAR0 = 8, /* Multicast filter. */
+ TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf = 0x30,
+ ChipCmd = 0x37,
+ RxBufPtr = 0x38,
+ RxBufAddr = 0x3A,
+ IntrMask = 0x3C,
+ IntrStatus = 0x3E,
+ TxConfig = 0x40,
+ RxConfig = 0x44,
+ Timer = 0x48, /* A general-purpose counter. */
+ RxMissed = 0x4C, /* 24 bits valid, write clears. */
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ FlashReg = 0x54,
+ MediaStatus = 0x58,
+ Config3 = 0x59,
+ Config4 = 0x5A, /* absent on RTL-8139A */
+ HltClk = 0x5B,
+ MultiIntr = 0x5C,
+ TxSummary = 0x60,
+ BasicModeCtrl = 0x62,
+ BasicModeStatus = 0x64,
+ NWayAdvert = 0x66,
+ NWayLPAR = 0x68,
+ NWayExpansion = 0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS = 0x70, /* FIFO Control and test. */
+ CSCR = 0x74, /* Chip Status and Configuration Register. */
+ PARA78 = 0x78,
+ PARA7c = 0x7c, /* Magic transceiver parameter register. */
+ Config5 = 0xD8, /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+ MultiIntrClear = 0xF000,
+ ChipCmdClear = 0xE2,
+ Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr = 0x8000,
+ PCSTimeout = 0x4000,
+ RxFIFOOver = 0x40,
+ RxUnderrun = 0x20,
+ RxOverflow = 0x10,
+ TxErr = 0x08,
+ TxOK = 0x04,
+ RxErr = 0x02,
+ RxOK = 0x01,
+
+ RxAckBits = RxFIFOOver | RxOverflow | RxOK,
+};
+
+enum TxStatusBits {
+ TxHostOwns = 0x2000,
+ TxUnderrun = 0x4000,
+ TxStatOK = 0x8000,
+ TxOutOfWindow = 0x20000000,
+ TxAborted = 0x40000000,
+ TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast = 0x8000,
+ RxPhysical = 0x4000,
+ RxBroadcast = 0x2000,
+ RxBadSymbol = 0x0020,
+ RxRunt = 0x0010,
+ RxTooLong = 0x0008,
+ RxCRCErr = 0x0004,
+ RxBadAlign = 0x0002,
+ RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+
+ /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
+ TxIFGShift = 24,
+ TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */
+ TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */
+ TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */
+ TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */
+
+ TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+ TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
+ TxClearAbt = (1 << 0), /* Clear abort (WO) */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */
+ TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */
+
+ TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+ Cfg1_PM_Enable = 0x01,
+ Cfg1_VPD_Enable = 0x02,
+ Cfg1_PIO = 0x04,
+ Cfg1_MMIO = 0x08,
+ LWAKE = 0x10, /* not on 8139, 8139A */
+ Cfg1_Driver_Load = 0x20,
+ Cfg1_LED0 = 0x40,
+ Cfg1_LED1 = 0x80,
+ SLEEP = (1 << 1), /* only on 8139, 8139A */
+ PWRDN = (1 << 0), /* only on 8139, 8139A */
+};
+
+/* Bits in Config3 */
+enum Config3Bits {
+ Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */
+ Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
+ Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
+ Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */
+ Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */
+ Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
+ Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */
+ Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+ LWPTN = (1 << 2), /* not on 8139, 8139A */
+};
+
+/* Bits in Config5 */
+enum Config5Bits {
+ Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */
+ Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */
+ Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */
+ Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */
+ Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */
+ Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */
+ Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */
+};
+
+enum RxConfigBits {
+ /* rx fifo threshold */
+ RxCfgFIFOShift = 13,
+ RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+ /* Max DMA burst */
+ RxCfgDMAShift = 8,
+ RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+ /* rx ring buffer length */
+ RxCfgRcv8K = 0,
+ RxCfgRcv16K = (1 << 11),
+ RxCfgRcv32K = (1 << 12),
+ RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+ /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */
+ RxNoWrap = (1 << 7),
+};
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links on some boards. */
+enum CSCRBits {
+ CSCR_LinkOKBit = 0x0400,
+ CSCR_LinkChangeBit = 0x0800,
+ CSCR_LinkStatusBits = 0x0f000,
+ CSCR_LinkDownOffCmd = 0x003c0,
+ CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+enum Cfg9346Bits {
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xC0,
+};
+
+typedef enum {
+ CH_8139 = 0,
+ CH_8139_K,
+ CH_8139A,
+ CH_8139A_G,
+ CH_8139B,
+ CH_8130,
+ CH_8139C,
+ CH_8100,
+ CH_8100B_8139D,
+ CH_8101,
+} chip_t;
+
+enum chip_flags {
+ HasHltClk = (1 << 0),
+ HasLWake = (1 << 1),
+};
+
+#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \
+ (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22)
+#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
+
+/* directly indexed by chip_t, above */
+static const struct {
+ const char *name;
+ u32 version; /* from RTL8139C/RTL8139D docs */
+ u32 flags;
+} rtl_chip_info[] = {
+ { "RTL-8139",
+ HW_REVID(1, 0, 0, 0, 0, 0, 0),
+ HasHltClk,
+ },
+
+ { "RTL-8139 rev K",
+ HW_REVID(1, 1, 0, 0, 0, 0, 0),
+ HasHltClk,
+ },
+
+ { "RTL-8139A",
+ HW_REVID(1, 1, 1, 0, 0, 0, 0),
+ HasHltClk, /* XXX undocumented? */
+ },
+
+ { "RTL-8139A rev G",
+ HW_REVID(1, 1, 1, 0, 0, 1, 0),
+ HasHltClk, /* XXX undocumented? */
+ },
+
+ { "RTL-8139B",
+ HW_REVID(1, 1, 1, 1, 0, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8130",
+ HW_REVID(1, 1, 1, 1, 1, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8139C",
+ HW_REVID(1, 1, 1, 0, 1, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8100",
+ HW_REVID(1, 1, 1, 1, 0, 1, 0),
+ HasLWake,
+ },
+
+ { "RTL-8100B/8139D",
+ HW_REVID(1, 1, 1, 0, 1, 0, 1),
+ HasLWake,
+ },
+
+ { "RTL-8101",
+ HW_REVID(1, 1, 1, 0, 1, 1, 1),
+ HasLWake,
+ },
+};
+
+struct rtl_extra_stats {
+ unsigned long early_rx;
+ unsigned long tx_buf_mapped;
+ unsigned long tx_timeouts;
+ unsigned long rx_lost_in_ring;
+};
+
+struct rtl8139_private {
+ void *mmio_addr;
+ int drv_flags;
+ struct pci_dev *pci_dev;
+ u32 msg_enable;
+ struct net_device_stats stats;
+ unsigned char *rx_ring;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ unsigned int tx_flag;
+ unsigned long cur_tx;
+ unsigned long dirty_tx;
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_bufs_dma;
+ signed char phys[4]; /* MII device addresses. */
+ char twistie, twist_row, twist_col; /* Twister tune state. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ spinlock_t lock;
+ spinlock_t rx_lock;
+ chip_t chipset;
+ pid_t thr_pid;
+ wait_queue_head_t thr_wait;
+ struct completion thr_exited;
+ u32 rx_config;
+ struct rtl_extra_stats xstats;
+ int time_to_die;
+ struct mii_if_info mii;
+ unsigned int regs_len;
+ unsigned long fifo_copy_timeout;
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(multicast_filter_limit, int, 0);
+module_param_array(media, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(debug, int, 0);
+MODULE_PARM_DESC (debug, "8139too bitmapped message enable number");
+MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
+
+static int read_eeprom (void *ioaddr, int location, int addr_len);
+static int rtl8139_open (struct net_device *dev);
+static int mdio_read (struct net_device *dev, int phy_id, int location);
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int val);
+static void rtl8139_start_thread(struct net_device *dev);
+static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_init_ring (struct net_device *dev);
+static int rtl8139_start_xmit (struct sk_buff *skb,
+ struct net_device *dev);
+static int rtl8139_poll(struct net_device *dev, int *budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rtl8139_poll_controller(struct net_device *dev);
+#endif
+static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
+ struct pt_regs *regs);
+static int rtl8139_close (struct net_device *dev);
+static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
+static void rtl8139_set_rx_mode (struct net_device *dev);
+static void __set_rx_mode (struct net_device *dev);
+static void rtl8139_hw_start (struct net_device *dev);
+static struct ethtool_ops rtl8139_ethtool_ops;
+
+#ifdef USE_IO_OPS
+
+#define RTL_R8(reg) inb (((unsigned long)ioaddr) + (reg))
+#define RTL_R16(reg) inw (((unsigned long)ioaddr) + (reg))
+#define RTL_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
+#define RTL_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
+#define RTL_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
+#define RTL_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define RTL_W8_F RTL_W8
+#define RTL_W16_F RTL_W16
+#define RTL_W32_F RTL_W32
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+
+#else
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define RTL_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
+#define RTL_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
+#define RTL_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+
+
+#define MMIO_FLUSH_AUDIT_COMPLETE 1
+#if MMIO_FLUSH_AUDIT_COMPLETE
+
+/* write MMIO register */
+#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
+
+#else
+
+/* write MMIO register, then flush */
+#define RTL_W8 RTL_W8_F
+#define RTL_W16 RTL_W16_F
+#define RTL_W32 RTL_W32_F
+
+#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
+
+/* read MMIO register */
+#define RTL_R8(reg) readb (ioaddr + (reg))
+#define RTL_R16(reg) readw (ioaddr + (reg))
+#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+
+#endif /* USE_IO_OPS */
+
+
+static const u16 rtl8139_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+ TxErr | TxOK | RxErr | RxOK;
+
+static const u16 rtl8139_norx_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun |
+ TxErr | TxOK | RxErr ;
+
+#if RX_BUF_IDX == 0
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv8K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 1
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv16K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 2
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 3
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv64K |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#else
+#error "Invalid configuration for 8139_RXBUF_IDX"
+#endif
+
+static const unsigned int rtl8139_tx_config =
+ TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
+
+static void __rtl8139_cleanup_dev (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev;
+
+ assert (dev != NULL);
+ assert (tp->pci_dev != NULL);
+ pdev = tp->pci_dev;
+
+#ifndef USE_IO_OPS
+ if (tp->mmio_addr)
+ iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+ /* it's ok to call this even if we have no regions to free */
+ pci_release_regions (pdev);
+
+ free_netdev(dev);
+ pci_set_drvdata (pdev, NULL);
+}
+
+
+static void rtl8139_chip_reset (void *ioaddr)
+{
+ int i;
+
+ /* Soft reset the chip. */
+ RTL_W8 (ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--) {
+ barrier();
+ if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+ break;
+ udelay (10);
+ }
+}
+
+
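+/*
+ * Allocate the net_device, enable the PCI device, claim and map the
+ * register BAR (port I/O or MMIO depending on USE_IO_OPS), wake the chip
+ * and identify the chip revision from the TxConfig register.
+ */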
+static int __devinit rtl8139_init_board (struct pci_dev *pdev,
+ struct net_device **dev_out)
+{
+ void *ioaddr;
+ struct net_device *dev;
+ struct rtl8139_private *tp;
+ u8 tmp8;
+ int rc, disable_dev_on_err = 0;
+ unsigned int i;
+ unsigned long pio_start, pio_end, pio_flags, pio_len;
+ unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ u32 version;
+
+ assert (pdev != NULL);
+
+ *dev_out = NULL;
+
+ /* dev and priv zeroed in alloc_etherdev */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (dev == NULL) {
+ printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ tp = netdev_priv(dev);
+ tp->pci_dev = pdev;
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device (pdev);
+ if (rc)
+ goto err_out;
+
+ pio_start = pci_resource_start (pdev, 0);
+ pio_end = pci_resource_end (pdev, 0);
+ pio_flags = pci_resource_flags (pdev, 0);
+ pio_len = pci_resource_len (pdev, 0);
+
+ mmio_start = pci_resource_start (pdev, 1);
+ mmio_end = pci_resource_end (pdev, 1);
+ mmio_flags = pci_resource_flags (pdev, 1);
+ mmio_len = pci_resource_len (pdev, 1);
+
+ /* set this immediately, we need to know before
+ * we talk to the chip directly */
+ DPRINTK("PIO region size == 0x%02lX\n", pio_len);
+ DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+
+#ifdef USE_IO_OPS
+ /* make sure PCI base addr 0 is PIO */
+ if (!(pio_flags & IORESOURCE_IO)) {
+ printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+ rc = -ENODEV;
+ goto err_out;
+ }
+ /* check for weird/broken PCI region reporting */
+ if (pio_len < RTL_MIN_IO_SIZE) {
+ printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+ rc = -ENODEV;
+ goto err_out;
+ }
+#else
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(mmio_flags & IORESOURCE_MEM)) {
+ printk (KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+ rc = -ENODEV;
+ goto err_out;
+ }
+ if (mmio_len < RTL_MIN_IO_SIZE) {
+ printk (KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+ rc = -ENODEV;
+ goto err_out;
+ }
+#endif
+
+ rc = pci_request_regions (pdev, "8139too");
+ if (rc)
+ goto err_out;
+ disable_dev_on_err = 1;
+
+ /* enable PCI bus-mastering */
+ pci_set_master (pdev);
+
+#ifdef USE_IO_OPS
+ ioaddr = (void *) pio_start;
+ dev->base_addr = pio_start;
+ tp->mmio_addr = ioaddr;
+ tp->regs_len = pio_len;
+#else
+ /* ioremap MMIO region */
+ ioaddr = ioremap (mmio_start, mmio_len);
+ if (ioaddr == NULL) {
+ printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+ rc = -EIO;
+ goto err_out;
+ }
+ dev->base_addr = (long) ioaddr;
+ tp->mmio_addr = ioaddr;
+ tp->regs_len = mmio_len;
+#endif /* USE_IO_OPS */
+
+ /* Bring old chips out of low-power mode. */
+ RTL_W8 (HltClk, 'R');
+
+ /* check for missing/broken hardware */
+ if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+ printk (KERN_ERR PFX "%s: Chip not responding, ignoring board\n",
+ pci_name(pdev));
+ rc = -EIO;
+ goto err_out;
+ }
+
+ /* identify chip attached to board */
+ version = RTL_R32 (TxConfig) & HW_REVID_MASK;
+ for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
+ if (version == rtl_chip_info[i].version) {
+ tp->chipset = i;
+ goto match;
+ }
+
+ /* if unknown chip, assume array element #0, original RTL-8139 in this case */
+ printk (KERN_DEBUG PFX "%s: unknown chip version, assuming RTL-8139\n",
+ pci_name(pdev));
+ printk (KERN_DEBUG PFX "%s: TxConfig = 0x%lx\n", pci_name(pdev), RTL_R32 (TxConfig));
+ tp->chipset = 0;
+
+match:
+ DPRINTK ("chipset id (%d) == index %d, '%s'\n",
+ version, i, rtl_chip_info[i].name);
+
+ if (tp->chipset >= CH_8139B) {
+ u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+ DPRINTK("PCI PM wakeup\n");
+ if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+ (tmp8 & LWAKE))
+ new_tmp8 &= ~LWAKE;
+ new_tmp8 |= Cfg1_PM_Enable;
+ if (new_tmp8 != tmp8) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config1, new_tmp8);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+ tmp8 = RTL_R8 (Config4);
+ if (tmp8 & LWPTN) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config4, tmp8 & ~LWPTN);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ }
+ } else {
+ DPRINTK("Old chip wakeup\n");
+ tmp8 = RTL_R8 (Config1);
+ tmp8 &= ~(SLEEP | PWRDN);
+ RTL_W8 (Config1, tmp8);
+ }
+
+ rtl8139_chip_reset (ioaddr);
+
+ *dev_out = dev;
+ return 0;
+
+err_out:
+ __rtl8139_cleanup_dev (dev);
+ if (disable_dev_on_err)
+ pci_disable_device (pdev);
+ return rc;
+}
+
+
+static int __devinit rtl8139_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtl8139_private *tp;
+ int i, addr_len, option;
+ void *ioaddr;
+ static int board_idx = -1;
+ u8 pci_rev;
+
+ assert (pdev != NULL);
+ assert (ent != NULL);
+
+ board_idx++;
+
+ /* when we're built into the kernel, the driver version message
+ * is only printed if at least one 8139 board has been found
+ */
+#ifndef MODULE
+ {
+ static int printed_version;
+ if (!printed_version++)
+ printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+ }
+#endif
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
+ printk(KERN_INFO PFX "pci dev %s (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+ pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
+ printk(KERN_INFO PFX "Use the \"8139cp\" driver for improved performance and stability.\n");
+ }
+
+ i = rtl8139_init_board (pdev, &dev);
+ if (i < 0)
+ return i;
+
+ assert (dev != NULL);
+ tp = netdev_priv(dev);
+
+ ioaddr = tp->mmio_addr;
+ assert (ioaddr != NULL);
+
+ addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *) (dev->dev_addr))[i] =
+ le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+
+ /* The Rtl8139-specific entries in the device structure. */
+ dev->open = rtl8139_open;
+ dev->hard_start_xmit = rtl8139_start_xmit;
+ dev->poll = rtl8139_poll;
+ dev->weight = 64;
+ dev->stop = rtl8139_close;
+ dev->get_stats = rtl8139_get_stats;
+ dev->set_multicast_list = rtl8139_set_rx_mode;
+ dev->do_ioctl = netdev_ioctl;
+ dev->ethtool_ops = &rtl8139_ethtool_ops;
+ dev->tx_timeout = rtl8139_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = rtl8139_poll_controller;
+#endif
+
+ /* note: the hardware is not capable of sg/csum/highdma, however
+ * through the use of skb_copy_and_csum_dev we enable these
+ * features
+ */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+
+ dev->irq = pdev->irq;
+
+ /* tp zeroed and aligned in alloc_etherdev */
+ tp = netdev_priv(dev);
+
+ /* note: tp->chipset set in rtl8139_init_board */
+ tp->drv_flags = board_info[ent->driver_data].hw_flags;
+ tp->mmio_addr = ioaddr;
+ tp->msg_enable =
+ (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
+ spin_lock_init (&tp->lock);
+ spin_lock_init (&tp->rx_lock);
+ init_waitqueue_head (&tp->thr_wait);
+ init_completion (&tp->thr_exited);
+ tp->mii.dev = dev;
+ tp->mii.mdio_read = mdio_read;
+ tp->mii.mdio_write = mdio_write;
+ tp->mii.phy_id_mask = 0x3f;
+ tp->mii.reg_num_mask = 0x1f;
+
+ /* dev is fully set up and ready to use now */
+ DPRINTK("about to register device named %s (%p)...\n", dev->name, dev);
+ i = register_netdev (dev);
+ if (i) goto err_out;
+
+ pci_set_drvdata (pdev, dev);
+
+ printk (KERN_INFO "%s: %s at 0x%lx, "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+ "IRQ %d\n",
+ dev->name,
+ board_info[ent->driver_data].name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5],
+ dev->irq);
+
+ printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
+ dev->name, rtl_chip_info[tp->chipset].name);
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+#ifdef CONFIG_8139TOO_8129
+ if (tp->drv_flags & HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ u16 advertising = mdio_read(dev, phy, 4);
+ tp->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
+ "advertising %4.4x.\n",
+ dev->name, phy, mii_status, advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
+ "transceiver.\n",
+ dev->name);
+ tp->phys[0] = 32;
+ }
+ } else
+#endif
+ tp->phys[0] = 32;
+ tp->mii.phy_id = tp->phys[0];
+
+ /* The lower four bits are the media type. */
+ option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+ if (option > 0) {
+ tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
+ tp->default_port = option & 0xFF;
+ if (tp->default_port)
+ tp->mii.force_media = 1;
+ }
+ if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
+ tp->mii.full_duplex = full_duplex[board_idx];
+ if (tp->mii.full_duplex) {
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ /* Changing the MII-advertised media might prevent
+ re-connection. */
+ tp->mii.force_media = 1;
+ }
+ if (tp->default_port) {
+ printk(KERN_INFO " Forcing %dMbps %s-duplex operation.\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
+ mdio_write(dev, tp->phys[0], 0,
+ ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
+ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* Put the chip into low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ return 0;
+
+err_out:
+ __rtl8139_cleanup_dev (dev);
+ pci_disable_device (pdev);
+ return i;
+}
+
+
+static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ assert (dev != NULL);
+
+ unregister_netdev (dev);
+
+ __rtl8139_cleanup_dev (dev);
+ pci_disable_device (pdev);
+}
+
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
+ */
+
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
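+/*
+ * Bit-bang a read of one 16-bit word from the serial EEPROM behind the
+ * Cfg9346 register: clock out the start bit, read opcode and address,
+ * MSB first, then clock in the 16 data bits.
+ */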
+static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ writeb (EE_ENB & ~EE_CS, ee_addr);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writeb (EE_ENB | dataval, ee_addr);
+ eeprom_delay ();
+ writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ }
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ for (i = 16; i > 0; i--) {
+ writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ retval =
+ (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+ }
+
+ /* Terminate the EEPROM access. */
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+ The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr) readb(mdio_addr)
+
+
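+/*
+ * Map of the standard MII register numbers onto the 8139's own register
+ * offsets; used when the "PHY" is the chip's internal transceiver
+ * (phy_id 32) rather than an external MII transceiver.
+ */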
+static char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
+
+
+#ifdef CONFIG_8139TOO_8129
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync (void *mdio_addr)
+{
+ int i;
+
+ for (i = 32; i >= 0; i--) {
+ writeb (MDIO_WRITE1, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+}
+#endif
+
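+/*
+ * Read an MII register. For an external transceiver (RTL8129 builds) the
+ * access is bit-banged through Config4; for the internal PHY (phy_id 32)
+ * the value comes straight from the mapped on-chip register.
+ */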
+static int mdio_read (struct net_device *dev, int phy_id, int location)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really an 8139. Use internal registers. */
+ return location < 8 && mii_2_8139_map[location] ?
+ readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ writeb (MDIO_DIR | dataval, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ writeb (0, mdio_addr);
+ mdio_delay (mdio_addr);
+ retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ writeb (MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+#endif
+
+ return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+#ifdef CONFIG_8139TOO_8129
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really an 8139. Use internal registers. */
+ void *ioaddr = tp->mmio_addr;
+ if (location == 0) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W16 (BasicModeCtrl, value);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ } else if (location < 8 && mii_2_8139_map[location])
+ RTL_W16 (mii_2_8139_map[location], value);
+ return;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval =
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ writeb (dataval, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (dataval | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ writeb (0, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+#endif
+}
+
+
+static int rtl8139_open (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int retval;
+ void *ioaddr = tp->mmio_addr;
+
+ retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval)
+ return retval;
+
+ tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ &tp->tx_bufs_dma);
+ tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ &tp->rx_ring_dma);
+ if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+ free_irq(dev->irq, dev);
+
+ if (tp->tx_bufs)
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ if (tp->rx_ring)
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+
+ return -ENOMEM;
+
+ }
+
+ tp->mii.full_duplex = tp->mii.force_media;
+ tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+
+ rtl8139_init_ring (dev);
+ rtl8139_hw_start (dev);
+ netif_start_queue (dev);
+
+ if (netif_msg_ifup(tp))
+ printk(KERN_DEBUG "%s: rtl8139_open() ioaddr %#lx IRQ %d"
+ " GP Pins %2.2x %s-duplex.\n",
+ dev->name, pci_resource_start (tp->pci_dev, 1),
+ dev->irq, RTL_R8 (MediaStatus),
+ tp->mii.full_duplex ? "full" : "half");
+
+ rtl8139_start_thread(dev);
+
+ return 0;
+}
+
+
+static void rtl_check_media (struct net_device *dev, unsigned int init_media)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ if (tp->phys[0] >= 0) {
+ mii_check_media(&tp->mii, netif_msg_link(tp), init_media);
+ }
+}
+
+/* Start the hardware at open or resume. */
+static void rtl8139_hw_start (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ u32 i;
+ u8 tmp;
+
+ /* Bring old chips out of low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'R');
+
+ rtl8139_chip_reset (ioaddr);
+
+ /* unlock Config[01234] and BMCR register writes */
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+ /* Restore our idea of the MAC address. */
+ RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
+ RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+ RTL_W32 (RxConfig, tp->rx_config);
+ RTL_W32 (TxConfig, rtl8139_tx_config);
+
+ tp->cur_rx = 0;
+
+ rtl_check_media (dev, 1);
+
+ if (tp->chipset >= CH_8139B) {
+ /* Disable magic packet scanning, which is enabled
+ * when PM is enabled in Config1. It can be reenabled
+ * via ETHTOOL_SWOL if desired. */
+ RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
+ }
+
+ DPRINTK("init buffer addresses\n");
+
+ /* Lock Config[01234] and BMCR register writes */
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* init Tx buffer DMA addresses */
+ for (i = 0; i < NUM_TX_DESC; i++)
+ RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+ RTL_W32 (RxMissed, 0);
+
+ rtl8139_set_rx_mode (dev);
+
+ /* no early-rx interrupts */
+ RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
+
+ /* make sure RxTx has started */
+ tmp = RTL_R8 (ChipCmd);
+ if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16 (IntrMask, rtl8139_intr_mask);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void rtl8139_init_ring (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int i;
+
+ tp->cur_rx = 0;
+ tp->cur_tx = 0;
+ tp->dirty_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++)
+ tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+}
+
+
+/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */
+static int next_tick = 3 * HZ;
+
+#ifndef CONFIG_8139TOO_TUNE_TWISTER
+static inline void rtl8139_tune_twister (struct net_device *dev,
+ struct rtl8139_private *tp) {}
+#else
+enum TwisterParamVals {
+ PARA78_default = 0x78fa8388,
+ PARA7c_default = 0xcb38de43, /* param[0][3] */
+ PARA7c_xxx = 0xcb38de43,
+};
+
+static const unsigned long param[4][4] = {
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+static void rtl8139_tune_twister (struct net_device *dev,
+ struct rtl8139_private *tp)
+{
+ int linkcase;
+ void *ioaddr = tp->mmio_addr;
+
+ /* This is a complicated state machine to configure the "twister" for
+ impedance/echos based on the cable length.
+ All of this is magic and undocumented.
+ */
+ switch (tp->twistie) {
+ case 1:
+ if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
+ /* We have link beat, let us tune the twister. */
+ RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
+ tp->twistie = 2; /* Change to state 2. */
+ next_tick = HZ / 10;
+ } else {
+ /* Just put in some reasonable defaults for when beat returns. */
+ RTL_W16 (CSCR, CSCR_LinkDownCmd);
+ RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */
+ RTL_W32 (PARA78, PARA78_default);
+ RTL_W32 (PARA7c, PARA7c_default);
+ tp->twistie = 0; /* Bail from future actions. */
+ }
+ break;
+ case 2:
+ /* Read how long it took to hear the echo. */
+ linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
+ if (linkcase == 0x7000)
+ tp->twist_row = 3;
+ else if (linkcase == 0x3000)
+ tp->twist_row = 2;
+ else if (linkcase == 0x1000)
+ tp->twist_row = 1;
+ else
+ tp->twist_row = 0;
+ tp->twist_col = 0;
+ tp->twistie = 3; /* Change to state 3. */
+ next_tick = HZ / 10;
+ break;
+ case 3:
+ /* Put out four tuning parameters, one per 100msec. */
+ if (tp->twist_col == 0)
+ RTL_W16 (FIFOTMS, 0);
+ RTL_W32 (PARA7c, param[(int) tp->twist_row]
+ [(int) tp->twist_col]);
+ next_tick = HZ / 10;
+ if (++tp->twist_col >= 4) {
+ /* For short cables we are done.
+ For long cables (row == 3) check for mistune. */
+ tp->twistie =
+ (tp->twist_row == 3) ? 4 : 0;
+ }
+ break;
+ case 4:
+ /* Special case for long cables: check for mistune. */
+ if ((RTL_R16 (CSCR) &
+ CSCR_LinkStatusBits) == 0x7000) {
+ tp->twistie = 0;
+ break;
+ } else {
+ RTL_W32 (PARA7c, 0xfb38de03);
+ tp->twistie = 5;
+ next_tick = HZ / 10;
+ }
+ break;
+ case 5:
+ /* Retune for shorter cable (column 2). */
+ RTL_W32 (FIFOTMS, 0x20);
+ RTL_W32 (PARA78, PARA78_default);
+ RTL_W32 (PARA7c, PARA7c_default);
+ RTL_W32 (FIFOTMS, 0x00);
+ tp->twist_row = 2;
+ tp->twist_col = 0;
+ tp->twistie = 3;
+ next_tick = HZ / 10;
+ break;
+
+ default:
+ /* do nothing */
+ break;
+ }
+}
+#endif /* CONFIG_8139TOO_TUNE_TWISTER */
+
+static inline void rtl8139_thread_iter (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void *ioaddr)
+{
+ int mii_lpa;
+
+ mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+
+ if (!tp->mii.force_media && mii_lpa != 0xffff) {
+ int duplex = (mii_lpa & LPA_100FULL)
+ || (mii_lpa & 0x01C0) == 0x0040;
+ if (tp->mii.full_duplex != duplex) {
+ tp->mii.full_duplex = duplex;
+
+ if (mii_lpa) {
+ printk (KERN_INFO
+ "%s: Setting %s-duplex based on MII #%d link"
+ " partner ability of %4.4x.\n",
+ dev->name,
+ tp->mii.full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
+ } else {
+ printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n",
+ dev->name);
+ }
+#if 0
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
+ }
+ }
+
+ next_tick = HZ * 60;
+
+ rtl8139_tune_twister (dev, tp);
+
+ DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, RTL_R16 (NWayLPAR));
+ DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
+ dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus));
+ DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
+ dev->name, RTL_R8 (Config0),
+ RTL_R8 (Config1));
+}
+
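+/*
+ * Kernel thread that wakes every next_tick jiffies (or when signalled)
+ * to re-check duplex against the MII link partner and run the twister
+ * tuning state machine; rtl8139_close sets time_to_die and signals it.
+ */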
+static int rtl8139_thread (void *data)
+{
+ struct net_device *dev = data;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ unsigned long timeout;
+
+ daemonize("%s", dev->name);
+ allow_signal(SIGTERM);
+
+ while (1) {
+ timeout = next_tick;
+ do {
+ timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
+ /* make swsusp happy with our thread */
+ try_to_freeze(PF_FREEZE);
+ } while (!signal_pending (current) && (timeout > 0));
+
+ if (signal_pending (current)) {
+ flush_signals(current);
+ }
+
+ if (tp->time_to_die)
+ break;
+
+ if (rtnl_lock_interruptible ())
+ break;
+ rtl8139_thread_iter (dev, tp, tp->mmio_addr);
+ rtnl_unlock ();
+ }
+
+ complete_and_exit (&tp->thr_exited, 0);
+}
+
+static void rtl8139_start_thread(struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ tp->thr_pid = -1;
+ tp->twistie = 0;
+ tp->time_to_die = 0;
+ if (tp->chipset == CH_8139_K)
+ tp->twistie = 1;
+ else if (tp->drv_flags & HAS_LNK_CHNG)
+ return;
+
+ tp->thr_pid = kernel_thread(rtl8139_thread, dev, CLONE_FS|CLONE_FILES);
+ if (tp->thr_pid < 0) {
+ printk (KERN_WARNING "%s: unable to start kernel thread\n",
+ dev->name);
+ }
+}
+
+static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
+{
+ tp->cur_tx = 0;
+ tp->dirty_tx = 0;
+
+ /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
+}
+
+
+static void rtl8139_tx_timeout (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ int i;
+ u8 tmp8;
+ unsigned long flags;
+
+ printk (KERN_DEBUG "%s: Transmit timeout, status %2.2x %4.4x %4.4x "
+ "media %2.2x.\n", dev->name, RTL_R8 (ChipCmd),
+ RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus));
+ /* Emit info to figure out what went wrong. */
+ printk (KERN_DEBUG "%s: Tx queue start entry %ld dirty entry %ld.\n",
+ dev->name, tp->cur_tx, tp->dirty_tx);
+ for (i = 0; i < NUM_TX_DESC; i++)
+ printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
+ dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
+ i == tp->dirty_tx % NUM_TX_DESC ?
+ " (queue head)" : "");
+
+ tp->xstats.tx_timeouts++;
+
+ /* disable Tx ASAP, if not already */
+ tmp8 = RTL_R8 (ChipCmd);
+ if (tmp8 & CmdTxEnb)
+ RTL_W8 (ChipCmd, CmdRxEnb);
+
+ spin_lock(&tp->rx_lock);
+ /* Disable interrupts by clearing the interrupt mask. */
+ RTL_W16 (IntrMask, 0x0000);
+
+ /* Stop a shared interrupt from scavenging while we are. */
+ spin_lock_irqsave (&tp->lock, flags);
+ rtl8139_tx_clear (tp);
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ /* ...and finally, reset everything */
+ if (netif_running(dev)) {
+ rtl8139_hw_start (dev);
+ netif_wake_queue (dev);
+ }
+ spin_unlock(&tp->rx_lock);
+}
+
+
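+/*
+ * Transmit path: the skb is copied (checksumming on the fly if needed)
+ * into one of the NUM_TX_DESC fixed DMA bounce buffers, and the chip is
+ * kicked by writing the length and Tx flags to the matching TxStatus
+ * register. Short frames are zero-padded since the chip cannot auto-pad.
+ */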
+static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ unsigned int entry;
+ unsigned int len = skb->len;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % NUM_TX_DESC;
+
+ /* Note: the chip doesn't have auto-pad! */
+ if (likely(len < TX_BUF_SIZE)) {
+ if (len < ETH_ZLEN)
+ memset(tp->tx_buf[entry], 0, ETH_ZLEN);
+ skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+ dev_kfree_skb(skb);
+ } else {
+ dev_kfree_skb(skb);
+ tp->stats.tx_dropped++;
+ return 0;
+ }
+
+ spin_lock_irq(&tp->lock);
+ RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
+ tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+
+ dev->trans_start = jiffies;
+
+ tp->cur_tx++;
+ wmb();
+
+ if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+ netif_stop_queue (dev);
+ spin_unlock_irq(&tp->lock);
+
+ if (netif_msg_tx_queued(tp))
+ printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
+ dev->name, len, entry);
+
+ return 0;
+}
+
+
+static void rtl8139_tx_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void *ioaddr)
+{
+ unsigned long dirty_tx, tx_left;
+
+ assert (dev != NULL);
+ assert (ioaddr != NULL);
+
+ dirty_tx = tp->dirty_tx;
+ tx_left = tp->cur_tx - dirty_tx;
+ while (tx_left > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus;
+
+ txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+ if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+ /* Note: TxCarrierLost is always asserted at 100 Mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+ /* There was a major error; log it. */
+ if (netif_msg_tx_err(tp))
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+ tp->stats.tx_errors++;
+ if (txstatus & TxAborted) {
+ tp->stats.tx_aborted_errors++;
+ RTL_W32 (TxConfig, TxClearAbt);
+ RTL_W16 (IntrStatus, TxErr);
+ wmb();
+ }
+ if (txstatus & TxCarrierLost)
+ tp->stats.tx_carrier_errors++;
+ if (txstatus & TxOutOfWindow)
+ tp->stats.tx_window_errors++;
+ } else {
+ if (txstatus & TxUnderrun) {
+ /* Add 64 to the Tx FIFO threshold. */
+ if (tp->tx_flag < 0x00300000)
+ tp->tx_flag += 0x00020000;
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+ tp->stats.tx_bytes += txstatus & 0x7ff;
+ tp->stats.tx_packets++;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ }
+
+#ifndef RTL8139_NDEBUG
+ if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ printk (KERN_ERR "%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
+ dev->name, dirty_tx, tp->cur_tx);
+ dirty_tx += NUM_TX_DESC;
+ }
+#endif /* RTL8139_NDEBUG */
+
+ /* only wake the queue if we did work, and the queue is stopped */
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ mb();
+ netif_wake_queue (dev);
+ }
+}
+
+
+/* TODO: clean this up! Rx reset need not be this intensive */
+static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
+ struct rtl8139_private *tp, void *ioaddr)
+{
+ u8 tmp8;
+#ifdef CONFIG_8139_OLD_RX_RESET
+ int tmp_work;
+#endif
+
+ if (netif_msg_rx_err (tp))
+ printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
+ dev->name, rx_status);
+ tp->stats.rx_errors++;
+ if (!(rx_status & RxStatusOK)) {
+ if (rx_status & RxTooLong) {
+ DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
+ dev->name, rx_status);
+ /* A.C.: The chip hangs here. */
+ }
+ if (rx_status & (RxBadSymbol | RxBadAlign))
+ tp->stats.rx_frame_errors++;
+ if (rx_status & (RxRunt | RxTooLong))
+ tp->stats.rx_length_errors++;
+ if (rx_status & RxCRCErr)
+ tp->stats.rx_crc_errors++;
+ } else {
+ tp->xstats.rx_lost_in_ring++;
+ }
+
+#ifndef CONFIG_8139_OLD_RX_RESET
+ tmp8 = RTL_R8 (ChipCmd);
+ RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
+ RTL_W8 (ChipCmd, tmp8);
+ RTL_W32 (RxConfig, tp->rx_config);
+ tp->cur_rx = 0;
+#else
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+
+ /* disable receive */
+ RTL_W8_F (ChipCmd, CmdTxEnb);
+ tmp_work = 200;
+ while (--tmp_work > 0) {
+ udelay(1);
+ tmp8 = RTL_R8 (ChipCmd);
+ if (!(tmp8 & CmdRxEnb))
+ break;
+ }
+ if (tmp_work <= 0)
+ printk (KERN_WARNING PFX "rx stop wait too long\n");
+ /* restart receive */
+ tmp_work = 200;
+ while (--tmp_work > 0) {
+ RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
+ udelay(1);
+ tmp8 = RTL_R8 (ChipCmd);
+ if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+ break;
+ }
+ if (tmp_work <= 0)
+ printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+
+ /* and reinitialize all rx related registers */
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+ RTL_W32 (RxConfig, tp->rx_config);
+ tp->cur_rx = 0;
+
+ DPRINTK("init buffer addresses\n");
+
+ /* Lock Config[01234] and BMCR register writes */
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* A.C.: Reset the multicast list. */
+ __set_rx_mode (dev);
+#endif
+}
+
+#if RX_BUF_IDX == 3
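+/*
+ * With the 64K receive ring (the only size configured without RxNoWrap)
+ * a frame may wrap past the end of the buffer, so copy it in two pieces.
+ */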
+static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
+ u32 offset, unsigned int size)
+{
+ u32 left = RX_BUF_LEN - offset;
+
+ if (size > left) {
+ memcpy(skb->data, ring + offset, left);
+ memcpy(skb->data+left, ring, size - left);
+ } else
+ memcpy(skb->data, ring + offset, size);
+}
+#endif
+
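+/* Acknowledge the Rx interrupt sources in IntrStatus, counting FIFO and
+ * ring overflows as receive errors. */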
+static void rtl8139_isr_ack(struct rtl8139_private *tp)
+{
+ void *ioaddr = tp->mmio_addr;
+ u16 status;
+
+ status = RTL_R16 (IntrStatus) & RxAckBits;
+
+ /* Clear out errors and receive interrupts */
+ if (likely(status != 0)) {
+ if (unlikely(status & (RxFIFOOver | RxOverflow))) {
+ tp->stats.rx_errors++;
+ if (status & RxFIFOOver)
+ tp->stats.rx_fifo_errors++;
+ }
+ RTL_W16_F (IntrStatus, RxAckBits);
+ }
+}
+
+static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
+ int budget)
+{
+ void *ioaddr = tp->mmio_addr;
+ int received = 0;
+ unsigned char *rx_ring = tp->rx_ring;
+ unsigned int cur_rx = tp->cur_rx;
+ unsigned int rx_size = 0;
+
+ DPRINTK ("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx,
+ RTL_R16 (RxBufAddr),
+ RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+
+ while (netif_running(dev) && received < budget
+ && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+ u32 ring_offset = cur_rx % RX_BUF_LEN;
+ u32 rx_status;
+ unsigned int pkt_size;
+ struct sk_buff *skb;
+
+ rmb();
+
+ /* read size+status of next frame from DMA ring buffer */
+ rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+ rx_size = rx_status >> 16;
+ pkt_size = rx_size - 4;
+
+ if (netif_msg_rx_status(tp))
+ printk(KERN_DEBUG "%s: rtl8139_rx() status %4.4x, size %4.4x,"
+ " cur %4.4x.\n", dev->name, rx_status,
+ rx_size, cur_rx);
+#if RTL8139_DEBUG > 2
+ {
+ int i;
+ DPRINTK ("%s: Frame contents ", dev->name);
+ for (i = 0; i < 70; i++)
+ printk (" %2.2x",
+ rx_ring[ring_offset + i]);
+ printk (".\n");
+ }
+#endif
+
+ /* Packet copy from FIFO still in progress.
+ * Theoretically, this should never happen
+ * since EarlyRx is disabled.
+ */
+ if (unlikely(rx_size == 0xfff0)) {
+ if (!tp->fifo_copy_timeout)
+ tp->fifo_copy_timeout = jiffies + 2;
+ else if (time_after(jiffies, tp->fifo_copy_timeout)) {
+ DPRINTK ("%s: hung FIFO. Reset.", dev->name);
+ rx_size = 0;
+ goto no_early_rx;
+ }
+ if (netif_msg_intr(tp)) {
+ printk(KERN_DEBUG "%s: fifo copy in progress.",
+ dev->name);
+ }
+ tp->xstats.early_rx++;
+ break;
+ }
+
+no_early_rx:
+ tp->fifo_copy_timeout = 0;
+
+ /* If Rx err or invalid rx_size/rx_status received
+ * (which happens if we get lost in the ring),
+ * Rx process gets reset, so we abort any further
+ * Rx processing.
+ */
+ if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+ (rx_size < 8) ||
+ (!(rx_status & RxStatusOK)))) {
+ rtl8139_rx_err (rx_status, dev, tp, ioaddr);
+ received = -1;
+ goto out;
+ }
+
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+
+ skb = dev_alloc_skb (pkt_size + 2);
+ if (likely(skb)) {
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align the IP fields. */
+#if RX_BUF_IDX == 3
+ wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
+#else
+ eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+#endif
+ skb_put (skb, pkt_size);
+
+ skb->protocol = eth_type_trans (skb, dev);
+
+ dev->last_rx = jiffies;
+ tp->stats.rx_bytes += pkt_size;
+ tp->stats.rx_packets++;
+
+ netif_receive_skb (skb);
+ } else {
+ if (net_ratelimit())
+ printk (KERN_WARNING
+ "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ tp->stats.rx_dropped++;
+ }
+ received++;
+
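+ /* Each frame in the ring is preceded by a 4-byte status+length
+ * header: advance past header and data, rounded up to a dword,
+ * and report the new read pointer (RxBufPtr lags it by 16). */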
+ cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+ RTL_W16 (RxBufPtr, (u16) (cur_rx - 16));
+
+ rtl8139_isr_ack(tp);
+ }
+
+ if (unlikely(!received || rx_size == 0xfff0))
+ rtl8139_isr_ack(tp);
+
+#if RTL8139_DEBUG > 1
+ DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+ RTL_R16 (RxBufAddr),
+ RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+#endif
+
+ tp->cur_rx = cur_rx;
+
+ /*
+ * If a FIFO copy was still pending, claim the full budget so that
+ * NAPI polls us again instead of re-enabling the Rx interrupt.
+ */
+ if (tp->fifo_copy_timeout)
+ received = budget;
+
+out:
+ return received;
+}
+
+
+static void rtl8139_weird_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void *ioaddr,
+ int status, int link_changed)
+{
+ DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
+ dev->name, status);
+
+ assert (dev != NULL);
+ assert (tp != NULL);
+ assert (ioaddr != NULL);
+
+ /* Update the error count. */
+ tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ if ((status & RxUnderrun) && link_changed &&
+ (tp->drv_flags & HAS_LNK_CHNG)) {
+ rtl_check_media(dev, 0);
+ status &= ~RxUnderrun;
+ }
+
+ if (status & (RxUnderrun | RxErr))
+ tp->stats.rx_errors++;
+
+ if (status & PCSTimeout)
+ tp->stats.rx_length_errors++;
+ if (status & RxUnderrun)
+ tp->stats.rx_fifo_errors++;
+ if (status & PCIErr) {
+ u16 pci_cmd_status;
+ pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+ pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
+
+ printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
+ dev->name, pci_cmd_status);
+ }
+}
+
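+/*
+ * NAPI poll routine: drain up to the given budget of packets from the
+ * receive ring while the Rx interrupt sources are masked, then re-enable
+ * them and complete the poll once the ring is empty.
+ */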
+static int rtl8139_poll(struct net_device *dev, int *budget)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ int orig_budget = min(*budget, dev->quota);
+ int done = 1;
+
+ spin_lock(&tp->rx_lock);
+ if (likely(RTL_R16(IntrStatus) & RxAckBits)) {
+ int work_done;
+
+ work_done = rtl8139_rx(dev, tp, orig_budget);
+ if (likely(work_done > 0)) {
+ *budget -= work_done;
+ dev->quota -= work_done;
+ done = (work_done < orig_budget);
+ }
+ }
+
+ if (done) {
+ /*
+ * Order is important since data can get interrupted
+ * again when we think we are done.
+ */
+ local_irq_disable();
+ RTL_W16_F(IntrMask, rtl8139_intr_mask);
+ __netif_rx_complete(dev);
+ local_irq_enable();
+ }
+ spin_unlock(&tp->rx_lock);
+
+ return !done;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ u16 status, ackstat;
+ int link_changed = 0; /* avoid bogus "uninit" warning */
+ int handled = 0;
+
+ spin_lock (&tp->lock);
+ status = RTL_R16 (IntrStatus);
+
+ /* shared irq? */
+ if (unlikely((status & rtl8139_intr_mask) == 0))
+ goto out;
+
+ handled = 1;
+
+ /* h/w no longer present (hotplug?) or major error, bail */
+ if (unlikely(status == 0xFFFF))
+ goto out;
+
+ /* close possible races with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ RTL_W16 (IntrMask, 0);
+ goto out;
+ }
+
+ /* Acknowledge all of the current interrupt sources ASAP, but
+ first get an additional status bit from CSCR. */
+ if (unlikely(status & RxUnderrun))
+ link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
+
+ ackstat = status & ~(RxAckBits | TxErr);
+ if (ackstat)
+ RTL_W16 (IntrStatus, ackstat);
+
+ /* Receive packets are processed by poll routine.
+ If not running start it now. */
+ if (status & RxAckBits){
+ if (netif_rx_schedule_prep(dev)) {
+ RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
+ __netif_rx_schedule (dev);
+ }
+ }
+
+ /* Check uncommon events with one test. */
+ if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
+ rtl8139_weird_interrupt (dev, tp, ioaddr,
+ status, link_changed);
+
+ if (status & (TxOK | TxErr)) {
+ rtl8139_tx_interrupt (dev, tp, ioaddr);
+ if (status & TxErr)
+ RTL_W16 (IntrStatus, TxErr);
+ }
+ out:
+ spin_unlock (&tp->lock);
+
+ DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
+ dev->name, RTL_R16 (IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void rtl8139_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ rtl8139_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int rtl8139_close (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ int ret = 0;
+ unsigned long flags;
+
+ netif_stop_queue (dev);
+
+ if (tp->thr_pid >= 0) {
+ tp->time_to_die = 1;
+ wmb();
+ ret = kill_proc (tp->thr_pid, SIGTERM, 1);
+ if (ret) {
+ printk (KERN_ERR "%s: unable to signal thread\n", dev->name);
+ return ret;
+ }
+ wait_for_completion (&tp->thr_exited);
+ }
+
+ if (netif_msg_ifdown(tp))
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, RTL_R16 (IntrStatus));
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Stop the chip's Tx and Rx DMA processes. */
+ RTL_W8 (ChipCmd, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ RTL_W16 (IntrMask, 0);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ synchronize_irq (dev->irq); /* racy, but that's ok here */
+ free_irq (dev->irq, dev);
+
+ rtl8139_tx_clear (tp);
+
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ tp->rx_ring = NULL;
+ tp->tx_bufs = NULL;
+
+ /* Green! Put the chip in low-power mode. */
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ return 0;
+}
+
+
+/* Get the ethtool Wake-on-LAN settings. Assumes that wol points to
+ kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and
+ other threads or interrupts aren't messing with the 8139. */
+static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ void *ioaddr = np->mmio_addr;
+
+ spin_lock_irq(&np->lock);
+ if (rtl_chip_info[np->chipset].flags & HasLWake) {
+ u8 cfg3 = RTL_R8 (Config3);
+ u8 cfg5 = RTL_R8 (Config5);
+
+ wol->supported = WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+
+ wol->wolopts = 0;
+ if (cfg3 & Cfg3_LinkUp)
+ wol->wolopts |= WAKE_PHY;
+ if (cfg3 & Cfg3_Magic)
+ wol->wolopts |= WAKE_MAGIC;
+ /* (KON)FIXME: See how netdev_set_wol() handles the
+ following constants. */
+ if (cfg5 & Cfg5_UWF)
+ wol->wolopts |= WAKE_UCAST;
+ if (cfg5 & Cfg5_MWF)
+ wol->wolopts |= WAKE_MCAST;
+ if (cfg5 & Cfg5_BWF)
+ wol->wolopts |= WAKE_BCAST;
+ }
+ spin_unlock_irq(&np->lock);
+}
+
+
+/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes
+ that wol points to kernel memory and other threads or interrupts
+ aren't messing with the 8139. */
+static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ void *ioaddr = np->mmio_addr;
+ u32 support;
+ u8 cfg3, cfg5;
+
+ support = ((rtl_chip_info[np->chipset].flags & HasLWake)
+ ? (WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
+ : 0);
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
+ if (wol->wolopts & WAKE_PHY)
+ cfg3 |= Cfg3_LinkUp;
+ if (wol->wolopts & WAKE_MAGIC)
+ cfg3 |= Cfg3_Magic;
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config3, cfg3);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF);
+ /* (KON)FIXME: These are untested. We may have to set the
+ CRC0, Wakeup0 and LSBCRC0 registers too, but I have no
+ documentation. */
+ if (wol->wolopts & WAKE_UCAST)
+ cfg5 |= Cfg5_UWF;
+ if (wol->wolopts & WAKE_MCAST)
+ cfg5 |= Cfg5_MWF;
+ if (wol->wolopts & WAKE_BCAST)
+ cfg5 |= Cfg5_BWF;
+ RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+}
+
+static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+ info->regdump_len = np->regs_len;
+}
+
+static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ int rc;
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+ return rc;
+}
+
+static int rtl8139_nway_reset(struct net_device *dev)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 rtl8139_get_link(struct net_device *dev)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 rtl8139_get_msglevel(struct net_device *dev)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ np->msg_enable = datum;
+}
+
+/* TODO: we are too slack to do reg dumping for pio, for now */
+#ifdef CONFIG_8139TOO_PIO
+#define rtl8139_get_regs_len NULL
+#define rtl8139_get_regs NULL
+#else
+static int rtl8139_get_regs_len(struct net_device *dev)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ return np->regs_len;
+}
+
+static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+
+ regs->version = RTL_REGS_VER;
+
+ spin_lock_irq(&np->lock);
+ memcpy_fromio(regbuf, np->mmio_addr, regs->len);
+ spin_unlock_irq(&np->lock);
+}
+#endif /* CONFIG_8139TOO_PIO */
+
+static int rtl8139_get_stats_count(struct net_device *dev)
+{
+ return RTL_NUM_STATS;
+}
+
+static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+
+ data[0] = np->xstats.early_rx;
+ data[1] = np->xstats.tx_buf_mapped;
+ data[2] = np->xstats.tx_timeouts;
+ data[3] = np->xstats.rx_lost_in_ring;
+}
+
+static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
+}
+
+static struct ethtool_ops rtl8139_ethtool_ops = {
+ .get_drvinfo = rtl8139_get_drvinfo,
+ .get_settings = rtl8139_get_settings,
+ .set_settings = rtl8139_set_settings,
+ .get_regs_len = rtl8139_get_regs_len,
+ .get_regs = rtl8139_get_regs,
+ .nway_reset = rtl8139_nway_reset,
+ .get_link = rtl8139_get_link,
+ .get_msglevel = rtl8139_get_msglevel,
+ .set_msglevel = rtl8139_set_msglevel,
+ .get_wol = rtl8139_get_wol,
+ .set_wol = rtl8139_set_wol,
+ .get_strings = rtl8139_get_strings,
+ .get_stats_count = rtl8139_get_stats_count,
+ .get_ethtool_stats = rtl8139_get_ethtool_stats,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rtl8139_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave (&tp->lock, flags);
+ tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+ spin_unlock_irqrestore (&tp->lock, flags);
+ }
+
+ return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static void __set_rx_mode (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+ u32 tmp;
+
+ DPRINTK ("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
+ dev->name, dev->flags, RTL_R32 (RxConfig));
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ /* We can safely update without stopping the chip. */
+ tmp = rtl8139_rx_config | rx_mode;
+ if (tp->rx_config != tmp) {
+ RTL_W32_F (RxConfig, tmp);
+ tp->rx_config = tmp;
+ }
+ RTL_W32_F (MAR0 + 0, mc_filter[0]);
+ RTL_W32_F (MAR0 + 4, mc_filter[1]);
+}
+
+static void rtl8139_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore (&tp->lock, flags);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ pci_save_state (pdev);
+
+ if (!netif_running (dev))
+ return 0;
+
+ netif_device_detach (dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ RTL_W16 (IntrMask, 0);
+ RTL_W8 (ChipCmd, 0);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ pci_set_power_state (pdev, PCI_D3hot);
+
+ return 0;
+}
+
+
+static int rtl8139_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ pci_restore_state (pdev);
+ if (!netif_running (dev))
+ return 0;
+ pci_set_power_state (pdev, PCI_D0);
+ rtl8139_init_ring (dev);
+ rtl8139_hw_start (dev);
+ netif_device_attach (dev);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver rtl8139_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rtl8139_pci_tbl,
+ .probe = rtl8139_init_one,
+ .remove = __devexit_p(rtl8139_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rtl8139_suspend,
+ .resume = rtl8139_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init rtl8139_init_module (void)
+{
+ /* when we're a module, we always print a version message,
+ * even if no 8139 board is found.
+ */
+#ifdef MODULE
+ printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+#endif
+
+ return pci_module_init (&rtl8139_pci_driver);
+}
+
+
+static void __exit rtl8139_cleanup_module (void)
+{
+ pci_unregister_driver (&rtl8139_pci_driver);
+}
+
+
+module_init(rtl8139_init_module);
+module_exit(rtl8139_cleanup_module);
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
new file mode 100644
index 000000000000..65f97b1dc581
--- /dev/null
+++ b/drivers/net/82596.c
@@ -0,0 +1,1618 @@
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain, when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+
+static char version[] __initdata =
+ "82596.c $Revision: 1.5 $\n";
+
+#define DRV_NAME "82596"
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x,y) if (i596_debug & (x)) y
+
+
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#define ENABLE_MVME16x_NET
+#endif
+#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#define ENABLE_BVME6000_NET
+#endif
+#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
+#define ENABLE_APRICOT
+#endif
+
+#ifdef ENABLE_MVME16x_NET
+#include <asm/mvme16xhw.h>
+#endif
+#ifdef ENABLE_BVME6000_NET
+#include <asm/bvme6000hw.h>
+#endif
+
+/*
+ * Define various macros for Channel Attention, word swapping etc., dependent
+ * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
+ */
+
+#ifdef __mc68000__
+#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define ISCP_BUSY 0x00010000
+#define MACH_IS_APRICOT 0
+#else
+#define WSWAPrfd(x) ((struct i596_rfd *)(x))
+#define WSWAPrbd(x) ((struct i596_rbd *)(x))
+#define WSWAPiscp(x) ((struct i596_iscp *)(x))
+#define WSWAPscb(x) ((struct i596_scb *)(x))
+#define WSWAPcmd(x) ((struct i596_cmd *)(x))
+#define WSWAPtbd(x) ((struct i596_tbd *)(x))
+#define WSWAPchar(x) ((char *)(x))
+#define ISCP_BUSY 0x0001
+#define MACH_IS_APRICOT 1
+#endif
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
+
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define I596_TOTAL_SIZE 17
+
+#define I596_NULL ((void *)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
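+/* SCB command word: Command Unit and Receive Unit control values */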
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT 5
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ unsigned long ca;
+};
+
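+/* i596_tbd size field: end-of-frame flag in bit 15, byte count in the low bits */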
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ char *data;
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *b_next; /* Address from i596 viewpoint */
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *b_next; /* Address from i596 viewpoint */
+ struct i596_rbd *rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+};
+
+struct i596_rbd {
+ unsigned short count;
+ unsigned short zero1;
+ struct i596_rbd *b_next;
+ unsigned char *b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ struct i596_rbd *b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+};
+
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
+
+struct i596_private {
+ volatile struct i596_scp scp;
+ volatile struct i596_iscp iscp;
+ volatile struct i596_scb scb;
+ struct sa_cmd sa_cmd;
+ struct cf_cmd cf_cmd;
+ struct tdr_cmd tdr_cmd;
+ struct mc_cmd mc_cmd;
+ unsigned long stat;
+ int last_restart __attribute__((aligned(4)));
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct net_device_stats stats;
+ struct i596_rfd rfds[RX_RING_SIZE];
+ struct i596_rbd rbds[RX_RING_SIZE];
+ struct tx_cmd tx_cmds[TX_RING_SIZE];
+ struct i596_tbd tbds[TX_RING_SIZE];
+ int next_tx_cmd;
+ spinlock_t lock;
+};
+
+static char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+#ifdef CONFIG_VME
+ 0xc0, /* don't save bad frames */
+#else
+ 0x80, /* don't save bad frames */
+#endif
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int i596_close(struct net_device *dev);
+static struct net_device_stats *i596_get_stats(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 25;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+
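+/* Assert Channel Attention so the 82596 examines the SCB; the mechanism
+ * is board specific.
+ */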
+static inline void CA(struct net_device *dev)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ ((struct i596_reg *) dev->base_addr)->ca = 1;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile u32 i;
+
+ i = *(volatile u32 *) (dev->base_addr);
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ if (MACH_IS_APRICOT) {
+ outw(0, (short) (dev->base_addr) + 4);
+ }
+#endif
+}
+
+
+static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
+ p->porthi = ((c) | (u32) (x)) & 0xffff;
+ p->portlo = ((c) | (u32) (x)) >> 16;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ u32 v = (u32) (c) | (u32) (x);
+ v = ((u32) (v) << 16) | ((u32) (v) >> 16);
+ *(volatile u32 *) dev->base_addr = v;
+ udelay(1);
+ *(volatile u32 *) dev->base_addr = v;
+ }
+#endif
+}
+
+
+static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->iscp.stat)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->scb.command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
+{
+ volatile struct i596_cmd *c = cmd;
+
+ while (--delcnt && c->command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s.\n", dev->name, str);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
+ &lp->scp, lp->scp.sysbus, lp->scp.iscp);
+ printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
+ &lp->iscp, lp->iscp.stat, lp->iscp.scb);
+ printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %p, .rfd = %p\n",
+ &lp->scb, lp->scb.status, lp->scb.command,
+ lp->scb.cmd, lp->scb.rfd);
+ printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx,"
+ " over %lx, rcvdt %lx, short %lx\n",
+ lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
+ lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
+ cmd = lp->cmd_head;
+ while (cmd != I596_NULL) {
+ printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
+ cmd, cmd->status, cmd->command, cmd->b_next);
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk(KERN_ERR "rfd_head = %p\n", rfd);
+ do {
+ printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
+ " count %04x\n",
+ rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
+ rfd->count);
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk(KERN_ERR "rbd_head = %p\n", rbd);
+ do {
+ printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n",
+ rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+}
+
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+static irqreturn_t i596_error(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x28] = 1;
+ pcc2[0x2b] = 0x1d;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+ printk(KERN_ERR "%s: Error interrupt\n", dev->name);
+ i596_display_data(dev);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+
+ if (skb == NULL)
+ panic("82596: alloc_skb() failed");
+ skb->dev = dev;
+ rbd->v_next = rbd+1;
+ rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
+ rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->tail;
+ rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
+ rbd->size = PKT_BUF_SZ;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
+#endif
+ }
+ lp->rbd_head = lp->rbds;
+ rbd = lp->rbds + rx_ring_size - 1;
+ rbd->v_next = lp->rbds;
+ rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
+ rfd->cmd = CMD_FLEX;
+ }
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd = lp->rfds;
+ rfd->rbd = lp->rbd_head;
+ rfd->v_prev = lp->rfds + rx_ring_size - 1;
+ rfd = lp->rfds + rx_ring_size - 1;
+ rfd->v_next = lp->rfds;
+ rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+}
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dev_kfree_skb(rbd->skb);
+ }
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ lp->rfds[i].rbd = I596_NULL;
+ lp->rfds[i].cmd = CMD_FLEX;
+ }
+ lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ lp->rbd_head = lp->rbds;
+ lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
+}
+
+
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET)
+ short ioaddr = dev->base_addr;
+#endif
+ unsigned long flags;
+
+ MPU_PORT(dev, PORT_RESET, NULL);
+
+ udelay(100); /* Wait 100us - seems to help */
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints for now */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x48;
+ /* Following disables snooping. Snooping is not required
+ * as we make appropriate use of non-cached pages for
+ * shared data, and cache_push/cache_clear.
+ */
+ pcc2[0x2b] = 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+ /* change the scp address */
+
+ MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
+
+#elif defined(ENABLE_APRICOT)
+
+ {
+ u32 scp = virt_to_bus(&lp->scp);
+
+ /* change the scp address */
+ outw(0, ioaddr);
+ outw(0, ioaddr);
+ outb(4, ioaddr + 0xf);
+ outw(scp | 2, ioaddr);
+ outw(scp >> 16, ioaddr);
+ }
+#endif
+
+ lp->last_cmd = jiffies;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x)
+ lp->scp.sysbus = 0x00000054;
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000)
+ lp->scp.sysbus = 0x0000004c;
+#endif
+#ifdef ENABLE_APRICOT
+ if (MACH_IS_APRICOT)
+ lp->scp.sysbus = 0x00440000;
+#endif
+
+ lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
+ lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
+ lp->iscp.stat = ISCP_BUSY;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = I596_NULL;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ lp->scb.t_on = 7 * 25;
+ lp->scb.t_off = 1 * 25;
+ }
+#endif
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+#if defined(ENABLE_APRICOT)
+ (void) inb(ioaddr + 0x10);
+ outb(4, ioaddr + 0xf);
+#endif
+ CA(dev);
+
+ if (wait_istat(dev,lp,1000,"initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+ lp->scb.command = 0;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Enable ints, etc. now */
+ pcc2[0x2a] = 0x55; /* Edge sensitive */
+ pcc2[0x2b] = 0x15;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 3;
+ }
+#endif
+
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
+ memcpy(lp->cf_cmd.i596_config, init_setup, 14);
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ lp->sa_cmd.cmd.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->sa_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+ lp->tdr_cmd.cmd.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed;
+ }
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+ lp->scb.command = RX_START;
+ CA(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"RX_START not processed"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
+ return 0;
+
+failed:
+ printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
+ MPU_PORT(dev, PORT_RESET, NULL);
+ return -1;
+}
+
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = I596_NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr)
+ rbd = lp->rbd_head;
+ else {
+ printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = I596_NULL;
+ }
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
+ /* a good frame */
+ int pkt_len = rbd->count & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ /* Get fresh skbuff to replace filled one. */
+ newskb = dev_alloc_skb(PKT_BUF_SZ);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ newskb->dev = dev;
+ rbd->v_data = newskb->tail;
+ rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
+#endif
+ }
+ else
+ skb = dev_alloc_skb(pkt_len + 2);
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ else {
+ skb->dev = dev;
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->len = pkt_len;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(rbd->skb->tail),
+ pkt_len);
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+ }
+ }
+ else {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ lp->stats.rx_errors++;
+ if ((rfd->stat) & 0x0001)
+ lp->stats.collisions++;
+ if ((rfd->stat) & 0x0080)
+ lp->stats.rx_length_errors++;
+ if ((rfd->stat) & 0x0100)
+ lp->stats.rx_over_errors++;
+ if ((rfd->stat) & 0x0200)
+ lp->stats.rx_fifo_errors++;
+ if ((rfd->stat) & 0x0400)
+ lp->stats.rx_frame_errors++;
+ if ((rfd->stat) & 0x0800)
+ lp->stats.rx_crc_errors++;
+ if ((rfd->stat) & 0x1000)
+ lp->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != I596_NULL && (rbd->count & 0x4000)) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+ rfd->count = 0;
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = CMD_FLEX;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ rfd = lp->rfd_head;
+ }
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
+
+ return 0;
+}
+
+
+static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != I596_NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ dev_kfree_skb(skb);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ ptr->v_next = ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = ptr->b_next = I596_NULL;
+ }
+ }
+
+ wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
+ lp->scb.cmd = I596_NULL;
+}
+
+static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev,lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->v_next = cmd->b_next = I596_NULL;
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != I596_NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev,lp,100,"i596_add_cmd timed out");
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
+ lp->scb.command = CUC_START;
+ CA(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ int res = 0;
+
+ DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (request_irq(0x56, i596_error, 0, "i82596_error", dev))
+ return -EAGAIN;
+ }
+#endif
+ init_rx_bufs(dev);
+
+ netif_start_queue(dev);
+
+ /* Initialize the 82596 memory */
+ if (init_i596_mem(dev)) {
+ res = -EAGAIN;
+ free_irq(dev->irq, dev);
+ }
+
+ return res;
+}
+
+static void i596_tx_timeout (struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ lp->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
+ lp->scb.command = CUC_START | RX_START;
+ CA (dev);
+ lp->last_restart = lp->stats.tx_packets;
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+ dev->trans_start = jiffies;
+
+ DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%x) called\n", dev->name,
+ skb->len, (unsigned int)skb->data));
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
+ dev->name);
+ lp->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX | CmdTx;
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = EOF | length;
+
+ tbd->data = WSWAPchar(virt_to_bus(skb->data));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys(skb->data), length);
+#endif
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ int i;
+
+ printk(KERN_DEBUG "i596 0x%p, ", add);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", add[i + 6]);
+ printk(" -->");
+ for (i = 0; i < 6; i++)
+ printk(" %02X", add[i]);
+ printk(" %02X%02X, %s\n", add[12], add[13], str);
+}
+
+static int io = 0x300;
+static int irq = 10;
+
+struct net_device * __init i82596_probe(int unit)
+{
+ struct net_device *dev;
+ int i;
+ struct i596_private *lp;
+ char eth_addr[8];
+ static int probed;
+ int err;
+
+ if (probed)
+ return ERR_PTR(-ENODEV);
+ probed++;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ } else {
+ dev->base_addr = io;
+ dev->irq = irq;
+ }
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
+ printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
+ err = -ENODEV;
+ goto out;
+ }
+ memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
+ dev->base_addr = MVME_I596_BASE;
+ dev->irq = (unsigned) MVME16x_IRQ_I596;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
+ unsigned char msr = rtc[3];
+ int i;
+
+ rtc[3] |= 0x80;
+ for (i = 0; i < 6; i++)
+ eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
+ rtc[3] = msr;
+ dev->base_addr = BVME_I596_BASE;
+ dev->irq = (unsigned) BVME_IRQ_I596;
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ {
+ int checksum = 0;
+ int ioaddr = 0x300;
+
+ /* this is easy; the ethernet interface can only be at 0x300 */
+ /* first check nothing is already registered here */
+
+ if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
+ printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
+ err = -EBUSY;
+ goto out;
+ }
+
+ for (i = 0; i < 8; i++) {
+ eth_addr[i] = inb(ioaddr + 8 + i);
+ checksum += eth_addr[i];
+ }
+
+ /* The checksum is a multiple of 0x100 - got this wrong the first
+ time; some machines have 0x100, some 0x200. The DOS driver
+ doesn't even bother with the checksum.
+ Some other boards trip the checksum... but then appear as
+ ether address 0. Trap these - AC */
+
+ if ((checksum % 0x100) ||
+ (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
+ err = -ENODEV;
+ goto out1;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = 10;
+ }
+#endif
+ dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
+ if (!dev->mem_start) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+
+ for (i = 0; i < 6; i++)
+ DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+
+ DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
+
+ /* The 82596-specific entries in the device structure. */
+ SET_MODULE_OWNER(dev);
+ dev->open = i596_open;
+ dev->stop = i596_close;
+ dev->hard_start_xmit = i596_start_xmit;
+ dev->get_stats = i596_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->tx_timeout = i596_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->priv = (void *)(dev->mem_start);
+
+ lp = dev->priv;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
+ dev->name, (unsigned long)lp,
+ sizeof(struct i596_private), (unsigned long)&lp->scb));
+ memset((void *) lp, 0, sizeof(struct i596_private));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
+ cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
+ kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
+#endif
+ lp->scb.command = 0;
+ lp->scb.cmd = I596_NULL;
+ lp->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+out2:
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+ kernel_set_cachemode((void *)(dev->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev->mem_start));
+out1:
+#ifdef ENABLE_APRICOT
+ release_region(dev->base_addr, I596_TOTAL_SIZE);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ short ioaddr;
+ unsigned short status, ack_cmd = 0;
+ int handled = 0;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
+ i596_error(irq, dev_id, regs);
+ return IRQ_HANDLED;
+ }
+ }
+#endif
+ if (dev == NULL) {
+ printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ status = lp->scb.status;
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, irq, status));
+
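+ /* Remember the CX/FR/CNA/RNR status bits; writing them back to the
+ SCB command word later acknowledges them */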
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ handled = 1;
+ if ((status & 0x8000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+
+ while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
+ lp->cmd_head->status, lp->cmd_head->command));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if ((ptr->status) & STAT_OK) {
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
+ } else {
+ lp->stats.tx_errors++;
+ if ((ptr->status) & 0x0020)
+ lp->stats.collisions++;
+ if (!((ptr->status) & 0x0040))
+ lp->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800)
+ lp->stats.collisions++;
+ if ((ptr->status) & 0x1000)
+ lp->stats.tx_aborted_errors++;
+ }
+
+ dev_kfree_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = ((struct tdr_cmd *)ptr)->status;
+
+ if (status & 0x8000) {
+ DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
+ } else {
+ if (status & 0x4000)
+ printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk(KERN_ERR "%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk(KERN_ERR "%s: Short circuit.\n", dev->name);
+
+ DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+ case CmdMulticastList:
+ /* Zap command so set_multicast_list() knows it is free */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = ptr->b_next = I596_NULL;
+ lp->last_cmd = jiffies;
+ }
+
+ ptr = lp->cmd_head;
+ while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
+ ptr->command &= 0x1fff;
+ ptr = ptr->v_next;
+ }
+
+ if ((lp->cmd_head != I596_NULL))
+ ack_cmd |= CUC_START;
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ ack_cmd |= RX_START;
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ lp->scb.command = ack_cmd;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ /* Ack the interrupt */
+
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x2a] |= 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ (void) inb(ioaddr + 0x10);
+ outb(4, ioaddr + 0xf);
+#endif
+ CA(dev);
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"close1 timed out");
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ wait_cmd(dev,lp,100,"close2 timed out");
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT,i596_display_data(dev));
+ i596_cleanup_cmd(dev,lp);
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x40;
+ pcc2[0x2b] = 0x40; /* Set snooping bits now! */
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *
+ i596_get_stats(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, dev->mc_count,
+ dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+ if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
+ return;
+
+ if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+ }
+
+ cnt = dev->mc_count;
+ if (cnt > MAX_MC_CNT)
+ {
+ cnt = MAX_MC_CNT;
+ printk(KERN_ERR "%s: Only %d multicast addresses supported",
+ dev->name, cnt);
+ }
+
+ if (dev->mc_count > 0) {
+ struct dev_mc_list *dmi;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
+ return;
+ cmd = &lp->mc_cmd;
+ cmd->cmd.command = CmdMulticastList;
+ cmd->mc_cnt = dev->mc_count * 6;
+ cp = cmd->mc_addrs;
+ for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
+ memcpy(cp, dmi->dmi_addr, 6);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
+ }
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
+
+#ifdef MODULE
+static struct net_device *dev_82596;
+
+#ifdef ENABLE_APRICOT
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "Apricot IRQ number");
+#endif
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "i82596 debug mask");
+
+int init_module(void)
+{
+ if (debug >= 0)
+ i596_debug = debug;
+ dev_82596 = i82596_probe(-1);
+ if (IS_ERR(dev_82596))
+ return PTR_ERR(dev_82596);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(dev_82596);
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+
+ kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev_82596->mem_start));
+#ifdef ENABLE_APRICOT
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
+#endif
+ free_netdev(dev_82596);
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"
+ * End:
+ */
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
new file mode 100644
index 000000000000..bab16bcc9ae5
--- /dev/null
+++ b/drivers/net/8390.c
@@ -0,0 +1,1130 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code (not counting the
+ PCMCIA ones), it is easy to break some card with what seems like
+ a simple, innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+ Paul Gortmaker : Remove old unused single Tx buffer code.
+ Hayato Fujiwara : Add m32r support.
+ Paul Gortmaker : use skb_padto() instead of stack scratch area
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#define NS8390_CORE
+#include "8390.h"
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock to protect
+ * access to page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we can just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq", "sync irqs",
+ * enter the lock, take the queued irq. So we waddle instead of flying.
+ *
+ * Finally by special arrangement for the purpose of being generally
+ * annoying the transmit function is called bh atomic. That places
+ * restrictions on the user context callers as disable_irq won't save
+ * them.
+ */
+
+
+
+/**
+ * ei_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+int ei_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+ wrapper that does e.g. media check & then calls ei_tx_timeout. */
+ if (dev->tx_timeout == NULL)
+ dev->tx_timeout = ei_tx_timeout;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ NS8390_init(dev, 1);
+ /* Set the flag before we drop the lock. That way the IRQ arrives
+ after it's set and we get no silly warnings */
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/**
+ * ei_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
+ */
+int ei_close(struct net_device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ NS8390_init(dev, 0);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+void ei_tx_timeout(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ unsigned long flags;
+
+#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
+ unsigned long icucr;
+
+ local_irq_save(flags);
+ icucr = inl(ICUCR1);
+ icucr |= M32R_ICUCR_ISMOD11;
+ outl(icucr, ICUCR1);
+ local_irq_restore(flags);
+#endif
+ ei_local->stat.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = inb(e8390_base+EN0_TSR);
+ isr = inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ NS8390_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int send_length = skb->len, output_page;
+ unsigned long flags;
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ send_length = ETH_ZLEN;
+ }
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here, otherwise the IRQ handler
+ on another CPU can flip the window and race the IRQ mask set. If we
+ don't lock, we end up trashing the mcast filter instead of disabling irqs */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync(dev->irq);
+
+ spin_lock(&ei_local->page_lock);
+
+ ei_local->irqlock = 1;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ ei_local->stat.tx_errors++;
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, send_length, skb->data, output_page);
+
+ if (! ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+
+ dev_kfree_skb (skb);
+ ei_local->stat.tx_bytes += send_length;
+
+ return 0;
+}
+
+/**
+ * ei_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ * @regs: unused
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ long e8390_base;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local;
+
+ if (dev == NULL)
+ {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ if (ei_local->irqlock)
+ {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_NONE;
+ }
+
+ /* Change to page 0 and read the intr status reg. */
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE)
+ {
+ if (!netif_running(dev)) {
+ printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ /* rmk - acknowledge the interrupts */
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC)
+ {
+ outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+ }
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug)
+ {
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if(interrupts!=0xFF)
+ printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_RETVAL(nr_serviced > 0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void ei_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ ei_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int status = inb(e8390_base + EN0_TSR);
+
+ outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+// dev->name, ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ ei_local->stat.tx_aborted_errors++;
+ ei_local->stat.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ ei_local->stat.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = inb_p(e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+ printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
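+ /* Each ring page is 256 bytes and the 4-byte packet header is stored
+ ahead of the data, hence the extra (pkt_len+4)>>8 pages. */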
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for bogosity warned by 3c503 book: the status byte is never
+ written. This happened a lot during testing! This code should be
+ cleaned up someday. */
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ ei_local->current_page = rxing_page;
+ outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ ei_local->stat.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ ei_local->stat.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ei_local->stat.rx_packets++;
+ ei_local->stat.rx_bytes += pkt_len;
+ if (pkt_stat & ENRSR_PHY)
+ ei_local->stat.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ ei_local->stat.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here to guard against bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+ return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+ outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &ei_local->stat;
+
+ spin_lock_irqsave(&ei_local->page_lock,flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &ei_local->stat;
+}
+
+/*
+ * Form the 64-bit 8390 multicast table from the linked list of addresses
+ * associated with this dev structure.
+ */
+
+static inline void make_mc_bits(u8 *bits, struct net_device *dev)
+{
+ struct dev_mc_list *dmi;
+
+ for (dmi=dev->mc_list; dmi; dmi=dmi->next)
+ {
+ u32 crc;
+ if (dmi->dmi_addrlen != ETH_ALEN)
+ {
+ printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
+ continue;
+ }
+ crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ /*
+ * The 8390 uses the 6 most significant bits of the
+ * CRC to index the multicast table.
+ */
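+ /*
+ * The top three bits select one of the eight filter bytes and the
+ * next three select the bit within it; e.g. a CRC whose six MSBs
+ * are 101 011 sets bit 3 of mcfilter[5].
+ */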
+ bits[crc>>29] |= (1<<((crc>>26)&7));
+ }
+}
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ int i;
+ struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ memset(ei_local->mcfilter, 0, 8);
+ if (dev->mc_list)
+ make_mc_bits(ei_local->mcfilter, dev);
+ }
+ else
+ memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
+
+ /*
+ * DP8390 manuals don't specify any magic sequence for altering
+ * the multicast regs on an already running card. To be safe, we
+ * ensure multicast mode is off prior to loading up the new hash
+ * table. If this proves to be not enough, we can always resort
+ * to stopping the NIC, loading the table and then restarting.
+ *
+ * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
+ * Elite16) appear to be write-only. The NS 8390 data sheet lists
+ * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
+ * Ultra32 EISA) appears to have this bug fixed.
+ */
+
+ if (netif_running(dev))
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+ outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
+ for(i = 0; i < 8; i++)
+ {
+ outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
+#ifndef BUG_83C690
+ if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
+ printk(KERN_ERR "Multicast filter read/write mismatch %d\n",i);
+#endif
+ }
+ outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
+
+ if(dev->flags&IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
+ else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+ }
+
+/*
+ * Called without lock held. This is invoked from user context and may
+ * be parallel to just about everything else. It's also fairly quick and
+ * not called too often. Must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/**
+ * ethdev_setup - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static void ethdev_setup(struct net_device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ if (ei_debug > 1)
+ printk(version);
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ spin_lock_init(&ei_local->page_lock);
+}
+
+/**
+ * alloc_ei_netdev - alloc_etherdev counterpart for 8390
+ * @size: extra bytes to allocate
+ *
+ * Allocate 8390-specific net_device.
+ */
+struct net_device *__alloc_ei_netdev(int size)
+{
+ return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
+ ethdev_setup);
+}
+
+
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * NS8390_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean; non-zero to start chip processing
+ *
+ * Must be called with lock held.
+ */
+
+void NS8390_init(struct net_device *dev, int startp)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int i;
+ int endcfg = ei_local->word16
+ ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
+ : 0x48;
+
+ if(sizeof(struct e8390_pkt_hdr)!=4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for(i = 0; i < 6; i++)
+ {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if (ei_debug > 1 && inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+ printk(KERN_ERR "Hw. address read/write mismatch %d\n",i);
+ }
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp)
+ {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
+
+ if (inb_p(e8390_base) & E8390_TRANS)
+ {
+ printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
+
+EXPORT_SYMBOL(ei_open);
+EXPORT_SYMBOL(ei_close);
+EXPORT_SYMBOL(ei_interrupt);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+EXPORT_SYMBOL(ei_poll);
+#endif
+EXPORT_SYMBOL(NS8390_init);
+EXPORT_SYMBOL(__alloc_ei_netdev);
+
+#if defined(MODULE)
+
+int init_module(void)
+{
+ return 0;
+}
+
+void cleanup_module(void)
+{
+}
+
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
new file mode 100644
index 000000000000..599b68d8c45f
--- /dev/null
+++ b/drivers/net/8390.h
@@ -0,0 +1,214 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/config.h>
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
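+/* Two ping-pong transmit buffers of 6 pages (6*256 = 1536 bytes) each,
+   enough for a maximum-length Ethernet frame. */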
+#define TX_PAGES 12 /* Two Tx slots */
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
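+/* The NIC writes this 4-byte header in front of every received frame in
+   the ring buffer; NS8390_init() sanity-checks that it is not padded. */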
+
+#ifdef notdef
+extern int ei_debug;
+#else
+#define ei_debug 1
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void ei_poll(struct net_device *dev);
+#endif
+
+extern void NS8390_init(struct net_device *dev, int startp);
+extern int ei_open(struct net_device *dev);
+extern int ei_close(struct net_device *dev);
+extern irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+extern struct net_device *__alloc_ei_netdev(int size);
+static inline struct net_device *alloc_ei_netdev(void)
+{
+ return __alloc_ei_netdev(0);
+}
+
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct net_device *);
+ void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct net_device *, int, const unsigned char *, int);
+ void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+ unsigned long rmem_start;
+ unsigned long rmem_end;
+ void __iomem *mem;
+ unsigned char mcfilter[8];
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */
+ /* set this on random 8390 clones! */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char reg0; /* Register '0' in a WD8013 */
+ unsigned char reg5; /* Register '5' in a WD8013 */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ struct net_device_stats stat; /* The new statistics table. */
+ u32 *reg_offset; /* Register mapping table */
+ spinlock_t page_lock; /* Page register locks */
+ unsigned long priv; /* Private field to store bus IDs etc. */
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (200ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
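+/* Shorthand for the per-device ei_device state; assumes a variable
+   named 'dev' is in scope where it is used. */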
+#define ei_status (*(struct ei_device *)netdev_priv(dev))
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast, errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+/*
+ * Only generate indirect loads given a machine that needs them.
+ * - removed AMIGA_PCMCIA from this list, handled as ISA io now
+ */
+
+#if defined(CONFIG_MAC) || \
+ defined(CONFIG_ZORRO8390) || defined(CONFIG_ZORRO8390_MODULE) || \
+ defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE)
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#undef inb
+#undef inb_p
+#undef outb
+#undef outb_p
+
+#define inb(port) in_8(port)
+#define outb(val,port) out_8(port,val)
+#define inb_p(port) in_8(port)
+#define outb_p(val,port) out_8(port,val)
+
+#elif defined(CONFIG_ARM_ETHERH) || defined(CONFIG_ARM_ETHERH_MODULE)
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#undef inb
+#undef inb_p
+#undef outb
+#undef outb_p
+
+#define inb(_p) readb(_p)
+#define outb(_v,_p) writeb(_v,_p)
+#define inb_p(_p) inb(_p)
+#define outb_p(_v,_p) outb(_v,_p)
+
+#elif defined(CONFIG_NET_CBUS) || defined(CONFIG_NE_H8300) || defined(CONFIG_NE_H8300_MODULE)
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#else
+#define EI_SHIFT(x) (x)
+#endif
+
+#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */
+#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */
+#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */
+#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */
+#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */
+#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */
+#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */
+#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */
+#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */
+#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */
+#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */
+#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */
+#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */
+#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */
+#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */
+#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */
+#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */
+#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */
+#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */
+#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */
+#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */
+#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */
+#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */
+#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */
+#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
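+/* i.e. 0x01|0x02|0x04|0x08|0x10|0x20: everything except
+   ENISR_RDC and ENISR_RESET. */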
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+#define ENDCFG_BOS 0x02 /* byte order selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */
+#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */
+#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */
+#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */
+#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
new file mode 100644
index 000000000000..74d57865a819
--- /dev/null
+++ b/drivers/net/Kconfig
@@ -0,0 +1,2538 @@
+
+#
+# Network device configuration
+#
+
+config NETDEVICES
+ depends on NET
+ bool "Network device support"
+ ---help---
+ You can say N here if you don't intend to connect your Linux box to
+ any other computer at all.
+
+ You'll have to say Y if your computer contains a network card that
+ you want to use under Linux. If you are going to run SLIP or PPP over
+ a telephone line or null modem cable you need to say Y here. Connecting
+ two machines with parallel ports using PLIP needs this, as well as
+ AX.25/KISS for sending Internet traffic over amateur radio links.
+
+ See also "The Linux Network Administrator's Guide" by Olaf Kirch and
+ Terry Dawson. Available at <http://www.tldp.org/guides.html>.
+
+ If unsure, say Y.
+
+config DUMMY
+ tristate "Dummy net driver support"
+ depends on NETDEVICES
+ ---help---
+ This is essentially a bit-bucket device (i.e. traffic you send to
+ this device is consigned into oblivion) with a configurable IP
+ address. It is most commonly used in order to make your currently
+ inactive SLIP address seem like a real address for local programs.
+ If you use SLIP or PPP, you might want to say Y here. Since this
+ thing often comes in handy, the default is Y. It won't enlarge your
+ kernel either. What a deal. Read about it in the Network
+ Administrator's Guide, available from
+ <http://www.tldp.org/docs.html#guide>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dummy. If you want to use more than one dummy
+ device at a time, you need to compile this driver as a module.
+ Instead of 'dummy', the devices will then be called 'dummy0',
+ 'dummy1' etc.
+
+config BONDING
+ tristate "Bonding driver support"
+ depends on NETDEVICES
+ depends on INET
+ ---help---
+ Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
+ Channels together. This is called 'Etherchannel' by Cisco,
+ 'Trunking' by Sun, 802.3ad by the IEEE, and 'Bonding' in Linux.
+
+ The driver supports multiple bonding modes to allow for both high
+ performance and high availability operation.
+
+ Refer to <file:Documentation/networking/bonding.txt> for more
+ information.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bonding.
+
+config EQUALIZER
+ tristate "EQL (serial line load balancing) support"
+ depends on NETDEVICES
+ ---help---
+ If you have two serial connections to some other computer (this
+ usually requires two modems and two telephone lines) and you use
+ SLIP (the protocol for sending Internet traffic over telephone
+ lines) or PPP (a better SLIP) on them, you can make them behave like
+ one double speed connection using this driver. Naturally, this has
+ to be supported at the other end as well, either with a similar EQL
+ Linux driver or with a Livingston Portmaster 2e.
+
+ Say Y if you want this and read
+ <file:Documentation/networking/eql.txt>. You may also want to read
+ section 6.2 of the NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called eql. If unsure, say N.
+
+config TUN
+ tristate "Universal TUN/TAP device driver support"
+ depends on NETDEVICES
+ select CRC32
+ ---help---
+ TUN/TAP provides packet reception and transmission for user space
+ programs. It can be viewed as a simple Point-to-Point or Ethernet
+ device which, instead of receiving packets from physical media,
+ receives them from a user space program, and instead of sending
+ packets via physical media, writes them to that user space program.
+
+ When a program opens /dev/net/tun, the driver creates and registers a
+ corresponding net device, tunX or tapX. When the program closes the
+ device, the driver automatically deletes the tunX or tapX device and
+ all routes associated with it.
+
+ Please read <file:Documentation/networking/tuntap.txt> for more
+ information.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tun.
+
+ If you don't know what to use this for, you don't need it.
+
+config NET_SB1000
+ tristate "General Instruments Surfboard 1000"
+ depends on NETDEVICES && PNP
+ ---help---
+ This is a driver for the General Instrument (also known as
+ NextLevel) SURFboard 1000 internal
+ cable modem. This is an ISA card which is used by a number of cable
+ TV companies to provide cable modem access. It's a one-way
+ downstream-only cable modem, meaning that your upstream net link is
+ provided by your regular phone modem.
+
+ At present this driver only compiles as a module, so say M here if
+ you have this card. The module will be called sb1000. Then read
+ <file:Documentation/networking/README.sb1000> for information on how
+ to use this module, as it needs special ppp scripts for establishing
+ a connection. Further documentation and the necessary scripts can be
+ found at:
+
+ <http://www.jacksonville.net/~fventuri/>
+ <http://home.adelphia.net/~siglercm/sb1000.html>
+ <http://linuxpower.cx/~cable/>
+
+ If you don't have this card, of course say N.
+
+if NETDEVICES
+ source "drivers/net/arcnet/Kconfig"
+endif
+
+#
+# Ethernet
+#
+
+menu "Ethernet (10 or 100Mbit)"
+ depends on NETDEVICES && !UML
+
+config NET_ETHERNET
+ bool "Ethernet (10 or 100Mbit)"
+ ---help---
+ Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common
+ type of Local Area Network (LAN) in universities and companies.
+
+ Common varieties of Ethernet are: 10BASE-2 or Thinnet (10 Mbps over
+ coaxial cable, linking computers in a chain), 10BASE-T or twisted
+ pair (10 Mbps over twisted pair cable, linking computers to central
+ hubs), 10BASE-F (10 Mbps over optical fiber links, using hubs),
+ 100BASE-TX (100 Mbps over two twisted pair cables, using hubs),
+ 100BASE-T4 (100 Mbps over 4 standard voice-grade twisted pair
+ cables, using hubs), 100BASE-FX (100 Mbps over optical fiber links)
+ [the 100BASE varieties are also known as Fast Ethernet], and Gigabit
+ Ethernet (1 Gbps over optical fiber or short copper links).
+
+ If your Linux machine will be connected to an Ethernet and you have
+ an Ethernet network interface card (NIC) installed in your computer,
+ say Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. You will then also have
+ to say Y to the driver for your particular NIC.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Ethernet network cards. If unsure, say N.
+
+config MII
+ tristate "Generic Media Independent Interface device support"
+ depends on NET_ETHERNET
+ help
+ Most ethernet controllers have an MII transceiver either as an
+ external or internal device. It is safe to say Y or M here even if
+ your ethernet card lacks MII.
+
+source "drivers/net/arm/Kconfig"
+
+config MACE
+ tristate "MACE (Power Mac ethernet) support"
+ depends on NET_ETHERNET && PPC_PMAC && PPC32
+ select CRC32
+ help
+ Power Macintoshes and clones with Ethernet built-in on the
+ motherboard will usually use a MACE (Medium Access Control for
+ Ethernet) interface. Say Y to include support for the MACE chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mace.
+
+config MACE_AAUI_PORT
+ bool "Use AAUI port instead of TP by default"
+ depends on MACE
+ help
+ Some Apple machines (notably the Apple Network Server) which use the
+ MACE ethernet chip have an Apple AUI port (small 15-pin connector),
+ instead of an 8-pin RJ45 connector for twisted-pair ethernet. Say
+ Y here if you have such a machine. If unsure, say N.
+ The driver will default to AAUI on ANS anyway, and if you use it as
+ a module, you can pass port_aaui=0|1 to force the port selection.
+
+config BMAC
+ tristate "BMAC (G3 ethernet) support"
+ depends on NET_ETHERNET && PPC_PMAC && PPC32
+ select CRC32
+ help
+ Say Y for support of BMAC Ethernet interfaces. These are used on G3
+ computers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmac.
+
+config OAKNET
+ tristate "National DP83902AV (Oak ethernet) support"
+ depends on NET_ETHERNET && PPC && BROKEN
+ select CRC32
+ help
+ Say Y if your machine has this type of Ethernet network card.
+
+ To compile this driver as a module, choose M here: the module
+ will be called oaknet.
+
+config ARIADNE
+ tristate "Ariadne support"
+ depends on NET_ETHERNET && ZORRO
+ help
+ If you have a Village Tronic Ariadne Ethernet adapter, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ariadne.
+
+config A2065
+ tristate "A2065 support"
+ depends on NET_ETHERNET && ZORRO
+ select CRC32
+ help
+ If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called a2065.
+
+config HYDRA
+ tristate "Hydra support"
+ depends on NET_ETHERNET && ZORRO
+ select CRC32
+ help
+ If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hydra.
+
+config ZORRO8390
+ tristate "Zorro NS8390-based Ethernet support"
+ depends on NET_ETHERNET && ZORRO
+ select CRC32
+ help
+ This driver is for Zorro Ethernet cards using an NS8390-compatible
+ chipset, like the Village Tronic Ariadne II and the Individual
+ Computers X-Surf Ethernet cards. If you have such a card, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called zorro8390.
+
+config APNE
+ tristate "PCMCIA NE2000 support"
+ depends on NET_ETHERNET && AMIGA_PCMCIA
+ select CRC32
+ help
+ If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called apne.
+
+config APOLLO_ELPLUS
+ tristate "Apollo 3c505 support"
+ depends on NET_ETHERNET && APOLLO
+ help
+ Say Y or M here if your Apollo has a 3Com 3c505 ISA Ethernet card.
+ If you don't have one made for Apollos, you can use one from a PC,
+ except that your Apollo won't be able to boot from it (because the
+ code in the ROM will be for a PC).
+
+config MAC8390
+ bool "Macintosh NS 8390 based ethernet cards"
+ depends on NET_ETHERNET && MAC
+ select CRC32
+ help
+ If you want to include a driver to support Nubus or LC-PDS
+ Ethernet cards using an NS8390 chipset or its equivalent, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config MAC89x0
+ tristate "Macintosh CS89x0 based ethernet cards"
+ depends on NET_ETHERNET && MAC && BROKEN
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ Nubus or LC-PDS network (Ethernet) card of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. This module will
+ be called mac89x0.
+
+config MACSONIC
+ tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
+ depends on NET_ETHERNET && MAC
+ ---help---
+ Support for NatSemi SONIC based Ethernet devices. This includes
+ the onboard Ethernet in many Quadras as well as some LC-PDS,
+ a few Nubus and all known Comm Slot Ethernet cards. If you have
+ one of these say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. This module will
+ be called macsonic.
+
+config MACMACE
+ bool "Macintosh (AV) onboard MACE ethernet (EXPERIMENTAL)"
+ depends on NET_ETHERNET && MAC && EXPERIMENTAL
+ select CRC32
+ help
+ Support for the onboard AMD 79C940 MACE Ethernet controller used in
+ the 660AV and 840AV Macintosh. If you have one of these Macintoshes
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config MVME147_NET
+ tristate "MVME147 (Lance) Ethernet support"
+ depends on NET_ETHERNET && MVME147
+ select CRC32
+ help
+ Support for the on-board Ethernet interface on the Motorola MVME147
+ single-board computer. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config MVME16x_NET
+ tristate "MVME16x Ethernet support"
+ depends on NET_ETHERNET && MVME16x
+ help
+ This is the driver for the Ethernet interface on the Motorola
+ MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config BVME6000_NET
+ tristate "BVME6000 Ethernet support"
+ depends on NET_ETHERNET && BVME6000
+ help
+ This is the driver for the Ethernet interface on BVME4000 and
+ BVME6000 VME boards. Say Y here to include the driver for this chip
+ in your kernel.
+ To compile this driver as a module, choose M here.
+
+config ATARILANCE
+ tristate "Atari Lance support"
+ depends on NET_ETHERNET && ATARI
+ help
+ Say Y to include support for several Atari Ethernet adapters based
+ on the AMD Lance chipset: RieblCard (with or without battery), or
+ PAMCard VME (also the version by Rhotron, with different addresses).
+
+config ATARI_BIONET
+ tristate "BioNet-100 support"
+ depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN
+ help
+ Say Y to include support for BioData's BioNet-100 Ethernet adapter
+ for the ACSI port. The driver works (has to work...) with a polled
+ I/O scheme, so it's rather slow :-(
+
+config ATARI_PAMSNET
+ tristate "PAMsNet support"
+ depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN
+ help
+ Say Y to include support for the PAMsNet Ethernet adapter for the
+ ACSI port ("ACSI node"). The driver works (has to work...) with a
+ polled I/O scheme, so it's rather slow :-(
+
+config SUN3LANCE
+ tristate "Sun3/Sun3x on-board LANCE support"
+ depends on NET_ETHERNET && (SUN3 || SUN3X)
+ help
+ Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
+ featured an AMD Lance 10Mbit Ethernet controller on board; say Y
+ here to compile in the Linux driver for this and enable Ethernet.
+ General Linux information on the Sun 3 and 3x series (now
+ discontinued) is at
+ <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you're not building a kernel for a Sun 3, say N.
+
+config SUN3_82586
+ tristate "Sun3 on-board Intel 82586 support"
+ depends on NET_ETHERNET && SUN3
+ help
+ This driver enables support for the on-board Intel 82586 based
+ Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
+ that this driver does not support 82586-based adapters on additional
+ VME boards.
+
+config HPLANCE
+ bool "HP on-board LANCE support"
+ depends on NET_ETHERNET && DIO
+ select CRC32
+ help
+ If you want to use the builtin "LANCE" Ethernet controller on an
+ HP300 machine, say Y here.
+
+config LASI_82596
+ tristate "Lasi ethernet"
+ depends on NET_ETHERNET && PARISC && GSC_LASI
+ help
+ Say Y here to support the on-board Intel 82596 ethernet controller
+ built into Hewlett-Packard PA-RISC machines.
+
+config MIPS_JAZZ_SONIC
+ tristate "MIPS JAZZ onboard SONIC Ethernet support"
+ depends on NET_ETHERNET && MACH_JAZZ
+ help
+ This is the driver for the onboard card of MIPS Magnum 4000,
+ Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
+
+config MIPS_GT96100ETH
+ bool "MIPS GT96100 Ethernet support"
+ depends on NET_ETHERNET && MIPS_GT96100
+ help
+ Say Y here to support the Ethernet subsystem on your GT96100 card.
+
+config MIPS_AU1X00_ENET
+ bool "MIPS AU1000 Ethernet support"
+ depends on NET_ETHERNET && SOC_AU1X00
+ select CRC32
+ help
+ If you have an Alchemy Semi AU1X00 based system
+ say Y. Otherwise, say N.
+
+config NET_SB1250_MAC
+ tristate "SB1250 Ethernet support"
+ depends on NET_ETHERNET && SIBYTE_SB1xxx_SOC
+
+config SGI_IOC3_ETH
+ bool "SGI IOC3 Ethernet"
+ depends on NET_ETHERNET && PCI && SGI_IP27
+ select CRC32
+ select MII
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SGI_IOC3_ETH_HW_RX_CSUM
+ bool "Receive hardware checksums"
+ depends on SGI_IOC3_ETH && INET
+ default y
+ help
+ The SGI IOC3 network adapter supports TCP and UDP checksums in
+ hardware to offload processing of these checksums from the CPU. At
+ the moment only acceleration of IPv4 is supported. This option
+ enables offloading for checksums on receive. If unsure, say Y.
+
+config SGI_IOC3_ETH_HW_TX_CSUM
+ bool "Transmit hardware checksums"
+ depends on SGI_IOC3_ETH && INET
+ default y
+ help
+ The SGI IOC3 network adapter supports TCP and UDP checksums in
+ hardware to offload processing of these checksums from the CPU. At
+ the moment only acceleration of IPv4 is supported. This option
+ enables offloading for checksums on transmit. If unsure, say Y.
+
+config SGI_O2MACE_ETH
+ tristate "SGI O2 MACE Fast Ethernet support"
+ depends on NET_ETHERNET && SGI_IP32=y
+
+config STNIC
+ tristate "National DP83902AV support"
+ depends on NET_ETHERNET && SUPERH
+ select CRC32
+ help
+ Support for cards based on the National Semiconductor DP83902AV
+ ST-NIC Serial Network Interface Controller for Twisted Pair. This
+ is a 10Mbit/sec Ethernet controller. Product overview and specs at
+ <http://www.national.com/pf/DP/DP83902A.html>.
+
+ If unsure, say N.
+
+config SUNLANCE
+ tristate "Sun LANCE support"
+ depends on NET_ETHERNET && SBUS
+ select CRC32
+ help
+ This driver supports the "le" interface present on all 32-bit Sparc
+ systems, on some older Ultra systems and as an Sbus option. These
+ cards are based on the AMD Lance chipset, which is better known
+ via the NE2100 cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunlance.
+
+config HAPPYMEAL
+ tristate "Sun Happy Meal 10/100baseT support"
+ depends on NET_ETHERNET && (SBUS || PCI)
+ select CRC32
+ help
+ This driver supports the "hme" interface present on most Ultra
+ systems and as an option on older Sbus systems. This driver supports
+ both PCI and Sbus devices. This driver also supports the "qfe" quad
+ 100baseT device available in both PCI and Sbus configurations.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunhme.
+
+config SUNBMAC
+ tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
+ depends on NET_ETHERNET && SBUS && EXPERIMENTAL
+ select CRC32
+ help
+ This driver supports the "be" interface available as an Sbus option.
+ This is Sun's older 100baseT Ethernet device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunbmac.
+
+config SUNQE
+ tristate "Sun QuadEthernet support"
+ depends on NET_ETHERNET && SBUS
+ select CRC32
+ help
+ This driver supports the "qe" 10baseT Ethernet device, available as
+ an Sbus option. Note that this is not the same as Quad FastEthernet
+ "qfe" which is supported by the Happy Meal driver instead.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunqe.
+
+config SUNGEM
+ tristate "Sun GEM support"
+ depends on NET_ETHERNET && PCI
+ select CRC32
+ help
+ Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+
+config NET_VENDOR_3COM
+ bool "3COM cards"
+ depends on NET_ETHERNET && (ISA || EISA || MCA || PCI)
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about 3COM cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+config EL1
+ tristate "3c501 \"EtherLink\" support"
+ depends on NET_VENDOR_3COM && ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Also, consider buying a
+ new card, since the 3c501 is slow, broken, and obsolete: you will
+ have problems. Some people suggest to ping ("man ping") a nearby
+ machine every minute ("man cron") when using this card.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c501.
+
+config EL2
+ tristate "3c503 \"EtherLink II\" support"
+ depends on NET_VENDOR_3COM && ISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c503.
+
+config ELPLUS
+ tristate "3c505 \"EtherLink Plus\" support"
+ depends on NET_VENDOR_3COM && ISA
+ ---help---
+ Information about this network (Ethernet) card can be found in
+ <file:Documentation/networking/3c505.txt>. If you have a card of
+ this type, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c505.
+
+config EL16
+ tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
+ depends on NET_VENDOR_3COM && ISA && EXPERIMENTAL
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c507.
+
+config EL3
+ tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
+ depends on NET_VENDOR_3COM && (ISA || EISA || MCA)
+ ---help---
+ If you have a network (Ethernet) card belonging to the 3Com
+ EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>.
+
+ If your card is not working you may need to use the DOS
+ setup disk to disable Plug & Play mode, and to select the default
+ media type.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c509.
+
+config 3C515
+ tristate "3c515 ISA \"Fast EtherLink\""
+ depends on NET_VENDOR_3COM && (ISA || EISA)
+ help
+ If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
+ network card, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c515.
+
+config ELMC
+ tristate "3c523 \"EtherLink/MC\" support"
+ depends on NET_VENDOR_3COM && MCA_LEGACY
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c523.
+
+config ELMC_II
+ tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
+ depends on NET_VENDOR_3COM && MCA && MCA_LEGACY
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called 3c527.
+
+config VORTEX
+ tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
+ depends on NET_VENDOR_3COM && (PCI || EISA)
+ select MII
+ ---help---
+ This option enables driver support for a large number of 10mbps and
+ 10/100mbps EISA, PCI and PCMCIA 3Com network cards:
+
+ "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
+ "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
+ "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus
+ "Tornado" (3c905) PCI
+ "Hurricane" (3c555/3cSOHO) PCI
+
+ If you have such a card, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>. More
+ specific information is in
+ <file:Documentation/networking/vortex.txt> and in the comments at
+ the beginning of <file:drivers/net/3c59x.c>.
+
+ To compile this support as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>.
+
+config TYPHOON
+ tristate "3cr990 series \"Typhoon\" support"
+ depends on NET_VENDOR_3COM && PCI
+ select CRC32
+ ---help---
+ This option enables driver support for the 3cr990 series of cards:
+
+ 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97,
+ 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
+ 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
+
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called typhoon.
+
+config LANCE
+ tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+ depends on NET_ETHERNET && ISA
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
+ of this type.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lance. This is recommended.
+
+config NET_VENDOR_SMC
+ bool "Western Digital/SMC cards"
+ depends on NET_ETHERNET && (ISA || MCA || EISA || MAC)
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Western Digital cards. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+config WD80x3
+ tristate "WD80*3 support"
+ depends on NET_VENDOR_SMC && ISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called wd.
+
+config ULTRAMCA
+ tristate "SMC Ultra MCA support"
+ depends on NET_VENDOR_SMC && MCA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type and are running
+ an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smc-mca.
+
+config ULTRA
+ tristate "SMC Ultra support"
+ depends on NET_VENDOR_SMC && ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Important: There have been many reports that, on some motherboards,
+ mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or a compatible,
+ such as some BusLogic models) causes corruption problems with many
+ operating systems. The Linux smc-ultra driver has a work-around for
+ this but keep it in mind if you have such a SCSI card and have
+ problems.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smc-ultra.
+
+config ULTRA32
+ tristate "SMC Ultra32 EISA support"
+ depends on NET_VENDOR_SMC && EISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smc-ultra32.
+
+config SMC91X
+ tristate "SMC 91C9x/91C1xxx support"
+ select CRC32
+ select MII
+ depends on NET_ETHERNET && (ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH)
+ help
+ This is a driver for SMC's 91x series of Ethernet chipsets,
+ including the SMC91C94 and the SMC91C111. Say Y if you want it
+ compiled into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.linuxdoc.org/docs.html#howto>.
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called smc91x. If you want to compile it as a
+ module, say M here and read <file:Documentation/modules.txt> as well
+ as <file:Documentation/networking/net-modules.txt>.
+
+config SMC9194
+ tristate "SMC 9194 support"
+ depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
+ select CRC32
+ ---help---
+ This is support for the SMC9xxx based Ethernet cards. Choose this
+ option if you have a DELL laptop with the docking station, or
+ another SMC9192/9194 based chipset. Say Y if you want it compiled
+ into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smc9194.
+
+config NET_VENDOR_RACAL
+ bool "Racal-Interlan (Micom) NI cards"
+ depends on NET_ETHERNET && ISA
+ help
+ If you have a network (Ethernet) card belonging to this class, such
+ as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about NI cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+config NI5010
+ tristate "NI5010 support (EXPERIMENTAL)"
+ depends on NET_VENDOR_RACAL && ISA && EXPERIMENTAL && BROKEN_ON_SMP
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this is still
+ experimental code.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ni5010.
+
+config NI52
+ tristate "NI5210 support"
+ depends on NET_VENDOR_RACAL && ISA
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ni52.
+
+config NI65
+ tristate "NI6510 support"
+ depends on NET_VENDOR_RACAL && ISA
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ni65.
+
+source "drivers/net/tulip/Kconfig"
+
+config AT1700
+ tristate "AT1700/1720 support (EXPERIMENTAL)"
+ depends on NET_ETHERNET && (ISA || MCA_LEGACY) && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called at1700.
+
+config DEPCA
+ tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
+ depends on NET_ETHERNET && (ISA || EISA || MCA)
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:drivers/net/depca.c>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called depca.
+
+config HP100
+ tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
+ depends on NET_ETHERNET && (ISA || EISA || PCI)
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called hp100.
+
+config NET_ISA
+ bool "Other ISA cards"
+ depends on NET_ETHERNET && ISA
+ ---help---
+ If your network (Ethernet) card hasn't been mentioned yet and its
+	  bus system (that's the way the card talks to the other components
+ of your computer) is ISA (as opposed to EISA, VLB or PCI), say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining ISA network card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+config E2100
+ tristate "Cabletron E21xx support"
+ depends on NET_ISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called e2100.
+
+config EWRK3
+ tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
+ depends on NET_ISA
+ select CRC32
+ ---help---
+ This driver supports the DE203, DE204 and DE205 network (Ethernet)
+ cards. If this is for you, say Y and read
+ <file:Documentation/networking/ewrk3.txt> in the kernel source as
+ well as the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ewrk3.
+
+config EEXPRESS
+ tristate "EtherExpress 16 support"
+ depends on NET_ISA
+ ---help---
+ If you have an EtherExpress16 network (Ethernet) card, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that the Intel
+ EtherExpress16 card used to be regarded as a very poor choice
+ because the driver was very unreliable. We now have a new driver
+ that should do better.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called eexpress.
+
+config EEXPRESS_PRO
+ tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
+ depends on NET_ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y. This
+	  driver supports Intel i82595{FX,TX} based boards. Note however
+ that the EtherExpress PRO/100 Ethernet card has its own separate
+ driver. Please read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called eepro.
+
+config FMV18X
+ tristate "FMV-181/182/183/184 support (OBSOLETE)"
+ depends on NET_ISA && OBSOLETE
+ ---help---
+ If you have a Fujitsu FMV-181/182/183/184 network (Ethernet) card,
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you use an FMV-183 or FMV-184 and it is not working, you may need
+ to disable Plug & Play mode of the card.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called fmv18x.
+
+config HPLAN_PLUS
+ tristate "HP PCLAN+ (27247B and 27252A) support"
+ depends on NET_ISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called hp-plus.
+
+config HPLAN
+ tristate "HP PCLAN (27245 and other 27xxx series) support"
+ depends on NET_ISA
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called hp.
+
+config LP486E
+ tristate "LP486E on board Ethernet"
+ depends on NET_ISA
+ help
+ Say Y here to support the 82596-based on-board Ethernet controller
+ for the Panther motherboard, which is one of the two shipped in the
+ Intel Professional Workstation.
+
+config ETH16I
+ tristate "ICL EtherTeam 16i/32 support"
+ depends on NET_ISA
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called eth16i.
+
+config NE2000
+ tristate "NE2000/NE1000 support"
+ depends on NET_ISA || (Q40 && m) || M32R
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
+ without a specific driver are compatible with NE2000.
+
+ If you have a PCI NE2000 card however, say N here and Y to "PCI
+ NE2000 support", above. If you have a NE2000 card and are running on
+ an MCA system (a bus system used on some IBM PS/2 computers and
+ laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
+ below.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ne.
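+
+	  As a rough example only (the I/O base and IRQ below are placeholders
+	  and must match your card's settings; the parameters are described in
+	  <file:Documentation/networking/net-modules.txt>), an ISA NE2000
+	  built as a module is typically loaded with:
+
+	      modprobe ne io=0x300 irq=10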
+
+config ZNET
+ tristate "Zenith Z-Note support (EXPERIMENTAL)"
+ depends on NET_ISA && EXPERIMENTAL
+ help
+ The Zenith Z-Note notebook computer has a built-in network
+ (Ethernet) card, and this is the Linux driver for it. Note that the
+ IBM Thinkpad 300 is compatible with the Z-Note and is also supported
+ by this driver. Read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SEEQ8005
+ tristate "SEEQ8005 support (EXPERIMENTAL)"
+ depends on NET_ISA && EXPERIMENTAL
+ help
+ This is a driver for the SEEQ 8005 network (Ethernet) card. If this
+ is for you, read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called seeq8005.
+
+config SK_G16
+ tristate "SK_G16 support (OBSOLETE)"
+ depends on NET_ISA && OBSOLETE
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SKMC
+ tristate "SKnet MCA support"
+ depends on NET_ETHERNET && MCA && BROKEN
+ ---help---
+ These are Micro Channel Ethernet adapters. You need to say Y to "MCA
+ support" in order to use this driver. Supported cards are the SKnet
+ Junior MC2 and the SKnet MC2(+). The driver automatically
+ distinguishes between the two cards. Note that using multiple boards
+ of different type hasn't been tested with this driver. Say Y if you
+ have one of these Ethernet adapters.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called sk_mca.
+
+config NE2_MCA
+ tristate "NE/2 (ne2000 MCA version) support"
+ depends on NET_ETHERNET && MCA_LEGACY
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ne2.
+
+config IBMLANA
+ tristate "IBM LAN Adapter/A support"
+ depends on NET_ETHERNET && MCA && MCA_LEGACY
+ ---help---
+ This is a Micro Channel Ethernet adapter. You need to set
+	  CONFIG_MCA to use this driver. It is available both as an in-kernel
+	  driver and as a module.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The only
+ currently supported card is the IBM LAN Adapter/A for Ethernet. It
+	  will support both 16K and 32K memory windows; however, a 32K window
+	  gives better protection against packet losses. Using multiple
+	  boards with this driver should be possible, but has not been tested
+	  so far due to lack of hardware.
+
+config IBMVETH
+ tristate "IBM LAN Virtual Ethernet support"
+ depends on NETDEVICES && NET_ETHERNET && PPC_PSERIES
+ ---help---
+ This driver supports virtual ethernet adapters on newer IBM iSeries
+ and pSeries systems.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called ibmveth.
+
+config IBM_EMAC
+ tristate "IBM PPC4xx EMAC driver support"
+ depends on 4xx
+ select CRC32
+ ---help---
+ This driver supports the IBM PPC4xx EMAC family of on-chip
+ Ethernet controllers.
+
+config IBM_EMAC_ERRMSG
+ bool "Verbose error messages"
+ depends on IBM_EMAC
+
+config IBM_EMAC_RXB
+ int "Number of receive buffers"
+ depends on IBM_EMAC
+ default "128" if IBM_EMAC4
+ default "64"
+
+config IBM_EMAC_TXB
+ int "Number of transmit buffers"
+ depends on IBM_EMAC
+ default "128" if IBM_EMAC4
+ default "8"
+
+config IBM_EMAC_FGAP
+ int "Frame gap"
+ depends on IBM_EMAC
+ default "8"
+
+config IBM_EMAC_SKBRES
+ int "Skb reserve amount"
+ depends on IBM_EMAC
+ default "0"
+
+config NET_PCI
+ bool "EISA, VLB, PCI and on board controllers"
+ depends on NET_ETHERNET && (ISA || EISA || PCI)
+ help
+ This is another class of network cards which attach directly to the
+ bus. If you have one of those, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about this class of network cards. If you say Y, you
+ will be asked for your specific card in the following questions. If
+ you are unsure, say Y.
+
+config PCNET32
+ tristate "AMD PCnet32 PCI support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called pcnet32.
+
+config AMD8111_ETH
+ tristate "AMD 8111 (new PCI lance) support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ If you have an AMD 8111-based PCI lance ethernet card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called amd8111e.
+
+config AMD8111E_NAPI
+ bool "Enable NAPI support"
+ depends on AMD8111_ETH
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config ADAPTEC_STARFIRE
+ tristate "Adaptec Starfire/DuraLAN support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
+ adapter. The DuraLAN chip is used on the 64 bit PCI boards from
+ Adaptec e.g. the ANA-6922A. The older 32 bit boards use the tulip
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called starfire. This is recommended.
+
+config ADAPTEC_STARFIRE_NAPI
+ bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
+ depends on ADAPTEC_STARFIRE && EXPERIMENTAL
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config AC3200
+ tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
+ depends on NET_PCI && (ISA || EISA) && EXPERIMENTAL
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ac3200.
+
+config APRICOT
+ tristate "Apricot Xen-II on board Ethernet"
+ depends on NET_PCI && ISA
+ help
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called apricot.
+
+config B44
+ tristate "Broadcom 4400 ethernet support (EXPERIMENTAL)"
+ depends on NET_PCI && PCI && EXPERIMENTAL
+ select MII
+ help
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called b44.
+
+config FORCEDETH
+ tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)"
+ depends on NET_PCI && PCI && EXPERIMENTAL
+ help
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called forcedeth.
+
+
+config CS89x0
+ tristate "CS89x0 support"
+ depends on NET_PCI && (ISA || ARCH_IXDP2X01)
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ network (Ethernet) card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:Documentation/networking/cs89x0.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+	  called cs89x0.
+
+config TC35815
+ tristate "TOSHIBA TC35815 Ethernet support"
+ depends on NET_PCI && PCI && TOSHIBA_JMR3927
+
+config DGRS
+ tristate "Digi Intl. RightSwitch SE-X support"
+ depends on NET_PCI && (PCI || EISA)
+ ---help---
+ This is support for the Digi International RightSwitch series of
+ PCI/EISA Ethernet switch cards. These include the SE-4 and the SE-6
+ models. If you have a network card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. More specific
+ information is contained in <file:Documentation/networking/dgrs.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called dgrs.
+
+config EEPRO100
+ tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
+ depends on NET_PCI && PCI
+ select MII
+ help
+ If you have an Intel EtherExpress PRO/100 PCI network (Ethernet)
+ card, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called eepro100.
+
+
+config E100
+ tristate "Intel(R) PRO/100+ support"
+ depends on NET_PCI && PCI
+ select MII
+ ---help---
+	  This driver supports the Intel(R) PRO/100 family of adapters.
+ To verify that your adapter is supported, find the board ID number
+ on the adapter. Look for a label that has a barcode and a number
+ in the format 123456-001 (six digits hyphen three digits).
+
+ Use the above information and the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ to identify the adapter.
+
+ For the latest Intel PRO/100 network driver for Linux, see:
+
+ <http://appsr.intel.com/scripts-df/support_intel.asp>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e100.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called e100.
+
+config LNE390
+ tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
+ depends on NET_PCI && EISA && EXPERIMENTAL
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called lne390.
+
+config FEALNX
+ tristate "Myson MTD-8xx PCI Ethernet support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+	  Say Y here to support the Myson MTD-8xx family of PCI-based Ethernet
+ cards. Specifications and data at
+ <http://www.myson.com.hk/mtd/datasheet/>.
+
+config NATSEMI
+ tristate "National Semiconductor DP8381x series PCI Ethernet support"
+ depends on NET_PCI && PCI
+ select CRC32
+ help
+	  This driver is for the National Semiconductor DP8381x series,
+	  including the DP83815 chip, which is used in cards from PureData,
+	  NetGear, Linksys and others.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/natsemi.html>.
+
+config NE2K_PCI
+ tristate "PCI NE2000 and clones support (see help)"
+ depends on NET_PCI && PCI
+ select CRC32
+ ---help---
+ This driver is for NE2000 compatible PCI cards. It will not work
+ with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
+ support" below). If you have a PCI NE2000 network (Ethernet) card,
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver also works for the following NE2000 clone cards:
+	    RealTek RTL-8029    Winbond 89C940
+	    Compex RL2000       KTI ET32P2
+	    NetVin NV5000SC     Via 86C926
+	    SureCom NE34        Winbond
+	    Holtek HT80232      Holtek HT80229
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ne2k-pci.
+
+config NE3210
+ tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
+ depends on NET_PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this driver
+ will NOT WORK for NE3200 cards as they are completely different.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ne3210.
+
+config ES3210
+ tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
+ depends on NET_PCI && EISA && EXPERIMENTAL
+ select CRC32
+ help
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called es3210.
+
+config 8139CP
+ tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
+ depends on NET_PCI && PCI && EXPERIMENTAL
+ select CRC32
+ select MII
+ help
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL8139C+ chips. If you have one of those, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139cp. This is recommended.
+
+config 8139TOO
+ tristate "RealTek RTL-8139 PCI Fast Ethernet Adapter support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL8139 chips. If you have one of those, say Y and read
+ the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139too. This is recommended.
+
+config 8139TOO_PIO
+ bool "Use PIO instead of MMIO"
+ default y
+ depends on 8139TOO
+ help
+ This instructs the driver to use programmed I/O ports (PIO) instead
+ of PCI shared memory (MMIO). This can possibly solve some problems
+ in case your mainboard has memory consistency issues. If unsure,
+ say N.
+
+config 8139TOO_TUNE_TWISTER
+ bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)"
+ depends on 8139TOO
+ help
+	  This implements a function which might come in handy in case you
+	  are using low quality cabling over long distances. It is required
+	  for RealTek RTL-8139 revision K boards, and totally unused
+	  otherwise. It tries to match the transceiver to the cable
+	  characteristics. This is experimental, since it is hardly
+	  documented by the manufacturer.
+	  If unsure, say Y.
+
+config 8139TOO_8129
+ bool "Support for older RTL-8129/8130 boards"
+ depends on 8139TOO
+ help
+ This enables support for the older and uncommon RTL-8129 and
+ RTL-8130 chips, which support MII via an external transceiver,
+ instead of an internal one. Disabling this option will save some
+ memory by making the code size smaller. If unsure, say Y.
+
+config 8139_OLD_RX_RESET
+ bool "Use older RX-reset method"
+ depends on 8139TOO
+ help
+ The 8139too driver was recently updated to contain a more rapid
+ reset sequence, in the face of severe receive errors. This "new"
+ RX-reset method should be adequate for all boards. But if you
+ experience problems, you can enable this option to restore the
+ old RX-reset behavior. If unsure, say N.
+
+config SIS900
+ tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
+ depends on NET_PCI && PCI
+ select CRC32
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
+ SiS 630 and SiS 540 chipsets. If you have one of those, say Y and
+ read the Ethernet-HOWTO, available at
+ <http://www.tldp.org/docs.html#howto>. Please read
+ <file:Documentation/networking/sis900.txt> and comments at the
+ beginning of <file:drivers/net/sis900.c> for more information.
+
+ This driver also supports AMD 79C901 HomePNA so that you can use
+ your phone line as a network cable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sis900. This is recommended.
+
+config EPIC100
+ tristate "SMC EtherPower II"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
+ which is based on the SMC83c17x (EPIC/100).
+ More specific information and updates are available from
+ <http://www.scyld.com/network/epic100.html>.
+
+config SUNDANCE
+ tristate "Sundance Alta support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ This driver is for the Sundance "Alta" chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/sundance.html>.
+
+config SUNDANCE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on SUNDANCE
+ help
+ Enable memory-mapped I/O for interaction with Sundance NIC registers.
+	  Do NOT enable this by default; PIO (used when MMIO is disabled)
+	  is known to work around bugs on certain chips.
+
+ If unsure, say N.
+
+config TLAN
+ tristate "TI ThunderLAN support"
+ depends on NET_PCI && (PCI || EISA) && !64BIT
+ ---help---
+ If you have a PCI Ethernet network card based on the ThunderLAN chip
+ which is supported by this driver, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Devices currently supported by this driver are Compaq Netelligent,
+ Compaq NetFlex and Olicom cards. Please read the file
+ <file:Documentation/networking/tlan.txt> for more details.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called tlan.
+
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+config VIA_RHINE
+ tristate "VIA Rhine support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select MII
+ help
+ If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
+ Rhine-II (VT6102), or Rhine-III (VT6105)), say Y here. Rhine-type
+ Ethernet functions can also be found integrated on South Bridges
+ (e.g. VT8235).
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-rhine.
+
+config VIA_RHINE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on VIA_RHINE
+ help
+ This instructs the driver to use PCI shared memory (MMIO) instead of
+ programmed I/O ports (PIO). Enabling this gives an improvement in
+ processing time in parts of the driver.
+
+ If unsure, say Y.
+
+config LAN_SAA9730
+ bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
+ depends on NET_PCI && EXPERIMENTAL && MIPS
+ help
+ The SAA9730 is a combined multimedia and peripheral controller used
+ in thin clients, Internet access terminals, and diskless
+ workstations.
+ See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
+
+config NET_POCKET
+ bool "Pocket and portable adapters"
+ depends on NET_ETHERNET && ISA
+ ---help---
+ Cute little network (Ethernet) devices which attach to the parallel
+ port ("pocket adapters"), commonly used with laptops. If you have
+ one of those, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you want to plug a network (or some other) card into the PCMCIA
+ (or PC-card) slot of your laptop instead (PCMCIA is the standard for
+ credit card size extension cards used by all modern laptops), you
+ need the pcmcia-cs package (location contained in the file
+ <file:Documentation/Changes>) and you can say N here.
+
+ Laptop users should read the Linux Laptop home page at
+ <http://www.linux-on-laptops.com/> or
+ Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about this class of network devices. If you say Y, you
+ will be asked for your specific device in the following questions.
+
+config ATP
+ tristate "AT-LAN-TEC/RealTek pocket adapter support"
+ depends on NET_POCKET && ISA && X86
+ select CRC32
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>, if you
+ want to use this. If you intend to use this driver, you should have
+ said N to the "Parallel printer support", because the two drivers
+ don't like each other.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atp.
+
+config DE600
+ tristate "D-Link DE600 pocket adapter support"
+ depends on NET_POCKET && ISA
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de600.
+
+config DE620
+ tristate "D-Link DE620 pocket adapter support"
+ depends on NET_POCKET && ISA
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de620.
+
+config SGISEEQ
+ tristate "SGI Seeq ethernet controller support"
+ depends on NET_ETHERNET && SGI_IP22
+ help
+	  Say Y here if you have a Seeq based Ethernet network card. This is
+ used in many Silicon Graphics machines.
+
+config DECLANCE
+ tristate "DEC LANCE ethernet controller support"
+ depends on NET_ETHERNET && MACH_DECSTATION
+ select CRC32
+ help
+ This driver is for the series of Ethernet controllers produced by
+ DEC (now Compaq) based on the AMD Lance chipset, including the
+ DEPCA series. (This chipset is better known via the NE2100 cards.)
+
+config 68360_ENET
+ bool "Motorola 68360 ethernet controller"
+ depends on M68360
+ help
+ Say Y here if you want to use the built-in ethernet controller of
+ the Motorola 68360 processor.
+
+config FEC
+ bool "FEC ethernet controller (of ColdFire 5272)"
+ depends on M5272 || M5282
+ help
+	  Say Y here if you want to use the built-in 10/100 Fast Ethernet
+	  controller on the Motorola ColdFire 5272 or 5282 processors.
+
+config NE_H8300
+ tristate "NE2000 compatible support for H8/300"
+ depends on H8300 && NET_ETHERNET
+ help
+ Say Y here if you want to use the NE2000 compatible
+ controller on the Renesas H8/300 processor.
+
+source "drivers/net/fec_8xx/Kconfig"
+
+endmenu
+
+#
+# Gigabit Ethernet
+#
+
+menu "Ethernet (1000 Mbit)"
+ depends on NETDEVICES && !UML
+
+config ACENIC
+ tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
+ depends on PCI
+ ---help---
+ Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear
+ GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet
+	  adapter. The driver allows for using the Jumbo Frame option (9000
+	  bytes/frame); however, it requires that your switches can handle
+	  this as well. To enable Jumbo Frames, add `mtu 9000' to your
+	  ifconfig line, as shown in the example below.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acenic.
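+
+	  As a sketch of the Jumbo Frame setup mentioned above (the interface
+	  name eth0 is only an example; use the name assigned to your
+	  adapter):
+
+	      ifconfig eth0 mtu 9000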
+
+config ACENIC_OMIT_TIGON_I
+ bool "Omit support for old Tigon I based AceNICs"
+ depends on ACENIC
+ help
+ Say Y here if you only have Tigon II based AceNICs and want to leave
+ out support for the older Tigon I based cards which are no longer
+	  being sold (i.e. the original Alteon AceNIC and 3Com 3C985 (non-B
+	  version)). This will reduce the size of the driver object by
+	  approx. 100KB. If you are not sure whether your card is a Tigon I or a
+ Tigon II, say N here.
+
+ The safe and default value for this is N.
+
+config DL2K
+ tristate "D-Link DL2000-based Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ help
+	  This driver supports D-Link DL2000-based gigabit Ethernet cards,
+	  which include the
+	    D-Link DGE-550T Gigabit Ethernet Adapter and other
+	    D-Link DL2000-based Gigabit Ethernet Adapters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dl2k.
+
+config E1000
+ tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+	  This driver supports the Intel(R) PRO/1000 gigabit Ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called e1000.
+
+config E1000_NAPI
+ bool "Use Rx Polling (NAPI)"
+ depends on E1000
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config MYRI_SBUS
+ tristate "MyriCOM Gigabit Ethernet support"
+ depends on SBUS
+ help
+ This driver supports MyriCOM Sbus gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called myri_sbus. This is recommended.
+
+config NS83820
+ tristate "National Semiconduct DP83820 support"
+ depends on PCI
+ help
+ This is a driver for the National Semiconductor DP83820 series
+ of gigabit ethernet MACs. Cards using this chipset include
+ the D-Link DGE-500T, PureData's PDP8023Z-TG, SMC's SMC9462TX,
+ SOHO-GA2000T, SOHO-GA2500T. The driver supports the use of
+ zero copy.
+
+config HAMACHI
+ tristate "Packet Engines Hamachi GNIC-II support"
+ depends on PCI
+ select MII
+ help
+ If you have a Gigabit Ethernet card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called hamachi.
+
+config YELLOWFIN
+ tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
+ adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is
+ used by the Beowulf Linux cluster project. See
+ <http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html> for more
+ information about this driver in particular and Beowulf in general.
+
+ To compile this driver as a module, choose M here: the module
+ will be called yellowfin. This is recommended.
+
+config R8169
+ tristate "Realtek 8169 gigabit ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called r8169. This is recommended.
+
+config R8169_NAPI
+ bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+ depends on R8169 && EXPERIMENTAL
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config R8169_VLAN
+ bool "VLAN support"
+ depends on R8169 && VLAN_8021Q
+ ---help---
+ Say Y here for the r8169 driver to support the functions required
+ by the kernel 802.1Q code.
+
+ If in doubt, say Y.
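+
+	  As a rough example only (this assumes the 802.1Q code is enabled and
+	  the user-space vconfig tool is installed; the interface name eth0
+	  and VLAN ID 5 are placeholders), a VLAN can be created on top of the
+	  NIC with:
+
+	      vconfig add eth0 5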
+
+config SK98LIN
+ tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
+ depends on PCI
+ ---help---
+ Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+ compliant Gigabit Ethernet Adapter. The following adapters are supported
+ by this driver:
+ - 3Com 3C940 Gigabit LOM Ethernet Adapter
+ - 3Com 3C941 Gigabit LOM Ethernet Adapter
+ - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
+ - Allied Telesyn AT-2971T Gigabit Ethernet Adapter
+ - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
+ - DGE-530T Gigabit Ethernet Adapter
+ - EG1032 v2 Instant Gigabit Network Adapter
+ - EG1064 v2 Instant Gigabit Network Adapter
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Albatron)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Asus)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (ECS)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Epox)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Foxconn)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Gigabyte)
+ - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Iwill)
+ - Marvell 88E8050 Gigabit LOM Ethernet Adapter (Intel)
+ - Marvell RDK-8001 Adapter
+ - Marvell RDK-8002 Adapter
+ - Marvell RDK-8003 Adapter
+ - Marvell RDK-8004 Adapter
+ - Marvell RDK-8006 Adapter
+ - Marvell RDK-8007 Adapter
+ - Marvell RDK-8008 Adapter
+ - Marvell RDK-8009 Adapter
+ - Marvell RDK-8010 Adapter
+ - Marvell RDK-8011 Adapter
+ - Marvell RDK-8012 Adapter
+ - Marvell RDK-8052 Adapter
+ - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (32 bit)
+ - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (64 bit)
+ - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L)
+ - SK-9521 10/100/1000Base-T Adapter
+ - SK-9521 V2.0 10/100/1000Base-T Adapter
+ - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T)
+ - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter
+ - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link)
+ - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX)
+ - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter
+ - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link)
+ - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX)
+ - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter
+ - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link)
+ - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter
+ - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition)
+ - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter
+ - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link)
+ - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX)
+ - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter
+ - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link)
+ - SMC EZ Card 1000 (SMC9452TXV.2)
+
+ The adapters support Jumbo Frames.
+ The dual link adapters support link-failover and dual port features.
+ Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support
+ the scatter-gather functionality with sendfile(). Please refer to
+ <file:Documentation/networking/sk98lin.txt> for more information about
+ optional driver parameters.
+ Questions concerning this driver may be addressed to:
+ <linux@syskonnect.de>
+
+ If you want to compile this driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The module will
+ be called sk98lin. This is recommended.
+
+config VIA_VELOCITY
+ tristate "VIA Velocity support"
+ depends on NET_PCI && PCI
+ select CRC32
+ select CRC_CCITT
+ select MII
+ help
+	  If you have a VIA "Velocity" based network card, say Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-velocity.
+
+config TIGON3
+ tristate "Broadcom Tigon3 support"
+ depends on PCI
+ help
+ This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tg3. This is recommended.
+
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on 85xx || 83xx
+ help
+ This driver supports the Gigabit TSEC on the MPC85xx
+	  family of chips, and the FEC on the MPC8540.
+
+config GFAR_NAPI
+ bool "NAPI Support"
+ depends on GIANFAR
+
+config MV643XX_ETH
+ tristate "MV-643XX Ethernet support"
+ depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3
+ help
+ This driver supports the gigabit Ethernet on the Marvell MV643XX
+ chipset which is used in the Momenco Ocelot C and Jaguar ATX and
+ Pegasos II, amongst other PPC and MIPS boards.
+
+config MV643XX_ETH_0
+ bool "MV-643XX Port 0"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 0 of the Marvell MV643XX Gigabit
+ Ethernet.
+
+config MV643XX_ETH_1
+ bool "MV-643XX Port 1"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 1 of the Marvell MV643XX Gigabit
+ Ethernet.
+
+config MV643XX_ETH_2
+ bool "MV-643XX Port 2"
+ depends on MV643XX_ETH
+ help
+ This enables support for Port 2 of the Marvell MV643XX Gigabit
+ Ethernet.
+
+endmenu
+
+#
+# 10 Gigabit Ethernet
+#
+
+menu "Ethernet (10000 Mbit)"
+ depends on NETDEVICES && !UML
+
+config IXGB
+ tristate "Intel(R) PRO/10GbE support"
+ depends on PCI
+ ---help---
+	  This driver supports the Intel(R) PRO/10GbE family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgb.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called ixgb.
+
+config IXGB_NAPI
+ bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
+ depends on IXGB && EXPERIMENTAL
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config S2IO
+ tristate "S2IO 10Gbe XFrame NIC"
+ depends on PCI
+ ---help---
+	  This driver supports the S2IO 10GbE Xframe NIC.
+	  For help regarding driver compilation, installation and
+	  tuning, please look into <file:drivers/net/s2io/README.txt>.
+
+config S2IO_NAPI
+ bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
+ depends on S2IO && EXPERIMENTAL
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config 2BUFF_MODE
+ bool "Use 2 Buffer Mode on Rx side."
+ depends on S2IO
+ ---help---
+	  On enabling the 2 buffer mode, the received frame will be
+	  split into 2 parts before being DMA'ed to the host's memory.
+	  The parts are the Ethernet header and the Ethernet payload.
+	  This is useful on systems where DMA'ing to unaligned
+	  physical memory locations comes with a heavy price.
+	  If unsure, say N.
+
+endmenu
+
+if !UML
+source "drivers/net/tokenring/Kconfig"
+
+source "drivers/net/wireless/Kconfig"
+
+source "drivers/net/pcmcia/Kconfig"
+endif
+
+source "drivers/net/wan/Kconfig"
+
+source "drivers/atm/Kconfig"
+
+source "drivers/s390/net/Kconfig"
+
+config ISERIES_VETH
+ tristate "iSeries Virtual Ethernet driver support"
+ depends on NETDEVICES && PPC_ISERIES
+
+config FDDI
+ bool "FDDI driver support"
+ depends on NETDEVICES && (PCI || EISA)
+ help
+ Fiber Distributed Data Interface is a high speed local area network
+ design; essentially a replacement for high speed Ethernet. FDDI can
+ run over copper or fiber. If you are connected to such a network and
+ want a driver for the FDDI card in your computer, say Y here (and
+ then also Y to the driver for your FDDI card, below). Most people
+ will say N.
+
+config DEFXX
+ tristate "Digital DEFEA and DEFPA adapter support"
+ depends on FDDI && (PCI || EISA)
+ help
+ This is support for the DIGITAL series of EISA (DEFEA) and PCI
+ (DEFPA) controllers which can connect you to a local FDDI network.
+
+config SKFP
+ tristate "SysKonnect FDDI PCI support"
+ depends on FDDI && PCI
+ ---help---
+ Say Y here if you have a SysKonnect FDDI PCI adapter.
+ The following adapters are supported by this driver:
+ - SK-5521 (SK-NET FDDI-UP)
+ - SK-5522 (SK-NET FDDI-UP DAS)
+ - SK-5541 (SK-NET FDDI-FP)
+ - SK-5543 (SK-NET FDDI-LP)
+ - SK-5544 (SK-NET FDDI-LP DAS)
+ - SK-5821 (SK-NET FDDI-UP64)
+ - SK-5822 (SK-NET FDDI-UP64 DAS)
+ - SK-5841 (SK-NET FDDI-FP64)
+ - SK-5843 (SK-NET FDDI-LP64)
+ - SK-5844 (SK-NET FDDI-LP64 DAS)
+ - Netelligent 100 FDDI DAS Fibre SC
+ - Netelligent 100 FDDI SAS Fibre SC
+ - Netelligent 100 FDDI DAS UTP
+ - Netelligent 100 FDDI SAS UTP
+ - Netelligent 100 FDDI SAS Fibre MIC
+
+ Read <file:Documentation/networking/skfp.txt> for information about
+ the driver.
+
+ Questions concerning this driver can be addressed to:
+ <linux@syskonnect.de>
+
+ To compile this driver as a module, choose M here: the module
+ will be called skfp. This is recommended.
+
+config HIPPI
+ bool "HIPPI driver support (EXPERIMENTAL)"
+ depends on NETDEVICES && EXPERIMENTAL && INET && PCI
+ help
+	  HIgh Performance Parallel Interface (HIPPI) is an 800Mbit/sec and
+ 1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
+ can run over copper (25m) or fiber (300m on multi-mode or 10km on
+ single-mode). HIPPI networks are commonly used for clusters and to
+ connect to super computers. If you are connected to a HIPPI network
+ and have a HIPPI network card in your computer that you want to use
+ under Linux, say Y here (you must also remember to enable the driver
+ for your HIPPI card below). Most people will say N here.
+
+config ROADRUNNER
+ tristate "Essential RoadRunner HIPPI PCI adapter support (EXPERIMENTAL)"
+ depends on HIPPI && PCI
+ help
+ Say Y here if this is your PCI HIPPI network card.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rrunner. If unsure, say N.
+
+config ROADRUNNER_LARGE_RINGS
+ bool "Use large TX/RX rings (EXPERIMENTAL)"
+ depends on ROADRUNNER
+ help
+ If you say Y here, the RoadRunner driver will preallocate up to 2 MB
+ of additional memory to allow for fastest operation, both for
+ transmitting and receiving. This memory cannot be used by any other
+ kernel code or by user space programs. Say Y here only if you have
+ the memory.
+
+config PLIP
+ tristate "PLIP (parallel port) support"
+ depends on NETDEVICES && PARPORT
+ ---help---
+ PLIP (Parallel Line Internet Protocol) is used to create a
+ reasonably fast mini network consisting of two (or, rarely, more)
+ local machines. A PLIP link from a Linux box is a popular means to
+ install a Linux distribution on a machine which doesn't have a
+ CD-ROM drive (a minimal system has to be transferred with floppies
+ first). The kernels on both machines need to have this PLIP option
+ enabled for this to work.
+
+ The PLIP driver has two modes, mode 0 and mode 1. The parallel
+ ports (the connectors at the computers with 25 holes) are connected
+ with "null printer" or "Turbo Laplink" cables which can transmit 4
+ bits at a time (mode 0) or with special PLIP cables, to be used on
+ bidirectional parallel ports only, which can transmit 8 bits at a
+ time (mode 1); you can find the wiring of these cables in
+ <file:Documentation/networking/PLIP.txt>. The cables can be up to
+ 15m long. Mode 0 works also if one of the machines runs DOS/Windows
+ and has some PLIP software installed, e.g. the Crynwr PLIP packet
+ driver (<http://oak.oakland.edu/simtel.net/msdos/pktdrvr-pre.html>)
+ and winsock or NCSA's telnet.
+
+ If you want to use PLIP, say Y and read the PLIP mini-HOWTO as well
+ as the NET-3-HOWTO, both available from
+ <http://www.tldp.org/docs.html#howto>. Note that the PLIP
+ protocol has been changed and this PLIP driver won't work together
+ with the PLIP support in Linux versions 1.0.x. This option enlarges
+ your kernel by about 8 KB.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called plip. If unsure, say Y or M, in case you buy a laptop
+ later.
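+
+	  A minimal sketch of bringing up a PLIP link (the interface name and
+	  the addresses are examples only; the machine at the other end needs
+	  the mirror-image configuration):
+
+	      ifconfig plip0 192.168.3.1 pointopoint 192.168.3.2 up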
+
+config PPP
+ tristate "PPP (point-to-point protocol) support"
+ depends on NETDEVICES
+ ---help---
+ PPP (Point to Point Protocol) is a newer and better SLIP. It serves
+ the same purpose: sending Internet traffic over telephone (and other
+ serial) lines. Ask your access provider if they support it, because
+ otherwise you can't use it; most Internet access providers these
+ days support PPP rather than SLIP.
+
+ To use PPP, you need an additional program called pppd as described
+ in the PPP-HOWTO, available at
+ <http://www.tldp.org/docs.html#howto>. Make sure that you have
+ the version of pppd recommended in <file:Documentation/Changes>.
+ The PPP option enlarges your kernel by about 16 KB.
+
+ There are actually two versions of PPP: the traditional PPP for
+ asynchronous lines, such as regular analog phone lines, and
+ synchronous PPP which can be used over digital ISDN lines for
+ example. If you want to use PPP over phone lines or other
+ asynchronous serial lines, you need to say Y (or M) here and also to
+ the next option, "PPP support for async serial ports". For PPP over
+ synchronous lines, you should say Y (or M) here and to "Support
+ synchronous PPP", below.
+
+ If you said Y to "Version information on all symbols" above, then
+ you cannot compile the PPP driver into the kernel; you can then only
+ compile it as a module. To compile this driver as a module, choose M
+ here and read <file:Documentation/networking/net-modules.txt>.
+ The module will be called ppp_generic.
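+
+	  As a rough illustration only (pppd options vary widely by setup; the
+	  serial device and speed below are placeholders, see the PPP-HOWTO
+	  and your pppd documentation), a simple dial-out style invocation
+	  over an async serial port looks like:
+
+	      pppd /dev/ttyS0 115200 crtscts defaultroute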
+
+config PPP_MULTILINK
+ bool "PPP multilink support (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ help
+ PPP multilink is a protocol (defined in RFC 1990) which allows you
+ to combine several (logical or physical) lines into one logical PPP
+ connection, so that you can utilize your full bandwidth.
+
+ This has to be supported at the other end as well and you need a
+ version of the pppd daemon which understands the multilink protocol.
+
+ If unsure, say N.
+
+config PPP_FILTER
+ bool "PPP filtering"
+ depends on PPP
+ help
+ Say Y here if you want to be able to filter the packets passing over
+ PPP interfaces. This allows you to control which packets count as
+ activity (i.e. which packets will reset the idle timer or bring up
+ a demand-dialled link) and which packets are to be dropped entirely.
+ You need to say Y here if you wish to use the pass-filter and
+ active-filter options to pppd.
+
+ If unsure, say N.
+
+config PPP_ASYNC
+ tristate "PPP support for async serial ports"
+ depends on PPP
+ select CRC_CCITT
+ ---help---
+ Say Y (or M) here if you want to be able to use PPP over standard
+ asynchronous serial ports, such as COM1 or COM2 on a PC. If you use
+ a modem (not a synchronous or ISDN modem) to contact your ISP, you
+ need this option.
+
+ To compile this driver as a module, choose M here.
+
+ If unsure, say Y.
+
+config PPP_SYNC_TTY
+ tristate "PPP support for sync tty ports"
+ depends on PPP
+ help
+ Say Y (or M) here if you want to be able to use PPP over synchronous
+ (HDLC) tty devices, such as the SyncLink adapter. These devices
+ are often used for high-speed leased lines like T1/E1.
+
+ To compile this driver as a module, choose M here.
+
+config PPP_DEFLATE
+ tristate "PPP Deflate compression"
+ depends on PPP
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ ---help---
+ Support for the Deflate compression method for PPP, which uses the
+ Deflate algorithm (the same algorithm that gzip uses) to compress
+ each PPP packet before it is sent over the wire. The machine at the
+ other end of the PPP link (usually your ISP) has to support the
+ Deflate compression method as well for this to be useful. Even if
+ they don't support it, it is safe to say Y here.
+
+ To compile this driver as a module, choose M here.
+
+config PPP_BSDCOMP
+ tristate "PPP BSD-Compress compression"
+ depends on PPP
+ ---help---
+ Support for the BSD-Compress compression method for PPP, which uses
+ the LZW compression method to compress each PPP packet before it is
+ sent over the wire. The machine at the other end of the PPP link
+ (usually your ISP) has to support the BSD-Compress compression
+ method as well for this to be useful. Even if they don't support it,
+ it is safe to say Y here.
+
+ The PPP Deflate compression method ("PPP Deflate compression",
+ above) is preferable to BSD-Compress, because it compresses better
+ and is patent-free.
+
+ Note that the BSD compression code will always be compiled as a
+ module; it is called bsd_comp and will show up in the directory
+ modules once you have said "make modules". If unsure, say N.
+
+config PPPOE
+ tristate "PPP over Ethernet (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP
+ help
+ Support for PPP over Ethernet.
+
+ This driver requires the latest version of pppd from the CVS
+ repository at cvs.samba.org. Alternatively, see the
+ RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
+	  which contains instructions on how to use this driver (under
+ the heading "Kernel mode PPPoE").
+
+config PPPOATM
+ tristate "PPP over ATM"
+ depends on ATM && PPP
+ help
+ Support PPP (Point to Point Protocol) encapsulated in ATM frames.
+ This implementation does not yet comply with section 8 of RFC2364,
+ which can lead to bad results if the ATM peer loses state and
+ changes its encapsulation unilaterally.
+
+config SLIP
+ tristate "SLIP (serial line) support"
+ depends on NETDEVICES
+ ---help---
+ Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
+ connect to your Internet service provider or to connect to some
+ other local Unix box or if you want to configure your Linux box as a
+ Slip/CSlip server for other people to dial in. SLIP (Serial Line
+ Internet Protocol) is a protocol used to send Internet traffic over
+ serial connections such as telephone lines or null modem cables;
+ nowadays, the protocol PPP is more commonly used for this same
+ purpose.
+
+ Normally, your access provider has to support SLIP in order for you
+ to be able to use it, but there is now a SLIP emulator called SLiRP
+ around (available from
+ <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
+ allows you to use SLIP over a regular dial up shell connection. If
+ you plan to use SLiRP, make sure to say Y to CSLIP, below. The
+ NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, explains how to
+ configure SLIP. Note that you don't need this option if you just
+ want to run term (term is a program which gives you almost full
+ Internet connectivity if you have a regular dial up shell account on
+ some Internet connected Unix computer. Read
+ <http://www.bart.nl/~patrickr/term-howto/Term-HOWTO.html>). SLIP
+ support will enlarge your kernel by about 4 KB. If unsure, say N.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will be
+ called slip.
+
+config SLIP_COMPRESSED
+ bool "CSLIP compressed headers"
+ depends on SLIP
+ ---help---
+ This protocol is faster than SLIP because it uses compression on the
+ TCP/IP headers (not on the data itself), but it has to be supported
+ on both ends. Ask your access provider if you are not sure and
+ answer Y, just in case. You will still be able to use plain SLIP. If
+ you plan to use SLiRP, the SLIP emulator (available from
+ <ftp://ibiblio.org/pub/Linux/system/network/serial/>) which
+ allows you to use SLIP over a regular dial up shell connection, you
+ definitely want to say Y here. The NET-3-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, explains how to configure
+ CSLIP. This won't enlarge your kernel.
+
+config SLIP_SMART
+ bool "Keepalive and linefill"
+ depends on SLIP
+ help
+ Adds capabilities to the SLIP driver to support the RELCOM line
+ fill and keepalive monitoring. Ideal on poor-quality analogue lines.
+
+config SLIP_MODE_SLIP6
+ bool "Six bit SLIP encapsulation"
+ depends on SLIP
+ help
+ Just occasionally you may need to run IP over hostile serial
+ networks that don't pass all control characters or are only seven
+ bit. Saying Y here adds an extra mode you can use with SLIP:
+ "slip6". In this mode, SLIP will only send normal ASCII symbols over
+ the serial device. Naturally, this has to be supported at the other
+ end of the link as well. It's good enough, for example, to run IP
+ over the async ports of a Camtec JNT Pad. If unsure, say N.
+
+config NET_FC
+ bool "Fibre Channel driver support"
+ depends on NETDEVICES && SCSI && PCI
+ help
+ Fibre Channel is a high-speed serial protocol mainly used to connect
+ large storage devices to the computer; it is compatible with and
+ intended to replace SCSI.
+
+ If you intend to use Fibre Channel, you need to have a Fibre Channel
+ adaptor card in your computer; say Y here and to the driver for your
+ adaptor below. You also should have said Y to "SCSI support" and
+ "SCSI generic support".
+
+config SHAPER
+ tristate "Traffic Shaper (EXPERIMENTAL)"
+ depends on NETDEVICES && EXPERIMENTAL
+ ---help---
+ The traffic shaper is a virtual network device that allows you to
+ limit the rate of outgoing data flow over some other network device.
+ The traffic that you want to slow down can then be routed through
+ these virtual devices. See
+ <file:Documentation/networking/shaper.txt> for more information.
+
+ An alternative to this traffic shaper is the experimental
+ Class-Based Queueing (CBQ) scheduling support which you get if you
+ say Y to "QoS and/or fair queueing" above.
+
+ To set up and configure shaper devices, you need the shapecfg
+ program, available from <ftp://shadow.cabi.net/pub/Linux/> in the
+ shaper package.
+
+ To compile this driver as a module, choose M here: the module
+ will be called shaper. If unsure, say N.
+
+config NETCONSOLE
+ tristate "Network console logging support (EXPERIMENTAL)"
+ depends on NETDEVICES && EXPERIMENTAL
+ ---help---
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
+
diff --git a/drivers/net/LICENSE.SRC b/drivers/net/LICENSE.SRC
new file mode 100644
index 000000000000..72c44e730c7a
--- /dev/null
+++ b/drivers/net/LICENSE.SRC
@@ -0,0 +1,15 @@
+Code in this directory written at the IDA Supercomputing Research Center
+carries the following copyright and license.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used
+ and distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ In addition to the disclaimers in the GPL, SRC expressly disclaims any
+ and all warranties, expressed or implied, concerning the enclosed software.
+ This software was developed at SRC for use in internal research, and the
+ intent in sharing this software is to promote the productive interchange
+ of ideas throughout the research community. All software is furnished
+ on an "as-is" basis. No further updates to this software should be
+ expected. Although updates may occur, no commitment exists.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
new file mode 100644
index 000000000000..6202b10dbb4d
--- /dev/null
+++ b/drivers/net/Makefile
@@ -0,0 +1,196 @@
+#
+# Makefile for the Linux network (ethercard) device drivers.
+#
+
+ifeq ($(CONFIG_ISDN_PPP),y)
+ obj-$(CONFIG_ISDN) += slhc.o
+endif
+
+obj-$(CONFIG_E1000) += e1000/
+obj-$(CONFIG_IBM_EMAC) += ibm_emac/
+obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+
+gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_phy.o
+
+#
+# link order important here
+#
+obj-$(CONFIG_PLIP) += plip.o
+
+obj-$(CONFIG_ROADRUNNER) += rrunner.o
+
+obj-$(CONFIG_HAPPYMEAL) += sunhme.o
+obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_SUNQE) += sunqe.o
+obj-$(CONFIG_SUNBMAC) += sunbmac.o
+obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
+obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
+
+obj-$(CONFIG_MACE) += mace.o
+obj-$(CONFIG_BMAC) += bmac.o
+
+obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
+
+obj-$(CONFIG_DGRS) += dgrs.o
+obj-$(CONFIG_VORTEX) += 3c59x.o
+obj-$(CONFIG_TYPHOON) += typhoon.o
+obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
+obj-$(CONFIG_PCNET32) += pcnet32.o
+obj-$(CONFIG_EEPRO100) += eepro100.o
+obj-$(CONFIG_E100) += e100.o
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS900) += sis900.o
+obj-$(CONFIG_YELLOWFIN) += yellowfin.o
+obj-$(CONFIG_ACENIC) += acenic.o
+obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
+obj-$(CONFIG_NATSEMI) += natsemi.o
+obj-$(CONFIG_NS83820) += ns83820.o
+obj-$(CONFIG_STNIC) += stnic.o 8390.o
+obj-$(CONFIG_FEALNX) += fealnx.o
+obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_TC35815) += tc35815.o
+obj-$(CONFIG_SK98LIN) += sk98lin/
+obj-$(CONFIG_SKFP) += skfp/
+obj-$(CONFIG_VIA_RHINE) += via-rhine.o
+obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
+obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
+
+#
+# end link order section
+#
+
+obj-$(CONFIG_MII) += mii.o
+
+obj-$(CONFIG_SUNDANCE) += sundance.o
+obj-$(CONFIG_HAMACHI) += hamachi.o
+obj-$(CONFIG_NET) += Space.o loopback.o
+obj-$(CONFIG_SEEQ8005) += seeq8005.o
+obj-$(CONFIG_NET_SB1000) += sb1000.o
+obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_APNE) += apne.o 8390.o
+obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
+obj-$(CONFIG_SHAPER) += shaper.o
+obj-$(CONFIG_SK_G16) += sk_g16.o
+obj-$(CONFIG_HP100) += hp100.o
+obj-$(CONFIG_SMC9194) += smc9194.o
+obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_68360_ENET) += 68360enet.o
+obj-$(CONFIG_ARM_ETHERH) += 8390.o
+obj-$(CONFIG_WD80x3) += wd.o 8390.o
+obj-$(CONFIG_EL2) += 3c503.o 8390.o
+obj-$(CONFIG_NE2000) += ne.o 8390.o
+obj-$(CONFIG_NE2_MCA) += ne2.o 8390.o
+obj-$(CONFIG_HPLAN) += hp.o 8390.o
+obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o
+obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
+obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
+obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
+obj-$(CONFIG_E2100) += e2100.o 8390.o
+obj-$(CONFIG_ES3210) += es3210.o 8390.o
+obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_NE3210) += ne3210.o 8390.o
+obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_FORCEDETH) += forcedeth.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+
+obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
+obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
+obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
+obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
+obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+
+obj-$(CONFIG_SLIP) += slip.o
+ifeq ($(CONFIG_SLIP_COMPRESSED),y)
+ obj-$(CONFIG_SLIP) += slhc.o
+endif
+
+obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_DE600) += de600.o
+obj-$(CONFIG_DE620) += de620.o
+obj-$(CONFIG_LANCE) += lance.o
+obj-$(CONFIG_SUN3_82586) += sun3_82586.o
+obj-$(CONFIG_SUN3LANCE) += sun3lance.o
+obj-$(CONFIG_DEFXX) += defxx.o
+obj-$(CONFIG_SGISEEQ) += sgiseeq.o
+obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
+obj-$(CONFIG_AT1700) += at1700.o
+obj-$(CONFIG_FMV18X) += fmv18x.o
+obj-$(CONFIG_EL1) += 3c501.o
+obj-$(CONFIG_EL16) += 3c507.o
+obj-$(CONFIG_ELMC) += 3c523.o
+obj-$(CONFIG_SKMC) += sk_mca.o
+obj-$(CONFIG_IBMLANA) += ibmlana.o
+obj-$(CONFIG_ELMC_II) += 3c527.o
+obj-$(CONFIG_EL3) += 3c509.o
+obj-$(CONFIG_3C515) += 3c515.o
+obj-$(CONFIG_EEXPRESS) += eexpress.o
+obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
+obj-$(CONFIG_8139CP) += 8139cp.o
+obj-$(CONFIG_8139TOO) += 8139too.o
+obj-$(CONFIG_ZNET) += znet.o
+obj-$(CONFIG_LAN_SAA9730) += saa9730.o
+obj-$(CONFIG_DEPCA) += depca.o
+obj-$(CONFIG_EWRK3) += ewrk3.o
+obj-$(CONFIG_ATP) += atp.o
+obj-$(CONFIG_NI5010) += ni5010.o
+obj-$(CONFIG_NI52) += ni52.o
+obj-$(CONFIG_NI65) += ni65.o
+obj-$(CONFIG_ELPLUS) += 3c505.o
+obj-$(CONFIG_AC3200) += ac3200.o 8390.o
+obj-$(CONFIG_APRICOT) += 82596.o
+obj-$(CONFIG_LASI_82596) += lasi_82596.o
+obj-$(CONFIG_MVME16x_NET) += 82596.o
+obj-$(CONFIG_BVME6000_NET) += 82596.o
+
+# This is also a 82596 and should probably be merged
+obj-$(CONFIG_LP486E) += lp486e.o
+
+obj-$(CONFIG_ETH16I) += eth16i.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
+obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
+obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
+obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o
+obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
+obj-$(CONFIG_DECLANCE) += declance.o
+obj-$(CONFIG_ATARILANCE) += atarilance.o
+obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
+obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
+obj-$(CONFIG_A2065) += a2065.o
+obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_ARIADNE) += ariadne.o
+obj-$(CONFIG_CS89x0) += cs89x0.o
+obj-$(CONFIG_MACSONIC) += macsonic.o
+obj-$(CONFIG_MACMACE) += macmace.o
+obj-$(CONFIG_MAC89x0) += mac89x0.o
+obj-$(CONFIG_TUN) += tun.o
+obj-$(CONFIG_DL2K) += dl2k.o
+obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+obj-$(CONFIG_IBMVETH) += ibmveth.o
+obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_FEC_8XX) += fec_8xx/
+
+obj-$(CONFIG_ARM) += arm/
+obj-$(CONFIG_DEV_APPLETALK) += appletalk/
+obj-$(CONFIG_TR) += tokenring/
+obj-$(CONFIG_WAN) += wan/
+obj-$(CONFIG_ARCNET) += arcnet/
+obj-$(CONFIG_NET_PCMCIA) += pcmcia/
+obj-$(CONFIG_NET_WIRELESS) += wireless/
+obj-$(CONFIG_NET_TULIP) += tulip/
+obj-$(CONFIG_HAMRADIO) += hamradio/
+obj-$(CONFIG_IRDA) += irda/
+obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+
+obj-$(CONFIG_NETCONSOLE) += netconsole.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
new file mode 100644
index 000000000000..fc519377b5aa
--- /dev/null
+++ b/drivers/net/Space.c
@@ -0,0 +1,412 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * Version: @(#)Space.c 1.0.7 08/12/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@scyld.com>
+ *
+ * Changelog:
+ * Stephen Hemminger (09/2003)
+ * - get rid of pre-linked dev list, dynamic device allocation
+ * Paul Gortmaker (03/2002)
+ * - struct init cleanup, enable multiple ISA autoprobes.
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 09/1999
+ * - fix sbni: s/device/net_device/
+ * Paul Gortmaker (06/98):
+ * - sort probes in a sane way, make sure all (safe) probes
+ * get run once & failed autoprobes don't autoprobe again.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/divert.h>
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern struct net_device *ne2_probe(int unit);
+extern struct net_device *hp100_probe(int unit);
+extern struct net_device *ultra_probe(int unit);
+extern struct net_device *ultra32_probe(int unit);
+extern struct net_device *wd_probe(int unit);
+extern struct net_device *el2_probe(int unit);
+extern struct net_device *ne_probe(int unit);
+extern struct net_device *hp_probe(int unit);
+extern struct net_device *hp_plus_probe(int unit);
+extern struct net_device *express_probe(int unit);
+extern struct net_device *eepro_probe(int unit);
+extern struct net_device *at1700_probe(int unit);
+extern struct net_device *fmv18x_probe(int unit);
+extern struct net_device *eth16i_probe(int unit);
+extern struct net_device *i82596_probe(int unit);
+extern struct net_device *ewrk3_probe(int unit);
+extern struct net_device *el1_probe(int unit);
+extern struct net_device *wavelan_probe(int unit);
+extern struct net_device *arlan_probe(int unit);
+extern struct net_device *el16_probe(int unit);
+extern struct net_device *elmc_probe(int unit);
+extern struct net_device *skmca_probe(int unit);
+extern struct net_device *elplus_probe(int unit);
+extern struct net_device *ac3200_probe(int unit);
+extern struct net_device *es_probe(int unit);
+extern struct net_device *lne390_probe(int unit);
+extern struct net_device *e2100_probe(int unit);
+extern struct net_device *ni5010_probe(int unit);
+extern struct net_device *ni52_probe(int unit);
+extern struct net_device *ni65_probe(int unit);
+extern struct net_device *sonic_probe(int unit);
+extern struct net_device *SK_init(int unit);
+extern struct net_device *seeq8005_probe(int unit);
+extern struct net_device *smc_init(int unit);
+extern struct net_device *atarilance_probe(int unit);
+extern struct net_device *sun3lance_probe(int unit);
+extern struct net_device *sun3_82586_probe(int unit);
+extern struct net_device *apne_probe(int unit);
+extern struct net_device *bionet_probe(int unit);
+extern struct net_device *pamsnet_probe(int unit);
+extern struct net_device *cs89x0_probe(int unit);
+extern struct net_device *hplance_probe(int unit);
+extern struct net_device *bagetlance_probe(int unit);
+extern struct net_device *mvme147lance_probe(int unit);
+extern struct net_device *tc515_probe(int unit);
+extern struct net_device *lance_probe(int unit);
+extern struct net_device *mace_probe(int unit);
+extern struct net_device *macsonic_probe(int unit);
+extern struct net_device *mac8390_probe(int unit);
+extern struct net_device *mac89x0_probe(int unit);
+extern struct net_device *mc32_probe(int unit);
+extern struct net_device *cops_probe(int unit);
+extern struct net_device *ltpc_probe(void);
+
+/* Detachable devices ("pocket adaptors") */
+extern struct net_device *de620_probe(int unit);
+
+/* Fibre Channel adapters */
+extern int iph5526_probe(struct net_device *dev);
+
+/* SBNI adapters */
+extern int sbni_probe(int unit);
+
+struct devprobe2 {
+ struct net_device *(*probe)(int unit);
+ int status; /* non-zero if autoprobe has failed */
+};
+
+static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
+{
+ struct net_device *dev;
+ for (; p->probe; p++) {
+ if (autoprobe && p->status)
+ continue;
+ dev = p->probe(unit);
+ if (!IS_ERR(dev))
+ return 0;
+ if (autoprobe)
+ p->status = PTR_ERR(dev);
+ }
+ return -ENODEV;
+}
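probe_list2() above walks a table of probe functions until one returns a real device; IS_ERR() tells an error-encoded pointer apart from a live net_device, and a failed autoprobe is remembered so it is not retried for later units. A minimal user-space sketch of the same walk-the-table idea, with hypothetical stand-in probes instead of the kernel API:

#include <stdio.h>
#include <stddef.h>

struct fake_probe {
	const char *(*probe)(int unit);	/* stand-in: returns NULL on failure */
	int failed;			/* remembered across calls */
};

static const char *probe_a(int unit) { (void)unit; return NULL; }
static const char *probe_b(int unit) { return unit == 0 ? "eth0" : NULL; }

static int probe_list(int unit, struct fake_probe *p, int autoprobe)
{
	for (; p->probe; p++) {
		const char *dev;

		if (autoprobe && p->failed)	/* skip probes that already failed */
			continue;
		dev = p->probe(unit);
		if (dev) {
			printf("unit %d -> %s\n", unit, dev);
			return 0;
		}
		if (autoprobe)
			p->failed = 1;
	}
	return -1;				/* nothing found for this unit */
}

int main(void)
{
	struct fake_probe probes[] = {
		{ probe_a, 0 }, { probe_b, 0 }, { NULL, 0 }
	};

	probe_list(0, probes, 1);	/* probe_a fails and is marked, probe_b wins */
	probe_list(1, probes, 1);	/* probe_a is skipped this time */
	return 0;
}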
+
+/*
+ * This is a bit of an artificial separation as there are PCI drivers
+ * that also probe for EISA cards (in the PCI group) and there are ISA
+ * drivers that probe for EISA cards (in the ISA group). These are the
+ * legacy EISA-only driver probes, and also the legacy PCI probes.
+ */
+
+static struct devprobe2 eisa_probes[] __initdata = {
+#ifdef CONFIG_ULTRA32
+ {ultra32_probe, 0},
+#endif
+#ifdef CONFIG_AC3200
+ {ac3200_probe, 0},
+#endif
+#ifdef CONFIG_ES3210
+ {es_probe, 0},
+#endif
+#ifdef CONFIG_LNE390
+ {lne390_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+static struct devprobe2 mca_probes[] __initdata = {
+#ifdef CONFIG_NE2_MCA
+ {ne2_probe, 0},
+#endif
+#ifdef CONFIG_ELMC /* 3c523 */
+ {elmc_probe, 0},
+#endif
+#ifdef CONFIG_ELMC_II /* 3c527 */
+ {mc32_probe, 0},
+#endif
+#ifdef CONFIG_SKMC /* SKnet Microchannel */
+ {skmca_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+/*
+ * ISA probes that touch addresses < 0x400 (including those that also
+ * look for EISA/PCI/MCA cards in addition to ISA cards).
+ */
+static struct devprobe2 isa_probes[] __initdata = {
+#ifdef CONFIG_HP100 /* ISA, EISA & PCI */
+ {hp100_probe, 0},
+#endif
+#ifdef CONFIG_3C515
+ {tc515_probe, 0},
+#endif
+#ifdef CONFIG_ULTRA
+ {ultra_probe, 0},
+#endif
+#ifdef CONFIG_WD80x3
+ {wd_probe, 0},
+#endif
+#ifdef CONFIG_EL2 /* 3c503 */
+ {el2_probe, 0},
+#endif
+#ifdef CONFIG_HPLAN
+ {hp_probe, 0},
+#endif
+#ifdef CONFIG_HPLAN_PLUS
+ {hp_plus_probe, 0},
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ {e2100_probe, 0},
+#endif
+#if defined(CONFIG_NE2000) || \
+ defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */
+ {ne_probe, 0},
+#endif
+#ifdef CONFIG_LANCE /* ISA/VLB (use pcnet32 for PCI cards) */
+ {lance_probe, 0},
+#endif
+#ifdef CONFIG_SMC9194
+ {smc_init, 0},
+#endif
+#ifdef CONFIG_SEEQ8005
+ {seeq8005_probe, 0},
+#endif
+#ifdef CONFIG_CS89x0
+ {cs89x0_probe, 0},
+#endif
+#ifdef CONFIG_AT1700
+ {at1700_probe, 0},
+#endif
+#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
+ {fmv18x_probe, 0},
+#endif
+#ifdef CONFIG_ETH16I
+ {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ {express_probe, 0},
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ {eepro_probe, 0},
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ {ewrk3_probe, 0},
+#endif
+#if defined(CONFIG_APRICOT) || defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
+ {i82596_probe, 0},
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ {el1_probe, 0},
+#endif
+#ifdef CONFIG_WAVELAN /* WaveLAN */
+ {wavelan_probe, 0},
+#endif
+#ifdef CONFIG_ARLAN /* Aironet */
+ {arlan_probe, 0},
+#endif
+#ifdef CONFIG_EL16 /* 3c507 */
+ {el16_probe, 0},
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ {elplus_probe, 0},
+#endif
+#ifdef CONFIG_SK_G16
+ {SK_init, 0},
+#endif
+#ifdef CONFIG_NI5010
+ {ni5010_probe, 0},
+#endif
+#ifdef CONFIG_NI52
+ {ni52_probe, 0},
+#endif
+#ifdef CONFIG_NI65
+ {ni65_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+static struct devprobe2 parport_probes[] __initdata = {
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ {de620_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+static struct devprobe2 m68k_probes[] __initdata = {
+#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
+ {atarilance_probe, 0},
+#endif
+#ifdef CONFIG_SUN3LANCE /* sun3 onboard Lance chip */
+ {sun3lance_probe, 0},
+#endif
+#ifdef CONFIG_SUN3_82586 /* sun3 onboard Intel 82586 chip */
+ {sun3_82586_probe, 0},
+#endif
+#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */
+ {apne_probe, 0},
+#endif
+#ifdef CONFIG_ATARI_BIONET /* Atari Bionet Ethernet board */
+ {bionet_probe, 0},
+#endif
+#ifdef CONFIG_ATARI_PAMSNET /* Atari PAMsNet Ethernet board */
+ {pamsnet_probe, 0},
+#endif
+#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */
+ {mvme147lance_probe, 0},
+#endif
+#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */
+ {mace_probe, 0},
+#endif
+#ifdef CONFIG_MACSONIC /* Mac SONIC-based Ethernet of all sorts */
+ {macsonic_probe, 0},
+#endif
+#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */
+ {mac8390_probe, 0},
+#endif
+#ifdef CONFIG_MAC89x0
+ {mac89x0_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+/*
+ * Unified ethernet device probe, segmented per architecture and
+ * per bus interface. This drives the legacy devices only for now.
+ */
+
+static void __init ethif_probe2(int unit)
+{
+ unsigned long base_addr = netdev_boot_base("eth", unit);
+
+ if (base_addr == 1)
+ return;
+
+ (void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
+ probe_list2(unit, eisa_probes, base_addr == 0) &&
+ probe_list2(unit, mca_probes, base_addr == 0) &&
+ probe_list2(unit, isa_probes, base_addr == 0) &&
+ probe_list2(unit, parport_probes, base_addr == 0));
+}
+
+#ifdef CONFIG_TR
+/* Token-ring device probe */
+extern int ibmtr_probe_card(struct net_device *);
+extern struct net_device *sk_isa_probe(int unit);
+extern struct net_device *proteon_probe(int unit);
+extern struct net_device *smctr_probe(int unit);
+
+static struct devprobe2 tr_probes2[] __initdata = {
+#ifdef CONFIG_SKISA
+ {sk_isa_probe, 0},
+#endif
+#ifdef CONFIG_PROTEON
+ {proteon_probe, 0},
+#endif
+#ifdef CONFIG_SMCTR
+ {smctr_probe, 0},
+#endif
+ {NULL, 0},
+};
+
+static __init int trif_probe(int unit)
+{
+ int err = -ENODEV;
+#ifdef CONFIG_IBMTR
+ struct net_device *dev = alloc_trdev(0);
+ if (!dev)
+ return -ENOMEM;
+
+ sprintf(dev->name, "tr%d", unit);
+ netdev_boot_setup_check(dev);
+ err = ibmtr_probe_card(dev);
+ if (err)
+ free_netdev(dev);
+#endif
+ return err;
+}
+
+static void __init trif_probe2(int unit)
+{
+ unsigned long base_addr = netdev_boot_base("tr", unit);
+
+ if (base_addr == 1)
+ return;
+ probe_list2(unit, tr_probes2, base_addr == 0);
+}
+#endif
+
+
+/*
+ * The loopback device is global so it can be directly referenced
+ * by the network code. Also, it must be first on the device list.
+ */
+extern int loopback_init(void);
+
+/* Statically configured drivers -- order matters here. */
+static int __init net_olddevs_init(void)
+{
+ int num;
+
+ if (loopback_init()) {
+ printk(KERN_ERR "Network loopback device setup failed\n");
+ }
+
+
+#ifdef CONFIG_SBNI
+ for (num = 0; num < 8; ++num)
+ sbni_probe(num);
+#endif
+#ifdef CONFIG_TR
+ for (num = 0; num < 8; ++num)
+ if (!trif_probe(num))
+ trif_probe2(num);
+#endif
+ for (num = 0; num < 8; ++num)
+ ethif_probe2(num);
+
+#ifdef CONFIG_COPS
+ cops_probe(0);
+ cops_probe(1);
+ cops_probe(2);
+#endif
+#ifdef CONFIG_LTPC
+ ltpc_probe();
+#endif
+
+ return 0;
+}
+
+device_initcall(net_olddevs_init);
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
new file mode 100644
index 000000000000..8e538a6d7d97
--- /dev/null
+++ b/drivers/net/a2065.c
@@ -0,0 +1,843 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * Fixes and tips by:
+ * - Janos Farkas (CHEXUM@sparta.banki.hu)
+ * - Jes Degn Soerensen (jds@kom.auc.dk)
+ * - Matt Domsch (Matt_Domsch@dell.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "a2065.h"
+
+
+ /*
+ * Transmit/Receive Ring Definitions
+ */
+
+#define LANCE_LOG_TX_BUFFERS (2)
+#define LANCE_LOG_RX_BUFFERS (4)
+
+#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
+
+#define TX_RING_MOD_MASK (TX_RING_SIZE-1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE-1)
+
+#define PKT_BUF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUF_SIZE
+#define TX_BUFF_SIZE PKT_BUF_SIZE
+
+
+ /*
+ * Layout of the Lance's RAM Buffer
+ */
+
+
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+};
+
+
+ /*
+ * Private Device Data
+ */
+
+struct lance_private {
+ char *name;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block; /* Host's view */
+ volatile struct lance_init_block *lance_init_block; /* Lance's view */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ struct net_device_stats stats;
+ int tpe; /* cable-selection is TPE */
+ int auto_select; /* cable-selection by carrier */
+ unsigned short busmaster_regval;
+
+#ifdef CONFIG_SUNLANCE
+ struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
+ int burst_sizes; /* ledma SBus burst sizes */
+#endif
+ struct timer_list multicast_timer;
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
+
+
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
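TX_BUFFS_AVAIL derives the number of free transmit descriptors from the consumer (tx_old) and producer (tx_new) indices of the circular ring, and LANCE_ADDR strips the top address byte because the LANCE only sees a 24-bit bus address. A stand-alone sketch of the same arithmetic, using the ring size defined above and illustrative index values:

#include <stdio.h>

#define TX_RING_SIZE      4		/* 1 << LANCE_LOG_TX_BUFFERS */
#define TX_RING_MOD_MASK  (TX_RING_SIZE - 1)

/* Free descriptors between the consumer (old) and producer (new) index;
 * one slot is always kept unused as a guard.
 */
static int tx_buffs_avail(int tx_old, int tx_new)
{
	return (tx_old <= tx_new) ?
		tx_old + TX_RING_MOD_MASK - tx_new :
		tx_old - tx_new - 1;
}

/* The board decodes only 24 address bits, so strip the top byte. */
#define LANCE_ADDR(x)	((unsigned long)(x) & ~0xff000000UL)

int main(void)
{
	printf("empty ring:  %d free\n", tx_buffs_avail(0, 0));	/* 3 */
	printf("one queued:  %d free\n", tx_buffs_avail(0, 1));	/* 2 */
	printf("wrapped:     %d free\n", tx_buffs_avail(3, 1));	/* 1 */
	printf("24-bit addr: 0x%lx\n", LANCE_ADDR(0xff008000UL)); /* 0x8000 */
	return 0;
}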
+/* Load the CSR registers */
+static void load_csrs (struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR (aib);
+
+ ll->rap = LE_CSR1;
+ ll->rdp = (leptr & 0xFFFF);
+ ll->rap = LE_CSR2;
+ ll->rdp = leptr >> 16;
+ ll->rap = LE_CSR3;
+ ll->rdp = lp->busmaster_regval;
+
+ /* Point back to csr0 */
+ ll->rap = LE_CSR0;
+}
+
+#define ZERO 0
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ if (ZERO)
+ printk(KERN_DEBUG "TX rings:\n");
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i <= (1<<lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ if (i < 3 && ZERO)
+ printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk(KERN_DEBUG "RX rings:\n");
+ for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ if (i < 3 && ZERO)
+ printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (ZERO)
+ printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (ZERO)
+ printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+}
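The rx_len/tx_len words written at the end of lance_init_ring() pack two fields: the log2 of the ring size goes into the top three bits and the high byte of the 24-bit descriptor address into the low byte. A small sketch of that packing, with a made-up descriptor address:

#include <stdio.h>

/* Build the rx_len/tx_len word: log2(ring size) in bits 15..13,
 * high 8 bits of the 24-bit descriptor address in bits 7..0.
 */
static unsigned short ring_len_word(int log_bufs, unsigned long desc_addr)
{
	return (unsigned short)((log_bufs << 13) | ((desc_addr >> 16) & 0xff));
}

int main(void)
{
	unsigned long rx_desc = 0x00e90020;	/* hypothetical 24-bit address */

	/* LANCE_LOG_RX_BUFFERS is 4 in this driver -> 16 descriptors */
	printf("rx_len = 0x%04x\n", ring_len_word(4, rx_desc));	/* 0x80e9 */
	return 0;
}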
+
+static int init_restart_lance (struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_INIT;
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, ll->rdp);
+ return -EIO;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ ll->rdp = LE_C0_IDON;
+ ll->rdp = LE_C0_INEA | LE_C0_STRT;
+
+ return 0;
+}
+
+static int lance_rx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+ int len = 0; /* XXX shut up gcc warnings */
+ struct sk_buff *skb = 0; /* XXX shut up gcc warnings */
+
+#ifdef TEST_HITS
+ int i;
+ printk(KERN_DEBUG "[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk ("]\n");
+#endif
+
+ ll->rdp = LE_C0_RINT|LE_C0_INEA;
+ for (rd = &ib->brx_ring [lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = dev_alloc_skb (len+2);
+
+ if (skb == 0) {
+ printk(KERN_WARNING "%s: Memory squeeze, "
+ "deferring packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put (skb, len); /* make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
+ len, 0);
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+ /* csr0 is 2f3 */
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring [i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ lp->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_ERR "%s: Carrier Lost, "
+ "trying %s\n", dev->name,
+ lp->tpe?"TPE":"AUI");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
+ "restarting\n", dev->name);
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ lp->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ return 0;
+}
+
+static irqreturn_t
+lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ int csr0;
+
+ dev = (struct net_device *) dev_id;
+
+ lp = netdev_priv(dev);
+ ll = lp->ll;
+
+ ll->rap = LE_CSR0; /* LANCE Controller Status */
+ csr0 = ll->rdp;
+
+ if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */
+ return IRQ_NONE; /* been generated by the Lance. */
+
+ /* Acknowledge all the interrupt sources ASAP */
+ ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|
+ LE_C0_INIT);
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx (dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx (dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk(KERN_ERR "%s: Bus master arbitration failure, status "
+ "%4.4x.\n", dev->name, csr0);
+ /* Restart the chip. */
+ ll->rdp = LE_C0_STRT;
+ }
+
+ if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|
+ LE_C0_IDON|LE_C0_INEA;
+ return IRQ_HANDLED;
+}
+
+struct net_device *last_dev = 0;
+
+static int lance_open (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int ret;
+
+ last_dev = dev;
+
+ /* Stop the Lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (ret) return ret;
+
+ load_csrs (lp);
+ lance_init_ring (dev);
+
+ netif_start_queue(dev);
+
+ return init_restart_lance (lp);
+}
+
+static int lance_close (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return 0;
+}
+
+static inline int lance_reset (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ load_csrs (lp);
+
+ lance_init_ring (dev);
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ status = init_restart_lance (lp);
+#ifdef DEBUG_DRIVER
+ printk(KERN_DEBUG "Lance restart=%d\n", status);
+#endif
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ int status = 0;
+ static int outs;
+ unsigned long flags;
+
+ skblen = skb->len;
+ len = skblen;
+
+ if (len < ETH_ZLEN) {
+ len = ETH_ZLEN;
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ local_irq_save(flags);
+
+ if (!TX_BUFFS_AVAIL){
+ local_irq_restore(flags);
+ return -1;
+ }
+
+#ifdef DEBUG_DRIVER
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n" KERN_DEBUG);
+ printk ("%2.2x ", skb->data [i]);
+ }
+ printk("\n");
+ }
+#endif
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+
+ memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+
+ /* Clear the slack of the packet, do I need this? */
+ if (len != skblen)
+ memset ((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+
+ outs++;
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+
+ local_irq_restore(flags);
+
+ return status;
+}
+
+static struct net_device_stats *lance_get_stats (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct dev_mc_list *dmi=dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI){
+ ib->filter [0] = 0xffffffff;
+ ib->filter [1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++){
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+ return;
+}
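lance_load_multicast() picks one of the 64 bits in the logical address filter from the top six bits of the little-endian Ethernet CRC of each multicast address, then sets that bit in the 16-bit words overlaying ib->filter. A self-contained sketch of the selection; the CRC routine below is the common bit-serial form that ether_crc_le() is assumed to implement, and the sample address is arbitrary:

#include <stdio.h>

/* Bit-serial CRC-32 over the address bytes, LSB first. */
static unsigned int crc32_le(const unsigned char *data, int len)
{
	unsigned int crc = ~0u;
	int i, bit;

	for (i = 0; i < len; i++) {
		unsigned char byte = data[i];
		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned short filter[4] = { 0, 0, 0, 0 };	/* 64-bit filter */
	unsigned int crc = crc32_le(addr, 6) >> 26;	/* top 6 bits */

	filter[crc >> 4] |= 1 << (crc & 0xf);		/* set one of 64 bits */
	printf("bit %u -> filter[%u] = 0x%04x\n",
	       crc, crc >> 4, (unsigned)filter[crc >> 4]);
	return 0;
}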
+
+static void lance_set_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring (dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast (dev);
+ }
+ load_csrs (lp);
+ init_restart_lance (lp);
+ netif_wake_queue(dev);
+}
+
+static int __devinit a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void __devexit a2065_remove_one(struct zorro_dev *z);
+
+
+static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_CBM_A2065_1 },
+ { ZORRO_PROD_CBM_A2065_2 },
+ { ZORRO_PROD_AMERISTAR_A2065 },
+ { 0 }
+};
+
+static struct zorro_driver a2065_driver = {
+ .name = "a2065",
+ .id_table = a2065_zorro_tbl,
+ .probe = a2065_init_one,
+ .remove = __devexit_p(a2065_remove_one),
+};
+
+static int __devinit a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ struct lance_private *priv;
+ unsigned long board, base_addr, mem_start;
+ struct resource *r1, *r2;
+ int err;
+
+ board = z->resource.start;
+ base_addr = board+A2065_LANCE;
+ mem_start = board+A2065_RAM;
+
+ r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
+ "Am7990");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_resource(r1);
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (dev == NULL) {
+ release_resource(r1);
+ release_resource(r2);
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ dev->dev_addr[0] = 0x00;
+ if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0x10;
+ } else { /* Ameristar */
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x9f;
+ }
+ dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
+ dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
+ dev->base_addr = ZTWO_VADDR(base_addr);
+ dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start+A2065_RAM_SIZE;
+
+ priv->ll = (volatile struct lance_regs *)dev->base_addr;
+ priv->init_block = (struct lance_init_block *)dev->mem_start;
+ priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
+ priv->auto_select = 0;
+ priv->busmaster_regval = LE_C3_BSWP;
+
+ priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
+ priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ dev->open = &lance_open;
+ dev->stop = &lance_close;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->tx_timeout = &lance_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+ dev->dma = 0;
+
+ init_timer(&priv->multicast_timer);
+ priv->multicast_timer.data = (unsigned long) dev;
+ priv->multicast_timer.function =
+ (void (*)(unsigned long)) &lance_set_multicast;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_resource(r1);
+ release_resource(r2);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ return 0;
+}
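a2065_init_one() builds the station address from a fixed vendor prefix (00:80:10 for Commodore boards, 00:00:9f for Ameristar) followed by the low 24 bits of the board's Zorro serial number. A short sketch of that construction with a hypothetical serial number:

#include <stdio.h>

/* Build the six-byte station address from a Zorro serial number. */
static void a2065_mac(unsigned char mac[6], unsigned int serial, int ameristar)
{
	mac[0] = 0x00;
	mac[1] = ameristar ? 0x00 : 0x80;
	mac[2] = ameristar ? 0x9f : 0x10;
	mac[3] = (serial >> 16) & 0xff;
	mac[4] = (serial >> 8) & 0xff;
	mac[5] = serial & 0xff;
}

int main(void)
{
	unsigned char mac[6];
	int i;

	a2065_mac(mac, 0x00123456, 0);	/* hypothetical Commodore board */
	for (i = 0; i < 6; i++)
		printf("%02x%c", mac[i], i == 5 ? '\n' : ':');
	return 0;			/* prints 00:80:10:12:34:56 */
}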
+
+
+static void __devexit a2065_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr),
+ sizeof(struct lance_regs));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static int __init a2065_init_module(void)
+{
+ return zorro_module_init(&a2065_driver);
+}
+
+static void __exit a2065_cleanup_module(void)
+{
+ zorro_unregister_driver(&a2065_driver);
+}
+
+module_init(a2065_init_module);
+module_exit(a2065_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/a2065.h b/drivers/net/a2065.h
new file mode 100644
index 000000000000..184ad573dbda
--- /dev/null
+++ b/drivers/net/a2065.h
@@ -0,0 +1,173 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+
+/*
+ * Am7990 Local Area Network Controller for Ethernet (LANCE)
+ */
+
+struct lance_regs {
+ unsigned short rdp; /* Register Data Port */
+ unsigned short rap; /* Register Address Port */
+};
+
+
+/*
+ * Am7990 Control and Status Registers
+ */
+
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] */
+#define LE_CSR2 0x0002 /* IADR[23:16] */
+#define LE_CSR3 0x0003 /* Misc */
+
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+
+#define LE_C0_ERR 0x8000 /* Error */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ unsigned char rmd1_hadr; /* high address of packet */
+ short length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ unsigned short mblength; /* Actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ unsigned char tmd1_hadr; /* high address of packet */
+ short length; /* Length is 2s complement (negative)! */
+ unsigned short misc;
+};
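The length fields in these descriptors hold the buffer size as a negative (two's complement) value, and the driver forces the top four bits on when it fills them in ((-len) | 0xf000). A brief sketch of encoding and decoding such a field, using the PKT_BUF_SIZE from a2065.c:

#include <stdio.h>

/* Encode a buffer length the way the driver fills the descriptor length:
 * two's complement negative size with the top four bits forced to one.
 */
static unsigned short lance_buf_len(int len)
{
	return (unsigned short)((-len) | 0xf000);
}

static int lance_buf_len_decode(unsigned short field)
{
	return -(short)field;			/* back to a positive byte count */
}

int main(void)
{
	unsigned short f = lance_buf_len(1544);	/* PKT_BUF_SIZE above */

	printf("encoded 0x%04x decodes to %d bytes\n",
	       (unsigned)f, lance_buf_len_decode(f));
	return 0;
}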
+
+
+/*
+ * Receive Flags
+ */
+
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved,
+ LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Error Flags
+ */
+
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+
+/*
+ * A2065 Expansion Board Structure
+ */
+
+#define A2065_LANCE 0x4000
+
+#define A2065_RAM 0x8000
+#define A2065_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
new file mode 100644
index 000000000000..24fba36b5c1d
--- /dev/null
+++ b/drivers/net/ac3200.c
@@ -0,0 +1,424 @@
+/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
+/*
+ Written 1993, 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a driver for the Ansel Communications Model 3200 EISA Ethernet LAN
+ Adapter. The programming information is from the users manual, as related
+ by glee@ardnassak.math.clemson.edu.
+
+ Changelog:
+
+ Paul Gortmaker 05/98 : add support for shared mem above 1MB.
+
+ */
+
+static const char version[] =
+ "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ac3200"
+
+/* Offsets from the base address. */
+#define AC_NIC_BASE 0x00
+#define AC_SA_PROM 0x16 /* The station address PROM. */
+#define AC_ADDR0 0x00 /* Prefix station address values. */
+#define AC_ADDR1 0x40
+#define AC_ADDR2 0x90
+#define AC_ID_PORT 0xC80
+#define AC_EISA_ID 0x0110d305
+#define AC_RESET_PORT 0xC84
+#define AC_RESET 0x00
+#define AC_ENABLE 0x01
+#define AC_CONFIG 0xC90 /* The configuration port. */
+
+#define AC_IO_EXTENT 0x20
+ /* Actually accessed is:
+ * AC_NIC_BASE (0-15)
+ * AC_SA_PROM (0-5)
+ * AC_ID_PORT (0-3)
+ * AC_RESET_PORT
+ * AC_CONFIG
+ */
+
+/* Decoding of the configuration register. */
+static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static int addrmap[8] =
+{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
+static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
+
+#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
+#define config2mem(configval) addrmap[(configval) & 7]
+#define config2name(configval) port_name[((configval) >> 6) & 3]
+
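The three config2* macros above split the single AC3200 configuration byte: bits 5..3 index the IRQ table, bits 2..0 the shared-memory map, and bits 7..6 the transceiver name. A stand-alone sketch of the same decoding for one example register value:

#include <stdio.h>

static const unsigned char irqmap[8] = { 15, 12, 11, 10, 9, 7, 5, 3 };
static const unsigned long addrmap[8] = {
	0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000,
	0xFFE0000, 0xFFC0000, 0xD0000, 0
};
static const char *portname[4] = { "10baseT", "invalid", "AUI", "10base2" };

int main(void)
{
	unsigned char cfg = 0x91;	/* example value read from AC_CONFIG */

	printf("IRQ %u, mem 0x%lx, %s port\n",
	       irqmap[(cfg >> 3) & 7],		/* bits 5..3 select the IRQ */
	       addrmap[cfg & 7],		/* bits 2..0 select memory  */
	       portname[(cfg >> 6) & 3]);	/* bits 7..6 select the port */
	return 0;			/* prints: IRQ 11, mem 0xfe0000, AUI port */
}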
+/* First and last 8390 pages. */
+#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
+#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
+
+static int ac_probe1(int ioaddr, struct net_device *dev);
+
+static int ac_open(struct net_device *dev);
+static void ac_reset_8390(struct net_device *dev);
+static void ac_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ac_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int ac_close_card(struct net_device *dev);
+
+
+/* Probe for the AC3200.
+
+ The AC3200 can be identified by either the EISA configuration registers,
+ or the unique value in the station address PROM.
+ */
+
+static int __init do_ac3200_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+
+ SET_MODULE_OWNER(dev);
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return ac_probe1(ioaddr, dev);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if ( ! EISA_bus)
+ return -ENXIO;
+
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (ac_probe1(ioaddr, dev) == 0)
+ return 0;
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* Someday free_irq may be in ac_close_card() */
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init ac3200_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ac3200_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init ac_probe1(int ioaddr, struct net_device *dev)
+{
+ int i, retval;
+
+ if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (inb_p(ioaddr + AC_ID_PORT) == 0xff) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+#ifndef final_version
+ printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x,"
+ " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
+ inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
+ inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
+#endif
+
+ printk("AC3200 in EISA slot %d, node", ioaddr/0x1000);
+ for(i = 0; i < 6; i++)
+ printk(" %02x", dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i));
+
+#if 0
+ /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
+ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
+ || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
+ || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
+ printk(", not found (invalid prefix).\n");
+ retval = -ENODEV;
+ goto out;
+ }
+#endif
+
+ /* Assign and allocate the interrupt now. */
+ if (dev->irq == 0) {
+ dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
+ printk(", using");
+ } else {
+ dev->irq = irq_canonicalize(dev->irq);
+ printk(", assigning");
+ }
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
+ goto out1;
+ }
+
+ printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
+
+ dev->base_addr = ioaddr;
+
+#ifdef notyet
+ if (dev->mem_start) { /* Override the value from the board. */
+ for (i = 0; i < 7; i++)
+ if (addrmap[i] == dev->mem_start)
+ break;
+ if (i >= 7)
+ i = 0;
+ outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
+ }
+#endif
+
+ dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
+ dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
+
+ printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n",
+ dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start);
+
+ /*
+ * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ * the card mem within the region covered by `normal' RAM !!!
+ *
+ * ioremap() will fail in that case.
+ */
+ ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n");
+ printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n");
+ printk(KERN_ERR "ac3200.c: Driver NOT installed.\n");
+ retval = -EINVAL;
+ goto out1;
+ }
+ printk("ac3200.c: remapped %dkB card memory to virtual address %p\n",
+ AC_STOP_PG/4, ei_status.mem);
+
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256;
+
+ ei_status.name = "AC3200";
+ ei_status.tx_start_page = AC_START_PG;
+ ei_status.rx_start_page = AC_START_PG + TX_PAGES;
+ ei_status.stop_page = AC_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &ac_reset_8390;
+ ei_status.block_input = &ac_block_input;
+ ei_status.block_output = &ac_block_output;
+ ei_status.get_8390_hdr = &ac_get_8390_hdr;
+
+ dev->open = &ac_open;
+ dev->stop = &ac_close_card;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, AC_IO_EXTENT);
+ return retval;
+}
+
+static int ac_open(struct net_device *dev)
+{
+#ifdef notyet
+ /* Someday we may enable the IRQ and shared memory here. */
+ int ioaddr = dev->base_addr;
+#endif
+
+ ei_open(dev);
+ return 0;
+}
+
+static void ac_reset_8390(struct net_device *dev)
+{
+ ushort ioaddr = dev->base_addr;
+
+ outb(AC_RESET, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
+
+ ei_status.txing = 0;
+ outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
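+
+/* A worked example of the wrap case below (hypothetical numbers): if the
+ packet starts 8 bytes before the end of the ring (ring_offset equals
+ AC_STOP_PG*256 - 8) and count is 60, the first 8 bytes are copied from
+ the end of the shared memory window and the remaining 52 bytes from the
+ start of the receive area, TX_PAGES*256 bytes into the mapped window. */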
+
+static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256;
+
+ if (ring_offset + count > AC_STOP_PG*256) {
+ /* We must wrap the input move. */
+ int semi_count = AC_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + TX_PAGES*256, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, start, count, 0);
+ }
+}
+
+static void ac_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ac_close_card(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+#ifdef notyet
+ /* We should someday disable shared memory and interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+#endif
+
+ ei_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
+static struct net_device *dev_ac32[MAX_AC32_CARDS];
+static int io[MAX_AC32_CARDS];
+static int irq[MAX_AC32_CARDS];
+static int mem[MAX_AC32_CARDS];
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "Memory base address(es)");
+MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
+ if (do_ac3200_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_ac32[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct net_device *dev = dev_ac32[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
new file mode 100644
index 000000000000..6eea3a8accb7
--- /dev/null
+++ b/drivers/net/acenic.c
@@ -0,0 +1,3271 @@
+/*
+ * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
+ * and other Tigon based cards.
+ *
+ * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
+ *
+ * Thanks to Alteon and 3Com for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * A mailing list for discussing the use of this driver has been
+ * set up; please subscribe to the list if you have any questions
+ * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
+ * see how to subscribe.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Additional credits:
+ * Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
+ * dump support. The trace dump support has not been
+ * integrated yet however.
+ * Troy Benjegerdes: Big Endian (PPC) patches.
+ * Nate Stahl: Better out of memory handling and stats support.
+ * Aman Singla: Nasty race between interrupt handler and tx code dealing
+ * with 'testing the tx_ret_csm and setting tx_full'
+ * David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
+ * infrastructure and Sparc support
+ * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
+ * driver under Linux/Sparc64
+ * Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
+ * ETHTOOL_GDRVINFO support
+ * Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
+ * handler and close() cleanup.
+ * Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
+ * memory mapped IO is enabled to
+ * make the driver work on RS/6000.
+ * Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
+ * where the driver would disable
+ * bus master mode if it had to disable
+ * write and invalidate.
+ * Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
+ * endian systems.
+ * Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
+ * rx producer index when
+ * flushing the Jumbo ring.
+ * Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
+ * driver init path.
+ * Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sockios.h>
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#include <linux/if_vlan.h>
+#endif
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+
+#define DRV_NAME "acenic"
+
+#undef INDEX_DEBUG
+
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+#define ACE_IS_TIGON_I(ap) 0
+#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
+#else
+#define ACE_IS_TIGON_I(ap) (ap->version == 1)
+#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
+#endif
+
+#ifndef PCI_VENDOR_ID_ALTEON
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+#endif
+#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
+#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
+#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
+#endif
+#ifndef PCI_DEVICE_ID_3COM_3C985
+#define PCI_DEVICE_ID_3COM_3C985 0x0001
+#endif
+#ifndef PCI_VENDOR_ID_NETGEAR
+#define PCI_VENDOR_ID_NETGEAR 0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
+#endif
+#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
+#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
+#endif
+
+
+/*
+ * Farallon used the DEC vendor ID by mistake and they seem not
+ * to care - stinky!
+ */
+#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
+#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
+#endif
+#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
+#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
+#endif
+#ifndef PCI_VENDOR_ID_SGI
+#define PCI_VENDOR_ID_SGI 0x10a9
+#endif
+#ifndef PCI_DEVICE_ID_SGI_ACENIC
+#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
+#endif
+
+static struct pci_device_id acenic_pci_tbl[] = {
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ /*
+ * Farallon used the DEC vendor ID on their cards incorrectly,
+ * then later Alteon's ID.
+ */
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do{} while(0)
+#endif
+
+#if LINUX_VERSION_CODE >= 0x2051c
+#define ace_sync_irq(irq) synchronize_irq(irq)
+#else
+#define ace_sync_irq(irq) synchronize_irq()
+#endif
+
+#ifndef offset_in_page
+#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
+#endif
+
+#define ACE_MAX_MOD_PARMS 8
+#define BOARD_IDX_STATIC 0
+#define BOARD_IDX_OVERFLOW -1
+
+#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
+ defined(NETIF_F_HW_VLAN_RX)
+#define ACENIC_DO_VLAN 1
+#define ACE_RCB_VLAN_FLAG RCB_FLG_VLAN_ASSIST
+#else
+#define ACENIC_DO_VLAN 0
+#define ACE_RCB_VLAN_FLAG 0
+#endif
+
+#include "acenic.h"
+
+/*
+ * These must be defined before the firmware is included.
+ */
+#define MAX_TEXT_LEN 96*1024
+#define MAX_RODATA_LEN 8*1024
+#define MAX_DATA_LEN 2*1024
+
+#include "acenic_firmware.h"
+
+#ifndef tigon2FwReleaseLocal
+#define tigon2FwReleaseLocal 0
+#endif
+
+/*
+ * This driver currently supports Tigon I and Tigon II based cards
+ * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
+ * GA620. The driver should also work on the SGI, DEC and Farallon
+ * versions of the card, however I have not been able to test that
+ * myself.
+ *
+ * This card is really neat, it supports receive hardware checksumming
+ * and jumbo frames (up to 9000 bytes) and does a lot of work in the
+ * firmware. Also the programming interface is quite neat, except for
+ * the parts dealing with the i2c eeprom on the card ;-)
+ *
+ * Using jumbo frames:
+ *
+ * To enable jumbo frames, simply specify an mtu between 1500 and 9000
+ * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
+ * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
+ * interface number and <MTU> being the MTU value.
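+ *
+ * For example (assuming the first AceNIC is eth0):
+ *
+ *   ifconfig eth0 mtu 9000   # switch to jumbo frames
+ *   ifconfig eth0 mtu 1500   # switch back to standard frames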
+ *
+ * Module parameters:
+ *
+ * When compiled as a loadable module, the driver allows for a number
+ * of module parameters to be specified. The driver supports the
+ * following module parameters:
+ *
+ * trace=<val> - Firmware trace level. This requires special traced
+ * firmware to replace the firmware supplied with
+ * the driver - for debugging purposes only.
+ *
+ * link=<val> - Link state. Normally you want to use the default link
+ * parameters set by the driver. This can be used to
+ * override these in case your switch doesn't negotiate
+ * the link properly. Valid values are:
+ * 0x0001 - Force half duplex link.
+ * 0x0002 - Do not negotiate line speed with the other end.
+ * 0x0010 - 10Mbit/sec link.
+ * 0x0020 - 100Mbit/sec link.
+ * 0x0040 - 1000Mbit/sec link.
+ * 0x0100 - Do not negotiate flow control.
+ * 0x0200 - Enable RX flow control Y
+ * 0x0400 - Enable TX flow control Y (Tigon II NICs only).
+ * Default value is 0x0270, ie. enable link+flow
+ * control negotiation, negotiating the highest
+ * possible link speed with RX flow control enabled.
+ *
+ * When disabling link speed negotiation, only one link
+ * speed is allowed to be specified!
+ *
+ * tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
+ * to wait for more packets to arrive before
+ * interrupting the host, from the time the first
+ * packet arrives.
+ *
+ * rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
+ * to wait for more packets to arrive in the receive ring,
+ * before interrupting the host, after receiving the
+ * first packet in the ring.
+ *
+ * max_tx_desc=<val> - maximum number of transmit descriptors
+ * (packets) transmitted before interrupting the host.
+ *
+ * max_rx_desc=<val> - maximum number of receive descriptors
+ * (packets) received before interrupting the host.
+ *
+ * tx_ratio=<val> - value in the range 0 - 63, specifying the split in 64th
+ * increments of the NIC's on board memory to be used for
+ * transmit and receive buffers. For the 1MB NIC approx. 800KB
+ * is available, on the 1/2MB NIC approx. 300KB is available.
+ * 68KB will always be available as a minimum for both
+ * directions. The default value is a 50/50 split.
+ *
+ * dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
+ * operations, default (1) is to always disable this as
+ * that is what Alteon does on NT. I have not been able
+ * to measure any real performance differences with
+ * this on my systems. Set <val>=0 if you want to
+ * enable these operations.
+ *
+ * If you use more than one NIC, specify the parameters for the
+ * individual NICs with a comma, e.g. trace=0,0x00001fff,0 if you want to
+ * run tracing on NIC #2 but not on NIC #1 and #3.
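+ *
+ * A minimal (hypothetical) example, forcing a non-negotiated 1000Mbit
+ * link (0x0040 | 0x0002 = 0x0042) on both of two NICs:
+ *
+ *   modprobe acenic link=0x0042,0x0042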
+ *
+ * TODO:
+ *
+ * - Proper multicast support.
+ * - NIC dump support.
+ * - More tuning parameters.
+ *
+ * The mini ring is not used under Linux and I am not sure it makes sense
+ * to actually use it.
+ *
+ * New interrupt handler strategy:
+ *
+ * The old interrupt handler worked using the traditional method of
+ * replacing an skbuff with a new one when a packet arrives. However
+ * the rx rings do not need to contain a static number of buffer
+ * descriptors, thus it makes sense to move the memory allocation out
+ * of the main interrupt handler and do it in a bottom half handler
+ * and only allocate new buffers when the number of buffers in the
+ * ring is below a certain threshold. In order to avoid starving the
+ * NIC under heavy load it is however necessary to force allocation
+ * when hitting a minimum threshold. The strategy for allocation is as
+ * follows:
+ *
+ * RX_LOW_BUF_THRES - allocate buffers in the bottom half
+ * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
+ * the buffers in the interrupt handler
+ * RX_RING_THRES - maximum number of buffers in the rx ring
+ * RX_MINI_THRES - maximum number of buffers in the mini ring
+ * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring
+ *
+ * One advantageous side effect of this allocation approach is that the
+ * entire rx processing can be done without holding any spin lock
+ * since the rx rings and registers are totally independent of the tx
+ * ring and its registers. This of course includes the kmalloc's of
+ * new skb's. Thus start_xmit can run in parallel with rx processing
+ * and the memory allocation on SMP systems.
+ *
+ * Note that running the skb reallocation in a bottom half opens up
+ * another can of races which needs to be handled properly. In
+ * particular it can happen that the interrupt handler tries to run
+ * the reallocation while the bottom half is either running on another
+ * CPU or was interrupted on the same CPU. To get around this the
+ * driver uses bitops to prevent the reallocation routines from being
+ * reentered.
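+ *
+ * As a sketch, this is the guard the refill paths below use (see
+ * ace_tasklet() and the ace_load_*_rx_ring() helpers):
+ *
+ *   if (!test_and_set_bit(0, &ap->std_refill_busy))
+ *       ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+ *
+ * with clear_bit(0, &ap->std_refill_busy) done once the refill finishes.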
+ *
+ * TX handling can also be done without holding any spin lock (wheee,
+ * this is fun!) since tx_ret_csm is only written to by the interrupt
+ * handler. The case to be aware of is when shutting down the device
+ * and cleaning up where it is necessary to make sure that
+ * start_xmit() is not running while this is happening. Well DaveM
+ * informs me that this case is already protected against ... bye bye
+ * Mr. Spin Lock, it was nice to know you.
+ *
+ * TX interrupts are now partly disabled so the NIC will only generate
+ * TX interrupts for the number of coal ticks, not for the number of
+ * TX packets in the queue. This should reduce the number of TX only
+ * interrupts, ie. interrupts seen when no RX processing is done.
+ */
+
+/*
+ * Threshold values for RX buffer allocation - the low water marks for
+ * when to start refilling the rings are set to 75% of the ring
+ * sizes. It seems to make sense to refill the rings entirely from the
+ * interrupt handler once it gets below the panic threshold; that way
+ * we don't risk that the refilling is moved to another CPU when the
+ * one running the interrupt handler just got the slab code hot in its
+ * cache.
+ */
+#define RX_RING_SIZE 72
+#define RX_MINI_SIZE 64
+#define RX_JUMBO_SIZE 48
+
+#define RX_PANIC_STD_THRES 16
+#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2
+#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4
+#define RX_PANIC_MINI_THRES 12
+#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2
+#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4
+#define RX_PANIC_JUMBO_THRES 6
+#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2
+#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4
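+
+/*
+ * Worked out for the standard ring: RX_LOW_STD_THRES = 3*72/4 = 54, the
+ * 75% low water mark at which the tasklet starts refilling, while below
+ * RX_PANIC_STD_THRES = 16 the ring is refilled directly from the
+ * interrupt handler (RX_PANIC_STD_REFILL works out to 3*16/2 = 24).
+ */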
+
+
+/*
+ * Size of the mini ring entries, basically these just should be big
+ * enough to take TCP ACKs
+ */
+#define ACE_MINI_SIZE 100
+
+#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
+#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
+#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
+
+/*
+ * There seems to be a magic difference in the effect between 995 and 996
+ * but little difference between 900 and 995 ... no idea why.
+ *
+ * There is now a default set of tuning parameters which is set depending
+ * on whether or not the user enables Jumbo frames. It's assumed that if
+ * Jumbo frames are enabled, the user wants optimal tuning for that case.
+ */
+#define DEF_TX_COAL 400 /* 996 */
+#define DEF_TX_MAX_DESC 60 /* was 40 */
+#define DEF_RX_COAL 120 /* 1000 */
+#define DEF_RX_MAX_DESC 25
+#define DEF_TX_RATIO 21 /* 24 */
+
+#define DEF_JUMBO_TX_COAL 20
+#define DEF_JUMBO_TX_MAX_DESC 60
+#define DEF_JUMBO_RX_COAL 30
+#define DEF_JUMBO_RX_MAX_DESC 6
+#define DEF_JUMBO_TX_RATIO 21
+
+#if tigon2FwReleaseLocal < 20001118
+/*
+ * Standard firmware and early modifications duplicate
+ * IRQ load without this flag (coal timer is never reset).
+ * Note that with this flag tx_coal should be less than
+ * the time to xmit a full tx ring.
+ * 400usec is not so bad for a tx ring size of 128.
+ */
+#define TX_COAL_INTS_ONLY 1 /* worth it */
+#else
+/*
+ * With modified firmware, this is not necessary, but still useful.
+ */
+#define TX_COAL_INTS_ONLY 1
+#endif
+
+#define DEF_TRACE 0
+#define DEF_STAT (2 * TICKS_PER_SEC)
+
+
+static int link[ACE_MAX_MOD_PARMS];
+static int trace[ACE_MAX_MOD_PARMS];
+static int tx_coal_tick[ACE_MAX_MOD_PARMS];
+static int rx_coal_tick[ACE_MAX_MOD_PARMS];
+static int max_tx_desc[ACE_MAX_MOD_PARMS];
+static int max_rx_desc[ACE_MAX_MOD_PARMS];
+static int tx_ratio[ACE_MAX_MOD_PARMS];
+static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
+
+MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
+
+module_param_array(link, int, NULL, 0);
+module_param_array(trace, int, NULL, 0);
+module_param_array(tx_coal_tick, int, NULL, 0);
+module_param_array(max_tx_desc, int, NULL, 0);
+module_param_array(rx_coal_tick, int, NULL, 0);
+module_param_array(max_rx_desc, int, NULL, 0);
+module_param_array(tx_ratio, int, NULL, 0);
+MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
+MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
+MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
+MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
+MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
+
+
+static char version[] __devinitdata =
+ "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
+ " http://home.cern.ch/~jes/gige/acenic.html\n";
+
+static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
+static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
+static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+
+static struct ethtool_ops ace_ethtool_ops = {
+ .get_settings = ace_get_settings,
+ .set_settings = ace_set_settings,
+ .get_drvinfo = ace_get_drvinfo,
+};
+
+static void ace_watchdog(struct net_device *dev);
+
+static int __devinit acenic_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct ace_private *ap;
+ static int boards_found;
+
+ dev = alloc_etherdev(sizeof(struct ace_private));
+ if (dev == NULL) {
+ printk(KERN_ERR "acenic: Unable to allocate "
+ "net_device structure!\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ap = dev->priv;
+ ap->pdev = pdev;
+ ap->name = pci_name(pdev);
+
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+#if ACENIC_DO_VLAN
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = ace_vlan_rx_register;
+ dev->vlan_rx_kill_vid = ace_vlan_rx_kill_vid;
+#endif
+ dev->tx_timeout = &ace_watchdog;
+ dev->watchdog_timeo = 5*HZ;
+
+ dev->open = &ace_open;
+ dev->stop = &ace_close;
+ dev->hard_start_xmit = &ace_start_xmit;
+ dev->get_stats = &ace_get_stats;
+ dev->set_multicast_list = &ace_set_multicast_list;
+ SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+ dev->set_mac_address = &ace_set_mac_addr;
+ dev->change_mtu = &ace_change_mtu;
+
+ /* we only display this string ONCE */
+ if (!boards_found)
+ printk(version);
+
+ if (pci_enable_device(pdev))
+ goto fail_free_netdev;
+
+ /*
+ * Enable master mode before we start playing with the
+ * pci_command word since pci_set_master() will modify
+ * it.
+ */
+ pci_set_master(pdev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
+
+ /* OpenFirmware on Macs does not set this - DOH.. */
+ if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
+ printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
+ "access - was not enabled by BIOS/Firmware\n",
+ ap->name);
+ ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
+ pci_write_config_word(ap->pdev, PCI_COMMAND,
+ ap->pci_command);
+ wmb();
+ }
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
+ if (ap->pci_latency <= 0x40) {
+ ap->pci_latency = 0x40;
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
+ }
+
+ /*
+ * Remap the regs into kernel space - this is abuse of
+ * dev->base_addr since it was meant for I/O port
+ * addresses but who gives a damn.
+ */
+ dev->base_addr = pci_resource_start(pdev, 0);
+ ap->regs = ioremap(dev->base_addr, 0x4000);
+ if (!ap->regs) {
+ printk(KERN_ERR "%s: Unable to map I/O register, "
+ "AceNIC %i will be disabled.\n",
+ ap->name, boards_found);
+ goto fail_free_netdev;
+ }
+
+ switch(pdev->vendor) {
+ case PCI_VENDOR_ID_ALTEON:
+ if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
+ printk(KERN_INFO "%s: Farallon PN9100-T ",
+ ap->name);
+ } else {
+ printk(KERN_INFO "%s: Alteon AceNIC ",
+ ap->name);
+ }
+ break;
+ case PCI_VENDOR_ID_3COM:
+ printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
+ break;
+ case PCI_VENDOR_ID_NETGEAR:
+ printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
+ break;
+ case PCI_VENDOR_ID_DEC:
+ if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
+ printk(KERN_INFO "%s: Farallon PN9000-SX ",
+ ap->name);
+ break;
+ }
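+ /* fall through: other DEC-ID boards are reported as SGI AceNICs below */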
+ case PCI_VENDOR_ID_SGI:
+ printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
+ break;
+ default:
+ printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
+ break;
+ }
+
+ printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
+#ifdef __sparc__
+ printk("irq %s\n", __irq_itoa(pdev->irq));
+#else
+ printk("irq %i\n", pdev->irq);
+#endif
+
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+ if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
+ printk(KERN_ERR "%s: Driver compiled without Tigon I"
+ " support - NIC disabled\n", dev->name);
+ goto fail_uninit;
+ }
+#endif
+
+ if (ace_allocate_descriptors(dev))
+ goto fail_free_netdev;
+
+#ifdef MODULE
+ if (boards_found >= ACE_MAX_MOD_PARMS)
+ ap->board_idx = BOARD_IDX_OVERFLOW;
+ else
+ ap->board_idx = boards_found;
+#else
+ ap->board_idx = BOARD_IDX_STATIC;
+#endif
+
+ if (ace_init(dev))
+ goto fail_free_netdev;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "acenic: device registration failed\n");
+ goto fail_uninit;
+ }
+ ap->name = dev->name;
+
+ if (ap->pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, dev);
+
+ boards_found++;
+ return 0;
+
+ fail_uninit:
+ ace_init_cleanup(dev);
+ fail_free_netdev:
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static void __devexit acenic_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ short i;
+
+ unregister_netdev(dev);
+
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ if (ap->version >= 2)
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+
+ /*
+ * This clears any pending interrupts
+ */
+ writel(1, &regs->Mb0Lo);
+ readl(&regs->CpuCtrl); /* flush */
+
+ /*
+ * Make sure no other CPUs are processing interrupts
+ * on the card before the buffers are being released.
+ * Otherwise one might experience some `interesting'
+ * effects.
+ *
+ * Then release the RX buffers - jumbo buffers were
+ * already released in ace_close().
+ */
+ ace_sync_irq(dev->irq);
+
+ for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
+
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_std_skbuff[i];
+ mapping = pci_unmap_addr(ringp, mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_STD_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_std_ring[i].size = 0;
+ ap->skb->rx_std_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+
+ if (ap->version >= 2) {
+ for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
+
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_mini_skbuff[i];
+ mapping = pci_unmap_addr(ringp,mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_MINI_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_mini_ring[i].size = 0;
+ ap->skb->rx_mini_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+ }
+
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_jumbo_skbuff[i];
+ mapping = pci_unmap_addr(ringp, mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_JUMBO_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_jumbo_ring[i].size = 0;
+ ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+
+ ace_init_cleanup(dev);
+ free_netdev(dev);
+}
+
+static struct pci_driver acenic_pci_driver = {
+ .name = "acenic",
+ .id_table = acenic_pci_tbl,
+ .probe = acenic_probe_one,
+ .remove = __devexit_p(acenic_remove_one),
+};
+
+static int __init acenic_init(void)
+{
+ return pci_module_init(&acenic_pci_driver);
+}
+
+static void __exit acenic_exit(void)
+{
+ pci_unregister_driver(&acenic_pci_driver);
+}
+
+module_init(acenic_init);
+module_exit(acenic_exit);
+
+static void ace_free_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ int size;
+
+ if (ap->rx_std_ring != NULL) {
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+ pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
+ ap->rx_ring_base_dma);
+ ap->rx_std_ring = NULL;
+ ap->rx_jumbo_ring = NULL;
+ ap->rx_mini_ring = NULL;
+ ap->rx_return_ring = NULL;
+ }
+ if (ap->evt_ring != NULL) {
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size, ap->evt_ring,
+ ap->evt_ring_dma);
+ ap->evt_ring = NULL;
+ }
+ if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size, ap->tx_ring,
+ ap->tx_ring_dma);
+ }
+ ap->tx_ring = NULL;
+
+ if (ap->evt_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->evt_prd, ap->evt_prd_dma);
+ ap->evt_prd = NULL;
+ }
+ if (ap->rx_ret_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->rx_ret_prd,
+ ap->rx_ret_prd_dma);
+ ap->rx_ret_prd = NULL;
+ }
+ if (ap->tx_csm != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->tx_csm, ap->tx_csm_dma);
+ ap->tx_csm = NULL;
+ }
+}
+
+
+static int ace_allocate_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ int size;
+
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+
+ ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->rx_ring_base_dma);
+ if (ap->rx_std_ring == NULL)
+ goto fail;
+
+ ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
+ ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
+ ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
+
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+
+ ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
+
+ if (ap->evt_ring == NULL)
+ goto fail;
+
+ /*
+ * Only allocate a host TX ring for the Tigon II, the Tigon I
+ * has to use PCI registers for this ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
+
+ ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->tx_ring_dma);
+
+ if (ap->tx_ring == NULL)
+ goto fail;
+ }
+
+ ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->evt_prd_dma);
+ if (ap->evt_prd == NULL)
+ goto fail;
+
+ ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->rx_ret_prd_dma);
+ if (ap->rx_ret_prd == NULL)
+ goto fail;
+
+ ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->tx_csm_dma);
+ if (ap->tx_csm == NULL)
+ goto fail;
+
+ return 0;
+
+fail:
+ /* Clean up. */
+ ace_init_cleanup(dev);
+ return 1;
+}
+
+
+/*
+ * Generic cleanup handling data allocated during init. Used when the
+ * module is unloaded or if an error occurs during initialization
+ */
+static void ace_init_cleanup(struct net_device *dev)
+{
+ struct ace_private *ap;
+
+ ap = netdev_priv(dev);
+
+ ace_free_descriptors(dev);
+
+ if (ap->info)
+ pci_free_consistent(ap->pdev, sizeof(struct ace_info),
+ ap->info, ap->info_dma);
+ if (ap->skb)
+ kfree(ap->skb);
+ if (ap->trace_buf)
+ kfree(ap->trace_buf);
+
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+
+ iounmap(ap->regs);
+}
+
+
+/*
+ * Commands are considered to be slow.
+ */
+static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
+{
+ u32 idx;
+
+ idx = readl(&regs->CmdPrd);
+
+ writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
+ idx = (idx + 1) % CMD_RING_ENTRIES;
+
+ writel(idx, &regs->CmdPrd);
+}
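+
+/*
+ * Typical usage, as seen in the Tigon I paths of the ring loaders
+ * further down: fill in a struct cmd and hand it to ace_issue_cmd(), e.g.
+ *
+ *   struct cmd cmd;
+ *   cmd.evt = C_SET_RX_PRD_IDX;
+ *   cmd.code = 0;
+ *   cmd.idx = ap->rx_std_skbprd;
+ *   ace_issue_cmd(regs, &cmd);
+ */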
+
+
+static int __devinit ace_init(struct net_device *dev)
+{
+ struct ace_private *ap;
+ struct ace_regs __iomem *regs;
+ struct ace_info *info = NULL;
+ struct pci_dev *pdev;
+ unsigned long myjif;
+ u64 tmp_ptr;
+ u32 tig_ver, mac1, mac2, tmp, pci_state;
+ int board_idx, ecode = 0;
+ short i;
+ unsigned char cache_size;
+
+ ap = netdev_priv(dev);
+ regs = ap->regs;
+
+ board_idx = ap->board_idx;
+
+ /*
+ * aman@sgi.com - it's useful to do a NIC reset here to
+ * address the `Firmware not running' problem subsequent
+ * to any crashes involving the NIC
+ */
+ writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
+ readl(&regs->HostCtrl); /* PCI write posting */
+ udelay(5);
+
+ /*
+ * Don't access any other registers before this point!
+ */
+#ifdef __BIG_ENDIAN
+ /*
+ * This will most likely need BYTE_SWAP once we switch
+ * to using __raw_writel()
+ */
+ writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
+ &regs->HostCtrl);
+#else
+ writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
+ &regs->HostCtrl);
+#endif
+ readl(&regs->HostCtrl); /* PCI write posting */
+
+ /*
+ * Stop the NIC CPU and clear pending interrupts
+ */
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ readl(&regs->CpuCtrl); /* PCI write posting */
+ writel(0, &regs->Mb0Lo);
+
+ tig_ver = readl(&regs->HostCtrl) >> 28;
+
+ switch(tig_ver){
+#ifndef CONFIG_ACENIC_OMIT_TIGON_I
+ case 4:
+ case 5:
+ printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
+ tig_ver, tigonFwReleaseMajor, tigonFwReleaseMinor,
+ tigonFwReleaseFix);
+ writel(0, &regs->LocalCtrl);
+ ap->version = 1;
+ ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
+ break;
+#endif
+ case 6:
+ printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
+ tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
+ tigon2FwReleaseFix);
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+ readl(&regs->CpuBCtrl); /* PCI write posting */
+ /*
+ * The SRAM bank size does _not_ indicate the amount
+ * of memory on the card, it controls the _bank_ size!
+ * Ie. a 1MB AceNIC will have two banks of 512KB.
+ */
+ writel(SRAM_BANK_512K, &regs->LocalCtrl);
+ writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
+ ap->version = 2;
+ ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
+ break;
+ default:
+ printk(KERN_WARNING " Unsupported Tigon version detected "
+ "(%i)\n", tig_ver);
+ ecode = -ENODEV;
+ goto init_error;
+ }
+
+ /*
+ * ModeStat _must_ be set after the SRAM settings as this change
+ * seems to corrupt the ModeStat and possibly other registers.
+ * The SRAM settings survive resets and setting it to the same
+ * value a second time works as well. This is what caused the
+ * `Firmware not running' problem on the Tigon II.
+ */
+#ifdef __BIG_ENDIAN
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#else
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#endif
+ readl(&regs->ModeStat); /* PCI write posting */
+
+ mac1 = 0;
+ for(i = 0; i < 4; i++) {
+ mac1 = mac1 << 8;
+ tmp = read_eeprom_byte(dev, 0x8c+i);
+ if (tmp < 0) {
+ ecode = -EIO;
+ goto init_error;
+ } else
+ mac1 |= (tmp & 0xff);
+ }
+ mac2 = 0;
+ for(i = 4; i < 8; i++) {
+ mac2 = mac2 << 8;
+ tmp = read_eeprom_byte(dev, 0x8c+i);
+ if (tmp < 0) {
+ ecode = -EIO;
+ goto init_error;
+ } else
+ mac2 |= (tmp & 0xff);
+ }
+
+ writel(mac1, &regs->MacAddrHi);
+ writel(mac2, &regs->MacAddrLo);
+
+ printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (mac1 >> 8) & 0xff, mac1 & 0xff, (mac2 >> 24) &0xff,
+ (mac2 >> 16) & 0xff, (mac2 >> 8) & 0xff, mac2 & 0xff);
+
+ dev->dev_addr[0] = (mac1 >> 8) & 0xff;
+ dev->dev_addr[1] = mac1 & 0xff;
+ dev->dev_addr[2] = (mac2 >> 24) & 0xff;
+ dev->dev_addr[3] = (mac2 >> 16) & 0xff;
+ dev->dev_addr[4] = (mac2 >> 8) & 0xff;
+ dev->dev_addr[5] = mac2 & 0xff;
+
+ /*
+ * Looks like this is necessary to deal with on all architectures,
+ * even this %$#%$# N440BX Intel based thing doesn't get it right.
+ * Ie. having two NICs in the machine, one will have the cache
+ * line set at boot time, the other will not.
+ */
+ pdev = ap->pdev;
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
+ cache_size <<= 2;
+ if (cache_size != SMP_CACHE_BYTES) {
+ printk(KERN_INFO " PCI cache line size set incorrectly "
+ "(%i bytes) by BIOS/FW, ", cache_size);
+ if (cache_size > SMP_CACHE_BYTES)
+ printk("expecting %i\n", SMP_CACHE_BYTES);
+ else {
+ printk("correcting to %i\n", SMP_CACHE_BYTES);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ SMP_CACHE_BYTES >> 2);
+ }
+ }
+
+ pci_state = readl(&regs->PciState);
+ printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
+ "latency: %i clks\n",
+ (pci_state & PCI_32BIT) ? 32 : 64,
+ (pci_state & PCI_66MHZ) ? 66 : 33,
+ ap->pci_latency);
+
+ /*
+ * Set the max DMA transfer size. Seems that for most systems
+ * the performance is better when no MAX parameter is
+ * set. However for systems enabling PCI write and invalidate,
+ * DMA writes must be set to the L1 cache line size to get
+ * optimal performance.
+ *
+ * The default is now to turn the PCI write and invalidate off
+ * - that is what Alteon does for NT.
+ */
+ tmp = READ_CMD_MEM | WRITE_CMD_MEM;
+ if (ap->version >= 2) {
+ tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
+ /*
+ * Tuning parameters only supported for 8 cards
+ */
+ if (board_idx == BOARD_IDX_OVERFLOW ||
+ dis_pci_mem_inval[board_idx]) {
+ if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+ ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND,
+ ap->pci_command);
+ printk(KERN_INFO " Disabling PCI memory "
+ "write and invalidate\n");
+ }
+ } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+ printk(KERN_INFO " PCI memory write & invalidate "
+ "enabled by BIOS, enabling counter measures\n");
+
+ switch(SMP_CACHE_BYTES) {
+ case 16:
+ tmp |= DMA_WRITE_MAX_16;
+ break;
+ case 32:
+ tmp |= DMA_WRITE_MAX_32;
+ break;
+ case 64:
+ tmp |= DMA_WRITE_MAX_64;
+ break;
+ case 128:
+ tmp |= DMA_WRITE_MAX_128;
+ break;
+ default:
+ printk(KERN_INFO " Cache line size %i not "
+ "supported, PCI write and invalidate "
+ "disabled\n", SMP_CACHE_BYTES);
+ ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND,
+ ap->pci_command);
+ }
+ }
+ }
+
+#ifdef __sparc__
+ /*
+ * On this platform, we know what the best dma settings
+ * are. We use 64-byte maximum bursts, because if we
+ * burst larger than the cache line size (or even cross
+ * a 64byte boundary in a single burst) the UltraSparc
+ * PCI controller will disconnect at 64-byte multiples.
+ *
+ * Read-multiple will be properly enabled above, and when
+ * set will give the PCI controller proper hints about
+ * prefetching.
+ */
+ tmp &= ~DMA_READ_WRITE_MASK;
+ tmp |= DMA_READ_MAX_64;
+ tmp |= DMA_WRITE_MAX_64;
+#endif
+#ifdef __alpha__
+ tmp &= ~DMA_READ_WRITE_MASK;
+ tmp |= DMA_READ_MAX_128;
+ /*
+ * All the docs say MUST NOT. Well, I did.
+ * Nothing terrible happens if we load the wrong size.
+ * Bit w&i still works better!
+ */
+ tmp |= DMA_WRITE_MAX_128;
+#endif
+ writel(tmp, &regs->PciState);
+
+#if 0
+ /*
+ * The Host PCI bus controller driver has to set FBB.
+ * If all devices on that PCI bus support FBB, then the controller
+ * can enable FBB support in the Host PCI Bus controller (or on
+ * the PCI-PCI bridge if that applies).
+ * -ggg
+ */
+ /*
+ * I have received reports from people having problems when this
+ * bit is enabled.
+ */
+ if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
+ printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
+ ap->pci_command |= PCI_COMMAND_FAST_BACK;
+ pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
+ }
+#endif
+
+ /*
+ * Configure DMA attributes.
+ */
+ if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ ap->pci_using_dac = 1;
+ } else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) {
+ ap->pci_using_dac = 0;
+ } else {
+ ecode = -ENODEV;
+ goto init_error;
+ }
+
+ /*
+ * Initialize the generic info block and the command+event rings
+ * and the control blocks for the transmit and receive rings
+ * as they need to be setup once and for all.
+ */
+ if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
+ &ap->info_dma))) {
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+ ap->info = info;
+
+ /*
+ * Get the memory for the skb rings.
+ */
+ if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+
+ ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ,
+ DRV_NAME, dev);
+ if (ecode) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+ DRV_NAME, pdev->irq);
+ goto init_error;
+ } else
+ dev->irq = pdev->irq;
+
+#ifdef INDEX_DEBUG
+ spin_lock_init(&ap->debug_lock);
+ ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
+ ap->last_std_rx = 0;
+ ap->last_mini_rx = 0;
+#endif
+
+ memset(ap->info, 0, sizeof(struct ace_info));
+ memset(ap->skb, 0, sizeof(struct ace_skb));
+
+ ace_load_firmware(dev);
+ ap->fw_running = 0;
+
+ tmp_ptr = ap->info_dma;
+ writel(tmp_ptr >> 32, &regs->InfoPtrHi);
+ writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
+
+ memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
+
+ set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
+ info->evt_ctrl.flags = 0;
+
+ *(ap->evt_prd) = 0;
+ wmb();
+ set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
+ writel(0, &regs->EvtCsm);
+
+ set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
+ info->cmd_ctrl.flags = 0;
+ info->cmd_ctrl.max_len = 0;
+
+ for (i = 0; i < CMD_RING_ENTRIES; i++)
+ writel(0, &regs->CmdRng[i]);
+
+ writel(0, &regs->CmdPrd);
+ writel(0, &regs->CmdCsm);
+
+ tmp_ptr = ap->info_dma;
+ tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
+ set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
+
+ set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
+ info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
+ info->rx_std_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+
+ memset(ap->rx_std_ring, 0,
+ RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
+
+ for (i = 0; i < RX_STD_RING_ENTRIES; i++)
+ ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
+
+ ap->rx_std_skbprd = 0;
+ atomic_set(&ap->cur_rx_bufs, 0);
+
+ set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
+ info->rx_jumbo_ctrl.max_len = 0;
+ info->rx_jumbo_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+
+ memset(ap->rx_jumbo_ring, 0,
+ RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
+
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
+ ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
+
+ ap->rx_jumbo_skbprd = 0;
+ atomic_set(&ap->cur_jumbo_bufs, 0);
+
+ memset(ap->rx_mini_ring, 0,
+ RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
+
+ if (ap->version >= 2) {
+ set_aceaddr(&info->rx_mini_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES))));
+ info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
+ info->rx_mini_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;
+
+ for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
+ ap->rx_mini_ring[i].flags =
+ BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
+ } else {
+ set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
+ info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
+ info->rx_mini_ctrl.max_len = 0;
+ }
+
+ ap->rx_mini_skbprd = 0;
+ atomic_set(&ap->cur_mini_bufs, 0);
+
+ set_aceaddr(&info->rx_return_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES))));
+ info->rx_return_ctrl.flags = 0;
+ info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
+
+ memset(ap->rx_return_ring, 0,
+ RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
+
+ set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
+ *(ap->rx_ret_prd) = 0;
+
+ writel(TX_RING_BASE, &regs->WinBase);
+
+ if (ACE_IS_TIGON_I(ap)) {
+ ap->tx_ring = (struct tx_desc *) regs->Window;
+ for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
+ * sizeof(struct tx_desc)) / sizeof(u32); i++)
+ writel(0, (void __iomem *)ap->tx_ring + i * 4);
+
+ set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
+ } else {
+ memset(ap->tx_ring, 0,
+ MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
+
+ set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
+ }
+
+ info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
+ tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+
+ /*
+ * The Tigon I does not like having the TX ring in host memory ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap))
+ tmp |= RCB_FLG_TX_HOST_RING;
+#if TX_COAL_INTS_ONLY
+ tmp |= RCB_FLG_COAL_INT_ONLY;
+#endif
+ info->tx_ctrl.flags = tmp;
+
+ set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
+
+ /*
+ * Potential item for tuning parameter
+ */
+#if 0 /* NO */
+ writel(DMA_THRESH_16W, &regs->DmaReadCfg);
+ writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
+#else
+ writel(DMA_THRESH_8W, &regs->DmaReadCfg);
+ writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
+#endif
+
+ writel(0, &regs->MaskInt);
+ writel(1, &regs->IfIdx);
+#if 0
+ /*
+ * McKinley boxes do not like us fiddling with AssistState
+ * this early
+ */
+ writel(1, &regs->AssistState);
+#endif
+
+ writel(DEF_STAT, &regs->TuneStatTicks);
+ writel(DEF_TRACE, &regs->TuneTrace);
+
+ ace_set_rxtx_parms(dev, 0);
+
+ if (board_idx == BOARD_IDX_OVERFLOW) {
+ printk(KERN_WARNING "%s: more than %i NICs detected, "
+ "ignoring module parameters!\n",
+ ap->name, ACE_MAX_MOD_PARMS);
+ } else if (board_idx >= 0) {
+ if (tx_coal_tick[board_idx])
+ writel(tx_coal_tick[board_idx],
+ &regs->TuneTxCoalTicks);
+ if (max_tx_desc[board_idx])
+ writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
+
+ if (rx_coal_tick[board_idx])
+ writel(rx_coal_tick[board_idx],
+ &regs->TuneRxCoalTicks);
+ if (max_rx_desc[board_idx])
+ writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
+
+ if (trace[board_idx])
+ writel(trace[board_idx], &regs->TuneTrace);
+
+ if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
+ writel(tx_ratio[board_idx], &regs->TxBufRat);
+ }
+
+ /*
+ * Default link parameters
+ */
+ tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
+ LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
+ if(ap->version >= 2)
+ tmp |= LNK_TX_FLOW_CTL_Y;
+
+ /*
+ * Override link default parameters
+ */
+ if ((board_idx >= 0) && link[board_idx]) {
+ int option = link[board_idx];
+
+ tmp = LNK_ENABLE;
+
+ if (option & 0x01) {
+ printk(KERN_INFO "%s: Setting half duplex link\n",
+ ap->name);
+ tmp &= ~LNK_FULL_DUPLEX;
+ }
+ if (option & 0x02)
+ tmp &= ~LNK_NEGOTIATE;
+ if (option & 0x10)
+ tmp |= LNK_10MB;
+ if (option & 0x20)
+ tmp |= LNK_100MB;
+ if (option & 0x40)
+ tmp |= LNK_1000MB;
+ if ((option & 0x70) == 0) {
+ printk(KERN_WARNING "%s: No media speed specified, "
+ "forcing auto negotiation\n", ap->name);
+ tmp |= LNK_NEGOTIATE | LNK_1000MB |
+ LNK_100MB | LNK_10MB;
+ }
+ if ((option & 0x100) == 0)
+ tmp |= LNK_NEG_FCTL;
+ else
+ printk(KERN_INFO "%s: Disabling flow control "
+ "negotiation\n", ap->name);
+ if (option & 0x200)
+ tmp |= LNK_RX_FLOW_CTL_Y;
+ if ((option & 0x400) && (ap->version >= 2)) {
+ printk(KERN_INFO "%s: Enabling TX flow control\n",
+ ap->name);
+ tmp |= LNK_TX_FLOW_CTL_Y;
+ }
+ }
+
+ ap->link = tmp;
+ writel(tmp, &regs->TuneLink);
+ if (ap->version >= 2)
+ writel(tmp, &regs->TuneFastLink);
+
+ if (ACE_IS_TIGON_I(ap))
+ writel(tigonFwStartAddr, &regs->Pc);
+ if (ap->version == 2)
+ writel(tigon2FwStartAddr, &regs->Pc);
+
+ writel(0, &regs->Mb0Lo);
+
+ /*
+ * Set tx_csm before we start receiving interrupts, otherwise
+ * the interrupt handler might think it is supposed to process
+ * tx ints before we are up and running, which may cause a null
+ * pointer access in the int handler.
+ */
+ ap->cur_rx = 0;
+ ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
+
+ wmb();
+ ace_set_txprd(regs, ap, 0);
+ writel(0, &regs->RxRetCsm);
+
+ /*
+ * Zero the stats before starting the interface
+ */
+ memset(&ap->stats, 0, sizeof(ap->stats));
+
+ /*
+ * Enable DMA engine now.
+ * If we do this sooner, the McKinley box pukes.
+ * I assume it's because the Tigon II DMA engine wants to check
+ * *something* even before the CPU is started.
+ */
+ writel(1, &regs->AssistState); /* enable DMA */
+
+ /*
+ * Start the NIC CPU
+ */
+ writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
+ readl(&regs->CpuCtrl);
+
+ /*
+ * Wait for the firmware to spin up - max 3 seconds.
+ */
+ myjif = jiffies + 3 * HZ;
+ while (time_before(jiffies, myjif) && !ap->fw_running)
+ cpu_relax();
+
+ if (!ap->fw_running) {
+ printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
+
+ ace_dump_trace(ap);
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ readl(&regs->CpuCtrl);
+
+ /* aman@sgi.com - account for badly behaving firmware/NIC:
+ * - have observed that the NIC may continue to generate
+ * interrupts for some reason; attempt to stop it - halt
+ * second CPU for Tigon II cards, and also clear Mb0
+ * - if we're a module, we'll fail to load if this was
+ * the only GbE card in the system => if the kernel does
+ * see an interrupt from the NIC, code to handle it is
+ * gone and OOps! - so free_irq also
+ */
+ if (ap->version >= 2)
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT,
+ &regs->CpuBCtrl);
+ writel(0, &regs->Mb0Lo);
+ readl(&regs->Mb0Lo);
+
+ ecode = -EBUSY;
+ goto init_error;
+ }
+
+ /*
+ * We load the ring here as there seems to be no way to tell the
+ * firmware to wipe the ring without re-initializing it.
+ */
+ if (!test_and_set_bit(0, &ap->std_refill_busy))
+ ace_load_std_rx_ring(ap, RX_RING_SIZE);
+ else
+ printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
+ ap->name);
+ if (ap->version >= 2) {
+ if (!test_and_set_bit(0, &ap->mini_refill_busy))
+ ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
+ else
+ printk(KERN_ERR "%s: Someone is busy refilling "
+ "the RX mini ring\n", ap->name);
+ }
+ return 0;
+
+ init_error:
+ ace_init_cleanup(dev);
+ return ecode;
+}
+
+
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ int board_idx = ap->board_idx;
+
+ if (board_idx >= 0) {
+ if (!jumbo) {
+ if (!tx_coal_tick[board_idx])
+ writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
+ if (!max_tx_desc[board_idx])
+ writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
+ if (!rx_coal_tick[board_idx])
+ writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
+ if (!max_rx_desc[board_idx])
+ writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
+ if (!tx_ratio[board_idx])
+ writel(DEF_TX_RATIO, &regs->TxBufRat);
+ } else {
+ if (!tx_coal_tick[board_idx])
+ writel(DEF_JUMBO_TX_COAL,
+ &regs->TuneTxCoalTicks);
+ if (!max_tx_desc[board_idx])
+ writel(DEF_JUMBO_TX_MAX_DESC,
+ &regs->TuneMaxTxDesc);
+ if (!rx_coal_tick[board_idx])
+ writel(DEF_JUMBO_RX_COAL,
+ &regs->TuneRxCoalTicks);
+ if (!max_rx_desc[board_idx])
+ writel(DEF_JUMBO_RX_MAX_DESC,
+ &regs->TuneMaxRxDesc);
+ if (!tx_ratio[board_idx])
+ writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
+ }
+ }
+}
+
+
+static void ace_watchdog(struct net_device *data)
+{
+ struct net_device *dev = data;
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ /*
+ * We haven't received a stats update event for more than 2.5
+ * seconds and there is data in the transmit queue, thus we
+ * assume the card is stuck.
+ */
+ if (*ap->tx_csm != ap->tx_ret_csm) {
+ printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
+ dev->name, (unsigned int)readl(&regs->HostCtrl));
+ /* This can happen due to ieee flow control. */
+ } else {
+ printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
+ dev->name);
+#if 0
+ netif_wake_queue(dev);
+#endif
+ }
+}
+
+
+static void ace_tasklet(unsigned long dev)
+{
+ struct ace_private *ap = netdev_priv((struct net_device *)dev);
+ int cur_size;
+
+ cur_size = atomic_read(&ap->cur_rx_bufs);
+ if ((cur_size < RX_LOW_STD_THRES) &&
+ !test_and_set_bit(0, &ap->std_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling buffers (current %i)\n", cur_size);
+#endif
+ ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+ }
+
+ if (ap->version >= 2) {
+ cur_size = atomic_read(&ap->cur_mini_bufs);
+ if ((cur_size < RX_LOW_MINI_THRES) &&
+ !test_and_set_bit(0, &ap->mini_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling mini buffers (current %i)\n",
+ cur_size);
+#endif
+ ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ }
+ }
+
+ cur_size = atomic_read(&ap->cur_jumbo_bufs);
+ if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
+ !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling jumbo buffers (current %i)\n", cur_size);
+#endif
+ ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ }
+ ap->tasklet_pending = 0;
+}
+
+
+/*
+ * Copy the contents of the NIC's trace buffer to kernel memory.
+ */
+static void ace_dump_trace(struct ace_private *ap)
+{
+#if 0
+ if (!ap->trace_buf)
+ if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
+ return;
+#endif
+}
+
+
+/*
+ * Load the standard rx ring.
+ *
+ * Loading rings is safe without holding the spin lock since this is
+ * done either before the device is enabled (thus no interrupts are
+ * generated) or by the interrupt handler/tasklet handler itself.
+ */
+static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
+{
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+
+ prefetchw(&ap->cur_rx_bufs);
+
+ idx = ap->rx_std_skbprd;
+
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_STD_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_std_skbuff[idx].skb = skb;
+ pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_std_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_STD_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_STD_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_rx_bufs);
+ ap->rx_std_skbprd = idx;
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = ap->rx_std_skbprd;
+ ace_issue_cmd(regs, &cmd);
+ } else {
+ writel(idx, &regs->RxStdPrd);
+ wmb();
+ }
+
+ out:
+ clear_bit(0, &ap->std_refill_busy);
+ return;
+
+ error_out:
+ printk(KERN_INFO "Out of memory when allocating "
+ "standard receive buffers\n");
+ goto out;
+}
+
+
+static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
+{
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+ prefetchw(&ap->cur_mini_bufs);
+
+ idx = ap->rx_mini_skbprd;
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_MINI_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_mini_skbuff[idx].skb = skb;
+ pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_mini_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_MINI_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_MINI_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_mini_bufs);
+
+ ap->rx_mini_skbprd = idx;
+
+ writel(idx, &regs->RxMiniPrd);
+ wmb();
+
+ out:
+ clear_bit(0, &ap->mini_refill_busy);
+ return;
+ error_out:
+ printk(KERN_INFO "Out of memory when allocating "
+ "mini receive buffers\n");
+ goto out;
+}
+
+
+/*
+ * Load the jumbo rx ring, this may happen at any time if the MTU
+ * is changed to a value > 1500.
+ */
+static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
+{
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+ idx = ap->rx_jumbo_skbprd;
+
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_JUMBO_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_jumbo_skbuff[idx].skb = skb;
+ pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_jumbo_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_JUMBO_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_jumbo_bufs);
+ ap->rx_jumbo_skbprd = idx;
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = ap->rx_jumbo_skbprd;
+ ace_issue_cmd(regs, &cmd);
+ } else {
+ writel(idx, &regs->RxJumboPrd);
+ wmb();
+ }
+
+ out:
+ clear_bit(0, &ap->jumbo_refill_busy);
+ return;
+ error_out:
+ if (net_ratelimit())
+ printk(KERN_INFO "Out of memory when allocating "
+ "jumbo receive buffers\n");
+ goto out;
+}
+
+
+/*
+ * All events are considered to be slow (RX/TX ints do not generate
+ * events) and are handled here, outside the main interrupt handler,
+ * to reduce the size of the handler.
+ */
+static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
+{
+ struct ace_private *ap;
+
+ ap = netdev_priv(dev);
+
+ while (evtcsm != evtprd) {
+ switch (ap->evt_ring[evtcsm].evt) {
+ case E_FW_RUNNING:
+ printk(KERN_INFO "%s: Firmware up and running\n",
+ ap->name);
+ ap->fw_running = 1;
+ wmb();
+ break;
+ case E_STATS_UPDATED:
+ break;
+ case E_LNK_STATE:
+ {
+ u16 code = ap->evt_ring[evtcsm].code;
+ switch (code) {
+ case E_C_LINK_UP:
+ {
+ u32 state = readl(&ap->regs->GigLnkState);
+ printk(KERN_WARNING "%s: Optical link UP "
+ "(%s Duplex, Flow Control: %s%s)\n",
+ ap->name,
+ state & LNK_FULL_DUPLEX ? "Full":"Half",
+ state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
+ state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
+ break;
+ }
+ case E_C_LINK_DOWN:
+ printk(KERN_WARNING "%s: Optical link DOWN\n",
+ ap->name);
+ break;
+ case E_C_LINK_10_100:
+ printk(KERN_WARNING "%s: 10/100BaseT link "
+ "UP\n", ap->name);
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown optical link "
+ "state %02x\n", ap->name, code);
+ }
+ break;
+ }
+ case E_ERROR:
+ switch(ap->evt_ring[evtcsm].code) {
+ case E_C_ERR_INVAL_CMD:
+ printk(KERN_ERR "%s: invalid command error\n",
+ ap->name);
+ break;
+ case E_C_ERR_UNIMP_CMD:
+ printk(KERN_ERR "%s: unimplemented command "
+ "error\n", ap->name);
+ break;
+ case E_C_ERR_BAD_CFG:
+ printk(KERN_ERR "%s: bad config error\n",
+ ap->name);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown error %02x\n",
+ ap->name, ap->evt_ring[evtcsm].code);
+ }
+ break;
+ case E_RESET_JUMBO_RNG:
+ {
+ int i;
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+ if (ap->skb->rx_jumbo_skbuff[i].skb) {
+ ap->rx_jumbo_ring[i].size = 0;
+ set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
+ dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
+ ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+ }
+ }
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(ap->regs, &cmd);
+ } else {
+ writel(0, &((ap->regs)->RxJumboPrd));
+ wmb();
+ }
+
+ ap->jumbo = 0;
+ ap->rx_jumbo_skbprd = 0;
+ printk(KERN_INFO "%s: Jumbo ring flushed\n",
+ ap->name);
+ clear_bit(0, &ap->jumbo_refill_busy);
+ break;
+ }
+ default:
+ printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
+ ap->name, ap->evt_ring[evtcsm].evt);
+ }
+ evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
+ }
+
+ return evtcsm;
+}
+
+
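+/*
+ * RX completion: walk the return ring from the current consumer
+ * index (rxretcsm) up to the producer index written back by the NIC
+ * (rxretprd). Each returned descriptor identifies the ring
+ * (std/jumbo/mini) and slot its buffer came from, so the matching
+ * skb can be unmapped and handed to the stack. The std and mini
+ * buffer counts are accumulated in local variables and subtracted
+ * once at the end to avoid one atomic operation per packet.
+ */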
+static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ u32 idx;
+ int mini_count = 0, std_count = 0;
+
+ idx = rxretcsm;
+
+ prefetchw(&ap->cur_rx_bufs);
+ prefetchw(&ap->cur_mini_bufs);
+
+ while (idx != rxretprd) {
+ struct ring_info *rip;
+ struct sk_buff *skb;
+ struct rx_desc *rxdesc, *retdesc;
+ u32 skbidx;
+ int bd_flags, desc_type, mapsize;
+ u16 csum;
+
+
+ /* make sure the rx descriptor isn't read before rxretprd */
+ if (idx == rxretcsm)
+ rmb();
+
+ retdesc = &ap->rx_return_ring[idx];
+ skbidx = retdesc->idx;
+ bd_flags = retdesc->flags;
+ desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
+
+ switch(desc_type) {
+ /*
+ * Normal frames do not have any flags set
+ *
+ * Mini and normal frames arrive frequently,
+ * so use a local counter to avoid doing
+ * atomic operations for each packet arriving.
+ */
+ case 0:
+ rip = &ap->skb->rx_std_skbuff[skbidx];
+ mapsize = ACE_STD_BUFSIZE;
+ rxdesc = &ap->rx_std_ring[skbidx];
+ std_count++;
+ break;
+ case BD_FLG_JUMBO:
+ rip = &ap->skb->rx_jumbo_skbuff[skbidx];
+ mapsize = ACE_JUMBO_BUFSIZE;
+ rxdesc = &ap->rx_jumbo_ring[skbidx];
+ atomic_dec(&ap->cur_jumbo_bufs);
+ break;
+ case BD_FLG_MINI:
+ rip = &ap->skb->rx_mini_skbuff[skbidx];
+ mapsize = ACE_MINI_BUFSIZE;
+ rxdesc = &ap->rx_mini_ring[skbidx];
+ mini_count++;
+ break;
+ default:
+ printk(KERN_INFO "%s: unknown frame type (0x%02x) "
+ "returned by NIC\n", dev->name,
+ retdesc->flags);
+ goto error;
+ }
+
+ skb = rip->skb;
+ rip->skb = NULL;
+ pci_unmap_page(ap->pdev,
+ pci_unmap_addr(rip, mapping),
+ mapsize,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, retdesc->size);
+
+ /*
+ * Fly baby, fly!
+ */
+ csum = retdesc->tcp_udp_csum;
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /*
+ * Instead of forcing the poor tigon mips cpu to calculate
+ * pseudo hdr checksum, we do this ourselves.
+ */
+ if (bd_flags & BD_FLG_TCP_UDP_SUM) {
+ skb->csum = htons(csum);
+ skb->ip_summed = CHECKSUM_HW;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ /* send it up */
+#if ACENIC_DO_VLAN
+ if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
+ vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
+ } else
+#endif
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ ap->stats.rx_packets++;
+ ap->stats.rx_bytes += retdesc->size;
+
+ idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
+ }
+
+ atomic_sub(std_count, &ap->cur_rx_bufs);
+ if (!ACE_IS_TIGON_I(ap))
+ atomic_sub(mini_count, &ap->cur_mini_bufs);
+
+ out:
+ /*
+ * According to the documentation RxRetCsm is obsolete with
+ * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
+ */
+ if (ACE_IS_TIGON_I(ap)) {
+ writel(idx, &ap->regs->RxRetCsm);
+ }
+ ap->cur_rx = idx;
+
+ return;
+ error:
+ idx = rxretprd;
+ goto out;
+}
+
+
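+/*
+ * TX completion: reclaim descriptors from the last acknowledged
+ * index up to the consumer index reported by the NIC (txcsm),
+ * unmapping each fragment and freeing the skb attached to the last
+ * descriptor of a packet, then wake the queue if it was stopped.
+ * See the comment at the end of this function for why tx_ret_csm is
+ * only advanced after the wakeup check.
+ */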
+static inline void ace_tx_int(struct net_device *dev,
+ u32 txcsm, u32 idx)
+{
+ struct ace_private *ap = netdev_priv(dev);
+
+ do {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ struct tx_ring_info *info;
+
+ info = ap->skb->tx_skbuff + idx;
+ skb = info->skb;
+ mapping = pci_unmap_addr(info, mapping);
+
+ if (mapping) {
+ pci_unmap_page(ap->pdev, mapping,
+ pci_unmap_len(info, maplen),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(info, mapping, 0);
+ }
+
+ if (skb) {
+ ap->stats.tx_packets++;
+ ap->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ info->skb = NULL;
+ }
+
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+ } while (idx != txcsm);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ wmb();
+ ap->tx_ret_csm = txcsm;
+
+ /* Note that tx_ret_csm is advanced _after_ the check for a device
+ * wakeup.
+ *
+ * We could advance it first, but then we would get the following
+ * race: hard_start_xmit on another cpu enters after we have
+ * advanced tx_ret_csm and fills the space we have just freed, so
+ * we end up issuing an illegal device wakeup against a ring that
+ * is full again. There is no good way to work around this (the
+ * check at entry to ace_start_xmit detects this condition and
+ * prevents ring corruption, but it is not a good workaround.)
+ *
+ * When tx_ret_csm is advanced afterwards, we wake up the device
+ * _only_ if there really is space in the ring (though the core
+ * doing hard_start_xmit can see a full ring for some period and
+ * has to synchronize.) Superb.
+ *
+ * BUT! We get another subtle race condition: hard_start_xmit may
+ * think the ring is full between the wakeup and the advancing of
+ * tx_ret_csm and stop the queue again instantly. That is not so
+ * bad: we are guaranteed that there is something in the ring, so
+ * the next irq will resume transmission. To speed this up we could
+ * mark the descriptor which closes the ring with BD_FLG_COAL_NOW
+ * (see ace_start_xmit).
+ *
+ * Well, this dilemma exists in all lock-free drivers. Following
+ * the scheme used in Donald Becker's drivers, we select the least
+ * dangerous option.
+ * --ANK
+ */
+}
+
+
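+/*
+ * Main interrupt handler. After checking that the NIC actually
+ * asserted the interrupt (important for shared PCI irqs), the
+ * handler acks it via mailbox 0 and then processes, in order, the
+ * RX return ring, the TX consumer index and the event ring.
+ * Finally, any RX ring that has fallen below its low-water mark is
+ * topped up: inline when below the panic threshold, otherwise via
+ * the refill tasklet.
+ */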
+static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 idx;
+ u32 txcsm, rxretcsm, rxretprd;
+ u32 evtcsm, evtprd;
+
+ /*
+ * In case of PCI shared interrupts or spurious interrupts,
+ * we want to make sure it is actually our interrupt before
+ * spending any time in here.
+ */
+ if (!(readl(&regs->HostCtrl) & IN_INT))
+ return IRQ_NONE;
+
+ /*
+ * ACK the intr now. Otherwise we would lose updates to rx_ret_prd
+ * which happen _after_ rxretprd = *ap->rx_ret_prd; but before
+ * writel(0, &regs->Mb0Lo).
+ *
+ * The "IRQ avoidance" recommended in the docs applies only to IRQs
+ * serviced by threads, and it is wrong even for that case.
+ */
+ writel(0, &regs->Mb0Lo);
+ readl(&regs->Mb0Lo);
+
+ /*
+ * There is no conflict between transmit handling in
+ * start_xmit and receive processing, thus there is no reason
+ * to take a spin lock for RX handling. Wait until we start
+ * working on the other stuff - hey we don't need a spin lock
+ * anymore.
+ */
+ rxretprd = *ap->rx_ret_prd;
+ rxretcsm = ap->cur_rx;
+
+ if (rxretprd != rxretcsm)
+ ace_rx_int(dev, rxretprd, rxretcsm);
+
+ txcsm = *ap->tx_csm;
+ idx = ap->tx_ret_csm;
+
+ if (txcsm != idx) {
+ /*
+ * If each skb takes only one descriptor this check degenerates
+ * to identity, because new space has just been opened.
+ * But if skbs are fragmented we must check that this index
+ * update releases enough space; otherwise we just wait for the
+ * device to complete more work.
+ */
+ if (!tx_ring_full(ap, txcsm, ap->tx_prd))
+ ace_tx_int(dev, txcsm, idx);
+ }
+
+ evtcsm = readl(&regs->EvtCsm);
+ evtprd = *ap->evt_prd;
+
+ if (evtcsm != evtprd) {
+ evtcsm = ace_handle_event(dev, evtcsm, evtprd);
+ writel(evtcsm, &regs->EvtCsm);
+ }
+
+ /*
+ * This has to go last in the interrupt handler and run with
+ * the spin lock released ... what lock?
+ */
+ if (netif_running(dev)) {
+ int cur_size;
+ int run_tasklet = 0;
+
+ cur_size = atomic_read(&ap->cur_rx_bufs);
+ if (cur_size < RX_LOW_STD_THRES) {
+ if ((cur_size < RX_PANIC_STD_THRES) &&
+ !test_and_set_bit(0, &ap->std_refill_busy)) {
+#ifdef DEBUG
+ printk("low on std buffers %i\n", cur_size);
+#endif
+ ace_load_std_rx_ring(ap,
+ RX_RING_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+
+ if (!ACE_IS_TIGON_I(ap)) {
+ cur_size = atomic_read(&ap->cur_mini_bufs);
+ if (cur_size < RX_LOW_MINI_THRES) {
+ if ((cur_size < RX_PANIC_MINI_THRES) &&
+ !test_and_set_bit(0,
+ &ap->mini_refill_busy)) {
+#ifdef DEBUG
+ printk("low on mini buffers %i\n",
+ cur_size);
+#endif
+ ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+ }
+
+ if (ap->jumbo) {
+ cur_size = atomic_read(&ap->cur_jumbo_bufs);
+ if (cur_size < RX_LOW_JUMBO_THRES) {
+ if ((cur_size < RX_PANIC_JUMBO_THRES) &&
+ !test_and_set_bit(0,
+ &ap->jumbo_refill_busy)){
+#ifdef DEBUG
+ printk("low on jumbo buffers %i\n",
+ cur_size);
+#endif
+ ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+ }
+ if (run_tasklet && !ap->tasklet_pending) {
+ ap->tasklet_pending = 1;
+ tasklet_schedule(&ap->ace_tasklet);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+#if ACENIC_DO_VLAN
+static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ace_mask_irq(dev);
+
+ ap->vlgrp = grp;
+
+ ace_unmask_irq(dev);
+ local_irq_restore(flags);
+}
+
+
+static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ace_mask_irq(dev);
+
+ if (ap->vlgrp)
+ ap->vlgrp->vlan_devices[vid] = NULL;
+
+ ace_unmask_irq(dev);
+ local_irq_restore(flags);
+}
+#endif /* ACENIC_DO_VLAN */
+
+
+static int ace_open(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+
+ if (!(ap->fw_running)) {
+ printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
+ return -EBUSY;
+ }
+
+ writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
+
+ cmd.evt = C_CLEAR_STATS;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ cmd.evt = C_HOST_STATE;
+ cmd.code = C_C_STACK_UP;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ if (ap->jumbo &&
+ !test_and_set_bit(0, &ap->jumbo_refill_busy))
+ ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+
+ if (dev->flags & IFF_PROMISC) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ ap->promisc = 1;
+ }else
+ ap->promisc = 0;
+ ap->mcast_all = 0;
+
+#if 0
+ cmd.evt = C_LNK_NEGOTIATION;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+#endif
+
+ netif_start_queue(dev);
+
+ /*
+ * Setup the bottom half rx ring refill handler
+ */
+ tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+ return 0;
+}
+
+
+static int ace_close(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+ unsigned long flags;
+ short i;
+
+ /*
+ * Stopping the queue without (or before) releasing the irq and
+ * stopping the hardware makes little sense: it will be undone
+ * instantly by the first irq.
+ */
+ netif_stop_queue(dev);
+
+
+ if (ap->promisc) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 0;
+ }
+
+ cmd.evt = C_HOST_STATE;
+ cmd.code = C_C_STACK_DOWN;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ tasklet_kill(&ap->ace_tasklet);
+
+ /*
+ * Make sure one CPU is not processing packets while
+ * buffers are being released by another.
+ */
+
+ local_irq_save(flags);
+ ace_mask_irq(dev);
+
+ for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ struct tx_ring_info *info;
+
+ info = ap->skb->tx_skbuff + i;
+ skb = info->skb;
+ mapping = pci_unmap_addr(info, mapping);
+
+ if (mapping) {
+ if (ACE_IS_TIGON_I(ap)) {
+ struct tx_desc __iomem *tx
+ = (struct tx_desc __iomem *) &ap->tx_ring[i];
+ writel(0, &tx->addr.addrhi);
+ writel(0, &tx->addr.addrlo);
+ writel(0, &tx->flagsize);
+ } else
+ memset(ap->tx_ring + i, 0,
+ sizeof(struct tx_desc));
+ pci_unmap_page(ap->pdev, mapping,
+ pci_unmap_len(info, maplen),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(info, mapping, 0);
+ }
+ if (skb) {
+ dev_kfree_skb(skb);
+ info->skb = NULL;
+ }
+ }
+
+ if (ap->jumbo) {
+ cmd.evt = C_RESET_JUMBO_RNG;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+
+ ace_unmask_irq(dev);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+static inline dma_addr_t
+ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
+ struct sk_buff *tail, u32 idx)
+{
+ dma_addr_t mapping;
+ struct tx_ring_info *info;
+
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ skb->len, PCI_DMA_TODEVICE);
+
+ info = ap->skb->tx_skbuff + idx;
+ info->skb = tail;
+ pci_unmap_addr_set(info, mapping, mapping);
+ pci_unmap_len_set(info, maplen, skb->len);
+ return mapping;
+}
+
+
+static inline void
+ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
+ u32 flagsize, u32 vlan_tag)
+{
+#if !USE_TX_COAL_NOW
+ flagsize &= ~BD_FLG_COAL_NOW;
+#endif
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct tx_desc __iomem *io = (struct tx_desc __iomem *) desc;
+ writel(addr >> 32, &io->addr.addrhi);
+ writel(addr & 0xffffffff, &io->addr.addrlo);
+ writel(flagsize, &io->flagsize);
+#if ACENIC_DO_VLAN
+ writel(vlan_tag, &io->vlanres);
+#endif
+ } else {
+ desc->addr.addrhi = addr >> 32;
+ desc->addr.addrlo = addr;
+ desc->flagsize = flagsize;
+#if ACENIC_DO_VLAN
+ desc->vlanres = vlan_tag;
+#endif
+ }
+}
+
+
+static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct tx_desc *desc;
+ u32 idx, flagsize;
+ unsigned long maxjiff = jiffies + 3*HZ;
+
+restart:
+ idx = ap->tx_prd;
+
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ goto overflow;
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ dma_addr_t mapping;
+ u32 vlan_tag = 0;
+
+ mapping = ace_map_tx_skb(ap, skb, skb, idx);
+ flagsize = (skb->len << 16) | (BD_FLG_END);
+ if (skb->ip_summed == CHECKSUM_HW)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+#if ACENIC_DO_VLAN
+ if (vlan_tx_tag_present(skb)) {
+ flagsize |= BD_FLG_VLAN_TAG;
+ vlan_tag = vlan_tx_tag_get(skb);
+ }
+#endif
+ desc = ap->tx_ring + idx;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ /* Look at ace_tx_int for explanations. */
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ flagsize |= BD_FLG_COAL_NOW;
+
+ ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
+ } else {
+ dma_addr_t mapping;
+ u32 vlan_tag = 0;
+ int i, len = 0;
+
+ mapping = ace_map_tx_skb(ap, skb, NULL, idx);
+ flagsize = (skb_headlen(skb) << 16);
+ if (skb->ip_summed == CHECKSUM_HW)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+#if ACENIC_DO_VLAN
+ if (vlan_tx_tag_present(skb)) {
+ flagsize |= BD_FLG_VLAN_TAG;
+ vlan_tag = vlan_tx_tag_get(skb);
+ }
+#endif
+
+ ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
+
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct tx_ring_info *info;
+
+ len += frag->size;
+ info = ap->skb->tx_skbuff + idx;
+ desc = ap->tx_ring + idx;
+
+ mapping = pci_map_page(ap->pdev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
+
+ flagsize = (frag->size << 16);
+ if (skb->ip_summed == CHECKSUM_HW)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ if (i == skb_shinfo(skb)->nr_frags - 1) {
+ flagsize |= BD_FLG_END;
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ flagsize |= BD_FLG_COAL_NOW;
+
+ /*
+ * Only the last fragment frees
+ * the skb!
+ */
+ info->skb = skb;
+ } else {
+ info->skb = NULL;
+ }
+ pci_unmap_addr_set(info, mapping, mapping);
+ pci_unmap_len_set(info, maplen, frag->size);
+ ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
+ }
+ }
+
+ wmb();
+ ap->tx_prd = idx;
+ ace_set_txprd(regs, ap, idx);
+
+ if (flagsize & BD_FLG_COAL_NOW) {
+ netif_stop_queue(dev);
+
+ /*
+ * A TX-descriptor producer (an IRQ) might have gotten
+ * inbetween, making the ring free again. Since xmit is
+ * serialized, this is the only situation we have to
+ * re-test.
+ */
+ if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
+ netif_wake_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+overflow:
+ /*
+ * This race condition is unavoidable with lock-free drivers.
+ * The queue is woken up _before_ tx_prd is advanced, so we can
+ * enter hard_start_xmit too early, while the tx ring still looks
+ * full. This happens roughly 1-4 times per 100000 packets, so it
+ * is acceptable to spin here, syncing with the other CPU. Probably
+ * an additional wmb() in ace_tx_int would not hurt either.
+ *
+ * Note that the race is relieved by reserving one more entry in
+ * the tx ring than is strictly necessary (see the original non-SG
+ * driver). However, with SG we would need to reserve
+ * 2*MAX_SKB_FRAGS+1 entries, which is already overkill.
+ *
+ * The alternative is to return NETDEV_TX_BUSY without throttling
+ * the queue; in that case the retry loop just becomes longer, with
+ * no additional benefit.
+ */
+ if (time_before(jiffies, maxjiff)) {
+ barrier();
+ cpu_relax();
+ goto restart;
+ }
+
+ /* The ring is stuck full. */
+ printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
+ return NETDEV_TX_BUSY;
+}
+
+
+static int ace_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (new_mtu > ACE_JUMBO_MTU)
+ return -EINVAL;
+
+ writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
+ dev->mtu = new_mtu;
+
+ if (new_mtu > ACE_STD_MTU) {
+ if (!(ap->jumbo)) {
+ printk(KERN_INFO "%s: Enabling Jumbo frame "
+ "support\n", dev->name);
+ ap->jumbo = 1;
+ if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
+ ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_set_rxtx_parms(dev, 1);
+ }
+ } else {
+ while (test_and_set_bit(0, &ap->jumbo_refill_busy));
+ ace_sync_irq(dev->irq);
+ ace_set_rxtx_parms(dev, 0);
+ if (ap->jumbo) {
+ struct cmd cmd;
+
+ cmd.evt = C_RESET_JUMBO_RNG;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+ }
+
+ return 0;
+}
+
+static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 link;
+
+ memset(ecmd, 0, sizeof(struct ethtool_cmd));
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ link = readl(&regs->GigLnkState);
+ if (link & LNK_1000MB)
+ ecmd->speed = SPEED_1000;
+ else {
+ link = readl(&regs->FastLnkState);
+ if (link & LNK_100MB)
+ ecmd->speed = SPEED_100;
+ else if (link & LNK_10MB)
+ ecmd->speed = SPEED_10;
+ else
+ ecmd->speed = 0;
+ }
+ if (link & LNK_FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+
+ if (link & LNK_NEGOTIATE)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+#if 0
+ /*
+ * Current struct ethtool_cmd is insufficient
+ */
+ ecmd->trace = readl(&regs->TuneTrace);
+
+ ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
+ ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
+#endif
+ ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
+ ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
+
+ return 0;
+}
+
+static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 link, speed;
+
+ link = readl(&regs->GigLnkState);
+ if (link & LNK_1000MB)
+ speed = SPEED_1000;
+ else {
+ link = readl(&regs->FastLnkState);
+ if (link & LNK_100MB)
+ speed = SPEED_100;
+ else if (link & LNK_10MB)
+ speed = SPEED_10;
+ else
+ speed = SPEED_100;
+ }
+
+ link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
+ LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
+ if (!ACE_IS_TIGON_I(ap))
+ link |= LNK_TX_FLOW_CTL_Y;
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ link |= LNK_NEGOTIATE;
+ if (ecmd->speed != speed) {
+ link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
+ switch (ecmd->speed) {
+ case SPEED_1000:
+ link |= LNK_1000MB;
+ break;
+ case SPEED_100:
+ link |= LNK_100MB;
+ break;
+ case SPEED_10:
+ link |= LNK_10MB;
+ break;
+ }
+ }
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ link |= LNK_FULL_DUPLEX;
+
+ if (link != ap->link) {
+ struct cmd cmd;
+ printk(KERN_INFO "%s: Renegotiating link state\n",
+ dev->name);
+
+ ap->link = link;
+ writel(link, &regs->TuneLink);
+ if (!ACE_IS_TIGON_I(ap))
+ writel(link, &regs->TuneFastLink);
+ wmb();
+
+ cmd.evt = C_LNK_NEGOTIATION;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+ return 0;
+}
+
+static void ace_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ace_private *ap = netdev_priv(dev);
+
+ strlcpy(info->driver, "acenic", sizeof(info->driver));
+ snprintf(info->version, sizeof(info->version), "%i.%i.%i",
+ tigonFwReleaseMajor, tigonFwReleaseMinor,
+ tigonFwReleaseFix);
+
+ if (ap->pdev)
+ strlcpy(info->bus_info, pci_name(ap->pdev),
+ sizeof(info->bus_info));
+
+}
+
+/*
+ * Set the hardware MAC address.
+ */
+static int ace_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct sockaddr *addr=p;
+ u8 *da;
+ struct cmd cmd;
+
+ if(netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+
+ da = (u8 *)dev->dev_addr;
+
+ writel(da[0] << 8 | da[1], &regs->MacAddrHi);
+ writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
+ &regs->MacAddrLo);
+
+ cmd.evt = C_SET_MAC_ADDR;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ return 0;
+}
+
+
+static void ace_set_multicast_list(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+
+ if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->mcast_all = 1;
+ } else if (ap->mcast_all) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->mcast_all = 0;
+ }
+
+ if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 1;
+ }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 0;
+ }
+
+ /*
+ * For the time being multicast relies on the upper layers
+ * filtering it properly. The Firmware does not allow one to
+ * set the entire multicast list at a time and keeping track of
+ * it here is going to be messy.
+ */
+ if ((dev->mc_count) && !(ap->mcast_all)) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }else if (!ap->mcast_all) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+}
+
+
+static struct net_device_stats *ace_get_stats(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_mac_stats __iomem *mac_stats =
+ (struct ace_mac_stats __iomem *)ap->regs->Stats;
+
+ ap->stats.rx_missed_errors = readl(&mac_stats->drop_space);
+ ap->stats.multicast = readl(&mac_stats->kept_mc);
+ ap->stats.collisions = readl(&mac_stats->coll);
+
+ return &ap->stats;
+}
+
+
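+/*
+ * The NIC's SRAM is reached through a sliding window of
+ * ACE_WINDOW_SIZE bytes whose base is selected via the WinBase
+ * register. ace_copy() and ace_clear() below therefore chunk their
+ * work so that no transfer crosses a window boundary: tsize is the
+ * smaller of the distance to the end of the current window and the
+ * number of bytes left to process.
+ */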
+static void __devinit ace_copy(struct ace_regs __iomem *regs, void *src,
+ u32 dest, int size)
+{
+ void __iomem *tdest;
+ u32 *wsrc;
+ short tsize, i;
+
+ if (size <= 0)
+ return;
+
+ while (size > 0) {
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
+ tdest = (void __iomem *) &regs->Window +
+ (dest & (ACE_WINDOW_SIZE - 1));
+ writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+ /*
+ * This requires byte swapping on big endian, however
+ * writel does that for us
+ */
+ wsrc = src;
+ for (i = 0; i < (tsize / 4); i++) {
+ writel(wsrc[i], tdest + i*4);
+ }
+ dest += tsize;
+ src += tsize;
+ size -= tsize;
+ }
+
+ return;
+}
+
+
+static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
+{
+ void __iomem *tdest;
+ short tsize = 0, i;
+
+ if (size <= 0)
+ return;
+
+ while (size > 0) {
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
+ tdest = (void __iomem *) &regs->Window +
+ (dest & (ACE_WINDOW_SIZE - 1));
+ writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+
+ for (i = 0; i < (tsize / 4); i++) {
+ writel(0, tdest + i*4);
+ }
+
+ dest += tsize;
+ size -= tsize;
+ }
+
+ return;
+}
+
+
+/*
+ * Download the firmware into the SRAM on the NIC
+ *
+ * This operation requires the NIC to be halted and is performed with
+ * interrupts disabled and with the spinlock held.
+ */
+int __devinit ace_load_firmware(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
+ printk(KERN_ERR "%s: trying to download firmware while the "
+ "CPU is running!\n", ap->name);
+ return -EFAULT;
+ }
+
+ /*
+ * Do not try to clear more than 512KB or we end up seeing
+ * funny things on NICs with only 512KB SRAM
+ */
+ ace_clear(regs, 0x2000, 0x80000-0x2000);
+ if (ACE_IS_TIGON_I(ap)) {
+ ace_copy(regs, tigonFwText, tigonFwTextAddr, tigonFwTextLen);
+ ace_copy(regs, tigonFwData, tigonFwDataAddr, tigonFwDataLen);
+ ace_copy(regs, tigonFwRodata, tigonFwRodataAddr,
+ tigonFwRodataLen);
+ ace_clear(regs, tigonFwBssAddr, tigonFwBssLen);
+ ace_clear(regs, tigonFwSbssAddr, tigonFwSbssLen);
+ }else if (ap->version == 2) {
+ ace_clear(regs, tigon2FwBssAddr, tigon2FwBssLen);
+ ace_clear(regs, tigon2FwSbssAddr, tigon2FwSbssLen);
+ ace_copy(regs, tigon2FwText, tigon2FwTextAddr,tigon2FwTextLen);
+ ace_copy(regs, tigon2FwRodata, tigon2FwRodataAddr,
+ tigon2FwRodataLen);
+ ace_copy(regs, tigon2FwData, tigon2FwDataAddr,tigon2FwDataLen);
+ }
+
+ return 0;
+}
+
+
+/*
+ * The eeprom on the AceNIC is an Atmel i2c EEPROM.
+ *
+ * Accessing the EEPROM is `interesting' to say the least - don't read
+ * this code right after dinner.
+ *
+ * This is all about black magic and bit-banging the device .... I
+ * wonder in what hospital they have put the guy who designed the i2c
+ * specs.
+ *
+ * Oh yes, this is only the beginning!
+ *
+ * Thanks to Stevarino Webinski for helping track down the bugs in the
+ * i2c readout code by beta testing all my hacks.
+ */
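+/*
+ * eeprom_start() below bit-bangs an I2C START condition through the
+ * LocalCtrl GPIO bits: with the clock line held high, the data line
+ * is driven from high to low, then the clock is dropped. Each writel
+ * is followed by a readl (presumably to flush the posted PCI write)
+ * and a short udelay before the next transition.
+ */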
+static void __devinit eeprom_start(struct ace_regs __iomem *regs)
+{
+ u32 local;
+
+ readl(&regs->LocalCtrl);
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+}
+
+
+static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
+{
+ short i;
+ u32 local;
+
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_DATA_OUT;
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ for (i = 0; i < 8; i++, magic <<= 1) {
+ udelay(ACE_SHORT_DELAY);
+ if (magic & 0x80)
+ local |= EEPROM_DATA_OUT;
+ else
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ }
+}
+
+
+static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
+{
+ int state;
+ u32 local;
+
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_LONG_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ /* sample data in middle of high clk */
+ state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ return state;
+}
+
+
+static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
+{
+ u32 local;
+
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_LONG_DELAY);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ mb();
+}
+
+
+/*
+ * Read a whole byte from the EEPROM.
+ */
+static int __devinit read_eeprom_byte(struct net_device *dev,
+ unsigned long offset)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ unsigned long flags;
+ u32 local;
+ int result = 0;
+ short i;
+
+ if (!dev) {
+ printk(KERN_ERR "No device!\n");
+ result = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Don't take interrupts on this CPU while bit banging
+ * the %#%#@$ I2C device
+ */
+ local_irq_save(flags);
+
+ eeprom_start(regs);
+
+ eeprom_prep(regs, EEPROM_WRITE_SELECT);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_prep(regs, (offset >> 8) & 0xff);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set address byte 0\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_prep(regs, offset & 0xff);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set address byte 1\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_start(regs);
+ eeprom_prep(regs, EEPROM_READ_SELECT);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ for (i = 0; i < 8; i++) {
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_LONG_DELAY);
+ mb();
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ /* sample data mid high clk */
+ result = (result << 1) |
+ ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ if (i == 7) {
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ }
+ }
+
+ local |= EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_LONG_DELAY);
+ writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ eeprom_stop(regs);
+
+ local_irq_restore(flags);
+ out:
+ return result;
+
+ eeprom_read_error:
+ printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
+ ap->name, offset);
+ goto out;
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
+ * End:
+ */
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
new file mode 100644
index 000000000000..a97107023495
--- /dev/null
+++ b/drivers/net/acenic.h
@@ -0,0 +1,794 @@
+#ifndef _ACENIC_H_
+#define _ACENIC_H_
+
+#include <linux/config.h>
+
+/*
+ * Generate a TX index update each time the TX ring is closed.
+ * Normally this is not useful, because it results in more dma (and
+ * more irqs without TX_COAL_INTS_ONLY).
+ */
+#define USE_TX_COAL_NOW 0
+
+/*
+ * Addressing:
+ *
+ * The Tigon uses 64-bit host addresses, regardless of the actual
+ * host address width, and it expects them in big-endian format. For
+ * 32 bit systems the upper 32 bits of the address are simply zero
+ * (ignored), however for little endian 64 bit systems (Alpha) this
+ * looks strange, with the two halves of the address word swapped.
+ *
+ * The addresses are split in two 32 bit words for all architectures
+ * as some of them are in PCI shared memory and it is necessary to use
+ * readl/writel to access them.
+ *
+ * The addressing code is derived from Pete Wyckoff's work, but
+ * modified to deal properly with readl/writel usage.
+ */
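+/*
+ * Illustrative example (not part of the original comment): on a
+ * 64-bit host, a DMA address of 0x0000000122334455 is stored by
+ * set_aceaddr() below as addrhi = 0x00000001 and
+ * addrlo = 0x22334455; on a 32-bit host addrhi is simply zero.
+ */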
+
+struct ace_regs {
+ u32 pad0[16]; /* PCI control registers */
+
+ u32 HostCtrl; /* 0x40 */
+ u32 LocalCtrl;
+
+ u32 pad1[2];
+
+ u32 MiscCfg; /* 0x50 */
+
+ u32 pad2[2];
+
+ u32 PciState;
+
+ u32 pad3[2]; /* 0x60 */
+
+ u32 WinBase;
+ u32 WinData;
+
+ u32 pad4[12]; /* 0x70 */
+
+ u32 DmaWriteState; /* 0xa0 */
+ u32 pad5[3];
+ u32 DmaReadState; /* 0xb0 */
+
+ u32 pad6[26];
+
+ u32 AssistState;
+
+ u32 pad7[8]; /* 0x120 */
+
+ u32 CpuCtrl; /* 0x140 */
+ u32 Pc;
+
+ u32 pad8[3];
+
+ u32 SramAddr; /* 0x154 */
+ u32 SramData;
+
+ u32 pad9[49];
+
+ u32 MacRxState; /* 0x220 */
+
+ u32 pad10[7];
+
+ u32 CpuBCtrl; /* 0x240 */
+ u32 PcB;
+
+ u32 pad11[3];
+
+ u32 SramBAddr; /* 0x254 */
+ u32 SramBData;
+
+ u32 pad12[105];
+
+ u32 pad13[32]; /* 0x400 */
+ u32 Stats[32];
+
+ u32 Mb0Hi; /* 0x500 */
+ u32 Mb0Lo;
+ u32 Mb1Hi;
+ u32 CmdPrd;
+ u32 Mb2Hi;
+ u32 TxPrd;
+ u32 Mb3Hi;
+ u32 RxStdPrd;
+ u32 Mb4Hi;
+ u32 RxJumboPrd;
+ u32 Mb5Hi;
+ u32 RxMiniPrd;
+ u32 Mb6Hi;
+ u32 Mb6Lo;
+ u32 Mb7Hi;
+ u32 Mb7Lo;
+ u32 Mb8Hi;
+ u32 Mb8Lo;
+ u32 Mb9Hi;
+ u32 Mb9Lo;
+ u32 MbAHi;
+ u32 MbALo;
+ u32 MbBHi;
+ u32 MbBLo;
+ u32 MbCHi;
+ u32 MbCLo;
+ u32 MbDHi;
+ u32 MbDLo;
+ u32 MbEHi;
+ u32 MbELo;
+ u32 MbFHi;
+ u32 MbFLo;
+
+ u32 pad14[32];
+
+ u32 MacAddrHi; /* 0x600 */
+ u32 MacAddrLo;
+ u32 InfoPtrHi;
+ u32 InfoPtrLo;
+ u32 MultiCastHi; /* 0x610 */
+ u32 MultiCastLo;
+ u32 ModeStat;
+ u32 DmaReadCfg;
+ u32 DmaWriteCfg; /* 0x620 */
+ u32 TxBufRat;
+ u32 EvtCsm;
+ u32 CmdCsm;
+ u32 TuneRxCoalTicks;/* 0x630 */
+ u32 TuneTxCoalTicks;
+ u32 TuneStatTicks;
+ u32 TuneMaxTxDesc;
+ u32 TuneMaxRxDesc; /* 0x640 */
+ u32 TuneTrace;
+ u32 TuneLink;
+ u32 TuneFastLink;
+ u32 TracePtr; /* 0x650 */
+ u32 TraceStrt;
+ u32 TraceLen;
+ u32 IfIdx;
+ u32 IfMtu; /* 0x660 */
+ u32 MaskInt;
+ u32 GigLnkState;
+ u32 FastLnkState;
+ u32 pad16[4]; /* 0x670 */
+ u32 RxRetCsm; /* 0x680 */
+
+ u32 pad17[31];
+
+ u32 CmdRng[64]; /* 0x700 */
+ u32 Window[0x200];
+};
+
+
+typedef struct {
+ u32 addrhi;
+ u32 addrlo;
+} aceaddr;
+
+
+#define ACE_WINDOW_SIZE 0x800
+
+#define ACE_JUMBO_MTU 9000
+#define ACE_STD_MTU 1500
+
+#define ACE_TRACE_SIZE 0x8000
+
+/*
+ * Host control register bits.
+ */
+
+#define IN_INT 0x01
+#define CLR_INT 0x02
+#define HW_RESET 0x08
+#define BYTE_SWAP 0x10
+#define WORD_SWAP 0x20
+#define MASK_INTS 0x40
+
+/*
+ * Local control register bits.
+ */
+
+#define EEPROM_DATA_IN 0x800000
+#define EEPROM_DATA_OUT 0x400000
+#define EEPROM_WRITE_ENABLE 0x200000
+#define EEPROM_CLK_OUT 0x100000
+
+#define EEPROM_BASE 0xa0000000
+
+#define EEPROM_WRITE_SELECT 0xa0
+#define EEPROM_READ_SELECT 0xa1
+
+#define SRAM_BANK_512K 0x200
+
+
+/*
+ * udelay() values for when clocking the eeprom
+ */
+#define ACE_SHORT_DELAY 2
+#define ACE_LONG_DELAY 4
+
+
+/*
+ * Misc Config bits
+ */
+
+#define SYNC_SRAM_TIMING 0x100000
+
+
+/*
+ * CPU state bits.
+ */
+
+#define CPU_RESET 0x01
+#define CPU_TRACE 0x02
+#define CPU_PROM_FAILED 0x10
+#define CPU_HALT 0x00010000
+#define CPU_HALTED 0xffff0000
+
+
+/*
+ * PCI State bits.
+ */
+
+#define DMA_READ_MAX_4 0x04
+#define DMA_READ_MAX_16 0x08
+#define DMA_READ_MAX_32 0x0c
+#define DMA_READ_MAX_64 0x10
+#define DMA_READ_MAX_128 0x14
+#define DMA_READ_MAX_256 0x18
+#define DMA_READ_MAX_1K 0x1c
+#define DMA_WRITE_MAX_4 0x20
+#define DMA_WRITE_MAX_16 0x40
+#define DMA_WRITE_MAX_32 0x60
+#define DMA_WRITE_MAX_64 0x80
+#define DMA_WRITE_MAX_128 0xa0
+#define DMA_WRITE_MAX_256 0xc0
+#define DMA_WRITE_MAX_1K 0xe0
+#define DMA_READ_WRITE_MASK 0xfc
+#define MEM_READ_MULTIPLE 0x00020000
+#define PCI_66MHZ 0x00080000
+#define PCI_32BIT 0x00100000
+#define DMA_WRITE_ALL_ALIGN 0x00800000
+#define READ_CMD_MEM 0x06000000
+#define WRITE_CMD_MEM 0x70000000
+
+
+/*
+ * Mode status
+ */
+
+#define ACE_BYTE_SWAP_BD 0x02
+#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
+#define ACE_WARN 0x08
+#define ACE_BYTE_SWAP_DMA 0x10
+#define ACE_NO_JUMBO_FRAG 0x200
+#define ACE_FATAL 0x40000000
+
+
+/*
+ * DMA config
+ */
+
+#define DMA_THRESH_1W 0x10
+#define DMA_THRESH_2W 0x20
+#define DMA_THRESH_4W 0x40
+#define DMA_THRESH_8W 0x80
+#define DMA_THRESH_16W 0x100
+#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. */
+
+
+/*
+ * Tuning parameters
+ */
+
+#define TICKS_PER_SEC 1000000
+
+
+/*
+ * Link bits
+ */
+
+#define LNK_PREF 0x00008000
+#define LNK_10MB 0x00010000
+#define LNK_100MB 0x00020000
+#define LNK_1000MB 0x00040000
+#define LNK_FULL_DUPLEX 0x00080000
+#define LNK_HALF_DUPLEX 0x00100000
+#define LNK_TX_FLOW_CTL_Y 0x00200000
+#define LNK_NEG_ADVANCED 0x00400000
+#define LNK_RX_FLOW_CTL_Y 0x00800000
+#define LNK_NIC 0x01000000
+#define LNK_JAM 0x02000000
+#define LNK_JUMBO 0x04000000
+#define LNK_ALTEON 0x08000000
+#define LNK_NEG_FCTL 0x10000000
+#define LNK_NEGOTIATE 0x20000000
+#define LNK_ENABLE 0x40000000
+#define LNK_UP 0x80000000
+
+
+/*
+ * Event definitions
+ */
+
+#define EVT_RING_ENTRIES 256
+#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
+
+struct event {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u32 idx:12;
+ u32 code:12;
+ u32 evt:8;
+#else
+ u32 evt:8;
+ u32 code:12;
+ u32 idx:12;
+#endif
+ u32 pad;
+};
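+/*
+ * Layout note (derived from the bitfields above): on a little-endian
+ * host the 32-bit event word carries idx in bits 0-11, code in bits
+ * 12-23 and evt in bits 24-31; the big-endian variant declares the
+ * fields in the opposite order, presumably so the in-memory layout
+ * matches what the firmware expects on either byte order.
+ */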
+
+
+/*
+ * Events
+ */
+
+#define E_FW_RUNNING 0x01
+#define E_STATS_UPDATED 0x04
+
+#define E_STATS_UPDATE 0x04
+
+#define E_LNK_STATE 0x06
+#define E_C_LINK_UP 0x01
+#define E_C_LINK_DOWN 0x02
+#define E_C_LINK_10_100 0x03
+
+#define E_ERROR 0x07
+#define E_C_ERR_INVAL_CMD 0x01
+#define E_C_ERR_UNIMP_CMD 0x02
+#define E_C_ERR_BAD_CFG 0x03
+
+#define E_MCAST_LIST 0x08
+#define E_C_MCAST_ADDR_ADD 0x01
+#define E_C_MCAST_ADDR_DEL 0x02
+
+#define E_RESET_JUMBO_RNG 0x09
+
+
+/*
+ * Commands
+ */
+
+#define CMD_RING_ENTRIES 64
+
+struct cmd {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u32 idx:12;
+ u32 code:12;
+ u32 evt:8;
+#else
+ u32 evt:8;
+ u32 code:12;
+ u32 idx:12;
+#endif
+};
+
+
+#define C_HOST_STATE 0x01
+#define C_C_STACK_UP 0x01
+#define C_C_STACK_DOWN 0x02
+
+#define C_FDR_FILTERING 0x02
+#define C_C_FDR_FILT_ENABLE 0x01
+#define C_C_FDR_FILT_DISABLE 0x02
+
+#define C_SET_RX_PRD_IDX 0x03
+#define C_UPDATE_STATS 0x04
+#define C_RESET_JUMBO_RNG 0x05
+#define C_ADD_MULTICAST_ADDR 0x08
+#define C_DEL_MULTICAST_ADDR 0x09
+
+#define C_SET_PROMISC_MODE 0x0a
+#define C_C_PROMISC_ENABLE 0x01
+#define C_C_PROMISC_DISABLE 0x02
+
+#define C_LNK_NEGOTIATION 0x0b
+#define C_C_NEGOTIATE_BOTH 0x00
+#define C_C_NEGOTIATE_GIG 0x01
+#define C_C_NEGOTIATE_10_100 0x02
+
+#define C_SET_MAC_ADDR 0x0c
+#define C_CLEAR_PROFILE 0x0d
+
+#define C_SET_MULTICAST_MODE 0x0e
+#define C_C_MCAST_ENABLE 0x01
+#define C_C_MCAST_DISABLE 0x02
+
+#define C_CLEAR_STATS 0x0f
+#define C_SET_RX_JUMBO_PRD_IDX 0x10
+#define C_REFRESH_STATS 0x11
+
+
+/*
+ * Descriptor flags
+ */
+#define BD_FLG_TCP_UDP_SUM 0x01
+#define BD_FLG_IP_SUM 0x02
+#define BD_FLG_END 0x04
+#define BD_FLG_MORE 0x08
+#define BD_FLG_JUMBO 0x10
+#define BD_FLG_UCAST 0x20
+#define BD_FLG_MCAST 0x40
+#define BD_FLG_BCAST 0x60
+#define BD_FLG_TYP_MASK 0x60
+#define BD_FLG_IP_FRAG 0x80
+#define BD_FLG_IP_FRAG_END 0x100
+#define BD_FLG_VLAN_TAG 0x200
+#define BD_FLG_FRAME_ERROR 0x400
+#define BD_FLG_COAL_NOW 0x800
+#define BD_FLG_MINI 0x1000
+
+
+/*
+ * Ring Control block flags
+ */
+#define RCB_FLG_TCP_UDP_SUM 0x01
+#define RCB_FLG_IP_SUM 0x02
+#define RCB_FLG_NO_PSEUDO_HDR 0x08
+#define RCB_FLG_VLAN_ASSIST 0x10
+#define RCB_FLG_COAL_INT_ONLY 0x20
+#define RCB_FLG_TX_HOST_RING 0x40
+#define RCB_FLG_IEEE_SNAP_SUM 0x80
+#define RCB_FLG_EXT_RX_BD 0x100
+#define RCB_FLG_RNG_DISABLE 0x200
+
+
+/*
+ * TX ring - the maximum number of TX ring entries for the Tigon I is 128
+ */
+#define MAX_TX_RING_ENTRIES 256
+#define TIGON_I_TX_RING_ENTRIES 128
+#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
+#define TX_RING_BASE 0x3800
+
+struct tx_desc{
+ aceaddr addr;
+ u32 flagsize;
+#if 0
+/*
+ * This is in PCI shared mem and must be accessed with readl/writel
+ * real layout is:
+ */
+#if __LITTLE_ENDIAN
+ u16 flags;
+ u16 size;
+ u16 vlan;
+ u16 reserved;
+#else
+ u16 size;
+ u16 flags;
+ u16 reserved;
+ u16 vlan;
+#endif
+#endif
+ u32 vlanres;
+};
+
+
+#define RX_STD_RING_ENTRIES 512
+#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_JUMBO_RING_ENTRIES 256
+#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES *sizeof(struct rx_desc))
+
+#define RX_MINI_RING_ENTRIES 1024
+#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES *sizeof(struct rx_desc))
+
+#define RX_RETURN_RING_ENTRIES 2048
+#define RX_RETURN_RING_SIZE (RX_RETURN_RING_ENTRIES * \
+ sizeof(struct rx_desc))
+
+struct rx_desc{
+ aceaddr addr;
+#ifdef __LITTLE_ENDIAN
+ u16 size;
+ u16 idx;
+#else
+ u16 idx;
+ u16 size;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 flags;
+ u16 type;
+#else
+ u16 type;
+ u16 flags;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 tcp_udp_csum;
+ u16 ip_csum;
+#else
+ u16 ip_csum;
+ u16 tcp_udp_csum;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 vlan;
+ u16 err_flags;
+#else
+ u16 err_flags;
+ u16 vlan;
+#endif
+ u32 reserved;
+ u32 opague;
+};
+
+
+/*
+ * This struct is shared with the NIC firmware.
+ */
+struct ring_ctrl {
+ aceaddr rngptr;
+#ifdef __LITTLE_ENDIAN
+ u16 flags;
+ u16 max_len;
+#else
+ u16 max_len;
+ u16 flags;
+#endif
+ u32 pad;
+};
+
+
+struct ace_mac_stats {
+ u32 excess_colls;
+ u32 coll_1;
+ u32 coll_2;
+ u32 coll_3;
+ u32 coll_4;
+ u32 coll_5;
+ u32 coll_6;
+ u32 coll_7;
+ u32 coll_8;
+ u32 coll_9;
+ u32 coll_10;
+ u32 coll_11;
+ u32 coll_12;
+ u32 coll_13;
+ u32 coll_14;
+ u32 coll_15;
+ u32 late_coll;
+ u32 defers;
+ u32 crc_err;
+ u32 underrun;
+ u32 crs_err;
+ u32 pad[3];
+ u32 drop_ula;
+ u32 drop_mc;
+ u32 drop_fc;
+ u32 drop_space;
+ u32 coll;
+ u32 kept_bc;
+ u32 kept_mc;
+ u32 kept_uc;
+};
+
+
+struct ace_info {
+ union {
+ u32 stats[256];
+ } s;
+ struct ring_ctrl evt_ctrl;
+ struct ring_ctrl cmd_ctrl;
+ struct ring_ctrl tx_ctrl;
+ struct ring_ctrl rx_std_ctrl;
+ struct ring_ctrl rx_jumbo_ctrl;
+ struct ring_ctrl rx_mini_ctrl;
+ struct ring_ctrl rx_return_ctrl;
+ aceaddr evt_prd_ptr;
+ aceaddr rx_ret_prd_ptr;
+ aceaddr tx_csm_ptr;
+ aceaddr stats2_ptr;
+};
+
+
+struct ring_info {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapping)
+};
+
+
+/*
+ * Funny... As soon as maplen is added on alpha, things start to work
+ * much slower. Hmm... is it because the struct no longer fits in one
+ * cacheline? So, tx_ring_info is split off on its own.
+ */
+struct tx_ring_info {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapping)
+ DECLARE_PCI_UNMAP_LEN(maplen)
+};
+
+
+/*
+ * struct ace_skb holds the rings of skb pointers. This is an awful
+ * lot of pointers, but I don't see any smarter way to do it in an
+ * efficient manner ;-(
+ */
+struct ace_skb
+{
+ struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
+ struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
+ struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
+ struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
+};
+
+
+/*
+ * Struct private for the AceNIC.
+ *
+ * Elements are grouped so that variables used by the tx handling go
+ * together, and will go into the same cache lines etc. in order to
+ * avoid cache line contention between the rx and tx handling on SMP.
+ *
+ * Frequently accessed variables are put at the beginning of the
+ * struct to help the compiler generate better/shorter code.
+ */
+struct ace_private
+{
+ struct ace_info *info;
+ struct ace_regs __iomem *regs; /* register base */
+ struct ace_skb *skb;
+ dma_addr_t info_dma; /* 32/64 bit */
+
+ int version, link;
+ int promisc, mcast_all;
+
+ /*
+ * TX elements
+ */
+ struct tx_desc *tx_ring;
+ u32 tx_prd;
+ volatile u32 tx_ret_csm;
+ int tx_ring_entries;
+
+ /*
+ * RX elements
+ */
+ unsigned long std_refill_busy
+ __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ unsigned long mini_refill_busy, jumbo_refill_busy;
+ atomic_t cur_rx_bufs;
+ atomic_t cur_mini_bufs;
+ atomic_t cur_jumbo_bufs;
+ u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
+ u32 cur_rx;
+
+ struct rx_desc *rx_std_ring;
+ struct rx_desc *rx_jumbo_ring;
+ struct rx_desc *rx_mini_ring;
+ struct rx_desc *rx_return_ring;
+
+#if ACENIC_DO_VLAN
+ struct vlan_group *vlgrp;
+#endif
+
+ int tasklet_pending, jumbo;
+ struct tasklet_struct ace_tasklet;
+
+ struct event *evt_ring;
+
+ volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;
+
+ dma_addr_t tx_ring_dma; /* 32/64 bit */
+ dma_addr_t rx_ring_base_dma;
+ dma_addr_t evt_ring_dma;
+ dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;
+
+ unsigned char *trace_buf;
+ struct pci_dev *pdev;
+ struct net_device *next;
+ volatile int fw_running;
+ int board_idx;
+ u16 pci_command;
+ u8 pci_latency;
+ const char *name;
+#ifdef INDEX_DEBUG
+ spinlock_t debug_lock
+ __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ u32 last_tx, last_std_rx, last_mini_rx;
+#endif
+ struct net_device_stats stats;
+ int pci_using_dac;
+};
+
+
+#define TX_RESERVED MAX_SKB_FRAGS
+
+static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd)
+{
+ return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
+}
+
+#define tx_free(ap) tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
+#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED)
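+
+/*
+ * Illustrative example (not from the original source): with 256 tx
+ * ring entries, csm == 10 and prd == 200, tx_space() evaluates to
+ * (10 - 200 - 1) & 255 == 65, i.e. 65 free slots before the producer
+ * catches up with the not-yet-reclaimed consumer index.
+ * tx_ring_full() then treats the ring as full once no more than
+ * TX_RESERVED (MAX_SKB_FRAGS) slots remain, leaving room for a
+ * maximally fragmented skb.
+ */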
+
+static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
+{
+ u64 baddr = (u64) addr;
+ aa->addrlo = baddr & 0xffffffff;
+ aa->addrhi = baddr >> 32;
+ wmb();
+}
+
+
+static inline void ace_set_txprd(struct ace_regs __iomem *regs,
+ struct ace_private *ap, u32 value)
+{
+#ifdef INDEX_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&ap->debug_lock, flags);
+ writel(value, &regs->TxPrd);
+ if (value == ap->last_tx)
+ printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
+ "to tx producer (%i)\n", value);
+ ap->last_tx = value;
+ spin_unlock_irqrestore(&ap->debug_lock, flags);
+#else
+ writel(value, &regs->TxPrd);
+#endif
+ wmb();
+}
+
+
+static inline void ace_mask_irq(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (ACE_IS_TIGON_I(ap))
+ writel(1, &regs->MaskInt);
+ else
+ writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);
+
+ ace_sync_irq(dev->irq);
+}
+
+
+static inline void ace_unmask_irq(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (ACE_IS_TIGON_I(ap))
+ writel(0, &regs->MaskInt);
+ else
+ writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
+}
+
+
+/*
+ * Prototypes
+ */
+static int ace_init(struct net_device *dev);
+static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
+static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
+static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int ace_load_firmware(struct net_device *dev);
+static int ace_open(struct net_device *dev);
+static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ace_close(struct net_device *dev);
+static void ace_tasklet(unsigned long dev);
+static void ace_dump_trace(struct ace_private *ap);
+static void ace_set_multicast_list(struct net_device *dev);
+static int ace_change_mtu(struct net_device *dev, int new_mtu);
+static int ace_set_mac_addr(struct net_device *dev, void *p);
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
+static int ace_allocate_descriptors(struct net_device *dev);
+static void ace_free_descriptors(struct net_device *dev);
+static void ace_init_cleanup(struct net_device *dev);
+static struct net_device_stats *ace_get_stats(struct net_device *dev);
+static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
+#if ACENIC_DO_VLAN
+static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
+static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+#endif
+
+#endif /* _ACENIC_H_ */
diff --git a/drivers/net/acenic_firmware.h b/drivers/net/acenic_firmware.h
new file mode 100644
index 000000000000..6d625d595622
--- /dev/null
+++ b/drivers/net/acenic_firmware.h
@@ -0,0 +1,9457 @@
+#include <linux/config.h>
+/*
+ * Declare these here even if Tigon I support is disabled to avoid
+ * the compiler complaining about undefined symbols.
+ */
+#define tigonFwReleaseMajor 0xc
+#define tigonFwReleaseMinor 0x4
+#define tigonFwReleaseFix 0xb
+#define tigonFwStartAddr 0x00004000
+#define tigonFwTextAddr 0x00004000
+#define tigonFwTextLen 0x11140
+#define tigonFwRodataAddr 0x00015140
+#define tigonFwRodataLen 0xac0
+#define tigonFwDataAddr 0x00015c20
+#define tigonFwDataLen 0x170
+#define tigonFwSbssAddr 0x00015d90
+#define tigonFwSbssLen 0x38
+#define tigonFwBssAddr 0x00015dd0
+#define tigonFwBssLen 0x2080
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+#define tigonFwText NULL
+#define tigonFwData NULL
+#define tigonFwRodata NULL
+#else
+/* Generated by genfw.c */
+static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __devinitdata = {
+0x10000003,
+0x0, 0xd, 0xd, 0x3c1d0001,
+0x8fbd5c54, 0x3a0f021, 0x3c100000, 0x26104000,
+0xc00100c, 0x0, 0xd, 0x27bdffd8,
+0x3c1cc000, 0x3c1b0013, 0x377bd800, 0xd021,
+0x3c170013, 0x36f75418, 0x2e02021, 0x340583e8,
+0xafbf0024, 0xc002488, 0xafb00020, 0xc0023e8,
+0x0, 0x3c040001, 0x248451a4, 0x24050001,
+0x2e03021, 0x3821, 0x3c100001, 0x26107e50,
+0xafb00010, 0xc002403, 0xafbb0014, 0x3c02000f,
+0x3442ffff, 0x2021024, 0x362102b, 0x10400009,
+0x24050003, 0x3c040001, 0x248451b0, 0x2003021,
+0x3603821, 0x3c020010, 0xafa20010, 0xc002403,
+0xafa00014, 0x2021, 0x3405c000, 0x3c010001,
+0x370821, 0xa02083b0, 0x3c010001, 0x370821,
+0xa02083b2, 0x3c010001, 0x370821, 0xa02083b3,
+0x3c010001, 0x370821, 0xac2083b4, 0xa2e004d8,
+0x418c0, 0x24840001, 0x771021, 0xac40727c,
+0x771021, 0xac407280, 0x2e31021, 0xa445727c,
+0x2c820020, 0x1440fff7, 0x418c0, 0x2021,
+0x3405c000, 0x418c0, 0x24840001, 0x771021,
+0xac40737c, 0x771021, 0xac407380, 0x2e31021,
+0xa445737c, 0x2c820080, 0x5440fff7, 0x418c0,
+0xaf800054, 0xaf80011c, 0x8f820044, 0x34420040,
+0xaf820044, 0x8f820044, 0x34420020, 0xaf820044,
+0x8f420218, 0x30420002, 0x10400009, 0x0,
+0x8f420220, 0x3c030002, 0x34630004, 0x431025,
+0xaee204c4, 0x8f42021c, 0x8001074, 0x34420004,
+0x8f420220, 0x3c030002, 0x34630006, 0x431025,
+0xaee204c4, 0x8f42021c, 0x34420006, 0xaee204cc,
+0x8f420218, 0x30420010, 0x1040000a, 0x0,
+0x8f42021c, 0x34420004, 0xaee204c8, 0x8f420220,
+0x3c03000a, 0x34630004, 0x431025, 0x800108a,
+0xaee204c0, 0x8f420220, 0x3c03000a, 0x34630006,
+0x431025, 0xaee204c0, 0x8f42021c, 0x34420006,
+0xaee204c8, 0x8f420218, 0x30420200, 0x10400003,
+0x24020001, 0x8001091, 0xa2e27248, 0xa2e07248,
+0x24020001, 0xaf8200a0, 0xaf8200b0, 0x8f830054,
+0x8f820054, 0x8001099, 0x24630064, 0x8f820054,
+0x621023, 0x2c420065, 0x1440fffc, 0x0,
+0xaf800044, 0x8f420208, 0x8f43020c, 0xaee20010,
+0xaee30014, 0x8ee40010, 0x8ee50014, 0x26e20030,
+0xaee20028, 0x24020490, 0xaee20018, 0xaf840090,
+0xaf850094, 0x8ee20028, 0xaf8200b4, 0x96e2001a,
+0xaf82009c, 0x8f8200b0, 0x8ee304cc, 0x431025,
+0xaf8200b0, 0x8f8200b0, 0x30420004, 0x1440fffd,
+0x0, 0x8ee20450, 0x8ee30454, 0xaee304fc,
+0x8ee204fc, 0x2442e000, 0x2c422001, 0x1440000d,
+0x26e40030, 0x8ee20450, 0x8ee30454, 0x3c040001,
+0x248451bc, 0x3c050001, 0xafa00010, 0xafa00014,
+0x8ee704fc, 0x34a5f000, 0xc002403, 0x603021,
+0x26e40030, 0xc002488, 0x24050400, 0x27440080,
+0xc002488, 0x24050080, 0x26e4777c, 0xc002488,
+0x24050400, 0x8f42025c, 0x26e40094, 0xaee20060,
+0x8f420260, 0x27450200, 0x24060008, 0xaee20068,
+0x24020006, 0xc00249a, 0xaee20064, 0x3c023b9a,
+0x3442ca00, 0x2021, 0x24030002, 0xaee30074,
+0xaee30070, 0xaee2006c, 0x240203e8, 0xaee20104,
+0x24020001, 0xaee30100, 0xaee2010c, 0x3c030001,
+0x641821, 0x90635c20, 0x2e41021, 0x24840001,
+0xa043009c, 0x2c82000f, 0x1440fff8, 0x0,
+0x8f820040, 0x2e41821, 0x24840001, 0x21702,
+0x24420030, 0xa062009c, 0x2e41021, 0xa040009c,
+0x96e2046a, 0x30420003, 0x14400009, 0x0,
+0x96e2047a, 0x30420003, 0x50400131, 0x3c030800,
+0x96e2046a, 0x30420003, 0x1040002a, 0x3c020700,
+0x96e2047a, 0x30420003, 0x10400026, 0x3c020700,
+0x96e3047a, 0x96e2046a, 0x14620022, 0x3c020700,
+0x8ee204c0, 0x24030001, 0xa2e34e20, 0x34420e00,
+0xaee204c0, 0x8f420218, 0x30420100, 0x10400005,
+0x0, 0x3c020001, 0x2442e168, 0x800111d,
+0x21100, 0x3c020001, 0x2442d35c, 0x21100,
+0x21182, 0x3c030800, 0x431025, 0x3c010001,
+0xac221238, 0x3c020001, 0x2442f680, 0x21100,
+0x21182, 0x3c030800, 0x431025, 0x3c010001,
+0xac221278, 0x8ee20000, 0x34424000, 0x8001238,
+0xaee20000, 0x34423000, 0xafa20018, 0x8ee20608,
+0x8f430228, 0x24420001, 0x304900ff, 0x512300e2,
+0xafa00010, 0x8ee20608, 0x210c0, 0x571021,
+0x8fa30018, 0x8fa4001c, 0xac43060c, 0xac440610,
+0x8f870120, 0x27623800, 0x24e80020, 0x102102b,
+0x50400001, 0x27683000, 0x8f820128, 0x11020004,
+0x0, 0x8f820124, 0x15020007, 0x1021,
+0x8ee201a4, 0x3021, 0x24420001, 0xaee201a4,
+0x80011a0, 0x8ee201a4, 0x8ee40608, 0x420c0,
+0x801821, 0x8ee40430, 0x8ee50434, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0xace40000,
+0xace50004, 0x8ee30608, 0x24020008, 0xa4e2000e,
+0x2402000d, 0xace20018, 0xace9001c, 0x318c0,
+0x2463060c, 0x2e31021, 0xace20008, 0x8ee204c4,
+0xace20010, 0xaf880120, 0x92e24e20, 0x14400037,
+0x24060001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x24030040, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
+0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
+0x0, 0x800118a, 0x0, 0x14a00005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
+0xac800000, 0x80011a0, 0x0, 0x8ee24e30,
+0x24030040, 0x24420001, 0x50430003, 0x1021,
+0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x24020007,
+0xac820000, 0x24020001, 0xac820004, 0x54c0000c,
+0xaee90608, 0x3c040001, 0x248451c8, 0xafa00010,
+0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
+0xc002403, 0x34a5f000, 0x8001223, 0x0,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x3021, 0x24420001, 0xaee201a4,
+0x8001207, 0x8ee201a4, 0x8ee20608, 0xac62001c,
+0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
+0x24020008, 0xa462000e, 0x24020011, 0xac620018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400037, 0x24060001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c830000, 0x24020012, 0x1462001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee54e30, 0x24420001, 0x10430007, 0x0,
+0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
+0x80011f1, 0x0, 0x14a00005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x8001207, 0x0, 0x8ee24e30, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020012, 0xac820000,
+0x24020001, 0xac820004, 0x14c0001b, 0x0,
+0x3c040001, 0x248451d0, 0xafa00010, 0xafa00014,
+0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
+0x34a5f001, 0x8ee201b0, 0x24420001, 0xaee201b0,
+0x8001223, 0x8ee201b0, 0x3c040001, 0x248451dc,
+0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
+0xc002403, 0x34a5f005, 0x8ee201ac, 0x24420001,
+0xaee201ac, 0x8ee201ac, 0x8ee20160, 0x3c040001,
+0x248451e8, 0x3405f001, 0x24420001, 0xaee20160,
+0x8ee20160, 0x3021, 0x3821, 0xafa00010,
+0xc002403, 0xafa00014, 0x8001238, 0x0,
+0x3c020001, 0x2442f5a8, 0x21100, 0x21182,
+0x431025, 0x3c010001, 0xac221278, 0x96e2045a,
+0x30420003, 0x10400025, 0x3c050fff, 0x8ee204c8,
+0x34a5ffff, 0x34420a00, 0xaee204c8, 0x8ee304c8,
+0x3c040001, 0x248451f4, 0x24020001, 0xa2e204ec,
+0xa2e204ed, 0x3c020002, 0x621825, 0x3c020001,
+0x2442a390, 0x451024, 0x21082, 0xaee304c8,
+0x3c030800, 0x431025, 0x3c010001, 0xac221220,
+0x3c020001, 0x2442add4, 0x451024, 0x21082,
+0x431025, 0x3c010001, 0xac221280, 0x96e6045a,
+0x3821, 0x24050011, 0xafa00010, 0xc002403,
+0xafa00014, 0x8001268, 0x0, 0x3c020001,
+0x2442a9d4, 0x21100, 0x21182, 0x3c030800,
+0x431025, 0x3c010001, 0xac221280, 0x96e2046a,
+0x30420010, 0x14400009, 0x0, 0x96e2047a,
+0x30420010, 0x10400112, 0x0, 0x96e2046a,
+0x30420010, 0x10400005, 0x3c020700, 0x96e2047a,
+0x30420010, 0x14400102, 0x3c020700, 0x34423000,
+0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
+0x304900ff, 0x512300e2, 0xafa00010, 0x8ee20608,
+0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
+0xac43060c, 0xac440610, 0x8f870120, 0x27623800,
+0x24e80020, 0x102102b, 0x50400001, 0x27683000,
+0x8f820128, 0x11020004, 0x0, 0x8f820124,
+0x15020007, 0x1021, 0x8ee201a4, 0x3021,
+0x24420001, 0xaee201a4, 0x80012ea, 0x8ee201a4,
+0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
+0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xace40000, 0xace50004, 0x8ee30608,
+0x24020008, 0xa4e2000e, 0x2402000d, 0xace20018,
+0xace9001c, 0x318c0, 0x2463060c, 0x2e31021,
+0xace20008, 0x8ee204c4, 0xace20010, 0xaf880120,
+0x92e24e20, 0x14400037, 0x24060001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
+0x24420001, 0x10430007, 0x0, 0x8ee24e34,
+0x24420001, 0x10a20005, 0x0, 0x80012d4,
+0x0, 0x14a00005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x80012ea,
+0x0, 0x8ee24e30, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x24020007, 0xac820000, 0x24020001,
+0xac820004, 0x54c0000c, 0xaee90608, 0x3c040001,
+0x248451c8, 0xafa00010, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f000,
+0x800136d, 0x0, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x3021,
+0x24420001, 0xaee201a4, 0x8001351, 0x8ee201a4,
+0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
+0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
+0x24020011, 0xac620018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400037, 0x24060001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
+0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
+0x10430007, 0x0, 0x8ee24e34, 0x24420001,
+0x10a20005, 0x0, 0x800133b, 0x0,
+0x14a00005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x8001351, 0x0,
+0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020012, 0xac820000, 0x24020001, 0xac820004,
+0x14c0001b, 0x0, 0x3c040001, 0x248451d0,
+0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f001, 0x8ee201b0,
+0x24420001, 0xaee201b0, 0x800136d, 0x8ee201b0,
+0x3c040001, 0x248451dc, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f005,
+0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
+0x8ee20160, 0x3c040001, 0x248451e8, 0x3405f002,
+0x24420001, 0xaee20160, 0x8ee20160, 0x3021,
+0x3821, 0xafa00010, 0xc002403, 0xafa00014,
+0x96e6047a, 0x96e7046a, 0x3c040001, 0x24845200,
+0x24050012, 0xafa00010, 0xc002403, 0xafa00014,
+0xc004500, 0x0, 0xc002318, 0x0,
+0x3c060001, 0x34c63800, 0xaee00608, 0xaf400228,
+0xaf40022c, 0x96e30458, 0x8ee40000, 0x3c0512d8,
+0x34a5c358, 0x27623800, 0xaee27258, 0x27623800,
+0xaee27260, 0x27623800, 0xaee27264, 0x3661021,
+0xaee27270, 0x2402ffff, 0xaee004d4, 0xaee004e0,
+0xaee004e4, 0xaee004f0, 0xa2e004f4, 0xaee00e0c,
+0xaee00e18, 0xaee00e10, 0xaee00e14, 0xaee00e1c,
+0xaee0724c, 0xaee05244, 0xaee05240, 0xaee0523c,
+0xaee07250, 0xaee07254, 0xaee0725c, 0xaee07268,
+0xaee004d0, 0x2463ffff, 0x852025, 0xaee304f8,
+0xaee40000, 0xaf800060, 0xaf820064, 0x3c020100,
+0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
+0x304900ff, 0x512300e2, 0xafa00010, 0x8ee20608,
+0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
+0xac43060c, 0xac440610, 0x8f870120, 0x27623800,
+0x24e80020, 0x102102b, 0x50400001, 0x27683000,
+0x8f820128, 0x11020004, 0x0, 0x8f820124,
+0x15020007, 0x1021, 0x8ee201a4, 0x3021,
+0x24420001, 0xaee201a4, 0x8001422, 0x8ee201a4,
+0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
+0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xace40000, 0xace50004, 0x8ee30608,
+0x24020008, 0xa4e2000e, 0x2402000d, 0xace20018,
+0xace9001c, 0x318c0, 0x2463060c, 0x2e31021,
+0xace20008, 0x8ee204c4, 0xace20010, 0xaf880120,
+0x92e24e20, 0x14400037, 0x24060001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
+0x24420001, 0x10430007, 0x0, 0x8ee24e34,
+0x24420001, 0x10a20005, 0x0, 0x800140c,
+0x0, 0x14a00005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x8001422,
+0x0, 0x8ee24e30, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x24020007, 0xac820000, 0x24020001,
+0xac820004, 0x54c0000c, 0xaee90608, 0x3c040001,
+0x248451c8, 0xafa00010, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f000,
+0x80014a5, 0x0, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x3021,
+0x24420001, 0xaee201a4, 0x8001489, 0x8ee201a4,
+0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
+0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
+0x24020011, 0xac620018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400037, 0x24060001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
+0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
+0x10430007, 0x0, 0x8ee24e34, 0x24420001,
+0x10a20005, 0x0, 0x8001473, 0x0,
+0x14a00005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x8001489, 0x0,
+0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020012, 0xac820000, 0x24020001, 0xac820004,
+0x14c0001b, 0x0, 0x3c040001, 0x248451d0,
+0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f001, 0x8ee201b0,
+0x24420001, 0xaee201b0, 0x80014a5, 0x8ee201b0,
+0x3c040001, 0x248451dc, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f005,
+0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
+0x8ee20154, 0x24420001, 0xaee20154, 0xc0014dc,
+0x8ee20154, 0x8f8200a0, 0x30420004, 0x1440fffd,
+0x0, 0x8f820040, 0x30420001, 0x14400008,
+0x0, 0x8f430104, 0x24020001, 0x10620004,
+0x0, 0x8f420264, 0x10400006, 0x0,
+0x8ee2017c, 0x24420001, 0xaee2017c, 0x80014c5,
+0x8ee2017c, 0x8f820044, 0x34420004, 0xaf820044,
+0x8ee20178, 0x24420001, 0xaee20178, 0x8ee20178,
+0x8f8200d8, 0x8f8300d4, 0x431023, 0xaee2726c,
+0x8ee2726c, 0x1c400003, 0x3c030001, 0x431021,
+0xaee2726c, 0xc004064, 0x0, 0xc004440,
+0xaf800228, 0x8fbf0024, 0x8fb00020, 0x3e00008,
+0x27bd0028, 0x3e00008, 0x0, 0x3e00008,
+0x0, 0x0, 0x0, 0x2402002c,
+0xaf820050, 0xaee07274, 0x8f420238, 0xaee27278,
+0x8f820054, 0x24420067, 0xaf820058, 0xaee07b88,
+0xaee07b8c, 0xaee07b84, 0x3c010001, 0x370821,
+0xac2083bc, 0x3c010001, 0x370821, 0x3e00008,
+0xa02083b9, 0x27bdffd8, 0xafbf0024, 0xafb00020,
+0x8f820054, 0x3c030001, 0x8c635cd8, 0x24420067,
+0x1060000d, 0xaf820058, 0x3c020001, 0x571021,
+0x904283b8, 0x10400005, 0x3c030200, 0x3c010001,
+0x370821, 0x8001503, 0xa02083b8, 0x8ee20000,
+0x431025, 0xaee20000, 0x8f420218, 0x30420100,
+0x104000c6, 0x0, 0x8f8200b0, 0x30420004,
+0x104000c2, 0x0, 0x3c030001, 0x771821,
+0x8c6383d0, 0x8f820104, 0x146200b4, 0x0,
+0x3c030001, 0x771821, 0x8c6383d4, 0x8f8200b4,
+0x146200ae, 0x0, 0x8f8200b0, 0x3c030080,
+0x431024, 0x1040000d, 0x0, 0x8f82011c,
+0x34420002, 0xaf82011c, 0x8f8200b0, 0x2403fffb,
+0x431024, 0xaf8200b0, 0x8f82011c, 0x2403fffd,
+0x431024, 0x80015cc, 0xaf82011c, 0x3c030001,
+0x771821, 0x8c6383d0, 0x8f820104, 0x14620082,
+0x0, 0x3c030001, 0x771821, 0x8c6383d4,
+0x8f8200b4, 0x1462007c, 0x0, 0x3c070001,
+0xf73821, 0x8ce783d0, 0x8f8200b0, 0x3c040001,
+0x24845270, 0xafa00014, 0xafa20010, 0x8f8600b0,
+0x3c050005, 0xc002403, 0x34a50900, 0x8f82011c,
+0x34420002, 0xaf82011c, 0x8f830104, 0x8f8200b0,
+0x34420001, 0xaf8200b0, 0xaf830104, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20006, 0x0, 0x8ee201a4,
+0x24420001, 0xaee201a4, 0x80015a0, 0x8ee201a4,
+0x8f440208, 0x8f45020c, 0x26e20030, 0xac620008,
+0x24020400, 0xa462000e, 0x2402000f, 0xac620018,
+0xac60001c, 0xac640000, 0xac650004, 0x8ee204c4,
+0xac620010, 0xaf860120, 0x92e24e20, 0x14400037,
+0x0, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x24030040, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
+0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
+0x0, 0x800158a, 0x0, 0x14a00005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
+0xac800000, 0x80015a0, 0x0, 0x8ee24e30,
+0x24030040, 0x24420001, 0x50430003, 0x1021,
+0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x24020007,
+0xac820000, 0x24020001, 0xac820004, 0x8f82011c,
+0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201e4,
+0x3c070001, 0xf73821, 0x8ce783d0, 0x24420001,
+0xaee201e4, 0x8ee201e4, 0x3c040001, 0x2484527c,
+0x80015bd, 0xafa00010, 0x8f820104, 0x3c010001,
+0x370821, 0xac2283d0, 0x8f8200b4, 0x3c070001,
+0xf73821, 0x8ce783d0, 0x3c040001, 0x24845284,
+0x3c010001, 0x370821, 0xac2283d4, 0xafa00010,
+0xafa00014, 0x8f8600b0, 0x3c050005, 0xc002403,
+0x34a50900, 0x80015cc, 0x0, 0x8f820104,
+0x3c010001, 0x370821, 0xac2283d0, 0x8f8200b4,
+0x3c010001, 0x370821, 0xac2283d4, 0x8ee27274,
+0x92e304f4, 0x24420067, 0x14600006, 0xaee27274,
+0x8ee27274, 0x8f430234, 0x43102b, 0x1440007b,
+0x0, 0x8ee304e4, 0x8ee204f8, 0x14620004,
+0x0, 0x92e204f4, 0x50400074, 0xa2e004f4,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
+0x8001637, 0x8ee201a4, 0x8ee204e4, 0xac62001c,
+0x8ee404b0, 0x8ee504b4, 0x2462001c, 0xac620008,
+0x24020008, 0xa462000e, 0x24020011, 0xac620018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400037, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c830000, 0x24020012, 0x1462001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee54e30, 0x24420001, 0x10430007, 0x0,
+0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
+0x8001621, 0x0, 0x14a00005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x8001637, 0x0, 0x8ee24e30, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020012, 0xac820000,
+0x24020001, 0xac820004, 0x5600000b, 0x24100001,
+0x8ee204e4, 0x3c040001, 0x2484528c, 0xafa00014,
+0xafa20010, 0x8ee60608, 0x8f470228, 0x3c050009,
+0xc002403, 0x34a5f006, 0x16000003, 0x24020001,
+0x8001650, 0xa2e204f4, 0x8ee20170, 0x24420001,
+0xaee20170, 0x8ee20170, 0x8ee204e4, 0xa2e004f4,
+0xaee004f0, 0xaee07274, 0xaee204f8, 0x8ee20e1c,
+0x1040006d, 0x0, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x8021,
+0x24420001, 0xaee201a4, 0x80016ad, 0x8ee201a4,
+0x8ee2724c, 0xac62001c, 0x8ee404a8, 0x8ee504ac,
+0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
+0x24020011, 0xac620018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
+0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
+0x10430007, 0x0, 0x8ee24e34, 0x24420001,
+0x10a20005, 0x0, 0x8001697, 0x0,
+0x14a00005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x80016ad, 0x0,
+0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020012, 0xac820000, 0x24020001, 0xac820004,
+0x5600000b, 0x24100001, 0x8ee2724c, 0x3c040001,
+0x24845298, 0xafa00014, 0xafa20010, 0x8ee6724c,
+0x8f470280, 0x3c050009, 0xc002403, 0x34a5f008,
+0x56000001, 0xaee00e1c, 0x8ee20174, 0x24420001,
+0xaee20174, 0x8ee20174, 0x8ee24e24, 0x10400019,
+0x0, 0xaee04e24, 0x8f820040, 0x30420001,
+0x14400008, 0x0, 0x8f430104, 0x24020001,
+0x10620004, 0x0, 0x8f420264, 0x10400006,
+0x0, 0x8ee2017c, 0x24420001, 0xaee2017c,
+0x80016da, 0x8ee2017c, 0x8f820044, 0x34420004,
+0xaf820044, 0x8ee20178, 0x24420001, 0xaee20178,
+0x8ee20178, 0x8ee27278, 0x2442ff99, 0xaee27278,
+0x8ee27278, 0x1c4002ad, 0x0, 0x8f420238,
+0x104002aa, 0x0, 0x3c020001, 0x571021,
+0x904283e0, 0x144002a5, 0x0, 0x8f420080,
+0xaee2004c, 0x8f4200c0, 0xaee20048, 0x8f420084,
+0xaee20038, 0x8f420084, 0xaee20244, 0x8f420088,
+0xaee20248, 0x8f42008c, 0xaee2024c, 0x8f420090,
+0xaee20250, 0x8f420094, 0xaee20254, 0x8f420098,
+0xaee20258, 0x8f42009c, 0xaee2025c, 0x8f4200a0,
+0xaee20260, 0x8f4200a4, 0xaee20264, 0x8f4200a8,
+0xaee20268, 0x8f4200ac, 0xaee2026c, 0x8f4200b0,
+0xaee20270, 0x8f4200b4, 0xaee20274, 0x8f4200b8,
+0xaee20278, 0x8f4200bc, 0x24040001, 0xaee2027c,
+0xaee0003c, 0x41080, 0x571021, 0x8ee3003c,
+0x8c420244, 0x24840001, 0x621821, 0x2c82000f,
+0xaee3003c, 0x1440fff8, 0x41080, 0x8f4200cc,
+0xaee20050, 0x8f4200d0, 0xaee20054, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x8001775,
+0x8ee201a4, 0x8f440208, 0x8f45020c, 0x26e20030,
+0xac620008, 0x24020400, 0xa462000e, 0x2402000f,
+0xac620018, 0xac60001c, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c830000, 0x24020007,
+0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
+0x10430007, 0x0, 0x8ee24e34, 0x24420001,
+0x10a20005, 0x0, 0x800175f, 0x0,
+0x14a00005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x8001775, 0x0,
+0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020007, 0xac820000, 0x24020001, 0xac820004,
+0x12000212, 0x3c020400, 0xafa20018, 0x3c020001,
+0x571021, 0x904283b0, 0x1040010b, 0x0,
+0x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
+0x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
+0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
+0xac440610, 0x8f830054, 0x8f820054, 0x24690032,
+0x1221023, 0x2c420033, 0x1040006a, 0x5821,
+0x24180008, 0x240f000d, 0x240d0007, 0x240c0040,
+0x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
+0x102102b, 0x50400001, 0x27683000, 0x8f820128,
+0x11020004, 0x0, 0x8f820124, 0x15020007,
+0x1021, 0x8ee201a4, 0x8021, 0x24420001,
+0xaee201a4, 0x80017f3, 0x8ee201a4, 0x8ee40608,
+0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
+0xa32821, 0xa3302b, 0x822021, 0x862021,
+0xace40000, 0xace50004, 0x8ee20608, 0xa4f8000e,
+0xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
+0x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
+0xaf880120, 0x92e24e20, 0x14400033, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x80017e0,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x80017f3,
+0x0, 0x8ee24e30, 0x24420001, 0x504c0003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac8d0000, 0xac8e0004, 0x56000006, 0x240b0001,
+0x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
+0x0, 0x316300ff, 0x24020001, 0x14620077,
+0x3c050009, 0xaeea0608, 0x8f830054, 0x8f820054,
+0x24690032, 0x1221023, 0x2c420033, 0x10400061,
+0x5821, 0x240d0008, 0x240c0011, 0x24080012,
+0x24070040, 0x240a0001, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x8021,
+0x24420001, 0xaee201a4, 0x800185f, 0x8ee201a4,
+0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
+0x2462001c, 0xac620008, 0xa46d000e, 0xac6c0018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400033, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x1448001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x10470007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x800184c,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x800185f,
+0x0, 0x8ee24e30, 0x24420001, 0x50470003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac880000, 0xac8a0004, 0x56000006, 0x240b0001,
+0x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
+0x0, 0x316300ff, 0x24020001, 0x14620003,
+0x3c050009, 0x800197c, 0x24100001, 0x3c040001,
+0x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
+0x8f870124, 0x800187b, 0x34a5f011, 0x3c040001,
+0x248452b0, 0xafa00010, 0xafa00014, 0x8f860120,
+0x8f870124, 0x34a5f010, 0xc002403, 0x8021,
+0x800197c, 0x0, 0x3c040001, 0x248452bc,
+0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
+0x8001975, 0x34a5f00f, 0x8ee20608, 0x8f430228,
+0x24420001, 0x304900ff, 0x512300e2, 0xafa00010,
+0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
+0x8fa4001c, 0xac43060c, 0xac440610, 0x8f870120,
+0x27623800, 0x24e80020, 0x102102b, 0x50400001,
+0x27683000, 0x8f820128, 0x11020004, 0x0,
+0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x80018f7,
+0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
+0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xace40000, 0xace50004,
+0x8ee30608, 0x24020008, 0xa4e2000e, 0x2402000d,
+0xace20018, 0xace9001c, 0x318c0, 0x2463060c,
+0x2e31021, 0xace20008, 0x8ee204c4, 0xace20010,
+0xaf880120, 0x92e24e20, 0x14400037, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c830000, 0x24020007, 0x1462001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee54e30, 0x24420001, 0x10430007, 0x0,
+0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
+0x80018e1, 0x0, 0x14a00005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x80018f7, 0x0, 0x8ee24e30, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020007, 0xac820000,
+0x24020001, 0xac820004, 0x5600000c, 0xaee90608,
+0x3c040001, 0x248452c8, 0xafa00010, 0xafa00014,
+0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
+0x34a5f000, 0x800197c, 0x0, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x800195e,
+0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
+0x8ee504a4, 0x2462001c, 0xac620008, 0x24020008,
+0xa462000e, 0x24020011, 0xac620018, 0xac640000,
+0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
+0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
+0x24420001, 0x10430007, 0x0, 0x8ee24e34,
+0x24420001, 0x10a20005, 0x0, 0x8001948,
+0x0, 0x14a00005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x800195e,
+0x0, 0x8ee24e30, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x24020012, 0xac820000, 0x24020001,
+0xac820004, 0x5600001d, 0x24100001, 0x3c040001,
+0x248452d0, 0xafa00010, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f001,
+0x8ee201b0, 0x24420001, 0xaee201b0, 0x800197c,
+0x8ee201b0, 0x3c040001, 0x248452dc, 0xafa00014,
+0x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f005,
+0xc002403, 0x0, 0x8ee201ac, 0x8021,
+0x24420001, 0xaee201ac, 0x8ee201ac, 0x1200000c,
+0x24020001, 0x3c010001, 0x370821, 0xa02083b0,
+0x8f420238, 0x8ee30158, 0x24630001, 0xaee30158,
+0x8ee30158, 0x800198c, 0xaee27278, 0x24020001,
+0x3c010001, 0x370821, 0xa02283b0, 0x3c020001,
+0x8c425cd8, 0x10400187, 0x0, 0x8ee27b84,
+0x24430001, 0x284200c9, 0x144001a4, 0xaee37b84,
+0x8ee204d4, 0x30420002, 0x14400119, 0xaee07b84,
+0x8ee204d4, 0x3c030600, 0x34631000, 0x34420002,
+0xaee204d4, 0xafa30018, 0x8ee20608, 0x8f430228,
+0x24420001, 0x304a00ff, 0x514300fd, 0xafa00010,
+0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
+0x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
+0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
+0x1040006a, 0x5821, 0x24180008, 0x240f000d,
+0x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
+0x27623800, 0x24e80020, 0x102102b, 0x50400001,
+0x27683000, 0x8f820128, 0x11020004, 0x0,
+0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x8001a15,
+0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
+0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xace40000, 0xace50004,
+0x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
+0x210c0, 0x2442060c, 0x2e21021, 0xace20008,
+0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
+0x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x8001a02, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
+0xac800000, 0x8001a15, 0x0, 0x8ee24e30,
+0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
+0x56000006, 0x240b0001, 0x8f820054, 0x1221023,
+0x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
+0x24020001, 0x54620078, 0xafa00010, 0xaeea0608,
+0x8f830054, 0x8f820054, 0x24690032, 0x1221023,
+0x2c420033, 0x10400061, 0x5821, 0x240d0008,
+0x240c0011, 0x24080012, 0x24070040, 0x240a0001,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
+0x8001a81, 0x8ee201a4, 0x8ee20608, 0xac62001c,
+0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
+0xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x8001a6e, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
+0xac800000, 0x8001a81, 0x0, 0x8ee24e30,
+0x24420001, 0x50470003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
+0x56000006, 0x240b0001, 0x8f820054, 0x1221023,
+0x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
+0x24020001, 0x10620022, 0x0, 0x3c040001,
+0x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
+0x8f870124, 0x3c050009, 0xc002403, 0x34a5f011,
+0x8001aad, 0x0, 0x3c040001, 0x248452b0,
+0xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
+0xc002403, 0x34a5f010, 0x8001aad, 0x0,
+0x3c040001, 0x248452bc, 0xafa00014, 0x8ee60608,
+0x8f470228, 0x3c050009, 0xc002403, 0x34a5f00f,
+0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
+0x8ee2015c, 0x24420001, 0xaee2015c, 0x8ee2015c,
+0x8ee204d4, 0x30420001, 0x10400055, 0x0,
+0x8f420218, 0x30420080, 0x10400029, 0x0,
+0x8f820044, 0x34420040, 0xaf820044, 0x8ee27b7c,
+0x402821, 0x8ee200c0, 0x8ee300c4, 0x24060000,
+0x2407ffff, 0x2021, 0x461024, 0x1444000d,
+0x671824, 0x1465000b, 0x0, 0x8ee27b80,
+0x402821, 0x8ee200e0, 0x8ee300e4, 0x2021,
+0x461024, 0x14440003, 0x671824, 0x1065000b,
+0x0, 0x8ee200c0, 0x8ee300c4, 0x8ee400e0,
+0x8ee500e4, 0xaee37b7c, 0xaee57b80, 0x8f820044,
+0x38420020, 0x8001b38, 0xaf820044, 0x8f820044,
+0x2403ffdf, 0x431024, 0x8001b38, 0xaf820044,
+0x8f820044, 0x2403ffdf, 0x431024, 0xaf820044,
+0x8ee27b7c, 0x402821, 0x8ee200c0, 0x8ee300c4,
+0x24060000, 0x2407ffff, 0x2021, 0x461024,
+0x1444000d, 0x671824, 0x1465000b, 0x0,
+0x8ee27b80, 0x402821, 0x8ee200e0, 0x8ee300e4,
+0x2021, 0x461024, 0x14440003, 0x671824,
+0x1065000b, 0x0, 0x8ee200c0, 0x8ee300c4,
+0x8ee400e0, 0x8ee500e4, 0xaee37b7c, 0xaee57b80,
+0x8f820044, 0x38420040, 0x8001b38, 0xaf820044,
+0x8f820044, 0x34420040, 0x8001b38, 0xaf820044,
+0x8f820044, 0x34420040, 0xaf820044, 0x8ee27b8c,
+0x24430001, 0x28420015, 0x14400028, 0xaee37b8c,
+0x8f820044, 0x38420020, 0xaf820044, 0x8001b38,
+0xaee07b8c, 0x8ee204d4, 0x30420001, 0x10400011,
+0x0, 0x8f420218, 0x30420080, 0x10400009,
+0x0, 0x8f820044, 0x34420020, 0xaf820044,
+0x8f820044, 0x2403ffbf, 0x431024, 0x8001b36,
+0xaf820044, 0x8f820044, 0x34420060, 0x8001b36,
+0xaf820044, 0x8f820044, 0x34420040, 0xaf820044,
+0x8ee27b88, 0x24430001, 0x28421389, 0x14400005,
+0xaee37b88, 0x8f820044, 0x38420020, 0xaf820044,
+0xaee07b88, 0xc004603, 0x0, 0x8fbf0024,
+0x8fb00020, 0x3e00008, 0x27bd0028, 0x27bdffb8,
+0xafbf0044, 0xafb60040, 0xafb5003c, 0xafb40038,
+0xafb30034, 0xafb20030, 0xafb1002c, 0xafb00028,
+0x8f960064, 0x32c20004, 0x1040000c, 0x24020004,
+0xaf820064, 0x8f420114, 0xaee204e0, 0x8f820060,
+0x34420008, 0xaf820060, 0x8ee2016c, 0x24420001,
+0xaee2016c, 0x80022f4, 0x8ee2016c, 0x32c20001,
+0x10400004, 0x24020001, 0xaf820064, 0x80022f4,
+0x0, 0x32c20002, 0x1440000c, 0x3c050003,
+0x3c040001, 0x24845354, 0x34a50001, 0x2c03021,
+0x3821, 0xafa00010, 0xc002403, 0xafa00014,
+0x2402fff8, 0x80022f4, 0xaf820064, 0x8f43022c,
+0x8f42010c, 0x5062000c, 0xafa00010, 0x8f42022c,
+0x21080, 0x5a1021, 0x8c420300, 0xafa20020,
+0x8f42022c, 0x24070001, 0x24420001, 0x3042003f,
+0x8001b80, 0xaf42022c, 0x3c040001, 0x24845360,
+0xafa00014, 0x8f46022c, 0x8f47010c, 0x3c050003,
+0xc002403, 0x34a5f01f, 0x3821, 0x14e00003,
+0x0, 0x80022ed, 0xaf960064, 0x93a20020,
+0x2443ffff, 0x2c620011, 0x10400658, 0x31080,
+0x3c010001, 0x220821, 0x8c225418, 0x400008,
+0x0, 0x8fa20020, 0x30420fff, 0xaee20e0c,
+0x8f820060, 0x34420200, 0xaf820060, 0x8ee20118,
+0x24420001, 0xaee20118, 0x80022e8, 0x8ee20118,
+0x8fa20020, 0x24030001, 0x3c010001, 0x370821,
+0xa02383b1, 0x30420fff, 0xaee25238, 0x8f820060,
+0x34420100, 0xaf820060, 0x8ee20144, 0x24420001,
+0xaee20144, 0x80022e8, 0x8ee20144, 0x8fa20020,
+0x21200, 0x22502, 0x24020001, 0x10820005,
+0x24020002, 0x10820009, 0x2402fffe, 0x8001bc9,
+0xafa00010, 0x8ee204d4, 0xaee40070, 0xaee40074,
+0x34420001, 0x8001bbd, 0xaee204d4, 0x8ee304d4,
+0xaee40070, 0xaee40074, 0x621824, 0xaee304d4,
+0x8f840054, 0x41442, 0x41c82, 0x431021,
+0x41cc2, 0x431023, 0x41d02, 0x431021,
+0x41d42, 0x431023, 0x8001bd0, 0xaee20078,
+0x3c040001, 0x2484536c, 0xafa00014, 0x8fa60020,
+0x3c050003, 0xc002403, 0x34a50004, 0x8ee20110,
+0x24420001, 0xaee20110, 0x80022e8, 0x8ee20110,
+0x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
+0x920c0, 0x2e41021, 0x9442727c, 0x30424000,
+0x1040000a, 0x971021, 0x97430212, 0xa443727e,
+0x8f430214, 0x971021, 0xac437280, 0x2e41821,
+0x34028000, 0x8001c79, 0xa462727c, 0x9443727e,
+0x97420212, 0x14620006, 0x2e41021, 0x971021,
+0x8c437280, 0x8f420214, 0x1062009f, 0x2e41021,
+0x9442727c, 0x30428000, 0x1040002a, 0x2406ffff,
+0x2021, 0x410c0, 0x2e21021, 0x9442737c,
+0x30424000, 0x54400005, 0x803021, 0x24840001,
+0x2c820080, 0x1440fff8, 0x410c0, 0x4c10010,
+0x618c0, 0x610c0, 0x571821, 0x8c63737c,
+0x571021, 0xafa30010, 0x8c427380, 0x3c040001,
+0x24845378, 0xafa20014, 0x8f470214, 0x3c050003,
+0xc002403, 0x34a50013, 0x8001c90, 0x3c020800,
+0x97440212, 0x771021, 0xa444737e, 0x8f440214,
+0x771021, 0x2e31821, 0xac447380, 0x34028000,
+0xa462737c, 0x910c0, 0x2e21021, 0x8001c79,
+0xa446727c, 0x2e41021, 0x9445727c, 0x8001c2e,
+0x510c0, 0x9443737e, 0x97420212, 0x14620006,
+0x510c0, 0x971021, 0x8c437380, 0x8f420214,
+0x10620065, 0x510c0, 0x2e21021, 0x9445737c,
+0x510c0, 0x2e21021, 0x9442737c, 0x30428000,
+0x1040fff0, 0x971021, 0x520c0, 0x971021,
+0x9443737e, 0x97420212, 0x14620006, 0x2406ffff,
+0x971021, 0x8c437380, 0x8f420214, 0x10620053,
+0x3c020800, 0x2021, 0x410c0, 0x2e21021,
+0x9442737c, 0x30424000, 0x54400005, 0x803021,
+0x24840001, 0x2c820080, 0x1440fff8, 0x410c0,
+0x4c10023, 0x618c0, 0x910c0, 0x571821,
+0x8c63727c, 0x571021, 0xafa30010, 0x8c427280,
+0x3c040001, 0x24845384, 0xafa20014, 0x8f470214,
+0x3c050003, 0xc002403, 0x34a5f017, 0x8001c90,
+0x3c020800, 0x8f430210, 0xb71021, 0xac43777c,
+0x8f430214, 0xb71021, 0xac437780, 0x3c020001,
+0x571021, 0x8c4283b4, 0x24420001, 0x3c010001,
+0x370821, 0xac2283b4, 0x3c030001, 0x771821,
+0x8c6383b4, 0x2e51021, 0x8001c82, 0xa443777c,
+0x97440212, 0x771021, 0xa444737e, 0x8f440214,
+0x771021, 0x2e31821, 0xac447380, 0x34028000,
+0xa462737c, 0x510c0, 0x2e21021, 0xa446737c,
+0x2021, 0x428c0, 0x2e51021, 0x9442777c,
+0x1040ffdc, 0x24840001, 0x2c820080, 0x5440fffa,
+0x428c0, 0x92e204d8, 0x10400006, 0x24020001,
+0x8ee304dc, 0x1221004, 0x621825, 0x8001c8f,
+0xaee304dc, 0x8f830228, 0x24020001, 0x1221004,
+0x621825, 0xaf830228, 0x3c020800, 0x34421000,
+0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
+0x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
+0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
+0xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
+0x24690032, 0x1221023, 0x2c420033, 0x1040006a,
+0x5821, 0x24100008, 0x240f000d, 0x240d0007,
+0x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
+0x24e80020, 0x102102b, 0x50400001, 0x27683000,
+0x8f820128, 0x11020004, 0x0, 0x8f820124,
+0x15020007, 0x1021, 0x8ee201a4, 0x3821,
+0x24420001, 0xaee201a4, 0x8001d08, 0x8ee201a4,
+0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
+0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xace40000, 0xace50004, 0x8ee20608,
+0xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
+0x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
+0xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
+0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c820000, 0x144d001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8001cf5, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
+0x8001d08, 0x0, 0x8ee24e30, 0x24420001,
+0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
+0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
+0x1440ff9d, 0x0, 0x316300ff, 0x24020001,
+0x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
+0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
+0x10400061, 0x5821, 0x240e0008, 0x240d0011,
+0x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x3821, 0x24420001, 0xaee201a4, 0x8001d74,
+0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
+0x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
+0xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
+0xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
+0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c820000, 0x144a001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x10480007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8001d61, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
+0x8001d74, 0x0, 0x8ee24e30, 0x24420001,
+0x50480003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
+0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
+0x1440ffa6, 0x0, 0x316300ff, 0x24020001,
+0x10620022, 0x0, 0x3c040001, 0x24845390,
+0xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
+0x3c050009, 0xc002403, 0x34a5f011, 0x8001da0,
+0x0, 0x3c040001, 0x2484539c, 0xafa00014,
+0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
+0x34a5f010, 0x8001da0, 0x0, 0x3c040001,
+0x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
+0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20124,
+0x24420001, 0xaee20124, 0x8001f97, 0x8ee20124,
+0x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
+0x928c0, 0x2e51021, 0x9442727c, 0x30428000,
+0x1040002f, 0x2e51021, 0x9442727c, 0x30424000,
+0x1440001c, 0xb71021, 0x9443727e, 0x97420212,
+0x14620018, 0xb71021, 0x8c437280, 0x8f420214,
+0x54620016, 0xafa20010, 0x92e204d8, 0x10400007,
+0x24020001, 0x8ee304dc, 0x1221004, 0x21027,
+0x621824, 0x8001dc9, 0xaee304dc, 0x8f830228,
+0x1221004, 0x21027, 0x621824, 0xaf830228,
+0x910c0, 0x2e21821, 0x3402c000, 0x8001e4e,
+0xa462727c, 0x8f420214, 0xafa20010, 0x910c0,
+0x571021, 0x8c42727c, 0x3c040001, 0x248453b4,
+0x3c050003, 0xafa20014, 0x8f470210, 0x34a5f01c,
+0xc002403, 0x1203021, 0x8001e83, 0x3c020800,
+0xb71021, 0x9443727e, 0x97420212, 0x14620019,
+0x918c0, 0xb71021, 0x8c437280, 0x8f420214,
+0x14620014, 0x918c0, 0x2e51021, 0x9447727c,
+0x720c0, 0x971021, 0x9443737e, 0xb71021,
+0xa443727e, 0x971021, 0x8c437380, 0xb71021,
+0xac437280, 0x2e41021, 0x9443737c, 0x2e51021,
+0xa443727c, 0x2e41821, 0x3402c000, 0x8001e4e,
+0xa462737c, 0x2e31021, 0x9447727c, 0x3021,
+0x720c0, 0x2e41021, 0x9442737c, 0x4021,
+0x30428000, 0x14400025, 0xe02821, 0x605021,
+0x340bc000, 0x971021, 0x9443737e, 0x97420212,
+0x54620015, 0xe02821, 0x971021, 0x8c437380,
+0x8f420214, 0x54620010, 0xe02821, 0x11000006,
+0x2e41021, 0x9443737c, 0x510c0, 0x2e21021,
+0x8001e1a, 0xa443737c, 0x9443737c, 0x2ea1021,
+0xa443727c, 0x710c0, 0x2e21021, 0xa44b737c,
+0x8001e28, 0x24060001, 0x510c0, 0x2e21021,
+0x9447737c, 0x720c0, 0x2e41021, 0x9442737c,
+0x30428000, 0x1040ffdf, 0x25080001, 0x30c200ff,
+0x14400025, 0x2021, 0x720c0, 0x971021,
+0x9443737e, 0x97420212, 0x1462000f, 0x910c0,
+0x971021, 0x8c437380, 0x8f420214, 0x1462000a,
+0x910c0, 0x2e41821, 0x3402c000, 0x15000015,
+0xa462737c, 0x910c0, 0x2e21821, 0x34028000,
+0x8001e4e, 0xa462727c, 0x571021, 0x8c42727c,
+0x3c040001, 0x248453c0, 0x3c050003, 0xafa20010,
+0x710c0, 0x571021, 0x8c42737c, 0x34a5001e,
+0x1203021, 0xc002403, 0xafa20014, 0x8001e83,
+0x3c020800, 0x2021, 0x428c0, 0xb71021,
+0x9443777e, 0x97420212, 0x5462002b, 0x24840001,
+0xb71021, 0x8c437780, 0x8f420214, 0x54620026,
+0x24840001, 0x3c020001, 0x571021, 0x8c4283b4,
+0x2442ffff, 0x3c010001, 0x370821, 0xac2283b4,
+0x3c020001, 0x571021, 0x8c4283b4, 0x809021,
+0x242102b, 0x1040000e, 0x24b1777c, 0x24b07784,
+0x2f02021, 0x2f12821, 0xc002490, 0x24060008,
+0x26310008, 0x3c020001, 0x571021, 0x8c4283b4,
+0x26520001, 0x242102b, 0x1440fff5, 0x26100008,
+0x3c040001, 0x972021, 0x8c8483b4, 0x24050008,
+0x420c0, 0x2484777c, 0xc002488, 0x2e42021,
+0x8001e83, 0x3c020800, 0x2c820080, 0x1440ffcf,
+0x428c0, 0x3c020800, 0x34422000, 0xafa20018,
+0x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
+0x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
+0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
+0xac440610, 0x8f830054, 0x8f820054, 0x24690032,
+0x1221023, 0x2c420033, 0x1040006a, 0x5821,
+0x24100008, 0x240f000d, 0x240d0007, 0x240c0040,
+0x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
+0x102102b, 0x50400001, 0x27683000, 0x8f820128,
+0x11020004, 0x0, 0x8f820124, 0x15020007,
+0x1021, 0x8ee201a4, 0x3821, 0x24420001,
+0xaee201a4, 0x8001efb, 0x8ee201a4, 0x8ee40608,
+0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
+0xa32821, 0xa3302b, 0x822021, 0x862021,
+0xace40000, 0xace50004, 0x8ee20608, 0xa4f0000e,
+0xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
+0x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
+0xaf880120, 0x92e24e20, 0x14400033, 0x24070001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x8001ee8,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x8001efb,
+0x0, 0x8ee24e30, 0x24420001, 0x504c0003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac8d0000, 0xac8e0004, 0x54e00006, 0x240b0001,
+0x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
+0x0, 0x316300ff, 0x24020001, 0x54620078,
+0xafa00010, 0xaeea0608, 0x8f830054, 0x8f820054,
+0x24690032, 0x1221023, 0x2c420033, 0x10400061,
+0x5821, 0x240e0008, 0x240d0011, 0x240a0012,
+0x24080040, 0x240c0001, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x3821,
+0x24420001, 0xaee201a4, 0x8001f67, 0x8ee201a4,
+0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
+0x2462001c, 0xac620008, 0xa46e000e, 0xac6d0018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400033, 0x24070001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x144a001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x10480007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x8001f54,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x8001f67,
+0x0, 0x8ee24e30, 0x24420001, 0x50480003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac8a0000, 0xac8c0004, 0x54e00006, 0x240b0001,
+0x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
+0x0, 0x316300ff, 0x24020001, 0x10620022,
+0x0, 0x3c040001, 0x24845390, 0xafa00010,
+0xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
+0xc002403, 0x34a5f011, 0x8001f93, 0x0,
+0x3c040001, 0x2484539c, 0xafa00014, 0x8f860120,
+0x8f870124, 0x3c050009, 0xc002403, 0x34a5f010,
+0x8001f93, 0x0, 0x3c040001, 0x248453a8,
+0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
+0xc002403, 0x34a5f00f, 0x8ee201ac, 0x24420001,
+0xaee201ac, 0x8ee201ac, 0x8ee20128, 0x24420001,
+0xaee20128, 0x8ee20128, 0x8ee20164, 0x24420001,
+0xaee20164, 0x80022e8, 0x8ee20164, 0x8fa20020,
+0x21200, 0x21d02, 0x24020001, 0x10620005,
+0x24020002, 0x1062000d, 0x0, 0x8001fb7,
+0xafa00010, 0x92e204d8, 0x14400006, 0x24020001,
+0x8f820228, 0xaee204dc, 0x2402ffff, 0xaf820228,
+0x24020001, 0x8001fbe, 0xa2e204d8, 0x92e204d8,
+0x5040000c, 0xa2e004d8, 0x8ee204dc, 0xaf820228,
+0x8001fbe, 0xa2e004d8, 0x3c040001, 0x248453c8,
+0xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
+0x34a5f009, 0x8ee2013c, 0x24420001, 0xaee2013c,
+0x80022e8, 0x8ee2013c, 0x8fa20020, 0x21200,
+0x22502, 0x24020001, 0x10820005, 0x24020002,
+0x1082000f, 0x0, 0x8001fe3, 0xafa00010,
+0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
+0x34420008, 0xaf820220, 0x24020001, 0x3c010001,
+0x370821, 0xa02283b2, 0x8001fea, 0xaee40108,
+0x8f820220, 0x3c0308ff, 0x3463fff7, 0x431024,
+0xaf820220, 0x3c010001, 0x370821, 0xa02083b2,
+0x8001fea, 0xaee40108, 0x3c040001, 0x248453d4,
+0xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
+0x34a5f00a, 0x8ee2012c, 0x24420001, 0xaee2012c,
+0x80022e8, 0x8ee2012c, 0x8fa20020, 0x21200,
+0x21d02, 0x24020001, 0x10620005, 0x24020002,
+0x1062000e, 0x0, 0x8002011, 0xafa00010,
+0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
+0x34420008, 0xaf820220, 0x24020001, 0x3c010001,
+0x370821, 0x8002018, 0xa02283b3, 0x3c020001,
+0x571021, 0x904283b2, 0x3c010001, 0x370821,
+0x1440000e, 0xa02083b3, 0x8f820220, 0x3c0308ff,
+0x3463fff7, 0x431024, 0x8002018, 0xaf820220,
+0x3c040001, 0x248453e0, 0xafa00014, 0x8fa60020,
+0x3c050003, 0xc002403, 0x34a5f00b, 0x8ee20114,
+0x24420001, 0xaee20114, 0x80022e8, 0x8ee20114,
+0x27840208, 0x27450200, 0xc00249a, 0x24060008,
+0x26e40094, 0x27450200, 0xc00249a, 0x24060008,
+0x8ee20134, 0x24420001, 0xaee20134, 0x80022e8,
+0x8ee20134, 0x8f460248, 0x2021, 0xc005108,
+0x24050004, 0x8ee20130, 0x24420001, 0xaee20130,
+0x80022e8, 0x8ee20130, 0x8ef301cc, 0x8ef401d0,
+0x8ef501d8, 0x8ee20140, 0x26e40030, 0x24420001,
+0xaee20140, 0x8ef00140, 0x8ef10074, 0x8ef20070,
+0xc002488, 0x24050400, 0xaef301cc, 0xaef401d0,
+0xaef501d8, 0xaef00140, 0xaef10074, 0xaef20070,
+0x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
+0x27450200, 0x24060008, 0xaee20068, 0x24020006,
+0xc00249a, 0xaee20064, 0x3c023b9a, 0x3442ca00,
+0xaee2006c, 0x240203e8, 0x24040002, 0x24030001,
+0xaee20104, 0xaee40100, 0xaee3010c, 0x8f820220,
+0x30420008, 0x10400004, 0x0, 0xaee30108,
+0x8002061, 0x2021, 0xaee40108, 0x2021,
+0x3c030001, 0x641821, 0x90635c30, 0x2e41021,
+0x24840001, 0xa043009c, 0x2c82000f, 0x1440fff8,
+0x0, 0x8f820040, 0x2e41821, 0x24840001,
+0x21702, 0x24420030, 0xa062009c, 0x2e41021,
+0x80022e8, 0xa040009c, 0x24020001, 0x3c010001,
+0x370821, 0xa02283e0, 0x240b0400, 0x24080014,
+0x240a0040, 0x24090001, 0x8f830100, 0x27623000,
+0x24660020, 0xc2102b, 0x50400001, 0x27662800,
+0x8f820108, 0x10c20004, 0x0, 0x8f820104,
+0x14c20007, 0x26e20030, 0x8ee201a8, 0x3821,
+0x24420001, 0xaee201a8, 0x80020a8, 0x8ee201a8,
+0x8ee404b8, 0x8ee504bc, 0xac620008, 0xa46b000e,
+0xac680018, 0xac60001c, 0xac640000, 0xac650004,
+0x8ee204cc, 0xac620010, 0xaf860100, 0x92e204ec,
+0x1440000e, 0x24070001, 0x8ee24e28, 0x24420001,
+0x504a0003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e21021, 0xac480000, 0xac490004, 0x10e0ffd2,
+0x0, 0x80022e8, 0x0, 0x3c020900,
+0xaee05238, 0xaee0523c, 0xaee05240, 0xaee05244,
+0xaee001d0, 0x3c010001, 0x370821, 0xa02083b1,
+0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
+0x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
+0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
+0xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
+0x24690032, 0x1221023, 0x2c420033, 0x1040006a,
+0x5821, 0x24100008, 0x240f000d, 0x240d0007,
+0x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
+0x24e80020, 0x102102b, 0x50400001, 0x27683000,
+0x8f820128, 0x11020004, 0x0, 0x8f820124,
+0x15020007, 0x1021, 0x8ee201a4, 0x3821,
+0x24420001, 0xaee201a4, 0x800212c, 0x8ee201a4,
+0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
+0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xace40000, 0xace50004, 0x8ee20608,
+0xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
+0x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
+0xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
+0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c820000, 0x144d001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8002119, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
+0x800212c, 0x0, 0x8ee24e30, 0x24420001,
+0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
+0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
+0x1440ff9d, 0x0, 0x316300ff, 0x24020001,
+0x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
+0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
+0x10400061, 0x5821, 0x240e0008, 0x240d0011,
+0x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x3821, 0x24420001, 0xaee201a4, 0x8002198,
+0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
+0x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
+0xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
+0xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
+0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c820000, 0x144a001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x10480007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8002185, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
+0x8002198, 0x0, 0x8ee24e30, 0x24420001,
+0x50480003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
+0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
+0x1440ffa6, 0x0, 0x316300ff, 0x24020001,
+0x10620022, 0x0, 0x3c040001, 0x24845390,
+0xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
+0x3c050009, 0xc002403, 0x34a5f011, 0x80021c4,
+0x0, 0x3c040001, 0x2484539c, 0xafa00014,
+0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
+0x34a5f010, 0x80021c4, 0x0, 0x3c040001,
+0x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
+0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20120,
+0x24420001, 0xaee20120, 0x8ee20120, 0x8ee20168,
+0x24420001, 0xaee20168, 0x80022e8, 0x8ee20168,
+0x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
+0x27450200, 0x24060008, 0xc00249a, 0xaee20068,
+0x8f820220, 0x30420008, 0x14400002, 0x24020001,
+0x24020002, 0xaee20108, 0x8ee2011c, 0x24420001,
+0xaee2011c, 0x80022e8, 0x8ee2011c, 0x3c040001,
+0x248453ec, 0xafa00010, 0xafa00014, 0x8fa60020,
+0x3c050003, 0xc002403, 0x34a5f00f, 0x93a20020,
+0x3c030700, 0x34631000, 0x431025, 0xafa20018,
+0x8ee20608, 0x8f430228, 0x24420001, 0x304900ff,
+0x512300e2, 0xafa00010, 0x8ee20608, 0x210c0,
+0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
+0xac440610, 0x8f870120, 0x27623800, 0x24e80020,
+0x102102b, 0x50400001, 0x27683000, 0x8f820128,
+0x11020004, 0x0, 0x8f820124, 0x15020007,
+0x1021, 0x8ee201a4, 0x3821, 0x24420001,
+0xaee201a4, 0x800225d, 0x8ee201a4, 0x8ee40608,
+0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
+0xa32821, 0xa3302b, 0x822021, 0x862021,
+0xace40000, 0xace50004, 0x8ee30608, 0x24020008,
+0xa4e2000e, 0x2402000d, 0xace20018, 0xace9001c,
+0x318c0, 0x2463060c, 0x2e31021, 0xace20008,
+0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
+0x14400037, 0x24070001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c830000, 0x24020007,
+0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
+0x10430007, 0x0, 0x8ee24e34, 0x24420001,
+0x10a20005, 0x0, 0x8002247, 0x0,
+0x14a00005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x800225d, 0x0,
+0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020007, 0xac820000, 0x24020001, 0xac820004,
+0x54e0000c, 0xaee90608, 0x3c040001, 0x248453f4,
+0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f000, 0x80022e0,
+0x0, 0x8f830120, 0x27623800, 0x24660020,
+0xc2102b, 0x50400001, 0x27663000, 0x8f820128,
+0x10c20004, 0x0, 0x8f820124, 0x14c20007,
+0x0, 0x8ee201a4, 0x3821, 0x24420001,
+0xaee201a4, 0x80022c4, 0x8ee201a4, 0x8ee20608,
+0xac62001c, 0x8ee404a0, 0x8ee504a4, 0x2462001c,
+0xac620008, 0x24020008, 0xa462000e, 0x24020011,
+0xac620018, 0xac640000, 0xac650004, 0x8ee204c4,
+0xac620010, 0xaf860120, 0x92e24e20, 0x14400037,
+0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c830000, 0x24020012, 0x1462001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x24030040, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
+0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
+0x0, 0x80022ae, 0x0, 0x14a00005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
+0xac800000, 0x80022c4, 0x0, 0x8ee24e30,
+0x24030040, 0x24420001, 0x50430003, 0x1021,
+0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x24020012,
+0xac820000, 0x24020001, 0xac820004, 0x14e0001b,
+0x0, 0x3c040001, 0x248453fc, 0xafa00010,
+0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
+0xc002403, 0x34a5f001, 0x8ee201b0, 0x24420001,
+0xaee201b0, 0x80022e0, 0x8ee201b0, 0x3c040001,
+0x24845408, 0xafa00014, 0x8ee60608, 0x8f470228,
+0x3c050009, 0xc002403, 0x34a5f005, 0x8ee201ac,
+0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20150,
+0x24420001, 0xaee20150, 0x8ee20150, 0x8ee20160,
+0x24420001, 0xaee20160, 0x8ee20160, 0x8f43022c,
+0x8f42010c, 0x14620009, 0x24020002, 0xaf820064,
+0x8f820064, 0x14400005, 0x0, 0x8f43022c,
+0x8f42010c, 0x1462f875, 0x0, 0x8fbf0044,
+0x8fb60040, 0x8fb5003c, 0x8fb40038, 0x8fb30034,
+0x8fb20030, 0x8fb1002c, 0x8fb00028, 0x3e00008,
+0x27bd0048, 0x27bdfff8, 0x2408ffff, 0x10a00014,
+0x4821, 0x3c0aedb8, 0x354a8320, 0x90870000,
+0x24840001, 0x3021, 0x1071026, 0x30420001,
+0x10400002, 0x81842, 0x6a1826, 0x604021,
+0x24c60001, 0x2cc20008, 0x1440fff7, 0x73842,
+0x25290001, 0x125102b, 0x1440fff0, 0x0,
+0x1001021, 0x3e00008, 0x27bd0008, 0x27bdffe8,
+0x27642800, 0xafbf0010, 0xc002488, 0x24051000,
+0x24020021, 0xaf800100, 0xaf800104, 0xaf800108,
+0xaf800110, 0xaf800114, 0xaf800118, 0xaf800120,
+0xaf800124, 0xaf800128, 0xaf800130, 0xaf800134,
+0xaf800138, 0xaee04e28, 0xaee04e2c, 0xaee04e30,
+0xaee04e34, 0xaf82011c, 0x8f420218, 0x30420040,
+0x10400004, 0x0, 0x8f82011c, 0x34420004,
+0xaf82011c, 0x8fbf0010, 0x3e00008, 0x27bd0018,
+0x27bdffe0, 0xafbf0018, 0x8f820104, 0xafa20010,
+0x8f820100, 0x3c050002, 0xafa20014, 0x8f8600b0,
+0x8f87011c, 0x3c040001, 0x248454c0, 0xc002403,
+0x34a5f000, 0x8f8300b0, 0x3c027f00, 0x621824,
+0x3c020400, 0x10620029, 0x43102b, 0x14400008,
+0x3c022000, 0x3c020100, 0x10620024, 0x3c020200,
+0x10620011, 0x0, 0x8002374, 0x0,
+0x10620008, 0x3c024000, 0x1462001c, 0x0,
+0x8ee20190, 0x24420001, 0xaee20190, 0x8002374,
+0x8ee20190, 0x8ee2018c, 0x24420001, 0xaee2018c,
+0x8002374, 0x8ee2018c, 0x8f82011c, 0x34420002,
+0xaf82011c, 0x8f830104, 0x8f8200b0, 0x34420001,
+0xaf8200b0, 0xaf830104, 0x8f82011c, 0x2403fffd,
+0x431024, 0xaf82011c, 0x8ee201a0, 0x24420001,
+0xaee201a0, 0x8002377, 0x8ee201a0, 0x8f8200b0,
+0x34420001, 0xaf8200b0, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x27bdffe0, 0xafbf001c, 0xafb00018,
+0x8f820120, 0xafa20010, 0x8f820124, 0x3c050001,
+0xafa20014, 0x8f8600a0, 0x8f87011c, 0x3c040001,
+0x248454cc, 0xc002403, 0x34a5f000, 0x8f8300a0,
+0x3c027f00, 0x621824, 0x3c020400, 0x10620053,
+0x8021, 0x43102b, 0x14400008, 0x3c042000,
+0x3c020100, 0x1062004d, 0x3c020200, 0x1062003a,
+0x0, 0x80023e0, 0x0, 0x10640003,
+0x3c024000, 0x14620045, 0x0, 0x8f8200a0,
+0x441024, 0x10400006, 0x0, 0x8ee20194,
+0x24420001, 0xaee20194, 0x80023a9, 0x8ee20194,
+0x8ee20198, 0x24420001, 0xaee20198, 0x8ee20198,
+0x8f82011c, 0x34420002, 0xaf82011c, 0x8f82011c,
+0x30420200, 0x1040001b, 0x0, 0x8f8300a0,
+0x8f840124, 0x8f8200ac, 0x14400007, 0x24020001,
+0x3c020001, 0x3442f000, 0x621024, 0x50400001,
+0x24100001, 0x24020001, 0x1200000d, 0xaf8200a0,
+0x8f820124, 0x2442ffe0, 0xaf820124, 0x8f820124,
+0x8f820124, 0x27633000, 0x43102b, 0x10400005,
+0x276237e0, 0xaf820124, 0x80023ca, 0x0,
+0xaf840124, 0x8f82011c, 0x2403fffd, 0x431024,
+0x80023e3, 0xaf82011c, 0x8f82011c, 0x34420002,
+0xaf82011c, 0x8f830124, 0x8f8200a0, 0x34420001,
+0xaf8200a0, 0xaf830124, 0x8f82011c, 0x2403fffd,
+0x431024, 0xaf82011c, 0x8ee2019c, 0x24420001,
+0xaee2019c, 0x80023e3, 0x8ee2019c, 0x8f8200a0,
+0x34420001, 0xaf8200a0, 0x8fbf001c, 0x8fb00018,
+0x3e00008, 0x27bd0020, 0x0, 0x3c020001,
+0x8c425c58, 0x27bdffe8, 0xafbf0014, 0x14400012,
+0xafb00010, 0x3c100001, 0x26105dd0, 0x2002021,
+0xc002488, 0x24052000, 0x26021fe0, 0x3c010001,
+0xac225d94, 0x3c010001, 0xac225d90, 0xaf420250,
+0x24022000, 0xaf500254, 0xaf420258, 0x24020001,
+0x3c010001, 0xac225c58, 0x8fbf0014, 0x8fb00010,
+0x3e00008, 0x27bd0018, 0x3c030001, 0x8c635d94,
+0x8c820000, 0x8fa80010, 0x8fa90014, 0xac620000,
+0x3c020001, 0x8c425d94, 0x8c830004, 0xac430004,
+0xac450008, 0x8f840054, 0x2443ffe0, 0xac460010,
+0xac470014, 0xac480018, 0xac49001c, 0x3c010001,
+0xac235d94, 0xac44000c, 0x3c020001, 0x24425dd0,
+0x62182b, 0x10600005, 0x0, 0x3c020001,
+0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
+0x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
+0x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
+0xac620004, 0x3e00008, 0xaf430250, 0x3c030001,
+0x8c635d94, 0x3c020001, 0x8c425c40, 0x27bdffd0,
+0xafb40020, 0x8fb40040, 0xafb00010, 0x808021,
+0xafb50024, 0x8fb50044, 0x8fa40048, 0xafb10014,
+0xa08821, 0xafbf0028, 0xafb3001c, 0xafb20018,
+0xac620000, 0x3c050001, 0x8ca55d94, 0x3c020001,
+0x8c425c40, 0xc09021, 0xe09821, 0x10800006,
+0xaca20004, 0x24a50008, 0xc002490, 0x24060018,
+0x800244e, 0x0, 0x24a40008, 0xc002488,
+0x24050018, 0x3c020001, 0x8c425d94, 0x3c050001,
+0x24a55dd0, 0x2442ffe0, 0x3c010001, 0xac225d94,
+0x45102b, 0x10400005, 0x0, 0x3c020001,
+0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
+0x8c635d94, 0x8e020000, 0xac620000, 0x3c030001,
+0x8c635d94, 0x8e020004, 0xac620004, 0xac710008,
+0x8f840054, 0x2462ffe0, 0x3c010001, 0xac225d94,
+0x45102b, 0xac720010, 0xac730014, 0xac740018,
+0xac75001c, 0x10400005, 0xac64000c, 0x3c020001,
+0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
+0x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
+0x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
+0xac620004, 0xaf430250, 0x8fbf0028, 0x8fb50024,
+0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
+0x8fb00010, 0x3e00008, 0x27bd0030, 0x10a00005,
+0x0, 0xac800000, 0x24a5fffc, 0x14a0fffd,
+0x24840004, 0x3e00008, 0x0, 0x10c00007,
+0x0, 0x8c820000, 0x24840004, 0x24c6fffc,
+0xaca20000, 0x14c0fffb, 0x24a50004, 0x3e00008,
+0x0, 0x10c00007, 0x0, 0x8ca20000,
+0x24a50004, 0x24c6fffc, 0xac820000, 0x14c0fffb,
+0x24840004, 0x3e00008, 0x0, 0x3e00008,
+0x0, 0x27bdffd8, 0xafbf0020, 0x8ee304e4,
+0x8ee204e0, 0x10620436, 0x0, 0x8ee204e4,
+0x8ee304fc, 0x21100, 0x626021, 0x95870008,
+0x8d8a0000, 0x8d8b0004, 0x958d000a, 0x8ee2725c,
+0x8ee3726c, 0x30e4ffff, 0x441021, 0x62182b,
+0x10600015, 0x31a20004, 0x8f8200d8, 0x8ee37258,
+0x431023, 0xaee2726c, 0x8ee2726c, 0x1c400003,
+0x3c030001, 0x431021, 0xaee2726c, 0x8ee2725c,
+0x8ee3726c, 0x441021, 0x62182b, 0x10600006,
+0x31a20004, 0x8ee201b8, 0x24420001, 0xaee201b8,
+0x80028e1, 0x8ee201b8, 0x10400240, 0x31a20200,
+0x1040014d, 0x4821, 0x96e2045a, 0x30420010,
+0x10400149, 0x0, 0x8f840100, 0x27623000,
+0x24850020, 0xa2102b, 0x50400001, 0x27652800,
+0x8f820108, 0x10a20004, 0x0, 0x8f820104,
+0x14a20006, 0x2402000c, 0x8ee201a8, 0x24420001,
+0xaee201a8, 0x800252c, 0x8ee201a8, 0xac8a0000,
+0xac8b0004, 0x8ee37264, 0x24060005, 0xa482000e,
+0xac860018, 0xac830008, 0x8ee204e4, 0xac82001c,
+0x8ee204c8, 0xac820010, 0xaf850100, 0x92e204ec,
+0x14400036, 0x24090001, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e22021, 0x8c820000, 0x1446001f,
+0x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
+0x24030040, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
+0x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
+0x0, 0x8002516, 0x0, 0x14a00005,
+0x0, 0x8f820108, 0x24420020, 0xaf820108,
+0x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
+0xac800000, 0x800252c, 0x0, 0x8ee24e28,
+0x24030040, 0x24420001, 0x50430003, 0x1021,
+0x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
+0x210c0, 0x24424e38, 0x2e22021, 0x24020005,
+0xac820000, 0x24020001, 0xac820004, 0x1520000a,
+0x3c040001, 0xafab0010, 0x8ee27264, 0x3c040001,
+0x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
+0x80028be, 0x34a5f114, 0x8ee27264, 0x34843800,
+0x3641821, 0x24420010, 0x43102b, 0x14400073,
+0x0, 0x8ee27264, 0x24480010, 0x3641021,
+0x102102b, 0x14400002, 0x3c02ffff, 0x1024021,
+0x8f850100, 0x27623000, 0x24a60020, 0xc2102b,
+0x50400001, 0x27662800, 0x8f820108, 0x10c20004,
+0x0, 0x8f820104, 0x14c20007, 0x2563000c,
+0x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
+0x80025a0, 0x8ee201a8, 0x2c64000c, 0x1441021,
+0xaca20000, 0xaca30004, 0x24e2fff4, 0xa4a2000e,
+0x24020006, 0xaca80008, 0xaca20018, 0x8ee204e4,
+0xaca2001c, 0x8ee204c8, 0x3c030002, 0x431025,
+0xaca20010, 0xaf860100, 0x92e204ec, 0x14400037,
+0x24090001, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e22021, 0x8c830000, 0x24020005, 0x1462001f,
+0x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
+0x24030040, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
+0x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
+0x0, 0x800258a, 0x0, 0x14a00005,
+0x0, 0x8f820108, 0x24420020, 0xaf820108,
+0x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
+0xac800000, 0x80025a0, 0x0, 0x8ee24e28,
+0x24030040, 0x24420001, 0x50430003, 0x1021,
+0x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
+0x210c0, 0x24424e38, 0x2e22021, 0x24020005,
+0xac820000, 0x24020001, 0xac820004, 0x1520000a,
+0x2508fffc, 0xafab0010, 0x8ee27264, 0x3c040001,
+0x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
+0x80028be, 0x34a5f125, 0x34028100, 0xa5020000,
+0x9582000e, 0x800261d, 0xa5020002, 0x8f850100,
+0x27623000, 0x24a60020, 0xc2102b, 0x50400001,
+0x27662800, 0x8f820108, 0x10c20004, 0x0,
+0x8f820104, 0x14c20007, 0x2563000c, 0x8ee201a8,
+0x4821, 0x24420001, 0xaee201a8, 0x800260d,
+0x8ee201a8, 0x2c64000c, 0x1441021, 0xaca20000,
+0xaca30004, 0x8ee37264, 0x24e2fff4, 0xa4a2000e,
+0x24020006, 0xaca20018, 0x24630010, 0xaca30008,
+0x8ee204e4, 0xaca2001c, 0x8ee204c8, 0x3c030002,
+0x431025, 0xaca20010, 0xaf860100, 0x92e204ec,
+0x14400037, 0x24090001, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e22021, 0x8c830000, 0x24020005,
+0x1462001f, 0x0, 0x8ee34e28, 0x8ee24e2c,
+0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e2c, 0x8ee54e28, 0x24420001,
+0x10430007, 0x0, 0x8ee24e2c, 0x24420001,
+0x10a20005, 0x0, 0x80025f7, 0x0,
+0x14a00005, 0x0, 0x8f820108, 0x24420020,
+0xaf820108, 0x8f820108, 0x8c820004, 0x2c420011,
+0x50400013, 0xac800000, 0x800260d, 0x0,
+0x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x24020005, 0xac820000, 0x24020001, 0xac820004,
+0x1520000a, 0x34028100, 0xafab0010, 0x8ee27264,
+0x3c040001, 0x24845730, 0x3c050004, 0xafa20014,
+0x8ee604e4, 0x80028be, 0x34a5f015, 0x8ee37264,
+0xa462000c, 0x8ee37264, 0x9582000e, 0xa462000e,
+0x8002681, 0x24e70004, 0x8f840100, 0x27623000,
+0x24850020, 0xa2102b, 0x50400001, 0x27652800,
+0x8f820108, 0x10a20004, 0x0, 0x8f820104,
+0x14a20007, 0x24020006, 0x8ee201a8, 0x4821,
+0x24420001, 0xaee201a8, 0x8002677, 0x8ee201a8,
+0xac8a0000, 0xac8b0004, 0x8ee37264, 0xa487000e,
+0xac820018, 0xac830008, 0x8ee204e4, 0xac82001c,
+0x8ee204c8, 0x3c030002, 0x431025, 0xac820010,
+0xaf850100, 0x92e204ec, 0x14400037, 0x24090001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c830000, 0x24020005, 0x1462001f, 0x0,
+0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
+0x8ee54e28, 0x24420001, 0x10430007, 0x0,
+0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
+0x8002661, 0x0, 0x14a00005, 0x0,
+0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x8002677, 0x0, 0x8ee24e28, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
+0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
+0x24020001, 0xac820004, 0x15200009, 0x3c050004,
+0xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
+0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f004,
+0x8ee2725c, 0x30e7ffff, 0x471021, 0xaee2725c,
+0x8ee204e4, 0x8ee304fc, 0x8ee47258, 0x21100,
+0x431021, 0xac44000c, 0x8ee27258, 0xafa20018,
+0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
+0x10400004, 0x24620001, 0x2403fffe, 0x431024,
+0xafa2001c, 0x8ee27264, 0x3c060001, 0x34c63800,
+0x8ee3725c, 0x2405fff8, 0x471021, 0x24420007,
+0x451024, 0x24630007, 0xaee27258, 0x8ee2726c,
+0x8ee47258, 0x651824, 0x431023, 0xaee2726c,
+0x3661021, 0x82202b, 0x14800004, 0x3c03ffff,
+0x8ee27258, 0x431021, 0xaee27258, 0x8ee27258,
+0xaee27264, 0x8f8200f0, 0x24470008, 0x27621800,
+0xe2102b, 0x50400001, 0x27671000, 0x8f8200f4,
+0x14e20007, 0x0, 0x8ee201b4, 0x4821,
+0x24420001, 0xaee201b4, 0x80026c4, 0x8ee201b4,
+0x8f8200f0, 0x24090001, 0x8fa30018, 0x8fa4001c,
+0xac430000, 0xac440004, 0xaf8700f0, 0x15200012,
+0xd1142, 0x8f8200f0, 0xafa20010, 0x8f8200f4,
+0x3c040001, 0x2484573c, 0xafa20014, 0x8fa60018,
+0x8fa7001c, 0x3c050004, 0xc002403, 0x34a5f005,
+0x8ee20088, 0x24420001, 0xaee20088, 0x8ee20088,
+0x80028d3, 0xaee0725c, 0x30430003, 0x24020002,
+0x10620016, 0x28620003, 0x10400005, 0x24020001,
+0x10620008, 0x0, 0x8002703, 0x0,
+0x24020003, 0x10620017, 0x0, 0x8002703,
+0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
+0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
+0x8ee200e8, 0x8002703, 0x8ee300ec, 0x8ee200f0,
+0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
+0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002703,
+0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
+0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
+0x8ee200f8, 0x8ee300fc, 0x8ee2725c, 0x8ee400e0,
+0x8ee500e4, 0x401821, 0x1021, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0xaee400e0,
+0xaee500e4, 0x80028d3, 0xaee0725c, 0x30e2ffff,
+0x104001c1, 0x31a20200, 0x1040014d, 0x4821,
+0x96e2045a, 0x30420010, 0x10400149, 0x0,
+0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
+0x50400001, 0x27652800, 0x8f820108, 0x10a20004,
+0x0, 0x8f820104, 0x14a20006, 0x2402000c,
+0x8ee201a8, 0x24420001, 0xaee201a8, 0x800276e,
+0x8ee201a8, 0xac8a0000, 0xac8b0004, 0x8ee37264,
+0x24060005, 0xa482000e, 0xac860018, 0xac830008,
+0x8ee204e4, 0xac82001c, 0x8ee204c8, 0xac820010,
+0xaf850100, 0x92e204ec, 0x14400036, 0x24090001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c820000, 0x1446001f, 0x0, 0x8ee34e28,
+0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
+0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
+0x24420001, 0x10a20005, 0x0, 0x8002758,
+0x0, 0x14a00005, 0x0, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x800276e,
+0x0, 0x8ee24e28, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e22021, 0x24020005, 0xac820000, 0x24020001,
+0xac820004, 0x1520000a, 0x3c040001, 0xafab0010,
+0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
+0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f014,
+0x8ee27264, 0x34843800, 0x3641821, 0x24420010,
+0x43102b, 0x14400073, 0x0, 0x8ee27264,
+0x24480010, 0x3641021, 0x102102b, 0x14400002,
+0x3c02ffff, 0x1024021, 0x8f850100, 0x27623000,
+0x24a60020, 0xc2102b, 0x50400001, 0x27662800,
+0x8f820108, 0x10c20004, 0x0, 0x8f820104,
+0x14c20007, 0x2563000c, 0x8ee201a8, 0x4821,
+0x24420001, 0xaee201a8, 0x80027e2, 0x8ee201a8,
+0x2c64000c, 0x1441021, 0xaca20000, 0xaca30004,
+0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca80008,
+0xaca20018, 0x8ee204e4, 0xaca2001c, 0x8ee204c8,
+0x3c030002, 0x431025, 0xaca20010, 0xaf860100,
+0x92e204ec, 0x14400037, 0x24090001, 0x8ee24e28,
+0x210c0, 0x24424e38, 0x2e22021, 0x8c830000,
+0x24020005, 0x1462001f, 0x0, 0x8ee34e28,
+0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
+0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
+0x24420001, 0x10a20005, 0x0, 0x80027cc,
+0x0, 0x14a00005, 0x0, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x80027e2,
+0x0, 0x8ee24e28, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e22021, 0x24020005, 0xac820000, 0x24020001,
+0xac820004, 0x1520000a, 0x2508fffc, 0xafab0010,
+0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
+0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f015,
+0x34028100, 0xa5020000, 0x9582000e, 0x800285f,
+0xa5020002, 0x8f850100, 0x27623000, 0x24a60020,
+0xc2102b, 0x50400001, 0x27662800, 0x8f820108,
+0x10c20004, 0x0, 0x8f820104, 0x14c20007,
+0x2563000c, 0x8ee201a8, 0x4821, 0x24420001,
+0xaee201a8, 0x800284f, 0x8ee201a8, 0x2c64000c,
+0x1441021, 0xaca20000, 0xaca30004, 0x8ee37264,
+0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca20018,
+0x24630010, 0xaca30008, 0x8ee204e4, 0xaca2001c,
+0x8ee204c8, 0x3c030002, 0x431025, 0xaca20010,
+0xaf860100, 0x92e204ec, 0x14400037, 0x24090001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c830000, 0x24020005, 0x1462001f, 0x0,
+0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
+0x8ee54e28, 0x24420001, 0x10430007, 0x0,
+0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
+0x8002839, 0x0, 0x14a00005, 0x0,
+0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x800284f, 0x0, 0x8ee24e28, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
+0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
+0x24020001, 0xac820004, 0x1520000a, 0x34028100,
+0xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
+0x3c050004, 0xafa20014, 0x8ee604e4, 0x80028be,
+0x34a5f016, 0x8ee37264, 0xa462000c, 0x8ee37264,
+0x9582000e, 0xa462000e, 0x80028c2, 0x24e70004,
+0x8f830100, 0x27623000, 0x24640020, 0x82102b,
+0x50400001, 0x27642800, 0x8f820108, 0x10820004,
+0x0, 0x8f820104, 0x14820007, 0x24050005,
+0x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
+0x80028b6, 0x8ee201a8, 0xac6a0000, 0xac6b0004,
+0x8ee27264, 0xa467000e, 0xac650018, 0xac620008,
+0x8ee204e4, 0xac62001c, 0x8ee204c8, 0xac620010,
+0xaf840100, 0x92e204ec, 0x14400036, 0x24090001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
+0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
+0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
+0x24420001, 0x10a20005, 0x0, 0x80028a0,
+0x0, 0x14a00005, 0x0, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x80028b6,
+0x0, 0x8ee24e28, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e22021, 0x24020005, 0xac820000, 0x24020001,
+0xac820004, 0x1520000b, 0x3c050004, 0x3c040001,
+0x24845748, 0xafab0010, 0xafa00014, 0x8ee604e4,
+0x34a5f017, 0xc002403, 0x30e7ffff, 0x80028e1,
+0x0, 0x8ee27264, 0x3c050001, 0x30e4ffff,
+0x441021, 0xaee27264, 0x8ee2725c, 0x8ee37264,
+0x34a53800, 0x441021, 0xaee2725c, 0x3651021,
+0x62182b, 0x14600004, 0x3c03ffff, 0x8ee27264,
+0x431021, 0xaee27264, 0x8ee304e4, 0x96e20458,
+0x24630001, 0x2442ffff, 0x621824, 0xaee304e4,
+0x8ee304e4, 0x8ee204e0, 0x14620005, 0x0,
+0x8f820060, 0x2403fff7, 0x431024, 0xaf820060,
+0x8fbf0020, 0x3e00008, 0x27bd0028, 0x27bdffe0,
+0xafbf0018, 0x8ee304e8, 0x8ee204e0, 0x10620189,
+0x0, 0x8ee204e8, 0x8ee304fc, 0x21100,
+0x621821, 0x94670008, 0x92e204ed, 0x8c680000,
+0x8c690004, 0x10400023, 0x946a000a, 0x8ee204c8,
+0x34460400, 0x31420200, 0x1040001f, 0x0,
+0x96e2045a, 0x30420010, 0x1040001b, 0x3c028000,
+0x3c010001, 0x370821, 0xac2283d8, 0x8ee27264,
+0x9464000e, 0x3c050001, 0x34a53800, 0x24420004,
+0xaee27264, 0x8ee37264, 0x42400, 0x3651021,
+0x3c010001, 0x370821, 0xac2483dc, 0x62182b,
+0x14600005, 0x24e70004, 0x8ee27264, 0x3c03ffff,
+0x431021, 0xaee27264, 0x8ee27264, 0x8002917,
+0xaee27258, 0x8ee604c8, 0x8ee2726c, 0x30e4ffff,
+0x44102a, 0x10400015, 0x0, 0x8f8200d8,
+0x8ee37258, 0x431023, 0xaee2726c, 0x8ee2726c,
+0x1c400007, 0x44102a, 0x8ee2726c, 0x3c030001,
+0x431021, 0xaee2726c, 0x8ee2726c, 0x44102a,
+0x10400006, 0x0, 0x8ee201b8, 0x24420001,
+0xaee201b8, 0x8002a72, 0x8ee201b8, 0x3c020001,
+0x571021, 0x8c4283d8, 0x54400001, 0x24e7fffc,
+0x31420004, 0x104000b9, 0x30e2ffff, 0x3c020001,
+0x571021, 0x8c4283d8, 0x1040002f, 0x5021,
+0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
+0x50400001, 0x27652800, 0x8f820108, 0x10a20032,
+0x0, 0x8f820104, 0x10a2002f, 0x24020015,
+0xac880000, 0xac890004, 0x8ee37264, 0xa487000e,
+0xac820018, 0xac830008, 0x8ee204e8, 0x3c030001,
+0x771821, 0x8c6383dc, 0xac860010, 0x431025,
+0xac82001c, 0xaf850100, 0x92e204ec, 0x14400066,
+0x240a0001, 0x8ee24e28, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e21821, 0x24020015, 0xac620000, 0x24020001,
+0x80029bf, 0xac620004, 0x8f840100, 0x27623000,
+0x24850020, 0xa2102b, 0x50400001, 0x27652800,
+0x8f820108, 0x10a20004, 0x0, 0x8f820104,
+0x14a20006, 0x24020006, 0x8ee201a8, 0x24420001,
+0xaee201a8, 0x80029bf, 0x8ee201a8, 0xac880000,
+0xac890004, 0x8ee37264, 0xa487000e, 0xac820018,
+0xac830008, 0x8ee204e8, 0xac860010, 0xac82001c,
+0xaf850100, 0x92e204ec, 0x14400037, 0x240a0001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c830000, 0x24020005, 0x1462001f, 0x0,
+0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
+0x8ee54e28, 0x24420001, 0x10430007, 0x0,
+0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
+0x80029a9, 0x0, 0x14a00005, 0x0,
+0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x80029bf, 0x0, 0x8ee24e28, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
+0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
+0x24020001, 0xac820004, 0x1540000a, 0x24020001,
+0xafa90010, 0x8ee27264, 0x3c040001, 0x24845730,
+0x3c050004, 0xafa20014, 0x8ee604e4, 0x8002a4f,
+0x34a5f204, 0xa2e204ed, 0x8ee204e8, 0x8ee304fc,
+0x8ee47258, 0x3c060001, 0x34c63800, 0x3c010001,
+0x370821, 0xac2083d8, 0x3c010001, 0x370821,
+0xac2083dc, 0x21100, 0x431021, 0xac44000c,
+0x8ee27264, 0x2405fff8, 0x30e3ffff, 0x431021,
+0x24420007, 0x451024, 0x24630007, 0xaee27258,
+0x8ee2726c, 0x8ee47258, 0x651824, 0x431023,
+0xaee2726c, 0x3661021, 0x82202b, 0x14800004,
+0x3c03ffff, 0x8ee27258, 0x431021, 0xaee27258,
+0x8ee27258, 0x8002a64, 0xaee27264, 0x10400073,
+0x0, 0x8f830100, 0x27623000, 0x24640020,
+0x82102b, 0x14400002, 0x5021, 0x27642800,
+0x8f820108, 0x10820004, 0x0, 0x8f820104,
+0x14820006, 0x24050005, 0x8ee201a8, 0x24420001,
+0xaee201a8, 0x8002a46, 0x8ee201a8, 0xac680000,
+0xac690004, 0x8ee27264, 0xa467000e, 0xac650018,
+0xac620008, 0x8ee204e8, 0xac660010, 0xac62001c,
+0xaf840100, 0x92e204ec, 0x14400036, 0x240a0001,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
+0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
+0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
+0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
+0x24420001, 0x10a20005, 0x0, 0x8002a30,
+0x0, 0x14a00005, 0x0, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x8002a46,
+0x0, 0x8ee24e28, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
+0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
+0x2e22021, 0x24020005, 0xac820000, 0x24020001,
+0xac820004, 0x1540000c, 0x30e5ffff, 0x3c040001,
+0x24845748, 0x3c050004, 0xafa90010, 0xafa00014,
+0x8ee604e4, 0x34a5f237, 0xc002403, 0x30e7ffff,
+0x8002a72, 0x0, 0x8ee27264, 0x451021,
+0xaee27264, 0x8ee2726c, 0x8ee37264, 0x3c040001,
+0x34843800, 0xa2e004ed, 0x451023, 0xaee2726c,
+0x3641021, 0x62182b, 0x14600004, 0x3c03ffff,
+0x8ee27264, 0x431021, 0xaee27264, 0x8ee304e8,
+0x96e20458, 0x24630001, 0x2442ffff, 0x621824,
+0xaee304e8, 0x8ee304e8, 0x8ee204e0, 0x14620005,
+0x0, 0x8f820060, 0x2403fff7, 0x431024,
+0xaf820060, 0x8fbf0018, 0x3e00008, 0x27bd0020,
+0x27bdffe0, 0xafbf001c, 0xafb00018, 0x8f820100,
+0x8ee34e2c, 0x8f820104, 0x8f850108, 0x24020040,
+0x24630001, 0x50620003, 0x1021, 0x8ee24e2c,
+0x24420001, 0xaee24e2c, 0x8ee24e2c, 0x8ee34e2c,
+0x210c0, 0x24424e38, 0x2e22021, 0x8ee24e28,
+0x8c870004, 0x14620007, 0xa03021, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8002aa2,
+0xac800000, 0x8ee24e2c, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e2c, 0x24420001,
+0x210c0, 0x24424e38, 0x2e22021, 0x8c820004,
+0x8f830108, 0x21140, 0x621821, 0xaf830108,
+0xac800000, 0x8cc20018, 0x2443fffe, 0x2c620013,
+0x104000c1, 0x31080, 0x3c010001, 0x220821,
+0x8c225770, 0x400008, 0x0, 0x8ee204f0,
+0x471021, 0xaee204f0, 0x8ee204f0, 0x8f43023c,
+0x43102b, 0x144000be, 0x0, 0x8ee304e4,
+0x8ee204f8, 0x506200ba, 0xa2e004f4, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x8002b12,
+0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
+0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
+0xa462000e, 0x24020011, 0xac620018, 0xac640000,
+0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
+0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
+0x24420001, 0x10430007, 0x0, 0x8ee24e34,
+0x24420001, 0x10a20005, 0x0, 0x8002afc,
+0x0, 0x14a00005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x8002b12,
+0x0, 0x8ee24e30, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x24020012, 0xac820000, 0x24020001,
+0xac820004, 0x5600000b, 0x24100001, 0x8ee204e4,
+0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
+0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
+0x34a5f006, 0x16000003, 0x24020001, 0x8002b71,
+0xa2e204f4, 0x8ee20170, 0x24420001, 0xaee20170,
+0x8ee20170, 0x8ee204e4, 0xa2e004f4, 0xaee004f0,
+0xaee204f8, 0x8f42023c, 0x50400045, 0xaee07274,
+0x8ee20184, 0x24420001, 0xaee20184, 0x8ee20184,
+0x8002b71, 0xaee07274, 0x8ee20504, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee20504,
+0x24420001, 0xaee20504, 0x8ee20504, 0x8cc30018,
+0x21080, 0x571021, 0x8c440508, 0x24020003,
+0x1462000f, 0x0, 0x3c020001, 0x571021,
+0x904283b1, 0x10400014, 0x0, 0x8ee201d0,
+0x8ee35240, 0x441021, 0xaee201d0, 0x8ee201d8,
+0x641821, 0x306300ff, 0x8002b59, 0xaee35240,
+0x8ee201cc, 0x8ee30e10, 0x441021, 0xaee201cc,
+0x8ee201d8, 0x641821, 0x306301ff, 0xaee30e10,
+0x441021, 0xaee201d8, 0x8ee20000, 0x34420040,
+0x8002b71, 0xaee20000, 0x8ee2014c, 0x3c010001,
+0x370821, 0xa02083e0, 0x24420001, 0xaee2014c,
+0x8002b71, 0x8ee2014c, 0x94c7000e, 0x8cc2001c,
+0x3c040001, 0x24845760, 0xafa60014, 0xafa20010,
+0x8cc60018, 0x3c050008, 0xc002403, 0x34a50910,
+0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
+0x27bdff98, 0xafbf0060, 0xafbe005c, 0xafb60058,
+0xafb50054, 0xafb40050, 0xafb3004c, 0xafb20048,
+0xafb10044, 0xafb00040, 0x8f830108, 0x8f820104,
+0xafa00024, 0x106203e7, 0xafa0002c, 0x3c1e0001,
+0x37de3800, 0x3c0bffff, 0x8f930108, 0x8e620018,
+0x8f830104, 0x2443fffe, 0x2c620014, 0x104003cf,
+0x31080, 0x3c010001, 0x220821, 0x8c2257c0,
+0x400008, 0x0, 0x9663000e, 0x8ee2725c,
+0x8ee404f0, 0x431021, 0xaee2725c, 0x8e63001c,
+0x96e20458, 0x24840001, 0xaee404f0, 0x24630001,
+0x2442ffff, 0x621824, 0xaee304e4, 0x8f42023c,
+0x82202b, 0x148003b9, 0x0, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x8021, 0x24420001, 0xaee201a4, 0x8002bfe,
+0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
+0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
+0xa462000e, 0x24020011, 0xac620018, 0xac640000,
+0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
+0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x240c0040, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x8002be8,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400013, 0xac800000, 0x8002bfe,
+0x0, 0x8ee24e30, 0x240c0040, 0x24420001,
+0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x24020012, 0x240c0001, 0xac820000,
+0xac8c0004, 0x5600000d, 0x24100001, 0x8ee204e4,
+0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
+0x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f006,
+0xc002403, 0xafab0038, 0x8fab0038, 0x1200030a,
+0x240c0001, 0x8002f19, 0x0, 0x966c001c,
+0xafac002c, 0x9662001e, 0x3c0c8000, 0xafac0024,
+0xae62001c, 0x8e75001c, 0x8ee204fc, 0x8ee404fc,
+0x151900, 0x621021, 0x8c52000c, 0x92e27b98,
+0x641821, 0x9476000a, 0x14400003, 0x32c20002,
+0xaef27ba4, 0xaef57b9c, 0x1040004b, 0x8021,
+0x96e2045a, 0x30420002, 0x10400047, 0x0,
+0x8e63001c, 0x8ee204fc, 0x32100, 0x821021,
+0x8c42000c, 0x37e1821, 0x24420022, 0x43102b,
+0x1440000a, 0x24050014, 0x8ee204fc, 0x821021,
+0x8c44000c, 0xafab0038, 0xc002f75, 0x2484000e,
+0x8fab0038, 0x8002c52, 0x3050ffff, 0x8ee204fc,
+0x821021, 0x8c42000c, 0x9450000e, 0x94430010,
+0x94440012, 0x94450014, 0x2038021, 0x2048021,
+0x2058021, 0x94430016, 0x94440018, 0x9445001a,
+0x2038021, 0x2048021, 0x2058021, 0x9443001c,
+0x9444001e, 0x94420020, 0x2038021, 0x2048021,
+0x2028021, 0x101c02, 0x3202ffff, 0x628021,
+0x8e63001c, 0x8ee204fc, 0x102402, 0x32900,
+0xa21021, 0x8c43000c, 0x3202ffff, 0x828021,
+0x37e1021, 0x24630018, 0x62182b, 0x14600009,
+0x0, 0x8ee204fc, 0xa21021, 0x8c43000c,
+0x101027, 0x3c01ffff, 0x230821, 0x8002c6f,
+0xa4220018, 0x8ee204fc, 0xa21021, 0x8c43000c,
+0x101027, 0xa4620018, 0x96e2045a, 0x8821,
+0x30420008, 0x14400063, 0xa021, 0x8e63001c,
+0x8ee204fc, 0x33100, 0xc21021, 0x8c42000c,
+0x37e1821, 0x24420022, 0x43102b, 0x14400035,
+0x0, 0x8ee204fc, 0xc21021, 0x8c42000c,
+0x24470010, 0x37e1021, 0xe2102b, 0x50400001,
+0xeb3821, 0x8ee204fc, 0x94f10000, 0xc21021,
+0x8c42000c, 0x24470016, 0x37e1021, 0xe2102b,
+0x14400002, 0x2634ffec, 0xeb3821, 0x8ee204fc,
+0x90e30001, 0xc21021, 0x8c42000c, 0x2447001a,
+0x37e1021, 0xe2102b, 0x14400002, 0x2838821,
+0xeb3821, 0x94e20000, 0x24e70002, 0x2228821,
+0x37e1021, 0xe2102b, 0x50400001, 0xeb3821,
+0x94e20000, 0x24e70002, 0x2228821, 0x37e1021,
+0xe2102b, 0x50400001, 0xeb3821, 0x94e20000,
+0x24e70002, 0x2228821, 0x37e1021, 0xe2102b,
+0x50400001, 0xeb3821, 0x94e20000, 0x8002cd0,
+0x2228821, 0x8ee204fc, 0xc21021, 0x8c43000c,
+0x8ee204fc, 0x94710010, 0x8ee304fc, 0xc21021,
+0x8c44000c, 0xc31821, 0x8c62000c, 0x2634ffec,
+0x90840017, 0x8ee304fc, 0x9442001a, 0x2848821,
+0xc31821, 0x8c65000c, 0x8ee304fc, 0x2228821,
+0x8ee204fc, 0xc31821, 0xc21021, 0x8c44000c,
+0x8c62000c, 0x94a3001c, 0x9484001e, 0x94420020,
+0x2238821, 0x2248821, 0x2228821, 0x111c02,
+0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
+0x628821, 0x32c20001, 0x104000b2, 0x0,
+0x96e2045a, 0x30420001, 0x104000ae, 0x32c20080,
+0x10400008, 0x0, 0x92e27b98, 0x14400005,
+0x0, 0x240c0001, 0xa2ec7b98, 0xaef57b9c,
+0xaef27ba4, 0x8ee304fc, 0x151100, 0x431021,
+0x8c47000c, 0x37e1821, 0x24e2000e, 0x43102b,
+0x14400008, 0xe02021, 0x2405000e, 0xc002f75,
+0xafab0038, 0x3042ffff, 0x8fab0038, 0x8002d09,
+0x2028021, 0x94e60000, 0x24e70002, 0x94e50000,
+0x24e70002, 0x94e30000, 0x24e70002, 0x94e20000,
+0x24e70002, 0x94e40000, 0x24e70002, 0x2068021,
+0x2058021, 0x2038021, 0x2028021, 0x94e20000,
+0x94e30002, 0x2048021, 0x2028021, 0x2038021,
+0x101c02, 0x3202ffff, 0x628021, 0x101c02,
+0x3202ffff, 0x8ee47b9c, 0x628021, 0x14950004,
+0x3205ffff, 0x96620016, 0x8002d17, 0x512021,
+0x96620016, 0x542021, 0x41402, 0x3083ffff,
+0x432021, 0x852023, 0x41402, 0x822021,
+0x3084ffff, 0x50800001, 0x3404ffff, 0x8ee27ba4,
+0x24430017, 0x37e1021, 0x62102b, 0x50400001,
+0x6b1821, 0x90630000, 0x24020011, 0x14620031,
+0x24020006, 0x8ee27ba4, 0x37e1821, 0x24420028,
+0x43102b, 0x14400018, 0x0, 0x8ee27b9c,
+0x12a2000a, 0x32c20100, 0x8ee27ba4, 0x3c01ffff,
+0x220821, 0x94220028, 0x822021, 0x41c02,
+0x3082ffff, 0x622021, 0x32c20100, 0x14400004,
+0x41027, 0x92e27b98, 0x14400002, 0x41027,
+0x3044ffff, 0x8ee27ba4, 0x3c01ffff, 0x220821,
+0x8002d8a, 0xa4240028, 0x8ee27b9c, 0x12a20008,
+0x32c20100, 0x8ee27ba4, 0x94420028, 0x822021,
+0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
+0x14400004, 0x41027, 0x92e27b98, 0x14400002,
+0x41027, 0x3044ffff, 0x8ee27ba4, 0x8002d8a,
+0xa4440028, 0x1462002f, 0x37e1821, 0x8ee27ba4,
+0x24420032, 0x43102b, 0x14400018, 0x0,
+0x8ee27b9c, 0x12a2000a, 0x32c20100, 0x8ee27ba4,
+0x3c01ffff, 0x220821, 0x94220032, 0x822021,
+0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
+0x14400004, 0x41027, 0x92e27b98, 0x14400002,
+0x41027, 0x3044ffff, 0x8ee27ba4, 0x3c01ffff,
+0x220821, 0x8002d8a, 0xa4240032, 0x8ee27b9c,
+0x12a20008, 0x32c20100, 0x8ee27ba4, 0x94420032,
+0x822021, 0x41c02, 0x3082ffff, 0x622021,
+0x32c20100, 0x14400004, 0x41027, 0x92e27b98,
+0x14400002, 0x41027, 0x3044ffff, 0x8ee27ba4,
+0xa4440032, 0x8fac0024, 0x1180002c, 0x37e1821,
+0x8e420000, 0xae42fffc, 0x2642000a, 0x43102b,
+0x1440001b, 0x34038100, 0x26430004, 0x37e1021,
+0x62102b, 0x14400003, 0x602021, 0x6b1821,
+0x602021, 0x8c620000, 0x24630004, 0xae420000,
+0x37e1021, 0x62102b, 0x50400001, 0x6b1821,
+0x8c620000, 0xac820000, 0x34028100, 0xa4620000,
+0x24630002, 0x37e1021, 0x62102b, 0x50400001,
+0x6b1821, 0x97ac002e, 0x8002db4, 0xa46c0000,
+0x8e420004, 0x8e440008, 0xa6430008, 0x97ac002e,
+0xa64c000a, 0xae420000, 0xae440004, 0x9662000e,
+0x2652fffc, 0x24420004, 0xa662000e, 0x9662000e,
+0x8ee3725c, 0x621821, 0xaee3725c, 0xafb20018,
+0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
+0x10400004, 0x24620001, 0x2403fffe, 0x431024,
+0xafa2001c, 0x32c20080, 0x1040000c, 0x32c20100,
+0x8ee27ba8, 0x24430001, 0x210c0, 0x571021,
+0xaee37ba8, 0x8fa30018, 0x8fa4001c, 0xac437bac,
+0xac447bb0, 0x8002ea0, 0xaee0725c, 0x10400072,
+0x0, 0x8ee27ba8, 0x24430001, 0x210c0,
+0x571021, 0xaee37ba8, 0x8fa30018, 0x8fa4001c,
+0xac437bac, 0xac447bb0, 0x8ee27ba8, 0x10400063,
+0x4821, 0x5021, 0x8f8200f0, 0x24480008,
+0x27621800, 0x102102b, 0x50400001, 0x27681000,
+0x8f8200f4, 0x15020007, 0x0, 0x8ee201b4,
+0x8021, 0x24420001, 0xaee201b4, 0x8002dfa,
+0x8ee201b4, 0x8f8300f0, 0x24100001, 0x1571021,
+0x8c447bac, 0x8c457bb0, 0xac640000, 0xac650004,
+0xaf8800f0, 0x16000006, 0x2ea1021, 0x8ee20088,
+0x24420001, 0xaee20088, 0x8002e3f, 0x8ee20088,
+0x8c427bb0, 0x8ee400e0, 0x8ee500e4, 0x8ee67b9c,
+0x401821, 0x1021, 0xa32821, 0xa3382b,
+0x822021, 0x872021, 0x8ee204fc, 0xc93021,
+0x63100, 0xaee400e0, 0xaee500e4, 0xc23021,
+0x94c2000a, 0x240c0002, 0x21142, 0x30430003,
+0x106c0016, 0x28620003, 0x10400005, 0x240c0001,
+0x106c0008, 0x0, 0x8002e3f, 0x0,
+0x240c0003, 0x106c0017, 0x0, 0x8002e3f,
+0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
+0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
+0x8ee200e8, 0x8002e3f, 0x8ee300ec, 0x8ee200f0,
+0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
+0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002e3f,
+0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
+0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
+0x8ee200f8, 0x8ee300fc, 0x8ee27ba8, 0x25290001,
+0x122102b, 0x1440ffa0, 0x254a0008, 0xa2e07b98,
+0x8002e9f, 0xaee07ba8, 0x8f8200f0, 0x24470008,
+0x27621800, 0xe2102b, 0x50400001, 0x27671000,
+0x8f8200f4, 0x14e20007, 0x0, 0x8ee201b4,
+0x8021, 0x24420001, 0xaee201b4, 0x8002e5d,
+0x8ee201b4, 0x8f8200f0, 0x24100001, 0x8fa30018,
+0x8fa4001c, 0xac430000, 0xac440004, 0xaf8700f0,
+0x16000007, 0x0, 0x8ee20088, 0x24420001,
+0xaee20088, 0x8ee20088, 0x8002ea0, 0xaee0725c,
+0x8ee2725c, 0x8ee400e0, 0x8ee500e4, 0x240c0002,
+0x401821, 0x1021, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0x161142, 0x30430003,
+0xaee400e0, 0xaee500e4, 0x106c0017, 0x2c620003,
+0x10400005, 0x240c0001, 0x106c0008, 0x0,
+0x8002ea0, 0xaee0725c, 0x240c0003, 0x106c0019,
+0x0, 0x8002ea0, 0xaee0725c, 0x8ee200e8,
+0x8ee300ec, 0x24630001, 0x2c640001, 0x441021,
+0xaee200e8, 0xaee300ec, 0x8ee200e8, 0x8ee300ec,
+0x8002ea0, 0xaee0725c, 0x8ee200f0, 0x8ee300f4,
+0x24630001, 0x2c640001, 0x441021, 0xaee200f0,
+0xaee300f4, 0x8ee200f0, 0x8ee300f4, 0x8002ea0,
+0xaee0725c, 0x8ee200f8, 0x8ee300fc, 0x24630001,
+0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
+0x8ee200f8, 0x8ee300fc, 0xaee0725c, 0x8e62001c,
+0x96e30458, 0x8ee404f0, 0x24420001, 0x2463ffff,
+0x431024, 0x24840001, 0xaee204e4, 0xaee404f0,
+0x8f42023c, 0x82202b, 0x148000b0, 0x0,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
+0x8002f07, 0x8ee201a4, 0x8ee204e4, 0xac62001c,
+0x8ee404b0, 0x8ee504b4, 0x2462001c, 0xac620008,
+0x24020008, 0xa462000e, 0x24020011, 0xac620018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400037, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c830000, 0x24020012, 0x1462001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x240c0040,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8002ef1, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x8002f07, 0x0, 0x8ee24e30, 0x240c0040,
+0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020012, 0x240c0001,
+0xac820000, 0xac8c0004, 0x5600000d, 0x24100001,
+0x8ee204e4, 0x3c040001, 0x24845754, 0xafa00014,
+0xafa20010, 0x8ee60608, 0x8f470228, 0x3c050009,
+0x34a5f006, 0xc002403, 0xafab0038, 0x8fab0038,
+0x16000003, 0x240c0001, 0x8002f5c, 0xa2ec04f4,
+0x8ee20170, 0x24420001, 0xaee20170, 0x8ee20170,
+0x8ee204e4, 0xa2e004f4, 0xaee004f0, 0xaee07274,
+0xaee204f8, 0x8f42023c, 0x10400038, 0x0,
+0x8ee20184, 0x24420001, 0xaee20184, 0x8002f5c,
+0x8ee20184, 0x8ee20504, 0x240c0040, 0x24420001,
+0x504c0003, 0x1021, 0x8ee20504, 0x24420001,
+0xaee20504, 0x8ee20504, 0x8e630018, 0x240c0003,
+0x21080, 0x571021, 0x146c000f, 0x8c440508,
+0x3c020001, 0x571021, 0x904283b1, 0x10400014,
+0x0, 0x8ee201d0, 0x8ee35240, 0x441021,
+0xaee201d0, 0x8ee201d8, 0x641821, 0x306300ff,
+0x8002f4f, 0xaee35240, 0x8ee201cc, 0x8ee30e10,
+0x441021, 0xaee201cc, 0x8ee201d8, 0x641821,
+0x306301ff, 0xaee30e10, 0x441021, 0xaee201d8,
+0x8ee20000, 0x34420040, 0x8002f5c, 0xaee20000,
+0x8ee2014c, 0x3c010001, 0x370821, 0xa02083e0,
+0x24420001, 0xaee2014c, 0x8ee2014c, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x8f820108,
+0x27633000, 0x43102b, 0x14400002, 0x27622800,
+0xaf820108, 0x8f830108, 0x8f820104, 0x1462fc1e,
+0x0, 0x8fbf0060, 0x8fbe005c, 0x8fb60058,
+0x8fb50054, 0x8fb40050, 0x8fb3004c, 0x8fb20048,
+0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0068,
+0x52843, 0x10a0000d, 0x3021, 0x3c030001,
+0x34633800, 0x3c07ffff, 0x3631021, 0x82102b,
+0x50400001, 0x872021, 0x94820000, 0x24840002,
+0x24a5ffff, 0x14a0fff8, 0xc23021, 0x61c02,
+0x30c2ffff, 0x623021, 0x61c02, 0x30c2ffff,
+0x623021, 0x3e00008, 0x30c2ffff, 0x27bdff88,
+0x240f0001, 0xafbf0070, 0xafbe006c, 0xafb60068,
+0xafb50064, 0xafb40060, 0xafb3005c, 0xafb20058,
+0xafb10054, 0xafb00050, 0xa3a00027, 0xafaf002c,
+0x8ee204d4, 0x8021, 0x30420001, 0x1440002a,
+0xa3a00037, 0x8f8700e0, 0x8f8800c4, 0x8f8200e8,
+0xe22023, 0x2c821000, 0x50400001, 0x24841000,
+0x420c2, 0x801821, 0x8ee400c8, 0x8ee500cc,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c8, 0xaee500cc, 0x8f8300c8,
+0x3c02000a, 0x3442efff, 0x1032023, 0x44102b,
+0x10400003, 0x3c02000a, 0x3442f000, 0x822021,
+0x801821, 0x8ee400c0, 0x8ee500c4, 0x1021,
+0xa32821, 0xa3302b, 0x822021, 0x862021,
+0xaee400c0, 0xaee500c4, 0xaf8800c8, 0xaf8700e4,
+0x80034cc, 0xaf8700e8, 0x3c020001, 0x571021,
+0x904283c0, 0x1040000b, 0x0, 0x3c140001,
+0x297a021, 0x8e9483c4, 0x3c130001, 0x2779821,
+0x8e7383c8, 0x3c120001, 0x2579021, 0x8003193,
+0x8e5283cc, 0x8f8300e0, 0x8f8200e4, 0x10430007,
+0x8821, 0x8f8200e4, 0x24110001, 0x8c430000,
+0x8c440004, 0xafa30018, 0xafa4001c, 0x1620000e,
+0x3c02ffff, 0x8f8200c4, 0xafa20010, 0x8f8200c8,
+0x3c040001, 0x24845870, 0xafa20014, 0x8f8600e0,
+0x8f8700e4, 0x3c050006, 0xc002403, 0x34a5f000,
+0x80034cc, 0x0, 0x8fa3001c, 0x8fb20018,
+0x3074ffff, 0x2694fffc, 0x621024, 0x10400058,
+0x2409821, 0x3c020080, 0x621024, 0x1040000a,
+0x3c040040, 0x8ee2007c, 0x24420001, 0xaee2007c,
+0x8ee2007c, 0x8ee201fc, 0x24420001, 0xaee201fc,
+0x80034c6, 0x8ee201fc, 0x3c060004, 0x3c0b0001,
+0x3c0a0002, 0x3c050010, 0x3c090008, 0x8ee20080,
+0x3c080020, 0x34078000, 0x24420001, 0xaee20080,
+0x8ee20080, 0x8fa2001c, 0x441824, 0x10660021,
+0xc3102b, 0x14400007, 0x0, 0x106b0011,
+0x0, 0x106a0015, 0x0, 0x8003049,
+0x42042, 0x10650023, 0xa3102b, 0x14400005,
+0x0, 0x10690019, 0x0, 0x8003049,
+0x42042, 0x10680021, 0x0, 0x8003049,
+0x42042, 0x8ee20034, 0x24420001, 0xaee20034,
+0x8ee20034, 0x8003049, 0x42042, 0x8ee201ec,
+0x24420001, 0xaee201ec, 0x8ee201ec, 0x8003049,
+0x42042, 0x8ee201f0, 0x24420001, 0xaee201f0,
+0x8ee201f0, 0x8003049, 0x42042, 0x8ee201f4,
+0x24420001, 0xaee201f4, 0x8ee201f4, 0x8003049,
+0x42042, 0x8ee20030, 0x24420001, 0xaee20030,
+0x8ee20030, 0x8003049, 0x42042, 0x8ee201f8,
+0x24420001, 0xaee201f8, 0x8ee201f8, 0x42042,
+0x1087047c, 0x0, 0x800300e, 0x0,
+0x3c020001, 0x571021, 0x904283b2, 0x14400084,
+0x24020001, 0x3c030001, 0x771821, 0x906383b3,
+0x1462007f, 0x3c020100, 0x8e430000, 0x621024,
+0x1040006f, 0x2402ffff, 0x14620005, 0x24100001,
+0x96430004, 0x3402ffff, 0x10620075, 0x0,
+0x92e204d8, 0x14400072, 0x0, 0x3c020001,
+0x571021, 0x8c4283b4, 0x28420005, 0x10400020,
+0x3821, 0x3c020001, 0x571021, 0x8c4283b4,
+0x18400016, 0x2821, 0x96660000, 0x520c0,
+0x971021, 0x9442777e, 0x14460009, 0x971021,
+0x94437780, 0x96620002, 0x14620005, 0x971021,
+0x94437782, 0x96620004, 0x50620008, 0x24070001,
+0x3c020001, 0x571021, 0x8c4283b4, 0x24a50001,
+0xa2102a, 0x5440ffee, 0x520c0, 0x30e200ff,
+0x10400440, 0x0, 0x80030d5, 0x0,
+0x2402021, 0xc0022fe, 0x24050006, 0x3044001f,
+0x428c0, 0x2e51021, 0x9442727c, 0x30424000,
+0x14400434, 0xb71021, 0x9443727e, 0x96620000,
+0x1462000b, 0x418c0, 0xb71021, 0x94437280,
+0x96620002, 0x14620006, 0x418c0, 0xb71021,
+0x94437282, 0x96620004, 0x10620035, 0x418c0,
+0x2e31021, 0x9442727c, 0x30428000, 0x14400421,
+0x2e31021, 0x944b727c, 0x96670000, 0xb28c0,
+0xb71021, 0x9442737e, 0x80030b7, 0x3021,
+0x420c0, 0x2e41021, 0x9443737c, 0x2e41021,
+0x944b737c, 0x30638000, 0x14600010, 0xb28c0,
+0xb71021, 0x9442737e, 0x1447fff5, 0x1602021,
+0xb71021, 0x94437380, 0x96620002, 0x5462fff1,
+0x420c0, 0xb71021, 0x94437382, 0x96620004,
+0x5462ffec, 0x420c0, 0x24060001, 0x30c200ff,
+0x10400400, 0x0, 0x80030d5, 0x0,
+0x97430202, 0x96420000, 0x146203fa, 0x0,
+0x97430204, 0x96420002, 0x146203f6, 0x0,
+0x97430206, 0x96420004, 0x146203f2, 0x0,
+0x92420000, 0x3a030001, 0x30420001, 0x431024,
+0x10400074, 0x2402ffff, 0x8e630000, 0x14620004,
+0x3402ffff, 0x96630004, 0x1062006f, 0x240f0002,
+0x3c020001, 0x571021, 0x904283b2, 0x1440006a,
+0x240f0003, 0x92e204d8, 0x54400068, 0xafaf002c,
+0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
+0x10400020, 0x3821, 0x3c020001, 0x571021,
+0x8c4283b4, 0x18400016, 0x2821, 0x96660000,
+0x520c0, 0x971021, 0x9442777e, 0x14460009,
+0x971021, 0x94437780, 0x96620002, 0x14620005,
+0x971021, 0x94437782, 0x96620004, 0x50620008,
+0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
+0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
+0x30e200ff, 0x14400044, 0x240f0003, 0x80034c6,
+0x0, 0x2402021, 0xc0022fe, 0x24050006,
+0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
+0x30424000, 0x144003af, 0xb71021, 0x9443727e,
+0x96620000, 0x1462000b, 0x418c0, 0xb71021,
+0x94437280, 0x96620002, 0x14620006, 0x418c0,
+0xb71021, 0x94437282, 0x96620004, 0x10620027,
+0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
+0x1440039c, 0x2e31021, 0x944b727c, 0x96670000,
+0xb28c0, 0xb71021, 0x9442737e, 0x800313c,
+0x3021, 0x420c0, 0x2e41021, 0x9443737c,
+0x2e41021, 0x944b737c, 0x30638000, 0x14600010,
+0xb28c0, 0xb71021, 0x9442737e, 0x1447fff5,
+0x1602021, 0xb71021, 0x94437380, 0x96620002,
+0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
+0x96620004, 0x5462ffec, 0x420c0, 0x24060001,
+0x30c200ff, 0x1040037b, 0x0, 0x800314f,
+0x240f0003, 0x240f0001, 0xafaf002c, 0x8f420260,
+0x54102b, 0x1040003a, 0x0, 0x8f8300e4,
+0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
+0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2801821,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
+0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
+0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
+0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
+0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
+0xc002403, 0x34a5f003, 0x80034cc, 0x0,
+0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
+0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
+0x3c050006, 0xc002403, 0x34a5f002, 0x8ee201c0,
+0x24420001, 0xaee201c0, 0x8ee20000, 0x8ee301c0,
+0x2403ffbf, 0x431024, 0x8003470, 0xaee20000,
+0x96e20468, 0x54102b, 0x10400003, 0x0,
+0x240f0001, 0xa3af0027, 0x12800301, 0x24160007,
+0x24150040, 0x241e0001, 0x240e0012, 0x8ee2724c,
+0x8f430280, 0x24420001, 0x304207ff, 0x106202d3,
+0x0, 0x93a20027, 0x10400014, 0x0,
+0x8ee35240, 0x8ee25244, 0x10620009, 0x26ed5244,
+0x8ee65244, 0x8ee35244, 0x21140, 0x24425248,
+0x2e28021, 0x24630001, 0x80031bf, 0x306b00ff,
+0x92e27248, 0x1440ffca, 0x0, 0x8ee201e0,
+0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
+0x8ee20e18, 0x1062ffc2, 0x26ed0e18, 0x8ee60e18,
+0x8ee30e18, 0x21140, 0x24420e20, 0x2e28021,
+0x24630001, 0x306b01ff, 0x96e2046a, 0x30420010,
+0x10400019, 0x0, 0x9642000c, 0x340f8100,
+0x144f0015, 0x0, 0x3c020001, 0x571021,
+0x904283c0, 0x14400010, 0x0, 0x9642000e,
+0xa6020016, 0x8e420008, 0x8e430004, 0x8e440000,
+0x2694fffc, 0xae42000c, 0xae430008, 0xae440004,
+0x9602000e, 0x26730004, 0x240f0001, 0xa3af0037,
+0x34420200, 0xa602000e, 0x8e020000, 0x8e030004,
+0x3c040001, 0x34843800, 0x306a0007, 0x26a9823,
+0x3641021, 0x262102b, 0x10400005, 0x28aa021,
+0x2641023, 0x3621823, 0x3c020020, 0x439823,
+0x26820007, 0x2404fff8, 0x9603000a, 0x446024,
+0x6a1821, 0x6c102b, 0x10400002, 0x1803821,
+0x603821, 0xae130018, 0x8f880120, 0x24e20007,
+0x443824, 0x27623800, 0x25090020, 0x122102b,
+0x50400001, 0x27693000, 0x8f820128, 0x11220004,
+0x0, 0x8f820124, 0x15220007, 0x1401821,
+0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
+0x800324c, 0x8ee201a4, 0x8e040000, 0x8e050004,
+0x1021, 0xad130008, 0xa507000e, 0xad160018,
+0xad06001c, 0xa3302b, 0xa32823, 0x822023,
+0x862023, 0xad040000, 0xad050004, 0x8ee204c0,
+0xad020010, 0xaf890120, 0x92e24e20, 0x14400033,
+0x24110001, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c820000, 0x1456001f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x10550007, 0x0,
+0x8ee24e34, 0x24420001, 0x10620005, 0x0,
+0x8003239, 0x0, 0x14600005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
+0x800324c, 0x0, 0x8ee24e30, 0x24420001,
+0x50550003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0xac960000, 0xac9e0004, 0x16200018,
+0x3c050006, 0x8e020018, 0x3c040001, 0x24845890,
+0xafa20010, 0x8e020000, 0x8e030004, 0x34a5f009,
+0x2003021, 0xc002403, 0xafa30014, 0x93a20037,
+0x10400216, 0x340f8100, 0x8e420004, 0x8e430008,
+0x8e44000c, 0xa64f000c, 0xae420000, 0xae430004,
+0xae440008, 0x96020016, 0x8003470, 0xa642000e,
+0x14ec0168, 0x28a1823, 0x960c000a, 0x9603000e,
+0x28a1023, 0xa602000a, 0x34620004, 0xa602000e,
+0x8f880120, 0x27623800, 0x25090020, 0x122102b,
+0x14400002, 0x306affff, 0x27693000, 0x8f820128,
+0x11220004, 0x0, 0x8f820124, 0x15220007,
+0x24040020, 0x8ee201a4, 0x8821, 0x24420001,
+0xaee201a4, 0x80032ca, 0x8ee201a4, 0x8ee5724c,
+0x8ee60490, 0x8ee70494, 0xa504000e, 0x24040004,
+0xad100008, 0xad040018, 0x52940, 0xa01821,
+0x1021, 0xe33821, 0xe3202b, 0xc23021,
+0xc43021, 0xad060000, 0xad070004, 0x8ee2724c,
+0xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
+0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
+0x1456001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x0, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
+0x10550007, 0x0, 0x8ee24e34, 0x24420001,
+0x10620005, 0x0, 0x80032b7, 0x0,
+0x14600005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400010, 0xac800000, 0x80032ca, 0x0,
+0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
+0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0xac960000,
+0xac9e0004, 0x1620000d, 0x0, 0xa60c000a,
+0xa60a000e, 0x8f820100, 0xafa20010, 0x8f820104,
+0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
+0x8ee6724c, 0x800343b, 0x34a5f00b, 0x3c010001,
+0x370821, 0xa02083c0, 0xadab0000, 0x8ee201d8,
+0x8ee3724c, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
+0x24630001, 0x306307ff, 0x26e25244, 0x15a20006,
+0xaee3724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
+0x80032ef, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
+0xaee201cc, 0x8ee201cc, 0x8f420240, 0x10400073,
+0x0, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
+0x8f430240, 0x43102b, 0x14400176, 0xa021,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
+0x800334f, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
+0x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
+0x24020008, 0xa462000e, 0x24020011, 0xac620018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400033, 0x24110001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x144e001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x10550007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x800333c,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x800334f,
+0x0, 0x8ee24e30, 0x24420001, 0x50550003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac8e0000, 0xac9e0004, 0x5620000d, 0x24110001,
+0x8ee2724c, 0x3c040001, 0x248458a8, 0xafa00014,
+0xafa20010, 0x8ee6724c, 0x8f470280, 0x3c050009,
+0x34a5f008, 0xc002403, 0xafae0048, 0x8fae0048,
+0x56200001, 0xaee00e1c, 0x8ee20188, 0x24420001,
+0xaee20188, 0x80033c8, 0x8ee20188, 0x8f830120,
+0x27623800, 0x24660020, 0xc2102b, 0x50400001,
+0x27663000, 0x8f820128, 0x10c20004, 0x0,
+0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
+0x8821, 0x24420001, 0xaee201a4, 0x80033ba,
+0x8ee201a4, 0x8ee2724c, 0xac62001c, 0x8ee404a8,
+0x8ee504ac, 0x2462001c, 0xac620008, 0x24020008,
+0xa462000e, 0x24020011, 0xac620018, 0xac640000,
+0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
+0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
+0x144e001f, 0x0, 0x8ee34e30, 0x8ee24e34,
+0x1062001b, 0x0, 0x8c820004, 0x24420001,
+0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
+0x10550007, 0x0, 0x8ee24e34, 0x24420001,
+0x10620005, 0x0, 0x80033a7, 0x0,
+0x14600005, 0x0, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
+0x50400010, 0xac800000, 0x80033ba, 0x0,
+0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
+0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0xac8e0000,
+0xac9e0004, 0x1620000d, 0x0, 0x8ee2724c,
+0x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
+0x8ee6724c, 0x8f470280, 0x3c050009, 0x34a5f008,
+0xc002403, 0xafae0048, 0x8fae0048, 0x8ee20174,
+0x24420001, 0xaee20174, 0x8ee20174, 0x800346e,
+0xa021, 0x960c000a, 0x183102b, 0x54400001,
+0x1801821, 0xa603000a, 0x8f880120, 0x27623800,
+0x25090020, 0x122102b, 0x50400001, 0x27693000,
+0x8f820128, 0x11220004, 0x0, 0x8f820124,
+0x15220007, 0x24040020, 0x8ee201a4, 0x8821,
+0x24420001, 0xaee201a4, 0x800342f, 0x8ee201a4,
+0x8ee5724c, 0x8ee60490, 0x8ee70494, 0xa504000e,
+0x24040004, 0xad100008, 0xad040018, 0x52940,
+0xa01821, 0x1021, 0xe33821, 0xe3202b,
+0xc23021, 0xc43021, 0xad060000, 0xad070004,
+0x8ee2724c, 0xad02001c, 0x8ee204c4, 0xad020010,
+0xaf890120, 0x92e24e20, 0x14400033, 0x24110001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x1456001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x10550007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x800341c,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400010, 0xac800000, 0x800342f,
+0x0, 0x8ee24e30, 0x24420001, 0x50550003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0xac960000, 0xac9e0004, 0x1620001d, 0x0,
+0xa60c000a, 0x8f820100, 0xafa20010, 0x8f820104,
+0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
+0x8ee6724c, 0x34a5f00d, 0xc002403, 0x2003821,
+0x93a20037, 0x10400031, 0x340f8100, 0x8e420004,
+0x8e430008, 0x8e44000c, 0xa64f000c, 0xae420000,
+0xae430004, 0xae440008, 0x96020016, 0xa642000e,
+0x9602000e, 0x3042fdff, 0x8003470, 0xa602000e,
+0x8ee201d8, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
+0x8ee201cc, 0x3c04001f, 0x3c010001, 0x370821,
+0xa03e83c0, 0x2442ffff, 0xaee201cc, 0x9603000a,
+0x3484ffff, 0x8ee201cc, 0x6a1821, 0x2639821,
+0x93202b, 0x10800003, 0x3c02fff5, 0x34421000,
+0x2629821, 0xadab0000, 0x8ee2724c, 0x24420001,
+0x304207ff, 0xaee2724c, 0x8f420240, 0x10400004,
+0x283a023, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
+0xa3a00027, 0x1680fd29, 0x0, 0x12800024,
+0x0, 0x3c010001, 0x370821, 0xac3483c4,
+0x3c010001, 0x370821, 0xac3383c8, 0x3c010001,
+0x370821, 0xac3283cc, 0x93a20037, 0x10400008,
+0x0, 0x3c020001, 0x571021, 0x8c4283cc,
+0x24420004, 0x3c010001, 0x370821, 0xac2283cc,
+0x8ee2724c, 0x8f430280, 0x24420001, 0x304207ff,
+0x14620006, 0x0, 0x8ee201c4, 0x24420001,
+0xaee201c4, 0x80034cc, 0x8ee201c4, 0x8ee201bc,
+0x24420001, 0xaee201bc, 0x80034cc, 0x8ee201bc,
+0x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
+0x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xaee400c0, 0xaee500c4,
+0x8faf002c, 0x24020002, 0x11e2000f, 0x29e20003,
+0x14400017, 0x24020003, 0x15e20015, 0x0,
+0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
+0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
+0x80034c6, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
+0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
+0xaee300dc, 0x8ee200d8, 0x80034c6, 0x8ee300dc,
+0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
+0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
+0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
+0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf0070,
+0x8fbe006c, 0x8fb60068, 0x8fb50064, 0x8fb40060,
+0x8fb3005c, 0x8fb20058, 0x8fb10054, 0x8fb00050,
+0x3e00008, 0x27bd0078, 0x27bdffb0, 0xafb50044,
+0xa821, 0xafb00030, 0x8021, 0xafbf004c,
+0xafb60048, 0xafb40040, 0xafb3003c, 0xafb20038,
+0xafb10034, 0x8ee204d4, 0x24140001, 0x30420001,
+0x1440002a, 0xb021, 0x8f8700e0, 0x8f8800c4,
+0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
+0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
+0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
+0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
+0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
+0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
+0xaf8700e4, 0x8003850, 0xaf8700e8, 0x3c020001,
+0x571021, 0x904283c0, 0x1040000b, 0x0,
+0x3c130001, 0x2779821, 0x8e7383c4, 0x3c110001,
+0x2378821, 0x8e3183c8, 0x3c120001, 0x2579021,
+0x80036e8, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
+0x10430007, 0x4821, 0x8f8200e4, 0x24090001,
+0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
+0x1520000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
+0x8f8200c8, 0x3c040001, 0x24845870, 0xafa20014,
+0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
+0x34a5f000, 0x8003850, 0x0, 0x8fa3001c,
+0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
+0x10400058, 0x2408821, 0x3c020080, 0x621024,
+0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
+0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
+0xaee201fc, 0x800384a, 0x8ee201fc, 0x3c060004,
+0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
+0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
+0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
+0x10660021, 0xc3102b, 0x14400007, 0x0,
+0x106b0011, 0x0, 0x106a0015, 0x0,
+0x8003592, 0x42042, 0x10650023, 0xa3102b,
+0x14400005, 0x0, 0x10690019, 0x0,
+0x8003592, 0x42042, 0x10680021, 0x0,
+0x8003592, 0x42042, 0x8ee20034, 0x24420001,
+0xaee20034, 0x8ee20034, 0x8003592, 0x42042,
+0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
+0x8003592, 0x42042, 0x8ee201f0, 0x24420001,
+0xaee201f0, 0x8ee201f0, 0x8003592, 0x42042,
+0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
+0x8003592, 0x42042, 0x8ee20030, 0x24420001,
+0xaee20030, 0x8ee20030, 0x8003592, 0x42042,
+0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
+0x42042, 0x108702b7, 0x0, 0x8003557,
+0x0, 0x3c020001, 0x571021, 0x904283b2,
+0x14400084, 0x24020001, 0x3c030001, 0x771821,
+0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
+0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
+0x24100001, 0x96430004, 0x3402ffff, 0x10620075,
+0x0, 0x92e204d8, 0x14400072, 0x0,
+0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
+0x10400020, 0x3821, 0x3c020001, 0x571021,
+0x8c4283b4, 0x18400016, 0x2821, 0x96260000,
+0x520c0, 0x971021, 0x9442777e, 0x14460009,
+0x971021, 0x94437780, 0x96220002, 0x14620005,
+0x971021, 0x94437782, 0x96220004, 0x50620008,
+0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
+0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
+0x30e200ff, 0x1040027b, 0x0, 0x800361e,
+0x0, 0x2402021, 0xc0022fe, 0x24050006,
+0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
+0x30424000, 0x1440026f, 0xb71021, 0x9443727e,
+0x96220000, 0x1462000b, 0x418c0, 0xb71021,
+0x94437280, 0x96220002, 0x14620006, 0x418c0,
+0xb71021, 0x94437282, 0x96220004, 0x10620035,
+0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
+0x1440025c, 0x2e31021, 0x9448727c, 0x96270000,
+0x828c0, 0xb71021, 0x9442737e, 0x8003600,
+0x3021, 0x420c0, 0x2e41021, 0x9443737c,
+0x2e41021, 0x9448737c, 0x30638000, 0x14600010,
+0x828c0, 0xb71021, 0x9442737e, 0x1447fff5,
+0x1002021, 0xb71021, 0x94437380, 0x96220002,
+0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
+0x96220004, 0x5462ffec, 0x420c0, 0x24060001,
+0x30c200ff, 0x1040023b, 0x0, 0x800361e,
+0x0, 0x97430202, 0x96420000, 0x14620235,
+0x0, 0x97430204, 0x96420002, 0x14620231,
+0x0, 0x97430206, 0x96420004, 0x1462022d,
+0x0, 0x92420000, 0x3a030001, 0x30420001,
+0x431024, 0x10400074, 0x2402ffff, 0x8e230000,
+0x14620004, 0x3402ffff, 0x96230004, 0x1062006f,
+0x24140002, 0x3c020001, 0x571021, 0x904283b2,
+0x1440006a, 0x24140003, 0x92e204d8, 0x14400067,
+0x0, 0x3c020001, 0x571021, 0x8c4283b4,
+0x28420005, 0x10400020, 0x3821, 0x3c020001,
+0x571021, 0x8c4283b4, 0x18400016, 0x2821,
+0x96260000, 0x520c0, 0x971021, 0x9442777e,
+0x14460009, 0x971021, 0x94437780, 0x96220002,
+0x14620005, 0x971021, 0x94437782, 0x96220004,
+0x50620008, 0x24070001, 0x3c020001, 0x571021,
+0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
+0x520c0, 0x30e200ff, 0x14400044, 0x24140003,
+0x800384a, 0x0, 0x2402021, 0xc0022fe,
+0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
+0x9442727c, 0x30424000, 0x144001ea, 0xb71021,
+0x9443727e, 0x96220000, 0x1462000b, 0x418c0,
+0xb71021, 0x94437280, 0x96220002, 0x14620006,
+0x418c0, 0xb71021, 0x94437282, 0x96220004,
+0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
+0x30428000, 0x144001d7, 0x2e31021, 0x9448727c,
+0x96270000, 0x828c0, 0xb71021, 0x9442737e,
+0x8003685, 0x3021, 0x420c0, 0x2e41021,
+0x9443737c, 0x2e41021, 0x9448737c, 0x30638000,
+0x14600010, 0x828c0, 0xb71021, 0x9442737e,
+0x1447fff5, 0x1002021, 0xb71021, 0x94437380,
+0x96220002, 0x5462fff1, 0x420c0, 0xb71021,
+0x94437382, 0x96220004, 0x5462ffec, 0x420c0,
+0x24060001, 0x30c200ff, 0x104001b6, 0x0,
+0x8003698, 0x24140003, 0x24140001, 0x8f420260,
+0x53102b, 0x10400049, 0x0, 0x8f8300e4,
+0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
+0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
+0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
+0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
+0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
+0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
+0xc002403, 0x34a5f003, 0x8003850, 0x0,
+0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
+0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
+0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
+0xaee201c0, 0x8ee20000, 0x8ee301c0, 0x2403ffbf,
+0x431024, 0x80037f8, 0xaee20000, 0x8ee25240,
+0xafa20010, 0x8ee25244, 0x3c040001, 0x24845884,
+0xafa20014, 0x8ee60e10, 0x8ee70e18, 0x3c050006,
+0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
+0xaee201c0, 0x80037f8, 0x8ee201c0, 0x96e20468,
+0x53102b, 0x54400001, 0x3c158000, 0x12600131,
+0x3c0c001f, 0x358cffff, 0x8ee2724c, 0x8f430280,
+0x24420001, 0x304207ff, 0x10620108, 0x0,
+0x12a00014, 0x0, 0x8ee35240, 0x8ee25244,
+0x10620009, 0x26ee5244, 0x8eeb5244, 0x8ee35244,
+0x21140, 0x24425248, 0x2e28021, 0x24630001,
+0x8003712, 0x306800ff, 0x92e27248, 0x1440ffc0,
+0x3c050006, 0x8ee201e0, 0x24420001, 0xaee201e0,
+0x8ee201e0, 0x8ee30e10, 0x8ee20e18, 0x1062ffcb,
+0x26ee0e18, 0x8eeb0e18, 0xa821, 0x8ee30e18,
+0x21140, 0x24420e20, 0x2e28021, 0x24630001,
+0x306801ff, 0x96e2046a, 0x30420010, 0x10400017,
+0x34028100, 0x9643000c, 0x14620014, 0x0,
+0x3c020001, 0x571021, 0x904283c0, 0x1440000f,
+0x0, 0x9642000e, 0xa6020016, 0x8e420008,
+0x8e430004, 0x8e440000, 0x2673fffc, 0xae42000c,
+0xae430008, 0xae440004, 0x9602000e, 0x26310004,
+0x24160001, 0x34420200, 0xa602000e, 0x9603000a,
+0x2605021, 0x73102b, 0x10400002, 0x2606821,
+0x605021, 0x2d42003d, 0x1040002a, 0x3821,
+0x9623000c, 0x24020800, 0x54620027, 0xae110018,
+0x3c020001, 0x571021, 0x904283c0, 0x54400022,
+0xae110018, 0x26220017, 0x182102b, 0x10400013,
+0x0, 0x3c02fff5, 0x511021, 0x90421017,
+0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
+0x621825, 0x10600013, 0x26220010, 0x182102b,
+0x1040000e, 0x0, 0x3c07fff5, 0xf13821,
+0x94e71010, 0x800375e, 0x24e7000e, 0x92220017,
+0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
+0x621825, 0x50600004, 0xae110018, 0x96270010,
+0x24e7000e, 0xae110018, 0x3c020001, 0x571021,
+0x904283c0, 0x2102b, 0x14e00002, 0x24ec0,
+0x1403821, 0x8f830120, 0x27623800, 0x24660020,
+0xc2102b, 0x50400001, 0x27663000, 0x8f820128,
+0x10c20004, 0x0, 0x8f820124, 0x14c20007,
+0x2402000b, 0x8ee201a4, 0x4821, 0x24420001,
+0xaee201a4, 0x80037bf, 0x8ee201a4, 0x8e040000,
+0x8e050004, 0xac620018, 0x1751025, 0x491025,
+0xac710008, 0xa467000e, 0xac62001c, 0xac640000,
+0xac650004, 0x8ee204c0, 0xac620010, 0xaf860120,
+0x92e24e20, 0x14400038, 0x24090001, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020007, 0x14620020, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001c, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee34e34, 0x8ee54e30,
+0x24020040, 0x24630001, 0x10620007, 0x0,
+0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
+0x80037a9, 0x0, 0x14a00005, 0x0,
+0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
+0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
+0x80037bf, 0x0, 0x8ee24e30, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020007, 0xac820000,
+0x24020001, 0xac820004, 0x15200018, 0x3c050006,
+0x8e020018, 0x3c040001, 0x24845890, 0xafa20010,
+0x8e020000, 0x8e030004, 0x34a5f009, 0x2003021,
+0xc002403, 0xafa30014, 0x32c200ff, 0x1040002b,
+0x34028100, 0x8e430004, 0x8e440008, 0x8e45000c,
+0xa642000c, 0xae430000, 0xae440004, 0xae450008,
+0x96020016, 0x80037f8, 0xa642000e, 0x154d000a,
+0x0, 0x9602000e, 0xa613000a, 0x34420004,
+0xa602000e, 0x3c010001, 0x370821, 0xa02083c0,
+0x80037f6, 0x9821, 0x9604000a, 0x93102b,
+0x10400002, 0x2601821, 0x801821, 0x24020001,
+0xa603000a, 0x3c010001, 0x370821, 0xa02283c0,
+0x9604000a, 0x2248821, 0x191102b, 0x10400003,
+0x3c02fff5, 0x34421000, 0x2228821, 0x2649823,
+0xa821, 0x1660fef4, 0xadc80000, 0x12600021,
+0x32c200ff, 0x3c010001, 0x370821, 0xac3383c4,
+0x3c010001, 0x370821, 0xac3183c8, 0x3c010001,
+0x370821, 0x10400008, 0xac3283cc, 0x3c020001,
+0x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
+0x370821, 0xac2283cc, 0x8ee2724c, 0x8f430280,
+0x24420001, 0x14620006, 0x0, 0x8ee201c4,
+0x24420001, 0xaee201c4, 0x8003850, 0x8ee201c4,
+0x8ee201bc, 0x24420001, 0xaee201bc, 0x8003850,
+0x8ee201bc, 0x97a4001e, 0x2484fffc, 0x801821,
+0x8ee400c0, 0x8ee500c4, 0x1021, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0x24020002,
+0xaee400c0, 0xaee500c4, 0x1282000f, 0x2a820003,
+0x14400017, 0x24020003, 0x16820015, 0x0,
+0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
+0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
+0x800384a, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
+0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
+0xaee300dc, 0x8ee200d8, 0x800384a, 0x8ee300dc,
+0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
+0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
+0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
+0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf004c,
+0x8fb60048, 0x8fb50044, 0x8fb40040, 0x8fb3003c,
+0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
+0x27bd0050, 0x27bdff90, 0xafb60060, 0xb021,
+0xafbf0068, 0xafbe0064, 0xafb5005c, 0xafb40058,
+0xafb30054, 0xafb20050, 0xafb1004c, 0xafb00048,
+0x8ee204d4, 0x8821, 0x24150001, 0x30420001,
+0x1440002a, 0xa3a0002f, 0x8f8700e0, 0x8f8800c4,
+0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
+0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
+0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
+0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
+0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
+0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
+0xaf8700e4, 0x8003c5b, 0xaf8700e8, 0x3c020001,
+0x571021, 0x904283c0, 0x1040000b, 0x0,
+0x3c130001, 0x2779821, 0x8e7383c4, 0x3c100001,
+0x2178021, 0x8e1083c8, 0x3c120001, 0x2579021,
+0x8003a59, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
+0x10430007, 0x3821, 0x8f8200e4, 0x24070001,
+0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
+0x14e0000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
+0x8f8200c8, 0x3c040001, 0x248458b4, 0xafa20014,
+0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
+0x34a5f200, 0x8003c5b, 0x0, 0x8fa3001c,
+0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
+0x10400058, 0x2408021, 0x3c020080, 0x621024,
+0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
+0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
+0xaee201fc, 0x8003c55, 0x8ee201fc, 0x3c060004,
+0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
+0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
+0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
+0x10660021, 0xc3102b, 0x14400007, 0x0,
+0x106b0011, 0x0, 0x106a0015, 0x0,
+0x8003916, 0x42042, 0x10650023, 0xa3102b,
+0x14400005, 0x0, 0x10690019, 0x0,
+0x8003916, 0x42042, 0x10680021, 0x0,
+0x8003916, 0x42042, 0x8ee20034, 0x24420001,
+0xaee20034, 0x8ee20034, 0x8003916, 0x42042,
+0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
+0x8003916, 0x42042, 0x8ee201f0, 0x24420001,
+0xaee201f0, 0x8ee201f0, 0x8003916, 0x42042,
+0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
+0x8003916, 0x42042, 0x8ee20030, 0x24420001,
+0xaee20030, 0x8ee20030, 0x8003916, 0x42042,
+0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
+0x42042, 0x1087033e, 0x0, 0x80038db,
+0x0, 0x3c020001, 0x571021, 0x904283b2,
+0x14400084, 0x24020001, 0x3c030001, 0x771821,
+0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
+0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
+0x24110001, 0x96430004, 0x3402ffff, 0x10620075,
+0x0, 0x92e204d8, 0x14400072, 0x0,
+0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
+0x10400020, 0x3821, 0x3c020001, 0x571021,
+0x8c4283b4, 0x18400016, 0x2821, 0x96060000,
+0x520c0, 0x971021, 0x9442777e, 0x14460009,
+0x971021, 0x94437780, 0x96020002, 0x14620005,
+0x971021, 0x94437782, 0x96020004, 0x50620008,
+0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
+0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
+0x30e200ff, 0x10400302, 0x0, 0x80039a2,
+0x0, 0x2402021, 0xc0022fe, 0x24050006,
+0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
+0x30424000, 0x144002f6, 0xb71021, 0x9443727e,
+0x96020000, 0x1462000b, 0x418c0, 0xb71021,
+0x94437280, 0x96020002, 0x14620006, 0x418c0,
+0xb71021, 0x94437282, 0x96020004, 0x10620035,
+0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
+0x144002e3, 0x2e31021, 0x944d727c, 0x96070000,
+0xd28c0, 0xb71021, 0x9442737e, 0x8003984,
+0x3021, 0x420c0, 0x2e41021, 0x9443737c,
+0x2e41021, 0x944d737c, 0x30638000, 0x14600010,
+0xd28c0, 0xb71021, 0x9442737e, 0x1447fff5,
+0x1a02021, 0xb71021, 0x94437380, 0x96020002,
+0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
+0x96020004, 0x5462ffec, 0x420c0, 0x24060001,
+0x30c200ff, 0x104002c2, 0x0, 0x80039a2,
+0x0, 0x97430202, 0x96420000, 0x146202bc,
+0x0, 0x97430204, 0x96420002, 0x146202b8,
+0x0, 0x97430206, 0x96420004, 0x146202b4,
+0x0, 0x92420000, 0x3a230001, 0x30420001,
+0x431024, 0x10400074, 0x2402ffff, 0x8e030000,
+0x14620004, 0x3402ffff, 0x96030004, 0x1062006f,
+0x24150002, 0x3c020001, 0x571021, 0x904283b2,
+0x1440006a, 0x24150003, 0x92e204d8, 0x14400067,
+0x0, 0x3c020001, 0x571021, 0x8c4283b4,
+0x28420005, 0x10400020, 0x3821, 0x3c020001,
+0x571021, 0x8c4283b4, 0x18400016, 0x2821,
+0x96060000, 0x520c0, 0x971021, 0x9442777e,
+0x14460009, 0x971021, 0x94437780, 0x96020002,
+0x14620005, 0x971021, 0x94437782, 0x96020004,
+0x50620008, 0x24070001, 0x3c020001, 0x571021,
+0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
+0x520c0, 0x30e200ff, 0x14400044, 0x24150003,
+0x8003c55, 0x0, 0x2402021, 0xc0022fe,
+0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
+0x9442727c, 0x30424000, 0x14400271, 0xb71021,
+0x9443727e, 0x96020000, 0x1462000b, 0x418c0,
+0xb71021, 0x94437280, 0x96020002, 0x14620006,
+0x418c0, 0xb71021, 0x94437282, 0x96020004,
+0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
+0x30428000, 0x1440025e, 0x2e31021, 0x944d727c,
+0x96070000, 0xd28c0, 0xb71021, 0x9442737e,
+0x8003a09, 0x3021, 0x420c0, 0x2e41021,
+0x9443737c, 0x2e41021, 0x944d737c, 0x30638000,
+0x14600010, 0xd28c0, 0xb71021, 0x9442737e,
+0x1447fff5, 0x1a02021, 0xb71021, 0x94437380,
+0x96020002, 0x5462fff1, 0x420c0, 0xb71021,
+0x94437382, 0x96020004, 0x5462ffec, 0x420c0,
+0x24060001, 0x30c200ff, 0x1040023d, 0x0,
+0x8003a1c, 0x24150003, 0x24150001, 0x8f420260,
+0x53102b, 0x10400036, 0x0, 0x8f8300e4,
+0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
+0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
+0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
+0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
+0xafa20010, 0x8f8200e4, 0x3c040001, 0x248458c0,
+0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
+0xc002403, 0x34a5f203, 0x8003c5b, 0x0,
+0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
+0x248458cc, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
+0x3c050006, 0xc002403, 0x34a5f202, 0x8ee201c0,
+0x24420001, 0xaee201c0, 0x8003c02, 0x8ee201c0,
+0x96e20468, 0x53102b, 0x54400001, 0x3c168000,
+0x126001cb, 0x3c0e001f, 0x35ceffff, 0x3c0ffff5,
+0x35ef1000, 0x241e0040, 0x8ee2724c, 0x8f430280,
+0x24420001, 0x304207ff, 0x1062019e, 0x0,
+0x12c00012, 0x0, 0x8ee35240, 0x8ee25244,
+0x1062000a, 0x26f85244, 0x8ef45244, 0xafb80024,
+0x8ee35244, 0x21140, 0x24425248, 0x2e28821,
+0x24630001, 0x8003a85, 0x306d00ff, 0x8ee201e0,
+0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
+0x8ee20e18, 0x1062ffca, 0x26f80e18, 0x8ef40e18,
+0xb021, 0xafb80024, 0x8ee30e18, 0x21140,
+0x24420e20, 0x2e28821, 0x24630001, 0x306d01ff,
+0x96e2046a, 0x30420010, 0x10400018, 0x34028100,
+0x9643000c, 0x14620015, 0x0, 0x3c020001,
+0x571021, 0x904283c0, 0x14400010, 0x0,
+0x9642000e, 0xa6220016, 0x8e420008, 0x8e430004,
+0x8e440000, 0x2673fffc, 0xae42000c, 0xae430008,
+0xae440004, 0x9622000e, 0x26100004, 0x24180001,
+0xa3b8002f, 0x34420200, 0xa622000e, 0x8e220000,
+0x8e230004, 0x3c040001, 0x34843800, 0x2003021,
+0x306a0007, 0x20a8023, 0x3641021, 0x202102b,
+0x10400005, 0x26a9821, 0x2041023, 0x3621823,
+0x3c020020, 0x438023, 0x26620007, 0x9623000a,
+0x2418fff8, 0x58c824, 0x6a1821, 0x79102b,
+0x10400002, 0x3206021, 0x606021, 0x1801821,
+0x24620007, 0x2418fff8, 0x586024, 0x26c102b,
+0x14400004, 0x1932823, 0x1832823, 0x8003ac3,
+0xc31021, 0xd31021, 0x4a2023, 0x1c4102b,
+0x54400001, 0x8f2021, 0x25420040, 0x4c102b,
+0x14400035, 0x5821, 0x94c3000c, 0x24020800,
+0x54620032, 0xae260018, 0x3c020001, 0x571021,
+0x904283c0, 0x5440002d, 0xae260018, 0x24c20017,
+0x1c2102b, 0x10400013, 0x0, 0x3c02fff5,
+0x461021, 0x90421017, 0x38430006, 0x2c630001,
+0x38420011, 0x2c420001, 0x621825, 0x10600014,
+0x24c20010, 0x1c2102b, 0x1040000e, 0x0,
+0x3c0bfff5, 0x1665821, 0x956b1010, 0x8003af4,
+0x2562000e, 0x90c20017, 0x38430006, 0x2c630001,
+0x38420011, 0x2c420001, 0x621825, 0x10600005,
+0x1601821, 0x94cb0010, 0x2562000e, 0x4a5821,
+0x1601821, 0x24620007, 0x2418fff8, 0x585824,
+0xc31021, 0x4a2023, 0x1c4102b, 0x10400002,
+0x1632823, 0x8f2021, 0xae260018, 0x3c020001,
+0x571021, 0x904283c0, 0x2102b, 0x216c0,
+0x15600002, 0xafa20044, 0x1805821, 0x30820001,
+0x10400007, 0x4021, 0x90880000, 0x24840001,
+0x1c4102b, 0x10400002, 0x24a5ffff, 0x8f2021,
+0x50a00012, 0x81c02, 0x2ca20002, 0x54400009,
+0x24a5ffff, 0x94820000, 0x24840002, 0x1024021,
+0x1c4102b, 0x10400006, 0x24a5fffe, 0x8003b21,
+0x8f2021, 0x90820000, 0x21200, 0x1024021,
+0x14a0fff2, 0x2ca20002, 0x81c02, 0x3102ffff,
+0x624021, 0x3108ffff, 0x1402821, 0x11400011,
+0x2002021, 0x2ca20002, 0x54400009, 0x24a5ffff,
+0x94820000, 0x24840002, 0x1024021, 0x1c4102b,
+0x10400006, 0x24a5fffe, 0x8003b38, 0x8f2021,
+0x90820000, 0x21200, 0x1024021, 0x14a0fff2,
+0x2ca20002, 0x81c02, 0x3102ffff, 0x624021,
+0x81c02, 0x3102ffff, 0x8f890120, 0x624021,
+0x27623800, 0x25230020, 0x62102b, 0x14400002,
+0x3108ffff, 0x27633000, 0x8f820128, 0x10620004,
+0x0, 0x8f820124, 0x14620007, 0x1402821,
+0x8ee201a4, 0x3821, 0x24420001, 0xaee201a4,
+0x8003bc9, 0x8ee201a4, 0x8e260000, 0x8e270004,
+0x81400, 0x3448000b, 0xad300008, 0xa52b000e,
+0xad280018, 0x8fb80044, 0x2021, 0x2961025,
+0x581025, 0xad22001c, 0xe5102b, 0xe53823,
+0xc43023, 0xc23023, 0xad260000, 0xad270004,
+0x8ee204c0, 0xad220010, 0xaf830120, 0x92e24e20,
+0x1440005f, 0x24070001, 0x2502ffee, 0x2c420002,
+0x14400003, 0x24020011, 0x15020024, 0x0,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c830000, 0x24020012, 0x1462000f, 0x0,
+0x8ee34e30, 0x8ee24e34, 0x1062000b, 0x0,
+0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
+0x8ee34e30, 0x24420001, 0x105e002a, 0x0,
+0x8003ba8, 0x0, 0x8ee24e30, 0x24420001,
+0x505e0003, 0x1021, 0x8ee24e30, 0x24420001,
+0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8003bc6, 0x24020012, 0x8ee24e30,
+0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
+0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x105e0007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x8003bb4,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400012, 0xac800000, 0x8003bc9,
+0x0, 0x8ee24e30, 0x24420001, 0x505e0003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020007, 0xac820000, 0x24020001, 0xac820004,
+0x14e00019, 0x3c050006, 0x3c040001, 0x24845890,
+0x8e220018, 0x34a5f209, 0xafa20010, 0x8e220000,
+0x8e230004, 0x2203021, 0x1603821, 0xc002403,
+0xafa30014, 0x93a2002f, 0x1040002a, 0x34028100,
+0x8e430004, 0x8e440008, 0x8e45000c, 0xa642000c,
+0xae430000, 0xae440004, 0xae450008, 0x96220016,
+0x8003c02, 0xa642000e, 0x1599000a, 0x26a1823,
+0x9622000e, 0xa623000a, 0x34420004, 0xa622000e,
+0x3c010001, 0x370821, 0xa02083c0, 0x8003bff,
+0x9821, 0x9624000a, 0x83102b, 0x54400001,
+0x801821, 0x24020001, 0xa623000a, 0x3c010001,
+0x370821, 0xa02283c0, 0x9622000a, 0x4a1821,
+0x2038021, 0x1d0102b, 0x54400001, 0x20f8021,
+0x2639823, 0xb021, 0x8fb80024, 0x1660fe5e,
+0xaf0d0000, 0x12600022, 0x0, 0x3c010001,
+0x370821, 0xac3383c4, 0x3c010001, 0x370821,
+0xac3083c8, 0x3c010001, 0x370821, 0xac3283cc,
+0x93a2002f, 0x10400008, 0x0, 0x3c020001,
+0x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
+0x370821, 0xac2283cc, 0x8f430280, 0x8ee2724c,
+0x14620006, 0x0, 0x8ee201c4, 0x24420001,
+0xaee201c4, 0x8003c5b, 0x8ee201c4, 0x8ee201bc,
+0x24420001, 0xaee201bc, 0x8003c5b, 0x8ee201bc,
+0x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
+0x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0x24020002, 0xaee400c0,
+0xaee500c4, 0x12a2000f, 0x2aa20003, 0x14400017,
+0x24020003, 0x16a20015, 0x0, 0x8ee200d0,
+0x8ee300d4, 0x24630001, 0x2c640001, 0x441021,
+0xaee200d0, 0xaee300d4, 0x8ee200d0, 0x8003c55,
+0x8ee300d4, 0x8ee200d8, 0x8ee300dc, 0x24630001,
+0x2c640001, 0x441021, 0xaee200d8, 0xaee300dc,
+0x8ee200d8, 0x8003c55, 0x8ee300dc, 0x8ee200c8,
+0x8ee300cc, 0x24630001, 0x2c640001, 0x441021,
+0xaee200c8, 0xaee300cc, 0x8ee200c8, 0x8ee300cc,
+0x8f8300e4, 0x8f8200e0, 0x10620003, 0x24630008,
+0xaf8300e4, 0xaf8300e8, 0x8fbf0068, 0x8fbe0064,
+0x8fb60060, 0x8fb5005c, 0x8fb40058, 0x8fb30054,
+0x8fb20050, 0x8fb1004c, 0x8fb00048, 0x3e00008,
+0x27bd0070, 0x27bdffe0, 0xafbf0018, 0x8ee30e14,
+0x8ee20e0c, 0x10620074, 0x0, 0x8ee30e0c,
+0x8ee20e14, 0x622023, 0x4820001, 0x24840200,
+0x8ee30e18, 0x8ee20e14, 0x43102b, 0x14400004,
+0x24020200, 0x8ee30e14, 0x8003c7d, 0x431823,
+0x8ee20e18, 0x8ee30e14, 0x431023, 0x2443ffff,
+0x804821, 0x69102a, 0x54400001, 0x604821,
+0x8f870100, 0x27623000, 0x24e80020, 0x102102b,
+0x50400001, 0x27682800, 0x8f820108, 0x11020004,
+0x0, 0x8f820104, 0x15020007, 0x1021,
+0x8ee201a8, 0x2021, 0x24420001, 0xaee201a8,
+0x8003cbf, 0x8ee201a8, 0x8ee40e14, 0x42140,
+0x801821, 0x8ee40460, 0x8ee50464, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0xace40000,
+0xace50004, 0x8ee30e14, 0x91140, 0xa4e2000e,
+0x24020002, 0xace20018, 0x31940, 0x24630e20,
+0x2e31021, 0xace20008, 0x8ee20e14, 0xace2001c,
+0x8ee204cc, 0xace20010, 0xaf880100, 0x92e204ec,
+0x14400011, 0x24040001, 0x8ee24e28, 0x24030040,
+0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
+0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
+0x24424e38, 0x2e21821, 0x24020002, 0xac620000,
+0x24020001, 0xac620004, 0x1480000e, 0x24030040,
+0x8ee20e14, 0xafa20010, 0x8ee20e18, 0x3c050007,
+0xafa20014, 0x8ee60e0c, 0x8ee70e10, 0x3c040001,
+0x248458d4, 0xc002403, 0x34a5f001, 0x8003cdd,
+0x0, 0x8ee20500, 0x24420001, 0x50430003,
+0x1021, 0x8ee20500, 0x24420001, 0xaee20500,
+0x8ee20500, 0x21080, 0x571021, 0xac490508,
+0x8ee20e14, 0x491021, 0x304201ff, 0xaee20e14,
+0x8ee30e14, 0x8ee20e0c, 0x14620005, 0x0,
+0x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
+0x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
+0xafbf0018, 0x8ee3523c, 0x8ee25238, 0x10620074,
+0x0, 0x8ee35238, 0x8ee2523c, 0x622023,
+0x4820001, 0x24840100, 0x8ee35244, 0x8ee2523c,
+0x43102b, 0x14400004, 0x24020100, 0x8ee3523c,
+0x8003cff, 0x431823, 0x8ee25244, 0x8ee3523c,
+0x431023, 0x2443ffff, 0x804821, 0x69102a,
+0x54400001, 0x604821, 0x8f870100, 0x27623000,
+0x24e80020, 0x102102b, 0x50400001, 0x27682800,
+0x8f820108, 0x11020004, 0x0, 0x8f820104,
+0x15020007, 0x1021, 0x8ee201a8, 0x2021,
+0x24420001, 0xaee201a8, 0x8003d41, 0x8ee201a8,
+0x8ee4523c, 0x42140, 0x801821, 0x8ee40470,
+0x8ee50474, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xace40000, 0xace50004, 0x8ee3523c,
+0x91140, 0xa4e2000e, 0x24020003, 0xace20018,
+0x31940, 0x24635248, 0x2e31021, 0xace20008,
+0x8ee2523c, 0xace2001c, 0x8ee204cc, 0xace20010,
+0xaf880100, 0x92e204ec, 0x14400011, 0x24040001,
+0x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
+0x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
+0x8ee24e28, 0x210c0, 0x24424e38, 0x2e21821,
+0x24020003, 0xac620000, 0x24020001, 0xac620004,
+0x1480000e, 0x24030040, 0x8ee2523c, 0xafa20010,
+0x8ee25244, 0x3c050007, 0xafa20014, 0x8ee65238,
+0x8ee75240, 0x3c040001, 0x248458e0, 0xc002403,
+0x34a5f010, 0x8003d5f, 0x0, 0x8ee20500,
+0x24420001, 0x50430003, 0x1021, 0x8ee20500,
+0x24420001, 0xaee20500, 0x8ee20500, 0x21080,
+0x571021, 0xac490508, 0x8ee2523c, 0x491021,
+0x304200ff, 0xaee2523c, 0x8ee3523c, 0x8ee25238,
+0x14620005, 0x0, 0x8f820060, 0x2403feff,
+0x431024, 0xaf820060, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x8f820120, 0x8ee34e34, 0x8f820124,
+0x8f860128, 0x24020040, 0x24630001, 0x50620003,
+0x1021, 0x8ee24e34, 0x24420001, 0xaee24e34,
+0x8ee24e34, 0x8ee44e34, 0x8ee34e30, 0x210c0,
+0x24425038, 0x14830007, 0x2e22821, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8003d92,
+0xaca00000, 0x8ee24e34, 0x24030040, 0x24420001,
+0x50430003, 0x1021, 0x8ee24e34, 0x24420001,
+0x210c0, 0x24425038, 0x2e22821, 0x8ca20004,
+0x8f830128, 0x21140, 0x621821, 0xaf830128,
+0xaca00000, 0x8cc20018, 0x2443fffe, 0x2c620012,
+0x10400008, 0x31080, 0x3c010001, 0x220821,
+0x8c2258f0, 0x400008, 0x0, 0x24020001,
+0xaee24e24, 0x3e00008, 0x0, 0x27bdffc8,
+0xafbf0030, 0xafb5002c, 0xafb40028, 0xafb30024,
+0xafb20020, 0xafb1001c, 0xafb00018, 0x8f830128,
+0x8f820124, 0x106202b0, 0x9821, 0x3c11001f,
+0x3631ffff, 0x3c12fff5, 0x36521000, 0x24150012,
+0x24140040, 0x8f8c0128, 0x8f820128, 0x24420020,
+0xaf820128, 0x9182001b, 0x8f830128, 0x2443fffe,
+0x2c620012, 0x1040029c, 0x31080, 0x3c010001,
+0x220821, 0x8c225948, 0x400008, 0x0,
+0x8f420218, 0x30420100, 0x10400007, 0x0,
+0x95830016, 0x95820018, 0x621823, 0x31402,
+0x431021, 0xa5820016, 0x8d82001c, 0x3c038000,
+0x3044ffff, 0x436824, 0x3c030800, 0x431824,
+0x11a00004, 0xad84001c, 0x41140, 0x8003dd8,
+0x24425248, 0x41140, 0x24420e20, 0x2e25821,
+0x9562000e, 0x3042fffc, 0x10600004, 0xa562000e,
+0x95840016, 0x8003ec0, 0x0, 0x8d690018,
+0x4021, 0x952a0000, 0x25290002, 0x95270000,
+0x25290002, 0x95260000, 0x25290002, 0x95250000,
+0x25290002, 0x95240000, 0x25290002, 0x95230000,
+0x25290002, 0x95220000, 0x25290002, 0x1475021,
+0x1465021, 0x1455021, 0x1445021, 0x1435021,
+0x1425021, 0xa1c02, 0x3142ffff, 0x625021,
+0xa1c02, 0x3142ffff, 0x625021, 0x96e2046a,
+0x314effff, 0x30420002, 0x10400044, 0x5021,
+0x25220014, 0x222102b, 0x10400014, 0x1201821,
+0x2405000a, 0x2021, 0x223102b, 0x54400001,
+0x721821, 0x94620000, 0x24630002, 0x24a5ffff,
+0x14a0fff9, 0x822021, 0x41c02, 0x3082ffff,
+0x622021, 0x41402, 0x3083ffff, 0x431021,
+0x3042ffff, 0x8003e33, 0x1425021, 0x952a0000,
+0x25290002, 0x95280000, 0x25290002, 0x95270000,
+0x25290002, 0x95260000, 0x25290002, 0x95250000,
+0x25290002, 0x95230000, 0x25290002, 0x95220000,
+0x25290002, 0x95240000, 0x25290002, 0x1485021,
+0x1475021, 0x1465021, 0x1455021, 0x1435021,
+0x1425021, 0x95220000, 0x95230002, 0x1445021,
+0x1425021, 0x1435021, 0xa1c02, 0x3142ffff,
+0x625021, 0xa1c02, 0x3142ffff, 0x625021,
+0x3148ffff, 0x51000001, 0x3408ffff, 0x8d620018,
+0x9443000c, 0x24020800, 0x54620005, 0xa5680010,
+0x9562000e, 0x34420002, 0xa562000e, 0xa5680010,
+0x96e2046a, 0x2821, 0x30420008, 0x14400056,
+0x3021, 0x8d630018, 0x24620024, 0x222102b,
+0x10400034, 0x24690010, 0x229102b, 0x54400001,
+0x1324821, 0x95250000, 0x24690014, 0x229102b,
+0x10400002, 0x24a5ffec, 0x1324821, 0x95220000,
+0x30420fff, 0x14400003, 0x25290002, 0x8003e60,
+0x24130001, 0x9821, 0xa03021, 0x229102b,
+0x54400001, 0x1324821, 0x91220001, 0x25290002,
+0xa22821, 0x229102b, 0x54400001, 0x1324821,
+0x25290002, 0x229102b, 0x54400001, 0x1324821,
+0x95220000, 0x25290002, 0xa22821, 0x229102b,
+0x54400001, 0x1324821, 0x95220000, 0x25290002,
+0xa22821, 0x229102b, 0x54400001, 0x1324821,
+0x95220000, 0x25290002, 0xa22821, 0x229102b,
+0x54400001, 0x1324821, 0x95220000, 0x8003e99,
+0xa22821, 0x94650010, 0x94620014, 0x24690016,
+0x30420fff, 0x14400003, 0x24a5ffec, 0x8003e8c,
+0x24130001, 0x9821, 0xa03021, 0x91230001,
+0x25290004, 0x95220000, 0x25290002, 0x95240000,
+0x25290002, 0xa32821, 0xa22821, 0x95220000,
+0x95230002, 0xa42821, 0xa22821, 0xa32821,
+0x51c02, 0x30a2ffff, 0x622821, 0x51c02,
+0x30a2ffff, 0x622821, 0x96e2046a, 0x30420001,
+0x1040001e, 0x2021, 0x95820016, 0x4e2023,
+0x41402, 0x822021, 0x326200ff, 0x50400002,
+0x862021, 0x852021, 0x41402, 0x822021,
+0x3084ffff, 0x50800001, 0x3404ffff, 0x8d620018,
+0x24430017, 0x223102b, 0x54400001, 0x721821,
+0x90620000, 0x38430011, 0x2c630001, 0x38420006,
+0x2c420001, 0x621825, 0x10600004, 0x0,
+0x9562000e, 0x34420001, 0xa562000e, 0x9562000e,
+0x240a0002, 0x30420004, 0x10400002, 0xa5640012,
+0x240a0004, 0x8f880120, 0x27623800, 0x25090020,
+0x122102b, 0x50400001, 0x27693000, 0x8f820128,
+0x11220004, 0x0, 0x8f820124, 0x15220007,
+0x24040020, 0x8ee201a4, 0x8021, 0x24420001,
+0xaee201a4, 0x8003f4f, 0x8ee201a4, 0x8ee5724c,
+0x8ee60490, 0x8ee70494, 0xad0b0008, 0xa504000e,
+0xad0a0018, 0x52940, 0xa01821, 0x1021,
+0xe33821, 0xe3202b, 0xc23021, 0xc43021,
+0xad060000, 0xad070004, 0x8ee2724c, 0x4d1025,
+0xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
+0x92e24e20, 0x14400060, 0x24100001, 0x2543ffee,
+0x2c630002, 0x39420011, 0x2c420001, 0x621825,
+0x10600024, 0x0, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x1455000f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062000b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x1054002b,
+0x0, 0x8003f2e, 0x0, 0x8ee24e30,
+0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020001, 0x8003f4e,
+0xac950000, 0x8ee24e30, 0x210c0, 0x24425038,
+0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x8003f3a, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400012,
+0xac800000, 0x8003f4f, 0x0, 0x8ee24e30,
+0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020007, 0xac820000,
+0x24020001, 0xac820004, 0x1600000d, 0x0,
+0x8f820120, 0x3c040001, 0x24845938, 0xafa00014,
+0xafa20010, 0x8d86001c, 0x8f870124, 0x3c050008,
+0xc002403, 0x34a50001, 0x8004057, 0x0,
+0x8ee2724c, 0x24420001, 0x304207ff, 0x11a00006,
+0xaee2724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
+0x8003f6b, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
+0xaee201cc, 0x8ee201cc, 0x8ee201d8, 0x2442ffff,
+0xaee201d8, 0x8004057, 0x8ee201d8, 0x8f420240,
+0x104000e5, 0x0, 0x8ee20e1c, 0x24420001,
+0x8004057, 0xaee20e1c, 0x9582001e, 0xad82001c,
+0x8f420240, 0x10400072, 0x0, 0x8ee20e1c,
+0x24420001, 0xaee20e1c, 0x8f430240, 0x43102b,
+0x144000d5, 0x0, 0x8f830120, 0x27623800,
+0x24660020, 0xc2102b, 0x50400001, 0x27663000,
+0x8f820128, 0x10c20004, 0x0, 0x8f820124,
+0x14c20007, 0x0, 0x8ee201a4, 0x8021,
+0x24420001, 0xaee201a4, 0x8003fda, 0x8ee201a4,
+0x8ee2724c, 0xac62001c, 0x8ee404a8, 0x8ee504ac,
+0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
+0x24020011, 0xac620018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400034, 0x24100001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x1455001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x8003fc6, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400011,
+0xac800000, 0x8003fda, 0x0, 0x8ee24e30,
+0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x24020001, 0xac950000,
+0xac820004, 0x5600000b, 0x24100001, 0x8ee2724c,
+0x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
+0x8ee6724c, 0x8f470280, 0x3c050009, 0xc002403,
+0x34a5f008, 0x56000001, 0xaee00e1c, 0x8ee20188,
+0x24420001, 0xaee20188, 0x8004050, 0x8ee20188,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
+0x8004044, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
+0x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
+0x24020008, 0xa462000e, 0x24020011, 0xac620018,
+0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
+0xaf860120, 0x92e24e20, 0x14400034, 0x24100001,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x8c820000, 0x1455001f, 0x0, 0x8ee34e30,
+0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
+0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
+0x24420001, 0x10540007, 0x0, 0x8ee24e34,
+0x24420001, 0x10620005, 0x0, 0x8004030,
+0x0, 0x14600005, 0x0, 0x8f820128,
+0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
+0x2c420011, 0x50400011, 0xac800000, 0x8004044,
+0x0, 0x8ee24e30, 0x24420001, 0x50540003,
+0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
+0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
+0x24020001, 0xac950000, 0xac820004, 0x1600000b,
+0x0, 0x8ee2724c, 0x3c040001, 0x248458a8,
+0xafa00014, 0xafa20010, 0x8ee6724c, 0x8f470280,
+0x3c050009, 0xc002403, 0x34a5f008, 0x8ee20174,
+0x24420001, 0xaee20174, 0x8004057, 0x8ee20174,
+0x24020001, 0xaee24e24, 0x8f830128, 0x8f820124,
+0x1462fd58, 0x0, 0x8fbf0030, 0x8fb5002c,
+0x8fb40028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
+0x8fb00018, 0x3e00008, 0x27bd0038, 0x27bdffe8,
+0x27840208, 0x27450200, 0x24060008, 0xafbf0014,
+0xc00249a, 0xafb00010, 0x2021, 0x24100001,
+0x2402241f, 0xaf900210, 0xaf900200, 0xaf800204,
+0xaf820214, 0x8f460248, 0x24030004, 0x3c020040,
+0x3c010001, 0xac235cc4, 0x3c010001, 0xac235cc8,
+0x3c010001, 0xac205d9c, 0x3c010001, 0xac225cc0,
+0x3c010001, 0xac235cc8, 0xc005108, 0x24050004,
+0xc004822, 0x0, 0x8ee20000, 0x3c03feff,
+0x3463fffd, 0x431024, 0xaee20000, 0x3c023c00,
+0xaf82021c, 0x3c010001, 0x370821, 0xac3083ac,
+0x8fbf0014, 0x8fb00010, 0x3e00008, 0x27bd0018,
+0x27bdffe0, 0x3c050008, 0x34a50400, 0xafbf0018,
+0xafa00010, 0xafa00014, 0x8f860200, 0x3c040001,
+0x248459f0, 0xc002403, 0x3821, 0x8ee20280,
+0x24420001, 0xaee20280, 0x8ee20280, 0x8f830200,
+0x3c023f00, 0x621824, 0x8fbf0018, 0x3c020400,
+0x3e00008, 0x27bd0020, 0x27bdffd8, 0xafbf0020,
+0xafb1001c, 0xafb00018, 0x8f900220, 0x8ee20214,
+0x3821, 0x24420001, 0xaee20214, 0x8ee20214,
+0x3c020300, 0x2021024, 0x10400027, 0x3c110400,
+0xc00429b, 0x0, 0x3c020100, 0x2021024,
+0x10400007, 0x0, 0x8ee20218, 0x24420001,
+0xaee20218, 0x8ee20218, 0x80040c6, 0x3c03fdff,
+0x8ee2021c, 0x24420001, 0xaee2021c, 0x8ee2021c,
+0x3c03fdff, 0x3463ffff, 0x3c0808ff, 0x3508ffff,
+0x8ee20000, 0x3c040001, 0x248459fc, 0x3c050008,
+0x2003021, 0x431024, 0xaee20000, 0x8f820220,
+0x3821, 0x3c030300, 0x481024, 0x431025,
+0xaf820220, 0xafa00010, 0xc002403, 0xafa00014,
+0x8004296, 0x0, 0x2111024, 0x1040001f,
+0x3c024000, 0x8f830224, 0x24021402, 0x1462000b,
+0x3c03fdff, 0x3c040001, 0x24845a08, 0x3c050008,
+0xafa00010, 0xafa00014, 0x8f860224, 0x34a5ffff,
+0xc002403, 0x3821, 0x3c03fdff, 0x8ee20000,
+0x3463ffff, 0x2002021, 0x431024, 0xc004e54,
+0xaee20000, 0x8ee20220, 0x24420001, 0xaee20220,
+0x8ee20220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x8004295, 0x511025, 0x2021024,
+0x10400142, 0x0, 0x8ee2022c, 0x24420001,
+0xaee2022c, 0x8ee2022c, 0x8f820220, 0x3c0308ff,
+0x3463ffff, 0x431024, 0x34420004, 0xaf820220,
+0x8f830054, 0x8f820054, 0x800410e, 0x24630002,
+0x8f820054, 0x621023, 0x2c420003, 0x1440fffc,
+0x0, 0x8f8600e0, 0x8f8400e4, 0x30c20007,
+0x10400012, 0x0, 0x8f8300e4, 0x2402fff8,
+0xc21024, 0x1043000d, 0x0, 0x8f820054,
+0x8f8300e0, 0x14c30009, 0x24440050, 0x8f820054,
+0x821023, 0x2c420051, 0x10400004, 0x0,
+0x8f8200e0, 0x10c2fff9, 0x0, 0x8f820220,
+0x3c0308ff, 0x3463fffd, 0x431024, 0xaf820220,
+0x8f8600e0, 0x30c20007, 0x10400003, 0x2402fff8,
+0xc23024, 0xaf8600e0, 0x8f8300c4, 0x3c02001f,
+0x3442ffff, 0x24680008, 0x48102b, 0x10400003,
+0x3c02fff5, 0x34421000, 0x1024021, 0x8f8b00c8,
+0x8f850120, 0x8f840124, 0x8004145, 0x6021,
+0x27623800, 0x82102b, 0x50400001, 0x27643000,
+0x10a40010, 0x318200ff, 0x8c820018, 0x38430007,
+0x2c630001, 0x3842000b, 0x2c420001, 0x621825,
+0x5060fff3, 0x24840020, 0x8ee20240, 0x240c0001,
+0x24420001, 0xaee20240, 0x8ee20240, 0x8c8b0008,
+0x318200ff, 0x14400065, 0x0, 0x3c020001,
+0x571021, 0x904283c0, 0x14400060, 0x0,
+0x8f8400e4, 0xc41023, 0x218c3, 0x4620001,
+0x24630200, 0x8f8900c4, 0x10600005, 0x24020001,
+0x10620009, 0x0, 0x8004187, 0x0,
+0x8ee20230, 0x1205821, 0x24420001, 0xaee20230,
+0x80041bc, 0x8ee20230, 0x8ee20234, 0x3c05000a,
+0x24420001, 0xaee20234, 0x8c8b0000, 0x34a5f000,
+0x8ee20234, 0x12b1823, 0xa3102b, 0x54400001,
+0x651821, 0x2c62233f, 0x14400040, 0x0,
+0x8f8200e8, 0x24420008, 0xaf8200e8, 0x8f8200e8,
+0x8f8200e4, 0x1205821, 0x24420008, 0xaf8200e4,
+0x80041bc, 0x8f8200e4, 0x8ee20238, 0x3c03000a,
+0x24420001, 0xaee20238, 0x8c840000, 0x3463f000,
+0x8ee20238, 0x883823, 0x67102b, 0x54400001,
+0xe33821, 0x3c020003, 0x34420d40, 0x47102b,
+0x10400003, 0x0, 0x80041bc, 0x805821,
+0x8f8200e4, 0x24440008, 0xaf8400e4, 0x8f8400e4,
+0x10860018, 0x3c05000a, 0x34a5f000, 0x3c0a0003,
+0x354a0d40, 0x8ee2007c, 0x24420001, 0xaee2007c,
+0x8c830000, 0x8ee2007c, 0x683823, 0xa7102b,
+0x54400001, 0xe53821, 0x147102b, 0x54400007,
+0x605821, 0x8f8200e4, 0x24440008, 0xaf8400e4,
+0x8f8400e4, 0x1486ffef, 0x0, 0x14860005,
+0x0, 0x1205821, 0xaf8600e4, 0x80041bc,
+0xaf8600e8, 0xaf8400e4, 0xaf8400e8, 0x8f8200c8,
+0x3c03000a, 0x3463f000, 0x483823, 0x67102b,
+0x54400001, 0xe33821, 0x3c020003, 0x34420d3f,
+0x47102b, 0x54400007, 0x6021, 0x1683823,
+0x67102b, 0x54400003, 0xe33821, 0x80041cf,
+0x3c020003, 0x3c020003, 0x34420d3f, 0x47102b,
+0x14400016, 0x318200ff, 0x14400006, 0x0,
+0x3c020001, 0x571021, 0x904283c0, 0x1040000f,
+0x0, 0x8ee2023c, 0x3c04fdff, 0x8ee30000,
+0x3484ffff, 0x24420001, 0xaee2023c, 0x8ee2023c,
+0x24020001, 0x641824, 0x3c010001, 0x370821,
+0xa02283b8, 0x800422c, 0xaee30000, 0xaf8b00c8,
+0x8f8300c8, 0x8f8200c4, 0x3c04000a, 0x3484f000,
+0x623823, 0x87102b, 0x54400001, 0xe43821,
+0x3c020003, 0x34420d40, 0x47102b, 0x2ce30001,
+0x431025, 0x10400008, 0x0, 0x8f820220,
+0x3c0308ff, 0x3463ffff, 0x431024, 0x3c034000,
+0x431025, 0xaf820220, 0x8f8600e0, 0x8f8400e4,
+0x10c4002a, 0x0, 0x8ee2007c, 0x24420001,
+0xaee2007c, 0x8ee2007c, 0x24c2fff8, 0xaf8200e0,
+0x3c020001, 0x8c427e30, 0x3c030008, 0x8f8600e0,
+0x431024, 0x1040001d, 0x0, 0x10c4001b,
+0x240dfff8, 0x3c0a000a, 0x354af000, 0x3c0c0080,
+0x24850008, 0x27622800, 0x50a20001, 0x27651800,
+0x8c880004, 0x8c820000, 0x8ca90000, 0x3103ffff,
+0x431021, 0x4d1024, 0x24430010, 0x6b102b,
+0x54400001, 0x6a1821, 0x12b102b, 0x54400001,
+0x12a4821, 0x10690002, 0x10c1025, 0xac820004,
+0xa02021, 0x14c4ffeb, 0x24850008, 0x8f820220,
+0x3c0308ff, 0x3463ffff, 0x431024, 0x34420002,
+0xaf820220, 0x8f830054, 0x8f820054, 0x8004237,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820220, 0x3c0308ff,
+0x3463fffb, 0x431024, 0xaf820220, 0x6010055,
+0x0, 0x8ee20228, 0x24420001, 0xaee20228,
+0x8ee20228, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x34420004, 0xaf820220, 0x8f830054,
+0x8f820054, 0x8004251, 0x24630002, 0x8f820054,
+0x621023, 0x2c420003, 0x1440fffc, 0x0,
+0x8f8600e0, 0x30c20007, 0x10400012, 0x0,
+0x8f8300e4, 0x2402fff8, 0xc21024, 0x1043000d,
+0x0, 0x8f820054, 0x8f8300e0, 0x14c30009,
+0x24440032, 0x8f820054, 0x821023, 0x2c420033,
+0x10400004, 0x0, 0x8f8200e0, 0x10c2fff9,
+0x0, 0x8f820220, 0x3c0308ff, 0x3463fffd,
+0x431024, 0xaf820220, 0x8f8600e0, 0x30c20007,
+0x10400003, 0x2402fff8, 0xc23024, 0xaf8600e0,
+0x240301f5, 0x8f8200e8, 0x673823, 0x718c0,
+0x431021, 0xaf8200e8, 0x8f8200e8, 0xaf8200e4,
+0x8ee2007c, 0x3c0408ff, 0x3484ffff, 0x471021,
+0xaee2007c, 0x8f820220, 0x3c038000, 0x34630002,
+0x441024, 0x431025, 0xaf820220, 0x8f830054,
+0x8f820054, 0x800428d, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x8f820220, 0x3c0308ff, 0x3463fffb, 0x431024,
+0xaf820220, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
+0x3e00008, 0x27bd0028, 0x3c020001, 0x8c425cd8,
+0x27bdffd8, 0x10400012, 0xafbf0020, 0x3c040001,
+0x24845a14, 0x3c050008, 0x24020001, 0x3c010001,
+0x370821, 0xac2283ac, 0xafa00010, 0xafa00014,
+0x8f860220, 0x34a50498, 0x3c010001, 0xac205cd8,
+0x3c010001, 0xac225ccc, 0xc002403, 0x3821,
+0x8f420268, 0x3c037fff, 0x3463ffff, 0x431024,
+0xaf420268, 0x8ee204d0, 0x8ee404d4, 0x2403fffe,
+0x431024, 0x30840002, 0x1080011e, 0xaee204d0,
+0x8ee204d4, 0x2403fffd, 0x431024, 0xaee204d4,
+0x8f820044, 0x3c030600, 0x34632000, 0x34420020,
+0xaf820044, 0xafa30018, 0x8ee20608, 0x8f430228,
+0x24420001, 0x304a00ff, 0x514300fe, 0xafa00010,
+0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
+0x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
+0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
+0x1040006a, 0x5821, 0x24180008, 0x240f000d,
+0x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
+0x27623800, 0x24e80020, 0x102102b, 0x50400001,
+0x27683000, 0x8f820128, 0x11020004, 0x0,
+0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
+0x2821, 0x24420001, 0xaee201a4, 0x800433d,
+0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
+0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
+0x822021, 0x862021, 0xace40000, 0xace50004,
+0x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
+0x210c0, 0x2442060c, 0x2e21021, 0xace20008,
+0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
+0x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x800432a, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
+0xac800000, 0x800433d, 0x0, 0x8ee24e30,
+0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
+0x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
+0x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
+0x24020001, 0x54620079, 0xafa00010, 0xaeea0608,
+0x8f830054, 0x8f820054, 0x24690032, 0x1221023,
+0x2c420033, 0x10400061, 0x5821, 0x240d0008,
+0x240c0011, 0x24080012, 0x24070040, 0x240a0001,
+0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
+0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
+0x0, 0x8f820124, 0x14c20007, 0x0,
+0x8ee201a4, 0x2821, 0x24420001, 0xaee201a4,
+0x80043a9, 0x8ee201a4, 0x8ee20608, 0xac62001c,
+0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
+0xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
+0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
+0x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
+0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
+0x0, 0x8c820004, 0x24420001, 0xac820004,
+0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
+0x0, 0x8ee24e34, 0x24420001, 0x10620005,
+0x0, 0x8004396, 0x0, 0x14600005,
+0x0, 0x8f820128, 0x24420020, 0xaf820128,
+0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
+0xac800000, 0x80043a9, 0x0, 0x8ee24e30,
+0x24420001, 0x50470003, 0x1021, 0x8ee24e30,
+0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
+0x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
+0x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
+0x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
+0x24020001, 0x54620003, 0xafa00010, 0x80043d6,
+0x0, 0x3c040001, 0x24845a20, 0xafa00014,
+0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
+0x34a5f011, 0x80043d6, 0x0, 0x3c040001,
+0x24845a2c, 0xafa00014, 0x8f860120, 0x8f870124,
+0x3c050009, 0xc002403, 0x34a5f010, 0x80043d6,
+0x0, 0x3c040001, 0x24845a38, 0xafa00014,
+0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
+0x34a5f00f, 0x8ee201ac, 0x24420001, 0xaee201ac,
+0x8ee201ac, 0x8ee2015c, 0x24420001, 0xaee2015c,
+0x8ee2015c, 0x8fbf0020, 0x3e00008, 0x27bd0028,
+0x3c020001, 0x8c425cd8, 0x27bdffe0, 0x1440000d,
+0xafbf0018, 0x3c040001, 0x24845a44, 0x3c050008,
+0xafa00010, 0xafa00014, 0x8f860220, 0x34a50499,
+0x24020001, 0x3c010001, 0xac225cd8, 0xc002403,
+0x3821, 0x8ee204d0, 0x3c030001, 0x771821,
+0x946383b2, 0x34420001, 0x10600007, 0xaee204d0,
+0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
+0x34420008, 0xaf820220, 0x2021, 0xc0052a2,
+0x24050004, 0xaf420268, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x3c120001,
+0x26521200, 0x3c140001, 0x8e945c50, 0x3c100001,
+0x26101120, 0x3c15c000, 0x36b50060, 0x8e8a0000,
+0x8eb30000, 0x26a400b, 0x248000a, 0x200f821,
+0x0, 0xd, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x80014d6,
+0x0, 0x80014d8, 0x3c0a0001, 0x80014d8,
+0x3c0a0002, 0x80014d8, 0x0, 0x80024a6,
+0x0, 0x80014d8, 0x3c0a0003, 0x80014d8,
+0x3c0a0004, 0x8002f8c, 0x0, 0x80014d8,
+0x3c0a0005, 0x8003ce8, 0x0, 0x8003c66,
+0x0, 0x80014d8, 0x3c0a0006, 0x80014d8,
+0x3c0a0007, 0x80014d8, 0x0, 0x80014d8,
+0x0, 0x80014d8, 0x0, 0x8002a75,
+0x0, 0x80014d8, 0x3c0a000b, 0x80014d8,
+0x3c0a000c, 0x80014d8, 0x3c0a000d, 0x800237a,
+0x0, 0x8002339, 0x0, 0x80014d8,
+0x3c0a000e, 0x8001b3c, 0x0, 0x80024a4,
+0x0, 0x80014d8, 0x3c0a000f, 0x80040a7,
+0x0, 0x8004091, 0x0, 0x80014d8,
+0x3c0a0010, 0x80014ee, 0x0, 0x80014d8,
+0x3c0a0011, 0x80014d8, 0x3c0a0012, 0x80014d8,
+0x3c0a0013, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x3c030001,
+0x34633800, 0x24050080, 0x2404001f, 0x2406ffff,
+0x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
+0x3631021, 0xaf8200c0, 0x3631021, 0xaf8200c4,
+0x3631021, 0xaf8200c8, 0x27623800, 0xaf8200d0,
+0x27623800, 0xaf8200d4, 0x27623800, 0xaf8200d8,
+0x27621800, 0xaf8200e0, 0x27621800, 0xaf8200e4,
+0x27621800, 0xaf8200e8, 0x27621000, 0xaf8200f0,
+0x27621000, 0xaf8200f4, 0x27621000, 0xaf8200f8,
+0xaca00000, 0x2484ffff, 0x1486fffd, 0x24a50004,
+0x8f830040, 0x3c02f000, 0x621824, 0x3c025000,
+0x1062000c, 0x43102b, 0x14400006, 0x3c026000,
+0x3c024000, 0x10620008, 0x24020800, 0x8004539,
+0x0, 0x10620004, 0x24020800, 0x8004539,
+0x0, 0x24020700, 0x3c010001, 0xac225cdc,
+0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
+0xafb00020, 0x8f830054, 0x8f820054, 0x3c010001,
+0xac205cc4, 0x8004545, 0x24630064, 0x8f820054,
+0x621023, 0x2c420065, 0x1440fffc, 0x0,
+0xc004d71, 0x0, 0x24040001, 0x2821,
+0x27a60018, 0x34028000, 0xc00498e, 0xa7a20018,
+0x8f830054, 0x8f820054, 0x8004556, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x24050001, 0xc00494c, 0x27a60018,
+0x8f830054, 0x8f820054, 0x8004562, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x24050001, 0xc00494c, 0x27a60018,
+0x8f830054, 0x8f820054, 0x800456e, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x3c060001, 0x24c65da0, 0xc00494c,
+0x24050002, 0x8f830054, 0x8f820054, 0x800457b,
+0x24630064, 0x8f820054, 0x621023, 0x2c420065,
+0x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
+0x26105da2, 0xc00494c, 0x2003021, 0x97a60018,
+0x3c070001, 0x94e75da0, 0x3c040001, 0x24845ab0,
+0xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
+0xc002403, 0xafa20010, 0x97a20018, 0x1040004c,
+0x24036040, 0x96020000, 0x3042fff0, 0x1443000a,
+0x24020020, 0x3c030001, 0x94635da0, 0x54620009,
+0x24027830, 0x24020003, 0x3c010001, 0xac225cc4,
+0x80045ac, 0x24020005, 0x3c030001, 0x94635da0,
+0x24027830, 0x1462000f, 0x24030010, 0x3c020001,
+0x94425da2, 0x3042fff0, 0x1443000a, 0x24020003,
+0x3c010001, 0xac225cc4, 0x24020006, 0x3c010001,
+0xac225db0, 0x3c010001, 0xac225dbc, 0x80045e6,
+0x3c09fff0, 0x3c020001, 0x8c425cc4, 0x3c030001,
+0x94635da0, 0x34420001, 0x3c010001, 0xac225cc4,
+0x24020015, 0x1462000f, 0x0, 0x3c020001,
+0x94425da2, 0x3042fff0, 0x3843f420, 0x2c630001,
+0x3842f430, 0x2c420001, 0x621825, 0x10600005,
+0x24020003, 0x3c010001, 0xac225dbc, 0x80045e6,
+0x3c09fff0, 0x3c030001, 0x94635da0, 0x24027810,
+0x1462000b, 0x24020002, 0x3c020001, 0x94425da2,
+0x3042fff0, 0x14400006, 0x24020002, 0x24020004,
+0x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
+0x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
+0x3c020001, 0x8c425cc4, 0x24030001, 0x3c010001,
+0xac235dbc, 0x34420004, 0x3c010001, 0xac225cc4,
+0x3c09fff0, 0x3529bdc0, 0x3c060001, 0x8cc65cc4,
+0x3c040001, 0x24845ab0, 0x24020001, 0x3c010001,
+0xac225ccc, 0x8f820054, 0x3c070001, 0x8ce75dbc,
+0x3c030001, 0x94635da0, 0x3c080001, 0x95085da2,
+0x3c05000d, 0x34a50100, 0x3c010001, 0xac205cc8,
+0x491021, 0x3c010001, 0xac225dac, 0xafa30010,
+0xc002403, 0xafa80014, 0x8fbf0024, 0x8fb00020,
+0x3e00008, 0x27bd0028, 0x27bdffe8, 0x3c050001,
+0x8ca55cc8, 0x24060004, 0x24020001, 0x14a20014,
+0xafbf0010, 0x3c020001, 0x8c427e3c, 0x30428000,
+0x10400005, 0x3c04000f, 0x3c030001, 0x8c635dbc,
+0x8004617, 0x34844240, 0x3c040004, 0x3c030001,
+0x8c635dbc, 0x348493e0, 0x24020005, 0x14620016,
+0x0, 0x3c04003d, 0x800462f, 0x34840900,
+0x3c020001, 0x8c427e38, 0x30428000, 0x10400005,
+0x3c04001e, 0x3c030001, 0x8c635dbc, 0x800462a,
+0x34848480, 0x3c04000f, 0x3c030001, 0x8c635dbc,
+0x34844240, 0x24020005, 0x14620003, 0x0,
+0x3c04007a, 0x34841200, 0x3c020001, 0x8c425dac,
+0x8f830054, 0x441021, 0x431023, 0x44102b,
+0x14400037, 0x0, 0x3c020001, 0x8c425cd0,
+0x14400033, 0x0, 0x3c010001, 0x10c00025,
+0xac205ce0, 0x3c090001, 0x8d295cc4, 0x24070001,
+0x3c044000, 0x3c080001, 0x25087e3c, 0x250afffc,
+0x52842, 0x14a00002, 0x24c6ffff, 0x24050008,
+0xa91024, 0x10400010, 0x0, 0x14a70008,
+0x0, 0x8d020000, 0x441024, 0x1040000a,
+0x0, 0x3c010001, 0x800465b, 0xac255ce0,
+0x8d420000, 0x441024, 0x10400003, 0x0,
+0x3c010001, 0xac275ce0, 0x3c020001, 0x8c425ce0,
+0x6182b, 0x2c420001, 0x431024, 0x5440ffe5,
+0x52842, 0x8f820054, 0x3c030001, 0x8c635ce0,
+0x3c010001, 0xac225dac, 0x1060002a, 0x24020001,
+0x3c010001, 0xac255cc8, 0x3c010001, 0xac225ccc,
+0x3c020001, 0x8c425ce0, 0x10400022, 0x0,
+0x3c020001, 0x8c425ccc, 0x1040000a, 0x24020001,
+0x3c010001, 0xac205ccc, 0x3c010001, 0x370821,
+0xac2283ac, 0x3c010001, 0xac205d4c, 0x3c010001,
+0xac225d04, 0x3c030001, 0x771821, 0x8c6383ac,
+0x24020008, 0x10620005, 0x24020001, 0xc004695,
+0x0, 0x8004692, 0x0, 0x3c030001,
+0x8c635cc8, 0x10620007, 0x2402000e, 0x3c030001,
+0x8c637dd0, 0x10620003, 0x0, 0xc004e54,
+0x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
+0x27bdffe0, 0x3c02fdff, 0xafbf0018, 0x8ee30000,
+0x3c050001, 0x8ca55cc8, 0x3c040001, 0x8c845cf0,
+0x3442ffff, 0x621824, 0x14a40008, 0xaee30000,
+0x3c030001, 0x771821, 0x8c6383ac, 0x3c020001,
+0x8c425cf4, 0x10620008, 0x0, 0x3c020001,
+0x571021, 0x8c4283ac, 0x3c010001, 0xac255cf0,
+0x3c010001, 0xac225cf4, 0x3c030001, 0x8c635cc8,
+0x24020002, 0x10620169, 0x2c620003, 0x10400005,
+0x24020001, 0x10620008, 0x0, 0x800481c,
+0x0, 0x24020004, 0x106200b1, 0x24020001,
+0x800481d, 0x0, 0x3c020001, 0x571021,
+0x8c4283ac, 0x2443ffff, 0x2c620008, 0x1040015a,
+0x31080, 0x3c010001, 0x220821, 0x8c225ac8,
+0x400008, 0x0, 0x3c030001, 0x8c635dbc,
+0x24020005, 0x14620014, 0x0, 0x3c020001,
+0x8c425cd4, 0x1040000a, 0x24020003, 0xc004822,
+0x0, 0x24020002, 0x3c010001, 0x370821,
+0xac2283ac, 0x3c010001, 0x80046e0, 0xac205cd4,
+0x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
+0x800481f, 0xac205c60, 0xc004822, 0x0,
+0x3c020001, 0x8c425cd4, 0x3c010001, 0xac205c60,
+0x104000dd, 0x24020002, 0x3c010001, 0x370821,
+0xac2283ac, 0x3c010001, 0x800481f, 0xac205cd4,
+0x3c030001, 0x8c635dbc, 0x24020005, 0x14620003,
+0x24020001, 0x3c010001, 0xac225d00, 0xc0049cf,
+0x0, 0x3c030001, 0x8c635d00, 0x800478e,
+0x24020011, 0x3c050001, 0x8ca55cc8, 0x3c060001,
+0x8cc67e3c, 0xc005108, 0x2021, 0x24020005,
+0x3c010001, 0xac205cd4, 0x3c010001, 0x370821,
+0x800481f, 0xac2283ac, 0x3c040001, 0x24845abc,
+0x3c05000f, 0x34a50100, 0x3021, 0x3821,
+0xafa00010, 0xc002403, 0xafa00014, 0x800481f,
+0x0, 0x8f820220, 0x3c03f700, 0x431025,
+0x80047b7, 0xaf820220, 0x8f820220, 0x3c030004,
+0x431024, 0x144000a9, 0x24020007, 0x8f830054,
+0x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
+0x2c422710, 0x144000f8, 0x24020001, 0x800481d,
+0x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
+0x2021, 0xc005386, 0x2021, 0x3c030001,
+0x8c637e34, 0x46100ea, 0x24020001, 0x3c020008,
+0x621024, 0x10400006, 0x0, 0x8f820214,
+0x3c03ffff, 0x431024, 0x8004741, 0x3442251f,
+0x8f820214, 0x3c03ffff, 0x431024, 0x3442241f,
+0xaf820214, 0x8ee20000, 0x3c030200, 0x431025,
+0xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
+0xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
+0x24020008, 0x3c010001, 0x370821, 0xac2283ac,
+0x8f820220, 0x3c030004, 0x431024, 0x14400005,
+0x0, 0x8f820220, 0x3c03f700, 0x431025,
+0xaf820220, 0x3c030001, 0x8c635dbc, 0x24020005,
+0x1462000a, 0x0, 0x3c020001, 0x94425da2,
+0x24429fbc, 0x2c420004, 0x10400004, 0x24040018,
+0x24050002, 0xc004d93, 0x24060020, 0xc0043dd,
+0x0, 0x3c010001, 0x800481f, 0xac205d50,
+0x3c020001, 0x571021, 0x8c4283ac, 0x2443ffff,
+0x2c620008, 0x104000ac, 0x31080, 0x3c010001,
+0x220821, 0x8c225ae8, 0x400008, 0x0,
+0xc00429b, 0x0, 0x3c010001, 0xac205ccc,
+0xaf800204, 0x3c010001, 0xc004822, 0xac207e20,
+0x24020001, 0x3c010001, 0xac225ce4, 0x24020002,
+0x3c010001, 0x370821, 0x800481f, 0xac2283ac,
+0xc00489f, 0x0, 0x3c030001, 0x8c635ce4,
+0x24020009, 0x14620090, 0x24020003, 0x3c010001,
+0x370821, 0x800481f, 0xac2283ac, 0x3c020001,
+0x8c427e38, 0x30424000, 0x10400005, 0x0,
+0x8f820044, 0x3c03ffff, 0x800479f, 0x34637fff,
+0x8f820044, 0x2403ff7f, 0x431024, 0xaf820044,
+0x8f830054, 0x80047b9, 0x24020004, 0x8f830054,
+0x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
+0x2c422710, 0x14400074, 0x24020005, 0x3c010001,
+0x370821, 0x800481f, 0xac2283ac, 0x8f820220,
+0x3c03f700, 0x431025, 0xaf820220, 0xaf800204,
+0x3c010001, 0xac207e20, 0x8f830054, 0x24020006,
+0x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
+0x800481f, 0xac235da4, 0x8f830054, 0x3c020001,
+0x8c425da4, 0x2463fff6, 0x431023, 0x2c42000a,
+0x14400059, 0x0, 0x24020007, 0x3c010001,
+0x370821, 0x800481f, 0xac2283ac, 0x8f820220,
+0x3c04f700, 0x441025, 0xaf820220, 0x8f820220,
+0x3c030300, 0x431024, 0x14400005, 0x1821,
+0x8f820220, 0x24030001, 0x441025, 0xaf820220,
+0x10600043, 0x24020001, 0x8f820214, 0x3c03ffff,
+0x3c040001, 0x8c845d98, 0x431024, 0x3442251f,
+0xaf820214, 0x24020008, 0x3c010001, 0x370821,
+0x1080000b, 0xac2283ac, 0x3c020001, 0x8c425d74,
+0x14400007, 0x24020001, 0x3c010001, 0xac227dd0,
+0xc004e54, 0x8f840220, 0x800480c, 0x0,
+0x8f820220, 0x3c030008, 0x431024, 0x14400017,
+0x2402000e, 0x3c010001, 0xac227dd0, 0x8ee20000,
+0x2021, 0x3c030200, 0x431025, 0xc005386,
+0xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
+0xaf820220, 0x8f820220, 0x34420002, 0xc0043dd,
+0xaf820220, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
+0x2021, 0x800481f, 0x0, 0x3c020001,
+0x8c425d74, 0x10400010, 0x0, 0x3c020001,
+0x8c425d70, 0x2442ffff, 0x3c010001, 0xac225d70,
+0x14400009, 0x24020002, 0x3c010001, 0xac205d74,
+0x3c010001, 0x800481f, 0xac225d70, 0x24020001,
+0x3c010001, 0xac225ccc, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
+0x34420004, 0xaf820220, 0x8f820200, 0x3c060001,
+0x8cc65cc8, 0x34420004, 0xaf820200, 0x24020002,
+0x10c2003a, 0x2cc20003, 0x10400005, 0x24020001,
+0x10c20008, 0x0, 0x8004868, 0x0,
+0x24020004, 0x10c20013, 0x24020001, 0x8004868,
+0x0, 0x3c030001, 0x8c635cb8, 0x3c020001,
+0x8c425cc0, 0x3c040001, 0x8c845cdc, 0x3c050001,
+0x8ca55cbc, 0xaf860200, 0xaf860220, 0x34630022,
+0x441025, 0x451025, 0x34420002, 0x8004867,
+0xaf830200, 0x3c030001, 0x8c635d98, 0xaf820200,
+0x10600009, 0xaf820220, 0x3c020001, 0x8c425d74,
+0x14400005, 0x3c033f00, 0x3c020001, 0x8c425cb0,
+0x800485b, 0x346300e0, 0x3c020001, 0x8c425cb0,
+0x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
+0x3c030001, 0x8c635cb4, 0x3c04f700, 0x3c020001,
+0x8c425cc0, 0x3c050001, 0x8ca55cdc, 0x641825,
+0x431025, 0x451025, 0xaf820220, 0x3e00008,
+0x0, 0x8f820220, 0x3c030001, 0x8c635cc8,
+0x34420004, 0xaf820220, 0x24020001, 0x1062000f,
+0x0, 0x8f830054, 0x8f820054, 0x24630002,
+0x621023, 0x2c420003, 0x10400011, 0x0,
+0x8f820054, 0x621023, 0x2c420003, 0x1040000c,
+0x0, 0x8004879, 0x0, 0x8f830054,
+0x8f820054, 0x8004885, 0x24630007, 0x8f820054,
+0x621023, 0x2c420008, 0x1440fffc, 0x0,
+0x8f8400e0, 0x30820007, 0x1040000d, 0x0,
+0x8f820054, 0x8f8300e0, 0x14830009, 0x24450032,
+0x8f820054, 0xa21023, 0x2c420033, 0x10400004,
+0x0, 0x8f8200e0, 0x1082fff9, 0x0,
+0x8f820220, 0x2403fffd, 0x431024, 0xaf820220,
+0x3e00008, 0x0, 0x3c030001, 0x8c635ce4,
+0x3c020001, 0x8c425ce8, 0x50620004, 0x2463ffff,
+0x3c010001, 0xac235ce8, 0x2463ffff, 0x2c620009,
+0x1040009d, 0x31080, 0x3c010001, 0x220821,
+0x8c225b08, 0x400008, 0x0, 0x8f820044,
+0x34428080, 0xaf820044, 0x8f830054, 0x8004938,
+0x24020002, 0x8f830054, 0x3c020001, 0x8c425da8,
+0x2463d8f0, 0x431023, 0x2c422710, 0x1440008a,
+0x24020003, 0x8004945, 0x0, 0x8f820044,
+0x3c03ffff, 0x34637fff, 0x431024, 0xaf820044,
+0x8f830054, 0x8004938, 0x24020004, 0x8f830054,
+0x3c020001, 0x8c425da8, 0x2463fff6, 0x431023,
+0x2c42000a, 0x14400078, 0x24020005, 0x8004945,
+0x0, 0x8f820220, 0x3c03f700, 0x431025,
+0xaf820220, 0x8f820220, 0x2403fffb, 0x431024,
+0xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
+0x3c023f00, 0x344200e0, 0xaf820200, 0x8f820200,
+0x2403fffd, 0x431024, 0xaf820200, 0x24040001,
+0x3405ffff, 0xaf840204, 0x8f830054, 0x8f820054,
+0x80048ec, 0x24630001, 0x8f820054, 0x621023,
+0x2c420002, 0x1440fffc, 0x0, 0x8f820224,
+0x42040, 0xa4102b, 0x1040fff2, 0x0,
+0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
+0x8f820214, 0x3c03ffff, 0x431024, 0x3442251f,
+0xaf820214, 0x8f820220, 0x2403fffb, 0x431024,
+0xaf820220, 0x8f820220, 0x3c04f700, 0x34840008,
+0x34420002, 0xaf820220, 0x8f820220, 0x3c033f00,
+0x346300e2, 0x441025, 0xaf820220, 0xaf830200,
+0x8f8400f0, 0x276217f8, 0x14820002, 0x24850008,
+0x27651000, 0x8f8200f4, 0x10a20007, 0x3c038000,
+0x34630040, 0x3c020001, 0x24425c70, 0xac820000,
+0xac830004, 0xaf8500f0, 0x8f830054, 0x8004938,
+0x24020006, 0x8f830054, 0x3c020001, 0x8c425da8,
+0x2463fff6, 0x431023, 0x2c42000a, 0x14400022,
+0x24020007, 0x8004945, 0x0, 0x8f8200e0,
+0xaf8200e4, 0x8f8200e0, 0xaf8200e8, 0x8f820220,
+0x34420004, 0xaf820220, 0x8f820220, 0x2403fff7,
+0x431024, 0xaf820220, 0x8f820044, 0x34428080,
+0xaf820044, 0x8f830054, 0x24020008, 0x3c010001,
+0xac225ce4, 0x3c010001, 0x8004947, 0xac235da8,
+0x8f830054, 0x3c020001, 0x8c425da8, 0x2463d8f0,
+0x431023, 0x2c422710, 0x14400003, 0x24020009,
+0x3c010001, 0xac225ce4, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x27bdffd8,
+0xafb20018, 0x809021, 0xafb3001c, 0xa09821,
+0xafb10014, 0xc08821, 0xafb00010, 0x8021,
+0xafbf0020, 0xa6200000, 0xc004d4b, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d4b, 0x2021, 0xc004d4b, 0x24040001,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x24100010, 0x2501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x1600fffa,
+0x2501024, 0x24100010, 0x2701024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x2701024, 0xc004d71, 0x34108000,
+0xc004d71, 0x0, 0xc004d2b, 0x0,
+0x50400005, 0x108042, 0x96220000, 0x501025,
+0xa6220000, 0x108042, 0x1600fff7, 0x0,
+0xc004d71, 0x0, 0x8fbf0020, 0x8fb3001c,
+0x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
+0x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
+0xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
+0xafb00010, 0x8021, 0xafbf0020, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x2301024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x2301024, 0x24100010, 0x2501024,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x2501024, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x34108000,
+0x96620000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
+0x0, 0xc004d71, 0x0, 0x8fbf0020,
+0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
+0x3e00008, 0x27bd0028, 0x3c030001, 0x8c635d00,
+0x3c020001, 0x8c425d48, 0x27bdffd8, 0xafbf0020,
+0xafb1001c, 0x10620003, 0xafb00018, 0x3c010001,
+0xac235d48, 0x2463ffff, 0x2c620013, 0x10400349,
+0x31080, 0x3c010001, 0x220821, 0x8c225b30,
+0x400008, 0x0, 0xc004d71, 0x8021,
+0x34028000, 0xa7a20010, 0x27b10010, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0xc004d4b,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8004d24, 0x24020002, 0x27b10010, 0xa7a00010,
+0x8021, 0xc004d4b, 0x24040001, 0x26100001,
+0x2e020020, 0x1440fffb, 0x0, 0xc004d4b,
+0x2021, 0xc004d4b, 0x24040001, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x24100010,
+0x32020001, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020001,
+0x24100010, 0xc004d4b, 0x2021, 0x108042,
+0x1600fffc, 0x0, 0xc004d71, 0x34108000,
+0xc004d71, 0x0, 0xc004d2b, 0x0,
+0x50400005, 0x108042, 0x96220000, 0x501025,
+0xa6220000, 0x108042, 0x1600fff7, 0x0,
+0xc004d71, 0x0, 0x97a20010, 0x30428000,
+0x144002dc, 0x24020003, 0x8004d24, 0x0,
+0x24021200, 0xa7a20010, 0x27b10010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0xc004d4b, 0x2021, 0x108042, 0x1600fffc,
+0x0, 0xc004d4b, 0x24040001, 0xc004d4b,
+0x2021, 0x34108000, 0x96220000, 0x501024,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fff8, 0x0, 0xc004d71,
+0x0, 0x8f830054, 0x8004d16, 0x24020004,
+0x8f830054, 0x3c020001, 0x8c425db8, 0x2463ff9c,
+0x431023, 0x2c420064, 0x1440029e, 0x24020002,
+0x3c030001, 0x8c635dbc, 0x10620297, 0x2c620003,
+0x14400296, 0x24020011, 0x24020003, 0x10620005,
+0x24020004, 0x10620291, 0x2402000f, 0x8004d24,
+0x24020011, 0x8004d24, 0x24020005, 0x24020014,
+0xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x32020012,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020012, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x34108000,
+0x96220000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
+0x0, 0xc004d71, 0x0, 0x8f830054,
+0x8004d16, 0x24020006, 0x8f830054, 0x3c020001,
+0x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
+0x14400250, 0x24020007, 0x8004d24, 0x0,
+0x24020006, 0xa7a20010, 0x27b10010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020013, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020013,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8f830054, 0x8004d16, 0x24020008, 0x8f830054,
+0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
+0x2c420064, 0x1440020f, 0x24020009, 0x8004d24,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
+0xc004d4b, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d71, 0x34108000, 0xc004d71, 0x0,
+0xc004d2b, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004d71, 0x8021,
+0x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8f830054, 0x8004d16, 0x2402000a, 0x8f830054,
+0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
+0x2c420064, 0x1440019b, 0x2402000b, 0x8004d24,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
+0xc004d4b, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020017, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
+0xc004d71, 0x34108000, 0xc004d71, 0x0,
+0xc004d2b, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004d71, 0x8021,
+0x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020017, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8f830054, 0x8004d16, 0x2402000c, 0x8f830054,
+0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
+0x2c420064, 0x14400127, 0x24020012, 0x8004d24,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
+0xc004d4b, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020014, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
+0xc004d71, 0x34108000, 0xc004d71, 0x0,
+0xc004d2b, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004d71, 0x8021,
+0x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020014, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8f830054, 0x8004d16, 0x24020013, 0x8f830054,
+0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
+0x2c420064, 0x144000b3, 0x2402000d, 0x8004d24,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
+0xc004d4b, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d71, 0x34108000, 0xc004d71, 0x0,
+0xc004d2b, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004d71, 0x8021,
+0x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
+0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fff8, 0x0, 0xc004d71, 0x0,
+0x8f830054, 0x8004d16, 0x2402000e, 0x24020840,
+0xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x32020013,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x32020013, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x34108000,
+0x96220000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
+0x0, 0xc004d71, 0x0, 0x8f830054,
+0x24020010, 0x3c010001, 0xac225d00, 0x3c010001,
+0x8004d26, 0xac235db8, 0x8f830054, 0x3c020001,
+0x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
+0x14400004, 0x0, 0x24020011, 0x3c010001,
+0xac225d00, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
+0x3e00008, 0x27bd0028, 0x8f850044, 0x8f820044,
+0x3c030001, 0x431025, 0x3c030008, 0xaf820044,
+0x8f840054, 0x8f820054, 0xa32824, 0x8004d37,
+0x24840001, 0x8f820054, 0x821023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
+0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
+0x8f820054, 0x8004d45, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x3e00008, 0xa01021, 0x8f830044, 0x3c02fff0,
+0x3442ffff, 0x42480, 0x621824, 0x3c020002,
+0x822025, 0x641825, 0xaf830044, 0x8f820044,
+0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
+0x8f830054, 0x8f820054, 0x8004d5e, 0x24630001,
+0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
+0x0, 0x8f820044, 0x3c030001, 0x431025,
+0xaf820044, 0x8f830054, 0x8f820054, 0x8004d6b,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x3e00008, 0x0,
+0x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
+0xaf820044, 0x8f820044, 0x3c030001, 0x431025,
+0xaf820044, 0x8f830054, 0x8f820054, 0x8004d7f,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
+0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
+0x8f820054, 0x8004d8d, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
+0x809821, 0xafb5002c, 0xa0a821, 0xafb20020,
+0xc09021, 0x32a2ffff, 0xafbf0030, 0xafb40028,
+0xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
+0x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x34108000,
+0x96420000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x12000075,
+0x0, 0x8004dc9, 0x0, 0x3274ffff,
+0x27b10010, 0xa7a00010, 0x8021, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x24040001, 0xc004d4b,
+0x2021, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2901024,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x2901024, 0xc004d71,
+0x34108000, 0xc004d71, 0x0, 0xc004d2b,
+0x0, 0x50400005, 0x108042, 0x96220000,
+0x501025, 0xa6220000, 0x108042, 0x1600fff7,
+0x0, 0xc004d71, 0x0, 0x32a5ffff,
+0x24020001, 0x54a20004, 0x24020002, 0x97a20010,
+0x8004e14, 0x521025, 0x14a20006, 0x3271ffff,
+0x97a20010, 0x121827, 0x431024, 0xa7a20010,
+0x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d4b, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
+0x10400002, 0x2021, 0x24040001, 0xc004d4b,
+0x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
+0x24040001, 0xc004d4b, 0x2021, 0x34108000,
+0x96420000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
+0x0, 0xc004d71, 0x0, 0x8fbf0030,
+0x8fb5002c, 0x8fb40028, 0x8fb30024, 0x8fb20020,
+0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
+0x0, 0x0, 0x0, 0x27bdffe8,
+0xafbf0010, 0x3c030001, 0x771821, 0x8c6383ac,
+0x24020008, 0x1462022c, 0x803021, 0x3c020001,
+0x8c425d98, 0x14400033, 0x0, 0x8f850224,
+0x38a30020, 0x2c630001, 0x38a20010, 0x2c420001,
+0x621825, 0x1460000d, 0x38a30030, 0x2c630001,
+0x38a20400, 0x2c420001, 0x621825, 0x14600007,
+0x38a30402, 0x2c630001, 0x38a20404, 0x2c420001,
+0x621825, 0x10600005, 0x0, 0xc00429b,
+0x0, 0x8004e8d, 0x2402000e, 0xc0043dd,
+0x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
+0x2021, 0x3c030001, 0x8c635cc8, 0x24020004,
+0x14620005, 0x2403fffb, 0x3c020001, 0x8c425cc4,
+0x8004e89, 0x2403fff7, 0x3c020001, 0x8c425cc4,
+0x431024, 0x3c010001, 0xac225cc4, 0x2402000e,
+0x3c010001, 0xc00429b, 0xac227dd0, 0x8005087,
+0x0, 0x8f820220, 0x3c030400, 0x431024,
+0x10400027, 0x2403ffbf, 0x8f850224, 0x3c020001,
+0x8c427ddc, 0xa32024, 0x431024, 0x1482000c,
+0x0, 0x3c020001, 0x8c427de0, 0x24420001,
+0x3c010001, 0xac227de0, 0x2c420002, 0x14400008,
+0x24020001, 0x3c010001, 0x8004ead, 0xac227e00,
+0x3c010001, 0xac207de0, 0x3c010001, 0xac207e00,
+0x3c020001, 0x8c427e00, 0x10400006, 0x30a20040,
+0x10400004, 0x24020001, 0x3c010001, 0x8004eb8,
+0xac227e04, 0x3c010001, 0xac207e04, 0x3c010001,
+0xac257ddc, 0x3c010001, 0x8004ec8, 0xac207e10,
+0x24020001, 0x3c010001, 0xac227e10, 0x3c010001,
+0xac207e00, 0x3c010001, 0xac207de0, 0x3c010001,
+0xac207e04, 0x3c010001, 0xac207ddc, 0x3c030001,
+0x8c637dd0, 0x3c020001, 0x8c427dd4, 0x10620003,
+0x3c020200, 0x3c010001, 0xac237dd4, 0xc21024,
+0x10400007, 0x2463ffff, 0x8f820220, 0x24030001,
+0x3c010001, 0xac235ccc, 0x8005085, 0x3c03f700,
+0x2c62000e, 0x104001a8, 0x31080, 0x3c010001,
+0x220821, 0x8c225b80, 0x400008, 0x0,
+0x3c010001, 0xac207e00, 0x3c010001, 0xac207de0,
+0x3c010001, 0xac207ddc, 0x3c010001, 0xac207e04,
+0x3c010001, 0xac207df8, 0x3c010001, 0xac207df0,
+0xc00486a, 0xaf800224, 0x24020002, 0x3c010001,
+0xac227dd0, 0x3c020001, 0x8c427e10, 0x14400056,
+0x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
+0xc00429b, 0xaee20000, 0xaf800204, 0x8f820200,
+0x2403fffd, 0x431024, 0xaf820200, 0x3c010001,
+0xac207e20, 0x8f830054, 0x3c020001, 0x8c427df8,
+0x24040001, 0x3c010001, 0xac247e0c, 0x24420001,
+0x3c010001, 0xac227df8, 0x2c420004, 0x3c010001,
+0xac237df4, 0x14400006, 0x24020003, 0x3c010001,
+0xac245ccc, 0x3c010001, 0x8005083, 0xac207df8,
+0x3c010001, 0x8005083, 0xac227dd0, 0x8f830054,
+0x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
+0x2c422710, 0x14400003, 0x24020004, 0x3c010001,
+0xac227dd0, 0x3c020001, 0x8c427e10, 0x14400026,
+0x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
+0x8005083, 0xaee20000, 0x3c040001, 0x8c845d9c,
+0x3c010001, 0xc00508a, 0xac207de8, 0x3c020001,
+0x8c427e1c, 0xaf820204, 0x3c020001, 0x8c427e10,
+0x14400015, 0x3c03fdff, 0x8ee20000, 0x3463ffff,
+0x431024, 0xaee20000, 0x8f820204, 0x30420030,
+0x1440013c, 0x24020002, 0x3c030001, 0x8c637e1c,
+0x24020005, 0x3c010001, 0xac227dd0, 0x3c010001,
+0x8005083, 0xac237e20, 0x3c020001, 0x8c427e10,
+0x10400010, 0x3c03fdff, 0x3c020001, 0x8c425d6c,
+0x24420001, 0x3c010001, 0xac225d6c, 0x2c420002,
+0x14400131, 0x24020001, 0x3c010001, 0xac225d74,
+0x3c010001, 0xac205d6c, 0x3c010001, 0x8005083,
+0xac225ccc, 0x8ee20000, 0x3463ffff, 0x431024,
+0xaee20000, 0x3c020001, 0x8c427e00, 0x10400122,
+0x0, 0x3c020001, 0x8c427ddc, 0x1040011e,
+0x0, 0x3c010001, 0xac227e08, 0x24020003,
+0x3c010001, 0xac227de0, 0x8005024, 0x24020006,
+0x3c010001, 0xac207de8, 0x8f820204, 0x34420040,
+0xaf820204, 0x3c020001, 0x8c427e20, 0x24030007,
+0x3c010001, 0xac237dd0, 0x34420040, 0x3c010001,
+0xac227e20, 0x3c020001, 0x8c427e00, 0x10400005,
+0x0, 0x3c020001, 0x8c427ddc, 0x104000f9,
+0x24020002, 0x3c050001, 0x24a57de0, 0x8ca20000,
+0x2c424e21, 0x104000f3, 0x24020002, 0x3c020001,
+0x8c427e04, 0x104000f8, 0x2404ffbf, 0x3c020001,
+0x8c427ddc, 0x3c030001, 0x8c637e08, 0x441024,
+0x641824, 0x10430004, 0x24020001, 0x3c010001,
+0x8005083, 0xac227dd0, 0x24020003, 0xaca20000,
+0x24020008, 0x3c010001, 0xac227dd0, 0x3c020001,
+0x8c427e0c, 0x1040000c, 0x24020001, 0x3c040001,
+0xc005097, 0x8c847ddc, 0x3c020001, 0x8c427e28,
+0x14400005, 0x24020001, 0x3c020001, 0x8c427e24,
+0x10400006, 0x24020001, 0x3c010001, 0xac225ccc,
+0x3c010001, 0x8005083, 0xac207df8, 0x3c020001,
+0x8c427df0, 0x3c030001, 0x8c637ddc, 0x2c420001,
+0x210c0, 0x30630008, 0x3c010001, 0xac227df0,
+0x3c010001, 0xac237dec, 0x8f830054, 0x24020009,
+0x3c010001, 0xac227dd0, 0x3c010001, 0x8005083,
+0xac237df4, 0x8f830054, 0x3c020001, 0x8c427df4,
+0x2463d8f0, 0x431023, 0x2c422710, 0x144000a8,
+0x0, 0x3c020001, 0x8c427e00, 0x10400005,
+0x0, 0x3c020001, 0x8c427ddc, 0x104000a9,
+0x24020002, 0x3c030001, 0x24637de0, 0x8c620000,
+0x2c424e21, 0x104000a3, 0x24020002, 0x3c020001,
+0x8c427e0c, 0x1040000e, 0x0, 0x3c020001,
+0x8c427ddc, 0x3c010001, 0xac207e0c, 0x30420080,
+0x1040002f, 0x2402000c, 0x8f820204, 0x30420080,
+0x1440000c, 0x24020003, 0x8005011, 0x2402000c,
+0x3c020001, 0x8c427ddc, 0x30420080, 0x14400005,
+0x24020003, 0x8f820204, 0x30420080, 0x1040001f,
+0x24020003, 0xac620000, 0x2402000a, 0x3c010001,
+0xac227dd0, 0x3c040001, 0x24847e18, 0x8c820000,
+0x3c030001, 0x8c637df0, 0x431025, 0xaf820204,
+0x8c830000, 0x3c040001, 0x8c847df0, 0x2402000b,
+0x3c010001, 0xac227dd0, 0x641825, 0x3c010001,
+0xac237e20, 0x3c050001, 0x24a57de0, 0x8ca20000,
+0x2c424e21, 0x1040006f, 0x24020002, 0x3c020001,
+0x8c427e10, 0x10400005, 0x0, 0x2402000c,
+0x3c010001, 0x8005083, 0xac227dd0, 0x3c020001,
+0x8c427e00, 0x1040006c, 0x0, 0x3c040001,
+0x8c847ddc, 0x1080005e, 0x30820008, 0x3c030001,
+0x8c637dec, 0x10620064, 0x24020003, 0x3c010001,
+0xac247e08, 0xaca20000, 0x24020006, 0x3c010001,
+0x8005083, 0xac227dd0, 0x8f820200, 0x34420002,
+0xaf820200, 0x8f830054, 0x2402000d, 0x3c010001,
+0xac227dd0, 0x3c010001, 0xac237df4, 0x8f830054,
+0x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
+0x2c422710, 0x1440003a, 0x0, 0x3c020001,
+0x8c427e10, 0x10400029, 0x2402000e, 0x3c030001,
+0x8c637e24, 0x3c010001, 0x14600015, 0xac227dd0,
+0xc0043dd, 0x0, 0x3c050001, 0x8ca55cc8,
+0xc0052a2, 0x2021, 0x3c030001, 0x8c635cc8,
+0x24020004, 0x14620005, 0x2403fffb, 0x3c020001,
+0x8c425cc4, 0x8005052, 0x2403fff7, 0x3c020001,
+0x8c425cc4, 0x431024, 0x3c010001, 0xac225cc4,
+0x8ee20000, 0x3c030200, 0x431025, 0xaee20000,
+0x8f820224, 0x3c010001, 0xac227e2c, 0x8f820220,
+0x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
+0x34420002, 0x8005083, 0xaf820220, 0x3c020001,
+0x8c427e00, 0x10400005, 0x0, 0x3c020001,
+0x8c427ddc, 0x1040000f, 0x24020002, 0x3c020001,
+0x8c427de0, 0x2c424e21, 0x1040000a, 0x24020002,
+0x3c020001, 0x8c427e00, 0x1040000f, 0x0,
+0x3c020001, 0x8c427ddc, 0x1440000b, 0x0,
+0x24020002, 0x3c010001, 0x8005083, 0xac227dd0,
+0x3c020001, 0x8c427e00, 0x10400003, 0x0,
+0xc00429b, 0x0, 0x8f820220, 0x3c03f700,
+0x431025, 0xaf820220, 0x8fbf0010, 0x3e00008,
+0x27bd0018, 0x3c030001, 0x24637e28, 0x8c620000,
+0x10400005, 0x34422000, 0x3c010001, 0xac227e1c,
+0x8005095, 0xac600000, 0x3c010001, 0xac247e1c,
+0x3e00008, 0x0, 0x27bdffe0, 0x30820030,
+0xafbf0018, 0x3c010001, 0xac227e24, 0x14400067,
+0x3c02ffff, 0x34421f0e, 0x821024, 0x14400061,
+0x24020030, 0x30822000, 0x1040005d, 0x30838000,
+0x31a02, 0x30820001, 0x21200, 0x3c040001,
+0x8c845d9c, 0x621825, 0x331c2, 0x3c030001,
+0x24635d78, 0x30828000, 0x21202, 0x30840001,
+0x42200, 0x441025, 0x239c2, 0x61080,
+0x431021, 0x471021, 0x90430000, 0x24020001,
+0x10620025, 0x0, 0x10600007, 0x24020002,
+0x10620013, 0x24020003, 0x1062002c, 0x3c05000f,
+0x80050f9, 0x0, 0x8f820200, 0x2403feff,
+0x431024, 0xaf820200, 0x8f820220, 0x3c03fffe,
+0x3463ffff, 0x431024, 0xaf820220, 0x3c010001,
+0xac207e44, 0x3c010001, 0x8005104, 0xac207e4c,
+0x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
+0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
+0x24020100, 0x3c010001, 0xac227e44, 0x3c010001,
+0x8005104, 0xac207e4c, 0x8f820200, 0x2403feff,
+0x431024, 0xaf820200, 0x8f820220, 0x3c030001,
+0x431025, 0xaf820220, 0x3c010001, 0xac207e44,
+0x3c010001, 0x8005104, 0xac237e4c, 0x8f820200,
+0x34420100, 0xaf820200, 0x8f820220, 0x3c030001,
+0x431025, 0xaf820220, 0x24020100, 0x3c010001,
+0xac227e44, 0x3c010001, 0x8005104, 0xac237e4c,
+0x34a5ffff, 0x3c040001, 0x24845bb8, 0xafa30010,
+0xc002403, 0xafa00014, 0x8005104, 0x0,
+0x24020030, 0x3c010001, 0xac227e28, 0x8fbf0018,
+0x3e00008, 0x27bd0020, 0x0, 0x27bdffc8,
+0xafb20028, 0x809021, 0xafb3002c, 0xa09821,
+0xafb00020, 0xc08021, 0x3c040001, 0x24845bd0,
+0x3c050009, 0x3c020001, 0x8c425cc8, 0x34a59001,
+0x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
+0xa7a0001a, 0xafb00014, 0xc002403, 0xafa20010,
+0x24020002, 0x12620083, 0x2e620003, 0x10400005,
+0x24020001, 0x1262000a, 0x0, 0x800529b,
+0x0, 0x24020004, 0x126200fa, 0x24020008,
+0x126200f9, 0x3c02ffec, 0x800529b, 0x0,
+0x3c020001, 0x8c425cc4, 0x30420002, 0x14400004,
+0x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
+0x3c010001, 0x310821, 0xac307e3c, 0x3c024000,
+0x2021024, 0x1040004e, 0x1023c2, 0x30840030,
+0x101382, 0x3042001c, 0x3c030001, 0x24635d08,
+0x431021, 0x823821, 0x3c020020, 0x2021024,
+0x10400006, 0x24020100, 0x3c010001, 0x310821,
+0xac227e40, 0x8005150, 0x3c020080, 0x3c010001,
+0x310821, 0xac207e40, 0x3c020080, 0x2021024,
+0x10400006, 0x121940, 0x3c020001, 0x3c010001,
+0x230821, 0x800515c, 0xac227e48, 0x121140,
+0x3c010001, 0x220821, 0xac207e48, 0x94e40000,
+0x3c030001, 0x8c635dbc, 0x24020005, 0x10620010,
+0xa7a40018, 0x32024000, 0x10400002, 0x34824000,
+0xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
+0x24e60002, 0x34420001, 0xc00498e, 0xa4e20002,
+0x24040001, 0x2821, 0xc00498e, 0x27a60018,
+0x3c020001, 0x8c425cc8, 0x24110001, 0x3c010001,
+0xac315cd4, 0x14530004, 0x32028000, 0xc00429b,
+0x0, 0x32028000, 0x1040011f, 0x0,
+0xc00429b, 0x0, 0x3c030001, 0x8c635dbc,
+0x24020005, 0x10620118, 0x24020002, 0x3c010001,
+0xac315ccc, 0x3c010001, 0x800529b, 0xac225cc8,
+0x24040001, 0x24050004, 0x27b0001a, 0xc00498e,
+0x2003021, 0x24040001, 0x2821, 0xc00498e,
+0x2003021, 0x3c020001, 0x511021, 0x8c427e34,
+0x3c040001, 0x8c845cc8, 0x3c03bfff, 0x3463ffff,
+0x3c010001, 0xac335cd4, 0x431024, 0x3c010001,
+0x310821, 0x109300fa, 0xac227e34, 0x800529b,
+0x0, 0x3c022000, 0x2021024, 0x10400005,
+0x24020001, 0x3c010001, 0xac225d98, 0x80051ad,
+0x128940, 0x3c010001, 0xac205d98, 0x128940,
+0x3c010001, 0x310821, 0xac307e38, 0x3c024000,
+0x2021024, 0x14400016, 0x0, 0x3c020001,
+0x8c425d98, 0x10400008, 0x24040004, 0x24050001,
+0xc004d93, 0x24062000, 0x24020001, 0x3c010001,
+0x370821, 0xac2283ac, 0x3c020001, 0x511021,
+0x8c427e30, 0x3c03bfff, 0x3463ffff, 0x431024,
+0x3c010001, 0x310821, 0x8005299, 0xac227e30,
+0x3c020001, 0x8c425d98, 0x10400028, 0x3c0300a0,
+0x2031024, 0x5443000d, 0x3c020020, 0x3c020001,
+0x8c425d9c, 0x24030100, 0x3c010001, 0x310821,
+0xac237e44, 0x3c030001, 0x3c010001, 0x310821,
+0xac237e4c, 0x80051f0, 0x34420400, 0x2021024,
+0x10400008, 0x24030100, 0x3c020001, 0x8c425d9c,
+0x3c010001, 0x310821, 0xac237e44, 0x80051f0,
+0x34420800, 0x3c020080, 0x2021024, 0x1040002e,
+0x3c030001, 0x3c020001, 0x8c425d9c, 0x3c010001,
+0x310821, 0xac237e4c, 0x34420c00, 0x3c010001,
+0xac225d9c, 0x8005218, 0x24040001, 0x3c020020,
+0x2021024, 0x10400006, 0x24020100, 0x3c010001,
+0x310821, 0xac227e44, 0x8005201, 0x3c020080,
+0x3c010001, 0x310821, 0xac207e44, 0x3c020080,
+0x2021024, 0x10400007, 0x121940, 0x3c020001,
+0x3c010001, 0x230821, 0xac227e4c, 0x800520f,
+0x24040001, 0x121140, 0x3c010001, 0x220821,
+0xac207e4c, 0x24040001, 0x2821, 0x27b0001e,
+0xc00494c, 0x2003021, 0x24040001, 0x2821,
+0xc00494c, 0x2003021, 0x24040001, 0x24050001,
+0x27b0001c, 0xc00494c, 0x2003021, 0x24040001,
+0x24050001, 0xc00494c, 0x2003021, 0x8005299,
+0x0, 0x3c02ffec, 0x3442ffff, 0x2028024,
+0x3c020008, 0x2028025, 0x121140, 0x3c010001,
+0x220821, 0xac307e38, 0x3c022000, 0x2021024,
+0x10400009, 0x0, 0x3c020001, 0x8c425d74,
+0x14400005, 0x24020001, 0x3c010001, 0xac225d98,
+0x800523a, 0x3c024000, 0x3c010001, 0xac205d98,
+0x3c024000, 0x2021024, 0x1440001e, 0x0,
+0x3c020001, 0x8c425d98, 0x3c010001, 0xac205ce0,
+0x10400007, 0x24022020, 0x3c010001, 0xac225d9c,
+0x24020001, 0x3c010001, 0x370821, 0xac2283ac,
+0x3c04bfff, 0x121940, 0x3c020001, 0x431021,
+0x8c427e30, 0x3c050001, 0x8ca55cc8, 0x3484ffff,
+0x441024, 0x3c010001, 0x230821, 0xac227e30,
+0x24020001, 0x10a20044, 0x0, 0x8005299,
+0x0, 0x3c020001, 0x8c425d98, 0x1040001c,
+0x24022000, 0x3c010001, 0xac225d9c, 0x3c0300a0,
+0x2031024, 0x14430005, 0x121140, 0x3402a000,
+0x3c010001, 0x8005294, 0xac225d9c, 0x3c030001,
+0x621821, 0x8c637e38, 0x3c020020, 0x621024,
+0x10400004, 0x24022001, 0x3c010001, 0x8005294,
+0xac225d9c, 0x3c020080, 0x621024, 0x1040001f,
+0x3402a001, 0x3c010001, 0x8005294, 0xac225d9c,
+0x3c020020, 0x2021024, 0x10400007, 0x121940,
+0x24020100, 0x3c010001, 0x230821, 0xac227e44,
+0x8005288, 0x3c020080, 0x121140, 0x3c010001,
+0x220821, 0xac207e44, 0x3c020080, 0x2021024,
+0x10400006, 0x121940, 0x3c020001, 0x3c010001,
+0x230821, 0x8005294, 0xac227e4c, 0x121140,
+0x3c010001, 0x220821, 0xac207e4c, 0x3c030001,
+0x8c635cc8, 0x24020001, 0x10620003, 0x0,
+0xc00429b, 0x0, 0x8fbf0030, 0x8fb3002c,
+0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
+0x27bd0038, 0x27bdffd8, 0xafb20020, 0x809021,
+0xafb1001c, 0x8821, 0x24020002, 0xafbf0024,
+0xafb00018, 0xa7a00012, 0x10a200d3, 0xa7a00010,
+0x2ca20003, 0x10400005, 0x24020001, 0x10a2000a,
+0x128140, 0x8005380, 0x2201021, 0x24020004,
+0x10a2007d, 0x24020008, 0x10a2007c, 0x122940,
+0x8005380, 0x2201021, 0x3c030001, 0x701821,
+0x8c637e3c, 0x3c024000, 0x621024, 0x14400009,
+0x24040001, 0x3c027fff, 0x3442ffff, 0x628824,
+0x3c010001, 0x300821, 0xac317e34, 0x8005380,
+0x2201021, 0x24050001, 0xc00494c, 0x27a60010,
+0x24040001, 0x24050001, 0xc00494c, 0x27a60010,
+0x97a20010, 0x30420004, 0x10400034, 0x3c114000,
+0x3c020001, 0x8c425dbc, 0x2443ffff, 0x2c620006,
+0x10400034, 0x31080, 0x3c010001, 0x220821,
+0x8c225be0, 0x400008, 0x0, 0x24040001,
+0x24050011, 0x27b00012, 0xc00494c, 0x2003021,
+0x24040001, 0x24050011, 0xc00494c, 0x2003021,
+0x97a50012, 0x30a24000, 0x10400002, 0x3c040010,
+0x3c040008, 0x3c030001, 0x8005301, 0x30a28000,
+0x24040001, 0x24050014, 0x27b00012, 0xc00494c,
+0x2003021, 0x24040001, 0x24050014, 0xc00494c,
+0x2003021, 0x97a50012, 0x30a21000, 0x10400002,
+0x3c040010, 0x3c040008, 0x3c030001, 0x30a20800,
+0x54400001, 0x3c030002, 0x3c028000, 0x2221025,
+0x641825, 0x800530e, 0x438825, 0x3c110001,
+0x2308821, 0x8e317e3c, 0x3c027fff, 0x3442ffff,
+0x2228824, 0x3c020001, 0x8c425cd8, 0x1040001d,
+0x121140, 0x3c020001, 0x8c425d98, 0x10400002,
+0x3c022000, 0x2228825, 0x121140, 0x3c010001,
+0x220821, 0x8c227e40, 0x10400003, 0x3c020020,
+0x8005322, 0x2228825, 0x3c02ffdf, 0x3442ffff,
+0x2228824, 0x121140, 0x3c010001, 0x220821,
+0x8c227e48, 0x10400003, 0x3c020080, 0x800532d,
+0x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
+0x121140, 0x3c010001, 0x220821, 0xac317e34,
+0x8005380, 0x2201021, 0x122940, 0x3c030001,
+0x651821, 0x8c637e38, 0x3c024000, 0x621024,
+0x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
+0x3c010001, 0x250821, 0xac317e30, 0x8005380,
+0x2201021, 0x3c020001, 0x8c425cd8, 0x10400033,
+0x3c11c00c, 0x3c020001, 0x8c425d74, 0x3c04c00c,
+0x34842000, 0x3c030001, 0x8c635d98, 0x2102b,
+0x21023, 0x441024, 0x10600003, 0x518825,
+0x3c022000, 0x2228825, 0x3c020001, 0x451021,
+0x8c427e44, 0x10400003, 0x3c020020, 0x800535d,
+0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
+0x121140, 0x3c010001, 0x220821, 0x8c227e4c,
+0x10400003, 0x3c020080, 0x8005368, 0x2228825,
+0x3c02ff7f, 0x3442ffff, 0x2228824, 0x3c020001,
+0x8c425d60, 0x10400002, 0x3c020800, 0x2228825,
+0x3c020001, 0x8c425d64, 0x10400002, 0x3c020400,
+0x2228825, 0x3c020001, 0x8c425d68, 0x10400006,
+0x3c020100, 0x800537b, 0x2228825, 0x3c027fff,
+0x3442ffff, 0x628824, 0x121140, 0x3c010001,
+0x220821, 0xac317e30, 0x2201021, 0x8fbf0024,
+0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
+0x27bd0028, 0x27bdffd8, 0xafb40020, 0x80a021,
+0xafbf0024, 0xafb3001c, 0xafb20018, 0xafb10014,
+0xafb00010, 0x8f900200, 0x3c030001, 0x8c635cc8,
+0x8f930220, 0x24020002, 0x10620063, 0x2c620003,
+0x10400005, 0x24020001, 0x1062000a, 0x141940,
+0x8005448, 0x0, 0x24020004, 0x1062005a,
+0x24020008, 0x10620059, 0x149140, 0x8005448,
+0x0, 0x3c040001, 0x832021, 0x8c847e3c,
+0x3c110001, 0x2238821, 0x8e317e34, 0x3c024000,
+0x821024, 0x1040003e, 0x3c020008, 0x2221024,
+0x10400020, 0x36100002, 0x3c020001, 0x431021,
+0x8c427e40, 0x10400005, 0x36100020, 0x36100100,
+0x3c020020, 0x80053bd, 0x2228825, 0x2402feff,
+0x2028024, 0x3c02ffdf, 0x3442ffff, 0x2228824,
+0x141140, 0x3c010001, 0x220821, 0x8c227e48,
+0x10400005, 0x3c020001, 0x2629825, 0x3c020080,
+0x80053dc, 0x2228825, 0x3c02fffe, 0x3442ffff,
+0x2629824, 0x3c02ff7f, 0x3442ffff, 0x80053dc,
+0x2228824, 0x2402fedf, 0x2028024, 0x3c02fffe,
+0x3442ffff, 0x2629824, 0x3c02ff5f, 0x3442ffff,
+0x2228824, 0x3c010001, 0x230821, 0xac207e40,
+0x3c010001, 0x230821, 0xac207e48, 0xc00486a,
+0x0, 0xaf900200, 0xaf930220, 0x8f820220,
+0x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
+0x34420002, 0xaf820220, 0x80053f3, 0x141140,
+0x8f820200, 0x2403fffd, 0x431024, 0xc00486a,
+0xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
+0x2228824, 0x141140, 0x3c010001, 0x220821,
+0x8005448, 0xac317e34, 0x149140, 0x3c040001,
+0x922021, 0x8c847e38, 0x3c110001, 0x2328821,
+0x8e317e30, 0x3c024000, 0x821024, 0x14400011,
+0x0, 0x3c020001, 0x8c425d98, 0x14400006,
+0x3c02bfff, 0x8f820200, 0x34420002, 0xc00486a,
+0xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
+0x2228824, 0x3c010001, 0x320821, 0x8005448,
+0xac317e30, 0x3c020001, 0x8c425d98, 0x10400005,
+0x3c020020, 0x3c020001, 0x8c425d74, 0x1040002b,
+0x3c020020, 0x821024, 0x10400007, 0x36100020,
+0x24020100, 0x3c010001, 0x320821, 0xac227e44,
+0x8005428, 0x36100100, 0x3c010001, 0x320821,
+0xac207e44, 0x2402feff, 0x2028024, 0x3c020080,
+0x821024, 0x10400007, 0x141940, 0x3c020001,
+0x3c010001, 0x230821, 0xac227e4c, 0x8005439,
+0x2629825, 0x141140, 0x3c010001, 0x220821,
+0xac207e4c, 0x3c02fffe, 0x3442ffff, 0x2629824,
+0xc00486a, 0x0, 0xaf900200, 0xaf930220,
+0x8f820220, 0x2403fffb, 0x431024, 0xaf820220,
+0x8f820220, 0x34420002, 0xaf820220, 0x141140,
+0x3c010001, 0x220821, 0xac317e30, 0x8fbf0024,
+0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
+0x8fb00010, 0x3e00008, 0x27bd0028, 0x0 };
+static u32 tigonFwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
+0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f66776d, 0x61696e2e, 0x632c7620, 0x312e312e,
+0x322e3131, 0x20313939, 0x382f3034, 0x2f323720,
+0x32323a31, 0x333a3432, 0x20736875, 0x616e6720,
+0x45787020, 0x24000000, 0x7468655f, 0x4441574e,
+0x0, 0x53544143, 0x4b5f3120, 0x0,
+0x42616453, 0x6e64526e, 0x67000000, 0x3f456e71,
+0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
+0x6576526e, 0x6746756c, 0x6c000000, 0x496c6c43,
+0x6f6e6652, 0x78000000, 0x53656e64, 0x436b5375,
+0x6d000000, 0x52656376, 0x566c616e, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f74696d, 0x65722e63, 0x2c762031, 0x2e312e32,
+0x2e382031, 0x3939382f, 0x30372f33, 0x31203137,
+0x3a35383a, 0x34352073, 0x6875616e, 0x67204578,
+0x70202400, 0x542d446d, 0x61526431, 0x0,
+0x542d446d, 0x61424200, 0x542d446d, 0x61320000,
+0x3f6e6f51, 0x64547845, 0x0, 0x3f6e6f51,
+0x64527845, 0x0, 0x656e714d, 0x45765046,
+0x61696c00, 0x656e714d, 0x45764661, 0x696c0000,
+0x6661696c, 0x456e454d, 0x0, 0x3f456e71,
+0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
+0x6576526e, 0x6746756c, 0x6c000000, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f636f6d, 0x6d616e64, 0x2e632c76, 0x20312e31,
+0x2e322e31, 0x30203139, 0x39382f31, 0x312f3138,
+0x2031373a, 0x31313a31, 0x38207368, 0x75616e67,
+0x20457870, 0x20240000, 0x3f4d626f, 0x78457674,
+0x0, 0x4e4f636f, 0x6d616e64, 0x0,
+0x68737465, 0x5f455252, 0x0, 0x412d4572,
+0x72427563, 0x0, 0x4552524f, 0x522d4164,
+0x64000000, 0x656e714d, 0x45765046, 0x61696c00,
+0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
+0x456e454d, 0x0, 0x442d4572, 0x724c6173,
+0x74000000, 0x442d4572, 0x72320000, 0x6d437374,
+0x4d644552, 0x52000000, 0x70726f6d, 0x4d644552,
+0x52000000, 0x46696c74, 0x4d644552, 0x52000000,
+0x636d645f, 0x45525200, 0x3f456e71, 0x45767400,
+0x3f6e6f51, 0x64457650, 0x0, 0x6576526e,
+0x6746756c, 0x6c000000, 0x0, 0x6ea0,
+0x7fbc, 0x6e38, 0x8734, 0x82b0,
+0x8780, 0x8780, 0x6f54, 0x7694,
+0x7f0c, 0x80a8, 0x8074, 0x8780,
+0x7e70, 0x80cc, 0x6e64, 0x81cc,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f646d61, 0x2e632c76, 0x20312e31, 0x2e322e33,
+0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
+0x333a3431, 0x20736875, 0x616e6720, 0x45787020,
+0x24000000, 0x646d6172, 0x6441544e, 0x0,
+0x646d6177, 0x7241544e, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f747261, 0x63652e63, 0x2c762031, 0x2e312e32,
+0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
+0x3a31333a, 0x35302073, 0x6875616e, 0x67204578,
+0x70202400, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f646174, 0x612e632c, 0x7620312e, 0x312e322e,
+0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
+0x31333a34, 0x30207368, 0x75616e67, 0x20457870,
+0x20240000, 0x46575f56, 0x45525349, 0x4f4e3a20,
+0x23312046, 0x72692041, 0x70722037, 0x2031373a,
+0x35353a34, 0x38205044, 0x54203230, 0x30300000,
+0x46575f43, 0x4f4d5049, 0x4c455f54, 0x494d453a,
+0x2031373a, 0x35353a34, 0x38000000, 0x46575f43,
+0x4f4d5049, 0x4c455f42, 0x593a2064, 0x65767263,
+0x73000000, 0x46575f43, 0x4f4d5049, 0x4c455f48,
+0x4f53543a, 0x20636f6d, 0x70757465, 0x0,
+0x46575f43, 0x4f4d5049, 0x4c455f44, 0x4f4d4149,
+0x4e3a2065, 0x6e672e61, 0x6374656f, 0x6e2e636f,
+0x6d000000, 0x46575f43, 0x4f4d5049, 0x4c45523a,
+0x20676363, 0x20766572, 0x73696f6e, 0x20322e37,
+0x2e320000, 0x0, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f6d656d, 0x2e632c76, 0x20312e31, 0x2e322e32,
+0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
+0x333a3434, 0x20736875, 0x616e6720, 0x45787020,
+0x24000000, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f73656e, 0x642e632c, 0x7620312e, 0x312e322e,
+0x31312031, 0x3939382f, 0x31322f32, 0x32203137,
+0x3a31373a, 0x35352073, 0x6875616e, 0x67204578,
+0x70202400, 0x736e6464, 0x654e6f51, 0x20000000,
+0x6e6f454e, 0x515f5458, 0x0, 0x736e6464,
+0x744e6f51, 0x20000000, 0x3f6e6f51, 0x64547845,
+0x0, 0x756e6b72, 0x64747970, 0x65000000,
+0x0, 0xaccc, 0xaccc, 0xad9c,
+0xaab0, 0xaab0, 0xad9c, 0xad9c,
+0xad9c, 0xad9c, 0xad9c, 0xad9c,
+0xad9c, 0xad9c, 0xad9c, 0xad9c,
+0xad9c, 0xad9c, 0xad9c, 0xad7c,
+0x0, 0xbca8, 0xbca8, 0xbd70,
+0xae4c, 0xb058, 0xbd70, 0xbd70,
+0xbd70, 0xbd70, 0xbd70, 0xbd70,
+0xbd70, 0xbd70, 0xbd70, 0xbd70,
+0xbd70, 0xbd70, 0xbd70, 0xbd54,
+0xb040, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f726563, 0x762e632c, 0x7620312e, 0x312e322e,
+0x31392031, 0x3939382f, 0x30372f32, 0x34203231,
+0x3a33303a, 0x30352073, 0x6875616e, 0x67204578,
+0x70202400, 0x706b5278, 0x45525200, 0x66726d32,
+0x4c617267, 0x65000000, 0x72784e6f, 0x52784264,
+0x0, 0x72785144, 0x6d614446, 0x0,
+0x72785144, 0x6d614246, 0x0, 0x3f6e6f51,
+0x64527845, 0x0, 0x706b5278, 0x45525273,
+0x0, 0x66726d32, 0x4c726753, 0x0,
+0x72784e6f, 0x42645300, 0x3f724264, 0x446d6146,
+0x0, 0x3f724a42, 0x64446d46, 0x0,
+0x0, 0xf678, 0xf678, 0xf678,
+0xf678, 0xf678, 0xf678, 0xf678,
+0xf678, 0xf678, 0xf678, 0xf678,
+0xf678, 0xf678, 0xf678, 0xf678,
+0xf670, 0xf670, 0xf670, 0x572d444d,
+0x41456e46, 0x0, 0x0, 0xfdc0,
+0x1015c, 0xfddc, 0x1015c, 0x1015c,
+0x1015c, 0x1015c, 0x1015c, 0x1015c,
+0xf704, 0x1015c, 0x1015c, 0x1015c,
+0x1015c, 0x1015c, 0x10154, 0x10154,
+0x10154, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f6d6163, 0x2e632c76, 0x20312e31, 0x2e322e31,
+0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
+0x31333a34, 0x32207368, 0x75616e67, 0x20457870,
+0x20240000, 0x6d616374, 0x7841544e, 0x0,
+0x4e745379, 0x6e264c6b, 0x0, 0x72656d61,
+0x73737274, 0x0, 0x6c696e6b, 0x444f574e,
+0x0, 0x656e714d, 0x45765046, 0x61696c00,
+0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
+0x456e454d, 0x0, 0x6c696e6b, 0x55500000,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
+0x2f636b73, 0x756d2e63, 0x2c762031, 0x2e312e32,
+0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
+0x3a31333a, 0x33392073, 0x6875616e, 0x67204578,
+0x70202400, 0x50726f62, 0x65506879, 0x0,
+0x6c6e6b41, 0x53535254, 0x0, 0x11b2c,
+0x11bc4, 0x11bf8, 0x11c2c, 0x11c58,
+0x11c6c, 0x11ca8, 0x1207c, 0x11de4,
+0x11e24, 0x11e50, 0x11e90, 0x11ec0,
+0x11efc, 0x11f30, 0x1207c, 0x122c0,
+0x122d8, 0x12300, 0x12320, 0x12348,
+0x12478, 0x124a0, 0x124f4, 0x1251c,
+0x0, 0x1278c, 0x1285c, 0x12934,
+0x12a04, 0x12a60, 0x12b3c, 0x12b64,
+0x12c40, 0x12c68, 0x12e10, 0x12e38,
+0x12fe0, 0x131d8, 0x1346c, 0x13380,
+0x1346c, 0x13498, 0x13008, 0x131b0,
+0x0, 0x13b84, 0x13bc8, 0x13c60,
+0x13cac, 0x13d1c, 0x13db4, 0x13de8,
+0x13e70, 0x13f08, 0x13fd8, 0x14018,
+0x1409c, 0x140c0, 0x141f4, 0x646f4261,
+0x73655067, 0x0, 0x0, 0x0,
+0x0, 0x73746d61, 0x634c4e4b, 0x0,
+0x0, 0x14c38, 0x14c38, 0x14b80,
+0x14bc4, 0x14c38, 0x14c38, 0x0,
+0x0, 0x0 };
+static u32 tigonFwData[(MAX_DATA_LEN/4) + 1] __initdata = {
+0x416c7465,
+0x6f6e2041, 0x63654e49, 0x43205600, 0x416c7465,
+0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
+0x0, 0x0, 0x0, 0x135418,
+0x13e7fc, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x60cf00,
+0x60, 0xcf000000, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x3, 0x0,
+0x1, 0x0, 0x0, 0x0,
+0x1, 0x0, 0x1, 0x0,
+0x0, 0x0, 0x0, 0x1,
+0x1, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x1000000, 0x21000000,
+0x12000140, 0x0, 0x0, 0x20000000,
+0x120000a0, 0x0, 0x12000060, 0x12000180,
+0x120001e0, 0x0, 0x0, 0x0,
+0x1, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x2,
+0x0, 0x0, 0x30001, 0x1,
+0x30201, 0x0, 0x0, 0x0 };
+#endif
+/* Generated by genfw.c */
+#define tigon2FwReleaseMajor 0xc
+#define tigon2FwReleaseMinor 0x4
+#define tigon2FwReleaseFix 0xb
+#define tigon2FwStartAddr 0x00004000
+#define tigon2FwTextAddr 0x00004000
+#define tigon2FwTextLen 0x11bc0
+#define tigon2FwRodataAddr 0x00015bc0
+#define tigon2FwRodataLen 0x10d0
+#define tigon2FwDataAddr 0x00016cc0
+#define tigon2FwDataLen 0x1c0
+#define tigon2FwSbssAddr 0x00016e80
+#define tigon2FwSbssLen 0xcc
+#define tigon2FwBssAddr 0x00016f50
+#define tigon2FwBssLen 0x20c0
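+/*
+ * Editor's note -- illustrative sketch only, not part of the generated
+ * header or of the acenic driver itself.  It shows how the section
+ * descriptors above pair a NIC-local load address with a length: a
+ * loader copies each image (text, rodata, data) to its *Addr, zero
+ * fills the SBSS/BSS ranges, and finally starts the on-board CPU at
+ * tigon2FwStartAddr.  The copy_to_nic()/clear_nic() callbacks are
+ * hypothetical placeholders; the real driver uses its own windowed
+ * copy/clear routines in acenic.c.
+ */
+static inline void
+tigon2_fw_load_sketch(void (*copy_to_nic)(u32 addr, const u32 *img, u32 len),
+		      void (*clear_nic)(u32 addr, u32 len),
+		      const u32 *text, const u32 *rodata, const u32 *data)
+{
+	/* Image-backed sections: copy each to its load address. */
+	copy_to_nic(tigon2FwTextAddr,   text,   tigon2FwTextLen);
+	copy_to_nic(tigon2FwRodataAddr, rodata, tigon2FwRodataLen);
+	copy_to_nic(tigon2FwDataAddr,   data,   tigon2FwDataLen);
+	/* Zero-initialized sections: no image, just clear the ranges. */
+	clear_nic(tigon2FwSbssAddr, tigon2FwSbssLen);
+	clear_nic(tigon2FwBssAddr,  tigon2FwBssLen);
+	/* The CPU is then released at tigon2FwStartAddr via board registers. */
+}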
+static u32 tigon2FwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
+0x0,
+0x10000003, 0x0, 0xd, 0xd,
+0x3c1d0001, 0x8fbd6d20, 0x3a0f021, 0x3c100000,
+0x26104000, 0xc0010c0, 0x0, 0xd,
+0x3c1d0001, 0x8fbd6d24, 0x3a0f021, 0x3c100000,
+0x26104000, 0xc0017e0, 0x0, 0xd,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x2000008,
+0x0, 0x800172f, 0x3c0a0001, 0x800172f,
+0x3c0a0002, 0x800172f, 0x0, 0x8002cac,
+0x0, 0x8002c4f, 0x0, 0x800172f,
+0x3c0a0004, 0x800328a, 0x0, 0x8001a52,
+0x0, 0x800394d, 0x0, 0x80038f4,
+0x0, 0x800172f, 0x3c0a0006, 0x80039bb,
+0x3c0a0007, 0x800172f, 0x3c0a0008, 0x800172f,
+0x3c0a0009, 0x8003a13, 0x0, 0x8002ea6,
+0x0, 0x800172f, 0x3c0a000b, 0x800172f,
+0x3c0a000c, 0x800172f, 0x3c0a000d, 0x80028fb,
+0x0, 0x8002890, 0x0, 0x800172f,
+0x3c0a000e, 0x800208c, 0x0, 0x8001964,
+0x0, 0x8001a04, 0x0, 0x8003ca6,
+0x0, 0x8003c94, 0x0, 0x800172f,
+0x0, 0x800191a, 0x0, 0x800172f,
+0x0, 0x800172f, 0x3c0a0013, 0x800172f,
+0x3c0a0014, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x27bdffe0,
+0x3c1cc000, 0xafbf001c, 0xafb00018, 0x8f820140,
+0x24030003, 0xaf8300ec, 0x34420004, 0xc002b20,
+0xaf820140, 0x3c0100c0, 0xc001763, 0xac203ffc,
+0x401821, 0x3c020010, 0x3c010001, 0xac236e9c,
+0x10620011, 0x43102b, 0x14400002, 0x3c020020,
+0x3c020008, 0x1062000c, 0x24050100, 0x3c060001,
+0x8cc66e9c, 0x3c040001, 0x24845c74, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x3c020020,
+0x3c010001, 0xac226e9c, 0x24020008, 0x3c010001,
+0xac226eb4, 0x2402001f, 0x3c010001, 0xac226ec4,
+0x24020016, 0x3c010001, 0xac226e98, 0x3c05fffe,
+0x34a56f08, 0x3c020001, 0x8c426e9c, 0x3c030002,
+0x24639010, 0x3c040001, 0x8c846cc4, 0x431023,
+0x14800002, 0x458021, 0x2610fa38, 0x2402f000,
+0x2028024, 0xc001785, 0x2002021, 0x2022823,
+0x3c040020, 0x821823, 0x651823, 0x247bb000,
+0x3c03fffe, 0x3463bf08, 0x363b821, 0x3c0600bf,
+0x34c6f000, 0x3c070001, 0x8ce76cc0, 0x3c0300bf,
+0x3463e000, 0x852023, 0x3c010001, 0xac246ea8,
+0x822023, 0x3c010001, 0xac256e90, 0x52842,
+0x3c010001, 0xac226e84, 0x27620ffc, 0x3c010001,
+0xac226d20, 0x27621ffc, 0xdb3023, 0x7b1823,
+0x3c010001, 0xac246e88, 0x3c010001, 0xac256eac,
+0x3c010001, 0xac226d24, 0xaf860150, 0x10e00011,
+0xaf830250, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
+0xc001749, 0x0, 0x3c020001, 0x8c426cd0,
+0x3c030001, 0x8c636cd4, 0x2442fe00, 0x24630200,
+0x3c010001, 0xac226cd0, 0x3c010001, 0x10000004,
+0xac236cd4, 0x3c1d0001, 0x8fbd6d20, 0x3a0f021,
+0x3c020001, 0x8c426cc4, 0x1040000d, 0x26fafa38,
+0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
+0x3c1a0001, 0x8f5a6cd4, 0x2442fa38, 0x246305c8,
+0x3c010001, 0xac226cd0, 0x3c010001, 0xac236cd4,
+0x3c020001, 0x8c426cc8, 0x14400003, 0x0,
+0x3c010001, 0xac206cd0, 0xc001151, 0x0,
+0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
+0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
+0x27bdff98, 0xafb00048, 0x3c100001, 0x8e1066b8,
+0xafb20050, 0x3c120000, 0x26524100, 0xafbf0060,
+0xafbe005c, 0xafb50058, 0xafb30054, 0xafb1004c,
+0xafa20034, 0xafa30030, 0xafa00010, 0xafa00014,
+0x8f860040, 0x3c040001, 0x24845c80, 0x24050200,
+0x3c010001, 0xac326e80, 0xc002b3b, 0x2003821,
+0x8f830040, 0x3c02f000, 0x621824, 0x3c026000,
+0x1062000b, 0xa3a0003f, 0x240e0001, 0x3c040001,
+0x24845c88, 0xa3ae003f, 0xafa00010, 0xafa00014,
+0x8f860040, 0x24050300, 0xc002b3b, 0x2003821,
+0x8f820240, 0x3c030001, 0x431025, 0xaf820240,
+0xaf800048, 0x8f820048, 0x14400005, 0x0,
+0xaf800048, 0x8f820048, 0x10400004, 0x0,
+0xaf800048, 0x10000003, 0x2e02021, 0xaf80004c,
+0x2e02021, 0x3c050001, 0xc002ba8, 0x34a540f8,
+0x3402021, 0xc002ba8, 0x240505c8, 0x3c020001,
+0x8c426ea8, 0x3c0d0001, 0x8dad6e88, 0x3c030001,
+0x8c636e84, 0x3c080001, 0x8d086e90, 0x3c090001,
+0x8d296eac, 0x3c0a0001, 0x8d4a6eb4, 0x3c0b0001,
+0x8d6b6ec4, 0x3c0c0001, 0x8d8c6e98, 0x3c040001,
+0x24845c94, 0x24050400, 0xaf42013c, 0x8f42013c,
+0x24060001, 0x24070001, 0xaf400000, 0xaf4d0138,
+0xaf430144, 0xaf480148, 0xaf49014c, 0xaf4a0150,
+0xaf4b0154, 0xaf4c0158, 0x2442ff80, 0xaf420140,
+0x24020001, 0xafa20010, 0xc002b3b, 0xafa00014,
+0x8f420138, 0xafa20010, 0x8f42013c, 0xafa20014,
+0x8f460144, 0x8f470148, 0x3c040001, 0x24845ca0,
+0xc002b3b, 0x24050500, 0xafb70010, 0xafba0014,
+0x8f46014c, 0x8f470150, 0x3c040001, 0x24845cac,
+0xc002b3b, 0x24050600, 0x3c020001, 0x8c426e9c,
+0x3603821, 0x3c060002, 0x24c69010, 0x2448ffff,
+0x1061824, 0xe81024, 0x43102b, 0x10400006,
+0x24050900, 0x3c040001, 0x24845cb8, 0xafa80010,
+0xc002b3b, 0xafa00014, 0x8f82000c, 0xafa20010,
+0x8f82003c, 0xafa20014, 0x8f860000, 0x8f870004,
+0x3c040001, 0x24845cc4, 0xc002b3b, 0x24051000,
+0x8c020220, 0x8c030224, 0x8c060218, 0x8c07021c,
+0x3c040001, 0x24845ccc, 0x24051100, 0xafa20010,
+0xc002b3b, 0xafa30014, 0xaf800054, 0xaf80011c,
+0x8c020218, 0x30420002, 0x10400009, 0x0,
+0x8c020220, 0x3c030002, 0x34630004, 0x431025,
+0xaf42000c, 0x8c02021c, 0x10000008, 0x34420004,
+0x8c020220, 0x3c030002, 0x34630006, 0x431025,
+0xaf42000c, 0x8c02021c, 0x34420006, 0xaf420014,
+0x8c020218, 0x30420010, 0x1040000a, 0x0,
+0x8c02021c, 0x34420004, 0xaf420010, 0x8c020220,
+0x3c03000a, 0x34630004, 0x431025, 0x10000009,
+0xaf420008, 0x8c020220, 0x3c03000a, 0x34630006,
+0x431025, 0xaf420008, 0x8c02021c, 0x34420006,
+0xaf420010, 0x24020001, 0xaf8200a0, 0xaf8200b0,
+0x8f830054, 0x8f820054, 0xaf8000d0, 0xaf8000c0,
+0x10000002, 0x24630064, 0x8f820054, 0x621023,
+0x2c420065, 0x1440fffc, 0x0, 0x8c040208,
+0x8c05020c, 0x26e20028, 0xaee20020, 0x24020490,
+0xaee20010, 0xaee40008, 0xaee5000c, 0x26e40008,
+0x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
+0x8c820018, 0xaf8200b4, 0x9482000a, 0xaf82009c,
+0x8f420014, 0xaf8200b0, 0x8f8200b0, 0x30420004,
+0x1440fffd, 0x0, 0x8f8200b0, 0x3c03ef00,
+0x431024, 0x10400021, 0x0, 0x8f8200b4,
+0xafa20010, 0x8f820090, 0x8f830094, 0x3c040001,
+0x24845cd4, 0xafa30014, 0x8f8600b0, 0x8f87009c,
+0x3c050001, 0xc002b3b, 0x34a5200d, 0x3c040001,
+0x24845ce0, 0x240203c0, 0xafa20010, 0xafa00014,
+0x8f860144, 0x3c070001, 0x24e75ce8, 0xc002b3b,
+0x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
+0x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
+0x3c030001, 0x431025, 0xaf820140, 0x96e20472,
+0x96e60452, 0x96e70462, 0xafa20010, 0x96e20482,
+0x3c040001, 0x24845d14, 0x24051200, 0xc002b3b,
+0xafa20014, 0x96f00452, 0x32020001, 0x10400002,
+0xb021, 0x24160001, 0x32020002, 0x54400001,
+0x36d60002, 0x32020008, 0x54400001, 0x36d60004,
+0x32020010, 0x54400001, 0x36d60008, 0x32020020,
+0x54400001, 0x36d60010, 0x32020040, 0x54400001,
+0x36d60020, 0x32020080, 0x54400001, 0x36d60040,
+0x96e60482, 0x30c20200, 0x54400001, 0x36d64000,
+0x96e30472, 0x30620200, 0x10400003, 0x30620100,
+0x10000003, 0x36d62000, 0x54400001, 0x36d61000,
+0x96f00462, 0x32c24000, 0x14400004, 0x3207009b,
+0x30c2009b, 0x14e20007, 0x240e0001, 0x32c22000,
+0x1440000d, 0x32020001, 0x3062009b, 0x10e20009,
+0x240e0001, 0x3c040001, 0x24845d20, 0x24051300,
+0x2003821, 0xa3ae003f, 0xafa30010, 0xc002b3b,
+0xafa00014, 0x32020001, 0x54400001, 0x36d60080,
+0x32020002, 0x54400001, 0x36d60100, 0x32020008,
+0x54400001, 0x36d60200, 0x32020010, 0x54400001,
+0x36d60400, 0x32020080, 0x54400001, 0x36d60800,
+0x8c020218, 0x30420200, 0x10400002, 0x3c020008,
+0x2c2b025, 0x8c020218, 0x30420800, 0x10400002,
+0x3c020080, 0x2c2b025, 0x8c020218, 0x30420400,
+0x10400002, 0x3c020100, 0x2c2b025, 0x8c020218,
+0x30420100, 0x10400002, 0x3c020200, 0x2c2b025,
+0x8c020218, 0x30420080, 0x10400002, 0x3c020400,
+0x2c2b025, 0x8c020218, 0x30422000, 0x10400002,
+0x3c020010, 0x2c2b025, 0x8c020218, 0x30424000,
+0x10400002, 0x3c020020, 0x2c2b025, 0x8c020218,
+0x30421000, 0x10400002, 0x3c020040, 0x2c2b025,
+0x8ee20498, 0x8ee3049c, 0xaf420160, 0xaf430164,
+0x8ee204a0, 0x8ee304a4, 0xaf420168, 0xaf43016c,
+0x8ee204a8, 0x8ee304ac, 0xaf420170, 0xaf430174,
+0x8ee20428, 0x8ee3042c, 0xaf420178, 0xaf43017c,
+0x8ee20448, 0x8ee3044c, 0xaf420180, 0xaf430184,
+0x8ee20458, 0x8ee3045c, 0xaf420188, 0xaf43018c,
+0x8ee20468, 0x8ee3046c, 0xaf420190, 0xaf430194,
+0x8ee20478, 0x8ee3047c, 0xaf420198, 0xaf43019c,
+0x8ee20488, 0x8ee3048c, 0xaf4201a0, 0xaf4301a4,
+0x8ee204b0, 0x8ee304b4, 0x24040080, 0xaf4201a8,
+0xaf4301ac, 0xc002ba8, 0x24050080, 0x8c02025c,
+0x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
+0x24060008, 0xc002bbf, 0xaf4201f8, 0x3c043b9a,
+0x3484ca00, 0x3821, 0x24020006, 0x24030002,
+0xaf4201f4, 0x240203e8, 0xaf430204, 0xaf430200,
+0xaf4401fc, 0xaf420294, 0x24020001, 0xaf430290,
+0xaf42029c, 0x3c030001, 0x671821, 0x90636cd8,
+0x3471021, 0x24e70001, 0xa043022c, 0x2ce2000f,
+0x1440fff8, 0x3471821, 0x24e70001, 0x3c080001,
+0x350840f8, 0x8f820040, 0x3c040001, 0x24845d2c,
+0x24051400, 0x21702, 0x24420030, 0xa062022c,
+0x3471021, 0xa040022c, 0x8c070218, 0x2c03021,
+0x240205c8, 0xafa20010, 0xc002b3b, 0xafa80014,
+0x3c040001, 0x24845d38, 0x3c050000, 0x24a55c80,
+0x24060010, 0x27b10030, 0x2203821, 0x27b30034,
+0xc0017a3, 0xafb30010, 0x3c030001, 0x8c636cc8,
+0x1060000a, 0x408021, 0x8fa30030, 0x2405ff00,
+0x8fa20034, 0x246400ff, 0x852024, 0x831823,
+0x431023, 0xafa20034, 0xafa40030, 0x3c040001,
+0x24845d44, 0x3c050000, 0x24a54100, 0x24060108,
+0x2203821, 0xc0017a3, 0xafb30010, 0x409021,
+0x32c20003, 0x3c010001, 0xac326e80, 0x10400045,
+0x2203821, 0x8f820050, 0x3c030010, 0x431024,
+0x10400016, 0x0, 0x8c020218, 0x30420040,
+0x1040000f, 0x24020001, 0x8f820050, 0x8c030218,
+0x240e0001, 0x3c040001, 0x24845d50, 0xa3ae003f,
+0xafa20010, 0xafa30014, 0x8f870040, 0x24051500,
+0xc002b3b, 0x2c03021, 0x10000004, 0x0,
+0x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
+0x24845d5c, 0x3c050001, 0x24a55b40, 0x3c060001,
+0x24c65bac, 0xc53023, 0x8f420010, 0x27b30030,
+0x2603821, 0x27b10034, 0x34420a00, 0xaf420010,
+0xc0017a3, 0xafb10010, 0x3c040001, 0x24845d70,
+0x3c050001, 0x24a5b714, 0x3c060001, 0x24c6ba90,
+0xc53023, 0x2603821, 0xaf420108, 0xc0017a3,
+0xafb10010, 0x3c040001, 0x24845d8c, 0x3c050001,
+0x24a5be58, 0x3c060001, 0x24c6c900, 0xc53023,
+0x2603821, 0x3c010001, 0xac226ef4, 0xc0017a3,
+0xafb10010, 0x3c040001, 0x24845da4, 0x10000024,
+0x24051600, 0x3c040001, 0x24845dac, 0x3c050001,
+0x24a5a10c, 0x3c060001, 0x24c6a238, 0xc53023,
+0xc0017a3, 0xafb30010, 0x3c040001, 0x24845dbc,
+0x3c050001, 0x24a5b2b0, 0x3c060001, 0x24c6b70c,
+0xc53023, 0x2203821, 0xaf420108, 0xc0017a3,
+0xafb30010, 0x3c040001, 0x24845dd0, 0x3c050001,
+0x24a5ba98, 0x3c060001, 0x24c6be50, 0xc53023,
+0x2203821, 0x3c010001, 0xac226ef4, 0xc0017a3,
+0xafb30010, 0x3c040001, 0x24845de4, 0x24051650,
+0x2c03021, 0x3821, 0x3c010001, 0xac226ef8,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x32c20020,
+0x10400021, 0x27a70030, 0x3c040001, 0x24845df0,
+0x3c050001, 0x24a5b13c, 0x3c060001, 0x24c6b2a8,
+0xc53023, 0x24022000, 0xaf42001c, 0x27a20034,
+0xc0017a3, 0xafa20010, 0x21900, 0x31982,
+0x3c040800, 0x641825, 0xae430028, 0x24030010,
+0xaf43003c, 0x96e30450, 0xaf430040, 0x8f430040,
+0x3c040001, 0x24845e04, 0xafa00014, 0xafa30010,
+0x8f47001c, 0x24051660, 0x3c010001, 0xac226ef0,
+0x10000025, 0x32c60020, 0x8ee20448, 0x8ee3044c,
+0xaf43001c, 0x8f42001c, 0x2442e000, 0x2c422001,
+0x1440000a, 0x240e0001, 0x3c040001, 0x24845e10,
+0xa3ae003f, 0xafa00010, 0xafa00014, 0x8f46001c,
+0x24051700, 0xc002b3b, 0x3821, 0x3c020000,
+0x24425cbc, 0x21100, 0x21182, 0x3c030800,
+0x431025, 0xae420028, 0x24020008, 0xaf42003c,
+0x96e20450, 0xaf420040, 0x8f420040, 0x3c040001,
+0x24845e1c, 0xafa00014, 0xafa20010, 0x8f47001c,
+0x24051800, 0x32c60020, 0xc002b3b, 0x0,
+0x3c050fff, 0x3c030001, 0x8c636ef4, 0x34a5ffff,
+0x2403021, 0x3c020001, 0x8c426ef8, 0x3c040800,
+0x651824, 0x31882, 0x641825, 0x451024,
+0x21082, 0x441025, 0xacc20080, 0x32c20180,
+0x10400056, 0xacc30020, 0x8f82005c, 0x3c030080,
+0x431024, 0x1040000d, 0x0, 0x8f820050,
+0xafa20010, 0x8f82005c, 0x240e0001, 0x3c040001,
+0x24845e28, 0xa3ae003f, 0xafa20014, 0x8f870040,
+0x24051900, 0xc002b3b, 0x2c03021, 0x8f820050,
+0x3c030010, 0x431024, 0x10400016, 0x0,
+0x8c020218, 0x30420040, 0x1040000f, 0x24020001,
+0x8f820050, 0x8c030218, 0x240e0001, 0x3c040001,
+0x24845d50, 0xa3ae003f, 0xafa20010, 0xafa30014,
+0x8f870040, 0x24052000, 0xc002b3b, 0x2c03021,
+0x10000004, 0x0, 0x3c010001, 0x370821,
+0xa02240f4, 0x3c040001, 0x24845e34, 0x3c050001,
+0x24a55ac0, 0x3c060001, 0x24c65b38, 0xc53023,
+0x8f420008, 0x27b30030, 0x2603821, 0x27b10034,
+0x34420e00, 0xaf420008, 0xc0017a3, 0xafb10010,
+0x3c040001, 0x24845e4c, 0x3c050001, 0x24a5d8b4,
+0x3c060001, 0x24c6e3c8, 0xc53023, 0x2603821,
+0xaf42010c, 0xc0017a3, 0xafb10010, 0x3c040001,
+0x24845e64, 0x3c050001, 0x24a5e9ac, 0x3c060001,
+0x24c6f0f0, 0xc53023, 0x2603821, 0x3c010001,
+0xac226f04, 0xc0017a3, 0xafb10010, 0x3c040001,
+0x24845e7c, 0x10000027, 0x24052100, 0x3c040001,
+0x24845e84, 0x3c050001, 0x24a59fc8, 0x3c060001,
+0x24c6a104, 0xc53023, 0x27b10030, 0x2203821,
+0x27b30034, 0xc0017a3, 0xafb30010, 0x3c040001,
+0x24845e94, 0x3c050001, 0x24a5cad4, 0x3c060001,
+0x24c6d8ac, 0xc53023, 0x2203821, 0xaf42010c,
+0xc0017a3, 0xafb30010, 0x3c040001, 0x24845ea4,
+0x3c050001, 0x24a5e84c, 0x3c060001, 0x24c6e9a4,
+0xc53023, 0x2203821, 0x3c010001, 0xac226f04,
+0xc0017a3, 0xafb30010, 0x3c040001, 0x24845eb8,
+0x24052150, 0x2c03021, 0x3821, 0x3c010001,
+0xac226f10, 0xafa00010, 0xc002b3b, 0xafa00014,
+0x3c110fff, 0x3c030001, 0x8c636f04, 0x3631ffff,
+0x2409821, 0x3c020001, 0x8c426f10, 0x3c0e0800,
+0x711824, 0x31882, 0x6e1825, 0x511024,
+0x21082, 0x4e1025, 0xae630038, 0xae620078,
+0x8c020218, 0x30420040, 0x14400004, 0x24020001,
+0x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
+0x24845ec4, 0x3c050001, 0x24a5e3d0, 0x3c060001,
+0x24c6e52c, 0xc53023, 0x27be0030, 0x3c03821,
+0x27b50034, 0xc0017a3, 0xafb50010, 0x3c010001,
+0xac226efc, 0x511024, 0x21082, 0x3c0e0800,
+0x4e1025, 0xae620050, 0x32c22000, 0x10400006,
+0x3c03821, 0x3c020000, 0x24425cbc, 0x2221024,
+0x1000000f, 0x21082, 0x3c040001, 0x24845ed8,
+0x3c050001, 0x24a5e534, 0x3c060001, 0x24c6e6e4,
+0xc53023, 0xc0017a3, 0xafb50010, 0x3c010001,
+0xac226f14, 0x511024, 0x21082, 0x3c0e0800,
+0x4e1025, 0xae620048, 0x32c24000, 0x10400005,
+0x27a70030, 0x3c020000, 0x24425cbc, 0x1000000e,
+0x21100, 0x3c040001, 0x24845ef0, 0x3c050001,
+0x24a5e6ec, 0x3c060001, 0x24c6e844, 0xc53023,
+0x27a20034, 0xc0017a3, 0xafa20010, 0x3c010001,
+0xac226f08, 0x21100, 0x21182, 0x3c030800,
+0x431025, 0xae420060, 0x3c040001, 0x24845f08,
+0x3c050001, 0x24a58230, 0x3c060001, 0x24c68650,
+0xc53023, 0x27b10030, 0x2203821, 0x27b30034,
+0xc0017a3, 0xafb30010, 0x3c0e0fff, 0x35ceffff,
+0x3c040001, 0x24845f14, 0x3c050000, 0x24a56468,
+0x3c060000, 0x24c66588, 0xc53023, 0x2203821,
+0x240f021, 0x3c010001, 0xac226edc, 0x4e1024,
+0x21082, 0x3c150800, 0x551025, 0xafae0044,
+0xafc200b8, 0xc0017a3, 0xafb30010, 0x3c040001,
+0x24845f20, 0x3c050000, 0x24a56590, 0x3c060000,
+0x24c66808, 0x8fae0044, 0xc53023, 0x2203821,
+0x3c010001, 0xac226ed0, 0x4e1024, 0x21082,
+0x551025, 0xafc200e8, 0xc0017a3, 0xafb30010,
+0x3c040001, 0x24845f38, 0x3c050000, 0x24a56810,
+0x3c060000, 0x24c66940, 0x8fae0044, 0xc53023,
+0x2203821, 0x3c010001, 0xac226ec8, 0x4e1024,
+0x21082, 0x551025, 0xafc200c0, 0xc0017a3,
+0xafb30010, 0x3c040001, 0x24845f50, 0x3c050001,
+0x24a5fad0, 0x3c060001, 0x24c6fba8, 0x8fae0044,
+0xc53023, 0x2203821, 0x3c010001, 0xac226ed4,
+0x4e1024, 0x21082, 0x551025, 0xafc200c8,
+0xc0017a3, 0xafb30010, 0x3c040001, 0x24845f5c,
+0x3c050001, 0x24a5c93c, 0x3c060001, 0x24c6ca20,
+0xc53023, 0x2203821, 0xaf420110, 0xc0017a3,
+0xafb30010, 0x3c040001, 0x24845f6c, 0x3c050001,
+0x24a5c910, 0x3c060001, 0x24c6c934, 0xc53023,
+0x2203821, 0xaf420124, 0xc0017a3, 0xafb30010,
+0x3c040001, 0x24845f7c, 0x3c050001, 0x24a55a80,
+0x3c060001, 0x24c65aac, 0xc53023, 0x2203821,
+0xaf420120, 0xaf420114, 0xc0017a3, 0xafb30010,
+0x3c040001, 0x24845f88, 0x3c050001, 0x24a5f298,
+0x3c060001, 0x24c6f6b4, 0xc53023, 0x2203821,
+0xaf420118, 0xc0017a3, 0xafb30010, 0x8fae0044,
+0x3c010001, 0xac226f18, 0x4e1024, 0x21082,
+0x551025, 0xc003fc3, 0xafc200d0, 0xc003c40,
+0x0, 0xc0027a8, 0x0, 0xac000228,
+0xac00022c, 0x96e20450, 0x2442ffff, 0xaf420038,
+0x96e20460, 0xaf420080, 0x32c24000, 0x14400003,
+0x0, 0x96e20480, 0xaf420084, 0x96e70490,
+0x50e00001, 0x24070800, 0x24e2ffff, 0xaf420088,
+0xaf42007c, 0x24020800, 0x10e2000f, 0x32c24000,
+0x10400003, 0x24020400, 0x10e2000b, 0x0,
+0x240e0001, 0x3c040001, 0x24845f98, 0xa3ae003f,
+0x96e60490, 0x24052170, 0x2c03821, 0xafa00010,
+0xc002b3b, 0xafa00014, 0x8f430138, 0x8f440138,
+0x24020001, 0xa34205c2, 0xaf430094, 0xaf440098,
+0xafa00010, 0xafa00014, 0x8f460080, 0x8f470084,
+0x3c040001, 0x24845fa4, 0xc002b3b, 0x24052200,
+0xc0024a4, 0x3c110800, 0x3c1433d8, 0x3694cb58,
+0x3c020800, 0x34420080, 0x3c040001, 0x24845fb0,
+0x3c050000, 0x24a55d00, 0x3c060000, 0x24c65d1c,
+0xc53023, 0x27a70030, 0xaf820060, 0x2402ffff,
+0xaf820064, 0x27a20034, 0xc0017a3, 0xafa20010,
+0x3c010001, 0xac226eb8, 0x21100, 0x21182,
+0x511025, 0xc0018fc, 0xae420000, 0x8f820240,
+0x3c030001, 0x431025, 0xaf820240, 0x3c020000,
+0x24424034, 0xaf820244, 0xaf800240, 0x8f820060,
+0x511024, 0x14400005, 0x3c030800, 0x8f820060,
+0x431024, 0x1040fffd, 0x0, 0xc003c4d,
+0x8821, 0x3c020100, 0xafa20020, 0x8f530018,
+0x240200ff, 0x56620001, 0x26710001, 0x8c020228,
+0x1622000e, 0x1330c0, 0x8f42033c, 0x24420001,
+0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
+0x24845c24, 0x3c050009, 0xafa00014, 0xafa20010,
+0x8fa60020, 0x1000003f, 0x34a50100, 0xd71021,
+0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
+0xc01821, 0x8f440178, 0x8f45017c, 0x1021,
+0x24070004, 0xafa70010, 0xafb10014, 0x8f48000c,
+0x24c604c0, 0x2e63021, 0xafa80018, 0x8f48010c,
+0x24070008, 0xa32821, 0xa3482b, 0x822021,
+0x100f809, 0x892021, 0x1440000b, 0x24070008,
+0x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
+0x24845c2c, 0x3c050009, 0xafa20014, 0x8fa60020,
+0x1000001c, 0x34a50200, 0x8f440160, 0x8f450164,
+0x8f43000c, 0xaf510018, 0x8f860120, 0x24020010,
+0xafa20010, 0xafb10014, 0xafa30018, 0x8f42010c,
+0x40f809, 0x24c6001c, 0x14400010, 0x0,
+0x8f420340, 0x24420001, 0xaf420340, 0x8f420340,
+0x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
+0x24845c34, 0x3c050009, 0xafa20014, 0x8fa60020,
+0x34a50300, 0xc002b3b, 0x2603821, 0x8f4202e4,
+0x24420001, 0xaf4202e4, 0x8f4202e4, 0x93a2003f,
+0x10400069, 0x3c020700, 0x34423000, 0xafa20028,
+0x8f530018, 0x240200ff, 0x12620002, 0x8821,
+0x26710001, 0x8c020228, 0x1622000e, 0x1330c0,
+0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
+0x8c020228, 0x3c040001, 0x24845c24, 0x3c050009,
+0xafa00014, 0xafa20010, 0x8fa60028, 0x1000003f,
+0x34a50100, 0xd71021, 0x8fa30028, 0x8fa4002c,
+0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
+0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
+0xafb10014, 0x8f48000c, 0x24c604c0, 0x2e63021,
+0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24845c2c, 0x3c050009,
+0xafa20014, 0x8fa60028, 0x1000001c, 0x34a50200,
+0x8f440160, 0x8f450164, 0x8f43000c, 0xaf510018,
+0x8f860120, 0x24020010, 0xafa20010, 0xafb10014,
+0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x14400010, 0x0, 0x8f420340, 0x24420001,
+0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24845c34, 0x3c050009,
+0xafa20014, 0x8fa60028, 0x34a50300, 0xc002b3b,
+0x2603821, 0x8f4202f0, 0x24420001, 0xaf4202f0,
+0x8f4202f0, 0x3c040001, 0x24845fc0, 0xafa00010,
+0xafa00014, 0x8fa60028, 0x24052300, 0xc002b3b,
+0x3821, 0x10000004, 0x0, 0x8c020264,
+0x10400005, 0x0, 0x8f8200a0, 0x30420004,
+0x1440fffa, 0x0, 0x8f820044, 0x34420004,
+0xaf820044, 0x8f420308, 0x24420001, 0xaf420308,
+0x8f420308, 0x8f8200d8, 0x8f8300d4, 0x431023,
+0x2442ff80, 0xaf420090, 0x8f420090, 0x2842ff81,
+0x10400006, 0x24020001, 0x8f420090, 0x8f430144,
+0x431021, 0xaf420090, 0x24020001, 0xaf42008c,
+0x32c20008, 0x10400006, 0x0, 0x8f820214,
+0x3c038100, 0x3042ffff, 0x431025, 0xaf820214,
+0x3c030001, 0x8c636d94, 0x30620002, 0x10400009,
+0x30620001, 0x3c040001, 0x24845fcc, 0x3c050000,
+0x24a56d50, 0x3c060000, 0x24c671c8, 0x10000012,
+0xc53023, 0x10400009, 0x0, 0x3c040001,
+0x24845fdc, 0x3c050000, 0x24a571d0, 0x3c060000,
+0x24c67678, 0x10000008, 0xc53023, 0x3c040001,
+0x24845fec, 0x3c050000, 0x24a56948, 0x3c060000,
+0x24c66d48, 0xc53023, 0x27a70030, 0x27a20034,
+0xc0017a3, 0xafa20010, 0x3c010001, 0xac226ecc,
+0x3c020001, 0x8c426ecc, 0x3c030800, 0x21100,
+0x21182, 0x431025, 0xae420040, 0x8f8200a0,
+0xafa20010, 0x8f8200b0, 0xafa20014, 0x8f86005c,
+0x8f87011c, 0x3c040001, 0x24845ffc, 0x3c010001,
+0xac366ea4, 0x3c010001, 0xac206e94, 0x3c010001,
+0xac3c6e8c, 0x3c010001, 0xac3b6ebc, 0x3c010001,
+0xac376ec0, 0x3c010001, 0xac3a6ea0, 0xc002b3b,
+0x24052400, 0x8f820200, 0xafa20010, 0x8f820220,
+0xafa20014, 0x8f860044, 0x8f870050, 0x3c040001,
+0x24846008, 0xc002b3b, 0x24052500, 0x8f830060,
+0x74100b, 0x242000a, 0x200f821, 0x0,
+0xd, 0x8fbf0060, 0x8fbe005c, 0x8fb50058,
+0x8fb30054, 0x8fb20050, 0x8fb1004c, 0x8fb00048,
+0x3e00008, 0x27bd0068, 0x27bdffe0, 0x3c040001,
+0x24846014, 0x24052600, 0x3021, 0x3821,
+0xafbf0018, 0xafa00010, 0xc002b3b, 0xafa00014,
+0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
+0x0, 0x3e00008, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x3e00008, 0x0, 0x3e00008, 0x0,
+0x27bdfde0, 0x27a50018, 0x3c04dead, 0x3484beef,
+0xafbf0218, 0x8f820150, 0x3c03001f, 0x3463ffff,
+0xafa40018, 0xa22823, 0xa32824, 0x8ca20000,
+0x1044000a, 0x0, 0xafa50010, 0x8ca20000,
+0xafa20014, 0x8f860150, 0x8f870250, 0x3c040001,
+0x2484601c, 0xc002b3b, 0x24052700, 0x8fbf0218,
+0x3e00008, 0x27bd0220, 0x27bdffe0, 0x3c06abba,
+0x34c6babe, 0xafb00018, 0x3c100004, 0x3c07007f,
+0x34e7ffff, 0xafbf001c, 0x102840, 0x8e040000,
+0x8ca30000, 0xaca00000, 0xae060000, 0x8ca20000,
+0xaca30000, 0x10460005, 0xae040000, 0xa08021,
+0xf0102b, 0x1040fff5, 0x102840, 0x3c040001,
+0x24846028, 0x24052800, 0x2003021, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x2001021,
+0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
+0x8c020224, 0x3047003f, 0x10e00010, 0x803021,
+0x2821, 0x24030020, 0xe31024, 0x10400002,
+0x63042, 0xa62821, 0x31842, 0x1460fffb,
+0xe31024, 0x2402f000, 0xa22824, 0x3402ffff,
+0x45102b, 0x14400003, 0x3c020001, 0x10000008,
+0x3c020001, 0x3442ffff, 0x851823, 0x43102b,
+0x14400003, 0xa01021, 0x3c02fffe, 0x821021,
+0x3e00008, 0x0, 0x27bdffd0, 0xafb50028,
+0x8fb50040, 0xafb20020, 0xa09021, 0xafb1001c,
+0x24c60003, 0xafbf002c, 0xafb30024, 0xafb00018,
+0x8ea20000, 0x2403fffc, 0xc38024, 0x50102b,
+0x1440001b, 0xe08821, 0x8e330000, 0xafb00010,
+0x8ea20000, 0xafa20014, 0x8e270000, 0x24053000,
+0xc002b3b, 0x2403021, 0x8e230000, 0x702021,
+0x64102b, 0x10400007, 0x2402821, 0x8ca20000,
+0xac620000, 0x24630004, 0x64102b, 0x1440fffb,
+0x24a50004, 0x8ea20000, 0x501023, 0xaea20000,
+0x8e220000, 0x501021, 0x1000000b, 0xae220000,
+0x2402002d, 0xa0820000, 0xafb00010, 0x8ea20000,
+0x2409821, 0xafa20014, 0x8e270000, 0x24053100,
+0xc002b3b, 0x2603021, 0x2601021, 0x8fbf002c,
+0x8fb50028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
+0x8fb00018, 0x3e00008, 0x27bd0030, 0x27bdffe8,
+0x3c1cc000, 0x3c05fffe, 0x3c030001, 0x8c636e84,
+0x3c040001, 0x8c846e90, 0x34a5bf08, 0x24021ffc,
+0x3c010001, 0xac226cd0, 0x3c0200c0, 0x3c010001,
+0xac226cd4, 0x3c020020, 0xafbf0010, 0x3c0100c0,
+0xac201ffc, 0x431023, 0x441023, 0x245bb000,
+0x365b821, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
+0x3c0400c0, 0x34840200, 0x3c1a00c0, 0x3c0300c0,
+0x346307c8, 0x24021dfc, 0x3c010001, 0xac226cd0,
+0x24021834, 0x3c010001, 0xac246cd4, 0x3c010001,
+0xac226cd0, 0x3c010001, 0xac236cd4, 0xc00180d,
+0x375a0200, 0x8fbf0010, 0x3e00008, 0x27bd0018,
+0x27bdffc8, 0x3c040001, 0x24846034, 0x24053200,
+0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
+0x3021, 0x3603821, 0xafbf0030, 0xafb3002c,
+0xafb20028, 0xafb10024, 0xafb00020, 0xafa2001c,
+0xafa30018, 0xafb70010, 0xc002b3b, 0xafba0014,
+0xc001916, 0x0, 0x8f820240, 0x34420004,
+0xaf820240, 0x24020001, 0xaf420000, 0x3c020001,
+0x571021, 0x904240f4, 0x10400092, 0x2403fffc,
+0x3c100001, 0x2610ac73, 0x3c120001, 0x2652a84c,
+0x2121023, 0x438024, 0x8fa3001c, 0x3c040001,
+0x24846040, 0x70102b, 0x1440001a, 0x27b30018,
+0x8fb10018, 0x24053000, 0x2403021, 0xafb00010,
+0xafa30014, 0xc002b3b, 0x2203821, 0x8fa30018,
+0x702021, 0x64102b, 0x10400007, 0x2403021,
+0x8cc20000, 0xac620000, 0x24630004, 0x64102b,
+0x1440fffb, 0x24c60004, 0x8fa2001c, 0x501023,
+0xafa2001c, 0x8e620000, 0x501021, 0x1000000a,
+0xae620000, 0x2408821, 0x24053100, 0xafb00010,
+0xafa30014, 0x8fa70018, 0x2203021, 0x2402002d,
+0xc002b3b, 0xa0820000, 0x24070020, 0x8fa3001c,
+0x3c040001, 0x2484605c, 0x24120020, 0x3c010001,
+0xac316eb0, 0x2c620020, 0x1440001d, 0x27b10018,
+0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f50,
+0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
+0x8fa30018, 0x3c040001, 0x24846f50, 0x24650020,
+0x65102b, 0x10400007, 0x0, 0x8c820000,
+0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
+0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
+0x8e220000, 0x521021, 0x1000000b, 0xae220000,
+0x3c100001, 0x26106f50, 0x24053100, 0xafa70010,
+0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
+0xc002b3b, 0xa0820000, 0x24070020, 0x3c040001,
+0x24846070, 0x8fa3001c, 0x24120020, 0x3c010001,
+0xac306ee4, 0x2c620020, 0x1440001d, 0x27b10018,
+0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f70,
+0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
+0x8fa30018, 0x3c040001, 0x24846f70, 0x24650020,
+0x65102b, 0x10400007, 0x0, 0x8c820000,
+0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
+0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
+0x8e220000, 0x521021, 0x1000000b, 0xae220000,
+0x3c100001, 0x26106f70, 0x24053100, 0xafa70010,
+0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
+0xc002b3b, 0xa0820000, 0x3c010001, 0x10000031,
+0xac306ee0, 0x3c100001, 0x2610821f, 0x3c120001,
+0x2652809c, 0x2121023, 0x438024, 0x8fa3001c,
+0x3c040001, 0x24846084, 0x70102b, 0x1440001a,
+0x27b30018, 0x8fb10018, 0x24053000, 0x2403021,
+0xafb00010, 0xafa30014, 0xc002b3b, 0x2203821,
+0x8fa30018, 0x702021, 0x64102b, 0x10400007,
+0x2403021, 0x8cc20000, 0xac620000, 0x24630004,
+0x64102b, 0x1440fffb, 0x24c60004, 0x8fa2001c,
+0x501023, 0xafa2001c, 0x8e620000, 0x501021,
+0x1000000a, 0xae620000, 0x2408821, 0x24053100,
+0xafb00010, 0xafa30014, 0x8fa70018, 0x2203021,
+0x2402002d, 0xc002b3b, 0xa0820000, 0x3c010001,
+0xac316eb0, 0x3c030001, 0x8c636eb0, 0x24020400,
+0x60f809, 0xaf820070, 0x8fbf0030, 0x8fb3002c,
+0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
+0x27bd0038, 0x0, 0x0, 0x8f820040,
+0x3c03f000, 0x431024, 0x3c036000, 0x14430006,
+0x0, 0x8f820050, 0x2403ff80, 0x431024,
+0x34420055, 0xaf820050, 0x8f820054, 0x244203e8,
+0xaf820058, 0x240201f4, 0xaf4200e0, 0x24020004,
+0xaf4200e8, 0x24020002, 0xaf4001b0, 0xaf4000e4,
+0xaf4200dc, 0xaf4000d8, 0xaf4000d4, 0x3e00008,
+0xaf4000d0, 0x8f820054, 0x24420005, 0x3e00008,
+0xaf820078, 0x27bdffe8, 0xafbf0010, 0x8f820054,
+0x244203e8, 0xaf820058, 0x3c020800, 0x2c21024,
+0x10400004, 0x3c02f7ff, 0x3442ffff, 0x2c2b024,
+0x36940040, 0x3c020001, 0x8c426da8, 0x10400017,
+0x3c020200, 0x3c030001, 0x8c636f1c, 0x10600016,
+0x282a025, 0x3c020001, 0x8c426e44, 0x14400012,
+0x3c020200, 0x3c020001, 0x8c426d94, 0x30420003,
+0x1440000d, 0x3c020200, 0x8f830224, 0x3c020002,
+0x8c428fec, 0x10620008, 0x3c020200, 0xc003daf,
+0x0, 0x10000004, 0x3c020200, 0xc004196,
+0x0, 0x3c020200, 0x2c21024, 0x10400003,
+0x0, 0xc001f4b, 0x0, 0x8f4200d8,
+0x8f4300dc, 0x24420001, 0xaf4200d8, 0x43102b,
+0x14400003, 0x0, 0xaf4000d8, 0x36940080,
+0x8c030238, 0x1060000c, 0x0, 0x8f4201b0,
+0x244203e8, 0xaf4201b0, 0x43102b, 0x14400006,
+0x0, 0x934205c5, 0x14400003, 0x0,
+0xc001da0, 0x0, 0x8fbf0010, 0x3e00008,
+0x27bd0018, 0x3e00008, 0x0, 0x27bdffd8,
+0xafbf0020, 0x8f43002c, 0x8f420038, 0x10620059,
+0x0, 0x3c020001, 0x571021, 0x904240f0,
+0x10400026, 0x24070008, 0x8f440170, 0x8f450174,
+0x8f48000c, 0x8f860120, 0x24020020, 0xafa20010,
+0xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
+0x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
+0x370821, 0xa02240f0, 0x8f820124, 0xafa20010,
+0x8f820128, 0x3c040001, 0x24846128, 0xafa20014,
+0x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
+0x34a50900, 0x1000005c, 0x0, 0x8f420300,
+0x24420001, 0xaf420300, 0x8f420300, 0x8f42002c,
+0xa34005c1, 0x10000027, 0xaf420038, 0x8f440170,
+0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
+0x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
+0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
+0x24020001, 0x3c010001, 0x370821, 0xa02240f1,
+0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
+0x24846134, 0xafa20014, 0x8f46002c, 0x8f870120,
+0x3c050009, 0xc002b3b, 0x34a51100, 0x10000036,
+0x0, 0x8f420300, 0x8f43002c, 0x24420001,
+0xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
+0xaf430038, 0x3c010001, 0x370821, 0xa02040f1,
+0x3c010001, 0x370821, 0xa02040f0, 0x10000026,
+0xaf400034, 0x934205c1, 0x1040001d, 0x0,
+0xa34005c1, 0x8f820040, 0x30420001, 0x14400008,
+0x2021, 0x8c030104, 0x24020001, 0x50620005,
+0x24040001, 0x8c020264, 0x10400003, 0x801021,
+0x24040001, 0x801021, 0x10400006, 0x0,
+0x8f42030c, 0x24420001, 0xaf42030c, 0x10000008,
+0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
+0x8f420308, 0x24420001, 0xaf420308, 0x8f420308,
+0x3c010001, 0x370821, 0xa02040f0, 0x3c010001,
+0x370821, 0xa02040f1, 0x8f420000, 0x10400007,
+0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
+0x0, 0x10000005, 0x0, 0xaf800048,
+0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
+0x3c03ff7f, 0x3463ffff, 0x431024, 0xaf820060,
+0x8f420000, 0x10400003, 0x0, 0x10000002,
+0xaf80004c, 0xaf800048, 0x8fbf0020, 0x3e00008,
+0x27bd0028, 0x3e00008, 0x0, 0x27bdffd8,
+0xafbf0020, 0x8f430044, 0x8f42007c, 0x10620029,
+0x24070008, 0x8f440168, 0x8f45016c, 0x8f48000c,
+0x8f860120, 0x24020040, 0xafa20010, 0xafa30014,
+0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x14400011, 0x24020001, 0x3c010001, 0x370821,
+0xa02240f2, 0x8f820124, 0xafa20010, 0x8f820128,
+0x3c040001, 0x2484613c, 0xafa20014, 0x8f460044,
+0x8f870120, 0x3c050009, 0xc002b3b, 0x34a51300,
+0x1000000f, 0x0, 0x8f420304, 0x24420001,
+0xaf420304, 0x8f420304, 0x8f420044, 0xaf42007c,
+0x3c010001, 0x370821, 0xa02040f2, 0x10000004,
+0xaf400078, 0x3c010001, 0x370821, 0xa02040f2,
+0x8f420000, 0x10400007, 0x0, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x3c03feff, 0x3463ffff,
+0x431024, 0xaf820060, 0x8f420000, 0x10400003,
+0x0, 0x10000002, 0xaf80004c, 0xaf800048,
+0x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
+0x0, 0x3c020001, 0x8c426da8, 0x27bdffa8,
+0xafbf0050, 0xafbe004c, 0xafb50048, 0xafb30044,
+0xafb20040, 0xafb1003c, 0xafb00038, 0x104000d5,
+0x8f900044, 0x8f4200d0, 0x24430001, 0x2842000b,
+0x144000e4, 0xaf4300d0, 0x8f420004, 0x30420002,
+0x1440009c, 0xaf4000d0, 0x8f420004, 0x3c030001,
+0x8c636d98, 0x34420002, 0xaf420004, 0x24020001,
+0x14620003, 0x3c020600, 0x10000002, 0x34423000,
+0x34421000, 0xafa20020, 0x8f4a0018, 0xafaa0034,
+0x27aa0020, 0xafaa002c, 0x8faa0034, 0x240200ff,
+0x11420002, 0x1821, 0x25430001, 0x8c020228,
+0x609821, 0x1662000e, 0x3c050009, 0x8f42033c,
+0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
+0x8fa70034, 0x3c040001, 0x2484610c, 0xafa00014,
+0xafa20010, 0x8fa60020, 0x10000070, 0x34a50500,
+0x8faa0034, 0xa38c0, 0xf71021, 0x8fa30020,
+0x8fa40024, 0xac4304c0, 0xac4404c4, 0x8f830054,
+0x8f820054, 0x247103e8, 0x2221023, 0x2c4203e9,
+0x1040001b, 0xa821, 0xe09021, 0x265e04c0,
+0x8f440178, 0x8f45017c, 0x2401821, 0x240a0004,
+0xafaa0010, 0xafb30014, 0x8f48000c, 0x1021,
+0x2fe3021, 0xafa80018, 0x8f48010c, 0x24070008,
+0xa32821, 0xa3482b, 0x822021, 0x100f809,
+0x892021, 0x54400006, 0x24150001, 0x8f820054,
+0x2221023, 0x2c4203e9, 0x1440ffe9, 0x0,
+0x32a200ff, 0x54400018, 0xaf530018, 0x8f420378,
+0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
+0x8faa002c, 0x8fa70034, 0xafa20010, 0x8f820124,
+0x3c040001, 0x24846118, 0xafa20014, 0x8d460000,
+0x3c050009, 0x10000035, 0x34a50600, 0x8f420308,
+0x24150001, 0x24420001, 0xaf420308, 0x8f420308,
+0x1000001e, 0x32a200ff, 0x8f830054, 0x8f820054,
+0x247103e8, 0x2221023, 0x2c4203e9, 0x10400016,
+0xa821, 0x3c1e0020, 0x24120010, 0x8f42000c,
+0x8f440160, 0x8f450164, 0x8f860120, 0xafb20010,
+0xafb30014, 0x5e1025, 0xafa20018, 0x8f42010c,
+0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
+0x0, 0x8f820054, 0x2221023, 0x2c4203e9,
+0x1440ffee, 0x0, 0x32a200ff, 0x14400011,
+0x3c050009, 0x8f420378, 0x24420001, 0xaf420378,
+0x8f420378, 0x8f820120, 0x8faa002c, 0x8fa70034,
+0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
+0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
+0x0, 0x8f4202ec, 0x24420001, 0xaf4202ec,
+0x8f4202ec, 0x8f420004, 0x30420001, 0x50400029,
+0x36100040, 0x3c020400, 0x2c21024, 0x10400013,
+0x2404ffdf, 0x8f420250, 0x8f430254, 0x8f4401b4,
+0x14640006, 0x36100040, 0x8f420270, 0x8f430274,
+0x8f4401b8, 0x10640007, 0x2402ffdf, 0x8f420250,
+0x8f430254, 0x8f440270, 0x8f450274, 0x10000012,
+0x3a100020, 0x1000002b, 0x2028024, 0x8f420250,
+0x8f430254, 0x8f4501b4, 0x14650006, 0x2048024,
+0x8f420270, 0x8f430274, 0x8f4401b8, 0x50640021,
+0x36100040, 0x8f420250, 0x8f430254, 0x8f440270,
+0x8f450274, 0x3a100040, 0xaf4301b4, 0x10000019,
+0xaf4501b8, 0x8f4200d4, 0x24430001, 0x10000011,
+0x28420033, 0x8f420004, 0x30420001, 0x10400009,
+0x3c020400, 0x2c21024, 0x10400004, 0x2402ffdf,
+0x2028024, 0x1000000b, 0x36100040, 0x10000009,
+0x36100060, 0x8f4200d4, 0x36100040, 0x24430001,
+0x284201f5, 0x14400003, 0xaf4300d4, 0xaf4000d4,
+0x3a100020, 0xaf900044, 0x2402ff7f, 0x282a024,
+0x8fbf0050, 0x8fbe004c, 0x8fb50048, 0x8fb30044,
+0x8fb20040, 0x8fb1003c, 0x8fb00038, 0x3e00008,
+0x27bd0058, 0x3e00008, 0x0, 0x3c020001,
+0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
+0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
+0x104000c7, 0xafb00030, 0x8f4200d0, 0x24430001,
+0x2842000b, 0x144000da, 0xaf4300d0, 0x8f420004,
+0x30420002, 0x14400097, 0xaf4000d0, 0x8f420004,
+0x3c030001, 0x8c636d98, 0x34420002, 0xaf420004,
+0x24020001, 0x14620003, 0x3c020600, 0x10000002,
+0x34423000, 0x34421000, 0xafa20020, 0x1821,
+0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
+0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
+0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
+0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
+0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
+0x8fa60020, 0x1000006d, 0x34a50500, 0xf71021,
+0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
+0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
+0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
+0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
+0x24070008, 0xa32821, 0xa3482b, 0x822021,
+0x100f809, 0x892021, 0x54400006, 0x24130001,
+0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
+0x0, 0x326200ff, 0x54400017, 0xaf520018,
+0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
+0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
+0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
+0x8d460000, 0x10000035, 0x34a50600, 0x8f420308,
+0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
+0x1000001e, 0x326200ff, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x10400016,
+0x9821, 0x3c150020, 0x24110010, 0x8f42000c,
+0x8f440160, 0x8f450164, 0x8f860120, 0xafb10010,
+0xafb20014, 0x551025, 0xafa20018, 0x8f42010c,
+0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
+0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
+0x1440ffee, 0x0, 0x326200ff, 0x14400011,
+0x0, 0x8f420378, 0x24420001, 0xaf420378,
+0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24846120, 0x3c050009,
+0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
+0x3c03821, 0x8f4202ec, 0x24420001, 0xaf4202ec,
+0x8f4202ec, 0x8f420004, 0x30420001, 0x10400018,
+0x24040001, 0x8f420250, 0x8f430254, 0x8f4501b4,
+0x3c010001, 0x14650006, 0xa0246cf1, 0x8f420270,
+0x8f430274, 0x8f4401b8, 0x10640021, 0x0,
+0x8f420250, 0x8f430254, 0x3c040001, 0x90846cf0,
+0x8f460270, 0x8f470274, 0x38840001, 0xaf4301b4,
+0xaf4701b8, 0x3c010001, 0x10000025, 0xa0246cf0,
+0x8f4200d4, 0x3c010001, 0xa0206cf0, 0x24430001,
+0x28420033, 0x1440001e, 0xaf4300d4, 0x3c020001,
+0x90426cf1, 0xaf4000d4, 0x10000017, 0x38420001,
+0x8f420004, 0x30420001, 0x10400008, 0x0,
+0xc00565a, 0x2021, 0x3c010001, 0xa0206cf1,
+0x3c010001, 0x1000000e, 0xa0206cf0, 0x8f4200d4,
+0x3c010001, 0xa0206cf0, 0x24430001, 0x284201f5,
+0x14400007, 0xaf4300d4, 0x3c020001, 0x90426cf1,
+0xaf4000d4, 0x421026, 0x3c010001, 0xa0226cf1,
+0x3c030001, 0x8c636d98, 0x24020002, 0x1462000c,
+0x3c030002, 0x3c030001, 0x90636cf1, 0x24020001,
+0x5462001f, 0x2021, 0x3c020001, 0x90426cf0,
+0x1443001b, 0x24040005, 0x10000019, 0x24040006,
+0x3c020002, 0x8c428ff4, 0x431024, 0x1040000b,
+0x24020001, 0x3c030001, 0x90636cf1, 0x54620010,
+0x2021, 0x3c020001, 0x90426cf0, 0x1443000c,
+0x24040003, 0x1000000a, 0x24040004, 0x3c030001,
+0x90636cf1, 0x14620006, 0x2021, 0x3c020001,
+0x90426cf0, 0x24040001, 0x50440001, 0x24040002,
+0xc00565a, 0x0, 0x2402ff7f, 0x282a024,
+0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
+0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
+0x27bd0050, 0x3e00008, 0x0, 0x3c020001,
+0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
+0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
+0x104000de, 0xafb00030, 0x8f4200d0, 0x3c040001,
+0x8c846d98, 0x24430001, 0x2842000b, 0xaf4400e8,
+0x144000fe, 0xaf4300d0, 0x8f420004, 0x30420002,
+0x14400095, 0xaf4000d0, 0x8f420004, 0x34420002,
+0xaf420004, 0x24020001, 0x14820003, 0x3c020600,
+0x10000002, 0x34423000, 0x34421000, 0xafa20020,
+0x1821, 0x8f5e0018, 0x27aa0020, 0x240200ff,
+0x13c20002, 0xafaa002c, 0x27c30001, 0x8c020228,
+0x609021, 0x1642000e, 0x1e38c0, 0x8f42033c,
+0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
+0x3c040001, 0x2484610c, 0x3c050009, 0xafa00014,
+0xafa20010, 0x8fa60020, 0x1000006d, 0x34a50500,
+0xf71021, 0x8fa30020, 0x8fa40024, 0xac4304c0,
+0xac4404c4, 0x8f830054, 0x8f820054, 0x247003e8,
+0x2021023, 0x2c4203e9, 0x1040001b, 0x9821,
+0xe08821, 0x263504c0, 0x8f440178, 0x8f45017c,
+0x2201821, 0x240a0004, 0xafaa0010, 0xafb20014,
+0x8f48000c, 0x1021, 0x2f53021, 0xafa80018,
+0x8f48010c, 0x24070008, 0xa32821, 0xa3482b,
+0x822021, 0x100f809, 0x892021, 0x54400006,
+0x24130001, 0x8f820054, 0x2021023, 0x2c4203e9,
+0x1440ffe9, 0x0, 0x326200ff, 0x54400017,
+0xaf520018, 0x8f420378, 0x24420001, 0xaf420378,
+0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24846118, 0x3c050009,
+0xafa20014, 0x8d460000, 0x10000035, 0x34a50600,
+0x8f420308, 0x24130001, 0x24420001, 0xaf420308,
+0x8f420308, 0x1000001e, 0x326200ff, 0x8f830054,
+0x8f820054, 0x247003e8, 0x2021023, 0x2c4203e9,
+0x10400016, 0x9821, 0x3c150020, 0x24110010,
+0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
+0xafb10010, 0xafb20014, 0x551025, 0xafa20018,
+0x8f42010c, 0x24070008, 0x40f809, 0x24c6001c,
+0x1440ffe3, 0x0, 0x8f820054, 0x2021023,
+0x2c4203e9, 0x1440ffee, 0x0, 0x326200ff,
+0x14400011, 0x0, 0x8f420378, 0x24420001,
+0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
+0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
+0x3c050009, 0xafa20014, 0x8d460000, 0x34a50700,
+0xc002b3b, 0x3c03821, 0x8f4202ec, 0x24420001,
+0xaf4202ec, 0x8f4202ec, 0x8f420004, 0x30420001,
+0x10400033, 0x3c020400, 0x2c21024, 0x10400017,
+0x0, 0x934205c0, 0x8f440250, 0x8f450254,
+0x8f4301b4, 0x34420020, 0x14a30006, 0xa34205c0,
+0x8f420270, 0x8f430274, 0x8f4401b8, 0x10640008,
+0x0, 0x8f420250, 0x8f430254, 0x934405c0,
+0x8f460270, 0x8f470274, 0x10000016, 0x38840040,
+0x934205c0, 0x10000048, 0x304200bf, 0x934205c0,
+0x8f440250, 0x8f450254, 0x8f4301b4, 0x304200bf,
+0x14a30006, 0xa34205c0, 0x8f420270, 0x8f430274,
+0x8f4401b8, 0x1064000b, 0x0, 0x8f420250,
+0x8f430254, 0x934405c0, 0x8f460270, 0x8f470274,
+0x38840020, 0xaf4301b4, 0xaf4701b8, 0x10000033,
+0xa34405c0, 0x934205c0, 0x1000002f, 0x34420020,
+0x934205c0, 0x8f4300d4, 0x34420020, 0xa34205c0,
+0x24620001, 0x10000023, 0x28630033, 0x8f4200e4,
+0x8f4300e0, 0x24420001, 0xaf4200e4, 0x43102a,
+0x14400006, 0x24030001, 0x8f4200e8, 0x14430002,
+0xaf4000e4, 0x24030004, 0xaf4300e8, 0x8f420004,
+0x30420001, 0x1040000d, 0x3c020400, 0x2c21024,
+0x10400007, 0x0, 0x934205c0, 0x34420040,
+0xa34205c0, 0x934205c0, 0x1000000f, 0x304200df,
+0x934205c0, 0x1000000c, 0x34420060, 0x934205c0,
+0x8f4300d4, 0x34420020, 0xa34205c0, 0x24620001,
+0x286300fb, 0x14600005, 0xaf4200d4, 0x934205c0,
+0xaf4000d4, 0x38420040, 0xa34205c0, 0x934205c0,
+0x8f4300e8, 0x3042007f, 0xa34205c0, 0x24020001,
+0x14620005, 0x0, 0x934405c0, 0x42102,
+0x10000003, 0x348400f0, 0x934405c0, 0x3484000f,
+0xc005640, 0x0, 0x2402ff7f, 0x282a024,
+0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
+0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
+0x27bd0050, 0x3e00008, 0x0, 0x27bdffb0,
+0x274401c0, 0x26e30028, 0x24650400, 0x65102b,
+0xafbf0048, 0xafbe0044, 0xafb50040, 0xafb3003c,
+0xafb20038, 0xafb10034, 0x10400007, 0xafb00030,
+0x8c820000, 0xac620000, 0x24630004, 0x65102b,
+0x1440fffb, 0x24840004, 0x8c020080, 0xaee20044,
+0x8c0200c0, 0xaee20040, 0x8c020084, 0xaee20030,
+0x8c020084, 0xaee2023c, 0x8c020088, 0xaee20240,
+0x8c02008c, 0xaee20244, 0x8c020090, 0xaee20248,
+0x8c020094, 0xaee2024c, 0x8c020098, 0xaee20250,
+0x8c02009c, 0xaee20254, 0x8c0200a0, 0xaee20258,
+0x8c0200a4, 0xaee2025c, 0x8c0200a8, 0xaee20260,
+0x8c0200ac, 0xaee20264, 0x8c0200b0, 0xaee20268,
+0x8c0200b4, 0xaee2026c, 0x8c0200b8, 0xaee20270,
+0x8c0200bc, 0x24040001, 0xaee20274, 0xaee00034,
+0x41080, 0x571021, 0x8ee30034, 0x8c42023c,
+0x24840001, 0x621821, 0x2c82000f, 0xaee30034,
+0x1440fff8, 0x41080, 0x8c0200cc, 0xaee20048,
+0x8c0200d0, 0xaee2004c, 0x8c0200e0, 0xaee201f8,
+0x8c0200e4, 0xaee201fc, 0x8c0200e8, 0xaee20200,
+0x8c0200ec, 0xaee20204, 0x8c0200f0, 0xaee20208,
+0x8ee400c0, 0x8ee500c4, 0x8c0200fc, 0x45102b,
+0x1040000b, 0x0, 0x8ee200c0, 0x8ee300c4,
+0x24040001, 0x24050000, 0x651821, 0x65302b,
+0x441021, 0x461021, 0xaee200c0, 0xaee300c4,
+0x8c0200fc, 0x8ee400c0, 0x8ee500c4, 0x2408ffff,
+0x24090000, 0x401821, 0x1021, 0x882024,
+0xa92824, 0x822025, 0xa32825, 0xaee400c0,
+0xaee500c4, 0x8ee400d0, 0x8ee500d4, 0x8c0200f4,
+0x45102b, 0x1040000b, 0x0, 0x8ee200d0,
+0x8ee300d4, 0x24040001, 0x24050000, 0x651821,
+0x65302b, 0x441021, 0x461021, 0xaee200d0,
+0xaee300d4, 0x8c0200f4, 0x8ee400d0, 0x8ee500d4,
+0x401821, 0x1021, 0x882024, 0xa92824,
+0x822025, 0xa32825, 0xaee400d0, 0xaee500d4,
+0x8ee400c8, 0x8ee500cc, 0x8c0200f8, 0x45102b,
+0x1040000b, 0x0, 0x8ee200c8, 0x8ee300cc,
+0x24040001, 0x24050000, 0x651821, 0x65302b,
+0x441021, 0x461021, 0xaee200c8, 0xaee300cc,
+0x8c0200f8, 0x8ee400c8, 0x8ee500cc, 0x401821,
+0x1021, 0x882024, 0xa92824, 0x822025,
+0xa32825, 0x24020008, 0xaee400c8, 0xaee500cc,
+0xafa20010, 0xafa00014, 0x8f42000c, 0x8c040208,
+0x8c05020c, 0xafa20018, 0x8f42010c, 0x26e60028,
+0x40f809, 0x24070400, 0x104000f0, 0x3c020400,
+0xafa20020, 0x934205c6, 0x10400089, 0x1821,
+0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
+0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
+0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
+0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
+0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
+0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
+0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
+0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
+0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
+0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
+0x24070008, 0xa32821, 0xa3482b, 0x822021,
+0x100f809, 0x892021, 0x54400006, 0x24130001,
+0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
+0x0, 0x326200ff, 0x54400017, 0xaf520018,
+0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
+0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
+0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
+0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
+0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
+0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
+0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
+0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
+0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
+0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
+0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
+0x326200ff, 0x54400012, 0x24020001, 0x8f420378,
+0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
+0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
+0x24846120, 0x3c050009, 0xafa20014, 0x8d460000,
+0x34a50700, 0xc002b3b, 0x3c03821, 0x1021,
+0x1440005b, 0x24020001, 0x10000065, 0x0,
+0x8f510018, 0x240200ff, 0x12220002, 0x8021,
+0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
+0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
+0x8c020228, 0x3c040001, 0x248460f4, 0x3c050009,
+0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
+0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
+0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
+0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
+0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
+0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x248460fc, 0x3c050009,
+0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
+0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
+0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
+0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x54400011, 0x24020001, 0x8f420340, 0x24420001,
+0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24846104, 0x3c050009,
+0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
+0x2203821, 0x1021, 0x1040000d, 0x24020001,
+0x8f4202e8, 0xa34005c6, 0xaf4001b0, 0x24420001,
+0xaf4202e8, 0x8f4202e8, 0x8ee20150, 0x24420001,
+0xaee20150, 0x10000003, 0x8ee20150, 0x24020001,
+0xa34205c6, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
+0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
+0x3e00008, 0x27bd0050, 0x27bdffd8, 0xafbf0020,
+0x8f8200b0, 0x30420004, 0x10400068, 0x0,
+0x8f430128, 0x8f820104, 0x14620005, 0x0,
+0x8f430130, 0x8f8200b4, 0x10620006, 0x0,
+0x8f820104, 0xaf420128, 0x8f8200b4, 0x1000005b,
+0xaf420130, 0x8f8200b0, 0x3c030080, 0x431024,
+0x1040000d, 0x0, 0x8f82011c, 0x34420002,
+0xaf82011c, 0x8f8200b0, 0x2403fffb, 0x431024,
+0xaf8200b0, 0x8f82011c, 0x2403fffd, 0x431024,
+0x1000004a, 0xaf82011c, 0x8f430128, 0x8f820104,
+0x14620005, 0x0, 0x8f430130, 0x8f8200b4,
+0x10620010, 0x0, 0x8f820104, 0xaf420128,
+0x8f8200b4, 0x8f430128, 0xaf420130, 0xafa30010,
+0x8f420130, 0x3c040001, 0x24846144, 0xafa20014,
+0x8f86011c, 0x8f8700b0, 0x3c050005, 0x10000031,
+0x34a50900, 0x8f420128, 0xafa20010, 0x8f420130,
+0x3c040001, 0x24846150, 0xafa20014, 0x8f86011c,
+0x8f8700b0, 0x3c050005, 0xc002b3b, 0x34a51000,
+0x8f82011c, 0x34420002, 0xaf82011c, 0x8f830104,
+0x8f8200b0, 0x34420001, 0xaf8200b0, 0x24020008,
+0xaf830104, 0xafa20010, 0xafa00014, 0x8f42000c,
+0x8c040208, 0x8c05020c, 0xafa20018, 0x8f42010c,
+0x26e60028, 0x40f809, 0x24070400, 0x8f82011c,
+0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
+0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f420128,
+0xafa20010, 0x8f420130, 0x3c040001, 0x2484615c,
+0xafa20014, 0x8f86011c, 0x8f8700b0, 0x3c050005,
+0x34a51100, 0xc002b3b, 0x0, 0x8f8200a0,
+0x30420004, 0x10400069, 0x0, 0x8f43012c,
+0x8f820124, 0x14620005, 0x0, 0x8f430134,
+0x8f8200a4, 0x10620006, 0x0, 0x8f820124,
+0xaf42012c, 0x8f8200a4, 0x1000005c, 0xaf420134,
+0x8f8200a0, 0x3c030080, 0x431024, 0x1040000d,
+0x0, 0x8f82011c, 0x34420002, 0xaf82011c,
+0x8f8200a0, 0x2403fffb, 0x431024, 0xaf8200a0,
+0x8f82011c, 0x2403fffd, 0x431024, 0x1000004b,
+0xaf82011c, 0x8f43012c, 0x8f820124, 0x14620005,
+0x0, 0x8f430134, 0x8f8200a4, 0x10620010,
+0x0, 0x8f820124, 0xaf42012c, 0x8f8200a4,
+0x8f43012c, 0xaf420134, 0xafa30010, 0x8f420134,
+0x3c040001, 0x24846168, 0xafa20014, 0x8f86011c,
+0x8f8700a0, 0x3c050005, 0x10000032, 0x34a51200,
+0x8f42012c, 0xafa20010, 0x8f420134, 0x3c040001,
+0x24846174, 0xafa20014, 0x8f86011c, 0x8f8700a0,
+0x3c050005, 0xc002b3b, 0x34a51300, 0x8f82011c,
+0x34420002, 0xaf82011c, 0x8f830124, 0x8f8200a0,
+0x34420001, 0xaf8200a0, 0x24020080, 0xaf830124,
+0xafa20010, 0xafa00014, 0x8f420014, 0x8c040208,
+0x8c05020c, 0xafa20018, 0x8f420108, 0x3c060001,
+0x24c66ed8, 0x40f809, 0x24070004, 0x8f82011c,
+0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
+0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f42012c,
+0xafa20010, 0x8f420134, 0x3c040001, 0x24846180,
+0xafa20014, 0x8f86011c, 0x8f8700a0, 0x3c050005,
+0x34a51400, 0xc002b3b, 0x0, 0x8fbf0020,
+0x3e00008, 0x27bd0028, 0x3c081000, 0x24070001,
+0x3c060080, 0x3c050100, 0x8f820070, 0x481024,
+0x1040fffd, 0x0, 0x8f820054, 0x24420005,
+0xaf820078, 0x8c040234, 0x10800016, 0x1821,
+0x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
+0x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
+0x571021, 0x8c4240e8, 0x44102b, 0x14400009,
+0x0, 0x3c030080, 0x3c010001, 0x370821,
+0xac2040e8, 0x3c010001, 0x370821, 0x1000000b,
+0xa02740f0, 0x3c020001, 0x571021, 0x904240f0,
+0x54400006, 0x661825, 0x3c020001, 0x571021,
+0x904240f1, 0x54400001, 0x661825, 0x8c040230,
+0x10800013, 0x0, 0x3c020001, 0x571021,
+0x8c4240ec, 0x24420005, 0x3c010001, 0x370821,
+0xac2240ec, 0x3c020001, 0x571021, 0x8c4240ec,
+0x44102b, 0x14400006, 0x0, 0x3c010001,
+0x370821, 0xac2040ec, 0x10000006, 0x651825,
+0x3c020001, 0x571021, 0x904240f2, 0x54400001,
+0x651825, 0x1060ffbc, 0x0, 0x8f420000,
+0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
+0x1040fffd, 0x0, 0x10000005, 0x0,
+0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
+0x8f820060, 0x431025, 0xaf820060, 0x8f420000,
+0x10400003, 0x0, 0x1000ffa7, 0xaf80004c,
+0x1000ffa5, 0xaf800048, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x27bdffe0,
+0xafbf0018, 0x8f860064, 0x30c20004, 0x10400025,
+0x24040004, 0x8c020114, 0xaf420020, 0xaf840064,
+0x8f4202fc, 0x24420001, 0xaf4202fc, 0x8f4202fc,
+0x8f820064, 0x30420004, 0x14400005, 0x0,
+0x8c030114, 0x8f420020, 0x1462fff2, 0x0,
+0x8f420000, 0x10400007, 0x8f43003c, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x431025, 0xaf820060,
+0x8f420000, 0x10400073, 0x0, 0x1000006f,
+0x0, 0x30c20008, 0x10400020, 0x24040008,
+0x8c02011c, 0xaf420048, 0xaf840064, 0x8f4202a8,
+0x24420001, 0xaf4202a8, 0x8f4202a8, 0x8f820064,
+0x30420008, 0x14400005, 0x0, 0x8c03011c,
+0x8f420048, 0x1462fff2, 0x0, 0x8f420000,
+0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
+0x1040fffd, 0x0, 0x10000005, 0x0,
+0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
+0x8f820060, 0x1000ffd9, 0x34420200, 0x30c20020,
+0x10400023, 0x24040020, 0x8c02012c, 0xaf420068,
+0xaf840064, 0x8f4202d8, 0x24420001, 0xaf4202d8,
+0x8f4202d8, 0x8f820064, 0x30420020, 0x14400005,
+0x32c24000, 0x8c03012c, 0x8f420068, 0x1462fff2,
+0x32c24000, 0x14400002, 0x3c020001, 0x2c2b025,
+0x8f420000, 0x10400007, 0x0, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x1000ffb4, 0x34420800,
+0x30c20010, 0x10400029, 0x24040010, 0x8c020124,
+0xaf420058, 0xaf840064, 0x8f4202d4, 0x24420001,
+0xaf4202d4, 0x8f4202d4, 0x8f820064, 0x30420010,
+0x14400005, 0x32c22000, 0x8c030124, 0x8f420058,
+0x1462fff2, 0x32c22000, 0x50400001, 0x36d68000,
+0x8f420000, 0x10400007, 0x0, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x34420100, 0xaf820060,
+0x8f420000, 0x10400003, 0x0, 0x1000006c,
+0xaf80004c, 0x1000006a, 0xaf800048, 0x30c20001,
+0x10400004, 0x24020001, 0xaf820064, 0x10000064,
+0x0, 0x30c20002, 0x1440000b, 0x3c050003,
+0x3c040001, 0x24846244, 0x34a50500, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x2402ffc0,
+0x10000057, 0xaf820064, 0x8c05022c, 0x8c02010c,
+0x10a20048, 0x51080, 0x8c460300, 0x24a20001,
+0x3045003f, 0x24020003, 0xac05022c, 0x61e02,
+0x10620005, 0x24020010, 0x1062001d, 0x30c20fff,
+0x10000039, 0x0, 0x8f4302a8, 0x8f440000,
+0x30c20fff, 0xaf420048, 0x24630001, 0xaf4302a8,
+0x10800007, 0x8f4202a8, 0xaf80004c, 0x8f82004c,
+0x1040fffd, 0x0, 0x10000005, 0x0,
+0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
+0x8f820060, 0x34420200, 0xaf820060, 0x8f420000,
+0x1040001f, 0x0, 0x1000001b, 0x0,
+0xaf420058, 0x32c22000, 0x50400001, 0x36d68000,
+0x8f4202d4, 0x8f430000, 0x24420001, 0xaf4202d4,
+0x10600007, 0x8f4202d4, 0xaf80004c, 0x8f82004c,
+0x1040fffd, 0x0, 0x10000005, 0x0,
+0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
+0x8f820060, 0x34420100, 0xaf820060, 0x8f420000,
+0x10400003, 0x0, 0x10000006, 0xaf80004c,
+0x10000004, 0xaf800048, 0xc002196, 0xc02021,
+0x402821, 0x8c02010c, 0x14a20002, 0x24020002,
+0xaf820064, 0x8f820064, 0x30420002, 0x14400004,
+0x0, 0x8c02010c, 0x14a2ffac, 0x0,
+0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
+0x0, 0x27bdffa0, 0xafb00040, 0x808021,
+0x101602, 0x2442ffff, 0x304300ff, 0x2c620013,
+0xafbf0058, 0xafbe0054, 0xafb50050, 0xafb3004c,
+0xafb20048, 0xafb10044, 0x104001f3, 0xafa50034,
+0x31080, 0x3c010001, 0x220821, 0x8c226288,
+0x400008, 0x0, 0x101302, 0x30440fff,
+0x24020001, 0x10820005, 0x24020002, 0x1082000c,
+0x2402fffe, 0x10000024, 0x3c050003, 0x8f430004,
+0x3c020001, 0x8c426f04, 0xaf440200, 0xaf440204,
+0x3c040001, 0x8c846e80, 0x10000009, 0x34630001,
+0x8f430004, 0xaf440200, 0xaf440204, 0x3c040001,
+0x8c846e80, 0x621824, 0x3c020001, 0x2442ca28,
+0x21100, 0x21182, 0xaf430004, 0x3c030800,
+0x431025, 0xac820038, 0x8f840054, 0x41442,
+0x41c82, 0x431021, 0x41cc2, 0x431023,
+0x41d02, 0x431021, 0x41d42, 0x431023,
+0x10000009, 0xaf420208, 0x3c040001, 0x24846250,
+0x34a51000, 0x2003021, 0x3821, 0xafa00010,
+0xc002b3b, 0xafa00014, 0x8f4202a0, 0x24420001,
+0xaf4202a0, 0x1000021f, 0x8f4202a0, 0x27b00028,
+0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
+0xc002518, 0x2002021, 0x10000216, 0x0,
+0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
+0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
+0x21080, 0x8c430300, 0x25420001, 0x3042003f,
+0xafa20034, 0xac02022c, 0xafa50028, 0xc002518,
+0xafa3002c, 0x10000203, 0x0, 0x27b00028,
+0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
+0xc002657, 0x2002021, 0x100001fa, 0x0,
+0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
+0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
+0x21080, 0x8c430300, 0x25420001, 0x3042003f,
+0xafa20034, 0xac02022c, 0xafa50028, 0xc002657,
+0xafa3002c, 0x100001e7, 0x0, 0x101302,
+0x30430fff, 0x24020001, 0x10620005, 0x24020002,
+0x1062001e, 0x3c020002, 0x10000033, 0x3c050003,
+0x3c030002, 0x2c31024, 0x54400037, 0x2c3b025,
+0x8f820228, 0x3c010001, 0x370821, 0xac2238d8,
+0x8f82022c, 0x3c010001, 0x370821, 0xac2238dc,
+0x8f820230, 0x3c010001, 0x370821, 0xac2238e0,
+0x8f820234, 0x3c010001, 0x370821, 0xac2238e4,
+0x2402ffff, 0xaf820228, 0xaf82022c, 0xaf820230,
+0xaf820234, 0x10000020, 0x2c3b025, 0x2c21024,
+0x10400012, 0x3c02fffd, 0x3c020001, 0x571021,
+0x8c4238d8, 0xaf820228, 0x3c020001, 0x571021,
+0x8c4238dc, 0xaf82022c, 0x3c020001, 0x571021,
+0x8c4238e0, 0xaf820230, 0x3c020001, 0x571021,
+0x8c4238e4, 0xaf820234, 0x3c02fffd, 0x3442ffff,
+0x10000009, 0x2c2b024, 0x3c040001, 0x2484625c,
+0x34a51100, 0x2003021, 0x3821, 0xafa00010,
+0xc002b3b, 0xafa00014, 0x8f4202cc, 0x24420001,
+0xaf4202cc, 0x1000019f, 0x8f4202cc, 0x101302,
+0x30450fff, 0x24020001, 0x10a20005, 0x24020002,
+0x10a2000d, 0x3c0408ff, 0x10000014, 0x3c050003,
+0x3c0208ff, 0x3442ffff, 0x8f830220, 0x3c040004,
+0x2c4b025, 0x621824, 0x34630008, 0xaf830220,
+0x10000012, 0xaf450298, 0x3484fff7, 0x3c03fffb,
+0x8f820220, 0x3463ffff, 0x2c3b024, 0x441024,
+0xaf820220, 0x10000009, 0xaf450298, 0x3c040001,
+0x24846268, 0x34a51200, 0x2003021, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202bc,
+0x24420001, 0xaf4202bc, 0x10000176, 0x8f4202bc,
+0x27840208, 0x24050200, 0xc002bbf, 0x24060008,
+0x27440224, 0x24050200, 0xc002bbf, 0x24060008,
+0x8f4202c4, 0x24420001, 0xaf4202c4, 0x10000169,
+0x8f4202c4, 0x101302, 0x30430fff, 0x24020001,
+0x10620011, 0x28620002, 0x50400005, 0x24020002,
+0x10600007, 0x0, 0x10000017, 0x0,
+0x1062000f, 0x0, 0x10000013, 0x0,
+0x8c060248, 0x2021, 0xc005104, 0x24050004,
+0x10000007, 0x0, 0x8c060248, 0x2021,
+0xc005104, 0x24050004, 0x10000010, 0x0,
+0x8c06024c, 0x2021, 0xc005104, 0x24050001,
+0x1000000a, 0x0, 0x3c040001, 0x24846274,
+0x3c050003, 0x34a51300, 0x2003021, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202c0,
+0x24420001, 0xaf4202c0, 0x1000013a, 0x8f4202c0,
+0xc002426, 0x0, 0x10000136, 0x0,
+0x24020001, 0xa34205c5, 0x24100100, 0x8f4401a8,
+0x8f4501ac, 0xafb00010, 0xafa00014, 0x8f420014,
+0xafa20018, 0x8f420108, 0x26e60028, 0x40f809,
+0x24070400, 0x1040fff5, 0x0, 0x10000125,
+0x0, 0x3c03ffff, 0x34637fff, 0x8f420368,
+0x8f440360, 0x2c3b024, 0x1821, 0xaf400058,
+0xaf40005c, 0xaf400060, 0xaf400064, 0x441023,
+0xaf420368, 0x3c020900, 0xaf400360, 0xafa20020,
+0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
+0xafaa003c, 0x27c30001, 0x8c020228, 0x609021,
+0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
+0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
+0x2484620c, 0x3c050009, 0xafa00014, 0xafa20010,
+0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
+0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
+0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
+0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
+0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
+0x24070008, 0xa32821, 0xa3482b, 0x822021,
+0x100f809, 0x892021, 0x54400006, 0x24130001,
+0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
+0x0, 0x326200ff, 0x54400017, 0xaf520018,
+0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
+0x8f820120, 0x8faa003c, 0xafa20010, 0x8f820124,
+0x3c040001, 0x24846218, 0x3c050009, 0xafa20014,
+0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
+0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
+0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
+0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
+0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
+0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
+0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
+0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
+0x326200ff, 0x14400011, 0x0, 0x8f420378,
+0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
+0x8faa003c, 0xafa20010, 0x8f820124, 0x3c040001,
+0x24846220, 0x3c050009, 0xafa20014, 0x8d460000,
+0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b0,
+0x24420001, 0xaf4202b0, 0x8f4202b0, 0x8f4202f8,
+0x24420001, 0xaf4202f8, 0x1000008a, 0x8f4202f8,
+0x8c02025c, 0x27440224, 0xaf4201f0, 0x8c020260,
+0x24050200, 0x24060008, 0xc002bbf, 0xaf4201f8,
+0x8f820220, 0x30420008, 0x14400002, 0x24020001,
+0x24020002, 0xaf420298, 0x8f4202ac, 0x24420001,
+0xaf4202ac, 0x10000077, 0x8f4202ac, 0x3c0200ff,
+0x3442ffff, 0x2021824, 0x32c20180, 0x14400006,
+0x3402fffb, 0x43102b, 0x14400003, 0x0,
+0x1000006c, 0xaf4300bc, 0x3c040001, 0x24846280,
+0x3c050003, 0x34a51500, 0x2003021, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x3c020700,
+0x34421000, 0x101e02, 0x621825, 0xafa30020,
+0x8f510018, 0x240200ff, 0x12220002, 0x8021,
+0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
+0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
+0x8c020228, 0x3c040001, 0x248461f4, 0x3c050009,
+0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
+0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
+0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
+0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
+0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
+0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x248461fc, 0x3c050009,
+0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
+0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
+0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
+0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x14400010, 0x0, 0x8f420340, 0x24420001,
+0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24846204, 0x3c050009,
+0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
+0x2203821, 0x8f4202e0, 0x24420001, 0xaf4202e0,
+0x8f4202e0, 0x8f4202f0, 0x24420001, 0xaf4202f0,
+0x8f4202f0, 0x8fa20034, 0x8fbf0058, 0x8fbe0054,
+0x8fb50050, 0x8fb3004c, 0x8fb20048, 0x8fb10044,
+0x8fb00040, 0x3e00008, 0x27bd0060, 0x27bdfff8,
+0x2408ffff, 0x10a00014, 0x4821, 0x3c0aedb8,
+0x354a8320, 0x90870000, 0x24840001, 0x3021,
+0x1071026, 0x30420001, 0x10400002, 0x81842,
+0x6a1826, 0x604021, 0x24c60001, 0x2cc20008,
+0x1440fff7, 0x73842, 0x25290001, 0x125102b,
+0x1440fff0, 0x0, 0x1001021, 0x3e00008,
+0x27bd0008, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
+0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
+0xafb00030, 0x8f870220, 0xafa70024, 0x8f870200,
+0xafa7002c, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x34420004, 0xaf820220, 0x8f820200,
+0x3c03c0ff, 0x3463ffff, 0x431024, 0x34420004,
+0xaf820200, 0x8f530358, 0x8f55035c, 0x8f5e0360,
+0x8f470364, 0xafa70014, 0x8f470368, 0xafa7001c,
+0x8f4202d0, 0x274401c0, 0x24420001, 0xaf4202d0,
+0x8f5002d0, 0x8f510204, 0x8f520200, 0xc002ba8,
+0x24050400, 0xaf530358, 0xaf55035c, 0xaf5e0360,
+0x8fa70014, 0xaf470364, 0x8fa7001c, 0xaf470368,
+0xaf5002d0, 0xaf510204, 0xaf520200, 0x8c02025c,
+0x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
+0x24060008, 0xaf4201f8, 0x24020006, 0xc002bbf,
+0xaf4201f4, 0x3c023b9a, 0x3442ca00, 0xaf4201fc,
+0x240203e8, 0x24040002, 0x24030001, 0xaf420294,
+0xaf440290, 0xaf43029c, 0x8f820220, 0x30420008,
+0x10400004, 0x0, 0xaf430298, 0x10000003,
+0x3021, 0xaf440298, 0x3021, 0x3c030001,
+0x661821, 0x90636d00, 0x3461021, 0x24c60001,
+0xa043022c, 0x2cc2000f, 0x1440fff8, 0x3461821,
+0x24c60001, 0x8f820040, 0x24040080, 0x24050080,
+0x21702, 0x24420030, 0xa062022c, 0x3461021,
+0xc002ba8, 0xa040022c, 0x8fa70024, 0x30e20004,
+0x14400006, 0x0, 0x8f820220, 0x3c0308ff,
+0x3463fffb, 0x431024, 0xaf820220, 0x8fa7002c,
+0x30e20004, 0x14400006, 0x0, 0x8f820200,
+0x3c03c0ff, 0x3463fffb, 0x431024, 0xaf820200,
+0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
+0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
+0x27bd0050, 0x0, 0x0, 0xaf400104,
+0x24040001, 0x410c0, 0x2e21821, 0x24820001,
+0x3c010001, 0x230821, 0xa42234d0, 0x402021,
+0x2c820080, 0x1440fff8, 0x410c0, 0x24020001,
+0x3c010001, 0x370821, 0xa42038d0, 0xaf420100,
+0xaf800228, 0xaf80022c, 0xaf800230, 0xaf800234,
+0x3e00008, 0x0, 0x27bdffe8, 0xafbf0014,
+0xafb00010, 0x8f420104, 0x28420005, 0x10400026,
+0x808021, 0x3c020001, 0x8f430104, 0x344230d0,
+0x2e22021, 0x318c0, 0x621821, 0x2e31821,
+0x83102b, 0x10400015, 0x1021, 0x96070000,
+0x24840006, 0x24660006, 0x9482fffc, 0x14470009,
+0x2821, 0x9483fffe, 0x96020002, 0x14620006,
+0xa01021, 0x94820000, 0x96030004, 0x431026,
+0x2c450001, 0xa01021, 0x14400009, 0x24840008,
+0x86102b, 0x1440fff0, 0x1021, 0x304200ff,
+0x14400030, 0x24020001, 0x1000002e, 0x1021,
+0x1000fffa, 0x24020001, 0x2002021, 0xc00240c,
+0x24050006, 0x3042007f, 0x218c0, 0x2e31021,
+0x3c010001, 0x220821, 0x942230d0, 0x1040fff2,
+0x2e31021, 0x3c060001, 0xc23021, 0x94c630d0,
+0x10c0ffed, 0x3c080001, 0x350834d2, 0x96070000,
+0x610c0, 0x572021, 0x882021, 0x94820000,
+0x14470009, 0x2821, 0x94830002, 0x96020002,
+0x14620006, 0xa01021, 0x94820004, 0x96030004,
+0x431026, 0x2c450001, 0xa01021, 0x14400007,
+0x610c0, 0x2e21021, 0x3c060001, 0xc23021,
+0x94c634d0, 0x14c0ffeb, 0x610c0, 0x10c0ffd2,
+0x24020001, 0x8fbf0014, 0x8fb00010, 0x3e00008,
+0x27bd0018, 0x3e00008, 0x0, 0x27bdffb0,
+0x801021, 0xafb00030, 0x24500002, 0x2002021,
+0x24050006, 0xafb10034, 0x408821, 0xafbf0048,
+0xafbe0044, 0xafb50040, 0xafb3003c, 0xc00240c,
+0xafb20038, 0x3047007f, 0x710c0, 0x2e21021,
+0x3c050001, 0xa22821, 0x94a530d0, 0x50a0001c,
+0xa03021, 0x3c090001, 0x352934d2, 0x96280002,
+0x510c0, 0x572021, 0x892021, 0x94820000,
+0x14480009, 0x3021, 0x94830002, 0x96020002,
+0x14620006, 0xc01021, 0x94820004, 0x96030004,
+0x431026, 0x2c460001, 0xc01021, 0x14400007,
+0x510c0, 0x2e21021, 0x3c050001, 0xa22821,
+0x94a534d0, 0x14a0ffeb, 0x510c0, 0xa03021,
+0x10c00014, 0x610c0, 0x571821, 0x3c010001,
+0x230821, 0x8c2334d0, 0x571021, 0xafa30010,
+0x3c010001, 0x220821, 0x8c2234d4, 0x3c040001,
+0x24846394, 0xafa20014, 0x8e260000, 0x8e270004,
+0x3c050004, 0xc002b3b, 0x34a50400, 0x10000063,
+0x3c020800, 0x8f450100, 0x10a00006, 0x510c0,
+0x2e21021, 0x3c010001, 0x220821, 0x942234d0,
+0xaf420100, 0xa03021, 0x14c00011, 0x628c0,
+0x710c0, 0x2e21021, 0xafa70010, 0x3c010001,
+0x220821, 0x942230d0, 0x3c040001, 0x248463a0,
+0xafa20014, 0x8e260000, 0x8e270004, 0x3c050004,
+0xc002b3b, 0x34a50500, 0x10000048, 0x3c020800,
+0xb71821, 0x3c020001, 0x96040000, 0x344234d2,
+0x621821, 0xa4640000, 0x8e020002, 0x720c0,
+0xac620002, 0x2e41021, 0x3c030001, 0x621821,
+0x946330d0, 0x2e51021, 0x3c010001, 0x220821,
+0xa42334d0, 0x2e41021, 0x3c010001, 0x220821,
+0xa42630d0, 0x8f420104, 0x24420001, 0x28420080,
+0x1040000f, 0x3c020002, 0x8f420104, 0x3c040001,
+0x348430d2, 0x96030000, 0x210c0, 0x571021,
+0x441021, 0xa4430000, 0x8e030002, 0xac430002,
+0x8f420104, 0x24420001, 0xaf420104, 0x3c020002,
+0x2c21024, 0x10400011, 0x72142, 0x3c030001,
+0x346338d8, 0x24020003, 0x441023, 0x21080,
+0x572021, 0x832021, 0x571021, 0x431021,
+0x30e5001f, 0x8c430000, 0x24020001, 0xa21004,
+0x621825, 0x1000000c, 0xac830000, 0x24020003,
+0x441023, 0x21080, 0x5c2821, 0x5c1021,
+0x30e4001f, 0x8c430228, 0x24020001, 0x821004,
+0x621825, 0xaca30228, 0x3c020800, 0x34421000,
+0x1821, 0xafa20020, 0x8f5e0018, 0x27aa0020,
+0x240200ff, 0x13c20002, 0xafaa002c, 0x27c30001,
+0x8c020228, 0x609021, 0x1642000e, 0x1e38c0,
+0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
+0x8c020228, 0x3c040001, 0x2484635c, 0x3c050009,
+0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006b,
+0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
+0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
+0x9821, 0xe08821, 0x263504c0, 0x8f440178,
+0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
+0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
+0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x54400006, 0x24130001, 0x8f820054, 0x2021023,
+0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
+0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
+0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
+0xafa20010, 0x8f820124, 0x3c040001, 0x24846368,
+0x3c050009, 0xafa20014, 0x8d460000, 0x10000033,
+0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
+0xaf420308, 0x8f420308, 0x1000001c, 0x326200ff,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x10400014, 0x9821, 0x24110010,
+0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
+0xafb10010, 0xafb20014, 0xafa20018, 0x8f42010c,
+0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe5,
+0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
+0x1440ffef, 0x0, 0x326200ff, 0x14400011,
+0x0, 0x8f420378, 0x24420001, 0xaf420378,
+0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
+0x8f820124, 0x3c040001, 0x24846370, 0x3c050009,
+0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
+0x3c03821, 0x8f4202b4, 0x24420001, 0xaf4202b4,
+0x8f4202b4, 0x8f4202f4, 0x24420001, 0xaf4202f4,
+0x8f4202f4, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
+0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
+0x3e00008, 0x27bd0050, 0x27bdffa0, 0x801021,
+0xafb00040, 0x24500002, 0x2002021, 0x24050006,
+0xafb10044, 0x408821, 0xafbf0058, 0xafbe0054,
+0xafb50050, 0xafb3004c, 0xc00240c, 0xafb20048,
+0x3048007f, 0x810c0, 0x2e21021, 0x3c060001,
+0xc23021, 0x94c630d0, 0x10c0001c, 0x3821,
+0x3c0a0001, 0x354a34d2, 0x96290002, 0x610c0,
+0x572021, 0x8a2021, 0x94820000, 0x14490009,
+0x2821, 0x94830002, 0x96020002, 0x14620006,
+0xa01021, 0x94820004, 0x96030004, 0x431026,
+0x2c450001, 0xa01021, 0x14400008, 0x610c0,
+0xc03821, 0x2e21021, 0x3c060001, 0xc23021,
+0x94c634d0, 0x14c0ffea, 0x610c0, 0x14c00011,
+0xafa70028, 0x810c0, 0x2e21021, 0xafa80010,
+0x3c010001, 0x220821, 0x942230d0, 0x3c040001,
+0x248463ac, 0xafa20014, 0x8e260000, 0x8e270004,
+0x3c050004, 0xc002b3b, 0x34a50900, 0x10000075,
+0x3c020800, 0x10e0000c, 0x610c0, 0x2e21021,
+0x3c030001, 0x621821, 0x946334d0, 0x710c0,
+0x2e21021, 0x3c010001, 0x220821, 0xa42334d0,
+0x1000000b, 0x3c040001, 0x2e21021, 0x3c030001,
+0x621821, 0x946334d0, 0x810c0, 0x2e21021,
+0x3c010001, 0x220821, 0xa42330d0, 0x3c040001,
+0x348430d0, 0x8f430100, 0x610c0, 0x2e21021,
+0x3c010001, 0x220821, 0xa42334d0, 0x8f420104,
+0x2e43821, 0x2821, 0x18400029, 0xaf460100,
+0x24e60006, 0x94c3fffc, 0x96020000, 0x14620009,
+0x2021, 0x94c3fffe, 0x96020002, 0x14620006,
+0x801021, 0x94c20000, 0x96030004, 0x431026,
+0x2c440001, 0x801021, 0x50400014, 0x24a50001,
+0x8f420104, 0x2442ffff, 0xa2102a, 0x1040000b,
+0x24e40004, 0x94820006, 0x8c830008, 0xa482fffe,
+0xac830000, 0x8f420104, 0x24a50001, 0x2442ffff,
+0xa2102a, 0x1440fff7, 0x24840008, 0x8f420104,
+0x2442ffff, 0x10000006, 0xaf420104, 0x8f420104,
+0x24c60008, 0xa2102a, 0x1440ffda, 0x24e70008,
+0x810c0, 0x2e21021, 0x3c010001, 0x220821,
+0x942230d0, 0x14400023, 0x3c020800, 0x3c020002,
+0x2c21024, 0x10400012, 0x82142, 0x3c030001,
+0x346338d8, 0x24020003, 0x441023, 0x21080,
+0x572021, 0x832021, 0x571021, 0x431021,
+0x3105001f, 0x24030001, 0x8c420000, 0xa31804,
+0x31827, 0x431024, 0x1000000d, 0xac820000,
+0x24020003, 0x441023, 0x21080, 0x5c2821,
+0x5c1021, 0x3104001f, 0x24030001, 0x8c420228,
+0x831804, 0x31827, 0x431024, 0xaca20228,
+0x3c020800, 0x34422000, 0x1821, 0xafa20020,
+0x8f5e0018, 0x27ab0020, 0x240200ff, 0x13c20002,
+0xafab0034, 0x27c30001, 0x8c020228, 0x609021,
+0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
+0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
+0x2484635c, 0x3c050009, 0xafa00014, 0xafa20010,
+0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
+0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
+0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
+0x240b0004, 0xafab0010, 0xafb20014, 0x8f48000c,
+0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
+0x24070008, 0xa32821, 0xa3482b, 0x822021,
+0x100f809, 0x892021, 0x54400006, 0x24130001,
+0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
+0x0, 0x326200ff, 0x54400017, 0xaf520018,
+0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
+0x8f820120, 0x8fab0034, 0xafa20010, 0x8f820124,
+0x3c040001, 0x24846368, 0x3c050009, 0xafa20014,
+0x8d660000, 0x10000033, 0x34a50600, 0x8f420308,
+0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
+0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
+0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
+0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
+0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
+0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
+0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
+0x326200ff, 0x14400011, 0x0, 0x8f420378,
+0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
+0x8fab0034, 0xafa20010, 0x8f820124, 0x3c040001,
+0x24846370, 0x3c050009, 0xafa20014, 0x8d660000,
+0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b8,
+0x24420001, 0xaf4202b8, 0x8f4202b8, 0x8f4202f4,
+0x24420001, 0xaf4202f4, 0x8f4202f4, 0x8fbf0058,
+0x8fbe0054, 0x8fb50050, 0x8fb3004c, 0x8fb20048,
+0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0060,
+0x0, 0x0, 0x0, 0x27bdffe0,
+0x27644000, 0xafbf0018, 0xc002ba8, 0x24051000,
+0x3c030001, 0x34632cc0, 0x3c040001, 0x34842ec8,
+0x24020020, 0xaf82011c, 0x2e31021, 0xaf800100,
+0xaf800104, 0xaf800108, 0xaf800110, 0xaf800114,
+0xaf800118, 0xaf800120, 0xaf800124, 0xaf800128,
+0xaf800130, 0xaf800134, 0xaf800138, 0xaf4200ec,
+0x2e31021, 0xaf4200f0, 0x2e41021, 0xaf4200f4,
+0x2e41021, 0xaf4200f8, 0x3c020001, 0x571021,
+0x904240f4, 0x1440001c, 0x3c050001, 0x8f82011c,
+0x3c040001, 0x24846470, 0x3c050001, 0x34420001,
+0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
+0x34a50100, 0xc002b3b, 0x3821, 0x8c020218,
+0x30420040, 0x10400014, 0x0, 0x8f82011c,
+0x3c040001, 0x2484647c, 0x3c050001, 0x34420004,
+0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
+0x10000007, 0x34a50200, 0x3c040001, 0x24846484,
+0xafa00010, 0xafa00014, 0x8f86011c, 0x34a50300,
+0xc002b3b, 0x3821, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x8fa90010, 0x8f83012c, 0x8faa0014,
+0x8fab0018, 0x1060000a, 0x27624fe0, 0x14620002,
+0x24680020, 0x27684800, 0x8f820128, 0x11020004,
+0x0, 0x8f820124, 0x15020007, 0x0,
+0x8f430334, 0x1021, 0x24630001, 0xaf430334,
+0x10000039, 0x8f430334, 0xac640000, 0xac650004,
+0xac660008, 0xa467000e, 0xac690018, 0xac6a001c,
+0xac6b0010, 0xac620014, 0xaf880120, 0x8f4200fc,
+0x8f4400f4, 0x2442ffff, 0xaf4200fc, 0x8c820000,
+0x10490005, 0x3042ff8f, 0x10400019, 0x3122ff8f,
+0x10400018, 0x3c020001, 0x8c830004, 0x2c620010,
+0x10400013, 0x3c020001, 0x24630001, 0xac830004,
+0x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
+0x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
+0x14440015, 0x24020001, 0x8f820128, 0x24420020,
+0xaf820128, 0x8f820128, 0x1000000f, 0x24020001,
+0x3c020001, 0x344230c8, 0x2e21021, 0x54820004,
+0x24820008, 0x3c020001, 0x34422ec8, 0x2e21021,
+0x402021, 0x24020001, 0xaf4400f4, 0xac890000,
+0xac820004, 0x24020001, 0x3e00008, 0x0,
+0x3e00008, 0x0, 0x8fa90010, 0x8f83010c,
+0x8faa0014, 0x8fab0018, 0x1060000a, 0x276247e0,
+0x14620002, 0x24680020, 0x27684000, 0x8f820108,
+0x11020004, 0x0, 0x8f820104, 0x15020007,
+0x0, 0x8f430338, 0x1021, 0x24630001,
+0xaf430338, 0x10000035, 0x8f430338, 0xac640000,
+0xac650004, 0xac660008, 0xa467000e, 0xac690018,
+0xac6a001c, 0xac6b0010, 0xac620014, 0xaf880100,
+0x8f4400ec, 0x8c820000, 0x30420006, 0x10400019,
+0x31220006, 0x10400018, 0x3c020001, 0x8c830004,
+0x2c620010, 0x10400013, 0x3c020001, 0x24630001,
+0xac830004, 0x8f4300f0, 0x34422ec0, 0x2e21021,
+0x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
+0x2e21021, 0x14440015, 0x24020001, 0x8f820108,
+0x24420020, 0xaf820108, 0x8f820108, 0x1000000f,
+0x24020001, 0x3c020001, 0x34422ec0, 0x2e21021,
+0x54820004, 0x24820008, 0x3c020001, 0x34422cc0,
+0x2e21021, 0x402021, 0x24020001, 0xaf4400ec,
+0xac890000, 0xac820004, 0x24020001, 0x3e00008,
+0x0, 0x3e00008, 0x0, 0x27bdffd8,
+0x3c040001, 0x2484648c, 0x3c050001, 0xafbf0024,
+0xafb20020, 0xafb1001c, 0xafb00018, 0x8f900104,
+0x8f9100b0, 0x8f92011c, 0x34a52500, 0x8f820100,
+0x2403021, 0x2203821, 0xafa20010, 0xc002b3b,
+0xafb00014, 0x8e020008, 0xafa20010, 0x8e02000c,
+0x3c040001, 0x24846498, 0xafa20014, 0x8e060000,
+0x8e070004, 0x3c050001, 0xc002b3b, 0x34a52510,
+0x8e020018, 0xafa20010, 0x8e02001c, 0x3c040001,
+0x248464a4, 0xafa20014, 0x8e060010, 0x8e070014,
+0x3c050001, 0xc002b3b, 0x34a52520, 0x3c027f00,
+0x2221024, 0x3c030800, 0x54430016, 0x3c030200,
+0x8f82009c, 0x3042ffff, 0x14400012, 0x3c030200,
+0x3c040001, 0x248464b0, 0x3c050002, 0x34a5f030,
+0x3021, 0x3821, 0x36420002, 0xaf82011c,
+0x36220001, 0xaf8200b0, 0xaf900104, 0xaf92011c,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x10000024,
+0x0, 0x2c31024, 0x1040000d, 0x2231024,
+0x1040000b, 0x36420002, 0xaf82011c, 0x36220001,
+0xaf8200b0, 0xaf900104, 0xaf92011c, 0x8f420330,
+0x24420001, 0xaf420330, 0x10000015, 0x8f420330,
+0x3c040001, 0x248464b8, 0x240202a9, 0xafa20010,
+0xafa00014, 0x8f860144, 0x3c070001, 0x24e764c0,
+0xc002b3b, 0x3405dead, 0x8f82011c, 0x34420002,
+0xaf82011c, 0x8f820220, 0x34420004, 0xaf820220,
+0x8f820140, 0x3c030001, 0x431025, 0xaf820140,
+0x8fbf0024, 0x8fb20020, 0x8fb1001c, 0x8fb00018,
+0x3e00008, 0x27bd0028, 0x27bdffd8, 0x3c040001,
+0x248464e8, 0x3c050001, 0xafbf0024, 0xafb20020,
+0xafb1001c, 0xafb00018, 0x8f900124, 0x8f9100a0,
+0x8f92011c, 0x34a52600, 0x8f820120, 0x2403021,
+0x2203821, 0xafa20010, 0xc002b3b, 0xafb00014,
+0x8e020008, 0xafa20010, 0x8e02000c, 0x3c040001,
+0x248464f4, 0xafa20014, 0x8e060000, 0x8e070004,
+0x3c050001, 0xc002b3b, 0x34a52610, 0x8e020018,
+0xafa20010, 0x8e02001c, 0x3c040001, 0x24846500,
+0xafa20014, 0x8e060010, 0x8e070014, 0x3c050001,
+0xc002b3b, 0x34a52620, 0x3c027f00, 0x2221024,
+0x3c030800, 0x54430016, 0x3c030200, 0x8f8200ac,
+0x3042ffff, 0x14400012, 0x3c030200, 0x3c040001,
+0x2484650c, 0x3c050001, 0x34a5f030, 0x3021,
+0x3821, 0x36420002, 0xaf82011c, 0x36220001,
+0xaf8200a0, 0xaf900124, 0xaf92011c, 0xafa00010,
+0xc002b3b, 0xafa00014, 0x10000024, 0x0,
+0x2c31024, 0x1040000d, 0x2231024, 0x1040000b,
+0x36420002, 0xaf82011c, 0x36220001, 0xaf8200a0,
+0xaf900124, 0xaf92011c, 0x8f42032c, 0x24420001,
+0xaf42032c, 0x10000015, 0x8f42032c, 0x3c040001,
+0x248464b8, 0x240202e2, 0xafa20010, 0xafa00014,
+0x8f860144, 0x3c070001, 0x24e764c0, 0xc002b3b,
+0x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
+0x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
+0x3c030001, 0x431025, 0xaf820140, 0x8fbf0024,
+0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
+0x27bd0028, 0x6021, 0x5021, 0x3021,
+0x2821, 0x6821, 0x4821, 0x7821,
+0x7021, 0x8f880124, 0x8f870104, 0x1580002e,
+0x8f8b011c, 0x11a00014, 0x31620800, 0x8f820120,
+0x10460029, 0x0, 0x3c040001, 0x8c846ee4,
+0x8cc20000, 0x8cc30004, 0xac820000, 0xac830004,
+0x8cc20008, 0xac820008, 0x94c2000e, 0xa482000e,
+0x8cc20010, 0x240c0001, 0xac820010, 0x8cc20014,
+0x10000012, 0x24c60020, 0x10400017, 0x0,
+0x3c040001, 0x8c846ee4, 0x8d020000, 0x8d030004,
+0xac820000, 0xac830004, 0x8d020008, 0xac820008,
+0x9502000e, 0xa482000e, 0x8d020010, 0x25060020,
+0xac820010, 0x8d020014, 0x240c0001, 0xc01821,
+0xac820014, 0x27624fe0, 0x43102b, 0x54400001,
+0x27634800, 0x603021, 0x1540002f, 0x31620100,
+0x11200014, 0x31628000, 0x8f820100, 0x1045002a,
+0x31620100, 0x3c040001, 0x8c846ee0, 0x8ca20000,
+0x8ca30004, 0xac820000, 0xac830004, 0x8ca20008,
+0xac820008, 0x94a2000e, 0xa482000e, 0x8ca20010,
+0x240a0001, 0xac820010, 0x8ca20014, 0x10000012,
+0x24a50020, 0x10400018, 0x31620100, 0x3c040001,
+0x8c846ee0, 0x8ce20000, 0x8ce30004, 0xac820000,
+0xac830004, 0x8ce20008, 0xac820008, 0x94e2000e,
+0xa482000e, 0x8ce20010, 0x24e50020, 0xac820010,
+0x8ce20014, 0x240a0001, 0xa01821, 0xac820014,
+0x276247e0, 0x43102b, 0x54400001, 0x27634000,
+0x602821, 0x31620100, 0x5440001d, 0x31621000,
+0x11a00009, 0x31a20800, 0x10400004, 0x25020020,
+0x8f8200a8, 0xa5e20000, 0x25020020, 0xaf820124,
+0x8f880124, 0x6821, 0x11800011, 0x31621000,
+0x3c040001, 0x8c846ee4, 0x8c820000, 0x8c830004,
+0xaf820080, 0xaf830084, 0x8c820008, 0xaf8200a4,
+0x9482000e, 0xaf8200ac, 0x8c820010, 0x6021,
+0xaf8200a0, 0x8c8d0010, 0x8c8f0014, 0x31621000,
+0x1440ff82, 0x0, 0x1120000f, 0x31220800,
+0x10400004, 0x3c020002, 0x8f8200b8, 0xa5c20000,
+0x3c020002, 0x1221024, 0x10400004, 0x24e20020,
+0x8f8200b4, 0xaf8200d4, 0x24e20020, 0xaf820104,
+0x8f870104, 0x4821, 0x1140ff70, 0x0,
+0x3c040001, 0x8c846ee0, 0x8c820000, 0x8c830004,
+0xaf820090, 0xaf830094, 0x8c820008, 0xaf8200b4,
+0x9482000e, 0xaf82009c, 0x8c820010, 0x5021,
+0xaf8200b0, 0x8c890010, 0x1000ff60, 0x8c8e0014,
+0x3e00008, 0x0, 0x6021, 0x5821,
+0x3021, 0x2821, 0x6821, 0x5021,
+0x7821, 0x7021, 0x8f880124, 0x8f870104,
+0x3c180100, 0x1580002e, 0x8f89011c, 0x11a00014,
+0x31220800, 0x8f820120, 0x10460029, 0x0,
+0x3c040001, 0x8c846ee4, 0x8cc20000, 0x8cc30004,
+0xac820000, 0xac830004, 0x8cc20008, 0xac820008,
+0x94c2000e, 0xa482000e, 0x8cc20010, 0x240c0001,
+0xac820010, 0x8cc20014, 0x10000012, 0x24c60020,
+0x10400017, 0x0, 0x3c040001, 0x8c846ee4,
+0x8d020000, 0x8d030004, 0xac820000, 0xac830004,
+0x8d020008, 0xac820008, 0x9502000e, 0xa482000e,
+0x8d020010, 0x25060020, 0xac820010, 0x8d020014,
+0x240c0001, 0xc01821, 0xac820014, 0x27624fe0,
+0x43102b, 0x54400001, 0x27634800, 0x603021,
+0x1560002f, 0x31220100, 0x11400014, 0x31228000,
+0x8f820100, 0x1045002a, 0x31220100, 0x3c040001,
+0x8c846ee0, 0x8ca20000, 0x8ca30004, 0xac820000,
+0xac830004, 0x8ca20008, 0xac820008, 0x94a2000e,
+0xa482000e, 0x8ca20010, 0x240b0001, 0xac820010,
+0x8ca20014, 0x10000012, 0x24a50020, 0x10400018,
+0x31220100, 0x3c040001, 0x8c846ee0, 0x8ce20000,
+0x8ce30004, 0xac820000, 0xac830004, 0x8ce20008,
+0xac820008, 0x94e2000e, 0xa482000e, 0x8ce20010,
+0x24e50020, 0xac820010, 0x8ce20014, 0x240b0001,
+0xa01821, 0xac820014, 0x276247e0, 0x43102b,
+0x54400001, 0x27634000, 0x602821, 0x31220100,
+0x5440001d, 0x31221000, 0x11a00009, 0x31a20800,
+0x10400004, 0x25020020, 0x8f8200a8, 0xa5e20000,
+0x25020020, 0xaf820124, 0x8f880124, 0x6821,
+0x11800011, 0x31221000, 0x3c040001, 0x8c846ee4,
+0x8c820000, 0x8c830004, 0xaf820080, 0xaf830084,
+0x8c820008, 0xaf8200a4, 0x9482000e, 0xaf8200ac,
+0x8c820010, 0x6021, 0xaf8200a0, 0x8c8d0010,
+0x8c8f0014, 0x31221000, 0x14400022, 0x0,
+0x1140000f, 0x31420800, 0x10400004, 0x3c020002,
+0x8f8200b8, 0xa5c20000, 0x3c020002, 0x1421024,
+0x10400004, 0x24e20020, 0x8f8200b4, 0xaf8200d4,
+0x24e20020, 0xaf820104, 0x8f870104, 0x5021,
+0x11600010, 0x0, 0x3c040001, 0x8c846ee0,
+0x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
+0x8c820008, 0xaf8200b4, 0x9482000e, 0xaf82009c,
+0x8c820010, 0x5821, 0xaf8200b0, 0x8c8a0010,
+0x8c8e0014, 0x8f820070, 0x3c031000, 0x431024,
+0x1040ff5c, 0x0, 0x8f820054, 0x24420005,
+0xaf820078, 0x8c040234, 0x10800016, 0x1821,
+0x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
+0x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
+0x571021, 0x8c4240e8, 0x44102b, 0x14400009,
+0x24020001, 0x3c030080, 0x3c010001, 0x370821,
+0xac2040e8, 0x3c010001, 0x370821, 0x1000000c,
+0xa02240f0, 0x3c020001, 0x571021, 0x904240f0,
+0x14400006, 0x3c020080, 0x3c020001, 0x571021,
+0x904240f1, 0x10400002, 0x3c020080, 0x621825,
+0x8c040230, 0x10800013, 0x0, 0x3c020001,
+0x571021, 0x8c4240ec, 0x24420005, 0x3c010001,
+0x370821, 0xac2240ec, 0x3c020001, 0x571021,
+0x8c4240ec, 0x44102b, 0x14400006, 0x0,
+0x3c010001, 0x370821, 0xac2040ec, 0x10000006,
+0x781825, 0x3c020001, 0x571021, 0x904240f2,
+0x54400001, 0x781825, 0x1060ff1a, 0x0,
+0x8f420000, 0x10400007, 0x0, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x431025, 0xaf820060,
+0x8f420000, 0x10400003, 0x0, 0x1000ff05,
+0xaf80004c, 0x1000ff03, 0xaf800048, 0x3e00008,
+0x0, 0x0, 0x0, 0x3c020001,
+0x8c426d28, 0x27bdffe8, 0xafbf0014, 0x14400012,
+0xafb00010, 0x3c100001, 0x26106f90, 0x2002021,
+0xc002ba8, 0x24052000, 0x26021fe0, 0x3c010001,
+0xac226eec, 0x3c010001, 0xac226ee8, 0xac020250,
+0x24022000, 0xac100254, 0xac020258, 0x24020001,
+0x3c010001, 0xac226d28, 0x8fbf0014, 0x8fb00010,
+0x3e00008, 0x27bd0018, 0x3c090001, 0x8d296eec,
+0x8c820000, 0x8fa30010, 0x8fa80014, 0xad220000,
+0x8c820004, 0xad250008, 0xad220004, 0x8f820054,
+0xad260010, 0xad270014, 0xad230018, 0xad28001c,
+0xad22000c, 0x2529ffe0, 0x3c020001, 0x24426f90,
+0x122102b, 0x10400003, 0x0, 0x3c090001,
+0x8d296ee8, 0x3c020001, 0x8c426d10, 0xad220000,
+0x3c020001, 0x8c426d10, 0x3c010001, 0xac296eec,
+0xad220004, 0xac090250, 0x3e00008, 0x0,
+0x27bdffd0, 0xafb00010, 0x3c100001, 0x8e106eec,
+0x3c020001, 0x8c426d10, 0xafb10014, 0x808821,
+0xafbe0024, 0x8fbe0040, 0x8fa40048, 0xafb20018,
+0xa09021, 0xafbf0028, 0xafb50020, 0xafb3001c,
+0xae020000, 0x3c020001, 0x8c426d10, 0xc09821,
+0xe0a821, 0x10800006, 0xae020004, 0x26050008,
+0xc002bb3, 0x24060018, 0x10000005, 0x2610ffe0,
+0x26040008, 0xc002ba8, 0x24050018, 0x2610ffe0,
+0x3c030001, 0x24636f90, 0x203102b, 0x10400003,
+0x0, 0x3c100001, 0x8e106ee8, 0x8e220000,
+0xae020000, 0x8e220004, 0xae120008, 0xae020004,
+0x8f820054, 0xae130010, 0xae150014, 0xae1e0018,
+0x8fa80044, 0xae08001c, 0xae02000c, 0x2610ffe0,
+0x203102b, 0x10400003, 0x0, 0x3c100001,
+0x8e106ee8, 0x3c020001, 0x8c426d10, 0xae020000,
+0x3c020001, 0x8c426d10, 0x3c010001, 0xac306eec,
+0xae020004, 0xac100250, 0x8fbf0028, 0x8fbe0024,
+0x8fb50020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
+0x8fb00010, 0x3e00008, 0x27bd0030, 0x851821,
+0x83102b, 0x10400006, 0x0, 0xac800000,
+0x24840004, 0x83102b, 0x5440fffd, 0xac800000,
+0x3e00008, 0x0, 0xa61821, 0xa3102b,
+0x10400007, 0x0, 0x8c820000, 0xaca20000,
+0x24a50004, 0xa3102b, 0x1440fffb, 0x24840004,
+0x3e00008, 0x0, 0x861821, 0x83102b,
+0x10400007, 0x0, 0x8ca20000, 0xac820000,
+0x24840004, 0x83102b, 0x1440fffb, 0x24a50004,
+0x3e00008, 0x0, 0x63080, 0x861821,
+0x83102b, 0x10400006, 0x0, 0xac850000,
+0x24840004, 0x83102b, 0x5440fffd, 0xac850000,
+0x3e00008, 0x0, 0x0, 0x26e50028,
+0xa03021, 0x274301c0, 0x8f4d0358, 0x8f47035c,
+0x8f480360, 0x8f490364, 0x8f4a0368, 0x8f4b0204,
+0x8f4c0200, 0x24640400, 0x64102b, 0x10400008,
+0x3c0208ff, 0x8cc20000, 0xac620000, 0x24630004,
+0x64102b, 0x1440fffb, 0x24c60004, 0x3c0208ff,
+0x3442ffff, 0x3c03c0ff, 0xaf4d0358, 0xaf47035c,
+0xaf480360, 0xaf490364, 0xaf4a0368, 0xaf4b0204,
+0xaf4c0200, 0x8f840220, 0x3463ffff, 0x8f860200,
+0x821024, 0x34420004, 0xc31824, 0x34630004,
+0xaf820220, 0xaf830200, 0x8ca20214, 0xac020084,
+0x8ca20218, 0xac020088, 0x8ca2021c, 0xac02008c,
+0x8ca20220, 0xac020090, 0x8ca20224, 0xac020094,
+0x8ca20228, 0xac020098, 0x8ca2022c, 0xac02009c,
+0x8ca20230, 0xac0200a0, 0x8ca20234, 0xac0200a4,
+0x8ca20238, 0xac0200a8, 0x8ca2023c, 0xac0200ac,
+0x8ca20240, 0xac0200b0, 0x8ca20244, 0xac0200b4,
+0x8ca20248, 0xac0200b8, 0x8ca2024c, 0xac0200bc,
+0x8ca2001c, 0xac020080, 0x8ca20018, 0xac0200c0,
+0x8ca20020, 0xac0200cc, 0x8ca20024, 0xac0200d0,
+0x8ca201d0, 0xac0200e0, 0x8ca201d4, 0xac0200e4,
+0x8ca201d8, 0xac0200e8, 0x8ca201dc, 0xac0200ec,
+0x8ca201e0, 0xac0200f0, 0x8ca20098, 0x8ca3009c,
+0xac0300fc, 0x8ca200a8, 0x8ca300ac, 0xac0300f4,
+0x8ca200a0, 0x8ca300a4, 0x30840004, 0xac0300f8,
+0x14800007, 0x30c20004, 0x8f820220, 0x3c0308ff,
+0x3463fffb, 0x431024, 0xaf820220, 0x30c20004,
+0x14400006, 0x0, 0x8f820200, 0x3c03c0ff,
+0x3463fffb, 0x431024, 0xaf820200, 0x8f4202dc,
+0xa34005c5, 0x24420001, 0xaf4202dc, 0x8f4202dc,
+0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
+0xafb00020, 0x8f430024, 0x8f420020, 0x10620038,
+0x0, 0x8f430020, 0x8f420024, 0x622023,
+0x4810003, 0x0, 0x8f420040, 0x822021,
+0x8f430030, 0x8f420024, 0x43102b, 0x14400005,
+0x0, 0x8f430040, 0x8f420024, 0x10000005,
+0x621023, 0x8f420030, 0x8f430024, 0x431023,
+0x2442ffff, 0x406021, 0x8c102a, 0x54400001,
+0x806021, 0x8f4a0024, 0x8f490040, 0x8f480024,
+0x8f440180, 0x8f450184, 0x8f460024, 0x8f4b001c,
+0x24070001, 0xafa70010, 0x84100, 0x1001821,
+0x14c5021, 0x2529ffff, 0x1498024, 0xafb00014,
+0x8f470014, 0x1021, 0x63100, 0xafa70018,
+0xa32821, 0xa3382b, 0x822021, 0x872021,
+0x8f420108, 0x1663021, 0x40f809, 0xc3900,
+0x54400001, 0xaf500024, 0x8f430024, 0x8f420020,
+0x14620018, 0x0, 0x8f420000, 0x10400007,
+0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
+0x0, 0x10000005, 0x0, 0xaf800048,
+0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
+0x2403ffef, 0x431024, 0xaf820060, 0x8f420000,
+0x10400003, 0x0, 0x10000002, 0xaf80004c,
+0xaf800048, 0x8fbf0024, 0x8fb00020, 0x3e00008,
+0x27bd0028, 0x3e00008, 0x0, 0x27bdffc0,
+0x32c20020, 0xafbf0038, 0xafb30034, 0xafb20030,
+0xafb1002c, 0x10400004, 0xafb00028, 0x8f530028,
+0x10000002, 0x0, 0x8f530020, 0x8f420030,
+0x105300eb, 0x21100, 0x8f43001c, 0x628021,
+0x8e040000, 0x8e050004, 0x96120008, 0x8f420090,
+0x9611000a, 0x3246ffff, 0x46102a, 0x10400017,
+0x0, 0x8f8200d8, 0x8f430098, 0x431023,
+0x2442dcbe, 0xaf420090, 0x8f420090, 0x2842dcbf,
+0x10400005, 0x0, 0x8f420090, 0x8f430144,
+0x431021, 0xaf420090, 0x8f420090, 0x46102a,
+0x10400006, 0x0, 0x8f420348, 0x24420001,
+0xaf420348, 0x100000e1, 0x8f420348, 0x8f8200fc,
+0x14400006, 0x0, 0x8f420344, 0x24420001,
+0xaf420344, 0x100000d9, 0x8f420344, 0x934205c2,
+0x1040000b, 0x32c20008, 0x10400008, 0x32220200,
+0x10400006, 0x3c034000, 0x9602000e, 0xaf4300ac,
+0x21400, 0x10000002, 0xaf4200b0, 0xaf4000ac,
+0x32220004, 0x1040007f, 0x32220800, 0x10400003,
+0x3247ffff, 0x10000002, 0x24020020, 0x24020004,
+0xafa20010, 0x8f420030, 0xafa20014, 0x8f420010,
+0x3c030002, 0x431025, 0xafa20018, 0x8f460098,
+0x8f420108, 0x40f809, 0x0, 0x104000b7,
+0x0, 0x8f42009c, 0x8f430094, 0x2421021,
+0xaf42009c, 0xae03000c, 0x8f4200ac, 0x10400008,
+0x3c034000, 0x8f420094, 0x431025, 0xafa20020,
+0x8f42009c, 0x8f4300b0, 0x10000004, 0x431025,
+0x8f420094, 0xafa20020, 0x8f42009c, 0xafa20024,
+0x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
+0xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
+0x8f440270, 0x8f450274, 0x401821, 0x1021,
+0xa32821, 0xa3302b, 0x822021, 0x862021,
+0x32230060, 0x24020040, 0xaf440270, 0xaf450274,
+0x10620017, 0x2c620041, 0x10400005, 0x24020020,
+0x10620008, 0x24020001, 0x10000026, 0x0,
+0x24020060, 0x10620019, 0x24020001, 0x10000021,
+0x0, 0x8f420278, 0x8f43027c, 0x24630001,
+0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
+0x8f420278, 0x8f43027c, 0x10000016, 0x24020001,
+0x8f420280, 0x8f430284, 0x24630001, 0x2c640001,
+0x441021, 0xaf420280, 0xaf430284, 0x8f420280,
+0x8f430284, 0x1000000b, 0x24020001, 0x8f420288,
+0x8f43028c, 0x24630001, 0x2c640001, 0x441021,
+0xaf420288, 0xaf43028c, 0x8f420288, 0x8f43028c,
+0x24020001, 0xa34205c2, 0x8f420098, 0x3244ffff,
+0x2406fff8, 0x8f45013c, 0x441021, 0x24420007,
+0x461024, 0x24840007, 0xaf420094, 0x8f420090,
+0x8f430094, 0x862024, 0x441023, 0x65182b,
+0x14600005, 0xaf420090, 0x8f420094, 0x8f430144,
+0x431023, 0xaf420094, 0x8f420094, 0x10000023,
+0xaf40009c, 0x3247ffff, 0x50e00022, 0x32c20020,
+0x14400002, 0x24020010, 0x24020002, 0xafa20010,
+0x8f420030, 0xafa20014, 0x8f420010, 0xafa20018,
+0x8f460098, 0x8f420108, 0x40f809, 0x0,
+0x1040003a, 0x3245ffff, 0x8f420098, 0x8f430090,
+0x8f46013c, 0x451021, 0xaf420098, 0x8f42009c,
+0x8f440098, 0xa34005c2, 0x651823, 0xaf430090,
+0x451021, 0x86202b, 0x14800005, 0xaf42009c,
+0x8f420098, 0x8f430144, 0x431023, 0xaf420098,
+0x32c20020, 0x10400005, 0x0, 0x8f420358,
+0x2442ffff, 0xaf420358, 0x8f420358, 0x8f420030,
+0x8f430040, 0x24420001, 0x2463ffff, 0x431024,
+0xaf420030, 0x8f420030, 0x14530018, 0x0,
+0x8f420000, 0x10400007, 0x0, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x2403fff7, 0x431024,
+0xaf820060, 0x8f420000, 0x10400003, 0x0,
+0x10000002, 0xaf80004c, 0xaf800048, 0x8fbf0038,
+0x8fb30034, 0x8fb20030, 0x8fb1002c, 0x8fb00028,
+0x3e00008, 0x27bd0040, 0x3e00008, 0x0,
+0x27bdffd0, 0x32c20020, 0xafbf002c, 0xafb20028,
+0xafb10024, 0x10400004, 0xafb00020, 0x8f520028,
+0x10000002, 0x0, 0x8f520020, 0x8f420030,
+0x105200b5, 0x21100, 0x8f43001c, 0x628021,
+0x8e040000, 0x8e050004, 0x96110008, 0x8f420090,
+0x9607000a, 0x3226ffff, 0x46102a, 0x10400017,
+0x0, 0x8f8200d8, 0x8f430098, 0x431023,
+0x2442dc46, 0xaf420090, 0x8f420090, 0x2842dc47,
+0x10400005, 0x0, 0x8f420090, 0x8f430144,
+0x431021, 0xaf420090, 0x8f420090, 0x46102a,
+0x10400006, 0x0, 0x8f420348, 0x24420001,
+0xaf420348, 0x100000ab, 0x8f420348, 0x8f8600fc,
+0x10c0000c, 0x0, 0x8f8200f4, 0x2403fff8,
+0x431024, 0x461023, 0x218c3, 0x58600001,
+0x24630100, 0x8f42008c, 0x43102b, 0x14400006,
+0x712c2, 0x8f420344, 0x24420001, 0xaf420344,
+0x10000098, 0x8f420344, 0x934305c2, 0x1060000f,
+0x30460001, 0x8f420010, 0x34480400, 0x32c20008,
+0x10400008, 0x30e20200, 0x10400006, 0x3c034000,
+0x9602000e, 0xaf4300ac, 0x21400, 0x10000004,
+0xaf4200b0, 0x10000002, 0xaf4000ac, 0x8f480010,
+0x30e20004, 0x10400045, 0x3227ffff, 0x8f4900ac,
+0x11200005, 0x30c200ff, 0x14400006, 0x24020040,
+0x10000004, 0x24020008, 0x14400002, 0x24020020,
+0x24020004, 0xafa20010, 0x8f430030, 0x11200004,
+0xafa30014, 0x8f4200b0, 0x621025, 0xafa20014,
+0x3c020002, 0x1021025, 0xafa20018, 0x8f460098,
+0x8f420108, 0x40f809, 0x0, 0x10400069,
+0x3224ffff, 0x8f42008c, 0x8f430094, 0x24420001,
+0xaf42008c, 0x24020001, 0xae03000c, 0xa34205c2,
+0x8f420098, 0x2406fff8, 0x8f45013c, 0x441021,
+0x24420007, 0x461024, 0x24840007, 0xaf420094,
+0x8f420090, 0x8f430094, 0x862024, 0x441023,
+0x65182b, 0x14600005, 0xaf420090, 0x8f420094,
+0x8f430144, 0x431023, 0xaf420094, 0x8f430094,
+0x8f420140, 0x43102b, 0x10400009, 0x0,
+0x8f43013c, 0x8f440094, 0x8f420090, 0x8f450138,
+0x641823, 0x431023, 0xaf420090, 0xaf450094,
+0x8f420094, 0x1000001f, 0xaf420098, 0x10e0001d,
+0x30c200ff, 0x14400002, 0x24020010, 0x24020002,
+0xafa20010, 0x8f420030, 0xafa80018, 0xafa20014,
+0x8f460098, 0x8f420108, 0x40f809, 0x0,
+0x10400030, 0x3225ffff, 0x8f420098, 0x8f44013c,
+0x451021, 0xaf420098, 0x8f420090, 0x8f430098,
+0xa34005c2, 0x451023, 0x64182b, 0x14600005,
+0xaf420090, 0x8f420098, 0x8f430144, 0x431023,
+0xaf420098, 0x8f420030, 0x8f430040, 0x24420001,
+0x2463ffff, 0x431024, 0xaf420030, 0x8f420030,
+0x14520018, 0x0, 0x8f420000, 0x10400007,
+0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
+0x0, 0x10000005, 0x0, 0xaf800048,
+0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
+0x2403fff7, 0x431024, 0xaf820060, 0x8f420000,
+0x10400003, 0x0, 0x10000002, 0xaf80004c,
+0xaf800048, 0x8fbf002c, 0x8fb20028, 0x8fb10024,
+0x8fb00020, 0x3e00008, 0x27bd0030, 0x3e00008,
+0x0, 0x27bdffd8, 0x3c020001, 0x34422ec0,
+0xafbf0020, 0x8f4300f0, 0x8f840108, 0x2e21021,
+0x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
+0x2e21021, 0x401821, 0xaf4300f0, 0xac600000,
+0x8f4200ec, 0x8c660004, 0x14620004, 0x3c020001,
+0x24820020, 0x1000000f, 0xaf820108, 0x8f4300f0,
+0x34422ec0, 0x2e21021, 0x54620004, 0x24620008,
+0x3c020001, 0x34422cc0, 0x2e21021, 0x401821,
+0x8c620004, 0x21140, 0x821021, 0xaf820108,
+0xac600000, 0x8c850018, 0x30a20036, 0x1040006c,
+0x30a20001, 0x8c82001c, 0x8f430040, 0x8f440034,
+0x24420001, 0x2463ffff, 0x431024, 0x862021,
+0xaf42002c, 0x30a20030, 0x14400006, 0xaf440034,
+0x8f420034, 0x8c03023c, 0x43102b, 0x144000b4,
+0x0, 0x32c20010, 0x10400028, 0x24070008,
+0x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
+0x8f860120, 0x24020080, 0xafa20010, 0xafa30014,
+0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x14400011, 0x24020001, 0x3c010001, 0x370821,
+0xa02240f1, 0x8f820124, 0xafa20010, 0x8f820128,
+0x3c040001, 0x248467c4, 0xafa20014, 0x8f46002c,
+0x8f870120, 0x3c050009, 0xc002b3b, 0x34a51100,
+0x10000036, 0x0, 0x8f420300, 0x8f43002c,
+0x24420001, 0xaf420300, 0x8f420300, 0x24020001,
+0xa34205c1, 0x10000026, 0xaf430038, 0x8f440170,
+0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
+0x24020020, 0xafa20010, 0xafa30014, 0xafa80018,
+0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
+0x24020001, 0x3c010001, 0x370821, 0xa02240f0,
+0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
+0x248467b8, 0xafa20014, 0x8f46002c, 0x8f870120,
+0x3c050009, 0xc002b3b, 0x34a50900, 0x1000000f,
+0x0, 0x8f420300, 0x24420001, 0xaf420300,
+0x8f420300, 0x8f42002c, 0xa34005c1, 0xaf420038,
+0x3c010001, 0x370821, 0xa02040f1, 0x3c010001,
+0x370821, 0xa02040f0, 0xaf400034, 0x8f420314,
+0x24420001, 0xaf420314, 0x10000059, 0x8f420314,
+0x10400022, 0x30a27000, 0x8c85001c, 0x8f420028,
+0xa22023, 0x4810003, 0x0, 0x8f420040,
+0x822021, 0x8f420358, 0x8f430000, 0xaf450028,
+0x441021, 0x10600007, 0xaf420358, 0xaf80004c,
+0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
+0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
+0x0, 0x8f820060, 0x34420008, 0xaf820060,
+0x8f420000, 0x10400003, 0x0, 0x10000038,
+0xaf80004c, 0x10000036, 0xaf800048, 0x1040002f,
+0x30a21000, 0x1040000c, 0x30a24000, 0x8c83001c,
+0x8f420050, 0x622023, 0x4820001, 0x24840200,
+0x8f42035c, 0x441021, 0xaf42035c, 0x8f420368,
+0x1000001a, 0xaf430050, 0x1040000c, 0x32c28000,
+0x8c83001c, 0x8f420070, 0x622023, 0x4820001,
+0x24840400, 0x8f420364, 0x441021, 0xaf420364,
+0x8f420368, 0x1000000d, 0xaf430070, 0x1040000e,
+0x3c020800, 0x8c83001c, 0x8f420060, 0x622023,
+0x4820001, 0x24840100, 0x8f420360, 0x441021,
+0xaf420360, 0x8f420368, 0xaf430060, 0x441021,
+0xaf420368, 0x3c020800, 0x2c21024, 0x50400008,
+0x36940040, 0x10000006, 0x0, 0x30a20100,
+0x10400003, 0x0, 0xc002bd8, 0x0,
+0x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
+0x0, 0x27bdffa8, 0xafbf0050, 0xafbe004c,
+0xafb50048, 0xafb30044, 0xafb20040, 0xafb1003c,
+0xafb00038, 0x8f910108, 0x26220020, 0xaf820108,
+0x8e320018, 0xa821, 0x32420024, 0x104001ba,
+0xf021, 0x8e26001c, 0x8f43001c, 0x61100,
+0x621821, 0x8c70000c, 0x9604000c, 0x962d0016,
+0x9473000a, 0x2c8305dd, 0x38828870, 0x2c420001,
+0x621825, 0x10600015, 0x2821, 0x32c20040,
+0x10400015, 0x24020800, 0x96030014, 0x14620012,
+0x3402aaaa, 0x9603000e, 0x14620007, 0x2021,
+0x96030010, 0x24020300, 0x14620004, 0x801021,
+0x96020012, 0x2c440001, 0x801021, 0x54400006,
+0x24050016, 0x10000004, 0x0, 0x24020800,
+0x50820001, 0x2405000e, 0x934205c3, 0x14400008,
+0x5821, 0x240b0001, 0x32620180, 0xaf4500a8,
+0xaf5000a0, 0x10400002, 0xaf4600a4, 0xa34b05c3,
+0x10a00085, 0x2054021, 0x91020000, 0x3821,
+0x3042000f, 0x25080, 0x32c20002, 0x10400012,
+0x10a1821, 0x32620002, 0x10400010, 0x32c20001,
+0x1002021, 0x94820000, 0x24840002, 0xe23821,
+0x83102b, 0x1440fffb, 0x30e2ffff, 0x71c02,
+0x623821, 0x71c02, 0x30e2ffff, 0x623821,
+0x71027, 0xa502000a, 0x32c20001, 0x1040006a,
+0x32620001, 0x10400068, 0x0, 0x8f4200a8,
+0x10400065, 0x0, 0x8f4200a0, 0x8f4300a8,
+0x431021, 0x904c0009, 0x318900ff, 0x39230006,
+0x3182b, 0x39220011, 0x2102b, 0x621824,
+0x1060000c, 0x3c050006, 0x8f4200a4, 0x3c040001,
+0x248467d4, 0xafa20010, 0x8f4200a0, 0x34a54600,
+0x1203821, 0xc002b3b, 0xafa20014, 0x1000004e,
+0x0, 0x32c20004, 0x14400013, 0x2821,
+0x316200ff, 0x14400004, 0x0, 0x95020002,
+0x1000000d, 0x4a2823, 0x9505000c, 0x9502000e,
+0x95030010, 0xa22821, 0xa32821, 0x95030012,
+0x91040009, 0x95020002, 0xa32821, 0xa42821,
+0x4a1023, 0xa22821, 0x2002021, 0x94820000,
+0x24840002, 0xe23821, 0x88102b, 0x1440fffb,
+0x71c02, 0x30e2ffff, 0x623821, 0x71c02,
+0x30e2ffff, 0x623821, 0x1a52821, 0x51c02,
+0x30a2ffff, 0x622821, 0x51c02, 0x30a2ffff,
+0x622821, 0xa72823, 0x51402, 0xa22821,
+0x30a5ffff, 0x50a00001, 0x3405ffff, 0x316200ff,
+0x14400008, 0x318300ff, 0x8f4300a0, 0x8f4200a8,
+0x624021, 0x91020000, 0x3042000f, 0x25080,
+0x318300ff, 0x24020006, 0x14620003, 0x10a1021,
+0x10000002, 0x24440010, 0x24440006, 0x316200ff,
+0x14400006, 0x0, 0x94820000, 0xa22821,
+0x51c02, 0x30a2ffff, 0x622821, 0x934205c3,
+0x10400003, 0x32620100, 0x50400003, 0xa4850000,
+0x52827, 0xa4850000, 0x9622000e, 0x8f43009c,
+0x621821, 0x32a200ff, 0x10400007, 0xaf43009c,
+0x3c024000, 0x2021025, 0xafa20020, 0x8f42009c,
+0x10000003, 0x5e1025, 0xafb00020, 0x8f42009c,
+0xafa20024, 0x32620080, 0x10400010, 0x32620100,
+0x8f4200b4, 0x24430001, 0x210c0, 0x571021,
+0xaf4300b4, 0x8fa30020, 0x8fa40024, 0x3c010001,
+0x220821, 0xac2338e8, 0x3c010001, 0x220821,
+0xac2438ec, 0x100000a5, 0x32c20020, 0x10400064,
+0x0, 0x8f4200b4, 0x24430001, 0x210c0,
+0x571021, 0xaf4300b4, 0x8fa30020, 0x8fa40024,
+0x3c010001, 0x220821, 0xac2338e8, 0x3c010001,
+0x220821, 0xac2438ec, 0x8f4200b4, 0x10400051,
+0x3821, 0x3c090001, 0x352938e8, 0x3c08001f,
+0x3508ffff, 0x240bffff, 0x340affff, 0x710c0,
+0x571021, 0x491021, 0x8c430000, 0x8c440004,
+0xafa30028, 0xafa4002c, 0x8f8200fc, 0x8fa30028,
+0x8fa4002c, 0xac430000, 0xac440004, 0x24420008,
+0xaf8200f0, 0x8f42008c, 0x2442ffff, 0xaf42008c,
+0x97a2002e, 0x8f440270, 0x8f450274, 0x401821,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaf440270, 0xaf450274, 0x8fa20028,
+0x481024, 0x90430000, 0x30630001, 0x1460000b,
+0x402021, 0x8f420278, 0x8f43027c, 0x24630001,
+0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
+0x8f420278, 0x1000001a, 0x8f43027c, 0x8c820000,
+0x144b000e, 0x0, 0x94820004, 0x144a000b,
+0x0, 0x8f420288, 0x8f43028c, 0x24630001,
+0x2c640001, 0x441021, 0xaf420288, 0xaf43028c,
+0x8f420288, 0x1000000a, 0x8f43028c, 0x8f420280,
+0x8f430284, 0x24630001, 0x2c640001, 0x441021,
+0xaf420280, 0xaf430284, 0x8f420280, 0x8f430284,
+0x8f4200b4, 0x24e70001, 0xe2102b, 0x1440ffb8,
+0x710c0, 0xa34005c3, 0x1000003f, 0xaf4000b4,
+0x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
+0xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
+0x8f46008c, 0x8f440270, 0x8f450274, 0x401821,
+0x1021, 0x24c6ffff, 0xaf46008c, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0xaf440270,
+0xaf450274, 0x92020000, 0x30420001, 0x1440000c,
+0x2402ffff, 0x8f420278, 0x8f43027c, 0x24630001,
+0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
+0x8f420278, 0x8f43027c, 0x1000001c, 0x32c20020,
+0x8e030000, 0x1462000f, 0x3402ffff, 0x96030004,
+0x1462000c, 0x0, 0x8f420288, 0x8f43028c,
+0x24630001, 0x2c640001, 0x441021, 0xaf420288,
+0xaf43028c, 0x8f420288, 0x8f43028c, 0x1000000b,
+0x32c20020, 0x8f420280, 0x8f430284, 0x24630001,
+0x2c640001, 0x441021, 0xaf420280, 0xaf430284,
+0x8f420280, 0x8f430284, 0x32c20020, 0x10400005,
+0xaf40009c, 0x8f420358, 0x2442ffff, 0xaf420358,
+0x8f420358, 0x8e22001c, 0x8f430040, 0x24420001,
+0x2463ffff, 0x431024, 0xaf42002c, 0x32420060,
+0x14400008, 0x32c20010, 0x8f420034, 0x24420001,
+0xaf420034, 0x8c03023c, 0x43102b, 0x14400102,
+0x32c20010, 0x10400018, 0x24070008, 0x8f440170,
+0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
+0x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
+0x8f42010c, 0x40f809, 0x24c6001c, 0x10400047,
+0x24020001, 0x8f420300, 0x8f43002c, 0x24420001,
+0xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
+0x1000007c, 0xaf430038, 0x8f440170, 0x8f450174,
+0x8f43002c, 0x8f48000c, 0x8f860120, 0x24020020,
+0xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
+0x40f809, 0x24c6001c, 0x10400057, 0x24020001,
+0x10000065, 0x0, 0x32420012, 0x10400075,
+0x32420001, 0x9622000e, 0x8f43009c, 0x621821,
+0x32c20020, 0x10400005, 0xaf43009c, 0x8f420358,
+0x2442ffff, 0xaf420358, 0x8f420358, 0x8e22001c,
+0x8f430040, 0x24420001, 0x2463ffff, 0x431024,
+0xaf42002c, 0x32420010, 0x14400008, 0x32c20010,
+0x8f420034, 0x24420001, 0xaf420034, 0x8c03023c,
+0x43102b, 0x144000bc, 0x32c20010, 0x10400028,
+0x24070008, 0x8f440170, 0x8f450174, 0x8f43002c,
+0x8f48000c, 0x8f860120, 0x24020080, 0xafa20010,
+0xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
+0x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
+0x370821, 0xa02240f1, 0x8f820124, 0xafa20010,
+0x8f820128, 0x3c040001, 0x248467c4, 0xafa20014,
+0x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
+0x34a51100, 0x10000036, 0x0, 0x8f420300,
+0x8f43002c, 0x24420001, 0xaf420300, 0x8f420300,
+0x24020001, 0xa34205c1, 0x10000026, 0xaf430038,
+0x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
+0x8f860120, 0x24020020, 0xafa20010, 0xafa30014,
+0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
+0x14400011, 0x24020001, 0x3c010001, 0x370821,
+0xa02240f0, 0x8f820124, 0xafa20010, 0x8f820128,
+0x3c040001, 0x248467b8, 0xafa20014, 0x8f46002c,
+0x8f870120, 0x3c050009, 0xc002b3b, 0x34a50900,
+0x1000000f, 0x0, 0x8f420300, 0x24420001,
+0xaf420300, 0x8f420300, 0x8f42002c, 0xa34005c1,
+0xaf420038, 0x3c010001, 0x370821, 0xa02040f1,
+0x3c010001, 0x370821, 0xa02040f0, 0xaf400034,
+0x8f420314, 0x24420001, 0xaf420314, 0x10000062,
+0x8f420314, 0x10400022, 0x32427000, 0x8e25001c,
+0x8f420028, 0xa22023, 0x4810003, 0x0,
+0x8f420040, 0x822021, 0x8f420358, 0x8f430000,
+0xaf450028, 0x441021, 0x10600007, 0xaf420358,
+0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
+0x10000005, 0x0, 0xaf800048, 0x8f820048,
+0x1040fffd, 0x0, 0x8f820060, 0x34420008,
+0xaf820060, 0x8f420000, 0x10400003, 0x0,
+0x10000041, 0xaf80004c, 0x1000003f, 0xaf800048,
+0x1040002f, 0x32421000, 0x1040000c, 0x32424000,
+0x8e23001c, 0x8f420050, 0x622023, 0x4820001,
+0x24840200, 0x8f42035c, 0x441021, 0xaf42035c,
+0x8f420368, 0x1000001a, 0xaf430050, 0x1040000c,
+0x32c28000, 0x8e23001c, 0x8f420070, 0x622023,
+0x4820001, 0x24840400, 0x8f420364, 0x441021,
+0xaf420364, 0x8f420368, 0x1000000d, 0xaf430070,
+0x1040000e, 0x3c020800, 0x8e23001c, 0x8f420060,
+0x622023, 0x4820001, 0x24840100, 0x8f420360,
+0x441021, 0xaf420360, 0x8f420368, 0xaf430060,
+0x441021, 0xaf420368, 0x3c020800, 0x2c21024,
+0x50400011, 0x36940040, 0x1000000f, 0x0,
+0x32420048, 0x10400007, 0x24150001, 0x8e22001c,
+0x3c03ffff, 0x43f024, 0x3042ffff, 0x1000fd75,
+0xae22001c, 0x32420100, 0x10400003, 0x0,
+0xc002bd8, 0x0, 0x8fbf0050, 0x8fbe004c,
+0x8fb50048, 0x8fb30044, 0x8fb20040, 0x8fb1003c,
+0x8fb00038, 0x3e00008, 0x27bd0058, 0x3e00008,
+0x0, 0x0, 0x0, 0x8f8300e4,
+0x8f8200e0, 0x2404fff8, 0x441024, 0x621026,
+0x2102b, 0x21023, 0x3e00008, 0x621024,
+0x3e00008, 0x0, 0x27bdffe0, 0xafbf001c,
+0xafb00018, 0x8f8600c4, 0x8f8400e0, 0x8f8500e4,
+0x2402fff8, 0x821824, 0x10a30009, 0x27623ff8,
+0x14a20002, 0x24a20008, 0x27623000, 0x408021,
+0x16030005, 0x30820004, 0x10400004, 0xc02021,
+0x10000022, 0x1021, 0x8e040000, 0x8f42011c,
+0x14a20003, 0x0, 0x8f420120, 0xaf420114,
+0x8ca30000, 0x8f420148, 0x831823, 0x43102b,
+0x10400003, 0x0, 0x8f420148, 0x621821,
+0x94a20006, 0x24420050, 0x62102b, 0x1440000f,
+0xa01021, 0xafa40010, 0xafa30014, 0x8ca60000,
+0x8ca70004, 0x3c040001, 0xc002b3b, 0x24846894,
+0x8f42020c, 0x24420001, 0xaf42020c, 0x8f42020c,
+0x1021, 0xaf9000e8, 0xaf9000e4, 0x8fbf001c,
+0x8fb00018, 0x3e00008, 0x27bd0020, 0x3e00008,
+0x0, 0x8f8400e0, 0x8f8800c4, 0x8f8300e8,
+0x2402fff8, 0x823824, 0xe32023, 0x2c821000,
+0x50400001, 0x24841000, 0x420c2, 0x801821,
+0x8f440258, 0x8f45025c, 0x1021, 0xa32821,
+0xa3302b, 0x822021, 0x862021, 0xaf440258,
+0xaf45025c, 0x8f8300c8, 0x8f420148, 0x1032023,
+0x82102b, 0x14400004, 0x801821, 0x8f420148,
+0x822021, 0x801821, 0x8f440250, 0x8f450254,
+0x1021, 0xa32821, 0xa3302b, 0x822021,
+0x862021, 0xaf440250, 0xaf450254, 0xaf8800c8,
+0xaf8700e4, 0xaf8700e8, 0x3e00008, 0x0,
+0x27bdff30, 0x240a0001, 0xafbf00c8, 0xafbe00c4,
+0xafb500c0, 0xafb300bc, 0xafb200b8, 0xafb100b4,
+0xafb000b0, 0xa3a00097, 0xafa00044, 0xafaa005c,
+0x934205c4, 0xa7a0008e, 0x1040000a, 0xa7a00086,
+0x8f4b00c4, 0xafab0064, 0x8f4a00c0, 0xafaa006c,
+0x8f4b00cc, 0xafab0074, 0x8f4a00c8, 0x10000129,
+0xafaa007c, 0x8f420114, 0x40f809, 0x0,
+0x403021, 0x10c0034f, 0x0, 0x8cc20000,
+0x8cc30004, 0xafa20020, 0xafa30024, 0x8fab0024,
+0x8faa0020, 0x3162ffff, 0x2442fffc, 0xafa2006c,
+0x3c020006, 0x2c21024, 0xafab007c, 0x14400015,
+0xafaa0064, 0x91420000, 0x30420001, 0x10400011,
+0x2402ffff, 0x8d430000, 0x14620004, 0x3402ffff,
+0x95430004, 0x1062000b, 0x0, 0xc0024bb,
+0x8fa40064, 0x304200ff, 0x14400006, 0x0,
+0x8f420118, 0x40f809, 0x0, 0x1000032d,
+0x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
+0x431024, 0x3c03ffff, 0x431824, 0x14600003,
+0xafa20024, 0x10000040, 0x1821, 0x3c020080,
+0x621024, 0x10400007, 0x0, 0x8f42038c,
+0x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
+0x24030001, 0x8f420210, 0x24420001, 0xaf420210,
+0x8f420210, 0x3c020001, 0x621024, 0x10400006,
+0x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
+0x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
+0x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
+0x8f42037c, 0x3c020004, 0x621024, 0x10400006,
+0x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
+0x8f420380, 0x3c020008, 0x621024, 0x10400006,
+0x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
+0x8f420384, 0x3c020010, 0x621024, 0x10400006,
+0x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
+0x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
+0x24030001, 0x8f420388, 0x24420001, 0xaf420388,
+0x8f420388, 0x24030001, 0x8c020260, 0x8fab006c,
+0x4b102b, 0x10400014, 0x307000ff, 0x8f4201e8,
+0x24420001, 0xaf4201e8, 0x8f4201e8, 0x8faa007c,
+0x8f8200e0, 0x354a0100, 0xafaa007c, 0xafa20010,
+0x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
+0xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
+0xc002b3b, 0x34a50800, 0x12000010, 0x3c020080,
+0x2c21024, 0x1440000e, 0x32c20400, 0x8fab007c,
+0x3c020080, 0x34420100, 0x1621024, 0x10400005,
+0x0, 0x8f42020c, 0x24420001, 0xaf42020c,
+0x8f42020c, 0x100002b0, 0x8fa3006c, 0x32c20400,
+0x10400015, 0x34028100, 0x8faa0064, 0x9543000c,
+0x14620012, 0x3c020100, 0x240b0200, 0xa7ab008e,
+0x9542000e, 0x8d430008, 0x8d440004, 0x8d450000,
+0x8faa006c, 0x8fab0064, 0x254afffc, 0xafaa006c,
+0xa7a20086, 0xad63000c, 0xad640008, 0xad650004,
+0x256b0004, 0xafab0064, 0x3c020100, 0x2c21024,
+0x10400004, 0x0, 0x8faa006c, 0x254a0004,
+0xafaa006c, 0x8f4200bc, 0x5040000a, 0xafa00074,
+0x8fab006c, 0x4b102b, 0x50400006, 0xafa00074,
+0x8f4200bc, 0x1621023, 0xafa20074, 0x8f4a00bc,
+0xafaa006c, 0x8f420080, 0x8fab006c, 0x4b102b,
+0x10400056, 0x32c28000, 0x1040005e, 0x240a0003,
+0x32c21000, 0x1040005b, 0xafaa005c, 0x10000058,
+0x240b0004, 0x8f420350, 0x2403ffbf, 0x283a024,
+0x24420001, 0xaf420350, 0x1000024f, 0x8f420350,
+0x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
+0x3c040001, 0x248468d0, 0x26620001, 0xafa20014,
+0xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
+0xc002b3b, 0x34a52250, 0x1000023f, 0x0,
+0x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
+0x3c040001, 0x248468d0, 0x24020002, 0xafa20014,
+0xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
+0xc002b3b, 0x34a52450, 0x1000022f, 0x0,
+0x8ea20000, 0x8ea30004, 0x3c040001, 0x248468e8,
+0xafb00010, 0xafbe0014, 0x8ea70018, 0x34a52800,
+0xc002b3b, 0x603021, 0x10000223, 0x0,
+0xa6b1000a, 0x8f820124, 0x3c040001, 0x248468f0,
+0xafbe0014, 0xafa20010, 0x8f460044, 0x8f870120,
+0x3c050007, 0xc002b3b, 0x34a53000, 0x10000216,
+0x0, 0xa6b1000a, 0xa6b2000e, 0x8f820124,
+0x3c040001, 0x248468fc, 0xafbe0014, 0xafa20010,
+0x8f460044, 0x8f870120, 0x3c050007, 0xc002b3b,
+0x34a53200, 0x10000208, 0x0, 0x8f420084,
+0x8faa006c, 0x4a102b, 0x14400007, 0x3c020001,
+0x2c21024, 0x10400004, 0x0, 0x240b0002,
+0xafab005c, 0x8faa006c, 0x1140021b, 0x27ab0020,
+0xafab00a4, 0x3c0a001f, 0x354affff, 0xafaa009c,
+0x8fab005c, 0x240a0001, 0x556a0021, 0x240a0002,
+0x8f430054, 0x8f420050, 0x1062000b, 0x274b0054,
+0x8f5e0054, 0x3403ecc0, 0xafab004c, 0x27c20001,
+0x304201ff, 0xafa20054, 0x1e1140, 0x431021,
+0x1000006b, 0x2e2a821, 0x8f420044, 0x8faa006c,
+0x3c040001, 0x248468ac, 0xafaa0014, 0xafa20010,
+0x8f460054, 0x8f470050, 0x3c050007, 0xc002b3b,
+0x34a51300, 0x8f430350, 0x2402ffbf, 0x282a024,
+0x24630001, 0xaf430350, 0x100001d3, 0x8f420350,
+0x156a001d, 0x0, 0x8f430074, 0x8f420070,
+0x1062000a, 0x274b0074, 0x8f5e0074, 0xafab004c,
+0x27c20001, 0x304203ff, 0xafa20054, 0x1e1140,
+0x24426cc0, 0x1000004a, 0x2e2a821, 0x8f420044,
+0x8faa006c, 0x3c040001, 0x248468b8, 0x3c050007,
+0xafaa0014, 0xafa20010, 0x8f460074, 0x8f470070,
+0x34a51500, 0x240b0001, 0xc002b3b, 0xafab005c,
+0x1000ffc3, 0x0, 0x8f430064, 0x8f420060,
+0x1062001a, 0x274a0064, 0x8f5e0064, 0x8fab005c,
+0xafaa004c, 0x27c20001, 0x304200ff, 0xafa20054,
+0x24020004, 0x1562000e, 0x1e1140, 0x1e1180,
+0x24420cc0, 0x2e21021, 0xafa20044, 0x9442002a,
+0x8faa0044, 0x8fab006c, 0x4b102b, 0x10400024,
+0x25550020, 0x240a0001, 0x10000021, 0xa3aa0097,
+0x24424cc0, 0x1000001e, 0x2e2a821, 0x8f420044,
+0x8fab006c, 0x3c040001, 0x248468c4, 0xafab0014,
+0xafa20010, 0x8f460064, 0x8f470060, 0x3c050007,
+0xc002b3b, 0x34a51800, 0x3c020008, 0x2c21024,
+0x1440ff34, 0x0, 0x8f420370, 0x240a0001,
+0xafaa005c, 0x24420001, 0xaf420370, 0x1000ff90,
+0x8f420370, 0x27a30036, 0x131040, 0x621821,
+0x94620000, 0x441021, 0x10000020, 0xa4620000,
+0x8fab0064, 0xaeab0018, 0x93a20097, 0x10400072,
+0x9821, 0x8faa0044, 0x8fa4006c, 0x8fa300a4,
+0x25420020, 0xafa20028, 0x25420008, 0xafa20030,
+0x25420010, 0xafaa002c, 0xafa20034, 0x9542002a,
+0xa7a20038, 0x95420018, 0xa7a2003a, 0x9542001a,
+0xa7a2003c, 0x9542001c, 0xa7a2003e, 0x94620018,
+0x24630002, 0x822023, 0x1880ffde, 0x26730001,
+0x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
+0x26650001, 0xa2102a, 0x1440002b, 0x24030001,
+0x8f83012c, 0x10600023, 0x0, 0x8f820124,
+0x431023, 0x22143, 0x58800001, 0x24840040,
+0x8f820128, 0x431023, 0x21943, 0x58600001,
+0x24630040, 0x64102a, 0x54400001, 0x602021,
+0xaf4400fc, 0x8f4200fc, 0xa2102a, 0x10400011,
+0x24030001, 0x10000015, 0x306200ff, 0x8fab0064,
+0x96070018, 0xafab0010, 0x8e220008, 0x3c040001,
+0x248468dc, 0x8c430004, 0x8c420000, 0x34a52400,
+0x2403021, 0xc002b3b, 0xafa30014, 0x1000002b,
+0x0, 0x8f420334, 0x1821, 0x24420001,
+0xaf420334, 0x8f420334, 0x306200ff, 0x5040fedc,
+0x3c020800, 0x12600021, 0x9021, 0x8fb100a4,
+0x2208021, 0x8e220008, 0x96070018, 0x8fa60064,
+0x8c440000, 0x8c450004, 0x240a0001, 0xafaa0010,
+0xafbe0014, 0x8f420008, 0xafa20018, 0x8f42010c,
+0x40f809, 0x0, 0x1040ffd8, 0x3c050007,
+0x96020018, 0x8fab0064, 0x8faa009c, 0x1625821,
+0x14b102b, 0x10400004, 0xafab0064, 0x8f420148,
+0x1625823, 0xafab0064, 0x26100002, 0x26520001,
+0x253102b, 0x1440ffe3, 0x26310004, 0x8fb0006c,
+0x10000036, 0x97b10038, 0x8f4200fc, 0x24050002,
+0xa2102a, 0x1440001b, 0x24030001, 0x8f83012c,
+0x10600013, 0x0, 0x8f820124, 0x431023,
+0x22143, 0x58800001, 0x24840040, 0x8f820128,
+0x431023, 0x21943, 0x58600001, 0x24630040,
+0x64102a, 0x54400001, 0x602021, 0xaf4400fc,
+0x8f4200fc, 0xa2102a, 0x14400006, 0x24030001,
+0x8f420334, 0x1821, 0x24420001, 0xaf420334,
+0x8f420334, 0x306200ff, 0x1040fea5, 0x3c020800,
+0x96b1000a, 0x8fb0006c, 0x3223ffff, 0x70102b,
+0x54400001, 0x608021, 0x8ea40000, 0x8ea50004,
+0x240b0001, 0xafab0010, 0xafbe0014, 0x8f420008,
+0x8fa60064, 0xafa20018, 0x8f42010c, 0x40f809,
+0x2003821, 0x1040fea2, 0x3c050007, 0x96a3000e,
+0x97aa008e, 0x11400007, 0x609021, 0x934205c4,
+0x14400004, 0x0, 0x97ab0086, 0x6a1825,
+0xa6ab0016, 0x8faa007c, 0x3c02ffff, 0x1421024,
+0x10400003, 0xa1402, 0x34630400, 0xa6a20014,
+0x8fab006c, 0x560b0072, 0xa6a3000e, 0x34620004,
+0xa6a2000e, 0x8faa0074, 0x16a1021, 0xa6a2000a,
+0x8f430044, 0x8f4401a0, 0x8f4501a4, 0x34028000,
+0xafa20010, 0x8f420044, 0x2a03021, 0x24070020,
+0xafa20014, 0x8f42000c, 0x31940, 0x604821,
+0xafa20018, 0x8f42010c, 0x4021, 0xa92821,
+0xa9182b, 0x882021, 0x40f809, 0x832021,
+0x5040fe7f, 0xa6b2000e, 0x8f420368, 0xafa0006c,
+0xa34005c4, 0x2442ffff, 0xaf420368, 0x8fab005c,
+0x240a0001, 0x8f420368, 0x156a0006, 0x240a0002,
+0x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
+0x8f42035c, 0x156a0006, 0x0, 0x8f420364,
+0x2442ffff, 0xaf420364, 0x10000005, 0x8f420364,
+0x8f420360, 0x2442ffff, 0xaf420360, 0x8f420360,
+0x8faa0054, 0x8fab004c, 0xad6a0000, 0x8f420044,
+0x8f440088, 0x8f430078, 0x24420001, 0x441024,
+0x24630001, 0xaf420044, 0xaf430078, 0x8c020240,
+0x62182b, 0x14600075, 0x24070008, 0x8f440168,
+0x8f45016c, 0x8f430044, 0x8f48000c, 0x8f860120,
+0x24020040, 0xafa20010, 0xafa30014, 0xafa80018,
+0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
+0x240b0001, 0x3c010001, 0x370821, 0xa02b40f2,
+0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
+0x2484688c, 0xafa20014, 0x8f460044, 0x8f870120,
+0x3c050009, 0xc002b3b, 0x34a51300, 0x1000000b,
+0x0, 0x8f420304, 0x24420001, 0xaf420304,
+0x8f420304, 0x8f420044, 0xaf42007c, 0x3c010001,
+0x370821, 0xa02040f2, 0xaf400078, 0x8f420318,
+0x24420001, 0xaf420318, 0x10000048, 0x8f420318,
+0xa6b0000a, 0x8f430044, 0x8f4401a0, 0x8f4501a4,
+0x34028000, 0xafa20010, 0x8f420044, 0x2a03021,
+0x24070020, 0xafa20014, 0x8f42000c, 0x31940,
+0x604821, 0xafa20018, 0x8f42010c, 0x4021,
+0xa92821, 0xa9182b, 0x882021, 0x40f809,
+0x832021, 0x1040fe1f, 0x240a0001, 0xa34a05c4,
+0x8fab006c, 0x8faa0064, 0x1705823, 0xafab006c,
+0x8fab009c, 0x1505021, 0x16a102b, 0x10400004,
+0xafaa0064, 0x8f420148, 0x1425023, 0xafaa0064,
+0x8f420368, 0x2442ffff, 0xaf420368, 0x8faa005c,
+0x240b0001, 0x8f420368, 0x154b0006, 0x240b0002,
+0x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
+0x8f42035c, 0x114b0006, 0x0, 0x8f420360,
+0x2442ffff, 0xaf420360, 0x10000005, 0x8f420360,
+0x8f420364, 0x2442ffff, 0xaf420364, 0x8f420364,
+0x8fab0054, 0x8faa004c, 0xad4b0000, 0x8f420044,
+0x8f440088, 0x8f430078, 0x24420001, 0x441024,
+0x24630001, 0xaf420044, 0xaf430078, 0x8faa006c,
+0x1540fe0b, 0x0, 0x8fab006c, 0x1160001e,
+0x0, 0x934205c4, 0x10400009, 0x0,
+0x8faa0064, 0xaf4a00c4, 0xaf4b00c0, 0x8fab007c,
+0xaf4b00c8, 0x8faa0074, 0x1000000e, 0xaf4a00cc,
+0x97ab008e, 0x1160000b, 0x34038100, 0x8fa20020,
+0x8c46000c, 0xa443000c, 0x97aa0086, 0x8c440004,
+0x8c450008, 0xa44a000e, 0xac440000, 0xac450004,
+0xac460008, 0x8f42034c, 0x24420001, 0xaf42034c,
+0x10000010, 0x8f42034c, 0x8fab007c, 0x3164ffff,
+0x2484fffc, 0x801821, 0x8f440250, 0x8f450254,
+0x8f460118, 0x1021, 0xa32821, 0xa3382b,
+0x822021, 0x872021, 0xaf440250, 0xc0f809,
+0xaf450254, 0x8fbf00c8, 0x8fbe00c4, 0x8fb500c0,
+0x8fb300bc, 0x8fb200b8, 0x8fb100b4, 0x8fb000b0,
+0x3e00008, 0x27bd00d0, 0x3e00008, 0x0,
+0x27bdff38, 0x240b0001, 0xafbf00c0, 0xafbe00bc,
+0xafb500b8, 0xafb300b4, 0xafb200b0, 0xafb100ac,
+0xafb000a8, 0xa3a00087, 0xafa00044, 0xafab005c,
+0x934205c4, 0xa7a00076, 0x10400007, 0xa7a0007e,
+0x8f4c00c0, 0xafac0064, 0x8f4b00c8, 0x8f5e00c4,
+0x10000130, 0xafab006c, 0x8f420114, 0x40f809,
+0x0, 0x403021, 0x10c002a1, 0x0,
+0x8cc20000, 0x8cc30004, 0xafa20020, 0xafa30024,
+0x8fac0024, 0x8fbe0020, 0x3182ffff, 0x2442fffc,
+0xafa20064, 0x3c020006, 0x2c21024, 0x14400015,
+0xafac006c, 0x93c20000, 0x30420001, 0x10400011,
+0x2402ffff, 0x8fc30000, 0x14620004, 0x3402ffff,
+0x97c30004, 0x1062000b, 0x0, 0xc0024bb,
+0x3c02021, 0x304200ff, 0x14400006, 0x0,
+0x8f420118, 0x40f809, 0x0, 0x10000280,
+0x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
+0x431024, 0x3c03ffff, 0x431824, 0x14600003,
+0xafa20024, 0x10000040, 0x8021, 0x3c020080,
+0x621024, 0x10400007, 0x0, 0x8f42038c,
+0x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
+0x24100001, 0x8f420210, 0x24420001, 0xaf420210,
+0x8f420210, 0x3c020001, 0x621024, 0x10400006,
+0x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
+0x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
+0x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
+0x8f42037c, 0x3c020004, 0x621024, 0x10400006,
+0x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
+0x8f420380, 0x3c020008, 0x621024, 0x10400006,
+0x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
+0x8f420384, 0x3c020010, 0x621024, 0x10400006,
+0x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
+0x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
+0x24100001, 0x8f420388, 0x24420001, 0xaf420388,
+0x8f420388, 0x24100001, 0x8c020260, 0x8fab0064,
+0x4b102b, 0x10400015, 0x320200ff, 0x8f4201e8,
+0x24420001, 0xaf4201e8, 0x8f4201e8, 0x8fac006c,
+0x8f8200e0, 0x358c0100, 0xafac006c, 0xafa20010,
+0x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
+0xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
+0xc002b3b, 0x34a53600, 0x320200ff, 0x10400010,
+0x3c020080, 0x2c21024, 0x1440000e, 0x32c20400,
+0x8fab006c, 0x3c020080, 0x34420100, 0x1621024,
+0x10400005, 0x0, 0x8f42020c, 0x24420001,
+0xaf42020c, 0x8f42020c, 0x10000202, 0x8fa30064,
+0x32c20400, 0x10400012, 0x34028100, 0x97c3000c,
+0x1462000f, 0x0, 0x240c0200, 0xa7ac0076,
+0x97c2000e, 0x8fc30008, 0x8fc40004, 0x8fab0064,
+0x8fc50000, 0x256bfffc, 0xafab0064, 0xa7a2007e,
+0xafc3000c, 0xafc40008, 0xafc50004, 0x27de0004,
+0x8fa70064, 0x320200ff, 0x14400034, 0x3c020100,
+0x97c4000c, 0x2c8305dd, 0x38828870, 0x2c420001,
+0x621825, 0x10600015, 0x2821, 0x32c20800,
+0x10400015, 0x24020800, 0x97c30014, 0x14620012,
+0x3402aaaa, 0x97c3000e, 0x14620007, 0x2021,
+0x97c30010, 0x24020300, 0x14620004, 0x801021,
+0x97c20012, 0x2c440001, 0x801021, 0x54400006,
+0x24050016, 0x10000004, 0x0, 0x24020800,
+0x50820001, 0x2405000e, 0x10a00013, 0x3c52021,
+0x24830009, 0x3c02001f, 0x3442ffff, 0x43102b,
+0x10400003, 0x0, 0x8f420148, 0x621823,
+0x90620000, 0x38430006, 0x2c630001, 0x38420011,
+0x2c420001, 0x621825, 0x10600004, 0x3c020100,
+0x94820002, 0x453821, 0x3c020100, 0x2c21024,
+0x5040000e, 0xafa70064, 0x8fac0064, 0x10ec0008,
+0x3c050007, 0x3c040001, 0x24846908, 0x8fa60064,
+0x34a54000, 0xafa00010, 0xc002b3b, 0xafa00014,
+0x8fab0064, 0x256b0004, 0xafab0064, 0x8f420080,
+0x8fac0064, 0x4c102b, 0x1040002c, 0x32c28000,
+0x10400034, 0x240b0003, 0x32c21000, 0x10400031,
+0xafab005c, 0x1000002e, 0x240c0004, 0x8f420350,
+0x2403ffbf, 0x283a024, 0x24420001, 0xaf420350,
+0x10000173, 0x8f420350, 0x3c020800, 0x2c2b025,
+0x2402ffbf, 0x282a024, 0x8f830128, 0x3c040001,
+0x248468d0, 0x26620001, 0xafa20014, 0xafa30010,
+0x8f860120, 0x8f870124, 0x3c050007, 0xc002b3b,
+0x34a55300, 0x10000162, 0x0, 0x8ea20000,
+0x8ea30004, 0x3c040001, 0x248468e8, 0xafb00010,
+0xafb10014, 0x8ea70018, 0x34a55900, 0xc002b3b,
+0x603021, 0x10000156, 0x0, 0x8f420084,
+0x8fab0064, 0x4b102b, 0x14400007, 0x3c020001,
+0x2c21024, 0x10400004, 0x0, 0x240c0002,
+0xafac005c, 0x8fab0064, 0x11600166, 0x27ac0020,
+0xafac008c, 0x8fab005c, 0x240c0001, 0x556c0021,
+0x240c0002, 0x8f430054, 0x8f420050, 0x1062000b,
+0x274b0054, 0x8f510054, 0x3403ecc0, 0xafab004c,
+0x26220001, 0x304201ff, 0xafa20054, 0x111140,
+0x431021, 0x1000006b, 0x2e2a821, 0x8f420044,
+0x8fac0064, 0x3c040001, 0x248468ac, 0xafac0014,
+0xafa20010, 0x8f460054, 0x8f470050, 0x3c050007,
+0xc002b3b, 0x34a54300, 0x8f430350, 0x2402ffbf,
+0x282a024, 0x24630001, 0xaf430350, 0x10000124,
+0x8f420350, 0x156c001d, 0x0, 0x8f430074,
+0x8f420070, 0x1062000a, 0x274b0074, 0x8f510074,
+0xafab004c, 0x26220001, 0x304203ff, 0xafa20054,
+0x111140, 0x24426cc0, 0x1000004a, 0x2e2a821,
+0x8f420044, 0x8fac0064, 0x3c040001, 0x248468b8,
+0x3c050007, 0xafac0014, 0xafa20010, 0x8f460074,
+0x8f470070, 0x34a54500, 0x240b0001, 0xc002b3b,
+0xafab005c, 0x1000ffc3, 0x0, 0x8f430064,
+0x8f420060, 0x1062001a, 0x274c0064, 0x8f510064,
+0x8fab005c, 0xafac004c, 0x26220001, 0x304200ff,
+0xafa20054, 0x24020004, 0x1562000e, 0x111140,
+0x111180, 0x24420cc0, 0x2e21021, 0xafa20044,
+0x9442002a, 0x8fac0044, 0x8fab0064, 0x4b102b,
+0x10400024, 0x25950020, 0x240c0001, 0x10000021,
+0xa3ac0087, 0x24424cc0, 0x1000001e, 0x2e2a821,
+0x8f420044, 0x8fab0064, 0x3c040001, 0x248468c4,
+0xafab0014, 0xafa20010, 0x8f460064, 0x8f470060,
+0x3c050007, 0xc002b3b, 0x34a54800, 0x3c020008,
+0x2c21024, 0x1440ff61, 0x0, 0x8f420370,
+0x240c0001, 0xafac005c, 0x24420001, 0xaf420370,
+0x1000ff90, 0x8f420370, 0x27a30036, 0x131040,
+0x621821, 0x94620000, 0x441021, 0x1000001f,
+0xa4620000, 0xaebe0018, 0x93a20087, 0x10400084,
+0x9821, 0x8fab0044, 0x8fa40064, 0x8fa3008c,
+0x25620020, 0xafa20028, 0x25620008, 0xafa20030,
+0x25620010, 0xafab002c, 0xafa20034, 0x9562002a,
+0xa7a20038, 0x95620018, 0xa7a2003a, 0x9562001a,
+0xa7a2003c, 0x9562001c, 0xa7a2003e, 0x94620018,
+0x24630002, 0x822023, 0x1880ffdf, 0x26730001,
+0x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
+0x262102a, 0x14400030, 0x24030001, 0x8f83012c,
+0x10600028, 0x0, 0x8f820124, 0x431023,
+0x22143, 0x58800001, 0x24840040, 0x8f820128,
+0x431023, 0x21943, 0x58600001, 0x24630040,
+0x64102a, 0x54400001, 0x602021, 0xaf4400fc,
+0x8f4200fc, 0x262102a, 0x10400016, 0x24030001,
+0x1000001a, 0x306200ff, 0x8fac008c, 0x101040,
+0x4c1021, 0x94470018, 0x101080, 0x4c1021,
+0xafbe0010, 0x8c420008, 0x3c040001, 0x248468dc,
+0x3c050007, 0x8c430004, 0x8c420000, 0x34a55500,
+0x2003021, 0xc002b3b, 0xafa30014, 0x10000039,
+0x0, 0x8f420334, 0x1821, 0x24420001,
+0xaf420334, 0x8f420334, 0x306200ff, 0x1040ff06,
+0x8021, 0x8f430008, 0x2402fbff, 0x1260002d,
+0x625024, 0x3c0b4000, 0x22b4025, 0x8fb1008c,
+0x2669ffff, 0x2209021, 0x8e420008, 0x96270018,
+0x8c440000, 0x8c450004, 0x56090004, 0x240b0001,
+0x240c0002, 0x10000002, 0xafac0010, 0xafab0010,
+0x16000004, 0xafa80014, 0x8f420008, 0x10000002,
+0xafa20018, 0xafaa0018, 0x8f42010c, 0x3c03021,
+0xafa80098, 0xafa9009c, 0x40f809, 0xafaa00a0,
+0x8fa80098, 0x8fa9009c, 0x8faa00a0, 0x1040ffc2,
+0x3c02001f, 0x96230018, 0x3442ffff, 0x3c3f021,
+0x5e102b, 0x10400003, 0x26310002, 0x8f420148,
+0x3c2f023, 0x26100001, 0x213102b, 0x1440ffda,
+0x26520004, 0x8fb00064, 0x1000001a, 0x0,
+0x96a3000a, 0x8fb00064, 0x70102b, 0x54400001,
+0x608021, 0x8ea40000, 0x8ea50004, 0x8fab005c,
+0x240c0002, 0xafac0010, 0x934305c4, 0xb1700,
+0x10600003, 0x2223025, 0x3c020800, 0xc23025,
+0xafa60014, 0x8f420008, 0xafa20018, 0x8f42010c,
+0x3c03021, 0x40f809, 0x2003821, 0x1040fecb,
+0x3c050007, 0x97ac0076, 0x11800007, 0x96a3000e,
+0x934205c4, 0x14400004, 0x0, 0x97ab007e,
+0x6c1825, 0xa6ab0016, 0x8fac006c, 0x3c02ffff,
+0x1821024, 0x10400003, 0xc1402, 0x34630400,
+0xa6a20014, 0xa6b0000a, 0x8fab0064, 0x560b0006,
+0x3d0f021, 0x34620004, 0xafa00064, 0xa6a2000e,
+0x1000000d, 0xa34005c4, 0x8fac0064, 0x3c02001f,
+0x3442ffff, 0x5e102b, 0x1906023, 0xafac0064,
+0xa6a3000e, 0x240b0001, 0x10400003, 0xa34b05c4,
+0x8f420148, 0x3c2f023, 0x8fab0054, 0x8fac004c,
+0xad8b0000, 0x8fac0064, 0x1580feba, 0x0,
+0x8fab0064, 0x1160001b, 0x0, 0x934205c4,
+0x10400006, 0x0, 0xaf5e00c4, 0xaf4b00c0,
+0x8fac006c, 0x1000000e, 0xaf4c00c8, 0x97ab0076,
+0x1160000b, 0x34038100, 0x8fa20020, 0x8c46000c,
+0xa443000c, 0x97ac007e, 0x8c440004, 0x8c450008,
+0xa44c000e, 0xac440000, 0xac450004, 0xac460008,
+0x8f42034c, 0x24420001, 0xaf42034c, 0x10000010,
+0x8f42034c, 0x8fab006c, 0x3164ffff, 0x2484fffc,
+0x801821, 0x8f440250, 0x8f450254, 0x8f460118,
+0x1021, 0xa32821, 0xa3382b, 0x822021,
+0x872021, 0xaf440250, 0xc0f809, 0xaf450254,
+0x8fbf00c0, 0x8fbe00bc, 0x8fb500b8, 0x8fb300b4,
+0x8fb200b0, 0x8fb100ac, 0x8fb000a8, 0x3e00008,
+0x27bd00c8, 0x3e00008, 0x0, 0x27bdffd8,
+0xafbf0024, 0xafb00020, 0x8f43004c, 0x8f420048,
+0x10620034, 0x0, 0x8f430048, 0x8f42004c,
+0x622023, 0x4820001, 0x24840200, 0x8f430054,
+0x8f42004c, 0x43102b, 0x14400004, 0x24020200,
+0x8f43004c, 0x10000005, 0x431023, 0x8f420054,
+0x8f43004c, 0x431023, 0x2442ffff, 0x405021,
+0x8a102a, 0x54400001, 0x805021, 0x8f49004c,
+0x8f48004c, 0x8f440188, 0x8f45018c, 0x8f46004c,
+0x24071000, 0xafa70010, 0x84140, 0x1001821,
+0x12a4821, 0x313001ff, 0xafb00014, 0x8f470014,
+0x1021, 0x63140, 0xafa70018, 0xa32821,
+0xa3382b, 0x822021, 0x872021, 0x3402ecc0,
+0xc23021, 0x8f420108, 0x2e63021, 0x40f809,
+0xa3940, 0x54400001, 0xaf50004c, 0x8f43004c,
+0x8f420048, 0x14620018, 0x0, 0x8f420000,
+0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
+0x1040fffd, 0x0, 0x10000005, 0x0,
+0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
+0x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
+0x8f420000, 0x10400003, 0x0, 0x10000002,
+0xaf80004c, 0xaf800048, 0x8fbf0024, 0x8fb00020,
+0x3e00008, 0x27bd0028, 0x3e00008, 0x0,
+0x27bdffd8, 0xafbf0024, 0xafb00020, 0x8f43005c,
+0x8f420058, 0x10620049, 0x0, 0x8f430058,
+0x8f42005c, 0x622023, 0x4820001, 0x24840100,
+0x8f430064, 0x8f42005c, 0x43102b, 0x14400004,
+0x24020100, 0x8f43005c, 0x10000005, 0x431023,
+0x8f420064, 0x8f43005c, 0x431023, 0x2442ffff,
+0x403821, 0x87102a, 0x54400001, 0x803821,
+0x8f42005c, 0x471021, 0x305000ff, 0x32c21000,
+0x10400015, 0x24082000, 0x8f49005c, 0x8f440190,
+0x8f450194, 0x8f46005c, 0x73980, 0xafa80010,
+0xafb00014, 0x8f480014, 0x94980, 0x1201821,
+0x1021, 0xa32821, 0xa3482b, 0x822021,
+0x892021, 0x63180, 0xafa80018, 0x8f420108,
+0x10000014, 0x24c60cc0, 0x8f49005c, 0x8f440190,
+0x8f450194, 0x8f46005c, 0x73940, 0xafa80010,
+0xafb00014, 0x8f480014, 0x94940, 0x1201821,
+0x1021, 0xa32821, 0xa3482b, 0x822021,
+0x892021, 0x63140, 0xafa80018, 0x8f420108,
+0x24c64cc0, 0x40f809, 0x2e63021, 0x54400001,
+0xaf50005c, 0x8f43005c, 0x8f420058, 0x14620018,
+0x0, 0x8f420000, 0x10400007, 0x0,
+0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
+0x10000005, 0x0, 0xaf800048, 0x8f820048,
+0x1040fffd, 0x0, 0x8f820060, 0x2403feff,
+0x431024, 0xaf820060, 0x8f420000, 0x10400003,
+0x0, 0x10000002, 0xaf80004c, 0xaf800048,
+0x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
+0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
+0xafb00020, 0x8f43006c, 0x8f420068, 0x10620033,
+0x0, 0x8f430068, 0x8f42006c, 0x622023,
+0x4820001, 0x24840400, 0x8f430074, 0x8f42006c,
+0x43102b, 0x14400004, 0x24020400, 0x8f43006c,
+0x10000005, 0x431023, 0x8f420074, 0x8f43006c,
+0x431023, 0x2442ffff, 0x405021, 0x8a102a,
+0x54400001, 0x805021, 0x8f49006c, 0x8f48006c,
+0x8f440198, 0x8f45019c, 0x8f46006c, 0x24074000,
+0xafa70010, 0x84140, 0x1001821, 0x12a4821,
+0x313003ff, 0xafb00014, 0x8f470014, 0x1021,
+0x63140, 0x24c66cc0, 0xafa70018, 0xa32821,
+0xa3382b, 0x822021, 0x872021, 0x8f420108,
+0x2e63021, 0x40f809, 0xa3940, 0x54400001,
+0xaf50006c, 0x8f43006c, 0x8f420068, 0x14620018,
+0x0, 0x8f420000, 0x10400007, 0x0,
+0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
+0x10000005, 0x0, 0xaf800048, 0x8f820048,
+0x1040fffd, 0x0, 0x8f820060, 0x2403f7ff,
+0x431024, 0xaf820060, 0x8f420000, 0x10400003,
+0x0, 0x10000002, 0xaf80004c, 0xaf800048,
+0x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
+0x3e00008, 0x0, 0x8f4200fc, 0x3c030001,
+0x8f4400f8, 0x346330c8, 0x24420001, 0xaf4200fc,
+0x8f850128, 0x2e31021, 0x54820004, 0x24820008,
+0x3c020001, 0x34422ec8, 0x2e21021, 0x401821,
+0xaf4300f8, 0xac600000, 0x8f4200f4, 0x14620004,
+0x3c020001, 0x24a20020, 0x1000000f, 0xaf820128,
+0x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
+0x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
+0x401821, 0x8c620004, 0x21140, 0xa21021,
+0xaf820128, 0xac600000, 0x8ca30018, 0x30620070,
+0x1040002d, 0x30620020, 0x10400004, 0x3c020010,
+0x2c21024, 0x1040000d, 0x0, 0x30620040,
+0x10400004, 0x3c020020, 0x2c21024, 0x10400007,
+0x0, 0x30620010, 0x1040001f, 0x3c020040,
+0x2c21024, 0x1440001c, 0x0, 0x8f820040,
+0x30420001, 0x14400008, 0x2021, 0x8c030104,
+0x24020001, 0x50620005, 0x24040001, 0x8c020264,
+0x10400003, 0x801021, 0x24040001, 0x801021,
+0x10400006, 0x0, 0x8f42030c, 0x24420001,
+0xaf42030c, 0x10000008, 0x8f42030c, 0x8f820044,
+0x34420004, 0xaf820044, 0x8f420308, 0x24420001,
+0xaf420308, 0x8f420308, 0x3e00008, 0x0,
+0x3e00008, 0x0, 0x27bdff98, 0xafbf0060,
+0xafbe005c, 0xafb50058, 0xafb30054, 0xafb20050,
+0xafb1004c, 0xafb00048, 0x8f4200fc, 0x24420001,
+0xaf4200fc, 0x8f880128, 0x25020020, 0xaf820128,
+0x8d030018, 0x30620070, 0x1040002e, 0x30620020,
+0x10400004, 0x3c020010, 0x2c21024, 0x1040000d,
+0x0, 0x30620040, 0x10400004, 0x3c020020,
+0x2c21024, 0x10400007, 0x0, 0x30620010,
+0x104001a9, 0x3c020040, 0x2c21024, 0x144001a6,
+0x0, 0x8f820040, 0x30420001, 0x14400008,
+0x2021, 0x8c030104, 0x24020001, 0x50620005,
+0x24040001, 0x8c020264, 0x10400003, 0x801021,
+0x24040001, 0x801021, 0x10400006, 0x0,
+0x8f42030c, 0x24420001, 0xaf42030c, 0x10000192,
+0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
+0x8f420308, 0x24420001, 0xaf420308, 0x1000018a,
+0x8f420308, 0x30620002, 0x1040014b, 0x3c020800,
+0x8d1e001c, 0x1e5702, 0xafaa0034, 0x950a0016,
+0x3c22024, 0xafaa0024, 0x8faa0034, 0x24020001,
+0x15420006, 0x33deffff, 0x1e1140, 0x3403ecc0,
+0x431021, 0x10000010, 0x2e2a821, 0x24020002,
+0x15420005, 0x24020003, 0x1e1140, 0x24426cc0,
+0x10000009, 0x2e2a821, 0x15420005, 0x1e1180,
+0x1e1140, 0x24424cc0, 0x10000003, 0x2e2a821,
+0x571021, 0x24550ce0, 0x96a2000e, 0x304afffc,
+0x30420400, 0x10400003, 0xafaa002c, 0x100000e1,
+0x8821, 0x10800004, 0x8821, 0x97b10026,
+0x100000dd, 0xa6b10012, 0x8eb30018, 0x966a000c,
+0xa7aa003e, 0x97a5003e, 0x2ca305dd, 0x38a28870,
+0x2c420001, 0x621825, 0x10600015, 0x2021,
+0x32c20800, 0x10400015, 0x24020800, 0x96630014,
+0x14620012, 0x3402aaaa, 0x9663000e, 0x14620007,
+0x2821, 0x96630010, 0x24020300, 0x14620004,
+0xa01021, 0x96620012, 0x2c450001, 0xa01021,
+0x54400006, 0x24040016, 0x10000004, 0x0,
+0x24020800, 0x50a20001, 0x2404000e, 0x108000b9,
+0x2649021, 0x92420000, 0x3042000f, 0x28080,
+0x32c20100, 0x10400020, 0x2501821, 0x3c020020,
+0x43102b, 0x1440000e, 0x2402021, 0x2821,
+0x94820000, 0x24840002, 0xa22821, 0x83102b,
+0x1440fffb, 0x30a2ffff, 0x51c02, 0x622821,
+0x51c02, 0x30a2ffff, 0x10000009, 0x622821,
+0x8f470148, 0x8f420110, 0x102842, 0x3c060020,
+0x40f809, 0xafa80040, 0x3045ffff, 0x8fa80040,
+0x50a00001, 0x3405ffff, 0x8faa002c, 0x354a0002,
+0x10000002, 0xafaa002c, 0x2821, 0x32c20080,
+0x10400090, 0xa6a50010, 0x26430009, 0x3c02001f,
+0x3442ffff, 0x43102b, 0x10400003, 0x0,
+0x8f420148, 0x621823, 0x90660000, 0x30c200ff,
+0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
+0x621825, 0x1060007f, 0x24020800, 0x8821,
+0x97a3003e, 0x1462000f, 0x2602021, 0x96710000,
+0x96620002, 0x96630004, 0x96640006, 0x2228821,
+0x2238821, 0x2248821, 0x96620008, 0x9663000a,
+0x9664000c, 0x2228821, 0x2238821, 0x10000007,
+0x2248821, 0x94820000, 0x24840002, 0x2228821,
+0x92102b, 0x1440fffb, 0x0, 0x111c02,
+0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
+0x628821, 0x32c20200, 0x10400003, 0x26440006,
+0x1000003e, 0x8021, 0x3c05001f, 0x34a5ffff,
+0xa4102b, 0x10400003, 0x0, 0x8f420148,
+0x822023, 0x94820000, 0x30421fff, 0x10400004,
+0x2644000c, 0x96420002, 0x10000030, 0x508023,
+0x96420002, 0x26430014, 0x508023, 0x3c020020,
+0x43102b, 0x1440000a, 0xd08021, 0x9642000c,
+0x2028021, 0x9642000e, 0x96430010, 0x96440012,
+0x2028021, 0x2038021, 0x10000020, 0x2048021,
+0xa4102b, 0x10400003, 0x0, 0x8f420148,
+0x822023, 0x94820000, 0x24840002, 0x2028021,
+0xa4102b, 0x10400003, 0x0, 0x8f420148,
+0x822023, 0x94820000, 0x24840002, 0x2028021,
+0xa4102b, 0x10400003, 0x0, 0x8f420148,
+0x822023, 0x94820000, 0x24840002, 0x2028021,
+0xa4102b, 0x10400003, 0x0, 0x8f420148,
+0x822023, 0x94820000, 0x2028021, 0x3c020100,
+0x2c21024, 0x1040000e, 0x0, 0x8faa002c,
+0x31420004, 0x1040000a, 0x0, 0x9504000e,
+0x2642021, 0xc003eec, 0x2484fffc, 0x3042ffff,
+0x2228821, 0x111c02, 0x3222ffff, 0x628821,
+0x8faa0024, 0x1518823, 0x111402, 0x2228821,
+0x2308821, 0x111402, 0x2228821, 0x3231ffff,
+0x52200001, 0x3411ffff, 0x8faa002c, 0x354a0001,
+0xafaa002c, 0xa6b10012, 0x97aa002e, 0xa6aa000e,
+0x8faa002c, 0x31420004, 0x10400002, 0x24091000,
+0x34098000, 0x8f480044, 0x8f4401a0, 0x8f4501a4,
+0xafa90010, 0x8f490044, 0x84140, 0x1001821,
+0xafa90014, 0x8f48000c, 0x2a03021, 0x24070020,
+0xafa80018, 0x8f48010c, 0x1021, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x1440000b, 0x0, 0x8f820128, 0x3c040001,
+0x24846914, 0xafbe0014, 0xafa20010, 0x8f860124,
+0x8f870120, 0x3c050007, 0xc002b3b, 0x34a59920,
+0x8f420368, 0x2442ffff, 0xaf420368, 0x8f420044,
+0x8f430088, 0x24420001, 0x431024, 0xaf420044,
+0x8faa0034, 0x8f440368, 0x24020001, 0x15420006,
+0x24020002, 0x8f42035c, 0x2442ffff, 0xaf42035c,
+0x10000049, 0x8f42035c, 0x15420006, 0x0,
+0x8f420364, 0x2442ffff, 0xaf420364, 0x10000042,
+0x8f420364, 0x8f420360, 0x2442ffff, 0xaf420360,
+0x1000003d, 0x8f420360, 0x30621000, 0x10400005,
+0x30628000, 0x8f420078, 0x24420001, 0x10000036,
+0xaf420078, 0x10400034, 0x0, 0x8f420078,
+0x24420001, 0xaf420078, 0x8c030240, 0x43102b,
+0x1440002d, 0x24070008, 0x8f440168, 0x8f45016c,
+0x8f430044, 0x8f48000c, 0x8f860120, 0x24020040,
+0xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
+0x40f809, 0x24c6001c, 0x14400011, 0x24020001,
+0x3c010001, 0x370821, 0xa02240f2, 0x8f820124,
+0xafa20010, 0x8f820128, 0x3c040001, 0x2484688c,
+0xafa20014, 0x8f460044, 0x8f870120, 0x3c050009,
+0xc002b3b, 0x34a51300, 0x1000000b, 0x0,
+0x8f420304, 0x24420001, 0xaf420304, 0x8f420304,
+0x8f420044, 0xaf42007c, 0x3c010001, 0x370821,
+0xa02040f2, 0xaf400078, 0x8f420318, 0x24420001,
+0xaf420318, 0x8f420318, 0x8fbf0060, 0x8fbe005c,
+0x8fb50058, 0x8fb30054, 0x8fb20050, 0x8fb1004c,
+0x8fb00048, 0x3e00008, 0x27bd0068, 0x3e00008,
+0x0, 0x0, 0x0, 0x8f42013c,
+0xaf8200c0, 0x8f42013c, 0xaf8200c4, 0x8f42013c,
+0xaf8200c8, 0x8f420138, 0xaf8200d0, 0x8f420138,
+0xaf8200d4, 0x8f420138, 0x3e00008, 0xaf8200d8,
+0x27bdffe0, 0x27840208, 0x24050200, 0xafbf0018,
+0xc002bbf, 0x24060008, 0x8c020204, 0xc004012,
+0xaf820210, 0x3c020001, 0x8c426d94, 0x30420002,
+0x1040000e, 0x2021, 0x8c060248, 0x24020002,
+0x3c010001, 0xac226d98, 0xc005104, 0x24050002,
+0x2021, 0x8c060248, 0x24020001, 0x3c010001,
+0xac226d98, 0x10000011, 0x24050001, 0x8c060248,
+0x24020004, 0x3c010001, 0xac226d98, 0xc005104,
+0x24050004, 0x3c020001, 0x8c426d94, 0x30420001,
+0x10400008, 0x24020001, 0x3c010001, 0xac226d98,
+0x2021, 0x24050001, 0x3c06601b, 0xc005104,
+0x0, 0x3c040001, 0x248469d0, 0x8f420150,
+0x8f430154, 0x3c050008, 0x8f460158, 0x21640,
+0x31940, 0x34630403, 0x431025, 0x633c0,
+0x461025, 0xaf82021c, 0xafa00010, 0xafa00014,
+0x8f86021c, 0x34a50200, 0xc002b3b, 0x3821,
+0x3c010001, 0xac206d90, 0x3c010001, 0xac206da8,
+0x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
+0x3c050008, 0x34a50300, 0xafbf0018, 0xafa00010,
+0xafa00014, 0x8f860200, 0x3c040001, 0x248469dc,
+0xc002b3b, 0x3821, 0x8f420410, 0x24420001,
+0xaf420410, 0x8f420410, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x27bdffd8, 0xafbf0020, 0xafb1001c,
+0xafb00018, 0x8f4203a4, 0x24420001, 0xaf4203a4,
+0x8f4203a4, 0x8f900220, 0x8f8200e0, 0xafa20010,
+0x8f8200e4, 0xafa20014, 0x8f8600c4, 0x8f8700c8,
+0x3c040001, 0x248469e8, 0xc002b3b, 0x2002821,
+0x3c044000, 0x2041024, 0x504000b4, 0x3c040100,
+0x8f4203bc, 0x24420001, 0xaf4203bc, 0x8f4203bc,
+0x8f8700c4, 0x8f8300c8, 0x8f420148, 0x671823,
+0x43102b, 0x10400003, 0x0, 0x8f420148,
+0x621821, 0x10600005, 0x0, 0x8f42014c,
+0x43102b, 0x1040000b, 0x0, 0x8f8200e0,
+0x8f430124, 0xaf42011c, 0xaf430114, 0x8f820220,
+0x3c0308ff, 0x3463fffb, 0x431024, 0x100000ce,
+0x441025, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x34420004, 0xaf820220, 0x8f8200e0,
+0x8f430124, 0xaf42011c, 0xaf430114, 0x8f8600c8,
+0x8f840120, 0x8f830124, 0x10000005, 0x2821,
+0x14620002, 0x24620020, 0x27624800, 0x401821,
+0x1064000c, 0x30a200ff, 0x8c620018, 0x30420003,
+0x1040fff7, 0x27624fe0, 0x8f4203d0, 0x24050001,
+0x24420001, 0xaf4203d0, 0x8f4203d0, 0x8c660008,
+0x30a200ff, 0x14400058, 0x0, 0x934205c4,
+0x14400055, 0x0, 0x8f8700c4, 0x8f8800e0,
+0x8f8400e4, 0x2402fff8, 0x1024024, 0x1041023,
+0x218c3, 0x4620001, 0x24630200, 0x10600005,
+0x24020001, 0x10620009, 0x0, 0x1000001f,
+0x0, 0x8f4203c0, 0xe03021, 0x24420001,
+0xaf4203c0, 0x10000040, 0x8f4203c0, 0x8f4203c4,
+0x24420001, 0xaf4203c4, 0x8c860000, 0x8f420148,
+0x8f4303c4, 0xe61823, 0x43102b, 0x10400004,
+0x2c62233f, 0x8f420148, 0x621821, 0x2c62233f,
+0x14400031, 0x0, 0x8f42020c, 0x24420001,
+0xaf42020c, 0x8f42020c, 0xe03021, 0x24820008,
+0xaf8200e4, 0x10000028, 0xaf8200e8, 0x8f4203c8,
+0x24420001, 0xaf4203c8, 0x8f4203c8, 0x8c850000,
+0x8f420148, 0xa71823, 0x43102b, 0x10400003,
+0x0, 0x8f420148, 0x621821, 0x8f42014c,
+0x43102b, 0x5440000a, 0xa03021, 0x8f42020c,
+0x24420001, 0xaf42020c, 0x8f42020c, 0x24820008,
+0xaf8200e4, 0x8f8400e4, 0x1488ffec, 0xaf8400e8,
+0x1488000d, 0x27623000, 0x14820002, 0x2482fff8,
+0x27623ff8, 0x94430006, 0x3c02001f, 0x3442ffff,
+0xc33021, 0x46102b, 0x10400003, 0x0,
+0x8f420148, 0xc23023, 0xaf8600c8, 0x8f8300c4,
+0x8f420148, 0xc31823, 0x43102b, 0x10400003,
+0x0, 0x8f420148, 0x621821, 0x10600005,
+0x0, 0x8f42014c, 0x43102b, 0x50400008,
+0x3c02fdff, 0x8f820220, 0x3c0308ff, 0x3463fffb,
+0x431024, 0x3c034000, 0x1000003f, 0x431025,
+0x8f4303cc, 0x3442ffff, 0x282a024, 0x24630001,
+0xaf4303cc, 0x10000039, 0x8f4203cc, 0x2041024,
+0x1040000e, 0x3c110200, 0x8f4203a8, 0x24420001,
+0xaf4203a8, 0x8f4203a8, 0x8f820220, 0x3c0308ff,
+0x3463ffff, 0x431024, 0x441025, 0xc003daf,
+0xaf820220, 0x10000029, 0x0, 0x2111024,
+0x50400008, 0x3c110400, 0x8f4203ac, 0x24420001,
+0xaf4203ac, 0xc003daf, 0x8f4203ac, 0x10000019,
+0x0, 0x2111024, 0x1040001c, 0x0,
+0x8f830224, 0x24021402, 0x14620009, 0x3c050008,
+0x3c040001, 0x248469f4, 0xafa00010, 0xafa00014,
+0x8f860224, 0x34a50500, 0xc002b3b, 0x3821,
+0x8f4203b0, 0x24420001, 0xaf4203b0, 0x8f4203b0,
+0x8f820220, 0x2002021, 0x34420002, 0xc004e9c,
+0xaf820220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x511025, 0xaf820220, 0x8fbf0020,
+0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0028,
+0x3e00008, 0x0, 0x3c020001, 0x8c426da8,
+0x27bdffb0, 0xafbf0048, 0xafbe0044, 0xafb50040,
+0xafb3003c, 0xafb20038, 0xafb10034, 0x1040000f,
+0xafb00030, 0x3c040001, 0x24846a00, 0x3c050008,
+0xafa00010, 0xafa00014, 0x8f860220, 0x34a50600,
+0x24020001, 0x3c010001, 0xac206da8, 0x3c010001,
+0xac226d9c, 0xc002b3b, 0x3821, 0x3c037fff,
+0x8c020268, 0x3463ffff, 0x3c04fdff, 0x431024,
+0xac020268, 0x8f420004, 0x3484ffff, 0x30420002,
+0x10400092, 0x284a024, 0x3c040600, 0x34842000,
+0x8f420004, 0x2821, 0x2403fffd, 0x431024,
+0xaf420004, 0xafa40020, 0x8f5e0018, 0x27aa0020,
+0x240200ff, 0x13c20002, 0xafaa002c, 0x27c50001,
+0x8c020228, 0xa09021, 0x1642000e, 0x1e38c0,
+0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
+0x8c020228, 0x3c040001, 0x24846998, 0x3c050009,
+0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006d,
+0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
+0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
+0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
+0x9821, 0xe08821, 0x263504c0, 0x8f440178,
+0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
+0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
+0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
+0xa3482b, 0x822021, 0x100f809, 0x892021,
+0x54400006, 0x24130001, 0x8f820054, 0x2021023,
+0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
+0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
+0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
+0xafa20010, 0x8f820124, 0x3c040001, 0x248469a4,
+0x3c050009, 0xafa20014, 0x8d460000, 0x10000035,
+0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
+0xaf420308, 0x8f420308, 0x1000001e, 0x326200ff,
+0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
+0x2c4203e9, 0x10400016, 0x9821, 0x3c150020,
+0x24110010, 0x8f42000c, 0x8f440160, 0x8f450164,
+0x8f860120, 0xafb10010, 0xafb20014, 0x551025,
+0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
+0x24c6001c, 0x1440ffe3, 0x0, 0x8f820054,
+0x2021023, 0x2c4203e9, 0x1440ffee, 0x0,
+0x326200ff, 0x14400011, 0x0, 0x8f420378,
+0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
+0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
+0x248469ac, 0x3c050009, 0xafa20014, 0x8d460000,
+0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202ec,
+0x24420001, 0xaf4202ec, 0x8f4202ec, 0x8fbf0048,
+0x8fbe0044, 0x8fb50040, 0x8fb3003c, 0x8fb20038,
+0x8fb10034, 0x8fb00030, 0x3e00008, 0x27bd0050,
+0x3c020001, 0x8c426da8, 0x27bdffe0, 0x1440000d,
+0xafbf0018, 0x3c040001, 0x24846a0c, 0x3c050008,
+0xafa00010, 0xafa00014, 0x8f860220, 0x34a50700,
+0x24020001, 0x3c010001, 0xac226da8, 0xc002b3b,
+0x3821, 0x3c020004, 0x2c21024, 0x10400007,
+0x0, 0x8f820220, 0x3c0308ff, 0x3463ffff,
+0x431024, 0x34420008, 0xaf820220, 0x3c050001,
+0x8ca56d98, 0x24020001, 0x14a20007, 0x2021,
+0xc00529b, 0x24050001, 0xac02026c, 0x8c03026c,
+0x10000006, 0x3c020007, 0xc00529b, 0x2021,
+0xac020268, 0x8c030268, 0x3c020007, 0x621824,
+0x3c020002, 0x5062000d, 0x3c0205f5, 0x43102b,
+0x14400006, 0x3c020004, 0x3c020001, 0x10620009,
+0x3c020098, 0x1000000b, 0x0, 0x14620009,
+0x3c023b9a, 0x10000004, 0x3442ca00, 0x10000002,
+0x3442e100, 0x34429680, 0xaf4201fc, 0x8f4201fc,
+0xaee20064, 0x8fbf0018, 0x3e00008, 0x27bd0020,
+0x0, 0x0, 0x0, 0x86102b,
+0x50400001, 0x872023, 0xc41023, 0x24843,
+0x125102b, 0x1040001b, 0x91040, 0x824021,
+0x88102b, 0x10400007, 0x1821, 0x94820000,
+0x24840002, 0x621821, 0x88102b, 0x1440fffb,
+0x0, 0x602021, 0xc73023, 0xa91023,
+0x21040, 0xc22821, 0xc5102b, 0x10400007,
+0x1821, 0x94c20000, 0x24c60002, 0x621821,
+0xc5102b, 0x1440fffb, 0x0, 0x1000000d,
+0x832021, 0x51040, 0x822821, 0x85102b,
+0x10400007, 0x1821, 0x94820000, 0x24840002,
+0x621821, 0x85102b, 0x1440fffb, 0x0,
+0x602021, 0x41c02, 0x3082ffff, 0x622021,
+0x41c02, 0x3082ffff, 0x622021, 0x3e00008,
+0x3082ffff, 0x3e00008, 0x0, 0x802821,
+0x30a20001, 0x1040002b, 0x3c03001f, 0x3463ffff,
+0x24a20004, 0x62102b, 0x54400007, 0x65102b,
+0x90a20001, 0x90a40003, 0x90a30000, 0x90a50002,
+0x1000002a, 0x441021, 0x10400003, 0x0,
+0x8f420148, 0xa22823, 0x90a40000, 0x24a50001,
+0x65102b, 0x10400003, 0x0, 0x8f420148,
+0xa22823, 0x90a20000, 0x24a50001, 0x21200,
+0x822021, 0x65102b, 0x10400003, 0x0,
+0x8f420148, 0xa22823, 0x90a20000, 0x24a50001,
+0x822021, 0x65102b, 0x10400003, 0x0,
+0x8f420148, 0xa22823, 0x90a20000, 0x1000002d,
+0x21200, 0x3463ffff, 0x24a20004, 0x62102b,
+0x5440000a, 0x65102b, 0x90a20000, 0x90a40002,
+0x90a30001, 0x90a50003, 0x441021, 0x21200,
+0x651821, 0x10000020, 0x432021, 0x10400003,
+0x0, 0x8f420148, 0xa22823, 0x90a20000,
+0x24a50001, 0x22200, 0x65102b, 0x10400003,
+0x0, 0x8f420148, 0xa22823, 0x90a20000,
+0x24a50001, 0x822021, 0x65102b, 0x10400003,
+0x0, 0x8f420148, 0xa22823, 0x90a20000,
+0x24a50001, 0x21200, 0x822021, 0x65102b,
+0x10400003, 0x0, 0x8f420148, 0xa22823,
+0x90a20000, 0x822021, 0x41c02, 0x3082ffff,
+0x622021, 0x41c02, 0x3082ffff, 0x622021,
+0x3e00008, 0x3082ffff, 0x0, 0x8f820220,
+0x34420002, 0xaf820220, 0x3c020002, 0x8c428ff8,
+0x30424000, 0x10400054, 0x24040001, 0x8f820200,
+0x24067fff, 0x8f830200, 0x30450002, 0x2402fffd,
+0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x8f820224, 0x1444004d, 0x42040, 0xc4102b,
+0x1040fff1, 0x0, 0x8f820200, 0x451025,
+0xaf820200, 0x8f820220, 0x34428000, 0xaf820220,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
+0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
+0x0, 0x8f820220, 0x3c030004, 0x431024,
+0x1440000f, 0x0, 0x8f820220, 0x3c03ffff,
+0x34637fff, 0x431024, 0xaf820220, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x8f820220, 0x3c030004, 0x431024, 0x1440000d,
+0x0, 0x8f820220, 0x34428000, 0xaf820220,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
+0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
+0x0, 0x8f820220, 0x3c030004, 0x431024,
+0x1040001b, 0x1021, 0x8f830220, 0x24020001,
+0x10000015, 0x3c04f700, 0x8f820220, 0x3c04f700,
+0x441025, 0xaf820220, 0x8f820220, 0x2403fffd,
+0x431024, 0xaf820220, 0x8f820220, 0x3c030300,
+0x431024, 0x14400003, 0x0, 0x10000008,
+0x1021, 0x8f820220, 0x34420002, 0xaf820220,
+0x8f830220, 0x24020001, 0x641825, 0xaf830220,
+0x3e00008, 0x0, 0x2021, 0x3c050100,
+0x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
+0x27625000, 0xaf8200c0, 0x27625000, 0xaf8200c4,
+0x27625000, 0xaf8200c8, 0x27625000, 0xaf8200d0,
+0x27625000, 0xaf8200d4, 0x27625000, 0xaf8200d8,
+0x27623000, 0xaf8200e0, 0x27623000, 0xaf8200e4,
+0x27623000, 0xaf8200e8, 0x27622800, 0xaf8200f0,
+0x27622800, 0xaf8200f4, 0x27622800, 0xaf8200f8,
+0x418c0, 0x24840001, 0x3631021, 0xac453004,
+0x3631021, 0xac403000, 0x28820200, 0x1440fff9,
+0x418c0, 0x2021, 0x418c0, 0x24840001,
+0x3631021, 0xac402804, 0x3631021, 0xac402800,
+0x28820100, 0x1440fff9, 0x418c0, 0xaf80023c,
+0x24030080, 0x24040100, 0xac600000, 0x24630004,
+0x64102b, 0x5440fffd, 0xac600000, 0x8f830040,
+0x3c02f000, 0x621824, 0x3c025000, 0x1062000c,
+0x43102b, 0x14400006, 0x3c026000, 0x3c024000,
+0x10620008, 0x24020800, 0x10000008, 0x0,
+0x10620004, 0x24020800, 0x10000004, 0x0,
+0x24020700, 0x3c010001, 0xac226dac, 0x3e00008,
+0x0, 0x3c020001, 0x8c426dbc, 0x27bdffd0,
+0xafbf002c, 0xafb20028, 0xafb10024, 0xafb00020,
+0x3c010001, 0x10400005, 0xac206d94, 0xc004d9e,
+0x0, 0x3c010001, 0xac206dbc, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
+0x621023, 0x2c420065, 0x1440fffc, 0x0,
+0xc004db9, 0x0, 0x24040001, 0x2821,
+0x27a60018, 0x34028000, 0xc0045be, 0xa7a20018,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
+0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
+0x24040001, 0x3c060001, 0x24c66f24, 0xc00457c,
+0x24050002, 0x8f830054, 0x8f820054, 0x10000002,
+0x24630064, 0x8f820054, 0x621023, 0x2c420065,
+0x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
+0x26106f26, 0xc00457c, 0x2003021, 0x97a60018,
+0x3c070001, 0x94e76f24, 0x3c040001, 0x24846ae0,
+0xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
+0xc002b3b, 0xafa20010, 0x97a20018, 0x1040004d,
+0x24036040, 0x96020000, 0x3042fff0, 0x1443000c,
+0x24020020, 0x3c030001, 0x94636f24, 0x1462000b,
+0x24027830, 0x24020003, 0x3c010001, 0xac226d94,
+0x24020005, 0x3c010001, 0x1000003f, 0xac226f34,
+0x3c030001, 0x94636f24, 0x24027830, 0x1462000c,
+0x24030010, 0x3c020001, 0x94426f26, 0x3042fff0,
+0x14430007, 0x24020003, 0x3c010001, 0xac226d94,
+0x24020006, 0x3c010001, 0x1000002f, 0xac226f34,
+0x3c020001, 0x8c426d94, 0x3c030001, 0x94636f24,
+0x34420001, 0x3c010001, 0xac226d94, 0x24020015,
+0x1462000b, 0x0, 0x3c020001, 0x94426f26,
+0x3042fff0, 0x3843f420, 0x2c630001, 0x3842f430,
+0x2c420001, 0x621825, 0x1460001b, 0x24020003,
+0x3c030001, 0x94636f24, 0x24027810, 0x14620016,
+0x24020002, 0x3c020001, 0x94426f26, 0x3042fff0,
+0x14400011, 0x24020002, 0x1000000f, 0x24020004,
+0x3c020001, 0x8c426d94, 0x34420008, 0x3c010001,
+0xac226d94, 0x1000005e, 0x24020004, 0x3c020001,
+0x8c426d94, 0x34420004, 0x3c010001, 0x100000af,
+0xac226d94, 0x24020001, 0x3c010001, 0xac226f40,
+0x3c020001, 0x8c426d94, 0x30420002, 0x144000b2,
+0x3c09fff0, 0x24020e00, 0xaf820238, 0x8f840054,
+0x8f820054, 0x24030008, 0x3c010001, 0xac236d98,
+0x10000002, 0x248401f4, 0x8f820054, 0x821023,
+0x2c4201f5, 0x1440fffc, 0x3c0200c8, 0x344201fb,
+0xaf820238, 0x8f830054, 0x8f820054, 0x10000002,
+0x246301f4, 0x8f820054, 0x621023, 0x2c4201f5,
+0x1440fffc, 0x8021, 0x24120001, 0x24110009,
+0xc004482, 0x0, 0x3c010001, 0xac326db4,
+0xc004547, 0x0, 0x3c020001, 0x8c426db4,
+0x1451fffb, 0x3c0200c8, 0x344201f6, 0xaf820238,
+0x8f830054, 0x8f820054, 0x10000002, 0x2463000a,
+0x8f820054, 0x621023, 0x2c42000b, 0x1440fffc,
+0x0, 0x8f820220, 0x24040001, 0x34420002,
+0xaf820220, 0x8f830200, 0x24057fff, 0x2402fffd,
+0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x8f820224, 0x14440005, 0x34028000, 0x42040,
+0xa4102b, 0x1040fff0, 0x34028000, 0x1082ffa0,
+0x26100001, 0x2e020014, 0x1440ffcd, 0x24020004,
+0x3c010001, 0xac226d98, 0x8021, 0x24120009,
+0x3c11ffff, 0x36313f7f, 0xc004482, 0x0,
+0x24020001, 0x3c010001, 0xac226db4, 0xc004547,
+0x0, 0x3c020001, 0x8c426db4, 0x1452fffb,
+0x0, 0x8f820044, 0x511024, 0x34425080,
+0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
+0x2463000a, 0x8f820054, 0x621023, 0x2c42000b,
+0x1440fffc, 0x0, 0x8f820044, 0x511024,
+0x3442f080, 0xaf820044, 0x8f830054, 0x8f820054,
+0x10000002, 0x2463000a, 0x8f820054, 0x621023,
+0x2c42000b, 0x1440fffc, 0x0, 0x8f820220,
+0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
+0x621023, 0x2c420065, 0x1440fffc, 0x0,
+0x8f820220, 0x24040001, 0x34420002, 0xaf820220,
+0x8f830200, 0x24057fff, 0x2402fffd, 0x621824,
+0xaf830200, 0xaf840204, 0x8f830054, 0x8f820054,
+0x10000002, 0x24630001, 0x8f820054, 0x621023,
+0x2c420002, 0x1440fffc, 0x0, 0x8f820224,
+0x14440005, 0x34028000, 0x42040, 0xa4102b,
+0x1040fff0, 0x34028000, 0x1082ff50, 0x26100001,
+0x2e020064, 0x1440ffb0, 0x0, 0x3c020001,
+0x8c426d94, 0x30420004, 0x14400007, 0x3c09fff0,
+0x8f820044, 0x3c03ffff, 0x34633f7f, 0x431024,
+0xaf820044, 0x3c09fff0, 0x3529bdc0, 0x3c060001,
+0x8cc66d94, 0x3c040001, 0x24846ae0, 0x24020001,
+0x3c010001, 0xac226d9c, 0x8f820054, 0x3c070001,
+0x8ce76f40, 0x3c030001, 0x94636f24, 0x3c080001,
+0x95086f26, 0x3c05000d, 0x34a50100, 0x3c010001,
+0xac206d98, 0x491021, 0x3c010001, 0xac226f30,
+0xafa30010, 0xc002b3b, 0xafa80014, 0x8fbf002c,
+0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
+0x27bd0030, 0x27bdffe8, 0x3c050001, 0x8ca56d98,
+0x24060004, 0x24020001, 0x14a20014, 0xafbf0010,
+0x3c020002, 0x8c428ffc, 0x30428000, 0x10400005,
+0x3c04000f, 0x3c030001, 0x8c636f40, 0x10000005,
+0x34844240, 0x3c040004, 0x3c030001, 0x8c636f40,
+0x348493e0, 0x24020005, 0x14620016, 0x0,
+0x3c04003d, 0x10000013, 0x34840900, 0x3c020002,
+0x8c428ff8, 0x30428000, 0x10400005, 0x3c04001e,
+0x3c030001, 0x8c636f40, 0x10000005, 0x34848480,
+0x3c04000f, 0x3c030001, 0x8c636f40, 0x34844240,
+0x24020005, 0x14620003, 0x0, 0x3c04007a,
+0x34841200, 0x3c020001, 0x8c426f30, 0x8f830054,
+0x441021, 0x431023, 0x44102b, 0x1440004c,
+0x0, 0x3c020001, 0x8c426da0, 0x14400048,
+0x0, 0x3c010001, 0x10c00025, 0xac206db0,
+0x3c090001, 0x8d296d94, 0x24070001, 0x3c044000,
+0x3c080002, 0x25088ffc, 0x250afffc, 0x52842,
+0x14a00002, 0x24c6ffff, 0x24050008, 0xa91024,
+0x10400010, 0x0, 0x14a70008, 0x0,
+0x8d020000, 0x441024, 0x1040000a, 0x0,
+0x3c010001, 0x10000007, 0xac256db0, 0x8d420000,
+0x441024, 0x10400003, 0x0, 0x3c010001,
+0xac276db0, 0x3c020001, 0x8c426db0, 0x6182b,
+0x2c420001, 0x431024, 0x5440ffe5, 0x52842,
+0x8f820054, 0x3c030001, 0x8c636db0, 0x3c010001,
+0xac226f30, 0x1060003b, 0x24020005, 0x3c030001,
+0x8c636f40, 0x3c010001, 0xac256d98, 0x14620012,
+0x24020001, 0x3c020002, 0x8c428ff8, 0x3c032000,
+0x34635000, 0x431024, 0x14400006, 0x24020001,
+0x3c010001, 0xac206f1c, 0x3c010001, 0xac226d98,
+0x24020001, 0x3c010001, 0xac226e24, 0x3c010001,
+0xac226da4, 0x24020001, 0x3c010001, 0xac226d9c,
+0x3c020001, 0x8c426db0, 0x1040001e, 0x0,
+0x3c020001, 0x8c426d9c, 0x10400008, 0x24020001,
+0x3c010001, 0xac206d9c, 0xaee204b8, 0x3c010001,
+0xac206e1c, 0x3c010001, 0xac226dd4, 0x8ee304b8,
+0x24020008, 0x10620005, 0x24020001, 0xc004239,
+0x0, 0x1000000b, 0x0, 0x3c030001,
+0x8c636d98, 0x10620007, 0x2402000e, 0x3c030002,
+0x8c638f90, 0x10620003, 0x0, 0xc004e9c,
+0x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
+0x27bdffe0, 0x3c03fdff, 0x3c040001, 0x8c846d98,
+0x3c020001, 0x8c426dc0, 0x3463ffff, 0x283a024,
+0x14820006, 0xafbf0018, 0x8ee304b8, 0x3c020001,
+0x8c426dc4, 0x10620006, 0x0, 0x8ee204b8,
+0x3c010001, 0xac246dc0, 0x3c010001, 0xac226dc4,
+0x3c030001, 0x8c636d98, 0x24020002, 0x1062019c,
+0x2c620003, 0x10400005, 0x24020001, 0x1062000a,
+0x0, 0x10000226, 0x0, 0x24020004,
+0x106200b6, 0x24020008, 0x1062010a, 0x24020001,
+0x1000021f, 0x0, 0x8ee204b8, 0x2443ffff,
+0x2c620008, 0x1040021c, 0x31080, 0x3c010001,
+0x220821, 0x8c226af8, 0x400008, 0x0,
+0x3c030001, 0x8c636f40, 0x24020005, 0x14620010,
+0x0, 0x3c020001, 0x8c426da4, 0x10400008,
+0x24020003, 0xc004482, 0x0, 0x24020002,
+0xaee204b8, 0x3c010001, 0x10000002, 0xac206da4,
+0xaee204b8, 0x3c010001, 0x10000203, 0xac206d30,
+0xc004482, 0x0, 0x3c020001, 0x8c426da4,
+0x3c010001, 0xac206d30, 0x1440017a, 0x24020002,
+0x1000019d, 0x24020007, 0x3c030001, 0x8c636f40,
+0x24020005, 0x14620003, 0x24020001, 0x3c010001,
+0xac226dd0, 0xc0045ff, 0x0, 0x3c030001,
+0x8c636dd0, 0x10000174, 0x24020011, 0x3c050001,
+0x8ca56d98, 0x3c060002, 0x8cc68ffc, 0xc005104,
+0x2021, 0x24020005, 0x3c010001, 0xac206da4,
+0x100001e1, 0xaee204b8, 0x3c040001, 0x24846aec,
+0x3c05000f, 0x34a50100, 0x3021, 0x3821,
+0xafa00010, 0xc002b3b, 0xafa00014, 0x100001d6,
+0x0, 0x8f820220, 0x3c030004, 0x431024,
+0x14400175, 0x24020007, 0x8f830054, 0x3c020001,
+0x8c426f28, 0x2463d8f0, 0x431023, 0x2c422710,
+0x14400003, 0x24020001, 0x3c010001, 0xac226d9c,
+0x3c020002, 0x8c428ffc, 0x30425000, 0x104001c2,
+0x0, 0x8f820220, 0x30428000, 0x1040017d,
+0x0, 0x10000175, 0x0, 0x3c050001,
+0x8ca56d98, 0xc00529b, 0x2021, 0xc00551b,
+0x2021, 0x3c030002, 0x8c638ff4, 0x46101b0,
+0x24020001, 0x3c020008, 0x621024, 0x10400006,
+0x0, 0x8f820214, 0x3c03ffff, 0x431024,
+0x10000005, 0x3442251f, 0x8f820214, 0x3c03ffff,
+0x431024, 0x3442241f, 0xaf820214, 0x8f820220,
+0x3c030200, 0x34420002, 0xaf820220, 0x24020008,
+0xaee204b8, 0x8f820220, 0x283a025, 0x3c030004,
+0x431024, 0x14400016, 0x0, 0x3c020002,
+0x8c428ffc, 0x30425000, 0x1040000d, 0x0,
+0x8f820220, 0x30428000, 0x10400006, 0x0,
+0x8f820220, 0x3c03ffff, 0x34637fff, 0x10000003,
+0x431024, 0x8f820220, 0x34428000, 0xaf820220,
+0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
+0x3c030001, 0x8c636f40, 0x24020005, 0x1462000a,
+0x0, 0x3c020001, 0x94426f26, 0x24429fbc,
+0x2c420004, 0x10400004, 0x24040018, 0x24050002,
+0xc004ddb, 0x24060020, 0xc003e6d, 0x0,
+0x3c010001, 0x10000170, 0xac206e20, 0x8ee204b8,
+0x2443ffff, 0x2c620008, 0x1040016b, 0x31080,
+0x3c010001, 0x220821, 0x8c226b18, 0x400008,
+0x0, 0xc004547, 0x0, 0x3c030001,
+0x8c636db4, 0x100000e8, 0x24020009, 0x3c020002,
+0x8c428ff8, 0x30424000, 0x10400004, 0x0,
+0x8f820044, 0x10000006, 0x3442f080, 0x8f820044,
+0x3c03ffff, 0x34633f7f, 0x431024, 0x3442a080,
+0xaf820044, 0x8f830054, 0x100000ea, 0x24020004,
+0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
+0x431023, 0x2c422710, 0x14400147, 0x24020005,
+0x100000d8, 0x0, 0x8f820220, 0x3c03f700,
+0x431025, 0xaf820220, 0xaf800204, 0x3c010002,
+0x100000d6, 0xac208fe0, 0x8f830054, 0x3c020001,
+0x8c426f28, 0x2463fff6, 0x431023, 0x2c42000a,
+0x14400135, 0x24020007, 0x100000d7, 0x0,
+0xc003f50, 0x0, 0x1040012d, 0x24020001,
+0x8f820214, 0x3c03ffff, 0x3c040001, 0x8c846f1c,
+0x431024, 0x3442251f, 0xaf820214, 0x24020008,
+0x10800005, 0xaee204b8, 0x3c020001, 0x8c426e44,
+0x10400064, 0x24020001, 0x8f820220, 0x3c030008,
+0x431024, 0x1040006a, 0x3c020200, 0x10000078,
+0x0, 0x8ee204b8, 0x2443ffff, 0x2c620007,
+0x10400115, 0x31080, 0x3c010001, 0x220821,
+0x8c226b38, 0x400008, 0x0, 0xc003daf,
+0x0, 0x3c010001, 0xac206d9c, 0xaf800204,
+0x3c010002, 0xc004482, 0xac208fe0, 0x24020001,
+0x3c010001, 0xac226db4, 0x24020002, 0x10000102,
+0xaee204b8, 0xc004547, 0x0, 0x3c030001,
+0x8c636db4, 0x10000084, 0x24020009, 0x3c020002,
+0x8c428ff8, 0x30424000, 0x10400003, 0x3c0200c8,
+0x10000002, 0x344201f6, 0x344201fe, 0xaf820238,
+0x8f830054, 0x1000008b, 0x24020004, 0x8f830054,
+0x3c020001, 0x8c426f28, 0x2463d8f0, 0x431023,
+0x2c422710, 0x144000e8, 0x24020005, 0x10000079,
+0x0, 0x8f820220, 0x3c03f700, 0x431025,
+0xaf820220, 0xaf800204, 0x3c010002, 0x10000077,
+0xac208fe0, 0x8f830054, 0x3c020001, 0x8c426f28,
+0x2463fff6, 0x431023, 0x2c42000a, 0x144000d6,
+0x24020007, 0x10000078, 0x0, 0xc003f50,
+0x0, 0x104000ce, 0x24020001, 0x8f820214,
+0x3c03ffff, 0x3c040001, 0x8c846f1c, 0x431024,
+0x3442251f, 0xaf820214, 0x24020008, 0x1080000f,
+0xaee204b8, 0x3c020001, 0x8c426e44, 0x1440000b,
+0x0, 0x8f820220, 0x34420002, 0xaf820220,
+0x24020001, 0x3c010002, 0xac228f90, 0xc004e9c,
+0x8f840220, 0x10000016, 0x0, 0x8f820220,
+0x3c030008, 0x431024, 0x14400011, 0x3c020200,
+0x282a025, 0x2402000e, 0x3c010002, 0xac228f90,
+0xc00551b, 0x2021, 0x8f820220, 0x34420002,
+0xc003e6d, 0xaf820220, 0x3c050001, 0x8ca56d98,
+0xc00529b, 0x2021, 0x100000a3, 0x0,
+0x3c020001, 0x8c426e44, 0x1040009f, 0x0,
+0x3c020001, 0x8c426e40, 0x2442ffff, 0x3c010001,
+0xac226e40, 0x14400098, 0x24020002, 0x3c010001,
+0xac206e44, 0x3c010001, 0x10000093, 0xac226e40,
+0x8ee204b8, 0x2443ffff, 0x2c620007, 0x1040008e,
+0x31080, 0x3c010001, 0x220821, 0x8c226b58,
+0x400008, 0x0, 0x3c020001, 0x8c426da4,
+0x10400018, 0x24020005, 0xc004482, 0x0,
+0x24020002, 0xaee204b8, 0x3c010001, 0x1000007e,
+0xac206da4, 0xc004963, 0x0, 0x3c030001,
+0x8c636dd4, 0x24020006, 0x14620077, 0x24020003,
+0x10000075, 0xaee204b8, 0x3c050001, 0x8ca56d98,
+0x3c060002, 0x8cc68ff8, 0xc005104, 0x2021,
+0x24020005, 0x1000006c, 0xaee204b8, 0x8f820220,
+0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
+0x24020006, 0xaee204b8, 0x3c010001, 0x10000062,
+0xac236f28, 0x8f820220, 0x3c030004, 0x431024,
+0x10400003, 0x24020007, 0x1000005b, 0xaee204b8,
+0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
+0x431023, 0x2c422710, 0x14400003, 0x24020001,
+0x3c010001, 0xac226d9c, 0x3c020002, 0x8c428ff8,
+0x30425000, 0x1040004c, 0x0, 0x8f820220,
+0x30428000, 0x10400007, 0x0, 0x8f820220,
+0x3c03ffff, 0x34637fff, 0x431024, 0x10000042,
+0xaf820220, 0x8f820220, 0x34428000, 0x1000003e,
+0xaf820220, 0x3c050001, 0x8ca56d98, 0xc00529b,
+0x2021, 0xc00551b, 0x2021, 0x3c020002,
+0x8c428ff0, 0x4410032, 0x24020001, 0x8f820214,
+0x3c03ffff, 0x431024, 0x3442251f, 0xaf820214,
+0x24020008, 0xaee204b8, 0x8f820220, 0x34420002,
+0xaf820220, 0x8f820220, 0x3c030004, 0x431024,
+0x14400016, 0x0, 0x3c020002, 0x8c428ff8,
+0x30425000, 0x1040000d, 0x0, 0x8f820220,
+0x30428000, 0x10400006, 0x0, 0x8f820220,
+0x3c03ffff, 0x34637fff, 0x10000003, 0x431024,
+0x8f820220, 0x34428000, 0xaf820220, 0x8f820220,
+0x3c03f700, 0x431025, 0xaf820220, 0x3c020001,
+0x94426f26, 0x24429fbc, 0x2c420004, 0x10400004,
+0x24040018, 0x24050002, 0xc004ddb, 0x24060020,
+0xc003e6d, 0x0, 0x10000003, 0x0,
+0x3c010001, 0xac226d9c, 0x8fbf0018, 0x3e00008,
+0x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
+0x34420004, 0xaf820220, 0x8f820200, 0x3c050001,
+0x8ca56d98, 0x34420004, 0xaf820200, 0x24020002,
+0x10a2004b, 0x2ca20003, 0x10400005, 0x24020001,
+0x10a2000a, 0x0, 0x100000b1, 0x0,
+0x24020004, 0x10a20072, 0x24020008, 0x10a20085,
+0x3c02f0ff, 0x100000aa, 0x0, 0x8f830050,
+0x3c02f0ff, 0x3442ffff, 0x3c040001, 0x8c846f40,
+0x621824, 0x3c020700, 0x621825, 0x24020e00,
+0x2484fffb, 0x2c840002, 0xaf830050, 0xaf850200,
+0xaf850220, 0x14800006, 0xaf820238, 0x8f820044,
+0x3c03ffff, 0x34633f7f, 0x431024, 0xaf820044,
+0x3c030001, 0x8c636f40, 0x24020005, 0x14620004,
+0x0, 0x8f820044, 0x34425000, 0xaf820044,
+0x3c020001, 0x8c426d88, 0x3c030001, 0x8c636f40,
+0x34420022, 0x2463fffc, 0x2c630002, 0x1460000c,
+0xaf820200, 0x3c020001, 0x8c426dac, 0x3c030001,
+0x8c636d90, 0x3c040001, 0x8c846d8c, 0x34428000,
+0x621825, 0x641825, 0x1000000a, 0x34620002,
+0x3c020001, 0x8c426d90, 0x3c030001, 0x8c636dac,
+0x3c040001, 0x8c846d8c, 0x431025, 0x441025,
+0x34420002, 0xaf820220, 0x1000002f, 0x24020001,
+0x24020e01, 0xaf820238, 0x8f830050, 0x3c02f0ff,
+0x3442ffff, 0x3c040001, 0x8c846f1c, 0x621824,
+0x3c020d00, 0x621825, 0x24020001, 0xaf830050,
+0xaf820200, 0xaf820220, 0x10800005, 0x3c033f00,
+0x3c020001, 0x8c426d80, 0x10000004, 0x34630070,
+0x3c020001, 0x8c426d80, 0x34630072, 0x431025,
+0xaf820200, 0x3c030001, 0x8c636d84, 0x3c02f700,
+0x621825, 0x3c020001, 0x8c426d90, 0x3c040001,
+0x8c846dac, 0x3c050001, 0x8ca56f40, 0x431025,
+0x441025, 0xaf820220, 0x24020005, 0x14a20006,
+0x24020001, 0x8f820044, 0x2403afff, 0x431024,
+0xaf820044, 0x24020001, 0x1000003d, 0xaf820238,
+0x8f830050, 0x3c02f0ff, 0x3442ffff, 0x3c040001,
+0x8c846f1c, 0x621824, 0x3c020a00, 0x621825,
+0x24020001, 0xaf830050, 0xaf820200, 0x1080001e,
+0xaf820220, 0x3c020001, 0x8c426e44, 0x1440001a,
+0x3c033f00, 0x3c020001, 0x8c426d80, 0x1000001a,
+0x346300e0, 0x8f830050, 0x3c040001, 0x8c846f1c,
+0x3442ffff, 0x621824, 0x1080000f, 0xaf830050,
+0x3c020001, 0x8c426e44, 0x1440000b, 0x3c043f00,
+0x3c030001, 0x8c636d80, 0x348400e0, 0x24020001,
+0xaf820200, 0xaf820220, 0x641825, 0xaf830200,
+0x10000008, 0x3c05f700, 0x3c020001, 0x8c426d80,
+0x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
+0x3c05f700, 0x34a58000, 0x3c030001, 0x8c636d84,
+0x3c020001, 0x8c426d90, 0x3c040001, 0x8c846dac,
+0x651825, 0x431025, 0x441025, 0xaf820220,
+0x3e00008, 0x0, 0x3c030001, 0x8c636db4,
+0x3c020001, 0x8c426db8, 0x10620003, 0x24020002,
+0x3c010001, 0xac236db8, 0x1062001d, 0x2c620003,
+0x10400025, 0x24020001, 0x14620023, 0x24020004,
+0x3c030001, 0x8c636d98, 0x10620006, 0x24020008,
+0x1462000c, 0x3c0200c8, 0x344201fb, 0x10000009,
+0xaf820238, 0x24020e01, 0xaf820238, 0x8f820044,
+0x3c03ffff, 0x34633f7f, 0x431024, 0x34420080,
+0xaf820044, 0x8f830054, 0x24020002, 0x3c010001,
+0xac226db4, 0x3c010001, 0x1000000b, 0xac236f2c,
+0x8f830054, 0x3c020001, 0x8c426f2c, 0x2463d8f0,
+0x431023, 0x2c422710, 0x14400003, 0x24020009,
+0x3c010001, 0xac226db4, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x27bdffd8,
+0xafb20018, 0x809021, 0xafb3001c, 0xa09821,
+0xafb10014, 0xc08821, 0xafb00010, 0x8021,
+0xafbf0020, 0xa6200000, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x24100010, 0x2501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x2501024, 0x24100010, 0x2701024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x2701024, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x96220000, 0x501025,
+0xa6220000, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x0, 0x8fbf0020, 0x8fb3001c,
+0x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
+0x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
+0xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
+0xafb00010, 0x8021, 0xafbf0020, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x2301024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x2301024, 0x24100010, 0x2501024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x2501024, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x34108000,
+0x96620000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
+0x0, 0xc004db9, 0x0, 0x8fbf0020,
+0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
+0x3e00008, 0x27bd0028, 0x3c040001, 0x8c846dd0,
+0x3c020001, 0x8c426e18, 0x27bdffd8, 0xafbf0020,
+0xafb1001c, 0x10820003, 0xafb00018, 0x3c010001,
+0xac246e18, 0x3c030001, 0x8c636f40, 0x24020005,
+0x14620005, 0x2483ffff, 0xc004963, 0x0,
+0x1000034c, 0x0, 0x2c620013, 0x10400349,
+0x31080, 0x3c010001, 0x220821, 0x8c226b80,
+0x400008, 0x0, 0xc004db9, 0x8021,
+0x34028000, 0xa7a20010, 0x27b10010, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x1000030e, 0x24020002, 0x27b10010, 0xa7a00010,
+0x8021, 0xc004d78, 0x24040001, 0x26100001,
+0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
+0x2021, 0xc004d78, 0x24040001, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x24100010,
+0x32020001, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
+0x24100010, 0xc004d78, 0x2021, 0x108042,
+0x1600fffc, 0x0, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x96220000, 0x501025,
+0xa6220000, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x0, 0x97a20010, 0x30428000,
+0x144002dc, 0x24020003, 0x100002d8, 0x0,
+0x24021200, 0xa7a20010, 0x27b10010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0xc004d78, 0x2021, 0x108042, 0x1600fffc,
+0x0, 0xc004d78, 0x24040001, 0xc004d78,
+0x2021, 0x34108000, 0x96220000, 0x501024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fff8, 0x0, 0xc004db9,
+0x0, 0x8f830054, 0x10000296, 0x24020004,
+0x8f830054, 0x3c020001, 0x8c426f3c, 0x2463ff9c,
+0x431023, 0x2c420064, 0x1440029e, 0x24020002,
+0x3c030001, 0x8c636f40, 0x10620297, 0x2c620003,
+0x14400296, 0x24020011, 0x24020003, 0x10620005,
+0x24020004, 0x10620291, 0x2402000f, 0x1000028f,
+0x24020011, 0x1000028d, 0x24020005, 0x24020014,
+0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x32020012,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020012, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x34108000,
+0x96220000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
+0x0, 0xc004db9, 0x0, 0x8f830054,
+0x10000248, 0x24020006, 0x8f830054, 0x3c020001,
+0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
+0x14400250, 0x24020007, 0x1000024c, 0x0,
+0x24020006, 0xa7a20010, 0x27b10010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020013, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020013,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x8f830054, 0x10000207, 0x24020008, 0x8f830054,
+0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
+0x2c420064, 0x1440020f, 0x24020009, 0x1000020b,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x8f830054, 0x10000193, 0x2402000a, 0x8f830054,
+0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
+0x2c420064, 0x1440019b, 0x2402000b, 0x10000197,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020017, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020017, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x8f830054, 0x1000011f, 0x2402000c, 0x8f830054,
+0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
+0x2c420064, 0x14400127, 0x24020012, 0x10000123,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020014, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020014, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x8f830054, 0x100000ab, 0x24020013, 0x8f830054,
+0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
+0x2c420064, 0x144000b3, 0x2402000d, 0x100000af,
+0x0, 0x27b10010, 0xa7a00010, 0x8021,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x96220000, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x8f830054, 0x10000037, 0x2402000e, 0x24020840,
+0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x32020013,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020013, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x34108000,
+0x96220000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
+0x0, 0xc004db9, 0x0, 0x8f830054,
+0x24020010, 0x3c010001, 0xac226dd0, 0x3c010001,
+0x1000000c, 0xac236f3c, 0x8f830054, 0x3c020001,
+0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
+0x14400004, 0x0, 0x24020011, 0x3c010001,
+0xac226dd0, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
+0x3e00008, 0x27bd0028, 0x3c030001, 0x8c636d98,
+0x27bdffc8, 0x24020002, 0xafbf0034, 0xafb20030,
+0xafb1002c, 0x14620004, 0xafb00028, 0x3c120002,
+0x10000003, 0x8e528ff8, 0x3c120002, 0x8e528ffc,
+0x3c030001, 0x8c636dd4, 0x3c020001, 0x8c426e1c,
+0x50620004, 0x2463ffff, 0x3c010001, 0xac236e1c,
+0x2463ffff, 0x2c620006, 0x10400377, 0x31080,
+0x3c010001, 0x220821, 0x8c226bd8, 0x400008,
+0x0, 0x2021, 0x2821, 0xc004ddb,
+0x34068000, 0x24040010, 0x24050002, 0x24060002,
+0x24020002, 0xc004ddb, 0xa7a20018, 0x24020002,
+0x3c010001, 0x10000364, 0xac226dd4, 0x27b10018,
+0xa7a00018, 0x8021, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x24100010, 0x32020001, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x32020001, 0x24100010, 0xc004d78, 0x2021,
+0x108042, 0x1600fffc, 0x0, 0xc004db9,
+0x34108000, 0xc004db9, 0x0, 0xc004d58,
+0x0, 0x50400005, 0x108042, 0x96220000,
+0x501025, 0xa6220000, 0x108042, 0x1600fff7,
+0x0, 0xc004db9, 0x0, 0x97a20018,
+0x30428000, 0x14400004, 0x24020003, 0x3c010001,
+0xac226dd4, 0x24020003, 0x3c010001, 0x1000032a,
+0xac226dd4, 0x24040010, 0x24050002, 0x24060002,
+0x24020002, 0xc004ddb, 0xa7a20018, 0x3c030001,
+0x8c636e20, 0x24020001, 0x146201e1, 0x8021,
+0x27b10018, 0xa7a00018, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x24100010, 0x32020001, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x32020001, 0x24100010, 0x32020018, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x96220000, 0x501025,
+0xa6220000, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x8021, 0x27b10018, 0xa7a00018,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0x32020001,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x32020001, 0x24100010,
+0x32020018, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x96220000, 0x501025, 0xa6220000, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0x24040018, 0x2821, 0xc004ddb, 0x24060404,
+0xa7a0001a, 0xc004d78, 0x24040001, 0x26100001,
+0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
+0x2021, 0xc004d78, 0x24040001, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x24100010,
+0x32020001, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
+0x24100010, 0x32020018, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x32020018, 0xc004db9, 0x34108000, 0xc004db9,
+0x0, 0xc004d58, 0x0, 0x50400005,
+0x108042, 0x97a2001a, 0x501025, 0xa7a2001a,
+0x108042, 0x1600fff7, 0x0, 0xc004db9,
+0x8021, 0xa7a0001a, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x24100010, 0x32020001, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x32020001, 0x24100010, 0x32020018, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x97a2001a, 0x501025,
+0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x8021, 0xa7a0001c, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
+0x2021, 0x24100010, 0xc004d78, 0x2021,
+0x108042, 0x1600fffc, 0x0, 0x24100010,
+0x3202001e, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x3202001e,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x97a2001c, 0x501025, 0xa7a2001c, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x8021,
+0xa7a0001c, 0xc004d78, 0x24040001, 0x26100001,
+0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
+0x2021, 0xc004d78, 0x24040001, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x24100010,
+0xc004d78, 0x2021, 0x108042, 0x1600fffc,
+0x0, 0x24100010, 0x3202001e, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x3202001e, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x97a2001c, 0x501025,
+0xa7a2001c, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x8021, 0x24020002, 0xa7a2001e,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0x24100010, 0xc004d78,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0x24100010, 0x3202001e, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x3202001e, 0xc004d78, 0x24040001, 0xc004d78,
+0x2021, 0x34108000, 0x97a2001e, 0x501024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fff8, 0x0, 0xc004db9,
+0x8021, 0xa7a00020, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x24100010, 0xc004d78, 0x2021, 0x108042,
+0x1600fffc, 0x0, 0x24100010, 0x3202001e,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x3202001e, 0xc004db9,
+0x34108000, 0xc004db9, 0x0, 0xc004d58,
+0x0, 0x50400005, 0x108042, 0x97a20020,
+0x501025, 0xa7a20020, 0x108042, 0x1600fff7,
+0x0, 0xc004db9, 0x8021, 0xa7a00020,
+0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
+0x1440fffb, 0x0, 0xc004d78, 0x2021,
+0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0x24100010, 0xc004d78,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0x24100010, 0x3202001e, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
+0x3202001e, 0xc004db9, 0x34108000, 0xc004db9,
+0x0, 0xc004d58, 0x0, 0x50400005,
+0x108042, 0x97a20020, 0x501025, 0xa7a20020,
+0x108042, 0x1600fff7, 0x0, 0xc004db9,
+0x8021, 0xa7a00022, 0xc004d78, 0x24040001,
+0x26100001, 0x2e020020, 0x1440fffb, 0x0,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0xc004d78, 0x2021, 0xc004d78, 0x24040001,
+0x24100010, 0xc004d78, 0x2021, 0x108042,
+0x1600fffc, 0x0, 0x24100010, 0xc004d78,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0xc004d78, 0x24040001, 0xc004d78, 0x2021,
+0x34108000, 0x97a20022, 0x501024, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fff8, 0x0, 0xc004db9, 0x0,
+0x24040018, 0x24050002, 0xc004ddb, 0x24060004,
+0x3c100001, 0x8e106e24, 0x24020001, 0x1602011d,
+0x0, 0x3c020001, 0x94426f26, 0x3c010001,
+0xac206e24, 0x24429fbc, 0x2c420004, 0x1040000c,
+0x24040009, 0x24050001, 0xc004ddb, 0x24060400,
+0x24040018, 0x24050001, 0xc004ddb, 0x24060020,
+0x24040018, 0x24050001, 0xc004ddb, 0x24062000,
+0x3c024000, 0x2421024, 0x10400123, 0x3c022000,
+0x2421024, 0x10400004, 0x0, 0x3c010001,
+0x10000003, 0xac306f1c, 0x3c010001, 0xac206f1c,
+0x3c030001, 0x8c636f34, 0x24020005, 0x146200f9,
+0x0, 0x3c020001, 0x8c426f1c, 0x10400067,
+0x3c020004, 0x2421024, 0x10400011, 0xa7a00018,
+0x3c020008, 0x2421024, 0x10400002, 0x24020200,
+0xa7a20018, 0x3c020010, 0x2421024, 0x10400004,
+0x0, 0x97a20018, 0x34420100, 0xa7a20018,
+0x97a60018, 0x24040009, 0x10000004, 0x2821,
+0x24040009, 0x2821, 0x3021, 0xc004ddb,
+0x0, 0x24020001, 0xa7a2001a, 0x3c020008,
+0x2421024, 0x1040000c, 0x3c020002, 0x2421024,
+0x10400002, 0x24020101, 0xa7a2001a, 0x3c020001,
+0x2421024, 0x10400005, 0x3c020010, 0x97a2001a,
+0x34420040, 0xa7a2001a, 0x3c020010, 0x2421024,
+0x1040000e, 0x3c020002, 0x2421024, 0x10400005,
+0x3c020001, 0x97a2001a, 0x34420080, 0xa7a2001a,
+0x3c020001, 0x2421024, 0x10400005, 0x3c0300a0,
+0x97a2001a, 0x34420020, 0xa7a2001a, 0x3c0300a0,
+0x2431024, 0x54430004, 0x3c020020, 0x97a2001a,
+0x1000000c, 0x34420400, 0x2421024, 0x50400004,
+0x3c020080, 0x97a2001a, 0x10000006, 0x34420800,
+0x2421024, 0x10400004, 0x0, 0x97a2001a,
+0x34420c00, 0xa7a2001a, 0x97a6001a, 0x24040004,
+0xc004ddb, 0x2821, 0x3c020004, 0x2421024,
+0x10400004, 0xa7a0001c, 0x32425000, 0x14400004,
+0x0, 0x32424000, 0x10400005, 0x2021,
+0xc004cf9, 0x2402021, 0x10000096, 0x0,
+0x97a6001c, 0x2821, 0x34c61200, 0xc004ddb,
+0xa7a6001c, 0x1000008f, 0x0, 0x2421024,
+0x10400004, 0xa7a00018, 0x32425000, 0x14400004,
+0x0, 0x32424000, 0x10400005, 0x3c020010,
+0xc004cf9, 0x2402021, 0x10000019, 0xa7a0001a,
+0x2421024, 0x10400004, 0x0, 0x97a20018,
+0x10000004, 0xa7a20018, 0x97a20018, 0x34420100,
+0xa7a20018, 0x3c020001, 0x2421024, 0x10400004,
+0x0, 0x97a20018, 0x10000004, 0xa7a20018,
+0x97a20018, 0x34422000, 0xa7a20018, 0x97a60018,
+0x2021, 0xc004ddb, 0x2821, 0xa7a0001a,
+0x8021, 0xc004d78, 0x24040001, 0x26100001,
+0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
+0x2021, 0xc004d78, 0x24040001, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x24100010,
+0x32020001, 0x10400002, 0x2021, 0x24040001,
+0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
+0x24100010, 0xc004d78, 0x2021, 0x108042,
+0x1600fffc, 0x0, 0xc004db9, 0x34108000,
+0xc004db9, 0x0, 0xc004d58, 0x0,
+0x50400005, 0x108042, 0x97a2001a, 0x501025,
+0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
+0xc004db9, 0x8021, 0xa7a0001a, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
+0x2021, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
+0x2021, 0x108042, 0x1600fffc, 0x0,
+0xc004db9, 0x34108000, 0xc004db9, 0x0,
+0xc004d58, 0x0, 0x50400005, 0x108042,
+0x97a2001a, 0x501025, 0xa7a2001a, 0x108042,
+0x1600fff7, 0x0, 0xc004db9, 0x0,
+0x3c040001, 0x24846bcc, 0x97a60018, 0x97a7001a,
+0x3c020001, 0x8c426d98, 0x3c030001, 0x8c636f1c,
+0x3c05000d, 0x34a50205, 0xafa20010, 0xc002b3b,
+0xafa30014, 0x8f830054, 0x24020004, 0x3c010001,
+0xac226dd4, 0x3c010001, 0x10000017, 0xac236f38,
+0x8f830054, 0x3c020001, 0x8c426f38, 0x2463ff9c,
+0x431023, 0x2c420064, 0x1440000f, 0x0,
+0x8f820220, 0x24030005, 0x3c010001, 0xac236dd4,
+0x3c03f700, 0x431025, 0x10000007, 0xaf820220,
+0x24020006, 0x3c010001, 0xac226dd4, 0x24020011,
+0x3c010001, 0xac226dd0, 0x8fbf0034, 0x8fb20030,
+0x8fb1002c, 0x8fb00028, 0x3e00008, 0x27bd0038,
+0x27bdffd8, 0xafb00018, 0x808021, 0xafb1001c,
+0x8821, 0x32024000, 0x10400013, 0xafbf0020,
+0x3c020010, 0x2021024, 0x2c420001, 0x21023,
+0x30434100, 0x3c020001, 0x2021024, 0x14400006,
+0x34714000, 0x3c020002, 0x2021024, 0x14400002,
+0x34716000, 0x34714040, 0x2021, 0x2821,
+0x10000036, 0x2203021, 0x32021000, 0x10400035,
+0x2021, 0x2821, 0xc004ddb, 0x24060040,
+0x24040018, 0x2821, 0xc004ddb, 0x24060c00,
+0x24040017, 0x2821, 0xc004ddb, 0x24060400,
+0x24040016, 0x2821, 0xc004ddb, 0x24060006,
+0x24040017, 0x2821, 0xc004ddb, 0x24062500,
+0x24040016, 0x2821, 0xc004ddb, 0x24060006,
+0x24040017, 0x2821, 0xc004ddb, 0x24064600,
+0x24040016, 0x2821, 0xc004ddb, 0x24060006,
+0x24040017, 0x2821, 0xc004ddb, 0x24066700,
+0x24040016, 0x2821, 0xc004ddb, 0x24060006,
+0x2404001f, 0x2821, 0xc004ddb, 0x24060010,
+0x24040009, 0x2821, 0xc004ddb, 0x24061500,
+0x24040009, 0x2821, 0x24061d00, 0xc004ddb,
+0x0, 0x3c040001, 0x24846bf0, 0x3c05000e,
+0x34a50100, 0x2003021, 0x2203821, 0xafa00010,
+0xc002b3b, 0xafa00014, 0x8fbf0020, 0x8fb1001c,
+0x8fb00018, 0x3e00008, 0x27bd0028, 0x8f850044,
+0x8f820044, 0x3c030001, 0x431025, 0x3c030008,
+0xaf820044, 0x8f840054, 0x8f820054, 0xa32824,
+0x10000002, 0x24840001, 0x8f820054, 0x821023,
+0x2c420002, 0x1440fffc, 0x0, 0x8f820044,
+0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
+0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
+0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
+0x0, 0x3e00008, 0xa01021, 0x8f830044,
+0x3c02fff0, 0x3442ffff, 0x42480, 0x621824,
+0x3c020002, 0x822025, 0x641825, 0xaf830044,
+0x8f820044, 0x3c03fffe, 0x3463ffff, 0x431024,
+0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820044, 0x3c030001,
+0x431025, 0xaf820044, 0x8f830054, 0x8f820054,
+0x10000002, 0x24630001, 0x8f820054, 0x621023,
+0x2c420002, 0x1440fffc, 0x0, 0x3e00008,
+0x0, 0x8f820044, 0x2403ff7f, 0x431024,
+0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820044, 0x34420080,
+0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x3e00008, 0x0,
+0x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
+0xaf820044, 0x8f820044, 0x3c030001, 0x431025,
+0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
+0x24630001, 0x8f820054, 0x621023, 0x2c420002,
+0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
+0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
+0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
+0x621023, 0x2c420002, 0x1440fffc, 0x0,
+0x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
+0x809821, 0xafbe002c, 0xa0f021, 0xafb20020,
+0xc09021, 0x33c2ffff, 0xafbf0030, 0xafb50028,
+0xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
+0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x34108000,
+0x96420000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x12000075,
+0x0, 0x1000fff6, 0x0, 0x3275ffff,
+0x27b10010, 0xa7a00010, 0x8021, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
+0x2021, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2b01024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x2b01024, 0xc004db9,
+0x34108000, 0xc004db9, 0x0, 0xc004d58,
+0x0, 0x50400005, 0x108042, 0x96220000,
+0x501025, 0xa6220000, 0x108042, 0x1600fff7,
+0x0, 0xc004db9, 0x0, 0x33c5ffff,
+0x24020001, 0x54a20004, 0x24020002, 0x97a20010,
+0x10000006, 0x521025, 0x14a20006, 0x3271ffff,
+0x97a20010, 0x121827, 0x431024, 0xa7a20010,
+0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
+0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
+0x0, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0xc004d78,
+0x24040001, 0x24100010, 0x32020001, 0x10400002,
+0x2021, 0x24040001, 0xc004d78, 0x108042,
+0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
+0x10400002, 0x2021, 0x24040001, 0xc004d78,
+0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
+0x24040001, 0xc004d78, 0x2021, 0x34108000,
+0x96420000, 0x501024, 0x10400002, 0x2021,
+0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
+0x0, 0xc004db9, 0x0, 0x8fbf0030,
+0x8fbe002c, 0x8fb50028, 0x8fb30024, 0x8fb20020,
+0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
+0x0, 0x0, 0x0, 0x27bdffe8,
+0xafbf0010, 0x8ee304b8, 0x24020008, 0x146201e0,
+0x0, 0x3c020001, 0x8c426f1c, 0x14400005,
+0x0, 0xc003daf, 0x8f840224, 0x100001d8,
+0x0, 0x8f820220, 0x3c030008, 0x431024,
+0x10400026, 0x24020001, 0x8f840224, 0x8f820220,
+0x3c030400, 0x431024, 0x10400006, 0x0,
+0x3c010002, 0xac208fa0, 0x3c010002, 0x1000000b,
+0xac208fc0, 0x3c030002, 0x24638fa0, 0x8c620000,
+0x24420001, 0xac620000, 0x2c420002, 0x14400003,
+0x24020001, 0x3c010002, 0xac228fc0, 0x3c020002,
+0x8c428fc0, 0x10400006, 0x30820040, 0x10400004,
+0x24020001, 0x3c010002, 0x10000003, 0xac228fc4,
+0x3c010002, 0xac208fc4, 0x3c010002, 0xac248f9c,
+0x3c010002, 0x1000000b, 0xac208fd0, 0x3c010002,
+0xac228fd0, 0x3c010002, 0xac208fc0, 0x3c010002,
+0xac208fa0, 0x3c010002, 0xac208fc4, 0x3c010002,
+0xac208f9c, 0x3c030002, 0x8c638f90, 0x3c020002,
+0x8c428f94, 0x50620004, 0x2463ffff, 0x3c010002,
+0xac238f94, 0x2463ffff, 0x2c62000e, 0x10400194,
+0x31080, 0x3c010001, 0x220821, 0x8c226c00,
+0x400008, 0x0, 0x24020002, 0x3c010002,
+0xac208fc0, 0x3c010002, 0xac208fa0, 0x3c010002,
+0xac208f9c, 0x3c010002, 0xac208fc4, 0x3c010002,
+0xac208fb8, 0x3c010002, 0xac208fb0, 0xaf800224,
+0x3c010002, 0xac228f90, 0x3c020002, 0x8c428fd0,
+0x1440004f, 0x3c02fdff, 0x3442ffff, 0xc003daf,
+0x282a024, 0xaf800204, 0x8f820200, 0x2403fffd,
+0x431024, 0xaf820200, 0x3c010002, 0xac208fe0,
+0x8f830054, 0x3c020002, 0x8c428fb8, 0x24040001,
+0x3c010002, 0xac248fcc, 0x24420001, 0x3c010002,
+0xac228fb8, 0x2c420004, 0x3c010002, 0xac238fb4,
+0x14400006, 0x24020003, 0x3c010001, 0xac246d9c,
+0x3c010002, 0x1000015e, 0xac208fb8, 0x3c010002,
+0x1000015b, 0xac228f90, 0x8f830054, 0x3c020002,
+0x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
+0x14400003, 0x24020004, 0x3c010002, 0xac228f90,
+0x3c020002, 0x8c428fd0, 0x14400021, 0x3c02fdff,
+0x3442ffff, 0x1000014a, 0x282a024, 0x3c040001,
+0x8c846f20, 0x3c010002, 0xc005084, 0xac208fa8,
+0x3c020002, 0x8c428fdc, 0xaf820204, 0x3c020002,
+0x8c428fd0, 0x14400012, 0x3c03fdff, 0x8f820204,
+0x3463ffff, 0x30420030, 0x1440012f, 0x283a024,
+0x3c030002, 0x8c638fdc, 0x24020005, 0x3c010002,
+0xac228f90, 0x3c010002, 0x10000131, 0xac238fe0,
+0x3c020002, 0x8c428fd0, 0x10400010, 0x3c02fdff,
+0x3c020001, 0x8c426e3c, 0x24420001, 0x3c010001,
+0xac226e3c, 0x2c420002, 0x14400125, 0x24020001,
+0x3c010001, 0xac226e44, 0x3c010001, 0xac206e3c,
+0x3c010001, 0x1000011e, 0xac226d9c, 0x3c030002,
+0x8c638fc0, 0x3442ffff, 0x10600119, 0x282a024,
+0x3c020002, 0x8c428f9c, 0x10400115, 0x0,
+0x3c010002, 0xac228fc8, 0x24020003, 0x3c010002,
+0xac228fa0, 0x100000b8, 0x24020006, 0x3c010002,
+0xac208fa8, 0x8f820204, 0x34420040, 0xaf820204,
+0x3c020002, 0x8c428fe0, 0x24030007, 0x3c010002,
+0xac238f90, 0x34420040, 0x3c010002, 0xac228fe0,
+0x3c020002, 0x8c428fc0, 0x10400005, 0x0,
+0x3c020002, 0x8c428f9c, 0x104000f0, 0x24020002,
+0x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
+0x104000ea, 0x24020002, 0x3c020002, 0x8c428fc4,
+0x104000ef, 0x2404ffbf, 0x3c020002, 0x8c428f9c,
+0x3c030002, 0x8c638fc8, 0x441024, 0x641824,
+0x10430004, 0x24020001, 0x3c010002, 0x100000e4,
+0xac228f90, 0x24020003, 0xaca20000, 0x24020008,
+0x3c010002, 0xac228f90, 0x3c020002, 0x8c428fcc,
+0x1040000c, 0x24020001, 0x3c040002, 0xc005091,
+0x8c848f9c, 0x3c020002, 0x8c428fe8, 0x14400005,
+0x24020001, 0x3c020002, 0x8c428fe4, 0x10400006,
+0x24020001, 0x3c010001, 0xac226d9c, 0x3c010002,
+0x100000cb, 0xac208fb8, 0x3c020002, 0x8c428fb0,
+0x3c030002, 0x8c638f9c, 0x2c420001, 0x210c0,
+0x30630008, 0x3c010002, 0xac228fb0, 0x3c010002,
+0xac238fac, 0x8f830054, 0x24020009, 0x3c010002,
+0xac228f90, 0x3c010002, 0x100000b9, 0xac238fb4,
+0x8f830054, 0x3c020002, 0x8c428fb4, 0x2463d8f0,
+0x431023, 0x2c422710, 0x1440009f, 0x0,
+0x3c020002, 0x8c428fc0, 0x10400005, 0x0,
+0x3c020002, 0x8c428f9c, 0x104000a0, 0x24020002,
+0x3c030002, 0x24638fa0, 0x8c620000, 0x2c424e21,
+0x1040009a, 0x24020002, 0x3c020002, 0x8c428fcc,
+0x1040000e, 0x0, 0x3c020002, 0x8c428f9c,
+0x3c010002, 0xac208fcc, 0x30420080, 0x1040002f,
+0x2402000c, 0x8f820204, 0x30420080, 0x1440000c,
+0x24020003, 0x10000029, 0x2402000c, 0x3c020002,
+0x8c428f9c, 0x30420080, 0x14400005, 0x24020003,
+0x8f820204, 0x30420080, 0x1040001f, 0x24020003,
+0xac620000, 0x2402000a, 0x3c010002, 0xac228f90,
+0x3c040002, 0x24848fd8, 0x8c820000, 0x3c030002,
+0x8c638fb0, 0x431025, 0xaf820204, 0x8c830000,
+0x3c040002, 0x8c848fb0, 0x2402000b, 0x3c010002,
+0xac228f90, 0x641825, 0x3c010002, 0xac238fe0,
+0x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
+0x10400066, 0x24020002, 0x3c020002, 0x8c428fd0,
+0x10400005, 0x0, 0x2402000c, 0x3c010002,
+0x10000067, 0xac228f90, 0x3c020002, 0x8c428fc0,
+0x10400063, 0x0, 0x3c040002, 0x8c848f9c,
+0x10800055, 0x30820008, 0x3c030002, 0x8c638fac,
+0x1062005b, 0x24020003, 0x3c010002, 0xac248fc8,
+0xaca20000, 0x24020006, 0x3c010002, 0x10000054,
+0xac228f90, 0x8f820200, 0x34420002, 0xaf820200,
+0x8f830054, 0x2402000d, 0x3c010002, 0xac228f90,
+0x3c010002, 0xac238fb4, 0x8f830054, 0x3c020002,
+0x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
+0x14400031, 0x0, 0x3c020002, 0x8c428fd0,
+0x10400020, 0x2402000e, 0x3c030002, 0x8c638fe4,
+0x3c010002, 0x14600015, 0xac228f90, 0xc003e6d,
+0x0, 0x3c050001, 0x8ca56d98, 0xc00529b,
+0x2021, 0x3c030001, 0x8c636d98, 0x24020004,
+0x14620005, 0x2403fffb, 0x3c020001, 0x8c426d94,
+0x10000003, 0x2403fff7, 0x3c020001, 0x8c426d94,
+0x431024, 0x3c010001, 0xac226d94, 0x8f830224,
+0x3c020200, 0x3c010002, 0xac238fec, 0x10000020,
+0x282a025, 0x3c020002, 0x8c428fc0, 0x10400005,
+0x0, 0x3c020002, 0x8c428f9c, 0x1040000f,
+0x24020002, 0x3c020002, 0x8c428fa0, 0x2c424e21,
+0x1040000a, 0x24020002, 0x3c020002, 0x8c428fc0,
+0x1040000f, 0x0, 0x3c020002, 0x8c428f9c,
+0x1440000b, 0x0, 0x24020002, 0x3c010002,
+0x10000007, 0xac228f90, 0x3c020002, 0x8c428fc0,
+0x10400003, 0x0, 0xc003daf, 0x0,
+0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
+0x8fbf0010, 0x3e00008, 0x27bd0018, 0x3c030002,
+0x24638fe8, 0x8c620000, 0x10400005, 0x34422000,
+0x3c010002, 0xac228fdc, 0x10000003, 0xac600000,
+0x3c010002, 0xac248fdc, 0x3e00008, 0x0,
+0x27bdffe0, 0x30820030, 0xafbf0018, 0x3c010002,
+0xac228fe4, 0x14400067, 0x3c02ffff, 0x34421f0e,
+0x821024, 0x14400061, 0x24020030, 0x30822000,
+0x1040005d, 0x30838000, 0x31a02, 0x30820001,
+0x21200, 0x3c040001, 0x8c846f20, 0x621825,
+0x331c2, 0x3c030001, 0x24636e48, 0x30828000,
+0x21202, 0x30840001, 0x42200, 0x441025,
+0x239c2, 0x61080, 0x431021, 0x471021,
+0x90430000, 0x24020001, 0x10620025, 0x0,
+0x10600007, 0x24020002, 0x10620013, 0x24020003,
+0x1062002c, 0x3c05000f, 0x10000037, 0x0,
+0x8f820200, 0x2403feff, 0x431024, 0xaf820200,
+0x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
+0xaf820220, 0x3c010002, 0xac209004, 0x3c010002,
+0x10000034, 0xac20900c, 0x8f820200, 0x34420100,
+0xaf820200, 0x8f820220, 0x3c03fffe, 0x3463ffff,
+0x431024, 0xaf820220, 0x24020100, 0x3c010002,
+0xac229004, 0x3c010002, 0x10000026, 0xac20900c,
+0x8f820200, 0x2403feff, 0x431024, 0xaf820200,
+0x8f820220, 0x3c030001, 0x431025, 0xaf820220,
+0x3c010002, 0xac209004, 0x3c010002, 0x10000019,
+0xac23900c, 0x8f820200, 0x34420100, 0xaf820200,
+0x8f820220, 0x3c030001, 0x431025, 0xaf820220,
+0x24020100, 0x3c010002, 0xac229004, 0x3c010002,
+0x1000000c, 0xac23900c, 0x34a5ffff, 0x3c040001,
+0x24846c38, 0xafa30010, 0xc002b3b, 0xafa00014,
+0x10000004, 0x0, 0x24020030, 0x3c010002,
+0xac228fe8, 0x8fbf0018, 0x3e00008, 0x27bd0020,
+0x0, 0x0, 0x0, 0x27bdffc8,
+0xafb20028, 0x809021, 0xafb3002c, 0xa09821,
+0xafb00020, 0xc08021, 0x3c040001, 0x24846c50,
+0x3c050009, 0x3c020001, 0x8c426d98, 0x34a59001,
+0x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
+0xa7a0001a, 0xafb00014, 0xc002b3b, 0xafa20010,
+0x24020002, 0x12620083, 0x2e620003, 0x10400005,
+0x24020001, 0x1262000a, 0x0, 0x10000173,
+0x0, 0x24020004, 0x126200f8, 0x24020008,
+0x126200f7, 0x3c02ffec, 0x1000016c, 0x0,
+0x3c020001, 0x8c426d94, 0x30420002, 0x14400004,
+0x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
+0x3c010002, 0x310821, 0xac308ffc, 0x3c024000,
+0x2021024, 0x1040004e, 0x1023c2, 0x30840030,
+0x101382, 0x3042001c, 0x3c030001, 0x24636dd8,
+0x431021, 0x823821, 0x3c020020, 0x2021024,
+0x10400006, 0x24020100, 0x3c010002, 0x310821,
+0xac229000, 0x10000005, 0x3c020080, 0x3c010002,
+0x310821, 0xac209000, 0x3c020080, 0x2021024,
+0x10400006, 0x121940, 0x3c020001, 0x3c010002,
+0x230821, 0x10000005, 0xac229008, 0x121140,
+0x3c010002, 0x220821, 0xac209008, 0x94e40000,
+0x3c030001, 0x8c636f40, 0x24020005, 0x10620010,
+0xa7a40018, 0x32024000, 0x10400002, 0x34824000,
+0xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
+0x24e60002, 0x34420001, 0xc0045be, 0xa4e20002,
+0x24040001, 0x2821, 0xc0045be, 0x27a60018,
+0x3c020001, 0x8c426d98, 0x24110001, 0x3c010001,
+0xac316da4, 0x14530004, 0x32028000, 0xc003daf,
+0x0, 0x32028000, 0x1040011c, 0x0,
+0xc003daf, 0x0, 0x3c030001, 0x8c636f40,
+0x24020005, 0x10620115, 0x24020002, 0x3c010001,
+0xac316d9c, 0x3c010001, 0x10000110, 0xac226d98,
+0x24040001, 0x24050004, 0x27b0001a, 0xc0045be,
+0x2003021, 0x24040001, 0x2821, 0xc0045be,
+0x2003021, 0x3c020002, 0x511021, 0x8c428ff4,
+0x3c040001, 0x8c846d98, 0x3c03bfff, 0x3463ffff,
+0x3c010001, 0xac336da4, 0x431024, 0x3c010002,
+0x310821, 0x109300f7, 0xac228ff4, 0x100000f7,
+0x0, 0x3c022000, 0x2021024, 0x10400005,
+0x24020001, 0x3c010001, 0xac226f1c, 0x10000004,
+0x128940, 0x3c010001, 0xac206f1c, 0x128940,
+0x3c010002, 0x310821, 0xac308ff8, 0x3c024000,
+0x2021024, 0x14400014, 0x0, 0x3c020001,
+0x8c426f1c, 0x10400006, 0x24040004, 0x24050001,
+0xc004ddb, 0x24062000, 0x24020001, 0xaee204b8,
+0x3c020002, 0x511021, 0x8c428ff0, 0x3c03bfff,
+0x3463ffff, 0x431024, 0x3c010002, 0x310821,
+0x100000d0, 0xac228ff0, 0x3c020001, 0x8c426f1c,
+0x10400028, 0x3c0300a0, 0x2031024, 0x5443000d,
+0x3c020020, 0x3c020001, 0x8c426f20, 0x24030100,
+0x3c010002, 0x310821, 0xac239004, 0x3c030001,
+0x3c010002, 0x310821, 0xac23900c, 0x10000015,
+0x34420400, 0x2021024, 0x10400008, 0x24030100,
+0x3c020001, 0x8c426f20, 0x3c010002, 0x310821,
+0xac239004, 0x1000000b, 0x34420800, 0x3c020080,
+0x2021024, 0x1040002e, 0x3c030001, 0x3c020001,
+0x8c426f20, 0x3c010002, 0x310821, 0xac23900c,
+0x34420c00, 0x3c010001, 0xac226f20, 0x10000025,
+0x24040001, 0x3c020020, 0x2021024, 0x10400006,
+0x24020100, 0x3c010002, 0x310821, 0xac229004,
+0x10000005, 0x3c020080, 0x3c010002, 0x310821,
+0xac209004, 0x3c020080, 0x2021024, 0x10400007,
+0x121940, 0x3c020001, 0x3c010002, 0x230821,
+0xac22900c, 0x10000006, 0x24040001, 0x121140,
+0x3c010002, 0x220821, 0xac20900c, 0x24040001,
+0x2821, 0x27b0001e, 0xc00457c, 0x2003021,
+0x24040001, 0x2821, 0xc00457c, 0x2003021,
+0x24040001, 0x24050001, 0x27b0001c, 0xc00457c,
+0x2003021, 0x24040001, 0x24050001, 0xc00457c,
+0x2003021, 0x10000077, 0x0, 0x3c02ffec,
+0x3442ffff, 0x2028024, 0x3c020008, 0x2028025,
+0x121140, 0x3c010002, 0x220821, 0xac308ff8,
+0x3c022000, 0x2021024, 0x10400009, 0x0,
+0x3c020001, 0x8c426e44, 0x14400005, 0x24020001,
+0x3c010001, 0xac226f1c, 0x10000004, 0x3c024000,
+0x3c010001, 0xac206f1c, 0x3c024000, 0x2021024,
+0x1440001d, 0x24020e01, 0x3c030001, 0x8c636f1c,
+0xaf820238, 0x3c010001, 0xac206db0, 0x10600005,
+0x24022020, 0x3c010001, 0xac226f20, 0x24020001,
+0xaee204b8, 0x3c04bfff, 0x121940, 0x3c020002,
+0x431021, 0x8c428ff0, 0x3c050001, 0x8ca56d98,
+0x3484ffff, 0x441024, 0x3c010002, 0x230821,
+0xac228ff0, 0x24020001, 0x10a20044, 0x0,
+0x10000040, 0x0, 0x3c020001, 0x8c426f1c,
+0x1040001c, 0x24022000, 0x3c010001, 0xac226f20,
+0x3c0300a0, 0x2031024, 0x14430005, 0x121140,
+0x3402a000, 0x3c010001, 0x1000002d, 0xac226f20,
+0x3c030002, 0x621821, 0x8c638ff8, 0x3c020020,
+0x621024, 0x10400004, 0x24022001, 0x3c010001,
+0x10000023, 0xac226f20, 0x3c020080, 0x621024,
+0x1040001f, 0x3402a001, 0x3c010001, 0x1000001c,
+0xac226f20, 0x3c020020, 0x2021024, 0x10400007,
+0x121940, 0x24020100, 0x3c010002, 0x230821,
+0xac229004, 0x10000006, 0x3c020080, 0x121140,
+0x3c010002, 0x220821, 0xac209004, 0x3c020080,
+0x2021024, 0x10400006, 0x121940, 0x3c020001,
+0x3c010002, 0x230821, 0x10000005, 0xac22900c,
+0x121140, 0x3c010002, 0x220821, 0xac20900c,
+0x3c030001, 0x8c636d98, 0x24020001, 0x10620003,
+0x0, 0xc003daf, 0x0, 0x8fbf0030,
+0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
+0x3e00008, 0x27bd0038, 0x27bdffb0, 0xafb3003c,
+0x9821, 0xafb50040, 0xa821, 0xafb10034,
+0x8821, 0x24020002, 0xafbf0048, 0xafbe0044,
+0xafb20038, 0xafb00030, 0xafa4002c, 0xa7a0001a,
+0xa7a00018, 0xa7a00020, 0xa7a0001e, 0xa7a00022,
+0x10a20130, 0xa7a0001c, 0x2ca20003, 0x10400005,
+0x24020001, 0x10a2000a, 0x3c024000, 0x1000025d,
+0x2201021, 0x24020004, 0x10a2020a, 0x24020008,
+0x10a20208, 0x2201021, 0x10000256, 0x0,
+0x8fa8002c, 0x88140, 0x3c030002, 0x701821,
+0x8c638ffc, 0x621024, 0x14400009, 0x24040001,
+0x3c027fff, 0x3442ffff, 0x628824, 0x3c010002,
+0x300821, 0xac318ff4, 0x10000246, 0x2201021,
+0x24050001, 0xc00457c, 0x27a60018, 0x24040001,
+0x24050001, 0xc00457c, 0x27a60018, 0x97a20018,
+0x30420004, 0x104000d9, 0x3c114000, 0x3c020001,
+0x8c426f40, 0x2443ffff, 0x2c620006, 0x104000d9,
+0x31080, 0x3c010001, 0x220821, 0x8c226c68,
+0x400008, 0x0, 0x24040001, 0x24050011,
+0x27b0001a, 0xc00457c, 0x2003021, 0x24040001,
+0x24050011, 0xc00457c, 0x2003021, 0x97a3001a,
+0x30624000, 0x10400002, 0x3c150010, 0x3c150008,
+0x30628000, 0x104000aa, 0x3c130001, 0x100000a8,
+0x3c130002, 0x24040001, 0x24050014, 0x27b0001a,
+0xc00457c, 0x2003021, 0x24040001, 0x24050014,
+0xc00457c, 0x2003021, 0x97a3001a, 0x30621000,
+0x10400002, 0x3c150010, 0x3c150008, 0x30620800,
+0x10400097, 0x3c130001, 0x10000095, 0x3c130002,
+0x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
+0x2003021, 0x24040001, 0x24050019, 0xc00457c,
+0x2003021, 0x97a2001c, 0x30430700, 0x24020400,
+0x10620027, 0x28620401, 0x1040000e, 0x24020200,
+0x1062001f, 0x28620201, 0x10400005, 0x24020100,
+0x5062001e, 0x3c130001, 0x1000001e, 0x24040001,
+0x24020300, 0x50620019, 0x3c130002, 0x10000019,
+0x24040001, 0x24020600, 0x1062000d, 0x28620601,
+0x10400005, 0x24020500, 0x5062000b, 0x3c130002,
+0x10000010, 0x24040001, 0x24020700, 0x1462000d,
+0x24040001, 0x3c130004, 0x1000000a, 0x3c150008,
+0x10000006, 0x3c130004, 0x10000005, 0x3c150008,
+0x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
+0x24040001, 0x24050018, 0x27b0001e, 0xc00457c,
+0x2003021, 0x24040001, 0x24050018, 0xc00457c,
+0x2003021, 0x8fa8002c, 0x97a7001e, 0x81140,
+0x3c060002, 0xc23021, 0x8cc68ff4, 0x97a20022,
+0x3c100001, 0x26106c5c, 0x2002021, 0xafa20010,
+0x97a2001c, 0x3c05000c, 0x34a50303, 0xc002b3b,
+0xafa20014, 0x3c020004, 0x16620010, 0x3c020001,
+0x8f840054, 0x24030001, 0x24020002, 0x3c010001,
+0xac236d9c, 0x3c010001, 0xac226d98, 0x3c010001,
+0xac236da4, 0x3c010001, 0xac236e24, 0x3c010001,
+0xac246f30, 0x1000004f, 0x2b38825, 0x16620039,
+0x3c028000, 0x3c020001, 0x8c426e20, 0x1440001e,
+0x24040018, 0x2021, 0x2821, 0xc004ddb,
+0x34068000, 0x8f830054, 0x8f820054, 0x2b38825,
+0x10000002, 0x24630032, 0x8f820054, 0x621023,
+0x2c420033, 0x1440fffc, 0x0, 0x8f830054,
+0x24020001, 0x3c010001, 0xac226e20, 0x3c010001,
+0xac226d9c, 0x3c010001, 0xac226d98, 0x3c010001,
+0xac226da4, 0x3c010001, 0xac226e24, 0x3c010001,
+0x1000002c, 0xac236f30, 0x2821, 0xc004ddb,
+0x24060404, 0x2021, 0x2405001e, 0x27a60018,
+0x24020002, 0xc0045be, 0xa7a20018, 0x2021,
+0x2821, 0x27a60018, 0xc0045be, 0xa7a00018,
+0x24040018, 0x24050002, 0xc004ddb, 0x24060004,
+0x3c028000, 0x2221025, 0x2b31825, 0x10000015,
+0x438825, 0x2221025, 0x2751825, 0x438825,
+0x2002021, 0x97a6001c, 0x3c070001, 0x8ce76d98,
+0x3c05000c, 0x34a50326, 0xafb30010, 0xc002b3b,
+0xafb10014, 0x10000007, 0x0, 0x3c110002,
+0x2308821, 0x8e318ffc, 0x3c027fff, 0x3442ffff,
+0x2228824, 0x3c020001, 0x8c426da8, 0x1040001e,
+0x0, 0x3c020001, 0x8c426f1c, 0x10400002,
+0x3c022000, 0x2228825, 0x8fa8002c, 0x81140,
+0x3c010002, 0x220821, 0x8c229000, 0x10400003,
+0x3c020020, 0x10000005, 0x2228825, 0x3c02ffdf,
+0x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
+0x3c010002, 0x220821, 0x8c229008, 0x10400003,
+0x3c020080, 0x10000004, 0x2228825, 0x3c02ff7f,
+0x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
+0x3c010002, 0x220821, 0xac318ff4, 0x10000135,
+0x2201021, 0x8fa8002c, 0x8f140, 0x3c030002,
+0x7e1821, 0x8c638ff8, 0x3c024000, 0x621024,
+0x14400009, 0x24040001, 0x3c027fff, 0x3442ffff,
+0x628824, 0x3c010002, 0x3e0821, 0xac318ff0,
+0x10000124, 0x2201021, 0x2821, 0xc00457c,
+0x27a60018, 0x24040001, 0x2821, 0xc00457c,
+0x27a60018, 0x24040001, 0x24050001, 0x27b20020,
+0xc00457c, 0x2403021, 0x24040001, 0x24050001,
+0xc00457c, 0x2403021, 0x24040001, 0x24050004,
+0x27b1001e, 0xc00457c, 0x2203021, 0x24040001,
+0x24050004, 0xc00457c, 0x2203021, 0x24040001,
+0x24050005, 0x27b00022, 0xc00457c, 0x2003021,
+0x24040001, 0x24050005, 0xc00457c, 0x2003021,
+0x24040001, 0x24050010, 0xc00457c, 0x27a60018,
+0x24040001, 0x24050010, 0xc00457c, 0x27a60018,
+0x24040001, 0x2405000a, 0xc00457c, 0x2403021,
+0x24040001, 0x2405000a, 0xc00457c, 0x2403021,
+0x24040001, 0x24050018, 0xc00457c, 0x2203021,
+0x24040001, 0x24050018, 0xc00457c, 0x2203021,
+0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
+0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
+0x97a20018, 0x30420004, 0x10400066, 0x3c114000,
+0x3c030001, 0x8c636f34, 0x24020005, 0x14620067,
+0x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
+0x2003021, 0x24040001, 0x24050019, 0xc00457c,
+0x2003021, 0x97a2001c, 0x30430700, 0x24020400,
+0x10620027, 0x28620401, 0x1040000e, 0x24020200,
+0x1062001f, 0x28620201, 0x10400005, 0x24020100,
+0x5062001e, 0x3c130001, 0x1000001e, 0x3c020004,
+0x24020300, 0x50620019, 0x3c130002, 0x10000019,
+0x3c020004, 0x24020600, 0x1062000d, 0x28620601,
+0x10400005, 0x24020500, 0x5062000b, 0x3c130002,
+0x10000010, 0x3c020004, 0x24020700, 0x1462000d,
+0x3c020004, 0x3c130004, 0x1000000a, 0x3c150008,
+0x10000006, 0x3c130004, 0x10000005, 0x3c150008,
+0x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
+0x3c020004, 0x12620017, 0x3c028000, 0x8f820054,
+0x24100001, 0x3c010001, 0xac306d9c, 0x3c010001,
+0xac306d98, 0x3c010001, 0xac306da4, 0x3c010001,
+0xac306e24, 0x3c010001, 0xac226f30, 0x3c020001,
+0x16620022, 0x2758825, 0x2021, 0x2821,
+0xc004ddb, 0x34068000, 0x3c010001, 0x1000001b,
+0xac306e20, 0x2221025, 0x2b31825, 0x438825,
+0x97a6001c, 0x3c020001, 0x8c426f1c, 0x3c070001,
+0x8ce76d98, 0x3c040001, 0x24846c5c, 0xafa20010,
+0x97a2001e, 0x3c05000c, 0x34a50323, 0x3c010001,
+0xac206e20, 0xc002b3b, 0xafa20014, 0x10000007,
+0x0, 0x3c110002, 0x23e8821, 0x8e318ff0,
+0x3c027fff, 0x3442ffff, 0x2228824, 0x3c020001,
+0x8c426da8, 0x10400069, 0x0, 0x3c020001,
+0x8c426f1c, 0x10400002, 0x3c022000, 0x2228825,
+0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
+0x8c229004, 0x10400003, 0x3c020020, 0x10000005,
+0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
+0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
+0x8c22900c, 0x10400003, 0x3c020080, 0x1000004f,
+0x2228825, 0x3c02ff7f, 0x3442ffff, 0x1000004b,
+0x2228824, 0x8fa8002c, 0x82940, 0x3c030002,
+0x651821, 0x8c638ff8, 0x3c024000, 0x621024,
+0x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
+0x3c010002, 0x250821, 0xac318ff0, 0x10000041,
+0x2201021, 0x3c020001, 0x8c426da8, 0x10400034,
+0x3c11c00c, 0x3c020001, 0x8c426e44, 0x3c04c00c,
+0x34842000, 0x3c030001, 0x8c636f1c, 0x2102b,
+0x21023, 0x441024, 0x10600003, 0x518825,
+0x3c022000, 0x2228825, 0x3c020002, 0x451021,
+0x8c429004, 0x10400003, 0x3c020020, 0x10000004,
+0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
+0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
+0x8c22900c, 0x10400003, 0x3c020080, 0x10000004,
+0x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
+0x3c020001, 0x8c426e30, 0x10400002, 0x3c020800,
+0x2228825, 0x3c020001, 0x8c426e34, 0x10400002,
+0x3c020400, 0x2228825, 0x3c020001, 0x8c426e38,
+0x10400006, 0x3c020100, 0x10000004, 0x2228825,
+0x3c027fff, 0x3442ffff, 0x628824, 0x8fa8002c,
+0x81140, 0x3c010002, 0x220821, 0xac318ff0,
+0x2201021, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
+0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
+0x3e00008, 0x27bd0050, 0x27bdffd0, 0xafb20028,
+0x809021, 0xafbf002c, 0xafb10024, 0xafb00020,
+0x8f840200, 0x3c100001, 0x8e106d98, 0x8f860220,
+0x24020002, 0x1202005c, 0x2e020003, 0x10400005,
+0x24020001, 0x1202000a, 0x121940, 0x1000010c,
+0x0, 0x24020004, 0x120200bf, 0x24020008,
+0x120200be, 0x128940, 0x10000105, 0x0,
+0x3c050002, 0xa32821, 0x8ca58ffc, 0x3c100002,
+0x2038021, 0x8e108ff4, 0x3c024000, 0xa21024,
+0x10400038, 0x3c020008, 0x2021024, 0x10400020,
+0x34840002, 0x3c020002, 0x431021, 0x8c429000,
+0x10400005, 0x34840020, 0x34840100, 0x3c020020,
+0x10000006, 0x2028025, 0x2402feff, 0x822024,
+0x3c02ffdf, 0x3442ffff, 0x2028024, 0x121140,
+0x3c010002, 0x220821, 0x8c229008, 0x10400005,
+0x3c020001, 0xc23025, 0x3c020080, 0x10000016,
+0x2028025, 0x3c02fffe, 0x3442ffff, 0xc23024,
+0x3c02ff7f, 0x3442ffff, 0x1000000f, 0x2028024,
+0x2402fedf, 0x822024, 0x3c02fffe, 0x3442ffff,
+0xc23024, 0x3c02ff5f, 0x3442ffff, 0x2028024,
+0x3c010002, 0x230821, 0xac209000, 0x3c010002,
+0x230821, 0xac209008, 0xaf840200, 0xaf860220,
+0x8f820220, 0x34420002, 0xaf820220, 0x1000000a,
+0x121140, 0x3c02bfff, 0x3442ffff, 0x8f830200,
+0x2028024, 0x2402fffd, 0x621824, 0xc003daf,
+0xaf830200, 0x121140, 0x3c010002, 0x220821,
+0x100000b7, 0xac308ff4, 0x3c020001, 0x8c426f1c,
+0x10400069, 0x24050004, 0x24040001, 0xc00457c,
+0x27a60018, 0x24040001, 0x24050005, 0xc00457c,
+0x27a6001a, 0x97a30018, 0x97a2001a, 0x3c040001,
+0x24846e48, 0x30630c00, 0x31a82, 0x30420c00,
+0x21282, 0xa7a2001a, 0x21080, 0x441021,
+0x431021, 0xa7a30018, 0x90480000, 0x24020001,
+0x3103ffff, 0x10620029, 0x28620002, 0x10400005,
+0x0, 0x10600009, 0x0, 0x1000003d,
+0x0, 0x10700013, 0x24020003, 0x1062002c,
+0x0, 0x10000037, 0x0, 0x8f820200,
+0x2403feff, 0x431024, 0xaf820200, 0x8f820220,
+0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
+0x3c010002, 0xac209004, 0x3c010002, 0x10000032,
+0xac20900c, 0x8f820200, 0x34420100, 0xaf820200,
+0x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
+0xaf820220, 0x24020100, 0x3c010002, 0xac229004,
+0x3c010002, 0x10000024, 0xac20900c, 0x8f820200,
+0x2403feff, 0x431024, 0xaf820200, 0x8f820220,
+0x3c030001, 0x431025, 0xaf820220, 0x3c010002,
+0xac209004, 0x3c010002, 0x10000017, 0xac23900c,
+0x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
+0x3c030001, 0x431025, 0xaf820220, 0x24020100,
+0x3c010002, 0xac229004, 0x3c010002, 0x1000000a,
+0xac23900c, 0x3c040001, 0x24846c80, 0x97a6001a,
+0x97a70018, 0x3c050001, 0x34a5ffff, 0xafa80010,
+0xc002b3b, 0xafa00014, 0x8f820200, 0x34420002,
+0x1000004b, 0xaf820200, 0x128940, 0x3c050002,
+0xb12821, 0x8ca58ff8, 0x3c100002, 0x2118021,
+0x8e108ff0, 0x3c024000, 0xa21024, 0x14400010,
+0x0, 0x3c020001, 0x8c426f1c, 0x14400005,
+0x3c02bfff, 0x8f820200, 0x34420002, 0xaf820200,
+0x3c02bfff, 0x3442ffff, 0xc003daf, 0x2028024,
+0x3c010002, 0x310821, 0x10000031, 0xac308ff0,
+0x3c020001, 0x8c426f1c, 0x10400005, 0x3c020020,
+0x3c020001, 0x8c426e44, 0x10400025, 0x3c020020,
+0xa21024, 0x10400007, 0x34840020, 0x24020100,
+0x3c010002, 0x310821, 0xac229004, 0x10000006,
+0x34840100, 0x3c010002, 0x310821, 0xac209004,
+0x2402feff, 0x822024, 0x3c020080, 0xa21024,
+0x10400007, 0x121940, 0x3c020001, 0x3c010002,
+0x230821, 0xac22900c, 0x10000008, 0xc23025,
+0x121140, 0x3c010002, 0x220821, 0xac20900c,
+0x3c02fffe, 0x3442ffff, 0xc23024, 0xaf840200,
+0xaf860220, 0x8f820220, 0x34420002, 0xaf820220,
+0x121140, 0x3c010002, 0x220821, 0xac308ff0,
+0x8fbf002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
+0x3e00008, 0x27bd0030, 0x0, 0x1821,
+0x308400ff, 0x2405ffdf, 0x2406ffbf, 0x641007,
+0x30420001, 0x10400004, 0x0, 0x8f820044,
+0x10000003, 0x34420040, 0x8f820044, 0x461024,
+0xaf820044, 0x8f820044, 0x34420020, 0xaf820044,
+0x8f820044, 0x451024, 0xaf820044, 0x24630001,
+0x28620008, 0x5440ffee, 0x641007, 0x3e00008,
+0x0, 0x2c820008, 0x1040001b, 0x0,
+0x2405ffdf, 0x2406ffbf, 0x41880, 0x3c020001,
+0x24426e60, 0x621821, 0x24640004, 0x90620000,
+0x10400004, 0x0, 0x8f820044, 0x10000003,
+0x34420040, 0x8f820044, 0x461024, 0xaf820044,
+0x8f820044, 0x34420020, 0xaf820044, 0x8f820044,
+0x451024, 0xaf820044, 0x24630001, 0x64102b,
+0x1440ffee, 0x0, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x8f8400c4,
+0x8f8600e0, 0x8f8700e4, 0x2402fff8, 0xc22824,
+0x10e5001a, 0x27623ff8, 0x14e20002, 0x24e80008,
+0x27683000, 0x55050004, 0x8d0a0000, 0x30c20004,
+0x14400012, 0x805021, 0x8ce90000, 0x8f42013c,
+0x1494823, 0x49182b, 0x94eb0006, 0x10600002,
+0x25630050, 0x494821, 0x123182b, 0x50400003,
+0x8f4201fc, 0x3e00008, 0xe01021, 0xaf8800e8,
+0x24420001, 0xaf4201fc, 0xaf8800e4, 0x3e00008,
+0x1021, 0x3e00008, 0x0, 0x8f8300e4,
+0x27623ff8, 0x10620004, 0x24620008, 0xaf8200e8,
+0x3e00008, 0xaf8200e4, 0x27623000, 0xaf8200e8,
+0x3e00008, 0xaf8200e4, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x8f880120,
+0x27624fe0, 0x8f830128, 0x15020002, 0x25090020,
+0x27694800, 0x11230012, 0x8fa20010, 0xad040000,
+0xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
+0xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
+0xad020010, 0xad030014, 0xaf890120, 0x8f4300fc,
+0x24020001, 0x2463ffff, 0x3e00008, 0xaf4300fc,
+0x8f430324, 0x1021, 0x24630001, 0x3e00008,
+0xaf430324, 0x3e00008, 0x0, 0x8f880100,
+0x276247e0, 0x8f830108, 0x15020002, 0x25090020,
+0x27694000, 0x1123000f, 0x8fa20010, 0xad040000,
+0xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
+0xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
+0xad020010, 0xad030014, 0xaf890100, 0x3e00008,
+0x24020001, 0x8f430328, 0x1021, 0x24630001,
+0x3e00008, 0xaf430328, 0x3e00008, 0x0,
+0x0, 0x0, 0x0, 0x0 };
+static u32 tigon2FwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
+0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f6677, 0x6d61696e, 0x2e632c76, 0x20312e31,
+0x2e322e34, 0x35203139, 0x39392f30, 0x312f3234,
+0x2030303a, 0x31303a35, 0x35207368, 0x75616e67,
+0x20457870, 0x20240000, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x6261644d, 0x656d537a,
+0x0, 0x68775665, 0x72000000, 0x62616448,
+0x77566572, 0x0, 0x2a2a4441, 0x574e5f41,
+0x0, 0x74785278, 0x4266537a, 0x0,
+0x62664174, 0x6e4d726b, 0x0, 0x7265645a,
+0x6f6e6531, 0x0, 0x70636943, 0x6f6e6600,
+0x67656e43, 0x6f6e6600, 0x2a646d61, 0x5244666c,
+0x0, 0x2a50414e, 0x49432a00, 0x2e2e2f2e,
+0x2e2f2e2e, 0x2f2e2e2f, 0x2e2e2f73, 0x72632f6e,
+0x69632f66, 0x77322f63, 0x6f6d6d6f, 0x6e2f6677,
+0x6d61696e, 0x2e630000, 0x72636246, 0x6c616773,
+0x0, 0x62616452, 0x78526362, 0x0,
+0x676c6f62, 0x466c6773, 0x0, 0x2b5f6469,
+0x73705f6c, 0x6f6f7000, 0x2b65765f, 0x68616e64,
+0x6c657200, 0x63616e74, 0x31446d61, 0x0,
+0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x635f636b,
+0x73756d00, 0x2b685f73, 0x656e645f, 0x64617461,
+0x5f726561, 0x64795f63, 0x6b73756d, 0x0,
+0x2b685f64, 0x6d615f72, 0x645f6173, 0x73697374,
+0x5f636b73, 0x756d0000, 0x74436b73, 0x6d4f6e00,
+0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x63000000,
+0x2b685f73, 0x656e645f, 0x64617461, 0x5f726561,
+0x64790000, 0x2b685f64, 0x6d615f72, 0x645f6173,
+0x73697374, 0x0, 0x74436b73, 0x6d4f6666,
+0x0, 0x2b685f73, 0x656e645f, 0x62645f72,
+0x65616479, 0x0, 0x68737453, 0x52696e67,
+0x0, 0x62616453, 0x52696e67, 0x0,
+0x6e696353, 0x52696e67, 0x0, 0x77446d61,
+0x416c6c41, 0x0, 0x2b715f64, 0x6d615f74,
+0x6f5f686f, 0x73745f63, 0x6b73756d, 0x0,
+0x2b685f6d, 0x61635f72, 0x785f636f, 0x6d705f63,
+0x6b73756d, 0x0, 0x2b685f64, 0x6d615f77,
+0x725f6173, 0x73697374, 0x5f636b73, 0x756d0000,
+0x72436b73, 0x6d4f6e00, 0x2b715f64, 0x6d615f74,
+0x6f5f686f, 0x73740000, 0x2b685f6d, 0x61635f72,
+0x785f636f, 0x6d700000, 0x2b685f64, 0x6d615f77,
+0x725f6173, 0x73697374, 0x0, 0x72436b73,
+0x6d4f6666, 0x0, 0x2b685f72, 0x6563765f,
+0x62645f72, 0x65616479, 0x0, 0x2b685f72,
+0x6563765f, 0x6a756d62, 0x6f5f6264, 0x5f726561,
+0x64790000, 0x2b685f72, 0x6563765f, 0x6d696e69,
+0x5f62645f, 0x72656164, 0x79000000, 0x2b6d685f,
+0x636f6d6d, 0x616e6400, 0x2b685f74, 0x696d6572,
+0x0, 0x2b685f64, 0x6f5f7570, 0x64617465,
+0x5f74785f, 0x636f6e73, 0x0, 0x2b685f64,
+0x6f5f7570, 0x64617465, 0x5f72785f, 0x70726f64,
+0x0, 0x2b636b73, 0x756d3136, 0x0,
+0x2b706565, 0x6b5f6d61, 0x635f7278, 0x5f776100,
+0x2b706565, 0x6b5f6d61, 0x635f7278, 0x0,
+0x2b646571, 0x5f6d6163, 0x5f727800, 0x2b685f6d,
+0x61635f72, 0x785f6174, 0x746e0000, 0x62616452,
+0x6574537a, 0x0, 0x72784264, 0x4266537a,
+0x0, 0x2b6e756c, 0x6c5f6861, 0x6e646c65,
+0x72000000, 0x66774f70, 0x4661696c, 0x0,
+0x2b685f75, 0x70646174, 0x655f6c65, 0x64340000,
+0x2b685f75, 0x70646174, 0x655f6c65, 0x64360000,
+0x2b685f75, 0x70646174, 0x655f6c65, 0x64320000,
+0x696e7453, 0x74617465, 0x0, 0x2a2a696e,
+0x69744370, 0x0, 0x23736372, 0x65616d00,
+0x69537461, 0x636b4572, 0x0, 0x70726f62,
+0x654d656d, 0x0, 0x2a2a4441, 0x574e5f42,
+0x0, 0x2b73775f, 0x646d615f, 0x61737369,
+0x73745f70, 0x6c75735f, 0x74696d65, 0x72000000,
+0x2b267072, 0x656c6f61, 0x645f7772, 0x5f646573,
+0x63720000, 0x2b267072, 0x656c6f61, 0x645f7264,
+0x5f646573, 0x63720000, 0x2b685f68, 0x665f7469,
+0x6d657200, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f7469, 0x6d65722e, 0x632c7620, 0x312e312e,
+0x322e3335, 0x20313939, 0x392f3031, 0x2f323720,
+0x31393a30, 0x393a3530, 0x20686179, 0x65732045,
+0x78702024, 0x0, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x542d446d, 0x61526432,
+0x0, 0x542d446d, 0x61526431, 0x0,
+0x542d446d, 0x61526442, 0x0, 0x542d446d,
+0x61577232, 0x0, 0x542d446d, 0x61577231,
+0x0, 0x542d446d, 0x61577242, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f636f, 0x6d6d616e, 0x642e632c, 0x7620312e,
+0x312e322e, 0x32382031, 0x3939392f, 0x30312f32,
+0x30203139, 0x3a34393a, 0x34392073, 0x6875616e,
+0x67204578, 0x70202400, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x3f48636d, 0x644d6278,
+0x0, 0x3f636d64, 0x48737453, 0x0,
+0x3f636d64, 0x4d634d64, 0x0, 0x3f636d64,
+0x50726f6d, 0x0, 0x3f636d64, 0x4c696e6b,
+0x0, 0x3f636d64, 0x45727200, 0x86ac,
+0x8e5c, 0x8e5c, 0x8de4, 0x8b78,
+0x8e30, 0x8e5c, 0x8790, 0x8800,
+0x8990, 0x8a68, 0x8a34, 0x8e5c,
+0x8870, 0x8b24, 0x8e5c, 0x8b34,
+0x87b4, 0x8824, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f6d63, 0x6173742e, 0x632c7620, 0x312e312e,
+0x322e3820, 0x31393938, 0x2f31322f, 0x30382030,
+0x323a3336, 0x3a333620, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x6164644d, 0x63447570,
+0x0, 0x6164644d, 0x6346756c, 0x0,
+0x64656c4d, 0x634e6f45, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f646d, 0x612e632c, 0x7620312e, 0x312e322e,
+0x32342031, 0x3939382f, 0x31322f32, 0x31203030,
+0x3a33333a, 0x30392073, 0x6875616e, 0x67204578,
+0x70202400, 0x65767452, 0x6e674600, 0x51657674,
+0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
+0x526e6746, 0x0, 0x4d516576, 0x74460000,
+0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
+0x0, 0x5173436f, 0x6e734600, 0x51725072,
+0x6f644600, 0x7377446d, 0x614f6666, 0x0,
+0x31446d61, 0x4f6e0000, 0x7377446d, 0x614f6e00,
+0x2372446d, 0x6141544e, 0x0, 0x72446d61,
+0x41544e30, 0x0, 0x72446d61, 0x41544e31,
+0x0, 0x72446d61, 0x34476200, 0x2a50414e,
+0x49432a00, 0x2e2e2f2e, 0x2e2f2e2e, 0x2f2e2e2f,
+0x2e2e2f73, 0x72632f6e, 0x69632f66, 0x77322f63,
+0x6f6d6d6f, 0x6e2f646d, 0x612e6300, 0x2377446d,
+0x6141544e, 0x0, 0x77446d61, 0x41544e30,
+0x0, 0x77446d61, 0x41544e31, 0x0,
+0x77446d61, 0x34476200, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f7472, 0x6163652e, 0x632c7620, 0x312e312e,
+0x322e3520, 0x31393938, 0x2f30392f, 0x33302031,
+0x383a3530, 0x3a323820, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f6461, 0x74612e63, 0x2c762031, 0x2e312e32,
+0x2e313220, 0x31393939, 0x2f30312f, 0x32302031,
+0x393a3439, 0x3a353120, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x46575f56, 0x45525349,
+0x4f4e3a20, 0x23312046, 0x72692041, 0x70722037,
+0x2031373a, 0x35373a35, 0x32205044, 0x54203230,
+0x30300000, 0x46575f43, 0x4f4d5049, 0x4c455f54,
+0x494d453a, 0x2031373a, 0x35373a35, 0x32000000,
+0x46575f43, 0x4f4d5049, 0x4c455f42, 0x593a2064,
+0x65767263, 0x73000000, 0x46575f43, 0x4f4d5049,
+0x4c455f48, 0x4f53543a, 0x20636f6d, 0x70757465,
+0x0, 0x46575f43, 0x4f4d5049, 0x4c455f44,
+0x4f4d4149, 0x4e3a2065, 0x6e672e61, 0x6374656f,
+0x6e2e636f, 0x6d000000, 0x46575f43, 0x4f4d5049,
+0x4c45523a, 0x20676363, 0x20766572, 0x73696f6e,
+0x20322e37, 0x2e320000, 0x0, 0x12041100,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f6d65, 0x6d2e632c, 0x7620312e, 0x312e322e,
+0x35203139, 0x39382f30, 0x392f3330, 0x2031383a,
+0x35303a30, 0x38207368, 0x75616e67, 0x20457870,
+0x20240000, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f7365, 0x6e642e63, 0x2c762031, 0x2e312e32,
+0x2e343420, 0x31393938, 0x2f31322f, 0x32312030,
+0x303a3333, 0x3a313820, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x69736e74, 0x54637055,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f7265, 0x63762e63, 0x2c762031, 0x2e312e32,
+0x2e353320, 0x31393939, 0x2f30312f, 0x31362030,
+0x323a3535, 0x3a343320, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x724d6163, 0x43686b30,
+0x0, 0x72784672, 0x6d324c67, 0x0,
+0x72784e6f, 0x53744264, 0x0, 0x72784e6f,
+0x4d694264, 0x0, 0x72784e6f, 0x4a6d4264,
+0x0, 0x7278436b, 0x446d6146, 0x0,
+0x72785144, 0x6d457846, 0x0, 0x72785144,
+0x6d614600, 0x72785144, 0x4c426446, 0x0,
+0x72785144, 0x6d426446, 0x0, 0x72784372,
+0x63506164, 0x0, 0x72536d51, 0x446d6146,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f6d61, 0x632e632c, 0x7620312e, 0x312e322e,
+0x32322031, 0x3939382f, 0x31322f30, 0x38203032,
+0x3a33363a, 0x33302073, 0x6875616e, 0x67204578,
+0x70202400, 0x65767452, 0x6e674600, 0x51657674,
+0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
+0x526e6746, 0x0, 0x4d516576, 0x74460000,
+0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
+0x0, 0x5173436f, 0x6e734600, 0x51725072,
+0x6f644600, 0x6d616354, 0x68726573, 0x0,
+0x23744d61, 0x6341544e, 0x0, 0x23724d61,
+0x6341544e, 0x0, 0x72656d41, 0x73737274,
+0x0, 0x6c696e6b, 0x444f574e, 0x0,
+0x6c696e6b, 0x55500000, 0x0, 0x0,
+0x0, 0x24486561, 0x6465723a, 0x202f7072,
+0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
+0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
+0x6e2f636b, 0x73756d2e, 0x632c7620, 0x312e312e,
+0x322e3920, 0x31393939, 0x2f30312f, 0x31342030,
+0x303a3033, 0x3a343820, 0x73687561, 0x6e672045,
+0x78702024, 0x0, 0x65767452, 0x6e674600,
+0x51657674, 0x46000000, 0x51657674, 0x505f4600,
+0x4d657674, 0x526e6746, 0x0, 0x4d516576,
+0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
+0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
+0x51725072, 0x6f644600, 0x0, 0x0,
+0x0, 0x50726f62, 0x65506879, 0x0,
+0x6c6e6b41, 0x53535254, 0x0, 0x109a4,
+0x10a1c, 0x10a50, 0x10a7c, 0x11050,
+0x10aa8, 0x10b10, 0x111fc, 0x10dc0,
+0x10c68, 0x10c80, 0x10cc4, 0x10cec,
+0x10d0c, 0x10d34, 0x111fc, 0x10dc0,
+0x10df8, 0x10e10, 0x10e40, 0x10e68,
+0x10e88, 0x10eb0, 0x0, 0x10fdc,
+0x11008, 0x1102c, 0x111fc, 0x11050,
+0x11078, 0x11108, 0x0, 0x0,
+0x0, 0x1186c, 0x1193c, 0x11a14,
+0x11ae4, 0x11b40, 0x11c1c, 0x11c44,
+0x11d20, 0x11d48, 0x11ef0, 0x11f18,
+0x120c0, 0x122b8, 0x1254c, 0x12460,
+0x1254c, 0x12578, 0x120e8, 0x12290,
+0x7273745f, 0x676d6969, 0x0, 0x12608,
+0x12640, 0x12728, 0x13374, 0x133b4,
+0x133cc, 0x7365746c, 0x6f6f7000, 0x0,
+0x0, 0x13bbc, 0x13bfc, 0x13c8c,
+0x13cd0, 0x13d34, 0x13dc0, 0x13df4,
+0x13e7c, 0x13f14, 0x13fe4, 0x14024,
+0x140a8, 0x140cc, 0x141dc, 0x646f4261,
+0x73655067, 0x0, 0x0, 0x0,
+0x0, 0x73746d61, 0x634c4e4b, 0x0,
+0x6765746d, 0x636c6e6b, 0x0, 0x14ed8,
+0x14ed8, 0x14b8c, 0x14bd8, 0x14c24,
+0x14ed8, 0x7365746d, 0x61636163, 0x74000000,
+0x0, 0x0 };
+static u32 tigon2FwData[(MAX_DATA_LEN/4) + 1] __initdata = {
+0x1,
+0x1, 0x1, 0xc001fc, 0x3ffc,
+0xc00000, 0x416c7465, 0x6f6e2041, 0x63654e49,
+0x43205600, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x416c7465,
+0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
+0x0, 0x0, 0x0, 0x1ffffc,
+0x1fff7c, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x60cf00,
+0x60, 0xcf000000, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x3, 0x0,
+0x1, 0x0, 0x0, 0x0,
+0x1, 0x0, 0x1, 0x0,
+0x0, 0x0, 0x0, 0x1,
+0x1, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x1000000, 0x21000000,
+0x12000140, 0x0, 0x0, 0x20000000,
+0x120000a0, 0x0, 0x12000060, 0x12000180,
+0x120001e0, 0x0, 0x0, 0x0,
+0x1, 0x0, 0x0, 0x0,
+0x0, 0x0, 0x0, 0x2,
+0x0, 0x0, 0x30001, 0x1,
+0x30201, 0x0, 0x0, 0x1010101,
+0x1010100, 0x10100, 0x1010001, 0x10001,
+0x1000101, 0x101, 0x0, 0x0 };
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
new file mode 100755
index 000000000000..f2e937abf7b4
--- /dev/null
+++ b/drivers/net/amd8111e.c
@@ -0,0 +1,2167 @@
+
+/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2004 Advanced Micro Devices
+ *
+ *
+ * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
+ * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
+ * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.[ pcnet32.c ]
+ * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+
+Module Name:
+
+ amd8111e.c
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller Driver.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+ 1. Dynamic interrupt coalescing.
+ 2. Removed prev_stats.
+ 3. MII support.
+ 4. Dynamic IPG support
+ 3.0.2 05/29/2003
+ 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
+ 2. Bug fix: Fixed VLAN support failure.
+ 3. Bug fix: Fixed receive interrupt coalescing bug.
+ 4. Dynamic IPG support is disabled by default.
+ 3.0.3 06/05/2003
+ 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
+ 3.0.4 12/09/2003
+ 1. Added set_mac_address routine for bonding driver support.
+ 2. Tested the driver for bonding support
+ 3. Bug fix: Fixed mismatch between the actual receive buffer length and the
+ length indicated to the h/w.
+ 4. Modified amd8111e_rx() routine to receive all the received packets
+ in the first interrupt.
+ 5. Bug fix: Corrected rx_errors reported in get_stats() function.
+ 3.0.5 03/22/2004
+ 1. Added NAPI support
+
+*/
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/crc32.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define AMD8111E_VLAN_TAG_USED 1
+#else
+#define AMD8111E_VLAN_TAG_USED 0
+#endif
+
+#include "amd8111e.h"
+#define MODULE_NAME "amd8111e"
+#define MODULE_VERS "3.0.5"
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.5");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+module_param_array(speed_duplex, int, NULL, 0);
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+module_param_array(coalesce, bool, NULL, 0);
+MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
+module_param_array(dynamic_ipg, bool, NULL, 0);
+MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
+
+static struct pci_device_id amd8111e_pci_tbl[] = {
+
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+
+};
+/*
+This function will read the PHY registers.
+*/
+static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
+{
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+ unsigned int repeat= REPEAT_CNT;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_read;
+
+ *val = reg_val & 0xffff;
+ return 0;
+err_phy_read:
+ *val = 0;
+ return -EINVAL;
+
+}
+
+/*
+This function will write into PHY registers.
+*/
+static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
+{
+ unsigned int repeat = REPEAT_CNT;
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
+
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write the data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_write;
+
+ return 0;
+
+err_phy_write:
+ return -EINVAL;
+
+}
+/*
+This is the mii register read function provided to the mii interface.
+*/
+static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ unsigned int reg_val;
+
+ amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
+ return reg_val;
+
+}
+
+/*
+This is the mii register write function provided to the mii interface.
+*/
+static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+
+ amd8111e_write_phy(lp, phy_id, reg_num, val);
+}
+
+/*
+This function sets the PHY speed. During initialization it sets the initial speed to 100 Mbps full duplex.
+*/
+static void amd8111e_set_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 bmcr,advert,tmp;
+
+ /* Determine mii register values to set the speed */
+ advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ switch (lp->ext_phy_option){
+
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
+ ADVERTISE_100HALF|ADVERTISE_100FULL) ;
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
+ }
+
+ if(advert != tmp)
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
+ /* Restart auto negotiation */
+ bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
+
+}
+
+/*
+This function will unmap skb->data space and will free
+all transmit and receive skbuffs.
+*/
+static int amd8111e_free_skbs(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct sk_buff* rx_skbuff;
+ int i;
+
+ /* Freeing transmit skbs */
+ for(i = 0; i < NUM_TX_BUFFERS; i++){
+ if(lp->tx_skbuff[i]){
+ pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
+ dev_kfree_skb (lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+ }
+ /* Freeing previously allocated receive buffers */
+ for (i = 0; i < NUM_RX_BUFFERS; i++){
+ rx_skbuff = lp->rx_skbuff[i];
+ if(rx_skbuff != NULL){
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
+ lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+This function sets the receive buffer length corresponding to the MTU size of the network interface.
+*/
+static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ unsigned int mtu = dev->mtu;
+
+ if (mtu > ETH_DATA_LEN){
+ /* MTU + ethernet header + FCS
+ + optional VLAN tag + skb reserve space 2 */
+
+ lp->rx_buff_len = mtu + ETH_HLEN + 10;
+ lp->options |= OPTION_JUMBO_ENABLE;
+ } else{
+ lp->rx_buff_len = PKT_BUFF_SZ;
+ lp->options &= ~OPTION_JUMBO_ENABLE;
+ }
+}
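+/*
+ * Worked example (editorial note, not part of the original driver): with a
+ * jumbo MTU of 9000 the buffer length above becomes
+ *
+ *	9000 + ETH_HLEN (14) + 10 = 9024 bytes,
+ *
+ * where the extra 10 bytes cover the 4-byte FCS, an optional 4-byte VLAN
+ * tag and the 2-byte skb_reserve() pad. For MTUs up to ETH_DATA_LEN the
+ * fixed PKT_BUFF_SZ is used instead.
+ */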
+
+/*
+This function frees all previously allocated buffers, determines the new receive buffer length and allocates new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptor rings.
+ */
+static int amd8111e_init_ring(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ lp->rx_idx = lp->tx_idx = 0;
+ lp->tx_complete_idx = 0;
+ lp->tx_ring_idx = 0;
+
+
+ if(lp->opened)
+ /* Free previously allocated transmit and receive skbs */
+ amd8111e_free_skbs(dev);
+
+ else{
+ /* allocate the tx and rx descriptors */
+ if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ &lp->tx_ring_dma_addr)) == NULL)
+
+ goto err_no_mem;
+
+ if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ &lp->rx_ring_dma_addr)) == NULL)
+
+ goto err_free_tx_ring;
+
+ }
+ /* Set new receive buff size */
+ amd8111e_set_rx_buff_len(dev);
+
+ /* Allocating receive skbs */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+
+ if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
+ /* Release previously allocated skbs */
+ for(--i; i >= 0 ;i--)
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ goto err_free_rx_ring;
+ }
+ skb_reserve(lp->rx_skbuff[i],2);
+ }
+ /* Initializing receive descriptors */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+ lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
+ lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+
+ lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
+ }
+
+ /* Initializing transmit descriptors */
+ for (i = 0; i < NUM_TX_RING_DR; i++) {
+ lp->tx_ring[i].buff_phy_addr = 0;
+ lp->tx_ring[i].tx_flags = 0;
+ lp->tx_ring[i].buff_count = 0;
+ }
+
+ return 0;
+
+err_free_rx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+err_free_tx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+err_no_mem:
+ return -ENOMEM;
+}
+/* This function will set the interrupt coalescing according to the input arguments */
+static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
+{
+ unsigned int timeout;
+ unsigned int event_count;
+
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+
+
+ switch(cmod)
+ {
+ case RX_INTR_COAL :
+ timeout = coal_conf->rx_timeout;
+ event_count = coal_conf->rx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_A);
+ break;
+
+ case TX_INTR_COAL :
+ timeout = coal_conf->tx_timeout;
+ event_count = coal_conf->tx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN,mmio+INTEN0);
+ writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_B);
+ break;
+
+ case DISABLE_COAL:
+ writel(0,mmio+STVAL);
+ writel(STINTEN, mmio+INTEN0);
+ writel(0, mmio +DLY_INT_B);
+ writel(0, mmio+DLY_INT_A);
+ break;
+ case ENABLE_COAL:
+ /* Start the timer */
+ writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ break;
+ default:
+ break;
+
+ }
+ return 0;
+
+}
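+/*
+ * Usage sketch (editorial illustration, not part of the original driver):
+ * a caller that wants a fixed receive coalescing setting fills in the
+ * desired timeout (scaled internally by DELAY_TIMER_CONV) and event count
+ * in lp->coal_conf and then lets amd8111e_set_coalesce() program DLY_INT_A:
+ *
+ *	struct amd8111e_priv *lp = netdev_priv(dev);
+ *
+ *	lp->coal_conf.rx_timeout = 1;
+ *	lp->coal_conf.rx_event_count = 4;
+ *	amd8111e_set_coalesce(dev, RX_INTR_COAL);
+ *
+ * amd8111e_calc_coalesce() below uses exactly this pattern when it adapts
+ * the coalescing mode to the observed packet and data rates.
+ */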
+
+/*
+This function initializes the device registers and starts the device.
+*/
+static int amd8111e_restart(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ int i,reg_val;
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+ if(amd8111e_init_ring(dev))
+ return -ENOMEM;
+
+ /* enable the port manager and set auto negotiation always */
+ writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
+ writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
+
+ amd8111e_set_ext_phy(dev);
+
+ /* set control registers */
+ reg_val = readl(mmio + CTRL1);
+ reg_val &= ~XMTSP_MASK;
+ writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
+
+ /* enable interrupt */
+ writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
+ APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
+ SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
+
+ writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
+
+ /* initialize tx and rx ring base addresses */
+ writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
+ writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
+
+ writew((u16)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
+ writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
+
+ /* set default IPG to 96 */
+ writew((u32)DEFAULT_IPG,mmio+IPG);
+ writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
+
+ if(lp->options & OPTION_JUMBO_ENABLE){
+ writel((u32)VAL2|JUMBO, mmio + CMD3);
+ /* Reset REX_UFLO */
+ writel( REX_UFLO, mmio + CMD2);
+ /* Should not set REX_UFLO for jumbo frames */
+ writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
+ }else{
+ writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
+ writel((u32)JUMBO, mmio + CMD3);
+ }
+
+#if AMD8111E_VLAN_TAG_USED
+ writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
+#endif
+ writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
+
+ /* Setting the MAC address to the device */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ writeb( dev->dev_addr[i], mmio + PADR + i );
+
+ /* Enable interrupt coalesce */
+ if(lp->options & OPTION_INTR_COAL_ENABLE){
+ printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
+ dev->name);
+ amd8111e_set_coalesce(dev,ENABLE_COAL);
+ }
+
+ /* set RUN bit to start the chip */
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ writel(VAL0 | INTREN | RUN, mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(mmio+CMD0);
+ return 0;
+}
+/*
+This function clears the necessary device registers.
+*/
+static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+{
+ unsigned int reg_val;
+ unsigned int logic_filter[2] ={0,};
+ void __iomem *mmio = lp->mmio;
+
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+ /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
+ writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
+
+ /* Clear RCV_RING_BASE_ADDR */
+ writel(0, mmio + RCV_RING_BASE_ADDR0);
+
+ /* Clear XMT_RING_BASE_ADDR */
+ writel(0, mmio + XMT_RING_BASE_ADDR0);
+ writel(0, mmio + XMT_RING_BASE_ADDR1);
+ writel(0, mmio + XMT_RING_BASE_ADDR2);
+ writel(0, mmio + XMT_RING_BASE_ADDR3);
+
+ /* Clear CMD0 */
+ writel(CMD0_CLEAR,mmio + CMD0);
+
+ /* Clear CMD2 */
+ writel(CMD2_CLEAR, mmio +CMD2);
+
+ /* Clear CMD7 */
+ writel(CMD7_CLEAR , mmio + CMD7);
+
+ /* Clear DLY_INT_A and DLY_INT_B */
+ writel(0x0, mmio + DLY_INT_A);
+ writel(0x0, mmio + DLY_INT_B);
+
+ /* Clear FLOW_CONTROL */
+ writel(0x0, mmio + FLOW_CONTROL);
+
+ /* Clear INT0 write 1 to clear register */
+ reg_val = readl(mmio + INT0);
+ writel(reg_val, mmio + INT0);
+
+ /* Clear STVAL */
+ writel(0x0, mmio + STVAL);
+
+ /* Clear INTEN0 */
+ writel( INTEN0_CLEAR, mmio + INTEN0);
+
+ /* Clear LADRF */
+ writel(0x0 , mmio + LADRF);
+
+ /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
+ writel( 0x80010,mmio + SRAM_SIZE);
+
+ /* Clear RCV_RING0_LEN */
+ writel(0x0, mmio + RCV_RING_LEN0);
+
+ /* Clear XMT_RING0/1/2/3_LEN */
+ writel(0x0, mmio + XMT_RING_LEN0);
+ writel(0x0, mmio + XMT_RING_LEN1);
+ writel(0x0, mmio + XMT_RING_LEN2);
+ writel(0x0, mmio + XMT_RING_LEN3);
+
+ /* Clear XMT_RING_LIMIT */
+ writel(0x0, mmio + XMT_RING_LIMIT);
+
+ /* Clear MIB */
+ writew(MIB_CLEAR, mmio + MIB_ADDR);
+
+ /* Clear LARF */
+ amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+
+ /* SRAM_SIZE register */
+ reg_val = readl(mmio + SRAM_SIZE);
+
+ if(lp->options & OPTION_JUMBO_ENABLE)
+ writel( VAL2|JUMBO, mmio + CMD3);
+#if AMD8111E_VLAN_TAG_USED
+ writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+#endif
+ /* Set default value to CTRL1 Register */
+ writel(CTRL1_DEFAULT, mmio + CTRL1);
+
+ /* To avoid PCI posting bug */
+ readl(mmio + CMD2);
+
+}
+
+/*
+This function disables interrupts and clears all the pending
+interrupts in INT0.
+ */
+static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
+{
+ u32 intr0;
+
+ /* Disable interrupt */
+ writel(INTREN, lp->mmio + CMD0);
+
+ /* Clear INT0 */
+ intr0 = readl(lp->mmio + INT0);
+ writel(intr0, lp->mmio + INT0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + INT0);
+
+}
+
+/*
+This function stops the chip.
+*/
+static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+{
+ writel(RUN, lp->mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + CMD0);
+}
+
+/*
+This function frees the transmit and receive descriptor rings.
+*/
+static void amd8111e_free_ring(struct amd8111e_priv* lp)
+{
+
+ /* Free transmit and receive skbs */
+ amd8111e_free_skbs(lp->amd8111e_net_dev);
+
+ /* Free transmit and receive descriptor rings */
+ if(lp->rx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+
+ if(lp->tx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring = NULL;
+ }
+
+}
+#if AMD8111E_VLAN_TAG_USED
+/*
+This is the receive indication function for packets with vlan tag.
+*/
+static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
+{
+#ifdef CONFIG_AMD8111E_NAPI
+ return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
+#else
+ return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
+#endif /* CONFIG_AMD8111E_NAPI */
+}
+#endif
+
+/*
+This function frees all the transmit skbs that have actually been transmitted by the device. It checks the ownership bit of each descriptor before freeing the skb.
+*/
+static int amd8111e_tx(struct net_device *dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ int status;
+ /* Complete all the transmitted packets */
+ while (lp->tx_complete_idx != lp->tx_idx){
+ tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
+
+ if(status & OWN_BIT)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[tx_index].buff_phy_addr = 0;
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[tx_index]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
+ lp->tx_skbuff[tx_index]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+ lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_dma_addr[tx_index] = 0;
+ }
+ lp->tx_complete_idx++;
+ /*COAL update tx coalescing parameters */
+ lp->coal_conf.tx_packets++;
+ lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
+
+ if (netif_queue_stopped(dev) &&
+ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
+ /* The ring is no longer full, clear tbusy. */
+ /* lp->tx_full = 0; */
+ netif_wake_queue (dev);
+ }
+ }
+ return 0;
+}
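+/*
+ * Index arithmetic sketch (editorial note, assuming NUM_TX_RING_DR is a
+ * power of two, as the masking above implies): tx_idx and tx_complete_idx
+ * are free-running counters, and "idx & TX_RING_DR_MOD_MASK" maps them onto
+ * ring slots. For example, with a 32-entry ring (mask 0x1f) a completion
+ * index of 37 refers to descriptor 37 & 0x1f = 5.
+ */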
+
+#ifdef CONFIG_AMD8111E_NAPI
+/* This function handles the driver receive operation in polling mode */
+static int amd8111e_rx_poll(struct net_device *dev, int * budget)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
+ void __iomem *mmio = lp->mmio;
+ struct sk_buff *skb,*new_skb;
+ int min_pkt_len, status;
+ unsigned int intr0;
+ int num_rx_pkt = 0;
+ /*int max_rx_pkt = NUM_RX_BUFFERS;*/
+ short pkt_len;
+#if AMD8111E_VLAN_TAG_USED
+ short vtag;
+#endif
+ int rx_pkt_limit = dev->quota;
+
+ do{
+ /* process receive packets until we use the quota*/
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while(1) {
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if (status & OWN_BIT)
+ break;
+
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with
+ * full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only
+ * the last correctly noting the error.
+ */
+
+ if(status & ERR_BIT) {
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if(!((status & STP_BIT) && (status & ENP_BIT))){
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+
+#if AMD8111E_VLAN_TAG_USED
+ vtag = status & TT_MASK;
+ /*MAC will strip vlan tag*/
+ if(lp->vlgrp != NULL && vtag !=0)
+ min_pkt_len =MIN_PKT_LEN - 4;
+ else
+#endif
+ min_pkt_len =MIN_PKT_LEN;
+
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ if(--rx_pkt_limit < 0)
+ goto rx_not_empty;
+ if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
+ /* if allocation fails,
+ ignore this packet and go to the next one */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ skb->dev = dev;
+ lp->rx_skbuff[rx_index] = new_skb;
+ new_skb->dev = dev;
+ lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+ new_skb->data,
+ lp->rx_buff_len-2,
+ PCI_DMA_FROMDEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
+ amd8111e_vlan_rx(lp, skb,
+ le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
+ } else
+#endif
+ netif_receive_skb(skb);
+ /*COAL update rx coalescing parameters*/
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+ num_rx_pkt++;
+ dev->last_rx = jiffies;
+
+ err_next_pkt:
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
+ /* Check the interrupt status register for more packets in the
+ meantime. Process them since we have not used up our quota. */
+
+ intr0 = readl(mmio + INT0);
+ /*Ack receive packets */
+ writel(intr0 & RINT0,mmio + INT0);
+
+ } while(intr0 & RINT0);
+
+ /* Receive descriptor is empty now */
+ dev->quota -= num_rx_pkt;
+ *budget -= num_rx_pkt;
+ netif_rx_complete(dev);
+ /* enable receive interrupt */
+ writel(VAL0|RINTEN0, mmio + INTEN0);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ return 0;
+rx_not_empty:
+ /* Do not call a netif_rx_complete */
+ dev->quota -= num_rx_pkt;
+ *budget -= num_rx_pkt;
+ return 1;
+
+
+}
+
+#else
+/*
+This function checks the ownership of the receive buffers and descriptors. In a single receive interrupt it passes up to NUM_RX_BUFFERS received packets to the kernel and replenishes the descriptors with new skbs.
+*/
+static int amd8111e_rx(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct sk_buff *skb,*new_skb;
+ int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
+ int min_pkt_len, status;
+ int num_rx_pkt = 0;
+ int max_rx_pkt = NUM_RX_BUFFERS;
+ short pkt_len;
+#if AMD8111E_VLAN_TAG_USED
+ short vtag;
+#endif
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while(++num_rx_pkt <= max_rx_pkt){
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if(status & OWN_BIT)
+ return 0;
+
+ /* check if err summary bit is set */
+ if(status & ERR_BIT){
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error. */
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if(!((status & STP_BIT) && (status & ENP_BIT))){
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+
+#if AMD8111E_VLAN_TAG_USED
+ vtag = status & TT_MASK;
+ /*MAC will strip vlan tag*/
+ if(lp->vlgrp != NULL && vtag !=0)
+ min_pkt_len =MIN_PKT_LEN - 4;
+ else
+#endif
+ min_pkt_len =MIN_PKT_LEN;
+
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
+ /* if allocation fails,
+ ignore this packet and go to the next one */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ skb->dev = dev;
+ lp->rx_skbuff[rx_index] = new_skb;
+ new_skb->dev = dev;
+ lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+ new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
+ amd8111e_vlan_rx(lp, skb,
+ le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
+ } else
+#endif
+
+ netif_rx (skb);
+ /*COAL update rx coalescing parameters*/
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+
+ dev->last_rx = jiffies;
+
+err_next_pkt:
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_AMD8111E_NAPI */
+/*
+This function will indicate the link status to the kernel.
+*/
+static int amd8111e_link_change(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int status0,speed;
+
+ /* read the link change */
+ status0 = readl(lp->mmio + STAT0);
+
+ if(status0 & LINK_STATS){
+ if(status0 & AUTONEG_COMPLETE)
+ lp->link_config.autoneg = AUTONEG_ENABLE;
+ else
+ lp->link_config.autoneg = AUTONEG_DISABLE;
+
+ if(status0 & FULL_DPLX)
+ lp->link_config.duplex = DUPLEX_FULL;
+ else
+ lp->link_config.duplex = DUPLEX_HALF;
+ speed = (status0 & SPEED_MASK) >> 7;
+ if(speed == PHY_SPEED_10)
+ lp->link_config.speed = SPEED_10;
+ else if(speed == PHY_SPEED_100)
+ lp->link_config.speed = SPEED_100;
+
+ printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
+ (lp->link_config.speed == SPEED_100) ? "100": "10",
+ (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
+ netif_carrier_on(dev);
+ }
+ else{
+ lp->link_config.speed = SPEED_INVALID;
+ lp->link_config.duplex = DUPLEX_INVALID;
+ lp->link_config.autoneg = AUTONEG_INVALID;
+ printk(KERN_INFO "%s: Link is Down.\n",dev->name);
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+/*
+This function reads the mib counters.
+*/
+static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
+{
+ unsigned int status;
+ unsigned int data;
+ unsigned int repeat = REPEAT_CNT;
+
+ writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
+ do {
+ status = readw(mmio + MIB_ADDR);
+ udelay(2); /* controller takes MAX 2 us to get mib data */
+ }
+ while (--repeat && (status & MIB_CMD_ACTIVE));
+
+ data = readl(mmio + MIB_DATA);
+ return data;
+}
+
+/*
+This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
+*/
+static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned long flags;
+ /* struct net_device_stats *prev_stats = &lp->prev_stats; */
+ struct net_device_stats* new_stats = &lp->stats;
+
+ if(!lp->opened)
+ return &lp->stats;
+ spin_lock_irqsave (&lp->lock, flags);
+
+ /* stats.rx_packets */
+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
+ amd8111e_read_mib(mmio, rcv_multicast_pkts)+
+ amd8111e_read_mib(mmio, rcv_unicast_pkts);
+
+ /* stats.tx_packets */
+ new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
+
+ /*stats.rx_bytes */
+ new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
+
+ /* stats.tx_bytes */
+ new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
+
+ /* stats.rx_errors */
+ /* hw errors + errors driver reported */
+ new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_fragments)+
+ amd8111e_read_mib(mmio, rcv_jabbers)+
+ amd8111e_read_mib(mmio, rcv_alignment_errors)+
+ amd8111e_read_mib(mmio, rcv_fcs_errors)+
+ amd8111e_read_mib(mmio, rcv_miss_pkts)+
+ lp->drv_rx_errors;
+
+ /* stats.tx_errors */
+ new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.rx_dropped*/
+ new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_dropped*/
+ new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.multicast*/
+ new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
+
+ /* stats.collisions*/
+ new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
+
+ /* stats.rx_length_errors*/
+ new_stats->rx_length_errors =
+ amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_oversize_pkts);
+
+ /* stats.rx_over_errors*/
+ new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_crc_errors*/
+ new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
+
+ /* stats.rx_frame_errors*/
+ new_stats->rx_frame_errors =
+ amd8111e_read_mib(mmio, rcv_alignment_errors);
+
+ /* stats.rx_fifo_errors */
+ new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_missed_errors */
+ new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_aborted_errors*/
+ new_stats->tx_aborted_errors =
+ amd8111e_read_mib(mmio, xmt_excessive_collision);
+
+ /* stats.tx_carrier_errors*/
+ new_stats->tx_carrier_errors =
+ amd8111e_read_mib(mmio, xmt_loss_carrier);
+
+ /* stats.tx_fifo_errors*/
+ new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.tx_window_errors*/
+ new_stats->tx_window_errors =
+ amd8111e_read_mib(mmio, xmt_late_collision);
+
+ /* Reset the mibs for collecting new statistics */
+ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ return new_stats;
+}
+/* This function recalculates the interrupt coalescing mode on every hardware
+timer interrupt, based on the current packet rate and data rate: below 800
+packets/sec coalescing is disabled, otherwise the average packet size selects
+low, medium or high coalescing.
+*/
+static int amd8111e_calc_coalesce(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+ int tx_pkt_rate;
+ int rx_pkt_rate;
+ int tx_data_rate;
+ int rx_data_rate;
+ int rx_pkt_size;
+ int tx_pkt_size;
+
+ tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
+ coal_conf->tx_prev_packets = coal_conf->tx_packets;
+
+ tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
+ coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
+
+ rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
+ coal_conf->rx_prev_packets = coal_conf->rx_packets;
+
+ rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
+ coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
+
+ if(rx_pkt_rate < 800){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0x0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ rx_pkt_size = rx_data_rate/rx_pkt_rate;
+ if (rx_pkt_size < 128){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
+
+ if(coal_conf->rx_coal_type != LOW_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = LOW_COALESCE;
+ }
+ }
+ else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
+
+ if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+ else if(rx_pkt_size >= 1024){
+ if(coal_conf->rx_coal_type != HIGH_COALESCE){
+ coal_conf->rx_timeout = 2;
+ coal_conf->rx_event_count = 3;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = HIGH_COALESCE;
+ }
+ }
+ }
+	/* Now for TX interrupt coalescing */
+ if(tx_pkt_rate < 800){
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0x0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ tx_pkt_size = tx_data_rate/tx_pkt_rate;
+ if (tx_pkt_size < 128){
+
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
+
+ if(coal_conf->tx_coal_type != LOW_COALESCE){
+ coal_conf->tx_timeout = 1;
+ coal_conf->tx_event_count = 2;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = LOW_COALESCE;
+
+ }
+ }
+ else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
+
+ if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
+ coal_conf->tx_timeout = 2;
+ coal_conf->tx_event_count = 5;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+		else if(tx_pkt_size >= 1024){
+			if(coal_conf->tx_coal_type != HIGH_COALESCE){
+				coal_conf->tx_timeout = 4;
+				coal_conf->tx_event_count = 8;
+				amd8111e_set_coalesce(dev,TX_INTR_COAL);
+				coal_conf->tx_coal_type = HIGH_COALESCE;
+			}
+		}
+ }
+ return 0;
+
+}
+/*
+This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
+*/
+static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+
+ struct net_device * dev = (struct net_device *) dev_id;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned int intr0;
+ unsigned int handled = 1;
+
+ if(dev == NULL)
+ return IRQ_NONE;
+
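+	/* regs is NULL when we are called from amd8111e_poll() below; that path
+	   already runs with local interrupts disabled, so the lock is only taken
+	   for real hardware interrupts. */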
+ if (regs) spin_lock (&lp->lock);
+ /* disabling interrupt */
+ writel(INTREN, mmio + CMD0);
+
+ /* Read interrupt status */
+ intr0 = readl(mmio + INT0);
+
+ /* Process all the INT event until INTR bit is clear. */
+
+ if (!(intr0 & INTR)){
+ handled = 0;
+ goto err_no_interrupt;
+ }
+
+ /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
+ writel(intr0, mmio + INT0);
+
+ /* Check if Receive Interrupt has occurred. */
+#ifdef CONFIG_AMD8111E_NAPI
+ if(intr0 & RINT0){
+ if(netif_rx_schedule_prep(dev)){
+			/* Disable receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ /* Schedule a polling routine */
+ __netif_rx_schedule(dev);
+ }
+		else {
+			printk("************Driver bug! interrupt while in poll\n");
+			/* Clear the receive interrupt anyway */
+			writel(RINT0, mmio + INT0);
+		}
+ }
+#else
+ if(intr0 & RINT0){
+ amd8111e_rx(dev);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ }
+#endif /* CONFIG_AMD8111E_NAPI */
+ /* Check if Transmit Interrupt has occurred. */
+ if(intr0 & TINT0)
+ amd8111e_tx(dev);
+
+ /* Check if Link Change Interrupt has occurred. */
+ if (intr0 & LCINT)
+ amd8111e_link_change(dev);
+
+ /* Check if Hardware Timer Interrupt has occurred. */
+ if (intr0 & STINT)
+ amd8111e_calc_coalesce(dev);
+
+err_no_interrupt:
+ writel( VAL0 | INTREN,mmio + CMD0);
+
+ if (regs) spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void amd8111e_poll(struct net_device *dev)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ local_irq_disable();
+ amd8111e_interrupt(0, dev, NULL);
+ local_irq_restore(flags);
+}
+#endif
+
+
+/*
+This function closes the network interface and updates the statistics so that the most recent statistics will be available after the interface is brought down.
+*/
+static int amd8111e_close(struct net_device * dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_disable_interrupt(lp);
+ amd8111e_stop_chip(lp);
+ amd8111e_free_ring(lp);
+
+ netif_carrier_off(lp->amd8111e_net_dev);
+
+ /* Delete ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+
+ spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics before closing */
+ amd8111e_get_stats(dev);
+ lp->opened = 0;
+ return 0;
+}
+/* This function opens a new interface. It requests an IRQ for the device, initializes the device, buffers and descriptors, and starts the device.
+*/
+static int amd8111e_open(struct net_device * dev )
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
+ dev->name, dev))
+ return -EAGAIN;
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_init_hw_default(lp);
+
+ if(amd8111e_restart(dev)){
+ spin_unlock_irq(&lp->lock);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+ /* Start ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ add_timer(&lp->ipg_data.ipg_timer);
+ printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+ }
+
+ lp->opened = 1;
+
+ spin_unlock_irq(&lp->lock);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+/*
+This function checks whether there are any transmit descriptors available to queue more packets.
+*/
+static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+{
+ int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
+ if(lp->tx_skbuff[tx_index] != 0)
+ return -1;
+ else
+ return 0;
+
+}
+/*
+This function queues a transmit packet on the descriptor ring and triggers the send operation. It initializes the transmit descriptor with the buffer physical address, byte count, ownership to hardware, etc.
+*/
+
+static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
+
+ lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
+
+ lp->tx_skbuff[tx_index] = skb;
+ lp->tx_ring[tx_index].tx_flags = 0;
+
+#if AMD8111E_VLAN_TAG_USED
+ if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
+ lp->tx_ring[tx_index].tag_ctrl_cmd |=
+ cpu_to_le16(TCC_VLAN_INSERT);
+ lp->tx_ring[tx_index].tag_ctrl_info =
+ cpu_to_le16(vlan_tx_tag_get(skb));
+
+ }
+#endif
+ lp->tx_dma_addr[tx_index] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[tx_index].buff_phy_addr =
+ (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);
+
+ /* Set FCS and LTINT bits */
+ wmb();
+ lp->tx_ring[tx_index].tx_flags |=
+ cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
+
+ lp->tx_idx++;
+
+ /* Trigger an immediate send poll. */
+ writel( VAL1 | TDMD0, lp->mmio + CMD0);
+ writel( VAL2 | RDMD0,lp->mmio + CMD0);
+
+ dev->trans_start = jiffies;
+
+ if(amd8111e_tx_queue_avail(lp) < 0){
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+/*
+This function dumps the relevant memory-mapped registers of the device into the supplied buffer (used by the ethtool register dump).
+*/
+static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
+{
+ void __iomem *mmio = lp->mmio;
+ /* Read only necessary registers */
+ buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
+ buf[1] = readl(mmio + XMT_RING_LEN0);
+ buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
+ buf[3] = readl(mmio + RCV_RING_LEN0);
+ buf[4] = readl(mmio + CMD0);
+ buf[5] = readl(mmio + CMD2);
+ buf[6] = readl(mmio + CMD3);
+ buf[7] = readl(mmio + CMD7);
+ buf[8] = readl(mmio + INT0);
+ buf[9] = readl(mmio + INTEN0);
+ buf[10] = readl(mmio + LADRF);
+ buf[11] = readl(mmio + LADRF+4);
+ buf[12] = readl(mmio + STAT0);
+}
+
+/*
+The amd8111e CRC generator implementation is different from the kernel's
+ether_crc() function.
+*/
+static int amd8111e_ether_crc(int len, char* mac_addr)
+{
+ int i,byte;
+ unsigned char octet;
+ u32 crc= INITCRC;
+
+ for(byte=0; byte < len; byte++){
+ octet = mac_addr[byte];
+ for( i=0;i < 8; i++){
+			/* If the next bit from the input stream is 1, subtract the divisor (CRC32) from the dividend (crc). */
+ if( (octet & 0x1) ^ (crc & 0x1) ){
+ crc >>= 1;
+ crc ^= CRC32;
+ }
+ else
+ crc >>= 1;
+
+ octet >>= 1;
+ }
+ }
+ return crc;
+}
+/*
+This function sets promiscuous mode, all-multi mode or the multicast address
+list on the device.
+*/
+static void amd8111e_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list* mc_ptr;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 mc_filter[2] ;
+ int i,bit_num;
+ if(dev->flags & IFF_PROMISC){
+ printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
+ writel( VAL2 | PROM, lp->mmio + CMD2);
+ return;
+ }
+ else
+ writel( PROM, lp->mmio + CMD2);
+ if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+		/* receive all multicast packets */
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ lp->mc_list = dev->mc_list;
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+ return;
+ }
+ if( dev->mc_count == 0 ){
+ /* get only own packets */
+ mc_filter[1] = mc_filter[0] = 0;
+ lp->mc_list = NULL;
+ lp->options &= ~OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+		/* disable promiscuous mode */
+ writel(PROM, lp->mmio + CMD2);
+ return;
+ }
+	/* load all the multicast addresses in the logical address filter */
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ lp->mc_list = dev->mc_list;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
+ i++, mc_ptr = mc_ptr->next) {
+ bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
+ mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
+ }
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD2);
+
+}
+
+static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct pci_dev *pci_dev = lp->pci_dev;
+ strcpy (info->driver, MODULE_NAME);
+ strcpy (info->version, MODULE_VERS);
+ sprintf(info->fw_version,"%u",chip_version);
+ strcpy (info->bus_info, pci_name(pci_dev));
+}
+
+static int amd8111e_get_regs_len(struct net_device *dev)
+{
+ return AMD8111E_REG_DUMP_LEN;
+}
+
+static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ regs->version = 0;
+ amd8111e_read_regs(lp, buf);
+}
+
+static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ mii_ethtool_gset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&lp->lock);
+ res = mii_ethtool_sset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return res;
+}
+
+static int amd8111e_nway_reset(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_nway_restart(&lp->mii_if);
+}
+
+static u32 amd8111e_get_link(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_link_ok(&lp->mii_if);
+}
+
+static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ wol_info->supported = WAKE_MAGIC|WAKE_PHY;
+ if (lp->options & OPTION_WOL_ENABLE)
+ wol_info->wolopts = WAKE_MAGIC;
+}
+
+static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
+ return -EINVAL;
+ spin_lock_irq(&lp->lock);
+ if (wol_info->wolopts & WAKE_MAGIC)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
+ else if(wol_info->wolopts & WAKE_PHY)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
+ else
+ lp->options &= ~OPTION_WOL_ENABLE;
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = amd8111e_get_drvinfo,
+ .get_regs_len = amd8111e_get_regs_len,
+ .get_regs = amd8111e_get_regs,
+ .get_settings = amd8111e_get_settings,
+ .set_settings = amd8111e_set_settings,
+ .nway_reset = amd8111e_nway_reset,
+ .get_link = amd8111e_get_link,
+ .get_wol = amd8111e_get_wol,
+ .set_wol = amd8111e_set_wol,
+};
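+
+/*
+For illustration: these operations back the standard ethtool commands, e.g.
+"ethtool eth0" (get_settings), "ethtool -d eth0" (register dump via
+amd8111e_get_regs) and "ethtool -s eth0 speed 100 duplex full" (set_settings).
+*/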
+
+/*
+This function handles the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG), which are used to read and write the external PHY registers. The ethtool operations themselves are handled through the ethtool_ops structure above.
+*/
+
+static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+ u32 mii_regval;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = lp->ext_phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_read_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
+ spin_unlock_irq(&lp->lock);
+
+ data->val_out = mii_regval;
+ return err;
+
+ case SIOCSMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_write_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
+ spin_unlock_irq(&lp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+static int amd8111e_set_mac_address(struct net_device *dev, void *p)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ spin_lock_irq(&lp->lock);
+ /* Setting the MAC address to the device */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ writeb( dev->dev_addr[i], lp->mmio + PADR + i );
+
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+/*
+This function changes the MTU of the device. It restarts the device to reinitialize the descriptors with new receive buffers.
+*/
+static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+ if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+		/* new_mtu will be used when the device starts next time */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ spin_lock_irq(&lp->lock);
+
+ /* stop the chip */
+ writel(RUN, lp->mmio + CMD0);
+
+ dev->mtu = new_mtu;
+
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_start_queue(dev);
+ return err;
+}
+
+#if AMD8111E_VLAN_TAG_USED
+static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ lp->vlgrp = grp;
+ spin_unlock_irq(&lp->lock);
+}
+
+static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ if (lp->vlgrp)
+ lp->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock_irq(&lp->lock);
+}
+#endif
+static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+{
+ writel( VAL1|MPPLBA, lp->mmio + CMD3);
+ writel( VAL0|MPEN_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+{
+
+	/* Adapter is already stopped/suspended/interrupt-disabled */
+ writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+/* This function is called when a packet transmission fails to complete within a reasonable period, on the assumption that an interrupt has been lost or the interface is locked up. It reinitializes the hardware. */
+
+static void amd8111e_tx_timeout(struct net_device *dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ int err;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n",
+ dev->name);
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_wake_queue(dev);
+}
+static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* disable the interrupt */
+ spin_lock_irq(&lp->lock);
+ amd8111e_disable_interrupt(lp);
+ spin_unlock_irq(&lp->lock);
+
+ netif_device_detach(dev);
+
+ /* stop chip */
+ spin_lock_irq(&lp->lock);
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+ amd8111e_stop_chip(lp);
+ spin_unlock_irq(&lp->lock);
+
+ if(lp->options & OPTION_WOL_ENABLE){
+ /* enable wol */
+ if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
+ amd8111e_enable_magicpkt(lp);
+ if(lp->options & OPTION_WAKE_PHY_ENABLE)
+ amd8111e_enable_link_change(lp);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 1);
+ pci_enable_wake(pci_dev, PCI_D3cold, 1);
+
+ }
+ else{
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0);
+ }
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D3hot);
+
+ return 0;
+}
+static int amd8111e_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
+
+ netif_device_attach(dev);
+
+ spin_lock_irq(&lp->lock);
+ amd8111e_restart(dev);
+ /* Restart ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ mod_timer(&lp->ipg_data.ipg_timer,
+ jiffies + IPG_CONVERGE_JIFFIES);
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+
+static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ if (dev) {
+ unregister_netdev(dev);
+ iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+static void amd8111e_config_ipg(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct ipg_info* ipg_data = &lp->ipg_data;
+ void __iomem *mmio = lp->mmio;
+ unsigned int prev_col_cnt = ipg_data->col_cnt;
+ unsigned int total_col_cnt;
+ unsigned int tmp_ipg;
+
+ if(lp->link_config.duplex == DUPLEX_FULL){
+ ipg_data->ipg = DEFAULT_IPG;
+ return;
+ }
+
+ if(ipg_data->ipg_state == SSTATE){
+
+ if(ipg_data->timer_tick == IPG_STABLE_TIME){
+
+ ipg_data->timer_tick = 0;
+ ipg_data->ipg = MIN_IPG - IPG_STEP;
+ ipg_data->current_ipg = MIN_IPG;
+ ipg_data->diff_col_cnt = 0xFFFFFFFF;
+ ipg_data->ipg_state = CSTATE;
+ }
+ else
+ ipg_data->timer_tick++;
+ }
+
+ if(ipg_data->ipg_state == CSTATE){
+
+ /* Get the current collision count */
+
+ total_col_cnt = ipg_data->col_cnt =
+ amd8111e_read_mib(mmio, xmt_collisions);
+
+ if ((total_col_cnt - prev_col_cnt) <
+ (ipg_data->diff_col_cnt)){
+
+ ipg_data->diff_col_cnt =
+ total_col_cnt - prev_col_cnt ;
+
+ ipg_data->ipg = ipg_data->current_ipg;
+ }
+
+ ipg_data->current_ipg += IPG_STEP;
+
+ if (ipg_data->current_ipg <= MAX_IPG)
+ tmp_ipg = ipg_data->current_ipg;
+ else{
+ tmp_ipg = ipg_data->ipg;
+ ipg_data->ipg_state = SSTATE;
+ }
+ writew((u32)tmp_ipg, mmio + IPG);
+ writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
+ }
+ mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+ return;
+
+}
+
+static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0x1e; i >= 0; i--) {
+ u32 id1, id2;
+
+ if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
+ continue;
+ if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
+ continue;
+ lp->ext_phy_id = (id1 << 16) | id2;
+ lp->ext_phy_addr = i;
+ return;
+ }
+ lp->ext_phy_id = 0;
+ lp->ext_phy_addr = 1;
+}
+
+static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err,i,pm_cap;
+ unsigned long reg_addr,reg_len;
+ struct amd8111e_priv* lp;
+ struct net_device* dev;
+
+ err = pci_enable_device(pdev);
+ if(err){
+		printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
+			"exiting.\n");
+ return err;
+ }
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
+		printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
+			"exiting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, MODULE_NAME);
+ if(err){
+ printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
+ "exiting.\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
+ printk(KERN_ERR "amd8111e: No Power Management capability, "
+ "exiting.\n");
+ goto err_free_reg;
+ }
+
+ /* Initialize DMA */
+ if(!pci_dma_supported(pdev, 0xffffffff)){
+		printk(KERN_ERR "amd8111e: DMA not supported, "
+			"exiting.\n");
+ goto err_free_reg;
+ } else
+ pdev->dma_mask = 0xffffffff;
+
+ reg_addr = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(struct amd8111e_priv));
+ if (!dev) {
+ printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
+ err = -ENOMEM;
+ goto err_free_reg;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
+ dev->vlan_rx_register =amd8111e_vlan_rx_register;
+ dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
+#endif
+
+ lp = netdev_priv(dev);
+ lp->pci_dev = pdev;
+ lp->amd8111e_net_dev = dev;
+ lp->pm_cap = pm_cap;
+
+ spin_lock_init(&lp->lock);
+
+ lp->mmio = ioremap(reg_addr, reg_len);
+ if (lp->mmio == 0) {
+ printk(KERN_ERR "amd8111e: Cannot map device registers, "
+ "exiting\n");
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Initializing MAC address */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ dev->dev_addr[i] =readb(lp->mmio + PADR + i);
+
+	/* Setting user-defined parameters */
+ lp->ext_phy_option = speed_duplex[card_idx];
+ if(coalesce[card_idx])
+ lp->options |= OPTION_INTR_COAL_ENABLE;
+ if(dynamic_ipg[card_idx++])
+ lp->options |= OPTION_DYN_IPG_ENABLE;
+
+ /* Initialize driver entry points */
+ dev->open = amd8111e_open;
+ dev->hard_start_xmit = amd8111e_start_xmit;
+ dev->stop = amd8111e_close;
+ dev->get_stats = amd8111e_get_stats;
+ dev->set_multicast_list = amd8111e_set_multicast_list;
+ dev->set_mac_address = amd8111e_set_mac_address;
+ dev->do_ioctl = amd8111e_ioctl;
+ dev->change_mtu = amd8111e_change_mtu;
+ SET_ETHTOOL_OPS(dev, &ops);
+ dev->irq =pdev->irq;
+ dev->tx_timeout = amd8111e_tx_timeout;
+ dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+#ifdef CONFIG_AMD8111E_NAPI
+ dev->poll = amd8111e_rx_poll;
+ dev->weight = 32;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = amd8111e_poll;
+#endif
+
+#if AMD8111E_VLAN_TAG_USED
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register =amd8111e_vlan_rx_register;
+ dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
+#endif
+ /* Probe the external PHY */
+ amd8111e_probe_ext_phy(dev);
+
+ /* setting mii default values */
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = amd8111e_mdio_read;
+ lp->mii_if.mdio_write = amd8111e_mdio_write;
+ lp->mii_if.phy_id = lp->ext_phy_addr;
+
+ /* Set receive buffer length and set jumbo option*/
+ amd8111e_set_rx_buff_len(dev);
+
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "amd8111e: Cannot register net device, "
+ "exiting.\n");
+ goto err_iounmap;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* Initialize software ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ init_timer(&lp->ipg_data.ipg_timer);
+ lp->ipg_data.ipg_timer.data = (unsigned long) dev;
+ lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
+ lp->ipg_data.ipg_timer.expires = jiffies +
+ IPG_CONVERGE_JIFFIES;
+ lp->ipg_data.ipg = DEFAULT_IPG;
+ lp->ipg_data.ipg_state = CSTATE;
+	}
+
+ /* display driver and device information */
+
+ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
+ printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERS);
+ printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
+ printk( "\n");
+ if (lp->ext_phy_id)
+ printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
+ dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+ else
+ printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
+ dev->name);
+ return 0;
+err_iounmap:
+ iounmap(lp->mmio);
+
+err_free_dev:
+ free_netdev(dev);
+
+err_free_reg:
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+
+}
+
+static struct pci_driver amd8111e_driver = {
+ .name = MODULE_NAME,
+ .id_table = amd8111e_pci_tbl,
+ .probe = amd8111e_probe_one,
+ .remove = __devexit_p(amd8111e_remove_one),
+ .suspend = amd8111e_suspend,
+ .resume = amd8111e_resume
+};
+
+static int __init amd8111e_init(void)
+{
+ return pci_module_init(&amd8111e_driver);
+}
+
+static void __exit amd8111e_cleanup(void)
+{
+ pci_unregister_driver(&amd8111e_driver);
+}
+
+module_init(amd8111e_init);
+module_exit(amd8111e_cleanup);
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
new file mode 100755
index 000000000000..cfe3a4298822
--- /dev/null
+++ b/drivers/net/amd8111e.h
@@ -0,0 +1,823 @@
+/*
+ * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2003 Advanced Micro Devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+
+Module Name:
+
+ amd8111e.h
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller driver definitions.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+*/
+
+#ifndef _AMD811E_H
+#define _AMD811E_H
+
+/* Command style register access
+
+Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write access technique called command style access. It allows writing to selected bits of a register without altering the bits that are not selected. Command style registers are divided into 4 bytes that can be written independently. The most significant bit of each byte is the value bit; it specifies the value that will be written into the selected bits of the register.
+
+e.g., if the value 10011010b is written into the least significant byte of a command style register, bits 1, 3 and 4 of the register will be set to 1, and the other bits will not be altered. If the value 00011010b is written into the same byte, bits 1, 3 and 4 will be cleared to 0 and the other bits will not be altered.
+
+*/
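+
+/*
+For illustration (not compiled), assuming mmio is the ioremapped register
+base, the INTREN bit of CMD0 can be set or cleared without touching its
+neighbours:
+
+	writel(VAL0 | INTREN, mmio + CMD0);	sets INTREN (value bit = 1)
+	writel(INTREN, mmio + CMD0);		clears INTREN (value bit = 0)
+
+This is how amd8111e_interrupt() masks and re-enables device interrupts.
+*/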
+
+/* Offset for Memory Mapped Registers. */
+/* 32 bit registers */
+
+#define ASF_STAT 0x00 /* ASF status register */
+#define CHIPID			0x04   /* Chip ID register */
+#define MIB_DATA 0x10 /* MIB data register */
+#define MIB_ADDR 0x14 /* MIB address register */
+#define STAT0 0x30 /* Status0 register */
+#define INT0 0x38 /* Interrupt0 register */
+#define INTEN0 0x40 /* Interrupt0 enable register*/
+#define CMD0 0x48 /* Command0 register */
+#define CMD2 0x50 /* Command2 register */
+#define CMD3			0x54   /* Command3 register */
+#define CMD7 0x64 /* Command7 register */
+
+#define CTRL1 0x6C /* Control1 register */
+#define CTRL2 0x70 /* Control2 register */
+
+#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
+
+#define AUTOPOLL0 0x88 /* Auto-poll0 register */
+#define AUTOPOLL1 0x8A /* Auto-poll1 register */
+#define AUTOPOLL2 0x8C /* Auto-poll2 register */
+#define AUTOPOLL3 0x8E /* Auto-poll3 register */
+#define AUTOPOLL4 0x90 /* Auto-poll4 register */
+#define AUTOPOLL5 0x92 /* Auto-poll5 register */
+
+#define AP_VALUE 0x98 /* Auto-poll value register */
+#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
+#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
+
+#define FLOW_CONTROL 0xC8 /* Flow control register */
+#define PHY_ACCESS 0xD0 /* PHY access register */
+
+#define STVAL 0xD8 /* Software timer value register */
+
+#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
+#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
+#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
+#define XMT_RING_BASE_ADDR3	0x118	/* Transmit ring3 base addr register */
+
+#define RCV_RING_BASE_ADDR0	0x120	/* Receive ring0 base addr register */
+
+#define PMAT0 0x190 /* OnNow pattern register0 */
+#define PMAT1 0x194 /* OnNow pattern register1 */
+
+/* 16bit registers */
+
+#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
+#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
+#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
+#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
+
+#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
+
+#define SRAM_SIZE 0x178 /* SRAM size register */
+#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
+
+/* 48bit register */
+
+#define PADR 0x160 /* Physical address register */
+
+#define IFS1 0x18C /* Inter-frame spacing Part1 register */
+#define IFS 0x18D /* Inter-frame spacing register */
+#define IPG 0x18E /* Inter-frame gap register */
+/* 64bit register */
+
+#define LADRF 0x168 /* Logical address filter register */
+
+
+/* Register Bit Definitions */
+typedef enum {
+
+ ASF_INIT_DONE = (1 << 1),
+ ASF_INIT_PRESENT = (1 << 0),
+
+}STAT_ASF_BITS;
+
+typedef enum {
+
+ MIB_CMD_ACTIVE = (1 << 15 ),
+ MIB_RD_CMD = (1 << 13 ),
+ MIB_CLEAR = (1 << 12 ),
+ MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
+ (1 << 4) | (1 << 5),
+}MIB_ADDR_BITS;
+
+
+typedef enum {
+
+ PMAT_DET = (1 << 12),
+ MP_DET = (1 << 11),
+ LC_DET = (1 << 10),
+ SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
+ FULL_DPLX = (1 << 6),
+ LINK_STATS = (1 << 5),
+ AUTONEG_COMPLETE = (1 << 4),
+ MIIPD = (1 << 3),
+ RX_SUSPENDED = (1 << 2),
+ TX_SUSPENDED = (1 << 1),
+ RUNNING = (1 << 0),
+
+}STAT0_BITS;
+
+#define PHY_SPEED_10 0x2
+#define PHY_SPEED_100 0x3
+
+/* INT0 0x38, 32bit register */
+typedef enum {
+
+ INTR = (1 << 31),
+ PCSINT = (1 << 28),
+ LCINT = (1 << 27),
+ APINT5 = (1 << 26),
+ APINT4 = (1 << 25),
+ APINT3 = (1 << 24),
+ TINT_SUM = (1 << 23),
+ APINT2 = (1 << 22),
+ APINT1 = (1 << 21),
+ APINT0 = (1 << 20),
+ MIIPDTINT = (1 << 19),
+ MCCINT = (1 << 17),
+ MREINT = (1 << 16),
+ RINT_SUM = (1 << 15),
+ SPNDINT = (1 << 14),
+ MPINT = (1 << 13),
+ SINT = (1 << 12),
+ TINT3 = (1 << 11),
+ TINT2 = (1 << 10),
+ TINT1 = (1 << 9),
+ TINT0 = (1 << 8),
+ UINT = (1 << 7),
+ STINT = (1 << 4),
+ RINT0 = (1 << 0),
+
+}INT0_BITS;
+
+typedef enum {
+
+ VAL3 = (1 << 31), /* VAL bit for byte 3 */
+ VAL2 = (1 << 23), /* VAL bit for byte 2 */
+ VAL1 = (1 << 15), /* VAL bit for byte 1 */
+ VAL0 = (1 << 7), /* VAL bit for byte 0 */
+
+}VAL_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ LCINTEN = (1 << 27),
+ APINT5EN = (1 << 26),
+ APINT4EN = (1 << 25),
+ APINT3EN = (1 << 24),
+ /* VAL2 */
+ APINT2EN = (1 << 22),
+ APINT1EN = (1 << 21),
+ APINT0EN = (1 << 20),
+ MIIPDTINTEN = (1 << 19),
+ MCCIINTEN = (1 << 18),
+ MCCINTEN = (1 << 17),
+ MREINTEN = (1 << 16),
+ /* VAL1 */
+ SPNDINTEN = (1 << 14),
+ MPINTEN = (1 << 13),
+ TINTEN3 = (1 << 11),
+ SINTEN = (1 << 12),
+ TINTEN2 = (1 << 10),
+ TINTEN1 = (1 << 9),
+ TINTEN0 = (1 << 8),
+ /* VAL0 */
+ STINTEN = (1 << 4),
+ RINTEN0 = (1 << 0),
+
+ INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
+
+}INTEN0_BITS;
+
+typedef enum {
+ /* VAL2 */
+ RDMD0 = (1 << 16),
+ /* VAL1 */
+ TDMD3 = (1 << 11),
+ TDMD2 = (1 << 10),
+ TDMD1 = (1 << 9),
+ TDMD0 = (1 << 8),
+ /* VAL0 */
+ UINTCMD = (1 << 6),
+ RX_FAST_SPND = (1 << 5),
+ TX_FAST_SPND = (1 << 4),
+ RX_SPND = (1 << 3),
+ TX_SPND = (1 << 2),
+ INTREN = (1 << 1),
+ RUN = (1 << 0),
+
+ CMD0_CLEAR = 0x000F0F7F, /* Command style register */
+
+}CMD0_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ CONDUIT_MODE = (1 << 29),
+ /* VAL2 */
+ RPA = (1 << 19),
+ DRCVPA = (1 << 18),
+ DRCVBC = (1 << 17),
+ PROM = (1 << 16),
+ /* VAL1 */
+ ASTRP_RCV = (1 << 13),
+ RCV_DROP0 = (1 << 12),
+ EMBA = (1 << 11),
+ DXMT2PD = (1 << 10),
+ LTINTEN = (1 << 9),
+ DXMTFCS = (1 << 8),
+ /* VAL0 */
+ APAD_XMT = (1 << 6),
+ DRTY = (1 << 5),
+ INLOOP = (1 << 4),
+ EXLOOP = (1 << 3),
+ REX_RTRY = (1 << 2),
+ REX_UFLO = (1 << 1),
+ REX_LCOL = (1 << 0),
+
+ CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
+
+}CMD2_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ ASF_INIT_DONE_ALIAS = (1 << 29),
+ /* VAL2 */
+ JUMBO = (1 << 21),
+ VSIZE = (1 << 20),
+ VLONLY = (1 << 19),
+ VL_TAG_DEL = (1 << 18),
+ /* VAL1 */
+ EN_PMGR = (1 << 14),
+ INTLEVEL = (1 << 13),
+ FORCE_FULL_DUPLEX = (1 << 12),
+ FORCE_LINK_STATUS = (1 << 11),
+ APEP = (1 << 10),
+ MPPLBA = (1 << 9),
+ /* VAL0 */
+ RESET_PHY_PULSE = (1 << 2),
+ RESET_PHY = (1 << 1),
+ PHY_RST_POL = (1 << 0),
+
+}CMD3_BITS;
+
+
+typedef enum {
+
+ /* VAL0 */
+ PMAT_SAVE_MATCH = (1 << 4),
+ PMAT_MODE = (1 << 3),
+ MPEN_SW = (1 << 1),
+ LCMODE_SW = (1 << 0),
+
+ CMD7_CLEAR = 0x0000001B /* Command style register */
+
+}CMD7_BITS;
+
+
+typedef enum {
+
+ RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
+ XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
+ XMTSP_128 = (1 << 9), /* 9 */
+ XMTSP_64 = (1 << 8),
+ CACHE_ALIGN = (1 << 4),
+ BURST_LIMIT_MASK = (0xF << 0 ),
+ CTRL1_DEFAULT = 0x00010111,
+
+}CTRL1_BITS;
+
+typedef enum {
+
+ FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
+ XPHYRST = (1 << 7),
+ XPHYANE = (1 << 6),
+ XPHYFD = (1 << 5),
+ XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
+ APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
+
+}CTRL2_BITS;
+
+/* XMT_RING_LIMIT 0x7C, 32bit register */
+typedef enum {
+
+ XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
+ XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
+ XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
+
+}XMT_RING_LIMIT_BITS;
+
+typedef enum {
+
+ AP_REG0_EN = (1 << 15),
+ AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL0_BITS;
+
+/* AUTOPOLL1 0x8A, 16bit register */
+typedef enum {
+
+ AP_REG1_EN = (1 << 15),
+ AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP1 = (1 << 6),
+ AP_PHY1_DFLT = (1 << 5),
+ AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL1_BITS;
+
+
+typedef enum {
+
+ AP_REG2_EN = (1 << 15),
+ AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP2 = (1 << 6),
+ AP_PHY2_DFLT = (1 << 5),
+ AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL2_BITS;
+
+typedef enum {
+
+ AP_REG3_EN = (1 << 15),
+ AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP3 = (1 << 6),
+ AP_PHY3_DFLT = (1 << 5),
+ AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL3_BITS;
+
+
+typedef enum {
+
+ AP_REG4_EN = (1 << 15),
+ AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP4 = (1 << 6),
+ AP_PHY4_DFLT = (1 << 5),
+ AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL4_BITS;
+
+
+typedef enum {
+
+ AP_REG5_EN = (1 << 15),
+ AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP5 = (1 << 6),
+ AP_PHY5_DFLT = (1 << 5),
+ AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL5_BITS;
+
+
+
+
+/* AP_VALUE 0x98, 32bit register */
+typedef enum {
+
+ AP_VAL_ACTIVE = (1 << 31),
+ AP_VAL_RD_CMD = ( 1 << 29),
+ AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
+ AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
+ (0xF << 12), /* 15:0 */
+
+}AP_VALUE_BITS;
+
+typedef enum {
+
+ DLY_INT_A_R3 = (1 << 31),
+ DLY_INT_A_R2 = (1 << 30),
+ DLY_INT_A_R1 = (1 << 29),
+ DLY_INT_A_R0 = (1 << 28),
+ DLY_INT_A_T3 = (1 << 27),
+ DLY_INT_A_T2 = (1 << 26),
+ DLY_INT_A_T1 = (1 << 25),
+ DLY_INT_A_T0 = ( 1 << 24),
+ EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+
+}DLY_INT_A_BITS;
+
+typedef enum {
+
+ DLY_INT_B_R3 = (1 << 31),
+ DLY_INT_B_R2 = (1 << 30),
+ DLY_INT_B_R1 = (1 << 29),
+ DLY_INT_B_R0 = (1 << 28),
+ DLY_INT_B_T3 = (1 << 27),
+ DLY_INT_B_T2 = (1 << 26),
+ DLY_INT_B_T1 = (1 << 25),
+ DLY_INT_B_T0 = ( 1 << 24),
+ EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+}DLY_INT_B_BITS;
+
+
+/* FLOW_CONTROL 0xC8, 32bit register */
+typedef enum {
+
+ PAUSE_LEN_CHG = (1 << 30),
+ FTPE = (1 << 22),
+ FRPE = (1 << 21),
+ NAPA = (1 << 20),
+ NPA = (1 << 19),
+ FIXP = ( 1 << 18),
+ FCCMD = ( 1 << 16),
+ PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
+
+}FLOW_CONTROL_BITS;
+
+/* PHY_ ACCESS 0xD0, 32bit register */
+typedef enum {
+
+ PHY_CMD_ACTIVE = (1 << 31),
+ PHY_WR_CMD = (1 << 30),
+ PHY_RD_CMD = (1 << 29),
+ PHY_RD_ERR = (1 << 28),
+ PHY_PRE_SUP = (1 << 27),
+ PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
+ (1 << 24) |(1 << 25),/* 25:21 */
+ PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
+ PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
+ (0xF << 12),/* 15:0 */
+
+}PHY_ACCESS_BITS;
+
+
+/* PMAT0 0x190, 32bit register */
+typedef enum {
+ PMR_ACTIVE = (1 << 31),
+ PMR_WR_CMD = (1 << 30),
+ PMR_RD_CMD = (1 << 29),
+ PMR_BANK = (1 <<28),
+ PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
+ (1 << 22),/* 22:16 */
+ PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */
+}PMAT0_BITS;
+
+
+/* PMAT1 0x194, 32bit register */
+typedef enum {
+ PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
+ PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
+ PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
+ PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
+}PMAT1_BITS;
+
+/************************************************************************/
+/* */
+/* MIB counter definitions */
+/* */
+/************************************************************************/
+
+#define rcv_miss_pkts 0x00
+#define rcv_octets 0x01
+#define rcv_broadcast_pkts 0x02
+#define rcv_multicast_pkts 0x03
+#define rcv_undersize_pkts 0x04
+#define rcv_oversize_pkts 0x05
+#define rcv_fragments 0x06
+#define rcv_jabbers 0x07
+#define rcv_unicast_pkts 0x08
+#define rcv_alignment_errors 0x09
+#define rcv_fcs_errors 0x0A
+#define rcv_good_octets 0x0B
+#define rcv_mac_ctrl 0x0C
+#define rcv_flow_ctrl 0x0D
+#define rcv_pkts_64_octets 0x0E
+#define rcv_pkts_65to127_octets 0x0F
+#define rcv_pkts_128to255_octets 0x10
+#define rcv_pkts_256to511_octets 0x11
+#define rcv_pkts_512to1023_octets 0x12
+#define rcv_pkts_1024to1518_octets 0x13
+#define rcv_unsupported_opcode 0x14
+#define rcv_symbol_errors 0x15
+#define rcv_drop_pkts_ring1 0x16
+#define rcv_drop_pkts_ring2 0x17
+#define rcv_drop_pkts_ring3 0x18
+#define rcv_drop_pkts_ring4 0x19
+#define rcv_jumbo_pkts 0x1A
+
+#define xmt_underrun_pkts 0x20
+#define xmt_octets 0x21
+#define xmt_packets 0x22
+#define xmt_broadcast_pkts 0x23
+#define xmt_multicast_pkts 0x24
+#define xmt_collisions 0x25
+#define xmt_unicast_pkts 0x26
+#define xmt_one_collision 0x27
+#define xmt_multiple_collision 0x28
+#define xmt_deferred_transmit 0x29
+#define xmt_late_collision 0x2A
+#define xmt_excessive_defer 0x2B
+#define xmt_loss_carrier 0x2C
+#define xmt_excessive_collision 0x2D
+#define xmt_back_pressure 0x2E
+#define xmt_flow_ctrl 0x2F
+#define xmt_pkts_64_octets 0x30
+#define xmt_pkts_65to127_octets 0x31
+#define xmt_pkts_128to255_octets 0x32
+#define xmt_pkts_256to511_octets 0x33
+#define xmt_pkts_512to1023_octets 0x34
+#define xmt_pkts_1024to1518_octet 0x35
+#define xmt_oversize_pkts 0x36
+#define xmt_jumbo_pkts 0x37
+
+
+/* Driver definitions */
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
+
+#define MAX_UNITS 8 /* Maximum number of devices possible */
+
+#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
+#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
+
+#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
+#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
+
+#define NUM_TX_RING_DR 32
+#define NUM_RX_RING_DR 32
+
+#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
+#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
+
+#define MAX_FILTER_SIZE 64 /* Maximum multicast address */
+#define AMD8111E_MIN_MTU 60
+#define AMD8111E_MAX_MTU 9000
+
+#define PKT_BUFF_SZ 1536
+#define MIN_PKT_LEN 60
+#define ETH_ADDR_LEN 6
+
+#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
+#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
+#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
+ Only 500 usec resolution */
+#define OPTION_VLAN_ENABLE 0x0001
+#define OPTION_JUMBO_ENABLE 0x0002
+#define OPTION_MULTICAST_ENABLE 0x0004
+#define OPTION_WOL_ENABLE 0x0008
+#define OPTION_WAKE_MAGIC_ENABLE 0x0010
+#define OPTION_WAKE_PHY_ENABLE 0x0020
+#define OPTION_INTR_COAL_ENABLE 0x0040
+#define OPTION_DYN_IPG_ENABLE 0x0080
+
+#define PHY_REG_ADDR_MASK 0x1f
+
+/* ipg parameters */
+#define DEFAULT_IPG 0x60
+#define IFS1_DELTA 36
+#define IPG_CONVERGE_JIFFIES (HZ/2)
+#define IPG_STABLE_TIME 5
+#define MIN_IPG 96
+#define MAX_IPG 255
+#define IPG_STEP 16
+#define CSTATE 1
+#define SSTATE 2
+
+/* Assume the controller returns the MIB data within 10 times the maximum processing time */
+#define  REPEAT_CNT	10
+
+/* amd8111e descriptor flag definitions */
+typedef enum {
+
+ OWN_BIT = (1 << 15),
+ ADD_FCS_BIT = (1 << 13),
+ LTINT_BIT = (1 << 12),
+ STP_BIT = (1 << 9),
+ ENP_BIT = (1 << 8),
+ KILL_BIT = (1 << 6),
+ TCC_VLAN_INSERT = (1 << 1),
+ TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
+
+}TX_FLAG_BITS;
+
+typedef enum {
+ ERR_BIT = (1 << 14),
+ FRAM_BIT = (1 << 13),
+ OFLO_BIT = (1 << 12),
+ CRC_BIT = (1 << 11),
+ PAM_BIT = (1 << 6),
+ LAFM_BIT = (1 << 5),
+ BAM_BIT = (1 << 4),
+	TT_VLAN_TAGGED	= (1 << 3) |(1 << 2),/* 0x000c */
+ TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */
+
+}RX_FLAG_BITS;
+
+#define RESET_RX_FLAGS 0x0000
+#define TT_MASK 0x000c
+#define TCC_MASK 0x0003
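+
+/*
+Sketch (illustration only, rx_dr is a hypothetical pointer to a received
+descriptor): the tag type of a received frame can be tested with TT_MASK, e.g.
+
+	if ((le16_to_cpu(rx_dr->rx_flags) & TT_MASK) == TT_VLAN_TAGGED)
+		the VLAN tag is available in rx_dr->tag_ctrl_info
+*/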
+
+/* driver ioctl parameters */
+#define AMD8111E_REG_DUMP_LEN	(13 * sizeof(u32))
+
+/* crc generator constants */
+#define CRC32 0xedb88320
+#define INITCRC 0xFFFFFFFF
+
+/* amd8111e descriptor format */
+
+struct amd8111e_tx_dr{
+
+ u16 buff_count; /* Size of the buffer pointed by this descriptor */
+
+ u16 tx_flags;
+
+ u16 tag_ctrl_info;
+
+ u16 tag_ctrl_cmd;
+
+ u32 buff_phy_addr;
+
+ u32 reserved;
+};
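+
+/* All descriptor fields are little-endian; the driver fills them using
+   cpu_to_le16()/cpu_to_le32() conversions (see amd8111e_start_xmit()). */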
+
+struct amd8111e_rx_dr{
+
+ u32 reserved;
+
+ u16 msg_count; /* Received message len */
+
+ u16 tag_ctrl_info;
+
+ u16 buff_count; /* Len of the buffer pointed by descriptor. */
+
+ u16 rx_flags;
+
+ u32 buff_phy_addr;
+
+};
+struct amd8111e_link_config{
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ unsigned long orig_phy_option;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 reserved; /* 32bit alignment */
+};
+
+enum coal_type{
+
+ NO_COALESCE,
+ LOW_COALESCE,
+ MEDIUM_COALESCE,
+ HIGH_COALESCE,
+
+};
+
+enum coal_mode{
+ RX_INTR_COAL,
+ TX_INTR_COAL,
+ DISABLE_COAL,
+ ENABLE_COAL,
+
+};
+#define MAX_TIMEOUT 40
+#define MAX_EVENT_COUNT 31
+struct amd8111e_coalesce_conf{
+
+ unsigned int rx_timeout;
+ unsigned int rx_event_count;
+ unsigned long rx_packets;
+ unsigned long rx_prev_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_prev_bytes;
+ unsigned int rx_coal_type;
+
+ unsigned int tx_timeout;
+ unsigned int tx_event_count;
+ unsigned long tx_packets;
+ unsigned long tx_prev_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_prev_bytes;
+ unsigned int tx_coal_type;
+
+};
+struct ipg_info{
+
+ unsigned int ipg_state;
+ unsigned int ipg;
+ unsigned int current_ipg;
+ unsigned int col_cnt;
+ unsigned int diff_col_cnt;
+ unsigned int timer_tick;
+ unsigned int prev_ipg;
+ struct timer_list ipg_timer;
+};
+
+struct amd8111e_priv{
+
+ struct amd8111e_tx_dr* tx_ring;
+ struct amd8111e_rx_dr* rx_ring;
+ dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */
+ dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */
+ const char *name;
+ struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
+ struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
+	/* Transmit and receive skbs */
+ struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
+ struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
+ /* Transmit and receive dma mapped addr */
+ dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
+ dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
+ /* Reg memory mapped address */
+ void __iomem *mmio;
+
+ spinlock_t lock; /* Guard lock */
+ unsigned long rx_idx, tx_idx; /* The next free ring entry */
+ unsigned long tx_complete_idx;
+ unsigned long tx_ring_complete_idx;
+ unsigned long tx_ring_idx;
+ unsigned int rx_buff_len; /* Buffer length of rx buffers */
+ int options; /* Options enabled/disabled for the device */
+
+ unsigned long ext_phy_option;
+ int ext_phy_addr;
+ u32 ext_phy_id;
+
+ struct amd8111e_link_config link_config;
+ int pm_cap;
+
+ struct net_device *next;
+ int mii;
+ struct mii_if_info mii_if;
+#if AMD8111E_VLAN_TAG_USED
+ struct vlan_group *vlgrp;
+#endif
+ char opened;
+ struct net_device_stats stats;
+ unsigned int drv_rx_errors;
+ struct dev_mc_list* mc_list;
+ struct amd8111e_coalesce_conf coal_conf;
+
+ struct ipg_info ipg_data;
+
+};
+
+/* The kernel-provided writeq does not write 64 bits into the amd8111e device register; instead it writes only the upper 32 bits of the data into the lower 32 bits of the register.
+BUG? */
+#define amd8111e_writeq(_UlData, _memMap)				\
+do {									\
+	writel(*(u32 *)(&_UlData), _memMap);				\
+	writel(*(u32 *)((u8 *)(&_UlData) + 4), (_memMap) + 4);		\
+} while (0)
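+
+/*
+Usage sketch (illustration only): the 64-bit logical address filter is written
+as two 32-bit accesses, as in amd8111e_set_multicast_list():
+
+	u32 mc_filter[2] = { 0, 0 };
+	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+*/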
+
+/* maps the external speed options to internal value */
+typedef enum {
+ SPEED_AUTONEG,
+ SPEED10_HALF,
+ SPEED10_FULL,
+ SPEED100_HALF,
+ SPEED100_FULL,
+}EXT_PHY_OPTION;
+
+static int card_idx;
+static int speed_duplex[MAX_UNITS] = { 0, };
+static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
+static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static unsigned int chip_version;
+
+#endif /* _AMD8111E_H */
+
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
new file mode 100644
index 000000000000..a94216b87184
--- /dev/null
+++ b/drivers/net/apne.c
@@ -0,0 +1,637 @@
+/*
+ * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200
+ *
+ * (C) Copyright 1997 Alain Malek
+ * (Alain.Malek@cryogen.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ne.c: A general non-shared-memory NS8390 ethernet driver for linux
+ * Written 1992-94 by Donald Becker.
+ *
+ * 8390.c: A general NS8390 ethernet driver core for linux.
+ * Written 1992-94 by Donald Becker.
+ *
+ * cnetdevice: A Sana-II ethernet driver for AmigaOS
+ * Written by Bruce Abbott (bhabbott@inhb.co.nz)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/amigayle.h>
+#include <asm/amipcmcia.h>
+
+#include "8390.h"
+
+/* ---- No user-serviceable parts below ---- */
+
+#define DRV_NAME "apne"
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+
+struct net_device * __init apne_probe(int unit);
+static int apne_probe1(struct net_device *dev, int ioaddr);
+
+static int apne_open(struct net_device *dev);
+static int apne_close(struct net_device *dev);
+
+static void apne_reset_8390(struct net_device *dev);
+static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void apne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void apne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static irqreturn_t apne_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+static int init_pcmcia(void);
+
+/* IO base address used for nic */
+
+#define IOBASE 0x300
+
+/*
+ use MANUAL_CONFIG and MANUAL_OFFSET for enabling IO by hand
+ you can find the values to use by looking at the cnet.device
+ config file example (the default values are for the CNET40BC card)
+*/
+
+/*
+#define MANUAL_CONFIG 0x20
+#define MANUAL_OFFSET 0x3f8
+
+#define MANUAL_HWADDR0 0x00
+#define MANUAL_HWADDR1 0x12
+#define MANUAL_HWADDR2 0x34
+#define MANUAL_HWADDR3 0x56
+#define MANUAL_HWADDR4 0x78
+#define MANUAL_HWADDR5 0x9a
+*/
+
+static const char version[] =
+ "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n";
+
+static int apne_owned; /* signal if card already owned */
+
+struct net_device * __init apne_probe(int unit)
+{
+ struct net_device *dev;
+#ifndef MANUAL_CONFIG
+ char tuple[8];
+#endif
+ int err;
+
+ if (apne_owned)
+ return ERR_PTR(-ENODEV);
+
+ if ( !(AMIGAHW_PRESENT(PCMCIA)) )
+ return ERR_PTR(-ENODEV);
+
+ printk("Looking for PCMCIA ethernet card : ");
+
+ /* check if a card is inserted */
+ if (!(PCMCIA_INSERTED)) {
+ printk("NO PCMCIA card inserted\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ /* disable pcmcia irq for readtuple */
+ pcmcia_disable_irq();
+
+#ifndef MANUAL_CONFIG
+ if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
+ (tuple[2] != CISTPL_FUNCID_NETWORK)) {
+ printk("not an ethernet card\n");
+ /* XXX: shouldn't we re-enable irq here? */
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+#endif
+
+ printk("ethernet PCMCIA card inserted\n");
+
+ if (!init_pcmcia()) {
+ /* XXX: shouldn't we re-enable irq here? */
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!request_region(IOBASE, 0x20, DRV_NAME)) {
+ free_netdev(dev);
+ return ERR_PTR(-EBUSY);
+ }
+
+ err = apne_probe1(dev, IOBASE);
+ if (err) {
+ release_region(IOBASE, 0x20);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+
+ pcmcia_disable_irq();
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ pcmcia_reset();
+ release_region(IOBASE, 0x20);
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init apne_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+#ifndef MANUAL_HWADDR0
+ int neX000, ctron;
+#endif
+ static unsigned version_printed;
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("PCMCIA NE*000 ethercard probe");
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ { unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(" not found (no reset ack).\n");
+ return -ENODEV;
+ }
+
+ outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
+ }
+
+#ifndef MANUAL_HWADDR0
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned long value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/
+ {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq. */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD+E8390_START, NE_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) {
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+ }
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ /* At this point, wordlength *only* tells us if the SA_prom is doubled
+ up or not because some broken PCI cards don't respect the byte-wide
+ request in program_seq above, and hence don't have doubled up values.
+ These broken cards would otherwise be detected as an ne1000. */
+
+ if (wordlength == 2)
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+
+ if (wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ outb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
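+ /* NE1000/NE2000 clones have 0x57,0x57 as the last two bytes of the
+  * station address PROM; Cabletron (ctron) boards are recognised by
+  * their 00:00:1d address prefix instead.
+  */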
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+
+ /* Set up the rest of the parameters. */
+ if (neX000) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ } else if (ctron) {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ } else {
+ printk(" not found.\n");
+ return -ENXIO;
+
+ }
+
+#else
+ wordlength = 2;
+ /* We must set the 8390 for word mode. */
+ outb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ SA_prom[0] = MANUAL_HWADDR0;
+ SA_prom[1] = MANUAL_HWADDR1;
+ SA_prom[2] = MANUAL_HWADDR2;
+ SA_prom[3] = MANUAL_HWADDR3;
+ SA_prom[4] = MANUAL_HWADDR4;
+ SA_prom[5] = MANUAL_HWADDR5;
+ name = "NE2000";
+#endif
+
+ dev->base_addr = ioaddr;
+
+ /* Install the Interrupt handler */
+ i = request_irq(IRQ_AMIGA_PORTS, apne_interrupt, SA_SHIRQ, DRV_NAME, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found.\n", dev->name, name);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = &apne_reset_8390;
+ ei_status.block_input = &apne_block_input;
+ ei_status.block_output = &apne_block_output;
+ ei_status.get_8390_hdr = &apne_get_8390_hdr;
+ dev->open = &apne_open;
+ dev->stop = &apne_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+ pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */
+ pcmcia_enable_irq();
+
+ apne_owned = 1;
+
+ return 0;
+}
+
+static int
+apne_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int
+apne_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void
+apne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ init_pcmcia();
+
+ if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+ int cnt;
+ char *ptrc;
+ short *ptrs;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
+ outb(0, nic_base + NE_EN0_RCNTHI);
+ outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + NE_EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16) {
+ ptrs = (short*)hdr;
+ for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
+ *ptrs++ = inw(NE_BASE + NE_DATAPORT);
+ } else {
+ ptrc = (char*)hdr;
+ for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++)
+ *ptrc++ = inb(NE_BASE + NE_DATAPORT);
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void
+apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+ char *ptrc;
+ short *ptrs;
+ int cnt;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ outb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < (count>>1); cnt++)
+ *ptrs++ = inw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ ptrc = (char*)buf;
+ for (cnt = 0; cnt < count; cnt++)
+ *ptrc++ = inb(NE_BASE + NE_DATAPORT);
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+apne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+ char *ptrc;
+ short *ptrs;
+ int cnt;
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ outb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ outb(0x00, nic_base + NE_EN0_RSARLO);
+ outb(start_page, nic_base + NE_EN0_RSARHI);
+
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < count>>1; cnt++)
+ outw(*ptrs++, NE_BASE+NE_DATAPORT);
+ } else {
+ ptrc = (char*)buf;
+ for (cnt = 0; cnt < count; cnt++)
+ outb(*ptrc++, NE_BASE + NE_DATAPORT);
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ apne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+static irqreturn_t apne_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned char pcmcia_intreq;
+
+ if (!(gayle.inten & GAYLE_IRQ_IRQ))
+ return IRQ_NONE;
+
+ pcmcia_intreq = pcmcia_get_intreq();
+
+ if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) {
+ pcmcia_ack_int(pcmcia_intreq);
+ return IRQ_NONE;
+ }
+ if (ei_debug > 3)
+ printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
+ ei_interrupt(irq, dev_id, regs);
+ pcmcia_ack_int(pcmcia_get_intreq());
+ pcmcia_enable_irq();
+ return IRQ_HANDLED;
+}
+
+#ifdef MODULE
+static struct net_device *apne_dev;
+
+int init_module(void)
+{
+ apne_dev = apne_probe(-1);
+ if (IS_ERR(apne_dev))
+ return PTR_ERR(apne_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(apne_dev);
+
+ pcmcia_disable_irq();
+
+ free_irq(IRQ_AMIGA_PORTS, apne_dev);
+
+ pcmcia_reset();
+
+ release_region(IOBASE, 0x20);
+
+ free_netdev(apne_dev);
+}
+
+#endif
+
+static int init_pcmcia(void)
+{
+ u_char config;
+#ifndef MANUAL_CONFIG
+ u_char tuple[32];
+ int offset_len;
+#endif
+ u_long offset;
+
+ pcmcia_reset();
+ pcmcia_program_voltage(PCMCIA_0V);
+ pcmcia_access_speed(PCMCIA_SPEED_250NS);
+ pcmcia_write_enable();
+
+#ifdef MANUAL_CONFIG
+ config = MANUAL_CONFIG;
+#else
+ /* get and write config byte to enable IO port */
+
+ if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3)
+ return 0;
+
+ config = tuple[2] & 0x3f;
+#endif
+#ifdef MANUAL_OFFSET
+ offset = MANUAL_OFFSET;
+#else
+ if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6)
+ return 0;
+
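+ /* The address bytes in the CISTPL_CONFIG tuple are stored least
+  * significant byte first; tuple[2] & 0x3 encodes their count minus
+  * one, so assemble the config register base from the most
+  * significant byte down to tuple[4].
+  */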
+ offset_len = (tuple[2] & 0x3) + 1;
+ offset = 0;
+ while(offset_len--) {
+ offset = (offset << 8) | tuple[4+offset_len];
+ }
+#endif
+
+ out_8(GAYLE_ATTRIBUTE+offset, config);
+
+ return 1;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
new file mode 100644
index 000000000000..60b19679ca5c
--- /dev/null
+++ b/drivers/net/appletalk/Kconfig
@@ -0,0 +1,98 @@
+#
+# Appletalk driver configuration
+#
+config DEV_APPLETALK
+ bool "Appletalk interfaces support"
+ depends on ATALK
+ help
+ AppleTalk is the protocol that Apple computers can use to communicate
+ on a network. If your Linux box is connected to such a network and you wish
+ to do IP over it, or you have a LocalTalk card and wish to use it to
+ connect to the AppleTalk network, say Y.
+
+
+config LTPC
+ tristate "Apple/Farallon LocalTalk PC support"
+ depends on DEV_APPLETALK && (ISA || EISA)
+ help
+ This allows you to use the AppleTalk PC card to connect to LocalTalk
+ networks. The card is also known as the Farallon PhoneNet PC card.
+ If you are in doubt, this card is the one with the 65C02 chip on it.
+ You also need version 1.3.3 or later of the netatalk package.
+ This driver is experimental, which means that it may not work.
+ See the file <file:Documentation/networking/ltpc.txt>.
+
+config COPS
+ tristate "COPS LocalTalk PC support"
+ depends on DEV_APPLETALK && (ISA || EISA)
+ help
+ This allows you to use COPS AppleTalk cards to connect to LocalTalk
+ networks. You also need version 1.3.3 or later of the netatalk
+ package. This driver is experimental, which means that it may not
+ work. This driver will only work if you choose "AppleTalk DDP"
+ networking support, above.
+ Please read the file <file:Documentation/networking/cops.txt>.
+
+config COPS_DAYNA
+ bool "Dayna firmware support"
+ depends on COPS
+ help
+ Support COPS compatible cards with Dayna style firmware (Dayna
+ DL2000/Daynatalk/PC (half length), COPS LT-95, Farallon PhoneNET PC
+ III, Farallon PhoneNET PC II).
+
+config COPS_TANGENT
+ bool "Tangent firmware support"
+ depends on COPS
+ help
+ Support COPS compatible cards with Tangent style firmware (Tangent
+ ATB_II, Novell NL-1000, Daystar Digital LT-200).
+
+config IPDDP
+ tristate "Appletalk-IP driver support"
+ depends on DEV_APPLETALK && ATALK
+ ---help---
+ This allows IP networking for users who only have AppleTalk
+ networking available. This feature is experimental. With this
+ driver, you can encapsulate IP inside AppleTalk (e.g. if your Linux
+ box is stuck on an AppleTalk only network) or decapsulate (e.g. if
+ you want your Linux box to act as an Internet gateway for a zoo of
+ AppleTalk connected Macs). Please see the file
+ <file:Documentation/networking/ipddp.txt> for more information.
+
+ If you say Y here, the AppleTalk-IP support will be compiled into
+ the kernel. In this case, you can either use encapsulation or
+ decapsulation, but not both. With the following two questions, you
+ decide which one you want.
+
+ To compile the AppleTalk-IP support as a module, choose M here: the
+ module will be called ipddp.
+ In this case, you will be able to use both encapsulation and
+ decapsulation simultaneously, by loading two copies of the module
+ and specifying different values for the module option ipddp_mode.
+
+config IPDDP_ENCAP
+ bool "IP to Appletalk-IP Encapsulation support"
+ depends on IPDDP
+ help
+ If you say Y here, the AppleTalk-IP code will be able to encapsulate
+ IP packets inside AppleTalk frames; this is useful if your Linux box
+ is stuck on an AppleTalk network (which hopefully contains a
+ decapsulator somewhere). Please see
+ <file:Documentation/networking/ipddp.txt> for more information. If
+ you said Y to "AppleTalk-IP driver support" above and you say Y
+ here, then you cannot say Y to "AppleTalk-IP to IP Decapsulation
+ support", below.
+
+config IPDDP_DECAP
+ bool "Appletalk-IP to IP Decapsulation support"
+ depends on IPDDP
+ help
+ If you say Y here, the AppleTalk-IP code will be able to decapsulate
+ AppleTalk-IP frames to IP packets; this is useful if you want your
+ Linux box to act as an Internet gateway for an AppleTalk network.
+ Please see <file:Documentation/networking/ipddp.txt> for more
+ information. If you said Y to "AppleTalk-IP driver support" above
+ and you say Y here, then you cannot say Y to "IP to AppleTalk-IP
+ Encapsulation support", above.
+
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
new file mode 100644
index 000000000000..6cfc705f7c5c
--- /dev/null
+++ b/drivers/net/appletalk/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for drivers/net/appletalk
+#
+
+obj-$(CONFIG_IPDDP) += ipddp.o
+obj-$(CONFIG_COPS) += cops.o
+obj-$(CONFIG_LTPC) += ltpc.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
new file mode 100644
index 000000000000..2161c2d585f0
--- /dev/null
+++ b/drivers/net/appletalk/cops.c
@@ -0,0 +1,1059 @@
+/* cops.c: LocalTalk driver for Linux.
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ *
+ * With more than a little help from;
+ * - Alan Cox <Alan.Cox@linux.org>
+ *
+ * Derived from:
+ * - skeleton.c: A network driver outline for linux.
+ * Written 1993-94 by Donald Becker.
+ * - ltpc.c: A driver for the LocalTalk PC card.
+ * Written by Bradford W. Johnson.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * Changes:
+ * 19970608 Alan Cox Allowed dual card type support
+ * Can set board type in insmod
+ * Hooks for cops_setup routine
+ * (not yet implemented).
+ * 19971101 Jay Schulist Fixes for multiple lt* devices.
+ * 19980607 Steven Hirsch Fixed the badly broken support
+ * for Tangent type cards. Only
+ * tested on Daystar LT200. Some
+ * cleanup of formatting and program
+ * logic. Added emacs 'local-vars'
+ * setup for Jay's brace style.
+ * 20000211 Alan Cox Cleaned up for softnet
+ */
+
+static const char *version =
+"cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n";
+/*
+ * Sources:
+ * COPS Localtalk SDK. This provides almost all of the information
+ * needed.
+ */
+
+/*
+ * insmod/modprobe configurable stuff.
+ * - IO Port, choose one your card supports or 0 if you dare.
+ * - IRQ, also choose one your card supports or nothing and let
+ * the driver figure it out.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_ltalk.h> /* For ltalk_setup() */
+#include <linux/delay.h> /* For udelay() */
+#include <linux/atalk.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "cops.h" /* Our Stuff */
+#include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */
+#include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */
+
+/*
+ * The name of the card. Is used for messages and in the requests for
+ * io regions, irqs and dma channels
+ */
+
+static const char *cardname = "cops";
+
+#ifdef CONFIG_COPS_DAYNA
+static int board_type = DAYNA; /* Module exported */
+#else
+static int board_type = TANGENT;
+#endif
+
+static int io = 0x240; /* Default IO for Dayna */
+static int irq = 5; /* Default IRQ */
+
+/*
+ * COPS Autoprobe information.
+ * Right now if port address is right but IRQ is not 5 this will
+ * return a 5 no matter what since we will still get a status response.
+ * One more check is needed to narrow it down after we have gotten
+ * the ioaddr, but since the only other possible IRQs are 3 and 4 there
+ * is no real hurry. I *STRONGLY* recommend using IRQ 5 for your card with
+ * this driver.
+ *
+ * This driver has two modes, Dayna mode and Tangent mode, each
+ * corresponding to one of the two main types of card. All other cards
+ * appear to be the same and just carry different names, or differ only
+ * in minor ways such as having more IO ports. As this driver gets more
+ * testing it will become clearer exactly which cards are supported.
+ * The driver defaults to Dayna mode. To change the driver's mode,
+ * simply select Dayna or Tangent mode when configuring the kernel.
+ *
+ * This driver should support:
+ * TANGENT driver mode:
+ * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200,
+ * COPS LT-1
+ * DAYNA driver mode:
+ * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95,
+ * Farallon PhoneNET PC III, Farallon PhoneNET PC II
+ * Other cards possibly supported, though the mode is unknown:
+ * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel)
+ *
+ * Cards NOT supported by this driver but supported by the ltpc.c
+ * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu>
+ * Farallon PhoneNET PC
+ * Original Apple LocalTalk PC card
+ *
+ * N.B.
+ *
+ * The Daystar Digital LT200 boards do not support interrupt-driven
+ * IO. You must specify 'irq=0xff' as a module parameter to invoke
+ * polled mode. I also believe that the port probing logic is quite
+ * dangerous at best and certainly hopeless for a polled card. Best to
+ * specify both. - Steve H.
+ *
+ */
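+
+/*
+ * Usage sketch (the values here are only examples, adjust them for
+ * your card): the io, irq and board_type module parameters defined
+ * below can be passed at load time, e.g.
+ *
+ *	modprobe cops io=0x240 irq=5			Dayna-style card
+ *	modprobe cops io=0x240 irq=0xff board_type=2	polled Tangent (LT200)
+ *
+ * board_type is 1 for DAYNA and 2 for TANGENT (see cops.h).
+ */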
+
+/*
+ * Zero terminated list of IO ports to probe.
+ */
+
+static unsigned int ports[] = {
+ 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260,
+ 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360,
+ 0
+};
+
+/*
+ * Zero terminated list of IRQs to probe.
+ */
+
+static int cops_irqlist[] = {
+ 5, 4, 3, 0
+};
+
+static struct timer_list cops_timer;
+
+/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */
+#ifndef COPS_DEBUG
+#define COPS_DEBUG 1
+#endif
+static unsigned int cops_debug = COPS_DEBUG;
+
+/* The number of low I/O ports used by the card. */
+#define COPS_IO_EXTENT 8
+
+/* Information that needs to be kept for each board. */
+
+struct cops_local
+{
+ struct net_device_stats stats;
+ int board; /* Holds what board type is. */
+ int nodeid; /* Set to 1 once have nodeid. */
+ unsigned char node_acquire; /* Node ID when acquired. */
+ struct atalk_addr node_addr; /* Full node address */
+ spinlock_t lock; /* RX/TX lock */
+};
+
+/* Index to functions, as function prototypes. */
+static int cops_probe1 (struct net_device *dev, int ioaddr);
+static int cops_irq (int ioaddr, int board);
+
+static int cops_open (struct net_device *dev);
+static int cops_jumpstart (struct net_device *dev);
+static void cops_reset (struct net_device *dev, int sleep);
+static void cops_load (struct net_device *dev);
+static int cops_nodeid (struct net_device *dev, int nodeid);
+
+static irqreturn_t cops_interrupt (int irq, void *dev_id, struct pt_regs *regs);
+static void cops_poll (unsigned long ltdev);
+static void cops_timeout(struct net_device *dev);
+static void cops_rx (struct net_device *dev);
+static int cops_send_packet (struct sk_buff *skb, struct net_device *dev);
+static void set_multicast_list (struct net_device *dev);
+static int cops_hard_header (struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len);
+
+static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static int cops_close (struct net_device *dev);
+static struct net_device_stats *cops_get_stats (struct net_device *dev);
+
+static void cleanup_card(struct net_device *dev)
+{
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, COPS_IO_EXTENT);
+}
+
+/*
+ * Check for a network adaptor of this type, and return '0' iff one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr in [1..0x1ff], always return failure.
+ * Otherwise go with what we pass in.
+ */
+struct net_device * __init cops_probe(int unit)
+{
+ struct net_device *dev;
+ unsigned *port;
+ int base_addr;
+ int err = 0;
+
+ dev = alloc_netdev(sizeof(struct cops_local), "lt%d", ltalk_setup);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "lt%d", unit);
+ netdev_boot_setup_check(dev);
+ irq = dev->irq;
+ base_addr = dev->base_addr;
+ } else {
+ base_addr = dev->base_addr = io;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) { /* Check a single specified location. */
+ err = cops_probe1(dev, base_addr);
+ } else if (base_addr != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ /* FIXME Does this really work for cards which generate irq?
+ * It's definitely N.G. for polled Tangent. sh
+ * Dayna cards don't autoprobe well at all, but if your card is
+ * at IRQ 5 & IO 0x240 we find it every time. ;) JS
+ */
+ for (port = ports; *port && cops_probe1(dev, *port) < 0; port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * This is the real probe routine. Linux has a history of friendly device
+ * probes on the ISA bus. A good device probe avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ */
+static int __init cops_probe1(struct net_device *dev, int ioaddr)
+{
+ struct cops_local *lp;
+ static unsigned version_printed;
+ int board = board_type;
+ int retval;
+
+ if(cops_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ /* Grab the region so no one else tries to probe our ioports. */
+ if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name))
+ return -EBUSY;
+
+ /*
+ * Since this board has jumpered interrupts, allocate the interrupt
+ * vector now. There is no point in waiting since no other device
+ * can use the interrupt, and this marks the irq as busy. Jumpered
+ * interrupts are typically not reported by the boards, and we must
+ * use AutoIRQ to find them.
+ */
+ dev->irq = irq;
+ switch (dev->irq)
+ {
+ case 0:
+ /* COPS AutoIRQ routine */
+ dev->irq = cops_irq(ioaddr, board);
+ if (dev->irq)
+ break;
+ /* No IRQ found on this port, fallthrough */
+ case 1:
+ retval = -EINVAL;
+ goto err_out;
+
+ /* Fixup for users that don't know that IRQ 2 is really
+ * IRQ 9, or don't know which one to set.
+ */
+ case 2:
+ dev->irq = 9;
+ break;
+
+ /* Polled operation requested. Although irq of zero passed as
+ * a parameter tells the init routines to probe, we'll
+ * overload it to denote polled operation at runtime.
+ */
+ case 0xff:
+ dev->irq = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Reserve any actual interrupt. */
+ if (dev->irq) {
+ retval = request_irq(dev->irq, &cops_interrupt, 0, dev->name, dev);
+ if (retval)
+ goto err_out;
+ }
+
+ dev->base_addr = ioaddr;
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(struct cops_local));
+ spin_lock_init(&lp->lock);
+
+ /* Copy local board variable to lp struct. */
+ lp->board = board;
+
+ dev->hard_start_xmit = cops_send_packet;
+ dev->tx_timeout = cops_timeout;
+ dev->watchdog_timeo = HZ * 2;
+ dev->hard_header = cops_hard_header;
+ dev->get_stats = cops_get_stats;
+ dev->open = cops_open;
+ dev->stop = cops_close;
+ dev->do_ioctl = cops_ioctl;
+ dev->set_multicast_list = set_multicast_list;
+ dev->mc_list = NULL;
+
+ /* Tell the user where the card is and what mode we're in. */
+ if(board==DAYNA)
+ printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ if(board==TANGENT) {
+ if(dev->irq)
+ printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ else
+ printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
+ dev->name, cardname, ioaddr);
+
+ }
+ return 0;
+
+err_out:
+ release_region(ioaddr, COPS_IO_EXTENT);
+ return retval;
+}
+
+static int __init cops_irq (int ioaddr, int board)
+{ /*
+ * This does not actually detect which IRQ line the card uses. We just
+ * assume that when we get a correct status response, the IRQ we are
+ * currently trying is the right one.
+ * This really just verifies the IO port but since we only have access
+ * to such a small number of IRQs (5, 4, 3) this is not bad.
+ * This will probably not work for more than one card.
+ */
+ int irqaddr=0;
+ int i, x, status;
+
+ if(board==DAYNA)
+ {
+ outb(0, ioaddr+DAYNA_RESET);
+ inb(ioaddr+DAYNA_RESET);
+ mdelay(333);
+ }
+ if(board==TANGENT)
+ {
+ inb(ioaddr);
+ outb(0, ioaddr);
+ outb(0, ioaddr+TANG_RESET);
+ }
+
+ for(i=0; cops_irqlist[i] !=0; i++)
+ {
+ irqaddr = cops_irqlist[i];
+ for(x = 0xFFFF; x>0; x --) /* wait for response */
+ {
+ if(board==DAYNA)
+ {
+ status = (inb(ioaddr+DAYNA_CARD_STATUS)&3);
+ if(status == 1)
+ return irqaddr;
+ }
+ if(board==TANGENT)
+ {
+ if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0)
+ return irqaddr;
+ }
+ }
+ }
+ return 0; /* no IRQ found */
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ */
+static int cops_open(struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+
+ if(dev->irq==0)
+ {
+ /*
+ * I don't know if the Dayna-style boards support polled
+ * operation. For now, only allow it for Tangent.
+ */
+ if(lp->board==TANGENT) /* Poll 20 times per second */
+ {
+ init_timer(&cops_timer);
+ cops_timer.function = cops_poll;
+ cops_timer.data = (unsigned long)dev;
+ cops_timer.expires = jiffies + HZ/20;
+ add_timer(&cops_timer);
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: No irq line set\n", dev->name);
+ return -EAGAIN;
+ }
+ }
+
+ cops_jumpstart(dev); /* Start the card up. */
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*
+ * This allows for a dynamic start/restart of the entire card.
+ */
+static int cops_jumpstart(struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+
+ /*
+ * Once the card has the firmware loaded and has acquired
+ * the nodeid, if it is reset it will lose it all.
+ */
+ cops_reset(dev,1); /* Need to reset card before load firmware. */
+ cops_load(dev); /* Load the firmware. */
+
+ /*
+ * If atalkd already gave us a nodeid we will use that
+ * one again, else we wait for atalkd to give us a nodeid
+ * in cops_ioctl. This may cause a problem if someone steals
+ * our nodeid while we are resetting.
+ */
+ if(lp->nodeid == 1)
+ cops_nodeid(dev,lp->node_acquire);
+
+ return 0;
+}
+
+static void tangent_wait_reset(int ioaddr)
+{
+ int timeout=0;
+
+ while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
+ mdelay(1); /* Wait 1 ms between polls */
+}
+
+/*
+ * Reset the LocalTalk board.
+ */
+static void cops_reset(struct net_device *dev, int sleep)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr=dev->base_addr;
+
+ if(lp->board==TANGENT)
+ {
+ inb(ioaddr); /* Clear request latch. */
+ outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */
+ outb(0, ioaddr+TANG_RESET); /* Reset the adapter. */
+
+ tangent_wait_reset(ioaddr);
+ outb(0, ioaddr+TANG_CLEAR_INT);
+ }
+ if(lp->board==DAYNA)
+ {
+ outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
+ inb(ioaddr+DAYNA_RESET); /* Clear the reset */
+ if(sleep)
+ {
+ long snap=jiffies;
+
+ /* Let card finish initializing, about 1/3 second */
+ while(jiffies-snap<HZ/3)
+ schedule();
+ }
+ else
+ mdelay(333);
+ }
+ netif_wake_queue(dev);
+ return;
+}
+
+static void cops_load (struct net_device *dev)
+{
+ struct ifreq ifr;
+ struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru;
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr=dev->base_addr;
+ int length, i = 0;
+
+ strcpy(ifr.ifr_name,"lt0");
+
+ /* Get card's firmware code and do some checks on it. */
+#ifdef CONFIG_COPS_DAYNA
+ if(lp->board==DAYNA)
+ {
+ ltf->length=sizeof(ffdrv_code);
+ ltf->data=ffdrv_code;
+ }
+ else
+#endif
+#ifdef CONFIG_COPS_TANGENT
+ if(lp->board==TANGENT)
+ {
+ ltf->length=sizeof(ltdrv_code);
+ ltf->data=ltdrv_code;
+ }
+ else
+#endif
+ {
+ printk(KERN_INFO "%s; unsupported board type.\n", dev->name);
+ return;
+ }
+
+ /* Check to make sure firmware is correct length. */
+ if(lp->board==DAYNA && ltf->length!=5983)
+ {
+ printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name);
+ return;
+ }
+ if(lp->board==TANGENT && ltf->length!=2501)
+ {
+ printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name);
+ return;
+ }
+
+ if(lp->board==DAYNA)
+ {
+ /*
+ * We must wait for a status response
+ * with the DAYNA board.
+ */
+ while(++i<65536)
+ {
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1)
+ break;
+ }
+
+ if(i==65536)
+ return;
+ }
+
+ /*
+ * Upload the firmware and kick. Byte-by-byte works nicely here.
+ */
+ i=0;
+ length = ltf->length;
+ while(length--)
+ {
+ outb(ltf->data[i], ioaddr);
+ i++;
+ }
+
+ if(cops_debug > 1)
+ printk("%s: Uploaded firmware - %d bytes of %d bytes.\n",
+ dev->name, i, ltf->length);
+
+ if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */
+ outb(1, ioaddr+DAYNA_INT_CARD);
+ else /* Tell Tang to run the firmware code. */
+ inb(ioaddr);
+
+ if(lp->board==TANGENT)
+ {
+ tangent_wait_reset(ioaddr);
+ inb(ioaddr); /* Clear initial ready signal. */
+ }
+
+ return;
+}
+
+/*
+ * Get the LocalTalk Nodeid from the card. We can suggest
+ * any nodeid 1-254. The card will try and get that exact
+ * address else we can specify 0 as the nodeid and the card
+ * will autoprobe for a nodeid.
+ */
+static int cops_nodeid (struct net_device *dev, int nodeid)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if(lp->board == DAYNA)
+ {
+ /* Empty any pending adapter responses. */
+ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
+ {
+ outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev); /* Kick any packets waiting. */
+ schedule();
+ }
+
+ outb(2, ioaddr); /* Output command packet length as 2. */
+ outb(0, ioaddr);
+ outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */
+ outb(nodeid, ioaddr); /* Suggest node address. */
+ }
+
+ if(lp->board == TANGENT)
+ {
+ /* Empty any pending adapter responses. */
+ while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
+ {
+ outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */
+ cops_rx(dev); /* Kick out packets waiting. */
+ schedule();
+ }
+
+ /* Not sure what Tangent does if the nodeid we suggest is already in use. */
+ if(nodeid == 0) /* Seed. */
+ nodeid = jiffies&0xFF; /* Get a random try */
+ outb(2, ioaddr); /* Command length LSB */
+ outb(0, ioaddr); /* Command length MSB */
+ outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */
+ outb(nodeid, ioaddr); /* LAP address hint. */
+ outb(0xFF, ioaddr); /* Int. level to use */
+ }
+
+ lp->node_acquire=0; /* Set nodeid holder to 0. */
+ while(lp->node_acquire==0) /* Get *True* nodeid finally. */
+ {
+ outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */
+
+ if(lp->board == DAYNA)
+ {
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
+ }
+ if(lp->board == TANGENT)
+ {
+ if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
+ cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
+ }
+ schedule();
+ }
+
+ if(cops_debug > 1)
+ printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n",
+ dev->name, lp->node_acquire);
+
+ lp->nodeid=1; /* Set got nodeid to 1. */
+
+ return 0;
+}
+
+/*
+ * Poll the Tangent type cards to see if we have work.
+ */
+
+static void cops_poll(unsigned long ltdev)
+{
+ int ioaddr, status;
+ int boguscount = 0;
+
+ struct net_device *dev = (struct net_device *)ltdev;
+
+ del_timer(&cops_timer);
+
+ if(dev == NULL)
+ return; /* We've been downed */
+
+ ioaddr = dev->base_addr;
+ do {
+ status=inb(ioaddr+TANG_CARD_STATUS);
+ if(status & TANG_RX_READY)
+ cops_rx(dev);
+ if(status & TANG_TX_READY)
+ netif_wake_queue(dev);
+ status = inb(ioaddr+TANG_CARD_STATUS);
+ } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
+
+ /* poll 20 times per second */
+ cops_timer.expires = jiffies + HZ/20;
+ add_timer(&cops_timer);
+
+ return;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t cops_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct cops_local *lp;
+ int ioaddr, status;
+ int boguscount = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ if(lp->board==DAYNA)
+ {
+ do {
+ outb(0, ioaddr + COPS_CLEAR_INT);
+ status=inb(ioaddr+DAYNA_CARD_STATUS);
+ if((status&0x03)==DAYNA_RX_REQUEST)
+ cops_rx(dev);
+ netif_wake_queue(dev);
+ } while(++boguscount < 20);
+ }
+ else
+ {
+ do {
+ status=inb(ioaddr+TANG_CARD_STATUS);
+ if(status & TANG_RX_READY)
+ cops_rx(dev);
+ if(status & TANG_TX_READY)
+ netif_wake_queue(dev);
+ status=inb(ioaddr+TANG_CARD_STATUS);
+ } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have a good packet(s), get it/them out of the buffers.
+ */
+static void cops_rx(struct net_device *dev)
+{
+ int pkt_len = 0;
+ int rsp_type = 0;
+ struct sk_buff *skb = NULL;
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int boguscount = 0;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if(lp->board==DAYNA)
+ {
+ outb(0, ioaddr); /* Send out Zero length. */
+ outb(0, ioaddr);
+ outb(DATA_READ, ioaddr); /* Send read command out. */
+
+ /* Wait for DMA to turn around. */
+ while(++boguscount<1000000)
+ {
+ barrier();
+ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY)
+ break;
+ }
+
+ if(boguscount==1000000)
+ {
+ printk(KERN_WARNING "%s: DMA timed out.\n",dev->name);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return;
+ }
+ }
+
+ /* Get response length. */
+ if(lp->board==DAYNA)
+ pkt_len = inb(ioaddr) & 0xFF;
+ else
+ pkt_len = inb(ioaddr) & 0x00FF;
+ pkt_len |= (inb(ioaddr) << 8);
+ /* Input IO code. */
+ rsp_type=inb(ioaddr);
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(pkt_len);
+ if(skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ while(pkt_len--) /* Discard packet */
+ inb(ioaddr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return;
+ }
+ skb->dev = dev;
+ skb_put(skb, pkt_len);
+ skb->protocol = htons(ETH_P_LOCALTALK);
+
+ insb(ioaddr, skb->data, pkt_len); /* Eat the Data */
+
+ if(lp->board==DAYNA)
+ outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */
+
+ spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
+
+ /* Check for bad response length */
+ if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE)
+ {
+ printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
+ dev->name, pkt_len);
+ lp->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* Set nodeid and then get out. */
+ if(rsp_type == LAP_INIT_RSP)
+ { /* Nodeid taken from received packet. */
+ lp->node_acquire = skb->data[0];
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* One last check to make sure we have a good packet. */
+ if(rsp_type != LAP_RESPONSE)
+ {
+ printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
+ lp->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb->mac.raw = skb->data; /* Point to entire packet. */
+ skb_pull(skb,3);
+ skb->h.raw = skb->data; /* Point to data (Skip header). */
+
+ /* Update the counters. */
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += skb->len;
+
+ /* Send packet to a higher place. */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+}
+
+static void cops_timeout(struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ lp->stats.tx_errors++;
+ if(lp->board==TANGENT)
+ {
+ if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
+ printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
+ }
+ printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
+ cops_jumpstart(dev); /* Restart the card. */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Make the card transmit a LocalTalk packet.
+ */
+
+static int cops_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ */
+
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */
+ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
+ cpu_relax();
+ if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */
+ while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
+ cpu_relax();
+
+ /* Output IO length. */
+ outb(skb->len, ioaddr);
+ if(lp->board == DAYNA)
+ outb(skb->len >> 8, ioaddr);
+ else
+ outb((skb->len >> 8)&0x0FF, ioaddr);
+
+ /* Output IO code. */
+ outb(LAP_WRITE, ioaddr);
+
+ if(lp->board == DAYNA) /* Check the transmit buffer again. */
+ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
+
+ outsb(ioaddr, skb->data, skb->len); /* Send out the data. */
+
+ if(lp->board==DAYNA) /* Dayna requires you kick the card */
+ outb(1, ioaddr+DAYNA_INT_CARD);
+
+ spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
+
+ /* Done sending packet, update counters and cleanup. */
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+ return 0;
+}
+
+/*
+ * Dummy function to keep the Appletalk layer happy.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ if(cops_debug >= 3)
+ printk("%s: set_multicast_list executed\n", dev->name);
+}
+
+/*
+ * Another dummy function to keep the Appletalk layer happy.
+ */
+
+static int cops_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ if(cops_debug >= 3)
+ printk("%s: cops_hard_header executed. Wow!\n", dev->name);
+ return 0;
+}
+
+/*
+ * System ioctls for the COPS LocalTalk card.
+ */
+
+static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
+ struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr;
+
+ switch(cmd)
+ {
+ case SIOCSIFADDR:
+ /* Get and set the nodeid and network # atalkd wants. */
+ cops_nodeid(dev, sa->sat_addr.s_node);
+ aa->s_net = sa->sat_addr.s_net;
+ aa->s_node = lp->node_acquire;
+
+ /* Set broadcast address. */
+ dev->broadcast[0] = 0xFF;
+
+ /* Set hardware address. */
+ dev->dev_addr[0] = aa->s_node;
+ dev->addr_len = 1;
+ return 0;
+
+ case SIOCGIFADDR:
+ sa->sat_addr.s_net = aa->s_net;
+ sa->sat_addr.s_node = aa->s_node;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * The inverse routine to cops_open().
+ */
+
+static int cops_close(struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+
+ /* If we were running polled, yank the timer.
+ */
+ if(lp->board==TANGENT && dev->irq==0)
+ del_timer(&cops_timer);
+
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *cops_get_stats(struct net_device *dev)
+{
+ struct cops_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+#ifdef MODULE
+static struct net_device *cops_dev;
+
+MODULE_LICENSE("GPL");
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(board_type, int, 0);
+
+int init_module(void)
+{
+ if (io == 0)
+ printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
+ cardname);
+ cops_dev = cops_probe(-1);
+ if (IS_ERR(cops_dev))
+ return PTR_ERR(cops_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(cops_dev);
+ cleanup_card(cops_dev);
+ free_netdev(cops_dev);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -c cops.c"
+ * c-basic-offset: 4
+ * c-file-offsets: ((substatement-open . 0))
+ * End:
+ */
diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h
new file mode 100644
index 000000000000..c68ba9c2ef46
--- /dev/null
+++ b/drivers/net/appletalk/cops.h
@@ -0,0 +1,60 @@
+/* cops.h: LocalTalk driver for Linux.
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ */
+
+#ifndef __LINUX_COPSLTALK_H
+#define __LINUX_COPSLTALK_H
+
+#ifdef __KERNEL__
+
+/* Max LLAP size we will accept. */
+#define MAX_LLAP_SIZE 603
+
+/* Tangent */
+#define TANG_CARD_STATUS 1
+#define TANG_CLEAR_INT 1
+#define TANG_RESET 3
+
+#define TANG_TX_READY 1
+#define TANG_RX_READY 2
+
+/* Dayna */
+#define DAYNA_CMD_DATA 0
+#define DAYNA_CLEAR_INT 1
+#define DAYNA_CARD_STATUS 2
+#define DAYNA_INT_CARD 3
+#define DAYNA_RESET 4
+
+#define DAYNA_RX_READY 0
+#define DAYNA_TX_READY 1
+#define DAYNA_RX_REQUEST 3
+
+/* Same on both card types */
+#define COPS_CLEAR_INT 1
+
+/* LAP response codes received from the cards. */
+#define LAP_INIT 1 /* Init cmd */
+#define LAP_INIT_RSP 2 /* Init response */
+#define LAP_WRITE 3 /* Write cmd */
+#define DATA_READ 4 /* Data read */
+#define LAP_RESPONSE 4 /* Received ALAP frame response */
+#define LAP_GETSTAT 5 /* Get LAP and HW status */
+#define LAP_RSPSTAT 6 /* Status response */
+
+#endif
+
+/*
+ * Structure to hold the firmware information.
+ */
+struct ltfirmware
+{
+ unsigned int length;
+ unsigned char * data;
+};
+
+#define DAYNA 1
+#define TANGENT 2
+
+#endif
diff --git a/drivers/net/appletalk/cops_ffdrv.h b/drivers/net/appletalk/cops_ffdrv.h
new file mode 100644
index 000000000000..4131b4a7a65b
--- /dev/null
+++ b/drivers/net/appletalk/cops_ffdrv.h
@@ -0,0 +1,533 @@
+
+/*
+ * The firmware this driver downloads into the Localtalk card is a
+ * separate program and is not GPL'd source code, even though the Linux
+ * side driver and the routine that loads this data into the card are.
+ *
+ * It is taken from the COPS SDK and is under the following license
+ *
+ * This material is licensed to you strictly for use in conjunction with
+ * the use of COPS LocalTalk adapters.
+ * There is no charge for this SDK. And no waranty express or implied
+ * about its fitness for any purpose. However, we will cheerefully
+ * refund every penny you paid for this SDK...
+ * Regards,
+ *
+ * Thomas F. Divine
+ * Chief Scientist
+ */
+
+
+/* cops_ffdrv.h: LocalTalk driver firmware dump for Linux.
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_COPS_DAYNA
+
+unsigned char ffdrv_code[] = {
+ 58,3,0,50,228,149,33,255,255,34,226,149,
+ 249,17,40,152,33,202,154,183,237,82,77,68,
+ 11,107,98,19,54,0,237,176,175,50,80,0,
+ 62,128,237,71,62,32,237,57,51,62,12,237,
+ 57,50,237,57,54,62,6,237,57,52,62,12,
+ 237,57,49,33,107,137,34,32,128,33,83,130,
+ 34,40,128,33,86,130,34,42,128,33,112,130,
+ 34,36,128,33,211,130,34,38,128,62,0,237,
+ 57,16,33,63,148,34,34,128,237,94,205,15,
+ 130,251,205,168,145,24,141,67,111,112,121,114,
+ 105,103,104,116,32,40,67,41,32,49,57,56,
+ 56,32,45,32,68,97,121,110,97,32,67,111,
+ 109,109,117,110,105,99,97,116,105,111,110,115,
+ 32,32,32,65,108,108,32,114,105,103,104,116,
+ 115,32,114,101,115,101,114,118,101,100,46,32,
+ 32,40,68,40,68,7,16,8,34,7,22,6,
+ 16,5,12,4,8,3,6,140,0,16,39,128,
+ 0,4,96,10,224,6,0,7,126,2,64,11,
+ 118,12,6,13,0,14,193,15,0,5,96,3,
+ 192,1,64,9,8,62,9,211,66,62,192,211,
+ 66,62,100,61,32,253,6,28,33,205,129,14,
+ 66,237,163,194,253,129,6,28,33,205,129,14,
+ 64,237,163,194,9,130,201,62,47,50,71,152,
+ 62,47,211,68,58,203,129,237,57,20,58,204,
+ 129,237,57,21,33,77,152,54,132,205,233,129,
+ 58,228,149,254,209,40,6,56,4,62,0,24,
+ 2,219,96,33,233,149,119,230,62,33,232,149,
+ 119,213,33,8,152,17,7,0,25,119,19,25,
+ 119,209,201,251,237,77,245,197,213,229,221,229,
+ 205,233,129,62,1,50,106,137,205,158,139,221,
+ 225,225,209,193,241,251,237,77,245,197,213,219,
+ 72,237,56,16,230,46,237,57,16,237,56,12,
+ 58,72,152,183,32,26,6,20,17,128,2,237,
+ 56,46,187,32,35,237,56,47,186,32,29,219,
+ 72,230,1,32,3,5,32,232,175,50,72,152,
+ 229,221,229,62,1,50,106,137,205,158,139,221,
+ 225,225,24,25,62,1,50,72,152,58,201,129,
+ 237,57,12,58,202,129,237,57,13,237,56,16,
+ 246,17,237,57,16,209,193,241,251,237,77,245,
+ 197,229,213,221,229,237,56,16,230,17,237,57,
+ 16,237,56,20,58,34,152,246,16,246,8,211,
+ 68,62,6,61,32,253,58,34,152,246,8,211,
+ 68,58,203,129,237,57,20,58,204,129,237,57,
+ 21,237,56,16,246,34,237,57,16,221,225,209,
+ 225,193,241,251,237,77,33,2,0,57,126,230,
+ 3,237,100,1,40,2,246,128,230,130,245,62,
+ 5,211,64,241,211,64,201,229,213,243,237,56,
+ 16,230,46,237,57,16,237,56,12,251,70,35,
+ 35,126,254,175,202,77,133,254,129,202,15,133,
+ 230,128,194,191,132,43,58,44,152,119,33,76,
+ 152,119,35,62,132,119,120,254,255,40,4,58,
+ 49,152,119,219,72,43,43,112,17,3,0,237,
+ 56,52,230,248,237,57,52,219,72,230,1,194,
+ 141,131,209,225,237,56,52,246,6,237,57,52,
+ 62,1,55,251,201,62,3,211,66,62,192,211,
+ 66,62,48,211,66,0,0,219,66,230,1,40,
+ 4,219,67,24,240,205,203,135,58,75,152,254,
+ 255,202,128,132,58,49,152,254,161,250,207,131,
+ 58,34,152,211,68,62,10,211,66,62,128,211,
+ 66,62,11,211,66,62,6,211,66,24,0,62,
+ 14,211,66,62,33,211,66,62,1,211,66,62,
+ 64,211,66,62,3,211,66,62,209,211,66,62,
+ 100,71,219,66,230,1,32,6,5,32,247,195,
+ 248,132,219,67,71,58,44,152,184,194,248,132,
+ 62,100,71,219,66,230,1,32,6,5,32,247,
+ 195,248,132,219,67,62,100,71,219,66,230,1,
+ 32,6,5,32,247,195,248,132,219,67,254,133,
+ 32,7,62,0,50,74,152,24,17,254,173,32,
+ 7,62,1,50,74,152,24,6,254,141,194,248,
+ 132,71,209,225,58,49,152,254,132,32,10,62,
+ 50,205,2,134,205,144,135,24,27,254,140,32,
+ 15,62,110,205,2,134,62,141,184,32,5,205,
+ 144,135,24,8,62,10,205,2,134,205,8,134,
+ 62,1,50,106,137,205,158,139,237,56,52,246,
+ 6,237,57,52,175,183,251,201,62,20,135,237,
+ 57,20,175,237,57,21,237,56,16,246,2,237,
+ 57,16,237,56,20,95,237,56,21,123,254,10,
+ 48,244,237,56,16,230,17,237,57,16,209,225,
+ 205,144,135,62,1,50,106,137,205,158,139,237,
+ 56,52,246,6,237,57,52,175,183,251,201,209,
+ 225,243,219,72,230,1,40,13,62,10,211,66,
+ 0,0,219,66,230,192,202,226,132,237,56,52,
+ 246,6,237,57,52,62,1,55,251,201,205,203,
+ 135,62,1,50,106,137,205,158,139,237,56,52,
+ 246,6,237,57,52,183,251,201,209,225,62,1,
+ 50,106,137,205,158,139,237,56,52,246,6,237,
+ 57,52,62,2,55,251,201,209,225,243,219,72,
+ 230,1,202,213,132,62,10,211,66,0,0,219,
+ 66,230,192,194,213,132,229,62,1,50,106,137,
+ 42,40,152,205,65,143,225,17,3,0,205,111,
+ 136,62,6,211,66,58,44,152,211,66,237,56,
+ 52,246,6,237,57,52,183,251,201,209,197,237,
+ 56,52,230,248,237,57,52,219,72,230,1,32,
+ 15,193,225,237,56,52,246,6,237,57,52,62,
+ 1,55,251,201,14,23,58,37,152,254,0,40,
+ 14,14,2,254,1,32,5,62,140,119,24,3,
+ 62,132,119,43,43,197,205,203,135,193,62,1,
+ 211,66,62,64,211,66,62,3,211,66,62,193,
+ 211,66,62,100,203,39,71,219,66,230,1,32,
+ 6,5,32,247,195,229,133,33,238,151,219,67,
+ 71,58,44,152,184,194,229,133,119,62,100,71,
+ 219,66,230,1,32,6,5,32,247,195,229,133,
+ 219,67,35,119,13,32,234,193,225,62,1,50,
+ 106,137,205,158,139,237,56,52,246,6,237,57,
+ 52,175,183,251,201,33,234,151,35,35,62,255,
+ 119,193,225,62,1,50,106,137,205,158,139,237,
+ 56,52,246,6,237,57,52,175,251,201,243,61,
+ 32,253,251,201,62,3,211,66,62,192,211,66,
+ 58,49,152,254,140,32,19,197,229,213,17,181,
+ 129,33,185,129,1,2,0,237,176,209,225,193,
+ 24,27,229,213,33,187,129,58,49,152,230,15,
+ 87,30,2,237,92,25,17,181,129,126,18,19,
+ 35,126,18,209,225,58,34,152,246,8,211,68,
+ 58,49,152,254,165,40,14,254,164,40,10,62,
+ 10,211,66,62,224,211,66,24,25,58,74,152,
+ 254,0,40,10,62,10,211,66,62,160,211,66,
+ 24,8,62,10,211,66,62,128,211,66,62,11,
+ 211,66,62,6,211,66,205,147,143,62,5,211,
+ 66,62,224,211,66,62,5,211,66,62,96,211,
+ 66,62,5,61,32,253,62,5,211,66,62,224,
+ 211,66,62,14,61,32,253,62,5,211,66,62,
+ 233,211,66,62,128,211,66,58,181,129,61,32,
+ 253,62,1,211,66,62,192,211,66,1,254,19,
+ 237,56,46,187,32,6,13,32,247,195,226,134,
+ 62,192,211,66,0,0,219,66,203,119,40,250,
+ 219,66,203,87,40,250,243,237,56,16,230,17,
+ 237,57,16,237,56,20,251,62,5,211,66,62,
+ 224,211,66,58,182,129,61,32,253,229,33,181,
+ 129,58,183,129,203,63,119,35,58,184,129,119,
+ 225,62,10,211,66,62,224,211,66,62,11,211,
+ 66,62,118,211,66,62,47,211,68,62,5,211,
+ 66,62,233,211,66,58,181,129,61,32,253,62,
+ 5,211,66,62,224,211,66,58,182,129,61,32,
+ 253,62,5,211,66,62,96,211,66,201,229,213,
+ 58,50,152,230,15,87,30,2,237,92,33,187,
+ 129,25,17,181,129,126,18,35,19,126,18,209,
+ 225,58,71,152,246,8,211,68,58,50,152,254,
+ 165,40,14,254,164,40,10,62,10,211,66,62,
+ 224,211,66,24,8,62,10,211,66,62,128,211,
+ 66,62,11,211,66,62,6,211,66,195,248,135,
+ 62,3,211,66,62,192,211,66,197,229,213,17,
+ 181,129,33,183,129,1,2,0,237,176,209,225,
+ 193,62,47,211,68,62,10,211,66,62,224,211,
+ 66,62,11,211,66,62,118,211,66,62,1,211,
+ 66,62,0,211,66,205,147,143,195,16,136,62,
+ 3,211,66,62,192,211,66,197,229,213,17,181,
+ 129,33,183,129,1,2,0,237,176,209,225,193,
+ 62,47,211,68,62,10,211,66,62,224,211,66,
+ 62,11,211,66,62,118,211,66,205,147,143,62,
+ 5,211,66,62,224,211,66,62,5,211,66,62,
+ 96,211,66,62,5,61,32,253,62,5,211,66,
+ 62,224,211,66,62,14,61,32,253,62,5,211,
+ 66,62,233,211,66,62,128,211,66,58,181,129,
+ 61,32,253,62,1,211,66,62,192,211,66,1,
+ 254,19,237,56,46,187,32,6,13,32,247,195,
+ 88,136,62,192,211,66,0,0,219,66,203,119,
+ 40,250,219,66,203,87,40,250,62,5,211,66,
+ 62,224,211,66,58,182,129,61,32,253,62,5,
+ 211,66,62,96,211,66,201,197,14,67,6,0,
+ 62,3,211,66,62,192,211,66,62,48,211,66,
+ 0,0,219,66,230,1,40,4,219,67,24,240,
+ 62,5,211,66,62,233,211,66,62,128,211,66,
+ 58,181,129,61,32,253,237,163,29,62,192,211,
+ 66,219,66,230,4,40,250,237,163,29,32,245,
+ 219,66,230,4,40,250,62,255,71,219,66,230,
+ 4,40,3,5,32,247,219,66,230,4,40,250,
+ 62,5,211,66,62,224,211,66,58,182,129,61,
+ 32,253,62,5,211,66,62,96,211,66,58,71,
+ 152,254,1,202,18,137,62,16,211,66,62,56,
+ 211,66,62,14,211,66,62,33,211,66,62,1,
+ 211,66,62,248,211,66,237,56,48,246,153,230,
+ 207,237,57,48,62,3,211,66,62,221,211,66,
+ 193,201,58,71,152,211,68,62,10,211,66,62,
+ 128,211,66,62,11,211,66,62,6,211,66,62,
+ 6,211,66,58,44,152,211,66,62,16,211,66,
+ 62,56,211,66,62,48,211,66,0,0,62,14,
+ 211,66,62,33,211,66,62,1,211,66,62,248,
+ 211,66,237,56,48,246,145,246,8,230,207,237,
+ 57,48,62,3,211,66,62,221,211,66,193,201,
+ 44,3,1,0,70,69,1,245,197,213,229,175,
+ 50,72,152,237,56,16,230,46,237,57,16,237,
+ 56,12,62,1,211,66,0,0,219,66,95,230,
+ 160,32,3,195,20,139,123,230,96,194,72,139,
+ 62,48,211,66,62,1,211,66,62,64,211,66,
+ 237,91,40,152,205,207,143,25,43,55,237,82,
+ 218,70,139,34,42,152,98,107,58,44,152,190,
+ 194,210,138,35,35,62,130,190,194,200,137,62,
+ 1,50,48,152,62,175,190,202,82,139,62,132,
+ 190,32,44,50,50,152,62,47,50,71,152,229,
+ 175,50,106,137,42,40,152,205,65,143,225,54,
+ 133,43,70,58,44,152,119,43,112,17,3,0,
+ 62,10,205,2,134,205,111,136,195,158,138,62,
+ 140,190,32,19,50,50,152,58,233,149,230,4,
+ 202,222,138,62,1,50,71,152,195,219,137,126,
+ 254,160,250,185,138,254,166,242,185,138,50,50,
+ 152,43,126,35,229,213,33,234,149,95,22,0,
+ 25,126,254,132,40,18,254,140,40,14,58,50,
+ 152,230,15,87,126,31,21,242,65,138,56,2,
+ 175,119,58,50,152,230,15,87,58,233,149,230,
+ 62,31,21,242,85,138,218,98,138,209,225,195,
+ 20,139,58,50,152,33,100,137,230,15,95,22,
+ 0,25,126,50,71,152,209,225,58,50,152,254,
+ 164,250,135,138,58,73,152,254,0,40,4,54,
+ 173,24,2,54,133,43,70,58,44,152,119,43,
+ 112,17,3,0,205,70,135,175,50,106,137,205,
+ 208,139,58,199,129,237,57,12,58,200,129,237,
+ 57,13,237,56,16,246,17,237,57,16,225,209,
+ 193,241,251,237,77,62,129,190,194,227,138,54,
+ 130,43,70,58,44,152,119,43,112,17,3,0,
+ 205,144,135,195,20,139,35,35,126,254,132,194,
+ 227,138,175,50,106,137,205,158,139,24,42,58,
+ 201,154,254,1,40,7,62,1,50,106,137,24,
+ 237,58,106,137,254,1,202,222,138,62,128,166,
+ 194,222,138,221,229,221,33,67,152,205,127,142,
+ 205,109,144,221,225,225,209,193,241,251,237,77,
+ 58,106,137,254,1,202,44,139,58,50,152,254,
+ 164,250,44,139,58,73,152,238,1,50,73,152,
+ 221,229,221,33,51,152,205,127,142,221,225,62,
+ 1,50,106,137,205,158,139,195,13,139,24,208,
+ 24,206,24,204,230,64,40,3,195,20,139,195,
+ 20,139,43,126,33,8,152,119,35,58,44,152,
+ 119,43,237,91,35,152,205,203,135,205,158,139,
+ 195,13,139,175,50,78,152,62,3,211,66,62,
+ 192,211,66,201,197,33,4,0,57,126,35,102,
+ 111,62,1,50,106,137,219,72,205,141,139,193,
+ 201,62,1,50,78,152,34,40,152,54,0,35,
+ 35,54,0,195,163,139,58,78,152,183,200,229,
+ 33,181,129,58,183,129,119,35,58,184,129,119,
+ 225,62,47,211,68,62,14,211,66,62,193,211,
+ 66,62,10,211,66,62,224,211,66,62,11,211,
+ 66,62,118,211,66,195,3,140,58,78,152,183,
+ 200,58,71,152,211,68,254,69,40,4,254,70,
+ 32,17,58,73,152,254,0,40,10,62,10,211,
+ 66,62,160,211,66,24,8,62,10,211,66,62,
+ 128,211,66,62,11,211,66,62,6,211,66,62,
+ 6,211,66,58,44,152,211,66,62,16,211,66,
+ 62,56,211,66,62,48,211,66,0,0,219,66,
+ 230,1,40,4,219,67,24,240,62,14,211,66,
+ 62,33,211,66,42,40,152,205,65,143,62,1,
+ 211,66,62,248,211,66,237,56,48,246,145,246,
+ 8,230,207,237,57,48,62,3,211,66,62,221,
+ 211,66,201,62,16,211,66,62,56,211,66,62,
+ 48,211,66,0,0,219,66,230,1,40,4,219,
+ 67,24,240,62,14,211,66,62,33,211,66,62,
+ 1,211,66,62,248,211,66,237,56,48,246,153,
+ 230,207,237,57,48,62,3,211,66,62,221,211,
+ 66,201,229,213,33,234,149,95,22,0,25,126,
+ 254,132,40,4,254,140,32,2,175,119,123,209,
+ 225,201,6,8,14,0,31,48,1,12,16,250,
+ 121,201,33,4,0,57,94,35,86,33,2,0,
+ 57,126,35,102,111,221,229,34,89,152,237,83,
+ 91,152,221,33,63,152,205,127,142,58,81,152,
+ 50,82,152,58,80,152,135,50,80,152,205,162,
+ 140,254,3,56,16,58,81,152,135,60,230,15,
+ 50,81,152,175,50,80,152,24,23,58,79,152,
+ 205,162,140,254,3,48,13,58,81,152,203,63,
+ 50,81,152,62,255,50,79,152,58,81,152,50,
+ 82,152,58,79,152,135,50,79,152,62,32,50,
+ 83,152,50,84,152,237,56,16,230,17,237,57,
+ 16,219,72,62,192,50,93,152,62,93,50,94,
+ 152,58,93,152,61,50,93,152,32,9,58,94,
+ 152,61,50,94,152,40,44,62,170,237,57,20,
+ 175,237,57,21,237,56,16,246,2,237,57,16,
+ 219,72,230,1,202,29,141,237,56,20,71,237,
+ 56,21,120,254,10,48,237,237,56,16,230,17,
+ 237,57,16,243,62,14,211,66,62,65,211,66,
+ 251,58,39,152,23,23,60,50,39,152,71,58,
+ 82,152,160,230,15,40,22,71,14,10,219,66,
+ 230,16,202,186,141,219,72,230,1,202,186,141,
+ 13,32,239,16,235,42,89,152,237,91,91,152,
+ 205,47,131,48,7,61,202,186,141,195,227,141,
+ 221,225,33,0,0,201,221,33,55,152,205,127,
+ 142,58,84,152,61,50,84,152,40,19,58,82,
+ 152,246,1,50,82,152,58,79,152,246,1,50,
+ 79,152,195,29,141,221,225,33,1,0,201,221,
+ 33,59,152,205,127,142,58,80,152,246,1,50,
+ 80,152,58,82,152,135,246,1,50,82,152,58,
+ 83,152,61,50,83,152,194,29,141,221,225,33,
+ 2,0,201,221,229,33,0,0,57,17,4,0,
+ 25,126,50,44,152,230,128,50,85,152,58,85,
+ 152,183,40,6,221,33,88,2,24,4,221,33,
+ 150,0,58,44,152,183,40,53,60,40,50,60,
+ 40,47,61,61,33,86,152,119,35,119,35,54,
+ 129,175,50,48,152,221,43,221,229,225,124,181,
+ 40,42,33,86,152,17,3,0,205,189,140,17,
+ 232,3,27,123,178,32,251,58,48,152,183,40,
+ 224,58,44,152,71,62,7,128,230,127,71,58,
+ 85,152,176,50,44,152,24,162,221,225,201,183,
+ 221,52,0,192,221,52,1,192,221,52,2,192,
+ 221,52,3,192,55,201,245,62,1,211,100,241,
+ 201,245,62,1,211,96,241,201,33,2,0,57,
+ 126,35,102,111,237,56,48,230,175,237,57,48,
+ 62,48,237,57,49,125,237,57,32,124,237,57,
+ 33,62,0,237,57,34,62,88,237,57,35,62,
+ 0,237,57,36,237,57,37,33,128,2,125,237,
+ 57,38,124,237,57,39,237,56,48,246,97,230,
+ 207,237,57,48,62,0,237,57,0,62,0,211,
+ 96,211,100,201,33,2,0,57,126,35,102,111,
+ 237,56,48,230,175,237,57,48,62,12,237,57,
+ 49,62,76,237,57,32,62,0,237,57,33,237,
+ 57,34,125,237,57,35,124,237,57,36,62,0,
+ 237,57,37,33,128,2,125,237,57,38,124,237,
+ 57,39,237,56,48,246,97,230,207,237,57,48,
+ 62,1,211,96,201,33,2,0,57,126,35,102,
+ 111,229,237,56,48,230,87,237,57,48,125,237,
+ 57,40,124,237,57,41,62,0,237,57,42,62,
+ 67,237,57,43,62,0,237,57,44,58,106,137,
+ 254,1,32,5,33,6,0,24,3,33,128,2,
+ 125,237,57,46,124,237,57,47,237,56,50,230,
+ 252,246,2,237,57,50,225,201,33,4,0,57,
+ 94,35,86,33,2,0,57,126,35,102,111,237,
+ 56,48,230,87,237,57,48,125,237,57,40,124,
+ 237,57,41,62,0,237,57,42,62,67,237,57,
+ 43,62,0,237,57,44,123,237,57,46,122,237,
+ 57,47,237,56,50,230,244,246,0,237,57,50,
+ 237,56,48,246,145,230,207,237,57,48,201,213,
+ 237,56,46,95,237,56,47,87,237,56,46,111,
+ 237,56,47,103,183,237,82,32,235,33,128,2,
+ 183,237,82,209,201,213,237,56,38,95,237,56,
+ 39,87,237,56,38,111,237,56,39,103,183,237,
+ 82,32,235,33,128,2,183,237,82,209,201,245,
+ 197,1,52,0,237,120,230,253,237,121,193,241,
+ 201,245,197,1,52,0,237,120,246,2,237,121,
+ 193,241,201,33,2,0,57,126,35,102,111,126,
+ 35,110,103,201,33,0,0,34,102,152,34,96,
+ 152,34,98,152,33,202,154,34,104,152,237,91,
+ 104,152,42,226,149,183,237,82,17,0,255,25,
+ 34,100,152,203,124,40,6,33,0,125,34,100,
+ 152,42,104,152,35,35,35,229,205,120,139,193,
+ 201,205,186,149,229,42,40,152,35,35,35,229,
+ 205,39,144,193,124,230,3,103,221,117,254,221,
+ 116,255,237,91,42,152,35,35,35,183,237,82,
+ 32,12,17,5,0,42,42,152,205,171,149,242,
+ 169,144,42,40,152,229,205,120,139,193,195,198,
+ 149,237,91,42,152,42,98,152,25,34,98,152,
+ 19,19,19,42,102,152,25,34,102,152,237,91,
+ 100,152,33,158,253,25,237,91,102,152,205,171,
+ 149,242,214,144,33,0,0,34,102,152,62,1,
+ 50,95,152,205,225,144,195,198,149,58,95,152,
+ 183,200,237,91,96,152,42,102,152,205,171,149,
+ 242,5,145,237,91,102,152,33,98,2,25,237,
+ 91,96,152,205,171,149,250,37,145,237,91,96,
+ 152,42,102,152,183,237,82,32,7,42,98,152,
+ 125,180,40,13,237,91,102,152,42,96,152,205,
+ 171,149,242,58,145,237,91,104,152,42,102,152,
+ 25,35,35,35,229,205,120,139,193,175,50,95,
+ 152,201,195,107,139,205,206,149,250,255,243,205,
+ 225,144,251,58,230,149,183,194,198,149,17,1,
+ 0,42,98,152,205,171,149,250,198,149,62,1,
+ 50,230,149,237,91,96,152,42,104,152,25,221,
+ 117,252,221,116,253,237,91,104,152,42,96,152,
+ 25,35,35,35,221,117,254,221,116,255,35,35,
+ 35,229,205,39,144,124,230,3,103,35,35,35,
+ 221,117,250,221,116,251,235,221,110,252,221,102,
+ 253,115,35,114,35,54,4,62,1,211,100,211,
+ 84,195,198,149,33,0,0,34,102,152,34,96,
+ 152,34,98,152,33,202,154,34,104,152,237,91,
+ 104,152,42,226,149,183,237,82,17,0,255,25,
+ 34,100,152,33,109,152,54,0,33,107,152,229,
+ 205,240,142,193,62,47,50,34,152,62,132,50,
+ 49,152,205,241,145,205,61,145,58,39,152,60,
+ 50,39,152,24,241,205,206,149,251,255,33,109,
+ 152,126,183,202,198,149,110,221,117,251,33,109,
+ 152,54,0,221,126,251,254,1,40,28,254,3,
+ 40,101,254,4,202,190,147,254,5,202,147,147,
+ 254,8,40,87,33,107,152,229,205,240,142,195,
+ 198,149,58,201,154,183,32,21,33,111,152,126,
+ 50,229,149,205,52,144,33,110,152,110,38,0,
+ 229,205,11,142,193,237,91,96,152,42,104,152,
+ 25,221,117,254,221,116,255,35,35,54,2,17,
+ 2,0,43,43,115,35,114,58,44,152,35,35,
+ 119,58,228,149,35,119,62,1,211,100,211,84,
+ 62,1,50,201,154,24,169,205,153,142,58,231,
+ 149,183,40,250,175,50,231,149,33,110,152,126,
+ 254,255,40,91,58,233,149,230,63,183,40,83,
+ 94,22,0,33,234,149,25,126,183,40,13,33,
+ 110,152,94,33,234,150,25,126,254,3,32,36,
+ 205,81,148,125,180,33,110,152,94,22,0,40,
+ 17,33,234,149,25,54,0,33,107,152,229,205,
+ 240,142,193,195,198,149,33,234,150,25,54,0,
+ 33,110,152,94,22,0,33,234,149,25,126,50,
+ 49,152,254,132,32,37,62,47,50,34,152,42,
+ 107,152,229,33,110,152,229,205,174,140,193,193,
+ 125,180,33,110,152,94,22,0,33,234,150,202,
+ 117,147,25,52,195,120,147,58,49,152,254,140,
+ 32,7,62,1,50,34,152,24,210,62,32,50,
+ 106,152,24,19,58,49,152,95,58,106,152,163,
+ 183,58,106,152,32,11,203,63,50,106,152,58,
+ 106,152,183,32,231,254,2,40,51,254,4,40,
+ 38,254,8,40,26,254,16,40,13,254,32,32,
+ 158,62,165,50,49,152,62,69,24,190,62,164,
+ 50,49,152,62,70,24,181,62,163,50,49,152,
+ 175,24,173,62,162,50,49,152,62,1,24,164,
+ 62,161,50,49,152,62,3,24,155,25,54,0,
+ 221,126,251,254,8,40,7,58,230,149,183,202,
+ 32,146,33,107,152,229,205,240,142,193,211,84,
+ 195,198,149,237,91,96,152,42,104,152,25,221,
+ 117,254,221,116,255,35,35,54,6,17,2,0,
+ 43,43,115,35,114,58,228,149,35,35,119,58,
+ 233,149,35,119,205,146,142,195,32,146,237,91,
+ 96,152,42,104,152,25,229,205,160,142,193,58,
+ 231,149,183,40,250,175,50,231,149,243,237,91,
+ 96,152,42,104,152,25,221,117,254,221,116,255,
+ 78,35,70,221,113,252,221,112,253,89,80,42,
+ 98,152,183,237,82,34,98,152,203,124,40,19,
+ 33,0,0,34,98,152,34,102,152,34,96,152,
+ 62,1,50,95,152,24,40,221,94,252,221,86,
+ 253,19,19,19,42,96,152,25,34,96,152,237,
+ 91,100,152,33,158,253,25,237,91,96,152,205,
+ 171,149,242,55,148,33,0,0,34,96,152,175,
+ 50,230,149,251,195,32,146,245,62,1,50,231,
+ 149,62,16,237,57,0,211,80,241,251,237,77,
+ 201,205,186,149,229,229,33,0,0,34,37,152,
+ 33,110,152,126,50,234,151,58,44,152,33,235,
+ 151,119,221,54,253,0,221,54,254,0,195,230,
+ 148,33,236,151,54,175,33,3,0,229,33,234,
+ 151,229,205,174,140,193,193,33,236,151,126,254,
+ 255,40,74,33,245,151,110,221,117,255,33,249,
+ 151,126,221,166,255,221,119,255,33,253,151,126,
+ 221,166,255,221,119,255,58,232,149,95,221,126,
+ 255,163,221,119,255,183,40,15,230,191,33,110,
+ 152,94,22,0,33,234,149,25,119,24,12,33,
+ 110,152,94,22,0,33,234,149,25,54,132,33,
+ 0,0,195,198,149,221,110,253,221,102,254,35,
+ 221,117,253,221,116,254,17,32,0,221,110,253,
+ 221,102,254,205,171,149,250,117,148,58,233,149,
+ 203,87,40,84,33,1,0,34,37,152,221,54,
+ 253,0,221,54,254,0,24,53,33,236,151,54,
+ 175,33,3,0,229,33,234,151,229,205,174,140,
+ 193,193,33,236,151,126,254,255,40,14,33,110,
+ 152,94,22,0,33,234,149,25,54,140,24,159,
+ 221,110,253,221,102,254,35,221,117,253,221,116,
+ 254,17,32,0,221,110,253,221,102,254,205,171,
+ 149,250,12,149,33,2,0,34,37,152,221,54,
+ 253,0,221,54,254,0,24,54,33,236,151,54,
+ 175,33,3,0,229,33,234,151,229,205,174,140,
+ 193,193,33,236,151,126,254,255,40,15,33,110,
+ 152,94,22,0,33,234,149,25,54,132,195,211,
+ 148,221,110,253,221,102,254,35,221,117,253,221,
+ 116,254,17,32,0,221,110,253,221,102,254,205,
+ 171,149,250,96,149,33,1,0,195,198,149,124,
+ 170,250,179,149,237,82,201,124,230,128,237,82,
+ 60,201,225,253,229,221,229,221,33,0,0,221,
+ 57,233,221,249,221,225,253,225,201,233,225,253,
+ 229,221,229,221,33,0,0,221,57,94,35,86,
+ 35,235,57,249,235,233,0,0,0,0,0,0,
+ 62,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 175,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,133,1,0,0,0,63,
+ 255,255,255,255,0,0,0,63,0,0,0,0,
+ 0,0,0,0,0,0,0,24,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0
+ } ;
+
+#endif
diff --git a/drivers/net/appletalk/cops_ltdrv.h b/drivers/net/appletalk/cops_ltdrv.h
new file mode 100644
index 000000000000..05de66dd9206
--- /dev/null
+++ b/drivers/net/appletalk/cops_ltdrv.h
@@ -0,0 +1,242 @@
+/*
+ * The firmware this driver downloads into the Localtalk card is a
+ * separate program and is not GPL'd source code, even though the Linux
+ * side driver and the routine that loads this data into the card are.
+ *
+ * It is taken from the COPS SDK and is under the following license
+ *
+ * This material is licensed to you strictly for use in conjunction with
+ * the use of COPS LocalTalk adapters.
+ * There is no charge for this SDK. And no warranty express or implied
+ * about its fitness for any purpose. However, we will cheerfully
+ * refund every penny you paid for this SDK...
+ * Regards,
+ *
+ * Thomas F. Divine
+ * Chief Scientist
+ */
+
+
+/* cops_ltdrv.h: LocalTalk driver firmware dump for Linux.
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_COPS_TANGENT
+
+unsigned char ltdrv_code[] = {
+ 58,3,0,50,148,10,33,143,15,62,85,119,
+ 190,32,9,62,170,119,190,32,3,35,24,241,
+ 34,146,10,249,17,150,10,33,143,15,183,237,
+ 82,77,68,11,107,98,19,54,0,237,176,62,
+ 16,237,57,51,62,0,237,57,50,237,57,54,
+ 62,12,237,57,49,62,195,33,39,2,50,56,
+ 0,34,57,0,237,86,205,30,2,251,205,60,
+ 10,24,169,67,111,112,121,114,105,103,104,116,
+ 32,40,99,41,32,49,57,56,56,45,49,57,
+ 57,50,44,32,80,114,105,110,116,105,110,103,
+ 32,67,111,109,109,117,110,105,99,97,116,105,
+ 111,110,115,32,65,115,115,111,99,105,97,116,
+ 101,115,44,32,73,110,99,46,65,108,108,32,
+ 114,105,103,104,116,115,32,114,101,115,101,114,
+ 118,101,100,46,32,32,4,4,22,40,255,60,
+ 4,96,10,224,6,0,7,126,2,64,11,246,
+ 12,6,13,0,14,193,15,0,5,96,3,192,
+ 1,0,9,8,62,3,211,82,62,192,211,82,
+ 201,62,3,211,82,62,213,211,82,201,62,5,
+ 211,82,62,224,211,82,201,62,5,211,82,62,
+ 224,211,82,201,62,5,211,82,62,96,211,82,
+ 201,6,28,33,180,1,14,82,237,163,194,4,
+ 2,33,39,2,34,64,0,58,3,0,230,1,
+ 192,62,11,237,121,62,118,237,121,201,33,182,
+ 10,54,132,205,253,1,201,245,197,213,229,42,
+ 150,10,14,83,17,98,2,67,20,237,162,58,
+ 179,1,95,219,82,230,1,32,6,29,32,247,
+ 195,17,3,62,1,211,82,219,82,95,230,160,
+ 32,10,237,162,32,225,21,32,222,195,15,3,
+ 237,162,123,230,96,194,21,3,62,48,211,82,
+ 62,1,211,82,175,211,82,237,91,150,10,43,
+ 55,237,82,218,19,3,34,152,10,98,107,58,
+ 154,10,190,32,81,62,1,50,158,10,35,35,
+ 62,132,190,32,44,54,133,43,70,58,154,10,
+ 119,43,112,17,3,0,205,137,3,62,16,211,
+ 82,62,56,211,82,205,217,1,42,150,10,14,
+ 83,17,98,2,67,20,58,178,1,95,195,59,
+ 2,62,129,190,194,227,2,54,130,43,70,58,
+ 154,10,119,43,112,17,3,0,205,137,3,195,
+ 254,2,35,35,126,254,132,194,227,2,205,61,
+ 3,24,20,62,128,166,194,222,2,221,229,221,
+ 33,175,10,205,93,6,205,144,7,221,225,225,
+ 209,193,241,251,237,77,221,229,221,33,159,10,
+ 205,93,6,221,225,205,61,3,195,247,2,24,
+ 237,24,235,24,233,230,64,40,2,24,227,24,
+ 225,175,50,179,10,205,208,1,201,197,33,4,
+ 0,57,126,35,102,111,205,51,3,193,201,62,
+ 1,50,179,10,34,150,10,54,0,58,179,10,
+ 183,200,62,14,211,82,62,193,211,82,62,10,
+ 211,82,62,224,211,82,62,6,211,82,58,154,
+ 10,211,82,62,16,211,82,62,56,211,82,62,
+ 48,211,82,219,82,230,1,40,4,219,83,24,
+ 242,62,14,211,82,62,33,211,82,62,1,211,
+ 82,62,9,211,82,62,32,211,82,205,217,1,
+ 201,14,83,205,208,1,24,23,14,83,205,208,
+ 1,205,226,1,58,174,1,61,32,253,205,244,
+ 1,58,174,1,61,32,253,205,226,1,58,175,
+ 1,61,32,253,62,5,211,82,62,233,211,82,
+ 62,128,211,82,58,176,1,61,32,253,237,163,
+ 27,62,192,211,82,219,82,230,4,40,250,237,
+ 163,27,122,179,32,243,219,82,230,4,40,250,
+ 58,178,1,71,219,82,230,4,40,3,5,32,
+ 247,219,82,230,4,40,250,205,235,1,58,177,
+ 1,61,32,253,205,244,1,201,229,213,35,35,
+ 126,230,128,194,145,4,43,58,154,10,119,43,
+ 70,33,181,10,119,43,112,17,3,0,243,62,
+ 10,211,82,219,82,230,128,202,41,4,209,225,
+ 62,1,55,251,201,205,144,3,58,180,10,254,
+ 255,202,127,4,205,217,1,58,178,1,71,219,
+ 82,230,1,32,6,5,32,247,195,173,4,219,
+ 83,71,58,154,10,184,194,173,4,58,178,1,
+ 71,219,82,230,1,32,6,5,32,247,195,173,
+ 4,219,83,58,178,1,71,219,82,230,1,32,
+ 6,5,32,247,195,173,4,219,83,254,133,194,
+ 173,4,58,179,1,24,4,58,179,1,135,61,
+ 32,253,209,225,205,137,3,205,61,3,183,251,
+ 201,209,225,243,62,10,211,82,219,82,230,128,
+ 202,164,4,62,1,55,251,201,205,144,3,205,
+ 61,3,183,251,201,209,225,62,2,55,251,201,
+ 243,62,14,211,82,62,33,211,82,251,201,33,
+ 4,0,57,94,35,86,33,2,0,57,126,35,
+ 102,111,221,229,34,193,10,237,83,195,10,221,
+ 33,171,10,205,93,6,58,185,10,50,186,10,
+ 58,184,10,135,50,184,10,205,112,6,254,3,
+ 56,16,58,185,10,135,60,230,15,50,185,10,
+ 175,50,184,10,24,23,58,183,10,205,112,6,
+ 254,3,48,13,58,185,10,203,63,50,185,10,
+ 62,255,50,183,10,58,185,10,50,186,10,58,
+ 183,10,135,50,183,10,62,32,50,187,10,50,
+ 188,10,6,255,219,82,230,16,32,3,5,32,
+ 247,205,180,4,6,40,219,82,230,16,40,3,
+ 5,32,247,62,10,211,82,219,82,230,128,194,
+ 46,5,219,82,230,16,40,214,237,95,71,58,
+ 186,10,160,230,15,40,32,71,14,10,62,10,
+ 211,82,219,82,230,128,202,119,5,205,180,4,
+ 195,156,5,219,82,230,16,202,156,5,13,32,
+ 229,16,225,42,193,10,237,91,195,10,205,252,
+ 3,48,7,61,202,156,5,195,197,5,221,225,
+ 33,0,0,201,221,33,163,10,205,93,6,58,
+ 188,10,61,50,188,10,40,19,58,186,10,246,
+ 1,50,186,10,58,183,10,246,1,50,183,10,
+ 195,46,5,221,225,33,1,0,201,221,33,167,
+ 10,205,93,6,58,184,10,246,1,50,184,10,
+ 58,186,10,135,246,1,50,186,10,58,187,10,
+ 61,50,187,10,194,46,5,221,225,33,2,0,
+ 201,221,229,33,0,0,57,17,4,0,25,126,
+ 50,154,10,230,128,50,189,10,58,189,10,183,
+ 40,6,221,33,88,2,24,4,221,33,150,0,
+ 58,154,10,183,40,49,60,40,46,61,33,190,
+ 10,119,35,119,35,54,129,175,50,158,10,221,
+ 43,221,229,225,124,181,40,42,33,190,10,17,
+ 3,0,205,206,4,17,232,3,27,123,178,32,
+ 251,58,158,10,183,40,224,58,154,10,71,62,
+ 7,128,230,127,71,58,189,10,176,50,154,10,
+ 24,166,221,225,201,183,221,52,0,192,221,52,
+ 1,192,221,52,2,192,221,52,3,192,55,201,
+ 6,8,14,0,31,48,1,12,16,250,121,201,
+ 33,2,0,57,94,35,86,35,78,35,70,35,
+ 126,35,102,105,79,120,68,103,237,176,201,33,
+ 2,0,57,126,35,102,111,62,17,237,57,48,
+ 125,237,57,40,124,237,57,41,62,0,237,57,
+ 42,62,64,237,57,43,62,0,237,57,44,33,
+ 128,2,125,237,57,46,124,237,57,47,62,145,
+ 237,57,48,211,68,58,149,10,211,66,201,33,
+ 2,0,57,126,35,102,111,62,33,237,57,48,
+ 62,64,237,57,32,62,0,237,57,33,237,57,
+ 34,125,237,57,35,124,237,57,36,62,0,237,
+ 57,37,33,128,2,125,237,57,38,124,237,57,
+ 39,62,97,237,57,48,211,67,58,149,10,211,
+ 66,201,237,56,46,95,237,56,47,87,237,56,
+ 46,111,237,56,47,103,183,237,82,32,235,33,
+ 128,2,183,237,82,201,237,56,38,95,237,56,
+ 39,87,237,56,38,111,237,56,39,103,183,237,
+ 82,32,235,33,128,2,183,237,82,201,205,106,
+ 10,221,110,6,221,102,7,126,35,110,103,195,
+ 118,10,205,106,10,33,0,0,34,205,10,34,
+ 198,10,34,200,10,33,143,15,34,207,10,237,
+ 91,207,10,42,146,10,183,237,82,17,0,255,
+ 25,34,203,10,203,124,40,6,33,0,125,34,
+ 203,10,42,207,10,229,205,37,3,195,118,10,
+ 205,106,10,229,42,150,10,35,35,35,229,205,
+ 70,7,193,124,230,3,103,221,117,254,221,116,
+ 255,237,91,152,10,35,35,35,183,237,82,32,
+ 12,17,5,0,42,152,10,205,91,10,242,203,
+ 7,42,150,10,229,205,37,3,195,118,10,237,
+ 91,152,10,42,200,10,25,34,200,10,42,205,
+ 10,25,34,205,10,237,91,203,10,33,158,253,
+ 25,237,91,205,10,205,91,10,242,245,7,33,
+ 0,0,34,205,10,62,1,50,197,10,205,5,
+ 8,33,0,0,57,249,195,118,10,205,106,10,
+ 58,197,10,183,202,118,10,237,91,198,10,42,
+ 205,10,205,91,10,242,46,8,237,91,205,10,
+ 33,98,2,25,237,91,198,10,205,91,10,250,
+ 78,8,237,91,198,10,42,205,10,183,237,82,
+ 32,7,42,200,10,125,180,40,13,237,91,205,
+ 10,42,198,10,205,91,10,242,97,8,237,91,
+ 207,10,42,205,10,25,229,205,37,3,175,50,
+ 197,10,195,118,10,205,29,3,33,0,0,57,
+ 249,195,118,10,205,106,10,58,202,10,183,40,
+ 22,205,14,7,237,91,209,10,19,19,19,205,
+ 91,10,242,139,8,33,1,0,195,118,10,33,
+ 0,0,195,118,10,205,126,10,252,255,205,108,
+ 8,125,180,194,118,10,237,91,200,10,33,0,
+ 0,205,91,10,242,118,10,237,91,207,10,42,
+ 198,10,25,221,117,254,221,116,255,35,35,35,
+ 229,205,70,7,193,124,230,3,103,35,35,35,
+ 221,117,252,221,116,253,229,221,110,254,221,102,
+ 255,229,33,212,10,229,205,124,6,193,193,221,
+ 110,252,221,102,253,34,209,10,33,211,10,54,
+ 4,33,209,10,227,205,147,6,193,62,1,50,
+ 202,10,243,221,94,252,221,86,253,42,200,10,
+ 183,237,82,34,200,10,203,124,40,17,33,0,
+ 0,34,200,10,34,205,10,34,198,10,50,197,
+ 10,24,37,221,94,252,221,86,253,42,198,10,
+ 25,34,198,10,237,91,203,10,33,158,253,25,
+ 237,91,198,10,205,91,10,242,68,9,33,0,
+ 0,34,198,10,205,5,8,33,0,0,57,249,
+ 251,195,118,10,205,106,10,33,49,13,126,183,
+ 40,16,205,42,7,237,91,47,13,19,19,19,
+ 205,91,10,242,117,9,58,142,15,198,1,50,
+ 142,15,195,118,10,33,49,13,126,254,1,40,
+ 25,254,3,202,7,10,254,5,202,21,10,33,
+ 49,13,54,0,33,47,13,229,205,207,6,195,
+ 118,10,58,141,15,183,32,72,33,51,13,126,
+ 50,149,10,205,86,7,33,50,13,126,230,127,
+ 183,32,40,58,142,15,230,127,50,142,15,183,
+ 32,5,198,1,50,142,15,33,50,13,126,111,
+ 23,159,103,203,125,58,142,15,40,5,198,128,
+ 50,142,15,33,50,13,119,33,50,13,126,111,
+ 23,159,103,229,205,237,5,193,33,211,10,54,
+ 2,33,2,0,34,209,10,58,154,10,33,212,
+ 10,119,58,148,10,33,213,10,119,33,209,10,
+ 229,205,147,6,193,24,128,42,47,13,229,33,
+ 50,13,229,205,191,4,193,24,239,33,211,10,
+ 54,6,33,3,0,34,209,10,58,154,10,33,
+ 212,10,119,58,148,10,33,213,10,119,33,214,
+ 10,54,5,33,209,10,229,205,147,6,24,200,
+ 205,106,10,33,49,13,54,0,33,47,13,229,
+ 205,207,6,33,209,10,227,205,147,6,193,205,
+ 80,9,205,145,8,24,248,124,170,250,99,10,
+ 237,82,201,124,230,128,237,82,60,201,225,253,
+ 229,221,229,221,33,0,0,221,57,233,221,249,
+ 221,225,253,225,201,233,225,253,229,221,229,221,
+ 33,0,0,221,57,94,35,86,35,235,57,249,
+ 235,233,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0
+ } ;
+
+#endif
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
new file mode 100644
index 000000000000..1a44a79ed064
--- /dev/null
+++ b/drivers/net/appletalk/ipddp.c
@@ -0,0 +1,317 @@
+/*
+ * ipddp.c: IP to Appletalk-IP Encapsulation driver for Linux
+ * Appletalk-IP to IP Decapsulation driver for Linux
+ *
+ * Authors:
+ * - DDP-IP Encap by: Bradford W. Johnson <johns393@maroon.tc.umn.edu>
+ * - DDP-IP Decap by: Jay Schulist <jschlst@samba.org>
+ *
+ * Derived from:
+ * - Almost all of this code already existed in net/appletalk/ddp.c; I just
+ * moved/reorganized it into a driver file. The original IP-over-DDP code
+ * was done by Bradford W. Johnson <johns393@maroon.tc.umn.edu>
+ * - skeleton.c: A network driver outline for linux.
+ * Written 1993-94 by Donald Becker.
+ * - dummy.c: A dummy net driver. By Nick Holloway.
+ * - MacGate: A user space Daemon for Appletalk-IP Decap for
+ * Linux by Jay Schulist <jschlst@samba.org>
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/atalk.h>
+#include <linux/if_arp.h>
+#include <net/route.h>
+#include <asm/uaccess.h>
+
+#include "ipddp.h" /* Our stuff */
+
+static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n";
+
+static struct ipddp_route *ipddp_route_list;
+
+#ifdef CONFIG_IPDDP_ENCAP
+static int ipddp_mode = IPDDP_ENCAP;
+#else
+static int ipddp_mode = IPDDP_DECAP;
+#endif
+
+/* Index to functions, as function prototypes. */
+static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *ipddp_get_stats(struct net_device *dev);
+static int ipddp_create(struct ipddp_route *new_rt);
+static int ipddp_delete(struct ipddp_route *rt);
+static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt);
+static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
+static struct net_device * __init ipddp_init(void)
+{
+ static unsigned version_printed;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct net_device_stats));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_MODULE_OWNER(dev);
+ strcpy(dev->name, "ipddp%d");
+
+ if (version_printed++ == 0)
+ printk(version);
+
+ /* Initialize the device structure. */
+ dev->hard_start_xmit = ipddp_xmit;
+ dev->get_stats = ipddp_get_stats;
+ dev->do_ioctl = ipddp_ioctl;
+
+ dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */
+ dev->mtu = 585;
+ dev->flags |= IFF_NOARP;
+
+ /*
+ * The worst-case header we will need is currently an
+ * Ethernet header (14 bytes) and a DDP header (sizeof ddpehdr + 1).
+ * We send over SNAP, so that takes another 8 bytes.
+ */
+ dev->hard_header_len = 14+8+sizeof(struct ddpehdr)+1;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ /* Let the user know which mode we are in */
+ if(ipddp_mode == IPDDP_ENCAP)
+ printk("%s: Appletalk-IP Encap. mode by Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n",
+ dev->name);
+ if(ipddp_mode == IPDDP_DECAP)
+ printk("%s: Appletalk-IP Decap. mode by Jay Schulist <jschlst@samba.org>\n",
+ dev->name);
+
+ return dev;
+}
+
+/*
+ * Get the current statistics. This may be called with the card open or closed.
+ */
+static struct net_device_stats *ipddp_get_stats(struct net_device *dev)
+{
+ return dev->priv;
+}
+
+/*
+ * Transmit LLAP/ELAP frame using aarp_send_ddp.
+ */
+static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 paddr = ((struct rtable*)skb->dst)->rt_gateway;
+ struct ddpehdr *ddp;
+ struct ipddp_route *rt;
+ struct atalk_addr *our_addr;
+
+ /*
+ * Find appropriate route to use, based only on IP number.
+ */
+ for(rt = ipddp_route_list; rt != NULL; rt = rt->next)
+ {
+ if(rt->ip == paddr)
+ break;
+ }
+ if(rt == NULL)
+ return 0;
+
+ our_addr = atalk_find_dev_addr(rt->dev);
+
+ if(ipddp_mode == IPDDP_DECAP)
+ /*
+ * Pull off the excess room that should not be there.
+ * This is due to a hard-header problem. This is the
+ * quick fix for now though, till it breaks.
+ */
+ skb_pull(skb, 35-(sizeof(struct ddpehdr)+1));
+
+ /* Create the Extended DDP header */
+ ddp = (struct ddpehdr *)skb->data;
+ ddp->deh_len = skb->len;
+ ddp->deh_hops = 1;
+ ddp->deh_pad = 0;
+ ddp->deh_sum = 0;
+
+ /*
+ * For LocalTalk we need aarp_send_ddp to strip the
+ * long DDP header and place a short DDP header on it.
+ */
+ if(rt->dev->type == ARPHRD_LOCALTLK)
+ {
+ ddp->deh_dnet = 0; /* FIXME more hops?? */
+ ddp->deh_snet = 0;
+ }
+ else
+ {
+ ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */
+ ddp->deh_snet = our_addr->s_net;
+ }
+ ddp->deh_dnode = rt->at.s_node;
+ ddp->deh_snode = our_addr->s_node;
+ ddp->deh_dport = 72;
+ ddp->deh_sport = 72;
+
+ *((__u8 *)(ddp+1)) = 22; /* ddp type = IP */
+ *((__u16 *)ddp)=ntohs(*((__u16 *)ddp)); /* fix up length field */
+
+ skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */
+
+ ((struct net_device_stats *) dev->priv)->tx_packets++;
+ ((struct net_device_stats *) dev->priv)->tx_bytes+=skb->len;
+
+ if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Create a routing entry. We first verify that the
+ * record does not already exist. If it does we return -EEXIST
+ */
+static int ipddp_create(struct ipddp_route *new_rt)
+{
+ struct ipddp_route *rt =(struct ipddp_route*) kmalloc(sizeof(*rt), GFP_KERNEL);
+
+ if (rt == NULL)
+ return -ENOMEM;
+
+ rt->ip = new_rt->ip;
+ rt->at = new_rt->at;
+ rt->next = NULL;
+ if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) {
+ kfree(rt);
+ return -ENETUNREACH;
+ }
+
+ if (ipddp_find_route(rt)) {
+ kfree(rt);
+ return -EEXIST;
+ }
+
+ rt->next = ipddp_route_list;
+ ipddp_route_list = rt;
+
+ return 0;
+}
+
+/*
+ * Delete a route, we only delete a FULL match.
+ * If route does not exist we return -ENOENT.
+ */
+static int ipddp_delete(struct ipddp_route *rt)
+{
+ struct ipddp_route **r = &ipddp_route_list;
+ struct ipddp_route *tmp;
+
+ while((tmp = *r) != NULL)
+ {
+ if(tmp->ip == rt->ip
+ && tmp->at.s_net == rt->at.s_net
+ && tmp->at.s_node == rt->at.s_node)
+ {
+ *r = tmp->next;
+ kfree(tmp);
+ return 0;
+ }
+ r = &tmp->next;
+ }
+
+ return (-ENOENT);
+}
+
+/*
+ * Find a routing entry, we only return a FULL match
+ */
+static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
+{
+ struct ipddp_route *f;
+
+ for(f = ipddp_route_list; f != NULL; f = f->next)
+ {
+ if(f->ip == rt->ip
+ && f->at.s_net == rt->at.s_net
+ && f->at.s_node == rt->at.s_node)
+ return (f);
+ }
+
+ return (NULL);
+}
+
+static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ipddp_route __user *rt = ifr->ifr_data;
+ struct ipddp_route rcp;
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(copy_from_user(&rcp, rt, sizeof(rcp)))
+ return -EFAULT;
+
+ switch(cmd)
+ {
+ case SIOCADDIPDDPRT:
+ return (ipddp_create(&rcp));
+
+ case SIOCFINDIPDDPRT:
+ if(copy_to_user(rt, ipddp_find_route(&rcp), sizeof(struct ipddp_route)))
+ return -EFAULT;
+ return 0;
+
+ case SIOCDELIPDDPRT:
+ return (ipddp_delete(&rcp));
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct net_device *dev_ipddp;
+
+MODULE_LICENSE("GPL");
+module_param(ipddp_mode, int, 0);
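+/* Usage sketch (illustrative, not from the original source): the mode can be
+ * chosen at load time, e.g. "insmod ipddp.o ipddp_mode=1" for IPDDP_ENCAP or
+ * "ipddp_mode=2" for IPDDP_DECAP, using the values defined in ipddp.h.
+ */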
+
+static int __init ipddp_init_module(void)
+{
+ dev_ipddp = ipddp_init();
+ if (IS_ERR(dev_ipddp))
+ return PTR_ERR(dev_ipddp);
+ return 0;
+}
+
+static void __exit ipddp_cleanup_module(void)
+{
+ struct ipddp_route *p;
+
+ unregister_netdev(dev_ipddp);
+ free_netdev(dev_ipddp);
+
+ while (ipddp_route_list) {
+ p = ipddp_route_list->next;
+ kfree(ipddp_route_list);
+ ipddp_route_list = p;
+ }
+}
+
+module_init(ipddp_init_module);
+module_exit(ipddp_cleanup_module);
diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h
new file mode 100644
index 000000000000..52072fb0c610
--- /dev/null
+++ b/drivers/net/appletalk/ipddp.h
@@ -0,0 +1,27 @@
+/*
+ * ipddp.h: Header for IP-over-DDP driver for Linux.
+ */
+
+#ifndef __LINUX_IPDDP_H
+#define __LINUX_IPDDP_H
+
+#ifdef __KERNEL__
+
+#define SIOCADDIPDDPRT (SIOCDEVPRIVATE)
+#define SIOCDELIPDDPRT (SIOCDEVPRIVATE+1)
+#define SIOCFINDIPDDPRT (SIOCDEVPRIVATE+2)
+
+struct ipddp_route
+{
+ struct net_device *dev; /* Carrier device */
+ __u32 ip; /* IP address */
+ struct atalk_addr at; /* Gateway appletalk address */
+ int flags;
+ struct ipddp_route *next;
+};
+
+#define IPDDP_ENCAP 1
+#define IPDDP_DECAP 2
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_IPDDP_H */
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
new file mode 100644
index 000000000000..ad8e943231a1
--- /dev/null
+++ b/drivers/net/appletalk/ltpc.c
@@ -0,0 +1,1313 @@
+/*** ltpc.c -- a driver for the LocalTalk PC card.
+ *
+ * Copyright (c) 1995,1996 Bradford W. Johnson <johns393@maroon.tc.umn.edu>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This is ALPHA code at best. It may not work for you. It may
+ * damage your equipment. It may damage your relations with other
+ * users of your network. Use it at your own risk!
+ *
+ * Based in part on:
+ * skeleton.c by Donald Becker
+ * dummy.c by Nick Holloway and Alan Cox
+ * loopback.c by Ross Biro, Fred van Kampen, Donald Becker
+ * the netatalk source code (UMICH)
+ * lots of work on the card...
+ *
+ * I do not have access to the (proprietary) SDK that goes with the card.
+ * If you do, I don't want to know about it, and you can probably write
+ * a better driver yourself anyway. This does mean that the pieces that
+ * talk to the card are guesswork on my part, so use at your own risk!
+ *
+ * This is my first try at writing Linux networking code, and is also
+ * guesswork. Again, use at your own risk! (Although on this part, I'd
+ * welcome suggestions)
+ *
+ * This is a loadable kernel module which seems to work at my site
+ * consisting of a 1.2.13 linux box running netatalk 1.3.3, and with
+ * the kernel support from 1.3.3b2 including patches routing.patch
+ * and ddp.disappears.from.chooser. In order to run it, you will need
+ * to patch ddp.c and aarp.c in the kernel, but only a little...
+ *
+ * I'm fairly confident that while this is arguably badly written, the
+ * problems that people experience will be "higher level", that is, with
+ * complications in the netatalk code. The driver itself doesn't do
+ * anything terribly complicated -- it pretends to be an ether device
+ * as far as netatalk is concerned, strips the DDP data out of the ether
+ * frame and builds a LLAP packet to send out the card. In the other
+ * direction, it receives LLAP frames from the card and builds a fake
+ * ether packet that it then tosses up to the networking code. You can
+ * argue (correctly) that this is an ugly way to do things, but it
+ * requires a minimal amount of fooling with the code in ddp.c and aarp.c.
+ *
+ * The card will do a lot more than is used here -- I *think* it has the
+ * layers up through ATP. Even if you knew how that part works (which I
+ * don't) it would be a big job to carve up the kernel ddp code to insert
+ * things at a higher level, and probably a bad idea...
+ *
+ * There are a number of other cards that do LocalTalk on the PC. If
+ * nobody finds any insurmountable (at the netatalk level) problems
+ * here, this driver should encourage people to put some work into the
+ * other cards (some of which I gather are still commercially available)
+ * and also to put hooks for LocalTalk into the official ddp code.
+ *
+ * I welcome comments and suggestions. This is my first try at Linux
+ * networking stuff, and there are probably lots of things that I did
+ * suboptimally.
+ *
+ ***/
+
+/***
+ *
+ * $Log: ltpc.c,v $
+ * Revision 1.1.2.1 2000/03/01 05:35:07 jgarzik
+ * at and tr cleanup
+ *
+ * Revision 1.8 1997/01/28 05:44:54 bradford
+ * Clean up for non-module a little.
+ * Hacked about a bit to clean things up - Alan Cox
+ * Probably broke it from the original 1.8
+ *
+
+ * 1998/11/09: David Huggins-Daines <dhd@debian.org>
+ * Cleaned up the initialization code to use the standard autoirq methods,
+ and to probe for things in the standard order of i/o, irq, dma. This
+ removes the "reset the reset" hack, because I couldn't figure out an
+ easy way to get the card to trigger an interrupt after it.
+ * Added support for passing configuration parameters on the kernel command
+ line and through insmod
+ * Changed the device name from "ltalk0" to "lt0", both to conform with the
+ other localtalk driver, and to clear up the inconsistency between the
+ module and the non-module versions of the driver :-)
+ * Added a bunch of comments (I was going to make some enums for the state
+ codes and the register offsets, but I'm still not sure exactly what their
+ semantics are)
+ * Don't poll anymore in interrupt-driven mode
+ * It seems to work as a module now (as of 2.1.127), but I don't think
+ I'm responsible for that...
+
+ *
+ * Revision 1.7 1996/12/12 03:42:33 bradford
+ * DMA alloc cribbed from 3c505.c.
+ *
+ * Revision 1.6 1996/12/12 03:18:58 bradford
+ * Added virt_to_bus; works in 2.1.13.
+ *
+ * Revision 1.5 1996/12/12 03:13:22 root
+ * xmitQel initialization -- think through better though.
+ *
+ * Revision 1.4 1996/06/18 14:55:55 root
+ * Change names to ltpc. Tabs. Took a shot at dma alloc,
+ * although more needs to be done eventually.
+ *
+ * Revision 1.3 1996/05/22 14:59:39 root
+ * Change dev->open, dev->close to track dummy.c in 1.99.(around 7)
+ *
+ * Revision 1.2 1996/05/22 14:58:24 root
+ * Change tabs mostly.
+ *
+ * Revision 1.1 1996/04/23 04:45:09 root
+ * Initial revision
+ *
+ * Revision 0.16 1996/03/05 15:59:56 root
+ * Change ARPHRD_LOCALTLK definition to the "real" one.
+ *
+ * Revision 0.15 1996/03/05 06:28:30 root
+ * Changes for kernel 1.3.70. Still need a few patches to kernel, but
+ * it's getting closer.
+ *
+ * Revision 0.14 1996/02/25 17:38:32 root
+ * More cleanups. Removed query to card on get_stats.
+ *
+ * Revision 0.13 1996/02/21 16:27:40 root
+ * Refix debug_print_skb. Fix mac.raw gotcha that appeared in 1.3.65.
+ * Clean up receive code a little.
+ *
+ * Revision 0.12 1996/02/19 16:34:53 root
+ * Fix debug_print_skb. Kludge outgoing snet to 0 when using startup
+ * range. Change debug to mask: 1 for verbose, 2 for higher level stuff
+ * including packet printing, 4 for lower level (card i/o) stuff.
+ *
+ * Revision 0.11 1996/02/12 15:53:38 root
+ * Added router sends (requires new aarp.c patch)
+ *
+ * Revision 0.10 1996/02/11 00:19:35 root
+ * Change source LTALK_LOGGING debug switch to insmod ... debug=2.
+ *
+ * Revision 0.9 1996/02/10 23:59:35 root
+ * Fixed those fixes for 1.2 -- DANGER! The at.h that comes with netatalk
+ * has a *different* definition of struct sockaddr_at than the Linux kernel
+ * does. This is an "insidious and invidious" bug...
+ * (Actually the preceding comment is false -- it's the atalk.h in the
+ * ancient atalk-0.06 that's the problem)
+ *
+ * Revision 0.8 1996/02/10 19:09:00 root
+ * Merge 1.3 changes. Tested OK under 1.3.60.
+ *
+ * Revision 0.7 1996/02/10 17:56:56 root
+ * Added debug=1 parameter on insmod for debugging prints. Tried
+ * to fix timer unload on rmmod, but I don't think that's the problem.
+ *
+ * Revision 0.6 1995/12/31 19:01:09 root
+ * Clean up rmmod, irq comments per feedback from Corin Anderson (Thanks Corey!)
+ * Clean up initial probing -- sometimes the card wakes up latched in reset.
+ *
+ * Revision 0.5 1995/12/22 06:03:44 root
+ * Added comments in front and cleaned up a bit.
+ * This version sent out to people.
+ *
+ * Revision 0.4 1995/12/18 03:46:44 root
+ * Return shortDDP to longDDP fake to 0/0. Added command structs.
+ *
+ ***/
+
+/* ltpc jumpers are:
+*
+* Interrupts -- set at most one. If none are set, the driver uses
+* polled mode. Because the card was developed in the XT era, the
+* original documentation refers to IRQ2. Since you'll be running
+* this on an AT (or later) class machine, that really means IRQ9.
+*
+* SW1 IRQ 4
+* SW2 IRQ 3
+* SW3 IRQ 9 (2 in original card documentation only applies to XT)
+*
+*
+* DMA -- choose DMA 1 or 3, and set both corresponding switches.
+*
+* SW4 DMA 3
+* SW5 DMA 1
+* SW6 DMA 3
+* SW7 DMA 1
+*
+*
+* I/O address -- choose one.
+*
+* SW8 220 / 240
+*/
+
+/* To have some stuff logged, do
+* insmod ltpc.o debug=1
+*
+* For a whole bunch of stuff, use higher numbers.
+*
+* The default is 0, i.e. no messages except for the probe results.
+*/
+
+/* insmod-tweakable variables */
+static int debug;
+#define DEBUG_VERBOSE 1
+#define DEBUG_UPPER 2
+#define DEBUG_LOWER 4
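+/* debug is a bitmask; e.g. debug=5 would enable DEBUG_VERBOSE and
+ * DEBUG_LOWER (card i/o) messages together (illustrative value). */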
+
+static int io;
+static int irq;
+static int dma;
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_ltalk.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/atalk.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+
+/* our stuff */
+#include "ltpc.h"
+
+static DEFINE_SPINLOCK(txqueue_lock);
+static DEFINE_SPINLOCK(mbox_lock);
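+/* txqueue_lock guards the xmQhd/xmQtl transmit queue and the QInIdle flag;
+ * mbox_lock serializes mailbox allocation in getmbox(). */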
+
+/* function prototypes */
+static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
+ void *dbuf, int dbuflen);
+static int sendup_buffer (struct net_device *dev);
+
+/* Dma Memory related stuff, cribbed directly from 3c505.c */
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = get_order(size);
+
+ return __get_dma_pages(GFP_KERNEL, order);
+}
+
+/* DMA data buffer, DMA command buffer */
+static unsigned char *ltdmabuf;
+static unsigned char *ltdmacbuf;
+
+/* private struct, holds our appletalk address */
+
+struct ltpc_private
+{
+ struct net_device_stats stats;
+ struct atalk_addr my_addr;
+};
+
+/* transmit queue element struct */
+
+struct xmitQel {
+ struct xmitQel *next;
+ /* command buffer */
+ unsigned char *cbuf;
+ short cbuflen;
+ /* data buffer */
+ unsigned char *dbuf;
+ short dbuflen;
+ unsigned char QWrite; /* read or write data */
+ unsigned char mailbox;
+};
+
+/* the transmit queue itself */
+
+static struct xmitQel *xmQhd, *xmQtl;
+
+static void enQ(struct xmitQel *qel)
+{
+ unsigned long flags;
+ qel->next = NULL;
+
+ spin_lock_irqsave(&txqueue_lock, flags);
+ if (xmQtl) {
+ xmQtl->next = qel;
+ } else {
+ xmQhd = qel;
+ }
+ xmQtl = qel;
+ spin_unlock_irqrestore(&txqueue_lock, flags);
+
+ if (debug & DEBUG_LOWER)
+ printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
+}
+
+static struct xmitQel *deQ(void)
+{
+ unsigned long flags;
+ int i;
+ struct xmitQel *qel=NULL;
+
+ spin_lock_irqsave(&txqueue_lock, flags);
+ if (xmQhd) {
+ qel = xmQhd;
+ xmQhd = qel->next;
+ if(!xmQhd) xmQtl = NULL;
+ }
+ spin_unlock_irqrestore(&txqueue_lock, flags);
+
+ if ((debug & DEBUG_LOWER) && qel) {
+ int n;
+ printk(KERN_DEBUG "ltpc: dequeued command ");
+ n = qel->cbuflen;
+ if (n>100) n=100;
+ for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
+ printk("\n");
+ }
+
+ return qel;
+}
+
+/* and... the queue elements we'll be using */
+static struct xmitQel qels[16];
+
+/* and their corresponding mailboxes */
+static unsigned char mailbox[16];
+static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+
+static int wait_timeout(struct net_device *dev, int c)
+{
+ /* returns true (1) if the status register stayed at value c for the
+ whole wait, i.e. we timed out */
+ /* this uses base+6, but it's ok */
+ int i;
+
+ /* twenty seconds or so total (200000 iterations of ~100us) */
+
+ for(i=0;i<200000;i++) {
+ if ( c != inb_p(dev->base_addr+6) ) return 0;
+ udelay(100);
+ }
+ return 1; /* timed out */
+}
+
+/* get the first free mailbox */
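+/* (index 0 is reserved for the GETRESULT request queued from idle()) */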
+
+static int getmbox(void)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mbox_lock, flags);
+ for(i=1;i<16;i++) if(!mboxinuse[i]) {
+ mboxinuse[i]=1;
+ spin_unlock_irqrestore(&mbox_lock, flags);
+ return i;
+ }
+ spin_unlock_irqrestore(&mbox_lock, flags);
+ return 0;
+}
+
+/* read a command from the card */
+static void handlefc(struct net_device *dev)
+{
+ /* called *only* from idle, non-reentrant */
+ int dma = dev->dma;
+ int base = dev->base_addr;
+ unsigned long flags;
+
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_READ);
+ set_dma_addr(dma,virt_to_bus(ltdmacbuf));
+ set_dma_count(dma,50);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ inb_p(base+3);
+ inb_p(base+2);
+
+ if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
+}
+
+/* read data from the card */
+static void handlefd(struct net_device *dev)
+{
+ int dma = dev->dma;
+ int base = dev->base_addr;
+ unsigned long flags;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_READ);
+ set_dma_addr(dma,virt_to_bus(ltdmabuf));
+ set_dma_count(dma,800);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ inb_p(base+3);
+ inb_p(base+2);
+
+ if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
+ sendup_buffer(dev);
+}
+
+static void handlewrite(struct net_device *dev)
+{
+ /* called *only* from idle, non-reentrant */
+ /* on entry, 0xfb and ltdmabuf holds data */
+ int dma = dev->dma;
+ int base = dev->base_addr;
+ unsigned long flags;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_WRITE);
+ set_dma_addr(dma,virt_to_bus(ltdmabuf));
+ set_dma_count(dma,800);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ inb_p(base+3);
+ inb_p(base+2);
+
+ if ( wait_timeout(dev,0xfb) ) {
+ flags=claim_dma_lock();
+ printk("timed out in handlewrite, dma res %d\n",
+ get_dma_residue(dev->dma) );
+ release_dma_lock(flags);
+ }
+}
+
+static void handleread(struct net_device *dev)
+{
+ /* on entry, 0xfb */
+ /* on exit, ltdmabuf holds data */
+ int dma = dev->dma;
+ int base = dev->base_addr;
+ unsigned long flags;
+
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_READ);
+ set_dma_addr(dma,virt_to_bus(ltdmabuf));
+ set_dma_count(dma,800);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ inb_p(base+3);
+ inb_p(base+2);
+ if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
+}
+
+static void handlecommand(struct net_device *dev)
+{
+ /* on entry, 0xfa and ltdmacbuf holds command */
+ int dma = dev->dma;
+ int base = dev->base_addr;
+ unsigned long flags;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_WRITE);
+ set_dma_addr(dma,virt_to_bus(ltdmacbuf));
+ set_dma_count(dma,50);
+ enable_dma(dma);
+ release_dma_lock(flags);
+ inb_p(base+3);
+ inb_p(base+2);
+ if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
+}
+
+/* ready made command for getting the result from the card */
+static unsigned char rescbuf[2] = {LT_GETRESULT,0};
+static unsigned char resdbuf[2];
+
+static int QInIdle;
+
+/* idle expects to be called with the IRQ line high -- either because of
+ * an interrupt, or because the line is tri-stated
+ */
+
+static void idle(struct net_device *dev)
+{
+ unsigned long flags;
+ int state;
+ /* FIXME This is initialized to shut the warning up, but I need to
+ * think this through again.
+ */
+ struct xmitQel *q = NULL;
+ int oops;
+ int i;
+ int base = dev->base_addr;
+
+ spin_lock_irqsave(&txqueue_lock, flags);
+ if(QInIdle) {
+ spin_unlock_irqrestore(&txqueue_lock, flags);
+ return;
+ }
+ QInIdle = 1;
+ spin_unlock_irqrestore(&txqueue_lock, flags);
+
+ /* this tri-states the IRQ line */
+ (void) inb_p(base+6);
+
+ oops = 100;
+
+loop:
+ if (0>oops--) {
+ printk("idle: looped too many times\n");
+ goto done;
+ }
+
+ state = inb_p(base+6);
+ if (state != inb_p(base+6)) goto loop;
+
+ switch(state) {
+ case 0xfc:
+ /* incoming command */
+ if (debug & DEBUG_LOWER) printk("idle: fc\n");
+ handlefc(dev);
+ break;
+ case 0xfd:
+ /* incoming data */
+ if(debug & DEBUG_LOWER) printk("idle: fd\n");
+ handlefd(dev);
+ break;
+ case 0xf9:
+ /* result ready */
+ if (debug & DEBUG_LOWER) printk("idle: f9\n");
+ if(!mboxinuse[0]) {
+ mboxinuse[0] = 1;
+ qels[0].cbuf = rescbuf;
+ qels[0].cbuflen = 2;
+ qels[0].dbuf = resdbuf;
+ qels[0].dbuflen = 2;
+ qels[0].QWrite = 0;
+ qels[0].mailbox = 0;
+ enQ(&qels[0]);
+ }
+ inb_p(dev->base_addr+1);
+ inb_p(dev->base_addr+0);
+ if( wait_timeout(dev,0xf9) )
+ printk("timed out idle f9\n");
+ break;
+ case 0xf8:
+ /* ?? */
+ if (xmQhd) {
+ inb_p(dev->base_addr+1);
+ inb_p(dev->base_addr+0);
+ if(wait_timeout(dev,0xf8) )
+ printk("timed out idle f8\n");
+ } else {
+ goto done;
+ }
+ break;
+ case 0xfa:
+ /* waiting for command */
+ if(debug & DEBUG_LOWER) printk("idle: fa\n");
+ if (xmQhd) {
+ q=deQ();
+ memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
+ ltdmacbuf[1] = q->mailbox;
+ if (debug>1) {
+ int n;
+ printk("ltpc: sent command ");
+ n = q->cbuflen;
+ if (n>100) n=100;
+ for(i=0;i<n;i++)
+ printk("%02x ",ltdmacbuf[i]);
+ printk("\n");
+ }
+ handlecommand(dev);
+ if(0xfa==inb_p(base+6)) {
+ /* we timed out, so return */
+ goto done;
+ }
+ } else {
+ /* we don't seem to have a command */
+ if (!mboxinuse[0]) {
+ mboxinuse[0] = 1;
+ qels[0].cbuf = rescbuf;
+ qels[0].cbuflen = 2;
+ qels[0].dbuf = resdbuf;
+ qels[0].dbuflen = 2;
+ qels[0].QWrite = 0;
+ qels[0].mailbox = 0;
+ enQ(&qels[0]);
+ } else {
+ printk("trouble: response command already queued\n");
+ goto done;
+ }
+ }
+ break;
+ case 0xfb:
+ /* data transfer ready */
+ if(debug & DEBUG_LOWER) printk("idle: fb\n");
+ if(q->QWrite) {
+ memcpy(ltdmabuf,q->dbuf,q->dbuflen);
+ handlewrite(dev);
+ } else {
+ handleread(dev);
+ /* non-zero mailbox numbers are for
+ commands, 0 is for GETRESULT
+ requests */
+ if(q->mailbox) {
+ memcpy(q->dbuf,ltdmabuf,q->dbuflen);
+ } else {
+ /* this was a result */
+ mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
+ mboxinuse[0]=0;
+ }
+ }
+ break;
+ }
+ goto loop;
+
+done:
+ QInIdle=0;
+
+ /* now set the interrupts back as appropriate */
+ /* the first read takes it out of tri-state (but still high) */
+ /* the second resets it */
+ /* note that after this point, any read of base+6 will
+ trigger an interrupt */
+
+ if (dev->irq) {
+ inb_p(base+7);
+ inb_p(base+7);
+ }
+ return;
+}
+
+
+static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
+ void *dbuf, int dbuflen)
+{
+
+ int i = getmbox();
+ int ret;
+
+ if(i) {
+ qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuflen = cbuflen;
+ qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuflen = dbuflen;
+ qels[i].QWrite = 1;
+ qels[i].mailbox = i; /* this should really be initialized up front, rather than here */
+ enQ(&qels[i]);
+ idle(dev);
+ ret = mailbox[i];
+ mboxinuse[i]=0;
+ return ret;
+ }
+ printk("ltpc: could not allocate mbox\n");
+ return -1;
+}
+
+static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
+ void *dbuf, int dbuflen)
+{
+
+ int i = getmbox();
+ int ret;
+
+ if(i) {
+ qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuflen = cbuflen;
+ qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuflen = dbuflen;
+ qels[i].QWrite = 0;
+ qels[i].mailbox = i; /* this should really be initialized up front, rather than here */
+ enQ(&qels[i]);
+ idle(dev);
+ ret = mailbox[i];
+ mboxinuse[i]=0;
+ return ret;
+ }
+ printk("ltpc: could not allocate mbox\n");
+ return -1;
+}
+
+/* end of idle handlers -- what should be seen is do_read, do_write */
+
+static struct timer_list ltpc_timer;
+
+static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *ltpc_get_stats(struct net_device *dev);
+
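+/* read_30()/set_30() fetch and update the card's flags via the
+ * LT_GETFLAGS/LT_SETFLAGS commands; ltpc_ioctl() uses them to turn on
+ * LT_FLAG_ALLLAP so that all LLAP frames are passed up raw. */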
+static int read_30 ( struct net_device *dev)
+{
+ lt_command c;
+ c.getflags.command = LT_GETFLAGS;
+ return do_read(dev, &c, sizeof(c.getflags),&c,0);
+}
+
+static int set_30 (struct net_device *dev,int x)
+{
+ lt_command c;
+ c.setflags.command = LT_SETFLAGS;
+ c.setflags.flags = x;
+ return do_write(dev, &c, sizeof(c.setflags),&c,0);
+}
+
+/* LLAP to DDP translation */
+
+static int sendup_buffer (struct net_device *dev)
+{
+ /* on entry, command is in ltdmacbuf, data in ltdmabuf */
+ /* called from idle, non-reentrant */
+
+ int dnode, snode, llaptype, len;
+ int sklen;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats;
+ struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
+
+ if (ltc->command != LT_RCVLAP) {
+ printk("unknown command 0x%02x from ltpc card\n",ltc->command);
+ return(-1);
+ }
+ dnode = ltc->dnode;
+ snode = ltc->snode;
+ llaptype = ltc->laptype;
+ len = ltc->length;
+
+ sklen = len;
+ if (llaptype == 1)
+ sklen += 8; /* correct for short ddp */
+ if(sklen > 800) {
+ printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
+ dev->name,sklen);
+ return -1;
+ }
+
+ if ( (llaptype==0) || (llaptype>2) ) {
+ printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
+ return -1;
+ }
+
+
+ skb = dev_alloc_skb(3+sklen);
+ if (skb == NULL)
+ {
+ printk("%s: dropping packet due to memory squeeze.\n",
+ dev->name);
+ return -1;
+ }
+ skb->dev = dev;
+
+ if (sklen > len)
+ skb_reserve(skb,8);
+ skb_put(skb,len+3);
+ skb->protocol = htons(ETH_P_LOCALTALK);
+ /* add LLAP header */
+ skb->data[0] = dnode;
+ skb->data[1] = snode;
+ skb->data[2] = llaptype;
+ skb->mac.raw = skb->data; /* save pointer to llap header */
+ skb_pull(skb,3);
+
+ /* copy ddp(s,e)hdr + contents */
+ memcpy(skb->data,(void*)ltdmabuf,len);
+
+ skb->h.raw = skb->data;
+
+ stats->rx_packets++;
+ stats->rx_bytes+=skb->len;
+
+ /* toss it onwards */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ return 0;
+}
+
+/* the handler for the board interrupt */
+
+static irqreturn_t
+ltpc_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
+{
+ struct net_device *dev = dev_id;
+
+ if (dev==NULL) {
+ printk("ltpc_interrupt: unknown device.\n");
+ return IRQ_NONE;
+ }
+
+ inb_p(dev->base_addr+6); /* disable further interrupts from board */
+
+ idle(dev); /* handle whatever is coming in */
+
+ /* idle re-enables interrupts from board */
+
+ return IRQ_HANDLED;
+}
+
+/***
+ *
+ * The ioctls that the driver responds to are:
+ *
+ * SIOCSIFADDR -- do probe using the passed node hint.
+ * SIOCGIFADDR -- return net, node.
+ *
+ * some of this stuff should be done elsewhere.
+ *
+ ***/
+
+static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
+ /* we'll keep the localtalk node address in dev->pa_addr */
+ struct atalk_addr *aa = &((struct ltpc_private *)dev->priv)->my_addr;
+ struct lt_init c;
+ int ltflags;
+
+ if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");
+
+ switch(cmd) {
+ case SIOCSIFADDR:
+
+ aa->s_net = sa->sat_addr.s_net;
+
+ /* this does the probe and returns the node addr */
+ c.command = LT_INIT;
+ c.hint = sa->sat_addr.s_node;
+
+ aa->s_node = do_read(dev,&c,sizeof(c),&c,0);
+
+ /* get all llap frames raw */
+ ltflags = read_30(dev);
+ ltflags |= LT_FLAG_ALLLAP;
+ set_30 (dev,ltflags);
+
+ dev->broadcast[0] = 0xFF;
+ dev->dev_addr[0] = aa->s_node;
+
+ dev->addr_len=1;
+
+ return 0;
+
+ case SIOCGIFADDR:
+
+ sa->sat_addr.s_net = aa->s_net;
+ sa->sat_addr.s_node = aa->s_node;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ /* This needs to be present to keep netatalk happy. */
+ /* Actually netatalk needs fixing! */
+}
+
+static int ltpc_hard_header (struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+ if(debug & DEBUG_VERBOSE)
+ printk("ltpc_hard_header called for device %s\n",
+ dev->name);
+ return 0;
+}
+
+static int ltpc_poll_counter;
+
+static void ltpc_poll(unsigned long l)
+{
+ struct net_device *dev = (struct net_device *) l;
+
+ del_timer(&ltpc_timer);
+
+ if(debug & DEBUG_VERBOSE) {
+ if (!ltpc_poll_counter) {
+ ltpc_poll_counter = 50;
+ printk("ltpc poll is alive\n");
+ }
+ ltpc_poll_counter--;
+ }
+
+ if (!dev)
+ return; /* we've been downed */
+
+ /* poll 20 times per second */
+ idle(dev);
+ ltpc_timer.expires = jiffies + HZ/20;
+
+ add_timer(&ltpc_timer);
+}
+
+/* DDP to LLAP translation */
+
+static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ /* in kernel 1.3.xx, on entry skb->data points to ddp header,
+ * and skb->len is the length of the ddp data + ddp header
+ */
+
+ struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats;
+
+ int i;
+ struct lt_sendlap cbuf;
+
+ cbuf.command = LT_SENDLAP;
+ cbuf.dnode = skb->data[0];
+ cbuf.laptype = skb->data[2];
+ skb_pull(skb,3); /* skip past LLAP header */
+ cbuf.length = skb->len; /* this is host order */
+ skb->h.raw=skb->data;
+
+ if(debug & DEBUG_UPPER) {
+ printk("command ");
+ for(i=0;i<6;i++)
+ printk("%02x ",((unsigned char *)&cbuf)[i]);
+ printk("\n");
+ }
+
+ do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len);
+
+ if(debug & DEBUG_UPPER) {
+ printk("sent %d ddp bytes\n",skb->len);
+ for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]);
+ printk("\n");
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes+=skb->len;
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
+{
+ struct net_device_stats *stats = &((struct ltpc_private *) dev->priv)->stats;
+ return stats;
+}
+
+/* initialization stuff */
+
+static int __init ltpc_probe_dma(int base, int dma)
+{
+ int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
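+ /* "want" is a bitmask of channels to try: bit 0 = DMA 1, bit 1 = DMA 3.
+ * If no channel was specified we probe both and keep whichever one the
+ * card actually used (checked via get_dma_residue() below). */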
+ unsigned long timeout;
+ unsigned long f;
+
+ if (want & 1) {
+ if (request_dma(1,"ltpc")) {
+ want &= ~1;
+ } else {
+ f=claim_dma_lock();
+ disable_dma(1);
+ clear_dma_ff(1);
+ set_dma_mode(1,DMA_MODE_WRITE);
+ set_dma_addr(1,virt_to_bus(ltdmabuf));
+ set_dma_count(1,sizeof(struct lt_mem));
+ enable_dma(1);
+ release_dma_lock(f);
+ }
+ }
+ if (want & 2) {
+ if (request_dma(3,"ltpc")) {
+ want &= ~2;
+ } else {
+ f=claim_dma_lock();
+ disable_dma(3);
+ clear_dma_ff(3);
+ set_dma_mode(3,DMA_MODE_WRITE);
+ set_dma_addr(3,virt_to_bus(ltdmabuf));
+ set_dma_count(3,sizeof(struct lt_mem));
+ enable_dma(3);
+ release_dma_lock(f);
+ }
+ }
+ /* set up request */
+
+ /* FIXME -- do timings better! */
+
+ ltdmabuf[0] = LT_READMEM;
+ ltdmabuf[1] = 1; /* mailbox */
+ ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */
+ ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */
+ ltdmabuf[6] = 0; /* dunno if this is necessary */
+
+ inb_p(io+1);
+ inb_p(io+0);
+ timeout = jiffies+100*HZ/100;
+ while(time_before(jiffies, timeout)) {
+ if ( 0xfa == inb_p(io+6) ) break;
+ }
+
+ inb_p(io+3);
+ inb_p(io+2);
+ while(time_before(jiffies, timeout)) {
+ if ( 0xfb == inb_p(io+6) ) break;
+ }
+
+ /* release the other dma channel (if we opened both of them) */
+
+ if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
+ want &= ~2;
+ free_dma(3);
+ }
+
+ if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
+ want &= ~1;
+ free_dma(1);
+ }
+
+ if (!want)
+ return 0;
+
+ return (want & 2) ? 3 : 1;
+}
+
+struct net_device * __init ltpc_probe(void)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+ int x=0,y=0;
+ int autoirq;
+ unsigned long f;
+ unsigned long timeout;
+
+ dev = alloc_netdev(sizeof(struct ltpc_private), "lt%d", ltalk_setup);
+ if (!dev)
+ goto out;
+
+ SET_MODULE_OWNER(dev);
+
+ /* probe for the I/O port address */
+
+ if (io != 0x240 && request_region(0x220,8,"ltpc")) {
+ x = inb_p(0x220+6);
+ if ( (x!=0xff) && (x>=0xf0) ) {
+ io = 0x220;
+ goto got_port;
+ }
+ release_region(0x220,8);
+ }
+ if (io != 0x220 && request_region(0x240,8,"ltpc")) {
+ y = inb_p(0x240+6);
+ if ( (y!=0xff) && (y>=0xf0) ){
+ io = 0x240;
+ goto got_port;
+ }
+ release_region(0x240,8);
+ }
+
+ /* give up in despair */
+ printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
+ err = -ENODEV;
+ goto out1;
+
+ got_port:
+ /* probe for the IRQ line */
+ if (irq < 2) {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ /* reset the interrupt line */
+ inb_p(io+7);
+ inb_p(io+7);
+ /* trigger an interrupt (I hope) */
+ inb_p(io+6);
+ mdelay(2);
+ autoirq = probe_irq_off(irq_mask);
+
+ if (autoirq == 0) {
+ printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
+ } else {
+ irq = autoirq;
+ }
+ }
+
+ /* allocate a DMA buffer */
+ ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
+ if (!ltdmabuf) {
+ printk(KERN_ERR "ltpc: mem alloc failed\n");
+ err = -ENOMEM;
+ goto out2;
+ }
+
+ ltdmacbuf = &ltdmabuf[800];
+
+ if(debug & DEBUG_VERBOSE) {
+ printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
+ }
+
+ /* reset the card */
+
+ inb_p(io+1);
+ inb_p(io+3);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(2*HZ/100);
+
+ inb_p(io+0);
+ inb_p(io+2);
+ inb_p(io+7); /* clear reset */
+ inb_p(io+4);
+ inb_p(io+5);
+ inb_p(io+5); /* enable dma */
+ inb_p(io+6); /* tri-state interrupt line */
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /* now, figure out which dma channel we're using, unless it's
+ already been specified */
+ /* well, 0 is a legal DMA channel, but the LTPC card doesn't
+ use it... */
+ dma = ltpc_probe_dma(io, dma);
+ if (!dma) { /* no dma channel */
+ printk(KERN_ERR "No DMA channel found on ltpc card.\n");
+ err = -ENODEV;
+ goto out3;
+ }
+
+ /* print out friendly message */
+ if(irq)
+ printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
+ else
+ printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
+
+ /* Fill in the fields of the device structure with ethernet-generic values. */
+ dev->hard_start_xmit = ltpc_xmit;
+ dev->hard_header = ltpc_hard_header;
+ dev->get_stats = ltpc_get_stats;
+
+ /* add the ltpc-specific things */
+ dev->do_ioctl = &ltpc_ioctl;
+
+ dev->set_multicast_list = &set_multicast_list;
+ dev->mc_list = NULL;
+ dev->base_addr = io;
+ dev->irq = irq;
+ dev->dma = dma;
+
+ /* the card will want to send a result at this point */
+ /* (I think... leaving out this part makes the kernel crash,
+ so I put it back in...) */
+
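+	/* Arm the DMA channel for a 0x100-byte read so the card can deliver
+	   its result block into ltdmabuf before we poll for status 0xf9. */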
+ f=claim_dma_lock();
+ disable_dma(dma);
+ clear_dma_ff(dma);
+ set_dma_mode(dma,DMA_MODE_READ);
+ set_dma_addr(dma,virt_to_bus(ltdmabuf));
+ set_dma_count(dma,0x100);
+ enable_dma(dma);
+ release_dma_lock(f);
+
+ (void) inb_p(io+3);
+ (void) inb_p(io+2);
+ timeout = jiffies+100*HZ/100;
+
+ while(time_before(jiffies, timeout)) {
+ if( 0xf9 == inb_p(io+6))
+ break;
+ schedule();
+ }
+
+ if(debug & DEBUG_VERBOSE) {
+ printk("setting up timer and irq\n");
+ }
+
+ /* grab it and don't let go :-) */
+ if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0)
+ {
+ (void) inb_p(io+7); /* enable interrupts from board */
+ (void) inb_p(io+7); /* and reset irq line */
+ } else {
+ if( irq )
+ printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
+ dev->irq = 0;
+ /* polled mode -- 20 times per second */
+ /* this is really, really slow... should it poll more often? */
+ init_timer(&ltpc_timer);
+ ltpc_timer.function=ltpc_poll;
+ ltpc_timer.data = (unsigned long) dev;
+
+ ltpc_timer.expires = jiffies + HZ/20;
+ add_timer(&ltpc_timer);
+ }
+ err = register_netdev(dev);
+ if (err)
+ goto out4;
+
+	return dev;
+out4:
+ del_timer_sync(&ltpc_timer);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+out3:
+ free_pages((unsigned long)ltdmabuf, get_order(1000));
+out2:
+ release_region(io, 8);
+out1:
+ free_netdev(dev);
+out:
+ return ERR_PTR(err);
+}
+
+#ifndef MODULE
+/* handles "ltpc=io,irq,dma" kernel command lines */
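+/* e.g. "ltpc=0x240,9,1" selects I/O base 0x240, IRQ 9 and DMA channel 1
+   (illustrative values); "ltpc=auto" keeps full autoprobing. */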
+static int __init ltpc_setup(char *str)
+{
+ int ints[5];
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] == 0) {
+ if (str && !strncmp(str, "auto", 4)) {
+ /* do nothing :-) */
+ }
+ else {
+ /* usage message */
+ printk (KERN_ERR
+ "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
+ return 0;
+ }
+ } else {
+ io = ints[1];
+ if (ints[0] > 1) {
+ irq = ints[2];
+ }
+ if (ints[0] > 2) {
+ dma = ints[3];
+ }
+		/* ignore any other parameters */
+ }
+ return 1;
+}
+
+__setup("ltpc=", ltpc_setup);
+#endif /* MODULE */
+
+static struct net_device *dev_ltpc;
+
+#ifdef MODULE
+
+MODULE_LICENSE("GPL");
+module_param(debug, int, 0);
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(dma, int, 0);
+
+
+int __init init_module(void)
+{
+ if(io == 0)
+ printk(KERN_NOTICE
+ "ltpc: Autoprobing is not recommended for modules\n");
+
+ dev_ltpc = ltpc_probe();
+ if (IS_ERR(dev_ltpc))
+ return PTR_ERR(dev_ltpc);
+ return 0;
+}
+#endif
+
+static void __exit ltpc_cleanup(void)
+{
+
+ if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
+ unregister_netdev(dev_ltpc);
+
+ ltpc_timer.data = 0; /* signal the poll routine that we're done */
+
+ del_timer_sync(&ltpc_timer);
+
+ if(debug & DEBUG_VERBOSE) printk("freeing irq\n");
+
+ if (dev_ltpc->irq)
+ free_irq(dev_ltpc->irq, dev_ltpc);
+
+ if(debug & DEBUG_VERBOSE) printk("freeing dma\n");
+
+ if (dev_ltpc->dma)
+ free_dma(dev_ltpc->dma);
+
+ if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");
+
+ if (dev_ltpc->base_addr)
+ release_region(dev_ltpc->base_addr,8);
+
+ free_netdev(dev_ltpc);
+
+ if(debug & DEBUG_VERBOSE) printk("free_pages\n");
+
+ free_pages( (unsigned long) ltdmabuf, get_order(1000));
+
+ if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
+}
+
+module_exit(ltpc_cleanup);
diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h
new file mode 100644
index 000000000000..cd30544a3729
--- /dev/null
+++ b/drivers/net/appletalk/ltpc.h
@@ -0,0 +1,73 @@
+/*** ltpc.h
+ *
+ *
+ ***/
+
+#define LT_GETRESULT 0x00
+#define LT_WRITEMEM 0x01
+#define LT_READMEM 0x02
+#define LT_GETFLAGS 0x04
+#define LT_SETFLAGS 0x05
+#define LT_INIT 0x10
+#define LT_SENDLAP 0x13
+#define LT_RCVLAP 0x14
+
+/* the flag that we care about */
+#define LT_FLAG_ALLLAP 0x04
+
+struct lt_getresult {
+ unsigned char command;
+ unsigned char mailbox;
+};
+
+struct lt_mem {
+ unsigned char command;
+ unsigned char mailbox;
+ unsigned short addr; /* host order */
+ unsigned short length; /* host order */
+};
+
+struct lt_setflags {
+ unsigned char command;
+ unsigned char mailbox;
+ unsigned char flags;
+};
+
+struct lt_getflags {
+ unsigned char command;
+ unsigned char mailbox;
+};
+
+struct lt_init {
+ unsigned char command;
+ unsigned char mailbox;
+ unsigned char hint;
+};
+
+struct lt_sendlap {
+ unsigned char command;
+ unsigned char mailbox;
+ unsigned char dnode;
+ unsigned char laptype;
+ unsigned short length; /* host order */
+};
+
+struct lt_rcvlap {
+ unsigned char command;
+ unsigned char dnode;
+ unsigned char snode;
+ unsigned char laptype;
+ unsigned short length; /* host order */
+};
+
+union lt_command {
+ struct lt_getresult getresult;
+ struct lt_mem mem;
+ struct lt_setflags setflags;
+ struct lt_getflags getflags;
+ struct lt_init init;
+ struct lt_sendlap sendlap;
+ struct lt_rcvlap rcvlap;
+};
+typedef union lt_command lt_command;
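+
+/*
+ * Illustrative use only (not part of the original interface docs): a
+ * command block asking the card to copy 0x100 bytes of its memory back
+ * to the host, roughly what ltpc_probe_dma() assembles by hand:
+ *
+ *	union lt_command c = { .mem = {
+ *		.command = LT_READMEM,
+ *		.mailbox = 1,		// example value, not taken from the driver
+ *		.addr    = 0x0000,	// host byte order
+ *		.length  = 0x0100,	// host byte order
+ *	} };
+ */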
+
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
new file mode 100644
index 000000000000..948de2532a1e
--- /dev/null
+++ b/drivers/net/arcnet/Kconfig
@@ -0,0 +1,140 @@
+#
+# Arcnet configuration
+#
+
+menu "ARCnet devices"
+ depends on NETDEVICES && (ISA || PCI)
+
+config ARCNET
+ tristate "ARCnet support"
+ ---help---
+ If you have a network card of this type, say Y and check out the
+ (arguably) beautiful poetry in
+ <file:Documentation/networking/arcnet.txt>.
+
+ You need both this driver, and the driver for the particular ARCnet
+ chipset of your card. If you don't know, then it's probably a
+ COM90xx type card, so say Y (or M) to "ARCnet COM90xx chipset
+ support" below.
+
+ You might also want to have a look at the Ethernet-HOWTO, available
+	  from <http://www.tldp.org/docs.html#howto> (even though ARCnet
+ is not really Ethernet).
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called arcnet.
+
+config ARCNET_1201
+ tristate "Enable standard ARCNet packet format (RFC 1201)"
+ depends on ARCNET
+ help
+ This allows you to use RFC1201 with your ARCnet card via the virtual
+ arc0 device. You need to say Y here to communicate with
+ industry-standard RFC1201 implementations, like the arcether.com
+ packet driver or most DOS/Windows ODI drivers. Please read the
+ ARCnet documentation in <file:Documentation/networking/arcnet.txt>
+ for more information about using arc0.
+
+config ARCNET_1051
+ tristate "Enable old ARCNet packet format (RFC 1051)"
+ depends on ARCNET
+ ---help---
+ This allows you to use RFC1051 with your ARCnet card via the virtual
+ arc0s device. You only need arc0s if you want to talk to ARCnet
+ software complying with the "old" standard, specifically, the DOS
+ arcnet.com packet driver, Amigas running AmiTCP, and some variants
+ of NetBSD. You do not need to say Y here to communicate with
+ industry-standard RFC1201 implementations, like the arcether.com
+ packet driver or most DOS/Windows ODI drivers. RFC1201 is included
+ automatically as the arc0 device. Please read the ARCnet
+ documentation in <file:Documentation/networking/arcnet.txt> for more
+ information about using arc0e and arc0s.
+
+config ARCNET_RAW
+ tristate "Enable raw mode packet interface"
+ depends on ARCNET
+ help
+ ARCnet "raw mode" packet encapsulation, no soft headers. Unlikely
+ to work unless talking to a copy of the same Linux arcnet driver,
+ but perhaps marginally faster in that case.
+
+config ARCNET_CAP
+ tristate "Enable CAP mode packet interface"
+ depends on ARCNET
+ help
+ ARCnet "cap mode" packet encapsulation. Used to get the hardware
+ acknowledge back to userspace. After the initial protocol byte every
+ packet is stuffed with an extra 4 byte "cookie" which doesn't
+ actually appear on the network. After transmit the driver will send
+ back a packet with protocol byte 0 containing the status of the
+	  transmission:
+ 0=no hardware acknowledge
+ 1=excessive nak
+	  2=transmission accepted by the receiver hardware
+
+	  Received packets are also stuffed with the extra 4 bytes, but
+	  there they contain random data.
+
+	  CAP mode only listens to protocols 1-8.
+
+config ARCNET_COM90xx
+ tristate "ARCnet COM90xx (normal) chipset driver"
+ depends on ARCNET
+ help
+ This is the chipset driver for the standard COM90xx cards. If you
+ have always used the old ARCnet driver without knowing what type of
+ card you had, this is probably the one for you.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called com90xx.
+
+config ARCNET_COM90xxIO
+ tristate "ARCnet COM90xx (IO mapped) chipset driver"
+ depends on ARCNET
+ ---help---
+ This is the chipset driver for the COM90xx cards, using them in
+ IO-mapped mode instead of memory-mapped mode. This is slower than
+ the normal driver. Only use it if your card doesn't support shared
+ memory.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called com90io.
+
+config ARCNET_RIM_I
+ tristate "ARCnet COM90xx (RIM I) chipset driver"
+ depends on ARCNET
+ ---help---
+ This is yet another chipset driver for the COM90xx cards, but this
+ time only using memory-mapped mode, and no IO ports at all. This
+ driver is completely untested, so if you have one of these cards,
+ please mail <dwmw2@infradead.org>, especially if it works!
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called arc-rimi.
+
+config ARCNET_COM20020
+ tristate "ARCnet COM20020 chipset driver"
+ depends on ARCNET
+ help
+ This is the driver for the new COM20020 chipset. It supports such
+ things as promiscuous mode, so packet sniffing is possible, and
+ extra diagnostic information.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called com20020.
+
+config ARCNET_COM20020_ISA
+ tristate "Support for COM20020 on ISA"
+ depends on ARCNET_COM20020 && ISA
+
+config ARCNET_COM20020_PCI
+ tristate "Support for COM20020 on PCI"
+ depends on ARCNET_COM20020 && PCI
+
+endmenu
+
diff --git a/drivers/net/arcnet/Makefile b/drivers/net/arcnet/Makefile
new file mode 100644
index 000000000000..5861af543d42
--- /dev/null
+++ b/drivers/net/arcnet/Makefile
@@ -0,0 +1,14 @@
+# Makefile for linux/drivers/net/arcnet
+#
+
+obj-$(CONFIG_ARCNET) += arcnet.o
+obj-$(CONFIG_ARCNET_1201) += rfc1201.o
+obj-$(CONFIG_ARCNET_1051) += rfc1051.o
+obj-$(CONFIG_ARCNET_RAW) += arc-rawmode.o
+obj-$(CONFIG_ARCNET_CAP) += capmode.o
+obj-$(CONFIG_ARCNET_COM90xx) += com90xx.o
+obj-$(CONFIG_ARCNET_COM90xxIO) += com90io.o
+obj-$(CONFIG_ARCNET_RIM_I) += arc-rimi.o
+obj-$(CONFIG_ARCNET_COM20020) += com20020.o
+obj-$(CONFIG_ARCNET_COM20020_ISA) += com20020-isa.o
+obj-$(CONFIG_ARCNET_COM20020_PCI) += com20020-pci.o
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
new file mode 100644
index 000000000000..e1ea29b0cd14
--- /dev/null
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -0,0 +1,204 @@
+/*
+ * Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers)
+ *
+ * Written 1994-1999 by Avery Pennarun.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/arcdevice.h>
+
+#define VERSION "arcnet: raw mode (`r') encapsulation support loaded.\n"
+
+
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length);
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr);
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum);
+
+struct ArcProto rawmode_proto =
+{
+ .suffix = 'r',
+ .mtu = XMTU,
+ .rx = rx,
+ .build_header = build_header,
+ .prepare_tx = prepare_tx,
+ .continue_tx = NULL,
+ .ack_tx = NULL
+};
+
+
+static int __init arcnet_raw_init(void)
+{
+ int count;
+
+ printk(VERSION);
+
+ for (count = 0; count < 256; count++)
+ if (arc_proto_map[count] == arc_proto_default)
+ arc_proto_map[count] = &rawmode_proto;
+
+ /* for raw mode, we only set the bcast proto if there's no better one */
+ if (arc_bcast_proto == arc_proto_default)
+ arc_bcast_proto = &rawmode_proto;
+
+ arc_proto_default = &rawmode_proto;
+ return 0;
+}
+
+static void __exit arcnet_raw_exit(void)
+{
+ arcnet_unregister_proto(&rawmode_proto);
+}
+
+module_init(arcnet_raw_init);
+module_exit(arcnet_raw_exit);
+
+MODULE_LICENSE("GPL");
+
+
+/* packet receiver */
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct sk_buff *skb;
+ struct archdr *pkt = pkthdr;
+ int ofs;
+
+ BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length);
+
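+	/* ARCnet keeps packet data at the end of a 256- or 512-byte on-card
+	   buffer, so the data offset is the buffer size minus the length. */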
+ if (length >= MinTU)
+ ofs = 512 - length;
+ else
+ ofs = 256 - length;
+
+ skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+ if (skb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length + ARC_HDR_SIZE);
+ skb->dev = dev;
+
+ pkt = (struct archdr *) skb->data;
+
+ skb->mac.raw = skb->data;
+ skb_pull(skb, ARC_HDR_SIZE);
+
+ /* up to sizeof(pkt->soft) has already been copied from the card */
+ memcpy(pkt, pkthdr, sizeof(struct archdr));
+ if (length > sizeof(pkt->soft))
+ lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
+ pkt->soft.raw + sizeof(pkt->soft),
+ length - sizeof(pkt->soft));
+
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+
+ skb->protocol = __constant_htons(ETH_P_ARCNET);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+}
+
+
+/*
+ * Create the ARCnet hard/soft headers for raw mode.
+ * There aren't any soft headers in raw mode - not even the protocol id.
+ */
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr)
+{
+ int hdr_size = ARC_HDR_SIZE;
+ struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+
+ /*
+ * Set the source hardware address.
+ *
+ * This is pretty pointless for most purposes, but it can help in
+	 * debugging. (ARCnet does not allow us to change the source address
+	 * in the actual packet sent.)
+ */
+ pkt->hard.source = *dev->dev_addr;
+
+ /* see linux/net/ethernet/eth.c to see where I got the following */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ /*
+ * FIXME: fill in the last byte of the dest ipaddr here to better
+ * comply with RFC1051 in "noarp" mode.
+ */
+ pkt->hard.dest = 0;
+ return hdr_size;
+ }
+ /* otherwise, just fill it in and go! */
+ pkt->hard.dest = daddr;
+
+ return hdr_size; /* success */
+}
+
+
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct arc_hardware *hard = &pkt->hard;
+ int ofs;
+
+ BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
+ lp->next_tx, lp->cur_tx, bufnum);
+
+ length -= ARC_HDR_SIZE; /* hard header is not included in packet length */
+
+ if (length > XMTU) {
+ /* should never happen! other people already check for this. */
+ BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
+ length, XMTU);
+ length = XMTU;
+ }
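+	/* pick the buffer layout: long packets use the 512-byte format
+	   (offset[1]); lengths between MTU and MinTU fall into ARCnet's
+	   "exception packet" range, so 3 extra bytes of buffer space are
+	   reserved; short packets fit the 256-byte format (offset[0]). */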
+ if (length > MinTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length;
+ } else if (length > MTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length - 3;
+ } else
+ hard->offset[0] = ofs = 256 - length;
+
+ BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
+ length,ofs);
+
+ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
+ lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
+
+ lp->lastload_dest = hard->dest;
+
+ return 1; /* done */
+}
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
new file mode 100644
index 000000000000..38c3f033f739
--- /dev/null
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -0,0 +1,368 @@
+/*
+ * Linux ARCnet driver - "RIM I" (entirely mem-mapped) cards
+ *
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/arcdevice.h>
+
+
+#define VERSION "arcnet: RIM I (entirely mem-mapped) support\n"
+
+
+/* Internal function declarations */
+
+static int arcrimi_probe(struct net_device *dev);
+static int arcrimi_found(struct net_device *dev);
+static void arcrimi_command(struct net_device *dev, int command);
+static int arcrimi_status(struct net_device *dev);
+static void arcrimi_setmask(struct net_device *dev, int mask);
+static int arcrimi_reset(struct net_device *dev, int really_reset);
+static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+
+/* Handy defines for ARCnet specific stuff */
+
+/* Amount of I/O memory used by the card */
+#define BUFFER_SIZE (512)
+#define MIRROR_SIZE (BUFFER_SIZE*4)
+
+/* COM 9026 controller chip --> ARCnet register addresses */
+#define _INTMASK (ioaddr+0) /* writable */
+#define _STATUS (ioaddr+0) /* readable */
+#define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */
+#define _RESET (ioaddr+8) /* software reset (on read) */
+#define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */
+#define _ADDR_HI (ioaddr+15) /* Control registers for said */
+#define _ADDR_LO (ioaddr+14)
+#define _CONFIG (ioaddr+2) /* Configuration register */
+
+#undef ASTATUS
+#undef ACOMMAND
+#undef AINTMASK
+
+#define ASTATUS() readb(_STATUS)
+#define ACOMMAND(cmd) writeb((cmd),_COMMAND)
+#define AINTMASK(msk) writeb((msk),_INTMASK)
+#define SETCONF() writeb(lp->config,_CONFIG)
+
+
+/*
+ * We cannot probe for a RIM I card; one reason is I don't know how to reset
+ * them. In fact, we can't even get their node ID automatically. So, we
+ * need to be passed a specific shmem address, IRQ, and node ID.
+ */
+static int __init arcrimi_probe(struct net_device *dev)
+{
+ BUGLVL(D_NORMAL) printk(VERSION);
+ BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
+
+ BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n",
+ dev->dev_addr[0], dev->mem_start, dev->irq);
+
+ if (dev->mem_start <= 0 || dev->irq <= 0) {
+ BUGMSG(D_NORMAL, "No autoprobe for RIM I; you "
+ "must specify the shmem and irq!\n");
+ return -ENODEV;
+ }
+ /*
+ * Grab the memory region at mem_start for BUFFER_SIZE bytes.
+ * Later in arcrimi_found() the real size will be determined
+ * and this reserve will be released and the correct size
+ * will be taken.
+ */
+ if (!request_mem_region(dev->mem_start, BUFFER_SIZE, "arcnet (90xx)")) {
+ BUGMSG(D_NORMAL, "Card memory already allocated\n");
+ return -ENODEV;
+ }
+ if (dev->dev_addr[0] == 0) {
+ release_mem_region(dev->mem_start, BUFFER_SIZE);
+ BUGMSG(D_NORMAL, "You need to specify your card's station "
+ "ID!\n");
+ return -ENODEV;
+ }
+ return arcrimi_found(dev);
+}
+
+
+/*
+ * Set up the struct net_device associated with this card. Called after
+ * probing succeeds.
+ */
+static int __init arcrimi_found(struct net_device *dev)
+{
+ struct arcnet_local *lp;
+ unsigned long first_mirror, last_mirror, shmem;
+ int mirror_size;
+ int err;
+
+ /* reserve the irq */
+ if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
+ release_mem_region(dev->mem_start, BUFFER_SIZE);
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+ }
+
+ shmem = dev->mem_start;
+ isa_writeb(TESTvalue, shmem);
+ isa_writeb(dev->dev_addr[0], shmem + 1); /* actually the node ID */
+
+ /* find the real shared memory start/end points, including mirrors */
+
+ /* guess the actual size of one "memory mirror" - the number of
+ * bytes between copies of the shared memory. On most cards, it's
+ * 2k (or there are no mirrors at all) but on some, it's 4k.
+ */
+ mirror_size = MIRROR_SIZE;
+ if (isa_readb(shmem) == TESTvalue
+ && isa_readb(shmem - mirror_size) != TESTvalue
+ && isa_readb(shmem - 2 * mirror_size) == TESTvalue)
+ mirror_size *= 2;
+
+ first_mirror = last_mirror = shmem;
+ while (isa_readb(first_mirror) == TESTvalue)
+ first_mirror -= mirror_size;
+ first_mirror += mirror_size;
+
+ while (isa_readb(last_mirror) == TESTvalue)
+ last_mirror += mirror_size;
+ last_mirror -= mirror_size;
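+	/* first_mirror..last_mirror now bracket every copy of the shared
+	   memory that answered with TESTvalue */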
+
+ dev->mem_start = first_mirror;
+ dev->mem_end = last_mirror + MIRROR_SIZE - 1;
+
+ /* initialize the rest of the device structure. */
+
+ lp = dev->priv;
+ lp->card_name = "RIM I";
+ lp->hw.command = arcrimi_command;
+ lp->hw.status = arcrimi_status;
+ lp->hw.intmask = arcrimi_setmask;
+ lp->hw.reset = arcrimi_reset;
+ lp->hw.owner = THIS_MODULE;
+ lp->hw.copy_to_card = arcrimi_copy_to_card;
+ lp->hw.copy_from_card = arcrimi_copy_from_card;
+
+ /*
+	 * re-reserve the memory region - arcrimi_probe() allocated this region
+ * but didn't know the real size. Free that region and then re-get
+ * with the correct size. There is a VERY slim chance this could
+ * fail.
+ */
+ release_mem_region(shmem, BUFFER_SIZE);
+ if (!request_mem_region(dev->mem_start,
+ dev->mem_end - dev->mem_start + 1,
+ "arcnet (90xx)")) {
+ BUGMSG(D_NORMAL, "Card memory already allocated\n");
+ goto err_free_irq;
+ }
+
+ lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ if (!lp->mem_start) {
+ BUGMSG(D_NORMAL, "Can't remap device memory!\n");
+ goto err_release_mem;
+ }
+
+ /* get and check the station ID from offset 1 in shmem */
+ dev->dev_addr[0] = readb(lp->mem_start + 1);
+
+ BUGMSG(D_NORMAL, "ARCnet RIM I: station %02Xh found at IRQ %d, "
+ "ShMem %lXh (%ld*%d bytes).\n",
+ dev->dev_addr[0],
+ dev->irq, dev->mem_start,
+ (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ iounmap(lp->mem_start);
+err_release_mem:
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+err_free_irq:
+ free_irq(dev->irq, dev);
+ return -EIO;
+}
+
+
+/*
+ * Do a hardware reset on the card, and set up necessary registers.
+ *
+ * This should be called as little as possible, because it disrupts the
+ * token on the network (causes a RECON) and requires a significant delay.
+ *
+ * However, it does make sure the card is in a defined state.
+ */
+static int arcrimi_reset(struct net_device *dev, int really_reset)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *ioaddr = lp->mem_start + 0x800;
+
+ BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
+
+ if (really_reset) {
+ writeb(TESTvalue, ioaddr - 0x800); /* fake reset */
+ return 0;
+ }
+ ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */
+ ACOMMAND(CFLAGScmd | CONFIGclear);
+
+ /* enable extended (512-byte) packets */
+ ACOMMAND(CONFIGcmd | EXTconf);
+
+ /* done! return success. */
+ return 0;
+}
+
+static void arcrimi_setmask(struct net_device *dev, int mask)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *ioaddr = lp->mem_start + 0x800;
+
+ AINTMASK(mask);
+}
+
+static int arcrimi_status(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *ioaddr = lp->mem_start + 0x800;
+
+ return ASTATUS();
+}
+
+static void arcrimi_command(struct net_device *dev, int cmd)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *ioaddr = lp->mem_start + 0x800;
+
+ ACOMMAND(cmd);
+}
+
+static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
+ TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
+}
+
+
+static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
+ TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
+}
+
+static int node;
+static int io; /* use the insmod io= irq= node= options */
+static int irq;
+static char device[9]; /* use eg. device=arc1 to change name */
+
+module_param(node, int, 0);
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param_string(device, device, sizeof(device), 0);
+MODULE_LICENSE("GPL");
+
+static struct net_device *my_dev;
+
+static int __init arc_rimi_init(void)
+{
+ struct net_device *dev;
+
+ dev = alloc_arcdev(device);
+ if (!dev)
+ return -ENOMEM;
+
+ if (node && node != 0xff)
+ dev->dev_addr[0] = node;
+
+ dev->mem_start = io;
+ dev->irq = irq;
+ if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (arcrimi_probe(dev)) {
+ free_netdev(dev);
+ return -EIO;
+ }
+
+ my_dev = dev;
+ return 0;
+}
+
+static void __exit arc_rimi_exit(void)
+{
+ struct net_device *dev = my_dev;
+ struct arcnet_local *lp = dev->priv;
+
+ unregister_netdev(dev);
+ iounmap(lp->mem_start);
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+}
+
+#ifndef MODULE
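+/* handles "arcrimi=<shmem>[,<irq>[,<nodeID>]][,<name>]" kernel command
+   lines, e.g. "arcrimi=0xd0000,9,1" (illustrative values) */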
+static int __init arcrimi_setup(char *s)
+{
+ int ints[8];
+ s = get_options(s, 8, ints);
+ if (!ints[0])
+ return 1;
+ switch (ints[0]) {
+ default: /* ERROR */
+ printk("arcrimi: Too many arguments.\n");
+ case 3: /* Node ID */
+ node = ints[3];
+ case 2: /* IRQ */
+ irq = ints[2];
+ case 1: /* IO address */
+ io = ints[1];
+ }
+ if (*s)
+ snprintf(device, sizeof(device), "%s", s);
+ return 1;
+}
+__setup("arcrimi=", arcrimi_setup);
+#endif /* MODULE */
+
+module_init(arc_rimi_init)
+module_exit(arc_rimi_exit)
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
new file mode 100644
index 000000000000..4f9f69e22c1b
--- /dev/null
+++ b/drivers/net/arcnet/arcnet.c
@@ -0,0 +1,1102 @@
+/*
+ * Linux ARCnet driver - device-independent routines
+ *
+ * Written 1997 by David Woodhouse.
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * The change log is now in a file called ChangeLog in this directory.
+ *
+ * Sources:
+ * - Crynwr arcnet.com/arcether.com packet drivers.
+ * - arcnet.c v0.00 dated 1/1/94 and apparently by
+ * Donald Becker - it didn't work :)
+ * - skeleton.c v0.05 dated 11/16/93 by Donald Becker
+ * (from Linux Kernel 1.1.45)
+ * - RFC's 1201 and 1051 - re: TCP/IP over ARCnet
+ * - The official ARCnet COM9026 data sheets (!) thanks to
+ * Ken Cornetet <kcornete@nyx10.cs.du.edu>
+ * - The official ARCnet COM20020 data sheets.
+ * - Information on some more obscure ARCnet controller chips, thanks
+ * to the nice people at SMSC.
+ * - net/inet/eth.c (from kernel 1.1.50) for header-building info.
+ * - Alternate Linux ARCnet source by V.Shergin <vsher@sao.stavropol.su>
+ * - Textual information and more alternate source from Joachim Koenig
+ * <jojo@repas.de>
+ */
+
+#define VERSION "arcnet: v3.93 BETA 2000/04/29 - by Avery Pennarun et al.\n"
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/init.h>
+#include <linux/arcdevice.h>
+
+/* "do nothing" functions for protocol drivers */
+static void null_rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length);
+static int null_build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr);
+static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
+ int length, int bufnum);
+
+
+/*
+ * one ArcProto per possible proto ID. None of the elements of
+ * arc_proto_map are allowed to be NULL; they will get set to
+ * arc_proto_default instead. It also must not be NULL; if you would like
+ * to set it to NULL, set it to &arc_proto_null instead.
+ */
+ struct ArcProto *arc_proto_map[256], *arc_proto_default,
+ *arc_bcast_proto, *arc_raw_proto;
+
+struct ArcProto arc_proto_null =
+{
+ .suffix = '?',
+ .mtu = XMTU,
+ .is_ip = 0,
+ .rx = null_rx,
+ .build_header = null_build_header,
+ .prepare_tx = null_prepare_tx,
+ .continue_tx = NULL,
+ .ack_tx = NULL
+};
+
+/* Exported function prototypes */
+int arcnet_debug = ARCNET_DEBUG;
+
+EXPORT_SYMBOL(arc_proto_map);
+EXPORT_SYMBOL(arc_proto_default);
+EXPORT_SYMBOL(arc_bcast_proto);
+EXPORT_SYMBOL(arc_raw_proto);
+EXPORT_SYMBOL(arc_proto_null);
+EXPORT_SYMBOL(arcnet_unregister_proto);
+EXPORT_SYMBOL(arcnet_debug);
+EXPORT_SYMBOL(alloc_arcdev);
+EXPORT_SYMBOL(arcnet_interrupt);
+
+/* Internal function prototypes */
+static int arcnet_open(struct net_device *dev);
+static int arcnet_close(struct net_device *dev);
+static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void arcnet_timeout(struct net_device *dev);
+static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len);
+static int arcnet_rebuild_header(struct sk_buff *skb);
+static struct net_device_stats *arcnet_get_stats(struct net_device *dev);
+static int go_tx(struct net_device *dev);
+
+static int debug = ARCNET_DEBUG;
+module_param(debug, int, 0);
+MODULE_LICENSE("GPL");
+
+static int __init arcnet_init(void)
+{
+ int count;
+
+ arcnet_debug = debug;
+
+ printk(VERSION);
+
+#ifdef ALPHA_WARNING
+ BUGLVL(D_EXTRA) {
+ printk("arcnet: ***\n"
+ "arcnet: * Read arcnet.txt for important release notes!\n"
+ "arcnet: *\n"
+ "arcnet: * This is an ALPHA version! (Last stable release: v3.02) E-mail\n"
+ "arcnet: * me if you have any questions, comments, or bug reports.\n"
+ "arcnet: ***\n");
+ }
+#endif
+
+ /* initialize the protocol map */
+ arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null;
+ for (count = 0; count < 256; count++)
+ arc_proto_map[count] = arc_proto_default;
+
+ BUGLVL(D_DURING)
+ printk("arcnet: struct sizes: %Zd %Zd %Zd %Zd %Zd\n",
+ sizeof(struct arc_hardware), sizeof(struct arc_rfc1201),
+ sizeof(struct arc_rfc1051), sizeof(struct arc_eth_encap),
+ sizeof(struct archdr));
+
+ return 0;
+}
+
+static void __exit arcnet_exit(void)
+{
+}
+
+module_init(arcnet_init);
+module_exit(arcnet_exit);
+
+/*
+ * Dump the contents of an sk_buff
+ */
+#if ARCNET_DEBUG_MAX & D_SKB
+void arcnet_dump_skb(struct net_device *dev,
+ struct sk_buff *skb, char *desc)
+{
+ int i;
+
+ printk(KERN_DEBUG "%6s: skb dump (%s) follows:", dev->name, desc);
+ for (i = 0; i < skb->len; i++) {
+ if (i % 16 == 0)
+ printk("\n" KERN_DEBUG "[%04X] ", i);
+ printk("%02X ", ((u_char *) skb->data)[i]);
+ }
+ printk("\n");
+}
+
+EXPORT_SYMBOL(arcnet_dump_skb);
+#endif
+
+
+/*
+ * Dump the contents of an ARCnet buffer
+ */
+#if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
+void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
+ int take_arcnet_lock)
+{
+ struct arcnet_local *lp = dev->priv;
+ int i, length;
+ unsigned long flags = 0;
+ static uint8_t buf[512];
+
+ /* hw.copy_from_card expects IRQ context so take the IRQ lock
+ to keep it single threaded */
+ if(take_arcnet_lock)
+ spin_lock_irqsave(&lp->lock, flags);
+
+ lp->hw.copy_from_card(dev, bufnum, 0, buf, 512);
+ if(take_arcnet_lock)
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* if the offset[0] byte is nonzero, this is a 256-byte packet */
+ length = (buf[2] ? 256 : 512);
+
+ printk(KERN_DEBUG "%6s: packet dump (%s) follows:", dev->name, desc);
+ for (i = 0; i < length; i++) {
+ if (i % 16 == 0)
+ printk("\n" KERN_DEBUG "[%04X] ", i);
+ printk("%02X ", buf[i]);
+ }
+ printk("\n");
+
+}
+
+EXPORT_SYMBOL(arcnet_dump_packet);
+#endif
+
+
+/*
+ * Unregister a protocol driver from the arc_proto_map. Protocol drivers
+ * are responsible for registering themselves, but the unregister routine
+ * is pretty generic so we'll do it here.
+ */
+void arcnet_unregister_proto(struct ArcProto *proto)
+{
+ int count;
+
+ if (arc_proto_default == proto)
+ arc_proto_default = &arc_proto_null;
+ if (arc_bcast_proto == proto)
+ arc_bcast_proto = arc_proto_default;
+ if (arc_raw_proto == proto)
+ arc_raw_proto = arc_proto_default;
+
+ for (count = 0; count < 256; count++) {
+ if (arc_proto_map[count] == proto)
+ arc_proto_map[count] = arc_proto_default;
+ }
+}
+
+
+/*
+ * Add a buffer to the queue. Only the interrupt handler is allowed to do
+ * this, unless interrupts are disabled.
+ *
+ * Note: we don't check for a full queue, since there aren't enough buffers
+ * to more than fill it.
+ */
+static void release_arcbuf(struct net_device *dev, int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ int i;
+
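+	/* buf_queue is a small ring indexed mod 5; with only four buffers
+	   ever in circulation the head can never overrun the tail */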
+ lp->buf_queue[lp->first_free_buf++] = bufnum;
+ lp->first_free_buf %= 5;
+
+ BUGLVL(D_DURING) {
+ BUGMSG(D_DURING, "release_arcbuf: freed #%d; buffer queue is now: ",
+ bufnum);
+ for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
+ BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
+ BUGMSG2(D_DURING, "\n");
+ }
+}
+
+
+/*
+ * Get a buffer from the queue. If this returns -1, there are no buffers
+ * available.
+ */
+static int get_arcbuf(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ int buf = -1, i;
+
+ if (!atomic_dec_and_test(&lp->buf_lock)) {
+ /* already in this function */
+ BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n",
+ lp->buf_lock.counter);
+ }
+ else { /* we can continue */
+ if (lp->next_buf >= 5)
+ lp->next_buf -= 5;
+
+ if (lp->next_buf == lp->first_free_buf)
+ BUGMSG(D_NORMAL, "get_arcbuf: BUG: no buffers are available??\n");
+ else {
+ buf = lp->buf_queue[lp->next_buf++];
+ lp->next_buf %= 5;
+ }
+ }
+
+
+ BUGLVL(D_DURING) {
+ BUGMSG(D_DURING, "get_arcbuf: got #%d; buffer queue is now: ", buf);
+ for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5)
+ BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]);
+ BUGMSG2(D_DURING, "\n");
+ }
+
+ atomic_inc(&lp->buf_lock);
+ return buf;
+}
+
+
+static int choose_mtu(void)
+{
+ int count, mtu = 65535;
+
+ /* choose the smallest MTU of all available encaps */
+ for (count = 0; count < 256; count++) {
+ if (arc_proto_map[count] != &arc_proto_null
+ && arc_proto_map[count]->mtu < mtu) {
+ mtu = arc_proto_map[count]->mtu;
+ }
+ }
+
+ return mtu == 65535 ? XMTU : mtu;
+}
+
+
+/* Setup a struct device for ARCnet. */
+static void arcdev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_ARCNET;
+ dev->hard_header_len = sizeof(struct archdr);
+ dev->mtu = choose_mtu();
+
+ dev->addr_len = ARCNET_ALEN;
+ dev->tx_queue_len = 100;
+ dev->broadcast[0] = 0x00; /* for us, broadcasts are address 0 */
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+
+ /*
+ * Put in this stuff here, so we don't have to export the symbols to
+ * the chipset drivers.
+ */
+ dev->open = arcnet_open;
+ dev->stop = arcnet_close;
+ dev->hard_start_xmit = arcnet_send_packet;
+ dev->tx_timeout = arcnet_timeout;
+ dev->get_stats = arcnet_get_stats;
+ dev->hard_header = arcnet_header;
+ dev->rebuild_header = arcnet_rebuild_header;
+}
+
+struct net_device *alloc_arcdev(char *name)
+{
+ struct net_device *dev;
+
+ dev = alloc_netdev(sizeof(struct arcnet_local),
+ name && *name ? name : "arc%d", arcdev_setup);
+ if(dev) {
+ struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
+ spin_lock_init(&lp->lock);
+ }
+
+ return dev;
+}
+
+/*
+ * Open/initialize the board. This is called sometime after booting when
+ * the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even registers
+ * that "should" only need to be set once at boot, so that there is
+ * non-reboot way to recover if something goes wrong.
+ */
+static int arcnet_open(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ int count, newmtu, error;
+
+ BUGMSG(D_INIT,"opened.");
+
+ if (!try_module_get(lp->hw.owner))
+ return -ENODEV;
+
+ BUGLVL(D_PROTO) {
+ int count;
+ BUGMSG(D_PROTO, "protocol map (default is '%c'): ",
+ arc_proto_default->suffix);
+ for (count = 0; count < 256; count++)
+ BUGMSG2(D_PROTO, "%c", arc_proto_map[count]->suffix);
+ BUGMSG2(D_PROTO, "\n");
+ }
+
+
+ BUGMSG(D_INIT, "arcnet_open: resetting card.\n");
+
+ /* try to put the card in a defined state - if it fails the first
+ * time, actually reset it.
+ */
+ error = -ENODEV;
+ if (ARCRESET(0) && ARCRESET(1))
+ goto out_module_put;
+
+ newmtu = choose_mtu();
+ if (newmtu < dev->mtu)
+ dev->mtu = newmtu;
+
+ BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu);
+
+ /* autodetect the encapsulation for each host. */
+ memset(lp->default_proto, 0, sizeof(lp->default_proto));
+
+ /* the broadcast address is special - use the 'bcast' protocol */
+ for (count = 0; count < 256; count++) {
+ if (arc_proto_map[count] == arc_bcast_proto) {
+ lp->default_proto[0] = count;
+ break;
+ }
+ }
+
+ /* initialize buffers */
+ atomic_set(&lp->buf_lock, 1);
+
+ lp->next_buf = lp->first_free_buf = 0;
+ release_arcbuf(dev, 0);
+ release_arcbuf(dev, 1);
+ release_arcbuf(dev, 2);
+ release_arcbuf(dev, 3);
+ lp->cur_tx = lp->next_tx = -1;
+ lp->cur_rx = -1;
+
+ lp->rfc1201.sequence = 1;
+
+ /* bring up the hardware driver */
+ if (lp->hw.open)
+ lp->hw.open(dev);
+
+ if (dev->dev_addr[0] == 0)
+ BUGMSG(D_NORMAL, "WARNING! Station address 00 is reserved "
+ "for broadcasts!\n");
+ else if (dev->dev_addr[0] == 255)
+ BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse "
+ "DOS networking programs!\n");
+
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ if (ASTATUS() & RESETflag) {
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ ACOMMAND(CFLAGScmd | RESETclear);
+ }
+
+
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ /* make sure we're ready to receive IRQ's. */
+ AINTMASK(0);
+ udelay(1); /* give it time to set the mask before
+ * we reset it again. (may not even be
+ * necessary)
+ */
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ lp->intmask = NORXflag | RECONflag;
+ AINTMASK(lp->intmask);
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+ out_module_put:
+ module_put(lp->hw.owner);
+ return error;
+}
+
+
+/* The inverse routine to arcnet_open - shuts down the card. */
+static int arcnet_close(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+
+ netif_stop_queue(dev);
+
+ /* flush TX and disable RX */
+ AINTMASK(0);
+ ACOMMAND(NOTXcmd); /* stop transmit */
+ ACOMMAND(NORXcmd); /* disable receive */
+ mdelay(1);
+
+ /* shut down the card */
+ lp->hw.close(dev);
+ module_put(lp->hw.owner);
+ return 0;
+}
+
+
+static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ struct arcnet_local *lp = dev->priv;
+ uint8_t _daddr, proto_num;
+ struct ArcProto *proto;
+
+ BUGMSG(D_DURING,
+ "create header from %d to %d; protocol %d (%Xh); size %u.\n",
+ saddr ? *(uint8_t *) saddr : -1,
+ daddr ? *(uint8_t *) daddr : -1,
+ type, type, len);
+
+ if (skb->len!=0 && len != skb->len)
+ BUGMSG(D_NORMAL, "arcnet_header: Yikes! skb->len(%d) != len(%d)!\n",
+ skb->len, len);
+
+
+ /* Type is host order - ? */
+ if(type == ETH_P_ARCNET) {
+ proto = arc_raw_proto;
+ BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix);
+ _daddr = daddr ? *(uint8_t *) daddr : 0;
+ }
+ else if (!daddr) {
+ /*
+ * if the dest addr isn't provided, we can't choose an encapsulation!
+ * Store the packet type (eg. ETH_P_IP) for now, and we'll push on a
+ * real header when we do rebuild_header.
+ */
+ *(uint16_t *) skb_push(skb, 2) = type;
+ if (skb->nh.raw - skb->mac.raw != 2)
+ BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n",
+ (int)(skb->nh.raw - skb->mac.raw));
+ return -2; /* return error -- can't transmit yet! */
+ }
+ else {
+ /* otherwise, we can just add the header as usual. */
+ _daddr = *(uint8_t *) daddr;
+ proto_num = lp->default_proto[_daddr];
+ proto = arc_proto_map[proto_num];
+ BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n",
+ proto_num, proto->suffix);
+ if (proto == &arc_proto_null && arc_bcast_proto != proto) {
+ BUGMSG(D_DURING, "actually, let's use '%c' instead.\n",
+ arc_bcast_proto->suffix);
+ proto = arc_bcast_proto;
+ }
+ }
+ return proto->build_header(skb, dev, type, _daddr);
+}
+
+
+/*
+ * Rebuild the ARCnet hard header. This is called after an ARP (or in the
+ * future other address resolution) has completed on this sk_buff. We now
+ * let ARP fill in the destination field.
+ */
+static int arcnet_rebuild_header(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct arcnet_local *lp = dev->priv;
+ int status = 0; /* default is failure */
+ unsigned short type;
+ uint8_t daddr=0;
+ struct ArcProto *proto;
+
+ if (skb->nh.raw - skb->mac.raw != 2) {
+ BUGMSG(D_NORMAL,
+ "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
+ (int)(skb->nh.raw - skb->mac.raw));
+ return 0;
+ }
+ type = *(uint16_t *) skb_pull(skb, 2);
+ BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type);
+
+ if (type == ETH_P_IP) {
+#ifdef CONFIG_INET
+ BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type);
+ status = arp_find(&daddr, skb) ? 1 : 0;
+ BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n",
+ daddr, type);
+#endif
+ } else {
+ BUGMSG(D_NORMAL,
+ "I don't understand ethernet protocol %Xh addresses!\n", type);
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ }
+
+ /* if we couldn't resolve the address... give up. */
+ if (!status)
+ return 0;
+
+ /* add the _real_ header this time! */
+ proto = arc_proto_map[lp->default_proto[daddr]];
+ proto->build_header(skb, dev, type, daddr);
+
+ return 1; /* success */
+}
+
+
+
+/* Called by the kernel in order to transmit a packet. */
+static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct archdr *pkt;
+ struct arc_rfc1201 *soft;
+ struct ArcProto *proto;
+ int txbuf;
+ unsigned long flags;
+ int freeskb = 0;
+
+ BUGMSG(D_DURING,
+ "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
+ ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol);
+
+ pkt = (struct archdr *) skb->data;
+ soft = &pkt->soft.rfc1201;
+ proto = arc_proto_map[soft->proto];
+
+ BUGMSG(D_SKB_SIZE, "skb: transmitting %d bytes to %02X\n",
+ skb->len, pkt->hard.dest);
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "tx");
+
+ /* fits in one packet? */
+ if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
+ BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n");
+ dev_kfree_skb(skb);
+ return 0; /* don't try again */
+ }
+
+ /* We're busy transmitting a packet... */
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ AINTMASK(0);
+
+ txbuf = get_arcbuf(dev);
+ if (txbuf != -1) {
+ if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
+ !proto->ack_tx) {
+ /* done right away and we don't want to acknowledge
+			   the packet later - forget about it now */
+ lp->stats.tx_bytes += skb->len;
+ freeskb = 1;
+ } else {
+ /* do it the 'split' way */
+ lp->outgoing.proto = proto;
+ lp->outgoing.skb = skb;
+ lp->outgoing.pkt = pkt;
+
+ if (proto->continue_tx &&
+ proto->continue_tx(dev, txbuf)) {
+ BUGMSG(D_NORMAL,
+ "bug! continue_tx finished the first time! "
+ "(proto='%c')\n", proto->suffix);
+ }
+ }
+
+ lp->next_tx = txbuf;
+ } else {
+ freeskb = 1;
+ }
+
+ BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+ /* make sure we didn't ignore a TX IRQ while we were in here */
+ AINTMASK(0);
+
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ lp->intmask |= TXFREEflag|EXCNAKflag;
+ AINTMASK(lp->intmask);
+ BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (freeskb) {
+ dev_kfree_skb(skb);
+ }
+ return 0; /* no need to try again */
+}
+
+
+/*
+ * Actually start transmitting a packet that was loaded into a buffer
+ * by prepare_tx. This should _only_ be called by the interrupt handler.
+ */
+static int go_tx(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+
+ BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
+ ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx);
+
+ if (lp->cur_tx != -1 || lp->next_tx == -1)
+ return 0;
+
+ BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
+
+ lp->cur_tx = lp->next_tx;
+ lp->next_tx = -1;
+
+ /* start sending */
+ ACOMMAND(TXcmd | (lp->cur_tx << 3));
+
+ dev->trans_start = jiffies;
+ lp->stats.tx_packets++;
+ lp->lasttrans_dest = lp->lastload_dest;
+ lp->lastload_dest = 0;
+ lp->excnak_pending = 0;
+ lp->intmask |= TXFREEflag|EXCNAKflag;
+
+ return 1;
+}
+
+
+/* Called by the kernel when transmit times out */
+static void arcnet_timeout(struct net_device *dev)
+{
+ unsigned long flags;
+ struct arcnet_local *lp = dev->priv;
+ int status = ASTATUS();
+ char *msg;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (status & TXFREEflag) { /* transmit _DID_ finish */
+ msg = " - missed IRQ?";
+ } else {
+ msg = "";
+ lp->stats.tx_aborted_errors++;
+ lp->timed_out = 1;
+ ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
+ }
+ lp->stats.tx_errors++;
+
+ /* make sure we didn't miss a TX or a EXC NAK IRQ */
+ AINTMASK(0);
+ lp->intmask |= TXFREEflag|EXCNAKflag;
+ AINTMASK(lp->intmask);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (jiffies - lp->last_timeout > 10*HZ) {
+ BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
+ msg, status, lp->intmask, lp->lasttrans_dest);
+ lp->last_timeout = jiffies;
+ }
+
+ if (lp->cur_tx == -1)
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * The typical workload of the driver: Handle the network interface
+ * interrupts. Establish which device needs attention, and call the correct
+ * chipset interrupt handler.
+ */
+irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct arcnet_local *lp;
+ int recbuf, status, diagstatus, didsomething, boguscount;
+ int retval = IRQ_NONE;
+
+ BUGMSG(D_DURING, "\n");
+
+ BUGMSG(D_DURING, "in arcnet_interrupt\n");
+
+ lp = dev->priv;
+ if (!lp)
+ BUG();
+
+ spin_lock(&lp->lock);
+
+ /*
+ * RESET flag was enabled - if device is not running, we must clear it right
+ * away (but nothing else).
+ */
+ if (!netif_running(dev)) {
+ if (ASTATUS() & RESETflag)
+ ACOMMAND(CFLAGScmd | RESETclear);
+ AINTMASK(0);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+ }
+
+ BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n",
+ ASTATUS(), lp->intmask);
+
+ boguscount = 5;
+ do {
+ status = ASTATUS();
+ diagstatus = (status >> 8) & 0xFF;
+
+ BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
+ __FILE__,__LINE__,__FUNCTION__,status);
+ didsomething = 0;
+
+ /*
+ * RESET flag was enabled - card is resetting and if RX is
+ * disabled, it's NOT because we just got a packet.
+ *
+ * The card is in an undefined state. Clear it out and start over.
+ */
+ if (status & RESETflag) {
+ BUGMSG(D_NORMAL, "spurious reset (status=%Xh)\n", status);
+ arcnet_close(dev);
+ arcnet_open(dev);
+
+ /* get out of the interrupt handler! */
+ break;
+ }
+ /*
+ * RX is inhibited - we must have received something. Prepare to
+ * receive into the next buffer.
+ *
+ * We don't actually copy the received packet from the card until
+ * after the transmit handler runs (and possibly launches the next
+ * tx); this should improve latency slightly if we get both types
+ * of interrupts at once.
+ */
+ recbuf = -1;
+ if (status & lp->intmask & NORXflag) {
+ recbuf = lp->cur_rx;
+ BUGMSG(D_DURING, "Buffer #%d: receive irq (status=%Xh)\n",
+ recbuf, status);
+
+ lp->cur_rx = get_arcbuf(dev);
+ if (lp->cur_rx != -1) {
+ BUGMSG(D_DURING, "enabling receive to buffer #%d\n",
+ lp->cur_rx);
+ ACOMMAND(RXcmd | (lp->cur_rx << 3) | RXbcasts);
+ }
+ didsomething++;
+ }
+
+ if((diagstatus & EXCNAKflag)) {
+ BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n",
+ diagstatus);
+
+ ACOMMAND(NOTXcmd); /* disable transmit */
+ lp->excnak_pending = 1;
+
+ ACOMMAND(EXCNAKclear);
+ lp->intmask &= ~(EXCNAKflag);
+ didsomething++;
+ }
+
+
+ /* a transmit finished, and we're interested in it. */
+ if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
+ lp->intmask &= ~(TXFREEflag|EXCNAKflag);
+
+ BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status);
+
+ if (lp->cur_tx != -1 && !lp->timed_out) {
+ if(!(status & TXACKflag)) {
+ if (lp->lasttrans_dest != 0) {
+ BUGMSG(D_EXTRA,
+ "transmit was not acknowledged! "
+ "(status=%Xh, dest=%02Xh)\n",
+ status, lp->lasttrans_dest);
+ lp->stats.tx_errors++;
+ lp->stats.tx_carrier_errors++;
+ } else {
+ BUGMSG(D_DURING,
+ "broadcast was not acknowledged; that's normal "
+ "(status=%Xh, dest=%02Xh)\n",
+ status, lp->lasttrans_dest);
+ }
+ }
+
+ if (lp->outgoing.proto &&
+ lp->outgoing.proto->ack_tx) {
+ int ackstatus;
+ if(status & TXACKflag)
+ ackstatus=2;
+ else if(lp->excnak_pending)
+ ackstatus=1;
+ else
+ ackstatus=0;
+
+ lp->outgoing.proto
+ ->ack_tx(dev, ackstatus);
+ }
+ }
+ if (lp->cur_tx != -1)
+ release_arcbuf(dev, lp->cur_tx);
+
+ lp->cur_tx = -1;
+ lp->timed_out = 0;
+ didsomething++;
+
+ /* send another packet if there is one */
+ go_tx(dev);
+
+ /* continue a split packet, if any */
+ if (lp->outgoing.proto && lp->outgoing.proto->continue_tx) {
+ int txbuf = get_arcbuf(dev);
+ if (txbuf != -1) {
+ if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
+ /* that was the last segment */
+ lp->stats.tx_bytes += lp->outgoing.skb->len;
+ if(!lp->outgoing.proto->ack_tx)
+ {
+ dev_kfree_skb_irq(lp->outgoing.skb);
+ lp->outgoing.proto = NULL;
+ }
+ }
+ lp->next_tx = txbuf;
+ }
+ }
+ /* inform upper layers of idleness, if necessary */
+ if (lp->cur_tx == -1)
+ netif_wake_queue(dev);
+ }
+ /* now process the received packet, if any */
+ if (recbuf != -1) {
+ BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0);
+
+ arcnet_rx(dev, recbuf);
+ release_arcbuf(dev, recbuf);
+
+ didsomething++;
+ }
+ if (status & lp->intmask & RECONflag) {
+ ACOMMAND(CFLAGScmd | CONFIGclear);
+ lp->stats.tx_carrier_errors++;
+
+ BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
+ status);
+
+ /* is the RECON info empty or old? */
+ if (!lp->first_recon || !lp->last_recon ||
+ jiffies - lp->last_recon > HZ * 10) {
+ if (lp->network_down)
+ BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
+ lp->first_recon = lp->last_recon = jiffies;
+ lp->num_recons = lp->network_down = 0;
+
+ BUGMSG(D_DURING, "recon: clearing counters.\n");
+ } else { /* add to current RECON counter */
+ lp->last_recon = jiffies;
+ lp->num_recons++;
+
+ BUGMSG(D_DURING, "recon: counter=%d, time=%lds, net=%d\n",
+ lp->num_recons,
+ (lp->last_recon - lp->first_recon) / HZ,
+ lp->network_down);
+
+ /* if network is marked up;
+ * and first_recon and last_recon are 60+ apart;
+ * and the average no. of recons counted is
+ * > RECON_THRESHOLD/min;
+ * then print a warning message.
+ */
+ if (!lp->network_down
+ && (lp->last_recon - lp->first_recon) <= HZ * 60
+ && lp->num_recons >= RECON_THRESHOLD) {
+ lp->network_down = 1;
+ BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n");
+ } else if (!lp->network_down
+ && lp->last_recon - lp->first_recon > HZ * 60) {
+ /* reset counters if we've gone for over a minute. */
+ lp->first_recon = lp->last_recon;
+ lp->num_recons = 1;
+ }
+ }
+ } else if (lp->network_down && jiffies - lp->last_recon > HZ * 10) {
+ if (lp->network_down)
+ BUGMSG(D_NORMAL, "cabling restored?\n");
+ lp->first_recon = lp->last_recon = 0;
+ lp->num_recons = lp->network_down = 0;
+
+ BUGMSG(D_DURING, "not recon: clearing counters anyway.\n");
+ }
+
+ if(didsomething) {
+ retval |= IRQ_HANDLED;
+ }
+ }
+ while (--boguscount && didsomething);
+
+ BUGMSG(D_DURING, "arcnet_interrupt complete (status=%Xh, count=%d)\n",
+ ASTATUS(), boguscount);
+ BUGMSG(D_DURING, "\n");
+
+
+ AINTMASK(0);
+ udelay(1);
+ AINTMASK(lp->intmask);
+
+ spin_unlock(&lp->lock);
+ return retval;
+}
+
+
+/*
+ * This is a generic packet receiver that calls arcnet??_rx depending on the
+ * protocol ID found.
+ */
+void arcnet_rx(struct net_device *dev, int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct archdr pkt;
+ struct arc_rfc1201 *soft;
+ int length, ofs;
+
+ soft = &pkt.soft.rfc1201;
+
+	lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
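+	/* offset[0] != 0 marks a packet stored in 256-byte buffer format;
+	   otherwise offset[1] is used and the buffer is 512 bytes long */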
+ if (pkt.hard.offset[0]) {
+ ofs = pkt.hard.offset[0];
+ length = 256 - ofs;
+ } else {
+ ofs = pkt.hard.offset[1];
+ length = 512 - ofs;
+ }
+
+ /* get the full header, if possible */
+ if (sizeof(pkt.soft) <= length)
+ lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
+ else {
+ memset(&pkt.soft, 0, sizeof(pkt.soft));
+ lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
+ }
+
+ BUGMSG(D_DURING, "Buffer #%d: received packet from %02Xh to %02Xh "
+ "(%d+4 bytes)\n",
+ bufnum, pkt.hard.source, pkt.hard.dest, length);
+
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += length + ARC_HDR_SIZE;
+
+ /* call the right receiver for the protocol */
+ if (arc_proto_map[soft->proto]->is_ip) {
+ BUGLVL(D_PROTO) {
+ struct ArcProto
+ *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
+ *newp = arc_proto_map[soft->proto];
+
+ if (oldp != newp) {
+ BUGMSG(D_PROTO,
+ "got protocol %02Xh; encap for host %02Xh is now '%c'"
+ " (was '%c')\n", soft->proto, pkt.hard.source,
+ newp->suffix, oldp->suffix);
+ }
+ }
+
+ /* broadcasts will always be done with the last-used encap. */
+ lp->default_proto[0] = soft->proto;
+
+ /* in striking contrast, the following isn't a hack. */
+ lp->default_proto[pkt.hard.source] = soft->proto;
+ }
+ /* call the protocol-specific receiver. */
+ arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
+}
+
+
+
+/*
+ * Get the current statistics. This may be called with the card open or
+ * closed.
+ */
+static struct net_device_stats *arcnet_get_stats(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ return &lp->stats;
+}
+
+
+static void null_rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length)
+{
+ BUGMSG(D_PROTO,
+ "rx: don't know how to deal with proto %02Xh from host %02Xh.\n",
+ pkthdr->soft.rfc1201.proto, pkthdr->hard.source);
+}
+
+
+static int null_build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr)
+{
+ struct arcnet_local *lp = dev->priv;
+
+ BUGMSG(D_PROTO,
+ "tx: can't build header for encap %02Xh; load a protocol driver.\n",
+ lp->default_proto[daddr]);
+
+ /* always fails */
+ return 0;
+}
+
+
+/* the "do nothing" prepare_tx function warns that there's nothing to do. */
+static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
+ int length, int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct arc_hardware newpkt;
+
+ BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n");
+
+ /* send a packet to myself -- will never get received, of course */
+ newpkt.source = newpkt.dest = dev->dev_addr[0];
+
+ /* only one byte of actual data (and it's random) */
+ newpkt.offset[0] = 0xFF;
+
+ lp->hw.copy_to_card(dev, bufnum, 0, &newpkt, ARC_HDR_SIZE);
+
+ return 1; /* done */
+}
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
new file mode 100644
index 000000000000..16e155b04129
--- /dev/null
+++ b/drivers/net/arcnet/capmode.c
@@ -0,0 +1,296 @@
+/*
+ * Linux ARCnet driver - "cap mode" packet encapsulation.
+ * It adds a sequence number to each packet exchanged between a user-space
+ * application and the driver. After a transmit, the driver sends a packet with
+ * protocol byte 0 back up to user space containing the sequence number of the
+ * transmitted packet plus its transmit status on the ARCnet.
+ *
+ * Written 2002-4 by Esben Nielsen, Vestas Wind Systems A/S
+ * Derived from arc-rawmode.c by Avery Pennarun.
+ * arc-rawmode was in turn based on skeleton.c; see below.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
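+
+/*
+ * Rough sketch (illustrative only, not part of the driver) of the frame a
+ * user-space application exchanges with this module, assuming the arc_cap
+ * layout from <linux/arcdevice.h>:
+ *
+ *   byte 0:                 the protocol byte (0 in the ack sent by ack_tx())
+ *   next sizeof(int) bytes: the cookie, a sequence number chosen by user space
+ *   remaining bytes:        the payload; in the ack packet the first payload
+ *                           byte carries the transmit status
+ *
+ * The cookie is never transmitted on the wire: prepare_tx() skips it when
+ * copying to the card, and ack_tx() echoes it back in the protocol-0 ack.
+ */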
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/arcdevice.h>
+
+#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
+
+
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length);
+static int build_header(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ uint8_t daddr);
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum);
+static int ack_tx(struct net_device *dev, int acked);
+
+
+struct ArcProto capmode_proto =
+{
+	'c',
+ XMTU,
+ 0,
+ rx,
+ build_header,
+ prepare_tx,
+ NULL,
+ ack_tx
+};
+
+
+void arcnet_cap_init(void)
+{
+ int count;
+
+ for (count = 1; count <= 8; count++)
+ if (arc_proto_map[count] == arc_proto_default)
+ arc_proto_map[count] = &capmode_proto;
+
+ /* for cap mode, we only set the bcast proto if there's no better one */
+ if (arc_bcast_proto == arc_proto_default)
+ arc_bcast_proto = &capmode_proto;
+
+ arc_proto_default = &capmode_proto;
+ arc_raw_proto = &capmode_proto;
+}
+
+
+#ifdef MODULE
+
+int __init init_module(void)
+{
+ printk(VERSION);
+ arcnet_cap_init();
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ arcnet_unregister_proto(&capmode_proto);
+}
+
+MODULE_LICENSE("GPL");
+#endif /* MODULE */
+
+
+
+/* packet receiver */
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length)
+{
+ struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
+ struct sk_buff *skb;
+ struct archdr *pkt = pkthdr;
+ char *pktbuf, *pkthdrbuf;
+ int ofs;
+
+ BUGMSG(D_DURING, "it's a raw(cap) packet (length=%d)\n", length);
+
+ if (length >= MinTU)
+ ofs = 512 - length;
+ else
+ ofs = 256 - length;
+
+ skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
+ if (skb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
+ skb->dev = dev;
+
+ pkt = (struct archdr *) skb->data;
+
+ skb->mac.raw = skb->data;
+ skb_pull(skb, ARC_HDR_SIZE);
+
+ /* up to sizeof(pkt->soft) has already been copied from the card */
+ /* squeeze in an int for the cap encapsulation */
+
+ /* use these variables to be sure we count in bytes, not in
+ sizeof(struct archdr) */
+ pktbuf=(char*)pkt;
+ pkthdrbuf=(char*)pkthdr;
+ memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto));
+ memcpy(pktbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)+sizeof(int),
+ pkthdrbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto),
+ sizeof(struct archdr)-ARC_HDR_SIZE-sizeof(pkt->soft.cap.proto));
+
+ if (length > sizeof(pkt->soft))
+ lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
+ pkt->soft.raw + sizeof(pkt->soft)
+ + sizeof(int),
+ length - sizeof(pkt->soft));
+
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+
+ skb->protocol = __constant_htons(ETH_P_ARCNET);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+}
+
+
+/*
+ * Create the ARCnet hard/soft headers for cap mode.
+ * There aren't any soft headers in cap mode - not even the protocol id.
+ */
+static int build_header(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ uint8_t daddr)
+{
+ int hdr_size = ARC_HDR_SIZE;
+ struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+
+ BUGMSG(D_PROTO, "Preparing header for cap packet %x.\n",
+ *((int*)&pkt->soft.cap.cookie[0]));
+ /*
+ * Set the source hardware address.
+ *
+	 * This is pretty pointless for most purposes, but it can help in
+	 * debugging. (ARCnet does not allow us to change the source address
+	 * in the actual packet sent.)
+ */
+ pkt->hard.source = *dev->dev_addr;
+
+ /* see linux/net/ethernet/eth.c to see where I got the following */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ /*
+ * FIXME: fill in the last byte of the dest ipaddr here to better
+ * comply with RFC1051 in "noarp" mode.
+ */
+ pkt->hard.dest = 0;
+ return hdr_size;
+ }
+ /* otherwise, just fill it in and go! */
+ pkt->hard.dest = daddr;
+
+ return hdr_size; /* success */
+}
+
+
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum)
+{
+ struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
+ struct arc_hardware *hard = &pkt->hard;
+ int ofs;
+
+
+ /* hard header is not included in packet length */
+ length -= ARC_HDR_SIZE;
+ /* And neither is the cookie field */
+ length -= sizeof(int);
+
+ BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
+ lp->next_tx, lp->cur_tx, bufnum);
+
+ BUGMSG(D_PROTO, "Sending for cap packet %x.\n",
+ *((int*)&pkt->soft.cap.cookie[0]));
+
+ if (length > XMTU) {
+ /* should never happen! other people already check for this. */
+ BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
+ length, XMTU);
+ length = XMTU;
+ }
+ if (length > MinTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length;
+ } else if (length > MTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length - 3;
+ } else
+ hard->offset[0] = ofs = 256 - length;
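+	/* Worked example, assuming the usual MTU/MinTU/XMTU values of
+	 * 253/257/508 from <linux/arcdevice.h>: a 100-byte packet is "short"
+	 * (ofs = 256 - 100 = 156), a 300-byte packet is "long"
+	 * (ofs = 512 - 300 = 212), and a 255-byte packet is an "exception"
+	 * packet padded by 3 bytes (ofs = 512 - 255 - 3 = 254).
+	 */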
+
+ BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
+ length,ofs);
+
+ // Copy the arcnet-header + the protocol byte down:
+ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
+ lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
+ sizeof(pkt->soft.cap.proto));
+
+ // Skip the extra integer we have written into it as a cookie
+ // but write the rest of the message:
+ lp->hw.copy_to_card(dev, bufnum, ofs+1,
+ ((unsigned char*)&pkt->soft.cap.mes),length-1);
+
+ lp->lastload_dest = hard->dest;
+
+ return 1; /* done */
+}
+
+
+static int ack_tx(struct net_device *dev, int acked)
+{
+ struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
+ struct sk_buff *ackskb;
+ struct archdr *ackpkt;
+ int length=sizeof(struct arc_cap);
+
+ BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
+ lp->outgoing.skb->protocol, acked);
+
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
+
+ /* Now alloc a skb to send back up through the layers: */
+ ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
+ if (ackskb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
+ goto free_outskb;
+ }
+
+ skb_put(ackskb, length + ARC_HDR_SIZE );
+ ackskb->dev = dev;
+
+ ackpkt = (struct archdr *) ackskb->data;
+
+ ackskb->mac.raw = ackskb->data;
+ /* skb_pull(ackskb, ARC_HDR_SIZE); */
+
+
+ memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap));
+ ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
+ ackpkt->soft.cap.mes.ack=acked;
+
+	BUGMSG(D_PROTO, "Acknowledge for cap packet %x.\n",
+ *((int*)&ackpkt->soft.cap.cookie[0]));
+
+ ackskb->protocol = __constant_htons(ETH_P_ARCNET);
+
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
+ netif_rx(ackskb);
+
+ free_outskb:
+ dev_kfree_skb_irq(lp->outgoing.skb);
+ lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
+
+ return 0;
+}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
new file mode 100644
index 000000000000..9289e6103de5
--- /dev/null
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -0,0 +1,219 @@
+/*
+ * Linux ARCnet driver - COM20020 chipset support
+ *
+ * Written 1997 by David Woodhouse.
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/arcdevice.h>
+#include <linux/com20020.h>
+
+#include <asm/io.h>
+
+#define VERSION "arcnet: COM20020 ISA support (by David Woodhouse et al.)\n"
+
+
+/*
+ * We cannot (yet) probe for an IO mapped card, although we can check that
+ * it's where we were told it was, and even do autoirq.
+ */
+static int __init com20020isa_probe(struct net_device *dev)
+{
+ int ioaddr;
+ unsigned long airqmask;
+ struct arcnet_local *lp = dev->priv;
+ int err;
+
+ BUGLVL(D_NORMAL) printk(VERSION);
+
+ ioaddr = dev->base_addr;
+ if (!ioaddr) {
+ BUGMSG(D_NORMAL, "No autoprobe (yet) for IO mapped cards; you "
+ "must specify the base address!\n");
+ return -ENODEV;
+ }
+ if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "arcnet (COM20020)")) {
+ BUGMSG(D_NORMAL, "IO region %xh-%xh already allocated.\n",
+ ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
+ return -ENXIO;
+ }
+ if (ASTATUS() == 0xFF) {
+ BUGMSG(D_NORMAL, "IO address %x empty\n", ioaddr);
+ err = -ENODEV;
+ goto out;
+ }
+ if (com20020_check(dev)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!dev->irq) {
+ /* if we do this, we're sure to get an IRQ since the
+ * card has just reset and the NORXflag is on until
+ * we tell it to start receiving.
+ */
+ BUGMSG(D_INIT_REASONS, "intmask was %02Xh\n", inb(_INTMASK));
+ outb(0, _INTMASK);
+ airqmask = probe_irq_on();
+ outb(NORXflag, _INTMASK);
+ udelay(1);
+ outb(0, _INTMASK);
+ dev->irq = probe_irq_off(airqmask);
+
+ if (dev->irq <= 0) {
+ BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed first time\n");
+ airqmask = probe_irq_on();
+ outb(NORXflag, _INTMASK);
+ udelay(5);
+ outb(0, _INTMASK);
+ dev->irq = probe_irq_off(airqmask);
+ if (dev->irq <= 0) {
+ BUGMSG(D_NORMAL, "Autoprobe IRQ failed.\n");
+ err = -ENODEV;
+ goto out;
+ }
+ }
+ }
+
+ lp->card_name = "ISA COM20020";
+ if ((err = com20020_found(dev, 0)) != 0)
+ goto out;
+
+ return 0;
+
+out:
+ release_region(ioaddr, ARCNET_TOTAL_SIZE);
+ return err;
+}
+
+static int node = 0;
+static int io = 0x0; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq = 0; /* or use the insmod io= irq= shmem= options */
+static char device[9]; /* use eg. device="arc1" to change name */
+static int timeout = 3;
+static int backplane = 0;
+static int clockp = 0;
+static int clockm = 0;
+
+module_param(node, int, 0);
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param_string(device, device, sizeof(device), 0);
+module_param(timeout, int, 0);
+module_param(backplane, int, 0);
+module_param(clockp, int, 0);
+module_param(clockm, int, 0);
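+
+/*
+ * Example insmod invocation (module name taken from this source file; all
+ * values are illustrative -- use the settings that match your card):
+ *
+ *	insmod com20020-isa io=0x2e0 irq=9 node=1 timeout=3 device=arc0
+ */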
+
+MODULE_LICENSE("GPL");
+
+static struct net_device *my_dev;
+
+static int __init com20020_init(void)
+{
+ struct net_device *dev;
+ struct arcnet_local *lp;
+
+ dev = alloc_arcdev(device);
+ if (!dev)
+ return -ENOMEM;
+
+ if (node && node != 0xff)
+ dev->dev_addr[0] = node;
+
+ lp = dev->priv;
+ lp->backplane = backplane;
+ lp->clockp = clockp & 7;
+ lp->clockm = clockm & 3;
+ lp->timeout = timeout & 3;
+ lp->hw.owner = THIS_MODULE;
+
+ dev->base_addr = io;
+ dev->irq = irq;
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (com20020isa_probe(dev)) {
+ free_netdev(dev);
+ return -EIO;
+ }
+
+ my_dev = dev;
+ return 0;
+}
+
+static void __exit com20020_exit(void)
+{
+ unregister_netdev(my_dev);
+ free_irq(my_dev->irq, my_dev);
+ release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
+ free_netdev(my_dev);
+}
+
+#ifndef MODULE
+static int __init com20020isa_setup(char *s)
+{
+ int ints[8];
+
+ s = get_options(s, 8, ints);
+ if (!ints[0])
+ return 1;
+
+ switch (ints[0]) {
+ default: /* ERROR */
+		printk("com20020: Too many arguments.\n");
+ case 6: /* Timeout */
+ timeout = ints[6];
+ case 5: /* CKP value */
+ clockp = ints[5];
+ case 4: /* Backplane flag */
+ backplane = ints[4];
+ case 3: /* Node ID */
+ node = ints[3];
+ case 2: /* IRQ */
+ irq = ints[2];
+ case 1: /* IO address */
+ io = ints[1];
+ }
+ if (*s)
+ snprintf(device, sizeof(device), "%s", s);
+ return 1;
+}
+
+__setup("com20020=", com20020isa_setup);
+
+#endif /* MODULE */
+
+module_init(com20020_init)
+module_exit(com20020_exit)
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
new file mode 100644
index 000000000000..96636ca8754e
--- /dev/null
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -0,0 +1,189 @@
+/*
+ * Linux ARCnet driver - COM20020 PCI support
+ * Contemporary Controls PCI20 and SOHARD SH-ARC PCI
+ *
+ * Written 1994-1999 by Avery Pennarun,
+ * based on an ISA version by David Woodhouse.
+ * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/arcdevice.h>
+#include <linux/com20020.h>
+
+#include <asm/io.h>
+
+
+#define VERSION "arcnet: COM20020 PCI support\n"
+
+/* Module parameters */
+
+static int node;
+static char device[9]; /* use eg. device="arc1" to change name */
+static int timeout = 3;
+static int backplane;
+static int clockp;
+static int clockm;
+
+module_param(node, int, 0);
+module_param_string(device, device, sizeof(device), 0);
+module_param(timeout, int, 0);
+module_param(backplane, int, 0);
+module_param(clockp, int, 0);
+module_param(clockm, int, 0);
+MODULE_LICENSE("GPL");
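+
+/*
+ * Illustrative usage (module name taken from this source file; I/O and IRQ
+ * come from the PCI BIOS, so only ARCnet-specific settings are needed):
+ *
+ *	modprobe com20020-pci node=1 device=arc0
+ */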
+
+static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct arcnet_local *lp;
+ int ioaddr, err;
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+ dev = alloc_arcdev(device);
+ if (!dev)
+ return -ENOMEM;
+ lp = dev->priv;
+
+ pci_set_drvdata(pdev, dev);
+
+ // SOHARD needs PCI base addr 4
+ if (pdev->vendor==0x10B5) {
+ BUGMSG(D_NORMAL, "SOHARD\n");
+ ioaddr = pci_resource_start(pdev, 4);
+ }
+ else {
+ BUGMSG(D_NORMAL, "Contemporary Controls\n");
+ ioaddr = pci_resource_start(pdev, 2);
+ }
+
+ if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com20020-pci")) {
+ BUGMSG(D_INIT, "IO region %xh-%xh already allocated.\n",
+ ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
+ err = -EBUSY;
+ goto out_dev;
+ }
+
+ // Dummy access after Reset
+ // ARCNET controller needs this access to detect bustype
+ outb(0x00,ioaddr+1);
+ inb(ioaddr+1);
+
+ dev->base_addr = ioaddr;
+ dev->irq = pdev->irq;
+ dev->dev_addr[0] = node;
+ lp->card_name = "PCI COM20020";
+ lp->card_flags = id->driver_data;
+ lp->backplane = backplane;
+ lp->clockp = clockp & 7;
+ lp->clockm = clockm & 3;
+ lp->timeout = timeout;
+ lp->hw.owner = THIS_MODULE;
+
+ if (ASTATUS() == 0xFF) {
+ BUGMSG(D_NORMAL, "IO address %Xh was reported by PCI BIOS, "
+ "but seems empty!\n", ioaddr);
+ err = -EIO;
+ goto out_port;
+ }
+ if (com20020_check(dev)) {
+ err = -EIO;
+ goto out_port;
+ }
+
+ if ((err = com20020_found(dev, SA_SHIRQ)) != 0)
+ goto out_port;
+
+ return 0;
+
+out_port:
+ release_region(ioaddr, ARCNET_TOTAL_SIZE);
+out_dev:
+ free_netdev(dev);
+ return err;
+}
+
+static void __devexit com20020pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+ free_netdev(dev);
+}
+
+static struct pci_device_id com20020pci_id_table[] = {
+ { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1571, 0xa009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT },
+ { 0x1571, 0xa00a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT },
+ { 0x1571, 0xa00b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT },
+ { 0x1571, 0xa00c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT },
+ { 0x1571, 0xa00d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT },
+ { 0x1571, 0xa201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x1571, 0xa202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x1571, 0xa203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, com20020pci_id_table);
+
+static struct pci_driver com20020pci_driver = {
+ .name = "com20020",
+ .id_table = com20020pci_id_table,
+ .probe = com20020pci_probe,
+ .remove = __devexit_p(com20020pci_remove),
+};
+
+static int __init com20020pci_init(void)
+{
+ BUGLVL(D_NORMAL) printk(VERSION);
+ return pci_module_init(&com20020pci_driver);
+}
+
+static void __exit com20020pci_cleanup(void)
+{
+ pci_unregister_driver(&com20020pci_driver);
+}
+
+module_init(com20020pci_init)
+module_exit(com20020pci_cleanup)
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
new file mode 100644
index 000000000000..0dc70c7b7940
--- /dev/null
+++ b/drivers/net/arcnet/com20020.c
@@ -0,0 +1,357 @@
+/*
+ * Linux ARCnet driver - COM20020 chipset support
+ *
+ * Written 1997 by David Woodhouse.
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/arcdevice.h>
+#include <linux/com20020.h>
+
+#include <asm/io.h>
+
+#define VERSION "arcnet: COM20020 chipset support (by David Woodhouse et al.)\n"
+
+static char *clockrates[] =
+{"10 Mb/s", "Reserved", "5 Mb/s",
+ "2.5 Mb/s", "1.25 Mb/s", "625 Kb/s", "312.5 Kb/s",
+ "156.25 Kb/s", "Reserved", "Reserved", "Reserved"};
+
+static void com20020_command(struct net_device *dev, int command);
+static int com20020_status(struct net_device *dev);
+static void com20020_setmask(struct net_device *dev, int mask);
+static int com20020_reset(struct net_device *dev, int really_reset);
+static void com20020_copy_to_card(struct net_device *dev, int bufnum,
+ int offset, void *buf, int count);
+static void com20020_copy_from_card(struct net_device *dev, int bufnum,
+ int offset, void *buf, int count);
+static void com20020_set_mc_list(struct net_device *dev);
+static void com20020_close(struct net_device *);
+
+static void com20020_copy_from_card(struct net_device *dev, int bufnum,
+ int offset, void *buf, int count)
+{
+ int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
+
+ /* set up the address register */
+ outb((ofs >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
+ outb(ofs & 0xff, _ADDR_LO);
+
+ /* copy the data */
+ TIME("insb", count, insb(_MEMDATA, buf, count));
+}
+
+
+static void com20020_copy_to_card(struct net_device *dev, int bufnum,
+ int offset, void *buf, int count)
+{
+ int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
+
+ /* set up the address register */
+ outb((ofs >> 8) | AUTOINCflag, _ADDR_HI);
+ outb(ofs & 0xff, _ADDR_LO);
+
+ /* copy the data */
+ TIME("outsb", count, outsb(_MEMDATA, buf, count));
+}
+
+
+/* Reset the card and check some basic stuff during the detection stage. */
+int com20020_check(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr, status;
+ struct arcnet_local *lp = dev->priv;
+
+ ARCRESET0;
+ mdelay(RESETtime);
+
+ lp->setup = lp->clockm ? 0 : (lp->clockp << 1);
+ lp->setup2 = (lp->clockm << 4) | 8;
+
+ /* CHECK: should we do this for SOHARD cards ? */
+ /* Enable P1Mode for backplane mode */
+ lp->setup = lp->setup | P1MODE;
+
+ SET_SUBADR(SUB_SETUP1);
+ outb(lp->setup, _XREG);
+
+ if (lp->card_flags & ARC_CAN_10MBIT)
+ {
+ SET_SUBADR(SUB_SETUP2);
+ outb(lp->setup2, _XREG);
+
+ /* must now write the magic "restart operation" command */
+ mdelay(1);
+ outb(0x18, _COMMAND);
+ }
+
+ lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2);
+ /* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
+ SETCONF;
+ outb(0x42, ioaddr + BUS_ALIGN*7);
+
+ status = ASTATUS();
+
+ if ((status & 0x99) != (NORXflag | TXFREEflag | RESETflag)) {
+ BUGMSG(D_NORMAL, "status invalid (%Xh).\n", status);
+ return -ENODEV;
+ }
+ BUGMSG(D_INIT_REASONS, "status after reset: %X\n", status);
+
+ /* Enable TX */
+ outb(0x39, _CONFIG);
+ outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7);
+
+ ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+
+ status = ASTATUS();
+ BUGMSG(D_INIT_REASONS, "status after reset acknowledged: %X\n",
+ status);
+
+ /* Read first location of memory */
+ outb(0 | RDDATAflag | AUTOINCflag, _ADDR_HI);
+ outb(0, _ADDR_LO);
+
+ if ((status = inb(_MEMDATA)) != TESTvalue) {
+ BUGMSG(D_NORMAL, "Signature byte not found (%02Xh != D1h).\n",
+ status);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* Set up the struct net_device associated with this card. Called after
+ * probing succeeds.
+ */
+int com20020_found(struct net_device *dev, int shared)
+{
+ struct arcnet_local *lp;
+ int ioaddr = dev->base_addr;
+
+ /* Initialize the rest of the device structure. */
+
+ lp = dev->priv;
+
+ lp->hw.owner = THIS_MODULE;
+ lp->hw.command = com20020_command;
+ lp->hw.status = com20020_status;
+ lp->hw.intmask = com20020_setmask;
+ lp->hw.reset = com20020_reset;
+ lp->hw.copy_to_card = com20020_copy_to_card;
+ lp->hw.copy_from_card = com20020_copy_from_card;
+ lp->hw.close = com20020_close;
+
+ dev->set_multicast_list = com20020_set_mc_list;
+
+ if (!dev->dev_addr[0])
+ dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
+
+ SET_SUBADR(SUB_SETUP1);
+ outb(lp->setup, _XREG);
+
+ if (lp->card_flags & ARC_CAN_10MBIT)
+ {
+ SET_SUBADR(SUB_SETUP2);
+ outb(lp->setup2, _XREG);
+
+ /* must now write the magic "restart operation" command */
+ mdelay(1);
+ outb(0x18, _COMMAND);
+ }
+
+ lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1;
+ /* Default 0x38 + register: Node ID */
+ SETCONF;
+ outb(dev->dev_addr[0], _XREG);
+
+ /* reserve the irq */
+ if (request_irq(dev->irq, &arcnet_interrupt, shared,
+ "arcnet (COM20020)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+ }
+
+ dev->base_addr = ioaddr;
+
+ BUGMSG(D_NORMAL, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
+ lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
+
+ if (lp->backplane)
+ BUGMSG(D_NORMAL, "Using backplane mode.\n");
+
+ if (lp->timeout != 3)
+ BUGMSG(D_NORMAL, "Using extended timeout value of %d.\n", lp->timeout);
+
+ BUGMSG(D_NORMAL, "Using CKP %d - data rate %s.\n",
+ lp->setup >> 1,
+ clockrates[3 - ((lp->setup2 & 0xF0) >> 4) + ((lp->setup & 0x0F) >> 1)]);
+
+ if (register_netdev(dev)) {
+ free_irq(dev->irq, dev);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+/*
+ * Do a hardware reset on the card, and set up necessary registers.
+ *
+ * This should be called as little as possible, because it disrupts the
+ * token on the network (causes a RECON) and requires a significant delay.
+ *
+ * However, it does make sure the card is in a defined state.
+ */
+static int com20020_reset(struct net_device *dev, int really_reset)
+{
+ struct arcnet_local *lp = dev->priv;
+ u_int ioaddr = dev->base_addr;
+ u_char inbyte;
+
+ BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
+ __FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name);
+ BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
+ dev->name, ASTATUS());
+
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
+ /* power-up defaults */
+ SETCONF;
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+
+ if (really_reset) {
+ /* reset the card */
+ ARCRESET;
+ mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
+ }
+ /* clear flags & end reset */
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+
+ /* verify that the ARCnet signature byte is present */
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+
+ com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ if (inbyte != TESTvalue) {
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+ BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+ return 1;
+ }
+ /* enable extended (512-byte) packets */
+ ACOMMAND(CONFIGcmd | EXTconf);
+ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
+
+ /* done! return success. */
+ return 0;
+}
+
+
+static void com20020_setmask(struct net_device *dev, int mask)
+{
+ u_int ioaddr = dev->base_addr;
+ BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr);
+ AINTMASK(mask);
+}
+
+
+static void com20020_command(struct net_device *dev, int cmd)
+{
+ u_int ioaddr = dev->base_addr;
+ ACOMMAND(cmd);
+}
+
+
+static int com20020_status(struct net_device *dev)
+{
+ u_int ioaddr = dev->base_addr;
+
+ return ASTATUS() + (ADIAGSTATUS()<<8);
+}
+
+static void com20020_close(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* disable transmitter */
+ lp->config &= ~TXENcfg;
+ SETCONF;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ * best-effort filtering.
+ * FIXME - do multicast stuff, not just promiscuous.
+ */
+static void com20020_set_mc_list(struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */
+ if (!(lp->setup & PROMISCset))
+ BUGMSG(D_NORMAL, "Setting promiscuous flag...\n");
+ SET_SUBADR(SUB_SETUP1);
+ lp->setup |= PROMISCset;
+ outb(lp->setup, _XREG);
+ } else
+ /* Disable promiscuous mode, use normal mode */
+ {
+ if ((lp->setup & PROMISCset))
+ BUGMSG(D_NORMAL, "Resetting promiscuous flag...\n");
+ SET_SUBADR(SUB_SETUP1);
+ lp->setup &= ~PROMISCset;
+ outb(lp->setup, _XREG);
+ }
+}
+
+#ifdef MODULE
+
+EXPORT_SYMBOL(com20020_check);
+EXPORT_SYMBOL(com20020_found);
+
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ BUGLVL(D_NORMAL) printk(VERSION);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
new file mode 100644
index 000000000000..52c77cbe8c62
--- /dev/null
+++ b/drivers/net/arcnet/com90io.c
@@ -0,0 +1,435 @@
+/*
+ * Linux ARCnet driver - COM90xx chipset (IO-mapped buffers)
+ *
+ * Written 1997 by David Woodhouse.
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/arcdevice.h>
+
+
+#define VERSION "arcnet: COM90xx IO-mapped mode support (by David Woodhouse et el.)\n"
+
+
+/* Internal function declarations */
+
+static int com90io_found(struct net_device *dev);
+static void com90io_command(struct net_device *dev, int command);
+static int com90io_status(struct net_device *dev);
+static void com90io_setmask(struct net_device *dev, int mask);
+static int com90io_reset(struct net_device *dev, int really_reset);
+static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+
+
+/* Handy defines for ARCnet specific stuff */
+
+/* The number of low I/O ports used by the card. */
+#define ARCNET_TOTAL_SIZE 16
+
+/* COM 9026 controller chip --> ARCnet register addresses */
+#define _INTMASK (ioaddr+0) /* writable */
+#define _STATUS (ioaddr+0) /* readable */
+#define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */
+#define _RESET (ioaddr+8) /* software reset (on read) */
+#define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */
+#define _ADDR_HI (ioaddr+15) /* Control registers for said */
+#define _ADDR_LO (ioaddr+14)
+#define _CONFIG (ioaddr+2) /* Configuration register */
+
+#undef ASTATUS
+#undef ACOMMAND
+#undef AINTMASK
+
+#define ASTATUS() inb(_STATUS)
+#define ACOMMAND(cmd) outb((cmd),_COMMAND)
+#define AINTMASK(msk) outb((msk),_INTMASK)
+#define SETCONF() outb((lp->config),_CONFIG)
+
+
+/****************************************************************************
+ * *
+ * IO-mapped operation routines *
+ * *
+ ****************************************************************************/
+
+#undef ONE_AT_A_TIME_TX
+#undef ONE_AT_A_TIME_RX
+
+static u_char get_buffer_byte(struct net_device *dev, unsigned offset)
+{
+ int ioaddr = dev->base_addr;
+
+ outb(offset >> 8, _ADDR_HI);
+ outb(offset & 0xff, _ADDR_LO);
+
+ return inb(_MEMDATA);
+}
+
+#ifdef ONE_AT_A_TIME_TX
+static void put_buffer_byte(struct net_device *dev, unsigned offset, u_char datum)
+{
+ int ioaddr = dev->base_addr;
+
+ outb(offset >> 8, _ADDR_HI);
+ outb(offset & 0xff, _ADDR_LO);
+
+ outb(datum, _MEMDATA);
+}
+
+#endif
+
+
+static void get_whole_buffer(struct net_device *dev, unsigned offset, unsigned length, char *dest)
+{
+ int ioaddr = dev->base_addr;
+
+ outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
+ outb(offset & 0xff, _ADDR_LO);
+
+ while (length--)
+#ifdef ONE_AT_A_TIME_RX
+ *(dest++) = get_buffer_byte(dev, offset++);
+#else
+ *(dest++) = inb(_MEMDATA);
+#endif
+}
+
+static void put_whole_buffer(struct net_device *dev, unsigned offset, unsigned length, char *dest)
+{
+ int ioaddr = dev->base_addr;
+
+ outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
+ outb(offset & 0xff, _ADDR_LO);
+
+ while (length--)
+#ifdef ONE_AT_A_TIME_TX
+ put_buffer_byte(dev, offset++, *(dest++));
+#else
+ outb(*(dest++), _MEMDATA);
+#endif
+}
+
+/*
+ * We cannot probe for an IO mapped card either, although we can check that
+ * it's where we were told it was, and even autoirq
+ */
+static int __init com90io_probe(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr, status;
+ unsigned long airqmask;
+
+ BUGLVL(D_NORMAL) printk(VERSION);
+ BUGLVL(D_NORMAL) printk("E-mail me if you actually test this driver, please!\n");
+
+ if (!ioaddr) {
+ BUGMSG(D_NORMAL, "No autoprobe for IO mapped cards; you "
+ "must specify the base address!\n");
+ return -ENODEV;
+ }
+ if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) {
+ BUGMSG(D_INIT_REASONS, "IO check_region %x-%x failed.\n",
+ ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
+ return -ENXIO;
+ }
+ if (ASTATUS() == 0xFF) {
+ BUGMSG(D_INIT_REASONS, "IO address %x empty\n", ioaddr);
+ goto err_out;
+ }
+ inb(_RESET);
+ mdelay(RESETtime);
+
+ status = ASTATUS();
+
+ if ((status & 0x9D) != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
+ BUGMSG(D_INIT_REASONS, "Status invalid (%Xh).\n", status);
+ goto err_out;
+ }
+ BUGMSG(D_INIT_REASONS, "Status after reset: %X\n", status);
+
+ ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+
+ BUGMSG(D_INIT_REASONS, "Status after reset acknowledged: %X\n", status);
+
+ status = ASTATUS();
+
+ if (status & RESETflag) {
+ BUGMSG(D_INIT_REASONS, "Eternal reset (status=%Xh)\n", status);
+ goto err_out;
+ }
+ outb((0x16 | IOMAPflag) & ~ENABLE16flag, _CONFIG);
+
+ /* Read first loc'n of memory */
+
+ outb(AUTOINCflag, _ADDR_HI);
+ outb(0, _ADDR_LO);
+
+ if ((status = inb(_MEMDATA)) != 0xd1) {
+ BUGMSG(D_INIT_REASONS, "Signature byte not found"
+ " (%Xh instead).\n", status);
+ goto err_out;
+ }
+ if (!dev->irq) {
+ /*
+ * if we do this, we're sure to get an IRQ since the
+ * card has just reset and the NORXflag is on until
+ * we tell it to start receiving.
+ */
+
+ airqmask = probe_irq_on();
+ outb(NORXflag, _INTMASK);
+ udelay(1);
+ outb(0, _INTMASK);
+ dev->irq = probe_irq_off(airqmask);
+
+ if (dev->irq <= 0) {
+ BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n");
+ goto err_out;
+ }
+ }
+ release_region(ioaddr, ARCNET_TOTAL_SIZE); /* end of probing */
+ return com90io_found(dev);
+
+err_out:
+ release_region(ioaddr, ARCNET_TOTAL_SIZE);
+ return -ENODEV;
+}
+
+
+/* Set up the struct net_device associated with this card. Called after
+ * probing succeeds.
+ */
+static int __init com90io_found(struct net_device *dev)
+{
+ struct arcnet_local *lp;
+ int ioaddr = dev->base_addr;
+ int err;
+
+ /* Reserve the irq */
+ if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+ }
+ /* Reserve the I/O region - guaranteed to work by check_region */
+ if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) {
+ free_irq(dev->irq, dev);
+ return -EBUSY;
+ }
+
+ lp = dev->priv;
+ lp->card_name = "COM90xx I/O";
+ lp->hw.command = com90io_command;
+ lp->hw.status = com90io_status;
+ lp->hw.intmask = com90io_setmask;
+ lp->hw.reset = com90io_reset;
+ lp->hw.owner = THIS_MODULE;
+ lp->hw.copy_to_card = com90io_copy_to_card;
+ lp->hw.copy_from_card = com90io_copy_from_card;
+
+ lp->config = (0x16 | IOMAPflag) & ~ENABLE16flag;
+ SETCONF();
+
+ /* get and check the station ID from offset 1 in shmem */
+
+ dev->dev_addr[0] = get_buffer_byte(dev, 1);
+
+ err = register_netdev(dev);
+ if (err) {
+ outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+ return err;
+ }
+
+ BUGMSG(D_NORMAL, "COM90IO: station %02Xh found at %03lXh, IRQ %d.\n",
+ dev->dev_addr[0], dev->base_addr, dev->irq);
+
+ return 0;
+}
+
+
+/*
+ * Do a hardware reset on the card, and set up necessary registers.
+ *
+ * This should be called as little as possible, because it disrupts the
+ * token on the network (causes a RECON) and requires a significant delay.
+ *
+ * However, it does make sure the card is in a defined state.
+ */
+static int com90io_reset(struct net_device *dev, int really_reset)
+{
+ struct arcnet_local *lp = dev->priv;
+ short ioaddr = dev->base_addr;
+
+ BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
+
+ if (really_reset) {
+ /* reset the card */
+ inb(_RESET);
+ mdelay(RESETtime);
+ }
+ /* Set the thing to IO-mapped, 8-bit mode */
+ lp->config = (0x1C | IOMAPflag) & ~ENABLE16flag;
+ SETCONF();
+
+ ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */
+ ACOMMAND(CFLAGScmd | CONFIGclear);
+
+ /* verify that the ARCnet signature byte is present */
+ if (get_buffer_byte(dev, 0) != TESTvalue) {
+ BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+ return 1;
+ }
+ /* enable extended (512-byte) packets */
+ ACOMMAND(CONFIGcmd | EXTconf);
+
+ /* done! return success. */
+ return 0;
+}
+
+
+static void com90io_command(struct net_device *dev, int cmd)
+{
+ short ioaddr = dev->base_addr;
+
+ ACOMMAND(cmd);
+}
+
+
+static int com90io_status(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ return ASTATUS();
+}
+
+
+static void com90io_setmask(struct net_device *dev, int mask)
+{
+ short ioaddr = dev->base_addr;
+
+ AINTMASK(mask);
+}
+
+static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ TIME("put_whole_buffer", count, put_whole_buffer(dev, bufnum * 512 + offset, count, buf));
+}
+
+
+static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ TIME("get_whole_buffer", count, get_whole_buffer(dev, bufnum * 512 + offset, count, buf));
+}
+
+static int io; /* use the insmod io= irq= shmem= options */
+static int irq;
+static char device[9]; /* use eg. device=arc1 to change name */
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param_string(device, device, sizeof(device), 0);
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init com90io_setup(char *s)
+{
+ int ints[4];
+ s = get_options(s, 4, ints);
+ if (!ints[0])
+ return 0;
+ switch (ints[0]) {
+ default: /* ERROR */
+ printk("com90io: Too many arguments.\n");
+ case 2: /* IRQ */
+ irq = ints[2];
+ case 1: /* IO address */
+ io = ints[1];
+ }
+ if (*s)
+ snprintf(device, sizeof(device), "%s", s);
+ return 1;
+}
+__setup("com90io=", com90io_setup);
+#endif
+
+static struct net_device *my_dev;
+
+static int __init com90io_init(void)
+{
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_arcdev(device);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+
+ dev->base_addr = io;
+ dev->irq = irq;
+ if (dev->irq == 2)
+ dev->irq = 9;
+
+ err = com90io_probe(dev);
+
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+
+ my_dev = dev;
+ return 0;
+}
+
+static void __exit com90io_exit(void)
+{
+ struct net_device *dev = my_dev;
+ int ioaddr = dev->base_addr;
+
+ unregister_netdev(dev);
+
+ /* Set the thing back to MMAP mode, in case the old driver is loaded later */
+ outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);
+
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+ free_netdev(dev);
+}
+
+module_init(com90io_init)
+module_exit(com90io_exit)
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
new file mode 100644
index 000000000000..6c2c9b9ac6db
--- /dev/null
+++ b/drivers/net/arcnet/com90xx.c
@@ -0,0 +1,646 @@
+/*
+ * Linux ARCnet driver - COM90xx chipset (memory-mapped buffers)
+ *
+ * Written 1994-1999 by Avery Pennarun.
+ * Written 1999 by Martin Mares <mj@ucw.cz>.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include <linux/arcdevice.h>
+
+
+#define VERSION "arcnet: COM90xx chipset support\n"
+
+
+/* Define this to speed up the autoprobe by assuming if only one io port and
+ * shmem are left in the list at Stage 5, they must correspond to each
+ * other.
+ *
+ * This is undefined by default because it might not always be true, and the
+ * extra check makes the autoprobe even more careful. Speed demons can turn
+ * it on - I think it should be fine if you only have one ARCnet card
+ * installed.
+ *
+ * If no ARCnet cards are installed, this delay never happens anyway and thus
+ * the option has no effect.
+ */
+#undef FAST_PROBE
+
+
+/* Internal function declarations */
+static int com90xx_found(int ioaddr, int airq, u_long shmem);
+static void com90xx_command(struct net_device *dev, int command);
+static int com90xx_status(struct net_device *dev);
+static void com90xx_setmask(struct net_device *dev, int mask);
+static int com90xx_reset(struct net_device *dev, int really_reset);
+static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count);
+
+/* Known ARCnet cards */
+
+static struct net_device *cards[16];
+static int numcards;
+
+/* Handy defines for ARCnet specific stuff */
+
+/* The number of low I/O ports used by the card */
+#define ARCNET_TOTAL_SIZE 16
+
+/* Amount of I/O memory used by the card */
+#define BUFFER_SIZE (512)
+#define MIRROR_SIZE (BUFFER_SIZE*4)
+
+/* COM 9026 controller chip --> ARCnet register addresses */
+#define _INTMASK (ioaddr+0) /* writable */
+#define _STATUS (ioaddr+0) /* readable */
+#define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */
+#define _CONFIG (ioaddr+2) /* Configuration register */
+#define _RESET (ioaddr+8) /* software reset (on read) */
+#define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */
+#define _ADDR_HI (ioaddr+15) /* Control registers for said */
+#define _ADDR_LO (ioaddr+14)
+
+#undef ASTATUS
+#undef ACOMMAND
+#undef AINTMASK
+
+#define ASTATUS() inb(_STATUS)
+#define ACOMMAND(cmd) outb((cmd),_COMMAND)
+#define AINTMASK(msk) outb((msk),_INTMASK)
+
+
+static int com90xx_skip_probe __initdata = 0;
+
+/* Module parameters */
+
+static int io; /* use the insmod io= irq= shmem= options */
+static int irq;
+static int shmem;
+static char device[9]; /* use eg. device=arc1 to change name */
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(shmem, int, 0);
+module_param_string(device, device, sizeof(device), 0);
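+
+/*
+ * Example insmod invocation (illustrative values only):
+ *
+ *	insmod com90xx io=0x300 irq=10 shmem=0xd0000 device=arc0
+ */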
+
+static void __init com90xx_probe(void)
+{
+ int count, status, ioaddr, numprint, airq, openparen = 0;
+ unsigned long airqmask;
+ int ports[(0x3f0 - 0x200) / 16 + 1] =
+ {0};
+ u_long shmems[(0xFF800 - 0xA0000) / 2048 + 1] =
+ {0};
+ int numports, numshmems, *port;
+ u_long *p;
+
+ if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
+ return;
+
+ BUGLVL(D_NORMAL) printk(VERSION);
+
+ /* set up the arrays where we'll store the possible probe addresses */
+ numports = numshmems = 0;
+ if (io)
+ ports[numports++] = io;
+ else
+ for (count = 0x200; count <= 0x3f0; count += 16)
+ ports[numports++] = count;
+ if (shmem)
+ shmems[numshmems++] = shmem;
+ else
+ for (count = 0xA0000; count <= 0xFF800; count += 2048)
+ shmems[numshmems++] = count;
+
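+	/* The probe runs in five stages: (1) drop ports that are reserved or
+	 * read 0xFF and reset the rest, (2) wait for those resets to finish,
+	 * (3) drop shmem addresses that lack the 0xD1 signature or are
+	 * read-only, (4) report the surviving shmems, and (5) match each
+	 * remaining port with the shmem whose signature reappears after that
+	 * port is reset, registering one device per match.
+	 */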
+ /* Stage 1: abandon any reserved ports, or ones with status==0xFF
+ * (empty), and reset any others by reading the reset port.
+ */
+ numprint = -1;
+ for (port = &ports[0]; port - ports < numports; port++) {
+ numprint++;
+ numprint %= 8;
+ if (!numprint) {
+ BUGMSG2(D_INIT, "\n");
+ BUGMSG2(D_INIT, "S1: ");
+ }
+ BUGMSG2(D_INIT, "%Xh ", *port);
+
+ ioaddr = *port;
+
+ if (!request_region(*port, ARCNET_TOTAL_SIZE, "arcnet (90xx)")) {
+ BUGMSG2(D_INIT_REASONS, "(request_region)\n");
+ BUGMSG2(D_INIT_REASONS, "S1: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ *port-- = ports[--numports];
+ continue;
+ }
+ if (ASTATUS() == 0xFF) {
+ BUGMSG2(D_INIT_REASONS, "(empty)\n");
+ BUGMSG2(D_INIT_REASONS, "S1: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ *port-- = ports[--numports];
+ continue;
+ }
+ inb(_RESET); /* begin resetting card */
+
+ BUGMSG2(D_INIT_REASONS, "\n");
+ BUGMSG2(D_INIT_REASONS, "S1: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ }
+ BUGMSG2(D_INIT, "\n");
+
+ if (!numports) {
+ BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n");
+ return;
+ }
+ /* Stage 2: we have now reset any possible ARCnet cards, so we can't
+ * do anything until they finish. If D_INIT, print the list of
+ * cards that are left.
+ */
+ numprint = -1;
+ for (port = &ports[0]; port < ports + numports; port++) {
+ numprint++;
+ numprint %= 8;
+ if (!numprint) {
+ BUGMSG2(D_INIT, "\n");
+ BUGMSG2(D_INIT, "S2: ");
+ }
+ BUGMSG2(D_INIT, "%Xh ", *port);
+ }
+ BUGMSG2(D_INIT, "\n");
+ mdelay(RESETtime);
+
+ /* Stage 3: abandon any shmem addresses that don't have the signature
+ * 0xD1 byte in the right place, or are read-only.
+ */
+ numprint = -1;
+ for (p = &shmems[0]; p < shmems + numshmems; p++) {
+ u_long ptr = *p;
+
+ numprint++;
+ numprint %= 8;
+ if (!numprint) {
+ BUGMSG2(D_INIT, "\n");
+ BUGMSG2(D_INIT, "S3: ");
+ }
+ BUGMSG2(D_INIT, "%lXh ", *p);
+
+ if (!request_mem_region(*p, BUFFER_SIZE, "arcnet (90xx)")) {
+ BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n");
+ BUGMSG2(D_INIT_REASONS, "Stage 3: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ *p-- = shmems[--numshmems];
+ continue;
+ }
+ if (isa_readb(ptr) != TESTvalue) {
+ BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n",
+ isa_readb(ptr), TESTvalue);
+ BUGMSG2(D_INIT_REASONS, "S3: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ release_mem_region(*p, BUFFER_SIZE);
+ *p-- = shmems[--numshmems];
+ continue;
+ }
+ /* By writing 0x42 to the TESTvalue location, we also make
+ * sure no "mirror" shmem areas show up - if they occur
+ * in another pass through this loop, they will be discarded
+ * because *cptr != TESTvalue.
+ */
+ isa_writeb(0x42, ptr);
+ if (isa_readb(ptr) != 0x42) {
+ BUGMSG2(D_INIT_REASONS, "(read only)\n");
+ BUGMSG2(D_INIT_REASONS, "S3: ");
+ release_mem_region(*p, BUFFER_SIZE);
+ *p-- = shmems[--numshmems];
+ continue;
+ }
+ BUGMSG2(D_INIT_REASONS, "\n");
+ BUGMSG2(D_INIT_REASONS, "S3: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ }
+ BUGMSG2(D_INIT, "\n");
+
+ if (!numshmems) {
+ BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n");
+ for (port = &ports[0]; port < ports + numports; port++)
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ return;
+ }
+ /* Stage 4: something of a dummy, to report the shmems that are
+ * still possible after stage 3.
+ */
+ numprint = -1;
+ for (p = &shmems[0]; p < shmems + numshmems; p++) {
+ numprint++;
+ numprint %= 8;
+ if (!numprint) {
+ BUGMSG2(D_INIT, "\n");
+ BUGMSG2(D_INIT, "S4: ");
+ }
+ BUGMSG2(D_INIT, "%lXh ", *p);
+ }
+ BUGMSG2(D_INIT, "\n");
+
+	/* Stage 5: for any ports that have the correct status, disable the
+	 * RESET flag, (if no irq is given) probe for an IRQ, and register
+	 * an ARCnet device.
+ *
+ * Currently, we can only register one device per probe, so quit
+ * after the first one is found.
+ */
+ numprint = -1;
+ for (port = &ports[0]; port < ports + numports; port++) {
+ int found = 0;
+ numprint++;
+ numprint %= 8;
+ if (!numprint) {
+ BUGMSG2(D_INIT, "\n");
+ BUGMSG2(D_INIT, "S5: ");
+ }
+ BUGMSG2(D_INIT, "%Xh ", *port);
+
+ ioaddr = *port;
+ status = ASTATUS();
+
+ if ((status & 0x9D)
+ != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
+ BUGMSG2(D_INIT_REASONS, "(status=%Xh)\n", status);
+ BUGMSG2(D_INIT_REASONS, "S5: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ *port-- = ports[--numports];
+ continue;
+ }
+ ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
+ status = ASTATUS();
+ if (status & RESETflag) {
+ BUGMSG2(D_INIT_REASONS, " (eternal reset, status=%Xh)\n",
+ status);
+ BUGMSG2(D_INIT_REASONS, "S5: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ *port-- = ports[--numports];
+ continue;
+ }
+ /* skip this completely if an IRQ was given, because maybe
+ * we're on a machine that locks during autoirq!
+ */
+ if (!irq) {
+ /* if we do this, we're sure to get an IRQ since the
+ * card has just reset and the NORXflag is on until
+ * we tell it to start receiving.
+ */
+ airqmask = probe_irq_on();
+ AINTMASK(NORXflag);
+ udelay(1);
+ AINTMASK(0);
+ airq = probe_irq_off(airqmask);
+
+ if (airq <= 0) {
+ BUGMSG2(D_INIT_REASONS, "(airq=%d)\n", airq);
+ BUGMSG2(D_INIT_REASONS, "S5: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ *port-- = ports[--numports];
+ continue;
+ }
+ } else {
+ airq = irq;
+ }
+
+ BUGMSG2(D_INIT, "(%d,", airq);
+ openparen = 1;
+
+ /* Everything seems okay. But which shmem, if any, puts
+ * back its signature byte when the card is reset?
+ *
+ * If there are multiple cards installed, there might be
+ * multiple shmems still in the list.
+ */
+#ifdef FAST_PROBE
+ if (numports > 1 || numshmems > 1) {
+ inb(_RESET);
+ mdelay(RESETtime);
+ } else {
+ /* just one shmem and port, assume they match */
+ isa_writeb(TESTvalue, shmems[0]);
+ }
+#else
+ inb(_RESET);
+ mdelay(RESETtime);
+#endif
+
+ for (p = &shmems[0]; p < shmems + numshmems; p++) {
+ u_long ptr = *p;
+
+ if (isa_readb(ptr) == TESTvalue) { /* found one */
+ BUGMSG2(D_INIT, "%lXh)\n", *p);
+ openparen = 0;
+
+ /* register the card */
+ if (com90xx_found(*port, airq, *p) == 0)
+ found = 1;
+ numprint = -1;
+
+ /* remove shmem from the list */
+ *p = shmems[--numshmems];
+ break; /* go to the next I/O port */
+ } else {
+ BUGMSG2(D_INIT_REASONS, "%Xh-", isa_readb(ptr));
+ }
+ }
+
+ if (openparen) {
+ BUGLVL(D_INIT) printk("no matching shmem)\n");
+ BUGLVL(D_INIT_REASONS) printk("S5: ");
+ BUGLVL(D_INIT_REASONS) numprint = 0;
+ }
+ if (!found)
+ release_region(*port, ARCNET_TOTAL_SIZE);
+ *port-- = ports[--numports];
+ }
+
+ BUGLVL(D_INIT_REASONS) printk("\n");
+
+ /* Now put back TESTvalue on all leftover shmems. */
+ for (p = &shmems[0]; p < shmems + numshmems; p++) {
+ isa_writeb(TESTvalue, *p);
+ release_mem_region(*p, BUFFER_SIZE);
+ }
+}
+
+
+/* Set up the struct net_device associated with this card. Called after
+ * probing succeeds.
+ */
+static int __init com90xx_found(int ioaddr, int airq, u_long shmem)
+{
+ struct net_device *dev = NULL;
+ struct arcnet_local *lp;
+ u_long first_mirror, last_mirror;
+ int mirror_size;
+
+ /* allocate struct net_device */
+ dev = alloc_arcdev(device);
+ if (!dev) {
+ BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n");
+ release_mem_region(shmem, BUFFER_SIZE);
+ return -ENOMEM;
+ }
+ lp = dev->priv;
+ /* find the real shared memory start/end points, including mirrors */
+
+ /* guess the actual size of one "memory mirror" - the number of
+ * bytes between copies of the shared memory. On most cards, it's
+ * 2k (or there are no mirrors at all) but on some, it's 4k.
+ */
+ mirror_size = MIRROR_SIZE;
+ if (isa_readb(shmem) == TESTvalue
+ && isa_readb(shmem - mirror_size) != TESTvalue
+ && isa_readb(shmem - 2 * mirror_size) == TESTvalue)
+ mirror_size *= 2;
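+	/* Illustrative example: if the signature is readable at shmem and at
+	 * shmem - 4096 but not at shmem - 2048, the card mirrors its buffer
+	 * every 4K rather than every 2K, so mirror_size is doubled to 4096.
+	 */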
+
+ first_mirror = last_mirror = shmem;
+ while (isa_readb(first_mirror) == TESTvalue)
+ first_mirror -= mirror_size;
+ first_mirror += mirror_size;
+
+ while (isa_readb(last_mirror) == TESTvalue)
+ last_mirror += mirror_size;
+ last_mirror -= mirror_size;
+
+ dev->mem_start = first_mirror;
+ dev->mem_end = last_mirror + MIRROR_SIZE - 1;
+
+ release_mem_region(shmem, BUFFER_SIZE);
+ if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)"))
+ goto err_free_dev;
+
+ /* reserve the irq */
+ if (request_irq(airq, &arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
+ goto err_release_mem;
+ }
+ dev->irq = airq;
+
+ /* Initialize the rest of the device structure. */
+ lp->card_name = "COM90xx";
+ lp->hw.command = com90xx_command;
+ lp->hw.status = com90xx_status;
+ lp->hw.intmask = com90xx_setmask;
+ lp->hw.reset = com90xx_reset;
+ lp->hw.owner = THIS_MODULE;
+ lp->hw.copy_to_card = com90xx_copy_to_card;
+ lp->hw.copy_from_card = com90xx_copy_from_card;
+ lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ if (!lp->mem_start) {
+ BUGMSG(D_NORMAL, "Can't remap device memory!\n");
+ goto err_free_irq;
+ }
+
+ /* get and check the station ID from offset 1 in shmem */
+ dev->dev_addr[0] = readb(lp->mem_start + 1);
+
+ dev->base_addr = ioaddr;
+
+ BUGMSG(D_NORMAL, "COM90xx station %02Xh found at %03lXh, IRQ %d, "
+ "ShMem %lXh (%ld*%xh).\n",
+ dev->dev_addr[0],
+ dev->base_addr, dev->irq, dev->mem_start,
+ (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size);
+
+ if (register_netdev(dev))
+ goto err_unmap;
+
+ cards[numcards++] = dev;
+ return 0;
+
+err_unmap:
+ iounmap(lp->mem_start);
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_release_mem:
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+err_free_dev:
+ free_netdev(dev);
+ return -EIO;
+}
+
+
+static void com90xx_command(struct net_device *dev, int cmd)
+{
+ short ioaddr = dev->base_addr;
+
+ ACOMMAND(cmd);
+}
+
+
+static int com90xx_status(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ return ASTATUS();
+}
+
+
+static void com90xx_setmask(struct net_device *dev, int mask)
+{
+ short ioaddr = dev->base_addr;
+
+ AINTMASK(mask);
+}
+
+
+/*
+ * Do a hardware reset on the card, and set up necessary registers.
+ *
+ * This should be called as little as possible, because it disrupts the
+ * token on the network (causes a RECON) and requires a significant delay.
+ *
+ * However, it does make sure the card is in a defined state.
+ */
+static int com90xx_reset(struct net_device *dev, int really_reset)
+{
+ struct arcnet_local *lp = dev->priv;
+ short ioaddr = dev->base_addr;
+
+ BUGMSG(D_INIT, "Resetting (status=%02Xh)\n", ASTATUS());
+
+ if (really_reset) {
+ /* reset the card */
+ inb(_RESET);
+ mdelay(RESETtime);
+ }
+ ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */
+ ACOMMAND(CFLAGScmd | CONFIGclear);
+
+ /* don't do this until we verify that it doesn't hurt older cards! */
+ /* outb(inb(_CONFIG) | ENABLE16flag, _CONFIG); */
+
+ /* verify that the ARCnet signature byte is present */
+ if (readb(lp->mem_start) != TESTvalue) {
+ if (really_reset)
+ BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
+ return 1;
+ }
+ /* enable extended (512-byte) packets */
+ ACOMMAND(CONFIGcmd | EXTconf);
+
+	/* clean out all the memory to make debugging easier :) */
+ BUGLVL(D_DURING)
+ memset_io(lp->mem_start, 0x42, 2048);
+
+ /* done! return success. */
+ return 0;
+}
+
+static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
+ TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
+}
+
+
+static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
+ void *buf, int count)
+{
+ struct arcnet_local *lp = dev->priv;
+ void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
+ TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
+}
+
+
+MODULE_LICENSE("GPL");
+
+static int __init com90xx_init(void)
+{
+ if (irq == 2)
+ irq = 9;
+ com90xx_probe();
+ if (!numcards)
+ return -EIO;
+ return 0;
+}
+
+static void __exit com90xx_exit(void)
+{
+ struct net_device *dev;
+ struct arcnet_local *lp;
+ int count;
+
+ for (count = 0; count < numcards; count++) {
+ dev = cards[count];
+ lp = dev->priv;
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(lp->mem_start);
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ free_netdev(dev);
+ }
+}
+
+module_init(com90xx_init);
+module_exit(com90xx_exit);
+
+#ifndef MODULE
+static int __init com90xx_setup(char *s)
+{
+ int ints[8];
+
+ s = get_options(s, 8, ints);
+ if (!ints[0] && !*s) {
+ printk("com90xx: Disabled.\n");
+ return 1;
+ }
+
+ switch (ints[0]) {
+	default:		/* ERROR */
+		printk("com90xx: Too many arguments.\n");
+		/* fall through */
+	case 3:		/* Mem address */
+		shmem = ints[3];
+		/* fall through */
+	case 2:		/* IRQ */
+		irq = ints[2];
+		/* fall through */
+	case 1:		/* IO address */
+		io = ints[1];
+ }
+
+ if (*s)
+ snprintf(device, sizeof(device), "%s", s);
+
+ return 1;
+}
+
+__setup("com90xx=", com90xx_setup);
+#endif
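
The probe above collapses the repeated copies ("mirrors") of the COM90xx shared memory into a single mem_start..mem_end window by walking outward from shmem until TESTvalue stops appearing. A standalone sketch of that scan, not part of this patch: fake_readb() stands in for isa_readb(), and the TESTVALUE, MIRRORSZ and address values are illustrative assumptions only.

#include <stdio.h>

#define TESTVALUE 0xd1			/* assumed stand-in for TESTvalue */
#define MIRRORSZ  0x0800		/* one 2K mirror, as in MIRROR_SIZE */

/* pretend the card mirrors its 2K window at D0000h, D0800h and D1000h */
static int fake_readb(unsigned long addr)
{
	return (addr >= 0xd0000 && addr < 0xd1800) ? TESTVALUE : 0x00;
}

int main(void)
{
	unsigned long shmem = 0xd0800, first_mirror, last_mirror;

	first_mirror = last_mirror = shmem;
	while (fake_readb(first_mirror) == TESTVALUE)
		first_mirror -= MIRRORSZ;
	first_mirror += MIRRORSZ;

	while (fake_readb(last_mirror) == TESTVALUE)
		last_mirror += MIRRORSZ;
	last_mirror -= MIRRORSZ;

	printf("mem_start=%05lXh mem_end=%05lXh\n",
	       first_mirror, last_mirror + MIRRORSZ - 1);
	return 0;
}
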
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
new file mode 100644
index 000000000000..6d7913704fb5
--- /dev/null
+++ b/drivers/net/arcnet/rfc1051.c
@@ -0,0 +1,253 @@
+/*
+ * Linux ARCnet driver - RFC1051 ("simple" standard) packet encapsulation
+ *
+ * Written 1994-1999 by Avery Pennarun.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/arcdevice.h>
+
+#define VERSION "arcnet: RFC1051 \"simple standard\" (`s') encapsulation support loaded.\n"
+
+
+static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev);
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length);
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr);
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum);
+
+
+struct ArcProto rfc1051_proto =
+{
+ .suffix = 's',
+ .mtu = XMTU - RFC1051_HDR_SIZE,
+ .is_ip = 1,
+ .rx = rx,
+ .build_header = build_header,
+ .prepare_tx = prepare_tx,
+ .continue_tx = NULL,
+ .ack_tx = NULL
+};
+
+
+static int __init arcnet_rfc1051_init(void)
+{
+ printk(VERSION);
+
+ arc_proto_map[ARC_P_IP_RFC1051]
+ = arc_proto_map[ARC_P_ARP_RFC1051]
+ = &rfc1051_proto;
+
+ /* if someone else already owns the broadcast, we won't take it */
+ if (arc_bcast_proto == arc_proto_default)
+ arc_bcast_proto = &rfc1051_proto;
+
+ return 0;
+}
+
+static void __exit arcnet_rfc1051_exit(void)
+{
+ arcnet_unregister_proto(&rfc1051_proto);
+}
+
+module_init(arcnet_rfc1051_init);
+module_exit(arcnet_rfc1051_exit);
+
+MODULE_LICENSE("GPL");
+
+/*
+ * Determine a packet's protocol ID.
+ *
+ * With ARCnet we have to convert everything to Ethernet-style stuff.
+ */
+static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct archdr *pkt = (struct archdr *) skb->data;
+ struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
+ int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
+
+ /* Pull off the arcnet header. */
+ skb->mac.raw = skb->data;
+ skb_pull(skb, hdr_size);
+
+ if (pkt->hard.dest == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else if (dev->flags & IFF_PROMISC) {
+ /* if we're not sending to ourselves :) */
+ if (pkt->hard.dest != dev->dev_addr[0])
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ /* now return the protocol number */
+ switch (soft->proto) {
+ case ARC_P_IP_RFC1051:
+ return htons(ETH_P_IP);
+ case ARC_P_ARP_RFC1051:
+ return htons(ETH_P_ARP);
+
+ default:
+ lp->stats.rx_errors++;
+ lp->stats.rx_crc_errors++;
+ return 0;
+ }
+
+ return htons(ETH_P_IP);
+}
+
+
+/* packet receiver */
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct sk_buff *skb;
+ struct archdr *pkt = pkthdr;
+ int ofs;
+
+	BUGMSG(D_DURING, "it's an RFC1051 packet (length=%d)\n", length);
+
+ if (length >= MinTU)
+ ofs = 512 - length;
+ else
+ ofs = 256 - length;
+
+ skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+ if (skb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length + ARC_HDR_SIZE);
+ skb->dev = dev;
+
+ pkt = (struct archdr *) skb->data;
+
+ /* up to sizeof(pkt->soft) has already been copied from the card */
+ memcpy(pkt, pkthdr, sizeof(struct archdr));
+ if (length > sizeof(pkt->soft))
+ lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
+ pkt->soft.raw + sizeof(pkt->soft),
+ length - sizeof(pkt->soft));
+
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+
+ skb->protocol = type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+}
+
+
+/*
+ * Create the ARCnet hard/soft headers for RFC1051.
+ */
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr)
+{
+ struct arcnet_local *lp = dev->priv;
+ int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
+ struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+ struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
+
+ /* set the protocol ID according to RFC1051 */
+ switch (type) {
+ case ETH_P_IP:
+ soft->proto = ARC_P_IP_RFC1051;
+ break;
+ case ETH_P_ARP:
+ soft->proto = ARC_P_ARP_RFC1051;
+ break;
+ default:
+ BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n",
+ type, type);
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ return 0;
+ }
+
+
+ /*
+ * Set the source hardware address.
+ *
+ * This is pretty pointless for most purposes, but it can help in
+ * debugging. ARCnet does not allow us to change the source address in
+ * the actual packet sent.
+ */
+ pkt->hard.source = *dev->dev_addr;
+
+ /* see linux/net/ethernet/eth.c to see where I got the following */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ /*
+ * FIXME: fill in the last byte of the dest ipaddr here to better
+ * comply with RFC1051 in "noarp" mode.
+ */
+ pkt->hard.dest = 0;
+ return hdr_size;
+ }
+ /* otherwise, just fill it in and go! */
+ pkt->hard.dest = daddr;
+
+ return hdr_size; /* success */
+}
+
+
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct arc_hardware *hard = &pkt->hard;
+ int ofs;
+
+ BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
+ lp->next_tx, lp->cur_tx, bufnum);
+
+ length -= ARC_HDR_SIZE; /* hard header is not included in packet length */
+
+ if (length > XMTU) {
+ /* should never happen! other people already check for this. */
+ BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
+ length, XMTU);
+ length = XMTU;
+ }
+ if (length > MinTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length;
+ } else if (length > MTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - length - 3;
+ } else
+ hard->offset[0] = ofs = 256 - length;
+
+ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
+ lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
+
+ lp->lastload_dest = hard->dest;
+
+ return 1; /* done */
+}
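
prepare_tx() above decides where the soft header and data sit inside the card's 512-byte transmit buffer: short packets are packed so they end at byte 255, long packets so they end at byte 511, and the awkward in-between lengths get three pad bytes. A standalone sketch of that offset rule, not part of this patch; the ARC_MTU/ARC_MinTU values are assumed to match the MTU (253) and MinTU (257) constants in this era's <linux/arcdevice.h>.

#include <stdio.h>

#define ARC_MTU   253		/* assumed: largest "short" soft length */
#define ARC_MinTU 257		/* assumed: smallest "long" soft length */

static int rfc1051_tx_offset(int softlen)
{
	if (softlen > ARC_MinTU)	/* long packet: data ends at byte 511 */
		return 512 - softlen;
	if (softlen > ARC_MTU)		/* in-between: three pad bytes added */
		return 512 - softlen - 3;
	return 256 - softlen;		/* short packet: data ends at byte 255 */
}

int main(void)
{
	int len;

	for (len = 250; len <= 260; len += 5)
		printf("soft length %3d -> buffer offset %3d\n",
		       len, rfc1051_tx_offset(len));
	return 0;
}
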
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
new file mode 100644
index 000000000000..6b6ae4bf3d39
--- /dev/null
+++ b/drivers/net/arcnet/rfc1201.c
@@ -0,0 +1,549 @@
+/*
+ * Linux ARCnet driver - RFC1201 (standard) packet encapsulation
+ *
+ * Written 1994-1999 by Avery Pennarun.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/arcdevice.h>
+
+MODULE_LICENSE("GPL");
+#define VERSION "arcnet: RFC1201 \"standard\" (`a') encapsulation support loaded.\n"
+
+
+static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev);
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length);
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr);
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum);
+static int continue_tx(struct net_device *dev, int bufnum);
+
+struct ArcProto rfc1201_proto =
+{
+ .suffix = 'a',
+ .mtu = 1500, /* could be more, but some receivers can't handle it... */
+	.is_ip		= 1,	/* This is for sending IP and ARP packets */
+ .rx = rx,
+ .build_header = build_header,
+ .prepare_tx = prepare_tx,
+ .continue_tx = continue_tx,
+ .ack_tx = NULL
+};
+
+
+static int __init arcnet_rfc1201_init(void)
+{
+ printk(VERSION);
+
+ arc_proto_map[ARC_P_IP]
+ = arc_proto_map[ARC_P_IPV6]
+ = arc_proto_map[ARC_P_ARP]
+ = arc_proto_map[ARC_P_RARP]
+ = arc_proto_map[ARC_P_IPX]
+ = arc_proto_map[ARC_P_NOVELL_EC]
+ = &rfc1201_proto;
+
+ /* if someone else already owns the broadcast, we won't take it */
+ if (arc_bcast_proto == arc_proto_default)
+ arc_bcast_proto = &rfc1201_proto;
+
+ return 0;
+}
+
+static void __exit arcnet_rfc1201_exit(void)
+{
+ arcnet_unregister_proto(&rfc1201_proto);
+}
+
+module_init(arcnet_rfc1201_init);
+module_exit(arcnet_rfc1201_exit);
+
+/*
+ * Determine a packet's protocol ID.
+ *
+ * With ARCnet we have to convert everything to Ethernet-style stuff.
+ */
+static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct archdr *pkt = (struct archdr *) skb->data;
+ struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
+ struct arcnet_local *lp = dev->priv;
+ int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
+
+ /* Pull off the arcnet header. */
+ skb->mac.raw = skb->data;
+ skb_pull(skb, hdr_size);
+
+ if (pkt->hard.dest == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else if (dev->flags & IFF_PROMISC) {
+ /* if we're not sending to ourselves :) */
+ if (pkt->hard.dest != dev->dev_addr[0])
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ /* now return the protocol number */
+ switch (soft->proto) {
+ case ARC_P_IP:
+ return htons(ETH_P_IP);
+ case ARC_P_IPV6:
+ return htons(ETH_P_IPV6);
+ case ARC_P_ARP:
+ return htons(ETH_P_ARP);
+ case ARC_P_RARP:
+ return htons(ETH_P_RARP);
+
+ case ARC_P_IPX:
+ case ARC_P_NOVELL_EC:
+ return htons(ETH_P_802_3);
+ default:
+ lp->stats.rx_errors++;
+ lp->stats.rx_crc_errors++;
+ return 0;
+ }
+
+ return htons(ETH_P_IP);
+}
+
+
+/* packet receiver */
+static void rx(struct net_device *dev, int bufnum,
+ struct archdr *pkthdr, int length)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct sk_buff *skb;
+ struct archdr *pkt = pkthdr;
+ struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201;
+ int saddr = pkt->hard.source, ofs;
+ struct Incoming *in = &lp->rfc1201.incoming[saddr];
+
+ BUGMSG(D_DURING, "it's an RFC1201 packet (length=%d)\n", length);
+
+ if (length >= MinTU)
+ ofs = 512 - length;
+ else
+ ofs = 256 - length;
+
+ if (soft->split_flag == 0xFF) { /* Exception Packet */
+ if (length >= 4 + RFC1201_HDR_SIZE)
+ BUGMSG(D_DURING, "compensating for exception packet\n");
+ else {
+			BUGMSG(D_EXTRA, "short RFC1201 exception packet from %02Xh\n",
+ saddr);
+ return;
+ }
+
+ /* skip over 4-byte junkola */
+ length -= 4;
+ ofs += 4;
+ lp->hw.copy_from_card(dev, bufnum, 512 - length,
+ soft, sizeof(pkt->soft));
+ }
+ if (!soft->split_flag) { /* not split */
+ BUGMSG(D_RX, "incoming is not split (splitflag=%d)\n",
+ soft->split_flag);
+
+ if (in->skb) { /* already assembling one! */
+ BUGMSG(D_EXTRA, "aborting assembly (seq=%d) for unsplit packet (splitflag=%d, seq=%d)\n",
+ in->sequence, soft->split_flag, soft->sequence);
+ lp->rfc1201.aborted_seq = soft->sequence;
+ dev_kfree_skb_irq(in->skb);
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors++;
+ in->skb = NULL;
+ }
+ in->sequence = soft->sequence;
+
+ skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
+ if (skb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length + ARC_HDR_SIZE);
+ skb->dev = dev;
+
+ pkt = (struct archdr *) skb->data;
+ soft = &pkt->soft.rfc1201;
+
+ /* up to sizeof(pkt->soft) has already been copied from the card */
+ memcpy(pkt, pkthdr, sizeof(struct archdr));
+ if (length > sizeof(pkt->soft))
+ lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
+ pkt->soft.raw + sizeof(pkt->soft),
+ length - sizeof(pkt->soft));
+
+ /*
+ * ARP packets have problems when sent from some DOS systems: the
+ * source address is always 0! So we take the hardware source addr
+ * (which is impossible to fumble) and insert it ourselves.
+ */
+ if (soft->proto == ARC_P_ARP) {
+ struct arphdr *arp = (struct arphdr *) soft->payload;
+
+ /* make sure addresses are the right length */
+ if (arp->ar_hln == 1 && arp->ar_pln == 4) {
+ uint8_t *cptr = (uint8_t *) arp + sizeof(struct arphdr);
+
+ if (!*cptr) { /* is saddr = 00? */
+ BUGMSG(D_EXTRA,
+ "ARP source address was 00h, set to %02Xh.\n",
+ saddr);
+ lp->stats.rx_crc_errors++;
+ *cptr = saddr;
+ } else {
+ BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
+ *cptr);
+ }
+ } else {
+ BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
+ arp->ar_hln, arp->ar_pln);
+ lp->stats.rx_errors++;
+ lp->stats.rx_crc_errors++;
+ }
+ }
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+
+ skb->protocol = type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ } else { /* split packet */
+ /*
+ * NOTE: MSDOS ARP packet correction should only need to apply to
+ * unsplit packets, since ARP packets are so short.
+ *
+ * My interpretation of the RFC1201 document is that if a packet is
+ * received out of order, the entire assembly process should be
+ * aborted.
+ *
+ * The RFC also mentions "it is possible for successfully received
+ * packets to be retransmitted." As of 0.40 all previously received
+ * packets are allowed, not just the most recent one.
+ *
+ * We allow multiple assembly processes, one for each ARCnet card
+ * possible on the network. Seems rather like a waste of memory,
+ * but there's no other way to be reliable.
+ */
+
+ BUGMSG(D_RX, "packet is split (splitflag=%d, seq=%d)\n",
+ soft->split_flag, in->sequence);
+
+ if (in->skb && in->sequence != soft->sequence) {
+ BUGMSG(D_EXTRA, "wrong seq number (saddr=%d, expected=%d, seq=%d, splitflag=%d)\n",
+ saddr, in->sequence, soft->sequence,
+ soft->split_flag);
+ dev_kfree_skb_irq(in->skb);
+ in->skb = NULL;
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors++;
+ in->lastpacket = in->numpackets = 0;
+ }
+ if (soft->split_flag & 1) { /* first packet in split */
+ BUGMSG(D_RX, "brand new splitpacket (splitflag=%d)\n",
+ soft->split_flag);
+ if (in->skb) { /* already assembling one! */
+ BUGMSG(D_EXTRA, "aborting previous (seq=%d) assembly "
+ "(splitflag=%d, seq=%d)\n",
+ in->sequence, soft->split_flag,
+ soft->sequence);
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors++;
+ dev_kfree_skb_irq(in->skb);
+ }
+ in->sequence = soft->sequence;
+ in->numpackets = ((unsigned) soft->split_flag >> 1) + 2;
+ in->lastpacket = 1;
+
+ if (in->numpackets > 16) {
+				BUGMSG(D_EXTRA, "incoming packet has more than 16 segments; dropping. (splitflag=%d)\n",
+ soft->split_flag);
+ lp->rfc1201.aborted_seq = soft->sequence;
+ lp->stats.rx_errors++;
+ lp->stats.rx_length_errors++;
+ return;
+ }
+ in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
+ GFP_ATOMIC);
+ if (skb == NULL) {
+ BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
+ lp->rfc1201.aborted_seq = soft->sequence;
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb->dev = dev;
+ pkt = (struct archdr *) skb->data;
+ soft = &pkt->soft.rfc1201;
+
+ memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
+ skb_put(skb, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
+
+ soft->split_flag = 0; /* end result won't be split */
+ } else { /* not first packet */
+ int packetnum = ((unsigned) soft->split_flag >> 1) + 1;
+
+ /*
+ * if we're not assembling, there's no point trying to
+ * continue.
+ */
+ if (!in->skb) {
+ if (lp->rfc1201.aborted_seq != soft->sequence) {
+ BUGMSG(D_EXTRA, "can't continue split without starting "
+ "first! (splitflag=%d, seq=%d, aborted=%d)\n",
+ soft->split_flag, soft->sequence,
+ lp->rfc1201.aborted_seq);
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors++;
+ }
+ return;
+ }
+ in->lastpacket++;
+ if (packetnum != in->lastpacket) { /* not the right flag! */
+ /* harmless duplicate? ignore. */
+ if (packetnum <= in->lastpacket - 1) {
+ BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n",
+ soft->split_flag);
+ lp->stats.rx_errors++;
+ lp->stats.rx_frame_errors++;
+ return;
+ }
+ /* "bad" duplicate, kill reassembly */
+ BUGMSG(D_EXTRA, "out-of-order splitpacket, reassembly "
+ "(seq=%d) aborted (splitflag=%d, seq=%d)\n",
+ in->sequence, soft->split_flag, soft->sequence);
+ lp->rfc1201.aborted_seq = soft->sequence;
+ dev_kfree_skb_irq(in->skb);
+ in->skb = NULL;
+ lp->stats.rx_errors++;
+ lp->stats.rx_missed_errors++;
+ in->lastpacket = in->numpackets = 0;
+ return;
+ }
+ pkt = (struct archdr *) in->skb->data;
+ soft = &pkt->soft.rfc1201;
+ }
+
+ skb = in->skb;
+
+ lp->hw.copy_from_card(dev, bufnum, ofs + RFC1201_HDR_SIZE,
+ skb->data + skb->len,
+ length - RFC1201_HDR_SIZE);
+ skb_put(skb, length - RFC1201_HDR_SIZE);
+
+ /* are we done? */
+ if (in->lastpacket == in->numpackets) {
+ in->skb = NULL;
+ in->lastpacket = in->numpackets = 0;
+
+			BUGMSG(D_SKB_SIZE, "skb: received %d bytes from %02X (split)\n",
+			       skb->len, pkt->hard.source);
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
+
+ skb->protocol = type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ }
+}
+
+
+/* Create the ARCnet hard/soft headers for RFC1201. */
+static int build_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, uint8_t daddr)
+{
+ struct arcnet_local *lp = dev->priv;
+ int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
+ struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
+ struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
+
+ /* set the protocol ID according to RFC1201 */
+ switch (type) {
+ case ETH_P_IP:
+ soft->proto = ARC_P_IP;
+ break;
+ case ETH_P_IPV6:
+ soft->proto = ARC_P_IPV6;
+ break;
+ case ETH_P_ARP:
+ soft->proto = ARC_P_ARP;
+ break;
+ case ETH_P_RARP:
+ soft->proto = ARC_P_RARP;
+ break;
+ case ETH_P_IPX:
+ case ETH_P_802_3:
+ case ETH_P_802_2:
+ soft->proto = ARC_P_IPX;
+ break;
+ case ETH_P_ATALK:
+ soft->proto = ARC_P_ATALK;
+ break;
+ default:
+ BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n",
+ type, type);
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ return 0;
+ }
+
+ /*
+ * Set the source hardware address.
+ *
+ * This is pretty pointless for most purposes, but it can help in
+ * debugging. ARCnet does not allow us to change the source address in
+ * the actual packet sent.
+ */
+ pkt->hard.source = *dev->dev_addr;
+
+ soft->sequence = htons(lp->rfc1201.sequence++);
+ soft->split_flag = 0; /* split packets are done elsewhere */
+
+ /* see linux/net/ethernet/eth.c to see where I got the following */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ /*
+ * FIXME: fill in the last byte of the dest ipaddr here to better
+ * comply with RFC1051 in "noarp" mode. For now, always broadcasting
+ * will probably at least get packets sent out :)
+ */
+ pkt->hard.dest = 0;
+ return hdr_size;
+ }
+ /* otherwise, drop in the dest address */
+ pkt->hard.dest = daddr;
+ return hdr_size;
+}
+
+
+static void load_pkt(struct net_device *dev, struct arc_hardware *hard,
+ struct arc_rfc1201 *soft, int softlen, int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ int ofs;
+
+ /* assume length <= XMTU: someone should have handled that by now. */
+
+ if (softlen > MinTU) {
+ hard->offset[0] = 0;
+ hard->offset[1] = ofs = 512 - softlen;
+ } else if (softlen > MTU) { /* exception packet - add an extra header */
+ struct arc_rfc1201 excsoft;
+
+ excsoft.proto = soft->proto;
+ excsoft.split_flag = 0xff;
+ excsoft.sequence = 0xffff;
+
+ hard->offset[0] = 0;
+ ofs = 512 - softlen;
+ hard->offset[1] = ofs - RFC1201_HDR_SIZE;
+ lp->hw.copy_to_card(dev, bufnum, ofs - RFC1201_HDR_SIZE,
+ &excsoft, RFC1201_HDR_SIZE);
+ } else
+ hard->offset[0] = ofs = 256 - softlen;
+
+ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
+ lp->hw.copy_to_card(dev, bufnum, ofs, soft, softlen);
+
+ lp->lastload_dest = hard->dest;
+}
+
+
+static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
+ int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ const int maxsegsize = XMTU - RFC1201_HDR_SIZE;
+ struct Outgoing *out;
+
+
+ BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
+ lp->next_tx, lp->cur_tx, bufnum);
+
+ length -= ARC_HDR_SIZE; /* hard header is not included in packet length */
+ pkt->soft.rfc1201.split_flag = 0;
+
+ /* need to do a split packet? */
+ if (length > XMTU) {
+ out = &lp->outgoing;
+
+ out->length = length - RFC1201_HDR_SIZE;
+ out->dataleft = lp->outgoing.length;
+ out->numsegs = (out->dataleft + maxsegsize - 1) / maxsegsize;
+ out->segnum = 0;
+
+ BUGMSG(D_DURING, "rfc1201 prep_tx: ready for %d-segment split "
+ "(%d bytes, seq=%d)\n", out->numsegs, out->length,
+ pkt->soft.rfc1201.sequence);
+
+ return 0; /* not done */
+ }
+ /* just load the packet into the buffers and send it off */
+ load_pkt(dev, &pkt->hard, &pkt->soft.rfc1201, length, bufnum);
+
+ return 1; /* done */
+}
+
+
+static int continue_tx(struct net_device *dev, int bufnum)
+{
+ struct arcnet_local *lp = dev->priv;
+ struct Outgoing *out = &lp->outgoing;
+ struct arc_hardware *hard = &out->pkt->hard;
+ struct arc_rfc1201 *soft = &out->pkt->soft.rfc1201, *newsoft;
+ int maxsegsize = XMTU - RFC1201_HDR_SIZE;
+ int seglen;
+
+ BUGMSG(D_DURING,
+ "rfc1201 continue_tx: loading segment %d(+1) of %d (seq=%d)\n",
+ out->segnum, out->numsegs, soft->sequence);
+
+ /* the "new" soft header comes right before the data chunk */
+ newsoft = (struct arc_rfc1201 *)
+ (out->pkt->soft.raw + out->length - out->dataleft);
+
+ if (!out->segnum) /* first packet; newsoft == soft */
+ newsoft->split_flag = ((out->numsegs - 2) << 1) | 1;
+ else {
+ newsoft->split_flag = out->segnum << 1;
+ newsoft->proto = soft->proto;
+ newsoft->sequence = soft->sequence;
+ }
+
+ seglen = maxsegsize;
+ if (seglen > out->dataleft)
+ seglen = out->dataleft;
+ out->dataleft -= seglen;
+
+ load_pkt(dev, hard, newsoft, seglen + RFC1201_HDR_SIZE, bufnum);
+
+ out->segnum++;
+ if (out->segnum >= out->numsegs)
+ return 1;
+ else
+ return 0;
+}
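
The split_flag byte is what ties RFC1201 fragments back together: 0 means unsplit, 0xFF marks the extra "exception" header for awkward in-between lengths, the first fragment carries ((numsegs - 2) << 1) | 1, and every later fragment carries segnum << 1, exactly as continue_tx() builds them and rx() decodes them above. A standalone sketch of that round trip, not part of this patch; split_flag_for() is a hypothetical helper name.

#include <stdio.h>

/* encode: fragment 'segnum' (0-based) of a 'numsegs'-fragment packet */
static unsigned char split_flag_for(int segnum, int numsegs)
{
	if (segnum == 0)		/* first fragment announces the count */
		return ((numsegs - 2) << 1) | 1;
	return segnum << 1;		/* continuation fragment */
}

int main(void)
{
	int numsegs = 4, segnum;

	for (segnum = 0; segnum < numsegs; segnum++) {
		unsigned char flag = split_flag_for(segnum, numsegs);

		if (flag & 1)		/* rx() recovers numpackets this way */
			printf("flag %02Xh -> first of %d fragments\n",
			       flag, (flag >> 1) + 2);
		else			/* rx() recovers the fragment number */
			printf("flag %02Xh -> fragment #%d of the split\n",
			       flag, (flag >> 1) + 1);
	}
	return 0;
}
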
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
new file mode 100644
index 000000000000..9fe93acfc8ef
--- /dev/null
+++ b/drivers/net/ariadne.c
@@ -0,0 +1,878 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver (p2@mind.be)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+
+#include "ariadne.h"
+
+
+#ifdef ARIADNE_DEBUG
+int ariadne_debug = ARIADNE_DEBUG;
+#else
+int ariadne_debug = 1;
+#endif
+
+
+ /*
+ * Macros to Fix Endianness problems
+ */
+
+ /* Swap the Bytes in a WORD */
+#define swapw(x) (((x>>8)&0x00ff)|((x<<8)&0xff00))
+ /* Get the Low BYTE in a WORD */
+#define lowb(x) (x&0xff)
+ /* Get the Swapped High WORD in a LONG */
+#define swhighw(x) ((((x)>>8)&0xff00)|(((x)>>24)&0x00ff))
+ /* Get the Swapped Low WORD in a LONG */
+#define swloww(x) ((((x)<<8)&0xff00)|(((x)>>8)&0x00ff))
+
+
+ /*
+ * Transmit/Receive Ring Definitions
+ */
+
+#define TX_RING_SIZE 5
+#define RX_RING_SIZE 16
+
+#define PKT_BUF_SIZE 1520
+
+
+ /*
+ * Private Device Data
+ */
+
+struct ariadne_private {
+ volatile struct TDRE *tx_ring[TX_RING_SIZE];
+ volatile struct RDRE *rx_ring[RX_RING_SIZE];
+ volatile u_short *tx_buff[TX_RING_SIZE];
+ volatile u_short *rx_buff[RX_RING_SIZE];
+ int cur_tx, cur_rx; /* The next free ring entry */
+ int dirty_tx; /* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ char tx_full;
+};
+
+
+ /*
+ * Structure Created in the Ariadne's RAM Buffer
+ */
+
+struct lancedata {
+ struct TDRE tx_ring[TX_RING_SIZE];
+ struct RDRE rx_ring[RX_RING_SIZE];
+ u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE/sizeof(u_short)];
+ u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE/sizeof(u_short)];
+};
+
+static int ariadne_open(struct net_device *dev);
+static void ariadne_init_ring(struct net_device *dev);
+static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void ariadne_tx_timeout(struct net_device *dev);
+static int ariadne_rx(struct net_device *dev);
+static void ariadne_reset(struct net_device *dev);
+static irqreturn_t ariadne_interrupt(int irq, void *data, struct pt_regs *fp);
+static int ariadne_close(struct net_device *dev);
+static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
+#ifdef HAVE_MULTICAST
+static void set_multicast_list(struct net_device *dev);
+#endif
+
+
+static void memcpyw(volatile u_short *dest, u_short *src, int len)
+{
+ while (len >= 2) {
+ *(dest++) = *(src++);
+ len -= 2;
+ }
+ if (len == 1)
+ *dest = (*(u_char *)src)<<8;
+}
+
+
+static int __devinit ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void __devexit ariadne_remove_one(struct zorro_dev *z);
+
+
+static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
+ { 0 }
+};
+
+static struct zorro_driver ariadne_driver = {
+ .name = "ariadne",
+ .id_table = ariadne_zorro_tbl,
+ .probe = ariadne_init_one,
+ .remove = __devexit_p(ariadne_remove_one),
+};
+
+static int __devinit ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board+ARIADNE_LANCE;
+ unsigned long mem_start = board+ARIADNE_RAM;
+ struct resource *r1, *r2;
+ struct net_device *dev;
+ struct ariadne_private *priv;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_resource(r1);
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct ariadne_private));
+ if (dev == NULL) {
+ release_resource(r1);
+ release_resource(r2);
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x60;
+ dev->dev_addr[2] = 0x30;
+ dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
+ dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
+ dev->base_addr = ZTWO_VADDR(base_addr);
+ dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE;
+
+ dev->open = &ariadne_open;
+ dev->stop = &ariadne_close;
+ dev->hard_start_xmit = &ariadne_start_xmit;
+ dev->tx_timeout = &ariadne_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->get_stats = &ariadne_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_resource(r1);
+ release_resource(r2);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ return 0;
+}
+
+
+static int ariadne_open(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ u_short in;
+ u_long version;
+ int i;
+
+ /* Reset the LANCE */
+ in = lance->Reset;
+
+ /* Stop the LANCE */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+
+ /* Check the LANCE version */
+ lance->RAP = CSR88; /* Chip ID */
+ version = swapw(lance->RDP);
+ lance->RAP = CSR89; /* Chip ID */
+ version |= swapw(lance->RDP)<<16;
+ if ((version & 0x00000fff) != 0x00000003) {
+ printk(KERN_WARNING "ariadne_open: Couldn't find AMD Ethernet Chip\n");
+ return -EAGAIN;
+ }
+ if ((version & 0x0ffff000) != 0x00003000) {
+ printk(KERN_WARNING "ariadne_open: Couldn't find Am79C960 (Wrong part "
+ "number = %ld)\n", (version & 0x0ffff000)>>12);
+ return -EAGAIN;
+ }
+#if 0
+ printk(KERN_DEBUG "ariadne_open: Am79C960 (PCnet-ISA) Revision %ld\n",
+ (version & 0xf0000000)>>28);
+#endif
+
+ ariadne_init_ring(dev);
+
+ /* Miscellaneous Stuff */
+ lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR4; /* Test and Features Control */
+ lance->RDP = DPOLL|APAD_XMT|MFCOM|RCVCCOM|TXSTRTM|JABM;
+
+ /* Set the Multicast Table */
+ lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
+ lance->RDP = 0x0000;
+
+ /* Set the Ethernet Hardware Address */
+ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+
+ /* Set the Init Block Mode */
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000;
+
+ /* Set the Transmit Descriptor Ring Pointer */
+ lance->RAP = CSR30; /* Base Address of Transmit Ring */
+ lance->RDP = swloww(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
+	lance->RAP = CSR31;		/* Base Address of Transmit Ring */
+ lance->RDP = swhighw(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
+
+ /* Set the Receive Descriptor Ring Pointer */
+ lance->RAP = CSR24; /* Base Address of Receive Ring */
+ lance->RDP = swloww(ARIADNE_RAM+offsetof(struct lancedata, rx_ring));
+ lance->RAP = CSR25; /* Base Address of Receive Ring */
+ lance->RDP = swhighw(ARIADNE_RAM+offsetof(struct lancedata, rx_ring));
+
+ /* Set the Number of RX and TX Ring Entries */
+ lance->RAP = CSR76; /* Receive Ring Length */
+ lance->RDP = swapw(((u_short)-RX_RING_SIZE));
+ lance->RAP = CSR78; /* Transmit Ring Length */
+ lance->RDP = swapw(((u_short)-TX_RING_SIZE));
+
+ /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
+ lance->RAP = ISACSR2; /* Miscellaneous Configuration */
+ lance->IDP = ASEL;
+
+ /* LED Control */
+ lance->RAP = ISACSR5; /* LED1 Status */
+ lance->IDP = PSE|XMTE;
+ lance->RAP = ISACSR6; /* LED2 Status */
+ lance->IDP = PSE|COLE;
+ lance->RAP = ISACSR7; /* LED3 Status */
+ lance->IDP = PSE|RCVE;
+
+ netif_start_queue(dev);
+
+ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (i) return i;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA|STRT;
+
+ return 0;
+}
+
+
+static void ariadne_init_ring(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
+ int i;
+
+ netif_stop_queue(dev);
+
+ priv->tx_full = 0;
+ priv->cur_rx = priv->cur_tx = 0;
+ priv->dirty_tx = 0;
+
+ /* Set up TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ volatile struct TDRE *t = &lancedata->tx_ring[i];
+ t->TMD0 = swloww(ARIADNE_RAM+offsetof(struct lancedata, tx_buff[i]));
+ t->TMD1 = swhighw(ARIADNE_RAM+offsetof(struct lancedata, tx_buff[i])) |
+ TF_STP | TF_ENP;
+ t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ t->TMD3 = 0;
+ priv->tx_ring[i] = &lancedata->tx_ring[i];
+ priv->tx_buff[i] = lancedata->tx_buff[i];
+#if 0
+ printk(KERN_DEBUG "TX Entry %2d at %p, Buf at %p\n", i,
+ &lancedata->tx_ring[i], lancedata->tx_buff[i]);
+#endif
+ }
+
+ /* Set up RX Ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ volatile struct RDRE *r = &lancedata->rx_ring[i];
+ r->RMD0 = swloww(ARIADNE_RAM+offsetof(struct lancedata, rx_buff[i]));
+ r->RMD1 = swhighw(ARIADNE_RAM+offsetof(struct lancedata, rx_buff[i])) |
+ RF_OWN;
+ r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ r->RMD3 = 0x0000;
+ priv->rx_ring[i] = &lancedata->rx_ring[i];
+ priv->rx_buff[i] = lancedata->rx_buff[i];
+#if 0
+ printk(KERN_DEBUG "RX Entry %2d at %p, Buf at %p\n", i,
+ &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+#endif
+ }
+}
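
ariadne_init_ring() above writes every descriptor field already byte-swapped: buffer addresses are split with swhighw()/swloww() and the length field is the byte-swapped negative of PKT_BUF_SIZE. A standalone sketch, not part of this patch, showing the bit patterns those macros (copied from earlier in this file) produce; the sample address 0x12345678 is arbitrary.

#include <stdio.h>

#define PKT_BUF_SIZE 1520
#define swapw(x)	(((x>>8)&0x00ff)|((x<<8)&0xff00))
#define swhighw(x)	((((x)>>8)&0xff00)|(((x)>>24)&0x00ff))
#define swloww(x)	((((x)<<8)&0xff00)|(((x)>>8)&0x00ff))

int main(void)
{
	unsigned long addr = 0x12345678;	/* arbitrary sample buffer address */
	unsigned short bcnt = (unsigned short)-PKT_BUF_SIZE;

	printf("TMD0 (low address word, swapped)   = 0x%04x\n",
	       (unsigned int)swloww(addr));
	printf("TMD1 (high address word, swapped)  = 0x%04x\n",
	       (unsigned int)swhighw(addr));
	printf("TMD2 (negated byte count, swapped) = 0x%04x\n",
	       (unsigned int)swapw(bcnt));
	return 0;
}
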
+
+
+static int ariadne_close(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ lance->RAP = CSR112; /* Missed Frame Count */
+ priv->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (ariadne_debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, lance->RDP);
+ printk(KERN_DEBUG "%s: %lu packets missed\n", dev->name,
+ priv->stats.rx_missed_errors);
+ }
+
+ /* We stop the LANCE here -- it occasionally polls memory if we don't. */
+ lance->RDP = STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+
+ return 0;
+}
+
+
+static inline void ariadne_reset(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+ ariadne_init_ring(dev);
+ lance->RDP = INEA|STRT;
+ netif_start_queue(dev);
+}
+
+
+static irqreturn_t ariadne_interrupt(int irq, void *data, struct pt_regs *fp)
+{
+ struct net_device *dev = (struct net_device *)data;
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ struct ariadne_private *priv;
+ int csr0, boguscnt;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
+ return IRQ_NONE;
+ }
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
+ return IRQ_NONE; /* generated by the board. */
+
+ priv = netdev_priv(dev);
+
+ boguscnt = 10;
+ while ((csr0 = lance->RDP) & (ERR|RINT|TINT) && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ lance->RDP = csr0 & ~(INEA|TDMD|STOP|STRT|INIT);
+
+#if 0
+ if (ariadne_debug > 5) {
+ printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.",
+ dev->name, csr0, lance->RDP);
+ printk("[");
+ if (csr0 & INTR)
+ printk(" INTR");
+ if (csr0 & INEA)
+ printk(" INEA");
+ if (csr0 & RXON)
+ printk(" RXON");
+ if (csr0 & TXON)
+ printk(" TXON");
+ if (csr0 & TDMD)
+ printk(" TDMD");
+ if (csr0 & STOP)
+ printk(" STOP");
+ if (csr0 & STRT)
+ printk(" STRT");
+ if (csr0 & INIT)
+ printk(" INIT");
+ if (csr0 & ERR)
+ printk(" ERR");
+ if (csr0 & BABL)
+ printk(" BABL");
+ if (csr0 & CERR)
+ printk(" CERR");
+ if (csr0 & MISS)
+ printk(" MISS");
+ if (csr0 & MERR)
+ printk(" MERR");
+ if (csr0 & RINT)
+ printk(" RINT");
+ if (csr0 & TINT)
+ printk(" TINT");
+ if (csr0 & IDON)
+ printk(" IDON");
+ printk(" ]\n");
+ }
+#endif
+
+ if (csr0 & RINT) { /* Rx interrupt */
+ handled = 1;
+ ariadne_rx(dev);
+ }
+
+ if (csr0 & TINT) { /* Tx-done interrupt */
+ int dirty_tx = priv->dirty_tx;
+
+ handled = 1;
+ while (dirty_tx < priv->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lowb(priv->tx_ring[entry]->TMD1);
+
+ if (status & TF_OWN)
+ break; /* It still hasn't been Txed */
+
+ priv->tx_ring[entry]->TMD1 &= 0xff00;
+
+ if (status & TF_ERR) {
+					/* There was a major error, log it. */
+ int err_status = priv->tx_ring[entry]->TMD3;
+ priv->stats.tx_errors++;
+ if (err_status & EF_RTRY)
+ priv->stats.tx_aborted_errors++;
+ if (err_status & EF_LCAR)
+ priv->stats.tx_carrier_errors++;
+ if (err_status & EF_LCOL)
+ priv->stats.tx_window_errors++;
+ if (err_status & EF_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ priv->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk(KERN_ERR "%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ lance->RDP = STRT;
+ }
+ } else {
+ if (status & (TF_MORE|TF_ONE))
+ priv->stats.collisions++;
+ priv->stats.tx_packets++;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d, "
+ "full=%d.\n", dirty_tx, priv->cur_tx, priv->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (priv->tx_full && netif_queue_stopped(dev) &&
+ dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full. */
+ priv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ priv->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & BABL) {
+ handled = 1;
+ priv->stats.tx_errors++; /* Tx babble. */
+ }
+ if (csr0 & MISS) {
+ handled = 1;
+ priv->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & MERR) {
+ handled = 1;
+ printk(KERN_ERR "%s: Bus master arbitration failure, status "
+ "%4.4x.\n", dev->name, csr0);
+ /* Restart the chip. */
+ lance->RDP = STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA|BABL|CERR|MISS|MERR|IDON;
+
+#if 0
+ if (ariadne_debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr%d=%#4.4x.\n", dev->name,
+ lance->RAP, lance->RDP);
+#endif
+ return IRQ_RETVAL(handled);
+}
+
+
+static void ariadne_tx_timeout(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+
+ printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, lance->RDP);
+ ariadne_reset(dev);
+ netif_wake_queue(dev);
+}
+
+
+static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ int entry;
+ unsigned long flags;
+ int len = skb->len;
+
+#if 0
+ if (ariadne_debug > 3) {
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ printk(KERN_DEBUG "%s: ariadne_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, lance->RDP);
+ lance->RDP = 0x0000;
+ }
+#endif
+
+ /* FIXME: is the 79C960 new enough to do its own padding right ? */
+ if (skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ len = ETH_ZLEN;
+ }
+
+ /* Fill in a Tx ring entry */
+
+#if 0
+ printk(KERN_DEBUG "TX pkt type 0x%04x from ", ((u_short *)skb->data)[6]);
+ {
+ int i;
+ u_char *ptr = &((u_char *)skb->data)[6];
+ for (i = 0; i < 6; i++)
+ printk("%02x", ptr[i]);
+ }
+ printk(" to ");
+ {
+ int i;
+ u_char *ptr = (u_char *)skb->data;
+ for (i = 0; i < 6; i++)
+ printk("%02x", ptr[i]);
+ }
+ printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len);
+#endif
+
+ local_irq_save(flags);
+
+ entry = priv->cur_tx % TX_RING_SIZE;
+
+ /* Caution: the write order is important here, set the base address with
+ the "ownership" bits last. */
+
+ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
+ priv->tx_ring[entry]->TMD3 = 0x0000;
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
+
+#if 0
+ {
+ int i, len;
+
+ len = skb->len > 64 ? 64 : skb->len;
+ len >>= 1;
+ for (i = 0; i < len; i += 8) {
+ int j;
+ printk(KERN_DEBUG "%04x:", i);
+ for (j = 0; (j < 8) && ((i+j) < len); j++) {
+ if (!(j & 1))
+ printk(" ");
+ printk("%04x", priv->tx_buff[entry][i+j]);
+ }
+ printk("\n");
+ }
+ }
+#endif
+
+ priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1&0xff00)|TF_OWN|TF_STP|TF_ENP;
+
+ dev_kfree_skb(skb);
+
+ priv->cur_tx++;
+ if ((priv->cur_tx >= TX_RING_SIZE) && (priv->dirty_tx >= TX_RING_SIZE)) {
+
+#if 0
+ printk(KERN_DEBUG "*** Subtracting TX_RING_SIZE from cur_tx (%d) and "
+ "dirty_tx (%d)\n", priv->cur_tx, priv->dirty_tx);
+#endif
+
+ priv->cur_tx -= TX_RING_SIZE;
+ priv->dirty_tx -= TX_RING_SIZE;
+ }
+
+ /* Trigger an immediate send poll. */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA|TDMD;
+
+ dev->trans_start = jiffies;
+
+ if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
+ netif_stop_queue(dev);
+ priv->tx_full = 1;
+ }
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+static int ariadne_rx(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ int entry = priv->cur_rx % RX_RING_SIZE;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
+ int status = lowb(priv->rx_ring[entry]->RMD1);
+
+ if (status != (RF_STP|RF_ENP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RF_ENP)
+ /* Only count a general error at the end of a packet.*/
+ priv->stats.rx_errors++;
+ if (status & RF_FRAM)
+ priv->stats.rx_frame_errors++;
+ if (status & RF_OFLO)
+ priv->stats.rx_over_errors++;
+ if (status & RF_CRC)
+ priv->stats.rx_crc_errors++;
+ if (status & RF_BUFF)
+ priv->stats.rx_fifo_errors++;
+ priv->rx_ring[entry]->RMD1 &= 0xff00|RF_STP|RF_ENP;
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+ short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (lowb(priv->rx_ring[(entry+i) % RX_RING_SIZE]->RMD1) & RF_OWN)
+ break;
+
+ if (i > RX_RING_SIZE-2) {
+ priv->stats.rx_dropped++;
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ priv->cur_rx++;
+ }
+ break;
+ }
+
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+#if 0
+ printk(KERN_DEBUG "RX pkt type 0x%04x from ",
+ ((u_short *)skb->data)[6]);
+ {
+ int i;
+ u_char *ptr = &((u_char *)skb->data)[6];
+ for (i = 0; i < 6; i++)
+ printk("%02x", ptr[i]);
+ }
+ printk(" to ");
+ {
+ int i;
+ u_char *ptr = (u_char *)skb->data;
+ for (i = 0; i < 6; i++)
+ printk("%02x", ptr[i]);
+ }
+ printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len);
+#endif
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+ }
+
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ entry = (++priv->cur_rx) % RX_RING_SIZE;
+ }
+
+ priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ saved_addr = lance->RAP;
+ lance->RAP = CSR112; /* Missed Frame Count */
+ priv->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = saved_addr;
+ local_irq_restore(flags);
+
+ return &priv->stats;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP; /* Temporarily stop the lance. */
+ ariadne_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = PROM; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = dev->mc_count;
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ lance->RAP = CSR8+(i<<8); /* Logical Address Filter */
+ lance->RDP = swapw(multicast_table[i]);
+ }
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000; /* Unset promiscuous mode */
+ }
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA|STRT|IDON; /* Resume normal operation. */
+
+ netif_wake_queue(dev);
+}
+
+
+static void __devexit ariadne_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static int __init ariadne_init_module(void)
+{
+ return zorro_module_init(&ariadne_driver);
+}
+
+static void __exit ariadne_cleanup_module(void)
+{
+ zorro_unregister_driver(&ariadne_driver);
+}
+
+module_init(ariadne_init_module);
+module_exit(ariadne_cleanup_module);
+
+MODULE_LICENSE("GPL");
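
Every Am79C960 access in this driver is indirect: ariadne_open(), ariadne_interrupt() and ariadne_get_stats() first write a CSR number into the Register Address Port (RAP) and then move the data through the Register Data Port (RDP), or through IDP for the ISA-bus CSRs. A minimal sketch of that pattern as helpers, not part of this patch: it assumes the struct Am79C960 layout and u_short type from ariadne.h below, and the lance_read_csr()/lance_write_csr() names are hypothetical.

static inline void lance_write_csr(volatile struct Am79C960 *lance,
				   u_short csr, u_short val)
{
	lance->RAP = csr;	/* select the register... */
	lance->RDP = val;	/* ...then write it through the data port */
}

static inline u_short lance_read_csr(volatile struct Am79C960 *lance,
				     u_short csr)
{
	lance->RAP = csr;	/* select the register... */
	return lance->RDP;	/* ...then read it back */
}

ariadne_get_stats() above wraps the same sequence in local_irq_save() and restores the old RAP value afterwards, so it does not disturb whichever register the rest of the driver had selected.
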
diff --git a/drivers/net/ariadne.h b/drivers/net/ariadne.h
new file mode 100644
index 000000000000..f7913d5a39f1
--- /dev/null
+++ b/drivers/net/ariadne.h
@@ -0,0 +1,415 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver
+ * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+
+ /*
+ * Am79C960 PCnet-ISA
+ */
+
+struct Am79C960 {
+ volatile u_short AddressPROM[8];
+ /* IEEE Address PROM (Unused in the Ariadne) */
+ volatile u_short RDP; /* Register Data Port */
+ volatile u_short RAP; /* Register Address Port */
+ volatile u_short Reset; /* Reset Chip on Read Access */
+ volatile u_short IDP; /* ISACSR Data Port */
+};
+
+
+ /*
+ * Am79C960 Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ *
+ * Only registers marked with a `-' are intended for network software
+ * access
+ */
+
+#define CSR0 0x0000 /* - PCnet-ISA Controller Status */
+#define CSR1 0x0100 /* - IADR[15:0] */
+#define CSR2 0x0200 /* - IADR[23:16] */
+#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */
+#define CSR4 0x0400 /* - Test and Features Control */
+#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */
+#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */
+#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */
+#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */
+#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */
+#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */
+#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */
+#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */
+#define CSR15 0x0f00 /* - Mode Register */
+#define CSR16 0x1000 /* Initialization Block Address Lower */
+#define CSR17 0x1100 /* Initialization Block Address Upper */
+#define CSR18 0x1200 /* Current Receive Buffer Address */
+#define CSR19 0x1300 /* Current Receive Buffer Address */
+#define CSR20 0x1400 /* Current Transmit Buffer Address */
+#define CSR21 0x1500 /* Current Transmit Buffer Address */
+#define CSR22 0x1600 /* Next Receive Buffer Address */
+#define CSR23 0x1700 /* Next Receive Buffer Address */
+#define CSR24 0x1800 /* - Base Address of Receive Ring */
+#define CSR25 0x1900 /* - Base Address of Receive Ring */
+#define CSR26 0x1a00 /* Next Receive Descriptor Address */
+#define CSR27 0x1b00 /* Next Receive Descriptor Address */
+#define CSR28 0x1c00 /* Current Receive Descriptor Address */
+#define CSR29 0x1d00 /* Current Receive Descriptor Address */
+#define CSR30 0x1e00 /* - Base Address of Transmit Ring */
+#define CSR31		0x1f00	/* - Base Address of Transmit Ring */
+#define CSR32 0x2000 /* Next Transmit Descriptor Address */
+#define CSR33 0x2100 /* Next Transmit Descriptor Address */
+#define CSR34 0x2200 /* Current Transmit Descriptor Address */
+#define CSR35 0x2300 /* Current Transmit Descriptor Address */
+#define CSR36 0x2400 /* Next Next Receive Descriptor Address */
+#define CSR37 0x2500 /* Next Next Receive Descriptor Address */
+#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */
+#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */
+#define CSR40 0x2800 /* Current Receive Status and Byte Count */
+#define CSR41 0x2900 /* Current Receive Status and Byte Count */
+#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */
+#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */
+#define CSR44 0x2c00 /* Next Receive Status and Byte Count */
+#define CSR45 0x2d00 /* Next Receive Status and Byte Count */
+#define CSR46 0x2e00 /* Poll Time Counter */
+#define CSR47 0x2f00 /* Polling Interval */
+#define CSR48 0x3000 /* Temporary Storage */
+#define CSR49 0x3100 /* Temporary Storage */
+#define CSR50 0x3200 /* Temporary Storage */
+#define CSR51 0x3300 /* Temporary Storage */
+#define CSR52 0x3400 /* Temporary Storage */
+#define CSR53 0x3500 /* Temporary Storage */
+#define CSR54 0x3600 /* Temporary Storage */
+#define CSR55 0x3700 /* Temporary Storage */
+#define CSR56 0x3800 /* Temporary Storage */
+#define CSR57 0x3900 /* Temporary Storage */
+#define CSR58 0x3a00 /* Temporary Storage */
+#define CSR59 0x3b00 /* Temporary Storage */
+#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */
+#define CSR61 0x3d00 /* Previous Transmit Descriptor Address */
+#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */
+#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */
+#define CSR64 0x4000 /* Next Transmit Buffer Address */
+#define CSR65 0x4100 /* Next Transmit Buffer Address */
+#define CSR66 0x4200 /* Next Transmit Status and Byte Count */
+#define CSR67 0x4300 /* Next Transmit Status and Byte Count */
+#define CSR68 0x4400 /* Transmit Status Temporary Storage */
+#define CSR69 0x4500 /* Transmit Status Temporary Storage */
+#define CSR70 0x4600 /* Temporary Storage */
+#define CSR71 0x4700 /* Temporary Storage */
+#define CSR72 0x4800 /* Receive Ring Counter */
+#define CSR74 0x4a00 /* Transmit Ring Counter */
+#define CSR76 0x4c00 /* - Receive Ring Length */
+#define CSR78 0x4e00 /* - Transmit Ring Length */
+#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */
+#define CSR82 0x5200 /* - Bus Activity Timer */
+#define CSR84 0x5400 /* DMA Address */
+#define CSR85 0x5500 /* DMA Address */
+#define CSR86 0x5600 /* Buffer Byte Counter */
+#define CSR88 0x5800 /* - Chip ID */
+#define CSR89 0x5900 /* - Chip ID */
+#define CSR92 0x5c00 /* Ring Length Conversion */
+#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */
+#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */
+#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */
+#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */
+#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */
+#define CSR104 0x6800 /* SWAP */
+#define CSR105 0x6900 /* SWAP */
+#define CSR108 0x6c00 /* Buffer Management Scratch */
+#define CSR109 0x6d00 /* Buffer Management Scratch */
+#define CSR112 0x7000 /* - Missed Frame Count */
+#define CSR114 0x7200 /* - Receive Collision Count */
+#define CSR124 0x7c00 /* - Buffer Management Unit Test */
+
+
+ /*
+ * Am79C960 ISA Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ISACSR0 0x0000 /* Master Mode Read Active */
+#define ISACSR1 0x0100 /* Master Mode Write Active */
+#define ISACSR2 0x0200 /* Miscellaneous Configuration */
+#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */
+#define ISACSR5 0x0500 /* LED1 Status */
+#define ISACSR6 0x0600 /* LED2 Status */
+#define ISACSR7 0x0700 /* LED3 Status */
+
+
+ /*
+ * Bit definitions for CSR0 (PCnet-ISA Controller Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ERR 0x0080 /* Error */
+#define BABL 0x0040 /* Babble: Transmitted too many bits */
+#define CERR 0x0020 /* No Heartbeat (10BASE-T) */
+#define MISS 0x0010 /* Missed Frame */
+#define MERR 0x0008 /* Memory Error */
+#define RINT 0x0004 /* Receive Interrupt */
+#define TINT 0x0002 /* Transmit Interrupt */
+#define IDON 0x0001 /* Initialization Done */
+#define INTR 0x8000 /* Interrupt Flag */
+#define INEA 0x4000 /* Interrupt Enable */
+#define RXON 0x2000 /* Receive On */
+#define TXON 0x1000 /* Transmit On */
+#define TDMD 0x0800 /* Transmit Demand */
+#define STOP 0x0400 /* Stop */
+#define STRT 0x0200 /* Start */
+#define INIT 0x0100 /* Initialize */
+
+
+ /*
+ * Bit definitions for CSR3 (Interrupt Masks and Deferral Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define BABLM 0x0040 /* Babble Mask */
+#define MISSM 0x0010 /* Missed Frame Mask */
+#define MERRM 0x0008 /* Memory Error Mask */
+#define RINTM 0x0004 /* Receive Interrupt Mask */
+#define TINTM 0x0002 /* Transmit Interrupt Mask */
+#define IDONM 0x0001 /* Initialization Done Mask */
+#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */
+#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */
+
+
+ /*
+ * Bit definitions for CSR4 (Test and Features Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ENTST 0x0080 /* Enable Test Mode */
+#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */
+#define TIMER 0x0020 /* Timer Enable Register */
+#define DPOLL 0x0010 /* Disable Transmit Polling */
+#define APAD_XMT 0x0008 /* Auto Pad Transmit */
+#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */
+#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */
+#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */
+#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */
+#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */
+#define TXSTRT 0x0800 /* Transmit Start Status */
+#define TXSTRTM 0x0400 /* Transmit Start Mask */
+#define JAB 0x0200 /* Jabber Error */
+#define JABM 0x0100 /* Jabber Error Mask */
+
+
+ /*
+ * Bit definitions for CSR15 (Mode Register)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define PROM 0x0080 /* Promiscuous Mode */
+#define DRCVBC 0x0040 /* Disable Receive Broadcast */
+#define DRCVPA 0x0020 /* Disable Receive Physical Address */
+#define DLNKTST 0x0010 /* Disable Link Status */
+#define DAPC 0x0008 /* Disable Automatic Polarity Correction */
+#define MENDECL 0x0004 /* MENDEC Loopback Mode */
+#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */
+#define PORTSEL1 0x0001 /* Port Select Bits */
+#define PORTSEL2 0x8000 /* Port Select Bits */
+#define INTL 0x4000 /* Internal Loopback */
+#define DRTY 0x2000 /* Disable Retry */
+#define FCOLL 0x1000 /* Force Collision */
+#define DXMTFCS 0x0800 /* Disable Transmit CRC */
+#define LOOP 0x0400 /* Loopback Enable */
+#define DTX 0x0200 /* Disable Transmitter */
+#define DRX 0x0100 /* Disable Receiver */
+
+
+ /*
+ * Bit definitions for ISACSR2 (Miscellaneous Configuration)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ASEL 0x0200 /* Media Interface Port Auto Select */
+
+
+ /*
+ * Bit definitions for ISACSR5-7 (LED1-3 Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define LEDOUT 0x0080 /* Current LED Status */
+#define PSE 0x8000 /* Pulse Stretcher Enable */
+#define XMTE 0x1000 /* Enable Transmit Status Signal */
+#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */
+#define RCVE 0x0400 /* Enable Receive Status Signal */
+#define JABE 0x0200 /* Enable Jabber Signal */
+#define COLE 0x0100 /* Enable Collision Signal */
+
+
+ /*
+ * Receive Descriptor Ring Entry
+ */
+
+struct RDRE {
+ volatile u_short RMD0; /* LADR[15:0] */
+ volatile u_short RMD1; /* HADR[23:16] | Receive Flags */
+ volatile u_short RMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short RMD3; /* Message Byte Count */
+};
+
+
+ /*
+ * Transmit Descriptor Ring Entry
+ */
+
+struct TDRE {
+ volatile u_short TMD0; /* LADR[15:0] */
+ volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */
+ volatile u_short TMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short TMD3; /* Error Flags */
+};
+
+
+ /*
+ * Receive Flags
+ */
+
+#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define RF_ERR 0x0040 /* Error */
+#define RF_FRAM 0x0020 /* Framing Error */
+#define RF_OFLO 0x0010 /* Overflow Error */
+#define RF_CRC 0x0008 /* CRC Error */
+#define RF_BUFF 0x0004 /* Buffer Error */
+#define RF_STP 0x0002 /* Start of Packet */
+#define RF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Transmit Flags
+ */
+
+#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define TF_ERR 0x0040 /* Error */
+#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */
+#define TF_MORE 0x0010 /* More than one retry needed */
+#define TF_ONE 0x0008 /* One retry needed */
+#define TF_DEF 0x0004 /* Deferred */
+#define TF_STP 0x0002 /* Start of Packet */
+#define TF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Error Flags
+ */
+
+#define EF_BUFF 0x0080 /* Buffer Error */
+#define EF_UFLO 0x0040 /* Underflow Error */
+#define EF_LCOL 0x0010 /* Late Collision */
+#define EF_LCAR 0x0008 /* Loss of Carrier */
+#define EF_RTRY 0x0004 /* Retry Error */
+#define EF_TDR 0xff03 /* Time Domain Reflectometry */
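The RF_*/TF_* bits above implement the usual LANCE ownership handshake: the host hands a descriptor to the controller by setting the OWN bit, and the controller clears it once the buffer has been filled (receive) or sent (transmit). The following standalone sketch, not taken from this driver and using local copies of the values defined above, shows how a receive ring entry is polled under that convention.

#include <stdint.h>
#include <stdio.h>

/* Minimal local copies of the (already swapped) flag values defined above. */
#define RF_OWN  0x0080
#define RF_ERR  0x0040
#define RF_STP  0x0002
#define RF_ENP  0x0001

struct rdre {                      /* mirrors struct RDRE above, minus volatile */
        uint16_t rmd0, rmd1, rmd2, rmd3;
};

/* Return the frame length if the host owns a complete, error-free
 * descriptor, or -1 if the controller still owns it or it is bad. */
static int poll_rx_descriptor(const struct rdre *d)
{
        if (d->rmd1 & RF_OWN)                   /* controller still owns it */
                return -1;
        if ((d->rmd1 & (RF_ERR | RF_STP | RF_ENP)) != (RF_STP | RF_ENP))
                return -1;                      /* error or split frame */
        return d->rmd3;                         /* message byte count */
}

int main(void)
{
        struct rdre d = { 0, RF_STP | RF_ENP, (uint16_t)-1600, 64 };
        printf("frame length: %d\n", poll_rx_descriptor(&d));
        return 0;
}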
+
+
+
+ /*
+ * MC68230 Parallel Interface/Timer
+ */
+
+struct MC68230 {
+ volatile u_char PGCR; /* Port General Control Register */
+ u_char Pad1[1];
+ volatile u_char PSRR; /* Port Service Request Register */
+ u_char Pad2[1];
+ volatile u_char PADDR; /* Port A Data Direction Register */
+ u_char Pad3[1];
+ volatile u_char PBDDR; /* Port B Data Direction Register */
+ u_char Pad4[1];
+ volatile u_char PCDDR; /* Port C Data Direction Register */
+ u_char Pad5[1];
+ volatile u_char PIVR; /* Port Interrupt Vector Register */
+ u_char Pad6[1];
+ volatile u_char PACR; /* Port A Control Register */
+ u_char Pad7[1];
+ volatile u_char PBCR; /* Port B Control Register */
+ u_char Pad8[1];
+ volatile u_char PADR; /* Port A Data Register */
+ u_char Pad9[1];
+ volatile u_char PBDR; /* Port B Data Register */
+ u_char Pad10[1];
+ volatile u_char PAAR; /* Port A Alternate Register */
+ u_char Pad11[1];
+ volatile u_char PBAR; /* Port B Alternate Register */
+ u_char Pad12[1];
+ volatile u_char PCDR; /* Port C Data Register */
+ u_char Pad13[1];
+ volatile u_char PSR; /* Port Status Register */
+ u_char Pad14[5];
+ volatile u_char TCR; /* Timer Control Register */
+ u_char Pad15[1];
+ volatile u_char TIVR; /* Timer Interrupt Vector Register */
+ u_char Pad16[3];
+ volatile u_char CPRH; /* Counter Preload Register (High) */
+ u_char Pad17[1];
+ volatile u_char CPRM; /* Counter Preload Register (Mid) */
+ u_char Pad18[1];
+ volatile u_char CPRL; /* Counter Preload Register (Low) */
+ u_char Pad19[3];
+ volatile u_char CNTRH; /* Count Register (High) */
+ u_char Pad20[1];
+ volatile u_char CNTRM; /* Count Register (Mid) */
+ u_char Pad21[1];
+ volatile u_char CNTRL; /* Count Register (Low) */
+ u_char Pad22[1];
+ volatile u_char TSR; /* Timer Status Register */
+ u_char Pad23[11];
+};
+
+
+ /*
+ * Ariadne Expansion Board Structure
+ */
+
+#define ARIADNE_LANCE 0x360
+
+#define ARIADNE_PIT 0x1000
+
+#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */
+#define ARIADNE_BOOTPROM_SIZE 0x4000
+
+#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */
+#define ARIADNE_RAM_SIZE 0x8000
+
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
new file mode 100644
index 000000000000..470364deded0
--- /dev/null
+++ b/drivers/net/arm/Kconfig
@@ -0,0 +1,46 @@
+#
+# Acorn Network device configuration
+# These are for Acorn's Expansion card network interfaces
+#
+config ARM_AM79C961A
+ bool "ARM EBSA110 AM79C961A support"
+ depends on NET_ETHERNET && ARM && ARCH_EBSA110
+ select CRC32
+ help
+ If you wish to compile a kernel for the EBSA-110, then you should
+ always answer Y to this.
+
+config ARM_ETHER1
+ tristate "Acorn Ether1 support"
+ depends on NET_ETHERNET && ARM && ARCH_ACORN
+ help
+ If you have an Acorn system with one of these (AKA25) network cards,
+ you should say Y to this option if you wish to use it with Linux.
+
+config ARM_ETHER3
+ tristate "Acorn/ANT Ether3 support"
+ depends on NET_ETHERNET && ARM && ARCH_ACORN
+ help
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config ARM_ETHERH
+ tristate "I-cubed EtherH/ANT EtherM support"
+ depends on NET_ETHERNET && ARM && ARCH_ACORN
+ select CRC32
+ help
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config ARM_ETHER00
+ tristate "Altera Ether00 support"
+ depends on NET_ETHERNET && ARM && ARCH_CAMELOT
+ help
+ This is the driver for Altera's ether00 ethernet mac IP core. Say
+ Y here if you want to build support for this into the kernel. It
+ is also available as a module (say M here) that can be inserted/
+ removed from the kernel at the same time as the PLD is configured.
+ If this driver is running on an epxa10 development board then it
+ will generate a suitable hw address based on the board serial
+ number (MTD support is required for this). Otherwise you will
+ need to set a suitable hw address using ifconfig.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
new file mode 100644
index 000000000000..b0d706834d89
--- /dev/null
+++ b/drivers/net/arm/Makefile
@@ -0,0 +1,10 @@
+# File: drivers/net/arm/Makefile
+#
+# Makefile for the ARM network device drivers
+#
+
+obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
+obj-$(CONFIG_ARM_ETHER00) += ether00.o
+obj-$(CONFIG_ARM_ETHERH) += etherh.o
+obj-$(CONFIG_ARM_ETHER3) += ether3.o
+obj-$(CONFIG_ARM_ETHER1) += ether1.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
new file mode 100644
index 000000000000..9b659e3c8d67
--- /dev/null
+++ b/drivers/net/arm/am79c961a.c
@@ -0,0 +1,750 @@
+/*
+ * linux/drivers/net/am79c961.c
+ *
+ * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this cannot be built as a module (it doesn't make sense).
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define TX_BUFFERS 15
+#define RX_BUFFERS 25
+
+#include "am79c961a.h"
+
+static irqreturn_t
+am79c961_interrupt (int irq, void *dev_id, struct pt_regs *regs);
+
+static unsigned int net_debug = NET_DEBUG;
+
+static const char version[] =
+ "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
+
+/* --------------------------------------------------------------------------- */
+
+#ifdef __arm__
+static void write_rreg(u_long base, u_int reg, u_int val)
+{
+ __asm__(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #-4] @ NET_RDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_rreg(u_long base_addr, u_int reg)
+{
+ unsigned short v;
+ __asm__(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #-4] @ NET_RDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+static inline void write_ireg(u_long base, u_int reg, u_int val)
+{
+ __asm__(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #8] @ NET_IDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_ireg(u_long base_addr, u_int reg)
+{
+ u_short v;
+ __asm__(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
+#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
+
+static inline void
+am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ __asm__ __volatile__("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+ while (length > 8) {
+ unsigned int tmp, tmp2;
+ __asm__ __volatile__(
+ "ldm%?ia %1!, {%2, %3}\n\t"
+ "str%?h %2, [%0], #4\n\t"
+ "mov%? %2, %2, lsr #16\n\t"
+ "str%?h %2, [%0], #4\n\t"
+ "str%?h %3, [%0], #4\n\t"
+ "mov%? %3, %3, lsr #16\n\t"
+ "str%?h %3, [%0], #4"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ __asm__ __volatile__("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+}
+
+static inline void
+am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ unsigned int tmp;
+ __asm__ __volatile__(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
+ length -= 2;
+ }
+ while (length > 8) {
+ unsigned int tmp, tmp2, tmp3;
+ __asm__ __volatile__(
+ "ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %3, lsl #16\n\t"
+ "ldr%?h %3, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "orr%? %3, %3, %4, lsl #16\n\t"
+ "stm%?ia %1!, {%2, %3}"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ unsigned int tmp;
+ __asm__ __volatile__(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
+ length -= 2;
+ }
+}
+#else
+#error Not compatible
+#endif
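The accessors above wrap the chip's indirect register interface in ARM inline assembly: each CSR access is two steps, first writing the register index to the address port (RAP), then reading or writing the data port (RDP, or IDP for the ISA control registers). As a rough, self-contained illustration of that convention (a toy model, not the driver's real I/O path), the RAP/RDP pairing can be expressed in plain C like this:

#include <stdint.h>
#include <stdio.h>

/* A toy model of the chip's indirect register file: one address latch
 * (RAP) selecting which CSR the data port (RDP) currently talks to. */
struct fake_lance {
        uint16_t rap;           /* currently selected register */
        uint16_t csr[128];      /* register file */
};

static void rap_write(struct fake_lance *c, uint16_t reg) { c->rap = reg; }
static void rdp_write(struct fake_lance *c, uint16_t val) { c->csr[c->rap & 127] = val; }
static uint16_t rdp_read(struct fake_lance *c)            { return c->csr[c->rap & 127]; }

/* The driver's write_rreg()/read_rreg() boil down to this two-step
 * sequence, done there with ARM halfword stores and loads. */
static void csr_write(struct fake_lance *c, unsigned reg, uint16_t val)
{
        rap_write(c, reg);      /* select the CSR number */
        rdp_write(c, val);      /* then write its data   */
}

static uint16_t csr_read(struct fake_lance *c, unsigned reg)
{
        rap_write(c, reg);
        return rdp_read(c);
}

int main(void)
{
        struct fake_lance chip = { 0 };
        csr_write(&chip, 0, 0x0042);    /* e.g. CSR0_IENA | CSR0_STRT from am79c961a.h */
        printf("CSR0 = 0x%04x\n", (unsigned)csr_read(&chip, 0));
        return 0;
}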
+
+static int
+am79c961_ramtest(struct net_device *dev, unsigned int val)
+{
+ unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
+ int i, error = 0, errorcount = 0;
+
+ if (!buffer)
+ return 0;
+ memset (buffer, val, 65536);
+ am_writebuffer(dev, 0, buffer, 65536);
+ memset (buffer, val ^ 255, 65536);
+ am_readbuffer(dev, 0, buffer, 65536);
+ for (i = 0; i < 65536; i++) {
+ if (buffer[i] != val && !error) {
+ printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
+ error = 1;
+ errorcount ++;
+ } else if (error && buffer[i] == val) {
+ printk ("%05X\n", i);
+ error = 0;
+ }
+ }
+ if (error)
+ printk ("10000\n");
+ kfree (buffer);
+ return errorcount;
+}
+
+static void
+am79c961_init_for_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned char *p;
+ u_int hdr_addr, first_free_addr;
+ int i;
+
+ /*
+ * Stop the chip.
+ */
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
+ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
+ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
+ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
+
+ for (i = LADRL; i <= LADRH; i++)
+ write_rreg (dev->base_addr, i, 0);
+
+ for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
+ write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
+
+ i = MODE_PORT_10BT;
+ if (dev->flags & IFF_PROMISC)
+ i |= MODE_PROMISC;
+
+ write_rreg (dev->base_addr, MODE, i);
+ write_rreg (dev->base_addr, POLLINT, 0);
+ write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
+ write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
+
+ first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
+ hdr_addr = 0;
+
+ priv->rxhead = 0;
+ priv->rxtail = 0;
+ priv->rxhdr = hdr_addr;
+
+ for (i = 0; i < RX_BUFFERS; i++) {
+ priv->rxbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, RMD_OWN);
+ am_writeword (dev, hdr_addr + 4, (-1600));
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+ priv->txhead = 0;
+ priv->txtail = 0;
+ priv->txhdr = hdr_addr;
+ for (i = 0; i < TX_BUFFERS; i++) {
+ priv->txbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
+ am_writeword (dev, hdr_addr + 4, 0xf000);
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+
+ write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
+ write_rreg (dev->base_addr, BASERXH, 0);
+ write_rreg (dev->base_addr, BASETXL, priv->txhdr);
+ write_rreg (dev->base_addr, BASETXH, 0);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
+ write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
+ write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
+}
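am79c961_init_for_open() above lays out the card memory as a descriptor header area (8 bytes per receive and transmit entry, plus 16 bytes of slack) followed by fixed 1600-byte buffers, and writes buffer lengths as negative two's-complement values as the LANCE expects. The standalone sketch below only reproduces that layout arithmetic so the offsets are easy to verify; it makes no claims beyond what the loops above already do.

#include <stdio.h>
#include <stdint.h>

#define TX_BUFFERS 15
#define RX_BUFFERS 25
#define BUF_SIZE   1600         /* per-buffer allocation used by the driver */

int main(void)
{
        /* 8 bytes per descriptor, plus the 16 bytes of slack the driver leaves. */
        unsigned int first_free = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
        unsigned int hdr = 0, i;

        for (i = 0; i < RX_BUFFERS; i++, hdr += 8, first_free += BUF_SIZE)
                printf("rx desc %2u at 0x%04x -> buffer 0x%05x, length word 0x%04x\n",
                       i, hdr, first_free, (uint16_t)-BUF_SIZE);

        for (i = 0; i < TX_BUFFERS; i++, hdr += 8, first_free += BUF_SIZE)
                printf("tx desc %2u at 0x%04x -> buffer 0x%05x\n", i, hdr, first_free);

        printf("total card memory used: %u bytes\n", first_free);
        return 0;
}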
+
+static void am79c961_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int lnkstat, carrier;
+
+ lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ carrier = netif_carrier_ok(dev);
+
+ if (lnkstat && !carrier)
+ netif_carrier_on(dev);
+ else if (!lnkstat && carrier)
+ netif_carrier_off(dev);
+
+ mod_timer(&priv->timer, jiffies + 5*HZ);
+}
+
+/*
+ * Open/initialize the board.
+ */
+static int
+am79c961_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ memset (&priv->stats, 0, sizeof (priv->stats));
+
+ ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ am79c961_init_for_open(dev);
+
+ netif_carrier_off(dev);
+
+ priv->timer.expires = jiffies;
+ add_timer(&priv->timer);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to am79c961_open().
+ */
+static int
+am79c961_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&priv->timer);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ */
+static struct net_device_stats *am79c961_getstats (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+static void am79c961_mc_hash(struct dev_mc_list *dmi, unsigned short *hash)
+{
+ if (dmi->dmi_addrlen == ETH_ALEN && dmi->dmi_addr[0] & 0x01) {
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+ }
+}
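am79c961_mc_hash() above maps a multicast address to one bit of the chip's 64-bit logical-address filter: the top two bits of the little-endian Ethernet CRC select one of the four 16-bit LADR words, and the next four bits select a bit within that word. The self-contained fragment below illustrates just that bit-selection step; the CRC constant is made up, since in the kernel it would come from ether_crc_le().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend CRC of some multicast address; in the driver this comes
         * from ether_crc_le(ETH_ALEN, dmi->dmi_addr). The value is made up. */
        uint32_t crc = 0xB6C49E6FU;
        uint16_t hash[4] = { 0, 0, 0, 0 };      /* LADRL..LADRH filter words */

        unsigned idx = crc >> 30;               /* top 2 bits: which 16-bit word */
        unsigned bit = (crc >> 26) & 15;        /* next 4 bits: which bit in it  */

        hash[idx] |= 1u << bit;

        printf("filter bit %u -> LADR word %u, mask 0x%04x\n",
               (unsigned)((crc >> 26) & 63), idx, (unsigned)hash[idx]);
        return 0;
}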
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adapter.
+ */
+static void am79c961_setmulticastlist (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned short multi_hash[4], mode;
+ int i, stopped;
+
+ mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(multi_hash, 0xff, sizeof(multi_hash));
+ } else {
+ struct dev_mc_list *dmi;
+
+ memset(multi_hash, 0x00, sizeof(multi_hash));
+
+ for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ am79c961_mc_hash(dmi, multi_hash);
+ }
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+
+ stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
+
+ if (!stopped) {
+ /*
+ * Put the chip into suspend mode
+ */
+ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
+
+ /*
+ * Spin waiting for chip to report suspend mode
+ */
+ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ nop();
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ }
+ }
+
+ /*
+ * Update the multicast hash table
+ */
+ for (i = 0; i < sizeof(multi_hash) / sizeof(multi_hash[0]); i++)
+ write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
+
+ /*
+ * Write the mode register
+ */
+ write_rreg(dev->base_addr, MODE, mode);
+
+ if (!stopped) {
+ /*
+ * Put the chip back into running mode
+ */
+ write_rreg(dev->base_addr, CTRL1, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+}
+
+static void am79c961_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
+ dev->name);
+
+ /*
+ * ought to do some setup of the tx side here
+ */
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int hdraddr, bufaddr;
+ unsigned int head;
+ unsigned long flags;
+
+ head = priv->txhead;
+ hdraddr = priv->txhdr + (head << 3);
+ bufaddr = priv->txbuffer[head];
+ head += 1;
+ if (head >= TX_BUFFERS)
+ head = 0;
+
+ am_writebuffer (dev, bufaddr, skb->data, skb->len);
+ am_writeword (dev, hdraddr + 4, -skb->len);
+ am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
+ priv->txhead = head;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ /*
+ * If the next packet is owned by the ethernet device,
+ * then the tx ring is full and we can't add another
+ * packet.
+ */
+ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
+ netif_stop_queue(dev);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static void
+am79c961_rx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ struct sk_buff *skb;
+ u_int hdraddr;
+ u_int pktaddr;
+ u_int status;
+ int len;
+
+ hdraddr = priv->rxhdr + (priv->rxtail << 3);
+ pktaddr = priv->rxbuffer[priv->rxtail];
+
+ status = am_readword (dev, hdraddr + 2);
+ if (status & RMD_OWN) /* do we own it? */
+ break;
+
+ priv->rxtail ++;
+ if (priv->rxtail >= RX_BUFFERS)
+ priv->rxtail = 0;
+
+ if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ priv->stats.rx_errors ++;
+ if (status & RMD_ERR) {
+ if (status & RMD_FRAM)
+ priv->stats.rx_frame_errors ++;
+ if (status & RMD_CRC)
+ priv->stats.rx_crc_errors ++;
+ } else if (status & RMD_STP)
+ priv->stats.rx_length_errors ++;
+ continue;
+ }
+
+ len = am_readword(dev, hdraddr + 6);
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+
+ am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
+ am_writeword(dev, hdraddr + 2, RMD_OWN);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ priv->stats.rx_bytes += len;
+ priv->stats.rx_packets ++;
+ } else {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
+ priv->stats.rx_dropped ++;
+ break;
+ }
+ } while (1);
+}
+
+/*
+ * Update stats for the transmitted packet
+ */
+static void
+am79c961_tx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ short len;
+ u_int hdraddr;
+ u_int status;
+
+ hdraddr = priv->txhdr + (priv->txtail << 3);
+ status = am_readword (dev, hdraddr + 2);
+ if (status & TMD_OWN)
+ break;
+
+ priv->txtail ++;
+ if (priv->txtail >= TX_BUFFERS)
+ priv->txtail = 0;
+
+ if (status & TMD_ERR) {
+ u_int status2;
+
+ priv->stats.tx_errors ++;
+
+ status2 = am_readword (dev, hdraddr + 6);
+
+ /*
+ * Clear the error byte
+ */
+ am_writeword (dev, hdraddr + 6, 0);
+
+ if (status2 & TST_RTRY)
+ priv->stats.collisions += 16;
+ if (status2 & TST_LCOL)
+ priv->stats.tx_window_errors ++;
+ if (status2 & TST_LCAR)
+ priv->stats.tx_carrier_errors ++;
+ if (status2 & TST_UFLO)
+ priv->stats.tx_fifo_errors ++;
+ continue;
+ }
+ priv->stats.tx_packets ++;
+ len = am_readword (dev, hdraddr + 4);
+ priv->stats.tx_bytes += -len;
+ } while (priv->txtail != priv->txhead);
+
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t
+am79c961_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ u_int status, n = 100;
+ int handled = 0;
+
+ do {
+ status = read_rreg(dev->base_addr, CSR0);
+ write_rreg(dev->base_addr, CSR0, status &
+ (CSR0_IENA|CSR0_TINT|CSR0_RINT|
+ CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
+
+ if (status & CSR0_RINT) {
+ handled = 1;
+ am79c961_rx(dev, priv);
+ }
+ if (status & CSR0_TINT) {
+ handled = 1;
+ am79c961_tx(dev, priv);
+ }
+ if (status & CSR0_MISS) {
+ handled = 1;
+ priv->stats.rx_dropped ++;
+ }
+ if (status & CSR0_CERR) {
+ handled = 1;
+ mod_timer(&priv->timer, jiffies);
+ }
+ } while (--n && status & (CSR0_RINT | CSR0_TINT));
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void am79c961_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ am79c961_interrupt(dev->irq, dev, NULL);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Initialise the chip. Note that we always expect
+ * to be entered with interrupts enabled.
+ */
+static int
+am79c961_hw_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->chip_lock);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irq(&priv->chip_lock);
+
+ am79c961_ramtest(dev, 0x66);
+ am79c961_ramtest(dev, 0x99);
+
+ return 0;
+}
+
+static void __init am79c961_banner(void)
+{
+ static unsigned version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static int __init am79c961_init(void)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ int i, ret;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ ret = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ priv = netdev_priv(dev);
+
+ /*
+ * Fixed address and IRQ lines here.
+ * The PNP initialisation should have been
+ * done by the ether bootp loader.
+ */
+ dev->base_addr = 0x220;
+ dev->irq = IRQ_EBSA110_ETHERNET;
+
+ ret = -ENODEV;
+ if (!request_region(dev->base_addr, 0x18, dev->name))
+ goto nodev;
+
+ /*
+ * Reset the device.
+ */
+ inb(dev->base_addr + NET_RESET);
+ udelay(5);
+
+ /*
+ * Check the manufacturer part of the
+ * ether address.
+ */
+ if (inb(dev->base_addr) != 0x08 ||
+ inb(dev->base_addr + 2) != 0x00 ||
+ inb(dev->base_addr + 4) != 0x2b)
+ goto release;
+
+ am79c961_banner();
+ printk(KERN_INFO "%s: ether address ", dev->name);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
+ printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
+ }
+
+ spin_lock_init(&priv->chip_lock);
+ init_timer(&priv->timer);
+ priv->timer.data = (unsigned long)dev;
+ priv->timer.function = am79c961_timer;
+
+ if (am79c961_hw_init(dev))
+ goto release;
+
+ dev->open = am79c961_open;
+ dev->stop = am79c961_close;
+ dev->hard_start_xmit = am79c961_sendpacket;
+ dev->get_stats = am79c961_getstats;
+ dev->set_multicast_list = am79c961_setmulticastlist;
+ dev->tx_timeout = am79c961_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = am79c961_poll_controller;
+#endif
+
+ ret = register_netdev(dev);
+ if (ret == 0)
+ return 0;
+
+release:
+ release_region(dev->base_addr, 0x18);
+nodev:
+ free_netdev(dev);
+out:
+ return ret;
+}
+
+__initcall(am79c961_init);
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
new file mode 100644
index 000000000000..1e9b05050cbe
--- /dev/null
+++ b/drivers/net/arm/am79c961a.h
@@ -0,0 +1,148 @@
+/*
+ * linux/drivers/net/am79c961.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_am79c961a_H
+#define _LINUX_am79c961a_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define NET_UID 0
+#define NET_RDP 0x10
+#define NET_RAP 0x12
+#define NET_RESET 0x14
+#define NET_IDP 0x16
+
+/*
+ * RAP registers
+ */
+#define CSR0 0
+#define CSR0_INIT 0x0001
+#define CSR0_STRT 0x0002
+#define CSR0_STOP 0x0004
+#define CSR0_TDMD 0x0008
+#define CSR0_TXON 0x0010
+#define CSR0_RXON 0x0020
+#define CSR0_IENA 0x0040
+#define CSR0_INTR 0x0080
+#define CSR0_IDON 0x0100
+#define CSR0_TINT 0x0200
+#define CSR0_RINT 0x0400
+#define CSR0_MERR 0x0800
+#define CSR0_MISS 0x1000
+#define CSR0_CERR 0x2000
+#define CSR0_BABL 0x4000
+#define CSR0_ERR 0x8000
+
+#define CSR3 3
+#define CSR3_EMBA 0x0008
+#define CSR3_DXMT2PD 0x0010
+#define CSR3_LAPPEN 0x0020
+#define CSR3_DXSUFLO 0x0040
+#define CSR3_IDONM 0x0100
+#define CSR3_TINTM 0x0200
+#define CSR3_RINTM 0x0400
+#define CSR3_MERRM 0x0800
+#define CSR3_MISSM 0x1000
+#define CSR3_BABLM 0x4000
+#define CSR3_MASKALL 0x5F00
+
+#define CSR4 4
+#define CSR4_JABM 0x0001
+#define CSR4_JAB 0x0002
+#define CSR4_TXSTRTM 0x0004
+#define CSR4_TXSTRT 0x0008
+#define CSR4_RCVCCOM 0x0010
+#define CSR4_RCVCCO 0x0020
+#define CSR4_MFCOM 0x0100
+#define CSR4_MFCO 0x0200
+#define CSR4_ASTRP_RCV 0x0400
+#define CSR4_APAD_XMIT 0x0800
+
+#define CTRL1 5
+#define CTRL1_SPND 0x0001
+
+#define LADRL 8
+#define LADRM1 9
+#define LADRM2 10
+#define LADRH 11
+#define PADRL 12
+#define PADRM 13
+#define PADRH 14
+
+#define MODE 15
+#define MODE_DISRX 0x0001
+#define MODE_DISTX 0x0002
+#define MODE_LOOP 0x0004
+#define MODE_DTCRC 0x0008
+#define MODE_COLL 0x0010
+#define MODE_DRETRY 0x0020
+#define MODE_INTLOOP 0x0040
+#define MODE_PORT_AUI 0x0000
+#define MODE_PORT_10BT 0x0080
+#define MODE_DRXPA 0x2000
+#define MODE_DRXBA 0x4000
+#define MODE_PROMISC 0x8000
+
+#define BASERXL 24
+#define BASERXH 25
+#define BASETXL 30
+#define BASETXH 31
+
+#define POLLINT 47
+
+#define SIZERXR 76
+#define SIZETXR 78
+
+#define CSR_MFC 112
+
+#define RMD_ENP 0x0100
+#define RMD_STP 0x0200
+#define RMD_CRC 0x0800
+#define RMD_FRAM 0x2000
+#define RMD_ERR 0x4000
+#define RMD_OWN 0x8000
+
+#define TMD_ENP 0x0100
+#define TMD_STP 0x0200
+#define TMD_MORE 0x1000
+#define TMD_ERR 0x4000
+#define TMD_OWN 0x8000
+
+#define TST_RTRY 0x0400
+#define TST_LCAR 0x0800
+#define TST_LCOL 0x1000
+#define TST_UFLO 0x4000
+#define TST_BUFF 0x8000
+
+#define ISALED0 0x0004
+#define ISALED0_LNKST 0x8000
+
+struct dev_priv {
+ struct net_device_stats stats;
+ unsigned long rxbuffer[RX_BUFFERS];
+ unsigned long txbuffer[TX_BUFFERS];
+ unsigned char txhead;
+ unsigned char txtail;
+ unsigned char rxhead;
+ unsigned char rxtail;
+ unsigned long rxhdr;
+ unsigned long txhdr;
+ spinlock_t chip_lock;
+ struct timer_list timer;
+};
+
+extern int am79c961_probe (struct net_device *dev);
+
+#endif
diff --git a/drivers/net/arm/ether00.c b/drivers/net/arm/ether00.c
new file mode 100644
index 000000000000..4f1f4e31bda5
--- /dev/null
+++ b/drivers/net/arm/ether00.c
@@ -0,0 +1,1017 @@
+/*
+ * drivers/net/ether00.c
+ *
+ * Copyright (C) 2001 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* includes */
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/tqueue.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pld/pld_hotswap.h>
+#include <asm/arch/excalibur.h>
+#include <asm/arch/hardware.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include <asm/arch/ether00.h>
+#include <asm/arch/tdkphy.h>
+
+
+MODULE_AUTHOR("Clive Davies");
+MODULE_DESCRIPTION("Altera Ether00 IP core driver");
+MODULE_LICENSE("GPL");
+
+#define PKT_BUF_SZ 1540 /* Size of each rx buffer */
+#define ETH_NR 4 /* Number of MACs this driver supports */
+
+#define DEBUG(x)
+
+#define __dma_va(x) (unsigned int)((unsigned int)priv->dma_data+(((unsigned int)(x))&(EXC_SPSRAM_BLOCK0_SIZE-1)))
+#define __dma_pa(x) (unsigned int)(EXC_SPSRAM_BLOCK0_BASE+(((unsigned int)(x))-(unsigned int)priv->dma_data))
+
+#define ETHER00_BASE 0
+#define ETHER00_TYPE
+#define ETHER00_NAME "ether00"
+#define MAC_REG_SIZE 0x400 /* size of MAC register area */
+
+
+
+/* typedefs */
+
+/* The definition of the driver control structure */
+
+#define RX_NUM_BUFF 10
+#define RX_NUM_FDESC 10
+#define TX_NUM_FDESC 10
+
+struct tx_fda_ent{
+ FDA_DESC fd;
+ BUF_DESC bd;
+ BUF_DESC pad;
+};
+struct rx_fda_ent{
+ FDA_DESC fd;
+ BUF_DESC bd;
+ BUF_DESC pad;
+};
+struct rx_blist_ent{
+ FDA_DESC fd;
+ BUF_DESC bd;
+ BUF_DESC pad;
+};
+struct net_priv
+{
+ struct net_device_stats stats;
+ struct sk_buff* skb;
+ void* dma_data;
+ struct rx_blist_ent* rx_blist_vp;
+ struct rx_fda_ent* rx_fda_ptr;
+ struct tx_fda_ent* tx_fdalist_vp;
+ struct tq_struct tq_memupdate;
+ unsigned char memupdate_scheduled;
+ unsigned char rx_disabled;
+ unsigned char queue_stopped;
+ spinlock_t rx_lock;
+};
+
+static const char vendor_id[2]={0x07,0xed};
+
+#ifdef ETHER00_DEBUG
+
+/* Dump (most) registers for debugging purposes */
+
+static void dump_regs(struct net_device *dev){
+ struct net_priv* priv=dev->priv;
+ unsigned int* i;
+
+ printk("\n RX free descriptor area:\n");
+
+ for(i=(unsigned int*)priv->rx_fda_ptr;
+ i<((unsigned int*)(priv->rx_fda_ptr+RX_NUM_FDESC));){
+ printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
+ i+=4;
+ }
+
+ printk("\n RX buffer list:\n");
+
+ for(i=(unsigned int*)priv->rx_blist_vp;
+ i<((unsigned int*)(priv->rx_blist_vp+RX_NUM_BUFF));){
+ printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
+ i+=4;
+ }
+
+ printk("\n TX frame descriptor list:\n");
+
+ for(i=(unsigned int*)priv->tx_fdalist_vp;
+ i<((unsigned int*)(priv->tx_fdalist_vp+TX_NUM_FDESC));){
+ printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
+ i+=4;
+ }
+
+ printk("\ndma ctl=%#x\n",readw(ETHER_DMA_CTL(dev->base_addr)));
+ printk("txfrmptr=%#x\n",readw(ETHER_TXFRMPTR(dev->base_addr)));
+ printk("txthrsh=%#x\n",readw(ETHER_TXTHRSH(dev->base_addr)));
+ printk("txpollctr=%#x\n",readw(ETHER_TXPOLLCTR(dev->base_addr)));
+ printk("blfrmptr=%#x\n",readw(ETHER_BLFRMPTR(dev->base_addr)));
+ printk("rxfragsize=%#x\n",readw(ETHER_RXFRAGSIZE(dev->base_addr)));
+ printk("tx_int_en=%#x\n",readw(ETHER_INT_EN(dev->base_addr)));
+ printk("fda_bas=%#x\n",readw(ETHER_FDA_BAS(dev->base_addr)));
+ printk("fda_lim=%#x\n",readw(ETHER_FDA_LIM(dev->base_addr)));
+ printk("int_src=%#x\n",readw(ETHER_INT_SRC(dev->base_addr)));
+ printk("pausecnt=%#x\n",readw(ETHER_PAUSECNT(dev->base_addr)));
+ printk("rempaucnt=%#x\n",readw(ETHER_REMPAUCNT(dev->base_addr)));
+ printk("txconfrmstat=%#x\n",readw(ETHER_TXCONFRMSTAT(dev->base_addr)));
+ printk("mac_ctl=%#x\n",readw(ETHER_MAC_CTL(dev->base_addr)));
+ printk("arc_ctl=%#x\n",readw(ETHER_ARC_CTL(dev->base_addr)));
+ printk("tx_ctl=%#x\n",readw(ETHER_TX_CTL(dev->base_addr)));
+}
+#endif /* ETHER00_DEBUG */
+
+
+static int ether00_write_phy(struct net_device *dev, short address, short value)
+{
+ volatile int count = 1024;
+ writew(value,ETHER_MD_DATA(dev->base_addr));
+ writew( ETHER_MD_CA_BUSY_MSK |
+ ETHER_MD_CA_WR_MSK |
+ (address & ETHER_MD_CA_ADDR_MSK),
+ ETHER_MD_CA(dev->base_addr));
+
+ /* Wait for the command to complete */
+ while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
+ count--;
+ }
+ if (!count){
+ printk(KERN_WARNING "Write to phy failed, addr=%#x, data=%#x\n",address, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ether00_read_phy(struct net_device *dev, short address)
+{
+ volatile int count = 1024;
+ writew( ETHER_MD_CA_BUSY_MSK |
+ (address & ETHER_MD_CA_ADDR_MSK),
+ ETHER_MD_CA(dev->base_addr));
+
+ /* Wait for the command to complete */
+ while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
+ count--;
+ }
+ if (!count){
+ printk(KERN_WARNING "Read from phy timed out\n");
+ return -EIO;
+ }
+ return readw(ETHER_MD_DATA(dev->base_addr));
+}
+
+static void ether00_phy_int(int irq_num, void* dev_id, struct pt_regs* regs)
+{
+ struct net_device* dev=dev_id;
+ int irq_status;
+
+ irq_status=ether00_read_phy(dev, PHY_IRQ_CONTROL);
+
+ if(irq_status & PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK){
+ /*
+ * Autonegotiation complete on epxa10db. The mac doesn't
+ * detect on its own that we're in full duplex, so we need to check
+ * the phy status register and configure the mac accordingly
+ */
+ if(ether00_read_phy(dev, PHY_STATUS)&(PHY_STATUS_10T_F_MSK|PHY_STATUS_100_X_F_MSK)){
+ int tmp;
+ tmp=readl(ETHER_MAC_CTL(dev->base_addr));
+ writel(tmp|ETHER_MAC_CTL_FULLDUP_MSK,ETHER_MAC_CTL(dev->base_addr));
+ }
+ }
+
+ if(irq_status&PHY_IRQ_CONTROL_LS_CHG_INT_MSK){
+
+ if(ether00_read_phy(dev, PHY_STATUS)& PHY_STATUS_LINK_MSK){
+ /* Link is up */
+ netif_carrier_on(dev);
+ //printk("Carrier on\n");
+ }else{
+ netif_carrier_off(dev);
+ //printk("Carrier off\n");
+
+ }
+ }
+
+}
+
+static void setup_blist_entry(struct sk_buff* skb,struct rx_blist_ent* blist_ent_ptr){
+ /* Make the buffer consistent with the cache as the mac is going to write
+ * directly into it*/
+ blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
+ blist_ent_ptr->bd.BuffData=(char*)__pa(skb->data);
+ consistent_sync(skb->data,PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ /* align IP on 16 Byte (DMA_CTL set to skip 2 bytes) */
+ skb_reserve(skb,2);
+ blist_ent_ptr->bd.BuffLength=PKT_BUF_SZ-2;
+ blist_ent_ptr->fd.FDLength=1;
+ blist_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
+ blist_ent_ptr->bd.BDCtl=BDCTL_COWNSBD_MSK;
+}
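setup_blist_entry() reserves two bytes at the head of each receive skb, matching the two-byte receive alignment programmed into DMA_CTL in ether00_reset(), so the 14-byte Ethernet header lands at offset 2 and the IP header behind it starts on a 4-byte (indeed 16-byte) boundary within the buffer. The trivial arithmetic behind this common trick is spelled out below.

#include <stdio.h>

#define RX_PAD   2              /* the two bytes reserved by setup_blist_entry() */
#define ETH_HLEN 14             /* destination + source MAC + ethertype */

int main(void)
{
        unsigned int ip_off_unpadded = ETH_HLEN;            /* 14: not 4-byte aligned */
        unsigned int ip_off_padded   = RX_PAD + ETH_HLEN;   /* 16: 4-byte aligned     */

        printf("IP header offset without padding: %u (mod 4 = %u)\n",
               ip_off_unpadded, ip_off_unpadded % 4);
        printf("IP header offset with 2-byte pad: %u (mod 4 = %u)\n",
               ip_off_padded, ip_off_padded % 4);
        return 0;
}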
+
+
+static int ether00_mem_init(struct net_device* dev)
+{
+ struct net_priv* priv=dev->priv;
+ struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr;
+ struct rx_blist_ent* blist_ent_ptr;
+ int i;
+
+ /*
+ * Grab a block of on chip SRAM to contain the control structures for
+ * the ethernet MAC. This is uncached because it needs to be accessed by both
+ * bus masters (cpu + mac). However, it shouldn't matter too much in terms
+ * of speed as it's on chip memory
+ */
+ priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE );
+ if (!priv->dma_data)
+ return -ENOMEM;
+
+ priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data;
+ /*
+ * Now share it out amongst the Frame descriptors and the buffer list
+ */
+ priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent));
+
+ /*
+ * Initialise the FDA list
+ */
+ /* set ownership to the controller */
+ memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent));
+
+ /*
+ *Initialise the buffer list
+ */
+ blist_ent_ptr=priv->rx_blist_vp;
+ i=0;
+ while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
+ struct sk_buff *skb;
+ blist_ent_ptr->fd.FDLength=1;
+ skb=dev_alloc_skb(PKT_BUF_SZ);
+ if(skb){
+ setup_blist_entry(skb,blist_ent_ptr);
+ blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1);
+ blist_ent_ptr->bd.BDStat=i++;
+ blist_ent_ptr++;
+ }
+ else
+ {
+ printk("Failed to initialise buffer list\n");
+ }
+
+ }
+ blist_ent_ptr--;
+ blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp);
+
+ priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF);
+
+ /* Initialise the buffers to be a circular list. The mac will then go poll
+ * the list until it finds a frame ready to transmit */
+ tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC;
+ for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){
+ tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1));
+ tx_fd_ptr->fd.FDCtl=1;
+ tx_fd_ptr->fd.FDStat=0;
+ tx_fd_ptr->fd.FDLength=1;
+
+ }
+ /* Change the last FDNext pointer to make a circular list */
+ tx_fd_ptr--;
+ tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp);
+
+ /* Point the device at the chain of Rx and Tx Buffers */
+ writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr));
+ writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr));
+ writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr));
+
+ writel((unsigned int)__dma_pa(priv->tx_fdalist_vp),ETHER_TXFRMPTR(dev->base_addr));
+
+ return 0;
+}
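ether00_mem_init() above carves one ioremapped block of on-chip SRAM into three consecutive arrays: RX_NUM_FDESC receive frame descriptors, RX_NUM_BUFF buffer-list entries, and TX_NUM_FDESC transmit descriptors. The sketch below redoes that carve-up with plain offsets; note that the FDA_DESC/BUF_DESC stand-ins are assumptions made only so the arithmetic compiles, not the controller's real descriptor layout from <asm/arch/ether00.h>.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the descriptor layouts; field sizes here are assumed. */
typedef struct { uint32_t FDNext, FDSystem, FDStat, FDCtl; } FDA_DESC;
typedef struct { uint32_t BuffData, BuffLength; } BUF_DESC;

struct rx_fda_ent   { FDA_DESC fd; BUF_DESC bd; BUF_DESC pad; };
struct rx_blist_ent { FDA_DESC fd; BUF_DESC bd; BUF_DESC pad; };
struct tx_fda_ent   { FDA_DESC fd; BUF_DESC bd; BUF_DESC pad; };

#define RX_NUM_FDESC 10
#define RX_NUM_BUFF  10
#define TX_NUM_FDESC 10

int main(void)
{
        /* The driver lays the three arrays out back to back inside the
         * ioremapped SRAM block, exactly as ether00_mem_init() does. */
        size_t rx_fda_off   = 0;
        size_t rx_blist_off = rx_fda_off + RX_NUM_FDESC * sizeof(struct rx_fda_ent);
        size_t tx_fda_off   = rx_blist_off + RX_NUM_BUFF * sizeof(struct rx_blist_ent);
        size_t total        = tx_fda_off + TX_NUM_FDESC * sizeof(struct tx_fda_ent);

        printf("rx descriptors at +0x%03zx\n", rx_fda_off);
        printf("rx buffer list at +0x%03zx\n", rx_blist_off);
        printf("tx descriptors at +0x%03zx\n", tx_fda_off);
        printf("control area total: %zu bytes\n", total);
        return 0;
}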
+
+
+void ether00_mem_update(void* dev_id)
+{
+ struct net_device* dev=dev_id;
+ struct net_priv* priv=dev->priv;
+ struct sk_buff* skb;
+ struct tx_fda_ent *fda_ptr=priv->tx_fdalist_vp;
+ struct rx_blist_ent* blist_ent_ptr;
+ unsigned long flags;
+
+ priv->tq_memupdate.sync=0;
+ //priv->tq_memupdate.list=
+ priv->memupdate_scheduled=0;
+
+ /* Transmit interrupt */
+ while(fda_ptr<(priv->tx_fdalist_vp+TX_NUM_FDESC)){
+ if(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && (ETHER_TX_STAT_COMP_MSK&fda_ptr->fd.FDStat)){
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
+ skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
+ //printk("%d:txcln:fda=%#x skb=%#x\n",jiffies,fda_ptr,skb);
+ dev_kfree_skb(skb);
+ fda_ptr->fd.FDSystem=0;
+ fda_ptr->fd.FDStat=0;
+ fda_ptr->fd.FDCtl=0;
+ }
+ fda_ptr++;
+ }
+ /* Fill in any missing buffers from the received queue */
+ spin_lock_irqsave(&priv->rx_lock,flags);
+ blist_ent_ptr=priv->rx_blist_vp;
+ while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
+ /* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
+ if(!blist_ent_ptr->fd.FDSystem){
+ struct sk_buff *skb;
+ skb=dev_alloc_skb(PKT_BUF_SZ);
+ blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
+ if(skb){
+ setup_blist_entry(skb,blist_ent_ptr);
+ }
+ else
+ {
+ break;
+ }
+ }
+ blist_ent_ptr++;
+ }
+ spin_unlock_irqrestore(&priv->rx_lock,flags);
+ if(priv->queue_stopped){
+ //printk("%d:cln:start q\n",jiffies);
+ netif_start_queue(dev);
+ }
+ if(priv->rx_disabled){
+ //printk("%d:enable_irq\n",jiffies);
+ priv->rx_disabled=0;
+ writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
+
+ }
+}
+
+
+static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs)
+{
+ struct net_device* dev=dev_id;
+ struct net_priv* priv=dev->priv;
+
+ unsigned int interruptValue;
+
+ interruptValue=readl(ETHER_INT_SRC(dev->base_addr));
+
+ //printk("INT_SRC=%x\n",interruptValue);
+
+ if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK))
+ {
+ return; /* Interrupt wasn't caused by us!! */
+ }
+
+ if(readl(ETHER_INT_SRC(dev->base_addr))&
+ (ETHER_INT_SRC_INTMACRX_MSK |
+ ETHER_INT_SRC_FDAEX_MSK |
+ ETHER_INT_SRC_BLEX_MSK)) {
+ struct rx_blist_ent* blist_ent_ptr;
+ struct rx_fda_ent* fda_ent_ptr;
+ struct sk_buff* skb;
+
+ fda_ent_ptr=priv->rx_fda_ptr;
+ spin_lock(&priv->rx_lock);
+ while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){
+ int result;
+
+ if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK))
+ {
+ /* This frame is ready for processing */
+ /*find the corresponding buffer in the bufferlist */
+ blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat;
+ skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem;
+
+ /* Pass this skb up the stack */
+ skb->dev=dev;
+ skb_put(skb,fda_ent_ptr->fd.FDLength);
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->ip_summed=CHECKSUM_UNNECESSARY;
+ result=netif_rx(skb);
+ /* Update statistics */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength;
+
+ /* Free the FDA entry */
+ fda_ent_ptr->bd.BDStat=0xff;
+ fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
+
+ /* Allocate a new skb and point the bd entry to it */
+ blist_ent_ptr->fd.FDSystem=0;
+ skb=dev_alloc_skb(PKT_BUF_SZ);
+ //printk("allocskb=%#x\n",skb);
+ if(skb){
+ setup_blist_entry(skb,blist_ent_ptr);
+
+ }
+ else if(!priv->memupdate_scheduled){
+ int tmp;
+ /* There are no buffers at the moment, so schedule */
+ /* the background task to sort this out */
+ schedule_task(&priv->tq_memupdate);
+ priv->memupdate_scheduled=1;
+ printk(KERN_DEBUG "%s:No buffers",dev->name);
+ /* If this interrupt was due to a lack of buffers then
+ * we'd better stop the receiver too */
+ if(interruptValue&ETHER_INT_SRC_BLEX_MSK){
+ priv->rx_disabled=1;
+ tmp=readl(ETHER_INT_SRC(dev->base_addr));
+ writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
+ printk(KERN_DEBUG "%s:Halting rx",dev->name);
+ }
+
+ }
+
+ }
+ fda_ent_ptr++;
+ }
+ spin_unlock(&priv->rx_lock);
+
+ /* Clear the interrupts */
+ writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK
+ | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr));
+
+ }
+
+ if(readl(ETHER_INT_SRC(dev->base_addr))&ETHER_INT_SRC_INTMACTX_MSK){
+
+ if(!priv->memupdate_scheduled){
+ schedule_task(&priv->tq_memupdate);
+ priv->memupdate_scheduled=1;
+ }
+ /* Clear the interrupt */
+ writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr));
+ }
+
+ if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK|
+ ETHER_INT_SRC_INTEARNOT_MSK|
+ ETHER_INT_SRC_INTLINK_MSK|
+ ETHER_INT_SRC_INTEXBD_MSK|
+ ETHER_INT_SRC_INTTXCTLCMP_MSK))
+ {
+ /*
+ * Not using any of these so they shouldn't happen
+ *
+ * In the case of INTEXBD - if you allocate more
+ * than 28 descriptors you may need to think about this
+ */
+ printk("Not using this interrupt\n");
+ }
+
+ if (readl(ETHER_INT_SRC(dev->base_addr)) &
+ (ETHER_INT_SRC_INTSBUS_MSK |
+ ETHER_INT_SRC_INTNRABT_MSK
+ |ETHER_INT_SRC_DMPARERR_MSK))
+ {
+ /*
+ * Hardware errors: we can either ignore them and hope they go away,
+ * or reset the device. I'll try the first for now to see if they happen
+ */
+ printk("Hardware error\n");
+ }
+}
+
+static void ether00_setup_ethernet_address(struct net_device* dev)
+{
+ int tmp;
+
+ dev->addr_len=6;
+ writew(0,ETHER_ARC_ADR(dev->base_addr));
+ writel((dev->dev_addr[0]<<24) |
+ (dev->dev_addr[1]<<16) |
+ (dev->dev_addr[2]<<8) |
+ dev->dev_addr[3],
+ ETHER_ARC_DATA(dev->base_addr));
+
+ writew(4,ETHER_ARC_ADR(dev->base_addr));
+ tmp=readl(ETHER_ARC_DATA(dev->base_addr));
+ tmp&=0xffff;
+ tmp|=(dev->dev_addr[4]<<24) | (dev->dev_addr[5]<<16);
+ writel(tmp, ETHER_ARC_DATA(dev->base_addr));
+ /* Enable this entry in the ARC */
+
+ writel(1,ETHER_ARC_ENA(dev->base_addr));
+
+ return;
+}
+
+
+static void ether00_reset(struct net_device *dev)
+{
+ /* reset the controller */
+ writew(ETHER_MAC_CTL_RESET_MSK,ETHER_MAC_CTL(dev->base_addr));
+
+ /*
+ * Make sure we're not going to send anything
+ */
+
+ writew(ETHER_TX_CTL_TXHALT_MSK,ETHER_TX_CTL(dev->base_addr));
+
+ /*
+ * Make sure we're not going to receive anything
+ */
+ writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));
+
+ /*
+ * Disable Interrupts for now, and set the burst size to 8 bytes
+ */
+
+ writel(ETHER_DMA_CTL_INTMASK_MSK |
+ ((8 << ETHER_DMA_CTL_DMBURST_OFST) & ETHER_DMA_CTL_DMBURST_MSK)
+ |(2<<ETHER_DMA_CTL_RXALIGN_OFST),
+ ETHER_DMA_CTL(dev->base_addr));
+
+
+ /*
+ * Set TxThrsh - start transmitting a packet after 1514
+ * bytes or when a packet is complete, whichever comes first
+ */
+ writew(1514,ETHER_TXTHRSH(dev->base_addr));
+
+ /*
+ * Set TxPollCtr. Each cycle is
+ * 61.44 microseconds with a 33 MHz bus
+ */
+ writew(1,ETHER_TXPOLLCTR(dev->base_addr));
+
+ /*
+ * Set Rx_Ctl - Turn off reception and let RxData turn it
+ * on later
+ */
+ writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));
+
+}
+
+
+static void ether00_set_multicast(struct net_device* dev)
+{
+ int count=dev->mc_count;
+
+ /* Set promiscuous mode if it's asked for. */
+
+ if (dev->flags&IFF_PROMISC){
+
+ writew( ETHER_ARC_CTL_COMPEN_MSK |
+ ETHER_ARC_CTL_BROADACC_MSK |
+ ETHER_ARC_CTL_GROUPACC_MSK |
+ ETHER_ARC_CTL_STATIONACC_MSK,
+ ETHER_ARC_CTL(dev->base_addr));
+ return;
+ }
+
+ /*
+ * Get all multicast packets if required, or if there are too
+ * many addresses to fit in hardware
+ */
+ if (dev->flags & IFF_ALLMULTI){
+ writew( ETHER_ARC_CTL_COMPEN_MSK |
+ ETHER_ARC_CTL_GROUPACC_MSK |
+ ETHER_ARC_CTL_BROADACC_MSK,
+ ETHER_ARC_CTL(dev->base_addr));
+ return;
+ }
+ if (dev->mc_count > (ETHER_ARC_SIZE - 1)){
+
+ printk(KERN_WARNING "Too many multicast addresses for hardware to filter - receiving all multicast packets\n");
+ writew( ETHER_ARC_CTL_COMPEN_MSK |
+ ETHER_ARC_CTL_GROUPACC_MSK |
+ ETHER_ARC_CTL_BROADACC_MSK,
+ ETHER_ARC_CTL(dev->base_addr));
+ return;
+ }
+
+ if(dev->mc_count){
+ struct dev_mc_list *mc_list_ent=dev->mc_list;
+ unsigned int temp,i;
+ DEBUG(printk("mc_count=%d mc_list=%#x\n",dev-> mc_count, dev->mc_list));
+ DEBUG(printk("mc addr=%02#x%02x%02x%02x%02x%02x\n",
+ mc_list_ent->dmi_addr[5],
+ mc_list_ent->dmi_addr[4],
+ mc_list_ent->dmi_addr[3],
+ mc_list_ent->dmi_addr[2],
+ mc_list_ent->dmi_addr[1],
+ mc_list_ent->dmi_addr[0]);)
+
+ /*
+ * The first 6 bytes are the MAC address, so
+ * don't change them!
+ */
+ writew(4,ETHER_ARC_ADR(dev->base_addr));
+ temp=readl(ETHER_ARC_DATA(dev->base_addr));
+ temp&=0xffff0000;
+
+ /* Disable the current multicast stuff */
+ writel(1,ETHER_ARC_ENA(dev->base_addr));
+
+ for(;;){
+ temp|=mc_list_ent->dmi_addr[1] |
+ mc_list_ent->dmi_addr[0]<<8;
+ writel(temp,ETHER_ARC_DATA(dev->base_addr));
+
+ i=readl(ETHER_ARC_ADR(dev->base_addr));
+ writew(i+4,ETHER_ARC_ADR(dev->base_addr));
+
+ temp=mc_list_ent->dmi_addr[5]|
+ mc_list_ent->dmi_addr[4]<<8 |
+ mc_list_ent->dmi_addr[3]<<16 |
+ mc_list_ent->dmi_addr[2]<<24;
+ writel(temp,ETHER_ARC_DATA(dev->base_addr));
+
+ count--;
+ if(!mc_list_ent->next || !count){
+ break;
+ }
+ DEBUG(printk("mc_list_next=%#x\n",mc_list_ent->next);)
+ mc_list_ent=mc_list_ent->next;
+
+
+ i=readl(ETHER_ARC_ADR(dev->base_addr));
+ writel(i+4,ETHER_ARC_ADR(dev->base_addr));
+
+ temp=mc_list_ent->dmi_addr[3]|
+ mc_list_ent->dmi_addr[2]<<8 |
+ mc_list_ent->dmi_addr[1]<<16 |
+ mc_list_ent->dmi_addr[0]<<24;
+ writel(temp,ETHER_ARC_DATA(dev->base_addr));
+
+ i=readl(ETHER_ARC_ADR(dev->base_addr));
+ writel(i+4,ETHER_ARC_ADR(dev->base_addr));
+
+ temp=mc_list_ent->dmi_addr[4]<<16 |
+ mc_list_ent->dmi_addr[5]<<24;
+
+ writel(temp,ETHER_ARC_DATA(dev->base_addr));
+
+ count--;
+ if(!mc_list_ent->next || !count){
+ break;
+ }
+ mc_list_ent=mc_list_ent->next;
+ }
+
+
+ if(count)
+ printk(KERN_WARNING "Multicast list size error\n");
+
+
+ writew( ETHER_ARC_CTL_BROADACC_MSK|
+ ETHER_ARC_CTL_COMPEN_MSK,
+ ETHER_ARC_CTL(dev->base_addr));
+
+ }
+
+ /* enable the active ARC entries */
+ writew((1<<(count+2))-1,ETHER_ARC_ENA(dev->base_addr));
+}
+
+
+static int ether00_open(struct net_device* dev)
+{
+ int result,tmp;
+ struct net_priv* priv;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EINVAL;
+
+ /* Install interrupt handlers */
+ result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
+ if(result)
+ goto open_err1;
+
+ result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
+ if(result)
+ goto open_err2;
+
+ ether00_reset(dev);
+ result=ether00_mem_init(dev);
+ if(result)
+ goto open_err3;
+
+
+ ether00_setup_ethernet_address(dev);
+
+ ether00_set_multicast(dev);
+
+ result=ether00_write_phy(dev,PHY_CONTROL, PHY_CONTROL_ANEGEN_MSK | PHY_CONTROL_RANEG_MSK);
+ if(result)
+ goto open_err4;
+ result=ether00_write_phy(dev,PHY_IRQ_CONTROL, PHY_IRQ_CONTROL_LS_CHG_IE_MSK |
+ PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK);
+ if(result)
+ goto open_err4;
+
+ /* Start the device and enable interrupts */
+ writew(ETHER_RX_CTL_RXEN_MSK
+// | ETHER_RX_CTL_STRIPCRC_MSK
+ | ETHER_RX_CTL_ENGOOD_MSK
+ | ETHER_RX_CTL_ENRXPAR_MSK| ETHER_RX_CTL_ENLONGERR_MSK
+ | ETHER_RX_CTL_ENOVER_MSK| ETHER_RX_CTL_ENCRCERR_MSK,
+ ETHER_RX_CTL(dev->base_addr));
+
+ writew(ETHER_TX_CTL_TXEN_MSK|
+ ETHER_TX_CTL_ENEXDEFER_MSK|
+ ETHER_TX_CTL_ENLCARR_MSK|
+ ETHER_TX_CTL_ENEXCOLL_MSK|
+ ETHER_TX_CTL_ENLATECOLL_MSK|
+ ETHER_TX_CTL_ENTXPAR_MSK|
+ ETHER_TX_CTL_ENCOMP_MSK,
+ ETHER_TX_CTL(dev->base_addr));
+
+ tmp=readl(ETHER_DMA_CTL(dev->base_addr));
+ writel(tmp&~ETHER_DMA_CTL_INTMASK_MSK,ETHER_DMA_CTL(dev->base_addr));
+
+ return 0;
+
+ open_err4:
+ ether00_reset(dev);
+ open_err3:
+ free_irq(2,dev);
+ open_err2:
+ free_irq(dev->irq,dev);
+ open_err1:
+ return result;
+
+}
+
+
+static int ether00_tx(struct sk_buff* skb, struct net_device* dev)
+{
+ struct net_priv *priv=dev->priv;
+ struct tx_fda_ent *fda_ptr;
+ int i;
+
+
+ /*
+ * Find an empty slot in which to stick the frame
+ */
+ fda_ptr=(struct tx_fda_ent*)__dma_va(readl(ETHER_TXFRMPTR(dev->base_addr)));
+ i=0;
+ while(i<TX_NUM_FDESC){
+ if (fda_ptr->fd.FDStat||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
+ fda_ptr =(struct tx_fda_ent*) __dma_va((struct tx_fda_ent*)fda_ptr->fd.FDNext);
+ }
+ else {
+ break;
+ }
+ i++;
+ }
+
+ /* Write the skb data from the cache*/
+ consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE);
+ fda_ptr->bd.BuffData=(char*)__pa(skb->data);
+ fda_ptr->bd.BuffLength=(unsigned short)skb->len;
+ /* Save the pointer to the skb for freeing later */
+ fda_ptr->fd.FDSystem=(unsigned int)skb;
+ fda_ptr->fd.FDStat=0;
+ /* Pass ownership of the buffers to the controller */
+ fda_ptr->fd.FDCtl=1;
+ fda_ptr->fd.FDCtl|=FDCTL_COWNSFD_MSK;
+
+ /* If the next buffer in the list is full, stop the queue */
+ fda_ptr=(struct tx_fda_ent*)__dma_va(fda_ptr->fd.FDNext);
+ if ((fda_ptr->fd.FDStat)||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
+ netif_stop_queue(dev);
+ priv->queue_stopped=1;
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *ether00_stats(struct net_device* dev)
+{
+ struct net_priv *priv=dev->priv;
+ return &priv->stats;
+}
+
+
+static int ether00_stop(struct net_device* dev)
+{
+ struct net_priv *priv=dev->priv;
+ int tmp;
+
+ /* Stop/disable the device. */
+ tmp=readw(ETHER_RX_CTL(dev->base_addr));
+ tmp&=~(ETHER_RX_CTL_RXEN_MSK | ETHER_RX_CTL_ENGOOD_MSK);
+ tmp|=ETHER_RX_CTL_RXHALT_MSK;
+ writew(tmp,ETHER_RX_CTL(dev->base_addr));
+
+ tmp=readl(ETHER_TX_CTL(dev->base_addr));
+ tmp&=~ETHER_TX_CTL_TXEN_MSK;
+ tmp|=ETHER_TX_CTL_TXHALT_MSK;
+ writel(tmp,ETHER_TX_CTL(dev->base_addr));
+
+ /* Free up system resources */
+ free_irq(dev->irq,dev);
+ free_irq(2,dev);
+ iounmap(priv->dma_data);
+
+ return 0;
+}
+
+
+static void ether00_get_ethernet_address(struct net_device* dev)
+{
+ struct mtd_info *mymtd=NULL;
+ int i;
+ size_t retlen;
+
+ /*
+ * For the Epxa10 dev board (camelot), the ethernet MAC
+ * address is of the form 00:aa:aa:00:xx:xx where
+ * 00:aa:aa is the Altera vendor ID and xx:xx is the
+ * last 2 bytes of the board serial number, as programmed
+ * into the OTP area of the flash device on EBI1. If this
+ * isn't an epxa10 dev board, or there's no MTD support to
+ * read the serial number from flash, then we'll force the
+ * user to set their own mac address using ifconfig.
+ */
+
+#ifdef CONFIG_ARCH_CAMELOT
+#ifdef CONFIG_MTD
+ /* get the mtd_info structure for the first mtd device*/
+ for(i=0;i<MAX_MTD_DEVICES;i++){
+ mymtd=get_mtd_device(NULL,i);
+ if(!mymtd||!strcmp(mymtd->name,"EPXA10DB flash"))
+ break;
+ }
+
+ if(!mymtd || !mymtd->read_user_prot_reg){
+ printk(KERN_WARNING "%s: Failed to read MAC address from flash\n",dev->name);
+ }else{
+ mymtd->read_user_prot_reg(mymtd,2,1,&retlen,&dev->dev_addr[5]);
+ mymtd->read_user_prot_reg(mymtd,3,1,&retlen,&dev->dev_addr[4]);
+ dev->dev_addr[3]=0;
+ dev->dev_addr[2]=vendor_id[1];
+ dev->dev_addr[1]=vendor_id[0];
+ dev->dev_addr[0]=0;
+ }
+#else
+ printk(KERN_WARNING "%s: MTD support required to read MAC address from EPXA10 dev board\n", dev->name);
+#endif
+#endif
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ printk("%s: Invalid ethernet MAC address. Please set using "
+ "ifconfig\n", dev->name);
+
+}
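+
+/*
+ * Illustrative aside (not part of the original driver): the MAC address
+ * layout described in the comment above - Altera vendor prefix 00:aa:aa,
+ * a zero byte, then the last two bytes of the board serial number - can
+ * be sketched in plain C as below.  The helper name and the way the two
+ * serial bytes arrive are assumptions made purely for this example.
+ *
+ *	static void build_camelot_mac(unsigned char mac[6],
+ *				      unsigned char serial_hi,
+ *				      unsigned char serial_lo)
+ *	{
+ *		mac[0] = 0x00;
+ *		mac[1] = 0xaa;		// Altera vendor prefix 00:aa:aa
+ *		mac[2] = 0xaa;
+ *		mac[3] = 0x00;
+ *		mac[4] = serial_hi;	// last two bytes of the serial number
+ *		mac[5] = serial_lo;
+ *	}
+ */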
+
+/*
+ * Keep a mapping of dev_info addresses -> port lines to use when
+ * removing ports. dev==NULL indicates an unused entry.
+ */
+
+
+static struct net_device* dev_list[ETH_NR];
+
+static int ether00_add_device(struct pldhs_dev_info* dev_info,void* dev_ps_data)
+{
+ struct net_device *dev;
+ struct net_priv *priv;
+ void *map_addr;
+ int result;
+ int i;
+
+ i=0;
+	while(i < ETH_NR && dev_list[i])
+ i++;
+
+ if(i==ETH_NR){
+ printk(KERN_WARNING "ether00: Maximum number of ports reached\n");
+ return 0;
+ }
+
+
+ if (!request_mem_region(dev_info->base_addr, MAC_REG_SIZE, "ether00"))
+ return -EBUSY;
+
+ dev = alloc_etherdev(sizeof(struct net_priv));
+ if(!dev) {
+ result = -ENOMEM;
+ goto out_release;
+ }
+ priv = dev->priv;
+
+ priv->tq_memupdate.routine=ether00_mem_update;
+ priv->tq_memupdate.data=(void*) dev;
+
+ spin_lock_init(&priv->rx_lock);
+
+ map_addr=ioremap_nocache(dev_info->base_addr,SZ_4K);
+ if(!map_addr){
+ result = -ENOMEM;
+		goto out_kfree;
+ }
+
+ dev->open=ether00_open;
+ dev->stop=ether00_stop;
+ dev->set_multicast_list=ether00_set_multicast;
+ dev->hard_start_xmit=ether00_tx;
+ dev->get_stats=ether00_stats;
+
+ ether00_get_ethernet_address(dev);
+
+ SET_MODULE_OWNER(dev);
+
+ dev->base_addr=(unsigned int)map_addr;
+ dev->irq=dev_info->irq;
+ dev->features=NETIF_F_DYNALLOC | NETIF_F_HW_CSUM;
+
+ result=register_netdev(dev);
+ if(result){
+ printk("Ether00: Error %i registering driver\n",result);
+ goto out_unmap;
+ }
+ printk("registered ether00 device at %#x\n",dev_info->base_addr);
+
+ dev_list[i]=dev;
+
+ return result;
+
+ out_unmap:
+ iounmap(map_addr);
+ out_kfree:
+ free_netdev(dev);
+ out_release:
+ release_mem_region(dev_info->base_addr, MAC_REG_SIZE);
+ return result;
+}
+
+
+static int ether00_remove_devices(void)
+{
+ int i;
+
+ for(i=0;i<ETH_NR;i++){
+ if(dev_list[i]){
+ netif_device_detach(dev_list[i]);
+ unregister_netdev(dev_list[i]);
+ iounmap((void*)dev_list[i]->base_addr);
+ release_mem_region(dev_list[i]->base_addr, MAC_REG_SIZE);
+ free_netdev(dev_list[i]);
+ dev_list[i]=0;
+ }
+ }
+ return 0;
+}
+
+static struct pld_hotswap_ops ether00_pldhs_ops={
+ .name = ETHER00_NAME,
+ .add_device = ether00_add_device,
+ .remove_devices = ether00_remove_devices,
+};
+
+
+static void __exit ether00_cleanup_module(void)
+{
+ int result;
+ result=ether00_remove_devices();
+ if(result)
+ printk(KERN_WARNING "ether00: failed to remove all devices\n");
+
+ pldhs_unregister_driver(ETHER00_NAME);
+}
+module_exit(ether00_cleanup_module);
+
+
+static int __init ether00_mod_init(void)
+{
+ printk("mod init\n");
+ return pldhs_register_driver(&ether00_pldhs_ops);
+
+}
+
+module_init(ether00_mod_init);
+
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
new file mode 100644
index 000000000000..36475eb2727f
--- /dev/null
+++ b/drivers/net/arm/ether1.c
@@ -0,0 +1,1110 @@
+/*
+ * linux/drivers/acorn/net/ether1.c
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Acorn ether1 driver (82586 chip) for Acorn machines
+ *
+ * We basically keep two queues in the card's memory - one for transmit
+ * and one for receive. Each has a head and a tail. The head is where
+ * we/the chip adds packets to be transmitted/received, and the tail
+ * is where the transmitter has got to/where the receiver will stop.
+ * Both of these queues are circular, and since the chip is running
+ * all the time, we have to be careful when we modify the pointers etc
+ * so that the buffer memory contents are valid all the time.
+ *
+ * Change log:
+ * 1.00 RMK Released
+ * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now.
+ * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
+ * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
+ * Should prevent lockup.
+ * 1.04 RMK 17/09/1997 Added more info when initialisation of chip goes wrong.
+ * TDR now only reports failure when chip reports non-zero
+ * TDR time-distance.
+ * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
+ * 1.06 RMK 10/02/2000 Updated for 2.3.43
+ * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define __ETHER1_C
+#include "ether1.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+#define BUFFER_SIZE 0x10000
+#define TX_AREA_START 0x00100
+#define TX_AREA_END 0x05000
+#define RX_AREA_START 0x05000
+#define RX_AREA_END 0x0fc00
+
+static int ether1_open(struct net_device *dev);
+static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ether1_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int ether1_close(struct net_device *dev);
+static struct net_device_stats *ether1_getstats(struct net_device *dev);
+static void ether1_setmulticastlist(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev);
+
+/* ------------------------------------------------------------------------- */
+
+static char version[] __initdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+
+#define BUS_16 16
+#define BUS_8 8
+
+/* ------------------------------------------------------------------------- */
+
+#define DISABLEIRQS 1
+#define NORMALIRQS 0
+
+#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
+#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)
+
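+/*
+ * Aside for the reader (not part of the original driver): the address
+ * arithmetic in the two macros above is the classic "member offset via a
+ * null pointer" idiom.  A minimal standalone sketch of the same idea
+ * using the standard offsetof() macro; the structure and names here are
+ * invented for the example.
+ *
+ *	#include <stddef.h>
+ *	#include <stdio.h>
+ *
+ *	struct example_scb {
+ *		unsigned short status;
+ *		unsigned short command;
+ *	};
+ *
+ *	int main(void)
+ *	{
+ *		// base + offsetof(...) is what ether1_readw/ether1_writew
+ *		// compute with (int)(&((type *)0)->offset).
+ *		unsigned int base = 0x0000;
+ *		printf("command word at card offset %u\n",
+ *		       base + (unsigned int)offsetof(struct example_scb, command));
+ *		return 0;
+ *	}
+ */
+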
+static inline unsigned short
+ether1_inw_p (struct net_device *dev, int addr, int svflgs)
+{
+ unsigned long flags;
+ unsigned short ret;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+ return ret;
+}
+
+static inline void
+ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
+{
+ unsigned long flags;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ writew(val, ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+}
+
+/*
+ * Some inline assembler to allow fast transfers on to/off of the card.
+ * Since this driver depends on features specific to the ARM
+ * architecture, and you can't configure this driver
+ * without specifying ARM mode, this is not a problem.
+ *
+ * These routines are essentially an optimised memcpy between
+ * kernel memory and the card's onboard RAM.
+ */
+static void
+ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%1]\n\
+ streqb %0, [%2]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
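+
+/*
+ * Illustrative sketch (not part of the original driver): a portable C
+ * rendering of what the inline assembler above does within one page.
+ * Each 16-bit halfword of source data is written as a 32-bit value with
+ * the halfword duplicated into both halves (the card spaces its 16-bit
+ * words at 4-byte intervals), and a trailing odd byte is copied last.
+ * The pointer types and function name are assumptions for the example;
+ * the real code works on the ioremapped ETHER1_RAM window.
+ *
+ *	#include <stdint.h>
+ *	#include <stddef.h>
+ *
+ *	static void copy_halfwords_duplicated(volatile uint32_t *window,
+ *					      const uint8_t *src, size_t len)
+ *	{
+ *		while (len >= 2) {
+ *			uint16_t hw = (uint16_t)(src[0] | (src[1] << 8));
+ *			*window++ = ((uint32_t)hw << 16) | hw;
+ *			src += 2;
+ *			len -= 2;
+ *		}
+ *		if (len)			// odd trailing byte
+ *			*(volatile uint8_t *)window = *src;
+ *	}
+ */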
+
+static void
+ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%2]\n\
+ streqb %0, [%1]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
+
+static int __init
+ether1_ramtest(struct net_device *dev, unsigned char byte)
+{
+ unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
+ int i, ret = BUFFER_SIZE;
+ int max_errors = 15;
+ int bad = -1;
+ int bad_start = 0;
+
+ if (!buffer)
+ return 1;
+
+ memset (buffer, byte, BUFFER_SIZE);
+ ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE);
+ memset (buffer, byte ^ 0xff, BUFFER_SIZE);
+ ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE);
+
+ for (i = 0; i < BUFFER_SIZE; i++) {
+ if (buffer[i] != byte) {
+ if (max_errors >= 0 && bad != buffer[i]) {
+ if (bad != -1)
+ printk ("\n");
+ printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X",
+ dev->name, buffer[i], byte, i);
+ ret = -ENODEV;
+ max_errors --;
+ bad = buffer[i];
+ bad_start = i;
+ }
+ } else {
+ if (bad != -1) {
+ if (bad_start == i - 1)
+ printk ("\n");
+ else
+ printk (" - 0x%04X\n", i - 1);
+ bad = -1;
+ }
+ }
+ }
+
+ if (bad != -1)
+ printk (" - 0x%04X\n", BUFFER_SIZE);
+ kfree (buffer);
+
+ return ret;
+}
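+
+/*
+ * Illustrative sketch (not part of the original driver): the RAM test
+ * above writes a pattern through the card window, scribbles over its
+ * local copy, reads the window back and reports mismatches.  A minimal
+ * standalone version of the same idea over an ordinary array standing in
+ * for the card RAM (all names here are invented for the example).
+ *
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *
+ *	static unsigned char fake_card_ram[256];
+ *
+ *	static int ramtest(unsigned char byte)
+ *	{
+ *		unsigned char buf[sizeof(fake_card_ram)];
+ *		size_t i;
+ *
+ *		memset(buf, byte, sizeof(buf));
+ *		memcpy(fake_card_ram, buf, sizeof(buf));	// "writebuffer"
+ *		memset(buf, byte ^ 0xff, sizeof(buf));		// forget the copy
+ *		memcpy(buf, fake_card_ram, sizeof(buf));	// "readbuffer"
+ *
+ *		for (i = 0; i < sizeof(buf); i++) {
+ *			if (buf[i] != byte) {
+ *				printf("mismatch at 0x%zx\n", i);
+ *				return -1;
+ *			}
+ *		}
+ *		return 0;
+ *	}
+ */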
+
+static int
+ether1_reset (struct net_device *dev)
+{
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+ return BUS_16;
+}
+
+static int __init
+ether1_init_2(struct net_device *dev)
+{
+ int i;
+ dev->mem_start = 0;
+
+ i = ether1_ramtest (dev, 0x5a);
+
+ if (i > 0)
+ i = ether1_ramtest (dev, 0x1e);
+
+ if (i <= 0)
+ return -ENODEV;
+
+ dev->mem_end = i;
+ return 0;
+}
+
+/*
+ * These are the structures that are loaded into the ether RAM card to
+ * initialise the 82586
+ */
+
+/* at 0x0100 */
+#define NOP_ADDR (TX_AREA_START)
+#define NOP_SIZE (0x06)
+static nop_t init_nop = {
+ 0,
+ CMD_NOP,
+ NOP_ADDR
+};
+
+/* at 0x003a */
+#define TDR_ADDR (0x003a)
+#define TDR_SIZE (0x08)
+static tdr_t init_tdr = {
+ 0,
+ CMD_TDR | CMD_INTR,
+ NOP_ADDR,
+ 0
+};
+
+/* at 0x002e */
+#define MC_ADDR (0x002e)
+#define MC_SIZE (0x0c)
+static mc_t init_mc = {
+ 0,
+ CMD_SETMULTICAST,
+ TDR_ADDR,
+ 0,
+ { { 0, } }
+};
+
+/* at 0x0022 */
+#define SA_ADDR (0x0022)
+#define SA_SIZE (0x0c)
+static sa_t init_sa = {
+ 0,
+ CMD_SETADDRESS,
+ MC_ADDR,
+ { 0, }
+};
+
+/* at 0x0010 */
+#define CFG_ADDR (0x0010)
+#define CFG_SIZE (0x12)
+static cfg_t init_cfg = {
+ 0,
+ CMD_CONFIG,
+ SA_ADDR,
+ 8,
+ 8,
+ CFG8_SRDY,
+ CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6),
+ 0,
+ 0x60,
+ 0,
+ CFG13_RETRY(15) | CFG13_SLOTH(2),
+ 0,
+};
+
+/* at 0x0000 */
+#define SCB_ADDR (0x0000)
+#define SCB_SIZE (0x10)
+static scb_t init_scb = {
+ 0,
+ SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX,
+ CFG_ADDR,
+ RX_AREA_START,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* at 0xffee */
+#define ISCP_ADDR (0xffee)
+#define ISCP_SIZE (0x08)
+static iscp_t init_iscp = {
+ 1,
+ SCB_ADDR,
+ 0x0000,
+ 0x0000
+};
+
+/* at 0xfff6 */
+#define SCP_ADDR (0xfff6)
+#define SCP_SIZE (0x0a)
+static scp_t init_scp = {
+ SCP_SY_16BBUS,
+ { 0, 0 },
+ ISCP_ADDR,
+ 0
+};
+
+#define RFD_SIZE (0x16)
+static rfd_t init_rfd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, },
+ { 0, },
+ 0
+};
+
+#define RBD_SIZE (0x0a)
+static rbd_t init_rbd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ ETH_FRAME_LEN + 8
+};
+
+#define TX_SIZE (0x08)
+#define TBD_SIZE (0x08)
+
+static int
+ether1_init_for_open (struct net_device *dev)
+{
+ int i, status, addr, next, next2;
+ int failures = 0;
+ unsigned long timeout;
+
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+
+ for (i = 0; i < 6; i++)
+ init_sa.sa_addr[i] = dev->dev_addr[i];
+
+ /* load data structures into ether1 RAM */
+ ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE);
+ ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
+ ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE);
+ ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE);
+ ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE);
+ ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE);
+ ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE);
+ ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE);
+
+ if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
+ printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
+ dev->name);
+ return 1;
+ }
+
+ /*
+ * setup circularly linked list of { rfd, rbd, buffer }, with
+ * all rfds circularly linked, rbds circularly linked.
+ * First rfd is linked to scp, first rbd is linked to first
+ * rfd. Last rbd has a suspend command.
+ */
+ addr = RX_AREA_START;
+ do {
+ next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+ next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+
+ if (next2 >= RX_AREA_END) {
+ next = RX_AREA_START;
+ init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
+ priv(dev)->rx_tail = addr;
+ } else
+ init_rfd.rfd_command = 0;
+ if (addr == RX_AREA_START)
+ init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
+ else
+ init_rfd.rfd_rbdoffset = 0;
+ init_rfd.rfd_link = next;
+ init_rbd.rbd_link = next + RFD_SIZE;
+ init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;
+
+ ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
+ ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
+ addr = next;
+ } while (next2 < RX_AREA_END);
+
+ priv(dev)->tx_link = NOP_ADDR;
+ priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
+ priv(dev)->tx_tail = TDR_ADDR;
+ priv(dev)->rx_head = RX_AREA_START;
+
+ /* release reset & give 586 a prod */
+ priv(dev)->resetting = 1;
+ priv(dev)->initialising = 1;
+ writeb(CTRL_RST, REG_CONTROL);
+ writeb(0, REG_CONTROL);
+ writeb(CTRL_CA, REG_CONTROL);
+
+ /* 586 should now unset iscp.busy */
+ timeout = jiffies + HZ/2;
+ while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
+ if (time_after(jiffies, timeout)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name);
+ return 1;
+ }
+ }
+
+ /* check status of commands that we issued */
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ;
+ while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ } else {
+ status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
+ if (status & TDR_XCVRPROB)
+ printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name);
+ else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
+#ifdef FANCY
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10,
+ (status & TDR_TIME) % 10);
+#else
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME));
+#endif
+ }
+ }
+
+ if (failures)
+ ether1_reset (dev);
+ return failures ? 1 : 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static int
+ether1_txalloc (struct net_device *dev, int size)
+{
+ int start, tail;
+
+ size = (size + 1) & ~1;
+ tail = priv(dev)->tx_tail;
+
+ if (priv(dev)->tx_head + size > TX_AREA_END) {
+ if (tail > priv(dev)->tx_head)
+ return -1;
+ start = TX_AREA_START;
+ if (start + size > tail)
+ return -1;
+ priv(dev)->tx_head = start + size;
+ } else {
+ if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail)
+ return -1;
+ start = priv(dev)->tx_head;
+ priv(dev)->tx_head += size;
+ }
+
+ return start;
+}
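+
+/*
+ * Illustrative sketch (not part of the original driver): ether1_txalloc()
+ * above is a bump allocator over the circular window [TX_AREA_START,
+ * TX_AREA_END) that refuses allocations which would overrun the tail
+ * pointer.  The same rule written as standalone C, with the state held in
+ * file-scope variables invented for the example (the driver keeps them in
+ * priv(dev)).
+ *
+ *	#define AREA_START	0x0100
+ *	#define AREA_END	0x5000
+ *
+ *	static int head = AREA_START, tail = AREA_START;
+ *
+ *	static int circ_alloc(int size)
+ *	{
+ *		int start;
+ *
+ *		size = (size + 1) & ~1;		// keep 16-bit alignment
+ *		if (head + size > AREA_END) {	// would run off the end
+ *			if (tail > head)
+ *				return -1;	// tail is ahead of us: no room
+ *			start = AREA_START;	// wrap back to the start
+ *			if (start + size > tail)
+ *				return -1;	// would overrun the tail
+ *			head = start + size;
+ *		} else {
+ *			if (head < tail && head + size > tail)
+ *				return -1;	// would overrun the tail
+ *			start = head;
+ *			head += size;
+ *		}
+ *		return start;			// card offset of the block
+ *	}
+ */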
+
+static int
+ether1_open (struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
+ return -EAGAIN;
+
+ memset (&priv(dev)->stats, 0, sizeof (struct net_device_stats));
+
+ if (ether1_init_for_open (dev)) {
+ free_irq (dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void
+ether1_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
+ dev->name);
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset (dev);
+
+ if (ether1_init_for_open (dev))
+ printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
+
+ priv(dev)->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int
+ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
+{
+ int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
+ unsigned long flags;
+ tx_t tx;
+ tbd_t tbd;
+ nop_t nop;
+
+ if (priv(dev)->restart) {
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset(dev);
+
+ if (ether1_init_for_open(dev))
+ printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
+ else
+ priv(dev)->restart = 0;
+ }
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ goto out;
+ }
+
+ /*
+ * insert packet followed by a nop
+ */
+ txaddr = ether1_txalloc (dev, TX_SIZE);
+ tbdaddr = ether1_txalloc (dev, TBD_SIZE);
+ dataddr = ether1_txalloc (dev, skb->len);
+ nopaddr = ether1_txalloc (dev, NOP_SIZE);
+
+ tx.tx_status = 0;
+ tx.tx_command = CMD_TX | CMD_INTR;
+ tx.tx_link = nopaddr;
+ tx.tx_tbdoffset = tbdaddr;
+ tbd.tbd_opts = TBD_EOL | skb->len;
+ tbd.tbd_link = I82586_NULL;
+ tbd.tbd_bufl = dataddr;
+ tbd.tbd_bufh = 0;
+ nop.nop_status = 0;
+ nop.nop_command = CMD_NOP;
+ nop.nop_link = nopaddr;
+
+ local_irq_save(flags);
+ ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
+ ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
+ ether1_writebuffer (dev, skb->data, dataddr, skb->len);
+ ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
+ tmp = priv(dev)->tx_link;
+ priv(dev)->tx_link = nopaddr;
+
+ /* now reset the previous nop pointer */
+ ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);
+
+ local_irq_restore(flags);
+
+ /* handle transmit */
+ dev->trans_start = jiffies;
+
+ /* check to see if we have room for a full sized ether frame */
+ tmp = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = tmp;
+ dev_kfree_skb (skb);
+
+ if (tst == -1)
+ netif_stop_queue(dev);
+
+ out:
+ return 0;
+}
+
+static void
+ether1_xmit_done (struct net_device *dev)
+{
+ nop_t nop;
+ int caddr, tst;
+
+ caddr = priv(dev)->tx_tail;
+
+again:
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+
+ switch (nop.nop_command & CMD_MASK) {
+ case CMD_TDR:
+ /* special case */
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
+ scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ priv(dev)->tx_tail = NOP_ADDR;
+ return;
+
+ case CMD_NOP:
+ if (nop.nop_link == caddr) {
+ if (priv(dev)->initialising == 0)
+ printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
+ else
+ priv(dev)->initialising = 0;
+ return;
+ }
+ if (caddr == nop.nop_link)
+ return;
+ caddr = nop.nop_link;
+ goto again;
+
+ case CMD_TX:
+ if (nop.nop_status & STAT_COMPLETE)
+ break;
+ printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
+ priv(dev)->restart = 1;
+ return;
+
+ default:
+ printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)", dev->name,
+ nop.nop_command & CMD_MASK, caddr);
+ priv(dev)->restart = 1;
+ return;
+ }
+
+ while (nop.nop_status & STAT_COMPLETE) {
+ if (nop.nop_status & STAT_OK) {
+ priv(dev)->stats.tx_packets ++;
+ priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
+ } else {
+ priv(dev)->stats.tx_errors ++;
+
+ if (nop.nop_status & STAT_COLLAFTERTX)
+ priv(dev)->stats.collisions ++;
+ if (nop.nop_status & STAT_NOCARRIER)
+ priv(dev)->stats.tx_carrier_errors ++;
+ if (nop.nop_status & STAT_TXLOSTCTS)
+ printk (KERN_WARNING "%s: cts lost\n", dev->name);
+ if (nop.nop_status & STAT_TXSLOWDMA)
+ priv(dev)->stats.tx_fifo_errors ++;
+ if (nop.nop_status & STAT_COLLEXCESSIVE)
+ priv(dev)->stats.collisions += 16;
+ }
+
+ if (nop.nop_link == caddr) {
+ printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
+ break;
+ }
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
+ break;
+ }
+
+ if (caddr == nop.nop_link)
+ break;
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_TX) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
+ break;
+ }
+ }
+ priv(dev)->tx_tail = caddr;
+
+ caddr = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = caddr;
+ if (tst != -1)
+ netif_wake_queue(dev);
+}
+
+static void
+ether1_recv_done (struct net_device *dev)
+{
+ int status;
+ int nexttail, rbdaddr;
+ rbd_t rbd;
+
+ do {
+ status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
+ if ((status & RFD_COMPLETE) == 0)
+ break;
+
+ rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
+ ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);
+
+ if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
+ int length = rbd.rbd_status & RBD_ACNT;
+ struct sk_buff *skb;
+
+ length = (length + 1) & ~1;
+ skb = dev_alloc_skb (length + 2);
+
+ if (skb) {
+ skb->dev = dev;
+ skb_reserve (skb, 2);
+
+ ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
+
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ priv(dev)->stats.rx_packets ++;
+ } else
+ priv(dev)->stats.rx_dropped ++;
+ } else {
+ printk(KERN_WARNING "%s: %s\n", dev->name,
+ (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
+ priv(dev)->stats.rx_dropped ++;
+ }
+
+ nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
+ /* nexttail should be rx_head */
+ if (nexttail != priv(dev)->rx_head)
+ printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
+ dev->name, nexttail, priv(dev)->rx_head);
+ ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);
+
+ priv(dev)->rx_tail = nexttail;
+ priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
+ } while (1);
+}
+
+static irqreturn_t
+ether1_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ int status;
+
+ status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS);
+
+ if (status) {
+ ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX),
+ SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA | CTRL_ACK, REG_CONTROL);
+ if (status & SCB_STCX) {
+ ether1_xmit_done (dev);
+ }
+ if (status & SCB_STCNA) {
+ if (priv(dev)->resetting == 0)
+ printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name);
+ else
+ priv(dev)->resetting += 1;
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ if (priv(dev)->resetting == 2)
+ priv(dev)->resetting = 0;
+ }
+ if (status & SCB_STFR) {
+ ether1_recv_done (dev);
+ }
+ if (status & SCB_STRNR) {
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) {
+ printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
+ ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ priv(dev)->stats.rx_dropped ++; /* we suspended due to lack of buffer space */
+ } else
+ printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
+ printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset,
+ NORMALIRQS));
+ }
+ } else
+ writeb(CTRL_ACK, REG_CONTROL);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ether1_close (struct net_device *dev)
+{
+ ether1_reset (dev);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *
+ether1_getstats (struct net_device *dev)
+{
+ return &priv(dev)->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets.
+ * num_addrs == 0 Normal mode, clear multicast list.
+ * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ * best-effort filtering.
+ */
+static void
+ether1_setmulticastlist (struct net_device *dev)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void __init ether1_banner(void)
+{
+ static unsigned int version_printed = 0;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static int __devinit
+ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct net_device *dev;
+ int i, ret = 0;
+
+ ether1_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = alloc_etherdev(sizeof(struct ether1_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ dev->irq = ec->irq;
+ priv(dev)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
+ ecard_resource_len(ec, ECARD_RES_IOCFAST));
+ if (!priv(dev)->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+
+ if (ether1_init_2(dev)) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ dev->open = ether1_open;
+ dev->stop = ether1_close;
+ dev->hard_start_xmit = ether1_sendpacket;
+ dev->get_stats = ether1_getstats;
+ dev->set_multicast_list = ether1_setmulticastlist;
+ dev->tx_timeout = ether1_timeout;
+ dev->watchdog_timeo = 5 * HZ / 100;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk(KERN_INFO "%s: ether1 in slot %d, ",
+ dev->name, ec->slot_no);
+
+ for (i = 0; i < 6; i++)
+ printk ("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ ecard_set_drvdata(ec, dev);
+ return 0;
+
+ free:
+ if (priv(dev)->base)
+ iounmap(priv(dev)->base);
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit ether1_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ iounmap(priv(dev)->base);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id ether1_ids[] = {
+ { MANU_ACORN, PROD_ACORN_ETHER1 },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver ether1_driver = {
+ .probe = ether1_probe,
+ .remove = __devexit_p(ether1_remove),
+ .id_table = ether1_ids,
+ .drv = {
+ .name = "ether1",
+ },
+};
+
+static int __init ether1_init(void)
+{
+ return ecard_register_driver(&ether1_driver);
+}
+
+static void __exit ether1_exit(void)
+{
+ ecard_remove_driver(&ether1_driver);
+}
+
+module_init(ether1_init);
+module_exit(ether1_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
new file mode 100644
index 000000000000..c8a4b2389d85
--- /dev/null
+++ b/drivers/net/arm/ether1.h
@@ -0,0 +1,281 @@
+/*
+ * linux/drivers/acorn/net/ether1.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Network driver for Acorn Ether1 cards.
+ */
+
+#ifndef _LINUX_ether1_H
+#define _LINUX_ether1_H
+
+#ifdef __ETHER1_C
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define priv(dev) ((struct ether1_priv *)netdev_priv(dev))
+
+/* Page register */
+#define REG_PAGE (priv(dev)->base + 0x0000)
+
+/* Control register */
+#define REG_CONTROL (priv(dev)->base + 0x0004)
+#define CTRL_RST 0x01
+#define CTRL_LOOPBACK 0x02
+#define CTRL_CA 0x04
+#define CTRL_ACK 0x08
+
+#define ETHER1_RAM (priv(dev)->base + 0x2000)
+
+/* HW address */
+#define IDPROM_ADDRESS (priv(dev)->base + 0x0024)
+
+struct ether1_priv {
+ void __iomem *base;
+ struct net_device_stats stats;
+ unsigned int tx_link;
+ unsigned int tx_head;
+ volatile unsigned int tx_tail;
+ volatile unsigned int rx_head;
+ volatile unsigned int rx_tail;
+ unsigned char bus_type;
+ unsigned char resetting;
+ unsigned char initialising : 1;
+ unsigned char restart : 1;
+};
+
+#define I82586_NULL (-1)
+
+typedef struct { /* tdr */
+ unsigned short tdr_status;
+ unsigned short tdr_command;
+ unsigned short tdr_link;
+ unsigned short tdr_result;
+#define TDR_TIME (0x7ff)
+#define TDR_SHORT (1 << 12)
+#define TDR_OPEN (1 << 13)
+#define TDR_XCVRPROB (1 << 14)
+#define TDR_LNKOK (1 << 15)
+} tdr_t;
+
+typedef struct { /* transmit */
+ unsigned short tx_status;
+ unsigned short tx_command;
+ unsigned short tx_link;
+ unsigned short tx_tbdoffset;
+} tx_t;
+
+typedef struct { /* tbd */
+ unsigned short tbd_opts;
+#define TBD_CNT (0x3fff)
+#define TBD_EOL (1 << 15)
+ unsigned short tbd_link;
+ unsigned short tbd_bufl;
+ unsigned short tbd_bufh;
+} tbd_t;
+
+typedef struct { /* rfd */
+ unsigned short rfd_status;
+#define RFD_NOEOF (1 << 6)
+#define RFD_FRAMESHORT (1 << 7)
+#define RFD_DMAOVRN (1 << 8)
+#define RFD_NORESOURCES (1 << 9)
+#define RFD_ALIGNERROR (1 << 10)
+#define RFD_CRCERROR (1 << 11)
+#define RFD_OK (1 << 13)
+#define RFD_FDCONSUMED (1 << 14)
+#define RFD_COMPLETE (1 << 15)
+ unsigned short rfd_command;
+#define RFD_CMDSUSPEND (1 << 14)
+#define RFD_CMDEL (1 << 15)
+ unsigned short rfd_link;
+ unsigned short rfd_rbdoffset;
+ unsigned char rfd_dest[6];
+ unsigned char rfd_src[6];
+ unsigned short rfd_len;
+} rfd_t;
+
+typedef struct { /* rbd */
+ unsigned short rbd_status;
+#define RBD_ACNT (0x3fff)
+#define RBD_ACNTVALID (1 << 14)
+#define RBD_EOF (1 << 15)
+ unsigned short rbd_link;
+ unsigned short rbd_bufl;
+ unsigned short rbd_bufh;
+ unsigned short rbd_len;
+} rbd_t;
+
+typedef struct { /* nop */
+ unsigned short nop_status;
+ unsigned short nop_command;
+ unsigned short nop_link;
+} nop_t;
+
+typedef struct { /* set multicast */
+ unsigned short mc_status;
+ unsigned short mc_command;
+ unsigned short mc_link;
+ unsigned short mc_cnt;
+ unsigned char mc_addrs[1][6];
+} mc_t;
+
+typedef struct { /* set address */
+ unsigned short sa_status;
+ unsigned short sa_command;
+ unsigned short sa_link;
+ unsigned char sa_addr[6];
+} sa_t;
+
+typedef struct { /* config command */
+ unsigned short cfg_status;
+ unsigned short cfg_command;
+ unsigned short cfg_link;
+ unsigned char cfg_bytecnt; /* size foll data: 4 - 12 */
+ unsigned char cfg_fifolim; /* FIFO threshold */
+ unsigned char cfg_byte8;
+#define CFG8_SRDY (1 << 6)
+#define CFG8_SAVEBADF (1 << 7)
+ unsigned char cfg_byte9;
+#define CFG9_ADDRLEN(x) (x)
+#define CFG9_ADDRLENBUF (1 << 3)
+#define CFG9_PREAMB2 (0 << 4)
+#define CFG9_PREAMB4 (1 << 4)
+#define CFG9_PREAMB8 (2 << 4)
+#define CFG9_PREAMB16 (3 << 4)
+#define CFG9_ILOOPBACK (1 << 6)
+#define CFG9_ELOOPBACK (1 << 7)
+ unsigned char cfg_byte10;
+#define CFG10_LINPRI(x) (x)
+#define CFG10_ACR(x) (x << 4)
+#define CFG10_BOFMET (1 << 7)
+ unsigned char cfg_ifs;
+ unsigned char cfg_slotl;
+ unsigned char cfg_byte13;
+#define CFG13_SLOTH(x) (x)
+#define CFG13_RETRY(x) (x << 4)
+ unsigned char cfg_byte14;
+#define CFG14_PROMISC (1 << 0)
+#define CFG14_DISBRD (1 << 1)
+#define CFG14_MANCH (1 << 2)
+#define CFG14_TNCRS (1 << 3)
+#define CFG14_NOCRC (1 << 4)
+#define CFG14_CRC16 (1 << 5)
+#define CFG14_BTSTF (1 << 6)
+#define CFG14_FLGPAD (1 << 7)
+ unsigned char cfg_byte15;
+#define CFG15_CSTF(x) (x)
+#define CFG15_ICSS (1 << 3)
+#define CFG15_CDTF(x) (x << 4)
+#define CFG15_ICDS (1 << 7)
+ unsigned short cfg_minfrmlen;
+} cfg_t;
+
+typedef struct { /* scb */
+ unsigned short scb_status; /* status of 82586 */
+#define SCB_STRXMASK (7 << 4) /* Receive unit status */
+#define SCB_STRXIDLE (0 << 4) /* Idle */
+#define SCB_STRXSUSP (1 << 4) /* Suspended */
+#define SCB_STRXNRES (2 << 4) /* No resources */
+#define SCB_STRXRDY (4 << 4) /* Ready */
+#define SCB_STCUMASK (7 << 8) /* Command unit status */
+#define SCB_STCUIDLE (0 << 8) /* Idle */
+#define SCB_STCUSUSP (1 << 8) /* Suspended */
+#define SCB_STCUACTV (2 << 8) /* Active */
+#define SCB_STRNR (1 << 12) /* Receive unit not ready */
+#define SCB_STCNA (1 << 13) /* Command unit not ready */
+#define SCB_STFR (1 << 14) /* Frame received */
+#define SCB_STCX (1 << 15) /* Command completed */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */
+#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */
+#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */
+#define SCB_CMDRXABORT (4 << 4) /* Abort reception */
+#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */
+#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */
+#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */
+#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */
+#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */
+#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */
+#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */
+#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ unsigned short scb_rfa_offset; /* Offset of first receive frame area */
+ unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/
+ unsigned short scb_aln_errors; /* Misaligned frames */
+ unsigned short scb_rsc_errors; /* Frames lost due to no space */
+ unsigned short scb_ovn_errors; /* Frames lost due to slow bus */
+} scb_t;
+
+typedef struct { /* iscp */
+ unsigned short iscp_busy; /* set by CPU before CA */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh;
+} iscp_t;
+
+ /* this address must be 0xfff6 */
+typedef struct { /* scp */
+ unsigned short scp_sysbus; /* bus size */
+#define SCP_SY_16BBUS 0x00
+#define SCP_SY_8BBUS 0x01
+ unsigned short scp_junk[2]; /* junk */
+ unsigned short scp_iscpl; /* lower 16 bits of iscp */
+ unsigned short scp_iscph; /* upper 16 bits of iscp */
+} scp_t;
+
+/* commands */
+#define CMD_NOP 0
+#define CMD_SETADDRESS 1
+#define CMD_CONFIG 2
+#define CMD_SETMULTICAST 3
+#define CMD_TX 4
+#define CMD_TDR 5
+#define CMD_DUMP 6
+#define CMD_DIAGNOSE 7
+
+#define CMD_MASK 7
+
+#define CMD_INTR (1 << 13)
+#define CMD_SUSP (1 << 14)
+#define CMD_EOL (1 << 15)
+
+#define STAT_COLLISIONS (15)
+#define STAT_COLLEXCESSIVE (1 << 5)
+#define STAT_COLLAFTERTX (1 << 6)
+#define STAT_TXDEFERRED (1 << 7)
+#define STAT_TXSLOWDMA (1 << 8)
+#define STAT_TXLOSTCTS (1 << 9)
+#define STAT_NOCARRIER (1 << 10)
+#define STAT_FAIL (1 << 11)
+#define STAT_ABORTED (1 << 12)
+#define STAT_OK (1 << 13)
+#define STAT_BUSY (1 << 14)
+#define STAT_COMPLETE (1 << 15)
+#endif
+#endif
+
+/*
+ * Ether1 card definitions:
+ *
+ * FAST accesses:
+ * +0 Page register
+ * 16 pages
+ * +4 Control
+ * '1' = reset
+ * '2' = loopback
+ * '4' = CA
+ * '8' = int ack
+ *
+ * RAM at address + 0x2000
+ * Pod. Prod id = 3
+ * Words after ID block [base + 8 words]
+ * +0 pcb issue (0x0c and 0xf3 invalid)
+ * +1 - +6 eth hw address
+ */
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
new file mode 100644
index 000000000000..1cc53abc3a39
--- /dev/null
+++ b/drivers/net/arm/ether3.c
@@ -0,0 +1,936 @@
+/*
+ * linux/drivers/acorn/net/ether3.c
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card
+ * for Acorn machines
+ *
+ * By Russell King, with some suggestions from borris@ant.co.uk
+ *
+ * Changelog:
+ * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet
+ * address up to the higher levels - they're
+ * silently ignored. I/F can now be put into
+ * multicast mode. Receiver routine optimised.
+ * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of
+ * the kernel rather than when a module.
+ * 1.06 RMK 02/03/1996 Various code cleanups
+ * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit
+ * routines.
+ * 1.08 RMK 14/10/1996 Fixed problem with too many packets,
+ * prevented the kernel message about dropped
+ * packets appearing too many times a second.
+ * Now does not disable all IRQs, only the IRQ
+ * used by this card.
+ * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low,
+ * but we still service the TX queue if we get a
+ * RX interrupt.
+ * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004.
+ * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A.
+ * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1.
+ * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h.
+ * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets.
+ * Chip seems to have a bug in it, whereby if the
+ * packet starts two bytes from the end of the
+ * buffer, it corrupts the receiver chain, and
+ * never updates the transmit status correctly.
+ * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing.
+ * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy
+ * hardware.
+ * 1.16 RMK 10/02/2000 Updated for 2.3.43
+ * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
+
+#include "ether3.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+static void ether3_setmulticastlist(struct net_device *dev);
+static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
+static void ether3_tx(struct net_device *dev);
+static int ether3_open (struct net_device *dev);
+static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ether3_interrupt (int irq, void *dev_id, struct pt_regs *regs);
+static int ether3_close (struct net_device *dev);
+static struct net_device_stats *ether3_getstats (struct net_device *dev);
+static void ether3_setmulticastlist (struct net_device *dev);
+static void ether3_timeout(struct net_device *dev);
+
+#define BUS_16 2
+#define BUS_8 1
+#define BUS_UNKNOWN 0
+
+/* --------------------------------------------------------------------------- */
+
+typedef enum {
+ buffer_write,
+ buffer_read
+} buffer_rw_t;
+
+/*
+ * ether3 read/write. Slow things down a bit...
+ * The SEEQ8005 doesn't like us writing to its registers
+ * too quickly.
+ */
+static inline void ether3_outb(int v, const void __iomem *r)
+{
+ writeb(v, r);
+ udelay(1);
+}
+
+static inline void ether3_outw(int v, const void __iomem *r)
+{
+ writew(v, r);
+ udelay(1);
+}
+#define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; })
+#define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; })
+
+static int
+ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
+{
+ int timeout = 1000;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
+
+ while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) {
+ if (!timeout--) {
+ printk("%s: setbuffer broken\n", dev->name);
+ priv(dev)->broken = 1;
+ return 1;
+ }
+ udelay(1);
+ }
+
+ if (read == buffer_read) {
+ ether3_outw(start, REG_DMAADDR);
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND);
+ } else {
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
+ ether3_outw(start, REG_DMAADDR);
+ }
+ return 0;
+}
+
+/*
+ * write data to the buffer memory
+ */
+#define ether3_writebuffer(dev,data,length) \
+ writesw(REG_BUFWIN, (data), (length) >> 1)
+
+#define ether3_writeword(dev,data) \
+ writew((data), REG_BUFWIN)
+
+#define ether3_writelong(dev,data) { \
+ void __iomem *reg_bufwin = REG_BUFWIN; \
+ writew((data), reg_bufwin); \
+ writew((data) >> 16, reg_bufwin); \
+}
+
+/*
+ * read data from the buffer memory
+ */
+#define ether3_readbuffer(dev,data,length) \
+ readsw(REG_BUFWIN, (data), (length) >> 1)
+
+#define ether3_readword(dev) \
+ readw(REG_BUFWIN)
+
+#define ether3_readlong(dev) \
+ readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16)
+
+/*
+ * Switch LED off...
+ */
+static void ether3_ledoff(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
+}
+
+/*
+ * switch LED on...
+ */
+static inline void ether3_ledon(struct net_device *dev)
+{
+ del_timer(&priv(dev)->timer);
+ priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
+ priv(dev)->timer.data = (unsigned long)dev;
+ priv(dev)->timer.function = ether3_ledoff;
+ add_timer(&priv(dev)->timer);
+ if (priv(dev)->regs.config2 & CFG2_CTRLO)
+ ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2);
+}
+
+/*
+ * Read the ethernet address string from the on board rom.
+ * This is an ascii string!!!
+ */
+static int __init
+ether3_addr(char *addr, struct expansion_card *ec)
+{
+ struct in_chunk_dir cd;
+ char *s;
+
+ if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) {
+ int i;
+ for (i = 0; i<6; i++) {
+ addr[i] = simple_strtoul(s + 1, &s, 0x10);
+ if (*s != (i==5?')' : ':' ))
+ break;
+ }
+ if (i == 6)
+ return 0;
+ }
+ /* I wonder if we should even let the user continue in this case
+ * - no, it would be better to disable the device
+ */
+ printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n");
+ return -ENODEV;
+}
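+
+/*
+ * Illustrative sketch (not part of the original driver): the same "parse
+ * a MAC address out of a text string" job as ether3_addr() above, as a
+ * standalone helper.  The "(xx:xx:xx:xx:xx:xx)" format follows the
+ * expansion card ROM string handled above; the helper name is invented.
+ *
+ *	#include <stdlib.h>
+ *	#include <string.h>
+ *
+ *	static int parse_mac(const char *s, unsigned char addr[6])
+ *	{
+ *		int i;
+ *
+ *		s = strchr(s, '(');
+ *		if (!s)
+ *			return -1;
+ *		for (i = 0; i < 6; i++) {
+ *			char *end;
+ *
+ *			addr[i] = (unsigned char)strtoul(s + 1, &end, 16);
+ *			if (*end != (i == 5 ? ')' : ':'))
+ *				return -1;
+ *			s = end;
+ *		}
+ *		return 0;
+ *	}
+ */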
+
+/* --------------------------------------------------------------------------- */
+
+static int __init
+ether3_ramtest(struct net_device *dev, unsigned char byte)
+{
+ unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
+ int i,ret = 0;
+ int max_errors = 4;
+ int bad = -1;
+
+ if (!buffer)
+ return 1;
+
+ memset(buffer, byte, RX_END);
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writebuffer(dev, buffer, TX_END);
+ ether3_setbuffer(dev, buffer_write, RX_START);
+ ether3_writebuffer(dev, buffer + RX_START, RX_LEN);
+ memset(buffer, byte ^ 0xff, RX_END);
+ ether3_setbuffer(dev, buffer_read, 0);
+ ether3_readbuffer(dev, buffer, TX_END);
+ ether3_setbuffer(dev, buffer_read, RX_START);
+ ether3_readbuffer(dev, buffer + RX_START, RX_LEN);
+
+ for (i = 0; i < RX_END; i++) {
+ if (buffer[i] != byte) {
+ if (max_errors > 0 && bad != buffer[i]) {
+ printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X",
+ dev->name, buffer[i], byte, i);
+ ret = 2;
+ max_errors--;
+ bad = i;
+ }
+ } else {
+ if (bad != -1) {
+ if (bad != i - 1)
+ printk(" - 0x%04X\n", i - 1);
+ printk("\n");
+ bad = -1;
+ }
+ }
+ }
+ if (bad != -1)
+ printk(" - 0xffff\n");
+ kfree(buffer);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------------- */
+
+static int __init ether3_init_2(struct net_device *dev)
+{
+ int i;
+
+ priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8;
+ priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC;
+ priv(dev)->regs.command = 0;
+
+ /*
+ * Set up our hardware address
+ */
+ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
+ for (i = 0; i < 6; i++)
+ ether3_outb(dev->dev_addr[i], REG_BUFWIN);
+
+ if (dev->flags & IFF_PROMISC)
+ priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
+ else if (dev->flags & IFF_MULTICAST)
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
+ else
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
+
+ /*
+ * There is a problem with the NQ8005 in that it occasionally loses the
+ * last two bytes. To get round this problem, we receive the CRC as
+ * well. That way, if we do lose the last two, then it doesn't matter.
+ */
+ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
+ ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
+ ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
+ ether3_outw(0, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+ ether3_outw(priv(dev)->regs.command, REG_COMMAND);
+
+ i = ether3_ramtest(dev, 0x5A);
+ if(i)
+ return i;
+ i = ether3_ramtest(dev, 0x1E);
+ if(i)
+ return i;
+
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writelong(dev, 0);
+ return 0;
+}
+
+static void
+ether3_init_for_open(struct net_device *dev)
+{
+ int i;
+
+ memset(&priv(dev)->stats, 0, sizeof(struct net_device_stats));
+
+ /* Reset the chip */
+ ether3_outw(CFG2_RESET, REG_CONFIG2);
+ udelay(4);
+
+ priv(dev)->regs.command = 0;
+ ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
+ while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
+ barrier();
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
+ for (i = 0; i < 6; i++)
+ ether3_outb(dev->dev_addr[i], REG_BUFWIN);
+
+ priv(dev)->tx_head = 0;
+ priv(dev)->tx_tail = 0;
+ priv(dev)->regs.config2 |= CFG2_CTRLO;
+ priv(dev)->rx_head = RX_START;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
+ ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
+ ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
+ ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
+ ether3_outw(0, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writelong(dev, 0);
+
+ priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX;
+ ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
+}
+
+static inline int
+ether3_probe_bus_8(struct net_device *dev, int val)
+{
+ int write_low, write_high, read_low, read_high;
+
+ write_low = val & 255;
+ write_high = val >> 8;
+
+ printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low);
+
+ ether3_outb(write_low, REG_RECVPTR);
+ ether3_outb(write_high, REG_RECVPTR + 4);
+
+ read_low = ether3_inb(REG_RECVPTR);
+ read_high = ether3_inb(REG_RECVPTR + 4);
+
+ printk(", read8 [%02X:%02X]\n", read_high, read_low);
+
+ return read_low == write_low && read_high == write_high;
+}
+
+static inline int
+ether3_probe_bus_16(struct net_device *dev, int val)
+{
+ int read_val;
+
+ ether3_outw(val, REG_RECVPTR);
+ read_val = ether3_inw(REG_RECVPTR);
+
+ printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val);
+
+ return read_val == val;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+ether3_open(struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
+ return -EAGAIN;
+
+ ether3_init_for_open(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to ether3_open().
+ */
+static int
+ether3_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+
+ disable_irq(dev->irq);
+
+ ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
+ priv(dev)->regs.command = 0;
+ while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
+ barrier();
+ ether3_outb(0x80, REG_CONFIG2 + 4);
+ ether3_outw(0, REG_COMMAND);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Get the current statistics. This may be called with the card open or
+ * closed.
+ */
+static struct net_device_stats *ether3_getstats(struct net_device *dev)
+{
+ return &priv(dev)->stats;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adaptor.
+ *
+ * We don't attempt any packet filtering. The card may have a SEEQ 8004
+ * in which does not have the other ethernet address registers present...
+ */
+static void ether3_setmulticastlist(struct net_device *dev)
+{
+ priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* promiscuous mode */
+ priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
+ } else
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+}
+
+static void ether3_timeout(struct net_device *dev)
+{
+ unsigned long flags;
+
+ del_timer(&priv(dev)->timer);
+
+ local_irq_save(flags);
+ printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
+ printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name,
+ ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2));
+ printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name,
+ ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR));
+ printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name,
+ priv(dev)->tx_head, priv(dev)->tx_tail);
+ ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail);
+ printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev));
+ local_irq_restore(flags);
+
+ priv(dev)->regs.config2 |= CFG2_CTRLO;
+ priv(dev)->stats.tx_errors += 1;
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ priv(dev)->tx_head = priv(dev)->tx_tail = 0;
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned int ptr, next_ptr;
+
+ if (priv(dev)->broken) {
+ dev_kfree_skb(skb);
+ priv(dev)->stats.tx_dropped ++;
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ length = (length + 1) & ~1;
+ if (length != skb->len) {
+ skb = skb_padto(skb, length);
+ if (skb == NULL)
+ goto out;
+ }
+
+ next_ptr = (priv(dev)->tx_head + 1) & 15;
+
+ local_irq_save(flags);
+
+ if (priv(dev)->tx_tail == next_ptr) {
+ local_irq_restore(flags);
+ return 1; /* unable to queue */
+ }
+
+ dev->trans_start = jiffies;
+ ptr = 0x600 * priv(dev)->tx_head;
+ priv(dev)->tx_head = next_ptr;
+ next_ptr *= 0x600;
+
+#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)
+
+ ether3_setbuffer(dev, buffer_write, next_ptr);
+ ether3_writelong(dev, 0);
+ ether3_setbuffer(dev, buffer_write, ptr);
+ ether3_writelong(dev, 0);
+ ether3_writebuffer(dev, skb->data, length);
+ ether3_writeword(dev, htons(next_ptr));
+ ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
+ ether3_setbuffer(dev, buffer_write, ptr);
+ ether3_writeword(dev, htons((ptr + length + 4)));
+ ether3_writeword(dev, TXHDR_FLAGS >> 16);
+ ether3_ledon(dev);
+
+ if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
+ ether3_outw(ptr, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
+ }
+
+ next_ptr = (priv(dev)->tx_head + 1) & 15;
+ local_irq_restore(flags);
+
+ dev_kfree_skb(skb);
+
+ if (priv(dev)->tx_tail == next_ptr)
+ netif_stop_queue(dev);
+
+ out:
+ return 0;
+}
+
+static irqreturn_t
+ether3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ unsigned int status, handled = IRQ_NONE;
+
+#if NET_DEBUG > 1
+ if(net_debug & DEBUG_INT)
+ printk("eth3irq: %d ", irq);
+#endif
+
+ status = ether3_inw(REG_STATUS);
+
+ if (status & STAT_INTRX) {
+ ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND);
+ ether3_rx(dev, 12);
+ handled = IRQ_HANDLED;
+ }
+
+ if (status & STAT_INTTX) {
+ ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND);
+ ether3_tx(dev);
+ handled = IRQ_HANDLED;
+ }
+
+#if NET_DEBUG > 1
+ if(net_debug & DEBUG_INT)
+ printk("done\n");
+#endif
+ return handled;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
+{
+ unsigned int next_ptr = priv(dev)->rx_head, received = 0;
+
+ ether3_ledon(dev);
+
+ do {
+ unsigned int this_ptr, status;
+ unsigned char addrs[16];
+
+ /*
+ * Read the first 16 bytes from the buffer.
+ * These contain the status words and the Ethernet addresses;
+ * we also check the source address to see whether the packet
+ * originated from us.
+ */
+ {
+ unsigned int temp_ptr;
+ ether3_setbuffer(dev, buffer_read, next_ptr);
+ temp_ptr = ether3_readword(dev);
+ status = ether3_readword(dev);
+ if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) !=
+ (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr)
+ break;
+
+ this_ptr = next_ptr + 4;
+ next_ptr = ntohs(temp_ptr);
+ }
+ ether3_setbuffer(dev, buffer_read, this_ptr);
+ ether3_readbuffer(dev, addrs+2, 12);
+
+ if (next_ptr < RX_START || next_ptr >= RX_END) {
+ int i;
+ printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
+ printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
+ for (i = 2; i < 14; i++)
+ printk("%02X ", addrs[i]);
+ printk("\n");
+ next_ptr = priv(dev)->rx_head;
+ break;
+ }
+ /*
+ * ignore our own packets...
+ */
+ if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) &&
+ !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) {
+ maxcnt ++; /* compensate for looped-back packet */
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ } else
+ if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) {
+ unsigned int length = next_ptr - this_ptr;
+ struct sk_buff *skb;
+
+ if (next_ptr <= this_ptr)
+ length += RX_END - RX_START;
+
+ skb = dev_alloc_skb(length + 2);
+ if (skb) {
+ unsigned char *buf;
+
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ buf = skb_put(skb, length);
+ ether3_readbuffer(dev, buf + 12, length - 12);
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2);
+ *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4);
+ *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8);
+ *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ received ++;
+ } else
+ goto dropping;
+ } else {
+ struct net_device_stats *stats = &priv(dev)->stats;
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
+ if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
+ if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++;
+ if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++;
+ stats->rx_errors++;
+ }
+ }
+ while (-- maxcnt);
+
+done:
+ priv(dev)->stats.rx_packets += received;
+ priv(dev)->rx_head = next_ptr;
+ /*
+ * If rx went off line, then that means that the buffer may be full. We
+ * have dropped at least one packet.
+ */
+ if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
+ priv(dev)->stats.rx_dropped ++;
+ ether3_outw(next_ptr, REG_RECVPTR);
+ ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
+ }
+
+ return maxcnt;
+
+dropping:{
+ static unsigned long last_warned;
+
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ /*
+ * Don't print this message too many times...
+ */
+ if (time_after(jiffies, last_warned + 10 * HZ)) {
+ last_warned = jiffies;
+ printk("%s: memory squeeze, dropping packet.\n", dev->name);
+ }
+ priv(dev)->stats.rx_dropped ++;
+ goto done;
+ }
+}
+
+/*
+ * Update stats for the transmitted packet(s)
+ */
+static void ether3_tx(struct net_device *dev)
+{
+ unsigned int tx_tail = priv(dev)->tx_tail;
+ int max_work = 14;
+
+ do {
+ unsigned long status;
+
+ /*
+ * Read the packet header
+ */
+ ether3_setbuffer(dev, buffer_read, tx_tail * 0x600);
+ status = ether3_readlong(dev);
+
+ /*
+ * Check to see if this packet has been transmitted
+ */
+ if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) !=
+ (TXSTAT_DONE | TXHDR_TRANSMIT))
+ break;
+
+ /*
+ * Update errors
+ */
+ if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
+ priv(dev)->stats.tx_packets++;
+ else {
+ priv(dev)->stats.tx_errors ++;
+ if (status & TXSTAT_16COLLISIONS)
+ priv(dev)->stats.collisions += 16;
+ if (status & TXSTAT_BABBLED)
+ priv(dev)->stats.tx_fifo_errors ++;
+ }
+
+ tx_tail = (tx_tail + 1) & 15;
+ } while (--max_work);
+
+ if (priv(dev)->tx_tail != tx_tail) {
+ priv(dev)->tx_tail = tx_tail;
+ netif_wake_queue(dev);
+ }
+}
+
+static void __init ether3_banner(void)
+{
+ static unsigned version_printed = 0;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static int __devinit
+ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ const struct ether3_data *data = id->data;
+ struct net_device *dev;
+ int i, bus_type, ret;
+
+ ether3_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ priv(dev)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(dev)->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ec->irqaddr = priv(dev)->base + data->base_offset;
+ ec->irqmask = 0xf0;
+
+ priv(dev)->seeq = priv(dev)->base + data->base_offset;
+ dev->irq = ec->irq;
+
+ ether3_addr(dev->dev_addr, ec);
+
+ init_timer(&priv(dev)->timer);
+
+ /* Reset card...
+ */
+ ether3_outb(0x80, REG_CONFIG2 + 4);
+ bus_type = BUS_UNKNOWN;
+ udelay(4);
+
+ /* Test using Receive Pointer (16-bit register) to find out
+ * how the ether3 is connected to the bus...
+ */
+ if (ether3_probe_bus_8(dev, 0x100) &&
+ ether3_probe_bus_8(dev, 0x201))
+ bus_type = BUS_8;
+
+ if (bus_type == BUS_UNKNOWN &&
+ ether3_probe_bus_16(dev, 0x101) &&
+ ether3_probe_bus_16(dev, 0x201))
+ bus_type = BUS_16;
+
+ switch (bus_type) {
+ case BUS_UNKNOWN:
+ printk(KERN_ERR "%s: unable to identify bus width\n", dev->name);
+ ret = -ENODEV;
+ goto free;
+
+ case BUS_8:
+ printk(KERN_ERR "%s: %s found, but is an unsupported "
+ "8-bit card\n", dev->name, data->name);
+ ret = -ENODEV;
+ goto free;
+
+ default:
+ break;
+ }
+
+ if (ether3_init_2(dev)) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ dev->open = ether3_open;
+ dev->stop = ether3_close;
+ dev->hard_start_xmit = ether3_sendpacket;
+ dev->get_stats = ether3_getstats;
+ dev->set_multicast_list = ether3_setmulticastlist;
+ dev->tx_timeout = ether3_timeout;
+ dev->watchdog_timeo = 5 * HZ / 100;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk("%s: %s in slot %d, ", dev->name, data->name, ec->slot_no);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ ecard_set_drvdata(ec, dev);
+ return 0;
+
+ free:
+ if (priv(dev)->base)
+ iounmap(priv(dev)->base);
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit ether3_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ iounmap(priv(dev)->base);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+}
+
+static struct ether3_data ether3 = {
+ .name = "ether3",
+ .base_offset = 0,
+};
+
+static struct ether3_data etherb = {
+ .name = "etherb",
+ .base_offset = 0x800,
+};
+
+static const struct ecard_id ether3_ids[] = {
+ { MANU_ANT2, PROD_ANT_ETHER3, &ether3 },
+ { MANU_ANT, PROD_ANT_ETHER3, &ether3 },
+ { MANU_ANT, PROD_ANT_ETHERB, &etherb },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver ether3_driver = {
+ .probe = ether3_probe,
+ .remove = __devexit_p(ether3_remove),
+ .id_table = ether3_ids,
+ .drv = {
+ .name = "ether3",
+ },
+};
+
+static int __init ether3_init(void)
+{
+ return ecard_register_driver(&ether3_driver);
+}
+
+static void __exit ether3_exit(void)
+{
+ ecard_remove_driver(&ether3_driver);
+}
+
+module_init(ether3_init);
+module_exit(ether3_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
new file mode 100644
index 000000000000..1921a3a07da7
--- /dev/null
+++ b/drivers/net/arm/ether3.h
@@ -0,0 +1,177 @@
+/*
+ * linux/drivers/acorn/net/ether3.h
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * network driver for Acorn/ANT Ether3 cards
+ */
+
+#ifndef _LINUX_ether3_H
+#define _LINUX_ether3_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define priv(dev) ((struct dev_priv *)netdev_priv(dev))
+
+/* Command register definitions & bits */
+#define REG_COMMAND (priv(dev)->seeq + 0x0000)
+#define CMD_ENINTDMA 0x0001
+#define CMD_ENINTRX 0x0002
+#define CMD_ENINTTX 0x0004
+#define CMD_ENINTBUFWIN 0x0008
+#define CMD_ACKINTDMA 0x0010
+#define CMD_ACKINTRX 0x0020
+#define CMD_ACKINTTX 0x0040
+#define CMD_ACKINTBUFWIN 0x0080
+#define CMD_DMAON 0x0100
+#define CMD_RXON 0x0200
+#define CMD_TXON 0x0400
+#define CMD_DMAOFF 0x0800
+#define CMD_RXOFF 0x1000
+#define CMD_TXOFF 0x2000
+#define CMD_FIFOREAD 0x4000
+#define CMD_FIFOWRITE 0x8000
+
+/* status register */
+#define REG_STATUS (priv(dev)->seeq + 0x0000)
+#define STAT_ENINTSTAT 0x0001
+#define STAT_ENINTRX 0x0002
+#define STAT_ENINTTX 0x0004
+#define STAT_ENINTBUFWIN 0x0008
+#define STAT_INTDMA 0x0010
+#define STAT_INTRX 0x0020
+#define STAT_INTTX 0x0040
+#define STAT_INTBUFWIN 0x0080
+#define STAT_DMAON 0x0100
+#define STAT_RXON 0x0200
+#define STAT_TXON 0x0400
+#define STAT_FIFOFULL 0x2000
+#define STAT_FIFOEMPTY 0x4000
+#define STAT_FIFODIR 0x8000
+
+/* configuration register 1 */
+#define REG_CONFIG1 (priv(dev)->seeq + 0x0040)
+#define CFG1_BUFSELSTAT0 0x0000
+#define CFG1_BUFSELSTAT1 0x0001
+#define CFG1_BUFSELSTAT2 0x0002
+#define CFG1_BUFSELSTAT3 0x0003
+#define CFG1_BUFSELSTAT4 0x0004
+#define CFG1_BUFSELSTAT5 0x0005
+#define CFG1_ADDRPROM 0x0006
+#define CFG1_TRANSEND 0x0007
+#define CFG1_LOCBUFMEM 0x0008
+#define CFG1_INTVECTOR 0x0009
+#define CFG1_RECVSPECONLY 0x0000
+#define CFG1_RECVSPECBROAD 0x4000
+#define CFG1_RECVSPECBRMULTI 0x8000
+#define CFG1_RECVPROMISC 0xC000
+
+/* The following aren't in 8004 */
+#define CFG1_DMABURSTCONT 0x0000
+#define CFG1_DMABURST800NS 0x0010
+#define CFG1_DMABURST1600NS 0x0020
+#define CFG1_DMABURST3200NS 0x0030
+#define CFG1_DMABURST1 0x0000
+#define CFG1_DMABURST4 0x0040
+#define CFG1_DMABURST8 0x0080
+#define CFG1_DMABURST16 0x00C0
+#define CFG1_RECVCOMPSTAT0 0x0100
+#define CFG1_RECVCOMPSTAT1 0x0200
+#define CFG1_RECVCOMPSTAT2 0x0400
+#define CFG1_RECVCOMPSTAT3 0x0800
+#define CFG1_RECVCOMPSTAT4 0x1000
+#define CFG1_RECVCOMPSTAT5 0x2000
+
+/* configuration register 2 */
+#define REG_CONFIG2 (priv(dev)->seeq + 0x0080)
+#define CFG2_BYTESWAP 0x0001
+#define CFG2_ERRENCRC 0x0008
+#define CFG2_ERRENDRIBBLE 0x0010
+#define CFG2_ERRSHORTFRAME 0x0020
+#define CFG2_SLOTSELECT 0x0040
+#define CFG2_PREAMSELECT 0x0080
+#define CFG2_ADDRLENGTH 0x0100
+#define CFG2_RECVCRC 0x0200
+#define CFG2_XMITNOCRC 0x0400
+#define CFG2_LOOPBACK 0x0800
+#define CFG2_CTRLO 0x1000
+#define CFG2_RESET 0x8000
+
+#define REG_RECVEND (priv(dev)->seeq + 0x00c0)
+
+#define REG_BUFWIN (priv(dev)->seeq + 0x0100)
+
+#define REG_RECVPTR (priv(dev)->seeq + 0x0140)
+
+#define REG_TRANSMITPTR (priv(dev)->seeq + 0x0180)
+
+#define REG_DMAADDR (priv(dev)->seeq + 0x01c0)
+
+/*
+ * Cards transmit/receive headers
+ */
+#define TX_NEXT (0xffff)
+#define TXHDR_ENBABBLEINT (1 << 16)
+#define TXHDR_ENCOLLISIONINT (1 << 17)
+#define TXHDR_EN16COLLISION (1 << 18)
+#define TXHDR_ENSUCCESS (1 << 19)
+#define TXHDR_DATAFOLLOWS (1 << 21)
+#define TXHDR_CHAINCONTINUE (1 << 22)
+#define TXHDR_TRANSMIT (1 << 23)
+#define TXSTAT_BABBLED (1 << 24)
+#define TXSTAT_COLLISION (1 << 25)
+#define TXSTAT_16COLLISIONS (1 << 26)
+#define TXSTAT_DONE (1 << 31)
+
+#define RX_NEXT (0xffff)
+#define RXHDR_CHAINCONTINUE (1 << 6)
+#define RXHDR_RECEIVE (1 << 7)
+#define RXSTAT_OVERSIZE (1 << 8)
+#define RXSTAT_CRCERROR (1 << 9)
+#define RXSTAT_DRIBBLEERROR (1 << 10)
+#define RXSTAT_SHORTPACKET (1 << 11)
+#define RXSTAT_DONE (1 << 15)
+
+
+#define TX_START 0x0000
+#define TX_END 0x6000
+#define RX_START 0x6000
+#define RX_LEN 0xA000
+#define RX_END 0x10000
+/* must be a power of 2 and greater than MAX_TX_BUFFERED */
+#define MAX_TXED 16
+#define MAX_TX_BUFFERED 10
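Taken together, the constants above describe how ether3.c carves up the 64 KB of on-card buffer memory: a transmit area from TX_START to TX_END holding MAX_TXED slots of 0x600 bytes (the slot size used by ether3_sendpacket()), and a receive ring from RX_START to RX_END of RX_LEN bytes. A small consistency check of that layout, with the values copied from the definitions above (a sketch only; the E3_ prefix just keeps it self-contained):

#include <assert.h>

#define E3_TX_START	0x0000
#define E3_TX_END	0x6000
#define E3_RX_START	0x6000
#define E3_RX_END	0x10000
#define E3_RX_LEN	0xA000
#define E3_MAX_TXED	16
#define E3_TX_SLOT	0x600	/* per-packet slot size used by ether3.c */

int main(void)
{
	assert(E3_TX_END - E3_TX_START == E3_MAX_TXED * E3_TX_SLOT);	/* 16 x 1536 bytes */
	assert(E3_RX_END - E3_RX_START == E3_RX_LEN);			/* 40 KB receive ring */
	assert(E3_TX_END == E3_RX_START);				/* regions are contiguous */
	return 0;
}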
+
+struct dev_priv {
+ void __iomem *base;
+ void __iomem *seeq;
+ struct {
+ unsigned int command;
+ unsigned int config1;
+ unsigned int config2;
+ } regs;
+ unsigned char tx_head; /* buffer nr to insert next packet */
+ unsigned char tx_tail; /* buffer nr of transmitting packet */
+ unsigned int rx_head; /* address to fetch next packet from */
+ struct net_device_stats stats;
+ struct timer_list timer;
+ int broken; /* 0 = ok, 1 = something went wrong */
+};
+
+struct ether3_data {
+ const char name[8];
+ unsigned long base_offset;
+};
+
+#endif
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
new file mode 100644
index 000000000000..942a2819576c
--- /dev/null
+++ b/drivers/net/arm/etherh.c
@@ -0,0 +1,862 @@
+/*
+ * linux/drivers/acorn/net/etherh.c
+ *
+ * Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * NS8390 I-cubed EtherH and ANT EtherM specific driver
+ * Thanks to I-Cubed for information on their cards.
+ * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton
+ * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan)
+ * EtherM integration re-engineered by Russell King.
+ *
+ * Changelog:
+ * 08-12-1996 RMK 1.00 Created
+ * RMK 1.03 Added support for EtherLan500 cards
+ * 23-11-1997 RMK 1.04 Added media autodetection
+ * 16-04-1998 RMK 1.05 Improved media autodetection
+ * 10-02-2000 RMK 1.06 Updated for 2.3.43
+ * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8
+ * 12-10-1999 CK/TEW EtherM driver first release
+ * 21-12-2000 TTC EtherH/EtherM integration
+ * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver.
+ * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "../8390.h"
+
+#define NET_DEBUG 0
+#define DEBUG_INIT 2
+
+#define DRV_NAME "etherh"
+#define DRV_VERSION "1.11"
+
+static unsigned int net_debug = NET_DEBUG;
+
+struct etherh_priv {
+ void __iomem *ioc_fast;
+ void __iomem *memc;
+ void __iomem *dma_base;
+ unsigned int id;
+ void __iomem *ctrl_port;
+ unsigned char ctrl;
+ u32 supported;
+};
+
+struct etherh_data {
+ unsigned long ns8390_offset;
+ unsigned long dataport_offset;
+ unsigned long ctrlport_offset;
+ int ctrl_ioc;
+ const char name[16];
+ u32 supported;
+ unsigned char tx_start_page;
+ unsigned char stop_page;
+};
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("EtherH/EtherM driver");
+MODULE_LICENSE("GPL");
+
+static char version[] __initdata =
+ "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
+
+#define ETHERH500_DATAPORT 0x800 /* MEMC */
+#define ETHERH500_NS8390 0x000 /* MEMC */
+#define ETHERH500_CTRLPORT 0x800 /* IOC */
+
+#define ETHERH600_DATAPORT 0x040 /* MEMC */
+#define ETHERH600_NS8390 0x800 /* MEMC */
+#define ETHERH600_CTRLPORT 0x200 /* MEMC */
+
+#define ETHERH_CP_IE 1
+#define ETHERH_CP_IF 2
+#define ETHERH_CP_HEARTBEAT 2
+
+#define ETHERH_TX_START_PAGE 1
+#define ETHERH_STOP_PAGE 127
+
+/*
+ * These came from CK/TEW
+ */
+#define ETHERM_DATAPORT 0x200 /* MEMC */
+#define ETHERM_NS8390 0x800 /* MEMC */
+#define ETHERM_CTRLPORT 0x23c /* MEMC */
+
+#define ETHERM_TX_START_PAGE 64
+#define ETHERM_STOP_PAGE 127
+
+/* ------------------------------------------------------------------------ */
+
+#define etherh_priv(dev) \
+ ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device)))
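etherh_priv() assumes the layout produced by __alloc_ei_netdev(sizeof(struct etherh_priv)): the generic 8390 state (struct ei_device) sits at the start of the netdev private area and the card-specific struct etherh_priv follows immediately after it, so the macro simply steps over the ei_device. A minimal sketch of the same pointer arithmetic with stand-in types (my_ei/my_priv are illustrative, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct ei_device and struct etherh_priv. */
struct my_ei   { char generic_8390_state[64]; };
struct my_priv { unsigned int id; };

/* Same idea as etherh_priv(dev): the card-specific data lives
 * directly after the generic state in a single allocation. */
#define my_card_priv(base) \
	((struct my_priv *)((char *)(base) + sizeof(struct my_ei)))

int main(void)
{
	void *base = calloc(1, sizeof(struct my_ei) + sizeof(struct my_priv));

	if (!base)
		return 1;
	my_card_priv(base)->id = 0x5678;
	printf("card id = 0x%x\n", my_card_priv(base)->id);
	free(base);
	return 0;
}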
+
+static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask)
+{
+ unsigned char ctrl = eh->ctrl | mask;
+ eh->ctrl = ctrl;
+ writeb(ctrl, eh->ctrl_port);
+}
+
+static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask)
+{
+ unsigned char ctrl = eh->ctrl & ~mask;
+ eh->ctrl = ctrl;
+ writeb(ctrl, eh->ctrl_port);
+}
+
+static inline unsigned int etherh_get_stat(struct etherh_priv *eh)
+{
+ return readb(eh->ctrl_port);
+}
+
+
+
+
+static void etherh_irq_enable(ecard_t *ec, int irqnr)
+{
+ struct etherh_priv *eh = ec->irq_data;
+
+ etherh_set_ctrl(eh, ETHERH_CP_IE);
+}
+
+static void etherh_irq_disable(ecard_t *ec, int irqnr)
+{
+ struct etherh_priv *eh = ec->irq_data;
+
+ etherh_clr_ctrl(eh, ETHERH_CP_IE);
+}
+
+static expansioncard_ops_t etherh_ops = {
+ .irqenable = etherh_irq_enable,
+ .irqdisable = etherh_irq_disable,
+};
+
+
+
+
+static void
+etherh_setif(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long flags;
+ void __iomem *addr;
+
+ local_irq_save(flags);
+
+ /* set the interface type */
+ switch (etherh_priv(dev)->id) {
+ case PROD_I3_ETHERLAN600:
+ case PROD_I3_ETHERLAN600A:
+ addr = (void *)dev->base_addr + EN0_RCNTHI;
+
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ writeb((readb(addr) & 0xf8) | 1, addr);
+ break;
+ case IF_PORT_10BASET:
+ writeb((readb(addr) & 0xf8), addr);
+ break;
+ }
+ break;
+
+ case PROD_I3_ETHERLAN500:
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF);
+ break;
+
+ case IF_PORT_10BASET:
+ etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF);
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+static int
+etherh_getifstat(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *addr;
+ int stat = 0;
+
+ switch (etherh_priv(dev)->id) {
+ case PROD_I3_ETHERLAN600:
+ case PROD_I3_ETHERLAN600A:
+ addr = (void *)dev->base_addr + EN0_RCNTHI;
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ stat = 1;
+ break;
+ case IF_PORT_10BASET:
+ stat = readb(addr) & 4;
+ break;
+ }
+ break;
+
+ case PROD_I3_ETHERLAN500:
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ stat = 1;
+ break;
+ case IF_PORT_10BASET:
+ stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT;
+ break;
+ }
+ break;
+
+ default:
+ stat = 0;
+ break;
+ }
+
+ return stat != 0;
+}
+
+/*
+ * Configure the interface. Note that we ignore the other
+ * parts of ifmap, since it's mostly meaningless for this driver.
+ */
+static int etherh_set_config(struct net_device *dev, struct ifmap *map)
+{
+ switch (map->port) {
+ case IF_PORT_10BASE2:
+ case IF_PORT_10BASET:
+ /*
+ * If the user explicitly sets the interface
+ * media type, turn off automedia detection.
+ */
+ dev->flags &= ~IFF_AUTOMEDIA;
+ dev->if_port = map->port;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ etherh_setif(dev);
+
+ return 0;
+}
+
+/*
+ * Reset the 8390 (hard reset). Note that we can't actually do this.
+ */
+static void
+etherh_reset(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *addr = (void *)dev->base_addr;
+
+ writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
+
+ /*
+ * See if we need to change the interface type.
+ * Note that we use 'interface_num' as a flag
+ * to indicate that we need to change the media.
+ */
+ if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) {
+ ei_local->interface_num = 0;
+
+ if (dev->if_port == IF_PORT_10BASET)
+ dev->if_port = IF_PORT_10BASE2;
+ else
+ dev->if_port = IF_PORT_10BASET;
+
+ etherh_setif(dev);
+ }
+}
+
+/*
+ * Write a block of data out to the 8390
+ */
+static void
+etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long dma_start;
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ /*
+ * Make sure we have a round number of bytes if we're in word mode.
+ */
+ if (count & 1 && ei_local->word16)
+ count++;
+
+ ei_local->dmaing = 1;
+
+ addr = (void *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ count = (count + 1) & ~1;
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+
+ writeb (0x42, addr + EN0_RCNTLO);
+ writeb (0x00, addr + EN0_RCNTHI);
+ writeb (0x42, addr + EN0_RSARLO);
+ writeb (0x00, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ udelay (1);
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ writeb (count, addr + EN0_RCNTLO);
+ writeb (count >> 8, addr + EN0_RCNTHI);
+ writeb (0, addr + EN0_RSARLO);
+ writeb (start_page, addr + EN0_RSARHI);
+ writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16)
+ writesw (dma_base, buf, count >> 1);
+ else
+ writesb (dma_base, buf, count);
+
+ dma_start = jiffies;
+
+ while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
+ dev->name);
+ etherh_reset (dev);
+ NS8390_init (dev, 1);
+ break;
+ }
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Read a block of data from the 8390
+ */
+static void
+etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char *buf;
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing = 1;
+
+ addr = (void *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ buf = skb->data;
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+ writeb (count, addr + EN0_RCNTLO);
+ writeb (count >> 8, addr + EN0_RCNTHI);
+ writeb (ring_offset, addr + EN0_RSARLO);
+ writeb (ring_offset >> 8, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16) {
+ readsw (dma_base, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = readb (dma_base);
+ } else
+ readsb (dma_base, buf, count);
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Read a header from the 8390
+ */
+static void
+etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
+ " DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing = 1;
+
+ addr = (void *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+ writeb (sizeof (*hdr), addr + EN0_RCNTLO);
+ writeb (0, addr + EN0_RCNTHI);
+ writeb (0, addr + EN0_RSARLO);
+ writeb (ring_page, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16)
+ readsw (dma_base, hdr, sizeof (*hdr) >> 1);
+ else
+ readsb (dma_base, hdr, sizeof (*hdr));
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+etherh_open(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))
+ return -EAGAIN;
+
+ /*
+ * Make sure that we aren't going to change the
+ * media type on the next reset - we are about to
+ * do automedia manually now.
+ */
+ ei_local->interface_num = 0;
+
+ /*
+ * If we are doing automedia detection, do it now.
+ * This is more reliable than the 8390's detection.
+ */
+ if (dev->flags & IFF_AUTOMEDIA) {
+ dev->if_port = IF_PORT_10BASET;
+ etherh_setif(dev);
+ mdelay(1);
+ if (!etherh_getifstat(dev)) {
+ dev->if_port = IF_PORT_10BASE2;
+ etherh_setif(dev);
+ }
+ } else
+ etherh_setif(dev);
+
+ etherh_reset(dev);
+ ei_open(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to etherh_open().
+ */
+static int
+etherh_close(struct net_device *dev)
+{
+ ei_close (dev);
+ free_irq (dev->irq, dev);
+ return 0;
+}
+
+/*
+ * Initialisation
+ */
+
+static void __init etherh_banner(void)
+{
+ static int version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+/*
+ * Read the Ethernet address string from the on-board ROM.
+ * This is an ASCII string...
+ */
+static int __init etherh_addr(char *addr, struct expansion_card *ec)
+{
+ struct in_chunk_dir cd;
+ char *s;
+
+ if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
+ printk(KERN_ERR "%s: unable to read podule description string\n",
+ ec->dev.bus_id);
+ goto no_addr;
+ }
+
+ s = strchr(cd.d.string, '(');
+ if (s) {
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ addr[i] = simple_strtoul(s + 1, &s, 0x10);
+ if (*s != (i == 5? ')' : ':'))
+ break;
+ }
+
+ if (i == 6)
+ return 0;
+ }
+
+ printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
+ ec->dev.bus_id, cd.d.string);
+
+ no_addr:
+ return -ENODEV;
+}
+
+/*
+ * Create an ethernet address from the system serial number.
+ */
+static int __init etherm_addr(char *addr)
+{
+ unsigned int serial;
+
+ if (system_serial_low == 0 && system_serial_high == 0)
+ return -ENODEV;
+
+ serial = system_serial_low | system_serial_high;
+
+ addr[0] = 0;
+ addr[1] = 0;
+ addr[2] = 0xa4;
+ addr[3] = 0x10 + (serial >> 24);
+ addr[4] = serial >> 16;
+ addr[5] = serial >> 8;
+ return 0;
+}
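For illustration, with a hypothetical machine serial number of 0x12345678 the code above would yield the address 00:00:a4:22:34:56 (addr[3] = 0x10 + 0x12, then the next two bytes of the serial). A tiny free-standing version of the same arithmetic, using that made-up serial:

#include <stdio.h>

int main(void)
{
	unsigned int serial = 0x12345678;	/* hypothetical serial, for illustration only */
	unsigned char addr[6] = { 0, 0, 0xa4, 0, 0, 0 };

	addr[3] = 0x10 + (serial >> 24);	/* 0x10 + 0x12 = 0x22 */
	addr[4] = serial >> 16;			/* 0x34 */
	addr[5] = serial >> 8;			/* 0x56 */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}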
+
+static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev->class_dev.dev->bus_id,
+ sizeof(info->bus_info));
+}
+
+static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = etherh_priv(dev)->supported;
+ cmd->speed = SPEED_10;
+ cmd->duplex = DUPLEX_HALF;
+ cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
+ cmd->autoneg = dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ switch (cmd->autoneg) {
+ case AUTONEG_ENABLE:
+ dev->flags |= IFF_AUTOMEDIA;
+ break;
+
+ case AUTONEG_DISABLE:
+ switch (cmd->port) {
+ case PORT_TP:
+ dev->if_port = IF_PORT_10BASET;
+ break;
+
+ case PORT_BNC:
+ dev->if_port = IF_PORT_10BASE2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ dev->flags &= ~IFF_AUTOMEDIA;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ etherh_setif(dev);
+
+ return 0;
+}
+
+static struct ethtool_ops etherh_ethtool_ops = {
+ .get_settings = etherh_get_settings,
+ .set_settings = etherh_set_settings,
+ .get_drvinfo = etherh_get_drvinfo,
+};
+
+static u32 etherh_regoffsets[16];
+static u32 etherm_regoffsets[16];
+
+static int __init
+etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ const struct etherh_data *data = id->data;
+ struct ei_device *ei_local;
+ struct net_device *dev;
+ struct etherh_priv *eh;
+ int i, ret;
+
+ etherh_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = __alloc_ei_netdev(sizeof(struct etherh_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ dev->open = etherh_open;
+ dev->stop = etherh_close;
+ dev->set_config = etherh_set_config;
+ dev->irq = ec->irq;
+ dev->ethtool_ops = &etherh_ethtool_ops;
+
+ if (data->supported & SUPPORTED_Autoneg)
+ dev->flags |= IFF_AUTOMEDIA;
+ if (data->supported & SUPPORTED_TP) {
+ dev->flags |= IFF_PORTSEL;
+ dev->if_port = IF_PORT_10BASET;
+ } else if (data->supported & SUPPORTED_BNC) {
+ dev->flags |= IFF_PORTSEL;
+ dev->if_port = IF_PORT_10BASE2;
+ } else
+ dev->if_port = IF_PORT_UNKNOWN;
+
+ eh = etherh_priv(dev);
+ eh->supported = data->supported;
+ eh->ctrl = 0;
+ eh->id = ec->cid.product;
+ eh->memc = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), PAGE_SIZE);
+ if (!eh->memc) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ eh->ctrl_port = eh->memc;
+ if (data->ctrl_ioc) {
+ eh->ioc_fast = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST), PAGE_SIZE);
+ if (!eh->ioc_fast) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ eh->ctrl_port = eh->ioc_fast;
+ }
+
+ dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
+ eh->dma_base = eh->memc + data->dataport_offset;
+ eh->ctrl_port += data->ctrlport_offset;
+
+ /*
+ * IRQ and control port handling - only for non-NIC slot cards.
+ */
+ if (ec->slot_no != 8) {
+ ec->ops = &etherh_ops;
+ ec->irq_data = eh;
+ } else {
+ /*
+ * If we're in the NIC slot, make sure the IRQ is enabled
+ */
+ etherh_set_ctrl(eh, ETHERH_CP_IE);
+ }
+
+ ei_local = netdev_priv(dev);
+ spin_lock_init(&ei_local->page_lock);
+
+ if (ec->cid.product == PROD_ANT_ETHERM) {
+ etherm_addr(dev->dev_addr);
+ ei_local->reg_offset = etherm_regoffsets;
+ } else {
+ etherh_addr(dev->dev_addr, ec);
+ ei_local->reg_offset = etherh_regoffsets;
+ }
+
+ ei_local->name = dev->name;
+ ei_local->word16 = 1;
+ ei_local->tx_start_page = data->tx_start_page;
+ ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES;
+ ei_local->stop_page = data->stop_page;
+ ei_local->reset_8390 = etherh_reset;
+ ei_local->block_input = etherh_block_input;
+ ei_local->block_output = etherh_block_output;
+ ei_local->get_8390_hdr = etherh_get_header;
+ ei_local->interface_num = 0;
+
+ etherh_reset(dev);
+ NS8390_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk(KERN_INFO "%s: %s in slot %d, ",
+ dev->name, data->name, ec->slot_no);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ ecard_set_drvdata(ec, dev);
+
+ return 0;
+
+ free:
+ if (eh->ioc_fast)
+ iounmap(eh->ioc_fast);
+ if (eh->memc)
+ iounmap(eh->memc);
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit etherh_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+ struct etherh_priv *eh = etherh_priv(dev);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ ec->ops = NULL;
+
+ if (eh->ioc_fast)
+ iounmap(eh->ioc_fast);
+ iounmap(eh->memc);
+
+ free_netdev(dev);
+
+ ecard_release_resources(ec);
+}
+
+static struct etherh_data etherm_data = {
+ .ns8390_offset = ETHERM_NS8390,
+ .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT,
+ .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT,
+ .name = "ANT EtherM",
+ .supported = SUPPORTED_10baseT_Half,
+ .tx_start_page = ETHERM_TX_START_PAGE,
+ .stop_page = ETHERM_STOP_PAGE,
+};
+
+static struct etherh_data etherlan500_data = {
+ .ns8390_offset = ETHERH500_NS8390,
+ .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT,
+ .ctrlport_offset = ETHERH500_CTRLPORT,
+ .ctrl_ioc = 1,
+ .name = "i3 EtherH 500",
+ .supported = SUPPORTED_10baseT_Half,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static struct etherh_data etherlan600_data = {
+ .ns8390_offset = ETHERH600_NS8390,
+ .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
+ .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
+ .name = "i3 EtherH 600",
+ .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static struct etherh_data etherlan600a_data = {
+ .ns8390_offset = ETHERH600_NS8390,
+ .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
+ .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
+ .name = "i3 EtherH 600A",
+ .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static const struct ecard_id etherh_ids[] = {
+ { MANU_ANT, PROD_ANT_ETHERM, &etherm_data },
+ { MANU_I3, PROD_I3_ETHERLAN500, &etherlan500_data },
+ { MANU_I3, PROD_I3_ETHERLAN600, &etherlan600_data },
+ { MANU_I3, PROD_I3_ETHERLAN600A, &etherlan600a_data },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver etherh_driver = {
+ .probe = etherh_probe,
+ .remove = __devexit_p(etherh_remove),
+ .id_table = etherh_ids,
+ .drv = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init etherh_init(void)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ etherh_regoffsets[i] = i << 2;
+ etherm_regoffsets[i] = i << 5;
+ }
+
+ return ecard_register_driver(&etherh_driver);
+}
+
+static void __exit etherh_exit(void)
+{
+ ecard_remove_driver(&etherh_driver);
+}
+
+module_init(etherh_init);
+module_exit(etherh_exit);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
new file mode 100644
index 000000000000..b8ab2b6355eb
--- /dev/null
+++ b/drivers/net/at1700.c
@@ -0,0 +1,939 @@
+/* at1700.c: A network device driver for the Allied Telesis AT1700.
+
+ Written 1993-98 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a device driver for the Allied Telesis AT1700, and
+ Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are
+ straight-forward Fujitsu MB86965 implementations.
+
+ Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya
+ (tamy@flab.fujitsu.co.jp).
+
+ Sources:
+ The Fujitsu MB86965 datasheet.
+
+ After the initial version of this driver was written Gerry Sawkins of
+ ATI provided their EEPROM configuration code header file.
+ Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
+
+ MCA bus (AT1720) support by Rene Schmit <rene@bss.lu>
+
+ Bugs:
+ The MB86965 has a design flaw that makes all probes unreliable. Not
+ only is it difficult to detect, it also moves around in I/O space in
+ response to inb()s from other device probes!
+*/
+/*
+ 99/03/03 Allied Telesis RE1000 Plus support by T.Hagawa
+ 99/12/30 port to 2.3.35 by K.Takai
+*/
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mca-legacy.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static char version[] __initdata =
+ "at1700.c:v1.15 4/7/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#define DRV_NAME "at1700"
+
+/* Tunable parameters. */
+
+/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
+#define MC_FILTERBREAK 64
+
+/* These unusual address orders are used to verify the CONFIG register. */
+
+static int fmv18x_probe_list[] __initdata = {
+ 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
+};
+
+/*
+ * ISA
+ */
+
+static unsigned at1700_probe_list[] __initdata = {
+ 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
+};
+
+/*
+ * MCA
+ */
+#ifdef CONFIG_MCA_LEGACY
+static int at1700_ioaddr_pattern[] __initdata = {
+ 0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07
+};
+
+static int at1700_mca_probe_list[] __initdata = {
+ 0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0
+};
+
+static int at1700_irq_pattern[] __initdata = {
+ 0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00,
+ 0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00
+};
+#endif
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ spinlock_t lock;
+ unsigned char mc_filter[8];
+ uint jumpered:1; /* Set iff the board has jumper config. */
+ uint tx_started:1; /* Packets are on the Tx queue. */
+ uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
+ uint rx_started:1; /* Packets are Rxing. */
+ uchar tx_queue; /* Number of packets on the Tx queue. */
+ char mca_slot; /* -1 means ISA */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define COL16CNTL 11 /* Control Reg for 16 collisions */
+#define MODE13 13
+#define RX_CTRL 14
+/* Configuration registers only on the '865A/B chips. */
+#define EEPROM_Ctrl 16
+#define EEPROM_Data 17
+#define CARDSTATUS 16 /* FMV-18x Card Status */
+#define CARDSTATUS1 17 /* FMV-18x Card Status */
+#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
+#define IOCONFIG1 19
+#define SAPROM 20 /* The station address PROM, if no EEPROM. */
+#define MODE24 24
+#define RESET 31 /* Write to reset some parts of the chip. */
+#define AT1700_IO_EXTENT 32
+#define PORT_OFFSET(o) (o)
+
+
+#define TX_TIMEOUT 10
+
+
+/* Index to functions, as function prototypes. */
+
+static int at1700_probe1(struct net_device *dev, int ioaddr);
+static int read_eeprom(long ioaddr, int location);
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void net_tx_timeout (struct net_device *dev);
+
+
+#ifdef CONFIG_MCA_LEGACY
+struct at1720_mca_adapters_struct {
+ char* name;
+ int id;
+};
+/* rEnE : maybe there are others I don't know of... */
+
+static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = {
+ { "Allied Telesys AT1720AT", 0x6410 },
+ { "Allied Telesys AT1720BT", 0x6413 },
+ { "Allied Telesys AT1720T", 0x6416 },
+ { NULL, 0 },
+};
+#endif
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+
+static int io = 0x260;
+
+static int irq;
+
+static void cleanup_card(struct net_device *dev)
+{
+#ifdef CONFIG_MCA_LEGACY
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->mca_slot >= 0)
+ mca_mark_as_unused(lp->mca_slot);
+#endif
+ free_irq(dev->irq, NULL);
+ release_region(dev->base_addr, AT1700_IO_EXTENT);
+}
+
+struct net_device * __init at1700_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ } else {
+ dev->base_addr = io;
+ dev->irq = irq;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = at1700_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = at1700_probe_list; *port; port++) {
+ if (at1700_probe1(dev, *port) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into an
+ EEPROM read. */
+
+static int __init at1700_probe1(struct net_device *dev, int ioaddr)
+{
+ char fmv_irqmap[4] = {3, 7, 10, 15};
+ char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
+ char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
+ int slot, ret = -ENODEV;
+ struct net_local *lp = netdev_priv(dev);
+
+ if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe
+ for.
+ */
+#ifdef notdef
+ printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
+ ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
+ read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
+#endif
+
+#ifdef CONFIG_MCA_LEGACY
+ /* rEnE (rene@bss.lu): got this from 3c509 driver source, adapted for AT1720 */
+
+ /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily
+ modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca)
+ to support standard MCA probing. */
+
+ /* redone for multi-card detection by ZP Gu (zpg@castle.net) */
+ /* now works as a module */
+
+ if (MCA_bus) {
+ int j;
+ int l_i;
+ u_char pos3, pos4;
+
+ for (j = 0; at1720_mca_adapters[j].name != NULL; j ++) {
+ slot = 0;
+ while (slot != MCA_NOTFOUND) {
+
+ slot = mca_find_unused_adapter( at1720_mca_adapters[j].id, slot );
+ if (slot == MCA_NOTFOUND) break;
+
+ /* if we get this far, an adapter has been detected and is
+ enabled */
+
+ pos3 = mca_read_stored_pos( slot, 3 );
+ pos4 = mca_read_stored_pos( slot, 4 );
+
+ for (l_i = 0; l_i < 0x09; l_i++)
+ if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i])
+ break;
+ ioaddr = at1700_mca_probe_list[l_i];
+
+ for (irq = 0; irq < 0x10; irq++)
+ if (((((pos4>>4) & 0x0f) | (pos3 & 0xf0)) & 0xff) == at1700_irq_pattern[irq])
+ break;
+
+ /* probing for a card at a particular IO/IRQ */
+ if ((dev->irq && dev->irq != irq) ||
+ (dev->base_addr && dev->base_addr != ioaddr)) {
+ slot++; /* probing next slot */
+ continue;
+ }
+
+ dev->irq = irq;
+
+ /* claim the slot */
+ mca_set_adapter_name( slot, at1720_mca_adapters[j].name );
+ mca_mark_as_used(slot);
+
+ goto found;
+ }
+ }
+ /* if we get here, we didn't find an MCA adapter - try ISA */
+ }
+#endif
+ slot = -1;
+ /* We must check for the EEPROM-config boards first, else accessing
+ IOCONFIG0 will move the board! */
+ if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr
+ && read_eeprom(ioaddr, 4) == 0x0000
+ && (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
+ is_at1700 = 1;
+ else if (inb(ioaddr + SAPROM ) == 0x00
+ && inb(ioaddr + SAPROM + 1) == 0x00
+ && inb(ioaddr + SAPROM + 2) == 0x0e)
+ is_fmv18x = 1;
+ else {
+ goto err_out;
+ }
+
+#ifdef CONFIG_MCA_LEGACY
+found:
+#endif
+
+ /* Reset the internal state machines. */
+ outb(0, ioaddr + RESET);
+
+ if (is_at1700) {
+ irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
+ | (read_eeprom(ioaddr, 0)>>14)];
+ } else {
+ /* Check PnP mode for FMV-183/184/183A/184A. */
+ /* This PnP routine is very poor. IO and IRQ should be known. */
+ if (inb(ioaddr + CARDSTATUS1) & 0x20) {
+ irq = dev->irq;
+ for (i = 0; i < 8; i++) {
+ if (irq == fmv_irqmap_pnp[i])
+ break;
+ }
+ if (i == 8) {
+ goto err_mca;
+ }
+ } else {
+ if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
+ goto err_mca;
+ irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
+ }
+ }
+
+ printk("%s: %s found at %#3x, IRQ %d, address ", dev->name,
+ is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ if (is_at1700) {
+ for(i = 0; i < 3; i++) {
+ unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
+ printk("%04x", eeprom_val);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
+ }
+ } else {
+ for(i = 0; i < 6; i++) {
+ unsigned char val = inb(ioaddr + SAPROM + i);
+ printk("%02x", val);
+ dev->dev_addr[i] = val;
+ }
+ }
+
+ /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
+ if (is_at1700) {
+ ushort setup_value = read_eeprom(ioaddr, 12);
+ dev->if_port = setup_value >> 8;
+ } else {
+ ushort setup_value = inb(ioaddr + CARDSTATUS);
+ switch (setup_value & 0x07) {
+ case 0x01: /* 10base5 */
+ case 0x02: /* 10base2 */
+ dev->if_port = 0x18; break;
+ case 0x04: /* 10baseT */
+ dev->if_port = 0x08; break;
+ default: /* auto-sense */
+ dev->if_port = 0x00; break;
+ }
+ }
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, two 4K Tx queues, and disabled Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Set the station address in bank zero. */
+ outb(0x00, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i));
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0x04, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + PORT_OFFSET(8 + i));
+
+
+ /* Switch to bank 2 */
+ /* Lock our I/O address, and set manual processing mode for 16 collisions. */
+ outb(0x08, ioaddr + CONFIG_1);
+ outb(dev->if_port, ioaddr + MODE13);
+ outb(0x00, ioaddr + COL16CNTL);
+
+ if (net_debug)
+ printk(version);
+
+ memset(lp, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->tx_timeout = net_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ spin_lock_init(&lp->lock);
+
+ lp->jumpered = is_fmv18x;
+ lp->mca_slot = slot;
+ /* Snarf the interrupt vector now. */
+ ret = request_irq(irq, &net_interrupt, 0, DRV_NAME, dev);
+ if (ret) {
+ printk (" AT1700 at %#3x is unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+ goto err_mca;
+ }
+
+ return 0;
+
+err_mca:
+#ifdef CONFIG_MCA_LEGACY
+ if (slot >= 0)
+ mca_mark_as_unused(slot);
+#endif
+err_out:
+ release_region(ioaddr, AT1700_IO_EXTENT);
+ return ret;
+}
+
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
+#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
+#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
+#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int __init read_eeprom(long ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ long ee_addr = ioaddr + EEPROM_Ctrl;
+ long ee_daddr = ioaddr + EEPROM_Data;
+ int read_cmd = location | EE_READ_CMD;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_CS, ee_addr);
+ outb(dataval, ee_daddr);
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ }
+ outb(EE_DATA_WRITE, ee_daddr);
+ for (i = 16; i > 0; i--) {
+ outb(EE_CS, ee_addr);
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(EE_CS, ee_addr);
+ outb(EE_SHIFT_CLK, ee_addr);
+ outb(0, ee_addr);
+ return retval;
+}
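read_eeprom() above bit-bangs a serial EEPROM: it shifts out a 10-bit command, location | EE_READ_CMD, MSB first on the EEPROM_Data register while toggling EE_SHIFT_CLK in EEPROM_Ctrl, and then clocks 16 result bits back in. The sketch below performs no I/O; it only prints the command bit pattern that would be shifted out for a given location, mirroring the defines and the loop order above:

#include <stdio.h>

#define EE_READ_CMD (6 << 6)	/* as defined above: start bit plus read opcode */

/* Print the 10 command bits in exactly the order the
 * "for (i = 9; i >= 0; i--)" loop in read_eeprom() shifts them out. */
static void show_read_command(int location)
{
	int read_cmd = location | EE_READ_CMD;
	int i;

	printf("location %2d -> ", location);
	for (i = 9; i >= 0; i--)
		putchar((read_cmd & (1 << i)) ? '1' : '0');
	putchar('\n');
}

int main(void)
{
	show_read_command(4);	/* first station-address word read by at1700_probe1() */
	show_read_command(12);	/* configuration word consulted by the probe */
	return 0;
}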
+
+
+
+static int net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
+ bus access, and two 4K Tx queues. */
+ outb(0x5a, ioaddr + CONFIG_0);
+
+ /* Powerup, switch to register bank 2, and enable the Rx and Tx. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue_ready = 1;
+ lp->rx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on hardware Tx and Rx interrupts. */
+ outb(0x82, ioaddr + TX_INTR);
+ outb(0x81, ioaddr + RX_INTR);
+
+ /* Enable the IRQ on boards of fmv18x where it is feasible. */
+ if (lp->jumpered) {
+ outb(0x80, ioaddr + IOCONFIG1);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static void net_tx_timeout (struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ printk ("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
+ inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
+ inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ outw(0xffff, ioaddr + MODE24);
+ outw (0xffff, ioaddr + TX_STATUS);
+ outb (0x5a, ioaddr + CONFIG_0);
+ outb (0xe8, ioaddr + CONFIG_1);
+ outw (0x8182, ioaddr + TX_INTR);
+ outb (0x00, ioaddr + TX_START);
+ outb (0x03, ioaddr + COL16CNTL);
+
+ dev->trans_start = jiffies;
+
+ lp->tx_started = 0;
+ lp->tx_queue_ready = 1;
+ lp->rx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ netif_wake_queue(dev);
+}
+
+
+static int net_send_packet (struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short len = skb->len;
+ unsigned char *buf = skb->data;
+ static u8 pad[ETH_ZLEN];
+
+ netif_stop_queue (dev);
+
+ /* We must not start transmitting until the packet has been fully
+ transferred into the Tx queue. While executing the following code
+ we may take a Tx interrupt, so clear tx_queue_ready to prevent the
+ interrupt routine (net_interrupt) from starting a transmission. */
+ lp->tx_queue_ready = 0;
+ {
+ outw (length, ioaddr + DATAPORT);
+ /* Packet data */
+ outsw (ioaddr + DATAPORT, buf, len >> 1);
+ /* Check for dribble byte */
+ if (len & 1) {
+ outw(skb->data[skb->len-1], ioaddr + DATAPORT);
+ len++;
+ }
+ /* Check for packet padding */
+ if (length != skb->len)
+ outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+ }
+ lp->tx_queue_ready = 1;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb (0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ netif_start_queue (dev);
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ netif_start_queue (dev);
+ dev_kfree_skb (skb);
+
+ return 0;
+}
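net_send_packet() above relies on the "two 4K Tx queues" configuration programmed in at1700_probe1(): frames are copied into an on-chip 4 KB queue, each preceded by a 2-byte length word, and transmission is only triggered when the transmitter is idle. The "tx_queue_len < 4096 - 1502" test keeps the queue open only while roughly one more worst-case frame would still fit (1502 presumably being 1500 data bytes plus the length word). A small free-standing sketch of that bookkeeping:

#include <stdio.h>

#define TX_QUEUE_SIZE	4096	/* one of the chip's two 4K Tx queues */
#define TX_HEADROOM	1502	/* headroom kept by the driver; presumably 1500 + 2-byte length word */

/* Mirrors the decision in net_send_packet(): after queuing a frame,
 * keep accepting packets only if another large frame still fits. */
static int can_queue_more(unsigned int queued_bytes)
{
	return queued_bytes < TX_QUEUE_SIZE - TX_HEADROOM;
}

int main(void)
{
	unsigned int queued = 0, frame_len = 1000;

	queued += frame_len + 2;	/* 2-byte length word precedes each frame */
	printf("queued %u bytes: %s\n", queued,
	       can_queue_more(queued) ? "room for more" : "stop the queue");
	return 0;
}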
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock (&lp->lock);
+
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (lp->rx_started == 0 &&
+ (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
+ /* Got a packet(s).
+ net_rx must not be entered more than once at a time for the same
+ device. While net_rx runs we may take a Tx interrupt, so set
+ rx_started to prevent the interrupt routine (net_interrupt) from
+ diving into net_rx again. */
+ handled = 1;
+ lp->rx_started = 1;
+ outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
+ net_rx(dev);
+ outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
+ lp->rx_started = 0;
+ }
+ if (status & 0x00ff) {
+ handled = 1;
+ if (status & 0x02) {
+ /* More than 16 collisions occurred */
+ if (net_debug > 4)
+ printk("%s: 16 Collision occur during Txing.\n", dev->name);
+ /* Cancel sending a packet. */
+ outb(0x03, ioaddr + COL16CNTL);
+ lp->stats.collisions++;
+ }
+ if (status & 0x82) {
+ lp->stats.tx_packets++;
+ /* If the Tx queue holds packets and no packet is currently
+ being transferred from the host, start transmitting. */
+ if (lp->tx_queue && lp->tx_queue_ready) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+ } else {
+ lp->tx_started = 0;
+ netif_wake_queue (dev);
+ }
+ }
+ }
+
+ spin_unlock (&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ ushort status = inw(ioaddr + DATAPORT);
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + RX_CTRL);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The AT1700 claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + RX_CTRL);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + RX_CTRL);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* Drain anything still left in the receive FIFO: issue skip
+ commands (at most 20) until the RX buffer reports empty. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + RX_CTRL);
+ }
+
+ if (net_debug > 5)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* No statistic counters on the chip to update. */
+
+	/* Disable the IRQ on FMV18X boards where this is feasible. */
+ if (lp->jumpered) {
+ outb(0x00, ioaddr + IOCONFIG1);
+ free_irq(dev->irq, dev);
+ }
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+ return 0;
+}
+
+/* Get the current statistics.
+ This may be called with the card open or closed.
+ There are no on-chip counters, so this function is trivial.
+*/
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+static void
+set_rx_mode(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ unsigned long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit =
+ ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+			mc_filter[bit >> 3] |= (1 << (bit & 7));
+ }
+ outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
+ }
+
+ spin_lock_irqsave (&lp->lock, flags);
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inw(ioaddr + CONFIG_0);
+ /* Switch to bank 1 and set the multicast table. */
+ outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i));
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outw(saved_bank, ioaddr + CONFIG_0);
+ }
+ spin_unlock_irqrestore (&lp->lock, flags);
+ return;
+}
+
+#ifdef MODULE
+static struct net_device *dev_at1700;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(net_debug, int, 0);
+MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
+MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
+MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("at1700: You should not use auto-probing with insmod!\n");
+ dev_at1700 = at1700_probe(-1);
+ if (IS_ERR(dev_at1700))
+ return PTR_ERR(dev_at1700);
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_at1700);
+ cleanup_card(dev_at1700);
+ free_netdev(dev_at1700);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * tab-width: 4
+ * c-basic-offset: 4
+ * c-indent-level: 4
+ * End:
+ */
+
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c
new file mode 100644
index 000000000000..1798ce7262c9
--- /dev/null
+++ b/drivers/net/atari_bionet.c
@@ -0,0 +1,674 @@
+/* bionet.c BioNet-100 device driver for linux68k.
+ *
+ * Version: @(#)bionet.c 1.0 02/06/96
+ *
+ * Author: Hartmut Laue <laue@ifk-mp.uni-kiel.de>
+ * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
+ *
+ * Little adaptations for integration into pl7 by Roman Hodek
+ *
+ * Some changes in bionet_poll_rx by Karl-Heinz Lohner
+ *
+ What is it ?
+ ------------
+ This driver controls the BIONET-100 LAN-Adapter which connects
+ an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
+
+ This version can be compiled as a loadable module (See the
+ compile command at the bottom of this file).
+ At load time, you can optionally set the debugging level and the
+ fastest response time on the command line of 'insmod'.
+
+ 'bionet_debug'
+ controls the amount of diagnostic messages:
+ 0 : no messages
+ >0 : see code for meaning of printed messages
+
+ 'bionet_min_poll_time' (always >=1)
+ gives the time (in jiffies) between polls. Low values
+ increase the system load (beware!)
+
+ When loaded, a net device with the name 'bio0' becomes available,
+ which can be controlled with the usual 'ifconfig' command.
+
+	It is possible to compile this driver into the kernel like other
+	(net) drivers. For this purpose, some source files (e.g. config files,
+	makefiles, Space.c) must be changed accordingly. (You may refer to
+	other drivers for how to do it.) In this case, the device will be
+	detected at boot time and (probably) appear as 'eth0'.
+
+ This code is based on several sources:
+ - The driver code for a parallel port ethernet adapter by
+ Donald Becker (see file 'atp.c' from the PC linux distribution)
+ - The ACSI code by Roman Hodek for the ATARI-ACSI harddisk support
+ and DMA handling.
+ - Very limited information about moving packets in and out of the
+ BIONET-adapter from the TCP package for TOS by BioData GmbH.
+
+ Theory of Operation
+ -------------------
+ Because the ATARI DMA port is usually shared between several
+ devices (eg. harddisk, floppy) we cannot block the ACSI bus
+ while waiting for interrupts. Therefore we use a polling mechanism
+ to fetch packets from the adapter. For the same reason, we send
+ packets without checking that the previous packet has been sent to
+ the LAN. We rely on the higher levels of the networking code to detect
+ missing packets and resend them.
+
+ Before we access the ATARI DMA controller, we check if another
+ process is using the DMA. If not, we lock the DMA, perform one or
+ more packet transfers and unlock the DMA before returning.
+	We do not use 'stdma_lock' unconditionally because it is unclear
+	whether the networking code can be put to sleep, which will happen if
+	another (possibly slow) device is using the DMA controller.
+
+ The polling is done via timer interrupts which periodically
+ 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
+ between polls varies depending on an estimate of the net activity.
+ The allowed range is given by the variable 'bionet_min_poll_time'
+ for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
+ for the higher (slowest) limit.
+
+ Whenever a packet arrives, we switch to fastest response by setting
+ the polling time to its lowest limit. If the following poll fails,
+ because no packets have arrived, we increase the time for the next
+ poll. When the net activity is low, the polling time effectively
+ stays at its maximum value, resulting in the lowest load for the
+ machine.
+ */
+
+#define MAX_POLL_TIME 10
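+
+/* A minimal sketch of the adaptive polling policy described above; it is
+ * illustrative only and not referenced anywhere in this driver (the real
+ * adjustments happen in bionet_poll_rx() and bionet_tick() below).
+ * 'min_poll' stands in for bionet_min_poll_time.
+ */
+static inline int next_poll_delay(int packet_arrived, int cur_delay, int min_poll)
+{
+	if (packet_arrived)			/* switch to fastest response */
+		return min_poll;
+	if (cur_delay < MAX_POLL_TIME)		/* slowly back off while idle */
+		return cur_delay + 1;
+	return MAX_POLL_TIME;
+}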
+
+static char version[] =
+ "bionet.c:v1.0 06-feb-96 (c) Hartmut Laue.\n";
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_acsi.h>
+#include <asm/atari_stdma.h>
+
+
+/* use 0 for production, 1 for verification, >2 for debug
+ */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+/*
+ * Global variable 'bionet_debug'. Can be set at load time by 'insmod'
+ */
+unsigned int bionet_debug = NET_DEBUG;
+MODULE_PARM(bionet_debug, "i");
+MODULE_PARM_DESC(bionet_debug, "bionet debug level (0-2)");
+MODULE_LICENSE("GPL");
+
+static unsigned int bionet_min_poll_time = 2;
+
+
+/* Information that needs to be kept for each board.
+ */
+struct net_local {
+ struct net_device_stats stats;
+ long open_time; /* for debugging */
+ int poll_time; /* polling time varies with net load */
+};
+
+static struct nic_pkt_s { /* packet format */
+ unsigned char status;
+ unsigned char dummy;
+ unsigned char l_lo, l_hi;
+ unsigned char buffer[3000];
+} *nic_packet;
+unsigned char *phys_nic_packet;
+
+/* Index to functions, as function prototypes.
+ */
+static int bionet_open(struct net_device *dev);
+static int bionet_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void bionet_poll_rx(struct net_device *);
+static int bionet_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void bionet_tick(unsigned long);
+
+static struct timer_list bionet_timer = TIMER_INITIALIZER(bionet_tick, 0, 0);
+
+#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
+
+/* The following routines access the ethernet board connected to the
+ * ACSI port via the st_dma chip.
+ */
+#define NODE_ADR 0x60
+
+#define C_READ 8
+#define C_WRITE 0x0a
+#define C_GETEA 0x0f
+#define C_SETCR 0x0e
+
+static int
+sendcmd(unsigned int a0, unsigned int mod, unsigned int cmd) {
+ unsigned int c;
+
+ dma_wd.dma_mode_status = (mod | ((a0) ? 2 : 0) | 0x88);
+ dma_wd.fdc_acces_seccount = cmd;
+ dma_wd.dma_mode_status = (mod | 0x8a);
+
+ if( !acsi_wait_for_IRQ(HZ/2) ) /* wait for cmd ack */
+ return -1; /* timeout */
+
+ c = dma_wd.fdc_acces_seccount;
+ return (c & 0xff);
+}
+
+
+static void
+set_status(int cr) {
+ sendcmd(0,0x100,NODE_ADR | C_SETCR); /* CMD: SET CR */
+ sendcmd(1,0x100,cr);
+
+ dma_wd.dma_mode_status = 0x80;
+}
+
+static int
+get_status(unsigned char *adr) {
+ int i,c;
+
+ DISABLE_IRQ();
+ c = sendcmd(0,0x00,NODE_ADR | C_GETEA); /* CMD: GET ETH ADR*/
+ if( c < 0 ) goto gsend;
+
+ /* now read status bytes */
+
+ for (i=0; i<6; i++) {
+ dma_wd.fdc_acces_seccount = 0; /* request next byte */
+
+ if( !acsi_wait_for_IRQ(HZ/2) ) { /* wait for cmd ack */
+ c = -1;
+ goto gsend; /* timeout */
+ }
+ c = dma_wd.fdc_acces_seccount;
+ *adr++ = (unsigned char)c;
+ }
+ c = 1;
+gsend:
+ dma_wd.dma_mode_status = 0x80;
+ return c;
+}
+
+static irqreturn_t
+bionet_intr(int irq, void *data, struct pt_regs *fp) {
+ return IRQ_HANDLED;
+}
+
+
+static int
+get_frame(unsigned long paddr, int odd) {
+ int c;
+ unsigned long flags;
+
+ DISABLE_IRQ();
+ local_irq_save(flags);
+
+ dma_wd.dma_mode_status = 0x9a;
+ dma_wd.dma_mode_status = 0x19a;
+ dma_wd.dma_mode_status = 0x9a;
+ dma_wd.fdc_acces_seccount = 0x04; /* sector count (was 5) */
+ dma_wd.dma_lo = (unsigned char)paddr;
+ paddr >>= 8;
+ dma_wd.dma_md = (unsigned char)paddr;
+ paddr >>= 8;
+ dma_wd.dma_hi = (unsigned char)paddr;
+ local_irq_restore(flags);
+
+ c = sendcmd(0,0x00,NODE_ADR | C_READ); /* CMD: READ */
+ if( c < 128 ) goto rend;
+
+ /* now read block */
+
+ c = sendcmd(1,0x00,odd); /* odd flag for address shift */
+ dma_wd.dma_mode_status = 0x0a;
+
+ if( !acsi_wait_for_IRQ(100) ) { /* wait for DMA to complete */
+ c = -1;
+ goto rend;
+ }
+ dma_wd.dma_mode_status = 0x8a;
+ dma_wd.dma_mode_status = 0x18a;
+ dma_wd.dma_mode_status = 0x8a;
+ c = dma_wd.fdc_acces_seccount;
+
+ dma_wd.dma_mode_status = 0x88;
+ c = dma_wd.fdc_acces_seccount;
+ c = 1;
+
+rend:
+ dma_wd.dma_mode_status = 0x80;
+ udelay(40);
+ acsi_wait_for_noIRQ(20);
+ return c;
+}
+
+
+static int
+hardware_send_packet(unsigned long paddr, int cnt) {
+ unsigned int c;
+ unsigned long flags;
+
+ DISABLE_IRQ();
+ local_irq_save(flags);
+
+ dma_wd.dma_mode_status = 0x19a;
+ dma_wd.dma_mode_status = 0x9a;
+ dma_wd.dma_mode_status = 0x19a;
+ dma_wd.dma_lo = (unsigned char)paddr;
+ paddr >>= 8;
+ dma_wd.dma_md = (unsigned char)paddr;
+ paddr >>= 8;
+ dma_wd.dma_hi = (unsigned char)paddr;
+
+ dma_wd.fdc_acces_seccount = 0x4; /* sector count */
+ local_irq_restore(flags);
+
+ c = sendcmd(0,0x100,NODE_ADR | C_WRITE); /* CMD: WRITE */
+ c = sendcmd(1,0x100,cnt&0xff);
+ c = sendcmd(1,0x100,cnt>>8);
+
+ /* now write block */
+
+ dma_wd.dma_mode_status = 0x10a; /* DMA enable */
+ if( !acsi_wait_for_IRQ(100) ) /* wait for DMA to complete */
+ goto end;
+
+ dma_wd.dma_mode_status = 0x19a; /* DMA disable ! */
+ c = dma_wd.fdc_acces_seccount;
+
+end:
+ c = sendcmd(1,0x100,0);
+ c = sendcmd(1,0x100,0);
+
+ dma_wd.dma_mode_status = 0x180;
+ udelay(40);
+ acsi_wait_for_noIRQ(20);
+ return( c & 0x02);
+}
+
+
+/* Check for a network adaptor of this type, and return the initialized
+ * net_device on success or an ERR_PTR() value on failure.
+ */
+struct net_device * __init bionet_probe(int unit)
+{
+ struct net_device *dev;
+ unsigned char station_addr[6];
+ static unsigned version_printed;
+ static int no_more_found; /* avoid "Probing for..." printed 4 times */
+ int i;
+ int err;
+
+ if (!MACH_IS_ATARI || no_more_found)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ printk("Probing for BioNet 100 Adapter...\n");
+
+ stdma_lock(bionet_intr, NULL);
+ i = get_status(station_addr); /* Read the station address PROM. */
+ ENABLE_IRQ();
+ stdma_release();
+
+	/* Check the first three octets of the S.A. for the manufacturer's code.
+ */
+
+ if( i < 0
+ || station_addr[0] != 'B'
+ || station_addr[1] != 'I'
+ || station_addr[2] != 'O' ) {
+ no_more_found = 1;
+ printk( "No BioNet 100 found.\n" );
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (bionet_debug > 0 && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s found, eth-addr: %02x-%02x-%02x:%02x-%02x-%02x.\n",
+ dev->name, "BioNet 100",
+ station_addr[0], station_addr[1], station_addr[2],
+ station_addr[3], station_addr[4], station_addr[5]);
+
+ /* Initialize the device structure. */
+
+ nic_packet = (struct nic_pkt_s *)acsi_buffer;
+ phys_nic_packet = (unsigned char *)phys_acsi_buffer;
+ if (bionet_debug > 0) {
+ printk("nic_packet at 0x%p, phys at 0x%p\n",
+ nic_packet, phys_nic_packet );
+ }
+
+ dev->open = bionet_open;
+ dev->stop = bionet_close;
+ dev->hard_start_xmit = bionet_send_packet;
+ dev->get_stats = net_get_stats;
+
+ /* Fill in the fields of the device structure with ethernet-generic
+ * values. This should be in a common file instead of per-driver.
+ */
+
+ for (i = 0; i < ETH_ALEN; i++) {
+#if 0
+ dev->broadcast[i] = 0xff;
+#endif
+ dev->dev_addr[i] = station_addr[i];
+ }
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+bionet_open(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+
+ if (bionet_debug > 0)
+ printk("bionet_open\n");
+ stdma_lock(bionet_intr, NULL);
+
+ /* Reset the hardware here.
+ */
+ set_status(4);
+ lp->open_time = 0; /*jiffies*/
+ lp->poll_time = MAX_POLL_TIME;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ stdma_release();
+ bionet_timer.data = (long)dev;
+ bionet_timer.expires = jiffies + lp->poll_time;
+ add_timer(&bionet_timer);
+ return 0;
+}
+
+static int
+bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ local_irq_save(flags);
+
+ if (stdma_islocked()) {
+ local_irq_restore(flags);
+ lp->stats.tx_errors++;
+ }
+ else {
+ int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned long buf = virt_to_phys(skb->data);
+ int stat;
+
+ stdma_lock(bionet_intr, NULL);
+ local_irq_restore(flags);
+ if( !STRAM_ADDR(buf+length-1) ) {
+ memcpy(nic_packet->buffer, skb->data, length);
+ buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
+ }
+
+ if (bionet_debug >1) {
+ u_char *data = nic_packet->buffer, *p;
+ int i;
+
+ printk( "%s: TX pkt type 0x%4x from ", dev->name,
+ ((u_short *)data)[6]);
+
+ for( p = &data[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++,i != 5 ? ":" : "" );
+ printk(" to ");
+
+ for( p = data, i = 0; i < 6; i++ )
+				printk("%02x%s", *p++, i != 5 ? ":" : "\n");
+
+ printk( "%s: ", dev->name );
+ printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
+ " %02x%02x%02x%02x len %d\n",
+ data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
+ data[28], data[29], data[30], data[31], data[32], data[33],
+ length );
+ }
+ dma_cache_maintenance(buf, length, 1);
+
+ stat = hardware_send_packet(buf, length);
+ ENABLE_IRQ();
+ stdma_release();
+
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes+=length;
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/* We have a good packet(s), get it/them out of the buffers.
+ */
+static void
+bionet_poll_rx(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+ int boguscount = 10;
+ int pkt_len, status;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* ++roman: Take care at locking the ST-DMA... This must be done with ints
+ * off, since otherwise an int could slip in between the question and the
+ * locking itself, and then we'd go to sleep... And locking itself is
+ * necessary to keep the floppy_change timer from working with ST-DMA
+ * registers. */
+ if (stdma_islocked()) {
+ local_irq_restore(flags);
+ return;
+ }
+ stdma_lock(bionet_intr, NULL);
+ DISABLE_IRQ();
+ local_irq_restore(flags);
+
+ if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;
+
+ while(boguscount--) {
+ status = get_frame((unsigned long)phys_nic_packet, 0);
+
+ if( status == 0 ) break;
+
+ /* Good packet... */
+
+ dma_cache_maintenance((unsigned long)phys_nic_packet, 1520, 0);
+
+ pkt_len = (nic_packet->l_hi << 8) | nic_packet->l_lo;
+
+ lp->poll_time = bionet_min_poll_time; /* fast poll */
+ if( pkt_len >= 60 && pkt_len <= 1520 ) {
+			/* ^^^^ was 1514 KHL */
+ /* Malloc up new buffer.
+ */
+ struct sk_buff *skb = dev_alloc_skb( pkt_len + 2 );
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve( skb, 2 ); /* 16 Byte align */
+ skb_put( skb, pkt_len ); /* make room */
+
+ /* 'skb->data' points to the start of sk_buff data area.
+ */
+ memcpy(skb->data, nic_packet->buffer, pkt_len);
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine.
+ */
+
+ if (bionet_debug >1) {
+ u_char *data = nic_packet->buffer, *p;
+ int i;
+
+ printk( "%s: RX pkt type 0x%4x from ", dev->name,
+ ((u_short *)data)[6]);
+
+
+ for( p = &data[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++,i != 5 ? ":" : "" );
+ printk(" to ");
+ for( p = data, i = 0; i < 6; i++ )
+					printk("%02x%s", *p++, i != 5 ? ":" : "\n");
+
+ printk( "%s: ", dev->name );
+ printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
+ " %02x%02x%02x%02x len %d\n",
+ data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
+ data[28], data[29], data[30], data[31], data[32], data[33],
+ pkt_len );
+ }
+ }
+ else {
+ printk(" Packet has wrong length: %04d bytes\n", pkt_len);
+ lp->stats.rx_errors++;
+ }
+ }
+ stdma_release();
+ ENABLE_IRQ();
+ return;
+}
+
+/* bionet_tick: called by bionet_timer. Reads packets from the adapter,
+ * passes them to the higher layers and restarts the timer.
+ */
+static void
+bionet_tick(unsigned long data) {
+ struct net_device *dev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(dev);
+
+	if( bionet_debug > 0 && (lp->open_time++ & 7) == 0 )
+ printk("bionet_tick: %ld\n", lp->open_time);
+
+ if( !stdma_islocked() ) bionet_poll_rx(dev);
+
+ bionet_timer.expires = jiffies + lp->poll_time;
+ add_timer(&bionet_timer);
+}
+
+/* The inverse routine to bionet_open().
+ */
+static int
+bionet_close(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+
+ if (bionet_debug > 0)
+ printk("bionet_close, open_time=%ld\n", lp->open_time);
+ del_timer(&bionet_timer);
+ stdma_lock(bionet_intr, NULL);
+
+ set_status(0);
+ lp->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ stdma_release();
+ return 0;
+}
+
+/* Get the current statistics.
+ This may be called with the card open or closed.
+ */
+static struct net_device_stats *net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *bio_dev;
+
+int init_module(void)
+{
+ bio_dev = bionet_probe(-1);
+ if (IS_ERR(bio_dev))
+ return PTR_ERR(bio_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(bio_dev);
+ free_netdev(bio_dev);
+}
+
+#endif /* MODULE */
+
+/* Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
+ -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
+ -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c bionet.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c
new file mode 100644
index 000000000000..81c362c8cb97
--- /dev/null
+++ b/drivers/net/atari_pamsnet.c
@@ -0,0 +1,895 @@
+/* atari_pamsnet.c PAMsNet device driver for linux68k.
+ *
+ * Version:	@(#)PAMsNet.c	0.2beta	03/31/96
+ *
+ * Author: Torsten Lang <Torsten.Lang@ap.physik.uni-giessen.de>
+ * <Torsten.Lang@jung.de>
+ *
+ * This driver is based on my driver PAMSDMA.c for MiNT-Net and
+ * on the driver bionet.c written by
+ * Hartmut Laue <laue@ifk-mp.uni-kiel.de>
+ * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
+ *
+ * Little adaptations for integration into pl7 by Roman Hodek
+ *
+ What is it ?
+ ------------
+ This driver controls the PAMsNet LAN-Adapter which connects
+ an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
+
+ This version can be compiled as a loadable module (See the
+ compile command at the bottom of this file).
+ At load time, you can optionally set the debugging level and the
+ fastest response time on the command line of 'insmod'.
+
+ 'pamsnet_debug'
+ controls the amount of diagnostic messages:
+ 0 : no messages
+ >0 : see code for meaning of printed messages
+
+ 'pamsnet_min_poll_time' (always >=1)
+ gives the time (in jiffies) between polls. Low values
+ increase the system load (beware!)
+
+ When loaded, a net device with the name 'eth?' becomes available,
+ which can be controlled with the usual 'ifconfig' command.
+
+	It is possible to compile this driver into the kernel like other
+	(net) drivers. For this purpose, some source files (e.g. config files,
+	makefiles, Space.c) must be changed accordingly. (You may refer to
+	other drivers for how to do it.) In this case, the device will be
+	detected at boot time and (probably) appear as 'eth0'.
+
+ Theory of Operation
+ -------------------
+ Because the ATARI DMA port is usually shared between several
+ devices (eg. harddisk, floppy) we cannot block the ACSI bus
+ while waiting for interrupts. Therefore we use a polling mechanism
+ to fetch packets from the adapter. For the same reason, we send
+ packets without checking that the previous packet has been sent to
+ the LAN. We rely on the higher levels of the networking code to detect
+ missing packets and resend them.
+
+ Before we access the ATARI DMA controller, we check if another
+ process is using the DMA. If not, we lock the DMA, perform one or
+ more packet transfers and unlock the DMA before returning.
+	We do not use 'stdma_lock' unconditionally because it is unclear
+	whether the networking code can be put to sleep, which will happen if
+	another (possibly slow) device is using the DMA controller.
+
+ The polling is done via timer interrupts which periodically
+ 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
+ between polls varies depending on an estimate of the net activity.
+	The allowed range is given by the variable 'pamsnet_min_poll_time'
+ for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
+ for the higher (slowest) limit.
+
+ Whenever a packet arrives, we switch to fastest response by setting
+ the polling time to its lowest limit. If the following poll fails,
+ because no packets have arrived, we increase the time for the next
+ poll. When the net activity is low, the polling time effectively
+ stays at its maximum value, resulting in the lowest load for the
+ machine.
+ */
+
+#define MAX_POLL_TIME 10
+
+static char *version =
+ "pamsnet.c:v0.2beta 30-mar-96 (c) Torsten Lang.\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_acsi.h>
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#undef READ
+#undef WRITE
+
+/* use 0 for production, 1 for verification, >2 for debug
+ */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+/*
+ * Global variable 'pamsnet_debug'. Can be set at load time by 'insmod'
+ */
+unsigned int pamsnet_debug = NET_DEBUG;
+MODULE_PARM(pamsnet_debug, "i");
+MODULE_PARM_DESC(pamsnet_debug, "pamsnet debug enable (0-1)");
+MODULE_LICENSE("GPL");
+
+static unsigned int pamsnet_min_poll_time = 2;
+
+
+/* Information that needs to be kept for each board.
+ */
+struct net_local {
+ struct net_device_stats stats;
+ long open_time; /* for debugging */
+ int poll_time; /* polling time varies with net load */
+};
+
+static struct nic_pkt_s { /* packet format */
+ unsigned char buffer[2048];
+} *nic_packet = 0;
+unsigned char *phys_nic_packet;
+
+typedef unsigned char HADDR[6]; /* 6-byte hardware address of lance */
+
+/* Index to functions, as function prototypes.
+ */
+static void start (int target);
+static int stop (int target);
+static int testpkt (int target);
+static int sendpkt (int target, unsigned char *buffer, int length);
+static int receivepkt (int target, unsigned char *buffer);
+static int inquiry (int target, unsigned char *buffer);
+static HADDR *read_hw_addr(int target, unsigned char *buffer);
+static void setup_dma (void *address, unsigned rw_flag, int num_blocks);
+static int send_first (int target, unsigned char byte);
+static int send_1_5 (int lun, unsigned char *command, int dma);
+static int get_status (void);
+static int calc_received (void *start_address);
+
+static int pamsnet_open(struct net_device *dev);
+static int pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void pamsnet_poll_rx(struct net_device *);
+static int pamsnet_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void pamsnet_tick(unsigned long);
+
+static irqreturn_t pamsnet_intr(int irq, void *data, struct pt_regs *fp);
+
+static struct timer_list pamsnet_timer = TIMER_INITIALIZER(pamsnet_tick, 0, 0);
+
+#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
+
+typedef struct
+{
+ unsigned char reserved1[0x38];
+ HADDR hwaddr;
+ unsigned char reserved2[0x1c2];
+} DMAHWADDR;
+
+/*
+ * Definitions of commands understood by the PAMs DMA adaptor.
+ *
+ * In general the DMA adaptor uses LUN 0, 5, 6 and 7 on one ID changeable
+ * by the PAM's Net software.
+ *
+ * LUN 0 works as a harddisk. You can boot the PAM's Net driver there.
+ * LUN 5 works as a harddisk and lets you access the RAM and some I/O HW
+ * area. In sector 0, bytes 0x38-0x3d you find the ethernet HW address
+ * of the adaptor.
+ * LUN 6 works as a harddisk and lets you access the firmware ROM.
+ * LUN 7 lets you send and receive packets.
+ *
+ * Some commands like the INQUIRY command work identical on all used LUNs.
+ *
+ * UNKNOWN1 seems to read some data.
+ * Command length is 6 bytes.
+ * UNKNOWN2 seems to read some data (command byte 1 must be !=0). The
+ * following bytes seem to be something like an allocation length.
+ * Command length is 6 bytes.
+ * READPKT reads a packet received by the DMA adaptor.
+ * Command length is 6 bytes.
+ * WRITEPKT sends a packet transferred by the following DMA phase. The length
+ * of the packet is transferred in command bytes 3 and 4.
+ * The adaptor automatically replaces the src hw address in an ethernet
+ * packet by its own hw address.
+ * Command length is 6 bytes.
+ * INQUIRY has the same function as the INQUIRY command supported by harddisks
+ * and other SCSI devices. It lets you detect which device you found
+ * at a given address.
+ * Command length is 6 bytes.
+ * START initializes the DMA adaptor. After this command it is able to send
+ * and receive packets. There is no status byte returned!
+ * Command length is 1 byte.
+ * NUMPKTS gives back the number of received packets waiting in the queue in
+ * the status byte.
+ * Command length is 1 byte.
+ * UNKNOWN3
+ * UNKNOWN4 Function of these three commands is unknown.
+ * UNKNOWN5 The command length of these three commands is 1 byte.
+ * DESELECT immediately deselects the DMA adaptor. May be important with
+ *		interrupt-driven operation.
+ * Command length is 1 byte.
+ * STOP resets the DMA adaptor. After this command packets can no longer
+ * be received or transferred.
+ *		Command length is 6 bytes.
+ */
+
+enum {UNKNOWN1=3, READPKT=8, UNKNOWN2, WRITEPKT=10, INQUIRY=18, START,
+ NUMPKTS=22, UNKNOWN3, UNKNOWN4, UNKNOWN5, DESELECT, STOP};
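+
+/* A small sketch of how a complete 6-byte WRITEPKT command block looks on
+ * the bus, assuming the byte layout produced by send_first() and send_1_5()
+ * below (ID in the top three bits of the first byte, LUN 7 in the top three
+ * bits of the second, packet length in command bytes 3 and 4).
+ * build_writepkt_cmd() is illustrative only and not used by the driver.
+ */
+static inline void build_writepkt_cmd(unsigned char cmd[6], int target, int length)
+{
+	cmd[0] = (target << 5) | (WRITEPKT & 0x1F);	/* target ID + opcode */
+	cmd[1] = 7 << 5;				/* LUN 7: packet transfer */
+	cmd[2] = 0;
+	cmd[3] = length >> 8;				/* packet length, high byte */
+	cmd[4] = length & 0xFF;				/* packet length, low byte */
+	cmd[5] = 0;
+}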
+
+#define READSECTOR READPKT
+#define WRITESECTOR WRITEPKT
+
+u_char *inquire8="MV PAM's NET/GK";
+
+#define DMALOW dma_wd.dma_lo
+#define DMAMID dma_wd.dma_md
+#define DMAHIGH dma_wd.dma_hi
+#define DACCESS dma_wd.fdc_acces_seccount
+
+#define MFP_GPIP mfp.par_dt_reg
+
+/* Some useful functions */
+
+#define INT (!(MFP_GPIP & 0x20))
+#define DELAY ({MFP_GPIP; MFP_GPIP; MFP_GPIP;})
+#define WRITEMODE(value) \
+ ({ u_short dummy = value; \
+ __asm__ volatile("movew %0, 0xFFFF8606" : : "d"(dummy)); \
+ DELAY; \
+ })
+#define WRITEBOTH(value1, value2) \
+ ({ u_long dummy = (u_long)(value1)<<16 | (u_short)(value2); \
+ __asm__ volatile("movel %0, 0xFFFF8604" : : "d"(dummy)); \
+ DELAY; \
+ })
+
+/* Definitions for DMODE */
+
+#define READ 0x000
+#define WRITE 0x100
+
+#define DMA_FDC 0x080
+#define DMA_ACSI 0x000
+
+#define DMA_DISABLE 0x040
+
+#define SEC_COUNT 0x010
+#define DMA_WINDOW 0x000
+
+#define REG_ACSI 0x008
+#define REG_FDC 0x000
+
+#define A1 0x002
+
+/* Timeout constants */
+
+#define TIMEOUTCMD HZ/2 /* ca. 500ms */
+#define TIMEOUTDMA HZ /* ca. 1s */
+#define COMMAND_DELAY 500 /* ca. 0.5ms */
+
+unsigned rw;
+int lance_target = -1;
+int if_up = 0;
+
+/* The following routines access the ethernet board connected to the
+ * ACSI port via the st_dma chip.
+ */
+
+/* The following low-level routines work on physical addresses only and assume
+ * that any buffers they need are
+ * - completely located in ST RAM and
+ * - contiguous in the physical address space
+ */
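+
+/* A sketch of how the ST-RAM assumption above is honoured in practice,
+ * mirroring the copy done in pamsnet_send_packet() further down: a buffer
+ * that does not lie completely in ST RAM is first copied into the ACSI
+ * bounce buffer. to_stram_buf() itself is illustrative and not called by
+ * the driver.
+ */
+static inline unsigned long to_stram_buf(void *data, int length)
+{
+	unsigned long buf = virt_to_phys(data);
+
+	if (!STRAM_ADDR(buf + length - 1)) {		/* not DMA-able as is */
+		memcpy(nic_packet->buffer, data, length);
+		buf = (unsigned long)phys_nic_packet;	/* use the ACSI buffer */
+	}
+	return buf;
+}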
+
+/* Setup the DMA counter */
+
+static void
+setup_dma (address, rw_flag, num_blocks)
+ void *address;
+ unsigned rw_flag;
+ int num_blocks;
+{
+ WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI |
+ A1);
+ WRITEMODE((unsigned)(rw_flag ^ WRITE) | DMA_FDC | SEC_COUNT | REG_ACSI |
+ A1);
+ WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI |
+ A1);
+ DMALOW = (unsigned char)((unsigned long)address & 0xFF);
+ DMAMID = (unsigned char)(((unsigned long)address >> 8) & 0xFF);
+ DMAHIGH = (unsigned char)(((unsigned long)address >> 16) & 0xFF);
+ WRITEBOTH((unsigned)num_blocks & 0xFF,
+ rw_flag | DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
+ rw = rw_flag;
+}
+
+/* Send the first byte of a command block */
+
+static int
+send_first (target, byte)
+ int target;
+ unsigned char byte;
+{
+ rw = READ;
+ acsi_delay_end(COMMAND_DELAY);
+ /*
+ * wake up ACSI
+ */
+ WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI);
+ /*
+ * write command byte
+ */
+ WRITEBOTH((target << 5) | (byte & 0x1F), DMA_FDC |
+ DMA_WINDOW | REG_ACSI | A1);
+ return (!acsi_wait_for_IRQ(TIMEOUTCMD));
+}
+
+/* Send the rest of a command block */
+
+static int
+send_1_5 (lun, command, dma)
+ int lun;
+ unsigned char *command;
+ int dma;
+{
+ int i, j;
+
+ for (i=0; i<5; i++) {
+ WRITEBOTH((!i ? (((lun & 0x7) << 5) | (command[i] & 0x1F))
+ : command[i]),
+ rw | REG_ACSI | DMA_WINDOW |
+ ((i < 4) ? DMA_FDC
+ : (dma ? DMA_ACSI
+ : DMA_FDC)) | A1);
+ if (i < 4 && (j = !acsi_wait_for_IRQ(TIMEOUTCMD)))
+ return (j);
+ }
+ return (0);
+}
+
+/* Read a status byte */
+
+static int
+get_status (void)
+{
+ WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
+ acsi_delay_start();
+ return ((int)(DACCESS & 0xFF));
+}
+
+/* Calculate the number of received bytes */
+
+static int
+calc_received (start_address)
+ void *start_address;
+{
+ return (int)(
+ (((unsigned long)DMAHIGH << 16) | ((unsigned)DMAMID << 8) | DMALOW)
+ - (unsigned long)start_address);
+}
+
+/* The following midlevel routines still work on physical addresses ... */
+
+/* start() starts the PAM's DMA adaptor */
+
+static void
+start (target)
+ int target;
+{
+ send_first(target, START);
+}
+
+/* stop() stops the PAM's DMA adaptor and returns a value of zero in case of success */
+
+static int
+stop (target)
+ int target;
+{
+ int ret = -1;
+ unsigned char cmd_buffer[5];
+
+ if (send_first(target, STOP))
+ goto bad;
+ cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] =
+ cmd_buffer[3] = cmd_buffer[4] = 0;
+ if (send_1_5(7, cmd_buffer, 0) ||
+ !acsi_wait_for_IRQ(TIMEOUTDMA) ||
+ get_status())
+ goto bad;
+ ret = 0;
+bad:
+ return (ret);
+}
+
+/* testpkt() returns the number of received packets waiting in the queue */
+
+static int
+testpkt(target)
+ int target;
+{
+ int ret = -1;
+
+ if (send_first(target, NUMPKTS))
+ goto bad;
+ ret = get_status();
+bad:
+ return (ret);
+}
+
+/* inquiry() returns 0 when a PAM's DMA adaptor is found, -1 otherwise */
+/* Please note: The buffer is for internal use only but must be defined! */
+
+static int
+inquiry (target, buffer)
+ int target;
+ unsigned char *buffer;
+{
+ int ret = -1;
+ unsigned char *vbuffer = phys_to_virt((unsigned long)buffer);
+ unsigned char cmd_buffer[5];
+
+ if (send_first(target, INQUIRY))
+ goto bad;
+ setup_dma(buffer, READ, 1);
+ vbuffer[8] = vbuffer[27] = 0; /* Avoid confusion with previous read data */
+ cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
+ cmd_buffer[3] = 48;
+ if (send_1_5(5, cmd_buffer, 1) ||
+ !acsi_wait_for_IRQ(TIMEOUTDMA) ||
+ get_status() ||
+ (calc_received(buffer) < 32))
+ goto bad;
+ dma_cache_maintenance((unsigned long)(buffer+8), 20, 0);
+ if (memcmp(inquire8, vbuffer+8, 20))
+ goto bad;
+ ret = 0;
+bad:
+ if (!!NET_DEBUG) {
+ vbuffer[8+20]=0;
+ printk("inquiry of target %d: %s\n", target, vbuffer+8);
+ }
+ return (ret);
+}
+
+/*
+ * read_hw_addr() reads the sector containing the hwaddr and returns
+ * a pointer to it (virtual address!) or 0 in case of an error
+ */
+
+static HADDR
+*read_hw_addr(target, buffer)
+ int target;
+ unsigned char *buffer;
+{
+ HADDR *ret = 0;
+ unsigned char cmd_buffer[5];
+
+ if (send_first(target, READSECTOR))
+ goto bad;
+ setup_dma(buffer, READ, 1);
+ cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
+ cmd_buffer[3] = 1;
+ if (send_1_5(5, cmd_buffer, 1) ||
+ !acsi_wait_for_IRQ(TIMEOUTDMA) ||
+ get_status())
+ goto bad;
+ ret = phys_to_virt((unsigned long)&(((DMAHWADDR *)buffer)->hwaddr));
+ dma_cache_maintenance((unsigned long)buffer, 512, 0);
+bad:
+ return (ret);
+}
+
+static irqreturn_t
+pamsnet_intr(irq, data, fp)
+ int irq;
+ void *data;
+ struct pt_regs *fp;
+{
+ return IRQ_HANDLED;
+}
+
+/* receivepkt() loads a packet to a given buffer and returns its length */
+
+static int
+receivepkt (target, buffer)
+ int target;
+ unsigned char *buffer;
+{
+ int ret = -1;
+ unsigned char cmd_buffer[5];
+
+ if (send_first(target, READPKT))
+ goto bad;
+ setup_dma(buffer, READ, 3);
+ cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
+ cmd_buffer[3] = 3;
+ if (send_1_5(7, cmd_buffer, 1) ||
+ !acsi_wait_for_IRQ(TIMEOUTDMA) ||
+ get_status())
+ goto bad;
+ ret = calc_received(buffer);
+bad:
+ return (ret);
+}
+
+/* sendpkt() sends a packet and returns a value of zero when the packet was sent
+ successfully */
+
+static int
+sendpkt (target, buffer, length)
+ int target;
+ unsigned char *buffer;
+ int length;
+{
+ int ret = -1;
+ unsigned char cmd_buffer[5];
+
+ if (send_first(target, WRITEPKT))
+ goto bad;
+ setup_dma(buffer, WRITE, 3);
+ cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[4] = 0;
+ cmd_buffer[2] = length >> 8;
+ cmd_buffer[3] = length & 0xFF;
+ if (send_1_5(7, cmd_buffer, 1) ||
+ !acsi_wait_for_IRQ(TIMEOUTDMA) ||
+ get_status())
+ goto bad;
+ ret = 0;
+bad:
+ return (ret);
+}
+
+/* The following higher level routines work on virtual addresses and convert them to
+ * physical addresses when passed to the lowlevel routines. It's up to the higher level
+ * routines to copy data from Alternate RAM to ST RAM if necessary!
+ */
+
+/* Check for a network adaptor of this type, and return the initialized
+ * net_device on success or an ERR_PTR() value on failure.
+ */
+
+struct net_device * __init pamsnet_probe (int unit)
+{
+ struct net_device *dev;
+ int i;
+ HADDR *hwaddr;
+ int err;
+
+ unsigned char station_addr[6];
+ static unsigned version_printed;
+	/* avoid "Probing for..." being printed 4 times - the driver supports only one adapter now! */
+ static int no_more_found;
+
+ if (no_more_found)
+ return ERR_PTR(-ENODEV);
+ no_more_found = 1;
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ printk("Probing for PAM's Net/GK Adapter...\n");
+
+ /* Allocate the DMA buffer here since we need it for probing! */
+
+ nic_packet = (struct nic_pkt_s *)acsi_buffer;
+ phys_nic_packet = (unsigned char *)phys_acsi_buffer;
+ if (pamsnet_debug > 0) {
+ printk("nic_packet at 0x%p, phys at 0x%p\n",
+ nic_packet, phys_nic_packet );
+ }
+
+ stdma_lock(pamsnet_intr, NULL);
+ DISABLE_IRQ();
+
+ for (i=0; i<8; i++) {
+		/* Do two inquiries to cover cases with strange equipment on a
+		   previous ID blocking the ACSI bus (like the SLMC804 laser
+		   printer controller). */
+ inquiry(i, phys_nic_packet);
+ if (!inquiry(i, phys_nic_packet)) {
+ lance_target = i;
+ break;
+ }
+ }
+
+ if (!!NET_DEBUG)
+ printk("ID: %d\n",i);
+
+ if (lance_target >= 0) {
+ if (!(hwaddr = read_hw_addr(lance_target, phys_nic_packet)))
+ lance_target = -1;
+ else
+ memcpy (station_addr, hwaddr, ETH_ALEN);
+ }
+
+ ENABLE_IRQ();
+ stdma_release();
+
+ if (lance_target < 0) {
+ printk("No PAM's Net/GK found.\n");
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (pamsnet_debug > 0 && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s found on target %01d, eth-addr: %02x:%02x:%02x:%02x:%02x:%02x.\n",
+ dev->name, "PAM's Net/GK", lance_target,
+ station_addr[0], station_addr[1], station_addr[2],
+ station_addr[3], station_addr[4], station_addr[5]);
+
+ /* Initialize the device structure. */
+ dev->open = pamsnet_open;
+ dev->stop = pamsnet_close;
+ dev->hard_start_xmit = pamsnet_send_packet;
+ dev->get_stats = net_get_stats;
+
+ /* Fill in the fields of the device structure with ethernet-generic
+ * values. This should be in a common file instead of per-driver.
+ */
+
+ for (i = 0; i < ETH_ALEN; i++) {
+#if 0
+ dev->broadcast[i] = 0xff;
+#endif
+ dev->dev_addr[i] = station_addr[i];
+ }
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+pamsnet_open(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+
+ if (pamsnet_debug > 0)
+ printk("pamsnet_open\n");
+ stdma_lock(pamsnet_intr, NULL);
+ DISABLE_IRQ();
+
+ /* Reset the hardware here.
+ */
+ if (!if_up)
+ start(lance_target);
+ if_up = 1;
+ lp->open_time = 0; /*jiffies*/
+ lp->poll_time = MAX_POLL_TIME;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ ENABLE_IRQ();
+ stdma_release();
+ pamsnet_timer.data = (long)dev;
+ pamsnet_timer.expires = jiffies + lp->poll_time;
+ add_timer(&pamsnet_timer);
+ return 0;
+}
+
+static int
+pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ local_irq_save(flags);
+
+ if (stdma_islocked()) {
+ local_irq_restore(flags);
+ lp->stats.tx_errors++;
+ }
+ else {
+ int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned long buf = virt_to_phys(skb->data);
+ int stat;
+
+ stdma_lock(pamsnet_intr, NULL);
+ DISABLE_IRQ();
+
+ local_irq_restore(flags);
+ if( !STRAM_ADDR(buf+length-1) ) {
+ memcpy(nic_packet->buffer, skb->data, length);
+ buf = (unsigned long)phys_nic_packet;
+ }
+
+ dma_cache_maintenance(buf, length, 1);
+
+ stat = sendpkt(lance_target, (unsigned char *)buf, length);
+ ENABLE_IRQ();
+ stdma_release();
+
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes+=length;
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/* We have a good packet(s), get it/them out of the buffers.
+ */
+static void
+pamsnet_poll_rx(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+ int boguscount;
+ int pkt_len;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* ++roman: Take care at locking the ST-DMA... This must be done with ints
+ * off, since otherwise an int could slip in between the question and the
+ * locking itself, and then we'd go to sleep... And locking itself is
+ * necessary to keep the floppy_change timer from working with ST-DMA
+ * registers. */
+ if (stdma_islocked()) {
+ local_irq_restore(flags);
+ return;
+ }
+ stdma_lock(pamsnet_intr, NULL);
+ DISABLE_IRQ();
+ local_irq_restore(flags);
+
+ boguscount = testpkt(lance_target);
+ if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;
+
+ while(boguscount--) {
+ pkt_len = receivepkt(lance_target, phys_nic_packet);
+
+ if( pkt_len < 60 ) break;
+
+ /* Good packet... */
+
+ dma_cache_maintenance((unsigned long)phys_nic_packet, pkt_len, 0);
+
+ lp->poll_time = pamsnet_min_poll_time; /* fast poll */
+ if( pkt_len >= 60 && pkt_len <= 2048 ) {
+ if (pkt_len > 1514)
+ pkt_len = 1514;
+
+ /* Malloc up new buffer.
+ */
+ skb = alloc_skb(pkt_len, GFP_ATOMIC);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->len = pkt_len;
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area.
+ */
+ memcpy(skb->data, nic_packet->buffer, pkt_len);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+ }
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine.
+ */
+
+ ENABLE_IRQ();
+ stdma_release();
+ return;
+}
+
+/* pamsnet_tick: called by pamsnet_timer. Reads packets from the adapter,
+ * passes them to the higher layers and restarts the timer.
+ */
+static void
+pamsnet_tick(unsigned long data) {
+ struct net_device *dev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(dev);
+
+	if( pamsnet_debug > 0 && (lp->open_time++ & 7) == 0 )
+ printk("pamsnet_tick: %ld\n", lp->open_time);
+
+ pamsnet_poll_rx(dev);
+
+ pamsnet_timer.expires = jiffies + lp->poll_time;
+ add_timer(&pamsnet_timer);
+}
+
+/* The inverse routine to pamsnet_open().
+ */
+static int
+pamsnet_close(struct net_device *dev) {
+ struct net_local *lp = netdev_priv(dev);
+
+ if (pamsnet_debug > 0)
+ printk("pamsnet_close, open_time=%ld\n", lp->open_time);
+ del_timer(&pamsnet_timer);
+ stdma_lock(pamsnet_intr, NULL);
+ DISABLE_IRQ();
+
+ if (if_up)
+ stop(lance_target);
+ if_up = 0;
+
+ lp->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ ENABLE_IRQ();
+ stdma_release();
+ return 0;
+}
+
+/* Get the current statistics.
+ This may be called with the card open or closed.
+ */
+static struct net_device_stats *net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *pam_dev;
+
+int init_module(void)
+{
+ pam_dev = pamsnet_probe(-1);
+ if (IS_ERR(pam_dev))
+ return PTR_ERR(pam_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(pam_dev);
+ free_netdev(pam_dev);
+}
+
+#endif /* MODULE */
+
+/* Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
+ -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
+ -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c atari_pamsnet.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
new file mode 100644
index 000000000000..ad011214c7f2
--- /dev/null
+++ b/drivers/net/atarilance.c
@@ -0,0 +1,1206 @@
+/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
+/*
+ Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+	This driver was written with the following sources of reference:
+ - The driver for the Riebl Lance card by the TU Vienna.
+ - The modified TUW driver for PAM's VME cards
+ - The PC-Linux driver for Lance cards (but this is for bus master
+ cards, not the shared memory ones)
+ - The Amiga Ariadne driver
+
+ v1.0: (in 1.2.13pl4/0.9.13)
+ Initial version
+ v1.1: (in 1.2.13pl5)
+ more comments
+ deleted some debugging stuff
+ optimized register access (keep AREG pointing to CSR0)
+ following AMD, CSR0_STRT should be set only after IDON is detected
+ use memcpy() for data transfers, that also employs long word moves
+ better probe procedure for 24-bit systems
+ non-VME-RieblCards need extra delays in memcpy
+ must also do write test, since 0xfxe00000 may hit ROM
+ use 8/32 tx/rx buffers, which should give better NFS performance;
+ this is made possible by shifting the last packet buffer after the
+ RieblCard reserved area
+ v1.2: (in 1.2.13pl8)
+ again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000
+ and thus RAM, in case of no Lance found all memory contents have to
+ be restored!
+ Now possible to compile as module.
+ v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
+		Several little 1.3 adaptations
+ When the lance is stopped it jumps back into little-endian
+ mode. It is therefore necessary to put it back where it
+ belongs, in big endian mode, in order to make things work.
+ This might be the reason why multicast-mode didn't work
+ before, but I'm not able to test it as I only got an Amiga
+ (we had similar problems with the A2065 driver).
+
+*/
+
+static char version[] = "atarilance.c: v1.3 04/04/96 "
+ "Roman.Hodek@informatik.uni-erlangen.de\n";
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/io.h>
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug infos
+ * 3 = debug, print even more debug infos (packet data)
+ */
+
+#define LANCE_DEBUG 1
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+/* Print debug messages on probing? */
+#undef LANCE_DEBUG_PROBE
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+#ifdef LANCE_DEBUG_PROBE
+# define PROBE_PRINT(a) printk a
+#else
+# define PROBE_PRINT(a)
+#endif
+
+/* These define the number of Rx and Tx buffers as log2. (Only powers
+ * of two are valid)
+ * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
+ * is more time critical than sending and packets may have to remain in the
+ * board's memory when main memory is low.
+ */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
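+
+/* With the sizes above this works out to, for example,
+ * TX_RING_SIZE = 1 << 3 = 8, TX_RING_LEN_BITS = 3 << 5 = 0x60 and
+ * RX_RING_SIZE = 1 << 5 = 32, RX_RING_LEN_BITS = 5 << 5 = 0xa0;
+ * the *_LEN_BITS values put the log2 ring size into the upper three
+ * bits of the ring descriptors' 'len' byte, as the LANCE expects.
+ */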
+
+#define TX_TIMEOUT 20
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+struct ringdesc {
+ unsigned short adr_lo; /* Low 16 bits of address */
+ unsigned char len; /* Length bits */
+ unsigned char adr_hi; /* High 8 bits of address (unused) */
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ struct ringdesc rx_ring;
+ struct ringdesc tx_ring;
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char packet_area[0]; /* packet data follow after the
+ * init block and the ring
+ * descriptors and are located
+ * at runtime */
+};
+
+/* RieblCard specifics:
+ * The original TOS driver for these cards reserves the area from offset
+ * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
+ * Ethernet address there, and the magic for verifying the data's validity.
+ * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe
+ * is reserved for the interrupt vector number.
+ */
+#define RIEBL_RSVD_START 0xee70
+#define RIEBL_RSVD_END 0xeec0
+#define RIEBL_MAGIC 0x09051990
+#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a))
+#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e))
+#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe))
+
+/* This is a default address for the old RieblCards without a battery
+ * that have no ethernet address at boot time. 00:00:36:04 is the
+ * prefix for Riebl cards, the 00:00 at the end is arbitrary.
+ */
+
+static unsigned char OldRieblDefHwaddr[6] = {
+ 0x00, 0x00, 0x36, 0x04, 0x00, 0x00
+};
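+
+/* A minimal sketch of how the reserved area described above can be used to
+ * pick a hardware address: if the magic longword is present, the address
+ * stored on the (battery-backed) card is taken, otherwise the default
+ * prefix above. riebl_hwaddr() is illustrative only and is not called by
+ * the driver; 'memp' stands for the card's shared memory base.
+ */
+static inline unsigned char *riebl_hwaddr(char *memp)
+{
+	if (*(unsigned long *)(memp + 0xee8a) == RIEBL_MAGIC)	/* data valid? */
+		return (unsigned char *)(memp + 0xee8e);	/* stored address */
+	return OldRieblDefHwaddr;	/* old card without battery */
+}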
+
+
+/* I/O registers of the Lance chip */
+
+struct lance_ioreg {
+/* base+0x0 */ volatile unsigned short data;
+/* base+0x2 */ volatile unsigned short addr;
+ unsigned char _dummy1[3];
+/* base+0x7 */ volatile unsigned char ivec;
+ unsigned char _dummy2[5];
+/* base+0xd */ volatile unsigned char eeprom;
+ unsigned char _dummy3;
+/* base+0xf */ volatile unsigned char mem;
+};
+
+/* Types of boards this driver supports */
+
+enum lance_type {
+ OLD_RIEBL, /* old Riebl card without battery */
+ NEW_RIEBL, /* new Riebl card with battery */
+ PAM_CARD /* PAM card with EEPROM */
+};
+
+static char *lance_names[] = {
+ "Riebl-Card (without battery)",
+ "Riebl-Card (with battery)",
+ "PAM intern card"
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ enum lance_type cardtype;
+ struct lance_ioreg *iobase;
+ struct lance_memory *mem;
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_tx; /* Ring entries to be freed. */
+ /* copy function */
+ void *(*memcpy_f)( void *, const void *, size_t );
+ struct net_device_stats stats;
+/* This must be long for set_bit() */
+ long tx_full;
+ spinlock_t devlock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG IO->data
+#define AREG IO->addr
+#define REGA(a) ( AREG = (a), DREG )
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base)
+
+/* Possible memory/IO addresses for probing */
+
+struct lance_addr {
+ unsigned long memaddr;
+ unsigned long ioaddr;
+ int slow_flag;
+} lance_addr_list[] = {
+ { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */
+ { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE
+ (highest byte stripped) */
+ { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST
+ (highest byte stripped) */
+ { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to
+ avoid conflict with ROM
+ (highest byte stripped) */
+ { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+ { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+};
+
+#define N_LANCE_ADDR (sizeof(lance_addr_list)/sizeof(*lance_addr_list))
+
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+
+
+/***************************** Prototypes *****************************/
+
+static int addr_accessible( volatile void *regp, int wordflag, int
+ writeflag );
+static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
+ *init_rec );
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id, struct pt_regs *fp );
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static struct net_device_stats *lance_get_stats( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+static int lance_set_mac_address( struct net_device *dev, void *addr );
+static void lance_tx_timeout (struct net_device *dev);
+
+/************************* End of Prototypes **************************/
+
+
+
+
+
+static void *slow_memcpy( void *dst, const void *src, size_t len )
+
+{ char *cto = dst;
+ const char *cfrom = src;
+
+ while( len-- ) {
+ *cto++ = *cfrom++;
+ MFPDELAY();
+ }
+ return( dst );
+}
+
+
+struct net_device * __init atarilance_probe(int unit)
+{
+ int i;
+ static int found;
+ struct net_device *dev;
+ int err = -ENODEV;
+
+ if (!MACH_IS_ATARI || found)
+ /* Assume there's only one board possible... That seems true, since
+ * the Riebl/PAM board's address cannot be changed. */
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ for( i = 0; i < N_LANCE_ADDR; ++i ) {
+ if (lance_probe1( dev, &lance_addr_list[i] )) {
+ found = 1;
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ free_irq(dev->irq, dev);
+ break;
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+/* Derived from hwreg_present() in atari/config.c: */
+
+static int __init addr_accessible( volatile void *regp, int wordflag, int writeflag )
+{
+ int ret;
+ long flags;
+ long *vbr, save_berr;
+
+ local_irq_save(flags);
+
+ __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : );
+ save_berr = vbr[2];
+
+ __asm__ __volatile__
+ ( "movel %/sp,%/d1\n\t"
+ "movel #Lberr,%2@\n\t"
+ "moveq #0,%0\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "moveb %1@,%/d0\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: movew %1@,%/d0\n\t"
+ "nop \n"
+"2: tstl %4\n\t"
+ "beq 2f\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "clrb %1@\n\t"
+ "nop \n\t"
+ "moveb %/d0,%1@\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: clrw %1@\n\t"
+ "nop \n\t"
+ "movew %/d0,%1@\n\t"
+ "nop \n"
+"2: moveq #1,%0\n"
+"Lberr: movel %/d1,%/sp"
+ : "=&d" (ret)
+ : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
+ : "d0", "d1", "memory"
+ );
+
+ vbr[2] = save_berr;
+ local_irq_restore(flags);
+
+ return( ret );
+}
+
+
+static unsigned long __init lance_probe1( struct net_device *dev,
+ struct lance_addr *init_rec )
+{
+ volatile unsigned short *memaddr =
+ (volatile unsigned short *)init_rec->memaddr;
+ volatile unsigned short *ioaddr =
+ (volatile unsigned short *)init_rec->ioaddr;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int i;
+ static int did_version;
+ unsigned short save1, save2;
+
+ PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
+ (long)memaddr, (long)ioaddr ));
+
+ /* Test whether memory readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
+ if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;
+
+ /* Written values should come back... */
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
+ save1 = *memaddr;
+ *memaddr = 0x0001;
+ if (*memaddr != 0x0001) goto probe_fail;
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
+ *memaddr = 0x0000;
+ if (*memaddr != 0x0000) goto probe_fail;
+ *memaddr = save1;
+
+ /* First port should be readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
+ if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;
+
+ /* and written values should be readable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" ));
+ save2 = ioaddr[1];
+ ioaddr[1] = 0x0001;
+ if (ioaddr[1] != 0x0001) goto probe_fail;
+
+ /* The CSR0_INIT bit should not be readable */
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
+ save1 = ioaddr[0];
+ ioaddr[1] = CSR0;
+ ioaddr[0] = CSR0_INIT | CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
+ ioaddr[0] = CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+
+ /* Now ok... */
+ PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
+ goto probe_ok;
+
+ probe_fail:
+ return( 0 );
+
+ probe_ok:
+ lp = (struct lance_private *)dev->priv;
+ MEM = (struct lance_memory *)memaddr;
+ IO = lp->iobase = (struct lance_ioreg *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+ lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;
+
+ REGA( CSR0 ) = CSR0_STOP;
+
+ /* Now test for type: If the eeprom I/O port is readable, it is a
+ * PAM card */
+ if (addr_accessible( &(IO->eeprom), 0, 0 )) {
+ /* Switch back to RAM */
+ i = IO->mem;
+ lp->cardtype = PAM_CARD;
+ }
+ else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
+ lp->cardtype = NEW_RIEBL;
+ }
+ else
+ lp->cardtype = OLD_RIEBL;
+
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* The PAM card and the Riebl card in the ST use the level 5 autovector */
+ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
+ "PAM/Riebl-ST Ethernet", dev)) {
+ printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
+ return( 0 );
+ }
+ dev->irq = (unsigned short)IRQ_AUTO_5;
+ }
+ else {
+ /* For VME-RieblCards, request a free VME int;
+ * (This must be unsigned long, since dev->irq is short and the
+ * IRQ_MACHSPEC bit would be cut off...)
+ */
+ unsigned long irq = atari_register_vme_int();
+ if (!irq) {
+ printk( "Lance: request for VME interrupt failed\n" );
+ return( 0 );
+ }
+ if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
+ "Riebl-VME Ethernet", dev)) {
+ printk( "Lance: request for irq %ld failed\n", irq );
+ return( 0 );
+ }
+ dev->irq = irq;
+ }
+
+ printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
+ dev->name, lance_names[lp->cardtype],
+ (unsigned long)ioaddr,
+ (unsigned long)memaddr,
+ dev->irq,
+ init_rec->slow_flag ? " (slow memcpy)" : "" );
+
+ /* Get the ethernet address */
+ switch( lp->cardtype ) {
+ case OLD_RIEBL:
+ /* No ethernet address! (Set some default address) */
+ memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+ break;
+ case NEW_RIEBL:
+ lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+ break;
+ case PAM_CARD:
+ i = IO->eeprom;
+ for( i = 0; i < 6; ++i )
+ dev->dev_addr[i] =
+ ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
+ ((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ i = IO->mem;
+ break;
+ }
+ for( i = 0; i < 6; ++i )
+ printk( "%02x%s", dev->dev_addr[i], (i < 5) ? ":" : "\n" );
+ if (lp->cardtype == OLD_RIEBL) {
+ printk( "%s: Warning: This is a default ethernet address!\n",
+ dev->name );
+ printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
+ }
+
+ spin_lock_init(&lp->devlock);
+
+ MEM->init.mode = 0x0000; /* Disable Rx and Tx. */
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
+ MEM->init.rx_ring.adr_hi = 0;
+ MEM->init.rx_ring.len = RX_RING_LEN_BITS;
+ MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
+ MEM->init.tx_ring.adr_hi = 0;
+ MEM->init.tx_ring.len = TX_RING_LEN_BITS;
+
+ if (lp->cardtype == PAM_CARD)
+ IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
+ else
+ *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);
+
+ if (did_version++ == 0)
+ DPRINTK( 1, ( version ));
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = &lance_open;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->stop = &lance_close;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->set_mac_address = &lance_set_mac_address;
+
+ /* XXX MSch */
+ dev->tx_timeout = lance_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+
+#if 0
+ dev->start = 0;
+#endif
+
+ memset( &lp->stats, 0, sizeof(lp->stats) );
+
+ return( 1 );
+}
+
+
+static int lance_open( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ struct lance_ioreg *IO = lp->iobase;
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ lance_init_ring(dev);
+ /* Re-initialize the LANCE, and start it when done. */
+
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ REGA( CSR2 ) = 0;
+ REGA( CSR1 ) = 0;
+ REGA( CSR0 ) = CSR0_INIT;
+ /* From now on, AREG is kept pointing to CSR0 */
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i < 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return( -EIO );
+ }
+ DREG = CSR0_IDON;
+ DREG = CSR0_STRT;
+ DREG = CSR0_INEA;
+
+ netif_start_queue (dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return( 0 );
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+ unsigned offset;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_tx = 0;
+
+ offset = offsetof( struct lance_memory, packet_area );
+
+/* If the packet buffer at offset 'o' would conflict with the reserved area
+ * of RieblCards, advance it */
+#define CHECK_OFFSET(o) \
+ do { \
+ if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \
+ if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
+ : (o) < RIEBL_RSVD_END) \
+ (o) = RIEBL_RSVD_END; \
+ } \
+ } while(0)
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->tx_head[i].base = offset;
+ MEM->tx_head[i].flag = TMD1_OWN_HOST;
+ MEM->tx_head[i].base_hi = 0;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ offset += PKT_BUF_SZ;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->rx_head[i].base = offset;
+ MEM->rx_head[i].flag = TMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi = 0;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
+ MEM->rx_head[i].msg_length = 0;
+ offset += PKT_BUF_SZ;
+ }
+}
+
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ struct lance_ioreg *IO = lp->iobase;
+
+ AREG = CSR0;
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ { int i;
+ DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
+ lp->dirty_tx, lp->cur_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->cur_rx ));
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length ));
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc ));
+ }
+#endif
+ /* XXX MSch: maybe purge/reinit ring here */
+ /* lance_restart, essentially */
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ struct lance_ioreg *IO = lp->iobase;
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+
+ /* The old LANCE chips don't automatically pad buffers to the minimum size. */
+ len = skb->len;
+ if (len < ETH_ZLEN)
+ len = ETH_ZLEN;
+ /* The PAM card has a bug: it can only send packets with an even number of bytes! */
+ else if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if (len > skb->len) {
+ skb = skb_padto(skb, len);
+ if (skb == NULL)
+ return 0;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Fill in a Tx ring entry */
+ if (lance_debug >= 3) {
+ u_char *p;
+ int i;
+ printk( "%s: TX pkt type 0x%04x from ", dev->name,
+ ((u_short *)skb->data)[6]);
+ for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" to ");
+ for( p = (u_char *)skb->data, i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" data at 0x%08x len %d\n", (int)skb->data,
+ (int)skb->len );
+ }
+
+ /* We're not ready for the interrupt until the last flags are set/reset,
+ * and the interrupt may already fire once OWN_CHIP is set... */
+ spin_lock_irqsave (&lp->devlock, flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+
+ head->length = -len;
+ head->misc = 0;
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ lp->stats.tx_bytes += skb->len;
+ dev_kfree_skb( skb );
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+ lp->dirty_tx -= TX_RING_SIZE;
+ }
+
+ /* Trigger an immediate send poll. */
+ DREG = CSR0_INEA | CSR0_TDMD;
+ dev->trans_start = jiffies;
+
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return 0;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id, struct pt_regs *fp)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int csr0, boguscnt = 10;
+ int handled = 0;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
+ return IRQ_NONE;
+ }
+
+ lp = (struct lance_private *)dev->priv;
+ IO = lp->iobase;
+ spin_lock (&lp->devlock);
+
+ AREG = CSR0;
+
+ while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
+ --boguscnt >= 0) {
+ handled = 1;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
+ CSR0_TDMD | CSR0_INEA);
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while( dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = MEM->tx_head[entry].flag;
+
+ if (status & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ MEM->tx_head[entry].flag = 0;
+
+ if (status & TMD1_ERR) {
+ /* There was a major error; log it. */
+ int err_status = MEM->tx_head[entry].misc;
+ lp->stats.tx_errors++;
+ if (err_status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
+ if (err_status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
+ if (err_status & TMD3_LCOL) lp->stats.tx_window_errors++;
+ if (err_status & TMD3_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
+ dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ } else {
+ if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* XXX MSch: free skb?? */
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ DPRINTK( 0, ( "out-of-sync dirty pointer,"
+ " %d vs. %d, full=%ld.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full ));
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && (netif_queue_stopped(dev))
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+ CSR0_IDON | CSR0_INEA;
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int lance_rx( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
+ MEM->rx_head[entry].flag ));
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) lp->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) lp->stats.rx_over_errors++;
+ if (status & RMD1_CRC) lp->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+ short pkt_len = head->msg_length & 0xfff;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ lp->stats.rx_errors++;
+ }
+ else {
+ skb = dev_alloc_skb( pkt_len+2 );
+ if (skb == NULL) {
+ DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
+ dev->name ));
+ for( i = 0; i < RX_RING_SIZE; i++ )
+ if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
+ RMD1_OWN_CHIP)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ lp->stats.rx_dropped++;
+ head->flag |= RMD1_OWN_CHIP;
+ lp->cur_rx++;
+ }
+ break;
+ }
+
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head), *p;
+ printk( "%s: RX pkt type 0x%04x from ", dev->name,
+ ((u_short *)data)[6]);
+ for( p = &data[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" to ");
+ for( p = data, i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d\n",
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len );
+ }
+
+ skb->dev = dev;
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ head->flag |= RMD1_OWN_CHIP;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+ lp->cur_rx &= RX_RING_MOD_MASK;
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ struct lance_ioreg *IO = lp->iobase;
+
+ netif_stop_queue (dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+
+ return 0;
+}
+
+
+static struct net_device_stats *lance_get_stats( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+
+ return &lp->stats;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+static void set_multicast_list( struct net_device *dev )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ struct lance_ioreg *IO = lp->iobase;
+
+ if (netif_running(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = dev->mc_count;
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+/* This is needed for old RieblCards and possibly also for new RieblCards */
+
+static int lance_set_mac_address( struct net_device *dev, void *addr )
+
+{ struct lance_private *lp = (struct lance_private *)dev->priv;
+ struct sockaddr *saddr = addr;
+ int i;
+
+ if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
+ return( -EOPNOTSUPP );
+
+ if (netif_running(dev)) {
+ /* Only possible while card isn't started */
+ DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
+ dev->name ));
+ return( -EIO );
+ }
+
+ memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
+ /* set also the magic for future sessions */
+ *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
+
+ return( 0 );
+}
+
+
+#ifdef MODULE
+static struct net_device *atarilance_dev;
+
+int init_module(void)
+{
+ atarilance_dev = atarilance_probe(-1);
+ if (IS_ERR(atarilance_dev))
+ return PTR_ERR(atarilance_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(atarilance_dev);
+ free_irq(atarilance_dev->irq, atarilance_dev);
+ free_netdev(atarilance_dev);
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
new file mode 100644
index 000000000000..bfa674ed4494
--- /dev/null
+++ b/drivers/net/atp.c
@@ -0,0 +1,952 @@
+/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
+/*
+ This is a driver for common OEM pocket (parallel port)
+ ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
+
+ Written 1993-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. Copyright 1994-2000 retained by the original
+ author, Donald Becker. The timer-based reset code was supplied in 1995
+ by Bill Carlson, wwc@super.org.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/atp.html
+
+
+ Modular support/softnet added by Alan Cox.
+ _bit abuse fixed up by Alan Cox
+
+*/
+
+static const char versionA[] =
+"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
+static const char versionB[] =
+" http://www.scyld.com/network/atp.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+#define net_debug debug
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 15;
+
+#define NUM_UNITS 2
+/* The standard set of ISA module parameters. */
+static int io[NUM_UNITS];
+static int irq[NUM_UNITS];
+static int xcvr[NUM_UNITS]; /* The data transfer mode. */
+
+/* Operational parameters that are set at compile time. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/*
+ This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
+ ethernet adapter. This is a common low-cost OEM pocket ethernet
+ adapter, sold under many names.
+
+ Sources:
+ This driver was written from the packet driver assembly code provided by
+ Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
+ device works just from the assembly code? It ain't pretty. The following
+ description is written based on guesses and writing lots of special-purpose
+ code to test my theorized operation.
+
+ In 1997 Realtek made available the documentation for the second generation
+ RTL8012 chip, which has led to several driver improvements.
+ http://www.realtek.com.tw/cn/cn.html
+
+ Theory of Operation
+
+ The RTL8002 adapter seems to be built around a custom spin of the SEEQ
+ controller core. It probably has a 16K or 64K internal packet buffer, of
+ which the first 4K is devoted to transmit and the rest to receive.
+ The controller maintains the queue of received packets and the packet buffer
+ access pointer internally, with only 'reset to beginning' and 'skip to next
+ packet' commands visible. The transmit packet queue holds two (or more?)
+ packets: both 'retransmit this packet' (due to collision) and 'transmit next
+ packet' commands must be started by hand.
+
+ The station address is stored in a standard bit-serial EEPROM which must be
+ read (ughh) by the device driver. (Provisions have been made for
+ substituting a 74S288 PROM, but I haven't gotten reports of any models
+ using it.) Unlike built-in devices, a pocket adapter can temporarily lose
+ power without indication to the device driver. The major effect is that
+ the station address, receive filter (promiscuous, etc.) and transceiver
+ must be reset.
+
+ The controller itself has 16 registers, some of which use only the lower
+ bits. The registers are read and written 4 bits at a time. The four bit
+ register address is presented on the data lines along with a few additional
+ timing and control bits. The data is then read from status port or written
+ to the data port.
+
+ Correction: the controller has two banks of 16 registers. The second
+ bank contains only the multicast filter table (now used) and the EEPROM
+ access registers.
+
+ Since the bulk data transfer of the actual packets through the slow
+ parallel port dominates the driver's running time, four distinct data
+ (non-register) transfer modes are provided by the adapter, two in each
+ direction. In the first mode timing for the nibble transfers is
+ provided through the data port. In the second mode the same timing is
+ provided through the control port. In either case the data is read from
+ the status port and written to the data port, just as when accessing
+ registers.
+
+ In addition to the basic data transfer methods, several more modes are
+ created by adding some delay (multiple reads of the data) to allow it to
+ stabilize. This delay seems to be needed on most machines.
+
+ The data transfer mode is stored in the 'dev->if_port' field. Its default
+ value is '4'. It may be overridden at boot-time using the third parameter
+ to the "ether=..." initialization.
+
+ The header file <atp.h> provides inline functions that encapsulate the
+ register and data access methods. These functions are hand-tuned to
+ generate reasonable object code. This header file also documents my
+ interpretations of the device registers.
+*/
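+
+/* Illustrative sketch only, not taken from the code below: the nibble-wide
+ * register access described above boils down to presenting the register
+ * address on the data lines and clocking the value back in through the status
+ * port. The real, hand-tuned helpers live in atp.h (read_nibble(), write_reg()
+ * and friends); this simplified variant assumes their EOC/RdAddr encoding and
+ * the PAR_DATA/PAR_STATUS offsets and exists only to make the sequence above
+ * concrete.
+ */
+#if 0	/* documentation sketch, never compiled */
+static unsigned char sketch_read_register(long ioaddr, unsigned char reg)
+{
+	unsigned char status;
+
+	outb(EOC + reg, ioaddr + PAR_DATA);     /* terminate any previous cycle */
+	outb(RdAddr + reg, ioaddr + PAR_DATA);  /* present address + read command */
+	inb(ioaddr + PAR_STATUS);               /* settling-time dummy read */
+	status = inb(ioaddr + PAR_STATUS);      /* nibble arrives in bits 3..6 */
+	outb(EOC + reg, ioaddr + PAR_DATA);     /* end the cycle */
+	return (status >> 3) & 0x0f;
+}
+#endif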
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "atp.h"
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
+MODULE_PARM_DESC(io, "ATP I/O base address(es)");
+MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
+MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
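+
+/* Example only, not part of the original source: with the module parameters
+ * above, a manual load for an adapter on the first parallel port might look
+ * like
+ *
+ *	modprobe atp io=0x378 irq=7 xcvr=4
+ *
+ * The values are illustrative; when io is not given, the usual LPT ports are
+ * probed, irq falls back to 7 for port 0x378 (otherwise 5), and xcvr selects
+ * the data transfer mode kept in dev->if_port (default 4).
+ */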
+
+/* The number of low I/O ports used by the ethercard. */
+#define ETHERCARD_TOTAL_SIZE 3
+
+/* Sequence to switch an 8012 from printer mux to ethernet mode. */
+static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
+
+struct net_local {
+ spinlock_t lock;
+ struct net_device *next_module;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ int saved_tx_size;
+ unsigned int tx_unit_busy:1;
+ unsigned char re_tx, /* Number of packet retransmissions. */
+ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
+ pac_cnt_in_tx_buf,
+ chip_type;
+};
+
+/* This code, written by wwc@super.org, resets the adapter every
+ TIMED_CHECKER ticks. This recovers from an unknown error which
+ hangs the device. */
+#define TIMED_CHECKER (HZ/4)
+#ifdef TIMED_CHECKER
+#include <linux/timer.h>
+static void atp_timed_checker(unsigned long ignored);
+#endif
+
+/* Index to functions, as function prototypes. */
+
+static int atp_probe1(long ioaddr);
+static void get_node_ID(struct net_device *dev);
+static unsigned short eeprom_op(long ioaddr, unsigned int cmd);
+static int net_open(struct net_device *dev);
+static void hardware_init(struct net_device *dev);
+static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode);
+static void trigger_send(long ioaddr, int length);
+static int atp_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t atp_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct net_device *dev);
+static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void set_rx_mode_8002(struct net_device *dev);
+static void set_rx_mode_8012(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+
+
+/* A list of all installed ATP devices, for removing the driver module. */
+static struct net_device *root_atp_dev;
+
+/* Check for a network adapter of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+
+ FIXME: we should use the parport layer for this
+ */
+static int __init atp_init(void)
+{
+ int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
+ int base_addr = io[0];
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return atp_probe1(base_addr);
+ else if (base_addr == 1) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (port = ports; *port; port++) {
+ long ioaddr = *port;
+ outb(0x57, ioaddr + PAR_DATA);
+ if (inb(ioaddr + PAR_DATA) != 0x57)
+ continue;
+ if (atp_probe1(ioaddr) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int __init atp_probe1(long ioaddr)
+{
+ struct net_device *dev = NULL;
+ struct net_local *lp;
+ int saved_ctrl_reg, status, i;
+ int res;
+
+ outb(0xff, ioaddr + PAR_DATA);
+ /* Save the original value of the Control register, in case we guessed
+ wrong. */
+ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
+ if (net_debug > 3)
+ printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg);
+ /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
+ outb(0x04, ioaddr + PAR_CONTROL);
+#ifndef final_version
+ if (net_debug > 3) {
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg(ioaddr, MODSEL, 0x00);
+ printk("atp: Registers are ");
+ for (i = 0; i < 32; i++)
+ printk(" %2.2x", read_nibble(ioaddr, i));
+ printk(".\n");
+ }
+#endif
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+ /* udelay() here? */
+ status = read_nibble(ioaddr, CMR1);
+
+ if (net_debug > 3) {
+ printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status);
+ for (i = 0; i < 32; i++)
+ printk(" %2.2x", read_nibble(ioaddr, i));
+ printk("\n");
+ }
+
+ if ((status & 0x78) != 0x08) {
+ /* The pocket adapter probe failed, restore the control register. */
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return -ENODEV;
+ }
+ status = read_nibble(ioaddr, CMR2_h);
+ if ((status & 0x78) != 0x10) {
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+
+ /* Find the IRQ used by triggering an interrupt. */
+ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
+
+ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
+ if (irq[0])
+ dev->irq = irq[0];
+ else if (ioaddr == 0x378)
+ dev->irq = 7;
+ else
+ dev->irq = 5;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+
+ dev->base_addr = ioaddr;
+
+ /* Read the station address PROM. */
+ get_node_ID(dev);
+
+#ifndef MODULE
+ if (net_debug)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+#endif
+
+ printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
+ "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
+ dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ lp = netdev_priv(dev);
+ lp->chip_type = RTL8002;
+ lp->addr_mode = CMR2h_Normal;
+ spin_lock_init(&lp->lock);
+
+ /* For the ATP adapter the "if_port" is really the data transfer mode. */
+ if (xcvr[0])
+ dev->if_port = xcvr[0];
+ else
+ dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
+ if (dev->mem_end & 0xf)
+ net_debug = dev->mem_end & 7;
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = atp_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list =
+ lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012;
+ dev->tx_timeout = tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ res = register_netdev(dev);
+ if (res) {
+ free_netdev(dev);
+ return res;
+ }
+
+ lp->next_module = root_atp_dev;
+ root_atp_dev = dev;
+
+ return 0;
+}
+
+/* Read the station address PROM, usually a word-wide EEPROM. */
+static void __init get_node_ID(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int sa_offset = 0;
+ int i;
+
+ write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
+
+ /* Some adapters have the station address at offset 15 instead of offset
+ zero. Check for it, and fix it if needed. */
+ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
+ sa_offset = 15;
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] =
+ be16_to_cpu(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+}
+
+/*
+ An EEPROM read command starts by shifting out 0x60+address, and then
+ shifting in the serial data. See the NatSemi databook for details.
+ * ________________
+ * CS : __|
+ * ___ ___
+ * CLK: ______| |___| |
+ * __ _______ _______
+ * DI : __X_______X_______X
+ * DO : _________X_______X
+ */
+
+static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
+{
+ unsigned eedata_out = 0;
+ int num_bits = EE_CMD_SIZE;
+
+ while (--num_bits >= 0) {
+ char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0;
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+ eedata_out <<= 1;
+ if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+ eedata_out++;
+ }
+ write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
+ return eedata_out;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine sets everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+
+ This is an attachable device: if there is no dev->priv entry then it wasn't
+ probed for at boot-time, and we need to probe for it again.
+ */
+static int net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ret;
+
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+ ret = request_irq(dev->irq, &atp_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ hardware_init(dev);
+
+ init_timer(&lp->timer);
+ lp->timer.expires = jiffies + TIMED_CHECKER;
+ lp->timer.data = (unsigned long)dev;
+ lp->timer.function = &atp_timed_checker; /* timer handler */
+ add_timer(&lp->timer);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* This routine resets the hardware. We initialize everything, assuming that
+ the hardware may have been temporarily detached. */
+static void hardware_init(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+
+ if (net_debug > 2) {
+ printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name,
+ (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
+ }
+
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+
+ /* Enable the interrupt line from the serial port. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+
+ /* Unmask the interesting interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ lp->tx_unit_busy = 0;
+ lp->pac_cnt_in_tx_buf = 0;
+ lp->saved_tx_size = 0;
+}
+
+static void trigger_send(long ioaddr, int length)
+{
+ write_reg_byte(ioaddr, TxCNT0, length & 0xff);
+ write_reg(ioaddr, TxCNT1, length >> 8);
+ write_reg(ioaddr, CMR1, CMR1_Xmit);
+}
+
+static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
+{
+ if (length & 1)
+ {
+ length++;
+ pad_len++;
+ }
+
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ if ((data_mode & 1) == 0) {
+ /* Write the packet out, starting with the write addr. */
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+ do {
+ write_byte_mode0(ioaddr, *packet++);
+ } while (--length > pad_len) ;
+ do {
+ write_byte_mode0(ioaddr, 0);
+ } while (--length > 0) ;
+ } else {
+ /* Write the packet out in slow mode. */
+ unsigned char outbyte = *packet++;
+
+ outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+
+ outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outbyte >>= 4;
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ while (--length > pad_len)
+ write_byte_mode1(ioaddr, *packet++);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, 0);
+ }
+ /* Terminate the Tx frame. End of write: ECB. */
+ outb(0xff, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct net_local *np = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name,
+ inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
+ : "IRQ conflict");
+ np->stats.tx_errors++;
+ /* Try to restart the adapter. */
+ hardware_init(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ np->stats.tx_errors++;
+}
+
+static int atp_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int length;
+ unsigned long flags;
+
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
+ This sequence must not be interrupted by an incoming packet. */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ write_reg(ioaddr, IMR, 0);
+ write_reg_high(ioaddr, IMR, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);
+
+ lp->pac_cnt_in_tx_buf++;
+ if (lp->tx_unit_busy == 0) {
+ trigger_send(ioaddr, length);
+ lp->saved_tx_size = 0; /* Redundant */
+ lp->re_tx = 0;
+ lp->tx_unit_busy = 1;
+ } else
+ lp->saved_tx_size = length;
+ /* Re-enable the LPT interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+ return 0;
+}
+
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t
+atp_interrupt(int irq, void *dev_instance, struct pt_regs * regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct net_local *lp;
+ long ioaddr;
+ static int num_tx_since_rx;
+ int boguscount = max_interrupt_work;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk(KERN_ERR "ATP_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ /* Disable additional spurious interrupts. */
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+
+ /* The adapter's output is currently the IRQ line, switch it to data. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+ write_reg(ioaddr, IMR, 0);
+
+ if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
+ int status = read_nibble(ioaddr, ISR);
+ if (net_debug > 5) printk("loop status %02x..", status);
+
+ if (status & (ISR_RxOK<<3)) {
+ handled = 1;
+ write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
+ do {
+ int read_status = read_nibble(ioaddr, CMR1);
+ if (net_debug > 6)
+ printk("handling Rx packet %02x..", read_status);
+ /* We acknowledged the normal Rx interrupt, so if the interrupt
+ is still outstanding we must have a Rx error. */
+ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
+ lp->stats.rx_over_errors++;
+ /* Set to no-accept mode long enough to remove a packet. */
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+ net_rx(dev);
+ /* Clear the interrupt and return to normal Rx mode. */
+ write_reg_high(ioaddr, ISR, ISRh_RxErr);
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
+ net_rx(dev);
+ num_tx_since_rx = 0;
+ } else
+ break;
+ } while (--boguscount > 0);
+ } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
+ handled = 1;
+ if (net_debug > 6) printk("handling Tx done..");
+ /* Clear the Tx interrupt. We should check for too many failures
+ and reinitialize the adapter. */
+ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
+ if (status & (ISR_TxErr<<3)) {
+ lp->stats.collisions++;
+ if (++lp->re_tx > 15) {
+ lp->stats.tx_aborted_errors++;
+ hardware_init(dev);
+ break;
+ }
+ /* Attempt to retransmit. */
+ if (net_debug > 6) printk("attempting to ReTx");
+ write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
+ } else {
+ /* Finish up the transmit. */
+ lp->stats.tx_packets++;
+ lp->pac_cnt_in_tx_buf--;
+ if ( lp->saved_tx_size) {
+ trigger_send(ioaddr, lp->saved_tx_size);
+ lp->saved_tx_size = 0;
+ lp->re_tx = 0;
+ } else
+ lp->tx_unit_busy = 0;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ num_tx_since_rx++;
+ } else if (num_tx_since_rx > 8
+ && time_after(jiffies, dev->last_rx + HZ)) {
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
+ "%ld jiffies status %02x CMR1 %02x.\n", dev->name,
+ num_tx_since_rx, jiffies - dev->last_rx, status,
+ (read_nibble(ioaddr, CMR1) >> 3) & 15);
+ lp->stats.rx_missed_errors++;
+ hardware_init(dev);
+ num_tx_since_rx = 0;
+ break;
+ } else
+ break;
+ }
+
+ /* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+#if 0 && defined(TIMED_CHECKER)
+ mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
+#endif
+ }
+
+ /* Tell the adapter that it can go back to using the output line as IRQ. */
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ /* Enable the physical interrupt line, which is sure to be low until.. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ /* .. we enable the interrupt sources. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
+
+ spin_unlock(&lp->lock);
+
+ if (net_debug > 5) printk("exiting interrupt.\n");
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef TIMED_CHECKER
+/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+static void atp_timed_checker(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ long ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ int tickssofar = jiffies - lp->last_rx_time;
+ int i;
+
+ spin_lock(&lp->lock);
+ if (tickssofar > 2*HZ) {
+#if 1
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ lp->last_rx_time = jiffies;
+#else
+ for (i = 0; i < 6; i++)
+ if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
+ {
+ struct net_local *lp = netdev_priv(atp_timed_dev);
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+ if (i == 2)
+ lp->stats.tx_errors++;
+ else if (i == 3)
+ lp->stats.tx_dropped++;
+ else if (i == 4)
+ lp->stats.collisions++;
+ else
+ lp->stats.rx_errors++;
+ }
+#endif
+ }
+ spin_unlock(&lp->lock);
+ lp->timer.expires = jiffies + TIMED_CHECKER;
+ add_timer(&lp->timer);
+}
+#endif
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ struct rx_header rx_head;
+
+ /* Process the received packet. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
+ if (net_debug > 5)
+ printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad,
+ rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
+ if ((rx_head.rx_status & 0x77) != 0x01) {
+ lp->stats.rx_errors++;
+ if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++;
+ else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++;
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n",
+ dev->name, rx_head.rx_status);
+ if (rx_head.rx_status & 0x0020) {
+ lp->stats.rx_fifo_errors++;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ } else if (rx_head.rx_status & 0x0050)
+ hardware_init(dev);
+ return;
+ } else {
+ /* Malloc up new buffer. The "-4" omits the FCS (CRC). */
+ int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ goto done;
+ }
+ skb->dev = dev;
+
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ done:
+ write_reg(ioaddr, CMR1, CMR1_NextPkt);
+ lp->last_rx_time = jiffies;
+ return;
+}
+
+static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
+{
+
+ if (data_mode <= 3) { /* Mode 0 or 1 */
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
+ ioaddr + PAR_DATA);
+ if (data_mode <= 1) { /* Mode 0 or 1 */
+ do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
+ } else /* Mode 2 or 3 */
+ do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
+ } else if (data_mode <= 5)
+ do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
+ else
+ do *p++ = read_byte_mode6(ioaddr); while (--length > 0);
+
+ outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&lp->timer);
+
+ /* Flush the Tx and disable Rx here. */
+ lp->addr_mode = CMR2h_OFF;
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+
+ /* Free the IRQ line. */
+ outb(0x00, ioaddr + PAR_CONTROL);
+ free_irq(dev->irq, dev);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void set_rx_mode_8002(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
+ /* We must make the kernel realise we had to move
+ * into promisc mode or we start an all-out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+ lp->addr_mode = CMR2h_PROMISC;
+ } else
+ lp->addr_mode = CMR2h_Normal;
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+}
+
+static void set_rx_mode_8012(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_mode = CMR2h_PROMISC;
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ new_mode = CMR2h_Normal;
+ } else {
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ {
+ int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ }
+ new_mode = CMR2h_Normal;
+ }
+ lp->addr_mode = new_mode;
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
+ for (i = 0; i < 8; i++)
+ write_reg_byte(ioaddr, i, mc_filter[i]);
+ if (net_debug > 2 || 1) {
+ lp->addr_mode = 1;
+ printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
+ dev->name, lp->addr_mode);
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", mc_filter[i]);
+ printk(".\n");
+ }
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
+}
+
+static int __init atp_init_module(void) {
+ if (debug) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return atp_init();
+}
+
+static void __exit atp_cleanup_module(void) {
+ struct net_device *next_dev;
+
+ while (root_atp_dev) {
+ next_dev = ((struct net_local *)root_atp_dev->priv)->next_module;
+ unregister_netdev(root_atp_dev);
+ /* No need to release_region(), since we never snarf it. */
+ free_netdev(root_atp_dev);
+ root_atp_dev = next_dev;
+ }
+}
+
+module_init(atp_init_module);
+module_exit(atp_cleanup_module);
diff --git a/drivers/net/atp.h b/drivers/net/atp.h
new file mode 100644
index 000000000000..0edc642c2c2f
--- /dev/null
+++ b/drivers/net/atp.h
@@ -0,0 +1,259 @@
+/* Linux header file for the ATP pocket ethernet adapter. */
+/* v1.09 8/9/2000 becker@scyld.com. */
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+/* The header prepended to received packets. */
+struct rx_header {
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
+};
+
+#define PAR_DATA 0
+#define PAR_STATUS 1
+#define PAR_CONTROL 2
+
+enum chip_type { RTL8002, RTL8012 };
+
+#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
+#define Ctrl_HNibRead 0
+#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
+#define Ctrl_HNibWrite 0
+#define Ctrl_SelData 0x04 /* LP_PINITP */
+#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
+
+#define EOW 0xE0
+#define EOC 0xE0
+#define WrAddr 0x40 /* Set address of EPLC read, write register. */
+#define RdAddr 0xC0
+#define HNib 0x10
+
+enum page0_regs
+{
+ /* The first six registers hold the ethernet physical station address. */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+ CMR2_h = 0x1d, };
+
+enum eepage_regs
+{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+
+
+#define ISR_TxOK 0x01
+#define ISR_RxOK 0x04
+#define ISR_TxErr 0x02
+#define ISRh_RxErr 0x11 /* ISR, high nibble */
+
+#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
+#define CMR1h_RESET 0x04 /* Reset. */
+#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
+#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
+#define CMR1h_TxRxOFF 0x00
+#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
+#define CMR1_Xmit 0x04 /* Trigger a transmit. */
+#define CMR1_IRQ 0x02 /* Interrupt active. */
+#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
+#define CMR1_NextPkt 0x01 /* Advance to the next packet(?). */
+
+#define CMR2_NULL 8
+#define CMR2_IRQOUT 9
+#define CMR2_RAMTEST 10
+#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
+
+#define CMR2h_OFF 0 /* No accept mode. */
+#define CMR2h_Physical 1 /* Accept a physical address match only. */
+#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
+#define CMR2h_PROMISC 3 /* Promiscuous mode. */
+
+/* An inline function used below: it differs from inb() by explicitly returning an
+ unsigned char, saving a truncation. */
+static inline unsigned char inbyte(unsigned short port)
+{
+ unsigned char _v;
+ __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
+ return _v;
+}
+
+/* Read register OFFSET.
+ This command should always be terminated with read_end(). */
+static inline unsigned char read_nibble(short port, unsigned char offset)
+{
+ unsigned char retval;
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
+}
+
+/* Functions for bulk data read. The interrupt line is always disabled. */
+/* Get a byte using read mode 0, reading data from the control lines. */
+static inline unsigned char read_byte_mode0(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
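+
+/*
+ * In read_byte_mode0() above (and the other nibble-wide read modes below)
+ * the adapter returns each half of a byte on the printer status lines:
+ * (status >> 3) & 0x0f recovers the low nibble and (status << 1) & 0xf0 the
+ * high one, i.e. status bits 3-6 carry the data in both cases.
+ */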
+
+/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
+static inline unsigned char read_byte_mode2(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register. */
+static inline unsigned char read_byte_mode4(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register, double reading to allow settling. */
+static inline unsigned char read_byte_mode6(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+static inline void
+write_reg(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
+}
+
+static inline void
+write_reg_high(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval = EOC | HNib | reg;
+
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | HNib | outval, port + PAR_DATA);
+}
+
+/* Write a byte out using nibble mode. The low nibble is written first. */
+static inline void
+write_reg_byte(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+}
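+
+/*
+ * Summary of the three helpers above: write_reg() updates the low nibble of
+ * a register, write_reg_high() the high nibble (HNib stays set through the
+ * whole sequence), and write_reg_byte() writes a full byte, low nibble
+ * first.  All three double-write the address for PS/2-style ports.
+ */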
+
+/*
+ * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+ * The first, faster method uses only the dataport (data modes 0, 2 & 4).
+ * The second (backup) method uses data and control regs (modes 1, 3 & 5).
+ * It should only be needed when there is skew between the individual data
+ * lines.
+ */
+static inline void write_byte_mode0(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+}
+
+static inline void write_byte_mode1(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+}
+
+/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
+static inline void write_word_mode0(short ioaddr, unsigned short value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+}
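+
+/*
+ * Example: write_word_mode0(ioaddr, 0xABCD) puts four nibbles on the data
+ * port, low byte first and low nibble first, with 0x10 marking the high
+ * nibble of each byte: 0x0D, 0x1C, 0x0B, 0x1A.
+ */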
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_CLK_HIGH 0x12
+#define EE_CLK_LOW 0x16
+#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay(ticks) \
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
+#define EE_CMD_SIZE 27 /* The command+address+data size. */
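+
+/*
+ * Example: EE_READ(0x10) evaluates to ((6 << 6) + 0x10) << 17 = 0x03200000,
+ * i.e. the start bit, read opcode and 6-bit address shifted up so that,
+ * presumably, the driver can clock the EE_CMD_SIZE (27) bits out MSB-first
+ * and keep clocking while the 16 data bits come back.  Note that
+ * eeprom_delay() ignores its argument and simply spins for 40 I/O cycles.
+ */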
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
new file mode 100644
index 000000000000..5a2efd343db4
--- /dev/null
+++ b/drivers/net/au1000_eth.c
@@ -0,0 +1,2273 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver
+ *
+ * Copyright 2001,2002,2003 MontaVista Software Inc.
+ * Copyright 2002 TimeSys Corp.
+ * Added ethtool/mii-tool support,
+ * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
+ * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
+ * or riemer@riemer-nt.de: fixed the link beat detection with
+ * ioctls (SIOCGMIIPHY)
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/mipsregs.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/cpu.h>
+#include "au1000_eth.h"
+
+#ifdef AU1000_ETH_DEBUG
+static int au1000_debug = 5;
+#else
+static int au1000_debug = 3;
+#endif
+
+#define DRV_NAME "au1000eth"
+#define DRV_VERSION "1.5"
+#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
+#define DRV_DESC "Au1xxx on-chip Ethernet driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+// prototypes
+static void hard_stop(struct net_device *);
+static void enable_rx_tx(struct net_device *dev);
+static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
+static int au1000_init(struct net_device *);
+static int au1000_open(struct net_device *);
+static int au1000_close(struct net_device *);
+static int au1000_tx(struct sk_buff *, struct net_device *);
+static int au1000_rx(struct net_device *);
+static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
+static void au1000_tx_timeout(struct net_device *);
+static int au1000_set_config(struct net_device *dev, struct ifmap *map);
+static void set_rx_mode(struct net_device *);
+static struct net_device_stats *au1000_get_stats(struct net_device *);
+static inline void update_tx_stats(struct net_device *, u32, u32);
+static inline void update_rx_stats(struct net_device *, u32);
+static void au1000_timer(unsigned long);
+static int au1000_ioctl(struct net_device *, struct ifreq *, int);
+static int mdio_read(struct net_device *, int, int);
+static void mdio_write(struct net_device *, int, int, u16);
+static void dump_mii(struct net_device *dev, int phy_id);
+
+// externs
+extern void ack_rise_edge_irq(unsigned int);
+extern int get_ethernet_addr(char *ethernet_addr);
+extern void str2eaddr(unsigned char *ea, unsigned char *str);
+extern char * __init prom_getcmdline(void);
+
+/*
+ * Theory of operation
+ *
+ * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
+ * There are four receive and four transmit descriptors. These
+ * descriptors are not in memory; rather, they are just a set of
+ * hardware registers.
+ *
+ * Since the Au1000 has a coherent data cache, the receive and
+ * transmit buffers are allocated from the KSEG0 segment. The
+ * hardware registers, however, are still mapped at KSEG1 to
+ * make sure there's no out-of-order writes, and that all writes
+ * complete immediately.
+ */
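+
+/*
+ * A practical consequence of the four-entry rings: indices simply wrap with
+ * a power-of-two mask, e.g. tx_head = (tx_head + 1) & (NUM_TX_DMA - 1) in
+ * au1000_tx() below, instead of following descriptor link pointers.
+ */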
+
+/* These addresses are only used if yamon doesn't tell us what
+ * the mac address is, and the mac address is not passed on the
+ * command line.
+ */
+static unsigned char au1000_mac_addr[6] __devinitdata = {
+ 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
+};
+
+#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
+#define RUN_AT(x) (jiffies + (x))
+
+// For reading/writing 32-bit words from/to DMA memory
+#define cpu_to_dma32 cpu_to_be32
+#define dma32_to_cpu be32_to_cpu
+
+struct au1000_private *au_macs[NUM_ETH_INTERFACES];
+
+/* FIXME
+ * All of the PHY code really should be detached from the MAC
+ * code.
+ */
+
+/* Default advertise */
+#define GENMII_DEFAULT_ADVERTISE \
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_Autoneg
+
+#define GENMII_DEFAULT_FEATURES \
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg
+
+static char *phy_link[] =
+{ "unknown",
+ "10Base2", "10BaseT",
+ "AUI",
+ "100BaseT", "100BaseTX", "100BaseFX"
+};
+
+int bcm_5201_init(struct net_device *dev, int phy_addr)
+{
+ s16 data;
+
+ /* Stop auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
+
+ /* Set advertisement to 10/100 and Half/Full duplex
+ * (full capabilities) */
+ data = mdio_read(dev, phy_addr, MII_ANADV);
+ data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
+ mdio_write(dev, phy_addr, MII_ANADV, data);
+
+ /* Restart auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
+ mdio_write(dev, phy_addr, MII_CONTROL, data);
+
+ if (au1000_debug > 4)
+ dump_mii(dev, phy_addr);
+ return 0;
+}
+
+int bcm_5201_reset(struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int
+bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ if (!dev) {
+ printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
+ return -1;
+ }
+ aup = (struct au1000_private *) dev->priv;
+
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
+ if (mii_data & MII_AUX_100) {
+ if (mii_data & MII_AUX_FDX) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
+
+int lsi_80227_init(struct net_device *dev, int phy_addr)
+{
+ if (au1000_debug > 4)
+ printk("lsi_80227_init\n");
+
+ /* restart auto-negotiation */
+ mdio_write(dev, phy_addr, MII_CONTROL,
+ MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
+ mdelay(1);
+
+ /* set up LEDs to correct display */
+#ifdef CONFIG_MIPS_MTX1
+ mdio_write(dev, phy_addr, 17, 0xff80);
+#else
+ mdio_write(dev, phy_addr, 17, 0xffc0);
+#endif
+
+ if (au1000_debug > 4)
+ dump_mii(dev, phy_addr);
+ return 0;
+}
+
+int lsi_80227_reset(struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+ if (au1000_debug > 4) {
+ printk("lsi_80227_reset\n");
+ dump_mii(dev, phy_addr);
+ }
+
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int
+lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ if (!dev) {
+ printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
+ return -1;
+ }
+ aup = (struct au1000_private *) dev->priv;
+
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
+ if (mii_data & MII_LSI_PHY_STAT_SPD) {
+ if (mii_data & MII_LSI_PHY_STAT_FDX) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
+
+int am79c901_init(struct net_device *dev, int phy_addr)
+{
+ printk("am79c901_init\n");
+ return 0;
+}
+
+int am79c901_reset(struct net_device *dev, int phy_addr)
+{
+ printk("am79c901_reset\n");
+ return 0;
+}
+
+int
+am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ return 0;
+}
+
+int am79c874_init(struct net_device *dev, int phy_addr)
+{
+ s16 data;
+
+ /* The 79c874 has quite similar bit assignments to the BCM5201. */
+ if (au1000_debug > 4)
+ printk("am79c874_init\n");
+
+ /* Stop auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
+
+ /* Set advertisement to 10/100 and Half/Full duplex
+ * (full capabilities) */
+ data = mdio_read(dev, phy_addr, MII_ANADV);
+ data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
+ mdio_write(dev, phy_addr, MII_ANADV, data);
+
+ /* Restart auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
+
+ mdio_write(dev, phy_addr, MII_CONTROL, data);
+
+ if (au1000_debug > 4) dump_mii(dev, phy_addr);
+ return 0;
+}
+
+int am79c874_reset(struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+ if (au1000_debug > 4)
+ printk("am79c874_reset\n");
+
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int
+am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ // printk("am79c874_status\n");
+ if (!dev) {
+ printk(KERN_ERR "am79c874_status error: NULL dev\n");
+ return -1;
+ }
+
+ aup = (struct au1000_private *) dev->priv;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
+ if (mii_data & MII_AMD_PHY_STAT_SPD) {
+ if (mii_data & MII_AMD_PHY_STAT_FDX) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
+
+int lxt971a_init(struct net_device *dev, int phy_addr)
+{
+ if (au1000_debug > 4)
+ printk("lxt971a_init\n");
+
+ /* restart auto-negotiation */
+ mdio_write(dev, phy_addr, MII_CONTROL,
+ MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
+
+ /* set up LEDs to correct display */
+ mdio_write(dev, phy_addr, 20, 0x0422);
+
+ if (au1000_debug > 4)
+ dump_mii(dev, phy_addr);
+ return 0;
+}
+
+int lxt971a_reset(struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+ if (au1000_debug > 4) {
+ printk("lxt971a_reset\n");
+ dump_mii(dev, phy_addr);
+ }
+
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int
+lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ if (!dev) {
+ printk(KERN_ERR "lxt971a_status error: NULL dev\n");
+ return -1;
+ }
+ aup = (struct au1000_private *) dev->priv;
+
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
+ if (mii_data & MII_INTEL_PHY_STAT_SPD) {
+ if (mii_data & MII_INTEL_PHY_STAT_FDX) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
+
+int ks8995m_init(struct net_device *dev, int phy_addr)
+{
+ s16 data;
+
+// printk("ks8995m_init\n");
+ /* Stop auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
+
+ /* Set advertisement to 10/100 and Half/Full duplex
+ * (full capabilities) */
+ data = mdio_read(dev, phy_addr, MII_ANADV);
+ data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
+ mdio_write(dev, phy_addr, MII_ANADV, data);
+
+ /* Restart auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
+ mdio_write(dev, phy_addr, MII_CONTROL, data);
+
+ if (au1000_debug > 4) dump_mii(dev, phy_addr);
+
+ return 0;
+}
+
+int ks8995m_reset(struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+// printk("ks8995m_reset\n");
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ if (!dev) {
+ printk(KERN_ERR "ks8995m_status error: NULL dev\n");
+ return -1;
+ }
+ aup = (struct au1000_private *) dev->priv;
+
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
+ if (mii_data & MII_AUX_100) {
+ if (mii_data & MII_AUX_FDX) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
+
+int
+smsc_83C185_init (struct net_device *dev, int phy_addr)
+{
+ s16 data;
+
+ if (au1000_debug > 4)
+ printk("smsc_83C185_init\n");
+
+ /* Stop auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
+
+ /* Set advertisement to 10/100 and Half/Full duplex
+ * (full capabilities) */
+ data = mdio_read(dev, phy_addr, MII_ANADV);
+ data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
+ mdio_write(dev, phy_addr, MII_ANADV, data);
+
+ /* Restart auto-negotiation */
+ data = mdio_read(dev, phy_addr, MII_CONTROL);
+ data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
+
+ mdio_write(dev, phy_addr, MII_CONTROL, data);
+
+ if (au1000_debug > 4) dump_mii(dev, phy_addr);
+ return 0;
+}
+
+int
+smsc_83C185_reset (struct net_device *dev, int phy_addr)
+{
+ s16 mii_control, timeout;
+
+ if (au1000_debug > 4)
+ printk("smsc_83C185_reset\n");
+
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
+ mdelay(1);
+ for (timeout = 100; timeout > 0; --timeout) {
+ mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
+ if ((mii_control & MII_CNTL_RESET) == 0)
+ break;
+ mdelay(1);
+ }
+ if (mii_control & MII_CNTL_RESET) {
+ printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+int
+smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ u16 mii_data;
+ struct au1000_private *aup;
+
+ if (!dev) {
+ printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
+ return -1;
+ }
+
+ aup = (struct au1000_private *) dev->priv;
+ mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
+
+ if (mii_data & MII_STAT_LINK) {
+ *link = 1;
+ mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
+ if (mii_data & (1<<3)) {
+ if (mii_data & (1<<4)) {
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ }
+ else {
+ *speed = IF_PORT_100BASETX;
+ dev->if_port = IF_PORT_100BASETX;
+ }
+ }
+ else {
+ *speed = IF_PORT_10BASET;
+ dev->if_port = IF_PORT_10BASET;
+ }
+ }
+ else {
+ *link = 0;
+ *speed = 0;
+ dev->if_port = IF_PORT_UNKNOWN;
+ }
+ return 0;
+}
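+
+/*
+ * Note on the *_status() helpers above: *speed is filled in with an
+ * IF_PORT_* value rather than a Mbit/s figure, and IF_PORT_100BASEFX is
+ * used to mean "100 Mbit, full duplex"; au1000_init() and
+ * au1000_get_settings() below key the duplex setting off that value.
+ */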
+
+
+#ifdef CONFIG_MIPS_BOSPORUS
+int stub_init(struct net_device *dev, int phy_addr)
+{
+ //printk("PHY stub_init\n");
+ return 0;
+}
+
+int stub_reset(struct net_device *dev, int phy_addr)
+{
+ //printk("PHY stub_reset\n");
+ return 0;
+}
+
+int
+stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
+{
+ //printk("PHY stub_status\n");
+ *link = 1;
+ /* hmmm, revisit */
+ *speed = IF_PORT_100BASEFX;
+ dev->if_port = IF_PORT_100BASEFX;
+ return 0;
+}
+#endif
+
+struct phy_ops bcm_5201_ops = {
+ bcm_5201_init,
+ bcm_5201_reset,
+ bcm_5201_status,
+};
+
+struct phy_ops am79c874_ops = {
+ am79c874_init,
+ am79c874_reset,
+ am79c874_status,
+};
+
+struct phy_ops am79c901_ops = {
+ am79c901_init,
+ am79c901_reset,
+ am79c901_status,
+};
+
+struct phy_ops lsi_80227_ops = {
+ lsi_80227_init,
+ lsi_80227_reset,
+ lsi_80227_status,
+};
+
+struct phy_ops lxt971a_ops = {
+ lxt971a_init,
+ lxt971a_reset,
+ lxt971a_status,
+};
+
+struct phy_ops ks8995m_ops = {
+ ks8995m_init,
+ ks8995m_reset,
+ ks8995m_status,
+};
+
+struct phy_ops smsc_83C185_ops = {
+ smsc_83C185_init,
+ smsc_83C185_reset,
+ smsc_83C185_status,
+};
+
+#ifdef CONFIG_MIPS_BOSPORUS
+struct phy_ops stub_ops = {
+ stub_init,
+ stub_reset,
+ stub_status,
+};
+#endif
+
+static struct mii_chip_info {
+ const char * name;
+ u16 phy_id0;
+ u16 phy_id1;
+ struct phy_ops *phy_ops;
+ int dual_phy;
+} mii_chip_table[] = {
+ {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
+ {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
+ {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
+ {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
+ {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
+ {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
+ {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
+ {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
+ {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
+#ifdef CONFIG_MIPS_BOSPORUS
+ {"Stub", 0x1234, 0x5678, &stub_ops },
+#endif
+ {0,},
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ volatile u32 *mii_control_reg;
+ volatile u32 *mii_data_reg;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ /* First time we probe, it's for the mac0 phy.
+ * Since we haven't determined yet that we have a dual phy,
+ * aup->mii->mii_control_reg won't be setup and we'll
+ * default to the else statement.
+ * By the time we probe for the mac1 phy, the mii_control_reg
+ * will be setup to be the address of the mac0 phy control since
+ * both phys are controlled through mac0.
+ */
+ if (aup->mii && aup->mii->mii_control_reg) {
+ mii_control_reg = aup->mii->mii_control_reg;
+ mii_data_reg = aup->mii->mii_data_reg;
+ }
+ else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
+ /* assume both phys are controlled through mac0 */
+ mii_control_reg = au_macs[0]->mii->mii_control_reg;
+ mii_data_reg = au_macs[0]->mii->mii_data_reg;
+ }
+ else
+ #endif
+ {
+ /* default control and data reg addresses */
+ mii_control_reg = &aup->mac->mii_control;
+ mii_data_reg = &aup->mac->mii_data;
+ }
+
+ while (*mii_control_reg & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: read_MII busy timeout!!\n",
+ dev->name);
+ return -1;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
+
+ *mii_control_reg = mii_control;
+
+ timedout = 20;
+ while (*mii_control_reg & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
+ dev->name);
+ return -1;
+ }
+ }
+ return (int)*mii_data_reg;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ volatile u32 *mii_control_reg;
+ volatile u32 *mii_data_reg;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ if (aup->mii && aup->mii->mii_control_reg) {
+ mii_control_reg = aup->mii->mii_control_reg;
+ mii_data_reg = aup->mii->mii_data_reg;
+ }
+ else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
+ /* assume both phys are controlled through mac0 */
+ mii_control_reg = au_macs[0]->mii->mii_control_reg;
+ mii_data_reg = au_macs[0]->mii->mii_data_reg;
+ }
+ else
+ #endif
+ {
+ /* default control and data reg addresses */
+ mii_control_reg = &aup->mac->mii_control;
+ mii_data_reg = &aup->mac->mii_data;
+ }
+
+ while (*mii_control_reg & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
+ dev->name);
+ return;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
+
+ *mii_data_reg = value;
+ *mii_control_reg = mii_control;
+}
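+
+/*
+ * Both MII helpers above poll MAC_MII_BUSY (up to 20 x 1 ms) before issuing
+ * a transaction.  mdio_read() also waits for completion to fetch the data,
+ * while mdio_write() returns immediately; the busy-wait at the start of the
+ * next access acts as the completion barrier.
+ */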
+
+
+static void dump_mii(struct net_device *dev, int phy_id)
+{
+ int i, val;
+
+ for (i = 0; i < 7; i++) {
+ if ((val = mdio_read(dev, phy_id, i)) >= 0)
+ printk("%s: MII Reg %d=%x\n", dev->name, i, val);
+ }
+ for (i = 16; i < 25; i++) {
+ if ((val = mdio_read(dev, phy_id, i)) >= 0)
+ printk("%s: MII Reg %d=%x\n", dev->name, i, val);
+ }
+}
+
+static int mii_probe (struct net_device * dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ int phy_addr;
+#ifdef CONFIG_MIPS_BOSPORUS
+ int phy_found=0;
+#endif
+
+ /* search for total of 32 possible mii phy addresses */
+ for (phy_addr = 0; phy_addr < 32; phy_addr++) {
+ u16 mii_status;
+ u16 phy_id0, phy_id1;
+ int i;
+
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ /* Mask the already found phy, try next one */
+ if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
+ if (au_macs[0]->phy_addr == phy_addr)
+ continue;
+ }
+ #endif
+
+ mii_status = mdio_read(dev, phy_addr, MII_STATUS);
+ if (mii_status == 0xffff || mii_status == 0x0000)
+ /* the mii is not accessible, try next one */
+ continue;
+
+ phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
+ phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
+
+ /* search our mii table for the current mii */
+ for (i = 0; mii_chip_table[i].phy_id1; i++) {
+ if (phy_id0 == mii_chip_table[i].phy_id0 &&
+ phy_id1 == mii_chip_table[i].phy_id1) {
+ struct mii_phy * mii_phy = aup->mii;
+
+ printk(KERN_INFO "%s: %s at phy address %d\n",
+ dev->name, mii_chip_table[i].name,
+ phy_addr);
+#ifdef CONFIG_MIPS_BOSPORUS
+ phy_found = 1;
+#endif
+ mii_phy->chip_info = mii_chip_table+i;
+ aup->phy_addr = phy_addr;
+ aup->want_autoneg = 1;
+ aup->phy_ops = mii_chip_table[i].phy_ops;
+ aup->phy_ops->phy_init(dev,phy_addr);
+
+ // Check for dual-phy and then store required
+ // values and set indicators. We need to do
+ // this now since mdio_{read,write} need the
+ // control and data register addresses.
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ if ( mii_chip_table[i].dual_phy) {
+
+ /* assume both phys are controlled
+ * through MAC0. Board specific? */
+
+ /* sanity check */
+ if (!au_macs[0] || !au_macs[0]->mii)
+ return -1;
+ aup->mii->mii_control_reg = (u32 *)
+ &au_macs[0]->mac->mii_control;
+ aup->mii->mii_data_reg = (u32 *)
+ &au_macs[0]->mac->mii_data;
+ }
+ #endif
+ goto found;
+ }
+ }
+ }
+found:
+
+#ifdef CONFIG_MIPS_BOSPORUS
+ /* This is a workaround for the Micrel/Kendin 5 port switch
+ The second MAC doesn't see a PHY connected... so we need to
+ trick it into thinking we have one.
+
+ If this kernel is run on another Au1500 development board
+ the stub will be found as well as the actual PHY. However,
+ the last found PHY will be used... usually at Addr 31 (Db1500).
+ */
+ if ( (!phy_found) )
+ {
+ u16 phy_id0, phy_id1;
+ int i;
+
+ phy_id0 = 0x1234;
+ phy_id1 = 0x5678;
+
+ /* search our mii table for the current mii */
+ for (i = 0; mii_chip_table[i].phy_id1; i++) {
+ if (phy_id0 == mii_chip_table[i].phy_id0 &&
+ phy_id1 == mii_chip_table[i].phy_id1) {
+ struct mii_phy * mii_phy;
+
+ printk(KERN_INFO "%s: %s at phy address %d\n",
+ dev->name, mii_chip_table[i].name,
+ phy_addr);
+ mii_phy = kmalloc(sizeof(struct mii_phy),
+ GFP_KERNEL);
+ if (mii_phy) {
+ mii_phy->chip_info = mii_chip_table+i;
+ aup->phy_addr = phy_addr;
+ mii_phy->next = aup->mii;
+ aup->phy_ops =
+ mii_chip_table[i].phy_ops;
+ aup->mii = mii_phy;
+ aup->phy_ops->phy_init(dev,phy_addr);
+ } else {
+ printk(KERN_ERR "%s: out of memory\n",
+ dev->name);
+ return -1;
+ }
+ mii_phy->chip_info = mii_chip_table+i;
+ aup->phy_addr = phy_addr;
+ aup->phy_ops = mii_chip_table[i].phy_ops;
+ aup->phy_ops->phy_init(dev,phy_addr);
+ break;
+ }
+ }
+ }
+ if (aup->mac_id == 0) {
+ /* the Bosporus phy responds to addresses 0-5 but
+ * 5 is the correct one.
+ */
+ aup->phy_addr = 5;
+ }
+#endif
+
+ if (aup->mii->chip_info == NULL) {
+ printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
+ dev->name);
+ return -1;
+ }
+
+ printk(KERN_INFO "%s: Using %s as default\n",
+ dev->name, aup->mii->chip_info->name);
+
+ return 0;
+}
+
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and DMA address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static db_dest_t *GetFreeDB(struct au1000_private *aup)
+{
+ db_dest_t *pDB;
+ pDB = aup->pDBfree;
+
+ if (pDB) {
+ aup->pDBfree = pDB->pnext;
+ }
+ return pDB;
+}
+
+void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+{
+ /* Push the descriptor back onto the head of the free list. */
+ pDB->pnext = aup->pDBfree;
+ aup->pDBfree = pDB;
+}
+
+static void enable_rx_tx(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+
+ aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ au_sync_delay(10);
+}
+
+static void hard_stop(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: hard stop\n", dev->name);
+
+ aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ au_sync_delay(10);
+}
+
+
+static void reset_mac(struct net_device *dev)
+{
+ int i;
+ u32 flags;
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: reset mac, aup %x\n",
+ dev->name, (unsigned)aup);
+
+ spin_lock_irqsave(&aup->lock, flags);
+ if (aup->timer.function == &au1000_timer) {/* check if timer initted */
+ del_timer(&aup->timer);
+ }
+
+ hard_stop(dev);
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ if (aup->mac_id != 0) {
+ #endif
+ /* If BCM5222, we can't leave MAC0 in reset because then
+ * we can't access the dual phy for ETH1 */
+ *aup->enable = MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+ *aup->enable = 0;
+ au_sync_delay(2);
+ #ifdef CONFIG_BCM5222_DUAL_PHY
+ }
+ #endif
+ aup->tx_full = 0;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ /* reset control bits */
+ aup->rx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ /* reset control bits */
+ aup->tx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+
+/*
+ * Setup the receive and transmit "rings". These pointers are the addresses
+ * of the rx and tx MAC DMA registers so they are fixed by the hardware --
+ * these are not descriptors sitting in memory.
+ */
+static void
+setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ aup->rx_dma_ring[i] =
+ (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ aup->tx_dma_ring[i] =
+ (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
+ }
+}
+
+static struct {
+ int port;
+ u32 base_addr;
+ u32 macen_addr;
+ int irq;
+ struct net_device *dev;
+} iflist[2];
+
+static int num_ifs;
+
+/*
+ * Setup the base address and interrupt of the Au1xxx ethernet macs
+ * based on cpu type and whether the interface is enabled in sys_pinfunc
+ * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
+ */
+static int __init au1000_init_module(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
+ struct net_device *dev;
+ int i, found_one = 0;
+
+ switch (c->cputype) {
+#ifdef CONFIG_SOC_AU1000
+ case CPU_AU1000:
+ num_ifs = 2 - ni;
+ iflist[0].base_addr = AU1000_ETH0_BASE;
+ iflist[1].base_addr = AU1000_ETH1_BASE;
+ iflist[0].macen_addr = AU1000_MAC0_ENABLE;
+ iflist[1].macen_addr = AU1000_MAC1_ENABLE;
+ iflist[0].irq = AU1000_MAC0_DMA_INT;
+ iflist[1].irq = AU1000_MAC1_DMA_INT;
+ break;
+#endif
+#ifdef CONFIG_SOC_AU1100
+ case CPU_AU1100:
+ num_ifs = 1 - ni;
+ iflist[0].base_addr = AU1100_ETH0_BASE;
+ iflist[0].macen_addr = AU1100_MAC0_ENABLE;
+ iflist[0].irq = AU1100_MAC0_DMA_INT;
+ break;
+#endif
+#ifdef CONFIG_SOC_AU1500
+ case CPU_AU1500:
+ num_ifs = 2 - ni;
+ iflist[0].base_addr = AU1500_ETH0_BASE;
+ iflist[1].base_addr = AU1500_ETH1_BASE;
+ iflist[0].macen_addr = AU1500_MAC0_ENABLE;
+ iflist[1].macen_addr = AU1500_MAC1_ENABLE;
+ iflist[0].irq = AU1500_MAC0_DMA_INT;
+ iflist[1].irq = AU1500_MAC1_DMA_INT;
+ break;
+#endif
+#ifdef CONFIG_SOC_AU1550
+ case CPU_AU1550:
+ num_ifs = 2 - ni;
+ iflist[0].base_addr = AU1550_ETH0_BASE;
+ iflist[1].base_addr = AU1550_ETH1_BASE;
+ iflist[0].macen_addr = AU1550_MAC0_ENABLE;
+ iflist[1].macen_addr = AU1550_MAC1_ENABLE;
+ iflist[0].irq = AU1550_MAC0_DMA_INT;
+ iflist[1].irq = AU1550_MAC1_DMA_INT;
+ break;
+#endif
+ default:
+ num_ifs = 0;
+ }
+ for(i = 0; i < num_ifs; i++) {
+ dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
+ iflist[i].dev = dev;
+ if (dev)
+ found_one++;
+ }
+ if (!found_one)
+ return -ENODEV;
+ return 0;
+}
+
+static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ u16 ctl, adv;
+
+ /* Setup standard advertise */
+ adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
+
+ /* Start/Restart aneg */
+ ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ u16 ctl;
+
+ ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ return -EINVAL;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
+
+ return 0;
+}
+
+
+static void
+au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ u32 advertise;
+ int autoneg;
+ int forced_speed;
+ int forced_duplex;
+
+ /* Default advertise */
+ advertise = GENMII_DEFAULT_ADVERTISE;
+ autoneg = aup->want_autoneg;
+ forced_speed = SPEED_100;
+ forced_duplex = DUPLEX_FULL;
+
+ /* Setup link parameters */
+ if (cmd) {
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ advertise = cmd->advertising;
+ autoneg = 1;
+ } else {
+ autoneg = 0;
+
+ forced_speed = cmd->speed;
+ forced_duplex = cmd->duplex;
+ }
+ }
+
+ /* Configure PHY & start aneg */
+ aup->want_autoneg = autoneg;
+ if (autoneg)
+ au1000_setup_aneg(dev, advertise);
+ else
+ au1000_setup_forced(dev, forced_speed, forced_duplex);
+ mod_timer(&aup->timer, jiffies + HZ);
+}
+
+static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ u16 link, speed;
+
+ cmd->supported = GENMII_DEFAULT_FEATURES;
+ cmd->advertising = GENMII_DEFAULT_ADVERTISE;
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = aup->phy_addr;
+ spin_lock_irq(&aup->lock);
+ cmd->autoneg = aup->want_autoneg;
+ aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
+ if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
+ cmd->speed = SPEED_100;
+ else if (speed == IF_PORT_10BASET)
+ cmd->speed = SPEED_10;
+ if (link && (dev->if_port == IF_PORT_100BASEFX))
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
+ spin_unlock_irq(&aup->lock);
+ return 0;
+}
+
+static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ unsigned long features = GENMII_DEFAULT_FEATURES;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ return -EINVAL;
+ if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_DISABLE)
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_HALF &&
+ (features & SUPPORTED_10baseT_Half) == 0)
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ (features & SUPPORTED_10baseT_Full) == 0)
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_HALF &&
+ (features & SUPPORTED_100baseT_Half) == 0)
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ (features & SUPPORTED_100baseT_Full) == 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if ((features & SUPPORTED_Autoneg) == 0)
+ return -EINVAL;
+
+ spin_lock_irq(&aup->lock);
+ au1000_start_link(dev, cmd);
+ spin_unlock_irq(&aup->lock);
+ return 0;
+}
+
+static int au1000_nway_reset(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+
+ if (!aup->want_autoneg)
+ return -EINVAL;
+ spin_lock_irq(&aup->lock);
+ au1000_start_link(dev, NULL);
+ spin_unlock_irq(&aup->lock);
+ return 0;
+}
+
+static void
+au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
+ info->regdump_len = 0;
+}
+
+static u32 au1000_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev);
+}
+
+static struct ethtool_ops au1000_ethtool_ops = {
+ .get_settings = au1000_get_settings,
+ .set_settings = au1000_set_settings,
+ .get_drvinfo = au1000_get_drvinfo,
+ .nway_reset = au1000_nway_reset,
+ .get_link = au1000_get_link
+};
+
+static struct net_device *
+au1000_probe(u32 ioaddr, int irq, int port_num)
+{
+ static unsigned version_printed = 0;
+ struct au1000_private *aup = NULL;
+ struct net_device *dev = NULL;
+ db_dest_t *pDB, *pDBfree;
+ char *pmac, *argptr;
+ char ethaddr[6];
+ int i, err;
+
+ if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
+ return NULL;
+
+ if (version_printed++ == 0)
+ printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ dev = alloc_etherdev(sizeof(struct au1000_private));
+ if (!dev) {
+ printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
+ return NULL;
+ }
+
+ if ((err = register_netdev(dev))) {
+ printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
+ err);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
+ dev->name, ioaddr, irq);
+
+ aup = dev->priv;
+
+ /* Allocate the data buffers */
+ /* Snooping works fine with eth on all au1xxx */
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
+ MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
+ &aup->dma_addr,
+ 0);
+ if (!aup->vaddr) {
+ free_netdev(dev);
+ release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ return NULL;
+ }
+
+ /* aup->mac is the base address of the MAC's registers */
+ aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
+ /* Setup some variables for quick register address access */
+ if (ioaddr == iflist[0].base_addr)
+ {
+ /* check env variables first */
+ if (!get_ethernet_addr(ethaddr)) {
+ memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
+ } else {
+ /* Check command line */
+ argptr = prom_getcmdline();
+ if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
+ printk(KERN_INFO "%s: No mac address found\n",
+ dev->name);
+ /* use the hard coded mac addresses */
+ } else {
+ str2eaddr(ethaddr, pmac + strlen("ethaddr="));
+ memcpy(au1000_mac_addr, ethaddr,
+ sizeof(au1000_mac_addr));
+ }
+ }
+ aup->enable = (volatile u32 *)
+ ((unsigned long)iflist[0].macen_addr);
+ memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+ setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ aup->mac_id = 0;
+ au_macs[0] = aup;
+ }
+ else
+ if (ioaddr == iflist[1].base_addr)
+ {
+ aup->enable = (volatile u32 *)
+ ((unsigned long)iflist[1].macen_addr);
+ memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+ dev->dev_addr[4] += 0x10;
+ setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ aup->mac_id = 1;
+ au_macs[1] = aup;
+ }
+ else
+ {
+ printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
+ }
+
+ /* bring the device out of reset, otherwise probing the mii
+ * will hang */
+ *aup->enable = MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+ *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
+ MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+
+ aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
+ if (!aup->mii) {
+ printk(KERN_ERR "%s: out of memory\n", dev->name);
+ goto err_out;
+ }
+ aup->mii->mii_control_reg = 0;
+ aup->mii->mii_data_reg = 0;
+ aup->mii->chip_info = NULL; /* kmalloc() does not zero; mii_probe() tests this */
+
+ if (mii_probe(dev) != 0) {
+ goto err_out;
+ }
+
+ pDBfree = NULL;
+ /* setup the data buffer descriptors and attach a buffer to each one */
+ pDB = aup->db;
+ for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) {
+ goto err_out;
+ }
+ aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) {
+ goto err_out;
+ }
+ aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->len = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ spin_lock_init(&aup->lock);
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->open = au1000_open;
+ dev->hard_start_xmit = au1000_tx;
+ dev->stop = au1000_close;
+ dev->get_stats = au1000_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &au1000_ioctl;
+ SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->set_config = &au1000_set_config;
+ dev->tx_timeout = au1000_tx_timeout;
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+ /*
+ * The boot code uses the ethernet controller, so reset it to start
+ * fresh. au1000_init() expects that the device is in reset state.
+ */
+ reset_mac(dev);
+
+ return dev;
+
+err_out:
+ /* here we should have a valid dev plus aup-> register addresses
+ * so we can reset the mac properly.*/
+ reset_mac(dev);
+ if (aup->mii)
+ kfree(aup->mii);
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ if (aup->rx_db_inuse[i])
+ ReleaseDB(aup, aup->rx_db_inuse[i]);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ if (aup->tx_db_inuse[i])
+ ReleaseDB(aup, aup->tx_db_inuse[i]);
+ }
+ dma_free_noncoherent(NULL,
+ MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
+ (void *)aup->vaddr,
+ aup->dma_addr);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ return NULL;
+}
+
+/*
+ * Initialize the interface.
+ *
+ * When the device powers up, the clocks are disabled and the
+ * mac is in reset state. When the interface is closed, we
+ * do the same -- reset the device and disable the clocks to
+ * conserve power. Thus, whenever au1000_init() is called,
+ * the device should already be in reset state.
+ */
+static int au1000_init(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ u32 flags;
+ int i;
+ u32 control;
+ u16 link, speed;
+
+ if (au1000_debug > 4)
+ printk("%s: au1000_init\n", dev->name);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ /* bring the device out of reset */
+ *aup->enable = MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+ *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
+ MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(20);
+
+ aup->mac->control = 0;
+ aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
+ aup->tx_tail = aup->tx_head;
+ aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
+
+ aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
+ aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0];
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
+ }
+ au_sync();
+
+ aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
+ control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ control |= MAC_BIG_ENDIAN;
+#endif
+ if (link && (dev->if_port == IF_PORT_100BASEFX)) {
+ control |= MAC_FULL_DUPLEX;
+ }
+
+ /* fix for startup without cable */
+ if (!link)
+ dev->flags &= ~IFF_RUNNING;
+
+ aup->mac->control = control;
+ aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
+ au_sync();
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+ return 0;
+}
+
+static void au1000_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ unsigned char if_port;
+ u16 link, speed;
+
+ if (!dev) {
+ /* fatal error, don't restart the timer */
+ printk(KERN_ERR "au1000_timer error: NULL dev\n");
+ return;
+ }
+
+ if_port = dev->if_port;
+ if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
+ if (link) {
+ if (!(dev->flags & IFF_RUNNING)) {
+ netif_carrier_on(dev);
+ dev->flags |= IFF_RUNNING;
+ printk(KERN_INFO "%s: link up\n", dev->name);
+ }
+ }
+ else {
+ if (dev->flags & IFF_RUNNING) {
+ netif_carrier_off(dev);
+ dev->flags &= ~IFF_RUNNING;
+ dev->if_port = 0;
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+ }
+ }
+
+ if (link && (dev->if_port != if_port) &&
+ (dev->if_port != IF_PORT_UNKNOWN)) {
+ hard_stop(dev);
+ if (dev->if_port == IF_PORT_100BASEFX) {
+ printk(KERN_INFO "%s: going to full duplex\n",
+ dev->name);
+ aup->mac->control |= MAC_FULL_DUPLEX;
+ au_sync_delay(1);
+ }
+ else {
+ aup->mac->control &= ~MAC_FULL_DUPLEX;
+ au_sync_delay(1);
+ }
+ enable_rx_tx(dev);
+ }
+
+ aup->timer.expires = RUN_AT((1*HZ));
+ aup->timer.data = (unsigned long)dev;
+ aup->timer.function = &au1000_timer; /* timer handler */
+ add_timer(&aup->timer);
+
+}
+
+static int au1000_open(struct net_device *dev)
+{
+ int retval;
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk("%s: open: dev=%p\n", dev->name, dev);
+
+ if ((retval = au1000_init(dev))) {
+ printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+ return retval;
+ }
+ netif_start_queue(dev);
+
+ if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
+ dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
+ init_timer(&aup->timer); /* used in ioctl() */
+ aup->timer.expires = RUN_AT((3*HZ));
+ aup->timer.data = (unsigned long)dev;
+ aup->timer.function = &au1000_timer; /* timer handler */
+ add_timer(&aup->timer);
+
+ if (au1000_debug > 4)
+ printk("%s: open: Initialization done.\n", dev->name);
+
+ return 0;
+}
+
+static int au1000_close(struct net_device *dev)
+{
+ u32 flags;
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk("%s: close: dev=%p\n", dev->name, dev);
+
+ reset_mac(dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ /* stop the device */
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(dev->irq, dev);
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ return 0;
+}
+
+static void __exit au1000_cleanup_module(void)
+{
+ int i, j;
+ struct net_device *dev;
+ struct au1000_private *aup;
+
+ for (i = 0; i < num_ifs; i++) {
+ dev = iflist[i].dev;
+ if (dev) {
+ aup = (struct au1000_private *) dev->priv;
+ unregister_netdev(dev);
+ if (aup->mii)
+ kfree(aup->mii);
+ for (j = 0; j < NUM_RX_DMA; j++) {
+ if (aup->rx_db_inuse[j])
+ ReleaseDB(aup, aup->rx_db_inuse[j]);
+ }
+ for (j = 0; j < NUM_TX_DMA; j++) {
+ if (aup->tx_db_inuse[j])
+ ReleaseDB(aup, aup->tx_db_inuse[j]);
+ }
+ dma_free_noncoherent(NULL,
+ MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
+ (void *)aup->vaddr,
+ aup->dma_addr);
+ free_netdev(dev);
+ release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
+ }
+ }
+}
+
+
+static inline void
+update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ struct net_device_stats *ps = &aup->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & TX_FRAME_ABORTED) {
+ if (dev->if_port == IF_PORT_100BASEFX) {
+ if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
+ /* any other tx errors are only valid
+ * in half duplex mode */
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+ }
+ else {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
+ ps->tx_carrier_errors++;
+ }
+ }
+}
+
+
+/*
+ * Called from the interrupt service routine to acknowledge
+ * the TX DONE bits. This is a must if the irq is setup as
+ * edge triggered.
+ */
+static void au1000_tx_ack(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ volatile tx_dma_t *ptxd;
+
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ while (ptxd->buff_stat & TX_T_DONE) {
+ update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
+ ptxd->buff_stat &= ~TX_T_DONE;
+ ptxd->len = 0;
+ au_sync();
+
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+
+/*
+ * Au1000 transmit routine.
+ */
+static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ volatile tx_dma_t *ptxd;
+ u32 buff_stat;
+ db_dest_t *pDB;
+ int i;
+
+ if (au1000_debug > 5)
+ printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
+ dev->name, (unsigned)aup, skb->len,
+ skb->data, aup->tx_head);
+
+ ptxd = aup->tx_dma_ring[aup->tx_head];
+ buff_stat = ptxd->buff_stat;
+ if (buff_stat & TX_DMA_ENABLE) {
+ /* We've wrapped around and the transmitter is still busy */
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ }
+ else if (buff_stat & TX_T_DONE) {
+ update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
+ ptxd->len = 0;
+ }
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+ memcpy((void *)pDB->vaddr, skb->data, skb->len);
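+ /* pad short frames up to ETH_ZLEN so a legal
+ * minimum-size frame goes out on the wire */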
+ if (skb->len < ETH_ZLEN) {
+ for (i=skb->len; i<ETH_ZLEN; i++) {
+ ((char *)pDB->vaddr)[i] = 0;
+ }
+ ptxd->len = ETH_ZLEN;
+ }
+ else
+ ptxd->len = skb->len;
+
+ ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ au_sync();
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+
+static inline void update_rx_stats(struct net_device *dev, u32 status)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ struct net_device_stats *ps = &aup->stats;
+
+ ps->rx_packets++;
+ if (status & RX_MCAST_FRAME)
+ ps->multicast++;
+
+ if (status & RX_ERROR) {
+ ps->rx_errors++;
+ if (status & RX_MISSED_FRAME)
+ ps->rx_missed_errors++;
+ if (status & (RX_OVERLEN | RX_LEN_ERROR))
+ ps->rx_length_errors++;
+ if (status & RX_CRC_ERROR)
+ ps->rx_crc_errors++;
+ if (status & RX_COLL)
+ ps->collisions++;
+ }
+ else
+ ps->rx_bytes += status & RX_FRAME_LEN_MASK;
+
+}
+
+/*
+ * Au1000 receive routine.
+ */
+static int au1000_rx(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ struct sk_buff *skb;
+ volatile rx_dma_t *prxd;
+ u32 buff_stat, status;
+ db_dest_t *pDB;
+ u32 frmlen;
+
+ if (au1000_debug > 5)
+ printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ while (buff_stat & RX_T_DONE) {
+ status = prxd->status;
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ update_rx_stats(dev, status);
+ if (!(status & RX_ERROR)) {
+
+ /* good frame */
+ frmlen = (status & RX_FRAME_LEN_MASK);
+ frmlen -= 4; /* Remove FCS */
+ skb = dev_alloc_skb(frmlen + 2);
+ if (skb == NULL) {
+ /* drop the frame, but fall through so the
+ * descriptor below is still recycled;
+ * otherwise this loop would spin on the
+ * same buffer forever */
+ printk(KERN_ERR
+ "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ aup->stats.rx_dropped++;
+ }
+ else {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ eth_copy_and_sum(skb,
+ (unsigned char *)pDB->vaddr, frmlen, 0);
+ skb_put(skb, frmlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ }
+ }
+ else {
+ if (au1000_debug > 4) {
+ if (status & RX_MISSED_FRAME)
+ printk("rx miss\n");
+ if (status & RX_WDOG_TIMER)
+ printk("rx wdog\n");
+ if (status & RX_RUNT)
+ printk("rx runt\n");
+ if (status & RX_OVERLEN)
+ printk("rx overlen\n");
+ if (status & RX_COLL)
+ printk("rx coll\n");
+ if (status & RX_MII_ERROR)
+ printk("rx mii error\n");
+ if (status & RX_CRC_ERROR)
+ printk("rx crc error\n");
+ if (status & RX_LEN_ERROR)
+ printk("rx len error\n");
+ if (status & RX_U_CNTRL_FRAME)
+ printk("rx u control frame\n");
+ }
+ }
+ prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
+ au_sync();
+
+ /* next descriptor */
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ dev->last_rx = jiffies;
+ }
+ return 0;
+}
+
+
+/*
+ * Au1000 interrupt service routine.
+ */
+static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+
+ if (dev == NULL) {
+ /* do not dereference dev here -- it is NULL */
+ printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle RX interrupts first to minimize chance of overrun */
+
+ au1000_rx(dev);
+ au1000_tx_ack(dev);
+ return IRQ_RETVAL(1);
+}
+
+
+/*
+ * The Tx ring has been full for longer than the watchdog timeout,
+ * so the transmitter is presumed to be hung.
+ */
+static void au1000_tx_timeout(struct net_device *dev)
+{
+ printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
+ reset_mac(dev);
+ au1000_init(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+
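+/*
+ * Bit-wise, MSB-first Ethernet CRC-32. set_rx_mode() below uses the top
+ * six bits of this CRC as the index into the 64-bit multicast hash filter.
+ */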
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ?
+ ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ aup->mac->control |= MAC_PROMISCUOUS;
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ aup->mac->control |= MAC_PASS_ALL_MULTI;
+ aup->mac->control &= ~MAC_PROMISCUOUS;
+ printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+ } else {
+ int i;
+ struct dev_mc_list *mclist;
+ u32 mc_filter[2]; /* Multicast hash filter */
+
+ mc_filter[1] = mc_filter[0] = 0;
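+ /* the upper 6 bits of each address CRC pick one of the
+ * 64 bits in the hash filter */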
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+ (long *)mc_filter);
+ }
+ aup->mac->multi_hash_high = mc_filter[1];
+ aup->mac->multi_hash_low = mc_filter[0];
+ aup->mac->control &= ~MAC_PROMISCUOUS;
+ aup->mac->control |= MAC_HASH_MODE;
+ }
+}
+
+
+static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_ifru;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ case SIOCGMIIPHY:
+ if (!netif_running(dev)) return -EINVAL;
+ data[0] = aup->phy_addr;
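+ /* no break: fall through so the register named in
+ * data[1] is read back as well */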
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ case SIOCGMIIREG:
+ data[3] = mdio_read(dev, data[0], data[1]);
+ return 0;
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(dev, data[0], data[1],data[2]);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+}
+
+
+static int au1000_set_config(struct net_device *dev, struct ifmap *map)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ u16 control;
+
+ if (au1000_debug > 4) {
+ printk("%s: set_config called: dev->if_port %d map->port %x\n",
+ dev->name, dev->if_port, map->port);
+ }
+
+ switch(map->port){
+ case IF_PORT_UNKNOWN: /* use auto here */
+ printk(KERN_INFO "%s: config phy for aneg\n",
+ dev->name);
+ dev->if_port = map->port;
+ /* Link Down: the timer will bring it up */
+ netif_carrier_off(dev);
+
+ /* read current control */
+ control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
+ control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
+
+ /* enable auto negotiation and reset the negotiation */
+ mdio_write(dev, aup->phy_addr, MII_CONTROL,
+ control | MII_CNTL_AUTO |
+ MII_CNTL_RST_AUTO);
+
+ break;
+
+ case IF_PORT_10BASET: /* 10BaseT */
+ printk(KERN_INFO "%s: config phy for 10BaseT\n",
+ dev->name);
+ dev->if_port = map->port;
+
+ /* Link Down: the timer will bring it up */
+ netif_carrier_off(dev);
+
+ /* set Speed to 10Mbps, Half Duplex */
+ control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
+ control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
+ MII_CNTL_FDX);
+
+ /* disable auto negotiation and force 10M/HD mode*/
+ mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
+ break;
+
+ case IF_PORT_100BASET: /* 100BaseT */
+ case IF_PORT_100BASETX: /* 100BaseTx */
+ printk(KERN_INFO "%s: config phy for 100BaseTX\n",
+ dev->name);
+ dev->if_port = map->port;
+
+ /* Link Down: the timer will bring it up */
+ netif_carrier_off(dev);
+
+ /* set Speed to 100Mbps, Half Duplex */
+ /* disable auto negotiation and enable 100MBit Mode */
+ control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
+ control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
+ control |= MII_CNTL_F100;
+ mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
+ break;
+
+ case IF_PORT_100BASEFX: /* 100BaseFx */
+ printk(KERN_INFO "%s: config phy for 100BaseFX\n",
+ dev->name);
+ dev->if_port = map->port;
+
+ /* Link Down: the timer will bring it up */
+ netif_carrier_off(dev);
+
+ /* set Speed to 100Mbps, Full Duplex */
+ /* disable auto negotiation and enable 100MBit Mode */
+ control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
+ control &= ~MII_CNTL_AUTO;
+ control |= MII_CNTL_F100 | MII_CNTL_FDX;
+ mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
+ break;
+ case IF_PORT_10BASE2: /* 10Base2 */
+ case IF_PORT_AUI: /* AUI */
+ /* These modes are not supported (are they?) */
+ printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
+ dev->name);
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: Invalid media selected",
+ dev->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct net_device_stats *au1000_get_stats(struct net_device *dev)
+{
+ struct au1000_private *aup = (struct au1000_private *) dev->priv;
+
+ if (au1000_debug > 4)
+ printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
+
+ if (netif_device_present(dev)) {
+ return &aup->stats;
+ }
+ return 0;
+}
+
+module_init(au1000_init_module);
+module_exit(au1000_cleanup_module);
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
new file mode 100644
index 000000000000..7f9326e39cc0
--- /dev/null
+++ b/drivers/net/au1000_eth.h
@@ -0,0 +1,236 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver include file
+ *
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ *
+ */
+
+
+#define MAC_IOSIZE 0x10000
+#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */
+#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */
+
+#define NUM_RX_BUFFS 4
+#define NUM_TX_BUFFS 4
+#define MAX_BUF_SIZE 2048
+
+#define ETH_TX_TIMEOUT (HZ/4)
+#define MAC_MIN_PKT_SIZE 64
+
+#define MULTICAST_FILTER_LIMIT 64
+
+/* FIXME
+ * The PHY defines should be in a separate file.
+ */
+
+/* MII register offsets */
+#define MII_CONTROL 0x0000
+#define MII_STATUS 0x0001
+#define MII_PHY_ID0 0x0002
+#define MII_PHY_ID1 0x0003
+#define MII_ANADV 0x0004
+#define MII_ANLPAR 0x0005
+#define MII_AEXP 0x0006
+#define MII_ANEXT 0x0007
+#define MII_LSI_PHY_CONFIG 0x0011
+/* Status register */
+#define MII_LSI_PHY_STAT 0x0012
+#define MII_AMD_PHY_STAT MII_LSI_PHY_STAT
+#define MII_INTEL_PHY_STAT 0x0011
+
+#define MII_AUX_CNTRL 0x0018
+/* mii registers specific to AMD 79C901 */
+#define MII_STATUS_SUMMARY 0x0018
+
+/* MII Control register bit definitions. */
+#define MII_CNTL_FDX 0x0100
+#define MII_CNTL_RST_AUTO 0x0200
+#define MII_CNTL_ISOLATE 0x0400
+#define MII_CNTL_PWRDWN 0x0800
+#define MII_CNTL_AUTO 0x1000
+#define MII_CNTL_F100 0x2000
+#define MII_CNTL_LPBK 0x4000
+#define MII_CNTL_RESET 0x8000
+
+/* MII Status register bit */
+#define MII_STAT_EXT 0x0001
+#define MII_STAT_JAB 0x0002
+#define MII_STAT_LINK 0x0004
+#define MII_STAT_CAN_AUTO 0x0008
+#define MII_STAT_FAULT 0x0010
+#define MII_STAT_AUTO_DONE 0x0020
+#define MII_STAT_CAN_T 0x0800
+#define MII_STAT_CAN_T_FDX 0x1000
+#define MII_STAT_CAN_TX 0x2000
+#define MII_STAT_CAN_TX_FDX 0x4000
+#define MII_STAT_CAN_T4 0x8000
+
+
+#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
+#define MII_ID1_MODEL 0x03F0 /* model number */
+#define MII_ID1_REV 0x000F /* revision number */
+
+/* MII NWAY Register Bits ...
+ valid for the ANAR (Auto-Negotiation Advertisement) and
+ ANLPAR (Auto-Negotiation Link Partner) registers */
+#define MII_NWAY_NODE_SEL 0x001f
+#define MII_NWAY_CSMA_CD 0x0001
+#define MII_NWAY_T 0x0020
+#define MII_NWAY_T_FDX 0x0040
+#define MII_NWAY_TX 0x0080
+#define MII_NWAY_TX_FDX 0x0100
+#define MII_NWAY_T4 0x0200
+#define MII_NWAY_PAUSE 0x0400
+#define MII_NWAY_RF 0x2000 /* Remote Fault */
+#define MII_NWAY_ACK 0x4000 /* Remote Acknowledge */
+#define MII_NWAY_NP 0x8000 /* Next Page (Enable) */
+
+/* mii stsout register bits */
+#define MII_STSOUT_LINK_FAIL 0x4000
+#define MII_STSOUT_SPD 0x0080
+#define MII_STSOUT_DPLX 0x0040
+
+/* mii stsics register bits */
+#define MII_STSICS_SPD 0x8000
+#define MII_STSICS_DPLX 0x4000
+#define MII_STSICS_LINKSTS 0x0001
+
+/* mii stssum register bits */
+#define MII_STSSUM_LINK 0x0008
+#define MII_STSSUM_DPLX 0x0004
+#define MII_STSSUM_AUTO 0x0002
+#define MII_STSSUM_SPD 0x0001
+
+/* lsi phy status register */
+#define MII_LSI_PHY_STAT_FDX 0x0040
+#define MII_LSI_PHY_STAT_SPD 0x0080
+
+/* amd phy status register */
+#define MII_AMD_PHY_STAT_FDX 0x0800
+#define MII_AMD_PHY_STAT_SPD 0x0400
+
+/* intel phy status register */
+#define MII_INTEL_PHY_STAT_FDX 0x0200
+#define MII_INTEL_PHY_STAT_SPD 0x4000
+
+/* Auxiliary Control/Status Register */
+#define MII_AUX_FDX 0x0001
+#define MII_AUX_100 0x0002
+#define MII_AUX_F100 0x0004
+#define MII_AUX_ANEG 0x0008
+
+typedef struct mii_phy {
+ struct mii_phy * next;
+ struct mii_chip_info * chip_info;
+ u16 status;
+ u32 *mii_control_reg;
+ u32 *mii_data_reg;
+} mii_phy_t;
+
+struct phy_ops {
+ int (*phy_init) (struct net_device *, int);
+ int (*phy_reset) (struct net_device *, int);
+ int (*phy_status) (struct net_device *, int, u16 *, u16 *);
+};
+
+/*
+ * Data Buffer Descriptor. Data buffers must be aligned on a 32 byte
+ * boundary for both receive and transmit.
+ */
+typedef struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+} db_dest_t;
+
+/*
+ * The transmit and receive descriptors are memory
+ * mapped registers.
+ */
+typedef struct tx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 len;
+ u32 pad;
+} tx_dma_t;
+
+typedef struct rx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 pad[2];
+} rx_dma_t;
+
+
+/*
+ * MAC control registers, memory mapped.
+ */
+typedef struct mac_reg {
+ u32 control;
+ u32 mac_addr_high;
+ u32 mac_addr_low;
+ u32 multi_hash_high;
+ u32 multi_hash_low;
+ u32 mii_control;
+ u32 mii_data;
+ u32 flow_control;
+ u32 vlan1_tag;
+ u32 vlan2_tag;
+} mac_reg_t;
+
+
+struct au1000_private {
+
+ db_dest_t *pDBfree;
+ db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
+ volatile tx_dma_t *tx_dma_ring[NUM_TX_DMA];
+ db_dest_t *rx_db_inuse[NUM_RX_DMA];
+ db_dest_t *tx_db_inuse[NUM_TX_DMA];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ int mac_id;
+ mii_phy_t *mii;
+ struct phy_ops *phy_ops;
+
+ /* These variables are just for quick access to certain regs addresses. */
+ volatile mac_reg_t *mac; /* mac registers */
+ volatile u32 *enable; /* address of MAC Enable Register */
+
+ u32 vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+
+ u8 *hash_table;
+ u32 hash_mode;
+ u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
+ int phy_addr; /* phy address */
+ u32 options; /* User-settable misc. driver options. */
+ u32 drv_flags;
+ int want_autoneg;
+ struct net_device_stats stats;
+ struct timer_list timer;
+ spinlock_t lock; /* Serialise access to device */
+};
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
new file mode 100644
index 000000000000..3fe8ba992c38
--- /dev/null
+++ b/drivers/net/b44.c
@@ -0,0 +1,1978 @@
+/* b44.c: Broadcom 4400 device driver.
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
+ *
+ * Distribute under GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/version.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "b44.h"
+
+#define DRV_MODULE_NAME "b44"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "0.95"
+#define DRV_MODULE_RELDATE "Aug 3, 2004"
+
+#define B44_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define B44_TX_TIMEOUT (5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define B44_MIN_MTU 60
+#define B44_MAX_MTU 1500
+
+#define B44_RX_RING_SIZE 512
+#define B44_DEF_RX_RING_PENDING 200
+#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
+ B44_RX_RING_SIZE)
+#define B44_TX_RING_SIZE 512
+#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
+#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
+ B44_TX_RING_SIZE)
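+/* The 4400 can only DMA to/from the first 1GB of address space (30-bit mask);
+ * see the bounce-buffer handling in b44_alloc_rx_skb() and b44_start_xmit().
+ */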
+#define B44_DMA_MASK 0x3fffffff
+
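+/* TX_RING_GAP is the part of the ring left unused when tx_pending is less
+ * than the ring size; TX_BUFFS_AVAIL is the number of free tx descriptors,
+ * handling producer/consumer index wrap-around.
+ */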
+#define TX_RING_GAP(BP) \
+ (B44_TX_RING_SIZE - (BP)->tx_pending)
+#define TX_BUFFS_AVAIL(BP) \
+ (((BP)->tx_cons <= (BP)->tx_prod) ? \
+ (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
+ (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
+#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
+
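+/* rx buffers hold a 1536 byte frame plus the rx_header area the chip
+ * prepends (bp->rx_offset bytes) and some slack */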
+#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
+#define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
+MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
+module_param(b44_debug, int, 0);
+MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
+
+static struct pci_device_id b44_pci_tbl[] = {
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { } /* terminate list with empty entry */
+};
+
+MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
+
+static void b44_halt(struct b44 *);
+static void b44_init_rings(struct b44 *);
+static void b44_init_hw(struct b44 *);
+static int b44_poll(struct net_device *dev, int *budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void b44_poll_controller(struct net_device *dev);
+#endif
+
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+ return readl(bp->regs + reg);
+}
+
+static inline void bw32(const struct b44 *bp,
+ unsigned long reg, unsigned long val)
+{
+ writel(val, bp->regs + reg);
+}
+
+static int b44_wait_bit(struct b44 *bp, unsigned long reg,
+ u32 bit, unsigned long timeout, const int clear)
+{
+ unsigned long i;
+
+ for (i = 0; i < timeout; i++) {
+ u32 val = br32(bp, reg);
+
+ if (clear && !(val & bit))
+ break;
+ if (!clear && (val & bit))
+ break;
+ udelay(10);
+ }
+ if (i == timeout) {
+ printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
+ "%lx to %s.\n",
+ bp->dev->name,
+ bit, reg,
+ (clear ? "clear" : "set"));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* Sonics SiliconBackplane support routines. ROFL, you should see all the
+ * buzz words used on this company's website :-)
+ *
+ * All of these routines must be invoked with bp->lock held and
+ * interrupts disabled.
+ */
+
+#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
+#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
+
+static u32 ssb_get_core_rev(struct b44 *bp)
+{
+ return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
+}
+
+static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
+{
+ u32 bar_orig, pci_rev, val;
+
+ pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
+ pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
+ pci_rev = ssb_get_core_rev(bp);
+
+ val = br32(bp, B44_SBINTVEC);
+ val |= cores;
+ bw32(bp, B44_SBINTVEC, val);
+
+ val = br32(bp, SSB_PCI_TRANS_2);
+ val |= SSB_PCI_PREF | SSB_PCI_BURST;
+ bw32(bp, SSB_PCI_TRANS_2, val);
+
+ pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
+
+ return pci_rev;
+}
+
+static void ssb_core_disable(struct b44 *bp)
+{
+ if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
+ return;
+
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
+ b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
+ b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
+ SBTMSLOW_REJECT | SBTMSLOW_RESET));
+ br32(bp, B44_SBTMSLOW);
+ udelay(1);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
+ br32(bp, B44_SBTMSLOW);
+ udelay(1);
+}
+
+static void ssb_core_reset(struct b44 *bp)
+{
+ u32 val;
+
+ ssb_core_disable(bp);
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+ br32(bp, B44_SBTMSLOW);
+ udelay(1);
+
+ /* Clear SERR if set, this is a hw bug workaround. */
+ if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
+ bw32(bp, B44_SBTMSHIGH, 0);
+
+ val = br32(bp, B44_SBIMSTATE);
+ if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
+ bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
+
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+ br32(bp, B44_SBTMSLOW);
+ udelay(1);
+
+ bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
+ br32(bp, B44_SBTMSLOW);
+ udelay(1);
+}
+
+static int ssb_core_unit(struct b44 *bp)
+{
+#if 0
+ u32 val = br32(bp, B44_SBADMATCH0);
+ u32 base;
+
+ type = val & SBADMATCH0_TYPE_MASK;
+ switch (type) {
+ case 0:
+ base = val & SBADMATCH0_BS0_MASK;
+ break;
+
+ case 1:
+ base = val & SBADMATCH0_BS1_MASK;
+ break;
+
+ case 2:
+ default:
+ base = val & SBADMATCH0_BS2_MASK;
+ break;
+ };
+#endif
+ return 0;
+}
+
+static int ssb_is_core_up(struct b44 *bp)
+{
+ return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
+ == SBTMSLOW_CLOCK);
+}
+
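+/* Load one MAC address into CAM entry 'index': bytes 2-5 of the address go
+ * into the low data word, bytes 0-1 plus the valid bit into the high word.
+ * Caller must hold bp->lock.
+ */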
+static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+{
+ u32 val;
+
+ val = ((u32) data[2]) << 24;
+ val |= ((u32) data[3]) << 16;
+ val |= ((u32) data[4]) << 8;
+ val |= ((u32) data[5]) << 0;
+ bw32(bp, B44_CAM_DATA_LO, val);
+ val = (CAM_DATA_HI_VALID |
+ (((u32) data[0]) << 8) |
+ (((u32) data[1]) << 0));
+ bw32(bp, B44_CAM_DATA_HI, val);
+ bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
+ (index << CAM_CTRL_INDEX_SHIFT)));
+ b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+}
+
+static inline void __b44_disable_ints(struct b44 *bp)
+{
+ bw32(bp, B44_IMASK, 0);
+}
+
+static void b44_disable_ints(struct b44 *bp)
+{
+ __b44_disable_ints(bp);
+
+ /* Flush posted writes. */
+ br32(bp, B44_IMASK);
+}
+
+static void b44_enable_ints(struct b44 *bp)
+{
+ bw32(bp, B44_IMASK, bp->imask);
+}
+
+static int b44_readphy(struct b44 *bp, int reg, u32 *val)
+{
+ int err;
+
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
+ (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
+ (reg << MDIO_DATA_RA_SHIFT) |
+ (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
+ err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+ *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
+
+ return err;
+}
+
+static int b44_writephy(struct b44 *bp, int reg, u32 val)
+{
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
+ (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
+ (reg << MDIO_DATA_RA_SHIFT) |
+ (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
+ (val & MDIO_DATA_DATA)));
+ return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+}
+
+/* miilib interface */
+/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
+ * due to code existing before miilib use was added to this driver.
+ * Someone should remove this artificial driver limitation in
+ * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
+ */
+static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = netdev_priv(dev);
+ int rc = b44_readphy(bp, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static void b44_mii_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ struct b44 *bp = netdev_priv(dev);
+ b44_writephy(bp, location, val);
+}
+
+static int b44_phy_reset(struct b44 *bp)
+{
+ u32 val;
+ int err;
+
+ err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
+ if (err)
+ return err;
+ udelay(100);
+ err = b44_readphy(bp, MII_BMCR, &val);
+ if (!err) {
+ if (val & BMCR_RESET) {
+ printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
+ bp->dev->name);
+ err = -ENODEV;
+ }
+ }
+
+ return err;
+}
+
+static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
+{
+ u32 val;
+
+ bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
+ bp->flags |= pause_flags;
+
+ val = br32(bp, B44_RXCONFIG);
+ if (pause_flags & B44_FLAG_RX_PAUSE)
+ val |= RXCONFIG_FLOW;
+ else
+ val &= ~RXCONFIG_FLOW;
+ bw32(bp, B44_RXCONFIG, val);
+
+ val = br32(bp, B44_MAC_FLOW);
+ if (pause_flags & B44_FLAG_TX_PAUSE)
+ val |= (MAC_FLOW_PAUSE_ENAB |
+ (0xc0 & MAC_FLOW_RX_HI_WATER));
+ else
+ val &= ~MAC_FLOW_PAUSE_ENAB;
+ bw32(bp, B44_MAC_FLOW, val);
+}
+
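+/* Derive the tx/rx pause settings from our advertised abilities and the
+ * link partner's, following the usual IEEE 802.3 flow control
+ * resolution rules.
+ */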
+static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
+{
+ u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
+ B44_FLAG_RX_PAUSE);
+
+ if (local & ADVERTISE_PAUSE_CAP) {
+ if (local & ADVERTISE_PAUSE_ASYM) {
+ if (remote & LPA_PAUSE_CAP)
+ pause_enab |= (B44_FLAG_TX_PAUSE |
+ B44_FLAG_RX_PAUSE);
+ else if (remote & LPA_PAUSE_ASYM)
+ pause_enab |= B44_FLAG_RX_PAUSE;
+ } else {
+ if (remote & LPA_PAUSE_CAP)
+ pause_enab |= (B44_FLAG_TX_PAUSE |
+ B44_FLAG_RX_PAUSE);
+ }
+ } else if (local & ADVERTISE_PAUSE_ASYM) {
+ if ((remote & LPA_PAUSE_CAP) &&
+ (remote & LPA_PAUSE_ASYM))
+ pause_enab |= B44_FLAG_TX_PAUSE;
+ }
+
+ __b44_set_flow_ctrl(bp, pause_enab);
+}
+
+static int b44_setup_phy(struct b44 *bp)
+{
+ u32 val;
+ int err;
+
+ if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
+ val & MII_ALEDCTRL_ALLMSK)) != 0)
+ goto out;
+ if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
+ val | MII_TLEDCTRL_ENABLE)) != 0)
+ goto out;
+
+ if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
+ u32 adv = ADVERTISE_CSMA;
+
+ if (bp->flags & B44_FLAG_ADV_10HALF)
+ adv |= ADVERTISE_10HALF;
+ if (bp->flags & B44_FLAG_ADV_10FULL)
+ adv |= ADVERTISE_10FULL;
+ if (bp->flags & B44_FLAG_ADV_100HALF)
+ adv |= ADVERTISE_100HALF;
+ if (bp->flags & B44_FLAG_ADV_100FULL)
+ adv |= ADVERTISE_100FULL;
+
+ if (bp->flags & B44_FLAG_PAUSE_AUTO)
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
+ BMCR_ANRESTART))) != 0)
+ goto out;
+ } else {
+ u32 bmcr;
+
+ if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
+ goto out;
+ bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
+ if (bp->flags & B44_FLAG_100_BASE_T)
+ bmcr |= BMCR_SPEED100;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ bmcr |= BMCR_FULLDPLX;
+ if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
+ goto out;
+
+ /* Since we will not be negotiating there is no safe way
+ * to determine if the link partner supports flow control
+ * or not. So just disable it completely in this case.
+ */
+ b44_set_flow_ctrl(bp, 0, 0);
+ }
+
+out:
+ return err;
+}
+
+static void b44_stats_update(struct b44 *bp)
+{
+ unsigned long reg;
+ u32 *val;
+
+ val = &bp->hw_stats.tx_good_octets;
+ for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
+ *val++ += br32(bp, reg);
+ }
+ val = &bp->hw_stats.rx_good_octets;
+ for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
+ *val++ += br32(bp, reg);
+ }
+}
+
+static void b44_link_report(struct b44 *bp)
+{
+ if (!netif_carrier_ok(bp->dev)) {
+ printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
+ } else {
+ printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
+ bp->dev->name,
+ (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+ (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+ printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
+ "%s for RX.\n",
+ bp->dev->name,
+ (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+ (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+ }
+}
+
+static void b44_check_phy(struct b44 *bp)
+{
+ u32 bmsr, aux;
+
+ if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
+ !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
+ (bmsr != 0xffff)) {
+ if (aux & MII_AUXCTRL_SPEED)
+ bp->flags |= B44_FLAG_100_BASE_T;
+ else
+ bp->flags &= ~B44_FLAG_100_BASE_T;
+ if (aux & MII_AUXCTRL_DUPLEX)
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ else
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+
+ if (!netif_carrier_ok(bp->dev) &&
+ (bmsr & BMSR_LSTATUS)) {
+ u32 val = br32(bp, B44_TX_CTRL);
+ u32 local_adv, remote_adv;
+
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
+
+ if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
+ !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
+ !b44_readphy(bp, MII_LPA, &remote_adv))
+ b44_set_flow_ctrl(bp, local_adv, remote_adv);
+
+ /* Link now up */
+ netif_carrier_on(bp->dev);
+ b44_link_report(bp);
+ } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
+ /* Link now down */
+ netif_carrier_off(bp->dev);
+ b44_link_report(bp);
+ }
+
+ if (bmsr & BMSR_RFAULT)
+ printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
+ bp->dev->name);
+ if (bmsr & BMSR_JCD)
+ printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
+ bp->dev->name);
+ }
+}
+
+static void b44_timer(unsigned long __opaque)
+{
+ struct b44 *bp = (struct b44 *) __opaque;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_check_phy(bp);
+
+ b44_stats_update(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ bp->timer.expires = jiffies + HZ;
+ add_timer(&bp->timer);
+}
+
+static void b44_tx(struct b44 *bp)
+{
+ u32 cur, cons;
+
+ cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+ cur /= sizeof(struct dma_desc);
+
+ /* XXX needs updating when NETIF_F_SG is supported */
+ for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
+ struct ring_info *rp = &bp->tx_buffers[cons];
+ struct sk_buff *skb = rp->skb;
+
+ if (unlikely(skb == NULL))
+ BUG();
+
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(rp, mapping),
+ skb->len,
+ PCI_DMA_TODEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ bp->tx_cons = cons;
+ if (netif_queue_stopped(bp->dev) &&
+ TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
+ netif_wake_queue(bp->dev);
+
+ bw32(bp, B44_GPTIMER, 0);
+}
+
+/* Works like this. This chip writes a "struct rx_header" 30 bytes
+ * before the DMA address you give it. So we allocate 30 more bytes
+ * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
+ * point the chip at 30 bytes past where the rx_header will go.
+ */
+static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+ struct dma_desc *dp;
+ struct ring_info *src_map, *map;
+ struct rx_header *rh;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int dest_idx;
+ u32 ctrl;
+
+ src_map = NULL;
+ if (src_idx >= 0)
+ src_map = &bp->rx_buffers[src_idx];
+ dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+ map = &bp->rx_buffers[dest_idx];
+ skb = dev_alloc_skb(RX_PKT_BUF_SZ);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ mapping = pci_map_single(bp->pdev, skb->data,
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+
+ /* Hardware bug work-around, the chip is unable to do PCI DMA
+ to/from anything above 1GB :-( */
+ if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ /* Sigh... */
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+ mapping = pci_map_single(bp->pdev, skb->data,
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+
+ skb->dev = bp->dev;
+ skb_reserve(skb, bp->rx_offset);
+
+ rh = (struct rx_header *)
+ (skb->data - bp->rx_offset);
+ rh->len = 0;
+ rh->flags = 0;
+
+ map->skb = skb;
+ pci_unmap_addr_set(map, mapping, mapping);
+
+ if (src_map != NULL)
+ src_map->skb = NULL;
+
+ ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
+ if (dest_idx == (B44_RX_RING_SIZE - 1))
+ ctrl |= DESC_CTRL_EOT;
+
+ dp = &bp->rx_ring[dest_idx];
+ dp->ctrl = cpu_to_le32(ctrl);
+ dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+
+ return RX_PKT_BUF_SZ;
+}
+
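+/* Re-post the skb already attached to src_idx at dest_idx without
+ * allocating a new buffer; used when an errored or copied frame is
+ * dropped back into the rx ring.
+ */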
+static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+ struct dma_desc *src_desc, *dest_desc;
+ struct ring_info *src_map, *dest_map;
+ struct rx_header *rh;
+ int dest_idx;
+ u32 ctrl;
+
+ dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+ dest_desc = &bp->rx_ring[dest_idx];
+ dest_map = &bp->rx_buffers[dest_idx];
+ src_desc = &bp->rx_ring[src_idx];
+ src_map = &bp->rx_buffers[src_idx];
+
+ dest_map->skb = src_map->skb;
+ rh = (struct rx_header *) src_map->skb->data;
+ rh->len = 0;
+ rh->flags = 0;
+ pci_unmap_addr_set(dest_map, mapping,
+ pci_unmap_addr(src_map, mapping));
+
+ ctrl = src_desc->ctrl;
+ if (dest_idx == (B44_RX_RING_SIZE - 1))
+ ctrl |= cpu_to_le32(DESC_CTRL_EOT);
+ else
+ ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
+
+ dest_desc->ctrl = ctrl;
+ dest_desc->addr = src_desc->addr;
+ src_map->skb = NULL;
+
+ pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+}
+
+static int b44_rx(struct b44 *bp, int budget)
+{
+ int received;
+ u32 cons, prod;
+
+ received = 0;
+ prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+ prod /= sizeof(struct dma_desc);
+ cons = bp->rx_cons;
+
+ while (cons != prod && budget > 0) {
+ struct ring_info *rp = &bp->rx_buffers[cons];
+ struct sk_buff *skb = rp->skb;
+ dma_addr_t map = pci_unmap_addr(rp, mapping);
+ struct rx_header *rh;
+ u16 len;
+
+ pci_dma_sync_single_for_cpu(bp->pdev, map,
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ rh = (struct rx_header *) skb->data;
+ len = le16_to_cpu(rh->len);
+ if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
+ (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
+ drop_it:
+ b44_recycle_rx(bp, cons, bp->rx_prod);
+ drop_it_no_recycle:
+ bp->stats.rx_dropped++;
+ goto next_pkt;
+ }
+
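+ /* rh->len may still be zero right after the descriptor
+ * completes; give the chip a few microseconds to fill it
+ * in before giving up and dropping the frame.
+ */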
+ if (len == 0) {
+ int i = 0;
+
+ do {
+ udelay(2);
+ barrier();
+ len = le16_to_cpu(rh->len);
+ } while (len == 0 && i++ < 5);
+ if (len == 0)
+ goto drop_it;
+ }
+
+ /* Omit CRC. */
+ len -= 4;
+
+ if (len > RX_COPY_THRESHOLD) {
+ int skb_size;
+ skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+ if (skb_size < 0)
+ goto drop_it;
+ pci_unmap_single(bp->pdev, map,
+ skb_size, PCI_DMA_FROMDEVICE);
+ /* Leave out rx_header */
+ skb_put(skb, len+bp->rx_offset);
+ skb_pull(skb,bp->rx_offset);
+ } else {
+ struct sk_buff *copy_skb;
+
+ b44_recycle_rx(bp, cons, bp->rx_prod);
+ copy_skb = dev_alloc_skb(len + 2);
+ if (copy_skb == NULL)
+ goto drop_it_no_recycle;
+
+ copy_skb->dev = bp->dev;
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ /* DMA sync done above, copy just the actual packet */
+ memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
+
+ skb = copy_skb;
+ }
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ netif_receive_skb(skb);
+ bp->dev->last_rx = jiffies;
+ received++;
+ budget--;
+ next_pkt:
+ bp->rx_prod = (bp->rx_prod + 1) &
+ (B44_RX_RING_SIZE - 1);
+ cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
+ }
+
+ bp->rx_cons = cons;
+ bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+
+ return received;
+}
+
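+/* NAPI poll callback: reap tx completions, receive at most *budget packets,
+ * recover from error interrupts, and re-enable interrupts once all
+ * outstanding work has been handled.
+ */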
+static int b44_poll(struct net_device *netdev, int *budget)
+{
+ struct b44 *bp = netdev_priv(netdev);
+ int done;
+
+ spin_lock_irq(&bp->lock);
+
+ if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
+ /* spin_lock(&bp->tx_lock); */
+ b44_tx(bp);
+ /* spin_unlock(&bp->tx_lock); */
+ }
+ spin_unlock_irq(&bp->lock);
+
+ done = 1;
+ if (bp->istat & ISTAT_RX) {
+ int orig_budget = *budget;
+ int work_done;
+
+ if (orig_budget > netdev->quota)
+ orig_budget = netdev->quota;
+
+ work_done = b44_rx(bp, orig_budget);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ if (work_done >= orig_budget)
+ done = 0;
+ }
+
+ if (bp->istat & ISTAT_ERRORS) {
+ spin_lock_irq(&bp->lock);
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ netif_wake_queue(bp->dev);
+ spin_unlock_irq(&bp->lock);
+ done = 1;
+ }
+
+ if (done) {
+ netif_rx_complete(netdev);
+ b44_enable_ints(bp);
+ }
+
+ return (done ? 0 : 1);
+}
+
+static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct b44 *bp = netdev_priv(dev);
+ unsigned long flags;
+ u32 istat, imask;
+ int handled = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ istat = br32(bp, B44_ISTAT);
+ imask = br32(bp, B44_IMASK);
+
+ /* ??? What is the purpose of the interrupt mask
+ * ??? register if we have to mask it out by hand anyway?
+ */
+ istat &= imask;
+ if (istat) {
+ handled = 1;
+ if (netif_rx_schedule_prep(dev)) {
+ /* NOTE: These writes are posted by the readback of
+ * the ISTAT register below.
+ */
+ bp->istat = istat;
+ __b44_disable_ints(bp);
+ __netif_rx_schedule(dev);
+ } else {
+ printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
+ dev->name);
+ }
+
+ bw32(bp, B44_ISTAT, istat);
+ br32(bp, B44_ISTAT);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static void b44_tx_timeout(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+ dev->name);
+
+ spin_lock_irq(&bp->lock);
+
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ netif_wake_queue(dev);
+}
+
+static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct sk_buff *bounce_skb;
+ dma_addr_t mapping;
+ u32 len, entry, ctrl;
+
+ len = skb->len;
+ spin_lock_irq(&bp->lock);
+
+ /* This is a hard error, log it. */
+ if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&bp->lock);
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return 1;
+ }
+
+ mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if(mapping+len > B44_DMA_MASK) {
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+ pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+
+ bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
+ GFP_ATOMIC|GFP_DMA);
+ if (!bounce_skb)
+ return NETDEV_TX_BUSY;
+
+ mapping = pci_map_single(bp->pdev, bounce_skb->data,
+ len, PCI_DMA_TODEVICE);
+ if(mapping+len > B44_DMA_MASK) {
+ pci_unmap_single(bp->pdev, mapping,
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ skb = bounce_skb;
+ }
+
+ entry = bp->tx_prod;
+ bp->tx_buffers[entry].skb = skb;
+ pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
+
+ ctrl = (len & DESC_CTRL_LEN);
+ ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
+ if (entry == (B44_TX_RING_SIZE - 1))
+ ctrl |= DESC_CTRL_EOT;
+
+ bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
+ bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+
+ entry = NEXT_TX(entry);
+
+ bp->tx_prod = entry;
+
+ wmb();
+
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ if (bp->flags & B44_FLAG_BUGGY_TXPTR)
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ if (bp->flags & B44_FLAG_REORDER_BUG)
+ br32(bp, B44_DMATX_PTR);
+
+ if (TX_BUFFS_AVAIL(bp) < 1)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&bp->lock);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int b44_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ /* We'll just catch it later when the
+ * device is brought up.
+ */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ spin_lock_irq(&bp->lock);
+ b44_halt(bp);
+ dev->mtu = new_mtu;
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. bp->lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void b44_free_rings(struct b44 *bp)
+{
+ struct ring_info *rp;
+ int i;
+
+ for (i = 0; i < B44_RX_RING_SIZE; i++) {
+ rp = &bp->rx_buffers[i];
+
+ if (rp->skb == NULL)
+ continue;
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(rp, mapping),
+ RX_PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+
+ /* XXX needs changes once NETIF_F_SG is set... */
+ for (i = 0; i < B44_TX_RING_SIZE; i++) {
+ rp = &bp->tx_buffers[i];
+
+ if (rp->skb == NULL)
+ continue;
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(rp, mapping),
+ rp->skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. bp->lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void b44_init_rings(struct b44 *bp)
+{
+ int i;
+
+ b44_free_rings(bp);
+
+ memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
+ memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+ for (i = 0; i < bp->rx_pending; i++) {
+ if (b44_alloc_rx_skb(bp, -1, i) < 0)
+ break;
+ }
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void b44_free_consistent(struct b44 *bp)
+{
+ if (bp->rx_buffers) {
+ kfree(bp->rx_buffers);
+ bp->rx_buffers = NULL;
+ }
+ if (bp->tx_buffers) {
+ kfree(bp->tx_buffers);
+ bp->tx_buffers = NULL;
+ }
+ if (bp->rx_ring) {
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ }
+ if (bp->tx_ring) {
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ }
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down. Can sleep.
+ */
+static int b44_alloc_consistent(struct b44 *bp)
+{
+ int size;
+
+ size = B44_RX_RING_SIZE * sizeof(struct ring_info);
+ bp->rx_buffers = kmalloc(size, GFP_KERNEL);
+ if (!bp->rx_buffers)
+ goto out_err;
+ memset(bp->rx_buffers, 0, size);
+
+ size = B44_TX_RING_SIZE * sizeof(struct ring_info);
+ bp->tx_buffers = kmalloc(size, GFP_KERNEL);
+ if (!bp->tx_buffers)
+ goto out_err;
+ memset(bp->tx_buffers, 0, size);
+
+ size = DMA_TABLE_BYTES;
+ bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
+ if (!bp->rx_ring)
+ goto out_err;
+
+ bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
+ if (!bp->tx_ring)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ b44_free_consistent(bp);
+ return -ENOMEM;
+}
+
+/* bp->lock is held. */
+static void b44_clear_stats(struct b44 *bp)
+{
+ unsigned long reg;
+
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
+ br32(bp, reg);
+ for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
+ br32(bp, reg);
+}
+
+/* bp->lock is held. */
+static void b44_chip_reset(struct b44 *bp)
+{
+ if (ssb_is_core_up(bp)) {
+ bw32(bp, B44_RCV_LAZY, 0);
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
+ b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
+ bw32(bp, B44_DMATX_CTRL, 0);
+ bp->tx_prod = bp->tx_cons = 0;
+ if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+ b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
+ 100, 0);
+ }
+ bw32(bp, B44_DMARX_CTRL, 0);
+ bp->rx_prod = bp->rx_cons = 0;
+ } else {
+ ssb_pci_setup(bp, (bp->core_unit == 0 ?
+ SBINTVEC_ENET0 :
+ SBINTVEC_ENET1));
+ }
+
+ ssb_core_reset(bp);
+
+ b44_clear_stats(bp);
+
+ /* Make PHY accessible. */
+ bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+ (0x0d & MDIO_CTRL_MAXF_MASK)));
+ br32(bp, B44_MDIO_CTRL);
+
+ if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+ br32(bp, B44_ENET_CTRL);
+ bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ } else {
+ u32 val = br32(bp, B44_DEVCTRL);
+
+ if (val & DEVCTRL_EPR) {
+ bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+ br32(bp, B44_DEVCTRL);
+ udelay(100);
+ }
+ bp->flags |= B44_FLAG_INTERNAL_PHY;
+ }
+}
+
+/* bp->lock is held. */
+static void b44_halt(struct b44 *bp)
+{
+ b44_disable_ints(bp);
+ b44_chip_reset(bp);
+}
+
+/* bp->lock is held. */
+static void __b44_set_mac_addr(struct b44 *bp)
+{
+ bw32(bp, B44_CAM_CTRL, 0);
+ if (!(bp->dev->flags & IFF_PROMISC)) {
+ u32 val;
+
+ __b44_cam_write(bp, bp->dev->dev_addr, 0);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ }
+}
+
+static int b44_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&bp->lock);
+ __b44_set_mac_addr(bp);
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing. Invoked with bp->lock held.
+ */
+static void __b44_set_rx_mode(struct net_device *);
+static void b44_init_hw(struct b44 *bp)
+{
+ u32 val;
+
+ b44_chip_reset(bp);
+ b44_phy_reset(bp);
+ b44_setup_phy(bp);
+
+ /* Enable CRC32, set proper LED modes and power on PHY */
+ bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+ bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+
+ /* This sets the MAC address too. */
+ __b44_set_rx_mode(bp->dev);
+
+ /* MTU + eth header + possible VLAN tag + struct rx_header */
+ bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+ bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+
+ bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
+ bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+ bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+ bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+
+ bw32(bp, B44_DMARX_PTR, bp->rx_pending);
+ bp->rx_prod = bp->rx_pending;
+
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+
+ val = br32(bp, B44_ENET_CTRL);
+ bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+}
+
+static int b44_open(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ int err;
+
+ err = b44_alloc_consistent(bp);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
+ if (err)
+ goto err_out_free;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ bp->flags |= B44_FLAG_INIT_COMPLETE;
+
+ spin_unlock_irq(&bp->lock);
+
+ init_timer(&bp->timer);
+ bp->timer.expires = jiffies + HZ;
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = b44_timer;
+ add_timer(&bp->timer);
+
+ b44_enable_ints(bp);
+
+ return 0;
+
+err_out_free:
+ b44_free_consistent(bp);
+ return err;
+}
+
+#if 0
+/*static*/ void b44_dump_state(struct b44 *bp)
+{
+ u32 val32, val32_2, val32_3, val32_4, val32_5;
+ u16 val16;
+
+ pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
+ printk("DEBUG: PCI status [%04x] \n", val16);
+
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void b44_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ b44_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int b44_close(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&bp->timer);
+
+ spin_lock_irq(&bp->lock);
+
+#if 0
+ b44_dump_state(bp);
+#endif
+ b44_halt(bp);
+ b44_free_rings(bp);
+ bp->flags &= ~B44_FLAG_INIT_COMPLETE;
+ netif_carrier_off(bp->dev);
+
+ spin_unlock_irq(&bp->lock);
+
+ free_irq(dev->irq, dev);
+
+ b44_free_consistent(bp);
+
+ return 0;
+}
+
+static struct net_device_stats *b44_get_stats(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &bp->stats;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+
+ /* Convert HW stats into netdevice stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
+#if 0
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+#endif
+
+ return nstat;
+}
+
+static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
+{
+ struct dev_mc_list *mclist;
+ int i, num_ents;
+
+ num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
+ mclist = dev->mc_list;
+ for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
+ __b44_cam_write(bp, mclist->dmi_addr, i + 1);
+ }
+ return i+1;
+}
+
+static void __b44_set_rx_mode(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 val;
+ int i=0;
+ unsigned char zero[6] = {0,0,0,0,0,0};
+
+ val = br32(bp, B44_RXCONFIG);
+ val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
+ if (dev->flags & IFF_PROMISC) {
+ val |= RXCONFIG_PROMISC;
+ bw32(bp, B44_RXCONFIG, val);
+ } else {
+ __b44_set_mac_addr(bp);
+
+ if (dev->flags & IFF_ALLMULTI)
+ val |= RXCONFIG_ALLMULTI;
+ else
+ i=__b44_load_mcast(bp, dev);
+
+ for(;i<64;i++) {
+ __b44_cam_write(bp, zero, i);
+ }
+ bw32(bp, B44_RXCONFIG, val);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ }
+}
+
+static void b44_set_rx_mode(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ __b44_set_rx_mode(dev);
+ spin_unlock_irq(&bp->lock);
+}
+
+static u32 b44_get_msglevel(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ return bp->msg_enable;
+}
+
+static void b44_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct b44 *bp = netdev_priv(dev);
+ bp->msg_enable = value;
+}
+
+static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct pci_dev *pci_dev = bp->pdev;
+
+ strcpy (info->driver, DRV_MODULE_NAME);
+ strcpy (info->version, DRV_MODULE_VERSION);
+ strcpy (info->bus_info, pci_name(pci_dev));
+}
+
+static int b44_nway_reset(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 bmcr;
+ int r;
+
+ spin_lock_irq(&bp->lock);
+ b44_readphy(bp, MII_BMCR, &bmcr);
+ b44_readphy(bp, MII_BMCR, &bmcr);
+ r = -EINVAL;
+ if (bmcr & BMCR_ANENABLE) {
+ b44_writephy(bp, MII_BMCR,
+ bmcr | BMCR_ANRESTART);
+ r = 0;
+ }
+ spin_unlock_irq(&bp->lock);
+
+ return r;
+}
+
+static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
+ return -EAGAIN;
+ cmd->supported = (SUPPORTED_Autoneg);
+ cmd->supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
+
+ cmd->advertising = 0;
+ if (bp->flags & B44_FLAG_ADV_10HALF)
+ cmd->advertising |= ADVERTISE_10HALF;
+ if (bp->flags & B44_FLAG_ADV_10FULL)
+ cmd->advertising |= ADVERTISE_10FULL;
+ if (bp->flags & B44_FLAG_ADV_100HALF)
+ cmd->advertising |= ADVERTISE_100HALF;
+ if (bp->flags & B44_FLAG_ADV_100FULL)
+ cmd->advertising |= ADVERTISE_100FULL;
+ cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = 0;
+ cmd->phy_address = bp->phy_addr;
+ cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ AUTONEG_DISABLE : AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
+ return -EAGAIN;
+
+ /* We do not support gigabit. */
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->advertising &
+ (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full))
+ return -EINVAL;
+ } else if ((cmd->speed != SPEED_100 &&
+ cmd->speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)) {
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&bp->lock);
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ bp->flags &= ~B44_FLAG_FORCE_LINK;
+ bp->flags &= ~(B44_FLAG_ADV_10HALF |
+ B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF |
+ B44_FLAG_ADV_100FULL);
+ if (cmd->advertising & ADVERTISE_10HALF)
+ bp->flags |= B44_FLAG_ADV_10HALF;
+ if (cmd->advertising & ADVERTISE_10FULL)
+ bp->flags |= B44_FLAG_ADV_10FULL;
+ if (cmd->advertising & ADVERTISE_100HALF)
+ bp->flags |= B44_FLAG_ADV_100HALF;
+ if (cmd->advertising & ADVERTISE_100FULL)
+ bp->flags |= B44_FLAG_ADV_100FULL;
+ } else {
+ bp->flags |= B44_FLAG_FORCE_LINK;
+ if (cmd->speed == SPEED_100)
+ bp->flags |= B44_FLAG_100_BASE_T;
+ if (cmd->duplex == DUPLEX_FULL)
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+
+ b44_setup_phy(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
+static void b44_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = B44_RX_RING_SIZE - 1;
+ ering->rx_pending = bp->rx_pending;
+
+ /* XXX ethtool lacks a tx_max_pending, oops... */
+}
+
+static int b44_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
+ (ering->rx_mini_pending != 0) ||
+ (ering->rx_jumbo_pending != 0) ||
+ (ering->tx_pending > B44_TX_RING_SIZE - 1))
+ return -EINVAL;
+
+ spin_lock_irq(&bp->lock);
+
+ bp->rx_pending = ering->rx_pending;
+ bp->tx_pending = ering->tx_pending;
+
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ netif_wake_queue(bp->dev);
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+static void b44_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ epause->autoneg =
+ (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
+ epause->rx_pause =
+ (bp->flags & B44_FLAG_RX_PAUSE) != 0;
+ epause->tx_pause =
+ (bp->flags & B44_FLAG_TX_PAUSE) != 0;
+}
+
+static int b44_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ if (epause->autoneg)
+ bp->flags |= B44_FLAG_PAUSE_AUTO;
+ else
+ bp->flags &= ~B44_FLAG_PAUSE_AUTO;
+ if (epause->rx_pause)
+ bp->flags |= B44_FLAG_RX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_RX_PAUSE;
+ if (epause->tx_pause)
+ bp->flags |= B44_FLAG_TX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_TX_PAUSE;
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+static struct ethtool_ops b44_ethtool_ops = {
+ .get_drvinfo = b44_get_drvinfo,
+ .get_settings = b44_get_settings,
+ .set_settings = b44_set_settings,
+ .nway_reset = b44_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = b44_get_ringparam,
+ .set_ringparam = b44_set_ringparam,
+ .get_pauseparam = b44_get_pauseparam,
+ .set_pauseparam = b44_set_pauseparam,
+ .get_msglevel = b44_get_msglevel,
+ .set_msglevel = b44_set_msglevel,
+};
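The ops table above is how the ethtool core reaches this driver: userspace issues the SIOCETHTOOL ioctl and the kernel dispatches, for example, ETHTOOL_GSET to b44_get_settings(). A minimal userspace sketch of that call follows; the interface name "eth0", the helper program shape and the error handling are illustrative assumptions, not part of this patch.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *) &ecmd;	/* ethtool commands ride in ifr_data */

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("SIOCETHTOOL");
			return 1;
		}
		printf("speed %u, %s duplex, autoneg %s\n", (unsigned) ecmd.speed,
		       ecmd.duplex == DUPLEX_FULL ? "full" : "half",
		       ecmd.autoneg == AUTONEG_ENABLE ? "on" : "off");
		return 0;
	}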
+
+static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct b44 *bp = netdev_priv(dev);
+ int err;
+
+ spin_lock_irq(&bp->lock);
+ err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&bp->lock);
+
+ return err;
+}
+
+/* Read 128 bytes of EEPROM. */
+static int b44_read_eeprom(struct b44 *bp, u8 *data)
+{
+ long i;
+ u16 *ptr = (u16 *) data;
+
+ for (i = 0; i < 128; i += 2)
+ ptr[i / 2] = readw(bp->regs + 4096 + i);
+
+ return 0;
+}
+
+static int __devinit b44_get_invariants(struct b44 *bp)
+{
+ u8 eeprom[128];
+ int err;
+
+ err = b44_read_eeprom(bp, &eeprom[0]);
+ if (err)
+ goto out;
+
+ bp->dev->dev_addr[0] = eeprom[79];
+ bp->dev->dev_addr[1] = eeprom[78];
+ bp->dev->dev_addr[2] = eeprom[81];
+ bp->dev->dev_addr[3] = eeprom[80];
+ bp->dev->dev_addr[4] = eeprom[83];
+ bp->dev->dev_addr[5] = eeprom[82];
+
+ bp->phy_addr = eeprom[90] & 0x1f;
+
+ /* With this, plus the rx_header prepended to the data by the
+ * hardware, we'll land the ethernet header on a 2-byte boundary.
+ */
+ bp->rx_offset = 30;
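+	/* Illustrative arithmetic, assuming the skb data buffer is at least
+	 * 32-bit aligned: the 28-byte rx_header plus this 2-byte pad puts the
+	 * 14-byte Ethernet header at offset 30, so the IP header that follows
+	 * begins at offset 44 and is 32-bit aligned.
+	 */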
+
+ bp->imask = IMASK_DEF;
+
+ bp->core_unit = ssb_core_unit(bp);
+ bp->dma_offset = SB_PCI_DMA;
+
+ /* XXX - really required?
+ bp->flags |= B44_FLAG_BUGGY_TXPTR;
+ */
+out:
+ return err;
+}
+
+static int __devinit b44_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int b44_version_printed = 0;
+ unsigned long b44reg_base, b44reg_len;
+ struct net_device *dev;
+ struct b44 *bp;
+ int err, i;
+
+ if (b44_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device, "
+ "aborting.\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "Cannot find proper PCI device "
+ "base address, aborting.\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+
+ b44reg_base = pci_resource_start(pdev, 0);
+ b44reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev,&pdev->dev);
+
+ /* No interesting netdevice features in this card... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+ if (b44_debug >= 0)
+ bp->msg_enable = (1 << b44_debug) - 1;
+ else
+ bp->msg_enable = B44_DEF_MSG_ENABLE;
+
+ spin_lock_init(&bp->lock);
+
+ bp->regs = ioremap(b44reg_base, b44reg_len);
+ if (bp->regs == 0UL) {
+ printk(KERN_ERR PFX "Cannot map device registers, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ bp->rx_pending = B44_DEF_RX_RING_PENDING;
+ bp->tx_pending = B44_DEF_TX_RING_PENDING;
+
+ dev->open = b44_open;
+ dev->stop = b44_close;
+ dev->hard_start_xmit = b44_start_xmit;
+ dev->get_stats = b44_get_stats;
+ dev->set_multicast_list = b44_set_rx_mode;
+ dev->set_mac_address = b44_set_mac_addr;
+ dev->do_ioctl = b44_ioctl;
+ dev->tx_timeout = b44_tx_timeout;
+ dev->poll = b44_poll;
+ dev->weight = 64;
+ dev->watchdog_timeo = B44_TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = b44_poll_controller;
+#endif
+ dev->change_mtu = b44_change_mtu;
+ dev->irq = pdev->irq;
+ SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+
+ err = b44_get_invariants(bp);
+ if (err) {
+ printk(KERN_ERR PFX "Problem fetching invariants of chip, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ bp->mii_if.dev = dev;
+ bp->mii_if.mdio_read = b44_mii_read;
+ bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.phy_id = bp->phy_addr;
+ bp->mii_if.phy_id_mask = 0x1f;
+ bp->mii_if.reg_num_mask = 0x1f;
+
+ /* By default, advertise all speed/duplex settings. */
+ bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
+
+ /* By default, auto-negotiate PAUSE. */
+ bp->flags |= B44_FLAG_PAUSE_AUTO;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ pci_save_state(bp->pdev);
+
+ printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? '\n' : ':');
+
+ return 0;
+
+err_out_iounmap:
+ iounmap(bp->regs);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit b44_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct b44 *bp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(bp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct b44 *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ del_timer_sync(&bp->timer);
+
+ spin_lock_irq(&bp->lock);
+
+ b44_halt(bp);
+ netif_carrier_off(bp->dev);
+ netif_device_detach(bp->dev);
+ b44_free_rings(bp);
+
+ spin_unlock_irq(&bp->lock);
+ return 0;
+}
+
+static int b44_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct b44 *bp = netdev_priv(dev);
+
+ pci_restore_state(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_init_rings(bp);
+ b44_init_hw(bp);
+ netif_device_attach(bp->dev);
+ spin_unlock_irq(&bp->lock);
+
+ bp->timer.expires = jiffies + HZ;
+ add_timer(&bp->timer);
+
+ b44_enable_ints(bp);
+ return 0;
+}
+
+static struct pci_driver b44_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = b44_pci_tbl,
+ .probe = b44_init_one,
+ .remove = __devexit_p(b44_remove_one),
+ .suspend = b44_suspend,
+ .resume = b44_resume,
+};
+
+static int __init b44_init(void)
+{
+ return pci_module_init(&b44_driver);
+}
+
+static void __exit b44_cleanup(void)
+{
+ pci_unregister_driver(&b44_driver);
+}
+
+module_init(b44_init);
+module_exit(b44_cleanup);
+
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
new file mode 100644
index 000000000000..11c40a2e71c7
--- /dev/null
+++ b/drivers/net/b44.h
@@ -0,0 +1,427 @@
+#ifndef _B44_H
+#define _B44_H
+
+/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
+#define B44_DEVCTRL 0x0000UL /* Device Control */
+#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
+#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */
+#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */
+#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */
+#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */
+#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */
+#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */
+#define DEVCTRL_PADDR_SHIFT 18
+#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */
+#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */
+#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */
+#define WKUP_LEN_D0 0x00000080
+#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */
+#define WKUP_LEN_P1_SHIFT 8
+#define WKUP_LEN_D1 0x00008000
+#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */
+#define WKUP_LEN_P2_SHIFT 16
+#define WKUP_LEN_D2 0x00000000
+#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */
+#define WKUP_LEN_P3_SHIFT 24
+#define WKUP_LEN_D3 0x80000000
+#define B44_ISTAT 0x0020UL /* Interrupt Status */
+#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */
+#define ISTAT_PME 0x00000040 /* Power Management Event */
+#define ISTAT_TO 0x00000080 /* General Purpose Timeout */
+#define ISTAT_DSCE 0x00000400 /* Descriptor Error */
+#define ISTAT_DATAE 0x00000800 /* Data Error */
+#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */
+#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */
+#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */
+#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */
+#define ISTAT_RX 0x00010000 /* RX Interrupt */
+#define ISTAT_TX 0x01000000 /* TX Interrupt */
+#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */
+#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */
+#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */
+#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
+#define B44_IMASK 0x0024UL /* Interrupt Mask */
+#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
+#define B44_GPTIMER 0x0028UL /* General Purpose Timer */
+#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */
+#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */
+#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */
+#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */
+#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */
+#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */
+#define B44_MAC_CTRL 0x00A8UL /* MAC Control */
+#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */
+#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */
+#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */
+#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */
+#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5
+#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */
+#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */
+#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */
+#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */
+#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */
+#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */
+#define RCV_LAZY_FC_SHIFT 24
+#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */
+#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */
+#define  DMATX_CTRL_SUSPEND	0x00000002 /* Suspend Request */
+#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */
+#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */
+#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */
+#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */
+#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */
+#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */
+#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
+#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */
+#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */
+#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */
+#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */
+#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */
+#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */
+#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */
+#define DMATX_STAT_ENONE 0x00000000 /* Error None */
+#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
+#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */
+#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */
+#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
+#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */
+#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */
+#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */
+#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */
+#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */
+#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */
+#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */
+#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
+#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
+#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
+#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disabled */
+#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
+#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
+#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
+#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */
+#define DMARX_STAT_ENONE 0x00000000 /* Error None */
+#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
+#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */
+#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */
+#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
+#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */
+#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */
+#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */
+#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */
+#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */
+#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */
+#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */
+#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */
+#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */
+#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */
+#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */
+#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */
+#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */
+#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */
+#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */
+#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */
+#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */
+#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */
+#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */
+#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
+#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
+#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
+#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
+#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
+#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
+#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */
+#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */
+#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */
+#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */
+#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */
+#define MDIO_DATA_TA_SHIFT 16
+#define MDIO_TA_VALID 2
+#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */
+#define MDIO_DATA_RA_SHIFT 18
+#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */
+#define MDIO_DATA_PMD_SHIFT 23
+#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */
+#define MDIO_DATA_OP_SHIFT 28
+#define MDIO_OP_WRITE 1
+#define MDIO_OP_READ 2
+#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */
+#define MDIO_DATA_SB_SHIFT 30
+#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */
+#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */
+#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */
+#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */
+#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
+#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */
+#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */
+#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */
+#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */
+#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */
+#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */
+#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */
+#define CAM_CTRL_READ 0x00000004 /* Read */
+#define  CAM_CTRL_WRITE		0x00000008 /* Write */
+#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */
+#define CAM_CTRL_INDEX_SHIFT 16
+#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */
+#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */
+#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */
+#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */
+#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */
+#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */
+#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */
+#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */
+#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */
+#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */
+#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */
+#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */
+#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */
+#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */
+#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */
+#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */
+#define B44_TX_O 0x0508UL /* MIB TX Octets */
+#define B44_TX_P 0x050CUL /* MIB TX Packets */
+#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */
+#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */
+#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */
+#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */
+#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */
+#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */
+#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */
+#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */
+#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */
+#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */
+#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */
+#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */
+#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */
+#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */
+#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */
+#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */
+#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */
+#define B44_TX_DEFERED	0x0554UL /* MIB TX Deferred Packets */
+#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */
+#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */
+#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */
+#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */
+#define B44_RX_O 0x0588UL /* MIB RX Octets */
+#define B44_RX_P 0x058CUL /* MIB RX Packets */
+#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */
+#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */
+#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */
+#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */
+#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */
+#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */
+#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */
+#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */
+#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */
+#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */
+#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */
+#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */
+#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */
+#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */
+#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */
+#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */
+#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */
+#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
+#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
+
+/* Silicon backplane register definitions */
+#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
+#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
+#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
+#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
+#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
+#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
+#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
+#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
+#define SBIMSTATE_TO 0x00040000 /* Timeout */
+#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
+#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
+#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
+#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
+#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
+#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
+#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
+#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
+#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
+#define SBTMSLOW_RESET 0x00000001 /* Reset */
+#define SBTMSLOW_REJECT 0x00000002 /* Reject */
+#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
+#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
+#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
+#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
+#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
+#define SBTMSHIGH_SERR 0x00000001 /* S-error */
+#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
+#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
+#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
+#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
+#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
+#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
+#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
+#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
+#define SBIDHIGH_CC_SHIFT 4
+#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
+#define SBIDHIGH_VC_SHIFT 16
+
+/* SSB PCI config space registers. */
+#define SSB_BAR0_WIN 0x80
+#define SSB_BAR1_WIN 0x84
+#define SSB_SPROM_CONTROL 0x88
+#define SSB_BAR1_CONTROL 0x8c
+
+/* SSB core and host control registers. */
+#define SSB_CONTROL 0x0000UL
+#define SSB_ARBCONTROL 0x0010UL
+#define SSB_ISTAT 0x0020UL
+#define SSB_IMASK 0x0024UL
+#define SSB_MBOX 0x0028UL
+#define SSB_BCAST_ADDR 0x0050UL
+#define SSB_BCAST_DATA 0x0054UL
+#define SSB_PCI_TRANS_0 0x0100UL
+#define SSB_PCI_TRANS_1 0x0104UL
+#define SSB_PCI_TRANS_2 0x0108UL
+#define SSB_SPROM 0x0800UL
+
+#define SSB_PCI_MEM 0x00000000
+#define SSB_PCI_IO 0x00000001
+#define SSB_PCI_CFG0 0x00000002
+#define SSB_PCI_CFG1 0x00000003
+#define SSB_PCI_PREF 0x00000004
+#define SSB_PCI_BURST 0x00000008
+#define SSB_PCI_MASK0 0xfc000000
+#define SSB_PCI_MASK1 0xfc000000
+#define SSB_PCI_MASK2 0xc0000000
+
+/* 4400 PHY registers */
+#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
+#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
+#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */
+#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */
+#define B44_MII_ALEDCTRL 26 /* Activity LED */
+#define MII_ALEDCTRL_ALLMSK 0x7fff
+#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */
+#define MII_TLEDCTRL_ENABLE 0x0040
+
+struct dma_desc {
+ u32 ctrl;
+ u32 addr;
+};
+
+/* There are only 12 bits in the DMA engine for descriptor offsetting
+ * so the table must be aligned on a boundary of this.
+ */
+#define DMA_TABLE_BYTES 4096
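Twelve bits of byte offset means at most 4096 bytes of descriptors (2^12), i.e. 512 of the 8-byte entries above, and the ring base must itself sit on a 4096-byte boundary because the hardware only reports an offset from that base. A hedged sketch of how such an offset (for instance the value read from B44_DMATX_STAT) converts back to a ring index; the helper name is illustrative, not the driver's own:

	static inline u32 desc_offset_to_index(u32 dmatx_stat)
	{
		/* the low 12 bits are a byte offset into the descriptor table */
		return (dmatx_stat & DMATX_STAT_CDMASK) / sizeof(struct dma_desc);
	}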
+
+#define DESC_CTRL_LEN 0x00001fff
+#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */
+#define DESC_CTRL_EOT 0x10000000 /* End of Table */
+#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */
+#define DESC_CTRL_EOF 0x40000000 /* End of Frame */
+#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */
+
+#define RX_COPY_THRESHOLD 256
+
+struct rx_header {
+ u16 len;
+ u16 flags;
+ u16 pad[12];
+};
+#define RX_HEADER_LEN 28
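+/* 2 + 2 + 12 * 2 bytes == 28, so RX_HEADER_LEN matches sizeof(struct
+ * rx_header); the rx_offset of 30 set in b44_get_invariants() is this
+ * header plus a 2-byte pad that aligns the IP header of the received frame.
+ */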
+
+#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */
+#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */
+#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */
+#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */
+#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */
+#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */
+#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */
+#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */
+#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */
+#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
+
+struct ring_info {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+};
+
+#define B44_MCAST_TABLE_SIZE 32
+
+/* SW copy of device statistics, kept up to date by periodic timer
+ * which probes HW values. Must have the same relative layout as the HW
+ * registers above, because b44_stats_update depends upon this.
+ */
+struct b44_hw_stats {
+ u32 tx_good_octets, tx_good_pkts, tx_octets;
+ u32 tx_pkts, tx_broadcast_pkts, tx_multicast_pkts;
+ u32 tx_len_64, tx_len_65_to_127, tx_len_128_to_255;
+ u32 tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max;
+ u32 tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts;
+ u32 tx_underruns, tx_total_cols, tx_single_cols;
+ u32 tx_multiple_cols, tx_excessive_cols, tx_late_cols;
+ u32 tx_defered, tx_carrier_lost, tx_pause_pkts;
+ u32 __pad1[8];
+
+ u32 rx_good_octets, rx_good_pkts, rx_octets;
+ u32 rx_pkts, rx_broadcast_pkts, rx_multicast_pkts;
+ u32 rx_len_64, rx_len_65_to_127, rx_len_128_to_255;
+ u32 rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max;
+ u32 rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts;
+ u32 rx_missed_pkts, rx_crc_align_errs, rx_undersize;
+ u32 rx_crc_errs, rx_align_errs, rx_symbol_errs;
+ u32 rx_pause_pkts, rx_nonpause_pkts;
+};
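Because the struct above mirrors the MIB register banks field for field (the __pad1[8] spans the 0x20-byte gap between B44_TX_PAUSE and B44_RX_GOOD_O, so the relative layout stays identical), a refresh routine can walk each counter bank and the matching fields in lockstep. The sketch below is illustrative only: the function name is made up, readl() stands in for whatever register accessor the driver really uses, and it assumes the MIB counters are configured to clear on read (MIB_CTRL_CLR_ON_READ), hence the accumulate.

	static void example_stats_refresh(struct b44 *bp)
	{
		u32 *val = &bp->hw_stats.tx_good_octets;
		unsigned long reg;

		/* one 32-bit register per u32 field, in the same order */
		for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4)
			*val++ += readl(bp->regs + reg);

		/* skip __pad1[] and continue with the RX bank */
		val = &bp->hw_stats.rx_good_octets;
		for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4)
			*val++ += readl(bp->regs + reg);
	}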
+
+struct b44 {
+ spinlock_t lock;
+
+ u32 imask, istat;
+
+ struct dma_desc *rx_ring, *tx_ring;
+
+ u32 tx_prod, tx_cons;
+ u32 rx_prod, rx_cons;
+
+ struct ring_info *rx_buffers;
+ struct ring_info *tx_buffers;
+
+ u32 dma_offset;
+ u32 flags;
+#define B44_FLAG_INIT_COMPLETE 0x00000001
+#define B44_FLAG_BUGGY_TXPTR 0x00000002
+#define B44_FLAG_REORDER_BUG 0x00000004
+#define B44_FLAG_PAUSE_AUTO 0x00008000
+#define B44_FLAG_FULL_DUPLEX 0x00010000
+#define B44_FLAG_100_BASE_T 0x00020000
+#define B44_FLAG_TX_PAUSE 0x00040000
+#define B44_FLAG_RX_PAUSE 0x00080000
+#define B44_FLAG_FORCE_LINK 0x00100000
+#define B44_FLAG_ADV_10HALF 0x01000000
+#define B44_FLAG_ADV_10FULL 0x02000000
+#define B44_FLAG_ADV_100HALF 0x04000000
+#define B44_FLAG_ADV_100FULL 0x08000000
+#define B44_FLAG_INTERNAL_PHY 0x10000000
+
+ u32 rx_offset;
+
+ u32 msg_enable;
+
+ struct timer_list timer;
+
+ struct net_device_stats stats;
+ struct b44_hw_stats hw_stats;
+
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct net_device *dev;
+
+ dma_addr_t rx_ring_dma, tx_ring_dma;
+
+ u32 rx_pending;
+ u32 tx_pending;
+ u8 phy_addr;
+ u8 core_unit;
+
+ struct mii_if_info mii_if;
+};
+
+#endif /* _B44_H */
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
new file mode 100644
index 000000000000..3d2be7c28e06
--- /dev/null
+++ b/drivers/net/bmac.c
@@ -0,0 +1,1708 @@
+/*
+ * Network device driver for the BMAC ethernet controller on
+ * Apple Powermacs. Assumes it's under a DBDMA controller.
+ *
+ * Copyright (C) 1998 Randy Gobbel.
+ *
+ * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
+ * dynamic procfs inode.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/macio.h>
+#include <asm/irq.h>
+
+#include "bmac.h"
+
+#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
+#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
+
+/*
+ * CRC polynomial - used in working out multicast filter bits.
+ */
+#define ENET_CRCPOLY 0x04c11db7
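+/* 0x04c11db7 is the standard IEEE 802.3 CRC-32 polynomial; the
+ * crc416()/bmac_crc() helpers below (compiled out while SUNHME_MULTICAST
+ * is defined) run it bit-serially over a multicast address to pick a hash
+ * filter bit.
+ */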
+
+/* switch to use multicast code lifted from sunhme driver */
+#define SUNHME_MULTICAST
+
+#define N_RX_RING 64
+#define N_TX_RING 32
+#define MAX_TX_ACTIVE 1
+#define ETHERCRC 4
+#define ETHERMINPACKET 64
+#define ETHERMTU 1500
+#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
+#define TX_TIMEOUT HZ /* 1 second */
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+#define XXDEBUG(args)
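+/* The double parentheses at XXDEBUG() call sites let the trace statements,
+ * which this empty definition compiles out, be re-enabled with something
+ * like "#define XXDEBUG(args) printk args".
+ */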
+
+struct bmac_data {
+ /* volatile struct bmac *bmac; */
+ struct sk_buff_head *queue;
+ volatile struct dbdma_regs __iomem *tx_dma;
+ int tx_dma_intr;
+ volatile struct dbdma_regs __iomem *rx_dma;
+ int rx_dma_intr;
+ volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
+ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
+ struct macio_dev *mdev;
+ int is_bmac_plus;
+ struct sk_buff *rx_bufs[N_RX_RING];
+ int rx_fill;
+ int rx_empty;
+ struct sk_buff *tx_bufs[N_TX_RING];
+ int tx_fill;
+ int tx_empty;
+ unsigned char tx_fullup;
+ struct net_device_stats stats;
+ struct timer_list tx_timeout;
+ int timeout_active;
+ int sleeping;
+ int opened;
+ unsigned short hash_use_count[64];
+ unsigned short hash_table_mask[4];
+ spinlock_t lock;
+};
+
+#if 0 /* Move that to ethtool */
+
+typedef struct bmac_reg_entry {
+ char *name;
+ unsigned short reg_offset;
+} bmac_reg_entry_t;
+
+#define N_REG_ENTRIES 31
+
+static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
+ {"MEMADD", MEMADD},
+ {"MEMDATAHI", MEMDATAHI},
+ {"MEMDATALO", MEMDATALO},
+ {"TXPNTR", TXPNTR},
+ {"RXPNTR", RXPNTR},
+ {"IPG1", IPG1},
+ {"IPG2", IPG2},
+ {"ALIMIT", ALIMIT},
+ {"SLOT", SLOT},
+ {"PALEN", PALEN},
+ {"PAPAT", PAPAT},
+ {"TXSFD", TXSFD},
+ {"JAM", JAM},
+ {"TXCFG", TXCFG},
+ {"TXMAX", TXMAX},
+ {"TXMIN", TXMIN},
+ {"PAREG", PAREG},
+ {"DCNT", DCNT},
+ {"NCCNT", NCCNT},
+ {"NTCNT", NTCNT},
+ {"EXCNT", EXCNT},
+ {"LTCNT", LTCNT},
+ {"TXSM", TXSM},
+ {"RXCFG", RXCFG},
+ {"RXMAX", RXMAX},
+ {"RXMIN", RXMIN},
+ {"FRCNT", FRCNT},
+ {"AECNT", AECNT},
+ {"FECNT", FECNT},
+ {"RXSM", RXSM},
+ {"RXCV", RXCV}
+};
+
+#endif
+
+static unsigned char *bmac_emergency_rxbuf;
+
+/*
+ * Number of bytes of private data per BMAC: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES (sizeof(struct bmac_data) \
+ + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
+ + sizeof(struct sk_buff_head))
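+/* With N_RX_RING == 64, N_TX_RING == 32 and the usual 16-byte struct
+ * dbdma_cmd, this is sizeof(struct bmac_data) + 100 * 16 bytes of command
+ * space + sizeof(struct sk_buff_head); the 4 extra command slots (64 bytes)
+ * leave room for the branch command appended to each ring plus the 16-byte
+ * alignment slack mentioned above.
+ */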
+
+static unsigned char bitrev(unsigned char b);
+static int bmac_open(struct net_device *dev);
+static int bmac_close(struct net_device *dev);
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *bmac_stats(struct net_device *dev);
+static void bmac_set_multicast(struct net_device *dev);
+static void bmac_reset_and_enable(struct net_device *dev);
+static void bmac_start_chip(struct net_device *dev);
+static void bmac_init_chip(struct net_device *dev);
+static void bmac_init_registers(struct net_device *dev);
+static void bmac_enable_and_reset_chip(struct net_device *dev);
+static int bmac_set_address(struct net_device *dev, void *addr);
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
+static void bmac_set_timeout(struct net_device *dev);
+static void bmac_tx_timeout(unsigned long data);
+static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static void bmac_start(struct net_device *dev);
+
+#define DBDMA_SET(x) ( ((x) | (x) << 16) )
+#define DBDMA_CLEAR(x) ( (x) << 16)
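+/* DBDMA control register writes carry a bit mask in the upper 16 bits and
+ * the new bit values in the lower 16, so DBDMA_SET(x) turns the bits in x
+ * on while DBDMA_CLEAR(x) selects them but writes them as zero (see
+ * dbdma_continue() and dbdma_reset() below).
+ */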
+
+static inline void
+dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
+{
+ __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
+ return;
+}
+
+static inline unsigned long
+dbdma_ld32(volatile __u32 __iomem *a)
+{
+ __u32 swap;
+ __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
+ return swap;
+}
+
+static void
+dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
+ eieio();
+}
+
+static void
+dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
+ eieio();
+ while (dbdma_ld32(&dmap->status) & RUN)
+ eieio();
+}
+
+static void
+dbdma_setcmd(volatile struct dbdma_cmd *cp,
+ unsigned short cmd, unsigned count, unsigned long addr,
+ unsigned long cmd_dep)
+{
+ out_le16(&cp->command, cmd);
+ out_le16(&cp->req_count, count);
+ out_le32(&cp->phy_addr, addr);
+ out_le32(&cp->cmd_dep, cmd_dep);
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->res_count, 0);
+}
+
+static inline
+void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
+{
+ out_le16((void __iomem *)dev->base_addr + reg_offset, data);
+}
+
+
+static inline
+volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
+{
+ return in_le16((void __iomem *)dev->base_addr + reg_offset);
+}
+
+static void
+bmac_enable_and_reset_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ if (rd)
+ dbdma_reset(rd);
+ if (td)
+ dbdma_reset(td);
+
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
+}
+
+#define MIFDELAY udelay(10)
+
+static unsigned int
+bmac_mif_readbits(struct net_device *dev, int nb)
+{
+ unsigned int val = 0;
+
+ while (--nb >= 0) {
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ if (bmread(dev, MIFCSR) & 8)
+ val |= 1 << nb;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ }
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
+{
+ int b;
+
+ while (--nb >= 0) {
+ b = (val & (1 << nb))? 6: 4;
+ bmwrite(dev, MIFCSR, b);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, b|1);
+ MIFDELAY;
+ }
+}
+
+static unsigned int
+bmac_mif_read(struct net_device *dev, unsigned int addr)
+{
+ unsigned int val;
+
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 6, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmwrite(dev, MIFCSR, 2);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ val = bmac_mif_readbits(dev, 17);
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
+{
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 5, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmac_mif_writebits(dev, 2, 2);
+ bmac_mif_writebits(dev, val, 16);
+ bmac_mif_writebits(dev, 3, 2);
+}
+
+static void
+bmac_init_registers(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile unsigned short regValue;
+ unsigned short *pWord16;
+ int i;
+
+ /* XXDEBUG(("bmac: enter init_registers\n")); */
+
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, TXRST, TxResetBit);
+
+ i = 100;
+ do {
+ --i;
+ udelay(10000);
+ regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
+ } while ((regValue & TxResetBit) && i > 0);
+
+ if (!bp->is_bmac_plus) {
+ regValue = bmread(dev, XCVRIF);
+ regValue |= ClkBit | SerialMode | COLActiveLow;
+ bmwrite(dev, XCVRIF, regValue);
+ udelay(10000);
+ }
+
+ bmwrite(dev, RSEED, (unsigned short)0x1968);
+
+ regValue = bmread(dev, XIFC);
+ regValue |= TxOutputEnable;
+ bmwrite(dev, XIFC, regValue);
+
+ bmread(dev, PAREG);
+
+ /* set collision counters to 0 */
+ bmwrite(dev, NCCNT, 0);
+ bmwrite(dev, NTCNT, 0);
+ bmwrite(dev, EXCNT, 0);
+ bmwrite(dev, LTCNT, 0);
+
+ /* set rx counters to 0 */
+ bmwrite(dev, FRCNT, 0);
+ bmwrite(dev, LECNT, 0);
+ bmwrite(dev, AECNT, 0);
+ bmwrite(dev, FECNT, 0);
+ bmwrite(dev, RXCV, 0);
+
+ /* set tx fifo information */
+ bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
+
+ bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
+ bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
+
+ /* set rx fifo information */
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+
+ //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
+ bmread(dev, STATUS); /* read it just to clear it */
+
+ /* zero out the chip Hash Filter registers */
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
+
+ bmwrite(dev, INTDISABLE, EnableNormal);
+
+ return;
+}
+
+#if 0
+static void
+bmac_disable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, DisableAll);
+}
+
+static void
+bmac_enable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, EnableNormal);
+}
+#endif
+
+
+static void
+bmac_start_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ unsigned short oldConfig;
+
+ /* enable rx dma channel */
+ dbdma_continue(rd);
+
+ oldConfig = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
+
+ /* turn on rx plus any other bits already on (promiscuous possibly) */
+ oldConfig = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
+ udelay(20000);
+}
+
+static void
+bmac_init_phy(struct net_device *dev)
+{
+ unsigned int addr;
+ struct bmac_data *bp = netdev_priv(dev);
+
+ printk(KERN_DEBUG "phy registers:");
+ for (addr = 0; addr < 32; ++addr) {
+ if ((addr & 7) == 0)
+ printk("\n" KERN_DEBUG);
+ printk(" %.4x", bmac_mif_read(dev, addr));
+ }
+ printk("\n");
+ if (bp->is_bmac_plus) {
+ unsigned int capable, ctrl;
+
+ ctrl = bmac_mif_read(dev, 0);
+ capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
+ if (bmac_mif_read(dev, 4) != capable
+ || (ctrl & 0x1000) == 0) {
+ bmac_mif_write(dev, 4, capable);
+ bmac_mif_write(dev, 0, 0x1200);
+ } else
+ bmac_mif_write(dev, 0, 0x1000);
+ }
+}
+
+static void bmac_init_chip(struct net_device *dev)
+{
+ bmac_init_phy(dev);
+ bmac_init_registers(dev);
+}
+
+#ifdef CONFIG_PM
+static int bmac_suspend(struct macio_dev *mdev, u32 state)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned short config;
+ int i;
+
+ netif_device_detach(dev);
+	/* probably should wait for dma to finish & turn off the chip */
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active) {
+ del_timer(&bp->tx_timeout);
+ bp->timeout_active = 0;
+ }
+ disable_irq(dev->irq);
+ disable_irq(bp->tx_dma_intr);
+ disable_irq(bp->rx_dma_intr);
+ bp->sleeping = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+ if (bp->opened) {
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ /* free some skb's */
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ }
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ return 0;
+}
+
+static int bmac_resume(struct macio_dev *mdev)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ /* see if this is enough */
+ if (bp->opened)
+ bmac_reset_and_enable(dev);
+
+ enable_irq(dev->irq);
+ enable_irq(bp->tx_dma_intr);
+ enable_irq(bp->rx_dma_intr);
+ netif_device_attach(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int bmac_set_address(struct net_device *dev, void *addr)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned char *p = addr;
+ unsigned short *pWord16;
+ unsigned long flags;
+ int i;
+
+ XXDEBUG(("bmac: enter set_address\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+
+ for (i = 0; i < 6; ++i) {
+ dev->dev_addr[i] = p[i];
+ }
+ /* load up the hardware address */
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+ XXDEBUG(("bmac: exit set_address\n"));
+ return 0;
+}
+
+static inline void bmac_set_timeout(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active)
+ del_timer(&bp->tx_timeout);
+ bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
+ bp->tx_timeout.function = bmac_tx_timeout;
+ bp->tx_timeout.data = (unsigned long) dev;
+ add_timer(&bp->tx_timeout);
+ bp->timeout_active = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ void *vaddr;
+ unsigned long baddr;
+ unsigned long len;
+
+ len = skb->len;
+ vaddr = skb->data;
+ baddr = virt_to_bus(vaddr);
+
+ dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
+}
+
+static void
+bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
+
+ dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
+ virt_to_bus(addr), 0);
+}
+
+/* Bit-reverse one byte of an ethernet hardware address. */
+static unsigned char
+bitrev(unsigned char b)
+{
+ int d = 0, i;
+
+ for (i = 0; i < 8; ++i, b >>= 1)
+ d = (d << 1) | (b & 1);
+ return d;
+}
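+/* e.g. bitrev(0x35) == 0xac; bmac_get_station_address() below uses this to
+ * undo the bit order of the address bytes read from the SROM.
+ */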
+
+
+static void
+bmac_init_tx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
+
+ bp->tx_empty = 0;
+ bp->tx_fill = 0;
+ bp->tx_fullup = 0;
+
+ /* put a branch at the end of the tx command list */
+ dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
+
+ /* reset tx dma */
+ dbdma_reset(td);
+ out_le32(&td->wait_sel, 0x00200020);
+ out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
+}
+
+static int
+bmac_init_rx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ int i;
+ struct sk_buff *skb;
+
+ /* initialize list of sk_buffs for receiving and set up recv dma */
+ memset((char *)bp->rx_cmds, 0,
+ (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
+ for (i = 0; i < N_RX_RING; i++) {
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(skb, 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ }
+
+ bp->rx_empty = 0;
+ bp->rx_fill = i;
+
+ /* Put a branch back to the beginning of the receive command list */
+ dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
+
+ /* start rx dma */
+ dbdma_reset(rd);
+ out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
+
+ return 1;
+}
+
+
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ int i;
+
+ /* see if there's a free slot in the tx ring */
+ /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
+ /* bp->tx_empty, bp->tx_fill)); */
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty) {
+ netif_stop_queue(dev);
+ bp->tx_fullup = 1;
+ XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
+ return -1; /* can't take it at the moment */
+ }
+
+ dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
+
+ bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
+
+ bp->tx_bufs[bp->tx_fill] = skb;
+ bp->tx_fill = i;
+
+ bp->stats.tx_bytes += skb->len;
+
+ dbdma_continue(td);
+
+ return 0;
+}
+
+static int rxintcount;
+
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ int i, nb, stat;
+ struct sk_buff *skb;
+ unsigned int residual;
+ int last;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (++rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr\n"));
+ }
+
+ last = -1;
+ i = bp->rx_empty;
+
+ while (1) {
+ cp = &bp->rx_cmds[i];
+ stat = ld_le16(&cp->xfer_status);
+ residual = ld_le16(&cp->res_count);
+ if ((stat & ACTIVE) == 0)
+ break;
+ nb = RX_BUFLEN - residual - 2;
+ if (nb < (ETHERMINPACKET - ETHERCRC)) {
+ skb = NULL;
+ bp->stats.rx_length_errors++;
+ bp->stats.rx_errors++;
+ } else {
+ skb = bp->rx_bufs[i];
+ bp->rx_bufs[i] = NULL;
+ }
+ if (skb != NULL) {
+ nb -= ETHERCRC;
+ skb_put(skb, nb);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ++bp->stats.rx_packets;
+ bp->stats.rx_bytes += nb;
+ } else {
+ ++bp->stats.rx_dropped;
+ }
+ dev->last_rx = jiffies;
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(bp->rx_bufs[i], 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ st_le16(&cp->res_count, 0);
+ st_le16(&cp->xfer_status, 0);
+ last = i;
+ if (++i >= N_RX_RING) i = 0;
+ }
+
+ if (last != -1) {
+ bp->rx_fill = last;
+ bp->rx_empty = i;
+ }
+
+ dbdma_continue(rd);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr done\n"));
+ }
+ return IRQ_HANDLED;
+}
+
+static int txintcount;
+
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_cmd *cp;
+ int stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (txintcount++ < 10) {
+ XXDEBUG(("bmac_txdma_intr\n"));
+ }
+
+ /* del_timer(&bp->tx_timeout); */
+ /* bp->timeout_active = 0; */
+
+ while (1) {
+ cp = &bp->tx_cmds[bp->tx_empty];
+ stat = ld_le16(&cp->xfer_status);
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
+ }
+ if (!(stat & ACTIVE)) {
+ /*
+ * status field might not have been filled by DBDMA
+ */
+ if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
+ break;
+ }
+
+ if (bp->tx_bufs[bp->tx_empty]) {
+ ++bp->stats.tx_packets;
+ dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+ }
+ bp->tx_bufs[bp->tx_empty] = NULL;
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (++bp->tx_empty >= N_TX_RING)
+ bp->tx_empty = 0;
+ if (bp->tx_empty == bp->tx_fill)
+ break;
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
+ }
+
+ bmac_start(dev);
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *bmac_stats(struct net_device *dev)
+{
+ struct bmac_data *p = netdev_priv(dev);
+
+ return &p->stats;
+}
+
+#ifndef SUNHME_MULTICAST
+/* Real fast bit-reversal algorithm, 6-bit values */
+static int reverse6[64] = {
+ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
+ 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
+ 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
+ 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
+ 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
+ 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
+ 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
+ 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
+};
+
+static unsigned int
+crc416(unsigned int curval, unsigned short nxtval)
+{
+ register unsigned int counter, cur = curval, next = nxtval;
+ register int high_crc_set, low_data_set;
+
+ /* Swap bytes */
+ next = ((next & 0x00FF) << 8) | (next >> 8);
+
+ /* Compute bit-by-bit */
+ for (counter = 0; counter < 16; ++counter) {
+ /* is high CRC bit set? */
+ if ((cur & 0x80000000) == 0) high_crc_set = 0;
+ else high_crc_set = 1;
+
+ cur = cur << 1;
+
+ if ((next & 0x0001) == 0) low_data_set = 0;
+ else low_data_set = 1;
+
+ next = next >> 1;
+
+ /* do the XOR */
+ if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ }
+ return cur;
+}
+
+static unsigned int
+bmac_crc(unsigned short *address)
+{
+ unsigned int newcrc;
+
+ XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
+ newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
+ newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
+ newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
+
+ return(newcrc);
+}
+
+/*
+ * Add requested mcast addr to BMac's hash table filter.
+ *
+ */
+
+static void
+bmac_addhash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned short mask;
+
+ if (!(*addr)) return;
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc]++) return; /* This bit is already set */
+ mask = crc % 16;
+ mask = (unsigned char)1 << mask;
+ bp->hash_use_count[crc/16] |= mask;
+}
+
+static void
+bmac_removehash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned char mask;
+
+ /* Now, delete the address from the filter copy, as indicated */
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
+ if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
+ mask = crc % 16;
+ mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
+ bp->hash_table_mask[crc/16] &= mask;
+}
+
+/*
+ * Sync the adapter with the software copy of the multicast mask
+ * (logical address filter).
+ */
+
+static void
+bmac_rx_off(struct net_device *dev)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxMACEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ do {
+ rx_cfg = bmread(dev, RXCFG);
+ } while (rx_cfg & RxMACEnable);
+}
+
+unsigned short
+bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxMACEnable;
+ if (hash_enable) rx_cfg |= RxHashFilterEnable;
+ else rx_cfg &= ~RxHashFilterEnable;
+ if (promisc_enable) rx_cfg |= RxPromiscEnable;
+ else rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+ bmwrite(dev, RXCFG, rx_cfg );
+ return rx_cfg;
+}
+
+static void
+bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
+{
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+}
+
+#if 0
+static void
+bmac_add_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
+ bmac_addhash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+ /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
+}
+
+static void
+bmac_remove_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ bmac_removehash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+}
+#endif
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct dev_mc_list *dmi;
+ struct bmac_data *bp = netdev_priv(dev);
+ int num_addrs = dev->mc_count;
+ unsigned short rx_cfg;
+ int i;
+
+ if (bp->sleeping)
+ return;
+
+ XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
+
+ if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
+ } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ rx_cfg = bmac_rx_on(dev, 0, 1);
+ XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
+ if (num_addrs == 0) {
+ rx_cfg = bmac_rx_on(dev, 0, 0);
+ XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
+ bmac_addhash(bp, dmi->dmi_addr);
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+ XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
+ }
+ }
+ /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
+}
+#else /* ifdef SUNHME_MULTICAST */
+
+/* The version of set_multicast below was lifted from sunhme.c */
+
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ unsigned short rx_cfg;
+ u32 crc;
+
+ if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ bmwrite(dev, BHASH0, 0xffff);
+ bmwrite(dev, BHASH1, 0xffff);
+ bmwrite(dev, BHASH2, 0xffff);
+ bmwrite(dev, BHASH3, 0xffff);
+ } else if(dev->flags & IFF_PROMISC) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ } else {
+ u16 hash_table[4];
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+
+ for(i = 0; i < 4; i++) hash_table[i] = 0;
+
+ for(i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if(!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ bmwrite(dev, BHASH0, hash_table[0]);
+ bmwrite(dev, BHASH1, hash_table[1]);
+ bmwrite(dev, BHASH2, hash_table[2]);
+ bmwrite(dev, BHASH3, hash_table[3]);
+ }
+}
+#endif /* SUNHME_MULTICAST */
+
+static int miscintcount;
+
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned int status = bmread(dev, STATUS);
+ if (miscintcount++ < 10) {
+ XXDEBUG(("bmac_misc_intr\n"));
+ }
+ /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
+ /* bmac_txdma_intr_inner(irq, dev_id, regs); */
+ /* if (status & FrameReceived) bp->stats.rx_dropped++; */
+ if (status & RxErrorMask) bp->stats.rx_errors++;
+ if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
+ if (status & RxLenCntExp) bp->stats.rx_length_errors++;
+ if (status & RxOverFlow) bp->stats.rx_over_errors++;
+ if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
+
+ /* if (status & FrameSent) bp->stats.tx_dropped++; */
+ if (status & TxErrorMask) bp->stats.tx_errors++;
+ if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
+ if (status & TxNormalCollExp) bp->stats.collisions++;
+ return IRQ_HANDLED;
+}
+
+/*
+ * Procedure for reading EEPROM
+ */
+#define SROMAddressLength 5
+#define DataInOn 0x0008
+#define DataInOff 0x0000
+#define Clk 0x0002
+#define ChipSelect 0x0001
+#define SDIShiftCount 3
+#define SD0ShiftCount 2
+#define DelayValue 1000 /* number of microseconds */
+#define SROMStartOffset 10 /* this is in words */
+#define SROMReadCount 3 /* number of words to read from SROM */
+#define SROMAddressBits 6
+#define EnetAddressOffset 20
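+/*
+ * The SROM is read by bit-banging the SROMCSR register: reset_and_select_srom()
+ * resets the interface and clocks in the three-bit read opcode (110), then
+ * read_srom() clocks in the word address MSB-first and clocks out the 16 data
+ * bits. bmac_get_station_address() reads SROMReadCount (3) such words starting
+ * at word offset EnetAddressOffset/2 and bit-reverses each byte to recover the
+ * Ethernet address.
+ */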
+
+static unsigned char
+bmac_clock_out_bit(struct net_device *dev)
+{
+ unsigned short data;
+ unsigned short val;
+
+ bmwrite(dev, SROMCSR, ChipSelect | Clk);
+ udelay(DelayValue);
+
+ data = bmread(dev, SROMCSR);
+ udelay(DelayValue);
+ val = (data >> SD0ShiftCount) & 1;
+
+ bmwrite(dev, SROMCSR, ChipSelect);
+ udelay(DelayValue);
+
+ return val;
+}
+
+static void
+bmac_clock_in_bit(struct net_device *dev, unsigned int val)
+{
+ unsigned short data;
+
+ if (val != 0 && val != 1) return;
+
+ data = (val << SDIShiftCount);
+ bmwrite(dev, SROMCSR, data | ChipSelect );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect);
+ udelay(DelayValue);
+}
+
+static void
+reset_and_select_srom(struct net_device *dev)
+{
+ /* first reset */
+ bmwrite(dev, SROMCSR, 0);
+ udelay(DelayValue);
+
+ /* send it the read command (110) */
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 0);
+}
+
+static unsigned short
+read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
+{
+ unsigned short data, val;
+ int i;
+
+ /* send out the address we want to read from */
+ for (i = 0; i < addr_len; i++) {
+ val = addr >> (addr_len-i-1);
+ bmac_clock_in_bit(dev, val & 1);
+ }
+
+ /* Now read in the 16-bit data */
+ data = 0;
+ for (i = 0; i < 16; i++) {
+ val = bmac_clock_out_bit(dev);
+ data <<= 1;
+ data |= val;
+ }
+ bmwrite(dev, SROMCSR, 0);
+
+ return data;
+}
+
+/*
+ * It looks like Cogent and SMC use different methods for calculating
+ * checksums. What a pain..
+ */
+
+static int
+bmac_verify_checksum(struct net_device *dev)
+{
+ unsigned short data, storedCS;
+
+ reset_and_select_srom(dev);
+ data = read_srom(dev, 3, SROMAddressBits);
+ storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
+
+ return 0;
+}
+
+
+static void
+bmac_get_station_address(struct net_device *dev, unsigned char *ea)
+{
+ int i;
+ unsigned short data;
+
+ for (i = 0; i < 6; i++)
+ {
+ reset_and_select_srom(dev);
+ data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+ ea[2*i] = bitrev(data & 0x0ff);
+ ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
+ }
+}
+
+static void bmac_reset_and_enable(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ bmac_enable_and_reset_chip(dev);
+ bmac_init_tx_ring(bp);
+ bmac_init_rx_ring(bp);
+ bmac_init_chip(dev);
+ bmac_start_chip(dev);
+ bmwrite(dev, INTDISABLE, EnableNormal);
+ bp->sleeping = 0;
+
+ /*
+ * It seems that the bmac can't receive until it's transmitted
+ * a packet. So we give it a dummy packet to transmit.
+ */
+ skb = dev_alloc_skb(ETHERMINPACKET);
+ if (skb != NULL) {
+ data = skb_put(skb, ETHERMINPACKET);
+ memset(data, 0, ETHERMINPACKET);
+ memcpy(data, dev->dev_addr, 6);
+ memcpy(data+6, dev->dev_addr, 6);
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_match *match)
+{
+ int j, rev, ret;
+ struct bmac_data *bp;
+ unsigned char *addr;
+ struct net_device *dev;
+ int is_bmac_plus = ((int)match->data) != 0;
+
+ if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
+ printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
+ return -ENODEV;
+ }
+ addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
+ if (addr == NULL) {
+ addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL);
+ if (addr == NULL) {
+ printk(KERN_ERR "BMAC: Can't get mac-address\n");
+ return -ENODEV;
+ }
+ }
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev) {
+ printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+ macio_set_drvdata(mdev, dev);
+
+ bp->mdev = mdev;
+ spin_lock_init(&bp->lock);
+
+ if (macio_request_resources(mdev, "bmac")) {
+ printk(KERN_ERR "BMAC: can't request IO resource !\n");
+ goto out_free;
+ }
+
+ dev->base_addr = (unsigned long)
+ ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
+ if (dev->base_addr == 0)
+ goto out_release;
+
+ dev->irq = macio_irq(mdev, 0);
+
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ rev = addr[0] == 0 && addr[1] == 0xA0;
+ for (j = 0; j < 6; ++j)
+ dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
+
+ /* Enable chip without interrupts for now */
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ dev->open = bmac_open;
+ dev->stop = bmac_close;
+ dev->hard_start_xmit = bmac_output;
+ dev->get_stats = bmac_stats;
+ dev->set_multicast_list = bmac_set_multicast;
+ dev->set_mac_address = bmac_set_address;
+
+ bmac_get_station_address(dev, addr);
+ if (bmac_verify_checksum(dev) != 0)
+ goto err_out_iounmap;
+
+ bp->is_bmac_plus = is_bmac_plus;
+ bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
+ if (!bp->tx_dma)
+ goto err_out_iounmap;
+ bp->tx_dma_intr = macio_irq(mdev, 1);
+ bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
+ if (!bp->rx_dma)
+ goto err_out_iounmap_tx;
+ bp->rx_dma_intr = macio_irq(mdev, 2);
+
+ bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
+ bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
+
+ bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
+ skb_queue_head_init(bp->queue);
+
+ init_timer(&bp->tx_timeout);
+
+ ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
+ goto err_out_iounmap_rx;
+ }
+ ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
+ goto err_out_irq0;
+ }
+ ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
+ goto err_out_irq1;
+ }
+
+ /* Mask chip interrupts and disable chip, will be
+ * re-enabled on open()
+ */
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "BMAC: Ethernet registration failed\n");
+ goto err_out_irq2;
+ }
+
+ printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
+ for (j = 0; j < 6; ++j)
+ printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
+ XXDEBUG((", base_addr=%#0lx", dev->base_addr));
+ printk("\n");
+
+ return 0;
+
+err_out_irq2:
+ free_irq(bp->rx_dma_intr, dev);
+err_out_irq1:
+ free_irq(bp->tx_dma_intr, dev);
+err_out_irq0:
+ free_irq(dev->irq, dev);
+err_out_iounmap_rx:
+ iounmap(bp->rx_dma);
+err_out_iounmap_tx:
+ iounmap(bp->tx_dma);
+err_out_iounmap:
+ iounmap((void __iomem *)dev->base_addr);
+out_release:
+ macio_release_resources(mdev);
+out_free:
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ free_netdev(dev);
+
+ return -ENODEV;
+}
+
+static int bmac_open(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ /* XXDEBUG(("bmac: enter open\n")); */
+ /* reset the chip */
+ bp->opened = 1;
+ bmac_reset_and_enable(dev);
+ enable_irq(dev->irq);
+ dev->flags |= IFF_RUNNING;
+ return 0;
+}
+
+static int bmac_close(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ unsigned short config;
+ int i;
+
+ bp->sleeping = 1;
+ dev->flags &= ~(IFF_UP | IFF_RUNNING);
+
+ /* disable rx and tx */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+
+ /* free some skb's */
+ XXDEBUG(("bmac: free rx bufs\n"));
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: free tx bufs\n"));
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: all bufs freed\n"));
+
+ bp->opened = 0;
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ return 0;
+}
+
+static void
+bmac_start(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ int i;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (bp->sleeping)
+ return;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ while (1) {
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty)
+ break;
+ skb = skb_dequeue(bp->queue);
+ if (skb == NULL)
+ break;
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int
+bmac_output(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ skb_queue_tail(bp->queue, skb);
+ bmac_start(dev);
+ return 0;
+}
+
+static void bmac_tx_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ unsigned long flags;
+ unsigned short config, oldConfig;
+ int i;
+
+ XXDEBUG(("bmac: tx_timeout called\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+ bp->timeout_active = 0;
+
+ /* update various counters */
+/* bmac_handle_misc_intrs(bp, 0); */
+
+ cp = &bp->tx_cmds[bp->tx_empty];
+/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
+/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
+/* mb->pr, mb->xmtfs, mb->fifofc)); */
+
+ /* turn off both tx and rx and reset the chip */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ printk(KERN_ERR "bmac: transmit timeout - resetting\n");
+ bmac_enable_and_reset_chip(dev);
+
+ /* restart rx dma */
+ cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ out_le16(&cp->xfer_status, 0);
+ out_le32(&rd->cmdptr, virt_to_bus(cp));
+ out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
+
+ /* fix up the transmit side */
+ XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
+ bp->tx_empty, bp->tx_fill, bp->tx_fullup));
+ i = bp->tx_empty;
+ ++bp->stats.tx_errors;
+ if (i != bp->tx_fill) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ if (++i >= N_TX_RING) i = 0;
+ bp->tx_empty = i;
+ }
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (i != bp->tx_fill) {
+ cp = &bp->tx_cmds[i];
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->cmdptr, virt_to_bus(cp));
+ out_le32(&td->control, DBDMA_SET(RUN));
+ /* bmac_set_timeout(dev); */
+ XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
+ }
+
+ /* turn it back on */
+ oldConfig = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
+ oldConfig = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+#if 0
+static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
+{
+ int i,*ip;
+
+ for (i=0;i< count;i++) {
+ ip = (int*)(cp+i);
+
+ printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
+ ld_le32(ip+0),
+ ld_le32(ip+1),
+ ld_le32(ip+2),
+ ld_le32(ip+3));
+ }
+
+}
+#endif
+
+#if 0
+static int
+bmac_proc_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len = 0;
+ off_t pos = 0;
+ off_t begin = 0;
+ int i;
+
+ if (bmac_devs == NULL)
+ return (-ENOSYS);
+
+ len += sprintf(buffer, "BMAC counters & registers\n");
+
+ for (i = 0; i<N_REG_ENTRIES; i++) {
+ len += sprintf(buffer + len, "%s: %#08x\n",
+ reg_entries[i].name,
+ bmread(bmac_devs, reg_entries[i].reg_offset));
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ if (pos > offset+length) break;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+
+ if (len > length) len = length;
+
+ return len;
+}
+#endif
+
+static int __devexit bmac_remove(struct macio_dev *mdev)
+{
+ struct net_device *dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(bp->tx_dma_intr, dev);
+ free_irq(bp->rx_dma_intr, dev);
+
+ iounmap((void __iomem *)dev->base_addr);
+ iounmap(bp->tx_dma);
+ iounmap(bp->rx_dma);
+
+ macio_release_resources(mdev);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct of_match bmac_match[] =
+{
+ {
+ .name = "bmac",
+ .type = OF_ANY_MATCH,
+ .compatible = OF_ANY_MATCH,
+ .data = (void *)0,
+ },
+ {
+ .name = OF_ANY_MATCH,
+ .type = "network",
+ .compatible = "bmac+",
+ .data = (void *)1,
+ },
+ {},
+};
+
+static struct macio_driver bmac_driver =
+{
+ .name = "bmac",
+ .match_table = bmac_match,
+ .probe = bmac_probe,
+ .remove = bmac_remove,
+#ifdef CONFIG_PM
+ .suspend = bmac_suspend,
+ .resume = bmac_resume,
+#endif
+};
+
+
+static int __init bmac_init(void)
+{
+ if (bmac_emergency_rxbuf == NULL) {
+ bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
+ if (bmac_emergency_rxbuf == NULL) {
+ printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return macio_register_driver(&bmac_driver);
+}
+
+static void __exit bmac_exit(void)
+{
+ macio_unregister_driver(&bmac_driver);
+
+ if (bmac_emergency_rxbuf != NULL) {
+ kfree(bmac_emergency_rxbuf);
+ bmac_emergency_rxbuf = NULL;
+ }
+}
+
+MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
+MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
+MODULE_LICENSE("GPL");
+
+module_init(bmac_init);
+module_exit(bmac_exit);
diff --git a/drivers/net/bmac.h b/drivers/net/bmac.h
new file mode 100644
index 000000000000..df3b93d1ac24
--- /dev/null
+++ b/drivers/net/bmac.h
@@ -0,0 +1,164 @@
+/*
+ * bmac.h - definitions for the registers in the "Big Mac"
+ * Ethernet controller found in PowerMac G3 models.
+ *
+ * Copyright (C) 1998 Randy Gobbel.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* The "Big MAC" appears to have some parts in common with the Sun "Happy Meal"
+ * (HME) controller. See sunhme.h
+ */
+
+
+/* register offsets */
+
+/* global status and control */
+#define XIFC 0x000 /* low-level interface control */
+# define TxOutputEnable 0x0001 /* output driver enable */
+# define XIFLoopback 0x0002 /* Loopback-mode XIF enable */
+# define MIILoopback 0x0004 /* Loopback-mode MII enable */
+# define MIILoopbackBits 0x0006
+# define MIIBuffDisable 0x0008 /* MII receive buffer disable */
+# define SQETestEnable 0x0010 /* SQE test enable */
+# define SQETimeWindow 0x03e0 /* SQE time window */
+# define XIFLanceMode 0x0010 /* Lance mode enable */
+# define XIFLanceIPG0 0x03e0 /* Lance mode IPG0 */
+#define TXFIFOCSR 0x100 /* transmit FIFO control */
+# define TxFIFOEnable 0x0001
+#define TXTH 0x110 /* transmit threshold */
+# define TxThreshold 0x0004
+#define RXFIFOCSR 0x120 /* receive FIFO control */
+# define RxFIFOEnable 0x0001
+#define MEMADD 0x130 /* memory address, unknown function */
+#define MEMDATAHI 0x140 /* memory data high, presently unused in driver */
+#define MEMDATALO 0x150 /* memory data low, presently unused in driver */
+#define XCVRIF 0x160 /* transceiver interface control */
+# define COLActiveLow 0x0002
+# define SerialMode 0x0004
+# define ClkBit 0x0008
+# define LinkStatus 0x0100
+#define CHIPID 0x170 /* chip ID */
+#define MIFCSR 0x180 /* ??? */
+#define SROMCSR 0x190 /* SROM control */
+# define ChipSelect 0x0001
+# define Clk 0x0002
+#define TXPNTR 0x1a0 /* transmit pointer */
+#define RXPNTR 0x1b0 /* receive pointer */
+#define STATUS 0x200 /* status--reading this clears it */
+#define INTDISABLE 0x210 /* interrupt enable/disable control */
+/* bits below are the same in both STATUS and INTDISABLE registers */
+# define FrameReceived 0x00000001 /* Received a frame */
+# define RxFrameCntExp 0x00000002 /* Receive frame counter expired */
+# define RxAlignCntExp 0x00000004 /* Align-error counter expired */
+# define RxCRCCntExp 0x00000008 /* CRC-error counter expired */
+# define RxLenCntExp 0x00000010 /* Length-error counter expired */
+# define RxOverFlow 0x00000020 /* Receive FIFO overflow */
+# define RxCodeViolation 0x00000040 /* Code-violation counter expired */
+# define SQETestError 0x00000080 /* Test error in XIF for SQE */
+# define FrameSent 0x00000100 /* Transmitted a frame */
+# define TxUnderrun 0x00000200 /* Transmit FIFO underrun */
+# define TxMaxSizeError 0x00000400 /* Max-packet size error */
+# define TxNormalCollExp 0x00000800 /* Normal-collision counter expired */
+# define TxExcessCollExp 0x00001000 /* Excess-collision counter expired */
+# define TxLateCollExp 0x00002000 /* Late-collision counter expired */
+# define TxNetworkCollExp 0x00004000 /* First-collision counter expired */
+# define TxDeferTimerExp 0x00008000 /* Defer-timer expired */
+# define RxFIFOToHost 0x00010000 /* Data moved from FIFO to host */
+# define RxNoDescriptors 0x00020000 /* No more receive descriptors */
+# define RxDMAError 0x00040000 /* Error during receive DMA */
+# define RxDMALateErr 0x00080000 /* Receive DMA, data late */
+# define RxParityErr 0x00100000 /* Parity error during receive DMA */
+# define RxTagError 0x00200000 /* Tag error during receive DMA */
+# define TxEOPError 0x00400000 /* Tx descriptor did not have EOP set */
+# define MIFIntrEvent 0x00800000 /* MIF is signaling an interrupt */
+# define TxHostToFIFO 0x01000000 /* Data moved from host to FIFO */
+# define TxFIFOAllSent 0x02000000 /* Transmitted all packets in FIFO */
+# define TxDMAError 0x04000000 /* Error during transmit DMA */
+# define TxDMALateError 0x08000000 /* Late error during transmit DMA */
+# define TxParityError 0x10000000 /* Parity error during transmit DMA */
+# define TxTagError 0x20000000 /* Tag error during transmit DMA */
+# define PIOError 0x40000000 /* PIO access got an error */
+# define PIOParityError 0x80000000 /* PIO access got a parity error */
+# define DisableAll 0xffffffff
+# define EnableAll 0x00000000
+/* # define NormalIntEvents ~(FrameReceived | FrameSent | TxUnderrun) */
+# define EnableNormal ~(FrameReceived | FrameSent)
+# define EnableErrors (FrameReceived | FrameSent)
+# define RxErrorMask (RxFrameCntExp | RxAlignCntExp | RxCRCCntExp | \
+ RxLenCntExp | RxOverFlow | RxCodeViolation)
+# define TxErrorMask (TxUnderrun | TxMaxSizeError | TxExcessCollExp | \
+ TxLateCollExp | TxNetworkCollExp | TxDeferTimerExp)
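+/* Note: a bit set in INTDISABLE masks the corresponding interrupt (hence
+ * DisableAll/EnableAll above); EnableNormal leaves only FrameReceived and
+ * FrameSent unmasked, while EnableErrors masks just those two. */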
+
+/* transmit control */
+#define TXRST 0x420 /* transmit reset */
+# define TxResetBit 0x0001
+#define TXCFG 0x430 /* transmit configuration control*/
+# define TxMACEnable 0x0001 /* output driver enable */
+# define TxSlowMode 0x0020 /* enable slow mode */
+# define TxIgnoreColl 0x0040 /* ignore transmit collisions */
+# define TxNoFCS 0x0080 /* do not emit FCS */
+# define TxNoBackoff 0x0100 /* no backoff in case of collisions */
+# define TxFullDuplex 0x0200 /* enable full-duplex */
+# define TxNeverGiveUp 0x0400 /* don't give up on transmits */
+#define IPG1 0x440 /* Inter-packet gap 1 */
+#define IPG2 0x450 /* Inter-packet gap 2 */
+#define ALIMIT 0x460 /* Transmit attempt limit */
+#define SLOT 0x470 /* Transmit slot time */
+#define PALEN 0x480 /* Size of transmit preamble */
+#define PAPAT 0x490 /* Pattern for transmit preamble */
+#define TXSFD 0x4a0 /* Transmit frame delimiter */
+#define JAM 0x4b0 /* Jam size */
+#define TXMAX 0x4c0 /* Transmit max pkt size */
+#define TXMIN 0x4d0 /* Transmit min pkt size */
+#define PAREG 0x4e0 /* Count of transmit peak attempts */
+#define DCNT 0x4f0 /* Transmit defer timer */
+#define NCCNT 0x500 /* Transmit normal-collision counter */
+#define NTCNT 0x510 /* Transmit first-collision counter */
+#define EXCNT 0x520 /* Transmit excess-collision counter */
+#define LTCNT 0x530 /* Transmit late-collision counter */
+#define RSEED 0x540 /* Transmit random number seed */
+#define TXSM 0x550 /* Transmit state machine */
+
+/* receive control */
+#define RXRST 0x620 /* receive reset */
+# define RxResetValue 0x0000
+#define RXCFG 0x630 /* receive configuration control */
+# define RxMACEnable 0x0001 /* receiver overall enable */
+# define RxCFGReserved 0x0004
+# define RxPadStripEnab 0x0020 /* enable pad byte stripping */
+# define RxPromiscEnable 0x0040 /* turn on promiscuous mode */
+# define RxNoErrCheck 0x0080 /* disable receive error checking */
+# define RxCRCNoStrip 0x0100 /* disable auto-CRC-stripping */
+# define RxRejectOwnPackets 0x0200 /* don't receive our own packets */
+# define RxGrpPromisck 0x0400 /* enable group promiscuous mode */
+# define RxHashFilterEnable 0x0800 /* enable hash filter */
+# define RxAddrFilterEnable 0x1000 /* enable address filter */
+#define RXMAX 0x640 /* Max receive packet size */
+#define RXMIN 0x650 /* Min receive packet size */
+#define MADD2 0x660 /* our enet address, high part */
+#define MADD1 0x670 /* our enet address, middle part */
+#define MADD0 0x680 /* our enet address, low part */
+#define FRCNT 0x690 /* receive frame counter */
+#define LECNT 0x6a0 /* Receive excess length error counter */
+#define AECNT 0x6b0 /* Receive misaligned error counter */
+#define FECNT 0x6c0 /* Receive CRC error counter */
+#define RXSM 0x6d0 /* Receive state machine */
+#define RXCV 0x6e0 /* Receive code violation */
+
+#define BHASH3 0x700 /* multicast hash register */
+#define BHASH2 0x710 /* multicast hash register */
+#define BHASH1 0x720 /* multicast hash register */
+#define BHASH0 0x730 /* multicast hash register */
+
+#define AFR2 0x740 /* address filtering setup? */
+#define AFR1 0x750 /* address filtering setup? */
+#define AFR0 0x760 /* address filtering setup? */
+#define AFCR 0x770 /* address filter compare register? */
+# define EnableAllCompares 0x0fff
+
+/* bits in XIFC */
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
new file mode 100644
index 000000000000..cf50384b469e
--- /dev/null
+++ b/drivers/net/bonding/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Ethernet Bonding driver
+#
+
+obj-$(CONFIG_BONDING) += bonding.o
+
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o
+
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
new file mode 100644
index 000000000000..6233c4ffb805
--- /dev/null
+++ b/drivers/net/bonding/bond_3ad.c
@@ -0,0 +1,2451 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ *
+ * Changes:
+ *
+ * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Amir Noam <amir.noam at intel dot com>
+ * - Added support for lacp_rate module param.
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Based on discussion on mailing list, changed locking scheme
+ * to use lock/unlock or lock_bh/unlock_bh appropriately instead
+ * of lock_irqsave/unlock_irqrestore. The new scheme helps expose
+ * hidden bugs and solves system hangs that occurred because holding
+ * lock_irqsave doesn't prevent softirqs from running.
+ * This also increases total throughput since interrupts are not
+ * blocked on each transmitted packet or monitor timeout.
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Renamed bond_3ad_link_status_changed() to
+ * bond_3ad_handle_link_change() for compatibility with TLB.
+ *
+ * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
+ * - Fix long fail over time when releasing last slave of an active
+ * aggregator - send LACPDU on unbind of slave to tell partner this
+ * port is no longer aggregatable.
+ *
+ * 2003/06/25 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
+ * - Send LACPDU as highest priority packet to further fix the above
+ * problem on very high Tx traffic load where packets may get dropped
+ * by the slave.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Code cleanup and style changes
+ */
+
+//#define BONDING_DEBUG 1
+
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/if_bonding.h>
+#include <linux/pkt_sched.h>
+#include "bonding.h"
+#include "bond_3ad.h"
+
+// General definitions
+#define AD_SHORT_TIMEOUT 1
+#define AD_LONG_TIMEOUT 0
+#define AD_STANDBY 0x2
+#define AD_MAX_TX_IN_SECOND 3
+#define AD_COLLECTOR_MAX_DELAY 0
+
+// Timer definitions(43.4.4 in the 802.3ad standard)
+#define AD_FAST_PERIODIC_TIME 1
+#define AD_SLOW_PERIODIC_TIME 30
+#define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME)
+#define AD_LONG_TIMEOUT_TIME (3*AD_SLOW_PERIODIC_TIME)
+#define AD_CHURN_DETECTION_TIME 60
+#define AD_AGGREGATE_WAIT_TIME 2
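+// (the periodic/timeout times above are in seconds; __ad_timer_to_ticks()
+// converts them to AD module ticks)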
+
+// Port state definitions(43.4.2.2 in the 802.3ad standard)
+#define AD_STATE_LACP_ACTIVITY 0x1
+#define AD_STATE_LACP_TIMEOUT 0x2
+#define AD_STATE_AGGREGATION 0x4
+#define AD_STATE_SYNCHRONIZATION 0x8
+#define AD_STATE_COLLECTING 0x10
+#define AD_STATE_DISTRIBUTING 0x20
+#define AD_STATE_DEFAULTED 0x40
+#define AD_STATE_EXPIRED 0x80
+
+// Port Variables definitions used by the State Machines(43.4.7 in the 802.3ad standard)
+#define AD_PORT_BEGIN 0x1
+#define AD_PORT_LACP_ENABLED 0x2
+#define AD_PORT_ACTOR_CHURN 0x4
+#define AD_PORT_PARTNER_CHURN 0x8
+#define AD_PORT_READY 0x10
+#define AD_PORT_READY_N 0x20
+#define AD_PORT_MATCHED 0x40
+#define AD_PORT_STANDBY 0x80
+#define AD_PORT_SELECTED 0x100
+#define AD_PORT_MOVED 0x200
+
+// Port Key definitions
+// The key is determined from the link speed, the duplex and the
+// user key (the user key is not yet supported).
+//             ---------------------------------------------------
+// Port key :  |        User key        |      Speed      |Duplex|
+//             ---------------------------------------------------
+//             16                       6                 1      0
+#define AD_DUPLEX_KEY_BITS 0x1
+#define AD_SPEED_KEY_BITS 0x3E
+#define AD_USER_KEY_BITS 0xFFC0
+
+//dalloun
+#define AD_LINK_SPEED_BITMASK_1MBPS 0x1
+#define AD_LINK_SPEED_BITMASK_10MBPS 0x2
+#define AD_LINK_SPEED_BITMASK_100MBPS 0x4
+#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8
+//endalloun
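+// e.g. with the speed field in bits 5:1 and duplex in bit 0, a full-duplex
+// 1000 Mbps link with no user key would carry the port key
+// (AD_LINK_SPEED_BITMASK_1000MBPS << 1) | AD_DUPLEX_KEY_BITS = 0x11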
+
+// compare MAC addresses
+#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
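+// (memcmp() semantics: evaluates to 0 when the two addresses are equal, so
+// callers negate it to test for equality)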
+
+static struct mac_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
+static u16 ad_ticks_per_sec;
+static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
+
+// ================= 3AD api to bonding and kernel code ==================
+static u16 __get_link_speed(struct port *port);
+static u8 __get_duplex(struct port *port);
+static inline void __initialize_port_locks(struct port *port);
+//conversions
+static void __ntohs_lacpdu(struct lacpdu *lacpdu);
+static u16 __ad_timer_to_ticks(u16 timer_type, u16 Par);
+
+
+// ================= ad code helper functions ==================
+//needed by ad_rx_machine(...)
+static void __record_pdu(struct lacpdu *lacpdu, struct port *port);
+static void __record_default(struct port *port);
+static void __update_selected(struct lacpdu *lacpdu, struct port *port);
+static void __update_default_selected(struct port *port);
+static void __choose_matched(struct lacpdu *lacpdu, struct port *port);
+static void __update_ntt(struct lacpdu *lacpdu, struct port *port);
+
+//needed for ad_mux_machine(..)
+static void __attach_bond_to_agg(struct port *port);
+static void __detach_bond_from_agg(struct port *port);
+static int __agg_ports_are_ready(struct aggregator *aggregator);
+static void __set_agg_ports_ready(struct aggregator *aggregator, int val);
+
+//needed for ad_agg_selection_logic(...)
+static u32 __get_agg_bandwidth(struct aggregator *aggregator);
+static struct aggregator *__get_active_agg(struct aggregator *aggregator);
+
+
+// ================= main 802.3ad protocol functions ==================
+static int ad_lacpdu_send(struct port *port);
+static int ad_marker_send(struct port *port, struct marker *marker);
+static void ad_mux_machine(struct port *port);
+static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+static void ad_tx_machine(struct port *port);
+static void ad_periodic_machine(struct port *port);
+static void ad_port_selection_logic(struct port *port);
+static void ad_agg_selection_logic(struct aggregator *aggregator);
+static void ad_clear_agg(struct aggregator *aggregator);
+static void ad_initialize_agg(struct aggregator *aggregator);
+static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_lacpdu(struct lacpdu *Lacpdu);
+static void ad_enable_collecting_distributing(struct port *port);
+static void ad_disable_collecting_distributing(struct port *port);
+static void ad_marker_info_received(struct marker *marker_info, struct port *port);
+static void ad_marker_response_received(struct marker *marker, struct port *port);
+
+
+/////////////////////////////////////////////////////////////////////////////////
+// ================= api to bonding and kernel code ==================
+/////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * __get_bond_by_port - get the port's bonding struct
+ * @port: the port we're looking at
+ *
+ * Return @port's bonding struct, or %NULL if it can't be found.
+ */
+static inline struct bonding *__get_bond_by_port(struct port *port)
+{
+ if (port->slave == NULL) {
+ return NULL;
+ }
+
+ return bond_get_bond_by_slave(port->slave);
+}
+
+/**
+ * __get_first_port - get the first port in the bond
+ * @bond: the bond we're looking at
+ *
+ * Return the port of the first slave in @bond, or %NULL if it can't be found.
+ */
+static inline struct port *__get_first_port(struct bonding *bond)
+{
+ if (bond->slave_cnt == 0) {
+ return NULL;
+ }
+
+ return &(SLAVE_AD_INFO(bond->first_slave).port);
+}
+
+/**
+ * __get_next_port - get the next port in the bond
+ * @port: the port we're looking at
+ *
+ * Return the port of the slave that is next in line of @port's slave in the
+ * bond, or %NULL if it can't be found.
+ */
+static inline struct port *__get_next_port(struct port *port)
+{
+ struct bonding *bond = __get_bond_by_port(port);
+ struct slave *slave = port->slave;
+
+ // If there's no bond for this port, or this is the last slave
+ if ((bond == NULL) || (slave->next == bond->first_slave)) {
+ return NULL;
+ }
+
+ return &(SLAVE_AD_INFO(slave->next).port);
+}
+
+/**
+ * __get_first_agg - get the first aggregator in the bond
+ * @port: the port we're looking at
+ *
+ * Return the aggregator of the first slave in @bond, or %NULL if it can't be
+ * found.
+ */
+static inline struct aggregator *__get_first_agg(struct port *port)
+{
+ struct bonding *bond = __get_bond_by_port(port);
+
+ // If there's no bond for this port, or bond has no slaves
+ if ((bond == NULL) || (bond->slave_cnt == 0)) {
+ return NULL;
+ }
+
+ return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+}
+
+/**
+ * __get_next_agg - get the next aggregator in the bond
+ * @aggregator: the aggregator we're looking at
+ *
+ * Return the aggregator of the slave that is next in line of @aggregator's
+ * slave in the bond, or %NULL if it can't be found.
+ */
+static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
+{
+ struct slave *slave = aggregator->slave;
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
+ // If there's no bond for this aggregator, or this is the last slave
+ if ((bond == NULL) || (slave->next == bond->first_slave)) {
+ return NULL;
+ }
+
+ return &(SLAVE_AD_INFO(slave->next).aggregator);
+}
+
+/**
+ * __disable_port - disable the port's slave
+ * @port: the port we're looking at
+ *
+ */
+static inline void __disable_port(struct port *port)
+{
+ bond_set_slave_inactive_flags(port->slave);
+}
+
+/**
+ * __enable_port - enable the port's slave, if it's up
+ * @port: the port we're looking at
+ *
+ */
+static inline void __enable_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) {
+ bond_set_slave_active_flags(slave);
+ }
+}
+
+/**
+ * __port_is_enabled - check if the port's slave is in active state
+ * @port: the port we're looking at
+ *
+ */
+static inline int __port_is_enabled(struct port *port)
+{
+ return(port->slave->state == BOND_STATE_ACTIVE);
+}
+
+/**
+ * __get_agg_selection_mode - get the aggregator selection mode
+ * @port: the port we're looking at
+ *
+ * Get the aggregator selection mode. Can be %BANDWIDTH or %COUNT.
+ */
+static inline u32 __get_agg_selection_mode(struct port *port)
+{
+ struct bonding *bond = __get_bond_by_port(port);
+
+ if (bond == NULL) {
+ return AD_BANDWIDTH;
+ }
+
+ return BOND_AD_INFO(bond).agg_select_mode;
+}
+
+/**
+ * __check_agg_selection_timer - check if the selection timer has expired
+ * @port: the port we're looking at
+ *
+ */
+static inline int __check_agg_selection_timer(struct port *port)
+{
+ struct bonding *bond = __get_bond_by_port(port);
+
+ if (bond == NULL) {
+ return 0;
+ }
+
+ return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+}
+
+/**
+ * __get_rx_machine_lock - lock the port's RX machine
+ * @port: the port we're looking at
+ *
+ */
+static inline void __get_rx_machine_lock(struct port *port)
+{
+ spin_lock(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+}
+
+/**
+ * __release_rx_machine_lock - unlock the port's RX machine
+ * @port: the port we're looking at
+ *
+ */
+static inline void __release_rx_machine_lock(struct port *port)
+{
+ spin_unlock(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+}
+
+/**
+ * __get_link_speed - get a port's speed
+ * @port: the port we're looking at
+ *
+ * Return @port's speed in 802.3ad bitmask format. i.e. one of:
+ * 0,
+ * %AD_LINK_SPEED_BITMASK_10MBPS,
+ * %AD_LINK_SPEED_BITMASK_100MBPS,
+ * %AD_LINK_SPEED_BITMASK_1000MBPS
+ */
+static u16 __get_link_speed(struct port *port)
+{
+ struct slave *slave = port->slave;
+ u16 speed;
+
+ /* this if covers only a special case: when the configuration starts with
+ * link down, it sets the speed to 0.
+ * This is done in spite of the fact that the e100 driver reports 0 to be
+ * compatible with MVT in the future.*/
+ if (slave->link != BOND_LINK_UP) {
+ speed=0;
+ } else {
+ switch (slave->speed) {
+ case SPEED_10:
+ speed = AD_LINK_SPEED_BITMASK_10MBPS;
+ break;
+
+ case SPEED_100:
+ speed = AD_LINK_SPEED_BITMASK_100MBPS;
+ break;
+
+ case SPEED_1000:
+ speed = AD_LINK_SPEED_BITMASK_1000MBPS;
+ break;
+
+ default:
+ speed = 0; // unknown speed value from ethtool. shouldn't happen
+ break;
+ }
+ }
+
+ dprintk("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed);
+ return speed;
+}
+
+/**
+ * __get_duplex - get a port's duplex
+ * @port: the port we're looking at
+ *
+ * Return @port's duplex in 802.3ad bitmask format. i.e.:
+ * 0x01 if in full duplex
+ * 0x00 otherwise
+ */
+static u8 __get_duplex(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ u8 retval;
+
+ // handling a special case: when the configuration starts with
+ // link down, it sets the duplex to 0.
+ if (slave->link != BOND_LINK_UP) {
+ retval=0x0;
+ } else {
+ switch (slave->duplex) {
+ case DUPLEX_FULL:
+ retval=0x1;
+ dprintk("Port %d Received status full duplex update from adapter\n", port->actor_port_number);
+ break;
+ case DUPLEX_HALF:
+ default:
+ retval=0x0;
+ dprintk("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number);
+ break;
+ }
+ }
+ return retval;
+}
+
+/**
+ * __initialize_port_locks - initialize a port's RX machine spinlock
+ * @port: the port we're looking at
+ *
+ */
+static inline void __initialize_port_locks(struct port *port)
+{
+ // make sure it isn't called twice
+ spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+}
+
+//conversions
+/**
+ * __ntohs_lacpdu - convert the contents of a LACPDU to host byte order
+ * @lacpdu: the specified lacpdu
+ *
+ * For each multi-byte field in the lacpdu, convert its contents to host
+ * byte order.
+ */
+static void __ntohs_lacpdu(struct lacpdu *lacpdu)
+{
+ if (lacpdu) {
+ lacpdu->actor_system_priority = ntohs(lacpdu->actor_system_priority);
+ lacpdu->actor_key = ntohs(lacpdu->actor_key);
+ lacpdu->actor_port_priority = ntohs(lacpdu->actor_port_priority);
+ lacpdu->actor_port = ntohs(lacpdu->actor_port);
+ lacpdu->partner_system_priority = ntohs(lacpdu->partner_system_priority);
+ lacpdu->partner_key = ntohs(lacpdu->partner_key);
+ lacpdu->partner_port_priority = ntohs(lacpdu->partner_port_priority);
+ lacpdu->partner_port = ntohs(lacpdu->partner_port);
+ lacpdu->collector_max_delay = ntohs(lacpdu->collector_max_delay);
+ }
+}
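+/* Byte-swapping is its own inverse, so the same routine also serves as the
+ * host-to-network conversion on transmit (see __update_lacpdu_from_port). */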
+
+/**
+ * __ad_timer_to_ticks - convert a given timer type to AD module ticks
+ * @timer_type: which timer to operate
+ * @par: timer parameter. see below
+ *
+ * If @timer_type is %current_while_timer, @par indicates long/short timer.
+ * If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME,
+ * %SLOW_PERIODIC_TIME.
+ */
+static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
+{
+ u16 retval=0; //to silence the compiler
+
+ switch (timer_type) {
+ case AD_CURRENT_WHILE_TIMER: // for rx machine usage
+ if (par) { // for short or long timeout
+ retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout
+ } else {
+ retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout
+ }
+ break;
+ case AD_ACTOR_CHURN_TIMER: // for local churn machine
+ retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
+ break;
+ case AD_PERIODIC_TIMER: // for periodic machine
+ retval = (par*ad_ticks_per_sec); // par is FAST_PERIODIC_TIME or SLOW_PERIODIC_TIME
+ break;
+ case AD_PARTNER_CHURN_TIMER: // for remote churn machine
+ retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
+ break;
+ case AD_WAIT_WHILE_TIMER: // for selection machine
+ retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec);
+ break;
+ }
+ return retval;
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+// ================= ad_rx_machine helper functions ==================
+/////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * __record_pdu - record parameters from a received lacpdu
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * Record the parameter values for the Actor carried in a received lacpdu as
+ * the current partner operational parameter values, and set
+ * actor_oper_port_state.defaulted to FALSE.
+ */
+static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
+{
+ // validate lacpdu and port
+ if (lacpdu && port) {
+ // record the new parameter values for the partner operational
+ port->partner_oper_port_number = lacpdu->actor_port;
+ port->partner_oper_port_priority = lacpdu->actor_port_priority;
+ port->partner_oper_system = lacpdu->actor_system;
+ port->partner_oper_system_priority = lacpdu->actor_system_priority;
+ port->partner_oper_key = lacpdu->actor_key;
+ // zero the partner's state bits before recording the new state
+ port->partner_oper_port_state = 0;
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_LACP_ACTIVITY);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_LACP_TIMEOUT);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_AGGREGATION);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_COLLECTING);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_DISTRIBUTING);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_DEFAULTED);
+ port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_EXPIRED);
+
+ // set actor_oper_port_state.defaulted to FALSE
+ port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
+
+ // set partner synchronization on if the partner is in sync and the port is matched
+ if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
+ port->partner_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ } else {
+ port->partner_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ }
+ }
+}
+
+/**
+ * __record_default - record default parameters
+ * @port: the port we're looking at
+ *
+ * This function records the default parameter values for the partner carried
+ * in the Partner Admin parameters as the current partner operational parameter
+ * values and sets actor_oper_port_state.defaulted to TRUE.
+ */
+static void __record_default(struct port *port)
+{
+ // validate the port
+ if (port) {
+ // record the partner admin parameters
+ port->partner_oper_port_number = port->partner_admin_port_number;
+ port->partner_oper_port_priority = port->partner_admin_port_priority;
+ port->partner_oper_system = port->partner_admin_system;
+ port->partner_oper_system_priority = port->partner_admin_system_priority;
+ port->partner_oper_key = port->partner_admin_key;
+ port->partner_oper_port_state = port->partner_admin_port_state;
+
+ // set actor_oper_port_state.defaulted to true
+ port->actor_oper_port_state |= AD_STATE_DEFAULTED;
+ }
+}
+
+/**
+ * __update_selected - update a port's Selected variable from a received lacpdu
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * Update the value of the selected variable, using parameter values from a
+ * newly received lacpdu. The parameter values for the Actor carried in the
+ * received PDU are compared with the corresponding operational parameter
+ * values for the port's partner. If one or more of the comparisons shows that
+ * the value(s) received in the PDU differ from the current operational values,
+ * then selected is set to FALSE and actor_oper_port_state.synchronization is
+ * set to out_of_sync. Otherwise, selected remains unchanged.
+ */
+static void __update_selected(struct lacpdu *lacpdu, struct port *port)
+{
+ // validate lacpdu and port
+ if (lacpdu && port) {
+ // check if any parameter is different
+ if ((lacpdu->actor_port != port->partner_oper_port_number) ||
+ (lacpdu->actor_port_priority != port->partner_oper_port_priority) ||
+ MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->partner_oper_system)) ||
+ (lacpdu->actor_system_priority != port->partner_oper_system_priority) ||
+ (lacpdu->actor_key != port->partner_oper_key) ||
+ ((lacpdu->actor_state & AD_STATE_AGGREGATION) != (port->partner_oper_port_state & AD_STATE_AGGREGATION))
+ ) {
+ // update the state machine Selected variable
+ port->sm_vars &= ~AD_PORT_SELECTED;
+ }
+ }
+}
+
+/**
+ * __update_default_selected - update a port's Selected variable from Partner
+ * @port: the port we're looking at
+ *
+ * This function updates the value of the selected variable, using the partner
+ * administrative parameter values. The administrative values are compared with
+ * the corresponding operational parameter values for the partner. If one or
+ * more of the comparisons shows that the administrative value(s) differ from
+ * the current operational values, then Selected is set to FALSE and
+ * actor_oper_port_state.synchronization is set to OUT_OF_SYNC. Otherwise,
+ * Selected remains unchanged.
+ */
+static void __update_default_selected(struct port *port)
+{
+ // validate the port
+ if (port) {
+ // check if any parameter is different
+ if ((port->partner_admin_port_number != port->partner_oper_port_number) ||
+ (port->partner_admin_port_priority != port->partner_oper_port_priority) ||
+ MAC_ADDRESS_COMPARE(&(port->partner_admin_system), &(port->partner_oper_system)) ||
+ (port->partner_admin_system_priority != port->partner_oper_system_priority) ||
+ (port->partner_admin_key != port->partner_oper_key) ||
+ ((port->partner_admin_port_state & AD_STATE_AGGREGATION) != (port->partner_oper_port_state & AD_STATE_AGGREGATION))
+ ) {
+ // update the state machine Selected variable
+ port->sm_vars &= ~AD_PORT_SELECTED;
+ }
+ }
+}
+
+/**
+ * __choose_matched - update a port's matched variable from a received lacpdu
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * Update the value of the matched variable, using parameter values from a
+ * newly received lacpdu. Parameter values for the partner carried in the
+ * received PDU are compared with the corresponding operational parameter
+ * values for the actor. Matched is set to TRUE if all of these parameters
+ * match and the PDU parameter partner_state.aggregation has the same value as
+ * actor_oper_port_state.aggregation and lacp will actively maintain the link
+ * in the aggregation. Matched is also set to TRUE if the value of
+ * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
+ * an individual link and lacp will actively maintain the link. Otherwise,
+ * matched is set to FALSE. LACP is considered to be actively maintaining the
+ * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
+ * the actor's actor_oper_port_state.lacp_activity and the PDU's
+ * partner_state.lacp_activity variables are TRUE.
+ */
+static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
+{
+ // validate lacpdu and port
+ if (lacpdu && port) {
+ // check if all parameters are alike
+ if (((lacpdu->partner_port == port->actor_port_number) &&
+ (lacpdu->partner_port_priority == port->actor_port_priority) &&
+ !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+ (lacpdu->partner_system_priority == port->actor_system_priority) &&
+ (lacpdu->partner_key == port->actor_oper_port_key) &&
+ ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
+ // or this is individual link(aggregation == FALSE)
+ ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
+ ) {
+ // update the state machine Matched variable
+ port->sm_vars |= AD_PORT_MATCHED;
+ } else {
+ port->sm_vars &= ~AD_PORT_MATCHED;
+ }
+ }
+}
+
+/**
+ * __update_ntt - update a port's ntt variable from a received lacpdu
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * Updates the value of the ntt variable, using parameter values from a newly
+ * received lacpdu. The parameter values for the partner carried in the
+ * received PDU are compared with the corresponding operational parameter
+ * values for the Actor. If one or more of the comparisons shows that the
+ * value(s) received in the PDU differ from the current operational values,
+ * then ntt is set to TRUE. Otherwise, ntt remains unchanged.
+ */
+static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
+{
+ // validate lacpdu and port
+ if (lacpdu && port) {
+ // check if any parameter is different
+ if ((lacpdu->partner_port != port->actor_port_number) ||
+ (lacpdu->partner_port_priority != port->actor_port_priority) ||
+ MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+ (lacpdu->partner_system_priority != port->actor_system_priority) ||
+ (lacpdu->partner_key != port->actor_oper_port_key) ||
+ ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
+ ((lacpdu->partner_state & AD_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)) ||
+ ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
+ ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
+ ) {
+ // set ntt to be TRUE
+ port->ntt = 1;
+ }
+ }
+}
+
+/**
+ * __attach_bond_to_agg
+ * @port: the port we're looking at
+ *
+ * Handle the attaching of the port's control parser/multiplexer and the
+ * aggregator. This function does nothing since the parser/multiplexer of the
+ * receive and the parser/multiplexer of the aggregator are already combined.
+ */
+static void __attach_bond_to_agg(struct port *port)
+{
+ port=NULL; // just to satisfy the compiler
+ // This function does nothing since the parser/multiplexer of the receive
+ // and the parser/multiplexer of the aggregator are already combined
+}
+
+/**
+ * __detach_bond_from_agg
+ * @port: the port we're looking at
+ *
+ * Handle the detaching of the port's control parser/multiplexer from the
+ * aggregator. This function does nothing since the parser/multiplexer of the
+ * receive and the parser/multiplexer of the aggregator are already combined.
+ */
+static void __detach_bond_from_agg(struct port *port)
+{
+ port=NULL; // just to satisfy the compiler
+ // This function does nothing since the parser/multiplexer of the receive
+ // and the parser/multiplexer of the aggregator are already combined
+}
+
+/**
+ * __agg_ports_are_ready - check if all ports in an aggregator are ready
+ * @aggregator: the aggregator we're looking at
+ *
+ */
+static int __agg_ports_are_ready(struct aggregator *aggregator)
+{
+ struct port *port;
+ int retval = 1;
+
+ if (aggregator) {
+ // scan all ports in this aggregator to verify that they are all ready
+ for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) {
+ if (!(port->sm_vars & AD_PORT_READY_N)) {
+ retval = 0;
+ break;
+ }
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * __set_agg_ports_ready - set value of Ready bit in all ports of an aggregator
+ * @aggregator: the aggregator we're looking at
+ * @val: Should the ports' ready bit be set on or off
+ *
+ */
+static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
+{
+ struct port *port;
+
+ for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) {
+ if (val) {
+ port->sm_vars |= AD_PORT_READY;
+ } else {
+ port->sm_vars &= ~AD_PORT_READY;
+ }
+ }
+}
+
+/**
+ * __get_agg_bandwidth - get the total bandwidth of an aggregator
+ * @aggregator: the aggregator we're looking at
+ *
+ */
+static u32 __get_agg_bandwidth(struct aggregator *aggregator)
+{
+ u32 bandwidth=0;
+ u32 basic_speed;
+
+ if (aggregator->num_of_ports) {
+ basic_speed = __get_link_speed(aggregator->lag_ports);
+ switch (basic_speed) {
+ case AD_LINK_SPEED_BITMASK_1MBPS:
+ bandwidth = aggregator->num_of_ports;
+ break;
+ case AD_LINK_SPEED_BITMASK_10MBPS:
+ bandwidth = aggregator->num_of_ports * 10;
+ break;
+ case AD_LINK_SPEED_BITMASK_100MBPS:
+ bandwidth = aggregator->num_of_ports * 100;
+ break;
+ case AD_LINK_SPEED_BITMASK_1000MBPS:
+ bandwidth = aggregator->num_of_ports * 1000;
+ break;
+ default:
+ bandwidth = 0; // to silence the compiler
+ }
+ }
+ return bandwidth;
+}
+
+/**
+ * __get_active_agg - get the current active aggregator
+ * @aggregator: the aggregator we're looking at
+ *
+ */
+static struct aggregator *__get_active_agg(struct aggregator *aggregator)
+{
+ struct aggregator *retval = NULL;
+
+ for (; aggregator; aggregator = __get_next_agg(aggregator)) {
+ if (aggregator->is_active) {
+ retval = aggregator;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * __update_lacpdu_from_port - update a port's lacpdu fields
+ * @port: the port we're looking at
+ *
+ */
+static inline void __update_lacpdu_from_port(struct port *port)
+{
+ struct lacpdu *lacpdu = &port->lacpdu;
+
+ /* update current actual Actor parameters */
+ /* lacpdu->subtype initialized
+ * lacpdu->version_number initialized
+ * lacpdu->tlv_type_actor_info initialized
+ * lacpdu->actor_information_length initialized
+ */
+
+ lacpdu->actor_system_priority = port->actor_system_priority;
+ lacpdu->actor_system = port->actor_system;
+ lacpdu->actor_key = port->actor_oper_port_key;
+ lacpdu->actor_port_priority = port->actor_port_priority;
+ lacpdu->actor_port = port->actor_port_number;
+ lacpdu->actor_state = port->actor_oper_port_state;
+
+ /* lacpdu->reserved_3_1 initialized
+ * lacpdu->tlv_type_partner_info initialized
+ * lacpdu->partner_information_length initialized
+ */
+
+ lacpdu->partner_system_priority = port->partner_oper_system_priority;
+ lacpdu->partner_system = port->partner_oper_system;
+ lacpdu->partner_key = port->partner_oper_key;
+ lacpdu->partner_port_priority = port->partner_oper_port_priority;
+ lacpdu->partner_port = port->partner_oper_port_number;
+ lacpdu->partner_state = port->partner_oper_port_state;
+
+ /* lacpdu->reserved_3_2 initialized
+ * lacpdu->tlv_type_collector_info initialized
+ * lacpdu->collector_information_length initialized
+ * collector_max_delay initialized
+ * reserved_12[12] initialized
+ * tlv_type_terminator initialized
+ * terminator_length initialized
+ * reserved_50[50] initialized
+ */
+
+ /* Convert all non u8 parameters to Big Endian for transmit */
+ __ntohs_lacpdu(lacpdu);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+// ================= main 802.3ad protocol code ======================================
+//////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * ad_lacpdu_send - send out a lacpdu packet on a given port
+ * @port: the port we're looking at
+ *
+ * Returns: 0 on success
+ * < 0 on error
+ */
+static int ad_lacpdu_send(struct port *port)
+{
+ struct slave *slave = port->slave;
+ struct sk_buff *skb;
+ struct lacpdu_header *lacpdu_header;
+ int length = sizeof(struct lacpdu_header);
+ struct mac_addr lacpdu_multicast_address = AD_MULTICAST_LACPDU_ADDR;
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ return -ENOMEM;
+ }
+
+ skb->dev = slave->dev;
+ skb->mac.raw = skb->data;
+ skb->nh.raw = skb->data + ETH_HLEN;
+ skb->protocol = PKT_TYPE_LACPDU;
+ skb->priority = TC_PRIO_CONTROL;
+
+ lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
+
+ lacpdu_header->ad_header.destination_address = lacpdu_multicast_address;
+ /* Note: the source address is set to the member's PERMANENT address,
+ because we use it to identify looped-back LACPDUs on receive. */
+ lacpdu_header->ad_header.source_address = *((struct mac_addr *)(slave->perm_hwaddr));
+ lacpdu_header->ad_header.length_type = PKT_TYPE_LACPDU;
+
+ lacpdu_header->lacpdu = port->lacpdu; // struct copy
+
+ dev_queue_xmit(skb);
+
+ return 0;
+}
+
+/**
+ * ad_marker_send - send marker information/response on a given port
+ * @port: the port we're looking at
+ * @marker: marker data to send
+ *
+ * Returns: 0 on success
+ * < 0 on error
+ */
+static int ad_marker_send(struct port *port, struct marker *marker)
+{
+ struct slave *slave = port->slave;
+ struct sk_buff *skb;
+ struct marker_header *marker_header;
+ int length = sizeof(struct marker_header);
+ struct mac_addr lacpdu_multicast_address = AD_MULTICAST_LACPDU_ADDR;
+
+ skb = dev_alloc_skb(length + 16);
+ if (!skb) {
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, 16);
+
+ skb->dev = slave->dev;
+ skb->mac.raw = skb->data;
+ skb->nh.raw = skb->data + ETH_HLEN;
+ skb->protocol = PKT_TYPE_LACPDU;
+
+ marker_header = (struct marker_header *)skb_put(skb, length);
+
+ marker_header->ad_header.destination_address = lacpdu_multicast_address;
+ /* Note: the source address is set to the member's PERMANENT address,
+ because we use it to identify loopback MARKERs on receive. */
+ marker_header->ad_header.source_address = *((struct mac_addr *)(slave->perm_hwaddr));
+ marker_header->ad_header.length_type = PKT_TYPE_LACPDU;
+
+ marker_header->marker = *marker; // struct copy
+
+ dev_queue_xmit(skb);
+
+ return 0;
+}
+
+/**
+ * ad_mux_machine - handle a port's mux state machine
+ * @port: the port we're looking at
+ *
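+ * Walks the port through DETACHED -> WAITING -> ATTACHED ->
+ * COLLECTING_DISTRIBUTING as selection and partner synchronization allow,
+ * and back down when the port becomes unselected or synchronization is lost.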
+ */
+static void ad_mux_machine(struct port *port)
+{
+ mux_states_t last_state;
+
+ // keep current State Machine state to compare later if it was changed
+ last_state = port->sm_mux_state;
+
+ if (port->sm_vars & AD_PORT_BEGIN) {
+ port->sm_mux_state = AD_MUX_DETACHED; // next state
+ } else {
+ switch (port->sm_mux_state) {
+ case AD_MUX_DETACHED:
+ if ((port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if SELECTED or STANDBY
+ port->sm_mux_state = AD_MUX_WAITING; // next state
+ }
+ break;
+ case AD_MUX_WAITING:
+ // if SELECTED == FALSE return to DETACH state
+ if (!(port->sm_vars & AD_PORT_SELECTED)) { // if UNSELECTED
+ port->sm_vars &= ~AD_PORT_READY_N;
+ // to avoid making the Selection Logic check every port's READY_N value
+ // on each callback cycle just to update the ready variable, check READY_N and update READY here
+ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+ port->sm_mux_state = AD_MUX_DETACHED; // next state
+ break;
+ }
+
+ // check if the wait_while_timer expired
+ if (port->sm_mux_timer_counter && !(--port->sm_mux_timer_counter)) {
+ port->sm_vars |= AD_PORT_READY_N;
+ }
+
+ // to avoid making the selection logic check every port's READY_N value
+ // on each callback cycle just to update the ready variable, check READY_N and update READY here
+ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+
+ // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
+ if ((port->sm_vars & AD_PORT_READY) && !port->sm_mux_timer_counter) {
+ port->sm_mux_state = AD_MUX_ATTACHED; // next state
+ }
+ break;
+ case AD_MUX_ATTACHED:
+ // also check that the agg_select_timer has expired (so enabling the port takes place only after this timer)
+ if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper_port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
+ port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state
+ } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if UNSELECTED or STANDBY
+ port->sm_vars &= ~AD_PORT_READY_N;
+ // to avoid making the selection logic check every port's READY_N value
+ // on each callback cycle just to update the ready variable, check READY_N and update READY here
+ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+ port->sm_mux_state = AD_MUX_DETACHED;// next state
+ }
+ break;
+ case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper_port_state & AD_STATE_SYNCHRONIZATION)
+ ) {
+ port->sm_mux_state = AD_MUX_ATTACHED;// next state
+
+ } else {
+ // if port state hasn't changed make
+ // sure that a collecting distributing
+ // port in an active aggregator is enabled
+ if (port->aggregator &&
+ port->aggregator->is_active &&
+ !__port_is_enabled(port)) {
+
+ __enable_port(port);
+ }
+ }
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+
+ // check if the state machine was changed
+ if (port->sm_mux_state != last_state) {
+ dprintk("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state);
+ switch (port->sm_mux_state) {
+ case AD_MUX_DETACHED:
+ __detach_bond_from_agg(port);
+ port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ ad_disable_collecting_distributing(port);
+ port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ port->ntt = 1;
+ break;
+ case AD_MUX_WAITING:
+ port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
+ break;
+ case AD_MUX_ATTACHED:
+ __attach_bond_to_agg(port);
+ port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ ad_disable_collecting_distributing(port);
+ port->ntt = 1;
+ break;
+ case AD_MUX_COLLECTING_DISTRIBUTING:
+ port->actor_oper_port_state |= AD_STATE_COLLECTING;
+ port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
+ ad_enable_collecting_distributing(port);
+ port->ntt = 1;
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+}
+
+/**
+ * ad_rx_machine - handle a port's rx State Machine
+ * @lacpdu: the lacpdu we've received
+ * @port: the port we're looking at
+ *
+ * If a lacpdu arrived, stop the previous timer (if it exists) and set the
+ * next state to CURRENT. If the timer expired, move the state machine to the
+ * proper state. Otherwise, check whether we need to switch to another state.
+ */
+static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+{
+ rx_states_t last_state;
+
+ // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
+ __get_rx_machine_lock(port);
+
+ // keep current State Machine state to compare later if it was changed
+ last_state = port->sm_rx_state;
+
+ // check if state machine should change state
+ // first, check if port was reinitialized
+ if (port->sm_vars & AD_PORT_BEGIN) {
+ port->sm_rx_state = AD_RX_INITIALIZE; // next state
+ }
+ // check if port is not enabled
+ else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) {
+ port->sm_rx_state = AD_RX_PORT_DISABLED; // next state
+ }
+ // check if new lacpdu arrived
+ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
+ port->sm_rx_timer_counter = 0; // zero timer
+ port->sm_rx_state = AD_RX_CURRENT;
+ } else {
+ // if timer is on, and if it is expired
+ if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) {
+ switch (port->sm_rx_state) {
+ case AD_RX_EXPIRED:
+ port->sm_rx_state = AD_RX_DEFAULTED; // next state
+ break;
+ case AD_RX_CURRENT:
+ port->sm_rx_state = AD_RX_EXPIRED; // next state
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ } else {
+ // if no lacpdu arrived and no timer is on
+ switch (port->sm_rx_state) {
+ case AD_RX_PORT_DISABLED:
+ if (port->sm_vars & AD_PORT_MOVED) {
+ port->sm_rx_state = AD_RX_INITIALIZE; // next state
+ } else if (port->is_enabled && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
+ port->sm_rx_state = AD_RX_EXPIRED; // next state
+ } else if (port->is_enabled && ((port->sm_vars & AD_PORT_LACP_ENABLED) == 0)) {
+ port->sm_rx_state = AD_RX_LACP_DISABLED; // next state
+ }
+ break;
+ default: //to silence the compiler
+ break;
+
+ }
+ }
+ }
+
+ // check if the State machine was changed or new lacpdu arrived
+ if ((port->sm_rx_state != last_state) || (lacpdu)) {
+ dprintk("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state);
+ switch (port->sm_rx_state) {
+ case AD_RX_INITIALIZE:
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
+ port->sm_vars &= ~AD_PORT_LACP_ENABLED;
+ } else {
+ port->sm_vars |= AD_PORT_LACP_ENABLED;
+ }
+ port->sm_vars &= ~AD_PORT_SELECTED;
+ __record_default(port);
+ port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->sm_vars &= ~AD_PORT_MOVED;
+ port->sm_rx_state = AD_RX_PORT_DISABLED; // next state
+
+ /*- Fall Through -*/
+
+ case AD_RX_PORT_DISABLED:
+ port->sm_vars &= ~AD_PORT_MATCHED;
+ break;
+ case AD_RX_LACP_DISABLED:
+ port->sm_vars &= ~AD_PORT_SELECTED;
+ __record_default(port);
+ port->partner_oper_port_state &= ~AD_STATE_AGGREGATION;
+ port->sm_vars |= AD_PORT_MATCHED;
+ port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ break;
+ case AD_RX_EXPIRED:
+ // Reset the Synchronization flag (Standard 43.4.12).
+ // This reset disables the port in the COLLECTING_DISTRIBUTING state of the
+ // mux machine on EXPIRED, even if LINK_DOWN did not arrive for the port.
+ port->partner_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->sm_vars &= ~AD_PORT_MATCHED;
+ port->partner_oper_port_state |= AD_SHORT_TIMEOUT;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ port->actor_oper_port_state |= AD_STATE_EXPIRED;
+ break;
+ case AD_RX_DEFAULTED:
+ __update_default_selected(port);
+ __record_default(port);
+ port->sm_vars |= AD_PORT_MATCHED;
+ port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ break;
+ case AD_RX_CURRENT:
+ // detect loopback situation
+ if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
+ // INFO_RECEIVED_LOOPBACK_FRAMES
+ printk(KERN_ERR DRV_NAME ": An illegal loopback occurred on adapter (%s)\n",
+ port->slave->dev->name);
+ printk(KERN_ERR "Check the configuration to verify that all Adapters "
+ "are connected to 802.3ad compliant switch ports\n");
+ __release_rx_machine_lock(port);
+ return;
+ }
+ __update_selected(lacpdu, port);
+ __update_ntt(lacpdu, port);
+ __record_pdu(lacpdu, port);
+ __choose_matched(lacpdu, port);
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
+ port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ // verify that if the aggregator is enabled, the port is enabled too.
+ //(because if the link goes down for a short time, the 802.3ad will not
+ // catch it, and the port will continue to be disabled)
+ if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) {
+ __enable_port(port);
+ }
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+ __release_rx_machine_lock(port);
+}
+
+/**
+ * ad_tx_machine - handle a port's tx state machine
+ * @port: the port we're looking at
+ *
+ */
+static void ad_tx_machine(struct port *port)
+{
+ // check if tx timer expired, to verify that we do not send more than 3 packets per second
+ if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
+ // check if there is something to send
+ if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
+ __update_lacpdu_from_port(port);
+ // send the lacpdu
+ if (ad_lacpdu_send(port) >= 0) {
+ dprintk("Sent LACPDU on port %d\n", port->actor_port_number);
+ // mark ntt as false, so it will not be sent again until demanded
+ port->ntt = 0;
+ }
+ }
+ // restart the tx timer (to ensure that we will not exceed AD_MAX_TX_IN_SECOND)
+ port->sm_tx_timer_counter=ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
+ }
+}
+
+/**
+ * ad_periodic_machine - handle a port's periodic state machine
+ * @port: the port we're looking at
+ *
+ * Turn the ntt flag on periodically to perform periodic transmission of lacpdus.
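+ * (Per 43.4 of the standard, the fast and slow periodic times are 1 and 30
+ * seconds; __ad_timer_to_ticks() converts them to ticks below.)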
+ */
+static void ad_periodic_machine(struct port *port)
+{
+ periodic_states_t last_state;
+
+ // keep current state machine state to compare later if it was changed
+ last_state = port->sm_periodic_state;
+
+ // check if port was reinitialized
+ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+ (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper_port_state & AD_STATE_LACP_ACTIVITY))
+ ) {
+ port->sm_periodic_state = AD_NO_PERIODIC; // next state
+ }
+ // check if state machine should change state
+ else if (port->sm_periodic_timer_counter) {
+ // check if periodic state machine expired
+ if (!(--port->sm_periodic_timer_counter)) {
+ // if expired then do tx
+ port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ } else {
+ // If not expired, check if there is some new timeout parameter from the partner state
+ switch (port->sm_periodic_state) {
+ case AD_FAST_PERIODIC:
+ if (!(port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) {
+ port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ }
+ break;
+ case AD_SLOW_PERIODIC:
+ if ((port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) {
+ // stop current timer
+ port->sm_periodic_timer_counter = 0;
+ port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ }
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+ } else {
+ switch (port->sm_periodic_state) {
+ case AD_NO_PERIODIC:
+ port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ break;
+ case AD_PERIODIC_TX:
+ if (!(port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) {
+ port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ } else {
+ port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ }
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+
+ // check if the state machine was changed
+ if (port->sm_periodic_state != last_state) {
+ dprintk("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state);
+ switch (port->sm_periodic_state) {
+ case AD_NO_PERIODIC:
+ port->sm_periodic_timer_counter = 0; // zero timer
+ break;
+ case AD_FAST_PERIODIC:
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ break;
+ case AD_SLOW_PERIODIC:
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ break;
+ case AD_PERIODIC_TX:
+ port->ntt = 1;
+ break;
+ default: //to silence the compiler
+ break;
+ }
+ }
+}
+
+/**
+ * ad_port_selection_logic - select aggregation groups
+ * @port: the port we're looking at
+ *
+ * Select aggregation groups, and assign each port to its aggregator. The
+ * selection logic is called during initialization (after all the handshakes),
+ * and after every lacpdu receive (if selected is off).
+ */
+static void ad_port_selection_logic(struct port *port)
+{
+ struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
+ struct port *last_port = NULL, *curr_port;
+ int found = 0;
+
+ // if the port is already Selected, do nothing
+ if (port->sm_vars & AD_PORT_SELECTED) {
+ return;
+ }
+
+ // if the port is connected to another aggregator, detach it
+ if (port->aggregator) {
+ // detach the port from its former aggregator
+ temp_aggregator=port->aggregator;
+ for (curr_port=temp_aggregator->lag_ports; curr_port; last_port=curr_port, curr_port=curr_port->next_port_in_aggregator) {
+ if (curr_port == port) {
+ temp_aggregator->num_of_ports--;
+ if (!last_port) {// if it is the first port attached to the aggregator
+ temp_aggregator->lag_ports=port->next_port_in_aggregator;
+ } else {// not the first port attached to the aggregator
+ last_port->next_port_in_aggregator=port->next_port_in_aggregator;
+ }
+
+ // clear the port's relations to this aggregator
+ port->aggregator = NULL;
+ port->next_port_in_aggregator=NULL;
+ port->actor_port_aggregator_identifier=0;
+
+ dprintk("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier);
+ // if the aggregator is empty, clear its parameters, and set it ready to be attached
+ if (!temp_aggregator->lag_ports) {
+ ad_clear_agg(temp_aggregator);
+ }
+ break;
+ }
+ }
+ if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
+ printk(KERN_WARNING DRV_NAME ": Warning: Port %d (on %s) was "
+ "related to aggregator %d but was not on its port list\n",
+ port->actor_port_number, port->slave->dev->name,
+ port->aggregator->aggregator_identifier);
+ }
+ }
+ // search on all aggregators for a suitable aggregator for this port
+ for (aggregator = __get_first_agg(port); aggregator;
+ aggregator = __get_next_agg(aggregator)) {
+
+ // keep a free aggregator for later use(if needed)
+ if (!aggregator->lag_ports) {
+ if (!free_aggregator) {
+ free_aggregator=aggregator;
+ }
+ continue;
+ }
+ // check if current aggregator suits us
+ if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
+ !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper_system)) &&
+ (aggregator->partner_system_priority == port->partner_oper_system_priority) &&
+ (aggregator->partner_oper_aggregator_key == port->partner_oper_key)
+ ) &&
+ ((MAC_ADDRESS_COMPARE(&(port->partner_oper_system), &(null_mac_addr)) && // partner answers
+ !aggregator->is_individual) // but is not individual OR
+ )
+ ) {
+ // attach to the aggregator we found
+ port->aggregator = aggregator;
+ port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier;
+ port->next_port_in_aggregator=aggregator->lag_ports;
+ port->aggregator->num_of_ports++;
+ aggregator->lag_ports=port;
+ dprintk("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+
+ // mark this port as selected
+ port->sm_vars |= AD_PORT_SELECTED;
+ found = 1;
+ break;
+ }
+ }
+
+ // the port couldn't find an aggregator - attach it to a new aggregator
+ if (!found) {
+ if (free_aggregator) {
+ // assign port a new aggregator
+ port->aggregator = free_aggregator;
+ port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier;
+
+ // update the new aggregator's parameters
+ // a half-duplex port can only form an individual aggregator
+ if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) {// if port is full duplex
+ port->aggregator->is_individual = 0;
+ } else {
+ port->aggregator->is_individual = 1;
+ }
+
+ port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
+ port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
+ port->aggregator->partner_system=port->partner_oper_system;
+ port->aggregator->partner_system_priority = port->partner_oper_system_priority;
+ port->aggregator->partner_oper_aggregator_key = port->partner_oper_key;
+ port->aggregator->receive_state = 1;
+ port->aggregator->transmit_state = 1;
+ port->aggregator->lag_ports = port;
+ port->aggregator->num_of_ports++;
+
+ // mark this port as selected
+ port->sm_vars |= AD_PORT_SELECTED;
+
+ dprintk("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ } else {
+ printk(KERN_ERR DRV_NAME ": Port %d (on %s) did not find a suitable aggregator\n",
+ port->actor_port_number, port->slave->dev->name);
+ }
+ }
+ // if all of the aggregator's ports have READY_N == TRUE, set ready=TRUE on all of them;
+ // otherwise set ready=FALSE on all of the aggregator's ports
+ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+
+ if (!__check_agg_selection_timer(port) && (aggregator = __get_first_agg(port))) {
+ ad_agg_selection_logic(aggregator);
+ }
+}
+
+/**
+ * ad_agg_selection_logic - select an aggregation group for a team
+ * @aggregator: the aggregator we're looking at
+ *
+ * It is assumed that only one aggregator may be selected for a team.
+ * The logic of this function is to initially select the aggregator with the
+ * most ports attached to it, and to reselect the active aggregator only if
+ * the previous aggregator has no more ports related to it.
+ *
+ * FIXME: this function MUST be called with the first agg in the bond, or
+ * __get_active_agg() won't work correctly. This function should be better
+ * called with the bond itself, and retrieve the first agg from it.
+ */
+static void ad_agg_selection_logic(struct aggregator *aggregator)
+{
+ struct aggregator *best_aggregator = NULL, *active_aggregator = NULL;
+ struct aggregator *last_active_aggregator = NULL, *origin_aggregator;
+ struct port *port;
+ u16 num_of_aggs=0;
+
+ origin_aggregator = aggregator;
+
+ //get current active aggregator
+ last_active_aggregator = __get_active_agg(aggregator);
+
+ // search for the aggregator with the most ports attached to it.
+ do {
+ // count how many candidate LAGs we have
+ if (aggregator->lag_ports) {
+ num_of_aggs++;
+ }
+ if (aggregator->is_active && !aggregator->is_individual && // if current aggregator is the active aggregator
+ MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr))) { // and partner answers to 802.3ad PDUs
+ if (aggregator->num_of_ports) { // if any ports attached to the current aggregator
+ best_aggregator=NULL; // disregard the best aggregator chosen so far
+ break; // stop selecting another aggregator if any ports are attached to this active aggregator
+ } else { // no ports attached to this active aggregator
+ aggregator->is_active = 0; // mark this aggregator as not active anymore
+ }
+ }
+ if (aggregator->num_of_ports) { // if any ports attached
+ if (best_aggregator) { // if there is a candidate aggregator
+ // The reasons for choosing a new best aggregator:
+ // 1. the current agg is NOT individual and the best agg chosen so far is individual, OR
+ // the current and best aggs are both individual or both not individual, AND
+ // 2a. the current agg's partner replies but the best agg's partner does not, OR
+ // 2b. the current agg's partner replies, or neither partner replies, AND
+ // the current agg has more ports/bandwidth, or the same number of ports but faster ones; THEN
+ // the current agg becomes the best agg so far
+
+ // if the current agg is NOT individual and the best agg chosen so far is individual, change best_aggregator
+ if (!aggregator->is_individual && best_aggregator->is_individual) {
+ best_aggregator=aggregator;
+ }
+ // current and best aggs are both individual or both not individual
+ else if ((aggregator->is_individual && best_aggregator->is_individual) ||
+ (!aggregator->is_individual && !best_aggregator->is_individual)) {
+ // current and best aggs are both individual or both not individual AND
+ // the current agg's partner replies but the best agg's partner does not
+ if ((MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr)) &&
+ !MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) {
+ best_aggregator=aggregator;
+ }
+ // the current agg's partner replies, or neither the current nor the best agg's partner replies
+ else if (! (!MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr)) &&
+ MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) {
+ if ((__get_agg_selection_mode(aggregator->lag_ports) == AD_BANDWIDTH)&&
+ (__get_agg_bandwidth(aggregator) > __get_agg_bandwidth(best_aggregator))) {
+ best_aggregator=aggregator;
+ } else if (__get_agg_selection_mode(aggregator->lag_ports) == AD_COUNT) {
+ if (((aggregator->num_of_ports > best_aggregator->num_of_ports) &&
+ (aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS))||
+ ((aggregator->num_of_ports == best_aggregator->num_of_ports) &&
+ ((u16)(aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS) >
+ (u16)(best_aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS)))) {
+ best_aggregator=aggregator;
+ }
+ }
+ }
+ }
+ } else {
+ best_aggregator=aggregator;
+ }
+ }
+ aggregator->is_active = 0; // mark all aggregators as not active anymore
+ } while ((aggregator = __get_next_agg(aggregator)));
+
+ // if we have a new aggregator selected, don't replace the old aggregator if it has an answering partner,
+ // or if both the old and the new aggregator have no answering partner
+ if (best_aggregator) {
+ if (last_active_aggregator && last_active_aggregator->lag_ports && last_active_aggregator->lag_ports->is_enabled &&
+ (MAC_ADDRESS_COMPARE(&(last_active_aggregator->partner_system), &(null_mac_addr)) || // partner answers OR
+ (!MAC_ADDRESS_COMPARE(&(last_active_aggregator->partner_system), &(null_mac_addr)) && // both old and new
+ !MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) // partner do not answer
+ ) {
+ // if the new aggregator has link and the old one does not, replace the old aggregator (nothing to do here);
+ // otherwise don't replace it
+ if (!(!last_active_aggregator->actor_oper_aggregator_key && best_aggregator->actor_oper_aggregator_key)) {
+ best_aggregator=NULL;
+ last_active_aggregator->is_active = 1; // don't replace good old aggregator
+
+ }
+ }
+ }
+
+ // if there is new best aggregator, activate it
+ if (best_aggregator) {
+ for (aggregator = __get_first_agg(best_aggregator->lag_ports);
+ aggregator;
+ aggregator = __get_next_agg(aggregator)) {
+
+ dprintk("Agg=%d; Ports=%d; a key=%d; p key=%d; Indiv=%d; Active=%d\n",
+ aggregator->aggregator_identifier, aggregator->num_of_ports,
+ aggregator->actor_oper_aggregator_key, aggregator->partner_oper_aggregator_key,
+ aggregator->is_individual, aggregator->is_active);
+ }
+
+ // check if any partner replies
+ if (best_aggregator->is_individual) {
+ printk(KERN_WARNING DRV_NAME ": Warning: No 802.3ad response from the link partner "
+ "for any adapters in the bond\n");
+ }
+
+ // check if there is more than one aggregator
+ if (num_of_aggs > 1) {
+ dprintk("Warning: More than one Link Aggregation Group was "
+ "found in the bond. Only one group will function in the bond\n");
+ }
+
+ best_aggregator->is_active = 1;
+ dprintk("LAG %d choosed as the active LAG\n", best_aggregator->aggregator_identifier);
+ dprintk("Agg=%d; Ports=%d; a key=%d; p key=%d; Indiv=%d; Active=%d\n",
+ best_aggregator->aggregator_identifier, best_aggregator->num_of_ports,
+ best_aggregator->actor_oper_aggregator_key, best_aggregator->partner_oper_aggregator_key,
+ best_aggregator->is_individual, best_aggregator->is_active);
+
+ // disable the ports that were related to the former active_aggregator
+ if (last_active_aggregator) {
+ for (port=last_active_aggregator->lag_ports; port; port=port->next_port_in_aggregator) {
+ __disable_port(port);
+ }
+ }
+ }
+
+ // if the selected aggregator consists of individual links (partner_system is NULL), enable its ports
+ active_aggregator = __get_active_agg(origin_aggregator);
+
+ if (active_aggregator) {
+ if (!MAC_ADDRESS_COMPARE(&(active_aggregator->partner_system), &(null_mac_addr))) {
+ for (port=active_aggregator->lag_ports; port; port=port->next_port_in_aggregator) {
+ __enable_port(port);
+ }
+ }
+ }
+}
+
+/**
+ * ad_clear_agg - clear a given aggregator's parameters
+ * @aggregator: the aggregator we're looking at
+ *
+ */
+static void ad_clear_agg(struct aggregator *aggregator)
+{
+ if (aggregator) {
+ aggregator->is_individual = 0;
+ aggregator->actor_admin_aggregator_key = 0;
+ aggregator->actor_oper_aggregator_key = 0;
+ aggregator->partner_system = null_mac_addr;
+ aggregator->partner_system_priority = 0;
+ aggregator->partner_oper_aggregator_key = 0;
+ aggregator->receive_state = 0;
+ aggregator->transmit_state = 0;
+ aggregator->lag_ports = NULL;
+ aggregator->is_active = 0;
+ aggregator->num_of_ports = 0;
+ dprintk("LAG %d was cleared\n", aggregator->aggregator_identifier);
+ }
+}
+
+/**
+ * ad_initialize_agg - initialize a given aggregator's parameters
+ * @aggregator: the aggregator we're looking at
+ *
+ */
+static void ad_initialize_agg(struct aggregator *aggregator)
+{
+ if (aggregator) {
+ ad_clear_agg(aggregator);
+
+ aggregator->aggregator_mac_address = null_mac_addr;
+ aggregator->aggregator_identifier = 0;
+ aggregator->slave = NULL;
+ }
+}
+
+/**
+ * ad_initialize_port - initialize a given port's parameters
+ * @port: the port we're looking at
+ * @lacp_fast: boolean. whether fast periodic should be used
+ *
+ */
+static void ad_initialize_port(struct port *port, int lacp_fast)
+{
+ if (port) {
+ port->actor_port_number = 1;
+ port->actor_port_priority = 0xff;
+ port->actor_system = null_mac_addr;
+ port->actor_system_priority = 0xffff;
+ port->actor_port_aggregator_identifier = 0;
+ port->ntt = 0;
+ port->actor_admin_port_key = 1;
+ port->actor_oper_port_key = 1;
+ port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
+
+ if (lacp_fast) {
+ port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ }
+
+ port->partner_admin_system = null_mac_addr;
+ port->partner_oper_system = null_mac_addr;
+ port->partner_admin_system_priority = 0xffff;
+ port->partner_oper_system_priority = 0xffff;
+ port->partner_admin_key = 1;
+ port->partner_oper_key = 1;
+ port->partner_admin_port_number = 1;
+ port->partner_oper_port_number = 1;
+ port->partner_admin_port_priority = 0xff;
+ port->partner_oper_port_priority = 0xff;
+ port->partner_admin_port_state = 1;
+ port->partner_oper_port_state = 1;
+ port->is_enabled = 1;
+ // ****** private parameters ******
+ port->sm_vars = 0x3;
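+ // (0x3 corresponds to AD_PORT_BEGIN | AD_PORT_LACP_ENABLED; the state
+ // machine handler later clears BEGIN, and bond_3ad_bind_slave() clears
+ // LACP_ENABLED for half-duplex ports)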
+ port->sm_rx_state = 0;
+ port->sm_rx_timer_counter = 0;
+ port->sm_periodic_state = 0;
+ port->sm_periodic_timer_counter = 0;
+ port->sm_mux_state = 0;
+ port->sm_mux_timer_counter = 0;
+ port->sm_tx_state = 0;
+ port->sm_tx_timer_counter = 0;
+ port->slave = NULL;
+ port->aggregator = NULL;
+ port->next_port_in_aggregator = NULL;
+ port->transaction_id = 0;
+
+ ad_initialize_lacpdu(&(port->lacpdu));
+ }
+}
+
+/**
+ * ad_enable_collecting_distributing - enable a port's transmit/receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting_distributing(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ dprintk("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ __enable_port(port);
+ }
+}
+
+/**
+ * ad_disable_collecting_distributing - disable a port's transmit/receive
+ * @port: the port we're looking at
+ *
+ */
+static void ad_disable_collecting_distributing(struct port *port)
+{
+ if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+ dprintk("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ __disable_port(port);
+ }
+}
+
+#if 0
+/**
+ * ad_marker_info_send - send a marker information frame
+ * @port: the port we're looking at
+ *
+ * Kept under #if 0: at this stage we decided not to implement sending marker
+ * PDUs and handling their responses, but only to respond to marker
+ * information frames.
+ */
+static void ad_marker_info_send(struct port *port)
+{
+ struct marker marker;
+ u16 index;
+
+ // fill the marker PDU with the appropriate values
+ marker.subtype = 0x02;
+ marker.version_number = 0x01;
+ marker.tlv_type = AD_MARKER_INFORMATION_SUBTYPE;
+ marker.marker_length = 0x16;
+ // convert requester_port to Big Endian
+ marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8));
+ marker.requester_system = port->actor_system;
+ // convert requester_port(u32) to Big Endian
+ marker.requester_transaction_id = (((++port->transaction_id & 0xFF) << 24) |((port->transaction_id & 0xFF00) << 8) |((port->transaction_id & 0xFF0000) >> 8) |((port->transaction_id & 0xFF000000) >> 24));
+ marker.pad = 0;
+ marker.tlv_type_terminator = 0x00;
+ marker.terminator_length = 0x00;
+ for (index=0; index<90; index++) {
+ marker.reserved_90[index]=0;
+ }
+
+ // send the marker information
+ if (ad_marker_send(port, &marker) >= 0) {
+ dprintk("Sent Marker Information on port %d\n", port->actor_port_number);
+ }
+}
+#endif
+
+/**
+ * ad_marker_info_received - handle receive of a Marker information frame
+ * @marker_info: Marker info received
+ * @port: the port we're looking at
+ *
+ */
+static void ad_marker_info_received(struct marker *marker_info,struct port *port)
+{
+ struct marker marker;
+
+ // copy the received marker data to the response marker
+ //marker = *marker_info;
+ memcpy(&marker, marker_info, sizeof(struct marker));
+ // change the marker subtype to marker response
+ marker.tlv_type=AD_MARKER_RESPONSE_SUBTYPE;
+ // send the marker response
+
+ if (ad_marker_send(port, &marker) >= 0) {
+ dprintk("Sent Marker Response on port %d\n", port->actor_port_number);
+ }
+}
+
+/**
+ * ad_marker_response_received - handle receive of a marker response frame
+ * @marker: marker PDU received
+ * @port: the port we're looking at
+ *
+ * This function does nothing: at this stage we decided not to implement
+ * sending marker PDUs and handling their responses, but only to respond to
+ * marker information frames.
+ */
+static void ad_marker_response_received(struct marker *marker, struct port *port)
+{
+ marker=NULL; // just to satisfy the compiler
+ port=NULL; // just to satisfy the compiler
+ // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
+}
+
+/**
+ * ad_initialize_lacpdu - initialize a given lacpdu structure
+ * @lacpdu: lacpdu structure to initialize
+ *
+ */
+static void ad_initialize_lacpdu(struct lacpdu *lacpdu)
+{
+ u16 index;
+
+ // initialize lacpdu data
+ lacpdu->subtype = 0x01;
+ lacpdu->version_number = 0x01;
+ lacpdu->tlv_type_actor_info = 0x01;
+ lacpdu->actor_information_length = 0x14;
+ // lacpdu->actor_system_priority updated on send
+ // lacpdu->actor_system updated on send
+ // lacpdu->actor_key updated on send
+ // lacpdu->actor_port_priority updated on send
+ // lacpdu->actor_port updated on send
+ // lacpdu->actor_state updated on send
+ lacpdu->tlv_type_partner_info = 0x02;
+ lacpdu->partner_information_length = 0x14;
+ for (index=0; index<=2; index++) {
+ lacpdu->reserved_3_1[index]=0;
+ }
+ // lacpdu->partner_system_priority updated on send
+ // lacpdu->partner_system updated on send
+ // lacpdu->partner_key updated on send
+ // lacpdu->partner_port_priority updated on send
+ // lacpdu->partner_port updated on send
+ // lacpdu->partner_state updated on send
+ for (index=0; index<=2; index++) {
+ lacpdu->reserved_3_2[index]=0;
+ }
+ lacpdu->tlv_type_collector_info = 0x03;
+ lacpdu->collector_information_length= 0x10;
+ lacpdu->collector_max_delay = AD_COLLECTOR_MAX_DELAY;
+ for (index=0; index<=11; index++) {
+ lacpdu->reserved_12[index]=0;
+ }
+ lacpdu->tlv_type_terminator = 0x00;
+ lacpdu->terminator_length = 0;
+ for (index=0; index<=49; index++) {
+ lacpdu->reserved_50[index]=0;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+// ================= AD exported functions to the main bonding code ==================
+//////////////////////////////////////////////////////////////////////////////////////
+
+// Check aggregators status in team every T seconds
+#define AD_AGGREGATOR_SELECTION_TIMER 8
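+// (expressed in seconds; bond_3ad_initialize() below multiplies it by ad_ticks_per_sec to get ticks)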
+
+static u16 aggregator_identifier;
+
+/**
+ * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
+ * @bond: bonding struct to work on
+ * @tick_resolution: tick duration (millisecond resolution)
+ * @lacp_fast: boolean. whether fast periodic should be used
+ *
+ * Can be called only after the mac address of the bond is set.
+ */
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast)
+{
+ // check that the bond is not initialized yet
+ if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr), &(bond->dev->dev_addr))) {
+
+ aggregator_identifier = 0;
+
+ BOND_AD_INFO(bond).lacp_fast = lacp_fast;
+ BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
+ BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+
+ // initialize how many times this module is called in one second(should be about every 100ms)
+ ad_ticks_per_sec = tick_resolution;
+
+ // initialize the aggregator selection timer(to activate an aggregation selection after initialize)
+ BOND_AD_INFO(bond).agg_select_timer = (AD_AGGREGATOR_SELECTION_TIMER * ad_ticks_per_sec);
+ BOND_AD_INFO(bond).agg_select_mode = AD_BANDWIDTH;
+ }
+}
+
+/**
+ * bond_3ad_bind_slave - initialize a slave's port
+ * @slave: slave struct to work on
+ *
+ * Returns: 0 on success
+ * < 0 on error
+ */
+int bond_3ad_bind_slave(struct slave *slave)
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+ struct port *port;
+ struct aggregator *aggregator;
+
+ if (bond == NULL) {
+ printk(KERN_ERR "The slave %s is not attached to its bond\n", slave->dev->name);
+ return -1;
+ }
+
+ // check that the slave has not been initialized yet.
+ if (SLAVE_AD_INFO(slave).port.slave != slave) {
+
+ // port initialization
+ port = &(SLAVE_AD_INFO(slave).port);
+
+ ad_initialize_port(port, BOND_AD_INFO(bond).lacp_fast);
+
+ port->slave = slave;
+ port->actor_port_number = SLAVE_AD_INFO(slave).id;
+ // key is determined according to the link speed, duplex and user key (which is not yet supported)
+ // ------------------------------------------------------------
+ // Port key : | User key | Speed |Duplex|
+ // ------------------------------------------------------------
+ // 16 6 1 0
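+ // For illustration: if __get_duplex() returns 1 (full duplex) and
+ // __get_link_speed() returns a speed code S, the assignments below yield
+ // actor_admin_port_key = (S << 1) | 1 -- duplex in bit 0, speed in bits 1-5
+ // per the layout above.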
+ port->actor_admin_port_key = 0; // initialize this parameter
+ port->actor_admin_port_key |= __get_duplex(port);
+ port->actor_admin_port_key |= (__get_link_speed(port) << 1);
+ port->actor_oper_port_key = port->actor_admin_port_key;
+ // if the port is not full duplex, then the port should be not lacp Enabled
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
+ port->sm_vars &= ~AD_PORT_LACP_ENABLED;
+ }
+ // actor system is the bond's system
+ port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+ // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+ port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
+ port->aggregator = NULL;
+ port->next_port_in_aggregator = NULL;
+
+ __disable_port(port);
+ __initialize_port_locks(port);
+
+
+ // aggregator initialization
+ aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+
+ ad_initialize_agg(aggregator);
+
+ aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
+ aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->slave = slave;
+ aggregator->is_active = 0;
+ aggregator->num_of_ports = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * bond_3ad_unbind_slave - deinitialize a slave's port
+ * @slave: slave struct to work on
+ *
+ * Search for the aggregator that is related to this port, remove the
+ * aggregator and assign another aggregator for other port related to it
+ * (if any), and remove the port.
+ */
+void bond_3ad_unbind_slave(struct slave *slave)
+{
+ struct port *port, *prev_port, *temp_port;
+ struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
+ int select_new_active_agg = 0;
+
+ // find the aggregator related to this slave
+ aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+
+ // find the port related to this slave
+ port = &(SLAVE_AD_INFO(slave).port);
+
+ // if slave is null, the whole port is not initialized
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Trying to unbind an uninitialized port on %s\n", slave->dev->name);
+ return;
+ }
+
+ dprintk("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier);
+
+ /* Tell the partner that this port is not suitable for aggregation */
+ port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+ __update_lacpdu_from_port(port);
+ ad_lacpdu_send(port);
+
+ // check if this aggregator is occupied
+ if (aggregator->lag_ports) {
+ // check if there are ports related to this aggregator other than
+ // the port related to this slave (that ensures there is a reason
+ // to search for a new aggregator, and that we will find one)
+ if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
+ // find new aggregator for the related port(s)
+ new_aggregator = __get_first_agg(port);
+ for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+ // if the new aggregator is empty, or it is connected to our port only
+ if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) {
+ break;
+ }
+ }
+ // if new aggregator found, copy the aggregator's parameters
+ // and connect the related lag_ports to the new aggregator
+ if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
+ dprintk("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
+
+ if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
+ printk(KERN_INFO DRV_NAME ": Removing an active aggregator\n");
+ // select new active aggregator
+ select_new_active_agg = 1;
+ }
+
+ new_aggregator->is_individual = aggregator->is_individual;
+ new_aggregator->actor_admin_aggregator_key = aggregator->actor_admin_aggregator_key;
+ new_aggregator->actor_oper_aggregator_key = aggregator->actor_oper_aggregator_key;
+ new_aggregator->partner_system = aggregator->partner_system;
+ new_aggregator->partner_system_priority = aggregator->partner_system_priority;
+ new_aggregator->partner_oper_aggregator_key = aggregator->partner_oper_aggregator_key;
+ new_aggregator->receive_state = aggregator->receive_state;
+ new_aggregator->transmit_state = aggregator->transmit_state;
+ new_aggregator->lag_ports = aggregator->lag_ports;
+ new_aggregator->is_active = aggregator->is_active;
+ new_aggregator->num_of_ports = aggregator->num_of_ports;
+
+ // update the information that is written on the ports about the aggregator
+ for (temp_port=aggregator->lag_ports; temp_port; temp_port=temp_port->next_port_in_aggregator) {
+ temp_port->aggregator=new_aggregator;
+ temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
+ }
+
+ // clear the aggregator
+ ad_clear_agg(aggregator);
+
+ if (select_new_active_agg) {
+ ad_agg_selection_logic(__get_first_agg(port));
+ }
+ } else {
+ printk(KERN_WARNING DRV_NAME ": Warning: unbinding aggregator, "
+ "and could not find a new aggregator for its ports\n");
+ }
+ } else { // in case that the only port related to this aggregator is the one we want to remove
+ select_new_active_agg = aggregator->is_active;
+ // clear the aggregator
+ ad_clear_agg(aggregator);
+ if (select_new_active_agg) {
+ printk(KERN_INFO "Removing an active aggregator\n");
+ // select new active aggregator
+ ad_agg_selection_logic(__get_first_agg(port));
+ }
+ }
+ }
+
+ dprintk("Unbinding port %d\n", port->actor_port_number);
+ // find the aggregator that this port is connected to
+ temp_aggregator = __get_first_agg(port);
+ for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+ prev_port = NULL;
+ // search the port in the aggregator's related ports
+ for (temp_port=temp_aggregator->lag_ports; temp_port; prev_port=temp_port, temp_port=temp_port->next_port_in_aggregator) {
+ if (temp_port == port) { // the aggregator found - detach the port from this aggregator
+ if (prev_port) {
+ prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
+ } else {
+ temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
+ }
+ temp_aggregator->num_of_ports--;
+ if (temp_aggregator->num_of_ports==0) {
+ select_new_active_agg = temp_aggregator->is_active;
+ // clear the aggregator
+ ad_clear_agg(temp_aggregator);
+ if (select_new_active_agg) {
+ printk(KERN_INFO "Removing an active aggregator\n");
+ // select new active aggregator
+ ad_agg_selection_logic(__get_first_agg(port));
+ }
+ }
+ break;
+ }
+ }
+ }
+ port->slave=NULL;
+}
+
+/**
+ * bond_3ad_state_machine_handler - handle state machines timeout
+ * @bond: bonding struct to work on
+ *
+ * The state machine handling concept in this module is to check, on every
+ * tick, which state machine should run. The execution order is round robin,
+ * so when state machines interact, the reply of one to another might be
+ * delayed until the next tick.
+ *
+ * This function also completes the initialization when the agg_select_timer
+ * times out: it selects an aggregator for the ports that are not yet related
+ * to any aggregator, and selects the active aggregator for the bond.
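+ *
+ * On each tick, every port's state machines run in order: rx, periodic,
+ * port selection, mux, and tx (see the loop below).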
+ */
+void bond_3ad_state_machine_handler(struct bonding *bond)
+{
+ struct port *port;
+ struct aggregator *aggregator;
+
+ read_lock(&bond->lock);
+
+ if (bond->kill_timers) {
+ goto out;
+ }
+
+ //check if there are any slaves
+ if (bond->slave_cnt == 0) {
+ goto re_arm;
+ }
+
+ // check if the agg_select_timer (armed at initialization) has timed out
+ if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ // select the active aggregator for the bond
+ if ((port = __get_first_port(bond))) {
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: bond's first port is uninitialized\n");
+ goto re_arm;
+ }
+
+ aggregator = __get_first_agg(port);
+ ad_agg_selection_logic(aggregator);
+ }
+ }
+
+ // for each port run the state machines
+ for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: Found an uninitialized port\n");
+ goto re_arm;
+ }
+
+ ad_rx_machine(NULL, port);
+ ad_periodic_machine(port);
+ ad_port_selection_logic(port);
+ ad_mux_machine(port);
+ ad_tx_machine(port);
+
+ // turn off the BEGIN bit, since we already handled it
+ if (port->sm_vars & AD_PORT_BEGIN) {
+ port->sm_vars &= ~AD_PORT_BEGIN;
+ }
+ }
+
+re_arm:
+ mod_timer(&(BOND_AD_INFO(bond).ad_timer), jiffies + ad_delta_in_ticks);
+out:
+ read_unlock(&bond->lock);
+}
+
+/**
+ * bond_3ad_rx_indication - handle a received frame
+ * @lacpdu: received lacpdu
+ * @slave: slave struct to work on
+ * @length: length of the data received
+ *
+ * It is assumed that frames that were sent from this NIC do not come back as
+ * newly received frames (loopback). Since only the payload is given to this
+ * function, it checks for loopback.
+ */
+static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+{
+ struct port *port;
+
+ if (length >= sizeof(struct lacpdu)) {
+
+ port = &(SLAVE_AD_INFO(slave).port);
+
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: port of slave %s is uninitialized\n", slave->dev->name);
+ return;
+ }
+
+ switch (lacpdu->subtype) {
+ case AD_TYPE_LACPDU:
+ __ntohs_lacpdu(lacpdu);
+ dprintk("Received LACPDU on port %d\n", port->actor_port_number);
+ ad_rx_machine(lacpdu, port);
+ break;
+
+ case AD_TYPE_MARKER:
+ // No need to convert the fields to host byte order since we don't use the marker's fields.
+
+ switch (((struct marker *)lacpdu)->tlv_type) {
+ case AD_MARKER_INFORMATION_SUBTYPE:
+ dprintk("Received Marker Information on port %d\n", port->actor_port_number);
+ ad_marker_info_received((struct marker *)lacpdu, port);
+ break;
+
+ case AD_MARKER_RESPONSE_SUBTYPE:
+ dprintk("Received Marker Response on port %d\n", port->actor_port_number);
+ ad_marker_response_received((struct marker *)lacpdu, port);
+ break;
+
+ default:
+ dprintk("Received an unknown Marker subtype on slot %d\n", port->actor_port_number);
+ }
+ }
+ }
+}
+
+/**
+ * bond_3ad_adapter_speed_changed - handle a slave's speed change indication
+ * @slave: slave struct to work on
+ *
+ * Handle reselection of aggregator (if needed) for this port.
+ */
+void bond_3ad_adapter_speed_changed(struct slave *slave)
+{
+ struct port *port;
+
+ port = &(SLAVE_AD_INFO(slave).port);
+
+ // if slave is null, the whole port is not initialized
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: speed changed for uninitialized port on %s\n",
+ slave->dev->name);
+ return;
+ }
+
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1);
+ dprintk("Port %d changed speed\n", port->actor_port_number);
+ // there is no need to reselect a new aggregator, just signal the
+ // state machines to reinitialize
+ port->sm_vars |= AD_PORT_BEGIN;
+}
+
+/**
+ * bond_3ad_adapter_duplex_changed - handle a slave's duplex change indication
+ * @slave: slave struct to work on
+ *
+ * Handle reselection of aggregator (if needed) for this port.
+ */
+void bond_3ad_adapter_duplex_changed(struct slave *slave)
+{
+ struct port *port;
+
+ port=&(SLAVE_AD_INFO(slave).port);
+
+ // if slave is null, the whole port is not initialized
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: duplex changed for uninitialized port on %s\n",
+ slave->dev->name);
+ return;
+ }
+
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port);
+ dprintk("Port %d changed duplex\n", port->actor_port_number);
+ // there is no need to reselect a new aggregator, just signal the
+ // state machines to reinitialize
+ port->sm_vars |= AD_PORT_BEGIN;
+}
+
+/**
+ * bond_3ad_handle_link_change - handle a slave's link status change indication
+ * @slave: slave struct to work on
+ * @status: whether the link is now up or down
+ *
+ * Handle reselection of aggregator (if needed) for this port.
+ */
+void bond_3ad_handle_link_change(struct slave *slave, char link)
+{
+ struct port *port;
+
+ port = &(SLAVE_AD_INFO(slave).port);
+
+ // if slave is null, the whole port is not initialized
+ if (!port->slave) {
+ printk(KERN_WARNING DRV_NAME ": Warning: link status changed for uninitialized port on %s\n",
+ slave->dev->name);
+ return;
+ }
+
+ // on link down we zero the duplex and speed, since some adapters (ce1000.lan) report full duplex/speed instead of N/A (duplex) / 0 (speed)
+ // on link up we force a recheck of the duplex and speed for the same reason
+ if (link == BOND_LINK_UP) {
+ port->is_enabled = 1;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port);
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1);
+ } else {
+ /* link has failed */
+ port->is_enabled = 0;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_oper_port_key= (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS);
+ }
+ //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
+ // there is no need to reselect a new aggregator, just signal the
+ // state machines to reinitialize
+ port->sm_vars |= AD_PORT_BEGIN;
+}
+
+/**
+ * bond_3ad_get_active_agg_info - get information of the active aggregator
+ * @bond: bonding struct to work on
+ * @ad_info: ad_info struct to fill with the bond's info
+ *
+ * Returns: 0 on success
+ * < 0 on error
+ */
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+{
+ struct aggregator *aggregator = NULL;
+ struct port *port;
+
+ for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ if (port->aggregator && port->aggregator->is_active) {
+ aggregator = port->aggregator;
+ break;
+ }
+ }
+
+ if (aggregator) {
+ ad_info->aggregator_id = aggregator->aggregator_identifier;
+ ad_info->ports = aggregator->num_of_ports;
+ ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+ ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+ memcpy(ad_info->partner_system, aggregator->partner_system.mac_addr_value, ETH_ALEN);
+ return 0;
+ }
+
+ return -1;
+}
+
+int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slave *slave, *start_at;
+ struct bonding *bond = dev->priv;
+ struct ethhdr *data = (struct ethhdr *)skb->data;
+ int slave_agg_no;
+ int slaves_in_agg;
+ int agg_id;
+ int i;
+ struct ad_info ad_info;
+ int res = 1;
+
+ /* make sure that the slaves list will
+ * not change during tx
+ */
+ read_lock(&bond->lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ printk(KERN_DEBUG "ERROR: bond_3ad_get_active_agg_info failed\n");
+ goto out;
+ }
+
+ slaves_in_agg = ad_info.ports;
+ agg_id = ad_info.aggregator_id;
+
+ if (slaves_in_agg == 0) {
+ /*the aggregator is empty*/
+ printk(KERN_DEBUG "ERROR: active aggregator is empty\n");
+ goto out;
+ }
+
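+ /* simple transmit hash: XOR the last octet of the destination MAC with the
+ last octet of the bond's MAC address, modulo the number of slaves in the
+ active aggregator, to pick which slave sends this frame */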
+ slave_agg_no = (data->h_dest[5]^bond->dev->dev_addr[5]) % slaves_in_agg;
+
+ bond_for_each_slave(bond, slave, i) {
+ struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+
+ if (agg && (agg->aggregator_identifier == agg_id)) {
+ slave_agg_no--;
+ if (slave_agg_no < 0) {
+ break;
+ }
+ }
+ }
+
+ if (slave_agg_no >= 0) {
+ printk(KERN_ERR DRV_NAME ": Error: Couldn't find a slave to tx on for aggregator ID %d\n", agg_id);
+ goto out;
+ }
+
+ start_at = slave;
+
+ bond_for_each_slave_from(bond, slave, i, start_at) {
+ int slave_agg_id = 0;
+ struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+
+ if (agg) {
+ slave_agg_id = agg->aggregator_identifier;
+ }
+
+ if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ break;
+ }
+ }
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype)
+{
+ struct bonding *bond = dev->priv;
+ struct slave *slave = NULL;
+ int ret = NET_RX_DROP;
+
+ if (!(dev->flags & IFF_MASTER)) {
+ goto out;
+ }
+
+ read_lock(&bond->lock);
+ slave = bond_get_slave_by_dev((struct bonding *)dev->priv,
+ skb->real_dev);
+ if (slave == NULL) {
+ goto out_unlock;
+ }
+
+ bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+
+ ret = NET_RX_SUCCESS;
+
+out_unlock:
+ read_unlock(&bond->lock);
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
new file mode 100644
index 000000000000..f46823894187
--- /dev/null
+++ b/drivers/net/bonding/bond_3ad.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ *
+ * Changes:
+ *
+ * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Amir Noam <amir.noam at intel dot com>
+ * - Added support for lacp_rate module param.
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Renamed bond_3ad_link_status_changed() to
+ * bond_3ad_handle_link_change() for compatibility with TLB.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Code cleanup and style changes
+ */
+
+#ifndef __BOND_3AD_H__
+#define __BOND_3AD_H__
+
+#include <asm/byteorder.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+// General definitions
+#define BOND_ETH_P_LACPDU 0x8809
+#define PKT_TYPE_LACPDU __constant_htons(BOND_ETH_P_LACPDU)
+#define AD_TIMER_INTERVAL 100 /*msec*/
+
+#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
+#define AD_MULTICAST_LACPDU_ADDR {MULTICAST_LACPDU_ADDR}
+
+#define AD_LACP_SLOW 0
+#define AD_LACP_FAST 1
+
+typedef struct mac_addr {
+ u8 mac_addr_value[ETH_ALEN];
+} mac_addr_t;
+
+typedef enum {
+ AD_BANDWIDTH = 0,
+ AD_COUNT
+} agg_selection_t;
+
+// rx machine states(43.4.11 in the 802.3ad standard)
+typedef enum {
+ AD_RX_DUMMY,
+ AD_RX_INITIALIZE, // rx Machine
+ AD_RX_PORT_DISABLED, // rx Machine
+ AD_RX_LACP_DISABLED, // rx Machine
+ AD_RX_EXPIRED, // rx Machine
+ AD_RX_DEFAULTED, // rx Machine
+ AD_RX_CURRENT // rx Machine
+} rx_states_t;
+
+// periodic machine states(43.4.12 in the 802.3ad standard)
+typedef enum {
+ AD_PERIODIC_DUMMY,
+ AD_NO_PERIODIC, // periodic machine
+ AD_FAST_PERIODIC, // periodic machine
+ AD_SLOW_PERIODIC, // periodic machine
+ AD_PERIODIC_TX // periodic machine
+} periodic_states_t;
+
+// mux machine states(43.4.13 in the 802.3ad standard)
+typedef enum {
+ AD_MUX_DUMMY,
+ AD_MUX_DETACHED, // mux machine
+ AD_MUX_WAITING, // mux machine
+ AD_MUX_ATTACHED, // mux machine
+ AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+} mux_states_t;
+
+// tx machine states(43.4.15 in the 802.3ad standard)
+typedef enum {
+ AD_TX_DUMMY,
+ AD_TRANSMIT // tx Machine
+} tx_states_t;
+
+// rx indication types
+typedef enum {
+ AD_TYPE_LACPDU = 1, // type lacpdu
+ AD_TYPE_MARKER // type marker
+} pdu_type_t;
+
+// rx marker indication types
+typedef enum {
+ AD_MARKER_INFORMATION_SUBTYPE = 1, // marker information subtype
+ AD_MARKER_RESPONSE_SUBTYPE // marker response subtype
+} marker_subtype_t;
+
+// timers types(43.4.9 in the 802.3ad standard)
+typedef enum {
+ AD_CURRENT_WHILE_TIMER,
+ AD_ACTOR_CHURN_TIMER,
+ AD_PERIODIC_TIMER,
+ AD_PARTNER_CHURN_TIMER,
+ AD_WAIT_WHILE_TIMER
+} ad_timers_t;
+
+#pragma pack(1)
+
+typedef struct ad_header {
+ struct mac_addr destination_address;
+ struct mac_addr source_address;
+ u16 length_type;
+} ad_header_t;
+
+// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+typedef struct lacpdu {
+ u8 subtype; // = LACP(= 0x01)
+ u8 version_number;
+ u8 tlv_type_actor_info; // = actor information(type/length/value)
+ u8 actor_information_length; // = 20
+ u16 actor_system_priority;
+ struct mac_addr actor_system;
+ u16 actor_key;
+ u16 actor_port_priority;
+ u16 actor_port;
+ u8 actor_state;
+ u8 reserved_3_1[3]; // = 0
+ u8 tlv_type_partner_info; // = partner information
+ u8 partner_information_length; // = 20
+ u16 partner_system_priority;
+ struct mac_addr partner_system;
+ u16 partner_key;
+ u16 partner_port_priority;
+ u16 partner_port;
+ u8 partner_state;
+ u8 reserved_3_2[3]; // = 0
+ u8 tlv_type_collector_info; // = collector information
+ u8 collector_information_length; // = 16
+ u16 collector_max_delay;
+ u8 reserved_12[12];
+ u8 tlv_type_terminator; // = terminator
+ u8 terminator_length; // = 0
+ u8 reserved_50[50]; // = 0
+} lacpdu_t;
+
+typedef struct lacpdu_header {
+ struct ad_header ad_header;
+ struct lacpdu lacpdu;
+} lacpdu_header_t;
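+/* Size sketch (illustrative): with pack(1) the lacpdu body above is
+ * 2 + 20 + 20 + 16 + 2 + 50 = 110 octets, so a lacpdu_header is
+ * 14 + 110 = 124 octets on the wire (before the FCS). A minimal
+ * compile-time check, assuming a compiler that rejects negative array
+ * sizes, could look like:
+ *
+ *	static char lacpdu_size_ok[(sizeof(struct lacpdu) == 110) ? 1 : -1];
+ */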
+
+// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+typedef struct marker {
+ u8 subtype; // = 0x02 (marker PDU)
+ u8 version_number; // = 0x01
+ u8 tlv_type; // = 0x01 (marker information)
+ // = 0x02 (marker response information)
+ u8 marker_length; // = 0x16
+ u16 requester_port; // The number assigned to the port by the requester
+ struct mac_addr requester_system; // The requester's system id
+ u32 requester_transaction_id; // The transaction id allocated by the requester,
+ u16 pad; // = 0
+ u8 tlv_type_terminator; // = 0x00
+ u8 terminator_length; // = 0x00
+ u8 reserved_90[90]; // = 0
+} marker_t;
+
+typedef struct marker_header {
+ struct ad_header ad_header;
+ struct marker marker;
+} marker_header_t;
+
+#pragma pack()
+
+struct slave;
+struct bonding;
+struct ad_info;
+struct port;
+
+#ifdef __ia64__
+#pragma pack(8)
+#endif
+
+// aggregator structure(43.4.5 in the 802.3ad standard)
+typedef struct aggregator {
+ struct mac_addr aggregator_mac_address;
+ u16 aggregator_identifier;
+ u16 is_individual; // BOOLEAN
+ u16 actor_admin_aggregator_key;
+ u16 actor_oper_aggregator_key;
+ struct mac_addr partner_system;
+ u16 partner_system_priority;
+ u16 partner_oper_aggregator_key;
+ u16 receive_state; // BOOLEAN
+ u16 transmit_state; // BOOLEAN
+ struct port *lag_ports;
+ // ****** PRIVATE PARAMETERS ******
+ struct slave *slave; // pointer to the bond slave that this aggregator belongs to
+ u16 is_active; // BOOLEAN. Indicates if this aggregator is active
+ u16 num_of_ports;
+} aggregator_t;
+
+// port structure(43.4.6 in the 802.3ad standard)
+typedef struct port {
+ u16 actor_port_number;
+ u16 actor_port_priority;
+ struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
+ u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
+ u16 actor_port_aggregator_identifier;
+ u16 ntt; // BOOLEAN
+ u16 actor_admin_port_key;
+ u16 actor_oper_port_key;
+ u8 actor_admin_port_state;
+ u8 actor_oper_port_state;
+ struct mac_addr partner_admin_system;
+ struct mac_addr partner_oper_system;
+ u16 partner_admin_system_priority;
+ u16 partner_oper_system_priority;
+ u16 partner_admin_key;
+ u16 partner_oper_key;
+ u16 partner_admin_port_number;
+ u16 partner_oper_port_number;
+ u16 partner_admin_port_priority;
+ u16 partner_oper_port_priority;
+ u8 partner_admin_port_state;
+ u8 partner_oper_port_state;
+ u16 is_enabled; // BOOLEAN
+ // ****** PRIVATE PARAMETERS ******
+ u16 sm_vars; // all state machines variables for this port
+ rx_states_t sm_rx_state; // state machine rx state
+ u16 sm_rx_timer_counter; // state machine rx timer counter
+ periodic_states_t sm_periodic_state;// state machine periodic state
+ u16 sm_periodic_timer_counter; // state machine periodic timer counter
+ mux_states_t sm_mux_state; // state machine mux state
+ u16 sm_mux_timer_counter; // state machine mux timer counter
+ tx_states_t sm_tx_state; // state machine tx state
+ u16 sm_tx_timer_counter; // state machine tx timer counter (always on - enters the transmit state 3 times per second)
+ struct slave *slave; // pointer to the bond slave that this port belongs to
+ struct aggregator *aggregator; // pointer to an aggregator that this port related to
+ struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
+ u32 transaction_id; // continuous number for identification of Marker PDUs
+ struct lacpdu lacpdu; // the lacpdu that will be sent for this port
+} port_t;
+
+// system structure
+typedef struct ad_system {
+ u16 sys_priority;
+ struct mac_addr sys_mac_addr;
+} ad_system_t;
+
+#ifdef __ia64__
+#pragma pack()
+#endif
+
+// ================= AD Exported structures to the main bonding code ==================
+#define BOND_AD_INFO(bond) ((bond)->ad_info)
+#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
+
+struct ad_bond_info {
+ ad_system_t system; // 802.3ad system structure
+ u32 agg_select_timer; // Timer to select aggregator after all adapters' handshakes
+ u32 agg_select_mode; // Mode of selection of active aggregator(bandwidth/count)
+ int lacp_fast; /* whether fast periodic tx should be
+ * requested
+ */
+ struct timer_list ad_timer;
+ struct packet_type ad_pkt_type;
+};
+
+struct ad_slave_info {
+ struct aggregator aggregator; // 802.3ad aggregator structure
+ struct port port; // 802.3ad port structure
+ spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+ u16 id;
+};
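+/* Access sketch (illustrative): the main bonding code is expected to reach
+ * the 802.3ad state through the accessors above, e.g.
+ *
+ *	struct port *port = &(SLAVE_AD_INFO(slave).port);
+ *	struct ad_system *sys = &(BOND_AD_INFO(bond).system);
+ */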
+
+// ================= AD Exported functions to the main bonding code ==================
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
+int bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_unbind_slave(struct slave *slave);
+void bond_3ad_state_machine_handler(struct bonding *bond);
+void bond_3ad_adapter_speed_changed(struct slave *slave);
+void bond_3ad_adapter_duplex_changed(struct slave *slave);
+void bond_3ad_handle_link_change(struct slave *slave, char link);
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype);
+#endif //__BOND_3AD_H__
+
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
new file mode 100644
index 000000000000..5ce606d9dc03
--- /dev/null
+++ b/drivers/net/bonding/bond_alb.c
@@ -0,0 +1,1696 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ *
+ * Changes:
+ *
+ * 2003/06/25 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Fixed signed/unsigned calculation errors that caused load sharing
+ * to collapse to one slave under very heavy UDP Tx stress.
+ *
+ * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
+ * - Add support for setting bond's MAC address with special
+ * handling required for ALB/TLB.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Code cleanup and style changes
+ *
+ * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
+ * - Fixed: Cannot remove and re-enslave the original active slave.
+ *
+ * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Add capability to tag self generated packets in ALB/TLB modes.
+ */
+
+//#define BONDING_DEBUG 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_bonding.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <net/ipx.h>
+#include <net/arp.h>
+#include <asm/byteorder.h>
+#include "bonding.h"
+#include "bond_alb.h"
+
+
+#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
+#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
+ * Used for division - never set
+ * to zero !!!
+ */
+#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
+ * learning packets to the switch
+ */
+
+#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+ * Note that this value MUST NOT be smaller
+ * because the key hash table is BYTE wide !
+ */
+
+
+#define TLB_NULL_INDEX 0xffffffff
+#define MAX_LP_BURST 3
+
+/* rlb defs */
+#define RLB_HASH_TABLE_SIZE 256
+#define RLB_NULL_INDEX 0xffffffff
+#define RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */
+#define RLB_ARP_BURST_SIZE 2
+#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
+ * rebalance interval (5 min).
+ */
+/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
+ * promiscuous after failover
+ */
+#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
+
+static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
+static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
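+/* Timing note: the ALB timer fires ALB_TIMER_TICKS_PER_SEC times a second
+ * (alb_delta_in_ticks = HZ / 10 jiffies per tick), so a TLB rebalance runs
+ * every BOND_TLB_REBALANCE_TICKS = 10 * 10 = 100 ticks (10 seconds) and
+ * learning packets go out every BOND_ALB_LP_TICKS = 1 * 10 = 10 ticks
+ * (once a second).
+ */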
+
+#pragma pack(1)
+struct learning_pkt {
+ u8 mac_dst[ETH_ALEN];
+ u8 mac_src[ETH_ALEN];
+ u16 type;
+ u8 padding[ETH_ZLEN - ETH_HLEN];
+};
+
+struct arp_pkt {
+ u16 hw_addr_space;
+ u16 prot_addr_space;
+ u8 hw_addr_len;
+ u8 prot_addr_len;
+ u16 op_code;
+ u8 mac_src[ETH_ALEN]; /* sender hardware address */
+ u32 ip_src; /* sender IP address */
+ u8 mac_dst[ETH_ALEN]; /* target hardware address */
+ u32 ip_dst; /* target IP address */
+};
+#pragma pack()
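+/* Size note: with pack(1) a learning_pkt is 6 + 6 + 2 + (ETH_ZLEN - ETH_HLEN)
+ * = 14 + 46 = 60 bytes, i.e. exactly ETH_ZLEN, the minimum Ethernet frame
+ * length excluding the FCS.
+ */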
+
+/* Forward declaration */
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+
+static inline u8 _simple_hash(u8 *hash_start, int hash_size)
+{
+ int i;
+ u8 hash = 0;
+
+ for (i = 0; i < hash_size; i++) {
+ hash ^= hash_start[i];
+ }
+
+ return hash;
+}
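+/* Usage sketch (illustrative): the transmit path hashes the destination
+ * address of an outgoing packet down to a one-byte index into the
+ * 256-entry hash tables, e.g. for IPv4:
+ *
+ *	u32 daddr = skb->nh.iph->daddr;
+ *	u8 hash_index = _simple_hash((u8 *)&daddr, sizeof(daddr));
+ */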
+
+/*********************** tlb specific functions ***************************/
+
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+/* Caller must hold tx_hashtbl lock */
+static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
+{
+ if (save_load) {
+ entry->load_history = 1 + entry->tx_bytes /
+ BOND_TLB_REBALANCE_INTERVAL;
+ entry->tx_bytes = 0;
+ }
+
+ entry->tx_slave = NULL;
+ entry->next = TLB_NULL_INDEX;
+ entry->prev = TLB_NULL_INDEX;
+}
+
+static inline void tlb_init_slave(struct slave *slave)
+{
+ SLAVE_TLB_INFO(slave).load = 0;
+ SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
+}
+
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+{
+ struct tlb_client_info *tx_hash_table;
+ u32 index;
+
+ _lock_tx_hashtbl(bond);
+
+ /* clear slave from tx_hashtbl */
+ tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
+
+ index = SLAVE_TLB_INFO(slave).head;
+ while (index != TLB_NULL_INDEX) {
+ u32 next_index = tx_hash_table[index].next;
+ tlb_init_table_entry(&tx_hash_table[index], save_load);
+ index = next_index;
+ }
+
+ _unlock_tx_hashtbl(bond);
+
+ tlb_init_slave(slave);
+}
+
+/* Must be called before starting the monitor timer */
+static int tlb_initialize(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
+ int i;
+
+ spin_lock_init(&(bond_info->tx_hashtbl_lock));
+
+ _lock_tx_hashtbl(bond);
+
+ bond_info->tx_hashtbl = kmalloc(size, GFP_KERNEL);
+ if (!bond_info->tx_hashtbl) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: %s: Failed to allocate TLB hash table\n",
+ bond->dev->name);
+ _unlock_tx_hashtbl(bond);
+ return -1;
+ }
+
+ memset(bond_info->tx_hashtbl, 0, size);
+
+ for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+ tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+ }
+
+ _unlock_tx_hashtbl(bond);
+
+ return 0;
+}
+
+/* Must be called only after all slaves have been released */
+static void tlb_deinitialize(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+
+ _lock_tx_hashtbl(bond);
+
+ kfree(bond_info->tx_hashtbl);
+ bond_info->tx_hashtbl = NULL;
+
+ _unlock_tx_hashtbl(bond);
+}
+
+/* Caller must hold bond lock for read */
+static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
+{
+ struct slave *slave, *least_loaded;
+ s64 max_gap;
+ int i, found = 0;
+
+ /* Find the first enabled slave */
+ bond_for_each_slave(bond, slave, i) {
+ if (SLAVE_IS_OK(slave)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ return NULL;
+ }
+
+ least_loaded = slave;
+ max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */
+ (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+
+ /* Find the slave with the largest gap */
+ bond_for_each_slave_from(bond, slave, i, least_loaded) {
+ if (SLAVE_IS_OK(slave)) {
+ s64 gap = (s64)(slave->speed << 20) -
+ (s64)(SLAVE_TLB_INFO(slave).load << 3);
+ if (max_gap < gap) {
+ least_loaded = slave;
+ max_gap = gap;
+ }
+ }
+ }
+
+ return least_loaded;
+}
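+/* Units note for the gap computation above: slave->speed is in Mbit/s, so
+ * "speed << 20" approximates the link capacity in bit/s (1 M ~ 2^20), while
+ * SLAVE_TLB_INFO(slave).load is roughly bytes per second (bytes sent over
+ * the last rebalance interval divided by the interval), so "load << 3"
+ * turns it into bit/s; the slave with the largest capacity-minus-load gap
+ * is picked.
+ */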
+
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct tlb_client_info *hash_table;
+ struct slave *assigned_slave;
+
+ _lock_tx_hashtbl(bond);
+
+ hash_table = bond_info->tx_hashtbl;
+ assigned_slave = hash_table[hash_index].tx_slave;
+ if (!assigned_slave) {
+ assigned_slave = tlb_get_least_loaded_slave(bond);
+
+ if (assigned_slave) {
+ struct tlb_slave_info *slave_info =
+ &(SLAVE_TLB_INFO(assigned_slave));
+ u32 next_index = slave_info->head;
+
+ hash_table[hash_index].tx_slave = assigned_slave;
+ hash_table[hash_index].next = next_index;
+ hash_table[hash_index].prev = TLB_NULL_INDEX;
+
+ if (next_index != TLB_NULL_INDEX) {
+ hash_table[next_index].prev = hash_index;
+ }
+
+ slave_info->head = hash_index;
+ slave_info->load +=
+ hash_table[hash_index].load_history;
+ }
+ }
+
+ if (assigned_slave) {
+ hash_table[hash_index].tx_bytes += skb_len;
+ }
+
+ _unlock_tx_hashtbl(bond);
+
+ return assigned_slave;
+}
+
+/*********************** rlb specific functions ***************************/
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+/* when an ARP REPLY is received from a client, update its info
+ * in the rx_hashtbl
+ */
+static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ u32 hash_index;
+
+ _lock_rx_hashtbl(bond);
+
+ hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+
+ if ((client_info->assigned) &&
+ (client_info->ip_src == arp->ip_dst) &&
+ (client_info->ip_dst == arp->ip_src)) {
+ /* update the clients MAC address */
+ memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+ client_info->ntt = 1;
+ bond_info->rx_ntt = 1;
+ }
+
+ _unlock_rx_hashtbl(bond);
+}
+
+static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct arp_pkt *arp = (struct arp_pkt *)skb->data;
+ int res = NET_RX_DROP;
+
+ if (!(bond_dev->flags & IFF_MASTER)) {
+ goto out;
+ }
+
+ if (!arp) {
+ dprintk("Packet has no ARP data\n");
+ goto out;
+ }
+
+ if (skb->len < sizeof(struct arp_pkt)) {
+ dprintk("Packet is too small to be an ARP\n");
+ goto out;
+ }
+
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+ /* update rx hash table for this ARP */
+ rlb_update_entry_from_arp(bond, arp);
+ dprintk("Server received an ARP Reply from client\n");
+ }
+
+ res = NET_RX_SUCCESS;
+
+out:
+ dev_kfree_skb(skb);
+
+ return res;
+}
+
+/* Caller must hold bond lock for read */
+static struct slave *rlb_next_rx_slave(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *rx_slave, *slave, *start_at;
+ int i = 0;
+
+ if (bond_info->next_rx_slave) {
+ start_at = bond_info->next_rx_slave;
+ } else {
+ start_at = bond->first_slave;
+ }
+
+ rx_slave = NULL;
+
+ bond_for_each_slave_from(bond, slave, i, start_at) {
+ if (SLAVE_IS_OK(slave)) {
+ if (!rx_slave) {
+ rx_slave = slave;
+ } else if (slave->speed > rx_slave->speed) {
+ rx_slave = slave;
+ }
+ }
+ }
+
+ if (rx_slave) {
+ bond_info->next_rx_slave = rx_slave->next;
+ }
+
+ return rx_slave;
+}
+
+/* teach the switch the mac of a disabled slave
+ * on the primary for fault tolerance
+ *
+ * Caller must hold bond->curr_slave_lock for write or bond lock for write
+ */
+static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
+{
+ if (!bond->curr_active_slave) {
+ return;
+ }
+
+ if (!bond->alb_info.primary_is_promisc) {
+ bond->alb_info.primary_is_promisc = 1;
+ dev_set_promiscuity(bond->curr_active_slave->dev, 1);
+ }
+
+ bond->alb_info.rlb_promisc_timeout_counter = 0;
+
+ alb_send_learning_packets(bond->curr_active_slave, addr);
+}
+
+/* slave being removed should not be active at this point
+ *
+ * Caller must hold bond lock for read
+ */
+static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *rx_hash_table;
+ u32 index, next_index;
+
+ /* clear slave from rx_hashtbl */
+ _lock_rx_hashtbl(bond);
+
+ rx_hash_table = bond_info->rx_hashtbl;
+ index = bond_info->rx_hashtbl_head;
+ for (; index != RLB_NULL_INDEX; index = next_index) {
+ next_index = rx_hash_table[index].next;
+ if (rx_hash_table[index].slave == slave) {
+ struct slave *assigned_slave = rlb_next_rx_slave(bond);
+
+ if (assigned_slave) {
+ rx_hash_table[index].slave = assigned_slave;
+ if (memcmp(rx_hash_table[index].mac_dst,
+ mac_bcast, ETH_ALEN)) {
+ bond_info->rx_hashtbl[index].ntt = 1;
+ bond_info->rx_ntt = 1;
+ /* A slave has been removed from the
+ * table because it is either disabled
+ * or being released. We must retry the
+ * update so that clients are not left
+ * with stale info and disconnected
+ * when there is stress
+ */
+ bond_info->rlb_update_retry_counter =
+ RLB_UPDATE_RETRY;
+ }
+ } else { /* there is no active slave */
+ rx_hash_table[index].slave = NULL;
+ }
+ }
+ }
+
+ _unlock_rx_hashtbl(bond);
+
+ write_lock(&bond->curr_slave_lock);
+
+ if (slave != bond->curr_active_slave) {
+ rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
+ }
+
+ write_unlock(&bond->curr_slave_lock);
+}
+
+static void rlb_update_client(struct rlb_client_info *client_info)
+{
+ int i;
+
+ if (!client_info->slave) {
+ return;
+ }
+
+ for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ client_info->ip_dst,
+ client_info->slave->dev,
+ client_info->ip_src,
+ client_info->mac_dst,
+ client_info->slave->dev->dev_addr,
+ client_info->mac_dst);
+ if (!skb) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: failed to create an ARP packet\n");
+ continue;
+ }
+
+ skb->dev = client_info->slave->dev;
+
+ if (client_info->tag) {
+ skb = vlan_put_tag(skb, client_info->vlan_id);
+ if (!skb) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: failed to insert VLAN tag\n");
+ continue;
+ }
+ }
+
+ arp_xmit(skb);
+ }
+}
+
+/* sends ARP REPLIES that update the clients that need updating */
+static void rlb_update_rx_clients(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ u32 hash_index;
+
+ _lock_rx_hashtbl(bond);
+
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+ if (client_info->ntt) {
+ rlb_update_client(client_info);
+ if (bond_info->rlb_update_retry_counter == 0) {
+ client_info->ntt = 0;
+ }
+ }
+ }
+
+ /* do not update the entries again until this counter is zero, so as
+ * not to confuse the clients.
+ */
+ bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
+
+ _unlock_rx_hashtbl(bond);
+}
+
+/* The slave was assigned a new mac address - update the clients */
+static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ int ntt = 0;
+ u32 hash_index;
+
+ _lock_rx_hashtbl(bond);
+
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+
+ if ((client_info->slave == slave) &&
+ memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
+ client_info->ntt = 1;
+ ntt = 1;
+ }
+ }
+
+ // update the team's flag only after the whole iteration
+ if (ntt) {
+ bond_info->rx_ntt = 1;
+ //fasten the change
+ bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
+ }
+
+ _unlock_rx_hashtbl(bond);
+}
+
+/* mark all clients using src_ip to be updated */
+static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ u32 hash_index;
+
+ _lock_rx_hashtbl(bond);
+
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+
+ if (!client_info->slave) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: found a client with no channel in "
+ "the client's hash table\n");
+ continue;
+ }
+ /* update all clients using this src_ip that are not assigned
+ * to the team's address (curr_active_slave) and have a known
+ * unicast mac address.
+ */
+ if ((client_info->ip_src == src_ip) &&
+ memcmp(client_info->slave->dev->dev_addr,
+ bond->dev->dev_addr, ETH_ALEN) &&
+ memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
+ client_info->ntt = 1;
+ bond_info->rx_ntt = 1;
+ }
+ }
+
+ _unlock_rx_hashtbl(bond);
+}
+
+/* Caller must hold both bond and ptr locks for read */
+static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+ struct slave *assigned_slave;
+ struct rlb_client_info *client_info;
+ u32 hash_index = 0;
+
+ _lock_rx_hashtbl(bond);
+
+ hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+
+ if (client_info->assigned) {
+ if ((client_info->ip_src == arp->ip_src) &&
+ (client_info->ip_dst == arp->ip_dst)) {
+ /* the entry is already assigned to this client */
+ if (memcmp(arp->mac_dst, mac_bcast, ETH_ALEN)) {
+ /* update mac address from arp */
+ memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ }
+
+ assigned_slave = client_info->slave;
+ if (assigned_slave) {
+ _unlock_rx_hashtbl(bond);
+ return assigned_slave;
+ }
+ } else {
+ /* the entry is already assigned to some other client,
+ * move the old client to primary (curr_active_slave) so
+ * that the new client can be assigned to this entry.
+ */
+ if (bond->curr_active_slave &&
+ client_info->slave != bond->curr_active_slave) {
+ client_info->slave = bond->curr_active_slave;
+ rlb_update_client(client_info);
+ }
+ }
+ }
+ /* assign a new slave */
+ assigned_slave = rlb_next_rx_slave(bond);
+
+ if (assigned_slave) {
+ client_info->ip_src = arp->ip_src;
+ client_info->ip_dst = arp->ip_dst;
+ /* arp->mac_dst is broadcast for arp requests.
+ * will be updated with the client's actual unicast mac address
+ * upon receiving an arp reply.
+ */
+ memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ client_info->slave = assigned_slave;
+
+ if (memcmp(client_info->mac_dst, mac_bcast, ETH_ALEN)) {
+ client_info->ntt = 1;
+ bond->alb_info.rx_ntt = 1;
+ } else {
+ client_info->ntt = 0;
+ }
+
+ if (!list_empty(&bond->vlan_list)) {
+ unsigned short vlan_id;
+ int res = vlan_get_tag(skb, &vlan_id);
+ if (!res) {
+ client_info->tag = 1;
+ client_info->vlan_id = vlan_id;
+ }
+ }
+
+ if (!client_info->assigned) {
+ u32 prev_tbl_head = bond_info->rx_hashtbl_head;
+ bond_info->rx_hashtbl_head = hash_index;
+ client_info->next = prev_tbl_head;
+ if (prev_tbl_head != RLB_NULL_INDEX) {
+ bond_info->rx_hashtbl[prev_tbl_head].prev =
+ hash_index;
+ }
+ client_info->assigned = 1;
+ }
+ }
+
+ _unlock_rx_hashtbl(bond);
+
+ return assigned_slave;
+}
+
+/* chooses (and returns) transmit channel for arp reply
+ * does not choose channel for other arp types since they are
+ * sent on the curr_active_slave
+ */
+static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+{
+ struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+ struct slave *tx_slave = NULL;
+
+ if (arp->op_code == __constant_htons(ARPOP_REPLY)) {
+ /* the arp must be sent on the selected
+ * rx channel
+ */
+ tx_slave = rlb_choose_channel(skb, bond);
+ if (tx_slave) {
+ memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
+ }
+ dprintk("Server sent ARP Reply packet\n");
+ } else if (arp->op_code == __constant_htons(ARPOP_REQUEST)) {
+ /* Create an entry in the rx_hashtbl for this client as a
+ * place holder.
+ * When the arp reply is received the entry will be updated
+ * with the correct unicast address of the client.
+ */
+ rlb_choose_channel(skb, bond);
+
+ /* The ARP reply packets must be delayed so that
+ * they can cancel out the influence of the ARP request.
+ */
+ bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
+
+ /* arp requests are broadcast and are sent on the primary, so
+ * the request will collapse all clients on the subnet onto
+ * the primary slave. We must register these clients to be
+ * updated with their assigned mac.
+ */
+ rlb_req_update_subnet_clients(bond, arp->ip_src);
+ dprintk("Server sent ARP Request packet\n");
+ }
+
+ return tx_slave;
+}
+
+/* Caller must hold bond lock for read */
+static void rlb_rebalance(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *assigned_slave;
+ struct rlb_client_info *client_info;
+ int ntt;
+ u32 hash_index;
+
+ _lock_rx_hashtbl(bond);
+
+ ntt = 0;
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+ assigned_slave = rlb_next_rx_slave(bond);
+ if (assigned_slave && (client_info->slave != assigned_slave)) {
+ client_info->slave = assigned_slave;
+ client_info->ntt = 1;
+ ntt = 1;
+ }
+ }
+
+ /* update the team's flag only after the whole iteration */
+ if (ntt) {
+ bond_info->rx_ntt = 1;
+ }
+ _unlock_rx_hashtbl(bond);
+}
+
+/* Caller must hold rx_hashtbl lock */
+static void rlb_init_table_entry(struct rlb_client_info *entry)
+{
+ memset(entry, 0, sizeof(struct rlb_client_info));
+ entry->next = RLB_NULL_INDEX;
+ entry->prev = RLB_NULL_INDEX;
+}
+
+static int rlb_initialize(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
+ int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
+ int i;
+
+ spin_lock_init(&(bond_info->rx_hashtbl_lock));
+
+ _lock_rx_hashtbl(bond);
+
+ bond_info->rx_hashtbl = kmalloc(size, GFP_KERNEL);
+ if (!bond_info->rx_hashtbl) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: %s: Failed to allocate RLB hash table\n",
+ bond->dev->name);
+ _unlock_rx_hashtbl(bond);
+ return -1;
+ }
+
+ bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
+
+ for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+ rlb_init_table_entry(bond_info->rx_hashtbl + i);
+ }
+
+ _unlock_rx_hashtbl(bond);
+
+ /*initialize packet type*/
+ pk_type->type = __constant_htons(ETH_P_ARP);
+ pk_type->dev = bond->dev;
+ pk_type->func = rlb_arp_recv;
+
+ /* register to receive ARPs */
+ dev_add_pack(pk_type);
+
+ return 0;
+}
+
+static void rlb_deinitialize(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+
+ dev_remove_pack(&(bond_info->rlb_pkt_type));
+
+ _lock_rx_hashtbl(bond);
+
+ kfree(bond_info->rx_hashtbl);
+ bond_info->rx_hashtbl = NULL;
+ bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
+
+ _unlock_rx_hashtbl(bond);
+}
+
+static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ u32 curr_index;
+
+ _lock_rx_hashtbl(bond);
+
+ curr_index = bond_info->rx_hashtbl_head;
+ while (curr_index != RLB_NULL_INDEX) {
+ struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
+ u32 next_index = bond_info->rx_hashtbl[curr_index].next;
+ u32 prev_index = bond_info->rx_hashtbl[curr_index].prev;
+
+ if (curr->tag && (curr->vlan_id == vlan_id)) {
+ if (curr_index == bond_info->rx_hashtbl_head) {
+ bond_info->rx_hashtbl_head = next_index;
+ }
+ if (prev_index != RLB_NULL_INDEX) {
+ bond_info->rx_hashtbl[prev_index].next = next_index;
+ }
+ if (next_index != RLB_NULL_INDEX) {
+ bond_info->rx_hashtbl[next_index].prev = prev_index;
+ }
+
+ rlb_init_table_entry(curr);
+ }
+
+ curr_index = next_index;
+ }
+
+ _unlock_rx_hashtbl(bond);
+}
+
+/*********************** tlb/rlb shared functions *********************/
+
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+ struct learning_pkt pkt;
+ int size = sizeof(struct learning_pkt);
+ int i;
+
+ memset(&pkt, 0, size);
+ memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
+ memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
+ pkt.type = __constant_htons(ETH_P_LOOP);
+
+ for (i = 0; i < MAX_LP_BURST; i++) {
+ struct sk_buff *skb;
+ char *data;
+
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ return;
+ }
+
+ data = skb_put(skb, size);
+ memcpy(data, &pkt, size);
+
+ skb->mac.raw = data;
+ skb->nh.raw = data + ETH_HLEN;
+ skb->protocol = pkt.type;
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = slave->dev;
+
+ if (!list_empty(&bond->vlan_list)) {
+ struct vlan_entry *vlan;
+
+ vlan = bond_next_vlan(bond,
+ bond->alb_info.current_alb_vlan);
+
+ bond->alb_info.current_alb_vlan = vlan;
+ if (!vlan) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ skb = vlan_put_tag(skb, vlan->vlan_id);
+ if (!skb) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: failed to insert VLAN tag\n");
+ continue;
+ }
+ }
+
+ dev_queue_xmit(skb);
+ }
+}
+
+/* hw is a boolean parameter that determines whether we should try and
+ * set the hw address of the device as well as the hw address of the
+ * net_device
+ */
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
+{
+ struct net_device *dev = slave->dev;
+ struct sockaddr s_addr;
+
+ if (!hw) {
+ memcpy(dev->dev_addr, addr, dev->addr_len);
+ return 0;
+ }
+
+ /* for rlb each slave must have a unique hw mac address so that */
+ /* each slave will receive packets destined to a different mac */
+ memcpy(s_addr.sa_data, addr, dev->addr_len);
+ s_addr.sa_family = dev->type;
+ if (dev_set_mac_address(dev, &s_addr)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: dev_set_mac_address of dev %s failed! ALB "
+ "mode requires that the base driver support setting "
+ "the hw address also when the network device's "
+ "interface is open\n",
+ dev->name);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/* Caller must hold bond lock for write or curr_slave_lock for write*/
+static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
+{
+ struct slave *disabled_slave = NULL;
+ u8 tmp_mac_addr[ETH_ALEN];
+ int slaves_state_differ;
+
+ slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+
+ memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+ alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+
+ /* fasten the change in the switch */
+ if (SLAVE_IS_OK(slave1)) {
+ alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+ if (bond->alb_info.rlb_enabled) {
+ /* inform the clients that the mac address
+ * has changed
+ */
+ rlb_req_update_slave_clients(bond, slave1);
+ }
+ } else {
+ disabled_slave = slave1;
+ }
+
+ if (SLAVE_IS_OK(slave2)) {
+ alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+ if (bond->alb_info.rlb_enabled) {
+ /* inform the clients that the mac address
+ * has changed
+ */
+ rlb_req_update_slave_clients(bond, slave2);
+ }
+ } else {
+ disabled_slave = slave2;
+ }
+
+ if (bond->alb_info.rlb_enabled && slaves_state_differ) {
+ /* A disabled slave was assigned an active mac addr */
+ rlb_teach_disabled_mac_on_primary(bond,
+ disabled_slave->dev->dev_addr);
+ }
+}
+
+/**
+ * alb_change_hw_addr_on_detach
+ * @bond: bonding we're working on
+ * @slave: the slave that was just detached
+ *
+ * We assume that @slave was already detached from the slave list.
+ *
+ * If @slave's permanent hw address is different both from its current
+ * address and from @bond's address, then somewhere in the bond there's
+ * a slave that has @slave's permanent address as its current address.
+ * We'll make sure that that slave no longer uses @slave's permanent address.
+ *
+ * Caller must hold bond lock
+ */
+static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
+{
+ int perm_curr_diff;
+ int perm_bond_diff;
+
+ perm_curr_diff = memcmp(slave->perm_hwaddr,
+ slave->dev->dev_addr,
+ ETH_ALEN);
+ perm_bond_diff = memcmp(slave->perm_hwaddr,
+ bond->dev->dev_addr,
+ ETH_ALEN);
+
+ if (perm_curr_diff && perm_bond_diff) {
+ struct slave *tmp_slave;
+ int i, found = 0;
+
+ bond_for_each_slave(bond, tmp_slave, i) {
+ if (!memcmp(slave->perm_hwaddr,
+ tmp_slave->dev->dev_addr,
+ ETH_ALEN)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ alb_swap_mac_addr(bond, slave, tmp_slave);
+ }
+ }
+}
+
+/**
+ * alb_handle_addr_collision_on_attach
+ * @bond: bonding we're working on
+ * @slave: the slave that was just attached
+ *
+ * checks uniqueness of the slave's mac address and handles the case where
+ * the new slave uses the bond's mac address.
+ *
+ * If the permanent hw address of @slave is @bond's hw address, we need to
+ * find a different hw address to give @slave, that isn't in use by any other
+ * slave in the bond. This address must be, of course, one of the permanent
+ * addresses of the other slaves.
+ *
+ * We go over the slave list, and for each slave there we compare its
+ * permanent hw address with the current address of all the other slaves.
+ * If no match was found, then we've found a slave with a permanent address
+ * that isn't used by any other slave in the bond, so we can assign it to
+ * @slave.
+ *
+ * assumption: this function is called before @slave is attached to the
+ * bond slave list.
+ *
+ * caller must hold the bond lock for write since the mac addresses are compared
+ * and may be swapped.
+ */
+static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
+{
+ struct slave *tmp_slave1, *tmp_slave2, *free_mac_slave;
+ struct slave *has_bond_addr = bond->curr_active_slave;
+ int i, j, found = 0;
+
+ if (bond->slave_cnt == 0) {
+ /* this is the first slave */
+ return 0;
+ }
+
+ /* if slave's mac address differs from bond's mac address
+ * check uniqueness of slave's mac address against the other
+ * slaves in the bond.
+ */
+ if (memcmp(slave->perm_hwaddr, bond->dev->dev_addr, ETH_ALEN)) {
+ bond_for_each_slave(bond, tmp_slave1, i) {
+ if (!memcmp(tmp_slave1->dev->dev_addr, slave->dev->dev_addr,
+ ETH_ALEN)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ /* a slave was found that is using the mac address
+ * of the new slave
+ */
+ printk(KERN_ERR DRV_NAME
+ ": Error: the hw address of slave %s is not "
+ "unique - cannot enslave it!",
+ slave->dev->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* The slave's address is equal to the address of the bond.
+ * Search for a spare address in the bond for this slave.
+ */
+ free_mac_slave = NULL;
+
+ bond_for_each_slave(bond, tmp_slave1, i) {
+ found = 0;
+ bond_for_each_slave(bond, tmp_slave2, j) {
+ if (!memcmp(tmp_slave1->perm_hwaddr,
+ tmp_slave2->dev->dev_addr,
+ ETH_ALEN)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* no slave has tmp_slave1's perm addr
+ * as its curr addr
+ */
+ free_mac_slave = tmp_slave1;
+ break;
+ }
+
+ if (!has_bond_addr) {
+ if (!memcmp(tmp_slave1->dev->dev_addr,
+ bond->dev->dev_addr,
+ ETH_ALEN)) {
+
+ has_bond_addr = tmp_slave1;
+ }
+ }
+ }
+
+ if (free_mac_slave) {
+ alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+ bond->alb_info.rlb_enabled);
+
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: the hw address of slave %s is in use by "
+ "the bond; giving it the hw address of %s\n",
+ slave->dev->name, free_mac_slave->dev->name);
+
+ } else if (has_bond_addr) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: the hw address of slave %s is in use by the "
+ "bond; couldn't find a slave with a free hw address to "
+ "give it (this should not have happened)\n",
+ slave->dev->name);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * alb_set_mac_address
+ * @bond: bonding we're working on
+ * @addr: the new hw address to set (points to a struct sockaddr)
+ *
+ * In TLB mode all slaves are configured to the bond's hw address, but set
+ * their dev_addr field to different addresses (based on their permanent hw
+ * addresses).
+ *
+ * For each slave, this function sets the interface to the new address and then
+ * changes its dev_addr field to its previous value.
+ *
+ * Unwinding assumes bond's mac address has not yet changed.
+ */
+static int alb_set_mac_address(struct bonding *bond, void *addr)
+{
+ struct sockaddr sa;
+ struct slave *slave, *stop_at;
+ char tmp_addr[ETH_ALEN];
+ int res;
+ int i;
+
+ if (bond->alb_info.rlb_enabled) {
+ return 0;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev->set_mac_address == NULL) {
+ res = -EOPNOTSUPP;
+ goto unwind;
+ }
+
+ /* save net_device's current hw address */
+ memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+
+ res = dev_set_mac_address(slave->dev, addr);
+
+ /* restore net_device's hw address */
+ memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+
+ if (res) {
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+ sa.sa_family = bond->dev->type;
+
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+ dev_set_mac_address(slave->dev, &sa);
+ memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ }
+
+ return res;
+}
+
+/************************ exported alb functions ***********************/
+
+int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
+{
+ int res;
+
+ res = tlb_initialize(bond);
+ if (res) {
+ return res;
+ }
+
+ if (rlb_enabled) {
+ bond->alb_info.rlb_enabled = 1;
+ /* initialize rlb */
+ res = rlb_initialize(bond);
+ if (res) {
+ tlb_deinitialize(bond);
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+void bond_alb_deinitialize(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+
+ tlb_deinitialize(bond);
+
+ if (bond_info->rlb_enabled) {
+ rlb_deinitialize(bond);
+ }
+}
+
+int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct ethhdr *eth_data;
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *tx_slave = NULL;
+ static u32 ip_bcast = 0xffffffff;
+ int hash_size = 0;
+ int do_tx_balance = 1;
+ u32 hash_index = 0;
+ u8 *hash_start = NULL;
+ int res = 1;
+
+ skb->mac.raw = (unsigned char *)skb->data;
+ eth_data = eth_hdr(skb);
+
+ /* make sure that the curr_active_slave and the slaves list do
+ * not change during tx
+ */
+ read_lock(&bond->lock);
+ read_lock(&bond->curr_slave_lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_IP:
+ if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) ||
+ (skb->nh.iph->daddr == ip_bcast) ||
+ (skb->nh.iph->protocol == IPPROTO_IGMP)) {
+ do_tx_balance = 0;
+ break;
+ }
+ hash_start = (char*)&(skb->nh.iph->daddr);
+ hash_size = sizeof(skb->nh.iph->daddr);
+ break;
+ case ETH_P_IPV6:
+ if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
+ do_tx_balance = 0;
+ break;
+ }
+
+ hash_start = (char*)&(skb->nh.ipv6h->daddr);
+ hash_size = sizeof(skb->nh.ipv6h->daddr);
+ break;
+ case ETH_P_IPX:
+ if (ipx_hdr(skb)->ipx_checksum !=
+ __constant_htons(IPX_NO_CHECKSUM)) {
+ /* something is wrong with this packet */
+ do_tx_balance = 0;
+ break;
+ }
+
+ if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+ /* NCP is the only protocol worth balancing
+ * in this family since it has an "ARP" like
+ * mechanism; everything else goes unbalanced
+ */
+ do_tx_balance = 0;
+ break;
+ }
+
+ hash_start = (char*)eth_data->h_dest;
+ hash_size = ETH_ALEN;
+ break;
+ case ETH_P_ARP:
+ do_tx_balance = 0;
+ if (bond_info->rlb_enabled) {
+ tx_slave = rlb_arp_xmit(skb, bond);
+ }
+ break;
+ default:
+ do_tx_balance = 0;
+ break;
+ }
+
+ if (do_tx_balance) {
+ hash_index = _simple_hash(hash_start, hash_size);
+ tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
+ }
+
+ if (!tx_slave) {
+ /* unbalanced or unassigned, send through primary */
+ tx_slave = bond->curr_active_slave;
+ bond_info->unbalanced_load += skb->len;
+ }
+
+ if (tx_slave && SLAVE_IS_OK(tx_slave)) {
+ if (tx_slave != bond->curr_active_slave) {
+ memcpy(eth_data->h_source,
+ tx_slave->dev->dev_addr,
+ ETH_ALEN);
+ }
+
+ res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ } else {
+ if (tx_slave) {
+ tlb_clear_slave(bond, tx_slave, 0);
+ }
+ }
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ read_unlock(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+void bond_alb_monitor(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *slave;
+ int i;
+
+ read_lock(&bond->lock);
+
+ if (bond->kill_timers) {
+ goto out;
+ }
+
+ if (bond->slave_cnt == 0) {
+ bond_info->tx_rebalance_counter = 0;
+ bond_info->lp_counter = 0;
+ goto re_arm;
+ }
+
+ bond_info->tx_rebalance_counter++;
+ bond_info->lp_counter++;
+
+ /* send learning packets */
+ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
+ /* change of curr_active_slave involves swapping of mac addresses.
+ * in order to prevent this swapping from happening while
+ * sending the learning packets, the curr_slave_lock must be held for
+ * read.
+ */
+ read_lock(&bond->curr_slave_lock);
+
+ bond_for_each_slave(bond, slave, i) {
+ alb_send_learning_packets(slave,slave->dev->dev_addr);
+ }
+
+ read_unlock(&bond->curr_slave_lock);
+
+ bond_info->lp_counter = 0;
+ }
+
+ /* rebalance tx traffic */
+ if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+
+ read_lock(&bond->curr_slave_lock);
+
+ bond_for_each_slave(bond, slave, i) {
+ tlb_clear_slave(bond, slave, 1);
+ if (slave == bond->curr_active_slave) {
+ SLAVE_TLB_INFO(slave).load =
+ bond_info->unbalanced_load /
+ BOND_TLB_REBALANCE_INTERVAL;
+ bond_info->unbalanced_load = 0;
+ }
+ }
+
+ read_unlock(&bond->curr_slave_lock);
+
+ bond_info->tx_rebalance_counter = 0;
+ }
+
+ /* handle rlb stuff */
+ if (bond_info->rlb_enabled) {
+ /* the following code changes the promiscuity of the
+ * curr_active_slave. It needs to be locked with a
+ * write lock to protect from other code that also
+ * sets the promiscuity.
+ */
+ write_lock(&bond->curr_slave_lock);
+
+ if (bond_info->primary_is_promisc &&
+ (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
+
+ bond_info->rlb_promisc_timeout_counter = 0;
+
+ /* If the primary was set to promiscuous mode
+ * because a slave was disabled then
+ * it can now leave promiscuous mode.
+ */
+ dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+ bond_info->primary_is_promisc = 0;
+ }
+
+ write_unlock(&bond->curr_slave_lock);
+
+ if (bond_info->rlb_rebalance) {
+ bond_info->rlb_rebalance = 0;
+ rlb_rebalance(bond);
+ }
+
+ /* check if clients need updating */
+ if (bond_info->rx_ntt) {
+ if (bond_info->rlb_update_delay_counter) {
+ --bond_info->rlb_update_delay_counter;
+ } else {
+ rlb_update_rx_clients(bond);
+ if (bond_info->rlb_update_retry_counter) {
+ --bond_info->rlb_update_retry_counter;
+ } else {
+ bond_info->rx_ntt = 0;
+ }
+ }
+ }
+ }
+
+re_arm:
+ mod_timer(&(bond_info->alb_timer), jiffies + alb_delta_in_ticks);
+out:
+ read_unlock(&bond->lock);
+}
+
+/* assumption: called before the slave is attached to the bond
+ * and not locked by the bond lock
+ */
+int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
+{
+ int res;
+
+ res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+ bond->alb_info.rlb_enabled);
+ if (res) {
+ return res;
+ }
+
+ /* caller must hold the bond lock for write since the mac addresses
+ * are compared and may be swapped.
+ */
+ write_lock_bh(&bond->lock);
+
+ res = alb_handle_addr_collision_on_attach(bond, slave);
+
+ write_unlock_bh(&bond->lock);
+
+ if (res) {
+ return res;
+ }
+
+ tlb_init_slave(slave);
+
+ /* order a rebalance ASAP */
+ bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.rlb_rebalance = 1;
+ }
+
+ return 0;
+}
+
+/* Caller must hold bond lock for write */
+void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
+{
+ if (bond->slave_cnt > 1) {
+ alb_change_hw_addr_on_detach(bond, slave);
+ }
+
+ tlb_clear_slave(bond, slave, 0);
+
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.next_rx_slave = NULL;
+ rlb_clear_slave(bond, slave);
+ }
+}
+
+/* Caller must hold bond lock for read */
+void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+
+ if (link == BOND_LINK_DOWN) {
+ tlb_clear_slave(bond, slave, 0);
+ if (bond->alb_info.rlb_enabled) {
+ rlb_clear_slave(bond, slave);
+ }
+ } else if (link == BOND_LINK_UP) {
+ /* order a rebalance ASAP */
+ bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.rlb_rebalance = 1;
+ /* If the updelay module parameter is smaller than the
+ * forwarding delay of the switch, the rebalance will
+ * not work because the rebalance arp replies will
+ * not be forwarded to the clients.
+ */
+ }
+ }
+}
+
+/**
+ * bond_alb_handle_active_change - assign new curr_active_slave
+ * @bond: our bonding struct
+ * @new_slave: new slave to assign
+ *
+ * Set the bond->curr_active_slave to @new_slave and handle
+ * mac address swapping and promiscuity changes as needed.
+ *
+ * Caller must hold bond curr_slave_lock for write (or bond lock for write)
+ */
+void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
+{
+ struct slave *swap_slave;
+ int i;
+
+ if (bond->curr_active_slave == new_slave) {
+ return;
+ }
+
+ if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
+ dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+ bond->alb_info.primary_is_promisc = 0;
+ bond->alb_info.rlb_promisc_timeout_counter = 0;
+ }
+
+ swap_slave = bond->curr_active_slave;
+ bond->curr_active_slave = new_slave;
+
+ if (!new_slave || (bond->slave_cnt == 0)) {
+ return;
+ }
+
+ /* set the new curr_active_slave to the bond's mac address
+ * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
+ */
+ if (!swap_slave) {
+ struct slave *tmp_slave;
+ /* find slave that is holding the bond's mac address */
+ bond_for_each_slave(bond, tmp_slave, i) {
+ if (!memcmp(tmp_slave->dev->dev_addr,
+ bond->dev->dev_addr, ETH_ALEN)) {
+ swap_slave = tmp_slave;
+ break;
+ }
+ }
+ }
+
+ /* curr_active_slave must be set before calling alb_swap_mac_addr */
+ if (swap_slave) {
+ /* swap mac address */
+ alb_swap_mac_addr(bond, swap_slave, new_slave);
+ } else {
+ /* set the new_slave to the bond mac address */
+ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+ bond->alb_info.rlb_enabled);
+ /* fasten bond mac on new current slave */
+ alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+ }
+}
+
+int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct sockaddr *sa = addr;
+ struct slave *slave, *swap_slave;
+ int res;
+ int i;
+
+ if (!is_valid_ether_addr(sa->sa_data)) {
+ return -EADDRNOTAVAIL;
+ }
+
+ res = alb_set_mac_address(bond, addr);
+ if (res) {
+ return res;
+ }
+
+ memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+
+ /* If there is no curr_active_slave there is nothing else to do.
+ * Otherwise we'll need to pass the new address to it and handle
+ * duplications.
+ */
+ if (!bond->curr_active_slave) {
+ return 0;
+ }
+
+ swap_slave = NULL;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (!memcmp(slave->dev->dev_addr, bond_dev->dev_addr, ETH_ALEN)) {
+ swap_slave = slave;
+ break;
+ }
+ }
+
+ if (swap_slave) {
+ alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+ } else {
+ alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
+ bond->alb_info.rlb_enabled);
+
+ alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+ if (bond->alb_info.rlb_enabled) {
+ /* inform clients mac address has changed */
+ rlb_req_update_slave_clients(bond, bond->curr_active_slave);
+ }
+ }
+
+ return 0;
+}
+
+void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
+{
+ if (bond->alb_info.current_alb_vlan &&
+ (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
+ bond->alb_info.current_alb_vlan = NULL;
+ }
+
+ if (bond->alb_info.rlb_enabled) {
+ rlb_clear_vlan(bond, vlan_id);
+ }
+}
+
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
new file mode 100644
index 000000000000..e4091cd8d654
--- /dev/null
+++ b/drivers/net/bonding/bond_alb.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ *
+ * Changes:
+ *
+ * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
+ * - Add support for setting bond's MAC address with special
+ * handling required for ALB/TLB.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Code cleanup and style changes
+ */
+
+#ifndef __BOND_ALB_H__
+#define __BOND_ALB_H__
+
+#include <linux/if_ether.h>
+
+struct bonding;
+struct slave;
+
+#define BOND_ALB_INFO(bond) ((bond)->alb_info)
+#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
+
+struct tlb_client_info {
+ struct slave *tx_slave; /* A pointer to the slave used for transmitting
+ * packets to a Client that the Hash function
+ * gave this entry index.
+ */
+ u32 tx_bytes; /* Each Client accumulates the BytesTx that
+ * were transmitted to it, and after each
+ * CallBack the LoadHistory is divided
+ * by the balance interval
+ */
+ u32 load_history; /* This field contains the amount of Bytes
+ * that were transmitted to this client by
+ * the server on the previous balance
+ * interval in Bps.
+ */
+ u32 next; /* The next Hash table entry index, assigned
+ * to use the same adapter for transmit.
+ */
+ u32 prev; /* The previous Hash table entry index,
+ * assigned to use the same adapter for transmit.
+ */
+};
+
+/* -------------------------------------------------------------------------
+ * struct rlb_client_info contains all info related to a specific rx client
+ * connection. This is the Clients Hash Table entry struct
+ * -------------------------------------------------------------------------
+ */
+struct rlb_client_info {
+ u32 ip_src; /* the server IP address */
+ u32 ip_dst; /* the client IP address */
+ u8 mac_dst[ETH_ALEN]; /* the client MAC address */
+ u32 next; /* The next Hash table entry index */
+ u32 prev; /* The previous Hash table entry index */
+ u8 assigned; /* checking whether this entry is assigned */
+ u8 ntt; /* flag - need to transmit client info */
+ struct slave *slave; /* the slave assigned to this client */
+ u8 tag; /* flag - need to tag skb */
+ unsigned short vlan_id; /* VLAN tag associated with IP address */
+};
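+/* Traversal sketch (illustrative): assigned entries form an index-linked
+ * list headed by alb_bond_info.rx_hashtbl_head; bond_alb.c walks it with a
+ * pattern like
+ *
+ *	u32 i = bond_info->rx_hashtbl_head;
+ *	while (i != RLB_NULL_INDEX) {
+ *		struct rlb_client_info *client = &(bond_info->rx_hashtbl[i]);
+ *		... use client ...
+ *		i = client->next;
+ *	}
+ */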
+
+struct tlb_slave_info {
+ u32 head; /* Index to the head of the bi-directional clients
+ * hash table entries list. The entries in the list
+ * are the entries that were assigned to use this
+ * slave for transmit.
+ */
+ u32 load; /* Each slave sums the loadHistory of all clients
+ * assigned to it
+ */
+};
+
+struct alb_bond_info {
+ struct timer_list alb_timer;
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ spinlock_t tx_hashtbl_lock;
+ u32 unbalanced_load;
+ int tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
+ struct packet_type rlb_pkt_type;
+ struct rlb_client_info *rx_hashtbl; /* Receive hash table */
+ spinlock_t rx_hashtbl_lock;
+ u32 rx_hashtbl_head;
+ u8 rx_ntt; /* flag - need to transmit
+ * to all rx clients
+ */
+ struct slave *next_rx_slave;/* next slave to be assigned
+ * to a new rx client
+ */
+ u32 rlb_interval_counter;
+ u8 primary_is_promisc; /* boolean */
+ u32 rlb_promisc_timeout_counter;/* counts primary
+ * promiscuity time
+ */
+ u32 rlb_update_delay_counter;
+ u32 rlb_update_retry_counter;/* counter of retries
+ * of client update
+ */
+ u8 rlb_rebalance; /* flag - indicates that the
+ * rx traffic should be
+ * rebalanced
+ */
+ struct vlan_entry *current_alb_vlan;
+};
+
+int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
+void bond_alb_deinitialize(struct bonding *bond);
+int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
+void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
+void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
+int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+void bond_alb_monitor(struct bonding *bond);
+int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
+void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
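+/* Call-order sketch (illustrative; the actual wiring lives in bond_main.c):
+ *
+ *	bond_alb_initialize(bond, rlb_enabled);   when the ALB/TLB mode starts
+ *	bond_alb_init_slave(bond, slave);         when a slave is enslaved
+ *	bond_alb_monitor(bond);                   from the periodic alb_timer
+ *	bond_alb_xmit(skb, bond_dev);             on transmit
+ *	bond_alb_deinit_slave(bond, slave);       when a slave is released
+ *	bond_alb_deinitialize(bond);              when the mode is torn down
+ */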
+#endif /* __BOND_ALB_H__ */
+
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
new file mode 100644
index 000000000000..770e28f98fd5
--- /dev/null
+++ b/drivers/net/bonding/bond_main.c
@@ -0,0 +1,4708 @@
+/*
+ * originally based on the dummy device.
+ *
+ * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
+ * Licensed under the GPL. Based on dummy.c, and eql.c devices.
+ *
+ * bonding.c: an Ethernet Bonding driver
+ *
+ * This is useful to talk to a Cisco EtherChannel compatible equipment:
+ * Cisco 5500
+ * Sun Trunking (Solaris)
+ * Alteon AceDirector Trunks
+ * Linux Bonding
+ * and probably many L2 switches ...
+ *
+ * How it works:
+ * ifconfig bond0 ipaddress netmask up
+ * will setup a network device, with an ip address. No mac address
+ * will be assigned at this time. The hw mac address will come from
+ * the first slave bonded to the channel. All slaves will then use
+ * this hw mac address.
+ *
+ * ifconfig bond0 down
+ * will release all slaves, marking them as down.
+ *
+ * ifenslave bond0 eth0
+ * will attach eth0 to bond0 as a slave. eth0 hw mac address will either
+ * a: be used as initial mac address
+ * b: if a hw mac address already is there, eth0's hw mac address
+ * will then be set from bond0.
+ *
+ * v0.1 - first working version.
+ * v0.2 - changed stats to be calculated by summing slaves stats.
+ *
+ * Changes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * - fix leaks on failure at bond_init
+ *
+ * 2000/09/30 - Willy Tarreau <willy at meta-x.org>
+ * - added trivial code to release a slave device.
+ * - fixed security bug (CAP_NET_ADMIN not checked)
+ * - implemented MII link monitoring to disable dead links :
+ * All MII capable slaves are checked every <miimon> milliseconds
+ * (100 ms seems good). This value can be changed by passing it to
+ * insmod. A value of zero disables the monitoring (default).
+ * - fixed an infinite loop in bond_xmit_roundrobin() when there's no
+ * good slave.
+ * - made the code hopefully SMP safe
+ *
+ * 2000/10/03 - Willy Tarreau <willy at meta-x.org>
+ * - optimized slave lists based on relevant suggestions from Thomas Davis
+ * - implemented active-backup method to obtain HA with two switches:
+ * stay as long as possible on the same active interface, while we
+ * also monitor the backup one (MII link status) because we want to know
+ * if we are able to switch at any time. ( pass "mode=1" to insmod )
+ * - lots of stress testings because we need it to be more robust than the
+ * wires ! :->
+ *
+ * 2000/10/09 - Willy Tarreau <willy at meta-x.org>
+ * - added up and down delays after link state change.
+ * - optimized the slaves chaining so that when we run forward, we never
+ * repass through the bond itself, but we can find it by searching
+ * backwards. Renders the deletion more difficult, but accelerates the
+ * scan.
+ * - smarter enslaving and releasing.
+ * - finer and more robust SMP locking
+ *
+ * 2000/10/17 - Willy Tarreau <willy at meta-x.org>
+ * - fixed two potential SMP race conditions
+ *
+ * 2000/10/18 - Willy Tarreau <willy at meta-x.org>
+ * - small fixes to the monitoring FSM in case of zero delays
+ * 2000/11/01 - Willy Tarreau <willy at meta-x.org>
+ * - fixed first slave not automatically used in trunk mode.
+ * 2000/11/10 : spelling of "EtherChannel" corrected.
+ * 2000/11/13 : fixed a race condition in case of concurrent accesses to ioctl().
+ * 2000/12/16 : fixed improper usage of rtnl_exlock_nowait().
+ *
+ * 2001/1/3 - Chad N. Tindel <ctindel at ieee dot org>
+ * - The bonding driver now simulates MII status monitoring, just like
+ * a normal network device. It will show that the link is down iff
+ * every slave in the bond shows that their links are down. If at least
+ * one slave is up, the bond's MII status will appear as up.
+ *
+ * 2001/2/7 - Chad N. Tindel <ctindel at ieee dot org>
+ * - Applications can now query the bond from user space to get
+ * information which may be useful. They do this by calling
+ * the BOND_INFO_QUERY ioctl. Once the app knows how many slaves
+ * are in the bond, it can call the BOND_SLAVE_INFO_QUERY ioctl to
+ * get slave specific information (# link failures, etc). See
+ * <linux/if_bonding.h> for more details. The structs of interest
+ * are ifbond and ifslave.
+ *
+ * 2001/4/5 - Chad N. Tindel <ctindel at ieee dot org>
+ * - Ported to 2.4 Kernel
+ *
+ * 2001/5/2 - Jeffrey E. Mast <jeff at mastfamily dot com>
+ * - When a device is detached from a bond, the slave device is no longer
+ * left thinking that it has a master.
+ *
+ * 2001/5/16 - Jeffrey E. Mast <jeff at mastfamily dot com>
+ * - memset did not appropriately initialize the bond rw_locks. Used
+ * rwlock_init to initialize to unlocked state to prevent deadlock when
+ * first attempting a lock
+ * - Called SET_MODULE_OWNER for bond device
+ *
+ * 2001/5/17 - Tim Anderson <tsa at mvista.com>
+ * - 2 paths for slave release: 1) through ioctl
+ * and 2) through close. Both paths need to release the same way.
+ * - the free slave in bond release is changing slave status before
+ * the free. The netdev_set_master() is intended to change slave state
+ * so it should not be done as part of the release process.
+ * - Simple rule for slave state at release: only the active in A/B and
+ * only one in the trunked case.
+ *
+ * 2001/6/01 - Tim Anderson <tsa at mvista.com>
+ * - Now call dev_close when releasing a slave so it doesn't screw up
+ * our routing table.
+ *
+ * 2001/6/01 - Chad N. Tindel <ctindel at ieee dot org>
+ * - Added /proc support for getting bond and slave information.
+ * Information is in /proc/net/<bond device>/info.
+ * - Changed the locking when calling bond_close to prevent deadlock.
+ *
+ * 2001/8/05 - Janice Girouard <girouard at us.ibm.com>
+ * - correct problem where refcnt of slave is not incremented in bond_ioctl
+ * so the system hangs when halting.
+ * - correct locking problem when unable to malloc in bond_enslave.
+ * - adding bond_xmit_xor logic.
+ * - adding multiple bond device support.
+ *
+ * 2001/8/13 - Erik Habbinga <erik_habbinga at hp dot com>
+ * - correct locking problem with rtnl_exlock_nowait
+ *
+ * 2001/8/23 - Janice Girouard <girouard at us.ibm.com>
+ * - bzero initial dev_bonds, to correct oops
+ * - convert SIOCDEVPRIVATE to new MII ioctl calls
+ *
+ * 2001/9/13 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
+ * - Add the BOND_CHANGE_ACTIVE ioctl implementation
+ *
+ * 2001/9/14 - Mark Huth <mhuth at mvista dot com>
+ * - Change MII_LINK_READY to not check for end of auto-negotiation,
+ * but only for an up link.
+ *
+ * 2001/9/20 - Chad N. Tindel <ctindel at ieee dot org>
+ * - Add the device field to bonding_t. Previously the net_device
+ * corresponding to a bond wasn't available from the bonding_t
+ * structure.
+ *
+ * 2001/9/25 - Janice Girouard <girouard at us.ibm.com>
+ * - add arp_monitor for active backup mode
+ *
+ * 2001/10/23 - Takao Indoh <indou dot takao at jp dot fujitsu dot com>
+ * - Various memory leak fixes
+ *
+ * 2001/11/5 - Mark Huth <mark dot huth at mvista dot com>
+ * - Don't take rtnl lock in bond_mii_monitor as it deadlocks under
+ * certain hotswap conditions.
+ * Note: this same change may be required in bond_arp_monitor ???
+ * - Remove possibility of calling bond_sethwaddr with NULL slave_dev ptr
+ * - Handle hot swap ethernet interface deregistration events to remove
+ * kernel oops following hot swap of enslaved interface
+ *
+ * 2002/1/2 - Chad N. Tindel <ctindel at ieee dot org>
+ * - Restore original slave flags at release time.
+ *
+ * 2002/02/18 - Erik Habbinga <erik_habbinga at hp dot com>
+ * - bond_release(): calling kfree on our_slave after call to
+ * bond_restore_slave_flags, not before
+ * - bond_enslave(): saving slave flags into original_flags before
+ * call to netdev_set_master, so the IFF_SLAVE flag doesn't end
+ * up in original_flags
+ *
+ * 2002/04/05 - Mark Smith <mark.smith at comdev dot cc> and
+ * Steve Mead <steve.mead at comdev dot cc>
+ * - Port Gleb Natapov's multicast support patches from 2.4.12
+ * to 2.4.18 adding support for multicast.
+ *
+ * 2002/06/10 - Tony Cureington <tony.cureington * hp_com>
+ * - corrected uninitialized pointer (ifr.ifr_data) in bond_check_dev_link;
+ * actually changed function to use MIIPHY, then MIIREG, and finally
+ * ETHTOOL to determine the link status
+ * - fixed bad ifr_data pointer assignments in bond_ioctl
+ * - corrected mode 1 being reported as active-backup in bond_get_info;
+ * also added text to distinguish type of load balancing (rr or xor)
+ * - change arp_ip_target module param from "1-12s" (array of 12 ptrs)
+ * to "s" (a single ptr)
+ *
+ * 2002/08/30 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Removed acquisition of xmit_lock in set_multicast_list; caused
+ * deadlock on SMP (lock is held by caller).
+ * - Revamped SIOCGMIIPHY, SIOCGMIIREG portion of bond_check_dev_link().
+ *
+ * 2002/09/18 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Fixed up bond_check_dev_link() (and callers): removed some magic
+ * numbers, banished local MII_ defines, wrapped ioctl calls to
+ * prevent EFAULT errors
+ *
+ * 2002/9/30 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - make sure the ip target matches the arp_target before saving the
+ * hw address.
+ *
+ * 2002/9/30 - Dan Eisner <eisner at 2robots dot com>
+ * - make sure my_ip is set before taking down the link, since
+ * not all switches respond if the source ip is not set.
+ *
+ * 2002/10/8 - Janice Girouard <girouard at us dot ibm dot com>
+ * - read in the local ip address when enslaving a device
+ * - add primary support
+ * - make sure 2*arp_interval has passed when a new device
+ * is brought on-line before taking it down.
+ *
+ * 2002/09/11 - Philippe De Muyter <phdm at macqel dot be>
+ * - Added bond_xmit_broadcast logic.
+ * - Added bond_mode() support function.
+ *
+ * 2002/10/26 - Laurent Deniel <laurent.deniel at free.fr>
+ * - allow to register multicast addresses only on active slave
+ * (useful in active-backup mode)
+ * - add multicast module parameter
+ * - fix deletion of multicast groups after unloading module
+ *
+ * 2002/11/06 - Kameshwara Rayaprolu <kameshwara.rao * wipro_com>
+ * - Changes to prevent panic from closing the device twice; if we close
+ * the device in bond_release, we must set the original_flags to down
+ * so it won't be closed again by the network layer.
+ *
+ * 2002/11/07 - Tony Cureington <tony.cureington * hp_com>
+ * - Fix arp_target_hw_addr memory leak
+ * - Created activebackup_arp_monitor function to handle arp monitoring
+ * in active backup mode - the bond_arp_monitor had several problems...
+ * such as allowing slaves to tx arps sequentially without any delay
+ * for a response
+ * - Renamed bond_arp_monitor to loadbalance_arp_monitor and re-wrote
+ * this function to just handle arp monitoring in load-balancing mode;
+ * it is a lot more compact now
+ * - Changes to ensure one and only one slave transmits in active-backup
+ * mode
+ * - Made parameters more robust; warn users about bad combinations of
+ * parameters; also if miimon is specified and a network driver does
+ * not support MII or ETHTOOL, inform the user of this
+ * - Changes to support link_failure_count when in arp monitoring mode
+ * - Fix up/down delay reported in /proc
+ * - Added version; log version; make version available from "modinfo -d"
+ * - Fixed problem in bond_check_dev_link - if the first IOCTL (SIOCGMIIPHY)
+ * failed, the ETHTOOL ioctl never got a chance
+ *
+ * 2002/11/16 - Laurent Deniel <laurent.deniel at free.fr>
+ * - fix multicast handling in activebackup_arp_monitor
+ * - remove one unnecessary and confusing curr_active_slave == slave test
+ * in activebackup_arp_monitor
+ *
+ * 2002/11/17 - Laurent Deniel <laurent.deniel at free.fr>
+ * - fix bond_slave_info_query when slave_id = num_slaves
+ *
+ * 2002/11/19 - Janice Girouard <girouard at us dot ibm dot com>
+ * - correct ifr_data reference. Update ifr_data reference
+ * to mii_ioctl_data struct values to avoid confusion.
+ *
+ * 2002/11/22 - Bert Barbe <bert.barbe at oracle dot com>
+ * - Add support for multiple arp_ip_target
+ *
+ * 2002/12/13 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Changed to allow text strings for mode and multicast, e.g.,
+ * insmod bonding mode=active-backup. The numbers still work.
+ * One change: an invalid choice will cause module load failure,
+ * rather than the previous behavior of just picking one.
+ * - Minor cleanups; got rid of dup ctype stuff, atoi function
+ *
+ * 2003/02/07 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Added use_carrier module parameter that causes miimon to
+ * use netif_carrier_ok() test instead of MII/ETHTOOL ioctls.
+ * - Minor cleanups; consolidated ioctl calls to one function.
+ *
+ * 2003/02/07 - Tony Cureington <tony.cureington * hp_com>
+ * - Fix bond_mii_monitor() logic error that could result in
+ * bonding round-robin mode ignoring links after failover/recovery
+ *
+ * 2003/03/17 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - kmalloc fix (GFP_KERNEL to GFP_ATOMIC) reported by
+ * Shmulik dot Hen at intel.com.
+ * - Based on discussion on mailing list, changed use of
+ * update_slave_cnt(), created wrapper functions for adding/removing
+ * slaves, changed bond_xmit_xor() to check slave_cnt instead of
+ * checking slave and slave->dev (which only worked by accident).
+ * - Misc code cleanup: get arp_send() prototype from header file,
+ * add max_bonds to bonding.txt.
+ *
+ * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Make sure only bond_attach_slave() and bond_detach_slave() can
+ * manipulate the slave list, including slave_cnt, even when in
+ * bond_release_all().
+ * - Fixed hang in bond_release() with traffic running:
+ * netdev_set_master() must not be called from within the bond lock.
+ *
+ * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Fixed hang in bond_enslave() with traffic running:
+ * netdev_set_master() must not be called from within the bond lock.
+ *
+ * 2003/03/18 - Amir Noam <amir.noam at intel dot com>
+ * - Added support for getting slave's speed and duplex via ethtool.
+ * Needed for 802.3ad and other future modes.
+ *
+ * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Enable support of modes that need to use the unique mac address of
+ * each slave.
+ * * bond_enslave(): Moved setting the slave's mac address, and
+ * opening it, from the application to the driver. This breaks
+ * backward compatibility with old versions of ifenslave that open
+ * the slave before enslaving it!
+ * * bond_release(): The driver also takes care of closing the slave
+ * and restoring its original mac address.
+ * - Removed the code that restores all base driver's flags.
+ * Flags are automatically restored once all undo stages are done
+ * properly.
+ * - Block possibility of enslaving before the master is up. This
+ * prevents putting the system in an unstable state.
+ *
+ * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
+ * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
+ *
+ * 2003/05/01 - Amir Noam <amir.noam at intel dot com>
+ * - Added ABI version control to restore compatibility between
+ * new/old ifenslave and new/old bonding.
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Fixed bug in bond_release_all(): save old value of curr_active_slave
+ * before setting it to NULL.
+ * - Changed driver versioning scheme to include version number instead
+ * of release date (that is already in another field). There are 3
+ * fields X.Y.Z where:
+ * X - Major version - big behavior changes
+ * Y - Minor version - addition of features
+ * Z - Extra version - minor changes and bug fixes
+ * The current version is 1.0.0 as a base line.
+ *
+ * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Amir Noam <amir.noam at intel dot com>
+ * - Added support for lacp_rate module param.
+ * - Code beautification and style changes (mainly in comments).
+ * new version - 1.0.1
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Based on discussion on mailing list, changed locking scheme
+ * to use lock/unlock or lock_bh/unlock_bh appropriately instead
+ * of lock_irqsave/unlock_irqrestore. The new scheme helps exposing
+ * hidden bugs and solves system hangs that occurred due to the fact
+ * that holding lock_irqsave doesn't prevent softirqs from running.
+ * This also increases total throughput since interrupts are not
+ * blocked on each transmitted packet or monitor timeout.
+ * new version - 2.0.0
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Added support for Transmit load balancing mode.
+ * - Concentrate all assignments of curr_active_slave to a single point
+ * so specific modes can take actions when the primary adapter is
+ * changed.
+ * - Take the updelay parameter into consideration during bond_enslave
+ * since some adapters lose their link during device setup.
+ * - Renamed bond_3ad_link_status_changed() to
+ * bond_3ad_handle_link_change() for compatibility with TLB.
+ * new version - 2.1.0
+ *
+ * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com>
+ * - Added support for Adaptive load balancing mode which is
+ * equivalent to Transmit load balancing + Receive load balancing.
+ * new version - 2.2.0
+ *
+ * 2003/05/15 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Applied fix to activebackup_arp_monitor posted to bonding-devel
+ * by Tony Cureington <tony.cureington * hp_com>. Fixes ARP
+ * monitor endless failover bug. Version to 2.2.10
+ *
+ * 2003/05/20 - Amir Noam <amir.noam at intel dot com>
+ * - Fixed bug in ABI version control - Don't commit to a specific
+ * ABI version if receiving unsupported ioctl commands.
+ *
+ * 2003/05/22 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Fix ifenslave -c causing bond to lose existing routes;
+ * added bond_set_mac_address() that doesn't require the
+ * bond to be down.
+ * - In conjunction with fix for ifenslave -c, in
+ * bond_change_active(), changing to the already active slave
+ * is no longer an error (it successfully does nothing).
+ *
+ * 2003/06/30 - Amir Noam <amir.noam at intel dot com>
+ * - Fixed bond_change_active() for ALB/TLB modes.
+ * Version to 2.2.14.
+ *
+ * 2003/07/29 - Amir Noam <amir.noam at intel dot com>
+ * - Fixed ARP monitoring bug.
+ * Version to 2.2.15.
+ *
+ * 2003/07/31 - Willy Tarreau <willy at ods dot org>
+ * - Fixed kernel panic when using ARP monitoring without
+ * setting bond's IP address.
+ * Version to 2.2.16.
+ *
+ * 2003/08/06 - Amir Noam <amir.noam at intel dot com>
+ * - Back port from 2.6: use alloc_netdev(); fix /proc handling;
+ * made stats a part of bond struct so no need to allocate
+ * and free it separately; use standard list operations instead
+ * of pre-allocated array of bonds.
+ * Version to 2.3.0.
+ *
+ * 2003/08/07 - Jay Vosburgh <fubar at us dot ibm dot com>,
+ * Amir Noam <amir.noam at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Propagating master's settings: Distinguish between modes that
+ * use a primary slave from those that don't, and propagate settings
+ * accordingly; Consolidate change_active operations and add
+ * reselect_active and find_best operations; Decouple promiscuous
+ * handling from the multicast mode setting; Add support for changing
+ * HW address and MTU with proper unwind; Consolidate procfs code,
+ * add CHANGENAME handler; Enhance netdev notification handling.
+ * Version to 2.4.0.
+ *
+ * 2003/09/15 - Stephen Hemminger <shemminger at osdl dot org>,
+ * Amir Noam <amir.noam at intel dot com>
+ * - Convert /proc to seq_file interface.
+ * Change /proc/net/bondX/info to /proc/net/bonding/bondX.
+ * Set version to 2.4.1.
+ *
+ * 2003/11/20 - Amir Noam <amir.noam at intel dot com>
+ * - Fix /proc creation/destruction.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Massive cleanup - Set version to 2.5.0
+ * Code changes:
+ * o Consolidate format of prints and debug prints.
+ * o Remove bonding_t/slave_t typedefs and consolidate all casts.
+ * o Remove dead code and unnecessary checks.
+ * o Consolidate starting/stopping timers.
+ * o Consolidate handling of primary module param throughout the code.
+ * o Removed multicast module param support - all settings are done
+ * according to mode.
+ * o Slave list iteration - bond is no longer part of the list,
+ * added cyclic list iteration macros.
+ * o Consolidate error handling in all xmit functions.
+ * Style changes:
+ * o Consolidate function naming and declarations.
+ * o Consolidate function params and local variables names.
+ * o Consolidate return values.
+ * o Consolidate curly braces.
+ * o Consolidate conditionals format.
+ * o Change struct member names and types.
+ * o Chomp trailing spaces, remove empty lines, fix indentations.
+ * o Re-organize code according to context.
+ *
+ * 2003/12/30 - Amir Noam <amir.noam at intel dot com>
+ * - Fixed: Cannot remove and re-enslave the original active slave.
+ * - Fixed: Releasing the original active slave causes mac address
+ * duplication.
+ * - Add support for slaves that use ethtool_ops.
+ * Set version to 2.5.3.
+ *
+ * 2004/01/05 - Amir Noam <amir.noam at intel dot com>
+ * - Save bonding parameters per bond instead of using the global values.
+ * Set version to 2.5.4.
+ *
+ * 2004/01/14 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Enhance VLAN support:
+ * * Add support for VLAN hardware acceleration capable slaves.
+ * * Add capability to tag self generated packets in ALB/TLB modes.
+ * Set version to 2.6.0.
+ * 2004/10/29 - Mitch Williams <mitch.a.williams at intel dot com>
+ * - Fixed bug when unloading module while using 802.3ad. If
+ * spinlock debugging is turned on, this causes a stack dump.
+ * Solution is to move call to dev_remove_pack outside of the
+ * spinlock.
+ * Set version to 2.6.1.
+ *
+ */
+
+//#define BONDING_DEBUG 1
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/socket.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/if_ether.h>
+#include <net/arp.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bonding.h>
+#include "bonding.h"
+#include "bond_3ad.h"
+#include "bond_alb.h"
+
+/*---------------------------- Module parameters ----------------------------*/
+
+/* monitor all links that often (in milliseconds). <=0 disables monitoring */
+#define BOND_LINK_MON_INTERV 0
+#define BOND_LINK_ARP_INTERV 0
+
+static int max_bonds = BOND_DEFAULT_MAX_BONDS;
+static int miimon = BOND_LINK_MON_INTERV;
+static int updelay = 0;
+static int downdelay = 0;
+static int use_carrier = 1;
+static char *mode = NULL;
+static char *primary = NULL;
+static char *lacp_rate = NULL;
+static int arp_interval = BOND_LINK_ARP_INTERV;
+static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
+
+module_param(max_bonds, int, 0);
+MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
+module_param(miimon, int, 0);
+MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
+module_param(updelay, int, 0);
+MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
+module_param(downdelay, int, 0);
+MODULE_PARM_DESC(downdelay, "Delay before considering link down, in milliseconds");
+module_param(use_carrier, int, 0);
+MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; 0 for off, 1 for on (default)");
+module_param(mode, charp, 0);
+MODULE_PARM_DESC(mode, "Mode of operation : 0 for round robin, 1 for active-backup, 2 for xor");
+module_param(primary, charp, 0);
+MODULE_PARM_DESC(primary, "Primary network device to use");
+module_param(lacp_rate, charp, 0);
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner (slow/fast)");
+module_param(arp_interval, int, 0);
+MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
+module_param_array(arp_ip_target, charp, NULL, 0);
+MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
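+
+/* Example usage of the parameters above (the values are illustrative only):
+ *
+ *   insmod bonding mode=active-backup miimon=100
+ *   insmod bonding mode=802.3ad lacp_rate=fast miimon=100
+ *   insmod bonding mode=balance-rr arp_interval=500 arp_ip_target=192.168.0.1
+ *
+ * miimon, updelay, downdelay and arp_interval are given in milliseconds;
+ * mode and lacp_rate accept the text names listed in the tables below
+ * (the numeric values still work as well).
+ */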
+
+/*----------------------------- Global variables ----------------------------*/
+
+static const char *version =
+ DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static LIST_HEAD(bond_dev_list);
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *bond_proc_dir = NULL;
+#endif
+
+static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
+static int arp_ip_count = 0;
+static u32 my_ip = 0;
+static int bond_mode = BOND_MODE_ROUNDROBIN;
+static int lacp_fast = 0;
+static int app_abi_ver = 0;
+static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
+ * we receive from the application. Once set,
+ * it won't be changed, and the module will
+ * refuse to enslave/release interfaces if the
+ * command comes from an application using
+ * another ABI version.
+ */
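+
+/* Illustrative sketch only (not a function the driver defines): the ABI
+ * pinning behaviour described above, i.e. remember the first application
+ * ABI version seen and refuse any other version afterwards.  The helper
+ * name and the use of -EPERM are assumptions made for this sketch.
+ */
+static inline int bond_abi_pin_sketch(int ver)
+{
+        if (orig_app_abi_ver == -1) {
+                /* first command we see: pin to this ABI version */
+                orig_app_abi_ver = ver;
+        } else if (orig_app_abi_ver != ver) {
+                /* a different ABI version: refuse to enslave/release */
+                return -EPERM;
+        }
+
+        return 0;
+}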
+
+struct bond_parm_tbl {
+ char *modename;
+ int mode;
+};
+
+static struct bond_parm_tbl bond_lacp_tbl[] = {
+{ "slow", AD_LACP_SLOW},
+{ "fast", AD_LACP_FAST},
+{ NULL, -1},
+};
+
+static struct bond_parm_tbl bond_mode_tbl[] = {
+{ "balance-rr", BOND_MODE_ROUNDROBIN},
+{ "active-backup", BOND_MODE_ACTIVEBACKUP},
+{ "balance-xor", BOND_MODE_XOR},
+{ "broadcast", BOND_MODE_BROADCAST},
+{ "802.3ad", BOND_MODE_8023AD},
+{ "balance-tlb", BOND_MODE_TLB},
+{ "balance-alb", BOND_MODE_ALB},
+{ NULL, -1},
+};
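+
+/* Illustrative sketch only -- not the parser the driver actually uses.
+ * It shows how the parameter tables above are meant to be consumed: scan
+ * until the NULL modename terminator and return the matching mode value,
+ * or -1 when the string is not recognized.  The helper name is made up
+ * for this sketch.
+ */
+static int bond_parm_tbl_lookup_sketch(const char *name, struct bond_parm_tbl *tbl)
+{
+        int i;
+
+        for (i = 0; tbl[i].modename; i++) {
+                if (!strcmp(tbl[i].modename, name))
+                        return tbl[i].mode;
+        }
+
+        return -1; /* consistent with the tables' -1 terminator entry */
+}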
+
+/*-------------------------- Forward declarations ---------------------------*/
+
+static inline void bond_set_mode_ops(struct net_device *bond_dev, int mode);
+
+/*---------------------------- General routines -----------------------------*/
+
+static const char *bond_mode_name(int mode)
+{
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN :
+ return "load balancing (round-robin)";
+ case BOND_MODE_ACTIVEBACKUP :
+ return "fault-tolerance (active-backup)";
+ case BOND_MODE_XOR :
+ return "load balancing (xor)";
+ case BOND_MODE_BROADCAST :
+ return "fault-tolerance (broadcast)";
+ case BOND_MODE_8023AD:
+ return "IEEE 802.3ad Dynamic link aggregation";
+ case BOND_MODE_TLB:
+ return "transmit load balancing";
+ case BOND_MODE_ALB:
+ return "adaptive load balancing";
+ default:
+ return "unknown";
+ }
+}
+
+/*---------------------------------- VLAN -----------------------------------*/
+
+/**
+ * bond_add_vlan - add a new vlan id on bond
+ * @bond: bond that got the notification
+ * @vlan_id: the vlan id to add
+ *
+ * Returns -ENOMEM if allocation failed.
+ */
+static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
+{
+ struct vlan_entry *vlan;
+
+ dprintk("bond: %s, vlan id %d\n",
+ (bond ? bond->dev->name: "None"), vlan_id);
+
+ vlan = kmalloc(sizeof(struct vlan_entry), GFP_KERNEL);
+ if (!vlan) {
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vlan->vlan_list);
+ vlan->vlan_id = vlan_id;
+
+ write_lock_bh(&bond->lock);
+
+ list_add_tail(&vlan->vlan_list, &bond->vlan_list);
+
+ write_unlock_bh(&bond->lock);
+
+ dprintk("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
+
+ return 0;
+}
+
+/**
+ * bond_del_vlan - delete a vlan id from bond
+ * @bond: bond that got the notification
+ * @vlan_id: the vlan id to delete
+ *
+ * returns -ENODEV if @vlan_id was not found in @bond.
+ */
+static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
+{
+ struct vlan_entry *vlan, *next;
+ int res = -ENODEV;
+
+ dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
+
+ write_lock_bh(&bond->lock);
+
+ list_for_each_entry_safe(vlan, next, &bond->vlan_list, vlan_list) {
+ if (vlan->vlan_id == vlan_id) {
+ list_del(&vlan->vlan_list);
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ bond_alb_clear_vlan(bond, vlan_id);
+ }
+
+ dprintk("removed VLAN ID %d from bond %s\n", vlan_id,
+ bond->dev->name);
+
+ kfree(vlan);
+
+ if (list_empty(&bond->vlan_list) &&
+ (bond->slave_cnt == 0)) {
+ /* Last VLAN removed and no slaves, so
+ * restore block on adding VLANs. This will
+ * be removed once a slave that is not
+ * VLAN challenged is added.
+ */
+ bond->dev->features |= NETIF_F_VLAN_CHALLENGED;
+ }
+
+ res = 0;
+ goto out;
+ }
+ }
+
+ dprintk("couldn't find VLAN ID %d in bond %s\n", vlan_id,
+ bond->dev->name);
+
+out:
+ write_unlock_bh(&bond->lock);
+ return res;
+}
+
+/**
+ * bond_has_challenged_slaves
+ * @bond: the bond we're working on
+ *
+ * Searches the slave list. Returns 1 if a vlan challenged slave
+ * was found, 0 otherwise.
+ *
+ * Assumes bond->lock is held.
+ */
+static int bond_has_challenged_slaves(struct bonding *bond)
+{
+ struct slave *slave;
+ int i;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
+ dprintk("found VLAN challenged slave - %s\n",
+ slave->dev->name);
+ return 1;
+ }
+ }
+
+ dprintk("no VLAN challenged slaves found\n");
+ return 0;
+}
+
+/**
+ * bond_next_vlan - safely skip to the next item in the vlans list.
+ * @bond: the bond we're working on
+ * @curr: item we're advancing from
+ *
+ * Returns %NULL if the list is empty, the first entry in bond->vlan_list
+ * if @curr is %NULL, or the entry after @curr otherwise, wrapping back to
+ * the head of the list (so it may be @curr itself again).
+ *
+ * Caller must hold bond->lock
+ */
+struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
+{
+ struct vlan_entry *next, *last;
+
+ if (list_empty(&bond->vlan_list)) {
+ return NULL;
+ }
+
+ if (!curr) {
+ next = list_entry(bond->vlan_list.next,
+ struct vlan_entry, vlan_list);
+ } else {
+ last = list_entry(bond->vlan_list.prev,
+ struct vlan_entry, vlan_list);
+ if (last == curr) {
+ next = list_entry(bond->vlan_list.next,
+ struct vlan_entry, vlan_list);
+ } else {
+ next = list_entry(curr->vlan_list.next,
+ struct vlan_entry, vlan_list);
+ }
+ }
+
+ return next;
+}
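+
+/* Illustrative sketch only (not used by the driver): a typical way to visit
+ * every VLAN exactly once with the cyclic semantics documented above,
+ * starting just after an arbitrary entry @start (or from the head when
+ * @start is %NULL).  Like bond_next_vlan() itself, a real caller must hold
+ * bond->lock; the helper name is made up for this sketch.
+ */
+static inline void bond_for_each_vlan_sketch(struct bonding *bond,
+                                             struct vlan_entry *start)
+{
+        struct vlan_entry *first, *vlan;
+
+        first = bond_next_vlan(bond, start);
+        if (!first)
+                return; /* the vlan list is empty */
+
+        vlan = first;
+        do {
+                /* ... per-VLAN work on vlan->vlan_id would go here ... */
+                vlan = bond_next_vlan(bond, vlan);
+        } while (vlan && vlan != first);
+}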
+
+/**
+ * bond_dev_queue_xmit - Prepare skb for xmit.
+ *
+ * @bond: bond device that got this skb for tx.
+ * @skb: hw accel VLAN tagged skb to transmit
+ * @slave_dev: slave that is supposed to xmit this skbuff
+ *
+ * When the bond gets an skb to transmit that is
+ * already hardware accelerated VLAN tagged, and it
+ * needs to relay this skb to a slave that is not
+ * hw accel capable, the skb needs to be "unaccelerated",
+ * i.e. strip the hwaccel tag and re-insert it as part
+ * of the payload.
+ */
+int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev)
+{
+ unsigned short vlan_id;
+
+ if (!list_empty(&bond->vlan_list) &&
+ !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
+ vlan_get_tag(skb, &vlan_id) == 0) {
+ skb->dev = slave_dev;
+ skb = vlan_put_tag(skb, vlan_id);
+ if (!skb) {
+ /* vlan_put_tag() frees the skb in case of error,
+ * so return success here so the calling functions
+ * won't attempt to free it again.
+ */
+ return 0;
+ }
+ } else {
+ skb->dev = slave_dev;
+ }
+
+ skb->priority = 1;
+ dev_queue_xmit(skb);
+
+ return 0;
+}
+
+/*
+ * In the following 3 functions, bond_vlan_rx_register(), bond_vlan_rx_add_vid()
+ * and bond_vlan_rx_kill_vid(), we don't protect the slave list iteration with a
+ * lock because:
+ * a. This operation is performed in IOCTL context,
+ * b. The operation is protected by the RTNL semaphore in the 8021q code,
+ * c. Holding a lock with BH disabled while directly calling a base driver
+ * entry point is generally a BAD idea.
+ *
+ * The design of synchronization/protection for this operation in the 8021q
+ * module is good for one or more VLAN devices over a single physical device
+ * and cannot be extended for a teaming solution like bonding, so there is a
+ * potential race condition here where a net device from the vlan group might
+ * be referenced (either by a base driver or the 8021q code) while it is being
+ * removed from the system. However, it turns out we're not making matters
+ * worse, and if it works for regular VLAN usage it will work here too.
+*/
+
+/**
+ * bond_vlan_rx_register - Propagates registration to slaves
+ * @bond_dev: bonding net device that got called
+ * @grp: vlan group being registered
+ */
+static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group *grp)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ int i;
+
+ bond->vlgrp = grp;
+
+ bond_for_each_slave(bond, slave, i) {
+ struct net_device *slave_dev = slave->dev;
+
+ if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
+ slave_dev->vlan_rx_register) {
+ slave_dev->vlan_rx_register(slave_dev, grp);
+ }
+ }
+}
+
+/**
+ * bond_vlan_rx_add_vid - Propagates adding an id to slaves
+ * @bond_dev: bonding net device that got called
+ * @vid: vlan id being added
+ */
+static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ int i, res;
+
+ bond_for_each_slave(bond, slave, i) {
+ struct net_device *slave_dev = slave->dev;
+
+ if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ slave_dev->vlan_rx_add_vid) {
+ slave_dev->vlan_rx_add_vid(slave_dev, vid);
+ }
+ }
+
+ res = bond_add_vlan(bond, vid);
+ if (res) {
+ printk(KERN_ERR DRV_NAME
+ ": %s: Failed to add vlan id %d\n",
+ bond_dev->name, vid);
+ }
+}
+
+/**
+ * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
+ * @bond_dev: bonding net device that got called
+ * @vid: vlan id being removed
+ */
+static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ struct net_device *vlan_dev;
+ int i, res;
+
+ bond_for_each_slave(bond, slave, i) {
+ struct net_device *slave_dev = slave->dev;
+
+ if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ slave_dev->vlan_rx_kill_vid) {
+ /* Save and then restore vlan_dev in the grp array,
+ * since the slave's driver might clear it.
+ */
+ vlan_dev = bond->vlgrp->vlan_devices[vid];
+ slave_dev->vlan_rx_kill_vid(slave_dev, vid);
+ bond->vlgrp->vlan_devices[vid] = vlan_dev;
+ }
+ }
+
+ res = bond_del_vlan(bond, vid);
+ if (res) {
+ printk(KERN_ERR DRV_NAME
+ ": %s: Failed to remove vlan id %d\n",
+ bond_dev->name, vid);
+ }
+}
+
+static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
+{
+ struct vlan_entry *vlan;
+
+ write_lock_bh(&bond->lock);
+
+ if (list_empty(&bond->vlan_list)) {
+ goto out;
+ }
+
+ if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
+ slave_dev->vlan_rx_register) {
+ slave_dev->vlan_rx_register(slave_dev, bond->vlgrp);
+ }
+
+ if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
+ !(slave_dev->vlan_rx_add_vid)) {
+ goto out;
+ }
+
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ slave_dev->vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ }
+
+out:
+ write_unlock_bh(&bond->lock);
+}
+
+static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev)
+{
+ struct vlan_entry *vlan;
+ struct net_device *vlan_dev;
+
+ write_lock_bh(&bond->lock);
+
+ if (list_empty(&bond->vlan_list)) {
+ goto out;
+ }
+
+ if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
+ !(slave_dev->vlan_rx_kill_vid)) {
+ goto unreg;
+ }
+
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ /* Save and then restore vlan_dev in the grp array,
+ * since the slave's driver might clear it.
+ */
+ vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
+ }
+
+unreg:
+ if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
+ slave_dev->vlan_rx_register) {
+ slave_dev->vlan_rx_register(slave_dev, NULL);
+ }
+
+out:
+ write_unlock_bh(&bond->lock);
+}
+
+/*------------------------------- Link status -------------------------------*/
+
+/*
+ * Get link speed and duplex from the slave's base driver
+ * using ethtool. If for some reason the call fails or the
+ * values are invalid, fake speed and duplex to 100/Full
+ * and return error.
+ */
+static int bond_update_speed_duplex(struct slave *slave)
+{
+ struct net_device *slave_dev = slave->dev;
+ static int (* ioctl)(struct net_device *, struct ifreq *, int);
+ struct ifreq ifr;
+ struct ethtool_cmd etool;
+
+ /* Fake speed and duplex */
+ slave->speed = SPEED_100;
+ slave->duplex = DUPLEX_FULL;
+
+ if (slave_dev->ethtool_ops) {
+ u32 res;
+
+ if (!slave_dev->ethtool_ops->get_settings) {
+ return -1;
+ }
+
+ res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool);
+ if (res < 0) {
+ return -1;
+ }
+
+ goto verify;
+ }
+
+ ioctl = slave_dev->do_ioctl;
+ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
+ etool.cmd = ETHTOOL_GSET;
+ ifr.ifr_data = (char*)&etool;
+ if (!ioctl || (IOCTL(slave_dev, &ifr, SIOCETHTOOL) < 0)) {
+ return -1;
+ }
+
+verify:
+ switch (etool.speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ break;
+ default:
+ return -1;
+ }
+
+ switch (etool.duplex) {
+ case DUPLEX_FULL:
+ case DUPLEX_HALF:
+ break;
+ default:
+ return -1;
+ }
+
+ slave->speed = etool.speed;
+ slave->duplex = etool.duplex;
+
+ return 0;
+}
+
+/*
+ * if <dev> supports MII link status reporting, check its link status.
+ *
+ * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
+ * depending upon the setting of the use_carrier parameter.
+ *
+ * Return either BMSR_LSTATUS, meaning that the link is up (or we
+ * can't tell and just pretend it is), or 0, meaning that the link is
+ * down.
+ *
+ * If reporting is non-zero, instead of faking link up, return -1 if
+ * both ETHTOOL and MII ioctls fail (meaning the device does not
+ * support them). If use_carrier is set, return whatever it says.
+ * It'd be nice if there was a good way to tell if a driver supports
+ * netif_carrier, but there really isn't.
+ */
+static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting)
+{
+ static int (* ioctl)(struct net_device *, struct ifreq *, int);
+ struct ifreq ifr;
+ struct mii_ioctl_data *mii;
+ struct ethtool_value etool;
+
+ if (bond->params.use_carrier) {
+ return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
+ }
+
+ ioctl = slave_dev->do_ioctl;
+ if (ioctl) {
+ /* TODO: set pointer to correct ioctl on a per team member */
+ /* basis to make this more efficient. That is, once */
+ /* we determine the correct ioctl, we will always */
+ /* call it and not the others for that team */
+ /* member. */
+
+ /*
+ * We cannot assume that SIOCGMIIPHY will also read a
+ * register; not all network drivers (e.g., e100)
+ * support that.
+ */
+
+ /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
+ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
+ mii = if_mii(&ifr);
+ if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
+ mii->reg_num = MII_BMSR;
+ if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0) {
+ return (mii->val_out & BMSR_LSTATUS);
+ }
+ }
+ }
+
+ /* try SIOCETHTOOL ioctl, some drivers cache ETHTOOL_GLINK */
+ /* for a period of time so we attempt to get link status */
+ /* from it last if the above MII ioctls fail... */
+ if (slave_dev->ethtool_ops) {
+ if (slave_dev->ethtool_ops->get_link) {
+ u32 link;
+
+ link = slave_dev->ethtool_ops->get_link(slave_dev);
+
+ return link ? BMSR_LSTATUS : 0;
+ }
+ }
+
+ if (ioctl) {
+ strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
+ etool.cmd = ETHTOOL_GLINK;
+ ifr.ifr_data = (char*)&etool;
+ if (IOCTL(slave_dev, &ifr, SIOCETHTOOL) == 0) {
+ if (etool.data == 1) {
+ return BMSR_LSTATUS;
+ } else {
+ dprintk("SIOCETHTOOL shows link down\n");
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * If reporting, report that either there's no dev->do_ioctl,
+ * or both SIOCGMIIREG and SIOCETHTOOL failed (meaning that we
+ * cannot report link status). If not reporting, pretend
+ * we're ok.
+ */
+ return (reporting ? -1 : BMSR_LSTATUS);
+}
+
+/*----------------------------- Multicast list ------------------------------*/
+
+/*
+ * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
+ */
+static inline int bond_is_dmi_same(struct dev_mc_list *dmi1, struct dev_mc_list *dmi2)
+{
+ return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
+ dmi1->dmi_addrlen == dmi2->dmi_addrlen;
+}
+
+/*
+ * returns dmi entry if found, NULL otherwise
+ */
+static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
+{
+ struct dev_mc_list *idmi;
+
+ for (idmi = mc_list; idmi; idmi = idmi->next) {
+ if (bond_is_dmi_same(dmi, idmi)) {
+ return idmi;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Push the promiscuity flag down to appropriate slaves
+ */
+static void bond_set_promiscuity(struct bonding *bond, int inc)
+{
+ if (USES_PRIMARY(bond->params.mode)) {
+ /* write lock already acquired */
+ if (bond->curr_active_slave) {
+ dev_set_promiscuity(bond->curr_active_slave->dev, inc);
+ }
+ } else {
+ struct slave *slave;
+ int i;
+ bond_for_each_slave(bond, slave, i) {
+ dev_set_promiscuity(slave->dev, inc);
+ }
+ }
+}
+
+/*
+ * Push the allmulti flag down to all slaves
+ */
+static void bond_set_allmulti(struct bonding *bond, int inc)
+{
+ if (USES_PRIMARY(bond->params.mode)) {
+ /* write lock already acquired */
+ if (bond->curr_active_slave) {
+ dev_set_allmulti(bond->curr_active_slave->dev, inc);
+ }
+ } else {
+ struct slave *slave;
+ int i;
+ bond_for_each_slave(bond, slave, i) {
+ dev_set_allmulti(slave->dev, inc);
+ }
+ }
+}
+
+/*
+ * Add a Multicast address to slaves
+ * according to mode
+ */
+static void bond_mc_add(struct bonding *bond, void *addr, int alen)
+{
+ if (USES_PRIMARY(bond->params.mode)) {
+ /* write lock already acquired */
+ if (bond->curr_active_slave) {
+ dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
+ }
+ } else {
+ struct slave *slave;
+ int i;
+ bond_for_each_slave(bond, slave, i) {
+ dev_mc_add(slave->dev, addr, alen, 0);
+ }
+ }
+}
+
+/*
+ * Remove a multicast address from slave
+ * according to mode
+ */
+static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
+{
+ if (USES_PRIMARY(bond->params.mode)) {
+ /* write lock already acquired */
+ if (bond->curr_active_slave) {
+ dev_mc_delete(bond->curr_active_slave->dev, addr, alen, 0);
+ }
+ } else {
+ struct slave *slave;
+ int i;
+ bond_for_each_slave(bond, slave, i) {
+ dev_mc_delete(slave->dev, addr, alen, 0);
+ }
+ }
+}
+
+/*
+ * Totally destroys the mc_list in bond
+ */
+static void bond_mc_list_destroy(struct bonding *bond)
+{
+ struct dev_mc_list *dmi;
+
+ dmi = bond->mc_list;
+ while (dmi) {
+ bond->mc_list = dmi->next;
+ kfree(dmi);
+ dmi = bond->mc_list;
+ }
+}
+
+/*
+ * Copy all the Multicast addresses from src to the bonding device dst
+ */
+static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag)
+{
+ struct dev_mc_list *dmi, *new_dmi;
+
+ for (dmi = mc_list; dmi; dmi = dmi->next) {
+ new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+
+ if (!new_dmi) {
+ /* FIXME: Potential memory leak !!! */
+ return -ENOMEM;
+ }
+
+ new_dmi->next = bond->mc_list;
+ bond->mc_list = new_dmi;
+ new_dmi->dmi_addrlen = dmi->dmi_addrlen;
+ memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
+ new_dmi->dmi_users = dmi->dmi_users;
+ new_dmi->dmi_gusers = dmi->dmi_gusers;
+ }
+
+ return 0;
+}
+
+/*
+ * flush all members of flush->mc_list from device dev->mc_list
+ */
+static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct dev_mc_list *dmi;
+
+ for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
+ dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* del lacpdu mc addr from mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ }
+}
+
+/*--------------------------- Active slave change ---------------------------*/
+
+/*
+ * Update the mc list and multicast-related flags for the new and
+ * old active slaves (if any) according to the multicast mode, and
+ * promiscuous flags unconditionally.
+ */
+static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active)
+{
+ struct dev_mc_list *dmi;
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ /* nothing to do - mc list is already up-to-date on
+ * all slaves
+ */
+ return;
+ }
+
+ if (old_active) {
+ if (bond->dev->flags & IFF_PROMISC) {
+ dev_set_promiscuity(old_active->dev, -1);
+ }
+
+ if (bond->dev->flags & IFF_ALLMULTI) {
+ dev_set_allmulti(old_active->dev, -1);
+ }
+
+ for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
+ dev_mc_delete(old_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ }
+ }
+
+ if (new_active) {
+ if (bond->dev->flags & IFF_PROMISC) {
+ dev_set_promiscuity(new_active->dev, 1);
+ }
+
+ if (bond->dev->flags & IFF_ALLMULTI) {
+ dev_set_allmulti(new_active->dev, 1);
+ }
+
+ for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
+ dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ }
+ }
+}
+
+/**
+ * bond_find_best_slave - select the best available slave to be the active one
+ * @bond: our bonding struct
+ *
+ * Warning: Caller must hold curr_slave_lock for writing.
+ */
+static struct slave *bond_find_best_slave(struct bonding *bond)
+{
+ struct slave *new_active, *old_active;
+ struct slave *bestslave = NULL;
+ int mintime = bond->params.updelay;
+ int i;
+
+ new_active = old_active = bond->curr_active_slave;
+
+ if (!new_active) { /* there were no active slaves left */
+ if (bond->slave_cnt > 0) { /* found one slave */
+ new_active = bond->first_slave;
+ } else {
+ return NULL; /* still no slave, return NULL */
+ }
+ }
+
+ /* first try the primary link; if arping, a link must tx/rx traffic
+ * before it can be considered the curr_active_slave - also, we would skip
+ * slaves between the curr_active_slave and primary_slave that may be up
+ * and able to arp
+ */
+ if ((bond->primary_slave) &&
+ (!bond->params.arp_interval) &&
+ (IS_UP(bond->primary_slave->dev))) {
+ new_active = bond->primary_slave;
+ }
+
+ /* remember where to stop iterating over the slaves */
+ old_active = new_active;
+
+ bond_for_each_slave_from(bond, new_active, i, old_active) {
+ if (IS_UP(new_active->dev)) {
+ if (new_active->link == BOND_LINK_UP) {
+ return new_active;
+ } else if (new_active->link == BOND_LINK_BACK) {
+ /* link up, but waiting for stabilization */
+ if (new_active->delay < mintime) {
+ mintime = new_active->delay;
+ bestslave = new_active;
+ }
+ }
+ }
+ }
+
+ return bestslave;
+}
+
+/**
+ * bond_change_active_slave - change the active slave into the specified one
+ * @bond: our bonding struct
+ * @new: the new slave to make the active one
+ *
+ * Set the new slave to the bond's settings and unset them on the old
+ * curr_active_slave.
+ * Settings include flags, mc-list, promiscuity, allmulti, etc.
+ *
+ * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
+ * because it is apparently the best available slave we have, even though its
+ * updelay hasn't timed out yet.
+ *
+ * Warning: Caller must hold curr_slave_lock for writing.
+ */
+static void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
+{
+ struct slave *old_active = bond->curr_active_slave;
+
+ if (old_active == new_active) {
+ return;
+ }
+
+ if (new_active) {
+ if (new_active->link == BOND_LINK_BACK) {
+ if (USES_PRIMARY(bond->params.mode)) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: making interface %s the new "
+ "active one %d ms earlier.\n",
+ bond->dev->name, new_active->dev->name,
+ (bond->params.updelay - new_active->delay) * bond->params.miimon);
+ }
+
+ new_active->delay = 0;
+ new_active->link = BOND_LINK_UP;
+ new_active->jiffies = jiffies;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
+ }
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
+ }
+ } else {
+ if (USES_PRIMARY(bond->params.mode)) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: making interface %s the new "
+ "active one.\n",
+ bond->dev->name, new_active->dev->name);
+ }
+ }
+ }
+
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ if (old_active) {
+ bond_set_slave_inactive_flags(old_active);
+ }
+
+ if (new_active) {
+ bond_set_slave_active_flags(new_active);
+ }
+ }
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ bond_mc_swap(bond, new_active, old_active);
+ }
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ bond_alb_handle_active_change(bond, new_active);
+ } else {
+ bond->curr_active_slave = new_active;
+ }
+}
+
+/**
+ * bond_select_active_slave - select a new active slave, if needed
+ * @bond: our bonding struct
+ *
+ * This function should be called when one of the following occurs:
+ * - The old curr_active_slave has been released or lost its link.
+ * - The primary_slave has got its link back.
+ * - A slave has got its link back and there's no old curr_active_slave.
+ *
+ * Warning: Caller must hold curr_slave_lock for writing.
+ */
+static void bond_select_active_slave(struct bonding *bond)
+{
+ struct slave *best_slave;
+
+ best_slave = bond_find_best_slave(bond);
+ if (best_slave != bond->curr_active_slave) {
+ bond_change_active_slave(bond, best_slave);
+ }
+}
+
+/*--------------------------- slave list handling ---------------------------*/
+
+/*
+ * This function attaches the slave to the end of list.
+ *
+ * bond->lock held for writing by caller.
+ */
+static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
+{
+ if (bond->first_slave == NULL) { /* attaching the first slave */
+ new_slave->next = new_slave;
+ new_slave->prev = new_slave;
+ bond->first_slave = new_slave;
+ } else {
+ new_slave->next = bond->first_slave;
+ new_slave->prev = bond->first_slave->prev;
+ new_slave->next->prev = new_slave;
+ new_slave->prev->next = new_slave;
+ }
+
+ bond->slave_cnt++;
+}
+
+/*
+ * This function detaches the slave from the list.
+ * WARNING: no check is made to verify if the slave effectively
+ * belongs to <bond>.
+ * Nothing is freed on return, structures are just unchained.
+ * If any slave pointer in bond was pointing to <slave>,
+ * it should be changed by the calling function.
+ *
+ * bond->lock held for writing by caller.
+ */
+static void bond_detach_slave(struct bonding *bond, struct slave *slave)
+{
+ if (slave->next) {
+ slave->next->prev = slave->prev;
+ }
+
+ if (slave->prev) {
+ slave->prev->next = slave->next;
+ }
+
+ if (bond->first_slave == slave) { /* slave is the first slave */
+ if (bond->slave_cnt > 1) { /* there are more slaves */
+ bond->first_slave = slave->next;
+ } else {
+ bond->first_slave = NULL; /* slave was the last one */
+ }
+ }
+
+ slave->next = NULL;
+ slave->prev = NULL;
+ bond->slave_cnt--;
+}
+
+/*---------------------------------- IOCTL ----------------------------------*/
+
+static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ dprintk("bond_dev=%p\n", bond_dev);
+ dprintk("slave_dev=%p\n", slave_dev);
+ dprintk("slave_dev->addr_len=%d\n", slave_dev->addr_len);
+ memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
+ return 0;
+}
+
+/* enslave device <slave> to bond device <master> */
+static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *new_slave = NULL;
+ struct dev_mc_list *dmi;
+ struct sockaddr addr;
+ int link_reporting;
+ int old_features = bond_dev->features;
+ int res = 0;
+
+ if (slave_dev->do_ioctl == NULL) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning : no link monitoring support for %s\n",
+ slave_dev->name);
+ }
+
+ /* bond must be initialized by bond_open() before enslaving */
+ if (!(bond_dev->flags & IFF_UP)) {
+ dprintk("Error, master_dev is not up\n");
+ return -EPERM;
+ }
+
+ /* already enslaved */
+ if (slave_dev->flags & IFF_SLAVE) {
+ dprintk("Error, Device was already enslaved\n");
+ return -EBUSY;
+ }
+
+ /* vlan challenged mutual exclusion */
+ /* no need to lock since we're protected by rtnl_lock */
+ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+ if (!list_empty(&bond->vlan_list)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: cannot enslave VLAN "
+ "challenged slave %s on VLAN enabled "
+ "bond %s\n", slave_dev->name,
+ bond_dev->name);
+ return -EPERM;
+ } else {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: enslaved VLAN challenged "
+ "slave %s. Adding VLANs will be blocked as "
+ "long as %s is part of bond %s\n",
+ slave_dev->name, slave_dev->name,
+ bond_dev->name);
+ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
+ }
+ } else {
+ dprintk("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+ if (bond->slave_cnt == 0) {
+ /* First slave, and it is not VLAN challenged,
+ * so remove the block of adding VLANs over the bond.
+ */
+ bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
+ }
+ }
+
+ if (app_abi_ver >= 1) {
+ /* The application is using an ABI, which requires the
+ * slave interface to be closed.
+ */
+ if ((slave_dev->flags & IFF_UP)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: %s is up\n",
+ slave_dev->name);
+ res = -EPERM;
+ goto err_undo_flags;
+ }
+
+ if (slave_dev->set_mac_address == NULL) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: The slave device you specified does "
+ "not support setting the MAC address.\n");
+ printk(KERN_ERR
+ "Your kernel likely does not support slave "
+ "devices.\n");
+
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
+ }
+ } else {
+ /* The application is not using an ABI, which requires the
+ * slave interface to be open.
+ */
+ if (!(slave_dev->flags & IFF_UP)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: %s is not running\n",
+ slave_dev->name);
+ res = -EINVAL;
+ goto err_undo_flags;
+ }
+
+ if ((bond->params.mode == BOND_MODE_8023AD) ||
+ (bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: to use %s mode, you must upgrade "
+ "ifenslave.\n",
+ bond_mode_name(bond->params.mode));
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
+ }
+ }
+
+ new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
+ if (!new_slave) {
+ res = -ENOMEM;
+ goto err_undo_flags;
+ }
+
+ memset(new_slave, 0, sizeof(struct slave));
+
+ /* save slave's original flags before calling
+ * netdev_set_master and dev_open
+ */
+ new_slave->original_flags = slave_dev->flags;
+
+ if (app_abi_ver >= 1) {
+ /* save slave's original ("permanent") mac address for
+ * modes that need it, and for restoring it upon release,
+ * and then set it to the master's address
+ */
+ memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+
+ /* set slave to master's mac address
+ * The application already set the master's
+ * mac address to that of the first slave
+ */
+ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+ addr.sa_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &addr);
+ if (res) {
+ dprintk("Error %d calling set_mac_address\n", res);
+ goto err_free;
+ }
+
+ /* open the slave since the application closed it */
+ res = dev_open(slave_dev);
+ if (res) {
+ dprintk("Openning slave %s failed\n", slave_dev->name);
+ goto err_restore_mac;
+ }
+ }
+
+ res = netdev_set_master(slave_dev, bond_dev);
+ if (res) {
+ dprintk("Error %d calling netdev_set_master\n", res);
+ if (app_abi_ver < 1) {
+ goto err_free;
+ } else {
+ goto err_close;
+ }
+ }
+
+ new_slave->dev = slave_dev;
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ /* bond_alb_init_slave() must be called before all other stages since
+ * it might fail and we do not want to have to undo everything
+ */
+ res = bond_alb_init_slave(bond, new_slave);
+ if (res) {
+ goto err_unset_master;
+ }
+ }
+
+ /* If the mode USES_PRIMARY, then the new slave gets the
+ * master's promisc (and mc) settings only if it becomes the
+ * curr_active_slave, and that is taken care of later when calling
+ * bond_change_active()
+ */
+ if (!USES_PRIMARY(bond->params.mode)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ dev_set_promiscuity(slave_dev, 1);
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ dev_set_allmulti(slave_dev, 1);
+ }
+
+ /* upload master's mc_list to new slave */
+ for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
+ dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ }
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ }
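+	/* Illustrative note: MULTICAST_LACPDU_ADDR is the IEEE 802.3 Slow
+	 * Protocols multicast address (01:80:c2:00:00:02), so the slave's
+	 * filter now accepts LACPDUs from the partner switch.
+	 */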
+
+ bond_add_vlans_on_slave(bond, slave_dev);
+
+ write_lock_bh(&bond->lock);
+
+ bond_attach_slave(bond, new_slave);
+
+ new_slave->delay = 0;
+ new_slave->link_failure_count = 0;
+
+ if (bond->params.miimon && !bond->params.use_carrier) {
+ link_reporting = bond_check_dev_link(bond, slave_dev, 1);
+
+ if ((link_reporting == -1) && !bond->params.arp_interval) {
+ /*
+ * miimon is set but a bonded network driver
+ * does not support ETHTOOL/MII and
+ * arp_interval is not set. Note: if
+			 * use_carrier is enabled, we will never get
+ * here (because netif_carrier is always
+ * supported); thus, we don't need to change
+ * the messages for netif_carrier.
+ */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: MII and ETHTOOL support not "
+ "available for interface %s, and "
+ "arp_interval/arp_ip_target module parameters "
+ "not specified, thus bonding will not detect "
+ "link failures! see bonding.txt for details.\n",
+ slave_dev->name);
+ } else if (link_reporting == -1) {
+ /* unable get link status using mii/ethtool */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: can't get link status from "
+ "interface %s; the network driver associated "
+ "with this interface does not support MII or "
+ "ETHTOOL link status reporting, thus miimon "
+ "has no effect on this interface.\n",
+ slave_dev->name);
+ }
+ }
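+	/* For example (assuming the standard module options described in
+	 * bonding.txt), link monitoring is enabled with either "miimon=100"
+	 * or "arp_interval=1000 arp_ip_target=<ip>" when loading the
+	 * bonding module; without one of them the warning above applies.
+	 */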
+
+ /* check for initial state */
+ if (!bond->params.miimon ||
+ (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
+ if (bond->params.updelay) {
+ dprintk("Initial state of slave_dev is "
+ "BOND_LINK_BACK\n");
+ new_slave->link = BOND_LINK_BACK;
+ new_slave->delay = bond->params.updelay;
+ } else {
+ dprintk("Initial state of slave_dev is "
+ "BOND_LINK_UP\n");
+ new_slave->link = BOND_LINK_UP;
+ }
+ new_slave->jiffies = jiffies;
+ } else {
+ dprintk("Initial state of slave_dev is "
+ "BOND_LINK_DOWN\n");
+ new_slave->link = BOND_LINK_DOWN;
+ }
+
+ if (bond_update_speed_duplex(new_slave) &&
+ (new_slave->link != BOND_LINK_DOWN)) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: failed to get speed and duplex from %s, "
+ "assumed to be 100Mb/sec and Full.\n",
+ new_slave->dev->name);
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ printk(KERN_WARNING
+ "Operation of 802.3ad mode requires ETHTOOL "
+ "support in base driver for proper aggregator "
+ "selection.\n");
+ }
+ }
+
+ if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ /* if there is a primary slave, remember it */
+ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
+ bond->primary_slave = new_slave;
+ }
+ }
+
+ switch (bond->params.mode) {
+ case BOND_MODE_ACTIVEBACKUP:
+ /* if we're in active-backup mode, we need one and only one active
+ * interface. The backup interfaces will have their NOARP flag set
+ * because we need them to be completely deaf and not to respond to
+ * any ARP request on the network to avoid fooling a switch. Thus,
+		 * since we guarantee that curr_active_slave always points to the last
+ * usable interface, we just have to verify this interface's flag.
+ */
+ if (((!bond->curr_active_slave) ||
+ (bond->curr_active_slave->dev->flags & IFF_NOARP)) &&
+ (new_slave->link != BOND_LINK_DOWN)) {
+ dprintk("This is the first active slave\n");
+ /* first slave or no active slave yet, and this link
+ is OK, so make this interface the active one */
+ bond_change_active_slave(bond, new_slave);
+ } else {
+ dprintk("This is just a backup slave\n");
+ bond_set_slave_inactive_flags(new_slave);
+ }
+ break;
+ case BOND_MODE_8023AD:
+ /* in 802.3ad mode, the internal mechanism
+ * will activate the slaves in the selected
+ * aggregator
+ */
+ bond_set_slave_inactive_flags(new_slave);
+ /* if this is the first slave */
+ if (bond->slave_cnt == 1) {
+ SLAVE_AD_INFO(new_slave).id = 1;
+			/* Initialize AD with the number of times the AD timer
+			 * is called in 1 second; this can be done only after
+			 * the mac address of the bond is set.
+			 */
+ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL,
+ bond->params.lacp_fast);
+ } else {
+ SLAVE_AD_INFO(new_slave).id =
+ SLAVE_AD_INFO(new_slave->prev).id + 1;
+ }
+
+ bond_3ad_bind_slave(new_slave);
+ break;
+ case BOND_MODE_TLB:
+ case BOND_MODE_ALB:
+ new_slave->state = BOND_STATE_ACTIVE;
+ if ((!bond->curr_active_slave) &&
+ (new_slave->link != BOND_LINK_DOWN)) {
+ /* first slave or no active slave yet, and this link
+ * is OK, so make this interface the active one
+ */
+ bond_change_active_slave(bond, new_slave);
+ }
+ break;
+ default:
+ dprintk("This slave is always active in trunk mode\n");
+
+ /* always active in trunk mode */
+ new_slave->state = BOND_STATE_ACTIVE;
+
+ /* In trunking mode there is little meaning to curr_active_slave
+ * anyway (it holds no special properties of the bond device),
+ * so we can change it without calling change_active_interface()
+ */
+ if (!bond->curr_active_slave) {
+ bond->curr_active_slave = new_slave;
+ }
+ break;
+ } /* switch(bond_mode) */
+
+ write_unlock_bh(&bond->lock);
+
+ if (app_abi_ver < 1) {
+ /*
+ * !!! This is to support old versions of ifenslave.
+ * We can remove this in 2.5 because our ifenslave takes
+ * care of this for us.
+ * We check to see if the master has a mac address yet.
+ * If not, we'll give it the mac address of our slave device.
+ */
+ int ndx = 0;
+
+ for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
+ dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
+ ndx);
+ if (bond_dev->dev_addr[ndx] != 0) {
+ dprintk("Found non-zero byte at ndx=%d\n",
+ ndx);
+ break;
+ }
+ }
+
+ if (ndx == bond_dev->addr_len) {
+ /*
+ * We got all the way through the address and it was
+ * all 0's.
+ */
+			dprintk("%s doesn't have a MAC address yet.\n",
+				bond_dev->name);
+			dprintk("Going to assign it from %s.\n",
+				slave_dev->name);
+ bond_sethwaddr(bond_dev, slave_dev);
+ }
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: enslaving %s as a%s interface with a%s link.\n",
+ bond_dev->name, slave_dev->name,
+ new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
+ new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+
+ /* enslave is successful */
+ return 0;
+
+/* Undo stages on error */
+err_unset_master:
+ netdev_set_master(slave_dev, NULL);
+
+err_close:
+ dev_close(slave_dev);
+
+err_restore_mac:
+ memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+ addr.sa_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, &addr);
+
+err_free:
+ kfree(new_slave);
+
+err_undo_flags:
+ bond_dev->features = old_features;
+
+ return res;
+}
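+/*
+ * Illustrative usage sketch (not part of the driver): with an up-to-date
+ * ifenslave (app_abi_ver >= 1) the master is brought up first and the slaves
+ * are added while down, e.g.
+ *
+ *	ifconfig bond0 up
+ *	ifenslave bond0 eth0 eth1
+ *
+ * The exact tool invocation is an assumption based on the ABI checks in
+ * bond_enslave() above and may differ on a given distribution.
+ */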
+
+/*
+ * Try to release the slave device <slave> from the bond device <master>.
+ * It is legal to access curr_active_slave without a lock because the whole
+ * function is write-locked.
+ *
+ * The rules for slave state should be:
+ *	for Active/Backup:
+ *	  the active slave stays up, all backups go down
+ *	for bonded connections:
+ *	  the first up interface should be left on and all others downed.
+ */
+static int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *oldcurrent;
+ struct sockaddr addr;
+ int mac_addr_differ;
+
+ /* slave is not a slave or master is not master of this slave */
+ if (!(slave_dev->flags & IFF_SLAVE) ||
+ (slave_dev->master != bond_dev)) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: %s: cannot release %s.\n",
+ bond_dev->name, slave_dev->name);
+ return -EINVAL;
+ }
+
+ write_lock_bh(&bond->lock);
+
+ slave = bond_get_slave_by_dev(bond, slave_dev);
+ if (!slave) {
+ /* not a slave of this bond */
+ printk(KERN_INFO DRV_NAME
+ ": %s: %s not enslaved\n",
+ bond_dev->name, slave_dev->name);
+		/* drop the lock taken above before bailing out */
+		write_unlock_bh(&bond->lock);
+		return -EINVAL;
+ }
+
+ mac_addr_differ = memcmp(bond_dev->dev_addr,
+ slave->perm_hwaddr,
+ ETH_ALEN);
+ if (!mac_addr_differ && (bond->slave_cnt > 1)) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: the permanent HWaddr of %s "
+ "- %02X:%02X:%02X:%02X:%02X:%02X - is "
+ "still in use by %s. Set the HWaddr of "
+ "%s to a different address to avoid "
+ "conflicts.\n",
+ slave_dev->name,
+ slave->perm_hwaddr[0],
+ slave->perm_hwaddr[1],
+ slave->perm_hwaddr[2],
+ slave->perm_hwaddr[3],
+ slave->perm_hwaddr[4],
+ slave->perm_hwaddr[5],
+ bond_dev->name,
+ slave_dev->name);
+ }
+
+ /* Inform AD package of unbinding of slave. */
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* must be called before the slave is
+ * detached from the list
+ */
+ bond_3ad_unbind_slave(slave);
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: releasing %s interface %s\n",
+ bond_dev->name,
+ (slave->state == BOND_STATE_ACTIVE)
+ ? "active" : "backup",
+ slave_dev->name);
+
+ oldcurrent = bond->curr_active_slave;
+
+ bond->current_arp_slave = NULL;
+
+ /* release the slave from its bond */
+ bond_detach_slave(bond, slave);
+
+ if (bond->primary_slave == slave) {
+ bond->primary_slave = NULL;
+ }
+
+ if (oldcurrent == slave) {
+ bond_change_active_slave(bond, NULL);
+ }
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ /* Must be called only after the slave has been
+ * detached from the list and the curr_active_slave
+ * has been cleared (if our_slave == old_current),
+ * but before a new active slave is selected.
+ */
+ bond_alb_deinit_slave(bond, slave);
+ }
+
+ if (oldcurrent == slave) {
+ bond_select_active_slave(bond);
+
+ if (!bond->curr_active_slave) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: now running without any active "
+ "interface !\n",
+ bond_dev->name);
+ }
+ }
+
+ if (bond->slave_cnt == 0) {
+ /* if the last slave was removed, zero the mac address
+ * of the master so it will be set by the application
+ * to the mac address of the first slave
+ */
+ memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
+
+ if (list_empty(&bond->vlan_list)) {
+ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
+ } else {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: clearing HW address of %s while it "
+ "still has VLANs.\n",
+ bond_dev->name);
+ printk(KERN_WARNING DRV_NAME
+ ": When re-adding slaves, make sure the bond's "
+ "HW address matches its VLANs'.\n");
+ }
+ } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+ !bond_has_challenged_slaves(bond)) {
+ printk(KERN_INFO DRV_NAME
+ ": last VLAN challenged slave %s "
+ "left bond %s. VLAN blocking is removed\n",
+ slave_dev->name, bond_dev->name);
+ bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
+ }
+
+ write_unlock_bh(&bond->lock);
+
+ bond_del_vlans_from_slave(bond, slave_dev);
+
+ /* If the mode USES_PRIMARY, then we should only remove its
+ * promisc and mc settings if it was the curr_active_slave, but that was
+ * already taken care of above when we detached the slave
+ */
+ if (!USES_PRIMARY(bond->params.mode)) {
+ /* unset promiscuity level from slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ dev_set_promiscuity(slave_dev, -1);
+ }
+
+ /* unset allmulti level from slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ dev_set_allmulti(slave_dev, -1);
+ }
+
+ /* flush master's mc_list from slave */
+ bond_mc_list_flush(bond_dev, slave_dev);
+ }
+
+ netdev_set_master(slave_dev, NULL);
+
+ /* close slave before restoring its mac address */
+ dev_close(slave_dev);
+
+ if (app_abi_ver >= 1) {
+ /* restore original ("permanent") mac address */
+ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ addr.sa_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, &addr);
+ }
+
+ /* restore the original state of the
+ * IFF_NOARP flag that might have been
+ * set by bond_set_slave_inactive_flags()
+ */
+ if ((slave->original_flags & IFF_NOARP) == 0) {
+ slave_dev->flags &= ~IFF_NOARP;
+ }
+
+ kfree(slave);
+
+ return 0; /* deletion OK */
+}
+
+/*
+ * This function releases all slaves.
+ */
+static int bond_release_all(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ struct net_device *slave_dev;
+ struct sockaddr addr;
+
+ write_lock_bh(&bond->lock);
+
+ if (bond->slave_cnt == 0) {
+ goto out;
+ }
+
+ bond->current_arp_slave = NULL;
+ bond->primary_slave = NULL;
+ bond_change_active_slave(bond, NULL);
+
+ while ((slave = bond->first_slave) != NULL) {
+ /* Inform AD package of unbinding of slave
+ * before slave is detached from the list.
+ */
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ bond_3ad_unbind_slave(slave);
+ }
+
+ slave_dev = slave->dev;
+ bond_detach_slave(bond, slave);
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ /* must be called only after the slave
+ * has been detached from the list
+ */
+ bond_alb_deinit_slave(bond, slave);
+ }
+
+ /* now that the slave is detached, unlock and perform
+ * all the undo steps that should not be called from
+ * within a lock.
+ */
+ write_unlock_bh(&bond->lock);
+
+ bond_del_vlans_from_slave(bond, slave_dev);
+
+ /* If the mode USES_PRIMARY, then we should only remove its
+ * promisc and mc settings if it was the curr_active_slave, but that was
+ * already taken care of above when we detached the slave
+ */
+ if (!USES_PRIMARY(bond->params.mode)) {
+ /* unset promiscuity level from slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ dev_set_promiscuity(slave_dev, -1);
+ }
+
+ /* unset allmulti level from slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ dev_set_allmulti(slave_dev, -1);
+ }
+
+ /* flush master's mc_list from slave */
+ bond_mc_list_flush(bond_dev, slave_dev);
+ }
+
+ netdev_set_master(slave_dev, NULL);
+
+ /* close slave before restoring its mac address */
+ dev_close(slave_dev);
+
+ if (app_abi_ver >= 1) {
+ /* restore original ("permanent") mac address*/
+ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ addr.sa_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, &addr);
+ }
+
+ /* restore the original state of the IFF_NOARP flag that might have
+ * been set by bond_set_slave_inactive_flags()
+ */
+ if ((slave->original_flags & IFF_NOARP) == 0) {
+ slave_dev->flags &= ~IFF_NOARP;
+ }
+
+ kfree(slave);
+
+ /* re-acquire the lock before getting the next slave */
+ write_lock_bh(&bond->lock);
+ }
+
+ /* zero the mac address of the master so it will be
+ * set by the application to the mac address of the
+ * first slave
+ */
+ memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
+
+ if (list_empty(&bond->vlan_list)) {
+ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
+ } else {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: clearing HW address of %s while it "
+ "still has VLANs.\n",
+ bond_dev->name);
+ printk(KERN_WARNING DRV_NAME
+ ": When re-adding slaves, make sure the bond's "
+ "HW address matches its VLANs'.\n");
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: released all slaves\n",
+ bond_dev->name);
+
+out:
+ write_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+/*
+ * This function changes the active slave to slave <slave_dev>.
+ * It returns -EINVAL in the following cases:
+ *  - <slave_dev> is not found in the list.
+ *  - There is no active slave now.
+ *  - <slave_dev> is already active.
+ *  - The link state of <slave_dev> is not BOND_LINK_UP.
+ *  - <slave_dev> is not running.
+ * In these cases, this function does nothing.
+ * In the other cases, the curr_active_slave pointer is changed and 0 is returned.
+ */
+static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *old_active = NULL;
+ struct slave *new_active = NULL;
+ int res = 0;
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ return -EINVAL;
+ }
+
+ /* Verify that master_dev is indeed the master of slave_dev */
+ if (!(slave_dev->flags & IFF_SLAVE) ||
+ (slave_dev->master != bond_dev)) {
+ return -EINVAL;
+ }
+
+ write_lock_bh(&bond->lock);
+
+ old_active = bond->curr_active_slave;
+ new_active = bond_get_slave_by_dev(bond, slave_dev);
+
+ /*
+ * Changing to the current active: do nothing; return success.
+ */
+ if (new_active && (new_active == old_active)) {
+ write_unlock_bh(&bond->lock);
+ return 0;
+ }
+
+ if ((new_active) &&
+ (old_active) &&
+ (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ bond_change_active_slave(bond, new_active);
+ } else {
+ res = -EINVAL;
+ }
+
+ write_unlock_bh(&bond->lock);
+
+ return res;
+}
+
+static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
+{
+ struct ethtool_drvinfo info;
+ void __user *addr = ifr->ifr_data;
+ uint32_t cmd;
+
+ if (get_user(cmd, (uint32_t __user *)addr)) {
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ if (copy_from_user(&info, addr, sizeof(info))) {
+ return -EFAULT;
+ }
+
+ if (strcmp(info.driver, "ifenslave") == 0) {
+ int new_abi_ver;
+ char *endptr;
+
+ new_abi_ver = simple_strtoul(info.fw_version,
+ &endptr, 0);
+ if (*endptr) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: got invalid ABI "
+ "version from application\n");
+
+ return -EINVAL;
+ }
+
+ if (orig_app_abi_ver == -1) {
+ orig_app_abi_ver = new_abi_ver;
+ }
+
+ app_abi_ver = new_abi_ver;
+ }
+
+ strncpy(info.driver, DRV_NAME, 32);
+ strncpy(info.version, DRV_VERSION, 32);
+ snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
+
+ if (copy_to_user(addr, &info, sizeof(info))) {
+ return -EFAULT;
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
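+/*
+ * Illustrative note: the ABI handshake above piggybacks on ETHTOOL_GDRVINFO.
+ * A new-enough ifenslave passes driver name "ifenslave" and its ABI number in
+ * info.fw_version; the driver answers with its own DRV_NAME/DRV_VERSION and
+ * BOND_ABI_VERSION, so something like "ethtool -i bond0" (command assumed
+ * from standard ethtool) reports the bonding driver's version strings.
+ */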
+
+static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ info->bond_mode = bond->params.mode;
+ info->miimon = bond->params.miimon;
+
+ read_lock_bh(&bond->lock);
+ info->num_slaves = bond->slave_cnt;
+ read_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ int i, found = 0;
+
+ if (info->slave_id < 0) {
+ return -ENODEV;
+ }
+
+ read_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, i) {
+ if (i == (int)info->slave_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ read_unlock_bh(&bond->lock);
+
+ if (found) {
+ strcpy(info->slave_name, slave->dev->name);
+ info->link = slave->link;
+ info->state = slave->state;
+ info->link_failure_count = slave->link_failure_count;
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*-------------------------------- Monitoring -------------------------------*/
+
+/* this function is called regularly to monitor each slave's link. */
+static void bond_mii_monitor(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *oldcurrent;
+ int do_failover = 0;
+ int delta_in_ticks;
+ int i;
+
+ read_lock(&bond->lock);
+
+ delta_in_ticks = (bond->params.miimon * HZ) / 1000;
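+	/* For example, with miimon=100 and HZ=1000 this is
+	 * (100 * 1000) / 1000 = 100 jiffies, i.e. the monitor re-arms every
+	 * 100 ms; with HZ=250 the same 100 ms interval is 25 jiffies.
+	 */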
+
+ if (bond->kill_timers) {
+ goto out;
+ }
+
+ if (bond->slave_cnt == 0) {
+ goto re_arm;
+ }
+
+ /* we will try to read the link status of each of our slaves, and
+ * set their IFF_RUNNING flag appropriately. For each slave not
+ * supporting MII status, we won't do anything so that a user-space
+ * program could monitor the link itself if needed.
+ */
+
+ read_lock(&bond->curr_slave_lock);
+ oldcurrent = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ bond_for_each_slave(bond, slave, i) {
+ struct net_device *slave_dev = slave->dev;
+ int link_state;
+ u16 old_speed = slave->speed;
+ u8 old_duplex = slave->duplex;
+
+ link_state = bond_check_dev_link(bond, slave_dev, 0);
+
+ switch (slave->link) {
+ case BOND_LINK_UP: /* the link was up */
+ if (link_state == BMSR_LSTATUS) {
+ /* link stays up, nothing more to do */
+ break;
+ } else { /* link going down */
+ slave->link = BOND_LINK_FAIL;
+ slave->delay = bond->params.downdelay;
+
+ if (slave->link_failure_count < UINT_MAX) {
+ slave->link_failure_count++;
+ }
+
+ if (bond->params.downdelay) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status down for %s "
+ "interface %s, disabling it in "
+ "%d ms.\n",
+ bond_dev->name,
+ IS_UP(slave_dev)
+ ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ ? ((slave == oldcurrent)
+ ? "active " : "backup ")
+ : "")
+ : "idle ",
+ slave_dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ }
+ }
+			/* no break! fall through to the BOND_LINK_FAIL test to
+			   ensure the proper action is taken
+			 */
+ case BOND_LINK_FAIL: /* the link has just gone down */
+ if (link_state != BMSR_LSTATUS) {
+ /* link stays down */
+ if (slave->delay <= 0) {
+					/* the link has been down too long */
+ slave->link = BOND_LINK_DOWN;
+
+ /* in active/backup mode, we must
+ * completely disable this interface
+ */
+ if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
+ (bond->params.mode == BOND_MODE_8023AD)) {
+ bond_set_slave_inactive_flags(slave);
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status definitely "
+ "down for interface %s, "
+ "disabling it\n",
+ bond_dev->name,
+ slave_dev->name);
+
+ /* notify ad that the link status has changed */
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
+ }
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
+ }
+
+ if (slave == oldcurrent) {
+ do_failover = 1;
+ }
+ } else {
+ slave->delay--;
+ }
+ } else {
+ /* link up again */
+ slave->link = BOND_LINK_UP;
+ slave->jiffies = jiffies;
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status up again after %d "
+ "ms for interface %s.\n",
+ bond_dev->name,
+ (bond->params.downdelay - slave->delay) * bond->params.miimon,
+ slave_dev->name);
+ }
+ break;
+ case BOND_LINK_DOWN: /* the link was down */
+ if (link_state != BMSR_LSTATUS) {
+ /* the link stays down, nothing more to do */
+ break;
+ } else { /* link going up */
+ slave->link = BOND_LINK_BACK;
+ slave->delay = bond->params.updelay;
+
+ if (bond->params.updelay) {
+ /* if updelay == 0, no need to
+ advertise about a 0 ms delay */
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status up for "
+ "interface %s, enabling it "
+ "in %d ms.\n",
+ bond_dev->name,
+ slave_dev->name,
+ bond->params.updelay * bond->params.miimon);
+ }
+ }
+			/* no break! fall through to BOND_LINK_BACK in
+			   case there's something to do.
+			 */
+ case BOND_LINK_BACK: /* the link has just come back */
+ if (link_state != BMSR_LSTATUS) {
+ /* link down again */
+ slave->link = BOND_LINK_DOWN;
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status down again after %d "
+ "ms for interface %s.\n",
+ bond_dev->name,
+ (bond->params.updelay - slave->delay) * bond->params.miimon,
+ slave_dev->name);
+ } else {
+ /* link stays up */
+ if (slave->delay == 0) {
+					/* the link has been up long enough */
+ slave->link = BOND_LINK_UP;
+ slave->jiffies = jiffies;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* prevent it from being the active one */
+ slave->state = BOND_STATE_BACKUP;
+ } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ /* make it immediately active */
+ slave->state = BOND_STATE_ACTIVE;
+ } else if (slave != bond->primary_slave) {
+ /* prevent it from being the active one */
+ slave->state = BOND_STATE_BACKUP;
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status definitely "
+ "up for interface %s.\n",
+ bond_dev->name,
+ slave_dev->name);
+
+ /* notify ad that the link status has changed */
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ bond_3ad_handle_link_change(slave, BOND_LINK_UP);
+ }
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
+ }
+
+ if ((!oldcurrent) ||
+ (slave == bond->primary_slave)) {
+ do_failover = 1;
+ }
+ } else {
+ slave->delay--;
+ }
+ }
+ break;
+ default:
+ /* Should not happen */
+ printk(KERN_ERR "bonding: Error: %s Illegal value (link=%d)\n",
+ slave->dev->name, slave->link);
+ goto out;
+ } /* end of switch (slave->link) */
+
+ bond_update_speed_duplex(slave);
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ if (old_speed != slave->speed) {
+ bond_3ad_adapter_speed_changed(slave);
+ }
+
+ if (old_duplex != slave->duplex) {
+ bond_3ad_adapter_duplex_changed(slave);
+ }
+ }
+
+ } /* end of for */
+
+ if (do_failover) {
+ write_lock(&bond->curr_slave_lock);
+
+ bond_select_active_slave(bond);
+
+ if (oldcurrent && !bond->curr_active_slave) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: now running without any active "
+ "interface !\n",
+ bond_dev->name);
+ }
+
+ write_unlock(&bond->curr_slave_lock);
+ }
+
+re_arm:
+ if (bond->params.miimon) {
+ mod_timer(&bond->mii_timer, jiffies + delta_in_ticks);
+ }
+out:
+ read_unlock(&bond->lock);
+}
+
+static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
+{
+ int i;
+ u32 *targets = bond->params.arp_targets;
+
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
+ arp_send(ARPOP_REQUEST, ETH_P_ARP, targets[i], slave->dev,
+ my_ip, NULL, slave->dev->dev_addr,
+ NULL);
+ }
+}
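+/*
+ * Note: the NULL destination hardware address above makes arp_send() fall
+ * back to the device broadcast address, so each configured arp_ip_target
+ * receives a broadcast ARP request sourced from my_ip and the slave's MAC.
+ */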
+
+/*
+ * this function is called regularly to monitor each slave's link
+ * ensuring that traffic is being sent and received when arp monitoring
+ * is used in load-balancing mode. if the adapter has been dormant, then an
+ * arp is transmitted to generate traffic. see activebackup_arp_monitor for
+ * arp monitoring in active backup mode.
+ */
+static void bond_loadbalance_arp_mon(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *oldcurrent;
+ int do_failover = 0;
+ int delta_in_ticks;
+ int i;
+
+ read_lock(&bond->lock);
+
+ delta_in_ticks = (bond->params.arp_interval * HZ) / 1000;
+
+ if (bond->kill_timers) {
+ goto out;
+ }
+
+ if (bond->slave_cnt == 0) {
+ goto re_arm;
+ }
+
+ read_lock(&bond->curr_slave_lock);
+ oldcurrent = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+	/* see if any of the previous devices are up now (i.e. they have
+	 * transmitted and received traffic). the curr_active_slave does not
+	 * come into the picture unless it is null. also, slave->jiffies is
+	 * not needed here because we send an arp on each slave and give a
+	 * slave as long as it needs to get the tx/rx within the delta.
+ * TODO: what about up/down delay in arp mode? it wasn't here before
+ * so it can wait
+ */
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->link != BOND_LINK_UP) {
+ if (((jiffies - slave->dev->trans_start) <= delta_in_ticks) &&
+ ((jiffies - slave->dev->last_rx) <= delta_in_ticks)) {
+
+ slave->link = BOND_LINK_UP;
+ slave->state = BOND_STATE_ACTIVE;
+
+ /* primary_slave has no meaning in round-robin
+ * mode. the window of a slave being up and
+ * curr_active_slave being null after enslaving
+ * is closed.
+ */
+ if (!oldcurrent) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status definitely "
+ "up for interface %s, ",
+ bond_dev->name,
+ slave->dev->name);
+ do_failover = 1;
+ } else {
+ printk(KERN_INFO DRV_NAME
+ ": %s: interface %s is now up\n",
+ bond_dev->name,
+ slave->dev->name);
+ }
+ }
+ } else {
+ /* slave->link == BOND_LINK_UP */
+
+ /* not all switches will respond to an arp request
+ * when the source ip is 0, so don't take the link down
+ * if we don't know our ip yet
+ */
+ if (((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) ||
+ (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) &&
+ my_ip)) {
+
+ slave->link = BOND_LINK_DOWN;
+ slave->state = BOND_STATE_BACKUP;
+
+ if (slave->link_failure_count < UINT_MAX) {
+ slave->link_failure_count++;
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: interface %s is now down.\n",
+ bond_dev->name,
+ slave->dev->name);
+
+ if (slave == oldcurrent) {
+ do_failover = 1;
+ }
+ }
+ }
+
+ /* note: if switch is in round-robin mode, all links
+ * must tx arp to ensure all links rx an arp - otherwise
+ * links may oscillate or not come up at all; if switch is
+ * in something like xor mode, there is nothing we can
+ * do - all replies will be rx'ed on same link causing slaves
+ * to be unstable during low/no traffic periods
+ */
+ if (IS_UP(slave->dev)) {
+ bond_arp_send_all(bond, slave);
+ }
+ }
+
+ if (do_failover) {
+ write_lock(&bond->curr_slave_lock);
+
+ bond_select_active_slave(bond);
+
+ if (oldcurrent && !bond->curr_active_slave) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: now running without any active "
+ "interface !\n",
+ bond_dev->name);
+ }
+
+ write_unlock(&bond->curr_slave_lock);
+ }
+
+re_arm:
+ if (bond->params.arp_interval) {
+ mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
+ }
+out:
+ read_unlock(&bond->lock);
+}
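+/*
+ * Summary of the thresholds used above: a down slave is brought up once it
+ * has both transmitted and received within arp_interval; an up slave is taken
+ * down once it has not transmitted for 2*arp_interval, or (when my_ip is
+ * known) has not received for 2*arp_interval.
+ */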
+
+/*
+ * When using arp monitoring in active-backup mode, this function is
+ * called to determine if any backup slaves have gone down or a new
+ * current slave needs to be found.
+ * The backup slaves never generate traffic; they are considered up by merely
+ * receiving traffic. If the current slave goes down, each backup slave will
+ * be given the opportunity to tx/rx an arp before being taken down - this
+ * prevents all slaves from being taken down due to the current slave not
+ * sending any traffic for the backups to receive. The arps are not strictly
+ * necessary; any tx and rx traffic will keep the current slave up. While any
+ * rx traffic will keep the backup slaves up, the current slave is responsible
+ * for generating traffic to keep them up regardless of any other traffic they
+ * may have received.
+ * see loadbalance_arp_monitor for arp monitoring in load balancing mode
+ */
+static void bond_activebackup_arp_mon(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave;
+ int delta_in_ticks;
+ int i;
+
+ read_lock(&bond->lock);
+
+ delta_in_ticks = (bond->params.arp_interval * HZ) / 1000;
+
+ if (bond->kill_timers) {
+ goto out;
+ }
+
+ if (bond->slave_cnt == 0) {
+ goto re_arm;
+ }
+
+ /* determine if any slave has come up or any backup slave has
+ * gone down
+ * TODO: what about up/down delay in arp mode? it wasn't here before
+ * so it can wait
+ */
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->link != BOND_LINK_UP) {
+ if ((jiffies - slave->dev->last_rx) <= delta_in_ticks) {
+
+ slave->link = BOND_LINK_UP;
+
+ write_lock(&bond->curr_slave_lock);
+
+ if ((!bond->curr_active_slave) &&
+ ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) {
+ bond_change_active_slave(bond, slave);
+ bond->current_arp_slave = NULL;
+ } else if (bond->curr_active_slave != slave) {
+ /* this slave has just come up but we
+ * already have a current slave; this
+ * can also happen if bond_enslave adds
+ * a new slave that is up while we are
+ * searching for a new slave
+ */
+ bond_set_slave_inactive_flags(slave);
+ bond->current_arp_slave = NULL;
+ }
+
+ if (slave == bond->curr_active_slave) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: %s is up and now the "
+ "active interface\n",
+ bond_dev->name,
+ slave->dev->name);
+ } else {
+ printk(KERN_INFO DRV_NAME
+ ": %s: backup interface %s is "
+ "now up\n",
+ bond_dev->name,
+ slave->dev->name);
+ }
+
+ write_unlock(&bond->curr_slave_lock);
+ }
+ } else {
+ read_lock(&bond->curr_slave_lock);
+
+ if ((slave != bond->curr_active_slave) &&
+ (!bond->current_arp_slave) &&
+ (((jiffies - slave->dev->last_rx) >= 3*delta_in_ticks) &&
+ my_ip)) {
+ /* a backup slave has gone down; three times
+ * the delta allows the current slave to be
+ * taken out before the backup slave.
+ * note: a non-null current_arp_slave indicates
+ * the curr_active_slave went down and we are
+ * searching for a new one; under this
+ * condition we only take the curr_active_slave
+ * down - this gives each slave a chance to
+ * tx/rx traffic before being taken out
+ */
+
+ read_unlock(&bond->curr_slave_lock);
+
+ slave->link = BOND_LINK_DOWN;
+
+ if (slave->link_failure_count < UINT_MAX) {
+ slave->link_failure_count++;
+ }
+
+ bond_set_slave_inactive_flags(slave);
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: backup interface %s is now down\n",
+ bond_dev->name,
+ slave->dev->name);
+ } else {
+ read_unlock(&bond->curr_slave_lock);
+ }
+ }
+ }
+
+ read_lock(&bond->curr_slave_lock);
+ slave = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ if (slave) {
+		/* if the current slave has not transmitted, or (when my_ip is
+		 * known) has not received, traffic in the past 2*arp_intervals,
+		 * select a different slave. slave->jiffies is only updated when
+ * a slave first becomes the curr_active_slave - not necessarily
+ * after every arp; this ensures the slave has a full 2*delta
+ * before being taken out. if a primary is being used, check
+ * if it is up and needs to take over as the curr_active_slave
+ */
+ if ((((jiffies - slave->dev->trans_start) >= (2*delta_in_ticks)) ||
+ (((jiffies - slave->dev->last_rx) >= (2*delta_in_ticks)) &&
+ my_ip)) &&
+ ((jiffies - slave->jiffies) >= 2*delta_in_ticks)) {
+
+ slave->link = BOND_LINK_DOWN;
+
+ if (slave->link_failure_count < UINT_MAX) {
+ slave->link_failure_count++;
+ }
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status down for active interface "
+ "%s, disabling it\n",
+ bond_dev->name,
+ slave->dev->name);
+
+ write_lock(&bond->curr_slave_lock);
+
+ bond_select_active_slave(bond);
+ slave = bond->curr_active_slave;
+
+ write_unlock(&bond->curr_slave_lock);
+
+ bond->current_arp_slave = slave;
+
+ if (slave) {
+ slave->jiffies = jiffies;
+ }
+ } else if ((bond->primary_slave) &&
+ (bond->primary_slave != slave) &&
+ (bond->primary_slave->link == BOND_LINK_UP)) {
+ /* at this point, slave is the curr_active_slave */
+ printk(KERN_INFO DRV_NAME
+ ": %s: changing from interface %s to primary "
+ "interface %s\n",
+ bond_dev->name,
+ slave->dev->name,
+ bond->primary_slave->dev->name);
+
+ /* primary is up so switch to it */
+ write_lock(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, bond->primary_slave);
+ write_unlock(&bond->curr_slave_lock);
+
+ slave = bond->primary_slave;
+ slave->jiffies = jiffies;
+ } else {
+ bond->current_arp_slave = NULL;
+ }
+
+ /* the current slave must tx an arp to ensure backup slaves
+ * rx traffic
+ */
+ if (slave && my_ip) {
+ bond_arp_send_all(bond, slave);
+ }
+ }
+
+ /* if we don't have a curr_active_slave, search for the next available
+ * backup slave from the current_arp_slave and make it the candidate
+ * for becoming the curr_active_slave
+ */
+ if (!slave) {
+ if (!bond->current_arp_slave) {
+ bond->current_arp_slave = bond->first_slave;
+ }
+
+ if (bond->current_arp_slave) {
+ bond_set_slave_inactive_flags(bond->current_arp_slave);
+
+ /* search for next candidate */
+ bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave) {
+ if (IS_UP(slave->dev)) {
+ slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(slave);
+ bond_arp_send_all(bond, slave);
+ slave->jiffies = jiffies;
+ bond->current_arp_slave = slave;
+ break;
+ }
+
+ /* if the link state is up at this point, we
+ * mark it down - this can happen if we have
+ * simultaneous link failures and
+ * reselect_active_interface doesn't make this
+ * one the current slave so it is still marked
+ * up when it is actually down
+ */
+ if (slave->link == BOND_LINK_UP) {
+ slave->link = BOND_LINK_DOWN;
+ if (slave->link_failure_count < UINT_MAX) {
+ slave->link_failure_count++;
+ }
+
+ bond_set_slave_inactive_flags(slave);
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: backup interface %s is "
+ "now down.\n",
+ bond_dev->name,
+ slave->dev->name);
+ }
+ }
+ }
+ }
+
+re_arm:
+ if (bond->params.arp_interval) {
+ mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
+ }
+out:
+ read_unlock(&bond->lock);
+}
+
+/*------------------------------ proc/seq_file-------------------------------*/
+
+#ifdef CONFIG_PROC_FS
+
+#define SEQ_START_TOKEN ((void *)1)
+
+static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct bonding *bond = seq->private;
+ loff_t off = 0;
+ struct slave *slave;
+ int i;
+
+ /* make sure the bond won't be taken away */
+ read_lock(&dev_base_lock);
+ read_lock_bh(&bond->lock);
+
+ if (*pos == 0) {
+ return SEQ_START_TOKEN;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (++off == *pos) {
+ return slave;
+ }
+ }
+
+ return NULL;
+}
+
+static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bonding *bond = seq->private;
+ struct slave *slave = v;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN) {
+ return bond->first_slave;
+ }
+
+ slave = slave->next;
+
+ return (slave == bond->first_slave) ? NULL : slave;
+}
+
+static void bond_info_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bonding *bond = seq->private;
+
+ read_unlock_bh(&bond->lock);
+ read_unlock(&dev_base_lock);
+}
+
+static void bond_info_show_master(struct seq_file *seq)
+{
+ struct bonding *bond = seq->private;
+ struct slave *curr;
+
+ read_lock(&bond->curr_slave_lock);
+ curr = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ seq_printf(seq, "Bonding Mode: %s\n",
+ bond_mode_name(bond->params.mode));
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ seq_printf(seq, "Primary Slave: %s\n",
+ (bond->params.primary[0]) ?
+ bond->params.primary : "None");
+
+ seq_printf(seq, "Currently Active Slave: %s\n",
+ (curr) ? curr->dev->name : "None");
+ }
+
+ seq_printf(seq, "MII Status: %s\n", (curr) ? "up" : "down");
+ seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
+ seq_printf(seq, "Up Delay (ms): %d\n",
+ bond->params.updelay * bond->params.miimon);
+ seq_printf(seq, "Down Delay (ms): %d\n",
+ bond->params.downdelay * bond->params.miimon);
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct ad_info ad_info;
+
+ seq_puts(seq, "\n802.3ad info\n");
+ seq_printf(seq, "LACP rate: %s\n",
+ (bond->params.lacp_fast) ? "fast" : "slow");
+
+ if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ seq_printf(seq, "bond %s has no active aggregator\n",
+ bond->dev->name);
+ } else {
+ seq_printf(seq, "Active Aggregator Info:\n");
+
+ seq_printf(seq, "\tAggregator ID: %d\n",
+ ad_info.aggregator_id);
+ seq_printf(seq, "\tNumber of ports: %d\n",
+ ad_info.ports);
+ seq_printf(seq, "\tActor Key: %d\n",
+ ad_info.actor_key);
+ seq_printf(seq, "\tPartner Key: %d\n",
+ ad_info.partner_key);
+ seq_printf(seq, "\tPartner Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ad_info.partner_system[0],
+ ad_info.partner_system[1],
+ ad_info.partner_system[2],
+ ad_info.partner_system[3],
+ ad_info.partner_system[4],
+ ad_info.partner_system[5]);
+ }
+ }
+}
+
+static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave)
+{
+ struct bonding *bond = seq->private;
+
+ seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
+ seq_printf(seq, "MII Status: %s\n",
+ (slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "Link Failure Count: %d\n",
+ slave->link_failure_count);
+
+ if (app_abi_ver >= 1) {
+ seq_printf(seq,
+ "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ slave->perm_hwaddr[0],
+ slave->perm_hwaddr[1],
+ slave->perm_hwaddr[2],
+ slave->perm_hwaddr[3],
+ slave->perm_hwaddr[4],
+ slave->perm_hwaddr[5]);
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ const struct aggregator *agg
+ = SLAVE_AD_INFO(slave).port.aggregator;
+
+ if (agg) {
+ seq_printf(seq, "Aggregator ID: %d\n",
+ agg->aggregator_identifier);
+ } else {
+ seq_puts(seq, "Aggregator ID: N/A\n");
+ }
+ }
+}
+
+static int bond_info_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "%s\n", version);
+ bond_info_show_master(seq);
+ } else {
+ bond_info_show_slave(seq, v);
+ }
+
+ return 0;
+}
+
+static struct seq_operations bond_info_seq_ops = {
+ .start = bond_info_seq_start,
+ .next = bond_info_seq_next,
+ .stop = bond_info_seq_stop,
+ .show = bond_info_seq_show,
+};
+
+static int bond_info_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ struct proc_dir_entry *proc;
+ int res;
+
+ res = seq_open(file, &bond_info_seq_ops);
+ if (!res) {
+ /* recover the pointer buried in proc_dir_entry data */
+ seq = file->private_data;
+ proc = PDE(inode);
+ seq->private = proc->data;
+ }
+
+ return res;
+}
+
+static struct file_operations bond_info_fops = {
+ .owner = THIS_MODULE,
+ .open = bond_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int bond_create_proc_entry(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+
+ if (bond_proc_dir) {
+ bond->proc_entry = create_proc_entry(bond_dev->name,
+ S_IRUGO,
+ bond_proc_dir);
+ if (bond->proc_entry == NULL) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
+ } else {
+ bond->proc_entry->data = bond;
+ bond->proc_entry->proc_fops = &bond_info_fops;
+ bond->proc_entry->owner = THIS_MODULE;
+ memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
+ }
+ }
+
+ return 0;
+}
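+/*
+ * Illustrative usage sketch: once the entry exists, the bond status can be
+ * inspected from user space, e.g.
+ *
+ *	cat /proc/net/bonding/bond0
+ *
+ * which walks the seq_file handlers above (master summary first, then one
+ * block per slave). The interface name "bond0" is only an example.
+ */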
+
+static void bond_remove_proc_entry(struct bonding *bond)
+{
+ if (bond_proc_dir && bond->proc_entry) {
+ remove_proc_entry(bond->proc_file_name, bond_proc_dir);
+ memset(bond->proc_file_name, 0, IFNAMSIZ);
+ bond->proc_entry = NULL;
+ }
+}
+
+/* Create the bonding directory under /proc/net, if it doesn't exist yet.
+ * Caller must hold rtnl_lock.
+ */
+static void bond_create_proc_dir(void)
+{
+ int len = strlen(DRV_NAME);
+
+ for (bond_proc_dir = proc_net->subdir; bond_proc_dir;
+ bond_proc_dir = bond_proc_dir->next) {
+ if ((bond_proc_dir->namelen == len) &&
+ !memcmp(bond_proc_dir->name, DRV_NAME, len)) {
+ break;
+ }
+ }
+
+ if (!bond_proc_dir) {
+ bond_proc_dir = proc_mkdir(DRV_NAME, proc_net);
+ if (bond_proc_dir) {
+ bond_proc_dir->owner = THIS_MODULE;
+ } else {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: cannot create /proc/net/%s\n",
+ DRV_NAME);
+ }
+ }
+}
+
+/* Destroy the bonding directory under /proc/net, if empty.
+ * Caller must hold rtnl_lock.
+ */
+static void bond_destroy_proc_dir(void)
+{
+ struct proc_dir_entry *de;
+
+ if (!bond_proc_dir) {
+ return;
+ }
+
+ /* verify that the /proc dir is empty */
+ for (de = bond_proc_dir->subdir; de; de = de->next) {
+ /* ignore . and .. */
+ if (*(de->name) != '.') {
+ break;
+ }
+ }
+
+ if (de) {
+ if (bond_proc_dir->owner == THIS_MODULE) {
+ bond_proc_dir->owner = NULL;
+ }
+ } else {
+ remove_proc_entry(DRV_NAME, proc_net);
+ bond_proc_dir = NULL;
+ }
+}
+#endif /* CONFIG_PROC_FS */
+
+/*-------------------------- netdev event handling --------------------------*/
+
+/*
+ * Change device name
+ */
+static int bond_event_changename(struct bonding *bond)
+{
+#ifdef CONFIG_PROC_FS
+ bond_remove_proc_entry(bond);
+ bond_create_proc_entry(bond);
+#endif
+
+ return NOTIFY_DONE;
+}
+
+static int bond_master_netdev_event(unsigned long event, struct net_device *bond_dev)
+{
+ struct bonding *event_bond = bond_dev->priv;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ /*
+ * TODO: remove a bond from the list?
+ */
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
+{
+ struct net_device *bond_dev = slave_dev->master;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ if (bond_dev) {
+ bond_release(bond_dev, slave_dev);
+ }
+ break;
+ case NETDEV_CHANGE:
+ /*
+ * TODO: is this what we get if somebody
+ * sets up a hierarchical bond, then rmmod's
+ * one of the slave bonding devices?
+ */
+ break;
+ case NETDEV_DOWN:
+ /*
+ * ... Or is it this?
+ */
+ break;
+ case NETDEV_CHANGEMTU:
+ /*
+ * TODO: Should slaves be allowed to
+ * independently alter their MTU? For
+ * an active-backup bond, slaves need
+ * not be the same type of device, so
+ * MTUs may vary. For other modes,
+ * slaves arguably should have the
+ * same MTUs. To do this, we'd need to
+ * take over the slave's change_mtu
+ * function for the duration of their
+ * servitude.
+ */
+ break;
+ case NETDEV_CHANGENAME:
+ /*
+ * TODO: handle changing the primary's name
+ */
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * bond_netdev_event: handle netdev notifier chain events.
+ *
+ * This function receives events for the netdev chain. The caller (an
+ * ioctl handler calling notifier_call_chain) holds the necessary
+ * locks for us to safely manipulate the slave devices (RTNL lock,
+ * dev_probe_lock).
+ */
+static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = (struct net_device *)ptr;
+
+ dprintk("event_dev: %s, event: %lx\n",
+ (event_dev ? event_dev->name : "None"),
+ event);
+
+ if (event_dev->flags & IFF_MASTER) {
+ dprintk("IFF_MASTER\n");
+ return bond_master_netdev_event(event, event_dev);
+ }
+
+ if (event_dev->flags & IFF_SLAVE) {
+ dprintk("IFF_SLAVE\n");
+ return bond_slave_netdev_event(event, event_dev);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bond_netdev_notifier = {
+ .notifier_call = bond_netdev_event,
+};
+
+/*-------------------------- Packet type handling ---------------------------*/
+
+/* register to receive lacpdus on a bond */
+static void bond_register_lacpdu(struct bonding *bond)
+{
+ struct packet_type *pk_type = &(BOND_AD_INFO(bond).ad_pkt_type);
+
+ /* initialize packet type */
+ pk_type->type = PKT_TYPE_LACPDU;
+ pk_type->dev = bond->dev;
+ pk_type->func = bond_3ad_lacpdu_recv;
+
+ dev_add_pack(pk_type);
+}
+
+/* unregister to receive lacpdus on a bond */
+static void bond_unregister_lacpdu(struct bonding *bond)
+{
+ dev_remove_pack(&(BOND_AD_INFO(bond).ad_pkt_type));
+}
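+/*
+ * Note: PKT_TYPE_LACPDU is assumed here to be the IEEE 802.3 Slow Protocols
+ * ethertype (0x8809), so dev_add_pack() routes incoming LACPDUs on the bond
+ * to bond_3ad_lacpdu_recv().
+ */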
+
+/*-------------------------- Device entry points ----------------------------*/
+
+static int bond_open(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct timer_list *mii_timer = &bond->mii_timer;
+ struct timer_list *arp_timer = &bond->arp_timer;
+
+ bond->kill_timers = 0;
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ struct timer_list *alb_timer = &(BOND_ALB_INFO(bond).alb_timer);
+
+ /* bond_alb_initialize must be called before the timer
+ * is started.
+ */
+ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
+ /* something went wrong - fail the open operation */
+ return -1;
+ }
+
+ init_timer(alb_timer);
+ alb_timer->expires = jiffies + 1;
+ alb_timer->data = (unsigned long)bond;
+ alb_timer->function = (void *)&bond_alb_monitor;
+ add_timer(alb_timer);
+ }
+
+ if (bond->params.miimon) { /* link check interval, in milliseconds. */
+ init_timer(mii_timer);
+ mii_timer->expires = jiffies + 1;
+ mii_timer->data = (unsigned long)bond_dev;
+ mii_timer->function = (void *)&bond_mii_monitor;
+ add_timer(mii_timer);
+ }
+
+ if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+ init_timer(arp_timer);
+ arp_timer->expires = jiffies + 1;
+ arp_timer->data = (unsigned long)bond_dev;
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ arp_timer->function = (void *)&bond_activebackup_arp_mon;
+ } else {
+ arp_timer->function = (void *)&bond_loadbalance_arp_mon;
+ }
+ add_timer(arp_timer);
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct timer_list *ad_timer = &(BOND_AD_INFO(bond).ad_timer);
+ init_timer(ad_timer);
+ ad_timer->expires = jiffies + 1;
+ ad_timer->data = (unsigned long)bond;
+ ad_timer->function = (void *)&bond_3ad_state_machine_handler;
+ add_timer(ad_timer);
+
+ /* register to receive LACPDUs */
+ bond_register_lacpdu(bond);
+ }
+
+ return 0;
+}
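+/*
+ * Note on the timers above: each monitor is armed to fire one jiffy after
+ * open and then re-arms itself from its own re_arm path (e.g. mod_timer() in
+ * bond_mii_monitor()), so the effective period is the configured
+ * miimon/arp_interval in milliseconds rather than the initial 1-jiffy delay.
+ */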
+
+static int bond_close(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* Unregister the receive of LACPDUs */
+ bond_unregister_lacpdu(bond);
+ }
+
+ write_lock_bh(&bond->lock);
+
+ bond_mc_list_destroy(bond);
+
+ /* signal timers not to re-arm */
+ bond->kill_timers = 1;
+
+ write_unlock_bh(&bond->lock);
+
+ /* del_timer_sync must run without holding the bond->lock
+ * because a running timer might be trying to hold it too
+ */
+
+ if (bond->params.miimon) { /* link check interval, in milliseconds. */
+ del_timer_sync(&bond->mii_timer);
+ }
+
+ if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+ del_timer_sync(&bond->arp_timer);
+ }
+
+ switch (bond->params.mode) {
+ case BOND_MODE_8023AD:
+ del_timer_sync(&(BOND_AD_INFO(bond).ad_timer));
+ break;
+ case BOND_MODE_TLB:
+ case BOND_MODE_ALB:
+ del_timer_sync(&(BOND_ALB_INFO(bond).alb_timer));
+ break;
+ default:
+ break;
+ }
+
+ /* Release the bonded slaves */
+ bond_release_all(bond_dev);
+
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB)) {
+ /* Must be called only after all
+ * slaves have been released
+ */
+ bond_alb_deinitialize(bond);
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct net_device_stats *stats = &(bond->stats), *sstats;
+ struct slave *slave;
+ int i;
+
+ memset(stats, 0, sizeof(struct net_device_stats));
+
+ read_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, i) {
+ sstats = slave->dev->get_stats(slave->dev);
+
+ stats->rx_packets += sstats->rx_packets;
+ stats->rx_bytes += sstats->rx_bytes;
+ stats->rx_errors += sstats->rx_errors;
+ stats->rx_dropped += sstats->rx_dropped;
+
+ stats->tx_packets += sstats->tx_packets;
+ stats->tx_bytes += sstats->tx_bytes;
+ stats->tx_errors += sstats->tx_errors;
+ stats->tx_dropped += sstats->tx_dropped;
+
+ stats->multicast += sstats->multicast;
+ stats->collisions += sstats->collisions;
+
+ stats->rx_length_errors += sstats->rx_length_errors;
+ stats->rx_over_errors += sstats->rx_over_errors;
+ stats->rx_crc_errors += sstats->rx_crc_errors;
+ stats->rx_frame_errors += sstats->rx_frame_errors;
+ stats->rx_fifo_errors += sstats->rx_fifo_errors;
+ stats->rx_missed_errors += sstats->rx_missed_errors;
+
+ stats->tx_aborted_errors += sstats->tx_aborted_errors;
+ stats->tx_carrier_errors += sstats->tx_carrier_errors;
+ stats->tx_fifo_errors += sstats->tx_fifo_errors;
+ stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+ stats->tx_window_errors += sstats->tx_window_errors;
+ }
+
+ read_unlock_bh(&bond->lock);
+
+ return stats;
+}
+
+static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *slave_dev = NULL;
+ struct ifbond k_binfo;
+ struct ifbond __user *u_binfo = NULL;
+ struct ifslave k_sinfo;
+ struct ifslave __user *u_sinfo = NULL;
+ struct mii_ioctl_data *mii = NULL;
+ int prev_abi_ver = orig_app_abi_ver;
+ int res = 0;
+
+ dprintk("bond_ioctl: master=%s, cmd=%d\n",
+ bond_dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return bond_ethtool_ioctl(bond_dev, ifr);
+ case SIOCGMIIPHY:
+ mii = if_mii(ifr);
+ if (!mii) {
+ return -EINVAL;
+ }
+ mii->phy_id = 0;
+ /* Fall Through */
+ case SIOCGMIIREG:
+ /*
+ * We do this again just in case we were called by SIOCGMIIREG
+ * instead of SIOCGMIIPHY.
+ */
+ mii = if_mii(ifr);
+ if (!mii) {
+ return -EINVAL;
+ }
+
+ if (mii->reg_num == 1) {
+ struct bonding *bond = bond_dev->priv;
+ mii->val_out = 0;
+ read_lock_bh(&bond->lock);
+ read_lock(&bond->curr_slave_lock);
+ if (bond->curr_active_slave) {
+ mii->val_out = BMSR_LSTATUS;
+ }
+ read_unlock(&bond->curr_slave_lock);
+ read_unlock_bh(&bond->lock);
+ }
+
+ return 0;
+ case BOND_INFO_QUERY_OLD:
+ case SIOCBONDINFOQUERY:
+ u_binfo = (struct ifbond __user *)ifr->ifr_data;
+
+ if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) {
+ return -EFAULT;
+ }
+
+ res = bond_info_query(bond_dev, &k_binfo);
+ if (res == 0) {
+ if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) {
+ return -EFAULT;
+ }
+ }
+
+ return res;
+ case BOND_SLAVE_INFO_QUERY_OLD:
+ case SIOCBONDSLAVEINFOQUERY:
+ u_sinfo = (struct ifslave __user *)ifr->ifr_data;
+
+ if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) {
+ return -EFAULT;
+ }
+
+ res = bond_slave_info_query(bond_dev, &k_sinfo);
+ if (res == 0) {
+ if (copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) {
+ return -EFAULT;
+ }
+ }
+
+ return res;
+ default:
+ /* Go on */
+ break;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+
+ if (orig_app_abi_ver == -1) {
+ /* no orig_app_abi_ver was provided yet, so we'll use the
+ * current one from now on, even if it's 0
+ */
+ orig_app_abi_ver = app_abi_ver;
+
+ } else if (orig_app_abi_ver != app_abi_ver) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: already using ifenslave ABI version %d; to "
+ "upgrade ifenslave to version %d, you must first "
+ "reload bonding.\n",
+ orig_app_abi_ver, app_abi_ver);
+ return -EINVAL;
+ }
+
+ slave_dev = dev_get_by_name(ifr->ifr_slave);
+
+ dprintk("slave_dev=%p: \n", slave_dev);
+
+ if (!slave_dev) {
+ res = -ENODEV;
+ } else {
+ dprintk("slave_dev->name=%s: \n", slave_dev->name);
+ switch (cmd) {
+ case BOND_ENSLAVE_OLD:
+ case SIOCBONDENSLAVE:
+ res = bond_enslave(bond_dev, slave_dev);
+ break;
+ case BOND_RELEASE_OLD:
+ case SIOCBONDRELEASE:
+ res = bond_release(bond_dev, slave_dev);
+ break;
+ case BOND_SETHWADDR_OLD:
+ case SIOCBONDSETHWADDR:
+ res = bond_sethwaddr(bond_dev, slave_dev);
+ break;
+ case BOND_CHANGE_ACTIVE_OLD:
+ case SIOCBONDCHANGEACTIVE:
+ res = bond_ioctl_change_active(bond_dev, slave_dev);
+ break;
+ default:
+ res = -EOPNOTSUPP;
+ }
+
+ dev_put(slave_dev);
+ }
+
+ if (res < 0) {
+ /* The ioctl failed, so there's no point in changing the
+ * orig_app_abi_ver. We'll restore it's value just in case
+		 * orig_app_abi_ver. We'll restore its value just in case
+ */
+ orig_app_abi_ver = prev_abi_ver;
+ }
+
+ return res;
+}
+
+static void bond_set_multicast_list(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct dev_mc_list *dmi;
+
+ write_lock_bh(&bond->lock);
+
+ /*
+ * Do promisc before checking multicast_mode
+ */
+ if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC)) {
+ bond_set_promiscuity(bond, 1);
+ }
+
+ if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC)) {
+ bond_set_promiscuity(bond, -1);
+ }
+
+ /* set allmulti flag to slaves */
+ if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI)) {
+ bond_set_allmulti(bond, 1);
+ }
+
+ if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) {
+ bond_set_allmulti(bond, -1);
+ }
+
+ bond->flags = bond_dev->flags;
+
+ /* looking for addresses to add to slaves' mc list */
+ for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
+ if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) {
+ bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ }
+ }
+
+ /* looking for addresses to delete from slaves' list */
+ for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
+ if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) {
+ bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ }
+ }
+
+ /* save master's multicast list */
+ bond_mc_list_destroy(bond);
+ bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
+
+ write_unlock_bh(&bond->lock);
+}
+
+/*
+ * Change the MTU of all of a master's slaves to match the master
+ */
+static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *stop_at;
+ int res = 0;
+ int i;
+
+ dprintk("bond=%p, name=%s, new_mtu=%d\n", bond,
+ (bond_dev ? bond_dev->name : "None"), new_mtu);
+
+ /* Can't hold bond->lock with bh disabled here since
+ * some base drivers panic. On the other hand we can't
+ * hold bond->lock without bh disabled because we'll
+ * deadlock. The only solution is to rely on the fact
+ * that we're under rtnl_lock here, and the slaves
+ * list won't change. This doesn't solve the problem
+ * of setting the slave's MTU while it is
+ * transmitting, but the assumption is that the base
+ * driver can handle that.
+ *
+ * TODO: figure out a way to safely iterate the slaves
+ * list, but without holding a lock around the actual
+ * call to the base driver.
+ */
+
+ bond_for_each_slave(bond, slave, i) {
+ dprintk("s %p s->p %p c_m %p\n", slave,
+ slave->prev, slave->dev->change_mtu);
+ res = dev_set_mtu(slave->dev, new_mtu);
+
+ if (res) {
+ /* If we failed to set the slave's mtu to the new value
+ * we must abort the operation even in ACTIVE_BACKUP
+ * mode, because if we allow the backup slaves to have
+ * different mtu values than the active slave we'll
+ * need to change their mtu when doing a failover. That
+ * means changing their mtu from timer context, which
+ * is probably not a good idea.
+ */
+ dprintk("err %d %s\n", res, slave->dev->name);
+ goto unwind;
+ }
+ }
+
+ bond_dev->mtu = new_mtu;
+
+ return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ int tmp_res;
+
+ tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+ if (tmp_res) {
+ dprintk("unwind err %d dev %s\n", tmp_res,
+ slave->dev->name);
+ }
+ }
+
+ return res;
+}
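+/*
+ * Illustrative usage sketch: an MTU change on the master, e.g.
+ *
+ *	ip link set bond0 mtu 9000
+ *
+ * (command assumed from standard iproute2) reaches bond_change_mtu() under
+ * rtnl_lock and is propagated to every slave, with a best-effort unwind to
+ * the old MTU if any slave rejects the new value.
+ */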
+
+/*
+ * Change HW address
+ *
+ * Note that many devices must be down to change the HW address, and
+ * downing the master releases all slaves. We can make bonds full of
+ * bonding devices to test this, however.
+ */
+static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct sockaddr *sa = addr, tmp_sa;
+ struct slave *slave, *stop_at;
+ int res = 0;
+ int i;
+
+ dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
+
+ if (!is_valid_ether_addr(sa->sa_data)) {
+ return -EADDRNOTAVAIL;
+ }
+
+ /* Can't hold bond->lock with bh disabled here since
+ * some base drivers panic. On the other hand we can't
+ * hold bond->lock without bh disabled because we'll
+ * deadlock. The only solution is to rely on the fact
+ * that we're under rtnl_lock here, and the slaves
+ * list won't change. This doesn't solve the problem
+ * of setting the slave's hw address while it is
+ * transmitting, but the assumption is that the base
+ * driver can handle that.
+ *
+ * TODO: figure out a way to safely iterate the slaves
+ * list, but without holding a lock around the actual
+ * call to the base driver.
+ */
+
+ bond_for_each_slave(bond, slave, i) {
+ dprintk("slave %p %s\n", slave, slave->dev->name);
+
+ if (slave->dev->set_mac_address == NULL) {
+ res = -EOPNOTSUPP;
+ dprintk("EOPNOTSUPP %s\n", slave->dev->name);
+ goto unwind;
+ }
+
+ res = dev_set_mac_address(slave->dev, addr);
+ if (res) {
+ /* TODO: consider downing the slave
+ * and retry ?
+ * User should expect communications
+ * breakage anyway until ARP finish
+ * updating, so...
+ */
+ dprintk("err %d %s\n", res, slave->dev->name);
+ goto unwind;
+ }
+ }
+
+ /* success */
+ memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+ return 0;
+
+unwind:
+ memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+ tmp_sa.sa_family = bond_dev->type;
+
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ int tmp_res;
+
+ tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+ if (tmp_res) {
+ dprintk("unwind err %d dev %s\n", tmp_res,
+ slave->dev->name);
+ }
+ }
+
+ return res;
+}
+
+static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *start_at;
+ int i;
+ int res = 1;
+
+ read_lock(&bond->lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ read_lock(&bond->curr_slave_lock);
+ slave = start_at = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ if (!slave) {
+ goto out;
+ }
+
+ bond_for_each_slave_from(bond, slave, i, start_at) {
+ if (IS_UP(slave->dev) &&
+ (slave->link == BOND_LINK_UP) &&
+ (slave->state == BOND_STATE_ACTIVE)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+
+ write_lock(&bond->curr_slave_lock);
+ bond->curr_active_slave = slave->next;
+ write_unlock(&bond->curr_slave_lock);
+
+ break;
+ }
+ }
+
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+/*
+ * in active-backup mode, we know that bond->curr_active_slave is always valid if
+ * the bond has a usable interface.
+ */
+static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ int res = 1;
+
+ /* if we are sending arp packets, try to at least
+ identify our own ip address */
+ if (bond->params.arp_interval && !my_ip &&
+ (skb->protocol == __constant_htons(ETH_P_ARP))) {
+ char *the_ip = (char *)skb->data +
+ sizeof(struct ethhdr) +
+ sizeof(struct arphdr) +
+ ETH_ALEN;
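+ /* Illustrative note: for an Ethernet/IPv4 ARP frame this skips
+ * the 14-byte Ethernet header, the 8-byte ARP header and the
+ * 6-byte sender MAC, so the_ip lands on the 4-byte sender IP
+ * (ar_sip) field.
+ */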
+ memcpy(&my_ip, the_ip, 4);
+ }
+
+ read_lock(&bond->lock);
+ read_lock(&bond->curr_slave_lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ if (bond->curr_active_slave) { /* one usable interface */
+ res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
+ }
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ read_unlock(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+/*
+ * in XOR mode, we determine the output device by performing xor on
+ * the source and destination hw addresses. If this device is not
+ * enabled, find the next slave following this xor slave.
+ */
+static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct ethhdr *data = (struct ethhdr *)skb->data;
+ struct slave *slave, *start_at;
+ int slave_no;
+ int i;
+ int res = 1;
+
+ read_lock(&bond->lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ slave_no = (data->h_dest[5]^bond_dev->dev_addr[5]) % bond->slave_cnt;
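+ /* Worked example (illustrative values): if the destination's last
+ * address byte is 0x23, the bond's is 0x01 and slave_cnt is 2,
+ * then (0x23 ^ 0x01) % 2 == 34 % 2 == 0, selecting the first slave.
+ */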
+
+ bond_for_each_slave(bond, slave, i) {
+ slave_no--;
+ if (slave_no < 0) {
+ break;
+ }
+ }
+
+ start_at = slave;
+
+ bond_for_each_slave_from(bond, slave, i, start_at) {
+ if (IS_UP(slave->dev) &&
+ (slave->link == BOND_LINK_UP) &&
+ (slave->state == BOND_STATE_ACTIVE)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ break;
+ }
+ }
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+/*
+ * in broadcast mode, we send everything to all usable interfaces.
+ */
+static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+ struct slave *slave, *start_at;
+ struct net_device *tx_dev = NULL;
+ int i;
+ int res = 1;
+
+ read_lock(&bond->lock);
+
+ if (!BOND_IS_OK(bond)) {
+ goto out;
+ }
+
+ read_lock(&bond->curr_slave_lock);
+ start_at = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ if (!start_at) {
+ goto out;
+ }
+
+ bond_for_each_slave_from(bond, slave, i, start_at) {
+ if (IS_UP(slave->dev) &&
+ (slave->link == BOND_LINK_UP) &&
+ (slave->state == BOND_STATE_ACTIVE)) {
+ if (tx_dev) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: bond_xmit_broadcast(): "
+ "skb_clone() failed\n");
+ continue;
+ }
+
+ res = bond_dev_queue_xmit(bond, skb2, tx_dev);
+ if (res) {
+ dev_kfree_skb(skb2);
+ continue;
+ }
+ }
+ tx_dev = slave->dev;
+ }
+ }
+
+ if (tx_dev) {
+ res = bond_dev_queue_xmit(bond, skb, tx_dev);
+ }
+
+out:
+ if (res) {
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+ }
+ /* frame sent to all suitable interfaces */
+ read_unlock(&bond->lock);
+ return 0;
+}
+
+/*------------------------- Device initialization ---------------------------*/
+
+/*
+ * set bond mode specific net device operations
+ */
+static inline void bond_set_mode_ops(struct net_device *bond_dev, int mode)
+{
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+ bond_dev->hard_start_xmit = bond_xmit_roundrobin;
+ break;
+ case BOND_MODE_ACTIVEBACKUP:
+ bond_dev->hard_start_xmit = bond_xmit_activebackup;
+ break;
+ case BOND_MODE_XOR:
+ bond_dev->hard_start_xmit = bond_xmit_xor;
+ break;
+ case BOND_MODE_BROADCAST:
+ bond_dev->hard_start_xmit = bond_xmit_broadcast;
+ break;
+ case BOND_MODE_8023AD:
+ bond_dev->hard_start_xmit = bond_3ad_xmit_xor;
+ break;
+ case BOND_MODE_TLB:
+ case BOND_MODE_ALB:
+ bond_dev->hard_start_xmit = bond_alb_xmit;
+ bond_dev->set_mac_address = bond_alb_set_mac_address;
+ break;
+ default:
+ /* Should never happen, mode already checked */
+ printk(KERN_ERR DRV_NAME
+ ": Error: Unknown bonding mode %d\n",
+ mode);
+ break;
+ }
+}
+
+/*
+ * Does not allocate but creates a /proc entry.
+ * Allowed to fail.
+ */
+static int __init bond_init(struct net_device *bond_dev, struct bond_params *params)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ dprintk("Begin bond_init for %s\n", bond_dev->name);
+
+ /* initialize rwlocks */
+ rwlock_init(&bond->lock);
+ rwlock_init(&bond->curr_slave_lock);
+
+ bond->params = *params; /* copy params struct */
+
+ /* Initialize pointers */
+ bond->first_slave = NULL;
+ bond->curr_active_slave = NULL;
+ bond->current_arp_slave = NULL;
+ bond->primary_slave = NULL;
+ bond->dev = bond_dev;
+ INIT_LIST_HEAD(&bond->vlan_list);
+
+ /* Initialize the device entry points */
+ bond_dev->open = bond_open;
+ bond_dev->stop = bond_close;
+ bond_dev->get_stats = bond_get_stats;
+ bond_dev->do_ioctl = bond_do_ioctl;
+ bond_dev->set_multicast_list = bond_set_multicast_list;
+ bond_dev->change_mtu = bond_change_mtu;
+ bond_dev->set_mac_address = bond_set_mac_address;
+
+ bond_set_mode_ops(bond_dev, bond->params.mode);
+
+ bond_dev->destructor = free_netdev;
+
+ /* Initialize the device options */
+ bond_dev->tx_queue_len = 0;
+ bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
+
+ /* At first, we block adding VLANs. That's the only way to
+ * prevent problems that occur when adding VLANs over an
+ * empty bond. The block will be removed once non-challenged
+ * slaves are enslaved.
+ */
+ bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+ /* don't acquire bond device's xmit_lock when
+ * transmitting */
+ bond_dev->features |= NETIF_F_LLTX;
+
+ /* By default, we declare the bond to be fully
+ * VLAN hardware accelerated capable. Special
+ * care is taken in the various xmit functions
+ * when there are slaves that are not hw accel
+ * capable
+ */
+ bond_dev->vlan_rx_register = bond_vlan_rx_register;
+ bond_dev->vlan_rx_add_vid = bond_vlan_rx_add_vid;
+ bond_dev->vlan_rx_kill_vid = bond_vlan_rx_kill_vid;
+ bond_dev->features |= (NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER);
+
+#ifdef CONFIG_PROC_FS
+ bond_create_proc_entry(bond);
+#endif
+
+ list_add_tail(&bond->bond_list, &bond_dev_list);
+
+ return 0;
+}
+
+/* De-initialize device specific data.
+ * Caller must hold rtnl_lock.
+ */
+static inline void bond_deinit(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ list_del(&bond->bond_list);
+
+#ifdef CONFIG_PROC_FS
+ bond_remove_proc_entry(bond);
+#endif
+}
+
+/* Unregister and free all bond devices.
+ * Caller must hold rtnl_lock.
+ */
+static void bond_free_all(void)
+{
+ struct bonding *bond, *nxt;
+
+ list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
+ struct net_device *bond_dev = bond->dev;
+
+ unregister_netdevice(bond_dev);
+ bond_deinit(bond_dev);
+ }
+
+#ifdef CONFIG_PROC_FS
+ bond_destroy_proc_dir();
+#endif
+}
+
+/*------------------------- Module initialization ---------------------------*/
+
+/*
+ * Convert string input module parms. Accept either the
+ * number of the mode or its string name.
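+ *
+ * For example, with the usual mode table both "balance-rr" and "0"
+ * would resolve to the round-robin entry, while an unrecognized
+ * string falls through and returns -1 (illustrative values only).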
+ */
+static inline int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++) {
+ if ((isdigit(*mode_arg) &&
+ tbl[i].mode == simple_strtol(mode_arg, NULL, 0)) ||
+ (strncmp(mode_arg, tbl[i].modename,
+ strlen(tbl[i].modename)) == 0)) {
+ return tbl[i].mode;
+ }
+ }
+
+ return -1;
+}
+
+static int bond_check_params(struct bond_params *params)
+{
+ /*
+ * Convert string parameters.
+ */
+ if (mode) {
+ bond_mode = bond_parse_parm(mode, bond_mode_tbl);
+ if (bond_mode == -1) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: Invalid bonding mode \"%s\"\n",
+ mode == NULL ? "NULL" : mode);
+ return -EINVAL;
+ }
+ }
+
+ if (lacp_rate) {
+ if (bond_mode != BOND_MODE_8023AD) {
+ printk(KERN_INFO DRV_NAME
+ ": lacp_rate param is irrelevant in mode %s\n",
+ bond_mode_name(bond_mode));
+ } else {
+ lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
+ if (lacp_fast == -1) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: Invalid lacp rate \"%s\"\n",
+ lacp_rate == NULL ? "NULL" : lacp_rate);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (max_bonds < 1 || max_bonds > INT_MAX) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: max_bonds (%d) not in range %d-%d, so it "
+ "was reset to BOND_DEFAULT_MAX_BONDS (%d)",
+ max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ max_bonds = BOND_DEFAULT_MAX_BONDS;
+ }
+
+ if (miimon < 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: miimon module parameter (%d), "
+ "not in range 0-%d, so it was reset to %d\n",
+ miimon, INT_MAX, BOND_LINK_MON_INTERV);
+ miimon = BOND_LINK_MON_INTERV;
+ }
+
+ if (updelay < 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: updelay module parameter (%d), "
+ "not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
+ updelay = 0;
+ }
+
+ if (downdelay < 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: downdelay module parameter (%d), "
+ "not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
+ downdelay = 0;
+ }
+
+ if ((use_carrier != 0) && (use_carrier != 1)) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: use_carrier module parameter (%d), "
+ "not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
+ use_carrier = 1;
+ }
+
+ /* reset values for 802.3ad */
+ if (bond_mode == BOND_MODE_8023AD) {
+ if (!miimon) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: miimon must be specified, "
+ "otherwise bonding will not detect link "
+ "failure, speed and duplex which are "
+ "essential for 802.3ad operation\n");
+ printk(KERN_WARNING "Forcing miimon to 100msec\n");
+ miimon = 100;
+ }
+ }
+
+ /* reset values for TLB/ALB */
+ if ((bond_mode == BOND_MODE_TLB) ||
+ (bond_mode == BOND_MODE_ALB)) {
+ if (!miimon) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: miimon must be specified, "
+ "otherwise bonding will not detect link "
+ "failure and link speed which are essential "
+ "for TLB/ALB load balancing\n");
+ printk(KERN_WARNING "Forcing miimon to 100msec\n");
+ miimon = 100;
+ }
+ }
+
+ if (bond_mode == BOND_MODE_ALB) {
+ printk(KERN_NOTICE DRV_NAME
+ ": In ALB mode you might experience client "
+ "disconnections upon reconnection of a link if the "
+ "bonding module updelay parameter (%d msec) is "
+ "incompatible with the forwarding delay time of the "
+ "switch\n",
+ updelay);
+ }
+
+ if (!miimon) {
+ if (updelay || downdelay) {
+ /* just warn the user the up/down delay will have
+ * no effect since miimon is zero...
+ */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: miimon module parameter not set "
+ "and updelay (%d) or downdelay (%d) module "
+ "parameter is set; updelay and downdelay have "
+ "no effect unless miimon is set\n",
+ updelay, downdelay);
+ }
+ } else {
+ /* don't allow arp monitoring */
+ if (arp_interval) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: miimon (%d) and arp_interval (%d) "
+ "can't be used simultaneously, disabling ARP "
+ "monitoring\n",
+ miimon, arp_interval);
+ arp_interval = 0;
+ }
+
+ if ((updelay % miimon) != 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: updelay (%d) is not a multiple "
+ "of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon, (updelay / miimon) * miimon);
+ }
+
+ updelay /= miimon;
+
+ if ((downdelay % miimon) != 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: downdelay (%d) is not a multiple "
+ "of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
+ }
+
+ downdelay /= miimon;
+ }
+
+ if (arp_interval < 0) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: arp_interval module parameter (%d) "
+ ", not in range 0-%d, so it was reset to %d\n",
+ arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
+ arp_interval = BOND_LINK_ARP_INTERV;
+ }
+
+ for (arp_ip_count = 0;
+ (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count];
+ arp_ip_count++) {
+ /* not complete check, but should be good enough to
+ catch mistakes */
+ if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: bad arp_ip_target module parameter "
+ "(%s), ARP monitoring will not be performed\n",
+ arp_ip_target[arp_ip_count]);
+ arp_interval = 0;
+ } else {
+ u32 ip = in_aton(arp_ip_target[arp_ip_count]);
+ arp_target[arp_ip_count] = ip;
+ }
+ }
+
+ if (arp_interval && !arp_ip_count) {
+ /* don't allow arping if no arp_ip_target given... */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: arp_interval module parameter (%d) "
+ "specified without providing an arp_ip_target "
+ "parameter, arp_interval was reset to 0\n",
+ arp_interval);
+ arp_interval = 0;
+ }
+
+ if (miimon) {
+ printk(KERN_INFO DRV_NAME
+ ": MII link monitoring set to %d ms\n",
+ miimon);
+ } else if (arp_interval) {
+ int i;
+
+ printk(KERN_INFO DRV_NAME
+ ": ARP monitoring set to %d ms with %d target(s):",
+ arp_interval, arp_ip_count);
+
+ for (i = 0; i < arp_ip_count; i++)
+ printk (" %s", arp_ip_target[i]);
+
+ printk("\n");
+
+ } else {
+ /* miimon and arp_interval not set, we need one so things
+ * work as expected, see bonding.txt for details
+ */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: either miimon or arp_interval and "
+ "arp_ip_target module parameters must be specified, "
+ "otherwise bonding will not detect link failures! see "
+ "bonding.txt for details.\n");
+ }
+
+ if (primary && !USES_PRIMARY(bond_mode)) {
+ /* currently, using a primary only makes sense
+ * in active backup, TLB or ALB modes
+ */
+ printk(KERN_WARNING DRV_NAME
+ ": Warning: %s primary device specified but has no "
+ "effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
+ primary = NULL;
+ }
+
+ /* fill params struct with the proper values */
+ params->mode = bond_mode;
+ params->miimon = miimon;
+ params->arp_interval = arp_interval;
+ params->updelay = updelay;
+ params->downdelay = downdelay;
+ params->use_carrier = use_carrier;
+ params->lacp_fast = lacp_fast;
+ params->primary[0] = 0;
+
+ if (primary) {
+ strncpy(params->primary, primary, IFNAMSIZ);
+ params->primary[IFNAMSIZ - 1] = 0;
+ }
+
+ memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+
+ return 0;
+}
+
+static int __init bonding_init(void)
+{
+ struct bond_params params;
+ int i;
+ int res;
+
+ printk(KERN_INFO "%s", version);
+
+ res = bond_check_params(&params);
+ if (res) {
+ return res;
+ }
+
+ rtnl_lock();
+
+#ifdef CONFIG_PROC_FS
+ bond_create_proc_dir();
+#endif
+
+ for (i = 0; i < max_bonds; i++) {
+ struct net_device *bond_dev;
+
+ bond_dev = alloc_netdev(sizeof(struct bonding), "", ether_setup);
+ if (!bond_dev) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ res = dev_alloc_name(bond_dev, "bond%d");
+ if (res < 0) {
+ free_netdev(bond_dev);
+ goto out_err;
+ }
+
+ /* bond_init() must be called after dev_alloc_name() (for the
+ * /proc files), but before register_netdevice(), because we
+ * need to set function pointers.
+ */
+ res = bond_init(bond_dev, &params);
+ if (res < 0) {
+ free_netdev(bond_dev);
+ goto out_err;
+ }
+
+ SET_MODULE_OWNER(bond_dev);
+
+ res = register_netdevice(bond_dev);
+ if (res < 0) {
+ bond_deinit(bond_dev);
+ free_netdev(bond_dev);
+ goto out_err;
+ }
+ }
+
+ rtnl_unlock();
+ register_netdevice_notifier(&bond_netdev_notifier);
+
+ return 0;
+
+out_err:
+ /* free and unregister all bonds that were successfully added */
+ bond_free_all();
+
+ rtnl_unlock();
+
+ return res;
+}
+
+static void __exit bonding_exit(void)
+{
+ unregister_netdevice_notifier(&bond_netdev_notifier);
+
+ rtnl_lock();
+ bond_free_all();
+ rtnl_unlock();
+}
+
+module_init(bonding_init);
+module_exit(bonding_exit);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
+MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_SUPPORTED_DEVICE("most ethernet devices");
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
new file mode 100644
index 000000000000..8c325308489d
--- /dev/null
+++ b/drivers/net/bonding/bonding.h
@@ -0,0 +1,252 @@
+/*
+ * Bond several ethernet interfaces into a Cisco-style 'Etherchannel' trunk.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ *
+ * 2003/03/18 - Amir Noam <amir.noam at intel dot com>,
+ * Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Shmulik Hen <shmulik.hen at intel dot com>
+ * - Added support for IEEE 802.3ad Dynamic link aggregation mode.
+ *
+ * 2003/05/01 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
+ * Amir Noam <amir.noam at intel dot com>
+ * - Code beautification and style changes (mainly in comments).
+ *
+ * 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Added support for Transmit load balancing mode.
+ *
+ * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
+ * - Code cleanup and style changes
+ */
+
+#ifndef _LINUX_BONDING_H
+#define _LINUX_BONDING_H
+
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/if_bonding.h>
+#include "bond_3ad.h"
+#include "bond_alb.h"
+
+#define DRV_VERSION "2.6.1"
+#define DRV_RELDATE "October 29, 2004"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define BOND_MAX_ARP_TARGETS 16
+
+#ifdef BONDING_DEBUG
+#define dprintk(fmt, args...) \
+ printk(KERN_DEBUG \
+ DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args )
+#else
+#define dprintk(fmt, args...)
+#endif /* BONDING_DEBUG */
+
+#define IS_UP(dev) \
+ ((((dev)->flags & IFF_UP) == IFF_UP) && \
+ netif_running(dev) && \
+ netif_carrier_ok(dev))
+
+/*
+ * Checks whether bond is ready for transmit.
+ *
+ * Caller must hold bond->lock
+ */
+#define BOND_IS_OK(bond) \
+ (((bond)->dev->flags & IFF_UP) && \
+ netif_running((bond)->dev) && \
+ ((bond)->slave_cnt > 0))
+
+/*
+ * Checks whether slave is ready for transmit.
+ */
+#define SLAVE_IS_OK(slave) \
+ (((slave)->dev->flags & IFF_UP) && \
+ netif_running((slave)->dev) && \
+ ((slave)->link == BOND_LINK_UP) && \
+ ((slave)->state == BOND_STATE_ACTIVE))
+
+
+#define USES_PRIMARY(mode) \
+ (((mode) == BOND_MODE_ACTIVEBACKUP) || \
+ ((mode) == BOND_MODE_TLB) || \
+ ((mode) == BOND_MODE_ALB))
+
+/*
+ * Less bad way to call ioctl from within the kernel; this needs to be
+ * done some other way to get the call out of interrupt context.
+ * Needs "ioctl" variable to be supplied by calling context.
+ */
+#define IOCTL(dev, arg, cmd) ({ \
+ int res = 0; \
+ mm_segment_t fs = get_fs(); \
+ set_fs(get_ds()); \
+ res = ioctl(dev, arg, cmd); \
+ set_fs(fs); \
+ res; })
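+
+/*
+ * A minimal usage sketch (illustrative only; assumes the slave driver
+ * provides do_ioctl and that "ifr" was filled in by the caller):
+ *
+ *	int (*ioctl)(struct net_device *, struct ifreq *, int);
+ *	int res;
+ *
+ *	ioctl = slave_dev->do_ioctl;
+ *	if (ioctl)
+ *		res = IOCTL(slave_dev, &ifr, SIOCGMIIPHY);
+ */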
+
+/**
+ * bond_for_each_slave_from - iterate the slaves list from a starting point
+ * @bond: the bond holding this list.
+ * @pos: current slave.
+ * @cnt: counter for max number of moves
+ * @start: starting point.
+ *
+ * Caller must hold bond->lock
+ */
+#define bond_for_each_slave_from(bond, pos, cnt, start) \
+ for (cnt = 0, pos = start; \
+ cnt < (bond)->slave_cnt; \
+ cnt++, pos = (pos)->next)
+
+/**
+ * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point
+ * @bond: the bond holding this list.
+ * @pos: current slave.
+ * @cnt: counter for max number of moves
+ * @start: start point.
+ * @stop: stop point.
+ *
+ * Caller must hold bond->lock
+ */
+#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop) \
+ for (cnt = 0, pos = start; \
+ ((cnt < (bond)->slave_cnt) && (pos != (stop)->next)); \
+ cnt++, pos = (pos)->next)
+
+/**
+ * bond_for_each_slave - iterate the slaves list from head
+ * @bond: the bond holding this list.
+ * @pos: current slave.
+ * @cnt: counter for max number of moves
+ *
+ * Caller must hold bond->lock
+ */
+#define bond_for_each_slave(bond, pos, cnt) \
+ bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
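+
+/*
+ * Typical usage sketch (illustrative only; the caller declares "slave"
+ * and "i" and must hold bond->lock for read):
+ *
+ *	struct slave *slave;
+ *	int i;
+ *
+ *	bond_for_each_slave(bond, slave, i) {
+ *		dprintk("slave %d is %s\n", i, slave->dev->name);
+ *	}
+ */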
+
+
+struct bond_params {
+ int mode;
+ int miimon;
+ int arp_interval;
+ int use_carrier;
+ int updelay;
+ int downdelay;
+ int lacp_fast;
+ char primary[IFNAMSIZ];
+ u32 arp_targets[BOND_MAX_ARP_TARGETS];
+};
+
+struct vlan_entry {
+ struct list_head vlan_list;
+ unsigned short vlan_id;
+};
+
+struct slave {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct slave *next;
+ struct slave *prev;
+ s16 delay;
+ u32 jiffies;
+ s8 link; /* one of BOND_LINK_XXXX */
+ s8 state; /* one of BOND_STATE_XXXX */
+ u32 original_flags;
+ u32 link_failure_count;
+ u16 speed;
+ u8 duplex;
+ u8 perm_hwaddr[ETH_ALEN];
+ struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
+ struct tlb_slave_info tlb_info;
+};
+
+/*
+ * Here are the locking policies for the two bonding locks:
+ *
+ * 1) Get bond->lock when reading/writing slave list.
+ * 2) Get bond->curr_slave_lock when reading/writing bond->curr_active_slave.
+ * (It is unnecessary when the write-lock is put with bond->lock.)
+ * 3) When we lock with bond->curr_slave_lock, we must lock with bond->lock
+ * beforehand.
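+ *
+ * For instance, a reader that needs the current active slave would
+ * nest the locks like this (sketch only):
+ *
+ *	read_lock(&bond->lock);
+ *	read_lock(&bond->curr_slave_lock);
+ *	slave = bond->curr_active_slave;
+ *	read_unlock(&bond->curr_slave_lock);
+ *	read_unlock(&bond->lock);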
+ */
+struct bonding {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct slave *first_slave;
+ struct slave *curr_active_slave;
+ struct slave *current_arp_slave;
+ struct slave *primary_slave;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ rwlock_t lock;
+ rwlock_t curr_slave_lock;
+ struct timer_list mii_timer;
+ struct timer_list arp_timer;
+ s8 kill_timers;
+ struct net_device_stats stats;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+ char proc_file_name[IFNAMSIZ];
+#endif /* CONFIG_PROC_FS */
+ struct list_head bond_list;
+ struct dev_mc_list *mc_list;
+ u16 flags;
+ struct ad_bond_info ad_info;
+ struct alb_bond_info alb_info;
+ struct bond_params params;
+ struct list_head vlan_list;
+ struct vlan_group *vlgrp;
+};
+
+/**
+ * Returns NULL if the net_device does not belong to any of the bond's slaves
+ *
+ * Caller must hold bond lock for read
+ */
+extern inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+{
+ struct slave *slave = NULL;
+ int i;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev == slave_dev) {
+ break;
+ }
+ }
+
+ return slave;
+}
+
+extern inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
+{
+ if (!slave || !slave->dev->master) {
+ return NULL;
+ }
+
+ return (struct bonding *)slave->dev->master->priv;
+}
+
+extern inline void bond_set_slave_inactive_flags(struct slave *slave)
+{
+ slave->state = BOND_STATE_BACKUP;
+ slave->dev->flags |= IFF_NOARP;
+}
+
+extern inline void bond_set_slave_active_flags(struct slave *slave)
+{
+ slave->state = BOND_STATE_ACTIVE;
+ slave->dev->flags &= ~IFF_NOARP;
+}
+
+struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
+int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+
+#endif /* _LINUX_BONDING_H */
+
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
new file mode 100644
index 000000000000..3d88ad622bdb
--- /dev/null
+++ b/drivers/net/bsd_comp.c
@@ -0,0 +1,1179 @@
+/*
+ * Update: The Berkeley copyright was changed, and the change
+ * is retroactive to all "true" BSD software (i.e. everything
+ * from UCB as opposed to other people's code that just carried
+ * the same license). The new copyright doesn't clash with the
+ * GPL, so the module-only restriction has been removed.
+ */
+
+/* Because this code is derived from the 4.3BSD compress source:
+ *
+ * Copyright (c) 1985, 1986 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * James A. Woods, derived from original work by Spencer Thomas
+ * and Joseph Orost.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This version is for use with contiguous buffers on Linux-derived systems.
+ *
+ * ==FILEVERSION 20000226==
+ *
+ * NOTE TO MAINTAINERS:
+ * If you modify this file at all, please set the number above to the
+ * date of the modification as YYMMDD (year month day).
+ * bsd_comp.c is shipped with a PPP distribution as well as with
+ * the kernel; if everyone increases the FILEVERSION number above,
+ * then scripts can do the right thing when deciding whether to
+ * install a new bsd_comp.c file. Don't change the format of that
+ * line otherwise, so the installation script can recognize it.
+ *
+ * From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+
+#include <linux/ppp_defs.h>
+
+#undef PACKETPTR
+#define PACKETPTR 1
+#include <linux/ppp-comp.h>
+#undef PACKETPTR
+
+#include <asm/byteorder.h>
+
+/*
+ * PPP "BSD compress" compression
+ * The differences between this compression and the classic BSD LZW
+ * source are obvious from the requirement that the classic code worked
+ * with files while this handles arbitrarily long streams that
+ * are broken into packets. They are:
+ *
+ * When the code size expands, a block of junk is not emitted by
+ * the compressor and not expected by the decompressor.
+ *
+ * New codes are not necessarily assigned every time an old
+ * code is output by the compressor. This is because a packet
+ * end forces a code to be emitted, but does not imply that a
+ * new sequence has been seen.
+ *
+ * The compression ratio is checked at the first end of a packet
+ * after the appropriate gap. Besides simplifying and speeding
+ * things up, this makes it more likely that the transmitter
+ * and receiver will agree when the dictionary is cleared when
+ * compression is not going well.
+ */
+
+/*
+ * Macros to extract protocol version and number of bits
+ * from the third byte of the BSD Compress CCP configuration option.
+ */
+
+#define BSD_VERSION(x) ((x) >> 5)
+#define BSD_NBITS(x) ((x) & 0x1F)
+
+#define BSD_CURRENT_VERSION 1
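+
+/*
+ * Example (illustrative): an option byte of 0x2c decodes as
+ * BSD_VERSION(0x2c) == 1 and BSD_NBITS(0x2c) == 12, i.e. protocol
+ * version 1 with a 12-bit maximum code size.
+ */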
+
+/*
+ * A dictionary for doing BSD compress.
+ */
+
+struct bsd_dict {
+ union { /* hash value */
+ unsigned long fcode;
+ struct {
+#if defined(__LITTLE_ENDIAN) /* Little endian order */
+ unsigned short prefix; /* preceding code */
+ unsigned char suffix; /* last character of new code */
+ unsigned char pad;
+#elif defined(__BIG_ENDIAN) /* Big endian order */
+ unsigned char pad;
+ unsigned char suffix; /* last character of new code */
+ unsigned short prefix; /* preceding code */
+#else
+#error Endianness not defined...
+#endif
+ } hs;
+ } f;
+ unsigned short codem1; /* output of hash table -1 */
+ unsigned short cptr; /* map code to hash table entry */
+};
+
+struct bsd_db {
+ int totlen; /* length of this structure */
+ unsigned int hsize; /* size of the hash table */
+ unsigned char hshift; /* used in hash function */
+ unsigned char n_bits; /* current bits/code */
+ unsigned char maxbits; /* maximum bits/code */
+ unsigned char debug; /* non-zero if debug desired */
+ unsigned char unit; /* ppp unit number */
+ unsigned short seqno; /* sequence # of next packet */
+ unsigned int mru; /* size of receive (decompress) buffer */
+ unsigned int maxmaxcode; /* largest valid code */
+ unsigned int max_ent; /* largest code in use */
+ unsigned int in_count; /* uncompressed bytes, aged */
+ unsigned int bytes_out; /* compressed bytes, aged */
+ unsigned int ratio; /* recent compression ratio */
+ unsigned int checkpoint; /* when to next check the ratio */
+ unsigned int clear_count; /* times dictionary cleared */
+ unsigned int incomp_count; /* incompressible packets */
+ unsigned int incomp_bytes; /* incompressible bytes */
+ unsigned int uncomp_count; /* uncompressed packets */
+ unsigned int uncomp_bytes; /* uncompressed bytes */
+ unsigned int comp_count; /* compressed packets */
+ unsigned int comp_bytes; /* compressed bytes */
+ unsigned short *lens; /* array of lengths of codes */
+ struct bsd_dict *dict; /* dictionary */
+};
+
+#define BSD_OVHD 2 /* BSD compress overhead/packet */
+#define MIN_BSD_BITS 9
+#define BSD_INIT_BITS MIN_BSD_BITS
+#define MAX_BSD_BITS 15
+
+static void bsd_free (void *state);
+static void *bsd_alloc(unsigned char *options, int opt_len, int decomp);
+static void *bsd_comp_alloc (unsigned char *options, int opt_len);
+static void *bsd_decomp_alloc (unsigned char *options, int opt_len);
+
+static int bsd_init (void *db, unsigned char *options,
+ int opt_len, int unit, int debug, int decomp);
+static int bsd_comp_init (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int debug);
+static int bsd_decomp_init (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int mru,
+ int debug);
+
+static void bsd_reset (void *state);
+static void bsd_comp_stats (void *state, struct compstat *stats);
+
+static int bsd_compress (void *state, unsigned char *rptr,
+ unsigned char *obuf, int isize, int osize);
+static void bsd_incomp (void *state, unsigned char *ibuf, int icnt);
+
+static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
+ unsigned char *obuf, int osize);
+
+/* These are in ppp_generic.c */
+extern int ppp_register_compressor (struct compressor *cp);
+extern void ppp_unregister_compressor (struct compressor *cp);
+
+/*
+ * the next two codes should not be changed lightly, as they must not
+ * lie within the contiguous general code space.
+ */
+#define CLEAR 256 /* table clear output code */
+#define FIRST 257 /* first free entry */
+#define LAST 255
+
+#define MAXCODE(b) ((1 << (b)) - 1)
+#define BADCODEM1 MAXCODE(MAX_BSD_BITS);
+
+#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
+ ^ (unsigned long)(prefix))
+#define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \
+ + (unsigned long)(prefix))
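+
+/*
+ * Worked example (illustrative): with prefix 0x101, suffix 0x41 and
+ * hshift 4, BSD_KEY gives (0x41 << 16) + 0x101 == 0x410101 and
+ * BSD_HASH gives (0x41 << 4) ^ 0x101 == 0x511.
+ */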
+
+#define CHECK_GAP 10000 /* Ratio check interval */
+
+#define RATIO_SCALE_LOG 8
+#define RATIO_SCALE (1<<RATIO_SCALE_LOG)
+#define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG)
+
+/*
+ * clear the dictionary
+ */
+
+static void
+bsd_clear(struct bsd_db *db)
+{
+ db->clear_count++;
+ db->max_ent = FIRST-1;
+ db->n_bits = BSD_INIT_BITS;
+ db->bytes_out = 0;
+ db->in_count = 0;
+ db->ratio = 0;
+ db->checkpoint = CHECK_GAP;
+}
+
+/*
+ * If the dictionary is full, then see if it is time to reset it.
+ *
+ * Compute the compression ratio using fixed-point arithmetic
+ * with 8 fractional bits.
+ *
+ * Since we have an infinite stream instead of a single file,
+ * watch only the local compression ratio.
+ *
+ * Since both peers must reset the dictionary at the same time even in
+ * the absence of CLEAR codes (while packets are incompressible), they
+ * must compute the same ratio.
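+ *
+ * Example (illustrative): with in_count == 10000 and bytes_out == 4000,
+ * new_ratio == (10000 << 8) / 4000 == 640, i.e. roughly 2.5:1 in this
+ * 8-bit fixed-point representation (RATIO_SCALE == 256).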
+ */
+
+static int bsd_check (struct bsd_db *db) /* 1=output CLEAR */
+ {
+ unsigned int new_ratio;
+
+ if (db->in_count >= db->checkpoint)
+ {
+ /* age the ratio by limiting the size of the counts */
+ if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
+ {
+ db->in_count -= (db->in_count >> 2);
+ db->bytes_out -= (db->bytes_out >> 2);
+ }
+
+ db->checkpoint = db->in_count + CHECK_GAP;
+
+ if (db->max_ent >= db->maxmaxcode)
+ {
+ /* Reset the dictionary only if the ratio is worse,
+ * or if it looks as if it has been poisoned
+ * by incompressible data.
+ *
+ * This does not overflow, because
+ * db->in_count <= RATIO_MAX.
+ */
+
+ new_ratio = db->in_count << RATIO_SCALE_LOG;
+ if (db->bytes_out != 0)
+ {
+ new_ratio /= db->bytes_out;
+ }
+
+ if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
+ {
+ bsd_clear (db);
+ return 1;
+ }
+ db->ratio = new_ratio;
+ }
+ }
+ return 0;
+ }
+
+/*
+ * Return statistics.
+ */
+
+static void bsd_comp_stats (void *state, struct compstat *stats)
+ {
+ struct bsd_db *db = (struct bsd_db *) state;
+
+ stats->unc_bytes = db->uncomp_bytes;
+ stats->unc_packets = db->uncomp_count;
+ stats->comp_bytes = db->comp_bytes;
+ stats->comp_packets = db->comp_count;
+ stats->inc_bytes = db->incomp_bytes;
+ stats->inc_packets = db->incomp_count;
+ stats->in_count = db->in_count;
+ stats->bytes_out = db->bytes_out;
+ }
+
+/*
+ * Reset state, as on a CCP ResetReq.
+ */
+
+static void bsd_reset (void *state)
+ {
+ struct bsd_db *db = (struct bsd_db *) state;
+
+ bsd_clear(db);
+
+ db->seqno = 0;
+ db->clear_count = 0;
+ }
+
+/*
+ * Release the compression structure
+ */
+
+static void bsd_free (void *state)
+ {
+ struct bsd_db *db = (struct bsd_db *) state;
+
+ if (db)
+ {
+/*
+ * Release the dictionary
+ */
+ if (db->dict)
+ {
+ vfree (db->dict);
+ db->dict = NULL;
+ }
+/*
+ * Release the string buffer
+ */
+ if (db->lens)
+ {
+ vfree (db->lens);
+ db->lens = NULL;
+ }
+/*
+ * Finally release the structure itself.
+ */
+ kfree (db);
+ }
+ }
+
+/*
+ * Allocate space for a (de) compressor.
+ */
+
+static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
+ {
+ int bits;
+ unsigned int hsize, hshift, maxmaxcode;
+ struct bsd_db *db;
+
+ if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
+ || BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
+ {
+ return NULL;
+ }
+
+ bits = BSD_NBITS(options[2]);
+
+ switch (bits)
+ {
+ case 9: /* needs 82152 for both directions */
+ case 10: /* needs 84144 */
+ case 11: /* needs 88240 */
+ case 12: /* needs 96432 */
+ hsize = 5003;
+ hshift = 4;
+ break;
+ case 13: /* needs 176784 */
+ hsize = 9001;
+ hshift = 5;
+ break;
+ case 14: /* needs 353744 */
+ hsize = 18013;
+ hshift = 6;
+ break;
+ case 15: /* needs 691440 */
+ hsize = 35023;
+ hshift = 7;
+ break;
+ case 16: /* needs 1366160--far too much, */
+ /* hsize = 69001; */ /* and 69001 is too big for cptr */
+ /* hshift = 8; */ /* in struct bsd_db */
+ /* break; */
+ default:
+ return NULL;
+ }
+/*
+ * Allocate the main control structure for this instance.
+ */
+ maxmaxcode = MAXCODE(bits);
+ db = (struct bsd_db *) kmalloc (sizeof (struct bsd_db),
+ GFP_KERNEL);
+ if (!db)
+ {
+ return NULL;
+ }
+
+ memset (db, 0, sizeof(struct bsd_db));
+/*
+ * Allocate space for the dictionary. This may be more than one page in
+ * length.
+ */
+ db->dict = (struct bsd_dict *) vmalloc (hsize *
+ sizeof (struct bsd_dict));
+ if (!db->dict)
+ {
+ bsd_free (db);
+ return NULL;
+ }
+
+/*
+ * If this is the compression buffer then there is no length data.
+ */
+ if (!decomp)
+ {
+ db->lens = NULL;
+ }
+/*
+ * For decompression, the length information is needed as well.
+ */
+ else
+ {
+ db->lens = (unsigned short *) vmalloc ((maxmaxcode + 1) *
+ sizeof (db->lens[0]));
+ if (!db->lens)
+ {
+ bsd_free (db);
+ return (NULL);
+ }
+ }
+/*
+ * Initialize the data information for the compression code
+ */
+ db->totlen = sizeof (struct bsd_db) +
+ (sizeof (struct bsd_dict) * hsize);
+
+ db->hsize = hsize;
+ db->hshift = hshift;
+ db->maxmaxcode = maxmaxcode;
+ db->maxbits = bits;
+
+ return (void *) db;
+ }
+
+static void *bsd_comp_alloc (unsigned char *options, int opt_len)
+ {
+ return bsd_alloc (options, opt_len, 0);
+ }
+
+static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
+ {
+ return bsd_alloc (options, opt_len, 1);
+ }
+
+/*
+ * Initialize the database.
+ */
+
+static int bsd_init (void *state, unsigned char *options,
+ int opt_len, int unit, int debug, int decomp)
+ {
+ struct bsd_db *db = state;
+ int indx;
+
+ if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
+ || (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
+ || (BSD_NBITS(options[2]) != db->maxbits)
+ || (decomp && db->lens == NULL))
+ {
+ return 0;
+ }
+
+ if (decomp)
+ {
+ indx = LAST;
+ do
+ {
+ db->lens[indx] = 1;
+ }
+ while (indx-- > 0);
+ }
+
+ indx = db->hsize;
+ while (indx-- != 0)
+ {
+ db->dict[indx].codem1 = BADCODEM1;
+ db->dict[indx].cptr = 0;
+ }
+
+ db->unit = unit;
+ db->mru = 0;
+#ifndef DEBUG
+ if (debug)
+#endif
+ db->debug = 1;
+
+ bsd_reset(db);
+
+ return 1;
+ }
+
+static int bsd_comp_init (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int debug)
+ {
+ return bsd_init (state, options, opt_len, unit, debug, 0);
+ }
+
+static int bsd_decomp_init (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int mru,
+ int debug)
+ {
+ return bsd_init (state, options, opt_len, unit, debug, 1);
+ }
+
+/*
+ * Obtain pointers to the various structures in the compression tables
+ */
+
+#define dict_ptrx(p,idx) &(p->dict[idx])
+#define lens_ptrx(p,idx) &(p->lens[idx])
+
+#ifdef DEBUG
+static unsigned short *lens_ptr(struct bsd_db *db, int idx)
+ {
+ if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
+ {
+ printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
+ idx = 0;
+ }
+ return lens_ptrx (db, idx);
+ }
+
+static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
+ {
+ if ((unsigned int) idx >= (unsigned int) db->hsize)
+ {
+ printk ("<9>ppp: dict_ptr(%d) > max\n", idx);
+ idx = 0;
+ }
+ return dict_ptrx (db, idx);
+ }
+
+#else
+#define lens_ptr(db,idx) lens_ptrx(db,idx)
+#define dict_ptr(db,idx) dict_ptrx(db,idx)
+#endif
+
+/*
+ * compress a packet
+ *
+ * The result of this function is the size of the compressed
+ * packet. A zero is returned if the packet was not compressed
+ * for some reason, such as the compressed size exceeding the
+ * uncompressed size.
+ *
+ * One change from the BSD compress command is that when the
+ * code size expands, we do not output a bunch of padding.
+ */
+
+static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf,
+ int isize, int osize)
+ {
+ struct bsd_db *db;
+ int hshift;
+ unsigned int max_ent;
+ unsigned int n_bits;
+ unsigned int bitno;
+ unsigned long accm;
+ int ent;
+ unsigned long fcode;
+ struct bsd_dict *dictp;
+ unsigned char c;
+ int hval;
+ int disp;
+ int ilen;
+ int mxcode;
+ unsigned char *wptr;
+ int olen;
+
+#define PUTBYTE(v) \
+ { \
+ ++olen; \
+ if (wptr) \
+ { \
+ *wptr++ = (unsigned char) (v); \
+ if (olen >= osize) \
+ { \
+ wptr = NULL; \
+ } \
+ } \
+ }
+
+#define OUTPUT(ent) \
+ { \
+ bitno -= n_bits; \
+ accm |= ((ent) << bitno); \
+ do \
+ { \
+ PUTBYTE(accm >> 24); \
+ accm <<= 8; \
+ bitno += 8; \
+ } \
+ while (bitno <= 24); \
+ }
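+
+ /*
+ * Illustrative example of the packing above: with n_bits == 9 and
+ * bitno == 32, OUTPUT(0x101) lowers bitno to 23 and ORs in
+ * 0x101 << 23 (0x80800000); the loop then emits one byte (0x80)
+ * and leaves bitno at 31, with the code's last bit still buffered
+ * in accm for the next code.
+ */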
+
+ /*
+ * If the protocol is not in the range we're interested in,
+ * just return without compressing the packet. If it is,
+ * the protocol becomes the first byte to compress.
+ */
+
+ ent = PPP_PROTOCOL(rptr);
+ if (ent < 0x21 || ent > 0xf9)
+ {
+ return 0;
+ }
+
+ db = (struct bsd_db *) state;
+ hshift = db->hshift;
+ max_ent = db->max_ent;
+ n_bits = db->n_bits;
+ bitno = 32;
+ accm = 0;
+ mxcode = MAXCODE (n_bits);
+
+ /* Initialize the output pointers */
+ wptr = obuf;
+ olen = PPP_HDRLEN + BSD_OVHD;
+
+ if (osize > isize)
+ {
+ osize = isize;
+ }
+
+ /* This is the PPP header information */
+ if (wptr)
+ {
+ *wptr++ = PPP_ADDRESS(rptr);
+ *wptr++ = PPP_CONTROL(rptr);
+ *wptr++ = 0;
+ *wptr++ = PPP_COMP;
+ *wptr++ = db->seqno >> 8;
+ *wptr++ = db->seqno;
+ }
+
+ /* Skip the input header */
+ rptr += PPP_HDRLEN;
+ isize -= PPP_HDRLEN;
+ ilen = ++isize; /* Low byte of protocol is counted as input */
+
+ while (--ilen > 0)
+ {
+ c = *rptr++;
+ fcode = BSD_KEY (ent, c);
+ hval = BSD_HASH (ent, c, hshift);
+ dictp = dict_ptr (db, hval);
+
+ /* Validate and then check the entry. */
+ if (dictp->codem1 >= max_ent)
+ {
+ goto nomatch;
+ }
+
+ if (dictp->f.fcode == fcode)
+ {
+ ent = dictp->codem1 + 1;
+ continue; /* found (prefix,suffix) */
+ }
+
+ /* continue probing until a match or invalid entry */
+ disp = (hval == 0) ? 1 : hval;
+
+ do
+ {
+ hval += disp;
+ if (hval >= db->hsize)
+ {
+ hval -= db->hsize;
+ }
+ dictp = dict_ptr (db, hval);
+ if (dictp->codem1 >= max_ent)
+ {
+ goto nomatch;
+ }
+ }
+ while (dictp->f.fcode != fcode);
+
+ ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
+ continue;
+
+nomatch:
+ OUTPUT(ent); /* output the prefix */
+
+ /* code -> hashtable */
+ if (max_ent < db->maxmaxcode)
+ {
+ struct bsd_dict *dictp2;
+ struct bsd_dict *dictp3;
+ int indx;
+
+ /* expand code size if needed */
+ if (max_ent >= mxcode)
+ {
+ db->n_bits = ++n_bits;
+ mxcode = MAXCODE (n_bits);
+ }
+
+ /* Invalidate old hash table entry using
+ * this code, and then take it over.
+ */
+
+ dictp2 = dict_ptr (db, max_ent + 1);
+ indx = dictp2->cptr;
+ dictp3 = dict_ptr (db, indx);
+
+ if (dictp3->codem1 == max_ent)
+ {
+ dictp3->codem1 = BADCODEM1;
+ }
+
+ dictp2->cptr = hval;
+ dictp->codem1 = max_ent;
+ dictp->f.fcode = fcode;
+ db->max_ent = ++max_ent;
+
+ if (db->lens)
+ {
+ unsigned short *len1 = lens_ptr (db, max_ent);
+ unsigned short *len2 = lens_ptr (db, ent);
+ *len1 = *len2 + 1;
+ }
+ }
+ ent = c;
+ }
+
+ OUTPUT(ent); /* output the last code */
+
+ db->bytes_out += olen - PPP_HDRLEN - BSD_OVHD;
+ db->uncomp_bytes += isize;
+ db->in_count += isize;
+ ++db->uncomp_count;
+ ++db->seqno;
+
+ if (bitno < 32)
+ {
+ ++db->bytes_out; /* must be set before calling bsd_check */
+ }
+
+ /*
+ * Generate the clear command if needed
+ */
+
+ if (bsd_check(db))
+ {
+ OUTPUT (CLEAR);
+ }
+
+ /*
+ * Pad dribble bits of last code with ones.
+ * Do not emit a completely useless byte of ones.
+ */
+
+ if (bitno != 32)
+ {
+ PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
+ }
+
+ /*
+ * Increase code size if we would have without the packet
+ * boundary because the decompressor will do so.
+ */
+
+ if (max_ent >= mxcode && max_ent < db->maxmaxcode)
+ {
+ db->n_bits++;
+ }
+
+ /* If output length is too large then this is an incomplete frame. */
+ if (wptr == NULL)
+ {
+ ++db->incomp_count;
+ db->incomp_bytes += isize;
+ olen = 0;
+ }
+ else /* Count the number of compressed frames */
+ {
+ ++db->comp_count;
+ db->comp_bytes += olen;
+ }
+
+ /* Return the resulting output length */
+ return olen;
+#undef OUTPUT
+#undef PUTBYTE
+ }
+
+/*
+ * Update the "BSD Compress" dictionary on the receiver for
+ * incompressible data by pretending to compress the incoming data.
+ */
+
+static void bsd_incomp (void *state, unsigned char *ibuf, int icnt)
+ {
+ (void) bsd_compress (state, ibuf, (char *) 0, icnt, 0);
+ }
+
+/*
+ * Decompress "BSD Compress".
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors that could arguably be detected
+ * only "after" decompression. For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+
+static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
+ unsigned char *obuf, int osize)
+ {
+ struct bsd_db *db;
+ unsigned int max_ent;
+ unsigned long accm;
+ unsigned int bitno; /* 1st valid bit in accm */
+ unsigned int n_bits;
+ unsigned int tgtbitno; /* bitno when we have a code */
+ struct bsd_dict *dictp;
+ int explen;
+ int seq;
+ unsigned int incode;
+ unsigned int oldcode;
+ unsigned int finchar;
+ unsigned char *p;
+ unsigned char *wptr;
+ int adrs;
+ int ctrl;
+ int ilen;
+ int codelen;
+ int extra;
+
+ db = (struct bsd_db *) state;
+ max_ent = db->max_ent;
+ accm = 0;
+ bitno = 32; /* 1st valid bit in accm */
+ n_bits = db->n_bits;
+ tgtbitno = 32 - n_bits; /* bitno when we have a code */
+
+ /*
+ * Save the address/control from the PPP header
+ * and then get the sequence number.
+ */
+
+ adrs = PPP_ADDRESS (ibuf);
+ ctrl = PPP_CONTROL (ibuf);
+
+ seq = (ibuf[4] << 8) + ibuf[5];
+
+ ibuf += (PPP_HDRLEN + 2);
+ ilen = isize - (PPP_HDRLEN + 2);
+
+ /*
+ * Check the sequence number and give up if it differs from
+ * the value we're expecting.
+ */
+
+ if (seq != db->seqno)
+ {
+ if (db->debug)
+ {
+ printk("bsd_decomp%d: bad sequence # %d, expected %d\n",
+ db->unit, seq, db->seqno - 1);
+ }
+ return DECOMP_ERROR;
+ }
+
+ ++db->seqno;
+ db->bytes_out += ilen;
+
+ /*
+ * Fill in the ppp header, but not the last byte of the protocol
+ * (that comes from the decompressed data).
+ */
+
+ wptr = obuf;
+ *wptr++ = adrs;
+ *wptr++ = ctrl;
+ *wptr++ = 0;
+
+ oldcode = CLEAR;
+ explen = 3;
+
+ /*
+ * Keep the checkpoint correctly so that incompressible packets
+ * clear the dictionary at the proper times.
+ */
+
+ for (;;)
+ {
+ if (ilen-- <= 0)
+ {
+ db->in_count += (explen - 3); /* don't count the header */
+ break;
+ }
+
+ /*
+ * Accumulate bytes until we have a complete code.
+ * Then get the next code, relying on the 32-bit,
+ * unsigned accm to mask the result.
+ */
+
+ bitno -= 8;
+ accm |= *ibuf++ << bitno;
+ if (tgtbitno < bitno)
+ {
+ continue;
+ }
+
+ incode = accm >> tgtbitno;
+ accm <<= n_bits;
+ bitno += n_bits;
+
+ /*
+ * The dictionary must only be cleared at the end of a packet.
+ */
+
+ if (incode == CLEAR)
+ {
+ if (ilen > 0)
+ {
+ if (db->debug)
+ {
+ printk("bsd_decomp%d: bad CLEAR\n", db->unit);
+ }
+ return DECOMP_FATALERROR; /* probably a bug */
+ }
+
+ bsd_clear(db);
+ break;
+ }
+
+ if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
+ || (incode > max_ent && oldcode == CLEAR))
+ {
+ if (db->debug)
+ {
+ printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
+ db->unit, incode, oldcode);
+ printk("max_ent=0x%x explen=%d seqno=%d\n",
+ max_ent, explen, db->seqno);
+ }
+ return DECOMP_FATALERROR; /* probably a bug */
+ }
+
+ /* Special case for KwKwK string. */
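+ /* (A code one past max_ent is the classic LZW "KwKwK" case: the
+ * new string is the previous string plus its own first character,
+ * which the code below reproduces by decoding oldcode and then
+ * appending its first character, held in finchar after the loop.)
+ */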
+ if (incode > max_ent)
+ {
+ finchar = oldcode;
+ extra = 1;
+ }
+ else
+ {
+ finchar = incode;
+ extra = 0;
+ }
+
+ codelen = *(lens_ptr (db, finchar));
+ explen += codelen + extra;
+ if (explen > osize)
+ {
+ if (db->debug)
+ {
+ printk("bsd_decomp%d: ran out of mru\n", db->unit);
+#ifdef DEBUG
+ printk(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
+ ilen, finchar, codelen, explen);
+#endif
+ }
+ return DECOMP_FATALERROR;
+ }
+
+ /*
+ * Decode this code and install it in the decompressed buffer.
+ */
+
+ wptr += codelen;
+ p = wptr;
+ while (finchar > LAST)
+ {
+ struct bsd_dict *dictp2 = dict_ptr (db, finchar);
+
+ dictp = dict_ptr (db, dictp2->cptr);
+#ifdef DEBUG
+ if (--codelen <= 0 || dictp->codem1 != finchar-1)
+ {
+ if (codelen <= 0)
+ {
+ printk("bsd_decomp%d: fell off end of chain ", db->unit);
+ printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
+ incode, finchar, dictp2->cptr, max_ent);
+ }
+ else
+ {
+ if (dictp->codem1 != finchar-1)
+ {
+ printk("bsd_decomp%d: bad code chain 0x%x "
+ "finchar=0x%x ",
+ db->unit, incode, finchar);
+
+ printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n",
+ oldcode, dictp2->cptr, dictp->codem1);
+ }
+ }
+ return DECOMP_FATALERROR;
+ }
+#endif
+ *--p = dictp->f.hs.suffix;
+ finchar = dictp->f.hs.prefix;
+ }
+ *--p = finchar;
+
+#ifdef DEBUG
+ if (--codelen != 0)
+ {
+ printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
+ db->unit, codelen, incode, max_ent);
+ }
+#endif
+
+ if (extra) /* the KwKwK case again */
+ {
+ *wptr++ = finchar;
+ }
+
+ /*
+ * If not first code in a packet, and
+ * if not out of code space, then allocate a new code.
+ *
+ * Keep the hash table correct so it can be used
+ * with uncompressed packets.
+ */
+
+ if (oldcode != CLEAR && max_ent < db->maxmaxcode)
+ {
+ struct bsd_dict *dictp2, *dictp3;
+ unsigned short *lens1, *lens2;
+ unsigned long fcode;
+ int hval, disp, indx;
+
+ fcode = BSD_KEY(oldcode,finchar);
+ hval = BSD_HASH(oldcode,finchar,db->hshift);
+ dictp = dict_ptr (db, hval);
+
+ /* look for a free hash table entry */
+ if (dictp->codem1 < max_ent)
+ {
+ disp = (hval == 0) ? 1 : hval;
+ do
+ {
+ hval += disp;
+ if (hval >= db->hsize)
+ {
+ hval -= db->hsize;
+ }
+ dictp = dict_ptr (db, hval);
+ }
+ while (dictp->codem1 < max_ent);
+ }
+
+ /*
+ * Invalidate previous hash table entry
+ * assigned this code, and then take it over
+ */
+
+ dictp2 = dict_ptr (db, max_ent + 1);
+ indx = dictp2->cptr;
+ dictp3 = dict_ptr (db, indx);
+
+ if (dictp3->codem1 == max_ent)
+ {
+ dictp3->codem1 = BADCODEM1;
+ }
+
+ dictp2->cptr = hval;
+ dictp->codem1 = max_ent;
+ dictp->f.fcode = fcode;
+ db->max_ent = ++max_ent;
+
+ /* Update the length of this string. */
+ lens1 = lens_ptr (db, max_ent);
+ lens2 = lens_ptr (db, oldcode);
+ *lens1 = *lens2 + 1;
+
+ /* Expand code size if needed. */
+ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
+ {
+ db->n_bits = ++n_bits;
+ tgtbitno = 32-n_bits;
+ }
+ }
+ oldcode = incode;
+ }
+
+ ++db->comp_count;
+ ++db->uncomp_count;
+ db->comp_bytes += isize - BSD_OVHD - PPP_HDRLEN;
+ db->uncomp_bytes += explen;
+
+ if (bsd_check(db))
+ {
+ if (db->debug)
+ {
+ printk("bsd_decomp%d: peer should have cleared dictionary on %d\n",
+ db->unit, db->seqno - 1);
+ }
+ }
+ return explen;
+ }
+
+/*************************************************************
+ * Table of addresses for the BSD compression module
+ *************************************************************/
+
+static struct compressor ppp_bsd_compress = {
+ .compress_proto = CI_BSD_COMPRESS,
+ .comp_alloc = bsd_comp_alloc,
+ .comp_free = bsd_free,
+ .comp_init = bsd_comp_init,
+ .comp_reset = bsd_reset,
+ .compress = bsd_compress,
+ .comp_stat = bsd_comp_stats,
+ .decomp_alloc = bsd_decomp_alloc,
+ .decomp_free = bsd_free,
+ .decomp_init = bsd_decomp_init,
+ .decomp_reset = bsd_reset,
+ .decompress = bsd_decompress,
+ .incomp = bsd_incomp,
+ .decomp_stat = bsd_comp_stats,
+ .owner = THIS_MODULE
+};
+
+/*************************************************************
+ * Module support routines
+ *************************************************************/
+
+static int __init bsdcomp_init(void)
+{
+ int answer = ppp_register_compressor(&ppp_bsd_compress);
+ if (answer == 0)
+ printk(KERN_INFO "PPP BSD Compression module registered\n");
+ return answer;
+}
+
+static void __exit bsdcomp_cleanup(void)
+{
+ ppp_unregister_compressor(&ppp_bsd_compress);
+}
+
+module_init(bsdcomp_init);
+module_exit(bsdcomp_cleanup);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/cris/Makefile b/drivers/net/cris/Makefile
new file mode 100644
index 000000000000..b4e8932227b6
--- /dev/null
+++ b/drivers/net/cris/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
new file mode 100644
index 000000000000..442670860fca
--- /dev/null
+++ b/drivers/net/cris/eth_v10.c
@@ -0,0 +1,1836 @@
+/* $Id: ethernet.c,v 1.31 2004/10/18 14:49:03 starvik Exp $
+ *
+ * e100net.c: A network driver for the ETRAX 100LX network controller.
+ *
+ * Copyright (c) 1998-2002 Axis Communications AB.
+ *
+ * The outline of this driver comes from skeleton.c.
+ *
+ * $Log: ethernet.c,v $
+ * Revision 1.31 2004/10/18 14:49:03 starvik
+ * Use RX interrupt as random source
+ *
+ * Revision 1.30 2004/09/29 10:44:04 starvik
+ * Enabled MAC-address output again
+ *
+ * Revision 1.29 2004/08/24 07:14:05 starvik
+ * Make use of generic MDIO interface and constants.
+ *
+ * Revision 1.28 2004/08/20 09:37:11 starvik
+ * Added support for Intel LXT972A. Creds to Randy Scarborough.
+ *
+ * Revision 1.27 2004/08/16 12:37:22 starvik
+ * Merge of Linux 2.6.8
+ *
+ * Revision 1.25 2004/06/21 10:29:57 starvik
+ * Merge of Linux 2.6.7
+ *
+ * Revision 1.23 2004/06/09 05:29:22 starvik
+ * Avoid any race where R_DMA_CH1_FIRST is NULL (may trigger cache bug).
+ *
+ * Revision 1.22 2004/05/14 07:58:03 starvik
+ * Merge of changes from 2.4
+ *
+ * Revision 1.20 2004/03/11 11:38:40 starvik
+ * Merge of Linux 2.6.4
+ *
+ * Revision 1.18 2003/12/03 13:45:46 starvik
+ * Use hardware pad for short packets to prevent information leakage.
+ *
+ * Revision 1.17 2003/07/04 08:27:37 starvik
+ * Merge of Linux 2.5.74
+ *
+ * Revision 1.16 2003/04/24 08:28:22 starvik
+ * New LED behaviour: LED off when no link
+ *
+ * Revision 1.15 2003/04/09 05:20:47 starvik
+ * Merge of Linux 2.5.67
+ *
+ * Revision 1.13 2003/03/06 16:11:01 henriken
+ * Off by one error in group address register setting.
+ *
+ * Revision 1.12 2003/02/27 17:24:19 starvik
+ * Corrected Rev to Revision
+ *
+ * Revision 1.11 2003/01/24 09:53:21 starvik
+ * Oops. Initialize GA to 0, not to 1
+ *
+ * Revision 1.10 2003/01/24 09:50:55 starvik
+ * Initialize GA_0 and GA_1 to 0 to avoid matching of unwanted packets
+ *
+ * Revision 1.9 2002/12/13 07:40:58 starvik
+ * Added basic ethtool interface
+ * Handled out of memory when allocating new buffers
+ *
+ * Revision 1.8 2002/12/11 13:13:57 starvik
+ * Added arch/ to v10 specific includes
+ * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer)
+ *
+ * Revision 1.7 2002/11/26 09:41:42 starvik
+ * Added e100_set_config (standard interface to set media type)
+ * Added protection against preemptive scheduling
+ * Added standard MII ioctls
+ *
+ * Revision 1.6 2002/11/21 07:18:18 starvik
+ * Timers must be initialized in 2.5.48
+ *
+ * Revision 1.5 2002/11/20 11:56:11 starvik
+ * Merge of Linux 2.5.48
+ *
+ * Revision 1.4 2002/11/18 07:26:46 starvik
+ * Linux 2.5 port of latest Linux 2.4 ethernet driver
+ *
+ * Revision 1.33 2002/10/02 20:16:17 hp
+ * SETF, SETS: Use underscored IO_x_ macros rather than incorrect token concatenation
+ *
+ * Revision 1.32 2002/09/16 06:05:58 starvik
+ * Align memory returned by dev_alloc_skb
+ * Moved handling of sent packets to interrupt to avoid reference counting problem
+ *
+ * Revision 1.31 2002/09/10 13:28:23 larsv
+ * Return -EINVAL for unknown ioctls to avoid confusing tools that test
+ * for supported functionality by issuing special ioctls, e.g. wireless
+ * extensions.
+ *
+ * Revision 1.30 2002/05/07 18:50:08 johana
+ * Correct spelling in comments.
+ *
+ * Revision 1.29 2002/05/06 05:38:49 starvik
+ * Performance improvements:
+ * Large packets are not copied (breakpoint set to 256 bytes)
+ * The cache bug workaround is delayed until half of the receive list
+ * has been used
+ * Added transmit list
+ * Transmit interrupts are only enabled when transmit queue is full
+ *
+ * Revision 1.28.2.1 2002/04/30 08:15:51 starvik
+ * Performance improvements:
+ * Large packets are not copied (breakpoint set to 256 bytes)
+ * The cache bug workaround is delayed until half of the receive list
+ * has been used.
+ * Added transmit list
+ * Transmit interrupts are only enabled when transmit queue is full
+ *
+ * Revision 1.28 2002/04/22 11:47:21 johana
+ * Fix according to 2.4.19-pre7. time_after/time_before and
+ * missing end of comment.
+ * The patch has a typo for ethernet.c in e100_clear_network_leds(),
+ * that is fixed here.
+ *
+ * Revision 1.27 2002/04/12 11:55:11 bjornw
+ * Added TODO
+ *
+ * Revision 1.26 2002/03/15 17:11:02 bjornw
+ * Use prepare_rx_descriptor after the CPU has touched the receiving descs
+ *
+ * Revision 1.25 2002/03/08 13:07:53 bjornw
+ * Unnecessary spinlock removed
+ *
+ * Revision 1.24 2002/02/20 12:57:43 fredriks
+ * Replaced MIN() with min().
+ *
+ * Revision 1.23 2002/02/20 10:58:14 fredriks
+ * Strip the Ethernet checksum (4 bytes) before forwarding a frame to upper layers.
+ *
+ * Revision 1.22 2002/01/30 07:48:22 matsfg
+ * Initiate R_NETWORK_TR_CTRL
+ *
+ * Revision 1.21 2001/11/23 11:54:49 starvik
+ * Added IFF_PROMISC and IFF_ALLMULTI handling in set_multicast_list
+ * Removed compiler warnings
+ *
+ * Revision 1.20 2001/11/12 19:26:00 pkj
+ * * Corrected e100_negotiate() to not assign half to current_duplex when
+ * it was supposed to compare them...
+ * * Cleaned up failure handling in e100_open().
+ * * Fixed compiler warnings.
+ *
+ * Revision 1.19 2001/11/09 07:43:09 starvik
+ * Added full duplex support
+ * Added ioctl to set speed and duplex
+ * Clear LED timer only runs when LED is lit
+ *
+ * Revision 1.18 2001/10/03 14:40:43 jonashg
+ * Update rx_bytes counter.
+ *
+ * Revision 1.17 2001/06/11 12:43:46 olof
+ * Modified defines for network LED behavior
+ *
+ * Revision 1.16 2001/05/30 06:12:46 markusl
+ * TxDesc.next should not be set to NULL
+ *
+ * Revision 1.15 2001/05/29 10:27:04 markusl
+ * Updated after review remarks:
+ * +Use IO_EXTRACT
+ * +Handle underrun
+ *
+ * Revision 1.14 2001/05/29 09:20:14 jonashg
+ * Use driver name on printk output so one can tell which driver that complains.
+ *
+ * Revision 1.13 2001/05/09 12:35:59 johana
+ * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h
+ *
+ * Revision 1.12 2001/04/05 11:43:11 tobiasa
+ * Check dev before panic.
+ *
+ * Revision 1.11 2001/04/04 11:21:05 markusl
+ * Updated according to review remarks
+ *
+ * Revision 1.10 2001/03/26 16:03:06 bjornw
+ * Needs linux/config.h
+ *
+ * Revision 1.9 2001/03/19 14:47:48 pkj
+ * * Make sure there is always a pause after the network LEDs are
+ * changed so they will not look constantly lit during heavy traffic.
+ * * Always use HZ when setting times relative to jiffies.
+ * * Use LED_NETWORK_SET() when setting the network LEDs.
+ *
+ * Revision 1.8 2001/02/27 13:52:48 bjornw
+ * malloc.h -> slab.h
+ *
+ * Revision 1.7 2001/02/23 13:46:38 bjornw
+ * Spelling check
+ *
+ * Revision 1.6 2001/01/26 15:21:04 starvik
+ * Don't disable interrupts while reading MDIO registers (MDIO is slow)
+ * Corrected promiscuous mode
+ * Improved deallocation of IRQs ("ifconfig eth0 down" now works)
+ *
+ * Revision 1.5 2000/11/29 17:22:22 bjornw
+ * Get rid of the udword types legacy stuff
+ *
+ * Revision 1.4 2000/11/22 16:36:09 bjornw
+ * Please marketing by using the correct case when spelling Etrax.
+ *
+ * Revision 1.3 2000/11/21 16:43:04 bjornw
+ * Minor short->int change
+ *
+ * Revision 1.2 2000/11/08 14:27:57 bjornw
+ * 2.4 port
+ *
+ * Revision 1.1 2000/11/06 13:56:00 bjornw
+ * Verbatim copy of the 1.24 version of e100net.c from elinux
+ *
+ * Revision 1.24 2000/10/04 15:55:23 bjornw
+ * * Use virt_to_phys etc. for DMA addresses
+ * * Removed bogus CHECKSUM_UNNECESSARY
+ *
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <linux/if.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+
+#include <asm/arch/svinto.h>	/* DMA and register descriptions */
+#include <asm/io.h> /* LED_* I/O functions */
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/ethernet.h>
+#include <asm/cache.h>
+
+//#define ETHDEBUG
+#define D(x)
+
+/*
+ * The name of the card. It is used for messages and in the requests for
+ * I/O regions, IRQs and DMA channels.
+ */
+
+static const char* cardname = "ETRAX 100LX built-in ethernet controller";
+
+/* A default ethernet address. High-level SW will set the real one later */
+
+static struct sockaddr default_mac = {
+ 0,
+ { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
+};
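+
+/* 00:40:8C is the Axis Communications OUI; the real station address is
+ * normally set later via e100_set_mac_address() or the "etrax100_eth="
+ * boot argument parsed by e100_boot_setup().
+ */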
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ struct mii_if_info mii_if;
+
+ /* Tx control lock. This protects the transmit buffer ring
+ * state along with the "tx full" state of the driver. This
+ * means all netif_queue flow control actions are protected
+ * by this lock as well.
+ */
+ spinlock_t lock;
+};
+
+typedef struct etrax_eth_descr
+{
+ etrax_dma_descr descr;
+ struct sk_buff* skb;
+} etrax_eth_descr;
+
+/* Some transceivers require special handling */
+struct transceiver_ops
+{
+ unsigned int oui;
+ void (*check_speed)(struct net_device* dev);
+ void (*check_duplex)(struct net_device* dev);
+};
+
+struct transceiver_ops* transceiver;
+
+/* Duplex settings */
+enum duplex
+{
+ half,
+ full,
+ autoneg
+};
+
+/* Dma descriptors etc. */
+
+#define MAX_MEDIA_DATA_SIZE 1518
+
+#define MIN_PACKET_LEN 46
+#define ETHER_HEAD_LEN 14
+
+/*
+** MDIO constants.
+*/
+#define MDIO_START 0x1
+#define MDIO_READ 0x2
+#define MDIO_WRITE 0x1
+#define MDIO_PREAMBLE 0xfffffffful
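+/*
+ * The constants above encode a standard MDIO (IEEE 802.3 clause 22) frame
+ * as bit-banged by e100_send_mdio_cmd(): 32 preamble bits (all ones), a
+ * 2-bit start delimiter (01), a 2-bit opcode (10 = read, 01 = write),
+ * 5 PHY address bits, 5 register address bits, a 2-bit turnaround and
+ * finally 16 data bits, MSB first.
+ */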
+
+/* Broadcom specific */
+#define MDIO_AUX_CTRL_STATUS_REG 0x18
+#define MDIO_BC_FULL_DUPLEX_IND 0x1
+#define MDIO_BC_SPEED 0x2
+
+/* TDK specific */
+#define MDIO_TDK_DIAGNOSTIC_REG 18
+#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
+#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
+
+/*Intel LXT972A specific*/
+#define MDIO_INT_STATUS_REG_2 0x0011
+#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
+#define MDIO_INT_SPEED ( 1 << 14 )
+
+/* Network flash constants */
+#define NET_FLASH_TIME (HZ/50) /* 20 ms */
+#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
+#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */
+#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */
+
+#define NO_NETWORK_ACTIVITY 0
+#define NETWORK_ACTIVITY 1
+
+#define NBR_OF_RX_DESC 64
+#define NBR_OF_TX_DESC 256
+
+/* Large packets are sent directly to upper layers while small packets are */
+/* copied (to reduce memory waste). The following constant decides the breakpoint */
+#define RX_COPYBREAK 256
+
+/* Due to a chip bug we need to flush the cache when descriptors are returned */
+/* to the DMA. To decrease performance impact we return descriptors in chunks. */
+/* The following constant determines the number of descriptors to return. */
+#define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2
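+/* With NBR_OF_RX_DESC = 64 this gives a threshold of 32, i.e. the cache
+ * flush and end-of-list update in e100_rx() happen once per 32 received
+ * frames rather than for every frame.
+ */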
+
+#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
+
+/* Define some macros to access ETRAX 100 registers */
+#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
+ IO_FIELD_(reg##_, field##_, val)
+#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
+ IO_STATE_(reg##_, field##_, _##val)
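+/* For example, as in e100_open():
+ *	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
+ * updates the named field in the software shadow copy of the register;
+ * the shadow is then written to the hardware register in one go:
+ *	*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ */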
+
+static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor
+                                           to be processed */
+static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
+static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */
+
+static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
+
+static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
+static etrax_eth_descr* myLastTxDesc; /* End of send queue */
+static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
+static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
+
+static unsigned int network_rec_config_shadow = 0;
+static unsigned int mdio_phy_addr; /* Transceiver address */
+
+static unsigned int network_tr_ctrl_shadow = 0;
+
+/* Network speed indication. */
+static struct timer_list speed_timer = TIMER_INITIALIZER(NULL, 0, 0);
+static struct timer_list clear_led_timer = TIMER_INITIALIZER(NULL, 0, 0);
+static int current_speed; /* Speed read from transceiver */
+static int current_speed_selection; /* Speed selected by user */
+static unsigned long led_next_time;
+static int led_active;
+static int rx_queue_len;
+
+/* Duplex */
+static struct timer_list duplex_timer = TIMER_INITIALIZER(NULL, 0, 0);
+static int full_duplex;
+static enum duplex current_duplex;
+
+/* Index to functions, as function prototypes. */
+
+static int etrax_ethernet_init(void);
+
+static int e100_open(struct net_device *dev);
+static int e100_set_mac_address(struct net_device *dev, void *addr);
+static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t e100nw_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void e100_rx(struct net_device *dev);
+static int e100_close(struct net_device *dev);
+static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int e100_ethtool_ioctl(struct net_device* dev, struct ifreq *ifr);
+static int e100_set_config(struct net_device* dev, struct ifmap* map);
+static void e100_tx_timeout(struct net_device *dev);
+static struct net_device_stats *e100_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void e100_hardware_send_packet(char *buf, int length);
+static void update_rx_stats(struct net_device_stats *);
+static void update_tx_stats(struct net_device_stats *);
+static int e100_probe_transceiver(struct net_device* dev);
+
+static void e100_check_speed(unsigned long priv);
+static void e100_set_speed(struct net_device* dev, unsigned long speed);
+static void e100_check_duplex(unsigned long priv);
+static void e100_set_duplex(struct net_device* dev, enum duplex);
+static void e100_negotiate(struct net_device* dev);
+
+static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
+static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
+
+static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
+static void e100_send_mdio_bit(unsigned char bit);
+static unsigned char e100_receive_mdio_bit(void);
+static void e100_reset_transceiver(struct net_device* net);
+
+static void e100_clear_network_leds(unsigned long dummy);
+static void e100_set_network_leds(int active);
+
+static void broadcom_check_speed(struct net_device* dev);
+static void broadcom_check_duplex(struct net_device* dev);
+static void tdk_check_speed(struct net_device* dev);
+static void tdk_check_duplex(struct net_device* dev);
+static void intel_check_speed(struct net_device* dev);
+static void intel_check_duplex(struct net_device* dev);
+static void generic_check_speed(struct net_device* dev);
+static void generic_check_duplex(struct net_device* dev);
+
+struct transceiver_ops transceivers[] =
+{
+ {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
+ {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
+ {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
+ {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
+ {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
+};
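+/* The table above is matched against the OUI that e100_probe_transceiver()
+ * extracts from the PHY ID registers (MII_PHYSID1/MII_PHYSID2); the all-zero
+ * entry terminates the search and selects the generic handlers.
+ */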
+
+#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+
+static int __init
+etrax_ethernet_init(void)
+{
+ struct net_device *dev;
+ struct net_local* np;
+ int i, err;
+
+ printk(KERN_INFO
+ "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
+
+	dev = alloc_etherdev(sizeof(struct net_local));
+	if (!dev)
+		return -ENOMEM;
+
+	np = dev->priv;
+
+ dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
+
+ /* now setup our etrax specific stuff */
+
+ dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
+ dev->dma = NETWORK_RX_DMA_NBR;
+
+ /* fill in our handlers so the network layer can talk to us in the future */
+
+ dev->open = e100_open;
+ dev->hard_start_xmit = e100_send_packet;
+ dev->stop = e100_close;
+ dev->get_stats = e100_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->set_mac_address = e100_set_mac_address;
+ dev->do_ioctl = e100_ioctl;
+ dev->set_config = e100_set_config;
+ dev->tx_timeout = e100_tx_timeout;
+
+ /* Initialise the list of Etrax DMA-descriptors */
+
+ /* Initialise receive descriptors */
+
+ for (i = 0; i < NBR_OF_RX_DESC; i++) {
+ /* Allocate two extra cachelines to make sure that buffer used by DMA
+ * does not share cacheline with any other data (to avoid cache bug)
+ */
+ RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
+ RxDescList[i].descr.ctrl = 0;
+ RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
+ RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]);
+ RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
+ RxDescList[i].descr.status = 0;
+ RxDescList[i].descr.hw_len = 0;
+ prepare_rx_descriptor(&RxDescList[i].descr);
+ }
+
+ RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol;
+ RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]);
+ rx_queue_len = 0;
+
+ /* Initialize transmit descriptors */
+ for (i = 0; i < NBR_OF_TX_DESC; i++) {
+ TxDescList[i].descr.ctrl = 0;
+ TxDescList[i].descr.sw_len = 0;
+ TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr);
+ TxDescList[i].descr.buf = 0;
+ TxDescList[i].descr.status = 0;
+ TxDescList[i].descr.hw_len = 0;
+ TxDescList[i].skb = 0;
+ }
+
+ TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol;
+ TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr);
+
+ /* Initialise initial pointers */
+
+ myNextRxDesc = &RxDescList[0];
+ myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
+ myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
+ myFirstTxDesc = &TxDescList[0];
+ myNextTxDesc = &TxDescList[0];
+ myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
+
+ /* Register device */
+ err = register_netdev(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+
+ /* set the default MAC address */
+
+ e100_set_mac_address(dev, &default_mac);
+
+ /* Initialize speed indicator stuff. */
+
+ current_speed = 10;
+ current_speed_selection = 0; /* Auto */
+ speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
+	speed_timer.data = (unsigned long)dev;
+ speed_timer.function = e100_check_speed;
+
+ clear_led_timer.function = e100_clear_network_leds;
+
+ full_duplex = 0;
+ current_duplex = autoneg;
+ duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
+ duplex_timer.data = (unsigned long)dev;
+ duplex_timer.function = e100_check_duplex;
+
+ /* Initialize mii interface */
+ np->mii_if.phy_id = mdio_phy_addr;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = e100_get_mdio_reg;
+ np->mii_if.mdio_write = e100_set_mdio_reg;
+
+ /* Initialize group address registers to make sure that no */
+ /* unwanted addresses are matched */
+ *R_NETWORK_GA_0 = 0x00000000;
+ *R_NETWORK_GA_1 = 0x00000000;
+ return 0;
+}
+
+/* Set the MAC address of the interface. Called from the core after a
+ * SIOCSIFADDR ioctl, and from the bootup code above.
+ */
+
+static int
+e100_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_local *np = (struct net_local *)dev->priv;
+ struct sockaddr *addr = p;
+ int i;
+
+ spin_lock(&np->lock); /* preemption protection */
+
+ /* remember it */
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /* Write it to the hardware.
+ * Note the way the address is wrapped:
+ * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
+ * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
+ */
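+	/* For example, the default address 00:40:8C:CD:00:00 ends up as
+	 * R_NETWORK_SA_0 = 0xCD8C4000 and R_NETWORK_SA_1 = 0x00000000.
+	 */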
+
+ *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
+ (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
+ *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
+ *R_NETWORK_SA_2 = 0;
+
+ /* show it in the log as well */
+
+ printk(KERN_INFO "%s: changed MAC to ", dev->name);
+
+ for (i = 0; i < 5; i++)
+ printk("%02X:", dev->dev_addr[i]);
+
+ printk("%02X\n", dev->dev_addr[i]);
+
+ spin_unlock(&np->lock);
+
+ return 0;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int
+e100_open(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /* enable the MDIO output pin */
+
+ *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
+
+ *R_IRQ_MASK0_CLR =
+ IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
+ IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
+ IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
+
+ /* clear dma0 and 1 eop and descr irq masks */
+ *R_IRQ_MASK2_CLR =
+ IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+
+ /* Reset and wait for the DMA channels */
+
+ RESET_DMA(NETWORK_TX_DMA_NBR);
+ RESET_DMA(NETWORK_RX_DMA_NBR);
+ WAIT_DMA(NETWORK_TX_DMA_NBR);
+ WAIT_DMA(NETWORK_RX_DMA_NBR);
+
+ /* Initialise the etrax network controller */
+
+ /* allocate the irq corresponding to the receiving DMA */
+
+ if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
+ SA_SAMPLE_RANDOM, cardname, (void *)dev)) {
+ goto grace_exit0;
+ }
+
+ /* allocate the irq corresponding to the transmitting DMA */
+
+ if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
+ cardname, (void *)dev)) {
+ goto grace_exit1;
+ }
+
+ /* allocate the irq corresponding to the network errors etc */
+
+ if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
+ cardname, (void *)dev)) {
+ goto grace_exit2;
+ }
+
+ /* give the HW an idea of what MAC address we want */
+
+ *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
+ (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
+ *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
+ *R_NETWORK_SA_2 = 0;
+
+#if 0
+ /* use promiscuous mode for testing */
+ *R_NETWORK_GA_0 = 0xffffffff;
+ *R_NETWORK_GA_1 = 0xffffffff;
+
+ *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
+#else
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
+ SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+#endif
+
+ *R_NETWORK_GEN_CONFIG =
+ IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) |
+ IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
+
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
+ *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
+
+ save_flags(flags);
+ cli();
+
+ /* enable the irq's for ethernet DMA */
+
+ *R_IRQ_MASK2_SET =
+ IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
+ IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
+
+ *R_IRQ_MASK0_SET =
+ IO_STATE(R_IRQ_MASK0_SET, overrun, set) |
+ IO_STATE(R_IRQ_MASK0_SET, underrun, set) |
+ IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
+
+ /* make sure the irqs are cleared */
+
+ *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
+ *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
+
+ /* make sure the rec and transmit error counters are cleared */
+
+ (void)*R_REC_COUNTERS; /* dummy read */
+ (void)*R_TR_COUNTERS; /* dummy read */
+
+ /* start the receiving DMA channel so we can receive packets from now on */
+
+ *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
+ *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
+
+ /* Set up transmit DMA channel so it can be restarted later */
+
+ *R_DMA_CH0_FIRST = 0;
+ *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+
+ restore_flags(flags);
+
+ /* Probe for transceiver */
+ if (e100_probe_transceiver(dev))
+ goto grace_exit3;
+
+ /* Start duplex/speed timers */
+ add_timer(&speed_timer);
+ add_timer(&duplex_timer);
+
+	/* We are now ready to accept transmit requests from
+	 * the queueing layer of the networking stack.
+ */
+ netif_start_queue(dev);
+
+ return 0;
+
+grace_exit3:
+ free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
+grace_exit2:
+ free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
+grace_exit1:
+ free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
+grace_exit0:
+ return -EAGAIN;
+}
+
+
+static void
+generic_check_speed(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ if ((data & ADVERTISE_100FULL) ||
+ (data & ADVERTISE_100HALF))
+ current_speed = 100;
+ else
+ current_speed = 10;
+}
+
+static void
+tdk_check_speed(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
+}
+
+static void
+broadcom_check_speed(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
+}
+
+static void
+intel_check_speed(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
+}
+
+static void
+e100_check_speed(unsigned long priv)
+{
+ struct net_device* dev = (struct net_device*)priv;
+ static int led_initiated = 0;
+ unsigned long data;
+ int old_speed = current_speed;
+
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
+ if (!(data & BMSR_LSTATUS)) {
+ current_speed = 0;
+ } else {
+ transceiver->check_speed(dev);
+ }
+
+ if ((old_speed != current_speed) || !led_initiated) {
+ led_initiated = 1;
+ e100_set_network_leds(NO_NETWORK_ACTIVITY);
+ }
+
+ /* Reinitialize the timer. */
+ speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
+ add_timer(&speed_timer);
+}
+
+static void
+e100_negotiate(struct net_device* dev)
+{
+ unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+
+ /* Discard old speed and duplex settings */
+ data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
+ ADVERTISE_10HALF | ADVERTISE_10FULL);
+
+ switch (current_speed_selection) {
+ case 10 :
+ if (current_duplex == full)
+ data |= ADVERTISE_10FULL;
+ else if (current_duplex == half)
+ data |= ADVERTISE_10HALF;
+ else
+ data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ break;
+
+ case 100 :
+ if (current_duplex == full)
+ data |= ADVERTISE_100FULL;
+ else if (current_duplex == half)
+ data |= ADVERTISE_100HALF;
+ else
+ data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ break;
+
+ case 0 : /* Auto */
+ if (current_duplex == full)
+ data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
+ else if (current_duplex == half)
+ data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
+ else
+ data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL;
+ break;
+
+ default : /* assume autoneg speed and duplex */
+ data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL;
+ }
+
+ e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
+
+ /* Renegotiate with link partner */
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+ data |= BMCR_ANENABLE | BMCR_ANRESTART;
+
+ e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
+}
+
+static void
+e100_set_speed(struct net_device* dev, unsigned long speed)
+{
+ if (speed != current_speed_selection) {
+ current_speed_selection = speed;
+ e100_negotiate(dev);
+ }
+}
+
+static void
+e100_check_duplex(unsigned long priv)
+{
+ struct net_device *dev = (struct net_device *)priv;
+ struct net_local *np = (struct net_local *)dev->priv;
+ int old_duplex = full_duplex;
+ transceiver->check_duplex(dev);
+ if (old_duplex != full_duplex) {
+ /* Duplex changed */
+ SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ }
+
+ /* Reinitialize the timer. */
+ duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
+ add_timer(&duplex_timer);
+ np->mii_if.full_duplex = full_duplex;
+}
+
+static void
+generic_check_duplex(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ if ((data & ADVERTISE_10FULL) ||
+ (data & ADVERTISE_100FULL))
+ full_duplex = 1;
+ else
+ full_duplex = 0;
+}
+
+static void
+tdk_check_duplex(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
+}
+
+static void
+broadcom_check_duplex(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
+}
+
+static void
+intel_check_duplex(struct net_device* dev)
+{
+ unsigned long data;
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
+}
+
+static void
+e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
+{
+ if (new_duplex != current_duplex) {
+ current_duplex = new_duplex;
+ e100_negotiate(dev);
+ }
+}
+
+static int
+e100_probe_transceiver(struct net_device* dev)
+{
+ unsigned int phyid_high;
+ unsigned int phyid_low;
+ unsigned int oui;
+ struct transceiver_ops* ops = NULL;
+
+ /* Probe MDIO physical address */
+ for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
+ if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
+ break;
+ }
+ if (mdio_phy_addr == 32)
+ return -ENODEV;
+
+ /* Get manufacturer */
+ phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
+ phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
+ oui = (phyid_high << 6) | (phyid_low >> 10);
+
+ for (ops = &transceivers[0]; ops->oui; ops++) {
+ if (ops->oui == oui)
+ break;
+ }
+ transceiver = ops;
+
+ return 0;
+}
+
+static int
+e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
+{
+ unsigned short cmd; /* Data to be sent on MDIO port */
+ int data; /* Data read from MDIO */
+ int bitCounter;
+
+ /* Start of frame, OP Code, Physical Address, Register Address */
+ cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
+ (location << 2);
+
+ e100_send_mdio_cmd(cmd, 0);
+
+ data = 0;
+
+ /* Data... */
+ for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
+ data |= (e100_receive_mdio_bit() << bitCounter);
+ }
+
+ return data;
+}
+
+static void
+e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
+{
+ int bitCounter;
+ unsigned short cmd;
+
+ cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
+ (location << 2);
+
+ e100_send_mdio_cmd(cmd, 1);
+
+ /* Data... */
+ for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
+ e100_send_mdio_bit(GET_BIT(bitCounter, value));
+ }
+
+}
+
+static void
+e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
+{
+ int bitCounter;
+ unsigned char data = 0x2;
+
+ /* Preamble */
+ for (bitCounter = 31; bitCounter>= 0; bitCounter--)
+ e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
+
+ for (bitCounter = 15; bitCounter >= 2; bitCounter--)
+ e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
+
+ /* Turnaround */
+ for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
+ if (write_cmd)
+ e100_send_mdio_bit(GET_BIT(bitCounter, data));
+ else
+ e100_receive_mdio_bit();
+}
+
+static void
+e100_send_mdio_bit(unsigned char bit)
+{
+ *R_NETWORK_MGM_CTRL =
+ IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
+ IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
+ udelay(1);
+ *R_NETWORK_MGM_CTRL =
+ IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
+ IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
+ IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
+ udelay(1);
+}
+
+static unsigned char
+e100_receive_mdio_bit()
+{
+ unsigned char bit;
+ *R_NETWORK_MGM_CTRL = 0;
+ bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
+ udelay(1);
+ *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
+ udelay(1);
+ return bit;
+}
+
+static void
+e100_reset_transceiver(struct net_device* dev)
+{
+ unsigned short cmd;
+ unsigned short data;
+ int bitCounter;
+
+ data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+
+ cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2);
+
+ e100_send_mdio_cmd(cmd, 1);
+
+	data |= BMCR_RESET;
+
+ for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
+ e100_send_mdio_bit(GET_BIT(bitCounter, data));
+ }
+}
+
+/* Called by upper layers if they decide it took too long to complete
+ * sending a packet - we need to reset and stuff.
+ */
+
+static void
+e100_tx_timeout(struct net_device *dev)
+{
+ struct net_local *np = (struct net_local *)dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ problem" : "network cable problem");
+
+ /* remember we got an error */
+
+ np->stats.tx_errors++;
+
+ /* reset the TX DMA in case it has hung on something */
+
+ RESET_DMA(NETWORK_TX_DMA_NBR);
+ WAIT_DMA(NETWORK_TX_DMA_NBR);
+
+ /* Reset the transceiver. */
+
+ e100_reset_transceiver(dev);
+
+ /* and get rid of the packets that never got an interrupt */
+ while (myFirstTxDesc != myNextTxDesc)
+ {
+ dev_kfree_skb(myFirstTxDesc->skb);
+ myFirstTxDesc->skb = 0;
+ myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+ }
+
+ /* Set up transmit DMA channel so it can be restarted later */
+ *R_DMA_CH0_FIRST = 0;
+ *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+
+ /* tell the upper layers we're ok again */
+
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+/* This will only be invoked if the driver is _not_ in XOFF state.
+ * What this means is that we need not check it, and that this
+ * invariant will hold if we make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+
+static int
+e100_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *np = (struct net_local *)dev->priv;
+ unsigned char *buf = skb->data;
+ unsigned long flags;
+
+#ifdef ETHDEBUG
+	printk("send packet len %d\n", skb->len);
+#endif
+ spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */
+
+ myNextTxDesc->skb = skb;
+
+ dev->trans_start = jiffies;
+
+ e100_hardware_send_packet(buf, skb->len);
+
+ myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
+
+ /* Stop queue if full */
+ if (myNextTxDesc == myFirstTxDesc) {
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+
+static irqreturn_t
+e100rxtx_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct net_local *np = (struct net_local *)dev->priv;
+ unsigned long irqbits = *R_IRQ_MASK2_RD;
+
+ /* Disable RX/TX IRQs to avoid reentrancy */
+ *R_IRQ_MASK2_CLR =
+ IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+
+ /* Handle received packets */
+ if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
+ /* acknowledge the eop interrupt */
+
+ *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
+
+ /* check if one or more complete packets were indeed received */
+
+ while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
+ (myNextRxDesc != myLastRxDesc)) {
+ /* Take out the buffer and give it to the OS, then
+ * allocate a new buffer to put a packet in.
+ */
+ e100_rx(dev);
+ ((struct net_local *)dev->priv)->stats.rx_packets++;
+ /* restart/continue on the channel, for safety */
+ *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
+ /* clear dma channel 1 eop/descr irq bits */
+ *R_DMA_CH1_CLR_INTR =
+ IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
+ IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);
+
+ /* now, we might have gotten another packet
+ so we have to loop back and check if so */
+ }
+ }
+
+ /* Report any packets that have been sent */
+ while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
+ myFirstTxDesc != myNextTxDesc)
+ {
+ np->stats.tx_bytes += myFirstTxDesc->skb->len;
+ np->stats.tx_packets++;
+
+ /* dma is ready with the transmission of the data in tx_skb, so now
+ we can release the skb memory */
+ dev_kfree_skb_irq(myFirstTxDesc->skb);
+ myFirstTxDesc->skb = 0;
+ myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+ }
+
+ if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
+ /* acknowledge the eop interrupt and wake up queue */
+ *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
+ netif_wake_queue(dev);
+ }
+
+ /* Enable RX/TX IRQs again */
+ *R_IRQ_MASK2_SET =
+ IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
+ IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+e100nw_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct net_local *np = (struct net_local *)dev->priv;
+ unsigned long irqbits = *R_IRQ_MASK0_RD;
+
+ /* check for underrun irq */
+ if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
+ *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
+ np->stats.tx_errors++;
+		D(printk("ethernet transmitter underrun!\n"));
+ }
+
+ /* check for overrun irq */
+ if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
+ update_rx_stats(&np->stats); /* this will ack the irq */
+ D(printk("ethernet receiver overrun!\n"));
+ }
+ /* check for excessive collision irq */
+ if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
+ *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
+ SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
+ np->stats.tx_errors++;
+ D(printk("ethernet excessive collisions!\n"));
+ }
+ return IRQ_HANDLED;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+e100_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int length = 0;
+ struct net_local *np = (struct net_local *)dev->priv;
+ unsigned char *skb_data_ptr;
+#ifdef ETHDEBUG
+ int i;
+#endif
+
+ if (!led_active && time_after(jiffies, led_next_time)) {
+ /* light the network leds depending on the current speed. */
+ e100_set_network_leds(NETWORK_ACTIVITY);
+
+ /* Set the earliest time we may clear the LED */
+ led_next_time = jiffies + NET_FLASH_TIME;
+ led_active = 1;
+ mod_timer(&clear_led_timer, jiffies + HZ/10);
+ }
+
+ length = myNextRxDesc->descr.hw_len - 4;
+ ((struct net_local *)dev->priv)->stats.rx_bytes += length;
+
+#ifdef ETHDEBUG
+ printk("Got a packet of length %d:\n", length);
+ /* dump the first bytes in the packet */
+ skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
+ for (i = 0; i < 8; i++) {
+ printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
+ skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
+ skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
+ skb_data_ptr += 8;
+ }
+#endif
+
+ if (length < RX_COPYBREAK) {
+ /* Small packet, copy data */
+ skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
+ if (!skb) {
+ np->stats.rx_errors++;
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ return;
+ }
+
+ skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
+ skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */
+
+#ifdef ETHDEBUG
+ printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
+ skb->head, skb->data, skb->tail, skb->end);
+ printk("copying packet to 0x%x.\n", skb_data_ptr);
+#endif
+
+ memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
+ }
+ else {
+ /* Large packet, send directly to upper layers and allocate new
+ * memory (aligned to cache line boundary to avoid bug).
+ * Before sending the skb to upper layers we must make sure that
+ * skb->data points to the aligned start of the packet.
+ */
+ int align;
+ struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
+ if (!new_skb) {
+ np->stats.rx_errors++;
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ return;
+ }
+ skb = myNextRxDesc->skb;
+ align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
+ skb_put(skb, length + align);
+ skb_pull(skb, align); /* Remove alignment bytes */
+ myNextRxDesc->skb = new_skb;
+ myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
+ }
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Send the packet to the upper layers */
+ netif_rx(skb);
+
+ /* Prepare for next packet */
+ myNextRxDesc->descr.status = 0;
+ myPrevRxDesc = myNextRxDesc;
+ myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
+
+ rx_queue_len++;
+
+ /* Check if descriptors should be returned */
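+	/* They are handed back in a batch: flush the cache (the chip bug
+	 * workaround), move the end-of-list marker from the old last
+	 * descriptor to the most recently processed one and restart the count.
+	 */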
+ if (rx_queue_len == RX_QUEUE_THRESHOLD) {
+ flush_etrax_cache();
+ myPrevRxDesc->descr.ctrl |= d_eol;
+ myLastRxDesc->descr.ctrl &= ~d_eol;
+ myLastRxDesc = myPrevRxDesc;
+ rx_queue_len = 0;
+ }
+}
+
+/* The inverse routine to net_open(). */
+static int
+e100_close(struct net_device *dev)
+{
+ struct net_local *np = (struct net_local *)dev->priv;
+
+ printk(KERN_INFO "Closing %s.\n", dev->name);
+
+ netif_stop_queue(dev);
+
+ *R_IRQ_MASK0_CLR =
+ IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
+ IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
+ IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
+
+ *R_IRQ_MASK2_CLR =
+ IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
+ IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+
+ /* Stop the receiver and the transmitter */
+
+ RESET_DMA(NETWORK_TX_DMA_NBR);
+ RESET_DMA(NETWORK_RX_DMA_NBR);
+
+ /* Flush the Tx and disable Rx here. */
+
+ free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
+ free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
+ free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
+
+ /* Update the statistics here. */
+
+ update_rx_stats(&np->stats);
+ update_tx_stats(&np->stats);
+
+ /* Stop speed/duplex timers */
+ del_timer(&speed_timer);
+ del_timer(&duplex_timer);
+
+ return 0;
+}
+
+static int
+e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->lock); /* Preempt protection */
+ switch (cmd) {
+		case SIOCETHTOOL:
+			spin_unlock(&np->lock);
+			return e100_ethtool_ioctl(dev, ifr);
+ case SIOCGMIIPHY: /* Get PHY address */
+ data->phy_id = mdio_phy_addr;
+ break;
+ case SIOCGMIIREG: /* Read MII register */
+ data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
+ break;
+ case SIOCSMIIREG: /* Write MII register */
+ e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
+ break;
+ /* The ioctls below should be considered obsolete but are */
+		/* still present for compatibility with old scripts/apps */
+ case SET_ETH_SPEED_10: /* 10 Mbps */
+ e100_set_speed(dev, 10);
+ break;
+ case SET_ETH_SPEED_100: /* 100 Mbps */
+ e100_set_speed(dev, 100);
+ break;
+ case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
+ e100_set_speed(dev, 0);
+ break;
+ case SET_ETH_DUPLEX_HALF: /* Half duplex. */
+ e100_set_duplex(dev, half);
+ break;
+ case SET_ETH_DUPLEX_FULL: /* Full duplex. */
+ e100_set_duplex(dev, full);
+ break;
+ case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
+ e100_set_duplex(dev, autoneg);
+ break;
+		default:
+			spin_unlock(&np->lock);
+			return -EINVAL;
+ }
+ spin_unlock(&np->lock);
+ return 0;
+}
+
+static int
+e100_ethtool_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct ethtool_cmd ecmd;
+
+ if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
+ return -EFAULT;
+
+ switch (ecmd.cmd) {
+ case ETHTOOL_GSET:
+ {
+ memset((void *) &ecmd, 0, sizeof (ecmd));
+ ecmd.supported =
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
+ ecmd.port = PORT_TP;
+ ecmd.transceiver = XCVR_EXTERNAL;
+ ecmd.phy_address = mdio_phy_addr;
+ ecmd.speed = current_speed;
+ ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd.advertising = ADVERTISED_TP;
+ if (current_duplex == autoneg && current_speed_selection == 0)
+ ecmd.advertising |= ADVERTISED_Autoneg;
+ else {
+ ecmd.advertising |=
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ if (current_speed_selection == 10)
+ ecmd.advertising &= ~(ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full);
+ else if (current_speed_selection == 100)
+ ecmd.advertising &= ~(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full);
+ if (current_duplex == half)
+ ecmd.advertising &= ~(ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full);
+ else if (current_duplex == full)
+ ecmd.advertising &= ~(ADVERTISED_10baseT_Half | ADVERTISED_100baseT_Half);
+ }
+ ecmd.autoneg = AUTONEG_ENABLE;
+ if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
+ return -EFAULT;
+ }
+ break;
+ case ETHTOOL_SSET:
+ {
+ if (!capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+ if (ecmd.autoneg == AUTONEG_ENABLE) {
+ e100_set_duplex(dev, autoneg);
+ e100_set_speed(dev, 0);
+ } else {
+ e100_set_duplex(dev, ecmd.duplex == DUPLEX_HALF ? half : full);
+ e100_set_speed(dev, ecmd.speed == SPEED_10 ? 10: 100);
+ }
+ }
+ break;
+ case ETHTOOL_GDRVINFO:
+ {
+ struct ethtool_drvinfo info;
+ memset((void *) &info, 0, sizeof (info));
+ strncpy(info.driver, "ETRAX 100LX", sizeof(info.driver) - 1);
+ strncpy(info.version, "$Revision: 1.31 $", sizeof(info.version) - 1);
+ strncpy(info.fw_version, "N/A", sizeof(info.fw_version) - 1);
+ strncpy(info.bus_info, "N/A", sizeof(info.bus_info) - 1);
+ info.regdump_len = 0;
+ info.eedump_len = 0;
+ info.testinfo_len = 0;
+ if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
+ return -EFAULT;
+ }
+ break;
+ case ETHTOOL_NWAY_RST:
+ if (current_duplex == autoneg && current_speed_selection == 0)
+ e100_negotiate(dev);
+ break;
+ default:
+			return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+e100_set_config(struct net_device *dev, struct ifmap *map)
+{
+ struct net_local *np = (struct net_local *)dev->priv;
+ spin_lock(&np->lock); /* Preempt protection */
+
+ switch(map->port) {
+ case IF_PORT_UNKNOWN:
+ /* Use autoneg */
+ e100_set_speed(dev, 0);
+ e100_set_duplex(dev, autoneg);
+ break;
+ case IF_PORT_10BASET:
+ e100_set_speed(dev, 10);
+ e100_set_duplex(dev, autoneg);
+ break;
+ case IF_PORT_100BASET:
+ case IF_PORT_100BASETX:
+ e100_set_speed(dev, 100);
+ e100_set_duplex(dev, autoneg);
+ break;
+ case IF_PORT_100BASEFX:
+ case IF_PORT_10BASE2:
+ case IF_PORT_AUI:
+ spin_unlock(&np->lock);
+			return -EOPNOTSUPP;
+ default:
+			printk(KERN_ERR "%s: Invalid media selected.\n", dev->name);
+ spin_unlock(&np->lock);
+ return -EINVAL;
+ }
+ spin_unlock(&np->lock);
+ return 0;
+}
+
+static void
+update_rx_stats(struct net_device_stats *es)
+{
+ unsigned long r = *R_REC_COUNTERS;
+ /* update stats relevant to reception errors */
+ es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
+ es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
+ es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
+ es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
+}
+
+static void
+update_tx_stats(struct net_device_stats *es)
+{
+ unsigned long r = *R_TR_COUNTERS;
+ /* update stats relevant to transmission errors */
+ es->collisions +=
+ IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
+ IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
+ es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *
+e100_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned long flags;
+ spin_lock_irqsave(&lp->lock, flags);
+
+ update_rx_stats(&lp->stats);
+ update_tx_stats(&lp->stats);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int num_addr = dev->mc_count;
+ unsigned long int lo_bits;
+ unsigned long int hi_bits;
+ spin_lock(&lp->lock);
+ if (dev->flags & IFF_PROMISC)
+ {
+ /* promiscuous mode */
+ lo_bits = 0xfffffffful;
+ hi_bits = 0xfffffffful;
+
+ /* Enable individual receive */
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicasts */
+ lo_bits = 0xfffffffful;
+ hi_bits = 0xfffffffful;
+
+ /* Disable individual receive */
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ } else if (num_addr == 0) {
+ /* Normal, clear the mc list */
+ lo_bits = 0x00000000ul;
+ hi_bits = 0x00000000ul;
+
+ /* Disable individual receive */
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ } else {
+ /* MC mode, receive normal and MC packets */
+ char hash_ix;
+ struct dev_mc_list *dmi = dev->mc_list;
+ int i;
+ char *baddr;
+ lo_bits = 0x00000000ul;
+ hi_bits = 0x00000000ul;
+ for (i=0; i<num_addr; i++) {
+ /* Calculate the hash index for the GA registers */
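+			/* The 48-bit address is split into eight consecutive
+			 * 6-bit groups which are XOR-ed into a 6-bit hash;
+			 * indices 0-31 set a bit in R_NETWORK_GA_0 below,
+			 * indices 32-63 a bit in R_NETWORK_GA_1.
+			 */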
+
+ hash_ix = 0;
+ baddr = dmi->dmi_addr;
+ hash_ix ^= (*baddr) & 0x3f;
+ hash_ix ^= ((*baddr) >> 6) & 0x03;
+ ++baddr;
+ hash_ix ^= ((*baddr) << 2) & 0x03c;
+ hash_ix ^= ((*baddr) >> 4) & 0xf;
+ ++baddr;
+ hash_ix ^= ((*baddr) << 4) & 0x30;
+ hash_ix ^= ((*baddr) >> 2) & 0x3f;
+ ++baddr;
+ hash_ix ^= (*baddr) & 0x3f;
+ hash_ix ^= ((*baddr) >> 6) & 0x03;
+ ++baddr;
+ hash_ix ^= ((*baddr) << 2) & 0x03c;
+ hash_ix ^= ((*baddr) >> 4) & 0xf;
+ ++baddr;
+ hash_ix ^= ((*baddr) << 4) & 0x30;
+ hash_ix ^= ((*baddr) >> 2) & 0x3f;
+
+ hash_ix &= 0x3f;
+
+ if (hash_ix >= 32) {
+ hi_bits |= (1 << (hash_ix-32));
+ }
+ else {
+ lo_bits |= (1 << hash_ix);
+ }
+ dmi = dmi->next;
+ }
+ /* Disable individual receive */
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
+ *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
+ }
+ *R_NETWORK_GA_0 = lo_bits;
+ *R_NETWORK_GA_1 = hi_bits;
+ spin_unlock(&lp->lock);
+}
+
+void
+e100_hardware_send_packet(char *buf, int length)
+{
+ D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
+
+ if (!led_active && time_after(jiffies, led_next_time)) {
+ /* light the network leds depending on the current speed. */
+ e100_set_network_leds(NETWORK_ACTIVITY);
+
+ /* Set the earliest time we may clear the LED */
+ led_next_time = jiffies + NET_FLASH_TIME;
+ led_active = 1;
+ mod_timer(&clear_led_timer, jiffies + HZ/10);
+ }
+
+ /* configure the tx dma descriptor */
+ myNextTxDesc->descr.sw_len = length;
+ myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
+ myNextTxDesc->descr.buf = virt_to_phys(buf);
+
+ /* Move end of list */
+ myLastTxDesc->descr.ctrl &= ~d_eol;
+ myLastTxDesc = myNextTxDesc;
+
+ /* Restart DMA channel */
+ *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
+}
+
+static void
+e100_clear_network_leds(unsigned long dummy)
+{
+ if (led_active && time_after(jiffies, led_next_time)) {
+ e100_set_network_leds(NO_NETWORK_ACTIVITY);
+
+ /* Set the earliest time we may set the LED */
+ led_next_time = jiffies + NET_FLASH_PAUSE;
+ led_active = 0;
+ }
+}
+
+static void
+e100_set_network_leds(int active)
+{
+#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
+ int light_leds = (active == NO_NETWORK_ACTIVITY);
+#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
+ int light_leds = (active == NETWORK_ACTIVITY);
+#else
+#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
+#endif
+
+ if (!current_speed) {
+ /* Make LED red, link is down */
+#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
+ LED_NETWORK_SET(LED_RED);
+#else
+ LED_NETWORK_SET(LED_OFF);
+#endif
+ }
+ else if (light_leds) {
+ if (current_speed == 10) {
+ LED_NETWORK_SET(LED_ORANGE);
+ } else {
+ LED_NETWORK_SET(LED_GREEN);
+ }
+ }
+ else {
+ LED_NETWORK_SET(LED_OFF);
+ }
+}
+
+static int
+etrax_init_module(void)
+{
+ return etrax_ethernet_init();
+}
+
+static int __init
+e100_boot_setup(char* str)
+{
+ struct sockaddr sa = {0};
+ int i;
+
+ /* Parse the colon separated Ethernet station address */
+ for (i = 0; i < ETH_ALEN; i++) {
+ unsigned int tmp;
+ if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
+			printk(KERN_WARNING "Malformed station address\n");
+ return 0;
+ }
+ sa.sa_data[i] = (char)tmp;
+ }
+
+ default_mac = sa;
+ return 1;
+}
+
+__setup("etrax100_eth=", e100_boot_setup);
+
+module_init(etrax_init_module);
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
new file mode 100644
index 000000000000..5c5f540da26a
--- /dev/null
+++ b/drivers/net/cs89x0.c
@@ -0,0 +1,1866 @@
+/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
+ * driver for linux.
+ */
+
+/*
+ Written 1996 by Russell Nelson, with reference to skeleton.c
+ written 1993-1994 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached at nelson@crynwr.com, Crynwr
+ Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
+
+ Changelog:
+
+ Mike Cruse : mcruse@cti-ltd.com
+ : Changes for Linux 2.0 compatibility.
+ : Added dev_id parameter in net_interrupt(),
+ : request_irq() and free_irq(). Just NULL for now.
+
+ Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
+ : in net_open() and net_close() so kerneld would know
+ : that the module is in use and wouldn't eject the
+ : driver prematurely.
+
+ Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
+ : as an example. Disabled autoprobing in init_module(),
+ : not a good thing to do to other devices while Linux
+ : is running from all accounts.
+
+ Russ Nelson : Jul 13 1998. Added RxOnly DMA support.
+
+ Melody Lee : Aug 10 1999. Changes for Linux 2.2.5 compatibility.
+ : email: ethernet@crystal.cirrus.com
+
+ Alan Cox : Removed 1.2 support, added 2.1 extra counters.
+
+ Andrew Morton : andrewm@uow.edu.au
+ : Kernel 2.3.48
+ : Handle kmalloc() failures
+ : Other resource allocation fixes
+ : Add SMP locks
+ : Integrate Russ Nelson's ALLOW_DMA functionality back in.
+ : If ALLOW_DMA is true, make DMA runtime selectable
+ : Folded in changes from Cirrus (Melody Lee
+ : <klee@crystal.cirrus.com>)
+ : Don't call netif_wake_queue() in net_send_packet()
+ : Fixed an out-of-mem bug in dma_rx()
+ : Updated Documentation/networking/cs89x0.txt
+
+ Andrew Morton : andrewm@uow.edu.au / Kernel 2.3.99-pre1
+ : Use skb_reserve to longword align IP header (two places)
+ : Remove a delay loop from dma_rx()
+ : Replace '100' with HZ
+ : Clean up a couple of skb API abuses
+ : Added 'cs89x0_dma=N' kernel boot option
+ : Correctly initialise lp->lock in non-module compile
+
+ Andrew Morton : andrewm@uow.edu.au / Kernel 2.3.99-pre4-1
+ : MOD_INC/DEC race fix (see
+ : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)
+
+ Andrew Morton : andrewm@uow.edu.au / Kernel 2.4.0-test7-pre2
+ : Enhanced EEPROM support to cover more devices,
+ : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
+ : (Jason Gunthorpe <jgg@ualberta.ca>)
+
+ Andrew Morton : Kernel 2.4.0-test11-pre4
+ : Use dev->name in request_*() (Andrey Panin)
+ : Fix an error-path memleak in init_module()
+ : Preserve return value from request_irq()
+ : Fix type of `media' module parm (Keith Owens)
+ : Use SET_MODULE_OWNER()
+ : Tidied up strange request_irq() abuse in net_open().
+
+ Andrew Morton : Kernel 2.4.3-pre1
+ : Request correct number of pages for DMA (Hugh Dickens)
+ : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
+ : because unregister_netdev() calls get_stats.
+ : Make `version[]' __initdata
+ : Uninlined the read/write reg/word functions.
+
+ Oskar Schirmer : oskar@scara.com
+ : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)
+
+ Deepak Saxena : dsaxena@plexity.net
+ : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
+
+*/
+
+/* Always include 'config.h' first in case the user wants to turn on
+ or override something. */
+#include <linux/config.h>
+#include <linux/module.h>
+
+/*
+ * Set this to zero to disable DMA code
+ *
+ * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
+ * module options so we don't break any startup scripts.
+ */
+#ifndef CONFIG_ARCH_IXDP2X01
+#define ALLOW_DMA 0
+#else
+#define ALLOW_DMA 1
+#endif
+
+/*
+ * Set this to zero to remove all the debug statements via
+ * dead code elimination
+ */
+#define DEBUGGING 1
+
+/*
+ Sources:
+
+ Crynwr packet driver epktisa.
+
+ Crystal Semiconductor data sheets.
+
+*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#if ALLOW_DMA
+#include <asm/dma.h>
+#endif
+
+#include "cs89x0.h"
+
+static char version[] __initdata =
+"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton <andrewm@uow.edu.au>\n";
+
+#define DRV_NAME "cs89x0"
+
+/* First, a few definitions that the brave might change.
+ A zero-terminated list of I/O addresses to be probed. Some special flags..
+ Addr & 1 = Read back the address port, look for signature and reset
+ the page window before probing
+ Addr & 3 = Reset the page window and probe
+ The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
+ but it is possible that a Cirrus board could be plugged into the ISA
+ slots. */
+/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
+ them to system IRQ numbers. This mapping is card specific and is set to
+ the configuration of the Cirrus Eval board for this chip. */
+#ifdef CONFIG_ARCH_CLPS7500
+static unsigned int netcard_portlist[] __initdata =
+ { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
+static unsigned int cs8900_irq_map[] = {12,0,0,0};
+#elif defined(CONFIG_SH_HICOSH4)
+static unsigned int netcard_portlist[] __initdata =
+ { 0x0300, 0};
+static unsigned int cs8900_irq_map[] = {1,0,0,0};
+#elif defined(CONFIG_ARCH_IXDP2X01)
+#include <asm/irq.h>
+static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
+static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
+#else
+static unsigned int netcard_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
+static unsigned int cs8900_irq_map[] = {10,11,12,5};
+#endif
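+
+/* Illustrative note, not from the datasheet: an entry such as 0x80090303
+   decodes to base address 0x80090300 with both low flag bits set, i.e.
+   "reset the page window and probe" while skipping the ADD_PORT signature
+   test -- see the (ioaddr & 1) / (ioaddr & 2) handling in cs89x0_probe1(). */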
+
+#if DEBUGGING
+static unsigned int net_debug = DEBUGGING;
+#else
+#define net_debug 0 /* gcc will remove all the debug code for us */
+#endif
+
+/* The number of low I/O ports used by the ethercard. */
+#define NETCARD_IO_EXTENT 16
+
+/* we allow the user to override various values normally set in the EEPROM */
+#define FORCE_RJ45 0x0001 /* pick one of these three */
+#define FORCE_AUI 0x0002
+#define FORCE_BNC 0x0004
+
+#define FORCE_AUTO 0x0010 /* pick one of these three */
+#define FORCE_HALF 0x0020
+#define FORCE_FULL 0x0030
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ int chip_type; /* one of: CS8900, CS8920, CS8920M */
+ char chip_revision; /* revision letter of the chip ('A'...) */
+ int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
+ int auto_neg_cnf; /* auto-negotiation word from EEPROM */
+ int adapter_cnf; /* adapter configuration from EEPROM */
+ int isa_config; /* ISA configuration from EEPROM */
+ int irq_map; /* IRQ map from EEPROM */
+ int rx_mode; /* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */
+ int curr_rx_cfg; /* a copy of PP_RxCFG */
+ int linectl; /* either 0 or LOW_RX_SQUELCH, depending on configuration. */
+ int send_underrun; /* keep track of how many underruns in a row we get */
+ int force; /* force various values; see FORCE* above. */
+ spinlock_t lock;
+#if ALLOW_DMA
+ int use_dma; /* Flag: we're using dma */
+ int dma; /* DMA channel */
+ int dmasize; /* 16 or 64 */
+ unsigned char *dma_buff; /* points to the beginning of the buffer */
+ unsigned char *end_dma_buff; /* points to the end of the buffer */
+ unsigned char *rx_dma_ptr; /* points to the next packet */
+#endif
+};
+
+/* Index to functions, as function prototypes. */
+
+static int cs89x0_probe1(struct net_device *dev, int ioaddr, int modular);
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void set_multicast_list(struct net_device *dev);
+static void net_timeout(struct net_device *dev);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void reset_chip(struct net_device *dev);
+static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
+static int get_eeprom_cksum(int off, int len, int *buffer);
+static int set_mac_address(struct net_device *dev, void *addr);
+static void count_rx_errors(int status, struct net_local *lp);
+#if ALLOW_DMA
+static void get_dma_channel(struct net_device *dev);
+static void release_dma_buff(struct net_local *lp);
+#endif
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) 1
+
+/*
+ * Permit 'cs89x0_dma=N' in the kernel boot environment
+ */
+#if !defined(MODULE) && (ALLOW_DMA != 0)
+static int g_cs89x0_dma;
+
+static int __init dma_fn(char *str)
+{
+ g_cs89x0_dma = simple_strtol(str,NULL,0);
+ return 1;
+}
+
+__setup("cs89x0_dma=", dma_fn);
+#endif /* !defined(MODULE) && (ALLOW_DMA != 0) */
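+
+/*
+ * Illustrative usage (assumes a non-modular build with ALLOW_DMA enabled):
+ * booting with "cs89x0_dma=5" on the kernel command line makes dma_fn()
+ * store 5 in g_cs89x0_dma, which cs89x0_probe1() later copies into lp->dma
+ * and uses to enable lp->use_dma.
+ */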
+
+#ifndef MODULE
+static int g_cs89x0_media__force;
+
+static int __init media_fn(char *str)
+{
+ if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45;
+ else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI;
+ else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC;
+ return 1;
+}
+
+__setup("cs89x0_media=", media_fn);
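+
+/*
+ * Illustrative usage: booting with "cs89x0_media=rj45" forces the RJ-45
+ * transceiver (FORCE_RJ45); "aui" and "bnc" select the other two media types.
+ */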
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ Return 0 on success.
+ */
+
+struct net_device * __init cs89x0_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+ int irq;
+ int io;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+
+ if (net_debug)
+ printk("cs89x0:cs89x0_probe(0x%x)\n", io);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = cs89x0_probe1(dev, io, 0);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = netcard_portlist; *port; port++) {
+ if (cs89x0_probe1(dev, *port, 0) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ outw(PP_ChipID, dev->base_addr + ADD_PORT);
+ release_region(dev->base_addr, NETCARD_IO_EXTENT);
+out:
+ free_netdev(dev);
+ printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
+ return ERR_PTR(err);
+}
+#endif
+
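+/*
+ * Register access helpers: readreg()/writereg() reach the chip's PacketPage
+ * registers indirectly, by writing the register offset to ADD_PORT and then
+ * transferring the value through DATA_PORT.  readword()/writeword() access
+ * the I/O ports at the given offset from dev->base_addr directly (e.g.
+ * TX_CMD_PORT, ISQ_PORT).
+ */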
+static int
+readreg(struct net_device *dev, int portno)
+{
+ outw(portno, dev->base_addr + ADD_PORT);
+ return inw(dev->base_addr + DATA_PORT);
+}
+
+static void
+writereg(struct net_device *dev, int portno, int value)
+{
+ outw(portno, dev->base_addr + ADD_PORT);
+ outw(value, dev->base_addr + DATA_PORT);
+}
+
+static int
+readword(struct net_device *dev, int portno)
+{
+ return inw(dev->base_addr + portno);
+}
+
+static void
+writeword(struct net_device *dev, int portno, int value)
+{
+ outw(value, dev->base_addr + portno);
+}
+
+static int __init
+wait_eeprom_ready(struct net_device *dev)
+{
+ int timeout = jiffies;
+	/* Check to see if the EEPROM is ready; it is ready when SI_BUSY
+	   in PP_SelfST is clear.  A timeout is used just in case the busy
+	   bit never clears. */
+ while(readreg(dev, PP_SelfST) & SI_BUSY)
+ if (jiffies - timeout >= 40)
+ return -1;
+ return 0;
+}
+
+static int __init
+get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
+{
+ int i;
+
+ if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len);
+ for (i = 0; i < len; i++) {
+ if (wait_eeprom_ready(dev) < 0) return -1;
+ /* Now send the EEPROM read command and EEPROM location to read */
+ writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
+ if (wait_eeprom_ready(dev) < 0) return -1;
+ buffer[i] = readreg(dev, PP_EEData);
+ if (net_debug > 3) printk("%04x ", buffer[i]);
+ }
+ if (net_debug > 3) printk("\n");
+ return 0;
+}
+
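+/* The EEPROM contents are considered valid when the low 16 bits of the sum
+   of the words read (CHKSUM_LEN of them) come to zero, as checked below. */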
+static int __init
+get_eeprom_cksum(int off, int len, int *buffer)
+{
+ int i, cksum;
+
+ cksum = 0;
+ for (i = 0; i < len; i++)
+ cksum += buffer[i];
+ cksum &= 0xffff;
+ if (cksum == 0)
+ return 0;
+ return -1;
+}
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions.
+ Return 0 on success.
+ */
+
+static int __init
+cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
+{
+ struct net_local *lp = netdev_priv(dev);
+ static unsigned version_printed;
+ int i;
+ unsigned rev_type = 0;
+ int eeprom_buff[CHKSUM_LEN];
+ int retval;
+
+ SET_MODULE_OWNER(dev);
+ /* Initialize the device structure. */
+ if (!modular) {
+ memset(lp, 0, sizeof(*lp));
+ spin_lock_init(&lp->lock);
+#ifndef MODULE
+#if ALLOW_DMA
+ if (g_cs89x0_dma) {
+ lp->use_dma = 1;
+ lp->dma = g_cs89x0_dma;
+ lp->dmasize = 16; /* Could make this an option... */
+ }
+#endif
+ lp->force = g_cs89x0_media__force;
+#endif
+ }
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ /* WTF is going on here? */
+ if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
+ printk(KERN_ERR "%s: request_region(0x%x, 0x%x) failed\n",
+ DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
+ retval = -EBUSY;
+ goto out1;
+ }
+
+#ifdef CONFIG_SH_HICOSH4
+	/* truly reset the chip */
+ outw(0x0114, ioaddr + ADD_PORT);
+ outw(0x0040, ioaddr + DATA_PORT);
+#endif
+
+ /* if they give us an odd I/O address, then do ONE write to
+ the address port, to get it back to address zero, where we
+ expect to find the EISA signature word. An IO with a base of 0x3
+ will skip the test for the ADD_PORT. */
+ if (ioaddr & 1) {
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
+ if ((ioaddr & 2) != 2)
+ if ((inw((ioaddr & ~3)+ ADD_PORT) & ADD_MASK) != ADD_SIG) {
+ printk(KERN_ERR "%s: bad signature 0x%x\n",
+ dev->name, inw((ioaddr & ~3)+ ADD_PORT));
+ retval = -ENODEV;
+ goto out2;
+ }
+ }
+	printk("PP_addr=0x%x\n", inw(ioaddr + ADD_PORT));
+
+ ioaddr &= ~3;
+ outw(PP_ChipID, ioaddr + ADD_PORT);
+
+ if (inw(ioaddr + DATA_PORT) != CHIP_EISA_ID_SIG) {
+ printk(KERN_ERR "%s: incorrect signature 0x%x\n",
+ dev->name, inw(ioaddr + DATA_PORT));
+ retval = -ENODEV;
+ goto out2;
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* get the chip type */
+ rev_type = readreg(dev, PRODUCT_ID_ADD);
+ lp->chip_type = rev_type &~ REVISON_BITS;
+ lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
+
+ /* Check the chip type and revision in order to set the correct send command
+ CS8920 revision C and CS8900 revision F can use the faster send. */
+ lp->send_cmd = TX_AFTER_381;
+ if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
+ lp->send_cmd = TX_NOW;
+ if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
+ lp->send_cmd = TX_NOW;
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
+ dev->name,
+ lp->chip_type==CS8900?'0':'2',
+ lp->chip_type==CS8920M?"M":"",
+ lp->chip_revision,
+ dev->base_addr);
+
+ reset_chip(dev);
+
+ /* Here we read the current configuration of the chip. If there
+ is no Extended EEPROM then the idea is to not disturb the chip
+   configuration; it should have been correctly set up by the automatic
+   EEPROM read on reset. So, if the chip says it read the EEPROM
+   the driver will always do *something* instead of complaining that
+ adapter_cnf is 0. */
+
+#ifdef CONFIG_SH_HICOSH4
+ if (1) {
+ /* For the HiCO.SH4 board, things are different: we don't
+ have EEPROM, but there is some data in flash, so we go
+ get it there directly (MAC). */
+ __u16 *confd;
+ short cnt;
+ if (((* (volatile __u32 *) 0xa0013ff0) & 0x00ffffff)
+ == 0x006c3000) {
+ confd = (__u16*) 0xa0013fc0;
+ } else {
+ confd = (__u16*) 0xa001ffc0;
+ }
+ cnt = (*confd++ & 0x00ff) >> 1;
+ while (--cnt > 0) {
+ __u16 j = *confd++;
+
+ switch (j & 0x0fff) {
+ case PP_IA:
+ for (i = 0; i < ETH_ALEN/2; i++) {
+ dev->dev_addr[i*2] = confd[i] & 0xFF;
+ dev->dev_addr[i*2+1] = confd[i] >> 8;
+ }
+ break;
+ }
+ j = (j >> 12) + 1;
+ confd += j;
+ cnt -= j;
+ }
+ } else
+#endif
+
+ if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
+ (EEPROM_OK|EEPROM_PRESENT)) {
+ /* Load the MAC. */
+ for (i=0; i < ETH_ALEN/2; i++) {
+ unsigned int Addr;
+ Addr = readreg(dev, PP_IA+i*2);
+ dev->dev_addr[i*2] = Addr & 0xFF;
+ dev->dev_addr[i*2+1] = Addr >> 8;
+ }
+
+ /* Load the Adapter Configuration.
+ Note: Barring any more specific information from some
+ other source (ie EEPROM+Schematics), we would not know
+ how to operate a 10Base2 interface on the AUI port.
+ However, since we do read the status of HCB1 and use
+ settings that always result in calls to control_dc_dc(dev,0)
+ a BNC interface should work if the enable pin
+ (dc/dc converter) is on HCB1. It will be called AUI
+ however. */
+
+ lp->adapter_cnf = 0;
+ i = readreg(dev, PP_LineCTL);
+ /* Preserve the setting of the HCB1 pin. */
+ if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
+ lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
+		/* Save the squelch bit */
+ if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
+ lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
+ /* Check if the card is in 10Base-t only mode */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
+ lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
+ /* Check if the card is in AUI only mode */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
+ lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
+ /* Check if the card is in Auto mode. */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
+ lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
+ A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
+
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
+ dev->name, i, lp->adapter_cnf);
+
+ /* IRQ. Other chips already probe, see below. */
+ if (lp->chip_type == CS8900)
+ lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
+
+ printk( "[Cirrus EEPROM] ");
+ }
+
+ printk("\n");
+
+ /* First check to see if an EEPROM is attached. */
+#ifdef CONFIG_SH_HICOSH4 /* no EEPROM on HiCO, don't hassle with it here */
+ if (1) {
+ printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n");
+ } else
+#endif
+ if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
+ printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
+ else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
+ printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
+ } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
+ /* Check if the chip was able to read its own configuration starting
+ at 0 in the EEPROM*/
+ if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
+ (EEPROM_OK|EEPROM_PRESENT))
+ printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
+
+ } else {
+ /* This reads an extended EEPROM that is not documented
+ in the CS8900 datasheet. */
+
+ /* get transmission control word but keep the autonegotiation bits */
+ if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
+ /* Store adapter configuration */
+ if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
+ /* Store ISA configuration */
+ lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
+ dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;
+
+ /* eeprom_buff has 32-bit ints, so we can't just memcpy it */
+ /* store the initial memory base address */
+ for (i = 0; i < ETH_ALEN/2; i++) {
+ dev->dev_addr[i*2] = eeprom_buff[i];
+ dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
+ }
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
+ dev->name, lp->adapter_cnf);
+ }
+
+ /* allow them to force multiple transceivers. If they force multiple, autosense */
+ {
+ int count = 0;
+ if (lp->force & FORCE_RJ45) {lp->adapter_cnf |= A_CNF_10B_T; count++; }
+ if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_AUI; count++; }
+ if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_10B_2; count++; }
+ if (count > 1) {lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
+ else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
+ else if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
+ else if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
+ }
+
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
+ dev->name, lp->force, lp->adapter_cnf);
+
+ /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
+
+ /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
+
+ /* FIXME: we don't set the Ethernet address on the command line. Use
+ ifconfig IFACE hw ether AABBCCDDEEFF */
+
+ printk(KERN_INFO "cs89x0 media %s%s%s",
+ (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
+ (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
+ (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");
+
+ lp->irq_map = 0xffff;
+
+ /* If this is a CS8900 then no pnp soft */
+ if (lp->chip_type != CS8900 &&
+ /* Check if the ISA IRQ has been set */
+ (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
+ (i != 0 && i < CS8920_NO_INTS))) {
+ if (!dev->irq)
+ dev->irq = i;
+ } else {
+ i = lp->isa_config & INT_NO_MASK;
+ if (lp->chip_type == CS8900) {
+#ifdef CONFIG_ARCH_IXDP2X01
+ i = cs8900_irq_map[0];
+#else
+ /* Translate the IRQ using the IRQ mapping table. */
+ if (i >= sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0]))
+ printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
+ else
+ i = cs8900_irq_map[i];
+
+ lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
+ } else {
+ int irq_map_buff[IRQ_MAP_LEN/2];
+
+ if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
+ IRQ_MAP_LEN/2,
+ irq_map_buff) >= 0) {
+ if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
+ lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
+ }
+#endif
+ }
+ if (!dev->irq)
+ dev->irq = i;
+ }
+
+ printk(" IRQ %d", dev->irq);
+
+#if ALLOW_DMA
+ if (lp->use_dma) {
+ get_dma_channel(dev);
+ printk(", DMA %d", dev->dma);
+ }
+ else
+#endif
+ {
+ printk(", programmed I/O");
+ }
+
+ /* print the ethernet address. */
+ printk(", MAC");
+ for (i = 0; i < ETH_ALEN; i++)
+ {
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->tx_timeout = net_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->set_mac_address = set_mac_address;
+
+ printk("\n");
+ if (net_debug)
+ printk("cs89x0_probe1() successful\n");
+ return 0;
+out2:
+ release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
+out1:
+ return retval;
+}
+
+
+/*********************************
+ * This page contains DMA routines
+**********************************/
+
+#if ALLOW_DMA
+
+/* true when both pointers lie within the same 128KB ISA DMA page (16-bit channels) */
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+
+static void
+get_dma_channel(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (lp->dma) {
+ dev->dma = lp->dma;
+ lp->isa_config |= ISA_RxDMA;
+ } else {
+ if ((lp->isa_config & ANY_ISA_DMA) == 0)
+ return;
+ dev->dma = lp->isa_config & DMA_NO_MASK;
+ if (lp->chip_type == CS8900)
+ dev->dma += 5;
+ if (dev->dma < 5 || dev->dma > 7) {
+ lp->isa_config &= ~ANY_ISA_DMA;
+ return;
+ }
+ }
+ return;
+}
+
+static void
+write_dma(struct net_device *dev, int chip_type, int dma)
+{
+ struct net_local *lp = netdev_priv(dev);
+ if ((lp->isa_config & ANY_ISA_DMA) == 0)
+ return;
+ if (chip_type == CS8900) {
+ writereg(dev, PP_CS8900_ISADMA, dma-5);
+ } else {
+ writereg(dev, PP_CS8920_ISADMA, dma);
+ }
+}
+
+static void
+set_dma_cfg(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (lp->use_dma) {
+ if ((lp->isa_config & ANY_ISA_DMA) == 0) {
+ if (net_debug > 3)
+ printk("set_dma_cfg(): no DMA\n");
+ return;
+ }
+ if (lp->isa_config & ISA_RxDMA) {
+ lp->curr_rx_cfg |= RX_DMA_ONLY;
+ if (net_debug > 3)
+ printk("set_dma_cfg(): RX_DMA_ONLY\n");
+ } else {
+ lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */
+ if (net_debug > 3)
+ printk("set_dma_cfg(): AUTO_RX_DMA\n");
+ }
+ }
+}
+
+static int
+dma_bufcfg(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->use_dma)
+ return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0;
+ else
+ return 0;
+}
+
+static int
+dma_busctl(struct net_device *dev)
+{
+ int retval = 0;
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->use_dma) {
+ if (lp->isa_config & ANY_ISA_DMA)
+ retval |= RESET_RX_DMA; /* Reset the DMA pointer */
+ if (lp->isa_config & DMA_BURST)
+ retval |= DMA_BURST_MODE; /* Does ISA config specify DMA burst ? */
+ if (lp->dmasize == 64)
+ retval |= RX_DMA_SIZE_64K; /* did they ask for 64K? */
+ retval |= MEMORY_ON; /* we need memory enabled to use DMA. */
+ }
+ return retval;
+}
+
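+/*
+ * dma_rx() pulls one frame out of the receive DMA ring buffer.  As handled
+ * below, each frame starts with a 4-byte header (16-bit status then 16-bit
+ * length, little-endian), the frame data is padded to a 4-byte boundary,
+ * and the read pointer wraps back to dma_buff once it passes end_dma_buff
+ * (dmasize KB into the buffer).
+ */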
+static void
+dma_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int status, length;
+ unsigned char *bp = lp->rx_dma_ptr;
+
+ status = bp[0] + (bp[1]<<8);
+ length = bp[2] + (bp[3]<<8);
+ bp += 4;
+ if (net_debug > 5) {
+ printk( "%s: receiving DMA packet at %lx, status %x, length %x\n",
+ dev->name, (unsigned long)bp, status, length);
+ }
+ if ((status & RX_OK) == 0) {
+ count_rx_errors(status, lp);
+ goto skip_this_frame;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(length + 2);
+ if (skb == NULL) {
+ if (net_debug) /* I don't think we want to do this to a stressed system */
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+
+ /* AKPM: advance bp to the next frame */
+skip_this_frame:
+ bp += (length + 3) & ~3;
+ if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+ lp->rx_dma_ptr = bp;
+ return;
+ }
+ skb_reserve(skb, 2); /* longword align L3 header */
+ skb->dev = dev;
+
+ if (bp + length > lp->end_dma_buff) {
+ int semi_cnt = lp->end_dma_buff - bp;
+ memcpy(skb_put(skb,semi_cnt), bp, semi_cnt);
+ memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff,
+ length - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,length), bp, length);
+ }
+ bp += (length + 3) & ~3;
+ if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+ lp->rx_dma_ptr = bp;
+
+ if (net_debug > 3) {
+ printk( "%s: received %d byte DMA packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += length;
+}
+
+#endif /* ALLOW_DMA */
+
+void __init reset_chip(struct net_device *dev)
+{
+#ifndef CONFIG_ARCH_IXDP2X01
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+#endif
+ int reset_start_time;
+
+ writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
+
+ /* wait 30 ms */
+ msleep(30);
+
+#ifndef CONFIG_ARCH_IXDP2X01
+ if (lp->chip_type != CS8900) {
+ /* Hardware problem requires PNP registers to be reconfigured after a reset */
+ outw(PP_CS8920_ISAINT, ioaddr + ADD_PORT);
+ outb(dev->irq, ioaddr + DATA_PORT);
+ outb(0, ioaddr + DATA_PORT + 1);
+
+ outw(PP_CS8920_ISAMemB, ioaddr + ADD_PORT);
+ outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
+ outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
+ }
+#endif /* IXDP2x01 */
+
+ /* Wait until the chip is reset */
+ reset_start_time = jiffies;
+ while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
+ ;
+}
+
+
+static void
+control_dc_dc(struct net_device *dev, int on_not_off)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned int selfcontrol;
+ int timenow = jiffies;
+	/* control the DC to DC converter in the SelfControl register.
+	   Note: This is hooked up to a general purpose pin, might not
+	   always be a DC to DC converter. */
+
+ selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
+ if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
+ selfcontrol |= HCB1;
+ else
+ selfcontrol &= ~HCB1;
+ writereg(dev, PP_SelfCTL, selfcontrol);
+
+ /* Wait for the DC/DC converter to power up - 500ms */
+ while (jiffies - timenow < HZ)
+ ;
+}
+
+#define DETECTED_NONE 0
+#define DETECTED_RJ45H 1
+#define DETECTED_RJ45F 2
+#define DETECTED_AUI 3
+#define DETECTED_BNC 4
+
+static int
+detect_tp(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int timenow = jiffies;
+ int fdx;
+
+ if (net_debug > 1) printk("%s: Attempting TP\n", dev->name);
+
+ /* If connected to another full duplex capable 10-Base-T card the link pulses
+ seem to be lost when the auto detect bit in the LineCTL is set.
+	   To overcome this, the auto detect bit is cleared whilst testing the
+	   10-Base-T interface.  This would not be necessary for the sparrow
+	   chip, but it is simpler to do it anyway. */
+ writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY);
+ control_dc_dc(dev, 0);
+
+ /* Delay for the hardware to work out if the TP cable is present - 150ms */
+ for (timenow = jiffies; jiffies - timenow < 15; )
+ ;
+ if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
+ return DETECTED_NONE;
+
+ if (lp->chip_type == CS8900) {
+ switch (lp->force & 0xf0) {
+#if 0
+ case FORCE_AUTO:
+ printk("%s: cs8900 doesn't autonegotiate\n",dev->name);
+ return DETECTED_NONE;
+#endif
+ /* CS8900 doesn't support AUTO, change to HALF*/
+ case FORCE_AUTO:
+ lp->force &= ~FORCE_AUTO;
+ lp->force |= FORCE_HALF;
+ break;
+ case FORCE_HALF:
+ break;
+ case FORCE_FULL:
+ writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900);
+ break;
+ }
+ fdx = readreg(dev, PP_TestCTL) & FDX_8900;
+ } else {
+ switch (lp->force & 0xf0) {
+ case FORCE_AUTO:
+ lp->auto_neg_cnf = AUTO_NEG_ENABLE;
+ break;
+ case FORCE_HALF:
+ lp->auto_neg_cnf = 0;
+ break;
+ case FORCE_FULL:
+ lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
+ break;
+ }
+
+ writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);
+
+ if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
+ printk(KERN_INFO "%s: negotiating duplex...\n",dev->name);
+ while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
+ if (jiffies - timenow > 4000) {
+ printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n");
+ break;
+ }
+ }
+ }
+ fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
+ }
+ if (fdx)
+ return DETECTED_RJ45F;
+ else
+ return DETECTED_RJ45H;
+}
+
+/* send a test packet - return true if carrier bits are ok */
+static int
+send_test_pkt(struct net_device *dev)
+{
+ char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
+ 0, 46, /* A 46 in network order */
+ 0, 0, /* DSAP=0 & SSAP=0 fields */
+ 0xf3, 0 /* Control (Test Req + P bit set) */ };
+ long timenow = jiffies;
+
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
+
+ memcpy(test_packet, dev->dev_addr, ETH_ALEN);
+ memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
+
+ writeword(dev, TX_CMD_PORT, TX_AFTER_ALL);
+ writeword(dev, TX_LEN_PORT, ETH_ZLEN);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ while (jiffies - timenow < 5)
+ if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
+ break;
+ if (jiffies - timenow >= 5)
+ return 0; /* this shouldn't happen */
+
+ /* Write the contents of the packet */
+ outsw(dev->base_addr + TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
+
+ if (net_debug > 1) printk("Sending test packet ");
+ /* wait a couple of jiffies for packet to be received */
+ for (timenow = jiffies; jiffies - timenow < 3; )
+ ;
+ if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
+ if (net_debug > 1) printk("succeeded\n");
+ return 1;
+ }
+ if (net_debug > 1) printk("failed\n");
+ return 0;
+}
+
+
+static int
+detect_aui(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name);
+ control_dc_dc(dev, 0);
+
+ writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
+
+ if (send_test_pkt(dev))
+ return DETECTED_AUI;
+ else
+ return DETECTED_NONE;
+}
+
+static int
+detect_bnc(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name);
+ control_dc_dc(dev, 1);
+
+ writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
+
+ if (send_test_pkt(dev))
+ return DETECTED_BNC;
+ else
+ return DETECTED_NONE;
+}
+
+
+static void
+write_irq(struct net_device *dev, int chip_type, int irq)
+{
+ int i;
+
+ if (chip_type == CS8900) {
+ /* Search the mapping table for the corresponding IRQ pin. */
+ for (i = 0; i != sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0]); i++)
+ if (cs8900_irq_map[i] == irq)
+ break;
+ /* Not found */
+ if (i == sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0]))
+ i = 3;
+ writereg(dev, PP_CS8900_ISAINT, i);
+ } else {
+ writereg(dev, PP_CS8920_ISAINT, irq);
+ }
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+
+/* AKPM: do we need to do any locking here? */
+
+static int
+net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int result = 0;
+ int i;
+ int ret;
+
+#ifndef CONFIG_SH_HICOSH4 /* uses irq#1, so this won't work */
+ if (dev->irq < 2) {
+ /* Allow interrupts to be generated by the chip */
+/* Cirrus' release had this: */
+#if 0
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
+#endif
+/* And 2.3.47 had this: */
+ writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+
+ for (i = 2; i < CS8920_NO_INTS; i++) {
+ if ((1 << i) & lp->irq_map) {
+ if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
+ dev->irq = i;
+ write_irq(dev, lp->chip_type, i);
+ /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
+ break;
+ }
+ }
+ }
+
+ if (i >= CS8920_NO_INTS) {
+ writereg(dev, PP_BusCTL, 0); /* disable interrupts. */
+ printk(KERN_ERR "cs89x0: can't get an interrupt\n");
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+ }
+ else
+#endif
+ {
+#ifndef CONFIG_ARCH_IXDP2X01
+ if (((1 << dev->irq) & lp->irq_map) == 0) {
+ printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
+ dev->name, dev->irq, lp->irq_map);
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+#endif
+/* FIXME: Cirrus' release had this: */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
+/* And 2.3.47 had this: */
+#if 0
+ writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+#endif
+ write_irq(dev, lp->chip_type, dev->irq);
+ ret = request_irq(dev->irq, &net_interrupt, 0, dev->name, dev);
+ if (ret) {
+ if (net_debug)
+ printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
+ goto bad_out;
+ }
+ }
+
+#if ALLOW_DMA
+ if (lp->use_dma) {
+ if (lp->isa_config & ANY_ISA_DMA) {
+ unsigned long flags;
+ lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
+ get_order(lp->dmasize * 1024));
+
+ if (!lp->dma_buff) {
+ printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
+ goto release_irq;
+ }
+ if (net_debug > 1) {
+ printk( "%s: dma %lx %lx\n",
+ dev->name,
+ (unsigned long)lp->dma_buff,
+ (unsigned long)isa_virt_to_bus(lp->dma_buff));
+ }
+ if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS ||
+ !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) {
+ printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
+ goto release_irq;
+ }
+ memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */
+ if (request_dma(dev->dma, dev->name)) {
+ printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
+ goto release_irq;
+ }
+ write_dma(dev, lp->chip_type, dev->dma);
+ lp->rx_dma_ptr = lp->dma_buff;
+ lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
+ spin_lock_irqsave(&lp->lock, flags);
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x14); /* auto_init as well */
+ set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
+ set_dma_count(dev->dma, lp->dmasize*1024);
+ enable_dma(dev->dma);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ }
+#endif /* ALLOW_DMA */
+
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ /* while we're testing the interface, leave interrupts disabled */
+ writereg(dev, PP_BusCTL, MEMORY_ON);
+
+ /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
+ if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
+ lp->linectl = LOW_RX_SQUELCH;
+ else
+ lp->linectl = 0;
+
+ /* check to make sure that they have the "right" hardware available */
+ switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+ case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break;
+ case A_CNF_MEDIA_AUI: result = lp->adapter_cnf & A_CNF_AUI; break;
+ case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
+ default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
+ }
+ if (!result) {
+ printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
+ release_irq:
+#if ALLOW_DMA
+ release_dma_buff(lp);
+#endif
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
+ free_irq(dev->irq, dev);
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+
+ /* set the hardware to the configured choice */
+ switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+ case A_CNF_MEDIA_10B_T:
+ result = detect_tp(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+ result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
+ }
+ break;
+ case A_CNF_MEDIA_AUI:
+ result = detect_aui(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+				result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
+ }
+ break;
+ case A_CNF_MEDIA_10B_2:
+ result = detect_bnc(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+ result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
+ }
+ break;
+ case A_CNF_MEDIA_AUTO:
+ writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
+ if (lp->adapter_cnf & A_CNF_10B_T)
+ if ((result = detect_tp(dev)) != DETECTED_NONE)
+ break;
+ if (lp->adapter_cnf & A_CNF_AUI)
+ if ((result = detect_aui(dev)) != DETECTED_NONE)
+ break;
+ if (lp->adapter_cnf & A_CNF_10B_2)
+ if ((result = detect_bnc(dev)) != DETECTED_NONE)
+ break;
+ printk(KERN_ERR "%s: no media detected\n", dev->name);
+ goto release_irq;
+ }
+ switch(result) {
+ case DETECTED_NONE:
+ printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
+ goto release_irq;
+ case DETECTED_RJ45H:
+ printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
+ break;
+ case DETECTED_RJ45F:
+ printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
+ break;
+ case DETECTED_AUI:
+ printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name);
+ break;
+ case DETECTED_BNC:
+ printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name);
+ break;
+ }
+
+ /* Turn on both receive and transmit operations */
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
+
+ /* Receive only error free packets addressed to this card */
+ lp->rx_mode = 0;
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
+
+ lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
+
+ if (lp->isa_config & STREAM_TRANSFER)
+ lp->curr_rx_cfg |= RX_STREAM_ENBL;
+#if ALLOW_DMA
+ set_dma_cfg(dev);
+#endif
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
+
+ writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
+ TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
+
+ writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
+#if ALLOW_DMA
+ dma_bufcfg(dev) |
+#endif
+ TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
+
+ /* now that we've got our act together, enable everything */
+ writereg(dev, PP_BusCTL, ENABLE_IRQ
+ | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */
+#if ALLOW_DMA
+ | dma_busctl(dev)
+#endif
+ );
+ netif_start_queue(dev);
+ if (net_debug > 1)
+ printk("cs89x0: net_open() succeeded\n");
+ return 0;
+bad_out:
+ return ret;
+}
+
+static void net_timeout(struct net_device *dev)
+{
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
+ /* Try to restart the adaptor. */
+ netif_wake_queue(dev);
+}
+
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (net_debug > 3) {
+ printk("%s: sent %d byte packet of type %x\n",
+ dev->name, skb->len,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+
+ /* keep the upload from being interrupted, since we
+ ask the chip to start transmitting before the
+ whole packet has been completely uploaded. */
+
+ spin_lock_irq(&lp->lock);
+ netif_stop_queue(dev);
+
+ /* initiate a transmit sequence */
+ writeword(dev, TX_CMD_PORT, lp->send_cmd);
+ writeword(dev, TX_LEN_PORT, skb->len);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
+ /*
+ * Gasp! It hasn't. But that shouldn't happen since
+ * we're waiting for TxOk, so return 1 and requeue this packet.
+ */
+
+ spin_unlock_irq(&lp->lock);
+ if (net_debug) printk("cs89x0: Tx buffer not free!\n");
+ return 1;
+ }
+ /* Write the contents of the packet */
+ outsw(dev->base_addr + TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
+ spin_unlock_irq(&lp->lock);
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+
+ /*
+ * We DO NOT call netif_wake_queue() here.
+ * We also DO NOT call netif_start_queue().
+ *
+ * Either of these would cause another bottom half run through
+ * net_send_packet() before this packet has fully gone out. That causes
+	 * us to hit the "Gasp!" above and the send is rescheduled.  It runs like
+	 * a dog.  We just return and wait for the Tx completion interrupt handler
+	 * to restart the netdevice layer.
+ */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* we MUST read all the events out of the ISQ, otherwise we'll never
+ get interrupted again. As a consequence, we can't have any limit
+ on the number of times we loop in the interrupt handler. The
+ hardware guarantees that eventually we'll run out of events. Of
+ course, if you're on a slow machine, and packets are arriving
+ faster than you can read them off, you're screwed. Hasta la
+ vista, baby! */
+ while ((status = readword(dev, ISQ_PORT))) {
+ if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
+ handled = 1;
+ switch(status & ISQ_EVENT_MASK) {
+ case ISQ_RECEIVER_EVENT:
+ /* Got a packet(s). */
+ net_rx(dev);
+ break;
+ case ISQ_TRANSMITTER_EVENT:
+ lp->stats.tx_packets++;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ if ((status & ( TX_OK |
+ TX_LOST_CRS |
+ TX_SQE_ERROR |
+ TX_LATE_COL |
+ TX_16_COL)) != TX_OK) {
+ if ((status & TX_OK) == 0) lp->stats.tx_errors++;
+ if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++;
+ if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++;
+ if (status & TX_LATE_COL) lp->stats.tx_window_errors++;
+ if (status & TX_16_COL) lp->stats.tx_aborted_errors++;
+ }
+ break;
+ case ISQ_BUFFER_EVENT:
+ if (status & READY_FOR_TX) {
+ /* we tried to transmit a packet earlier,
+ but inexplicably ran out of buffers.
+ That shouldn't happen since we only ever
+ load one packet. Shrug. Do the right
+ thing anyway. */
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ if (status & TX_UNDERRUN) {
+ if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
+ lp->send_underrun++;
+ if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
+ else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
+ /* transmit cycle is done, although
+ frame wasn't transmitted - this
+ avoids having to wait for the upper
+ layers to timeout on us, in the
+ event of a tx underrun */
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+#if ALLOW_DMA
+ if (lp->use_dma && (status & RX_DMA)) {
+ int count = readreg(dev, PP_DmaFrameCnt);
+ while(count) {
+ if (net_debug > 5)
+ printk("%s: receiving %d DMA frames\n", dev->name, count);
+ if (net_debug > 2 && count >1)
+ printk("%s: receiving %d DMA frames\n", dev->name, count);
+ dma_rx(dev);
+ if (--count == 0)
+ count = readreg(dev, PP_DmaFrameCnt);
+ if (net_debug > 2 && count > 0)
+ printk("%s: continuing with %d DMA frames\n", dev->name, count);
+ }
+ }
+#endif
+ break;
+ case ISQ_RX_MISS_EVENT:
+ lp->stats.rx_missed_errors += (status >>6);
+ break;
+ case ISQ_TX_COL_EVENT:
+ lp->stats.collisions += (status >>6);
+ break;
+ }
+ }
+ return IRQ_RETVAL(handled);
+}
+
+static void
+count_rx_errors(int status, struct net_local *lp)
+{
+ lp->stats.rx_errors++;
+ if (status & RX_RUNT) lp->stats.rx_length_errors++;
+ if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++;
+ if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT)))
+ /* per str 172 */
+ lp->stats.rx_crc_errors++;
+ if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int status, length;
+
+ int ioaddr = dev->base_addr;
+ status = inw(ioaddr + RX_FRAME_PORT);
+ length = inw(ioaddr + RX_FRAME_PORT);
+
+ if ((status & RX_OK) == 0) {
+ count_rx_errors(status, lp);
+ return;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(length + 2);
+ if (skb == NULL) {
+#if 0 /* Again, this seems a cruel thing to do */
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+#endif
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_reserve(skb, 2); /* longword align L3 header */
+ skb->dev = dev;
+
+ insw(ioaddr + RX_FRAME_PORT, skb_put(skb, length), length >> 1);
+ if (length & 1)
+ skb->data[length-1] = inw(ioaddr + RX_FRAME_PORT);
+
+ if (net_debug > 3) {
+ printk( "%s: received %d byte packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += length;
+}
+
+#if ALLOW_DMA
+static void release_dma_buff(struct net_local *lp)
+{
+ if (lp->dma_buff) {
+ free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
+ lp->dma_buff = NULL;
+ }
+}
+#endif
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+#if ALLOW_DMA
+ struct net_local *lp = netdev_priv(dev);
+#endif
+
+ netif_stop_queue(dev);
+
+ writereg(dev, PP_RxCFG, 0);
+ writereg(dev, PP_TxCFG, 0);
+ writereg(dev, PP_BufCFG, 0);
+ writereg(dev, PP_BusCTL, 0);
+
+ free_irq(dev->irq, dev);
+
+#if ALLOW_DMA
+ if (lp->use_dma && lp->dma) {
+ free_dma(dev->dma);
+ release_dma_buff(lp);
+ }
+#endif
+
+ /* Update the statistics here. */
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Update the statistics from the device registers. */
+ lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+ lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &lp->stats;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if(dev->flags&IFF_PROMISC)
+ {
+ lp->rx_mode = RX_ALL_ACCEPT;
+ }
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+ /* The multicast-accept list is initialized to accept-all, and we
+ rely on higher-level filtering for now. */
+ lp->rx_mode = RX_MULTCAST_ACCEPT;
+ }
+ else
+ lp->rx_mode = 0;
+
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
+
+ /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
+ (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+static int set_mac_address(struct net_device *dev, void *p)
+{
+ int i;
+ struct sockaddr *addr = p;
+
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (net_debug) {
+ printk("%s: Setting MAC address to ", dev->name);
+ for (i = 0; i < dev->addr_len; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+ printk(".\n");
+ }
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ return 0;
+}
+
+#ifdef MODULE
+
+static struct net_device *dev_cs89x0;
+
+/*
+ * Support the 'debug' module parm even if we're compiled for non-debug to
+ * avoid breaking someone's startup scripts
+ */
+
+static int io;
+static int irq;
+static int debug;
+static char media[8];
+static int duplex=-1;
+
+static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */
+static int dma;
+static int dmasize=16; /* or 64 */
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(debug, int, 0);
+module_param_string(media, media, sizeof(media), 0);
+module_param(duplex, int, 0);
+module_param(dma , int, 0);
+module_param(dmasize , int, 0);
+module_param(use_dma , int, 0);
+MODULE_PARM_DESC(io, "cs89x0 I/O base address");
+MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
+#if DEBUGGING
+MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
+#else
+MODULE_PARM_DESC(debug, "(ignored)");
+#endif
+MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
+/* Currently no value other than the default of -1 is interpreted for duplex */
+MODULE_PARM_DESC(duplex, "(ignored)");
+#if ALLOW_DMA
+MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0");
+MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
+MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)");
+#else
+MODULE_PARM_DESC(dma , "(ignored)");
+MODULE_PARM_DESC(dmasize , "(ignored)");
+MODULE_PARM_DESC(use_dma , "(ignored)");
+#endif
+
+MODULE_AUTHOR("Mike Cruse, Russell Nelson <nelson@crynwr.com>, Andrew Morton <andrewm@uow.edu.au>");
+MODULE_LICENSE("GPL");
+
+
+/*
+* media=t - specify media type
+ or media=2
+ or media=aui
+   or media=auto
+* duplex=0 - specify forced half/full/autonegotiate duplex
+* debug=# - debug level
+
+
+* Default Chip Configuration:
+ * DMA Burst = enabled
+ * IOCHRDY Enabled = enabled
+ * UseSA = enabled
+ * CS8900 defaults to half-duplex if not specified on command-line
+ * CS8920 defaults to autoneg if not specified on command-line
+ * Use reset defaults for other config parameters
+
+* Assumptions:
+ * media type specified is supported (circuitry is present)
+ * if memory address is > 1MB, then required mem decode hw is present
+ * if 10B-2, then agent other than driver will enable DC/DC converter
+ (hw or software util)
+
+
+*/
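+
+/* Illustrative module load (parameter names as declared above):
+ *
+ *	modprobe cs89x0 io=0x300 irq=10 media=rj45
+ *
+ * io= is required for the modular driver, since init_module() refuses to
+ * autoprobe. */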
+
+int
+init_module(void)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ struct net_local *lp;
+ int ret = 0;
+
+#if DEBUGGING
+ net_debug = debug;
+#else
+ debug = 0;
+#endif
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->base_addr = io;
+ lp = netdev_priv(dev);
+
+#if ALLOW_DMA
+ if (use_dma) {
+ lp->use_dma = use_dma;
+ lp->dma = dma;
+ lp->dmasize = dmasize;
+ }
+#endif
+
+ spin_lock_init(&lp->lock);
+
+ /* boy, they'd better get these right */
+ if (!strcmp(media, "rj45"))
+ lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
+ else if (!strcmp(media, "aui"))
+ lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
+ else if (!strcmp(media, "bnc"))
+ lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
+ else
+ lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
+
+ if (duplex==-1)
+ lp->auto_neg_cnf = AUTO_NEG_ENABLE;
+
+ if (io == 0) {
+ printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n");
+ printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n");
+ ret = -EPERM;
+ goto out;
+ } else if (io <= 0x1ff) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+#if ALLOW_DMA
+ if (use_dma && dmasize != 16 && dmasize != 64) {
+ printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize);
+ ret = -EPERM;
+ goto out;
+ }
+#endif
+ ret = cs89x0_probe1(dev, io, 1);
+ if (ret)
+ goto out;
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "cs89x0.c: No card found at 0x%x\n", io);
+ ret = -ENXIO;
+ outw(PP_ChipID, dev->base_addr + ADD_PORT);
+ release_region(dev->base_addr, NETCARD_IO_EXTENT);
+ goto out;
+ }
+ dev_cs89x0 = dev;
+ return 0;
+out:
+ free_netdev(dev);
+ return ret;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_cs89x0);
+ outw(PP_ChipID, dev_cs89x0->base_addr + ADD_PORT);
+ release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
+ free_netdev(dev_cs89x0);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 8
+ * tab-width: 8
+ * End:
+ *
+ */
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
new file mode 100644
index 000000000000..b0ef7ad2baad
--- /dev/null
+++ b/drivers/net/cs89x0.h
@@ -0,0 +1,476 @@
+/* Copyright, 1988-1992, Russell Nelson, Crynwr Software
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, version 1.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_ARCH_IXDP2X01
+/* IXDP2401/IXDP2801 uses dword-aligned register addressing */
+#define CS89x0_PORT(reg) ((reg) * 2)
+#else
+#define CS89x0_PORT(reg) (reg)
+#endif
+
+#define PP_ChipID 0x0000	/* offset 0h -> Corp ID */
+ /* offset 2h -> Model/Product Number */
+ /* offset 3h -> Chip Revision Number */
+
+#define PP_ISAIOB 0x0020 /* IO base address */
+#define PP_CS8900_ISAINT 0x0022 /* ISA interrupt select */
+#define PP_CS8920_ISAINT 0x0370 /* ISA interrupt select */
+#define PP_CS8900_ISADMA 0x0024 /* ISA Rec DMA channel */
+#define PP_CS8920_ISADMA 0x0374 /* ISA Rec DMA channel */
+#define PP_ISASOF 0x0026 /* ISA DMA offset */
+#define PP_DmaFrameCnt 0x0028 /* ISA DMA Frame count */
+#define PP_DmaByteCnt 0x002A /* ISA DMA Byte count */
+#define PP_CS8900_ISAMemB 0x002C /* Memory base */
+#define PP_CS8920_ISAMemB 0x0348 /* */
+
+#define PP_ISABootBase 0x0030 /* Boot Prom base */
+#define PP_ISABootMask 0x0034 /* Boot Prom Mask */
+
+/* EEPROM data and command registers */
+#define PP_EECMD 0x0040 /* NVR Interface Command register */
+#define PP_EEData 0x0042 /* NVR Interface Data Register */
+#define PP_DebugReg 0x0044 /* Debug Register */
+
+#define PP_RxCFG 0x0102 /* Rx Bus config */
+#define PP_RxCTL 0x0104 /* Receive Control Register */
+#define PP_TxCFG 0x0106 /* Transmit Config Register */
+#define PP_TxCMD 0x0108 /* Transmit Command Register */
+#define PP_BufCFG 0x010A /* Bus configuration Register */
+#define PP_LineCTL 0x0112 /* Line Config Register */
+#define PP_SelfCTL 0x0114 /* Self Command Register */
+#define PP_BusCTL 0x0116 /* ISA bus control Register */
+#define PP_TestCTL 0x0118 /* Test Register */
+#define PP_AutoNegCTL 0x011C /* Auto Negotiation Ctrl */
+
+#define PP_ISQ 0x0120 /* Interrupt Status */
+#define PP_RxEvent 0x0124 /* Rx Event Register */
+#define PP_TxEvent 0x0128 /* Tx Event Register */
+#define PP_BufEvent 0x012C /* Bus Event Register */
+#define PP_RxMiss 0x0130 /* Receive Miss Count */
+#define PP_TxCol 0x0132 /* Transmit Collision Count */
+#define PP_LineST 0x0134 /* Line State Register */
+#define PP_SelfST 0x0136 /* Self State register */
+#define PP_BusST 0x0138 /* Bus Status */
+#define PP_TDR 0x013C /* Time Domain Reflectometry */
+#define PP_AutoNegST 0x013E /* Auto Neg Status */
+#define PP_TxCommand 0x0144 /* Tx Command */
+#define PP_TxLength 0x0146 /* Tx Length */
+#define PP_LAF 0x0150 /* Hash Table */
+#define PP_IA 0x0158 /* Physical Address Register */
+
+#define PP_RxStatus 0x0400 /* Receive start of frame */
+#define PP_RxLength 0x0402 /* Receive Length of frame */
+#define PP_RxFrame 0x0404 /* Receive frame pointer */
+#define PP_TxFrame 0x0A00 /* Transmit frame pointer */
+
+/* Primary I/O Base Address. If no I/O base is supplied by the user, then this */
+/* can be used as the default I/O base to access the PacketPage Area. */
+#define DEFAULTIOBASE 0x0300
+#define FIRST_IO 0x020C /* First I/O port to check */
+#define LAST_IO 0x037C /* Last I/O port to check (+10h) */
+#define ADD_MASK 0x3000	/* Mask for the signature bits read back from the ADD_PORT register */
+#define ADD_SIG 0x3000 /* Expected ID signature */
+
+/* On Macs, we only need to use the ISA I/O stuff until we do MEMORY_ON */
+#ifdef CONFIG_MAC
+#define LCSLOTBASE 0xfee00000
+#define MMIOBASE 0x40000
+#endif
+
+#define CHIP_EISA_ID_SIG 0x630E /* Product ID Code for Crystal Chip (CS8900 spec 4.3) */
+
+#ifdef IBMEIPKT
+#define EISA_ID_SIG 0x4D24 /* IBM */
+#define PART_NO_SIG 0x1010 /* IBM */
+#define MONGOOSE_BIT 0x0000 /* IBM */
+#else
+#define EISA_ID_SIG 0x630E /* PnP Vendor ID (same as chip id for Crystal board) */
+#define PART_NO_SIG 0x4000 /* ID code CS8920 board (PnP Vendor Product code) */
+#define MONGOOSE_BIT 0x2000	/* PART_NO_SIG + MONGOOSE_BIT => ID of mongoose */
+#endif
+
+#define PRODUCT_ID_ADD 0x0002 /* Address of product ID */
+
+/* Mask to find out the types of registers */
+#define REG_TYPE_MASK 0x001F
+
+/* Eeprom Commands */
+#define ERSE_WR_ENBL 0x00F0
+#define ERSE_WR_DISABLE 0x0000
+
+/* Defines Control/Config register quintuplet numbers */
+#define RX_BUF_CFG 0x0003
+#define RX_CONTROL 0x0005
+#define TX_CFG 0x0007
+#define TX_COMMAND 0x0009
+#define BUF_CFG 0x000B
+#define LINE_CONTROL 0x0013
+#define SELF_CONTROL 0x0015
+#define BUS_CONTROL 0x0017
+#define TEST_CONTROL 0x0019
+
+/* Defines Status/Count registers quintuplet numbers */
+#define RX_EVENT 0x0004
+#define TX_EVENT 0x0008
+#define BUF_EVENT 0x000C
+#define RX_MISS_COUNT 0x0010
+#define TX_COL_COUNT 0x0012
+#define LINE_STATUS 0x0014
+#define SELF_STATUS 0x0016
+#define BUS_STATUS 0x0018
+#define TDR 0x001C
+
+/* PP_RxCFG - Receive Configuration and Interrupt Mask bit definition - Read/write */
+#define SKIP_1 0x0040
+#define RX_STREAM_ENBL 0x0080
+#define RX_OK_ENBL 0x0100
+#define RX_DMA_ONLY 0x0200
+#define AUTO_RX_DMA 0x0400
+#define BUFFER_CRC 0x0800
+#define RX_CRC_ERROR_ENBL 0x1000
+#define RX_RUNT_ENBL 0x2000
+#define RX_EXTRA_DATA_ENBL 0x4000
+
+/* PP_RxCTL - Receive Control bit definition - Read/write */
+#define RX_IA_HASH_ACCEPT 0x0040
+#define RX_PROM_ACCEPT 0x0080
+#define RX_OK_ACCEPT 0x0100
+#define RX_MULTCAST_ACCEPT 0x0200
+#define RX_IA_ACCEPT 0x0400
+#define RX_BROADCAST_ACCEPT 0x0800
+#define RX_BAD_CRC_ACCEPT 0x1000
+#define RX_RUNT_ACCEPT 0x2000
+#define RX_EXTRA_DATA_ACCEPT 0x4000
+#define RX_ALL_ACCEPT (RX_PROM_ACCEPT|RX_BAD_CRC_ACCEPT|RX_RUNT_ACCEPT|RX_EXTRA_DATA_ACCEPT)
+/* Default receive mode - individually addressed, broadcast, and error free */
+#define DEF_RX_ACCEPT (RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT)
+
+/* PP_TxCFG - Transmit Configuration Interrupt Mask bit definition - Read/write */
+#define TX_LOST_CRS_ENBL 0x0040
+#define TX_SQE_ERROR_ENBL 0x0080
+#define TX_OK_ENBL 0x0100
+#define TX_LATE_COL_ENBL 0x0200
+#define TX_JBR_ENBL 0x0400
+#define TX_ANY_COL_ENBL 0x0800
+#define TX_16_COL_ENBL 0x8000
+
+/* PP_TxCMD - Transmit Command bit definition - Read-only */
+#define TX_START_4_BYTES 0x0000
+#define TX_START_64_BYTES 0x0040
+#define TX_START_128_BYTES 0x0080
+#define TX_START_ALL_BYTES 0x00C0
+#define TX_FORCE 0x0100
+#define TX_ONE_COL 0x0200
+#define TX_TWO_PART_DEFF_DISABLE 0x0400
+#define TX_NO_CRC 0x1000
+#define TX_RUNT 0x2000
+
+/* PP_BufCFG - Buffer Configuration Interrupt Mask bit definition - Read/write */
+#define GENERATE_SW_INTERRUPT 0x0040
+#define RX_DMA_ENBL 0x0080
+#define READY_FOR_TX_ENBL 0x0100
+#define TX_UNDERRUN_ENBL 0x0200
+#define RX_MISS_ENBL 0x0400
+#define RX_128_BYTE_ENBL 0x0800
+#define TX_COL_COUNT_OVRFLOW_ENBL 0x1000
+#define RX_MISS_COUNT_OVRFLOW_ENBL 0x2000
+#define RX_DEST_MATCH_ENBL 0x8000
+
+/* PP_LineCTL - Line Control bit definition - Read/write */
+#define SERIAL_RX_ON 0x0040
+#define SERIAL_TX_ON 0x0080
+#define AUI_ONLY 0x0100
+#define AUTO_AUI_10BASET 0x0200
+#define MODIFIED_BACKOFF 0x0800
+#define NO_AUTO_POLARITY 0x1000
+#define TWO_PART_DEFDIS 0x2000
+#define LOW_RX_SQUELCH 0x4000
+
+/* PP_SelfCTL - Software Self Control bit definition - Read/write */
+#define POWER_ON_RESET 0x0040
+#define SW_STOP 0x0100
+#define SLEEP_ON 0x0200
+#define AUTO_WAKEUP 0x0400
+#define HCB0_ENBL 0x1000
+#define HCB1_ENBL 0x2000
+#define HCB0 0x4000
+#define HCB1 0x8000
+
+/* PP_BusCTL - ISA Bus Control bit definition - Read/write */
+#define RESET_RX_DMA 0x0040
+#define MEMORY_ON 0x0400
+#define DMA_BURST_MODE 0x0800
+#define IO_CHANNEL_READY_ON 0x1000
+#define RX_DMA_SIZE_64K 0x2000
+#define ENABLE_IRQ 0x8000
+
+/* PP_TestCTL - Test Control bit definition - Read/write */
+#define LINK_OFF 0x0080
+#define ENDEC_LOOPBACK 0x0200
+#define AUI_LOOPBACK 0x0400
+#define BACKOFF_OFF 0x0800
+#define FDX_8900 0x4000
+#define FAST_TEST 0x8000
+
+/* PP_RxEvent - Receive Event Bit definition - Read-only */
+#define RX_IA_HASHED 0x0040
+#define RX_DRIBBLE 0x0080
+#define RX_OK 0x0100
+#define RX_HASHED 0x0200
+#define RX_IA 0x0400
+#define RX_BROADCAST 0x0800
+#define RX_CRC_ERROR 0x1000
+#define RX_RUNT 0x2000
+#define RX_EXTRA_DATA 0x4000
+
+#define HASH_INDEX_MASK 0x0FC00
+
+/* PP_TxEvent - Transmit Event Bit definition - Read-only */
+#define TX_LOST_CRS 0x0040
+#define TX_SQE_ERROR 0x0080
+#define TX_OK 0x0100
+#define TX_LATE_COL 0x0200
+#define TX_JBR 0x0400
+#define TX_16_COL 0x8000
+#define TX_SEND_OK_BITS (TX_OK|TX_LOST_CRS)
+#define TX_COL_COUNT_MASK 0x7800
+
+/* PP_BufEvent - Buffer Event Bit definition - Read-only */
+#define SW_INTERRUPT 0x0040
+#define RX_DMA 0x0080
+#define READY_FOR_TX 0x0100
+#define TX_UNDERRUN 0x0200
+#define RX_MISS 0x0400
+#define RX_128_BYTE 0x0800
+#define TX_COL_OVRFLW 0x1000
+#define RX_MISS_OVRFLW 0x2000
+#define RX_DEST_MATCH 0x8000
+
+/* PP_LineST - Ethernet Line Status bit definition - Read-only */
+#define LINK_OK 0x0080
+#define AUI_ON 0x0100
+#define TENBASET_ON 0x0200
+#define POLARITY_OK 0x1000
+#define CRS_OK 0x4000
+
+/* PP_SelfST - Chip Software Status bit definition */
+#define ACTIVE_33V 0x0040
+#define INIT_DONE 0x0080
+#define SI_BUSY 0x0100
+#define EEPROM_PRESENT 0x0200
+#define EEPROM_OK 0x0400
+#define EL_PRESENT 0x0800
+#define EE_SIZE_64 0x1000
+
+/* PP_BusST - ISA Bus Status bit definition */
+#define TX_BID_ERROR 0x0080
+#define READY_FOR_TX_NOW 0x0100
+
+/* PP_AutoNegCTL - Auto Negotiation Control bit definition */
+#define RE_NEG_NOW 0x0040
+#define ALLOW_FDX 0x0080
+#define AUTO_NEG_ENABLE 0x0100
+#define NLP_ENABLE 0x0200
+#define FORCE_FDX 0x8000
+#define AUTO_NEG_BITS (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE)
+#define AUTO_NEG_MASK (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE|ALLOW_FDX|RE_NEG_NOW)
+
+/* PP_AutoNegST - Auto Negotiation Status bit definition */
+#define AUTO_NEG_BUSY 0x0080
+#define FLP_LINK 0x0100
+#define FLP_LINK_GOOD 0x0800
+#define LINK_FAULT 0x1000
+#define HDX_ACTIVE 0x4000
+#define FDX_ACTIVE 0x8000
+
+/* The following block defines the ISQ event types */
+#define ISQ_RECEIVER_EVENT 0x04
+#define ISQ_TRANSMITTER_EVENT 0x08
+#define ISQ_BUFFER_EVENT 0x0c
+#define ISQ_RX_MISS_EVENT 0x10
+#define ISQ_TX_COL_EVENT 0x12
+
+#define ISQ_EVENT_MASK 0x003F /* ISQ mask to find out type of event */
+#define ISQ_HIST 16 /* small history buffer */
+#define AUTOINCREMENT 0x8000 /* Bit mask to set bit-15 for autoincrement */
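+
+/* Decoding example (illustrative): an ISQ word of 0x0104 satisfies
+ * (word & ISQ_EVENT_MASK) == ISQ_RECEIVER_EVENT, i.e. it reports the
+ * RxEvent register, and the remaining bits (here RX_OK, 0x0100) are the
+ * RxEvent bits defined above. */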
+
+#define TXRXBUFSIZE 0x0600
+#define RXDMABUFSIZE 0x8000
+#define RXDMASIZE 0x4000
+#define TXRX_LENGTH_MASK 0x07FF
+
+/* rx options bits */
+#define RCV_WITH_RXON 1 /* Set SerRx ON */
+#define RCV_COUNTS 2 /* Use Framecnt1 */
+#define RCV_PONG 4 /* Pong respondent */
+#define RCV_DONG 8 /* Dong operation */
+#define RCV_POLLING 0x10 /* Poll RxEvent */
+#define RCV_ISQ 0x20 /* Use ISQ, int */
+#define RCV_AUTO_DMA 0x100 /* Set AutoRxDMAE */
+#define RCV_DMA 0x200 /* Set RxDMA only */
+#define RCV_DMA_ALL 0x400 /* Copy all DMA'ed */
+#define RCV_FIXED_DATA 0x800 /* Every frame same */
+#define RCV_IO 0x1000 /* Use ISA IO only */
+#define RCV_MEMORY 0x2000 /* Use ISA Memory */
+
+#define RAM_SIZE 0x1000 /* The card has 4k bytes of RAM */
+#define PKT_START PP_TxFrame /* Start of packet RAM */
+
+#define RX_FRAME_PORT CS89x0_PORT(0x0000)
+#define TX_FRAME_PORT RX_FRAME_PORT
+#define TX_CMD_PORT CS89x0_PORT(0x0004)
+#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */
+#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */
+#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */
+#define TX_LEN_PORT CS89x0_PORT(0x0006)
+#define ISQ_PORT CS89x0_PORT(0x0008)
+#define ADD_PORT CS89x0_PORT(0x000A)
+#define DATA_PORT CS89x0_PORT(0x000C)
+
+#define EEPROM_WRITE_EN 0x00F0
+#define EEPROM_WRITE_DIS 0x0000
+#define EEPROM_WRITE_CMD 0x0100
+#define EEPROM_READ_CMD 0x0200
+
+/* Receive Header */
+/* Description of header of each packet in receive area of memory */
+#define RBUF_EVENT_LOW 0 /* Low byte of RxEvent - status of received frame */
+#define RBUF_EVENT_HIGH 1 /* High byte of RxEvent - status of received frame */
+#define RBUF_LEN_LOW 2 /* Length of received data - low byte */
+#define RBUF_LEN_HI 3 /* Length of received data - high byte */
+#define RBUF_HEAD_LEN 4 /* Length of this header */
+
+#define CHIP_READ 0x1 /* Used to mark state of the repins code (chip or dma) */
+#define DMA_READ 0x2 /* Used to mark state of the repins code (chip or dma) */
+
+/* for bios scan */
+/* */
+#ifdef CSDEBUG
+/* use these values for debugging bios scan */
+#define BIOS_START_SEG 0x00000
+#define BIOS_OFFSET_INC 0x0010
+#else
+#define BIOS_START_SEG 0x0c000
+#define BIOS_OFFSET_INC 0x0200
+#endif
+
+#define BIOS_LAST_OFFSET 0x0fc00
+
+/* Byte offsets into the EEPROM configuration buffer */
+#define ISA_CNF_OFFSET 0x6
+#define TX_CTL_OFFSET (ISA_CNF_OFFSET + 8) /* 8900 eeprom */
+#define AUTO_NEG_CNF_OFFSET (ISA_CNF_OFFSET + 8) /* 8920 eeprom */
+
+ /* the assumption here is that the bits in the eeprom are generally */
+ /* in the same position as those in the autonegctl register. */
+ /* Of course the IMM bit is not in that register so it must be */
+ /* masked out */
+#define EE_FORCE_FDX 0x8000
+#define EE_NLP_ENABLE 0x0200
+#define EE_AUTO_NEG_ENABLE 0x0100
+#define EE_ALLOW_FDX 0x0080
+#define EE_AUTO_NEG_CNF_MASK (EE_FORCE_FDX|EE_NLP_ENABLE|EE_AUTO_NEG_ENABLE|EE_ALLOW_FDX)
+
+#define IMM_BIT 0x0040 /* ignore missing media */
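+
+/* Illustrative sketch (eeprom_word is a hypothetical variable, not part
+ * of this header): a value read at AUTO_NEG_CNF_OFFSET could be turned
+ * into a PP_AutoNegCTL value as (eeprom_word & EE_AUTO_NEG_CNF_MASK),
+ * since the EE_* bits sit in the same positions as FORCE_FDX,
+ * NLP_ENABLE, AUTO_NEG_ENABLE and ALLOW_FDX, while the mask already
+ * drops IMM_BIT (0x0040), which is not an autonegctl bit. */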
+
+#define ADAPTER_CNF_OFFSET (AUTO_NEG_CNF_OFFSET + 2)
+#define A_CNF_10B_T 0x0001
+#define A_CNF_AUI 0x0002
+#define A_CNF_10B_2 0x0004
+#define A_CNF_MEDIA_TYPE 0x0070
+#define A_CNF_MEDIA_AUTO 0x0070
+#define A_CNF_MEDIA_10B_T 0x0020
+#define A_CNF_MEDIA_AUI 0x0040
+#define A_CNF_MEDIA_10B_2 0x0010
+#define A_CNF_DC_DC_POLARITY 0x0080
+#define A_CNF_NO_AUTO_POLARITY 0x2000
+#define A_CNF_LOW_RX_SQUELCH 0x4000
+#define A_CNF_EXTND_10B_2 0x8000
+
+#define PACKET_PAGE_OFFSET 0x8
+
+/* Bit definitions for the ISA configuration word from the EEPROM */
+#define INT_NO_MASK 0x000F
+#define DMA_NO_MASK 0x0070
+#define ISA_DMA_SIZE 0x0200
+#define ISA_AUTO_RxDMA 0x0400
+#define ISA_RxDMA 0x0800
+#define DMA_BURST 0x1000
+#define STREAM_TRANSFER 0x2000
+#define ANY_ISA_DMA (ISA_AUTO_RxDMA | ISA_RxDMA)
+
+/* DMA controller registers */
+#define DMA_BASE 0x00 /* DMA controller base */
+#define DMA_BASE_2 0x0C0 /* DMA controller base */
+
+#define DMA_STAT 0x0D0 /* DMA controller status register */
+#define DMA_MASK 0x0D4 /* DMA controller mask register */
+#define DMA_MODE 0x0D6 /* DMA controller mode register */
+#define DMA_RESETFF 0x0D8 /* DMA controller first/last flip flop */
+
+/* DMA data */
+#define DMA_DISABLE 0x04 /* Disable channel n */
+#define DMA_ENABLE 0x00 /* Enable channel n */
+/* Demand transfers, incr. address, auto init, writes, ch. n */
+#define DMA_RX_MODE 0x14
+/* Demand transfers, incr. address, auto init, reads, ch. n */
+#define DMA_TX_MODE 0x18
+
+#define DMA_SIZE (16*1024) /* Size of dma buffer - 16k */
+
+#define CS8900 0x0000
+#define CS8920 0x4000
+#define CS8920M 0x6000
+#define REVISON_BITS 0x1F00
+#define EEVER_NUMBER 0x12
+#define CHKSUM_LEN 0x14
+#define CHKSUM_VAL 0x0000
+#define START_EEPROM_DATA 0x001c /* Offset into eeprom for start of data */
+#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */
+#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */
+#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */
+#ifdef CONFIG_SH_HICOSH4
+#define CS8900_IRQ_MAP 0x0002 /* HiCO-SH4 board has its IRQ on #1 */
+#else
+#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */
+#endif
+
+#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */
+
+#define PNP_ADD_PORT 0x0279
+#define PNP_WRITE_PORT 0x0A79
+
+#define GET_PNP_ISA_STRUCT 0x40
+#define PNP_ISA_STRUCT_LEN 0x06
+#define PNP_CSN_CNT_OFF 0x01
+#define PNP_RD_PORT_OFF 0x02
+#define PNP_FUNCTION_OK 0x00
+#define PNP_WAKE 0x03
+#define PNP_RSRC_DATA 0x04
+#define PNP_RSRC_READY 0x01
+#define PNP_STATUS 0x05
+#define PNP_ACTIVATE 0x30
+#define PNP_CNF_IO_H 0x60
+#define PNP_CNF_IO_L 0x61
+#define PNP_CNF_INT 0x70
+#define PNP_CNF_DMA 0x74
+#define PNP_CNF_MEM 0x48
+
+#define BIT0 1
+#define BIT15 0x8000
+
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
new file mode 100644
index 000000000000..56a100fb9e4b
--- /dev/null
+++ b/drivers/net/de600.c
@@ -0,0 +1,561 @@
+static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n";
+/*
+ * de600.c
+ *
+ * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
+ * The Author may be reached as bj0rn@blox.se
+ *
+ * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
+ * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
+ * For DE600.asm:
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ * Copyright, 1988-1992, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ *
+ **************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************/
+
+/* Add more time here if your adapter won't work OK: */
+#define DE600_SLOW_DOWN udelay(delay_time)
+
+ /*
+ * If you still have trouble reading/writing to the adapter,
+ * modify the following "#define": (see <asm/io.h> for more info)
+#define REALLY_SLOW_IO
+ */
+#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef DE600_DEBUG
+#define PRINTK(x) if (de600_debug >= 2) printk x
+#else
+#define DE600_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include "de600.h"
+
+static unsigned int de600_debug = DE600_DEBUG;
+module_param(de600_debug, int, 0);
+MODULE_PARM_DESC(de600_debug, "DE-600 debug level (0-2)");
+
+static unsigned int check_lost = 1;
+module_param(check_lost, bool, 0);
+MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
+
+static unsigned int delay_time = 10;
+module_param(delay_time, int, 0);
+MODULE_PARM_DESC(delay_time, "DE-600 delay on I/O in microseconds");
+
+
+/*
+ * D-Link driver variables:
+ */
+
+static volatile int rx_page;
+
+#define TX_PAGES 2
+static volatile int tx_fifo[TX_PAGES];
+static volatile int tx_fifo_in;
+static volatile int tx_fifo_out;
+static volatile int free_tx_pages = TX_PAGES;
+static int was_down;
+static DEFINE_SPINLOCK(de600_lock);
+
+static inline u8 de600_read_status(struct net_device *dev)
+{
+ u8 status;
+
+ outb_p(STATUS, DATA_PORT);
+ status = inb(STATUS_PORT);
+ outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
+
+ return status;
+}
+
+static inline u8 de600_read_byte(unsigned char type, struct net_device *dev)
+{
+ /* dev used by macros */
+ u8 lo;
+ outb_p((type), DATA_PORT);
+ lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
+ outb_p((type) | HI_NIBBLE, DATA_PORT);
+ return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int de600_open(struct net_device *dev)
+{
+ unsigned long flags;
+ int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return ret;
+ }
+ spin_lock_irqsave(&de600_lock, flags);
+ ret = adapter_init(dev);
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return ret;
+}
+
+/*
+ * The inverse routine to de600_open().
+ */
+
+static int de600_close(struct net_device *dev)
+{
+ select_nic();
+ rx_page = 0;
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ de600_put_command(0);
+ select_prn();
+ free_irq(DE600_IRQ, dev);
+ return 0;
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ return (struct net_device_stats *)(dev->priv);
+}
+
+static inline void trigger_interrupt(struct net_device *dev)
+{
+ de600_put_command(FLIP_IRQ);
+ select_prn();
+ DE600_SLOW_DOWN;
+ select_nic();
+ de600_put_command(0);
+}
+
+/*
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+
+static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int transmit_from;
+ int len;
+ int tickssofar;
+ u8 *buffer = skb->data;
+ int i;
+
+ if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ /* else */
+ printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
+ /* Restart the adapter. */
+ spin_lock_irqsave(&de600_lock, flags);
+ if (adapter_init(dev)) {
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&de600_lock, flags);
+ }
+
+ /* Start real output */
+ PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+
+ spin_lock_irqsave(&de600_lock, flags);
+ select_nic();
+ tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
+
+ if(check_lost)
+ {
+ /* This costs about 40 instructions per packet... */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
+ if (adapter_init(dev)) {
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return 1;
+ }
+ }
+ }
+
+ de600_setup_address(transmit_from, RW_ADDR);
+ for (i = 0; i < skb->len ; ++i, ++buffer)
+ de600_put_byte(*buffer);
+ for (; i < len; ++i)
+ de600_put_byte(0);
+
+ if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev); /* allow more packets into adapter */
+ /* Send page and generate a faked interrupt */
+ de600_setup_address(transmit_from, TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ }
+ else {
+ if (free_tx_pages)
+ netif_start_queue(dev);
+ else
+ netif_stop_queue(dev);
+ select_prn();
+ }
+ spin_unlock_irqrestore(&de600_lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+
+static irqreturn_t de600_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ u8 irq_status;
+ int retrig = 0;
+ int boguscount = 0;
+
+ /* This might just as well be deleted now, no crummy drivers present :-) */
+ if ((dev == NULL) || (DE600_IRQ != irq)) {
+ printk(KERN_ERR "%s: bogus interrupt %d\n", dev?dev->name:"DE-600", irq);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&de600_lock);
+
+ select_nic();
+ irq_status = de600_read_status(dev);
+
+ do {
+ PRINTK(("de600_interrupt (%02X)\n", irq_status));
+
+ if (irq_status & RX_GOOD)
+ de600_rx_intr(dev);
+ else if (!(irq_status & RX_BUSY))
+ de600_put_command(RX_ENABLE);
+
+ /* Any transmission in progress? */
+ if (free_tx_pages < TX_PAGES)
+ retrig = de600_tx_intr(dev, irq_status);
+ else
+ retrig = 0;
+
+ irq_status = de600_read_status(dev);
+ } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
+ /*
+ * Yeah, it _looks_ like busy waiting, smells like busy waiting
+ * and I know it's not PC, but please, it will only occur once
+ * in a while and then only for a loop or so (< 1ms for sure!)
+ */
+
+ /* Enable adapter interrupts */
+ select_prn();
+ if (retrig)
+ trigger_interrupt(dev);
+ spin_unlock(&de600_lock);
+ return IRQ_HANDLED;
+}
+
+static int de600_tx_intr(struct net_device *dev, int irq_status)
+{
+ /*
+ * Returns 1 if tx still not done
+ */
+
+ /* Check if current transmission is done yet */
+ if (irq_status & TX_BUSY)
+ return 1; /* tx not done, try again */
+
+ /* else */
+ /* If last transmission OK then bump fifo index */
+ if (!(irq_status & TX_FAILED16)) {
+ tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
+ ++free_tx_pages;
+ ((struct net_device_stats *)(dev->priv))->tx_packets++;
+ netif_wake_queue(dev);
+ }
+
+ /* More to send, or resend last packet? */
+ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
+ dev->trans_start = jiffies;
+ de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ return 1;
+ }
+ /* else */
+
+ return 0;
+}
+
+/*
+ * We have a good packet, get it out of the adapter.
+ */
+static void de600_rx_intr(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int i;
+ int read_from;
+ int size;
+ unsigned char *buffer;
+
+ /* Get size of received packet */
+ size = de600_read_byte(RX_LEN, dev); /* low byte */
+ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
+ size -= 4; /* Ignore trailing 4 CRC-bytes */
+
+ /* Tell adapter where to store next incoming packet, enable receiver */
+ read_from = rx_page_adr();
+ next_rx_page();
+ de600_put_command(RX_ENABLE);
+
+ if ((size < 32) || (size > 1535)) {
+ printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size);
+ if (size > 10000)
+ adapter_init(dev);
+ return;
+ }
+
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) {
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
+ return;
+ }
+ /* else */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ buffer = skb_put(skb,size);
+
+ /* copy the packet into the buffer */
+ de600_setup_address(read_from, RW_ADDR);
+ for (i = size; i > 0; --i, ++buffer)
+ *buffer = de600_read_byte(READ_DATA, dev);
+
+ skb->protocol=eth_type_trans(skb,dev);
+
+ netif_rx(skb);
+
+ /* update stats */
+ dev->last_rx = jiffies;
+ ((struct net_device_stats *)(dev->priv))->rx_packets++; /* count all receives */
+ ((struct net_device_stats *)(dev->priv))->rx_bytes += size; /* count all received bytes */
+
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * will work on them when we get to the tasklets.
+ */
+}
+
+static struct net_device * __init de600_probe(void)
+{
+ int i;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct net_device_stats));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_MODULE_OWNER(dev);
+
+ if (!request_region(DE600_IO, 3, "de600")) {
+ printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
+ err = -EBUSY;
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
+ /* Alpha testers must have the version number to report bugs. */
+ if (de600_debug > 1)
+ printk(version);
+
+ /* probe for adapter */
+ err = -ENODEV;
+ rx_page = 0;
+ select_nic();
+ (void)de600_read_status(dev);
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ if (de600_read_status(dev) & 0xf0) {
+ printk(": not at I/O %#3x.\n", DATA_PORT);
+ goto out1;
+ }
+
+ /*
+ * Maybe we found one,
+ * have to check if it is a D-Link DE-600 adapter...
+ */
+
+ /* Get the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
+ dev->broadcast[i] = 0xff;
+ }
+
+ /* Check magic code */
+ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
+ /* OK, install real address */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0xc8;
+ dev->dev_addr[3] &= 0x0f;
+ dev->dev_addr[3] |= 0x70;
+ } else {
+ printk(" not identified in the printer port\n");
+ goto out1;
+ }
+
+ printk(", Ethernet Address: %02X", dev->dev_addr[0]);
+ for (i = 1; i < ETH_ALEN; i++)
+ printk(":%02X",dev->dev_addr[i]);
+ printk("\n");
+
+ dev->get_stats = get_stats;
+
+ dev->open = de600_open;
+ dev->stop = de600_close;
+ dev->hard_start_xmit = &de600_start_xmit;
+
+ dev->flags&=~IFF_MULTICAST;
+
+ select_prn();
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+
+ return dev;
+
+out1:
+ release_region(DE600_IO, 3);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int adapter_init(struct net_device *dev)
+{
+ int i;
+
+ select_nic();
+ rx_page = 0; /* used by RESET */
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+
+ /* Check if it is still there... */
+ /* Get some bytes of the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
+ (de600_read_byte(READ_DATA, dev) != 0x15)) {
+ /* was: if (de600_read_status(dev) & 0xf0) { */
+ printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de600_close(dev);
+ was_down = 1;
+ netif_stop_queue(dev); /* Transmit busy... */
+ return 1; /* failed */
+ }
+
+ if (was_down) {
+ printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
+ was_down = 0;
+ }
+
+ tx_fifo_in = 0;
+ tx_fifo_out = 0;
+ free_tx_pages = TX_PAGES;
+
+
+ /* set the ether address. */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++)
+ de600_put_byte(dev->dev_addr[i]);
+
+ /* where to start saving incoming packets */
+ rx_page = RX_BP | RX_BASE_PAGE;
+ de600_setup_address(MEM_4K, RW_ADDR);
+ /* Enable receiver */
+ de600_put_command(RX_ENABLE);
+ select_prn();
+
+ netif_start_queue(dev);
+
+ return 0; /* OK */
+}
+
+static struct net_device *de600_dev;
+
+static int __init de600_init(void)
+{
+ de600_dev = de600_probe();
+ if (IS_ERR(de600_dev))
+ return PTR_ERR(de600_dev);
+ return 0;
+}
+
+static void __exit de600_exit(void)
+{
+ unregister_netdev(de600_dev);
+ release_region(DE600_IO, 3);
+ free_netdev(de600_dev);
+}
+
+module_init(de600_init);
+module_exit(de600_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/de600.h b/drivers/net/de600.h
new file mode 100644
index 000000000000..e4073015dcd8
--- /dev/null
+++ b/drivers/net/de600.h
@@ -0,0 +1,169 @@
+/**************************************************
+ * *
+ * Definition of D-Link Ethernet Pocket adapter *
+ * *
+ **************************************************/
+/*
+ * D-Link Ethernet pocket adapter ports
+ */
+/*
+ * OK, so I'm cheating, but there are an awful lot of
+ * reads and writes in order to get anything in and out
+ * of the DE-600 with 4 bits at a time in the parallel port,
+ * so every saved instruction really helps :-)
+ */
+
+#ifndef DE600_IO
+#define DE600_IO 0x378
+#endif
+
+#define DATA_PORT (DE600_IO)
+#define STATUS_PORT (DE600_IO + 1)
+#define COMMAND_PORT (DE600_IO + 2)
+
+#ifndef DE600_IRQ
+#define DE600_IRQ 7
+#endif
+/*
+ * It really should look like this, and autoprobing as well...
+ *
+#define DATA_PORT (dev->base_addr + 0)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+#define DE600_IRQ dev->irq
+ */
+
+/*
+ * D-Link COMMAND_PORT commands
+ */
+#define SELECT_NIC 0x04 /* select Network Interface Card */
+#define SELECT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/*
+ * D-Link STATUS_PORT
+ */
+#define RX_BUSY 0x80
+#define RX_GOOD 0x40
+#define TX_FAILED16 0x10
+#define TX_BUSY 0x08
+
+/*
+ * D-Link DATA_PORT commands
+ * command in low 4 bits
+ * data in high 4 bits
+ * select current data nibble with HI_NIBBLE bit
+ */
+#define WRITE_DATA 0x00 /* write memory */
+#define READ_DATA 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register (see COMMAND below) */
+#define NULL_COMMAND 0x04 /* null command */
+#define RX_LEN 0x05 /* read received packet length */
+#define TX_ADDR 0x06 /* set adapter transmit memory address */
+#define RW_ADDR 0x07 /* set adapter read/write memory address */
+#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
+ or-ed with rest of command */
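+
+/*
+ * Worked example of the nibble protocol (illustrative; the real code is
+ * the de600_put_byte() macro further down): writing the data byte 0xAB
+ * to adapter memory takes two writes to DATA_PORT, each carrying one
+ * data nibble in the high 4 bits and the command in the low 4 bits:
+ *
+ * outb_p(0xB0, DATA_PORT); low nibble 0xB, command WRITE_DATA (0x00)
+ * outb_p(0xA8, DATA_PORT); high nibble 0xA, WRITE_DATA | HI_NIBBLE
+ */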
+
+/*
+ * command register, accessed through DATA_PORT with low bits = COMMAND
+ */
+#define RX_ALL 0x01 /* PROMISCUOUS */
+#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
+#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
+
+#define TX_ENABLE 0x04 /* bit 2 */
+#define RX_ENABLE 0x08 /* bit 3 */
+
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+
+/*
+ * data to command register
+ * (high 4 bits in write to DATA_PORT)
+ */
+#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
+#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
+#define FLIP_IRQ 0x40 /* bit 6 */
+
+/*
+ * D-Link adapter internal memory:
+ *
+ * 0-2K 1:st transmit page (send from pointer up to 2K)
+ * 2-4K 2:nd transmit page (send from pointer up to 4K)
+ *
+ * 4-6K 1:st receive page (data from 4K upwards)
+ * 6-8K 2:nd receive page (data from 6K upwards)
+ *
+ * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
+ */
+#define MEM_2K 0x0800 /* 2048 */
+#define MEM_4K 0x1000 /* 4096 */
+#define MEM_6K 0x1800 /* 6144 */
+#define NODE_ADDRESS 0x2000 /* 8192 */
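+
+/*
+ * Rough example of the transmit pages above (illustrative, based on
+ * tx_page_adr() below and de600_start_xmit() in de600.c): a 100-byte
+ * frame queued on tx page 0 is written from tx_page_adr(0) - 100 =
+ * 0x0800 - 0x64 = 0x079C, and the adapter sends from that pointer up
+ * to the 2K page boundary.
+ */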
+
+#define RUNT 60 /* Too small Ethernet packet */
+
+/**************************************************
+ * *
+ * End of definition *
+ * *
+ **************************************************/
+
+/*
+ * Index to functions, as function prototypes.
+ */
+/* Routines used internally. (See "convenience macros") */
+static u8 de600_read_status(struct net_device *dev);
+static u8 de600_read_byte(unsigned char type, struct net_device *dev);
+
+/* Put in the device structure. */
+static int de600_open(struct net_device *dev);
+static int de600_close(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* Dispatch from interrupts. */
+static irqreturn_t de600_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de600_tx_intr(struct net_device *dev, int irq_status);
+static void de600_rx_intr(struct net_device *dev);
+
+/* Initialization */
+static void trigger_interrupt(struct net_device *dev);
+static int adapter_init(struct net_device *dev);
+
+/*
+ * Convenience macros/functions for D-Link adapter
+ */
+
+#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
+#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
+
+/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
+#define de600_put_byte(data) ( \
+ outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
+ outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
+
+/*
+ * The first two outb_p()'s below could perhaps be deleted if there
+ * would be more delay in the last two. Not certain about it yet...
+ */
+#define de600_put_command(cmd) ( \
+ outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
+ outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
+ outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
+ outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
+
+#define de600_setup_address(addr,type) ( \
+ outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
+ outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
+ outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
+ outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
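+
+/*
+ * Sketch of the address transfer above (illustrative): a 16-bit adapter
+ * address goes out as four nibbles, lowest first, in the high 4 bits of
+ * each write, with the command type in the low 4 bits. For example,
+ * de600_setup_address(NODE_ADDRESS, RW_ADDR) with NODE_ADDRESS = 0x2000
+ * and RW_ADDR = 0x07 writes 0x07, 0x0F, 0x07, 0x2F to DATA_PORT.
+ */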
+
+#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
+
+/* Flip bit, only 2 pages */
+#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
+
+#define tx_page_adr(a) (((a) + 1) * MEM_2K)
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
new file mode 100644
index 000000000000..0069f5fa973a
--- /dev/null
+++ b/drivers/net/de620.c
@@ -0,0 +1,1047 @@
+/*
+ * de620.c $Revision: 1.40 $ BETA
+ *
+ *
+ * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Based on adapter information gathered from DOS packetdriver
+ * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
+ * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
+ * Copyright, 1988, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ *
+ * Valuable assistance from:
+ * J. Joshua Kopper <kopper@rtsg.mot.com>
+ * Olav Kvittem <Olav.Kvittem@uninett.no>
+ * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
+ * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
+ *
+ *****************************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+static const char version[] =
+ "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
+
+/***********************************************************************
+ *
+ * "Tuning" section.
+ *
+ * Compile-time options: (see below for descriptions)
+ * -DDE620_IO=0x378 (lpt1)
+ * -DDE620_IRQ=7 (lpt1)
+ * -DDE602_DEBUG=...
+ * -DSHUTDOWN_WHEN_LOST
+ * -DCOUNT_LOOPS
+ * -DLOWSPEED
+ * -DREAD_DELAY
+ * -DWRITE_DELAY
+ */
+
+/*
+ * This driver assumes that the printer port is a "normal",
+ * dumb, uni-directional port!
+ * If your port is "fancy" in any way, please try to set it to "normal"
+ * with your BIOS setup. I have no access to machines with bi-directional
+ * ports, so I can't test such a driver :-(
+ * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
+ *
+ * There are some clones of DE620 out there, with different names.
+ * If the current driver does not recognize a clone, try to change
+ * the following #define to:
+ *
+ * #define DE620_CLONE 1
+ */
+#define DE620_CLONE 0
+
+/*
+ * If the adapter has problems with high speeds, enable this #define
+ * otherwise full printerport speed will be attempted.
+ *
+ * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
+ *
+#define LOWSPEED
+ */
+
+#ifndef READ_DELAY
+#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
+#endif
+
+#ifndef WRITE_DELAY
+#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
+#endif
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ *
+#define SHUTDOWN_WHEN_LOST
+ */
+
+/*
+ * Enable debugging by "-DDE620_DEBUG=3" when compiling,
+ * OR by enabling the following #define
+ *
+ * use 0 for production, 1 for verification, >2 for debug
+ *
+#define DE620_DEBUG 3
+ */
+
+#ifdef LOWSPEED
+/*
+ * Enable this #define if you want to see debugging output that show how long
+ * we have to wait before the DE-620 is ready for the next read/write/command.
+ *
+#define COUNT_LOOPS
+ */
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* Constant definitions for the DE-620 registers, commands and bits */
+#include "de620.h"
+
+typedef unsigned char byte;
+
+/*******************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * See also "de620.h" *
+ * *
+ *******************************************************/
+#ifndef DE620_IO /* Compile-time configurable */
+#define DE620_IO 0x378
+#endif
+
+#ifndef DE620_IRQ /* Compile-time configurable */
+#define DE620_IRQ 7
+#endif
+
+#define DATA_PORT (dev->base_addr)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+
+#define RUNT 60 /* Too small Ethernet packet */
+#define GIANT 1514 /* largest legal size packet, no fcs */
+
+#ifdef DE620_DEBUG /* Compile-time configurable */
+#define PRINTK(x) if (de620_debug >= 2) printk x
+#else
+#define DE620_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+
+
+/*
+ * Force media with insmod:
+ * insmod de620.o bnc=1
+ * or
+ * insmod de620.o utp=1
+ *
+ * Force io and/or irq with insmod:
+ * insmod de620.o io=0x378 irq=7
+ *
+ * Make a clone skip the Ethernet-address range check:
+ * insmod de620.o clone=1
+ */
+static int bnc;
+static int utp;
+static int io = DE620_IO;
+static int irq = DE620_IRQ;
+static int clone = DE620_CLONE;
+
+static unsigned int de620_debug = DE620_DEBUG;
+
+static spinlock_t de620_lock;
+
+module_param(bnc, int, 0);
+module_param(utp, int, 0);
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(clone, int, 0);
+module_param(de620_debug, int, 0);
+MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
+MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
+MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
+MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
+MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
+MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)");
+
+/***********************************************
+ * *
+ * Index to functions, as function prototypes. *
+ * *
+ ***********************************************/
+
+/*
+ * Routines used internally. (See also "convenience macros.. below")
+ */
+
+/* Put in the device structure. */
+static int de620_open(struct net_device *);
+static int de620_close(struct net_device *);
+static struct net_device_stats *get_stats(struct net_device *);
+static void de620_set_multicast_list(struct net_device *);
+static int de620_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Dispatch from interrupts. */
+static irqreturn_t de620_interrupt(int, void *, struct pt_regs *);
+static int de620_rx_intr(struct net_device *);
+
+/* Initialization */
+static int adapter_init(struct net_device *);
+static int read_eeprom(struct net_device *);
+
+
+/*
+ * D-Link driver variables:
+ */
+#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
+#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
+#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
+#define DEF_NIC_CMD IRQEN | ICEN | DS1
+
+static volatile byte NIC_Cmd;
+static volatile byte next_rx_page;
+static byte first_rx_page;
+static byte last_rx_page;
+static byte EIPRegister;
+
+static struct nic {
+ byte NodeID[6];
+ byte RAM_Size;
+ byte Model;
+ byte Media;
+ byte SCR;
+} nic_data;
+
+/**********************************************************
+ * *
+ * Convenience macros/functions for D-Link DE-620 adapter *
+ * *
+ **********************************************************/
+#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
+#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
+
+/* Check for ready-status, and return a nibble (high 4 bits) for data input */
+#ifdef COUNT_LOOPS
+static int tot_cnt;
+#endif
+static inline byte
+de620_ready(struct net_device *dev)
+{
+ byte value;
+ register short int cnt = 0;
+
+ while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
+ ++cnt;
+
+#ifdef COUNT_LOOPS
+ tot_cnt += cnt;
+#endif
+ return value & 0xf0; /* nibble */
+}
+
+static inline void
+de620_send_command(struct net_device *dev, byte cmd)
+{
+ de620_ready(dev);
+ if (cmd == W_DUMMY)
+ outb(NIC_Cmd, COMMAND_PORT);
+
+ outb(cmd, DATA_PORT);
+
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+ de620_ready(dev);
+ outb(NIC_Cmd, COMMAND_PORT);
+}
+
+static inline void
+de620_put_byte(struct net_device *dev, byte value)
+{
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ de620_ready(dev);
+ outb(value, DATA_PORT);
+ de620_flip_ds(dev);
+}
+
+static inline byte
+de620_read_byte(struct net_device *dev)
+{
+ byte value;
+
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ value = de620_ready(dev); /* High nibble */
+ de620_flip_ds(dev);
+ value |= de620_ready(dev) >> 4; /* Low nibble */
+ return value;
+}
+
+static inline void
+de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
+{
+#ifndef LOWSPEED
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+#ifdef COUNT_LOOPS
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+ /* No further optimization useful, the limit is in the adapter. */
+ for ( ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev,*buffer);
+ }
+ for ( count = pad ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev, 0);
+ }
+ de620_send_command(dev,W_DUMMY);
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ for ( ; count > 0; count -=2) {
+ outb(*buffer++, DATA_PORT);
+ outb(uflip, COMMAND_PORT);
+ outb(*buffer++, DATA_PORT);
+ outb(dflip, COMMAND_PORT);
+ }
+ de620_send_command(dev,W_DUMMY);
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_read_block(struct net_device *dev, byte *data, int count)
+{
+#ifndef LOWSPEED
+ byte value;
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+ /* No further optimization useful, the limit is in the adapter. */
+ while (count-- > 0) {
+ *data++ = de620_read_byte(dev);
+ de620_flip_ds(dev);
+ }
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ while (count-- > 0) {
+ value = inb(STATUS_PORT) & 0xf0; /* High nibble */
+ outb(uflip, COMMAND_PORT);
+ *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
+ outb(dflip , COMMAND_PORT);
+ }
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_set_delay(struct net_device *dev)
+{
+ de620_ready(dev);
+ outb(W_DFR, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(WRITE_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(READ_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+}
+
+static inline void
+de620_set_register(struct net_device *dev, byte reg, byte value)
+{
+ de620_ready(dev);
+ outb(reg, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_put_byte(dev, value);
+}
+
+static inline byte
+de620_get_register(struct net_device *dev, byte reg)
+{
+ byte value;
+
+ de620_send_command(dev,reg);
+ value = de620_read_byte(dev);
+ de620_send_command(dev,W_DUMMY);
+
+ return value;
+}
+
+/*********************************************************************
+ *
+ * Open/initialize the board.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ *
+ */
+static int de620_open(struct net_device *dev)
+{
+ int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
+ if (ret) {
+ printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return ret;
+ }
+
+ if (adapter_init(dev)) {
+ ret = -EIO;
+ goto out_free_irq;
+ }
+
+ netif_start_queue(dev);
+ return 0;
+
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return ret;
+}
+
+/************************************************
+ *
+ * The inverse routine to de620_open().
+ *
+ */
+
+static int de620_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ /* disable recv */
+ de620_set_register(dev, W_TCR, RXOFF);
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/*********************************************
+ *
+ * Return current statistics
+ *
+ */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ return (struct net_device_stats *)(dev->priv);
+}
+
+/*********************************************
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * (no real multicast implemented for the DE-620, but she can be promiscuous...)
+ *
+ */
+
+static void de620_set_multicast_list(struct net_device *dev)
+{
+ if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ { /* Enable promiscuous mode */
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
+ }
+ else
+ { /* Disable promiscuous mode, use normal mode */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+ }
+}
+
+/*******************************************************
+ *
+ * Handle timeouts on transmit
+ */
+
+static void de620_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
+ /* Restart the adapter. */
+ if (!adapter_init(dev)) /* maybe close it */
+ netif_wake_queue(dev);
+}
+
+/*******************************************************
+ *
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int len;
+ byte *buffer = skb->data;
+ byte using_txbuf;
+
+ using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
+
+ netif_stop_queue(dev);
+
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+ if (len & 1) /* send an even number of bytes */
+ ++len;
+
+ /* Start real output */
+
+ spin_lock_irqsave(&de620_lock, flags);
+ PRINTK(("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf));
+
+ /* select a free tx buffer. if there is one... */
+ switch (using_txbuf) {
+ default: /* both are free: use TXBF0 */
+ case TXBF1: /* use TXBF0 */
+ de620_send_command(dev,W_CR | RW0);
+ using_txbuf |= TXBF0;
+ break;
+
+ case TXBF0: /* use TXBF1 */
+ de620_send_command(dev,W_CR | RW1);
+ using_txbuf |= TXBF1;
+ break;
+
+ case (TXBF0 | TXBF1): /* NONE!!! */
+ printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
+ spin_unlock_irqrestore(&de620_lock, flags);
+ return 1;
+ }
+ de620_write_block(dev, buffer, skb->len, len-skb->len);
+
+ dev->trans_start = jiffies;
+ if(!(using_txbuf == (TXBF0 | TXBF1)))
+ netif_wake_queue(dev);
+
+ ((struct net_device_stats *)(dev->priv))->tx_packets++;
+ spin_unlock_irqrestore(&de620_lock, flags);
+ dev_kfree_skb (skb);
+ return 0;
+}
+
+/*****************************************************
+ *
+ * Handle the network interface interrupts.
+ *
+ */
+static irqreturn_t
+de620_interrupt(int irq_in, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ byte irq_status;
+ int bogus_count = 0;
+ int again = 0;
+
+ spin_lock(&de620_lock);
+
+ /* Read the status register (_not_ the status port) */
+ irq_status = de620_get_register(dev, R_STS);
+
+ PRINTK(("de620_interrupt (%2.2X)\n", irq_status));
+
+ if (irq_status & RXGOOD) {
+ do {
+ again = de620_rx_intr(dev);
+ PRINTK(("again=%d\n", again));
+ }
+ while (again && (++bogus_count < 100));
+ }
+
+ if(de620_tx_buffs(dev) != (TXBF0 | TXBF1))
+ netif_wake_queue(dev);
+
+ spin_unlock(&de620_lock);
+ return IRQ_HANDLED;
+}
+
+/**************************************
+ *
+ * Get a packet from the adapter
+ *
+ * Send it "upstairs"
+ *
+ */
+static int de620_rx_intr(struct net_device *dev)
+{
+ struct header_buf {
+ byte status;
+ byte Rx_NextPage;
+ unsigned short Rx_ByteCount;
+ } header_buf;
+ struct sk_buff *skb;
+ int size;
+ byte *buffer;
+ byte pagelink;
+ byte curr_page;
+
+ PRINTK(("de620_rx_intr: next_rx_page = %d\n", next_rx_page));
+
+ /* Tell the adapter that we are going to read data, and from where */
+ de620_send_command(dev, W_CR | RRN);
+ de620_set_register(dev, W_RSA1, next_rx_page);
+ de620_set_register(dev, W_RSA0, 0);
+
+ /* Deep breath, and away we goooooo */
+ de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
+ PRINTK(("page status=0x%02x, nextpage=%d, packetsize=%d\n",
+ header_buf.status, header_buf.Rx_NextPage, header_buf.Rx_ByteCount));
+
+ /* Plausible page header? */
+ pagelink = header_buf.Rx_NextPage;
+ if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
+ /* Ouch... Forget it! Skip all and start afresh... */
+ printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
+ /* You win some, you lose some. And sometimes plenty... */
+ adapter_init(dev);
+ netif_wake_queue(dev);
+ ((struct net_device_stats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+
+ /* OK, this look good, so far. Let's see if it's consistent... */
+ /* Let's compute the start of the next packet, based on where we are */
+ pagelink = next_rx_page +
+ ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
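+ /*
+ * (For illustration: the expression above equals
+ * ceil((4 + Rx_ByteCount) / 256), i.e. the number of 256-byte pages
+ * used by the 4-byte page header plus the received bytes; a maximal
+ * 1518-byte Rx_ByteCount therefore advances the ring by 6 pages.)
+ */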
+
+ /* Are we going to wrap around the page counter? */
+ if (pagelink > last_rx_page)
+ pagelink -= (last_rx_page - first_rx_page + 1);
+
+ /* Is the _computed_ next page number equal to what the adapter says? */
+ if (pagelink != header_buf.Rx_NextPage) {
+ /* Naah, we'll skip this packet. Probably bogus data as well */
+ printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
+ next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
+ de620_send_command(dev, W_DUMMY);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ ((struct net_device_stats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+ next_rx_page = pagelink;
+
+ size = header_buf.Rx_ByteCount - 4;
+ if ((size < RUNT) || (GIANT < size)) {
+ printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
+ }
+ else { /* Good packet? */
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) { /* Yeah, but no place to put it... */
+ printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
+ ((struct net_device_stats *)(dev->priv))->rx_dropped++;
+ }
+ else { /* Yep! Go get it! */
+ skb_reserve(skb,2); /* Align */
+ skb->dev = dev;
+ /* skb->data points to the start of sk_buff data area */
+ buffer = skb_put(skb,size);
+ /* copy the packet into the buffer */
+ de620_read_block(dev, buffer, size);
+ PRINTK(("Read %d bytes\n", size));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* deliver it "upstairs" */
+ dev->last_rx = jiffies;
+ /* count all receives */
+ ((struct net_device_stats *)(dev->priv))->rx_packets++;
+ ((struct net_device_stats *)(dev->priv))->rx_bytes += size;
+ }
+ }
+
+ /* Let's peek ahead to see if we have read the last current packet */
+ /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
+ curr_page = de620_get_register(dev, R_CPR);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ PRINTK(("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page));
+
+ return (next_rx_page != curr_page); /* That was slightly tricky... */
+}
+
+/*********************************************
+ *
+ * Reset the adapter to a known state
+ *
+ */
+static int adapter_init(struct net_device *dev)
+{
+ int i;
+ static int was_down;
+
+ if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
+ EIPRegister = NCTL0;
+ if (nic_data.Media != 1)
+ EIPRegister |= NIS0; /* not BNC */
+ }
+ else if (nic_data.Model == 2) { /* UTP */
+ EIPRegister = NCTL0 | NIS0;
+ }
+
+ if (utp)
+ EIPRegister = NCTL0 | NIS0;
+ if (bnc)
+ EIPRegister = NCTL0;
+
+ de620_send_command(dev, W_CR | RNOP | CLEAR);
+ de620_send_command(dev, W_CR | RNOP);
+
+ de620_set_register(dev, W_SCR, SCR_DEF);
+ /* disable recv to wait init */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ /* Set the node ID in the adapter */
+ for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
+ de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
+ }
+
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ next_rx_page = first_rx_page = DE620_RX_START_PAGE;
+ if (nic_data.RAM_Size)
+ last_rx_page = nic_data.RAM_Size - 1;
+ else /* 64k RAM */
+ last_rx_page = 255;
+
+ de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
+ de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
+ de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
+ de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
+ de620_send_command(dev, W_DUMMY);
+ de620_set_delay(dev);
+
+ /* Final sanity check: Anybody out there? */
+ /* Let's hope some bits from the status register make a good check */
+#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
+#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
+ /* success: X 0 0 X 0 0 X X */
+ /* ignore: EEDI RXGOOD COLS LNKS*/
+
+ if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
+ printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif
+ "! (%02x)\n", dev->name, i);
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de620_close(dev);
+#endif
+ was_down = 1;
+ return 1; /* failed */
+ }
+ if (was_down) {
+ printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
+ was_down = 0;
+ }
+
+ /* All OK, go ahead... */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+
+ return 0; /* all ok */
+}
+
+/******************************************************************************
+ *
+ * Only start-up code below
+ *
+ */
+/****************************************
+ *
+ * Check if there is a DE-620 connected
+ */
+struct net_device * __init de620_probe(int unit)
+{
+ byte checkbyte = 0xa5;
+ struct net_device *dev;
+ int err = -ENOMEM;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct net_device_stats));
+ if (!dev)
+ goto out;
+
+ SET_MODULE_OWNER(dev);
+
+ spin_lock_init(&de620_lock);
+
+ /*
+ * This is where the base_addr and irq gets set.
+ * Tunable at compile-time and insmod-time
+ */
+ dev->base_addr = io;
+ dev->irq = irq;
+
+ /* allow overriding parameters on command line */
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (de620_debug)
+ printk(version);
+
+ printk(KERN_INFO "D-Link DE-620 pocket adapter");
+
+ if (!request_region(dev->base_addr, 3, "de620")) {
+ printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
+ err = -EBUSY;
+ goto out1;
+ }
+
+ /* Initially, configure basic nibble mode, so we can read the EEPROM */
+ NIC_Cmd = DEF_NIC_CMD;
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ /* Anybody out there? */
+ de620_set_register(dev, W_CPR, checkbyte);
+ checkbyte = de620_get_register(dev, R_CPR);
+
+ if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
+ printk(" not identified in the printer port\n");
+ err = -ENODEV;
+ goto out2;
+ }
+
+ /* else, got it! */
+ printk(", Ethernet Address: %2.2X",
+ dev->dev_addr[0] = nic_data.NodeID[0]);
+ for (i = 1; i < ETH_ALEN; i++) {
+ printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]);
+ dev->broadcast[i] = 0xff;
+ }
+
+ printk(" (%dk RAM,",
+ (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
+
+ if (nic_data.Media == 1)
+ printk(" BNC)\n");
+ else
+ printk(" UTP)\n");
+
+ dev->get_stats = get_stats;
+ dev->open = de620_open;
+ dev->stop = de620_close;
+ dev->hard_start_xmit = de620_start_xmit;
+ dev->tx_timeout = de620_timeout;
+ dev->watchdog_timeo = HZ*2;
+ dev->set_multicast_list = de620_set_multicast_list;
+
+ /* base_addr and irq are already set, see above! */
+
+ /* dump eeprom */
+ if (de620_debug) {
+ printk("\nEEPROM contents:\n");
+ printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
+ printk("NodeID = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ nic_data.NodeID[0], nic_data.NodeID[1],
+ nic_data.NodeID[2], nic_data.NodeID[3],
+ nic_data.NodeID[4], nic_data.NodeID[5]);
+ printk("Model = %d\n", nic_data.Model);
+ printk("Media = %d\n", nic_data.Media);
+ printk("SCR = 0x%02x\n", nic_data.SCR);
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+
+out2:
+ release_region(dev->base_addr, 3);
+out1:
+ free_netdev(dev);
+out:
+ return ERR_PTR(err);
+}
+
+/**********************************
+ *
+ * Read info from on-board EEPROM
+ *
+ * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
+ */
+#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
+
+static unsigned short __init ReadAWord(struct net_device *dev, int from)
+{
+ unsigned short data;
+ int nbits;
+
+ /* cs [__~~] SET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
+
+ /* Send the 9-bit address from where we want to read the 16-bit word */
+ for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
+ if (from & 0x0100) { /* bit set? */
+ /* cs [~~~~] SEND 1 */
+ /* di [~~~~] */
+ /* sck [_~~_] */
+ sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
+ }
+ else {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ }
+ }
+
+ /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
+ for (data = 0, nbits = 16; nbits > 0; --nbits) {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
+ }
+ /* cs [____] RESET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
+
+ return data;
+}
+
+static int __init read_eeprom(struct net_device *dev)
+{
+ unsigned short wrd;
+
+	/* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX */
+ wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
+ if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[0] = wrd & 0xff;
+ nic_data.NodeID[1] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
+ if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[2] = wrd & 0xff;
+ nic_data.NodeID[3] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
+ nic_data.NodeID[4] = wrd & 0xff;
+ nic_data.NodeID[5] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
+ nic_data.RAM_Size = (wrd >> 8);
+
+ wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
+ nic_data.Model = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
+ nic_data.Media = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
+ nic_data.SCR = (wrd >> 8);
+
+ return 0; /* no errors */
+}
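+
+/*
+ * Summary of the EEPROM words used above, derived from read_eeprom()
+ * for quick reference:
+ *
+ *	0x1a8	SCR in the high byte
+ *	0x1aa	NodeID bytes 0 (low byte) and 1 (high byte)
+ *	0x1ab	NodeID bytes 2 and 3
+ *	0x1ac	NodeID bytes 4 and 5
+ *	0x1ad	RAM size in 256-byte pages (high byte), 0 meaning 64k
+ *	0x1ae	hardware model in the low byte
+ *	0x1af	media type (BNC/UTP) in the low byte
+ */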
+
+/******************************************************************************
+ *
+ * Loadable module skeleton
+ *
+ */
+#ifdef MODULE
+static struct net_device *de620_dev;
+
+int init_module(void)
+{
+ de620_dev = de620_probe(-1);
+ if (IS_ERR(de620_dev))
+ return PTR_ERR(de620_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(de620_dev);
+ release_region(de620_dev->base_addr, 3);
+ free_netdev(de620_dev);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
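+/*
+ * Example module usage (illustrative only; io=0x378/irq=7 are merely
+ * typical first-parallel-port settings, not values taken from this
+ * driver, and io, irq and clone are assumed to be the insmod-time
+ * parameters that the probe comment above refers to):
+ *
+ *	insmod de620.ko io=0x378 irq=7
+ */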
+
+/*
+ * (add '-DMODULE' when compiling as loadable module)
+ *
+ * compile-command:
+ * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O2 \
+ * -fomit-frame-pointer -m486 \
+ * -I/usr/src/linux/include -I../../net/inet -c de620.c
+ */
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * End:
+ */
diff --git a/drivers/net/de620.h b/drivers/net/de620.h
new file mode 100644
index 000000000000..e8d9a88f4cb5
--- /dev/null
+++ b/drivers/net/de620.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * *
+ *********************************************************/
+
+/* DE-620's CMD port Command */
+#define CS0 0x08 /* 1->0 command strobe */
+#define ICEN 0x04 /* 0=enable DL3520 host interface */
+#define DS0 0x02 /* 1->0 data strobe 0 */
+#define DS1 0x01 /* 1->0 data strobe 1 */
+
+#define WDIR 0x20 /* general 0=read 1=write */
+#define RDIR		0x00	/* (not 100% confirmed) */
+#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
+#define PS2RDIR 0x20
+
+#define IRQEN 0x10 /* 1 = enable printer IRQ line */
+#define SELECTIN 0x08 /* 1 = select printer */
+#define INITP		0x04	/* 0 = initialize printer */
+#define AUTOFEED 0x02 /* 1 = printer auto form feed */
+#define STROBE 0x01 /* 0->1 data strobe */
+
+#define RESET 0x08
+#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
+#define NCTL0 0x10
+
+/* DE-620 DIC Command */
+#define W_DUMMY 0x00 /* DIC reserved command */
+#define W_CR 0x20 /* DIC write command register */
+#define W_NPR 0x40 /* DIC write Next Page Register */
+#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
+#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
+
+/* DE-620's STAT port bits 7-4 */
+#define EMPTY 0x80 /* 1 = receive buffer empty */
+#define INTLEVEL 0x40 /* 1 = interrupt level is high */
+#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
+#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
+#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
+
+/* IDC 1 Command */
+#define W_RSA1 0xa0 /* write remote start address 1 */
+#define W_RSA0 0xa1 /* write remote start address 0 */
+#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
+#define W_DFR 0xa3 /* write delay factor register */
+#define W_CPR 0xa4 /* write current page register */
+#define W_SPR 0xa5 /* write start page register */
+#define W_EPR 0xa6 /* write end page register */
+#define W_SCR 0xa7 /* write system configuration register */
+#define W_TCR 0xa8 /* write Transceiver Configuration reg */
+#define W_EIP 0xa9 /* write EEPM Interface port */
+#define W_PAR0 0xaa /* write physical address register 0 */
+#define W_PAR1 0xab /* write physical address register 1 */
+#define W_PAR2 0xac /* write physical address register 2 */
+#define W_PAR3 0xad /* write physical address register 3 */
+#define W_PAR4 0xae /* write physical address register 4 */
+#define W_PAR5 0xaf /* write physical address register 5 */
+
+/* IDC 2 Command */
+#define R_STS 0xc0 /* read status register */
+#define R_CPR 0xc1 /* read current page register */
+#define R_BPR 0xc2 /* read boundary page register */
+#define R_TDR 0xc3 /* read time domain reflectometry reg */
+
+/* STATUS Register */
+#define EEDI 0x80 /* EEPM DO pin */
+#define TXSUC 0x40 /* tx success */
+#define T16 0x20 /* tx fail 16 times */
+#define TS1 0x40 /* 0=Tx success, 1=T16 */
+#define TS0 0x20 /* 0=Tx success, 1=T16 */
+#define RXGOOD 0x10 /* rx a good packet */
+#define RXCRC 0x08 /* rx a CRC error packet */
+#define RXSHORT 0x04 /* rx a short packet */
+#define COLS 0x02 /* coaxial collision status */
+#define LNKS 0x01 /* UTP link status */
+
+/* Command Register */
+#define CLEAR 0x10 /* reset part of hardware */
+#define NOPER 0x08 /* No Operation */
+#define RNOP 0x08
+#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
+#define RRN 0x04 /* Normal Remote Read mode */
+#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
+#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
+#define TXEN 0x01 /* 0->1 tx enable */
+
+/* System Configuration Register */
+#define TESTON 0x80 /* test host data transfer reliability */
+#define SLEEP 0x40 /* sleep mode */
+#if 0
+#define FASTMODE	0x04	/* fast mode for the Intel 82360SL */
+#define BYTEMODE 0x02 /* byte mode */
+#else
+#define FASTMODE	0x20	/* fast mode for the Intel 82360SL */
+#define BYTEMODE 0x10 /* byte mode */
+#endif
+#define NIBBLEMODE 0x00 /* nibble mode */
+#define IRQINV 0x08 /* turn off IRQ line inverter */
+#define IRQNML 0x00 /* turn on IRQ line inverter */
+#define INTON 0x04
+#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
+#define AUTOTX 0x01 /* auto tx when leave RW mode */
+
+/* Transceiver Configuration Register */
+#define JABBER 0x80 /* generate jabber condition */
+#define TXSUCINT 0x40 /* enable tx success interrupt */
+#define T16INT 0x20 /* enable T16 interrupt */
+#define RXERRPKT 0x10 /* accept CRC error or short packet */
+#define EXTERNALB2 0x0C /* external loopback 2 */
+#define EXTERNALB1 0x08 /* external loopback 1 */
+#define INTERNALB 0x04 /* internal loopback */
+#define NMLOPERATE 0x00 /* normal operation */
+#define RXPBM 0x03 /* rx physical, broadcast, multicast */
+#define RXPB 0x02 /* rx physical, broadcast */
+#define RXALL 0x01 /* rx all packet */
+#define RXOFF 0x00 /* rx disable */
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
new file mode 100644
index 000000000000..521c83137bf6
--- /dev/null
+++ b/drivers/net/declance.c
@@ -0,0 +1,1320 @@
+/*
+ * Lance ethernet driver for the MIPS processor based
+ * DECstation family
+ *
+ *
+ * adapted from sunlance.c by Richard van den Berg
+ *
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ *
+ * additional sources:
+ * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
+ * Revision 1.2
+ *
+ * History:
+ *
+ * v0.001: The kernel accepts the code and it shows the hardware address.
+ *
+ * v0.002: Removed most sparc stuff, left only some module and dma stuff.
+ *
+ * v0.003: Enhanced base address calculation from proposals by
+ * Harald Koerfgen and Thomas Riemer.
+ *
+ * v0.004: lance-regs is pointing at the right addresses, added prom
+ * check. First start of address mapping and DMA.
+ *
+ * v0.005: started to play around with LANCE-DMA. This driver will not
+ * work for non IOASIC lances. HK
+ *
+ * v0.006: added pointer arrays to lance_private and setup routine for
+ * them in dec_lance_init. HK
+ *
+ * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
+ * access the init block. This looks like one (short) word at a
+ * time, but the smallest amount the IOASIC can transfer is a
+ * (long) word. So we have a 2-2 padding here. Changed
+ * lance_init_block accordingly. The 16-16 padding for the buffers
+ * seems to be correct. HK
+ *
+ * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
+ *
+ * v0.009: Module support fixes, multiple interfaces support, various
+ * bits. macro
+ */
+
+#include <linux/config.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/tc.h>
+#include <asm/system.h>
+
+static char version[] __devinitdata =
+"declance.c: v0.009 by Linux MIPS DECstation task force\n";
+
+MODULE_AUTHOR("Linux MIPS DECstation task force");
+MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * card types
+ */
+#define ASIC_LANCE 1
+#define PMAD_LANCE 2
+#define PMAX_LANCE 3
+
+#ifndef CONFIG_TC
+unsigned long system_base;
+unsigned long dmaptr;
+#endif
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x80 /* Who owns the entry */
+#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x20 /* FRA: Frame error */
+#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x08 /* CRC error */
+#define LE_R1_BUF 0x04 /* BUF: Buffer error */
+#define LE_R1_SOP 0x02 /* Start of packet */
+#define LE_R1_EOP 0x01 /* End of packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T1_OWN 0x80 /* Lance owns the packet */
+#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x08 /* Error: one retry needed */
+#define LE_T1_EDEF 0x04 /* Error: deferred */
+#define LE_T1_SOP 0x02 /* Start of packet */
+#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SZ 1536
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+#undef TEST_HITS
+#define ZERO 0
+
+/* The DS2000/3000 have a linear 64 KB buffer.
+ *
+ * The PMAD-AA has a 128 KB buffer on-board.
+ *
+ * The IOASIC LANCE devices use a shared memory region.  This region, as
+ * seen from the CPU, is (at most) 128 KB long and has to be on a 128 KB
+ * boundary.  The LANCE sees it as a contiguous 64 KB memory region.
+ *
+ * The LANCE's DMA address is used as an index into this buffer and DMA
+ * takes place in bursts of eight 16-bit words, which the IOASIC packs
+ * into four 32-bit words.  This leads to a strange padding: 16 bytes of
+ * valid data followed by a 16 byte gap :-(.
+ */
+
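+/*
+ * Worked example of the mapping (an illustration derived from the buffer
+ * setup in dec_lance_init() below, not from hardware documentation): for
+ * the IOASIC and PMAX variants only half of each CPU-side region carries
+ * data, so a LANCE-side offset L corresponds to a CPU-side offset of
+ * roughly 2 * L; receive buffer i sits at BUF_OFFSET_LNC + i * RX_BUFF_SIZE
+ * as seen by the LANCE, but at BUF_OFFSET_CPU + 2 * i * RX_BUFF_SIZE as
+ * seen by the CPU.
+ */
+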
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ short gap0;
+ unsigned char rmd1_hadr; /* high address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ short gap1;
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ short gap2;
+ unsigned short mblength; /* actual number of bytes received */
+ short gap3;
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ short gap0;
+ unsigned char tmd1_hadr; /* high address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ short gap1;
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ short gap2;
+ unsigned short misc;
+ short gap3;
+};
+
+
+/* First part of the LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* pre-set mode (reg. 15) */
+ short gap0;
+
+ unsigned char phys_addr[12]; /* physical ethernet address
+ only 0, 1, 4, 5, 8, 9 are valid
+ 2, 3, 6, 7, 10, 11 are gaps */
+ unsigned short filter[8]; /* multicast filter
+ only 0, 2, 4, 6 are valid
+ 1, 3, 5, 7 are gaps */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ short gap1;
+ unsigned short rx_len; /* receive len and high addr */
+ short gap2;
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ short gap3;
+ unsigned short tx_len; /* transmit len and high addr */
+ short gap4;
+ short gap5[8];
+
+ /* The buffer descriptors */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+};
+
+#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
+#define BUF_OFFSET_LNC (sizeof(struct lance_init_block)>>1)
+
+#define libdesc_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
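+
+/*
+ * libdesc_offset() is a hand-rolled offsetof(): e.g. libdesc_offset(brx_ring, 2)
+ * yields the byte offset of brx_ring[2] within struct lance_init_block,
+ * which lance_init_ring() below turns into a LANCE bus address with
+ * LANCE_ADDR().
+ */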
+
+/*
+ * This works *only* for the ring descriptors
+ */
+#define LANCE_ADDR(x) (PHYSADDR(x) >> 1)
+
+struct lance_private {
+ struct net_device *next;
+ int type;
+ int slot;
+ int dma_irq;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ struct net_device_stats stats;
+
+ unsigned short busmaster_regval;
+
+ struct timer_list multicast_timer;
+
+ /* Pointers to the ring buffers as seen from the CPU */
+ char *rx_buf_ptr_cpu[RX_RING_SIZE];
+ char *tx_buf_ptr_cpu[TX_RING_SIZE];
+
+ /* Pointers to the ring buffers as seen from the LANCE */
+ char *rx_buf_ptr_lnc[RX_RING_SIZE];
+ char *tx_buf_ptr_lnc[TX_RING_SIZE];
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
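+/*
+ * Example (illustrative): with the default LANCE_LOG_TX_BUFFERS = 4,
+ * TX_RING_SIZE is 16 and TX_RING_MOD_MASK is 15.  If tx_old = 3 and
+ * tx_new = 7, TX_BUFFS_AVAIL evaluates to 3 + 15 - 7 = 11 free slots;
+ * one ring entry is always left unused so that a completely full ring
+ * can be told apart from an empty one.
+ */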
+
+/* The lance control ports are at an absolute address, machine and tc-slot
+ * dependent.
+ * DECstations only do 32-bit accesses while the LANCE uses 16-bit addresses,
+ * so the structure needs an extra padding member to make rap end up at
+ * the right address.
+ */
+struct lance_regs {
+ volatile unsigned short rdp; /* register data port */
+ unsigned short pad;
+ volatile unsigned short rap; /* register address port */
+};
+
+int dec_lance_debug = 2;
+
+static struct net_device *root_lance_dev;
+
+static inline void writereg(volatile unsigned short *regptr, short value)
+{
+ *regptr = value;
+ iob();
+}
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int leptr;
+
+ /* The address space as seen from the LANCE
+ * begins at address 0. HK
+ */
+ leptr = 0;
+
+ writereg(&ll->rap, LE_CSR1);
+ writereg(&ll->rdp, (leptr & 0xFFFF));
+ writereg(&ll->rap, LE_CSR2);
+ writereg(&ll->rdp, leptr >> 16);
+ writereg(&ll->rap, LE_CSR3);
+ writereg(&ll->rdp, lp->busmaster_regval);
+
+ /* Point back to csr0 */
+ writereg(&ll->rap, LE_CSR0);
+}
+
+/*
+ * Our specialized copy routines
+ *
+ */
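+/*
+ * Note on the two cases below (descriptive only): on the PMAX only every
+ * other 16-bit word of the buffer as seen from the CPU carries data, so
+ * the CPU-side pointer is bumped past the unused word after each copy;
+ * on the IOASIC boards data is moved in 16-byte chunks, each followed by
+ * a 16-byte gap, matching the layout described before the descriptor
+ * structures above.
+ */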
+void cp_to_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp, *fp, clen;
+ unsigned char *rtp, *rfp;
+
+ if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = (unsigned short *) to;
+ fp = (unsigned short *) from;
+
+ while (clen--) {
+ *tp++ = *fp++;
+ tp++;
+ }
+
+ clen = len & 1;
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+ /*
+ * copy 16 Byte chunks
+ */
+ clen = len >> 4;
+ tp = (unsigned short *) to;
+ fp = (unsigned short *) from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ tp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ }
+
+ iob();
+}
+
+void cp_from_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp, *fp, clen;
+ unsigned char *rtp, *rfp;
+
+ if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = (unsigned short *) to;
+ fp = (unsigned short *) from;
+ while (clen--) {
+ *tp++ = *fp++;
+ fp++;
+ }
+
+ clen = len & 1;
+
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+
+ /*
+ * copy 16 Byte chunks
+ */
+ clen = len >> 4;
+ tp = (unsigned short *) to;
+ fp = (unsigned short *) from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ fp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+
+
+ }
+
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib;
+ int leptr;
+ int i;
+
+ ib = (struct lance_init_block *) (dev->mem_start);
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block.
+ * XXX bit 0 of the physical address registers has to be zero
+ */
+ ib->phys_addr[0] = dev->dev_addr[0];
+ ib->phys_addr[1] = dev->dev_addr[1];
+ ib->phys_addr[4] = dev->dev_addr[2];
+ ib->phys_addr[5] = dev->dev_addr[3];
+ ib->phys_addr[8] = dev->dev_addr[4];
+ ib->phys_addr[9] = dev->dev_addr[5];
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(libdesc_offset(brx_ring, 0));
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (ZERO)
+ printk("RX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(brx_ring, 0));
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(libdesc_offset(btx_ring, 0));
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (ZERO)
+ printk("TX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(btx_ring, 0));
+
+ if (ZERO)
+ printk("TX rings:\n");
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = (int) lp->tx_buf_ptr_lnc[i];
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (i < 3 && ZERO)
+ printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->tx_buf_ptr_cpu[i]);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk("RX rings:\n");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = (int) lp->rx_buf_ptr_lnc[i];
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (i < 3 && ZERO)
+ printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->rx_buf_ptr_cpu[i]);
+ }
+ iob();
+}
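+
+/*
+ * Illustrative arithmetic for the receive length field set above: with
+ * RX_BUFF_SIZE = 1536, -RX_BUFF_SIZE is 0xfa00 as a 16-bit two's
+ * complement value, and OR-ing in 0xf000 keeps the top four bits set
+ * (the "ones required by tmd2" noted in the Tx ring setup).
+ */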
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_INIT);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
+ udelay(10);
+ }
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+ return -1;
+ }
+ if ((ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+ return -1;
+ }
+ writereg(&ll->rdp, LE_C0_IDON);
+ writereg(&ll->rdp, LE_C0_STRT);
+ writereg(&ll->rdp, LE_C0_INEA);
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib;
+ volatile struct lance_rx_desc *rd = 0;
+ unsigned char bits;
+ int len = 0;
+ struct sk_buff *skb = 0;
+ ib = (struct lance_init_block *) (dev->mem_start);
+
+#ifdef TEST_HITS
+ {
+ int i;
+
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s", ib->brx_ring[i].rmd1_bits &
+ LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s", ib->brx_ring[i].rmd1_bits &
+ LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+ }
+#endif
+
+ for (rd = &ib->brx_ring[lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ lp->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == 0) {
+ printk("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+ return 0;
+ }
+ lp->stats.rx_bytes += len;
+
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+
+ cp_from_buf(lp->type, skb->data,
+ (char *)lp->rx_buf_ptr_cpu[lp->rx_new],
+ len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->length = -RX_BUFF_SIZE | 0xf000;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+ }
+ return 0;
+}
+
+static void lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+ ib = (struct lance_init_block *) (dev->mem_start);
+ j = lp->tx_old;
+
+ spin_lock(&lp->lock);
+
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ lp->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ lp->stats.tx_carrier_errors++;
+ printk("%s: Carrier Lost\n", dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ lp->stats.tx_packets++;
+ }
+ j = (j + 1) & TX_RING_MOD_MASK;
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
+				      struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+
+	printk("%s: DMA error\n", dev->name);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+lance_interrupt(const int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ writereg(&ll->rap, LE_CSR0);
+ csr0 = ll->rdp;
+
+ /* Acknowledge all the interrupt sources ASAP */
+ writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR);
+ }
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ lp->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ lp->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Memory error, status %04x\n", dev->name, csr0);
+
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ writereg(&ll->rdp, LE_C0_INEA);
+ writereg(&ll->rdp, LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+struct net_device *last_dev = 0;
+
+static int lance_open(struct net_device *dev)
+{
+ volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status = 0;
+
+ last_dev = dev;
+
+ /* Stop the Lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring() called at any error will not
+ * forget multicast filters.
+ *
+	 * BTW it is a common bug in all lance drivers! --ANK
+ */
+ ib->mode = 0;
+ ib->filter [0] = 0;
+ ib->filter [2] = 0;
+ ib->filter [4] = 0;
+ ib->filter [6] = 0;
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+ if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) {
+ printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ if (request_irq(lp->dma_irq, &lance_dma_merr_int, 0,
+ "lance error", dev)) {
+ free_irq(dev->irq, dev);
+ printk("%s: Can't get DMA IRQ %d\n", dev->name,
+ lp->dma_irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Enable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
+
+ fast_mb();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+ }
+
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Disable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
+
+ fast_iob();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+
+ free_irq(lp->dma_irq, dev);
+ }
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies;
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+ int entry, skblen, len;
+
+ skblen = skb->len;
+
+ len = skblen;
+
+ if (len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ len = ETH_ZLEN;
+ }
+
+	spin_lock_irq(&lp->lock);
+
+	lp->stats.tx_bytes += len;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ ib->btx_ring[entry].length = (-len);
+ ib->btx_ring[entry].misc = 0;
+
+ cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data,
+ skblen);
+
+ /* Clear the slack of the packet, do I need this? */
+ /* For a firewall it's a good idea - AC */
+/*
+ if (len != skblen)
+ memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1);
+ */
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
+
+ spin_unlock_irq(&lp->lock);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+static void lance_load_multicast(struct net_device *dev)
+{
+ volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+ volatile u16 *mcast_table = (u16 *) & ib->filter;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffff;
+ ib->filter[2] = 0xffff;
+ ib->filter[4] = 0xffff;
+ ib->filter[6] = 0xffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[2] = 0;
+ ib->filter[4] = 0;
+ ib->filter[6] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(ETH_ALEN, addrs);
+ crc = crc >> 26;
+ mcast_table[2 * (crc >> 4)] |= 1 << (crc & 0xf);
+ }
+ return;
+}
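+
+/*
+ * Worked example of the hash above (illustrative): ether_crc_le() returns
+ * a 32-bit CRC whose top six bits (crc >> 26) select one of 64 filter
+ * bits.  A hash of 43 (0x2b) gives word index 43 >> 4 = 2 and bit
+ * 43 & 0xf = 11; the word index is doubled because only the even filter[]
+ * entries hold data (the odd ones are gaps, see struct lance_init_block),
+ * so bit 11 of filter[4] is set.
+ */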
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib;
+ volatile struct lance_regs *ll = lp->ll;
+
+ ib = (struct lance_init_block *) (dev->mem_start);
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static int __init dec_lance_init(const int type, const int slot)
+{
+ static unsigned version_printed;
+ static const char fmt[] = "declance%d";
+ char name[10];
+ struct net_device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ int i, ret;
+ unsigned long esar_base;
+ unsigned char *esar;
+
+#ifndef CONFIG_TC
+ system_base = KN01_LANCE_BASE;
+#endif
+
+ if (dec_lance_debug && version_printed++ == 0)
+ printk(version);
+
+ i = 0;
+ dev = root_lance_dev;
+ while (dev) {
+ i++;
+ lp = (struct lance_private *)dev->priv;
+ dev = lp->next;
+ }
+ snprintf(name, sizeof(name), fmt, i);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev) {
+ printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
+ name);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * alloc_etherdev ensures the data structures used by the LANCE
+ * are aligned.
+ */
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+
+ lp->type = type;
+ lp->slot = slot;
+ switch (type) {
+#ifdef CONFIG_TC
+ case ASIC_LANCE:
+ dev->base_addr = system_base + IOASIC_LANCE;
+
+ /* buffer space for the on-board LANCE shared memory */
+ /*
+ * FIXME: ugly hack!
+ */
+ dev->mem_start = KSEG1ADDR(0x00020000);
+ dev->mem_end = dev->mem_start + 0x00020000;
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ esar_base = system_base + IOASIC_ESAR;
+
+ /* Workaround crash with booting KN04 2.1k from Disk */
+ memset((void *)dev->mem_start, 0,
+ dev->mem_end - dev->mem_start);
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ /* Setup I/O ASIC LANCE DMA. */
+ lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
+ ioasic_write(IO_REG_LANCE_DMA_P,
+ PHYSADDR(dev->mem_start) << 3);
+
+ break;
+
+ case PMAD_LANCE:
+ claim_tc_card(slot);
+
+ dev->mem_start = get_tc_base_addr(slot);
+ dev->base_addr = dev->mem_start + 0x100000;
+ dev->irq = get_tc_irq_nr(slot);
+ esar_base = dev->mem_start + 0x1c0002;
+ lp->dma_irq = -1;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+#endif
+
+ case PMAX_LANCE:
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ dev->base_addr = KN01_LANCE_BASE;
+ dev->mem_start = KN01_LANCE_BASE + 0x01000000;
+ esar_base = KN01_RTC_BASE + 1;
+ lp->dma_irq = -1;
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (char *)(BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+
+ default:
+		printk(KERN_ERR "%s: dec_lance_init called with unknown type\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_free_dev;
+ }
+
+ ll = (struct lance_regs *) dev->base_addr;
+ esar = (unsigned char *) esar_base;
+
+ /* prom checks */
+ /* First, check for test pattern */
+ if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
+ esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
+ printk(KERN_ERR
+ "%s: Ethernet station address prom not found!\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_free_dev;
+ }
+ /* Check the prom contents */
+ for (i = 0; i < 8; i++) {
+ if (esar[i * 4] != esar[0x3c - i * 4] &&
+ esar[i * 4] != esar[0x40 + i * 4] &&
+ esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
+ printk(KERN_ERR "%s: Something is wrong with the "
+ "ethernet station address prom!\n", name);
+ ret = -ENODEV;
+ goto err_out_free_dev;
+ }
+ }
+
+ /* Copy the ethernet address to the device structure, later to the
+ * lance initialization block so the lance gets it every time it's
+ * (re)initialized.
+ */
+ switch (type) {
+ case ASIC_LANCE:
+ printk("%s: IOASIC onboard LANCE, addr = ", name);
+ break;
+ case PMAD_LANCE:
+ printk("%s: PMAD-AA, addr = ", name);
+ break;
+ case PMAX_LANCE:
+ printk("%s: PMAX onboard LANCE, addr = ", name);
+ break;
+ }
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = esar[i * 4];
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ',' : ':');
+ }
+
+ printk(" irq = %d\n", dev->irq);
+
+ dev->open = &lance_open;
+ dev->stop = &lance_close;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->tx_timeout = &lance_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+
+ /* lp->ll is the location of the registers for lance card */
+ lp->ll = ll;
+
+ /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
+ * specification.
+ */
+ lp->busmaster_regval = 0;
+
+ dev->dma = 0;
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = &lance_set_multicast_retry;
+
+ ret = register_netdev(dev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: Unable to register netdev, aborting.\n", name);
+ goto err_out_free_dev;
+ }
+
+ lp->next = root_lance_dev;
+ root_lance_dev = dev;
+
+ printk("%s: registered as %s.\n", name, dev->name);
+ return 0;
+
+err_out_free_dev:
+	free_netdev(dev);
+
+err_out:
+ return ret;
+}
+
+
+/* Find all the lance cards on the system and initialize them */
+static int __init dec_lance_probe(void)
+{
+ int count = 0;
+
+ /* Scan slots for PMAD-AA cards first. */
+#ifdef CONFIG_TC
+ if (TURBOCHANNEL) {
+ int slot;
+
+ while ((slot = search_tc_card("PMAD-AA")) >= 0) {
+ if (dec_lance_init(PMAD_LANCE, slot) < 0)
+ break;
+ count++;
+ }
+ }
+#endif
+
+ /* Then handle onboard devices. */
+ if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
+ if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
+#ifdef CONFIG_TC
+ if (dec_lance_init(ASIC_LANCE, -1) >= 0)
+ count++;
+#endif
+ } else if (!TURBOCHANNEL) {
+ if (dec_lance_init(PMAX_LANCE, -1) >= 0)
+ count++;
+ }
+ }
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit dec_lance_cleanup(void)
+{
+ while (root_lance_dev) {
+ struct net_device *dev = root_lance_dev;
+ struct lance_private *lp = netdev_priv(dev);
+ unregister_netdev(dev);
+#ifdef CONFIG_TC
+ if (lp->slot >= 0)
+ release_tc_card(lp->slot);
+#endif
+ root_lance_dev = lp->next;
+ free_netdev(dev);
+ }
+}
+
+module_init(dec_lance_probe);
+module_exit(dec_lance_cleanup);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
new file mode 100644
index 000000000000..a6aa56598f27
--- /dev/null
+++ b/drivers/net/defxx.c
@@ -0,0 +1,3463 @@
+/*
+ * File Name:
+ * defxx.c
+ *
+ * Copyright Information:
+ * Copyright Digital Equipment Corporation 1996.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ *
+ * Abstract:
+ * A Linux device driver supporting the Digital Equipment Corporation
+ * FDDI EISA and PCI controller families. Supported adapters include:
+ *
+ * DEC FDDIcontroller/EISA (DEFEA)
+ * DEC FDDIcontroller/PCI (DEFPA)
+ *
+ * The original author:
+ * LVS Lawrence V. Stefani <lstefani@yahoo.com>
+ *
+ * Maintainers:
+ * macro Maciej W. Rozycki <macro@linux-mips.org>
+ *
+ * Credits:
+ * I'd like to thank Patricia Cross for helping me get started with
+ * Linux, David Davies for a lot of help upgrading and configuring
+ * my development system and for answering many OS and driver
+ * development questions, and Alan Cox for recommendations and
+ * integration help on getting FDDI support into Linux. LVS
+ *
+ * Driver Architecture:
+ * The driver architecture is largely based on previous driver work
+ * for other operating systems. The upper edge interface and
+ * functions were largely taken from existing Linux device drivers
+ * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
+ * driver.
+ *
+ * Adapter Probe -
+ * The driver scans for supported EISA adapters by reading the
+ * SLOT ID register for each EISA slot and making a match
+ * against the expected value.
+ *
+ * Bus-Specific Initialization -
+ * This driver currently supports both EISA and PCI controller
+ * families. While the custom DMA chip and FDDI logic is similar
+ * or identical, the bus logic is very different. After
+ *		initialization, the only bus-specific difference is in how the
+ * driver enables and disables interrupts. Other than that, the
+ * run-time critical code behaves the same on both families.
+ * It's important to note that both adapter families are configured
+ * to I/O map, rather than memory map, the adapter registers.
+ *
+ * Driver Open/Close -
+ * In the driver open routine, the driver ISR (interrupt service
+ * routine) is registered and the adapter is brought to an
+ * operational state. In the driver close routine, the opposite
+ * occurs; the driver ISR is deregistered and the adapter is
+ * brought to a safe, but closed state. Users may use consecutive
+ * commands to bring the adapter up and down as in the following
+ * example:
+ * ifconfig fddi0 up
+ * ifconfig fddi0 down
+ * ifconfig fddi0 up
+ *
+ * Driver Shutdown -
+ * Apparently, there is no shutdown or halt routine support under
+ * Linux. This routine would be called during "reboot" or
+ * "shutdown" to allow the driver to place the adapter in a safe
+ * state before a warm reboot occurs. To be really safe, the user
+ *		should close the adapter before shutdown (e.g. ifconfig fddi0 down)
+ * to ensure that the adapter DMA engine is taken off-line. However,
+ * the current driver code anticipates this problem and always issues
+ * a soft reset of the adapter at the beginning of driver initialization.
+ * A future driver enhancement in this area may occur in 2.1.X where
+ * Alan indicated that a shutdown handler may be implemented.
+ *
+ * Interrupt Service Routine -
+ * The driver supports shared interrupts, so the ISR is registered for
+ * each board with the appropriate flag and the pointer to that board's
+ * device structure. This provides the context during interrupt
+ * processing to support shared interrupts and multiple boards.
+ *
+ * Interrupt enabling/disabling can occur at many levels. At the host
+ * end, you can disable system interrupts, or disable interrupts at the
+ * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
+ * have a bus-logic chip interrupt enable/disable as well as a DMA
+ * controller interrupt enable/disable.
+ *
+ * The driver currently enables and disables adapter interrupts at the
+ * bus-logic chip and assumes that Linux will take care of clearing or
+ * acknowledging any host-based interrupt chips.
+ *
+ * Control Functions -
+ * Control functions are those used to support functions such as adding
+ * or deleting multicast addresses, enabling or disabling packet
+ * reception filters, or other custom/proprietary commands. Presently,
+ * the driver supports the "get statistics", "set multicast list", and
+ * "set mac address" functions defined by Linux. A list of possible
+ * enhancements include:
+ *
+ * - Custom ioctl interface for executing port interface commands
+ * - Custom ioctl interface for adding unicast addresses to
+ * adapter CAM (to support bridge functions).
+ * - Custom ioctl interface for supporting firmware upgrades.
+ *
+ * Hardware (port interface) Support Routines -
+ * The driver function names that start with "dfx_hw_" represent
+ * low-level port interface routines that are called frequently. They
+ * include issuing a DMA or port control command to the adapter,
+ * resetting the adapter, or reading the adapter state. Since the
+ * driver initialization and run-time code must make calls into the
+ * port interface, these routines were written to be as generic and
+ * usable as possible.
+ *
+ * Receive Path -
+ * The adapter DMA engine supports a 256 entry receive descriptor block
+ * of which up to 255 entries can be used at any given time. The
+ * architecture is a standard producer, consumer, completion model in
+ * which the driver "produces" receive buffers to the adapter, the
+ * adapter "consumes" the receive buffers by DMAing incoming packet data,
+ * and the driver "completes" the receive buffers by servicing the
+ * incoming packet, then "produces" a new buffer and starts the cycle
+ * again. Receive buffers can be fragmented in up to 16 fragments
+ * (descriptor entries). For simplicity, this driver posts
+ * single-fragment receive buffers of 4608 bytes, then allocates a
+ * sk_buff, copies the data, then reposts the buffer. To reduce CPU
+ * utilization, a better approach would be to pass up the receive
+ * buffer (no extra copy) then allocate and post a replacement buffer.
+ * This is a performance enhancement that should be looked into at
+ * some point.
+ *
+ * Transmit Path -
+ * Like the receive path, the adapter DMA engine supports a 256 entry
+ * transmit descriptor block of which up to 255 entries can be used at
+ * any given time. Transmit buffers can be fragmented in up to 255
+ * fragments (descriptor entries). This driver always posts one
+ * fragment per transmit packet request.
+ *
+ * The fragment contains the entire packet from FC to end of data.
+ * Before posting the buffer to the adapter, the driver sets a three-byte
+ * packet request header (PRH) which is required by the Motorola MAC chip
+ * used on the adapters. The PRH tells the MAC the type of token to
+ * receive/send, whether or not to generate and append the CRC, whether
+ * synchronous or asynchronous framing is used, etc. Since the PRH
+ * definition is not necessarily consistent across all FDDI chipsets,
+ * the driver, rather than the common FDDI packet handler routines,
+ * sets these bytes.
+ *
+ * To reduce the amount of descriptor fetches needed per transmit request,
+ * the driver takes advantage of the fact that there are at least three
+ * bytes available before the skb->data field on the outgoing transmit
+ * request. This is guaranteed by having fddi_setup() in net_init.c set
+ * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
+ * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
+ * bytes which we'll use to store the PRH.
+ *
+ * There's a subtle advantage to adding these pad bytes to the
+ *	 hard_header_len: it ensures that the data portion of the packet for
+ * an 802.2 SNAP frame is longword aligned. Other FDDI driver
+ * implementations may not need the extra padding and can start copying
+ * or DMAing directly from the FC byte which starts at skb->data. Should
+ * another driver implementation need ADDITIONAL padding, the net_init.c
+ * module should be updated and dev->hard_header_len should be increased.
+ * NOTE: To maintain the alignment on the data portion of the packet,
+ * dev->hard_header_len should always be evenly divisible by 4 and at
+ * least 24 bytes in size.
+ *
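+ *	 As a worked example (illustrative, based only on the description
+ *	 above rather than on the adapter specification): an 802.2 SNAP
+ *	 frame carries FC (1 byte) + destination and source MAC addresses
+ *	 (12 bytes) + LLC/SNAP header (8 bytes) = 21 bytes, and adding the
+ *	 3 PRH pad bytes gives the 24-byte hard_header_len, which is evenly
+ *	 divisible by 4 and so keeps the data portion longword aligned.
+ *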
+ * Modification History:
+ * Date Name Description
+ * 16-Aug-96 LVS Created.
+ * 20-Aug-96 LVS Updated dfx_probe so that version information
+ * string is only displayed if 1 or more cards are
+ * found. Changed dfx_rcv_queue_process to copy
+ * 3 NULL bytes before FC to ensure that data is
+ * longword aligned in receive buffer.
+ * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
+ * LLC group promiscuous mode if multicast list
+ * is too large. LLC individual/group promiscuous
+ * mode is now disabled if IFF_PROMISC flag not set.
+ * dfx_xmt_queue_pkt no longer checks for NULL skb
+ * on Alan Cox recommendation. Added node address
+ * override support.
+ * 12-Sep-96 LVS Reset current address to factory address during
+ * device open. Updated transmit path to post a
+ * single fragment which includes PRH->end of data.
+ * Mar 2000 AC Did various cleanups for 2.3.x
+ * Jun 2000 jgarzik PCI and resource alloc cleanups
+ * Jul 2000 tjeerd Much cleanup and some bug fixes
+ * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
+ * Feb 2001 Skb allocation fixes
+ * Feb 2001 davej PCI enable cleanups.
+ * 04 Aug 2003 macro Converted to the DMA API.
+ * 14 Aug 2004 macro Fix device names reported.
+ */
+
+/* Include files */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include "defxx.h"
+
+/* Version information string should be updated prior to each new release! */
+#define DRV_NAME "defxx"
+#define DRV_VERSION "v1.07"
+#define DRV_RELDATE "2004/08/14"
+
+static char version[] __devinitdata =
+ DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
+ " Lawrence V. Stefani and others\n";
+
+#define DYNAMIC_BUFFERS 1
+
+#define SKBUFF_RX_COPYBREAK 200
+/*
+ * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
+ * alignment for compatibility with old EISA boards.
+ */
+#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
+
+/* Define module-wide (static) routines */
+
+static void dfx_bus_init(struct net_device *dev);
+static void dfx_bus_config_check(DFX_board_t *bp);
+
+static int dfx_driver_init(struct net_device *dev, const char *print_name);
+static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
+
+static int dfx_open(struct net_device *dev);
+static int dfx_close(struct net_device *dev);
+
+static void dfx_int_pr_halt_id(DFX_board_t *bp);
+static void dfx_int_type_0_process(DFX_board_t *bp);
+static void dfx_int_common(struct net_device *dev);
+static void dfx_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
+static void dfx_ctl_set_multicast_list(struct net_device *dev);
+static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
+static int dfx_ctl_update_cam(DFX_board_t *bp);
+static int dfx_ctl_update_filters(DFX_board_t *bp);
+
+static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
+static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
+static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
+static int dfx_hw_adap_state_rd(DFX_board_t *bp);
+static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
+
+static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
+static void dfx_rcv_queue_process(DFX_board_t *bp);
+static void dfx_rcv_flush(DFX_board_t *bp);
+
+static int dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev);
+static int dfx_xmt_done(DFX_board_t *bp);
+static void dfx_xmt_flush(DFX_board_t *bp);
+
+/* Define module-wide (static) variables */
+
+static struct net_device *root_dfx_eisa_dev;
+
+
+/*
+ * =======================
+ * = dfx_port_write_byte =
+ * = dfx_port_read_byte =
+ * = dfx_port_write_long =
+ * = dfx_port_read_long =
+ * =======================
+ *
+ * Overview:
+ * Routines for reading and writing values from/to adapter
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * offset - register offset from base I/O address
+ * data - for dfx_port_write_byte and dfx_port_write_long, this
+ * is a value to write.
+ *		for dfx_port_read_byte and dfx_port_read_long, this
+ * is a pointer to store the read value.
+ *
+ * Functional Description:
+ * These routines perform the correct operation to read or write
+ * the adapter register.
+ *
+ * EISA port block base addresses are based on the slot number in which the
+ * controller is installed. For example, if the EISA controller is installed
+ * in slot 4, the port block base address is 0x4000. If the controller is
+ * installed in slot 2, the port block base address is 0x2000, and so on.
+ * This port block can be used to access PDQ, ESIC, and DEFEA on-board
+ * registers using the register offsets defined in DEFXX.H.
+ *
+ * PCI port block base addresses are assigned by the PCI BIOS or system
+ * firmware. There is one 128 byte port block which can be accessed. It
+ * allows for I/O mapping of both PDQ and PFI registers using the register
+ * offsets defined in DEFXX.H.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * bp->base_addr is a valid base I/O address for this adapter.
+ * offset is a valid register offset for this adapter.
+ *
+ * Side Effects:
+ * Rather than produce macros for these functions, these routines
+ * are defined using "inline" to ensure that the compiler will
+ * generate inline code and not waste a procedure call and return.
+ * This provides all the benefits of macros, but with the
+ * advantage of strict data type checking.
+ */
+
+static inline void dfx_port_write_byte(
+ DFX_board_t *bp,
+ int offset,
+ u8 data
+ )
+
+ {
+ u16 port = bp->base_addr + offset;
+
+ outb(data, port);
+ }
+
+static inline void dfx_port_read_byte(
+ DFX_board_t *bp,
+ int offset,
+ u8 *data
+ )
+
+ {
+ u16 port = bp->base_addr + offset;
+
+ *data = inb(port);
+ }
+
+static inline void dfx_port_write_long(
+ DFX_board_t *bp,
+ int offset,
+ u32 data
+ )
+
+ {
+ u16 port = bp->base_addr + offset;
+
+ outl(data, port);
+ }
+
+static inline void dfx_port_read_long(
+ DFX_board_t *bp,
+ int offset,
+ u32 *data
+ )
+
+ {
+ u16 port = bp->base_addr + offset;
+
+ *data = inl(port);
+ }
+
+
+/*
+ * =============
+ * = dfx_init_one_pci_or_eisa =
+ * =============
+ *
+ * Overview:
+ * Initializes a supported FDDI EISA or PCI controller
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * pdev - pointer to pci device information (NULL for EISA)
+ * ioaddr - pointer to port (NULL for PCI)
+ *
+ * Functional Description:
+ *   Allocates the net_device structure, enables the PCI device (if any),
+ *   reserves the I/O region, initializes the board via dfx_driver_init,
+ *   and registers the new device with the kernel.
+ *
+ * Return Codes:
+ *   0           - This device (fddi0, fddi1, etc) configured successfully
+ *   -EBUSY      - Failed to reserve the I/O region
+ *   Other negative errno values are returned if allocation, initialization,
+ *   or registration fails.
+ *
+ * Assumptions:
+ * It compiles so it should work :-( (PCI cards do :-)
+ *
+ * Side Effects:
+ * Device structures for FDDI adapters (fddi0, fddi1, etc) are
+ * initialized and the board resources are read and stored in
+ * the device structure.
+ */
+static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr)
+{
+ static int version_disp;
+ char *print_name = DRV_NAME;
+ struct net_device *dev;
+ DFX_board_t *bp; /* board pointer */
+ int alloc_size; /* total buffer size used */
+ int err;
+
+ if (!version_disp) { /* display version info if adapter is found */
+ version_disp = 1; /* set display flag to TRUE so that */
+ printk(version); /* we only display this string ONCE */
+ }
+
+ if (pdev != NULL)
+ print_name = pci_name(pdev);
+
+ dev = alloc_fddidev(sizeof(*bp));
+ if (!dev) {
+ printk(KERN_ERR "%s: unable to allocate fddidev, aborting\n",
+ print_name);
+ return -ENOMEM;
+ }
+
+ /* Enable PCI device. */
+ if (pdev != NULL) {
+ err = pci_enable_device (pdev);
+ if (err) goto err_out;
+ ioaddr = pci_resource_start (pdev, 1);
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ bp = dev->priv;
+
+ if (!request_region(ioaddr,
+ pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN,
+ print_name)) {
+ printk(KERN_ERR "%s: Cannot reserve I/O resource "
+ "0x%x @ 0x%lx, aborting\n", print_name,
+ pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, ioaddr);
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ /* Initialize new device structure */
+
+ dev->base_addr = ioaddr; /* save port (I/O) base address */
+
+ dev->get_stats = dfx_ctl_get_stats;
+ dev->open = dfx_open;
+ dev->stop = dfx_close;
+ dev->hard_start_xmit = dfx_xmt_queue_pkt;
+ dev->set_multicast_list = dfx_ctl_set_multicast_list;
+ dev->set_mac_address = dfx_ctl_set_mac_address;
+
+ if (pdev == NULL) {
+ /* EISA board */
+ bp->bus_type = DFX_BUS_TYPE_EISA;
+ bp->next = root_dfx_eisa_dev;
+ root_dfx_eisa_dev = dev;
+ } else {
+ /* PCI board */
+ bp->bus_type = DFX_BUS_TYPE_PCI;
+ bp->pci_dev = pdev;
+ pci_set_drvdata (pdev, dev);
+ pci_set_master (pdev);
+ }
+
+ if (dfx_driver_init(dev, print_name) != DFX_K_SUCCESS) {
+ err = -ENODEV;
+ goto err_out_region;
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out_kfree;
+
+ printk("%s: registered as %s\n", print_name, dev->name);
+ return 0;
+
+err_out_kfree:
+ alloc_size = sizeof(PI_DESCR_BLOCK) +
+ PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
+#ifndef DYNAMIC_BUFFERS
+ (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
+#endif
+ sizeof(PI_CONSUMER_BLOCK) +
+ (PI_ALIGN_K_DESC_BLK - 1);
+ if (bp->kmalloced)
+ pci_free_consistent(pdev, alloc_size,
+ bp->kmalloced, bp->kmalloced_dma);
+err_out_region:
+ release_region(ioaddr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN);
+err_out:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devinit dfx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return dfx_init_one_pci_or_eisa(pdev, 0);
+}
+
+static int __init dfx_eisa_init(void)
+{
+ int rc = -ENODEV;
+ int i; /* used in for loops */
+ u16 port; /* temporary I/O (port) address */
+ u32 slot_id; /* EISA hardware (slot) ID read from adapter */
+
+ DBG_printk("In dfx_eisa_init...\n");
+
+ /* Scan for FDDI EISA controllers */
+
+ for (i=0; i < DFX_MAX_EISA_SLOTS; i++) /* only scan for up to 16 EISA slots */
+ {
+ port = (i << 12) + PI_ESIC_K_SLOT_ID; /* port = I/O address for reading slot ID */
+ slot_id = inl(port); /* read EISA HW (slot) ID */
+ if ((slot_id & 0xF0FFFFFF) == DEFEA_PRODUCT_ID)
+ {
+ port = (i << 12); /* recalc base addr */
+
+ if (dfx_init_one_pci_or_eisa(NULL, port) == 0) rc = 0;
+ }
+ }
+ return rc;
+}
+
+/*
+ * ================
+ * = dfx_bus_init =
+ * ================
+ *
+ * Overview:
+ * Initializes EISA and PCI controller bus-specific logic.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * Determine and save adapter IRQ in device table,
+ * then perform bus-specific logic initialization.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * dev->base_addr has already been set with the proper
+ * base I/O address for this device.
+ *
+ * Side Effects:
+ * Interrupts are enabled at the adapter bus-specific logic.
+ * Note: Interrupts at the DMA engine (PDQ chip) are not
+ * enabled yet.
+ */
+
+static void __devinit dfx_bus_init(struct net_device *dev)
+{
+ DFX_board_t *bp = dev->priv;
+ u8 val; /* used for I/O read/writes */
+
+ DBG_printk("In dfx_bus_init...\n");
+
+ /*
+ * Initialize base I/O address field in bp structure
+ *
+ * Note: bp->base_addr is the same as dev->base_addr.
+ * It's useful because often we'll need to read
+ * or write registers where we already have the
+ * bp pointer instead of the dev pointer. Having
+ * the base address in the bp structure will
+ * save a pointer dereference.
+ *
+ * IMPORTANT!! This field must be defined before
+ * any of the dfx_port_* inline functions are
+ * called.
+ */
+
+ bp->base_addr = dev->base_addr;
+
+ /* And a pointer back to the net_device struct */
+ bp->dev = dev;
+
+ /* Initialize adapter based on bus type */
+
+ if (bp->bus_type == DFX_BUS_TYPE_EISA)
+ {
+ /* Get the interrupt level from the ESIC chip */
+
+ dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val);
+ switch ((val & PI_CONFIG_STAT_0_M_IRQ) >> PI_CONFIG_STAT_0_V_IRQ)
+ {
+ case PI_CONFIG_STAT_0_IRQ_K_9:
+ dev->irq = 9;
+ break;
+
+ case PI_CONFIG_STAT_0_IRQ_K_10:
+ dev->irq = 10;
+ break;
+
+ case PI_CONFIG_STAT_0_IRQ_K_11:
+ dev->irq = 11;
+ break;
+
+ case PI_CONFIG_STAT_0_IRQ_K_15:
+ dev->irq = 15;
+ break;
+ }
+
+ /* Enable access to I/O on the board by writing 0x03 to Function Control Register */
+
+ dfx_port_write_byte(bp, PI_ESIC_K_FUNCTION_CNTRL, PI_ESIC_K_FUNCTION_CNTRL_IO_ENB);
+
+ /* Set the I/O decode range of the board */
+
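+		/*
+		 * Note: dev->base_addr >> 12 recovers the EISA slot number
+		 * (e.g. 4 for a board at 0x4000), which is then shifted into
+		 * the slot field (PI_IO_CMP_V_SLOT) of the I/O compare
+		 * registers.
+		 */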
+ val = ((dev->base_addr >> 12) << PI_IO_CMP_V_SLOT);
+ dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_0_1, val);
+ dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_1_1, val);
+
+ /* Enable access to rest of module (including PDQ and packet memory) */
+
+ dfx_port_write_byte(bp, PI_ESIC_K_SLOT_CNTRL, PI_SLOT_CNTRL_M_ENB);
+
+ /*
+ * Map PDQ registers into I/O space. This is done by clearing a bit
+ * in Burst Holdoff register.
+ */
+
+ dfx_port_read_byte(bp, PI_ESIC_K_BURST_HOLDOFF, &val);
+ dfx_port_write_byte(bp, PI_ESIC_K_BURST_HOLDOFF, (val & ~PI_BURST_HOLDOFF_M_MEM_MAP));
+
+ /* Enable interrupts at EISA bus interface chip (ESIC) */
+
+ dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val);
+ dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, (val | PI_CONFIG_STAT_0_M_INT_ENB));
+ }
+ else
+ {
+ struct pci_dev *pdev = bp->pci_dev;
+
+ /* Get the interrupt level from the PCI Configuration Table */
+
+ dev->irq = pdev->irq;
+
+ /* Check Latency Timer and set if less than minimal */
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
+ if (val < PFI_K_LAT_TIMER_MIN) /* if less than min, override with default */
+ {
+ val = PFI_K_LAT_TIMER_DEF;
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
+ }
+
+ /* Enable interrupts at PCI bus interface chip (PFI) */
+
+ dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, (PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB));
+ }
+ }
+
+
+/*
+ * ========================
+ * = dfx_bus_config_check =
+ * ========================
+ *
+ * Overview:
+ * Checks the configuration (burst size, full-duplex, etc.) If any parameters
+ * are illegal, then this routine will set new defaults.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
+ * PDQ, and all FDDI PCI controllers, all values are legal.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * dfx_adap_init has NOT been called yet so burst size and other items have
+ * not been set.
+ *
+ * Side Effects:
+ * None
+ */
+
+static void __devinit dfx_bus_config_check(DFX_board_t *bp)
+{
+ int status; /* return code from adapter port control call */
+ u32 slot_id; /* EISA-bus hardware id (DEC3001, DEC3002,...) */
+ u32 host_data; /* LW data returned from port control call */
+
+ DBG_printk("In dfx_bus_config_check...\n");
+
+ /* Configuration check only valid for EISA adapter */
+
+ if (bp->bus_type == DFX_BUS_TYPE_EISA)
+ {
+ dfx_port_read_long(bp, PI_ESIC_K_SLOT_ID, &slot_id);
+
+ /*
+ * First check if revision 2 EISA controller. Rev. 1 cards used
+ * PDQ revision B, so no workaround needed in this case. Rev. 3
+ * cards used PDQ revision E, so no workaround needed in this
+ * case, either. Only Rev. 2 cards used either Rev. D or E
+ * chips, so we must verify the chip revision on Rev. 2 cards.
+ */
+
+ if (slot_id == DEFEA_PROD_ID_2)
+ {
+ /*
+ * Revision 2 FDDI EISA controller found, so let's check PDQ
+ * revision of adapter.
+ */
+
+ status = dfx_hw_port_ctrl_req(bp,
+ PI_PCTRL_M_SUB_CMD,
+ PI_SUB_CMD_K_PDQ_REV_GET,
+ 0,
+ &host_data);
+ if ((status != DFX_K_SUCCESS) || (host_data == 2))
+ {
+ /*
+ * Either we couldn't determine the PDQ revision, or
+ * we determined that it is at revision D. In either case,
+ * we need to implement the workaround.
+ */
+
+ /* Ensure that the burst size is set to 8 longwords or less */
+
+ switch (bp->burst_size)
+ {
+ case PI_PDATA_B_DMA_BURST_SIZE_32:
+ case PI_PDATA_B_DMA_BURST_SIZE_16:
+ bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Ensure that full-duplex mode is not enabled */
+
+ bp->full_duplex_enb = PI_SNMP_K_FALSE;
+ }
+ }
+ }
+ }
+
+
+/*
+ * ===================
+ * = dfx_driver_init =
+ * ===================
+ *
+ * Overview:
+ * Initializes remaining adapter board structure information
+ * and makes sure adapter is in a safe state prior to dfx_open().
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ * print_name - printable device name
+ *
+ * Functional Description:
+ * This function allocates additional resources such as the host memory
+ * blocks needed by the adapter (eg. descriptor and consumer blocks).
+ * Remaining bus initialization steps are also completed. The adapter
+ * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
+ * must call dfx_open() to open the adapter and bring it on-line.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - initialization succeeded
+ * DFX_K_FAILURE - initialization failed - could not allocate memory
+ * or read adapter MAC address
+ *
+ * Assumptions:
+ * Memory allocated from pci_alloc_consistent() call is physically
+ * contiguous, locked memory.
+ *
+ * Side Effects:
+ * Adapter is reset and should be in DMA_UNAVAILABLE state before
+ * returning from this routine.
+ */
+
+static int __devinit dfx_driver_init(struct net_device *dev,
+ const char *print_name)
+{
+ DFX_board_t *bp = dev->priv;
+ int alloc_size; /* total buffer size needed */
+ char *top_v, *curr_v; /* virtual addrs into memory block */
+ dma_addr_t top_p, curr_p; /* physical addrs into memory block */
+ u32 data; /* host data register value */
+
+ DBG_printk("In dfx_driver_init...\n");
+
+ /* Initialize bus-specific hardware registers */
+
+ dfx_bus_init(dev);
+
+ /*
+ * Initialize default values for configurable parameters
+ *
+ * Note: All of these parameters are ones that a user may
+ * want to customize. It'd be nice to break these
+ * out into Space.c or someplace else that's more
+ * accessible/understandable than this file.
+ */
+
+ bp->full_duplex_enb = PI_SNMP_K_FALSE;
+ bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
+ bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
+ bp->rcv_bufs_to_post = RCV_BUFS_DEF;
+
+ /*
+ * Ensure that HW configuration is OK
+ *
+ * Note: Depending on the hardware revision, we may need to modify
+ * some of the configurable parameters to workaround hardware
+ * limitations. We'll perform this configuration check AFTER
+ * setting the parameters to their default values.
+ */
+
+ dfx_bus_config_check(bp);
+
+ /* Disable PDQ interrupts first */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
+
+ /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
+
+ (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
+
+ /* Read the factory MAC address from the adapter then save it */
+
+ if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
+ &data) != DFX_K_SUCCESS) {
+ printk("%s: Could not read adapter factory MAC address!\n",
+ print_name);
+ return(DFX_K_FAILURE);
+ }
+ memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32));
+
+ if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
+ &data) != DFX_K_SUCCESS) {
+ printk("%s: Could not read adapter factory MAC address!\n",
+ print_name);
+ return(DFX_K_FAILURE);
+ }
+ memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16));
+
+ /*
+ * Set current address to factory address
+ *
+ * Note: Node address override support is handled through
+ * dfx_ctl_set_mac_address.
+ */
+
+ memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+ if (bp->bus_type == DFX_BUS_TYPE_EISA)
+ printk("%s: DEFEA at I/O addr = 0x%lX, IRQ = %d, "
+ "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ print_name, dev->base_addr, dev->irq,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5]);
+ else
+ printk("%s: DEFPA at I/O addr = 0x%lX, IRQ = %d, "
+ "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ print_name, dev->base_addr, dev->irq,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5]);
+
+ /*
+ * Get memory for descriptor block, consumer block, and other buffers
+ * that need to be DMA read or written to by the adapter.
+ */
+
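+	/*
+	 * Note: this size computation is duplicated in the error path of
+	 * dfx_init_one_pci_or_eisa(); the two must be kept in sync.
+	 */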
+ alloc_size = sizeof(PI_DESCR_BLOCK) +
+ PI_CMD_REQ_K_SIZE_MAX +
+ PI_CMD_RSP_K_SIZE_MAX +
+#ifndef DYNAMIC_BUFFERS
+ (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
+#endif
+ sizeof(PI_CONSUMER_BLOCK) +
+ (PI_ALIGN_K_DESC_BLK - 1);
+ bp->kmalloced = top_v = pci_alloc_consistent(bp->pci_dev, alloc_size,
+ &bp->kmalloced_dma);
+ if (top_v == NULL) {
+ printk("%s: Could not allocate memory for host buffers "
+ "and structures!\n", print_name);
+ return(DFX_K_FAILURE);
+ }
+ memset(top_v, 0, alloc_size); /* zero out memory before continuing */
+ top_p = bp->kmalloced_dma; /* get physical address of buffer */
+
+ /*
+	 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
+	 * bytes more than needed were allocated.  Rounding the physical address
+	 * up to the next 8K boundary (done just below) therefore stays within
+	 * the allocation and yields an 8K-aligned descriptor block.  By carving
+	 * up the memory in a specific order, we'll guarantee the alignment
+	 * requirements for all other structures.
+ *
+ * Note: If the assumptions change regarding the non-paged, non-cached,
+ * physically contiguous nature of the memory block or the address
+ * alignments, then we'll need to implement a different algorithm
+ * for allocating the needed memory.
+ */
+
+ curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
+ curr_v = top_v + (curr_p - top_p);
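+
+	/*
+	 * curr_p/curr_v now point at the 8K-aligned start of the block.  The
+	 * structures below are carved out back-to-back from this address in
+	 * the following order: descriptor block, command request buffer,
+	 * command response buffer, LLC receive buffers (unless DYNAMIC_BUFFERS
+	 * is defined), and finally the consumer block.
+	 */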
+
+ /* Reserve space for descriptor block */
+
+ bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
+ bp->descr_block_phys = curr_p;
+ curr_v += sizeof(PI_DESCR_BLOCK);
+ curr_p += sizeof(PI_DESCR_BLOCK);
+
+ /* Reserve space for command request buffer */
+
+ bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
+ bp->cmd_req_phys = curr_p;
+ curr_v += PI_CMD_REQ_K_SIZE_MAX;
+ curr_p += PI_CMD_REQ_K_SIZE_MAX;
+
+ /* Reserve space for command response buffer */
+
+ bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
+ bp->cmd_rsp_phys = curr_p;
+ curr_v += PI_CMD_RSP_K_SIZE_MAX;
+ curr_p += PI_CMD_RSP_K_SIZE_MAX;
+
+ /* Reserve space for the LLC host receive queue buffers */
+
+ bp->rcv_block_virt = curr_v;
+ bp->rcv_block_phys = curr_p;
+
+#ifndef DYNAMIC_BUFFERS
+ curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
+ curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
+#endif
+
+ /* Reserve space for the consumer block */
+
+ bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
+ bp->cons_block_phys = curr_p;
+
+ /* Display virtual and physical addresses if debug driver */
+
+ DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
+ print_name,
+ (long)bp->descr_block_virt, bp->descr_block_phys);
+ DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
+ print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
+ DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
+ print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
+ DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
+ print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
+ DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
+ print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
+
+ return(DFX_K_SUCCESS);
+}
+
+
+/*
+ * =================
+ * = dfx_adap_init =
+ * =================
+ *
+ * Overview:
+ * Brings the adapter to the link avail/link unavailable state.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * get_buffers - non-zero if buffers to be allocated
+ *
+ * Functional Description:
+ * Issues the low-level firmware/hardware calls necessary to bring
+ * the adapter up, or to properly reset and restore adapter during
+ * run-time.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - Adapter brought up successfully
+ * DFX_K_FAILURE - Adapter initialization failed
+ *
+ * Assumptions:
+ * bp->reset_type should be set to a valid reset type value before
+ * calling this routine.
+ *
+ * Side Effects:
+ * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
+ * upon a successful return of this routine.
+ */
+
+static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
+ {
+ DBG_printk("In dfx_adap_init...\n");
+
+ /* Disable PDQ interrupts first */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
+
+ /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
+
+ if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
+ {
+ printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /*
+ * When the PDQ is reset, some false Type 0 interrupts may be pending,
+ * so we'll acknowledge all Type 0 interrupts now before continuing.
+ */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
+
+ /*
+ * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
+ *
+ * Note: We only need to clear host copies of these registers. The PDQ reset
+ * takes care of the on-board register values.
+ */
+
+ bp->cmd_req_reg.lword = 0;
+ bp->cmd_rsp_reg.lword = 0;
+ bp->rcv_xmt_reg.lword = 0;
+
+ /* Clear consumer block before going to DMA_AVAILABLE state */
+
+ memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
+
+ /* Initialize the DMA Burst Size */
+
+ if (dfx_hw_port_ctrl_req(bp,
+ PI_PCTRL_M_SUB_CMD,
+ PI_SUB_CMD_K_BURST_SIZE_SET,
+ bp->burst_size,
+ NULL) != DFX_K_SUCCESS)
+ {
+ printk("%s: Could not set adapter burst size!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /*
+ * Set base address of Consumer Block
+ *
+ * Assumption: 32-bit physical address of consumer block is 64 byte
+ * aligned. That is, bits 0-5 of the address must be zero.
+ */
+
+ if (dfx_hw_port_ctrl_req(bp,
+ PI_PCTRL_M_CONS_BLOCK,
+ bp->cons_block_phys,
+ 0,
+ NULL) != DFX_K_SUCCESS)
+ {
+ printk("%s: Could not set consumer block address!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /*
+ * Set base address of Descriptor Block and bring adapter to DMA_AVAILABLE state
+ *
+ * Note: We also set the literal and data swapping requirements in this
+ * command. Since this driver presently runs on Intel platforms
+ * which are Little Endian, we'll tell the adapter to byte swap
+ * data only. This code will need to change when we support
+ * Big Endian systems (eg. PowerPC).
+ *
+ * Assumption: 32-bit physical address of descriptor block is 8Kbyte
+ * aligned. That is, bits 0-12 of the address must be zero.
+ */
+
+ if (dfx_hw_port_ctrl_req(bp,
+ PI_PCTRL_M_INIT,
+ (u32) (bp->descr_block_phys | PI_PDATA_A_INIT_M_BSWAP_DATA),
+ 0,
+ NULL) != DFX_K_SUCCESS)
+ {
+ printk("%s: Could not set descriptor block address!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Set transmit flush timeout value */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
+ bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
+ bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
+ bp->cmd_req_virt->char_set.item[0].item_index = 0;
+ bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ {
+ printk("%s: DMA command request failed!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Set the initial values for eFDXEnable and MACTReq MIB objects */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
+ bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
+ bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
+ bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
+ bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
+ bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
+ bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
+ bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ {
+ printk("%s: DMA command request failed!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Initialize adapter CAM */
+
+ if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
+ {
+ printk("%s: Adapter CAM update failed!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Initialize adapter filters */
+
+ if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
+ {
+ printk("%s: Adapter filters update failed!\n", bp->dev->name);
+ return(DFX_K_FAILURE);
+ }
+
+ /*
+ * Remove any existing dynamic buffers (i.e. if the adapter is being
+ * reinitialized)
+ */
+
+ if (get_buffers)
+ dfx_rcv_flush(bp);
+
+ /* Initialize receive descriptor block and produce buffers */
+
+ if (dfx_rcv_init(bp, get_buffers))
+ {
+ printk("%s: Receive buffer allocation failed\n", bp->dev->name);
+ if (get_buffers)
+ dfx_rcv_flush(bp);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ {
+ printk("%s: Start command failed\n", bp->dev->name);
+ if (get_buffers)
+ dfx_rcv_flush(bp);
+ return(DFX_K_FAILURE);
+ }
+
+ /* Initialization succeeded, reenable PDQ interrupts */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
+ return(DFX_K_SUCCESS);
+ }
+
+
+/*
+ * ============
+ * = dfx_open =
+ * ============
+ *
+ * Overview:
+ * Opens the adapter
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This function brings the adapter to an operational state.
+ *
+ * Return Codes:
+ * 0 - Adapter was successfully opened
+ *   -EAGAIN - Adapter initialization failed; if the IRQ could not be
+ *             registered, the request_irq() error code is returned instead
+ *
+ * Assumptions:
+ * This routine should only be called for a device that was
+ * initialized successfully.
+ *
+ * Side Effects:
+ * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
+ * if the open is successful.
+ */
+
+static int dfx_open(struct net_device *dev)
+{
+ int ret;
+ DFX_board_t *bp = dev->priv;
+
+ DBG_printk("In dfx_open...\n");
+
+ /* Register IRQ - support shared interrupts by passing device ptr */
+
+ ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
+ return ret;
+ }
+
+ /*
+ * Set current address to factory MAC address
+ *
+ * Note: We've already done this step in dfx_driver_init.
+ * However, it's possible that a user has set a node
+ * address override, then closed and reopened the
+ * adapter. Unless we reset the device address field
+ * now, we'll continue to use the existing modified
+ * address.
+ */
+
+ memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+
+ /* Clear local unicast/multicast address tables and counts */
+
+ memset(bp->uc_table, 0, sizeof(bp->uc_table));
+ memset(bp->mc_table, 0, sizeof(bp->mc_table));
+ bp->uc_count = 0;
+ bp->mc_count = 0;
+
+ /* Disable promiscuous filter settings */
+
+ bp->ind_group_prom = PI_FSTATE_K_BLOCK;
+ bp->group_prom = PI_FSTATE_K_BLOCK;
+
+ spin_lock_init(&bp->lock);
+
+ /* Reset and initialize adapter */
+
+ bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
+ if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
+ {
+ printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Set device structure info */
+ netif_start_queue(dev);
+ return(0);
+}
+
+
+/*
+ * =============
+ * = dfx_close =
+ * =============
+ *
+ * Overview:
+ * Closes the device/module.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This routine closes the adapter and brings it to a safe state.
+ * The interrupt service routine is deregistered with the OS.
+ * The adapter can be opened again with another call to dfx_open().
+ *
+ * Return Codes:
+ *   Always returns 0.
+ *
+ * Assumptions:
+ * No further requests for this adapter are made after this routine is
+ * called. dfx_open() can be called to reset and reinitialize the
+ * adapter.
+ *
+ * Side Effects:
+ * Adapter should be in DMA_UNAVAILABLE state upon completion of this
+ * routine.
+ */
+
+static int dfx_close(struct net_device *dev)
+{
+ DFX_board_t *bp = dev->priv;
+
+ DBG_printk("In dfx_close...\n");
+
+ /* Disable PDQ interrupts first */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
+
+ /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
+
+ (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
+
+ /*
+ * Flush any pending transmit buffers
+ *
+ * Note: It's important that we flush the transmit buffers
+ * BEFORE we clear our copy of the Type 2 register.
+ * Otherwise, we'll have no idea how many buffers
+ * we need to free.
+ */
+
+ dfx_xmt_flush(bp);
+
+ /*
+ * Clear Type 1 and Type 2 registers after adapter reset
+ *
+ * Note: Even though we're closing the adapter, it's
+ * possible that an interrupt will occur after
+ * dfx_close is called. Without some assurance to
+ * the contrary we want to make sure that we don't
+ * process receive and transmit LLC frames and update
+ * the Type 2 register with bad information.
+ */
+
+ bp->cmd_req_reg.lword = 0;
+ bp->cmd_rsp_reg.lword = 0;
+ bp->rcv_xmt_reg.lword = 0;
+
+ /* Clear consumer block for the same reason given above */
+
+ memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
+
+	/* Release all dynamically allocated skbs in the receive ring. */
+
+ dfx_rcv_flush(bp);
+
+ /* Clear device structure flags */
+
+ netif_stop_queue(dev);
+
+ /* Deregister (free) IRQ */
+
+ free_irq(dev->irq, dev);
+
+ return(0);
+}
+
+
+/*
+ * ======================
+ * = dfx_int_pr_halt_id =
+ * ======================
+ *
+ * Overview:
+ * Displays halt id's in string form.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Determine current halt id and display appropriate string.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+
+static void dfx_int_pr_halt_id(DFX_board_t *bp)
+ {
+ PI_UINT32 port_status; /* PDQ port status register value */
+ PI_UINT32 halt_id; /* PDQ port status halt ID */
+
+ /* Read the latest port status */
+
+ dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
+
+ /* Display halt state transition information */
+
+ halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
+ switch (halt_id)
+ {
+ case PI_HALT_ID_K_SELFTEST_TIMEOUT:
+ printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_PARITY_ERROR:
+ printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_HOST_DIR_HALT:
+ printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_SW_FAULT:
+ printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_HW_FAULT:
+ printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_PC_TRACE:
+ printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_DMA_ERROR:
+ printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_IMAGE_CRC_ERROR:
+ printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
+ break;
+
+ case PI_HALT_ID_K_BUS_EXCEPTION:
+ printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
+ break;
+
+ default:
+ printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
+ break;
+ }
+ }
+
+
+/*
+ * ==========================
+ * = dfx_int_type_0_process =
+ * ==========================
+ *
+ * Overview:
+ * Processes Type 0 interrupts.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Processes all enabled Type 0 interrupts. If the reason for the interrupt
+ * is a serious fault on the adapter, then an error message is displayed
+ * and the adapter is reset.
+ *
+ * One tricky potential timing window is the rapid succession of "link avail"
+ * "link unavail" state change interrupts. The acknowledgement of the Type 0
+ * interrupt must be done before reading the state from the Port Status
+ * register. This is true because a state change could occur after reading
+ * the data, but before acknowledging the interrupt. If this state change
+ * does happen, it would be lost because the driver is using the old state,
+ * and it will never know about the new state because it subsequently
+ * acknowledges the state change interrupt.
+ *
+ * INCORRECT CORRECT
+ * read type 0 int reasons read type 0 int reasons
+ * read adapter state ack type 0 interrupts
+ * ack type 0 interrupts read adapter state
+ * ... process interrupt ... ... process interrupt ...
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * An adapter reset may occur if the adapter has any Type 0 error interrupts
+ * or if the port status indicates that the adapter is halted. The driver
+ * is responsible for reinitializing the adapter with the current CAM
+ * contents and adapter filter settings.
+ */
+
+static void dfx_int_type_0_process(DFX_board_t *bp)
+
+ {
+ PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
+ PI_UINT32 state; /* current adap state (from port status) */
+
+ /*
+ * Read host interrupt Type 0 register to determine which Type 0
+ * interrupts are pending. Immediately write it back out to clear
+ * those interrupts.
+ */
+
+ dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
+ dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
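+
+	/*
+	 * Note: acknowledging here, before dfx_hw_adap_state_rd() is called
+	 * below, follows the CORRECT ordering described in the header comment
+	 * above, so a state change arriving between the read and the ack is
+	 * not lost.
+	 */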
+
+ /* Check for Type 0 error interrupts */
+
+ if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
+ PI_TYPE_0_STAT_M_PM_PAR_ERR |
+ PI_TYPE_0_STAT_M_BUS_PAR_ERR))
+ {
+ /* Check for Non-Existent Memory error */
+
+ if (type_0_status & PI_TYPE_0_STAT_M_NXM)
+ printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
+
+ /* Check for Packet Memory Parity error */
+
+ if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
+ printk("%s: Packet Memory Parity Error\n", bp->dev->name);
+
+ /* Check for Host Bus Parity error */
+
+ if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
+ printk("%s: Host Bus Parity Error\n", bp->dev->name);
+
+ /* Reset adapter and bring it back on-line */
+
+ bp->link_available = PI_K_FALSE; /* link is no longer available */
+ bp->reset_type = 0; /* rerun on-board diagnostics */
+ printk("%s: Resetting adapter...\n", bp->dev->name);
+ if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
+ {
+ printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
+ return;
+ }
+ printk("%s: Adapter reset successful!\n", bp->dev->name);
+ return;
+ }
+
+ /* Check for transmit flush interrupt */
+
+ if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
+ {
+ /* Flush any pending xmt's and acknowledge the flush interrupt */
+
+ bp->link_available = PI_K_FALSE; /* link is no longer available */
+ dfx_xmt_flush(bp); /* flush any outstanding packets */
+ (void) dfx_hw_port_ctrl_req(bp,
+ PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
+ 0,
+ 0,
+ NULL);
+ }
+
+ /* Check for adapter state change */
+
+ if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
+ {
+ /* Get latest adapter state */
+
+ state = dfx_hw_adap_state_rd(bp); /* get adapter state */
+ if (state == PI_STATE_K_HALTED)
+ {
+ /*
+ * Adapter has transitioned to HALTED state, try to reset
+ * adapter to bring it back on-line. If reset fails,
+ * leave the adapter in the broken state.
+ */
+
+ printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
+ dfx_int_pr_halt_id(bp); /* display halt id as string */
+
+ /* Reset adapter and bring it back on-line */
+
+ bp->link_available = PI_K_FALSE; /* link is no longer available */
+ bp->reset_type = 0; /* rerun on-board diagnostics */
+ printk("%s: Resetting adapter...\n", bp->dev->name);
+ if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
+ {
+ printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
+ dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
+ return;
+ }
+ printk("%s: Adapter reset successful!\n", bp->dev->name);
+ }
+ else if (state == PI_STATE_K_LINK_AVAIL)
+ {
+ bp->link_available = PI_K_TRUE; /* set link available flag */
+ }
+ }
+ }
+
+
+/*
+ * ==================
+ * = dfx_int_common =
+ * ==================
+ *
+ * Overview:
+ * Interrupt service routine (ISR)
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * This is the ISR which processes incoming adapter interrupts.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * This routine assumes PDQ interrupts have not been disabled.
+ * When interrupts are disabled at the PDQ, the Port Status register
+ * is automatically cleared. This routine uses the Port Status
+ * register value to determine whether a Type 0 interrupt occurred,
+ * so it's important that adapter interrupts are not normally
+ * enabled/disabled at the PDQ.
+ *
+ * It's vital that this routine is NOT reentered for the
+ * same board and that the OS is not in another section of
+ * code (eg. dfx_xmt_queue_pkt) for the same board on a
+ * different thread.
+ *
+ * Side Effects:
+ * Pending interrupts are serviced. Depending on the type of
+ * interrupt, acknowledging and clearing the interrupt at the
+ * PDQ involves writing a register to clear the interrupt bit
+ * or updating completion indices.
+ */
+
+static void dfx_int_common(struct net_device *dev)
+{
+ DFX_board_t *bp = dev->priv;
+ PI_UINT32 port_status; /* Port Status register */
+
+ /* Process xmt interrupts - frequent case, so always call this routine */
+
+ if(dfx_xmt_done(bp)) /* free consumed xmt packets */
+ netif_wake_queue(dev);
+
+ /* Process rcv interrupts - frequent case, so always call this routine */
+
+ dfx_rcv_queue_process(bp); /* service received LLC frames */
+
+ /*
+ * Transmit and receive producer and completion indices are updated on the
+ * adapter by writing to the Type 2 Producer register. Since the frequent
+ * case is that we'll be processing either LLC transmit or receive buffers,
+ * we'll optimize I/O writes by doing a single register write here.
+ */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
+
+ /* Read PDQ Port Status register to find out which interrupts need processing */
+
+ dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
+
+ /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
+
+ if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
+ dfx_int_type_0_process(bp); /* process Type 0 interrupts */
+ }
+
+
+/*
+ * =================
+ * = dfx_interrupt =
+ * =================
+ *
+ * Overview:
+ * Interrupt processing routine
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * irq - interrupt vector
+ * dev_id - pointer to device information
+ * regs - pointer to registers structure
+ *
+ * Functional Description:
+ * This routine calls the interrupt processing routine for this adapter. It
+ * disables and reenables adapter interrupts, as appropriate. We can support
+ * shared interrupts since the incoming dev_id pointer provides our device
+ * structure context.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
+ * on Intel-based systems) is done by the operating system outside this
+ * routine.
+ *
+ * System interrupts are enabled through this call.
+ *
+ * Side Effects:
+ * Interrupts are disabled, then reenabled at the adapter.
+ */
+
+static void dfx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ struct net_device *dev = dev_id;
+ DFX_board_t *bp; /* private board structure pointer */
+ u8 tmp; /* used for disabling/enabling ints */
+
+ /* Get board pointer only if device structure is valid */
+
+ bp = dev->priv;
+
+ spin_lock(&bp->lock);
+
+	/* Service adapter interrupts */
+
+ if (bp->bus_type == DFX_BUS_TYPE_PCI)
+ {
+ /* Disable PDQ-PFI interrupts at PFI */
+
+ dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, PFI_MODE_M_DMA_ENB);
+
+ /* Call interrupt service routine for this adapter */
+
+ dfx_int_common(dev);
+
+ /* Clear PDQ interrupt status bit and reenable interrupts */
+
+ dfx_port_write_long(bp, PFI_K_REG_STATUS, PFI_STATUS_M_PDQ_INT);
+ dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
+ (PFI_MODE_M_PDQ_INT_ENB + PFI_MODE_M_DMA_ENB));
+ }
+ else
+ {
+ /* Disable interrupts at the ESIC */
+
+ dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &tmp);
+ tmp &= ~PI_CONFIG_STAT_0_M_INT_ENB;
+ dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, tmp);
+
+ /* Call interrupt service routine for this adapter */
+
+ dfx_int_common(dev);
+
+ /* Reenable interrupts at the ESIC */
+
+ dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &tmp);
+ tmp |= PI_CONFIG_STAT_0_M_INT_ENB;
+ dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, tmp);
+ }
+
+ spin_unlock(&bp->lock);
+ }
+
+
+/*
+ * =====================
+ * = dfx_ctl_get_stats =
+ * =====================
+ *
+ * Overview:
+ * Get statistics for FDDI adapter
+ *
+ * Returns:
+ * Pointer to FDDI statistics structure
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * Gets current MIB objects from adapter, then
+ * returns FDDI statistics structure as defined
+ * in if_fddi.h.
+ *
+ * Note: Since the FDDI statistics structure is
+ * still new and the device structure doesn't
+ * have an FDDI-specific get statistics handler,
+ * we'll return the FDDI statistics structure as
+ * a pointer to an Ethernet statistics structure.
+ * That way, at least the first part of the statistics
+ * structure can be decoded properly, and it allows
+ * "smart" applications to perform a second cast to
+ * decode the FDDI-specific statistics.
+ *
+ * We'll have to pay attention to this routine as the
+ * device structure becomes more mature and LAN media
+ * independent.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+
+static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
+ {
+ DFX_board_t *bp = dev->priv;
+
+ /* Fill the bp->stats structure with driver-maintained counters */
+
+ bp->stats.gen.rx_packets = bp->rcv_total_frames;
+ bp->stats.gen.tx_packets = bp->xmt_total_frames;
+ bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
+ bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
+ bp->stats.gen.rx_errors = bp->rcv_crc_errors +
+ bp->rcv_frame_status_errors +
+ bp->rcv_length_errors;
+ bp->stats.gen.tx_errors = bp->xmt_length_errors;
+ bp->stats.gen.rx_dropped = bp->rcv_discards;
+ bp->stats.gen.tx_dropped = bp->xmt_discards;
+ bp->stats.gen.multicast = bp->rcv_multicast_frames;
+ bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
+
+ /* Get FDDI SMT MIB objects */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ return((struct net_device_stats *) &bp->stats);
+
+ /* Fill the bp->stats structure with the SMT MIB object values */
+
+ memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
+ bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
+ bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
+ bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
+ memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
+ bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
+ bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
+ bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
+ bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
+ bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
+ bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
+ bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
+ bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
+ bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
+ bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
+ bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
+ bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
+ bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
+ bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
+ bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
+ bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
+ bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
+ bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
+ bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
+ bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
+ bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
+ bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
+ bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
+ bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
+ memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
+ bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
+ bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
+ bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
+ memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
+ bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
+ bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
+ bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
+ bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
+ bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
+ bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
+ bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
+ bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
+ bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
+ bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
+ bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
+ bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
+ bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
+ bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
+ bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
+ bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
+ memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
+ bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
+ bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
+ bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
+ bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
+ bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
+ bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
+ bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
+ bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
+ bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
+ bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
+ memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
+ memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
+ bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
+ bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
+ bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
+ bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
+ bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
+ bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
+ bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
+ bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
+ bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
+ bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
+ bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
+ bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
+ bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
+ bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
+ bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
+ bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
+ bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
+ bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
+ bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
+ bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
+ bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
+ bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
+ bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
+ bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
+ bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
+ bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
+
+ /* Get FDDI counters */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ return((struct net_device_stats *) &bp->stats);
+
+ /* Fill the bp->stats structure with the FDDI counter values */
+
+ bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
+ bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
+ bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
+ bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
+ bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
+ bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
+ bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
+ bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
+ bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
+ bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
+ bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
+
+ return((struct net_device_stats *) &bp->stats);
+ }
+
+
+/*
+ * ==============================
+ * = dfx_ctl_set_multicast_list =
+ * ==============================
+ *
+ * Overview:
+ * Enable/Disable LLC frame promiscuous mode reception
+ * on the adapter and/or update multicast address table.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This routine follows a fairly simple algorithm for setting the
+ * adapter filters and CAM:
+ *
+ * if IFF_PROMISC flag is set
+ * enable LLC individual/group promiscuous mode
+ * else
+ * disable LLC individual/group promiscuous mode
+ * if number of incoming multicast addresses >
+ * (CAM max size - number of unicast addresses in CAM)
+ * enable LLC group promiscuous mode
+ * set driver-maintained multicast address count to zero
+ * else
+ * disable LLC group promiscuous mode
+ * set driver-maintained multicast address count to incoming count
+ * update adapter CAM
+ * update adapter filters
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * Multicast addresses are presented in canonical (LSB) format.
+ *
+ * Side Effects:
+ * On-board adapter CAM and filters are updated.
+ */
+
+static void dfx_ctl_set_multicast_list(struct net_device *dev)
+ {
+ DFX_board_t *bp = dev->priv;
+ int i; /* used as index in for loop */
+ struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+
+ /* Enable LLC frame promiscuous mode, if necessary */
+
+ if (dev->flags & IFF_PROMISC)
+ bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
+
+ /* Else, update multicast address table */
+
+ else
+ {
+ bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
+ /*
+ * Check whether incoming multicast address count exceeds table size
+ *
+ * Note: The adapters utilize an on-board 64 entry CAM for
+ * supporting perfect filtering of multicast packets
+ * and bridge functions when adding unicast addresses.
+ * There is no hash function available. To support
+ * additional multicast addresses, the all multicast
+ * filter (LLC group promiscuous mode) must be enabled.
+ *
+ * The firmware reserves two CAM entries for SMT-related
+ * multicast addresses, which leaves 62 entries available.
+ * The following code ensures that we're not being asked
+ * to add more than 62 addresses to the CAM. If we are,
+ * the driver will enable the all multicast filter.
+ * Should the number of multicast addresses drop below
+ * the high water mark, the filter will be disabled and
+ * perfect filtering will be used.
+ */
+
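+		/*
+		 * For illustration (assuming PI_CMD_ADDR_FILTER_K_SIZE matches
+		 * the 62 usable CAM entries described above): with a single
+		 * unicast override in the CAM, up to 61 multicast addresses
+		 * can still be perfectly filtered; one more tips us into LLC
+		 * group promiscuous mode.
+		 */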
+ if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ {
+ bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
+ bp->mc_count = 0; /* Don't add mc addrs to CAM */
+ }
+ else
+ {
+ bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
+ bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ }
+
+ /* Copy addresses to multicast address table, then update adapter CAM */
+
+ dmi = dev->mc_list; /* point to first multicast addr */
+ for (i=0; i < bp->mc_count; i++)
+ {
+ memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
+ dmi = dmi->next; /* point to next multicast addr */
+ }
+ if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
+ {
+ DBG_printk("%s: Could not update multicast address table!\n", dev->name);
+ }
+ else
+ {
+ DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
+ }
+ }
+
+ /* Update adapter filters */
+
+ if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
+ {
+ DBG_printk("%s: Could not update adapter filters!\n", dev->name);
+ }
+ else
+ {
+ DBG_printk("%s: Adapter filters updated!\n", dev->name);
+ }
+ }
+
+
+/*
+ * ===========================
+ * = dfx_ctl_set_mac_address =
+ * ===========================
+ *
+ * Overview:
+ * Add node address override (unicast address) to adapter
+ * CAM and update dev_addr field in device table.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * dev - pointer to device information
+ * addr - pointer to sockaddr structure containing unicast address to add
+ *
+ * Functional Description:
+ * The adapter supports node address overrides by adding one or more
+ * unicast addresses to the adapter CAM. This is similar to adding
+ * multicast addresses. In this routine we'll update the driver and
+ * device structures with the new address, then update the adapter CAM
+ * to ensure that the adapter will copy and strip frames destined and
+ * sourced by that address.
+ *
+ * Return Codes:
+ * Always returns zero.
+ *
+ * Assumptions:
+ * The address pointed to by addr->sa_data is a valid unicast
+ * address and is presented in canonical (LSB) format.
+ *
+ * Side Effects:
+ * On-board adapter CAM is updated. On-board adapter filters
+ * may be updated.
+ */
+
+static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
+ {
+ DFX_board_t *bp = dev->priv;
+ struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
+
+ /* Copy unicast address to driver-maintained structs and update count */
+
+ memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
+ memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
+ bp->uc_count = 1;
+
+ /*
+ * Verify we're not exceeding the CAM size by adding unicast address
+ *
+ * Note: It's possible that before entering this routine we've
+ * already filled the CAM with 62 multicast addresses.
+ * Since we need to place the node address override into
+ * the CAM, we have to check to see that we're not
+ * exceeding the CAM size. If we are, we have to enable
+ * the LLC group (multicast) promiscuous mode filter as
+ * in dfx_ctl_set_multicast_list.
+ */
+
+ if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
+ {
+ bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
+ bp->mc_count = 0; /* Don't add mc addrs to CAM */
+
+ /* Update adapter filters */
+
+ if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
+ {
+ DBG_printk("%s: Could not update adapter filters!\n", dev->name);
+ }
+ else
+ {
+ DBG_printk("%s: Adapter filters updated!\n", dev->name);
+ }
+ }
+
+ /* Update adapter CAM with new unicast address */
+
+ if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
+ {
+ DBG_printk("%s: Could not set new MAC address!\n", dev->name);
+ }
+ else
+ {
+ DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
+ }
+ return(0); /* always return zero */
+ }
+
+
+/*
+ * ======================
+ * = dfx_ctl_update_cam =
+ * ======================
+ *
+ * Overview:
+ * Procedure to update adapter CAM (Content Addressable Memory)
+ * with desired unicast and multicast address entries.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Updates adapter CAM with current contents of board structure
+ * unicast and multicast address tables. Since there are only 62
+ * free entries in CAM, this routine ensures that the command
+ * request buffer is not overrun.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - Request succeeded
+ * DFX_K_FAILURE - Request failed
+ *
+ * Assumptions:
+ * All addresses being added (unicast and multicast) are in canonical
+ * order.
+ *
+ * Side Effects:
+ * On-board adapter CAM is updated.
+ */
+
+static int dfx_ctl_update_cam(DFX_board_t *bp)
+ {
+ int i; /* used as index */
+ PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
+
+ /*
+ * Fill in command request information
+ *
+ * Note: Even though both the unicast and multicast address
+ * table entries are stored as contiguous 6 byte entries,
+ * the firmware address filter set command expects each
+ * entry to be two longwords (8 bytes total). We must be
+ * careful to only copy the six bytes of each unicast and
+ * multicast table entry into each command entry. This
+ * is also why we must first clear the entire command
+ * request buffer.
+ */
+
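+	/*
+	 * A sketch of one command entry, as implied by the PI_LAN_ADDR and
+	 * PI_CMD_ADDR_FILTER_SET_REQ definitions in defxx.h (two longwords,
+	 * i.e. 8 bytes, per entry):
+	 *
+	 *	entry[n]:  | lwrd_0 (4 bytes) | lwrd_1 (4 bytes)          |
+	 *	             address bytes 0-3   address bytes 4-5 + 2 pad
+	 *
+	 * The memcpy() calls below write only FDDI_K_ALEN (6) bytes into each
+	 * entry, so the two pad bytes keep the zeroes written by the memset().
+	 */
+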
+ memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
+ p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
+
+ /* Now add unicast addresses to command request buffer, if any */
+
+ for (i=0; i < (int)bp->uc_count; i++)
+ {
+ if (i < PI_CMD_ADDR_FILTER_K_SIZE)
+ {
+ memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
+ p_addr++; /* point to next command entry */
+ }
+ }
+
+ /* Now add multicast addresses to command request buffer, if any */
+
+ for (i=0; i < (int)bp->mc_count; i++)
+ {
+ if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
+ {
+ memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
+ p_addr++; /* point to next command entry */
+ }
+ }
+
+ /* Issue command to update adapter CAM, then return */
+
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ return(DFX_K_FAILURE);
+ return(DFX_K_SUCCESS);
+ }
+
+
+/*
+ * ==========================
+ * = dfx_ctl_update_filters =
+ * ==========================
+ *
+ * Overview:
+ * Procedure to update adapter filters with desired
+ * filter settings.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Enables or disables filter using current filter settings.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - Request succeeded.
+ * DFX_K_FAILURE - Request failed.
+ *
+ * Assumptions:
+ * We must always pass up packets destined to the broadcast
+ * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
+ * broadcast filter enabled.
+ *
+ * Side Effects:
+ * On-board adapter filters are updated.
+ */
+
+static int dfx_ctl_update_filters(DFX_board_t *bp)
+ {
+ int i = 0; /* used as index */
+
+ /* Fill in command request information */
+
+ bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
+
+ /* Initialize Broadcast filter - * ALWAYS ENABLED * */
+
+ bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
+ bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
+
+ /* Initialize LLC Individual/Group Promiscuous filter */
+
+ bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
+ bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
+
+ /* Initialize LLC Group Promiscuous filter */
+
+ bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
+ bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
+
+ /* Terminate the item code list */
+
+ bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
+
+ /* Issue command to update adapter filters, then return */
+
+ if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
+ return(DFX_K_FAILURE);
+ return(DFX_K_SUCCESS);
+ }
+
+
+/*
+ * ======================
+ * = dfx_hw_dma_cmd_req =
+ * ======================
+ *
+ * Overview:
+ * Sends PDQ DMA command to adapter firmware
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * The command request and response buffers are posted to the adapter in the manner
+ * described in the PDQ Port Specification:
+ *
+ * 1. Command Response Buffer is posted to adapter.
+ * 2. Command Request Buffer is posted to adapter.
+ * 3. Command Request consumer index is polled until it indicates that request
+ * buffer has been DMA'd to adapter.
+ * 4. Command Response consumer index is polled until it indicates that response
+ * buffer has been DMA'd from adapter.
+ *
+ * This ordering ensures that a response buffer is already available for the firmware
+ * to use once it's done processing the request buffer.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - DMA command succeeded
+ * DFX_K_OUTSTATE - Adapter is NOT in proper state
+ * DFX_K_HW_TIMEOUT - DMA command timed out
+ *
+ * Assumptions:
+ * Command request buffer has already been filled with desired DMA command.
+ *
+ * Side Effects:
+ * None
+ */
+
+static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
+ {
+ int status; /* adapter status */
+ int timeout_cnt; /* used in for loops */
+
+ /* Make sure the adapter is in a state that we can issue the DMA command in */
+
+ status = dfx_hw_adap_state_rd(bp);
+ if ((status == PI_STATE_K_RESET) ||
+ (status == PI_STATE_K_HALTED) ||
+ (status == PI_STATE_K_DMA_UNAVAIL) ||
+ (status == PI_STATE_K_UPGRADE))
+ return(DFX_K_OUTSTATE);
+
+ /* Put response buffer on the command response queue */
+
+ bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
+ ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
+ bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
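+
+	/*
+	 * Note: with the buffer constants from defxx.h the segment length
+	 * written into long_0 above works out to 512 / 128 == 4, i.e. the
+	 * receive-side length is expressed in units of the buffer alignment,
+	 * whereas the request descriptor below carries the byte count directly.
+	 */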
+
+ /* Bump (and wrap) the producer index and write out to register */
+
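+	/*
+	 * Worked example of the wrap, assuming for illustration a ring of 16
+	 * entries: a producer index of 15 becomes (15 + 1) & (16 - 1) == 0.
+	 * Masking with NUM_ENTRIES-1 only works because the ring sizes are
+	 * powers of two.
+	 */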
+ bp->cmd_rsp_reg.index.prod += 1;
+ bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
+
+ /* Put request buffer on the command request queue */
+
+ bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
+ PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
+ bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
+
+ /* Bump (and wrap) the producer index and write out to register */
+
+ bp->cmd_req_reg.index.prod += 1;
+ bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
+
+ /*
+ * Here we wait for the command request consumer index to be equal
+ * to the producer, indicating that the adapter has DMAed the request.
+ */
+
+ for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
+ {
+ if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
+ break;
+ udelay(100); /* wait for 100 microseconds */
+ }
+ if (timeout_cnt == 0)
+ return(DFX_K_HW_TIMEOUT);
+
+ /* Bump (and wrap) the completion index and write out to register */
+
+ bp->cmd_req_reg.index.comp += 1;
+ bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
+
+ /*
+ * Here we wait for the command response consumer index to be equal
+ * to the producer, indicating that the adapter has DMAed the response.
+ */
+
+ for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
+ {
+ if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
+ break;
+ udelay(100); /* wait for 100 microseconds */
+ }
+ if (timeout_cnt == 0)
+ return(DFX_K_HW_TIMEOUT);
+
+ /* Bump (and wrap) the completion index and write out to register */
+
+ bp->cmd_rsp_reg.index.comp += 1;
+ bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
+ return(DFX_K_SUCCESS);
+ }
+
+
+/*
+ * ========================
+ * = dfx_hw_port_ctrl_req =
+ * ========================
+ *
+ * Overview:
+ * Sends PDQ port control command to adapter firmware
+ *
+ * Returns:
+ * Condition code. The Host Data register value is also returned through
+ * host_data if that pointer is not NULL.
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * command - port control command
+ * data_a - port data A register value
+ * data_b - port data B register value
+ * host_data - ptr to host data register value
+ *
+ * Functional Description:
+ * Send generic port control command to adapter by writing
+ * to various PDQ port registers, then polling for completion.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - port control command succeeded
+ * DFX_K_HW_TIMEOUT - port control command timed out
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+
+static int dfx_hw_port_ctrl_req(
+ DFX_board_t *bp,
+ PI_UINT32 command,
+ PI_UINT32 data_a,
+ PI_UINT32 data_b,
+ PI_UINT32 *host_data
+ )
+
+ {
+ PI_UINT32 port_cmd; /* Port Control command register value */
+ int timeout_cnt; /* used in for loops */
+
+ /* Set Command Error bit in command longword */
+
+ port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
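+
+	/*
+	 * The poll further down waits for this bit to read back as zero in
+	 * the Port Control register (presumably cleared by the adapter once
+	 * it has taken the command) before declaring success, or gives up
+	 * after the timeout.
+	 */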
+
+ /* Issue port command to the adapter */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
+
+ /* Now wait for command to complete */
+
+ if (command == PI_PCTRL_M_BLAST_FLASH)
+ timeout_cnt = 600000; /* set command timeout count to 60 seconds */
+ else
+ timeout_cnt = 20000; /* set command timeout count to 2 seconds */
+
+ for (; timeout_cnt > 0; timeout_cnt--)
+ {
+ dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
+ if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
+ break;
+ udelay(100); /* wait for 100 microseconds */
+ }
+ if (timeout_cnt == 0)
+ return(DFX_K_HW_TIMEOUT);
+
+ /*
+	 * If the caller supplied a non-NULL host_data pointer, return the
+	 * contents of the HOST_DATA register through it.
+ */
+
+ if (host_data != NULL)
+ dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
+ return(DFX_K_SUCCESS);
+ }
+
+
+/*
+ * =====================
+ * = dfx_hw_adap_reset =
+ * =====================
+ *
+ * Overview:
+ * Resets adapter
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * type - type of reset to perform
+ *
+ * Functional Description:
+ * Issue soft reset to adapter by writing to PDQ Port Reset
+ * register. Use incoming reset type to tell adapter what
+ * kind of reset operation to perform.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * This routine merely issues a soft reset to the adapter.
+ * It is expected that after this routine returns, the caller
+ * will appropriately poll the Port Status register for the
+ * adapter to enter the proper state.
+ *
+ * Side Effects:
+ * Internal adapter registers are cleared.
+ */
+
+static void dfx_hw_adap_reset(
+ DFX_board_t *bp,
+ PI_UINT32 type
+ )
+
+ {
+ /* Set Reset type and assert reset */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
+
+	/* Wait for at least 1 microsecond as required by the spec; we wait 20 just to be safe */
+
+ udelay(20);
+
+ /* Deassert reset */
+
+ dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
+ }
+
+
+/*
+ * ========================
+ * = dfx_hw_adap_state_rd =
+ * ========================
+ *
+ * Overview:
+ * Returns current adapter state
+ *
+ * Returns:
+ * Adapter state per PDQ Port Specification
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Reads PDQ Port Status register and returns adapter state.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+
+static int dfx_hw_adap_state_rd(DFX_board_t *bp)
+ {
+ PI_UINT32 port_status; /* Port Status register value */
+
+ dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
+ return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
+ }
+
+
+/*
+ * =====================
+ * = dfx_hw_dma_uninit =
+ * =====================
+ *
+ * Overview:
+ * Brings adapter to DMA_UNAVAILABLE state
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * type - type of reset to perform
+ *
+ * Functional Description:
+ * Bring adapter to DMA_UNAVAILABLE state by performing the following:
+ * 1. Set reset type bit in Port Data A Register then reset adapter.
+ * 2. Check that adapter is in DMA_UNAVAILABLE state.
+ *
+ * Return Codes:
+ * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
+ * DFX_K_HW_TIMEOUT - adapter did not reset properly
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * Internal adapter registers are cleared.
+ */
+
+static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
+ {
+ int timeout_cnt; /* used in for loops */
+
+ /* Set reset type bit and reset adapter */
+
+ dfx_hw_adap_reset(bp, type);
+
+ /* Now wait for adapter to enter DMA_UNAVAILABLE state */
+
+ for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
+ {
+ if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
+ break;
+ udelay(100); /* wait for 100 microseconds */
+ }
+ if (timeout_cnt == 0)
+ return(DFX_K_HW_TIMEOUT);
+ return(DFX_K_SUCCESS);
+ }
+
+/*
+ * Align the data area of an sk_buff to a power-of-2 boundary
+ *
+ */
+
+static void my_skb_align(struct sk_buff *skb, int n)
+{
+ unsigned long x = (unsigned long)skb->data;
+ unsigned long v;
+
+ v = ALIGN(x, n); /* Where we want to be */
+
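+	/*
+	 * Worked example with illustrative values: x == 0x2302 and n == 128
+	 * give v == 0x2380, so the skb_reserve() below advances skb->data by
+	 * 0x7e bytes.
+	 */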
+ skb_reserve(skb, v - x);
+}
+
+
+/*
+ * ================
+ * = dfx_rcv_init =
+ * ================
+ *
+ * Overview:
+ * Produces buffers to adapter LLC Host receive descriptor block
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ * get_buffers - non-zero if buffers to be allocated
+ *
+ * Functional Description:
+ * This routine can be called during dfx_adap_init() or during an adapter
+ * reset. It initializes the descriptor block and produces all allocated
+ * LLC Host queue receive buffers.
+ *
+ * Return Codes:
+ * 0 on success, or -ENOMEM if a buffer allocation fails (dynamic buffer
+ * allocation only). On failure, buffers that were already allocated are
+ * not released; it is the caller's responsibility to free them.
+ *
+ * Assumptions:
+ * The PDQ has been reset and the adapter and driver maintained Type 2
+ * register indices are cleared.
+ *
+ * Side Effects:
+ * Receive buffers are posted to the adapter LLC queue and the adapter
+ * is notified.
+ */
+
+static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
+ {
+ int i, j; /* used in for loop */
+
+ /*
+	 * Since each receive buffer is a single fragment of the same length,
+	 * initialize the first longword in each receive descriptor for the entire
+	 * LLC Host descriptor block. Also initialize the second longword in each
+	 * receive descriptor with the physical address of the receive buffer. We'll
+	 * always allocate receive buffers in powers of 2 so that we can easily fill
+	 * the 256 entry descriptor block and produce new receive buffers by simply
+	 * updating the receive producer index.
+ *
+ * Assumptions:
+ * To support all shipping versions of PDQ, the receive buffer size
+ * must be mod 128 in length and the physical address must be 128 byte
+ * aligned. In other words, bits 0-6 of the length and address must
+ * be zero for the following descriptor field entries to be correct on
+ * all PDQ-based boards. We guaranteed both requirements during
+ * driver initialization when we allocated memory for the receive buffers.
+ */
+
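+	/*
+	 * Posting pattern sketch (an rcv_bufs_to_post of 32 is an assumed
+	 * example value): the nested loops fill descriptor entries i, i + 32,
+	 * i + 64, ... for each i, so all PI_RCV_DATA_K_NUM_ENTRIES (256 per
+	 * the comment above) entries get a descriptor. Without DYNAMIC_BUFFERS
+	 * buffer i backs every 32nd entry, so only rcv_bufs_to_post distinct
+	 * buffers are needed; with DYNAMIC_BUFFERS each entry gets its own
+	 * freshly allocated skb.
+	 */
+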
+ if (get_buffers) {
+#ifdef DYNAMIC_BUFFERS
+ for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
+ for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
+ {
+ struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_NOIO);
+ if (!newskb)
+ return -ENOMEM;
+ bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
+ ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
+ /*
+ * align to 128 bytes for compatibility with
+ * the old EISA boards.
+ */
+
+ my_skb_align(newskb, 128);
+ bp->descr_block_virt->rcv_data[i + j].long_1 =
+ (u32)pci_map_single(bp->pci_dev, newskb->data,
+ NEW_SKB_SIZE,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * p_rcv_buff_va is only used inside the
+ * kernel so we put the skb pointer here.
+ */
+ bp->p_rcv_buff_va[i+j] = (char *) newskb;
+ }
+#else
+ for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
+ for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
+ {
+ bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
+ ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
+ bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
+ bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
+ }
+#endif
+ }
+
+ /* Update receive producer and Type 2 register */
+
+ bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
+ return 0;
+ }
+
+
+/*
+ * =========================
+ * = dfx_rcv_queue_process =
+ * =========================
+ *
+ * Overview:
+ * Process received LLC frames.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Received LLC frames are processed until there are no more consumed frames.
+ * Once all frames are processed, the receive buffers are returned to the
+ * adapter. Note that this algorithm bounds the time spent in this routine,
+ * because there is a fixed number of receive buffers to process and new
+ * buffers are not produced until this routine exits and returns to the ISR.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+
+static void dfx_rcv_queue_process(
+ DFX_board_t *bp
+ )
+
+ {
+ PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
+ char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
+ u32 descr, pkt_len; /* FMC descriptor field and packet length */
+ struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */
+
+ /* Service all consumed LLC receive frames */
+
+ p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
+ while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
+ {
+ /* Process any errors */
+
+ int entry;
+
+ entry = bp->rcv_xmt_reg.index.rcv_comp;
+#ifdef DYNAMIC_BUFFERS
+ p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
+#else
+ p_buff = (char *) bp->p_rcv_buff_va[entry];
+#endif
+ memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
+
+ if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
+ {
+ if (descr & PI_FMC_DESCR_M_RCC_CRC)
+ bp->rcv_crc_errors++;
+ else
+ bp->rcv_frame_status_errors++;
+ }
+ else
+ {
+ int rx_in_place = 0;
+
+ /* The frame was received without errors - verify packet length */
+
+ pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
+ pkt_len -= 4; /* subtract 4 byte CRC */
+ if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
+ bp->rcv_length_errors++;
+ else{
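+				/*
+				 * Copybreak strategy as implemented below (DYNAMIC_BUFFERS
+				 * only): frames larger than SKBUFF_RX_COPYBREAK have their
+				 * receive skb handed straight up the stack and replaced in
+				 * the ring by a freshly allocated one, while smaller frames
+				 * are copied into a new skb sized to the packet so that the
+				 * large receive buffer can be reused.
+				 */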
+#ifdef DYNAMIC_BUFFERS
+ if (pkt_len > SKBUFF_RX_COPYBREAK) {
+ struct sk_buff *newskb;
+
+ newskb = dev_alloc_skb(NEW_SKB_SIZE);
+ if (newskb){
+ rx_in_place = 1;
+
+ my_skb_align(newskb, 128);
+ skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
+ pci_unmap_single(bp->pci_dev,
+ bp->descr_block_virt->rcv_data[entry].long_1,
+ NEW_SKB_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, RCV_BUFF_K_PADDING);
+ bp->p_rcv_buff_va[entry] = (char *)newskb;
+ bp->descr_block_virt->rcv_data[entry].long_1 =
+ (u32)pci_map_single(bp->pci_dev,
+ newskb->data,
+ NEW_SKB_SIZE,
+ PCI_DMA_FROMDEVICE);
+ } else
+ skb = NULL;
+ } else
+#endif
+ skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
+ if (skb == NULL)
+ {
+ printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
+ bp->rcv_discards++;
+ break;
+ }
+ else {
+#ifndef DYNAMIC_BUFFERS
+ if (! rx_in_place)
+#endif
+ {
+ /* Receive buffer allocated, pass receive packet up */
+
+ memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3);
+ }
+
+ skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
+ skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
+ skb->dev = bp->dev; /* pass up device pointer */
+
+ skb->protocol = fddi_type_trans(skb, bp->dev);
+ bp->rcv_total_bytes += skb->len;
+ netif_rx(skb);
+
+ /* Update the rcv counters */
+ bp->dev->last_rx = jiffies;
+ bp->rcv_total_frames++;
+ if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
+ bp->rcv_multicast_frames++;
+ }
+ }
+ }
+
+ /*
+ * Advance the producer (for recycling) and advance the completion
+ * (for servicing received frames). Note that it is okay to
+ * advance the producer without checking that it passes the
+ * completion index because they are both advanced at the same
+ * rate.
+ */
+
+ bp->rcv_xmt_reg.index.rcv_prod += 1;
+ bp->rcv_xmt_reg.index.rcv_comp += 1;
+ }
+ }
+
+
+/*
+ * =====================
+ * = dfx_xmt_queue_pkt =
+ * =====================
+ *
+ * Overview:
+ * Queues packets for transmission
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * skb - pointer to sk_buff to queue for transmission
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * Here we assume that an incoming skb transmit request
+ * is contained in a single physically contiguous buffer
+ * whose start-of-packet virtual address (skb->data) can
+ * be converted to a physical address with pci_map_single().
+ *
+ * Since the adapter architecture requires a three byte
+ * packet request header prepended to the start of the packet,
+ * we'll write the three byte field immediately prior to
+ * the FC byte. This assumption is valid because we've
+ * ensured that dev->hard_header_len includes three pad
+ * bytes. By posting a single fragment to the adapter,
+ * we'll reduce the number of descriptor fetches and
+ * bus traffic needed to send the request.
+ *
+ * Also, we can't free the skb until after it's been DMA'd
+ * out by the adapter, so we'll queue it in the driver and
+ * return it in dfx_xmt_done.
+ *
+ * Return Codes:
+ * 0 - driver queued packet, link is unavailable, or skbuff was bad
+ * 1 - caller should requeue the sk_buff for later transmission
+ *
+ * Assumptions:
+ * First and foremost, we assume the incoming skb pointer
+ * is NOT NULL and is pointing to a valid sk_buff structure.
+ *
+ * The outgoing packet is complete, starting with the
+ * frame control byte including the last byte of data,
+ * but NOT including the 4 byte CRC. We'll let the
+ * adapter hardware generate and append the CRC.
+ *
+ * The entire packet is stored in one physically
+ * contiguous buffer which is not cached and whose
+ * 32-bit physical address can be determined.
+ *
+ * It's vital that this routine is NOT reentered for the
+ * same board and that the OS is not in another section of
+ * code (e.g. dfx_int_common) for the same board on a
+ * different thread.
+ *
+ * Side Effects:
+ * None
+ */
+
+static int dfx_xmt_queue_pkt(
+ struct sk_buff *skb,
+ struct net_device *dev
+ )
+
+ {
+ DFX_board_t *bp = dev->priv;
+ u8 prod; /* local transmit producer index */
+ PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
+ XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ /*
+ * Verify that incoming transmit request is OK
+ *
+ * Note: The packet size check is consistent with other
+ * Linux device drivers, although the correct packet
+ * size should be verified before calling the
+ * transmit routine.
+ */
+
+ if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
+ {
+ printk("%s: Invalid packet length - %u bytes\n",
+ dev->name, skb->len);
+ bp->xmt_length_errors++; /* bump error counter */
+ netif_wake_queue(dev);
+ dev_kfree_skb(skb);
+ return(0); /* return "success" */
+ }
+ /*
+ * See if adapter link is available, if not, free buffer
+ *
+ * Note: If the link isn't available, free buffer and return 0
+ * rather than tell the upper layer to requeue the packet.
+ * The methodology here is that by the time the link
+ * becomes available, the packet to be sent will be
+ * fairly stale. By simply dropping the packet, the
+ * higher layer protocols will eventually time out
+ * waiting for response packets which it won't receive.
+ */
+
+ if (bp->link_available == PI_K_FALSE)
+ {
+ if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
+ bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
+ else
+ {
+ bp->xmt_discards++; /* bump error counter */
+ dev_kfree_skb(skb); /* free sk_buff now */
+ netif_wake_queue(dev);
+ return(0); /* return "success" */
+ }
+ }
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ /* Get the current producer and the next free xmt data descriptor */
+
+ prod = bp->rcv_xmt_reg.index.xmt_prod;
+ p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
+
+ /*
+ * Get pointer to auxiliary queue entry to contain information
+ * for this packet.
+ *
+ * Note: The current xmt producer index will become the
+ * current xmt completion index when we complete this
+ * packet later on. So, we'll get the pointer to the
+ * next auxiliary queue entry now before we bump the
+ * producer index.
+ */
+
+ p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
+
+ /* Write the three PRH bytes immediately before the FC byte */
+
+ skb_push(skb,3);
+ skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
+ skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
+ skb->data[2] = DFX_PRH2_BYTE; /* specification */
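+
+	/*
+	 * After the skb_push() above, skb->data points at the three PRH bytes,
+	 * immediately followed by the original frame starting with its FC byte,
+	 * and skb->len already includes the three pushed bytes, so the
+	 * descriptor below can use skb->data and skb->len as-is.
+	 */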
+
+ /*
+ * Write the descriptor with buffer info and bump producer
+ *
+ * Note: Since we need to start DMA from the packet request
+ * header, we'll add 3 bytes to the DMA buffer length,
+ * and we'll determine the physical address of the
+ * buffer from the PRH, not skb->data.
+ *
+ * Assumptions:
+ * 1. Packet starts with the frame control (FC) byte
+ * at skb->data.
+ * 2. The 4-byte CRC is not appended to the buffer or
+ * included in the length.
+ * 3. Packet length (skb->len) is from FC to end of
+ * data, inclusive.
+ * 4. The packet length does not exceed the maximum
+ * FDDI LLC frame length of 4491 bytes.
+ * 5. The entire packet is contained in a physically
+ * contiguous, non-cached, locked memory space
+ * comprised of a single buffer pointed to by
+ * skb->data.
+ * 6. The physical address of the start of packet
+ * can be determined from the virtual address
+ * by using pci_map_single() and is only 32-bits
+ * wide.
+ */
+
+ p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
+ p_xmt_descr->long_1 = (u32)pci_map_single(bp->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+
+ /*
+ * Verify that descriptor is actually available
+ *
+ * Note: If descriptor isn't available, return 1 which tells
+ * the upper layer to requeue the packet for later
+ * transmission.
+ *
+	 * The producer index must never be allowed to catch up with the
+	 * completion index, because equal indices are reserved to mean
+	 * that the queue is empty.
+ */
+
+ if (prod == bp->rcv_xmt_reg.index.xmt_comp)
+ {
+ skb_pull(skb,3);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return(1); /* requeue packet for later */
+ }
+
+ /*
+ * Save info for this packet for xmt done indication routine
+ *
+ * Normally, we'd save the producer index in the p_xmt_drv_descr
+ * structure so that we'd have it handy when we complete this
+ * packet later (in dfx_xmt_done). However, since the current
+ * transmit architecture guarantees a single fragment for the
+ * entire packet, we can simply bump the completion index by
+ * one (1) for each completed packet.
+ *
+ * Note: If this assumption changes and we're presented with
+ * an inconsistent number of transmit fragments for packet
+ * data, we'll need to modify this code to save the current
+ * transmit producer index.
+ */
+
+ p_xmt_drv_descr->p_skb = skb;
+
+ /* Update Type 2 register */
+
+ bp->rcv_xmt_reg.index.xmt_prod = prod;
+ dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ netif_wake_queue(dev);
+ return(0); /* packet queued to adapter */
+ }
+
+
+/*
+ * ================
+ * = dfx_xmt_done =
+ * ================
+ *
+ * Overview:
+ * Processes all frames that have been transmitted.
+ *
+ * Returns:
+ * Number of transmit packets completed (buffers freed)
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * For all consumed transmit descriptors that have not
+ * yet been completed, we'll free the skb we were holding
+ * onto using dev_kfree_skb_irq and bump the appropriate
+ * counters.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * The Type 2 register is not updated in this routine. It is
+ * assumed that it will be updated in the ISR when dfx_xmt_done
+ * returns.
+ *
+ * Side Effects:
+ * None
+ */
+
+static int dfx_xmt_done(DFX_board_t *bp)
+ {
+ XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
+ PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
+ u8 comp; /* local transmit completion index */
+ int freed = 0; /* buffers freed */
+
+ /* Service all consumed transmit frames */
+
+ p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
+ while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
+ {
+ /* Get pointer to the transmit driver descriptor block information */
+
+ p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
+
+ /* Increment transmit counters */
+
+ bp->xmt_total_frames++;
+ bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
+
+ /* Return skb to operating system */
+ comp = bp->rcv_xmt_reg.index.xmt_comp;
+ pci_unmap_single(bp->pci_dev,
+ bp->descr_block_virt->xmt_data[comp].long_1,
+ p_xmt_drv_descr->p_skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
+
+ /*
+ * Move to start of next packet by updating completion index
+ *
+ * Here we assume that a transmit packet request is always
+ * serviced by posting one fragment. We can therefore
+ * simplify the completion code by incrementing the
+ * completion index by one. This code will need to be
+ * modified if this assumption changes. See comments
+ * in dfx_xmt_queue_pkt for more details.
+ */
+
+ bp->rcv_xmt_reg.index.xmt_comp += 1;
+ freed++;
+ }
+ return freed;
+ }
+
+
+/*
+ * =================
+ * = dfx_rcv_flush =
+ * =================
+ *
+ * Overview:
+ * Remove all skbs from the receive ring.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * Frees all the dynamically allocated skbs that are
+ * currently attached to the device receive ring. This
+ * function is typically only used when the device is
+ * initialized or reinitialized.
+ *
+ * Return Codes:
+ * None
+ *
+ * Side Effects:
+ * None
+ */
+#ifdef DYNAMIC_BUFFERS
+static void dfx_rcv_flush( DFX_board_t *bp )
+ {
+ int i, j;
+
+ for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
+ for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
+ {
+ struct sk_buff *skb;
+ skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
+ if (skb)
+ dev_kfree_skb(skb);
+ bp->p_rcv_buff_va[i+j] = NULL;
+ }
+
+ }
+#else
+static inline void dfx_rcv_flush( DFX_board_t *bp )
+{
+}
+#endif /* DYNAMIC_BUFFERS */
+
+/*
+ * =================
+ * = dfx_xmt_flush =
+ * =================
+ *
+ * Overview:
+ * Processes all frames whether they've been transmitted
+ * or not.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * bp - pointer to board information
+ *
+ * Functional Description:
+ * For all produced transmit descriptors that have not
+ * yet been completed, we'll free the skb we were holding
+ * onto using dev_kfree_skb and bump the appropriate
+ * counters. Of course, it's possible that some of
+ * these transmit requests actually did go out, but we
+ * won't make that distinction here. Finally, we'll
+ * update the consumer index to match the producer.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * This routine does NOT update the Type 2 register. It
+ * is assumed that this routine is being called during a
+ * transmit flush interrupt, or a shutdown or close routine.
+ *
+ * Side Effects:
+ * None
+ */
+
+static void dfx_xmt_flush( DFX_board_t *bp )
+ {
+ u32 prod_cons; /* rcv/xmt consumer block longword */
+ XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
+ u8 comp; /* local transmit completion index */
+
+ /* Flush all outstanding transmit frames */
+
+ while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
+ {
+ /* Get pointer to the transmit driver descriptor block information */
+
+ p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
+
+ /* Return skb to operating system */
+ comp = bp->rcv_xmt_reg.index.xmt_comp;
+ pci_unmap_single(bp->pci_dev,
+ bp->descr_block_virt->xmt_data[comp].long_1,
+ p_xmt_drv_descr->p_skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(p_xmt_drv_descr->p_skb);
+
+ /* Increment transmit error counter */
+
+ bp->xmt_discards++;
+
+ /*
+ * Move to start of next packet by updating completion index
+ *
+ * Here we assume that a transmit packet request is always
+ * serviced by posting one fragment. We can therefore
+ * simplify the completion code by incrementing the
+ * completion index by one. This code will need to be
+ * modified if this assumption changes. See comments
+ * in dfx_xmt_queue_pkt for more details.
+ */
+
+ bp->rcv_xmt_reg.index.xmt_comp += 1;
+ }
+
+ /* Update the transmit consumer index in the consumer block */
+
+ prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
+ prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
+ bp->cons_block_virt->xmt_rcv_data = prod_cons;
+ }
+
+static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct net_device *dev)
+{
+ DFX_board_t *bp = dev->priv;
+ int alloc_size; /* total buffer size used */
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN );
+
+ alloc_size = sizeof(PI_DESCR_BLOCK) +
+ PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
+#ifndef DYNAMIC_BUFFERS
+ (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
+#endif
+ sizeof(PI_CONSUMER_BLOCK) +
+ (PI_ALIGN_K_DESC_BLK - 1);
+ if (bp->kmalloced)
+ pci_free_consistent(pdev, alloc_size, bp->kmalloced,
+ bp->kmalloced_dma);
+ free_netdev(dev);
+}
+
+static void __devexit dfx_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ dfx_remove_one_pci_or_eisa(pdev, dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id dfx_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, dfx_pci_tbl);
+
+static struct pci_driver dfx_driver = {
+ .name = "defxx",
+ .probe = dfx_init_one,
+ .remove = __devexit_p(dfx_remove_one),
+ .id_table = dfx_pci_tbl,
+};
+
+static int dfx_have_pci;
+static int dfx_have_eisa;
+
+
+static void __exit dfx_eisa_cleanup(void)
+{
+ struct net_device *dev = root_dfx_eisa_dev;
+
+ while (dev)
+ {
+ struct net_device *tmp;
+ DFX_board_t *bp;
+
+ bp = (DFX_board_t*)dev->priv;
+ tmp = bp->next;
+ dfx_remove_one_pci_or_eisa(NULL, dev);
+ dev = tmp;
+ }
+}
+
+static int __init dfx_init(void)
+{
+ int rc_pci, rc_eisa;
+
+ rc_pci = pci_module_init(&dfx_driver);
+ if (rc_pci >= 0) dfx_have_pci = 1;
+
+ rc_eisa = dfx_eisa_init();
+ if (rc_eisa >= 0) dfx_have_eisa = 1;
+
+ return ((rc_eisa < 0) ? 0 : rc_eisa) + ((rc_pci < 0) ? 0 : rc_pci);
+}
+
+static void __exit dfx_cleanup(void)
+{
+ if (dfx_have_pci)
+ pci_unregister_driver(&dfx_driver);
+ if (dfx_have_eisa)
+ dfx_eisa_cleanup();
+
+}
+
+module_init(dfx_init);
+module_exit(dfx_cleanup);
+MODULE_AUTHOR("Lawrence V. Stefani");
+MODULE_DESCRIPTION("DEC FDDIcontroller EISA/PCI (DEFEA/DEFPA) driver "
+ DRV_VERSION " " DRV_RELDATE);
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -I/root/linux/include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -c defxx.c"
+ * End:
+ */
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h
new file mode 100644
index 000000000000..a480b80d2f9c
--- /dev/null
+++ b/drivers/net/defxx.h
@@ -0,0 +1,1778 @@
+/*
+ * File Name:
+ * defxx.h
+ *
+ * Copyright Information:
+ * Copyright Digital Equipment Corporation 1996.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ *
+ * Abstract:
+ * Contains all definitions specified by port specification and required
+ * by the defxx.c driver.
+ *
+ * The original author:
+ * LVS Lawrence V. Stefani <lstefani@yahoo.com>
+ *
+ * Maintainers:
+ * macro Maciej W. Rozycki <macro@linux-mips.org>
+ *
+ * Modification History:
+ * Date Name Description
+ * 16-Aug-96 LVS Created.
+ * 09-Sep-96 LVS Added group_prom field. Moved read/write I/O
+ * macros to DEFXX.C.
+ * 12-Sep-96 LVS Removed packet request header pointers.
+ * 04 Aug 2003 macro Converted to the DMA API.
+ */
+
+#ifndef _DEFXX_H_
+#define _DEFXX_H_
+
+/* Define basic types for unsigned chars, shorts, longs */
+
+typedef u8 PI_UINT8;
+typedef u16 PI_UINT16;
+typedef u32 PI_UINT32;
+
+/* Define general structures */
+
+typedef struct /* 64-bit counter */
+ {
+ PI_UINT32 ms;
+ PI_UINT32 ls;
+ } PI_CNTR;
+
+typedef struct /* LAN address */
+ {
+ PI_UINT32 lwrd_0;
+ PI_UINT32 lwrd_1;
+ } PI_LAN_ADDR;
+
+typedef struct /* Station ID address */
+ {
+ PI_UINT32 octet_7_4;
+ PI_UINT32 octet_3_0;
+ } PI_STATION_ID;
+
+
+/* Define general constants */
+
+#define PI_ALIGN_K_DESC_BLK 8192 /* Descriptor block boundary */
+#define PI_ALIGN_K_CONS_BLK 64 /* Consumer block boundary */
+#define PI_ALIGN_K_CMD_REQ_BUFF 128 /* Xmt Command que buffer alignment */
+#define PI_ALIGN_K_CMD_RSP_BUFF 128 /* Rcv Command que buffer alignment */
+#define PI_ALIGN_K_UNSOL_BUFF 128 /* Unsol que buffer alignment */
+#define PI_ALIGN_K_XMT_DATA_BUFF 0 /* Xmt data que buffer alignment */
+#define PI_ALIGN_K_RCV_DATA_BUFF 128 /* Rcv que buffer alignment */
+
+/* Define PHY index values */
+
+#define PI_PHY_K_S 0 /* Index to S phy */
+#define PI_PHY_K_A 0 /* Index to A phy */
+#define PI_PHY_K_B 1 /* Index to B phy */
+#define PI_PHY_K_MAX 2 /* Max number of phys */
+
+/* Define FMC descriptor fields */
+
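+/*
+ * Naming convention sketch, inferred from how these definitions are used
+ * in defxx.c: _V_ values are bit positions, _M_ values are the matching
+ * masks and _K_ values are plain constants.  A field is extracted as in,
+ * for example,
+ *
+ *	len = (descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN;
+ */
+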
+#define PI_FMC_DESCR_V_SOP 31
+#define PI_FMC_DESCR_V_EOP 30
+#define PI_FMC_DESCR_V_FSC 27
+#define PI_FMC_DESCR_V_FSB_ERROR 26
+#define PI_FMC_DESCR_V_FSB_ADDR_RECOG 25
+#define PI_FMC_DESCR_V_FSB_ADDR_COPIED 24
+#define PI_FMC_DESCR_V_FSB 22
+#define PI_FMC_DESCR_V_RCC_FLUSH 21
+#define PI_FMC_DESCR_V_RCC_CRC 20
+#define PI_FMC_DESCR_V_RCC_RRR 17
+#define PI_FMC_DESCR_V_RCC_DD 15
+#define PI_FMC_DESCR_V_RCC_SS 13
+#define PI_FMC_DESCR_V_RCC 13
+#define PI_FMC_DESCR_V_LEN 0
+
+#define PI_FMC_DESCR_M_SOP 0x80000000
+#define PI_FMC_DESCR_M_EOP 0x40000000
+#define PI_FMC_DESCR_M_FSC 0x38000000
+#define PI_FMC_DESCR_M_FSB_ERROR 0x04000000
+#define PI_FMC_DESCR_M_FSB_ADDR_RECOG 0x02000000
+#define PI_FMC_DESCR_M_FSB_ADDR_COPIED 0x01000000
+#define PI_FMC_DESCR_M_FSB 0x07C00000
+#define PI_FMC_DESCR_M_RCC_FLUSH 0x00200000
+#define PI_FMC_DESCR_M_RCC_CRC 0x00100000
+#define PI_FMC_DESCR_M_RCC_RRR 0x000E0000
+#define PI_FMC_DESCR_M_RCC_DD 0x00018000
+#define PI_FMC_DESCR_M_RCC_SS 0x00006000
+#define PI_FMC_DESCR_M_RCC 0x003FE000
+#define PI_FMC_DESCR_M_LEN 0x00001FFF
+
+#define PI_FMC_DESCR_K_RCC_FMC_INT_ERR 0x01AA
+
+#define PI_FMC_DESCR_K_RRR_SUCCESS 0x00
+#define PI_FMC_DESCR_K_RRR_SA_MATCH 0x01
+#define PI_FMC_DESCR_K_RRR_DA_MATCH 0x02
+#define PI_FMC_DESCR_K_RRR_FMC_ABORT 0x03
+#define PI_FMC_DESCR_K_RRR_LENGTH_BAD 0x04
+#define PI_FMC_DESCR_K_RRR_FRAGMENT 0x05
+#define PI_FMC_DESCR_K_RRR_FORMAT_ERR 0x06
+#define PI_FMC_DESCR_K_RRR_MAC_RESET 0x07
+
+#define PI_FMC_DESCR_K_DD_NO_MATCH 0x0
+#define PI_FMC_DESCR_K_DD_PROMISCUOUS 0x1
+#define PI_FMC_DESCR_K_DD_CAM_MATCH 0x2
+#define PI_FMC_DESCR_K_DD_LOCAL_MATCH 0x3
+
+#define PI_FMC_DESCR_K_SS_NO_MATCH 0x0
+#define PI_FMC_DESCR_K_SS_BRIDGE_MATCH 0x1
+#define PI_FMC_DESCR_K_SS_NOT_POSSIBLE 0x2
+#define PI_FMC_DESCR_K_SS_LOCAL_MATCH 0x3
+
+/* Define some max buffer sizes */
+
+#define PI_CMD_REQ_K_SIZE_MAX 512
+#define PI_CMD_RSP_K_SIZE_MAX 512
+#define PI_UNSOL_K_SIZE_MAX 512
+#define PI_SMT_HOST_K_SIZE_MAX 4608 /* 4 1/2 K */
+#define PI_RCV_DATA_K_SIZE_MAX 4608 /* 4 1/2 K */
+#define PI_XMT_DATA_K_SIZE_MAX 4608 /* 4 1/2 K */
+
+/* Define adapter states */
+
+#define PI_STATE_K_RESET 0
+#define PI_STATE_K_UPGRADE 1
+#define PI_STATE_K_DMA_UNAVAIL 2
+#define PI_STATE_K_DMA_AVAIL 3
+#define PI_STATE_K_LINK_AVAIL 4
+#define PI_STATE_K_LINK_UNAVAIL 5
+#define PI_STATE_K_HALTED 6
+#define PI_STATE_K_RING_MEMBER 7
+#define PI_STATE_K_NUMBER 8
+
+/* Define codes for command type */
+
+#define PI_CMD_K_START 0x00
+#define PI_CMD_K_FILTERS_SET 0x01
+#define PI_CMD_K_FILTERS_GET 0x02
+#define PI_CMD_K_CHARS_SET 0x03
+#define PI_CMD_K_STATUS_CHARS_GET 0x04
+#define PI_CMD_K_CNTRS_GET 0x05
+#define PI_CMD_K_CNTRS_SET 0x06
+#define PI_CMD_K_ADDR_FILTER_SET 0x07
+#define PI_CMD_K_ADDR_FILTER_GET 0x08
+#define PI_CMD_K_ERROR_LOG_CLEAR 0x09
+#define PI_CMD_K_ERROR_LOG_GET 0x0A
+#define PI_CMD_K_FDDI_MIB_GET 0x0B
+#define PI_CMD_K_DEC_EXT_MIB_GET 0x0C
+#define PI_CMD_K_DEVICE_SPECIFIC_GET 0x0D
+#define PI_CMD_K_SNMP_SET 0x0E
+#define PI_CMD_K_UNSOL_TEST 0x0F
+#define PI_CMD_K_SMT_MIB_GET 0x10
+#define PI_CMD_K_SMT_MIB_SET 0x11
+#define PI_CMD_K_MAX 0x11 /* Must match last */
+
+/* Define item codes for Chars_Set and Filters_Set commands */
+
+#define PI_ITEM_K_EOL 0x00 /* End-of-Item list */
+#define PI_ITEM_K_T_REQ 0x01 /* DECnet T_REQ */
+#define PI_ITEM_K_TVX 0x02 /* DECnet TVX */
+#define PI_ITEM_K_RESTRICTED_TOKEN 0x03 /* DECnet Restricted Token */
+#define PI_ITEM_K_LEM_THRESHOLD 0x04 /* DECnet LEM Threshold */
+#define PI_ITEM_K_RING_PURGER 0x05 /* DECnet Ring Purger Enable */
+#define PI_ITEM_K_CNTR_INTERVAL 0x06 /* Chars_Set */
+#define PI_ITEM_K_IND_GROUP_PROM 0x07 /* Filters_Set */
+#define PI_ITEM_K_GROUP_PROM 0x08 /* Filters_Set */
+#define PI_ITEM_K_BROADCAST 0x09 /* Filters_Set */
+#define PI_ITEM_K_SMT_PROM 0x0A /* Filters_Set */
+#define PI_ITEM_K_SMT_USER 0x0B /* Filters_Set */
+#define PI_ITEM_K_RESERVED 0x0C /* Filters_Set */
+#define PI_ITEM_K_IMPLEMENTOR 0x0D /* Filters_Set */
+#define PI_ITEM_K_LOOPBACK_MODE 0x0E /* Chars_Set */
+#define PI_ITEM_K_CONFIG_POLICY 0x10 /* SMTConfigPolicy */
+#define PI_ITEM_K_CON_POLICY 0x11 /* SMTConnectionPolicy */
+#define PI_ITEM_K_T_NOTIFY 0x12 /* SMTTNotify */
+#define PI_ITEM_K_STATION_ACTION 0x13 /* SMTStationAction */
+#define PI_ITEM_K_MAC_PATHS_REQ 0x15 /* MACPathsRequested */
+#define PI_ITEM_K_MAC_ACTION 0x17 /* MACAction */
+#define PI_ITEM_K_CON_POLICIES 0x18 /* PORTConnectionPolicies */
+#define PI_ITEM_K_PORT_PATHS_REQ 0x19 /* PORTPathsRequested */
+#define PI_ITEM_K_MAC_LOOP_TIME 0x1A /* PORTMACLoopTime */
+#define PI_ITEM_K_TB_MAX 0x1B /* PORTTBMax */
+#define PI_ITEM_K_LER_CUTOFF 0x1C /* PORTLerCutoff */
+#define PI_ITEM_K_LER_ALARM 0x1D /* PORTLerAlarm */
+#define PI_ITEM_K_PORT_ACTION 0x1E /* PORTAction */
+#define PI_ITEM_K_FLUSH_TIME 0x20 /* Chars_Set */
+#define PI_ITEM_K_MAC_T_REQ 0x29 /* MACTReq */
+#define PI_ITEM_K_EMAC_RING_PURGER 0x2A /* eMACRingPurgerEnable */
+#define PI_ITEM_K_EMAC_RTOKEN_TIMEOUT 0x2B /* eMACRestrictedTokenTimeout */
+#define PI_ITEM_K_FDX_ENB_DIS 0x2C /* eFDXEnable */
+#define PI_ITEM_K_MAX 0x2C /* Must equal high item */
+
+/* Values for some of the items */
+
+#define PI_K_FALSE 0 /* Generic false */
+#define PI_K_TRUE 1 /* Generic true */
+
+#define PI_SNMP_K_TRUE 1 /* SNMP true/false values */
+#define PI_SNMP_K_FALSE 2
+
+#define PI_FSTATE_K_BLOCK 0 /* Filter State */
+#define PI_FSTATE_K_PASS 1
+
+/* Define command return codes */
+
+#define PI_RSP_K_SUCCESS 0x00
+#define PI_RSP_K_FAILURE 0x01
+#define PI_RSP_K_WARNING 0x02
+#define PI_RSP_K_LOOP_MODE_BAD 0x03
+#define PI_RSP_K_ITEM_CODE_BAD 0x04
+#define PI_RSP_K_TVX_BAD 0x05
+#define PI_RSP_K_TREQ_BAD 0x06
+#define PI_RSP_K_TOKEN_BAD 0x07
+#define PI_RSP_K_NO_EOL 0x0C
+#define PI_RSP_K_FILTER_STATE_BAD 0x0D
+#define PI_RSP_K_CMD_TYPE_BAD 0x0E
+#define PI_RSP_K_ADAPTER_STATE_BAD 0x0F
+#define PI_RSP_K_RING_PURGER_BAD 0x10
+#define PI_RSP_K_LEM_THRESHOLD_BAD 0x11
+#define PI_RSP_K_LOOP_NOT_SUPPORTED 0x12
+#define PI_RSP_K_FLUSH_TIME_BAD 0x13
+#define PI_RSP_K_NOT_IMPLEMENTED 0x14
+#define PI_RSP_K_CONFIG_POLICY_BAD 0x15
+#define PI_RSP_K_STATION_ACTION_BAD 0x16
+#define PI_RSP_K_MAC_ACTION_BAD 0x17
+#define PI_RSP_K_CON_POLICIES_BAD 0x18
+#define PI_RSP_K_MAC_LOOP_TIME_BAD 0x19
+#define PI_RSP_K_TB_MAX_BAD 0x1A
+#define PI_RSP_K_LER_CUTOFF_BAD 0x1B
+#define PI_RSP_K_LER_ALARM_BAD 0x1C
+#define PI_RSP_K_MAC_PATHS_REQ_BAD 0x1D
+#define PI_RSP_K_MAC_T_REQ_BAD 0x1E
+#define PI_RSP_K_EMAC_RING_PURGER_BAD 0x1F
+#define PI_RSP_K_EMAC_RTOKEN_TIME_BAD 0x20
+#define PI_RSP_K_NO_SUCH_ENTRY 0x21
+#define PI_RSP_K_T_NOTIFY_BAD 0x22
+#define PI_RSP_K_TR_MAX_EXP_BAD 0x23
+#define PI_RSP_K_MAC_FRM_ERR_THR_BAD 0x24
+#define PI_RSP_K_MAX_T_REQ_BAD 0x25
+#define PI_RSP_K_FDX_ENB_DIS_BAD 0x26
+#define PI_RSP_K_ITEM_INDEX_BAD 0x27
+#define PI_RSP_K_PORT_ACTION_BAD 0x28
+
+/* Commonly used structures */
+
+typedef struct /* Item list */
+ {
+ PI_UINT32 item_code;
+ PI_UINT32 value;
+ } PI_ITEM_LIST;
+
+typedef struct /* Response header */
+ {
+ PI_UINT32 reserved;
+ PI_UINT32 cmd_type;
+ PI_UINT32 status;
+ } PI_RSP_HEADER;
+
+
+/* Start Command */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_START_REQ;
+
+/* Start Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_START_RSP;
+
+/* Filters_Set Request */
+
+#define PI_CMD_FILTERS_SET_K_ITEMS_MAX 63 /* Fits in a 512 byte buffer */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ PI_ITEM_LIST item[PI_CMD_FILTERS_SET_K_ITEMS_MAX];
+ } PI_CMD_FILTERS_SET_REQ;
+
+/* Filters_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_FILTERS_SET_RSP;
+
+/* Filters_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_FILTERS_GET_REQ;
+
+/* Filters_Get Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ PI_UINT32 ind_group_prom;
+ PI_UINT32 group_prom;
+ PI_UINT32 broadcast_all;
+ PI_UINT32 smt_all;
+ PI_UINT32 smt_user;
+ PI_UINT32 reserved_all;
+ PI_UINT32 implementor_all;
+ } PI_CMD_FILTERS_GET_RSP;
+
+
+/* Chars_Set Request */
+
+#define PI_CMD_CHARS_SET_K_ITEMS_MAX 42 /* Fits in a 512 byte buffer */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ struct /* Item list */
+ {
+ PI_UINT32 item_code;
+ PI_UINT32 value;
+ PI_UINT32 item_index;
+ } item[PI_CMD_CHARS_SET_K_ITEMS_MAX];
+ } PI_CMD_CHARS_SET_REQ;
+
+/* Chars_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_CHARS_SET_RSP;
+
+
+/* SNMP_Set Request */
+
+#define PI_CMD_SNMP_SET_K_ITEMS_MAX 42 /* Fits in a 512 byte buffer */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ struct /* Item list */
+ {
+ PI_UINT32 item_code;
+ PI_UINT32 value;
+ PI_UINT32 item_index;
+ } item[PI_CMD_SNMP_SET_K_ITEMS_MAX];
+ } PI_CMD_SNMP_SET_REQ;
+
+/* SNMP_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_SNMP_SET_RSP;
+
+
+/* SMT_MIB_Set Request */
+
+#define PI_CMD_SMT_MIB_SET_K_ITEMS_MAX 42 /* Max number of items */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ struct
+ {
+ PI_UINT32 item_code;
+ PI_UINT32 value;
+ PI_UINT32 item_index;
+ } item[PI_CMD_SMT_MIB_SET_K_ITEMS_MAX];
+ } PI_CMD_SMT_MIB_SET_REQ;
+
+/* SMT_MIB_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_SMT_MIB_SET_RSP;
+
+/* SMT_MIB_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_SMT_MIB_GET_REQ;
+
+/* SMT_MIB_Get Response */
+
+typedef struct /* Refer to ANSI FDDI SMT Rev. 7.3 */
+ {
+ PI_RSP_HEADER header;
+
+ /* SMT GROUP */
+
+ PI_STATION_ID smt_station_id;
+ PI_UINT32 smt_op_version_id;
+ PI_UINT32 smt_hi_version_id;
+ PI_UINT32 smt_lo_version_id;
+ PI_UINT32 smt_user_data[8];
+ PI_UINT32 smt_mib_version_id;
+ PI_UINT32 smt_mac_ct;
+ PI_UINT32 smt_non_master_ct;
+ PI_UINT32 smt_master_ct;
+ PI_UINT32 smt_available_paths;
+ PI_UINT32 smt_config_capabilities;
+ PI_UINT32 smt_config_policy;
+ PI_UINT32 smt_connection_policy;
+ PI_UINT32 smt_t_notify;
+ PI_UINT32 smt_stat_rpt_policy;
+ PI_UINT32 smt_trace_max_expiration;
+ PI_UINT32 smt_bypass_present;
+ PI_UINT32 smt_ecm_state;
+ PI_UINT32 smt_cf_state;
+ PI_UINT32 smt_remote_disconnect_flag;
+ PI_UINT32 smt_station_status;
+ PI_UINT32 smt_peer_wrap_flag;
+ PI_CNTR smt_msg_time_stamp;
+ PI_CNTR smt_transition_time_stamp;
+
+ /* MAC GROUP */
+
+ PI_UINT32 mac_frame_status_functions;
+ PI_UINT32 mac_t_max_capability;
+ PI_UINT32 mac_tvx_capability;
+ PI_UINT32 mac_available_paths;
+ PI_UINT32 mac_current_path;
+ PI_LAN_ADDR mac_upstream_nbr;
+ PI_LAN_ADDR mac_downstream_nbr;
+ PI_LAN_ADDR mac_old_upstream_nbr;
+ PI_LAN_ADDR mac_old_downstream_nbr;
+ PI_UINT32 mac_dup_address_test;
+ PI_UINT32 mac_requested_paths;
+ PI_UINT32 mac_downstream_port_type;
+ PI_LAN_ADDR mac_smt_address;
+ PI_UINT32 mac_t_req;
+ PI_UINT32 mac_t_neg;
+ PI_UINT32 mac_t_max;
+ PI_UINT32 mac_tvx_value;
+ PI_UINT32 mac_frame_error_threshold;
+ PI_UINT32 mac_frame_error_ratio;
+ PI_UINT32 mac_rmt_state;
+ PI_UINT32 mac_da_flag;
+ PI_UINT32 mac_unda_flag;
+ PI_UINT32 mac_frame_error_flag;
+ PI_UINT32 mac_ma_unitdata_available;
+ PI_UINT32 mac_hardware_present;
+ PI_UINT32 mac_ma_unitdata_enable;
+
+ /* PATH GROUP */
+
+ PI_UINT32 path_configuration[8];
+ PI_UINT32 path_tvx_lower_bound;
+ PI_UINT32 path_t_max_lower_bound;
+ PI_UINT32 path_max_t_req;
+
+ /* PORT GROUP */
+
+ PI_UINT32 port_my_type[PI_PHY_K_MAX];
+ PI_UINT32 port_neighbor_type[PI_PHY_K_MAX];
+ PI_UINT32 port_connection_policies[PI_PHY_K_MAX];
+ PI_UINT32 port_mac_indicated[PI_PHY_K_MAX];
+ PI_UINT32 port_current_path[PI_PHY_K_MAX];
+ PI_UINT32 port_requested_paths[PI_PHY_K_MAX];
+ PI_UINT32 port_mac_placement[PI_PHY_K_MAX];
+ PI_UINT32 port_available_paths[PI_PHY_K_MAX];
+ PI_UINT32 port_pmd_class[PI_PHY_K_MAX];
+ PI_UINT32 port_connection_capabilities[PI_PHY_K_MAX];
+ PI_UINT32 port_bs_flag[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_estimate[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_cutoff[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_alarm[PI_PHY_K_MAX];
+ PI_UINT32 port_connect_state[PI_PHY_K_MAX];
+ PI_UINT32 port_pcm_state[PI_PHY_K_MAX];
+ PI_UINT32 port_pc_withhold[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_flag[PI_PHY_K_MAX];
+ PI_UINT32 port_hardware_present[PI_PHY_K_MAX];
+
+ /* GROUP for things that were added later, so must be at the end. */
+
+ PI_CNTR path_ring_latency;
+
+ } PI_CMD_SMT_MIB_GET_RSP;
+
+
+/*
+ * Item and group code definitions for SMT 7.3 mandatory objects. These
+ * definitions are to be used as appropriate in SMT_MIB_SET commands and
+ * certain host-sent SMT frames such as PMF Get and Set requests. The
+ * codes have been taken from the MIB summary section of ANSI SMT 7.3.
+ */
+
+#define PI_GRP_K_SMT_STATION_ID 0x100A
+#define PI_ITEM_K_SMT_STATION_ID 0x100B
+#define PI_ITEM_K_SMT_OP_VERS_ID 0x100D
+#define PI_ITEM_K_SMT_HI_VERS_ID 0x100E
+#define PI_ITEM_K_SMT_LO_VERS_ID 0x100F
+#define PI_ITEM_K_SMT_USER_DATA 0x1011
+#define PI_ITEM_K_SMT_MIB_VERS_ID 0x1012
+
+#define PI_GRP_K_SMT_STATION_CONFIG 0x1014
+#define PI_ITEM_K_SMT_MAC_CT 0x1015
+#define PI_ITEM_K_SMT_NON_MASTER_CT 0x1016
+#define PI_ITEM_K_SMT_MASTER_CT 0x1017
+#define PI_ITEM_K_SMT_AVAIL_PATHS 0x1018
+#define PI_ITEM_K_SMT_CONFIG_CAPS 0x1019
+#define PI_ITEM_K_SMT_CONFIG_POL 0x101A
+#define PI_ITEM_K_SMT_CONN_POL 0x101B
+#define PI_ITEM_K_SMT_T_NOTIFY 0x101D
+#define PI_ITEM_K_SMT_STAT_POL 0x101E
+#define PI_ITEM_K_SMT_TR_MAX_EXP 0x101F
+#define PI_ITEM_K_SMT_PORT_INDEXES 0x1020
+#define PI_ITEM_K_SMT_MAC_INDEXES 0x1021
+#define PI_ITEM_K_SMT_BYPASS_PRESENT 0x1022
+
+#define PI_GRP_K_SMT_STATUS 0x1028
+#define PI_ITEM_K_SMT_ECM_STATE 0x1029
+#define PI_ITEM_K_SMT_CF_STATE 0x102A
+#define PI_ITEM_K_SMT_REM_DISC_FLAG 0x102C
+#define PI_ITEM_K_SMT_STATION_STATUS 0x102D
+#define PI_ITEM_K_SMT_PEER_WRAP_FLAG 0x102E
+
+#define PI_GRP_K_SMT_MIB_OPERATION 0x1032
+#define PI_ITEM_K_SMT_MSG_TIME_STAMP 0x1033
+#define PI_ITEM_K_SMT_TRN_TIME_STAMP 0x1034
+
+#define PI_ITEM_K_SMT_STATION_ACT 0x103C
+
+#define PI_GRP_K_MAC_CAPABILITIES 0x200A
+#define PI_ITEM_K_MAC_FRM_STAT_FUNC 0x200B
+#define PI_ITEM_K_MAC_T_MAX_CAP 0x200D
+#define PI_ITEM_K_MAC_TVX_CAP 0x200E
+
+#define PI_GRP_K_MAC_CONFIG 0x2014
+#define PI_ITEM_K_MAC_AVAIL_PATHS 0x2016
+#define PI_ITEM_K_MAC_CURRENT_PATH 0x2017
+#define PI_ITEM_K_MAC_UP_NBR 0x2018
+#define PI_ITEM_K_MAC_DOWN_NBR 0x2019
+#define PI_ITEM_K_MAC_OLD_UP_NBR 0x201A
+#define PI_ITEM_K_MAC_OLD_DOWN_NBR 0x201B
+#define PI_ITEM_K_MAC_DUP_ADDR_TEST 0x201D
+#define PI_ITEM_K_MAC_REQ_PATHS 0x2020
+#define PI_ITEM_K_MAC_DOWN_PORT_TYPE 0x2021
+#define PI_ITEM_K_MAC_INDEX 0x2022
+
+#define PI_GRP_K_MAC_ADDRESS 0x2028
+#define PI_ITEM_K_MAC_SMT_ADDRESS 0x2029
+
+#define PI_GRP_K_MAC_OPERATION 0x2032
+#define PI_ITEM_K_MAC_TREQ 0x2033
+#define PI_ITEM_K_MAC_TNEG 0x2034
+#define PI_ITEM_K_MAC_TMAX 0x2035
+#define PI_ITEM_K_MAC_TVX_VALUE 0x2036
+
+#define PI_GRP_K_MAC_COUNTERS 0x2046
+#define PI_ITEM_K_MAC_FRAME_CT 0x2047
+#define PI_ITEM_K_MAC_COPIED_CT 0x2048
+#define PI_ITEM_K_MAC_TRANSMIT_CT 0x2049
+#define PI_ITEM_K_MAC_ERROR_CT 0x2051
+#define PI_ITEM_K_MAC_LOST_CT 0x2052
+
+#define PI_GRP_K_MAC_FRM_ERR_COND 0x205A
+#define PI_ITEM_K_MAC_FRM_ERR_THR 0x205F
+#define PI_ITEM_K_MAC_FRM_ERR_RAT 0x2060
+
+#define PI_GRP_K_MAC_STATUS 0x206E
+#define PI_ITEM_K_MAC_RMT_STATE 0x206F
+#define PI_ITEM_K_MAC_DA_FLAG 0x2070
+#define PI_ITEM_K_MAC_UNDA_FLAG 0x2071
+#define PI_ITEM_K_MAC_FRM_ERR_FLAG 0x2072
+#define PI_ITEM_K_MAC_MA_UNIT_AVAIL 0x2074
+#define PI_ITEM_K_MAC_HW_PRESENT 0x2075
+#define PI_ITEM_K_MAC_MA_UNIT_ENAB 0x2076
+
+#define PI_GRP_K_PATH_CONFIG 0x320A
+#define PI_ITEM_K_PATH_INDEX 0x320B
+#define PI_ITEM_K_PATH_CONFIGURATION 0x3212
+#define PI_ITEM_K_PATH_TVX_LB 0x3215
+#define PI_ITEM_K_PATH_T_MAX_LB 0x3216
+#define PI_ITEM_K_PATH_MAX_T_REQ 0x3217
+
+#define PI_GRP_K_PORT_CONFIG 0x400A
+#define PI_ITEM_K_PORT_MY_TYPE 0x400C
+#define PI_ITEM_K_PORT_NBR_TYPE 0x400D
+#define PI_ITEM_K_PORT_CONN_POLS 0x400E
+#define PI_ITEM_K_PORT_MAC_INDICATED 0x400F
+#define PI_ITEM_K_PORT_CURRENT_PATH 0x4010
+#define PI_ITEM_K_PORT_REQ_PATHS 0x4011
+#define PI_ITEM_K_PORT_MAC_PLACEMENT 0x4012
+#define PI_ITEM_K_PORT_AVAIL_PATHS 0x4013
+#define PI_ITEM_K_PORT_PMD_CLASS 0x4016
+#define PI_ITEM_K_PORT_CONN_CAPS 0x4017
+#define PI_ITEM_K_PORT_INDEX 0x401D
+
+#define PI_GRP_K_PORT_OPERATION 0x401E
+#define PI_ITEM_K_PORT_BS_FLAG 0x4021
+
+#define PI_GRP_K_PORT_ERR_CNTRS 0x4028
+#define PI_ITEM_K_PORT_LCT_FAIL_CT 0x402A
+
+#define PI_GRP_K_PORT_LER 0x4032
+#define PI_ITEM_K_PORT_LER_ESTIMATE 0x4033
+#define PI_ITEM_K_PORT_LEM_REJ_CT 0x4034
+#define PI_ITEM_K_PORT_LEM_CT 0x4035
+#define PI_ITEM_K_PORT_LER_CUTOFF 0x403A
+#define PI_ITEM_K_PORT_LER_ALARM 0x403B
+
+#define PI_GRP_K_PORT_STATUS 0x403C
+#define PI_ITEM_K_PORT_CONNECT_STATE 0x403D
+#define PI_ITEM_K_PORT_PCM_STATE 0x403E
+#define PI_ITEM_K_PORT_PC_WITHHOLD 0x403F
+#define PI_ITEM_K_PORT_LER_FLAG 0x4040
+#define PI_ITEM_K_PORT_HW_PRESENT 0x4041
+
+#define PI_ITEM_K_PORT_ACT 0x4046
+
+/* Addr_Filter_Set Request */
+
+#define PI_CMD_ADDR_FILTER_K_SIZE 62
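+/* 62 two-longword entries plus the cmd_type longword total 500 bytes, well within PI_CMD_REQ_K_SIZE_MAX (512) */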
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ PI_LAN_ADDR entry[PI_CMD_ADDR_FILTER_K_SIZE];
+ } PI_CMD_ADDR_FILTER_SET_REQ;
+
+/* Addr_Filter_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_ADDR_FILTER_SET_RSP;
+
+/* Addr_Filter_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_ADDR_FILTER_GET_REQ;
+
+/* Addr_Filter_Get Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ PI_LAN_ADDR entry[PI_CMD_ADDR_FILTER_K_SIZE];
+ } PI_CMD_ADDR_FILTER_GET_RSP;
+
+/* Status_Chars_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_STATUS_CHARS_GET_REQ;
+
+/* Status_Chars_Get Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ PI_STATION_ID station_id; /* Station */
+ PI_UINT32 station_type;
+ PI_UINT32 smt_ver_id;
+ PI_UINT32 smt_ver_id_max;
+ PI_UINT32 smt_ver_id_min;
+ PI_UINT32 station_state;
+ PI_LAN_ADDR link_addr; /* Link */
+ PI_UINT32 t_req;
+ PI_UINT32 tvx;
+ PI_UINT32 token_timeout;
+ PI_UINT32 purger_enb;
+ PI_UINT32 link_state;
+ PI_UINT32 tneg;
+ PI_UINT32 dup_addr_flag;
+ PI_LAN_ADDR una;
+ PI_LAN_ADDR una_old;
+ PI_UINT32 un_dup_addr_flag;
+ PI_LAN_ADDR dna;
+ PI_LAN_ADDR dna_old;
+ PI_UINT32 purger_state;
+ PI_UINT32 fci_mode;
+ PI_UINT32 error_reason;
+ PI_UINT32 loopback;
+ PI_UINT32 ring_latency;
+ PI_LAN_ADDR last_dir_beacon_sa;
+ PI_LAN_ADDR last_dir_beacon_una;
+ PI_UINT32 phy_type[PI_PHY_K_MAX]; /* Phy */
+ PI_UINT32 pmd_type[PI_PHY_K_MAX];
+ PI_UINT32 lem_threshold[PI_PHY_K_MAX];
+ PI_UINT32 phy_state[PI_PHY_K_MAX];
+ PI_UINT32 nbor_phy_type[PI_PHY_K_MAX];
+ PI_UINT32 link_error_est[PI_PHY_K_MAX];
+ PI_UINT32 broken_reason[PI_PHY_K_MAX];
+ PI_UINT32 reject_reason[PI_PHY_K_MAX];
+ PI_UINT32 cntr_interval; /* Miscellaneous */
+ PI_UINT32 module_rev;
+ PI_UINT32 firmware_rev;
+ PI_UINT32 mop_device_type;
+ PI_UINT32 phy_led[PI_PHY_K_MAX];
+ PI_UINT32 flush_time;
+ } PI_CMD_STATUS_CHARS_GET_RSP;
+
+/* FDDI_MIB_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_FDDI_MIB_GET_REQ;
+
+/* FDDI_MIB_Get Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+
+ /* SMT GROUP */
+
+ PI_STATION_ID smt_station_id;
+ PI_UINT32 smt_op_version_id;
+ PI_UINT32 smt_hi_version_id;
+ PI_UINT32 smt_lo_version_id;
+ PI_UINT32 smt_mac_ct;
+ PI_UINT32 smt_non_master_ct;
+ PI_UINT32 smt_master_ct;
+ PI_UINT32 smt_paths_available;
+ PI_UINT32 smt_config_capabilities;
+ PI_UINT32 smt_config_policy;
+ PI_UINT32 smt_connection_policy;
+ PI_UINT32 smt_t_notify;
+ PI_UINT32 smt_status_reporting;
+ PI_UINT32 smt_ecm_state;
+ PI_UINT32 smt_cf_state;
+ PI_UINT32 smt_hold_state;
+ PI_UINT32 smt_remote_disconnect_flag;
+ PI_UINT32 smt_station_action;
+
+ /* MAC GROUP */
+
+ PI_UINT32 mac_frame_status_capabilities;
+ PI_UINT32 mac_t_max_greatest_lower_bound;
+ PI_UINT32 mac_tvx_greatest_lower_bound;
+ PI_UINT32 mac_paths_available;
+ PI_UINT32 mac_current_path;
+ PI_LAN_ADDR mac_upstream_nbr;
+ PI_LAN_ADDR mac_old_upstream_nbr;
+ PI_UINT32 mac_dup_addr_test;
+ PI_UINT32 mac_paths_requested;
+ PI_UINT32 mac_downstream_port_type;
+ PI_LAN_ADDR mac_smt_address;
+ PI_UINT32 mac_t_req;
+ PI_UINT32 mac_t_neg;
+ PI_UINT32 mac_t_max;
+ PI_UINT32 mac_tvx_value;
+ PI_UINT32 mac_t_min;
+ PI_UINT32 mac_current_frame_status;
+ /* mac_frame_cts */
+ /* mac_error_cts */
+ /* mac_lost_cts */
+ PI_UINT32 mac_frame_error_threshold;
+ PI_UINT32 mac_frame_error_ratio;
+ PI_UINT32 mac_rmt_state;
+ PI_UINT32 mac_da_flag;
+ PI_UINT32 mac_una_da_flag;
+ PI_UINT32 mac_frame_condition;
+ PI_UINT32 mac_chip_set;
+ PI_UINT32 mac_action;
+
+ /* PATH GROUP => Does not need to be implemented */
+
+ /* PORT GROUP */
+
+ PI_UINT32 port_pc_type[PI_PHY_K_MAX];
+ PI_UINT32 port_pc_neighbor[PI_PHY_K_MAX];
+ PI_UINT32 port_connection_policies[PI_PHY_K_MAX];
+ PI_UINT32 port_remote_mac_indicated[PI_PHY_K_MAX];
+ PI_UINT32 port_ce_state[PI_PHY_K_MAX];
+ PI_UINT32 port_paths_requested[PI_PHY_K_MAX];
+ PI_UINT32 port_mac_placement[PI_PHY_K_MAX];
+ PI_UINT32 port_available_paths[PI_PHY_K_MAX];
+ PI_UINT32 port_mac_loop_time[PI_PHY_K_MAX];
+ PI_UINT32 port_tb_max[PI_PHY_K_MAX];
+ PI_UINT32 port_bs_flag[PI_PHY_K_MAX];
+ /* port_lct_fail_cts[PI_PHY_K_MAX]; */
+ PI_UINT32 port_ler_estimate[PI_PHY_K_MAX];
+ /* port_lem_reject_cts[PI_PHY_K_MAX]; */
+ /* port_lem_cts[PI_PHY_K_MAX]; */
+ PI_UINT32 port_ler_cutoff[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_alarm[PI_PHY_K_MAX];
+ PI_UINT32 port_connect_state[PI_PHY_K_MAX];
+ PI_UINT32 port_pcm_state[PI_PHY_K_MAX];
+ PI_UINT32 port_pc_withhold[PI_PHY_K_MAX];
+ PI_UINT32 port_ler_condition[PI_PHY_K_MAX];
+ PI_UINT32 port_chip_set[PI_PHY_K_MAX];
+ PI_UINT32 port_action[PI_PHY_K_MAX];
+
+ /* ATTACHMENT GROUP */
+
+ PI_UINT32 attachment_class;
+ PI_UINT32 attachment_ob_present;
+ PI_UINT32 attachment_imax_expiration;
+ PI_UINT32 attachment_inserted_status;
+ PI_UINT32 attachment_insert_policy;
+
+ /* CHIP SET GROUP => Does not need to be implemented */
+
+ } PI_CMD_FDDI_MIB_GET_RSP;
+
+/* DEC_Ext_MIB_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_DEC_EXT_MIB_GET_REQ;
+
+/* DEC_Ext_MIB_Get (efddi and efdx groups only) Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+
+ /* SMT GROUP */
+
+ PI_UINT32 esmt_station_type;
+
+ /* MAC GROUP */
+
+ PI_UINT32 emac_link_state;
+ PI_UINT32 emac_ring_purger_state;
+ PI_UINT32 emac_ring_purger_enable;
+ PI_UINT32 emac_frame_strip_mode;
+ PI_UINT32 emac_ring_error_reason;
+ PI_UINT32 emac_up_nbr_dup_addr_flag;
+ PI_UINT32 emac_restricted_token_timeout;
+
+ /* PORT GROUP */
+
+ PI_UINT32 eport_pmd_type[PI_PHY_K_MAX];
+ PI_UINT32 eport_phy_state[PI_PHY_K_MAX];
+ PI_UINT32 eport_reject_reason[PI_PHY_K_MAX];
+
+ /* FDX (Full-Duplex) GROUP */
+
+ PI_UINT32 efdx_enable; /* Valid only in SMT 7.3 */
+ PI_UINT32 efdx_op; /* Valid only in SMT 7.3 */
+ PI_UINT32 efdx_state; /* Valid only in SMT 7.3 */
+
+ } PI_CMD_DEC_EXT_MIB_GET_RSP;
+
+typedef struct
+ {
+ PI_CNTR traces_rcvd; /* Station */
+ PI_CNTR frame_cnt; /* Link */
+ PI_CNTR error_cnt;
+ PI_CNTR lost_cnt;
+ PI_CNTR octets_rcvd;
+ PI_CNTR octets_sent;
+ PI_CNTR pdus_rcvd;
+ PI_CNTR pdus_sent;
+ PI_CNTR mcast_octets_rcvd;
+ PI_CNTR mcast_octets_sent;
+ PI_CNTR mcast_pdus_rcvd;
+ PI_CNTR mcast_pdus_sent;
+ PI_CNTR xmt_underruns;
+ PI_CNTR xmt_failures;
+ PI_CNTR block_check_errors;
+ PI_CNTR frame_status_errors;
+ PI_CNTR pdu_length_errors;
+ PI_CNTR rcv_overruns;
+ PI_CNTR user_buff_unavailable;
+ PI_CNTR inits_initiated;
+ PI_CNTR inits_rcvd;
+ PI_CNTR beacons_initiated;
+ PI_CNTR dup_addrs;
+ PI_CNTR dup_tokens;
+ PI_CNTR purge_errors;
+ PI_CNTR fci_strip_errors;
+ PI_CNTR traces_initiated;
+ PI_CNTR directed_beacons_rcvd;
+ PI_CNTR emac_frame_alignment_errors;
+ PI_CNTR ebuff_errors[PI_PHY_K_MAX]; /* Phy */
+ PI_CNTR lct_rejects[PI_PHY_K_MAX];
+ PI_CNTR lem_rejects[PI_PHY_K_MAX];
+ PI_CNTR link_errors[PI_PHY_K_MAX];
+ PI_CNTR connections[PI_PHY_K_MAX];
+ PI_CNTR copied_cnt; /* Valid only if using SMT 7.3 */
+ PI_CNTR transmit_cnt; /* Valid only if using SMT 7.3 */
+ PI_CNTR tokens;
+ } PI_CNTR_BLK;
+
+/* Counters_Get Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_CNTRS_GET_REQ;
+
+/* Counters_Get Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ PI_CNTR time_since_reset;
+ PI_CNTR_BLK cntrs;
+ } PI_CMD_CNTRS_GET_RSP;
+
+/* Counters_Set Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ PI_CNTR_BLK cntrs;
+ } PI_CMD_CNTRS_SET_REQ;
+
+/* Counters_Set Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_CNTRS_SET_RSP;
+
+/* Error_Log_Clear Request */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ } PI_CMD_ERROR_LOG_CLEAR_REQ;
+
+/* Error_Log_Clear Response */
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ } PI_CMD_ERROR_LOG_CLEAR_RSP;
+
+/* Error_Log_Get Request */
+
+#define PI_LOG_ENTRY_K_INDEX_MIN 0 /* Minimum index for entry */
+
+typedef struct
+ {
+ PI_UINT32 cmd_type;
+ PI_UINT32 entry_index;
+ } PI_CMD_ERROR_LOG_GET_REQ;
+
+/* Error_Log_Get Response */
+
+#define PI_K_LOG_FW_SIZE 111 /* Max number of fw longwords */
+#define PI_K_LOG_DIAG_SIZE 6 /* Max number of diag longwords */
+
+typedef struct
+ {
+ struct
+ {
+ PI_UINT32 fru_imp_mask;
+ PI_UINT32 test_id;
+ PI_UINT32 reserved[PI_K_LOG_DIAG_SIZE];
+ } diag;
+ PI_UINT32 fw[PI_K_LOG_FW_SIZE];
+ } PI_LOG_ENTRY;
+
+typedef struct
+ {
+ PI_RSP_HEADER header;
+ PI_UINT32 event_status;
+ PI_UINT32 caller_id;
+ PI_UINT32 timestamp_l;
+ PI_UINT32 timestamp_h;
+ PI_UINT32 write_count;
+ PI_LOG_ENTRY entry_info;
+ } PI_CMD_ERROR_LOG_GET_RSP;
+
+/* Define error log related constants and types. */
+/* Not all of the caller IDs can occur.  The only ones currently */
+/* implemented are: none, selftest, mfg, fw and console. */
+
+#define PI_LOG_EVENT_STATUS_K_VALID 0 /* Valid Event Status */
+#define PI_LOG_EVENT_STATUS_K_INVALID 1 /* Invalid Event Status */
+#define PI_LOG_CALLER_ID_K_NONE 0 /* No caller */
+#define PI_LOG_CALLER_ID_K_SELFTEST 1 /* Normal power-up selftest */
+#define PI_LOG_CALLER_ID_K_MFG 2 /* Mfg power-up selftest */
+#define PI_LOG_CALLER_ID_K_ONLINE 3 /* On-line diagnostics */
+#define PI_LOG_CALLER_ID_K_HW 4 /* Hardware */
+#define PI_LOG_CALLER_ID_K_FW 5 /* Firmware */
+#define PI_LOG_CALLER_ID_K_CNS_HW 6 /* CNS hardware */
+#define PI_LOG_CALLER_ID_K_CNS_FW 7 /* CNS firmware */
+#define PI_LOG_CALLER_ID_K_CONSOLE 8 /* Console Caller Id */
+
+/*
+ * Place all DMA commands in the following request and response structures
+ * to simplify code.
+ */
+
+typedef union
+ {
+ PI_UINT32 cmd_type;
+ PI_CMD_START_REQ start;
+ PI_CMD_FILTERS_SET_REQ filter_set;
+ PI_CMD_FILTERS_GET_REQ filter_get;
+ PI_CMD_CHARS_SET_REQ char_set;
+ PI_CMD_ADDR_FILTER_SET_REQ addr_filter_set;
+ PI_CMD_ADDR_FILTER_GET_REQ addr_filter_get;
+ PI_CMD_STATUS_CHARS_GET_REQ stat_char_get;
+ PI_CMD_CNTRS_GET_REQ cntrs_get;
+ PI_CMD_CNTRS_SET_REQ cntrs_set;
+ PI_CMD_ERROR_LOG_CLEAR_REQ error_log_clear;
+ PI_CMD_ERROR_LOG_GET_REQ error_log_read;
+ PI_CMD_SNMP_SET_REQ snmp_set;
+ PI_CMD_FDDI_MIB_GET_REQ fddi_mib_get;
+ PI_CMD_DEC_EXT_MIB_GET_REQ dec_mib_get;
+ PI_CMD_SMT_MIB_SET_REQ smt_mib_set;
+ PI_CMD_SMT_MIB_GET_REQ smt_mib_get;
+ char pad[PI_CMD_REQ_K_SIZE_MAX];
+ } PI_DMA_CMD_REQ;
+
+typedef union
+ {
+ PI_RSP_HEADER header;
+ PI_CMD_START_RSP start;
+ PI_CMD_FILTERS_SET_RSP filter_set;
+ PI_CMD_FILTERS_GET_RSP filter_get;
+ PI_CMD_CHARS_SET_RSP char_set;
+ PI_CMD_ADDR_FILTER_SET_RSP addr_filter_set;
+ PI_CMD_ADDR_FILTER_GET_RSP addr_filter_get;
+ PI_CMD_STATUS_CHARS_GET_RSP stat_char_get;
+ PI_CMD_CNTRS_GET_RSP cntrs_get;
+ PI_CMD_CNTRS_SET_RSP cntrs_set;
+ PI_CMD_ERROR_LOG_CLEAR_RSP error_log_clear;
+ PI_CMD_ERROR_LOG_GET_RSP error_log_get;
+ PI_CMD_SNMP_SET_RSP snmp_set;
+ PI_CMD_FDDI_MIB_GET_RSP fddi_mib_get;
+ PI_CMD_DEC_EXT_MIB_GET_RSP dec_mib_get;
+ PI_CMD_SMT_MIB_SET_RSP smt_mib_set;
+ PI_CMD_SMT_MIB_GET_RSP smt_mib_get;
+ char pad[PI_CMD_RSP_K_SIZE_MAX];
+ } PI_DMA_CMD_RSP;
+
+typedef union
+ {
+ PI_DMA_CMD_REQ request;
+ PI_DMA_CMD_RSP response;
+ } PI_DMA_CMD_BUFFER;
+
+
+/* Define format of Consumer Block (resident in host memory) */
+
+typedef struct
+ {
+ volatile PI_UINT32 xmt_rcv_data;
+ volatile PI_UINT32 reserved_1;
+ volatile PI_UINT32 smt_host;
+ volatile PI_UINT32 reserved_2;
+ volatile PI_UINT32 unsol;
+ volatile PI_UINT32 reserved_3;
+ volatile PI_UINT32 cmd_rsp;
+ volatile PI_UINT32 reserved_4;
+ volatile PI_UINT32 cmd_req;
+ volatile PI_UINT32 reserved_5;
+ } PI_CONSUMER_BLOCK;
+
+#define PI_CONS_M_RCV_INDEX 0x000000FF
+#define PI_CONS_M_XMT_INDEX 0x00FF0000
+#define PI_CONS_V_RCV_INDEX 0
+#define PI_CONS_V_XMT_INDEX 16
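+
+/* For example (illustrative): the transmit consumer index packed into the */
+/* xmt_rcv_data longword above would be recovered with */
+/* (xmt_rcv_data & PI_CONS_M_XMT_INDEX) >> PI_CONS_V_XMT_INDEX. */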
+
+/* Offsets into consumer block */
+
+#define PI_CONS_BLK_K_XMT_RCV 0x00
+#define PI_CONS_BLK_K_SMT_HOST 0x08
+#define PI_CONS_BLK_K_UNSOL 0x10
+#define PI_CONS_BLK_K_CMD_RSP 0x18
+#define PI_CONS_BLK_K_CMD_REQ 0x20
+
+/* Offsets into descriptor block */
+
+#define PI_DESCR_BLK_K_RCV_DATA 0x0000
+#define PI_DESCR_BLK_K_XMT_DATA 0x0800
+#define PI_DESCR_BLK_K_SMT_HOST 0x1000
+#define PI_DESCR_BLK_K_UNSOL 0x1200
+#define PI_DESCR_BLK_K_CMD_RSP 0x1280
+#define PI_DESCR_BLK_K_CMD_REQ 0x1300
+
+/* Define format of a rcv descr (Rcv Data, Cmd Rsp, Unsolicited, SMT Host) */
+/* Note a field has been added for later versions of the PDQ to allow for */
+/* finer granularity of the rcv buffer alignment. For backwards */
+/* compatibility, the two bits (which allow the rcv buffer to be longword */
+/* aligned) have been added at the MBZ bits. To support previous drivers, */
+/* the MBZ definition is left intact. */
+
+typedef struct
+ {
+ PI_UINT32 long_0;
+ PI_UINT32 long_1;
+ } PI_RCV_DESCR;
+
+#define PI_RCV_DESCR_M_SOP 0x80000000
+#define PI_RCV_DESCR_M_SEG_LEN_LO 0x60000000
+#define PI_RCV_DESCR_M_MBZ 0x60000000
+#define PI_RCV_DESCR_M_SEG_LEN 0x1F800000
+#define PI_RCV_DESCR_M_SEG_LEN_HI 0x1FF00000
+#define PI_RCV_DESCR_M_SEG_CNT 0x000F0000
+#define PI_RCV_DESCR_M_BUFF_HI 0x0000FFFF
+
+#define PI_RCV_DESCR_V_SOP 31
+#define PI_RCV_DESCR_V_SEG_LEN_LO 29
+#define PI_RCV_DESCR_V_MBZ 29
+#define PI_RCV_DESCR_V_SEG_LEN 23
+#define PI_RCV_DESCR_V_SEG_LEN_HI 20
+#define PI_RCV_DESCR_V_SEG_CNT 16
+#define PI_RCV_DESCR_V_BUFF_HI 0
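+
+/* Illustrative sketch (not taken from the driver): one way to pull the */
+/* status fields out of long_0 with the masks and shifts above.  Guarded */
+/* out so the compiled header is left unchanged. */
+#if 0
+static PI_UINT32 pi_rcv_descr_sop(const PI_RCV_DESCR *p_descr)
+    {
+    /* 1 if this descriptor starts a packet, 0 otherwise */
+    return (p_descr->long_0 & PI_RCV_DESCR_M_SOP) >> PI_RCV_DESCR_V_SOP;
+    }
+
+static PI_UINT32 pi_rcv_descr_seg_len_hi(const PI_RCV_DESCR *p_descr)
+    {
+    /* segment length, using the finer-granularity SEG_LEN_HI definition */
+    return (p_descr->long_0 & PI_RCV_DESCR_M_SEG_LEN_HI)
+                >> PI_RCV_DESCR_V_SEG_LEN_HI;
+    }
+#endif /* illustrative only */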
+
+/* Define the format of a transmit descriptor (Xmt Data, Cmd Req) */
+
+typedef struct
+ {
+ PI_UINT32 long_0;
+ PI_UINT32 long_1;
+ } PI_XMT_DESCR;
+
+#define PI_XMT_DESCR_M_SOP 0x80000000
+#define PI_XMT_DESCR_M_EOP 0x40000000
+#define PI_XMT_DESCR_M_MBZ 0x20000000
+#define PI_XMT_DESCR_M_SEG_LEN 0x1FFF0000
+#define PI_XMT_DESCR_M_BUFF_HI 0x0000FFFF
+
+#define PI_XMT_DESCR_V_SOP 31
+#define PI_XMT_DESCR_V_EOP 30
+#define PI_XMT_DESCR_V_MBZ 29
+#define PI_XMT_DESCR_V_SEG_LEN 16
+#define PI_XMT_DESCR_V_BUFF_HI 0
+
+/* Define format of the Descriptor Block (resident in host memory) */
+
+#define PI_RCV_DATA_K_NUM_ENTRIES 256
+#define PI_XMT_DATA_K_NUM_ENTRIES 256
+#define PI_SMT_HOST_K_NUM_ENTRIES 64
+#define PI_UNSOL_K_NUM_ENTRIES 16
+#define PI_CMD_RSP_K_NUM_ENTRIES 16
+#define PI_CMD_REQ_K_NUM_ENTRIES 16
+
+typedef struct
+ {
+ PI_RCV_DESCR rcv_data[PI_RCV_DATA_K_NUM_ENTRIES];
+ PI_XMT_DESCR xmt_data[PI_XMT_DATA_K_NUM_ENTRIES];
+ PI_RCV_DESCR smt_host[PI_SMT_HOST_K_NUM_ENTRIES];
+ PI_RCV_DESCR unsol[PI_UNSOL_K_NUM_ENTRIES];
+ PI_RCV_DESCR cmd_rsp[PI_CMD_RSP_K_NUM_ENTRIES];
+ PI_XMT_DESCR cmd_req[PI_CMD_REQ_K_NUM_ENTRIES];
+ } PI_DESCR_BLOCK;
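+
+/* Cross-check (illustrative): 256 eight-byte receive descriptors occupy */
+/* 0x800 bytes, which is why PI_DESCR_BLK_K_XMT_DATA above is 0x0800; the */
+/* remaining offsets follow the same arithmetic (0x800 + 256*8 = 0x1000, */
+/* 0x1000 + 64*8 = 0x1200, 0x1200 + 16*8 = 0x1280, 0x1280 + 16*8 = 0x1300). */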
+
+/* Define Port Registers - offsets from PDQ Base address */
+
+#define PI_PDQ_K_REG_PORT_RESET 0x00000000
+#define PI_PDQ_K_REG_HOST_DATA 0x00000004
+#define PI_PDQ_K_REG_PORT_CTRL 0x00000008
+#define PI_PDQ_K_REG_PORT_DATA_A 0x0000000C
+#define PI_PDQ_K_REG_PORT_DATA_B 0x00000010
+#define PI_PDQ_K_REG_PORT_STATUS 0x00000014
+#define PI_PDQ_K_REG_TYPE_0_STATUS 0x00000018
+#define PI_PDQ_K_REG_HOST_INT_ENB 0x0000001C
+#define PI_PDQ_K_REG_TYPE_2_PROD_NOINT 0x00000020
+#define PI_PDQ_K_REG_TYPE_2_PROD 0x00000024
+#define PI_PDQ_K_REG_CMD_RSP_PROD 0x00000028
+#define PI_PDQ_K_REG_CMD_REQ_PROD 0x0000002C
+#define PI_PDQ_K_REG_SMT_HOST_PROD 0x00000030
+#define PI_PDQ_K_REG_UNSOL_PROD 0x00000034
+
+/* Port Control Register - Command codes for primary commands */
+
+#define PI_PCTRL_M_CMD_ERROR 0x8000
+#define PI_PCTRL_M_BLAST_FLASH 0x4000
+#define PI_PCTRL_M_HALT 0x2000
+#define PI_PCTRL_M_COPY_DATA 0x1000
+#define PI_PCTRL_M_ERROR_LOG_START 0x0800
+#define PI_PCTRL_M_ERROR_LOG_READ 0x0400
+#define PI_PCTRL_M_XMT_DATA_FLUSH_DONE 0x0200
+#define PI_PCTRL_M_INIT 0x0100
+#define PI_PCTRL_M_INIT_START 0x0080
+#define PI_PCTRL_M_CONS_BLOCK 0x0040
+#define PI_PCTRL_M_UNINIT 0x0020
+#define PI_PCTRL_M_RING_MEMBER 0x0010
+#define PI_PCTRL_M_MLA 0x0008
+#define PI_PCTRL_M_FW_REV_READ 0x0004
+#define PI_PCTRL_M_DEV_SPECIFIC 0x0002
+#define PI_PCTRL_M_SUB_CMD 0x0001
+
+/* Define sub-commands accessed via the PI_PCTRL_M_SUB_CMD command */
+
+#define PI_SUB_CMD_K_LINK_UNINIT 0x0001
+#define PI_SUB_CMD_K_BURST_SIZE_SET 0x0002
+#define PI_SUB_CMD_K_PDQ_REV_GET 0x0004
+#define PI_SUB_CMD_K_HW_REV_GET 0x0008
+
+/* Define some Port Data B values */
+
+#define PI_PDATA_B_DMA_BURST_SIZE_4 0 /* valid values for command */
+#define PI_PDATA_B_DMA_BURST_SIZE_8 1
+#define PI_PDATA_B_DMA_BURST_SIZE_16 2
+#define PI_PDATA_B_DMA_BURST_SIZE_32 3 /* not supported on PCI */
+#define PI_PDATA_B_DMA_BURST_SIZE_DEF PI_PDATA_B_DMA_BURST_SIZE_16
+
+/* Port Data A Reset state */
+
+#define PI_PDATA_A_RESET_M_UPGRADE 0x00000001
+#define PI_PDATA_A_RESET_M_SOFT_RESET 0x00000002
+#define PI_PDATA_A_RESET_M_SKIP_ST 0x00000004
+
+/* Read adapter MLA address port control command constants */
+
+#define PI_PDATA_A_MLA_K_LO 0
+#define PI_PDATA_A_MLA_K_HI 1
+
+/* Byte Swap values for init command */
+
+#define PI_PDATA_A_INIT_M_DESC_BLK_ADDR 0x0FFFFE000
+#define PI_PDATA_A_INIT_M_RESERVED 0x000001FFC
+#define PI_PDATA_A_INIT_M_BSWAP_DATA 0x000000002
+#define PI_PDATA_A_INIT_M_BSWAP_LITERAL 0x000000001
+
+#define PI_PDATA_A_INIT_V_DESC_BLK_ADDR 13
+#define PI_PDATA_A_INIT_V_RESERVED 3
+#define PI_PDATA_A_INIT_V_BSWAP_DATA 1
+#define PI_PDATA_A_INIT_V_BSWAP_LITERAL 0
+
+/* Port Reset Register */
+
+#define PI_RESET_M_ASSERT_RESET 1
+
+/* Port Status register */
+
+#define PI_PSTATUS_V_RCV_DATA_PENDING 31
+#define PI_PSTATUS_V_XMT_DATA_PENDING 30
+#define PI_PSTATUS_V_SMT_HOST_PENDING 29
+#define PI_PSTATUS_V_UNSOL_PENDING 28
+#define PI_PSTATUS_V_CMD_RSP_PENDING 27
+#define PI_PSTATUS_V_CMD_REQ_PENDING 26
+#define PI_PSTATUS_V_TYPE_0_PENDING 25
+#define PI_PSTATUS_V_RESERVED_1 16
+#define PI_PSTATUS_V_RESERVED_2 11
+#define PI_PSTATUS_V_STATE 8
+#define PI_PSTATUS_V_HALT_ID 0
+
+#define PI_PSTATUS_M_RCV_DATA_PENDING 0x80000000
+#define PI_PSTATUS_M_XMT_DATA_PENDING 0x40000000
+#define PI_PSTATUS_M_SMT_HOST_PENDING 0x20000000
+#define PI_PSTATUS_M_UNSOL_PENDING 0x10000000
+#define PI_PSTATUS_M_CMD_RSP_PENDING 0x08000000
+#define PI_PSTATUS_M_CMD_REQ_PENDING 0x04000000
+#define PI_PSTATUS_M_TYPE_0_PENDING 0x02000000
+#define PI_PSTATUS_M_RESERVED_1 0x01FF0000
+#define PI_PSTATUS_M_RESERVED_2 0x0000F800
+#define PI_PSTATUS_M_STATE 0x00000700
+#define PI_PSTATUS_M_HALT_ID 0x000000FF
+
+/* Define Halt Id's */
+/* Do not insert into this list, only append. */
+
+#define PI_HALT_ID_K_SELFTEST_TIMEOUT 0
+#define PI_HALT_ID_K_PARITY_ERROR 1
+#define PI_HALT_ID_K_HOST_DIR_HALT 2
+#define PI_HALT_ID_K_SW_FAULT 3
+#define PI_HALT_ID_K_HW_FAULT 4
+#define PI_HALT_ID_K_PC_TRACE 5
+#define PI_HALT_ID_K_DMA_ERROR 6 /* Host Data has error reg */
+#define PI_HALT_ID_K_IMAGE_CRC_ERROR 7 /* Image is bad, update it */
+#define PI_HALT_ID_K_BUS_EXCEPTION 8 /* 68K bus exception */
+
+/* Host Interrupt Enable Register as seen by host */
+
+#define PI_HOST_INT_M_XMT_DATA_ENB 0x80000000 /* Type 2 Enables */
+#define PI_HOST_INT_M_RCV_DATA_ENB 0x40000000
+#define PI_HOST_INT_M_SMT_HOST_ENB 0x10000000 /* Type 1 Enables */
+#define PI_HOST_INT_M_UNSOL_ENB 0x20000000
+#define PI_HOST_INT_M_CMD_RSP_ENB 0x08000000
+#define PI_HOST_INT_M_CMD_REQ_ENB 0x04000000
+#define PI_HOST_INT_M_TYPE_1_RESERVED 0x00FF0000
+#define PI_HOST_INT_M_TYPE_0_RESERVED 0x0000FF00 /* Type 0 Enables */
+#define PI_HOST_INT_M_1MS 0x00000080
+#define PI_HOST_INT_M_20MS 0x00000040
+#define PI_HOST_INT_M_CSR_CMD_DONE 0x00000020
+#define PI_HOST_INT_M_STATE_CHANGE 0x00000010
+#define PI_HOST_INT_M_XMT_FLUSH 0x00000008
+#define PI_HOST_INT_M_NXM 0x00000004
+#define PI_HOST_INT_M_PM_PAR_ERR 0x00000002
+#define PI_HOST_INT_M_BUS_PAR_ERR 0x00000001
+
+#define PI_HOST_INT_V_XMT_DATA_ENB 31 /* Type 2 Enables */
+#define PI_HOST_INT_V_RCV_DATA_ENB 30
+#define PI_HOST_INT_V_SMT_HOST_ENB 29 /* Type 1 Enables */
+#define PI_HOST_INT_V_UNSOL_ENB 28
+#define PI_HOST_INT_V_CMD_RSP_ENB 27
+#define PI_HOST_INT_V_CMD_REQ_ENB 26
+#define PI_HOST_INT_V_TYPE_1_RESERVED 16
+#define PI_HOST_INT_V_TYPE_0_RESERVED 8 /* Type 0 Enables */
+#define PI_HOST_INT_V_1MS_ENB 7
+#define PI_HOST_INT_V_20MS_ENB 6
+#define PI_HOST_INT_V_CSR_CMD_DONE_ENB 5
+#define PI_HOST_INT_V_STATE_CHANGE_ENB 4
+#define PI_HOST_INT_V_XMT_FLUSH_ENB 3
+#define PI_HOST_INT_V_NXM_ENB 2
+#define PI_HOST_INT_V_PM_PAR_ERR_ENB 1
+#define PI_HOST_INT_V_BUS_PAR_ERR_ENB 0
+
+#define PI_HOST_INT_K_ACK_ALL_TYPE_0 0x000000FF
+#define PI_HOST_INT_K_DISABLE_ALL_INTS 0x00000000
+#define PI_HOST_INT_K_ENABLE_ALL_INTS 0xFFFFFFFF
+#define PI_HOST_INT_K_ENABLE_DEF_INTS 0xC000001F
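+
+/* Illustrative decode: PI_HOST_INT_K_ENABLE_DEF_INTS (0xC000001F) is */
+/* XMT_DATA_ENB | RCV_DATA_ENB | STATE_CHANGE | XMT_FLUSH | NXM | */
+/* PM_PAR_ERR | BUS_PAR_ERR, i.e. both Type 2 sources plus the Type 0 */
+/* error/state-change sources, with the timers and CSR_CMD_DONE left off. */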
+
+/* Type 0 Interrupt Status Register */
+
+#define PI_TYPE_0_STAT_M_1MS 0x00000080
+#define PI_TYPE_0_STAT_M_20MS 0x00000040
+#define PI_TYPE_0_STAT_M_CSR_CMD_DONE 0x00000020
+#define PI_TYPE_0_STAT_M_STATE_CHANGE 0x00000010
+#define PI_TYPE_0_STAT_M_XMT_FLUSH 0x00000008
+#define PI_TYPE_0_STAT_M_NXM 0x00000004
+#define PI_TYPE_0_STAT_M_PM_PAR_ERR 0x00000002
+#define PI_TYPE_0_STAT_M_BUS_PAR_ERR 0x00000001
+
+#define PI_TYPE_0_STAT_V_1MS 7
+#define PI_TYPE_0_STAT_V_20MS 6
+#define PI_TYPE_0_STAT_V_CSR_CMD_DONE 5
+#define PI_TYPE_0_STAT_V_STATE_CHANGE 4
+#define PI_TYPE_0_STAT_V_XMT_FLUSH 3
+#define PI_TYPE_0_STAT_V_NXM 2
+#define PI_TYPE_0_STAT_V_PM_PAR_ERR 1
+#define PI_TYPE_0_STAT_V_BUS_PAR_ERR 0
+
+/* Register definition structures are defined for both big and little endian systems */
+
+#ifndef BIG_ENDIAN
+
+/* Little endian format of Type 1 Producer register */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 prod;
+ PI_UINT8 comp;
+ PI_UINT8 mbz_1;
+ PI_UINT8 mbz_2;
+ } index;
+ } PI_TYPE_1_PROD_REG;
+
+/* Little endian format of Type 2 Producer register */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 rcv_prod;
+ PI_UINT8 xmt_prod;
+ PI_UINT8 rcv_comp;
+ PI_UINT8 xmt_comp;
+ } index;
+ } PI_TYPE_2_PROD_REG;
+
+/* Little endian format of Type 1 Consumer Block longword */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 cons;
+ PI_UINT8 res0;
+ PI_UINT8 res1;
+ PI_UINT8 res2;
+ } index;
+ } PI_TYPE_1_CONSUMER;
+
+/* Little endian format of Type 2 Consumer Block longword */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 rcv_cons;
+ PI_UINT8 res0;
+ PI_UINT8 xmt_cons;
+ PI_UINT8 res1;
+ } index;
+ } PI_TYPE_2_CONSUMER;
+
+#else
+
+/* Big endian format of Type 1 Producer register */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 mbz_2;
+ PI_UINT8 mbz_1;
+ PI_UINT8 comp;
+ PI_UINT8 prod;
+ } index;
+ } PI_TYPE_1_PROD_REG;
+
+/* Big endian format of Type 2 Producer register */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 xmt_comp;
+ PI_UINT8 rcv_comp;
+ PI_UINT8 xmt_prod;
+ PI_UINT8 rcv_prod;
+ } index;
+ } PI_TYPE_2_PROD_REG;
+
+/* Big endian format of Type 1 Consumer Block longword */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 res2;
+ PI_UINT8 res1;
+ PI_UINT8 res0;
+ PI_UINT8 cons;
+ } index;
+ } PI_TYPE_1_CONSUMER;
+
+/* Big endian format of Type 2 Consumer Block longword */
+
+typedef union
+ {
+ PI_UINT32 lword;
+ struct
+ {
+ PI_UINT8 res1;
+ PI_UINT8 xmt_cons;
+ PI_UINT8 res0;
+ PI_UINT8 rcv_cons;
+ } index;
+ } PI_TYPE_2_CONSUMER;
+
+#endif /* #ifndef BIG_ENDIAN */
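+
+/* Illustrative sketch (not taken from the driver): a possible use of the */
+/* unions above - update one index as a byte, then hand the adapter the */
+/* whole longword, with the layout keeping each byte in the right place */
+/* on either endianness.  Guarded out so the compiled header is unchanged. */
+#if 0
+static PI_UINT32 pi_example_advance_producer(PI_TYPE_1_PROD_REG *p_reg)
+    {
+    p_reg->index.prod += 1;     /* bump the producer index */
+    return p_reg->lword;        /* longword value a register write would use */
+    }
+#endif /* illustrative only */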
+
+/* Define EISA controller register offsets */
+
+#define PI_ESIC_K_BURST_HOLDOFF 0x040
+#define PI_ESIC_K_SLOT_ID 0xC80
+#define PI_ESIC_K_SLOT_CNTRL 0xC84
+#define PI_ESIC_K_MEM_ADD_CMP_0 0xC85
+#define PI_ESIC_K_MEM_ADD_CMP_1 0xC86
+#define PI_ESIC_K_MEM_ADD_CMP_2 0xC87
+#define PI_ESIC_K_MEM_ADD_HI_CMP_0 0xC88
+#define PI_ESIC_K_MEM_ADD_HI_CMP_1 0xC89
+#define PI_ESIC_K_MEM_ADD_HI_CMP_2 0xC8A
+#define PI_ESIC_K_MEM_ADD_MASK_0 0xC8B
+#define PI_ESIC_K_MEM_ADD_MASK_1 0xC8C
+#define PI_ESIC_K_MEM_ADD_MASK_2 0xC8D
+#define PI_ESIC_K_MEM_ADD_LO_CMP_0 0xC8E
+#define PI_ESIC_K_MEM_ADD_LO_CMP_1 0xC8F
+#define PI_ESIC_K_MEM_ADD_LO_CMP_2 0xC90
+#define PI_ESIC_K_IO_CMP_0_0 0xC91
+#define PI_ESIC_K_IO_CMP_0_1 0xC92
+#define PI_ESIC_K_IO_CMP_1_0 0xC93
+#define PI_ESIC_K_IO_CMP_1_1 0xC94
+#define PI_ESIC_K_IO_CMP_2_0 0xC95
+#define PI_ESIC_K_IO_CMP_2_1 0xC96
+#define PI_ESIC_K_IO_CMP_3_0 0xC97
+#define PI_ESIC_K_IO_CMP_3_1 0xC98
+#define PI_ESIC_K_IO_ADD_MASK_0_0 0xC99
+#define PI_ESIC_K_IO_ADD_MASK_0_1 0xC9A
+#define PI_ESIC_K_IO_ADD_MASK_1_0 0xC9B
+#define PI_ESIC_K_IO_ADD_MASK_1_1 0xC9C
+#define PI_ESIC_K_IO_ADD_MASK_2_0 0xC9D
+#define PI_ESIC_K_IO_ADD_MASK_2_1 0xC9E
+#define PI_ESIC_K_IO_ADD_MASK_3_0 0xC9F
+#define PI_ESIC_K_IO_ADD_MASK_3_1 0xCA0
+#define PI_ESIC_K_MOD_CONFIG_1 0xCA1
+#define PI_ESIC_K_MOD_CONFIG_2 0xCA2
+#define PI_ESIC_K_MOD_CONFIG_3 0xCA3
+#define PI_ESIC_K_MOD_CONFIG_4 0xCA4
+#define PI_ESIC_K_MOD_CONFIG_5 0xCA5
+#define PI_ESIC_K_MOD_CONFIG_6 0xCA6
+#define PI_ESIC_K_MOD_CONFIG_7 0xCA7
+#define PI_ESIC_K_DIP_SWITCH 0xCA8
+#define PI_ESIC_K_IO_CONFIG_STAT_0 0xCA9
+#define PI_ESIC_K_IO_CONFIG_STAT_1 0xCAA
+#define PI_ESIC_K_DMA_CONFIG 0xCAB
+#define PI_ESIC_K_INPUT_PORT 0xCAC
+#define PI_ESIC_K_OUTPUT_PORT 0xCAD
+#define PI_ESIC_K_FUNCTION_CNTRL 0xCAE
+#define PI_ESIC_K_CSR_IO_LEN PI_ESIC_K_FUNCTION_CNTRL+1 /* always last reg + 1 */
+
+/* Define the value all drivers must write to the function control register. */
+
+#define PI_ESIC_K_FUNCTION_CNTRL_IO_ENB 0x03
+
+/* Define the bits in the slot control register. */
+
+#define PI_SLOT_CNTRL_M_RESET 0x04 /* Don't use. */
+#define PI_SLOT_CNTRL_M_ERROR 0x02 /* Not implemented. */
+#define PI_SLOT_CNTRL_M_ENB 0x01 /* Must be set. */
+
+/* Define the bits in the burst holdoff register. */
+
+#define PI_BURST_HOLDOFF_M_HOLDOFF 0xFC
+#define PI_BURST_HOLDOFF_M_RESERVED 0x02
+#define PI_BURST_HOLDOFF_M_MEM_MAP 0x01
+
+#define PI_BURST_HOLDOFF_V_HOLDOFF 2
+#define PI_BURST_HOLDOFF_V_RESERVED 1
+#define PI_BURST_HOLDOFF_V_MEM_MAP 0
+
+/*
+ * Define the fields in the IO Compare registers.
+ * The driver must initialize the slot field with the slot ID shifted by the
+ * amount shown below.
+ */
+
+#define PI_IO_CMP_V_SLOT 4
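+
+/* For example (illustrative): a board in EISA slot 3 would have */
+/* (3 << PI_IO_CMP_V_SLOT) = 0x30 placed in the slot field of each */
+/* IO Compare register. */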
+
+/* Define the fields in the Interrupt Channel Configuration and Status reg */
+
+#define PI_CONFIG_STAT_0_M_PEND 0x80
+#define PI_CONFIG_STAT_0_M_RES_1 0x40
+#define PI_CONFIG_STAT_0_M_IREQ_OUT 0x20
+#define PI_CONFIG_STAT_0_M_IREQ_IN 0x10
+#define PI_CONFIG_STAT_0_M_INT_ENB 0x08
+#define PI_CONFIG_STAT_0_M_RES_0 0x04
+#define PI_CONFIG_STAT_0_M_IRQ 0x03
+
+#define PI_CONFIG_STAT_0_V_PEND 7
+#define PI_CONFIG_STAT_0_V_RES_1 6
+#define PI_CONFIG_STAT_0_V_IREQ_OUT 5
+#define PI_CONFIG_STAT_0_V_IREQ_IN 4
+#define PI_CONFIG_STAT_0_V_INT_ENB 3
+#define PI_CONFIG_STAT_0_V_RES_0 2
+#define PI_CONFIG_STAT_0_V_IRQ 0
+
+#define PI_CONFIG_STAT_0_IRQ_K_9 0
+#define PI_CONFIG_STAT_0_IRQ_K_10 1
+#define PI_CONFIG_STAT_0_IRQ_K_11 2
+#define PI_CONFIG_STAT_0_IRQ_K_15 3
+
+/* Define DEC FDDIcontroller/EISA (DEFEA) EISA hardware ID's */
+
+#define DEFEA_PRODUCT_ID 0x0030A310 /* DEC product 300 (no rev) */
+#define DEFEA_PROD_ID_1 0x0130A310 /* DEC product 300, rev 1 */
+#define DEFEA_PROD_ID_2 0x0230A310 /* DEC product 300, rev 2 */
+#define DEFEA_PROD_ID_3 0x0330A310 /* DEC product 300, rev 3 */
+
+/**********************************************/
+/* Digital PFI Specification v1.0 Definitions */
+/**********************************************/
+
+/* PCI Configuration Space Constants */
+
+#define PFI_K_LAT_TIMER_DEF 0x88 /* def max master latency timer */
+#define PFI_K_LAT_TIMER_MIN 0x20 /* min max master latency timer */
+#define PFI_K_CSR_MEM_LEN 0x80 /* 128 bytes */
+#define PFI_K_CSR_IO_LEN 0x80 /* 128 bytes */
+#define PFI_K_PKT_MEM_LEN 0x10000 /* 64K bytes */
+
+/* PFI Register Offsets (starting at PDQ Register Base Address) */
+
+#define PFI_K_REG_RESERVED_0 0X00000038
+#define PFI_K_REG_RESERVED_1 0X0000003C
+#define PFI_K_REG_MODE_CTRL 0X00000040
+#define PFI_K_REG_STATUS 0X00000044
+#define PFI_K_REG_FIFO_WRITE 0X00000048
+#define PFI_K_REG_FIFO_READ 0X0000004C
+
+/* PFI Mode Control Register Constants */
+
+#define PFI_MODE_M_RESERVED 0XFFFFFFF0
+#define PFI_MODE_M_TGT_ABORT_ENB 0X00000008
+#define PFI_MODE_M_PDQ_INT_ENB 0X00000004
+#define PFI_MODE_M_PFI_INT_ENB 0X00000002
+#define PFI_MODE_M_DMA_ENB 0X00000001
+
+#define PFI_MODE_V_RESERVED 4
+#define PFI_MODE_V_TGT_ABORT_ENB 3
+#define PFI_MODE_V_PDQ_INT_ENB 2
+#define PFI_MODE_V_PFI_INT_ENB 1
+#define PFI_MODE_V_DMA_ENB 0
+
+#define PFI_MODE_K_ALL_DISABLE 0X00000000
+
+/* PFI Status Register Constants */
+
+#define PFI_STATUS_M_RESERVED 0XFFFFFFC0
+#define PFI_STATUS_M_PFI_ERROR 0X00000020 /* only valid in rev 1 or later PFI */
+#define PFI_STATUS_M_PDQ_INT 0X00000010
+#define PFI_STATUS_M_PDQ_DMA_ABORT 0X00000008
+#define PFI_STATUS_M_FIFO_FULL 0X00000004
+#define PFI_STATUS_M_FIFO_EMPTY 0X00000002
+#define PFI_STATUS_M_DMA_IN_PROGRESS 0X00000001
+
+#define PFI_STATUS_V_RESERVED 6
+#define PFI_STATUS_V_PFI_ERROR 5 /* only valid in rev 1 or later PFI */
+#define PFI_STATUS_V_PDQ_INT 4
+#define PFI_STATUS_V_PDQ_DMA_ABORT 3
+#define PFI_STATUS_V_FIFO_FULL 2
+#define PFI_STATUS_V_FIFO_EMPTY 1
+#define PFI_STATUS_V_DMA_IN_PROGRESS 0
+
+#define DFX_MAX_EISA_SLOTS 16 /* maximum number of EISA slots to scan */
+#define DFX_MAX_NUM_BOARDS 8 /* maximum number of adapters supported */
+
+#define DFX_BUS_TYPE_PCI 0 /* type code for DEC FDDIcontroller/PCI */
+#define DFX_BUS_TYPE_EISA 1 /* type code for DEC FDDIcontroller/EISA */
+
+#define DFX_FC_PRH2_PRH1_PRH0 0x54003820 /* Packet Request Header bytes + FC */
+#define DFX_PRH0_BYTE 0x20 /* Packet Request Header byte 0 */
+#define DFX_PRH1_BYTE 0x38 /* Packet Request Header byte 1 */
+#define DFX_PRH2_BYTE 0x00 /* Packet Request Header byte 2 */
+
+/* Driver routine status (return) codes */
+
+#define DFX_K_SUCCESS 0 /* routine succeeded */
+#define DFX_K_FAILURE 1 /* routine failed */
+#define DFX_K_OUTSTATE 2 /* bad state for command */
+#define DFX_K_HW_TIMEOUT 3 /* command timed out */
+
+/* Define LLC host receive buffer min/max/default values */
+
+#define RCV_BUFS_MIN 2 /* minimum pre-allocated receive buffers */
+#define RCV_BUFS_MAX 32 /* maximum pre-allocated receive buffers */
+#define RCV_BUFS_DEF 8 /* default pre-allocated receive buffers */
+
+/* Define offsets into FDDI LLC or SMT receive frame buffers - used when indicating frames */
+
+#define RCV_BUFF_K_DESCR 0 /* four byte FMC descriptor */
+#define RCV_BUFF_K_PADDING 4 /* three null bytes */
+#define RCV_BUFF_K_FC 7 /* one byte frame control */
+#define RCV_BUFF_K_DA 8 /* six byte destination address */
+#define RCV_BUFF_K_SA 14 /* six byte source address */
+#define RCV_BUFF_K_DATA 20 /* offset to start of packet data */
+
+/* Define offsets into FDDI LLC transmit frame buffers - used when sending frames */
+
+#define XMT_BUFF_K_FC 0 /* one byte frame control */
+#define XMT_BUFF_K_DA 1 /* six byte destination address */
+#define XMT_BUFF_K_SA 7 /* six byte source address */
+#define XMT_BUFF_K_DATA 13 /* offset to start of packet data */
+
+/* Macro for checking a "value" is within a specific range */
+
+#define IN_RANGE(value,low,high) ((value >= low) && (value <= high))
+
+/* Only execute special print call when debug driver was built */
+
+#ifdef DEFXX_DEBUG
+#define DBG_printk(args...) printk(## args)
+#else
+#define DBG_printk(args...)
+#endif
+
+/* Define constants for masking/unmasking interrupts */
+
+#define DFX_MASK_INTERRUPTS 1
+#define DFX_UNMASK_INTERRUPTS 0
+
+/* Define structure for driver transmit descriptor block */
+
+typedef struct
+ {
+ struct sk_buff *p_skb; /* ptr to skb */
+ } XMT_DRIVER_DESCR;
+
+typedef struct DFX_board_tag
+ {
+ /* Keep virtual and physical pointers to locked, physically contiguous memory */
+
+ char *kmalloced; /* pci_free_consistent this on unload */
+ dma_addr_t kmalloced_dma;
+ /* DMA handle for the above */
+ PI_DESCR_BLOCK *descr_block_virt; /* PDQ descriptor block virt address */
+ dma_addr_t descr_block_phys; /* PDQ descriptor block phys address */
+ PI_DMA_CMD_REQ *cmd_req_virt; /* Command request buffer virt address */
+ dma_addr_t cmd_req_phys; /* Command request buffer phys address */
+ PI_DMA_CMD_RSP *cmd_rsp_virt; /* Command response buffer virt address */
+ dma_addr_t cmd_rsp_phys; /* Command response buffer phys address */
+ char *rcv_block_virt; /* LLC host receive queue buf blk virt */
+ dma_addr_t rcv_block_phys; /* LLC host receive queue buf blk phys */
+ PI_CONSUMER_BLOCK *cons_block_virt; /* PDQ consumer block virt address */
+ dma_addr_t cons_block_phys; /* PDQ consumer block phys address */
+
+ /* Keep local copies of Type 1 and Type 2 register data */
+
+ PI_TYPE_1_PROD_REG cmd_req_reg; /* Command Request register */
+ PI_TYPE_1_PROD_REG cmd_rsp_reg; /* Command Response register */
+ PI_TYPE_2_PROD_REG rcv_xmt_reg; /* Type 2 (RCV/XMT) register */
+
+ /* Storage for unicast and multicast address entries in adapter CAM */
+
+ u8 uc_table[1*FDDI_K_ALEN];
+ u32 uc_count; /* number of unicast addresses */
+ u8 mc_table[PI_CMD_ADDR_FILTER_K_SIZE*FDDI_K_ALEN];
+ u32 mc_count; /* number of multicast addresses */
+
+ /* Current packet filter settings */
+
+ u32 ind_group_prom; /* LLC individual & group frame prom mode */
+ u32 group_prom; /* LLC group (multicast) frame prom mode */
+
+ /* Link available flag needed to determine whether to drop outgoing packet requests */
+
+ u32 link_available; /* is link available? */
+
+ /* Resources to indicate reset type when resetting adapter */
+
+ u32 reset_type; /* skip or rerun diagnostics */
+
+ /* Store pointers to receive buffers for queue processing code */
+
+ char *p_rcv_buff_va[PI_RCV_DATA_K_NUM_ENTRIES];
+
+ /* Store pointers to transmit buffers for transmit completion code */
+
+ XMT_DRIVER_DESCR xmt_drv_descr_blk[PI_XMT_DATA_K_NUM_ENTRIES];
+
+ /* Transmit spinlocks */
+
+ spinlock_t lock;
+
+ /* Store device, bus-specific, and parameter information for this adapter */
+
+ struct net_device *dev; /* pointer to device structure */
+ struct net_device *next;
+ u32 bus_type; /* bus type (0 == PCI, 1 == EISA) */
+ u16 base_addr; /* base I/O address (same as dev->base_addr) */
+ struct pci_dev * pci_dev;
+ u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */
+ u32 req_ttrt; /* requested TTRT value (in 80ns units) */
+ u32 burst_size; /* adapter burst size (enumerated) */
+ u32 rcv_bufs_to_post; /* receive buffers to post for LLC host queue */
+ u8 factory_mac_addr[FDDI_K_ALEN]; /* factory (on-board) MAC address */
+
+ /* Common FDDI statistics structure and private counters */
+
+ struct fddi_statistics stats;
+
+ u32 rcv_discards;
+ u32 rcv_crc_errors;
+ u32 rcv_frame_status_errors;
+ u32 rcv_length_errors;
+ u32 rcv_total_frames;
+ u32 rcv_multicast_frames;
+ u32 rcv_total_bytes;
+
+ u32 xmt_discards;
+ u32 xmt_length_errors;
+ u32 xmt_total_frames;
+ u32 xmt_total_bytes;
+ } DFX_board_t;
+
+#endif /* #ifndef _DEFXX_H_ */
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
new file mode 100644
index 000000000000..c4aa5fe2840e
--- /dev/null
+++ b/drivers/net/depca.c
@@ -0,0 +1,2122 @@
+/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
+
+ Written 1994, 1995 by David C. Davies.
+
+
+ Copyright 1994 David C. Davies
+ and
+ United States Government
+ (as represented by the Director, National Security Agency).
+
+ Copyright 1995 Digital Equipment Corporation.
+
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of DEPCA and EtherWORKS ethernet cards:
+
+ DEPCA (the original)
+ DE100
+ DE101
+ DE200 Turbo
+ DE201 Turbo
+ DE202 Turbo (TP BNC)
+ DE210
+ DE422 (EISA)
+
+ The driver has been tested on DE100, DE200 and DE202 cards in a
+ relatively busy network. The DE422 has been tested a little.
+
+ This driver will NOT work for the DE203, DE204 and DE205 series of
+ cards, since they have a new custom ASIC in place of the AMD LANCE
+ chip. See the 'ewrk3.c' driver in the Linux source tree for running
+ those cards.
+
+ I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
+ a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com
+
+ =========================================================================
+
+ The driver was originally based on the 'lance.c' driver from Donald
+ Becker which is included with the standard driver distribution for
+ linux. V0.4 is a complete re-write with only the kernel interface
+ remaining from the original code.
+
+ 1) Lance.c code in /linux/drivers/net/
+ 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
+ AMD, 1992 [(800) 222-9323].
+ 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
+ AMD, Pub. #17881, May 1993.
+ 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
+ AMD, Pub. #16907, May 1992
+ 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
+ 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
+ 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
+ Digital Equipment Corporation, 1989
+ 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
+
+
+ Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
+ driver.
+
+ The original DEPCA card requires that the ethernet ROM address counter
+ be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
+ only done when a 0x08 is read as the first address octet (to minimise
+ the chances of writing over some other hardware's I/O register). The
+ NICSR accesses have been changed to byte accesses for all the cards
+ supported by this driver, since there is only one useful bit in the MSB
+ (remote boot timeout) and it is not used. Also, there is a maximum of
+ only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
+ help debugging all this (and holding my feet to the fire until I got it
+ right).
+
+ The DE200 series boards have on-board 64kB RAM for use as a shared
+ memory network buffer. Only the DE100 cards make use of a 2kB buffer
+ mode which has not been implemented in this driver (only the 32kB and
+ 64kB modes are supported [16kB/48kB for the original DEPCA]).
+
+ At the most only 2 DEPCA cards can be supported on the ISA bus because
+ there is only provision for two I/O base addresses on each card (0x300
+ and 0x200). The I/O address is detected by searching for a byte sequence
+ in the Ethernet station address PROM at the expected I/O address for the
+ Ethernet PROM. The shared memory base address is 'autoprobed' by
+ looking for the self test PROM and detecting the card name. When a
+ second DEPCA is detected, information is placed in the base_addr
+ variable of the next device structure (which is created if necessary),
+ thus enabling ethif_probe initialization for the device. More than 2
+ EISA cards can be supported, but care will be needed when assigning the
+ shared memory to ensure that each slot has the correct IRQ, I/O address
+ and shared memory address assigned.
+
+ ************************************************************************
+
+ NOTE: If you are using two ISA DEPCAs, it is important that you assign
+ the base memory addresses correctly. The driver autoprobes I/O 0x300
+ then 0x200. The base memory address for the first device must be less
+ than that of the second so that the auto probe will correctly assign the
+ I/O and memory addresses on the same card. I can't think of a way to do
+ this unambiguously at the moment, since there is nothing on the cards to
+ tie I/O and memory information together.
+
+ I am unable to test 2 cards together for now, so this code is
+ unchecked. All reports, good or bad, are welcome.
+
+ ************************************************************************
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
+ {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
+ really IRQ9 in machines with 16 IRQ lines.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been added. To
+ utilise this ability, you have to do <8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy depca.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) if you wish, edit the source code near line 1530 to reflect the I/O
+ address and IRQ you're using (see also 5).
+ 3) compile depca.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the depca configuration turned off and reboot.
+ 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
+ [Alan Cox: Changed the code to allow command line irq/io assignments]
+ [Dave Davies: Changed the code to allow command line mem/name
+ assignments]
+ 6) run the net startup bits for your eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod depca'.
+
+ To assign a base memory address for the shared memory when running as a
+ loadable module, see 5 above. To include the adapter name (if you have
+ no PROM but know the card name) also see 5 above. Note that this last
+ option will not work with kernel built-in depca's.
+
+ The shared memory assignment for a loadable module makes sense to avoid
+ the 'memory autoprobe' picking the wrong shared memory (for the case of
+ 2 depca's in a PC).
+
+ ************************************************************************
+ Support for MCA EtherWORKS cards added 11-3-98.
+ Verified to work with up to 2 DE212 cards in a system (although not
+ fully stress-tested).
+
+ Currently known bugs/limitations:
+
+ Note: with the MCA stuff as a module, it trusts the MCA configuration,
+ not the command line for IRQ and memory address. You can
+ specify them if you want, but it will throw your values out.
+ You still have to pass the I/O address it was configured as,
+ though.
+
+ ************************************************************************
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 25-jan-94 Initial writing.
+ 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
+ 0.3 1-feb-94 Added multiple DEPCA support.
+ 0.31 4-feb-94 Added DE202 recognition.
+ 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
+ 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
+ Add jabber packet fix from murf@perftech.com
+ and becker@super.org
+ 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
+ 0.35 8-mar-94 Added DE201 recognition. Tidied up.
+ 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
+ 0.36 16-may-94 DE422 fix released.
+ 0.37 22-jul-94 Added MODULE support
+ 0.38 15-aug-94 Added DBR ROM switch in depca_close().
+ Multi DEPCA bug fix.
+ 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
+ 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
+ 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
+ <stromain@alf.dec.com>
+ 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
+ 0.385 3-apr-95 Fix a recognition bug reported by
+ <ryan.niemi@lastfrontier.com>
+ 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
+ 0.40 25-May-95 Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+ 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
+ modules.
+ Add 'adapter_name' for loadable modules when no PROM.
+ Both above from a suggestion by
+ <pchen@woodruffs121.residence.gatech.edu>.
+ Add new multicasting code.
+ 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
+ 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+ 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
+ reported by <mmogilvi@elbert.uccs.edu>
+ 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
+ by <tymm@computer.org>
+ 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
+ 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
+ 0.51 27-Jun-99 Correct received packet length for CRC from
+ report by <worm@dkik.dk>
+ 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
+ Fix show-stopper (ints left masked) in depca_interrupt
+ by <peterd@pnd-pc.demon.co.uk>
+ 0.53 12-Jan-01 Release resources on failure, bss tidbits
+ by acme@conectiva.com.br
+ 0.54 08-Nov-01 use library crc32 functions
+ by Matt_Domsch@dell.com
+ 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
+
+ =========================================================================
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
+#ifdef CONFIG_EISA
+#include <linux/eisa.h>
+#endif
+
+#include "depca.h"
+
+static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
+
+#ifdef DEPCA_DEBUG
+static int depca_debug = DEPCA_DEBUG;
+#else
+static int depca_debug = 1;
+#endif
+
+#define DEPCA_NDA 0xffe0 /* No Device Address */
+
+#define TX_TIMEOUT (1*HZ)
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Set the number of Tx and Rx buffers. Ensure that the memory requested
+** here is <= the amount of shared memory set up by the board switches.
+** The number of descriptors MUST BE A POWER OF 2.
+**
+** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 8 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
+#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
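+
+/*
+** Worked example (illustrative): each descriptor is 8 bytes, so the
+** defaults above come to 8*(8+1536) + 8*(8+1536) = 24704 bytes (~24kB)
+** plus the small initialisation block, well within even the 48kB of an
+** original DEPCA.
+*/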
+
+/*
+** EISA bus defines
+*/
+#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+
+/*
+** ISA Bus defines
+*/
+#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
+#define DEPCA_TOTAL_SIZE 0x10
+
+static struct {
+ u_long iobase;
+ struct platform_device *device;
+} depca_io_ports[] = {
+ { 0x300, NULL },
+ { 0x200, NULL },
+ { 0 , NULL },
+};
+
+/*
+** Name <-> Adapter mapping
+*/
+#define DEPCA_SIGNATURE {"DEPCA",\
+ "DE100","DE101",\
+ "DE200","DE201","DE202",\
+ "DE210","DE212",\
+ "DE422",\
+ ""}
+
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
+
+enum depca_type {
+ DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
+};
+
+static char depca_string[] = "depca";
+
+static int depca_device_remove (struct device *device);
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id depca_eisa_ids[] = {
+ { "DEC4220", de422 },
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
+
+static int depca_eisa_probe (struct device *device);
+
+static struct eisa_driver depca_eisa_driver = {
+ .id_table = depca_eisa_ids,
+ .driver = {
+ .name = depca_string,
+ .probe = depca_eisa_probe,
+ .remove = __devexit_p (depca_device_remove)
+ }
+};
+#endif
+
+#ifdef CONFIG_MCA
+/*
+** Adapter ID for the MCA EtherWORKS DE210/212 adapter
+*/
+#define DE210_ID 0x628d
+#define DE212_ID 0x6def
+
+static short depca_mca_adapter_ids[] = {
+ DE210_ID,
+ DE212_ID,
+ 0x0000
+};
+
+static char *depca_mca_adapter_name[] = {
+ "DEC EtherWORKS MC Adapter (DE210)",
+ "DEC EtherWORKS MC Adapter (DE212)",
+ NULL
+};
+
+static enum depca_type depca_mca_adapter_type[] = {
+ de210,
+ de212,
+ 0
+};
+
+static int depca_mca_probe (struct device *);
+
+static struct mca_driver depca_mca_driver = {
+ .id_table = depca_mca_adapter_ids,
+ .driver = {
+ .name = depca_string,
+ .bus = &mca_bus_type,
+ .probe = depca_mca_probe,
+ .remove = __devexit_p(depca_device_remove),
+ },
+};
+#endif
+
+static int depca_isa_probe (struct device *);
+
+static struct device_driver depca_isa_driver = {
+ .name = depca_string,
+ .bus = &platform_bus_type,
+ .probe = depca_isa_probe,
+ .remove = __devexit_p(depca_device_remove),
+};
+
+/*
+** Miscellaneous info...
+*/
+#define DEPCA_STRLEN 16
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
+#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
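+
+/*
+** For example (illustrative): with DEPCA_ALIGN8 an offset of 0x1c is
+** rounded up by (0x1c + DEPCA_ALIGN) & ~DEPCA_ALIGN to 0x20, the next
+** quadword boundary.
+*/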
+
+/*
+** The DEPCA Rx and Tx ring descriptors.
+*/
+struct depca_rx_desc {
+ volatile s32 base;
+ s16 buf_length; /* This length is negative 2's complement! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct depca_tx_desc {
+ volatile s32 base;
+ s16 length; /* This length is negative 2's complement! */
+ s16 misc; /* Errors and TDR info */
+};
+
+#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
+ to LANCE memory address space */
+
+/*
+** The Lance initialization block, described in databook, in common memory.
+*/
+struct depca_init {
+ u16 mode; /* Mode register */
+ u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
+ u8 mcast_table[8]; /* Multicast Hash Table. */
+ u32 rx_ring; /* Rx ring base pointer & ring length */
+ u32 tx_ring; /* Tx ring base pointer & ring length */
+};
+
+#define DEPCA_PKT_STAT_SZ 16
+#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DEPCA_PKT_STAT_SZ */
+struct depca_private {
+ char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
+ enum depca_type adapter; /* Adapter type */
+ enum {
+ DEPCA_BUS_MCA = 1,
+ DEPCA_BUS_ISA,
+ DEPCA_BUS_EISA,
+ } depca_bus; /* type of bus */
+ struct depca_init init_block; /* Shadow Initialization block */
+/* CPU address space fields */
+ struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
+ struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
+ void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
+ void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
+ void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
+ u_long mem_start; /* Bus address of device RAM (before remap) */
+ u_long mem_len; /* device memory size */
+/* Device address space fields */
+ u_long device_ram_start; /* Start of RAM in device addr space */
+/* Offsets used in both address spaces */
+ u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
+ u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
+ u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
+/* Kernel-only (not device) fields */
+ int rx_new, tx_new; /* The next free ring entry */
+ int rx_old, tx_old; /* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ spinlock_t lock;
+ struct { /* Private stats counters */
+ u32 bins[DEPCA_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ int txRingMask; /* TX ring mask */
+ int rxRingMask; /* RX ring mask */
+ s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
+ s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
+};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingMask = tx_new Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingMask-lp->tx_new:\
+ lp->tx_old -lp->tx_new-1)
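+
+/*
+** Worked example (illustrative): with txRingMask = 7 (8 descriptors),
+** tx_old = 2 and tx_new = 5 gives 2+7-5 = 4 free entries; tx_old = 6 and
+** tx_new = 5 (wrapped full ring) gives 6-5-1 = 0; an empty ring with
+** tx_old = tx_new leaves txRingMask = 7 entries usable.
+*/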
+
+/*
+** Public Functions
+*/
+static int depca_open(struct net_device *dev);
+static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t depca_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int depca_close(struct net_device *dev);
+static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void depca_tx_timeout(struct net_device *dev);
+static struct net_device_stats *depca_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+/*
+** Private functions
+*/
+static void depca_init_ring(struct net_device *dev);
+static int depca_rx(struct net_device *dev);
+static int depca_tx(struct net_device *dev);
+
+static void LoadCSRs(struct net_device *dev);
+static int InitRestartDepca(struct net_device *dev);
+static int DepcaSignature(char *name, u_long paddr);
+static int DevicePresent(u_long ioaddr);
+static int get_hw_addr(struct net_device *dev);
+static void SetMulticastFilter(struct net_device *dev);
+static int load_packet(struct net_device *dev, struct sk_buff *skb);
+static void depca_dbg_open(struct net_device *dev);
+
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
+
+static int irq;
+static int io;
+static char *adapter_name;
+static int mem; /* For loadable module assignment
+ use insmod mem=0x????? .... */
+module_param (irq, int, 0);
+module_param (io, int, 0);
+module_param (adapter_name, charp, 0);
+module_param (mem, int, 0);
+MODULE_PARM_DESC(irq, "DEPCA IRQ number");
+MODULE_PARM_DESC(io, "DEPCA I/O base address");
+MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
+MODULE_PARM_DESC(mem, "DEPCA shared memory address");
+MODULE_LICENSE("GPL");
+
+/*
+** Miscellaneous defines...
+*/
+#define STOP_DEPCA \
+ outw(CSR0, DEPCA_ADDR);\
+ outw(STOP, DEPCA_DATA)
+
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
+{
+ struct depca_private *lp;
+ int i, j, offset, netRAM, mem_len, status = 0;
+ s16 nicsr;
+ u_long ioaddr;
+ u_long mem_start;
+
+ /*
+ * We are now supposed to enter this function with the
+ * following fields filled with proper values :
+ *
+ * dev->base_addr
+ * lp->mem_start
+ * lp->depca_bus
+ * lp->adapter
+ *
+ * dev->irq can be set if known from device configuration (on
+ * MCA or EISA) or module option. Otherwise, it will be auto
+ * detected.
+ */
+
+ ioaddr = dev->base_addr;
+
+ STOP_DEPCA;
+
+ nicsr = inb(DEPCA_NICSR);
+ nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
+ outb(nicsr, DEPCA_NICSR);
+
+ if (inw(DEPCA_DATA) != STOP) {
+ return -ENXIO;
+ }
+
+ lp = (struct depca_private *) dev->priv;
+ mem_start = lp->mem_start;
+
+ if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
+ return -ENXIO;
+
+ printk ("%s: %s at 0x%04lx",
+ device->bus_id, depca_signature[lp->adapter], ioaddr);
+
+ switch (lp->depca_bus) {
+#ifdef CONFIG_MCA
+ case DEPCA_BUS_MCA:
+ printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
+ break;
+#endif
+
+#ifdef CONFIG_EISA
+ case DEPCA_BUS_EISA:
+ printk(" (EISA slot %d)", to_eisa_device(device)->slot);
+ break;
+#endif
+
+ case DEPCA_BUS_ISA:
+ break;
+
+ default:
+ printk("Unknown DEPCA bus %d\n", lp->depca_bus);
+ return -ENXIO;
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ }
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet address */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x", dev->dev_addr[i]);
+
+ /* Set up the maximum amount of network RAM(kB) */
+ netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
+ if ((nicsr & _128KB) && (lp->adapter == de422))
+ netRAM = 128;
+
+ /* Shared Memory Base Address */
+ if (nicsr & BUF) {
+ nicsr &= ~BS; /* DEPCA RAM in top 32k */
+ netRAM -= 32;
+
+ /* Only EISA/ISA needs start address to be re-computed */
+ if (lp->depca_bus != DEPCA_BUS_MCA)
+ mem_start += 0x8000;
+ }
+
+ if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
+ > (netRAM << 10)) {
+ printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
+ return -ENXIO;
+ }
+
+ printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
+
+ /* Enable the shadow RAM. */
+ if (lp->adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ spin_lock_init(&lp->lock);
+ sprintf(lp->adapter_name, "%s (%s)",
+ depca_signature[lp->adapter], device->bus_id);
+ status = -EBUSY;
+
+ /* Initialisation Block */
+ if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
+ printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
+ goto out_priv;
+ }
+
+ status = -EIO;
+ lp->sh_mem = ioremap(mem_start, mem_len);
+ if (lp->sh_mem == NULL) {
+ printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
+ goto out1;
+ }
+
+ lp->mem_start = mem_start;
+ lp->mem_len = mem_len;
+ lp->device_ram_start = mem_start & LA_MASK;
+
+ offset = 0;
+ offset += sizeof(struct depca_init);
+
+ /* Tx & Rx descriptors (aligned to a quadword boundary) */
+ offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
+ lp->rx_ring = (struct depca_rx_desc __iomem *) (lp->sh_mem + offset);
+ lp->rx_ring_offset = offset;
+
+ offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
+ lp->tx_ring = (struct depca_tx_desc __iomem *) (lp->sh_mem + offset);
+ lp->tx_ring_offset = offset;
+
+ offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
+
+ lp->buffs_offset = offset;
+
+ /* Finish initialising the ring information. */
+ lp->rxRingMask = NUM_RX_DESC - 1;
+ lp->txRingMask = NUM_TX_DESC - 1;
+
+ /* Calculate Tx/Rx RLEN size for the descriptors. */
+ for (i = 0, j = lp->rxRingMask; j > 0; i++) {
+ j >>= 1;
+ }
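+ /* i is now log2(NUM_RX_DESC); shifted into the top bits it forms
+ the ring length code that is ORed into the init block ring
+ pointer in depca_init_ring() (likewise for the Tx ring below). */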
+ lp->rx_rlen = (s32) (i << 29);
+ for (i = 0, j = lp->txRingMask; j > 0; i++) {
+ j >>= 1;
+ }
+ lp->tx_rlen = (s32) (i << 29);
+
+ /* Load the initialisation block */
+ depca_init_ring(dev);
+
+ /* Initialise the control and status registers */
+ LoadCSRs(dev);
+
+ /* Enable DEPCA board interrupts for autoprobing */
+ nicsr = ((nicsr & ~IM) | IEN);
+ outb(nicsr, DEPCA_NICSR);
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+ unsigned char irqnum;
+ unsigned long irq_mask, delay;
+
+ irq_mask = probe_irq_on();
+
+ /* Assign the correct irq list */
+ switch (lp->adapter) {
+ case DEPCA:
+ case de100:
+ case de101:
+ depca_irq = de1xx_irq;
+ break;
+ case de200:
+ case de201:
+ case de202:
+ case de210:
+ case de212:
+ depca_irq = de2xx_irq;
+ break;
+ case de422:
+ depca_irq = de422_irq;
+ break;
+
+ default:
+ break; /* Not reached */
+ }
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(INEA | INIT, DEPCA_DATA);
+
+ delay = jiffies + HZ/50;
+ while (time_before(jiffies, delay))
+ yield();
+
+ irqnum = probe_irq_off(irq_mask);
+
+ status = -ENXIO;
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ goto out2;
+ } else {
+ for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
+ if (irqnum == depca_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ goto out2;
+ }
+ }
+ } else {
+ printk(" and assigned IRQ%d.\n", dev->irq);
+ }
+
+ if (depca_debug > 1) {
+ printk(version);
+ }
+
+ /* The DEPCA-specific entries in the device structure. */
+ dev->open = &depca_open;
+ dev->hard_start_xmit = &depca_start_xmit;
+ dev->stop = &depca_close;
+ dev->get_stats = &depca_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &depca_ioctl;
+ dev->tx_timeout = depca_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->mem_start = 0;
+
+ device->driver_data = dev;
+ SET_NETDEV_DEV (dev, device);
+
+ status = register_netdev(dev);
+ if (status == 0)
+ return 0;
+out2:
+ iounmap(lp->sh_mem);
+out1:
+ release_mem_region (mem_start, mem_len);
+out_priv:
+ return status;
+}
+
+
+static int depca_open(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ s16 nicsr;
+ int status = 0;
+
+ STOP_DEPCA;
+ nicsr = inb(DEPCA_NICSR);
+
+ /* Make sure the shadow RAM is enabled */
+ if (lp->adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Re-initialize the DEPCA... */
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+
+ depca_dbg_open(dev);
+
+ if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, dev)) {
+ printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /* Enable DEPCA board interrupts and turn off LED */
+ nicsr = ((nicsr & ~IM & ~LED) | IEN);
+ outb(nicsr, DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR);
+
+ netif_start_queue(dev);
+
+ status = InitRestartDepca(dev);
+
+ if (depca_debug > 1) {
+ printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
+ printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
+ }
+ }
+ return status;
+}
+
+/* Initialize the lance Rx and Tx descriptor rings. */
+static void depca_init_ring(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_int i;
+ u_long offset;
+
+ /* Lock out other processes whilst setting up the hardware */
+ netif_stop_queue(dev);
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Initialize the base address and length of each buffer in the ring */
+ for (i = 0; i <= lp->rxRingMask; i++) {
+ offset = lp->buffs_offset + i * RX_BUFF_SZ;
+ writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
+ writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
+ lp->rx_buff[i] = lp->sh_mem + offset;
+ }
+
+ for (i = 0; i <= lp->txRingMask; i++) {
+ offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
+ writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
+ lp->tx_buff[i] = lp->sh_mem + offset;
+ }
+
+ /* Set up the initialization block */
+ lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
+ lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
+
+ SetMulticastFilter(dev);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ }
+
+ lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
+}
+
+
+static void depca_tx_timeout(struct net_device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+
+ printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
+
+ STOP_DEPCA;
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ InitRestartDepca(dev);
+}
+
+
+/*
+** Writes a socket buffer to TX descriptor ring and starts transmission
+*/
+static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int status = 0;
+
+ /* Sanity check the packet before queueing it. */
+ if (skb->len < 1)
+ goto out;
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ status = load_packet(dev, skb);
+
+ if (!status) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ }
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
+ } else
+ status = -1;
+
+ out:
+ return status;
+}
+
+/*
+** The DEPCA interrupt handler.
+*/
+static irqreturn_t depca_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct depca_private *lp;
+ s16 csr0, nicsr;
+ u_long ioaddr;
+
+ if (dev == NULL) {
+ printk("depca_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ lp = (struct depca_private *) dev->priv;
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ /* mask the DEPCA board interrupts and turn on the LED */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= (IM | LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ outw(CSR0, DEPCA_ADDR);
+ csr0 = inw(DEPCA_DATA);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & INTE, DEPCA_DATA);
+
+ if (csr0 & RINT) /* Rx interrupt (packet arrived) */
+ depca_rx(dev);
+
+ if (csr0 & TINT) /* Tx interrupt (packet sent) */
+ depca_tx(dev);
+
+ /* Any resources available? */
+ if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
+ netif_wake_queue(dev);
+ }
+
+ /* Unmask the DEPCA board interrupts and turn off the LED */
+ nicsr = (nicsr & ~IM & ~LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+/* Called with lp->lock held */
+static int depca_rx(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ int i, entry;
+ s32 status;
+
+ for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
+ status = readl(&lp->rx_ring[entry].base) >> 16;
+ if (status & R_STP) { /* Remember start of frame */
+ lp->rx_old = entry;
+ }
+ if (status & R_ENP) { /* Valid frame status */
+ if (status & R_ERR) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & R_FRAM)
+ lp->stats.rx_frame_errors++;
+ if (status & R_OFLO)
+ lp->stats.rx_over_errors++;
+ if (status & R_CRC)
+ lp->stats.rx_crc_errors++;
+ if (status & R_BUFF)
+ lp->stats.rx_fifo_errors++;
+ } else {
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb != NULL) {
+ unsigned char *buf;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ buf = skb_put(skb, pkt_len);
+ skb->dev = dev;
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
+ memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
+ memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
+ } else { /* Linear buffer */
+ memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
+ if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DEPCA_PKT_STAT_SZ;
+ }
+ }
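+ /* Classify the destination: an all-ones address is broadcast,
+ any other group address is multicast, and an exact match on
+ our own address counts as unicast. */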
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s16 *) & buf[0] == -1) && (*(s16 *) & buf[2] == -1) && (*(s16 *) & buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s16 *) & buf[0] == *(s16 *) & dev->dev_addr[0]) && (*(s16 *) & buf[2] == *(s16 *) & dev->dev_addr[2]) && (*(s16 *) & buf[4] == *(s16 *) & dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
+ writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
+ }
+ writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for buffer errors.
+** Called with lp->lock held
+*/
+static int depca_tx(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ int entry;
+ s32 status;
+ u_long ioaddr = dev->base_addr;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = readl(&lp->tx_ring[entry].base) >> 16;
+
+ if (status < 0) { /* Packet not yet sent! */
+ break;
+ } else if (status & T_ERR) { /* An error occurred. */
+ status = readl(&lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (status & TMD3_RTRY)
+ lp->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR)
+ lp->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL)
+ lp->stats.tx_window_errors++;
+ if (status & TMD3_UFLO)
+ lp->stats.tx_fifo_errors++;
+ if (status & (TMD3_BUFF | TMD3_UFLO)) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+ }
+ } else if (status & (T_MORE | T_ONE)) {
+ lp->stats.collisions++;
+ } else {
+ lp->stats.tx_packets++;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
+ }
+
+ return 0;
+}
+
+static int depca_close(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ s16 nicsr;
+ u_long ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ outw(CSR0, DEPCA_ADDR);
+
+ if (depca_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
+ }
+
+ /*
+ ** We stop the DEPCA here -- it occasionally polls
+ ** memory if we don't.
+ */
+ outw(STOP, DEPCA_DATA);
+
+ /*
+ ** Give back the ROM in case the user wants to go to DOS
+ */
+ if (lp->adapter != DEPCA) {
+ nicsr = inb(DEPCA_NICSR);
+ nicsr &= ~SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static void LoadCSRs(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
+ outw((u16) lp->device_ram_start, DEPCA_DATA);
+ outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
+ outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
+ outw(CSR3, DEPCA_ADDR); /* ALE control */
+ outw(ACON, DEPCA_DATA);
+
+ outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
+
+ return;
+}
+
+static int InitRestartDepca(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int i, status = 0;
+
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
+
+ outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
+ outw(INIT, DEPCA_DATA); /* initialize DEPCA */
+
+ /* wait for lance to complete initialisation */
+ for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
+
+ if (i != 100) {
+ /* clear IDON by writing a "1", enable interrupts and start lance */
+ outw(IDON | INEA | STRT, DEPCA_DATA);
+ if (depca_debug > 2) {
+ printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
+ }
+ } else {
+ printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
+ status = -1;
+ }
+
+ return status;
+}
+
+static struct net_device_stats *depca_get_stats(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void set_multicast_list(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ if (dev) {
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
+ lp->init_block.mode |= PROM;
+ } else {
+ SetMulticastFilter(dev);
+ lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
+ }
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Big endian crc one liner is mine, all mine, ha ha ha ha!
+** LANCE calculates its hash codes big endian.
+*/
+static void SetMulticastFilter(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i, j, bit, byte;
+ u16 hashcode;
+ u32 crc;
+
+ if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ lp->init_block.mcast_table[i] = (char) 0xff;
+ }
+ } else {
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
+ lp->init_block.mcast_table[i] = 0;
+ }
+ /* Add multicast addresses */
+ for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = ether_crc(ETH_ALEN, addrs);
+ hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
+ for (j = 0; j < 5; j++) { /* ... in reverse order. */
+ hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
+ }
+
+
+ byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ lp->init_block.mcast_table[byte] |= bit;
+ }
+ }
+ }
+
+ return;
+}
+
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
+{
+ int status = 0;
+
+ if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
+ status = -EBUSY;
+ goto out;
+ }
+
+ if (DevicePresent(ioaddr)) {
+ status = -ENODEV;
+ goto out_release;
+ }
+
+ if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
+ status = -ENOMEM;
+ goto out_release;
+ }
+
+ return 0;
+
+ out_release:
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+
+#ifdef CONFIG_MCA
+/*
+** Microchannel bus I/O device probe
+*/
+static int __init depca_mca_probe(struct device *device)
+{
+ unsigned char pos[2];
+ unsigned char where;
+ unsigned long iobase, mem_start;
+ int irq, err;
+ struct mca_device *mdev = to_mca_device (device);
+ struct net_device *dev;
+ struct depca_private *lp;
+
+ /*
+ ** Search for the adapter. If an address has been given, search
+ ** specifically for the card at that address. Otherwise find the
+ ** first card in the system.
+ */
+
+ pos[0] = mca_device_read_stored_pos(mdev, 2);
+ pos[1] = mca_device_read_stored_pos(mdev, 3);
+
+ /*
+ ** IO of card is handled by bits 1 and 2 of pos0.
+ **
+ ** bit2 bit1 IO
+ ** 0 0 0x2c00
+ ** 0 1 0x2c10
+ ** 1 0 0x2c20
+ ** 1 1 0x2c30
+ */
+ where = (pos[0] & 6) >> 1;
+ iobase = 0x2c00 + (0x10 * where);
+
+ /*
+ ** Found the adapter we were looking for. Now start setting it up.
+ **
+ ** First work on decoding the IRQ. It's stored in the lower 4 bits
+ ** of pos1. Bits are as follows (from the ADF file):
+ **
+ ** Bits
+ ** 3 2 1 0 IRQ
+ ** --------------------
+ ** 0 0 1 0 5
+ ** 0 0 0 1 9
+ ** 0 1 0 0 10
+ ** 1 0 0 0 11
+ */
+ where = pos[1] & 0x0f;
+ switch (where) {
+ case 1:
+ irq = 9;
+ break;
+ case 2:
+ irq = 5;
+ break;
+ case 4:
+ irq = 10;
+ break;
+ case 8:
+ irq = 11;
+ break;
+ default:
+ printk("%s: mca_probe IRQ error. You should never get here (%d).\n", dev->name, where);
+ return -EINVAL;
+ }
+
+ /*
+ ** Shared memory address of adapter is stored in bits 3-5 of pos0.
+ ** They are mapped as follows:
+ **
+ ** Bit
+ ** 5 4 3 Memory Addresses
+ ** 0 0 0 C0000-CFFFF (64K)
+ ** 1 0 0 C8000-CFFFF (32K)
+ ** 0 0 1 D0000-DFFFF (64K)
+ ** 1 0 1 D8000-DFFFF (32K)
+ ** 0 1 0 E0000-EFFFF (64K)
+ ** 1 1 0 E8000-EFFFF (32K)
+ */
+ where = (pos[0] & 0x18) >> 3;
+ mem_start = 0xc0000 + (where * 0x10000);
+ if (pos[0] & 0x20) {
+ mem_start += 0x8000;
+ }
+
+ /* claim the slot */
+ strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
+ sizeof(mdev->name));
+ mca_device_set_claim(mdev, 1);
+
+ /*
+ ** Get everything allocated and initialized... (almost just
+ ** like the ISA and EISA probes)
+ */
+ irq = mca_device_transform_irq(mdev, irq);
+ iobase = mca_device_transform_ioport(mdev, iobase);
+
+ if ((err = depca_common_init (iobase, &dev)))
+ goto out_unclaim;
+
+ dev->irq = irq;
+ dev->base_addr = iobase;
+ lp = dev->priv;
+ lp->depca_bus = DEPCA_BUS_MCA;
+ lp->adapter = depca_mca_adapter_type[mdev->index];
+ lp->mem_start = mem_start;
+
+ if ((err = depca_hw_init(dev, device)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (iobase, DEPCA_TOTAL_SIZE);
+ out_unclaim:
+ mca_device_set_claim(mdev, 0);
+
+ return err;
+}
+#endif
+
+/*
+** ISA bus I/O device probe
+*/
+
+static void depca_platform_release (struct device *device)
+{
+ struct platform_device *pldev;
+
+ /* free device */
+ pldev = to_platform_device (device);
+ kfree (pldev);
+}
+
+static void __init depca_platform_probe (void)
+{
+ int i;
+ struct platform_device *pldev;
+
+ for (i = 0; depca_io_ports[i].iobase; i++) {
+ depca_io_ports[i].device = NULL;
+
+ /* if an address has been specified on the command
+ * line, use it (if valid) */
+ if (io && io != depca_io_ports[i].iobase)
+ continue;
+
+ if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL)))
+ continue;
+
+ memset (pldev, 0, sizeof (*pldev));
+ pldev->name = depca_string;
+ pldev->id = i;
+ pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
+ pldev->dev.release = depca_platform_release;
+ depca_io_ports[i].device = pldev;
+
+ if (platform_device_register (pldev)) {
+ kfree (pldev);
+ depca_io_ports[i].device = NULL;
+ continue;
+ }
+
+ if (!pldev->dev.driver) {
+ /* The driver was not bound to this device, there was
+ * no hardware at this address. Unregister it, as the
+ * release function will take care of freeing the
+ * allocated structure */
+
+ depca_io_ports[i].device = NULL;
+ platform_device_unregister (pldev);
+ }
+ }
+}
+
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
+{
+ u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
+ enum depca_type adapter = unknown;
+ int i;
+
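+ /* Walk the candidate RAM base addresses (or the user supplied
+ "mem" override) until a DEPCA ROM signature is recognised. */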
+ for (i = 0; mem_base[i]; i++) {
+ *mem_start = mem ? mem : mem_base[i];
+ adapter = DepcaSignature (adapter_name, *mem_start);
+ if (adapter != unknown)
+ break;
+ }
+
+ return adapter;
+}
+
+static int __init depca_isa_probe (struct device *device)
+{
+ struct net_device *dev;
+ struct depca_private *lp;
+ u_long ioaddr, mem_start = 0;
+ enum depca_type adapter = unknown;
+ int status = 0;
+
+ ioaddr = (u_long) device->platform_data;
+
+ if ((status = depca_common_init (ioaddr, &dev)))
+ goto out;
+
+ adapter = depca_shmem_probe (&mem_start);
+
+ if (adapter == unknown) {
+ status = -ENODEV;
+ goto out_free;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq; /* Use whatever value the user gave
+ * us, and 0 if he didn't. */
+ lp = dev->priv;
+ lp->depca_bus = DEPCA_BUS_ISA;
+ lp->adapter = adapter;
+ lp->mem_start = mem_start;
+
+ if ((status = depca_hw_init(dev, device)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+
+/*
+** EISA callbacks from sysfs.
+*/
+
+#ifdef CONFIG_EISA
+static int __init depca_eisa_probe (struct device *device)
+{
+ struct eisa_device *edev;
+ struct net_device *dev;
+ struct depca_private *lp;
+ u_long ioaddr, mem_start;
+ int status = 0;
+
+ edev = to_eisa_device (device);
+ ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
+
+ if ((status = depca_common_init (ioaddr, &dev)))
+ goto out;
+
+ /* It would have been nice to get card configuration from the
+ * card. Unfortunately, this register is write-only (shares
+ * its address with the ethernet prom)... As we don't parse
+ * the EISA configuration structures (yet... :-), just rely on
+ * the ISA probing to sort it out... */
+
+ depca_shmem_probe (&mem_start);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ lp = dev->priv;
+ lp->depca_bus = DEPCA_BUS_EISA;
+ lp->adapter = edev->id.driver_data;
+ lp->mem_start = mem_start;
+
+ if ((status = depca_hw_init(dev, device)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+#endif
+
+static int __devexit depca_device_remove (struct device *device)
+{
+ struct net_device *dev;
+ struct depca_private *lp;
+ int bus;
+
+ dev = device->driver_data;
+ lp = dev->priv;
+
+ unregister_netdev (dev);
+ iounmap (lp->sh_mem);
+ release_mem_region (lp->mem_start, lp->mem_len);
+ release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
+ bus = lp->depca_bus;
+ free_netdev (dev);
+
+ return 0;
+}
+
+/*
+** Look for a particular board name in the on-board Remote Diagnostics
+** and Boot (readb) ROM. This will also give us a clue to the network RAM
+** base address.
+*/
+static int __init DepcaSignature(char *name, u_long base_addr)
+{
+ u_int i, j, k;
+ void __iomem *ptr;
+ char tmpstr[16];
+ u_long prom_addr = base_addr + 0xc000;
+ u_long mem_addr = base_addr + 0x8000; /* 32KB */
+
+ /* Can't reserve the prom region, it is already marked as
+ * used, at least on x86. Instead, reserve a memory region a
+ * board would certainly use. If it works, go ahead. If not,
+ * run like hell... */
+
+ if (!request_mem_region (mem_addr, 16, depca_string))
+ return unknown;
+
+ /* Copy the first 16 bytes of ROM */
+
+ ptr = ioremap(prom_addr, 16);
+ if (ptr == NULL) {
+ printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
+ release_mem_region (mem_addr, 16);
+ return unknown;
+ }
+ for (i = 0; i < 16; i++) {
+ tmpstr[i] = readb(ptr + i);
+ }
+ iounmap(ptr);
+
+ release_mem_region (mem_addr, 16);
+
+ /* Check if PROM contains a valid string */
+ for (i = 0; *depca_signature[i] != '\0'; i++) {
+ for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
+ if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k = 0;
+ }
+ }
+ if (k == strlen(depca_signature[i]))
+ break;
+ }
+
+ /* Check if name string is valid, provided there's no PROM */
+ if (name && *name && (i == unknown)) {
+ for (i = 0; *depca_signature[i] != '\0'; i++) {
+ if (strcmp(name, depca_signature[i]) == 0)
+ break;
+ }
+ }
+
+ return i;
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DEPCA products. Note that the original DEPCA needs
+** its ROM address counter to be initialized and enabled. Only enable
+** if the first address octet is a 0x08 - this minimises the chances of
+** messing around with some other hardware, but it assumes that this DEPCA
+** card initialized itself correctly.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+static int __init DevicePresent(u_long ioaddr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength = 0;
+ s8 data;
+ s16 nicsr;
+ int i, j, status = 0;
+
+ data = inb(DEPCA_PROM); /* clear counter on DEPCA */
+ data = inb(DEPCA_PROM); /* read data */
+
+ if (data == 0x08) { /* Enable counter on DEPCA */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= AAC;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
+ data = inb(DEPCA_PROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j = 1;
+ } else {
+ j = 0;
+ }
+ }
+ }
+
+ if (j != sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
+
+/*
+** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
+** reason: access the upper half of the PROM with x=0; access the lower half
+** with x=1.
+*/
+static int __init get_hw_addr(struct net_device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+ struct depca_private *lp = dev->priv;
+ int i, k, tmp, status = 0;
+ u_short j, x, chksum;
+
+ x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
+
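+ /* Read the station address as three little-endian 16-bit words,
+ accumulating a rotating (end-around carry) checksum that must
+ match the 16-bit checksum stored after the address in the PROM. */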
+ for (i = 0, k = 0, j = 0; j < 3; j++) {
+ k <<= 1;
+ if (k > 0xffff)
+ k -= 0xffff;
+
+ k += (u_char) (tmp = inb(DEPCA_PROM + x));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+
+ if (k > 0xffff)
+ k -= 0xffff;
+ }
+ if (k == 0xffff)
+ k = 0;
+
+ chksum = (u_char) inb(DEPCA_PROM + x);
+ chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
+ if (k != chksum)
+ status = -1;
+
+ return status;
+}
+
+/*
+** Load a packet into the shared memory
+*/
+static int load_packet(struct net_device *dev, struct sk_buff *skb)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ int i, entry, end, len, status = 0;
+
+ entry = lp->tx_new; /* Ring around buffer number. */
+ end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
+ if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
+ /*
+ ** Caution: the write order is important here... don't set up the
+ ** ownership rights until all the other information is in place.
+ */
+ if (end < entry) { /* wrapped buffer */
+ len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
+ memcpy_toio(lp->tx_buff[entry], skb->data, len);
+ memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
+ } else { /* linear buffer */
+ memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
+ }
+
+ /* set up the buffer descriptors */
+ len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
+ writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
+ writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
+ len -= TX_BUFF_SZ;
+ }
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
+ writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
+ writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
+
+ /* start of packet */
+ writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
+ /* end of packet */
+ writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
+
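+ /* Hand the descriptors back to the LANCE in reverse order so that
+ the start-of-packet descriptor is given away last; the chip never
+ sees a partially built chain. */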
+ for (i = end; i != entry; --i) {
+ /* ownership of packet */
+ writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
+ if (i == 0)
+ i = lp->txRingMask + 1;
+ }
+ writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
+
+ lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
+ } else {
+ status = -1;
+ }
+
+ return status;
+}
+
+static void depca_dbg_open(struct net_device *dev)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ struct depca_init *p = &lp->init_block;
+ int i;
+
+ if (depca_debug > 1) {
+ /* Do not copy the shadow init block into shared memory */
+ /* Debugging should not affect normal operation! */
+ /* The shadow init block will get copied across during InitRestartDepca */
+ printk("%s: depca open with irq %d\n", dev->name, dev->irq);
+ printk("Descriptor head addresses (CPU):\n");
+ printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
+ printk("Descriptor addresses (CPU):\nRX: ");
+ for (i = 0; i < lp->rxRingMask; i++) {
+ if (i < 3) {
+ printk("%p ", &lp->rx_ring[i].base);
+ }
+ }
+ printk("...%p\n", &lp->rx_ring[i].base);
+ printk("TX: ");
+ for (i = 0; i < lp->txRingMask; i++) {
+ if (i < 3) {
+ printk("%p ", &lp->tx_ring[i].base);
+ }
+ }
+ printk("...%p\n", &lp->tx_ring[i].base);
+ printk("\nDescriptor buffers (Device):\nRX: ");
+ for (i = 0; i < lp->rxRingMask; i++) {
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
+ printk("TX: ");
+ for (i = 0; i < lp->txRingMask; i++) {
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
+ printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
+ printk(" mode: 0x%4.4x\n", p->mode);
+ printk(" physical address: ");
+ for (i = 0; i < ETH_ALEN - 1; i++) {
+ printk("%2.2x:", p->phys_addr[i]);
+ }
+ printk("%2.2x\n", p->phys_addr[i]);
+ printk(" multicast hash table: ");
+ for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
+ printk("%2.2x:", p->mcast_table[i]);
+ }
+ printk("%2.2x\n", p->mcast_table[i]);
+ printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
+ printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
+ printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
+ printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
+ printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
+ outw(CSR2, DEPCA_ADDR);
+ printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
+ outw(CSR1, DEPCA_ADDR);
+ printk("%4.4x\n", inw(DEPCA_DATA));
+ outw(CSR3, DEPCA_ADDR);
+ printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+** None of the multicast IOCTLs work here; they are for testing purposes only.
+*/
+static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct depca_private *lp = (struct depca_private *) dev->priv;
+ struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
+ int i, status = 0;
+ u_long ioaddr = dev->base_addr;
+ union {
+ u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
+ u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+ } tmp;
+ unsigned long flags;
+ void *buf;
+
+ switch (ioc->cmd) {
+ case DEPCA_GET_HWADDR: /* Get the hardware address */
+ for (i = 0; i < ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
+ break;
+
+ case DEPCA_SET_HWADDR: /* Set the hardware address */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
+ return -EFAULT;
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_SET_PROM: /* Set Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode |= PROM; /* Set promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ printk("%s: Boo!\n", dev->name);
+ break;
+
+ case DEPCA_GET_MCA: /* Get the multicast address table */
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
+ return -EFAULT;
+ break;
+
+ case DEPCA_SET_MCA: /* Set a multicast address */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (ioc->len >= HASH_TABLE_LEN)
+ return -EINVAL;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
+ return -EFAULT;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_CLR_MCA: /* Clear all multicast addresses */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_GET_STATS: /* Get the driver statistics */
+ ioc->len = sizeof(lp->pktStats);
+ buf = kmalloc(ioc->len, GFP_KERNEL);
+ if(!buf)
+ return -ENOMEM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(buf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (copy_to_user(ioc->data, buf, ioc->len))
+ status = -EFAULT;
+ kfree(buf);
+ break;
+
+ case DEPCA_CLR_STATS: /* Zero out the driver statistics */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case DEPCA_GET_REG: /* Get the DEPCA Registers */
+ i = 0;
+ tmp.sval[i++] = inw(DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR); /* status register */
+ tmp.sval[i++] = inw(DEPCA_DATA);
+ memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
+ ioc->len = i + sizeof(struct depca_init);
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+static int __init depca_module_init (void)
+{
+ int err = 0;
+
+#ifdef CONFIG_MCA
+ err = mca_register_driver (&depca_mca_driver);
+#endif
+#ifdef CONFIG_EISA
+ err |= eisa_driver_register (&depca_eisa_driver);
+#endif
+ err |= driver_register (&depca_isa_driver);
+ depca_platform_probe ();
+
+ return err;
+}
+
+static void __exit depca_module_exit (void)
+{
+ int i;
+#ifdef CONFIG_MCA
+ mca_unregister_driver (&depca_mca_driver);
+#endif
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&depca_eisa_driver);
+#endif
+ driver_unregister (&depca_isa_driver);
+
+ for (i = 0; depca_io_ports[i].iobase; i++) {
+ if (depca_io_ports[i].device) {
+ platform_device_unregister (depca_io_ports[i].device);
+ depca_io_ports[i].device = NULL;
+ }
+ }
+}
+
+module_init (depca_module_init);
+module_exit (depca_module_exit);
diff --git a/drivers/net/depca.h b/drivers/net/depca.h
new file mode 100644
index 000000000000..11785275a669
--- /dev/null
+++ b/drivers/net/depca.h
@@ -0,0 +1,185 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 David C. Davies. This software may be used and distributed
+ according to the terms of the GNU General Public License, incorporated herein by
+ reference.
+*/
+
+/*
+** I/O addresses. Note that the 2k buffer option is not supported in
+** this driver.
+*/
+#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
+#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
+#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
+#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
+#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
+#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
+#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
+#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
+
+/*
+** These are LANCE registers addressable through DEPCA_ADDR
+*/
+#define CSR0 0
+#define CSR1 1
+#define CSR2 2
+#define CSR3 3
+
+/*
+** NETWORK INTERFACE CSR (NI_CSR) bit definitions
+*/
+
+#define TO 0x0100 /* Time Out for remote boot */
+#define SHE 0x0080 /* SHadow memory Enable */
+#define BS 0x0040 /* Bank Select */
+#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
+#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
+#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
+#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
+#define IM 0x0004 /* Interrupt Mask (1->mask) */
+#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
+#define LED 0x0001 /* LED control */
+
+/*
+** Control and Status Register 0 (CSR0) bit definitions
+*/
+
+#define ERR 0x8000 /* Error summary */
+#define BABL 0x4000 /* Babble transmitter timeout error */
+#define CERR 0x2000 /* Collision Error */
+#define MISS 0x1000 /* Missed packet */
+#define MERR 0x0800 /* Memory Error */
+#define RINT 0x0400 /* Receiver Interrupt */
+#define TINT 0x0200 /* Transmit Interrupt */
+#define IDON 0x0100 /* Initialization Done */
+#define INTR 0x0080 /* Interrupt Flag */
+#define INEA 0x0040 /* Interrupt Enable */
+#define RXON 0x0020 /* Receiver on */
+#define TXON 0x0010 /* Transmitter on */
+#define TDMD 0x0008 /* Transmit Demand */
+#define STOP 0x0004 /* Stop */
+#define STRT 0x0002 /* Start */
+#define INIT 0x0001 /* Initialize */
+#define INTM 0xff00 /* Interrupt Mask */
+#define INTE 0xfff0 /* Interrupt Enable */
+
+/*
+** CONTROL AND STATUS REGISTER 3 (CSR3)
+*/
+
+#define BSWP 0x0004 /* Byte SWaP */
+#define ACON 0x0002 /* ALE control */
+#define BCON 0x0001 /* Byte CONtrol */
+
+/*
+** Initialization Block Mode Register
+*/
+
+#define PROM 0x8000 /* Promiscuous Mode */
+#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
+#define INTL 0x0040 /* Internal Loopback */
+#define DRTY 0x0020 /* Disable Retry */
+#define COLL 0x0010 /* Force Collision */
+#define DTCR 0x0008 /* Disable Transmit CRC */
+#define LOOP 0x0004 /* Loopback */
+#define DTX 0x0002 /* Disable the Transmitter */
+#define DRX 0x0001 /* Disable the Receiver */
+
+/*
+** Receive Message Descriptor 1 (RMD1) bit definitions.
+*/
+
+#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define R_ERR 0x4000 /* Error Summary */
+#define R_FRAM 0x2000 /* Framing Error */
+#define R_OFLO 0x1000 /* Overflow Error */
+#define R_CRC 0x0800 /* CRC Error */
+#define R_BUFF 0x0400 /* Buffer Error */
+#define R_STP 0x0200 /* Start of Packet */
+#define R_ENP 0x0100 /* End of Packet */
+
+/*
+** Transmit Message Descriptor 1 (TMD1) bit definitions.
+*/
+
+#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define T_ERR 0x4000 /* Error Summary */
+#define T_ADD_FCS 0x2000 /* Add an FCS to the transmitted frame */
+#define T_MORE 0x1000 /* >1 retry to transmit packet */
+#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
+#define T_DEF 0x0400 /* Deferred */
+#define T_STP 0x02000000 /* Start of Packet */
+#define T_ENP 0x01000000 /* End of Packet */
+#define T_FLAGS 0xff000000 /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF 0x8000 /* BUFFer error */
+#define TMD3_UFLO 0x4000 /* UnderFLOw error */
+#define TMD3_RES 0x2000 /* REServed */
+#define TMD3_LCOL 0x1000 /* Late COLlision */
+#define TMD3_LCAR 0x0800 /* Loss of CARrier */
+#define TMD3_RTRY 0x0400 /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
+#define IRQ11 0x0040 /* Enable -> 1 */
+#define IRQ10 0x0020 /* Enable -> 1 */
+#define IRQ9 0x0010 /* Enable -> 1 */
+#define IRQ5 0x0008 /* Enable -> 1 */
+#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16 0x0002 /* RAM on 64kB boundary */
+#define PADR17 0x0001 /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN 64 /* Bits */
+#define HASH_BITS 0x003f /* 6 LS bits */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
+#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DEPCAIOCTL SIOCDEVPRIVATE
+
+struct depca_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
+#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
+#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA 0x06 /* Get a multicast address */
+#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
+#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
+#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
+#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
+#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DEPCA_GET_REG 0x0c /* Get the Register contents */
+#define DEPCA_SET_REG 0x0d /* Set the Register contents */
+#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
+
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c
new file mode 100644
index 000000000000..7809838e6c4c
--- /dev/null
+++ b/drivers/net/dgrs.c
@@ -0,0 +1,1617 @@
+/*
+ * Digi RightSwitch SE-X loadable device driver for Linux
+ *
+ * The RightSwitch is a 4 (EISA) or 6 (PCI) port etherswitch and
+ * a NIC on an internal board.
+ *
+ * Author: Rick Richardson, rick@remotepoint.com
+ * Derived from the SVR4.2 (UnixWare) driver for the same card.
+ *
+ * Copyright 1995-1996 Digi International Inc.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * For information on purchasing a RightSwitch SE-4 or SE-6
+ * board, please contact Digi's sales department at 1-612-912-3444
+ * or 1-800-DIGIBRD. Outside the U.S., please check our Web page
+ * at http://www.dgii.com for sales offices worldwide.
+ *
+ * OPERATION:
+ * When compiled as a loadable module, this driver can operate
+ * the board as either a 4/6 port switch with a 5th or 7th port
+ * that is a conventional NIC interface as far as the host is
+ * concerned, OR as 4/6 independent NICs. To select multi-NIC
+ * mode, add "nicmode=1" on the insmod load line for the driver.
+ *
+ * This driver uses the "dev" common ethernet device structure
+ * and a private "priv" (dev->priv) structure that contains
+ * mostly DGRS-specific information and statistics. To keep
+ * the code for both the switch mode and the multi-NIC mode
+ * as similar as possible, I have introduced the concept of
+ * "dev0"/"priv0" and "devN"/"privN" pointer pairs in subroutines
+ * where needed. The first pair of pointers points to the
+ * "dev" and "priv" structures of the zeroth (0th) device
+ * interface associated with a board. The second pair of
+ * pointers points to the current (Nth) device interface
+ * for the board: the one for which we are processing data.
+ *
+ * In switch mode, the pairs of pointers are always the same,
+ * that is, dev0 == devN and priv0 == privN. This is just
+ * like previous releases of this driver which did not support
+ * NIC mode.
+ *
+ * In multi-NIC mode, the pairs of pointers may be different.
+ * We use the devN and privN pointers to reference just the
+ * name, port number, and statistics for the current interface.
+ * We use the dev0 and priv0 pointers to access the variables
+ * that control access to the board, such as board address
+ * and simulated 82596 variables. This is because there is
+ * only one "fake" 82596 that serves as the interface to
+ * the board. We do not want to try to keep the variables
+ * associated with this 82596 in sync across all devices.
+ *
+ * This scheme works well. As you will see, except for
+ * initialization, there is very little difference between
+ * the two modes as far as this driver is concerned. On the
+ * receive side in NIC mode, the interrupt *always* comes in on
+ * the 0th interface (dev0/priv0). We then figure out which
+ * real 82596 port it came in on from looking at the "chan"
+ * member that the board firmware adds at the end of each
+ * RBD (a.k.a. TBD). We get the channel number like this:
+ * int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan;
+ *
+ * On the transmit side in multi-NIC mode, we specify the
+ * output 82596 port by setting the new "dstchan" structure
+ * member that is at the end of the RFD, like this:
+ * priv0->rfdp->dstchan = privN->chan;
+ *
+ * TODO:
+ * - Multi-NIC mode is not yet supported when the driver is linked
+ * into the kernel.
+ * - Better handling of multicast addresses.
+ *
+ * Fixes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+ * - fix dgrs_found_device wrt checking kmalloc return and
+ * rollbacking the partial steps of the whole process when
+ * one of the devices can't be allocated. Fix SET_MODULE_OWNER
+ * on the loop to use devN instead of repeated calls to dev.
+ *
+ * davej <davej@suse.de> - 9/2/2001
+ * - Enable PCI device before reading ioaddr/irq
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+static char version[] __initdata =
+ "$Id: dgrs.c,v 1.13 2000/06/06 04:07:00 rick Exp $";
+
+/*
+ * DGRS include files
+ */
+typedef unsigned char uchar;
+typedef unsigned int bool;
+#define vol volatile
+
+#include "dgrs.h"
+#include "dgrs_es4h.h"
+#include "dgrs_plx9060.h"
+#include "dgrs_i82596.h"
+#include "dgrs_ether.h"
+#include "dgrs_asstruct.h"
+#include "dgrs_bcomm.h"
+
+#ifdef CONFIG_PCI
+static struct pci_device_id dgrs_pci_tbl[] = {
+ { SE6_PCI_VENDOR_ID, SE6_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, dgrs_pci_tbl);
+#endif
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id dgrs_eisa_tbl[] = {
+ { "DBI0A01" },
+ { }
+};
+MODULE_DEVICE_TABLE(eisa, dgrs_eisa_tbl);
+#endif
+
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Firmware. Compiled separately for local compilation,
+ * but #included for Linux distribution.
+ */
+#ifndef NOFW
+ #include "dgrs_firmware.c"
+#else
+ extern int dgrs_firmnum;
+ extern char dgrs_firmver[];
+ extern char dgrs_firmdate[];
+ extern uchar dgrs_code[];
+ extern int dgrs_ncode;
+#endif
+
+/*
+ * Linux out*() is backwards from all other operating systems
+ */
+#define OUTB(ADDR, VAL) outb(VAL, ADDR)
+#define OUTW(ADDR, VAL) outw(VAL, ADDR)
+#define OUTL(ADDR, VAL) outl(VAL, ADDR)
+
+/*
+ * Macros to convert switch to host and host to switch addresses
+ * (assumes a local variable priv points to board dependent struct)
+ */
+#define S2H(A) ( ((unsigned long)(A)&0x00ffffff) + priv0->vmem )
+#define S2HN(A) ( ((unsigned long)(A)&0x00ffffff) + privN->vmem )
+#define H2S(A) ( ((char *) (A) - priv0->vmem) + 0xA3000000 )
+
+/*
+ * Convert a switch address to a "safe" address for use with the
+ * PLX 9060 DMA registers and the associated HW kludge that allows
+ * for host access of the DMA registers.
+ */
+#define S2DMA(A) ( (unsigned long)(A) & 0x00ffffff)
+
+/*
+ * "Space.c" variables, now settable from module interface
+ * Use the name below, minus the "dgrs_" prefix. See init_module().
+ */
+static int dgrs_debug = 1;
+static int dgrs_dma = 1;
+static int dgrs_spantree = -1;
+static int dgrs_hashexpire = -1;
+static uchar dgrs_ipaddr[4] = { 0xff, 0xff, 0xff, 0xff};
+static uchar dgrs_iptrap[4] = { 0xff, 0xff, 0xff, 0xff};
+static __u32 dgrs_ipxnet = -1;
+static int dgrs_nicmode;
+
+/*
+ * Private per-board data structure (dev->priv)
+ */
+typedef struct
+{
+ /*
+ * Stuff for generic ethercard I/F
+ */
+ struct net_device_stats stats;
+
+ /*
+ * DGRS specific data
+ */
+ char *vmem;
+
+ struct bios_comm *bcomm; /* Firmware BIOS comm structure */
+ PORT *port; /* Ptr to PORT[0] struct in VM */
+ I596_SCB *scbp; /* Ptr to SCB struct in VM */
+ I596_RFD *rfdp; /* Current RFD list */
+ I596_RBD *rbdp; /* Current RBD list */
+
+ volatile int intrcnt; /* Count of interrupts */
+
+ /*
+ * SE-4 (EISA) board variables
+ */
+ uchar is_reg; /* EISA: Value for ES4H_IS reg */
+
+ /*
+ * SE-6 (PCI) board variables
+ *
+ * The PLX "expansion rom" space is used for DMA register
+ * access from the host on the SE-6. These are the physical
+ * and virtual addresses of that space.
+ */
+ ulong plxreg; /* Phys address of PLX chip */
+ char *vplxreg; /* Virtual address of PLX chip */
+ ulong plxdma; /* Phys addr of PLX "expansion rom" */
+ ulong volatile *vplxdma; /* Virtual addr of "expansion rom" */
+ int use_dma; /* Flag: use DMA */
+ DMACHAIN *dmadesc_s; /* area for DMA chains (SW addr.) */
+ DMACHAIN *dmadesc_h; /* area for DMA chains (Host Virtual) */
+
+ /*
+ * Multi-NIC mode variables
+ *
+ * All entries of the devtbl[] array are valid for the 0th
+ * device (i.e. eth0, but not eth1...eth5). devtbl[0] is
+ * valid for all devices (i.e. eth0, eth1, ..., eth5).
+ */
+ int nports; /* Number of physical ports (4 or 6) */
+ int chan; /* Channel # (1-6) for this device */
+ struct net_device *devtbl[6]; /* Ptrs to N device structs */
+
+} DGRS_PRIV;
+
+
+/*
+ * reset or un-reset the IDT processor
+ */
+static void
+proc_reset(struct net_device *dev0, int reset)
+{
+ DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv;
+
+ if (priv0->plxreg)
+ {
+ ulong val;
+ val = inl(dev0->base_addr + PLX_MISC_CSR);
+ if (reset)
+ val |= SE6_RESET;
+ else
+ val &= ~SE6_RESET;
+ OUTL(dev0->base_addr + PLX_MISC_CSR, val);
+ }
+ else
+ {
+ OUTB(dev0->base_addr + ES4H_PC, reset ? ES4H_PC_RESET : 0);
+ }
+}
+
+/*
+ * See if the board supports bus master DMA
+ */
+static int
+check_board_dma(struct net_device *dev0)
+{
+ DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv;
+ ulong x;
+
+ /*
+ * If Space.c says not to use DMA, or if it's not a PLX based
+ * PCI board, or if the expansion ROM space is not PCI
+ * configured, then return false.
+ */
+ if (!dgrs_dma || !priv0->plxreg || !priv0->plxdma)
+ return (0);
+
+ /*
+ * Set the local address remap register of the "expansion rom"
+ * area to 0x80000000 so that we can use it to access the DMA
+ * registers from the host side.
+ */
+ OUTL(dev0->base_addr + PLX_ROM_BASE_ADDR, 0x80000000);
+
+ /*
+ * Set the PCI region descriptor to:
+ * Space 0:
+ * disable read-prefetch
+ * enable READY
+ * enable BURST
+ * 0 internal wait states
+ * Expansion ROM: (used for host DMA register access)
+ * disable read-prefetch
+ * enable READY
+ * disable BURST
+ * 0 internal wait states
+ */
+ OUTL(dev0->base_addr + PLX_BUS_REGION, 0x49430343);
+
+ /*
+ * Now map the DMA registers into our virtual space
+ */
+ priv0->vplxdma = (ulong *) ioremap (priv0->plxdma, 256);
+ if (!priv0->vplxdma)
+ {
+ printk("%s: can't *remap() the DMA regs\n", dev0->name);
+ return (0);
+ }
+
+ /*
+ * Now test to see if we can access the DMA registers
+ * If we write -1 and get back 1FFF, then we accessed the
+ * DMA register. Otherwise, we probably have an old board
+ * and wrote into regular RAM.
+ */
+ priv0->vplxdma[PLX_DMA0_MODE/4] = 0xFFFFFFFF;
+ x = priv0->vplxdma[PLX_DMA0_MODE/4];
+ if (x != 0x00001FFF) {
+ iounmap((void *)priv0->vplxdma);
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * Initiate DMA using PLX part on PCI board. Spin the
+ * processor until completed. All addresses are physical!
+ *
+ * If pciaddr is NULL, then it's a chaining DMA, and lcladdr is
+ * the address of the first DMA descriptor in the chain.
+ *
+ * If pciaddr is not NULL, then it's a single DMA.
+ *
+ * In either case, "lcladdr" must have been fixed up to make
+ * sure the MSB isn't set using the S2DMA macro before passing
+ * the address to this routine.
+ */
+static int
+do_plx_dma(
+ struct net_device *dev,
+ ulong pciaddr,
+ ulong lcladdr,
+ int len,
+ int to_host
+)
+{
+ int i;
+ ulong csr = 0;
+ DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv;
+
+ if (pciaddr)
+ {
+ /*
+ * Do a single, non-chain DMA
+ */
+ priv->vplxdma[PLX_DMA0_PCI_ADDR/4] = pciaddr;
+ priv->vplxdma[PLX_DMA0_LCL_ADDR/4] = lcladdr;
+ priv->vplxdma[PLX_DMA0_SIZE/4] = len;
+ priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = to_host
+ ? PLX_DMA_DESC_TO_HOST
+ : PLX_DMA_DESC_TO_BOARD;
+ priv->vplxdma[PLX_DMA0_MODE/4] =
+ PLX_DMA_MODE_WIDTH32
+ | PLX_DMA_MODE_WAITSTATES(0)
+ | PLX_DMA_MODE_READY
+ | PLX_DMA_MODE_NOBTERM
+ | PLX_DMA_MODE_BURST
+ | PLX_DMA_MODE_NOCHAIN;
+ }
+ else
+ {
+ /*
+ * Do a chaining DMA
+ */
+ priv->vplxdma[PLX_DMA0_MODE/4] =
+ PLX_DMA_MODE_WIDTH32
+ | PLX_DMA_MODE_WAITSTATES(0)
+ | PLX_DMA_MODE_READY
+ | PLX_DMA_MODE_NOBTERM
+ | PLX_DMA_MODE_BURST
+ | PLX_DMA_MODE_CHAIN;
+ priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = lcladdr;
+ }
+
+ priv->vplxdma[PLX_DMA_CSR/4] =
+ PLX_DMA_CSR_0_ENABLE | PLX_DMA_CSR_0_START;
+
+ /*
+ * Wait for DMA to complete
+ */
+ for (i = 0; i < 1000000; ++i)
+ {
+ /*
+ * Spin the host CPU for 1 usec, so we don't thrash
+ * the PCI bus while the PLX 9060 is doing DMA.
+ */
+ udelay(1);
+
+ csr = (volatile unsigned long) priv->vplxdma[PLX_DMA_CSR/4];
+
+ if (csr & PLX_DMA_CSR_0_DONE)
+ break;
+ }
+
+ if ( ! (csr & PLX_DMA_CSR_0_DONE) )
+ {
+ printk("%s: DMA done never occurred. DMA disabled.\n",
+ dev->name);
+ priv->use_dma = 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * dgrs_rcv_frame()
+ *
+ * Process a received frame. This is called from the interrupt
+ * routine, and works for both switch mode and multi-NIC mode.
+ *
+ * Note that when in multi-NIC mode, we want to always access the
+ * hardware using the dev and priv structures of the first port,
+ * so that we are using only one set of variables to maintain
+ * the board interface status, but we want to use the Nth port
+ * dev and priv structures to maintain statistics and to pass
+ * the packet up.
+ *
+ * Only the first device structure is attached to the interrupt.
+ * We use the special "chan" variable at the end of the first RBD
+ * to select the Nth device in multi-NIC mode.
+ *
+ * We currently do chained DMA on a per-packet basis when the
+ * packet is "long", and we spin the CPU a short time polling
+ * for DMA completion. This avoids a second interrupt overhead,
+ * and gives the best performance for light traffic to the host.
+ *
+ * However, a better scheme that could be implemented would be
+ * to see how many packets are outstanding for the host, and if
+ * the number is "large", create a long chain to DMA several
+ * packets into the host in one go. In this case, we would set
+ * up some state variables to let the host CPU continue doing
+ * other things until a DMA completion interrupt comes along.
+ */
+static void
+dgrs_rcv_frame(
+ struct net_device *dev0,
+ DGRS_PRIV *priv0,
+ I596_CB *cbp
+)
+{
+ int len;
+ I596_TBD *tbdp;
+ struct sk_buff *skb;
+ uchar *putp;
+ uchar *p;
+ struct net_device *devN;
+ DGRS_PRIV *privN;
+
+ /*
+ * Determine Nth priv and dev structure pointers
+ */
+ if (dgrs_nicmode)
+ { /* Multi-NIC mode */
+ int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan;
+
+ devN = priv0->devtbl[chan-1];
+ /*
+ * If devN is null, we got an interrupt before the I/F
+ * has been initialized. Pitch the packet.
+ */
+ if (devN == NULL)
+ goto out;
+ privN = (DGRS_PRIV *) devN->priv;
+ }
+ else
+ { /* Switch mode */
+ devN = dev0;
+ privN = priv0;
+ }
+
+ if (0) printk("%s: rcv len=%ld\n", devN->name, cbp->xmit.count);
+
+ /*
+ * Allocate a message block big enough to hold the whole frame
+ */
+ len = cbp->xmit.count;
+ if ((skb = dev_alloc_skb(len+5)) == NULL)
+ {
+ printk("%s: dev_alloc_skb failed for rcv buffer\n", devN->name);
+ ++privN->stats.rx_dropped;
+ /* discarding the frame */
+ goto out;
+ }
+ skb->dev = devN;
+ skb_reserve(skb, 2); /* Align IP header */
+
+again:
+ putp = p = skb_put(skb, len);
+
+ /*
+ * There are three modes here for doing the packet copy.
+ * If we have DMA, and the packet is "long", we use the
+ * chaining mode of DMA. If it's shorter, we use single
+ * DMA's. Otherwise, we use memcpy().
+ */
+ if (priv0->use_dma && priv0->dmadesc_h && len > 64)
+ {
+ /*
+ * If we can use DMA and it's a long frame, copy it using
+ * DMA chaining.
+ */
+ DMACHAIN *ddp_h; /* Host virtual DMA desc. pointer */
+ DMACHAIN *ddp_s; /* Switch physical DMA desc. pointer */
+ uchar *phys_p;
+
+ /*
+ * Get the physical address of the skb data area.
+ * NOTE: the buffer comes from dev_alloc_skb(), i.e. kmalloc(),
+ * so it is physically contiguous.
+ */
+ phys_p = (uchar *) virt_to_phys(putp);
+
+ ddp_h = priv0->dmadesc_h;
+ ddp_s = priv0->dmadesc_s;
+ tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp);
+ for (;;)
+ {
+ int count;
+ int amt;
+
+ count = tbdp->count;
+ amt = count & 0x3fff;
+ if (amt == 0)
+ break; /* For safety */
+ if ( (p-putp) >= len)
+ {
+ printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp));
+ proc_reset(dev0, 1); /* Freeze IDT */
+ break; /* For Safety */
+ }
+
+ ddp_h->pciaddr = (ulong) phys_p;
+ ddp_h->lcladdr = S2DMA(tbdp->buf);
+ ddp_h->len = amt;
+
+ phys_p += amt;
+ p += amt;
+
+ if (count & I596_TBD_EOF)
+ {
+ ddp_h->next = PLX_DMA_DESC_TO_HOST
+ | PLX_DMA_DESC_EOC;
+ ++ddp_h;
+ break;
+ }
+ else
+ {
+ ++ddp_s;
+ ddp_h->next = PLX_DMA_DESC_TO_HOST
+ | (ulong) ddp_s;
+ tbdp = (I596_TBD *) S2H(tbdp->next);
+ ++ddp_h;
+ }
+ }
+ if (ddp_h - priv0->dmadesc_h)
+ {
+ int rc;
+
+ rc = do_plx_dma(dev0,
+ 0, (ulong) priv0->dmadesc_s, len, 0);
+ if (rc)
+ {
+ printk("%s: Chained DMA failure\n", devN->name);
+ goto again;
+ }
+ }
+ }
+ else if (priv0->use_dma)
+ {
+ /*
+ * If we can use DMA and it's a shorter frame, copy it
+ * using single DMA transfers.
+ */
+ uchar *phys_p;
+
+ /*
+ * Get the physical address of the skb data area.
+ * NOTE: the buffer comes from dev_alloc_skb(), i.e. kmalloc(),
+ * so it is physically contiguous.
+ */
+ phys_p = (uchar *) virt_to_phys(putp);
+
+ tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp);
+ for (;;)
+ {
+ int count;
+ int amt;
+ int rc;
+
+ count = tbdp->count;
+ amt = count & 0x3fff;
+ if (amt == 0)
+ break; /* For safety */
+ if ( (p-putp) >= len)
+ {
+ printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp));
+ proc_reset(dev0, 1); /* Freeze IDT */
+ break; /* For Safety */
+ }
+ rc = do_plx_dma(dev0, (ulong) phys_p,
+ S2DMA(tbdp->buf), amt, 1);
+ if (rc)
+ {
+ memcpy(p, S2H(tbdp->buf), amt);
+ printk("%s: Single DMA failed\n", devN->name);
+ }
+ phys_p += amt;
+ p += amt;
+ if (count & I596_TBD_EOF)
+ break;
+ tbdp = (I596_TBD *) S2H(tbdp->next);
+ }
+ }
+ else
+ {
+ /*
+ * Otherwise, copy it piece by piece using memcpy()
+ */
+ tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp);
+ for (;;)
+ {
+ int count;
+ int amt;
+
+ count = tbdp->count;
+ amt = count & 0x3fff;
+ if (amt == 0)
+ break; /* For safety */
+ if ( (p-putp) >= len)
+ {
+ printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp));
+ proc_reset(dev0, 1); /* Freeze IDT */
+ break; /* For Safety */
+ }
+ memcpy(p, S2H(tbdp->buf), amt);
+ p += amt;
+ if (count & I596_TBD_EOF)
+ break;
+ tbdp = (I596_TBD *) S2H(tbdp->next);
+ }
+ }
+
+ /*
+ * Pass the frame to upper half
+ */
+ skb->protocol = eth_type_trans(skb, devN);
+ netif_rx(skb);
+ devN->last_rx = jiffies;
+ ++privN->stats.rx_packets;
+ privN->stats.rx_bytes += len;
+
+out:
+ cbp->xmit.status = I596_CB_STATUS_C | I596_CB_STATUS_OK;
+}
+
+/*
+ * Start transmission of a frame
+ *
+ * The interface to the board is simple: we pretend that we are
+ * a fifth 82596 ethernet controller 'receiving' data, and copy the
+ * data into the same structures that a real 82596 would. This way,
+ * the board firmware handles the host 'port' the same as any other.
+ *
+ * NOTE: we do not use Bus master DMA for this routine. Turns out
+ * that it is not needed. Slave writes over the PCI bus are about
+ * as fast as DMA, due to the fact that the PLX part can do burst
+ * writes. The same is not true for data being read from the board.
+ *
+ * For multi-NIC mode, we tell the firmware the desired 82596
+ * output port by setting the special "dstchan" member at the
+ * end of the traditional 82596 RFD structure.
+ */
+
+static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN)
+{
+ DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv;
+ struct net_device *dev0;
+ DGRS_PRIV *priv0;
+ I596_RBD *rbdp;
+ int count;
+ int i, len, amt;
+
+ /*
+ * Determine 0th priv and dev structure pointers
+ */
+ if (dgrs_nicmode)
+ {
+ dev0 = privN->devtbl[0];
+ priv0 = (DGRS_PRIV *) dev0->priv;
+ }
+ else
+ {
+ dev0 = devN;
+ priv0 = privN;
+ }
+
+ if (dgrs_debug > 1)
+ printk("%s: xmit len=%d\n", devN->name, (int) skb->len);
+
+ devN->trans_start = jiffies;
+ netif_start_queue(devN);
+
+ if (priv0->rfdp->cmd & I596_RFD_EL)
+ { /* Out of RFD's */
+ if (0) printk("%s: NO RFD's\n", devN->name);
+ goto no_resources;
+ }
+
+ rbdp = priv0->rbdp;
+ count = 0;
+ priv0->rfdp->rbdp = (I596_RBD *) H2S(rbdp);
+
+ i = 0; len = skb->len;
+ for (;;)
+ {
+ if (rbdp->size & I596_RBD_EL)
+ { /* Out of RBD's */
+ if (0) printk("%s: NO RBD's\n", devN->name);
+ goto no_resources;
+ }
+
+ amt = min_t(unsigned int, len, rbdp->size - count);
+ memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt);
+ i += amt;
+ count += amt;
+ len -= amt;
+ if (len == 0)
+ {
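+ /* Pad short frames out to the 60 byte Ethernet minimum */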
+ if (skb->len < 60)
+ rbdp->count = 60 | I596_RBD_EOF;
+ else
+ rbdp->count = count | I596_RBD_EOF;
+ rbdp = (I596_RBD *) S2H(rbdp->next);
+ goto frame_done;
+ }
+ else if (count < 32)
+ {
+ /* More data to come, but we used less than 32
+ * bytes of this RBD. Keep filling this RBD.
+ */
+ {} /* Yes, we do nothing here */
+ }
+ else
+ {
+ rbdp->count = count;
+ rbdp = (I596_RBD *) S2H(rbdp->next);
+ count = 0;
+ }
+ }
+
+frame_done:
+ priv0->rbdp = rbdp;
+ if (dgrs_nicmode)
+ priv0->rfdp->dstchan = privN->chan;
+ priv0->rfdp->status = I596_RFD_C | I596_RFD_OK;
+ priv0->rfdp = (I596_RFD *) S2H(priv0->rfdp->next);
+
+ ++privN->stats.tx_packets;
+
+ dev_kfree_skb (skb);
+ return (0);
+
+no_resources:
+ priv0->scbp->status |= I596_SCB_RNR; /* simulate I82596 */
+ return (-EAGAIN);
+}
+
+/*
+ * Open the interface
+ */
+static int
+dgrs_open( struct net_device *dev )
+{
+ netif_start_queue(dev);
+ return (0);
+}
+
+/*
+ * Close the interface
+ */
+static int dgrs_close( struct net_device *dev )
+{
+ netif_stop_queue(dev);
+ return (0);
+}
+
+/*
+ * Get statistics
+ */
+static struct net_device_stats *dgrs_get_stats( struct net_device *dev )
+{
+ DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv;
+
+ return (&priv->stats);
+}
+
+/*
+ * Set multicast list and/or promiscuous mode
+ */
+
+static void dgrs_set_multicast_list( struct net_device *dev)
+{
+ DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv;
+
+ priv->port->is_promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
+}
+
+/*
+ * Unique ioctl's
+ */
+static int dgrs_ioctl(struct net_device *devN, struct ifreq *ifr, int cmd)
+{
+ DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv;
+ DGRS_IOCTL ioc;
+ int i;
+
+ if (cmd != DGRSIOCTL)
+ return -EINVAL;
+
+ if(copy_from_user(&ioc, ifr->ifr_data, sizeof(DGRS_IOCTL)))
+ return -EFAULT;
+
+ switch (ioc.cmd)
+ {
+ case DGRS_GETMEM:
+ if (ioc.len != sizeof(ulong))
+ return -EINVAL;
+ if(copy_to_user(ioc.data, &devN->mem_start, ioc.len))
+ return -EFAULT;
+ return (0);
+ case DGRS_SETFILTER:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (ioc.port > privN->bcomm->bc_nports)
+ return -EINVAL;
+ if (ioc.filter >= NFILTERS)
+ return -EINVAL;
+ if (ioc.len > privN->bcomm->bc_filter_area_len)
+ return -EINVAL;
+
+ /* Wait for old command to finish */
+ for (i = 0; i < 1000; ++i)
+ {
+ if ( (volatile long) privN->bcomm->bc_filter_cmd <= 0 )
+ break;
+ udelay(1);
+ }
+ if (i >= 1000)
+ return -EIO;
+
+ privN->bcomm->bc_filter_port = ioc.port;
+ privN->bcomm->bc_filter_num = ioc.filter;
+ privN->bcomm->bc_filter_len = ioc.len;
+
+ if (ioc.len)
+ {
+ if(copy_from_user(S2HN(privN->bcomm->bc_filter_area),
+ ioc.data, ioc.len))
+ return -EFAULT;
+ privN->bcomm->bc_filter_cmd = BC_FILTER_SET;
+ }
+ else
+ privN->bcomm->bc_filter_cmd = BC_FILTER_CLR;
+ return(0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * Process interrupts
+ *
+ * dev, priv will always refer to the 0th device in Multi-NIC mode.
+ */
+
+static irqreturn_t dgrs_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev0 = (struct net_device *) dev_id;
+ DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv;
+ I596_CB *cbp;
+ int cmd;
+ int i;
+
+ ++priv0->intrcnt;
+ if (1) ++priv0->bcomm->bc_cnt[4];
+ if (0)
+ {
+ static int cnt = 100;
+ if (--cnt > 0)
+ printk("%s: interrupt: irq %d\n", dev0->name, irq);
+ }
+
+ /*
+ * Get 596 command
+ */
+ cmd = priv0->scbp->cmd;
+
+ /*
+ * See if RU has been restarted
+ */
+ if ( (cmd & I596_SCB_RUC) == I596_SCB_RUC_START)
+ {
+ if (0) printk("%s: RUC start\n", dev0->name);
+ priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp);
+ priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp);
+ priv0->scbp->status &= ~(I596_SCB_RNR|I596_SCB_RUS);
+ /*
+ * Tell upper half (halves)
+ */
+ if (dgrs_nicmode)
+ {
+ for (i = 0; i < priv0->nports; ++i)
+ netif_wake_queue (priv0->devtbl[i]);
+ }
+ else
+ netif_wake_queue (dev0);
+ /* if (bd->flags & TX_QUEUED)
+ DL_sched(bd, bdd); */
+ }
+
+ /*
+ * See if any CU commands to process
+ */
+ if ( (cmd & I596_SCB_CUC) != I596_SCB_CUC_START)
+ {
+ priv0->scbp->cmd = 0; /* Ignore all other commands */
+ goto ack_intr;
+ }
+ priv0->scbp->status &= ~(I596_SCB_CNA|I596_SCB_CUS);
+
+ /*
+ * Process a command
+ */
+ cbp = (I596_CB *) S2H(priv0->scbp->cbp);
+ priv0->scbp->cmd = 0; /* Safe to clear the command */
+ for (;;)
+ {
+ switch (cbp->nop.cmd & I596_CB_CMD)
+ {
+ case I596_CB_CMD_XMIT:
+ dgrs_rcv_frame(dev0, priv0, cbp);
+ break;
+ default:
+ cbp->nop.status = I596_CB_STATUS_C | I596_CB_STATUS_OK;
+ break;
+ }
+ if (cbp->nop.cmd & I596_CB_CMD_EL)
+ break;
+ cbp = (I596_CB *) S2H(cbp->nop.next);
+ }
+ priv0->scbp->status |= I596_SCB_CNA;
+
+ /*
+ * Ack the interrupt
+ */
+ack_intr:
+ if (priv0->plxreg)
+ OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Download the board firmware
+ */
+static int __init
+dgrs_download(struct net_device *dev0)
+{
+ DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv;
+ int is;
+ unsigned long i;
+
+ static int iv2is[16] = {
+ 0, 0, 0, ES4H_IS_INT3,
+ 0, ES4H_IS_INT5, 0, ES4H_IS_INT7,
+ 0, 0, ES4H_IS_INT10, ES4H_IS_INT11,
+ ES4H_IS_INT12, 0, 0, ES4H_IS_INT15 };
+
+ /*
+ * Map in the dual port memory
+ */
+ priv0->vmem = ioremap(dev0->mem_start, 2048*1024);
+ if (!priv0->vmem)
+ {
+ printk("%s: cannot map in board memory\n", dev0->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Hold the processor and configure the board addresses
+ */
+ if (priv0->plxreg)
+ { /* PCI bus */
+ proc_reset(dev0, 1);
+ }
+ else
+ { /* EISA bus */
+ is = iv2is[dev0->irq & 0x0f];
+ if (!is)
+ {
+ printk("%s: Illegal IRQ %d\n", dev0->name, dev0->irq);
+ iounmap(priv0->vmem);
+ priv0->vmem = NULL;
+ return -ENXIO;
+ }
+ OUTB(dev0->base_addr + ES4H_AS_31_24,
+ (uchar) (dev0->mem_start >> 24) );
+ OUTB(dev0->base_addr + ES4H_AS_23_16,
+ (uchar) (dev0->mem_start >> 16) );
+ priv0->is_reg = ES4H_IS_LINEAR | is |
+ ((uchar) (dev0->mem_start >> 8) & ES4H_IS_AS15);
+ OUTB(dev0->base_addr + ES4H_IS, priv0->is_reg);
+ OUTB(dev0->base_addr + ES4H_EC, ES4H_EC_ENABLE);
+ OUTB(dev0->base_addr + ES4H_PC, ES4H_PC_RESET);
+ OUTB(dev0->base_addr + ES4H_MW, ES4H_MW_ENABLE | 0x00);
+ }
+
+ /*
+ * See if we can do DMA on the SE-6
+ */
+ priv0->use_dma = check_board_dma(dev0);
+ if (priv0->use_dma)
+ printk("%s: Bus Master DMA is enabled.\n", dev0->name);
+
+ /*
+ * Load and verify the code at the desired address
+ */
+ memcpy(priv0->vmem, dgrs_code, dgrs_ncode); /* Load code */
+ if (memcmp(priv0->vmem, dgrs_code, dgrs_ncode))
+ {
+ iounmap(priv0->vmem);
+ priv0->vmem = NULL;
+ printk("%s: download compare failed\n", dev0->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Configurables
+ */
+ priv0->bcomm = (struct bios_comm *) (priv0->vmem + 0x0100);
+ priv0->bcomm->bc_nowait = 1; /* Tell board to make printf not wait */
+ priv0->bcomm->bc_squelch = 0; /* Flag from Space.c */
+ priv0->bcomm->bc_150ohm = 0; /* Flag from Space.c */
+
+ priv0->bcomm->bc_spew = 0; /* Debug flag from Space.c */
+ priv0->bcomm->bc_maxrfd = 0; /* Debug flag from Space.c */
+ priv0->bcomm->bc_maxrbd = 0; /* Debug flag from Space.c */
+
+ /*
+ * Tell board we are operating in switch mode (1) or in
+ * multi-NIC mode (2).
+ */
+ priv0->bcomm->bc_host = dgrs_nicmode ? BC_MULTINIC : BC_SWITCH;
+
+ /*
+ * Request memory space on board for DMA chains
+ */
+ if (priv0->use_dma)
+ priv0->bcomm->bc_hostarea_len = (2048/64) * 16;
+
+ /*
+ * NVRAM configurables from Space.c
+ */
+ priv0->bcomm->bc_spantree = dgrs_spantree;
+ priv0->bcomm->bc_hashexpire = dgrs_hashexpire;
+ memcpy(priv0->bcomm->bc_ipaddr, dgrs_ipaddr, 4);
+ memcpy(priv0->bcomm->bc_iptrap, dgrs_iptrap, 4);
+ memcpy(priv0->bcomm->bc_ipxnet, &dgrs_ipxnet, 4);
+
+ /*
+ * Release processor, wait 8 seconds for board to initialize
+ */
+ proc_reset(dev0, 0);
+
+ for (i = jiffies + 8 * HZ; time_after(i, jiffies); )
+ {
+ barrier(); /* Gcc 2.95 needs this */
+ if (priv0->bcomm->bc_status >= BC_RUN)
+ break;
+ }
+
+ if (priv0->bcomm->bc_status < BC_RUN)
+ {
+ printk("%s: board not operating\n", dev0->name);
+ iounmap(priv0->vmem);
+ priv0->vmem = NULL;
+ return -ENXIO;
+ }
+
+ priv0->port = (PORT *) S2H(priv0->bcomm->bc_port);
+ priv0->scbp = (I596_SCB *) S2H(priv0->port->scbp);
+ priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp);
+ priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp);
+
+ priv0->scbp->status = I596_SCB_CNA; /* CU is idle */
+
+ /*
+ * Get switch physical and host virtual pointers to DMA
+ * chaining area. NOTE: the MSB of the switch physical
+ * address *must* be turned off. Otherwise, the HW kludge
+ * that allows host access of the PLX DMA registers will
+ * erroneously select the PLX registers.
+ */
+ priv0->dmadesc_s = (DMACHAIN *) S2DMA(priv0->bcomm->bc_hostarea);
+ if (priv0->dmadesc_s)
+ priv0->dmadesc_h = (DMACHAIN *) S2H(priv0->dmadesc_s);
+ else
+ priv0->dmadesc_h = NULL;
+
+ /*
+ * Enable board interrupts
+ */
+ if (priv0->plxreg)
+ { /* PCI bus */
+ OUTL(dev0->base_addr + PLX_INT_CSR,
+ inl(dev0->base_addr + PLX_INT_CSR)
+ | PLX_PCI_DOORBELL_IE); /* Enable intr to host */
+ OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1);
+ }
+ else
+ { /* EISA bus */
+ }
+
+ return (0);
+}
+
+/*
+ * Probe (init) a board
+ */
+static int __init
+dgrs_probe1(struct net_device *dev)
+{
+ DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv;
+ unsigned long i;
+ int rc;
+
+ printk("%s: Digi RightSwitch io=%lx mem=%lx irq=%d plx=%lx dma=%lx\n",
+ dev->name, dev->base_addr, dev->mem_start, dev->irq,
+ priv->plxreg, priv->plxdma);
+
+ /*
+ * Download the firmware and light the processor
+ */
+ rc = dgrs_download(dev);
+ if (rc)
+ goto err_out;
+
+ /*
+ * Get ether address of board
+ */
+ printk("%s: Ethernet address", dev->name);
+ memcpy(dev->dev_addr, priv->port->ethaddr, 6);
+ for (i = 0; i < 6; ++i)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ printk("\n");
+
+ if (dev->dev_addr[0] & 1)
+ {
+ printk("%s: Illegal Ethernet Address\n", dev->name);
+ rc = -ENXIO;
+ goto err_out;
+ }
+
+ /*
+ * ACK outstanding interrupts, hook the interrupt,
+ * and verify that we are getting interrupts from the board.
+ */
+ if (priv->plxreg)
+ OUTL(dev->base_addr + PLX_LCL2PCI_DOORBELL, 1);
+
+ rc = request_irq(dev->irq, &dgrs_intr, SA_SHIRQ, "RightSwitch", dev);
+ if (rc)
+ goto err_out;
+
+ priv->intrcnt = 0;
+ for (i = jiffies + 2*HZ + HZ/2; time_after(i, jiffies); )
+ {
+ cpu_relax();
+ if (priv->intrcnt >= 2)
+ break;
+ }
+ if (priv->intrcnt < 2)
+ {
+ printk(KERN_ERR "%s: Not interrupting on IRQ %d (%d)\n",
+ dev->name, dev->irq, priv->intrcnt);
+ rc = -ENXIO;
+ goto err_free_irq;
+ }
+
+ /*
+ * Entry points...
+ */
+ dev->open = &dgrs_open;
+ dev->stop = &dgrs_close;
+ dev->get_stats = &dgrs_get_stats;
+ dev->hard_start_xmit = &dgrs_start_xmit;
+ dev->set_multicast_list = &dgrs_set_multicast_list;
+ dev->do_ioctl = &dgrs_ioctl;
+
+ return rc;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_out:
+ return rc;
+}
+
+static int __init
+dgrs_initclone(struct net_device *dev)
+{
+ DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv;
+ int i;
+
+ printk("%s: Digi RightSwitch port %d ",
+ dev->name, priv->chan);
+ for (i = 0; i < 6; ++i)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ printk("\n");
+
+ return (0);
+}
+
+static struct net_device * __init
+dgrs_found_device(
+ int io,
+ ulong mem,
+ int irq,
+ ulong plxreg,
+ ulong plxdma,
+ struct device *pdev
+)
+{
+ DGRS_PRIV *priv;
+ struct net_device *dev;
+ int i, ret = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof(DGRS_PRIV));
+ if (!dev)
+ goto err0;
+
+ priv = (DGRS_PRIV *)dev->priv;
+
+ dev->base_addr = io;
+ dev->mem_start = mem;
+ dev->mem_end = mem + 2048 * 1024 - 1;
+ dev->irq = irq;
+ priv->plxreg = plxreg;
+ priv->plxdma = plxdma;
+ priv->vplxdma = NULL;
+
+ priv->chan = 1;
+ priv->devtbl[0] = dev;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, pdev);
+
+ ret = dgrs_probe1(dev);
+ if (ret)
+ goto err1;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto err2;
+
+ if ( !dgrs_nicmode )
+ return dev; /* Switch mode, we are done */
+
+ /*
+ * Operating card as N separate NICs
+ */
+
+ priv->nports = priv->bcomm->bc_nports;
+
+ for (i = 1; i < priv->nports; ++i)
+ {
+ struct net_device *devN;
+ DGRS_PRIV *privN;
+ /* Allocate new dev and priv structures */
+ devN = alloc_etherdev(sizeof(DGRS_PRIV));
+ ret = -ENOMEM;
+ if (!devN)
+ goto fail;
+
+ /* Don't copy the network device structure! */
+
+ /* copy the priv structure of dev[0] */
+ privN = (DGRS_PRIV *)devN->priv;
+ *privN = *priv;
+
+ /* ... and zero out VM areas */
+ privN->vmem = NULL;
+ privN->vplxdma = NULL;
+ /* ... and zero out IRQ */
+ devN->irq = 0;
+ /* ... and base MAC address off address of 1st port */
+ memcpy(devN->dev_addr, dev->dev_addr, 6);
+ devN->dev_addr[5] += i;
+
+ ret = dgrs_initclone(devN);
+ if (ret)
+ goto fail;
+
+ SET_MODULE_OWNER(devN);
+ SET_NETDEV_DEV(devN, pdev);
+
+ ret = register_netdev(devN);
+ if (ret) {
+ free_netdev(devN);
+ goto fail;
+ }
+ privN->chan = i+1;
+ priv->devtbl[i] = devN;
+ }
+ return dev;
+
+ fail:
+ /* Undo only the clone ports that were fully registered */
+ while (--i >= 1) {
+ struct net_device *d = priv->devtbl[i];
+ unregister_netdev(d);
+ free_netdev(d);
+ }
+ unregister_netdev(dev);
+
+ err2:
+ free_irq(dev->irq, dev);
+ err1:
+ free_netdev(dev);
+ err0:
+ return ERR_PTR(ret);
+}
+
+static void __devexit dgrs_remove(struct net_device *dev)
+{
+ DGRS_PRIV *priv = dev->priv;
+ int i;
+
+ unregister_netdev(dev);
+
+ for (i = 1; i < priv->nports; ++i) {
+ struct net_device *d = priv->devtbl[i];
+ if (d) {
+ unregister_netdev(d);
+ free_netdev(d);
+ }
+ }
+
+ proc_reset(priv->devtbl[0], 1);
+
+ if (priv->vmem)
+ iounmap(priv->vmem);
+ if (priv->vplxdma)
+ iounmap((uchar *) priv->vplxdma);
+
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+}
+
+#ifdef CONFIG_PCI
+static int __init dgrs_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ int err;
+ uint io;
+ uint mem;
+ uint irq;
+ uint plxreg;
+ uint plxdma;
+
+ /*
+ * Get and check the bus-master and latency values.
+ * Some PCI BIOSes fail to set the master-enable bit,
+ * and the latency timer must be set to the maximum
+ * value to avoid data corruption that occurs when the
+ * timer expires during a transfer. Yes, it's a bug.
+ */
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, "RightSwitch");
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
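+ /*
+ * BAR 0 is the PLX 9060 register space, BAR 1 the I/O ports, and
+ * BAR 2 the dual port memory. Config offset 0x30 is the expansion
+ * ROM BAR, which this board reuses as the host window onto the
+ * PLX DMA registers (see the comment below).
+ */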
+ plxreg = pci_resource_start (pdev, 0);
+ io = pci_resource_start (pdev, 1);
+ mem = pci_resource_start (pdev, 2);
+ pci_read_config_dword(pdev, 0x30, &plxdma);
+ irq = pdev->irq;
+ plxdma &= ~15;
+
+ /*
+ * On some BIOSES, the PLX "expansion rom" (used for DMA)
+ * address comes up as "0". This is probably because
+ * the BIOS doesn't see a valid 55 AA ROM signature at
+ * the "ROM" start and zeroes the address. To get
+ * around this problem the SE-6 is configured to ask
+ * for 4 MB of space for the dual port memory. We then
+ * must set its range back to 2 MB, and use the upper
+ * half for DMA register access
+ */
+ OUTL(io + PLX_SPACE0_RANGE, 0xFFE00000L);
+ if (plxdma == 0)
+ plxdma = mem + (2048L * 1024L);
+ pci_write_config_dword(pdev, 0x30, plxdma + 1);
+ pci_read_config_dword(pdev, 0x30, &plxdma);
+ plxdma &= ~15;
+
+ dev = dgrs_found_device(io, mem, irq, plxreg, plxdma, &pdev->dev);
+ if (IS_ERR(dev)) {
+ pci_release_regions(pdev);
+ return PTR_ERR(dev);
+ }
+
+ pci_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static void __devexit dgrs_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ dgrs_remove(dev);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+}
+
+static struct pci_driver dgrs_pci_driver = {
+ .name = "dgrs",
+ .id_table = dgrs_pci_tbl,
+ .probe = dgrs_pci_probe,
+ .remove = __devexit_p(dgrs_pci_remove),
+};
+#endif
+
+
+#ifdef CONFIG_EISA
+static int is2iv[8] __initdata = { 0, 3, 5, 7, 10, 11, 12, 15 };
+
+static int __init dgrs_eisa_probe (struct device *gendev)
+{
+ struct net_device *dev;
+ struct eisa_device *edev = to_eisa_device(gendev);
+ uint io = edev->base_addr;
+ uint mem;
+ uint irq;
+ int rc = -ENODEV; /* Not EISA configured */
+
+ if (!request_region(io, 256, "RightSwitch")) {
+ printk(KERN_ERR "dgrs: eisa io 0x%x, which is busy.\n", io);
+ return -EBUSY;
+ }
+
+ if ( ! (inb(io+ES4H_EC) & ES4H_EC_ENABLE) )
+ goto err_out;
+
+ mem = (inb(io+ES4H_AS_31_24) << 24)
+ + (inb(io+ES4H_AS_23_16) << 16);
+
+ irq = is2iv[ inb(io+ES4H_IS) & ES4H_IS_INTMASK ];
+
+ dev = dgrs_found_device(io, mem, irq, 0L, 0L, gendev);
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ goto err_out;
+ }
+
+ gendev->driver_data = dev;
+ return 0;
+ err_out:
+ release_region(io, 256);
+ return rc;
+}
+
+static int __devexit dgrs_eisa_remove(struct device *gendev)
+{
+ struct net_device *dev = gendev->driver_data;
+
+ dgrs_remove(dev);
+
+ release_region(dev->base_addr, 256);
+
+ free_netdev(dev);
+ return 0;
+}
+
+
+static struct eisa_driver dgrs_eisa_driver = {
+ .id_table = dgrs_eisa_tbl,
+ .driver = {
+ .name = "dgrs",
+ .probe = dgrs_eisa_probe,
+ .remove = __devexit_p(dgrs_eisa_remove),
+ }
+};
+#endif
+
+/*
+ * Variables that can be overridden from the module command line
+ */
+static int debug = -1;
+static int dma = -1;
+static int hashexpire = -1;
+static int spantree = -1;
+static int ipaddr[4] = { -1 };
+static int iptrap[4] = { -1 };
+static __u32 ipxnet = -1;
+static int nicmode = -1;
+
+module_param(debug, int, 0);
+module_param(dma, int, 0);
+module_param(hashexpire, int, 0);
+module_param(spantree, int, 0);
+module_param_array(ipaddr, int, NULL, 0);
+module_param_array(iptrap, int, NULL, 0);
+module_param(ipxnet, int, 0);
+module_param(nicmode, int, 0);
+MODULE_PARM_DESC(debug, "Digi RightSwitch enable debugging (0-1)");
+MODULE_PARM_DESC(dma, "Digi RightSwitch enable BM DMA (0-1)");
+MODULE_PARM_DESC(nicmode, "Digi RightSwitch operating mode (1: switch, 2: multi-NIC)");
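+
+/*
+ * Example (hypothetical values): loading the driver in multi-NIC
+ * mode with debugging enabled:
+ *
+ *	modprobe dgrs nicmode=2 debug=1 ipaddr=192,0,2,1
+ */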
+
+static int __init dgrs_init_module (void)
+{
+ int i;
+ int eisacount = 0, pcicount = 0;
+
+ /*
+ * Command line variable overrides
+ * debug=NNN
+ * dma=0/1
+ * spantree=0/1
+ * hashexpire=NNN
+ * ipaddr=A,B,C,D
+ * iptrap=A,B,C,D
+ * ipxnet=NNN
+ * nicmode=NNN
+ */
+ if (debug >= 0)
+ dgrs_debug = debug;
+ if (dma >= 0)
+ dgrs_dma = dma;
+ if (nicmode >= 0)
+ dgrs_nicmode = nicmode;
+ if (hashexpire >= 0)
+ dgrs_hashexpire = hashexpire;
+ if (spantree >= 0)
+ dgrs_spantree = spantree;
+ if (ipaddr[0] != -1)
+ for (i = 0; i < 4; ++i)
+ dgrs_ipaddr[i] = ipaddr[i];
+ if (iptrap[0] != -1)
+ for (i = 0; i < 4; ++i)
+ dgrs_iptrap[i] = iptrap[i];
+ if (ipxnet != -1)
+ dgrs_ipxnet = htonl( ipxnet );
+
+ if (dgrs_debug)
+ {
+ printk(KERN_INFO "dgrs: SW=%s FW=Build %d %s\nFW Version=%s\n",
+ version, dgrs_firmnum, dgrs_firmdate, dgrs_firmver);
+ }
+
+ /*
+ * Find and configure all the cards
+ */
+#ifdef CONFIG_EISA
+ eisacount = eisa_driver_register(&dgrs_eisa_driver);
+ if (eisacount < 0)
+ return eisacount;
+#endif
+#ifdef CONFIG_PCI
+ pcicount = pci_register_driver(&dgrs_pci_driver);
+ if (pcicount)
+ return pcicount;
+#endif
+ return 0;
+}
+
+static void __exit dgrs_cleanup_module (void)
+{
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&dgrs_eisa_driver);
+#endif
+#ifdef CONFIG_PCI
+ pci_unregister_driver (&dgrs_pci_driver);
+#endif
+}
+
+module_init(dgrs_init_module);
+module_exit(dgrs_cleanup_module);
diff --git a/drivers/net/dgrs.h b/drivers/net/dgrs.h
new file mode 100644
index 000000000000..c347cd117409
--- /dev/null
+++ b/drivers/net/dgrs.h
@@ -0,0 +1,38 @@
+/*
+ * ioctl's for the Digi Intl. RightSwitch
+ *
+ * These network driver ioctl's are a bit obtuse compared to the usual
+ * ioctl's for a "normal" device driver. Hey, I didn't invent it.
+ *
+ * Typical use:
+ *
+ * struct ifreq ifr;
+ * DGRS_IOCTL ioc;
+ * int x;
+ *
+ * strcpy(ifr.ifr_name, "eth1");
+ * ifr.ifr_data = (caddr_t) &ioc;
+ * ioc.cmd = DGRS_GETMEM;
+ * ioc.len = sizeof(x);
+ * ioc.data = (caddr_t) &x;
+ * rc = ioctl(fd, DGRSIOCTL, &ifr);
+ * printf("rc=%d mem=%x\n", rc, x);
+ *
+ */
+#include <linux/sockios.h>
+
+#define DGRSIOCTL SIOCDEVPRIVATE
+
+typedef struct dgrs_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+ unsigned short port; /* port number for command, if needed */
+ unsigned short filter; /* filter number for command, if needed */
+} DGRS_IOCTL;
+
+/*
+ * Commands for the driver
+ */
+#define DGRS_GETMEM 0x01 /* Get the dual port memory address */
+#define DGRS_SETFILTER 0x02 /* Set a filter */
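+
+/*
+ * A second sketch, for DGRS_SETFILTER (illustrative only; "ifr" is
+ * set up exactly as in the example above, and the filter image
+ * format itself is defined by the board firmware):
+ *
+ *	ioc.cmd = DGRS_SETFILTER;
+ *	ioc.port = 1;			// checked against the board's port count
+ *	ioc.filter = 0;			// 0 = input filter, 1 = output filter
+ *	ioc.len = filter_len;		// a length of 0 clears the filter
+ *	ioc.data = filter_image;
+ *	rc = ioctl(fd, DGRSIOCTL, &ifr);	// requires CAP_NET_ADMIN
+ */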
diff --git a/drivers/net/dgrs_asstruct.h b/drivers/net/dgrs_asstruct.h
new file mode 100644
index 000000000000..a8e5bb5ef534
--- /dev/null
+++ b/drivers/net/dgrs_asstruct.h
@@ -0,0 +1,37 @@
+/*
+ * For declaring structures shared with assembly routines
+ *
+ * $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $
+ */
+
+#ifdef ASSEMBLER
+
+# define MO(t,a) (a)
+# define VMO(t,a) (a)
+
+# define BEGIN_STRUCT(x) _Off=0
+# define S1A(t,x,n) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+(1*n)
+# define S2A(t,x,n) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+(2*n)
+# define S4A(t,x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n)
+# define WORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4
+# define WORDA(x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n)
+# define VWORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4
+# define S1(t,x) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+1
+# define S2(t,x) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+2
+# define S4(t,x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4
+# define END_STRUCT(x) _Off=(_Off+3)&~3; x=_Off
+
+#else /* C */
+
+#define VMO(t,a) (*(volatile t *)(a))
+
+# define BEGIN_STRUCT(x) struct x {
+# define S1(t,x) t x ;
+# define S1A(t,x,n) t x[n] ;
+# define S2(t,x) t x ;
+# define S2A(t,x,n) t x[n] ;
+# define S4(t,x) t x ;
+# define S4A(t,x,n) t x[n] ;
+# define END_STRUCT(x) } ;
+
+#endif
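+
+/*
+ * Usage sketch (illustrative): a declaration such as
+ *
+ *	BEGIN_STRUCT(foo)
+ *		S4(ulong, f_count)
+ *		S1A(char, f_name, 8)
+ *	END_STRUCT(foo)
+ *
+ * expands to "struct foo { ulong f_count; char f_name[8]; };" when
+ * compiled as C, and to byte-offset symbols (f_count=0, f_name=4,
+ * foo=12) when run through the assembler. See dgrs_bcomm.h for the
+ * real instance (struct bios_comm).
+ */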
diff --git a/drivers/net/dgrs_bcomm.h b/drivers/net/dgrs_bcomm.h
new file mode 100644
index 000000000000..6646608811cd
--- /dev/null
+++ b/drivers/net/dgrs_bcomm.h
@@ -0,0 +1,148 @@
+/*
+ * The bios low-memory structure
+ *
+ * Some of the variables in here can be used to set parameters that
+ * are stored in NVRAM and will retain their old values the next time
+ * the card is brought up. To use the values stored in NVRAM, the
+ * parameter should be set to "all ones". This tells the firmware to
+ * use the NVRAM value or a suitable default. The value that is used
+ * will be stored back into this structure by the firmware. If the
+ * value of the variable is not "all ones", then that value will be
+ * used and will be stored into NVRAM if it isn't already there.
+ * The variables this applies to are the following:
+ * Variable Set to: Gets default of:
+ * bc_hashexpire -1 300 (5 minutes)
+ * bc_spantree -1 1 (spanning tree on)
+ * bc_ipaddr FF:FF:FF:FF 0 (no SNMP IP address)
+ * bc_ipxnet FF:FF:FF:FF 0 (no SNMP IPX net)
+ * bc_iptrap FF:FF:FF:FF 0 (no SNMP IP trap address)
+ *
+ * Some variables MUST have their value set after the firmware
+ * is loaded onto the board, but before the processor is released.
+ * These are:
+ * bc_host 0 means no host "port", run as standalone switch.
+ * 1 means run as a switch, with a host port. (normal)
+ * 2 means run as multiple NICs, not as a switch.
+ * -1 means run in diagnostics mode.
+ * bc_nowait
+ * bc_hostarea_len
+ * bc_filter_len
+ *
+ */
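+
+/*
+ * For example, a sketch of what the host driver (dgrs_download()
+ * in dgrs.c) does between loading the firmware and releasing the
+ * processor ("len" here is whatever chain area size it wants):
+ *
+ *	priv->bcomm->bc_nowait = 1;
+ *	priv->bcomm->bc_host = BC_SWITCH;	// or BC_MULTINIC
+ *	priv->bcomm->bc_hostarea_len = len;	// only if DMA chaining is used
+ *	proc_reset(dev, 0);			// let the firmware run
+ */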
+BEGIN_STRUCT(bios_comm)
+ S4(ulong, bc_intflag) /* Count of all interrupts */
+ S4(ulong, bc_lbolt) /* Count of timer interrupts */
+ S4(ulong, bc_maincnt) /* Count of main loops */
+ S4(ulong, bc_hashcnt) /* Count of entries in hash table */
+ S4A(ulong, bc_cnt, 8) /* Misc counters, for debugging */
+ S4A(ulong, bc_flag, 8) /* Misc flags, for debugging */
+ S4(ulong, bc_memsize) /* Size of memory */
+ S4(ulong, bc_dcache) /* Size of working dcache */
+ S4(ulong, bc_icache) /* Size of working icache */
+ S4(long, bc_status) /* Firmware status */
+ S1A(char, bc_file, 8) /* File name of assertion failure */
+ S4(ulong, bc_line) /* Line # of assertion failure */
+ S4(uchar *, bc_ramstart)
+ S4(uchar *, bc_ramend)
+ S4(uchar *, bc_heapstart) /* Start of heap (end of loaded memory) */
+ S4(uchar *, bc_heapend) /* End of heap */
+
+ /* Configurable Parameters */
+ S4(long, bc_host) /* 1=Host Port, 0=No Host Port, -1=Test Mode */
+ S4(long, bc_nowait) /* Don't wait for 2host circ buffer to empty*/
+ S4(long, bc_150ohm) /* 0 == 100 ohm UTP, 1 == 150 ohm STP */
+ S4(long, bc_squelch) /* 0 == normal squelch, 1 == reduced squelch */
+ S4(ulong, bc_hashexpire) /* Expiry time in seconds for hash table */
+ S4(long, bc_spantree) /* 1 == enable IEEE spanning tree */
+
+ S2A(ushort, bc_eaddr, 3) /* New ether address */
+ S2(ushort, bc_dummy1) /* padding for DOS compilers */
+
+ /* Various debugging aids */
+ S4(long, bc_debug) /* Debugging is turned on */
+ S4(long, bc_spew) /* Spew data on port 4 for bc_spew seconds */
+ S4(long, bc_spewlen) /* Length of spewed data packets */
+ S4(long, bc_maxrfd) /* If != 0, max number of RFD's to allocate */
+ S4(long, bc_maxrbd) /* If != 0, max number of RBD's to allocate */
+
+ /* Circular buffers for messages to/from host */
+ S4(ulong, bc_2host_head)
+ S4(ulong, bc_2host_tail)
+ S4(ulong, bc_2host_mask)
+ S1A(char, bc_2host, 0x200) /* Circ buff to host */
+
+ S4(ulong, bc_2idt_head)
+ S4(ulong, bc_2idt_tail)
+ S4(ulong, bc_2idt_mask)
+ S1A(char, bc_2idt, 0x200) /* Circ buff to idt */
+
+ /* Pointers to structures for driver access */
+ S4(uchar *, bc_port) /* pointer to Port[] structures */
+ S4(long, bc_nports) /* Number of ports */
+ S4(long, bc_portlen) /* sizeof(PORT) */
+ S4(uchar *, bc_hash) /* Pointer to hash table */
+ S4(long, bc_hashlen) /* sizeof(Table) */
+
+ /* SNMP agent addresses */
+ S1A(uchar, bc_ipaddr, 4) /* IP address for SNMP */
+ S1A(uchar, bc_ipxnet, 4) /* IPX net address for SNMP */
+
+ S4(long, bc_nohostintr) /* Do not cause periodic host interrupts */
+
+ S4(uchar *, bc_dmaaddr) /* Physical addr of host DMA buf for diags */
+ S4(ulong, bc_dmalen) /* Length of DMA buffer 0..2048 */
+
+ /*
+ * Board memory allocated on startup for use by host, usually
+ * for the purposes of creating DMA chain descriptors. The
+ * "len" must be set before the processor is released. The
+ * address of the area is returned in bc_hostarea. The area
+ * is guaranteed to be aligned on a 16 byte boundary.
+ */
+ S4(ulong, bc_hostarea_len) /* RW: Number of bytes to allocate */
+ S4(uchar *, bc_hostarea) /* RO: Address of allocated memory */
+
+ /*
+ * Variables for communicating filters into the board
+ */
+ S4(ulong *, bc_filter_area) /* RO: Space to put filter into */
+ S4(ulong, bc_filter_area_len) /* RO: Length of area, in bytes */
+ S4(long, bc_filter_cmd) /* RW: Filter command, see below */
+ S4(ulong, bc_filter_len) /* RW: Actual length of filter */
+ S4(ulong, bc_filter_port) /* RW: Port # for filter 0..6 */
+ S4(ulong, bc_filter_num) /* RW: Filter #, 0=input, 1=output */
+
+ /* more SNMP agent addresses */
+ S1A(uchar, bc_iptrap, 4) /* IP address for SNMP */
+
+ S4A(long, bc_spare, 2) /* spares */
+END_STRUCT(bios_comm)
+
+#define bc VMO(struct bios_comm, 0xa3000100)
+
+/*
+ * bc_status values
+ */
+#define BC_INIT 0
+#define BC_RUN 100
+
+/*
+ * bc_host values
+ */
+#define BC_DIAGS -1
+#define BC_SASWITCH 0
+#define BC_SWITCH 1
+#define BC_MULTINIC 2
+
+/*
+ * Values for spew (debugging)
+ */
+#define BC_SPEW_ENABLE 0x80000000
+
+/*
+ * filter commands
+ */
+#define BC_FILTER_ERR -1
+#define BC_FILTER_OK 0
+#define BC_FILTER_SET 1
+#define BC_FILTER_CLR 2
diff --git a/drivers/net/dgrs_es4h.h b/drivers/net/dgrs_es4h.h
new file mode 100644
index 000000000000..5518fba46b2c
--- /dev/null
+++ b/drivers/net/dgrs_es4h.h
@@ -0,0 +1,183 @@
+/************************************************************************/
+/* */
+/* es4h.h: Hardware definition of the ES/4h Ethernet Switch, from */
+/* both the host and the 3051's point of view. */
+/* NOTE: this name is a misnomer now that there is a PCI */
+/* board. Everything that says "es4h" should really be */
+/* "se4". But we'll keep the old name for now. */
+/* */
+/* $Id: es4h.h,v 1.10 1996/08/22 17:16:53 rick Exp $ */
+/* */
+/************************************************************************/
+
+/************************************************************************/
+/* */
+/* EISA I/O Registers. These are located at 0x1000 * slot-number */
+/* plus the indicated address. I.E. 0x4000-0x4009 for slot 4. */
+/* */
+/************************************************************************/
+
+#define ES4H_MANUFmsb 0x00 /* Read-only */
+#define ES4H_MANUFlsb 0x01 /* Read-only */
+# define ES4H_MANUF_CODE 0x1049 /* = "DBI" */
+
+#define ES4H_PRODUCT 0x02 /* Read-only */
+# define ES4H_PRODUCT_CODE 0x0A
+# define EPC_PRODUCT_CODE 0x03
+
+#define ES4H_REVISION 0x03 /* Read-only */
+# define ES4H_REVISION_CODE 0x01
+
+#define ES4H_EC 0x04 /* EISA Control */
+# define ES4H_EC_RESET 0x04 /* WO, EISA reset */
+# define ES4H_EC_ENABLE 0x01 /* RW, EISA enable - set to */
+ /* 1 before memory enable */
+#define ES4H_PC 0x05 /* Processor Control */
+# define ES4H_PC_RESET 0x04 /* RW, 3051 reset */
+# define ES4H_PC_INT 0x08 /* WO, assert 3051 intr. 3 */
+
+#define ES4H_MW 0x06 /* Memory Window select and enable */
+# define ES4H_MW_ENABLE 0x80 /* WO, enable memory */
+# define ES4H_MW_SELECT_MASK 0x1f /* WO, 32k window selected */
+
+#define ES4H_IS 0x07 /* Interrupt, addr select */
+# define ES4H_IS_INTMASK 0x07 /* WO, interrupt select */
+# define ES4H_IS_INTOFF 0x00 /* No IRQ */
+# define ES4H_IS_INT3 0x03 /* IRQ 3 */
+# define ES4H_IS_INT5 0x02 /* IRQ 5 */
+# define ES4H_IS_INT7 0x01 /* IRQ 7 */
+# define ES4H_IS_INT10 0x04 /* IRQ 10 */
+# define ES4H_IS_INT11 0x05 /* IRQ 11 */
+# define ES4H_IS_INT12 0x06 /* IRQ 12 */
+# define ES4H_IS_INT15 0x07 /* IRQ 15 */
+# define ES4H_IS_INTACK 0x10 /* WO, interrupt ack */
+# define ES4H_IS_INTPEND 0x10 /* RO, interrupt pending */
+# define ES4H_IS_LINEAR 0x40 /* WO, no memory windowing */
+# define ES4H_IS_AS15 0x80 /* RW, address select bit 15 */
+
+#define ES4H_AS_23_16 0x08 /* Address select bits 23-16 */
+#define ES4H_AS_31_24 0x09 /* Address select bits 31-24 */
+
+#define ES4H_IO_MAX 0x09 /* Size of I/O space */
+
+/*
+ * PCI
+ */
+#define SE6_RESET PLX_USEROUT
+
+/************************************************************************/
+/* */
+/* 3051 Memory Map */
+/* */
+/* Note: 3051 has 4K I-cache, 2K D-cache. 1 cycle is 50 nsec. */
+/* */
+/************************************************************************/
+#define SE4_NPORTS 4 /* # of ethernet ports */
+#define SE6_NPORTS 6 /* # of ethernet ports */
+#define SE_NPORTS 6 /* Max # of ethernet ports */
+
+#define ES4H_RAM_BASE 0x83000000 /* Base address of RAM */
+#define ES4H_RAM_SIZE 0x00200000 /* Size of RAM (2MB) */
+#define ES4H_RAM_INTBASE 0x83800000 /* Base of int-on-write RAM */
+ /* a.k.a. PKT RAM */
+
+ /* Ethernet controllers */
+ /* See: i82596.h */
+#define ES4H_ETHER0_PORT 0xA2000000
+#define ES4H_ETHER0_CMD 0xA2000100
+#define ES4H_ETHER1_PORT 0xA2000200
+#define ES4H_ETHER1_CMD 0xA2000300
+#define ES4H_ETHER2_PORT 0xA2000400
+#define ES4H_ETHER2_CMD 0xA2000500
+#define ES4H_ETHER3_PORT 0xA2000600
+#define ES4H_ETHER3_CMD 0xA2000700
+#define ES4H_ETHER4_PORT 0xA2000800 /* RS SE-6 only */
+#define ES4H_ETHER4_CMD 0xA2000900 /* RS SE-6 only */
+#define ES4H_ETHER5_PORT 0xA2000A00 /* RS SE-6 only */
+#define ES4H_ETHER5_CMD 0xA2000B00 /* RS SE-6 only */
+
+#define ES4H_I8254 0xA2040000 /* 82C54 timers */
+ /* See: i8254.h */
+
+#define SE4_I8254_HZ (23000000/4) /* EISA clock input freq. */
+#define SE4_IDT_HZ (46000000) /* EISA CPU freq. */
+#define SE6_I8254_HZ (20000000/4) /* PCI clock input freq. */
+#define SE6_IDT_HZ (50000000) /* PCI CPU freq. */
+#define ES4H_I8254_HZ (23000000/4) /* EISA clock input freq. */
+
+#define ES4H_GPP 0xA2050000 /* General purpose port */
+ /*
+ * SE-4 (EISA) GPP bits
+ */
+# define ES4H_GPP_C0_100 0x0001 /* WO, Chan 0: 100 ohm TP */
+# define ES4H_GPP_C0_SQE 0x0002 /* WO, Chan 0: normal squelch */
+# define ES4H_GPP_C1_100 0x0004 /* WO, Chan 1: 100 ohm TP */
+# define ES4H_GPP_C1_SQE 0x0008 /* WO, Chan 1: normal squelch */
+# define ES4H_GPP_C2_100 0x0010 /* WO, Chan 2: 100 ohm TP */
+# define ES4H_GPP_C2_SQE 0x0020 /* WO, Chan 2: normal squelch */
+# define ES4H_GPP_C3_100 0x0040 /* WO, Chan 3: 100 ohm TP */
+# define ES4H_GPP_C3_SQE 0x0080 /* WO, Chan 3: normal squelch */
+# define ES4H_GPP_SQE 0x00AA /* WO, All: normal squelch */
+# define ES4H_GPP_100 0x0055 /* WO, All: 100 ohm TP */
+# define ES4H_GPP_HOSTINT 0x0100 /* RO, cause intr. to host */
+ /* Hold high > 250 nsec */
+# define SE4_GPP_EED 0x0200 /* RW, EEPROM data bit */
+# define SE4_GPP_EECS 0x0400 /* RW, EEPROM chip select */
+# define SE4_GPP_EECK 0x0800 /* RW, EEPROM clock */
+
+ /*
+ * SE-6 (PCI) GPP bits
+ */
+# define SE6_GPP_EED 0x0001 /* RW, EEPROM data bit */
+# define SE6_GPP_EECS 0x0002 /* RW, EEPROM chip select */
+# define SE6_GPP_EECK 0x0004 /* RW, EEPROM clock */
+# define SE6_GPP_LINK 0x00fc /* R, Link status LEDs */
+
+#define ES4H_INTVEC 0xA2060000 /* RO: Interrupt Vector */
+# define ES4H_IV_DMA0 0x01 /* Chan 0 DMA interrupt */
+# define ES4H_IV_PKT0 0x02 /* Chan 0 PKT interrupt */
+# define ES4H_IV_DMA1 0x04 /* Chan 1 DMA interrupt */
+# define ES4H_IV_PKT1 0x08 /* Chan 1 PKT interrupt */
+# define ES4H_IV_DMA2 0x10 /* Chan 2 DMA interrupt */
+# define ES4H_IV_PKT2 0x20 /* Chan 2 PKT interrupt */
+# define ES4H_IV_DMA3 0x40 /* Chan 3 DMA interrupt */
+# define ES4H_IV_PKT3 0x80 /* Chan 3 PKT interrupt */
+
+#define ES4H_INTACK 0xA2060000 /* WO: Interrupt Ack */
+# define ES4H_INTACK_8254 0x01 /* Real Time Clock (int 0) */
+# define ES4H_INTACK_HOST 0x02 /* Host (int 1) */
+# define ES4H_INTACK_PKT0 0x04 /* Chan 0 Pkt (int 2) */
+# define ES4H_INTACK_PKT1 0x08 /* Chan 1 Pkt (int 3) */
+# define ES4H_INTACK_PKT2 0x10 /* Chan 2 Pkt (int 4) */
+# define ES4H_INTACK_PKT3 0x20 /* Chan 3 Pkt (int 5) */
+
+#define SE6_PLX 0xA2070000 /* PLX 9060, SE-6 (PCI) only */
+ /* see plx9060.h */
+
+#define SE6_PCI_VENDOR_ID 0x114F /* Digi PCI vendor ID */
+#define SE6_PCI_DEVICE_ID 0x0003 /* RS SE-6 device ID */
+#define SE6_PCI_ID ((SE6_PCI_DEVICE_ID<<16) | SE6_PCI_VENDOR_ID)
+
+/*
+ * IDT Interrupts
+ */
+#define ES4H_INT_8254 IDT_INT0
+#define ES4H_INT_HOST IDT_INT1
+#define ES4H_INT_ETHER0 IDT_INT2
+#define ES4H_INT_ETHER1 IDT_INT3
+#define ES4H_INT_ETHER2 IDT_INT4
+#define ES4H_INT_ETHER3 IDT_INT5
+
+/*
+ * Because there are differences between the SE-4 and the SE-6,
+ * we assume that the following globals will be set up at init
+ * time in main.c to contain the appropriate constants from above
+ */
+extern ushort Gpp; /* Softcopy of GPP register */
+extern ushort EEck; /* Clock bit */
+extern ushort EEcs; /* CS bit */
+extern ushort EEd; /* Data bit */
+extern ulong I8254_Hz; /* i8254 input frequency */
+extern ulong IDT_Hz; /* IDT CPU frequency */
+extern int Nports; /* Number of ethernet controllers */
+extern int Nchan; /* Nports+1 */
diff --git a/drivers/net/dgrs_ether.h b/drivers/net/dgrs_ether.h
new file mode 100644
index 000000000000..51596ce6cf84
--- /dev/null
+++ b/drivers/net/dgrs_ether.h
@@ -0,0 +1,135 @@
+/*
+ * A filtering function. There are two filters/port. Filter "0"
+ * is the input filter, and filter "1" is the output filter.
+ */
+typedef int (FILTER_FUNC)(uchar *pktp, int pktlen, ulong *scratch, int port);
+#define NFILTERS 2
+
+/*
+ * The per port structure
+ */
+typedef struct
+{
+ int chan; /* Channel number (0-3) */
+ ulong portaddr; /* address of 596 port register */
+ volatile ulong *ca; /* address of 596 chan attention */
+ ulong intmask; /* Interrupt mask for this port */
+ ulong intack; /* Ack bit for this port */
+
+ uchar ethaddr[6]; /* Ethernet address of this port */
+ int is_promisc; /* Port is promiscuous */
+
+ int debug; /* Debugging turned on */
+
+ I596_ISCP *iscpp; /* Uncached ISCP pointer */
+ I596_SCP *scpp; /* Uncached SCP pointer */
+ I596_SCB *scbp; /* Uncached SCB pointer */
+
+ I596_ISCP iscp;
+ I596_SCB scb;
+
+ /* Command Queue */
+ I596_CB *cb0;
+ I596_CB *cbN;
+ I596_CB *cb_head;
+ I596_CB *cb_tail;
+
+ /* Receive Queue */
+ I596_RFD *rfd0;
+ I596_RFD *rfdN;
+ I596_RFD *rfd_head;
+ I596_RFD *rfd_tail;
+
+ /* Receive Buffers */
+ I596_RBD *rbd0;
+ I596_RBD *rbdN;
+ I596_RBD *rbd_head;
+ I596_RBD *rbd_tail;
+ int buf_size; /* Size of an RBD buffer */
+ int buf_cnt; /* Total RBD's allocated */
+
+ /* Rx Statistics */
+ ulong cnt_rx_cnt; /* Total packets rcvd, good and bad */
+ ulong cnt_rx_good; /* Total good packets rcvd */
+ ulong cnt_rx_bad; /* Total of all bad packets rcvd */
+ /* Subtotals can be gotten from SCB */
+ ulong cnt_rx_nores; /* No resources */
+ ulong cnt_rx_bytes; /* Total bytes rcvd */
+
+ /* Tx Statistics */
+ ulong cnt_tx_queued;
+ ulong cnt_tx_done;
+ ulong cnt_tx_freed;
+ ulong cnt_tx_nores; /* No resources */
+
+ ulong cnt_tx_bad;
+ ulong cnt_tx_err_late;
+ ulong cnt_tx_err_nocrs;
+ ulong cnt_tx_err_nocts;
+ ulong cnt_tx_err_under;
+ ulong cnt_tx_err_maxcol;
+ ulong cnt_tx_collisions;
+
+ /* Special stuff for host */
+# define rfd_freed cnt_rx_cnt
+ ulong rbd_freed;
+ int host_timer;
+
+ /* Added after first beta */
+ ulong cnt_tx_races; /* Counts race conditions */
+ int spanstate;
+ ulong cnt_st_tx; /* send span tree pkts */
+ ulong cnt_st_fail_tx; /* Failures to send span tree pkts */
+ ulong cnt_st_fail_rbd;/* Failures to send span tree pkts */
+ ulong cnt_st_rx; /* rcv span tree pkts */
+ ulong cnt_st_rx_bad; /* bogus st packets rcvd */
+ ulong cnt_rx_fwd; /* Rcvd packets that were forwarded */
+
+ ulong cnt_rx_mcast; /* Multicast pkts received */
+ ulong cnt_tx_mcast; /* Multicast pkts transmitted */
+ ulong cnt_tx_bytes; /* Bytes transmitted */
+
+ /*
+ * Packet filtering
+ * Filter 0: input filter
+ * Filter 1: output filter
+ */
+
+ ulong *filter_space[NFILTERS];
+ FILTER_FUNC *filter_func[NFILTERS];
+ ulong filter_cnt[NFILTERS];
+ ulong filter_len[NFILTERS];
+
+ ulong pad[ (512-300) / 4];
+} PORT;
+
+/*
+ * Port[0] is host interface
+ * Port[1..SE_NPORTS] are external 10 Base T ports. Fewer may be in
+ * use, depending on whether this is an SE-4 or
+ * an SE-6.
+ * Port[SE_NPORTS] Pseudo-port for Spanning tree and SNMP
+ */
+extern PORT Port[1+SE_NPORTS+1];
+
+extern int Nports; /* Number of genuine ethernet controllers */
+extern int Nchan; /* ... plus one for host interface */
+
+extern int FirstChan; /* 0 or 1, depending on whether host is used */
+extern int NumChan; /* 4 or 5 */
+
+/*
+ * A few globals
+ */
+extern int IsPromisc;
+extern int MultiNicMode;
+
+/*
+ * Functions
+ */
+extern void eth_xmit_spew_on(PORT *p, int cnt);
+extern void eth_xmit_spew_off(PORT *p);
+
+extern I596_RBD *alloc_rbds(PORT *p, int num);
+
+extern I596_CB * eth_cb_alloc(PORT *p);
diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c
new file mode 100644
index 000000000000..1e49e1e1f201
--- /dev/null
+++ b/drivers/net/dgrs_firmware.c
@@ -0,0 +1,9966 @@
+static int dgrs_firmnum = 550;
+static char dgrs_firmver[] = "$Version$";
+static char dgrs_firmdate[] = "11/16/96 03:45:15";
+static unsigned char dgrs_code[] __initdata = {
+ 213,5,192,8,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,64,40,35,41,
+ 101,115,52,104,46,98,105,110,32,32,32,32,
+ 32,32,49,46,48,32,48,48,47,48,48,47,
+ 57,52,0,64,40,35,41,67,111,112,121,114,
+ 105,103,104,116,32,49,57,57,53,44,32,68,
+ 105,103,105,32,73,110,116,101,114,110,97,116,
+ 105,111,110,97,108,46,32,32,65,108,108,32,
+ 82,105,103,104,116,115,32,82,101,115,101,114,
+ 118,101,100,46,0,0,0,0,97,5,192,8,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,255,255,0,16,0,0,0,0,
+ 0,0,0,0,8,0,224,3,0,0,0,0,
+ 148,255,189,39,16,0,161,175,20,0,162,175,
+ 24,0,163,175,28,0,164,175,32,0,165,175,
+ 36,0,166,175,40,0,167,175,44,0,168,175,
+ 48,0,169,175,52,0,170,175,56,0,171,175,
+ 60,0,172,175,64,0,173,175,68,0,174,175,
+ 72,0,175,175,76,0,184,175,80,0,185,175,
+ 88,0,190,175,92,0,191,175,0,112,8,64,
+ 18,72,0,0,16,80,0,0,0,96,11,64,
+ 84,0,168,175,96,0,169,175,100,0,170,175,
+ 104,0,171,175,33,56,0,1,0,131,24,60,
+ 0,1,24,39,0,0,8,143,0,0,0,0,
+ 1,0,8,33,0,0,8,175,0,104,5,64,
+ 0,96,6,64,124,0,168,48,212,255,0,21,
+ 0,0,0,0,36,64,166,0,0,255,8,49,
+ 27,0,0,17,0,0,0,0,130,65,8,0,
+ 2,131,9,60,33,72,40,1,0,220,41,141,
+ 66,64,8,0,2,131,10,60,33,80,72,1,
+ 0,224,74,141,0,0,0,0,38,80,70,1,
+ 1,255,74,49,33,40,192,0,38,48,202,0,
+ 0,96,134,64,66,64,8,0,2,131,4,60,
+ 33,32,136,0,0,226,132,144,9,248,32,1,
+ 0,0,0,0,104,0,166,143,0,0,0,0,
+ 0,96,134,64,0,104,5,64,227,255,0,16,
+ 0,0,0,0,104,0,168,143,96,0,169,143,
+ 100,0,170,143,0,0,0,0,0,96,136,64,
+ 19,0,32,1,17,0,64,1,20,0,162,143,
+ 24,0,163,143,28,0,164,143,32,0,165,143,
+ 36,0,166,143,40,0,167,143,44,0,168,143,
+ 48,0,169,143,52,0,170,143,56,0,171,143,
+ 60,0,172,143,64,0,173,143,68,0,174,143,
+ 72,0,175,143,76,0,184,143,80,0,185,143,
+ 88,0,190,143,92,0,191,143,0,0,0,0,
+ 84,0,186,143,16,0,161,143,108,0,189,39,
+ 8,0,64,3,16,0,0,66,0,96,26,64,
+ 0,0,0,0,255,255,27,60,254,0,123,55,
+ 0,0,0,0,36,208,91,3,0,0,0,0,
+ 0,96,154,64,0,0,0,0,0,112,26,64,
+ 0,0,0,0,16,0,0,66,0,0,0,0,
+ 8,0,64,3,0,0,0,0,255,255,8,36,
+ 133,255,0,17,0,0,0,0,1,0,8,37,
+ 130,255,0,21,0,0,0,0,255,255,8,36,
+ 33,8,0,1,126,255,40,20,0,0,0,0,
+ 1,0,33,36,123,255,32,20,0,0,0,0,
+ 255,255,2,36,120,255,72,20,0,0,0,0,
+ 1,0,66,36,117,255,64,20,0,0,0,0,
+ 255,255,3,36,114,255,104,20,0,0,0,0,
+ 1,0,99,36,111,255,96,20,0,0,0,0,
+ 255,255,4,36,108,255,136,20,0,0,0,0,
+ 1,0,132,36,105,255,128,20,0,0,0,0,
+ 255,255,5,36,102,255,168,20,0,0,0,0,
+ 1,0,165,36,99,255,160,20,0,0,0,0,
+ 255,255,6,36,96,255,200,20,0,0,0,0,
+ 1,0,198,36,93,255,192,20,0,0,0,0,
+ 255,255,7,36,90,255,232,20,0,0,0,0,
+ 1,0,231,36,87,255,224,20,0,0,0,0,
+ 255,255,9,36,84,255,40,21,0,0,0,0,
+ 1,0,41,37,81,255,32,21,0,0,0,0,
+ 255,255,10,36,78,255,72,21,0,0,0,0,
+ 1,0,74,37,75,255,64,21,0,0,0,0,
+ 255,255,11,36,72,255,104,21,0,0,0,0,
+ 1,0,107,37,69,255,96,21,0,0,0,0,
+ 255,255,12,36,66,255,136,21,0,0,0,0,
+ 1,0,140,37,63,255,128,21,0,0,0,0,
+ 255,255,13,36,60,255,168,21,0,0,0,0,
+ 1,0,173,37,57,255,160,21,0,0,0,0,
+ 255,255,14,36,54,255,200,21,0,0,0,0,
+ 1,0,206,37,51,255,192,21,0,0,0,0,
+ 255,255,15,36,48,255,232,21,0,0,0,0,
+ 1,0,239,37,45,255,224,21,0,0,0,0,
+ 255,255,24,36,42,255,8,23,0,0,0,0,
+ 1,0,24,39,39,255,0,23,0,0,0,0,
+ 255,255,16,36,36,255,8,22,0,0,0,0,
+ 1,0,16,38,33,255,0,22,0,0,0,0,
+ 255,255,17,36,30,255,40,22,0,0,0,0,
+ 1,0,49,38,27,255,32,22,0,0,0,0,
+ 255,255,18,36,24,255,72,22,0,0,0,0,
+ 1,0,82,38,21,255,64,22,0,0,0,0,
+ 255,255,19,36,18,255,104,22,0,0,0,0,
+ 1,0,115,38,15,255,96,22,0,0,0,0,
+ 255,255,20,36,12,255,136,22,0,0,0,0,
+ 1,0,148,38,9,255,128,22,0,0,0,0,
+ 255,255,21,36,6,255,168,22,0,0,0,0,
+ 1,0,181,38,3,255,160,22,0,0,0,0,
+ 255,255,22,36,0,255,200,22,0,0,0,0,
+ 1,0,214,38,253,254,192,22,0,0,0,0,
+ 255,255,23,36,250,254,232,22,0,0,0,0,
+ 1,0,247,38,247,254,224,22,0,0,0,0,
+ 255,255,26,36,244,254,72,23,0,0,0,0,
+ 1,0,90,39,241,254,64,23,0,0,0,0,
+ 255,255,27,36,238,254,104,23,0,0,0,0,
+ 1,0,123,39,235,254,96,23,0,0,0,0,
+ 255,255,28,36,232,254,136,23,0,0,0,0,
+ 1,0,156,39,229,254,128,23,0,0,0,0,
+ 255,255,29,36,226,254,168,23,0,0,0,0,
+ 1,0,189,39,223,254,160,23,0,0,0,0,
+ 255,255,30,36,220,254,200,23,0,0,0,0,
+ 1,0,222,39,217,254,192,23,0,0,0,0,
+ 255,255,31,36,214,254,232,23,0,0,0,0,
+ 1,0,255,39,211,254,224,23,0,0,0,0,
+ 0,131,24,60,0,1,24,39,0,32,1,60,
+ 37,192,1,3,0,96,8,64,0,0,0,0,
+ 1,0,1,60,37,64,1,1,0,96,136,64,
+ 33,16,0,0,165,165,3,60,165,165,99,52,
+ 0,128,1,60,0,0,35,172,0,128,9,60,
+ 0,0,41,141,0,0,0,0,0,96,10,64,
+ 0,0,0,0,8,0,1,60,36,80,65,1,
+ 29,0,64,21,0,0,0,0,27,0,105,20,
+ 0,0,0,0,0,1,2,36,0,128,1,60,
+ 33,8,34,0,0,0,32,172,64,16,2,0,
+ 1,0,1,60,1,0,33,52,43,8,65,0,
+ 248,255,32,20,0,0,0,0,255,255,3,36,
+ 0,128,1,60,0,0,35,172,0,1,2,36,
+ 0,128,3,60,33,24,98,0,0,0,99,140,
+ 0,0,0,0,7,0,96,20,0,0,0,0,
+ 64,16,2,0,1,0,1,60,1,0,33,52,
+ 43,8,65,0,245,255,32,20,0,0,0,0,
+ 0,96,128,64,0,0,0,0,84,0,2,175,
+ 0,96,8,64,0,0,0,0,3,0,1,60,
+ 37,64,1,1,0,96,136,64,33,16,0,0,
+ 165,165,3,60,165,165,99,52,0,128,1,60,
+ 0,0,35,172,0,128,9,60,0,0,41,141,
+ 0,0,0,0,0,96,10,64,0,0,0,0,
+ 8,0,1,60,36,80,65,1,29,0,64,21,
+ 0,0,0,0,27,0,105,20,0,0,0,0,
+ 0,1,2,36,0,128,1,60,33,8,34,0,
+ 0,0,32,172,64,16,2,0,1,0,1,60,
+ 1,0,33,52,43,8,65,0,248,255,32,20,
+ 0,0,0,0,255,255,3,36,0,128,1,60,
+ 0,0,35,172,0,1,2,36,0,128,3,60,
+ 33,24,98,0,0,0,99,140,0,0,0,0,
+ 7,0,96,20,0,0,0,0,64,16,2,0,
+ 1,0,1,60,1,0,33,52,43,8,65,0,
+ 245,255,32,20,0,0,0,0,0,96,128,64,
+ 0,0,0,0,88,0,2,175,88,0,9,143,
+ 0,0,0,0,17,0,32,17,0,0,0,0,
+ 0,0,0,0,3,0,2,60,0,0,0,0,
+ 0,96,130,64,0,128,8,60,37,72,40,1,
+ 0,0,0,161,4,0,0,161,8,0,0,161,
+ 12,0,0,161,16,0,0,161,20,0,0,161,
+ 24,0,0,161,32,0,8,37,247,255,9,21,
+ 252,255,0,161,84,0,9,143,0,0,0,0,
+ 17,0,32,17,0,0,0,0,0,0,0,0,
+ 1,0,2,60,0,0,0,0,0,96,130,64,
+ 0,128,8,60,37,72,40,1,0,0,0,161,
+ 4,0,0,161,8,0,0,161,12,0,0,161,
+ 16,0,0,161,20,0,0,161,24,0,0,161,
+ 32,0,8,37,247,255,9,21,252,255,0,161,
+ 32,0,8,60,0,96,136,64,0,104,128,64,
+ 0,131,2,60,152,28,66,36,255,31,9,60,
+ 255,255,41,53,36,16,73,0,0,128,9,60,
+ 37,16,73,0,8,0,64,0,0,0,0,0,
+ 2,131,8,60,224,210,8,37,252,255,1,36,
+ 36,64,1,1,3,131,9,60,124,18,41,37,
+ 252,255,1,36,36,72,33,1,0,0,10,36,
+ 0,0,10,173,254,255,9,21,4,0,8,37,
+ 3,131,8,60,128,18,8,37,252,255,1,36,
+ 36,64,1,1,31,131,9,60,252,255,41,53,
+ 252,255,1,36,36,72,33,1,237,254,10,60,
+ 175,222,74,53,0,0,10,173,254,255,9,21,
+ 4,0,8,37,2,131,8,60,0,212,8,37,
+ 252,255,1,36,36,64,1,1,2,131,9,60,
+ 252,219,41,37,252,255,1,36,36,72,33,1,
+ 173,222,10,60,239,190,74,53,0,0,10,173,
+ 254,255,9,21,4,0,8,37,0,4,8,60,
+ 0,0,0,0,0,24,136,64,0,0,0,0,
+ 2,131,29,60,0,220,189,39,0,0,30,36,
+ 2,131,28,60,51,8,192,12,16,78,156,39,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 232,255,189,39,16,0,191,175,8,128,132,39,
+ 15,63,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,16,0,191,175,12,128,132,39,
+ 15,63,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,16,0,191,175,16,128,132,39,
+ 15,63,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,16,0,191,175,20,128,132,39,
+ 15,63,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,24,133,131,143,6,0,2,36,
+ 20,0,191,175,6,0,98,20,16,0,176,175,
+ 7,162,3,60,228,0,99,52,1,0,2,36,
+ 184,7,192,8,0,0,98,172,0,128,130,151,
+ 5,162,16,60,0,1,66,52,120,63,192,12,
+ 0,0,2,166,0,128,130,151,0,0,0,0,
+ 255,254,66,48,0,0,2,166,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 232,255,189,39,33,16,128,0,3,0,64,4,
+ 16,0,191,175,254,255,2,60,192,29,66,52,
+ 0,163,4,60,96,1,132,52,0,163,1,60,
+ 92,1,34,172,0,163,1,60,104,1,38,172,
+ 204,63,192,12,8,0,6,36,228,63,192,12,
+ 255,255,4,36,204,7,192,8,0,0,0,0,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,216,255,189,39,1,0,6,36,
+ 3,131,2,60,143,18,66,36,240,255,3,36,
+ 36,16,67,0,0,163,1,60,120,1,34,172,
+ 0,163,2,60,120,1,66,140,33,56,0,0,
+ 32,0,191,175,28,0,177,175,24,0,176,175,
+ 16,0,160,175,0,163,1,60,116,1,34,172,
+ 0,163,3,60,112,1,99,140,0,163,2,60,
+ 116,1,66,140,0,163,4,60,116,1,132,140,
+ 35,136,98,0,84,64,192,12,33,40,32,2,
+ 13,0,64,16,0,0,0,0,1,131,4,60,
+ 96,127,132,36,24,128,144,39,33,40,0,2,
+ 1,131,7,60,128,127,231,36,15,63,192,12,
+ 148,0,6,36,1,0,4,36,33,40,0,2,
+ 188,7,192,12,148,0,6,36,2,0,33,6,
+ 33,16,32,2,3,0,34,38,131,136,2,0,
+ 0,163,2,60,116,1,66,140,0,0,0,0,
+ 6,0,32,18,237,254,3,60,175,222,99,52,
+ 0,0,67,172,255,255,49,38,253,255,32,22,
+ 4,0,66,36,32,0,191,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,40,0,189,39,
+ 224,255,189,39,15,0,132,36,240,255,3,36,
+ 20,0,177,175,0,163,17,60,120,1,49,142,
+ 0,163,2,60,120,1,66,140,36,32,131,0,
+ 33,16,68,0,0,163,1,60,120,1,34,172,
+ 0,163,3,60,120,1,99,140,0,163,2,60,
+ 112,1,66,140,24,0,191,175,43,16,67,0,
+ 13,0,64,16,16,0,176,175,1,131,4,60,
+ 96,127,132,36,24,128,144,39,33,40,0,2,
+ 1,131,7,60,176,127,231,36,15,63,192,12,
+ 171,0,6,36,1,0,4,36,33,40,0,2,
+ 188,7,192,12,171,0,6,36,33,16,32,2,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,216,255,189,39,
+ 3,0,2,60,7,162,3,60,36,0,191,175,
+ 32,0,176,175,0,163,1,60,92,1,32,172,
+ 0,0,99,140,79,17,66,52,32,0,98,20,
+ 87,0,4,60,76,0,8,60,64,75,8,53,
+ 250,2,7,60,128,240,231,52,7,162,6,60,
+ 152,0,198,52,67,73,3,60,67,3,99,52,
+ 7,162,4,60,48,1,132,52,7,162,5,60,
+ 0,1,165,52,6,0,2,36,24,133,130,175,
+ 0,163,1,60,204,5,34,172,4,0,2,36,
+ 50,133,130,167,2,0,2,36,48,133,130,167,
+ 1,0,2,36,20,133,130,167,119,119,2,36,
+ 28,133,136,175,16,133,135,175,0,0,195,172,
+ 0,0,130,172,67,1,2,36,0,0,162,172,
+ 109,8,192,8,31,131,4,60,240,188,132,52,
+ 189,2,3,60,128,231,99,52,4,0,2,36,
+ 24,133,130,175,0,163,1,60,204,5,34,172,
+ 0,8,2,36,50,133,130,167,0,4,2,36,
+ 48,133,130,167,0,2,2,36,20,133,130,167,
+ 28,133,132,175,16,133,131,175,31,131,4,60,
+ 0,240,132,52,24,133,131,143,0,131,2,60,
+ 0,163,1,60,108,1,34,172,0,163,1,60,
+ 112,1,36,172,1,0,99,36,32,133,131,175,
+ 210,7,192,12,0,0,0,0,0,163,2,60,
+ 132,1,66,140,0,128,128,167,2,0,64,20,
+ 85,0,2,36,0,128,130,167,0,163,2,60,
+ 136,1,66,140,0,0,0,0,5,0,64,20,
+ 0,0,0,0,0,128,130,151,0,0,0,0,
+ 170,0,66,52,0,128,130,167,0,128,131,151,
+ 5,162,2,60,0,0,67,164,188,64,192,12,
+ 1,0,16,36,2,131,4,60,0,220,132,36,
+ 2,131,5,60,0,224,165,36,2,131,6,60,
+ 0,226,198,36,8,0,7,36,2,131,2,60,
+ 112,154,66,36,16,0,162,175,2,131,2,60,
+ 144,154,66,36,20,0,162,175,2,131,2,60,
+ 176,154,66,36,24,0,162,175,0,131,2,60,
+ 36,30,66,36,0,163,1,60,92,1,48,172,
+ 240,64,192,12,28,0,162,175,0,163,3,60,
+ 124,1,99,140,40,133,128,175,2,0,98,40,
+ 7,0,64,16,2,0,2,36,18,0,97,4,
+ 255,255,2,36,7,0,98,16,0,0,0,0,
+ 196,8,192,8,0,0,0,0,16,0,98,16,
+ 0,0,0,0,196,8,192,8,0,0,0,0,
+ 24,133,133,143,1,131,4,60,15,63,192,12,
+ 208,127,132,36,0,163,1,60,112,25,192,12,
+ 124,1,32,172,207,8,192,8,0,0,0,0,
+ 211,8,192,12,0,0,0,0,207,8,192,8,
+ 0,0,0,0,40,133,144,175,31,10,192,12,
+ 0,0,0,0,207,8,192,8,0,0,0,0,
+ 1,131,4,60,96,127,132,36,24,128,144,39,
+ 33,40,0,2,32,128,135,39,15,63,192,12,
+ 58,1,6,36,1,0,4,36,33,40,0,2,
+ 188,7,192,12,58,1,6,36,36,0,191,143,
+ 32,0,176,143,8,0,224,3,40,0,189,39,
+ 192,255,189,39,56,0,191,175,52,0,181,175,
+ 48,0,180,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,180,10,192,12,32,0,176,175,
+ 33,32,0,0,2,0,2,36,0,163,1,60,
+ 244,57,192,12,92,1,34,172,3,0,2,36,
+ 0,163,1,60,0,12,192,12,92,1,34,172,
+ 1,0,4,36,4,0,2,36,0,163,1,60,
+ 34,11,192,12,92,1,34,172,5,0,2,36,
+ 0,163,1,60,92,1,34,172,0,163,19,60,
+ 124,1,115,142,0,163,3,60,160,1,99,140,
+ 1,0,98,46,80,133,130,175,0,128,2,60,
+ 5,0,98,20,0,0,0,0,32,133,130,143,
+ 0,0,0,0,252,8,192,8,255,255,66,36,
+ 32,133,130,143,0,0,0,0,84,133,130,175,
+ 130,11,192,12,0,0,0,0,33,32,64,0,
+ 2,0,5,36,232,3,6,36,0,131,7,60,
+ 196,37,231,36,156,11,192,12,16,0,160,175,
+ 35,35,192,12,0,0,0,0,6,0,2,36,
+ 0,163,1,60,84,35,192,12,92,1,34,172,
+ 7,0,2,36,0,163,1,60,141,47,192,12,
+ 92,1,34,172,8,0,2,36,0,163,1,60,
+ 120,50,192,12,92,1,34,172,9,0,2,36,
+ 0,163,1,60,92,1,34,172,0,163,2,60,
+ 240,5,66,140,0,0,0,0,8,0,64,16,
+ 10,0,2,36,0,163,4,60,240,5,132,140,
+ 13,8,192,12,0,0,0,0,0,163,1,60,
+ 244,5,34,172,10,0,2,36,0,163,1,60,
+ 92,1,34,172,157,15,192,12,1,0,21,36,
+ 2,131,2,60,192,246,66,36,33,160,64,0,
+ 80,133,131,143,11,0,2,36,0,163,1,60,
+ 92,1,34,172,100,0,2,36,0,163,1,60,
+ 92,1,34,172,84,133,130,143,64,26,3,0,
+ 33,136,116,0,64,18,2,0,33,144,84,0,
+ 0,163,2,60,8,1,66,140,0,0,0,0,
+ 1,0,66,36,0,163,1,60,8,1,34,172,
+ 0,163,2,60,8,1,66,140,0,163,2,60,
+ 124,1,66,140,0,0,0,0,14,0,83,16,
+ 0,0,0,0,4,0,64,16,33,152,64,0,
+ 80,133,128,175,76,9,192,8,0,0,0,0,
+ 80,133,149,175,2,131,4,60,163,23,192,12,
+ 192,246,132,36,80,133,130,143,0,0,0,0,
+ 64,18,2,0,33,136,84,0,0,163,2,60,
+ 0,6,66,140,0,0,0,0,3,0,64,24,
+ 33,128,32,2,239,15,192,12,0,0,0,0,
+ 43,16,18,2,11,0,64,16,33,32,0,2,
+ 151,18,192,12,10,0,5,36,27,22,192,12,
+ 33,32,0,2,142,22,192,12,33,32,0,2,
+ 0,2,16,38,43,16,18,2,247,255,64,20,
+ 33,32,0,2,184,11,192,12,0,0,0,0,
+ 54,9,192,8,0,0,0,0,56,0,191,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,64,0,189,39,4,128,130,143,
+ 232,255,189,39,20,0,191,175,16,0,176,175,
+ 1,0,67,36,4,128,131,175,255,255,3,36,
+ 4,0,67,20,255,31,4,60,0,163,1,60,
+ 8,1,32,172,255,31,4,60,255,255,132,52,
+ 0,163,16,60,0,163,2,60,8,1,66,140,
+ 208,132,131,143,220,5,16,54,35,16,67,0,
+ 0,163,1,60,16,1,34,172,2,131,2,60,
+ 192,246,66,36,36,16,68,0,0,160,3,60,
+ 37,16,67,0,0,163,3,60,8,1,99,140,
+ 28,0,68,140,0,0,5,142,3,131,2,60,
+ 20,18,66,140,208,132,131,175,36,133,132,175,
+ 18,0,162,16,0,163,4,60,99,59,192,12,
+ 220,5,132,52,255,0,5,60,255,0,165,52,
+ 0,255,6,60,0,0,4,142,0,255,198,52,
+ 0,20,4,0,2,28,4,0,37,16,67,0,
+ 2,26,2,0,36,24,101,0,0,18,2,0,
+ 36,16,70,0,37,24,98,0,176,133,132,175,
+ 184,133,131,175,0,163,16,60,16,6,16,54,
+ 0,0,3,142,3,131,2,60,68,18,66,140,
+ 0,0,0,0,18,0,98,16,0,163,4,60,
+ 119,59,192,12,16,6,132,52,255,0,5,60,
+ 255,0,165,52,0,255,6,60,0,0,4,142,
+ 0,255,198,52,0,20,4,0,2,28,4,0,
+ 37,16,67,0,2,26,2,0,36,24,101,0,
+ 0,18,2,0,36,16,70,0,37,24,98,0,
+ 196,133,132,175,192,133,131,175,0,163,16,60,
+ 224,5,16,54,0,0,3,142,3,131,2,60,
+ 24,18,66,140,0,0,0,0,18,0,98,16,
+ 0,163,4,60,139,59,192,12,224,5,132,52,
+ 255,0,5,60,255,0,165,52,0,255,6,60,
+ 0,0,4,142,0,255,198,52,0,20,4,0,
+ 2,28,4,0,37,16,67,0,2,26,2,0,
+ 36,24,101,0,0,18,2,0,36,16,70,0,
+ 37,24,98,0,188,133,132,175,180,133,131,175,
+ 44,133,131,143,0,163,2,60,144,1,66,140,
+ 0,0,0,0,5,0,98,16,0,0,0,0,
+ 0,163,4,60,144,1,132,140,159,59,192,12,
+ 0,0,0,0,0,163,3,60,140,1,99,140,
+ 3,131,2,60,64,18,66,140,0,0,0,0,
+ 5,0,98,16,0,0,0,0,0,163,4,60,
+ 140,1,132,140,51,60,192,12,0,0,0,0,
+ 44,133,130,143,0,0,0,0,3,0,64,16,
+ 0,0,0,0,116,38,192,12,0,0,0,0,
+ 164,7,192,12,0,0,0,0,36,128,130,143,
+ 0,0,0,0,1,0,66,36,36,128,130,175,
+ 60,0,66,40,8,0,64,20,0,0,0,0,
+ 3,131,2,60,24,18,66,140,36,128,128,175,
+ 3,0,64,16,0,0,0,0,222,48,192,12,
+ 0,0,0,0,0,163,2,60,48,1,66,140,
+ 0,0,0,0,20,0,64,16,0,0,0,0,
+ 0,163,1,60,48,1,32,172,0,163,1,60,
+ 16,1,32,172,0,163,1,60,20,1,32,172,
+ 0,163,1,60,24,1,32,172,0,163,1,60,
+ 28,1,32,172,0,163,1,60,32,1,32,172,
+ 0,163,1,60,36,1,32,172,0,163,1,60,
+ 40,1,32,172,0,163,1,60,201,13,192,12,
+ 44,1,32,172,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,216,255,189,39,
+ 36,0,191,175,32,0,178,175,28,0,177,175,
+ 180,10,192,12,24,0,176,175,33,32,0,0,
+ 2,0,2,36,0,163,1,60,244,57,192,12,
+ 92,1,34,172,3,0,2,36,0,163,1,60,
+ 0,12,192,12,92,1,34,172,1,0,4,36,
+ 4,0,2,36,0,163,1,60,34,11,192,12,
+ 92,1,34,172,32,133,131,143,5,0,2,36,
+ 0,163,1,60,92,1,34,172,80,133,128,175,
+ 84,133,131,175,130,11,192,12,0,0,0,0,
+ 33,32,64,0,2,0,5,36,232,3,6,36,
+ 0,131,7,60,196,37,231,36,156,11,192,12,
+ 16,0,160,175,0,163,2,60,240,5,66,140,
+ 0,0,0,0,8,0,64,16,10,0,2,36,
+ 0,163,4,60,240,5,132,140,13,8,192,12,
+ 0,0,0,0,0,163,1,60,244,5,34,172,
+ 10,0,2,36,0,163,1,60,92,1,34,172,
+ 100,0,2,36,80,133,131,143,2,131,4,60,
+ 192,246,132,36,0,163,1,60,92,1,34,172,
+ 84,133,130,143,64,26,3,0,33,144,100,0,
+ 64,18,2,0,33,136,68,0,0,163,2,60,
+ 8,1,66,140,33,128,64,2,1,0,66,36,
+ 0,163,1,60,8,1,34,172,0,163,2,60,
+ 8,1,66,140,43,16,17,2,11,0,64,16,
+ 33,32,0,2,151,18,192,12,10,0,5,36,
+ 27,22,192,12,33,32,0,2,142,22,192,12,
+ 33,32,0,2,0,2,16,38,43,16,17,2,
+ 247,255,64,20,33,32,0,2,184,11,192,12,
+ 0,0,0,0,91,10,192,8,0,0,0,0,
+ 36,0,191,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,40,0,189,39,
+ 4,128,130,143,232,255,189,39,16,0,191,175,
+ 1,0,67,36,4,128,131,175,255,255,3,36,
+ 4,0,67,20,255,31,4,60,0,163,1,60,
+ 8,1,32,172,255,31,4,60,0,163,2,60,
+ 8,1,66,140,212,132,131,143,255,255,132,52,
+ 35,16,67,0,0,163,1,60,16,1,34,172,
+ 2,131,2,60,192,246,66,36,36,16,68,0,
+ 0,160,3,60,37,16,67,0,0,163,3,60,
+ 8,1,99,140,28,0,66,140,212,132,131,175,
+ 36,133,130,175,164,7,192,12,0,0,0,0,
+ 0,163,2,60,48,1,66,140,0,0,0,0,
+ 20,0,64,16,0,0,0,0,0,163,1,60,
+ 48,1,32,172,0,163,1,60,16,1,32,172,
+ 0,163,1,60,20,1,32,172,0,163,1,60,
+ 24,1,32,172,0,163,1,60,28,1,32,172,
+ 0,163,1,60,32,1,32,172,0,163,1,60,
+ 36,1,32,172,0,163,1,60,40,1,32,172,
+ 0,163,1,60,201,13,192,12,44,1,32,172,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,224,255,189,39,24,0,191,175,
+ 20,0,177,175,120,63,192,12,16,0,176,175,
+ 52,0,2,36,4,162,1,60,12,0,34,160,
+ 120,63,192,12,232,3,16,36,28,133,130,143,
+ 0,0,0,0,27,0,80,0,2,0,0,22,
+ 0,0,0,0,13,0,7,0,18,16,0,0,
+ 4,162,17,60,120,63,192,12,0,0,34,162,
+ 28,133,130,143,0,0,0,0,27,0,80,0,
+ 2,0,0,22,0,0,0,0,13,0,7,0,
+ 18,16,0,0,33,40,0,0,33,32,0,0,
+ 6,162,3,60,2,18,2,0,0,0,34,162,
+ 1,0,2,36,0,163,1,60,4,1,32,172,
+ 0,0,98,172,2,131,1,60,33,8,36,0,
+ 8,245,32,172,1,0,165,36,22,0,162,44,
+ 250,255,64,20,20,0,132,36,31,131,4,60,
+ 0,240,132,52,52,128,131,143,1,0,2,36,
+ 68,133,128,175,48,128,130,175,64,133,128,175,
+ 32,131,1,60,252,239,36,172,8,0,96,16,
+ 31,131,5,60,252,239,165,52,31,131,6,60,
+ 1,131,4,60,224,127,132,36,15,63,192,12,
+ 0,240,198,52,52,128,128,175,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,16,0,176,175,
+ 116,0,2,36,20,0,191,175,4,162,1,60,
+ 12,0,34,160,130,63,192,12,33,128,128,0,
+ 4,162,1,60,4,0,48,160,130,63,192,12,
+ 3,130,16,0,4,162,1,60,130,63,192,12,
+ 4,0,48,160,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,224,255,189,39,
+ 64,0,2,36,24,0,191,175,20,0,177,175,
+ 16,0,176,175,4,162,1,60,130,63,192,12,
+ 12,0,34,160,4,162,17,60,4,0,49,146,
+ 0,0,0,0,130,63,192,12,255,0,49,50,
+ 4,162,16,60,4,0,16,146,0,0,0,0,
+ 130,63,192,12,255,0,16,50,0,130,16,0,
+ 37,16,17,2,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 48,128,130,143,232,255,189,39,16,0,176,175,
+ 33,128,128,0,3,0,64,20,20,0,191,175,
+ 180,10,192,12,0,0,0,0,5,0,0,18,
+ 0,0,0,0,236,63,192,12,1,4,4,36,
+ 50,11,192,8,0,0,0,0,228,63,192,12,
+ 0,4,4,36,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,216,255,189,39,
+ 6,162,3,60,1,0,2,36,32,0,191,175,
+ 28,0,177,175,24,0,176,175,0,0,98,172,
+ 0,163,2,60,4,1,66,140,33,136,224,0,
+ 1,0,66,36,0,163,1,60,4,1,34,172,
+ 56,128,130,143,0,163,3,60,4,1,99,140,
+ 1,0,66,36,56,128,130,175,232,3,66,40,
+ 21,0,64,20,255,127,3,60,68,133,130,143,
+ 254,255,99,52,56,128,128,175,1,0,66,36,
+ 43,24,98,0,68,133,130,175,13,0,96,16,
+ 0,0,0,0,2,131,4,60,28,128,132,36,
+ 60,128,144,39,33,40,0,2,2,131,7,60,
+ 60,128,231,36,15,63,192,12,144,0,6,36,
+ 1,0,4,36,33,40,0,2,188,7,192,12,
+ 144,0,6,36,64,133,134,143,0,0,0,0,
+ 14,0,192,24,33,24,0,0,2,131,5,60,
+ 0,245,165,36,33,32,0,0,2,131,2,60,
+ 33,16,68,0,0,245,66,140,20,0,132,36,
+ 1,0,99,36,255,255,66,36,0,0,162,172,
+ 42,16,102,0,247,255,64,20,20,0,165,36,
+ 31,131,4,60,252,239,132,52,31,131,2,60,
+ 0,0,131,140,255,255,66,52,0,0,113,172,
+ 4,0,99,36,43,16,67,0,3,0,64,16,
+ 0,0,0,0,31,131,3,60,0,240,99,52,
+ 0,0,131,172,32,0,191,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,40,0,189,39,
+ 64,133,130,143,232,255,189,39,20,0,191,175,
+ 22,0,66,40,13,0,64,20,16,0,176,175,
+ 2,131,4,60,28,128,132,36,60,128,144,39,
+ 33,40,0,2,2,131,7,60,84,128,231,36,
+ 15,63,192,12,173,0,6,36,1,0,4,36,
+ 33,40,0,2,188,7,192,12,173,0,6,36,
+ 64,133,130,143,0,0,0,0,1,0,67,36,
+ 64,133,131,175,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,128,16,4,0,
+ 33,16,68,0,16,0,163,143,128,16,2,0,
+ 2,131,1,60,33,8,34,0,4,245,38,172,
+ 2,131,1,60,33,8,34,0,12,245,39,172,
+ 2,131,1,60,33,8,34,0,0,245,38,172,
+ 2,131,1,60,33,8,34,0,8,245,37,172,
+ 2,131,1,60,33,8,34,0,16,245,35,172,
+ 8,0,224,3,33,16,0,1,128,16,4,0,
+ 33,16,68,0,128,16,2,0,2,131,1,60,
+ 33,8,34,0,8,0,224,3,8,245,32,172,
+ 64,133,130,143,192,255,189,39,40,0,180,175,
+ 33,160,0,0,56,0,191,175,52,0,183,175,
+ 48,0,182,175,44,0,181,175,36,0,179,175,
+ 32,0,178,175,28,0,177,175,48,0,64,24,
+ 24,0,176,175,1,0,23,36,2,0,22,36,
+ 2,131,16,60,12,245,16,38,4,0,19,38,
+ 244,255,17,38,252,255,18,38,33,168,0,0,
+ 0,0,67,142,0,0,0,0,7,0,119,16,
+ 2,0,98,40,25,0,64,20,0,0,0,0,
+ 9,0,118,16,0,0,0,0,236,11,192,8,
+ 20,0,16,38,0,0,34,142,0,0,0,0,
+ 17,0,64,28,0,0,0,0,230,11,192,8,
+ 0,0,64,174,0,0,34,142,0,0,0,0,
+ 11,0,64,28,0,0,0,0,2,131,2,60,
+ 33,16,85,0,4,245,66,140,0,0,0,0,
+ 0,0,34,174,0,0,100,142,0,0,2,142,
+ 0,0,0,0,9,248,64,0,0,0,0,0,
+ 20,0,16,38,20,0,115,38,20,0,49,38,
+ 20,0,82,38,64,133,130,143,1,0,148,38,
+ 42,16,130,2,218,255,64,20,20,0,181,38,
+ 56,0,191,143,52,0,183,143,48,0,182,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,64,0,189,39,0,0,0,0,
+ 2,131,3,60,192,246,99,36,0,2,2,36,
+ 0,163,1,60,200,5,35,172,0,163,1,60,
+ 208,5,34,172,0,163,2,60,124,1,66,140,
+ 216,255,189,39,16,0,176,175,33,128,0,0,
+ 28,0,179,175,255,255,19,36,24,0,178,175,
+ 21,0,114,36,20,0,177,175,32,0,191,175,
+ 1,0,66,44,80,133,130,175,139,14,192,12,
+ 20,0,113,36,184,24,192,12,0,0,0,0,
+ 27,67,192,12,33,32,0,2,6,0,83,20,
+ 1,0,16,38,2,131,4,60,15,63,192,12,
+ 112,128,132,36,126,12,192,8,1,0,2,36,
+ 0,0,34,162,3,18,2,0,0,0,66,162,
+ 2,0,82,38,3,0,2,42,241,255,64,20,
+ 2,0,49,38,2,131,17,60,212,246,49,38,
+ 33,32,32,2,33,40,0,0,255,127,6,60,
+ 247,24,192,12,255,255,198,52,255,31,3,60,
+ 255,255,99,52,236,255,48,38,36,0,34,38,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 0,32,3,36,236,255,32,174,2,131,1,60,
+ 220,246,32,172,2,131,1,60,204,246,32,172,
+ 2,131,1,60,236,246,34,172,0,0,67,164,
+ 222,21,192,12,33,32,0,2,122,15,192,12,
+ 33,32,0,2,242,21,192,12,33,32,0,2,
+ 32,133,130,143,1,0,16,36,42,16,2,2,
+ 12,0,64,16,255,31,3,60,236,1,49,38,
+ 133,12,192,12,33,32,0,2,242,21,192,12,
+ 33,32,32,2,32,133,130,143,1,0,16,38,
+ 42,16,2,2,248,255,64,20,0,2,49,38,
+ 255,31,3,60,255,255,99,52,2,131,16,60,
+ 192,4,16,38,7,0,2,36,0,0,2,174,
+ 56,0,2,38,36,16,67,0,0,160,3,60,
+ 37,16,67,0,0,32,3,36,2,131,1,60,
+ 220,4,32,172,2,131,1,60,204,4,32,172,
+ 2,131,1,60,236,4,34,172,0,0,67,164,
+ 2,131,2,60,212,246,66,140,2,131,3,60,
+ 216,246,99,132,20,0,2,174,24,0,3,166,
+ 2,131,2,60,217,4,66,144,0,0,0,0,
+ 7,0,66,36,2,131,1,60,217,4,34,160,
+ 112,15,192,12,33,32,0,2,33,32,0,2,
+ 19,15,192,12,32,0,5,36,20,0,16,38,
+ 33,32,0,2,7,0,5,36,255,127,6,60,
+ 247,24,192,12,255,255,198,52,33,16,0,0,
+ 32,0,191,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,200,255,189,39,48,0,180,175,
+ 33,160,128,0,255,31,6,60,255,255,198,52,
+ 64,26,20,0,2,131,2,60,192,246,66,36,
+ 40,0,178,175,33,144,98,0,255,255,132,38,
+ 64,18,4,0,0,162,3,60,33,16,67,0,
+ 52,0,191,175,44,0,179,175,36,0,177,175,
+ 32,0,176,175,4,0,66,174,0,1,66,36,
+ 8,0,66,174,0,16,2,36,4,16,130,0,
+ 12,0,66,174,4,0,2,36,4,16,130,0,
+ 0,160,5,60,16,0,66,174,48,0,66,38,
+ 36,16,70,0,37,16,69,0,36,0,66,174,
+ 64,16,4,0,33,16,68,0,128,16,2,0,
+ 2,131,3,60,240,231,99,36,33,16,67,0,
+ 36,16,70,0,37,16,69,0,40,0,66,174,
+ 56,0,66,38,36,16,70,0,37,16,69,0,
+ 0,0,84,174,44,0,66,174,32,0,64,174,
+ 2,131,2,60,212,246,66,140,2,131,3,60,
+ 216,246,99,132,20,0,66,174,24,0,67,166,
+ 25,0,66,146,0,0,0,0,33,32,84,0,
+ 2,131,2,60,0,227,66,36,36,16,70,0,
+ 37,128,69,0,2,131,2,60,32,227,66,36,
+ 36,16,70,0,25,0,68,162,40,133,131,143,
+ 0,0,0,0,3,0,96,16,37,136,69,0,
+ 255,255,130,36,25,0,66,162,12,0,68,142,
+ 28,0,64,174,228,63,192,12,1,0,132,52,
+ 4,0,68,142,0,0,0,0,76,67,192,12,
+ 33,40,0,0,76,63,192,12,0,0,0,0,
+ 76,63,192,12,0,0,0,0,255,255,2,36,
+ 4,0,2,174,4,0,2,142,0,0,0,0,
+ 0,0,2,174,4,0,68,142,0,0,0,0,
+ 76,67,192,12,1,0,5,54,4,0,4,38,
+ 33,40,0,0,255,255,6,36,211,67,192,12,
+ 208,7,7,36,8,0,64,20,255,255,2,52,
+ 2,131,4,60,184,128,132,36,4,0,6,142,
+ 0,0,0,0,15,63,192,12,33,40,128,2,
+ 255,255,2,52,48,1,34,174,4,0,68,142,
+ 0,0,0,0,76,67,192,12,3,0,37,54,
+ 48,1,36,38,33,40,0,0,255,255,6,52,
+ 211,67,192,12,208,7,7,36,7,0,64,20,
+ 0,0,0,0,2,131,4,60,8,129,132,36,
+ 48,1,38,142,0,0,0,0,15,63,192,12,
+ 33,40,128,2,143,63,192,12,0,0,0,0,
+ 40,0,69,142,4,0,68,142,0,0,0,0,
+ 76,67,192,12,2,0,165,52,44,0,81,142,
+ 84,128,131,143,80,128,132,143,100,0,2,36,
+ 0,0,32,166,2,0,32,166,4,0,32,174,
+ 8,0,32,174,12,0,32,174,16,0,32,174,
+ 24,0,32,174,20,0,32,174,28,0,32,174,
+ 32,0,32,174,36,0,34,166,38,0,34,166,
+ 36,0,35,166,38,0,36,166,36,0,83,142,
+ 1,0,2,36,0,0,98,174,44,0,66,142,
+ 0,0,0,0,4,0,98,174,40,0,67,142,
+ 116,0,2,60,0,0,98,172,40,0,67,142,
+ 36,0,66,142,0,0,0,0,8,0,98,172,
+ 8,0,66,142,0,0,0,0,0,0,64,172,
+ 0,0,98,142,0,0,0,0,10,0,64,16,
+ 33,128,0,0,208,7,2,42,7,0,64,16,
+ 0,0,0,0,143,63,192,12,0,0,0,0,
+ 0,0,98,142,0,0,0,0,248,255,64,20,
+ 1,0,16,38,0,0,98,142,0,0,0,0,
+ 6,0,64,16,33,32,32,2,2,131,4,60,
+ 76,129,132,36,15,63,192,12,33,40,128,2,
+ 33,32,32,2,8,0,5,36,0,0,34,150,
+ 8,0,6,36,0,240,66,48,0,6,66,52,
+ 2,0,34,166,8,0,66,142,208,7,7,36,
+ 129,67,192,12,0,0,64,172,6,0,64,20,
+ 2,0,36,38,2,131,4,60,160,129,132,36,
+ 15,63,192,12,33,40,128,2,2,0,36,38,
+ 33,40,0,0,0,0,34,150,33,48,0,0,
+ 0,240,66,48,2,0,34,166,8,0,66,142,
+ 208,7,7,36,129,67,192,12,0,0,64,172,
+ 4,0,64,20,0,0,0,0,2,131,4,60,
+ 15,63,192,12,248,129,132,36,143,63,192,12,
+ 0,0,0,0,108,0,80,142,0,128,2,52,
+ 0,0,0,166,2,0,2,166,44,0,66,142,
+ 0,32,5,36,4,0,80,172,44,0,67,142,
+ 0,241,2,52,2,0,98,164,8,0,66,142,
+ 0,32,6,36,0,0,64,172,44,0,68,142,
+ 0,0,0,0,129,67,192,12,208,7,7,36,
+ 12,0,64,20,0,0,0,0,44,0,66,142,
+ 0,0,0,0,0,0,69,148,2,131,4,60,
+ 15,63,192,12,16,130,132,36,254,255,4,36,
+ 2,131,5,60,44,130,165,36,188,7,192,12,
+ 1,1,6,36,108,0,80,142,2,128,2,52,
+ 0,0,0,166,2,0,2,166,14,0,2,36,
+ 8,0,2,162,200,0,2,36,9,0,2,162,
+ 65,0,2,36,10,0,2,162,46,0,2,36,
+ 11,0,2,162,87,0,2,36,12,0,0,162,
+ 13,0,2,162,242,0,2,36,14,0,0,162,
+ 15,0,2,162,1,0,2,36,16,0,2,162,
+ 8,0,2,36,17,0,2,162,88,128,130,143,
+ 0,0,0,0,6,0,64,16,64,0,2,36,
+ 2,131,4,60,15,63,192,12,56,130,132,36,
+ 88,128,128,175,64,0,2,36,18,0,2,162,
+ 255,0,2,36,19,0,2,162,63,0,2,36,
+ 20,0,0,162,21,0,2,162,44,0,66,142,
+ 0,32,5,36,4,0,80,172,44,0,67,142,
+ 0,33,2,36,2,0,98,164,8,0,66,142,
+ 0,32,6,36,0,0,64,172,44,0,68,142,
+ 0,0,0,0,129,67,192,12,208,7,7,36,
+ 12,0,64,20,0,0,0,0,44,0,66,142,
+ 0,0,0,0,0,0,69,148,2,131,4,60,
+ 15,63,192,12,16,130,132,36,253,255,4,36,
+ 2,131,5,60,44,130,165,36,188,7,192,12,
+ 85,1,6,36,222,21,192,12,33,32,64,2,
+ 122,15,192,12,33,32,64,2,52,0,191,143,
+ 48,0,180,143,44,0,179,143,40,0,178,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 56,0,189,39,248,255,189,39,32,133,133,143,
+ 0,0,0,0,50,0,160,24,33,32,0,0,
+ 2,131,3,60,192,246,99,36,44,0,98,140,
+ 152,0,96,172,156,0,96,172,160,0,96,172,
+ 164,0,96,172,168,0,96,172,172,0,96,172,
+ 176,0,96,172,180,0,96,172,184,0,96,172,
+ 188,0,96,172,192,0,96,172,196,0,96,172,
+ 200,0,96,172,204,0,96,172,208,0,96,172,
+ 212,0,96,172,216,0,96,172,224,0,96,172,
+ 232,0,96,172,236,0,96,172,240,0,96,172,
+ 244,0,96,172,248,0,96,172,252,0,96,172,
+ 0,1,96,172,4,1,96,172,8,1,96,172,
+ 12,0,64,172,44,0,98,140,0,0,0,0,
+ 16,0,64,172,44,0,98,140,0,0,0,0,
+ 24,0,64,172,44,0,98,140,0,0,0,0,
+ 20,0,64,172,44,0,98,140,1,0,132,36,
+ 28,0,64,172,44,0,98,140,0,2,99,36,
+ 32,0,64,172,42,16,133,0,210,255,64,20,
+ 0,0,0,0,33,32,0,0,0,163,3,60,
+ 0,1,99,52,32,0,5,36,33,16,131,0,
+ 188,0,69,160,1,0,132,36,0,2,130,44,
+ 251,255,64,20,0,0,0,0,8,0,224,3,
+ 8,0,189,39,0,0,0,0,124,133,130,143,
+ 232,255,189,39,20,0,191,175,17,0,64,20,
+ 16,0,176,175,208,7,16,36,7,0,0,26,
+ 0,0,0,0,143,63,192,12,255,255,16,38,
+ 124,133,130,143,0,0,0,0,249,255,64,16,
+ 0,0,0,0,6,0,0,22,0,0,0,0,
+ 2,131,4,60,15,63,192,12,80,130,132,36,
+ 45,14,192,8,33,16,0,0,220,63,192,12,
+ 33,32,0,0,33,32,64,0,124,133,144,143,
+ 128,133,130,143,4,0,3,142,255,255,66,36,
+ 128,133,130,175,124,133,131,175,220,63,192,12,
+ 0,0,0,0,33,16,0,2,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 232,255,189,39,96,133,130,143,33,40,128,0,
+ 43,16,162,0,6,0,64,20,16,0,191,175,
+ 100,133,130,143,0,0,0,0,43,16,162,0,
+ 6,0,64,20,0,0,0,0,2,131,4,60,
+ 15,63,192,12,116,130,132,36,71,14,192,8,
+ 0,0,0,0,124,133,131,143,128,133,130,143,
+ 124,133,133,175,1,0,66,36,4,0,163,172,
+ 128,133,130,175,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,108,133,130,143,
+ 232,255,189,39,20,0,191,175,17,0,64,20,
+ 16,0,176,175,208,7,16,36,7,0,0,26,
+ 0,0,0,0,143,63,192,12,255,255,16,38,
+ 108,133,130,143,0,0,0,0,249,255,64,16,
+ 0,0,0,0,6,0,0,22,0,0,0,0,
+ 2,131,4,60,15,63,192,12,148,130,132,36,
+ 108,14,192,8,33,16,0,0,220,63,192,12,
+ 33,32,0,0,33,32,64,0,108,133,144,143,
+ 120,133,130,143,0,0,3,142,255,255,66,36,
+ 120,133,130,175,108,133,131,175,220,63,192,12,
+ 0,0,0,0,33,16,0,2,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 232,255,189,39,104,133,130,143,33,40,128,0,
+ 43,16,162,0,6,0,64,20,16,0,191,175,
+ 112,133,130,143,0,0,0,0,43,16,162,0,
+ 6,0,64,20,0,0,0,0,2,131,4,60,
+ 15,63,192,12,184,130,132,36,135,14,192,8,
+ 0,0,0,0,108,133,130,143,0,0,0,0,
+ 0,0,162,172,120,133,130,143,108,133,133,175,
+ 1,0,66,36,120,133,130,175,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,20,0,191,175,16,0,176,175,
+ 124,133,128,175,13,8,192,12,0,32,4,36,
+ 255,31,3,60,255,255,99,52,255,1,16,36,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 96,133,130,175,0,32,66,36,100,133,130,175,
+ 0,17,16,0,96,133,132,143,255,255,16,38,
+ 49,14,192,12,33,32,130,0,251,255,1,6,
+ 0,17,16,0,0,2,2,36,132,133,130,175,
+ 108,133,128,175,13,8,192,12,18,0,4,60,
+ 255,31,3,60,255,255,99,52,255,17,16,36,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 18,0,3,60,104,133,130,175,33,16,67,0,
+ 112,133,130,175,0,18,16,0,104,133,132,143,
+ 255,255,16,38,112,14,192,12,33,32,130,0,
+ 251,255,1,6,0,18,16,0,0,18,2,36,
+ 116,133,130,175,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,163,2,60,
+ 168,1,66,140,216,255,189,39,28,0,177,175,
+ 33,136,128,0,32,0,178,175,33,144,160,0,
+ 36,0,191,175,17,0,64,16,24,0,176,175,
+ 0,163,2,60,168,1,66,140,0,0,0,0,
+ 42,16,82,0,12,0,64,16,128,128,18,0,
+ 0,0,34,142,0,163,18,60,168,1,82,142,
+ 0,0,0,0,6,0,64,20,128,128,18,0,
+ 2,131,4,60,224,130,132,36,15,63,192,12,
+ 33,40,64,2,128,128,18,0,33,128,18,2,
+ 128,128,16,0,13,8,192,12,33,32,0,2,
+ 255,31,3,60,255,255,99,52,33,32,0,0,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 112,0,34,174,112,0,35,142,33,16,80,0,
+ 15,0,64,26,116,0,34,174,8,0,5,36,
+ 1,0,132,36,20,0,98,36,4,0,98,172,
+ 2,0,101,164,0,0,96,164,8,0,96,172,
+ 14,0,96,164,12,0,96,164,33,24,64,0,
+ 42,16,146,0,246,255,64,20,1,0,132,36,
+ 255,255,132,36,116,0,35,142,112,0,34,142,
+ 0,0,0,0,240,255,98,172,116,0,35,142,
+ 0,0,0,0,218,255,98,148,0,0,0,0,
+ 0,128,66,52,218,255,98,164,116,0,35,142,
+ 0,0,0,0,238,255,98,148,0,0,0,0,
+ 0,128,66,52,238,255,98,164,116,0,34,142,
+ 112,0,35,142,216,255,66,36,120,0,35,174,
+ 124,0,34,174,36,0,191,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 40,0,189,39,200,255,189,39,32,0,178,175,
+ 33,144,128,0,0,1,2,36,48,0,191,175,
+ 44,0,181,175,40,0,180,175,36,0,179,175,
+ 28,0,177,175,24,0,176,175,144,0,66,174,
+ 0,163,2,60,172,1,66,140,0,0,0,0,
+ 17,0,64,16,33,160,160,0,0,163,2,60,
+ 172,1,66,140,0,0,0,0,42,16,84,0,
+ 12,0,64,16,128,128,20,0,0,0,66,142,
+ 0,163,20,60,172,1,148,142,0,0,0,0,
+ 6,0,64,20,128,128,20,0,2,131,4,60,
+ 236,130,132,36,15,63,192,12,33,40,128,2,
+ 128,128,20,0,33,128,20,2,128,128,16,0,
+ 33,32,0,2,13,8,192,12,148,0,84,174,
+ 255,31,3,60,255,255,99,52,33,152,0,0,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 128,0,66,174,128,0,81,142,33,16,80,0,
+ 15,0,128,26,132,0,66,174,0,1,21,36,
+ 20,0,48,38,4,0,48,174,75,14,192,12,
+ 0,0,32,174,8,0,34,174,12,0,53,174,
+ 0,0,66,142,1,0,115,38,16,0,34,162,
+ 17,0,32,162,42,16,116,2,244,255,64,20,
+ 33,136,0,2,132,0,67,142,128,0,66,142,
+ 0,0,0,0,240,255,98,172,132,0,67,142,
+ 0,0,0,0,228,255,98,140,0,0,0,0,
+ 0,128,66,52,228,255,98,172,132,0,67,142,
+ 0,0,0,0,248,255,98,140,0,0,0,0,
+ 0,128,66,52,248,255,98,172,132,0,66,142,
+ 128,0,67,142,216,255,66,36,136,0,67,174,
+ 140,0,66,174,48,0,191,143,44,0,181,143,
+ 40,0,180,143,36,0,179,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 56,0,189,39,152,0,128,172,156,0,128,172,
+ 160,0,128,172,164,0,128,172,168,0,128,172,
+ 252,0,128,172,0,1,128,172,152,0,128,172,
+ 8,0,224,3,216,0,128,172,232,255,189,39,
+ 16,0,176,175,20,0,191,175,112,15,192,12,
+ 33,128,128,0,33,32,0,2,192,14,192,12,
+ 0,4,5,36,33,32,0,2,19,15,192,12,
+ 128,2,5,36,120,0,3,142,136,0,2,142,
+ 0,0,0,0,8,0,98,172,44,0,3,142,
+ 120,0,2,142,0,0,0,0,8,0,98,172,
+ 0,0,2,142,0,0,0,0,255,255,66,36,
+ 6,0,66,44,7,0,64,16,16,0,3,36,
+ 44,0,2,142,0,0,0,0,2,0,67,164,
+ 8,0,2,142,0,0,0,0,0,0,64,172,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,184,255,189,39,0,32,6,36,
+ 68,0,191,175,64,0,190,175,60,0,183,175,
+ 56,0,182,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,0,163,1,60,252,5,38,172,
+ 13,8,192,12,0,32,4,36,255,31,4,60,
+ 255,255,132,52,33,168,0,0,255,31,6,60,
+ 255,255,198,52,2,131,3,60,212,247,99,36,
+ 16,0,101,36,8,0,126,36,248,255,119,36,
+ 33,176,96,0,36,16,68,0,0,160,3,60,
+ 37,16,67,0,16,0,166,175,0,163,1,60,
+ 248,5,34,172,0,163,1,60,0,6,32,172,
+ 33,160,0,0,33,128,224,2,33,152,160,0,
+ 33,144,192,3,33,136,192,2,32,133,130,143,
+ 0,0,32,174,0,0,0,174,0,0,64,174,
+ 42,16,162,2,10,0,64,16,0,0,96,174,
+ 0,32,4,36,13,8,192,12,24,0,165,175,
+ 16,0,166,143,0,128,3,60,36,16,70,0,
+ 37,16,67,0,0,0,2,174,24,0,165,143,
+ 4,0,16,38,4,0,115,38,4,0,82,38,
+ 1,0,148,38,2,0,130,42,234,255,64,20,
+ 4,0,49,38,0,2,165,36,0,2,222,39,
+ 0,2,247,38,1,0,181,38,7,0,162,42,
+ 222,255,64,20,0,2,214,38,68,0,191,143,
+ 64,0,190,143,60,0,183,143,56,0,182,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,72,0,189,39,0,163,4,60,
+ 0,6,132,140,0,163,3,60,8,6,99,140,
+ 32,133,130,143,224,255,189,39,16,0,176,175,
+ 0,163,16,60,12,6,16,142,20,0,177,175,
+ 0,163,17,60,4,6,49,142,43,16,98,0,
+ 42,0,64,16,24,0,191,175,2,0,2,46,
+ 40,0,64,16,255,255,2,36,0,163,2,60,
+ 252,5,66,140,0,0,0,0,43,16,81,0,
+ 34,0,64,20,255,255,2,36,64,18,3,0,
+ 2,131,3,60,192,246,99,36,33,24,67,0,
+ 1,0,2,36,5,0,130,16,2,0,2,36,
+ 18,0,130,16,128,16,16,0,36,16,192,8,
+ 0,0,0,0,128,128,16,0,33,128,3,2,
+ 12,1,4,142,0,163,5,60,248,5,165,140,
+ 33,48,32,2,80,68,192,12,36,1,17,174,
+ 12,1,4,142,12,1,2,142,33,40,32,2,
+ 114,68,192,12,20,1,2,174,36,16,192,8,
+ 0,0,0,0,33,16,67,0,20,1,64,172,
+ 36,1,64,172,0,163,1,60,42,16,192,8,
+ 0,6,32,172,255,255,2,36,0,163,1,60,
+ 0,6,34,172,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 176,133,136,143,188,133,137,143,232,255,189,39,
+ 3,0,0,21,16,0,191,175,124,0,32,17,
+ 0,0,0,0,12,0,194,148,0,0,0,0,
+ 0,26,2,0,2,18,2,0,37,56,98,0,
+ 255,255,227,48,221,5,98,44,36,0,64,20,
+ 170,170,2,52,0,8,2,36,23,0,98,20,
+ 6,8,2,36,21,0,0,17,0,0,0,0,
+ 32,0,194,148,30,0,195,148,0,20,2,0,
+ 37,56,67,0,36,0,195,148,0,161,2,52,
+ 5,0,98,16,8,0,2,36,34,0,195,148,
+ 0,0,0,0,98,0,98,20,0,0,0,0,
+ 3,0,232,16,255,255,2,36,94,0,226,20,
+ 0,0,0,0,226,46,192,12,14,0,6,36,
+ 177,16,192,8,0,0,0,0,7,0,98,20,
+ 255,255,227,48,71,0,0,17,55,129,2,52,
+ 108,43,192,12,14,0,6,36,177,16,192,8,
+ 0,0,0,0,162,16,192,8,55,129,2,52,
+ 14,0,195,148,0,0,0,0,61,0,98,20,
+ 255,255,2,52,16,0,195,144,3,0,2,36,
+ 55,0,98,20,255,255,2,52,20,0,194,148,
+ 0,0,0,0,0,26,2,0,2,18,2,0,
+ 37,56,98,0,255,255,227,48,0,8,2,36,
+ 23,0,98,20,6,8,2,36,21,0,0,17,
+ 0,0,0,0,40,0,194,148,38,0,195,148,
+ 0,20,2,0,37,56,67,0,44,0,195,148,
+ 0,161,2,52,5,0,98,16,8,0,2,36,
+ 42,0,195,148,0,0,0,0,49,0,98,20,
+ 0,0,0,0,3,0,232,16,255,255,2,36,
+ 45,0,226,20,0,0,0,0,226,46,192,12,
+ 22,0,6,36,177,16,192,8,0,0,0,0,
+ 7,0,98,20,255,255,227,48,6,0,0,17,
+ 55,129,2,52,108,43,192,12,22,0,6,36,
+ 177,16,192,8,0,0,0,0,55,129,2,52,
+ 30,0,98,20,0,0,0,0,28,0,32,17,
+ 144,15,3,36,38,0,194,148,28,0,198,140,
+ 24,0,67,20,0,0,0,0,3,0,201,16,
+ 0,0,0,0,20,0,192,20,0,0,0,0,
+ 175,16,192,8,22,0,6,36,14,0,195,148,
+ 0,0,0,0,14,0,98,20,0,0,0,0,
+ 12,0,32,17,144,15,3,36,30,0,194,148,
+ 20,0,198,140,8,0,67,20,0,0,0,0,
+ 3,0,201,16,0,0,0,0,4,0,192,20,
+ 0,0,0,0,14,0,6,36,126,49,192,12,
+ 0,0,0,0,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,128,255,189,39,
+ 116,0,183,175,33,184,128,0,3,0,3,36,
+ 124,0,191,175,120,0,190,175,112,0,182,175,
+ 108,0,181,175,104,0,180,175,100,0,179,175,
+ 96,0,178,175,92,0,177,175,88,0,176,175,
+ 0,0,245,142,8,0,178,140,192,17,21,0,
+ 3,131,4,60,33,32,130,0,20,13,132,140,
+ 8,0,84,142,0,0,0,0,59,0,131,16,
+ 5,0,130,44,57,0,64,16,128,16,4,0,
+ 2,131,1,60,33,8,34,0,104,131,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 44,133,130,143,0,0,0,0,48,0,64,16,
+ 6,0,132,38,4,0,131,150,2,131,2,60,
+ 68,207,66,148,0,0,0,0,6,0,98,20,
+ 33,32,0,0,0,0,130,142,48,129,131,143,
+ 0,0,0,0,38,16,67,0,1,0,68,44,
+ 72,1,128,16,33,32,160,2,114,42,192,12,
+ 33,40,128,2,45,18,192,8,33,32,64,2,
+ 44,133,130,143,0,0,0,0,27,0,64,16,
+ 6,0,132,38,4,0,131,150,2,131,2,60,
+ 68,207,66,148,0,0,0,0,6,0,98,20,
+ 33,32,0,0,0,0,130,142,48,129,131,143,
+ 0,0,0,0,38,16,67,0,1,0,68,44,
+ 5,0,128,16,33,32,160,2,114,42,192,12,
+ 33,40,128,2,45,18,192,8,33,32,64,2,
+ 6,0,132,38,0,163,6,60,140,1,198,140,
+ 0,0,0,0,247,24,192,12,33,40,160,2,
+ 45,18,192,8,33,32,64,2,6,0,132,38,
+ 0,163,6,60,140,1,198,140,0,0,0,0,
+ 247,24,192,12,33,40,160,2,203,24,192,12,
+ 33,32,128,2,20,1,227,142,0,0,0,0,
+ 14,0,96,16,33,240,64,0,33,32,128,2,
+ 16,0,166,39,18,0,69,150,0,0,0,0,
+ 9,248,96,0,33,56,192,3,6,0,64,16,
+ 33,32,64,2,28,1,226,142,0,0,0,0,
+ 1,0,66,36,45,18,192,8,28,1,226,174,
+ 132,0,193,7,7,0,2,36,4,0,131,150,
+ 2,131,2,60,68,207,66,148,0,0,0,0,
+ 6,0,98,20,33,32,0,0,0,0,130,142,
+ 48,129,131,143,0,0,0,0,38,16,67,0,
+ 1,0,68,44,9,0,128,16,255,255,2,36,
+ 44,133,130,143,0,0,0,0,251,0,64,16,
+ 33,32,160,2,114,42,192,12,33,40,128,2,
+ 45,18,192,8,33,32,64,2,10,0,194,23,
+ 0,0,0,0,8,0,160,18,0,0,0,0,
+ 36,133,130,143,0,0,0,0,8,0,64,16,
+ 1,0,19,36,80,133,147,143,69,17,192,8,
+ 0,0,0,0,0,1,226,142,80,133,147,143,
+ 1,0,66,36,0,1,226,174,84,133,130,143,
+ 0,0,0,0,35,16,83,0,255,255,66,36,
+ 17,0,66,162,84,133,130,143,33,128,96,2,
+ 42,16,2,2,15,0,64,16,64,18,16,0,
+ 2,131,3,60,192,246,99,36,33,136,67,0,
+ 5,0,21,18,0,0,0,0,247,22,192,12,
+ 33,32,32,2,217,0,64,16,33,16,0,0,
+ 84,133,130,143,1,0,16,38,42,16,2,2,
+ 246,255,64,20,0,2,49,38,84,133,130,143,
+ 33,128,96,2,42,16,2,2,55,0,64,16,
+ 64,18,16,0,2,131,3,60,192,246,99,36,
+ 33,152,67,0,33,136,64,0,192,177,16,0,
+ 41,0,21,18,0,0,0,0,2,131,2,60,
+ 33,16,81,0,216,247,66,140,0,0,0,0,
+ 15,0,64,16,33,32,128,2,16,0,166,39,
+ 18,0,69,150,0,0,0,0,9,248,64,0,
+ 33,56,160,2,8,0,64,16,0,0,0,0,
+ 2,131,2,60,33,16,81,0,224,247,66,140,
+ 0,0,0,0,1,0,66,36,140,17,192,8,
+ 32,1,98,174,44,133,130,143,0,0,0,0,
+ 7,0,64,16,3,0,8,36,3,131,2,60,
+ 33,16,86,0,20,13,66,140,0,0,0,0,
+ 6,0,72,20,0,0,0,0,33,32,96,2,
+ 6,23,192,12,33,40,64,2,146,17,192,8,
+ 0,2,115,38,17,0,66,146,0,0,0,0,
+ 255,255,66,36,17,0,66,162,17,0,66,146,
+ 0,2,115,38,0,2,49,38,84,133,130,143,
+ 1,0,16,38,42,16,2,2,208,255,64,20,
+ 128,0,214,38,254,255,2,36,4,0,194,23,
+ 33,32,224,2,33,40,64,2,47,16,192,12,
+ 33,48,128,2,17,0,66,146,0,0,0,0,
+ 140,0,64,16,33,32,64,2,36,18,192,8,
+ 0,0,0,0,26,0,194,23,0,0,0,0,
+ 36,133,130,143,0,0,0,0,11,0,64,16,
+ 33,32,224,2,9,0,160,18,1,0,2,36,
+ 17,0,66,162,2,131,4,60,192,246,132,36,
+ 6,23,192,12,33,40,64,2,126,0,64,16,
+ 33,16,0,0,33,32,224,2,33,40,64,2,
+ 47,16,192,12,33,48,128,2,36,133,130,143,
+ 0,0,0,0,115,0,64,16,33,32,64,2,
+ 116,0,160,22,1,0,2,36,45,18,192,8,
+ 0,0,0,0,87,0,213,19,64,130,30,0,
+ 2,131,2,60,33,16,80,0,216,247,66,140,
+ 0,0,0,0,18,0,64,16,33,32,128,2,
+ 16,0,166,39,18,0,69,150,0,0,0,0,
+ 9,248,64,0,33,56,160,2,11,0,64,16,
+ 33,32,64,2,2,131,2,60,33,16,80,0,
+ 224,247,66,140,0,0,0,0,1,0,66,36,
+ 2,131,1,60,33,8,48,0,224,247,34,172,
+ 45,18,192,8,17,0,128,160,36,133,130,143,
+ 0,0,0,0,43,0,64,16,0,0,0,0,
+ 41,0,192,19,0,0,0,0,39,0,160,18,
+ 64,18,30,0,2,131,16,60,192,246,16,38,
+ 33,136,80,0,247,22,192,12,33,32,32,2,
+ 74,0,64,16,33,16,0,0,247,22,192,12,
+ 33,32,0,2,63,0,64,16,2,0,2,36,
+ 17,0,66,162,44,133,130,143,0,0,0,0,
+ 7,0,64,16,192,17,30,0,3,131,3,60,
+ 33,24,98,0,20,13,99,140,3,0,2,36,
+ 6,0,98,20,0,0,0,0,33,32,32,2,
+ 6,23,192,12,33,40,64,2,0,18,192,8,
+ 0,0,0,0,17,0,66,146,0,0,0,0,
+ 255,255,66,36,17,0,66,162,17,0,66,146,
+ 2,131,4,60,192,246,132,36,6,23,192,12,
+ 33,40,64,2,36,18,192,8,0,0,0,0,
+ 44,133,130,143,0,0,0,0,7,0,64,16,
+ 192,17,30,0,3,131,3,60,33,24,98,0,
+ 20,13,99,140,3,0,2,36,28,0,98,20,
+ 0,0,0,0,1,0,2,36,17,0,66,162,
+ 64,18,30,0,2,131,4,60,192,246,132,36,
+ 32,18,192,8,33,32,68,0,36,133,130,143,
+ 0,0,0,0,17,0,64,16,0,0,0,0,
+ 15,0,192,19,1,0,2,36,17,0,66,162,
+ 2,131,4,60,192,246,132,36,6,23,192,12,
+ 33,40,64,2,13,0,64,16,33,16,0,0,
+ 252,0,226,142,0,0,0,0,1,0,66,36,
+ 47,18,192,8,252,0,226,174,48,18,192,8,
+ 33,16,0,0,17,0,64,162,33,32,64,2,
+ 152,21,192,12,0,0,0,0,1,0,2,36,
+ 124,0,191,143,120,0,190,143,116,0,183,143,
+ 112,0,182,143,108,0,181,143,104,0,180,143,
+ 100,0,179,143,96,0,178,143,92,0,177,143,
+ 88,0,176,143,8,0,224,3,128,0,189,39,
+ 216,255,189,39,24,0,178,175,33,144,128,0,
+ 32,0,191,175,28,0,179,175,20,0,177,175,
+ 16,0,176,175,8,0,177,140,0,0,66,142,
+ 8,0,38,142,36,0,64,16,0,0,0,0,
+ 28,0,66,142,0,0,0,0,18,0,64,20,
+ 1,0,2,36,0,0,194,144,0,0,0,0,
+ 1,0,66,48,13,0,64,20,1,0,2,36,
+ 4,0,195,148,24,0,66,150,0,0,0,0,
+ 6,0,98,20,33,32,0,0,0,0,194,140,
+ 20,0,67,142,0,0,0,0,38,16,67,0,
+ 1,0,68,44,10,0,128,16,1,0,2,36,
+ 17,0,34,162,2,131,4,60,192,246,132,36,
+ 6,23,192,12,33,40,32,2,45,0,64,16,
+ 33,16,0,0,139,18,192,8,0,0,0,0,
+ 17,0,32,162,152,21,192,12,33,32,32,2,
+ 144,18,192,8,1,0,2,36,16,0,179,140,
+ 0,0,0,0,6,0,96,26,0,0,0,0,
+ 32,133,130,143,0,0,0,0,42,16,98,2,
+ 15,0,64,20,1,0,2,36,2,131,4,60,
+ 248,130,132,36,2,131,16,60,24,131,16,38,
+ 33,40,0,2,2,131,7,60,36,131,231,36,
+ 15,63,192,12,188,2,6,36,1,0,4,36,
+ 33,40,0,2,188,7,192,12,188,2,6,36,
+ 1,0,2,36,17,0,34,162,64,18,19,0,
+ 2,131,4,60,192,246,132,36,33,32,68,0,
+ 6,23,192,12,33,40,32,2,6,0,64,16,
+ 33,16,0,0,252,0,66,142,0,0,0,0,
+ 1,0,66,36,252,0,66,174,1,0,2,36,
+ 32,0,191,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,72,255,189,39,164,0,181,175,
+ 33,168,128,0,180,0,191,175,176,0,190,175,
+ 172,0,183,175,168,0,182,175,160,0,180,175,
+ 156,0,179,175,152,0,178,175,148,0,177,175,
+ 144,0,176,175,88,0,165,175,120,0,160,175,
+ 120,0,168,142,0,0,0,0,96,0,168,175,
+ 124,0,169,142,0,0,0,0,15,2,160,24,
+ 104,0,169,175,96,0,168,143,0,0,0,0,
+ 0,0,4,149,0,0,0,0,0,128,130,48,
+ 9,2,64,16,0,0,0,0,128,0,160,175,
+ 8,0,2,141,136,0,169,142,255,255,8,36,
+ 18,0,72,16,112,0,169,175,112,0,169,143,
+ 0,0,0,0,0,0,35,141,4,0,40,141,
+ 128,0,169,143,255,63,98,48,33,72,34,1,
+ 0,128,99,48,112,0,168,175,246,255,96,16,
+ 128,0,169,175,96,0,168,143,0,0,0,0,
+ 8,0,2,141,128,0,169,151,0,0,0,0,
+ 18,0,73,164,0,32,130,48,200,1,64,16,
+ 0,0,0,0,40,133,130,143,0,0,0,0,
+ 75,0,64,16,3,0,8,36,96,0,168,143,
+ 0,0,0,0,8,0,16,141,0,0,162,142,
+ 8,0,5,142,30,0,64,16,0,0,0,0,
+ 28,0,162,142,0,0,0,0,18,0,64,20,
+ 1,0,9,36,0,0,162,144,0,0,0,0,
+ 1,0,66,48,13,0,64,20,0,0,0,0,
+ 4,0,163,148,24,0,162,150,0,0,0,0,
+ 6,0,98,20,33,32,0,0,0,0,162,140,
+ 20,0,163,142,0,0,0,0,38,16,67,0,
+ 1,0,68,44,6,0,128,16,1,0,9,36,
+ 17,0,9,162,2,131,4,60,192,246,132,36,
+ 18,19,192,8,33,40,0,2,17,0,0,162,
+ 130,20,192,8,33,32,0,2,16,0,17,141,
+ 0,0,0,0,6,0,32,26,0,0,0,0,
+ 32,133,130,143,0,0,0,0,42,16,34,2,
+ 15,0,64,20,1,0,9,36,2,131,4,60,
+ 248,130,132,36,2,131,5,60,24,131,165,36,
+ 2,131,7,60,36,131,231,36,15,63,192,12,
+ 188,2,6,36,1,0,4,36,2,131,5,60,
+ 24,131,165,36,188,7,192,12,188,2,6,36,
+ 1,0,9,36,17,0,9,162,64,34,17,0,
+ 2,131,8,60,192,246,8,37,33,32,136,0,
+ 33,40,0,2,6,23,192,12,0,0,0,0,
+ 112,1,64,16,33,16,0,0,252,0,162,142,
+ 0,0,0,0,1,0,66,36,132,20,192,8,
+ 252,0,162,174,0,0,182,142,96,0,169,143,
+ 192,17,22,0,8,0,50,141,3,131,3,60,
+ 33,24,98,0,20,13,99,140,8,0,84,142,
+ 0,0,0,0,59,0,104,16,5,0,98,44,
+ 57,0,64,16,128,16,3,0,2,131,1,60,
+ 33,8,34,0,128,131,34,140,0,0,0,0,
+ 8,0,64,0,0,0,0,0,44,133,130,143,
+ 0,0,0,0,48,0,64,16,6,0,132,38,
+ 4,0,131,150,2,131,2,60,68,207,66,148,
+ 0,0,0,0,6,0,98,20,33,32,0,0,
+ 0,0,130,142,48,129,131,143,0,0,0,0,
+ 38,16,67,0,1,0,68,44,67,1,128,16,
+ 33,32,192,2,114,42,192,12,33,40,128,2,
+ 130,20,192,8,33,32,64,2,44,133,130,143,
+ 0,0,0,0,27,0,64,16,6,0,132,38,
+ 4,0,131,150,2,131,2,60,68,207,66,148,
+ 0,0,0,0,6,0,98,20,33,32,0,0,
+ 0,0,130,142,48,129,131,143,0,0,0,0,
+ 38,16,67,0,1,0,68,44,5,0,128,16,
+ 33,32,192,2,114,42,192,12,33,40,128,2,
+ 130,20,192,8,33,32,64,2,6,0,132,38,
+ 0,163,6,60,140,1,198,140,0,0,0,0,
+ 247,24,192,12,33,40,192,2,130,20,192,8,
+ 33,32,64,2,6,0,132,38,0,163,6,60,
+ 140,1,198,140,0,0,0,0,247,24,192,12,
+ 33,40,192,2,203,24,192,12,33,32,128,2,
+ 20,1,163,142,0,0,0,0,14,0,96,16,
+ 33,240,64,0,33,32,128,2,16,0,166,39,
+ 18,0,69,150,0,0,0,0,9,248,96,0,
+ 33,56,192,3,6,0,64,16,33,32,64,2,
+ 28,1,162,142,0,0,0,0,1,0,66,36,
+ 130,20,192,8,28,1,162,174,132,0,193,7,
+ 7,0,2,36,4,0,131,150,2,131,2,60,
+ 68,207,66,148,0,0,0,0,6,0,98,20,
+ 33,32,0,0,0,0,130,142,48,129,131,143,
+ 0,0,0,0,38,16,67,0,1,0,68,44,
+ 9,0,128,16,255,255,9,36,44,133,130,143,
+ 0,0,0,0,246,0,64,16,33,32,192,2,
+ 114,42,192,12,33,40,128,2,130,20,192,8,
+ 33,32,64,2,10,0,201,23,0,0,0,0,
+ 8,0,192,18,0,0,0,0,36,133,130,143,
+ 0,0,0,0,8,0,64,16,1,0,19,36,
+ 80,133,147,143,159,19,192,8,0,0,0,0,
+ 0,1,162,142,80,133,147,143,1,0,66,36,
+ 0,1,162,174,84,133,130,143,0,0,0,0,
+ 35,16,83,0,255,255,66,36,17,0,66,162,
+ 84,133,130,143,33,136,96,2,42,16,34,2,
+ 15,0,64,16,64,18,17,0,2,131,8,60,
+ 192,246,8,37,33,128,72,0,5,0,54,18,
+ 0,0,0,0,247,22,192,12,33,32,0,2,
+ 212,0,64,16,33,16,0,0,84,133,130,143,
+ 1,0,49,38,42,16,34,2,246,255,64,20,
+ 0,2,16,38,84,133,130,143,33,136,96,2,
+ 42,16,34,2,55,0,64,16,64,18,17,0,
+ 2,131,9,60,192,246,41,37,33,152,73,0,
+ 33,128,64,0,192,185,17,0,41,0,54,18,
+ 0,0,0,0,2,131,2,60,33,16,80,0,
+ 216,247,66,140,0,0,0,0,15,0,64,16,
+ 33,32,128,2,16,0,166,39,18,0,69,150,
+ 0,0,0,0,9,248,64,0,33,56,192,2,
+ 8,0,64,16,0,0,0,0,2,131,2,60,
+ 33,16,80,0,224,247,66,140,0,0,0,0,
+ 1,0,66,36,230,19,192,8,32,1,98,174,
+ 44,133,130,143,0,0,0,0,7,0,64,16,
+ 3,0,8,36,3,131,2,60,33,16,87,0,
+ 20,13,66,140,0,0,0,0,6,0,72,20,
+ 0,0,0,0,33,32,96,2,6,23,192,12,
+ 33,40,64,2,236,19,192,8,0,2,115,38,
+ 17,0,66,146,0,0,0,0,255,255,66,36,
+ 17,0,66,162,17,0,66,146,0,2,115,38,
+ 0,2,16,38,84,133,130,143,1,0,49,38,
+ 42,16,34,2,208,255,64,20,128,0,247,38,
+ 254,255,2,36,4,0,194,23,33,32,160,2,
+ 33,40,64,2,47,16,192,12,33,48,128,2,
+ 17,0,66,146,0,0,0,0,135,0,64,16,
+ 33,32,64,2,22,19,192,8,0,0,0,0,
+ 26,0,194,23,0,0,0,0,36,133,130,143,
+ 0,0,0,0,11,0,64,16,33,32,160,2,
+ 9,0,192,18,1,0,9,36,17,0,73,162,
+ 2,131,4,60,192,246,132,36,6,23,192,12,
+ 33,40,64,2,121,0,64,16,33,16,0,0,
+ 33,32,160,2,33,40,64,2,47,16,192,12,
+ 33,48,128,2,36,133,130,143,0,0,0,0,
+ 110,0,64,16,33,32,64,2,111,0,192,22,
+ 1,0,2,36,130,20,192,8,0,0,0,0,
+ 89,0,214,19,64,130,30,0,2,131,2,60,
+ 33,16,80,0,216,247,66,140,0,0,0,0,
+ 18,0,64,16,33,32,128,2,16,0,166,39,
+ 18,0,69,150,0,0,0,0,9,248,64,0,
+ 33,56,192,2,11,0,64,16,33,32,64,2,
+ 2,131,8,60,192,246,8,37,2,131,2,60,
+ 33,16,80,0,224,247,66,140,33,24,8,2,
+ 1,0,66,36,32,1,98,172,130,20,192,8,
+ 17,0,128,160,36,133,130,143,0,0,0,0,
+ 44,0,64,16,0,0,0,0,42,0,192,19,
+ 0,0,0,0,40,0,192,18,64,18,30,0,
+ 2,131,9,60,192,246,41,37,33,128,73,0,
+ 247,22,192,12,33,32,0,2,69,0,64,16,
+ 33,16,0,0,2,131,4,60,247,22,192,12,
+ 192,246,132,36,57,0,64,16,2,0,2,36,
+ 17,0,66,162,44,133,130,143,0,0,0,0,
+ 7,0,64,16,192,17,30,0,3,131,1,60,
+ 33,8,34,0,20,13,34,140,3,0,8,36,
+ 6,0,72,20,0,0,0,0,33,32,0,2,
+ 6,23,192,12,33,40,64,2,91,20,192,8,
+ 0,0,0,0,17,0,66,146,0,0,0,0,
+ 255,255,66,36,17,0,66,162,17,0,66,146,
+ 2,131,4,60,192,246,132,36,6,23,192,12,
+ 33,40,64,2,22,19,192,8,0,0,0,0,
+ 44,133,130,143,0,0,0,0,7,0,64,16,
+ 192,17,30,0,3,131,1,60,33,8,34,0,
+ 20,13,34,140,3,0,9,36,22,0,73,20,
+ 0,0,0,0,1,0,8,36,17,0,72,162,
+ 64,34,30,0,2,131,9,60,192,246,41,37,
+ 33,32,137,0,18,19,192,8,33,40,64,2,
+ 36,133,130,143,0,0,0,0,10,0,64,16,
+ 0,0,0,0,8,0,192,19,1,0,8,36,
+ 17,0,72,162,2,131,4,60,192,246,132,36,
+ 18,19,192,8,33,40,64,2,133,20,192,8,
+ 33,16,0,0,17,0,64,162,33,32,64,2,
+ 152,21,192,12,0,0,0,0,1,0,2,36,
+ 52,0,64,16,0,0,0,0,152,0,162,142,
+ 0,0,0,0,1,0,66,36,152,0,162,174,
+ 156,0,162,142,168,0,163,142,1,0,66,36,
+ 156,0,162,174,128,0,169,143,0,0,0,0,
+ 33,24,105,0,163,20,192,8,168,0,163,174,
+ 152,0,162,142,160,0,163,142,1,0,66,36,
+ 1,0,99,36,152,0,162,174,160,0,163,174,
+ 96,0,168,143,0,0,0,0,8,0,2,141,
+ 255,255,9,36,4,0,73,16,0,0,0,0,
+ 8,0,4,141,152,21,192,12,0,0,0,0,
+ 120,0,168,143,112,0,169,143,1,0,8,37,
+ 120,0,168,175,136,0,169,174,96,0,168,143,
+ 8,128,2,52,0,0,0,165,2,0,2,165,
+ 104,0,169,143,8,0,2,36,2,0,34,165,
+ 4,0,40,141,96,0,169,143,104,0,168,175,
+ 4,0,41,141,120,0,168,143,96,0,169,175,
+ 88,0,169,143,0,0,0,0,42,16,9,1,
+ 243,253,64,20,0,0,0,0,96,0,168,143,
+ 44,0,163,142,120,0,168,174,104,0,169,143,
+ 0,0,0,0,124,0,169,174,0,0,98,148,
+ 0,0,0,0,0,16,66,48,43,0,64,16,
+ 0,0,0,0,2,0,98,148,0,0,0,0,
+ 39,0,64,20,0,0,0,0,0,0,2,149,
+ 0,0,0,0,35,0,64,20,0,0,0,0,
+ 2,0,2,149,8,0,3,36,255,255,66,48,
+ 30,0,67,20,0,0,0,0,136,0,162,142,
+ 0,0,0,0,12,0,66,140,0,0,0,0,
+ 0,128,66,48,23,0,64,20,0,0,0,0,
+ 164,0,162,142,44,0,163,142,1,0,66,36,
+ 164,0,162,174,8,0,104,172,136,0,162,142,
+ 0,0,0,0,8,0,2,173,44,0,163,142,
+ 16,16,2,36,2,0,98,164,0,0,162,142,
+ 0,0,0,0,5,0,64,20,0,0,0,0,
+ 164,7,192,12,0,0,0,0,239,20,192,8,
+ 0,0,0,0,8,0,162,142,0,0,0,0,
+ 0,0,64,172,180,0,191,143,176,0,190,143,
+ 172,0,183,143,168,0,182,143,164,0,181,143,
+ 160,0,180,143,156,0,179,143,152,0,178,143,
+ 148,0,177,143,144,0,176,143,8,0,224,3,
+ 184,0,189,39,216,255,189,39,28,0,177,175,
+ 33,136,128,0,32,0,178,175,33,144,160,0,
+ 96,128,132,39,6,0,37,38,24,0,176,175,
+ 104,128,144,39,36,0,191,175,31,21,192,12,
+ 33,48,0,2,108,128,132,39,33,40,32,2,
+ 31,21,192,12,33,48,0,2,10,0,64,26,
+ 33,128,0,0,116,128,132,39,33,16,17,2,
+ 12,0,69,144,0,0,0,0,15,63,192,12,
+ 1,0,16,38,42,16,18,2,248,255,64,20,
+ 0,0,0,0,124,128,132,39,15,63,192,12,
+ 0,0,0,0,36,0,191,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 40,0,189,39,208,255,189,39,40,0,191,175,
+ 2,0,162,144,0,0,163,144,1,0,167,144,
+ 16,0,162,175,3,0,162,144,33,64,128,0,
+ 20,0,162,175,4,0,162,144,2,131,4,60,
+ 68,131,132,36,24,0,162,175,5,0,162,144,
+ 33,40,0,1,32,0,166,175,33,48,96,0,
+ 15,63,192,12,28,0,162,175,40,0,191,143,
+ 48,0,189,39,8,0,224,3,0,0,0,0,
+ 248,255,189,39,136,0,135,140,255,255,163,36,
+ 12,0,160,16,33,48,224,0,255,255,5,36,
+ 12,0,194,140,0,0,0,0,0,128,66,48,
+ 8,0,64,20,33,16,0,0,255,255,99,36,
+ 0,0,192,172,4,0,198,140,247,255,101,20,
+ 0,0,0,0,136,0,134,172,33,16,224,0,
+ 8,0,224,3,8,0,189,39,224,255,189,39,
+ 16,0,176,175,33,128,160,0,28,0,191,175,
+ 24,0,178,175,33,0,128,20,20,0,177,175,
+ 84,133,130,143,80,133,131,143,0,0,0,0,
+ 35,16,67,0,17,0,2,162,80,133,145,143,
+ 84,133,130,143,0,0,0,0,42,16,34,2,
+ 19,0,64,16,64,18,17,0,2,131,3,60,
+ 192,246,99,36,33,144,67,0,33,32,64,2,
+ 6,23,192,12,33,40,0,2,6,0,64,20,
+ 0,0,0,0,17,0,2,146,0,0,0,0,
+ 255,255,66,36,17,0,2,162,17,0,2,146,
+ 84,133,130,143,1,0,49,38,42,16,34,2,
+ 242,255,64,20,0,2,82,38,17,0,2,146,
+ 144,21,192,8,0,0,0,0,36,133,130,143,
+ 0,0,0,0,25,0,64,16,1,0,2,36,
+ 0,0,130,140,0,0,0,0,20,0,64,16,
+ 2,0,2,36,17,0,2,162,6,23,192,12,
+ 33,40,0,2,19,0,64,16,33,16,0,0,
+ 2,131,4,60,192,246,132,36,6,23,192,12,
+ 33,40,0,2,7,0,64,20,0,0,0,0,
+ 17,0,2,146,0,0,0,0,255,255,66,36,
+ 17,0,2,162,17,0,2,146,0,0,0,0,
+ 144,21,192,8,1,0,2,36,1,0,2,36,
+ 17,0,2,162,6,23,192,12,33,40,0,2,
+ 28,0,191,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 0,0,0,0,0,0,0,0,0,129,9,52,
+ 16,0,130,144,2,131,3,60,192,246,99,36,
+ 64,18,2,0,33,56,67,0,140,0,230,140,
+ 0,1,8,36,4,0,197,140,0,0,131,140,
+ 0,0,128,172,12,0,137,172,4,0,164,172,
+ 12,0,200,172,33,48,160,0,216,0,226,140,
+ 33,40,128,0,1,0,66,36,0,128,99,48,
+ 4,0,96,20,216,0,226,172,4,0,132,140,
+ 161,21,192,8,0,0,0,0,8,0,224,3,
+ 140,0,230,172,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,8,0,224,3,0,0,0,0,
+ 172,0,128,172,176,0,128,172,180,0,128,172,
+ 184,0,128,172,188,0,128,172,192,0,128,172,
+ 196,0,128,172,200,0,128,172,204,0,128,172,
+ 208,0,128,172,212,0,128,172,224,0,128,172,
+ 8,1,128,172,4,1,128,172,236,0,128,172,
+ 240,0,128,172,232,0,128,172,244,0,128,172,
+ 8,0,224,3,248,0,128,172,224,255,189,39,
+ 16,0,176,175,33,128,128,0,20,0,177,175,
+ 0,2,17,36,24,0,191,175,13,8,192,12,
+ 0,48,4,36,255,31,3,60,255,255,99,52,
+ 33,32,0,0,36,16,67,0,0,128,3,60,
+ 37,40,67,0,33,24,160,0,0,128,6,52,
+ 1,0,132,36,24,0,98,36,0,0,96,164,
+ 2,0,102,164,4,0,98,172,33,24,64,0,
+ 42,16,145,0,249,255,64,20,1,0,132,36,
+ 255,255,132,36,64,16,17,0,33,16,81,0,
+ 192,16,2,0,33,16,69,0,48,0,163,36,
+ 236,255,69,172,108,0,3,174,104,0,3,174,
+ 96,0,5,174,100,0,2,174,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,248,255,189,39,0,32,14,60,
+ 4,0,177,175,7,0,17,60,0,0,176,175,
+ 4,0,16,60,2,131,25,60,192,246,57,39,
+ 0,1,15,36,108,0,152,140,104,0,137,140,
+ 176,0,140,140,180,0,141,140,94,0,56,17,
+ 0,0,0,0,4,0,43,141,0,0,0,0,
+ 37,16,110,1,0,0,66,148,0,0,0,0,
+ 0,128,66,48,86,0,64,16,37,16,46,1,
+ 0,0,67,140,0,0,0,0,36,16,113,0,
+ 76,0,80,20,0,32,98,48,41,0,64,20,
+ 15,0,98,48,188,0,130,140,0,0,0,0,
+ 1,0,66,36,188,0,130,172,0,8,98,48,
+ 6,0,64,16,0,4,98,48,192,0,130,140,
+ 0,0,0,0,1,0,66,36,192,0,130,172,
+ 0,4,98,48,6,0,64,16,0,2,98,48,
+ 196,0,130,140,0,0,0,0,1,0,66,36,
+ 196,0,130,172,0,2,98,48,6,0,64,16,
+ 0,1,98,48,200,0,130,140,0,0,0,0,
+ 1,0,66,36,200,0,130,172,0,1,98,48,
+ 6,0,64,16,32,0,98,48,204,0,130,140,
+ 0,0,0,0,1,0,66,36,204,0,130,172,
+ 32,0,98,48,6,0,64,16,15,0,98,48,
+ 208,0,130,140,0,0,0,0,1,0,66,36,
+ 208,0,130,172,15,0,98,48,212,0,131,140,
+ 8,0,37,141,33,24,98,0,212,0,131,172,
+ 17,0,162,144,1,0,140,37,255,255,66,36,
+ 17,0,162,160,25,0,64,20,37,24,46,1,
+ 16,0,162,144,1,0,173,37,64,18,2,0,
+ 33,64,89,0,140,0,7,141,0,129,10,52,
+ 4,0,230,140,0,0,163,140,0,0,160,172,
+ 12,0,170,172,4,0,197,172,12,0,239,172,
+ 33,56,192,0,216,0,2,141,33,48,160,0,
+ 1,0,66,36,0,128,99,48,4,0,96,20,
+ 216,0,2,173,4,0,165,140,114,22,192,8,
+ 0,0,0,0,140,0,7,173,37,24,46,1,
+ 0,128,2,60,0,0,98,172,40,22,192,8,
+ 33,72,96,1,104,0,137,172,176,0,140,172,
+ 180,0,141,172,4,0,177,143,0,0,176,143,
+ 8,0,224,3,8,0,189,39,224,255,189,39,
+ 16,0,176,175,33,128,128,0,24,0,191,175,
+ 20,0,177,175,44,0,17,142,0,0,0,0,
+ 0,0,34,150,0,0,0,0,0,32,66,48,
+ 89,0,64,16,0,0,0,0,2,0,34,150,
+ 0,0,0,0,0,1,66,48,84,0,64,20,
+ 0,0,0,0,27,22,192,12,0,0,0,0,
+ 104,0,4,142,0,0,0,0,2,0,130,148,
+ 0,128,3,52,255,255,66,48,75,0,67,16,
+ 0,0,0,0,224,0,2,142,0,0,0,0,
+ 1,0,66,36,224,0,2,174,4,0,36,174,
+ 0,0,128,164,4,0,130,140,0,0,0,0,
+ 0,0,64,164,0,0,2,142,0,0,0,0,
+ 51,0,64,16,0,33,2,36,2,0,34,150,
+ 0,0,0,0,47,0,64,16,0,33,2,36,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 2,0,34,150,0,0,0,0,211,255,64,20,
+ 0,33,2,36,2,0,34,166,0,0,2,142,
+ 0,0,0,0,5,0,64,16,0,0,0,0,
+ 8,0,2,142,0,0,0,0,242,22,192,8,
+ 0,0,64,172,164,7,192,12,0,0,0,0,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,108,0,133,140,
+ 0,0,0,0,4,0,162,140,0,0,0,0,
+ 4,0,67,140,104,0,130,140,0,0,0,0,
+ 5,0,98,20,33,16,160,0,184,0,131,140,
+ 33,16,0,0,1,0,99,36,184,0,131,172,
+ 8,0,224,3,0,0,0,0,224,255,189,39,
+ 16,0,176,175,33,128,128,0,28,0,191,175,
+ 24,0,178,175,20,0,177,175,108,0,18,142,
+ 8,1,6,142,44,0,17,142,4,0,66,142,
+ 104,0,7,142,4,0,66,140,18,0,163,148,
+ 172,0,4,142,0,0,0,0,6,0,71,20,
+ 255,255,99,48,184,0,3,142,33,16,0,0,
+ 1,0,99,36,157,23,192,8,184,0,3,174,
+ 33,48,195,0,1,0,130,36,172,0,2,174,
+ 8,1,6,174,8,0,162,140,4,1,3,142,
+ 0,0,70,144,33,32,64,2,8,0,69,174,
+ 12,0,64,174,1,0,194,48,2,0,64,16,
+ 1,0,98,36,4,1,2,174,0,0,2,142,
+ 0,0,0,0,35,0,64,20,0,0,0,0,
+ 18,0,162,148,0,0,0,0,255,255,66,48,
+ 12,0,66,174,0,0,34,150,0,0,0,0,
+ 0,32,66,48,24,0,64,16,12,0,2,36,
+ 2,0,34,150,0,0,0,0,0,1,66,48,
+ 19,0,64,20,12,0,2,36,4,0,242,16,
+ 0,0,0,0,27,22,192,12,33,32,0,2,
+ 12,0,2,36,2,0,66,166,104,0,4,142,
+ 0,0,0,0,0,0,128,164,4,0,130,140,
+ 0,0,0,0,0,0,64,164,0,33,2,36,
+ 4,0,36,174,164,7,192,12,2,0,34,166,
+ 154,23,192,8,0,0,0,0,154,23,192,8,
+ 2,0,130,164,0,0,34,150,0,0,0,0,
+ 0,32,66,48,69,0,64,16,12,0,2,36,
+ 4,0,242,16,0,0,0,0,27,22,192,12,
+ 33,32,0,2,12,0,2,36,2,0,66,166,
+ 104,0,4,142,0,0,0,0,0,0,128,164,
+ 4,0,130,140,0,0,0,0,0,0,64,164,
+ 4,0,36,174,2,0,34,150,0,0,0,0,
+ 47,0,64,16,0,33,2,36,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,2,0,34,150,
+ 0,0,0,0,211,255,64,20,0,33,2,36,
+ 2,0,34,166,8,0,2,142,0,0,0,0,
+ 154,23,192,8,0,0,64,172,2,0,66,166,
+ 4,0,67,142,1,0,2,36,108,0,3,174,
+ 28,0,191,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 216,255,189,39,20,0,177,175,33,136,128,0,
+ 24,0,178,175,2,131,18,60,192,131,82,38,
+ 28,0,179,175,0,1,19,36,32,0,191,175,
+ 16,0,176,175,104,0,48,142,108,0,34,142,
+ 0,0,0,0,4,0,2,22,0,0,0,0,
+ 2,0,2,150,245,23,192,8,0,0,0,0,
+ 2,0,2,150,0,0,0,0,7,0,66,48,
+ 11,0,64,20,33,40,64,2,2,131,4,60,
+ 160,131,132,36,2,131,7,60,252,131,231,36,
+ 15,63,192,12,0,2,6,36,1,0,4,36,
+ 33,40,64,2,188,7,192,12,0,2,6,36,
+ 2,0,2,150,4,0,3,36,7,0,66,48,
+ 40,0,67,20,0,128,2,52,8,0,3,142,
+ 0,0,0,0,17,0,98,144,0,0,0,0,
+ 255,255,66,36,17,0,98,160,17,0,98,144,
+ 0,0,0,0,30,0,64,20,0,128,2,52,
+ 180,0,34,142,33,32,96,0,1,0,66,36,
+ 180,0,34,174,16,0,130,144,2,131,3,60,
+ 192,246,99,36,64,18,2,0,33,56,67,0,
+ 140,0,230,140,0,129,8,52,4,0,197,140,
+ 0,0,131,140,0,0,128,172,12,0,136,172,
+ 4,0,164,172,12,0,211,172,33,48,160,0,
+ 216,0,226,140,33,40,128,0,1,0,66,36,
+ 0,128,99,48,4,0,96,20,216,0,226,172,
+ 4,0,132,140,223,23,192,8,0,0,0,0,
+ 140,0,230,172,0,128,2,52,2,0,2,166,
+ 0,0,0,166,4,0,16,142,174,23,192,8,
+ 0,0,0,0,44,0,35,142,104,0,48,174,
+ 0,0,98,148,0,0,0,0,0,32,66,52,
+ 0,0,98,164,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,0,163,2,60,
+ 0,1,66,52,0,0,66,140,2,131,3,60,
+ 192,6,99,36,255,3,66,48,60,0,66,36,
+ 0,128,66,52,8,0,224,3,0,0,98,172,
+ 208,255,189,39,28,0,177,175,33,136,128,0,
+ 32,0,178,175,33,144,160,0,24,0,176,175,
+ 0,163,16,60,0,163,2,60,164,1,66,140,
+ 0,1,16,54,44,0,191,175,40,0,180,175,
+ 4,0,64,20,36,0,179,175,60,0,2,36,
+ 0,163,1,60,164,1,34,172,0,163,2,60,
+ 164,1,66,140,0,0,0,0,221,5,66,40,
+ 3,0,64,20,220,5,2,36,0,163,1,60,
+ 164,1,34,172,0,163,3,60,164,1,99,140,
+ 255,255,2,36,21,0,98,20,0,0,0,0,
+ 128,128,130,143,0,0,0,0,5,0,67,20,
+ 2,0,5,36,130,11,192,12,0,0,0,0,
+ 128,128,130,175,2,0,5,36,10,0,6,36,
+ 128,128,132,143,0,131,7,60,8,96,231,36,
+ 156,11,192,12,16,0,160,175,0,0,2,142,
+ 2,131,3,60,192,6,99,36,255,3,66,48,
+ 66,24,192,8,64,0,66,36,0,163,2,60,
+ 164,1,66,140,2,131,3,60,192,6,99,36,
+ 0,128,66,52,0,0,98,172,255,31,4,60,
+ 255,255,132,52,2,131,2,60,208,6,66,36,
+ 36,16,68,0,0,160,5,60,37,16,69,0,
+ 2,131,3,60,176,12,99,36,2,131,1,60,
+ 196,6,32,172,2,131,1,60,200,6,34,172,
+ 12,0,2,36,0,0,96,164,2,131,1,60,
+ 178,12,34,164,6,0,65,6,36,16,100,0,
+ 37,16,69,0,2,131,1,60,180,12,34,172,
+ 99,24,192,8,255,31,18,60,2,131,2,60,
+ 178,12,66,148,0,0,0,0,0,128,66,52,
+ 2,131,1,60,178,12,34,164,255,31,18,60,
+ 255,255,82,54,2,131,2,60,192,6,66,36,
+ 36,16,82,0,0,160,20,60,37,16,84,0,
+ 2,131,1,60,184,12,34,172,2,131,1,60,
+ 188,12,32,172,44,0,34,142,0,0,0,0,
+ 0,0,66,148,2,131,19,60,176,12,115,38,
+ 0,32,66,48,15,0,64,20,33,40,0,0,
+ 2,131,4,60,160,131,132,36,2,131,16,60,
+ 192,131,16,38,33,40,0,2,2,131,7,60,
+ 24,132,231,36,15,63,192,12,71,2,6,36,
+ 1,0,4,36,33,40,0,2,188,7,192,12,
+ 71,2,6,36,33,40,0,0,33,48,0,0,
+ 36,16,114,2,44,0,35,142,37,16,84,0,
+ 4,0,98,172,44,0,36,142,208,7,7,36,
+ 129,67,192,12,2,0,132,36,12,0,64,20,
+ 0,0,0,0,44,0,34,142,0,0,0,0,
+ 2,0,69,148,2,131,4,60,15,63,192,12,
+ 56,132,132,36,255,255,4,36,2,131,5,60,
+ 192,131,165,36,188,7,192,12,79,2,6,36,
+ 44,0,34,142,0,33,3,36,2,0,67,164,
+ 8,0,34,142,0,0,0,0,0,0,64,172,
+ 44,0,191,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,48,0,189,39,232,255,189,39,
+ 128,128,132,143,0,128,2,52,16,0,191,175,
+ 2,131,1,60,3,0,128,4,178,12,34,164,
+ 177,11,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 8,0,224,3,0,0,0,0,0,0,0,0,
+ 0,0,0,0,240,255,2,52,2,131,1,60,
+ 33,8,34,0,208,12,32,172,240,255,66,36,
+ 251,255,65,4,0,0,0,0,2,131,2,60,
+ 208,12,66,36,0,163,1,60,12,1,32,172,
+ 0,163,1,60,212,5,34,172,1,0,2,60,
+ 148,133,128,175,144,133,128,175,0,163,1,60,
+ 8,0,224,3,216,5,34,172,0,0,136,148,
+ 4,0,138,148,1,0,2,49,34,0,64,20,
+ 2,0,137,148,0,25,10,0,38,24,106,0,
+ 38,24,105,0,240,255,99,48,2,131,15,60,
+ 208,12,239,37,33,40,111,0,68,133,142,143,
+ 128,0,3,36,0,0,162,140,4,0,171,148,
+ 23,0,64,16,43,16,194,1,9,0,64,16,
+ 6,0,172,148,7,0,11,21,8,0,173,148,
+ 5,0,44,21,0,0,0,0,3,0,77,21,
+ 10,0,162,148,8,0,224,3,0,0,0,0,
+ 255,255,99,36,10,0,96,16,240,255,165,36,
+ 43,16,175,0,238,255,64,16,0,0,162,140,
+ 248,127,229,37,248,127,165,36,218,24,192,8,
+ 0,0,162,140,8,0,224,3,254,255,2,36,
+ 8,0,224,3,255,255,2,36,8,0,224,3,
+ 0,0,0,0,0,0,136,148,68,133,142,143,
+ 1,0,2,49,53,0,64,20,2,0,137,148,
+ 0,131,2,60,4,0,138,148,12,1,89,140,
+ 0,25,10,0,38,24,106,0,38,24,105,0,
+ 240,255,99,48,2,131,15,60,208,12,239,37,
+ 33,56,111,0,128,0,3,36,0,0,248,140,
+ 4,0,235,148,43,16,216,1,14,0,64,16,
+ 6,0,236,148,24,0,11,21,8,0,237,148,
+ 22,0,44,21,255,127,2,60,20,0,77,21,
+ 255,255,66,52,43,16,2,3,2,0,64,16,
+ 33,16,198,1,0,0,226,172,10,0,229,164,
+ 8,0,224,3,0,0,2,36,3,0,0,23,
+ 1,0,57,35,0,131,2,60,12,1,89,172,
+ 33,16,198,1,0,0,226,172,10,0,229,164,
+ 4,0,232,164,6,0,233,164,8,0,234,164,
+ 8,0,224,3,1,0,2,36,255,255,99,36,
+ 11,0,96,16,0,0,0,0,240,255,231,36,
+ 43,16,239,0,221,255,64,16,0,0,248,140,
+ 248,127,231,37,248,127,231,36,8,25,192,8,
+ 0,0,248,140,8,0,224,3,0,0,2,36,
+ 144,133,130,143,0,0,0,0,1,0,66,32,
+ 144,133,130,175,8,0,224,3,255,255,2,36,
+ 8,0,224,3,0,0,0,0,164,128,130,143,
+ 0,0,0,0,7,0,130,20,232,255,189,39,
+ 160,128,130,143,2,131,3,60,208,12,99,36,
+ 0,17,2,0,108,25,192,8,33,16,67,0,
+ 42,16,130,0,3,0,64,16,255,255,2,36,
+ 164,128,128,175,160,128,130,175,164,128,130,143,
+ 160,128,131,143,35,48,130,0,1,0,101,36,
+ 0,16,162,40,25,0,64,16,0,25,5,0,
+ 68,133,135,143,2,131,2,60,33,16,67,0,
+ 208,12,66,140,0,0,0,0,43,16,226,0,
+ 4,0,64,16,0,0,0,0,255,255,198,36,
+ 6,0,192,16,0,16,162,40,1,0,165,36,
+ 0,16,162,40,243,255,64,20,16,0,99,36,
+ 0,16,162,40,7,0,64,16,0,25,5,0,
+ 2,131,2,60,208,12,66,36,160,128,133,175,
+ 164,128,132,175,108,25,192,8,33,16,98,0,
+ 33,16,0,0,255,255,3,36,164,128,128,175,
+ 160,128,131,175,8,0,224,3,24,0,189,39,
+ 0,0,0,0,0,0,0,0,24,255,189,39,
+ 228,0,191,175,224,0,190,175,220,0,183,175,
+ 216,0,182,175,212,0,181,175,208,0,180,175,
+ 204,0,179,175,200,0,178,175,196,0,177,175,
+ 192,0,176,175,44,28,192,12,0,0,0,0,
+ 176,128,132,39,15,63,192,12,1,0,17,36,
+ 24,0,176,39,164,68,192,12,33,32,0,2,
+ 24,0,162,131,0,0,0,0,137,25,192,8,
+ 32,0,8,36,0,0,2,130,32,0,8,36,
+ 253,255,72,16,1,0,16,38,255,255,16,38,
+ 9,0,8,36,249,255,72,16,1,0,16,38,
+ 255,255,16,38,0,0,2,146,0,0,0,0,
+ 208,255,66,36,10,0,66,44,27,0,64,16,
+ 33,32,0,2,33,40,0,0,212,68,192,12,
+ 33,48,0,0,0,0,3,146,0,0,0,0,
+ 208,255,99,36,10,0,99,44,9,0,96,16,
+ 33,136,64,0,1,0,16,38,0,0,2,146,
+ 0,0,0,0,208,255,66,36,10,0,66,44,
+ 251,255,64,20,1,0,16,38,255,255,16,38,
+ 0,0,2,130,32,0,8,36,253,255,72,16,
+ 1,0,16,38,255,255,16,38,9,0,8,36,
+ 249,255,72,16,1,0,16,38,255,255,16,38,
+ 0,0,2,130,0,0,3,146,0,0,0,0,
+ 22,0,64,16,104,0,180,39,32,0,8,36,
+ 19,0,72,16,9,0,8,36,17,0,72,16,
+ 32,0,5,36,9,0,4,36,208,255,98,36,
+ 10,0,66,44,12,0,64,20,0,0,0,0,
+ 1,0,16,38,0,0,131,162,0,0,2,130,
+ 0,0,3,146,0,0,0,0,5,0,64,16,
+ 1,0,148,38,3,0,69,16,0,0,0,0,
+ 243,255,68,20,208,255,98,36,0,0,128,162,
+ 104,0,180,39,0,0,2,130,32,0,8,36,
+ 253,255,72,16,1,0,16,38,255,255,16,38,
+ 9,0,8,36,249,255,72,16,1,0,16,38,
+ 255,255,16,38,33,240,0,2,0,0,196,131,
+ 0,0,0,0,32,69,192,12,144,0,190,175,
+ 11,0,64,16,33,32,192,3,33,40,0,0,
+ 212,68,192,12,33,48,0,0,33,152,64,0,
+ 33,32,192,3,33,40,0,0,44,69,192,12,
+ 16,0,6,36,232,25,192,8,33,144,64,0,
+ 255,255,18,36,255,255,19,36,0,0,3,130,
+ 0,0,2,146,0,0,0,0,17,0,96,16,
+ 32,0,8,36,15,0,104,16,1,0,16,38,
+ 255,255,16,38,32,0,4,36,0,22,2,0,
+ 3,22,2,0,9,0,8,36,8,0,72,16,
+ 0,0,0,0,1,0,16,38,0,0,3,130,
+ 0,0,2,146,3,0,96,16,0,0,0,0,
+ 246,255,100,20,0,22,2,0,0,0,2,130,
+ 32,0,8,36,253,255,72,16,1,0,16,38,
+ 255,255,16,38,9,0,8,36,249,255,72,16,
+ 1,0,16,38,255,255,16,38,33,184,0,2,
+ 33,32,224,2,33,40,0,0,212,68,192,12,
+ 33,48,0,0,33,32,224,2,33,40,0,0,
+ 16,0,6,36,44,69,192,12,33,176,64,0,
+ 0,0,227,130,0,0,0,0,15,0,96,16,
+ 33,168,64,0,32,0,8,36,12,0,104,16,
+ 32,0,3,36,0,0,2,130,9,0,8,36,
+ 8,0,72,16,0,0,0,0,1,0,16,38,
+ 0,0,2,130,0,0,0,0,3,0,64,16,
+ 0,0,0,0,248,255,67,20,0,0,0,0,
+ 0,0,131,130,0,0,0,0,121,0,98,44,
+ 244,1,64,16,128,16,3,0,2,131,1,60,
+ 33,8,34,0,160,138,34,140,0,0,0,0,
+ 8,0,64,0,0,0,0,0,1,0,131,130,
+ 104,0,2,36,26,0,98,16,105,0,98,40,
+ 7,0,64,16,116,0,2,36,34,0,96,16,
+ 98,0,2,36,11,0,98,16,0,0,0,0,
+ 26,28,192,8,0,0,0,0,5,0,98,16,
+ 119,0,8,36,27,0,104,16,33,16,32,2,
+ 26,28,192,8,0,0,0,0,4,162,2,60,
+ 33,144,66,2,2,0,130,130,0,0,0,0,
+ 214,1,64,20,0,0,0,0,2,131,4,60,
+ 108,132,132,36,0,0,70,146,82,26,192,8,
+ 0,0,0,0,2,0,130,130,0,0,0,0,
+ 205,1,64,20,0,0,0,0,2,131,4,60,
+ 120,132,132,36,0,0,70,150,0,0,0,0,
+ 15,63,192,12,33,40,64,2,125,25,192,8,
+ 0,0,0,0,33,16,32,2,37,255,64,16,
+ 255,255,49,38,0,0,80,142,2,131,4,60,
+ 132,132,132,36,33,40,64,2,4,0,82,38,
+ 15,63,192,12,33,48,0,2,33,16,32,2,
+ 247,255,64,20,255,255,49,38,125,25,192,8,
+ 0,0,0,0,1,0,131,130,104,0,2,36,
+ 23,0,98,16,105,0,98,40,7,0,64,16,
+ 116,0,2,36,25,0,96,16,98,0,2,36,
+ 11,0,98,16,0,0,0,0,26,28,192,8,
+ 0,0,0,0,5,0,98,16,119,0,8,36,
+ 17,0,104,16,0,0,0,0,26,28,192,8,
+ 0,0,0,0,4,162,2,60,33,144,66,2,
+ 2,0,130,130,0,0,0,0,158,1,64,20,
+ 0,0,0,0,125,25,192,8,0,0,85,162,
+ 2,0,130,130,0,0,0,0,152,1,64,20,
+ 0,0,0,0,125,25,192,8,0,0,85,166,
+ 125,25,192,8,0,0,85,174,0,163,16,60,
+ 31,163,17,60,255,255,49,54,0,0,2,142,
+ 0,0,0,0,4,0,82,20,0,0,0,0,
+ 180,128,132,39,15,63,192,12,33,40,0,2,
+ 4,0,16,38,43,16,48,2,246,255,64,16,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 33,16,32,2,228,254,64,16,255,255,49,38,
+ 33,32,96,2,164,32,192,12,33,40,192,2,
+ 33,16,32,2,251,255,64,20,255,255,49,38,
+ 125,25,192,8,0,0,0,0,1,0,130,130,
+ 0,0,0,0,117,1,64,20,33,32,32,2,
+ 133,29,192,12,33,40,96,2,125,25,192,8,
+ 0,0,0,0,33,32,96,2,33,40,32,2,
+ 234,31,192,12,33,48,192,2,125,25,192,8,
+ 0,0,0,0,1,0,130,130,0,0,0,0,
+ 103,1,64,20,33,16,32,2,200,254,64,16,
+ 255,255,49,38,33,32,96,2,33,40,192,2,
+ 182,29,192,12,33,48,0,2,33,16,32,2,
+ 250,255,64,20,255,255,49,38,125,25,192,8,
+ 0,0,0,0,33,32,32,2,33,40,96,2,
+ 33,48,192,2,38,30,192,12,33,56,0,2,
+ 125,25,192,8,0,0,0,0,5,162,2,60,
+ 0,0,69,144,2,131,4,60,15,63,192,12,
+ 144,132,132,36,125,25,192,8,0,0,0,0,
+ 0,163,1,60,20,1,32,172,14,0,32,18,
+ 33,128,0,0,164,7,192,12,1,0,16,38,
+ 143,63,192,12,0,0,0,0,143,63,192,12,
+ 0,0,0,0,143,63,192,12,0,0,0,0,
+ 143,63,192,12,0,0,0,0,43,16,17,2,
+ 244,255,64,20,0,0,0,0,184,63,192,12,
+ 0,0,0,0,0,163,16,60,20,1,16,142,
+ 0,0,0,0,7,0,17,22,33,40,32,2,
+ 2,131,4,60,164,132,132,36,15,63,192,12,
+ 33,40,32,2,125,25,192,8,0,0,0,0,
+ 2,131,4,60,188,132,132,36,15,63,192,12,
+ 33,48,0,2,125,25,192,8,0,0,0,0,
+ 0,0,226,130,7,162,8,60,16,0,64,16,
+ 33,144,72,2,33,16,32,2,134,254,64,16,
+ 255,255,49,38,0,0,85,174,2,131,4,60,
+ 132,132,132,36,33,40,64,2,15,63,192,12,
+ 33,48,160,2,4,0,82,38,33,16,32,2,
+ 247,255,64,20,255,255,49,38,125,25,192,8,
+ 0,0,0,0,33,16,32,2,119,254,64,16,
+ 255,255,49,38,0,0,80,142,2,131,4,60,
+ 132,132,132,36,33,40,64,2,4,0,82,38,
+ 15,63,192,12,33,48,0,2,33,16,32,2,
+ 247,255,64,20,255,255,49,38,125,25,192,8,
+ 0,0,0,0,7,162,16,60,64,0,17,38,
+ 2,131,4,60,228,132,132,36,33,40,0,2,
+ 0,0,6,142,0,0,0,0,15,63,192,12,
+ 4,0,16,38,42,16,17,2,247,255,64,20,
+ 7,162,8,60,128,0,16,37,176,0,17,37,
+ 2,131,4,60,228,132,132,36,33,40,0,2,
+ 0,0,6,142,0,0,0,0,15,63,192,12,
+ 4,0,16,38,42,16,17,2,247,255,64,20,
+ 7,162,8,60,192,0,16,37,240,0,17,37,
+ 2,131,4,60,228,132,132,36,33,40,0,2,
+ 0,0,6,142,0,0,0,0,15,63,192,12,
+ 4,0,16,38,42,16,17,2,247,255,64,20,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 1,0,130,130,0,0,0,0,222,0,64,20,
+ 33,16,32,2,63,254,64,16,255,255,49,38,
+ 33,32,96,2,213,29,192,12,33,40,160,2,
+ 33,16,32,2,251,255,64,20,255,255,49,38,
+ 125,25,192,8,0,0,0,0,1,0,130,130,
+ 0,0,0,0,208,0,64,20,33,16,32,2,
+ 49,254,64,16,255,255,49,38,33,32,96,2,
+ 161,31,192,12,33,40,192,2,33,16,32,2,
+ 251,255,64,20,255,255,49,38,125,25,192,8,
+ 0,0,0,0,33,16,32,2,38,254,64,16,
+ 255,255,49,38,208,32,192,12,33,32,0,0,
+ 33,32,0,0,164,32,192,12,33,40,0,0,
+ 40,29,192,12,0,0,0,0,133,29,192,12,
+ 255,255,4,36,33,16,32,2,245,255,64,20,
+ 255,255,49,38,125,25,192,8,0,0,0,0,
+ 1,0,131,130,87,0,2,36,27,0,98,16,
+ 88,0,98,40,7,0,64,16,114,0,2,36,
+ 37,0,96,16,82,0,2,36,9,0,98,16,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 5,0,98,16,119,0,8,36,15,0,104,16,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 2,0,130,130,0,0,0,0,159,0,64,20,
+ 0,0,0,0,60,65,192,12,33,32,96,2,
+ 184,128,132,39,33,40,96,2,15,63,192,12,
+ 33,48,64,0,125,25,192,8,0,0,0,0,
+ 2,0,130,130,0,0,0,0,147,0,64,20,
+ 33,32,96,2,162,65,192,12,33,40,160,2,
+ 242,253,64,20,33,40,160,2,2,131,4,60,
+ 8,133,132,36,15,63,192,12,33,48,96,2,
+ 125,25,192,8,0,0,0,0,33,16,32,2,
+ 233,253,64,16,255,255,49,38,40,29,192,12,
+ 0,0,0,0,33,16,32,2,252,255,64,20,
+ 255,255,49,38,125,25,192,8,0,0,0,0,
+ 1,0,133,130,87,0,2,36,29,0,162,16,
+ 88,0,162,40,5,0,64,16,82,0,2,36,
+ 11,0,162,16,33,16,32,2,125,25,192,8,
+ 0,0,0,0,114,0,2,36,5,0,162,16,
+ 119,0,8,36,19,0,168,16,33,32,64,2,
+ 125,25,192,8,0,0,0,0,33,16,32,2,
+ 206,253,64,16,255,255,49,38,168,69,192,12,
+ 33,32,64,2,184,128,132,39,33,40,64,2,
+ 15,63,192,12,33,48,64,0,1,0,82,38,
+ 33,16,32,2,247,255,64,20,255,255,49,38,
+ 125,25,192,8,0,0,0,0,33,32,64,2,
+ 29,70,192,12,33,40,160,2,189,253,64,20,
+ 33,40,160,2,2,131,4,60,40,133,132,36,
+ 15,63,192,12,33,48,64,2,125,25,192,8,
+ 0,0,0,0,144,0,164,143,122,28,192,12,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 33,16,96,2,175,253,64,16,255,255,115,38,
+ 143,63,192,12,0,0,0,0,33,16,96,2,
+ 252,255,64,20,255,255,115,38,125,25,192,8,
+ 0,0,0,0,33,16,32,2,165,253,64,16,
+ 255,255,49,38,33,32,96,2,208,32,192,12,
+ 33,40,192,2,33,16,32,2,251,255,64,20,
+ 255,255,49,38,125,25,192,8,0,0,0,0,
+ 1,0,130,146,0,0,0,0,159,255,66,36,
+ 0,22,2,0,3,30,2,0,24,0,98,44,
+ 27,0,64,16,128,16,3,0,2,131,1,60,
+ 33,8,34,0,136,140,34,140,0,0,0,0,
+ 8,0,64,0,0,0,0,0,12,33,192,12,
+ 33,32,64,2,125,25,192,8,0,0,0,0,
+ 15,33,192,12,33,32,96,2,125,25,192,8,
+ 0,0,0,0,18,33,192,12,33,32,96,2,
+ 125,25,192,8,0,0,0,0,22,33,192,12,
+ 33,32,96,2,125,25,192,8,0,0,0,0,
+ 25,33,192,12,33,32,64,2,125,25,192,8,
+ 0,0,0,0,33,32,64,2,7,33,192,12,
+ 33,40,192,2,125,25,192,8,0,0,0,0,
+ 16,0,182,175,33,32,32,2,33,40,192,3,
+ 33,48,224,2,161,33,192,12,33,56,160,2,
+ 125,25,192,8,0,0,0,0,33,136,0,0,
+ 2,131,4,60,72,133,132,36,15,63,192,12,
+ 1,0,49,38,32,0,34,46,250,255,64,20,
+ 0,0,0,0,125,25,192,8,0,0,0,0,
+ 2,131,4,60,92,133,132,36,15,63,192,12,
+ 33,40,128,2,123,25,192,8,0,0,0,0,
+ 228,0,191,143,224,0,190,143,220,0,183,143,
+ 216,0,182,143,212,0,181,143,208,0,180,143,
+ 204,0,179,143,200,0,178,143,196,0,177,143,
+ 192,0,176,143,8,0,224,3,232,0,189,39,
+ 232,255,189,39,2,131,5,60,192,154,165,36,
+ 20,0,191,175,16,0,176,175,0,0,162,140,
+ 0,0,0,0,9,0,64,16,33,128,160,0,
+ 0,0,5,142,192,128,132,39,15,63,192,12,
+ 4,0,16,38,0,0,2,142,0,0,0,0,
+ 249,255,64,20,0,0,0,0,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 0,0,132,144,0,0,0,0,208,255,130,36,
+ 10,0,66,44,4,0,64,16,0,22,4,0,
+ 3,22,2,0,89,28,192,8,208,255,66,36,
+ 159,255,130,36,6,0,66,44,4,0,64,16,
+ 0,22,4,0,3,22,2,0,89,28,192,8,
+ 169,255,66,36,191,255,130,36,6,0,66,44,
+ 3,0,64,20,0,22,4,0,89,28,192,8,
+ 255,255,2,36,3,22,2,0,201,255,66,36,
+ 8,0,224,3,0,0,0,0,216,255,189,39,
+ 24,0,178,175,33,144,128,0,32,0,191,175,
+ 28,0,179,175,20,0,177,175,16,0,176,175,
+ 0,0,81,142,0,0,0,0,65,28,192,12,
+ 33,32,32,2,33,24,64,0,255,255,19,36,
+ 9,0,115,16,0,129,3,0,65,28,192,12,
+ 1,0,36,38,33,24,64,0,4,0,115,16,
+ 2,0,34,38,0,0,66,174,115,28,192,8,
+ 37,16,3,2,255,255,2,36,32,0,191,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,40,0,189,39,
+ 176,255,189,39,64,0,180,175,33,160,128,0,
+ 72,0,191,175,68,0,181,175,60,0,179,175,
+ 56,0,178,175,52,0,177,175,48,0,176,175,
+ 0,0,130,130,0,0,0,0,53,0,64,20,
+ 33,128,0,0,27,67,192,12,33,32,0,0,
+ 1,0,4,36,27,67,192,12,33,128,64,0,
+ 2,0,4,36,27,67,192,12,33,136,64,0,
+ 33,24,64,0,255,255,2,36,5,0,2,18,
+ 0,0,0,0,3,0,34,18,0,0,0,0,
+ 6,0,98,20,255,255,2,52,2,131,4,60,
+ 15,63,192,12,60,137,132,36,29,29,192,8,
+ 0,0,0,0,5,0,2,18,0,0,0,0,
+ 3,0,34,18,0,0,0,0,6,0,98,20,
+ 1,0,2,50,2,131,4,60,15,63,192,12,
+ 104,137,132,36,29,29,192,8,0,0,0,0,
+ 6,0,64,16,255,0,5,50,2,131,4,60,
+ 15,63,192,12,132,137,132,36,29,29,192,8,
+ 0,0,0,0,2,131,4,60,176,137,132,36,
+ 3,50,16,0,3,18,17,0,16,0,162,175,
+ 255,0,98,48,20,0,162,175,3,18,3,0,
+ 255,0,39,50,15,63,192,12,24,0,162,175,
+ 29,29,192,8,0,0,0,0,40,0,180,175,
+ 58,0,21,36,32,0,19,36,255,255,18,36,
+ 32,0,177,39,40,0,162,143,0,0,0,0,
+ 0,0,67,128,0,0,0,0,3,0,117,16,
+ 0,0,0,0,3,0,115,20,0,0,0,0,
+ 1,0,66,36,40,0,162,175,91,28,192,12,
+ 40,0,164,39,33,24,64,0,75,0,114,16,
+ 0,0,0,0,40,0,162,143,0,0,35,166,
+ 0,0,67,128,0,0,0,0,3,0,117,16,
+ 0,0,0,0,3,0,115,20,0,0,0,0,
+ 1,0,66,36,40,0,162,175,91,28,192,12,
+ 40,0,164,39,33,24,64,0,60,0,114,16,
+ 1,0,16,38,0,0,34,150,0,26,3,0,
+ 37,16,67,0,0,0,34,166,3,0,2,42,
+ 220,255,64,20,2,0,49,38,32,0,165,151,
+ 0,0,0,0,1,0,162,48,7,0,64,16,
+ 0,0,0,0,2,131,4,60,208,137,132,36,
+ 15,63,192,12,255,0,165,48,25,29,192,8,
+ 0,0,0,0,36,0,162,151,0,0,0,0,
+ 0,7,66,48,6,0,64,16,0,0,0,0,
+ 2,131,4,60,15,63,192,12,0,138,132,36,
+ 25,29,192,8,0,0,0,0,255,66,192,12,
+ 33,32,0,0,1,0,4,36,34,0,165,151,
+ 0,0,0,0,255,66,192,12,33,128,0,0,
+ 36,0,165,151,0,0,0,0,255,66,192,12,
+ 2,0,4,36,2,131,4,60,15,63,192,12,
+ 32,138,132,36,2,131,4,60,80,138,132,36,
+ 15,63,192,12,33,40,0,2,196,128,132,39,
+ 200,128,134,39,31,21,192,12,32,0,165,39,
+ 36,0,162,151,1,0,16,38,0,1,66,36,
+ 36,0,162,167,8,0,2,42,7,0,64,16,
+ 0,0,0,0,8,29,192,8,0,0,0,0,
+ 2,131,4,60,116,138,132,36,15,63,192,12,
+ 33,40,128,2,72,0,191,143,68,0,181,143,
+ 64,0,180,143,60,0,179,143,56,0,178,143,
+ 52,0,177,143,48,0,176,143,8,0,224,3,
+ 80,0,189,39,0,0,0,0,0,0,0,0,
+ 224,255,189,39,16,0,176,175,33,128,0,0,
+ 20,0,177,175,33,136,0,0,24,0,191,175,
+ 33,32,0,2,162,65,192,12,33,40,0,0,
+ 43,0,64,16,0,0,0,0,1,0,16,38,
+ 64,0,2,42,249,255,64,20,33,32,0,2,
+ 33,128,0,0,85,85,17,36,33,32,0,2,
+ 162,65,192,12,85,85,5,36,32,0,64,16,
+ 0,0,0,0,1,0,16,38,64,0,2,42,
+ 249,255,64,20,33,32,0,2,33,128,0,0,
+ 170,170,17,52,33,32,0,2,162,65,192,12,
+ 170,170,5,52,21,0,64,16,0,0,0,0,
+ 1,0,16,38,64,0,2,42,249,255,64,20,
+ 33,32,0,2,33,128,0,0,255,255,17,52,
+ 33,32,0,2,162,65,192,12,255,255,5,52,
+ 10,0,64,16,0,0,0,0,1,0,16,38,
+ 64,0,2,42,249,255,64,20,33,32,0,2,
+ 2,131,4,60,15,63,192,12,240,140,132,36,
+ 101,29,192,8,0,0,0,0,60,65,192,12,
+ 33,32,0,2,2,131,4,60,4,141,132,36,
+ 33,40,32,2,33,48,0,2,15,63,192,12,
+ 33,56,64,0,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 0,0,0,0,0,0,0,0,232,255,189,39,
+ 16,0,191,175,210,7,192,12,0,0,0,0,
+ 139,14,192,12,0,0,0,0,180,10,192,12,
+ 0,0,0,0,32,133,132,143,1,0,2,36,
+ 42,16,68,0,9,0,64,16,0,2,3,36,
+ 64,34,4,0,2,131,1,60,33,8,35,0,
+ 196,246,32,172,0,2,99,36,42,16,100,0,
+ 250,255,64,20,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 208,255,189,39,24,0,178,175,33,144,128,0,
+ 32,0,180,175,33,160,160,0,44,0,191,175,
+ 40,0,182,175,36,0,181,175,28,0,179,175,
+ 20,0,177,175,3,0,128,26,16,0,176,175,
+ 149,29,192,8,1,0,147,38,1,0,20,36,
+ 32,133,147,143,255,255,82,38,255,255,2,36,
+ 20,0,66,18,255,255,21,36,2,131,22,60,
+ 192,246,214,38,108,29,192,12,33,128,128,2,
+ 42,16,19,2,10,0,64,16,64,18,16,0,
+ 33,136,86,0,242,21,192,12,33,32,32,2,
+ 133,12,192,12,33,32,0,2,1,0,16,38,
+ 42,16,19,2,249,255,64,20,0,2,49,38,
+ 255,255,82,38,240,255,85,22,0,0,0,0,
+ 44,0,191,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 48,0,189,39,216,255,189,39,24,0,178,175,
+ 33,144,160,0,28,0,179,175,33,152,192,0,
+ 32,0,191,175,20,0,177,175,3,0,128,24,
+ 16,0,176,175,195,29,192,8,1,0,145,36,
+ 1,0,4,36,32,133,145,143,33,128,128,0,
+ 42,16,17,2,8,0,64,16,33,32,0,2,
+ 33,40,64,2,250,29,192,12,33,48,96,2,
+ 1,0,16,38,42,16,17,2,250,255,64,20,
+ 33,32,0,2,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,224,255,189,39,
+ 24,0,191,175,20,0,177,175,3,0,128,24,
+ 16,0,176,175,222,29,192,8,1,0,145,36,
+ 1,0,4,36,32,133,145,143,33,128,128,0,
+ 42,16,17,2,7,0,64,16,0,0,0,0,
+ 237,29,192,12,33,32,0,2,1,0,16,38,
+ 42,16,17,2,251,255,64,20,0,0,0,0,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,64,34,4,0,
+ 2,131,2,60,192,246,66,36,33,32,130,0,
+ 44,0,131,140,1,0,2,36,32,0,130,172,
+ 16,0,2,36,2,0,98,164,8,0,130,140,
+ 0,0,0,0,8,0,224,3,0,0,64,172,
+ 208,255,189,39,33,48,128,0,64,18,6,0,
+ 2,131,3,60,192,246,99,36,36,0,177,175,
+ 33,136,67,0,40,0,191,175,32,0,176,175,
+ 4,0,34,142,0,0,0,0,4,0,64,20,
+ 33,128,160,0,1,0,4,36,133,29,192,12,
+ 33,40,192,0,3,0,0,30,221,5,2,42,
+ 17,30,192,8,1,0,16,36,3,0,64,20,
+ 33,32,32,2,220,5,16,36,33,32,32,2,
+ 208,7,5,36,108,0,131,140,12,0,2,36,
+ 2,0,98,164,16,0,162,39,8,0,98,172,
+ 0,128,2,54,12,0,96,172,16,0,162,175,
+ 255,255,2,36,20,0,162,175,2,131,2,60,
+ 0,155,66,36,98,31,192,12,24,0,162,175,
+ 40,0,191,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,48,0,189,39,56,254,189,39,
+ 160,1,176,175,33,128,192,0,48,1,164,175,
+ 33,32,224,0,64,18,5,0,2,131,3,60,
+ 192,246,99,36,33,16,67,0,56,1,162,175,
+ 64,18,16,0,33,16,67,0,40,0,168,39,
+ 196,1,191,175,192,1,190,175,188,1,183,175,
+ 184,1,182,175,180,1,181,175,176,1,180,175,
+ 172,1,179,175,168,1,178,175,164,1,177,175,
+ 64,1,162,175,12,0,160,24,96,1,168,175,
+ 32,133,131,143,0,0,0,0,42,16,163,0,
+ 19,1,64,16,1,0,2,36,5,0,0,26,
+ 42,16,3,2,15,1,64,16,1,0,2,36,
+ 3,0,176,20,33,40,0,0,86,31,192,8,
+ 1,0,2,36,212,68,192,12,33,48,0,0,
+ 6,0,65,4,104,1,162,175,33,72,64,0,
+ 35,72,9,0,104,1,169,175,87,30,192,8,
+ 112,1,160,175,1,0,8,36,112,1,168,175,
+ 1,0,4,36,133,29,192,12,33,40,0,0,
+ 237,29,192,12,33,32,0,2,24,0,169,39,
+ 56,1,168,143,255,0,2,36,80,1,169,175,
+ 108,0,8,141,43,1,163,39,72,1,168,175,
+ 0,0,98,160,255,255,66,36,253,255,65,4,
+ 255,255,99,36,64,1,169,143,0,0,0,0,
+ 120,0,41,141,64,1,168,143,128,1,169,175,
+ 124,0,8,141,64,1,169,143,136,1,168,175,
+ 44,0,34,141,0,0,0,0,12,0,64,172,
+ 44,0,34,141,0,0,0,0,16,0,64,172,
+ 44,0,34,141,120,1,160,175,32,0,64,172,
+ 44,0,34,141,88,1,160,175,24,0,64,172,
+ 48,1,168,143,0,0,0,0,168,0,0,25,
+ 40,0,169,39,144,1,169,175,88,1,168,143,
+ 0,0,0,0,255,0,2,49,4,0,86,36,
+ 60,0,194,42,2,0,64,16,0,0,0,0,
+ 60,0,22,36,104,1,169,143,0,0,0,0,
+ 2,0,32,17,0,0,0,0,104,1,182,143,
+ 56,1,164,143,72,1,168,143,12,0,2,36,
+ 2,0,2,165,80,1,169,143,0,128,194,54,
+ 8,0,9,173,12,0,0,173,0,0,34,173,
+ 255,255,8,36,4,0,40,173,144,1,168,143,
+ 0,0,0,0,8,0,40,173,88,1,168,143,
+ 96,1,169,143,208,7,5,36,98,31,192,12,
+ 0,0,40,173,0,128,5,52,0,128,6,52,
+ 128,1,164,143,0,0,0,0,129,67,192,12,
+ 2,0,7,36,13,0,64,20,0,0,0,0,
+ 88,1,165,143,2,131,4,60,15,63,192,12,
+ 64,141,132,36,120,1,169,143,0,0,0,0,
+ 1,0,41,37,20,0,34,41,117,0,64,16,
+ 120,1,169,175,32,31,192,8,0,0,0,0,
+ 128,1,168,143,64,1,169,143,8,0,2,141,
+ 255,255,8,36,136,0,53,141,0,0,0,0,
+ 50,0,72,16,33,184,0,0,1,0,4,36,
+ 4,0,18,36,4,0,3,36,0,0,190,142,
+ 8,0,166,142,112,1,169,143,255,63,212,51,
+ 30,0,32,17,33,184,244,2,42,16,116,0,
+ 27,0,64,16,33,152,96,0,144,1,168,143,
+ 0,0,0,0,33,136,72,2,33,128,102,0,
+ 15,0,128,16,0,0,0,0,0,0,2,146,
+ 0,0,35,146,0,0,0,0,10,0,67,16,
+ 33,48,192,2,2,131,4,60,92,141,132,36,
+ 88,1,165,143,16,0,163,175,0,0,2,146,
+ 33,56,64,2,15,63,192,12,20,0,162,175,
+ 33,32,0,0,1,0,115,38,1,0,16,38,
+ 1,0,49,38,42,16,116,2,235,255,64,20,
+ 1,0,82,38,33,24,0,0,4,0,181,142,
+ 0,128,194,51,217,255,64,16,0,0,0,0,
+ 128,1,169,143,0,0,0,0,8,0,34,141,
+ 0,0,0,0,25,0,128,16,18,0,87,164,
+ 9,0,246,18,33,48,192,2,2,131,4,60,
+ 140,141,132,36,88,1,165,143,0,0,0,0,
+ 15,63,192,12,33,56,224,2,5,31,192,8,
+ 0,0,0,0,64,1,168,143,0,0,0,0,
+ 136,0,2,141,96,1,169,143,8,0,70,140,
+ 0,0,34,141,0,0,198,140,0,0,0,0,
+ 7,0,194,16,0,0,0,0,88,1,165,143,
+ 2,131,4,60,15,63,192,12,184,141,132,36,
+ 64,1,168,143,0,0,0,0,136,0,4,141,
+ 152,21,192,12,0,0,0,0,64,1,169,143,
+ 0,0,0,0,136,0,53,173,128,1,168,143,
+ 8,128,2,52,0,0,0,165,2,0,2,165,
+ 8,0,0,173,12,0,0,165,136,1,169,143,
+ 8,0,2,36,2,0,34,165,4,0,40,141,
+ 128,1,169,143,136,1,168,175,4,0,41,141,
+ 64,1,168,143,128,1,169,175,120,0,9,173,
+ 136,1,169,143,0,0,0,0,124,0,9,173,
+ 88,1,168,143,48,1,169,143,1,0,8,37,
+ 42,16,9,1,91,255,64,20,88,1,168,175,
+ 64,1,168,143,0,0,0,0,44,0,3,141,
+ 0,0,0,0,12,0,98,140,0,0,0,0,
+ 5,0,64,16,0,0,0,0,12,0,101,140,
+ 2,131,4,60,15,63,192,12,212,141,132,36,
+ 64,1,169,143,0,0,0,0,44,0,35,141,
+ 0,0,0,0,16,0,98,140,0,0,0,0,
+ 5,0,64,16,0,0,0,0,16,0,101,140,
+ 2,131,4,60,15,63,192,12,240,141,132,36,
+ 64,1,168,143,0,0,0,0,44,0,3,141,
+ 0,0,0,0,32,0,98,140,0,0,0,0,
+ 5,0,64,16,0,0,0,0,32,0,101,140,
+ 2,131,4,60,15,63,192,12,16,142,132,36,
+ 64,1,169,143,0,0,0,0,44,0,35,141,
+ 0,0,0,0,24,0,98,140,0,0,0,0,
+ 5,0,64,16,0,0,0,0,24,0,101,140,
+ 2,131,4,60,15,63,192,12,48,142,132,36,
+ 196,1,191,143,192,1,190,143,188,1,183,143,
+ 184,1,182,143,180,1,181,143,176,1,180,143,
+ 172,1,179,143,168,1,178,143,164,1,177,143,
+ 160,1,176,143,8,0,224,3,200,1,189,39,
+ 224,255,189,39,16,0,176,175,33,128,128,0,
+ 24,0,191,175,20,0,177,175,44,0,4,142,
+ 0,0,0,0,0,0,130,148,0,0,0,0,
+ 0,32,66,48,7,0,64,20,33,136,160,0,
+ 0,0,133,148,2,131,4,60,15,63,192,12,
+ 80,142,132,36,156,31,192,8,3,0,2,36,
+ 2,0,132,36,33,40,0,0,33,48,0,0,
+ 129,67,192,12,33,56,32,2,13,0,64,16,
+ 33,40,0,0,44,0,3,142,0,33,2,36,
+ 2,0,98,164,8,0,2,142,33,48,0,0,
+ 0,0,64,172,44,0,4,142,33,56,32,2,
+ 129,67,192,12,2,0,132,36,9,0,64,20,
+ 0,32,5,36,44,0,2,142,0,0,0,0,
+ 2,0,69,148,2,131,4,60,15,63,192,12,
+ 108,142,132,36,156,31,192,8,1,0,2,36,
+ 44,0,4,142,0,32,6,36,129,67,192,12,
+ 33,56,32,2,8,0,64,20,33,16,0,0,
+ 44,0,2,142,0,0,0,0,0,0,69,148,
+ 2,131,4,60,15,63,192,12,132,142,132,36,
+ 2,0,2,36,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 224,255,189,39,24,0,178,175,33,144,160,0,
+ 28,0,191,175,20,0,177,175,7,0,128,4,
+ 16,0,176,175,24,133,130,143,0,0,0,0,
+ 255,255,66,36,42,16,68,0,4,0,64,16,
+ 33,136,128,0,24,133,130,143,33,32,0,0,
+ 255,255,81,36,33,128,128,0,42,16,48,2,
+ 7,0,64,20,33,32,0,2,193,31,192,12,
+ 33,40,64,2,1,0,16,38,42,16,48,2,
+ 251,255,64,16,33,32,0,2,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,8,0,224,3,
+ 0,0,0,0,232,255,189,39,16,0,191,175,
+ 236,63,192,12,1,16,4,36,85,0,2,36,
+ 131,131,1,60,128,18,34,160,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 216,255,189,39,28,0,177,175,33,136,128,0,
+ 32,0,178,175,33,144,160,0,212,128,132,39,
+ 36,0,191,175,15,63,192,12,24,0,176,175,
+ 9,0,64,26,33,128,0,0,0,0,37,146,
+ 1,0,49,38,220,128,132,39,15,63,192,12,
+ 1,0,16,38,42,16,18,2,249,255,64,20,
+ 0,0,0,0,228,128,132,39,15,63,192,12,
+ 0,0,0,0,36,0,191,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 40,0,189,39,48,255,189,39,33,56,128,0,
+ 192,0,178,175,33,144,160,0,200,0,180,175,
+ 33,160,192,0,255,255,226,36,6,0,66,44,
+ 204,0,191,175,196,0,179,175,188,0,177,175,
+ 2,0,64,20,184,0,176,175,1,0,7,36,
+ 2,0,64,30,0,0,0,0,1,0,18,36,
+ 2,0,128,30,64,18,7,0,60,0,20,36,
+ 2,131,3,60,192,246,99,36,33,136,67,0,
+ 4,0,34,142,0,0,0,0,4,0,64,20,
+ 33,152,64,2,1,0,4,36,133,29,192,12,
+ 33,40,224,0,255,31,4,60,255,255,132,52,
+ 0,128,133,54,120,0,162,39,36,16,68,0,
+ 0,160,3,60,37,16,67,0,104,0,165,175,
+ 108,0,160,175,112,0,162,175,12,0,2,36,
+ 80,0,160,167,82,0,162,167,80,0,162,39,
+ 36,16,68,0,37,128,67,0,104,0,162,39,
+ 36,16,68,0,232,128,132,143,37,16,67,0,
+ 84,0,176,175,88,0,162,175,92,0,160,175,
+ 5,0,128,16,4,0,2,36,82,0,162,167,
+ 255,255,2,36,92,0,165,175,88,0,162,175,
+ 44,0,34,142,0,0,0,0,0,0,66,148,
+ 0,0,0,0,0,32,66,48,7,0,64,20,
+ 33,40,0,0,255,255,4,36,2,131,5,60,
+ 184,142,165,36,188,7,192,12,208,1,6,36,
+ 33,40,0,0,44,0,34,142,33,48,0,0,
+ 4,0,80,172,44,0,36,142,208,7,7,36,
+ 129,67,192,12,2,0,132,36,12,0,64,20,
+ 0,0,0,0,44,0,34,142,0,0,0,0,
+ 2,0,69,148,2,131,4,60,15,63,192,12,
+ 108,142,132,36,255,255,4,36,2,131,5,60,
+ 184,142,165,36,188,7,192,12,216,1,6,36,
+ 34,11,192,12,1,0,4,36,0,163,16,60,
+ 4,1,16,142,0,163,2,60,4,1,66,140,
+ 0,0,0,0,252,255,2,18,0,33,3,36,
+ 44,0,34,142,0,0,0,0,2,0,67,164,
+ 8,0,34,142,0,0,0,0,0,0,64,172,
+ 0,163,16,60,4,1,16,142,44,0,36,142,
+ 0,0,0,0,4,0,130,140,0,0,0,0,
+ 0,0,66,148,0,0,0,0,0,128,66,48,
+ 10,0,64,20,0,0,0,0,44,0,35,142,
+ 0,0,0,0,4,0,98,140,0,0,0,0,
+ 0,0,66,148,0,0,0,0,0,128,66,48,
+ 250,255,64,16,0,0,0,0,255,255,115,38,
+ 19,0,96,18,33,40,64,2,44,0,35,142,
+ 0,0,0,0,4,0,98,140,0,0,0,0,
+ 0,0,66,148,0,0,0,0,0,128,66,48,
+ 229,255,64,16,0,0,0,0,4,0,98,140,
+ 0,0,0,0,0,0,66,148,0,0,0,0,
+ 0,128,66,48,250,255,64,20,0,0,0,0,
+ 89,32,192,8,0,0,0,0,2,131,4,60,
+ 200,142,132,36,33,48,128,2,0,163,3,60,
+ 4,1,99,140,0,128,2,52,82,0,162,167,
+ 35,128,112,0,15,63,192,12,33,56,0,2,
+ 19,0,0,18,64,41,18,0,35,40,178,0,
+ 128,40,5,0,33,40,178,0,192,40,5,0,
+ 26,0,176,0,2,0,0,22,0,0,0,0,
+ 13,0,7,0,255,255,1,36,4,0,1,22,
+ 0,128,1,60,2,0,161,20,0,0,0,0,
+ 13,0,6,0,18,40,0,0,236,128,132,39,
+ 15,63,192,12,0,0,0,0,204,0,191,143,
+ 200,0,180,143,196,0,179,143,192,0,178,143,
+ 188,0,177,143,184,0,176,143,8,0,224,3,
+ 208,0,189,39,224,255,189,39,20,0,177,175,
+ 33,136,128,0,24,0,191,175,180,10,192,12,
+ 16,0,176,175,34,11,192,12,1,0,4,36,
+ 16,133,132,143,0,163,16,60,4,1,16,142,
+ 193,63,192,12,0,0,0,0,0,163,2,60,
+ 4,1,66,140,0,0,0,0,35,40,80,0,
+ 73,252,162,36,99,0,66,44,4,0,64,16,
+ 0,0,0,0,2,131,4,60,196,32,192,8,
+ 0,143,132,36,5,0,160,20,0,0,0,0,
+ 2,131,4,60,36,143,132,36,196,32,192,8,
+ 33,40,0,0,2,131,4,60,76,143,132,36,
+ 15,63,192,12,1,0,16,36,3,0,48,18,
+ 0,0,0,0,34,11,192,12,33,32,0,0,
+ 0,129,144,175,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 200,255,189,39,32,0,178,175,33,144,128,0,
+ 33,48,64,2,44,0,181,175,1,131,21,60,
+ 60,252,181,38,33,56,160,2,40,0,180,175,
+ 2,131,20,60,144,143,148,38,36,0,179,175,
+ 0,163,19,60,120,1,115,142,0,163,3,60,
+ 120,1,99,140,32,131,2,60,48,0,191,175,
+ 28,0,177,175,24,0,176,175,16,0,180,175,
+ 33,32,96,2,35,136,67,0,84,64,192,12,
+ 33,40,32,2,3,0,64,18,33,128,64,0,
+ 10,0,0,22,0,0,0,0,16,0,180,175,
+ 33,32,96,2,33,40,32,2,33,48,64,2,
+ 244,63,192,12,33,56,160,2,33,128,2,2,
+ 5,0,0,18,33,40,96,2,2,131,4,60,
+ 168,143,132,36,252,32,192,8,33,40,96,2,
+ 2,131,4,60,204,143,132,36,15,63,192,12,
+ 33,48,177,0,48,0,191,143,44,0,181,143,
+ 40,0,180,143,36,0,179,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 56,0,189,39,0,163,1,60,232,5,36,172,
+ 0,163,1,60,8,0,224,3,236,5,37,172,
+ 28,129,132,175,8,0,224,3,0,0,0,0,
+ 16,129,132,175,8,0,224,3,0,0,0,0,
+ 15,0,132,48,20,129,132,175,8,0,224,3,
+ 0,0,0,0,24,129,132,175,8,0,224,3,
+ 0,0,0,0,32,129,132,175,8,0,224,3,
+ 0,0,0,0,33,72,128,0,33,80,160,0,
+ 33,88,192,0,7,162,4,60,48,1,132,52,
+ 7,162,8,60,0,1,8,53,20,129,130,143,
+ 24,129,131,143,128,48,2,0,28,129,130,143,
+ 3,0,197,52,2,0,96,16,0,0,130,172,
+ 67,0,197,52,16,129,130,143,0,0,0,0,
+ 2,0,64,16,33,24,160,0,0,1,99,52,
+ 36,129,130,143,0,0,0,0,2,0,64,16,
+ 0,0,0,0,0,4,99,52,32,129,130,143,
+ 0,0,3,173,3,0,64,16,7,162,5,60,
+ 0,0,2,173,7,162,5,60,4,1,165,52,
+ 7,162,6,60,8,1,198,52,255,0,2,60,
+ 255,255,66,52,7,162,3,60,12,1,99,52,
+ 7,162,4,60,16,1,132,52,36,16,66,1,
+ 0,0,169,172,0,0,194,172,43,16,7,0,
+ 192,16,2,0,0,0,107,172,8,0,224,3,
+ 0,0,130,172,7,162,3,60,40,1,99,52,
+ 3,0,2,36,0,163,1,60,20,1,32,172,
+ 8,0,224,3,0,0,98,172,232,255,189,39,
+ 16,0,191,175,33,24,0,0,7,162,6,60,
+ 40,1,198,52,15,0,4,60,63,66,132,52,
+ 0,0,197,140,0,0,0,0,16,0,162,48,
+ 7,0,64,20,1,0,99,36,42,16,131,0,
+ 249,255,64,16,0,0,0,0,2,131,4,60,
+ 122,33,192,8,240,143,132,36,36,129,130,143,
+ 0,0,0,0,3,0,64,20,33,24,0,0,
+ 125,33,192,8,33,16,0,0,1,0,5,36,
+ 15,0,4,60,63,66,132,52,0,163,2,60,
+ 20,1,66,140,0,0,0,0,247,255,69,16,
+ 1,0,99,36,42,16,131,0,249,255,64,16,
+ 0,0,0,0,0,163,5,60,20,1,165,140,
+ 2,131,4,60,24,144,132,36,15,63,192,12,
+ 0,0,0,0,1,0,2,36,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 224,255,189,39,24,0,191,175,33,72,192,0,
+ 255,31,3,60,255,255,99,52,33,64,0,0,
+ 36,32,131,0,0,160,2,60,37,32,130,0,
+ 36,40,163,0,16,0,32,25,37,40,162,0,
+ 0,0,134,144,0,0,167,144,0,0,0,0,
+ 7,0,199,16,1,0,165,36,2,131,4,60,
+ 72,144,132,36,15,63,192,12,33,40,0,1,
+ 157,33,192,8,1,0,2,36,1,0,8,37,
+ 42,16,9,1,242,255,64,20,1,0,132,36,
+ 33,16,0,0,24,0,191,143,32,0,189,39,
+ 8,0,224,3,0,0,0,0,0,163,2,60,
+ 232,5,66,140,152,255,189,39,80,0,180,175,
+ 120,0,180,143,64,0,176,175,33,128,160,0,
+ 68,0,177,175,33,136,192,0,72,0,178,175,
+ 33,144,224,0,100,0,191,175,96,0,190,175,
+ 92,0,183,175,88,0,182,175,84,0,181,175,
+ 76,0,179,175,12,0,64,16,16,0,164,175,
+ 0,163,2,60,236,5,66,140,0,0,0,0,
+ 7,0,64,16,0,0,0,0,0,163,2,60,
+ 236,5,66,140,0,0,0,0,1,8,66,44,
+ 10,0,64,20,16,0,2,60,0,163,5,60,
+ 232,5,165,140,0,163,6,60,236,5,198,140,
+ 2,131,4,60,15,63,192,12,124,144,132,36,
+ 7,35,192,8,0,0,0,0,16,0,168,143,
+ 0,0,0,0,43,16,72,0,6,0,64,16,
+ 0,0,0,0,2,131,4,60,15,63,192,12,
+ 172,144,132,36,7,35,192,8,0,0,0,0,
+ 224,132,130,143,0,0,0,0,11,0,64,20,
+ 0,0,0,0,0,163,4,60,236,5,132,140,
+ 13,8,192,12,0,0,0,0,255,31,3,60,
+ 255,255,99,52,36,16,67,0,0,160,3,60,
+ 37,16,67,0,224,132,130,175,228,132,130,143,
+ 0,0,0,0,11,0,64,20,0,0,0,0,
+ 0,163,4,60,236,5,132,140,13,8,192,12,
+ 0,0,0,0,255,31,3,60,255,255,99,52,
+ 36,16,67,0,0,160,3,60,37,16,67,0,
+ 228,132,130,175,224,132,133,143,0,163,6,60,
+ 232,5,198,140,228,132,135,143,2,131,4,60,
+ 15,63,192,12,208,144,132,36,16,129,133,143,
+ 20,129,134,143,2,131,4,60,15,63,192,12,
+ 8,145,132,36,7,162,2,60,232,0,66,52,
+ 0,0,83,140,1,0,3,130,105,0,2,36,
+ 7,0,98,20,251,255,2,60,1,0,2,36,
+ 36,129,130,175,4,0,2,60,0,8,66,52,
+ 10,34,192,8,37,152,98,2,36,129,128,175,
+ 255,247,66,52,36,152,98,2,7,162,2,60,
+ 232,0,66,52,0,0,83,172,0,0,5,130,
+ 114,0,2,36,3,0,162,16,82,0,2,36,
+ 3,0,162,20,119,0,2,36,42,34,192,8,
+ 33,176,0,0,3,0,162,16,87,0,2,36,
+ 3,0,162,20,108,0,2,36,42,34,192,8,
+ 1,0,22,36,3,0,162,16,76,0,2,36,
+ 3,0,162,20,116,0,2,36,42,34,192,8,
+ 2,0,22,36,118,0,162,16,84,0,2,36,
+ 116,0,162,16,0,0,0,0,2,131,4,60,
+ 15,63,192,12,52,145,132,36,7,35,192,8,
+ 0,0,0,0,0,0,38,130,0,0,0,0,
+ 12,0,192,16,99,0,2,36,3,0,194,16,
+ 67,0,2,36,4,0,194,20,33,152,0,0,
+ 5,0,19,36,61,34,192,8,5,0,21,36,
+ 2,131,1,60,80,155,50,160,61,34,192,8,
+ 33,168,0,0,33,168,0,0,5,0,19,36,
+ 2,131,1,60,80,155,32,160,16,0,168,143,
+ 0,163,18,60,236,5,82,142,0,0,0,0,
+ 197,0,0,17,255,255,20,37,255,255,194,38,
+ 2,0,87,44,2,0,30,36,33,128,160,2,
+ 42,16,112,2,73,0,64,20,0,0,0,0,
+ 42,0,224,18,5,0,2,36,13,0,2,22,
+ 0,0,0,0,25,0,64,26,33,136,0,0,
+ 224,132,130,143,0,0,0,0,33,16,81,0,
+ 0,0,81,160,1,0,49,38,42,16,50,2,
+ 249,255,64,20,33,48,64,2,105,34,192,8,
+ 0,0,0,0,2,131,3,60,33,24,112,0,
+ 80,155,99,144,0,0,0,0,9,0,64,26,
+ 33,136,0,0,224,132,130,143,0,0,0,0,
+ 33,16,81,0,1,0,49,38,0,0,67,160,
+ 42,16,50,2,249,255,64,20,0,0,0,0,
+ 33,48,64,2,0,163,4,60,232,5,132,140,
+ 224,132,133,143,0,0,0,0,28,33,192,12,
+ 1,0,7,36,76,33,192,12,0,0,0,0,
+ 83,33,192,12,0,0,0,0,147,0,64,20,
+ 0,0,0,0,3,0,192,18,33,48,64,2,
+ 22,0,222,22,0,0,0,0,0,163,4,60,
+ 232,5,132,140,228,132,133,143,0,0,0,0,
+ 28,33,192,12,33,56,0,0,76,33,192,12,
+ 0,0,0,0,83,33,192,12,0,0,0,0,
+ 131,0,64,20,0,0,0,0,8,0,222,22,
+ 0,0,0,0,224,132,132,143,228,132,133,143,
+ 0,0,0,0,129,33,192,12,33,48,64,2,
+ 122,0,64,20,0,0,0,0,1,0,16,38,
+ 42,16,112,2,185,255,64,16,0,0,0,0,
+ 255,255,148,38,255,255,2,36,178,255,130,22,
+ 33,128,160,2,7,35,192,8,0,0,0,0,
+ 180,10,192,12,0,0,0,0,34,11,192,12,
+ 1,0,4,36,0,0,34,130,0,0,0,0,
+ 6,0,64,16,33,184,0,0,24,0,160,175,
+ 2,131,1,60,88,155,52,164,171,34,192,8,
+ 33,176,0,0,6,0,23,36,4,0,2,36,
+ 24,0,160,175,2,131,1,60,88,155,34,164,
+ 33,176,0,0,0,8,30,36,24,0,177,143,
+ 0,0,0,0,42,16,241,2,83,0,64,20,
+ 64,16,17,0,2,131,8,60,88,155,8,37,
+ 33,168,72,0,0,0,178,150,0,0,0,0,
+ 26,0,210,3,2,0,64,22,0,0,0,0,
+ 13,0,7,0,255,255,1,36,4,0,65,22,
+ 0,128,1,60,2,0,193,23,0,0,0,0,
+ 13,0,6,0,18,16,0,0,16,0,168,143,
+ 0,0,0,0,24,0,72,0,33,56,192,2,
+ 33,128,0,0,0,163,19,60,4,1,115,142,
+ 0,163,4,60,232,5,132,140,224,132,133,143,
+ 18,160,0,0,0,0,0,0,0,0,0,0,
+ 28,33,192,12,33,48,64,2,10,0,128,26,
+ 0,0,0,0,76,33,192,12,0,0,0,0,
+ 83,33,192,12,0,0,0,0,48,0,64,20,
+ 1,0,16,38,42,16,20,2,248,255,64,20,
+ 0,0,0,0,2,131,5,60,140,145,165,36,
+ 0,163,16,60,4,1,16,142,3,0,192,18,
+ 0,0,0,0,2,131,5,60,128,145,165,36,
+ 2,131,4,60,96,145,132,36,15,63,192,12,
+ 33,48,64,2,19,0,19,18,24,0,146,2,
+ 18,24,0,0,35,16,19,2,0,0,0,0,
+ 27,0,98,0,2,0,64,20,0,0,0,0,
+ 13,0,7,0,18,16,0,0,2,131,4,60,
+ 152,145,132,36,64,41,2,0,35,40,162,0,
+ 128,40,5,0,33,40,162,0,15,63,192,12,
+ 192,40,5,0,255,34,192,8,2,0,181,38,
+ 2,131,4,60,168,145,132,36,15,63,192,12,
+ 2,0,181,38,1,0,49,38,42,16,241,2,
+ 178,255,64,16,0,0,0,0,1,0,214,38,
+ 2,0,194,42,166,255,64,20,0,0,0,0,
+ 100,0,191,143,96,0,190,143,92,0,183,143,
+ 88,0,182,143,84,0,181,143,80,0,180,143,
+ 76,0,179,143,72,0,178,143,68,0,177,143,
+ 64,0,176,143,8,0,224,3,104,0,189,39,
+ 0,0,0,0,43,16,134,0,0,0,164,175,
+ 4,0,165,175,8,0,166,175,7,0,64,20,
+ 12,0,167,175,43,16,196,0,5,0,64,20,
+ 1,0,2,36,43,16,167,0,2,0,64,16,
+ 43,16,229,0,255,255,2,36,8,0,224,3,
+ 0,0,0,0,232,255,189,39,3,131,4,60,
+ 208,12,132,36,170,0,5,36,16,0,191,175,
+ 144,71,192,12,60,0,6,36,2,131,6,60,
+ 112,155,198,36,2,131,2,60,212,246,66,140,
+ 2,131,3,60,216,246,99,132,0,0,194,172,
+ 4,0,195,164,2,131,2,60,138,155,66,148,
+ 2,131,3,60,132,155,99,148,2,131,4,60,
+ 134,155,132,148,2,131,5,60,136,155,165,148,
+ 3,131,1,60,216,12,34,164,2,131,2,60,
+ 130,155,66,148,3,131,10,60,218,12,74,37,
+ 3,0,199,136,0,0,199,152,4,0,200,128,
+ 5,0,201,128,3,0,71,169,0,0,71,185,
+ 4,0,72,161,5,0,73,161,3,131,1,60,
+ 238,12,35,164,3,131,1,60,242,12,36,164,
+ 3,131,1,60,246,12,37,164,3,131,1,60,
+ 240,12,34,164,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,3,131,2,60,
+ 216,12,66,140,3,131,3,60,220,12,99,140,
+ 3,131,1,60,208,12,34,172,3,131,1,60,
+ 212,12,35,172,3,131,2,60,238,12,66,148,
+ 3,131,3,60,240,12,99,148,3,131,4,60,
+ 242,12,132,148,232,255,189,39,16,0,176,175,
+ 3,131,1,60,234,12,35,164,24,133,131,143,
+ 20,0,191,175,3,131,1,60,224,12,32,172,
+ 3,131,1,60,228,12,32,172,3,131,1,60,
+ 248,12,32,172,3,131,1,60,252,12,32,172,
+ 3,131,1,60,8,13,32,164,3,131,1,60,
+ 4,13,32,164,3,131,1,60,232,12,34,164,
+ 33,16,68,0,3,131,1,60,236,12,36,164,
+ 3,131,1,60,244,12,34,164,8,0,96,24,
+ 1,0,16,36,150,35,192,12,33,32,0,2,
+ 24,133,130,143,1,0,16,38,42,16,80,0,
+ 250,255,64,16,0,0,0,0,206,35,192,12,
+ 0,0,0,0,52,36,192,12,0,0,0,0,
+ 1,0,2,36,3,131,1,60,0,13,34,164,
+ 3,0,2,36,3,131,1,60,2,13,32,164,
+ 3,131,1,60,20,13,34,172,2,131,1,60,
+ 164,247,34,172,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,224,255,189,39,
+ 20,0,177,175,33,136,128,0,16,0,176,175,
+ 192,129,17,0,3,131,4,60,16,13,132,36,
+ 33,32,4,2,187,0,5,36,24,0,191,175,
+ 144,71,192,12,128,0,6,36,2,131,2,60,
+ 140,155,66,148,100,0,3,36,3,131,1,60,
+ 33,8,48,0,24,13,35,172,0,18,2,0,
+ 37,16,34,2,3,131,1,60,33,8,48,0,
+ 16,13,34,164,22,36,192,12,33,32,32,2,
+ 4,0,2,36,64,138,17,0,3,131,1,60,
+ 33,8,48,0,20,13,34,172,2,131,1,60,
+ 33,8,49,0,164,247,34,172,3,131,1,60,
+ 33,8,48,0,52,13,32,172,3,131,1,60,
+ 33,8,48,0,56,13,32,172,3,131,1,60,
+ 33,8,48,0,106,13,32,164,3,131,1,60,
+ 33,8,48,0,110,13,32,164,3,131,1,60,
+ 33,8,48,0,114,13,32,164,3,131,1,60,
+ 33,8,48,0,120,13,32,172,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,24,133,130,143,216,255,189,39,
+ 20,0,177,175,1,0,17,36,36,0,191,175,
+ 32,0,180,175,28,0,179,175,24,0,178,175,
+ 55,0,64,24,16,0,176,175,3,131,20,60,
+ 228,12,148,38,3,131,2,60,56,13,66,36,
+ 128,0,83,36,124,0,82,36,128,0,16,36,
+ 0,0,130,142,0,0,0,0,6,0,34,22,
+ 33,32,32,2,0,0,64,174,101,36,192,12,
+ 0,0,96,174,8,36,192,8,128,0,115,38,
+ 3,131,4,60,33,32,144,0,40,13,132,140,
+ 3,131,5,60,33,40,176,0,44,13,165,140,
+ 244,255,134,142,248,255,135,142,20,35,192,12,
+ 0,0,0,0,17,0,64,20,33,32,32,2,
+ 3,131,3,60,33,24,112,0,48,13,99,148,
+ 3,131,2,60,33,16,80,0,16,13,66,148,
+ 0,0,0,0,8,0,98,20,0,0,0,0,
+ 3,131,1,60,33,8,48,0,106,13,32,164,
+ 101,36,192,12,33,32,32,2,8,36,192,8,
+ 128,0,115,38,0,0,64,174,125,36,192,12,
+ 0,0,96,174,128,0,115,38,128,0,82,38,
+ 24,133,130,143,1,0,49,38,42,16,81,0,
+ 210,255,64,16,128,0,16,38,36,0,191,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,192,33,4,0,3,131,2,60,
+ 28,13,66,36,33,24,130,0,3,131,5,60,
+ 208,12,165,140,3,131,6,60,212,12,198,140,
+ 0,0,101,172,4,0,102,172,12,0,66,36,
+ 3,131,3,60,224,12,99,140,33,16,130,0,
+ 3,131,1,60,33,8,36,0,36,13,35,172,
+ 3,131,3,60,216,12,99,140,3,131,5,60,
+ 220,12,165,140,0,0,67,172,4,0,69,172,
+ 3,131,2,60,33,16,68,0,16,13,66,148,
+ 3,131,1,60,33,8,36,0,8,0,224,3,
+ 48,13,34,164,24,133,130,143,224,255,189,39,
+ 20,0,177,175,1,0,17,36,24,0,191,175,
+ 38,0,64,24,16,0,176,175,128,0,16,36,
+ 3,131,4,60,33,32,144,0,40,13,132,140,
+ 3,131,5,60,33,40,176,0,44,13,165,140,
+ 3,131,6,60,216,12,198,140,3,131,7,60,
+ 220,12,231,140,20,35,192,12,0,0,0,0,
+ 18,0,64,20,0,0,0,0,3,131,3,60,
+ 33,24,112,0,48,13,99,148,3,131,2,60,
+ 33,16,80,0,16,13,66,148,0,0,0,0,
+ 9,0,98,20,0,0,0,0,3,131,2,60,
+ 33,16,80,0,20,13,66,140,0,0,0,0,
+ 3,0,64,16,0,0,0,0,203,36,192,12,
+ 33,32,32,2,24,133,130,143,1,0,49,38,
+ 42,16,81,0,221,255,64,16,128,0,16,38,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,192,41,4,0,
+ 3,131,3,60,33,24,101,0,20,13,99,140,
+ 4,0,2,36,16,0,98,20,240,255,189,39,
+ 1,0,2,36,64,26,4,0,3,131,1,60,
+ 33,8,37,0,20,13,34,172,2,131,1,60,
+ 33,8,35,0,164,247,34,172,1,0,2,36,
+ 3,131,1,60,33,8,37,0,110,13,34,164,
+ 3,131,1,60,33,8,37,0,112,13,32,164,
+ 8,0,224,3,16,0,189,39,224,255,189,39,
+ 24,0,178,175,33,144,128,0,16,0,176,175,
+ 192,129,18,0,28,0,191,175,20,0,177,175,
+ 3,131,2,60,33,16,80,0,20,13,66,140,
+ 0,0,0,0,18,0,64,16,4,0,17,36,
+ 16,0,81,16,254,255,66,36,2,0,66,44,
+ 4,0,64,16,64,18,18,0,161,36,192,12,
+ 0,0,0,0,64,18,18,0,3,131,1,60,
+ 33,8,48,0,20,13,49,172,2,131,1,60,
+ 33,8,34,0,164,247,49,172,3,131,1,60,
+ 33,8,48,0,110,13,32,164,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,3,131,4,60,
+ 208,12,132,140,3,131,5,60,212,12,165,140,
+ 3,131,6,60,216,12,198,140,3,131,7,60,
+ 220,12,231,140,232,255,189,39,16,0,191,175,
+ 20,35,192,12,0,0,0,0,10,0,64,20,
+ 1,0,2,36,3,131,1,60,252,12,34,172,
+ 1,0,2,36,3,131,1,60,4,13,34,164,
+ 3,131,1,60,6,13,32,164,197,36,192,8,
+ 1,0,2,36,3,131,2,60,248,12,66,140,
+ 0,0,0,0,9,0,64,20,1,0,2,36,
+ 72,37,192,12,0,0,0,0,1,0,2,36,
+ 3,131,1,60,8,13,34,164,3,131,1,60,
+ 10,13,32,164,1,0,2,36,3,131,1,60,
+ 248,12,34,172,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,224,255,189,39,
+ 20,0,177,175,33,136,128,0,16,0,176,175,
+ 192,129,17,0,24,0,191,175,3,131,2,60,
+ 33,16,80,0,114,13,66,132,0,0,0,0,
+ 5,0,64,16,1,0,2,36,3,131,1,60,
+ 33,8,48,0,67,37,192,8,56,13,34,172,
+ 3,131,4,60,208,12,132,36,3,131,2,60,
+ 64,13,66,36,33,24,2,2,3,131,1,60,
+ 33,8,48,0,60,13,32,164,0,0,133,140,
+ 4,0,134,140,0,0,101,172,4,0,102,172,
+ 12,0,66,36,3,131,3,60,224,12,99,140,
+ 33,16,2,2,3,131,1,60,33,8,48,0,
+ 72,13,35,172,3,131,3,60,216,12,99,140,
+ 3,131,5,60,220,12,165,140,0,0,67,172,
+ 4,0,69,172,3,131,2,60,33,16,80,0,
+ 16,13,66,148,3,131,1,60,33,8,48,0,
+ 84,13,34,164,0,0,132,140,3,131,5,60,
+ 212,12,165,140,3,131,6,60,216,12,198,140,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,5,0,64,20,0,0,0,0,
+ 3,131,1,60,33,8,48,0,22,37,192,8,
+ 86,13,32,164,3,131,2,60,228,12,66,140,
+ 2,131,3,60,128,155,99,148,192,17,2,0,
+ 3,131,1,60,33,8,34,0,108,13,34,148,
+ 0,0,0,0,33,16,67,0,3,131,1,60,
+ 33,8,48,0,86,13,34,164,3,131,2,60,
+ 232,12,66,148,192,129,17,0,3,131,1,60,
+ 33,8,48,0,88,13,34,164,3,131,2,60,
+ 234,12,66,148,33,32,32,2,3,131,1,60,
+ 33,8,48,0,90,13,34,164,3,131,3,60,
+ 236,12,99,148,3,131,2,60,33,16,80,0,
+ 52,13,66,140,3,131,5,60,60,13,165,36,
+ 3,131,1,60,33,8,48,0,52,13,32,172,
+ 3,131,1,60,33,8,48,0,96,13,34,172,
+ 3,131,1,60,33,8,48,0,92,13,35,164,
+ 3,131,2,60,252,12,66,140,3,131,1,60,
+ 33,8,48,0,100,13,34,172,80,40,192,12,
+ 33,40,5,2,1,0,2,36,3,131,1,60,
+ 33,8,48,0,56,13,32,172,3,131,1,60,
+ 33,8,48,0,114,13,34,164,3,131,1,60,
+ 33,8,48,0,116,13,32,164,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,128,0,2,36,
+ 3,131,4,60,228,12,132,140,3,131,5,60,
+ 104,13,165,36,16,0,191,175,192,25,4,0,
+ 3,131,1,60,33,8,35,0,104,13,34,164,
+ 151,40,192,12,33,40,101,0,2,131,4,60,
+ 15,63,192,12,12,146,132,36,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,16,0,191,175,102,37,192,12,
+ 0,0,0,0,13,38,192,12,0,0,0,0,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,24,133,130,143,208,255,189,39,
+ 24,0,178,175,33,144,0,0,28,0,179,175,
+ 1,0,19,36,44,0,191,175,40,0,182,175,
+ 36,0,181,175,32,0,180,175,20,0,177,175,
+ 110,0,64,24,16,0,176,175,3,131,21,60,
+ 216,12,181,38,3,131,22,60,16,13,214,38,
+ 128,0,208,38,128,0,20,36,192,17,18,0,
+ 3,131,4,60,33,32,148,0,40,13,132,140,
+ 3,131,5,60,33,40,180,0,44,13,165,140,
+ 0,0,166,142,3,131,7,60,220,12,231,140,
+ 0,0,0,0,20,35,192,12,33,136,86,0,
+ 10,0,64,20,0,0,0,0,3,131,3,60,
+ 33,24,116,0,48,13,99,148,3,131,2,60,
+ 33,16,84,0,16,13,66,148,0,0,0,0,
+ 74,0,98,16,0,0,0,0,4,0,2,142,
+ 0,0,0,0,70,0,64,16,0,0,0,0,
+ 12,0,4,142,16,0,5,142,0,0,166,142,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,61,0,65,4,0,0,0,0,
+ 58,0,64,18,0,0,0,0,12,0,4,142,
+ 16,0,5,142,12,0,38,142,16,0,39,142,
+ 20,35,192,12,0,0,0,0,50,0,64,4,
+ 0,0,0,0,12,0,4,142,16,0,5,142,
+ 12,0,38,142,16,0,39,142,20,35,192,12,
+ 0,0,0,0,43,0,64,20,0,0,0,0,
+ 20,0,5,142,8,0,3,142,20,0,36,142,
+ 8,0,34,142,33,40,163,0,33,32,130,0,
+ 43,16,164,0,33,0,64,20,0,0,0,0,
+ 32,0,164,20,0,0,0,0,24,0,4,142,
+ 28,0,5,142,24,0,38,142,28,0,39,142,
+ 20,35,192,12,0,0,0,0,23,0,64,4,
+ 0,0,0,0,24,0,4,142,28,0,5,142,
+ 24,0,38,142,28,0,39,142,20,35,192,12,
+ 0,0,0,0,16,0,64,20,0,0,0,0,
+ 32,0,4,150,32,0,35,150,0,0,0,0,
+ 43,16,131,0,9,0,64,20,0,0,0,0,
+ 8,0,131,20,0,0,0,0,0,0,2,150,
+ 0,0,35,150,0,0,0,0,43,16,67,0,
+ 2,0,64,16,0,0,0,0,33,144,96,2,
+ 128,0,16,38,24,133,130,143,1,0,115,38,
+ 42,16,83,0,154,255,64,16,128,0,148,38,
+ 3,131,1,60,228,12,50,172,12,0,64,22,
+ 192,17,18,0,3,131,2,60,216,12,66,140,
+ 3,131,3,60,220,12,99,140,3,131,1,60,
+ 208,12,34,172,3,131,1,60,212,12,35,172,
+ 3,131,1,60,3,38,192,8,224,12,32,172,
+ 3,131,3,60,33,24,98,0,28,13,99,140,
+ 3,131,4,60,33,32,130,0,32,13,132,140,
+ 3,131,1,60,208,12,35,172,3,131,1,60,
+ 212,12,36,172,3,131,3,60,33,24,98,0,
+ 36,13,99,140,3,131,1,60,33,8,34,0,
+ 24,13,34,140,0,0,0,0,33,24,98,0,
+ 3,131,1,60,224,12,35,172,44,0,191,143,
+ 40,0,182,143,36,0,181,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,48,0,189,39,
+ 24,133,130,143,208,255,189,39,36,0,181,175,
+ 1,0,21,36,44,0,191,175,40,0,182,175,
+ 32,0,180,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,82,0,64,24,16,0,176,175,
+ 3,131,22,60,216,12,214,38,3,131,2,60,
+ 48,13,66,36,128,0,84,36,96,0,83,36,
+ 124,0,82,36,120,0,81,36,128,0,16,36,
+ 0,0,36,142,0,0,69,142,0,0,198,142,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,55,0,64,20,0,0,0,0,
+ 0,0,131,150,0,0,98,150,0,0,0,0,
+ 50,0,98,20,0,0,0,0,3,131,4,60,
+ 33,32,144,0,28,13,132,140,3,131,5,60,
+ 33,40,176,0,32,13,165,140,248,255,198,142,
+ 3,131,7,60,212,12,231,140,20,35,192,12,
+ 0,0,0,0,37,0,64,16,0,0,0,0,
+ 8,0,196,142,3,131,3,60,33,24,112,0,
+ 36,13,99,140,0,0,0,0,43,16,131,0,
+ 29,0,64,20,0,0,0,0,27,0,131,20,
+ 0,0,0,0,0,0,196,142,3,131,5,60,
+ 220,12,165,140,0,0,38,142,0,0,71,142,
+ 20,35,192,12,0,0,0,0,16,0,64,4,
+ 0,0,0,0,0,0,196,142,3,131,5,60,
+ 220,12,165,140,0,0,38,142,0,0,71,142,
+ 20,35,192,12,0,0,0,0,9,0,64,20,
+ 0,0,0,0,0,0,99,150,0,0,130,150,
+ 0,0,0,0,43,16,67,0,3,0,64,20,
+ 0,0,0,0,22,36,192,12,33,32,160,2,
+ 128,0,148,38,128,0,115,38,128,0,82,38,
+ 128,0,49,38,24,133,130,143,1,0,181,38,
+ 42,16,85,0,185,255,64,16,128,0,16,38,
+ 44,0,191,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 48,0,189,39,216,255,189,39,3,131,4,60,
+ 0,13,132,36,32,0,191,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 0,0,130,132,0,0,0,0,14,0,64,16,
+ 0,0,0,0,3,131,2,60,2,13,66,148,
+ 3,131,3,60,234,12,99,148,1,0,66,36,
+ 3,131,1,60,2,13,34,164,255,255,66,48,
+ 43,16,67,0,3,0,64,20,0,0,0,0,
+ 11,39,192,12,0,0,128,164,3,131,4,60,
+ 8,13,132,36,0,0,130,132,0,0,0,0,
+ 14,0,64,16,0,0,0,0,3,131,2,60,
+ 10,13,66,148,3,131,3,60,234,12,99,148,
+ 1,0,66,36,3,131,1,60,10,13,34,164,
+ 255,255,66,48,43,16,67,0,3,0,64,20,
+ 0,0,0,0,24,39,192,12,0,0,128,164,
+ 3,131,4,60,4,13,132,36,0,0,130,132,
+ 0,0,0,0,14,0,64,16,0,0,0,0,
+ 3,131,2,60,6,13,66,148,3,131,3,60,
+ 244,12,99,148,1,0,66,36,3,131,1,60,
+ 6,13,34,164,255,255,66,48,43,16,67,0,
+ 3,0,64,20,0,0,0,0,37,39,192,12,
+ 0,0,128,164,24,133,130,143,0,0,0,0,
+ 78,0,64,24,1,0,17,36,3,131,2,60,
+ 16,13,66,36,226,0,83,36,128,0,82,36,
+ 128,0,16,36,3,131,2,60,33,16,80,0,
+ 110,13,66,132,0,0,0,0,18,0,64,16,
+ 0,0,0,0,3,131,2,60,33,16,80,0,
+ 112,13,66,148,0,0,0,0,1,0,66,36,
+ 96,0,66,166,3,131,3,60,236,12,99,148,
+ 255,255,66,48,43,16,67,0,6,0,64,20,
+ 0,0,0,0,3,131,1,60,33,8,48,0,
+ 110,13,32,164,79,39,192,12,33,32,32,2,
+ 3,131,2,60,33,16,80,0,106,13,66,132,
+ 0,0,0,0,18,0,64,16,0,0,0,0,
+ 3,131,2,60,33,16,80,0,108,13,66,148,
+ 0,0,0,0,1,0,66,36,92,0,66,166,
+ 3,131,3,60,232,12,99,148,255,255,66,48,
+ 43,16,67,0,6,0,64,20,0,0,0,0,
+ 3,131,1,60,33,8,48,0,106,13,32,164,
+ 129,39,192,12,33,32,32,2,0,0,98,134,
+ 0,0,0,0,16,0,64,16,0,0,0,0,
+ 3,131,2,60,33,16,80,0,116,13,66,148,
+ 0,0,0,0,1,0,66,36,100,0,66,166,
+ 3,131,3,60,246,12,99,148,255,255,66,48,
+ 43,16,67,0,4,0,64,20,0,0,0,0,
+ 0,0,96,166,191,39,192,12,33,32,32,2,
+ 128,0,115,38,128,0,82,38,24,133,130,143,
+ 1,0,49,38,42,16,81,0,185,255,64,16,
+ 128,0,16,38,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,232,255,189,39,
+ 16,0,191,175,52,36,192,12,0,0,0,0,
+ 1,0,2,36,3,131,1,60,0,13,34,164,
+ 3,131,1,60,2,13,32,164,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,16,0,191,175,72,37,192,12,
+ 0,0,0,0,1,0,2,36,3,131,1,60,
+ 8,13,34,164,3,131,1,60,10,13,32,164,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,240,255,189,39,3,131,1,60,
+ 248,12,32,172,3,131,1,60,252,12,32,172,
+ 8,0,224,3,16,0,189,39,24,133,130,143,
+ 224,255,189,39,20,0,177,175,1,0,17,36,
+ 24,0,191,175,23,0,64,24,16,0,176,175,
+ 128,0,16,36,3,131,4,60,33,32,144,0,
+ 40,13,132,140,3,131,5,60,33,40,176,0,
+ 44,13,165,140,3,131,6,60,216,12,198,140,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,3,0,64,20,1,0,49,38,
+ 74,39,192,8,1,0,2,36,24,133,130,143,
+ 0,0,0,0,42,16,81,0,236,255,64,16,
+ 128,0,16,38,33,16,0,0,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,192,41,4,0,
+ 16,0,191,175,3,131,3,60,33,24,101,0,
+ 20,13,99,140,1,0,2,36,16,0,98,20,
+ 2,0,2,36,64,26,4,0,3,131,1,60,
+ 33,8,37,0,20,13,34,172,2,131,1,60,
+ 33,8,35,0,164,247,34,172,1,0,2,36,
+ 3,131,1,60,33,8,37,0,110,13,34,164,
+ 3,131,1,60,33,8,37,0,125,39,192,8,
+ 112,13,32,164,21,0,98,20,3,0,3,36,
+ 64,18,4,0,3,131,1,60,33,8,37,0,
+ 20,13,35,172,2,131,1,60,33,8,34,0,
+ 164,247,35,172,3,131,2,60,33,16,69,0,
+ 120,13,66,140,0,0,0,0,1,0,66,36,
+ 3,131,1,60,33,8,37,0,44,39,192,12,
+ 120,13,34,172,3,0,64,16,0,0,0,0,
+ 161,36,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 224,255,189,39,16,0,176,175,33,128,128,0,
+ 20,0,177,175,3,131,17,60,208,12,49,38,
+ 24,0,191,175,0,0,36,142,3,131,5,60,
+ 212,12,165,140,3,131,6,60,216,12,198,140,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,33,32,0,2,22,36,192,12,
+ 1,0,80,44,92,37,192,12,0,0,0,0,
+ 206,35,192,12,0,0,0,0,33,0,0,22,
+ 0,0,0,0,0,0,36,142,3,131,5,60,
+ 212,12,165,140,3,131,6,60,216,12,198,140,
+ 3,131,7,60,220,12,231,140,20,35,192,12,
+ 0,0,0,0,22,0,64,20,0,0,0,0,
+ 3,131,2,60,238,12,66,148,3,131,3,60,
+ 240,12,99,148,3,131,4,60,242,12,132,148,
+ 3,131,1,60,232,12,34,164,3,131,1,60,
+ 234,12,35,164,3,131,1,60,161,36,192,12,
+ 236,12,36,164,3,131,1,60,52,36,192,12,
+ 8,13,32,164,1,0,2,36,3,131,1,60,
+ 0,13,34,164,3,131,1,60,2,13,32,164,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,232,255,189,39,
+ 192,17,4,0,16,0,191,175,3,131,1,60,
+ 33,8,34,0,56,13,34,140,0,0,0,0,
+ 3,0,64,16,0,0,0,0,203,36,192,12,
+ 0,0,0,0,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,3,131,1,60,
+ 248,12,32,172,3,131,1,60,8,0,224,3,
+ 8,13,32,164,232,255,189,39,192,25,4,0,
+ 1,0,2,36,16,0,191,175,3,131,1,60,
+ 33,8,35,0,203,36,192,12,52,13,34,172,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,192,33,4,0,3,131,2,60,
+ 28,13,66,36,33,24,130,0,4,0,166,140,
+ 8,0,167,140,0,0,102,172,4,0,103,172,
+ 12,0,66,36,12,0,163,140,33,16,130,0,
+ 3,131,1,60,33,8,36,0,36,13,35,172,
+ 16,0,163,140,20,0,166,140,0,0,67,172,
+ 4,0,70,172,24,0,163,148,1,0,2,36,
+ 3,131,1,60,33,8,36,0,106,13,34,164,
+ 3,131,1,60,33,8,36,0,48,13,35,164,
+ 26,0,162,148,3,131,1,60,33,8,36,0,
+ 8,0,224,3,108,13,34,164,28,0,130,148,
+ 3,131,1,60,232,12,34,164,30,0,130,148,
+ 3,131,1,60,234,12,34,164,32,0,130,148,
+ 3,131,1,60,236,12,34,164,40,0,130,140,
+ 3,131,1,60,8,0,224,3,252,12,34,172,
+ 224,255,189,39,16,0,176,175,33,128,160,0,
+ 192,25,4,0,3,131,2,60,16,13,66,36,
+ 20,0,177,175,33,136,98,0,24,0,191,175,
+ 4,0,4,142,8,0,5,142,12,0,38,142,
+ 16,0,39,142,20,35,192,12,0,0,0,0,
+ 48,0,64,4,1,0,2,36,4,0,4,142,
+ 8,0,5,142,12,0,38,142,16,0,39,142,
+ 20,35,192,12,0,0,0,0,40,0,64,20,
+ 33,16,0,0,12,0,4,142,20,0,35,142,
+ 0,0,0,0,43,16,131,0,34,0,64,20,
+ 1,0,2,36,32,0,131,20,33,16,0,0,
+ 16,0,4,142,20,0,5,142,24,0,38,142,
+ 28,0,39,142,20,35,192,12,0,0,0,0,
+ 24,0,64,4,1,0,2,36,16,0,4,142,
+ 20,0,5,142,24,0,38,142,28,0,39,142,
+ 20,35,192,12,0,0,0,0,16,0,64,20,
+ 33,16,0,0,16,0,4,142,20,0,5,142,
+ 3,131,6,60,216,12,198,140,3,131,7,60,
+ 220,12,231,140,20,35,192,12,0,0,0,0,
+ 6,0,64,20,1,0,2,36,24,0,3,150,
+ 32,0,34,150,0,0,0,0,43,16,67,0,
+ 1,0,66,56,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 44,133,130,143,216,255,189,39,20,0,177,175,
+ 33,136,128,0,32,0,180,175,33,160,160,0,
+ 36,0,191,175,28,0,179,175,24,0,178,175,
+ 53,0,64,16,16,0,176,175,2,131,19,60,
+ 192,4,115,38,33,32,96,2,54,21,192,12,
+ 1,0,5,36,33,128,64,0,8,0,0,22,
+ 64,26,17,0,2,131,2,60,33,16,67,0,
+ 176,247,66,140,33,24,99,2,1,0,66,36,
+ 143,40,192,8,240,242,98,172,8,0,4,142,
+ 64,146,17,0,20,242,101,38,33,40,69,2,
+ 172,41,192,12,33,48,128,2,33,24,64,0,
+ 60,0,98,40,2,0,64,16,0,242,98,38,
+ 60,0,3,36,33,136,66,2,33,32,32,2,
+ 33,40,0,2,1,0,2,36,17,0,2,162,
+ 0,128,98,52,0,0,2,174,6,23,192,12,
+ 18,0,3,166,10,0,64,20,33,32,0,2,
+ 2,131,2,60,33,16,82,0,172,247,66,140,
+ 0,0,0,0,1,0,66,36,152,21,192,12,
+ 236,0,34,174,143,40,192,8,0,0,0,0,
+ 2,131,2,60,33,16,82,0,168,247,66,140,
+ 0,0,0,0,1,0,66,36,232,0,34,174,
+ 36,0,191,143,32,0,180,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,44,133,130,143,
+ 216,255,189,39,20,0,177,175,33,136,128,0,
+ 32,0,180,175,33,160,160,0,36,0,191,175,
+ 28,0,179,175,24,0,178,175,53,0,64,16,
+ 16,0,176,175,2,131,19,60,192,4,115,38,
+ 33,32,96,2,54,21,192,12,1,0,5,36,
+ 33,128,64,0,8,0,0,22,64,26,17,0,
+ 2,131,2,60,33,16,67,0,176,247,66,140,
+ 33,24,99,2,1,0,66,36,214,40,192,8,
+ 240,242,98,172,8,0,4,142,64,146,17,0,
+ 20,242,101,38,33,40,69,2,74,42,192,12,
+ 33,48,128,2,33,24,64,0,60,0,98,40,
+ 2,0,64,16,0,242,98,38,60,0,3,36,
+ 33,136,66,2,33,32,32,2,33,40,0,2,
+ 1,0,2,36,17,0,2,162,0,128,98,52,
+ 0,0,2,174,6,23,192,12,18,0,3,166,
+ 10,0,64,20,33,32,0,2,2,131,2,60,
+ 33,16,82,0,172,247,66,140,0,0,0,0,
+ 1,0,66,36,152,21,192,12,236,0,34,174,
+ 214,40,192,8,0,0,0,0,2,131,2,60,
+ 33,16,82,0,168,247,66,140,0,0,0,0,
+ 1,0,66,36,232,0,34,174,36,0,191,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,216,255,189,39,24,0,178,175,
+ 33,144,128,0,20,0,177,175,33,136,160,0,
+ 16,0,176,175,33,128,192,0,36,0,191,175,
+ 32,0,180,175,28,0,179,175,4,0,36,142,
+ 8,0,37,142,160,133,134,143,2,131,7,60,
+ 180,211,231,140,20,35,192,12,0,0,0,0,
+ 11,0,64,20,0,0,0,0,2,131,4,60,
+ 144,146,132,36,15,63,192,12,33,40,0,2,
+ 100,129,132,39,108,129,134,39,31,21,192,12,
+ 6,0,5,38,92,41,192,8,0,0,0,0,
+ 3,131,20,60,208,12,148,38,0,0,132,142,
+ 3,131,5,60,212,12,165,140,3,131,6,60,
+ 216,12,198,140,3,131,7,60,220,12,231,140,
+ 0,0,0,0,20,35,192,12,192,129,18,0,
+ 3,131,3,60,33,24,112,0,20,13,99,140,
+ 0,0,0,0,80,0,96,16,1,0,83,44,
+ 33,32,64,2,11,40,192,12,33,40,32,2,
+ 50,0,64,16,33,32,64,2,223,39,192,12,
+ 33,40,32,2,92,37,192,12,0,0,0,0,
+ 206,35,192,12,0,0,0,0,0,0,132,142,
+ 3,131,5,60,212,12,165,140,3,131,6,60,
+ 216,12,198,140,3,131,7,60,220,12,231,140,
+ 20,35,192,12,0,0,0,0,16,0,64,16,
+ 0,0,0,0,14,0,96,18,0,0,0,0,
+ 3,131,2,60,248,12,66,140,3,131,1,60,
+ 9,0,64,16,0,13,32,164,3,131,1,60,
+ 72,37,192,12,4,13,32,164,1,0,2,36,
+ 3,131,1,60,8,13,34,164,3,131,1,60,
+ 10,13,32,164,3,131,2,60,228,12,66,140,
+ 0,0,0,0,38,0,66,22,0,0,0,0,
+ 254,39,192,12,33,32,32,2,52,36,192,12,
+ 0,0,0,0,36,0,34,142,0,0,0,0,
+ 30,0,64,16,0,0,0,0,206,39,192,12,
+ 0,0,0,0,92,41,192,8,0,0,0,0,
+ 3,131,4,60,33,32,144,0,40,13,132,140,
+ 3,131,5,60,33,40,176,0,44,13,165,140,
+ 3,131,6,60,216,12,198,140,3,131,7,60,
+ 220,12,231,140,20,35,192,12,0,0,0,0,
+ 12,0,64,20,0,0,0,0,3,131,3,60,
+ 33,24,112,0,48,13,99,148,3,131,2,60,
+ 33,16,80,0,16,13,66,148,0,0,0,0,
+ 3,0,98,20,0,0,0,0,203,36,192,12,
+ 33,32,64,2,36,0,191,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,40,0,189,39,
+ 224,255,189,39,20,0,177,175,33,136,128,0,
+ 16,0,176,175,192,129,17,0,24,0,191,175,
+ 3,131,2,60,33,16,80,0,20,13,66,140,
+ 0,0,0,0,28,0,64,16,0,0,0,0,
+ 3,131,4,60,33,32,144,0,40,13,132,140,
+ 3,131,5,60,33,40,176,0,44,13,165,140,
+ 3,131,6,60,216,12,198,140,3,131,7,60,
+ 220,12,231,140,20,35,192,12,0,0,0,0,
+ 14,0,64,20,0,0,0,0,3,131,3,60,
+ 33,24,112,0,48,13,99,148,3,131,2,60,
+ 33,16,80,0,16,13,66,148,0,0,0,0,
+ 5,0,98,20,0,0,0,0,161,36,192,12,
+ 0,0,0,0,211,39,192,12,33,32,32,2,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,2,18,5,0,
+ 0,0,130,160,8,0,224,3,1,0,133,160,
+ 2,22,5,0,0,0,130,160,2,20,5,0,
+ 1,0,130,160,2,18,5,0,2,0,130,160,
+ 8,0,224,3,3,0,133,160,0,0,130,144,
+ 1,0,131,144,0,18,2,0,8,0,224,3,
+ 37,16,98,0,0,0,130,144,1,0,131,144,
+ 2,0,133,144,0,22,2,0,0,28,3,0,
+ 33,16,67,0,0,42,5,0,3,0,131,144,
+ 33,16,69,0,8,0,224,3,37,16,67,0,
+ 224,255,189,39,16,0,176,175,33,128,128,0,
+ 24,0,191,175,20,0,177,175,48,129,135,39,
+ 3,0,226,136,0,0,226,152,4,0,227,128,
+ 5,0,228,128,3,0,2,170,0,0,2,186,
+ 4,0,3,162,5,0,4,162,3,0,162,136,
+ 0,0,162,152,4,0,163,128,5,0,164,128,
+ 9,0,2,170,6,0,2,186,10,0,3,162,
+ 11,0,4,162,12,0,4,38,38,0,5,36,
+ 144,41,192,12,33,136,192,0,14,0,4,38,
+ 144,41,192,12,66,66,5,36,17,0,4,38,
+ 33,40,0,0,3,0,2,36,144,41,192,12,
+ 16,0,2,162,19,0,0,162,20,0,0,162,
+ 40,0,34,142,0,0,0,0,43,32,2,0,
+ 36,0,34,142,0,0,0,0,3,0,64,16,
+ 33,24,128,0,218,41,192,8,128,0,130,52,
+ 33,16,96,0,21,0,2,162,4,0,37,150,
+ 0,0,0,0,144,41,192,12,22,0,4,38,
+ 9,0,34,138,6,0,34,154,10,0,35,130,
+ 11,0,36,130,27,0,2,170,24,0,2,186,
+ 28,0,3,162,29,0,4,162,12,0,37,142,
+ 0,0,0,0,148,41,192,12,30,0,4,38,
+ 16,0,37,150,0,0,0,0,144,41,192,12,
+ 34,0,4,38,21,0,34,138,18,0,34,154,
+ 22,0,35,130,23,0,36,130,39,0,2,170,
+ 36,0,2,186,40,0,3,162,41,0,4,162,
+ 24,0,37,150,0,0,0,0,144,41,192,12,
+ 42,0,4,38,26,0,37,150,0,0,0,0,
+ 144,41,192,12,44,0,4,38,28,0,37,150,
+ 0,0,0,0,144,41,192,12,46,0,4,38,
+ 30,0,37,150,0,0,0,0,144,41,192,12,
+ 48,0,4,38,32,0,37,150,0,0,0,0,
+ 144,41,192,12,50,0,4,38,52,0,2,36,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,224,255,189,39,
+ 16,0,176,175,33,128,160,0,24,0,191,175,
+ 20,0,177,175,21,0,2,146,33,136,128,0,
+ 1,0,66,48,40,0,34,174,22,0,2,146,
+ 22,0,4,38,128,0,66,48,156,41,192,12,
+ 36,0,34,174,4,0,34,166,27,0,2,138,
+ 24,0,2,154,28,0,3,130,29,0,4,130,
+ 9,0,34,170,6,0,34,186,10,0,35,162,
+ 11,0,36,162,161,41,192,12,30,0,4,38,
+ 34,0,4,38,156,41,192,12,12,0,34,174,
+ 16,0,34,166,39,0,2,138,36,0,2,154,
+ 40,0,3,130,41,0,4,130,21,0,34,170,
+ 18,0,34,186,22,0,35,162,23,0,36,162,
+ 156,41,192,12,42,0,4,38,44,0,4,38,
+ 156,41,192,12,24,0,34,166,46,0,4,38,
+ 156,41,192,12,26,0,34,166,48,0,4,38,
+ 156,41,192,12,28,0,34,166,50,0,4,38,
+ 156,41,192,12,30,0,34,166,32,0,34,166,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,232,255,189,39,
+ 16,0,176,175,33,128,128,0,20,0,191,175,
+ 48,129,134,39,3,0,194,136,0,0,194,152,
+ 4,0,195,128,5,0,196,128,3,0,2,170,
+ 0,0,2,186,4,0,3,162,5,0,4,162,
+ 3,0,162,136,0,0,162,152,4,0,163,128,
+ 5,0,164,128,9,0,2,170,6,0,2,186,
+ 10,0,3,162,11,0,4,162,12,0,4,38,
+ 144,41,192,12,7,0,5,36,14,0,4,38,
+ 144,41,192,12,66,66,5,36,17,0,4,38,
+ 33,40,0,0,3,0,2,36,144,41,192,12,
+ 16,0,2,162,21,0,2,36,128,0,3,36,
+ 19,0,0,162,20,0,3,162,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 176,255,189,39,68,0,177,175,64,0,176,175,
+ 33,128,160,0,72,0,191,175,14,0,3,146,
+ 66,0,2,36,9,0,98,20,33,136,128,0,
+ 15,0,2,146,0,0,0,0,6,0,67,20,
+ 64,26,17,0,16,0,3,146,3,0,2,36,
+ 11,0,98,16,0,0,0,0,64,26,17,0,
+ 2,131,2,60,33,16,67,0,184,247,66,140,
+ 0,0,0,0,1,0,66,36,2,131,1,60,
+ 33,8,35,0,182,42,192,8,184,247,34,172,
+ 20,0,3,146,0,0,0,0,5,0,96,16,
+ 128,0,2,36,21,0,98,16,64,26,17,0,
+ 179,42,192,8,0,0,0,0,16,0,164,39,
+ 64,26,17,0,2,131,2,60,33,16,67,0,
+ 180,247,66,140,0,0,0,0,1,0,66,36,
+ 2,131,1,60,33,8,35,0,180,247,34,172,
+ 17,42,192,12,33,40,0,2,33,32,32,2,
+ 16,0,165,39,222,40,192,12,33,48,0,2,
+ 182,42,192,8,0,0,0,0,2,131,2,60,
+ 33,16,67,0,180,247,66,140,0,0,0,0,
+ 1,0,66,36,2,131,1,60,33,8,35,0,
+ 180,247,34,172,100,41,192,12,33,32,32,2,
+ 182,42,192,8,0,0,0,0,112,129,132,39,
+ 15,63,192,12,0,0,0,0,72,0,191,143,
+ 68,0,177,143,64,0,176,143,8,0,224,3,
+ 80,0,189,39,8,0,224,3,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,162,48,8,0,64,16,255,255,198,48,
+ 67,40,5,0,64,16,5,0,33,16,68,0,
+ 0,0,66,144,0,0,0,0,203,42,192,8,
+ 33,48,194,0,67,40,5,0,255,255,165,36,
+ 255,255,2,36,6,0,162,16,255,255,3,36,
+ 0,0,130,148,2,0,132,36,255,255,165,36,
+ 252,255,163,20,33,48,194,0,255,255,195,48,
+ 2,20,6,0,33,48,98,0,255,255,195,48,
+ 2,20,6,0,33,48,98,0,8,0,224,3,
+ 255,255,194,48,208,255,189,39,16,0,176,175,
+ 33,128,128,0,28,0,179,175,33,152,160,0,
+ 24,0,178,175,33,144,192,0,36,0,181,175,
+ 33,168,0,2,32,0,180,175,33,160,0,0,
+ 40,0,191,175,20,0,177,175,12,0,3,142,
+ 0,0,2,142,0,0,0,0,35,24,98,0,
+ 42,16,114,0,2,0,64,16,33,136,64,2,
+ 33,136,96,0,13,0,32,18,33,40,96,2,
+ 35,144,81,2,8,0,2,142,0,0,4,142,
+ 33,48,32,2,80,68,192,12,33,32,68,0,
+ 8,0,2,142,0,0,2,142,0,0,2,142,
+ 33,152,113,2,33,16,81,0,0,0,2,174,
+ 0,0,2,142,0,0,0,0,4,0,64,18,
+ 33,160,130,2,4,0,16,142,233,42,192,8,
+ 0,0,0,0,18,0,180,166,33,16,0,2,
+ 40,0,191,143,36,0,181,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,48,0,189,39,
+ 224,255,189,39,24,0,178,175,33,144,128,0,
+ 2,131,4,60,192,4,132,36,36,0,165,175,
+ 1,0,5,36,28,0,191,175,20,0,177,175,
+ 54,21,192,12,16,0,176,175,33,136,64,0,
+ 8,0,32,22,33,40,0,0,2,131,2,60,
+ 176,5,66,140,0,0,0,0,1,0,66,36,
+ 2,131,1,60,102,43,192,8,176,5,34,172,
+ 0,1,3,36,8,0,48,142,8,0,2,36,
+ 16,0,2,166,6,0,2,36,18,0,2,162,
+ 4,0,2,36,14,0,3,166,19,0,2,162,
+ 20,0,3,166,2,131,6,60,212,4,198,36,
+ 3,0,194,136,0,0,194,152,4,0,195,132,
+ 25,0,2,170,22,0,2,186,26,0,3,166,
+ 2,131,1,60,195,211,34,136,176,133,130,155,
+ 0,0,0,0,31,0,2,170,28,0,2,186,
+ 32,0,4,38,144,71,192,12,6,0,6,36,
+ 39,0,162,139,36,0,162,155,0,0,0,0,
+ 41,0,2,170,38,0,2,186,132,129,133,39,
+ 3,0,162,136,0,0,162,152,4,0,163,128,
+ 5,0,164,128,3,0,2,170,0,0,2,186,
+ 4,0,3,162,5,0,4,162,2,131,5,60,
+ 212,4,165,36,3,0,162,136,0,0,162,152,
+ 4,0,163,128,5,0,164,128,9,0,2,170,
+ 6,0,2,186,10,0,3,162,11,0,4,162,
+ 33,32,64,2,8,6,2,36,12,0,2,166,
+ 60,128,2,52,0,0,34,174,60,0,2,36,
+ 18,0,34,166,74,21,192,12,33,40,32,2,
+ 3,0,64,20,0,0,0,0,152,21,192,12,
+ 33,32,32,2,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,216,255,189,39,32,0,180,175,
+ 33,160,128,0,16,0,176,175,33,128,160,0,
+ 36,0,191,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,8,0,2,142,33,152,192,0,
+ 33,136,83,0,6,0,35,150,0,1,4,36,
+ 5,0,100,16,0,2,2,36,113,0,98,16,
+ 0,0,0,0,15,44,192,8,0,0,0,0,
+ 24,0,35,150,176,133,130,151,0,0,0,0,
+ 139,0,98,20,0,0,0,0,26,0,35,150,
+ 2,131,2,60,194,211,66,148,0,0,0,0,
+ 133,0,98,20,0,0,0,0,0,0,34,150,
+ 0,0,0,0,129,0,68,20,8,0,2,36,
+ 2,0,35,150,0,0,0,0,125,0,98,20,
+ 6,4,2,36,4,0,35,150,0,0,0,0,
+ 121,0,98,20,0,0,0,0,2,131,4,60,
+ 192,4,132,36,54,21,192,12,1,0,5,36,
+ 33,144,64,0,8,0,64,22,0,0,0,0,
+ 2,131,2,60,176,5,66,140,0,0,0,0,
+ 1,0,66,36,2,131,1,60,15,44,192,8,
+ 176,5,34,172,8,0,5,142,8,0,80,142,
+ 9,0,162,136,6,0,162,152,10,0,163,128,
+ 11,0,164,128,3,0,2,170,0,0,2,186,
+ 4,0,3,162,5,0,4,162,2,131,6,60,
+ 212,4,198,36,3,0,194,136,0,0,194,152,
+ 4,0,195,128,5,0,196,128,9,0,2,170,
+ 6,0,2,186,10,0,3,162,11,0,4,162,
+ 12,0,4,38,12,0,165,36,33,128,19,2,
+ 80,68,192,12,244,255,102,38,0,1,2,36,
+ 0,0,2,166,8,0,2,36,2,0,2,166,
+ 6,0,2,36,4,0,2,162,4,0,2,36,
+ 5,0,2,162,0,2,2,36,6,0,2,166,
+ 2,131,5,60,212,4,165,36,3,0,162,136,
+ 0,0,162,152,4,0,163,132,11,0,2,170,
+ 8,0,2,186,12,0,3,166,2,131,1,60,
+ 195,211,34,136,176,133,130,155,0,0,0,0,
+ 17,0,2,170,14,0,2,186,11,0,34,138,
+ 8,0,34,154,12,0,35,134,21,0,2,170,
+ 18,0,2,186,22,0,3,166,17,0,34,138,
+ 14,0,34,154,0,0,0,0,27,0,2,170,
+ 24,0,2,186,33,32,128,2,33,40,64,2,
+ 60,128,2,52,0,0,66,174,60,0,2,36,
+ 74,21,192,12,18,0,66,166,38,0,64,20,
+ 0,0,0,0,152,21,192,12,33,32,64,2,
+ 15,44,192,8,0,0,0,0,14,0,35,150,
+ 196,133,130,151,0,0,0,0,29,0,98,20,
+ 0,0,0,0,16,0,35,150,2,131,2,60,
+ 214,211,66,148,0,0,0,0,23,0,98,20,
+ 0,0,0,0,0,0,34,150,0,0,0,0,
+ 19,0,68,20,8,0,2,36,2,0,35,150,
+ 0,0,0,0,15,0,98,20,6,4,2,36,
+ 4,0,35,150,0,0,0,0,11,0,98,20,
+ 0,0,0,0,68,133,130,143,140,129,134,39,
+ 11,0,35,138,8,0,35,154,12,0,36,134,
+ 3,0,195,168,0,0,195,184,4,0,196,164,
+ 20,0,66,36,152,129,130,175,36,0,191,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,192,255,189,39,80,0,169,143,
+ 84,0,168,143,56,0,180,175,33,160,128,0,
+ 44,0,177,175,88,0,177,143,16,0,164,39,
+ 52,0,179,175,92,0,179,143,3,131,3,60,
+ 32,17,99,36,40,0,176,175,33,128,192,0,
+ 60,0,191,175,48,0,178,175,0,0,98,140,
+ 8,0,50,142,1,0,66,36,0,0,98,172,
+ 3,0,162,136,0,0,162,152,4,0,163,128,
+ 5,0,170,128,3,0,66,170,0,0,66,186,
+ 4,0,67,162,5,0,74,162,2,131,10,60,
+ 212,4,74,37,3,0,66,137,0,0,66,153,
+ 4,0,67,129,5,0,69,129,9,0,66,170,
+ 6,0,66,186,10,0,67,162,11,0,69,162,
+ 69,0,2,36,16,0,162,163,17,0,168,163,
+ 18,0,34,150,240,132,131,143,33,48,0,0,
+ 25,0,169,163,20,0,66,36,0,66,2,0,
+ 255,255,66,48,2,18,2,0,37,64,2,1,
+ 0,74,3,0,255,255,98,48,2,18,2,0,
+ 37,72,34,1,3,131,2,60,0,17,66,140,
+ 22,0,160,167,26,0,160,167,18,0,168,167,
+ 176,133,136,143,1,0,99,36,240,132,131,175,
+ 20,0,169,167,24,0,162,163,28,0,168,175,
+ 3,0,226,136,0,0,226,152,0,0,0,0,
+ 35,0,162,171,32,0,162,187,192,42,192,12,
+ 20,0,5,36,39,16,2,0,26,0,162,167,
+ 14,0,2,36,44,0,2,22,8,0,2,36,
+ 12,0,66,166,19,0,162,139,16,0,162,155,
+ 23,0,163,139,20,0,163,155,27,0,164,139,
+ 24,0,164,155,31,0,165,139,28,0,165,155,
+ 17,0,66,170,14,0,66,186,21,0,67,170,
+ 18,0,67,186,25,0,68,170,22,0,68,186,
+ 29,0,69,170,26,0,69,186,35,0,162,139,
+ 32,0,162,155,0,0,0,0,33,0,66,170,
+ 30,0,66,186,34,0,2,36,0,0,34,174,
+ 0,0,35,142,18,0,34,150,0,0,0,0,
+ 33,16,67,0,18,0,34,166,18,0,34,150,
+ 0,0,0,0,60,0,66,44,68,0,64,16,
+ 33,32,128,2,0,0,98,142,18,0,35,150,
+ 60,0,66,36,35,16,67,0,0,0,98,174,
+ 60,0,2,36,18,0,34,166,201,44,192,8,
+ 33,32,128,2,164,129,133,39,3,0,162,136,
+ 0,0,162,152,4,0,163,128,5,0,164,128,
+ 17,0,66,170,14,0,66,186,18,0,67,162,
+ 19,0,68,162,8,0,2,36,20,0,66,166,
+ 19,0,162,139,16,0,162,155,23,0,163,139,
+ 20,0,163,155,27,0,164,139,24,0,164,155,
+ 31,0,165,139,28,0,165,155,25,0,66,170,
+ 22,0,66,186,29,0,67,170,26,0,67,186,
+ 33,0,68,170,30,0,68,186,37,0,69,170,
+ 34,0,69,186,35,0,162,139,32,0,162,155,
+ 0,0,0,0,41,0,66,170,38,0,66,186,
+ 42,0,2,36,0,0,34,174,0,0,35,142,
+ 18,0,34,150,0,0,0,0,33,16,67,0,
+ 18,0,34,166,18,0,34,150,0,0,0,0,
+ 60,0,66,44,8,0,64,16,0,0,0,0,
+ 0,0,98,142,18,0,35,150,60,0,66,36,
+ 35,16,67,0,0,0,98,174,60,0,2,36,
+ 18,0,34,166,18,0,34,150,0,0,0,0,
+ 0,26,2,0,2,18,2,0,37,24,98,0,
+ 12,0,67,166,33,32,128,2,74,21,192,12,
+ 33,40,32,2,8,0,64,20,33,32,32,2,
+ 3,131,3,60,36,17,99,36,0,0,98,140,
+ 0,0,0,0,1,0,66,36,152,21,192,12,
+ 0,0,98,172,60,0,191,143,56,0,180,143,
+ 52,0,179,143,48,0,178,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,64,0,189,39,
+ 176,255,189,39,56,0,180,175,112,0,180,143,
+ 48,0,178,175,100,0,178,143,52,0,179,175,
+ 104,0,179,143,64,0,182,175,33,176,128,0,
+ 72,0,190,175,33,240,160,0,60,0,181,175,
+ 33,168,224,0,68,0,183,175,108,0,183,143,
+ 2,131,4,60,192,4,132,36,76,0,191,175,
+ 44,0,177,175,40,0,176,175,32,0,166,175,
+ 7,2,130,38,2,130,2,0,54,21,192,12,
+ 33,40,0,2,33,136,64,0,8,0,32,22,
+ 0,74,18,0,2,131,4,60,232,146,132,36,
+ 33,40,128,2,15,63,192,12,33,48,0,2,
+ 74,45,192,8,0,0,0,0,255,255,66,50,
+ 2,18,2,0,37,72,34,1,0,66,19,0,
+ 255,255,98,50,2,18,2,0,37,64,2,1,
+ 8,0,130,38,0,58,2,0,255,255,66,48,
+ 2,18,2,0,37,56,226,0,0,163,4,60,
+ 220,5,132,52,4,0,5,36,4,0,34,142,
+ 0,17,6,36,8,0,80,140,4,0,35,142,
+ 8,0,2,36,0,0,98,172,0,0,9,166,
+ 2,0,8,166,6,0,0,166,192,42,192,12,
+ 4,0,7,166,33,32,160,2,4,0,5,36,
+ 192,42,192,12,255,255,70,48,4,0,4,38,
+ 2,0,5,36,192,42,192,12,255,255,70,48,
+ 33,32,0,2,8,0,5,36,192,42,192,12,
+ 255,255,70,48,33,32,224,2,33,40,128,2,
+ 192,42,192,12,255,255,70,48,39,24,2,0,
+ 255,255,98,48,2,0,64,20,33,40,224,2,
+ 255,255,3,52,6,0,3,166,4,0,36,142,
+ 0,0,0,0,220,42,192,12,33,48,128,2,
+ 33,32,192,2,0,0,67,140,33,40,192,3,
+ 0,128,99,52,0,0,67,172,4,0,35,142,
+ 32,0,166,143,18,0,99,148,33,56,160,2,
+ 18,0,35,166,96,0,170,143,17,0,3,36,
+ 16,0,163,175,24,0,177,175,28,0,162,175,
+ 23,44,192,12,20,0,170,175,3,131,3,60,
+ 124,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,76,0,191,143,
+ 72,0,190,143,68,0,183,143,64,0,182,143,
+ 60,0,181,143,56,0,180,143,52,0,179,143,
+ 48,0,178,143,44,0,177,143,40,0,176,143,
+ 8,0,224,3,80,0,189,39,128,255,189,39,
+ 116,0,183,175,33,184,128,0,112,0,182,175,
+ 33,176,160,0,104,0,180,175,33,160,192,0,
+ 108,0,181,175,33,168,224,0,40,0,164,39,
+ 96,0,178,175,144,0,178,143,33,40,0,0,
+ 100,0,179,175,148,0,179,143,16,0,6,36,
+ 120,0,191,175,92,0,177,175,144,71,192,12,
+ 88,0,176,175,56,0,177,39,33,32,32,2,
+ 33,40,0,0,2,0,16,36,40,0,176,167,
+ 2,0,162,150,0,0,0,0,42,0,162,167,
+ 19,0,130,138,16,0,130,154,0,0,0,0,
+ 47,0,162,171,44,0,162,187,144,71,192,12,
+ 16,0,6,36,33,32,64,2,33,40,96,2,
+ 40,0,166,39,33,56,32,2,56,0,176,167,
+ 0,0,162,150,2,131,16,60,8,239,16,38,
+ 58,0,162,167,15,0,130,138,12,0,130,154,
+ 0,0,0,0,63,0,162,171,60,0,162,187,
+ 242,5,2,36,84,0,162,167,72,0,162,39,
+ 72,0,160,167,76,0,176,175,80,0,176,175,
+ 247,71,192,12,16,0,162,175,255,255,3,36,
+ 22,0,67,16,12,0,145,38,33,32,224,2,
+ 6,0,197,38,35,48,150,2,0,0,163,150,
+ 4,0,2,36,16,0,162,175,161,0,2,36,
+ 20,0,162,175,28,0,176,175,0,18,3,0,
+ 2,26,3,0,37,16,67,0,255,255,66,48,
+ 24,0,162,175,80,0,162,143,76,0,163,143,
+ 33,56,32,2,35,16,67,0,255,255,66,48,
+ 220,44,192,12,32,0,162,175,120,0,191,143,
+ 116,0,183,143,112,0,182,143,108,0,181,143,
+ 104,0,180,143,100,0,179,143,96,0,178,143,
+ 92,0,177,143,88,0,176,143,8,0,224,3,
+ 128,0,189,39,196,133,130,143,184,255,189,39,
+ 64,0,191,175,60,0,177,175,109,0,64,16,
+ 56,0,176,175,255,255,3,36,106,0,67,16,
+ 0,0,0,0,176,133,130,143,176,133,145,39,
+ 102,0,64,16,0,0,0,0,100,0,67,16,
+ 0,0,0,0,2,131,2,60,8,239,66,36,
+ 44,0,162,175,48,0,162,175,242,5,2,36,
+ 40,0,160,167,6,0,128,16,52,0,162,167,
+ 1,0,2,36,23,0,130,16,0,0,0,0,
+ 36,46,192,8,0,0,0,0,2,131,16,60,
+ 160,204,16,38,156,71,192,12,33,32,0,2,
+ 0,163,4,60,4,1,132,140,204,204,3,60,
+ 205,204,99,52,25,0,131,0,33,40,32,2,
+ 33,48,0,2,33,56,64,0,40,0,164,39,
+ 16,64,0,0,194,16,8,0,0,0,0,0,
+ 104,56,192,12,16,0,162,175,251,45,192,8,
+ 33,24,64,0,3,131,2,60,28,18,66,148,
+ 0,0,0,0,2,0,66,48,61,0,64,16,
+ 0,0,0,0,2,131,16,60,160,204,16,38,
+ 156,71,192,12,33,32,0,2,0,163,4,60,
+ 4,1,132,140,204,204,3,60,205,204,99,52,
+ 25,0,131,0,33,40,32,2,33,48,0,2,
+ 33,56,64,0,40,0,164,39,16,64,0,0,
+ 194,16,8,0,0,0,0,0,105,57,192,12,
+ 16,0,162,175,33,24,64,0,255,255,2,36,
+ 39,0,98,16,0,0,0,0,140,129,130,147,
+ 0,0,0,0,1,0,66,48,7,0,64,20,
+ 0,0,0,0,68,133,131,143,152,129,130,143,
+ 0,0,0,0,43,16,67,0,11,0,64,16,
+ 33,32,0,0,68,133,131,143,148,129,130,143,
+ 0,0,0,0,6,0,98,16,33,32,0,0,
+ 196,133,133,143,148,129,131,175,17,43,192,12,
+ 33,32,0,0,33,32,0,0,140,129,133,39,
+ 14,0,6,36,4,0,2,36,16,0,162,175,
+ 162,0,2,36,20,0,162,175,24,0,162,175,
+ 2,131,2,60,8,239,66,36,28,0,162,175,
+ 48,0,162,143,44,0,163,143,196,133,135,39,
+ 35,16,67,0,255,255,66,48,220,44,192,12,
+ 32,0,162,175,64,0,191,143,60,0,177,143,
+ 56,0,176,143,8,0,224,3,72,0,189,39,
+ 208,255,189,39,36,0,179,175,33,152,128,0,
+ 40,0,180,175,33,160,160,0,32,0,178,175,
+ 24,0,176,175,33,128,224,0,44,0,191,175,
+ 28,0,177,175,6,0,2,150,64,0,177,143,
+ 0,0,0,0,20,0,64,16,33,144,192,0,
+ 12,0,68,38,8,0,5,36,192,42,192,12,
+ 0,17,6,36,4,0,4,38,2,0,5,36,
+ 192,42,192,12,255,255,70,48,33,32,0,2,
+ 33,40,32,2,192,42,192,12,255,255,70,48,
+ 255,255,66,48,255,255,3,52,4,0,67,16,
+ 0,0,0,0,3,131,3,60,111,46,192,8,
+ 120,17,99,36,4,0,2,150,0,0,0,0,
+ 0,26,2,0,2,18,2,0,37,24,98,0,
+ 255,255,99,48,4,0,113,16,8,0,7,38,
+ 3,131,3,60,111,46,192,8,120,17,99,36,
+ 2,0,2,150,0,0,0,0,0,26,2,0,
+ 2,18,2,0,37,24,98,0,255,255,99,48,
+ 161,0,2,36,15,0,98,20,248,255,40,38,
+ 33,32,96,2,33,40,128,2,3,131,3,60,
+ 112,17,99,36,0,0,98,140,33,48,64,2,
+ 16,0,167,175,33,56,0,2,20,0,168,175,
+ 1,0,66,36,86,45,192,12,0,0,98,172,
+ 115,46,192,8,0,0,0,0,3,131,3,60,
+ 116,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,44,0,191,143,
+ 40,0,180,143,36,0,179,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 48,0,189,39,192,255,189,39,52,0,181,175,
+ 33,168,128,0,44,0,179,175,33,152,160,0,
+ 48,0,180,175,33,160,192,0,32,0,176,175,
+ 33,128,224,0,33,32,0,2,33,48,0,0,
+ 40,0,178,175,80,0,178,143,3,131,3,60,
+ 144,16,99,36,56,0,191,175,36,0,177,175,
+ 0,0,98,140,33,40,64,2,1,0,66,36,
+ 192,42,192,12,0,0,98,172,255,255,66,48,
+ 255,255,3,52,8,0,67,16,8,0,2,36,
+ 3,131,2,60,148,16,66,140,0,0,0,0,
+ 1,0,66,36,3,131,1,60,217,46,192,8,
+ 148,16,34,172,0,0,3,150,0,0,0,0,
+ 58,0,98,20,255,1,69,38,2,131,4,60,
+ 192,4,132,36,3,131,2,60,172,16,66,140,
+ 3,131,3,60,196,16,99,140,1,0,66,36,
+ 1,0,99,36,3,131,1,60,172,16,34,172,
+ 3,131,1,60,196,16,35,172,54,21,192,12,
+ 2,42,5,0,33,136,64,0,8,0,32,22,
+ 33,32,0,2,3,131,2,60,200,16,66,140,
+ 0,0,0,0,1,0,66,36,3,131,1,60,
+ 217,46,192,8,200,16,34,172,33,40,64,2,
+ 33,48,0,0,0,0,0,162,192,42,192,12,
+ 2,0,0,166,33,40,0,2,39,16,2,0,
+ 2,0,162,164,4,0,36,142,0,0,0,0,
+ 220,42,192,12,33,48,64,2,33,32,160,2,
+ 6,0,101,38,35,48,147,2,0,0,67,140,
+ 12,0,135,38,0,128,99,52,0,0,67,172,
+ 1,0,3,36,18,0,50,166,16,0,163,175,
+ 4,0,3,36,20,0,163,175,24,0,177,175,
+ 23,44,192,12,28,0,162,175,3,131,2,60,
+ 228,16,66,140,0,0,0,0,1,0,66,36,
+ 3,131,1,60,228,16,34,172,56,0,191,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,64,0,189,39,200,255,189,39,
+ 44,0,181,175,33,168,128,0,3,131,3,60,
+ 4,17,99,36,48,0,191,175,40,0,180,175,
+ 36,0,179,175,32,0,178,175,28,0,177,175,
+ 24,0,176,175,0,0,98,140,33,136,160,0,
+ 1,0,66,36,0,0,98,172,18,0,34,150,
+ 0,0,0,0,255,255,84,48,243,5,130,46,
+ 8,0,64,20,33,152,192,0,3,131,2,60,
+ 8,17,66,140,0,0,0,0,1,0,66,36,
+ 3,131,1,60,132,47,192,8,8,17,34,172,
+ 2,131,18,60,18,233,82,38,33,32,64,2,
+ 0,0,48,142,8,0,37,142,255,63,16,50,
+ 80,68,192,12,33,48,0,2,0,0,34,142,
+ 0,0,0,0,0,128,66,48,5,0,64,20,
+ 33,144,80,2,4,0,49,142,0,0,0,0,
+ 1,47,192,8,33,32,64,2,2,131,18,60,
+ 18,233,82,38,33,128,114,2,16,0,17,38,
+ 33,32,32,2,176,133,133,39,168,71,192,12,
+ 4,0,6,36,9,0,64,16,33,32,32,2,
+ 128,129,133,39,168,71,192,12,4,0,6,36,
+ 4,0,64,16,0,0,0,0,3,131,3,60,
+ 128,47,192,8,12,17,99,36,0,0,4,146,
+ 64,0,2,36,240,0,131,48,4,0,98,16,
+ 15,0,130,48,3,131,3,60,128,47,192,8,
+ 8,17,99,36,128,136,2,0,20,0,34,42,
+ 4,0,64,16,33,32,0,2,3,131,3,60,
+ 128,47,192,8,8,17,99,36,33,40,32,2,
+ 192,42,192,12,33,48,0,0,255,255,66,48,
+ 255,255,3,52,4,0,67,16,0,0,0,0,
+ 3,131,3,60,128,47,192,8,8,17,99,36,
+ 6,0,2,150,0,0,0,0,63,255,66,48,
+ 18,0,64,16,33,56,17,2,3,131,3,60,
+ 48,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,3,131,2,60,
+ 56,17,66,140,3,131,3,60,24,17,99,140,
+ 1,0,66,36,1,0,99,36,3,131,1,60,
+ 56,17,34,172,3,131,1,60,132,47,192,8,
+ 24,17,35,172,2,0,2,150,0,0,0,0,
+ 0,26,2,0,2,18,2,0,37,24,98,0,
+ 255,255,99,48,35,64,113,0,35,16,242,0,
+ 35,16,130,2,42,16,72,0,4,0,64,16,
+ 1,0,2,36,3,131,3,60,128,47,192,8,
+ 24,17,99,36,9,0,3,146,0,0,0,0,
+ 5,0,98,16,17,0,2,36,15,0,98,16,
+ 33,32,160,2,126,47,192,8,0,0,0,0,
+ 33,32,160,2,33,40,64,2,3,131,3,60,
+ 28,17,99,36,0,0,98,140,33,48,0,2,
+ 16,0,168,175,1,0,66,36,123,46,192,12,
+ 0,0,98,172,132,47,192,8,0,0,0,0,
+ 33,40,64,2,3,131,3,60,28,17,99,36,
+ 0,0,98,140,33,48,0,2,16,0,168,175,
+ 1,0,66,36,41,46,192,12,0,0,98,172,
+ 132,47,192,8,0,0,0,0,3,131,3,60,
+ 20,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,48,0,191,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,56,0,189,39,232,255,189,39,
+ 255,0,12,60,255,0,140,53,0,255,13,60,
+ 0,255,173,53,16,0,176,175,3,131,16,60,
+ 0,17,16,38,33,32,0,2,33,40,0,0,
+ 0,163,9,60,220,5,41,141,0,163,10,60,
+ 16,6,74,141,0,163,11,60,224,5,107,141,
+ 20,0,191,175,0,28,9,0,2,20,9,0,
+ 37,24,98,0,0,60,10,0,2,20,10,0,
+ 37,56,226,0,0,68,11,0,2,20,11,0,
+ 37,64,2,1,2,18,3,0,36,16,76,0,
+ 0,26,3,0,36,24,109,0,37,16,67,0,
+ 184,133,130,175,2,18,7,0,36,16,76,0,
+ 0,58,7,0,36,56,237,0,37,16,71,0,
+ 192,133,130,175,2,18,8,0,36,16,76,0,
+ 0,66,8,0,36,64,13,1,37,16,72,0,
+ 176,133,137,175,196,133,138,175,188,133,139,175,
+ 180,133,130,175,144,71,192,12,76,0,6,36,
+ 3,131,4,60,144,16,132,36,33,40,0,0,
+ 32,0,2,36,0,0,2,174,10,0,2,36,
+ 3,131,1,60,44,17,34,172,144,71,192,12,
+ 104,0,6,36,3,131,4,60,112,17,132,36,
+ 33,40,0,0,144,71,192,12,16,0,6,36,
+ 3,131,4,60,80,17,132,36,33,40,0,0,
+ 144,71,192,12,32,0,6,36,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 176,255,189,39,100,0,162,143,96,0,169,143,
+ 72,0,182,175,33,176,128,0,48,0,176,175,
+ 104,0,176,143,34,0,164,39,60,0,179,175,
+ 108,0,179,143,3,131,3,60,104,17,99,36,
+ 56,0,178,175,2,131,18,60,212,4,82,38,
+ 52,0,177,175,33,136,192,0,68,0,181,175,
+ 112,0,181,143,4,0,6,36,76,0,191,175,
+ 64,0,180,175,0,66,2,0,255,255,66,48,
+ 2,18,2,0,37,64,2,1,0,0,98,140,
+ 8,0,116,142,1,0,66,36,0,0,98,172,
+ 3,0,162,136,0,0,162,152,4,0,163,128,
+ 5,0,170,128,3,0,130,170,0,0,130,186,
+ 4,0,131,162,5,0,138,162,3,0,66,138,
+ 0,0,66,154,4,0,67,130,5,0,69,130,
+ 9,0,130,170,6,0,130,186,10,0,131,162,
+ 11,0,133,162,255,255,2,52,16,0,162,167,
+ 18,0,98,150,33,40,0,0,20,0,160,163,
+ 21,0,160,163,30,0,66,36,0,26,2,0,
+ 255,255,66,48,2,18,2,0,37,24,98,0,
+ 18,0,163,167,3,0,226,136,0,0,226,152,
+ 0,0,0,0,25,0,162,171,22,0,162,187,
+ 3,0,34,137,0,0,34,153,4,0,35,129,
+ 5,0,39,129,29,0,162,171,26,0,162,187,
+ 30,0,163,163,31,0,167,163,144,71,192,12,
+ 32,0,168,167,3,0,66,138,0,0,66,154,
+ 4,0,67,134,41,0,162,171,38,0,162,187,
+ 42,0,163,167,33,16,0,2,0,130,16,0,
+ 255,255,66,48,2,18,2,0,37,128,2,2,
+ 14,0,2,36,58,0,34,22,44,0,176,167,
+ 18,0,162,151,0,0,0,0,12,0,130,166,
+ 19,0,162,139,16,0,162,155,23,0,163,139,
+ 20,0,163,155,27,0,164,139,24,0,164,155,
+ 31,0,165,139,28,0,165,155,17,0,130,170,
+ 14,0,130,186,21,0,131,170,18,0,131,186,
+ 25,0,132,170,22,0,132,186,29,0,133,170,
+ 26,0,133,186,35,0,162,139,32,0,162,155,
+ 39,0,163,139,36,0,163,155,43,0,164,139,
+ 40,0,164,155,44,0,165,131,33,0,130,170,
+ 30,0,130,186,37,0,131,170,34,0,131,186,
+ 41,0,132,170,38,0,132,186,42,0,133,162,
+ 45,0,162,131,0,0,0,0,43,0,130,162,
+ 44,0,2,36,0,0,98,174,0,0,99,142,
+ 18,0,98,150,0,0,0,0,33,16,67,0,
+ 18,0,98,166,18,0,98,150,0,0,0,0,
+ 60,0,66,44,80,0,64,16,33,32,192,2,
+ 0,0,162,142,18,0,99,150,60,0,66,36,
+ 35,16,67,0,0,0,162,174,60,0,2,36,
+ 18,0,98,166,172,48,192,8,33,32,192,2,
+ 208,129,133,39,3,0,162,136,0,0,162,152,
+ 4,0,163,128,5,0,164,128,17,0,130,170,
+ 14,0,130,186,18,0,131,162,19,0,132,162,
+ 129,55,2,36,20,0,130,166,19,0,162,139,
+ 16,0,162,155,23,0,163,139,20,0,163,155,
+ 27,0,164,139,24,0,164,155,31,0,165,139,
+ 28,0,165,155,25,0,130,170,22,0,130,186,
+ 29,0,131,170,26,0,131,186,33,0,132,170,
+ 30,0,132,186,37,0,133,170,34,0,133,186,
+ 35,0,162,139,32,0,162,155,39,0,163,139,
+ 36,0,163,155,43,0,164,139,40,0,164,155,
+ 44,0,165,131,41,0,130,170,38,0,130,186,
+ 45,0,131,170,42,0,131,186,49,0,132,170,
+ 46,0,132,186,50,0,133,162,45,0,162,131,
+ 0,0,0,0,51,0,130,162,52,0,2,36,
+ 0,0,98,174,0,0,99,142,18,0,98,150,
+ 0,0,0,0,33,16,67,0,18,0,98,166,
+ 18,0,98,150,0,0,0,0,60,0,66,44,
+ 8,0,64,16,0,0,0,0,0,0,162,142,
+ 18,0,99,150,60,0,66,36,35,16,67,0,
+ 0,0,162,174,60,0,2,36,18,0,98,166,
+ 18,0,98,150,0,0,0,0,0,26,2,0,
+ 2,18,2,0,37,24,98,0,12,0,131,166,
+ 33,32,192,2,74,21,192,12,33,40,96,2,
+ 8,0,64,20,33,32,96,2,3,131,3,60,
+ 108,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,152,21,192,12,0,0,98,172,
+ 76,0,191,143,72,0,182,143,68,0,181,143,
+ 64,0,180,143,60,0,179,143,56,0,178,143,
+ 52,0,177,143,48,0,176,143,8,0,224,3,
+ 80,0,189,39,33,24,0,0,5,0,7,36,
+ 58,0,6,36,0,0,162,144,0,0,0,0,
+ 2,17,2,0,2,131,1,60,33,8,34,0,
+ 176,155,34,144,0,0,0,0,0,0,130,160,
+ 0,0,162,144,1,0,132,36,15,0,66,48,
+ 2,131,1,60,33,8,34,0,176,155,34,144,
+ 1,0,165,36,0,0,130,160,3,0,103,16,
+ 1,0,132,36,0,0,134,160,1,0,132,36,
+ 1,0,99,36,6,0,98,40,233,255,64,20,
+ 0,0,0,0,8,0,224,3,0,0,0,0,
+ 128,255,189,39,2,101,2,36,0,2,3,36,
+ 112,0,176,175,44,0,176,39,33,32,0,2,
+ 33,40,0,0,48,0,6,36,120,0,191,175,
+ 116,0,177,175,40,0,162,167,144,71,192,12,
+ 42,0,163,167,3,131,17,60,96,18,49,38,
+ 2,131,5,60,224,147,165,36,188,71,192,12,
+ 33,32,32,2,18,0,64,20,33,32,0,2,
+ 2,131,5,60,236,147,165,36,0,0,162,140,
+ 4,0,163,140,8,0,164,140,44,0,162,175,
+ 48,0,163,175,52,0,164,175,12,0,162,128,
+ 0,0,0,0,56,0,162,163,2,131,5,60,
+ 212,4,165,36,193,48,192,12,56,0,164,39,
+ 8,49,192,8,92,0,177,39,33,40,32,2,
+ 204,63,192,12,48,0,6,36,92,0,177,39,
+ 33,32,32,2,33,40,0,0,144,71,192,12,
+ 4,0,6,36,2,131,4,60,212,4,132,36,
+ 0,0,130,140,4,0,131,132,96,0,162,175,
+ 100,0,163,167,4,82,2,36,0,1,3,36,
+ 236,255,132,36,2,0,5,36,102,0,162,167,
+ 54,21,192,12,104,0,163,167,33,128,64,0,
+ 22,0,0,18,40,0,165,39,4,0,4,142,
+ 0,0,0,0,220,42,192,12,66,0,6,36,
+ 33,32,0,0,0,0,67,140,132,129,133,39,
+ 0,128,99,52,0,0,67,172,4,0,3,142,
+ 14,0,6,36,18,0,99,148,33,56,32,2,
+ 18,0,3,166,82,4,3,36,16,0,165,175,
+ 20,0,163,175,24,0,163,175,28,0,176,175,
+ 214,47,192,12,32,0,162,175,120,0,191,143,
+ 116,0,177,143,112,0,176,143,8,0,224,3,
+ 128,0,189,39,144,255,189,39,104,0,180,175,
+ 33,160,128,0,100,0,179,175,33,152,160,0,
+ 92,0,177,175,33,136,192,0,33,32,224,0,
+ 40,0,166,39,56,0,167,39,96,0,178,175,
+ 2,131,18,60,8,239,82,38,88,0,176,175,
+ 128,0,176,143,242,5,2,36,84,0,162,167,
+ 72,0,162,39,108,0,191,175,72,0,160,167,
+ 76,0,178,175,80,0,178,175,16,0,162,175,
+ 247,71,192,12,33,40,0,2,255,255,3,36,
+ 37,0,67,16,255,1,5,38,2,131,4,60,
+ 192,4,132,36,54,21,192,12,2,42,5,0,
+ 33,128,64,0,30,0,0,18,33,40,64,2,
+ 80,0,166,143,76,0,162,143,4,0,4,142,
+ 35,48,194,0,220,42,192,12,255,255,198,48,
+ 33,32,128,2,0,0,67,140,6,0,101,38,
+ 0,128,99,52,0,0,67,172,4,0,3,142,
+ 35,48,51,2,18,0,99,148,18,0,39,38,
+ 18,0,3,166,28,0,40,150,22,0,35,38,
+ 16,0,163,175,15,144,3,52,24,0,163,175,
+ 28,0,176,175,32,0,162,175,0,18,8,0,
+ 2,66,8,0,37,16,72,0,255,255,66,48,
+ 214,47,192,12,20,0,162,175,108,0,191,143,
+ 104,0,180,143,100,0,179,143,96,0,178,143,
+ 92,0,177,143,88,0,176,143,8,0,224,3,
+ 112,0,189,39,200,255,189,39,44,0,181,175,
+ 33,168,128,0,28,0,177,175,33,136,160,0,
+ 48,0,191,175,40,0,180,175,36,0,179,175,
+ 32,0,178,175,24,0,176,175,18,0,34,150,
+ 0,0,0,0,255,255,84,48,243,5,130,46,
+ 4,0,64,20,33,152,192,0,3,131,3,60,
+ 241,49,192,8,84,17,99,36,2,131,18,60,
+ 16,233,82,38,33,32,64,2,0,0,48,142,
+ 8,0,37,142,255,63,16,50,80,68,192,12,
+ 33,48,0,2,0,0,34,142,0,0,0,0,
+ 0,128,66,48,5,0,64,20,33,144,80,2,
+ 4,0,49,142,0,0,0,0,148,49,192,8,
+ 33,32,64,2,2,131,2,60,16,233,66,36,
+ 33,128,98,2,6,0,17,38,33,32,32,2,
+ 0,163,5,60,224,5,165,52,168,71,192,12,
+ 4,0,6,36,9,0,64,16,33,32,32,2,
+ 224,129,133,39,168,71,192,12,4,0,6,36,
+ 5,0,64,16,10,0,17,38,3,131,3,60,
+ 241,49,192,8,88,17,99,36,10,0,17,38,
+ 33,32,32,2,2,131,5,60,212,4,165,36,
+ 168,71,192,12,6,0,6,36,9,0,64,16,
+ 33,32,32,2,228,129,133,39,168,71,192,12,
+ 6,0,6,36,4,0,64,16,0,0,0,0,
+ 3,131,3,60,241,49,192,8,88,17,99,36,
+ 0,0,3,150,255,255,2,52,4,0,98,16,
+ 30,0,7,38,3,131,3,60,241,49,192,8,
+ 88,17,99,36,2,0,2,150,2,131,5,60,
+ 16,233,165,36,0,26,2,0,2,18,2,0,
+ 37,24,98,0,255,255,99,48,226,255,104,36,
+ 35,16,229,0,35,16,130,2,42,16,72,0,
+ 4,0,64,16,0,0,0,0,3,131,3,60,
+ 241,49,192,8,96,17,99,36,16,0,2,150,
+ 0,0,0,0,0,26,2,0,2,18,2,0,
+ 37,24,98,0,255,255,99,48,15,144,2,52,
+ 11,0,98,20,33,32,160,2,3,131,3,60,
+ 100,17,99,36,0,0,98,140,33,48,0,2,
+ 16,0,168,175,1,0,66,36,54,49,192,12,
+ 0,0,98,172,245,49,192,8,0,0,0,0,
+ 3,131,3,60,92,17,99,36,0,0,98,140,
+ 0,0,0,0,1,0,66,36,0,0,98,172,
+ 48,0,191,143,44,0,181,143,40,0,180,143,
+ 36,0,179,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,56,0,189,39,
+ 0,0,0,0,0,0,0,0,232,255,189,39,
+ 16,0,191,175,13,8,192,12,0,8,4,36,
+ 8,133,130,175,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,232,255,189,39,
+ 45,0,128,16,16,0,191,175,240,129,133,143,
+ 7,0,130,36,194,16,2,0,10,0,160,20,
+ 1,0,70,36,8,133,133,143,0,133,130,39,
+ 0,133,133,175,0,0,162,172,0,8,2,36,
+ 240,129,133,175,2,131,1,60,20,211,32,172,
+ 4,0,162,172,0,0,164,140,0,0,0,0,
+ 4,0,131,140,0,0,0,0,43,16,102,0,
+ 14,0,64,20,0,0,0,0,5,0,102,20,
+ 35,16,102,0,0,0,130,140,0,0,0,0,
+ 43,50,192,8,0,0,162,172,4,0,130,172,
+ 192,16,2,0,33,32,130,0,4,0,134,172,
+ 240,129,133,175,57,50,192,8,8,0,130,36,
+ 240,129,130,143,0,0,0,0,4,0,130,16,
+ 33,40,128,0,0,0,132,140,28,50,192,8,
+ 0,0,0,0,2,131,4,60,15,63,192,12,
+ 64,148,132,36,33,16,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 56,0,128,16,248,255,132,36,240,129,133,143,
+ 0,0,0,0,78,50,192,8,43,16,164,0,
+ 0,0,163,140,0,0,0,0,43,16,163,0,
+ 5,0,64,20,43,16,164,0,12,0,64,20,
+ 43,16,131,0,10,0,64,20,0,0,0,0,
+ 33,40,96,0,43,16,164,0,244,255,64,16,
+ 0,0,0,0,0,0,162,140,0,0,0,0,
+ 43,16,130,0,239,255,64,16,0,0,0,0,
+ 4,0,134,140,0,0,163,140,192,16,6,0,
+ 33,16,130,0,11,0,67,20,0,0,0,0,
+ 4,0,98,140,0,0,0,0,33,16,194,0,
+ 4,0,130,172,0,0,162,140,0,0,0,0,
+ 0,0,66,140,0,0,0,0,102,50,192,8,
+ 0,0,130,172,0,0,131,172,4,0,163,140,
+ 0,0,0,0,192,16,3,0,33,16,162,0,
+ 9,0,68,20,0,0,0,0,4,0,130,140,
+ 0,0,0,0,33,16,98,0,4,0,162,172,
+ 0,0,130,140,0,0,0,0,117,50,192,8,
+ 0,0,162,172,0,0,164,172,240,129,133,175,
+ 8,0,224,3,0,0,0,0,232,255,189,39,
+ 16,0,191,175,0,50,192,12,0,0,0,0,
+ 178,45,192,12,33,32,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 1,0,3,36,5,0,195,20,255,255,2,36,
+ 0,0,226,140,0,0,0,0,43,16,2,0,
+ 35,16,2,0,8,0,224,3,0,0,0,0,
+ 224,255,189,39,16,0,176,175,33,128,224,0,
+ 20,0,177,175,48,0,177,143,1,0,2,36,
+ 5,0,162,20,24,0,191,175,0,0,194,140,
+ 0,0,0,0,8,0,64,16,0,0,0,0,
+ 11,0,2,36,33,32,0,2,33,40,32,2,
+ 48,72,192,12,96,0,2,174,1,0,66,36,
+ 100,0,2,174,17,0,34,146,0,0,0,0,
+ 1,0,66,52,17,0,34,162,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,8,0,224,3,0,0,0,0,
+ 16,0,163,143,0,0,0,0,17,0,98,144,
+ 0,0,0,0,2,0,66,52,8,0,224,3,
+ 17,0,98,160,8,0,224,3,0,0,0,0,
+ 224,255,189,39,16,0,176,175,33,128,128,0,
+ 244,129,131,151,255,0,2,36,28,0,191,175,
+ 24,0,178,175,20,0,177,175,4,0,2,174,
+ 60,0,0,174,1,0,98,36,244,129,130,167,
+ 10,0,3,166,3,0,162,136,0,0,162,152,
+ 7,0,163,136,4,0,163,152,11,0,164,136,
+ 8,0,164,152,15,0,167,136,12,0,167,152,
+ 15,0,2,170,12,0,2,186,19,0,3,170,
+ 16,0,3,186,23,0,4,170,20,0,4,186,
+ 27,0,7,170,24,0,7,186,3,0,194,136,
+ 0,0,194,152,7,0,195,136,4,0,195,152,
+ 11,0,196,136,8,0,196,152,15,0,197,136,
+ 12,0,197,152,31,0,2,170,28,0,2,186,
+ 35,0,3,170,32,0,3,186,39,0,4,170,
+ 36,0,4,186,43,0,5,170,40,0,5,186,
+ 80,0,2,142,76,0,3,142,0,0,0,0,
+ 35,16,67,0,255,255,81,48,88,0,3,150,
+ 3,0,2,36,13,0,98,16,0,0,0,0,
+ 2,131,18,60,160,204,82,38,156,71,192,12,
+ 33,32,64,2,7,0,81,20,33,32,64,2,
+ 76,0,5,142,0,0,0,0,168,71,192,12,
+ 33,48,32,2,21,0,64,16,33,16,0,0,
+ 2,131,18,60,192,204,82,38,156,71,192,12,
+ 33,32,64,2,7,0,81,20,33,32,64,2,
+ 76,0,5,142,0,0,0,0,168,71,192,12,
+ 33,48,32,2,9,0,64,16,33,16,0,0,
+ 3,131,3,60,132,17,99,36,0,0,98,140,
+ 1,0,4,36,1,0,66,36,178,45,192,12,
+ 0,0,98,172,1,0,2,36,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,0,0,0,0,
+ 0,0,0,0,224,255,189,39,20,0,177,175,
+ 33,136,224,0,16,0,176,175,48,0,176,143,
+ 24,0,191,175,156,71,192,12,33,32,32,2,
+ 0,0,2,174,33,16,32,2,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,8,0,224,3,33,16,224,0,
+ 0,0,227,140,204,204,2,60,205,204,66,52,
+ 25,0,98,0,16,32,0,0,0,0,0,0,
+ 0,0,0,0,8,0,224,3,194,16,4,0,
+ 224,255,189,39,16,0,176,175,33,128,224,0,
+ 33,32,0,2,33,40,0,0,20,0,177,175,
+ 48,0,177,143,24,0,191,175,208,71,192,12,
+ 16,0,6,36,2,0,64,20,35,16,80,0,
+ 16,0,2,36,0,0,34,174,33,16,0,2,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,232,255,189,39,
+ 40,0,164,143,44,0,165,143,16,0,191,175,
+ 205,59,192,12,0,0,0,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,40,0,164,143,44,0,165,143,
+ 16,0,191,175,239,59,192,12,0,0,0,0,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,232,255,189,39,40,0,164,143,
+ 44,0,165,143,16,0,191,175,17,60,192,12,
+ 0,0,0,0,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,8,0,224,3,
+ 33,16,224,0,0,0,226,140,8,0,224,3,
+ 0,0,0,0,216,255,189,39,24,0,176,175,
+ 56,0,176,143,32,0,191,175,28,0,177,175,
+ 36,0,2,142,1,0,3,36,20,0,81,140,
+ 187,0,163,20,0,0,0,0,0,0,195,140,
+ 0,0,0,0,183,0,96,16,0,0,0,0,
+ 32,133,130,143,0,0,0,0,43,16,67,0,
+ 178,0,64,20,255,255,104,36,64,18,8,0,
+ 2,131,3,60,192,246,99,36,33,40,67,0,
+ 255,255,132,36,22,0,130,44,170,0,64,16,
+ 128,16,4,0,2,131,1,60,33,8,34,0,
+ 144,148,34,140,0,0,0,0,8,0,64,0,
+ 0,0,0,0,2,0,2,36,16,0,2,162,
+ 17,0,2,146,0,0,195,140,0,0,0,0,
+ 15,52,192,8,2,0,66,52,33,32,32,2,
+ 17,0,3,146,4,0,2,36,16,0,2,162,
+ 40,0,0,166,44,0,17,174,2,0,99,52,
+ 156,71,192,12,17,0,3,162,255,255,66,48,
+ 33,16,34,2,48,0,2,174,40,52,192,8,
+ 52,0,0,166,17,0,3,146,2,0,2,36,
+ 16,0,2,162,243,51,192,8,40,0,17,174,
+ 17,0,3,146,2,0,2,36,16,0,2,162,
+ 243,51,192,8,40,0,17,174,66,0,2,36,
+ 13,0,0,21,16,0,2,162,24,133,132,143,
+ 0,0,0,0,64,25,4,0,35,24,100,0,
+ 128,17,3,0,35,16,67,0,192,16,2,0,
+ 33,16,68,0,128,24,2,0,33,16,67,0,
+ 178,51,192,8,192,17,2,0,152,0,2,60,
+ 128,150,66,52,40,0,2,174,17,0,2,146,
+ 0,0,0,0,199,51,192,8,2,0,66,52,
+ 17,0,3,146,4,0,2,36,16,0,2,162,
+ 20,0,162,36,44,0,2,174,26,0,162,36,
+ 40,0,0,166,48,0,2,174,243,51,192,8,
+ 52,0,0,166,2,0,2,36,16,0,2,162,
+ 17,0,2,146,1,0,3,36,40,0,3,174,
+ 2,0,66,52,40,52,192,8,17,0,2,162,
+ 17,0,3,146,0,0,0,0,241,51,192,8,
+ 67,0,2,36,65,0,2,36,16,0,2,162,
+ 17,0,2,146,168,0,163,140,0,0,0,0,
+ 15,52,192,8,2,0,66,52,65,0,2,36,
+ 16,0,2,162,156,0,162,140,0,1,164,140,
+ 22,52,192,8,0,0,0,0,65,0,2,36,
+ 16,0,2,162,17,0,2,146,0,1,163,140,
+ 0,0,0,0,15,52,192,8,2,0,66,52,
+ 65,0,2,36,16,0,2,162,17,0,2,146,
+ 164,0,163,140,0,0,0,0,15,52,192,8,
+ 2,0,66,52,65,0,2,36,16,0,2,162,
+ 17,0,2,146,160,0,163,140,0,0,0,0,
+ 15,52,192,8,2,0,66,52,17,0,3,146,
+ 65,0,2,36,16,0,2,162,40,0,0,174,
+ 2,0,99,52,40,52,192,8,17,0,3,162,
+ 65,0,2,36,16,0,2,162,172,0,162,140,
+ 4,1,164,140,22,52,192,8,0,0,0,0,
+ 65,0,2,36,16,0,2,162,17,0,2,146,
+ 4,1,163,140,0,0,0,0,15,52,192,8,
+ 2,0,66,52,65,0,2,36,16,0,2,162,
+ 17,0,2,146,184,0,163,140,0,0,0,0,
+ 15,52,192,8,2,0,66,52,65,0,2,36,
+ 16,0,2,162,17,0,2,146,188,0,163,140,
+ 2,0,66,52,40,0,3,174,40,52,192,8,
+ 17,0,2,162,66,0,2,36,16,0,2,162,
+ 172,0,162,140,176,0,164,140,17,0,3,146,
+ 35,16,68,0,2,0,99,52,40,0,2,174,
+ 40,52,192,8,17,0,3,162,16,0,160,175,
+ 33,32,224,0,33,40,0,2,2,131,7,60,
+ 96,204,231,36,226,76,192,12,2,0,6,36,
+ 40,52,192,8,0,0,0,0,33,32,224,0,
+ 200,76,192,12,33,40,0,2,32,0,191,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 40,0,189,39,224,255,189,39,16,0,176,175,
+ 33,128,224,0,20,0,177,175,48,0,177,143,
+ 1,0,2,36,10,0,162,20,24,0,191,175,
+ 0,0,198,140,0,0,0,0,6,0,192,16,
+ 0,0,0,0,32,133,130,143,0,0,0,0,
+ 43,16,70,0,5,0,64,16,7,0,2,36,
+ 33,32,0,2,33,40,32,2,70,52,192,8,
+ 11,0,2,36,7,0,130,16,33,32,0,2,
+ 33,40,32,2,17,0,2,36,48,72,192,12,
+ 96,0,2,174,1,0,66,36,100,0,2,174,
+ 17,0,34,146,0,0,0,0,1,0,66,52,
+ 17,0,34,162,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 208,255,189,39,32,0,176,175,64,0,176,143,
+ 36,0,177,175,33,136,224,0,4,0,160,20,
+ 40,0,191,175,1,0,2,36,106,52,192,8,
+ 24,0,162,175,0,0,198,140,32,133,130,143,
+ 0,0,0,0,43,16,194,0,3,0,64,16,
+ 1,0,194,36,106,52,192,8,24,0,162,175,
+ 17,0,2,146,0,0,0,0,18,0,66,52,
+ 116,52,192,8,17,0,2,162,16,0,176,175,
+ 1,0,5,36,24,0,166,39,97,51,192,12,
+ 33,56,32,2,33,32,32,2,33,40,0,2,
+ 1,0,6,36,253,76,192,12,24,0,167,39,
+ 40,0,191,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,48,0,189,39,16,0,163,143,
+ 1,0,2,36,13,0,162,20,14,0,2,36,
+ 0,0,198,140,0,0,0,0,9,0,192,16,
+ 0,0,0,0,32,133,130,143,0,0,0,0,
+ 43,16,70,0,4,0,64,20,14,0,2,36,
+ 7,0,2,36,2,0,130,16,14,0,2,36,
+ 96,0,226,172,17,0,98,144,0,0,0,0,
+ 2,0,66,52,8,0,224,3,17,0,98,160,
+ 16,0,162,143,0,0,0,0,8,0,224,3,
+ 0,0,226,172,0,0,226,140,8,0,224,3,
+ 0,0,0,0,232,255,189,39,40,0,168,143,
+ 1,0,2,36,61,0,162,20,16,0,191,175,
+ 0,0,197,140,0,0,0,0,57,0,160,16,
+ 0,0,0,0,32,133,130,143,0,0,0,0,
+ 43,16,69,0,52,0,64,20,255,255,132,36,
+ 5,0,130,44,49,0,64,16,128,16,4,0,
+ 2,131,1,60,33,8,34,0,232,148,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 64,0,2,36,16,0,2,161,0,163,5,60,
+ 220,5,165,52,3,0,162,136,0,0,162,152,
+ 0,0,0,0,43,0,2,169,40,0,2,185,
+ 17,0,2,145,0,0,0,0,213,52,192,8,
+ 2,0,66,52,2,0,2,36,16,0,2,161,
+ 17,0,2,145,0,0,195,140,0,0,0,0,
+ 198,52,192,8,2,0,66,52,64,0,2,36,
+ 16,0,2,161,17,0,2,145,128,132,131,143,
+ 2,0,66,52,40,0,3,173,218,52,192,8,
+ 17,0,2,161,2,0,2,36,16,0,2,161,
+ 17,0,2,145,0,0,0,0,211,52,192,8,
+ 1,0,3,36,2,0,2,36,16,0,2,161,
+ 17,0,2,145,220,5,3,36,40,0,3,173,
+ 2,0,66,52,218,52,192,8,17,0,2,161,
+ 33,32,224,0,200,76,192,12,33,40,0,1,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,208,255,189,39,32,0,176,175,
+ 64,0,176,143,36,0,177,175,33,136,224,0,
+ 4,0,160,20,40,0,191,175,1,0,2,36,
+ 245,52,192,8,24,0,162,175,0,0,198,140,
+ 32,133,130,143,0,0,0,0,43,16,194,0,
+ 3,0,64,16,1,0,194,36,245,52,192,8,
+ 24,0,162,175,17,0,2,146,0,0,0,0,
+ 18,0,66,52,255,52,192,8,17,0,2,162,
+ 16,0,176,175,1,0,5,36,24,0,166,39,
+ 150,52,192,12,33,56,32,2,33,32,32,2,
+ 33,40,0,2,1,0,6,36,253,76,192,12,
+ 24,0,167,39,40,0,191,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,48,0,189,39,
+ 232,255,189,39,40,0,165,143,16,0,191,175,
+ 200,76,192,12,33,32,224,0,16,0,191,143,
+ 24,0,189,39,8,0,224,3,0,0,0,0,
+ 16,0,163,143,14,0,2,36,96,0,226,172,
+ 17,0,98,144,0,0,0,0,2,0,66,52,
+ 8,0,224,3,17,0,98,160,224,255,189,39,
+ 16,0,176,175,33,128,224,0,17,0,2,36,
+ 24,0,191,175,20,0,177,175,96,0,2,174,
+ 48,0,177,143,33,32,0,2,48,72,192,12,
+ 33,40,32,2,1,0,66,36,100,0,2,174,
+ 17,0,34,146,0,0,0,0,1,0,66,52,
+ 17,0,34,162,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 16,0,163,143,0,0,0,0,17,0,98,144,
+ 0,0,0,0,18,0,66,52,8,0,224,3,
+ 17,0,98,160,8,0,224,3,33,16,224,0,
+ 224,255,189,39,48,0,168,143,1,0,2,36,
+ 114,0,162,20,24,0,191,175,0,0,195,140,
+ 0,0,0,0,110,0,96,16,0,0,0,0,
+ 32,133,130,143,0,0,0,0,43,16,67,0,
+ 105,0,64,20,255,255,98,36,64,18,2,0,
+ 2,131,3,60,192,246,99,36,33,24,67,0,
+ 255,255,132,36,17,0,130,44,97,0,64,16,
+ 128,16,4,0,2,131,1,60,33,8,34,0,
+ 0,149,34,140,0,0,0,0,8,0,64,0,
+ 0,0,0,0,2,0,2,36,16,0,2,161,
+ 17,0,2,145,0,0,195,140,0,0,0,0,
+ 140,53,192,8,2,0,66,52,2,0,2,36,
+ 16,0,2,161,44,0,99,140,17,0,2,145,
+ 16,0,99,140,0,0,0,0,101,53,192,8,
+ 2,0,66,52,2,0,2,36,16,0,2,161,
+ 44,0,99,140,17,0,2,145,12,0,99,140,
+ 2,0,66,52,17,0,2,161,173,53,192,8,
+ 40,0,3,173,2,0,2,36,16,0,2,161,
+ 17,0,2,145,212,0,99,140,0,0,0,0,
+ 140,53,192,8,2,0,66,52,2,0,2,36,
+ 16,0,2,161,17,0,2,145,192,0,99,140,
+ 0,0,0,0,140,53,192,8,2,0,66,52,
+ 2,0,2,36,16,0,2,161,17,0,2,145,
+ 208,0,99,140,0,0,0,0,140,53,192,8,
+ 2,0,66,52,2,0,2,36,16,0,2,161,
+ 204,0,98,140,184,0,100,140,17,0,3,145,
+ 33,16,68,0,2,0,99,52,40,0,2,173,
+ 173,53,192,8,17,0,3,161,2,0,2,36,
+ 16,0,2,161,17,0,2,145,196,0,99,140,
+ 2,0,66,52,40,0,3,173,173,53,192,8,
+ 17,0,2,161,17,0,3,145,2,0,2,36,
+ 16,0,2,161,40,0,0,173,2,0,99,52,
+ 173,53,192,8,17,0,3,161,2,0,2,36,
+ 16,0,2,161,44,0,100,140,17,0,2,145,
+ 20,0,131,140,24,0,132,140,2,0,66,52,
+ 17,0,2,161,33,24,100,0,173,53,192,8,
+ 40,0,3,173,16,0,160,175,33,32,224,0,
+ 33,40,0,1,2,131,7,60,104,204,231,36,
+ 226,76,192,12,11,0,6,36,173,53,192,8,
+ 0,0,0,0,33,32,224,0,200,76,192,12,
+ 33,40,0,1,24,0,191,143,32,0,189,39,
+ 8,0,224,3,0,0,0,0,208,255,189,39,
+ 32,0,176,175,64,0,176,143,36,0,177,175,
+ 33,136,224,0,4,0,160,20,40,0,191,175,
+ 1,0,2,36,200,53,192,8,24,0,162,175,
+ 0,0,198,140,32,133,130,143,0,0,0,0,
+ 43,16,194,0,3,0,64,16,1,0,194,36,
+ 200,53,192,8,24,0,162,175,17,0,2,146,
+ 0,0,0,0,18,0,66,52,210,53,192,8,
+ 17,0,2,162,16,0,176,175,1,0,5,36,
+ 24,0,166,39,52,53,192,12,33,56,32,2,
+ 33,32,32,2,33,40,0,2,1,0,6,36,
+ 253,76,192,12,24,0,167,39,40,0,191,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 48,0,189,39,0,0,226,140,8,0,224,3,
+ 0,0,0,0,3,131,2,60,28,18,66,148,
+ 0,0,0,0,2,0,66,48,2,0,64,16,
+ 2,0,3,36,1,0,3,36,8,0,224,3,
+ 33,16,96,0,232,255,189,39,40,0,164,143,
+ 16,0,191,175,1,0,132,56,186,59,192,12,
+ 1,0,132,44,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,16,0,163,143,
+ 6,0,2,36,0,0,98,172,8,0,224,3,
+ 33,16,224,0,224,255,189,39,48,0,168,143,
+ 1,0,2,36,52,0,162,20,24,0,191,175,
+ 0,0,197,140,0,0,0,0,48,0,160,16,
+ 0,0,0,0,24,133,130,143,0,0,0,0,
+ 43,16,69,0,43,0,64,20,255,255,132,36,
+ 5,0,130,44,40,0,64,16,128,16,4,0,
+ 2,131,1,60,33,8,34,0,72,149,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 2,0,2,36,16,0,2,161,17,0,2,145,
+ 0,0,195,140,2,0,66,52,40,0,3,173,
+ 45,54,192,8,17,0,2,161,2,0,2,36,
+ 16,0,2,161,0,0,194,140,17,0,3,145,
+ 1,0,66,36,2,0,99,52,40,0,2,173,
+ 45,54,192,8,17,0,3,161,16,0,160,175,
+ 33,32,224,0,33,40,0,1,2,131,7,60,
+ 148,204,231,36,226,76,192,12,2,0,6,36,
+ 45,54,192,8,0,0,0,0,17,0,3,145,
+ 2,0,2,36,16,0,2,161,40,0,0,173,
+ 2,0,99,52,45,54,192,8,17,0,3,161,
+ 33,32,224,0,200,76,192,12,33,40,0,1,
+ 24,0,191,143,32,0,189,39,8,0,224,3,
+ 0,0,0,0,208,255,189,39,32,0,176,175,
+ 64,0,176,143,36,0,177,175,33,136,224,0,
+ 4,0,160,20,40,0,191,175,1,0,2,36,
+ 72,54,192,8,24,0,162,175,0,0,198,140,
+ 24,133,130,143,0,0,0,0,43,16,194,0,
+ 3,0,64,16,1,0,194,36,72,54,192,8,
+ 24,0,162,175,17,0,2,146,0,0,0,0,
+ 18,0,66,52,82,54,192,8,17,0,2,162,
+ 16,0,176,175,1,0,5,36,24,0,166,39,
+ 242,53,192,12,33,56,32,2,33,32,32,2,
+ 33,40,0,2,1,0,6,36,253,76,192,12,
+ 24,0,167,39,40,0,191,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,48,0,189,39,
+ 0,0,226,148,8,0,224,3,0,0,0,0,
+ 8,0,224,3,33,16,224,0,16,0,163,143,
+ 8,0,2,36,0,0,98,172,8,0,224,3,
+ 33,16,224,0,224,255,189,39,16,0,176,175,
+ 48,0,176,143,1,0,2,36,24,0,191,175,
+ 126,0,162,20,20,0,177,175,0,0,198,140,
+ 0,0,0,0,122,0,192,16,0,0,0,0,
+ 24,133,130,143,0,0,0,0,43,16,70,0,
+ 117,0,64,20,192,17,6,0,3,131,3,60,
+ 16,13,99,36,33,136,67,0,255,255,132,36,
+ 10,0,130,44,110,0,64,16,128,16,4,0,
+ 2,131,1,60,33,8,34,0,96,149,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 17,0,3,146,2,0,2,36,16,0,2,162,
+ 211,54,192,8,40,0,6,174,2,0,2,36,
+ 16,0,2,162,0,0,34,150,17,0,3,146,
+ 0,0,0,0,143,54,192,8,2,18,2,0,
+ 2,0,2,36,16,0,2,162,4,0,34,142,
+ 17,0,3,146,1,0,66,36,2,0,99,52,
+ 40,0,2,174,232,54,192,8,17,0,3,162,
+ 2,0,2,36,16,0,2,162,4,0,34,142,
+ 0,0,0,0,2,0,64,16,2,0,3,36,
+ 1,0,3,36,17,0,2,146,40,0,3,174,
+ 2,0,66,52,232,54,192,8,17,0,2,162,
+ 2,0,2,36,16,0,2,162,17,0,2,146,
+ 8,0,35,142,0,0,0,0,226,54,192,8,
+ 2,0,66,52,9,50,192,12,8,0,4,36,
+ 33,48,64,0,15,0,34,138,12,0,34,154,
+ 19,0,35,138,16,0,35,154,3,0,194,168,
+ 0,0,194,184,7,0,195,168,196,54,192,8,
+ 4,0,195,184,2,0,2,36,16,0,2,162,
+ 17,0,2,146,20,0,35,142,0,0,0,0,
+ 226,54,192,8,2,0,66,52,9,50,192,12,
+ 8,0,4,36,33,48,64,0,27,0,34,138,
+ 24,0,34,154,31,0,35,138,28,0,35,154,
+ 3,0,194,168,0,0,194,184,7,0,195,168,
+ 4,0,195,184,0,0,194,148,0,0,0,0,
+ 0,26,2,0,2,18,2,0,37,24,98,0,
+ 0,0,195,164,17,0,3,146,4,0,2,36,
+ 16,0,2,162,1,0,2,36,40,0,2,166,
+ 8,0,194,36,44,0,6,174,48,0,2,174,
+ 52,0,0,166,2,0,99,52,232,54,192,8,
+ 17,0,3,162,2,0,2,36,16,0,2,162,
+ 17,0,2,146,32,0,35,150,0,0,0,0,
+ 226,54,192,8,2,0,66,52,2,0,2,36,
+ 16,0,2,162,17,0,2,146,104,0,35,142,
+ 2,0,66,52,40,0,3,174,232,54,192,8,
+ 17,0,2,162,33,32,224,0,200,76,192,12,
+ 33,40,0,2,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 224,255,189,39,16,0,176,175,33,128,224,0,
+ 20,0,177,175,48,0,177,143,1,0,2,36,
+ 10,0,162,20,24,0,191,175,0,0,198,140,
+ 0,0,0,0,6,0,192,16,0,0,0,0,
+ 24,133,130,143,0,0,0,0,43,16,70,0,
+ 5,0,64,16,2,0,2,36,33,32,0,2,
+ 33,40,32,2,13,55,192,8,11,0,2,36,
+ 14,0,130,16,2,0,130,44,5,0,64,20,
+ 6,0,130,44,3,0,64,16,4,0,130,44,
+ 8,0,64,16,0,0,0,0,33,32,0,2,
+ 33,40,32,2,17,0,2,36,48,72,192,12,
+ 96,0,2,174,1,0,66,36,100,0,2,174,
+ 17,0,34,146,0,0,0,0,1,0,66,52,
+ 17,0,34,162,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 208,255,189,39,32,0,176,175,64,0,176,143,
+ 36,0,177,175,33,136,224,0,4,0,160,20,
+ 40,0,191,175,1,0,2,36,49,55,192,8,
+ 24,0,162,175,0,0,198,140,24,133,130,143,
+ 0,0,0,0,43,16,194,0,3,0,64,16,
+ 1,0,194,36,49,55,192,8,24,0,162,175,
+ 17,0,2,146,0,0,0,0,18,0,66,52,
+ 59,55,192,8,17,0,2,162,16,0,176,175,
+ 1,0,5,36,24,0,166,39,97,54,192,12,
+ 33,56,32,2,33,32,32,2,33,40,0,2,
+ 1,0,6,36,253,76,192,12,24,0,167,39,
+ 40,0,191,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,48,0,189,39,232,255,189,39,
+ 33,64,128,0,16,0,176,175,40,0,176,143,
+ 1,0,2,36,57,0,162,20,20,0,191,175,
+ 0,0,196,140,0,0,0,0,54,0,128,16,
+ 14,0,2,36,24,133,130,143,0,0,0,0,
+ 43,16,68,0,49,0,64,20,14,0,2,36,
+ 192,17,4,0,3,131,3,60,16,13,99,36,
+ 33,48,67,0,4,0,2,36,21,0,2,17,
+ 5,0,2,45,5,0,64,16,2,0,2,36,
+ 8,0,2,17,14,0,2,36,129,55,192,8,
+ 96,0,226,172,5,0,2,36,28,0,2,17,
+ 14,0,2,36,129,55,192,8,96,0,226,172,
+ 0,0,195,144,0,0,0,0,0,0,195,164,
+ 40,0,2,142,0,0,0,0,0,18,2,0,
+ 37,24,98,0,129,55,192,8,0,0,195,164,
+ 40,0,3,142,0,0,0,0,5,0,101,16,
+ 2,0,2,36,7,0,98,16,14,0,2,36,
+ 129,55,192,8,96,0,226,172,187,42,192,12,
+ 1,0,5,36,129,55,192,8,0,0,0,0,
+ 187,42,192,12,33,40,0,0,129,55,192,8,
+ 0,0,0,0,40,0,2,142,0,0,0,0,
+ 129,55,192,8,8,0,194,172,14,0,2,36,
+ 96,0,226,172,17,0,2,146,0,0,0,0,
+ 2,0,66,52,17,0,2,162,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 216,255,189,39,20,0,177,175,33,136,128,0,
+ 28,0,179,175,33,152,160,0,24,0,178,175,
+ 33,144,224,0,16,0,176,175,56,0,176,143,
+ 1,0,2,36,46,0,98,22,32,0,191,175,
+ 0,0,196,140,0,0,0,0,42,0,128,16,
+ 0,0,0,0,58,25,192,12,0,0,0,0,
+ 33,32,64,0,37,0,128,16,2,0,2,36,
+ 11,0,34,18,3,0,34,46,5,0,64,16,
+ 3,0,2,36,15,0,51,18,4,0,2,36,
+ 195,55,192,8,33,32,64,2,20,0,34,18,
+ 33,32,64,2,195,55,192,8,0,0,0,0,
+ 2,0,2,36,16,0,2,162,17,0,2,146,
+ 10,0,131,132,2,0,66,52,40,0,3,174,
+ 197,55,192,8,17,0,2,162,17,0,3,146,
+ 16,0,2,162,4,0,130,36,44,0,2,174,
+ 10,0,130,36,40,0,0,166,48,0,2,174,
+ 191,55,192,8,52,0,0,166,17,0,3,146,
+ 2,0,2,36,16,0,2,162,40,0,17,174,
+ 2,0,99,52,197,55,192,8,17,0,3,162,
+ 33,32,64,2,200,76,192,12,33,40,0,2,
+ 32,0,191,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,208,255,189,39,32,0,176,175,
+ 64,0,176,143,40,0,178,175,33,144,128,0,
+ 36,0,177,175,33,136,224,0,3,0,160,20,
+ 44,0,191,175,218,55,192,8,1,0,2,36,
+ 0,0,194,140,0,0,0,0,1,0,66,36,
+ 24,0,162,175,24,0,164,143,58,25,192,12,
+ 0,0,0,0,6,0,64,20,33,32,64,2,
+ 17,0,2,146,0,0,0,0,18,0,66,52,
+ 239,55,192,8,17,0,2,162,16,0,176,175,
+ 1,0,5,36,24,0,166,39,137,55,192,12,
+ 33,56,32,2,33,32,32,2,33,40,0,2,
+ 1,0,6,36,253,76,192,12,24,0,167,39,
+ 44,0,191,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,48,0,189,39,
+ 232,255,189,39,40,0,168,143,1,0,2,36,
+ 63,0,162,20,16,0,191,175,0,0,195,140,
+ 0,0,0,0,59,0,96,16,0,0,0,0,
+ 24,133,130,143,0,0,0,0,43,16,67,0,
+ 54,0,64,20,64,18,3,0,2,131,3,60,
+ 192,246,99,36,33,24,67,0,255,255,132,36,
+ 5,0,130,44,47,0,64,16,128,16,4,0,
+ 2,131,1,60,33,8,34,0,136,149,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 2,0,2,36,16,0,2,161,17,0,2,145,
+ 0,0,195,140,0,0,0,0,43,56,192,8,
+ 2,0,66,52,2,0,2,36,16,0,2,161,
+ 17,0,2,145,220,5,3,36,40,0,3,173,
+ 2,0,66,52,59,56,192,8,17,0,2,161,
+ 65,0,2,36,16,0,2,161,17,0,2,145,
+ 156,0,99,140,0,0,0,0,43,56,192,8,
+ 2,0,66,52,65,0,2,36,16,0,2,161,
+ 17,0,2,145,172,0,99,140,2,0,66,52,
+ 40,0,3,173,59,56,192,8,17,0,2,161,
+ 65,0,2,36,16,0,2,161,156,0,98,140,
+ 252,0,100,140,17,0,3,145,35,16,68,0,
+ 2,0,99,52,40,0,2,173,59,56,192,8,
+ 17,0,3,161,33,32,224,0,200,76,192,12,
+ 33,40,0,1,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,208,255,189,39,
+ 32,0,176,175,64,0,176,143,36,0,177,175,
+ 33,136,224,0,4,0,160,20,40,0,191,175,
+ 1,0,2,36,86,56,192,8,24,0,162,175,
+ 0,0,198,140,24,133,130,143,0,0,0,0,
+ 43,16,194,0,3,0,64,16,1,0,194,36,
+ 86,56,192,8,24,0,162,175,17,0,2,146,
+ 0,0,0,0,18,0,66,52,96,56,192,8,
+ 17,0,2,162,16,0,176,175,1,0,5,36,
+ 24,0,166,39,245,55,192,12,33,56,32,2,
+ 33,32,32,2,33,40,0,2,1,0,6,36,
+ 253,76,192,12,24,0,167,39,40,0,191,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 48,0,189,39,0,0,0,0,0,0,0,0,
+ 0,0,0,0,200,255,189,39,72,0,163,143,
+ 44,0,177,175,33,136,128,0,20,0,165,175,
+ 33,40,224,0,2,131,2,60,172,210,66,140,
+ 152,132,135,143,33,32,0,0,48,0,191,175,
+ 40,0,176,175,24,0,160,175,28,0,160,175,
+ 36,0,160,175,16,0,162,175,104,77,192,12,
+ 32,0,163,175,33,128,64,0,3,0,0,22,
+ 33,32,0,2,143,56,192,8,33,16,0,0,
+ 197,80,192,12,33,40,32,2,255,255,3,36,
+ 5,0,67,20,0,0,0,0,167,83,192,12,
+ 33,32,0,2,143,56,192,8,33,16,0,0,
+ 167,83,192,12,33,32,0,2,8,0,34,142,
+ 4,0,35,142,0,0,0,0,35,16,67,0,
+ 255,255,66,48,48,0,191,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,56,0,189,39,
+ 200,255,189,39,44,0,177,175,33,136,128,0,
+ 72,0,168,143,33,32,0,0,20,0,165,175,
+ 33,40,224,0,2,131,3,60,172,210,99,140,
+ 152,132,135,143,1,0,2,36,48,0,191,175,
+ 40,0,176,175,24,0,162,175,28,0,160,175,
+ 36,0,160,175,16,0,163,175,104,77,192,12,
+ 32,0,168,175,33,128,64,0,3,0,0,22,
+ 33,32,0,2,188,56,192,8,33,16,0,0,
+ 197,80,192,12,33,40,32,2,255,255,3,36,
+ 5,0,67,20,0,0,0,0,167,83,192,12,
+ 33,32,0,2,188,56,192,8,33,16,0,0,
+ 167,83,192,12,33,32,0,2,8,0,34,142,
+ 4,0,35,142,0,0,0,0,35,16,67,0,
+ 255,255,66,48,48,0,191,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,56,0,189,39,
+ 176,255,189,39,44,0,177,175,108,0,177,143,
+ 68,0,183,175,96,0,183,143,72,0,190,175,
+ 100,0,190,143,48,0,178,175,33,144,128,0,
+ 56,0,180,175,33,160,160,0,52,0,179,175,
+ 33,152,192,0,40,0,176,175,33,128,224,0,
+ 60,0,181,175,1,0,21,36,76,0,191,175,
+ 3,0,53,18,64,0,182,175,9,57,192,8,
+ 255,255,2,36,4,0,6,36,2,131,22,60,
+ 48,205,214,38,160,132,132,143,104,0,165,143,
+ 128,32,4,0,80,68,192,12,33,32,150,0,
+ 33,32,0,0,33,40,0,2,152,132,135,143,
+ 2,0,2,36,24,0,162,175,160,132,130,143,
+ 2,131,3,60,172,210,99,140,33,48,96,2,
+ 20,0,180,175,28,0,160,175,32,0,183,175,
+ 36,0,181,175,1,0,81,36,104,77,192,12,
+ 16,0,163,175,33,128,64,0,23,0,0,18,
+ 33,40,0,0,16,0,190,175,33,32,0,2,
+ 33,48,32,2,108,84,192,12,33,56,192,2,
+ 255,255,17,36,13,0,81,16,33,32,0,2,
+ 197,80,192,12,33,40,64,2,9,0,81,16,
+ 0,0,0,0,167,83,192,12,33,32,0,2,
+ 8,0,66,142,4,0,67,142,0,0,0,0,
+ 35,16,67,0,9,57,192,8,255,255,66,48,
+ 167,83,192,12,33,32,0,2,33,16,0,0,
+ 76,0,191,143,72,0,190,143,68,0,183,143,
+ 64,0,182,143,60,0,181,143,56,0,180,143,
+ 52,0,179,143,48,0,178,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,80,0,189,39,
+ 176,255,189,39,44,0,177,175,108,0,177,143,
+ 68,0,183,175,96,0,183,143,72,0,190,175,
+ 100,0,190,143,48,0,178,175,33,144,128,0,
+ 56,0,180,175,33,160,160,0,52,0,179,175,
+ 33,152,192,0,40,0,176,175,33,128,224,0,
+ 60,0,181,175,1,0,21,36,76,0,191,175,
+ 3,0,53,18,64,0,182,175,93,57,192,8,
+ 255,255,2,36,4,0,6,36,2,131,22,60,
+ 48,205,214,38,160,132,132,143,104,0,165,143,
+ 128,32,4,0,80,68,192,12,33,32,150,0,
+ 33,32,0,0,33,40,0,2,152,132,135,143,
+ 3,0,2,36,24,0,162,175,160,132,130,143,
+ 2,131,3,60,172,210,99,140,33,48,96,2,
+ 20,0,180,175,28,0,160,175,32,0,183,175,
+ 36,0,181,175,1,0,81,36,104,77,192,12,
+ 16,0,163,175,33,128,64,0,23,0,0,18,
+ 33,40,0,0,16,0,190,175,33,32,0,2,
+ 33,48,32,2,108,84,192,12,33,56,192,2,
+ 255,255,17,36,13,0,81,16,33,32,0,2,
+ 197,80,192,12,33,40,64,2,9,0,81,16,
+ 0,0,0,0,167,83,192,12,33,32,0,2,
+ 8,0,66,142,4,0,67,142,0,0,0,0,
+ 35,16,67,0,93,57,192,8,255,255,66,48,
+ 167,83,192,12,33,32,0,2,33,16,0,0,
+ 76,0,191,143,72,0,190,143,68,0,183,143,
+ 64,0,182,143,60,0,181,143,56,0,180,143,
+ 52,0,179,143,48,0,178,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,80,0,189,39,
+ 200,255,189,39,44,0,177,175,33,136,128,0,
+ 72,0,168,143,33,32,0,0,20,0,165,175,
+ 33,40,224,0,2,131,3,60,172,210,99,140,
+ 152,132,135,143,4,0,2,36,48,0,191,175,
+ 40,0,176,175,24,0,162,175,28,0,160,175,
+ 36,0,160,175,16,0,163,175,104,77,192,12,
+ 32,0,168,175,33,128,64,0,3,0,0,22,
+ 33,32,0,2,145,57,192,8,33,16,0,0,
+ 197,80,192,12,33,40,32,2,255,255,3,36,
+ 5,0,67,20,0,0,0,0,167,83,192,12,
+ 33,32,0,2,145,57,192,8,33,16,0,0,
+ 167,83,192,12,33,32,0,2,8,0,34,142,
+ 4,0,35,142,0,0,0,0,35,16,67,0,
+ 255,255,66,48,48,0,191,143,44,0,177,143,
+ 40,0,176,143,8,0,224,3,56,0,189,39,
+ 200,255,189,39,44,0,177,175,33,136,128,0,
+ 72,0,163,143,33,32,0,0,20,0,165,175,
+ 33,40,224,0,164,132,135,143,2,131,2,60,
+ 92,205,66,36,16,0,162,175,6,0,2,36,
+ 24,0,162,175,1,0,2,36,48,0,191,175,
+ 40,0,176,175,28,0,162,175,36,0,160,175,
+ 104,77,192,12,32,0,163,175,33,128,64,0,
+ 3,0,0,22,33,32,0,2,191,57,192,8,
+ 33,16,0,0,197,80,192,12,33,40,32,2,
+ 255,255,3,36,5,0,67,20,0,0,0,0,
+ 167,83,192,12,33,32,0,2,191,57,192,8,
+ 33,16,0,0,167,83,192,12,33,32,0,2,
+ 8,0,34,142,4,0,35,142,0,0,0,0,
+ 35,16,67,0,255,255,66,48,48,0,191,143,
+ 44,0,177,143,40,0,176,143,8,0,224,3,
+ 56,0,189,39,200,255,189,39,44,0,177,175,
+ 33,136,128,0,72,0,163,143,33,32,0,0,
+ 20,0,165,175,33,40,224,0,164,132,135,143,
+ 2,131,2,60,92,205,66,36,16,0,162,175,
+ 6,0,2,36,24,0,162,175,2,0,2,36,
+ 48,0,191,175,40,0,176,175,28,0,162,175,
+ 36,0,160,175,104,77,192,12,32,0,163,175,
+ 33,128,64,0,3,0,0,22,33,32,0,2,
+ 237,57,192,8,33,16,0,0,197,80,192,12,
+ 33,40,32,2,255,255,3,36,5,0,67,20,
+ 0,0,0,0,167,83,192,12,33,32,0,2,
+ 237,57,192,8,33,16,0,0,167,83,192,12,
+ 33,32,0,2,8,0,34,142,4,0,35,142,
+ 0,0,0,0,35,16,67,0,255,255,66,48,
+ 48,0,191,143,44,0,177,143,40,0,176,143,
+ 8,0,224,3,56,0,189,39,0,0,0,0,
+ 0,0,0,0,224,255,189,39,24,0,178,175,
+ 33,144,128,0,20,0,177,175,3,131,17,60,
+ 0,18,49,38,16,0,176,175,33,128,0,0,
+ 28,0,191,175,208,133,128,175,60,65,192,12,
+ 33,32,0,2,0,0,34,166,1,0,16,38,
+ 64,0,2,42,250,255,64,20,2,0,49,38,
+ 3,131,3,60,18,18,99,144,255,0,2,36,
+ 3,0,98,16,0,0,0,0,6,0,64,18,
+ 0,163,4,60,75,59,192,12,32,0,4,36,
+ 87,59,192,12,255,0,4,36,0,163,4,60,
+ 220,5,132,52,176,132,133,39,168,71,192,12,
+ 4,0,6,36,25,0,64,20,0,163,4,60,
+ 3,131,16,60,20,18,16,38,33,32,0,2,
+ 176,132,133,39,168,71,192,12,4,0,6,36,
+ 3,0,64,16,0,163,4,60,7,0,64,18,
+ 0,0,0,0,220,5,132,52,33,40,0,0,
+ 144,71,192,12,4,0,6,36,47,58,192,8,
+ 0,163,4,60,0,163,5,60,220,5,165,52,
+ 3,0,2,138,0,0,2,154,0,0,0,0,
+ 3,0,162,168,0,0,162,184,0,163,4,60,
+ 99,59,192,12,220,5,132,52,0,163,4,60,
+ 16,6,132,52,176,132,133,39,168,71,192,12,
+ 4,0,6,36,25,0,64,20,0,163,4,60,
+ 3,131,16,60,68,18,16,38,33,32,0,2,
+ 176,132,133,39,168,71,192,12,4,0,6,36,
+ 3,0,64,16,0,163,4,60,7,0,64,18,
+ 0,0,0,0,16,6,132,52,33,40,0,0,
+ 144,71,192,12,4,0,6,36,80,58,192,8,
+ 0,163,4,60,0,163,5,60,16,6,165,52,
+ 3,0,2,138,0,0,2,154,0,0,0,0,
+ 3,0,162,168,0,0,162,184,0,163,4,60,
+ 119,59,192,12,16,6,132,52,0,163,4,60,
+ 224,5,132,52,176,132,133,39,168,71,192,12,
+ 4,0,6,36,25,0,64,20,0,163,4,60,
+ 3,131,16,60,24,18,16,38,33,32,0,2,
+ 176,132,133,39,168,71,192,12,4,0,6,36,
+ 3,0,64,16,0,163,4,60,7,0,64,18,
+ 0,0,0,0,224,5,132,52,33,40,0,0,
+ 144,71,192,12,4,0,6,36,113,58,192,8,
+ 0,163,4,60,0,163,5,60,224,5,165,52,
+ 3,0,2,138,0,0,2,154,0,0,0,0,
+ 3,0,162,168,0,0,162,184,0,163,4,60,
+ 139,59,192,12,224,5,132,52,3,131,3,60,
+ 28,18,99,36,0,0,98,148,0,0,0,0,
+ 0,128,66,48,3,0,64,20,1,0,2,36,
+ 2,0,64,18,0,0,0,0,0,0,98,164,
+ 0,163,2,60,144,1,66,140,0,0,0,0,
+ 7,0,64,20,0,0,0,0,3,131,3,60,
+ 28,18,99,36,0,0,98,148,0,0,0,0,
+ 146,58,192,8,254,255,66,48,0,163,2,60,
+ 144,1,66,140,0,0,0,0,7,0,64,24,
+ 0,0,0,0,3,131,3,60,28,18,99,36,
+ 0,0,98,148,0,0,0,0,1,0,66,52,
+ 0,0,98,164,3,131,4,60,28,18,132,148,
+ 0,0,0,0,159,59,192,12,1,0,132,48,
+ 3,131,3,60,80,18,99,144,255,0,2,36,
+ 3,0,98,16,0,0,0,0,5,0,64,18,
+ 0,0,0,0,2,131,4,60,160,149,132,36,
+ 205,59,192,12,14,0,5,36,3,131,3,60,
+ 96,18,99,144,255,0,2,36,3,0,98,16,
+ 0,0,0,0,5,0,64,18,0,0,0,0,
+ 2,131,4,60,176,149,132,36,239,59,192,12,
+ 11,0,5,36,3,131,3,60,112,18,99,144,
+ 255,0,2,36,3,0,98,16,0,0,0,0,
+ 5,0,64,18,0,0,0,0,2,131,4,60,
+ 188,149,132,36,17,60,192,12,15,0,5,36,
+ 0,163,2,60,140,1,66,140,0,0,0,0,
+ 7,0,64,16,15,0,2,60,0,163,3,60,
+ 140,1,99,140,64,66,66,52,43,16,67,0,
+ 26,0,64,16,0,0,0,0,3,131,3,60,
+ 64,18,99,140,255,255,2,36,3,0,98,16,
+ 44,1,2,36,4,0,64,18,0,0,0,0,
+ 0,163,1,60,221,58,192,8,140,1,34,172,
+ 5,0,96,20,15,0,4,60,1,0,2,36,
+ 0,163,1,60,221,58,192,8,140,1,34,172,
+ 64,66,132,52,43,16,131,0,4,0,64,16,
+ 0,0,0,0,0,163,1,60,221,58,192,8,
+ 140,1,36,172,0,163,1,60,140,1,35,172,
+ 0,163,4,60,140,1,132,140,51,60,192,12,
+ 0,0,0,0,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,208,255,189,39,20,0,177,175,
+ 33,136,128,0,36,0,181,175,33,168,160,0,
+ 28,0,179,175,33,152,192,0,44,0,191,175,
+ 40,0,182,175,32,0,180,175,24,0,178,175,
+ 168,71,192,12,16,0,176,175,76,0,64,16,
+ 0,0,0,0,3,131,22,60,0,18,214,38,
+ 35,16,54,2,194,31,2,0,33,16,67,0,
+ 67,144,2,0,1,0,98,38,194,31,2,0,
+ 33,16,67,0,67,128,2,0,255,255,20,38,
+ 64,0,130,46,14,0,64,20,64,0,66,46,
+ 2,131,4,60,204,149,132,36,180,132,144,39,
+ 33,40,0,2,2,131,7,60,236,149,231,36,
+ 15,63,192,12,143,0,6,36,1,0,4,36,
+ 33,40,0,2,188,7,192,12,143,0,6,36,
+ 64,0,66,46,14,0,64,20,33,32,32,2,
+ 2,131,4,60,204,149,132,36,180,132,144,39,
+ 33,40,0,2,2,131,7,60,20,150,231,36,
+ 15,63,192,12,144,0,6,36,1,0,4,36,
+ 33,40,0,2,188,7,192,12,144,0,6,36,
+ 33,32,32,2,33,40,160,2,80,68,192,12,
+ 33,48,96,2,64,16,18,0,33,136,86,0,
+ 33,128,128,2,255,255,2,36,25,0,2,18,
+ 255,255,20,36,180,132,147,39,33,32,64,2,
+ 208,133,130,143,1,0,82,38,1,0,66,36,
+ 208,133,130,175,0,0,37,150,0,0,0,0,
+ 162,65,192,12,2,0,49,38,10,0,64,20,
+ 33,40,96,2,2,131,4,60,204,149,132,36,
+ 188,132,135,39,15,63,192,12,159,0,6,36,
+ 1,0,4,36,33,40,96,2,188,7,192,12,
+ 159,0,6,36,255,255,16,38,235,255,20,22,
+ 33,32,64,2,44,0,191,143,40,0,182,143,
+ 36,0,181,143,32,0,180,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,48,0,189,39,224,255,189,39,
+ 16,0,164,163,3,131,4,60,18,18,132,36,
+ 16,0,165,39,24,0,191,175,231,58,192,12,
+ 1,0,6,36,24,0,191,143,32,0,189,39,
+ 8,0,224,3,0,0,0,0,224,255,189,39,
+ 16,0,164,163,3,131,4,60,19,18,132,36,
+ 16,0,165,39,24,0,191,175,231,58,192,12,
+ 1,0,6,36,24,0,191,143,32,0,189,39,
+ 8,0,224,3,0,0,0,0,232,255,189,39,
+ 33,40,128,0,16,0,176,175,3,131,16,60,
+ 20,18,16,38,33,32,0,2,20,0,191,175,
+ 231,58,192,12,4,0,6,36,0,163,5,60,
+ 220,5,165,52,3,0,2,138,0,0,2,154,
+ 0,0,0,0,3,0,162,168,0,0,162,184,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,232,255,189,39,33,40,128,0,
+ 16,0,176,175,3,131,16,60,68,18,16,38,
+ 33,32,0,2,20,0,191,175,231,58,192,12,
+ 4,0,6,36,0,163,5,60,16,6,165,52,
+ 3,0,2,138,0,0,2,154,0,0,0,0,
+ 3,0,162,168,0,0,162,184,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 232,255,189,39,33,40,128,0,16,0,176,175,
+ 3,131,16,60,24,18,16,38,33,32,0,2,
+ 20,0,191,175,231,58,192,12,4,0,6,36,
+ 0,163,5,60,224,5,165,52,3,0,2,138,
+ 0,0,2,154,0,0,0,0,3,0,162,168,
+ 0,0,162,184,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,3,131,2,60,
+ 28,18,66,148,224,255,189,39,24,0,191,175,
+ 8,0,128,16,16,0,162,167,1,0,66,52,
+ 16,0,162,167,1,0,2,36,44,133,130,175,
+ 0,163,1,60,177,59,192,8,144,1,34,172,
+ 254,255,66,48,16,0,162,167,44,133,128,175,
+ 0,163,1,60,144,1,32,172,3,131,4,60,
+ 28,18,132,36,16,0,165,39,231,58,192,12,
+ 2,0,6,36,24,0,191,143,32,0,189,39,
+ 8,0,224,3,0,0,0,0,3,131,2,60,
+ 28,18,66,148,224,255,189,39,24,0,191,175,
+ 3,0,128,16,16,0,162,167,195,59,192,8,
+ 2,0,66,52,253,255,66,48,16,0,162,167,
+ 3,131,4,60,28,18,132,36,16,0,165,39,
+ 231,58,192,12,2,0,6,36,24,0,191,143,
+ 32,0,189,39,8,0,224,3,0,0,0,0,
+ 216,255,189,39,32,0,191,175,33,56,128,0,
+ 33,48,160,0,3,0,226,136,0,0,226,152,
+ 7,0,227,136,4,0,227,152,11,0,228,136,
+ 8,0,228,152,15,0,229,136,12,0,229,152,
+ 19,0,162,171,16,0,162,187,23,0,163,171,
+ 20,0,163,187,27,0,164,171,24,0,164,187,
+ 31,0,165,171,28,0,165,187,16,0,194,44,
+ 3,0,64,16,16,0,163,39,33,16,102,0,
+ 0,0,64,160,3,131,4,60,80,18,132,36,
+ 33,40,224,0,231,58,192,12,16,0,6,36,
+ 32,0,191,143,40,0,189,39,8,0,224,3,
+ 0,0,0,0,216,255,189,39,32,0,191,175,
+ 33,56,128,0,33,48,160,0,3,0,226,136,
+ 0,0,226,152,7,0,227,136,4,0,227,152,
+ 11,0,228,136,8,0,228,152,15,0,229,136,
+ 12,0,229,152,19,0,162,171,16,0,162,187,
+ 23,0,163,171,20,0,163,187,27,0,164,171,
+ 24,0,164,187,31,0,165,171,28,0,165,187,
+ 16,0,194,44,3,0,64,16,16,0,163,39,
+ 33,16,102,0,0,0,64,160,3,131,4,60,
+ 96,18,132,36,33,40,224,0,231,58,192,12,
+ 16,0,6,36,32,0,191,143,40,0,189,39,
+ 8,0,224,3,0,0,0,0,216,255,189,39,
+ 32,0,191,175,33,56,128,0,33,48,160,0,
+ 3,0,226,136,0,0,226,152,7,0,227,136,
+ 4,0,227,152,11,0,228,136,8,0,228,152,
+ 15,0,229,136,12,0,229,152,19,0,162,171,
+ 16,0,162,187,23,0,163,171,20,0,163,187,
+ 27,0,164,171,24,0,164,187,31,0,165,171,
+ 28,0,165,187,16,0,194,44,3,0,64,16,
+ 16,0,163,39,33,16,102,0,0,0,64,160,
+ 3,131,4,60,112,18,132,36,33,40,224,0,
+ 231,58,192,12,16,0,6,36,32,0,191,143,
+ 40,0,189,39,8,0,224,3,0,0,0,0,
+ 232,255,189,39,15,0,2,60,54,66,66,52,
+ 24,0,164,175,246,255,132,36,43,16,68,0,
+ 3,0,64,16,16,0,191,175,44,1,2,36,
+ 24,0,162,175,3,131,4,60,64,18,132,36,
+ 24,0,165,39,231,58,192,12,4,0,6,36,
+ 24,0,162,143,0,163,1,60,140,1,34,172,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,232,255,189,39,16,0,191,175,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,232,255,189,39,16,0,191,175,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,160,255,189,39,112,0,162,143,
+ 72,0,176,175,33,128,224,0,88,0,180,175,
+ 33,160,0,0,84,0,179,175,33,152,192,0,
+ 92,0,191,175,80,0,178,175,7,0,160,16,
+ 76,0,177,175,6,0,65,4,51,0,177,39,
+ 45,0,20,36,3,0,0,18,35,16,2,0,
+ 255,255,16,38,51,0,177,39,51,0,160,163,
+ 27,0,68,0,2,0,128,20,0,0,0,0,
+ 13,0,7,0,18,24,0,0,16,16,0,0,
+ 2,131,1,60,33,8,34,0,128,205,34,144,
+ 255,255,49,38,2,0,0,18,0,0,34,162,
+ 255,255,16,38,33,16,96,0,241,255,64,20,
+ 1,0,3,36,0,22,19,0,3,22,2,0,
+ 11,0,67,20,33,32,128,2,255,255,16,38,
+ 255,255,2,36,7,0,2,18,0,0,0,0,
+ 255,255,18,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,18,22,33,32,128,2,
+ 4,0,128,16,0,22,19,0,196,64,192,12,
+ 0,0,0,0,0,22,19,0,3,22,2,0,
+ 2,0,3,36,14,0,67,20,255,255,2,36,
+ 255,255,16,38,11,0,2,18,255,255,18,36,
+ 196,64,192,12,48,0,4,36,255,255,16,38,
+ 6,0,18,18,0,0,0,0,156,60,192,8,
+ 0,0,0,0,0,38,4,0,196,64,192,12,
+ 3,38,4,0,0,0,34,130,0,0,36,146,
+ 0,0,0,0,249,255,64,20,1,0,49,38,
+ 255,255,49,38,0,22,19,0,3,22,2,0,
+ 3,0,3,36,9,0,67,20,255,255,16,38,
+ 255,255,2,36,6,0,2,18,255,255,17,36,
+ 196,64,192,12,32,0,4,36,255,255,16,38,
+ 252,255,17,22,0,0,0,0,92,0,191,143,
+ 88,0,180,143,84,0,179,143,80,0,178,143,
+ 76,0,177,143,72,0,176,143,8,0,224,3,
+ 96,0,189,39,200,255,189,39,40,0,178,175,
+ 33,144,128,0,32,0,176,175,33,128,160,0,
+ 36,0,177,175,33,136,192,0,33,32,32,2,
+ 48,0,191,175,156,71,192,12,44,0,179,175,
+ 33,32,0,0,33,24,64,0,42,16,112,0,
+ 2,0,64,16,33,152,64,2,35,32,3,2,
+ 0,22,18,0,3,22,2,0,1,0,3,36,
+ 11,0,67,20,33,128,128,0,255,255,16,38,
+ 255,255,2,36,8,0,2,18,0,22,19,0,
+ 255,255,18,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,18,22,0,0,0,0,
+ 0,22,19,0,3,22,2,0,2,0,3,36,
+ 14,0,67,20,255,255,2,36,255,255,16,38,
+ 11,0,2,18,255,255,18,36,196,64,192,12,
+ 48,0,4,36,255,255,16,38,6,0,18,18,
+ 0,0,0,0,233,60,192,8,0,0,0,0,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 0,0,34,130,0,0,36,146,0,0,0,0,
+ 249,255,64,20,1,0,49,38,255,255,49,38,
+ 0,22,19,0,3,22,2,0,3,0,3,36,
+ 9,0,67,20,255,255,16,38,255,255,2,36,
+ 6,0,2,18,255,255,17,36,196,64,192,12,
+ 32,0,4,36,255,255,16,38,252,255,17,22,
+ 0,0,0,0,48,0,191,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,56,0,189,39,32,255,189,39,
+ 192,0,178,175,33,144,160,0,196,0,179,175,
+ 33,152,0,0,220,0,191,175,216,0,190,175,
+ 212,0,183,175,208,0,182,175,204,0,181,175,
+ 200,0,180,175,188,0,177,175,184,0,176,175,
+ 1,0,130,128,0,0,0,0,229,1,64,16,
+ 33,136,0,0,255,255,22,36,1,0,23,36,
+ 2,0,30,36,1,0,149,36,0,0,162,146,
+ 0,0,0,0,219,255,66,36,0,22,2,0,
+ 3,30,2,0,84,0,98,44,217,1,64,16,
+ 128,16,3,0,2,131,1,60,33,8,34,0,
+ 80,150,34,140,0,0,0,0,8,0,64,0,
+ 0,0,0,0,209,61,192,8,37,0,4,36,
+ 2,131,16,60,64,150,16,38,156,71,192,12,
+ 33,32,0,2,59,61,192,8,0,0,0,0,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 0,0,2,130,0,0,4,146,0,0,0,0,
+ 249,255,64,20,1,0,16,38,3,63,192,8,
+ 1,0,162,38,0,38,18,0,209,61,192,8,
+ 3,38,4,0,2,0,3,36,33,128,32,2,
+ 33,40,64,2,33,160,0,0,51,0,177,39,
+ 51,0,160,163,27,0,163,0,2,0,96,20,
+ 0,0,0,0,13,0,7,0,18,40,0,0,
+ 16,16,0,0,2,131,1,60,33,8,34,0,
+ 128,205,34,144,255,255,49,38,2,0,0,18,
+ 0,0,34,162,255,255,16,38,242,255,160,20,
+ 0,0,0,0,10,0,119,22,0,22,20,0,
+ 255,255,16,38,8,0,22,18,3,38,2,0,
+ 255,255,18,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,18,22,0,22,20,0,
+ 3,38,2,0,3,0,128,16,0,0,0,0,
+ 196,64,192,12,0,0,0,0,14,0,126,22,
+ 0,0,0,0,255,255,16,38,11,0,22,18,
+ 255,255,18,36,196,64,192,12,48,0,4,36,
+ 255,255,16,38,6,0,18,18,0,0,0,0,
+ 111,61,192,8,0,0,0,0,0,38,4,0,
+ 196,64,192,12,3,38,4,0,0,0,34,130,
+ 0,0,36,146,0,0,0,0,249,255,64,20,
+ 1,0,49,38,255,255,49,38,3,0,6,36,
+ 80,0,102,22,66,0,4,36,255,255,16,38,
+ 77,0,22,18,255,255,17,36,196,64,192,12,
+ 32,0,4,36,255,255,16,38,252,255,17,22,
+ 66,0,4,36,209,61,192,8,0,0,0,0,
+ 8,0,3,36,33,128,32,2,33,40,64,2,
+ 33,160,0,0,51,0,177,39,51,0,160,163,
+ 27,0,163,0,2,0,96,20,0,0,0,0,
+ 13,0,7,0,18,40,0,0,16,16,0,0,
+ 2,131,1,60,33,8,34,0,128,205,34,144,
+ 255,255,49,38,2,0,0,18,0,0,34,162,
+ 255,255,16,38,242,255,160,20,0,0,0,0,
+ 10,0,119,22,0,22,20,0,255,255,16,38,
+ 8,0,22,18,3,38,2,0,255,255,18,36,
+ 196,64,192,12,32,0,4,36,255,255,16,38,
+ 252,255,18,22,0,22,20,0,3,38,2,0,
+ 3,0,128,16,0,0,0,0,196,64,192,12,
+ 0,0,0,0,14,0,126,22,0,0,0,0,
+ 255,255,16,38,11,0,22,18,255,255,18,36,
+ 196,64,192,12,48,0,4,36,255,255,16,38,
+ 6,0,18,18,0,0,0,0,182,61,192,8,
+ 0,0,0,0,0,38,4,0,196,64,192,12,
+ 3,38,4,0,0,0,34,130,0,0,36,146,
+ 0,0,0,0,249,255,64,20,1,0,49,38,
+ 255,255,49,38,3,0,6,36,9,0,102,22,
+ 81,0,4,36,255,255,16,38,6,0,22,18,
+ 255,255,17,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,17,22,81,0,4,36,
+ 196,64,192,12,0,0,0,0,3,63,192,8,
+ 1,0,162,38,33,128,32,2,33,16,64,2,
+ 33,160,0,0,5,0,65,6,10,0,4,36,
+ 45,0,20,36,2,0,32,18,35,16,18,0,
+ 255,255,48,38,51,0,177,39,51,0,160,163,
+ 27,0,68,0,2,0,128,20,0,0,0,0,
+ 13,0,7,0,18,24,0,0,16,16,0,0,
+ 2,131,1,60,33,8,34,0,128,205,34,144,
+ 255,255,49,38,2,0,0,18,0,0,34,162,
+ 255,255,16,38,33,16,96,0,241,255,64,20,
+ 0,0,0,0,10,0,119,22,33,32,128,2,
+ 255,255,16,38,7,0,22,18,0,0,0,0,
+ 255,255,18,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,18,22,33,32,128,2,
+ 3,0,128,16,0,0,0,0,196,64,192,12,
+ 0,0,0,0,14,0,126,22,0,0,0,0,
+ 255,255,16,38,11,0,22,18,255,255,18,36,
+ 196,64,192,12,48,0,4,36,255,255,16,38,
+ 6,0,18,18,0,0,0,0,4,62,192,8,
+ 0,0,0,0,0,38,4,0,196,64,192,12,
+ 3,38,4,0,0,0,34,130,0,0,36,146,
+ 0,0,0,0,249,255,64,20,1,0,49,38,
+ 255,255,49,38,3,0,6,36,237,0,102,22,
+ 1,0,162,38,255,255,16,38,234,0,22,18,
+ 255,255,17,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,17,22,1,0,162,38,
+ 3,63,192,8,0,0,0,0,10,0,3,36,
+ 33,128,32,2,33,40,64,2,33,160,0,0,
+ 51,0,177,39,51,0,160,163,27,0,163,0,
+ 2,0,96,20,0,0,0,0,13,0,7,0,
+ 18,40,0,0,16,16,0,0,2,131,1,60,
+ 33,8,34,0,128,205,34,144,255,255,49,38,
+ 2,0,0,18,0,0,34,162,255,255,16,38,
+ 242,255,160,20,0,0,0,0,10,0,119,22,
+ 0,22,20,0,255,255,16,38,8,0,22,18,
+ 3,38,2,0,255,255,18,36,196,64,192,12,
+ 32,0,4,36,255,255,16,38,252,255,18,22,
+ 0,22,20,0,3,38,2,0,3,0,128,16,
+ 0,0,0,0,196,64,192,12,0,0,0,0,
+ 14,0,126,22,0,0,0,0,255,255,16,38,
+ 11,0,22,18,255,255,18,36,196,64,192,12,
+ 48,0,4,36,255,255,16,38,6,0,18,18,
+ 0,0,0,0,75,62,192,8,0,0,0,0,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 0,0,34,130,0,0,36,146,0,0,0,0,
+ 249,255,64,20,1,0,49,38,255,255,49,38,
+ 3,0,6,36,166,0,102,22,1,0,162,38,
+ 255,255,16,38,163,0,22,18,255,255,17,36,
+ 196,64,192,12,32,0,4,36,255,255,16,38,
+ 252,255,17,22,1,0,162,38,3,63,192,8,
+ 0,0,0,0,192,132,144,39,156,71,192,12,
+ 33,32,0,2,112,62,192,8,0,0,0,0,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 0,0,2,130,0,0,4,146,0,0,0,0,
+ 249,255,64,20,1,0,16,38,16,0,3,36,
+ 33,128,32,2,33,40,64,2,33,160,0,0,
+ 51,0,177,39,51,0,160,163,27,0,163,0,
+ 2,0,96,20,0,0,0,0,13,0,7,0,
+ 18,40,0,0,16,16,0,0,2,131,1,60,
+ 33,8,34,0,128,205,34,144,255,255,49,38,
+ 2,0,0,18,0,0,34,162,255,255,16,38,
+ 242,255,160,20,0,0,0,0,10,0,119,22,
+ 0,22,20,0,255,255,16,38,8,0,22,18,
+ 3,38,2,0,255,255,18,36,196,64,192,12,
+ 32,0,4,36,255,255,16,38,252,255,18,22,
+ 0,22,20,0,3,38,2,0,3,0,128,16,
+ 0,0,0,0,196,64,192,12,0,0,0,0,
+ 14,0,126,22,0,0,0,0,255,255,16,38,
+ 11,0,22,18,255,255,18,36,196,64,192,12,
+ 48,0,4,36,255,255,16,38,6,0,18,18,
+ 0,0,0,0,159,62,192,8,0,0,0,0,
+ 0,38,4,0,196,64,192,12,3,38,4,0,
+ 0,0,34,130,0,0,36,146,0,0,0,0,
+ 249,255,64,20,1,0,49,38,255,255,49,38,
+ 3,0,6,36,82,0,102,22,1,0,162,38,
+ 255,255,16,38,79,0,22,18,255,255,17,36,
+ 196,64,192,12,32,0,4,36,255,255,16,38,
+ 252,255,17,22,1,0,162,38,3,63,192,8,
+ 0,0,0,0,156,71,192,12,33,32,64,2,
+ 33,24,64,0,42,16,113,0,2,0,64,16,
+ 33,32,0,0,35,32,35,2,10,0,119,22,
+ 33,128,128,0,255,255,16,38,7,0,22,18,
+ 0,0,0,0,255,255,17,36,196,64,192,12,
+ 32,0,4,36,255,255,16,38,252,255,17,22,
+ 0,0,0,0,14,0,126,22,0,0,0,0,
+ 255,255,16,38,11,0,22,18,255,255,17,36,
+ 196,64,192,12,48,0,4,36,255,255,16,38,
+ 6,0,17,18,0,0,0,0,211,62,192,8,
+ 0,0,0,0,0,38,4,0,196,64,192,12,
+ 3,38,4,0,0,0,66,130,0,0,68,146,
+ 0,0,0,0,249,255,64,20,1,0,82,38,
+ 255,255,82,38,3,0,6,36,30,0,102,22,
+ 1,0,162,38,255,255,16,38,27,0,22,18,
+ 255,255,17,36,196,64,192,12,32,0,4,36,
+ 255,255,16,38,252,255,17,22,1,0,162,38,
+ 3,63,192,8,0,0,0,0,253,62,192,8,
+ 3,0,19,36,3,0,96,22,128,16,17,0,
+ 2,0,19,36,128,16,17,0,33,16,81,0,
+ 64,16,2,0,0,0,163,130,208,255,66,36,
+ 2,0,96,22,33,136,67,0,1,0,19,36,
+ 1,0,181,38,0,0,162,130,0,0,0,0,
+ 33,254,64,20,0,0,0,0,1,0,130,36,
+ 220,0,191,143,216,0,190,143,212,0,183,143,
+ 208,0,182,143,204,0,181,143,200,0,180,143,
+ 196,0,179,143,192,0,178,143,188,0,177,143,
+ 184,0,176,143,8,0,224,3,224,0,189,39,
+ 0,0,164,175,4,0,165,175,8,0,166,175,
+ 12,0,167,175,200,255,189,39,59,0,162,39,
+ 252,255,3,36,36,16,67,0,52,0,191,175,
+ 48,0,180,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,32,0,176,175,56,0,164,175,
+ 0,0,80,140,4,0,81,36,0,0,2,130,
+ 0,0,3,146,0,0,0,0,31,0,64,16,
+ 37,0,20,36,69,0,19,36,252,255,18,36,
+ 0,22,3,0,3,38,2,0,18,0,148,20,
+ 0,0,0,0,1,0,3,130,0,0,0,0,
+ 4,0,100,16,33,32,0,2,4,0,115,20,
+ 3,0,34,38,33,32,0,2,56,63,192,8,
+ 33,40,0,0,36,16,82,0,4,0,81,36,
+ 0,0,69,140,33,32,0,2,13,61,192,12,
+ 0,0,0,0,62,63,192,8,33,128,64,0,
+ 196,64,192,12,1,0,16,38,0,0,2,130,
+ 0,0,3,146,0,0,0,0,230,255,64,20,
+ 0,22,3,0,52,0,191,143,48,0,180,143,
+ 44,0,179,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,56,0,189,39,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,8,0,224,3,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 8,0,224,3,0,0,0,0,33,72,224,3,
+ 170,3,8,36,76,63,192,12,0,0,0,0,
+ 255,255,8,33,252,255,0,21,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,8,0,32,1,
+ 0,0,0,0,33,88,224,3,232,3,10,36,
+ 143,63,192,12,0,0,0,0,255,255,74,33,
+ 252,255,64,21,0,0,0,0,8,0,96,1,
+ 0,0,0,0,250,255,132,32,130,32,4,0,
+ 255,255,132,32,0,0,0,0,253,255,128,20,
+ 0,0,0,0,8,0,224,3,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 248,255,189,39,255,255,195,36,10,0,192,16,
+ 33,56,160,0,255,255,6,36,0,0,162,144,
+ 1,0,165,36,0,0,130,160,4,0,64,16,
+ 1,0,132,36,255,255,99,36,249,255,102,20,
+ 0,0,0,0,33,16,224,0,8,0,224,3,
+ 8,0,189,39,0,96,2,64,0,0,0,0,
+ 38,64,68,0,1,255,8,49,38,64,2,1,
+ 0,96,136,64,8,0,224,3,1,255,66,48,
+ 0,96,2,64,0,0,0,0,1,255,132,48,
+ 39,32,128,0,36,64,68,0,0,96,136,64,
+ 8,0,224,3,1,255,66,48,0,96,2,64,
+ 0,0,0,0,0,255,132,48,37,64,68,0,
+ 1,0,8,53,0,96,136,64,8,0,224,3,
+ 1,255,66,48,176,255,189,39,64,0,182,175,
+ 33,176,128,0,52,0,179,175,33,152,160,0,
+ 72,0,190,175,33,240,192,0,68,0,183,175,
+ 33,184,224,0,60,0,181,175,33,168,0,0,
+ 56,0,180,175,33,160,0,0,76,0,191,175,
+ 48,0,178,175,44,0,177,175,40,0,176,175,
+ 2,131,6,60,33,48,212,0,160,205,198,144,
+ 0,0,0,0,28,0,96,26,33,128,0,0,
+ 33,24,192,2,33,32,118,2,0,0,102,160,
+ 1,0,99,36,42,16,100,0,252,255,64,20,
+ 0,0,0,0,19,0,96,26,33,128,0,0,
+ 255,0,210,48,33,136,192,2,0,0,39,146,
+ 0,0,0,0,9,0,242,16,0,0,0,0,
+ 5,0,224,18,33,40,0,2,96,0,164,143,
+ 0,0,0,0,9,248,224,2,33,48,64,2,
+ 32,0,192,23,1,0,181,38,1,0,16,38,
+ 42,16,19,2,241,255,64,20,1,0,49,38,
+ 1,0,148,38,4,0,130,46,220,255,64,20,
+ 0,0,0,0,7,0,96,26,33,128,0,0,
+ 33,24,192,2,0,0,112,160,1,0,16,38,
+ 42,16,19,2,252,255,64,20,1,0,99,36,
+ 20,0,96,26,33,128,0,0,33,136,192,2,
+ 0,0,39,146,255,0,6,50,11,0,230,16,
+ 0,0,0,0,5,0,224,18,0,0,0,0,
+ 96,0,164,143,0,0,0,0,9,248,224,2,
+ 33,40,0,2,3,0,192,19,1,0,181,38,
+ 72,64,192,8,1,0,2,36,1,0,16,38,
+ 42,16,19,2,239,255,64,20,1,0,49,38,
+ 33,16,160,2,76,0,191,143,72,0,190,143,
+ 68,0,183,143,64,0,182,143,60,0,181,143,
+ 56,0,180,143,52,0,179,143,48,0,178,143,
+ 44,0,177,143,40,0,176,143,8,0,224,3,
+ 80,0,189,39,160,255,189,39,88,0,190,175,
+ 33,240,128,0,68,0,179,175,33,152,160,0,
+ 76,0,181,175,33,168,224,0,72,0,180,175,
+ 33,160,0,0,33,16,96,2,92,0,191,175,
+ 84,0,183,175,80,0,182,175,64,0,178,175,
+ 60,0,177,175,56,0,176,175,2,0,97,6,
+ 16,0,166,175,3,0,98,38,131,152,2,0,
+ 33,184,0,0,2,131,22,60,164,205,214,38,
+ 0,0,210,142,0,0,0,0,7,0,96,18,
+ 33,128,0,0,33,24,192,3,0,0,114,172,
+ 1,0,16,38,43,16,19,2,252,255,64,20,
+ 4,0,99,36,20,0,96,18,33,128,0,0,
+ 33,136,192,3,0,0,39,142,0,0,0,0,
+ 11,0,242,16,128,40,16,0,5,0,160,18,
+ 0,0,0,0,112,0,164,143,0,0,0,0,
+ 9,248,160,2,33,48,64,2,16,0,168,143,
+ 0,0,0,0,34,0,0,21,1,0,148,38,
+ 1,0,16,38,43,16,19,2,239,255,64,20,
+ 4,0,49,38,1,0,247,38,4,0,226,46,
+ 222,255,64,20,4,0,214,38,7,0,96,18,
+ 33,128,0,0,33,24,192,3,0,0,112,172,
+ 1,0,16,38,43,16,19,2,252,255,64,20,
+ 4,0,99,36,22,0,96,18,33,128,0,0,
+ 33,136,192,3,0,0,39,142,0,0,0,0,
+ 13,0,240,16,128,40,16,0,5,0,160,18,
+ 0,0,0,0,112,0,164,143,0,0,0,0,
+ 9,248,160,2,33,48,0,2,16,0,168,143,
+ 0,0,0,0,3,0,0,17,1,0,148,38,
+ 174,64,192,8,1,0,2,36,1,0,16,38,
+ 43,16,19,2,237,255,64,20,4,0,49,38,
+ 33,16,128,2,92,0,191,143,88,0,190,143,
+ 84,0,183,143,80,0,182,143,76,0,181,143,
+ 72,0,180,143,68,0,179,143,64,0,178,143,
+ 60,0,177,143,56,0,176,143,8,0,224,3,
+ 96,0,189,39,0,0,0,0,0,0,0,0,
+ 255,1,2,36,0,163,1,60,176,1,32,172,
+ 0,163,1,60,180,1,32,172,0,163,1,60,
+ 8,0,224,3,184,1,34,172,232,255,189,39,
+ 16,0,176,175,33,128,128,0,20,0,191,175,
+ 220,63,192,12,33,32,0,0,33,40,64,0,
+ 0,163,3,60,180,1,99,140,0,163,2,60,
+ 184,1,66,140,0,163,4,60,128,1,132,140,
+ 1,0,99,36,11,0,128,20,36,24,98,0,
+ 0,163,2,60,176,1,66,140,0,0,0,0,
+ 6,0,98,20,0,0,0,0,0,163,2,60,
+ 128,1,66,140,0,0,0,0,247,255,64,16,
+ 0,0,0,0,0,163,2,60,180,1,66,140,
+ 33,32,160,0,0,163,1,60,33,8,34,0,
+ 188,1,48,160,0,163,1,60,220,63,192,12,
+ 180,1,35,172,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,192,255,189,39,33,80,0,0,
+ 80,0,185,143,84,0,184,143,88,0,175,143,
+ 1,0,2,36,60,0,177,175,92,0,177,143,
+ 4,112,226,0,12,0,224,24,56,0,176,175,
+ 1,0,9,36,33,64,160,3,33,24,32,3,
+ 0,0,98,140,4,0,99,36,1,0,74,37,
+ 4,16,73,0,0,0,2,173,42,16,71,1,
+ 249,255,64,20,4,0,8,37,46,0,192,25,
+ 33,80,0,0,255,0,16,36,33,72,0,0,
+ 0,0,145,172,0,0,160,164,34,0,224,24,
+ 0,0,208,160,33,88,128,0,33,96,160,0,
+ 33,104,192,0,33,64,32,3,33,24,160,3,
+ 0,0,98,140,0,0,0,0,36,16,66,1,
+ 19,0,64,16,0,0,0,0,0,0,2,141,
+ 0,0,0,0,128,16,2,0,33,16,88,0,
+ 0,0,66,140,0,0,0,0,0,0,98,173,
+ 0,0,2,141,0,0,0,0,64,16,2,0,
+ 33,16,79,0,0,0,66,148,0,0,0,0,
+ 0,0,130,165,0,0,2,141,0,0,0,0,
+ 47,65,192,8,0,0,162,161,4,0,8,37,
+ 1,0,41,37,42,16,39,1,229,255,64,20,
+ 4,0,99,36,1,0,198,36,2,0,165,36,
+ 1,0,74,37,42,16,78,1,213,255,64,20,
+ 4,0,132,36,60,0,177,143,56,0,176,143,
+ 8,0,224,3,64,0,189,39,0,0,0,0,
+ 0,0,0,0,0,0,0,0,216,255,189,39,
+ 63,0,132,48,28,0,179,175,128,1,147,52,
+ 50,133,130,151,48,133,132,151,0,128,131,151,
+ 20,0,177,175,16,0,176,175,5,162,16,60,
+ 32,0,191,175,24,0,178,175,39,16,68,0,
+ 36,24,98,0,0,128,131,167,0,0,3,166,
+ 76,63,192,12,0,1,17,36,0,128,130,151,
+ 48,133,131,151,5,162,18,60,37,16,67,0,
+ 0,128,130,167,0,0,2,166,36,16,51,2,
+ 6,0,64,16,0,0,0,0,0,128,131,151,
+ 20,133,130,151,0,0,0,0,96,65,192,8,
+ 37,24,98,0,20,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,0,128,131,167,
+ 0,0,67,166,76,63,192,12,66,136,17,0,
+ 0,128,130,151,50,133,131,151,0,0,0,0,
+ 37,16,67,0,0,128,130,167,76,63,192,12,
+ 0,0,66,166,50,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,255,255,34,50,
+ 0,128,131,167,0,0,67,166,226,255,64,20,
+ 36,16,51,2,33,136,0,0,16,0,16,36,
+ 5,162,18,60,255,255,19,36,76,63,192,12,
+ 0,0,0,0,0,128,131,151,50,133,130,151,
+ 0,0,0,0,37,24,98,0,0,128,131,167,
+ 76,63,192,12,0,0,67,166,8,0,0,18,
+ 0,0,0,0,0,0,67,150,20,133,130,151,
+ 0,0,0,0,36,16,67,0,2,0,64,16,
+ 64,136,17,0,1,0,49,54,76,63,192,12,
+ 255,255,16,38,50,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,0,128,131,167,
+ 230,255,19,22,0,0,67,166,48,133,130,151,
+ 0,128,131,151,39,16,2,0,36,24,98,0,
+ 5,162,2,60,0,128,131,167,0,0,67,164,
+ 255,255,34,50,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,208,255,189,39,
+ 36,0,181,175,33,168,160,0,32,0,180,175,
+ 63,0,148,48,33,32,128,2,44,0,191,175,
+ 40,0,182,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,60,65,192,12,16,0,176,175,
+ 33,152,64,0,255,255,163,50,255,255,98,50,
+ 3,0,98,20,48,1,22,36,245,66,192,8,
+ 1,0,2,36,5,162,16,60,50,133,130,151,
+ 48,133,132,151,0,128,131,151,39,16,68,0,
+ 36,24,98,0,0,128,131,167,0,0,3,166,
+ 76,63,192,12,0,1,17,36,0,128,130,151,
+ 48,133,131,151,5,162,18,60,37,16,67,0,
+ 0,128,130,167,0,0,2,166,36,16,54,2,
+ 6,0,64,16,0,0,0,0,0,128,131,151,
+ 20,133,130,151,0,0,0,0,210,65,192,8,
+ 37,24,98,0,20,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,0,128,131,167,
+ 0,0,67,166,76,63,192,12,66,136,17,0,
+ 0,128,130,151,50,133,131,151,0,0,0,0,
+ 37,16,67,0,0,128,130,167,76,63,192,12,
+ 0,0,66,166,50,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,255,255,34,50,
+ 0,128,131,167,0,0,67,166,226,255,64,20,
+ 36,16,54,2,255,255,163,50,255,255,98,50,
+ 39,16,2,0,36,24,98,0,84,0,96,16,
+ 192,1,147,54,5,162,16,60,50,133,130,151,
+ 48,133,132,151,0,128,131,151,39,16,68,0,
+ 36,24,98,0,0,128,131,167,0,0,3,166,
+ 76,63,192,12,0,1,17,36,0,128,130,151,
+ 48,133,131,151,5,162,18,60,37,16,67,0,
+ 0,128,130,167,0,0,2,166,36,16,51,2,
+ 6,0,64,16,0,0,0,0,0,128,131,151,
+ 20,133,130,151,0,0,0,0,8,66,192,8,
+ 37,24,98,0,20,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,0,128,131,167,
+ 0,0,67,166,76,63,192,12,66,136,17,0,
+ 0,128,130,151,50,133,131,151,0,0,0,0,
+ 37,16,67,0,0,128,130,167,76,63,192,12,
+ 0,0,66,166,50,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,255,255,34,50,
+ 0,128,131,167,0,0,67,166,226,255,64,20,
+ 36,16,51,2,5,162,16,60,50,133,130,151,
+ 48,133,132,151,0,128,131,151,39,16,68,0,
+ 36,24,98,0,0,128,131,167,76,63,192,12,
+ 0,0,3,166,0,128,131,151,48,133,130,151,
+ 0,0,0,0,37,24,98,0,0,0,3,166,
+ 0,0,4,150,20,133,130,151,0,128,131,167,
+ 36,16,68,0,9,0,64,20,0,0,0,0,
+ 76,63,192,12,0,0,0,0,0,0,3,150,
+ 20,133,130,151,0,0,0,0,36,16,67,0,
+ 249,255,64,16,0,0,0,0,48,133,130,151,
+ 0,128,131,151,39,16,2,0,36,24,98,0,
+ 5,162,2,60,0,128,131,167,0,0,67,164,
+ 255,255,163,50,255,255,2,52,125,0,98,16,
+ 64,1,147,54,5,162,16,60,50,133,130,151,
+ 48,133,132,151,0,128,131,151,39,16,68,0,
+ 36,24,98,0,0,128,131,167,0,0,3,166,
+ 76,63,192,12,0,1,17,36,0,128,130,151,
+ 48,133,131,151,5,162,18,60,37,16,67,0,
+ 0,128,130,167,0,0,2,166,36,16,51,2,
+ 6,0,64,16,0,0,0,0,0,128,131,151,
+ 20,133,130,151,0,0,0,0,95,66,192,8,
+ 37,24,98,0,20,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,0,128,131,167,
+ 0,0,67,166,76,63,192,12,66,136,17,0,
+ 0,128,130,151,50,133,131,151,0,0,0,0,
+ 37,16,67,0,0,128,130,167,76,63,192,12,
+ 0,0,66,166,50,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,255,255,34,50,
+ 0,128,131,167,0,0,67,166,226,255,64,20,
+ 36,16,51,2,33,144,160,2,0,128,16,52,
+ 0,128,130,151,48,133,131,151,5,162,17,60,
+ 37,16,67,0,5,162,3,60,0,128,130,167,
+ 0,0,98,164,36,16,18,2,6,0,64,16,
+ 0,0,0,0,0,128,131,151,20,133,130,151,
+ 0,0,0,0,136,66,192,8,37,24,98,0,
+ 20,133,130,151,0,128,131,151,39,16,2,0,
+ 36,24,98,0,0,128,131,167,0,0,35,166,
+ 76,63,192,12,66,128,16,0,0,128,130,151,
+ 50,133,131,151,0,0,0,0,37,16,67,0,
+ 0,128,130,167,76,63,192,12,0,0,34,166,
+ 50,133,130,151,0,128,131,151,39,16,2,0,
+ 36,24,98,0,255,255,2,50,0,128,131,167,
+ 0,0,35,166,226,255,64,20,36,16,18,2,
+ 5,162,16,60,50,133,130,151,48,133,132,151,
+ 0,128,131,151,39,16,68,0,36,24,98,0,
+ 0,128,131,167,76,63,192,12,0,0,3,166,
+ 0,128,131,151,48,133,130,151,0,0,0,0,
+ 37,24,98,0,0,0,3,166,0,0,4,150,
+ 20,133,130,151,0,128,131,167,36,16,68,0,
+ 9,0,64,20,0,0,0,0,76,63,192,12,
+ 0,0,0,0,0,0,3,150,20,133,130,151,
+ 0,0,0,0,36,16,67,0,249,255,64,16,
+ 0,0,0,0,48,133,130,151,0,128,131,151,
+ 39,16,2,0,36,24,98,0,5,162,2,60,
+ 0,128,131,167,0,0,67,164,0,1,19,36,
+ 5,162,16,60,50,133,130,151,48,133,132,151,
+ 0,128,131,151,39,16,68,0,36,24,98,0,
+ 0,128,131,167,0,0,3,166,76,63,192,12,
+ 0,1,17,36,0,128,130,151,48,133,131,151,
+ 5,162,18,60,37,16,67,0,0,128,130,167,
+ 0,0,2,166,36,16,113,2,6,0,64,16,
+ 0,0,0,0,0,128,131,151,20,133,130,151,
+ 0,0,0,0,220,66,192,8,37,24,98,0,
+ 20,133,130,151,0,128,131,151,39,16,2,0,
+ 36,24,98,0,0,128,131,167,0,0,67,166,
+ 76,63,192,12,66,136,17,0,0,128,130,151,
+ 50,133,131,151,0,0,0,0,37,16,67,0,
+ 0,128,130,167,76,63,192,12,0,0,66,166,
+ 50,133,130,151,0,128,131,151,39,16,2,0,
+ 36,24,98,0,255,255,34,50,0,128,131,167,
+ 0,0,67,166,226,255,64,20,36,16,113,2,
+ 60,65,192,12,33,32,128,2,38,16,162,2,
+ 255,255,66,48,1,0,66,44,44,0,191,143,
+ 40,0,182,143,36,0,181,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,48,0,189,39,
+ 224,255,189,39,24,0,178,175,33,144,0,0,
+ 64,16,4,0,16,0,176,175,33,128,68,0,
+ 33,32,0,2,20,0,177,175,255,255,177,48,
+ 28,0,191,175,162,65,192,12,33,40,32,2,
+ 8,0,64,16,1,0,4,38,162,65,192,12,
+ 33,40,32,2,4,0,64,16,2,0,4,38,
+ 162,65,192,12,33,40,32,2,43,144,2,0,
+ 33,16,64,2,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,216,255,189,39,64,16,4,0,
+ 24,0,178,175,33,144,68,0,33,32,64,2,
+ 36,0,191,175,32,0,180,175,28,0,179,175,
+ 20,0,177,175,60,65,192,12,16,0,176,175,
+ 1,0,84,38,33,32,128,2,60,65,192,12,
+ 33,136,64,0,2,0,83,38,33,32,96,2,
+ 60,65,192,12,33,128,64,0,255,255,35,50,
+ 255,255,17,50,8,0,113,20,0,0,0,0,
+ 255,255,66,48,3,0,34,18,33,32,96,2,
+ 162,65,192,12,33,40,32,2,66,67,192,8,
+ 33,16,32,2,255,255,80,48,4,0,112,16,
+ 33,32,128,2,5,0,48,22,255,255,2,36,
+ 33,32,64,2,162,65,192,12,33,40,0,2,
+ 33,16,0,2,36,0,191,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,40,0,189,39,
+ 0,0,0,0,0,0,0,0,0,96,8,64,
+ 0,0,0,0,254,255,1,36,36,72,1,1,
+ 0,96,137,64,0,0,133,164,2,0,132,32,
+ 2,44,5,0,0,0,133,164,0,96,136,64,
+ 8,0,224,3,0,0,0,0,208,255,189,39,
+ 32,0,178,175,33,144,128,0,28,0,177,175,
+ 33,136,160,0,36,0,179,175,33,152,192,0,
+ 40,0,180,175,33,160,224,0,44,0,191,175,
+ 2,0,32,22,24,0,176,175,255,255,17,36,
+ 0,0,66,142,0,0,0,0,36,16,81,0,
+ 3,0,83,20,0,0,0,0,121,67,192,8,
+ 1,0,2,36,11,0,128,26,33,128,0,0,
+ 143,63,192,12,0,0,0,0,0,0,66,142,
+ 0,0,0,0,36,16,81,0,246,255,83,16,
+ 1,0,16,38,42,16,20,2,247,255,64,20,
+ 0,0,0,0,33,16,0,0,44,0,191,143,
+ 40,0,180,143,36,0,179,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 48,0,189,39,208,255,189,39,32,0,178,175,
+ 33,144,128,0,28,0,177,175,33,136,160,0,
+ 36,0,179,175,33,152,192,0,40,0,180,175,
+ 33,160,224,0,44,0,191,175,2,0,32,22,
+ 24,0,176,175,255,255,17,52,0,0,66,150,
+ 0,0,0,0,36,16,81,0,3,0,83,20,
+ 0,0,0,0,162,67,192,8,1,0,2,36,
+ 11,0,128,26,33,128,0,0,143,63,192,12,
+ 0,0,0,0,0,0,66,150,0,0,0,0,
+ 36,16,81,0,246,255,83,16,1,0,16,38,
+ 42,16,20,2,247,255,64,20,0,0,0,0,
+ 33,16,0,0,44,0,191,143,40,0,180,143,
+ 36,0,179,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,48,0,189,39,
+ 208,255,189,39,32,0,178,175,33,144,128,0,
+ 28,0,177,175,33,136,160,0,36,0,179,175,
+ 33,152,192,0,40,0,180,175,33,160,224,0,
+ 44,0,191,175,2,0,32,22,24,0,176,175,
+ 255,0,17,36,0,0,66,146,0,0,0,0,
+ 36,16,81,0,3,0,83,20,0,0,0,0,
+ 203,67,192,8,1,0,2,36,11,0,128,26,
+ 33,128,0,0,143,63,192,12,0,0,0,0,
+ 0,0,66,146,0,0,0,0,36,16,81,0,
+ 246,255,83,16,1,0,16,38,42,16,20,2,
+ 247,255,64,20,0,0,0,0,33,16,0,0,
+ 44,0,191,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,48,0,189,39,208,255,189,39,
+ 32,0,178,175,33,144,128,0,28,0,177,175,
+ 33,136,160,0,36,0,179,175,33,152,192,0,
+ 40,0,180,175,33,160,224,0,44,0,191,175,
+ 2,0,32,22,24,0,176,175,255,255,17,36,
+ 0,0,66,142,0,0,0,0,36,16,81,0,
+ 3,0,83,20,0,0,0,0,244,67,192,8,
+ 1,0,2,36,11,0,128,26,33,128,0,0,
+ 143,63,192,12,0,0,0,0,0,0,66,142,
+ 0,0,0,0,36,16,81,0,246,255,83,20,
+ 1,0,16,38,42,16,20,2,247,255,64,20,
+ 0,0,0,0,33,16,0,0,44,0,191,143,
+ 40,0,180,143,36,0,179,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 48,0,189,39,208,255,189,39,32,0,178,175,
+ 33,144,128,0,28,0,177,175,33,136,160,0,
+ 36,0,179,175,33,152,192,0,40,0,180,175,
+ 33,160,224,0,44,0,191,175,2,0,32,22,
+ 24,0,176,175,255,255,17,52,0,0,66,150,
+ 0,0,0,0,36,16,81,0,3,0,83,20,
+ 0,0,0,0,29,68,192,8,1,0,2,36,
+ 11,0,128,26,33,128,0,0,143,63,192,12,
+ 0,0,0,0,0,0,66,150,0,0,0,0,
+ 36,16,81,0,246,255,83,20,1,0,16,38,
+ 42,16,20,2,247,255,64,20,0,0,0,0,
+ 33,16,0,0,44,0,191,143,40,0,180,143,
+ 36,0,179,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,48,0,189,39,
+ 208,255,189,39,32,0,178,175,33,144,128,0,
+ 28,0,177,175,33,136,160,0,36,0,179,175,
+ 33,152,192,0,40,0,180,175,33,160,224,0,
+ 44,0,191,175,2,0,32,22,24,0,176,175,
+ 255,0,17,36,0,0,66,146,0,0,0,0,
+ 36,16,81,0,3,0,83,20,0,0,0,0,
+ 70,68,192,8,1,0,2,36,11,0,128,26,
+ 33,128,0,0,143,63,192,12,0,0,0,0,
+ 0,0,66,146,0,0,0,0,36,16,81,0,
+ 246,255,83,20,1,0,16,38,42,16,20,2,
+ 247,255,64,20,0,0,0,0,33,16,0,0,
+ 44,0,191,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,48,0,189,39,0,0,0,0,
+ 0,0,0,0,248,255,189,39,255,255,195,36,
+ 8,0,192,16,33,56,128,0,255,255,6,36,
+ 0,0,162,144,1,0,165,36,255,255,99,36,
+ 0,0,130,160,251,255,102,20,1,0,132,36,
+ 33,16,224,0,8,0,224,3,8,0,189,39,
+ 0,0,0,0,0,0,0,0,0,96,8,64,
+ 1,0,9,60,0,96,137,64,15,0,138,48,
+ 33,40,170,0,192,255,165,36,0,0,128,160,
+ 16,0,128,160,32,0,128,160,251,255,160,28,
+ 48,0,128,160,64,0,132,36,0,96,136,64,
+ 0,0,0,0,0,0,0,0,8,0,224,3,
+ 0,0,0,0,0,0,0,0,1,131,2,60,
+ 224,17,66,36,0,32,9,60,37,16,73,0,
+ 8,0,64,0,0,0,0,0,0,96,8,64,
+ 3,0,9,60,0,96,137,64,15,0,138,48,
+ 33,40,170,0,192,255,165,36,0,0,128,160,
+ 16,0,128,160,32,0,128,160,251,255,160,28,
+ 48,0,128,160,64,0,132,36,0,96,136,64,
+ 0,0,0,0,0,0,0,0,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,163,3,60,192,3,99,140,
+ 0,163,2,60,188,3,66,140,0,0,0,0,
+ 16,0,98,16,255,255,2,36,0,163,2,60,
+ 188,3,66,140,0,163,1,60,33,8,34,0,
+ 200,3,34,144,0,163,3,60,188,3,99,140,
+ 0,163,4,60,196,3,132,140,0,22,2,0,
+ 3,22,2,0,1,0,99,36,36,24,100,0,
+ 0,163,1,60,188,3,35,172,8,0,224,3,
+ 0,0,0,0,0,163,6,60,0,1,198,52,
+ 10,0,9,36,255,255,8,36,13,0,7,36,
+ 0,163,3,60,192,3,99,140,0,163,2,60,
+ 188,3,66,140,0,0,0,0,17,0,98,16,
+ 255,255,5,36,0,163,2,60,188,3,66,140,
+ 0,0,0,0,33,16,70,0,200,2,66,144,
+ 0,0,0,0,0,22,2,0,3,46,2,0,
+ 0,163,2,60,188,3,66,140,0,163,3,60,
+ 196,3,99,140,1,0,66,36,36,16,67,0,
+ 0,163,1,60,188,3,34,172,11,0,169,16,
+ 11,0,162,40,5,0,64,16,0,0,0,0,
+ 228,255,168,16,0,0,0,0,206,68,192,8,
+ 0,0,133,160,224,255,167,16,0,0,0,0,
+ 206,68,192,8,0,0,133,160,208,68,192,8,
+ 0,0,128,160,169,68,192,8,1,0,132,36,
+ 8,0,224,3,0,0,0,0,0,0,0,0,
+ 0,0,0,0,208,255,189,39,24,0,176,175,
+ 33,128,128,0,36,0,179,175,33,152,160,0,
+ 28,0,177,175,33,136,0,0,32,0,178,175,
+ 33,144,0,2,5,0,64,22,40,0,191,175,
+ 54,0,96,22,33,16,0,0,22,69,192,8,
+ 0,0,32,174,0,0,67,130,0,0,0,0,
+ 233,68,192,8,32,0,2,36,0,0,3,130,
+ 32,0,2,36,253,255,98,16,1,0,16,38,
+ 255,255,16,38,9,0,2,36,249,255,98,16,
+ 1,0,16,38,255,255,16,38,0,0,3,130,
+ 45,0,2,36,4,0,98,20,43,0,2,36,
+ 1,0,16,38,250,68,192,8,1,0,17,36,
+ 3,0,98,20,33,32,0,2,1,0,16,38,
+ 33,32,0,2,44,69,192,12,16,0,165,39,
+ 7,0,96,18,33,24,64,0,16,0,162,143,
+ 0,0,0,0,2,0,80,20,0,0,0,0,
+ 33,16,64,2,0,0,98,174,6,0,32,18,
+ 0,128,2,60,43,16,67,0,5,0,64,20,
+ 255,127,2,60,19,69,192,8,33,16,96,0,
+ 5,0,97,4,255,127,2,60,7,0,32,18,
+ 255,255,66,52,22,69,192,8,0,128,2,60,
+ 33,16,96,0,2,0,32,18,0,0,0,0,
+ 35,16,2,0,40,0,191,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,48,0,189,39,0,0,0,0,
+ 0,0,0,0,0,0,0,0,208,255,130,36,
+ 10,0,66,44,7,0,64,20,1,0,2,36,
+ 191,255,130,36,6,0,66,44,3,0,64,20,
+ 1,0,2,36,159,255,130,36,6,0,66,44,
+ 8,0,224,3,0,0,0,0,248,255,189,39,
+ 0,0,176,175,33,56,0,0,33,72,0,0,
+ 33,80,0,0,33,112,0,0,5,0,128,20,
+ 33,200,128,0,110,0,160,20,33,16,0,0,
+ 163,69,192,8,0,0,192,173,0,0,131,128,
+ 32,0,2,36,253,255,98,16,1,0,132,36,
+ 255,255,132,36,9,0,2,36,249,255,98,16,
+ 1,0,132,36,255,255,132,36,0,0,131,128,
+ 43,0,2,36,3,0,98,20,45,0,2,36,
+ 75,69,192,8,1,0,132,36,3,0,98,20,
+ 0,0,0,0,1,0,132,36,1,0,14,36,
+ 3,0,192,16,16,0,2,36,16,0,194,20,
+ 0,0,0,0,0,0,131,128,48,0,2,36,
+ 9,0,98,20,10,0,2,36,1,0,131,128,
+ 88,0,2,36,3,0,98,16,120,0,2,36,
+ 3,0,98,20,8,0,2,36,16,0,2,36,
+ 2,0,132,36,2,0,192,20,0,0,0,0,
+ 33,48,64,0,0,0,131,128,255,255,2,36,
+ 27,0,70,0,2,0,192,20,0,0,0,0,
+ 13,0,7,0,18,64,0,0,16,192,0,0,
+ 0,0,0,0,0,0,0,0,42,0,96,16,
+ 48,0,98,44,48,0,207,36,11,0,205,40,
+ 87,0,204,36,55,0,203,36,5,0,64,20,
+ 43,16,111,0,3,0,64,16,0,0,0,0,
+ 130,69,192,8,208,255,99,36,30,0,160,21,
+ 97,0,98,44,6,0,64,20,65,0,98,44,
+ 43,16,108,0,3,0,64,16,65,0,98,44,
+ 130,69,192,8,169,255,99,36,21,0,64,20,
+ 43,16,107,0,19,0,64,16,0,0,0,0,
+ 201,255,99,36,43,16,7,1,6,0,64,20,
+ 1,0,9,36,6,0,232,20,24,0,230,0,
+ 43,16,3,3,3,0,64,16,0,0,0,0,
+ 1,0,10,36,24,0,230,0,1,0,132,36,
+ 18,128,0,0,33,56,3,2,0,0,131,128,
+ 0,0,0,0,220,255,96,20,48,0,98,44,
+ 5,0,64,17,0,0,0,0,13,0,160,16,
+ 255,255,2,36,163,69,192,8,0,0,164,172,
+ 6,0,160,16,0,0,0,0,3,0,32,21,
+ 0,0,0,0,160,69,192,8,0,0,185,172,
+ 0,0,164,172,2,0,192,17,33,16,224,0,
+ 35,16,2,0,0,0,176,143,8,0,224,3,
+ 8,0,189,39,0,0,0,0,0,0,0,0,
+ 200,255,189,39,16,0,176,175,7,162,16,60,
+ 236,0,16,54,255,240,3,60,255,255,99,52,
+ 63,0,132,48,36,0,181,175,128,1,149,52,
+ 24,0,178,175,0,1,18,36,20,0,177,175,
+ 7,162,17,60,236,0,49,54,44,0,183,175,
+ 0,4,23,60,32,0,180,175,255,251,20,60,
+ 255,255,148,54,40,0,182,175,0,1,22,60,
+ 28,0,179,175,255,254,19,60,48,0,191,175,
+ 0,0,2,142,0,0,0,0,36,16,67,0,
+ 224,133,130,175,0,0,2,174,76,63,192,12,
+ 255,255,115,54,224,133,130,143,0,2,3,60,
+ 37,16,67,0,224,133,130,175,0,0,2,174,
+ 36,16,85,2,5,0,64,16,0,0,0,0,
+ 224,133,130,143,0,0,0,0,214,69,192,8,
+ 37,16,87,0,224,133,130,143,0,0,0,0,
+ 36,16,84,0,224,133,130,175,76,63,192,12,
+ 0,0,34,174,224,133,130,143,0,0,0,0,
+ 37,16,86,0,224,133,130,175,0,0,34,174,
+ 76,63,192,12,66,144,18,0,224,133,130,143,
+ 0,0,0,0,36,16,83,0,224,133,130,175,
+ 0,0,34,174,255,255,66,50,230,255,64,20,
+ 36,16,85,2,33,136,0,0,16,0,16,36,
+ 7,162,18,60,236,0,82,54,0,1,22,60,
+ 0,8,21,60,255,254,19,60,255,255,115,54,
+ 255,255,20,36,76,63,192,12,0,0,0,0,
+ 224,133,130,143,0,0,0,0,37,16,86,0,
+ 224,133,130,175,76,63,192,12,0,0,66,174,
+ 7,0,0,18,0,0,0,0,0,0,66,142,
+ 0,0,0,0,36,16,85,0,2,0,64,16,
+ 64,136,17,0,1,0,49,54,76,63,192,12,
+ 255,255,16,38,224,133,130,143,0,0,0,0,
+ 36,16,83,0,224,133,130,175,0,0,66,174,
+ 232,255,20,22,7,162,4,60,236,0,132,52,
+ 255,253,3,60,224,133,130,143,255,255,99,52,
+ 36,16,67,0,224,133,130,175,0,0,130,172,
+ 255,255,34,50,48,0,191,143,44,0,183,143,
+ 40,0,182,143,36,0,181,143,32,0,180,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,56,0,189,39,
+ 200,255,189,39,48,0,190,175,33,240,160,0,
+ 40,0,182,175,63,0,150,48,33,32,192,2,
+ 52,0,191,175,44,0,183,175,36,0,181,175,
+ 32,0,180,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,168,69,192,12,16,0,176,175,
+ 33,152,64,0,255,255,195,51,255,255,98,50,
+ 3,0,98,20,7,162,16,60,131,71,192,8,
+ 1,0,2,36,236,0,16,54,255,252,3,60,
+ 255,255,99,52,0,1,18,36,7,162,17,60,
+ 236,0,49,54,255,251,21,60,255,255,181,54,
+ 0,1,23,60,255,254,20,60,224,133,130,143,
+ 0,0,0,0,36,16,67,0,224,133,130,175,
+ 0,0,2,174,76,63,192,12,255,255,148,54,
+ 224,133,130,143,0,2,3,60,37,16,67,0,
+ 224,133,130,175,0,0,2,174,48,1,66,50,
+ 5,0,64,16,0,4,6,60,224,133,130,143,
+ 0,0,0,0,83,70,192,8,37,16,70,0,
+ 224,133,130,143,0,0,0,0,36,16,85,0,
+ 224,133,130,175,76,63,192,12,0,0,34,174,
+ 224,133,130,143,0,0,0,0,37,16,87,0,
+ 224,133,130,175,0,0,34,174,76,63,192,12,
+ 66,144,18,0,224,133,130,143,0,0,0,0,
+ 36,16,84,0,224,133,130,175,0,0,34,174,
+ 255,255,66,50,230,255,64,20,48,1,66,50,
+ 255,255,195,51,255,255,98,50,39,16,2,0,
+ 36,24,98,0,88,0,96,16,192,1,213,54,
+ 7,162,16,60,236,0,16,54,255,252,3,60,
+ 255,255,99,52,0,1,18,36,7,162,17,60,
+ 236,0,49,54,255,251,20,60,255,255,148,54,
+ 0,1,23,60,255,254,19,60,224,133,130,143,
+ 0,0,0,0,36,16,67,0,224,133,130,175,
+ 0,0,2,174,76,63,192,12,255,255,115,54,
+ 224,133,130,143,0,2,3,60,37,16,67,0,
+ 224,133,130,175,0,0,2,174,36,16,85,2,
+ 5,0,64,16,0,4,6,60,224,133,130,143,
+ 0,0,0,0,140,70,192,8,37,16,70,0,
+ 224,133,130,143,0,0,0,0,36,16,84,0,
+ 224,133,130,175,76,63,192,12,0,0,34,174,
+ 224,133,130,143,0,0,0,0,37,16,87,0,
+ 224,133,130,175,0,0,34,174,76,63,192,12,
+ 66,144,18,0,224,133,130,143,0,0,0,0,
+ 36,16,83,0,224,133,130,175,0,0,34,174,
+ 255,255,66,50,230,255,64,20,36,16,85,2,
+ 7,162,16,60,236,0,16,54,255,252,3,60,
+ 224,133,130,143,255,255,99,52,36,16,67,0,
+ 224,133,130,175,76,63,192,12,0,0,2,174,
+ 224,133,130,143,0,2,3,60,37,16,67,0,
+ 224,133,130,175,0,0,2,174,0,0,2,142,
+ 0,8,3,60,36,16,67,0,11,0,64,20,
+ 7,162,4,60,7,162,16,60,236,0,16,54,
+ 0,8,17,60,76,63,192,12,0,0,0,0,
+ 0,0,2,142,0,0,0,0,36,16,81,0,
+ 250,255,64,16,7,162,4,60,236,0,132,52,
+ 255,253,3,60,224,133,130,143,255,255,99,52,
+ 36,16,67,0,224,133,130,175,0,0,130,172,
+ 255,255,195,51,255,255,2,52,133,0,98,16,
+ 64,1,213,54,7,162,16,60,236,0,16,54,
+ 255,252,3,60,255,255,99,52,0,1,18,36,
+ 7,162,17,60,236,0,49,54,255,251,20,60,
+ 255,255,148,54,0,1,23,60,255,254,19,60,
+ 224,133,130,143,0,0,0,0,36,16,67,0,
+ 224,133,130,175,0,0,2,174,76,63,192,12,
+ 255,255,115,54,224,133,130,143,0,2,3,60,
+ 37,16,67,0,224,133,130,175,0,0,2,174,
+ 36,16,85,2,5,0,64,16,0,4,6,60,
+ 224,133,130,143,0,0,0,0,231,70,192,8,
+ 37,16,70,0,224,133,130,143,0,0,0,0,
+ 36,16,84,0,224,133,130,175,76,63,192,12,
+ 0,0,34,174,224,133,130,143,0,0,0,0,
+ 37,16,87,0,224,133,130,175,0,0,34,174,
+ 76,63,192,12,66,144,18,0,224,133,130,143,
+ 0,0,0,0,36,16,83,0,224,133,130,175,
+ 0,0,34,174,255,255,66,50,230,255,64,20,
+ 36,16,85,2,33,160,192,3,7,162,2,60,
+ 236,0,66,52,0,128,17,52,7,162,16,60,
+ 236,0,16,54,0,4,23,60,255,251,19,60,
+ 255,255,115,54,0,1,21,60,255,254,18,60,
+ 255,255,82,54,224,133,131,143,0,2,4,60,
+ 37,24,100,0,224,133,131,175,0,0,67,172,
+ 36,16,52,2,5,0,64,16,0,0,0,0,
+ 224,133,130,143,0,0,0,0,20,71,192,8,
+ 37,16,87,0,224,133,130,143,0,0,0,0,
+ 36,16,83,0,224,133,130,175,76,63,192,12,
+ 0,0,2,174,224,133,130,143,0,0,0,0,
+ 37,16,85,0,224,133,130,175,0,0,2,174,
+ 76,63,192,12,66,136,17,0,224,133,130,143,
+ 0,0,0,0,36,16,82,0,224,133,130,175,
+ 0,0,2,174,255,255,34,50,230,255,64,20,
+ 36,16,52,2,7,162,16,60,236,0,16,54,
+ 255,252,3,60,224,133,130,143,255,255,99,52,
+ 36,16,67,0,224,133,130,175,76,63,192,12,
+ 0,0,2,174,224,133,130,143,0,2,3,60,
+ 37,16,67,0,224,133,130,175,0,0,2,174,
+ 0,0,2,142,0,8,3,60,36,16,67,0,
+ 11,0,64,20,7,162,4,60,7,162,16,60,
+ 236,0,16,54,0,8,17,60,76,63,192,12,
+ 0,0,0,0,0,0,2,142,0,0,0,0,
+ 36,16,81,0,250,255,64,16,7,162,4,60,
+ 236,0,132,52,255,253,3,60,224,133,130,143,
+ 255,255,99,52,36,16,67,0,224,133,130,175,
+ 0,0,130,172,7,162,16,60,236,0,16,54,
+ 255,252,3,60,255,255,99,52,0,1,18,36,
+ 7,162,17,60,236,0,49,54,0,4,23,60,
+ 255,251,20,60,255,255,148,54,0,1,21,60,
+ 255,254,19,60,224,133,130,143,0,0,0,0,
+ 36,16,67,0,224,133,130,175,0,0,2,174,
+ 76,63,192,12,255,255,115,54,224,133,130,143,
+ 0,2,3,60,37,16,67,0,224,133,130,175,
+ 0,0,2,174,0,1,66,50,5,0,64,16,
+ 0,0,0,0,224,133,130,143,0,0,0,0,
+ 108,71,192,8,37,16,87,0,224,133,130,143,
+ 0,0,0,0,36,16,84,0,224,133,130,175,
+ 76,63,192,12,0,0,34,174,224,133,130,143,
+ 0,0,0,0,37,16,85,0,224,133,130,175,
+ 0,0,34,174,76,63,192,12,66,144,18,0,
+ 224,133,130,143,0,0,0,0,36,16,83,0,
+ 224,133,130,175,0,0,34,174,255,255,66,50,
+ 230,255,64,20,0,1,66,50,168,69,192,12,
+ 33,32,192,2,38,16,194,3,255,255,66,48,
+ 1,0,66,44,52,0,191,143,48,0,190,143,
+ 44,0,183,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 56,0,189,39,0,0,0,0,248,255,189,39,
+ 255,255,195,36,6,0,192,16,33,16,128,0,
+ 255,255,6,36,0,0,133,160,255,255,99,36,
+ 253,255,102,20,1,0,132,36,8,0,189,39,
+ 8,0,224,3,0,0,0,0,159,71,192,8,
+ 33,24,0,0,1,0,99,36,0,0,130,128,
+ 0,0,0,0,252,255,64,20,1,0,132,36,
+ 8,0,224,3,33,16,96,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,255,255,198,36,
+ 10,0,192,16,0,0,0,0,0,0,131,128,
+ 0,0,162,128,0,0,0,0,5,0,98,20,
+ 0,0,0,0,1,0,132,36,255,255,198,36,
+ 248,255,192,20,1,0,165,36,0,0,131,144,
+ 0,0,162,144,0,0,0,0,8,0,224,3,
+ 35,16,98,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,196,71,192,8,240,255,189,39,
+ 0,0,163,128,3,22,2,0,8,0,67,20,
+ 0,0,0,0,1,0,132,36,1,0,165,36,
+ 0,0,130,128,0,0,131,144,0,0,0,0,
+ 246,255,64,20,0,22,3,0,0,0,131,144,
+ 0,0,162,144,0,0,0,0,35,16,98,0,
+ 8,0,224,3,16,0,189,39,0,0,0,0,
+ 255,255,198,36,9,0,192,4,33,16,0,0,
+ 0,0,130,144,0,0,0,0,5,0,69,16,
+ 33,16,128,0,255,255,198,36,250,255,193,4,
+ 1,0,132,36,33,16,0,0,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,2,0,192,20,255,255,2,36,
+ 1,0,2,36,8,0,224,3,0,0,226,172,
+ 232,255,189,39,20,0,191,175,16,0,176,175,
+ 40,0,176,143,33,32,192,0,4,0,5,142,
+ 0,0,0,0,15,86,192,12,255,255,230,48,
+ 3,0,64,16,255,255,2,36,243,71,192,8,
+ 0,0,2,174,0,0,0,174,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 208,255,189,39,40,0,191,175,33,24,128,0,
+ 64,0,162,143,32,0,160,175,36,0,162,175,
+ 12,0,66,148,0,0,0,0,2,0,64,20,
+ 33,32,160,0,120,5,2,36,255,255,66,48,
+ 16,0,162,175,1,131,2,60,148,31,66,36,
+ 20,0,162,175,1,131,2,60,128,31,66,36,
+ 24,0,162,175,32,0,162,39,28,0,162,175,
+ 166,85,192,12,33,40,96,0,32,0,162,143,
+ 40,0,191,143,48,0,189,39,8,0,224,3,
+ 0,0,0,0,0,0,0,0,88,0,131,148,
+ 4,0,2,36,9,0,98,20,0,0,0,0,
+ 33,72,192,8,116,0,132,36,33,16,69,0,
+ 128,16,2,0,8,0,131,140,0,0,0,0,
+ 46,72,192,8,33,16,67,0,104,0,132,36,
+ 12,0,128,16,33,16,0,0,4,0,130,140,
+ 0,0,0,0,42,16,162,0,243,255,64,20,
+ 0,17,5,0,4,0,130,140,12,0,132,140,
+ 0,0,0,0,247,255,128,20,35,40,162,0,
+ 33,16,0,0,8,0,224,3,0,0,0,0,
+ 88,0,131,148,4,0,2,36,3,0,98,20,
+ 33,48,0,0,55,72,192,8,116,0,132,36,
+ 104,0,132,36,8,0,130,140,0,0,0,0,
+ 43,16,162,0,14,0,64,16,255,255,2,36,
+ 92,72,192,8,0,0,0,0,35,24,163,0,
+ 0,17,3,0,35,16,67,0,0,26,2,0,
+ 33,16,67,0,0,28,2,0,33,16,67,0,
+ 35,16,2,0,131,16,2,0,92,72,192,8,
+ 33,16,194,0,18,0,128,16,0,0,0,0,
+ 4,0,131,140,0,0,0,0,0,17,3,0,
+ 33,16,67,0,128,16,2,0,8,0,131,140,
+ 0,0,0,0,33,16,67,0,43,16,162,0,
+ 233,255,64,20,0,0,0,0,4,0,130,140,
+ 12,0,132,140,0,0,0,0,241,255,128,20,
+ 33,48,194,0,255,255,2,36,8,0,224,3,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 8,0,130,36,144,0,163,140,120,132,135,39,
+ 2,0,96,16,20,0,137,36,33,56,96,0,
+ 0,0,72,140,4,0,68,140,132,72,192,8,
+ 0,0,0,0,30,0,0,25,0,0,0,0,
+ 4,0,227,140,0,0,0,0,4,0,98,140,
+ 0,0,0,0,55,0,64,16,255,255,2,36,
+ 0,0,135,140,0,0,98,140,0,0,0,0,
+ 8,0,71,16,0,0,0,0,8,0,99,36,
+ 4,0,98,140,0,0,0,0,248,255,64,20,
+ 255,255,2,36,168,72,192,8,0,0,0,0,
+ 0,0,130,140,0,0,0,0,4,0,34,173,
+ 4,0,103,140,255,255,8,37,4,0,132,36,
+ 0,0,226,148,0,0,0,0,1,0,66,48,
+ 226,255,64,16,0,0,0,0,0,0,226,148,
+ 0,0,0,0,64,0,66,48,27,0,64,20,
+ 255,255,2,36,0,0,226,148,0,0,0,0,
+ 1,0,66,48,17,0,64,16,0,0,0,0,
+ 13,0,192,16,1,0,2,36,64,0,162,140,
+ 0,0,0,0,9,0,64,20,1,0,2,36,
+ 28,0,226,140,4,0,163,140,0,0,0,0,
+ 36,16,67,0,3,0,64,20,1,0,2,36,
+ 168,72,192,8,255,255,2,36,164,72,192,8,
+ 0,0,34,165,0,0,32,165,8,0,40,173,
+ 12,0,36,173,16,0,39,173,33,16,0,0,
+ 8,0,224,3,0,0,0,0,200,255,189,39,
+ 48,0,191,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,32,0,176,175,33,152,128,0,
+ 33,128,160,0,33,144,192,0,0,0,4,142,
+ 0,0,0,0,48,0,130,40,2,0,64,16,
+ 8,0,113,38,48,0,4,36,0,0,36,174,
+ 9,50,192,12,128,32,4,0,3,0,64,20,
+ 4,0,34,174,214,72,192,8,255,255,2,36,
+ 144,0,66,142,120,132,132,39,2,0,64,16,
+ 0,0,0,0,33,32,64,0,16,0,160,175,
+ 20,0,179,175,24,0,178,175,0,0,5,142,
+ 4,0,6,142,0,0,0,0,221,72,192,12,
+ 33,56,32,2,33,128,64,0,4,0,0,26,
+ 0,0,0,0,0,0,34,142,214,72,192,8,
+ 0,0,0,0,110,86,192,12,33,32,32,2,
+ 33,16,0,2,48,0,191,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,56,0,189,39,184,255,189,39,
+ 68,0,191,175,64,0,190,175,60,0,183,175,
+ 56,0,182,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,33,144,128,0,33,176,160,0,
+ 33,136,224,0,88,0,183,143,92,0,179,143,
+ 96,0,190,143,32,0,226,38,0,0,36,142,
+ 0,0,0,0,42,16,130,0,22,0,64,16,
+ 33,160,192,0,4,0,132,36,9,50,192,12,
+ 128,32,4,0,33,128,64,0,3,0,0,22,
+ 33,32,0,2,153,73,192,8,255,255,2,36,
+ 0,0,38,142,4,0,37,142,0,0,0,0,
+ 80,68,192,12,128,48,6,0,4,0,36,142,
+ 61,50,192,12,0,0,0,0,4,0,48,174,
+ 0,0,34,142,0,0,0,0,4,0,66,36,
+ 0,0,34,174,0,0,66,150,0,0,0,0,
+ 1,0,66,48,96,0,64,20,0,0,0,0,
+ 33,0,192,30,0,0,0,0,4,0,80,142,
+ 0,0,0,0,4,0,2,142,0,0,0,0,
+ 108,0,64,16,128,160,23,0,1,0,242,38,
+ 4,0,34,142,0,0,0,0,33,16,130,2,
+ 0,0,3,142,0,0,0,0,0,0,67,172,
+ 0,0,2,142,0,0,0,0,24,0,98,174,
+ 16,0,178,175,20,0,179,175,24,0,190,175,
+ 4,0,4,142,33,40,0,0,33,48,0,0,
+ 221,72,192,12,33,56,32,2,112,0,64,20,
+ 8,0,16,38,4,0,2,142,0,0,0,0,
+ 234,255,64,20,33,16,0,0,153,73,192,8,
+ 0,0,0,0,4,0,80,142,0,0,0,0,
+ 4,0,2,142,0,0,0,0,76,0,64,16,
+ 128,168,23,0,1,0,242,38,0,0,3,142,
+ 0,0,132,142,0,0,0,0,43,16,100,0,
+ 42,0,64,20,0,0,0,0,19,0,100,20,
+ 255,255,197,38,4,0,34,142,0,0,0,0,
+ 33,16,162,2,0,0,67,172,0,0,2,142,
+ 0,0,0,0,24,0,98,174,16,0,178,175,
+ 20,0,179,175,24,0,190,175,4,0,4,142,
+ 4,0,134,38,221,72,192,12,33,56,32,2,
+ 25,0,64,16,8,0,16,38,153,73,192,8,
+ 0,0,0,0,0,0,130,142,0,0,0,0,
+ 43,16,67,0,17,0,64,16,33,40,0,0,
+ 4,0,34,142,0,0,0,0,33,16,162,2,
+ 0,0,67,172,0,0,2,142,0,0,0,0,
+ 24,0,98,174,16,0,178,175,20,0,179,175,
+ 24,0,190,175,4,0,4,142,33,48,0,0,
+ 221,72,192,12,33,56,32,2,52,0,64,20,
+ 0,0,0,0,8,0,16,38,4,0,2,142,
+ 0,0,0,0,205,255,64,20,33,16,0,0,
+ 153,73,192,8,0,0,0,0,0,0,66,150,
+ 0,0,0,0,64,0,66,48,40,0,64,20,
+ 33,16,0,0,3,0,66,146,0,0,0,0,
+ 1,0,66,48,11,0,64,16,33,24,64,2,
+ 64,0,194,143,0,0,0,0,9,0,64,20,
+ 0,0,0,0,28,0,98,140,4,0,195,143,
+ 0,0,0,0,36,16,67,0,3,0,64,20,
+ 0,0,0,0,153,73,192,8,33,16,0,0,
+ 14,0,192,26,33,16,246,2,0,0,34,174,
+ 4,0,36,142,128,128,23,0,33,32,4,2,
+ 33,40,128,2,80,68,192,12,128,48,22,0,
+ 28,0,118,174,4,0,34,142,0,0,0,0,
+ 33,128,2,2,149,73,192,8,32,0,112,174,
+ 0,0,55,174,28,0,96,174,32,0,96,174,
+ 36,0,114,174,1,0,2,36,20,0,98,166,
+ 1,0,2,36,68,0,191,143,64,0,190,143,
+ 60,0,183,143,56,0,182,143,52,0,181,143,
+ 48,0,180,143,44,0,179,143,40,0,178,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 72,0,189,39,3,0,160,28,33,16,0,0,
+ 0,0,224,172,1,0,2,36,8,0,224,3,
+ 0,0,0,0,208,255,189,39,44,0,191,175,
+ 40,0,178,175,36,0,177,175,32,0,176,175,
+ 33,144,128,0,33,136,224,0,64,0,176,143,
+ 0,0,0,0,6,0,160,24,24,0,160,175,
+ 17,0,2,146,0,0,0,0,18,0,66,52,
+ 200,73,192,8,17,0,2,162,33,32,32,2,
+ 33,40,0,2,1,0,6,36,253,76,192,12,
+ 24,0,167,39,36,0,2,142,16,0,176,175,
+ 8,0,66,140,33,32,64,2,1,0,5,36,
+ 24,0,166,39,9,248,64,0,33,56,32,2,
+ 44,0,191,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,48,0,189,39,
+ 224,255,189,39,28,0,191,175,24,0,178,175,
+ 20,0,177,175,33,144,128,0,33,136,160,0,
+ 31,0,81,18,16,0,176,175,4,0,80,142,
+ 0,0,0,0,4,0,2,142,0,0,0,0,
+ 10,0,64,16,0,0,0,0,4,0,4,142,
+ 0,0,0,0,206,73,192,12,33,40,32,2,
+ 8,0,16,38,4,0,2,142,0,0,0,0,
+ 248,255,64,20,0,0,0,0,0,0,66,150,
+ 0,0,0,0,32,0,66,48,4,0,64,16,
+ 0,0,0,0,4,0,68,142,61,50,192,12,
+ 0,0,0,0,0,0,66,150,0,0,0,0,
+ 16,0,66,48,3,0,64,16,0,0,0,0,
+ 61,50,192,12,33,32,64,2,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,120,132,131,39,
+ 2,0,128,16,0,0,0,0,33,24,128,0,
+ 0,0,167,140,4,0,165,140,26,74,192,8,
+ 0,0,0,0,28,0,224,24,0,0,0,0,
+ 4,0,99,140,0,0,0,0,4,0,98,140,
+ 0,0,0,0,11,0,64,16,0,0,0,0,
+ 0,0,164,140,0,0,98,140,0,0,0,0,
+ 9,0,68,16,0,0,0,0,8,0,99,36,
+ 4,0,98,140,0,0,0,0,248,255,64,20,
+ 0,0,0,0,0,0,192,172,33,74,192,8,
+ 2,0,2,36,4,0,99,140,255,255,231,36,
+ 4,0,165,36,0,0,98,148,0,0,0,0,
+ 1,0,66,48,228,255,64,16,0,0,0,0,
+ 0,0,195,172,42,16,7,0,8,0,224,3,
+ 0,0,0,0,208,255,189,39,44,0,191,175,
+ 40,0,182,175,36,0,181,175,32,0,180,175,
+ 28,0,179,175,24,0,178,175,20,0,177,175,
+ 16,0,176,175,33,168,192,0,0,0,162,140,
+ 0,0,0,0,3,0,64,28,33,48,0,0,
+ 220,74,192,8,5,0,2,36,120,132,147,39,
+ 2,0,128,16,0,0,224,172,33,152,128,0,
+ 0,0,177,140,4,0,180,140,84,74,192,8,
+ 0,0,0,0,29,0,32,26,0,0,0,0,
+ 4,0,102,142,0,0,0,0,4,0,194,140,
+ 0,0,0,0,23,0,64,16,0,0,0,0,
+ 0,0,131,142,0,0,194,140,0,0,0,0,
+ 6,0,67,16,0,0,0,0,8,0,198,36,
+ 4,0,194,140,0,0,0,0,248,255,64,20,
+ 0,0,0,0,4,0,194,140,0,0,0,0,
+ 9,0,64,16,0,0,0,0,255,255,49,38,
+ 4,0,148,38,33,152,64,0,0,0,98,150,
+ 0,0,0,0,1,0,66,48,227,255,64,16,
+ 0,0,0,0,29,0,32,22,0,0,0,0,
+ 0,0,99,150,0,0,0,0,1,0,98,48,
+ 11,0,64,16,4,0,98,48,123,0,64,16,
+ 3,0,2,36,0,0,162,150,0,0,0,0,
+ 1,0,66,48,118,0,64,16,4,0,2,36,
+ 0,0,243,172,219,74,192,8,4,0,213,172,
+ 0,0,98,150,0,0,0,0,4,0,66,48,
+ 110,0,64,16,3,0,2,36,0,0,162,150,
+ 0,0,0,0,1,0,66,48,105,0,64,20,
+ 4,0,2,36,0,0,243,172,219,74,192,8,
+ 4,0,213,172,0,0,98,150,0,0,0,0,
+ 1,0,66,48,97,0,64,20,2,0,2,36,
+ 2,0,34,42,23,0,64,20,33,144,160,2,
+ 56,0,22,36,9,50,192,12,16,0,4,36,
+ 33,128,64,0,40,0,0,18,128,16,17,0,
+ 33,16,84,0,252,255,66,140,0,0,0,0,
+ 0,0,2,174,4,0,18,174,8,0,0,174,
+ 12,0,0,174,9,50,192,12,8,0,4,36,
+ 33,144,64,0,24,0,64,18,255,255,49,38,
+ 0,0,86,166,2,0,34,42,236,255,64,16,
+ 4,0,80,174,4,0,102,142,0,0,0,0,
+ 4,0,194,140,0,0,0,0,6,0,64,16,
+ 1,0,17,36,8,0,198,36,4,0,194,140,
+ 0,0,0,0,252,255,64,20,1,0,49,38,
+ 1,0,36,38,9,50,192,12,192,32,4,0,
+ 33,128,64,0,12,0,0,22,33,32,64,2,
+ 173,74,192,8,0,0,0,0,0,0,18,142,
+ 0,0,0,0,61,50,192,12,33,32,0,2,
+ 33,32,64,2,206,73,192,12,33,40,160,2,
+ 220,74,192,8,1,0,2,36,4,0,102,142,
+ 0,0,0,0,192,74,192,8,33,168,0,2,
+ 4,0,194,140,0,0,0,0,14,0,64,16,
+ 0,0,0,0,0,0,194,140,4,0,195,140,
+ 0,0,2,174,4,0,3,174,8,0,198,36,
+ 8,0,16,38,255,255,49,38,0,0,194,140,
+ 0,0,131,142,0,0,0,0,43,16,67,0,
+ 240,255,64,20,0,0,0,0,0,0,130,142,
+ 0,0,0,0,0,0,2,174,4,0,18,174,
+ 8,0,4,38,33,40,192,0,80,68,192,12,
+ 192,48,17,0,4,0,102,142,4,0,117,174,
+ 0,0,98,150,0,0,0,0,32,0,66,48,
+ 3,0,64,16,0,0,0,0,61,50,192,12,
+ 33,32,192,0,0,0,98,150,0,0,0,0,
+ 32,0,66,52,0,0,98,166,33,16,0,0,
+ 44,0,191,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 48,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,0,0,162,140,0,0,0,0,
+ 85,0,64,24,33,16,0,0,120,132,144,39,
+ 2,0,128,16,0,0,0,0,33,128,128,0,
+ 0,0,164,140,4,0,165,140,0,0,0,0,
+ 0,0,168,140,0,0,2,150,0,0,0,0,
+ 33,24,64,0,1,0,66,48,34,0,64,20,
+ 33,56,0,2,32,0,128,24,8,0,98,48,
+ 4,0,6,142,5,0,64,16,0,0,0,0,
+ 12,0,194,140,0,0,0,0,3,0,64,16,
+ 0,0,0,0,33,56,0,2,0,0,168,140,
+ 0,0,195,140,0,0,162,140,0,0,0,0,
+ 10,0,98,16,0,0,0,0,0,0,163,140,
+ 4,0,194,140,0,0,0,0,20,0,64,16,
+ 8,0,198,36,0,0,194,140,0,0,0,0,
+ 249,255,67,20,0,0,0,0,255,255,132,36,
+ 4,0,208,140,0,0,0,0,0,0,3,150,
+ 0,0,0,0,1,0,98,48,224,255,64,16,
+ 4,0,165,36,36,0,128,20,33,16,0,0,
+ 0,0,2,150,0,0,0,0,2,0,66,48,
+ 3,0,64,20,0,0,0,0,65,75,192,8,
+ 33,16,0,0,4,0,230,140,0,0,0,0,
+ 0,0,194,140,0,0,0,0,7,0,72,16,
+ 0,0,0,0,8,0,198,36,0,0,194,140,
+ 0,0,0,0,253,255,72,20,8,0,198,36,
+ 248,255,198,36,4,0,199,140,0,0,0,0,
+ 10,0,224,16,33,32,224,0,8,0,194,140,
+ 12,0,195,140,0,0,194,172,4,0,195,172,
+ 8,0,198,36,4,0,194,140,0,0,0,0,
+ 248,255,64,20,33,32,224,0,206,73,192,12,
+ 33,40,0,2,33,16,0,2,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 192,255,189,39,56,0,191,175,52,0,181,175,
+ 48,0,180,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,32,0,176,175,33,136,128,0,
+ 33,144,160,0,33,152,192,0,33,160,224,0,
+ 80,0,181,143,0,0,0,0,36,0,162,142,
+ 0,0,0,0,20,0,80,140,33,32,128,2,
+ 48,72,192,12,33,40,160,2,16,0,3,142,
+ 0,0,0,0,16,0,163,175,20,0,180,175,
+ 24,0,162,175,0,0,2,142,1,0,4,36,
+ 33,40,32,2,33,48,64,2,9,248,64,0,
+ 33,56,96,2,6,0,64,20,33,32,128,2,
+ 17,0,162,146,0,0,0,0,1,0,66,52,
+ 113,75,192,8,17,0,162,162,33,40,160,2,
+ 59,77,192,12,33,48,64,0,56,0,191,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,64,0,189,39,192,255,189,39,
+ 60,0,191,175,56,0,178,175,52,0,177,175,
+ 48,0,176,175,33,136,224,0,80,0,178,143,
+ 36,0,160,175,36,0,66,142,0,0,0,0,
+ 20,0,67,140,2,0,80,144,0,0,0,0,
+ 255,0,2,50,254,255,71,36,70,0,226,44,
+ 110,0,64,16,128,16,7,0,2,131,1,60,
+ 33,8,34,0,160,151,34,140,0,0,0,0,
+ 8,0,64,0,0,0,0,0,16,0,177,175,
+ 8,0,98,140,16,0,103,140,9,248,64,0,
+ 0,0,0,0,2,0,3,36,16,0,67,162,
+ 251,75,192,8,40,0,66,174,16,0,177,175,
+ 8,0,98,140,16,0,103,140,9,248,64,0,
+ 0,0,0,0,40,0,162,175,16,0,80,162,
+ 40,0,162,143,0,0,0,0,251,75,192,8,
+ 40,0,66,174,32,0,162,39,16,0,162,175,
+ 20,0,177,175,36,0,162,39,24,0,162,175,
+ 8,0,98,140,16,0,103,140,9,248,64,0,
+ 0,0,0,0,33,48,64,0,16,0,80,162,
+ 17,0,66,146,0,0,0,0,2,0,66,52,
+ 17,0,66,162,36,0,162,143,0,0,0,0,
+ 43,16,2,0,40,0,66,166,44,0,70,174,
+ 32,0,162,151,0,0,0,0,33,16,194,0,
+ 48,0,66,174,255,75,192,8,52,0,64,166,
+ 16,0,177,175,36,0,162,39,20,0,162,175,
+ 8,0,98,140,16,0,103,140,9,248,64,0,
+ 0,0,0,0,33,128,64,0,8,0,0,22,
+ 33,32,32,2,16,0,160,175,33,40,64,2,
+ 33,48,0,0,226,76,192,12,33,56,0,0,
+ 255,75,192,8,0,0,0,0,36,0,162,143,
+ 0,0,0,0,16,0,162,175,0,0,6,142,
+ 4,0,7,142,0,0,0,0,226,76,192,12,
+ 33,40,64,2,36,0,162,143,0,0,0,0,
+ 35,0,64,16,0,0,0,0,61,50,192,12,
+ 33,32,0,2,255,75,192,8,0,0,0,0,
+ 5,0,2,36,251,75,192,8,16,0,66,162,
+ 16,0,177,175,40,0,162,39,20,0,162,175,
+ 8,0,98,140,16,0,103,140,9,248,64,0,
+ 0,0,0,0,33,48,64,0,7,0,192,16,
+ 64,0,2,36,3,0,194,136,0,0,194,152,
+ 0,0,0,0,43,0,162,171,40,0,162,187,
+ 64,0,2,36,16,0,66,162,40,0,162,143,
+ 0,0,0,0,251,75,192,8,40,0,66,174,
+ 5,0,2,36,96,0,34,174,17,0,66,146,
+ 0,0,0,0,2,0,66,52,17,0,66,162,
+ 60,0,191,143,56,0,178,143,52,0,177,143,
+ 48,0,176,143,8,0,224,3,64,0,189,39,
+ 80,255,189,39,168,0,191,175,164,0,179,175,
+ 160,0,178,175,156,0,177,175,152,0,176,175,
+ 33,152,128,0,33,136,224,0,192,0,178,143,
+ 0,0,0,0,36,0,66,142,0,0,0,0,
+ 20,0,67,140,0,0,0,0,16,0,98,140,
+ 0,0,0,0,16,0,162,175,20,0,177,175,
+ 4,0,98,140,0,0,0,0,9,248,64,0,
+ 24,0,167,39,33,128,64,0,6,0,0,22,
+ 33,32,32,2,17,0,66,146,0,0,0,0,
+ 18,0,66,52,45,76,192,8,17,0,66,162,
+ 33,40,64,2,33,48,0,2,253,76,192,12,
+ 24,0,167,39,16,0,178,175,33,32,96,2,
+ 33,40,0,2,24,0,166,39,122,75,192,12,
+ 33,56,32,2,168,0,191,143,164,0,179,143,
+ 160,0,178,143,156,0,177,143,152,0,176,143,
+ 8,0,224,3,176,0,189,39,192,255,189,39,
+ 56,0,191,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,33,152,128,0,33,160,160,0,
+ 33,168,192,0,33,136,224,0,80,0,178,143,
+ 0,0,0,0,36,0,66,142,0,0,0,0,
+ 20,0,80,140,33,32,32,2,48,72,192,12,
+ 33,40,64,2,16,0,3,142,0,0,0,0,
+ 16,0,163,175,20,0,177,175,24,0,162,175,
+ 0,0,2,142,33,32,0,0,33,40,96,2,
+ 33,48,128,2,9,248,64,0,33,56,160,2,
+ 5,0,64,16,33,32,32,2,200,76,192,12,
+ 33,40,64,2,95,76,192,8,0,0,0,0,
+ 16,0,178,175,33,32,96,2,33,40,128,2,
+ 33,48,160,2,122,75,192,12,33,56,32,2,
+ 56,0,191,143,52,0,181,143,48,0,180,143,
+ 44,0,179,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,64,0,189,39,
+ 192,255,189,39,56,0,191,175,52,0,181,175,
+ 48,0,180,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,32,0,176,175,33,152,128,0,
+ 33,160,160,0,33,168,192,0,80,0,177,143,
+ 0,0,0,0,36,0,34,142,0,0,0,0,
+ 20,0,82,140,2,0,66,144,0,0,0,0,
+ 254,255,67,36,70,0,98,44,57,0,64,16,
+ 33,128,224,0,128,16,3,0,2,131,1,60,
+ 33,8,34,0,184,152,34,140,0,0,0,0,
+ 8,0,64,0,0,0,0,0,33,32,0,2,
+ 48,72,192,12,33,40,32,2,40,0,35,142,
+ 0,0,0,0,16,0,163,175,20,0,176,175,
+ 173,76,192,8,24,0,162,175,33,32,0,2,
+ 48,72,192,12,33,40,32,2,44,0,35,142,
+ 0,0,0,0,16,0,163,175,48,0,35,142,
+ 44,0,36,142,0,0,0,0,35,24,100,0,
+ 170,76,192,8,255,255,99,48,33,32,0,2,
+ 48,72,192,12,33,40,32,2,40,0,35,142,
+ 0,0,0,0,16,0,163,175,44,0,35,142,
+ 0,0,0,0,171,76,192,8,20,0,163,175,
+ 33,32,0,2,48,72,192,12,33,40,32,2,
+ 40,0,35,38,16,0,163,175,4,0,3,36,
+ 20,0,163,175,24,0,176,175,28,0,162,175,
+ 12,0,66,142,33,32,96,2,33,40,128,2,
+ 16,0,71,142,0,0,0,0,9,248,64,0,
+ 33,48,160,2,184,76,192,8,0,0,0,0,
+ 5,0,2,36,96,0,2,174,17,0,34,146,
+ 0,0,0,0,2,0,66,52,17,0,34,162,
+ 56,0,191,143,52,0,181,143,48,0,180,143,
+ 44,0,179,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,64,0,189,39,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 224,255,189,39,24,0,191,175,20,0,177,175,
+ 16,0,176,175,33,128,128,0,64,0,2,142,
+ 0,0,0,0,7,0,64,20,33,136,160,0,
+ 2,0,2,36,48,72,192,12,96,0,2,174,
+ 1,0,66,36,217,76,192,8,100,0,2,174,
+ 129,0,2,36,16,0,34,162,17,0,34,146,
+ 0,0,0,0,2,0,66,52,17,0,34,162,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,232,255,189,39,
+ 20,0,191,175,16,0,176,175,33,128,128,0,
+ 33,64,160,0,33,32,192,0,33,40,224,0,
+ 40,0,162,143,17,0,3,145,0,0,0,0,
+ 2,0,99,52,17,0,3,161,6,0,3,36,
+ 4,0,64,16,16,0,3,161,40,0,4,173,
+ 249,76,192,8,44,0,5,173,80,86,192,12,
+ 40,0,6,37,2,0,64,16,5,0,2,36,
+ 96,0,2,174,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,200,255,189,39,
+ 48,0,191,175,44,0,183,175,40,0,182,175,
+ 36,0,181,175,32,0,180,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,176,128,0,33,144,160,0,33,152,192,0,
+ 37,0,96,18,33,184,224,0,28,0,84,38,
+ 8,0,67,142,28,0,66,142,0,0,0,0,
+ 35,128,98,0,42,16,83,0,23,0,64,16,
+ 33,168,19,2,9,50,192,12,128,32,21,0,
+ 33,136,64,0,8,0,32,22,128,128,16,0,
+ 5,0,2,36,96,0,194,174,17,0,66,146,
+ 0,0,0,0,2,0,66,52,48,77,192,8,
+ 17,0,66,162,33,32,32,2,12,0,69,142,
+ 0,0,0,0,80,68,192,12,33,48,0,2,
+ 110,86,192,12,8,0,68,38,12,0,81,174,
+ 33,128,48,2,4,0,144,174,4,0,132,142,
+ 33,40,224,2,80,68,192,12,128,48,19,0,
+ 0,0,147,174,8,0,85,174,48,0,191,143,
+ 44,0,183,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 56,0,189,39,224,255,189,39,24,0,191,175,
+ 20,0,177,175,16,0,176,175,33,136,128,0,
+ 5,0,192,24,33,128,160,0,3,0,2,36,
+ 96,0,34,174,95,77,192,8,100,0,38,174,
+ 19,0,195,36,19,0,98,44,9,0,64,16,
+ 128,16,3,0,2,131,1,60,33,8,34,0,
+ 208,153,34,140,0,0,0,0,8,0,64,0,
+ 0,0,0,0,89,77,192,8,2,0,6,36,
+ 89,77,192,8,5,0,6,36,89,77,192,8,
+ 3,0,6,36,89,77,192,8,1,0,6,36,
+ 35,48,6,0,96,0,38,174,33,32,32,2,
+ 48,72,192,12,33,40,0,2,1,0,66,36,
+ 100,0,34,174,17,0,2,146,0,0,0,0,
+ 1,0,66,52,17,0,2,162,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,200,255,189,39,52,0,191,175,
+ 48,0,190,175,44,0,183,175,40,0,182,175,
+ 36,0,181,175,32,0,180,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,152,128,0,33,168,160,0,33,160,192,0,
+ 76,0,182,143,80,0,183,143,84,0,190,143,
+ 92,0,177,143,0,0,0,0,209,83,192,12,
+ 33,144,224,0,33,128,64,0,3,0,0,22,
+ 4,0,2,36,168,77,192,8,33,16,0,0,
+ 88,0,2,166,64,0,19,174,33,32,64,2,
+ 72,0,165,143,0,0,0,0,80,86,192,12,
+ 92,0,6,38,255,255,3,36,23,0,67,16,
+ 0,0,0,0,3,0,194,138,0,0,194,154,
+ 0,0,0,0,103,0,2,170,100,0,2,186,
+ 104,0,23,174,108,0,30,174,88,0,168,143,
+ 0,0,0,0,112,0,8,174,72,0,0,166,
+ 76,0,20,174,255,255,162,50,33,16,130,2,
+ 80,0,2,174,84,0,0,166,9,0,32,18,
+ 120,0,17,174,224,83,192,12,33,32,32,2,
+ 6,0,64,20,124,0,2,174,167,83,192,12,
+ 33,32,0,2,168,77,192,8,33,16,0,0,
+ 124,0,0,174,33,16,0,2,52,0,191,143,
+ 48,0,190,143,44,0,183,143,40,0,182,143,
+ 36,0,181,143,32,0,180,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,56,0,189,39,224,255,189,39,
+ 28,0,191,175,24,0,178,175,20,0,177,175,
+ 16,0,176,175,33,144,128,0,92,0,68,142,
+ 128,86,192,12,0,0,0,0,96,0,68,142,
+ 0,0,0,0,128,86,192,12,33,128,64,0,
+ 100,0,68,142,0,0,0,0,128,86,192,12,
+ 33,136,64,0,33,128,17,2,6,0,16,38,
+ 33,16,80,0,90,0,66,166,191,79,192,12,
+ 104,0,68,38,255,255,67,48,90,0,68,150,
+ 128,0,98,44,5,0,64,20,2,0,130,36,
+ 0,1,98,44,2,0,64,20,3,0,130,36,
+ 4,0,130,36,33,16,67,0,90,0,66,166,
+ 80,0,66,142,76,0,67,142,90,0,80,150,
+ 64,0,68,142,0,0,0,0,128,86,192,12,
+ 35,136,67,0,255,255,67,48,90,0,68,150,
+ 0,0,0,0,128,0,130,44,9,0,64,20,
+ 0,1,130,44,4,0,64,16,0,0,0,0,
+ 33,24,112,0,237,77,192,8,6,0,99,36,
+ 33,24,112,0,237,77,192,8,7,0,99,36,
+ 33,24,112,0,5,0,99,36,255,255,36,50,
+ 128,0,130,44,5,0,64,20,1,0,130,36,
+ 0,1,130,44,2,0,64,20,2,0,130,36,
+ 3,0,130,36,33,16,98,0,2,0,66,166,
+ 2,0,67,150,2,0,68,150,0,0,0,0,
+ 128,0,130,44,6,0,64,20,1,0,99,36,
+ 0,1,130,44,4,0,64,20,2,0,98,36,
+ 3,78,192,8,3,0,98,36,1,0,98,36,
+ 0,0,66,166,0,0,66,150,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,216,255,189,39,
+ 32,0,191,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,152,128,0,
+ 171,86,192,12,92,0,100,38,104,0,100,142,
+ 0,0,0,0,128,86,192,12,33,144,64,0,
+ 108,0,100,142,0,0,0,0,128,86,192,12,
+ 33,136,64,0,112,0,100,142,0,0,0,0,
+ 153,86,192,12,33,128,64,0,10,0,82,38,
+ 33,136,50,2,33,128,17,2,4,0,16,38,
+ 33,16,80,0,90,0,98,166,191,79,192,12,
+ 116,0,100,38,255,255,67,48,90,0,100,150,
+ 128,0,98,44,5,0,64,20,2,0,130,36,
+ 0,1,98,44,2,0,64,20,3,0,130,36,
+ 4,0,130,36,33,16,67,0,90,0,98,166,
+ 80,0,98,142,76,0,99,142,90,0,113,150,
+ 64,0,100,142,0,0,0,0,128,86,192,12,
+ 35,128,67,0,255,255,67,48,90,0,100,150,
+ 0,0,0,0,128,0,130,44,9,0,64,20,
+ 0,1,130,44,4,0,64,16,0,0,0,0,
+ 33,24,113,0,74,78,192,8,6,0,99,36,
+ 33,24,113,0,74,78,192,8,7,0,99,36,
+ 33,24,113,0,5,0,99,36,255,255,4,50,
+ 128,0,130,44,5,0,64,20,1,0,130,36,
+ 0,1,130,44,2,0,64,20,2,0,130,36,
+ 3,0,130,36,33,16,98,0,2,0,98,166,
+ 2,0,99,150,2,0,100,150,0,0,0,0,
+ 128,0,130,44,6,0,64,20,1,0,99,36,
+ 0,1,130,44,4,0,64,20,2,0,112,36,
+ 96,78,192,8,3,0,112,36,1,0,112,36,
+ 255,255,2,50,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,208,255,189,39,
+ 40,0,191,175,36,0,179,175,32,0,178,175,
+ 28,0,177,175,24,0,176,175,33,128,128,0,
+ 33,152,192,0,33,144,224,0,33,136,160,0,
+ 16,0,4,36,32,0,5,36,1,131,6,60,
+ 56,97,198,36,242,86,192,12,33,56,0,2,
+ 255,255,36,50,1,131,5,60,56,97,165,36,
+ 44,87,192,12,33,48,0,2,16,0,176,175,
+ 2,0,4,36,33,40,0,0,1,131,7,60,
+ 56,97,231,36,94,87,192,12,33,48,96,2,
+ 8,0,71,142,4,0,66,142,0,0,0,0,
+ 35,56,226,0,1,131,2,60,56,97,66,36,
+ 16,0,162,175,20,0,176,175,4,0,4,36,
+ 33,40,0,0,4,0,70,142,0,0,0,0,
+ 194,87,192,12,255,255,231,48,40,0,191,143,
+ 36,0,179,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,48,0,189,39,
+ 216,255,189,39,36,0,191,175,32,0,178,175,
+ 28,0,177,175,24,0,176,175,33,128,128,0,
+ 33,136,160,0,88,0,4,150,160,0,5,36,
+ 1,131,6,60,56,97,198,36,242,86,192,12,
+ 33,56,32,2,90,0,4,150,1,131,5,60,
+ 56,97,165,36,44,87,192,12,33,48,32,2,
+ 16,0,177,175,6,0,4,36,33,40,0,0,
+ 1,131,7,60,56,97,231,36,15,88,192,12,
+ 92,0,6,38,1,131,18,60,56,97,82,38,
+ 16,0,178,175,20,0,177,175,33,32,0,0,
+ 64,0,5,36,100,0,6,38,194,87,192,12,
+ 4,0,7,36,16,0,177,175,2,0,4,36,
+ 33,40,0,0,104,0,6,142,0,0,0,0,
+ 94,87,192,12,33,56,64,2,16,0,177,175,
+ 2,0,4,36,33,40,0,0,108,0,6,142,
+ 0,0,0,0,94,87,192,12,33,56,64,2,
+ 16,0,177,175,3,0,4,36,64,0,5,36,
+ 112,0,6,142,0,0,0,0,143,87,192,12,
+ 33,56,64,2,116,0,4,38,7,79,192,12,
+ 33,40,32,2,36,0,191,143,32,0,178,143,
+ 28,0,177,143,24,0,176,143,8,0,224,3,
+ 40,0,189,39,216,255,189,39,32,0,191,175,
+ 28,0,177,175,24,0,176,175,33,128,128,0,
+ 33,136,160,0,88,0,4,150,160,0,5,36,
+ 1,131,6,60,56,97,198,36,242,86,192,12,
+ 33,56,32,2,90,0,4,150,1,131,5,60,
+ 56,97,165,36,44,87,192,12,33,48,32,2,
+ 16,0,177,175,2,0,4,36,92,0,6,142,
+ 1,131,7,60,56,97,231,36,94,87,192,12,
+ 33,40,0,0,16,0,177,175,2,0,4,36,
+ 96,0,6,142,1,131,7,60,56,97,231,36,
+ 94,87,192,12,33,40,0,0,16,0,177,175,
+ 2,0,4,36,100,0,6,142,1,131,7,60,
+ 56,97,231,36,94,87,192,12,33,40,0,0,
+ 104,0,4,38,7,79,192,12,33,40,32,2,
+ 32,0,191,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,40,0,189,39,200,255,189,39,
+ 52,0,191,175,48,0,180,175,44,0,179,175,
+ 40,0,178,175,36,0,177,175,32,0,176,175,
+ 33,144,128,0,33,136,160,0,16,0,4,36,
+ 32,0,5,36,1,131,6,60,56,97,198,36,
+ 242,86,192,12,33,56,32,2,0,0,68,150,
+ 1,131,5,60,56,97,165,36,44,87,192,12,
+ 33,48,32,2,155,0,64,18,0,0,0,0,
+ 1,131,20,60,56,97,148,38,8,0,80,142,
+ 0,0,0,0,145,0,0,18,0,0,0,0,
+ 4,0,66,142,0,0,0,0,141,0,64,24,
+ 33,152,0,0,16,0,4,36,32,0,5,36,
+ 33,48,128,2,242,86,192,12,33,56,32,2,
+ 4,0,4,150,33,40,128,2,44,87,192,12,
+ 33,48,32,2,16,0,177,175,6,0,4,36,
+ 33,40,0,0,8,0,6,38,15,88,192,12,
+ 33,56,128,2,16,0,3,146,65,0,2,36,
+ 47,0,98,16,66,0,98,40,18,0,64,16,
+ 5,0,2,36,88,0,98,16,6,0,98,40,
+ 7,0,64,16,2,0,2,36,30,0,98,16,
+ 4,0,2,36,51,0,98,16,4,0,4,36,
+ 174,79,192,8,1,0,115,38,6,0,2,36,
+ 68,0,98,16,64,0,2,36,78,0,98,16,
+ 33,32,0,0,174,79,192,8,1,0,115,38,
+ 68,0,2,36,47,0,98,16,69,0,98,40,
+ 7,0,64,16,66,0,2,36,24,0,98,16,
+ 67,0,2,36,25,0,98,16,3,0,4,36,
+ 174,79,192,8,1,0,115,38,131,0,98,40,
+ 83,0,64,16,128,0,98,40,68,0,64,16,
+ 0,0,0,0,174,79,192,8,1,0,115,38,
+ 16,0,177,175,2,0,4,36,40,0,6,142,
+ 1,131,7,60,56,97,231,36,94,87,192,12,
+ 33,40,0,0,174,79,192,8,1,0,115,38,
+ 16,0,177,175,111,79,192,8,1,0,4,36,
+ 16,0,177,175,111,79,192,8,2,0,4,36,
+ 16,0,177,175,40,0,6,142,1,131,7,60,
+ 56,97,231,36,143,87,192,12,64,0,5,36,
+ 174,79,192,8,1,0,115,38,48,0,7,142,
+ 44,0,2,142,0,0,0,0,35,56,226,0,
+ 16,0,180,175,20,0,177,175,134,79,192,8,
+ 33,40,0,0,48,0,7,142,44,0,2,142,
+ 0,0,0,0,35,56,226,0,16,0,180,175,
+ 20,0,177,175,4,0,4,36,64,0,5,36,
+ 44,0,6,142,0,0,0,0,194,87,192,12,
+ 255,255,231,48,174,79,192,8,1,0,115,38,
+ 16,0,177,175,6,0,4,36,33,40,0,0,
+ 1,131,7,60,56,97,231,36,15,88,192,12,
+ 40,0,6,38,174,79,192,8,1,0,115,38,
+ 5,0,4,36,164,79,192,8,33,40,0,0,
+ 16,0,180,175,20,0,177,175,64,0,5,36,
+ 40,0,6,38,194,87,192,12,4,0,7,36,
+ 174,79,192,8,1,0,115,38,16,0,4,146,
+ 16,0,5,146,31,0,132,48,224,0,165,48,
+ 1,131,6,60,56,97,198,36,242,86,192,12,
+ 33,56,32,2,33,32,0,0,1,131,5,60,
+ 56,97,165,36,44,87,192,12,33,48,32,2,
+ 1,0,115,38,4,0,66,142,0,0,0,0,
+ 42,16,98,2,117,255,64,20,68,0,16,38,
+ 12,0,82,142,0,0,0,0,105,255,64,22,
+ 0,0,0,0,52,0,191,143,48,0,180,143,
+ 44,0,179,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,56,0,189,39,
+ 200,255,189,39,52,0,191,175,48,0,182,175,
+ 44,0,181,175,40,0,180,175,36,0,179,175,
+ 32,0,178,175,28,0,177,175,24,0,176,175,
+ 33,168,128,0,33,152,160,2,113,0,160,18,
+ 33,144,0,0,4,0,22,36,8,0,112,142,
+ 0,0,0,0,104,0,0,18,0,0,0,0,
+ 4,0,98,142,0,0,0,0,100,0,64,24,
+ 33,160,0,0,171,86,192,12,8,0,4,38,
+ 255,255,67,48,128,0,98,44,5,0,64,20,
+ 2,0,113,36,0,1,98,44,2,0,64,20,
+ 3,0,113,36,4,0,113,36,16,0,3,146,
+ 64,0,2,36,50,0,98,16,65,0,98,40,
+ 16,0,64,16,68,0,2,36,34,0,118,16,
+ 5,0,98,40,5,0,64,16,2,0,2,36,
+ 20,0,98,16,255,255,36,50,22,80,192,8,
+ 0,0,0,0,5,0,2,36,35,0,98,16,
+ 6,0,2,36,29,0,98,16,255,255,36,50,
+ 22,80,192,8,0,0,0,0,19,0,98,16,
+ 68,0,98,40,12,0,64,20,131,0,98,40,
+ 28,0,64,16,128,0,98,40,27,0,64,20,
+ 255,255,36,50,22,80,192,8,18,0,0,166,
+ 40,0,4,142,128,86,192,12,0,0,0,0,
+ 21,80,192,8,18,0,2,166,40,0,4,142,
+ 153,86,192,12,0,0,0,0,21,80,192,8,
+ 18,0,2,166,48,0,2,142,44,0,3,142,
+ 0,0,0,0,35,16,67,0,21,80,192,8,
+ 18,0,2,166,171,86,192,12,40,0,4,38,
+ 21,80,192,8,18,0,2,166,21,80,192,8,
+ 18,0,0,166,18,0,22,166,255,255,36,50,
+ 18,0,3,150,0,0,0,0,128,0,98,44,
+ 6,0,64,20,1,0,132,36,0,1,98,44,
+ 4,0,64,20,2,0,98,36,33,80,192,8,
+ 3,0,98,36,1,0,98,36,33,16,130,0,
+ 4,0,2,166,4,0,4,150,0,0,0,0,
+ 1,0,132,36,4,0,3,150,0,0,0,0,
+ 128,0,98,44,6,0,64,20,255,255,69,50,
+ 0,1,98,44,4,0,64,20,2,0,162,36,
+ 49,80,192,8,3,0,162,36,1,0,162,36,
+ 33,144,68,0,1,0,148,38,4,0,98,142,
+ 0,0,0,0,42,16,130,2,158,255,64,20,
+ 68,0,16,38,12,0,115,142,0,0,0,0,
+ 146,255,96,22,0,0,0,0,0,0,178,166,
+ 255,255,66,50,52,0,191,143,48,0,182,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,56,0,189,39,224,255,189,39,
+ 24,0,191,175,20,0,177,175,16,0,176,175,
+ 33,128,128,0,171,86,192,12,8,0,4,38,
+ 255,255,67,48,128,0,98,44,5,0,64,20,
+ 2,0,113,36,0,1,98,44,2,0,64,20,
+ 3,0,113,36,4,0,113,36,16,0,3,146,
+ 64,0,2,36,52,0,98,16,65,0,98,40,
+ 17,0,64,16,4,0,2,36,37,0,98,16,
+ 0,0,0,0,5,0,98,40,5,0,64,16,
+ 2,0,2,36,22,0,98,16,255,255,36,50,
+ 145,80,192,8,0,0,0,0,5,0,2,36,
+ 36,0,98,16,6,0,2,36,30,0,98,16,
+ 255,255,36,50,145,80,192,8,0,0,0,0,
+ 68,0,2,36,20,0,98,16,0,0,0,0,
+ 68,0,98,40,12,0,64,20,131,0,98,40,
+ 28,0,64,16,128,0,98,40,27,0,64,20,
+ 255,255,36,50,145,80,192,8,18,0,0,166,
+ 40,0,4,142,128,86,192,12,0,0,0,0,
+ 144,80,192,8,18,0,2,166,40,0,4,142,
+ 153,86,192,12,0,0,0,0,144,80,192,8,
+ 18,0,2,166,48,0,2,142,44,0,3,142,
+ 0,0,0,0,143,80,192,8,35,16,67,0,
+ 171,86,192,12,40,0,4,38,144,80,192,8,
+ 18,0,2,166,144,80,192,8,18,0,0,166,
+ 4,0,2,36,18,0,2,166,255,255,36,50,
+ 18,0,3,150,0,0,0,0,128,0,98,44,
+ 6,0,64,20,1,0,132,36,0,1,98,44,
+ 4,0,64,20,2,0,98,36,156,80,192,8,
+ 3,0,98,36,1,0,98,36,33,16,130,0,
+ 4,0,2,166,4,0,3,150,4,0,4,150,
+ 0,0,0,0,128,0,130,44,6,0,64,20,
+ 1,0,99,36,0,1,130,44,4,0,64,20,
+ 2,0,98,36,170,80,192,8,3,0,98,36,
+ 1,0,98,36,255,255,66,48,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,16,0,191,175,
+ 64,0,130,140,0,0,0,0,12,0,64,20,
+ 33,16,0,0,88,0,131,148,4,0,2,36,
+ 5,0,98,16,0,0,0,0,180,77,192,12,
+ 0,0,0,0,193,80,192,8,255,255,66,48,
+ 11,78,192,12,0,0,0,0,255,255,66,48,
+ 16,0,191,143,24,0,189,39,8,0,224,3,
+ 0,0,0,0,224,255,189,39,24,0,191,175,
+ 20,0,177,175,16,0,176,175,33,128,128,0,
+ 176,80,192,12,33,136,160,0,33,32,0,2,
+ 33,40,32,2,213,80,192,12,255,255,70,48,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,224,255,189,39,
+ 28,0,191,175,24,0,178,175,20,0,177,175,
+ 16,0,176,175,33,144,128,0,33,136,192,0,
+ 255,255,34,50,41,0,64,16,33,128,160,0,
+ 4,0,2,142,0,0,0,0,11,0,64,20,
+ 255,255,35,50,9,50,192,12,255,255,36,50,
+ 33,24,64,0,32,0,96,16,1,0,2,36,
+ 0,0,2,166,4,0,3,174,8,0,3,174,
+ 242,80,192,8,12,0,17,166,12,0,2,150,
+ 0,0,0,0,43,16,67,0,23,0,64,20,
+ 255,255,2,36,64,0,66,142,0,0,0,0,
+ 19,0,64,20,255,255,2,36,33,32,0,2,
+ 2,0,69,150,33,48,0,0,104,78,192,12,
+ 72,0,71,38,88,0,67,150,4,0,2,36,
+ 5,0,98,16,33,32,64,2,217,78,192,12,
+ 33,40,0,2,8,81,192,8,33,16,0,0,
+ 153,78,192,12,33,40,0,2,8,81,192,8,
+ 33,16,0,0,255,255,2,36,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,0,0,0,0,
+ 0,0,0,0,168,255,189,39,80,0,191,175,
+ 76,0,183,175,72,0,182,175,68,0,181,175,
+ 64,0,180,175,60,0,179,175,56,0,178,175,
+ 52,0,177,175,48,0,176,175,33,24,128,0,
+ 33,152,160,0,33,176,192,0,33,184,224,0,
+ 104,0,162,143,0,0,0,0,2,0,64,20,
+ 44,0,160,175,40,0,162,39,0,0,64,172,
+ 24,0,164,39,33,40,96,0,156,88,192,12,
+ 33,48,96,2,33,136,64,0,82,0,32,18,
+ 33,16,0,0,209,83,192,12,0,0,0,0,
+ 33,144,64,0,5,0,64,22,1,0,2,36,
+ 183,88,192,12,33,32,32,2,124,81,192,8,
+ 33,16,0,0,148,0,66,162,196,88,192,12,
+ 33,32,32,2,224,0,85,48,44,0,176,39,
+ 33,32,32,2,124,89,192,12,33,40,0,2,
+ 33,160,64,0,33,32,32,2,198,89,192,12,
+ 33,40,0,2,2,0,66,166,44,0,162,143,
+ 0,0,0,0,14,0,64,20,0,0,0,0,
+ 8,0,35,142,4,0,34,142,0,0,0,0,
+ 35,128,98,0,2,0,66,150,0,0,0,0,
+ 33,128,2,2,42,16,19,2,16,0,64,20,
+ 33,32,32,2,42,16,112,2,16,0,64,16,
+ 0,0,0,0,167,83,192,12,33,32,64,2,
+ 183,88,192,12,33,32,32,2,3,131,3,60,
+ 140,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,124,81,192,8,
+ 33,16,0,0,33,40,0,2,42,89,192,12,
+ 33,48,0,0,255,0,162,50,255,255,131,50,
+ 37,16,67,0,48,0,3,36,8,0,67,20,
+ 33,32,64,2,16,0,176,175,33,40,32,2,
+ 33,48,192,2,135,81,192,12,33,56,224,2,
+ 117,81,192,8,33,128,64,0,3,131,3,60,
+ 140,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,33,128,0,0,
+ 3,0,0,22,0,0,0,0,167,83,192,12,
+ 33,32,64,2,183,88,192,12,33,32,32,2,
+ 33,16,0,2,80,0,191,143,76,0,183,143,
+ 72,0,182,143,68,0,181,143,64,0,180,143,
+ 60,0,179,143,56,0,178,143,52,0,177,143,
+ 48,0,176,143,8,0,224,3,88,0,189,39,
+ 176,255,189,39,76,0,191,175,72,0,182,175,
+ 68,0,181,175,64,0,180,175,60,0,179,175,
+ 56,0,178,175,52,0,177,175,48,0,176,175,
+ 33,128,128,0,33,136,160,0,33,160,192,0,
+ 33,168,224,0,96,0,182,143,24,0,160,175,
+ 33,32,32,2,24,0,165,39,2,0,6,36,
+ 239,90,192,12,33,56,0,0,64,0,2,174,
+ 24,0,162,143,0,0,0,0,155,0,64,20,
+ 0,0,0,0,64,0,2,142,0,0,0,0,
+ 4,0,64,16,33,32,32,2,3,131,3,60,
+ 60,82,192,8,148,17,99,36,16,0,160,175,
+ 72,0,5,38,24,0,166,39,110,90,192,12,
+ 4,0,7,36,24,0,162,143,0,0,0,0,
+ 139,0,64,20,0,0,0,0,196,88,192,12,
+ 33,32,32,2,224,0,66,48,160,0,3,36,
+ 133,0,67,20,33,32,32,2,124,89,192,12,
+ 24,0,165,39,33,144,64,0,33,32,32,2,
+ 198,89,192,12,24,0,165,39,33,152,64,0,
+ 24,0,162,143,0,0,0,0,122,0,64,20,
+ 0,0,0,0,255,255,66,50,5,0,66,44,
+ 118,0,64,16,0,0,0,0,8,0,34,142,
+ 4,0,35,142,0,0,0,0,35,16,67,0,
+ 255,255,99,50,33,16,67,0,110,0,194,22,
+ 33,32,0,2,88,0,18,166,90,0,19,166,
+ 33,40,128,2,178,50,192,12,33,48,160,2,
+ 118,0,64,20,33,16,0,0,255,255,67,50,
+ 4,0,2,36,24,0,98,16,33,32,32,2,
+ 24,0,165,39,2,0,6,36,239,90,192,12,
+ 33,56,0,0,92,0,2,174,33,32,32,2,
+ 24,0,165,39,2,0,6,36,239,90,192,12,
+ 33,56,0,0,96,0,2,174,33,32,32,2,
+ 24,0,165,39,2,0,6,36,239,90,192,12,
+ 33,56,0,0,100,0,2,174,24,0,162,143,
+ 0,0,0,0,78,0,64,20,33,32,32,2,
+ 67,82,192,8,104,0,5,38,4,0,2,36,
+ 88,0,2,166,90,0,19,166,124,0,0,174,
+ 16,0,160,175,92,0,5,38,24,0,166,39,
+ 186,91,192,12,6,0,7,36,24,0,162,143,
+ 0,0,0,0,63,0,64,20,100,0,4,38,
+ 33,40,0,0,144,71,192,12,4,0,6,36,
+ 32,0,160,167,40,0,160,175,36,0,160,175,
+ 44,0,160,167,32,0,178,39,64,0,2,36,
+ 16,0,162,175,33,32,32,2,33,40,64,2,
+ 24,0,166,39,110,90,192,12,33,56,0,0,
+ 24,0,162,143,0,0,0,0,5,0,64,16,
+ 0,0,0,0,24,92,192,12,33,32,64,2,
+ 58,82,192,8,0,0,0,0,40,0,162,143,
+ 36,0,163,143,0,0,0,0,35,16,67,0,
+ 255,255,70,48,5,0,194,44,2,0,64,20,
+ 0,0,0,0,4,0,6,36,8,0,192,16,
+ 33,32,32,2,36,0,165,143,0,0,0,0,
+ 80,68,192,12,100,0,4,38,24,92,192,12,
+ 32,0,164,39,33,32,32,2,24,0,165,39,
+ 2,0,6,36,239,90,192,12,33,56,0,0,
+ 104,0,2,174,33,32,32,2,24,0,165,39,
+ 2,0,6,36,239,90,192,12,33,56,0,0,
+ 108,0,2,174,33,32,32,2,24,0,165,39,
+ 3,0,6,36,239,90,192,12,64,0,7,36,
+ 112,0,2,174,24,0,162,143,0,0,0,0,
+ 9,0,64,16,33,32,32,2,3,131,3,60,
+ 140,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,73,82,192,8,
+ 33,16,0,0,116,0,5,38,33,48,192,2,
+ 163,82,192,12,33,56,0,2,255,255,3,36,
+ 248,255,67,16,33,16,0,2,76,0,191,143,
+ 72,0,182,143,68,0,181,143,64,0,180,143,
+ 60,0,179,143,56,0,178,143,52,0,177,143,
+ 48,0,176,143,8,0,224,3,80,0,189,39,
+ 184,255,189,39,64,0,191,175,60,0,183,175,
+ 56,0,182,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,33,128,128,0,16,0,160,175,
+ 8,0,2,142,4,0,3,142,0,0,0,0,
+ 35,184,67,0,33,144,0,0,255,255,162,48,
+ 45,0,64,16,33,136,0,0,3,131,19,60,
+ 140,17,115,38,255,255,22,36,255,255,181,48,
+ 8,0,3,142,4,0,2,142,0,0,0,0,
+ 35,160,98,0,0,0,2,146,0,0,0,0,
+ 128,0,66,48,32,0,64,20,33,32,0,2,
+ 124,89,192,12,16,0,165,39,33,32,0,2,
+ 198,89,192,12,16,0,165,39,33,40,64,0,
+ 16,0,162,143,0,0,0,0,6,0,64,20,
+ 33,32,0,2,255,255,165,48,251,88,192,12,
+ 1,0,6,36,7,0,86,20,0,0,0,0,
+ 0,0,98,142,0,0,0,0,1,0,66,36,
+ 0,0,98,174,147,82,192,8,255,255,17,36,
+ 8,0,2,142,4,0,3,142,0,0,0,0,
+ 35,16,67,0,33,16,66,2,35,144,84,0,
+ 255,255,66,50,43,16,85,0,217,255,64,20,
+ 1,0,49,38,33,32,0,2,33,40,224,2,
+ 251,88,192,12,33,48,0,0,33,16,32,2,
+ 64,0,191,143,60,0,183,143,56,0,182,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,72,0,189,39,192,255,189,39,
+ 56,0,191,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,33,144,128,0,33,152,160,0,
+ 33,168,224,0,16,0,160,175,124,89,192,12,
+ 16,0,165,39,33,32,64,2,198,89,192,12,
+ 16,0,165,39,0,0,98,166,16,0,162,143,
+ 0,0,0,0,28,0,64,20,0,0,0,0,
+ 12,0,66,142,8,0,67,142,0,0,0,0,
+ 35,16,67,0,0,0,99,150,255,255,66,48,
+ 20,0,98,20,0,0,0,0,4,0,96,174,
+ 0,0,101,150,0,0,0,0,83,82,192,12,
+ 33,32,64,2,33,32,64,0,255,255,2,36,
+ 46,0,130,16,0,0,0,0,3,0,128,20,
+ 0,0,0,0,246,82,192,8,8,0,96,174,
+ 224,83,192,12,4,0,100,174,10,0,64,20,
+ 8,0,98,174,247,82,192,8,255,255,2,36,
+ 3,131,3,60,140,17,99,36,0,0,98,140,
+ 0,0,0,0,1,0,66,36,210,82,192,8,
+ 0,0,98,172,8,0,112,142,4,0,98,142,
+ 0,0,0,0,23,0,64,24,33,136,0,0,
+ 255,255,20,36,33,32,64,2,124,89,192,12,
+ 16,0,165,39,33,32,64,2,198,89,192,12,
+ 16,0,165,39,4,0,2,166,16,0,162,143,
+ 0,0,0,0,233,255,64,20,33,32,64,2,
+ 33,40,0,2,0,83,192,12,33,48,160,2,
+ 226,255,84,16,1,0,49,38,4,0,98,142,
+ 0,0,0,0,42,16,34,2,236,255,64,20,
+ 68,0,16,38,33,16,0,0,56,0,191,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,64,0,189,39,184,255,189,39,
+ 68,0,191,175,64,0,180,175,60,0,179,175,
+ 56,0,178,175,52,0,177,175,48,0,176,175,
+ 33,128,128,0,33,144,160,0,24,0,160,175,
+ 16,0,160,175,8,0,69,38,24,0,166,39,
+ 186,91,192,12,6,0,7,36,24,0,162,143,
+ 0,0,0,0,103,0,64,20,0,0,0,0,
+ 196,88,192,12,33,32,0,2,224,0,84,48,
+ 33,32,0,2,124,89,192,12,24,0,165,39,
+ 33,152,64,0,33,32,0,2,198,89,192,12,
+ 24,0,165,39,33,136,64,0,24,0,162,143,
+ 0,0,0,0,88,0,64,20,37,16,116,2,
+ 18,0,81,166,16,0,66,162,16,0,67,146,
+ 64,0,2,36,48,0,98,16,65,0,98,40,
+ 16,0,64,16,4,0,2,36,31,0,98,16,
+ 5,0,98,40,5,0,64,16,2,0,2,36,
+ 22,0,98,16,33,32,0,2,121,83,192,8,
+ 0,0,0,0,5,0,2,36,65,0,98,16,
+ 6,0,2,36,27,0,98,16,33,32,0,2,
+ 121,83,192,8,0,0,0,0,68,0,2,36,
+ 15,0,98,16,68,0,98,40,8,0,64,20,
+ 33,32,0,2,131,0,98,40,57,0,64,16,
+ 128,0,98,40,51,0,64,16,0,0,0,0,
+ 121,83,192,8,0,0,0,0,255,255,37,50,
+ 164,90,192,12,24,0,166,39,117,83,192,8,
+ 40,0,66,174,33,32,0,2,255,255,37,50,
+ 40,0,70,38,19,90,192,12,24,0,167,39,
+ 117,83,192,8,0,0,0,0,255,255,37,50,
+ 40,0,70,38,24,91,192,12,24,0,167,39,
+ 117,83,192,8,0,0,0,0,40,0,68,38,
+ 33,40,0,0,144,71,192,12,4,0,6,36,
+ 32,0,160,167,40,0,160,175,36,0,160,175,
+ 44,0,160,167,33,32,0,2,255,255,37,50,
+ 32,0,166,39,19,90,192,12,24,0,167,39,
+ 40,0,162,143,36,0,163,143,0,0,0,0,
+ 35,16,67,0,255,255,70,48,5,0,194,44,
+ 2,0,64,20,0,0,0,0,4,0,6,36,
+ 7,0,192,16,0,0,0,0,36,0,165,143,
+ 0,0,0,0,80,68,192,12,40,0,68,38,
+ 24,92,192,12,32,0,164,39,24,0,162,143,
+ 0,0,0,0,8,0,64,16,33,16,0,0,
+ 3,131,3,60,140,17,99,36,0,0,98,140,
+ 0,0,0,0,1,0,66,36,0,0,98,172,
+ 255,255,2,36,68,0,191,143,64,0,180,143,
+ 60,0,179,143,56,0,178,143,52,0,177,143,
+ 48,0,176,143,8,0,224,3,72,0,189,39,
+ 232,255,189,39,20,0,191,175,16,0,176,175,
+ 33,128,128,0,76,0,2,142,0,0,0,0,
+ 3,0,64,16,0,0,0,0,24,92,192,12,
+ 72,0,4,38,88,0,3,150,4,0,2,36,
+ 5,0,98,20,0,0,0,0,110,86,192,12,
+ 92,0,4,38,157,83,192,8,116,0,4,38,
+ 13,84,192,12,104,0,4,38,120,0,4,38,
+ 13,84,192,12,0,0,0,0,148,0,3,146,
+ 0,0,0,0,248,83,192,12,33,32,0,2,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,33,128,128,0,5,0,0,18,
+ 0,0,0,0,136,83,192,12,0,0,0,0,
+ 61,50,192,12,33,32,0,2,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 224,255,189,39,24,0,191,175,20,0,177,175,
+ 16,0,176,175,33,136,128,0,9,50,192,12,
+ 16,0,4,36,33,128,64,0,11,0,0,18,
+ 33,32,0,2,33,40,0,0,144,71,192,12,
+ 16,0,6,36,224,83,192,12,33,32,32,2,
+ 4,0,64,16,8,0,2,174,4,0,17,174,
+ 204,83,192,8,33,16,0,2,61,50,192,12,
+ 33,32,0,2,33,16,0,0,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,9,50,192,12,152,0,4,36,
+ 33,128,64,0,4,0,0,18,33,16,0,0,
+ 248,83,192,12,33,32,0,2,33,16,0,2,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,224,255,189,39,24,0,191,175,
+ 20,0,177,175,0,17,4,0,33,16,68,0,
+ 128,136,2,0,11,0,32,18,16,0,176,175,
+ 9,50,192,12,33,32,32,2,33,128,64,0,
+ 4,0,0,18,33,32,0,2,33,40,0,0,
+ 144,71,192,12,33,48,32,2,243,83,192,8,
+ 33,16,0,2,33,16,0,0,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,33,128,128,0,33,40,0,0,
+ 144,71,192,12,152,0,6,36,255,0,2,36,
+ 88,0,2,166,120,5,2,36,58,0,2,166,
+ 72,0,0,166,80,0,0,174,76,0,0,174,
+ 84,0,0,166,148,0,0,162,149,0,0,162,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,208,255,189,39,40,0,191,175,
+ 36,0,179,175,32,0,178,175,28,0,177,175,
+ 24,0,176,175,33,128,128,0,31,0,0,18,
+ 1,0,19,36,8,0,18,142,0,0,0,0,
+ 16,0,64,18,0,0,0,0,4,0,2,142,
+ 0,0,0,0,9,0,64,24,33,136,0,0,
+ 59,84,192,12,33,32,64,2,1,0,49,38,
+ 4,0,2,142,0,0,0,0,42,16,34,2,
+ 249,255,64,20,68,0,82,38,8,0,4,142,
+ 61,50,192,12,0,0,0,0,12,0,17,142,
+ 4,0,96,18,0,0,0,0,33,152,0,0,
+ 49,84,192,8,4,0,0,174,61,50,192,12,
+ 33,32,0,2,33,128,32,2,227,255,0,22,
+ 0,0,0,0,40,0,191,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,48,0,189,39,232,255,189,39,
+ 20,0,191,175,16,0,176,175,33,128,128,0,
+ 60,0,2,142,0,0,0,0,4,0,64,16,
+ 0,0,0,0,9,248,64,0,0,0,0,0,
+ 60,0,0,174,110,86,192,12,8,0,4,38,
+ 78,84,192,12,33,32,0,2,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 232,255,189,39,16,0,191,175,16,0,131,144,
+ 6,0,2,36,18,0,98,16,7,0,98,40,
+ 5,0,64,16,4,0,2,36,6,0,98,16,
+ 0,0,0,0,103,84,192,8,0,0,0,0,
+ 68,0,2,36,11,0,98,20,0,0,0,0,
+ 44,0,130,140,0,0,0,0,7,0,64,16,
+ 0,0,0,0,24,92,192,12,40,0,132,36,
+ 103,84,192,8,0,0,0,0,110,86,192,12,
+ 40,0,132,36,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,0,0,0,0,
+ 216,255,189,39,32,0,191,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,136,192,0,56,0,179,143,0,0,0,0,
+ 20,72,192,12,33,144,224,0,33,128,64,0,
+ 11,0,0,18,33,32,32,2,33,40,64,2,
+ 80,86,192,12,8,0,6,38,255,255,3,36,
+ 5,0,67,16,2,0,2,36,16,0,2,162,
+ 40,0,19,174,133,84,192,8,33,16,0,0,
+ 255,255,2,36,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,216,255,189,39,
+ 32,0,191,175,28,0,177,175,24,0,176,175,
+ 33,128,128,0,96,0,5,174,100,0,6,174,
+ 128,0,2,142,0,0,0,0,11,0,64,16,
+ 0,0,0,0,13,84,192,12,104,0,4,38,
+ 124,0,2,142,0,0,0,0,108,0,2,174,
+ 128,0,2,142,0,0,0,0,112,0,2,174,
+ 128,0,0,174,124,0,0,174,88,0,17,150,
+ 2,0,2,36,88,0,2,166,176,80,192,12,
+ 33,32,0,2,33,56,64,0,64,0,2,142,
+ 0,0,0,0,35,0,64,20,255,255,35,50,
+ 3,0,2,36,14,0,98,16,255,255,227,48,
+ 58,0,2,150,0,0,0,0,43,16,67,0,
+ 9,0,64,16,12,0,4,38,48,0,2,142,
+ 28,0,5,38,52,0,7,142,0,0,0,0,
+ 9,248,64,0,1,0,6,36,214,84,192,8,
+ 0,0,0,0,96,0,2,142,0,0,0,0,
+ 250,255,67,36,13,0,98,44,13,0,64,16,
+ 128,16,3,0,2,131,1,60,33,8,34,0,
+ 32,154,34,140,0,0,0,0,8,0,64,0,
+ 0,0,0,0,204,84,192,8,2,0,2,36,
+ 204,84,192,8,3,0,2,36,5,0,2,36,
+ 96,0,2,174,52,0,2,142,0,0,0,0,
+ 16,0,162,175,44,0,2,142,12,0,4,38,
+ 28,0,5,38,33,48,0,2,9,248,64,0,
+ 255,255,231,48,32,0,191,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,40,0,189,39,
+ 224,255,189,39,28,0,191,175,24,0,176,175,
+ 33,128,128,0,96,0,5,142,0,0,0,0,
+ 6,0,160,16,0,0,0,0,100,0,6,142,
+ 140,84,192,12,0,0,0,0,56,85,192,8,
+ 0,0,0,0,176,80,192,12,33,32,0,2,
+ 33,56,64,0,88,0,2,150,0,0,0,0,
+ 2,0,66,44,11,0,64,16,255,255,227,48,
+ 58,0,2,150,0,0,0,0,43,16,67,0,
+ 6,0,64,16,33,32,0,2,1,0,5,36,
+ 140,84,192,12,33,48,0,0,56,85,192,8,
+ 0,0,0,0,96,0,2,142,0,0,0,0,
+ 49,0,64,20,2,0,2,36,88,0,3,150,
+ 3,0,2,36,33,0,98,16,4,0,98,40,
+ 7,0,64,16,2,0,98,40,41,0,64,16,
+ 2,0,2,36,39,0,96,4,0,0,0,0,
+ 13,85,192,8,0,0,0,0,5,0,2,36,
+ 34,0,98,20,2,0,2,36,3,131,3,60,
+ 236,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,104,0,4,38,25,0,128,16,
+ 0,0,98,172,3,131,5,60,176,17,165,36,
+ 0,0,162,140,108,0,3,142,0,0,0,0,
+ 33,16,67,0,0,0,162,172,12,0,132,140,
+ 0,0,0,0,248,255,128,20,2,0,2,36,
+ 47,85,192,8,88,0,2,166,3,131,4,60,
+ 236,17,132,36,0,0,130,140,0,0,0,0,
+ 1,0,66,36,0,0,130,172,200,255,130,140,
+ 108,0,3,142,0,0,0,0,33,16,67,0,
+ 200,255,130,172,2,0,2,36,88,0,2,166,
+ 52,0,2,142,0,0,0,0,16,0,162,175,
+ 44,0,2,142,12,0,4,38,28,0,5,38,
+ 33,48,0,2,9,248,64,0,255,255,231,48,
+ 28,0,191,143,24,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,33,128,128,0,88,0,3,150,
+ 1,0,2,36,25,0,98,16,2,0,98,40,
+ 5,0,64,16,3,0,2,36,7,0,96,16,
+ 0,0,0,0,116,85,192,8,0,0,0,0,
+ 37,0,98,16,0,0,0,0,116,85,192,8,
+ 0,0,0,0,112,0,4,142,108,0,3,142,
+ 0,0,0,0,34,0,96,16,0,0,0,0,
+ 17,0,130,144,0,0,0,0,2,0,66,48,
+ 33,0,64,16,255,255,99,36,250,255,96,20,
+ 68,0,132,36,116,85,192,8,0,0,0,0,
+ 112,0,4,142,108,0,3,142,0,0,0,0,
+ 8,0,96,16,0,0,0,0,17,0,130,144,
+ 0,0,0,0,2,0,66,48,19,0,64,16,
+ 255,255,99,36,250,255,96,20,68,0,132,36,
+ 118,93,192,12,33,32,0,2,241,255,64,28,
+ 255,255,66,40,7,0,64,16,0,0,0,0,
+ 92,85,192,8,0,0,0,0,120,94,192,12,
+ 33,32,0,2,5,0,64,20,0,0,0,0,
+ 219,84,192,12,33,32,0,2,167,83,192,12,
+ 33,32,0,2,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,232,255,189,39,
+ 20,0,191,175,16,0,176,175,33,128,128,0,
+ 88,0,3,150,1,0,2,36,17,0,98,16,
+ 2,0,98,40,5,0,64,16,3,0,2,36,
+ 9,0,96,16,0,0,0,0,156,85,192,8,
+ 0,0,0,0,13,0,98,16,5,0,2,36,
+ 7,0,98,16,0,0,0,0,156,85,192,8,
+ 0,0,0,0,60,95,192,12,33,32,0,2,
+ 154,85,192,8,0,0,0,0,0,93,192,12,
+ 33,32,0,2,154,85,192,8,0,0,0,0,
+ 252,93,192,12,33,32,0,2,5,0,64,20,
+ 0,0,0,0,60,85,192,12,33,32,0,2,
+ 162,85,192,8,0,0,0,0,167,83,192,12,
+ 33,32,0,2,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,192,255,189,39,
+ 60,0,191,175,56,0,182,175,52,0,181,175,
+ 48,0,180,175,44,0,179,175,40,0,178,175,
+ 36,0,177,175,32,0,176,175,33,64,128,0,
+ 33,136,192,0,33,152,224,0,84,0,182,143,
+ 88,0,181,143,92,0,180,143,80,0,178,151,
+ 3,131,3,60,128,17,99,36,0,0,98,140,
+ 0,0,0,0,1,0,66,36,0,0,98,172,
+ 24,0,162,39,16,0,162,175,33,32,160,0,
+ 16,81,192,12,33,40,0,1,33,128,64,0,
+ 57,0,0,18,255,255,66,50,58,0,3,150,
+ 0,0,0,0,43,16,67,0,2,0,64,16,
+ 0,0,0,0,58,0,18,166,44,0,22,174,
+ 48,0,21,174,52,0,20,174,24,0,162,143,
+ 0,0,0,0,7,0,64,16,1,0,2,36,
+ 219,84,192,12,33,32,0,2,167,83,192,12,
+ 33,32,0,2,5,86,192,8,0,0,0,0,
+ 88,0,3,150,0,0,0,0,16,0,98,16,
+ 2,0,98,40,5,0,64,16,3,0,2,36,
+ 9,0,96,16,0,0,0,0,244,85,192,8,
+ 0,0,0,0,11,0,98,16,5,0,2,36,
+ 31,0,98,16,0,0,0,0,244,85,192,8,
+ 0,0,0,0,3,131,3,60,239,85,192,8,
+ 184,17,99,36,3,131,3,60,239,85,192,8,
+ 188,17,99,36,3,131,3,60,192,17,99,36,
+ 0,0,98,140,0,0,0,0,1,0,66,36,
+ 3,86,192,8,0,0,98,172,3,131,3,60,
+ 140,17,99,36,0,0,98,140,0,0,0,0,
+ 1,0,66,36,0,0,98,172,167,83,192,12,
+ 33,32,0,2,33,32,32,2,33,40,96,2,
+ 1,0,6,36,9,248,160,2,33,56,128,2,
+ 5,86,192,8,0,0,0,0,124,85,192,12,
+ 33,32,0,2,60,0,191,143,56,0,182,143,
+ 52,0,181,143,48,0,180,143,44,0,179,143,
+ 40,0,178,143,36,0,177,143,32,0,176,143,
+ 8,0,224,3,64,0,189,39,232,255,189,39,
+ 20,0,191,175,16,0,176,175,33,128,128,0,
+ 213,80,192,12,255,255,198,48,53,0,64,20,
+ 255,255,2,36,3,131,3,60,144,17,99,36,
+ 0,0,98,140,0,0,0,0,1,0,66,36,
+ 0,0,98,172,96,0,2,142,0,0,0,0,
+ 32,0,64,16,4,0,2,36,92,0,98,140,
+ 0,0,0,0,1,0,66,36,92,0,98,172,
+ 96,0,2,142,0,0,0,0,255,255,67,36,
+ 5,0,98,44,32,0,64,16,128,16,3,0,
+ 2,131,1,60,33,8,34,0,88,154,34,140,
+ 0,0,0,0,8,0,64,0,0,0,0,0,
+ 3,131,3,60,70,86,192,8,204,17,99,36,
+ 3,131,3,60,70,86,192,8,212,17,99,36,
+ 3,131,3,60,70,86,192,8,216,17,99,36,
+ 3,131,3,60,70,86,192,8,208,17,99,36,
+ 3,131,3,60,70,86,192,8,220,17,99,36,
+ 88,0,3,150,0,0,0,0,8,0,98,20,
+ 33,16,0,0,3,131,3,60,240,17,99,36,
+ 0,0,98,140,0,0,0,0,1,0,66,36,
+ 0,0,98,172,33,16,0,0,20,0,191,143,
+ 16,0,176,143,8,0,224,3,24,0,189,39,
+ 0,0,0,0,224,255,189,39,28,0,191,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,144,160,0,33,128,192,0,4,0,0,174,
+ 14,0,128,16,0,0,4,174,128,136,4,0,
+ 9,50,192,12,33,32,32,2,3,0,64,20,
+ 4,0,2,174,104,86,192,8,255,255,2,36,
+ 5,0,32,18,33,40,64,2,4,0,4,142,
+ 0,0,0,0,80,68,192,12,33,48,32,2,
+ 33,16,0,0,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,20,0,191,175,
+ 16,0,176,175,33,128,128,0,4,0,4,142,
+ 0,0,0,0,4,0,128,16,0,0,0,0,
+ 61,50,192,12,0,0,0,0,4,0,0,174,
+ 0,0,0,174,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,0,0,0,0,
+ 0,0,0,0,11,0,128,4,128,0,130,40,
+ 20,0,64,20,1,0,3,36,255,127,2,36,
+ 42,16,68,0,16,0,64,16,2,0,3,36,
+ 127,0,2,60,255,255,66,52,148,86,192,8,
+ 42,16,68,0,128,255,130,40,9,0,64,16,
+ 1,0,3,36,0,128,130,40,6,0,64,16,
+ 2,0,3,36,128,255,2,60,42,16,130,0,
+ 2,0,64,20,4,0,3,36,3,0,3,36,
+ 8,0,224,3,33,16,96,0,128,0,130,44,
+ 14,0,64,20,1,0,2,36,255,127,2,36,
+ 43,16,68,0,9,0,64,16,127,0,2,60,
+ 255,255,66,52,43,16,68,0,6,0,64,16,
+ 3,0,2,36,4,0,128,4,5,0,2,36,
+ 169,86,192,8,4,0,2,36,2,0,2,36,
+ 8,0,224,3,0,0,0,0,4,0,135,140,
+ 0,0,130,140,0,0,0,0,65,0,64,16,
+ 33,16,0,0,0,0,227,140,4,0,231,36,
+ 128,16,3,0,33,16,67,0,192,16,2,0,
+ 0,0,227,140,0,0,0,0,33,24,67,0,
+ 128,0,98,44,17,0,64,20,4,0,231,36,
+ 0,64,98,44,15,0,64,20,2,0,5,36,
+ 31,0,2,60,255,255,66,52,43,16,67,0,
+ 7,0,64,16,255,15,2,60,255,255,66,52,
+ 43,16,67,0,6,0,64,20,5,0,5,36,
+ 204,86,192,8,4,0,5,36,204,86,192,8,
+ 3,0,5,36,1,0,5,36,2,0,6,36,
+ 0,0,130,140,0,0,0,0,42,16,194,0,
+ 31,0,64,16,255,255,162,48,31,0,9,60,
+ 255,255,41,53,255,15,8,60,255,255,8,53,
+ 0,0,132,140,0,0,227,140,4,0,231,36,
+ 128,0,98,44,16,0,64,20,255,255,165,48,
+ 0,64,98,44,11,0,64,20,43,16,35,1,
+ 7,0,64,16,43,16,3,1,3,0,64,20,
+ 0,0,0,0,236,86,192,8,4,0,165,36,
+ 236,86,192,8,5,0,165,36,236,86,192,8,
+ 3,0,165,36,236,86,192,8,2,0,165,36,
+ 1,0,165,36,1,0,198,36,42,16,196,0,
+ 232,255,64,20,255,255,162,48,8,0,224,3,
+ 0,0,0,0,208,255,189,39,40,0,191,175,
+ 33,72,192,0,224,0,165,48,255,255,130,48,
+ 31,0,66,44,7,0,64,16,33,48,160,0,
+ 37,16,133,0,16,0,162,163,33,32,224,0,
+ 16,0,165,39,38,87,192,8,1,0,6,36,
+ 32,0,163,39,33,40,0,0,31,0,194,52,
+ 24,0,162,163,255,255,130,48,8,0,64,16,
+ 25,0,168,39,127,0,130,48,0,0,98,160,
+ 1,0,99,36,255,255,130,48,194,33,2,0,
+ 250,255,128,20,1,0,165,36,1,0,166,36,
+ 33,16,160,0,255,255,66,48,2,0,66,44,
+ 13,0,64,20,255,255,165,36,255,255,4,52,
+ 255,255,99,36,0,0,98,144,0,0,0,0,
+ 128,0,66,52,0,0,2,161,1,0,8,37,
+ 33,16,160,0,255,255,66,48,2,0,66,44,
+ 246,255,64,16,33,40,164,0,255,255,98,144,
+ 0,0,0,0,0,0,2,161,33,32,224,0,
+ 24,0,165,39,255,255,198,48,9,248,32,1,
+ 0,0,0,0,40,0,191,143,48,0,189,39,
+ 8,0,224,3,0,0,0,0,208,255,189,39,
+ 40,0,191,175,33,72,160,0,255,255,130,48,
+ 128,0,66,44,6,0,64,16,33,64,192,0,
+ 16,0,164,163,33,32,0,1,16,0,165,39,
+ 88,87,192,8,1,0,6,36,24,0,167,39,
+ 32,0,165,39,255,255,130,48,7,0,64,16,
+ 33,24,0,0,0,0,164,160,1,0,165,36,
+ 255,255,130,48,2,34,2,0,251,255,128,20,
+ 1,0,99,36,128,0,98,52,0,0,226,160,
+ 1,0,231,36,1,0,102,36,33,16,96,0,
+ 255,255,66,48,11,0,64,16,255,255,99,36,
+ 255,255,4,52,255,255,165,36,0,0,162,144,
+ 0,0,0,0,0,0,226,160,1,0,231,36,
+ 33,16,96,0,255,255,66,48,248,255,64,20,
+ 33,24,100,0,33,32,0,1,24,0,165,39,
+ 255,255,198,48,9,248,32,1,0,0,0,0,
+ 40,0,191,143,48,0,189,39,8,0,224,3,
+ 0,0,0,0,200,255,189,39,48,0,191,175,
+ 44,0,181,175,40,0,180,175,36,0,179,175,
+ 32,0,178,175,28,0,177,175,24,0,176,175,
+ 33,136,160,0,33,144,192,0,33,152,224,0,
+ 72,0,180,143,33,128,128,0,128,86,192,12,
+ 33,32,64,2,33,168,64,0,255,255,4,50,
+ 192,0,37,50,33,48,96,2,242,86,192,12,
+ 33,56,128,2,255,255,176,50,33,32,0,2,
+ 33,40,96,2,44,87,192,12,33,48,128,2,
+ 16,0,162,39,33,24,80,0,255,255,99,36,
+ 6,0,98,16,0,0,114,160,16,0,162,39,
+ 3,146,18,0,255,255,99,36,253,255,98,20,
+ 0,0,114,160,33,32,128,2,16,0,165,39,
+ 9,248,96,2,255,255,166,50,48,0,191,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,56,0,189,39,200,255,189,39,
+ 48,0,191,175,44,0,181,175,40,0,180,175,
+ 36,0,179,175,32,0,178,175,28,0,177,175,
+ 24,0,176,175,33,136,160,0,33,144,192,0,
+ 33,160,224,0,72,0,181,143,33,128,128,0,
+ 153,86,192,12,33,32,64,2,33,152,64,0,
+ 255,255,4,50,192,0,37,50,33,48,128,2,
+ 242,86,192,12,33,56,160,2,255,255,112,50,
+ 33,32,0,2,33,40,128,2,44,87,192,12,
+ 33,48,160,2,16,0,162,39,33,32,80,0,
+ 9,0,0,18,255,255,99,38,255,255,5,52,
+ 255,255,132,36,0,0,146,160,2,146,18,0,
+ 33,16,96,0,255,255,66,48,250,255,64,20,
+ 33,24,101,0,33,32,160,2,16,0,165,39,
+ 9,248,128,2,255,255,102,50,48,0,191,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,56,0,189,39,216,255,189,39,
+ 32,0,191,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,152,192,0,
+ 56,0,178,143,60,0,177,143,33,128,224,0,
+ 255,255,132,48,192,0,165,48,33,48,64,2,
+ 242,86,192,12,33,56,32,2,255,255,16,50,
+ 33,32,0,2,33,40,64,2,44,87,192,12,
+ 33,48,32,2,4,0,0,18,33,32,32,2,
+ 33,40,96,2,9,248,64,2,33,48,0,2,
+ 32,0,191,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,224,255,189,39,24,0,191,175,
+ 33,72,160,0,128,0,130,44,18,0,64,20,
+ 33,64,192,0,0,64,130,44,13,0,64,20,
+ 31,0,2,60,255,255,66,52,43,16,68,0,
+ 7,0,64,16,255,15,2,60,255,255,66,52,
+ 43,16,68,0,8,0,64,20,5,0,6,36,
+ 250,87,192,8,4,0,6,36,250,87,192,8,
+ 3,0,6,36,250,87,192,8,2,0,6,36,
+ 1,0,6,36,255,255,194,48,16,0,163,39,
+ 33,40,98,0,9,0,163,16,33,56,0,0,
+ 16,0,163,39,255,255,165,36,127,0,130,48,
+ 37,16,226,0,0,0,162,160,194,33,4,0,
+ 250,255,163,20,128,0,7,36,33,32,0,1,
+ 16,0,165,39,9,248,32,1,255,255,198,48,
+ 24,0,191,143,32,0,189,39,8,0,224,3,
+ 0,0,0,0,208,255,189,39,44,0,191,175,
+ 40,0,182,175,36,0,181,175,32,0,180,175,
+ 28,0,179,175,24,0,178,175,20,0,177,175,
+ 16,0,176,175,33,144,160,0,33,168,192,0,
+ 33,160,224,0,64,0,182,143,33,136,128,0,
+ 4,0,179,142,0,0,0,0,171,86,192,12,
+ 33,32,160,2,33,128,64,0,255,255,36,50,
+ 192,0,69,50,33,48,128,2,242,86,192,12,
+ 33,56,192,2,255,255,16,50,33,32,0,2,
+ 33,40,128,2,44,87,192,12,33,48,192,2,
+ 23,0,0,18,33,40,128,2,0,0,98,142,
+ 4,0,115,38,128,32,2,0,33,32,130,0,
+ 192,32,4,0,0,0,98,142,4,0,115,38,
+ 33,32,130,0,226,87,192,12,33,48,192,2,
+ 63,88,192,8,2,0,16,36,0,0,100,142,
+ 4,0,115,38,226,87,192,12,33,48,192,2,
+ 1,0,16,38,0,0,162,142,0,0,0,0,
+ 42,16,2,2,247,255,64,20,33,40,128,2,
+ 44,0,191,143,40,0,182,143,36,0,181,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 48,0,189,39,224,255,189,39,28,0,191,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,136,128,0,33,144,192,0,255,255,67,50,
+ 12,0,34,150,0,0,0,0,43,16,67,0,
+ 4,0,64,16,1,0,2,36,12,0,50,150,
+ 0,0,0,0,255,255,67,50,11,0,98,16,
+ 2,0,98,40,5,0,64,16,2,0,2,36,
+ 50,0,96,16,255,255,80,50,137,88,192,8,
+ 0,0,0,0,15,0,98,16,255,255,80,50,
+ 137,88,192,8,0,0,0,0,8,0,35,142,
+ 0,0,0,0,1,0,98,36,8,0,34,174,
+ 0,0,162,144,0,0,0,0,0,0,98,160,
+ 12,0,34,150,0,0,0,0,255,255,66,36,
+ 149,88,192,8,12,0,34,166,8,0,35,142,
+ 0,0,0,0,1,0,98,36,8,0,34,174,
+ 0,0,162,144,0,0,0,0,0,0,98,160,
+ 8,0,35,142,0,0,0,0,1,0,98,36,
+ 8,0,34,174,1,0,162,144,0,0,0,0,
+ 0,0,98,160,12,0,34,150,0,0,0,0,
+ 254,255,66,36,149,88,192,8,12,0,34,166,
+ 8,0,36,142,0,0,0,0,80,68,192,12,
+ 33,48,0,2,12,0,34,150,0,0,0,0,
+ 35,16,82,0,12,0,34,166,8,0,34,142,
+ 0,0,0,0,33,128,2,2,8,0,48,174,
+ 255,255,66,50,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,224,255,189,39,24,0,191,175,
+ 20,0,177,175,16,0,176,175,33,128,160,0,
+ 10,0,128,20,33,136,192,0,9,50,192,12,
+ 16,0,4,36,33,32,64,0,3,0,128,20,
+ 1,0,2,36,178,88,192,8,33,16,0,0,
+ 173,88,192,8,0,0,130,160,0,0,128,160,
+ 4,0,144,172,8,0,144,172,33,16,17,2,
+ 12,0,130,172,33,16,128,0,24,0,191,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,232,255,189,39,16,0,191,175,
+ 0,0,130,144,0,0,0,0,1,0,66,48,
+ 3,0,64,16,0,0,0,0,61,50,192,12,
+ 0,0,0,0,16,0,191,143,24,0,189,39,
+ 8,0,224,3,0,0,0,0,0,0,130,144,
+ 0,0,0,0,128,0,66,48,16,0,64,20,
+ 255,255,2,36,8,0,130,140,12,0,131,140,
+ 0,0,0,0,43,16,67,0,7,0,64,20,
+ 0,0,0,0,0,0,130,144,0,0,0,0,
+ 128,0,66,52,0,0,130,160,216,88,192,8,
+ 255,255,2,36,8,0,130,140,0,0,0,0,
+ 0,0,66,144,8,0,224,3,0,0,0,0,
+ 30,0,192,24,33,56,192,0,0,0,130,144,
+ 0,0,0,0,128,0,66,48,16,0,64,20,
+ 255,0,3,36,8,0,131,140,12,0,130,140,
+ 0,0,0,0,43,16,98,0,5,0,64,16,
+ 1,0,98,36,8,0,130,172,0,0,99,144,
+ 240,88,192,8,0,0,0,0,0,0,130,144,
+ 0,0,0,0,128,0,66,52,0,0,130,160,
+ 255,0,3,36,0,0,130,144,0,0,0,0,
+ 128,0,66,48,5,0,64,20,0,0,0,0,
+ 0,0,163,160,255,255,198,36,228,255,192,28,
+ 1,0,165,36,8,0,224,3,35,16,230,0,
+ 1,0,2,36,15,0,194,16,2,0,194,40,
+ 5,0,64,16,2,0,2,36,7,0,192,16,
+ 255,255,2,36,40,89,192,8,0,0,0,0,
+ 11,0,194,16,255,255,2,36,40,89,192,8,
+ 0,0,0,0,4,0,130,140,0,0,0,0,
+ 21,89,192,8,33,40,162,0,8,0,130,140,
+ 0,0,0,0,19,89,192,8,33,40,162,0,
+ 12,0,130,140,0,0,0,0,35,40,69,0,
+ 4,0,130,140,0,0,0,0,43,16,162,0,
+ 17,0,64,20,255,255,2,36,12,0,130,140,
+ 0,0,0,0,43,16,69,0,12,0,64,20,
+ 255,255,2,36,12,0,130,140,0,0,0,0,
+ 43,16,162,0,5,0,64,16,0,0,0,0,
+ 0,0,130,144,0,0,0,0,127,0,66,48,
+ 0,0,130,160,8,0,133,172,33,16,0,0,
+ 8,0,224,3,0,0,0,0,12,0,130,140,
+ 4,0,131,140,0,0,0,0,35,56,67,0,
+ 1,0,2,36,15,0,194,16,2,0,194,40,
+ 5,0,64,16,2,0,2,36,7,0,192,16,
+ 255,255,2,36,87,89,192,8,0,0,0,0,
+ 12,0,194,16,255,255,2,36,87,89,192,8,
+ 0,0,0,0,4,0,130,140,0,0,0,0,
+ 66,89,192,8,33,16,162,0,8,0,130,140,
+ 0,0,0,0,33,16,162,0,72,89,192,8,
+ 12,0,130,172,12,0,130,140,0,0,0,0,
+ 35,16,69,0,12,0,130,172,8,0,130,140,
+ 12,0,131,140,0,0,0,0,43,16,67,0,
+ 5,0,64,16,0,0,0,0,0,0,130,144,
+ 0,0,0,0,85,89,192,8,127,0,66,48,
+ 0,0,130,144,0,0,0,0,128,0,66,52,
+ 0,0,130,160,33,16,224,0,8,0,224,3,
+ 0,0,0,0,232,255,189,39,20,0,191,175,
+ 16,0,176,175,12,0,128,20,33,128,160,0,
+ 9,50,192,12,16,0,4,36,33,32,64,0,
+ 3,0,128,20,0,0,0,0,119,89,192,8,
+ 33,16,0,0,0,0,2,146,0,0,0,0,
+ 108,89,192,8,1,0,66,52,0,0,2,146,
+ 0,0,0,0,254,0,66,48,0,0,130,160,
+ 4,0,2,142,0,0,0,0,4,0,130,172,
+ 8,0,2,142,0,0,0,0,8,0,130,172,
+ 12,0,2,142,0,0,0,0,12,0,130,172,
+ 33,16,128,0,20,0,191,143,16,0,176,143,
+ 8,0,224,3,24,0,189,39,0,0,0,0,
+ 0,0,130,144,0,0,0,0,128,0,66,48,
+ 17,0,64,20,31,0,3,36,8,0,131,140,
+ 12,0,130,140,0,0,0,0,43,16,98,0,
+ 6,0,64,16,1,0,98,36,8,0,130,172,
+ 0,0,98,144,0,0,0,0,145,89,192,8,
+ 31,0,67,48,0,0,130,144,0,0,0,0,
+ 128,0,66,52,0,0,130,160,31,0,3,36,
+ 0,0,130,144,0,0,0,0,128,0,66,48,
+ 4,0,64,16,1,0,2,36,0,0,162,172,
+ 196,89,192,8,33,16,0,0,255,0,99,48,
+ 31,0,2,36,6,0,98,16,33,16,96,0,
+ 196,89,192,8,0,0,0,0,1,0,2,36,
+ 195,89,192,8,0,0,162,172,33,48,0,0,
+ 0,0,130,144,0,0,0,0,128,0,66,48,
+ 16,0,64,20,255,0,3,36,8,0,131,140,
+ 12,0,130,140,0,0,0,0,43,16,98,0,
+ 5,0,64,16,1,0,98,36,8,0,130,172,
+ 0,0,99,144,183,89,192,8,0,0,0,0,
+ 0,0,130,144,0,0,0,0,128,0,66,52,
+ 0,0,130,160,255,0,3,36,0,0,130,144,
+ 0,0,0,0,128,0,66,48,228,255,64,20,
+ 128,0,98,48,4,0,64,16,127,0,98,48,
+ 37,16,194,0,163,89,192,8,192,49,2,0,
+ 255,0,98,48,37,48,70,0,255,255,194,48,
+ 8,0,224,3,0,0,0,0,0,0,130,144,
+ 0,0,0,0,128,0,66,48,16,0,64,20,
+ 255,0,6,36,8,0,131,140,12,0,130,140,
+ 0,0,0,0,43,16,98,0,5,0,64,16,
+ 1,0,98,36,8,0,130,172,0,0,102,144,
+ 218,89,192,8,0,0,0,0,0,0,130,144,
+ 0,0,0,0,128,0,66,52,0,0,130,160,
+ 255,0,6,36,0,0,130,144,0,0,0,0,
+ 128,0,66,48,13,0,64,20,1,0,2,36,
+ 255,0,195,48,128,0,2,36,4,0,98,20,
+ 2,0,2,36,0,0,162,172,17,90,192,8,
+ 255,255,2,52,128,0,194,48,6,0,64,20,
+ 33,24,0,0,17,90,192,8,255,0,194,48,
+ 0,0,162,172,17,90,192,8,33,16,0,0,
+ 127,0,194,48,32,0,64,16,255,255,71,36,
+ 0,26,3,0,0,0,130,144,0,0,0,0,
+ 128,0,66,48,16,0,64,20,255,255,102,48,
+ 8,0,131,140,12,0,130,140,0,0,0,0,
+ 43,16,98,0,6,0,64,16,1,0,98,36,
+ 8,0,130,172,0,0,98,144,0,0,0,0,
+ 7,90,192,8,37,24,194,0,0,0,130,144,
+ 0,0,0,0,128,0,66,52,0,0,130,160,
+ 255,0,195,52,0,0,130,144,0,0,0,0,
+ 128,0,66,48,224,255,64,20,1,0,2,36,
+ 33,16,224,0,255,0,66,48,226,255,64,20,
+ 255,255,231,36,255,255,98,48,8,0,224,3,
+ 0,0,0,0,216,255,189,39,36,0,191,175,
+ 32,0,180,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,152,128,0,
+ 33,128,192,0,33,144,160,0,255,255,81,50,
+ 33,0,32,18,33,160,224,0,255,255,2,52,
+ 30,0,34,18,0,0,0,0,9,50,192,12,
+ 33,32,32,2,33,24,64,0,29,0,96,16,
+ 1,0,2,36,0,0,2,166,4,0,3,174,
+ 8,0,3,174,12,0,18,166,33,32,96,2,
+ 8,0,5,142,0,0,0,0,218,88,192,12,
+ 33,48,32,2,33,24,64,0,255,255,100,48,
+ 10,0,145,20,1,0,2,36,12,0,2,150,
+ 0,0,0,0,35,16,67,0,12,0,2,166,
+ 8,0,2,142,0,0,0,0,33,16,130,0,
+ 68,90,192,8,8,0,2,174,68,90,192,8,
+ 0,0,130,174,0,0,0,166,4,0,0,174,
+ 8,0,0,174,12,0,0,166,36,0,191,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,224,255,189,39,28,0,191,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,144,128,0,33,136,160,0,33,128,192,0,
+ 124,89,192,12,33,40,0,2,33,32,64,2,
+ 198,89,192,12,33,40,0,2,33,40,64,0,
+ 0,0,2,142,0,0,0,0,7,0,64,20,
+ 33,32,64,2,255,255,165,48,33,48,32,2,
+ 19,90,192,12,33,56,0,2,104,90,192,8,
+ 0,0,0,0,0,0,32,166,4,0,32,174,
+ 8,0,32,174,12,0,32,166,28,0,191,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,216,255,189,39,
+ 36,0,191,175,32,0,180,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,152,128,0,33,136,160,0,33,144,192,0,
+ 56,0,176,147,0,0,0,0,196,88,192,12,
+ 33,160,224,0,224,0,66,48,7,0,80,20,
+ 33,32,96,2,124,89,192,12,33,40,64,2,
+ 255,255,66,48,255,255,131,50,7,0,67,16,
+ 33,32,96,2,0,0,66,142,0,0,0,0,
+ 16,0,64,20,4,0,2,36,152,90,192,8,
+ 0,0,66,174,198,89,192,12,33,40,64,2,
+ 33,40,64,0,0,0,66,142,0,0,0,0,
+ 7,0,64,20,33,32,96,2,255,255,165,48,
+ 33,48,32,2,19,90,192,12,33,56,64,2,
+ 156,90,192,8,0,0,0,0,0,0,32,166,
+ 4,0,32,174,8,0,32,174,12,0,32,166,
+ 36,0,191,143,32,0,180,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,33,56,0,0,
+ 255,255,168,36,255,255,165,48,50,0,160,16,
+ 1,0,9,36,1,0,12,36,4,0,10,36,
+ 3,0,11,36,255,255,5,52,0,0,130,144,
+ 0,0,0,0,128,0,66,48,16,0,64,20,
+ 255,0,3,36,8,0,131,140,12,0,130,140,
+ 0,0,0,0,43,16,98,0,5,0,64,16,
+ 1,0,98,36,8,0,130,172,0,0,99,144,
+ 193,90,192,8,0,0,0,0,0,0,130,144,
+ 0,0,0,0,128,0,66,52,0,0,130,160,
+ 255,0,3,36,0,0,130,144,0,0,0,0,
+ 128,0,66,48,3,0,64,16,0,0,0,0,
+ 218,90,192,8,0,0,204,172,11,0,32,17,
+ 255,255,2,49,5,0,74,20,33,72,0,0,
+ 3,0,96,16,0,0,0,0,218,90,192,8,
+ 0,0,203,172,128,0,98,48,3,0,64,16,
+ 0,18,7,0,255,255,7,36,0,18,7,0,
+ 37,56,67,0,33,16,0,1,255,255,66,48,
+ 212,255,64,20,33,64,5,1,8,0,224,3,
+ 33,16,224,0,224,255,189,39,24,0,191,175,
+ 20,0,177,175,16,0,176,175,33,128,128,0,
+ 124,89,192,12,33,136,160,0,33,32,0,2,
+ 198,89,192,12,33,40,32,2,33,32,0,2,
+ 255,255,69,48,164,90,192,12,33,48,32,2,
+ 24,0,191,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,32,0,189,39,216,255,189,39,
+ 32,0,191,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,144,128,0,
+ 33,136,160,0,33,152,192,0,196,88,192,12,
+ 33,128,224,0,224,0,66,48,255,0,16,50,
+ 7,0,80,20,33,32,64,2,124,89,192,12,
+ 33,40,32,2,255,255,66,48,255,255,99,50,
+ 8,0,67,16,33,32,64,2,0,0,34,142,
+ 0,0,0,0,2,0,64,20,4,0,2,36,
+ 0,0,34,174,17,91,192,8,33,16,0,0,
+ 198,89,192,12,33,40,32,2,33,32,64,2,
+ 255,255,69,48,164,90,192,12,33,48,32,2,
+ 32,0,191,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,208,255,189,39,44,0,191,175,
+ 40,0,180,175,36,0,179,175,32,0,178,175,
+ 28,0,177,175,24,0,176,175,33,128,128,0,
+ 33,152,192,0,33,160,224,0,0,0,96,174,
+ 4,0,96,174,8,0,3,142,4,0,2,142,
+ 0,0,0,0,35,24,98,0,255,255,165,48,
+ 35,0,160,24,33,144,0,0,1,0,6,36,
+ 0,0,2,146,0,0,0,0,128,0,66,48,
+ 16,0,64,20,255,0,4,36,8,0,4,142,
+ 12,0,2,142,0,0,0,0,43,16,130,0,
+ 5,0,64,16,1,0,130,36,8,0,2,174,
+ 0,0,132,144,64,91,192,8,0,0,0,0,
+ 0,0,2,146,0,0,0,0,128,0,66,52,
+ 0,0,2,162,255,0,4,36,0,0,2,146,
+ 0,0,0,0,128,0,66,48,3,0,64,16,
+ 128,0,130,48,150,91,192,8,0,0,134,174,
+ 2,0,64,20,0,0,0,0,1,0,82,38,
+ 255,255,165,36,224,255,160,28,0,0,0,0,
+ 33,32,0,2,33,40,96,0,251,88,192,12,
+ 33,48,0,0,68,0,64,18,1,0,81,38,
+ 9,50,192,12,128,32,17,0,33,40,64,0,
+ 63,0,160,16,0,0,0,0,0,0,113,174,
+ 4,0,101,174,59,0,64,26,33,56,0,0,
+ 1,0,8,36,33,48,0,0,0,0,2,146,
+ 0,0,0,0,128,0,66,48,16,0,64,20,
+ 255,0,4,36,8,0,3,142,12,0,2,142,
+ 0,0,0,0,43,16,98,0,5,0,64,16,
+ 1,0,98,36,8,0,2,174,0,0,100,144,
+ 114,91,192,8,0,0,0,0,0,0,2,146,
+ 0,0,0,0,128,0,66,52,0,0,2,162,
+ 255,0,4,36,0,0,2,146,0,0,0,0,
+ 128,0,66,48,3,0,64,16,192,49,6,0,
+ 150,91,192,8,0,0,136,174,127,0,130,48,
+ 37,48,194,0,128,0,130,48,225,255,64,20,
+ 0,0,0,0,18,0,224,20,40,0,194,44,
+ 4,0,64,16,80,0,194,44,0,0,160,172,
+ 145,91,192,8,4,0,165,36,5,0,64,16,
+ 216,255,194,36,0,0,168,172,4,0,165,36,
+ 146,91,192,8,0,0,162,172,2,0,2,36,
+ 0,0,162,172,4,0,165,36,176,255,194,36,
+ 146,91,192,8,0,0,162,172,0,0,166,172,
+ 1,0,231,36,42,16,242,0,200,255,64,20,
+ 4,0,165,36,44,0,191,143,40,0,180,143,
+ 36,0,179,143,32,0,178,143,28,0,177,143,
+ 24,0,176,143,8,0,224,3,48,0,189,39,
+ 224,255,189,39,28,0,191,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,136,128,0,
+ 33,144,160,0,33,128,192,0,124,89,192,12,
+ 33,40,0,2,33,32,32,2,198,89,192,12,
+ 33,40,0,2,33,40,64,0,0,0,2,142,
+ 0,0,0,0,5,0,64,20,33,32,32,2,
+ 255,255,165,48,33,48,64,2,24,91,192,12,
+ 33,56,0,2,28,0,191,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 32,0,189,39,216,255,189,39,36,0,191,175,
+ 32,0,180,175,28,0,179,175,24,0,178,175,
+ 20,0,177,175,16,0,176,175,33,144,128,0,
+ 33,160,160,0,33,136,192,0,56,0,176,147,
+ 0,0,0,0,196,88,192,12,33,152,224,0,
+ 224,0,66,48,7,0,80,20,33,32,64,2,
+ 124,89,192,12,33,40,32,2,255,255,66,48,
+ 255,255,99,50,7,0,67,16,33,32,64,2,
+ 0,0,34,142,0,0,0,0,14,0,64,20,
+ 4,0,2,36,226,91,192,8,0,0,34,174,
+ 198,89,192,12,33,40,32,2,33,40,64,0,
+ 0,0,34,142,0,0,0,0,5,0,64,20,
+ 33,32,64,2,255,255,165,48,33,48,128,2,
+ 24,91,192,12,33,56,32,2,36,0,191,143,
+ 32,0,180,143,28,0,179,143,24,0,178,143,
+ 20,0,177,143,16,0,176,143,8,0,224,3,
+ 40,0,189,39,0,0,0,0,0,0,0,0,
+ 216,255,189,39,32,0,191,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,152,128,0,8,0,99,142,4,0,98,142,
+ 0,0,0,0,35,128,98,0,255,255,4,50,
+ 19,0,128,16,33,136,160,0,9,50,192,12,
+ 0,0,0,0,33,144,64,0,3,0,64,22,
+ 255,255,16,50,17,92,192,8,255,255,2,36,
+ 33,32,64,2,4,0,101,142,0,0,0,0,
+ 80,68,192,12,33,48,0,2,1,0,2,36,
+ 0,0,34,166,4,0,50,174,33,128,80,2,
+ 15,92,192,8,8,0,48,174,0,0,32,166,
+ 4,0,32,174,8,0,32,174,12,0,32,166,
+ 33,16,0,0,32,0,191,143,28,0,179,143,
+ 24,0,178,143,20,0,177,143,16,0,176,143,
+ 8,0,224,3,40,0,189,39,232,255,189,39,
+ 20,0,191,175,16,0,176,175,33,128,128,0,
+ 0,0,2,150,0,0,0,0,1,0,66,48,
+ 7,0,64,16,0,0,0,0,4,0,4,142,
+ 0,0,0,0,3,0,128,16,0,0,0,0,
+ 61,50,192,12,0,0,0,0,0,0,0,166,
+ 8,0,0,174,4,0,0,174,12,0,0,166,
+ 20,0,191,143,16,0,176,143,8,0,224,3,
+ 24,0,189,39,224,255,189,39,24,0,191,175,
+ 20,0,177,175,16,0,176,175,33,128,128,0,
+ 8,0,163,140,4,0,162,140,0,0,0,0,
+ 35,136,98,0,255,255,35,50,12,0,2,150,
+ 0,0,0,0,43,16,67,0,4,0,64,16,
+ 255,255,38,50,12,0,17,150,0,0,0,0,
+ 255,255,38,50,6,0,192,16,255,255,34,50,
+ 8,0,4,142,4,0,165,140,80,68,192,12,
+ 0,0,0,0,255,255,34,50,8,0,3,142,
+ 0,0,0,0,33,16,67,0,8,0,2,174,
+ 12,0,2,150,0,0,0,0,35,16,81,0,
+ 12,0,2,166,24,0,191,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,32,0,189,39,
+ 1,0,2,36,23,0,194,16,2,0,194,40,
+ 5,0,64,16,2,0,2,36,7,0,192,16,
+ 255,255,2,36,132,92,192,8,0,0,0,0,
+ 23,0,194,16,255,255,2,36,132,92,192,8,
+ 0,0,0,0,255,255,162,48,4,0,131,140,
+ 0,0,0,0,33,48,67,0,8,0,130,140,
+ 0,0,0,0,35,16,67,0,12,0,131,148,
+ 0,0,0,0,33,16,67,0,124,92,192,8,
+ 35,40,69,0,255,255,162,48,8,0,131,140,
+ 0,0,0,0,33,48,67,0,12,0,130,148,
+ 0,0,0,0,124,92,192,8,35,40,69,0,
+ 12,0,130,148,8,0,131,140,0,0,0,0,
+ 33,48,67,0,255,255,162,48,35,48,194,0,
+ 4,0,130,140,0,0,0,0,43,16,194,0,
+ 4,0,64,20,255,255,2,36,8,0,134,172,
+ 12,0,133,164,33,16,0,0,8,0,224,3,
+ 0,0,0,0,216,255,189,39,32,0,191,175,
+ 28,0,179,175,24,0,178,175,20,0,177,175,
+ 16,0,176,175,33,128,128,0,33,152,160,0,
+ 8,0,3,142,4,0,2,142,0,0,0,0,
+ 35,144,98,0,255,255,66,50,12,0,3,150,
+ 0,0,0,0,33,16,67,0,255,255,99,50,
+ 42,16,67,0,35,0,64,16,1,0,2,36,
+ 0,0,3,150,0,0,0,0,32,0,98,20,
+ 255,255,2,36,9,50,192,12,255,255,100,50,
+ 33,136,64,0,3,0,32,22,255,255,70,50,
+ 189,92,192,8,255,255,2,36,5,0,192,16,
+ 0,0,0,0,4,0,5,142,0,0,0,0,
+ 80,68,192,12,33,32,32,2,0,0,2,150,
+ 0,0,0,0,1,0,66,48,7,0,64,16,
+ 0,0,0,0,4,0,4,142,0,0,0,0,
+ 3,0,128,16,0,0,0,0,61,50,192,12,
+ 0,0,0,0,4,0,17,174,255,255,66,50,
+ 33,16,34,2,8,0,2,174,35,16,114,2,
+ 12,0,2,166,33,16,0,0,32,0,191,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,40,0,189,39,
+ 216,255,189,39,32,0,191,175,28,0,179,175,
+ 24,0,178,175,20,0,177,175,16,0,176,175,
+ 33,128,128,0,33,136,192,0,255,255,36,50,
+ 35,0,128,16,33,152,160,0,8,0,2,142,
+ 4,0,3,142,0,0,0,0,35,16,67,0,
+ 255,255,66,48,12,0,3,150,0,0,0,0,
+ 33,16,67,0,42,16,68,0,20,0,64,16,
+ 0,0,0,0,9,50,192,12,0,0,0,0,
+ 33,144,64,0,24,0,64,18,255,255,2,36,
+ 0,0,2,150,0,0,0,0,1,0,66,48,
+ 8,0,64,16,1,0,2,36,4,0,4,142,
+ 0,0,0,0,4,0,128,16,0,0,0,0,
+ 61,50,192,12,0,0,0,0,1,0,2,36,
+ 0,0,2,166,4,0,18,174,4,0,4,142,
+ 33,40,96,2,80,68,192,12,255,255,38,50,
+ 33,32,0,2,255,255,37,50,85,92,192,12,
+ 33,48,0,0,33,16,0,0,32,0,191,143,
+ 28,0,179,143,24,0,178,143,20,0,177,143,
+ 16,0,176,143,8,0,224,3,40,0,189,39,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 184,255,189,39,64,0,191,175,60,0,183,175,
+ 56,0,182,175,52,0,181,175,48,0,180,175,
+ 44,0,179,175,40,0,178,175,36,0,177,175,
+ 32,0,176,175,33,144,128,0,112,0,84,142,
+ 0,0,0,0,92,0,128,18,1,0,2,36,
+ 108,0,81,142,0,0,0,0,88,0,32,18,
+ 0,0,0,0,96,0,87,38,136,0,66,174,
+ 140,0,64,174,96,0,64,174,100,0,64,174,
+ 224,83,192,12,33,32,32,2,33,168,64,0,
+ 25,0,160,18,33,128,160,2,108,0,66,142,
+ 0,0,0,0,124,0,66,174,128,0,84,174,
+ 108,0,81,174,112,0,85,174,29,0,32,26,
+ 33,152,0,0,255,255,22,36,33,32,0,2,
+ 8,0,133,38,33,48,64,2,170,72,192,12,
+ 1,0,7,36,10,0,86,16,33,32,64,2,
+ 14,0,64,20,0,0,0,0,64,0,66,142,
+ 0,0,0,0,10,0,64,20,2,0,5,36,
+ 56,93,192,8,1,0,102,38,33,32,64,2,
+ 5,0,5,36,33,48,0,0,140,84,192,12,
+ 0,0,0,0,107,93,192,8,1,0,2,36,
+ 1,0,115,38,68,0,16,38,42,16,113,2,
+ 230,255,64,20,68,0,148,38,40,0,32,18,
+ 33,128,160,2,17,0,2,146,0,0,0,0,
+ 34,0,66,48,32,0,64,20,0,0,0,0,
+ 36,0,2,142,16,0,176,175,16,0,66,140,
+ 24,0,4,142,28,0,5,142,32,0,6,142,
+ 0,0,0,0,9,248,64,0,33,56,64,2,
+ 17,0,2,146,0,0,0,0,32,0,66,52,
+ 17,0,2,162,0,0,226,142,0,0,0,0,
+ 15,0,64,16,0,0,0,0,255,255,49,38,
+ 15,0,32,18,68,0,16,38,17,0,3,146,
+ 0,0,0,0,32,0,98,48,2,0,64,20,
+ 34,0,98,52,17,0,2,162,255,255,49,38,
+ 248,255,32,22,68,0,16,38,107,93,192,8,
+ 33,16,0,0,255,255,49,38,218,255,32,22,
+ 68,0,16,38,33,16,0,0,64,0,191,143,
+ 60,0,183,143,56,0,182,143,52,0,181,143,
+ 48,0,180,143,44,0,179,143,40,0,178,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 72,0,189,39,168,255,189,39,80,0,191,175,
+ 76,0,183,175,72,0,182,175,68,0,181,175,
+ 64,0,180,175,60,0,179,175,56,0,178,175,
+ 52,0,177,175,48,0,176,175,33,144,128,0,
+ 96,0,66,142,0,0,0,0,3,0,64,16,
+ 33,32,0,0,240,93,192,8,255,255,2,36,
+ 116,0,66,142,0,0,0,0,7,0,64,16,
+ 104,0,67,38,12,0,99,140,0,0,0,0,
+ 12,0,98,140,0,0,0,0,251,255,64,20,
+ 0,0,0,0,8,0,116,140,0,0,0,0,
+ 92,0,128,18,33,16,0,0,4,0,115,140,
+ 0,0,0,0,88,0,96,18,0,0,0,0,
+ 64,0,66,142,0,0,0,0,55,0,64,20,
+ 0,0,0,0,33,128,128,2,52,0,96,26,
+ 33,136,0,0,255,255,23,36,2,0,22,36,
+ 5,0,21,36,17,0,2,146,0,0,0,0,
+ 16,0,66,48,40,0,64,16,24,0,165,39,
+ 8,0,3,142,28,0,2,142,0,0,0,0,
+ 35,24,98,0,24,0,163,175,12,0,2,142,
+ 0,0,0,0,28,0,162,175,128,24,3,0,
+ 33,24,98,0,252,255,98,140,0,0,0,0,
+ 1,0,66,36,252,255,98,172,12,0,0,174,
+ 8,0,0,174,33,32,0,2,33,48,64,2,
+ 170,72,192,12,1,0,7,36,6,0,87,16,
+ 0,0,0,0,9,0,64,20,1,0,34,38,
+ 96,0,86,174,196,93,192,8,100,0,66,174,
+ 96,0,85,174,110,86,192,12,24,0,164,39,
+ 240,93,192,8,255,255,2,36,110,86,192,12,
+ 24,0,164,39,17,0,2,146,0,0,0,0,
+ 12,0,66,48,17,0,2,162,1,0,4,36,
+ 1,0,49,38,42,16,51,2,209,255,64,20,
+ 68,0,16,38,27,0,128,16,33,128,128,2,
+ 23,0,96,26,33,136,0,0,17,0,2,146,
+ 0,0,0,0,34,0,66,48,14,0,64,20,
+ 0,0,0,0,36,0,2,142,16,0,176,175,
+ 16,0,66,140,24,0,4,142,28,0,5,142,
+ 32,0,6,142,0,0,0,0,9,248,64,0,
+ 33,56,64,2,17,0,2,146,0,0,0,0,
+ 32,0,66,52,17,0,2,162,1,0,49,38,
+ 42,16,51,2,235,255,64,20,68,0,16,38,
+ 240,93,192,8,1,0,2,36,33,16,0,0,
+ 80,0,191,143,76,0,183,143,72,0,182,143,
+ 68,0,181,143,64,0,180,143,60,0,179,143,
+ 56,0,178,143,52,0,177,143,48,0,176,143,
+ 8,0,224,3,88,0,189,39,0,0,0,0,
+ 200,255,189,39,48,0,191,175,44,0,179,175,
+ 40,0,178,175,36,0,177,175,32,0,176,175,
+ 33,144,128,0,96,0,64,174,100,0,64,174,
+ 112,0,80,142,0,0,0,0,105,0,0,18,
+ 33,16,0,0,108,0,81,142,0,0,0,0,
+ 101,0,32,18,0,0,0,0,64,0,66,142,
+ 0,0,0,0,43,0,64,20,33,32,64,2,
+ 50,0,32,26,33,152,0,0,33,32,0,2,
+ 33,40,64,2,96,72,192,12,1,0,6,36,
+ 13,0,64,20,33,32,64,2,20,0,2,150,
+ 0,0,0,0,1,0,66,48,34,0,64,16,
+ 2,0,5,36,36,0,2,142,0,0,0,0,
+ 3,0,66,144,0,0,0,0,2,0,66,48,
+ 3,0,64,20,0,0,0,0,63,94,192,8,
+ 2,0,5,36,36,0,2,142,0,0,0,0,
+ 32,0,66,140,4,0,67,142,0,0,0,0,
+ 36,16,67,0,16,0,64,16,33,32,64,2,
+ 36,0,2,142,16,0,3,146,2,0,66,144,
+ 0,0,0,0,11,0,98,20,3,0,5,36,
+ 1,0,115,38,42,16,113,2,219,255,64,20,
+ 68,0,16,38,69,94,192,8,96,0,83,38,
+ 5,0,5,36,64,94,192,8,33,48,0,0,
+ 2,0,5,36,1,0,102,38,140,84,192,12,
+ 0,0,0,0,113,94,192,8,1,0,2,36,
+ 96,0,83,38,112,0,80,142,0,0,0,0,
+ 41,0,32,18,33,16,0,0,17,0,2,146,
+ 0,0,0,0,17,0,66,48,32,0,64,20,
+ 0,0,0,0,36,0,2,142,16,0,176,175,
+ 4,0,66,140,24,0,4,142,28,0,5,142,
+ 32,0,6,142,0,0,0,0,9,248,64,0,
+ 33,56,64,2,17,0,2,146,0,0,0,0,
+ 16,0,66,52,17,0,2,162,0,0,98,142,
+ 0,0,0,0,15,0,64,16,0,0,0,0,
+ 255,255,49,38,15,0,32,18,68,0,16,38,
+ 17,0,3,146,0,0,0,0,16,0,98,48,
+ 2,0,64,20,17,0,98,52,17,0,2,162,
+ 255,255,49,38,248,255,32,22,68,0,16,38,
+ 113,94,192,8,33,16,0,0,255,255,49,38,
+ 218,255,32,22,68,0,16,38,33,16,0,0,
+ 48,0,191,143,44,0,179,143,40,0,178,143,
+ 36,0,177,143,32,0,176,143,8,0,224,3,
+ 56,0,189,39,192,255,189,39,56,0,191,175,
+ 52,0,183,175,48,0,182,175,44,0,181,175,
+ 40,0,180,175,36,0,179,175,32,0,178,175,
+ 28,0,177,175,24,0,176,175,33,144,128,0,
+ 112,0,84,142,0,0,0,0,171,0,128,18,
+ 33,16,0,0,108,0,85,142,0,0,0,0,
+ 166,0,160,18,32,0,2,36,56,0,67,146,
+ 0,0,0,0,75,0,98,16,96,0,83,38,
+ 33,0,98,40,5,0,64,16,64,0,2,36,
+ 9,0,96,16,33,16,0,0,49,95,192,8,
+ 0,0,0,0,85,0,98,16,128,0,2,36,
+ 137,0,98,16,33,16,0,0,49,95,192,8,
+ 0,0,0,0,33,136,160,2,8,0,32,18,
+ 33,128,128,2,17,0,2,146,0,0,0,0,
+ 1,0,66,48,139,0,64,16,255,255,49,38,
+ 250,255,32,22,68,0,16,38,0,0,98,142,
+ 0,0,0,0,136,0,64,20,33,16,0,0,
+ 33,136,160,2,43,0,32,18,33,128,128,2,
+ 14,0,22,36,64,0,23,36,17,0,2,146,
+ 0,0,0,0,34,0,66,48,33,0,64,20,
+ 0,0,0,0,36,0,2,142,16,0,176,175,
+ 12,0,66,140,24,0,4,142,28,0,5,142,
+ 32,0,6,142,0,0,0,0,9,248,64,0,
+ 33,56,64,2,17,0,2,146,0,0,0,0,
+ 32,0,66,52,17,0,2,162,0,0,98,142,
+ 0,0,0,0,16,0,64,16,0,0,0,0,
+ 255,255,49,38,10,0,32,18,68,0,16,38,
+ 17,0,3,146,0,0,0,0,32,0,98,48,
+ 2,0,64,20,192,0,98,52,17,0,2,162,
+ 255,255,49,38,248,255,32,22,68,0,16,38,
+ 0,0,118,174,236,94,192,8,56,0,87,162,
+ 255,255,49,38,217,255,32,22,68,0,16,38,
+ 32,0,2,36,56,0,66,162,0,0,98,142,
+ 0,0,0,0,13,0,64,20,64,0,2,36,
+ 33,136,160,2,81,0,32,18,33,128,128,2,
+ 17,0,2,146,0,0,0,0,2,0,66,48,
+ 74,0,64,16,255,255,49,38,250,255,32,22,
+ 68,0,16,38,49,95,192,8,33,16,0,0,
+ 56,0,66,162,14,0,2,36,0,0,98,174,
+ 33,136,160,2,53,0,32,18,33,128,128,2,
+ 2,0,23,36,15,0,22,36,17,0,2,146,
+ 0,0,0,0,194,0,66,48,5,0,64,16,
+ 0,0,0,0,19,0,87,16,0,0,0,0,
+ 32,95,192,8,255,255,49,38,64,0,2,142,
+ 0,0,0,0,34,0,64,16,0,0,0,0,
+ 16,0,176,175,64,0,2,142,24,0,4,142,
+ 28,0,5,142,32,0,6,142,0,0,0,0,
+ 9,248,64,0,33,56,64,2,17,0,2,146,
+ 0,0,0,0,30,95,192,8,64,0,66,52,
+ 64,0,2,142,0,0,0,0,13,0,64,16,
+ 0,0,0,0,16,0,176,175,64,0,2,142,
+ 24,0,4,142,28,0,5,142,32,0,6,142,
+ 0,0,0,0,9,248,64,0,33,56,64,2,
+ 17,0,2,146,0,0,0,0,30,95,192,8,
+ 64,0,66,52,0,0,118,174,17,0,2,146,
+ 0,0,0,0,128,0,66,52,17,0,2,162,
+ 255,255,49,38,208,255,32,22,68,0,16,38,
+ 33,136,160,2,12,0,32,18,33,128,128,2,
+ 17,0,2,146,0,0,0,0,128,0,66,48,
+ 5,0,64,16,255,255,49,38,250,255,32,22,
+ 68,0,16,38,49,95,192,8,33,16,0,0,
+ 49,95,192,8,1,0,2,36,33,16,0,0,
+ 56,0,191,143,52,0,183,143,48,0,182,143,
+ 44,0,181,143,40,0,180,143,36,0,179,143,
+ 32,0,178,143,28,0,177,143,24,0,176,143,
+ 8,0,224,3,64,0,189,39,184,255,189,39,
+ 64,0,191,175,60,0,183,175,56,0,182,175,
+ 52,0,181,175,48,0,180,175,44,0,179,175,
+ 40,0,178,175,36,0,177,175,32,0,176,175,
+ 33,160,128,0,112,0,147,142,0,0,0,0,
+ 129,0,96,18,33,16,0,0,108,0,146,142,
+ 0,0,0,0,125,0,64,18,96,0,151,38,
+ 96,0,128,174,100,0,128,174,224,83,192,12,
+ 33,32,64,2,33,168,64,0,5,0,160,22,
+ 33,136,64,2,33,32,128,2,5,0,5,36,
+ 123,95,192,8,33,48,0,0,36,0,64,18,
+ 33,128,160,2,5,0,22,36,16,0,22,162,
+ 8,0,100,142,12,0,101,142,0,0,0,0,
+ 80,86,192,12,8,0,6,38,5,0,64,20,
+ 0,0,0,0,255,255,49,38,68,0,115,38,
+ 245,255,32,22,68,0,16,38,21,0,32,18,
+ 42,16,50,2,7,0,64,16,33,128,160,2,
+ 59,84,192,12,33,32,0,2,1,0,49,38,
+ 42,16,50,2,251,255,64,20,68,0,16,38,
+ 61,50,192,12,33,32,160,2,33,32,128,2,
+ 5,0,5,36,123,95,192,8,33,48,0,0,
+ 2,0,5,36,1,0,38,38,140,84,192,12,
+ 0,0,0,0,203,95,192,8,1,0,2,36,
+ 124,0,146,174,112,0,130,142,0,0,0,0,
+ 128,0,130,174,112,0,149,174,33,128,160,2,
+ 27,0,64,26,33,136,0,0,33,32,0,2,
+ 33,40,128,2,96,72,192,12,1,0,6,36,
+ 13,0,64,20,0,0,0,0,20,0,2,150,
+ 0,0,0,0,1,0,66,48,8,0,64,16,
+ 0,0,0,0,36,0,2,142,0,0,0,0,
+ 3,0,66,144,0,0,0,0,1,0,66,48,
+ 5,0,64,20,0,0,0,0,64,0,130,142,
+ 0,0,0,0,221,255,64,16,33,32,128,2,
+ 1,0,49,38,42,16,50,2,231,255,64,20,
+ 68,0,16,38,40,0,64,18,33,128,160,2,
+ 17,0,2,146,0,0,0,0,34,0,66,48,
+ 32,0,64,20,0,0,0,0,36,0,2,142,
+ 16,0,176,175,8,0,66,140,24,0,4,142,
+ 28,0,5,142,32,0,6,142,0,0,0,0,
+ 9,248,64,0,33,56,128,2,17,0,2,146,
+ 0,0,0,0,32,0,66,52,17,0,2,162,
+ 0,0,226,142,0,0,0,0,15,0,64,16,
+ 0,0,0,0,255,255,82,38,15,0,64,18,
+ 68,0,16,38,17,0,3,146,0,0,0,0,
+ 32,0,98,48,2,0,64,20,34,0,98,52,
+ 17,0,2,162,255,255,82,38,248,255,64,22,
+ 68,0,16,38,203,95,192,8,33,16,0,0,
+ 255,255,82,38,218,255,64,22,68,0,16,38,
+ 33,16,0,0,64,0,191,143,60,0,183,143,
+ 56,0,182,143,52,0,181,143,48,0,180,143,
+ 44,0,179,143,40,0,178,143,36,0,177,143,
+ 32,0,176,143,8,0,224,3,72,0,189,39,
+ 0,0,0,0,0,0,0,0,37,115,58,37,
+ 100,58,32,102,97,105,108,101,100,32,97,115,
+ 115,101,114,116,105,111,110,32,96,37,115,39,
+ 10,0,0,0,114,97,109,116,101,115,116,100,
+ 119,40,98,99,46,98,99,95,104,101,97,112,
+ 115,116,97,114,116,44,32,108,101,110,44,32,
+ 49,44,32,48,44,32,48,41,32,61,61,32,
+ 48,0,0,0,98,99,46,98,99,95,104,101,
+ 97,112,101,110,100,32,60,61,32,98,99,46,
+ 98,99,95,114,97,109,101,110,100,0,0,0,
+ 35,32,112,111,114,116,115,58,32,37,100,10,
+ 0,0,0,0,42,42,42,80,114,111,102,105,
+ 108,105,110,103,32,64,32,37,120,44,32,37,
+ 120,10,0,0,103,111,116,32,104,101,114,101,
+ 32,99,97,117,115,101,61,37,120,32,115,116,
+ 97,116,117,115,61,37,120,32,118,101,99,61,
+ 37,120,10,0,37,115,58,37,100,58,32,102,
+ 97,105,108,101,100,32,97,115,115,101,114,116,
+ 105,111,110,32,96,37,115,39,10,0,0,0,
+ 83,101,99,111,110,100,115,32,60,32,48,120,
+ 55,70,70,70,102,102,102,102,0,0,0,0,
+ 84,105,109,101,114,115,85,115,101,100,32,60,
+ 32,78,84,73,77,69,82,83,0,0,0,0,
+ 0,0,0,0,69,69,80,82,79,77,32,105,
+ 115,32,98,97,100,10,0,0,80,111,114,116,
+ 32,37,100,32,101,116,104,101,114,32,97,100,
+ 100,114,101,115,115,58,32,37,48,50,88,58,
+ 37,48,50,88,58,37,48,50,88,58,37,48,
+ 50,88,58,37,48,50,88,58,37,48,50,88,
+ 10,0,0,0,35,35,35,32,56,50,53,57,
+ 54,32,67,104,97,110,32,37,100,58,32,115,
+ 101,108,102,116,101,115,116,32,102,97,105,108,
+ 101,100,32,40,37,120,41,10,0,0,0,0,
+ 42,42,42,32,56,50,53,57,54,32,80,111,
+ 114,116,32,37,100,58,32,115,101,108,102,116,
+ 101,115,116,32,112,97,115,115,101,100,10,0,
+ 56,50,53,57,54,32,80,111,114,116,32,37,
+ 100,58,32,100,117,109,112,32,102,97,105,108,
+ 101,100,32,40,37,120,41,10,0,0,0,0,
+ 42,42,42,32,56,50,53,57,54,32,80,111,
+ 114,116,32,37,100,58,32,100,117,109,112,32,
+ 112,97,115,115,101,100,10,0,35,35,35,32,
+ 56,50,53,57,54,32,80,111,114,116,32,37,
+ 100,58,32,83,67,80,32,102,101,116,99,104,
+ 32,102,97,105,108,101,100,10,0,0,0,0,
+ 42,42,42,32,56,50,53,57,54,32,80,111,
+ 114,116,32,37,100,58,32,83,67,80,32,102,
+ 101,116,99,104,32,112,97,115,115,101,100,32,
+ 37,120,32,10,0,0,0,0,35,35,35,32,
+ 56,50,53,57,54,32,80,111,114,116,32,37,
+ 100,58,32,66,85,83,84,73,77,69,82,83,
+ 32,108,111,97,100,32,102,97,105,108,101,100,
+ 10,0,0,0,42,42,42,32,56,50,53,57,
+ 54,32,80,111,114,116,32,37,100,58,32,66,
+ 85,83,84,73,77,69,82,83,32,108,111,97,
+ 100,32,112,97,115,115,101,100,10,0,0,0,
+ 35,35,35,32,65,67,75,32,100,105,100,32,
+ 110,111,116,32,111,99,99,117,114,10,0,0,
+ 35,35,35,32,115,116,97,116,117,115,32,115,
+ 116,105,108,108,32,98,117,115,121,58,32,37,
+ 120,10,0,0,101,116,104,95,105,110,105,116,
+ 46,99,0,0,42,42,42,76,49,87,65,10,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,35,35,35,32,84,66,68,32,
+ 98,108,111,99,107,115,32,97,114,101,110,39,
+ 116,32,98,101,105,110,103,32,102,114,101,101,
+ 100,10,0,0,65,116,116,101,109,112,116,32,
+ 116,111,32,102,114,101,101,32,98,111,103,117,
+ 115,32,84,66,68,32,37,120,10,0,0,0,
+ 35,35,35,32,66,85,70,32,98,108,111,99,
+ 107,115,32,97,114,101,110,39,116,32,98,101,
+ 105,110,103,32,102,114,101,101,100,10,0,0,
+ 65,116,116,101,109,112,116,32,116,111,32,102,
+ 114,101,101,32,98,111,103,117,115,32,66,85,
+ 70,32,37,120,10,0,0,0,0,0,0,0,
+ 0,0,0,0,82,70,68,115,32,37,100,32,
+ 10,0,0,0,82,66,68,115,32,37,100,32,
+ 10,0,0,0,37,115,58,37,100,58,32,102,
+ 97,105,108,101,100,32,97,115,115,101,114,116,
+ 105,111,110,32,96,37,115,39,10,0,0,0,
+ 101,116,104,95,114,99,118,46,99,0,0,0,
+ 100,115,116,99,104,97,110,32,62,61,32,49,
+ 32,38,38,32,100,115,116,99,104,97,110,32,
+ 60,32,78,99,104,97,110,0,37,115,37,48,
+ 50,88,58,37,48,50,88,58,37,48,50,88,
+ 58,37,48,50,88,58,37,48,50,88,58,37,
+ 48,50,88,37,115,0,0,0,176,72,0,131,
+ 80,67,0,131,164,67,0,131,24,68,0,131,
+ 80,67,0,131,0,0,0,0,4,82,0,131,
+ 184,76,0,131,12,77,0,131,128,77,0,131,
+ 184,76,0,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,37,115,58,37,100,58,32,102,
+ 97,105,108,101,100,32,97,115,115,101,114,116,
+ 105,111,110,32,96,37,115,39,10,0,0,0,
+ 101,116,104,95,120,109,105,116,46,99,0,0,
+ 99,98,112,45,62,110,111,112,46,99,109,100,
+ 32,61,61,32,73,53,57,54,95,67,66,95,
+ 67,77,68,95,78,79,80,124,73,53,57,54,
+ 95,67,66,95,67,77,68,95,69,76,0,0,
+ 99,98,112,45,62,110,111,112,46,99,109,100,
+ 32,38,32,73,53,57,54,95,67,66,95,67,
+ 77,68,0,0,112,45,62,115,99,98,112,45,
+ 62,115,116,97,116,117,115,32,38,32,73,53,
+ 57,54,95,83,67,66,95,67,78,65,0,0,
+ 35,35,35,32,99,109,100,32,115,116,105,108,
+ 108,32,98,117,115,121,58,32,37,120,10,0,
+ 37,100,61,37,100,44,37,120,44,37,100,10,
+ 0,0,0,0,39,37,115,39,32,37,120,32,
+ 37,120,10,0,37,48,56,120,58,32,37,48,
+ 50,120,10,0,37,48,56,120,58,32,37,48,
+ 52,120,10,0,37,48,56,120,58,32,37,48,
+ 56,120,10,0,108,105,110,107,32,115,116,97,
+ 116,101,32,37,48,50,120,10,0,0,0,0,
+ 42,42,42,32,103,111,116,32,37,100,32,105,
+ 110,116,101,114,114,117,112,116,115,10,0,0,
+ 35,35,35,32,69,120,112,101,99,116,101,100,
+ 32,37,100,32,98,117,116,32,103,111,116,32,
+ 37,100,32,105,110,116,101,114,114,117,112,116,
+ 115,10,0,0,80,76,88,57,48,54,48,32,
+ 65,100,100,114,101,115,115,32,61,32,37,88,
+ 32,68,65,84,65,32,61,32,37,88,32,10,
+ 0,0,0,0,42,42,42,32,87,114,105,116,
+ 101,32,111,102,32,37,120,32,116,111,32,37,
+ 100,32,102,97,105,108,101,100,10,0,0,0,
+ 42,42,42,32,87,114,105,116,101,32,111,102,
+ 32,37,120,32,116,111,32,37,120,32,102,97,
+ 105,108,101,100,10,0,0,0,42,42,42,42,
+ 42,42,42,42,42,42,42,42,42,42,42,42,
+ 0,0,0,0,42,42,42,32,73,108,108,101,
+ 103,97,108,32,99,111,109,109,97,110,100,32,
+ 39,37,115,39,10,0,0,0,45,45,45,32,
+ 99,111,109,109,97,110,100,115,32,109,44,116,
+ 44,101,44,69,44,97,44,120,44,108,44,115,
+ 44,112,32,99,97,110,32,98,101,32,112,114,
+ 101,102,105,120,101,100,32,119,105,116,104,32,
+ 97,32,114,101,112,101,97,116,32,99,111,117,
+ 110,116,0,0,108,32,120,112,111,114,116,32,
+ 114,112,111,114,116,32,91,108,101,110,93,32,
+ 32,32,32,32,32,32,32,32,32,76,111,111,
+ 112,98,97,99,107,32,116,101,115,116,32,102,
+ 114,111,109,32,120,112,111,114,116,32,40,49,
+ 45,54,41,32,116,111,32,114,112,111,114,116,
+ 32,40,49,45,54,41,0,0,105,32,32,32,
+ 32,32,32,32,32,32,73,110,116,101,114,114,
+ 117,112,116,32,72,111,115,116,32,32,124,32,
+ 32,115,32,91,112,111,114,116,93,32,91,108,
+ 101,110,93,32,66,97,99,107,50,98,97,99,
+ 107,32,120,109,105,116,32,99,110,116,32,112,
+ 97,99,107,101,116,115,0,0,80,32,32,32,
+ 32,32,32,32,32,32,84,101,115,116,32,80,
+ 76,88,32,57,48,54,48,32,32,32,124,32,
+ 32,100,32,91,114,124,119,124,108,124,116,93,
+ 32,91,118,97,108,124,39,99,39,93,32,32,
+ 82,101,97,100,47,87,114,105,116,101,47,76,
+ 111,111,112,47,84,105,109,101,32,68,77,65,
+ 0,0,0,0,76,32,32,32,32,32,32,32,
+ 32,32,82,101,97,100,32,76,105,110,107,32,
+ 76,69,68,115,32,32,124,32,32,112,32,114,
+ 101,103,110,111,32,91,118,97,108,93,32,32,
+ 82,101,97,100,47,91,119,114,105,116,101,93,
+ 32,80,76,88,32,114,101,103,105,115,116,101,
+ 114,0,0,0,65,32,97,100,100,114,32,32,
+ 32,32,83,101,116,32,101,116,104,101,114,32,
+ 97,100,100,114,32,32,124,32,32,36,32,115,
+ 99,114,105,112,116,32,32,32,32,32,32,32,
+ 82,101,97,100,32,99,109,100,115,32,102,114,
+ 111,109,32,102,105,108,101,32,39,115,99,114,
+ 105,112,116,39,0,0,0,0,120,32,91,112,
+ 111,114,116,93,32,32,84,120,32,101,116,104,
+ 101,114,32,32,32,32,32,32,32,32,124,32,
+ 32,82,32,91,112,111,114,116,93,32,32,32,
+ 32,32,32,32,82,120,32,101,116,104,101,114,
+ 32,91,111,110,32,112,111,114,116,32,49,45,
+ 54,93,0,0,72,32,32,32,32,32,32,32,
+ 32,32,84,111,103,103,108,101,32,70,67,67,
+ 32,116,101,115,116,32,124,32,32,113,44,94,
+ 68,44,94,90,32,32,32,32,32,32,32,32,
+ 81,117,105,116,0,0,0,0,97,32,32,32,
+ 32,32,32,32,32,32,84,101,115,116,32,97,
+ 108,108,32,32,32,32,32,32,32,32,124,32,
+ 32,90,32,109,115,101,99,115,32,32,32,32,
+ 32,32,32,32,80,97,117,115,101,32,102,111,
+ 114,32,97,32,119,104,105,108,101,0,0,0,
+ 69,32,32,32,32,32,32,32,32,32,84,101,
+ 115,116,32,69,69,80,82,79,77,32,32,32,
+ 32,32,124,32,32,83,114,32,97,100,100,114,
+ 59,32,83,119,32,97,100,100,114,32,118,97,
+ 108,59,32,32,82,101,97,100,47,87,114,105,
+ 116,101,32,80,76,88,32,69,50,32,114,101,
+ 103,0,0,0,101,32,91,112,111,114,116,93,
+ 32,32,84,101,115,116,32,101,116,104,101,114,
+ 110,101,116,32,32,32,124,32,32,69,114,32,
+ 97,100,100,114,59,32,69,119,32,97,100,100,
+ 114,32,118,97,108,59,32,32,82,101,97,100,
+ 47,87,114,105,116,101,32,69,69,80,82,79,
+ 77,32,114,101,103,0,0,0,116,32,32,32,
+ 32,32,32,32,32,32,84,101,115,116,32,116,
+ 105,109,101,114,115,32,32,32,32,32,124,32,
+ 32,119,91,42,93,32,97,100,100,114,32,118,
+ 97,108,32,32,87,114,105,116,101,32,109,101,
+ 109,111,114,121,58,32,119,98,32,119,104,44,
+ 32,119,119,44,32,119,116,0,109,32,32,32,
+ 32,32,32,32,32,32,84,101,115,116,32,109,
+ 101,109,111,114,121,32,32,32,32,32,124,32,
+ 32,114,91,42,93,32,97,100,100,114,32,32,
+ 32,32,32,32,82,101,97,100,32,109,101,109,
+ 111,114,121,58,32,114,98,32,114,104,44,32,
+ 114,119,44,32,114,116,0,0,42,42,42,32,
+ 82,105,103,104,116,83,119,105,116,99,104,32,
+ 68,105,97,103,110,111,115,116,105,99,115,32,
+ 109,101,110,117,32,42,42,42,0,0,0,0,
+ 45,45,45,32,84,104,114,101,101,32,99,111,
+ 112,105,101,115,32,111,102,32,97,100,100,114,
+ 101,115,115,32,100,111,32,110,111,116,32,97,
+ 103,114,101,101,33,10,0,0,45,45,45,32,
+ 69,116,104,101,114,32,65,100,100,114,101,115,
+ 115,32,78,111,116,32,83,101,116,33,10,0,
+ 45,45,45,32,69,116,104,101,114,32,65,100,
+ 100,114,101,115,115,32,105,115,32,97,32,109,
+ 117,108,116,105,99,97,115,116,32,97,100,100,
+ 114,101,115,115,33,10,0,0,42,42,42,32,
+ 37,48,50,88,37,48,50,88,37,48,50,88,
+ 58,37,48,50,88,58,37,48,50,88,37,48,
+ 50,88,10,0,45,45,45,32,70,105,114,115,
+ 116,32,98,121,116,101,32,40,37,48,50,88,
+ 41,32,105,115,32,97,32,98,114,111,97,100,
+ 99,97,115,116,32,97,100,100,114,101,115,115,
+ 10,0,0,0,45,45,45,32,76,97,115,116,
+ 32,100,105,103,105,116,32,109,117,115,116,32,
+ 98,101,32,48,32,111,114,32,56,10,0,0,
+ 42,42,42,32,69,105,103,104,116,32,101,116,
+ 104,101,114,110,101,116,32,97,100,100,114,101,
+ 115,115,101,115,32,104,97,118,101,32,98,101,
+ 101,110,32,117,115,101,100,58,10,0,0,0,
+ 42,42,42,32,80,111,114,116,32,37,100,32,
+ 101,116,104,101,114,110,101,116,32,97,100,100,
+ 114,101,115,115,32,105,115,32,0,0,0,0,
+ 45,45,45,32,66,97,100,32,101,116,104,101,
+ 114,32,97,100,100,114,101,115,115,32,39,37,
+ 115,39,32,115,112,101,99,105,102,105,101,100,
+ 10,0,0,0,0,0,0,0,244,101,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,244,101,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,64,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,28,111,0,131,104,112,0,131,
+ 104,112,0,131,132,111,0,131,152,109,0,131,
+ 104,112,0,131,104,112,0,131,244,101,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 24,107,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,76,108,0,131,104,112,0,131,
+ 228,108,0,131,112,110,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 28,109,0,131,104,112,0,131,48,111,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 104,112,0,131,104,112,0,131,104,112,0,131,
+ 84,109,0,131,104,112,0,131,104,112,0,131,
+ 32,112,0,131,136,106,0,131,28,106,0,131,
+ 104,112,0,131,104,112,0,131,52,107,0,131,
+ 104,112,0,131,104,112,0,131,252,106,0,131,
+ 88,111,0,131,104,112,0,131,104,112,0,131,
+ 196,107,0,131,104,112,0,131,180,104,0,131,
+ 168,106,0,131,92,106,0,131,104,112,0,131,
+ 104,112,0,131,148,105,0,131,192,106,0,131,
+ 0,0,0,0,188,111,0,131,204,111,0,131,
+ 12,112,0,131,12,112,0,131,12,112,0,131,
+ 12,112,0,131,12,112,0,131,12,112,0,131,
+ 12,112,0,131,12,112,0,131,12,112,0,131,
+ 12,112,0,131,252,111,0,131,12,112,0,131,
+ 12,112,0,131,12,112,0,131,12,112,0,131,
+ 236,111,0,131,12,112,0,131,12,112,0,131,
+ 12,112,0,131,12,112,0,131,220,111,0,131,
+ 252,111,0,131,0,0,0,0,0,0,0,0,
+ 42,42,42,32,69,69,80,82,79,77,32,80,
+ 97,115,115,101,100,10,0,0,33,33,33,32,
+ 69,69,80,82,79,77,32,70,97,105,108,117,
+ 114,101,58,32,87,114,111,116,101,32,37,48,
+ 52,120,32,97,116,32,37,100,44,32,103,111,
+ 116,32,37,48,52,120,10,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,35,35,35,70,
+ 114,97,109,101,32,37,100,32,100,105,100,32,
+ 110,111,116,32,97,114,114,105,118,101,10,0,
+ 35,35,35,32,70,114,97,109,101,32,37,100,
+ 44,32,108,101,110,32,37,100,44,32,98,121,
+ 116,101,32,37,100,44,32,119,97,110,116,32,
+ 37,120,32,103,111,116,32,37,120,10,0,0,
+ 35,35,35,70,114,97,109,101,32,37,100,32,
+ 119,114,111,110,103,32,108,101,110,103,116,104,
+ 32,40,119,97,110,116,32,37,100,32,103,111,
+ 116,32,37,100,41,10,0,0,35,35,35,70,
+ 114,97,109,101,32,37,100,58,32,103,111,116,
+ 32,115,101,113,32,37,100,10,0,0,0,0,
+ 35,35,35,32,37,100,32,67,82,67,32,101,
+ 114,114,111,114,115,32,111,99,99,117,114,101,
+ 100,10,0,0,35,35,35,32,37,100,32,65,
+ 108,105,103,110,32,101,114,114,111,114,115,32,
+ 111,99,99,117,114,101,100,10,0,0,0,0,
+ 35,35,35,32,37,100,32,83,104,111,114,116,
+ 32,101,114,114,111,114,115,32,111,99,99,117,
+ 114,101,100,10,0,0,0,0,35,35,35,32,
+ 37,100,32,79,118,101,114,114,117,110,32,101,
+ 114,114,111,114,115,32,111,99,99,117,114,101,
+ 100,10,0,0,35,35,35,32,67,85,32,115,
+ 116,105,108,108,32,114,117,110,110,105,110,103,
+ 58,32,37,120,10,0,0,0,35,35,35,32,
+ 99,109,100,32,115,116,105,108,108,32,98,117,
+ 115,121,58,32,37,120,10,0,35,35,35,32,
+ 115,116,97,116,117,115,32,115,116,105,108,108,
+ 32,98,117,115,121,58,32,37,120,10,0,0,
+ 67,66,61,37,120,44,32,84,66,68,61,37,
+ 120,44,32,66,85,70,61,37,120,10,0,0,
+ 116,101,115,116,95,101,116,104,101,114,46,99,
+ 0,0,0,0,37,100,32,102,114,97,109,101,
+ 115,32,111,102,32,108,101,110,103,116,104,32,
+ 37,100,32,115,101,110,116,32,105,110,32,37,
+ 100,32,109,115,101,99,115,10,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 42,42,42,32,56,50,53,52,32,84,105,109,
+ 101,114,32,48,32,79,75,44,32,99,111,117,
+ 110,116,32,119,97,115,32,37,100,10,0,0,
+ 42,42,42,32,56,50,53,52,32,84,105,109,
+ 101,114,32,48,32,110,111,116,32,105,110,116,
+ 101,114,114,117,112,116,105,110,103,32,37,100,
+ 10,0,0,0,42,42,42,32,56,50,53,52,
+ 32,84,105,109,101,114,32,48,32,115,112,101,
+ 101,100,32,119,114,111,110,103,44,32,103,111,
+ 116,32,37,100,32,115,104,111,117,108,100,32,
+ 98,101,32,49,48,48,48,10,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 9,37,120,58,32,119,97,110,116,32,37,120,
+ 32,103,111,116,32,37,120,10,0,0,0,0,
+ 45,45,45,32,82,65,77,32,84,101,115,116,
+ 32,111,102,32,37,120,32,116,111,32,37,120,
+ 32,102,97,105,108,101,100,10,0,0,0,0,
+ 42,42,42,32,82,65,77,32,84,101,115,116,
+ 32,111,102,32,37,120,32,116,111,32,37,120,
+ 32,112,97,115,115,101,100,10,0,0,0,0,
+ 35,35,35,32,68,77,65,32,68,79,78,69,
+ 32,110,101,118,101,114,32,111,99,99,117,114,
+ 114,101,100,46,32,32,99,115,114,32,61,32,
+ 37,120,10,0,35,35,35,32,72,111,115,116,
+ 32,110,101,118,101,114,32,103,111,116,32,68,
+ 77,65,32,105,110,116,101,114,114,117,112,116,
+ 46,32,98,99,95,99,110,116,32,61,32,37,
+ 100,10,0,0,35,35,35,32,68,77,65,32,
+ 101,114,114,111,114,32,97,116,32,105,110,100,
+ 101,120,32,37,100,58,32,119,97,110,116,101,
+ 100,32,37,48,50,120,32,103,111,116,32,37,
+ 48,50,120,10,0,0,0,0,35,35,35,32,
+ 73,108,108,101,103,97,108,32,72,111,115,116,
+ 32,97,100,100,114,32,40,61,37,120,41,32,
+ 111,114,32,108,101,110,103,116,104,32,40,61,
+ 37,100,41,10,0,0,0,0,35,35,35,32,
+ 67,111,117,110,116,32,99,97,110,110,111,116,
+ 32,98,101,32,62,32,49,48,50,52,42,49,
+ 48,50,52,10,0,0,0,0,42,42,42,32,
+ 108,99,108,46,66,117,102,49,32,61,32,37,
+ 120,32,45,45,62,32,104,111,115,116,46,66,
+ 117,102,32,61,32,37,120,32,45,45,62,32,
+ 108,99,108,46,66,117,102,50,32,61,32,37,
+ 120,10,0,0,42,42,42,32,62,32,68,98,
+ 32,37,100,32,40,98,117,114,115,116,41,59,
+ 32,62,32,68,119,32,37,100,32,40,119,97,
+ 105,116,115,116,97,116,101,115,41,10,0,0,
+ 35,35,35,32,83,101,99,111,110,100,32,97,
+ 114,103,32,109,117,115,116,32,98,101,32,39,
+ 114,39,32,111,114,32,39,119,39,32,111,114,
+ 32,39,108,39,10,0,0,0,42,42,42,32,
+ 68,77,65,32,37,115,32,105,110,32,37,52,
+ 100,32,98,121,116,101,32,99,104,117,110,107,
+ 115,58,32,0,32,32,116,111,32,104,111,115,
+ 116,0,0,0,102,114,111,109,32,104,111,115,
+ 116,0,0,0,37,56,100,32,98,121,116,101,
+ 115,47,115,101,99,46,10,0,116,105,109,101,
+ 32,116,111,111,32,115,104,111,114,116,32,116,
+ 111,32,109,101,97,115,117,114,101,10,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 80,37,100,45,62,37,115,32,10,0,0,0,
+ 116,114,97,110,115,109,105,116,32,112,101,110,
+ 100,105,110,103,32,111,110,32,37,100,10,0,
+ 116,114,97,110,115,109,105,116,32,99,111,110,
+ 102,105,103,32,111,110,32,37,100,10,0,0,
+ 116,114,97,110,115,109,105,116,32,116,99,110,
+ 10,0,0,0,116,99,110,32,101,120,112,10,
+ 0,0,0,0,102,111,114,119,97,114,100,95,
+ 100,101,108,97,121,32,101,120,112,32,37,100,
+ 10,0,0,0,109,101,115,115,97,103,101,95,
+ 97,103,101,32,101,120,112,32,37,100,10,0,
+ 104,111,108,100,32,101,120,112,32,37,100,10,
+ 0,0,0,0,84,120,67,79,78,70,73,71,
+ 37,100,10,0,84,120,84,67,78,37,100,10,
+ 0,0,0,0,114,99,118,32,99,111,110,102,
+ 105,103,32,111,110,32,37,100,10,0,0,0,
+ 90,69,82,79,32,114,111,111,116,33,32,97,
+ 116,32,37,120,32,0,0,0,115,117,112,101,
+ 114,99,101,100,101,115,32,37,100,10,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 65,82,80,82,69,81,32,37,120,33,10,0,
+ 65,82,80,82,69,80,32,37,120,33,10,0,
+ 83,101,110,100,32,85,68,80,32,37,100,10,
+ 0,0,0,0,78,111,32,82,66,68,39,115,
+ 32,105,110,32,85,68,80,32,40,37,100,32,
+ 37,100,41,10,0,0,0,0,83,101,110,116,
+ 32,85,68,80,32,37,100,10,0,0,0,0,
+ 83,78,77,80,32,39,37,99,39,32,108,101,
+ 110,32,37,100,10,0,0,0,69,110,118,111,
+ 121,32,114,99,61,37,100,10,0,0,0,0,
+ 71,101,110,32,116,114,97,112,32,37,100,32,
+ 114,99,61,37,100,10,0,0,66,97,100,32,
+ 85,68,80,32,99,104,101,99,107,115,117,109,
+ 32,37,120,32,108,101,110,32,37,100,10,0,
+ 66,97,100,32,85,68,80,32,108,101,110,103,
+ 116,104,32,119,97,110,116,32,37,100,32,103,
+ 111,116,32,37,100,10,0,0,66,97,100,32,
+ 73,67,77,80,32,99,104,101,99,107,115,117,
+ 109,10,0,0,78,111,32,82,66,68,39,115,
+ 32,105,110,32,73,67,77,80,10,0,0,0,
+ 66,97,100,32,73,80,32,99,104,101,99,107,
+ 115,117,109,10,0,0,0,0,84,114,117,110,
+ 99,97,116,101,100,32,73,80,10,0,0,0,
+ 83,69,78,84,32,73,80,88,33,10,0,0,
+ 110,111,32,115,121,115,78,97,109,101,0,0,
+ 114,105,103,104,116,115,119,105,116,99,104,45,
+ 0,0,0,0,78,111,32,82,66,68,39,115,
+ 32,105,110,32,115,101,110,100,95,115,97,112,
+ 10,0,0,0,78,111,32,82,66,68,39,115,
+ 32,105,110,32,73,80,88,10,0,0,0,0,
+ 84,114,117,110,99,97,116,101,100,32,73,80,
+ 88,10,0,0,0,0,0,0,0,0,0,0,
+ 77,97,108,108,111,99,32,114,101,116,117,114,
+ 110,115,32,78,85,76,76,33,0,0,0,0,
+ 0,0,0,0,0,0,0,0,68,105,103,105,
+ 32,73,110,116,108,46,32,82,105,103,104,116,
+ 83,119,105,116,99,104,32,83,69,45,88,0,
+ 73,110,116,101,108,32,56,50,53,57,54,0,
+ 0,0,0,0,0,0,0,0,8,206,0,131,
+ 36,206,0,131,92,206,0,131,112,206,0,131,
+ 132,206,0,131,220,206,0,131,4,207,0,131,
+ 4,207,0,131,36,207,0,131,52,207,0,131,
+ 80,207,0,131,104,207,0,131,132,207,0,131,
+ 160,207,0,131,188,207,0,131,188,207,0,131,
+ 216,207,0,131,240,207,0,131,12,208,0,131,
+ 40,208,0,131,72,208,0,131,112,208,0,131,
+ 180,210,0,131,232,210,0,131,4,211,0,131,
+ 36,211,0,131,60,211,0,131,0,0,0,0,
+ 64,213,0,131,92,213,0,131,124,213,0,131,
+ 160,213,0,131,60,214,0,131,60,214,0,131,
+ 60,214,0,131,188,213,0,131,216,213,0,131,
+ 244,213,0,131,28,214,0,131,168,214,0,131,
+ 60,214,0,131,168,214,0,131,168,214,0,131,
+ 88,214,0,131,132,214,0,131,0,0,0,0,
+ 36,216,0,131,68,216,0,131,104,216,0,131,
+ 140,216,0,131,140,216,0,131,0,0,0,0,
+ 248,217,0,131,12,218,0,131,40,218,0,131,
+ 76,218,0,131,124,218,0,131,152,218,0,131,
+ 200,218,0,131,228,218,0,131,88,219,0,131,
+ 116,219,0,131,64,224,0,131,92,224,0,131,
+ 124,224,0,131,152,224,0,131,184,224,0,131,
+ 0,0,0,0,110,111,32,115,121,115,67,111,
+ 110,116,97,99,116,0,0,0,110,111,32,115,
+ 121,115,78,97,109,101,0,0,110,111,32,115,
+ 121,115,76,111,99,97,116,105,111,110,0,0,
+ 37,115,58,37,100,58,32,102,97,105,108,101,
+ 100,32,97,115,115,101,114,116,105,111,110,32,
+ 96,37,115,39,10,0,0,0,110,117,109,114,
+ 101,103,115,32,60,61,32,78,86,82,65,77,
+ 95,78,82,69,71,83,32,38,38,32,110,117,
+ 109,114,101,103,115,32,62,32,48,0,0,0,
+ 102,105,114,115,116,114,101,103,32,60,32,78,
+ 86,82,65,77,95,78,82,69,71,83,32,38,
+ 38,32,102,105,114,115,116,114,101,103,32,62,
+ 61,32,48,0,0,0,0,0,10,13,69,82,
+ 82,79,82,32,45,0,0,0,0,0,0,0,
+ 192,244,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,192,251,0,131,
+ 8,252,0,131,8,252,0,131,200,251,0,131,
+ 212,251,0,131,212,251,0,131,212,251,0,131,
+ 212,251,0,131,212,251,0,131,212,251,0,131,
+ 212,251,0,131,212,251,0,131,212,251,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,200,244,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 212,249,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,20,245,0,131,8,245,0,131,
+ 84,247,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 8,252,0,131,8,252,0,131,244,251,0,131,
+ 8,252,0,131,8,252,0,131,48,246,0,131,
+ 8,252,0,131,8,252,0,131,8,252,0,131,
+ 240,250,0,131,8,252,0,131,132,248,0,131,
+ 8,252,0,131,8,252,0,131,160,249,0,131,
+ 72,46,1,131,228,47,1,131,152,46,1,131,
+ 132,47,1,131,0,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,228,47,1,131,
+ 228,47,1,131,228,47,1,131,144,47,1,131,
+ 108,46,1,131,108,46,1,131,108,46,1,131,
+ 152,46,1,131,152,46,1,131,228,47,1,131,
+ 108,46,1,131,20,50,1,131,216,50,1,131,
+ 56,50,1,131,224,50,1,131,104,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 216,50,1,131,216,50,1,131,216,50,1,131,
+ 144,50,1,131,20,50,1,131,20,50,1,131,
+ 20,50,1,131,56,50,1,131,56,50,1,131,
+ 216,50,1,131,20,50,1,131,124,53,1,131,
+ 96,53,1,131,96,53,1,131,96,53,1,131,
+ 96,53,1,131,96,53,1,131,96,53,1,131,
+ 96,53,1,131,96,53,1,131,96,53,1,131,
+ 96,53,1,131,96,53,1,131,96,53,1,131,
+ 96,53,1,131,88,53,1,131,64,53,1,131,
+ 80,53,1,131,72,53,1,131,64,53,1,131,
+ 0,0,0,0,28,83,1,131,36,83,1,131,
+ 36,83,1,131,36,83,1,131,36,83,1,131,
+ 28,83,1,131,44,83,1,131,44,83,1,131,
+ 44,83,1,131,44,83,1,131,44,83,1,131,
+ 28,83,1,131,44,83,1,131,0,0,0,0,
+ 196,88,1,131,232,88,1,131,208,88,1,131,
+ 220,88,1,131,244,88,1,131,0,0,0,0,
+ 4,0,0,0,5,0,0,0,6,0,0,0,
+ 7,0,0,0,2,0,0,0,3,0,0,0,
+ 0,0,0,0,1,0,0,0,72,30,0,131,
+ 72,30,0,131,216,44,0,131,0,30,0,131,
+ 108,30,0,131,108,30,0,131,108,30,0,131,
+ 108,30,0,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,20,137,1,131,
+ 204,136,1,131,132,136,1,131,56,136,1,131,
+ 236,135,1,131,172,135,1,131,120,135,1,131,
+ 52,135,1,131,232,134,1,131,160,134,1,131,
+ 80,134,1,131,8,134,1,131,188,133,1,131,
+ 120,133,1,131,0,0,0,0,0,0,0,0,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,72,72,72,72,72,72,72,
+ 72,72,72,72,72,0,0,0,0,255,85,170,
+ 0,0,0,0,4,0,8,0,16,0,32,0,
+ 64,0,0,1,0,8,0,0,0,0,0,0,
+ 0,0,0,0,0,4,3,2,1,0,0,0,
+ 7,0,0,0,1,0,1,0,1,0,2,0,
+ 20,0,15,0,1,0,0,128,128,0,0,0,
+ 100,0,0,0,96,207,1,131,92,207,1,131,
+ 88,207,1,131,84,207,1,131,80,207,1,131,
+ 0,0,0,0,0,0,0,0,48,49,50,51,
+ 52,53,54,55,56,57,65,66,67,68,69,70,
+ 0,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,64,204,0,131,156,202,0,131,
+ 96,148,1,131,1,0,4,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 200,155,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,124,204,0,131,156,202,0,131,
+ 168,210,1,131,1,0,6,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 4,156,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,132,204,0,131,156,202,0,131,
+ 4,1,0,163,1,0,67,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 64,156,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,168,204,0,131,248,204,0,131,
+ 80,18,3,131,1,0,4,3,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 124,156,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,168,204,0,131,32,205,0,131,
+ 96,18,3,131,1,0,4,3,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 184,156,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,168,204,0,131,72,205,0,131,
+ 112,18,3,131,1,0,4,3,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 244,156,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,112,205,0,131,156,202,0,131,
+ 2,0,0,0,1,0,2,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 48,157,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 220,155,1,131,2,0,0,0,24,156,1,131,
+ 3,0,0,0,84,156,1,131,4,0,0,0,
+ 144,156,1,131,5,0,0,0,204,156,1,131,
+ 6,0,0,0,8,157,1,131,7,0,0,0,
+ 68,157,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,120,205,0,131,
+ 156,202,0,131,48,211,1,131,1,0,2,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,172,157,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,4,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 124,148,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,6,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,220,5,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,66,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,4,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,3,180,208,0,131,132,205,0,131,
+ 228,209,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,67,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,180,208,0,131,
+ 132,205,0,131,164,202,0,131,76,209,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,66,1,
+ 180,208,0,131,132,205,0,131,164,202,0,131,
+ 76,209,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,6,1,180,208,0,131,132,205,0,131,
+ 164,202,0,131,76,209,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,0,0,232,157,1,131,
+ 2,0,0,0,16,158,1,131,3,0,0,0,
+ 56,158,1,131,4,0,0,0,96,158,1,131,
+ 5,0,0,0,136,158,1,131,6,0,0,0,
+ 176,158,1,131,7,0,0,0,216,158,1,131,
+ 8,0,0,0,0,159,1,131,9,0,0,0,
+ 40,159,1,131,10,0,0,0,80,159,1,131,
+ 11,0,0,0,120,159,1,131,12,0,0,0,
+ 160,159,1,131,13,0,0,0,200,159,1,131,
+ 14,0,0,0,240,159,1,131,15,0,0,0,
+ 24,160,1,131,16,0,0,0,64,160,1,131,
+ 17,0,0,0,104,160,1,131,18,0,0,0,
+ 144,160,1,131,19,0,0,0,184,160,1,131,
+ 20,0,0,0,224,160,1,131,21,0,0,0,
+ 8,161,1,131,22,0,0,0,48,161,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 32,208,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,192,157,1,131,2,0,0,0,
+ 40,208,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,112,205,0,131,
+ 60,210,0,131,2,0,0,0,1,0,2,3,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,56,162,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,120,205,0,131,
+ 60,210,0,131,0,17,3,131,1,0,2,3,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,116,162,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,4,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,176,162,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,8,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,236,162,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,12,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,40,163,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,16,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,100,163,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,20,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,160,163,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,24,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,220,163,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,28,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,24,164,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,32,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,84,164,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,36,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,144,164,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,40,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,204,164,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,120,205,0,131,
+ 156,202,0,131,44,17,3,131,1,0,2,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,8,165,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,48,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,68,165,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,52,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,128,165,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,56,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,188,165,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,60,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,248,165,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,64,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,52,166,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,68,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,112,166,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,64,1,44,202,0,131,88,210,0,131,
+ 164,202,0,131,120,211,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,44,202,0,131,
+ 88,210,0,131,164,202,0,131,120,211,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,64,1,
+ 44,202,0,131,88,210,0,131,164,202,0,131,
+ 120,211,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,44,202,0,131,88,210,0,131,
+ 164,202,0,131,120,211,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,44,202,0,131,
+ 88,210,0,131,164,202,0,131,120,211,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 172,166,1,131,2,0,0,0,212,166,1,131,
+ 3,0,0,0,252,166,1,131,4,0,0,0,
+ 36,167,1,131,5,0,0,0,76,167,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 56,208,1,131,0,0,0,0,0,0,0,0,
+ 1,0,64,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,3,84,212,0,131,
+ 16,212,0,131,52,212,0,131,172,212,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,3,
+ 84,212,0,131,16,212,0,131,52,212,0,131,
+ 172,212,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,3,84,212,0,131,
+ 16,212,0,131,52,212,0,131,172,212,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,3,
+ 84,212,0,131,16,212,0,131,52,212,0,131,
+ 172,212,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,64,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,3,84,212,0,131,
+ 16,212,0,131,52,212,0,131,172,212,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,1,
+ 84,212,0,131,16,212,0,131,164,202,0,131,
+ 172,212,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,64,3,84,212,0,131,
+ 16,212,0,131,52,212,0,131,172,212,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,3,
+ 84,212,0,131,16,212,0,131,52,212,0,131,
+ 172,212,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,6,1,84,212,0,131,16,212,0,131,
+ 164,202,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,0,0,180,167,1,131,
+ 2,0,0,0,220,167,1,131,3,0,0,0,
+ 4,168,1,131,4,0,0,0,44,168,1,131,
+ 5,0,0,0,84,168,1,131,6,0,0,0,
+ 124,168,1,131,7,0,0,0,164,168,1,131,
+ 8,0,0,0,204,168,1,131,9,0,0,0,
+ 244,168,1,131,10,0,0,0,28,169,1,131,
+ 11,0,0,0,68,169,1,131,12,0,0,0,
+ 108,169,1,131,13,0,0,0,148,169,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 72,208,1,131,0,0,0,0,0,0,0,0,
+ 1,0,2,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,4,3,84,212,0,131,
+ 16,212,0,131,52,212,0,131,172,212,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,64,3,
+ 84,212,0,131,16,212,0,131,52,212,0,131,
+ 172,212,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,3,84,212,0,131,16,212,0,131,
+ 52,212,0,131,172,212,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,0,0,60,170,1,131,
+ 2,0,0,0,100,170,1,131,3,0,0,0,
+ 140,170,1,131,4,0,0,0,180,170,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 88,208,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,72,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,20,171,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,0,0,76,162,1,131,2,0,0,0,
+ 136,162,1,131,3,0,0,0,196,162,1,131,
+ 4,0,0,0,0,163,1,131,5,0,0,0,
+ 60,163,1,131,6,0,0,0,120,163,1,131,
+ 7,0,0,0,180,163,1,131,8,0,0,0,
+ 240,163,1,131,9,0,0,0,44,164,1,131,
+ 10,0,0,0,104,164,1,131,11,0,0,0,
+ 164,164,1,131,12,0,0,0,224,164,1,131,
+ 13,0,0,0,28,165,1,131,14,0,0,0,
+ 88,165,1,131,15,0,0,0,148,165,1,131,
+ 16,0,0,0,208,165,1,131,17,0,0,0,
+ 12,166,1,131,18,0,0,0,72,166,1,131,
+ 19,0,0,0,132,166,1,131,20,0,0,0,
+ 64,208,1,131,21,0,0,0,80,208,1,131,
+ 22,0,0,0,96,208,1,131,23,0,0,0,
+ 40,171,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,144,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,16,172,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,148,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,76,172,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,152,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,136,172,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,156,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,196,172,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,160,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,0,173,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,164,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,60,173,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,168,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,120,173,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,172,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,180,173,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,176,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,240,173,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,180,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,44,174,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,184,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,104,174,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,188,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,164,174,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,192,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,224,174,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,196,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,28,175,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,200,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,88,175,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,204,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,148,175,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,208,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,208,175,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,212,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,12,176,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,216,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,72,176,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,220,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,132,176,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,224,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,192,176,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,228,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,252,176,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,232,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,56,177,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,236,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,116,177,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,240,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,176,177,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,244,16,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,236,177,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,0,0,36,172,1,131,2,0,0,0,
+ 96,172,1,131,3,0,0,0,156,172,1,131,
+ 4,0,0,0,216,172,1,131,5,0,0,0,
+ 20,173,1,131,6,0,0,0,80,173,1,131,
+ 7,0,0,0,140,173,1,131,8,0,0,0,
+ 200,173,1,131,9,0,0,0,4,174,1,131,
+ 10,0,0,0,64,174,1,131,11,0,0,0,
+ 124,174,1,131,12,0,0,0,184,174,1,131,
+ 13,0,0,0,244,174,1,131,14,0,0,0,
+ 48,175,1,131,15,0,0,0,108,175,1,131,
+ 16,0,0,0,168,175,1,131,17,0,0,0,
+ 228,175,1,131,18,0,0,0,32,176,1,131,
+ 19,0,0,0,92,176,1,131,20,0,0,0,
+ 152,176,1,131,21,0,0,0,212,176,1,131,
+ 22,0,0,0,16,177,1,131,23,0,0,0,
+ 76,177,1,131,24,0,0,0,136,177,1,131,
+ 25,0,0,0,196,177,1,131,26,0,0,0,
+ 0,178,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,112,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,0,179,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,116,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,60,179,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,120,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,120,179,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,76,210,0,131,
+ 156,202,0,131,124,17,3,131,1,0,65,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,180,179,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,200,212,0,131,
+ 156,202,0,131,220,5,0,163,1,0,64,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,240,179,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,112,205,0,131,
+ 156,202,0,131,161,0,0,0,1,0,2,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,44,180,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,0,0,4,180,1,131,2,0,0,0,
+ 64,180,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,120,208,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,20,179,1,131,
+ 2,0,0,0,80,179,1,131,3,0,0,0,
+ 140,179,1,131,4,0,0,0,200,179,1,131,
+ 5,0,0,0,128,208,1,131,0,0,0,0,
+ 0,0,0,0,1,0,2,1,44,202,0,131,
+ 208,212,0,131,164,202,0,131,196,214,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 44,202,0,131,208,212,0,131,164,202,0,131,
+ 196,214,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,208,212,0,131,
+ 164,202,0,131,196,214,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 208,212,0,131,164,202,0,131,196,214,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 44,202,0,131,208,212,0,131,164,202,0,131,
+ 196,214,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,208,212,0,131,
+ 164,202,0,131,196,214,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 208,212,0,131,164,202,0,131,196,214,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 44,202,0,131,208,212,0,131,164,202,0,131,
+ 196,214,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,208,212,0,131,
+ 164,202,0,131,196,214,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 208,212,0,131,164,202,0,131,196,214,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 44,202,0,131,208,212,0,131,164,202,0,131,
+ 196,214,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,208,212,0,131,
+ 164,202,0,131,196,214,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 208,212,0,131,164,202,0,131,196,214,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,6,1,
+ 44,202,0,131,208,212,0,131,164,202,0,131,
+ 196,214,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,0,0,192,180,1,131,2,0,0,0,
+ 232,180,1,131,3,0,0,0,16,181,1,131,
+ 4,0,0,0,56,181,1,131,5,0,0,0,
+ 96,181,1,131,6,0,0,0,136,181,1,131,
+ 7,0,0,0,176,181,1,131,8,0,0,0,
+ 216,181,1,131,9,0,0,0,0,182,1,131,
+ 10,0,0,0,40,182,1,131,11,0,0,0,
+ 80,182,1,131,13,0,0,0,120,182,1,131,
+ 16,0,0,0,160,182,1,131,17,0,0,0,
+ 200,182,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,144,208,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 160,208,1,131,2,0,0,0,168,208,1,131,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,184,208,1,131,2,0,0,0,
+ 192,208,1,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,208,208,1,131,2,0,0,0,
+ 216,208,1,131,3,0,0,0,224,208,1,131,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,240,208,1,131,2,0,0,0,
+ 248,208,1,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 8,209,1,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,1,0,0,0,24,209,1,131,
+ 2,0,0,0,32,209,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,48,209,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,64,209,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,232,208,1,131,
+ 2,0,0,0,0,209,1,131,3,0,0,0,
+ 16,209,1,131,4,0,0,0,40,209,1,131,
+ 5,0,0,0,56,209,1,131,6,0,0,0,
+ 72,209,1,131,0,0,0,0,0,0,0,0,
+ 2,0,0,0,152,208,1,131,6,0,0,0,
+ 176,208,1,131,7,0,0,0,200,208,1,131,
+ 8,0,0,0,80,209,1,131,0,0,0,0,
+ 0,0,0,0,7,0,0,0,88,209,1,131,
+ 0,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 128,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 8,185,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 144,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 68,185,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 148,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 128,185,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 132,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 188,185,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 136,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 248,185,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 140,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 52,186,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 156,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 112,186,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 160,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 172,186,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 164,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 232,186,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 168,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 36,187,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 172,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 96,187,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 176,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 156,187,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 180,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 216,187,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 184,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 20,188,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 188,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 80,188,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 192,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 140,188,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 196,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 200,188,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 200,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 4,189,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 204,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 64,189,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 208,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 124,189,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 212,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 184,189,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 220,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 244,189,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 224,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 48,190,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 228,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 108,190,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 232,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 168,190,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 236,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 228,190,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,92,215,0,131,156,202,0,131,
+ 240,17,3,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 32,191,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,104,215,0,131,140,215,0,131,
+ 0,0,0,0,1,0,2,3,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 92,191,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 28,185,1,131,2,0,0,0,88,185,1,131,
+ 3,0,0,0,148,185,1,131,4,0,0,0,
+ 208,185,1,131,5,0,0,0,12,186,1,131,
+ 6,0,0,0,72,186,1,131,8,0,0,0,
+ 132,186,1,131,9,0,0,0,192,186,1,131,
+ 10,0,0,0,252,186,1,131,11,0,0,0,
+ 56,187,1,131,12,0,0,0,116,187,1,131,
+ 13,0,0,0,176,187,1,131,14,0,0,0,
+ 236,187,1,131,15,0,0,0,40,188,1,131,
+ 16,0,0,0,100,188,1,131,17,0,0,0,
+ 160,188,1,131,18,0,0,0,220,188,1,131,
+ 19,0,0,0,24,189,1,131,20,0,0,0,
+ 84,189,1,131,21,0,0,0,144,189,1,131,
+ 22,0,0,0,204,189,1,131,24,0,0,0,
+ 8,190,1,131,25,0,0,0,68,190,1,131,
+ 26,0,0,0,128,190,1,131,27,0,0,0,
+ 188,190,1,131,28,0,0,0,248,190,1,131,
+ 29,0,0,0,52,191,1,131,30,0,0,0,
+ 112,191,1,131,0,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,180,215,0,131,
+ 156,202,0,131,218,12,3,131,1,0,4,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,128,192,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,120,205,0,131,
+ 156,202,0,131,40,211,1,131,1,0,2,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,188,192,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 8,202,0,131,148,38,1,131,112,205,0,131,
+ 156,202,0,131,2,0,0,0,1,0,2,1,
+ 32,45,1,131,208,48,1,131,160,49,1,131,
+ 20,48,1,131,248,192,1,131,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,44,202,0,131,200,215,0,131,
+ 164,202,0,131,196,216,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,44,202,0,131,
+ 200,215,0,131,164,202,0,131,196,216,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,6,1,
+ 44,202,0,131,200,215,0,131,164,202,0,131,
+ 196,216,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,200,215,0,131,
+ 164,202,0,131,196,216,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 200,215,0,131,164,202,0,131,196,216,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 52,193,1,131,2,0,0,0,92,193,1,131,
+ 3,0,0,0,132,193,1,131,4,0,0,0,
+ 172,193,1,131,5,0,0,0,212,193,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 112,209,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,148,192,1,131,2,0,0,0,
+ 208,192,1,131,3,0,0,0,12,193,1,131,
+ 4,0,0,0,120,209,1,131,0,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 112,205,0,131,156,202,0,131,3,0,0,0,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,100,194,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,60,210,0,131,216,12,3,131,
+ 1,0,2,3,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,160,194,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 104,217,0,131,156,202,0,131,0,0,0,0,
+ 1,0,67,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,220,194,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 104,217,0,131,156,202,0,131,0,0,0,0,
+ 1,0,65,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,24,195,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 112,217,0,131,156,202,0,131,216,12,3,131,
+ 1,0,4,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,84,195,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 120,205,0,131,156,202,0,131,224,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,144,195,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 120,205,0,131,156,202,0,131,228,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,204,195,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,156,202,0,131,232,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,8,196,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,156,202,0,131,234,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,68,196,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,156,202,0,131,246,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,128,196,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,156,202,0,131,236,12,3,131,
+ 1,0,2,1,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,188,196,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,60,210,0,131,238,12,3,131,
+ 1,0,2,3,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,248,196,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,60,210,0,131,240,12,3,131,
+ 1,0,2,3,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,52,197,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,8,202,0,131,148,38,1,131,
+ 92,217,0,131,60,210,0,131,242,12,3,131,
+ 1,0,2,3,32,45,1,131,208,48,1,131,
+ 160,49,1,131,20,48,1,131,112,197,1,131,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,180,219,0,131,
+ 132,217,0,131,164,202,0,131,104,220,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,3,
+ 180,219,0,131,132,217,0,131,0,221,0,131,
+ 104,220,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,180,219,0,131,132,217,0,131,
+ 164,202,0,131,104,220,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,3,180,219,0,131,
+ 132,217,0,131,0,221,0,131,104,220,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,2,3,
+ 180,219,0,131,132,217,0,131,0,221,0,131,
+ 104,220,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,4,1,180,219,0,131,132,217,0,131,
+ 164,202,0,131,104,220,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,180,219,0,131,
+ 132,217,0,131,164,202,0,131,104,220,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,4,1,
+ 180,219,0,131,132,217,0,131,164,202,0,131,
+ 104,220,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,4,1,180,219,0,131,132,217,0,131,
+ 164,202,0,131,104,220,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,180,219,0,131,
+ 132,217,0,131,164,202,0,131,104,220,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 172,197,1,131,2,0,0,0,212,197,1,131,
+ 3,0,0,0,252,197,1,131,4,0,0,0,
+ 36,198,1,131,5,0,0,0,76,198,1,131,
+ 6,0,0,0,116,198,1,131,7,0,0,0,
+ 156,198,1,131,8,0,0,0,196,198,1,131,
+ 9,0,0,0,236,198,1,131,10,0,0,0,
+ 20,199,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,136,209,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,120,194,1,131,
+ 2,0,0,0,180,194,1,131,3,0,0,0,
+ 240,194,1,131,4,0,0,0,44,195,1,131,
+ 5,0,0,0,104,195,1,131,6,0,0,0,
+ 164,195,1,131,7,0,0,0,224,195,1,131,
+ 8,0,0,0,28,196,1,131,9,0,0,0,
+ 88,196,1,131,10,0,0,0,148,196,1,131,
+ 11,0,0,0,208,196,1,131,12,0,0,0,
+ 12,197,1,131,13,0,0,0,72,197,1,131,
+ 14,0,0,0,132,197,1,131,15,0,0,0,
+ 144,209,1,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,76,210,0,131,156,202,0,131,
+ 160,211,1,131,1,0,65,1,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 44,200,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,8,202,0,131,
+ 148,38,1,131,120,205,0,131,60,210,0,131,
+ 140,1,0,163,1,0,2,3,32,45,1,131,
+ 208,48,1,131,160,49,1,131,20,48,1,131,
+ 104,200,1,131,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,4,1,
+ 44,202,0,131,36,222,0,131,164,202,0,131,
+ 48,223,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,44,202,0,131,36,222,0,131,
+ 164,202,0,131,48,223,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,2,1,44,202,0,131,
+ 36,222,0,131,164,202,0,131,48,223,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,0,0,
+ 164,200,1,131,2,0,0,0,204,200,1,131,
+ 3,0,0,0,244,200,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,168,209,1,131,
+ 0,0,0,0,0,0,0,0,1,0,2,1,
+ 44,202,0,131,212,223,0,131,164,202,0,131,
+ 252,224,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,2,1,44,202,0,131,212,223,0,131,
+ 164,202,0,131,252,224,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,65,1,44,202,0,131,
+ 212,223,0,131,164,202,0,131,252,224,0,131,
+ 0,0,0,0,0,0,0,0,255,0,0,0,
+ 255,0,0,0,0,0,0,0,1,0,65,1,
+ 44,202,0,131,212,223,0,131,164,202,0,131,
+ 252,224,0,131,0,0,0,0,0,0,0,0,
+ 255,0,0,0,255,0,0,0,0,0,0,0,
+ 1,0,65,1,44,202,0,131,212,223,0,131,
+ 164,202,0,131,252,224,0,131,0,0,0,0,
+ 0,0,0,0,255,0,0,0,255,0,0,0,
+ 0,0,0,0,1,0,0,0,76,201,1,131,
+ 2,0,0,0,116,201,1,131,3,0,0,0,
+ 156,201,1,131,4,0,0,0,196,201,1,131,
+ 5,0,0,0,236,201,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,184,209,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 64,200,1,131,2,0,0,0,124,200,1,131,
+ 3,0,0,0,176,209,1,131,4,0,0,0,
+ 192,209,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,128,209,1,131,2,0,0,0,
+ 152,209,1,131,3,0,0,0,160,209,1,131,
+ 4,0,0,0,200,209,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,24,208,1,131,
+ 2,0,0,0,48,208,1,131,4,0,0,0,
+ 104,208,1,131,5,0,0,0,112,208,1,131,
+ 7,0,0,0,136,208,1,131,10,0,0,0,
+ 96,209,1,131,11,0,0,0,104,209,1,131,
+ 17,0,0,0,208,209,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,216,209,1,131,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,240,209,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,1,0,0,0,8,210,1,131,
+ 2,0,0,0,16,210,1,131,3,0,0,0,
+ 24,210,1,131,4,0,0,0,32,210,1,131,
+ 5,0,0,0,40,210,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 56,210,1,131,2,0,0,0,64,210,1,131,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 72,210,1,131,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 48,210,1,131,2,0,0,0,80,210,1,131,
+ 3,0,0,0,88,210,1,131,0,0,0,0,
+ 0,0,0,0,1,0,0,0,16,208,1,131,
+ 2,0,0,0,224,209,1,131,3,0,0,0,
+ 232,209,1,131,4,0,0,0,248,209,1,131,
+ 5,0,0,0,0,210,1,131,6,0,0,0,
+ 96,210,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,104,210,1,131,0,0,0,0,
+ 0,0,0,0,6,0,0,0,112,210,1,131,
+ 0,0,0,0,0,0,0,0,3,0,0,0,
+ 120,210,1,131,0,0,0,0,0,0,0,0,
+ 1,0,0,0,128,210,1,131,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,3,0,0,0,6,0,0,0,
+ 1,0,0,0,2,0,0,0,1,0,0,0,
+ 10,0,0,0,7,0,0,0,8,0,0,0,
+ 2,0,0,0,2,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,112,117,98,108,
+ 105,99,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,112,114,105,118,97,116,101,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 83,78,77,80,95,116,114,97,112,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 3,0,0,0,6,0,0,0,1,0,0,0,
+ 4,0,0,0,1,0,0,0,76,1,0,0,
+ 5,0,0,0,1,0,0,0,1,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 3,0,0,0,6,0,0,0,1,0,0,0,
+ 2,0,0,0,1,0,0,0,2,0,0,0,
+ 2,0,0,0,1,0,0,0,1,0,0,0,
+ 0,0,0,0,1,0,0,0,3,0,0,0,
+ 6,0,0,0,1,0,0,0,2,0,0,0,
+ 1,0,0,0,17,0,0,0,0,0,0,0,
+ 0,0,0,0,48,49,50,51,52,53,54,55,
+ 56,57,65,66,67,68,69,70,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 255,85,170,0,255,255,255,255,85,85,85,85,
+ 170,170,170,170,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,64,40,35,41,
+ 32,67,111,112,121,114,105,103,104,116,32,40,
+ 99,41,32,49,57,56,54,32,45,32,49,57,
+ 57,53,32,32,69,112,105,108,111,103,117,101,
+ 32,84,101,99,104,110,111,108,111,103,121,32,
+ 67,111,114,112,111,114,97,116,105,111,110,10,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,255,255,255,255,
+ 72,10,0,0,78,10,0,0,83,10,0,0,
+ 69,10,0,0,109,97,105,110,46,99,0,0,
+ 48,0,0,0,55,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 0,0,0,0,116,105,109,101,114,46,99,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 100,0,0,0,100,0,0,0,1,0,0,0,
+ 0,0,0,0,115,114,99,32,0,0,0,0,
+ 32,0,0,0,100,115,116,32,0,0,0,0,
+ 32,37,48,50,88,0,0,0,10,0,0,0,
+ 255,255,255,255,48,48,48,48,48,48,0,0,
+ 48,48,48,48,48,49,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,255,255,255,255,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 62,32,0,0,37,120,10,0,37,120,58,9,
+ 37,120,10,0,37,115,10,0,0,0,0,0,
+ 10,0,0,0,0,0,0,0,0,0,0,0,
+ 68,85,77,80,10,0,0,0,37,48,50,120,
+ 32,0,0,0,10,0,0,0,0,0,0,0,
+ 37,100,32,112,112,115,10,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,0,0,0,0,0,0,0,1,0,0,0,
+ 119,119,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,128,194,0,
+ 0,0,0,0,1,128,194,0,0,16,0,0,
+ 66,76,75,0,70,87,68,0,76,82,78,0,
+ 76,73,83,0,68,73,83,0,72,69,76,76,
+ 79,10,0,0,116,99,32,101,120,112,10,0,
+ 102,114,111,109,32,0,0,0,10,0,0,0,
+ 87,101,105,114,100,0,0,0,0,0,0,0,
+ 0,0,0,0,255,255,255,255,255,255,255,255,
+ 255,255,0,0,255,255,255,255,255,255,0,0,
+ 0,0,0,0,0,0,0,0,80,65,68,37,
+ 100,10,0,0,170,170,3,0,0,0,0,0,
+ 83,69,78,84,33,10,0,0,85,68,80,10,
+ 0,0,0,0,73,67,77,80,10,0,0,0,
+ 69,67,72,79,10,0,0,0,73,80,10,0,
+ 170,170,3,0,0,0,0,0,73,80,88,33,
+ 10,0,0,0,0,0,0,0,255,255,255,255,
+ 255,255,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,192,155,1,131,0,0,0,0,
+ 108,157,1,131,0,0,0,0,88,161,1,131,
+ 0,0,0,0,16,162,1,131,0,0,0,0,
+ 32,162,1,131,0,0,0,0,116,167,1,131,
+ 0,0,0,0,164,167,1,131,0,0,0,0,
+ 188,169,1,131,0,0,0,0,44,170,1,131,
+ 0,0,0,0,220,170,1,131,0,0,0,0,
+ 4,171,1,131,0,0,0,0,80,171,1,131,
+ 0,0,0,0,40,178,1,131,0,0,0,0,
+ 104,180,1,131,0,0,0,0,128,180,1,131,
+ 0,0,0,0,144,180,1,131,0,0,0,0,
+ 240,182,1,131,0,0,0,0,104,183,1,131,
+ 0,0,0,0,120,183,1,131,0,0,0,0,
+ 128,183,1,131,0,0,0,0,136,183,1,131,
+ 0,0,0,0,160,183,1,131,0,0,0,0,
+ 168,183,1,131,0,0,0,0,176,183,1,131,
+ 0,0,0,0,200,183,1,131,0,0,0,0,
+ 208,183,1,131,0,0,0,0,216,183,1,131,
+ 0,0,0,0,224,183,1,131,0,0,0,0,
+ 0,184,1,131,0,0,0,0,8,184,1,131,
+ 0,0,0,0,16,184,1,131,0,0,0,0,
+ 40,184,1,131,0,0,0,0,48,184,1,131,
+ 0,0,0,0,64,184,1,131,0,0,0,0,
+ 72,184,1,131,0,0,0,0,80,184,1,131,
+ 0,0,0,0,104,184,1,131,0,0,0,0,
+ 112,184,1,131,0,0,0,0,128,184,1,131,
+ 0,0,0,0,136,184,1,131,0,0,0,0,
+ 152,184,1,131,0,0,0,0,208,184,1,131,
+ 0,0,0,0,248,184,1,131,0,0,0,0,
+ 152,191,1,131,0,0,0,0,252,193,1,131,
+ 0,0,0,0,44,194,1,131,0,0,0,0,
+ 60,194,1,131,0,0,0,0,60,199,1,131,
+ 0,0,0,0,148,199,1,131,0,0,0,0,
+ 164,199,1,131,0,0,0,0,36,200,1,131,
+ 0,0,0,0,28,201,1,131,0,0,0,0,
+ 60,201,1,131,0,0,0,0,20,202,1,131,
+ 0,0,0,0,68,202,1,131,0,0,0,0,
+ 84,202,1,131,0,0,0,0,124,202,1,131,
+ 0,0,0,0,164,202,1,131,0,0,0,0,
+ 236,202,1,131,0,0,0,0,252,202,1,131,
+ 0,0,0,0,4,203,1,131,0,0,0,0,
+ 12,203,1,131,0,0,0,0,28,203,1,131,
+ 0,0,0,0,36,203,1,131,0,0,0,0,
+ 44,203,1,131,0,0,0,0,52,203,1,131,
+ 0,0,0,0,60,203,1,131,0,0,0,0,
+ 68,203,1,131,0,0,0,0,76,203,1,131,
+ 0,0,0,0,124,203,1,131,0,0,0,0,
+ 132,203,1,131,0,0,0,0,140,203,1,131,
+ 0,0,0,0,164,203,1,131,0,0,0,0,
+ 180,203,1,131,0,0,0,0,188,203,1,131,
+ 0,0,0,0,220,203,1,131,0,0,0,0,
+ 20,204,1,131,0,0,0,0,36,204,1,131,
+ 0,0,0,0,52,204,1,131,0,0,0,0,
+ 68,204,1,131,255,255,255,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,1,0,0,0,
+ 10,0,0,0,10,0,0,0,0,205,1,131,
+ 10,0,0,0,7,0,0,0,0,0,0,0,
+ 0,0,0,0,255,255,255,255,110,118,114,97,
+ 109,46,99,0,114,99,0,0,48,120,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0
+ } ;
+static int dgrs_ncode = 119520 ;
diff --git a/drivers/net/dgrs_i82596.h b/drivers/net/dgrs_i82596.h
new file mode 100644
index 000000000000..c7a38c16a00b
--- /dev/null
+++ b/drivers/net/dgrs_i82596.h
@@ -0,0 +1,473 @@
+/*
+ * i82596 ethernet controller bits and structures (little endian)
+ *
+ * $Id: i82596.h,v 1.8 1996/09/03 11:19:03 rick Exp $
+ */
+
+/************************************************************************/
+/* */
+/* PORT commands (p. 4-20). The least significant nibble is one */
+/* of these commands, the rest of the command is a memory address */
+/* aligned on a 16 byte boundary. Note that port commands must */
+/* be written to the PORT address and the PORT address+2 with two */
+/* halfword writes. Write the LSH first to PORT, then the MSH to */
+/* PORT+2. Blame Intel. */
+/* */
+/************************************************************************/
+#define I596_PORT_RESET 0x0 /* Reset. Wait 5 SysClks & 10 TxClks */
+#define I596_PORT_SELFTEST 0x1 /* Do a selftest */
+#define I596_PORT_SCP_ADDR 0x2 /* Set new SCP address */
+#define I596_PORT_DUMP 0x3 /* Dump internal data structures */
+
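+/*
+ * Illustrative sketch, not part of the original header: one way the
+ * two-halfword PORT write described above could be issued.  The outw()
+ * helpers, port_addr and dump_area_phys are assumptions for the example,
+ * not definitions from this file.
+ */
+#if 0	/* example only, never compiled */
+static void i596_issue_port_cmd(unsigned long port_addr, ulong cmd)
+{
+	/* low halfword first to PORT, then high halfword to PORT+2 */
+	outw((ushort) (cmd & 0xffff), port_addr);
+	outw((ushort) (cmd >> 16), port_addr + 2);
+}
+/* e.g. i596_issue_port_cmd(port_addr, dump_area_phys | I596_PORT_DUMP); */
+#endif
+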
+/*
+ * I596_ST: Selftest results (p. 4-21)
+ */
+typedef volatile struct
+{
+ ulong signature; /* ROM checksum */
+ ulong result; /* Selftest results: non-zero is a failure */
+} I596_ST;
+
+#define I596_ST_SELFTEST_FAIL 0x1000 /* Selftest Failed */
+#define I596_ST_DIAGNOSE_FAIL 0x0020 /* Diagnose Failed */
+#define I596_ST_BUSTIMER_FAIL 0x0010 /* Bus Timer Failed */
+#define I596_ST_REGISTER_FAIL 0x0008 /* Register Failed */
+#define I596_ST_ROM_FAIL 0x0004 /* ROM Failed */
+
+/*
+ * I596_DUMP: Dump results
+ */
+typedef volatile struct
+{
+ ulong dump[77];
+} I596_DUMP;
+
+/************************************************************************/
+/* */
+/* I596_TBD: Transmit Buffer Descriptor (p. 4-59) */
+/* */
+/************************************************************************/
+typedef volatile struct _I596_TBD
+{
+ ulong count;
+ vol struct _I596_TBD *next;
+ uchar *buf;
+ ushort unused1;
+ ushort unused2;
+} I596_TBD;
+
+#define I596_TBD_NOLINK ((I596_TBD *) 0xffffffff)
+#define I596_TBD_EOF 0x8000
+#define I596_TBD_COUNT_MASK 0x3fff
+
+/************************************************************************/
+/* */
+/* I596_TFD: Transmit Frame Descriptor (p. 4-56) */
+/* a.k.a. I596_CB_XMIT */
+/* */
+/************************************************************************/
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ I596_TBD *tbdp;
+ ulong count; /* for speed */
+
+ /* Application defined data follows structure... */
+
+#if 0 /* We don't use these intel defined ones */
+ uchar addr[6];
+ ushort len;
+ uchar data[1];
+#else
+ ulong dstchan;/* Used by multi-NIC mode */
+#endif
+} I596_TFD;
+
+#define I596_TFD_NOCRC 0x0010 /* cmd: No CRC insertion */
+#define I596_TFD_FLEX 0x0008 /* cmd: Flexible mode */
+
+/************************************************************************/
+/* */
+/* I596_RBD: Receive Buffer Descriptor (p. 4-84) */
+/* */
+/************************************************************************/
+typedef volatile struct _I596_RBD
+{
+#ifdef INTEL_RETENTIVE
+ ushort count; /* Length of data in buf */
+ ushort offset;
+#else
+ ulong count; /* Length of data in buf */
+#endif
+ vol struct _I596_RBD *next; /* Next buffer descriptor in list */
+ uchar *buf; /* Data buffer */
+#ifdef INTEL_RETENTIVE
+ ushort size; /* Size of buf (constant) */
+ ushort zero;
+#else
+ ulong size; /* Size of buf (constant) */
+#endif
+
+ /* Application defined data follows structure... */
+
+ uchar chan;
+ uchar refcnt;
+ ushort len;
+} I596_RBD;
+
+#define I596_RBD_NOLINK ((I596_RBD *) 0xffffffff)
+#define I596_RBD_EOF 0x8000 /* This is last buffer in a frame */
+#define I596_RBD_F 0x4000 /* The actual count is valid */
+
+#define I596_RBD_EL 0x8000 /* Last buffer in list */
+
+/************************************************************************/
+/* */
+/* I596_RFD: Receive Frame Descriptor (p. 4-79) */
+/* */
+/************************************************************************/
+typedef volatile struct _I596_RFD
+{
+ ushort status;
+ ushort cmd;
+ vol struct _I596_RFD *next;
+ vol struct _I596_RBD *rbdp;
+ ushort count; /* Len of data in RFD: always 0 */
+ ushort size; /* Size of RFD buffer: always 0 */
+
+ /* Application defined data follows structure... */
+
+# if 0 /* We don't use these intel defined ones */
+ uchar addr[6];
+ ushort len;
+ uchar data[1];
+# else
+ ulong dstchan;/* Used by multi-nic mode */
+# endif
+} I596_RFD;
+
+#define I596_RFD_C 0x8000 /* status: frame complete */
+#define I596_RFD_B 0x4000 /* status: frame busy or waiting */
+#define I596_RFD_OK 0x2000 /* status: frame OK */
+#define I596_RFD_ERR_LENGTH 0x1000 /* status: length error */
+#define I596_RFD_ERR_CRC 0x0800 /* status: CRC error */
+#define I596_RFD_ERR_ALIGN 0x0400 /* status: alignment error */
+#define I596_RFD_ERR_NOBUFS 0x0200 /* status: resource error */
+#define I596_RFD_ERR_DMA 0x0100 /* status: DMA error */
+#define I596_RFD_ERR_SHORT 0x0080 /* status: too short error */
+#define I596_RFD_NOMATCH 0x0002 /* status: IA was not matched */
+#define I596_RFD_COLLISION 0x0001 /* status: collision during receive */
+
+#define I596_RFD_EL 0x8000 /* cmd: end of RFD list */
+#define I596_RFD_FLEX 0x0008 /* cmd: Flexible mode */
+#define I596_RFD_EOF 0x8000 /* count: last buffer in the frame */
+#define I596_RFD_F 0x4000 /* count: The actual count is valid */
+
+/************************************************************************/
+/* */
+/* Commands */
+/* */
+/************************************************************************/
+
+ /* values for cmd halfword in all the structs below */
+#define I596_CB_CMD 0x07 /* CB COMMANDS */
+#define I596_CB_CMD_NOP 0
+#define I596_CB_CMD_IA 1
+#define I596_CB_CMD_CONF 2
+#define I596_CB_CMD_MCAST 3
+#define I596_CB_CMD_XMIT 4
+#define I596_CB_CMD_TDR 5
+#define I596_CB_CMD_DUMP 6
+#define I596_CB_CMD_DIAG 7
+
+#define I596_CB_CMD_EL 0x8000 /* CB is last in linked list */
+#define I596_CB_CMD_S 0x4000 /* Suspend after execution */
+#define I596_CB_CMD_I 0x2000 /* cause interrupt */
+
+ /* values for the status halfword in all the struct below */
+#define I596_CB_STATUS 0xF000 /* All four status bits */
+#define I596_CB_STATUS_C 0x8000 /* Command complete */
+#define I596_CB_STATUS_B 0x4000 /* Command busy executing */
+#define I596_CB_STATUS_C_OR_B 0xC000 /* Command complete or busy */
+#define I596_CB_STATUS_OK 0x2000 /* Command complete, no errors */
+#define I596_CB_STATUS_A	0x1000	/* Command was aborted */
+
+#define I596_CB_NOLINK ((I596_CB *) 0xffffffff)
+
+/*
+ * I596_CB_NOP: NOP Command (p. 4-34)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+} I596_CB_NOP;
+
+/*
+ * Same as above, but command and status in one ulong for speed
+ */
+typedef volatile struct
+{
+ ulong csr;
+ union _I596_CB *next;
+} I596_CB_FAST;
+#define FASTs(X) (X)
+#define FASTc(X) ((X)<<16)
+
+/*
+ * I596_CB_IA: Individual (MAC) Address Command (p. 4-35)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ uchar addr[6];
+} I596_CB_IA;
+
+/*
+ * I596_CB_CONF: Configure Command (p. 4-37)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ uchar conf[14];
+} I596_CB_CONF;
+
+#define I596_CONF0_P 0x80 /* Enable RBD Prefetch Bit */
+#define I596_CONF0_COUNT 14 /* Count of configuration bytes */
+
+#define I596_CONF1_MON_OFF 0xC0 /* Monitor mode: Monitor off */
+#define I596_CONF1_MON_ON 0x80 /* Monitor mode: Monitor on */
+#define I596_CONF1_TxFIFO(W) (W) /* TxFIFO trigger, in words */
+
+#define I596_CONF2_SAVEBF 0x80 /* Save bad frames */
+
+#define I596_CONF3_ADDRLEN(B) (B) /* Address length */
+#define I596_CONF3_NOSRCINSERT 0x08 /* Do not insert source address */
+#define I596_CONF3_PREAMBLE8 0x20 /* 8 byte preamble */
+#define I596_CONF3_LOOPOFF 0x00 /* Loopback: Off */
+#define I596_CONF3_LOOPINT 0x40 /* Loopback: internal */
+#define I596_CONF3_LOOPEXT 0xC0 /* Loopback: external */
+
+#define I596_CONF4_LINPRI(ST) (ST) /* Linear priority: slot times */
+#define I596_CONF4_EXPPRI(ST) (ST) /* Exponential priority: slot times */
+#define I596_CONF4_IEEE_BOM 0 /* IEEE 802.3 backoff method */
+
+#define I596_CONF5_IFS(X) (X) /* Interframe spacing in clocks */
+
+#define I596_CONF6_ST_LOW(X) (X&255) /* Slot time, low byte */
+
+#define I596_CONF7_ST_HI(X) (X>>8) /* Slot time, high bits */
+#define I596_CONF7_RETRY(X) (X<<4) /* Max retry number */
+
+#define I596_CONF8_PROMISC 0x01 /* Rcv all frames */
+#define I596_CONF8_NOBROAD 0x02
+#define I596_CONF8_MANCHESTER 0x04
+#define I596_CONF8_TxNOCRS 0x08
+#define I596_CONF8_NOCRC 0x10
+#define I596_CONF8_CRC_CCITT 0x20
+#define I596_CONF8_BITSTUFFING 0x40
+#define I596_CONF8_PADDING 0x80
+
+#define I596_CONF9_CSFILTER(X) (X)
+#define I596_CONF9_CSINT(X) 0x08
+#define I596_CONF9_CDFILTER(X) (X<<4)
+#define I596_CONF9_CDINT(X) 0x80
+
+#define I596_CONF10_MINLEN(X) (X) /* Minimum frame length */
+
+#define I596_CONF11_PRECRS_ 0x01 /* Preamble before carrier sense */
+#define I596_CONF11_LNGFLD_ 0x02 /* Padding in End of Carrier */
+#define I596_CONF11_CRCINM_ 0x04 /* CRC in memory */
+#define I596_CONF11_AUTOTX 0x08 /* Auto retransmit */
+#define I596_CONF11_CSBSAC_ 0x10 /* Collision detect by src addr cmp. */
+#define I596_CONF11_MCALL_ 0x20 /* Multicast all */
+
+#define I596_CONF13_RESERVED 0x3f /* Reserved: must be ones */
+#define I596_CONF13_MULTIA 0x40 /* Enable multiple addr. reception */
+#define I596_CONF13_DISBOF 0x80 /* Disable backoff algorithm */
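+
+/*
+ * Illustrative sketch, not part of the original header: one way the
+ * CONF byte macros above might be combined.  Byte positions follow the
+ * macro numbering (conf[0]..conf[13]); the values chosen are arbitrary
+ * examples and most bytes are omitted for brevity.
+ */
+#if 0	/* example only, never compiled */
+static void i596_fill_conf_example(I596_CB_CONF *cb)
+{
+	cb->status  = 0;
+	cb->cmd     = I596_CB_CMD_CONF | I596_CB_CMD_EL | I596_CB_CMD_I;
+	cb->conf[0] = I596_CONF0_COUNT;			/* 14 config bytes */
+	cb->conf[1] = I596_CONF1_MON_OFF | I596_CONF1_TxFIFO(8);
+	cb->conf[3] = I596_CONF3_ADDRLEN(6);		/* 6 byte MAC address */
+	cb->conf[13] = I596_CONF13_RESERVED;		/* reserved bits are ones */
+}
+#endif
+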
+/*
+ * I596_CB_MCAST: Multicast-Setup Command (p. 4-54)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ ushort count; /* Number of 6-byte addrs that follow */
+ uchar addr[6][1];
+} I596_CB_MCAST;
+
+/*
+ * I596_CB_XMIT: Transmit Command (p. 4-56)
+ */
+typedef I596_TFD I596_CB_XMIT;
+
+#define I596_CB_XMIT_NOCRC 0x0010 /* cmd: No CRC insertion */
+#define I596_CB_XMIT_FLEX 0x0008 /* cmd: Flexible memory mode */
+
+#define I596_CB_XMIT_ERR_LATE 0x0800 /* status: error: late collision */
+#define I596_CB_XMIT_ERR_NOCRS	0x0400	/* status: error: no carrier sense */
+#define I596_CB_XMIT_ERR_NOCTS 0x0200 /* status: error: loss of CTS */
+#define I596_CB_XMIT_ERR_UNDER 0x0100 /* status: error: DMA underrun */
+#define I596_CB_XMIT_ERR_MAXCOL 0x0020 /* status: error: maximum collisions */
+#define I596_CB_XMIT_COLLISIONS 0x000f /* status: number of collisions */
+
+/*
+ * I596_CB_TDR: Time Domain Reflectometry Command (p. 4-63)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ ushort time;
+} I596_CB_TDR;
+
+/*
+ * I596_CB_DUMP: Dump Command (p. 4-65)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+ uchar *buf;
+} I596_CB_DUMP;
+
+/*
+ * I596_CB_DIAG: Diagnose Command (p. 4-77)
+ */
+typedef volatile struct
+{
+ ushort status;
+ ushort cmd;
+ union _I596_CB *next;
+} I596_CB_DIAG;
+
+/*
+ * I596_CB: Command Block
+ */
+typedef union _I596_CB
+{
+ I596_CB_NOP nop;
+ I596_CB_IA ia;
+ I596_CB_CONF conf;
+ I596_CB_MCAST mcast;
+ I596_CB_XMIT xmit;
+ I596_CB_TDR tdr;
+ I596_CB_DUMP dump;
+ I596_CB_DIAG diag;
+
+ /* command and status in one ulong for speed... */
+ I596_CB_FAST fast;
+} I596_CB;
+
+/************************************************************************/
+/* */
+/* I596_SCB: System Configuration Block (p. 4-26) */
+/* */
+/************************************************************************/
+typedef volatile struct
+{
+ volatile ushort status; /* Status word */
+ volatile ushort cmd; /* Command word */
+ I596_CB *cbp;
+ I596_RFD *rfdp;
+ ulong crc_errs;
+ ulong align_errs;
+ ulong resource_errs;
+ ulong overrun_errs;
+ ulong rcvcdt_errs;
+ ulong short_errs;
+ ushort toff;
+ ushort ton;
+} I596_SCB;
+
+ /* cmd halfword values */
+#define I596_SCB_ACK 0xF000 /* ACKNOWLEDGMENTS */
+#define I596_SCB_ACK_CX 0x8000 /* Ack command completion */
+#define I596_SCB_ACK_FR 0x4000 /* Ack received frame */
+#define I596_SCB_ACK_CNA 0x2000 /* Ack command unit not active */
+#define I596_SCB_ACK_RNR 0x1000 /* Ack rcv unit not ready */
+#define I596_SCB_ACK_ALL 0xF000 /* Ack everything */
+
+#define I596_SCB_CUC 0x0700 /* COMMAND UNIT COMMANDS */
+#define I596_SCB_CUC_NOP 0x0000 /* No operation */
+#define I596_SCB_CUC_START 0x0100 /* Start execution of first CB */
+#define I596_SCB_CUC_RESUME 0x0200 /* Resume execution */
+#define I596_SCB_CUC_SUSPEND 0x0300 /* Suspend after current CB */
+#define I596_SCB_CUC_ABORT 0x0400 /* Abort current CB immediately */
+#define I596_SCB_CUC_LOAD 0x0500 /* Load Bus throttle timers */
+#define I596_SCB_CUC_LOADIMM 0x0600 /* Load Bus throttle timers, now */
+
+#define I596_SCB_RUC 0x0070 /* RECEIVE UNIT COMMANDS */
+#define I596_SCB_RUC_NOP 0x0000 /* No operation */
+#define I596_SCB_RUC_START 0x0010 /* Start reception */
+#define I596_SCB_RUC_RESUME 0x0020 /* Resume reception */
+#define I596_SCB_RUC_SUSPEND 0x0030 /* Suspend reception */
+#define I596_SCB_RUC_ABORT 0x0040 /* Abort reception */
+
+#define I596_SCB_RESET 0x0080 /* Hard reset chip */
+
+ /* status halfword values */
+#define I596_SCB_STAT 0xF000 /* STATUS */
+#define I596_SCB_CX 0x8000 /* command completion */
+#define I596_SCB_FR 0x4000 /* received frame */
+#define I596_SCB_CNA 0x2000 /* command unit not active */
+#define I596_SCB_RNR 0x1000 /* rcv unit not ready */
+
+#define I596_SCB_CUS 0x0700 /* COMMAND UNIT STATUS */
+#define I596_SCB_CUS_IDLE 0x0000 /* Idle */
+#define I596_SCB_CUS_SUSPENDED 0x0100 /* Suspended */
+#define I596_SCB_CUS_ACTIVE 0x0200 /* Active */
+
+#define I596_SCB_RUS 0x00F0 /* RECEIVE UNIT STATUS */
+#define I596_SCB_RUS_IDLE 0x0000 /* Idle */
+#define I596_SCB_RUS_SUSPENDED 0x0010 /* Suspended */
+#define I596_SCB_RUS_NORES 0x0020 /* No Resources */
+#define I596_SCB_RUS_READY 0x0040 /* Ready */
+#define I596_SCB_RUS_NORBDS 0x0080 /* No more RBDs modifier */
+
+#define I596_SCB_LOADED 0x0008 /* Bus timers loaded */
+
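+/*
+ * Illustrative sketch, not taken from the driver: the acknowledge-then-
+ * command sequence the SCB bits above suggest.  Channel attention and
+ * the wait for the chip to clear scb->cmd are board specific and only
+ * described in the comment here.
+ */
+#if 0	/* example only, never compiled */
+static void i596_ack_and_start_cu(I596_SCB *scb, I596_CB *cb)
+{
+	ushort stat = scb->status;
+
+	scb->cbp = cb;
+	scb->cmd = (stat & I596_SCB_ACK) | I596_SCB_CUC_START;
+	/* ...then assert channel attention (board specific) and wait for
+	 * the chip to clear scb->cmd before issuing another command. */
+}
+#endif
+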
+/************************************************************************/
+/* */
+/* I596_ISCP: Intermediate System Configuration Ptr (p 4-26) */
+/* */
+/************************************************************************/
+typedef volatile struct
+{
+ ulong busy; /* Set to 1; I596 clears it when scbp is read */
+ I596_SCB *scbp;
+} I596_ISCP;
+
+/************************************************************************/
+/* */
+/* I596_SCP: System Configuration Pointer (p. 4-23) */
+/* */
+/************************************************************************/
+typedef volatile struct
+{
+ ulong sysbus;
+ ulong dummy;
+ I596_ISCP *iscpp;
+} I596_SCP;
+
+ /* .sysbus values */
+#define I596_SCP_RESERVED 0x400000 /* Reserved bits must be set */
+#define I596_SCP_INTLOW 0x200000 /* Intr. Polarity active low */
+#define I596_SCP_INTHIGH 0 /* Intr. Polarity active high */
+#define I596_SCP_LOCKDIS 0x100000 /* Lock Function disabled */
+#define I596_SCP_LOCKEN 0 /* Lock Function enabled */
+#define I596_SCP_ETHROTTLE 0x080000 /* External Bus Throttle */
+#define I596_SCP_ITHROTTLE 0 /* Internal Bus Throttle */
+#define I596_SCP_LINEAR 0x040000 /* Linear Mode */
+#define I596_SCP_SEGMENTED 0x020000 /* Segmented Mode */
+#define I596_SCP_82586 0x000000 /* 82586 Mode */
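+
+/*
+ * Illustrative sketch, not part of the original header: how the SCP,
+ * ISCP and SCB above chain together at initialisation.  Physical/virtual
+ * address translation and channel attention are board specific and are
+ * only mentioned in comments.
+ */
+#if 0	/* example only, never compiled */
+static void i596_link_init_structures(I596_SCP *scp, I596_ISCP *iscp,
+				      I596_SCB *scb)
+{
+	scp->sysbus = I596_SCP_RESERVED | I596_SCP_LINEAR | I596_SCP_INTHIGH;
+	scp->iscpp = iscp;
+
+	iscp->busy = 1;		/* chip clears this once it has read scbp */
+	iscp->scbp = scb;
+
+	/* The SCP address is then handed to the chip with
+	 * I596_PORT_SCP_ADDR (see the PORT sketch near the top of this
+	 * file), followed by a board specific channel attention. */
+}
+#endif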
diff --git a/drivers/net/dgrs_plx9060.h b/drivers/net/dgrs_plx9060.h
new file mode 100644
index 000000000000..6888ae0d0ce0
--- /dev/null
+++ b/drivers/net/dgrs_plx9060.h
@@ -0,0 +1,175 @@
+/*
+ * PLX 9060 PCI Interface chip
+ */
+
+/*
+ * PCI configuration registers, same offset on local and PCI sides,
+ * but on PCI side, must use PCI BIOS calls to read/write.
+ */
+#define PCI_PLXREGS_BASE_ADDR 0x10
+
+#define PCI_PLXREGS_IO_ADDR 0x14
+
+#define PCI_SPACE0_BASE_ADDR 0x18
+
+#define PCI_ROM_BASE_ADDR 0x30
+# define PCI_ROM_ENABLED 0x00000001
+
+#define PCI_INT_LINE 0x3C
+
+/*
+ * Registers accessible directly from PCI and local side.
+ * Offset is from PCI side. Add PLX_LCL_OFFSET for local address.
+ */
+#define PLX_LCL_OFFSET 0x80 /* Offset of regs from local side */
+
+/*
+ * Local Configuration Registers
+ */
+#define PLX_SPACE0_RANGE 0x00 /* Range for PCI to Lcl Addr Space 0 */
+#define PLX_SPACE0_BASE_ADDR 0x04 /* Lcl Base address remap */
+
+#define PLX_ROM_RANGE 0x10 /* Range for expansion ROM (DMA) */
+#define PLX_ROM_BASE_ADDR 0x14 /* Lcl base address remap for ROM */
+
+#define PLX_BUS_REGION 0x18 /* Bus Region Descriptors */
+
+/*
+ * Shared Run Time Registers
+ */
+#define PLX_MBOX0 0x40
+#define PLX_MBOX1 0x44
+#define PLX_MBOX2 0x48
+#define PLX_MBOX3 0x4C
+#define PLX_MBOX4 0x50
+#define PLX_MBOX5 0x54
+#define PLX_MBOX6 0x58
+#define PLX_MBOX7 0x5C
+
+#define PLX_PCI2LCL_DOORBELL 0x60
+
+#define PLX_LCL2PCI_DOORBELL 0x64
+
+#define PLX_INT_CSR 0x68 /* Interrupt Control/Status */
+# define PLX_LSERR_ENABLE 0x00000001
+# define PLX_LSERR_PE 0x00000002
+# define PLX_SERR 0x00000004
+# undef PLX_UNUSED /* 0x00000008 */
+# undef PLX_UNUSED /* 0x00000010 */
+# undef PLX_UNUSED /* 0x00000020 */
+# undef PLX_UNUSED /* 0x00000040 */
+# undef PLX_UNUSED /* 0x00000080 */
+# define PLX_PCI_IE 0x00000100
+# define PLX_PCI_DOORBELL_IE 0x00000200
+# define PLX_PCI_ABORT_IE 0x00000400
+# define PLX_PCI_LOCAL_IE 0x00000800
+# define PLX_RETRY_ABORT_ENABLE 0x00001000
+# define PLX_PCI_DOORBELL_INT 0x00002000
+# define PLX_PCI_ABORT_INT 0x00004000
+# define PLX_PCI_LOCAL_INT 0x00008000
+# define PLX_LCL_IE 0x00010000
+# define PLX_LCL_DOORBELL_IE 0x00020000
+# define PLX_LCL_DMA0_IE 0x00040000
+# define PLX_LCL_DMA1_IE 0x00080000
+# define PLX_LCL_DOORBELL_INT 0x00100000
+# define PLX_LCL_DMA0_INT 0x00200000
+# define PLX_LCL_DMA1_INT 0x00400000
+# define PLX_LCL_BIST_INT 0x00800000
+# define PLX_BM_DIRECT_ 0x01000000
+# define PLX_BM_DMA0_ 0x02000000
+# define PLX_BM_DMA1_ 0x04000000
+# define PLX_BM_ABORT_ 0x08000000
+# undef PLX_UNUSED /* 0x10000000 */
+# undef PLX_UNUSED /* 0x20000000 */
+# undef PLX_UNUSED /* 0x40000000 */
+# undef PLX_UNUSED /* 0x80000000 */
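+
+/*
+ * Illustrative sketch, not from the driver: enabling PCI-side doorbell
+ * interrupts with the bits above.  plxbase stands in for an ioremap()ed
+ * PLX register base and is an assumption of the example.
+ */
+#if 0	/* example only, never compiled */
+static void plx_enable_pci_doorbell_irq(long plxbase)
+{
+	writel(readl(plxbase + PLX_INT_CSR) | PLX_PCI_IE | PLX_PCI_DOORBELL_IE,
+	       plxbase + PLX_INT_CSR);
+}
+#endif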
+
+#define PLX_MISC_CSR		0x6c	/* EEPROM, PCI, User, Init Control/Status */
+# define PLX_USEROUT 0x00010000
+# define PLX_USERIN 0x00020000
+# define PLX_EECK 0x01000000
+# define PLX_EECS 0x02000000
+# define PLX_EEWD 0x04000000
+# define PLX_EERD 0x08000000
+
+/*
+ * DMA registers. Offset is from local side
+ */
+#define PLX_DMA0_MODE 0x100
+# define PLX_DMA_MODE_WIDTH32 0x00000003
+# define PLX_DMA_MODE_WAITSTATES(X) ((X)<<2)
+# define PLX_DMA_MODE_NOREADY 0x00000000
+# define PLX_DMA_MODE_READY 0x00000040
+# define PLX_DMA_MODE_NOBTERM 0x00000000
+# define PLX_DMA_MODE_BTERM 0x00000080
+# define PLX_DMA_MODE_NOBURST 0x00000000
+# define PLX_DMA_MODE_BURST 0x00000100
+# define PLX_DMA_MODE_NOCHAIN 0x00000000
+# define PLX_DMA_MODE_CHAIN 0x00000200
+# define PLX_DMA_MODE_DONE_IE 0x00000400
+# define PLX_DMA_MODE_ADDR_HOLD 0x00000800
+
+#define PLX_DMA0_PCI_ADDR 0x104
+ /* non-chaining mode PCI address */
+
+#define PLX_DMA0_LCL_ADDR 0x108
+ /* non-chaining mode local address */
+
+#define PLX_DMA0_SIZE 0x10C
+ /* non-chaining mode length */
+
+#define PLX_DMA0_DESCRIPTOR 0x110
+# define PLX_DMA_DESC_EOC 0x00000002
+# define PLX_DMA_DESC_TC_IE 0x00000004
+# define PLX_DMA_DESC_TO_HOST 0x00000008
+# define PLX_DMA_DESC_TO_BOARD 0x00000000
+# define PLX_DMA_DESC_NEXTADDR 0xFFFFfff0
+
+#define PLX_DMA1_MODE 0x114
+#define PLX_DMA1_PCI_ADDR 0x118
+#define PLX_DMA1_LCL_ADDR 0x11C
+#define PLX_DMA1_SIZE		0x120
+#define PLX_DMA1_DESCRIPTOR 0x124
+
+#define PLX_DMA_CSR 0x128
+# define PLX_DMA_CSR_0_ENABLE 0x00000001
+# define PLX_DMA_CSR_0_START 0x00000002
+# define PLX_DMA_CSR_0_ABORT 0x00000004
+# define PLX_DMA_CSR_0_CLR_INTR 0x00000008
+# define PLX_DMA_CSR_0_DONE 0x00000010
+# define PLX_DMA_CSR_1_ENABLE 0x00000100
+# define PLX_DMA_CSR_1_START 0x00000200
+# define PLX_DMA_CSR_1_ABORT 0x00000400
+# define PLX_DMA_CSR_1_CLR_INTR 0x00000800
+# define PLX_DMA_CSR_1_DONE 0x00001000
+
+#define PLX_DMA_ARB0 0x12C
+# define PLX_DMA_ARB0_LATENCY_T 0x000000FF
+# define PLX_DMA_ARB0_PAUSE_T 0x0000FF00
+# define PLX_DMA_ARB0_LATENCY_EN 0x00010000
+# define PLX_DMA_ARB0_PAUSE_EN 0x00020000
+# define PLX_DMA_ARB0_BREQ_EN 0x00040000
+# define PLX_DMA_ARB0_PRI 0x00180000
+# define PLX_DMA_ARB0_PRI_ROUND 0x00000000
+# define PLX_DMA_ARB0_PRI_0 0x00080000
+# define PLX_DMA_ARB0_PRI_1 0x00100000
+
+#define PLX_DMA_ARB1 0x130
+ /* Chan 0: FIFO DEPTH=16 */
+# define PLX_DMA_ARB1_0_P2L_LW_TRIG(X) ( ((X)&15) << 0 )
+# define PLX_DMA_ARB1_0_L2P_LR_TRIG(X) ( ((X)&15) << 4 )
+# define PLX_DMA_ARB1_0_L2P_PW_TRIG(X) ( ((X)&15) << 8 )
+# define PLX_DMA_ARB1_0_P2L_PR_TRIG(X) ( ((X)&15) << 12 )
+ /* Chan 1: FIFO DEPTH=8 */
+# define PLX_DMA_ARB1_1_P2L_LW_TRIG(X) ( ((X)& 7) << 16 )
+# define PLX_DMA_ARB1_1_L2P_LR_TRIG(X) ( ((X)& 7) << 20 )
+# define PLX_DMA_ARB1_1_L2P_PW_TRIG(X) ( ((X)& 7) << 24 )
+# define PLX_DMA_ARB1_1_P2L_PR_TRIG(X) ( ((X)& 7) << 28 )
+
+typedef struct _dmachain
+{
+ ulong pciaddr;
+ ulong lcladdr;
+ ulong len;
+ ulong next;
+} DMACHAIN;
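+
+/*
+ * Illustrative sketch, not part of the original header: filling a single
+ * (terminal) chaining descriptor with the PLX_DMA_DESC_* bits above.
+ * The board-to-host direction is just an example choice.
+ */
+#if 0	/* example only, never compiled */
+static void plx_fill_last_chain_entry(DMACHAIN *dc, ulong pciaddr,
+				      ulong lcladdr, ulong len)
+{
+	dc->pciaddr = pciaddr;
+	dc->lcladdr = lcladdr;
+	dc->len = len;
+	/* last entry: end of chain, interrupt on terminal count,
+	 * data moves from the board to the host */
+	dc->next = PLX_DMA_DESC_EOC | PLX_DMA_DESC_TC_IE | PLX_DMA_DESC_TO_HOST;
+}
+#endif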
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
new file mode 100644
index 000000000000..aa42b7a27735
--- /dev/null
+++ b/drivers/net/dl2k.c
@@ -0,0 +1,1872 @@
+/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
+/*
+ Copyright (c) 2001, 2002 by D-Link Corporation
+ Written by Edward Peng.<edward_peng@dlink.com.tw>
+    Created 03-May-2001, based on Linux's sundance.c.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+*/
+/*
+ Rev Date Description
+ ==========================================================================
+ 0.01 2001/05/03 Created DL2000-based linux driver
+ 0.02 2001/05/21 Added VLAN and hardware checksum support.
+ 1.00 2001/06/26 Added jumbo frame support.
+ 1.01 2001/08/21 Added two parameters, rx_coalesce and rx_timeout.
+ 1.02 2001/10/08 Supported fiber media.
+ Added flow control parameters.
+ 1.03 2001/10/12 Changed the default media to 1000mbps_fd for
+ the fiber devices.
+ 1.04 2001/11/08 Fixed Tx stopped when tx very busy.
+ 1.05 2001/11/22 Fixed Tx stopped when unidirectional tx busy.
+ 1.06 2001/12/13 Fixed disconnect bug at 10Mbps mode.
+			Fixed incorrect tx_full flag.
+			Added tx_coalesce parameter.
+ 1.07 2002/01/03 Fixed miscount of RX frame error.
+ 1.08 2002/01/17 Fixed the multicast bug.
+ 1.09 2002/03/07 Move rx-poll-now to re-fill loop.
+ Added rio_timer() to watch rx buffers.
+ 1.10 2002/04/16 Fixed miscount of carrier error.
+ 1.11 2002/05/23 Added ISR schedule scheme
+ Fixed miscount of rx frame error for DGE-550SX.
+ Fixed VLAN bug.
+ 1.12 2002/06/13 Lock tx_coalesce=1 on 10/100Mbps mode.
+ 1.13 2002/08/13 1. Fix disconnection (many tx:carrier/rx:frame
+ errs) with some mainboards.
+ 2. Use definition "DRV_NAME" "DRV_VERSION"
+ "DRV_RELDATE" for flexibility.
+ 1.14 2002/08/14 Support ethtool.
+ 1.15 2002/08/27 Changed the default media to Auto-Negotiation
+ for the fiber devices.
+ 1.16 2002/09/04 More power down time for fiber devices auto-
+ negotiation.
+ Fix disconnect bug after ifup and ifdown.
+ 1.17 2002/10/03 Fix RMON statistics overflow.
+ Always use I/O mapping to access eeprom,
+ avoid system freezing with some chipsets.
+
+*/
+#define DRV_NAME "D-Link DL2000-based linux driver"
+#define DRV_VERSION "v1.17a"
+#define DRV_RELDATE "2002/10/04"
+#include "dl2k.h"
+
+static char version[] __devinitdata =
+ KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
+#define MAX_UNITS 8
+static int mtu[MAX_UNITS];
+static int vlan[MAX_UNITS];
+static int jumbo[MAX_UNITS];
+static char *media[MAX_UNITS];
+static int tx_flow=-1;
+static int rx_flow=-1;
+static int copy_thresh;
+static int rx_coalesce=10; /* Rx frame count each interrupt */
+static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
+static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
+
+
+MODULE_AUTHOR ("Edward Peng");
+MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
+MODULE_LICENSE("GPL");
+module_param_array(mtu, int, NULL, 0);
+module_param_array(media, charp, NULL, 0);
+module_param_array(vlan, int, NULL, 0);
+module_param_array(jumbo, int, NULL, 0);
+module_param(tx_flow, int, 0);
+module_param(rx_flow, int, 0);
+module_param(copy_thresh, int, 0);
+module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
+module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
+module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
+
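+/*
+ * Illustrative usage, not from the original source: with the module
+ * parameters declared above, a load line might look like
+ *
+ *	modprobe dl2k mtu=1500 media=auto rx_coalesce=10 rx_timeout=200
+ *
+ * The values are arbitrary examples; "dl2k" is the name this driver
+ * passes to pci_request_regions() below.
+ */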
+
+/* Enable the default interrupts */
+#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
+ UpdateStats | LinkEvent)
+#define EnableInt() \
+writew(DEFAULT_INTR, ioaddr + IntEnable)
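+
+/* Note: EnableInt() expands in place and relies on a local variable named
+ * ioaddr being in scope at the call site (as in rio_open() below). */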
+
+static int max_intrloop = 50;
+static int multicast_filter_limit = 0x40;
+
+static int rio_open (struct net_device *dev);
+static void rio_timer (unsigned long data);
+static void rio_tx_timeout (struct net_device *dev);
+static void alloc_list (struct net_device *dev);
+static int start_xmit (struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+static void rio_free_tx (struct net_device *dev, int irq);
+static void tx_error (struct net_device *dev, int tx_status);
+static int receive_packet (struct net_device *dev);
+static void rio_error (struct net_device *dev, int int_status);
+static int change_mtu (struct net_device *dev, int new_mtu);
+static void set_multicast (struct net_device *dev);
+static struct net_device_stats *get_stats (struct net_device *dev);
+static int clear_stats (struct net_device *dev);
+static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static int rio_close (struct net_device *dev);
+static int find_miiphy (struct net_device *dev);
+static int parse_eeprom (struct net_device *dev);
+static int read_eeprom (long ioaddr, int eep_addr);
+static int mii_wait_link (struct net_device *dev, int wait);
+static int mii_set_media (struct net_device *dev);
+static int mii_get_media (struct net_device *dev);
+static int mii_set_media_pcs (struct net_device *dev);
+static int mii_get_media_pcs (struct net_device *dev);
+static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
+static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
+ u16 data);
+
+static struct ethtool_ops ethtool_ops;
+
+static int __devinit
+rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int err, irq;
+ long ioaddr;
+ static int version_printed;
+ void *ring_space;
+ dma_addr_t ring_dma;
+
+ if (!version_printed++)
+ printk ("%s", version);
+
+ err = pci_enable_device (pdev);
+ if (err)
+ return err;
+
+ irq = pdev->irq;
+ err = pci_request_regions (pdev, "dl2k");
+ if (err)
+ goto err_out_disable;
+
+ pci_set_master (pdev);
+ dev = alloc_etherdev (sizeof (*np));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+ SET_MODULE_OWNER (dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef MEM_MAPPING
+ ioaddr = pci_resource_start (pdev, 1);
+ ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_dev;
+ }
+#else
+ ioaddr = pci_resource_start (pdev, 0);
+#endif
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ np = netdev_priv(dev);
+ np->chip_id = chip_idx;
+ np->pdev = pdev;
+ spin_lock_init (&np->tx_lock);
+ spin_lock_init (&np->rx_lock);
+
+ /* Parse manual configuration */
+ np->an_enable = 1;
+ np->tx_coalesce = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "auto") == 0 ||
+ strcmp (media[card_idx], "autosense") == 0 ||
+ strcmp (media[card_idx], "0") == 0 ) {
+ np->an_enable = 2;
+ } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0
+ || strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->full_duplex = 0;
+ } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
+ strcmp (media[card_idx], "6") == 0) {
+ np->speed=1000;
+ np->full_duplex=1;
+ } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
+ strcmp (media[card_idx], "5") == 0) {
+ np->speed = 1000;
+ np->full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (jumbo[card_idx] != 0) {
+ np->jumbo = 1;
+ dev->mtu = MAX_JUMBO;
+ } else {
+ np->jumbo = 0;
+ if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
+ dev->mtu = mtu[card_idx];
+ }
+ np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
+ vlan[card_idx] : 0;
+ if (rx_coalesce > 0 && rx_timeout > 0) {
+ np->rx_coalesce = rx_coalesce;
+ np->rx_timeout = rx_timeout;
+ np->coalesce = 1;
+ }
+ np->tx_flow = (tx_flow == 0) ? 0 : 1;
+ np->rx_flow = (rx_flow == 0) ? 0 : 1;
+
+ if (tx_coalesce < 1)
+ tx_coalesce = 1;
+ else if (tx_coalesce > TX_RING_SIZE-1)
+ tx_coalesce = TX_RING_SIZE - 1;
+ }
+ dev->open = &rio_open;
+ dev->hard_start_xmit = &start_xmit;
+ dev->stop = &rio_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_multicast;
+ dev->do_ioctl = &rio_ioctl;
+ dev->tx_timeout = &rio_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->change_mtu = &change_mtu;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+#if 0
+ dev->features = NETIF_F_IP_CSUM;
+#endif
+ pci_set_drvdata (pdev, dev);
+
+ ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_iounmap;
+ np->tx_ring = (struct netdev_desc *) ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct netdev_desc *) ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ /* Parse eeprom data */
+ parse_eeprom (dev);
+
+ /* Find PHY address */
+ err = find_miiphy (dev);
+ if (err)
+ goto err_out_unmap_rx;
+
+ /* Fiber device? */
+ np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+ np->link_status = 0;
+ /* Set media and reset PHY */
+ if (np->phy_media) {
+		/* default Auto-Negotiation for fiber devices */
+ if (np->an_enable == 2) {
+ np->an_enable = 1;
+ }
+ mii_set_media_pcs (dev);
+ } else {
+ /* Auto-Negotiation is mandatory for 1000BASE-T,
+ IEEE 802.3ab Annex 28D page 14 */
+ if (np->speed == 1000)
+ np->an_enable = 1;
+ mii_set_media (dev);
+ }
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
+
+ err = register_netdev (dev);
+ if (err)
+ goto err_out_unmap_rx;
+
+ card_idx++;
+
+ printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
+ dev->name, np->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
+ if (tx_coalesce > 1)
+ printk(KERN_INFO "tx_coalesce:\t%d packets\n",
+ tx_coalesce);
+ if (np->coalesce)
+ printk(KERN_INFO "rx_coalesce:\t%d packets\n"
+ KERN_INFO "rx_timeout: \t%d ns\n",
+ np->rx_coalesce, np->rx_timeout*640);
+ if (np->vlan)
+ printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
+ return 0;
+
+ err_out_unmap_rx:
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ err_out_unmap_tx:
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ err_out_iounmap:
+#ifdef MEM_MAPPING
+ iounmap ((void *) ioaddr);
+
+ err_out_dev:
+#endif
+ free_netdev (dev);
+
+ err_out_res:
+ pci_release_regions (pdev);
+
+ err_out_disable:
+ pci_disable_device (pdev);
+ return err;
+}
+
+int
+find_miiphy (struct net_device *dev)
+{
+ int i, phy_found = 0;
+ struct netdev_private *np;
+ long ioaddr;
+ np = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+ np->phy_addr = 1;
+
+ for (i = 31; i >= 0; i--) {
+ int mii_status = mii_read (dev, i, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phy_addr = i;
+ phy_found++;
+ }
+ }
+ if (!phy_found) {
+ printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int
+parse_eeprom (struct net_device *dev)
+{
+ int i, j;
+ long ioaddr = dev->base_addr;
+ u8 sromdata[256];
+ u8 *psib;
+ u32 crc;
+ PSROM_t psrom = (PSROM_t) sromdata;
+ struct netdev_private *np = netdev_priv(dev);
+
+ int cid, next;
+
+#ifdef MEM_MAPPING
+ ioaddr = pci_resource_start (np->pdev, 0);
+#endif
+ /* Read eeprom */
+ for (i = 0; i < 128; i++) {
+ ((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
+ }
+#ifdef MEM_MAPPING
+ ioaddr = dev->base_addr;
+#endif
+ /* Check CRC */
+ crc = ~ether_crc_le (256 - 4, sromdata);
+ if (psrom->crc != crc) {
+ printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
+ return -1;
+ }
+
+ /* Set MAC address */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = psrom->mac_addr[i];
+
+	/* Parse Software Information Block */
+ i = 0x30;
+ psib = (u8 *) sromdata;
+ do {
+ cid = psib[i++];
+ next = psib[i++];
+ if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
+ printk (KERN_ERR "Cell data error\n");
+ return -1;
+ }
+ switch (cid) {
+ case 0: /* Format version */
+ break;
+ case 1: /* End of cell */
+ return 0;
+ case 2: /* Duplex Polarity */
+ np->duplex_polarity = psib[i];
+ writeb (readb (ioaddr + PhyCtrl) | psib[i],
+ ioaddr + PhyCtrl);
+ break;
+ case 3: /* Wake Polarity */
+ np->wake_polarity = psib[i];
+ break;
+ case 9: /* Adapter description */
+ j = (next - i > 255) ? 255 : next - i;
+ memcpy (np->name, &(psib[i]), j);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+		case 8:		/* Reserved */
+ break;
+ default: /* Unknown cell */
+ return -1;
+ }
+ i = next;
+ } while (1);
+
+ return 0;
+}
+
+static int
+rio_open (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+ u16 macctrl;
+
+ i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ, dev->name, dev);
+ if (i)
+ return i;
+
+ /* Reset all logic functions */
+ writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
+ ioaddr + ASICCtrl + 2);
+ mdelay(10);
+
+ /* DebugCtrl bit 4, 5, 9 must set */
+ writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
+
+ /* Jumbo frame */
+ if (np->jumbo != 0)
+ writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
+
+ alloc_list (dev);
+
+ /* Get station address */
+ for (i = 0; i < 6; i++)
+ writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
+
+ set_multicast (dev);
+ if (np->coalesce) {
+ writel (np->rx_coalesce | np->rx_timeout << 16,
+ ioaddr + RxDMAIntCtrl);
+ }
+ /* Set RIO to poll every N*320nsec. */
+ writeb (0x20, ioaddr + RxDMAPollPeriod);
+ writeb (0xff, ioaddr + TxDMAPollPeriod);
+ writeb (0x30, ioaddr + RxDMABurstThresh);
+ writeb (0x30, ioaddr + RxDMAUrgentThresh);
+ writel (0x0007ffff, ioaddr + RmonStatMask);
+ /* clear statistics */
+ clear_stats (dev);
+
+ /* VLAN supported */
+ if (np->vlan) {
+ /* priority field in RxDMAIntCtrl */
+ writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
+ ioaddr + RxDMAIntCtrl);
+ /* VLANId */
+ writew (np->vlan, ioaddr + VLANId);
+ /* Length/Type should be 0x8100 */
+ writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
+ /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
+		   VLAN information is tagged via the TFC's VID and CFI fields. */
+ writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
+ ioaddr + MACCtrl);
+ }
+
+ init_timer (&np->timer);
+ np->timer.expires = jiffies + 1*HZ;
+ np->timer.data = (unsigned long) dev;
+ np->timer.function = &rio_timer;
+ add_timer (&np->timer);
+
+ /* Start Tx/Rx */
+ writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
+ ioaddr + MACCtrl);
+
+ macctrl = 0;
+ macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
+ macctrl |= (np->full_duplex) ? DuplexSelect : 0;
+ macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
+ macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
+ writew(macctrl, ioaddr + MACCtrl);
+
+ netif_start_queue (dev);
+
+ /* Enable default interrupts */
+ EnableInt ();
+ return 0;
+}
+
+static void
+rio_timer (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int entry;
+ int next_tick = 1*HZ;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->rx_lock, flags);
+ /* Recover rx ring exhausted error */
+ if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
+ printk(KERN_INFO "Try to recover rx ring exhausted...\n");
+ /* Re-allocate skbuffs to fill the descriptor ring */
+ for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
+ struct sk_buff *skb;
+ entry = np->old_rx % RX_RING_SIZE;
+ /* Dropped packets don't need to re-allocate */
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb (np->rx_buf_sz);
+ if (skb == NULL) {
+ np->rx_ring[entry].fraginfo = 0;
+ printk (KERN_INFO
+ "%s: Still unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
+ break;
+ }
+ np->rx_skbuff[entry] = skb;
+ skb->dev = dev;
+ /* 16 byte align the IP header */
+ skb_reserve (skb, 2);
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ }
+ np->rx_ring[entry].fraginfo |=
+ cpu_to_le64 (np->rx_buf_sz) << 48;
+ np->rx_ring[entry].status = 0;
+ } /* end for */
+ } /* end if */
+ spin_unlock_irqrestore (&np->rx_lock, flags);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void
+rio_tx_timeout (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
+ dev->name, readl (ioaddr + TxStatus));
+ rio_free_tx(dev, 0);
+ dev->if_port = 0;
+ dev->trans_start = jiffies;
+}
+
+ /* allocate and initialize Tx and Rx descriptors */
+static void
+alloc_list (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->old_rx = np->old_tx = 0;
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
+
+ /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = cpu_to_le64 (TFDDone);
+ np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
+ ((i+1)%TX_RING_SIZE) *
+ sizeof (struct netdev_desc));
+ }
+
+ /* Initialize Rx descriptors */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
+ ((i + 1) % RX_RING_SIZE) *
+ sizeof (struct netdev_desc));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Allocate the rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Allocated fixed size of skbuff */
+ struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL) {
+ printk (KERN_ERR
+ "%s: alloc_list: allocate Rx buffer error! ",
+ dev->name);
+ break;
+ }
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve (skb, 2); /* 16 byte align the IP header. */
+ /* Rubicon now supports 40 bits of addressing space. */
+ np->rx_ring[i].fraginfo =
+ cpu_to_le64 ( pci_map_single (
+ np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
+ }
+
+ /* Set RFDListPtr */
+ writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
+ writel (0, dev->base_addr + RFDListPtr1);
+
+ return;
+}
+
+static int
+start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ unsigned entry;
+ u32 ioaddr;
+ u64 tfc_vlan_tag = 0;
+
+ if (np->link_status == 0) { /* Link Down */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ ioaddr = dev->base_addr;
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+#if 0
+ if (skb->ip_summed == CHECKSUM_HW) {
+ txdesc->status |=
+ cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
+ IPChecksumEnable);
+ }
+#endif
+ if (np->vlan) {
+ tfc_vlan_tag =
+ cpu_to_le64 (VLANTagInsert) |
+ (cpu_to_le64 (np->vlan) << 32) |
+ (cpu_to_le64 (skb->priority) << 45);
+ }
+ txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
+ skb->len,
+ PCI_DMA_TODEVICE));
+ txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
+
+ /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
+ * Work around: Always use 1 descriptor in 10Mbps mode */
+ if (entry % np->tx_coalesce == 0 || np->speed == 10)
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ TxDMAIndicate |
+ (1 << FragCountShift));
+ else
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ (1 << FragCountShift));
+
+ /* TxDMAPollNow */
+ writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
+ /* Schedule ISR */
+ writel(10000, ioaddr + CountDown);
+ np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
+ if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ < TX_QUEUE_LEN - 1 && np->speed != 10) {
+ /* do nothing */
+ } else if (!netif_queue_stopped(dev)) {
+ netif_stop_queue (dev);
+ }
+
+ /* The first TFDListPtr */
+ if (readl (dev->base_addr + TFDListPtr0) == 0) {
+ writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
+ dev->base_addr + TFDListPtr0);
+ writel (0, dev->base_addr + TFDListPtr1);
+ }
+
+ /* NETDEV WATCHDOG timer */
+ dev->trans_start = jiffies;
+ return 0;
+}
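+
+/* Illustration only (not part of the driver): how start_xmit() above packs
+ * the 64-bit TFD words, using hypothetical values and omitting the
+ * cpu_to_le64() conversions.  With skb->len = 0x0040 and a DMA address of
+ * 0x12340000:
+ *
+ *   fraginfo = dma_addr | ((u64) len << 48)
+ *            = 0x12340000 | 0x0040000000000000 = 0x0040000012340000
+ *
+ * status is entry | tfc_vlan_tag | WordAlignDisable | (1 << FragCountShift),
+ * with TxDMAIndicate added every tx_coalesce-th frame (or always at 10Mbps).
+ */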
+
+static irqreturn_t
+rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np;
+ unsigned int_status;
+ long ioaddr;
+ int cnt = max_intrloop;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ np = netdev_priv(dev);
+ while (1) {
+ int_status = readw (ioaddr + IntStatus);
+ writew (int_status, ioaddr + IntStatus);
+ int_status &= DEFAULT_INTR;
+ if (int_status == 0 || --cnt < 0)
+ break;
+ handled = 1;
+ /* Processing received packets */
+ if (int_status & RxDMAComplete)
+ receive_packet (dev);
+ /* TxDMAComplete interrupt */
+ if ((int_status & (TxDMAComplete|IntRequested))) {
+ int tx_status;
+ tx_status = readl (ioaddr + TxStatus);
+ if (tx_status & 0x01)
+ tx_error (dev, tx_status);
+ /* Free used tx skbuffs */
+ rio_free_tx (dev, 1);
+ }
+
+ /* Handle uncommon events */
+ if (int_status &
+ (HostError | LinkEvent | UpdateStats))
+ rio_error (dev, int_status);
+ }
+ if (np->cur_tx != np->old_tx)
+ writel (100, ioaddr + CountDown);
+ return IRQ_RETVAL(handled);
+}
+
+static void
+rio_free_tx (struct net_device *dev, int irq)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->old_tx % TX_RING_SIZE;
+ int tx_use = 0;
+ unsigned long flag = 0;
+
+ if (irq)
+ spin_lock(&np->tx_lock);
+ else
+ spin_lock_irqsave(&np->tx_lock, flag);
+
+ /* Free used tx skbuffs */
+ while (entry != np->cur_tx) {
+ struct sk_buff *skb;
+
+ if (!(np->tx_ring[entry].status & TFDDone))
+ break;
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev,
+ np->tx_ring[entry].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ if (irq)
+ dev_kfree_skb_irq (skb);
+ else
+ dev_kfree_skb (skb);
+
+ np->tx_skbuff[entry] = NULL;
+ entry = (entry + 1) % TX_RING_SIZE;
+ tx_use++;
+ }
+ if (irq)
+ spin_unlock(&np->tx_lock);
+ else
+ spin_unlock_irqrestore(&np->tx_lock, flag);
+ np->old_tx = entry;
+
+ /* If the ring is no longer full, clear tx_full and
+ call netif_wake_queue() */
+
+ if (netif_queue_stopped(dev) &&
+ ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ < TX_QUEUE_LEN - 1 || np->speed == 10)) {
+ netif_wake_queue (dev);
+ }
+}
+
+static void
+tx_error (struct net_device *dev, int tx_status)
+{
+ struct netdev_private *np;
+ long ioaddr = dev->base_addr;
+ int frame_id;
+ int i;
+
+ np = netdev_priv(dev);
+
+ frame_id = (tx_status & 0xffff0000);
+ printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
+ dev->name, tx_status, frame_id);
+ np->stats.tx_errors++;
+ /* Transmit Underrun */
+ if (tx_status & 0x10) {
+ np->stats.tx_fifo_errors++;
+ writew (readw (ioaddr + TxStartThresh) + 0x10,
+ ioaddr + TxStartThresh);
+ /* Transmit Underrun requires TxReset, DMAReset, FIFOReset */
+ writew (TxReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
+ /* Wait for ResetBusy bit clear */
+ for (i = 50; i > 0; i--) {
+ if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay (1);
+ }
+ rio_free_tx (dev, 1);
+ /* Reset TFDListPtr */
+ writel (np->tx_ring_dma +
+ np->old_tx * sizeof (struct netdev_desc),
+ dev->base_addr + TFDListPtr0);
+ writel (0, dev->base_addr + TFDListPtr1);
+
+ /* Leave TxStartThresh at its default value */
+ }
+ /* Late Collision */
+ if (tx_status & 0x04) {
+ np->stats.tx_fifo_errors++;
+ /* TxReset and clear FIFO */
+ writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
+ /* Wait reset done */
+ for (i = 50; i > 0; i--) {
+ if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay (1);
+ }
+ /* Leave TxStartThresh at its default value */
+ }
+ /* Maximum Collisions */
+#ifdef ETHER_STATS
+ if (tx_status & 0x08)
+ np->stats.collisions16++;
+#else
+ if (tx_status & 0x08)
+ np->stats.collisions++;
+#endif
+ /* Restart the Tx */
+ writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
+}
+
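+/* Note on the receive path below: packets no longer than copy_thresh are
+ * copied into a freshly allocated small skb (copy-break), so the full-sized
+ * Rx buffer stays mapped and is reused in place; longer packets hand the
+ * original buffer up the stack and a replacement is allocated in the refill
+ * loop at the end of receive_packet().
+ */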
+static int
+receive_packet (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int cnt = 30;
+
+ /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
+ while (1) {
+ struct netdev_desc *desc = &np->rx_ring[entry];
+ int pkt_len;
+ u64 frame_status;
+
+ if (!(desc->status & RFDDone) ||
+ !(desc->status & FrameStart) || !(desc->status & FrameEnd))
+ break;
+
+ /* Chip omits the CRC. */
+ pkt_len = le64_to_cpu (desc->status & 0xffff);
+ frame_status = le64_to_cpu (desc->status);
+ if (--cnt < 0)
+ break;
+ /* Update rx error statistics, drop packet. */
+ if (frame_status & RFS_Errors) {
+ np->stats.rx_errors++;
+ if (frame_status & (RxRuntFrame | RxLengthError))
+ np->stats.rx_length_errors++;
+ if (frame_status & RxFCSError)
+ np->stats.rx_crc_errors++;
+ if (frame_status & RxAlignmentError && np->speed != 1000)
+ np->stats.rx_frame_errors++;
+ if (frame_status & RxFIFOOverrun)
+ np->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ /* Small skbuffs for short packets */
+ if (pkt_len > copy_thresh) {
+ pci_unmap_single (np->pdev, desc->fraginfo,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put (skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
+ pci_dma_sync_single_for_cpu(np->pdev,
+ desc->fraginfo,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb->dev = dev;
+ /* 16 byte align the IP header */
+ skb_reserve (skb, 2);
+ eth_copy_and_sum (skb,
+ np->rx_skbuff[entry]->tail,
+ pkt_len, 0);
+ skb_put (skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pdev,
+ desc->fraginfo,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans (skb, dev);
+#if 0
+ /* Checksum done by hw, but csum value unavailable. */
+ if (np->pci_rev_id >= 0x0c &&
+ !(frame_status & (TCPError | UDPError | IPError))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+#endif
+ netif_rx (skb);
+ dev->last_rx = jiffies;
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ }
+ spin_lock(&np->rx_lock);
+ np->cur_rx = entry;
+ /* Re-allocate skbuffs to fill the descriptor ring */
+ entry = np->old_rx;
+ while (entry != np->cur_rx) {
+ struct sk_buff *skb;
+ /* Dropped packets don't need to re-allocate */
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb (np->rx_buf_sz);
+ if (skb == NULL) {
+ np->rx_ring[entry].fraginfo = 0;
+ printk (KERN_INFO
+ "%s: receive_packet: "
+ "Unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
+ break;
+ }
+ np->rx_skbuff[entry] = skb;
+ skb->dev = dev;
+ /* 16 byte align the IP header */
+ skb_reserve (skb, 2);
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ }
+ np->rx_ring[entry].fraginfo |=
+ cpu_to_le64 (np->rx_buf_sz) << 48;
+ np->rx_ring[entry].status = 0;
+ entry = (entry + 1) % RX_RING_SIZE;
+ }
+ np->old_rx = entry;
+ spin_unlock(&np->rx_lock);
+ return 0;
+}
+
+static void
+rio_error (struct net_device *dev, int int_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ u16 macctrl;
+
+ /* Link change event */
+ if (int_status & LinkEvent) {
+ if (mii_wait_link (dev, 10) == 0) {
+ printk (KERN_INFO "%s: Link up\n", dev->name);
+ if (np->phy_media)
+ mii_get_media_pcs (dev);
+ else
+ mii_get_media (dev);
+ if (np->speed == 1000)
+ np->tx_coalesce = tx_coalesce;
+ else
+ np->tx_coalesce = 1;
+ macctrl = 0;
+ macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
+ macctrl |= (np->full_duplex) ? DuplexSelect : 0;
+ macctrl |= (np->tx_flow) ?
+ TxFlowControlEnable : 0;
+ macctrl |= (np->rx_flow) ?
+ RxFlowControlEnable : 0;
+ writew(macctrl, ioaddr + MACCtrl);
+ np->link_status = 1;
+ netif_carrier_on(dev);
+ } else {
+ printk (KERN_INFO "%s: Link off\n", dev->name);
+ np->link_status = 0;
+ netif_carrier_off(dev);
+ }
+ }
+
+ /* UpdateStats statistics registers */
+ if (int_status & UpdateStats) {
+ get_stats (dev);
+ }
+
+ /* PCI Error: a catastrophic error related to the bus interface
+ occurred; set GlobalReset and HostReset to reset. */
+ if (int_status & HostError) {
+ printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
+ dev->name, int_status);
+ writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
+ mdelay (500);
+ }
+}
+
+static struct net_device_stats *
+get_stats (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+#ifdef MEM_MAPPING
+ int i;
+#endif
+ unsigned int stat_reg;
+
+ /* All statistics registers need to be acknowledged,
+ otherwise a statistics overflow could cause problems */
+
+ np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
+ np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
+ np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
+ np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
+
+ np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
+ np->stats.collisions += readl (ioaddr + SingleColFrames)
+ + readl (ioaddr + MultiColFrames);
+
+ /* detailed tx errors */
+ stat_reg = readw (ioaddr + FramesAbortXSColls);
+ np->stats.tx_aborted_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
+
+ stat_reg = readw (ioaddr + CarrierSenseErrors);
+ np->stats.tx_carrier_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
+
+ /* Clear all other statistics registers. */
+ readl (ioaddr + McstOctetXmtOk);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
+ readw (ioaddr + MacControlFramesRcvd);
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+ readl (ioaddr + McstOctetXmtOk);
+ readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readl (ioaddr + LateCollisions);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
+
+#ifdef MEM_MAPPING
+ for (i = 0x100; i <= 0x150; i += 4)
+ readl (ioaddr + i);
+#endif
+ readw (ioaddr + TxJumboFrames);
+ readw (ioaddr + RxJumboFrames);
+ readw (ioaddr + TCPCheckSumErrors);
+ readw (ioaddr + UDPCheckSumErrors);
+ readw (ioaddr + IPCheckSumErrors);
+ return &np->stats;
+}
+
+static int
+clear_stats (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+#ifdef MEM_MAPPING
+ int i;
+#endif
+
+ /* All statistics registers need to be acknowledged,
+ otherwise a statistics overflow could cause problems */
+ readl (ioaddr + FramesRcvOk);
+ readl (ioaddr + FramesXmtOk);
+ readl (ioaddr + OctetRcvOk);
+ readl (ioaddr + OctetXmtOk);
+
+ readl (ioaddr + McstFramesRcvdOk);
+ readl (ioaddr + SingleColFrames);
+ readl (ioaddr + MultiColFrames);
+ readl (ioaddr + LateCollisions);
+ /* detailed rx errors */
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+
+ /* detailed tx errors */
+ readw (ioaddr + FramesAbortXSColls);
+ readw (ioaddr + CarrierSenseErrors);
+
+ /* Clear all other statistics registers. */
+ readl (ioaddr + McstOctetXmtOk);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
+ readw (ioaddr + MacControlFramesRcvd);
+ readl (ioaddr + McstOctetXmtOk);
+ readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
+#ifdef MEM_MAPPING
+ for (i = 0x100; i <= 0x150; i += 4)
+ readl (ioaddr + i);
+#endif
+ readw (ioaddr + TxJumboFrames);
+ readw (ioaddr + RxJumboFrames);
+ readw (ioaddr + TCPCheckSumErrors);
+ readw (ioaddr + UDPCheckSumErrors);
+ readw (ioaddr + IPCheckSumErrors);
+ return 0;
+}
+
+
+int
+change_mtu (struct net_device *dev, int new_mtu)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int max = (np->jumbo) ? MAX_JUMBO : 1536;
+
+ if ((new_mtu < 68) || (new_mtu > max)) {
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static void
+set_multicast (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ u32 hash_table[2];
+ u16 rx_mode = 0;
+ struct netdev_private *np = netdev_priv(dev);
+
+ hash_table[0] = hash_table[1] = 0;
+ /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
+ hash_table[1] |= cpu_to_le32(0x02000000);
+ if (dev->flags & IFF_PROMISC) {
+ /* Receive all frames promiscuously. */
+ rx_mode = ReceiveAllFrames;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > multicast_filter_limit)) {
+ /* Receive broadcast and multicast frames */
+ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
+ } else if (dev->mc_count > 0) {
+ int i;
+ struct dev_mc_list *mclist;
+ /* Receive broadcast frames and multicast frames filtered
+ by the hash table */
+ rx_mode =
+ ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
+ for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist=mclist->next)
+ {
+ int bit, index = 0;
+ int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ /* The bit-reversed 6 most significant bits of the CRC
+ are used as an index into the hash table */
+ for (bit = 0; bit < 6; bit++)
+ if (crc & (1 << (31 - bit)))
+ index |= (1 << bit);
+ hash_table[index / 32] |= (1 << (index % 32));
+ }
+ } else {
+ rx_mode = ReceiveBroadcast | ReceiveUnicast;
+ }
+ if (np->vlan) {
+ /* ReceiveVLANMatch field in ReceiveMode */
+ rx_mode |= ReceiveVLANMatch;
+ }
+
+ writel (hash_table[0], ioaddr + HashTable0);
+ writel (hash_table[1], ioaddr + HashTable1);
+ writew (rx_mode, ioaddr + ReceiveMode);
+}
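+
+/* Worked example for the hash above (hypothetical CRC value): if
+ * ether_crc_le() returns 0xE0000000, bits 31..26 are 1,1,1,0,0,0, so the
+ * bit-reversal loop builds index = 0x07 and the address is accepted via
+ * bit 7 of hash_table[0] (HashTable0).
+ */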
+
+static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, "dl2k");
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ if (np->phy_media) {
+ /* fiber device */
+ cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ } else {
+ /* copper device */
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_MII;
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
+ ADVERTISED_Autoneg | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_INTERNAL;
+ }
+ if ( np->link_status ) {
+ cmd->speed = np->speed;
+ cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ if ( np->an_enable)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ cmd->phy_address = np->phy_addr;
+ return 0;
+}
+
+static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ netif_carrier_off(dev);
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (np->an_enable)
+ return 0;
+ else {
+ np->an_enable = 1;
+ mii_set_media(dev);
+ return 0;
+ }
+ } else {
+ np->an_enable = 0;
+ if (np->speed == 1000) {
+ cmd->speed = SPEED_100;
+ cmd->duplex = DUPLEX_FULL;
+ printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
+ }
+ switch(cmd->speed + cmd->duplex) {
+
+ case SPEED_10 + DUPLEX_HALF:
+ np->speed = 10;
+ np->full_duplex = 0;
+ break;
+
+ case SPEED_10 + DUPLEX_FULL:
+ np->speed = 10;
+ np->full_duplex = 1;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ np->speed = 100;
+ np->full_duplex = 0;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ np->speed = 100;
+ np->full_duplex = 1;
+ break;
+ case SPEED_1000 + DUPLEX_HALF:/* not supported */
+ case SPEED_1000 + DUPLEX_FULL:/* not supported */
+ default:
+ return -EINVAL;
+ }
+ mii_set_media(dev);
+ }
+ return 0;
+}
+
+static u32 rio_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->link_status;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = rio_get_drvinfo,
+ .get_settings = rio_get_settings,
+ .set_settings = rio_set_settings,
+ .get_link = rio_get_link,
+};
+
+static int
+rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int phy_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
+
+ struct netdev_desc *desc;
+ int i;
+
+ phy_addr = np->phy_addr;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ break;
+
+ case SIOCDEVPRIVATE + 1:
+ miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
+ break;
+ case SIOCDEVPRIVATE + 2:
+ mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
+ break;
+ case SIOCDEVPRIVATE + 3:
+ break;
+ case SIOCDEVPRIVATE + 4:
+ break;
+ case SIOCDEVPRIVATE + 5:
+ netif_stop_queue (dev);
+ break;
+ case SIOCDEVPRIVATE + 6:
+ netif_wake_queue (dev);
+ break;
+ case SIOCDEVPRIVATE + 7:
+ printk
+ ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
+ netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
+ np->old_rx);
+ break;
+ case SIOCDEVPRIVATE + 8:
+ printk("TX ring:\n");
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ desc = &np->tx_ring[i];
+ printk
+ ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ i,
+ (u32) (np->tx_ring_dma + i * sizeof (*desc)),
+ (u32) desc->next_desc,
+ (u32) desc->status, (u32) (desc->fraginfo >> 32),
+ (u32) desc->fraginfo);
+ printk ("\n");
+ }
+ printk ("\n");
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+#define EEP_READ 0x0200
+#define EEP_BUSY 0x8000
+/* Read the EEPROM word */
+/* We use I/O instructions to read/write the EEPROM, to avoid failures on some machines */
+int
+read_eeprom (long ioaddr, int eep_addr)
+{
+ int i = 1000;
+ outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
+ while (i-- > 0) {
+ if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
+ return inw (ioaddr + EepromData);
+ }
+ }
+ return 0;
+}
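+
+#if 0
+/* Illustrative sketch only -- not the driver's probe path.  Assuming the
+ * EEPROM is word-addressed (read_eeprom() masks eep_addr to 0xff) and laid
+ * out as struct t_SROM in dl2k.h, the 6-byte station address at byte offset
+ * 0x20 could be read as three little-endian words: */
+static void
+example_read_station_addr (long ioaddr, u8 *mac)
+{
+ int i;
+ for (i = 0; i < 3; i++) {
+ u16 w = read_eeprom (ioaddr, 0x10 + i); /* word 0x10 = byte 0x20 */
+ mac[2 * i] = w & 0xff;
+ mac[2 * i + 1] = w >> 8;
+ }
+}
+#endif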
+
+enum phy_ctrl_bits {
+ MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
+ MII_DUPLEX = 0x08,
+};
+
+#define mii_delay() readb(ioaddr)
+static void
+mii_sendbit (struct net_device *dev, u32 data)
+{
+ long ioaddr = dev->base_addr + PhyCtrl;
+ data = (data) ? MII_DATA1 : 0;
+ data |= MII_WRITE;
+ data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
+ writeb (data, ioaddr);
+ mii_delay ();
+ writeb (data | MII_CLK, ioaddr);
+ mii_delay ();
+}
+
+static int
+mii_getbit (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr + PhyCtrl;
+ u8 data;
+
+ data = (readb (ioaddr) & 0xf8) | MII_READ;
+ writeb (data, ioaddr);
+ mii_delay ();
+ writeb (data | MII_CLK, ioaddr);
+ mii_delay ();
+ return ((readb (ioaddr) >> 1) & 1);
+}
+
+static void
+mii_send_bits (struct net_device *dev, u32 data, int len)
+{
+ int i;
+ for (i = len - 1; i >= 0; i--) {
+ mii_sendbit (dev, data & (1 << i));
+ }
+}
+
+static int
+mii_read (struct net_device *dev, int phy_addr, int reg_num)
+{
+ u32 cmd;
+ int i;
+ u32 retval = 0;
+
+ /* Preamble */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP = 0110'b for read operation */
+ cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
+ mii_send_bits (dev, cmd, 14);
+ /* Turnaround */
+ if (mii_getbit (dev))
+ goto err_out;
+ /* Read data */
+ for (i = 0; i < 16; i++) {
+ retval |= mii_getbit (dev);
+ retval <<= 1;
+ }
+ /* End cycle */
+ mii_getbit (dev);
+ return (retval >> 1) & 0xffff;
+
+ err_out:
+ return 0;
+}
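+
+/* Worked example (hypothetical values): for phy_addr = 1 and reg_num = 1
+ * (MII_BMSR), the 14-bit read command built above is
+ * cmd = (0x06 << 10) | (1 << 5) | 1 = 0x1800 | 0x20 | 0x1 = 0x1821,
+ * i.e. ST=01, OP=10 (read), PHY address 00001, register 00001.
+ */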
+static int
+mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
+{
+ u32 cmd;
+
+ /* Preamble */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
+ cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
+ mii_send_bits (dev, cmd, 32);
+ /* End cycle */
+ mii_getbit (dev);
+ return 0;
+}
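+
+/* Worked example (hypothetical values): writing 0x8000 (reset) to register 0
+ * (MII_BMCR) of the PHY at address 1 gives
+ * cmd = (0x5002 << 16) | (1 << 23) | (0 << 18) | 0x8000 = 0x50828000,
+ * which mii_send_bits() shifts out MSB first over 32 clock cycles.
+ */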
+static int
+mii_wait_link (struct net_device *dev, int wait)
+{
+ BMSR_t bmsr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ do {
+ bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+ if (bmsr.bits.link_status)
+ return 0;
+ mdelay (1);
+ } while (--wait > 0);
+ return -1;
+}
+static int
+mii_get_media (struct net_device *dev)
+{
+ ANAR_t negotiate;
+ BMSR_t bmsr;
+ BMCR_t bmcr;
+ MSCR_t mscr;
+ MSSR_t mssr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+ if (np->an_enable) {
+ if (!bmsr.bits.an_complete) {
+ /* Auto-Negotiation not completed */
+ return -1;
+ }
+ negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
+ mii_read (dev, phy_addr, MII_ANLPAR);
+ mscr.image = mii_read (dev, phy_addr, MII_MSCR);
+ mssr.image = mii_read (dev, phy_addr, MII_MSSR);
+ if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
+ np->speed = 1000;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
+ } else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
+ np->speed = 1000;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
+ } else if (negotiate.bits.media_100BX_FD) {
+ np->speed = 100;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
+ } else if (negotiate.bits.media_100BX_HD) {
+ np->speed = 100;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
+ } else if (negotiate.bits.media_10BT_FD) {
+ np->speed = 10;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
+ } else if (negotiate.bits.media_10BT_HD) {
+ np->speed = 10;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
+ }
+ if (negotiate.bits.pause) {
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
+ } else if (negotiate.bits.asymmetric) {
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
+ }
+ /* else tx_flow, rx_flow = user select */
+ } else {
+ bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
+ if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
+ printk (KERN_INFO "Operating at 100 Mbps, ");
+ } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
+ printk (KERN_INFO "Operating at 10 Mbps, ");
+ } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
+ printk (KERN_INFO "Operating at 1000 Mbps, ");
+ }
+ if (bmcr.bits.duplex_mode) {
+ printk ("Full duplex\n");
+ } else {
+ printk ("Half duplex\n");
+ }
+ }
+ if (np->tx_flow)
+ printk(KERN_INFO "Enable Tx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Tx Flow Control\n");
+ if (np->rx_flow)
+ printk(KERN_INFO "Enable Rx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Rx Flow Control\n");
+
+ return 0;
+}
+
+static int
+mii_set_media (struct net_device *dev)
+{
+ PHY_SCR_t pscr;
+ BMCR_t bmcr;
+ BMSR_t bmsr;
+ ANAR_t anar;
+ int phy_addr;
+ struct netdev_private *np;
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ /* Auto-negotiation, or user-forced speed? */
+ if (np->an_enable) {
+ /* Advertise capabilities */
+ bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+ anar.image = mii_read (dev, phy_addr, MII_ANAR);
+ anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
+ anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
+ anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
+ anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
+ anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
+ anar.bits.pause = 1;
+ anar.bits.asymmetric = 1;
+ mii_write (dev, phy_addr, MII_ANAR, anar.image);
+
+ /* Enable Auto crossover */
+ pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr.bits.mdi_crossover_mode = 3; /* 11'b */
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+
+ /* Soft reset PHY */
+ mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
+ bmcr.image = 0;
+ bmcr.bits.an_enable = 1;
+ bmcr.bits.restart_an = 1;
+ bmcr.bits.reset = 1;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay(1);
+ } else {
+ /* Force speed setting */
+ /* 1) Disable Auto crossover */
+ pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr.bits.mdi_crossover_mode = 0;
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+
+ /* 2) PHY Reset */
+ bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
+ bmcr.bits.reset = 1;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+
+ /* 3) Power Down */
+ bmcr.image = 0x1940; /* must be 0x1940 */
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay (100); /* wait a certain time */
+
+ /* 4) Advertise nothing */
+ mii_write (dev, phy_addr, MII_ANAR, 0);
+
+ /* 5) Set media and Power Up */
+ bmcr.image = 0;
+ bmcr.bits.power_down = 1;
+ if (np->speed == 100) {
+ bmcr.bits.speed100 = 1;
+ bmcr.bits.speed1000 = 0;
+ printk (KERN_INFO "Manual 100 Mbps, ");
+ } else if (np->speed == 10) {
+ bmcr.bits.speed100 = 0;
+ bmcr.bits.speed1000 = 0;
+ printk (KERN_INFO "Manual 10 Mbps, ");
+ }
+ if (np->full_duplex) {
+ bmcr.bits.duplex_mode = 1;
+ printk ("Full duplex\n");
+ } else {
+ bmcr.bits.duplex_mode = 0;
+ printk ("Half duplex\n");
+ }
+#if 0
+ /* Set 1000BaseT Master/Slave setting */
+ mscr.image = mii_read (dev, phy_addr, MII_MSCR);
+ mscr.bits.cfg_enable = 1;
+ mscr.bits.cfg_value = 0;
+#endif
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay(10);
+ }
+ return 0;
+}
+
+static int
+mii_get_media_pcs (struct net_device *dev)
+{
+ ANAR_PCS_t negotiate;
+ BMSR_t bmsr;
+ BMCR_t bmcr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
+ if (np->an_enable) {
+ if (!bmsr.bits.an_complete) {
+ /* Auto-Negotiation not completed */
+ return -1;
+ }
+ negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
+ mii_read (dev, phy_addr, PCS_ANLPAR);
+ np->speed = 1000;
+ if (negotiate.bits.full_duplex) {
+ printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
+ np->full_duplex = 1;
+ } else {
+ printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
+ np->full_duplex = 0;
+ }
+ if (negotiate.bits.pause) {
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
+ } else if (negotiate.bits.asymmetric) {
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
+ }
+ /* else tx_flow, rx_flow = user select */
+ } else {
+ bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
+ printk (KERN_INFO "Operating at 1000 Mbps, ");
+ if (bmcr.bits.duplex_mode) {
+ printk ("Full duplex\n");
+ } else {
+ printk ("Half duplex\n");
+ }
+ }
+ if (np->tx_flow)
+ printk(KERN_INFO "Enable Tx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Tx Flow Control\n");
+ if (np->rx_flow)
+ printk(KERN_INFO "Enable Rx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Rx Flow Control\n");
+
+ return 0;
+}
+
+static int
+mii_set_media_pcs (struct net_device *dev)
+{
+ BMCR_t bmcr;
+ ESR_t esr;
+ ANAR_PCS_t anar;
+ int phy_addr;
+ struct netdev_private *np;
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ /* Auto-Negotiation? */
+ if (np->an_enable) {
+ /* Advertise capabilities */
+ esr.image = mii_read (dev, phy_addr, PCS_ESR);
+ anar.image = mii_read (dev, phy_addr, MII_ANAR);
+ anar.bits.half_duplex =
+ esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
+ anar.bits.full_duplex =
+ esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
+ anar.bits.pause = 1;
+ anar.bits.asymmetric = 1;
+ mii_write (dev, phy_addr, MII_ANAR, anar.image);
+
+ /* Soft reset PHY */
+ mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
+ bmcr.image = 0;
+ bmcr.bits.an_enable = 1;
+ bmcr.bits.restart_an = 1;
+ bmcr.bits.reset = 1;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay(1);
+ } else {
+ /* Force speed setting */
+ /* PHY Reset */
+ bmcr.image = 0;
+ bmcr.bits.reset = 1;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay(10);
+ bmcr.image = 0;
+ bmcr.bits.an_enable = 0;
+ if (np->full_duplex) {
+ bmcr.bits.duplex_mode = 1;
+ printk (KERN_INFO "Manual full duplex\n");
+ } else {
+ bmcr.bits.duplex_mode = 0;
+ printk (KERN_INFO "Manual half duplex\n");
+ }
+ mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+ mdelay(10);
+
+ /* Advertise nothing */
+ mii_write (dev, phy_addr, MII_ANAR, 0);
+ }
+ return 0;
+}
+
+
+static int
+rio_close (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue (dev);
+
+ /* Disable interrupts */
+ writew (0, ioaddr + IntEnable);
+
+ /* Stop Tx and Rx logics */
+ writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
+ synchronize_irq (dev->irq);
+ free_irq (dev->irq, dev);
+ del_timer_sync (&np->timer);
+
+ /* Free all the skbuffs in the queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
+ skb->len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb (skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb (skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void __devexit
+rio_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ unregister_netdev (dev);
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+#ifdef MEM_MAPPING
+ iounmap ((char *) (dev->base_addr));
+#endif
+ free_netdev (dev);
+ pci_release_regions (pdev);
+ pci_disable_device (pdev);
+ }
+ pci_set_drvdata (pdev, NULL);
+}
+
+static struct pci_driver rio_driver = {
+ .name = "dl2k",
+ .id_table = rio_pci_tbl,
+ .probe = rio_probe1,
+ .remove = __devexit_p(rio_remove1),
+};
+
+static int __init
+rio_init (void)
+{
+ return pci_module_init (&rio_driver);
+}
+
+static void __exit
+rio_exit (void)
+{
+ pci_unregister_driver (&rio_driver);
+}
+
+module_init (rio_init);
+module_exit (rio_exit);
+
+/*
+
+Compile command:
+
+gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
+
+Read Documentation/networking/dl2k.txt for details.
+
+*/
+
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
new file mode 100644
index 000000000000..6e75482d75f2
--- /dev/null
+++ b/drivers/net/dl2k.h
@@ -0,0 +1,711 @@
+/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
+/*
+ Copyright (c) 2001, 2002 by D-Link Corporation
+ Written by Edward Peng.<edward_peng@dlink.com.tw>
+ Created 03-May-2001, based on Linux' sundance.c.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+*/
+
+#ifndef __DL2K_H__
+#define __DL2K_H__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#define TX_RING_SIZE 256
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
+#define RX_RING_SIZE 256
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
+
+/* This driver was written to use PCI memory space; however, x86-oriented
+ hardware often uses I/O space accesses. */
+#ifndef MEM_MAPPING
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device: such names can only partially document the semantics, and they
+ make the driver longer and more difficult to read.
+ In general, only the important configuration values, or bits changed
+ multiple times, should be defined symbolically.
+*/
+enum dl2x_offsets {
+ /* I/O register offsets */
+ DMACtrl = 0x00,
+ RxDMAStatus = 0x08,
+ TFDListPtr0 = 0x10,
+ TFDListPtr1 = 0x14,
+ TxDMABurstThresh = 0x18,
+ TxDMAUrgentThresh = 0x19,
+ TxDMAPollPeriod = 0x1a,
+ RFDListPtr0 = 0x1c,
+ RFDListPtr1 = 0x20,
+ RxDMABurstThresh = 0x24,
+ RxDMAUrgentThresh = 0x25,
+ RxDMAPollPeriod = 0x26,
+ RxDMAIntCtrl = 0x28,
+ DebugCtrl = 0x2c,
+ ASICCtrl = 0x30,
+ FifoCtrl = 0x38,
+ RxEarlyThresh = 0x3a,
+ FlowOffThresh = 0x3c,
+ FlowOnThresh = 0x3e,
+ TxStartThresh = 0x44,
+ EepromData = 0x48,
+ EepromCtrl = 0x4a,
+ ExpromAddr = 0x4c,
+ Exprodata = 0x50,
+ WakeEvent = 0x51,
+ CountDown = 0x54,
+ IntStatusAck = 0x5a,
+ IntEnable = 0x5c,
+ IntStatus = 0x5e,
+ TxStatus = 0x60,
+ MACCtrl = 0x6c,
+ VLANTag = 0x70,
+ PhyCtrl = 0x76,
+ StationAddr0 = 0x78,
+ StationAddr1 = 0x7a,
+ StationAddr2 = 0x7c,
+ VLANId = 0x80,
+ MaxFrameSize = 0x86,
+ ReceiveMode = 0x88,
+ HashTable0 = 0x8c,
+ HashTable1 = 0x90,
+ RmonStatMask = 0x98,
+ StatMask = 0x9c,
+ RxJumboFrames = 0xbc,
+ TCPCheckSumErrors = 0xc0,
+ IPCheckSumErrors = 0xc2,
+ UDPCheckSumErrors = 0xc4,
+ TxJumboFrames = 0xf4,
+ /* Ethernet MIB statistic register offsets */
+ OctetRcvOk = 0xa8,
+ McstOctetRcvOk = 0xac,
+ BcstOctetRcvOk = 0xb0,
+ FramesRcvOk = 0xb4,
+ McstFramesRcvdOk = 0xb8,
+ BcstFramesRcvdOk = 0xbe,
+ MacControlFramesRcvd = 0xc6,
+ FrameTooLongErrors = 0xc8,
+ InRangeLengthErrors = 0xca,
+ FramesCheckSeqErrors = 0xcc,
+ FramesLostRxErrors = 0xce,
+ OctetXmtOk = 0xd0,
+ McstOctetXmtOk = 0xd4,
+ BcstOctetXmtOk = 0xd8,
+ FramesXmtOk = 0xdc,
+ McstFramesXmtdOk = 0xe0,
+ FramesWDeferredXmt = 0xe4,
+ LateCollisions = 0xe8,
+ MultiColFrames = 0xec,
+ SingleColFrames = 0xf0,
+ BcstFramesXmtdOk = 0xf6,
+ CarrierSenseErrors = 0xf8,
+ MacControlFramesXmtd = 0xfa,
+ FramesAbortXSColls = 0xfc,
+ FramesWEXDeferal = 0xfe,
+ /* RMON statistic register offsets */
+ EtherStatsCollisions = 0x100,
+ EtherStatsOctetsTransmit = 0x104,
+ EtherStatsPktsTransmit = 0x108,
+ EtherStatsPkts64OctetTransmit = 0x10c,
+ EtherStats65to127OctetsTransmit = 0x110,
+ EtherStatsPkts128to255OctetsTransmit = 0x114,
+ EtherStatsPkts256to511OctetsTransmit = 0x118,
+ EtherStatsPkts512to1023OctetsTransmit = 0x11c,
+ EtherStatsPkts1024to1518OctetsTransmit = 0x120,
+ EtherStatsCRCAlignErrors = 0x124,
+ EtherStatsUndersizePkts = 0x128,
+ EtherStatsFragments = 0x12c,
+ EtherStatsJabbers = 0x130,
+ EtherStatsOctets = 0x134,
+ EtherStatsPkts = 0x138,
+ EtherStats64Octets = 0x13c,
+ EtherStatsPkts65to127Octets = 0x140,
+ EtherStatsPkts128to255Octets = 0x144,
+ EtherStatsPkts256to511Octets = 0x148,
+ EtherStatsPkts512to1023Octets = 0x14c,
+ EtherStatsPkts1024to1518Octets = 0x150,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntStatus_bits {
+ InterruptStatus = 0x0001,
+ HostError = 0x0002,
+ MACCtrlFrame = 0x0008,
+ TxComplete = 0x0004,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntRequested = 0x0040,
+ UpdateStats = 0x0080,
+ LinkEvent = 0x0100,
+ TxDMAComplete = 0x0200,
+ RxDMAComplete = 0x0400,
+ RFDListEnd = 0x0800,
+ RxDMAPriority = 0x1000,
+};
+
+/* Bits in the ReceiveMode register. */
+enum ReceiveMode_bits {
+ ReceiveUnicast = 0x0001,
+ ReceiveMulticast = 0x0002,
+ ReceiveBroadcast = 0x0004,
+ ReceiveAllFrames = 0x0008,
+ ReceiveMulticastHash = 0x0010,
+ ReceiveIPMulticast = 0x0020,
+ ReceiveVLANMatch = 0x0100,
+ ReceiveVLANHash = 0x0200,
+};
+/* Bits in MACCtrl. */
+enum MACCtrl_bits {
+ DuplexSelect = 0x20,
+ TxFlowControlEnable = 0x80,
+ RxFlowControlEnable = 0x0100,
+ RcvFCS = 0x200,
+ AutoVLANtagging = 0x1000,
+ AutoVLANuntagging = 0x2000,
+ StatsEnable = 0x00200000,
+ StatsDisable = 0x00400000,
+ StatsEnabled = 0x00800000,
+ TxEnable = 0x01000000,
+ TxDisable = 0x02000000,
+ TxEnabled = 0x04000000,
+ RxEnable = 0x08000000,
+ RxDisable = 0x10000000,
+ RxEnabled = 0x20000000,
+};
+
+enum ASICCtrl_LoWord_bits {
+ PhyMedia = 0x0080,
+};
+
+enum ASICCtrl_HiWord_bits {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
+
+/* Transmit Frame Control bits */
+enum TFC_bits {
+ DwordAlign = 0x00000000,
+ WordAlignDisable = 0x00030000,
+ WordAlign = 0x00020000,
+ TCPChecksumEnable = 0x00040000,
+ UDPChecksumEnable = 0x00080000,
+ IPChecksumEnable = 0x00100000,
+ FCSAppendDisable = 0x00200000,
+ TxIndicate = 0x00400000,
+ TxDMAIndicate = 0x00800000,
+ FragCountShift = 24,
+ VLANTagInsert = 0x0000000010000000,
+ TFDDone = 0x80000000,
+ VIDShift = 32,
+ UsePriorityShift = 48,
+};
+
+/* Receive Frames Status bits */
+enum RFS_bits {
+ RxFIFOOverrun = 0x00010000,
+ RxRuntFrame = 0x00020000,
+ RxAlignmentError = 0x00040000,
+ RxFCSError = 0x00080000,
+ RxOverSizedFrame = 0x00100000,
+ RxLengthError = 0x00200000,
+ VLANDetected = 0x00400000,
+ TCPDetected = 0x00800000,
+ TCPError = 0x01000000,
+ UDPDetected = 0x02000000,
+ UDPError = 0x04000000,
+ IPDetected = 0x08000000,
+ IPError = 0x10000000,
+ FrameStart = 0x20000000,
+ FrameEnd = 0x40000000,
+ RFDDone = 0x80000000,
+ TCIShift = 32,
+ RFS_Errors = 0x003f0000,
+};
+
+#define MII_RESET_TIME_OUT 10000
+/* MII register */
+enum _mii_reg {
+ MII_BMCR = 0,
+ MII_BMSR = 1,
+ MII_PHY_ID1 = 2,
+ MII_PHY_ID2 = 3,
+ MII_ANAR = 4,
+ MII_ANLPAR = 5,
+ MII_ANER = 6,
+ MII_ANNPT = 7,
+ MII_ANLPRNP = 8,
+ MII_MSCR = 9,
+ MII_MSSR = 10,
+ MII_ESR = 15,
+ MII_PHY_SCR = 16,
+};
+/* PCS register */
+enum _pcs_reg {
+ PCS_BMCR = 0,
+ PCS_BMSR = 1,
+ PCS_ANAR = 4,
+ PCS_ANLPAR = 5,
+ PCS_ANER = 6,
+ PCS_ANNPT = 7,
+ PCS_ANLPRNP = 8,
+ PCS_ESR = 15,
+};
+
+/* Basic Mode Control Register */
+typedef union t_MII_BMCR {
+ u16 image;
+ struct {
+ u16 _bit_5_0:6; // bit 5:0
+ u16 speed1000:1; // bit 6
+ u16 col_test_enable:1; // bit 7
+ u16 duplex_mode:1; // bit 8
+ u16 restart_an:1; // bit 9
+ u16 isolate:1; // bit 10
+ u16 power_down:1; // bit 11
+ u16 an_enable:1; // bit 12
+ u16 speed100:1; // bit 13
+ u16 loopback:1; // bit 14
+ u16 reset:1; // bit 15
+ } bits;
+} BMCR_t, *PBMCR_t;
+
+enum _mii_bmcr {
+ MII_BMCR_RESET = 0x8000,
+ MII_BMCR_LOOP_BACK = 0x4000,
+ MII_BMCR_SPEED_LSB = 0x2000,
+ MII_BMCR_AN_ENABLE = 0x1000,
+ MII_BMCR_POWER_DOWN = 0x0800,
+ MII_BMCR_ISOLATE = 0x0400,
+ MII_BMCR_RESTART_AN = 0x0200,
+ MII_BMCR_DUPLEX_MODE = 0x0100,
+ MII_BMCR_COL_TEST = 0x0080,
+ MII_BMCR_SPEED_MSB = 0x0040,
+ MII_BMCR_SPEED_RESERVED = 0x003f,
+ MII_BMCR_SPEED_10 = 0,
+ MII_BMCR_SPEED_100 = MII_BMCR_SPEED_LSB,
+ MII_BMCR_SPEED_1000 = MII_BMCR_SPEED_MSB,
+};
+
+/* Basic Mode Status Register */
+typedef union t_MII_BMSR {
+ u16 image;
+ struct {
+ u16 ext_capability:1; // bit 0
+ u16 japper_detect:1; // bit 1
+ u16 link_status:1; // bit 2
+ u16 an_ability:1; // bit 3
+ u16 remote_fault:1; // bit 4
+ u16 an_complete:1; // bit 5
+ u16 preamble_supp:1; // bit 6
+ u16 _bit_7:1; // bit 7
+ u16 ext_status:1; // bit 8
+ u16 media_100BT2_HD:1; // bit 9
+ u16 media_100BT2_FD:1; // bit 10
+ u16 media_10BT_HD:1; // bit 11
+ u16 media_10BT_FD:1; // bit 12
+ u16 media_100BX_HD:1; // bit 13
+ u16 media_100BX_FD:1; // bit 14
+ u16 media_100BT4:1; // bit 15
+ } bits;
+} BMSR_t, *PBMSR_t;
+
+enum _mii_bmsr {
+ MII_BMSR_100BT4 = 0x8000,
+ MII_BMSR_100BX_FD = 0x4000,
+ MII_BMSR_100BX_HD = 0x2000,
+ MII_BMSR_10BT_FD = 0x1000,
+ MII_BMSR_10BT_HD = 0x0800,
+ MII_BMSR_100BT2_FD = 0x0400,
+ MII_BMSR_100BT2_HD = 0x0200,
+ MII_BMSR_EXT_STATUS = 0x0100,
+ MII_BMSR_PREAMBLE_SUPP = 0x0040,
+ MII_BMSR_AN_COMPLETE = 0x0020,
+ MII_BMSR_REMOTE_FAULT = 0x0010,
+ MII_BMSR_AN_ABILITY = 0x0008,
+ MII_BMSR_LINK_STATUS = 0x0004,
+ MII_BMSR_JABBER_DETECT = 0x0002,
+ MII_BMSR_EXT_CAP = 0x0001,
+};
+
+/* ANAR */
+typedef union t_MII_ANAR {
+ u16 image;
+ struct {
+ u16 selector:5; // bit 4:0
+ u16 media_10BT_HD:1; // bit 5
+ u16 media_10BT_FD:1; // bit 6
+ u16 media_100BX_HD:1; // bit 7
+ u16 media_100BX_FD:1; // bit 8
+ u16 media_100BT4:1; // bit 9
+ u16 pause:1; // bit 10
+ u16 asymmetric:1; // bit 11
+ u16 _bit12:1; // bit 12
+ u16 remote_fault:1; // bit 13
+ u16 _bit14:1; // bit 14
+ u16 next_page:1; // bit 15
+ } bits;
+} ANAR_t, *PANAR_t;
+
+enum _mii_anar {
+ MII_ANAR_NEXT_PAGE = 0x8000,
+ MII_ANAR_REMOTE_FAULT = 0x4000,
+ MII_ANAR_ASYMMETRIC = 0x0800,
+ MII_ANAR_PAUSE = 0x0400,
+ MII_ANAR_100BT4 = 0x0200,
+ MII_ANAR_100BX_FD = 0x0100,
+ MII_ANAR_100BX_HD = 0x0080,
+ MII_ANAR_10BT_FD = 0x0020,
+ MII_ANAR_10BT_HD = 0x0010,
+ MII_ANAR_SELECTOR = 0x001f,
+ MII_IEEE8023_CSMACD = 0x0001,
+};
+
+/* ANLPAR */
+typedef union t_MII_ANLPAR {
+ u16 image;
+ struct {
+ u16 selector:5; // bit 4:0
+ u16 media_10BT_HD:1; // bit 5
+ u16 media_10BT_FD:1; // bit 6
+ u16 media_100BX_HD:1; // bit 7
+ u16 media_100BX_FD:1; // bit 8
+ u16 media_100BT4:1; // bit 9
+ u16 pause:1; // bit 10
+ u16 asymmetric:1; // bit 11
+ u16 _bit12:1; // bit 12
+ u16 remote_fault:1; // bit 13
+ u16 _bit14:1; // bit 14
+ u16 next_page:1; // bit 15
+ } bits;
+} ANLPAR_t, *PANLPAR_t;
+
+enum _mii_anlpar {
+ MII_ANLPAR_NEXT_PAGE = MII_ANAR_NEXT_PAGE,
+ MII_ANLPAR_REMOTE_FAULT = MII_ANAR_REMOTE_FAULT,
+ MII_ANLPAR_ASYMMETRIC = MII_ANAR_ASYMMETRIC,
+ MII_ANLPAR_PAUSE = MII_ANAR_PAUSE,
+ MII_ANLPAR_100BT4 = MII_ANAR_100BT4,
+ MII_ANLPAR_100BX_FD = MII_ANAR_100BX_FD,
+ MII_ANLPAR_100BX_HD = MII_ANAR_100BX_HD,
+ MII_ANLPAR_10BT_FD = MII_ANAR_10BT_FD,
+ MII_ANLPAR_10BT_HD = MII_ANAR_10BT_HD,
+ MII_ANLPAR_SELECTOR = MII_ANAR_SELECTOR,
+};
+
+/* Auto-Negotiation Expansion Register */
+typedef union t_MII_ANER {
+ u16 image;
+ struct {
+ u16 lp_negotiable:1; // bit 0
+ u16 page_received:1; // bit 1
+ u16 nextpagable:1; // bit 2
+ u16 lp_nextpagable:1; // bit 3
+ u16 pdetect_fault:1; // bit 4
+ u16 _bit15_5:11; // bit 15:5
+ } bits;
+} ANER_t, *PANER_t;
+
+enum _mii_aner {
+ MII_ANER_PAR_DETECT_FAULT = 0x0010,
+ MII_ANER_LP_NEXTPAGABLE = 0x0008,
+ MII_ANER_NETXTPAGABLE = 0x0004,
+ MII_ANER_PAGE_RECEIVED = 0x0002,
+ MII_ANER_LP_NEGOTIABLE = 0x0001,
+};
+
+/* MASTER-SLAVE Control Register */
+typedef union t_MII_MSCR {
+ u16 image;
+ struct {
+ u16 _bit_7_0:8; // bit 7:0
+ u16 media_1000BT_HD:1; // bit 8
+ u16 media_1000BT_FD:1; // bit 9
+ u16 port_type:1; // bit 10
+ u16 cfg_value:1; // bit 11
+ u16 cfg_enable:1; // bit 12
+ u16 test_mode:3; // bit 15:13
+ } bits;
+} MSCR_t, *PMSCR_t;
+
+enum _mii_mscr {
+ MII_MSCR_TEST_MODE = 0xe000,
+ MII_MSCR_CFG_ENABLE = 0x1000,
+ MII_MSCR_CFG_VALUE = 0x0800,
+ MII_MSCR_PORT_VALUE = 0x0400,
+ MII_MSCR_1000BT_FD = 0x0200,
+ MII_MSCR_1000BT_HD = 0x0100,
+};
+
+/* MASTER-SLAVE Status Register */
+typedef union t_MII_MSSR {
+ u16 image;
+ struct {
+ u16 idle_err_count:8; // bit 7:0
+ u16 _bit_9_8:2; // bit 9:8
+ u16 lp_1000BT_HD:1; // bit 10
+ u16 lp_1000BT_FD:1; // bit 11
+ u16 remote_rcv_status:1; // bit 12
+ u16 local_rcv_status:1; // bit 13
+ u16 cfg_resolution:1; // bit 14
+ u16 cfg_fault:1; // bit 15
+ } bits;
+} MSSR_t, *PMSSR_t;
+
+enum _mii_mssr {
+ MII_MSSR_CFG_FAULT = 0x8000,
+ MII_MSSR_CFG_RES = 0x4000,
+ MII_MSSR_LOCAL_RCV_STATUS = 0x2000,
+ MII_MSSR_REMOTE_RCVR = 0x1000,
+ MII_MSSR_LP_1000BT_HD = 0x0800,
+ MII_MSSR_LP_1000BT_FD = 0x0400,
+ MII_MSSR_IDLE_ERR_COUNT = 0x00ff,
+};
+
+/* IEEE Extended Status Register */
+typedef union t_MII_ESR {
+ u16 image;
+ struct {
+ u16 _bit_11_0:12; // bit 11:0
+ u16 media_1000BT_HD:1; // bit 12
+ u16 media_1000BT_FD:1; // bit 13
+ u16 media_1000BX_HD:1; // bit 14
+ u16 media_1000BX_FD:1; // bit 15
+ } bits;
+} ESR_t, *PESR_t;
+
+enum _mii_esr {
+ MII_ESR_1000BX_FD = 0x8000,
+ MII_ESR_1000BX_HD = 0x4000,
+ MII_ESR_1000BT_FD = 0x2000,
+ MII_ESR_1000BT_HD = 0x1000,
+};
+/* PHY Specific Control Register */
+typedef union t_MII_PHY_SCR {
+ u16 image;
+ struct {
+ u16 disable_jabber:1; // bit 0
+ u16 polarity_reversal:1; // bit 1
+ u16 SEQ_test:1; // bit 2
+ u16 _bit_3:1; // bit 3
+ u16 disable_CLK125:1; // bit 4
+ u16 mdi_crossover_mode:2; // bit 6:5
+ u16 enable_ext_dist:1; // bit 7
+ u16 _bit_8_9:2; // bit 9:8
+ u16 force_link:1; // bit 10
+ u16 assert_CRS:1; // bit 11
+ u16 rcv_fifo_depth:2; // bit 13:12
+ u16 xmit_fifo_depth:2; // bit 15:14
+ } bits;
+} PHY_SCR_t, *PPHY_SCR_t;
+
+typedef enum t_MII_ADMIN_STATUS {
+ adm_reset,
+ adm_operational,
+ adm_loopback,
+ adm_power_down,
+ adm_isolate
+} MII_ADMIN_t, *PMII_ADMIN_t;
+
+/* Physical Coding Sublayer Management (PCS) */
+/* PCS control and status registers bitmap as the same as MII */
+/* PCS Extended Status register bitmap as the same as MII */
+/* PCS ANAR */
+typedef union t_PCS_ANAR {
+ u16 image;
+ struct {
+ u16 _bit_4_0:5; // bit 4:0
+ u16 full_duplex:1; // bit 5
+ u16 half_duplex:1; // bit 6
+ u16 asymmetric:1; // bit 7
+ u16 pause:1; // bit 8
+ u16 _bit_11_9:3; // bit 11:9
+ u16 remote_fault:2; // bit 13:12
+ u16 _bit_14:1; // bit 14
+ u16 next_page:1; // bit 15
+ } bits;
+} ANAR_PCS_t, *PANAR_PCS_t;
+
+enum _pcs_anar {
+ PCS_ANAR_NEXT_PAGE = 0x8000,
+ PCS_ANAR_REMOTE_FAULT = 0x3000,
+ PCS_ANAR_ASYMMETRIC = 0x0100,
+ PCS_ANAR_PAUSE = 0x0080,
+ PCS_ANAR_HALF_DUPLEX = 0x0040,
+ PCS_ANAR_FULL_DUPLEX = 0x0020,
+};
+/* PCS ANLPAR */
+typedef union t_PCS_ANLPAR {
+ u16 image;
+ struct {
+ u16 _bit_4_0:5; // bit 4:0
+ u16 full_duplex:1; // bit 5
+ u16 half_duplex:1; // bit 6
+ u16 asymmetric:1; // bit 7
+ u16 pause:1; // bit 8
+ u16 _bit_11_9:3; // bit 11:9
+ u16 remote_fault:2; // bit 13:12
+ u16 _bit_14:1; // bit 14
+ u16 next_page:1; // bit 15
+ } bits;
+} ANLPAR_PCS_t, *PANLPAR_PCS_t;
+
+enum _pcs_anlpar {
+ PCS_ANLPAR_NEXT_PAGE = PCS_ANAR_NEXT_PAGE,
+ PCS_ANLPAR_REMOTE_FAULT = PCS_ANAR_REMOTE_FAULT,
+ PCS_ANLPAR_ASYMMETRIC = PCS_ANAR_ASYMMETRIC,
+ PCS_ANLPAR_PAUSE = PCS_ANAR_PAUSE,
+ PCS_ANLPAR_HALF_DUPLEX = PCS_ANAR_HALF_DUPLEX,
+ PCS_ANLPAR_FULL_DUPLEX = PCS_ANAR_FULL_DUPLEX,
+};
+
+typedef struct t_SROM {
+ u16 config_param; /* 0x00 */
+ u16 asic_ctrl; /* 0x02 */
+ u16 sub_vendor_id; /* 0x04 */
+ u16 sub_system_id; /* 0x06 */
+ u16 reserved1[12]; /* 0x08-0x1f */
+ u8 mac_addr[6]; /* 0x20-0x25 */
+ u8 reserved2[10]; /* 0x26-0x2f */
+ u8 sib[204]; /* 0x30-0xfb */
+ u32 crc; /* 0xfc-0xff */
+} SROM_t, *PSROM_t;
+
+/* Ioctl custom data */
+struct ioctl_data {
+ char signature[10];
+ int cmd;
+ int len;
+ char *data;
+};
+
+struct mii_data {
+ __u16 reserved;
+ __u16 reg_num;
+ __u16 in_value;
+ __u16 out_value;
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct netdev_desc {
+ u64 next_desc;
+ u64 status;
+ u64 fraginfo;
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct pci_dev *pdev;
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ struct net_device_stats stats;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int speed; /* Operating speed */
+ unsigned int vlan; /* VLAN Id */
+ unsigned int chip_id; /* PCI table chip id */
+ unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */
+ unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */
+ unsigned int tx_coalesce; /* Maximum frames each tx interrupt */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int an_enable:2; /* Auto-Negotiated Enable */
+ unsigned int jumbo:1; /* Jumbo frame enable */
+ unsigned int coalesce:1; /* Rx coalescing enable */
+ unsigned int tx_flow:1; /* Tx flow control enable */
+ unsigned int rx_flow:1; /* Rx flow control enable */
+ unsigned int phy_media:1; /* 1: fiber, 0: copper */
+ unsigned int link_status:1; /* Current link status */
+ unsigned char pci_rev_id; /* PCI revision ID */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
+ unsigned long cur_tx, old_tx;
+ struct timer_list timer;
+ int wake_polarity;
+ char name[256]; /* net device description */
+ u8 duplex_polarity;
+ u16 mcast_filter[4];
+ u16 advertising; /* NWay media advertisement */
+ u16 negotiate; /* Negotiated media */
+ int phy_addr; /* PHY addresses. */
+};
+
+/* The station address location in the EEPROM. */
+#ifdef MEM_MAPPING
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#endif
+/* The struct pci_device_id consist of:
+ vendor, device Vendor and device ID to match (or PCI_ANY_ID)
+ subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID)
+ class Device class to match. The class_mask tells which bits
+ class_mask of the class are honored during the comparison.
+ driver_data Data private to the driver.
+*/
+static struct pci_device_id rio_pci_tbl[] = {
+ {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
+#define TX_TIMEOUT (4*HZ)
+#define PACKET_SIZE 1536
+#define MAX_JUMBO 8000
+#define RIO_IO_SIZE 340
+#define DEFAULT_RXC 5
+#define DEFAULT_RXT 750
+#define DEFAULT_TXC 1
+#define MAX_TXC 8
+#endif /* __DL2K_H__ */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
new file mode 100644
index 000000000000..dd8c15ac5c77
--- /dev/null
+++ b/drivers/net/dummy.c
@@ -0,0 +1,152 @@
+/* dummy.c: a dummy net driver
+
+ The purpose of this driver is to provide a device to point a
+ route through, but not to actually transmit packets.
+
+ Why? If you have a machine whose only connection is an occasional
+ PPP/SLIP/PLIP link, you can only connect to your own hostname
+ when the link is up. Otherwise you have to use localhost.
+ This isn't very consistent.
+
+ One solution is to set up a dummy link using PPP/SLIP/PLIP,
+ but this seems (to me) too much overhead for too little gain.
+ This driver provides a small alternative. Thus you can do
+
+ [when not running slip]
+ ifconfig dummy slip.addr.ess.here up
+ [to go to slip]
+ ifconfig dummy down
+ dip whatever
+
+ This was written by looking at Donald Becker's skeleton driver
+ and the loopback driver. I then threw away anything that didn't
+ apply! Thanks to Alan Cox for the key clue on what to do with
+ misguided packets.
+
+ Nick Holloway, 27th May 1994
+ [I tweaked this explanation a little but that's all]
+ Alan Cox, 30th May 1994
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+static int numdummies = 1;
+
+static int dummy_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *dummy_get_stats(struct net_device *dev);
+
+static int dummy_set_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *sa = p;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ return 0;
+}
+
+/* fake multicast ability */
+static void set_multicast_list(struct net_device *dev)
+{
+}
+
+static void __init dummy_setup(struct net_device *dev)
+{
+ /* Initialize the device structure. */
+ dev->get_stats = dummy_get_stats;
+ dev->hard_start_xmit = dummy_xmit;
+ dev->set_multicast_list = set_multicast_list;
+ dev->set_mac_address = dummy_set_address;
+
+ /* Fill in device structure with ethernet-generic values. */
+ ether_setup(dev);
+ dev->tx_queue_len = 0;
+ dev->change_mtu = NULL;
+ dev->flags |= IFF_NOARP;
+ dev->flags &= ~IFF_MULTICAST;
+ SET_MODULE_OWNER(dev);
+ random_ether_addr(dev->dev_addr);
+}
+
+static int dummy_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = netdev_priv(dev);
+
+ stats->tx_packets++;
+ stats->tx_bytes+=skb->len;
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static struct net_device_stats *dummy_get_stats(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static struct net_device **dummies;
+
+/* Number of dummy devices to be set up by this module. */
+module_param(numdummies, int, 0);
+MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
+
+static int __init dummy_init_one(int index)
+{
+ struct net_device *dev_dummy;
+ int err;
+
+ dev_dummy = alloc_netdev(sizeof(struct net_device_stats),
+ "dummy%d", dummy_setup);
+
+ if (!dev_dummy)
+ return -ENOMEM;
+
+ if ((err = register_netdev(dev_dummy))) {
+ free_netdev(dev_dummy);
+ dev_dummy = NULL;
+ } else {
+ dummies[index] = dev_dummy;
+ }
+
+ return err;
+}
+
+static void dummy_free_one(int index)
+{
+ unregister_netdev(dummies[index]);
+ free_netdev(dummies[index]);
+}
+
+static int __init dummy_init_module(void)
+{
+ int i, err = 0;
+ dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL);
+ if (!dummies)
+ return -ENOMEM;
+ for (i = 0; i < numdummies && !err; i++)
+ err = dummy_init_one(i);
+ if (err) {
+ while (--i >= 0)
+ dummy_free_one(i);
+ }
+ return err;
+}
+
+static void __exit dummy_cleanup_module(void)
+{
+ int i;
+ for (i = 0; i < numdummies; i++)
+ dummy_free_one(i);
+ kfree(dummies);
+}
+
+module_init(dummy_init_module);
+module_exit(dummy_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
new file mode 100644
index 000000000000..1b68dd5a49b6
--- /dev/null
+++ b/drivers/net/e100.c
@@ -0,0 +1,2374 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * e100.c: Intel(R) PRO/100 ethernet driver
+ *
+ * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
+ * original e100 driver, but better described as a munging of
+ * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
+ *
+ * References:
+ * Intel 8255x 10/100 Mbps Ethernet Controller Family,
+ * Open Source Software Developers Manual,
+ * http://sourceforge.net/projects/e1000
+ *
+ *
+ * Theory of Operation
+ *
+ * I. General
+ *
+ * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
+ * controller family, which includes the 82557, 82558, 82559, 82550,
+ * 82551, and 82562 devices. 82558 and greater controllers
+ * integrate the Intel 82555 PHY. The controllers are used in
+ * server and client network interface cards, as well as in
+ * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
+ * configurations. 8255x supports a 32-bit linear addressing
+ * mode and operates at a 33MHz PCI clock rate.
+ *
+ * II. Driver Operation
+ *
+ * Memory-mapped mode is used exclusively to access the device's
+ * shared-memory structure, the Control/Status Registers (CSR). All
+ * setup, configuration, and control of the device, including queuing
+ * of Tx, Rx, and configuration commands is through the CSR.
+ * cmd_lock serializes accesses to the CSR command register. cb_lock
+ * protects the shared Command Block List (CBL).
+ *
+ * 8255x is highly MII-compliant and all accesses to the PHY go
+ * through the Management Data Interface (MDI). Consequently, the
+ * driver leverages the mii.c library shared with other MII-compliant
+ * devices.
+ *
+ * Big- and Little-Endian byte order as well as 32- and 64-bit
+ * archs are supported. Weak-ordered memory and non-cache-coherent
+ * archs are supported.
+ *
+ * III. Transmit
+ *
+ * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
+ * together in a fixed-size ring (CBL) thus forming the flexible mode
+ * memory structure. A TCB marked with the suspend-bit indicates
+ * the end of the ring. The last TCB processed suspends the
+ * controller, and the controller can be restarted by issue a CU
+ * resume command to continue from the suspend point, or a CU start
+ * command to start at a given position in the ring.
+ *
+ * Non-Tx commands (config, multicast setup, etc) are linked
+ * into the CBL ring along with Tx commands. The common structure
+ * used for both Tx and non-Tx commands is the Command Block (CB).
+ *
+ * cb_to_use is the next CB to use for queuing a command; cb_to_clean
+ * is the next CB to check for completion; cb_to_send is the first
+ * CB to start on in case of a previous failure to resume. CB clean
+ * up happens in interrupt context in response to a CU interrupt.
+ * cbs_avail keeps track of number of free CB resources available.
+ *
+ * Hardware padding of short packets to minimum packet size is
+ * enabled. 82557 pads with 7Eh, while the later controllers pad
+ * with 00h.
+ *
+ * IV. Receive
+ *
+ * The Receive Frame Area (RFA) comprises a ring of Receive Frame
+ * Descriptors (RFD) + data buffer, thus forming the simplified mode
+ * memory structure. Rx skbs are allocated to contain both the RFD
+ * and the data buffer, but the RFD is pulled off before the skb is
+ * indicated. The data buffer is aligned such that encapsulated
+ * protocol headers are u32-aligned. Since the RFD is part of the
+ * mapped shared memory, and completion status is contained within
+ * the RFD, the RFD must be dma_sync'ed to maintain a consistent
+ * view from software and hardware.
+ *
+ * Under typical operation, the receive unit (RU) is started once,
+ * and the controller happily fills RFDs as frames arrive. If
+ * replacement RFDs cannot be allocated, or the RU goes non-active,
+ * the RU must be restarted. Frame arrival generates an interrupt,
+ * and Rx indication and re-allocation happen in the same context,
+ * therefore no locking is required. A software-generated interrupt
+ * is generated from the watchdog to recover from a failed allocation
+ * scenario where all Rx resources have been indicated and none re-
+ * placed.
+ *
+ * V. Miscellaneous
+ *
+ * VLAN offloading of tagging, stripping and filtering is not
+ * supported, but the driver will accommodate the extra 4-byte VLAN tag
+ * for processing by upper layers. Tx/Rx Checksum offloading is not
+ * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
+ * not supported (hardware limitation).
+ *
+ * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
+ *
+ * Thanks to JC (jchapman@katalix.com) for helping with
+ * testing/troubleshooting the development driver.
+ *
+ * TODO:
+ * o several entry points race with dev->close
+ * o check for tx-no-resources/stop Q races with tx clean/wake Q
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+
+
+#define DRV_NAME "e100"
+#define DRV_EXT "-NAPI"
+#define DRV_VERSION "3.3.6-k2"DRV_EXT
+#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
+#define PFX DRV_NAME ": "
+
+#define E100_WATCHDOG_PERIOD (2 * HZ)
+#define E100_NAPI_WEIGHT 16
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
+ __FUNCTION__ , ## args))
+
+#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
+ PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
+static struct pci_device_id e100_id_table[] = {
+ INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, e100_id_table);
+
+enum mac {
+ mac_82557_D100_A = 0,
+ mac_82557_D100_B = 1,
+ mac_82557_D100_C = 2,
+ mac_82558_D101_A4 = 4,
+ mac_82558_D101_B0 = 5,
+ mac_82559_D101M = 8,
+ mac_82559_D101S = 9,
+ mac_82550_D102 = 12,
+ mac_82550_D102_C = 13,
+ mac_82551_E = 14,
+ mac_82551_F = 15,
+ mac_82551_10 = 16,
+ mac_unknown = 0xFF,
+};
+
+enum phy {
+ phy_100a = 0x000003E0,
+ phy_100c = 0x035002A8,
+ phy_82555_tx = 0x015002A8,
+ phy_nsc_tx = 0x5C002000,
+ phy_82562_et = 0x033002A8,
+ phy_82562_em = 0x032002A8,
+ phy_82562_ek = 0x031002A8,
+ phy_82562_eh = 0x017002A8,
+ phy_unknown = 0xFFFFFFFF,
+};
+
+/* CSR (Control/Status Registers) */
+struct csr {
+ struct {
+ u8 status;
+ u8 stat_ack;
+ u8 cmd_lo;
+ u8 cmd_hi;
+ u32 gen_ptr;
+ } scb;
+ u32 port;
+ u16 flash_ctrl;
+ u8 eeprom_ctrl_lo;
+ u8 eeprom_ctrl_hi;
+ u32 mdi_ctrl;
+ u32 rx_dma_count;
+};
+
+enum scb_status {
+ rus_ready = 0x10,
+ rus_mask = 0x3C,
+};
+
+enum scb_stat_ack {
+ stat_ack_not_ours = 0x00,
+ stat_ack_sw_gen = 0x04,
+ stat_ack_rnr = 0x10,
+ stat_ack_cu_idle = 0x20,
+ stat_ack_frame_rx = 0x40,
+ stat_ack_cu_cmd_done = 0x80,
+ stat_ack_not_present = 0xFF,
+ stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
+ stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
+};
+
+enum scb_cmd_hi {
+ irq_mask_none = 0x00,
+ irq_mask_all = 0x01,
+ irq_sw_gen = 0x02,
+};
+
+enum scb_cmd_lo {
+ cuc_nop = 0x00,
+ ruc_start = 0x01,
+ ruc_load_base = 0x06,
+ cuc_start = 0x10,
+ cuc_resume = 0x20,
+ cuc_dump_addr = 0x40,
+ cuc_dump_stats = 0x50,
+ cuc_load_base = 0x60,
+ cuc_dump_reset = 0x70,
+};
+
+enum cuc_dump {
+ cuc_dump_complete = 0x0000A005,
+ cuc_dump_reset_complete = 0x0000A007,
+};
+
+enum port {
+ software_reset = 0x0000,
+ selftest = 0x0001,
+ selective_reset = 0x0002,
+};
+
+enum eeprom_ctrl_lo {
+ eesk = 0x01,
+ eecs = 0x02,
+ eedi = 0x04,
+ eedo = 0x08,
+};
+
+enum mdi_ctrl {
+ mdi_write = 0x04000000,
+ mdi_read = 0x08000000,
+ mdi_ready = 0x10000000,
+};
+
+enum eeprom_op {
+ op_write = 0x05,
+ op_read = 0x06,
+ op_ewds = 0x10,
+ op_ewen = 0x13,
+};
+
+enum eeprom_offsets {
+ eeprom_cnfg_mdix = 0x03,
+ eeprom_id = 0x0A,
+ eeprom_config_asf = 0x0D,
+ eeprom_smbus_addr = 0x90,
+};
+
+enum eeprom_cnfg_mdix {
+ eeprom_mdix_enabled = 0x0080,
+};
+
+enum eeprom_id {
+ eeprom_id_wol = 0x0020,
+};
+
+enum eeprom_config_asf {
+ eeprom_asf = 0x8000,
+ eeprom_gcl = 0x4000,
+};
+
+enum cb_status {
+ cb_complete = 0x8000,
+ cb_ok = 0x2000,
+};
+
+enum cb_command {
+ cb_nop = 0x0000,
+ cb_iaaddr = 0x0001,
+ cb_config = 0x0002,
+ cb_multi = 0x0003,
+ cb_tx = 0x0004,
+ cb_ucode = 0x0005,
+ cb_dump = 0x0006,
+ cb_tx_sf = 0x0008,
+ cb_cid = 0x1f00,
+ cb_i = 0x2000,
+ cb_s = 0x4000,
+ cb_el = 0x8000,
+};
+
+struct rfd {
+ u16 status;
+ u16 command;
+ u32 link;
+ u32 rbd;
+ u16 actual_size;
+ u16 size;
+};
+
+struct rx {
+ struct rx *next, *prev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+#define X(a,b) b,a
+#else
+#define X(a,b) a,b
+#endif
+struct config {
+/*0*/ u8 X(byte_count:6, pad0:2);
+/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
+/*2*/ u8 adaptive_ifs;
+/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
+ term_write_cache_line:1), pad3:4);
+/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
+/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
+/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
+ tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
+ rx_discard_overruns:1), rx_save_bad_frames:1);
+/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
+ pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
+ tx_dynamic_tbd:1);
+/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
+/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
+ link_status_wake:1), arp_wake:1), mcmatch_wake:1);
+/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
+ loopback:2);
+/*11*/ u8 X(linear_priority:3, pad11:5);
+/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
+/*13*/ u8 ip_addr_lo;
+/*14*/ u8 ip_addr_hi;
+/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
+ wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
+ pad15_2:1), crs_or_cdt:1);
+/*16*/ u8 fc_delay_lo;
+/*17*/ u8 fc_delay_hi;
+/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
+ rx_long_ok:1), fc_priority_threshold:3), pad18:1);
+/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
+ fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
+ full_duplex_force:1), full_duplex_pin:1);
+/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
+/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
+/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
+ u8 pad_d102[9];
+};
+
+#define E100_MAX_MULTICAST_ADDRS 64
+struct multi {
+ u16 count;
+ u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
+};
+
+/* Important: keep total struct u32-aligned */
+#define UCODE_SIZE 134
+struct cb {
+ u16 status;
+ u16 command;
+ u32 link;
+ union {
+ u8 iaaddr[ETH_ALEN];
+ u32 ucode[UCODE_SIZE];
+ struct config config;
+ struct multi multi;
+ struct {
+ u32 tbd_array;
+ u16 tcb_byte_count;
+ u8 threshold;
+ u8 tbd_count;
+ struct {
+ u32 buf_addr;
+ u16 size;
+ u16 eol;
+ } tbd;
+ } tcb;
+ u32 dump_buffer_addr;
+ } u;
+ struct cb *next, *prev;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+enum loopback {
+ lb_none = 0, lb_mac = 1, lb_phy = 3,
+};
+
+struct stats {
+ u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
+ tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
+ tx_multiple_collisions, tx_total_collisions;
+ u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
+ rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
+ rx_short_frame_errors;
+ u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
+ u16 xmt_tco_frames, rcv_tco_frames;
+ u32 complete;
+};
+
+struct mem {
+ struct {
+ u32 signature;
+ u32 result;
+ } selftest;
+ struct stats stats;
+ u8 dump_buf[596];
+};
+
+struct param_range {
+ u32 min;
+ u32 max;
+ u32 count;
+};
+
+struct params {
+ struct param_range rfds;
+ struct param_range cbs;
+};
+
+struct nic {
+ /* Begin: frequently used values: keep adjacent for cache effect */
+ u32 msg_enable ____cacheline_aligned;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ struct rx *rxs ____cacheline_aligned;
+ struct rx *rx_to_use;
+ struct rx *rx_to_clean;
+ struct rfd blank_rfd;
+ int ru_running;
+
+ spinlock_t cb_lock ____cacheline_aligned;
+ spinlock_t cmd_lock;
+ struct csr __iomem *csr;
+ enum scb_cmd_lo cuc_cmd;
+ unsigned int cbs_avail;
+ struct cb *cbs;
+ struct cb *cb_to_use;
+ struct cb *cb_to_send;
+ struct cb *cb_to_clean;
+ u16 tx_command;
+ /* End: frequently used values: keep adjacent for cache effect */
+
+ enum {
+ ich = (1 << 0),
+ promiscuous = (1 << 1),
+ multicast_all = (1 << 2),
+ wol_magic = (1 << 3),
+ ich_10h_workaround = (1 << 4),
+ } flags ____cacheline_aligned;
+
+ enum mac mac;
+ enum phy phy;
+ struct params params;
+ struct net_device_stats net_stats;
+ struct timer_list watchdog;
+ struct timer_list blink_timer;
+ struct mii_if_info mii;
+ enum loopback loopback;
+
+ struct mem *mem;
+ dma_addr_t dma_addr;
+
+ dma_addr_t cbs_dma_addr;
+ u8 adaptive_ifs;
+ u8 tx_threshold;
+ u32 tx_frames;
+ u32 tx_collisions;
+ u32 tx_deferred;
+ u32 tx_single_collisions;
+ u32 tx_multiple_collisions;
+ u32 tx_fc_pause;
+ u32 tx_tco_frames;
+
+ u32 rx_fc_pause;
+ u32 rx_fc_unsupported;
+ u32 rx_tco_frames;
+ u32 rx_over_length_errors;
+
+ u8 rev_id;
+ u16 leds;
+ u16 eeprom_wc;
+ u16 eeprom[256];
+};
+
+static inline void e100_write_flush(struct nic *nic)
+{
+ /* Flush previous PCI writes through intermediate bridges
+ * by doing a benign read */
+ (void)readb(&nic->csr->scb.status);
+}
+
+static inline void e100_enable_irq(struct nic *nic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+ writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+ e100_write_flush(nic);
+}
+
+static inline void e100_disable_irq(struct nic *nic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+ writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+ e100_write_flush(nic);
+}
+
+static void e100_hw_reset(struct nic *nic)
+{
+ /* Put CU and RU into idle with a selective reset to get
+ * the device off of the PCI bus */
+ writel(selective_reset, &nic->csr->port);
+ e100_write_flush(nic); udelay(20);
+
+ /* Now fully reset device */
+ writel(software_reset, &nic->csr->port);
+ e100_write_flush(nic); udelay(20);
+
+ /* Mask off our interrupt line - it's unmasked after reset */
+ e100_disable_irq(nic);
+}
+
+static int e100_self_test(struct nic *nic)
+{
+ u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
+
+ /* Passing the self-test is a pretty good indication
+ * that the device can DMA to/from host memory */
+
+ nic->mem->selftest.signature = 0;
+ nic->mem->selftest.result = 0xFFFFFFFF;
+
+ writel(selftest | dma_addr, &nic->csr->port);
+ e100_write_flush(nic);
+ /* Wait 10 msec for self-test to complete */
+ msleep(10);
+
+ /* Interrupts are enabled after self-test */
+ e100_disable_irq(nic);
+
+ /* Check results of self-test */
+ if(nic->mem->selftest.result != 0) {
+ DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
+ return -ETIMEDOUT;
+ }
+ if(nic->mem->selftest.signature == 0) {
+ DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
+{
+ u32 cmd_addr_data[3];
+ u8 ctrl;
+ int i, j;
+
+ /* Three cmds: write/erase enable, write data, write/erase disable */
+ cmd_addr_data[0] = op_ewen << (addr_len - 2);
+ cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
+ cpu_to_le16(data);
+ cmd_addr_data[2] = op_ewds << (addr_len - 2);
+
+ /* Bit-bang cmds to write word to eeprom */
+ for(j = 0; j < 3; j++) {
+
+ /* Chip select */
+ writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ for(i = 31; i >= 0; i--) {
+ ctrl = (cmd_addr_data[j] & (1 << i)) ?
+ eecs | eedi : eecs;
+ writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+ }
+ /* Wait 10 msec for cmd to complete */
+ msleep(10);
+
+ /* Chip deselect */
+ writeb(0, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+ }
+};
+
+/* General technique stolen from the eepro100 driver - very clever */
+static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
+{
+ u32 cmd_addr_data;
+ u16 data = 0;
+ u8 ctrl;
+ int i;
+
+ cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
+
+ /* Chip select */
+ writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ /* Bit-bang to read word from eeprom */
+ for(i = 31; i >= 0; i--) {
+ ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
+ writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ /* Eeprom drives a dummy zero to EEDO after receiving
+ * the complete address. Use this to adjust addr_len. */
+ ctrl = readb(&nic->csr->eeprom_ctrl_lo);
+ if(!(ctrl & eedo) && i > 16) {
+ *addr_len -= (i - 16);
+ i = 17;
+ }
+
+ data = (data << 1) | (ctrl & eedo ? 1 : 0);
+ }
+
+ /* Chip deselect */
+ writeb(0, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ return le16_to_cpu(data);
+};
+
+/* Load entire EEPROM image into driver cache and validate checksum */
+static int e100_eeprom_load(struct nic *nic)
+{
+ u16 addr, addr_len = 8, checksum = 0;
+
+ /* Try reading with an 8-bit addr len to discover actual addr len */
+ e100_eeprom_read(nic, &addr_len, 0);
+ nic->eeprom_wc = 1 << addr_len;
+
+ for(addr = 0; addr < nic->eeprom_wc; addr++) {
+ nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
+ if(addr < nic->eeprom_wc - 1)
+ checksum += cpu_to_le16(nic->eeprom[addr]);
+ }
+
+ /* The checksum, stored in the last word, is calculated such that
+ * the sum of words should be 0xBABA */
+ checksum = le16_to_cpu(0xBABA - checksum);
+ if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
+ DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/* Save (portion of) driver EEPROM cache to device and update checksum */
+static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
+{
+ u16 addr, addr_len = 8, checksum = 0;
+
+ /* Try reading with an 8-bit addr len to discover actual addr len */
+ e100_eeprom_read(nic, &addr_len, 0);
+ nic->eeprom_wc = 1 << addr_len;
+
+ if(start + count >= nic->eeprom_wc)
+ return -EINVAL;
+
+ for(addr = start; addr < start + count; addr++)
+ e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
+
+ /* The checksum, stored in the last word, is calculated such that
+ * the sum of words should be 0xBABA */
+ for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
+ checksum += cpu_to_le16(nic->eeprom[addr]);
+ nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
+ e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
+ nic->eeprom[nic->eeprom_wc - 1]);
+
+ return 0;
+}
+
+#define E100_WAIT_SCB_TIMEOUT 40
+static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
+{
+ unsigned long flags;
+ unsigned int i;
+ int err = 0;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+
+ /* Previous command is accepted when SCB clears */
+ for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
+ if(likely(!readb(&nic->csr->scb.cmd_lo)))
+ break;
+ cpu_relax();
+ if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1)))
+ udelay(5);
+ }
+ if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
+ err = -EAGAIN;
+ goto err_unlock;
+ }
+
+ if(unlikely(cmd != cuc_resume))
+ writel(dma_addr, &nic->csr->scb.gen_ptr);
+ writeb(cmd, &nic->csr->scb.cmd_lo);
+
+err_unlock:
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+
+ return err;
+}
+
+static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
+ void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+ struct cb *cb;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&nic->cb_lock, flags);
+
+ if(unlikely(!nic->cbs_avail)) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ cb = nic->cb_to_use;
+ nic->cb_to_use = cb->next;
+ nic->cbs_avail--;
+ cb->skb = skb;
+
+ if(unlikely(!nic->cbs_avail))
+ err = -ENOSPC;
+
+ cb_prepare(nic, cb, skb);
+
+ /* Order is important; otherwise we'll be in a race with h/w:
+ * set S-bit in current first, then clear S-bit in previous. */
+ cb->command |= cpu_to_le16(cb_s);
+ wmb();
+ cb->prev->command &= cpu_to_le16(~cb_s);
+
+ while(nic->cb_to_send != nic->cb_to_use) {
+ if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
+ nic->cb_to_send->dma_addr))) {
+ /* Ok, here's where things get sticky. It's
+ * possible that we can't schedule the command
+ * because the controller is too busy, so
+ * let's just queue the command and try again
+ * when another command is scheduled. */
+ break;
+ } else {
+ nic->cuc_cmd = cuc_resume;
+ nic->cb_to_send = nic->cb_to_send->next;
+ }
+ }
+
+err_unlock:
+ spin_unlock_irqrestore(&nic->cb_lock, flags);
+
+ return err;
+}
+
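+/* Issue a single MDI (PHY management) transaction and busy-wait for the
+ * controller to signal completion; the low 16 bits carry the data for reads. */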
+static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+
+ writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
+
+ for(i = 0; i < 100; i++) {
+ udelay(20);
+ if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
+ break;
+ }
+
+ DPRINTK(HW, DEBUG,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+ return (u16)data_out;
+}
+
+static int mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
+}
+
+static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
+{
+ mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
+}
+
+static void e100_get_defaults(struct nic *nic)
+{
+ struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+ struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
+
+ pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
+ /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
+ nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
+ if(nic->mac == mac_unknown)
+ nic->mac = mac_82557_D100_A;
+
+ nic->params.rfds = rfds;
+ nic->params.cbs = cbs;
+
+ /* Quadwords to DMA into FIFO before starting frame transmit */
+ nic->tx_threshold = 0xE0;
+
+ nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
+ ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+
+ /* Template for a freshly allocated RFD */
+ nic->blank_rfd.command = cpu_to_le16(cb_el);
+ nic->blank_rfd.rbd = 0xFFFFFFFF;
+ nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+
+ /* MII setup */
+ nic->mii.phy_id_mask = 0x1F;
+ nic->mii.reg_num_mask = 0x1F;
+ nic->mii.dev = nic->netdev;
+ nic->mii.mdio_read = mdio_read;
+ nic->mii.mdio_write = mdio_write;
+}
+
+static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ struct config *config = &cb->u.config;
+ u8 *c = (u8 *)config;
+
+ cb->command = cpu_to_le16(cb_config);
+
+ memset(config, 0, sizeof(struct config));
+
+ config->byte_count = 0x16; /* bytes in this struct */
+ config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
+ config->direct_rx_dma = 0x1; /* reserved */
+ config->standard_tcb = 0x1; /* 1=standard, 0=extended */
+ config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
+ config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
+ config->tx_underrun_retry = 0x3; /* # of underrun retries */
+ config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
+ config->pad10 = 0x6;
+ config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
+ config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
+ config->ifs = 0x6; /* x16 = inter frame spacing */
+ config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
+ config->pad15_1 = 0x1;
+ config->pad15_2 = 0x1;
+ config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
+ config->fc_delay_hi = 0x40; /* time delay for fc frame */
+ config->tx_padding = 0x1; /* 1=pad short frames */
+ config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
+ config->pad18 = 0x1;
+ config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
+ config->pad20_1 = 0x1F;
+ config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
+ config->pad21_1 = 0x5;
+
+ config->adaptive_ifs = nic->adaptive_ifs;
+ config->loopback = nic->loopback;
+
+ if(nic->mii.force_media && nic->mii.full_duplex)
+ config->full_duplex_force = 0x1; /* 1=force, 0=auto */
+
+ if(nic->flags & promiscuous || nic->loopback) {
+ config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
+ config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
+ config->promiscuous_mode = 0x1; /* 1=on, 0=off */
+ }
+
+ if(nic->flags & multicast_all)
+ config->multicast_all = 0x1; /* 1=accept, 0=no */
+
+ if(!(nic->flags & wol_magic))
+ config->magic_packet_disable = 0x1; /* 1=off, 0=on */
+
+ if(nic->mac >= mac_82558_D101_A4) {
+ config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
+ config->mwi_enable = 0x1; /* 1=enable, 0=disable */
+ config->standard_tcb = 0x0; /* 1=standard, 0=extended */
+ config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
+ if(nic->mac >= mac_82559_D101M)
+ config->tno_intr = 0x1; /* TCO stats enable */
+ else
+ config->standard_stat_counter = 0x0;
+ }
+
+ DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+}
+
+static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ int i;
+ static const u32 ucode[UCODE_SIZE] = {
+ /* NFS packets are misinterpreted as TCO packets and
+ * incorrectly routed to the BMC over SMBus. This
+ * microcode patch checks the fragmented IP bit in the
+ * NFS/UDP header to distinguish between NFS and TCO. */
+ 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
+ 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
+ 0x00906EFD, 0x00900EFD, 0x00E00EF8,
+ };
+
+ if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
+ for(i = 0; i < UCODE_SIZE; i++)
+ cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+ cb->command = cpu_to_le16(cb_ucode);
+ } else
+ cb->command = cpu_to_le16(cb_nop);
+}
+
+static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+ struct sk_buff *skb)
+{
+ cb->command = cpu_to_le16(cb_iaaddr);
+ memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+}
+
+static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ cb->command = cpu_to_le16(cb_dump);
+ cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
+ offsetof(struct mem, dump_buf));
+}
+
+#define NCONFIG_AUTO_SWITCH 0x0080
+#define MII_NSC_CONG MII_RESV1
+#define NSC_CONG_ENABLE 0x0100
+#define NSC_CONG_TXREADY 0x0400
+#define ADVERTISE_FC_SUPPORTED 0x0400
+static int e100_phy_init(struct nic *nic)
+{
+ struct net_device *netdev = nic->netdev;
+ u32 addr;
+ u16 bmcr, stat, id_lo, id_hi, cong;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for(addr = 0; addr < 32; addr++) {
+ nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
+ stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
+ stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
+ if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+ if(addr == 32)
+ return -EAGAIN;
+
+ /* Select the phy and isolate the rest */
+ for(addr = 0; addr < 32; addr++) {
+ if(addr != nic->mii.phy_id) {
+ mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
+ } else {
+ bmcr = mdio_read(netdev, addr, MII_BMCR);
+ mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+
+ /* Get phy ID */
+ id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
+ id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
+ nic->phy = (u32)id_hi << 16 | (u32)id_lo;
+ DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+
+ /* Handle National tx phys */
+#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
+ if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
+ /* Disable congestion control */
+ cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
+ cong |= NSC_CONG_TXREADY;
+ cong &= ~NSC_CONG_ENABLE;
+ mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
+ }
+
+ if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)))
+ /* enable/disable MDI/MDI-X auto-switching */
+ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+ nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
+
+ return 0;
+}
+
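+/* Bring the hardware to an operational state: reset, self-test, PHY init,
+ * then the initial sequence of CU/RU base, microcode, config and IA commands. */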
+static int e100_hw_init(struct nic *nic)
+{
+ int err;
+
+ e100_hw_reset(nic);
+
+ DPRINTK(HW, ERR, "e100_hw_init\n");
+ if(!in_interrupt() && (err = e100_self_test(nic)))
+ return err;
+
+ if((err = e100_phy_init(nic)))
+ return err;
+ if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
+ return err;
+ if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
+ return err;
+ if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+ return err;
+ if((err = e100_exec_cb(nic, NULL, e100_configure)))
+ return err;
+ if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
+ return err;
+ if((err = e100_exec_cmd(nic, cuc_dump_addr,
+ nic->dma_addr + offsetof(struct mem, stats))))
+ return err;
+ if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
+ return err;
+
+ e100_disable_irq(nic);
+
+ return 0;
+}
+
+static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ struct net_device *netdev = nic->netdev;
+ struct dev_mc_list *list = netdev->mc_list;
+ u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+
+ cb->command = cpu_to_le16(cb_multi);
+ cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
+ for(i = 0; list && i < count; i++, list = list->next)
+ memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
+ ETH_ALEN);
+}
+
+static void e100_set_multicast_list(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
+ netdev->mc_count, netdev->flags);
+
+ if(netdev->flags & IFF_PROMISC)
+ nic->flags |= promiscuous;
+ else
+ nic->flags &= ~promiscuous;
+
+ if(netdev->flags & IFF_ALLMULTI ||
+ netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ nic->flags |= multicast_all;
+ else
+ nic->flags &= ~multicast_all;
+
+ e100_exec_cb(nic, NULL, e100_configure);
+ e100_exec_cb(nic, NULL, e100_multi);
+}
+
+static void e100_update_stats(struct nic *nic)
+{
+ struct net_device_stats *ns = &nic->net_stats;
+ struct stats *s = &nic->mem->stats;
+ u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
+ (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
+ &s->complete;
+
+ /* Device's stats reporting may take several microseconds to
+ * complete, so we're always waiting for the results of the
+ * previous command. */
+
+ if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
+ *complete = 0;
+ nic->tx_frames = le32_to_cpu(s->tx_good_frames);
+ nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
+ ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
+ ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
+ ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
+ ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
+ ns->collisions += nic->tx_collisions;
+ ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
+ le32_to_cpu(s->tx_lost_crs);
+ ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
+ ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
+ nic->rx_over_length_errors;
+ ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
+ ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
+ ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
+ le32_to_cpu(s->rx_alignment_errors) +
+ le32_to_cpu(s->rx_short_frame_errors) +
+ le32_to_cpu(s->rx_cdt_errors);
+ nic->tx_deferred += le32_to_cpu(s->tx_deferred);
+ nic->tx_single_collisions +=
+ le32_to_cpu(s->tx_single_collisions);
+ nic->tx_multiple_collisions +=
+ le32_to_cpu(s->tx_multiple_collisions);
+ if(nic->mac >= mac_82558_D101_A4) {
+ nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
+ nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
+ nic->rx_fc_unsupported +=
+ le32_to_cpu(s->fc_rcv_unsupported);
+ if(nic->mac >= mac_82559_D101M) {
+ nic->tx_tco_frames +=
+ le16_to_cpu(s->xmt_tco_frames);
+ nic->rx_tco_frames +=
+ le16_to_cpu(s->rcv_tco_frames);
+ }
+ }
+ }
+
+ e100_exec_cmd(nic, cuc_dump_reset, 0);
+}
+
+static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
+{
+ /* Adjust inter-frame-spacing (IFS) between two transmits if
+ * we're getting collisions on a half-duplex connection. */
+
+ if(duplex == DUPLEX_HALF) {
+ u32 prev = nic->adaptive_ifs;
+ u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
+
+ if((nic->tx_frames / 32 < nic->tx_collisions) &&
+ (nic->tx_frames > min_frames)) {
+ if(nic->adaptive_ifs < 60)
+ nic->adaptive_ifs += 5;
+ } else if (nic->tx_frames < min_frames) {
+ if(nic->adaptive_ifs >= 5)
+ nic->adaptive_ifs -= 5;
+ }
+ if(nic->adaptive_ifs != prev)
+ e100_exec_cb(nic, NULL, e100_configure);
+ }
+}
+
+static void e100_watchdog(unsigned long data)
+{
+ struct nic *nic = (struct nic *)data;
+ struct ethtool_cmd cmd;
+
+ DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+
+ /* mii library handles link maintenance tasks */
+
+ mii_ethtool_gset(&nic->mii, &cmd);
+
+ if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
+ DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
+ cmd.speed == SPEED_100 ? "100" : "10",
+ cmd.duplex == DUPLEX_FULL ? "full" : "half");
+ } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
+ DPRINTK(LINK, INFO, "link down\n");
+ }
+
+ mii_check_link(&nic->mii);
+
+ /* Software generated interrupt to recover from (rare) Rx
+ * allocation failure.
+ * Unfortunately we have to use a spinlock so as not to re-enable
+ * interrupts accidentally, because the hardware shares a register
+ * between the interrupt mask bit and the SW interrupt generation bit */
+ spin_lock_irq(&nic->cmd_lock);
+ writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
+ spin_unlock_irq(&nic->cmd_lock);
+ e100_write_flush(nic);
+
+ e100_update_stats(nic);
+ e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
+
+ if(nic->mac <= mac_82557_D100_C)
+ /* Issue a multicast command to work around a 557 lockup */
+ e100_set_multicast_list(nic->netdev);
+
+ if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
+ /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
+ nic->flags |= ich_10h_workaround;
+ else
+ nic->flags &= ~ich_10h_workaround;
+
+ mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
+}
+
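+/* Build a transmit CB with a single TBD describing the DMA-mapped skb data. */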
+static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+ struct sk_buff *skb)
+{
+ cb->command = nic->tx_command;
+ cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
+ cb->u.tcb.tcb_byte_count = 0;
+ cb->u.tcb.threshold = nic->tx_threshold;
+ cb->u.tcb.tbd_count = 1;
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+ cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
+}
+
+static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err;
+
+ if(nic->flags & ich_10h_workaround) {
+ /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
+ Issue a NOP command followed by a 1us delay before
+ issuing the Tx command. */
+ e100_exec_cmd(nic, cuc_nop, 0);
+ udelay(1);
+ }
+
+ err = e100_exec_cb(nic, skb, e100_xmit_prepare);
+
+ switch(err) {
+ case -ENOSPC:
+ /* We queued the skb, but now we're out of space. */
+ DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+ netif_stop_queue(netdev);
+ break;
+ case -ENOMEM:
+ /* This is a hard error - log it. */
+ DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+ netif_stop_queue(netdev);
+ return 1;
+ }
+
+ netdev->trans_start = jiffies;
+ return 0;
+}
+
+static inline int e100_tx_clean(struct nic *nic)
+{
+ struct cb *cb;
+ int tx_cleaned = 0;
+
+ spin_lock(&nic->cb_lock);
+
+ DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n",
+ nic->cb_to_clean->status);
+
+ /* Clean CBs marked complete */
+ for(cb = nic->cb_to_clean;
+ cb->status & cpu_to_le16(cb_complete);
+ cb = nic->cb_to_clean = cb->next) {
+ if(likely(cb->skb != NULL)) {
+ nic->net_stats.tx_packets++;
+ nic->net_stats.tx_bytes += cb->skb->len;
+
+ pci_unmap_single(nic->pdev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ tx_cleaned = 1;
+ }
+ cb->status = 0;
+ nic->cbs_avail++;
+ }
+
+ spin_unlock(&nic->cb_lock);
+
+ /* Recover from running out of Tx resources in xmit_frame */
+ if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
+ netif_wake_queue(nic->netdev);
+
+ return tx_cleaned;
+}
+
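+/* Release the CB ring: unmap and free any skbs still attached to CBs,
+ * then return the coherent DMA block. */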
+static void e100_clean_cbs(struct nic *nic)
+{
+ if(nic->cbs) {
+ while(nic->cbs_avail != nic->params.cbs.count) {
+ struct cb *cb = nic->cb_to_clean;
+ if(cb->skb) {
+ pci_unmap_single(nic->pdev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(cb->skb);
+ }
+ nic->cb_to_clean = nic->cb_to_clean->next;
+ nic->cbs_avail++;
+ }
+ pci_free_consistent(nic->pdev,
+ sizeof(struct cb) * nic->params.cbs.count,
+ nic->cbs, nic->cbs_dma_addr);
+ nic->cbs = NULL;
+ nic->cbs_avail = 0;
+ }
+ nic->cuc_cmd = cuc_start;
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
+ nic->cbs;
+}
+
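+/* Allocate the CB ring from coherent DMA memory and link the entries into
+ * a circular list, both in software (next/prev) and in hardware (link). */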
+static int e100_alloc_cbs(struct nic *nic)
+{
+ struct cb *cb;
+ unsigned int i, count = nic->params.cbs.count;
+
+ nic->cuc_cmd = cuc_start;
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
+ nic->cbs_avail = 0;
+
+ nic->cbs = pci_alloc_consistent(nic->pdev,
+ sizeof(struct cb) * count, &nic->cbs_dma_addr);
+ if(!nic->cbs)
+ return -ENOMEM;
+
+ for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
+ cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
+ cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
+
+ cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
+ cb->link = cpu_to_le32(nic->cbs_dma_addr +
+ ((i+1) % count) * sizeof(struct cb));
+ cb->skb = NULL;
+ }
+
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
+ nic->cbs_avail = count;
+
+ return 0;
+}
+
+static inline void e100_start_receiver(struct nic *nic)
+{
+ /* (Re)start RU if suspended or idle and RFA is non-NULL */
+ if(!nic->ru_running && nic->rx_to_clean->skb) {
+ e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
+ nic->ru_running = 1;
+ }
+}
+
+#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
+static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
+{
+ if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
+ return -ENOMEM;
+
+ /* Align, init, and map the RFD. */
+ rx->skb->dev = nic->netdev;
+ skb_reserve(rx->skb, NET_IP_ALIGN);
+ memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
+ rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+
+ /* Link the RFD to end of RFA by linking previous RFD to
+ * this one, and clearing EL bit of previous. */
+ if(rx->prev->skb) {
+ struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
+ put_unaligned(cpu_to_le32(rx->dma_addr),
+ (u32 *)&prev_rfd->link);
+ wmb();
+ prev_rfd->command &= ~cpu_to_le16(cb_el);
+ pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
+ sizeof(struct rfd), PCI_DMA_TODEVICE);
+ }
+
+ return 0;
+}
+
+static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
+ unsigned int *work_done, unsigned int work_to_do)
+{
+ struct sk_buff *skb = rx->skb;
+ struct rfd *rfd = (struct rfd *)skb->data;
+ u16 rfd_status, actual_size;
+
+ if(unlikely(work_done && *work_done >= work_to_do))
+ return -EAGAIN;
+
+ /* Need to sync before taking a peek at cb_complete bit */
+ pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
+ sizeof(struct rfd), PCI_DMA_FROMDEVICE);
+ rfd_status = le16_to_cpu(rfd->status);
+
+ DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+
+ /* If data isn't ready, nothing to indicate */
+ if(unlikely(!(rfd_status & cb_complete)))
+ return -EAGAIN;
+
+ /* Get actual data size */
+ actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
+ if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
+ actual_size = RFD_BUF_LEN - sizeof(struct rfd);
+
+ /* Get data */
+ pci_unmap_single(nic->pdev, rx->dma_addr,
+ RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+
+ /* Pull off the RFD and put the actual data (minus eth hdr) */
+ skb_reserve(skb, sizeof(struct rfd));
+ skb_put(skb, actual_size);
+ skb->protocol = eth_type_trans(skb, nic->netdev);
+
+ if(unlikely(!(rfd_status & cb_ok))) {
+ /* Don't indicate if hardware indicates errors */
+ nic->net_stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
+ /* Don't indicate oversized frames */
+ nic->rx_over_length_errors++;
+ nic->net_stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ } else {
+ nic->net_stats.rx_packets++;
+ nic->net_stats.rx_bytes += actual_size;
+ nic->netdev->last_rx = jiffies;
+ netif_receive_skb(skb);
+ if(work_done)
+ (*work_done)++;
+ }
+
+ rx->skb = NULL;
+
+ return 0;
+}
+
+static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
+ unsigned int work_to_do)
+{
+ struct rx *rx;
+
+ /* Indicate newly arrived packets */
+ for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
+ if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+ break; /* No more to clean */
+ }
+
+ /* Alloc new skbs to refill list */
+ for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
+ if(unlikely(e100_rx_alloc_skb(nic, rx)))
+ break; /* Better luck next time (see watchdog) */
+ }
+
+ e100_start_receiver(nic);
+}
+
+static void e100_rx_clean_list(struct nic *nic)
+{
+ struct rx *rx;
+ unsigned int i, count = nic->params.rfds.count;
+
+ if(nic->rxs) {
+ for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
+ if(rx->skb) {
+ pci_unmap_single(nic->pdev, rx->dma_addr,
+ RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rx->skb);
+ }
+ }
+ kfree(nic->rxs);
+ nic->rxs = NULL;
+ }
+
+ nic->rx_to_use = nic->rx_to_clean = NULL;
+ nic->ru_running = 0;
+}
+
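+/* Allocate the Rx bookkeeping ring and pre-fill every slot with a mapped
+ * RFD skb; on any allocation failure the whole list is torn down. */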
+static int e100_rx_alloc_list(struct nic *nic)
+{
+ struct rx *rx;
+ unsigned int i, count = nic->params.rfds.count;
+
+ nic->rx_to_use = nic->rx_to_clean = NULL;
+
+ if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
+ return -ENOMEM;
+ memset(nic->rxs, 0, sizeof(struct rx) * count);
+
+ for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
+ rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
+ rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
+ if(e100_rx_alloc_skb(nic, rx)) {
+ e100_rx_clean_list(nic);
+ return -ENOMEM;
+ }
+ }
+
+ nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+
+ return 0;
+}
+
+static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *netdev = dev_id;
+ struct nic *nic = netdev_priv(netdev);
+ u8 stat_ack = readb(&nic->csr->scb.stat_ack);
+
+ DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+
+ if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
+ stat_ack == stat_ack_not_present) /* Hardware is ejected */
+ return IRQ_NONE;
+
+ /* Ack interrupt(s) */
+ writeb(stat_ack, &nic->csr->scb.stat_ack);
+
+ /* We hit Receive No Resource (RNR); restart RU after cleaning */
+ if(stat_ack & stat_ack_rnr)
+ nic->ru_running = 0;
+
+ e100_disable_irq(nic);
+ netif_rx_schedule(netdev);
+
+ return IRQ_HANDLED;
+}
+
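+/* NAPI poll: indicate received frames and clean completed Tx; when no work
+ * remains, leave polling mode and re-enable interrupts. */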
+static int e100_poll(struct net_device *netdev, int *budget)
+{
+ struct nic *nic = netdev_priv(netdev);
+ unsigned int work_to_do = min(netdev->quota, *budget);
+ unsigned int work_done = 0;
+ int tx_cleaned;
+
+ e100_rx_clean(nic, &work_done, work_to_do);
+ tx_cleaned = e100_tx_clean(nic);
+
+ /* If no Rx and Tx cleanup work was done, exit polling mode. */
+ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
+ netif_rx_complete(netdev);
+ e100_enable_irq(nic);
+ return 0;
+ }
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ return 1;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ e100_disable_irq(nic);
+ e100_intr(nic->pdev->irq, netdev, NULL);
+ e100_tx_clean(nic);
+ e100_enable_irq(nic);
+}
+#endif
+
+static struct net_device_stats *e100_get_stats(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return &nic->net_stats;
+}
+
+static int e100_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ e100_exec_cb(nic, NULL, e100_setup_iaaddr);
+
+ return 0;
+}
+
+static int e100_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+static int e100_asf(struct nic *nic)
+{
+ /* ASF can be enabled from eeprom */
+ return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
+ (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
+ !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
+ ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
+}
+
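+/* Bring the interface up: allocate Rx/Tx rings, init hardware, start the
+ * receiver and watchdog, then hook the shared IRQ and wake the Tx queue. */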
+static int e100_up(struct nic *nic)
+{
+ int err;
+
+ if((err = e100_rx_alloc_list(nic)))
+ return err;
+ if((err = e100_alloc_cbs(nic)))
+ goto err_rx_clean_list;
+ if((err = e100_hw_init(nic)))
+ goto err_clean_cbs;
+ e100_set_multicast_list(nic->netdev);
+ e100_start_receiver(nic);
+ mod_timer(&nic->watchdog, jiffies);
+ if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
+ nic->netdev->name, nic->netdev)))
+ goto err_no_irq;
+ e100_enable_irq(nic);
+ netif_wake_queue(nic->netdev);
+ return 0;
+
+err_no_irq:
+ del_timer_sync(&nic->watchdog);
+err_clean_cbs:
+ e100_clean_cbs(nic);
+err_rx_clean_list:
+ e100_rx_clean_list(nic);
+ return err;
+}
+
+static void e100_down(struct nic *nic)
+{
+ e100_hw_reset(nic);
+ free_irq(nic->pdev->irq, nic->netdev);
+ del_timer_sync(&nic->watchdog);
+ netif_carrier_off(nic->netdev);
+ netif_stop_queue(nic->netdev);
+ e100_clean_cbs(nic);
+ e100_rx_clean_list(nic);
+}
+
+static void e100_tx_timeout(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
+ readb(&nic->csr->scb.status));
+ e100_down(netdev_priv(netdev));
+ e100_up(netdev_priv(netdev));
+}
+
+static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
+{
+ int err;
+ struct sk_buff *skb;
+
+ /* Use driver resources to perform internal MAC or PHY
+ * loopback test. A single packet is prepared and transmitted
+ * in loopback mode, and the test passes if the received
+ * packet compares byte-for-byte to the transmitted packet. */
+
+ if((err = e100_rx_alloc_list(nic)))
+ return err;
+ if((err = e100_alloc_cbs(nic)))
+ goto err_clean_rx;
+
+ /* ICH PHY loopback is broken so do MAC loopback instead */
+ if(nic->flags & ich && loopback_mode == lb_phy)
+ loopback_mode = lb_mac;
+
+ nic->loopback = loopback_mode;
+ if((err = e100_hw_init(nic)))
+ goto err_loopback_none;
+
+ if(loopback_mode == lb_phy)
+ mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
+ BMCR_LOOPBACK);
+
+ e100_start_receiver(nic);
+
+ if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
+ err = -ENOMEM;
+ goto err_loopback_none;
+ }
+ skb_put(skb, ETH_DATA_LEN);
+ memset(skb->data, 0xFF, ETH_DATA_LEN);
+ e100_xmit_frame(skb, nic->netdev);
+
+ msleep(10);
+
+ if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
+ skb->data, ETH_DATA_LEN))
+ err = -EAGAIN;
+
+err_loopback_none:
+ mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
+ nic->loopback = lb_none;
+ e100_hw_init(nic);
+ e100_clean_cbs(nic);
+err_clean_rx:
+ e100_rx_clean_list(nic);
+ return err;
+}
+
+#define MII_LED_CONTROL 0x1B
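+/* Timer callback: toggle the LED via the PHY's LED control register and
+ * re-arm blink_timer to fire again in a quarter second. */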
+static void e100_blink_led(unsigned long data)
+{
+ struct nic *nic = (struct nic *)data;
+ enum led_state {
+ led_on = 0x01,
+ led_off = 0x04,
+ led_on_559 = 0x05,
+ led_on_557 = 0x07,
+ };
+
+ nic->leds = (nic->leds & led_on) ? led_off :
+ (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
+ mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
+ mod_timer(&nic->blink_timer, jiffies + HZ / 4);
+}
+
+static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_ethtool_gset(&nic->mii, cmd);
+}
+
+static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err;
+
+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
+ err = mii_ethtool_sset(&nic->mii, cmd);
+ e100_exec_cb(nic, NULL, e100_configure);
+
+ return err;
+}
+
+static void e100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nic *nic = netdev_priv(netdev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, pci_name(nic->pdev));
+}
+
+static int e100_get_regs_len(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+#define E100_PHY_REGS 0x1C
+#define E100_REGS_LEN 1 + E100_PHY_REGS + \
+ sizeof(nic->mem->dump_buf) / sizeof(u32)
+ return E100_REGS_LEN * sizeof(u32);
+}
+
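+/* ethtool register dump: snapshot the SCB registers, all PHY registers,
+ * and the controller's internal dump buffer. */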
+static void e100_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct nic *nic = netdev_priv(netdev);
+ u32 *buff = p;
+ int i;
+
+ regs->version = (1 << 24) | nic->rev_id;
+ buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
+ readb(&nic->csr->scb.cmd_lo) << 16 |
+ readw(&nic->csr->scb.status);
+ for(i = E100_PHY_REGS; i >= 0; i--)
+ buff[1 + E100_PHY_REGS - i] =
+ mdio_read(netdev, nic->mii.phy_id, i);
+ memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
+ e100_exec_cb(nic, NULL, e100_dump);
+ msleep(10);
+ memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
+ sizeof(nic->mem->dump_buf));
+}
+
+static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct nic *nic = netdev_priv(netdev);
+ wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
+ wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
+}
+
+static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
+ return -EOPNOTSUPP;
+
+ if(wol->wolopts)
+ nic->flags |= wol_magic;
+ else
+ nic->flags &= ~wol_magic;
+
+ pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+ e100_exec_cb(nic, NULL, e100_configure);
+
+ return 0;
+}
+
+static u32 e100_get_msglevel(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->msg_enable;
+}
+
+static void e100_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct nic *nic = netdev_priv(netdev);
+ nic->msg_enable = value;
+}
+
+static int e100_nway_reset(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_nway_restart(&nic->mii);
+}
+
+static u32 e100_get_link(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_link_ok(&nic->mii);
+}
+
+static int e100_get_eeprom_len(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->eeprom_wc << 1;
+}
+
+#define E100_EEPROM_MAGIC 0x1234
+static int e100_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ eeprom->magic = E100_EEPROM_MAGIC;
+ memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+static int e100_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ if(eeprom->magic != E100_EEPROM_MAGIC)
+ return -EINVAL;
+
+ memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
+
+ return e100_eeprom_save(nic, eeprom->offset >> 1,
+ (eeprom->len >> 1) + 1);
+}
+
+static void e100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct param_range *rfds = &nic->params.rfds;
+ struct param_range *cbs = &nic->params.cbs;
+
+ ring->rx_max_pending = rfds->max;
+ ring->tx_max_pending = cbs->max;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rfds->count;
+ ring->tx_pending = cbs->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int e100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct param_range *rfds = &nic->params.rfds;
+ struct param_range *cbs = &nic->params.cbs;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if(netif_running(netdev))
+ e100_down(nic);
+ rfds->count = max(ring->rx_pending, rfds->min);
+ rfds->count = min(rfds->count, rfds->max);
+ cbs->count = max(ring->tx_pending, cbs->min);
+ cbs->count = min(cbs->count, cbs->max);
+ DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
+ rfds->count, cbs->count);
+ if(netif_running(netdev))
+ e100_up(nic);
+
+ return 0;
+}
+
+static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Link test (on/offline)",
+ "Eeprom test (on/offline)",
+ "Self test (offline)",
+ "Mac loopback (offline)",
+ "Phy loopback (offline)",
+};
+#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
+
+static int e100_diag_test_count(struct net_device *netdev)
+{
+ return E100_TEST_LEN;
+}
+
+static void e100_diag_test(struct net_device *netdev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct ethtool_cmd cmd;
+ struct nic *nic = netdev_priv(netdev);
+ int i, err;
+
+ memset(data, 0, E100_TEST_LEN * sizeof(u64));
+ data[0] = !mii_link_ok(&nic->mii);
+ data[1] = e100_eeprom_load(nic);
+ if(test->flags & ETH_TEST_FL_OFFLINE) {
+
+ /* save speed, duplex & autoneg settings */
+ err = mii_ethtool_gset(&nic->mii, &cmd);
+
+ if(netif_running(netdev))
+ e100_down(nic);
+ data[2] = e100_self_test(nic);
+ data[3] = e100_loopback_test(nic, lb_mac);
+ data[4] = e100_loopback_test(nic, lb_phy);
+
+ /* restore speed, duplex & autoneg settings */
+ err = mii_ethtool_sset(&nic->mii, &cmd);
+
+ if(netif_running(netdev))
+ e100_up(nic);
+ }
+ for(i = 0; i < E100_TEST_LEN; i++)
+ test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+}
+
+static int e100_phys_id(struct net_device *netdev, u32 data)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+ mod_timer(&nic->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&nic->blink_timer);
+ mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
+
+ return 0;
+}
+
+static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+ /* device-specific stats */
+ "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
+ "tx_flow_control_pause", "rx_flow_control_pause",
+ "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
+};
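+/* The first E100_NET_STATS_LEN (21) strings mirror the fields of struct
+ * net_device_stats copied in e100_get_ethtool_stats(); the remaining
+ * strings name the driver-specific counters appended after them. */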
+#define E100_NET_STATS_LEN 21
+#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
+
+static int e100_get_stats_count(struct net_device *netdev)
+{
+ return E100_STATS_LEN;
+}
+
+static void e100_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int i;
+
+ for(i = 0; i < E100_NET_STATS_LEN; i++)
+ data[i] = ((unsigned long *)&nic->net_stats)[i];
+
+ data[i++] = nic->tx_deferred;
+ data[i++] = nic->tx_single_collisions;
+ data[i++] = nic->tx_multiple_collisions;
+ data[i++] = nic->tx_fc_pause;
+ data[i++] = nic->rx_fc_pause;
+ data[i++] = nic->rx_fc_unsupported;
+ data[i++] = nic->tx_tco_frames;
+ data[i++] = nic->rx_tco_frames;
+}
+
+static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ switch(stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
+ break;
+ }
+}
+
+static struct ethtool_ops e100_ethtool_ops = {
+ .get_settings = e100_get_settings,
+ .set_settings = e100_set_settings,
+ .get_drvinfo = e100_get_drvinfo,
+ .get_regs_len = e100_get_regs_len,
+ .get_regs = e100_get_regs,
+ .get_wol = e100_get_wol,
+ .set_wol = e100_set_wol,
+ .get_msglevel = e100_get_msglevel,
+ .set_msglevel = e100_set_msglevel,
+ .nway_reset = e100_nway_reset,
+ .get_link = e100_get_link,
+ .get_eeprom_len = e100_get_eeprom_len,
+ .get_eeprom = e100_get_eeprom,
+ .set_eeprom = e100_set_eeprom,
+ .get_ringparam = e100_get_ringparam,
+ .set_ringparam = e100_set_ringparam,
+ .self_test_count = e100_diag_test_count,
+ .self_test = e100_diag_test,
+ .get_strings = e100_get_strings,
+ .phys_id = e100_phys_id,
+ .get_stats_count = e100_get_stats_count,
+ .get_ethtool_stats = e100_get_ethtool_stats,
+};
+
+static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
+}
+
+static int e100_alloc(struct nic *nic)
+{
+ nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
+ &nic->dma_addr);
+ return nic->mem ? 0 : -ENOMEM;
+}
+
+static void e100_free(struct nic *nic)
+{
+ if(nic->mem) {
+ pci_free_consistent(nic->pdev, sizeof(struct mem),
+ nic->mem, nic->dma_addr);
+ nic->mem = NULL;
+ }
+}
+
+static int e100_open(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err = 0;
+
+ netif_carrier_off(netdev);
+ if((err = e100_up(nic)))
+ DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+ return err;
+}
+
+static int e100_close(struct net_device *netdev)
+{
+ e100_down(netdev_priv(netdev));
+ return 0;
+}
+
+static int __devinit e100_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct nic *nic;
+ int err;
+
+ if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
+ if(((1 << debug) - 1) & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+ return -ENOMEM;
+ }
+
+ netdev->open = e100_open;
+ netdev->stop = e100_close;
+ netdev->hard_start_xmit = e100_xmit_frame;
+ netdev->get_stats = e100_get_stats;
+ netdev->set_multicast_list = e100_set_multicast_list;
+ netdev->set_mac_address = e100_set_mac_address;
+ netdev->change_mtu = e100_change_mtu;
+ netdev->do_ioctl = e100_do_ioctl;
+ SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->tx_timeout = e100_tx_timeout;
+ netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
+ netdev->poll = e100_poll;
+ netdev->weight = E100_NAPI_WEIGHT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = e100_netpoll;
+#endif
+ strcpy(netdev->name, pci_name(pdev));
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+ nic->msg_enable = (1 << debug) - 1;
+ pci_set_drvdata(pdev, netdev);
+
+ if((err = pci_enable_device(pdev))) {
+ DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+ goto err_out_free_dev;
+ }
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
+ "base address, aborting.\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ if((err = pci_request_regions(pdev, DRV_NAME))) {
+ DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+ goto err_out_disable_pdev;
+ }
+
+ if((err = pci_set_dma_mask(pdev, 0xFFFFFFFFULL))) {
+ DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+ goto err_out_free_res;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
+ if(!nic->csr) {
+ DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ if(ent->driver_data)
+ nic->flags |= ich;
+ else
+ nic->flags &= ~ich;
+
+ e100_get_defaults(nic);
+
+ spin_lock_init(&nic->cb_lock);
+ spin_lock_init(&nic->cmd_lock);
+
+ /* Reset the device before pci_set_master() in case device is in some
+ * funky state and has an interrupt pending - hint: we don't have the
+ * interrupt handler registered yet. */
+ e100_hw_reset(nic);
+
+ pci_set_master(pdev);
+
+ init_timer(&nic->watchdog);
+ nic->watchdog.function = e100_watchdog;
+ nic->watchdog.data = (unsigned long)nic;
+ init_timer(&nic->blink_timer);
+ nic->blink_timer.function = e100_blink_led;
+ nic->blink_timer.data = (unsigned long)nic;
+
+ if((err = e100_alloc(nic))) {
+ DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ e100_phy_init(nic);
+
+ if((err = e100_eeprom_load(nic)))
+ goto err_out_free;
+
+ memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ if(!is_valid_ether_addr(netdev->dev_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC address from "
+ "EEPROM, aborting.\n");
+ err = -EAGAIN;
+ goto err_out_free;
+ }
+
+ /* WoL magic packet can be enabled from EEPROM */
+ if((nic->mac >= mac_82558_D101_A4) &&
+ (nic->eeprom[eeprom_id] & eeprom_id_wol))
+ nic->flags |= wol_magic;
+
+ pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+
+ strcpy(netdev->name, "eth%d");
+ if((err = register_netdev(netdev))) {
+ DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+ goto err_out_free;
+ }
+
+ DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, "
+ "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ pci_resource_start(pdev, 0), pdev->irq,
+ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+
+ return 0;
+
+err_out_free:
+ e100_free(nic);
+err_out_iounmap:
+ iounmap(nic->csr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out_free_dev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit e100_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if(netdev) {
+ struct nic *nic = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ e100_free(nic);
+ iounmap(nic->csr);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+#ifdef CONFIG_PM
+static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ if(netif_running(netdev))
+ e100_down(nic);
+ e100_hw_reset(nic);
+ netif_device_detach(netdev);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int e100_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ e100_hw_init(nic);
+
+ netif_device_attach(netdev);
+ if(netif_running(netdev))
+ e100_up(nic);
+
+ return 0;
+}
+#endif
+
+static struct pci_driver e100_driver = {
+ .name = DRV_NAME,
+ .id_table = e100_id_table,
+ .probe = e100_probe,
+ .remove = __devexit_p(e100_remove),
+#ifdef CONFIG_PM
+ .suspend = e100_suspend,
+ .resume = e100_resume,
+#endif
+};
+
+static int __init e100_init_module(void)
+{
+ if(((1 << debug) - 1) & NETIF_MSG_DRV) {
+ printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+ }
+ return pci_module_init(&e100_driver);
+}
+
+static void __exit e100_cleanup_module(void)
+{
+ pci_unregister_driver(&e100_driver);
+}
+
+module_init(e100_init_module);
+module_exit(e100_cleanup_module);
diff --git a/drivers/net/e1000/LICENSE b/drivers/net/e1000/LICENSE
new file mode 100644
index 000000000000..5f297e5bb465
--- /dev/null
+++ b/drivers/net/e1000/LICENSE
@@ -0,0 +1,339 @@
+
+"This software program is licensed subject to the GNU General Public License
+(GPL). Version 2, June 1991, available at
+<http://www.fsf.org/copyleft/gpl.html>"
+
+GNU General Public License
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is intended
+to guarantee your freedom to share and change free software--to make sure
+the software is free for all its users. This General Public License applies
+to most of the Free Software Foundation's software and to any other program
+whose authors commit to using it. (Some other Free Software Foundation
+software is covered by the GNU Library General Public License instead.) You
+can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or
+for a fee, you must give the recipients all the rights that you have. You
+must make sure that they, too, receive or can get the source code. And you
+must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If
+the software is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original, so that any
+problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the
+ terms of this General Public License. The "Program", below, refers to any
+ such program or work, and a "work based on the Program" means either the
+ Program or any derivative work under copyright law: that is to say, a
+ work containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ the Program is not restricted, and the output from the Program is covered
+ only if its contents constitute a work based on the Program (independent
+ of having been made by running the Program). Whether that is true depends
+ on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code
+ as you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you
+ may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ * a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ * b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any part
+ thereof, to be licensed as a whole at no charge to all third parties
+ under the terms of this License.
+
+ * c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive
+ use in the most ordinary way, to print or display an announcement
+ including an appropriate copyright notice and a notice that there is
+ no warranty (or else, saying that you provide a warranty) and that
+ users may redistribute the program under these conditions, and
+ telling the user how to view a copy of this License. (Exception: if
+ the Program itself is interactive but does not normally print such
+ an announcement, your work based on the Program is not required to
+ print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program
+ with the Program (or with a work based on the Program) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections
+ 1 and 2 above provided that you also do one of the following:
+
+ * a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2
+ above on a medium customarily used for software interchange; or,
+
+ * b) Accompany it with a written offer, valid for at least three years,
+ to give any third party, for a charge no more than your cost of
+ physically performing source distribution, a complete machine-
+ readable copy of the corresponding source code, to be distributed
+ under the terms of Sections 1 and 2 above on a medium customarily
+ used for software interchange; or,
+
+ * c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed
+ only for noncommercial distribution and only if you received the
+ program in object code or executable form with such an offer, in
+ accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete source code
+ means all the source code for all modules it contains, plus any
+ associated interface definition files, plus the scripts used to control
+ compilation and installation of the executable. However, as a special
+ exception, the source code distributed need not include anything that is
+ normally distributed (in either source or binary form) with the major
+ components (compiler, kernel, and so on) of the operating system on which
+ the executable runs, unless that component itself accompanies the
+ executable.
+
+ If distribution of executable or object code is made by offering access
+ to copy from a designated place, then offering equivalent access to copy
+ the source code from the same place counts as distribution of the source
+ code, even though third parties are not compelled to copy the source
+ along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Program or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Program (or any work based on the Program), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Program or works
+ based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program subject to
+ these terms and conditions. You may not impose any further restrictions
+ on the recipients' exercise of the rights granted herein. You are not
+ responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute
+ so as to satisfy simultaneously your obligations under this License and
+ any other pertinent obligations, then as a consequence you may not
+ distribute the Program at all. For example, if a patent license would
+ not permit royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only way you
+ could satisfy both it and this License would be to refrain entirely from
+ distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system, which is implemented
+ by public license practices. Many people have made generous contributions
+ to the wide range of software distributed through that system in
+ reliance on consistent application of that system; it is up to the
+ author/donor to decide if he or she is willing to distribute software
+ through any other system and a licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be
+ a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries,
+ so that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of
+ the General Public License from time to time. Such new versions will be
+ similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Program does not specify a version
+ number of this License, you may choose any version ever published by the
+ Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+ YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it free
+software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey the
+exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+one line to give the program's name and an idea of what it does.
+Copyright (C) yyyy name of author
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59
+Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+software, and you are welcome to redistribute it under certain conditions;
+type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+signature of Ty Coon, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
new file mode 100644
index 000000000000..ca9f89552da3
--- /dev/null
+++ b/drivers/net/e1000/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+#
+# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The full GNU General Public License is included in this distribution in the
+# file called LICENSE.
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_E1000) += e1000.o
+
+e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
new file mode 100644
index 000000000000..148930d4e9bd
--- /dev/null
+++ b/drivers/net/e1000/e1000.h
@@ -0,0 +1,261 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __FUNCTION__ , ## args))
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 80
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 80
+#define E1000_MAX_82544_RXD 4096
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+/* only works for sizes that are powers of 2 */
+#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
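+/* Example: E1000_ROUNDUP(i, 16) with i == 100 sets i = (100 + 15) & ~15 == 112,
+ * i.e. the smallest multiple of 16 that is >= 100 */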
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+ struct sk_buff *skb;
+ uint64_t dma;
+ unsigned long time_stamp;
+ uint16_t length;
+ uint16_t next_to_watch;
+};
+
+struct e1000_desc_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+};
+
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
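+/* Unused descriptors in ring R. One slot is always kept empty so that
+ * next_to_use == next_to_clean unambiguously means "empty", never "full". */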
+
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+ struct timer_list tx_fifo_stall_timer;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct vlan_group *vlgrp;
+ uint32_t bd_number;
+ uint32_t rx_buffer_len;
+ uint32_t part_num;
+ uint32_t wol;
+ uint32_t smartspeed;
+ uint32_t en_mng_pt;
+ uint16_t link_speed;
+ uint16_t link_duplex;
+ spinlock_t stats_lock;
+ atomic_t irq_sem;
+ struct work_struct tx_timeout_task;
+ struct work_struct watchdog_task;
+ uint8_t fc_autoneg;
+
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* TX */
+ struct e1000_desc_ring tx_ring;
+ struct e1000_buffer previous_buffer_info;
+ spinlock_t tx_lock;
+ uint32_t txd_cmd;
+ uint32_t tx_int_delay;
+ uint32_t tx_abs_int_delay;
+ uint32_t gotcl;
+ uint64_t gotcl_old;
+ uint64_t tpt_old;
+ uint64_t colc_old;
+ uint32_t tx_fifo_head;
+ uint32_t tx_head_addr;
+ uint32_t tx_fifo_size;
+ atomic_t tx_fifo_stall;
+ boolean_t pcix_82544;
+ boolean_t detect_tx_hung;
+
+ /* RX */
+ struct e1000_desc_ring rx_ring;
+ uint64_t hw_csum_err;
+ uint64_t hw_csum_good;
+ uint32_t rx_int_delay;
+ uint32_t rx_abs_int_delay;
+ boolean_t rx_csum;
+ uint32_t gorcl;
+ uint64_t gorcl_old;
+
+ /* Interrupt Throttle Rate */
+ uint32_t itr;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ uint32_t test_icr;
+ struct e1000_desc_ring test_tx_ring;
+ struct e1000_desc_ring test_rx_ring;
+
+
+ int msg_enable;
+};
+#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
new file mode 100644
index 000000000000..0a2ca7c73a41
--- /dev/null
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -0,0 +1,1673 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+
+#include <asm/uaccess.h>
+
+extern char e1000_driver_name[];
+extern char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+extern int e1000_setup_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
+ offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ { "rx_packets", E1000_STAT(net_stats.rx_packets) },
+ { "tx_packets", E1000_STAT(net_stats.tx_packets) },
+ { "rx_bytes", E1000_STAT(net_stats.rx_bytes) },
+ { "tx_bytes", E1000_STAT(net_stats.tx_bytes) },
+ { "rx_errors", E1000_STAT(net_stats.rx_errors) },
+ { "tx_errors", E1000_STAT(net_stats.tx_errors) },
+ { "rx_dropped", E1000_STAT(net_stats.rx_dropped) },
+ { "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+ { "multicast", E1000_STAT(net_stats.multicast) },
+ { "collisions", E1000_STAT(net_stats.collisions) },
+ { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
+ { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+ { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
+ { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+ { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
+ { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
+ { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
+ { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
+ { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+ { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+ { "tx_window_errors", E1000_STAT(net_stats.tx_window_errors) },
+ { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+ { "tx_deferred_ok", E1000_STAT(stats.dc) },
+ { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+ { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "rx_long_length_errors", E1000_STAT(stats.roc) },
+ { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+ { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+ { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+ { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+ { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+ { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+ { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+ { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+ { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
+};
+#define E1000_STATS_LEN \
+ sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
+
+static int
+e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if(hw->media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+
+ ecmd->advertising = ADVERTISED_TP;
+
+ if(hw->autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+
+ /* the e1000 autoneg seems to match ethtool nicely */
+
+ ecmd->advertising |= hw->autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy_addr;
+
+ if(hw->mac_type == e1000_82543)
+ ecmd->transceiver = XCVR_EXTERNAL;
+ else
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+
+ if(hw->mac_type >= e1000_82545)
+ ecmd->transceiver = XCVR_INTERNAL;
+ else
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ if(netif_carrier_ok(adapter->netdev)) {
+
+ e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+ &adapter->link_duplex);
+ ecmd->speed = adapter->link_speed;
+
+ /* unfortunately FULL_DUPLEX != DUPLEX_FULL
+ * and HALF_DUPLEX != DUPLEX_HALF */
+
+ if(adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int
+e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if(ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->autoneg = 1;
+ hw->autoneg_advertised = 0x002F;
+ ecmd->advertising = 0x002F;
+ } else
+ if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+ return -EINVAL;
+
+ /* reset the link */
+
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_reset(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+
+ return 0;
+}
+
+static void
+e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if(hw->fc == e1000_fc_rx_pause)
+ pause->rx_pause = 1;
+ else if(hw->fc == e1000_fc_tx_pause)
+ pause->tx_pause = 1;
+ else if(hw->fc == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int
+e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ if(pause->rx_pause && pause->tx_pause)
+ hw->fc = e1000_fc_full;
+ else if(pause->rx_pause && !pause->tx_pause)
+ hw->fc = e1000_fc_rx_pause;
+ else if(!pause->rx_pause && pause->tx_pause)
+ hw->fc = e1000_fc_tx_pause;
+ else if(!pause->rx_pause && !pause->tx_pause)
+ hw->fc = e1000_fc_none;
+
+ hw->original_fc = hw->fc;
+
+ if(adapter->fc_autoneg == AUTONEG_ENABLE) {
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ }
+ else
+ return ((hw->media_type == e1000_media_type_fiber) ?
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+ return 0;
+}
+
+static uint32_t
+e1000_get_rx_csum(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ return adapter->rx_csum;
+}
+
+static int
+e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ adapter->rx_csum = data;
+
+ if(netif_running(netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ return 0;
+}
+
+static uint32_t
+e1000_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ if(adapter->hw.mac_type < e1000_82543) {
+ if (!data)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int
+e1000_set_tso(struct net_device *netdev, uint32_t data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ if ((adapter->hw.mac_type < e1000_82544) ||
+ (adapter->hw.mac_type == e1000_82547))
+ return data ? -EINVAL : 0;
+
+ if (data)
+ netdev->features |= NETIF_F_TSO;
+ else
+ netdev->features &= ~NETIF_F_TSO;
+ return 0;
+}
+#endif /* NETIF_F_TSO */
+
+static uint32_t
+e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ return adapter->msg_enable;
+}
+
+static void
+e1000_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ adapter->msg_enable = data;
+}
+
+static int
+e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+ return E1000_REGS_LEN * sizeof(uint32_t);
+}
+
+static void
+e1000_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ uint32_t *regs_buff = p;
+ uint16_t phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = E1000_READ_REG(hw, CTRL);
+ regs_buff[1] = E1000_READ_REG(hw, STATUS);
+
+ regs_buff[2] = E1000_READ_REG(hw, RCTL);
+ regs_buff[3] = E1000_READ_REG(hw, RDLEN);
+ regs_buff[4] = E1000_READ_REG(hw, RDH);
+ regs_buff[5] = E1000_READ_REG(hw, RDT);
+ regs_buff[6] = E1000_READ_REG(hw, RDTR);
+
+ regs_buff[7] = E1000_READ_REG(hw, TCTL);
+ regs_buff[8] = E1000_READ_REG(hw, TDLEN);
+ regs_buff[9] = E1000_READ_REG(hw, TDH);
+ regs_buff[10] = E1000_READ_REG(hw, TDT);
+ regs_buff[11] = E1000_READ_REG(hw, TIDV);
+
+ regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
+ if(hw->phy_type == e1000_phy_igp) {
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_A);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[13] = (uint32_t)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_B);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[14] = (uint32_t)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_C);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[15] = (uint32_t)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_D);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[16] = (uint32_t)phy_data; /* cable length */
+ regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_PCS_INIT_REG);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
+ regs_buff[20] = 0; /* polarity correction enabled (always) */
+ regs_buff[22] = 0; /* phy receive errors (unavailable) */
+ regs_buff[23] = regs_buff[18]; /* mdix mode */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ } else {
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (uint32_t)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ if(hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ regs_buff[26] = E1000_READ_REG(hw, MANC);
+ }
+}
+
+static int
+e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ return adapter->hw.eeprom.word_size * 2;
+}
+
+static int
+e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *bytes)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ uint16_t *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ uint16_t i;
+
+ if(eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(uint16_t) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if(!eeprom_buff)
+ return -ENOMEM;
+
+ if(hw->eeprom.type == e1000_eeprom_spi)
+ ret_val = e1000_read_eeprom(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++)
+ if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ &eeprom_buff[i])))
+ break;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int
+e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *bytes)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ uint16_t *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ uint16_t i;
+
+ if(eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if(!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if(eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_eeprom(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum over the first part of the EEPROM if needed */
+ if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG)
+ e1000_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void
+e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ strncpy(drvinfo->driver, e1000_driver_name, 32);
+ strncpy(drvinfo->version, e1000_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = E1000_STATS_LEN;
+ drvinfo->testinfo_len = E1000_TEST_LEN;
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void
+e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+ struct e1000_desc_ring *txdr = &adapter->tx_ring;
+ struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+
+ ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD;
+ ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+ E1000_MAX_82544_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int
+e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+ struct e1000_desc_ring *txdr = &adapter->tx_ring;
+ struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+ struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new;
+ int err;
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if(netif_running(adapter->netdev))
+ e1000_down(adapter);
+
+ rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
+ rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
+ txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if(netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ if((err = e1000_setup_rx_resources(adapter)))
+ goto err_setup_rx;
+ if((err = e1000_setup_tx_resources(adapter)))
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ rx_new = adapter->rx_ring;
+ tx_new = adapter->tx_ring;
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000_free_rx_resources(adapter);
+ e1000_free_tx_resources(adapter);
+ adapter->rx_ring = rx_new;
+ adapter->tx_ring = tx_new;
+ if((err = e1000_up(adapter)))
+ return err;
+ }
+
+ return 0;
+err_setup_tx:
+ e1000_free_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000_up(adapter);
+ return err;
+}
+
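+/* Write each test pattern (masked by the writable-bit mask W) to register R,
+ * read it back, and compare against the expected value under read mask M; on
+ * mismatch, record the offending register's offset in *data and fail.
+ */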
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ uint32_t pat, value; \
+ uint32_t test[] = \
+ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
+ E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
+ value = E1000_READ_REG(&adapter->hw, R); \
+ if(value != (test[pat] & W & M)) { \
+ *data = (adapter->hw.mac_type < e1000_82543) ? \
+ E1000_82542_##R : E1000_##R; \
+ return 1; \
+ } \
+ } \
+}
+
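+/* Write the value W & M to register R once and verify that the bits covered
+ * by M read back as written; on mismatch, record the register's offset in
+ * *data and fail.
+ */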
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ uint32_t value; \
+ E1000_WRITE_REG(&adapter->hw, R, W & M); \
+ value = E1000_READ_REG(&adapter->hw, R); \
+ if ((W & M) != (value & M)) { \
+ *data = (adapter->hw.mac_type < e1000_82543) ? \
+ E1000_82542_##R : E1000_##R; \
+ return 1; \
+ } \
+}
+
+static int
+e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
+{
+ uint32_t value;
+ uint32_t i;
+
+ /* The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+ value = (E1000_READ_REG(&adapter->hw, STATUS) & (0xFFFFF833));
+ E1000_WRITE_REG(&adapter->hw, STATUS, (0xFFFFFFFF));
+ if(value != (E1000_READ_REG(&adapter->hw, STATUS) & (0xFFFFF833))) {
+ *data = 1;
+ return 1;
+ }
+
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+ REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
+ REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+ if(adapter->hw.mac_type >= e1000_82543) {
+
+ REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+
+ for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+ REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
+ 0xFFFFFFFF);
+ REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+ 0xFFFFFFFF);
+ }
+
+ } else {
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+ }
+
+ for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+ REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int
+e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
+{
+ uint16_t temp;
+ uint16_t checksum = 0;
+ uint16_t i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ break;
+ }
+ checksum += temp;
+ }
+
+	/* If the checksum is not correct, return an error; otherwise the test passed */
+ if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
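+/* Interrupt handler installed only for the interrupt test; it accumulates the
+ * interrupt causes seen in ICR so the test can check which ones were posted.
+ */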
+static irqreturn_t
+e1000_test_intr(int irq,
+ void *data,
+ struct pt_regs *regs)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct e1000_adapter *adapter = netdev->priv;
+
+ adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int
+e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t mask, i=0, shared_int = TRUE;
+ uint32_t irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ shared_int = FALSE;
+ } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+ netdev->name, netdev)){
+ *data = 1;
+ return -1;
+ }
+
+ /* Disable all the interrupts */
+ E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
+ msec_delay(10);
+
+ /* Test each interrupt */
+ for(; i < 10; i++) {
+
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if(!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC, mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ msec_delay(10);
+
+ if(adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMS, mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ msec_delay(10);
+
+ if(!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if(!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC,
+ (~mask & 0x00007FFF));
+ E1000_WRITE_REG(&adapter->hw, ICS,
+ (~mask & 0x00007FFF));
+ msec_delay(10);
+
+ if(adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
+ msec_delay(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
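+/* Release the DMA mappings, socket buffers, descriptor memory and buffer_info
+ * arrays allocated for the loopback test rings.
+ */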
+static void
+e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if(txdr->desc && txdr->buffer_info) {
+ for(i = 0; i < txdr->count; i++) {
+ if(txdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ PCI_DMA_TODEVICE);
+ if(txdr->buffer_info[i].skb)
+ dev_kfree_skb(txdr->buffer_info[i].skb);
+ }
+ }
+
+ if(rxdr->desc && rxdr->buffer_info) {
+ for(i = 0; i < rxdr->count; i++) {
+ if(rxdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ rxdr->buffer_info[i].length,
+ PCI_DMA_FROMDEVICE);
+ if(rxdr->buffer_info[i].skb)
+ dev_kfree_skb(rxdr->buffer_info[i].skb);
+ }
+ }
+
+ if(txdr->desc)
+ pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ if(rxdr->desc)
+ pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+
+ if(txdr->buffer_info)
+ kfree(txdr->buffer_info);
+ if(rxdr->buffer_info)
+ kfree(rxdr->buffer_info);
+
+ return;
+}
+
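+/* Allocate and program minimal Tx/Rx descriptor rings (80 descriptors each,
+ * 1024-byte Tx buffers, 2048-byte Rx buffers) used only by the loopback test.
+ */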
+static int
+e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ uint32_t rctl;
+ int size, i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ txdr->count = 80;
+
+ size = txdr->count * sizeof(struct e1000_buffer);
+ if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+ memset(txdr->buffer_info, 0, size);
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ E1000_ROUNDUP(txdr->size, 4096);
+ if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ memset(txdr->desc, 0, txdr->size);
+ txdr->next_to_use = txdr->next_to_clean = 0;
+
+ E1000_WRITE_REG(&adapter->hw, TDBAL,
+ ((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
+ E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
+ E1000_WRITE_REG(&adapter->hw, TDLEN,
+ txdr->count * sizeof(struct e1000_tx_desc));
+ E1000_WRITE_REG(&adapter->hw, TDH, 0);
+ E1000_WRITE_REG(&adapter->hw, TDT, 0);
+ E1000_WRITE_REG(&adapter->hw, TCTL,
+ E1000_TCTL_PSP | E1000_TCTL_EN |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for(i = 0; i < txdr->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ txdr->buffer_info[i].skb = skb;
+ txdr->buffer_info[i].length = skb->len;
+ txdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RPS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ rxdr->count = 80;
+
+ size = rxdr->count * sizeof(struct e1000_buffer);
+ if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+ memset(rxdr->buffer_info, 0, size);
+
+ rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+ if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+ rxdr->next_to_use = rxdr->next_to_clean = 0;
+
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+ E1000_WRITE_REG(&adapter->hw, RDBAL,
+ ((uint64_t) rxdr->dma & 0xFFFFFFFF));
+ E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
+ E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
+ E1000_WRITE_REG(&adapter->hw, RDH, 0);
+ E1000_WRITE_REG(&adapter->hw, RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+
+ for(i = 0; i < rxdr->count; i++) {
+ struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+ struct sk_buff *skb;
+
+ if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+ GFP_KERNEL))) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rxdr->buffer_info[i].skb = skb;
+ rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+ rxdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void
+e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
+ e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
+ e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
+ e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
+}
+
+static void
+e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+ uint16_t phy_reg;
+
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This
+ * value defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+ e1000_write_phy_reg(&adapter->hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+ /* In addition, because of the s/w reset above, we need to enable
+ * CRS on TX. This must be set for both full and half duplex
+ * operation.
+ */
+ e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ e1000_write_phy_reg(&adapter->hw,
+ M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int
+e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ uint32_t ctrl_reg;
+ uint16_t phy_reg;
+
+ /* Setup the Device Control Register for PHY loopback test. */
+
+ ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
+ E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
+
+ /* Read the PHY Specific Control Register (0x10) */
+ e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+ /* Clear Auto-Crossover bits in PHY Specific Control Register
+ * (bits 6:5).
+ */
+ phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+ e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+ /* Perform software reset on the PHY */
+ e1000_phy_reset(&adapter->hw);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100);
+
+ /* Wait for reset to complete. */
+ udelay(500);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_phy_disable_receiver(adapter);
+
+ /* Set the loopback bit in the PHY control register. */
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
+
+ /* Setup TX_CLK and TX_CRS one more time. */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Check Phy Configuration */
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ if(phy_reg != 0x4100)
+ return 9;
+
+ e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ if(phy_reg != 0x0070)
+ return 10;
+
+ e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
+ if(phy_reg != 0x001A)
+ return 11;
+
+ return 0;
+}
+
+static int
+e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ uint32_t ctrl_reg = 0;
+ uint32_t stat_reg = 0;
+
+ adapter->hw.autoneg = FALSE;
+
+ if(adapter->hw.phy_type == e1000_phy_m88) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(&adapter->hw,
+ M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
+ }
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if(adapter->hw.media_type == e1000_media_type_copper &&
+ adapter->hw.phy_type == e1000_phy_m88) {
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ } else {
+		/* Set the ILOS bit on the fiber NIC if half
+		 * duplex link is detected. */
+ stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
+ if((stat_reg & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that the PHY does not begin to
+	 * autoneg when a cable is reconnected to the NIC.
+ */
+ if(adapter->hw.phy_type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int
+e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+ uint16_t phy_reg = 0;
+ uint16_t count = 0;
+
+ switch (adapter->hw.mac_type) {
+ case e1000_82543:
+ if(adapter->hw.media_type == e1000_media_type_copper) {
+ /* Attempt to setup Loopback mode on Non-integrated PHY.
+ * Some PHY registers get corrupted at random, so
+ * attempt this 10 times.
+ */
+ while(e1000_nonintegrated_phy_loopback(adapter) &&
+ count++ < 10);
+ if(count < 11)
+ return 0;
+ }
+ break;
+
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ return e1000_integrated_phy_loopback(adapter);
+ break;
+
+ default:
+		/* The default PHY loopback method is to read the MII
+ * control register and assert bit 14 (loopback mode).
+ */
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
+ return 0;
+ break;
+ }
+
+ return 8;
+}
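+/* Select a loopback mechanism based on media type: PHY loopback for copper
+ * (and for the fiber/serdes parts that support it), otherwise transceiver
+ * loopback via the RCTL loopback mode bits.
+ */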
+
+static int
+e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ uint32_t rctl;
+
+ if(adapter->hw.media_type == e1000_media_type_fiber ||
+ adapter->hw.media_type == e1000_media_type_internal_serdes) {
+ if(adapter->hw.mac_type == e1000_82545 ||
+ adapter->hw.mac_type == e1000_82546 ||
+ adapter->hw.mac_type == e1000_82545_rev_3 ||
+ adapter->hw.mac_type == e1000_82546_rev_3)
+ return e1000_set_phy_loopback(adapter);
+ else {
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ return 0;
+ }
+ } else if(adapter->hw.media_type == e1000_media_type_copper)
+ return e1000_set_phy_loopback(adapter);
+
+ return 7;
+}
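+/* Undo loopback configuration: clear the RCTL loopback mode bits and, where
+ * PHY loopback was used, clear the MII loopback bit and reset the PHY.
+ */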
+
+static void
+e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ uint32_t rctl;
+ uint16_t phy_reg;
+
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+
+ if(adapter->hw.media_type == e1000_media_type_copper ||
+ ((adapter->hw.media_type == e1000_media_type_fiber ||
+ adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+ (adapter->hw.mac_type == e1000_82545 ||
+ adapter->hw.mac_type == e1000_82546 ||
+ adapter->hw.mac_type == e1000_82545_rev_3 ||
+ adapter->hw.mac_type == e1000_82546_rev_3))) {
+ adapter->hw.autoneg = TRUE;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
+ if(phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
+ e1000_phy_reset(&adapter->hw);
+ }
+ }
+}
+
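+/* Fill a test frame with a recognizable pattern: 0xFF in the first half,
+ * 0xAA in the second half, plus 0xBE/0xAF marker bytes that the receive
+ * check below looks for.
+ */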
+static void
+e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int
+e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+ frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+ if(*(skb->data + 3) == 0xFF) {
+ if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
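+/* Transmit 64 patterned frames through the loopback path, wait briefly, then
+ * scan the receive ring for a frame that passes the pattern check.
+ */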
+static int
+e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, ret_val;
+
+ E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
+
+ for(i = 0; i < 64; i++) {
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
+ pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ PCI_DMA_TODEVICE);
+ }
+ E1000_WRITE_REG(&adapter->hw, TDT, i);
+
+ msec_delay(200);
+
+ i = 0;
+ do {
+ pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
+ rxdr->buffer_info[i].length,
+ PCI_DMA_FROMDEVICE);
+
+ ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
+ 1024);
+ i++;
+ } while (ret_val != 0 && i < 64);
+
+ return ret_val;
+}
+
+static int
+e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
+{
+ if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
+ if((*data = e1000_setup_loopback_test(adapter))) goto err_loopback;
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+ e1000_free_desc_rings(adapter);
+err_loopback:
+ return *data;
+}
+
+static int
+e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
+{
+ *data = 0;
+
+ if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ adapter->hw.serdes_link_down = TRUE;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes. */
+ do {
+ e1000_check_for_link(&adapter->hw);
+ if (adapter->hw.serdes_link_down == FALSE)
+ return *data;
+ msec_delay(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ e1000_check_for_link(&adapter->hw);
+
+ if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+ *data = 1;
+ }
+ }
+ return *data;
+}
+
+static int
+e1000_diag_test_count(struct net_device *netdev)
+{
+ return E1000_TEST_LEN;
+}
+
+static void
+e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, uint64_t *data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ boolean_t if_running = netif_running(netdev);
+
+ if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
+ uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
+ uint8_t autoneg = adapter->hw.autoneg;
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if(e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if(if_running)
+ e1000_down(adapter);
+ else
+ e1000_reset(adapter);
+
+ if(e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if(e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if(e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if(e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.autoneg_advertised = autoneg_advertised;
+ adapter->hw.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.autoneg = autoneg;
+
+ e1000_reset(adapter);
+ if(if_running)
+ e1000_up(adapter);
+ } else {
+ /* Online tests */
+ if(e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ }
+}
+
+static void
+e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch(adapter->hw.device_id) {
+ case E1000_DEV_ID_82542:
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545EM_COPPER:
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ /* Wake events only supported on port A for dual fiber */
+ if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
+ /* Fall Through */
+
+ default:
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+
+ wol->wolopts = 0;
+ if(adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if(adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if(adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if(adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ return;
+ }
+}
+
+static int
+e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch(adapter->hw.device_id) {
+ case E1000_DEV_ID_82542:
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545EM_COPPER:
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ /* Wake events only supported on port A for dual fiber */
+ if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+ /* Fall Through */
+
+ default:
+ if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ adapter->wol = 0;
+
+ if(wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if(wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if(wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if(wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ }
+
+ return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL (HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON 0
+
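+/* Timer callback that toggles the identify LED and re-arms itself so the LED
+ * keeps blinking until the timer is deleted.
+ */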
+static void
+e1000_led_blink_callback(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+ e1000_led_off(&adapter->hw);
+ else
+ e1000_led_on(&adapter->hw);
+
+ mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int
+e1000_phys_id(struct net_device *netdev, uint32_t data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
+
+ if(!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long) adapter;
+ }
+
+ e1000_setup_led(&adapter->hw);
+ mod_timer(&adapter->blink_timer, jiffies);
+
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ e1000_led_off(&adapter->hw);
+ clear_bit(E1000_LED_ON, &adapter->led_status);
+ e1000_cleanup_led(&adapter->hw);
+
+ return 0;
+}
+
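+/* If the interface is running, restart link negotiation by cycling it
+ * down and back up. */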
+static int
+e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ if(netif_running(netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+ return 0;
+}
+
+static int
+e1000_get_stats_count(struct net_device *netdev)
+{
+ return E1000_STATS_LEN;
+}
+
+static void
+e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ int i;
+
+ e1000_update_stats(adapter);
+ for(i = 0; i < E1000_STATS_LEN; i++) {
+ char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+ }
+}
+
+static void
+e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+{
+ int i;
+
+ switch(stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e1000_gstrings_test,
+ E1000_TEST_LEN*ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i=0; i < E1000_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .get_rx_csum = e1000_get_rx_csum,
+ .set_rx_csum = e1000_set_rx_csum,
+ .get_tx_csum = e1000_get_tx_csum,
+ .set_tx_csum = e1000_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = e1000_set_tso,
+#endif
+ .self_test_count = e1000_diag_test_count,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .phys_id = e1000_phys_id,
+ .get_stats_count = e1000_get_stats_count,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
new file mode 100644
index 000000000000..786a9b935659
--- /dev/null
+++ b/drivers/net/e1000/e1000_hw.c
@@ -0,0 +1,5405 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+#include "e1000_hw.h"
+
+static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
+static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
+ uint16_t count);
+static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
+static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
+ uint16_t offset, uint16_t words,
+ uint16_t *data);
+static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
+ uint16_t count);
+static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
+ uint16_t phy_data);
+static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
+ uint16_t *phy_data);
+static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
+static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static int32_t e1000_id_led_init(struct e1000_hw * hw);
+static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
+static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
+
+/* IGP cable length table */
+static const
+uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+ 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+ 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+ 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_phy_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_phy_type");
+
+ switch(hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if(hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ hw->phy_type = e1000_phy_igp;
+ break;
+ }
+ /* Fall Through */
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_phy_init_script(struct e1000_hw *hw)
+{
+ uint32_t ret_val;
+ uint16_t phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script");
+
+
+ if(hw->phy_init_script) {
+ msec_delay(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+	/* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ msec_delay(20);
+
+ e1000_write_phy_reg(hw,0x0000,0x0140);
+
+ msec_delay(5);
+
+ switch(hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
+
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+ msec_delay(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if(hw->mac_type == e1000_82547) {
+ uint16_t fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+ if(!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if(coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if(coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_mac_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ switch (hw->revision_id) {
+ case E1000_82542_2_0_REV_ID:
+ hw->mac_type = e1000_82542_rev2_0;
+ break;
+ case E1000_82542_2_1_REV_ID:
+ hw->mac_type = e1000_82542_rev2_1;
+ break;
+ default:
+ /* Invalid 82542 revision ID */
+ return -E1000_ERR_MAC_TYPE;
+ }
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ hw->mac_type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ hw->mac_type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ hw->mac_type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ hw->mac_type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ hw->mac_type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ hw->mac_type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ hw->mac_type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ hw->mac_type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ hw->mac_type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ hw->mac_type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ hw->mac_type = e1000_82547_rev_2;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ return -E1000_ERR_MAC_TYPE;
+ }
+
+ switch(hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->asf_firmware_present = TRUE;
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void
+e1000_set_media_type(struct e1000_hw *hw)
+{
+ uint32_t status;
+
+ DEBUGFUNC("e1000_set_media_type");
+
+ if(hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = FALSE;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ if(hw->mac_type >= e1000_82543) {
+ status = E1000_READ_REG(hw, STATUS);
+ if(status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = FALSE;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ } else {
+ /* This is an 82542 (fiber only) */
+ hw->media_type = e1000_media_type_fiber;
+ }
+ }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_reset_hw(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ uint32_t ctrl_ext;
+ uint32_t icr;
+ uint32_t manc;
+ uint32_t led_ctrl;
+
+ DEBUGFUNC("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if(hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ E1000_WRITE_REG(hw, RCTL, 0);
+ E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+	/* The tbi_compatibility_on flag must be cleared when RCTL is cleared. */
+ hw->tbi_compatibility_on = FALSE;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ msec_delay(5);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ switch(hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ default:
+ E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch(hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ /* Wait for EEPROM reload */
+ msec_delay(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msec_delay(20);
+ break;
+ default:
+ /* Wait for EEPROM reload (it happens automatically) */
+ msec_delay(5);
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if(hw->mac_type >= e1000_82540) {
+ manc = E1000_READ_REG(hw, MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ E1000_WRITE_REG(hw, MANC, manc);
+ }
+
+ if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = E1000_READ_REG(hw, ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if(hw->mac_type == e1000_82542_rev2_0) {
+ if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+int32_t
+e1000_init_hw(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ uint32_t i;
+ int32_t ret_val;
+ uint16_t pcix_cmd_word;
+ uint16_t pcix_stat_hi_word;
+ uint16_t cmd_mmrbc;
+ uint16_t stat_mmrbc;
+ DEBUGFUNC("e1000_init_hw");
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Disabling VLAN filtering. */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ E1000_WRITE_REG(hw, VET, 0);
+
+ e1000_clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if(hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if(hw->mac_type == e1000_82542_rev2_0) {
+ E1000_WRITE_REG(hw, RCTL, 0);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives.
+ */
+ if(hw->dma_fairness) {
+ ctrl = E1000_READ_REG(hw, CTRL);
+ E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch(hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if(hw->bus_type == e1000_bus_type_pcix) {
+ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
+ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
+ &pcix_stat_hi_word);
+ cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >>
+ PCIX_COMMAND_MMRBC_SHIFT;
+ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+ PCIX_STATUS_HI_MMRBC_SHIFT;
+ if(stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+ if(cmd_mmrbc > stat_mmrbc) {
+ pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
+ pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
+ &pcix_cmd_word);
+ }
+ }
+ break;
+ }
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if(hw->mac_type > e1000_82544) {
+ ctrl = E1000_READ_REG(hw, TXDCTL);
+ ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_WRITE_REG(hw, TXDCTL, ctrl);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ return ret_val;
+}
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static int32_t
+e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+ uint16_t eeprom_data;
+ int32_t ret_val;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+ if(hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch(hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if(eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+int32_t
+e1000_setup_link(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t ret_val;
+ uint16_t eeprom_data;
+
+ DEBUGFUNC("e1000_setup_link");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if(hw->fc == e1000_fc_default) {
+ if((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = e1000_fc_none;
+ else if((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = e1000_fc_tx_pause;
+ else
+ hw->fc = e1000_fc_full;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if(hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~e1000_fc_tx_pause);
+
+ if((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~e1000_fc_rx_pause);
+
+ hw->original_fc = hw->fc;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if(hw->mac_type == e1000_82543) {
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) :
+ e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+ E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if(!(hw->fc & e1000_fc_tx_pause)) {
+ E1000_WRITE_REG(hw, FCRTL, 0);
+ E1000_WRITE_REG(hw, FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if(hw->fc_send_xon) {
+ E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+ } else {
+ E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water);
+ E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static int32_t
+e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t txcw = 0;
+ uint32_t i;
+ uint32_t signal = 0;
+ int32_t ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+	/* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value set in
+ * the EEPROM.
+ */
+ ctrl = E1000_READ_REG(hw, CTRL);
+ if(hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if(ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if(ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+	 * will have to set the "PAUSE" bits to the correct value in the Transmit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case e1000_fc_none:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, TXCW, txcw);
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ hw->txcw = txcw;
+ msec_delay(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if(hw->media_type == e1000_media_type_internal_serdes ||
+ (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ DEBUGOUT("Looking for Link\n");
+ for(i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, STATUS);
+ if(status & E1000_STATUS_LU) break;
+ }
+ if(i == (LINK_UP_TIMEOUT / 10)) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if(ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ DEBUGOUT("Valid Link Found\n");
+ }
+ } else {
+ DEBUGOUT("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
+}
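+
+/* Editor's note: illustrative sketch, not part of the driver. The switch
+ * above collapses the four flow control modes into three possible TXCW
+ * advertisement words, because "Rx pause only" cannot be advertised on its
+ * own. The helper below restates just that mapping; it relies only on the
+ * e1000_fc_* and E1000_TXCW_* definitions already used above and is kept
+ * under #if 0 so it is never built.
+ */
+#if 0
+static uint32_t
+e1000_fc_to_txcw_sketch(uint32_t fc)
+{
+ /* Always advertise auto-negotiation and full duplex. */
+ uint32_t txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+
+ switch (fc) {
+ case e1000_fc_none:
+ /* Advertise no PAUSE capability at all. */
+ break;
+ case e1000_fc_tx_pause:
+ /* Asymmetric PAUSE towards the link partner only. */
+ txcw |= E1000_TXCW_ASM_DIR;
+ break;
+ case e1000_fc_rx_pause:
+ case e1000_fc_full:
+ /* Advertise symmetric and asymmetric PAUSE; Rx-only is enforced
+ * later by leaving TFCE clear in the Device Control register. */
+ txcw |= E1000_TXCW_PAUSE_MASK;
+ break;
+ default:
+ break;
+ }
+ return txcw;
+}
+#endif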
+
+/******************************************************************************
+* Sets up link for a copper-based adapter. Detects which PHY is present and
+* configures speed, duplex, and flow control.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_setup_copper_link(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ uint32_t led_ctrl;
+ int32_t ret_val;
+ uint16_t i;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_setup_copper_link");
+
+ ctrl = E1000_READ_REG(hw, CTRL);
+ /* On 82543 and older MACs, we need to force speed and duplex on the MAC
+ * to match the PHY's speed and duplex configuration, and we also need to
+ * perform a hardware reset on the PHY to take it out of reset. Newer MACs
+ * only need the Set Link Up bit.
+ */
+ if(hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ } else {
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ e1000_phy_hw_reset(hw);
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if(ret_val) {
+ DEBUGOUT("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if(ret_val)
+ return ret_val;
+
+ if((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+ phy_data |= 0x00000008;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ if(hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = FALSE;
+
+ if(!hw->phy_reset_disable) {
+ if (hw->phy_type == e1000_phy_igp) {
+
+ ret_val = e1000_phy_reset(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for the MAC to configure the PHY from EEPROM settings */
+ msec_delay(15);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+ if(ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX |
+ IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if(hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if(hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if(hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* When the auto-negotiation advertisement is 1000Mbps only, we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default. */
+ if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) :
+ e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+ } else {
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if(hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+ }
+
+ /* Options:
+ * autoneg = 1 (default)
+ * PHY will advertise value(s) parsed from
+ * autoneg_advertised and fc
+ * autoneg = 0
+ * PHY will be set to 10H, 10F, 100H, or 100F
+ * depending on value parsed from forced_speed_duplex.
+ */
+
+ /* Is autoneg enabled? This is enabled by default or by software
+ * override. If so, call e1000_phy_setup_autoneg routine to parse the
+ * autoneg_advertised and fc options. If autoneg is NOT enabled, then
+ * the user should have provided a speed/duplex override. If so, then
+ * call e1000_phy_force_speed_duplex to parse and set this up.
+ */
+ if(hw->autoneg) {
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code, so we set it to advertise full capability.
+ */
+ if(hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, from a callback routine)?
+ */
+ if(hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if(ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+ hw->get_link_status = TRUE;
+ } else {
+ DEBUGOUT("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+ } /* !hw->phy_reset_disable */
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ for(i = 0; i < 10; i++) {
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(phy_data & MII_SR_LINK_STATUS) {
+ /* We have link, so we need to finish the config process:
+ * 1) Set up the MAC to the current PHY speed/duplex
+ * if we are on 82543. If we
+ * are on newer silicon, we only need to configure
+ * collision distance in the Transmit Control Register.
+ * 2) Set up flow control on the MAC to that established with
+ * the link partner.
+ */
+ if(hw->mac_type >= e1000_82544) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if(ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+ DEBUGOUT("Valid link established!!!\n");
+
+ if(hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
+ if(ret_val) {
+ DEBUGOUT("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+ DEBUGOUT("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ DEBUGOUT("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t mii_autoneg_adv_reg;
+ uint16_t mii_1000t_ctrl_reg;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if(ret_val)
+ return ret_val;
+
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if(ret_val)
+ return ret_val;
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if(hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if(hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if(hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if(hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if(hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc) {
+ case e1000_fc_none: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if(ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if(ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ int32_t ret_val;
+ uint16_t mii_ctrl_reg;
+ uint16_t mii_status_reg;
+ uint16_t phy_data;
+ uint16_t i;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = e1000_fc_none;
+
+ DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = E1000_READ_REG(hw, CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if(ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if(hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps??? */
+ if(hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ DEBUGOUT("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+
+ if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if(ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if(hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+ /* Wait for the forced link to come up, or for PHY_FORCE_TIME (in 100 ms steps) to expire. */
+ for(i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for the Link Status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ msec_delay(100);
+ }
+ if((i == 0) &&
+ (hw->phy_type == e1000_phy_m88)) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if(ret_val) {
+ DEBUGOUT("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for(i = PHY_FORCE_TIME; i > 0; i--) {
+ if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ msec_delay(100);
+ /* Read the MII Status Register and wait for the Link Status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if(ret_val)
+ return ret_val;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously. The collision distance is
+* written to the COLD field of the Transmit Control register.
+******************************************************************************/
+void
+e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ uint32_t tctl;
+
+ DEBUGFUNC("e1000_config_collision_dist");
+
+ tctl = E1000_READ_REG(hw, TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those of the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* The speed and duplex negotiated by the PHY are read from the PHY's
+* status register and mirrored into the Device Control register.
+******************************************************************************/
+static int32_t
+e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy");
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(phy_data & IGP01E1000_PSSR_FULL_DUPLEX) ctrl |= E1000_CTRL_FD;
+ else ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_100MBPS)
+ ctrl |= E1000_CTRL_SPD_100;
+ } else {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD;
+ else ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
+ /* Write the configured values back to the Device Control Reg. */
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+int32_t
+e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = E1000_READ_REG(hw, CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if(hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+int32_t
+e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t mii_status_reg;
+ uint16_t mii_nway_adv_reg;
+ uint16_t mii_nway_lp_ability_reg;
+ uint16_t speed;
+ uint16_t duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if(((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) && (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if(ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ if(mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+ * (Address 4) and the Auto_Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if(ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ */
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if(hw->original_fc == e1000_fc_full) {
+ hw->fc = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\r\n");
+ } else {
+ hw->fc = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ *
+ */
+ else if(!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ */
+ else if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more often than not we will
+ * be asked to delay transmission of packets rather than to ask
+ * our link partner to pause transmission of frames.
+ */
+ else if((hw->original_fc == e1000_fc_none ||
+ hw->original_fc == e1000_fc_tx_pause) ||
+ hw->fc_strict_ieee) {
+ hw->fc = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\r\n");
+ } else {
+ hw->fc = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if(ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if(duplex == HALF_DUPLEX)
+ hw->fc = e1000_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if(ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
+ }
+ }
+ return E1000_SUCCESS;
+}
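+
+/* Editor's note: illustrative sketch, not part of the driver. The chained
+ * if/else above implements the PAUSE resolution table quoted in the
+ * comments. The helper below isolates just that resolution step: "local"
+ * and "partner" are the contents of PHY registers 4 and 5, "requested" is
+ * the originally requested flow control mode, and the return value is the
+ * resolved mode. Only the NWAY_* and e1000_fc_* names already used above
+ * are assumed; the code is kept under #if 0 so it is never built.
+ */
+#if 0
+static uint32_t
+e1000_resolve_fc_sketch(uint16_t local, uint16_t partner, uint32_t requested)
+{
+ /* Both ends advertise symmetric PAUSE: full, unless we only wanted Rx. */
+ if((local & NWAY_AR_PAUSE) && (partner & NWAY_LPAR_PAUSE))
+ return (requested == e1000_fc_full) ?
+ e1000_fc_full : e1000_fc_rx_pause;
+
+ /* We advertise asymmetric only, partner advertises both: Tx pause only. */
+ if(!(local & NWAY_AR_PAUSE) && (local & NWAY_AR_ASM_DIR) &&
+ (partner & NWAY_LPAR_PAUSE) && (partner & NWAY_LPAR_ASM_DIR))
+ return e1000_fc_tx_pause;
+
+ /* We advertise both, partner advertises asymmetric only: Rx pause only. */
+ if((local & NWAY_AR_PAUSE) && (local & NWAY_AR_ASM_DIR) &&
+ !(partner & NWAY_LPAR_PAUSE) && (partner & NWAY_LPAR_ASM_DIR))
+ return e1000_fc_rx_pause;
+
+ /* Everything else resolves to no flow control per the IEEE table; the
+ * routine above additionally allows Rx-only for legacy link partners. */
+ return e1000_fc_none;
+}
+#endif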
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+int32_t
+e1000_check_for_link(struct e1000_hw *hw)
+{
+ uint32_t rxcw = 0;
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t rctl;
+ uint32_t icr;
+ uint32_t signal = 0;
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_check_for_link");
+
+ ctrl = E1000_READ_REG(hw, CTRL);
+ status = E1000_READ_REG(hw, STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = E1000_READ_REG(hw, RXCW);
+
+ if(hw->media_type == e1000_media_type_fiber) {
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+ if(status & E1000_STATUS_LU)
+ hw->get_link_status = FALSE;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = FALSE;
+ /* Check if there was a downshift; this must be checked immediately after
+ * link-up */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+ * reversal workaround. We disable interrupts first, and upon
+ * returning, restore the device's interrupt state to its previous
+ * value except for the link status change interrupt, which will
+ * happen due to the execution of this workaround.
+ */
+
+ if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ E1000_WRITE_REG(hw, IMC, 0xffffffff);
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ icr = E1000_READ_REG(hw, ICR);
+ E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC));
+ E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, FALSE);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if(!hw->autoneg) return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, TRUE);
+
+ /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if(hw->mac_type >= e1000_82544)
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if(ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if(ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if(hw->tbi_compatibility_en) {
+ uint16_t speed, duplex;
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if(speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if(hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it off. */
+ rctl = E1000_READ_REG(hw, RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, RCTL, rctl);
+ hw->tbi_compatibility_on = FALSE;
+ }
+ } else {
+ /* If TBI compatibility was previously off, turn it on. For
+ * compatibility with a TBI link partner, we will store bad
+ * packets. Some frames have an additional byte on the end and
+ * will look like CRC errors to the hardware.
+ */
+ if(!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = TRUE;
+ rctl = E1000_READ_REG(hw, RCTL);
+ rctl |= E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, RCTL, rctl);
+ }
+ }
+ }
+ }
+ /* If we don't have link (auto-negotiation failed or link partner cannot
+ * auto-negotiate), the cable is plugged in (we have signal), and our
+ * link partner is not trying to auto-negotiate with us (we are receiving
+ * idles or data), we need to force link up. We also need to give
+ * auto-negotiation time to complete, in case the cable was just plugged
+ * in. The autoneg_failed flag does this.
+ */
+ else if((((hw->media_type == e1000_media_type_fiber) &&
+ ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if(hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ return 0;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if(ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ }
+ /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable forced link in the
+ * Device Control register in an attempt to auto-negotiate with our link
+ * partner.
+ */
+ else if(((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
+ E1000_WRITE_REG(hw, TXCW, hw->txcw);
+ E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_link_down = FALSE;
+ }
+ /* If we force link for a non-auto-negotiating switch, check link status
+ * based on MAC synchronization for internal serdes media type.
+ */
+ else if((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ if(E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+ if(!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_link_down = FALSE;
+ DEBUGOUT("SERDES: Link is up.\n");
+ }
+ } else {
+ hw->serdes_link_down = TRUE;
+ DEBUGOUT("SERDES: Link is down.\n");
+ }
+ }
+ if((hw->media_type == e1000_media_type_internal_serdes) &&
+ (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+ hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+int32_t
+e1000_get_speed_and_duplex(struct e1000_hw *hw,
+ uint16_t *speed,
+ uint16_t *duplex)
+{
+ uint32_t status;
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex");
+
+ if(hw->mac_type >= e1000_82543) {
+ status = E1000_READ_REG(hw, STATUS);
+ if(status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if(status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if(status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\r\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT(" Half Duplex\r\n");
+ }
+ } else {
+ DEBUGOUT("1000 Mbs, Full Duplex\r\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if(hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if(ret_val)
+ return ret_val;
+ if((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+ (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t i;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+ DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for(i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+ if(phy_data & MII_SR_AUTONEG_COMPLETE) {
+ return E1000_SUCCESS;
+ }
+ msec_delay(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_raise_mdi_clk(struct e1000_hw *hw,
+ uint32_t *ctrl)
+{
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ udelay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_lower_mdi_clk(struct e1000_hw *hw,
+ uint32_t *ctrl)
+{
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ udelay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void
+e1000_shift_out_mdi_bits(struct e1000_hw *hw,
+ uint32_t data,
+ uint16_t count)
+{
+ uint32_t ctrl;
+ uint32_t mask;
+
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
+
+ ctrl = E1000_READ_REG(hw, CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while(mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if(data & mask) ctrl |= E1000_CTRL_MDIO;
+ else ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ udelay(10);
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ mask = mask >> 1;
+ }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in in MSB to LSB order.
+******************************************************************************/
+static uint16_t
+e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+ uint32_t ctrl;
+ uint16_t data = 0;
+ uint8_t i;
+
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = E1000_READ_REG(hw, CTRL);
+
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for(data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = E1000_READ_REG(hw, CTRL);
+ /* Check to see if we shifted in a "1". */
+ if(ctrl & E1000_CTRL_MDIO) data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ return data;
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register. If the register is on a specific
+* non-zero page, that page is selected first.
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+int32_t
+e1000_read_phy_reg(struct e1000_hw *hw,
+ uint32_t reg_addr,
+ uint16_t *phy_data)
+{
+ uint32_t ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg");
+
+ if(hw->phy_type == e1000_phy_igp &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (uint16_t)reg_addr);
+ if(ret_val) {
+ return ret_val;
+ }
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+int32_t
+e1000_read_phy_reg_ex(struct e1000_hw *hw,
+ uint32_t reg_addr,
+ uint16_t *phy_data)
+{
+ uint32_t i;
+ uint32_t mdic = 0;
+ const uint32_t phy_addr = 1;
+
+ DEBUGFUNC("e1000_read_phy_reg_ex");
+
+ if(reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if(hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ E1000_WRITE_REG(hw, MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for(i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = E1000_READ_REG(hw, MDIC);
+ if(mdic & E1000_MDIC_READY) break;
+ }
+ if(!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if(mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (uint16_t) mdic;
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+ * register address.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
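+
+/* Editor's note: illustrative sketch, not part of the driver. For the
+ * bit-banged path above, the 14 bits shifted out after the preamble form
+ * the first half of an IEEE 802.3 clause 22 management frame. The helper
+ * below just labels those fields; only the PHY_SOF and PHY_OP_READ
+ * constants already used above are assumed, and the code is kept under
+ * #if 0 so it is never built.
+ */
+#if 0
+static uint32_t
+e1000_mii_read_cmd_sketch(uint32_t phy_addr, uint32_t reg_addr)
+{
+ /* <SOF:2><OpCode:2><PHY addr:5><Reg addr:5>, shifted out MSB first.
+ * The MDIO pin is then released and 18 bits are clocked in: two
+ * turnaround bits (discarded) followed by the 16-bit register value. */
+ return (PHY_SOF << 12) | /* start of frame, bits 13:12 */
+ (PHY_OP_READ << 10) | /* read opcode, bits 11:10 */
+ (phy_addr << 5) | /* PHY address, bits 9:5 */
+ reg_addr; /* register address, bits 4:0 */
+}
+#endif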
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+int32_t
+e1000_write_phy_reg(struct e1000_hw *hw,
+ uint32_t reg_addr,
+ uint16_t phy_data)
+{
+ uint32_t ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg");
+
+ if(hw->phy_type == e1000_phy_igp &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (uint16_t)reg_addr);
+ if(ret_val) {
+ return ret_val;
+ }
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+int32_t
+e1000_write_phy_reg_ex(struct e1000_hw *hw,
+ uint32_t reg_addr,
+ uint16_t phy_data)
+{
+ uint32_t i;
+ uint32_t mdic = 0;
+ const uint32_t phy_addr = 1;
+
+ DEBUGFUNC("e1000_write_phy_reg_ex");
+
+ if(reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if(hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data intended
+ * for the PHY register in the MDI Control register. The MAC will take
+ * care of interfacing with the PHY to send the desired data.
+ */
+ mdic = (((uint32_t) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ E1000_WRITE_REG(hw, MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed */
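+ /* (640 polls of 5 usec each gives roughly a 3.2 msec timeout) */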
+ for(i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = E1000_READ_REG(hw, MDIC);
+ if(mdic & E1000_MDIC_READY) break;
+ }
+ if(!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (uint32_t) phy_data;
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ uint32_t ctrl, ctrl_ext;
+ uint32_t led_ctrl;
+
+ DEBUGFUNC("e1000_phy_hw_reset");
+
+ DEBUGOUT("Resetting Phy...\n");
+
+ if(hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ */
+ ctrl = E1000_READ_REG(hw, CTRL);
+ E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(10);
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ } else {
+ /* Read the Extended Device Control Register, set the SDP4 pin as an
+ * output and drive it low to put the PHY into reset. Then drive it
+ * high to take the PHY out of reset.
+ */
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ }
+ udelay(150);
+
+ if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = E1000_READ_REG(hw, LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ }
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+int32_t
+e1000_phy_reset(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_phy_reset");
+
+ if(hw->mac_type != e1000_82541_rev_2) {
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ udelay(1);
+ } else e1000_phy_hw_reset(hw);
+
+ if(hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ int32_t phy_init_status, ret_val;
+ uint16_t phy_id_high, phy_id_low;
+ boolean_t match = FALSE;
+
+ DEBUGFUNC("e1000_detect_gig_phy");
+
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if(ret_val)
+ return ret_val;
+
+ hw->phy_id = (uint32_t) (phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if(ret_val)
+ return ret_val;
+
+ hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
+
+ switch(hw->mac_type) {
+ case e1000_82543:
+ if(hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+ break;
+ case e1000_82544:
+ if(hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if(hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+ break;
+ default:
+ DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ DEBUGFUNC("e1000_phy_reset_dsp");
+
+ do {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if(ret_val) break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if(ret_val) break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if(ret_val) break;
+ ret_val = E1000_SUCCESS;
+ } while(0);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ int32_t ret_val;
+ uint16_t phy_data, polarity, min_length, max_length, average;
+
+ DEBUGFUNC("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = hw->speed_downgraded;
+
+ /* The extended 10BASE-T distance setting is not applicable to the
+ * IGP01E1000, so report the normal value. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ /* The IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT;
+
+ if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT;
+ phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if(ret_val)
+ return ret_val;
+
+ /* translate to the old reporting method */
+ average = (max_length + min_length) / 2;
+
+ if(average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if(average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if(average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if(average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for the M88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ int32_t ret_val;
+ uint16_t phy_data, polarity;
+
+ DEBUGFUNC("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT;
+ phy_info->polarity_correction =
+ (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if(ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT;
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT;
+
+ phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if(hw->media_type != e1000_media_type_copper) {
+ DEBUGOUT("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
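+ /* Read the status register twice; the Link Status bit is latched low,
+ * so the first read can return a stale link indication.
+ */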
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ DEBUGOUT("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if(hw->phy_type == e1000_phy_igp)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+int32_t
+e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_settings");
+
+ if(!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t eecd = E1000_READ_REG(hw, EECD);
+ uint16_t eeprom_size;
+
+ DEBUGFUNC("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if(eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ eeprom->word_size = 64;
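+ /* The actual SPI EEPROM size is read from the part itself below; if the
+ * read fails, the 64-word default set above is kept.
+ */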
+ if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) {
+ eeprom_size &= EEPROM_SIZE_MASK;
+
+ switch (eeprom_size) {
+ case EEPROM_SIZE_16KB:
+ eeprom->word_size = 8192;
+ break;
+ case EEPROM_SIZE_8KB:
+ eeprom->word_size = 4096;
+ break;
+ case EEPROM_SIZE_4KB:
+ eeprom->word_size = 2048;
+ break;
+ case EEPROM_SIZE_2KB:
+ eeprom->word_size = 1024;
+ break;
+ case EEPROM_SIZE_1KB:
+ eeprom->word_size = 512;
+ break;
+ case EEPROM_SIZE_512B:
+ eeprom->word_size = 256;
+ break;
+ case EEPROM_SIZE_128B:
+ default:
+ eeprom->word_size = 64;
+ break;
+ }
+ }
+ }
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_raise_ee_clk(struct e1000_hw *hw,
+ uint32_t *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_lower_ee_clk(struct e1000_hw *hw,
+ uint32_t *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void
+e1000_shift_out_ee_bits(struct e1000_hw *hw,
+ uint16_t data,
+ uint16_t count)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t eecd;
+ uint32_t mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd = E1000_READ_REG(hw, EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if(data & mask)
+ eecd |= E1000_EECD_DI;
+
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while(mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ E1000_WRITE_REG(hw, EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * count - number of bits to shift in
+ *****************************************************************************/
+static uint16_t
+e1000_shift_in_ee_bits(struct e1000_hw *hw,
+ uint16_t count)
+{
+ uint32_t eecd;
+ uint32_t i;
+ uint16_t data;
+
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
+
+ eecd = E1000_READ_REG(hw, EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for(i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
+
+ eecd = E1000_READ_REG(hw, EECD);
+
+ eecd &= ~(E1000_EECD_DI);
+ if(eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_ee_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Requests EEPROM access (on MACs newer than the 82544), lowers the EEPROM
+ * clock, clears the data-in pin, and sets up chip select for the EEPROM type
+ * in use. This function should be called before issuing a command to the
+ * EEPROM.
+ *****************************************************************************/
+static int32_t
+e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t eecd, i=0;
+
+ DEBUGFUNC("e1000_acquire_eeprom");
+
+ eecd = E1000_READ_REG(hw, EECD);
+
+ /* Request EEPROM Access */
+ if(hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ eecd = E1000_READ_REG(hw, EECD);
+ while((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = E1000_READ_REG(hw, EECD);
+ }
+ if(!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, EECD, eecd);
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t eecd;
+
+ eecd = E1000_READ_REG(hw, EECD);
+
+ if(eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+ } else if(eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_release_eeprom(struct e1000_hw *hw)
+{
+ uint32_t eecd;
+
+ DEBUGFUNC("e1000_release_eeprom");
+
+ eecd = E1000_READ_REG(hw, EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ E1000_WRITE_REG(hw, EECD, eecd);
+
+ udelay(hw->eeprom.delay_usec);
+ } else if(hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ E1000_WRITE_REG(hw, EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if(hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ }
+}
+
+/******************************************************************************
+ * Polls the SPI EEPROM's status register until the part reports ready.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ uint16_t retry_count = 0;
+ uint8_t spi_stat_reg;
+
+ DEBUGFUNC("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while(retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+ * only 0-5mSec on 5V devices)
+ */
+ if(retry_count >= EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads one or more 16 bit words from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to read
+ * words - number of words to read
+ * data - buffer that receives the words read from the EEPROM
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom(struct e1000_hw *hw,
+ uint16_t offset,
+ uint16_t words,
+ uint16_t *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t i = 0;
+
+ DEBUGFUNC("e1000_read_eeprom");
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Prepare the EEPROM for reading */
+ if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if(eeprom->type == e1000_eeprom_spi) {
+ uint16_t word_in;
+ uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if(e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
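+ /* SPI EEPROMs are byte addressed, so the 16-bit word offset is doubled. */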
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if(eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+int32_t
+e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ uint16_t checksum = 0;
+ uint16_t i, eeprom_data;
+
+ DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+ for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+ if(checksum == (uint16_t) EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ DEBUGOUT("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+int32_t
+e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ uint16_t checksum = 0;
+ uint16_t i, eeprom_data;
+
+ DEBUGFUNC("e1000_update_eeprom_checksum");
+
+ for(i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (uint16_t) EEPROM_SUM - checksum;
+ if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ DEBUGOUT("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to the 16 bit words to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom(struct e1000_hw *hw,
+ uint16_t offset,
+ uint16_t words,
+ uint16_t *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ int32_t status = 0;
+
+ DEBUGFUNC("e1000_write_eeprom");
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if(eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msec_delay(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_spi(struct e1000_hw *hw,
+ uint16_t offset,
+ uint16_t words,
+ uint16_t *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t widx = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_spi");
+
+ while (widx < words) {
+ uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+ if(e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+ e1000_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8-bit opcode) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2),
+ eeprom->address_bits);
+
+ /* Send the data */
+
+ /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ while (widx < words) {
+ uint16_t word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
+
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass a new address.
+ */
+ if((((offset + widx)*2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_microwire(struct e1000_hw *hw,
+ uint16_t offset,
+ uint16_t words,
+ uint16_t *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ uint32_t eecd;
+ uint16_t words_written = 0;
+ uint16_t i = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (uint16_t)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for(i = 0; i < 200; i++) {
+ eecd = E1000_READ_REG(hw, EECD);
+ if(eecd & E1000_EECD_DO) break;
+ udelay(50);
+ }
+ if(i == 200) {
+ DEBUGOUT("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (uint16_t)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads the adapter's part number from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * part_num - Adapter's part number
+ *****************************************************************************/
+int32_t
+e1000_read_part_num(struct e1000_hw *hw,
+ uint32_t *part_num)
+{
+ uint16_t offset = EEPROM_PBA_BYTE_1;
+ uint16_t eeprom_data;
+
+ DEBUGFUNC("e1000_read_part_num");
+
+ /* Get word 0 from EEPROM */
+ if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ /* Save word 0 in upper half of part_num */
+ *part_num = (uint32_t) (eeprom_data << 16);
+
+ /* Get word 1 from EEPROM */
+ if(e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ /* Save word 1 in lower half of part_num */
+ *part_num |= eeprom_data;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_read_mac_addr(struct e1000_hw * hw)
+{
+ uint16_t offset;
+ uint16_t eeprom_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
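+ /* Each 16-bit EEPROM word holds two bytes of the MAC address, low byte
+ * first. */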
+ for(i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
+ }
+ if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) &&
+ (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1))
+ hw->perm_mac_addr[5] ^= 0x01;
+
+ for(i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+void
+e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+ uint32_t i;
+
+ DEBUGFUNC("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for(i = 1; i < E1000_RAR_ENTRIES; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+}
+
+/******************************************************************************
+ * Updates the MAC's list of multicast addresses.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr_list - the list of new multicast addresses
+ * mc_addr_count - number of addresses
+ * pad - number of bytes between addresses in the list
+ * rar_used_count - offset where to start adding mc addresses into the RAR's
+ *
+ * The given list replaces any existing list. Clears the last 15 receive
+ * address registers and the multicast table. Uses receive address registers
+ * for the first 15 multicast addresses, and hashes the rest into the
+ * multicast table.
+ *****************************************************************************/
+void
+e1000_mc_addr_list_update(struct e1000_hw *hw,
+ uint8_t *mc_addr_list,
+ uint32_t mc_addr_count,
+ uint32_t pad,
+ uint32_t rar_used_count)
+{
+ uint32_t hash_value;
+ uint32_t i;
+
+ DEBUGFUNC("e1000_mc_addr_list_update");
+
+ /* Set the new number of MC addresses that we are being requested to use. */
+ hw->num_mc_addrs = mc_addr_count;
+
+ /* Clear RAR[1-15] */
+ DEBUGOUT(" Clearing RAR[1-15]\n");
+ for(i = rar_used_count; i < E1000_RAR_ENTRIES; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+
+ /* Clear the MTA */
+ DEBUGOUT(" Clearing MTA\n");
+ for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ }
+
+ /* Add the new addresses */
+ for(i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 1],
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 2],
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 3],
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 4],
+ mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 5]);
+
+ hash_value = e1000_hash_mc_addr(hw,
+ mc_addr_list +
+ (i * (ETH_LENGTH_OF_ADDRESS + pad)));
+
+ DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+
+ /* Place this multicast address in the RAR if there is room,
+ * else put it in the MTA.
+ */
+ if(rar_used_count < E1000_RAR_ENTRIES) {
+ e1000_rar_set(hw,
+ mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
+ rar_used_count);
+ rar_used_count++;
+ } else {
+ e1000_mta_set(hw, hash_value);
+ }
+ }
+ DEBUGOUT("MC Update Complete\n");
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+uint32_t
+e1000_hash_mc_addr(struct e1000_hw *hw,
+ uint8_t *mc_addr)
+{
+ uint32_t hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ break;
+ case 1:
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ break;
+ case 2:
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ break;
+ case 3:
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ break;
+ }
+
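+ /* The multicast table holds 4096 bits, so only the low 12 bits of the
+ * hash are used.
+ */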
+ hash_value &= 0xFFF;
+ return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void
+e1000_mta_set(struct e1000_hw *hw,
+ uint32_t hash_value)
+{
+ uint32_t hash_bit, hash_reg;
+ uint32_t mta;
+ uint32_t temp;
+
+ /* The MTA is a register array of 128 32-bit registers.
+ * It is treated like an array of 4096 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+ * register is determined by the lower 5 bits of the value.
+ */
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+
+ mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ /* If we are on an 82544 and we are trying to write an odd offset
+ * in the MTA, save off the previous entry before writing and
+ * restore the old value after writing.
+ */
+ if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+e1000_rar_set(struct e1000_hw *hw,
+ uint8_t *addr,
+ uint32_t index)
+{
+ uint32_t rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((uint32_t) addr[0] |
+ ((uint32_t) addr[1] << 8) |
+ ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
+
+ rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8) | E1000_RAH_AV);
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+e1000_write_vfta(struct e1000_hw *hw,
+ uint32_t offset,
+ uint32_t value)
+{
+ uint32_t temp;
+
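+ /* As with the MTA on the 82544 (see e1000_mta_set), save the adjacent
+ * (even) entry before writing an odd offset and restore it afterwards.
+ */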
+ if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_vfta(struct e1000_hw *hw)
+{
+ uint32_t offset;
+
+ for(offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++)
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+}
+
+static int32_t
+e1000_id_led_init(struct e1000_hw * hw)
+{
+ uint32_t ledctl;
+ const uint32_t ledctl_mask = 0x000000FF;
+ const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ uint16_t eeprom_data, i, temp;
+ const uint16_t led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init");
+
+ if(hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = E1000_READ_REG(hw, LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if(e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ if((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT;
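+ /* Each LED has a 4-bit setting in the EEPROM word and an 8-bit field in
+ * LEDCTL, hence the (i << 2) and (i << 3) shifts below.
+ */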
+ for(i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch(temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch(temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_setup_led(struct e1000_hw *hw)
+{
+ uint32_t ledctl;
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_setup_led");
+
+ switch(hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (uint16_t)(hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if(ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if(hw->media_type == e1000_media_type_fiber) {
+ ledctl = E1000_READ_REG(hw, LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_WRITE_REG(hw, LEDCTL, ledctl);
+ } else if(hw->media_type == e1000_media_type_copper)
+ E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_cleanup_led(struct e1000_hw *hw)
+{
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_cleanup_led");
+
+ switch(hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if(ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ /* Restore LEDCTL settings */
+ E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_on(struct e1000_hw *hw)
+{
+ uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+ DEBUGFUNC("e1000_led_on");
+
+ switch(hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if(hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if(hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if(hw->media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_off(struct e1000_hw *hw)
+{
+ uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+ DEBUGFUNC("e1000_led_off");
+
+ switch(hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if(hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if(hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if(hw->media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ E1000_WRITE_REG(hw, CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile uint32_t temp;
+
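+ /* The statistics registers clear on read, so reading each one zeroes it;
+ * the volatile temp keeps the compiler from discarding the reads.
+ */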
+ temp = E1000_READ_REG(hw, CRCERRS);
+ temp = E1000_READ_REG(hw, SYMERRS);
+ temp = E1000_READ_REG(hw, MPC);
+ temp = E1000_READ_REG(hw, SCC);
+ temp = E1000_READ_REG(hw, ECOL);
+ temp = E1000_READ_REG(hw, MCC);
+ temp = E1000_READ_REG(hw, LATECOL);
+ temp = E1000_READ_REG(hw, COLC);
+ temp = E1000_READ_REG(hw, DC);
+ temp = E1000_READ_REG(hw, SEC);
+ temp = E1000_READ_REG(hw, RLEC);
+ temp = E1000_READ_REG(hw, XONRXC);
+ temp = E1000_READ_REG(hw, XONTXC);
+ temp = E1000_READ_REG(hw, XOFFRXC);
+ temp = E1000_READ_REG(hw, XOFFTXC);
+ temp = E1000_READ_REG(hw, FCRUC);
+ temp = E1000_READ_REG(hw, PRC64);
+ temp = E1000_READ_REG(hw, PRC127);
+ temp = E1000_READ_REG(hw, PRC255);
+ temp = E1000_READ_REG(hw, PRC511);
+ temp = E1000_READ_REG(hw, PRC1023);
+ temp = E1000_READ_REG(hw, PRC1522);
+ temp = E1000_READ_REG(hw, GPRC);
+ temp = E1000_READ_REG(hw, BPRC);
+ temp = E1000_READ_REG(hw, MPRC);
+ temp = E1000_READ_REG(hw, GPTC);
+ temp = E1000_READ_REG(hw, GORCL);
+ temp = E1000_READ_REG(hw, GORCH);
+ temp = E1000_READ_REG(hw, GOTCL);
+ temp = E1000_READ_REG(hw, GOTCH);
+ temp = E1000_READ_REG(hw, RNBC);
+ temp = E1000_READ_REG(hw, RUC);
+ temp = E1000_READ_REG(hw, RFC);
+ temp = E1000_READ_REG(hw, ROC);
+ temp = E1000_READ_REG(hw, RJC);
+ temp = E1000_READ_REG(hw, TORL);
+ temp = E1000_READ_REG(hw, TORH);
+ temp = E1000_READ_REG(hw, TOTL);
+ temp = E1000_READ_REG(hw, TOTH);
+ temp = E1000_READ_REG(hw, TPR);
+ temp = E1000_READ_REG(hw, TPT);
+ temp = E1000_READ_REG(hw, PTC64);
+ temp = E1000_READ_REG(hw, PTC127);
+ temp = E1000_READ_REG(hw, PTC255);
+ temp = E1000_READ_REG(hw, PTC511);
+ temp = E1000_READ_REG(hw, PTC1023);
+ temp = E1000_READ_REG(hw, PTC1522);
+ temp = E1000_READ_REG(hw, MPTC);
+ temp = E1000_READ_REG(hw, BPTC);
+
+ if(hw->mac_type < e1000_82543) return;
+
+ temp = E1000_READ_REG(hw, ALGNERRC);
+ temp = E1000_READ_REG(hw, RXERRC);
+ temp = E1000_READ_REG(hw, TNCRS);
+ temp = E1000_READ_REG(hw, CEXTERR);
+ temp = E1000_READ_REG(hw, TSCTC);
+ temp = E1000_READ_REG(hw, TSCTFC);
+
+ if(hw->mac_type <= e1000_82544) return;
+
+ temp = E1000_READ_REG(hw, MGTPRC);
+ temp = E1000_READ_REG(hw, MGTPDC);
+ temp = E1000_READ_REG(hw, MGTPTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to TRUE. However, you must initialize hw->
+ * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void
+e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_adaptive");
+
+ if(hw->adaptive_ifs) {
+ if(!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = FALSE;
+ E1000_WRITE_REG(hw, AIT, 0);
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * (the transmit and collision counts since the last callback are taken from
+ * hw->tx_packet_delta and hw->collision_delta)
+ *****************************************************************************/
+void
+e1000_update_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_update_adaptive");
+
+ if(hw->adaptive_ifs) {
+ if((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if(hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = TRUE;
+ if(hw->current_ifs_val < hw->ifs_max_val) {
+ if(hw->current_ifs_val == 0)
+ hw->current_ifs_val = hw->ifs_min_val;
+ else
+ hw->current_ifs_val += hw->ifs_step_size;
+ E1000_WRITE_REG(hw, AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if(hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = FALSE;
+ E1000_WRITE_REG(hw, AIT, 0);
+ }
+ }
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
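+
+/* Usage sketch (illustrative only, assumed driver-side code, not part of the
+ * shared code): call e1000_reset_adaptive() once after e1000_init_hw(), then
+ * from the periodic watchdog update the deltas and call
+ * e1000_update_adaptive(), e.g.
+ *
+ *     e1000_reset_adaptive(hw);
+ *     ...
+ *     hw->tx_packet_delta = tx_since_last_watchdog;
+ *     hw->collision_delta = collisions_since_last_watchdog;
+ *     e1000_update_adaptive(hw);
+ *
+ * tx_since_last_watchdog and collisions_since_last_watchdog are hypothetical
+ * driver counters.
+ */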
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * stats - Struct containing the hardware statistics counters to adjust
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void
+e1000_tbi_adjust_stats(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats,
+ uint32_t frame_len,
+ uint8_t *mac_addr)
+{
+ uint64_t carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if(carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if(*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if(frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if(stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if(frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if(frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if(frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if(frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if(frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if(frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
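+
+/* Worked example of the carry handling above (illustrative): if gorcl is
+ * 0xFFFFFFF0 before the addition, carry_bit is non-zero (bit 31 set). Adding
+ * a frame_len of 0x20 wraps gorcl to 0x00000010, so bit 31 is now clear and
+ * the lost carry out of the low 32 bits is accounted for by incrementing
+ * gorch.
+ */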
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_get_bus_info(struct e1000_hw *hw)
+{
+ uint32_t status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_unknown;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ default:
+ status = E1000_READ_REG(hw, STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if(hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/******************************************************************************
+ * Reads a value from one of the device's registers using port I/O (as opposed
+ * to memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to read from
+ *****************************************************************************/
+uint32_t
+e1000_read_reg_io(struct e1000_hw *hw,
+ uint32_t offset)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ return e1000_io_read(hw, io_data);
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+void
+e1000_write_reg_io(struct e1000_hw *hw,
+ uint32_t offset,
+ uint32_t value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
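+
+/* Access pattern sketch (illustrative): port I/O uses a two-register window
+ * in I/O space - the register offset is written to the address register at
+ * io_base and the data is then read from or written to the data register at
+ * io_base + 4. A port I/O read of the STATUS register, for example, would be:
+ *
+ *     e1000_io_write(hw, hw->io_base, E1000_STATUS);
+ *     status = e1000_io_read(hw, hw->io_base + 4);
+ *
+ * with status being a local uint32_t in the caller.
+ */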
+
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 phy's, the single value read from the PHY register is mapped onto a
+ * minimum/maximum range.
+ * For IGP phy's, the range is calculated from the AGC registers.
+ *****************************************************************************/
+int32_t
+e1000_get_cable_length(struct e1000_hw *hw,
+ uint16_t *min_length,
+ uint16_t *max_length)
+{
+ int32_t ret_val;
+ uint16_t agc_value = 0;
+ uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ uint16_t i, phy_data;
+ uint16_t cable_length;
+
+ DEBUGFUNC("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if(hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Array bound check. */
+ if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc;
+
+ /* Update minimal AGC value. */
+ if(min_agc > cur_agc)
+ min_agc = cur_agc;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
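+
+/* Illustrative example of the IGP path above (with an assumed table value):
+ * if the averaged AGC index selects a table entry
+ * e1000_igp_cable_length_table[agc_value] of 110, the reported range is
+ * (110 - IGP01E1000_AGC_RANGE) to (110 + IGP01E1000_AGC_RANGE) meters,
+ * clamped to zero on the low side.
+ */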
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For phy's older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register. For IGP phy's, this bit is valid only if link speed is
+ * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will
+ * return 0. If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+int32_t
+e1000_check_polarity(struct e1000_hw *hw,
+ uint16_t *polarity)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_check_polarity");
+
+ if(hw->phy_type == e1000_phy_m88) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+ *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT;
+ } else if(hw->phy_type == e1000_phy_igp) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 1 : 0;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if Downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * downshift - output parameter : 0 - No Downshift occurred.
+ * 1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For phy's older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register. For IGP phy's, it reads the Downgrade bit in the
+ * Link Health register. In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+int32_t
+e1000_check_downshift(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_check_downshift");
+
+ if(hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if(hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+ boolean_t link_up)
+{
+ int32_t ret_val;
+ uint16_t phy_data, phy_saved_data, speed, duplex, i;
+ uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
+ uint16_t min_length, max_length;
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+ if(hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if(link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if(ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if(speed == SPEED_1000) {
+
+ e1000_get_cable_length(hw, &min_length, &max_length);
+
+ if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+ min_length >= e1000_igp_cable_length_50) {
+
+ for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state = e1000_dsp_config_activated;
+ }
+
+ if((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+ (min_length < e1000_igp_cable_length_50)) {
+
+ uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ uint32_t idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ for(i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if(idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ hw->ffe_config_state = e1000_ffe_config_active;
+
+ ret_val = e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if(ret_val)
+ return ret_val;
+ break;
+ }
+
+ if(idle_errs)
+ ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if(hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if(ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if(ret_val)
+ return ret_val;
+
+ msec_delay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if(ret_val)
+ return ret_val;
+ for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if(ret_val)
+ return ret_val;
+
+ msec_delay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if(ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if(hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if(ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if(ret_val)
+ return ret_val;
+
+ msec_delay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if(ret_val)
+ return ret_val;
+
+ msec_delay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if(ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static int32_t
+e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t eeprom_data;
+
+ DEBUGFUNC("e1000_set_phy_mode");
+
+ if((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+ if(ret_val) {
+ return ret_val;
+ }
+
+ if((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+ if(ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = FALSE;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation advertisement
+ * is 10, 10/100, or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - TRUE to enable LPLU, FALSE to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_set_d3_lplu_state(struct e1000_hw *hw,
+ boolean_t active)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+ DEBUGFUNC("e1000_set_d3_lplu_state");
+
+ if(!((hw->mac_type == e1000_82541_rev_2) ||
+ (hw->mac_type == e1000_82547_rev_2)))
+ return E1000_SUCCESS;
+
+ /* During normal driver activity LPLU should not be used, or the link will
+ * be negotiated starting from the lowest speed (10 Mbps). The capability
+ * is intended for Dx power transitions and states */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ if(!active) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if(ret_val)
+ return ret_val;
+ }
+
+ } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
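+
+/* Usage sketch (illustrative, assumed driver-side code): a driver would
+ * typically call e1000_set_d3_lplu_state(hw, TRUE) while preparing the
+ * device for D3 and e1000_set_d3_lplu_state(hw, FALSE) when resuming to D0,
+ * so that LPLU is only in effect while the device is suspended.
+ */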
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static int32_t
+e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t default_page = 0;
+ uint16_t phy_data;
+
+ DEBUGFUNC("e1000_set_vco_speed");
+
+ switch(hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if(ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if(ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+static int32_t
+e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t mii_status_reg;
+ uint16_t i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if(ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if(ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the no-link condition has been met. */
+ for(i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+ msec_delay_irq(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ msec_delay_irq(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if(ret_val)
+ return ret_val;
+ msec_delay_irq(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if(ret_val)
+ return ret_val;
+ msec_delay_irq(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if(ret_val)
+ return ret_val;
+ msec_delay_irq(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if(ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for(i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if(ret_val)
+ return ret_val;
+
+ if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ msec_delay_irq(100);
+ }
+ return E1000_SUCCESS;
+}
+
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
new file mode 100644
index 000000000000..f397e637a3c5
--- /dev/null
+++ b/drivers/net/e1000/e1000_hw.h
@@ -0,0 +1,2144 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+ e1000_undefined = 0,
+ e1000_82542_rev2_0,
+ e1000_82542_rev2_1,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+ e1000_eeprom_uninitialized = 0,
+ e1000_eeprom_spi,
+ e1000_eeprom_microwire,
+ e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+ e1000_media_type_copper = 0,
+ e1000_media_type_fiber = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+ e1000_10_half = 0,
+ e1000_10_full = 1,
+ e1000_100_half = 2,
+ e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause = 1,
+ e1000_fc_tx_pause = 2,
+ e1000_fc_full = 3,
+ e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+/* PCI bus types */
+typedef enum {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+ e1000_cable_length_50 = 0,
+ e1000_cable_length_50_80,
+ e1000_cable_length_80_110,
+ e1000_cable_length_110_140,
+ e1000_cable_length_140,
+ e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+ e1000_igp_cable_length_10 = 10,
+ e1000_igp_cable_length_20 = 20,
+ e1000_igp_cable_length_30 = 30,
+ e1000_igp_cable_length_40 = 40,
+ e1000_igp_cable_length_50 = 50,
+ e1000_igp_cable_length_60 = 60,
+ e1000_igp_cable_length_70 = 70,
+ e1000_igp_cable_length_80 = 80,
+ e1000_igp_cable_length_90 = 90,
+ e1000_igp_cable_length_100 = 100,
+ e1000_igp_cable_length_110 = 110,
+ e1000_igp_cable_length_120 = 120,
+ e1000_igp_cable_length_130 = 130,
+ e1000_igp_cable_length_140 = 140,
+ e1000_igp_cable_length_150 = 150,
+ e1000_igp_cable_length_160 = 160,
+ e1000_igp_cable_length_170 = 170,
+ e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+ e1000_10bt_ext_dist_enable_normal = 0,
+ e1000_10bt_ext_dist_enable_lower,
+ e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+ e1000_downshift_normal = 0,
+ e1000_downshift_activated,
+ e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+ e1000_polarity_reversal_enabled = 0,
+ e1000_polarity_reversal_disabled,
+ e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+ e1000_auto_x_mode_manual_mdi = 0,
+ e1000_auto_x_mode_manual_mdix,
+ e1000_auto_x_mode_auto1,
+ e1000_auto_x_mode_auto2,
+ e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+ e1000_cable_length cable_length;
+ e1000_10bt_ext_dist_enable extended_10bt_distance;
+ e1000_rev_polarity cable_polarity;
+ e1000_downshift downshift;
+ e1000_polarity_reversal polarity_correction;
+ e1000_auto_x_mode mdix_mode;
+ e1000_1000t_rx_status local_rx;
+ e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+ uint32_t idle_errors;
+ uint32_t receive_errors;
+};
+
+struct e1000_eeprom_info {
+ e1000_eeprom_type type;
+ uint16_t word_size;
+ uint16_t opcode_bits;
+ uint16_t address_bits;
+ uint16_t delay_usec;
+ uint16_t page_size;
+};
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_EEPROM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+
+/* Function prototypes */
+/* Initialization */
+int32_t e1000_reset_hw(struct e1000_hw *hw);
+int32_t e1000_init_hw(struct e1000_hw *hw);
+int32_t e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+int32_t e1000_setup_link(struct e1000_hw *hw);
+int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
+int32_t e1000_check_for_link(struct e1000_hw *hw);
+int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t * speed, uint16_t * duplex);
+int32_t e1000_wait_autoneg(struct e1000_hw *hw);
+int32_t e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
+int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
+void e1000_phy_hw_reset(struct e1000_hw *hw);
+int32_t e1000_phy_reset(struct e1000_hw *hw);
+int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
+int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
+int32_t e1000_check_polarity(struct e1000_hw *hw, uint16_t *polarity);
+int32_t e1000_check_downshift(struct e1000_hw *hw);
+int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+void e1000_init_eeprom_params(struct e1000_hw *hw);
+int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
+int32_t e1000_read_mac_addr(struct e1000_hw * hw);
+
+/* Filters (multicast, vlan, receive) */
+void e1000_init_rx_addrs(struct e1000_hw *hw);
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
+uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
+void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+void e1000_clear_vfta(struct e1000_hw *hw);
+
+/* LED functions */
+int32_t e1000_setup_led(struct e1000_hw *hw);
+int32_t e1000_cleanup_led(struct e1000_hw *hw);
+int32_t e1000_led_on(struct e1000_hw *hw);
+int32_t e1000_led_off(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+/* Port I/O is only supported on 82544 and newer */
+uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
+int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
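+
+/* Illustrative use of the port I/O macros (not part of the shared code); the
+ * second argument is the register name without the E1000_ prefix, which the
+ * macro pastes back on, e.g.
+ *
+ *     uint32_t status = E1000_READ_REG_IO(hw, STATUS);
+ *     E1000_WRITE_REG_IO(hw, RCTL, rctl_value);
+ *
+ * where rctl_value is a hypothetical value obtained by the caller.
+ */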
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82547EI 0x1019
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+ (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
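+
+/* For reference, with the values above MAXIMUM_ETHERNET_PACKET_SIZE
+ * evaluates to 1518 - 4 = 1514 bytes and MINIMUM_ETHERNET_PACKET_SIZE to
+ * 64 - 4 = 60 bytes (frame size minus the 4-byte FCS). */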
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
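+
+/* Illustrative driver-side sketch (assumed usage, not part of this header):
+ * interrupts are enabled by writing the mask to the Interrupt Mask Set
+ * register and disabled by setting every mask bit in the Interrupt Mask
+ * Clear register, e.g.
+ *
+ *     E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK);
+ *     E1000_WRITE_REG(hw, IMC, 0xFFFFFFFF);
+ */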
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+ uint16_t length; /* Length of data DMAed into data buffer */
+ uint16_t csum; /* Packet checksum */
+ uint8_t status; /* Descriptor status */
+ uint8_t errors; /* Descriptor Errors */
+ uint16_t special;
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
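+
+/* Illustrative receive-path check (assumed driver-side code, not part of the
+ * shared code): a completed descriptor has E1000_RXD_STAT_DD set in status,
+ * and a frame with any E1000_RXD_ERR_FRAME_ERR_MASK bit set in errors is
+ * normally dropped, e.g.
+ *
+ *     if((rx_desc->status & E1000_RXD_STAT_DD) &&
+ *        !(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))
+ *         good_length = rx_desc->length;
+ *
+ * where rx_desc points at a struct e1000_rx_desc and good_length is a
+ * hypothetical driver variable.
+ */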
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ uint32_t data;
+ struct {
+ uint16_t length; /* Data buffer length */
+ uint8_t cso; /* Checksum offset */
+ uint8_t cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ uint32_t data;
+ struct {
+ uint8_t status; /* Descriptor status */
+ uint8_t css; /* Checksum start */
+ uint16_t special;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
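+
+/* Illustrative legacy transmit descriptor setup (assumed driver-side code,
+ * not part of the shared code): the 16-bit length shares lower.data with the
+ * command bits above, and the hardware reports completion by setting
+ * E1000_TXD_STAT_DD in upper.fields.status when E1000_TXD_CMD_RS was set:
+ *
+ *     tx_desc->buffer_addr = frame_dma_addr;
+ *     tx_desc->lower.data = frame_len | E1000_TXD_CMD_EOP |
+ *                           E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
+ *     tx_desc->upper.data = 0;
+ *
+ * frame_dma_addr and frame_len are hypothetical driver variables.
+ */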
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ uint32_t ip_config;
+ struct {
+ uint8_t ipcss; /* IP checksum start */
+ uint8_t ipcso; /* IP checksum offset */
+ uint16_t ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ uint32_t tcp_config;
+ struct {
+ uint8_t tucss; /* TCP checksum start */
+ uint8_t tucso; /* TCP checksum offset */
+ uint16_t tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ uint32_t cmd_and_length; /* */
+ union {
+ uint32_t data;
+ struct {
+ uint8_t status; /* Descriptor status */
+ uint8_t hdr_len; /* Header length */
+ uint16_t mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ uint64_t buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ uint32_t data;
+ struct {
+ uint16_t length; /* Data buffer length */
+ uint8_t typ_len_ext; /* */
+ uint8_t cmd; /* */
+ } flags;
+ } lower;
+ union {
+ uint32_t data;
+ struct {
+ uint8_t status; /* Descriptor status */
+ uint8_t popts; /* Packet Options */
+ uint16_t special; /* */
+ } fields;
+ } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+
+/* Receive Address Register */
+struct e1000_rar {
+ volatile uint32_t low; /* receive address low */
+ volatile uint32_t high; /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+ volatile uint32_t ipv4_addr; /* IP Address (RW) */
+ volatile uint32_t reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP6AT_SIZE 1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+ volatile uint8_t ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+ volatile uint32_t length; /* Flexible Filter Length (RW) */
+ volatile uint32_t reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+ volatile uint32_t mask; /* Flexible Filter Mask (RW) */
+ volatile uint32_t reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+ volatile uint32_t value; /* Flexible Filter Value (RW) */
+ volatile uint32_t reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTRL E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS E1000_STATUS
+#define E1000_82542_EECD E1000_EECD
+#define E1000_82542_EERD E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA E1000_FLA
+#define E1000_82542_MDIC E1000_MDIC
+#define E1000_82542_FCAL E1000_FCAL
+#define E1000_82542_FCAH E1000_FCAH
+#define E1000_82542_FCT E1000_FCT
+#define E1000_82542_VET E1000_VET
+#define E1000_82542_RA 0x00040
+#define E1000_82542_ICR E1000_ICR
+#define E1000_82542_ITR E1000_ITR
+#define E1000_82542_ICS E1000_ICS
+#define E1000_82542_IMS E1000_IMS
+#define E1000_82542_IMC E1000_IMC
+#define E1000_82542_RCTL E1000_RCTL
+#define E1000_82542_RDTR 0x00108
+#define E1000_82542_RDBAL 0x00110
+#define E1000_82542_RDBAH 0x00114
+#define E1000_82542_RDLEN 0x00118
+#define E1000_82542_RDH 0x00120
+#define E1000_82542_RDT 0x00128
+#define E1000_82542_FCRTH 0x00160
+#define E1000_82542_FCRTL 0x00168
+#define E1000_82542_FCTTV E1000_FCTTV
+#define E1000_82542_TXCW E1000_TXCW
+#define E1000_82542_RXCW E1000_RXCW
+#define E1000_82542_MTA 0x00200
+#define E1000_82542_TCTL E1000_TCTL
+#define E1000_82542_TIPG E1000_TIPG
+#define E1000_82542_TDBAL 0x00420
+#define E1000_82542_TDBAH 0x00424
+#define E1000_82542_TDLEN 0x00428
+#define E1000_82542_TDH 0x00430
+#define E1000_82542_TDT 0x00438
+#define E1000_82542_TIDV 0x00440
+#define E1000_82542_TBT E1000_TBT
+#define E1000_82542_AIT E1000_AIT
+#define E1000_82542_VFTA 0x00600
+#define E1000_82542_LEDCTL E1000_LEDCTL
+#define E1000_82542_PBA E1000_PBA
+#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RADV E1000_RADV
+#define E1000_82542_RSRPD E1000_RSRPD
+#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_TDFHS E1000_TDFHS
+#define E1000_82542_TDFTS E1000_TDFTS
+#define E1000_82542_TDFPC E1000_TDFPC
+#define E1000_82542_TXDCTL E1000_TXDCTL
+#define E1000_82542_TADV E1000_TADV
+#define E1000_82542_TSPMT E1000_TSPMT
+#define E1000_82542_CRCERRS E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS E1000_SYMERRS
+#define E1000_82542_RXERRC E1000_RXERRC
+#define E1000_82542_MPC E1000_MPC
+#define E1000_82542_SCC E1000_SCC
+#define E1000_82542_ECOL E1000_ECOL
+#define E1000_82542_MCC E1000_MCC
+#define E1000_82542_LATECOL E1000_LATECOL
+#define E1000_82542_COLC E1000_COLC
+#define E1000_82542_DC E1000_DC
+#define E1000_82542_TNCRS E1000_TNCRS
+#define E1000_82542_SEC E1000_SEC
+#define E1000_82542_CEXTERR E1000_CEXTERR
+#define E1000_82542_RLEC E1000_RLEC
+#define E1000_82542_XONRXC E1000_XONRXC
+#define E1000_82542_XONTXC E1000_XONTXC
+#define E1000_82542_XOFFRXC E1000_XOFFRXC
+#define E1000_82542_XOFFTXC E1000_XOFFTXC
+#define E1000_82542_FCRUC E1000_FCRUC
+#define E1000_82542_PRC64 E1000_PRC64
+#define E1000_82542_PRC127 E1000_PRC127
+#define E1000_82542_PRC255 E1000_PRC255
+#define E1000_82542_PRC511 E1000_PRC511
+#define E1000_82542_PRC1023 E1000_PRC1023
+#define E1000_82542_PRC1522 E1000_PRC1522
+#define E1000_82542_GPRC E1000_GPRC
+#define E1000_82542_BPRC E1000_BPRC
+#define E1000_82542_MPRC E1000_MPRC
+#define E1000_82542_GPTC E1000_GPTC
+#define E1000_82542_GORCL E1000_GORCL
+#define E1000_82542_GORCH E1000_GORCH
+#define E1000_82542_GOTCL E1000_GOTCL
+#define E1000_82542_GOTCH E1000_GOTCH
+#define E1000_82542_RNBC E1000_RNBC
+#define E1000_82542_RUC E1000_RUC
+#define E1000_82542_RFC E1000_RFC
+#define E1000_82542_ROC E1000_ROC
+#define E1000_82542_RJC E1000_RJC
+#define E1000_82542_MGTPRC E1000_MGTPRC
+#define E1000_82542_MGTPDC E1000_MGTPDC
+#define E1000_82542_MGTPTC E1000_MGTPTC
+#define E1000_82542_TORL E1000_TORL
+#define E1000_82542_TORH E1000_TORH
+#define E1000_82542_TOTL E1000_TOTL
+#define E1000_82542_TOTH E1000_TOTH
+#define E1000_82542_TPR E1000_TPR
+#define E1000_82542_TPT E1000_TPT
+#define E1000_82542_PTC64 E1000_PTC64
+#define E1000_82542_PTC127 E1000_PTC127
+#define E1000_82542_PTC255 E1000_PTC255
+#define E1000_82542_PTC511 E1000_PTC511
+#define E1000_82542_PTC1023 E1000_PTC1023
+#define E1000_82542_PTC1522 E1000_PTC1522
+#define E1000_82542_MPTC E1000_MPTC
+#define E1000_82542_BPTC E1000_BPTC
+#define E1000_82542_TSCTC E1000_TSCTC
+#define E1000_82542_TSCTFC E1000_TSCTFC
+#define E1000_82542_RXCSUM E1000_RXCSUM
+#define E1000_82542_WUC E1000_WUC
+#define E1000_82542_WUFC E1000_WUFC
+#define E1000_82542_WUS E1000_WUS
+#define E1000_82542_MANC E1000_MANC
+#define E1000_82542_IPAV E1000_IPAV
+#define E1000_82542_IP4AT E1000_IP4AT
+#define E1000_82542_IP6AT E1000_IP6AT
+#define E1000_82542_WUPL E1000_WUPL
+#define E1000_82542_WUPM E1000_WUPM
+#define E1000_82542_FFLT E1000_FFLT
+#define E1000_82542_TDFH 0x08010
+#define E1000_82542_TDFT 0x08018
+#define E1000_82542_FFMT E1000_FFMT
+#define E1000_82542_FFVT E1000_FFVT
+#define E1000_82542_HOST_IF E1000_HOST_IF
+
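+/* Minimal illustrative sketch (the helper below is hypothetical, not part of
+ * the driver): shared code normally picks between the common offset and the
+ * 82542-specific one at run time, based on the MAC type detected earlier.
+ * This assumes the e1000_mac_type values e1000_82542_rev2_0/_rev2_1 declared
+ * earlier in this header.
+ */
+static inline uint32_t e1000_mta_base_offset(e1000_mac_type mac_type)
+{
+	if (mac_type == e1000_82542_rev2_0 || mac_type == e1000_82542_rev2_1)
+		return E1000_82542_MTA;	/* 0x00200 on the 82542 */
+	return E1000_MTA;		/* 0x05200 on later MACs */
+}
+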
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ uint64_t crcerrs;
+ uint64_t algnerrc;
+ uint64_t symerrs;
+ uint64_t rxerrc;
+ uint64_t mpc;
+ uint64_t scc;
+ uint64_t ecol;
+ uint64_t mcc;
+ uint64_t latecol;
+ uint64_t colc;
+ uint64_t dc;
+ uint64_t tncrs;
+ uint64_t sec;
+ uint64_t cexterr;
+ uint64_t rlec;
+ uint64_t xonrxc;
+ uint64_t xontxc;
+ uint64_t xoffrxc;
+ uint64_t xofftxc;
+ uint64_t fcruc;
+ uint64_t prc64;
+ uint64_t prc127;
+ uint64_t prc255;
+ uint64_t prc511;
+ uint64_t prc1023;
+ uint64_t prc1522;
+ uint64_t gprc;
+ uint64_t bprc;
+ uint64_t mprc;
+ uint64_t gptc;
+ uint64_t gorcl;
+ uint64_t gorch;
+ uint64_t gotcl;
+ uint64_t gotch;
+ uint64_t rnbc;
+ uint64_t ruc;
+ uint64_t rfc;
+ uint64_t roc;
+ uint64_t rjc;
+ uint64_t mgprc;
+ uint64_t mgpdc;
+ uint64_t mgptc;
+ uint64_t torl;
+ uint64_t torh;
+ uint64_t totl;
+ uint64_t toth;
+ uint64_t tpr;
+ uint64_t tpt;
+ uint64_t ptc64;
+ uint64_t ptc127;
+ uint64_t ptc255;
+ uint64_t ptc511;
+ uint64_t ptc1023;
+ uint64_t ptc1522;
+ uint64_t mptc;
+ uint64_t bptc;
+ uint64_t tsctc;
+ uint64_t tsctfc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+ uint8_t __iomem *hw_addr;
+ e1000_mac_type mac_type;
+ e1000_phy_type phy_type;
+ uint32_t phy_init_script;
+ e1000_media_type media_type;
+ void *back;
+ e1000_fc_type fc;
+ e1000_bus_speed bus_speed;
+ e1000_bus_width bus_width;
+ e1000_bus_type bus_type;
+ struct e1000_eeprom_info eeprom;
+ e1000_ms_type master_slave;
+ e1000_ms_type original_master_slave;
+ e1000_ffe_config ffe_config_state;
+ uint32_t asf_firmware_present;
+ unsigned long io_base;
+ uint32_t phy_id;
+ uint32_t phy_revision;
+ uint32_t phy_addr;
+ uint32_t original_fc;
+ uint32_t txcw;
+ uint32_t autoneg_failed;
+ uint32_t max_frame_size;
+ uint32_t min_frame_size;
+ uint32_t mc_filter_type;
+ uint32_t num_mc_addrs;
+ uint32_t collision_delta;
+ uint32_t tx_packet_delta;
+ uint32_t ledctl_default;
+ uint32_t ledctl_mode1;
+ uint32_t ledctl_mode2;
+ uint16_t phy_spd_default;
+ uint16_t autoneg_advertised;
+ uint16_t pci_cmd_word;
+ uint16_t fc_high_water;
+ uint16_t fc_low_water;
+ uint16_t fc_pause_time;
+ uint16_t current_ifs_val;
+ uint16_t ifs_min_val;
+ uint16_t ifs_max_val;
+ uint16_t ifs_step_size;
+ uint16_t ifs_ratio;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_id;
+ uint16_t subsystem_vendor_id;
+ uint8_t revision_id;
+ uint8_t autoneg;
+ uint8_t mdix;
+ uint8_t forced_speed_duplex;
+ uint8_t wait_autoneg_complete;
+ uint8_t dma_fairness;
+ uint8_t mac_addr[NODE_ADDRESS_SIZE];
+ uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
+ boolean_t disable_polarity_correction;
+ boolean_t speed_downgraded;
+ e1000_smart_speed smart_speed;
+ e1000_dsp_config dsp_config_state;
+ boolean_t get_link_status;
+ boolean_t serdes_link_down;
+ boolean_t tbi_compatibility_en;
+ boolean_t tbi_compatibility_on;
+ boolean_t phy_reset_disable;
+ boolean_t fc_send_xon;
+ boolean_t fc_strict_ieee;
+ boolean_t report_tx_early;
+ boolean_t adaptive_ifs;
+ boolean_t ifs_params_forced;
+ boolean_t in_ifs_mode;
+};
+
+
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* LED Logic Word */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66MHz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK 0x00000030
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+/* EEPROM Read */
+#define E1000_EERD_START 0x00000001 /* Start Read */
+#define E1000_EERD_DONE 0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
+
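+/* Minimal sketch (hypothetical helper): composing the value written to EERD
+ * to start a read of one EEPROM word; the data is later extracted from the
+ * E1000_EERD_DATA_MASK field once E1000_EERD_DONE is set.
+ */
+static inline uint32_t e1000_eerd_read_cmd(uint16_t word_offset)
+{
+	return E1000_EERD_START |
+	       (((uint32_t)word_offset << E1000_EERD_ADDR_SHIFT) &
+		E1000_EERD_ADDR_MASK);
+}
+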
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI 0x01
+#define EEPROM_STATUS_WEN_SPI 0x02
+#define EEPROM_STATUS_BP0_SPI 0x04
+#define EEPROM_STATUS_BP1_SPI 0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
+#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
+
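+/* Minimal sketch (hypothetical helper): building an MDIC read command for PHY
+ * register "reg_addr" on PHY address "phy_addr"; software then polls for
+ * E1000_MDIC_READY and checks E1000_MDIC_ERROR before using the data field.
+ */
+static inline uint32_t e1000_mdic_read_cmd(uint32_t reg_addr, uint32_t phy_addr)
+{
+	return ((reg_addr << E1000_MDIC_REG_SHIFT) & E1000_MDIC_REG_MASK) |
+	       ((phy_addr << E1000_MDIC_PHY_SHIFT) & E1000_MDIC_PHY_MASK) |
+	       E1000_MDIC_OP_READ;
+}
+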
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT 8
+#define E1000_LEDCTL_LED1_IVRT 0x00004000
+#define E1000_LEDCTL_LED1_BLINK 0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT 16
+#define E1000_LEDCTL_LED2_IVRT 0x00400000
+#define E1000_LEDCTL_LED2_BLINK 0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT 24
+#define E1000_LEDCTL_LED3_IVRT 0x40000000
+#define E1000_LEDCTL_LED3_BLINK 0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_ACTIVITY 0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10 0x5
+#define E1000_LEDCTL_MODE_LINK_100 0x6
+#define E1000_LEDCTL_MODE_LINK_1000 0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
+#define E1000_LEDCTL_MODE_COLLISION 0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
+#define E1000_LEDCTL_MODE_PAUSED 0xD
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
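+/* Minimal sketch (hypothetical helper): programming one LED, e.g. putting
+ * LED0 into one of the mode values defined above while leaving the other
+ * LEDs untouched.
+ */
+static inline uint32_t e1000_ledctl_set_led0_mode(uint32_t ledctl, uint32_t mode)
+{
+	ledctl &= ~E1000_LEDCTL_LED0_MODE_MASK;
+	ledctl |= (mode << E1000_LEDCTL_LED0_MODE_SHIFT) &
+		  E1000_LEDCTL_LED0_MODE_MASK;
+	return ledctl;
+}
+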
+/* Receive Address */
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD E1000_ICR_SRPD
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD E1000_ICR_SRPD
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
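+/* Minimal sketch (hypothetical helper): translating a receive buffer length
+ * into the RCTL size encoding.  The standard sizes assume BSEX=0; the 4K-16K
+ * sizes are only valid with E1000_RCTL_BSEX set, as noted above.
+ */
+static inline uint32_t e1000_rctl_buffer_size_bits(uint32_t rx_buffer_len)
+{
+	switch (rx_buffer_len) {
+	case 256:   return E1000_RCTL_SZ_256;
+	case 512:   return E1000_RCTL_SZ_512;
+	case 1024:  return E1000_RCTL_SZ_1024;
+	case 4096:  return E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+	case 8192:  return E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
+	case 16384: return E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
+	default:    return E1000_RCTL_SZ_2048;	/* hardware default */
+	}
+}
+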
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
+#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
+#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x000000FF /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x0000FF00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x00FF0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
+#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
+#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
+#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_CC 0x10000000 /* Receive config change */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN 4096
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms for RDY signal */
+#define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */
+
+/* EEPROM Size definitions */
+#define EEPROM_SIZE_16KB 0x1800
+#define EEPROM_SIZE_8KB 0x1400
+#define EEPROM_SIZE_4KB 0x1000
+#define EEPROM_SIZE_2KB 0x0C00
+#define EEPROM_SIZE_1KB 0x0800
+#define EEPROM_SIZE_512B 0x0400
+#define EEPROM_SIZE_128B 0x0000
+#define EEPROM_SIZE_MASK 0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT 0x0003
+#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD 0x0007
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
+#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
+#define EEPROM_CFG 0x0012
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A 0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS 0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST 0x0200
+#define EEPROM_WORD0A_FD 0x0400
+#define EEPROM_WORD0A_66MHZ 0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE 0x1000
+#define EEPROM_WORD0F_ASM_DIR 0x2000
+#define EEPROM_WORD0F_ANE 0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
+
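+/* Minimal sketch (hypothetical helper, operating on words already read out of
+ * the EEPROM): the 16-bit sum of words 0x00 through EEPROM_CHECKSUM_REG must
+ * equal EEPROM_SUM for the image to be considered valid.
+ */
+static inline int e1000_eeprom_words_valid(const uint16_t *word)
+{
+	uint16_t sum = 0;
+	int i;
+
+	for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
+		sum += word[i];
+	return sum == (uint16_t)EEPROM_SUM;
+}
+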
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1 8
+
+#define EEPROM_RESERVED_WORD 0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 64
+#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT 12
+
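+/* Minimal sketch (hypothetical helper): how the threshold and distance above
+ * are typically folded into the TCTL collision fields, using the shifts
+ * defined here and the E1000_TCTL_CT/COLD masks defined earlier.
+ */
+static inline uint32_t e1000_tctl_collision_bits(void)
+{
+	return ((E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) & E1000_TCTL_CT) |
+	       ((E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT) &
+		E1000_TCTL_COLD);
+}
+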
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define E1000_TIPG_IPGR2_SHIFT 20
+
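+/* Minimal sketch (hypothetical helper): composing a TIPG value from the
+ * per-segment defaults above, here for an 82543-class copper part.
+ */
+static inline uint32_t e1000_tipg_82543_copper(void)
+{
+	return DEFAULT_82543_TIPG_IPGT_COPPER |
+	       (DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT) |
+	       (DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT);
+}
+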
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START 8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP 190
+#define TX_THRESHOLD_DISABLE 0
+#define TX_THRESHOLD_TIMER_MS 10000
+#define MIN_NUM_XMITS 1000
+#define IFS_MAX 80
+#define IFS_STEP 10
+#define IFS_MIN 40
+#define IFS_RATIO 4
+
+/* PBA constants */
+#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to bits 11:8 of the Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT 3
+
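+/* Minimal sketch (hypothetical helper): relocating the EEPROM word 0x0F pause
+ * bits (13:12) into the TXCW pause field (bits 8:7) using the shift above.
+ */
+static inline uint32_t e1000_txcw_pause_bits(uint16_t eeprom_word0f)
+{
+	return ((uint32_t)(eeprom_word0f & EEPROM_WORD0F_PAUSE_MASK) >>
+		PAUSE_SHIFT) & E1000_TXCW_PAUSE_MASK;
+}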
+
+#define RECEIVE_BUFFER_ALIGN_SIZE (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT 500
+
+#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the RX descriptor with EOP set
+ * errors = the 8 bit error field of the RX descriptor with EOP set
+ * length = the sum of all the length fields of the RX descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * max_frame_length = the maximum frame length we want to accept.
+ * min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = TRUE;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = FALSE;
+ * }
+ * ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+ ((adapter)->tbi_compatibility_on && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= ((adapter)->max_frame_size + 1))) : \
+ (((length) > (adapter)->min_frame_size) && \
+ ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE 0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+#define IGP01E1000_PHY_DSP_SET 0x1F71
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
+
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+
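+/* Minimal sketch (hypothetical helper): the forced-speed encoding splits
+ * across bits 6 and 13 of the PHY control register, as noted above
+ * (bit6=1/bit13=0 selects 1000, bit6=0/bit13=1 selects 100).
+ */
+static inline uint16_t mii_cr_forced_speed_bits(int speed_mbps)
+{
+	switch (speed_mbps) {
+	case 1000: return MII_CR_SPEED_SELECT_MSB;
+	case 100:  return MII_CR_SPEED_SELECT_LSB;
+	default:   return 0;	/* 10 Mb/s */
+	}
+}
+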
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* Link Code Word Page Received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+ /* 0=DTE device */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
+ /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+ /* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
+ /* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+ * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
+#define M88E1000_PSSR_MDIX_SHIFT 6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
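+/* Minimal sketch (hypothetical helper): pulling the cable-length index
+ * (0 = <50m ... 4 = >140m, per the comment above) out of the PHY specific
+ * status word.
+ */
+static inline uint16_t m88e1000_cable_length_index(uint16_t phy_spec_status)
+{
+	return (phy_spec_status & M88E1000_PSSR_CABLE_LENGTH) >>
+	       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+}
+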
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
+#define IGP01E1000_PSCFR_PRE_EN 0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
+#define IGP01E1000_PSSR_LINK_UP 0x0400
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE 0x0
+#define DSP_RESET_DISABLE 0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+
+/* The precision of the length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE 10
+
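+/* Minimal sketch (hypothetical helper): extracting the 7-bit coarse+fine
+ * index from an AGC register value; the index selects one of the
+ * IGP01E1000_AGC_LENGTH_TABLE_SIZE cable-length entries, accurate to about
+ * +/- IGP01E1000_AGC_RANGE meters.
+ */
+static inline uint16_t igp01e1000_agc_length_index(uint16_t agc_value)
+{
+	return (agc_value >> IGP01E1000_AGC_LENGTH_SHIFT) &
+	       (IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1);
+}
+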
+/* IGP01E1000 PCS Initialization register */
+/* bits 3:6 in the PCS registers stores the channels polarity */
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
+ * on Link-Up */
+#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4 0x04
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_SOF 0x01
+#define PHY_OP_READ 0x02
+#define PHY_OP_WRITE 0x01
+#define PHY_TURNAROUND 0x02
+#define PHY_PREAMBLE_SIZE 32
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+#define E1000_PHY_ADDRESS 0x01
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK 0x01E0
+#define REG9_SPEED_MASK 0x0300
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/
+
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
new file mode 100644
index 000000000000..82549a6fcfb3
--- /dev/null
+++ b/drivers/net/e1000/e1000_main.c
@@ -0,0 +1,3162 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* Change Log
+ * 5.3.12 6/7/04
+ * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
+ * - if_mii support and associated kcompat for older kernels
+ * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
+ * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
+ *
+ * 5.7.1 12/16/04
+ * - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This
+ * fix was removed as it caused system instability. The suspected cause of
+ * this is the call to e1000_irq_disable in e1000_intr. Inlined the
+ * required piece of e1000_irq_disable into e1000_intr - Anton Blanchard
+ * 5.7.0 12/10/04
+ * - include fix to the condition that determines when to quit NAPI - Robert Olsson
+ * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
+ * 5.6.5 11/01/04
+ * - Enabling NETIF_F_SG without checksum offload is illegal -
+ *   John Mason <jdmason@us.ibm.com>
+ * 5.6.3 10/26/04
+ * - Remove redundant initialization - Jamal Hadi
+ * - Reset buffer_info->dma in tx resource cleanup logic
+ * 5.6.2 10/12/04
+ * - Avoid filling tx_ring completely - shemminger@osdl.org
+ * - Replace schedule_timeout() with msleep()/msleep_interruptible() -
+ * nacc@us.ibm.com
+ * - Sparse cleanup - shemminger@osdl.org
+ * - Fix tx resource cleanup logic
+ * - LLTX support - ak@suse.de and hadi@cyberus.ca
+ */
+
+char e1000_driver_name[] = "e1000";
+char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+#define DRV_VERSION "5.7.6-k2"DRIVERNAPI
+char e1000_driver_version[] = DRV_VERSION;
+char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1014),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
+ INTEL_E1000_ETHERNET_DEVICE(0x108A),
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+int e1000_setup_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_rx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+/* Local Function Prototypes */
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
+static void e1000_set_multi(struct net_device *netdev);
+static void e1000_update_phy_info(unsigned long data);
+static void e1000_watchdog(unsigned long data);
+static void e1000_watchdog_task(struct e1000_adapter *adapter);
+static void e1000_82547_tx_fifo_stall(unsigned long data);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_NAPI
+static int e1000_clean(struct net_device *netdev, int *budget);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do);
+#else
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
+#endif
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_tx_timeout_task(struct net_device *dev);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
+
+static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
+static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+struct notifier_block e1000_notifier_reboot = {
+ .notifier_call = e1000_notify_reboot,
+ .next = NULL,
+ .priority = 0
+};
+
+/* Exported from other modules */
+
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+static struct pci_driver e1000_driver = {
+ .name = e1000_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+ /* Power Management Hooks */
+#ifdef CONFIG_PM
+ .suspend = e1000_suspend,
+ .resume = e1000_resume
+#endif
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init
+e1000_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ e1000_driver_string, e1000_driver_version);
+
+ printk(KERN_INFO "%s\n", e1000_copyright);
+
+ ret = pci_module_init(&e1000_driver);
+ if(ret >= 0) {
+ register_reboot_notifier(&e1000_notifier_reboot);
+ }
+ return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit
+e1000_exit_module(void)
+{
+ unregister_reboot_notifier(&e1000_notifier_reboot);
+ pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static inline void
+e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+ E1000_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static inline void
+e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
+ E1000_WRITE_FLUSH(&adapter->hw);
+ }
+}
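+
+/* Usage sketch (illustrative, hypothetical call sequence): irq_sem acts
+ * as a nesting count for interrupt masking. It starts at 1 (see
+ * e1000_sw_init), so interrupts stay masked until the count reaches zero:
+ *
+ *   e1000_irq_disable(adapter);   // irq_sem 1 -> 2, IMC masks everything
+ *   e1000_irq_enable(adapter);    // irq_sem 2 -> 1, still masked
+ *   e1000_irq_enable(adapter);    // irq_sem 1 -> 0, IMS re-enables
+ */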
+
+int
+e1000_up(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ /* hardware has been reset, we need to reload some things */
+
+ /* Reset the PHY if it was previously powered down */
+ if(adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ if(mii_reg & MII_CR_POWER_DOWN)
+ e1000_phy_reset(&adapter->hw);
+ }
+
+ e1000_set_multi(netdev);
+
+ e1000_restore_vlan(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ e1000_alloc_rx_buffers(adapter);
+
+ if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+ SA_SHIRQ | SA_SAMPLE_RANDOM,
+ netdev->name, netdev)))
+ return err;
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ e1000_irq_enable(adapter);
+
+#ifdef CONFIG_E1000_NAPI
+ netif_poll_enable(netdev);
+#endif
+ return 0;
+}
+
+void
+e1000_down(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ e1000_irq_disable(adapter);
+ free_irq(adapter->pdev->irq, netdev);
+ del_timer_sync(&adapter->tx_fifo_stall_timer);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+#ifdef CONFIG_E1000_NAPI
+ netif_poll_disable(netdev);
+#endif
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ e1000_reset(adapter);
+ e1000_clean_tx_ring(adapter);
+ e1000_clean_rx_ring(adapter);
+
+ /* If WoL is not enabled
+ * Power down the PHY so no link is implied when interface is down */
+ if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
+ uint16_t mii_reg;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ }
+}
+
+void
+e1000_reset(struct e1000_adapter *adapter)
+{
+ uint32_t pba;
+
+ /* Repartition the PBA for MTUs greater than 9k.
+ * CTRL.RST must be asserted for this to take effect.
+ */
+
+ if(adapter->hw.mac_type < e1000_82547) {
+ if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
+ pba = E1000_PBA_40K;
+ else
+ pba = E1000_PBA_48K;
+ } else {
+ if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
+ pba = E1000_PBA_22K;
+ else
+ pba = E1000_PBA_30K;
+ adapter->tx_fifo_head = 0;
+ adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ }
+ E1000_WRITE_REG(&adapter->hw, PBA, pba);
+
+ /* flow control settings */
+ adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_HIGH_DIFF;
+ adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
+ E1000_FC_LOW_DIFF;
+ adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
+ adapter->hw.fc_send_xon = 1;
+ adapter->hw.fc = adapter->hw.original_fc;
+
+ e1000_reset_hw(&adapter->hw);
+ if(adapter->hw.mac_type >= e1000_82544)
+ E1000_WRITE_REG(&adapter->hw, WUC, 0);
+ if(e1000_init_hw(&adapter->hw))
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
+
+ e1000_reset_adaptive(&adapter->hw);
+ e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+}
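+
+/* Reading of the sizing above (illustrative, assuming E1000_PBA_BYTES_SHIFT
+ * converts the KB-granular PBA value to bytes): an 82547 with the default
+ * 2048-byte receive buffers programs pba = E1000_PBA_30K and derives its
+ * Tx FIFO bookkeeping from the remainder,
+ *
+ *   tx_fifo_size = (E1000_PBA_40K - E1000_PBA_30K) << E1000_PBA_BYTES_SHIFT
+ *
+ * while the flow-control watermarks sit E1000_FC_HIGH_DIFF and
+ * E1000_FC_LOW_DIFF bytes below the top of the programmed packet buffer.
+ */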
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+
+static int __devinit
+e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ static int cards_found = 0;
+ unsigned long mmio_start;
+ int mmio_len;
+ int pci_using_dac;
+ int i;
+ int err;
+ uint16_t eeprom_data;
+ uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
+
+ if((err = pci_enable_device(pdev)))
+ return err;
+
+ if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ pci_using_dac = 1;
+ } else {
+ if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ E1000_ERR("No usable DMA configuration, aborting\n");
+ return err;
+ }
+ pci_using_dac = 0;
+ }
+
+ if((err = pci_request_regions(pdev, e1000_driver_name)))
+ return err;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if(!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev->priv;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->msg_enable = (1 << debug) - 1;
+
+ mmio_start = pci_resource_start(pdev, BAR_0);
+ mmio_len = pci_resource_len(pdev, BAR_0);
+
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if(!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ for(i = BAR_1; i <= BAR_5; i++) {
+ if(pci_resource_len(pdev, i) == 0)
+ continue;
+ if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ adapter->hw.io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+
+ netdev->open = &e1000_open;
+ netdev->stop = &e1000_close;
+ netdev->hard_start_xmit = &e1000_xmit_frame;
+ netdev->get_stats = &e1000_get_stats;
+ netdev->set_multicast_list = &e1000_set_multi;
+ netdev->set_mac_address = &e1000_set_mac;
+ netdev->change_mtu = &e1000_change_mtu;
+ netdev->do_ioctl = &e1000_ioctl;
+ e1000_set_ethtool_ops(netdev);
+ netdev->tx_timeout = &e1000_tx_timeout;
+ netdev->watchdog_timeo = 5 * HZ;
+#ifdef CONFIG_E1000_NAPI
+ netdev->poll = &e1000_clean;
+ netdev->weight = 64;
+#endif
+ netdev->vlan_rx_register = e1000_vlan_rx_register;
+ netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = e1000_netpoll;
+#endif
+ strcpy(netdev->name, pci_name(pdev));
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+ netdev->base_addr = adapter->hw.io_base;
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+
+ if((err = e1000_sw_init(adapter)))
+ goto err_sw_init;
+
+ if(adapter->hw.mac_type >= e1000_82543) {
+ netdev->features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+ }
+
+#ifdef NETIF_F_TSO
+ if((adapter->hw.mac_type >= e1000_82544) &&
+ (adapter->hw.mac_type != e1000_82547))
+ netdev->features |= NETIF_F_TSO;
+#endif
+ if(pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ /* hard_start_xmit is safe against parallel locking */
+ netdev->features |= NETIF_F_LLTX;
+
+ /* before reading the EEPROM, reset the controller to
+ * put the device in a known good starting state */
+
+ e1000_reset_hw(&adapter->hw);
+
+ /* make sure the EEPROM is good */
+
+ if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* copy the MAC address out of the EEPROM */
+
+ if (e1000_read_mac_addr(&adapter->hw))
+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+ if(!is_valid_ether_addr(netdev->dev_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ e1000_read_part_num(&adapter->hw, &(adapter->part_num));
+
+ e1000_get_bus_info(&adapter->hw);
+
+ init_timer(&adapter->tx_fifo_stall_timer);
+ adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ INIT_WORK(&adapter->watchdog_task,
+ (void (*)(void *))e1000_watchdog_task, adapter);
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long) adapter;
+
+ INIT_WORK(&adapter->tx_timeout_task,
+ (void (*)(void *))e1000_tx_timeout_task, netdev);
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ e1000_check_options(adapter);
+
+ /* Initial Wake on LAN setting
+ * If APM wake is enabled in the EEPROM,
+ * enable the ACPI Magic Packet filter
+ */
+
+ switch(adapter->hw.mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ break;
+ case e1000_82544:
+ e1000_read_eeprom(&adapter->hw,
+ EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_82544_APM;
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+ && (adapter->hw.media_type == e1000_media_type_copper)) {
+ e1000_read_eeprom(&adapter->hw,
+ EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ break;
+ }
+ /* Fall Through */
+ default:
+ e1000_read_eeprom(&adapter->hw,
+ EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ }
+ if(eeprom_data & eeprom_apme_mask)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ if((err = register_netdev(netdev)))
+ goto err_register;
+
+ DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit
+e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t manc;
+
+ flush_scheduled_work();
+
+ if(adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
+ manc = E1000_READ_REG(&adapter->hw, MANC);
+ if(manc & E1000_MANC_SMBUS_EN) {
+ manc |= E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ }
+ }
+
+ unregister_netdev(netdev);
+
+ e1000_phy_hw_reset(&adapter->hw);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit
+e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ hw->max_frame_size = netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+
+ if(e1000_set_mac_type(hw)) {
+ DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ /* initialize eeprom parameters */
+
+ e1000_init_eeprom_params(hw);
+
+ switch(hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+
+ hw->wait_autoneg_complete = FALSE;
+ hw->tbi_compatibility_en = TRUE;
+ hw->adaptive_ifs = TRUE;
+
+ /* Copper options */
+
+ if(hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = FALSE;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ atomic_set(&adapter->irq_sem, 1);
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tx_lock);
+
+ return 0;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int
+e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ int err;
+
+ /* allocate transmit descriptors */
+
+ if((err = e1000_setup_tx_resources(adapter)))
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+
+ if((err = e1000_setup_rx_resources(adapter)))
+ goto err_setup_rx;
+
+ if((err = e1000_up(adapter)))
+ goto err_up;
+
+ return E1000_SUCCESS;
+
+err_up:
+ e1000_free_rx_resources(adapter);
+err_setup_rx:
+ e1000_free_tx_resources(adapter);
+err_setup_tx:
+ e1000_reset(adapter);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int
+e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ e1000_down(adapter);
+
+ e1000_free_tx_resources(adapter);
+ e1000_free_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @begin: address of beginning of memory
+ * @end: address of end of memory
+ **/
+static inline boolean_t
+e1000_check_64k_bound(struct e1000_adapter *adapter,
+ void *start, unsigned long len)
+{
+ unsigned long begin = (unsigned long) start;
+ unsigned long end = begin + len;
+
+ /* first-rev 82545 and 82546 must not allow any memory
+ * write location to cross a 64kB boundary due to errata 23 */
+ if (adapter->hw.mac_type == e1000_82545 ||
+ adapter->hw.mac_type == e1000_82546 ) {
+
+ /* check buffer doesn't cross 64kB */
+ return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+ }
+
+ return TRUE;
+}
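+
+/* Worked example with hypothetical addresses: a buffer at 0x0000F000 of
+ * length 0x2000 spans 0x0000F000..0x00010FFF, so begin ^ (end - 1) =
+ * 0x0001FFFF and the right shift by 16 is non-zero -- the buffer crosses
+ * a 64kB boundary and the check returns FALSE.  The same buffer starting
+ * at 0x00010000 stays within 0x00010000..0x00011FFF and passes.
+ */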
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_tx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *txdr = &adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct e1000_buffer) * txdr->count;
+ txdr->buffer_info = vmalloc(size);
+ if(!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(txdr->buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ E1000_ROUNDUP(txdr->size, 4096);
+
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if(!txdr->desc) {
+setup_tx_desc_die:
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ }
+
+ /* fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ void *olddesc = txdr->desc;
+ dma_addr_t olddma = txdr->dma;
+ DPRINTK(TX_ERR,ERR,"txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
+ /* try again, without freeing the previous */
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ /* failed allocation, critical failure */
+ if(!txdr->desc) {
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ goto setup_tx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, txdr->size,
+ txdr->desc, txdr->dma);
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate aligned Memory for the Transmit"
+ " descriptor ring\n");
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ } else {
+ /* free old, move on with the new one since it's okay */
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ }
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+
+ return 0;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ uint64_t tdba = adapter->tx_ring.dma;
+ uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
+ uint32_t tctl, tipg;
+
+ E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
+
+ E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ E1000_WRITE_REG(&adapter->hw, TDH, 0);
+ E1000_WRITE_REG(&adapter->hw, TDT, 0);
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+
+ switch (adapter->hw.mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ tipg = DEFAULT_82542_TIPG_IPGT;
+ tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ break;
+ default:
+ if(adapter->hw.media_type == e1000_media_type_fiber ||
+ adapter->hw.media_type == e1000_media_type_internal_serdes)
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ else
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+ tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ }
+ E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
+
+ /* Set the Tx Interrupt Delay register */
+
+ E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
+ if(adapter->hw.mac_type >= e1000_82540)
+ E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = E1000_READ_REG(&adapter->hw, TCTL);
+
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+
+ e1000_config_collision_dist(&adapter->hw);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS;
+
+ if(adapter->hw.mac_type < e1000_82543)
+ adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ /* Cache if we're 82544 running in PCI-X because we'll
+ * need this to apply a workaround later in the send path. */
+ if(adapter->hw.mac_type == e1000_82544 &&
+ adapter->hw.bus_type == e1000_bus_type_pcix)
+ adapter->pcix_82544 = 1;
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_rx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct e1000_buffer) * rxdr->count;
+ rxdr->buffer_info = vmalloc(size);
+ if(!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate Memory for the Recieve descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(rxdr->buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+ E1000_ROUNDUP(rxdr->size, 4096);
+
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+ if(!rxdr->desc) {
+setup_rx_desc_die:
+ DPRINTK(PROBE, ERR,
+ "Unble to Allocate Memory for the Recieve descriptor ring\n");
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ }
+
+ /* fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ void *olddesc = rxdr->desc;
+ dma_addr_t olddma = rxdr->dma;
+ DPRINTK(RX_ERR,ERR,
+ "rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
+ /* try again, without freeing the previous */
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ /* failed allocation, critical failure */
+ if(!rxdr->desc) {
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ goto setup_rx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, rxdr->size,
+ rxdr->desc, rxdr->dma);
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to Allocate aligned Memory for the"
+ " Receive descriptor ring\n");
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ } else {
+ /* free old, move on with the new one since it's okay */
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ }
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control register
+ * @adapter: Board private structure
+ **/
+
+static void
+e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ if(adapter->hw.tbi_compatibility_on == 1)
+ rctl |= E1000_RCTL_SBP;
+ else
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Setup buffer sizes */
+ rctl &= ~(E1000_RCTL_SZ_4096);
+ rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
+ break;
+ case E1000_RXBUFFER_4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case E1000_RXBUFFER_8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case E1000_RXBUFFER_16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ uint64_t rdba = adapter->rx_ring.dma;
+ uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+ uint32_t rctl;
+ uint32_t rxcsum;
+
+ /* disable receives while setting up the descriptors */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+
+ /* set the Receive Delay Timer Register */
+ E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
+
+ if(adapter->hw.mac_type >= e1000_82540) {
+ E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
+ if(adapter->itr > 1)
+ E1000_WRITE_REG(&adapter->hw, ITR,
+ 1000000000 / (adapter->itr * 256));
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
+
+ E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers */
+ E1000_WRITE_REG(&adapter->hw, RDH, 0);
+ E1000_WRITE_REG(&adapter->hw, RDT, 0);
+
+ /* Enable 82543 Receive Checksum Offload for TCP and UDP */
+ if((adapter->hw.mac_type >= e1000_82543) &&
+ (adapter->rx_csum == TRUE)) {
+ rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_tx_resources(struct e1000_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_tx_ring(adapter);
+
+ vfree(adapter->tx_ring.buffer_info);
+ adapter->tx_ring.buffer_info = NULL;
+
+ pci_free_consistent(pdev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
+
+ adapter->tx_ring.desc = NULL;
+}
+
+static inline void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if(buffer_info->dma) {
+ pci_unmap_page(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ if(buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ if (likely(adapter->previous_buffer_info.skb != NULL)) {
+ e1000_unmap_and_free_tx_resource(adapter,
+ &adapter->previous_buffer_info);
+ }
+
+ for(i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ E1000_WRITE_REG(&adapter->hw, TDH, 0);
+ E1000_WRITE_REG(&adapter->hw, TDT, 0);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_rx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_rx_ring(adapter);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_rx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+
+ for(i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if(buffer_info->skb) {
+
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ E1000_WRITE_REG(&adapter->hw, RDH, 0);
+ E1000_WRITE_REG(&adapter->hw, RDT, 0);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void
+e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t rctl;
+
+ e1000_pci_clear_mwi(&adapter->hw);
+
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_RST;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ E1000_WRITE_FLUSH(&adapter->hw);
+ mdelay(5);
+
+ if(netif_running(netdev))
+ e1000_clean_rx_ring(adapter);
+}
+
+static void
+e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~E1000_RCTL_RST;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ E1000_WRITE_FLUSH(&adapter->hw);
+ mdelay(5);
+
+ if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(&adapter->hw);
+
+ if(netif_running(netdev)) {
+ e1000_configure_rx(adapter);
+ e1000_alloc_rx_buffers(adapter);
+ }
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct sockaddr *addr = p;
+
+ if(!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if(adapter->hw.mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+
+ if(adapter->hw.mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void
+e1000_set_multi(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ struct dev_mc_list *mc_ptr;
+ uint32_t rctl;
+ uint32_t hash_value;
+ int i;
+ unsigned long flags;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ spin_lock_irqsave(&adapter->tx_lock, flags);
+
+ rctl = E1000_READ_REG(hw, RCTL);
+
+ if(netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ } else if(netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ rctl &= ~E1000_RCTL_UPE;
+ } else {
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ }
+
+ E1000_WRITE_REG(hw, RCTL, rctl);
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if(hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ /* load the first 14 multicast addresses into the exact filters 1-14.
+ * RAR 0 is used for the station MAC address.
+ * If there are fewer than 14 addresses, clear the remaining filters.
+ */
+ mc_ptr = netdev->mc_list;
+
+ for(i = 1; i < E1000_RAR_ENTRIES; i++) {
+ if(mc_ptr) {
+ e1000_rar_set(hw, mc_ptr->dmi_addr, i);
+ mc_ptr = mc_ptr->next;
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ }
+ }
+
+ /* clear the old settings from the multicast hash table */
+
+ for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+ /* load any remaining addresses into the hash table */
+
+ for(; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ e1000_mta_set(hw, hash_value);
+ }
+
+ if(hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+}
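+
+/* Sketch of the filter split above (illustrative): RAR 0 always holds the
+ * station address, RARs 1-14 hold exact multicast matches, and any
+ * addresses beyond that fall through to the multicast hash table via
+ * e1000_hash_mc_addr()/e1000_mta_set().  A list of 20 multicast addresses
+ * therefore uses the exact filters for the first 14 and the hash table
+ * for the remaining 6.
+ */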
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+
+static void
+e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+
+static void
+e1000_82547_tx_fifo_stall(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct net_device *netdev = adapter->netdev;
+ uint32_t tctl;
+
+ if(atomic_read(&adapter->tx_fifo_stall)) {
+ if((E1000_READ_REG(&adapter->hw, TDT) ==
+ E1000_READ_REG(&adapter->hw, TDH)) &&
+ (E1000_READ_REG(&adapter->hw, TDFT) ==
+ E1000_READ_REG(&adapter->hw, TDFH)) &&
+ (E1000_READ_REG(&adapter->hw, TDFTS) ==
+ E1000_READ_REG(&adapter->hw, TDFHS))) {
+ tctl = E1000_READ_REG(&adapter->hw, TCTL);
+ E1000_WRITE_REG(&adapter->hw, TCTL,
+ tctl & ~E1000_TCTL_EN);
+ E1000_WRITE_REG(&adapter->hw, TDFT,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, TDFH,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, TDFTS,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, TDFHS,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+ E1000_WRITE_FLUSH(&adapter->hw);
+
+ adapter->tx_fifo_head = 0;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ netif_wake_queue(netdev);
+ } else {
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ }
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void
+e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void
+e1000_watchdog_task(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_desc_ring *txdr = &adapter->tx_ring;
+ uint32_t link;
+
+ e1000_check_for_link(&adapter->hw);
+
+ if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+ !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
+ link = !adapter->hw.serdes_link_down;
+ else
+ link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
+
+ if(link) {
+ if(!netif_carrier_ok(netdev)) {
+ e1000_get_speed_and_duplex(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex");
+
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
+ adapter->smartspeed = 0;
+ }
+ } else {
+ if(netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
+ }
+
+ e1000_smartspeed(adapter);
+ }
+
+ e1000_update_stats(adapter);
+
+ adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+ adapter->gorcl_old = adapter->stats.gorcl;
+ adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+ adapter->gotcl_old = adapter->stats.gotcl;
+
+ e1000_update_adaptive(&adapter->hw);
+
+ if(!netif_carrier_ok(netdev)) {
+ if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ schedule_work(&adapter->tx_timeout_task);
+ }
+ }
+
+ /* Dynamic mode for Interrupt Throttle Rate (ITR) */
+ if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+ /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
+ * asymmetrical Tx or Rx gets ITR=8000; everyone
+ * else is between 2000-8000. */
+ uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ uint32_t dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+ E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = TRUE;
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
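+
+/* Worked example of the dynamic ITR heuristic above (hypothetical
+ * traffic): with symmetric Tx/Rx byte counts dif is 0 and itr evaluates
+ * to 2000 ints/s; with traffic flowing in only one direction dif ~= goc
+ * and itr approaches 6000 + 2000 = 8000 ints/s.  The register value
+ * 1000000000 / (itr * 256) expresses the interval in 256ns units, so
+ * itr = 2000 writes 1953, i.e. roughly a 500us gap between interrupts.
+ */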
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static inline int
+e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+ struct e1000_context_desc *context_desc;
+ unsigned int i;
+ uint32_t cmd_length = 0;
+ uint16_t ipcse, tucse, mss;
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+ if(skb_shinfo(skb)->tso_size) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ mss = skb_shinfo(skb)->tso_size;
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+ skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+ skb->nh.iph->daddr,
+ 0,
+ IPPROTO_TCP,
+ 0);
+ ipcss = skb->nh.raw - skb->data;
+ ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
+ ipcse = skb->h.raw - skb->data - 1;
+ tucss = skb->h.raw - skb->data;
+ tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
+ (skb->len - (hdr_len)));
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ if(++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return 1;
+ }
+#endif
+
+ return 0;
+}
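+
+/* Worked example (assuming an untagged Ethernet frame with a 20-byte IPv4
+ * header and a 20-byte TCP header): skb->nh.raw - skb->data = 14, giving
+ * ipcss = 14, ipcso = 24 (offset of iph->check), ipcse = 33 (last byte of
+ * the IP header), tucss = 34, tucso = 50 (offset of th->check) and
+ * hdr_len = 54.  These offsets tell the hardware where to rewrite the IP
+ * and TCP checksums in every segment it carves out of the large send.
+ */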
+
+static inline boolean_t
+e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ unsigned int i;
+ uint8_t css;
+
+ if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ css = skb->h.raw - skb->data;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+
+ if(unlikely(++i == adapter->tx_ring.count)) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static inline int
+e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
+ unsigned int first, unsigned int max_per_txd,
+ unsigned int nr_frags, unsigned int mss)
+{
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f;
+ len -= skb->data_len;
+
+ i = tx_ring->next_to_use;
+
+ while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if(unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+#endif
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords. */
+ if(unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+
+ len -= size;
+ offset += size;
+ count++;
+ if(unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ for(f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = frag->page_offset;
+
+ while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ size -= 4;
+#endif
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. */
+ if(unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(frag->page+offset+size-1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+
+ len -= size;
+ offset += size;
+ count++;
+ if(unlikely(++i == tx_ring->count)) i = 0;
+ }
+ }
+
+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+}
+
+static inline void
+e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
+{
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+ }
+
+ if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ while(count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+ if(unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ E1000_WRITE_REG(&adapter->hw, TDT, i);
+}
+
+/**
+ * 82547 workaround to avoid controller hang in a half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time. This gives the Tx FIFO an opportunity to
+ * flush all packets. When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR 0x10
+#define E1000_82547_PAD_LEN 0x3E0
+
+static inline int
+e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+ uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+ uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+ E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
+
+ if(adapter->link_duplex != HALF_DUPLEX)
+ goto no_fifo_stall_required;
+
+ if(atomic_read(&adapter->tx_fifo_stall))
+ return 1;
+
+ if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+ atomic_set(&adapter->tx_fifo_stall, 1);
+ return 1;
+ }
+
+no_fifo_stall_required:
+ adapter->tx_fifo_head += skb_fifo_len;
+ if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
+ adapter->tx_fifo_head -= adapter->tx_fifo_size;
+ return 0;
+}
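+
+/* Worked example of the check above (hypothetical numbers): a 1514-byte
+ * frame gives skb_fifo_len = 1514 + E1000_FIFO_HDR = 1530, rounded up to
+ * 1536.  With E1000_82547_PAD_LEN = 0x3E0 (992 bytes), the frame is
+ * deferred and the stall flag set once the free FIFO space drops to
+ * 1536 - 992 = 544 bytes or less; otherwise tx_fifo_head advances by
+ * 1536 and the frame is queued normally.
+ */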
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+static int
+e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb->len;
+ unsigned long flags;
+ unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ int count = 0;
+ int tso;
+ unsigned int f;
+ len -= skb->data_len;
+
+ if(unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+#ifdef NETIF_F_TSO
+ mss = skb_shinfo(skb)->tso_size;
+ /* The controller does a simple calculation to
+ * make sure there is enough room in the FIFO before
+ * initiating the DMA for each buffer.  That check
+ * assumes ceil(buffer len / mss) <= 4, so to avoid
+ * overrunning the FIFO we cap the max buffer len at
+ * 4 * mss whenever TSO (mss) is in use. */
+ if(mss) {
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
+ }
+
+ if((mss) || (skb->ip_summed == CHECKSUM_HW))
+ count++;
+ count++; /* for sentinel desc */
+#else
+ if(skb->ip_summed == CHECKSUM_HW)
+ count++;
+#endif
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ if(adapter->pcix_82544)
+ count++;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for(f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ max_txd_pwr);
+ if(adapter->pcix_82544)
+ count += nr_frags;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&adapter->tx_lock)) {
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time */
+ if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if(unlikely(adapter->hw.mac_type == e1000_82547)) {
+ if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = adapter->tx_ring.next_to_use;
+
+ tso = e1000_tso(adapter, skb);
+	if (tso < 0) {
+		/* drop the frame; release the tx_lock taken above so we
+		 * do not return with it held and interrupts disabled */
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+ if (likely(tso))
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ else if(likely(e1000_tx_csum(adapter, skb)))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ e1000_tx_queue(adapter,
+ e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
+ tx_flags);
+
+ netdev->trans_start = jiffies;
+
+ /* Make sure there is space in the ring for the next send. */
+ if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void
+e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void
+e1000_tx_timeout_task(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ e1000_down(adapter);
+ e1000_up(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *
+e1000_get_stats(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+
+ e1000_update_stats(adapter);
+ return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ int old_mtu = adapter->rx_buffer_len;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+ if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
+ } else if(adapter->hw.mac_type < e1000_82543) {
+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
+ return -EINVAL;
+
+ } else if(max_frame <= E1000_RXBUFFER_4096) {
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+
+ } else if(max_frame <= E1000_RXBUFFER_8192) {
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+
+ } else {
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+ }
+
+ if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+
+ netdev->mtu = new_mtu;
+ adapter->hw.max_frame_size = max_frame;
+
+ return 0;
+}
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void
+e1000_update_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ uint16_t phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+	/* these counters are modified from e1000_tbi_adjust_stats,
+ * called from the interrupt context, so they must only
+ * be written while holding adapter->stats_lock
+ */
+
+ adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
+ adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
+ adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
+ adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
+ adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
+ adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
+ adapter->stats.roc += E1000_READ_REG(hw, ROC);
+ adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+
+ adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
+ adapter->stats.mpc += E1000_READ_REG(hw, MPC);
+ adapter->stats.scc += E1000_READ_REG(hw, SCC);
+ adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
+ adapter->stats.mcc += E1000_READ_REG(hw, MCC);
+ adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
+ adapter->stats.dc += E1000_READ_REG(hw, DC);
+ adapter->stats.sec += E1000_READ_REG(hw, SEC);
+ adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
+ adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
+ adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
+ adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
+ adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
+ adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
+ adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
+ adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
+ adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
+ adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
+ adapter->stats.ruc += E1000_READ_REG(hw, RUC);
+ adapter->stats.rfc += E1000_READ_REG(hw, RFC);
+ adapter->stats.rjc += E1000_READ_REG(hw, RJC);
+ adapter->stats.torl += E1000_READ_REG(hw, TORL);
+ adapter->stats.torh += E1000_READ_REG(hw, TORH);
+ adapter->stats.totl += E1000_READ_REG(hw, TOTL);
+ adapter->stats.toth += E1000_READ_REG(hw, TOTH);
+ adapter->stats.tpr += E1000_READ_REG(hw, TPR);
+ adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
+ adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
+ adapter->stats.tpt += hw->tx_packet_delta;
+ hw->collision_delta = E1000_READ_REG(hw, COLC);
+ adapter->stats.colc += hw->collision_delta;
+
+ if(hw->mac_type >= e1000_82543) {
+ adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
+ adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
+ adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
+ adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
+ adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
+ adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
+ }
+
+ /* Fill out the OS statistics structure */
+
+ adapter->net_stats.rx_packets = adapter->stats.gprc;
+ adapter->net_stats.tx_packets = adapter->stats.gptc;
+ adapter->net_stats.rx_bytes = adapter->stats.gorcl;
+ adapter->net_stats.tx_bytes = adapter->stats.gotcl;
+ adapter->net_stats.multicast = adapter->stats.mprc;
+ adapter->net_stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.rlec + adapter->stats.rnbc +
+ adapter->stats.mpc + adapter->stats.cexterr;
+ adapter->net_stats.rx_dropped = adapter->stats.rnbc;
+ adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+ adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+ adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
+ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+
+ adapter->net_stats.tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+ adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+ adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+
+ if(hw->media_type == e1000_media_type_copper) {
+ if((adapter->link_speed == SPEED_1000) &&
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+
+ if((hw->mac_type <= e1000_82546) &&
+ (hw->phy_type == e1000_phy_m88) &&
+ !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+ adapter->phy_stats.receive_errors += phy_tmp;
+ }
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
+ **/
+
+static irqreturn_t
+e1000_intr(int irq, void *data, struct pt_regs *regs)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifndef CONFIG_E1000_NAPI
+ unsigned int i;
+#endif
+
+ if(unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+ if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+ hw->get_link_status = 1;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ }
+
+#ifdef CONFIG_E1000_NAPI
+ if(likely(netif_rx_schedule_prep(netdev))) {
+
+ /* Disable interrupts and register for poll. The flush
+ of the posted write is intentionally left out.
+ */
+
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(hw, IMC, ~0);
+ __netif_rx_schedule(netdev);
+ }
+#else
+	/* Writing IMC and IMS is needed for the 82547.
+	   While the Hub Link bus is occupied, an interrupt
+	   de-assertion message cannot be sent.  When an interrupt
+	   assertion message is generated later, the two messages
+	   are re-ordered and sent out, so the APIC believes the
+	   82547 has de-asserted its interrupt while it is actually
+	   still asserting it, resulting in a deadlock.  Writing
+	   IMC forces the 82547 into the de-asserted state.
+	 */
+ if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+ }
+
+ for(i = 0; i < E1000_MAX_INTR; i++)
+ if(unlikely(!e1000_clean_rx_irq(adapter) &
+ !e1000_clean_tx_irq(adapter)))
+ break;
+
+ if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+ e1000_irq_enable(adapter);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @netdev: network interface device structure
+ * @budget: pointer to the remaining amount of Rx work allowed
+ **/
+
+static int
+e1000_clean(struct net_device *netdev, int *budget)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ int work_to_do = min(*budget, netdev->quota);
+ int tx_cleaned;
+ int work_done = 0;
+
+ tx_cleaned = e1000_clean_tx_irq(adapter);
+ e1000_clean_rx_irq(adapter, &work_done, work_to_do);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ /* if no Tx and not enough Rx work done, exit the polling mode */
+ if((!tx_cleaned && (work_done < work_to_do)) ||
+ !netif_running(netdev)) {
+ netif_rx_complete(netdev);
+ e1000_irq_enable(adapter);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+e1000_clean_tx_irq(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ boolean_t cleaned = FALSE;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		/* Premature writeback of Tx descriptors: clear (free the
+		 * buffer and unmap the pci_mapping of) the saved
+		 * previous_buffer_info */
+ if (likely(adapter->previous_buffer_info.skb != NULL)) {
+ e1000_unmap_and_free_tx_resource(adapter,
+ &adapter->previous_buffer_info);
+ }
+
+ for(cleaned = FALSE; !cleaned; ) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+			/* Premature writeback of Tx descriptors:
+			 * save the cleaning of this descriptor for
+			 * the next iteration */
+ if (cleaned) {
+ memcpy(&adapter->previous_buffer_info,
+ buffer_info,
+ sizeof(struct e1000_buffer));
+ memset(buffer_info,
+ 0,
+ sizeof(struct e1000_buffer));
+ } else {
+ e1000_unmap_and_free_tx_resource(adapter,
+ buffer_info);
+ }
+
+ tx_desc->buffer_addr = 0;
+ tx_desc->lower.data = 0;
+ tx_desc->upper.data = 0;
+
+ if(unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ spin_lock(&adapter->tx_lock);
+
+ if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev)))
+ netif_wake_queue(netdev);
+
+ spin_unlock(&adapter->tx_lock);
+
+ if(adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and the movement of i */
+ adapter->detect_tx_hung = FALSE;
+ if(tx_ring->buffer_info[i].dma &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) &&
+ !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
+ netif_stop_queue(netdev);
+ }
+
+ return cleaned;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter: board private structure
+ * @rx_desc: receive descriptor
+ * @sk_buff: socket buffer with received data
+ **/
+
+static inline void
+e1000_rx_checksum(struct e1000_adapter *adapter,
+ struct e1000_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ /* 82543 or newer only */
+ if(unlikely((adapter->hw.mac_type < e1000_82543) ||
+ /* Ignore Checksum bit is set */
+ (rx_desc->status & E1000_RXD_STAT_IXSM) ||
+ /* TCP Checksum has not been calculated */
+ (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* At this point we know the hardware did the TCP checksum */
+ /* now look at the TCP checksum error bit */
+ if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
+ /* let the stack verify checksum errors */
+ skb->ip_summed = CHECKSUM_NONE;
+ adapter->hw_csum_err++;
+ } else {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+ }
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+#ifdef CONFIG_E1000_NAPI
+e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
+ int work_to_do)
+#else
+e1000_clean_rx_irq(struct e1000_adapter *adapter)
+#endif
+{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned long flags;
+ uint32_t length;
+ uint8_t last_byte;
+ unsigned int i;
+ boolean_t cleaned = FALSE;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+
+ while(rx_desc->status & E1000_RXD_STAT_DD) {
+ buffer_info = &rx_ring->buffer_info[i];
+#ifdef CONFIG_E1000_NAPI
+ if(*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+#endif
+ cleaned = TRUE;
+
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer_info->skb;
+ length = le16_to_cpu(rx_desc->length);
+
+ if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
+ /* All receives must fit into a single buffer */
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ last_byte = *(skb->data + length - 1);
+ if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
+ rx_desc->errors, length, last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ e1000_tbi_adjust_stats(&adapter->hw,
+ &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ flags);
+ length--;
+ } else {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+ }
+
+ /* Good Receive */
+ skb_put(skb, length - ETHERNET_FCS_SIZE);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, rx_desc, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+#ifdef CONFIG_E1000_NAPI
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special) &
+ E1000_RXD_SPC_VLAN_MASK);
+ } else {
+ netif_receive_skb(skb);
+ }
+#else /* CONFIG_E1000_NAPI */
+ if(unlikely(adapter->vlgrp &&
+ (rx_desc->status & E1000_RXD_STAT_VP))) {
+ vlan_hwaccel_rx(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special) &
+ E1000_RXD_SPC_VLAN_MASK);
+ } else {
+ netif_rx(skb);
+ }
+#endif /* CONFIG_E1000_NAPI */
+ netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->status = 0;
+ buffer_info->skb = NULL;
+ if(unlikely(++i == rx_ring->count)) i = 0;
+
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ }
+
+ rx_ring->next_to_clean = i;
+
+ e1000_alloc_rx_buffers(adapter);
+
+ return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ **/
+
+static void
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
+{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i, bufsz;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while(!buffer_info->skb) {
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+ skb = dev_alloc_skb(bufsz);
+ if(unlikely(!skb)) {
+ /* Better luck next round */
+ break;
+ }
+
+		/* fix for errata 23: buffers can't cross a 64KB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ DPRINTK(RX_ERR,ERR,
+ "skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
+ /* try again, without freeing the previous */
+ skb = dev_alloc_skb(bufsz);
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ break;
+ }
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ break; /* while !buffer_info->skb */
+ } else {
+ /* move on with the new one */
+ dev_kfree_skb(oldskb);
+ }
+ }
+
+ /* Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ skb->dev = netdev;
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+		/* fix for errata 23: buffers can't cross a 64KB boundary */
+ if(!e1000_check_64k_bound(adapter,
+ (void *)(unsigned long)buffer_info->dma,
+ adapter->rx_buffer_len)) {
+ DPRINTK(RX_ERR,ERR,
+ "dma align check failed: %u bytes at %ld\n",
+ adapter->rx_buffer_len, (unsigned long)buffer_info->dma);
+
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ break; /* while !buffer_info->skb */
+ }
+
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ E1000_WRITE_REG(&adapter->hw, RDT, i);
+ }
+
+ if(unlikely(++i == rx_ring->count)) i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_smartspeed(struct e1000_adapter *adapter)
+{
+ uint16_t phy_status;
+ uint16_t phy_ctrl;
+
+ if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+ !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
+ return;
+
+ if(adapter->smartspeed == 0) {
+ /* If Master/Slave config fault is asserted twice,
+ * we assume back-to-back */
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+ if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+ if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+ if(phy_ctrl & CR_1000T_MS_ENABLE) {
+ phy_ctrl &= ~CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+ phy_ctrl);
+ adapter->smartspeed++;
+ if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
+ &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
+ phy_ctrl);
+ }
+ }
+ return;
+ } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+ /* If still no link, perhaps using 2/3 pair cable */
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+ phy_ctrl |= CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
+ if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
+ }
+ }
+ /* Restart process after E1000_SMARTSPEED_MAX iterations */
+ if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+ adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command to execute
+ **/
+
+static int
+e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * e1000_mii_ioctl - handle MII (PHY register) ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command to execute
+ **/
+
+static int
+e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int retval;
+ uint16_t mii_reg;
+ uint16_t spddplx;
+
+ if(adapter->hw.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy_addr;
+ break;
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out))
+ return -EIO;
+ break;
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data->reg_num & ~(0x1F))
+ return -EFAULT;
+ mii_reg = data->val_in;
+ if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
+ mii_reg))
+ return -EIO;
+ if (adapter->hw.phy_type == e1000_phy_m88) {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(mii_reg & MII_CR_AUTO_NEG_EN) {
+ adapter->hw.autoneg = 1;
+ adapter->hw.autoneg_advertised = 0x2F;
+ } else {
+ if (mii_reg & 0x40)
+ spddplx = SPEED_1000;
+ else if (mii_reg & 0x2000)
+ spddplx = SPEED_100;
+ else
+ spddplx = SPEED_10;
+ spddplx += (mii_reg & 0x100)
+ ? FULL_DUPLEX :
+ HALF_DUPLEX;
+ retval = e1000_set_spd_dplx(adapter,
+ spddplx);
+ if(retval)
+ return retval;
+ }
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ break;
+ case M88E1000_PHY_SPEC_CTRL:
+ case M88E1000_EXT_PHY_SPEC_CTRL:
+ if (e1000_phy_reset(&adapter->hw))
+ return -EIO;
+ break;
+ }
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if(mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if(netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ break;
+ }
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return E1000_SUCCESS;
+}
+
+void
+e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ int ret;
+ ret = pci_set_mwi(adapter->pdev);
+}
+
+void
+e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_clear_mwi(adapter->pdev);
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+uint32_t
+e1000_io_read(struct e1000_hw *hw, unsigned long port)
+{
+ return inl(port);
+}
+
+void
+e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
+{
+ outl(value, port);
+}
+
+static void
+e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t ctrl, rctl;
+
+ e1000_irq_disable(adapter);
+ adapter->vlgrp = grp;
+
+ if(grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl |= E1000_CTRL_VME;
+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+
+ /* enable VLAN receive filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+
+ /* disable VLAN filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ }
+
+ e1000_irq_enable(adapter);
+}
+
+static void
+e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t vfta, index;
+
+ /* add VID to filter table */
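+	/* e.g. (illustrative): vid 100 -> index = 3, bit 4 of VFTA[3] */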
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void
+e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t vfta, index;
+
+ e1000_irq_disable(adapter);
+
+ if(adapter->vlgrp)
+ adapter->vlgrp->vlan_devices[vid] = NULL;
+
+ e1000_irq_enable(adapter);
+
+ /* remove VID from filter table */
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void
+e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if(adapter->vlgrp) {
+ uint16_t vid;
+ for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if(!adapter->vlgrp->vlan_devices[vid])
+ continue;
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+int
+e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
+{
+ adapter->hw.autoneg = 0;
+
+ switch(spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ adapter->hw.forced_speed_duplex = e1000_10_half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ adapter->hw.forced_speed_duplex = e1000_10_full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ adapter->hw.forced_speed_duplex = e1000_100_half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ adapter->hw.forced_speed_duplex = e1000_100_full;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ adapter->hw.autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ DPRINTK(PROBE, ERR,
+			"Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
+{
+ struct pci_dev *pdev = NULL;
+
+ switch(event) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if(pci_dev_driver(pdev) == &e1000_driver)
+ e1000_suspend(pdev, 3);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static int
+e1000_suspend(struct pci_dev *pdev, uint32_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t ctrl, ctrl_ext, rctl, manc, status;
+ uint32_t wufc = adapter->wol;
+
+ netif_device_detach(netdev);
+
+ if(netif_running(netdev))
+ e1000_down(adapter);
+
+ status = E1000_READ_REG(&adapter->hw, STATUS);
+ if(status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if(wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_multi(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if(adapter->wol & E1000_WUFC_MC) {
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ }
+
+ if(adapter->hw.mac_type >= e1000_82540) {
+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC |
+ E1000_CTRL_EN_PHY_PWR_MGMT;
+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ }
+
+ if(adapter->hw.media_type == e1000_media_type_fiber ||
+ adapter->hw.media_type == e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
+ }
+
+ E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
+ pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ } else {
+ E1000_WRITE_REG(&adapter->hw, WUC, 0);
+ E1000_WRITE_REG(&adapter->hw, WUFC, 0);
+ pci_enable_wake(pdev, 3, 0);
+ pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+ }
+
+ pci_save_state(pdev);
+
+ if(adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
+ manc = E1000_READ_REG(&adapter->hw, MANC);
+ if(manc & E1000_MANC_SMBUS_EN) {
+ manc |= E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ }
+ }
+
+ pci_disable_device(pdev);
+
+ state = (state > 0) ? 3 : 0;
+ pci_set_power_state(pdev, state);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t manc, ret;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, 3, 0);
+ pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+
+ e1000_reset(adapter);
+ E1000_WRITE_REG(&adapter->hw, WUS, ~0);
+
+ if(netif_running(netdev))
+ e1000_up(adapter);
+
+ netif_device_attach(netdev);
+
+ if(adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
+ manc = E1000_READ_REG(&adapter->hw, MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void
+e1000_netpoll (struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev, NULL);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/* e1000_main.c */
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
new file mode 100644
index 000000000000..970c656a517c
--- /dev/null
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifndef msec_delay
+#define msec_delay(x) msleep(x)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context. Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
+
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+
+typedef enum {
+#undef FALSE
+ FALSE = 0,
+#undef TRUE
+ TRUE = 1
+} boolean_t;
+
+#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
+
+#ifdef DBG
+#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))))
+
+#define E1000_READ_REG(a, reg) ( \
+ readl((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2)))
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
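+
+/* Usage sketch (illustrative; the register offsets themselves come from
+ * e1000_hw.h): on an 82543 or newer MAC,
+ *	E1000_READ_REG(hw, STATUS)
+ * expands to readl(hw->hw_addr + E1000_STATUS), while on an 82542 it reads
+ * from the E1000_82542_STATUS offset instead. */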
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
new file mode 100644
index 000000000000..e914d09fe6f9
--- /dev/null
+++ b/drivers/net/e1000/e1000_param.c
@@ -0,0 +1,744 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static int num_##X = 0; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
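+
+/* Parameters declared this way take one value per NIC in probe order, e.g.
+ * (illustrative invocation):
+ *	modprobe e1000 TxDescriptors=256,4096 RxDescriptors=256,4096
+ * would give the first board 256 transmit descriptors and the second 4096. */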
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ * - 0 - auto-negotiate at all supported speeds
+ * - 10 - only link at 10 Mbps
+ * - 100 - only link at 100 Mbps
+ * - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ * - 0 - auto-negotiate for duplex
+ * - 1 - only link at half duplex
+ * - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
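+
+/* For example, the copper default of 0x2F decodes (per the table above) to
+ * advertising 1000/FD, 100/FD, 100/HD, 10/FD and 10/HD, while the fiber
+ * default of 0x20 advertises 1000/FD only. */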
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+
+E1000_PARAM(FlowControl, "Flow Control setting");
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on NICs based on the 82543 and newer controllers
+ *
+ * Default Value: 1
+ */
+
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 64
+ */
+
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 64
+ */
+
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 128
+ */
+
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic)
+ *
+ * Default Value: 8000
+ */
+
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+#define DEFAULT_RADV 128
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+#define DEFAULT_TIDV 64
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+#define DEFAULT_TADV 64
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+#define DEFAULT_ITR 8000
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct e1000_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit
+e1000_validate_option(int *value, struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if(*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ DPRINTK(PROBE, INFO,
+ "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct e1000_opt_list *ent;
+
+ for(i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if(*value == ent->i) {
+ if(ent->str[0] != '\0')
+ DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ DPRINTK(PROBE, INFO, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit
+e1000_check_options(struct e1000_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if(bd >= E1000_MAX_NIC) {
+ DPRINTK(PROBE, NOTICE,
+ "Warning: no configuration for board #%i\n", bd);
+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ }
+
+ { /* Transmit Descriptor Count */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_TXD),
+ .def = E1000_DEFAULT_TXD,
+ .arg = { .r = { .min = E1000_MIN_TXD }}
+ };
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+ opt.arg.r.max = mac_type < e1000_82544 ?
+ E1000_MAX_TXD : E1000_MAX_82544_TXD;
+
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(tx_ring->count,
+ REQ_TX_DESCRIPTOR_MULTIPLE);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ }
+ { /* Receive Descriptor Count */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_RXD),
+ .def = E1000_DEFAULT_RXD,
+ .arg = { .r = { .min = E1000_MIN_RXD }}
+ };
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+ opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD;
+
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(rx_ring->count,
+ REQ_RX_DESCRIPTOR_MULTIPLE);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ }
+ { /* Checksum Offload Enable/Disable */
+ struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_XsumRX > bd) {
+ int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ struct e1000_opt_list fc_list[] =
+ {{ e1000_fc_none, "Flow Control Disabled" },
+ { e1000_fc_rx_pause,"Flow Control Receive Only" },
+ { e1000_fc_tx_pause,"Flow Control Transmit Only" },
+ { e1000_fc_full, "Flow Control Enabled" },
+ { e1000_fc_default, "Flow Control Hardware Default" }};
+
+ struct e1000_option opt = {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = e1000_fc_default,
+ .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .p = fc_list }}
+ };
+
+ if (num_FlowControl > bd) {
+ int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
+ } else {
+ adapter->hw.fc = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY }}
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Absolute Interrupt Delay */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY }}
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY }}
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Receive Absolute Interrupt Delay */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY }}
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ struct e1000_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR }}
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch(adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ break;
+ case -1:
+ default:
+ e1000_validate_option(&adapter->itr, &opt,
+ adapter);
+ break;
+ }
+ } else {
+ adapter->itr = opt.def;
+ }
+ }
+
+ switch(adapter->hw.media_type) {
+ case e1000_media_type_fiber:
+ case e1000_media_type_internal_serdes:
+ e1000_check_fiber_options(adapter);
+ break;
+ case e1000_media_type_copper:
+ e1000_check_copper_options(adapter);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit
+e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if(num_Speed > bd) {
+ DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if(num_Duplex > bd) {
+ DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+ "not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit
+e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+ int speed, dplx;
+ int bd = adapter->bd_number;
+
+ { /* Speed */
+ struct e1000_opt_list speed_list[] = {{ 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }};
+
+ struct e1000_option opt = {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(speed_list),
+ .p = speed_list }}
+ };
+
+ if (num_Speed > bd) {
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
+ } else {
+ speed = opt.def;
+ }
+ }
+ { /* Duplex */
+ struct e1000_opt_list dplx_list[] = {{ 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }};
+
+ struct e1000_option opt = {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
+ .p = dplx_list }}
+ };
+
+ if (num_Duplex > bd) {
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
+ } else {
+ dplx = opt.def;
+ }
+ }
+
+ if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ DPRINTK(PROBE, INFO,
+ "AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
+ adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ } else { /* Autoneg */
+ struct e1000_opt_list an_list[] =
+ #define AA "AutoNeg advertising "
+ {{ 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+ struct e1000_option opt = {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = AUTONEG_ADV_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(an_list),
+ .p = an_list }}
+ };
+
+ int an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ adapter->hw.autoneg_advertised = an;
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ if((num_Speed > bd) && (speed != 0 || dplx != 0))
+ DPRINTK(PROBE, INFO,
+ "Speed and duplex autonegotiation enabled\n");
+ break;
+ case HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Half Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_100_HALF;
+ break;
+ case FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
+ break;
+ case SPEED_10:
+ DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100:
+ DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "100 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+ ADVERTISE_100_FULL;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_1000:
+ DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+ "Duplex\n");
+ DPRINTK(PROBE, INFO,
+ "Using Autonegotiation at 1000 Mbps "
+ "Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO,
+ "Half Duplex is not supported at 1000 Mbps\n");
+ DPRINTK(PROBE, INFO,
+ "Using Autonegotiation at 1000 Mbps "
+ "Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO,
+ "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Speed, AutoNeg and MDI/MDI-X must all play nice */
+ if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+ DPRINTK(PROBE, INFO,
+ "Speed, AutoNeg and MDI-X specifications are "
+ "incompatible. Setting MDI-X to a compatible value.\n");
+ }
+}
+
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
new file mode 100644
index 000000000000..51c9fa260830
--- /dev/null
+++ b/drivers/net/e2100.c
@@ -0,0 +1,485 @@
+/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
+/*
+ Written 1993-1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This is a driver for the Cabletron E2100 series ethercards.
+
+ The Author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ The E2100 series ethercard is a fairly generic shared memory 8390
+ implementation. The only unusual aspect is the way the shared memory
+ registers are set: first you do an inb() in what is normally the
+	station address region, and the low three bits of the next outb()
+	*address* are used as the write value for that register.  Either someone
+	wasn't too used to dem bit en bites, or they were trying to obfuscate the
+ programming interface.
+
+ There is an additional complication when setting the window on the packet
+	buffer.  You must first do a read into the packet buffer region, with the
+	low 8 address bits of the read address selecting the page at which the
+	packet buffer window starts, and then do the above operation.  See
+	mem_on() for details.
+
+ One bug on the chip is that even a hard reset won't disable the memory
+ window, usually resulting in a hung machine if mem_off() isn't called.
+ If this happens, you must power down the machine for about 30 seconds.
+*/
+
+static const char version[] =
+ "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "e2100"
+
+static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
+
+/* Offsets from the base_addr.
+ Read from the ASIC register, and the low three bits of the next outb()
+ address is used to set the corresponding register. */
+#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
+#define E21_ASIC 0x10
+#define E21_MEM_ENABLE 0x10
+#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
+#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
+#define E21_MEM_BASE 0x11
+#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
+#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
+#define E21_MEDIA 0x14 /* (alias). */
+#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
+#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
+#define E21_SAPROM 0x10 /* Offset to station address data. */
+#define E21_IO_EXTENT 0x20
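+
+/* A minimal sketch of the register-write idiom described above (for
+ * illustration only; mem_on()/mem_off() and e21_open() below are the real
+ * users).  To load a 3-bit value "val" into the ASIC register selected by
+ * offset "reg":
+ *
+ *	inb(ioaddr + reg);			-- select the register
+ *	outb(0, ioaddr + E21_ASIC + (val & 7));	-- low address bits carry val
+ */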
+
+static inline void mem_on(short port, volatile char __iomem *mem_base,
+ unsigned char start_page )
+{
+ /* This is a little weird: set the shared memory window by doing a
+ read. The low address bits specify the starting page. */
+ readb(mem_base+start_page);
+ inb(port + E21_MEM_ENABLE);
+ outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
+}
+
+static inline void mem_off(short port)
+{
+ inb(port + E21_MEM_ENABLE);
+ outb(0x00, port + E21_MEM_ENABLE);
+}
+
+/* In other drivers I put the TX pages first, but the E2100 window circuitry
+ is designed to have a 4K Tx region last. The windowing circuitry wraps the
+ window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
+ appear contiguously in the window. */
+#define E21_RX_START_PG 0x00 /* First page of RX buffer */
+#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
+#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
+
+static int e21_probe1(struct net_device *dev, int ioaddr);
+
+static int e21_open(struct net_device *dev);
+static void e21_reset_8390(struct net_device *dev);
+static void e21_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void e21_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int e21_close(struct net_device *dev);
+
+
+/* Probe for the E2100 series ethercards. These cards have an 8390 at the
+ base address and the station address at both offset 0x10 and 0x18. I read
+ the station address from offset 0x18 to avoid the dataport of NE2000
+ ethercards, and look for Ctron's unique ID (first three octets of the
+ station address).
+ */
+
+static int __init do_e2100_probe(struct net_device *dev)
+{
+ int *port;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return e21_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (port = e21_probe_list; *port; port++) {
+ dev->irq = irq;
+ if (e21_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: e21_close() handles free_irq */
+ iounmap(ei_status.mem);
+ release_region(dev->base_addr, E21_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init e2100_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_e2100_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init e21_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, status, retval;
+ unsigned char *station_addr = dev->dev_addr;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* First check the station address for the Ctron prefix. */
+ if (inb(ioaddr + E21_SAPROM + 0) != 0x00
+ || inb(ioaddr + E21_SAPROM + 1) != 0x00
+ || inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+	/* Verify by making certain that there is an 8390 there. */
+ outb(E8390_NODMA + E8390_STOP, ioaddr);
+ udelay(1); /* we want to delay one I/O cycle - which is 2MHz */
+ status = inb(ioaddr);
+ if (status != 0x21 && status != 0x23) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Read the station address PROM. */
+ for (i = 0; i < 6; i++)
+ station_addr[i] = inb(ioaddr + E21_SAPROM + i);
+
+ inb(ioaddr + E21_MEDIA); /* Point to media selection. */
+ outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ for (i = 0; i < 6; i++)
+ printk(" %02X", station_addr[i]);
+
+ if (dev->irq < 2) {
+ int irqlist[] = {15,11,10,12,5,9,3,4}, i;
+ for (i = 0; i < 8; i++)
+ if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
+ dev->irq = irqlist[i];
+ break;
+ }
+ if (i >= 8) {
+ printk(" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+ }
+ } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* The 8390 is at the base address. */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "E2100";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = E21_TX_START_PG;
+ ei_status.rx_start_page = E21_RX_START_PG;
+ ei_status.stop_page = E21_RX_STOP_PG;
+ ei_status.saved_irq = dev->irq;
+
+ /* Check the media port used. The port can be passed in on the
+ low mem_end bits. */
+ if (dev->mem_end & 15)
+ dev->if_port = dev->mem_end & 7;
+ else {
+ dev->if_port = 0;
+ inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
+ for(i = 0; i < 6; i++)
+ if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
+ dev->if_port = 1;
+ break;
+ }
+ }
+
+ /* Never map in the E21 shared memory unless you are actively using it.
+	   Also, the shared memory effectively has only one setting -- spread all
+ over the 128K region! */
+ if (dev->mem_start == 0)
+ dev->mem_start = 0xd0000;
+
+ ei_status.mem = ioremap(dev->mem_start, 2*1024);
+ if (!ei_status.mem) {
+ printk("unable to remap memory\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
+#ifdef notdef
+ /* These values are unused. The E2100 has a 2K window into the packet
+ buffer. The window can be set to start on any page boundary. */
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
+#endif
+
+ printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
+ dev->if_port ? "secondary" : "primary", dev->mem_start);
+
+ ei_status.reset_8390 = &e21_reset_8390;
+ ei_status.block_input = &e21_block_input;
+ ei_status.block_output = &e21_block_output;
+ ei_status.get_8390_hdr = &e21_get_8390_hdr;
+ dev->open = &e21_open;
+ dev->stop = &e21_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+ return 0;
+out:
+ release_region(ioaddr, E21_IO_EXTENT);
+ return retval;
+}
+
+static int
+e21_open(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int retval;
+
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
+ return retval;
+
+ /* Set the interrupt line and memory base on the hardware. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ + (dev->if_port ? E21_ALT_IFPORT : 0));
+ inb(ioaddr + E21_MEM_BASE);
+ outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
+
+ ei_open(dev);
+ return 0;
+}
+
+static void
+e21_reset_8390(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outb(0x01, ioaddr);
+ if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. We place the 2k window so that the header
+   page appears at the start of the shared memory. */
+
+static void
+e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ short ioaddr = dev->base_addr;
+ char __iomem *shared_mem = ei_status.mem;
+
+ mem_on(ioaddr, shared_mem, ring_page);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(shared_mem);
+#endif
+
+ /* Turn off memory access: we would need to reprogram the window anyway. */
+ mem_off(ioaddr);
+
+}
+
+/* Block input and output are easy on shared memory ethercards.
+ The E21xx makes block_input() especially easy by wrapping the top
+ ring buffer to the bottom automatically. */
+static void
+e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ short ioaddr = dev->base_addr;
+ char __iomem *shared_mem = ei_status.mem;
+
+ mem_on(ioaddr, shared_mem, (ring_offset>>8));
+
+ /* Packet is always in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, ei_status.mem + (ring_offset & 0xff), count, 0);
+
+ mem_off(ioaddr);
+}
+
+static void
+e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ short ioaddr = dev->base_addr;
+ volatile char __iomem *shared_mem = ei_status.mem;
+
+ /* Set the shared memory window start by doing a read, with the low address
+ bits specifying the starting page. */
+ readb(shared_mem + start_page);
+ mem_on(ioaddr, shared_mem, start_page);
+
+ memcpy_toio(shared_mem, buf, count);
+ mem_off(ioaddr);
+}
+
+static int
+e21_close(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ free_irq(dev->irq, dev);
+ dev->irq = ei_status.saved_irq;
+
+ /* Shut off the interrupt line and secondary interface. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC);
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC);
+
+ ei_close(dev);
+
+ /* Double-check that the memory has been turned off, because really
+ really bad things happen if it isn't. */
+ mem_off(ioaddr);
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
+static struct net_device *dev_e21[MAX_E21_CARDS];
+static int io[MAX_E21_CARDS];
+static int irq[MAX_E21_CARDS];
+static int mem[MAX_E21_CARDS];
+static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "memory base address(es)");
+MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
+MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ if (do_e2100_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_e21[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct net_device *dev = dev_e21[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
new file mode 100644
index 000000000000..cd2475683027
--- /dev/null
+++ b/drivers/net/eepro.c
@@ -0,0 +1,1865 @@
+/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
+/*
+ Written 1994, 1995,1996 by Bao C. Ha.
+
+ Copyright (C) 1994, 1995,1996 by Bao C. Ha.
+
+ This software may be used and distributed
+ according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ The author may be reached at bao.ha@srs.gov
+ or 418 Hastings Place, Martinez, GA 30907.
+
+ Things remaining to do:
+ Better record keeping of errors.
+ Eliminate transmit interrupt to reduce overhead.
+ Implement "concurrent processing". I won't be doing it!
+
+ Bugs:
+
+	If you have a problem with the 82595 not being detected after a
+	reboot (warm reset), disabling the FLASH memory should fix it.
+	This is a hardware compatibility problem.
+
+ Versions:
+ 0.13b basic ethtool support (aris, 09/13/2004)
+ 0.13a in memory shortage, drop packets also in board
+ (Michael Westermann <mw@microdata-pos.de>, 07/30/2002)
+ 0.13 irq sharing, rewrote probe function, fixed a nasty bug in
+ hardware_send_packet and a major cleanup (aris, 11/08/2001)
+ 0.12d fixing a problem with single card detected as eight eth devices
+ fixing a problem with sudden drop in card performance
+ (chris (asdn@go2.pl), 10/29/2001)
+ 0.12c fixing some problems with old cards (aris, 01/08/2001)
+ 0.12b misc fixes (aris, 06/26/2000)
+ 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
+ (aris (aris@conectiva.com.br), 05/19/2000)
+ 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999)
+ 0.11d added __initdata, __init stuff; call spin_lock_init
+ in eepro_probe1. Replaced "eepro" by dev->name. Augmented
+ the code protected by spin_lock in interrupt routine
+ (PdP, 12/12/1998)
+ 0.11c minor cleanup (PdP, RMC, 09/12/1998)
+ 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module
+ under 2.1.xx. Debug messages are flagged as KERN_DEBUG to
+ avoid console flooding. Added locking at critical parts. Now
+ the dawn thing is SMP safe.
+ 0.11a Attempt to get 2.1.xx support up (RMC)
+ 0.11 Brian Candler added support for multiple cards. Tested as
+ a module, no idea if it works when compiled into kernel.
+
+ 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails
+ because the irq is lost somewhere. Fixed that by moving
+ request_irq and free_irq to eepro_open and eepro_close respectively.
+ 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt.
+ I'll need to find a way to specify an ioport other than
+ the default one in the PnP case. PnP definitively sucks.
+ And, yes, this is not the only reason.
+ 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup;
+ to use.
+ 0.10b Should work now with (some) Pro/10+. At least for
+ me (and my two cards) it does. _No_ guarantee for
+ function with non-Pro/10+ cards! (don't have any)
+ (RMC, 9/11/96)
+
+ 0.10 Added support for the Etherexpress Pro/10+. The
+ IRQ map was changed significantly from the old
+ pro/10. The new interrupt map was provided by
+ Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
+ (BCH, 9/3/96)
+
+ 0.09 Fixed a race condition in the transmit algorithm,
+ which causes crashes under heavy load with fast
+ pentium computers. The performance should also
+ improve a bit. The size of RX buffer, and hence
+ TX buffer, can also be changed via lilo or insmod.
+ (BCH, 7/31/96)
+
+ 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
+ based lan cards. Disable full-duplex mode if TPE
+ is not used. (BCH, 4/8/96)
+
+ 0.07a Fix a stat report which counts every packet as a
+ heart-beat failure. (BCH, 6/3/95)
+
+ 0.07 Modified to support all other 82595-based lan cards.
+ The IRQ vector of the EtherExpress Pro will be set
+ according to the value saved in the EEPROM. For other
+ cards, I will do autoirq_request() to grab the next
+ available interrupt vector. (BCH, 3/17/95)
+
+ 0.06a,b Interim released. Minor changes in the comments and
+ print out format. (BCH, 3/9/95 and 3/14/95)
+
+ 0.06 First stable release that I am comfortable with. (BCH,
+ 3/2/95)
+
+ 0.05 Complete testing of multicast. (BCH, 2/23/95)
+
+ 0.04 Adding multicast support. (BCH, 2/14/95)
+
+ 0.03 First widely alpha release for public testing.
+ (BCH, 2/14/95)
+
+*/
+
+static const char version[] =
+ "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n";
+
+#include <linux/module.h>
+
+/*
+ Sources:
+
+	This driver wouldn't have been written without the availability
+	of the Crynwr Lan595 driver source code.  It helped me become
+	familiar with the 82595 chipset while waiting for the Intel
+	documentation.  I also learned how to detect the 82595 using
+	the packet driver's technique.
+
+ This driver is written by cutting and pasting the skeleton.c driver
+ provided by Donald Becker. I also borrowed the EEPROM routine from
+ Donald Becker's 82586 driver.
+
+ Datasheet for the Intel 82595 (including the TX and FX version). It
+ provides just enough info that the casual reader might think that it
+ documents the i82595.
+
+ The User Manual for the 82595. It provides a lot of the missing
+ information.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define DRV_NAME "eepro"
+#define DRV_VERSION "0.13b"
+
+#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
+/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
+#define SLOW_DOWN inb(0x80)
+/* udelay(2) */
+#define compat_init_data __initdata
+enum iftype { AUI=0, BNC=1, TPE=2 };
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int eepro_portlist[] compat_init_data =
+ { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
+/* note: 0x300 is default, the 595FX supports ALL IO Ports
+ from 0x000 to 0x3F0, some of which are reserved in PCs */
+
+/* To try the (not-really) PnP Wakeup: */
+/*
+#define PnPWakeup
+*/
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define EEPRO_IO_EXTENT 16
+
+/* Different 82595 chips */
+#define LAN595 0
+#define LAN595TX 1
+#define LAN595FX 2
+#define LAN595FX_10ISA 3
+
+/* Information that needs to be kept for each board. */
+struct eepro_local {
+ struct net_device_stats stats;
+ unsigned rx_start;
+ unsigned tx_start; /* start of the transmit chain */
+ int tx_last; /* pointer to last packet in the transmit chain */
+ unsigned tx_end; /* end of the transmit chain (plus 1) */
+ int eepro; /* 1 for the EtherExpress Pro/10,
+ 2 for the EtherExpress Pro/10+,
+ 3 for the EtherExpress 10 (blue cards),
+ 0 for other 82595-based lan cards. */
+ int version; /* a flag to indicate if this is a TX or FX
+ version of the 82595 chip. */
+ int stepping;
+
+ spinlock_t lock; /* Serializing lock */
+
+ unsigned rcv_ram; /* pre-calculated space for rx */
+ unsigned xmt_ram; /* pre-calculated space for tx */
+ unsigned char xmt_bar;
+ unsigned char xmt_lower_limit_reg;
+ unsigned char xmt_upper_limit_reg;
+ short xmt_lower_limit;
+ short xmt_upper_limit;
+ short rcv_lower_limit;
+ short rcv_upper_limit;
+ unsigned char eeprom_reg;
+ unsigned short word[8];
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
+#define SA_ADDR1 0xaa
+#define SA_ADDR2 0x00
+
+#define GetBit(x,y) ((x & (1<<y))>>y)
+
+/* EEPROM Word 0: */
+#define ee_PnP 0 /* Plug 'n Play enable bit */
+#define ee_Word1 1 /* Word 1? */
+#define ee_BusWidth 2 /* 8/16 bit */
+#define ee_FlashAddr 3 /* Flash Address */
+#define ee_FlashMask 0x7 /* Mask */
+#define ee_AutoIO 6 /* */
+#define ee_reserved0 7 /* =0! */
+#define ee_Flash 8 /* Flash there? */
+#define ee_AutoNeg 9 /* Auto Negotiation enabled? */
+#define ee_IO0 10 /* IO Address LSB */
+#define ee_IO0Mask 0x /*...*/
+#define ee_IO1 15 /* IO MSB */
+
+/* EEPROM Word 1: */
+#define ee_IntSel 0 /* Interrupt */
+#define ee_IntMask 0x7
+#define ee_LI 3 /* Link Integrity 0= enabled */
+#define ee_PC 4 /* Polarity Correction 0= enabled */
+#define ee_TPE_AUI 5 /* PortSelection 1=TPE */
+#define ee_Jabber 6 /* Jabber prevention 0= enabled */
+#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */
+#define ee_SMOUT 8 /* SMout Pin Control 0= Input */
+#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */
+#define ee_reserved1 10 /* .. 12 =0! */
+#define ee_AltReady 13 /* Alternate Ready, 0=normal */
+#define ee_reserved2 14 /* =0! */
+#define ee_Duplex 15
+
+/* Word2,3,4: */
+#define ee_IA5 0 /*bit start for individual Addr Byte 5 */
+#define ee_IA4	8	/*bit start for individual Addr Byte 4 */
+#define ee_IA3	0	/*bit start for individual Addr Byte 3 */
+#define ee_IA2	8	/*bit start for individual Addr Byte 2 */
+#define ee_IA1	0	/*bit start for individual Addr Byte 1 */
+#define ee_IA0	8	/*bit start for individual Addr Byte 0 */
+
+/* Word 5: */
+#define ee_BNC_TPE 0 /* 0=TPE */
+#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */
+#define ee_BootTypeMask 0x3
+#define ee_NumConn 3 /* Number of Connections 0= One or Two */
+#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */
+#define ee_PortTPE 5
+#define ee_PortBNC 6
+#define ee_PortAUI 7
+#define ee_PowerMgt 10 /* 0= disabled */
+#define ee_CP 13 /* Concurrent Processing */
+#define ee_CPMask 0x7
+
+/* Word 6: */
+#define ee_Stepping 0 /* Stepping info */
+#define ee_StepMask 0x0F
+#define ee_BoardID	4	/* Manufacturer Board ID, reserved */
+#define ee_BoardMask 0x0FFF
+
+/* Word 7: */
+#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */
+#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */
+
+/*..*/
+#define ee_SIZE 0x40 /* total EEprom Size */
+#define ee_Checksum 0xBABA /* initial and final value for adding checksum */
+
+
+/* Card identification via EEprom: */
+#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */
+#define ee_addr_id 0x11 /* Word offset for Card ID */
+#define ee_addr_SN 0x12 /* Serial Number */
+#define ee_addr_CRC_8	0x14	/* CRC over last three Bytes */
+
+
+#define ee_vendor_intel0 0x25 /* Vendor ID Intel */
+#define ee_vendor_intel1 0xD4
+#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
+#define ee_id_eepro10p1 0x31
+
+#define TX_TIMEOUT 40
+
+/* Index to functions, as function prototypes. */
+
+static int eepro_probe1(struct net_device *dev, int autoprobe);
+static int eepro_open(struct net_device *dev);
+static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t eepro_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eepro_rx(struct net_device *dev);
+static void eepro_transmit_interrupt(struct net_device *dev);
+static int eepro_close(struct net_device *dev);
+static struct net_device_stats *eepro_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void eepro_tx_timeout (struct net_device *dev);
+
+static int read_eeprom(int ioaddr, int location, struct net_device *dev);
+static int hardware_send_packet(struct net_device *dev, void *buf, short length);
+static int eepro_grab_irq(struct net_device *dev);
+
+/*
+ Details of the i82595.
+
+You will need either the datasheet or the user manual to understand what
+is going on here. The 82595 is very different from the 82586, 82593.
+
+The receive algorithm in eepro_rx() is just an implementation of the
+RCV ring structure that the Intel 82595 imposes at the hardware level.
+The receive buffer is set at 24K, and the transmit buffer is 8K. I
+am assuming that the total buffer memory is 32K, which is true for the
+Intel EtherExpress Pro/10. If it is less than that on a generic card,
+the driver will be broken.
+
+The transmit algorithm in the hardware_send_packet() is similar to the
+one in the eepro_rx(). The transmit buffer is a ring linked list.
+I just queue the next available packet to the end of the list. In my
+system, the 82595 is so fast that the list seems to always contain a
+single packet. In other systems with faster computers and more congested
+network traffic, the ring linked list should improve performance by
+allowing up to 8K worth of packets to be queued.
+
+The sizes of the receive and transmit buffers can now be changed via lilo
+or insmod.  Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
+where rx-buffer is in KB.  Modules use the parameter mem, which is also
+in KB, for example "insmod io=io-address irq=0 mem=rx-buffer".
+The receive buffer has to be more than 3K and less than 29K.  Otherwise,
+it is reset to the default of 24K and, hence, 8K for the transmit
+buffer (transmit-buffer = 32K - receive-buffer).
+
+*/
+#define RAM_SIZE 0x8000
+
+#define RCV_HEADER 8
+#define RCV_DEFAULT_RAM 0x6000
+
+#define XMT_HEADER 8
+#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM)
+
+#define XMT_START_PRO RCV_DEFAULT_RAM
+#define XMT_START_10 0x0000
+#define RCV_START_PRO 0x0000
+#define RCV_START_10 XMT_DEFAULT_RAM
+
+#define RCV_DONE 0x0008
+#define RX_OK 0x2000
+#define RX_ERROR 0x0d81
+
+#define TX_DONE_BIT 0x0080
+#define TX_OK 0x2000
+#define CHAIN_BIT 0x8000
+#define XMT_STATUS 0x02
+#define XMT_CHAIN 0x04
+#define XMT_COUNT 0x06
+
+#define BANK0_SELECT 0x00
+#define BANK1_SELECT 0x40
+#define BANK2_SELECT 0x80
+
+/* Bank 0 registers */
+#define COMMAND_REG 0x00 /* Register 0 */
+#define MC_SETUP 0x03
+#define XMT_CMD 0x04
+#define DIAGNOSE_CMD 0x07
+#define RCV_ENABLE_CMD 0x08
+#define RCV_DISABLE_CMD 0x0a
+#define STOP_RCV_CMD 0x0b
+#define RESET_CMD 0x0e
+#define POWER_DOWN_CMD 0x18
+#define RESUME_XMT_CMD 0x1c
+#define SEL_RESET_CMD 0x1e
+#define STATUS_REG 0x01 /* Register 1 */
+#define RX_INT 0x02
+#define TX_INT 0x04
+#define EXEC_STATUS 0x30
+#define ID_REG 0x02 /* Register 2 */
+#define R_ROBIN_BITS 0xc0 /* round robin counter */
+#define ID_REG_MASK 0x2c
+#define ID_REG_SIG 0x24
+#define AUTO_ENABLE 0x10
+#define INT_MASK_REG 0x03 /* Register 3 */
+#define RX_STOP_MASK 0x01
+#define RX_MASK 0x02
+#define TX_MASK 0x04
+#define EXEC_MASK 0x08
+#define ALL_MASK 0x0f
+#define IO_32_BIT 0x10
+#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
+#define RCV_STOP 0x06
+
+#define XMT_BAR_PRO 0x0a
+#define XMT_BAR_10 0x0b
+
+#define HOST_ADDRESS_REG 0x0c
+#define IO_PORT 0x0e
+#define IO_PORT_32_BIT 0x0c
+
+/* Bank 1 registers */
+#define REG1 0x01
+#define WORD_WIDTH 0x02
+#define INT_ENABLE 0x80
+#define INT_NO_REG 0x02
+#define RCV_LOWER_LIMIT_REG 0x08
+#define RCV_UPPER_LIMIT_REG 0x09
+
+#define XMT_LOWER_LIMIT_REG_PRO 0x0a
+#define XMT_UPPER_LIMIT_REG_PRO 0x0b
+#define XMT_LOWER_LIMIT_REG_10 0x0b
+#define XMT_UPPER_LIMIT_REG_10 0x0a
+
+/* Bank 2 registers */
+#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
+#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
+#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
+#define REG2 0x02
+#define PRMSC_Mode 0x01
+#define Multi_IA 0x20
+#define REG3 0x03
+#define TPE_BIT 0x04
+#define BNC_BIT 0x20
+#define REG13 0x0d
+#define FDX 0x00
+#define A_N_ENABLE 0x02
+
+#define I_ADD_REG0 0x04
+#define I_ADD_REG1 0x05
+#define I_ADD_REG2 0x06
+#define I_ADD_REG3 0x07
+#define I_ADD_REG4 0x08
+#define I_ADD_REG5 0x09
+
+#define EEPROM_REG_PRO 0x0a
+#define EEPROM_REG_10 0x0b
+
+#define EESK 0x01
+#define EECS 0x02
+#define EEDI 0x04
+#define EEDO 0x08
+
+/* do a full reset */
+#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr)
+
+/* do a nice reset */
+#define eepro_sel_reset(ioaddr) { \
+ outb(SEL_RESET_CMD, ioaddr); \
+ SLOW_DOWN; \
+ SLOW_DOWN; \
+ }
+
+/* disable all interrupts */
+#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG)
+
+/* clear all interrupts */
+#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG)
+
+/* enable tx/rx */
+#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \
+ ioaddr + INT_MASK_REG)
+
+/* enable exec event interrupt */
+#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG)
+
+/* enable rx */
+#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr)
+
+/* disable rx */
+#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr)
+
+/* switch bank */
+#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr)
+#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr)
+#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr)
+
+/* enable interrupt line */
+#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\
+ ioaddr + REG1)
+
+/* disable interrupt line */
+#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \
+ ioaddr + REG1);
+
+/* set diagnose flag */
+#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
+
+/* ack for rx int */
+#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
+
+/* ack for tx int */
+#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG)
+
+/* a complete sel reset */
+#define eepro_complete_selreset(ioaddr) { \
+ lp->stats.tx_errors++;\
+ eepro_sel_reset(ioaddr);\
+ lp->tx_end = \
+ lp->xmt_lower_limit;\
+ lp->tx_start = lp->tx_end;\
+ lp->tx_last = 0;\
+ dev->trans_start = jiffies;\
+ netif_wake_queue(dev);\
+ eepro_en_rx(ioaddr);\
+ }
+
+/* Check for a network adaptor of this type, and return '0' if one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+static int __init do_eepro_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+#ifdef PnPWakeup
+ /* XXXX for multiple cards should this only be run once? */
+
+ /* Wakeup: */
+ #define WakeupPort 0x279
+ #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\
+ 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\
+ 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\
+ 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43}
+
+ {
+ unsigned short int WS[32]=WakeupSeq;
+
+ if (check_region(WakeupPort, 2)==0) {
+
+ if (net_debug>5)
+ printk(KERN_DEBUG "Waking UP\n");
+
+ outb_p(0,WakeupPort);
+ outb_p(0,WakeupPort);
+ for (i=0; i<32; i++) {
+ outb_p(WS[i],WakeupPort);
+ if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
+ }
+ } else printk(KERN_WARNING "Checkregion Failed!\n");
+ }
+#endif
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eepro_probe1(dev, 0);
+
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; eepro_portlist[i]; i++) {
+ dev->base_addr = eepro_portlist[i];
+ dev->irq = irq;
+ if (eepro_probe1(dev, 1) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init eepro_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ SET_MODULE_OWNER(dev);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_eepro_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static void __init printEEPROMInfo(struct net_device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned short Word;
+ int i,j;
+
+ j = ee_Checksum;
+ for (i = 0; i < 8; i++)
+ j += lp->word[i];
+ for ( ; i < ee_SIZE; i++)
+ j += read_eeprom(ioaddr, i, dev);
+
+ printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff);
+
+ Word = lp->word[0];
+ printk(KERN_DEBUG "Word0:\n");
+ printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP));
+ printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 );
+ printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg));
+ printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4);
+
+ if (net_debug>4) {
+ Word = lp->word[1];
+ printk(KERN_DEBUG "Word1:\n");
+ printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask);
+ printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI));
+ printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
+ printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
+ printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
+ printk(KERN_DEBUG " AutoPort: %d\n", GetBit(!Word,ee_Jabber));
+ printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
+ }
+
+ Word = lp->word[5];
+ printk(KERN_DEBUG "Word5:\n");
+ printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE));
+ printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn));
+ printk(KERN_DEBUG " Has ");
+ if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
+ if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
+ if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
+ printk(KERN_DEBUG "port(s) \n");
+
+ Word = lp->word[6];
+ printk(KERN_DEBUG "Word6:\n");
+ printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask);
+ printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID);
+
+ Word = lp->word[7];
+ printk(KERN_DEBUG "Word7:\n");
+ printk(KERN_DEBUG " INT to IRQ:\n");
+
+ for (i=0, j=0; i<15; i++)
+ if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i);
+
+ printk(KERN_DEBUG "\n");
+}
+
+/* function to recalculate the limits of buffer based on rcv_ram */
+static void eepro_recalc (struct net_device *dev)
+{
+ struct eepro_local * lp;
+
+ lp = netdev_priv(dev);
+ lp->xmt_ram = RAM_SIZE - lp->rcv_ram;
+
+ if (lp->eepro == LAN595FX_10ISA) {
+ lp->xmt_lower_limit = XMT_START_10;
+ lp->xmt_upper_limit = (lp->xmt_ram - 2);
+ lp->rcv_lower_limit = lp->xmt_ram;
+ lp->rcv_upper_limit = (RAM_SIZE - 2);
+ }
+ else {
+ lp->rcv_lower_limit = RCV_START_PRO;
+ lp->rcv_upper_limit = (lp->rcv_ram - 2);
+ lp->xmt_lower_limit = lp->rcv_ram;
+ lp->xmt_upper_limit = (RAM_SIZE - 2);
+ }
+}
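+
+/* Worked example (for illustration): with the default rcv_ram of 0x6000
+ * (24K) and RAM_SIZE of 0x8000 (32K), the non-10ISA branch above yields
+ *
+ *	rcv_lower_limit = 0x0000, rcv_upper_limit = 0x5ffe,
+ *	xmt_lower_limit = 0x6000, xmt_upper_limit = 0x7ffe,
+ *
+ * i.e. a 24K receive ring followed by an 8K transmit ring, as described in
+ * the comment block near the top of this file. */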
+
+/* prints boot-time info */
+static void __init eepro_print_info (struct net_device *dev)
+{
+ struct eepro_local * lp = netdev_priv(dev);
+ int i;
+ const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
+
+ i = inb(dev->base_addr + ID_REG);
+ printk(KERN_DEBUG " id: %#x ",i);
+ printk(" io: %#x ", (unsigned)dev->base_addr);
+
+ switch (lp->eepro) {
+ case LAN595FX_10ISA:
+ printk("%s: Intel EtherExpress 10 ISA\n at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595FX:
+ printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595TX:
+ printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595:
+ printk("%s: Intel 82595-based lan card at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ }
+
+ for (i=0; i < 6; i++)
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG ", %dK RCV buffer",
+ (int)(lp->rcv_ram)/1024);
+
+ if (dev->irq > 2)
+ printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
+ else
+ printk(", %s.\n", ifmap[dev->if_port]);
+
+ if (net_debug > 3) {
+ i = lp->word[5];
+ if (i & 0x2000) /* bit 13 of EEPROM word 5 */
+ printk(KERN_DEBUG "%s: Concurrent Processing is "
+ "enabled but not used!\n", dev->name);
+ }
+
+ /* Check the station address for the manufacturer's code */
+ if (net_debug>3)
+ printEEPROMInfo(dev);
+}
+
+static struct ethtool_ops eepro_ethtool_ops;
+
+/* This is the real probe routine. Linux has a history of friendly device
+ probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int __init eepro_probe1(struct net_device *dev, int autoprobe)
+{
+ unsigned short station_addr[3], id, counter;
+ int i;
+ struct eepro_local *lp;
+ int ioaddr = dev->base_addr;
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
+ if (!autoprobe)
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
+ ioaddr);
+ return -EBUSY;
+ }
+
+ /* Now, we are going to check for the signature of the
+ ID_REG (register 2 of bank 0) */
+
+ id = inb(ioaddr + ID_REG);
+
+ if ((id & ID_REG_MASK) != ID_REG_SIG)
+ goto exit;
+
+ /* We seem to have the 82595 signature, let's
+ play with its counter (last 2 bits of
+ register 2 of bank 0) to be sure. */
+
+ counter = id & R_ROBIN_BITS;
+
+ if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40))
+ goto exit;
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(struct eepro_local));
+ lp->xmt_bar = XMT_BAR_PRO;
+ lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO;
+ lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO;
+ lp->eeprom_reg = EEPROM_REG_PRO;
+ spin_lock_init(&lp->lock);
+
+ /* Now, get the ethernet hardware address from
+ the EEPROM */
+ station_addr[0] = read_eeprom(ioaddr, 2, dev);
+
+ /* FIXME - find another way to know that we've found
+ * an Etherexpress 10
+ */
+ if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) {
+ lp->eepro = LAN595FX_10ISA;
+ lp->eeprom_reg = EEPROM_REG_10;
+ lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10;
+ lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10;
+ lp->xmt_bar = XMT_BAR_10;
+ station_addr[0] = read_eeprom(ioaddr, 2, dev);
+ }
+
+ /* get all words at once. will be used here and for ethtool */
+ for (i = 0; i < 8; i++) {
+ lp->word[i] = read_eeprom(ioaddr, i, dev);
+ }
+ station_addr[1] = lp->word[3];
+ station_addr[2] = lp->word[4];
+
+ if (!lp->eepro) {
+ if (lp->word[7] == ee_FX_INT2IRQ)
+ lp->eepro = 2;
+ else if (station_addr[2] == SA_ADDR1)
+ lp->eepro = 1;
+ }
+
+ /* Fill in the 'dev' fields. */
+ for (i=0; i < 6; i++)
+ dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
+
+ /* RX buffer must be more than 3K and less than 29K */
+ if (dev->mem_end < 3072 || dev->mem_end > 29696)
+ lp->rcv_ram = RCV_DEFAULT_RAM;
+
+ /* calculate {xmt,rcv}_{lower,upper}_limit */
+ eepro_recalc(dev);
+
+ if (GetBit(lp->word[5], ee_BNC_TPE))
+ dev->if_port = BNC;
+ else
+ dev->if_port = TPE;
+
+ if (dev->irq < 2 && lp->eepro != 0) {
+ /* Mask off INT number */
+ int count = lp->word[1] & 7;
+ unsigned irqMask = lp->word[7];
+
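+		/* Each set bit in EEPROM word 7 marks an INT-pin-to-IRQ
+		   mapping.  "irqMask &= irqMask - 1" clears the lowest set
+		   bit, so after "count" iterations ffs() yields the bit
+		   position (i.e. the IRQ) corresponding to the INT number
+		   stored in word 1. */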
+ while (count--)
+ irqMask &= irqMask - 1;
+
+ count = ffs(irqMask);
+
+ if (count)
+ dev->irq = count - 1;
+
+ if (dev->irq < 2) {
+ printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
+ goto exit;
+ } else if (dev->irq == 2) {
+ dev->irq = 9;
+ }
+ }
+
+ dev->open = eepro_open;
+ dev->stop = eepro_close;
+ dev->hard_start_xmit = eepro_send_packet;
+ dev->get_stats = eepro_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->tx_timeout = eepro_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &eepro_ethtool_ops;
+
+ /* print boot time info */
+ eepro_print_info(dev);
+
+ /* reset 82595 */
+ eepro_reset(ioaddr);
+ return 0;
+exit:
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+ return -ENODEV;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+
+static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+static int eepro_grab_irq(struct net_device *dev)
+{
+ int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
+ int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Enable the interrupt line. */
+ eepro_en_intline(ioaddr);
+
+ /* be CAREFUL, BANK 0 now */
+ eepro_sw2bank0(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+	/* Let the EXEC event interrupt */
+ eepro_en_intexec(ioaddr);
+
+ do {
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+		if (request_irq (*irqp, NULL, SA_SHIRQ, "bogus", dev) != -EBUSY) {
+ unsigned long irq_mask;
+ /* Twinkle the interrupt, and check if it's seen */
+ irq_mask = probe_irq_on();
+
+ eepro_diag(ioaddr); /* RESET the 82595 */
+ mdelay(20);
+
+ if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */
+ break;
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+ }
+ } while (*++irqp);
+
+ eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ eepro_dis_intline(ioaddr);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+ /* Mask all the interrupts. */
+ eepro_dis_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ return dev->irq;
+}
+
+static int eepro_open(struct net_device *dev)
+{
+ unsigned short temp_reg, old8, old9;
+ int irqMask;
+ int i, ioaddr = dev->base_addr;
+ struct eepro_local *lp = netdev_priv(dev);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name);
+
+ irqMask = lp->word[7];
+
+ if (lp->eepro == LAN595FX_10ISA) {
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n");
+ }
+ else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */
+ {
+ lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n");
+ }
+
+ else if ((dev->dev_addr[0] == SA_ADDR0 &&
+ dev->dev_addr[1] == SA_ADDR1 &&
+ dev->dev_addr[2] == SA_ADDR2))
+ {
+ lp->eepro = 1;
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n");
+ } /* Yes, an Intel EtherExpress Pro/10 */
+
+	else lp->eepro = 0; /* No, it is a generic 82595 lan card */
+
+ /* Get the interrupt vector for the 82595 */
+ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+#ifdef irq2dev_map
+ if (((irq2dev_map[dev->irq] != 0)
+ || (irq2dev_map[dev->irq] = dev) == 0) &&
+ (irq2dev_map[dev->irq]!=dev)) {
+ /* printk("%s: IRQ map wrong\n", dev->name); */
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+#endif
+
+ /* Initialize the 82595. */
+
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + lp->eeprom_reg);
+
+ lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping);
+
+ if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
+ outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg);
+ for (i=0; i < 6; i++)
+ outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
+
+ temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
+ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
+ | RCV_Discard_BadFrame, ioaddr + REG1);
+
+ temp_reg = inb(ioaddr + REG2); /* Match broadcast */
+ outb(temp_reg | 0x14, ioaddr + REG2);
+
+ temp_reg = inb(ioaddr + REG3);
+ outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
+
+ /* Set the receiving mode */
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Set the interrupt vector */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
+ outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
+ outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg);
+
+
+ /* Initialize the RCV and XMT upper and lower limits */
+ outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
+ outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
+ outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
+ outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
+
+ /* Enable the interrupt line. */
+ eepro_en_intline(ioaddr);
+
+ /* Switch back to Bank 0 */
+ eepro_sw2bank0(ioaddr);
+
+	/* Let RX and TX events interrupt */
+ eepro_en_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ /* Initialize RCV */
+ outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
+ lp->rx_start = lp->rcv_lower_limit;
+ outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
+
+ /* Initialize XMT */
+ outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
+ lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
+ lp->tx_last = 0;
+
+ /* Check for the i82595TX and i82595FX */
+ old8 = inb(ioaddr + 8);
+ outb(~old8, ioaddr + 8);
+
+ if ((temp_reg = inb(ioaddr + 8)) == old8) {
+ if (net_debug > 3)
+ printk(KERN_DEBUG "i82595 detected!\n");
+ lp->version = LAN595;
+ }
+ else {
+ lp->version = LAN595TX;
+ outb(old8, ioaddr + 8);
+ old9 = inb(ioaddr + 9);
+
+ if (irqMask==ee_FX_INT2IRQ) {
+ if (net_debug > 3) {
+ printk(KERN_DEBUG "IrqMask: %#x\n",irqMask);
+ printk(KERN_DEBUG "i82595FX detected!\n");
+ }
+ lp->version = LAN595FX;
+ outb(old9, ioaddr + 9);
+ if (dev->if_port != TPE) { /* Hopefully, this will fix the
+ problem of using Pentiums and
+ pro/10 w/ BNC. */
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + REG13);
+ /* disable the full duplex mode since it is not
+ applicable with the 10Base2 cable. */
+				outb(temp_reg & ~(FDX | A_N_ENABLE), ioaddr + REG13);
+ eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */
+ }
+ }
+ else if (net_debug > 3) {
+ printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
+ printk(KERN_DEBUG "i82595TX detected!\n");
+ }
+ }
+
+ eepro_sel_reset(ioaddr);
+
+ netif_start_queue(dev);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name);
+
+ /* enabling rx */
+ eepro_en_rx(ioaddr);
+
+ return 0;
+}
+
+static void eepro_tx_timeout (struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* if (net_debug > 1) */
+ printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+ /* This is not a duplicate. One message for the console,
+	   one for the log file */
+ printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+ eepro_complete_selreset(ioaddr);
+}
+
+
+static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ short length = skb->len;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ netif_stop_queue (dev);
+
+ eepro_dis_int(ioaddr);
+ spin_lock_irqsave(&lp->lock, flags);
+
+ {
+ unsigned char *buf = skb->data;
+
+ if (hardware_send_packet(dev, buf, length))
+ /* we won't wake queue here because we're out of space */
+ lp->stats.tx_dropped++;
+ else {
+ lp->stats.tx_bytes+=skb->len;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ }
+
+ }
+
+ dev_kfree_skb (skb);
+
+ /* You might need to clean up and record Tx statistics here. */
+ /* lp->stats.tx_aborted_errors++; */
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
+
+ eepro_en_int(ioaddr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static irqreturn_t
+eepro_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ /* (struct net_device *)(irq2dev_map[irq]);*/
+ struct eepro_local *lp;
+ int ioaddr, status, boguscount = 20;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk (KERN_ERR "eepro_interrupt(): irq %d for unknown device.\\n", irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name);
+
+ ioaddr = dev->base_addr;
+
+ while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--))
+ {
+ handled = 1;
+ if (status & RX_INT) {
+ if (net_debug > 4)
+ printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name);
+
+ eepro_dis_int(ioaddr);
+
+ /* Get the received packets */
+ eepro_ack_rx(ioaddr);
+ eepro_rx(dev);
+
+ eepro_en_int(ioaddr);
+ }
+ if (status & TX_INT) {
+ if (net_debug > 4)
+ printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name);
+
+
+ eepro_dis_int(ioaddr);
+
+ /* Process the status of transmitted packets */
+ eepro_ack_tx(ioaddr);
+ eepro_transmit_interrupt(dev);
+
+ eepro_en_int(ioaddr);
+ }
+ }
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name);
+
+ spin_unlock(&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static int eepro_close(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short temp_reg;
+
+ netif_stop_queue(dev);
+
+ eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+ /* Flush the Tx and disable Rx. */
+ outb(STOP_RCV_CMD, ioaddr);
+ lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
+ lp->tx_last = 0;
+
+ /* Mask all the interrupts. */
+ eepro_dis_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ /* Reset the 82595 */
+ eepro_reset(ioaddr);
+
+ /* release the interrupt */
+ free_irq(dev->irq, dev);
+
+#ifdef irq2dev_map
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ /* Update the statistics here. What statistics? */
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+eepro_get_stats(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ unsigned short mode;
+ struct dev_mc_list *dmi=dev->mc_list;
+
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ dev->flags|=IFF_PROMISC;
+
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | PRMSC_Mode, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
+ printk(KERN_INFO "%s: promiscuous mode enabled.\n", dev->name);
+ }
+
+ else if (dev->mc_count==0 )
+ {
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
+ }
+
+ else
+ {
+ unsigned short status, *eaddrs;
+ int i, boguscount = 0;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ eepro_dis_int(ioaddr);
+
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | Multi_IA, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
+ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
+ outw(MC_SETUP, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+
+ for (i = 0; i < dev->mc_count; i++)
+ {
+ eaddrs=(unsigned short *)dmi->dmi_addr;
+ dmi=dmi->next;
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ }
+
+ eaddrs = (unsigned short *) dev->dev_addr;
+ outw(eaddrs[0], ioaddr + IO_PORT);
+ outw(eaddrs[1], ioaddr + IO_PORT);
+ outw(eaddrs[2], ioaddr + IO_PORT);
+ outw(lp->tx_end, ioaddr + lp->xmt_bar);
+ outb(MC_SETUP, ioaddr);
+
+ /* Update the transmit queue */
+ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+
+ if (lp->tx_start != lp->tx_end)
+ {
+ /* update the next address and the chain bit in the
+ last packet */
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(i, ioaddr + IO_PORT);
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ lp->tx_end = i ;
+ }
+ else {
+ lp->tx_start = lp->tx_end = i ;
+ }
+
+ /* Acknowledge that the MC setup is done */
+ do { /* We should be doing this in the eepro_interrupt()! */
+ SLOW_DOWN;
+ SLOW_DOWN;
+ if (inb(ioaddr + STATUS_REG) & 0x08)
+ {
+ i = inb(ioaddr);
+ outb(0x08, ioaddr + STATUS_REG);
+
+ if (i & 0x20) { /* command ABORTed */
+ printk(KERN_NOTICE "%s: multicast setup failed.\n",
+ dev->name);
+ break;
+ } else if ((i & 0x0f) == 0x03) { /* MC-Done */
+ printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
+ dev->name, dev->mc_count,
+ dev->mc_count > 1 ? "es":"");
+ break;
+ }
+ }
+ } while (++boguscount < 100);
+
+ /* Re-enable RX and TX interrupts */
+ eepro_en_int(ioaddr);
+ }
+ if (lp->eepro == LAN595FX_10ISA) {
+ eepro_complete_selreset(ioaddr);
+ }
+ else
+ eepro_en_rx(ioaddr);
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
+
+/* The delay between EEPROM clock transitions. */
+#define eeprom_delay() { udelay(40); }
+#define EE_READ_CMD (6 << 6)
+
+int
+read_eeprom(int ioaddr, int location, struct net_device *dev)
+{
+ int i;
+ unsigned short retval = 0;
+ struct eepro_local *lp = netdev_priv(dev);
+ short ee_addr = ioaddr + lp->eeprom_reg;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EECS ;
+
+ /* XXXX - black magic */
+ eepro_sw2bank1(ioaddr);
+ outb(0x00, ioaddr + STATUS_REG);
+ /* XXXX - black magic */
+
+ eepro_sw2bank2(ioaddr);
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+		outb(outval, ee_addr);	/* Finish the EEPROM clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EESK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EECS;
+ outb(ctrl_val | EESK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ eepro_sw2bank0(ioaddr);
+ return retval;
+}
+
+static int
+hardware_send_packet(struct net_device *dev, void *buf, short length)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ unsigned status, tx_available, last, end;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
+
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
+ tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
+ else tx_available = lp->xmt_ram;
+
+ if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
+ /* No space available ??? */
+ return 1;
+ }
+
+ last = lp->tx_end;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+
+ if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
+ if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
+			/* Arrrr!!! We must keep the xmt header together;
+			several days were lost chasing this one down. */
+ last = lp->xmt_lower_limit;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ }
+ else end = lp->xmt_lower_limit + (end -
+ lp->xmt_upper_limit + 2);
+ }
+
+ outw(last, ioaddr + HOST_ADDRESS_REG);
+ outw(XMT_CMD, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(end, ioaddr + IO_PORT);
+ outw(length, ioaddr + IO_PORT);
+
+ if (lp->version == LAN595)
+ outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ /* A dummy read to flush the DRAM write pipeline */
+ status = inw(ioaddr + IO_PORT);
+
+ if (lp->tx_start == lp->tx_end) {
+ outw(last, ioaddr + lp->xmt_bar);
+ outb(XMT_CMD, ioaddr);
+ lp->tx_start = last; /* I don't like to change tx_start here */
+ }
+ else {
+ /* update the next address and the chain bit in the
+ last packet */
+
+ if (lp->tx_end != last) {
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(last, ioaddr + IO_PORT);
+ }
+
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+
+ /* Continue the transmit command */
+ outb(RESUME_XMT_CMD, ioaddr);
+ }
+
+ lp->tx_last = last;
+ lp->tx_end = end;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
+
+ return 0;
+}
+
+static void
+eepro_rx(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ short boguscount = 20;
+ short rcv_car = lp->rx_start;
+ unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name);
+
+ /* Set the read pointer to the start of the RCV */
+ outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
+
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ while (rcv_event == RCV_DONE) {
+
+ rcv_status = inw(ioaddr + IO_PORT);
+ rcv_next_frame = inw(ioaddr + IO_PORT);
+ rcv_size = inw(ioaddr + IO_PORT);
+
+ if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
+
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ lp->stats.rx_bytes+=rcv_size;
+ rcv_size &= 0x3fff;
+ skb = dev_alloc_skb(rcv_size+5);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ if (lp->version == LAN595)
+ insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size),
+ (rcv_size + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
+
+		else { /* Not sure we will ever reach here,
+			since I set the 595 to discard bad received frames */
+ lp->stats.rx_errors++;
+
+ if (rcv_status & 0x0100)
+ lp->stats.rx_over_errors++;
+
+ else if (rcv_status & 0x0400)
+ lp->stats.rx_frame_errors++;
+
+ else if (rcv_status & 0x0800)
+ lp->stats.rx_crc_errors++;
+
+ printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
+ dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
+ }
+
+ if (rcv_status & 0x1000)
+ lp->stats.rx_length_errors++;
+
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+
+ if (--boguscount == 0)
+ break;
+
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ }
+ if (rcv_car == 0)
+ rcv_car = lp->rcv_upper_limit | 0xff;
+
+ outw(rcv_car - 1, ioaddr + RCV_STOP);
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name);
+}
+
+static void
+eepro_transmit_interrupt(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ short boguscount = 25;
+ short xmt_status;
+
+ while ((lp->tx_start != lp->tx_end) && boguscount--) {
+
+ outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
+ xmt_status = inw(ioaddr+IO_PORT);
+
+ if (!(xmt_status & TX_DONE_BIT))
+ break;
+
+ xmt_status = inw(ioaddr+IO_PORT);
+ lp->tx_start = inw(ioaddr+IO_PORT);
+
+ netif_wake_queue (dev);
+
+ if (xmt_status & TX_OK)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (xmt_status & 0x0400) {
+ lp->stats.tx_carrier_errors++;
+ printk(KERN_DEBUG "%s: carrier error\n",
+ dev->name);
+ printk(KERN_DEBUG "%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+ else {
+ printk(KERN_DEBUG "%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+ }
+ if (xmt_status & 0x000f) {
+ lp->stats.collisions += (xmt_status & 0x000f);
+ }
+
+ if ((xmt_status & 0x0040) == 0x0) {
+ lp->stats.tx_heartbeat_errors++;
+ }
+ }
+}
+
+static int eepro_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_Autoneg;
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_Autoneg;
+
+ if (GetBit(lp->word[5], ee_PortTPE)) {
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ }
+ if (GetBit(lp->word[5], ee_PortBNC)) {
+ cmd->supported |= SUPPORTED_BNC;
+ cmd->advertising |= ADVERTISED_BNC;
+ }
+ if (GetBit(lp->word[5], ee_PortAUI)) {
+ cmd->supported |= SUPPORTED_AUI;
+ cmd->advertising |= ADVERTISED_AUI;
+ }
+
+ cmd->speed = SPEED_10;
+
+ if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
+ cmd->duplex = DUPLEX_FULL;
+ }
+ else {
+ cmd->duplex = DUPLEX_HALF;
+ }
+
+ cmd->port = dev->if_port;
+ cmd->phy_address = dev->base_addr;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ if (lp->word[0] & ee_AutoNeg) {
+ cmd->autoneg = 1;
+ }
+
+ return 0;
+}
+
+static void eepro_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->version, DRV_VERSION);
+ sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static struct ethtool_ops eepro_ethtool_ops = {
+ .get_settings = eepro_ethtool_get_settings,
+ .get_drvinfo = eepro_ethtool_get_drvinfo,
+};
+
+#ifdef MODULE
+
+#define MAX_EEPRO 8
+static struct net_device *dev_eepro[MAX_EEPRO];
+
+static int io[MAX_EEPRO] = {
+ [0 ... MAX_EEPRO-1] = -1
+};
+static int irq[MAX_EEPRO];
+static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */
+ [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
+};
+static int autodetect;
+
+static int n_eepro;
+/* For linux 2.1.xx */
+
+MODULE_AUTHOR("Pascal Dupuis and others");
+MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
+MODULE_LICENSE("GPL");
+
+static int num_params;
+module_param_array(io, int, &num_params, 0);
+module_param_array(irq, int, &num_params, 0);
+module_param_array(mem, int, &num_params, 0);
+module_param(autodetect, int, 0);
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
+MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(s) in kB (3-29)");
+MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
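+
+/* Example (illustrative only): with the array parameters above, a
+ * two-board setup might be loaded as
+ *	modprobe eepro io=0x260,0x300 irq=10,11 mem=12,12
+ * The I/O addresses, IRQs and buffer sizes shown are placeholders;
+ * substitute the values matching your hardware.
+ */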
+
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int i;
+ if (io[0] == -1 && autodetect == 0) {
+		printk(KERN_WARNING "eepro_init_module: Probing is very dangerous on ISA boards!\n");
+ printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
+ return -ENODEV;
+ }
+ else if (autodetect) {
+ /* if autodetect is set then we must force detection */
+ for (i = 0; i < MAX_EEPRO; i++) {
+ io[i] = 0;
+ }
+
+ printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
+ }
+
+	for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
+ dev = alloc_etherdev(sizeof(struct eepro_local));
+ if (!dev)
+ break;
+
+ dev->mem_end = mem[i];
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+
+ if (do_eepro_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_eepro[n_eepro++] = dev;
+ continue;
+ }
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+ }
+ free_netdev(dev);
+ break;
+ }
+
+ if (n_eepro)
+ printk(KERN_INFO "%s", version);
+
+ return n_eepro ? 0 : -ENODEV;
+}
+
+void
+cleanup_module(void)
+{
+ int i;
+
+ for (i=0; i<n_eepro; i++) {
+ struct net_device *dev = dev_eepro[i];
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
new file mode 100644
index 000000000000..98b3a2fdce90
--- /dev/null
+++ b/drivers/net/eepro100.c
@@ -0,0 +1,2412 @@
+/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
+/*
+ Written 1996-1999 by Donald Becker.
+
+ The driver also contains updates by different kernel developers
+ (see incomplete list below).
+ Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
+ Please use this email address and linux-kernel mailing list for bug reports.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+ It should work with all i82557/558/559 boards.
+
+ Version history:
+ 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
+ Serious fixes for multicast filter list setting, TX timeout routine;
+ RX ring refilling logic; other stuff
+ 2000 Feb Jeff Garzik <jgarzik@pobox.com>
+ Convert to new PCI driver interface
+ 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
+		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
+ 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
+		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
+ 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
+ rx_align support: enables rx DMA without causing unaligned accesses.
+*/
+
+static const char *version =
+"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
+"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
+
+/* A few user-configurable values that apply to all boards.
+ First set is undocumented and spelled per Intel recommendations. */
+
+static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
+static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount /* = 0 */;
+
+#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
+ defined(__arm__)
+ /* align rx buffers to 2 bytes so that IP header is aligned */
+# define rx_align(skb) skb_reserve((skb), 2)
+# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
+#else
+# define rx_align(skb)
+# define RxFD_ALIGNMENT
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
+ Lower values use more memory, but are faster. */
+static int rx_copybreak = 200;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* 'options' is used to pass a transceiver override or full-duplex flag
+ e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* A few values that may be tweaked. */
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 64
+/* How many slots the multicast filter setup may take.
+   Do not decrease without changing the set_rx_mode() implementation. */
+#define TX_MULTICAST_SIZE 2
+#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+/* Actual number of TX packets queued, must be
+ <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
+/* Hysteresis marking queue as no longer full. */
+#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
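+
+/* A minimal sketch of how the two thresholds above are meant to interact
+   (helper names invented for illustration, not used elsewhere): the queue
+   is stopped once TX_QUEUE_LIMIT descriptors are outstanding and woken
+   again only after the backlog drains below TX_QUEUE_UNFULL, so a small
+   hysteresis band keeps the queue from bouncing on every completion. */
+static inline int tx_queue_should_stop(unsigned int cur_tx, unsigned int dirty_tx)
+{
+	return (int)(cur_tx - dirty_tx) >= TX_QUEUE_LIMIT;
+}
+
+static inline int tx_queue_may_wake(unsigned int cur_tx, unsigned int dirty_tx)
+{
+	return (int)(cur_tx - dirty_tx) < TX_QUEUE_UNFULL;
+}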
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
+#define PKT_BUF_SZ 1536
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+
+static int use_io;
+static int debug = -1;
+#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
+
+
+MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
+MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
+MODULE_LICENSE("GPL");
+module_param(use_io, int, 0);
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(congenb, int, 0);
+module_param(txfifo, int, 0);
+module_param(rxfifo, int, 0);
+module_param(txdmacount, int, 0);
+module_param(rxdmacount, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(multicast_filter_limit, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0-6)");
+MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
+MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
+MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
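+
+/* Example (illustrative only): following the descriptions above,
+ *	modprobe eepro100 options=32 debug=3
+ * forces 100 Mbps operation on the first board and sets the debug level
+ * to 3; the values are placeholders, not recommendations.
+ */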
+
+#define RUN_AT(x) (jiffies + (x))
+
+#define netdevice_start(dev)
+#define netdevice_stop(dev)
+#define netif_set_tx_timeout(dev, tf, tm) \
+ do { \
+ (dev)->tx_timeout = (tf); \
+ (dev)->watchdog_timeo = (tm); \
+ } while(0)
+
+
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet".  This chip retains the complex
+Rx and Tx descriptors and multiple buffer pointers of previous chips, but
+also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers. The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space. This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section.  Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptors
+(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
+speedo_private data structure for each adapter instance.
+
+The newer i82558 explicitly supports this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure. This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor. So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronously to the normal transmit queue, so we disable interrupts
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method. The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit.  The setup frames may have the NoOp command on the
+Tx ring marked as complete without having completed the setup command, but this
+is not a problem.  The tx_ring entry can still be safely reused, as the
+tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+Commands may have bits set, e.g. CmdSuspend, in the command word to either
+suspend or stop the transmit/command unit. This driver always flags the last
+command with CmdSuspend, erases the CmdSuspend in the previous command, and
+then issues a CU_RESUME.
+Note: Watch out for the potential race condition here: imagine
+ erasing the previous suspend
+ the chip processes the previous command
+ the chip processes the final command, and suspends
+ doing the CU_RESUME
+ the chip processes the next-yet-valid post-final-command.
+So blindly sending a CU_RESUME is only safe if we do it immediately
+after erasing the previous CmdSuspend, without the possibility of an
+intervening delay. Thus the resume command is always within the
+interrupts-disabled region. This is a timing dependence, but handling this
+condition in a timing-independent way would considerably complicate the code.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process. This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the new
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers. The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade-off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff. For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information. But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
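+
+/* A minimal sketch of the copy-vs-pass decision described in IIIC above,
+   assuming a complete frame of pkt_len bytes already sits in skb.  The
+   helper name is invented for illustration and is not called anywhere;
+   the driver's actual receive path applies the same idea. */
+static inline struct sk_buff *
+speedo_copybreak_sketch(struct net_device *dev, struct sk_buff *skb, int pkt_len)
+{
+	if (pkt_len < rx_copybreak) {
+		/* Small frame: copy into a minimally-sized skbuff so the
+		   full-sized receive buffer can stay on the Rx ring. */
+		struct sk_buff *copy = dev_alloc_skb(pkt_len + 2);
+		if (copy == NULL)
+			return skb;		/* fall back to passing the original */
+		copy->dev = dev;
+		skb_reserve(copy, 2);		/* align the IP header */
+		memcpy(skb_put(copy, pkt_len), skb->data, pkt_len);
+		return copy;
+	}
+	/* Large frame: hand the original, full-sized skbuff up the stack. */
+	return skb;
+}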
+
+static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+/* Offsets to the various registers.
+ All accesses need not be longword aligned. */
+enum speedo_offsets {
+ SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
+ SCBIntmask = 3,
+ SCBPointer = 4, /* General purpose pointer. */
+ SCBPort = 8, /* Misc. commands and operands. */
+ SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+ SCBCtrlMDI = 16, /* MDI interface control. */
+ SCBEarlyRx = 20, /* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+ CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+ CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+ CmdDump = 0x60000, CmdDiagnose = 0x70000,
+ CmdSuspend = 0x40000000, /* Suspend after completion. */
+ CmdIntr = 0x20000000, /* Interrupt after completion. */
+ CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
+};
+/* Clear CmdSuspend (1<<30) avoiding interference with the card's access to the
+ status bits. Previous driver versions used separate 16 bit fields for
+ commands and statuses. --SAW
+ */
+#if defined(__alpha__)
+# define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
+#else
+# if defined(__LITTLE_ENDIAN)
+# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+# elif defined(__BIG_ENDIAN)
+# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
+# else
+# error Unsupported byteorder
+# endif
+#endif
+
+enum SCBCmdBits {
+ SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+ SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+ SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+ /* The rest are Rx and Tx commands. */
+ CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+ CUCmdBase=0x0060, /* CU Base address (set to zero) . */
+ CUDumpStats=0x0070, /* Dump then reset stats counters. */
+ RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+ RxResumeNoResources=0x0007,
+};
+
+enum SCBPort_cmds {
+ PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor { /* A generic descriptor. */
+ volatile s32 cmd_status; /* All command and status fields. */
+ u32 link; /* struct descriptor * */
+ unsigned char params[0];
+};
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD { /* Receive frame descriptor. */
+ volatile s32 status;
+ u32 link; /* struct RxFD * */
+ u32 rx_buf_addr; /* void * */
+ u32 count;
+} RxFD_ALIGNMENT;
+
+/* Selected elements of the Tx/RxFD.status word. */
+enum RxFD_bits {
+ RxComplete=0x8000, RxOK=0x2000,
+ RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+ RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+ TxUnderrun=0x1000, StatusComplete=0x8000,
+};
+
+#define CONFIG_DATA_SIZE 22
+struct TxFD { /* Transmit frame descriptor set. */
+ s32 status;
+ u32 link; /* void * */
+ u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
+ s32 count; /* # of TBD (=1), Tx start thresh., etc. */
+ /* This constitutes two "TBD" entries -- we only use one. */
+#define TX_DESCR_BUF_OFFSET 16
+ u32 tx_buf_addr0; /* void *, frame to be transmitted. */
+ s32 tx_buf_size0; /* Length of Tx frame. */
+ u32 tx_buf_addr1; /* void *, frame to be transmitted. */
+ s32 tx_buf_size1; /* Length of Tx frame. */
+ /* the structure must have space for at least CONFIG_DATA_SIZE starting
+ * from tx_desc_addr field */
+};
+
+/* Multicast filter setting block. --SAW */
+struct speedo_mc_block {
+ struct speedo_mc_block *next;
+ unsigned int tx;
+ dma_addr_t frame_dma;
+ unsigned int len;
+ struct descriptor frame __attribute__ ((__aligned__(16)));
+};
+
+/* Elements of the dump_statistics block. This block must be lword aligned. */
+struct speedo_stats {
+ u32 tx_good_frames;
+ u32 tx_coll16_errs;
+ u32 tx_late_colls;
+ u32 tx_underruns;
+ u32 tx_lost_carrier;
+ u32 tx_deferred;
+ u32 tx_one_colls;
+ u32 tx_multi_colls;
+ u32 tx_total_colls;
+ u32 rx_good_frames;
+ u32 rx_crc_errs;
+ u32 rx_align_errs;
+ u32 rx_resource_errs;
+ u32 rx_overrun_errs;
+ u32 rx_colls_errs;
+ u32 rx_runt_errs;
+ u32 done_marker;
+};
+
+enum Rx_ring_state_bits {
+ RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
+};
+
+/* Do not change the position (alignment) of the first few elements!
+ The later elements are grouped for cache locality.
+
+   Unfortunately, all the positions have been shifted since then.
+ A new re-alignment is required. 2000/03/06 SAW */
+struct speedo_private {
+ void __iomem *regs;
+ struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
+ struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
+ /* The addresses of a Tx/Rx-in-place packets/buffers. */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ /* Mapped addresses of the rings. */
+ dma_addr_t tx_ring_dma;
+#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
+ dma_addr_t rx_ring_dma[RX_RING_SIZE];
+ struct descriptor *last_cmd; /* Last command sent. */
+ unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ u32 tx_threshold; /* The value for txdesc.count. */
+ struct RxFD *last_rxf; /* Last filled RX buffer. */
+ dma_addr_t last_rxf_dma;
+ unsigned int cur_rx, dirty_rx; /* The next free ring entry */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ struct net_device_stats stats;
+ struct speedo_stats *lstats;
+ dma_addr_t lstats_dma;
+ int chip_id;
+ struct pci_dev *pdev;
+ struct timer_list timer; /* Media selection timer. */
+ struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
+ struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
+ long in_interrupt; /* Word-aligned dev->interrupt */
+ unsigned char acpi_pwr;
+ signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
+ unsigned int rx_bug:1; /* Work around receiver hang errata. */
+ unsigned char default_port:8; /* Last dev->if_port value. */
+ unsigned char rx_ring_state; /* RX ring status flags. */
+ unsigned short phy[2]; /* PHY media interfaces available. */
+ unsigned short partner; /* Link partner caps. */
+ struct mii_if_info mii_if; /* MII API hooks, info */
+ u32 msg_enable; /* debug message level */
+};
+
+/* The parameters for a CmdConfigure operation.
+ There are so many options that it would be difficult to document each bit.
+ We mostly use the default or recommended settings. */
+static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
+ 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0,
+ 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
+ 0x3f, 0x05, };
+static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
+ 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0x08, 0x88,
+ 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
+ 0x31, 0x05, };
+
+/* PHY media interface chips. */
+static const char *phys[] = {
+ "None", "i82553-A/B", "i82553-C", "i82503",
+ "DP83840", "80c240", "80c24", "i82555",
+ "unknown-8", "unknown-9", "DP83840A", "unknown-11",
+ "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
+enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+ S80C24, I82555, DP83840A=10, };
+static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
+#define EE_READ_CMD (6)
+
+static int eepro100_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int speedo_open(struct net_device *dev);
+static void speedo_resume(struct net_device *dev);
+static void speedo_timer(unsigned long data);
+static void speedo_init_rx_ring(struct net_device *dev);
+static void speedo_tx_timeout(struct net_device *dev);
+static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void speedo_refill_rx_buffers(struct net_device *dev, int force);
+static int speedo_rx(struct net_device *dev);
+static void speedo_tx_buffer_gc(struct net_device *dev);
+static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int speedo_close(struct net_device *dev);
+static struct net_device_stats *speedo_get_stats(struct net_device *dev);
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+static void speedo_show_state(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+
+
+
+#ifdef honor_default_port
+/* Optional driver feature to allow forcing the transceiver setting.
+ Not recommended. */
+static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
+ 0x2000, 0x2100, 0x0400, 0x3100};
+#endif
+
+/* How to wait for the command unit to accept a command.
+ Typically this takes 0 ticks. */
+static inline unsigned char wait_for_cmd_done(struct net_device *dev,
+ struct speedo_private *sp)
+{
+ int wait = 1000;
+ void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
+ unsigned char r;
+
+ do {
+ udelay(1);
+ r = ioread8(cmd_ioaddr);
+ } while(r && --wait >= 0);
+
+ if (wait < 0)
+ printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
+ return r;
+}
+
+static int __devinit eepro100_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ void __iomem *ioaddr;
+ int irq, pci_bar;
+ int acpi_idle_state = 0, pm;
+ static int cards_found /* = 0 */;
+ unsigned long pci_base;
+
+#ifndef MODULE
+ /* when built-in, we only print version if device is found */
+ static int did_version;
+ if (did_version++ == 0)
+ printk(version);
+#endif
+
+ /* save power state before pci_enable_device overwrites it */
+ pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm) {
+ u16 pwr_command;
+ pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
+ acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+ }
+
+ if (pci_enable_device(pdev))
+ goto err_out_free_mmio_region;
+
+ pci_set_master(pdev);
+
+ if (!request_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1), "eepro100")) {
+ printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
+ goto err_out_none;
+ }
+ if (!request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), "eepro100")) {
+ printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
+ goto err_out_free_pio_region;
+ }
+
+ irq = pdev->irq;
+ pci_bar = use_io ? 1 : 0;
+ pci_base = pci_resource_start(pdev, pci_bar);
+ if (DEBUG & NETIF_MSG_PROBE)
+ printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
+ pci_base, irq);
+
+ ioaddr = pci_iomap(pdev, pci_bar, 0);
+ if (!ioaddr) {
+ printk (KERN_ERR "eepro100: cannot remap IO\n");
+ goto err_out_free_mmio_region;
+ }
+
+ if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
+ cards_found++;
+ else
+ goto err_out_iounmap;
+
+ return 0;
+
+err_out_iounmap: ;
+ pci_iounmap(pdev, ioaddr);
+err_out_free_mmio_region:
+ release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+err_out_free_pio_region:
+ release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+err_out_none:
+ return -ENODEV;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_speedo (struct net_device *dev)
+{
+ /* disable_irq is not very nice, but with the funny lockless design
+ we have no other choice. */
+ disable_irq(dev->irq);
+ speedo_interrupt (dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int __devinit speedo_found1(struct pci_dev *pdev,
+ void __iomem *ioaddr, int card_idx, int acpi_idle_state)
+{
+ struct net_device *dev;
+ struct speedo_private *sp;
+ const char *product;
+ int i, option;
+ u16 eeprom[0x100];
+ int size;
+ void *tx_ring_space;
+ dma_addr_t tx_ring_dma;
+
+ size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
+ tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
+ if (tx_ring_space == NULL)
+ return -1;
+
+ dev = alloc_etherdev(sizeof(struct speedo_private));
+ if (dev == NULL) {
+ printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
+ pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
+ return -1;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (dev->mem_start > 0)
+ option = dev->mem_start;
+ else if (card_idx >= 0 && options[card_idx] >= 0)
+ option = options[card_idx];
+ else
+ option = 0;
+
+ rtnl_lock();
+ if (dev_alloc_name(dev, dev->name) < 0)
+ goto err_free_unlock;
+
+ /* Read the station address EEPROM before doing the reset.
+	   Nominally this should even be done before accepting the device, but
+ then we wouldn't have a device name with which to report the error.
+ The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+ */
+ {
+ void __iomem *iobase;
+ int read_cmd, ee_size;
+ u16 sum;
+ int j;
+
+ /* Use IO only to avoid postponed writes and satisfy EEPROM timing
+ requirements. */
+ iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!iobase)
+ goto err_free_unlock;
+ if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
+ == 0xffe0000) {
+ ee_size = 0x100;
+ read_cmd = EE_READ_CMD << 24;
+ } else {
+ ee_size = 0x40;
+ read_cmd = EE_READ_CMD << 22;
+ }
+
+ for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
+ u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
+ eeprom[i] = value;
+ sum += value;
+ if (i < 3) {
+ dev->dev_addr[j++] = value;
+ dev->dev_addr[j++] = value >> 8;
+ }
+ }
+ if (sum != 0xBABA)
+ printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+ "check settings before activating this device!\n",
+ dev->name, sum);
+ /* Don't unregister_netdev(dev); as the EEPro may actually be
+ usable, especially if the MAC address is set later.
+ On the other hand, it may be unusable if MDI data is corrupted. */
+
+ pci_iounmap(pdev, iobase);
+ }
+
+ /* Reset the chip: stop Tx and Rx processes and clear counters.
+ This takes less than 10usec and will easily finish before the next
+ action. */
+ iowrite32(PortReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort);
+ udelay(10);
+
+ if (eeprom[3] & 0x0100)
+ product = "OEM i82557/i82558 10/100 Ethernet";
+ else
+ product = pci_name(pdev);
+
+ printk(KERN_INFO "%s: %s, ", dev->name, product);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2X:", dev->dev_addr[i]);
+ printk("%2.2X, ", dev->dev_addr[i]);
+ printk("IRQ %d.\n", pdev->irq);
+
+ sp = netdev_priv(dev);
+
+ /* we must initialize this early, for mdio_{read,write} */
+ sp->regs = ioaddr;
+
+#if 1 || defined(kernel_bloat)
+ /* OK, this is pure kernel bloat. I don't like it when other drivers
+ waste non-pageable kernel space to emit similar messages, but I need
+ them for bug reports. */
+ {
+ const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
+ /* The self-test results must be paragraph aligned. */
+ volatile s32 *self_test_results;
+		int boguscnt = 16000;			/* Timeout for self-test. */
+ if ((eeprom[3] & 0x03) != 0x03)
+ printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
+ " work-around.\n");
+ printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
+ " connectors present:",
+ eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
+ for (i = 0; i < 4; i++)
+ if (eeprom[5] & (1<<i))
+ printk(connectors[i]);
+ printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
+ phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
+ if (eeprom[7] & 0x0700)
+ printk(KERN_INFO " Secondary interface chip %s.\n",
+ phys[(eeprom[7]>>8)&7]);
+ if (((eeprom[6]>>8) & 0x3f) == DP83840
+ || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
+ int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
+ if (congenb)
+ mdi_reg23 |= 0x0100;
+ printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
+ mdi_reg23);
+ mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
+ }
+ if ((option >= 0) && (option & 0x70)) {
+			printk(KERN_INFO "  Forcing %dMbps %s-duplex operation.\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
+ mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
+ ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* Perform a system self-test. */
+ self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
+ self_test_results[0] = 0;
+ self_test_results[1] = -1;
+ iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
+ do {
+ udelay(10);
+ } while (self_test_results[1] == -1 && --boguscnt >= 0);
+
+ if (boguscnt < 0) { /* Test optimized out. */
+ printk(KERN_ERR "Self test failed, status %8.8x:\n"
+ KERN_ERR " Failure to initialize the i82557.\n"
+			   KERN_ERR "  Verify that the card is in a bus-master"
+ " capable slot.\n",
+ self_test_results[1]);
+ } else
+ printk(KERN_INFO " General self-test: %s.\n"
+ KERN_INFO " Serial sub-system self-test: %s.\n"
+ KERN_INFO " Internal registers self-test: %s.\n"
+ KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
+ self_test_results[1] & 0x1000 ? "failed" : "passed",
+ self_test_results[1] & 0x0020 ? "failed" : "passed",
+ self_test_results[1] & 0x0008 ? "failed" : "passed",
+ self_test_results[1] & 0x0004 ? "failed" : "passed",
+ self_test_results[0]);
+ }
+#endif /* kernel_bloat */
+
+ iowrite32(PortReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort);
+ udelay(10);
+
+ /* Return the chip to its original power state. */
+ pci_set_power_state(pdev, acpi_idle_state);
+
+ pci_set_drvdata (pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->irq = pdev->irq;
+
+ sp->pdev = pdev;
+ sp->msg_enable = DEBUG;
+ sp->acpi_pwr = acpi_idle_state;
+ sp->tx_ring = tx_ring_space;
+ sp->tx_ring_dma = tx_ring_dma;
+ sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
+ sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
+ init_timer(&sp->timer); /* used in ioctl() */
+ spin_lock_init(&sp->lock);
+
+ sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ sp->mii_if.full_duplex = full_duplex[card_idx];
+ }
+ sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+
+ sp->phy[0] = eeprom[6];
+ sp->phy[1] = eeprom[7];
+
+ sp->mii_if.phy_id = eeprom[6] & 0x1f;
+ sp->mii_if.phy_id_mask = 0x1f;
+ sp->mii_if.reg_num_mask = 0x1f;
+ sp->mii_if.dev = dev;
+ sp->mii_if.mdio_read = mdio_read;
+ sp->mii_if.mdio_write = mdio_write;
+
+ sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+ if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
+ || (pdev->device == 0x2449) || (pdev->device == 0x2459)
+ || (pdev->device == 0x245D)) {
+ sp->chip_id = 1;
+ }
+
+ if (sp->rx_bug)
+ printk(KERN_INFO " Receiver lock-up workaround activated.\n");
+
+ /* The Speedo-specific entries in the device structure. */
+ dev->open = &speedo_open;
+ dev->hard_start_xmit = &speedo_start_xmit;
+ netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
+ dev->stop = &speedo_close;
+ dev->get_stats = &speedo_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &speedo_ioctl;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &poll_speedo;
+#endif
+
+ if (register_netdevice(dev))
+ goto err_free_unlock;
+ rtnl_unlock();
+
+ return 0;
+
+ err_free_unlock:
+ rtnl_unlock();
+ free_netdev(dev);
+ return -1;
+}
+
+static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
+{
+ void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
+ int wait = 0;
+ do
+ if (ioread8(cmd_ioaddr) == 0) break;
+ while(++wait <= 200);
+ if (wait > 100)
+ printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
+ ioread8(cmd_ioaddr), wait);
+
+ iowrite8(cmd, cmd_ioaddr);
+
+ for (wait = 0; wait <= 100; wait++)
+ if (ioread8(cmd_ioaddr) == 0) return;
+ for (; wait <= 20000; wait++)
+ if (ioread8(cmd_ioaddr) == 0) return;
+ else udelay(1);
+ printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
+ " Current status %8.8x.\n",
+ cmd, wait, ioread32(sp->regs + SCBStatus));
+}
+
+/* Serial EEPROM section.
+ A "bit" grungy, but we work our way through bit-by-bit :->. */
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+#define EE_ENB (0x4800 | EE_CS)
+#define EE_WRITE_0 0x4802
+#define EE_WRITE_1 0x4806
+#define EE_OFFSET SCBeeprom
+
+/* The fixes for the code were kindly provided by Dragan Stancevic
+ <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
+ access timing.
+ The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
+   interval for serial EEPROM.  However, it looks like there is an
+   additional requirement dictating the larger udelays in the code below.
+ 2000/05/24 SAW */
+static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
+{
+ unsigned retval = 0;
+ void __iomem *ee_addr = ioaddr + SCBeeprom;
+
+ iowrite16(EE_ENB, ee_addr); udelay(2);
+ iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
+
+ /* Shift the command bits out. */
+ do {
+ short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+ iowrite16(dataval, ee_addr); udelay(2);
+ iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+ retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ } while (--cmd_len >= 0);
+ iowrite16(EE_ENB, ee_addr); udelay(2);
+
+ /* Terminate the EEPROM access. */
+ iowrite16(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
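+
+/* Usage sketch (helper name invented for illustration): this mirrors how
+   speedo_found1() reads one word from the smaller, 6-bit-address EEPROMs.
+   The read opcode and word address are packed above the 16 data-bit
+   positions, 27 is the highest command bit shifted out, and the low 16
+   bits of the result are the EEPROM word. */
+static inline u16 eeprom_read_word_sketch(void __iomem *ioaddr, int location)
+{
+	return do_eeprom_cmd(ioaddr, (EE_READ_CMD << 22) | (location << 16), 27);
+}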
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+ iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+ do {
+ val = ioread32(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+ return val & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+ iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
+ ioaddr + SCBCtrlMDI);
+ do {
+ val = ioread32(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+}
+
+static int
+speedo_open(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int retval;
+
+ if (netif_msg_ifup(sp))
+ printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+
+ pci_set_power_state(sp->pdev, PCI_D0);
+
+ /* Set up the Tx queue early.. */
+ sp->cur_tx = 0;
+ sp->dirty_tx = 0;
+ sp->last_cmd = NULL;
+ sp->tx_full = 0;
+ sp->in_interrupt = 0;
+
+ /* .. we can safely take handler calls during init. */
+ retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval) {
+ return retval;
+ }
+
+ dev->if_port = sp->default_port;
+
+#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
+ /* Retrigger negotiation to reset previous errors. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f ;
+ /* Use 0x3300 for restarting NWay, other values to force xcvr:
+ 0x0000 10-HD
+ 0x0100 10-FD
+ 0x2000 100-HD
+ 0x2100 100-FD
+ */
+#ifdef honor_default_port
+ mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
+#else
+ mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
+#endif
+ }
+#endif
+
+ speedo_init_rx_ring(dev);
+
+ /* Fire up the hardware. */
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
+ speedo_resume(dev);
+
+ netdevice_start(dev);
+ netif_start_queue(dev);
+
+ /* Setup the chip and configure the multicast list. */
+ sp->mc_setup_head = NULL;
+ sp->mc_setup_tail = NULL;
+ sp->flow_ctrl = sp->partner = 0;
+ sp->rx_mode = -1; /* Invalid -> always reset the mode. */
+ set_rx_mode(dev);
+ if ((sp->phy[0] & 0x8000) == 0)
+ sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
+
+ mii_check_link(&sp->mii_if);
+
+ if (netif_msg_ifup(sp)) {
+ printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+ dev->name, ioread16(ioaddr + SCBStatus));
+ }
+
+ /* Set the timer. The timer serves a dual purpose:
+ 1) to monitor the media interface (e.g. link beat) and perhaps switch
+ to an alternate media type
+ 2) to monitor Rx activity, and restart the Rx process if the receiver
+ hangs. */
+ sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+ sp->timer.data = (unsigned long)dev;
+ sp->timer.function = &speedo_timer; /* timer handler */
+ add_timer(&sp->timer);
+
+ /* No need to wait for the command unit to accept here. */
+ if ((sp->phy[0] & 0x8000) == 0)
+ mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
+
+ return 0;
+}
+
+/* Start the chip hardware after a full reset. */
+static void speedo_resume(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+
+ /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+ sp->tx_threshold = 0x01208000;
+
+ /* Set the segment registers to '0'. */
+ if (wait_for_cmd_done(dev, sp) != 0) {
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
+ udelay(10);
+ }
+
+ iowrite32(0, ioaddr + SCBPointer);
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
+ udelay(10); /* Bogus, but it avoids the bug. */
+
+ /* Note: these next two operations can take a while. */
+ do_slow_command(dev, sp, RxAddrLoad);
+ do_slow_command(dev, sp, CUCmdBase);
+
+ /* Load the statistics block and rx ring addresses. */
+ iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI */
+
+ iowrite8(CUStatsAddr, ioaddr + SCBCmd);
+ sp->lstats->done_marker = 0;
+ wait_for_cmd_done(dev, sp);
+
+ if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
+ if (netif_msg_rx_err(sp))
+ printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
+ dev->name);
+ } else {
+ iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+ ioaddr + SCBPointer);
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI */
+ }
+
+ /* Note: RxStart should complete instantly. */
+ do_slow_command(dev, sp, RxStart);
+ do_slow_command(dev, sp, CUDumpStats);
+
+ /* Fill the first command with our physical address. */
+ {
+ struct descriptor *ias_cmd;
+
+ ias_cmd =
+ (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
+ /* Avoid a bug(?!) here by marking the command already completed. */
+ ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+ ias_cmd->link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+ memcpy(ias_cmd->params, dev->dev_addr, 6);
+ if (sp->last_cmd)
+ clear_suspend(sp->last_cmd);
+ sp->last_cmd = ias_cmd;
+ }
+
+ /* Start the chip's Tx process and unmask interrupts. */
+ iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
+ ioaddr + SCBPointer);
+ /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
+ remain masked --Dragan */
+ iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+}
+
+/*
+ * Sometimes the receiver stops making progress. This routine knows how to
+ * get it going again, without losing packets or being otherwise nasty like
+ * a chip reset would be. Previously the driver had a whole sequence
+ * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
+ * do another, etc. But those things don't really matter. Separate logic
+ * in the ISR provides for allocating buffers--the other half of operation
+ * is just making sure the receiver is active. speedo_rx_soft_reset does that.
+ * This problem with the old, more involved algorithm shows up under
+ * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
+ */
+static void
+speedo_rx_soft_reset(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ struct RxFD *rfd;
+ void __iomem *ioaddr;
+
+ ioaddr = sp->regs;
+ if (wait_for_cmd_done(dev, sp) != 0) {
+ printk("%s: previous command stalled\n", dev->name);
+ return;
+ }
+ /*
+ * Put the hardware into a known state.
+ */
+ iowrite8(RxAbort, ioaddr + SCBCmd);
+
+ rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+
+ rfd->rx_buf_addr = 0xffffffff;
+
+ if (wait_for_cmd_done(dev, sp) != 0) {
+ printk("%s: RxAbort command stalled\n", dev->name);
+ return;
+ }
+ iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+ ioaddr + SCBPointer);
+ iowrite8(RxStart, ioaddr + SCBCmd);
+}
+
+
+/* Media monitoring and control. */
+static void speedo_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int phy_num = sp->phy[0] & 0x1f;
+
+ /* We have MII and lost link beat. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int partner = mdio_read(dev, phy_num, MII_LPA);
+ if (partner != sp->partner) {
+ int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
+ if (netif_msg_link(sp)) {
+ printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
+ printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
+ dev->name, sp->partner, partner, sp->mii_if.advertising);
+ }
+ sp->partner = partner;
+ if (flow_ctrl != sp->flow_ctrl) {
+ sp->flow_ctrl = flow_ctrl;
+ sp->rx_mode = -1; /* Trigger a reload. */
+ }
+ }
+ }
+ mii_check_link(&sp->mii_if);
+ if (netif_msg_timer(sp)) {
+ printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
+ dev->name, ioread16(ioaddr + SCBStatus));
+ }
+ if (sp->rx_mode < 0 ||
+ (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
+ /* We haven't received a packet in a Long Time. We might have been
+ bitten by the receiver hang bug. This can be cleared by sending
+ a set multicast list command. */
+ if (netif_msg_timer(sp))
+ printk(KERN_DEBUG "%s: Sending a multicast list set command"
+ " from a timer routine,"
+ " m=%d, j=%ld, l=%ld.\n",
+ dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
+ set_rx_mode(dev);
+ }
+ /* We must continue to monitor the media. */
+ sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
+ add_timer(&sp->timer);
+}
+
+static void speedo_show_state(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int i;
+
+ if (netif_msg_pktdata(sp)) {
+ printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
+ dev->name, sp->cur_tx, sp->dirty_tx);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
+ i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+ i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+ i, sp->tx_ring[i].status);
+
+ printk(KERN_DEBUG "%s: Printing Rx ring"
+ " (next to receive into %u, dirty index %u).\n",
+ dev->name, sp->cur_rx, sp->dirty_rx);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
+ sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
+ i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
+ i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
+ i, (sp->rx_ringp[i] != NULL) ?
+ (unsigned)sp->rx_ringp[i]->status : 0);
+ }
+
+#if 0
+ {
+ void __iomem *ioaddr = sp->regs;
+ int phy_num = sp->phy[0] & 0x1f;
+ for (i = 0; i < 16; i++) {
+ /* FIXME: what does it mean? --SAW */
+ if (i == 6) i = 21;
+ printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
+ dev->name, phy_num, i, mdio_read(dev, phy_num, i));
+ }
+ }
+#endif
+
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+speedo_init_rx_ring(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ struct RxFD *rxf, *last_rxf = NULL;
+ dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
+ int i;
+
+ sp->cur_rx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
+		sp->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;			/* OK.  Just initially short of Rx bufs. */
+		rx_align(skb);			/* Align IP on 16 byte boundary */
+ skb->dev = dev; /* Mark as being used by this device. */
+ rxf = (struct RxFD *)skb->tail;
+ sp->rx_ringp[i] = rxf;
+ sp->rx_ring_dma[i] =
+ pci_map_single(sp->pdev, rxf,
+ PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
+ skb_reserve(skb, sizeof(struct RxFD));
+ if (last_rxf) {
+ last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
+ pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
+ sizeof(struct RxFD), PCI_DMA_TODEVICE);
+ }
+ last_rxf = rxf;
+ last_rxf_dma = sp->rx_ring_dma[i];
+ rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
+ rxf->link = 0; /* None yet. */
+ /* This field unused by i82557. */
+ rxf->rx_buf_addr = 0xffffffff;
+ rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+ pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
+ sizeof(struct RxFD), PCI_DMA_TODEVICE);
+ }
+ sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as end-of-list. */
+ last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
+ pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
+ sizeof(struct RxFD), PCI_DMA_TODEVICE);
+ sp->last_rxf = last_rxf;
+ sp->last_rxf_dma = last_rxf_dma;
+}
+
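+/* Drop every frame still queued for transmission (counting each as a Tx
+   error) and free any pending multicast setup blocks.  Used from the
+   timeout path once the chip has been reset. */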
+static void speedo_purge_tx(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int entry;
+
+ while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
+ entry = sp->dirty_tx % TX_RING_SIZE;
+ if (sp->tx_skbuff[entry]) {
+ sp->stats.tx_errors++;
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+ sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(sp->tx_skbuff[entry]);
+ sp->tx_skbuff[entry] = NULL;
+ }
+ sp->dirty_tx++;
+ }
+ while (sp->mc_setup_head != NULL) {
+ struct speedo_mc_block *t;
+ if (netif_msg_tx_err(sp))
+ printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+ pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
+ sp->mc_setup_head->len, PCI_DMA_TODEVICE);
+ t = sp->mc_setup_head->next;
+ kfree(sp->mc_setup_head);
+ sp->mc_setup_head = t;
+ }
+ sp->mc_setup_tail = NULL;
+ sp->tx_full = 0;
+ netif_wake_queue(dev);
+}
+
+static void reset_mii(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+
+ /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f;
+ int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
+ int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
+ mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
+ mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
+ mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
+ mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
+#ifdef honor_default_port
+ mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
+#else
+ mdio_read(dev, phy_addr, MII_BMCR);
+ mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
+ mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
+#endif
+ }
+}
+
+static void speedo_tx_timeout(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int status = ioread16(ioaddr + SCBStatus);
+ unsigned long flags;
+
+ if (netif_msg_tx_err(sp)) {
+		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x"
+			   " %4.4x at %d/%d command %8.8x.\n",
+			   dev->name, status, ioread16(ioaddr + SCBCmd),
+			   sp->dirty_tx, sp->cur_tx,
+			   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
+	}
+ speedo_show_state(dev);
+#if 0
+ if ((status & 0x00C0) != 0x0080
+ && (status & 0x003C) == 0x0010) {
+ /* Only the command unit has stopped. */
+ printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
+ dev->name);
+		iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
+ ioaddr + SCBPointer);
+ iowrite16(CUStart, ioaddr + SCBCmd);
+ reset_mii(dev);
+ } else {
+#else
+ {
+#endif
+ del_timer_sync(&sp->timer);
+ /* Reset the Tx and Rx units. */
+ iowrite32(PortReset, ioaddr + SCBPort);
+		/* We may get spurious interrupts here, but they shouldn't do
+		   much harm.  1999/12/09 SAW */
+ udelay(10);
+ /* Disable interrupts. */
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
+ synchronize_irq(dev->irq);
+ speedo_tx_buffer_gc(dev);
+		/* Free as much as possible.
+		   It helps to recover from a hang caused by running out of memory.
+		   It also simplifies speedo_resume() in case the TX ring is full
+		   or nearly full. */
+ speedo_purge_tx(dev);
+ speedo_refill_rx_buffers(dev, 1);
+ spin_lock_irqsave(&sp->lock, flags);
+ speedo_resume(dev);
+ sp->rx_mode = -1;
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ set_rx_mode(dev); /* it takes the spinlock itself --SAW */
+		/* Reset MII transceiver.  Do it before starting the timer to serialize
+		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
+ reset_mii(dev);
+ sp->timer.expires = RUN_AT(2*HZ);
+ add_timer(&sp->timer);
+ }
+ return;
+}
+
+static int
+speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int entry;
+
+ /* Prevent interrupts from changing the Tx ring from underneath us. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+	/* Check if there is enough space. */
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return 1;
+ }
+
+ /* Calculate the Tx descriptor entry. */
+ entry = sp->cur_tx++ % TX_RING_SIZE;
+
+ sp->tx_skbuff[entry] = skb;
+ sp->tx_ring[entry].status =
+ cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+ if (!(entry & ((TX_RING_SIZE>>2)-1)))
+ sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
+ sp->tx_ring[entry].link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+ sp->tx_ring[entry].tx_desc_addr =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+ /* The data region is always in one buffer descriptor. */
+ sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+ sp->tx_ring[entry].tx_buf_addr0 =
+ cpu_to_le32(pci_map_single(sp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+
+ /* workaround for hardware bug on 10 mbit half duplex */
+
+ if ((sp->partner == 0) && (sp->chip_id == 1)) {
+ wait_for_cmd_done(dev, sp);
+ iowrite8(0 , ioaddr + SCBCmd);
+ udelay(1);
+ }
+
+ /* Trigger the command unit resume. */
+ wait_for_cmd_done(dev, sp);
+ clear_suspend(sp->last_cmd);
+	/* We want the time window between clearing the suspend flag on the
+	   previous command and resuming the CU to be as small as possible.
+	   Interrupts in between are very undesirable.  --SAW */
+ iowrite8(CUResume, ioaddr + SCBCmd);
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+	/* Leave room for set_rx_mode().  If only the space reserved for the
+	   multicast filter remains, mark the ring as full. */
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ }
+
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static void speedo_tx_buffer_gc(struct net_device *dev)
+{
+ unsigned int dirty_tx;
+ struct speedo_private *sp = netdev_priv(dev);
+
+ dirty_tx = sp->dirty_tx;
+ while ((int)(sp->cur_tx - dirty_tx) > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+ if (netif_msg_tx_done(sp))
+ printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+ entry, status);
+ if ((status & StatusComplete) == 0)
+ break; /* It still hasn't been processed. */
+		if (status & TxUnderrun) {
+			if (sp->tx_threshold < 0x01e08000) {
+				if (netif_msg_tx_err(sp))
+					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
+						   dev->name);
+				sp->tx_threshold += 0x00040000;
+			}
+		}
+ /* Free the original skb. */
+ if (sp->tx_skbuff[entry]) {
+ sp->stats.tx_packets++; /* Count only user packets. */
+ sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+ sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(sp->tx_skbuff[entry]);
+ sp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+
+ if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
+		printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d,"
+			   " full=%d.\n",
+			   dev->name, dirty_tx, sp->cur_tx, sp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+
+ while (sp->mc_setup_head != NULL
+ && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
+ struct speedo_mc_block *t;
+ if (netif_msg_tx_err(sp))
+ printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+ pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
+ sp->mc_setup_head->len, PCI_DMA_TODEVICE);
+ t = sp->mc_setup_head->next;
+ kfree(sp->mc_setup_head);
+ sp->mc_setup_head = t;
+ }
+ if (sp->mc_setup_head == NULL)
+ sp->mc_setup_tail = NULL;
+
+ sp->dirty_tx = dirty_tx;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct speedo_private *sp;
+ void __iomem *ioaddr;
+ long boguscnt = max_interrupt_work;
+ unsigned short status;
+ unsigned int handled = 0;
+
+ sp = netdev_priv(dev);
+ ioaddr = sp->regs;
+
+#ifndef final_version
+ /* A lock to prevent simultaneous entry on SMP machines. */
+ if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ sp->in_interrupt = 0; /* Avoid halting machine. */
+ return IRQ_NONE;
+ }
+#endif
+
+ do {
+ status = ioread16(ioaddr + SCBStatus);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ /* Will change from 0xfc00 to 0xff00 when we start handling
+ FCP and ER interrupts --Dragan */
+ iowrite16(status & 0xfc00, ioaddr + SCBStatus);
+
+ if (netif_msg_intr(sp))
+ printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
+ dev->name, status);
+
+ if ((status & 0xfc00) == 0)
+ break;
+ handled = 1;
+
+
+ if ((status & 0x5000) || /* Packet received, or Rx error. */
+ (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+ /* Need to gather the postponed packet. */
+ speedo_rx(dev);
+
+ /* Always check if all rx buffers are allocated. --SAW */
+ speedo_refill_rx_buffers(dev, 0);
+
+ spin_lock(&sp->lock);
+ /*
+ * The chip may have suspended reception for various reasons.
+ * Check for that, and re-prime it should this be the case.
+ */
+ switch ((status >> 2) & 0xf) {
+ case 0: /* Idle */
+ break;
+ case 1: /* Suspended */
+ case 2: /* No resources (RxFDs) */
+ case 9: /* Suspended with no more RBDs */
+ case 10: /* No resources due to no RBDs */
+ case 12: /* Ready with no RBDs */
+ speedo_rx_soft_reset(dev);
+ break;
+ case 3: case 5: case 6: case 7: case 8:
+ case 11: case 13: case 14: case 15:
+ /* these are all reserved values */
+ break;
+ }
+
+
+ /* User interrupt, Command/Tx unit interrupt or CU not active. */
+ if (status & 0xA400) {
+ speedo_tx_buffer_gc(dev);
+ if (sp->tx_full
+ && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+ /* The ring is no longer full. */
+ sp->tx_full = 0;
+ netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
+ }
+ }
+
+ spin_unlock(&sp->lock);
+
+ if (--boguscnt < 0) {
+ printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ /* Will change from 0xfc00 to 0xff00 when we start handling
+ FCP and ER interrupts --Dragan */
+ iowrite16(0xfc00, ioaddr + SCBStatus);
+ break;
+ }
+ } while (1);
+
+ if (netif_msg_intr(sp))
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + SCBStatus));
+
+ clear_bit(0, (void*)&sp->in_interrupt);
+ return IRQ_RETVAL(handled);
+}
+
+static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ struct RxFD *rxf;
+ struct sk_buff *skb;
+ /* Get a fresh skbuff to replace the consumed one. */
+ skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
+	sp->rx_skbuff[entry] = skb;
+	if (skb == NULL) {
+		sp->rx_ringp[entry] = NULL;
+		return NULL;
+	}
+	rx_align(skb);			/* Align IP on 16 byte boundary */
+ rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+ sp->rx_ring_dma[entry] =
+ pci_map_single(sp->pdev, rxf,
+ PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+ skb->dev = dev;
+ skb_reserve(skb, sizeof(struct RxFD));
+ rxf->rx_buf_addr = 0xffffffff;
+ pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD), PCI_DMA_TODEVICE);
+ return rxf;
+}
+
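+/* Append a freshly mapped RxFD to the tail of the hardware Rx chain: link it
+   from the previous tail and clear that descriptor's top status bits (the
+   end-of-list marking set in speedo_init_rx_ring()) so the receiver can
+   advance into the new buffer. */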
+static inline void speedo_rx_link(struct net_device *dev, int entry,
+ struct RxFD *rxf, dma_addr_t rxf_dma)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
+ rxf->link = 0; /* None yet. */
+ rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+ sp->last_rxf->link = cpu_to_le32(rxf_dma);
+ sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+ pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
+ sizeof(struct RxFD), PCI_DMA_TODEVICE);
+ sp->last_rxf = rxf;
+ sp->last_rxf_dma = rxf_dma;
+}
+
+static int speedo_refill_rx_buf(struct net_device *dev, int force)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int entry;
+ struct RxFD *rxf;
+
+ entry = sp->dirty_rx % RX_RING_SIZE;
+ if (sp->rx_skbuff[entry] == NULL) {
+ rxf = speedo_rx_alloc(dev, entry);
+ if (rxf == NULL) {
+ unsigned int forw;
+ int forw_entry;
+ if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
+ printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
+ dev->name, force);
+ sp->rx_ring_state |= RrOOMReported;
+ }
+ speedo_show_state(dev);
+ if (!force)
+ return -1; /* Better luck next time! */
+ /* Borrow an skb from one of next entries. */
+ for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
+ if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
+ break;
+ if (forw == sp->cur_rx)
+ return -1;
+ forw_entry = forw % RX_RING_SIZE;
+ sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
+ sp->rx_skbuff[forw_entry] = NULL;
+ rxf = sp->rx_ringp[forw_entry];
+ sp->rx_ringp[forw_entry] = NULL;
+ sp->rx_ringp[entry] = rxf;
+ }
+ } else {
+ rxf = sp->rx_ringp[entry];
+ }
+ speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
+ sp->dirty_rx++;
+ sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
+ return 0;
+}
+
+static void speedo_refill_rx_buffers(struct net_device *dev, int force)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+
+ /* Refill the RX ring. */
+ while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
+ speedo_refill_rx_buf(dev, force) != -1);
+}
+
+static int
+speedo_rx(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int entry = sp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+ int alloc_ok = 1;
+ int npkts = 0;
+
+ if (netif_msg_intr(sp))
+ printk(KERN_DEBUG " In speedo_rx().\n");
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (sp->rx_ringp[entry] != NULL) {
+ int status;
+ int pkt_len;
+
+ pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+ status = le32_to_cpu(sp->rx_ringp[entry]->status);
+ pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+
+ if (!(status & RxComplete))
+ break;
+
+ if (--rx_work_limit < 0)
+ break;
+
+ /* Check for a rare out-of-memory case: the current buffer is
+ the last buffer allocated in the RX ring. --SAW */
+ if (sp->last_rxf == sp->rx_ringp[entry]) {
+ /* Postpone the packet. It'll be reaped at an interrupt when this
+ packet is no longer the last packet in the ring. */
+ if (netif_msg_rx_err(sp))
+ printk(KERN_DEBUG "%s: RX packet postponed!\n",
+ dev->name);
+ sp->rx_ring_state |= RrPostponed;
+ break;
+ }
+
+ if (netif_msg_rx_status(sp))
+ printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
+ pkt_len);
+ if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+ if (status & RxErrTooBig)
+ printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+ "status %8.8x!\n", dev->name, status);
+ else if (! (status & RxOK)) {
+ /* There was a fatal error. This *should* be impossible. */
+ sp->stats.rx_errors++;
+ printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+ "status %8.8x.\n",
+ dev->name, status);
+ }
+ } else {
+ struct sk_buff *skb;
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD) + pkt_len,
+ PCI_DMA_FROMDEVICE);
+
+#if 1 || USE_IP_CSUM
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
+ sizeof(struct RxFD) + pkt_len,
+ PCI_DMA_FROMDEVICE);
+ npkts++;
+ } else {
+ /* Pass up the already-filled skbuff. */
+ skb = sp->rx_skbuff[entry];
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+ dev->name);
+ break;
+ }
+ sp->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ npkts++;
+ sp->rx_ringp[entry] = NULL;
+ pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
+ PKT_BUF_SZ + sizeof(struct RxFD),
+ PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ sp->stats.rx_packets++;
+ sp->stats.rx_bytes += pkt_len;
+ }
+ entry = (++sp->cur_rx) % RX_RING_SIZE;
+ sp->rx_ring_state &= ~RrPostponed;
+ /* Refill the recently taken buffers.
+ Do it one-by-one to handle traffic bursts better. */
+ if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
+ alloc_ok = 0;
+ }
+
+ /* Try hard to refill the recently taken buffers. */
+ speedo_refill_rx_buffers(dev, 1);
+
+ if (npkts)
+ sp->last_rx_time = jiffies;
+
+ return 0;
+}
+
+static int
+speedo_close(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ int i;
+
+ netdevice_stop(dev);
+ netif_stop_queue(dev);
+
+ if (netif_msg_ifdown(sp))
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, ioread16(ioaddr + SCBStatus));
+
+ /* Shut off the media monitoring timer. */
+ del_timer_sync(&sp->timer);
+
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
+
+ /* Shutting down the chip nicely fails to disable flow control. So.. */
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort); /* flush posted write */
+ /*
+ * The chip requires a 10 microsecond quiet period. Wait here!
+ */
+ udelay(10);
+
+ free_irq(dev->irq, dev);
+ speedo_show_state(dev);
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = sp->rx_skbuff[i];
+ sp->rx_skbuff[i] = NULL;
+ /* Clear the Rx descriptors. */
+ if (skb) {
+ pci_unmap_single(sp->pdev,
+ sp->rx_ring_dma[i],
+ PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = sp->tx_skbuff[i];
+ sp->tx_skbuff[i] = NULL;
+ /* Clear the Tx descriptors. */
+ if (skb) {
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
+
+ /* Free multicast setting blocks. */
+ for (i = 0; sp->mc_setup_head != NULL; i++) {
+ struct speedo_mc_block *t;
+ t = sp->mc_setup_head->next;
+ kfree(sp->mc_setup_head);
+ sp->mc_setup_head = t;
+ }
+ sp->mc_setup_tail = NULL;
+ if (netif_msg_ifdown(sp))
+ printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
+
+ pci_set_power_state(sp->pdev, PCI_D2);
+
+ return 0;
+}
+
+/* The Speedo-3 has an especially awkward and unusable method of getting
+ statistics out of the chip. It takes an unpredictable length of time
+ for the dump-stats command to complete. To avoid a busy-wait loop we
+ update the stats with the previous dump results, and then trigger a
+ new dump.
+
+ Oh, and incoming frames are dropped while executing dump-stats!
+ */
+static struct net_device_stats *
+speedo_get_stats(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+
+ /* Update only if the previous dump finished. */
+ if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
+ sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
+ sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
+ /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
+ sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
+ sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
+ sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
+ sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
+ sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
+ sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
+ sp->lstats->done_marker = 0x0000;
+ if (netif_running(dev)) {
+ unsigned long flags;
+ /* Take a spinlock to make wait_for_cmd_done and sending the
+ command atomic. --SAW */
+ spin_lock_irqsave(&sp->lock, flags);
+ wait_for_cmd_done(dev, sp);
+ iowrite8(CUDumpStats, ioaddr + SCBCmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+ }
+ return &sp->stats;
+}
+
+static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
+ strncpy(info->version, version, sizeof(info->version)-1);
+ if (sp->pdev)
+ strcpy(info->bus_info, pci_name(sp->pdev));
+}
+
+static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ spin_lock_irq(&sp->lock);
+ mii_ethtool_gset(&sp->mii_if, ecmd);
+ spin_unlock_irq(&sp->lock);
+ return 0;
+}
+
+static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&sp->lock);
+ res = mii_ethtool_sset(&sp->mii_if, ecmd);
+ spin_unlock_irq(&sp->lock);
+ return res;
+}
+
+static int speedo_nway_reset(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return mii_nway_restart(&sp->mii_if);
+}
+
+static u32 speedo_get_link(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return mii_link_ok(&sp->mii_if);
+}
+
+static u32 speedo_get_msglevel(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return sp->msg_enable;
+}
+
+static void speedo_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ sp->msg_enable = v;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = speedo_get_drvinfo,
+ .get_settings = speedo_get_settings,
+ .set_settings = speedo_set_settings,
+ .nway_reset = speedo_nway_reset,
+ .get_link = speedo_get_link,
+ .get_msglevel = speedo_get_msglevel,
+ .set_msglevel = speedo_set_msglevel,
+};
+
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int phy = sp->phy[0] & 0x1f;
+ int saved_acpi;
+ int t;
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = phy;
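+		/* Fall through to read the requested register as well. */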
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ /* FIXME: these operations need to be serialized with MDIO
+ access from the timeout handler.
+ They are currently serialized only with MDIO access from the
+ timer routine. 2000/05/09 SAW */
+ saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
+ t = del_timer_sync(&sp->timer);
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ if (t)
+ add_timer(&sp->timer); /* may be set to the past --SAW */
+ pci_set_power_state(sp->pdev, saved_acpi);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
+ t = del_timer_sync(&sp->timer);
+ mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
+ if (t)
+ add_timer(&sp->timer); /* may be set to the past --SAW */
+ pci_set_power_state(sp->pdev, saved_acpi);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This is very ugly with Intel chips -- we usually have to execute an
+ entire configuration command, plus process a multicast command.
+ This is complicated. We must put a large configuration command and
+ an arbitrarily-sized multicast command in the transmit list.
+ To minimize the disruption -- the previous command might have already
+ loaded the link -- we convert the current command block, normally a Tx
+ command, into a no-op and link it to the new command.
+*/
+static void set_rx_mode(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+ struct descriptor *last_cmd;
+ char new_rx_mode;
+ unsigned long flags;
+ int entry, i;
+
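+	/* rx_mode encoding: 0 = normal, 1 = receive all multicast,
+	   3 = promiscuous; -1 means the mode still has to be programmed. */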
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_rx_mode = 3;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ dev->mc_count > multicast_filter_limit) {
+ new_rx_mode = 1;
+ } else
+ new_rx_mode = 0;
+
+ if (netif_msg_rx_status(sp))
+ printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
+ sp->rx_mode, new_rx_mode);
+
+ if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
+ /* The Tx ring is full -- don't add anything! Hope the mode will be
+ * set again later. */
+ sp->rx_mode = -1;
+ return;
+ }
+
+ if (new_rx_mode != sp->rx_mode) {
+ u8 *config_cmd_data;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ entry = sp->cur_tx++ % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = NULL; /* Redundant. */
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+ sp->tx_ring[entry].link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+ config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+ /* Construct a full CmdConfig frame. */
+ memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
+ config_cmd_data[1] = (txfifo << 4) | rxfifo;
+ config_cmd_data[4] = rxdmacount;
+ config_cmd_data[5] = txdmacount + 0x80;
+ config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
+		/* 0x80 doesn't disable flow control; 0x84 does.
+		   Disable flow control since we are not ACK-ing any FC interrupts
+		   for now. --Dragan */
+ config_cmd_data[19] = 0x84;
+ config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
+ config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
+ if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
+ config_cmd_data[15] |= 0x80;
+ config_cmd_data[8] = 0;
+ }
+ /* Trigger the command unit resume. */
+ wait_for_cmd_done(dev, sp);
+ clear_suspend(last_cmd);
+ iowrite8(CUResume, ioaddr + SCBCmd);
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ }
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+
+ if (new_rx_mode == 0 && dev->mc_count < 4) {
+ /* The simple case of 0-3 multicast list entries occurs often, and
+ fits within one tx_ring[] entry. */
+ struct dev_mc_list *mclist;
+ u16 *setup_params, *eaddrs;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ entry = sp->cur_tx++ % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = NULL;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+ sp->tx_ring[entry].link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+ sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+ setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+ *setup_params++ = cpu_to_le16(dev->mc_count*6);
+ /* Fill in the multicast addresses. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ }
+
+ wait_for_cmd_done(dev, sp);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ iowrite8(CUResume, ioaddr + SCBCmd);
+
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ }
+ spin_unlock_irqrestore(&sp->lock, flags);
+ } else if (new_rx_mode == 0) {
+ struct dev_mc_list *mclist;
+ u16 *setup_params, *eaddrs;
+ struct speedo_mc_block *mc_blk;
+ struct descriptor *mc_setup_frm;
+ int i;
+
+ mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
+ GFP_ATOMIC);
+ if (mc_blk == NULL) {
+ printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
+ dev->name);
+ sp->rx_mode = -1; /* We failed, try again. */
+ return;
+ }
+ mc_blk->next = NULL;
+ mc_blk->len = 2 + multicast_filter_limit*6;
+ mc_blk->frame_dma =
+ pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
+ PCI_DMA_TODEVICE);
+ mc_setup_frm = &mc_blk->frame;
+
+ /* Fill the setup frame. */
+ if (netif_msg_ifup(sp))
+ printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
+ dev->name, mc_setup_frm);
+ mc_setup_frm->cmd_status =
+ cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
+ /* Link set below. */
+ setup_params = (u16 *)&mc_setup_frm->params;
+ *setup_params++ = cpu_to_le16(dev->mc_count*6);
+ /* Fill in the multicast addresses. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ }
+
+ /* Disable interrupts while playing with the Tx Cmd list. */
+ spin_lock_irqsave(&sp->lock, flags);
+
+ if (sp->mc_setup_tail)
+ sp->mc_setup_tail->next = mc_blk;
+ else
+ sp->mc_setup_head = mc_blk;
+ sp->mc_setup_tail = mc_blk;
+ mc_blk->tx = sp->cur_tx;
+
+ entry = sp->cur_tx++ % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = mc_setup_frm;
+
+ /* Change the command to a NoOp, pointing to the CmdMulti command. */
+ sp->tx_skbuff[entry] = NULL;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
+ sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
+
+ /* Set the link in the setup frame. */
+ mc_setup_frm->link =
+ cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+
+ pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
+ mc_blk->len, PCI_DMA_TODEVICE);
+
+ wait_for_cmd_done(dev, sp);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ iowrite8(CUResume, ioaddr + SCBCmd);
+
+ if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+ netif_stop_queue(dev);
+ sp->tx_full = 1;
+ }
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ if (netif_msg_rx_status(sp))
+			printk(KERN_DEBUG "%s: CmdMCSetup frame with %d addresses in entry %d.\n",
+				   dev->name, dev->mc_count, entry);
+ }
+
+ sp->rx_mode = new_rx_mode;
+}
+
+#ifdef CONFIG_PM
+static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+
+ pci_save_state(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ del_timer_sync(&sp->timer);
+
+ netif_device_detach(dev);
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
+
+ /* XXX call pci_set_power_state ()? */
+ pci_disable_device(pdev);
+ pci_set_power_state (pdev, PCI_D3hot);
+ return 0;
+}
+
+static int eepro100_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+	/* I'm absolutely uncertain whether this part of the code works.
+	   The problems are:
+	    - correct hardware reinitialization;
+	    - correct driver behavior between different steps of the
+	      reinitialization;
+	    - serialization with other driver calls.
+	   2000/03/08 SAW */
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
+ speedo_resume(dev);
+ netif_device_attach(dev);
+ sp->rx_mode = -1;
+ sp->flow_ctrl = sp->partner = 0;
+ set_rx_mode(dev);
+ sp->timer.expires = RUN_AT(2*HZ);
+ add_timer(&sp->timer);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void __devexit eepro100_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct speedo_private *sp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+ release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+
+ pci_iounmap(pdev, sp->regs);
+ pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
+ + sizeof(struct speedo_stats),
+ sp->tx_ring, sp->tx_ring_dma);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static struct pci_device_id eepro100_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
+
+static struct pci_driver eepro100_driver = {
+ .name = "eepro100",
+ .id_table = eepro100_pci_tbl,
+ .probe = eepro100_init_one,
+ .remove = __devexit_p(eepro100_remove_one),
+#ifdef CONFIG_PM
+ .suspend = eepro100_suspend,
+ .resume = eepro100_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init eepro100_init_module(void)
+{
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init(&eepro100_driver);
+}
+
+static void __exit eepro100_cleanup_module(void)
+{
+ pci_unregister_driver(&eepro100_driver);
+}
+
+module_init(eepro100_init_module);
+module_exit(eepro100_cleanup_module);
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
new file mode 100644
index 000000000000..fc8e7947b334
--- /dev/null
+++ b/drivers/net/eexpress.c
@@ -0,0 +1,1752 @@
+/* Intel EtherExpress 16 device driver for Linux
+ *
+ * Written by John Sullivan, 1995
+ * based on original code by Donald Becker, with changes by
+ * Alan Cox and Pauline Middelink.
+ *
+ * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu>
+ *
+ * Many modifications, and currently maintained, by
+ * Philip Blundell <philb@gnu.org>
+ * Added the Compaq LTE Alan Cox <alan@redhat.com>
+ * Added MCA support Adam Fritzler <mid@auk.cx>
+ *
+ * Note - this driver is still experimental - it has problems on faster
+ * machines. Someone needs to sit down and go through it line by line with
+ * a databook...
+ */
+
+/* The EtherExpress 16 is a fairly simple card, based on a shared-memory
+ * design using the i82586 Ethernet coprocessor. It bears no relationship,
+ * as far as I know, to the similarly-named "EtherExpress Pro" range.
+ *
+ * Historically, Linux support for these cards has been very bad. However,
+ * things seem to be getting better slowly.
+ */
+
+/* If your card is confused about what sort of interface it has (e.g. it
+ * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART'
+ * or 'SOFTSET /LISA' from DOS seems to help.
+ */
+
+/* Here's the scoop on memory mapping.
+ *
+ * There are three ways to access EtherExpress card memory: either using the
+ * shared-memory mapping, or using PIO through the dataport, or using PIO
+ * through the "shadow memory" ports.
+ *
+ * The shadow memory system works by having the card map some of its memory
+ * as follows:
+ *
+ * (the low five bits of the SMPTR are ignored)
+ *
+ * base+0x4000..400f memory at SMPTR+0..15
+ * base+0x8000..800f memory at SMPTR+16..31
+ * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently)
+ * base+0xc008..c00f memory at 0x0008..0x000f
+ *
+ * This last set (the one at c008) is particularly handy because the SCB
+ * lives at 0x0008. So that set of ports gives us easy random access to data
+ * in the SCB without having to mess around setting up pointers and the like.
+ * We always use this method to access the SCB (via the scb_xx() functions).
+ *
+ * Dataport access works by aiming the appropriate (read or write) pointer
+ * at the first address you're interested in, and then reading or writing from
+ * the dataport. The pointers auto-increment after each transfer. We use
+ * this for data transfer.
+ *
+ * We don't use the shared-memory system because it allegedly doesn't work on
+ * all cards, and because it's a bit more prone to go wrong (it's one more
+ * thing to configure...).
+ */
+
+/* Known bugs:
+ *
+ * - The card seems to want to give us two interrupts every time something
+ * happens, where just one would be better.
+ */
+
+/*
+ *
+ * Note by Zoltan Szilagyi 10-12-96:
+ *
+ * I've succeeded in eliminating the "CU wedged" messages, and hence the
+ * lockups, which were only occurring with cards running in 8-bit mode ("force
+ * 8-bit operation" in Intel's SoftSet utility). This version of the driver
+ * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the
+ * CU before submitting a packet for transmission, and then restarts it as soon
+ * as the process of handing the packet over is complete. This is definitely an
+ * unnecessary slowdown if the card is running in 16-bit mode; therefore one
+ * should detect 16-bit vs 8-bit mode from the EEPROM settings and act
+ * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for
+ * ftp's, which is significantly better than I get in DOS, so the overhead of
+ * stopping and restarting the CU with each transmit is not prohibitive in
+ * practice.
+ *
+ * Update by David Woodhouse 11/5/99:
+ *
+ * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture.
+ * I assume that this is because 16-bit accesses are actually handled as two
+ * 8-bit accesses.
+ */
+
+#ifdef __alpha__
+#define LOCKUP16 1
+#endif
+#ifndef LOCKUP16
+#define LOCKUP16 0
+#endif
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/mca-legacy.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 4
+#endif
+
+#include "eexpress.h"
+
+#define EEXP_IO_EXTENT 16
+
+/*
+ * Private data declarations
+ */
+
+struct net_local
+{
+ struct net_device_stats stats;
+ unsigned long last_tx; /* jiffies when last transmit started */
+ unsigned long init_time; /* jiffies when eexp_hw_init586 called */
+ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
+ unsigned short rx_last; /* last rx buf */
+ unsigned short rx_ptr; /* first rx buf to look at */
+ unsigned short tx_head; /* next free tx buf */
+ unsigned short tx_reap; /* first in-use tx buf */
+ unsigned short tx_tail; /* previous tx buf to tx_head */
+ unsigned short tx_link; /* last known-executing tx buf */
+ unsigned short last_tx_restart; /* set to tx_link when we
+ restart the CU */
+ unsigned char started;
+ unsigned short rx_buf_start;
+ unsigned short rx_buf_end;
+ unsigned short num_tx_bufs;
+ unsigned short num_rx_bufs;
+ unsigned char width; /* 0 for 16bit, 1 for 8bit */
+ unsigned char was_promisc;
+ unsigned char old_mc_count;
+ spinlock_t lock;
+};
+
+/* This is the code and data that is downloaded to the EtherExpress card's
+ * memory at boot time.
+ */
+
+static unsigned short start_code[] = {
+/* 0x0000 */
+ 0x0001, /* ISCP: busy - cleared after reset */
+ 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
+
+ 0x0000,0x0000, /* SCB: status, commands */
+ 0x0000,0x0000, /* links to first command block,
+ first receive descriptor */
+ 0x0000,0x0000, /* CRC error, alignment error counts */
+ 0x0000,0x0000, /* out of resources, overrun error counts */
+
+ 0x0000,0x0000, /* pad */
+ 0x0000,0x0000,
+
+/* 0x20 -- start of 82586 CU program */
+#define CONF_LINK 0x20
+ 0x0000,Cmd_Config,
+ 0x0032, /* link to next command */
+ 0x080c, /* 12 bytes follow : fifo threshold=8 */
+ 0x2e40, /* don't rx bad frames
+ * SRDY/ARDY => ext. sync. : preamble len=8
+ * take addresses from data buffers
+ * 6 bytes/address
+ */
+ 0x6000, /* default backoff method & priority
+ * interframe spacing = 0x60 */
+ 0xf200, /* slot time=0x200
+ * max collision retry = 0xf */
+#define CONF_PROMISC 0x2e
+ 0x0000, /* no HDLC : normal CRC : enable broadcast
+ * disable promiscuous/multicast modes */
+	0x003c,			/* minimum frame length = 60 octets */
+
+ 0x0000,Cmd_SetAddr,
+ 0x003e, /* link to next command */
+#define CONF_HWADDR 0x38
+ 0x0000,0x0000,0x0000, /* hardware address placed here */
+
+ 0x0000,Cmd_MCast,
+ 0x0076, /* link to next command */
+#define CONF_NR_MULTICAST 0x44
+ 0x0000, /* number of multicast addresses */
+#define CONF_MULTICAST 0x46
+ 0x0000, 0x0000, 0x0000, /* some addresses */
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+
+#define CONF_DIAG_RESULT 0x76
+ 0x0000, Cmd_Diag,
+ 0x007c, /* link to next command */
+
+ 0x0000,Cmd_TDR|Cmd_INT,
+ 0x0084,
+#define CONF_TDR_RESULT 0x82
+ 0x0000,
+
+ 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
+ 0x0084 /* dummy link */
+};
+
+/* maps irq number to EtherExpress magic value */
+static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
+
+#ifdef CONFIG_MCA_LEGACY
+/* mapping of the first four bits of the second POS register */
+static unsigned short mca_iomap[] = {
+ 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200,
+ 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300
+};
+/* bits 5-7 of the second POS register */
+static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 };
+#endif
+
+/*
+ * Prototypes for Linux interface
+ */
+
+static int eexp_open(struct net_device *dev);
+static int eexp_close(struct net_device *dev);
+static void eexp_timeout(struct net_device *dev);
+static struct net_device_stats *eexp_stats(struct net_device *dev);
+static int eexp_xmit(struct sk_buff *buf, struct net_device *dev);
+
+static irqreturn_t eexp_irq(int irq, void *dev_addr, struct pt_regs *regs);
+static void eexp_set_multicast(struct net_device *dev);
+
+/*
+ * Prototypes for hardware access functions
+ */
+
+static void eexp_hw_rx_pio(struct net_device *dev);
+static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
+ unsigned short len);
+static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr);
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr,
+ unsigned char location);
+
+static unsigned short eexp_hw_lasttxstat(struct net_device *dev);
+static void eexp_hw_txrestart(struct net_device *dev);
+
+static void eexp_hw_txinit (struct net_device *dev);
+static void eexp_hw_rxinit (struct net_device *dev);
+
+static void eexp_hw_init586 (struct net_device *dev);
+static void eexp_setup_filter (struct net_device *dev);
+
+static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"};
+enum eexp_iftype {AUI=0, BNC=1, TPE=2};
+
+#define STARTED_RU 2
+#define STARTED_CU 1
+
+/*
+ * Primitive hardware access functions.
+ */
+
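+/* The ports at base+0xc008..0xc00f shadow card memory at 0x0008..0x000f,
+   where the i82586 SCB lives (see the memory-map notes and the start_code[]
+   layout above): status word, command word, command-block-list offset and
+   receive-frame-area offset. */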
+static inline unsigned short scb_status(struct net_device *dev)
+{
+ return inw(dev->base_addr + 0xc008);
+}
+
+static inline unsigned short scb_rdcmd(struct net_device *dev)
+{
+ return inw(dev->base_addr + 0xc00a);
+}
+
+static inline void scb_command(struct net_device *dev, unsigned short cmd)
+{
+ outw(cmd, dev->base_addr + 0xc00a);
+}
+
+static inline void scb_wrcbl(struct net_device *dev, unsigned short val)
+{
+ outw(val, dev->base_addr + 0xc00c);
+}
+
+static inline void scb_wrrfa(struct net_device *dev, unsigned short val)
+{
+ outw(val, dev->base_addr + 0xc00e);
+}
+
+static inline void set_loopback(struct net_device *dev)
+{
+ outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config);
+}
+
+static inline void clear_loopback(struct net_device *dev)
+{
+ outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config);
+}
+
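+/* Map an offset within the 32-byte window selected by SM_PTR onto its shadow
+   I/O port: window offsets 0-15 appear at base+0x4000+off, offsets 16-31 at
+   base+0x8000+(off-16), per the mapping table at the top of this file. */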
+static inline unsigned short int SHADOW(short int addr)
+{
+ addr &= 0x1f;
+ if (addr > 0xf) addr += 0x3ff0;
+ return addr + 0x4000;
+}
+
+/*
+ * Linux interface
+ */
+
+/*
+ * checks for presence of EtherExpress card
+ */
+
+static int __init do_express_probe(struct net_device *dev)
+{
+ unsigned short *port;
+ static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 };
+ unsigned short ioaddr = dev->base_addr;
+ int dev_irq = dev->irq;
+ int err;
+
+ SET_MODULE_OWNER(dev);
+
+ dev->if_port = 0xff; /* not set */
+
+#ifdef CONFIG_MCA_LEGACY
+ if (MCA_bus) {
+ int slot = 0;
+
+ /*
+ * Only find one card at a time. Subsequent calls
+ * will find others, however, proper multicard MCA
+ * probing and setup can't be done with the
+ * old-style Space.c init routines. -- ASF
+ */
+ while (slot != MCA_NOTFOUND) {
+ int pos0, pos1;
+
+ slot = mca_find_unused_adapter(0x628B, slot);
+ if (slot == MCA_NOTFOUND)
+ break;
+
+ pos0 = mca_read_stored_pos(slot, 2);
+ pos1 = mca_read_stored_pos(slot, 3);
+ ioaddr = mca_iomap[pos1&0xf];
+
+ dev->irq = mca_irqmap[(pos1>>4)&0x7];
+
+ /*
+			 * XXX: Transceiver selection is done
+ * differently on the MCA version.
+ * How to get it to select something
+ * other than external/AUI is currently
+ * unknown. This code is just for looks. -- ASF
+ */
+ if ((pos0 & 0x7) == 0x1)
+ dev->if_port = AUI;
+ else if ((pos0 & 0x7) == 0x5) {
+ if (pos1 & 0x80)
+ dev->if_port = BNC;
+ else
+ dev->if_port = TPE;
+ }
+
+ mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA");
+ mca_set_adapter_procfn(slot, NULL, dev);
+ mca_mark_as_used(slot);
+
+ break;
+ }
+ }
+#endif
+ if (ioaddr&0xfe00) {
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
+ return -EBUSY;
+ err = eexp_hw_probe(dev,ioaddr);
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ return err;
+ } else if (ioaddr)
+ return -ENXIO;
+
+ for (port=&ports[0] ; *port ; port++ )
+ {
+ unsigned short sum = 0;
+ int i;
+ if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress"))
+ continue;
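+		/* The ID port returns the 0xbaba signature one nibble at a time;
+		   the low two bits of each read give that nibble's position. */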
+ for ( i=0 ; i<4 ; i++ )
+ {
+ unsigned short t;
+ t = inb(*port + ID_PORT);
+ sum |= (t>>4) << ((t & 0x03)<<2);
+ }
+ if (sum==0xbaba && !eexp_hw_probe(dev,*port)) {
+ release_region(*port, EEXP_IO_EXTENT);
+ return 0;
+ }
+ release_region(*port, EEXP_IO_EXTENT);
+ dev->irq = dev_irq;
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init express_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_express_probe(dev);
+ if (!err) {
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ }
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/*
+ * open and initialize the adapter, ready for use
+ */
+
+static int eexp_open(struct net_device *dev)
+{
+ int ret;
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
+#endif
+
+ if (!dev->irq || !irqrmap[dev->irq])
+ return -ENXIO;
+
+ ret = request_irq(dev->irq,&eexp_irq,0,dev->name,dev);
+ if (ret) return ret;
+
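+	/* Claim the base register block and the three shadow-memory windows
+	   at base+0x4000, +0x8000 and +0xc000 (see the notes at the top). */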
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
+		printk(KERN_WARNING "EtherExpress io port %x is busy.\n",
+		       ioaddr);
+ goto err_out1;
+ }
+ if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+		printk(KERN_WARNING "EtherExpress io port %x is busy.\n",
+		       ioaddr+0x4000);
+ goto err_out2;
+ }
+ if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+		printk(KERN_WARNING "EtherExpress io port %x is busy.\n",
+		       ioaddr+0x8000);
+ goto err_out3;
+ }
+ if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+		printk(KERN_WARNING "EtherExpress io port %x is busy.\n",
+		       ioaddr+0xc000);
+ goto err_out4;
+ }
+
+ if (lp->width) {
+		printk(KERN_INFO "%s: forcing ASIC to 8-bit mode\n", dev->name);
+ outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config);
+ }
+
+ eexp_hw_init586(dev);
+ netif_start_queue(dev);
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
+#endif
+ return 0;
+
+ err_out4:
+ release_region(ioaddr+0x8000, EEXP_IO_EXTENT);
+ err_out3:
+ release_region(ioaddr+0x4000, EEXP_IO_EXTENT);
+ err_out2:
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ err_out1:
+ free_irq(dev->irq, dev);
+ return -EBUSY;
+}
+
+/*
+ * close and disable the interface, leaving the 586 in reset.
+ */
+
+static int eexp_close(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+
+ int irq = dev->irq;
+
+ netif_stop_queue(dev);
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ lp->started = 0;
+ scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);
+ outb(0,ioaddr+SIGNAL_CA);
+ free_irq(irq,dev);
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ release_region(ioaddr+0x4000, 16);
+ release_region(ioaddr+0x8000, 16);
+ release_region(ioaddr+0xc000, 16);
+
+ return 0;
+}
+
+/*
+ * Return interface stats
+ */
+
+static struct net_device_stats *eexp_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/*
+ * This gets called when a higher level thinks we are broken. Check that
+ * nothing has become jammed in the CU.
+ */
+
+static void unstick_cu(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ if (lp->started)
+ {
+ if ((jiffies - dev->trans_start)>50)
+ {
+ if (lp->tx_link==lp->last_tx_restart)
+ {
+ unsigned short boguscount=200,rsst;
+ printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
+ dev->name, scb_status(dev));
+ eexp_hw_txinit(dev);
+ lp->last_tx_restart = 0;
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ while (!SCB_complete(rsst=scb_status(dev)))
+ {
+ if (!--boguscount)
+ {
+ boguscount=200;
+ printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
+ dev->name,rsst);
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ }
+ netif_wake_queue(dev);
+ }
+ else
+ {
+ unsigned short status = scb_status(dev);
+ if (SCB_CUdead(status))
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
+ dev->name, status, txstatus);
+ eexp_hw_txrestart(dev);
+ }
+ else
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ if (netif_queue_stopped(dev) && !txstatus)
+ {
+ printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
+ dev->name,status,txstatus);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ if ((jiffies-lp->init_time)>10)
+ {
+ unsigned short status = scb_status(dev);
+ printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
+ dev->name, status);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+static void eexp_timeout(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+#endif
+ int status;
+
+ disable_irq(dev->irq);
+
+ /*
+ * Best would be to use synchronize_irq(); spin_lock() here
+	 *	lets us make it work first.
+ */
+
+#ifdef CONFIG_SMP
+ spin_lock_irqsave(&lp->lock, flags);
+#endif
+
+ status = scb_status(dev);
+ unstick_cu(dev);
+ printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name,
+ (SCB_complete(status)?"lost interrupt":
+ "board on fire"));
+ lp->stats.tx_errors++;
+ lp->last_tx = jiffies;
+ if (!SCB_complete(status)) {
+ scb_command(dev, SCB_CUabort);
+ outb(0,dev->base_addr+SIGNAL_CA);
+ }
+ netif_wake_queue(dev);
+#ifdef CONFIG_SMP
+ spin_unlock_irqrestore(&lp->lock, flags);
+#endif
+}
+
+/*
+ * Called to transmit a packet, or to allow us to right ourselves
+ * if the kernel thinks we've died.
+ */
+static int eexp_xmit(struct sk_buff *buf, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short length = buf->len;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+#endif
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
+#endif
+
+ if (buf->len < ETH_ZLEN) {
+ buf = skb_padto(buf, ETH_ZLEN);
+ if (buf == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ disable_irq(dev->irq);
+
+ /*
+ * Best would be to use synchronize_irq(); spin_lock() here
+	 *	lets us make it work first.
+ */
+
+#ifdef CONFIG_SMP
+ spin_lock_irqsave(&lp->lock, flags);
+#endif
+
+ {
+ unsigned short *data = (unsigned short *)buf->data;
+
+ lp->stats.tx_bytes += length;
+
+ eexp_hw_tx_pio(dev,data,length);
+ }
+ dev_kfree_skb(buf);
+#ifdef CONFIG_SMP
+ spin_unlock_irqrestore(&lp->lock, flags);
+#endif
+ enable_irq(dev->irq);
+ return 0;
+}
+
+/*
+ * Handle an EtherExpress interrupt
+ * If we've finished initializing, start the RU and CU up.
+ * If we've already started, reap tx buffers, handle any received packets,
+ * check to make sure we've not become wedged.
+ */
+
+static unsigned short eexp_start_irq(struct net_device *dev,
+ unsigned short status)
+{
+ unsigned short ack_cmd = SCB_ack(status);
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+ if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) {
+ short diag_status, tdr_status;
+ while (SCB_CUstat(status)==2)
+ status = scb_status(dev);
+#if NET_DEBUG > 4
+ printk("%s: CU went non-active (status %04x)\n",
+ dev->name, status);
+#endif
+
+ outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR);
+ diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT));
+ if (diag_status & 1<<11) {
+ printk(KERN_WARNING "%s: 82586 failed self-test\n",
+ dev->name);
+ } else if (!(diag_status & 1<<13)) {
+ printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name);
+ }
+
+ outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR);
+ tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT));
+ if (tdr_status & (TDR_SHORT|TDR_OPEN)) {
+			printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n",
+			       dev->name,
+			       (tdr_status & TDR_SHORT) ? "short" : "broken",
+			       tdr_status & TDR_TIME,
+			       ((tdr_status & TDR_TIME) != 1) ? "s" : "");
+ }
+ else if (tdr_status & TDR_XCVRPROBLEM) {
+ printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name);
+ }
+ else if (tdr_status & TDR_LINKOK) {
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name);
+#endif
+ } else {
+ printk("%s: TDR is ga-ga (status %04x)\n", dev->name,
+ tdr_status);
+ }
+
+ lp->started |= STARTED_CU;
+ scb_wrcbl(dev, lp->tx_link);
+ /* if the RU isn't running, start it now */
+ if (!(lp->started & STARTED_RU)) {
+ ack_cmd |= SCB_RUstart;
+ scb_wrrfa(dev, lp->rx_buf_start);
+ lp->rx_ptr = lp->rx_buf_start;
+ lp->started |= STARTED_RU;
+ }
+ ack_cmd |= SCB_CUstart | 0x2000;
+ }
+
+ if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4)
+ lp->started|=STARTED_RU;
+
+ return ack_cmd;
+}
+
+static void eexp_cmd_clear(struct net_device *dev)
+{
+ unsigned long int oldtime = jiffies;
+ while (scb_rdcmd(dev) && ((jiffies-oldtime)<10));
+ if (scb_rdcmd(dev)) {
+ printk("%s: command didn't clear\n", dev->name);
+ }
+}
+
+static irqreturn_t eexp_irq(int irq, void *dev_info, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_info;
+ struct net_local *lp;
+ unsigned short ioaddr,status,ack_cmd;
+ unsigned short old_read_ptr, old_write_ptr;
+
+ if (dev==NULL)
+ {
+ printk(KERN_WARNING "eexpress: irq %d for unknown device\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ old_read_ptr = inw(ioaddr+READ_PTR);
+ old_write_ptr = inw(ioaddr+WRITE_PTR);
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+
+ status = scb_status(dev);
+
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status);
+#endif
+
+ if (lp->started == (STARTED_CU | STARTED_RU)) {
+
+ do {
+ eexp_cmd_clear(dev);
+
+ ack_cmd = SCB_ack(status);
+ scb_command(dev, ack_cmd);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ eexp_cmd_clear(dev);
+
+ if (SCB_complete(status)) {
+ if (!eexp_hw_lasttxstat(dev)) {
+ printk("%s: tx interrupt but no status\n", dev->name);
+ }
+ }
+
+ if (SCB_rxdframe(status))
+ eexp_hw_rx_pio(dev);
+
+ status = scb_status(dev);
+ } while (status & 0xc000);
+
+ if (SCB_RUdead(status))
+ {
+ printk(KERN_WARNING "%s: RU stopped: status %04x\n",
+ dev->name,status);
+#if 0
+ printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd);
+ outw(lp->cur_rfd, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT));
+ outw(lp->cur_rfd+6, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT));
+ outw(rbd, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT));
+ outw(rbd+8, ioaddr+READ_PTR);
+ printk("[%04x]\n", inw(ioaddr+DATAPORT));
+#endif
+ lp->stats.rx_errors++;
+#if 1
+ eexp_hw_rxinit(dev);
+#else
+ lp->cur_rfd = lp->first_rfd;
+#endif
+ scb_wrrfa(dev, lp->rx_buf_start);
+ scb_command(dev, SCB_RUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ } else {
+ if (status & 0x8000)
+ ack_cmd = eexp_start_irq(dev, status);
+ else
+ ack_cmd = SCB_ack(status);
+ scb_command(dev, ack_cmd);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+
+ eexp_cmd_clear(dev);
+
+ outb(SIRQ_en|irqrmap[irq],ioaddr+SET_IRQ);
+
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_irq()\n", dev->name);
+#endif
+ outw(old_read_ptr, ioaddr+READ_PTR);
+ outw(old_write_ptr, ioaddr+WRITE_PTR);
+
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Hardware access functions
+ */
+
+/*
+ * Set the cable type to use.
+ */
+
+static void eexp_hw_set_interface(struct net_device *dev)
+{
+ unsigned char oldval = inb(dev->base_addr + 0x300e);
+ oldval &= ~0x82;
+ switch (dev->if_port) {
+ case TPE:
+ oldval |= 0x2;
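+		/* fall through: TPE also sets the 0x80 bit below */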
+ case BNC:
+ oldval |= 0x80;
+ break;
+ }
+ outb(oldval, dev->base_addr+0x300e);
+ mdelay(20);
+}
+
+/*
+ * Check all the receive buffers, and hand any received packets
+ * to the upper levels. Basic sanity check on each frame
+ * descriptor, though we don't bother trying to fix broken ones.
+ */
+
+static void eexp_hw_rx_pio(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short rx_block = lp->rx_ptr;
+ unsigned short boguscount = lp->num_rx_bufs;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short status;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
+#endif
+
+ do {
+ unsigned short rfd_cmd, rx_next, pbuf, pkt_len;
+
+ outw(rx_block, ioaddr + READ_PTR);
+ status = inw(ioaddr + DATAPORT);
+
+ if (FD_Done(status))
+ {
+ rfd_cmd = inw(ioaddr + DATAPORT);
+ rx_next = inw(ioaddr + DATAPORT);
+ pbuf = inw(ioaddr + DATAPORT);
+
+ outw(pbuf, ioaddr + READ_PTR);
+ pkt_len = inw(ioaddr + DATAPORT);
+
+ if (rfd_cmd!=0x0000)
+ {
+ printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n",
+ dev->name, rfd_cmd);
+ continue;
+ }
+ else if (pbuf!=rx_block+0x16)
+ {
+ printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n",
+ dev->name, rx_block+0x16, pbuf);
+ continue;
+ }
+ else if ((pkt_len & 0xc000)!=0xc000)
+ {
+ printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n",
+ dev->name, pkt_len & 0xc000);
+ continue;
+ }
+ else if (!FD_OK(status))
+ {
+ lp->stats.rx_errors++;
+ if (FD_CRC(status))
+ lp->stats.rx_crc_errors++;
+ if (FD_Align(status))
+ lp->stats.rx_frame_errors++;
+ if (FD_Resrc(status))
+ lp->stats.rx_fifo_errors++;
+ if (FD_DMA(status))
+ lp->stats.rx_over_errors++;
+ if (FD_Short(status))
+ lp->stats.rx_length_errors++;
+ }
+ else
+ {
+ struct sk_buff *skb;
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+16);
+ if (skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ outw(pbuf+10, ioaddr+READ_PTR);
+ insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ outw(rx_block, ioaddr+WRITE_PTR);
+ outw(0, ioaddr+DATAPORT);
+ outw(0, ioaddr+DATAPORT);
+ rx_block = rx_next;
+ }
+ } while (FD_Done(status) && boguscount--);
+ lp->rx_ptr = rx_block;
+}
+
+/*
+ * Hand a packet to the card for transmission
+ * If we get here, we MUST have already checked
+ * to make sure there is room in the transmit
+ * buffer region.
+ */
+
+static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
+ unsigned short len)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ if (LOCKUP16 || lp->width) {
+ /* Stop the CU so that there is no chance that it
+ jumps off to a bogus address while we are writing the
+ pointer to the next transmit packet in 8-bit mode --
+ this eliminates the "CU wedged" errors in 8-bit mode.
+ (Zoltan Szilagyi 10-12-96) */
+ scb_command(dev, SCB_CUsuspend);
+ outw(0xFFFF, ioaddr+SIGNAL_CA);
+ }
+
+ outw(lp->tx_head, ioaddr + WRITE_PTR);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x08, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x0e, ioaddr + DATAPORT);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x08, ioaddr + DATAPORT);
+
+ outw(0x8000|len, ioaddr + DATAPORT);
+ outw(-1, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x16, ioaddr + DATAPORT);
+ outw(0, ioaddr + DATAPORT);
+
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+
+ outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
+ outw(lp->tx_head, ioaddr + DATAPORT);
+
+ dev->trans_start = jiffies;
+ lp->tx_tail = lp->tx_head;
+ if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_head = TX_BUF_START;
+ else
+ lp->tx_head += TX_BUF_SIZE;
+ if (lp->tx_head != lp->tx_reap)
+ netif_wake_queue(dev);
+
+ if (LOCKUP16 || lp->width) {
+ /* Restart the CU so that the packet can actually
+ be transmitted. (Zoltan Szilagyi 10-12-96) */
+ scb_command(dev, SCB_CUresume);
+ outw(0xFFFF, ioaddr+SIGNAL_CA);
+ }
+
+ lp->stats.tx_packets++;
+ lp->last_tx = jiffies;
+}
+
+/*
+ * Sanity check the suspected EtherExpress card
+ * Read hardware address, reset card, size memory and initialize buffer
+ * memory pointers. These are held in dev->priv, in case someone has more
+ * than one card in a machine.
+ */
+
+static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
+{
+ unsigned short hw_addr[3];
+ unsigned char buswidth;
+ unsigned int memory_size;
+ int i;
+ unsigned short xsum = 0;
+ struct net_local *lp = netdev_priv(dev);
+
+ printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr);
+
+ outb(ASIC_RST, ioaddr+EEPROM_Ctrl);
+ outb(0, ioaddr+EEPROM_Ctrl);
+ udelay(500);
+ outb(i586_RST, ioaddr+EEPROM_Ctrl);
+
+ hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
+ hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
+ hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
+
+ /* Standard Address or Compaq LTE Address */
+ if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
+ (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
+ {
+ printk(" rejected: invalid address %04x%04x%04x\n",
+ hw_addr[2],hw_addr[1],hw_addr[0]);
+ return -ENODEV;
+ }
+
+ /* Calculate the EEPROM checksum. Carry on anyway if it's bad,
+ * though.
+ */
+ for (i = 0; i < 64; i++)
+ xsum += eexp_hw_readeeprom(ioaddr, i);
+ if (xsum != 0xbaba)
+ printk(" (bad EEPROM xsum 0x%02x)", xsum);
+
+ dev->base_addr = ioaddr;
+ for ( i=0 ; i<6 ; i++ )
+ dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
+
+ {
+ static char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0};
+ unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
+
+ /* Use the IRQ from EEPROM if none was given */
+ if (!dev->irq)
+ dev->irq = irqmap[setupval>>13];
+
+ if (dev->if_port == 0xff) {
+ dev->if_port = !(setupval & 0x1000) ? AUI :
+ eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TPE : BNC;
+ }
+
+ buswidth = !((setupval & 0x400) >> 10);
+ }
+
+ memset(lp, 0, sizeof(struct net_local));
+ spin_lock_init(&lp->lock);
+
+ printk("(IRQ %d, %s connector, %d-bit bus", dev->irq,
+ eexp_ifmap[dev->if_port], buswidth?8:16);
+
+ if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress"))
+ return -EBUSY;
+
+ eexp_hw_set_interface(dev);
+
+ release_region(dev->base_addr + 0x300e, 1);
+
+ /* Find out how much RAM we have on the card */
+ outw(0, dev->base_addr + WRITE_PTR);
+ for (i = 0; i < 32768; i++)
+ outw(0, dev->base_addr + DATAPORT);
+
+ for (memory_size = 0; memory_size < 64; memory_size++)
+ {
+ outw(memory_size<<10, dev->base_addr + READ_PTR);
+ if (inw(dev->base_addr+DATAPORT))
+ break;
+ outw(memory_size<<10, dev->base_addr + WRITE_PTR);
+ outw(memory_size | 0x5000, dev->base_addr+DATAPORT);
+ outw(memory_size<<10, dev->base_addr + READ_PTR);
+ if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000))
+ break;
+ }
+
+ /* Sort out the number of buffers. We may have 16, 32, 48 or 64k
+ * of RAM to play with.
+ */
+ lp->num_tx_bufs = 4;
+ lp->rx_buf_end = 0x3ff6;
+ switch (memory_size)
+ {
+ case 64:
+ lp->rx_buf_end += 0x4000;
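+		/* fall through */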
+ case 48:
+ lp->num_tx_bufs += 4;
+ lp->rx_buf_end += 0x4000;
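+		/* fall through */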
+ case 32:
+ lp->rx_buf_end += 0x4000;
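+		/* fall through */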
+ case 16:
+ printk(", %dk RAM)\n", memory_size);
+ break;
+	default:
+		printk(") bad memory size (%dk).\n", memory_size);
+		return -ENODEV;
+ }
+
+ lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
+ lp->width = buswidth;
+
+ dev->open = eexp_open;
+ dev->stop = eexp_close;
+ dev->hard_start_xmit = eexp_xmit;
+ dev->get_stats = eexp_stats;
+ dev->set_multicast_list = &eexp_set_multicast;
+ dev->tx_timeout = eexp_timeout;
+ dev->watchdog_timeo = 2*HZ;
+ return 0;
+}
+
+/*
+ * Read a word from the EtherExpress on-board serial EEPROM.
+ * The EEPROM contains 64 words of 16 bits.
+ */
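+/*
+ * Rough protocol sketch, inferred from the code below: assert chip
+ * select, clock out a 9-bit read command (start bit, read opcode and
+ * 7-bit address) MSB-first on EC_Wr, then clock 16 data bits in from
+ * EC_Rd, and finally drop chip select.
+ */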
+static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr,
+ unsigned char location)
+{
+ unsigned short cmd = 0x180|(location&0x7f);
+ unsigned short rval = 0,wval = EC_CS|i586_RST;
+ int i;
+
+ outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
+ for (i=0x100 ; i ; i>>=1 )
+ {
+ if (cmd&i)
+ wval |= EC_Wr;
+ else
+ wval &= ~EC_Wr;
+
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_Wr;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ for (i=0x8000 ; i ; i>>=1 )
+ {
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
+ rval |= i;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_CS;
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ return rval;
+}
+
+/*
+ * Reap tx buffers and return last transmit status.
+ * if ==0 then either:
+ * a) we're not transmitting anything, so why are we here?
+ * b) we've died.
+ * otherwise, Stat_Busy(return) means we've still got some packets
+ * to transmit, Stat_Done(return) means our buffers should be empty
+ * again
+ */
+
+static unsigned short eexp_hw_lasttxstat(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short tx_block = lp->tx_reap;
+ unsigned short status;
+
+ if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
+ return 0x0000;
+
+ do
+ {
+ outw(tx_block & ~31, dev->base_addr + SM_PTR);
+ status = inw(dev->base_addr + SHADOW(tx_block));
+ if (!Stat_Done(status))
+ {
+ lp->tx_link = tx_block;
+ return status;
+ }
+ else
+ {
+ lp->last_tx_restart = 0;
+ lp->stats.collisions += Stat_NoColl(status);
+ if (!Stat_OK(status))
+ {
+ char *whatsup = NULL;
+ lp->stats.tx_errors++;
+ if (Stat_Abort(status))
+ lp->stats.tx_aborted_errors++;
+ if (Stat_TNoCar(status)) {
+ whatsup = "aborted, no carrier";
+ lp->stats.tx_carrier_errors++;
+ }
+ if (Stat_TNoCTS(status)) {
+ whatsup = "aborted, lost CTS";
+ lp->stats.tx_carrier_errors++;
+ }
+ if (Stat_TNoDMA(status)) {
+ whatsup = "FIFO underran";
+ lp->stats.tx_fifo_errors++;
+ }
+ if (Stat_TXColl(status)) {
+ whatsup = "aborted, too many collisions";
+ lp->stats.tx_aborted_errors++;
+ }
+ if (whatsup)
+ printk(KERN_INFO "%s: transmit %s\n",
+ dev->name, whatsup);
+ }
+ else
+ lp->stats.tx_packets++;
+ }
+ if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_reap = tx_block = TX_BUF_START;
+ else
+ lp->tx_reap = tx_block += TX_BUF_SIZE;
+ netif_wake_queue(dev);
+ }
+ while (lp->tx_reap != lp->tx_head);
+
+ lp->tx_link = lp->tx_tail + 0x08;
+
+ return status;
+}
+
+/*
+ * This should never happen. It is called when some higher routine detects
+ * that the CU has stopped, to try to restart it from the last packet we knew
+ * we were working on, or from the idle loop if we had finished for the time
+ * being.
+ */
+
+static void eexp_hw_txrestart(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ lp->last_tx_restart = lp->tx_link;
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short boguscount=50,failcount=5;
+ while (!scb_status(dev))
+ {
+ if (!--boguscount)
+ {
+ if (--failcount)
+ {
+ printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev));
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ boguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Writes down the list of transmit buffers into card memory. Each
+ * entry consists of an 82586 transmit command, followed by a jump
+ * pointing to itself. When we want to transmit a packet, we write
+ * the data into the appropriate transmit buffer and then modify the
+ * preceding jump to point at the new transmit command. This means that
+ * the 586 command unit is continuously active.
+ */
+
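+/*
+ * Rough per-buffer layout, as written by eexp_hw_txinit() and
+ * eexp_hw_tx_pio() below (byte offsets from the start of the block):
+ *   0x00  transmit command (status, Cmd_INT|Cmd_Xmit, link, TBD offset)
+ *   0x08  self-linked Cmd_Nop acting as the trailing jump
+ *   0x0e  transmit buffer descriptor (length, next, buffer address)
+ *   0x16  packet data
+ */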
+static void eexp_hw_txinit(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short tx_block = TX_BUF_START;
+ unsigned short curtbuf;
+ unsigned short ioaddr = dev->base_addr;
+
+ for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
+ {
+ outw(tx_block, ioaddr + WRITE_PTR);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
+ outw(tx_block+0x08, ioaddr + DATAPORT);
+ outw(tx_block+0x0e, ioaddr + DATAPORT);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(tx_block+0x08, ioaddr + DATAPORT);
+
+ outw(0x8000, ioaddr + DATAPORT);
+ outw(-1, ioaddr + DATAPORT);
+ outw(tx_block+0x16, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+
+ tx_block += TX_BUF_SIZE;
+ }
+ lp->tx_head = TX_BUF_START;
+ lp->tx_reap = TX_BUF_START;
+ lp->tx_tail = tx_block - TX_BUF_SIZE;
+ lp->tx_link = lp->tx_tail + 0x08;
+ lp->rx_buf_start = tx_block;
+
+}
+
+/*
+ * Write the circular list of receive buffer descriptors to card memory.
+ * The end of the list isn't marked, which means that the 82586 receive
+ * unit will loop until buffers become available (this avoids it giving us
+ * "out of resources" messages).
+ */
+
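+/*
+ * Rough per-buffer layout, as written below (byte offsets from the start
+ * of the block, matching struct rfd_header in eexpress.h):
+ *   0x00  receive frame descriptor (status, command, link, RBD offset, ...)
+ *   0x16  receive buffer descriptor (count, next, buffer address, size)
+ *   0x20  packet data (RX_BUF_SIZE - 0x20 bytes)
+ */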
+static void eexp_hw_rxinit(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short rx_block = lp->rx_buf_start;
+ unsigned short ioaddr = dev->base_addr;
+
+ lp->num_rx_bufs = 0;
+ lp->rx_first = lp->rx_ptr = rx_block;
+ do
+ {
+ lp->num_rx_bufs++;
+
+ outw(rx_block, ioaddr + WRITE_PTR);
+
+ outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
+ outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
+ outw(0xffff, ioaddr+DATAPORT);
+
+ outw(0x0000, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+
+ outw(0x0000, ioaddr+DATAPORT);
+ outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
+ outw(rx_block + 0x20, ioaddr+DATAPORT);
+ outw(0, ioaddr+DATAPORT);
+ outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
+
+ lp->rx_last = rx_block;
+ rx_block += RX_BUF_SIZE;
+ } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
+
+
+ /* Make first Rx frame descriptor point to first Rx buffer
+ descriptor */
+ outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
+ outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+ /* Close Rx frame descriptor ring */
+ outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
+ outw(lp->rx_first, ioaddr+DATAPORT);
+
+ /* Close Rx buffer descriptor ring */
+ outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
+ outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+}
+
+/*
+ * Un-reset the 586, and start the configuration sequence. We don't wait for
+ * this to finish, but allow the interrupt handler to start the CU and RU for
+ * us. We can't start the receive/transmission system up before we know that
+ * the hardware is configured correctly.
+ */
+
+static void eexp_hw_init586(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+ int i;
+
+#if NET_DEBUG > 6
+ printk("%s: eexp_hw_init586()\n", dev->name);
+#endif
+
+ lp->started = 0;
+
+ set_loopback(dev);
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ /* Download the startup code */
+ outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR);
+ outw(lp->width?0x0001:0x0000, ioaddr + 0x8006);
+ outw(0x0000, ioaddr + 0x8008);
+ outw(0x0000, ioaddr + 0x800a);
+ outw(0x0000, ioaddr + 0x800c);
+ outw(0x0000, ioaddr + 0x800e);
+
+ for (i = 0; i < (sizeof(start_code)); i+=32) {
+ int j;
+ outw(i, ioaddr + SM_PTR);
+ for (j = 0; j < 16; j+=2)
+ outw(start_code[(i+j)/2],
+ ioaddr+0x4000+j);
+ for (j = 0; j < 16; j+=2)
+ outw(start_code[(i+j+16)/2],
+ ioaddr+0x8000+j);
+ }
+
+ /* Do we want promiscuous mode or multicast? */
+ outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
+ i = inw(ioaddr+SHADOW(CONF_PROMISC));
+ outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
+ ioaddr+SHADOW(CONF_PROMISC));
+ lp->was_promisc = dev->flags & IFF_PROMISC;
+#if 0
+ eexp_setup_filter(dev);
+#endif
+
+ /* Write our hardware address */
+ outw(CONF_HWADDR & ~31, ioaddr+SM_PTR);
+ outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR));
+ outw(((unsigned short *)dev->dev_addr)[1],
+ ioaddr+SHADOW(CONF_HWADDR+2));
+ outw(((unsigned short *)dev->dev_addr)[2],
+ ioaddr+SHADOW(CONF_HWADDR+4));
+
+ eexp_hw_txinit(dev);
+ eexp_hw_rxinit(dev);
+
+ outb(0,ioaddr+EEPROM_Ctrl);
+ mdelay(5);
+
+ scb_command(dev, 0xf000);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ outw(0, ioaddr+SM_PTR);
+
+ {
+ unsigned short rboguscount=50,rfailcount=5;
+ while (inw(ioaddr+0x4000))
+ {
+ if (!--rboguscount)
+ {
+ printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
+ dev->name);
+ scb_command(dev, 0);
+ outb(0,ioaddr+SIGNAL_CA);
+ rboguscount = 100;
+ if (!--rfailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
+ dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, 0xf000|SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short iboguscount=50,ifailcount=5;
+ while (!scb_status(dev))
+ {
+ if (!--iboguscount)
+ {
+ if (--ifailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
+ dev->name, scb_status(dev), scb_rdcmd(dev));
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, 0xf000|SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ iboguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ clear_loopback(dev);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ lp->init_time = jiffies;
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_hw_init586()\n", dev->name);
+#endif
+ return;
+}
+
+static void eexp_setup_filter(struct net_device *dev)
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ unsigned short ioaddr = dev->base_addr;
+ int count = dev->mc_count;
+ int i;
+ if (count > 8) {
+ printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
+ dev->name, count);
+ count = 8;
+ }
+
+ outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
+ outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST));
+	for (i = 0; i < count; i++) {
+		unsigned short *data;
+		if (!dmi) {
+			printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
+			break;
+		}
+		data = (unsigned short *)dmi->dmi_addr;
+		if (dmi->dmi_addrlen != ETH_ALEN) {
+			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
+			dmi = dmi->next;
+			continue;
+		}
+		outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
+		outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
+		outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
+		outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
+		outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
+		outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
+		dmi = dmi->next;
+	}
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+eexp_set_multicast(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ int kick = 0, i;
+ if ((dev->flags & IFF_PROMISC) != lp->was_promisc) {
+ outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
+ i = inw(ioaddr+SHADOW(CONF_PROMISC));
+ outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
+ ioaddr+SHADOW(CONF_PROMISC));
+ lp->was_promisc = dev->flags & IFF_PROMISC;
+ kick = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC)) {
+ eexp_setup_filter(dev);
+ if (lp->old_mc_count != dev->mc_count) {
+ kick = 1;
+ lp->old_mc_count = dev->mc_count;
+ }
+ }
+ if (kick) {
+ unsigned long oj;
+ scb_command(dev, SCB_CUsuspend);
+ outb(0, ioaddr+SIGNAL_CA);
+ outb(0, ioaddr+SIGNAL_CA);
+#if 0
+ printk("%s: waiting for CU to go suspended\n", dev->name);
+#endif
+ oj = jiffies;
+ while ((SCB_CUstat(scb_status(dev)) == 2) &&
+ ((jiffies-oj) < 2000));
+ if (SCB_CUstat(scb_status(dev)) == 2)
+ printk("%s: warning, CU didn't stop\n", dev->name);
+ lp->started &= ~(STARTED_CU);
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, SCB_CUstart);
+ outb(0, ioaddr+SIGNAL_CA);
+ }
+}
+
+
+/*
+ * MODULE stuff
+ */
+
+#ifdef MODULE
+
+#define EEXP_MAX_CARDS 4 /* max number of cards to support */
+
+static struct net_device *dev_eexp[EEXP_MAX_CARDS];
+static int irq[EEXP_MAX_CARDS];
+static int io[EEXP_MAX_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
+MODULE_LICENSE("GPL");
+
+
+/* Ideally the user would give us io=, irq= for every card. If any parameters
+ * are specified, we verify and then use them. If no parameters are given, we
+ * autoprobe for one card only.
+ */
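+/* For example (illustrative values only):
+ *	modprobe eexpress io=0x300,0x320 irq=10,11
+ */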
+int init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+		dev = alloc_etherdev(sizeof(struct net_local));
+		if (!dev)
+			break;
+		dev->irq = irq[this_dev];
+		dev->base_addr = io[this_dev];
+ if (io[this_dev] == 0) {
+ if (this_dev)
+ break;
+ printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
+ }
+ if (do_express_probe(dev) == 0 && register_netdev(dev) == 0) {
+ dev_eexp[this_dev] = dev;
+ found++;
+ continue;
+ }
+ printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_eexp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/eexpress.h b/drivers/net/eexpress.h
new file mode 100644
index 000000000000..28b431268480
--- /dev/null
+++ b/drivers/net/eexpress.h
@@ -0,0 +1,179 @@
+/*
+ * eexpress.h: Intel EtherExpress16 defines
+ */
+
+/*
+ * EtherExpress card register addresses
+ * as offsets from the base IO region (dev->base_addr)
+ */
+
+#define DATAPORT 0x0000
+#define WRITE_PTR 0x0002
+#define READ_PTR 0x0004
+#define SIGNAL_CA 0x0006
+#define SET_IRQ 0x0007
+#define SM_PTR 0x0008
+#define MEM_Dec 0x000a
+#define MEM_Ctrl 0x000b
+#define MEM_Page_Ctrl 0x000c
+#define Config 0x000d
+#define EEPROM_Ctrl 0x000e
+#define ID_PORT 0x000f
+#define MEM_ECtrl 0x000f
+
+/*
+ * card register defines
+ */
+
+/* SET_IRQ */
+#define SIRQ_en 0x08
+#define SIRQ_dis 0x00
+
+/* EEPROM_Ctrl */
+#define EC_Clk 0x01
+#define EC_CS 0x02
+#define EC_Wr 0x04
+#define EC_Rd 0x08
+#define ASIC_RST 0x40
+#define i586_RST 0x80
+
+#define eeprom_delay() { udelay(40); }
+
+/*
+ * i82586 Memory Configuration
+ */
+
+/* (System Configuration Pointer) System start up block, read after 586_RST */
+#define SCP_START 0xfff6
+
+/* Intermediate System Configuration Pointer */
+#define ISCP_START 0x0000
+
+/* System Command Block */
+#define SCB_START 0x0008
+
+/* Start of buffer region. Everything before this is used for control
+ * structures and the CU configuration program. The memory layout is
+ * determined in eexp_hw_probe(), once we know how much memory is
+ * available on the card.
+ */
+
+#define TX_BUF_START 0x0100
+
+#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
+#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
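+
+/* With the defaults above, eexp_hw_probe() lays the buffers out as
+ * num_tx_bufs transmit blocks of TX_BUF_SIZE starting at TX_BUF_START,
+ * followed by receive blocks of RX_BUF_SIZE up to lp->rx_buf_end.
+ */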
+
+/*
+ * SCB defines
+ */
+
+/* these functions take the SCB status word and test the relevant status bit */
+#define SCB_complete(s) ((s&0x8000)!=0)
+#define SCB_rxdframe(s) ((s&0x4000)!=0)
+#define SCB_CUdead(s) ((s&0x2000)!=0)
+#define SCB_RUdead(s) ((s&0x1000)!=0)
+#define SCB_ack(s) (s & 0xf000)
+
+/* Command unit status: 0=idle, 1=suspended, 2=active */
+#define SCB_CUstat(s) ((s&0x0300)>>8)
+
+/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
+#define SCB_RUstat(s) ((s&0x0070)>>4)
+
+/* SCB commands */
+#define SCB_CUnop 0x0000
+#define SCB_CUstart 0x0100
+#define SCB_CUresume 0x0200
+#define SCB_CUsuspend 0x0300
+#define SCB_CUabort 0x0400
+#define SCB_resetchip 0x0080
+
+#define SCB_RUnop 0x0000
+#define SCB_RUstart 0x0010
+#define SCB_RUresume 0x0020
+#define SCB_RUsuspend 0x0030
+#define SCB_RUabort 0x0040
+
+/*
+ * Command block defines
+ */
+
+#define Stat_Done(s) ((s&0x8000)!=0)
+#define Stat_Busy(s) ((s&0x4000)!=0)
+#define Stat_OK(s) ((s&0x2000)!=0)
+#define Stat_Abort(s) ((s&0x1000)!=0)
+#define Stat_STFail(s) ((s&0x0800)!=0)
+#define Stat_TNoCar(s) ((s&0x0400)!=0)
+#define Stat_TNoCTS(s) ((s&0x0200)!=0)
+#define Stat_TNoDMA(s) ((s&0x0100)!=0)
+#define Stat_TDefer(s) ((s&0x0080)!=0)
+#define Stat_TColl(s) ((s&0x0040)!=0)
+#define Stat_TXColl(s) ((s&0x0020)!=0)
+#define Stat_NoColl(s) (s&0x000f)
+
+/* Cmd_END will end AFTER the command if this is the first
+ * command block after an SCB_CUstart, but BEFORE the command
+ * for all subsequent commands. Best strategy is to place
+ * Cmd_INT on the last command in the sequence, followed by a
+ * dummy Cmd_Nop with Cmd_END after this.
+ */
+
+#define Cmd_END 0x8000
+#define Cmd_SUS 0x4000
+#define Cmd_INT 0x2000
+
+#define Cmd_Nop 0x0000
+#define Cmd_SetAddr 0x0001
+#define Cmd_Config 0x0002
+#define Cmd_MCast 0x0003
+#define Cmd_Xmit 0x0004
+#define Cmd_TDR 0x0005
+#define Cmd_Dump 0x0006
+#define Cmd_Diag 0x0007
+
+
+/*
+ * Frame Descriptor (Receive block) defines
+ */
+
+#define FD_Done(s) ((s&0x8000)!=0)
+#define FD_Busy(s) ((s&0x4000)!=0)
+#define FD_OK(s) ((s&0x2000)!=0)
+
+#define FD_CRC(s) ((s&0x0800)!=0)
+#define FD_Align(s) ((s&0x0400)!=0)
+#define FD_Resrc(s) ((s&0x0200)!=0)
+#define FD_DMA(s) ((s&0x0100)!=0)
+#define FD_Short(s) ((s&0x0080)!=0)
+#define FD_NoEOF(s) ((s&0x0040)!=0)
+
+struct rfd_header {
+ volatile unsigned long flags;
+ volatile unsigned short link;
+ volatile unsigned short rbd_offset;
+ volatile unsigned short dstaddr1;
+ volatile unsigned short dstaddr2;
+ volatile unsigned short dstaddr3;
+ volatile unsigned short srcaddr1;
+ volatile unsigned short srcaddr2;
+ volatile unsigned short srcaddr3;
+ volatile unsigned short length;
+
+ /* This is actually a Receive Buffer Descriptor. The way we
+ * arrange memory means that an RBD always follows the RFD that
+ * points to it, so they might as well be in the same structure.
+ */
+ volatile unsigned short actual_count;
+ volatile unsigned short next_rbd;
+ volatile unsigned short buf_addr1;
+ volatile unsigned short buf_addr2;
+ volatile unsigned short size;
+};
+
+/* Returned data from the Time Domain Reflectometer */
+
+#define TDR_LINKOK (1<<15)
+#define TDR_XCVRPROBLEM (1<<14)
+#define TDR_OPEN (1<<13)
+#define TDR_SHORT (1<<12)
+#define TDR_TIME 0x7ff
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
new file mode 100644
index 000000000000..81ebaedaa240
--- /dev/null
+++ b/drivers/net/epic100.c
@@ -0,0 +1,1687 @@
+/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
+/*
+ Written/copyright 1997-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the SMC83c170/175 "EPIC" series, as used on the
+ SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Information and updates available at
+ http://www.scyld.com/network/epic100.html
+
+ ---------------------------------------------------------------------
+
+ Linux kernel-specific changes:
+
+ LK1.1.2 (jgarzik):
+ * Merge becker version 1.09 (4/08/2000)
+
+ LK1.1.3:
+ * Major bugfix to 1.09 driver (Francis Romieu)
+
+ LK1.1.4 (jgarzik):
+ * Merge becker test version 1.09 (5/29/2000)
+
+ LK1.1.5:
+ * Fix locking (jgarzik)
+ * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
+
+ LK1.1.6:
+ * Merge becker version 1.11
+ * Move pci_enable_device before any PCI BAR len checks
+
+ LK1.1.7:
+ * { fill me in }
+
+ LK1.1.8:
+ * ethtool driver info support (jgarzik)
+
+ LK1.1.9:
+ * ethtool media get/set support (jgarzik)
+
+ LK1.1.10:
+ * revert MII transceiver init change (jgarzik)
+
+ LK1.1.11:
+ * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
+ * replace some MII-related magic numbers with constants
+
+ LK1.1.12:
+ * fix power-up sequence
+
+ LK1.1.13:
+ * revert version 1.1.12, power-up sequence "fix"
+
+ LK1.1.14 (Kryzsztof Halasa):
+ * fix spurious bad initializations
+ * pound phy a la SMSC's app note on the subject
+
+ AC1.1.14ac
+ * fix power up/down for ethtool that broke in 1.11
+
+*/
+
+#define DRV_NAME "epic100"
+#define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
+#define DRV_RELDATE "June 2, 2004"
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for operational efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 256
+#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 256
+#define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct epic_tx_desc))
+#define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct epic_rx_desc))
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Bytes transferred to chip before transmission starts. */
+/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
+#define TX_FIFO_THRESH 256
+#define RX_FIFO_THRESH 1		/* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+static char version2[] __devinitdata =
+" http://www.scyld.com/network/epic100.html\n";
+static char version3[] __devinitdata =
+" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
+MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
+MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
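+
+/* Example module load (illustrative values only):
+ *	modprobe epic100 debug=3 rx_copybreak=200 options=0x10 full_duplex=1
+ */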
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the SMC "EPIC/100", the SMC
+single-chip Ethernet controllers for PCI. This chip is used on
+the SMC EtherPower II boards.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+IVb. References
+
+http://www.smsc.com/main/datasheets/83c171.pdf
+http://www.smsc.com/main/datasheets/83c175.pdf
+http://scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840A.html
+
+IVc. Errata
+
+*/
+
+
+enum pci_id_flags_bits {
+ /* Set PCI command register bits before calling probe1(). */
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ /* Read and map the single following PCI BAR. */
+ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+};
+
+enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
+
+#define EPIC_TOTAL_SIZE 0x100
+#define USE_IO_OPS 1
+#ifdef USE_IO_OPS
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
+#else
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+
+typedef enum {
+ SMSC_83C170_0,
+ SMSC_83C170,
+ SMSC_83C175,
+} chip_t;
+
+
+struct epic_chip_info {
+ const char *name;
+ enum pci_id_flags_bits pci_flags;
+ int io_size; /* Needed for I/O region check or ioremap(). */
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+
+/* indexed by chip_t */
+static struct epic_chip_info pci_id_tbl[] = {
+ { "SMSC EPIC/100 83c170",
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
+ { "SMSC EPIC/100 83c170",
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
+ { "SMSC EPIC/C 83c175",
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
+};
+
+
+static struct pci_device_id epic_pci_tbl[] = {
+ { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
+ { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
+ { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
+
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to registers, using the (ugh) SMC names. */
+enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+ PCIBurstCnt=0x18,
+ TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
+ MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
+ LAN0=64, /* MAC address. */
+ MC0=80, /* Multicast filter table. */
+ RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
+ PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatus {
+ TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
+ PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
+ RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
+ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
+};
+enum CommandBits {
+ StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
+ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
+};
+
+#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
+
+#define EpicNapiEvent (TxEmpty | TxDone | \
+ RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
+#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
+
+static u16 media2miictl[16] = {
+ 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/* The EPIC100 Rx and Tx buffer descriptors. */
+
+struct epic_tx_desc {
+ u32 txstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+struct epic_rx_desc {
+ u32 rxstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+enum desc_status_bits {
+ DescOwn=0x8000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct epic_private {
+ struct epic_rx_desc *rx_ring;
+ struct epic_tx_desc *tx_ring;
+	/* The saved address of a sent-in-place packet/buffer, for later freeing. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+
+ /* Ring pointers. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ spinlock_t napi_lock;
+ unsigned int reschedule_in_poll;
+ unsigned int cur_tx, dirty_tx;
+
+ unsigned int cur_rx, dirty_rx;
+ u32 irq_mask;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ struct pci_dev *pci_dev; /* PCI bus location. */
+ int chip_id, chip_flags;
+
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ int tx_threshold;
+ unsigned char mc_filter[8];
+ signed char phys[4]; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ int mii_phy_cnt;
+ struct mii_if_info mii;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+};
+
+static int epic_open(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
+static void epic_restart(struct net_device *dev);
+static void epic_timer(unsigned long data);
+static void epic_tx_timeout(struct net_device *dev);
+static void epic_init_ring(struct net_device *dev);
+static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int epic_rx(struct net_device *dev, int budget);
+static int epic_poll(struct net_device *dev, int *budget);
+static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static int epic_close(struct net_device *dev);
+static struct net_device_stats *epic_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+static int __devinit epic_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int card_idx = -1;
+ long ioaddr;
+ int chip_idx = (int) ent->driver_data;
+ int irq;
+ struct net_device *dev;
+ struct epic_private *ep;
+ int i, ret, option = 0, duplex = 0;
+ void *ring_space;
+ dma_addr_t ring_dma;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
+ version, version2, version3);
+#endif
+
+ card_idx++;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out;
+ irq = pdev->irq;
+
+ if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
+ printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret < 0)
+ goto err_out_disable;
+
+ ret = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof (*ep));
+ if (!dev) {
+ printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
+ goto err_out_free_res;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef USE_IO_OPS
+ ioaddr = pci_resource_start (pdev, 0);
+#else
+ ioaddr = pci_resource_start (pdev, 1);
+ ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
+ if (!ioaddr) {
+ printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
+ goto err_out_free_netdev;
+ }
+#endif
+
+ pci_set_drvdata(pdev, dev);
+ ep = dev->priv;
+ ep->mii.dev = dev;
+ ep->mii.mdio_read = mdio_read;
+ ep->mii.mdio_write = mdio_write;
+ ep->mii.phy_id_mask = 0x1f;
+ ep->mii.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_iounmap;
+ ep->tx_ring = (struct epic_tx_desc *)ring_space;
+ ep->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ ep->rx_ring = (struct epic_rx_desc *)ring_space;
+ ep->rx_ring_dma = ring_dma;
+
+ if (dev->mem_start) {
+ option = dev->mem_start;
+ duplex = (dev->mem_start & 16) ? 1 : 0;
+ } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
+ if (options[card_idx] >= 0)
+ option = options[card_idx];
+ if (full_duplex[card_idx] >= 0)
+ duplex = full_duplex[card_idx];
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ spin_lock_init(&ep->lock);
+ spin_lock_init(&ep->napi_lock);
+ ep->reschedule_in_poll = 0;
+
+ /* Bring the chip out of low-power mode. */
+ outl(0x4200, ioaddr + GENCTL);
+ /* Magic?! If we don't set this bit the MII interface won't work. */
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Turn on the MII transceiver. */
+ outl(0x12, ioaddr + MIICfg);
+ if (chip_idx == 1)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ outl(0x0200, ioaddr + GENCTL);
+
+ /* Note: the '175 does not have a serial EEPROM. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
+
+ if (debug > 2) {
+ printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
+ pci_name(pdev));
+ for (i = 0; i < 64; i++)
+ printk(" %4.4x%s", read_eeprom(ioaddr, i),
+ i % 16 == 15 ? "\n" : "");
+ }
+
+ ep->pci_dev = pdev;
+ ep->chip_id = chip_idx;
+ ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+ ep->irq_mask =
+ (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun | EpicNapiEvent;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes much time and no cards have external MII. */
+ {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ ep->phys[phy_idx++] = phy;
+ printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
+ "%4.4x status %4.4x.\n",
+ pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
+ }
+ }
+ ep->mii_phy_cnt = phy_idx;
+ if (phy_idx != 0) {
+ phy = ep->phys[0];
+ ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
+ "partner %4.4x.\n",
+ pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
+ } else if ( ! (ep->chip_flags & NO_MII)) {
+ printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
+ pci_name(pdev));
+ /* Use the known PHY address of the EPII. */
+ ep->phys[0] = 3;
+ }
+ ep->mii.phy_id = ep->phys[0];
+ }
+
+ /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
+ if (ep->chip_flags & MII_PWRDWN)
+ outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
+ outl(0x0008, ioaddr + GENCTL);
+
+ /* The lower four bits are the media type. */
+ if (duplex) {
+ ep->mii.force_media = ep->mii.full_duplex = 1;
+ printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
+ pci_name(pdev));
+ }
+ dev->if_port = ep->default_port = option;
+
+ /* The Epic-specific entries in the device structure. */
+ dev->open = &epic_open;
+ dev->hard_start_xmit = &epic_start_xmit;
+ dev->stop = &epic_close;
+ dev->get_stats = &epic_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->tx_timeout = &epic_tx_timeout;
+ dev->poll = epic_poll;
+ dev->weight = 64;
+
+ ret = register_netdev(dev);
+ if (ret < 0)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+out:
+ return ret;
+
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+err_out_iounmap:
+#ifndef USE_IO_OPS
+ iounmap(ioaddr);
+err_out_free_netdev:
+#endif
+ free_netdev(dev);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+ goto out;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x09
+#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
+#define EE_ENB (0x0001 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ This serves to flush the operation to the PCI bus.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ64_CMD (6 << 6)
+#define EE_READ256_CMD (6 << 8)
+#define EE_ERASE_CMD (7 << 6)
+
+static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(0x00000000, ioaddr + INTMASK);
+}
+
+static inline void __epic_pci_commit(long ioaddr)
+{
+#ifndef USE_IO_OPS
+ inl(ioaddr + INTMASK);
+#endif
+}
+
+static inline void epic_napi_irq_off(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+ __epic_pci_commit(ioaddr);
+}
+
+static inline void epic_napi_irq_on(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ /* No need to commit possible posted write */
+ outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+}
+
+static int __devinit read_eeprom(long ioaddr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = ioaddr + EECTL;
+ int read_cmd = location |
+ (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 12; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ }
+ outl(EE_ENB, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+#define MII_READOP 1
+#define MII_WRITEOP 2
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long ioaddr = dev->base_addr;
+ int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
+ int i;
+
+ outl(read_cmd, ioaddr + MIICtrl);
+ /* Typical operation takes 25 loops. */
+ for (i = 400; i > 0; i--) {
+ barrier();
+ if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+ /* Work around read failure bug. */
+ if (phy_id == 1 && location < 6
+ && inw(ioaddr + MIIData) == 0xffff) {
+ outl(read_cmd, ioaddr + MIICtrl);
+ continue;
+ }
+ return inw(ioaddr + MIIData);
+ }
+ }
+ return 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(value, ioaddr + MIIData);
+ outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+ for (i = 10000; i > 0; i--) {
+ barrier();
+ if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+ break;
+ }
+ return;
+}
+
+
+static int epic_open(struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+ int retval;
+
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+ if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
+ return retval;
+
+ epic_init_ring(dev);
+
+ outl(0x4000, ioaddr + GENCTL);
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Pull the chip out of low-power mode, enable interrupts, and set for
+ PCI read multiple. The MIIcfg setting and strange write order are
+ required by the details of which bits are reset and the transceiver
+ wiring on the Ositech CardBus card.
+ */
+#if 0
+ outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+#endif
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+
+#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
+ outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#else
+ outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#endif
+
+ udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
+
+ for (i = 0; i < 3; i++)
+ outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+
+ if (media2miictl[dev->if_port & 15]) {
+ if (ep->mii_phy_cnt)
+ mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
+ if (dev->if_port == 1) {
+ if (debug > 1)
+ printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
+ "status %4.4x.\n",
+ dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+ }
+ } else {
+ int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
+ if (mii_lpa != 0xffff) {
+ if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
+ ep->mii.full_duplex = 1;
+ else if (! (mii_lpa & LPA_LPACK))
+ mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ if (debug > 1)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
+ " register read of %4.4x.\n", dev->name,
+ ep->mii.full_duplex ? "full" : "half",
+ ep->phys[0], mii_lpa);
+ }
+ }
+
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
+ outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ netif_start_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
+ "%s-duplex.\n",
+ dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
+ ep->mii.full_duplex ? "full" : "half");
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&ep->timer);
+ ep->timer.expires = jiffies + 3*HZ;
+ ep->timer.data = (unsigned long)dev;
+ ep->timer.function = &epic_timer; /* timer handler */
+ add_timer(&ep->timer);
+
+ return 0;
+}
+
+/* Reset the chip to recover from a PCI transaction error.
+ This may occur at interrupt time. */
+static void epic_pause(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = dev->priv;
+
+ netif_stop_queue (dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + INTMASK);
+ /* Stop the chip's Tx and Rx DMA processes. */
+ outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+
+ /* Update the error counts. */
+ if (inw(ioaddr + COMMAND) != 0xffff) {
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ /* Remove the packets on the Rx queue. */
+ epic_rx(dev, RX_RING_SIZE);
+}
+
+static void epic_restart(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = dev->priv;
+ int i;
+
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+ printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ udelay(1);
+
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
+ outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#else
+ outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#endif
+ outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+
+ for (i = 0; i < 3; i++)
+ outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
+ sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
+ outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
+ sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
+ printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
+ " interrupt %4.4x.\n",
+ dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
+ (int)inl(ioaddr + INTSTAT));
+ return;
+}
+
+static void check_media(struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
+ int negotiated = mii_lpa & ep->mii.advertising;
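+ /* Full duplex if the common abilities include 100Base-Tx full
+ duplex (LPA bit 0x0100) or, with no 100 Mb mode in common,
+ 10Base-T full duplex (0x0040) alone. */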
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+
+ if (ep->mii.force_media)
+ return;
+ if (mii_lpa == 0xffff) /* Bogus read */
+ return;
+ if (ep->mii.full_duplex != duplex) {
+ ep->mii.full_duplex = duplex;
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ }
+}
+
+static void epic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 5*HZ;
+
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
+ dev->name, (int)inl(ioaddr + TxSTAT));
+ printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, (int)inl(ioaddr + INTMASK),
+ (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+ }
+
+ check_media(dev);
+
+ ep->timer.expires = jiffies + next_tick;
+ add_timer(&ep->timer);
+}
+
+static void epic_tx_timeout(struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (debug > 0) {
+ printk(KERN_WARNING "%s: Transmit timeout using MII device, "
+ "Tx status %4.4x.\n",
+ dev->name, (int)inw(ioaddr + TxSTAT));
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
+ dev->name, ep->dirty_tx, ep->cur_tx);
+ }
+ }
+ if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+ outl(RestartTx, ioaddr + COMMAND);
+ } else {
+ epic_restart(dev);
+ outl(TxQueued, dev->base_addr + COMMAND);
+ }
+
+ dev->trans_start = jiffies;
+ ep->stats.tx_errors++;
+ if (!ep->tx_full)
+ netif_wake_queue(dev);
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void epic_init_ring(struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ int i;
+
+ ep->tx_full = 0;
+ ep->dirty_tx = ep->cur_tx = 0;
+ ep->cur_rx = ep->dirty_rx = 0;
+ ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ ep->rx_ring[i].rxstatus = 0;
+ ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
+ ep->rx_ring[i].next = ep->rx_ring_dma +
+ (i+1)*sizeof(struct epic_rx_desc);
+ ep->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ ep->rx_ring[i-1].next = ep->rx_ring_dma;
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+ ep->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
+ skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
+ }
+ ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ ep->tx_skbuff[i] = NULL;
+ ep->tx_ring[i].txstatus = 0x0000;
+ ep->tx_ring[i].next = ep->tx_ring_dma +
+ (i+1)*sizeof(struct epic_tx_desc);
+ }
+ ep->tx_ring[i-1].next = ep->tx_ring_dma;
+ return;
+}
+
+static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ int entry, free_count;
+ u32 ctrl_word;
+ unsigned long flags;
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ /* Caution: the write order is important here; set the field with the
+ "ownership" bit last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ spin_lock_irqsave(&ep->lock, flags);
+ free_count = ep->cur_tx - ep->dirty_tx;
+ entry = ep->cur_tx % TX_RING_SIZE;
+
+ ep->tx_skbuff[entry] = skb;
+ ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
+ ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
+ } else if (free_count == TX_QUEUE_LEN/2) {
+ ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
+ } else if (free_count < TX_QUEUE_LEN - 1) {
+ ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
+ } else {
+ /* Leave room for an additional entry. */
+ ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
+ ep->tx_full = 1;
+ }
+ ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
+ ep->tx_ring[entry].txstatus =
+ cpu_to_le32(((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
+ | DescOwn);
+
+ ep->cur_tx++;
+ if (ep->tx_full)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(TxQueued, dev->base_addr + COMMAND);
+
+ dev->trans_start = jiffies;
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
+ "flag %2.2x Tx status %8.8x.\n",
+ dev->name, (int)skb->len, entry, ctrl_word,
+ (int)inl(dev->base_addr + TxSTAT));
+
+ return 0;
+}
+
+static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
+ int status)
+{
+ struct net_device_stats *stats = &ep->stats;
+
+#ifndef final_version
+ /* There was a major error, log it. */
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ stats->tx_errors++;
+ if (status & 0x1050)
+ stats->tx_aborted_errors++;
+ if (status & 0x0008)
+ stats->tx_carrier_errors++;
+ if (status & 0x0040)
+ stats->tx_window_errors++;
+ if (status & 0x0010)
+ stats->tx_fifo_errors++;
+}
+
+static void epic_tx(struct net_device *dev, struct epic_private *ep)
+{
+ unsigned int dirty_tx, cur_tx;
+
+ /*
+ * Note: if this lock becomes a problem we can narrow the locked
+ * region at the cost of occasionally grabbing the lock more times.
+ */
+ cur_tx = ep->cur_tx;
+ for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
+ struct sk_buff *skb;
+ int entry = dirty_tx % TX_RING_SIZE;
+ int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
+
+ if (txstatus & DescOwn)
+ break; /* It still hasn't been Txed */
+
+ if (likely(txstatus & 0x0001)) {
+ ep->stats.collisions += (txstatus >> 8) & 15;
+ ep->stats.tx_packets++;
+ ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+ } else
+ epic_tx_error(dev, ep, txstatus);
+
+ /* Free the original skb. */
+ skb = ep->tx_skbuff[entry];
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ ep->tx_skbuff[entry] = NULL;
+ }
+
+#ifndef final_version
+ if (cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_WARNING
+ "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, cur_tx, ep->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+ ep->dirty_tx = dirty_tx;
+ if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ ep->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+ unsigned int handled = 0;
+ int status;
+
+ status = inl(ioaddr + INTSTAT);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(status & EpicNormalEvent, ioaddr + INTSTAT);
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
+ "intstat=%#8.8x.\n", dev->name, status,
+ (int)inl(ioaddr + INTSTAT));
+ }
+
+ if ((status & IntrSummary) == 0)
+ goto out;
+
+ handled = 1;
+
+ if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+ spin_lock(&ep->napi_lock);
+ if (netif_rx_schedule_prep(dev)) {
+ epic_napi_irq_off(dev, ep);
+ __netif_rx_schedule(dev);
+ } else
+ ep->reschedule_in_poll++;
+ spin_unlock(&ep->napi_lock);
+ }
+ status &= ~EpicNapiEvent;
+
+ /* Check uncommon events all at once. */
+ if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+ if (status == EpicRemoved)
+ goto out;
+
+ /* Always update the error counts to avoid overhead later. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+ if (status & TxUnderrun) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+ outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+ /* Restart the transmit process. */
+ outl(RestartTx, ioaddr + COMMAND);
+ }
+ if (status & PCIBusErr170) {
+ printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
+ dev->name, status);
+ epic_pause(dev);
+ epic_restart(dev);
+ }
+ /* Clear all error sources. */
+ outl(status & 0x7f18, ioaddr + INTSTAT);
+ }
+
+out:
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
+ dev->name, status);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int epic_rx(struct net_device *dev, int budget)
+{
+ struct epic_private *ep = dev->priv;
+ int entry = ep->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
+ int work_done = 0;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ ep->rx_ring[entry].rxstatus);
+
+ if (rx_work_limit > budget)
+ rx_work_limit = budget;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
+ int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
+
+ if (debug > 4)
+ printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ if (--rx_work_limit < 0)
+ break;
+ if (status & 0x2006) {
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
+ dev->name, status);
+ if (status & 0x2000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, status %4.4x!\n", dev->name, status);
+ ep->stats.rx_length_errors++;
+ } else if (status & 0x0006)
+ /* Rx Frame errors are counted in hardware. */
+ ep->stats.rx_errors++;
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = (status >> 16) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len > PKT_BUF_SZ - 4) {
+ printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
+ "%d bytes.\n",
+ dev->name, status, pkt_len);
+ pkt_len = 1514;
+ }
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb_put(skb = ep->rx_skbuff[entry], pkt_len);
+ ep->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ ep->stats.rx_packets++;
+ ep->stats.rx_bytes += pkt_len;
+ }
+ work_done++;
+ entry = (++ep->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
+ entry = ep->dirty_rx % RX_RING_SIZE;
+ if (ep->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
+ skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ work_done++;
+ }
+ ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
+ }
+ return work_done;
+}
+
+static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+ int status;
+
+ status = inl(ioaddr + INTSTAT);
+
+ if (status == EpicRemoved)
+ return;
+ if (status & RxOverflow) /* Missed a Rx frame. */
+ ep->stats.rx_errors++;
+ if (status & (RxOverflow | RxFull))
+ outw(RxQueued, ioaddr + COMMAND);
+}
+
+static int epic_poll(struct net_device *dev, int *budget)
+{
+ struct epic_private *ep = dev->priv;
+ int work_done, orig_budget;
+ long ioaddr = dev->base_addr;
+
+ orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
+
+rx_action:
+
+ epic_tx(dev, ep);
+
+ work_done = epic_rx(dev, *budget);
+
+ epic_rx_err(dev, ep);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (netif_running(dev) && (work_done < orig_budget)) {
+ unsigned long flags;
+ int more;
+
+ /* A bit baroque but it avoids a (space hungry) spin_unlock */
+
+ spin_lock_irqsave(&ep->napi_lock, flags);
+
+ more = ep->reschedule_in_poll;
+ if (!more) {
+ __netif_rx_complete(dev);
+ outl(EpicNapiEvent, ioaddr + INTSTAT);
+ epic_napi_irq_on(dev, ep);
+ } else
+ ep->reschedule_in_poll--;
+
+ spin_unlock_irqrestore(&ep->napi_lock, flags);
+
+ if (more)
+ goto rx_action;
+ }
+
+ return (work_done >= orig_budget);
+}
+
+static int epic_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = dev->priv;
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, (int)inl(ioaddr + INTSTAT));
+
+ del_timer_sync(&ep->timer);
+
+ epic_disable_int(dev, ep);
+
+ free_irq(dev->irq, dev);
+
+ epic_pause(dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = ep->rx_skbuff[i];
+ ep->rx_skbuff[i] = NULL;
+ ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
+ ep->rx_ring[i].buflength = 0;
+ if (skb) {
+ pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ }
+ ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = ep->tx_skbuff[i];
+ ep->tx_skbuff[i] = NULL;
+ if (!skb)
+ continue;
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ /* Green! Leave the chip in low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+
+ return 0;
+}
+
+static struct net_device_stats *epic_get_stats(struct net_device *dev)
+{
+ struct epic_private *ep = dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ /* Update the error counts. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ return &ep->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling ep->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ outl(0x002C, ioaddr + RxCtrl);
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ /* There is apparently a chip bug, so the multicast filter
+ is never enabled. */
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outl(0x000C, ioaddr + RxCtrl);
+ } else if (dev->mc_count == 0) {
+ outl(0x0004, ioaddr + RxCtrl);
+ return;
+ } else { /* Never executed, for now. */
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit_nr =
+ ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
+ }
+ }
+ /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+ if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
+ for (i = 0; i < 4; i++)
+ outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+ memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
+ }
+ return;
+}
+
+static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct epic_private *np = dev->priv;
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct epic_private *np = dev->priv;
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct epic_private *np = dev->priv;
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct epic_private *np = dev->priv;
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct epic_private *np = dev->priv;
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static int ethtool_begin(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ /* power-up, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+ return 0;
+}
+
+static void ethtool_complete(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ /* power-down, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .begin = ethtool_begin,
+ .complete = ethtool_complete
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct epic_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ /* power-up, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+
+ /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ /* power-down, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+ return rc;
+}
+
+
+static void __devexit epic_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct epic_private *ep = dev->priv;
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ unregister_netdev(dev);
+#ifndef USE_IO_OPS
+ iounmap((void*) dev->base_addr);
+#endif
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ /* pci_power_off(pdev, -1); */
+}
+
+
+#ifdef CONFIG_PM
+
+static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ long ioaddr = dev->base_addr;
+
+ if (!netif_running(dev))
+ return 0;
+ epic_pause(dev);
+ /* Put the chip into low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+ /* pci_power_off(pdev, -1); */
+ return 0;
+}
+
+
+static int epic_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+ epic_restart(dev);
+ /* pci_power_on(pdev); */
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver epic_driver = {
+ .name = DRV_NAME,
+ .id_table = epic_pci_tbl,
+ .probe = epic_init_one,
+ .remove = __devexit_p(epic_remove_one),
+#ifdef CONFIG_PM
+ .suspend = epic_suspend,
+ .resume = epic_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init epic_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
+ version, version2, version3);
+#endif
+
+ return pci_module_init (&epic_driver);
+}
+
+
+static void __exit epic_cleanup (void)
+{
+ pci_unregister_driver (&epic_driver);
+}
+
+
+module_init(epic_init);
+module_exit(epic_cleanup);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
new file mode 100644
index 000000000000..dd6865820372
--- /dev/null
+++ b/drivers/net/eql.c
@@ -0,0 +1,613 @@
+/*
+ * Equalizer Load-balancer for serial network interfaces.
+ *
+ * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * (c) Copyright 2002 David S. Miller (davem@redhat.com)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The author may be reached as simon@ncm.com, or C/O
+ * NCM
+ * Attn: Simon Janes
+ * 6803 Whittier Ave
+ * McLean VA 22101
+ * Phone: 1-703-847-0040 ext 103
+ */
+
+/*
+ * Sources:
+ * skeleton.c by Donald Becker.
+ * Inspirations:
+ * The Harried and Overworked Alan Cox
+ * Conspiracies:
+ * The Alan Cox and Mike McLagan plot to get someone else to do the code,
+ * which turned out to be me.
+ */
+
+/*
+ * $Log: eql.c,v $
+ * Revision 1.2 1996/04/11 17:51:52 guru
+ * Added one-line eql_remove_slave patch.
+ *
+ * Revision 1.1 1996/04/11 17:44:17 guru
+ * Initial revision
+ *
+ * Revision 3.13 1996/01/21 15:17:18 alan
+ * tx_queue_len changes.
+ * reformatted.
+ *
+ * Revision 3.12 1995/03/22 21:07:51 anarchy
+ * Added capable() checks on configuration.
+ * Moved header file.
+ *
+ * Revision 3.11 1995/01/19 23:14:31 guru
+ * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
+ * (priority_Bps) + bytes_queued * 8;
+ *
+ * Revision 3.10 1995/01/19 23:07:53 guru
+ * back to
+ * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
+ * (priority_Bps) + bytes_queued;
+ *
+ * Revision 3.9 1995/01/19 22:38:20 guru
+ * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
+ * (priority_Bps) + bytes_queued * 4;
+ *
+ * Revision 3.8 1995/01/19 22:30:55 guru
+ * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
+ * (priority_Bps) + bytes_queued * 2;
+ *
+ * Revision 3.7 1995/01/19 21:52:35 guru
+ * printk's trimmed out.
+ *
+ * Revision 3.6 1995/01/19 21:49:56 guru
+ * This is working pretty well. I gained 1 K/s in speed.. now it's just
+ * robustness and printk's to be diked out.
+ *
+ * Revision 3.5 1995/01/18 22:29:59 guru
+ * still crashes the kernel when the lock_wait thing is woken up.
+ *
+ * Revision 3.4 1995/01/18 21:59:47 guru
+ * Broken set-bit locking snapshot
+ *
+ * Revision 3.3 1995/01/17 22:09:18 guru
+ * infinite sleep in a lock somewhere..
+ *
+ * Revision 3.2 1995/01/15 16:46:06 guru
+ * Log trimmed of non-pertinent 1.x branch messages
+ *
+ * Revision 3.1 1995/01/15 14:41:45 guru
+ * New Scheduler and timer stuff...
+ *
+ * Revision 1.15 1995/01/15 14:29:02 guru
+ * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
+ * with the dumber scheduler
+ *
+ * Revision 1.14 1995/01/15 02:37:08 guru
+ * shock.. the kept-new-versions could have zonked working
+ * stuff.. shudder
+ *
+ * Revision 1.13 1995/01/15 02:36:31 guru
+ * big changes
+ *
+ * scheduler was torn out and replaced with something smarter
+ *
+ * global names not prefixed with eql_ were renamed to protect
+ * against namespace collisions
+ *
+ * a few more abstract interfaces were added to facilitate any
+ * potential change of datastructure. the driver is still using
+ * a linked list of slaves. going to a heap would be a bit of
+ * an overkill.
+ *
+ * this compiles fine with no warnings.
+ *
+ * the locking mechanism and timer stuff must be written however,
+ * this version will not work otherwise
+ *
+ * Sorry, I had to rewrite most of this for 2.5.x -DaveM
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/if_eql.h>
+
+#include <asm/uaccess.h>
+
+static int eql_open(struct net_device *dev);
+static int eql_close(struct net_device *dev);
+static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *eql_get_stats(struct net_device *dev);
+
+#define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
+#define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER)
+
+static void eql_kill_one_slave(slave_t *slave);
+
+static void eql_timer(unsigned long param)
+{
+ equalizer_t *eql = (equalizer_t *) param;
+ struct list_head *this, *tmp, *head;
+
+ spin_lock_bh(&eql->queue.lock);
+ head = &eql->queue.all_slaves;
+ list_for_each_safe(this, tmp, head) {
+ slave_t *slave = list_entry(this, slave_t, list);
+
+ if ((slave->dev->flags & IFF_UP) == IFF_UP) {
+ slave->bytes_queued -= slave->priority_Bps;
+ if (slave->bytes_queued < 0)
+ slave->bytes_queued = 0;
+ } else {
+ eql_kill_one_slave(slave);
+ }
+
+ }
+ spin_unlock_bh(&eql->queue.lock);
+
+ eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
+ add_timer(&eql->timer);
+}
+
+static char version[] __initdata =
+ "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
+
+static void __init eql_setup(struct net_device *dev)
+{
+ equalizer_t *eql = netdev_priv(dev);
+
+ SET_MODULE_OWNER(dev);
+
+ init_timer(&eql->timer);
+ eql->timer.data = (unsigned long) eql;
+ eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
+ eql->timer.function = eql_timer;
+
+ spin_lock_init(&eql->queue.lock);
+ INIT_LIST_HEAD(&eql->queue.all_slaves);
+ eql->queue.master_dev = dev;
+
+ dev->open = eql_open;
+ dev->stop = eql_close;
+ dev->do_ioctl = eql_ioctl;
+ dev->hard_start_xmit = eql_slave_xmit;
+ dev->get_stats = eql_get_stats;
+
+ /*
+ * Now we undo some of the things that eth_setup does
+ * that we don't like
+ */
+
+ dev->mtu = EQL_DEFAULT_MTU; /* set to 576 in if_eql.h */
+ dev->flags = IFF_MASTER;
+
+ dev->type = ARPHRD_SLIP;
+ dev->tx_queue_len = 5; /* Hands them off fast */
+}
+
+static int eql_open(struct net_device *dev)
+{
+ equalizer_t *eql = netdev_priv(dev);
+
+ /* XXX We should force this off automatically for the user. */
+ printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
+ "your slave devices.\n", dev->name);
+
+ if (!list_empty(&eql->queue.all_slaves))
+ BUG();
+
+ eql->min_slaves = 1;
+ eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */
+
+ add_timer(&eql->timer);
+
+ return 0;
+}
+
+static void eql_kill_one_slave(slave_t *slave)
+{
+ list_del(&slave->list);
+ slave->dev->flags &= ~IFF_SLAVE;
+ dev_put(slave->dev);
+ kfree(slave);
+}
+
+static void eql_kill_slave_queue(slave_queue_t *queue)
+{
+ struct list_head *head, *tmp, *this;
+
+ spin_lock_bh(&queue->lock);
+
+ head = &queue->all_slaves;
+ list_for_each_safe(this, tmp, head) {
+ slave_t *s = list_entry(this, slave_t, list);
+
+ eql_kill_one_slave(s);
+ queue->num_slaves--;
+ }
+
+ spin_unlock_bh(&queue->lock);
+}
+
+static int eql_close(struct net_device *dev)
+{
+ equalizer_t *eql = netdev_priv(dev);
+
+ /*
+ * The timer has to be stopped first before we start hacking away
+ * at the data structure it scans every so often...
+ */
+
+ del_timer_sync(&eql->timer);
+
+ eql_kill_slave_queue(&eql->queue);
+
+ return 0;
+}
+
+static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
+static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
+
+static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
+static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
+
+static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
+static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
+
+static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case EQL_ENSLAVE:
+ return eql_enslave(dev, ifr->ifr_data);
+ case EQL_EMANCIPATE:
+ return eql_emancipate(dev, ifr->ifr_data);
+ case EQL_GETSLAVECFG:
+ return eql_g_slave_cfg(dev, ifr->ifr_data);
+ case EQL_SETSLAVECFG:
+ return eql_s_slave_cfg(dev, ifr->ifr_data);
+ case EQL_GETMASTRCFG:
+ return eql_g_master_cfg(dev, ifr->ifr_data);
+ case EQL_SETMASTRCFG:
+ return eql_s_master_cfg(dev, ifr->ifr_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* queue->lock must be held */
+static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
+{
+ unsigned long best_load = ~0UL;
+ struct list_head *this, *tmp, *head;
+ slave_t *best_slave;
+
+ best_slave = NULL;
+
+ /* Make a pass to set the best slave. */
+ head = &queue->all_slaves;
+ list_for_each_safe(this, tmp, head) {
+ slave_t *slave = list_entry(this, slave_t, list);
+ unsigned long slave_load, bytes_queued, priority_Bps;
+
+ /* Go through the slave list once, updating best_slave
+ * whenever a new best_load is found.
+ */
+ bytes_queued = slave->bytes_queued;
+ priority_Bps = slave->priority_Bps;
+ if ((slave->dev->flags & IFF_UP) == IFF_UP) {
+ slave_load = (~0UL - (~0UL / 2)) -
+ (priority_Bps) + bytes_queued * 8;
+
+ if (slave_load < best_load) {
+ best_load = slave_load;
+ best_slave = slave;
+ }
+ } else {
+ /* We found a dead slave, kill it. */
+ eql_kill_one_slave(slave);
+ }
+ }
+ return best_slave;
+}
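+
+/*
+ * Worked example (illustrative, assuming a 32-bit unsigned long, so
+ * ~0UL - ~0UL/2 == 0x80000000): a slave configured for 7000 B/s with
+ * nothing queued has load 0x80000000 - 7000 and beats an idle
+ * 3500 B/s slave at 0x80000000 - 3500. Once the faster slave has
+ * roughly 440 bytes queued (440 * 8 > 3500), the idle slower slave is
+ * preferred again, which is how traffic is spread across links of
+ * different speeds.
+ */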
+
+static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ equalizer_t *eql = netdev_priv(dev);
+ slave_t *slave;
+
+ spin_lock(&eql->queue.lock);
+
+ slave = __eql_schedule_slaves(&eql->queue);
+ if (slave) {
+ struct net_device *slave_dev = slave->dev;
+
+ skb->dev = slave_dev;
+ skb->priority = 1;
+ slave->bytes_queued += skb->len;
+ dev_queue_xmit(skb);
+ eql->stats.tx_packets++;
+ } else {
+ eql->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ }
+
+ spin_unlock(&eql->queue.lock);
+
+ return 0;
+}
+
+static struct net_device_stats * eql_get_stats(struct net_device *dev)
+{
+ equalizer_t *eql = netdev_priv(dev);
+ return &eql->stats;
+}
+
+/*
+ * Private ioctl functions
+ */
+
+/* queue->lock must be held */
+static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
+{
+ struct list_head *this, *head;
+
+ head = &queue->all_slaves;
+ list_for_each(this, head) {
+ slave_t *slave = list_entry(this, slave_t, list);
+
+ if (slave->dev == dev)
+ return slave;
+ }
+
+ return NULL;
+}
+
+static inline int eql_is_full(slave_queue_t *queue)
+{
+ equalizer_t *eql = netdev_priv(queue->master_dev);
+
+ if (queue->num_slaves >= eql->max_slaves)
+ return 1;
+ return 0;
+}
+
+/* queue->lock must be held */
+static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
+{
+ if (!eql_is_full(queue)) {
+ slave_t *duplicate_slave = NULL;
+
+ duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
+ if (duplicate_slave)
+ eql_kill_one_slave(duplicate_slave);
+
+ list_add(&slave->list, &queue->all_slaves);
+ queue->num_slaves++;
+ slave->dev->flags |= IFF_SLAVE;
+
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
+{
+ struct net_device *slave_dev;
+ slaving_request_t srq;
+
+ if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
+ return -EFAULT;
+
+ slave_dev = dev_get_by_name(srq.slave_name);
+ if (slave_dev) {
+ if ((master_dev->flags & IFF_UP) == IFF_UP) {
+ /* slave is not a master & not already a slave: */
+ if (!eql_is_master(slave_dev) &&
+ !eql_is_slave(slave_dev)) {
+ slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ equalizer_t *eql = netdev_priv(master_dev);
+ int ret;
+
+ if (!s) {
+ dev_put(slave_dev);
+ return -ENOMEM;
+ }
+
+ memset(s, 0, sizeof(*s));
+ s->dev = slave_dev;
+ s->priority = srq.priority;
+ s->priority_bps = srq.priority;
+ s->priority_Bps = srq.priority / 8;
+
+ spin_lock_bh(&eql->queue.lock);
+ ret = __eql_insert_slave(&eql->queue, s);
+ if (ret) {
+ dev_put(slave_dev);
+ kfree(s);
+ }
+ spin_unlock_bh(&eql->queue.lock);
+
+ return ret;
+ }
+ }
+ dev_put(slave_dev);
+ }
+
+ return -EINVAL;
+}
+
+static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
+{
+ equalizer_t *eql = netdev_priv(master_dev);
+ struct net_device *slave_dev;
+ slaving_request_t srq;
+ int ret;
+
+ if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
+ return -EFAULT;
+
+ slave_dev = dev_get_by_name(srq.slave_name);
+ ret = -EINVAL;
+ if (slave_dev) {
+ spin_lock_bh(&eql->queue.lock);
+
+ if (eql_is_slave(slave_dev)) {
+ slave_t *slave = __eql_find_slave_dev(&eql->queue,
+ slave_dev);
+
+ if (slave) {
+ eql_kill_one_slave(slave);
+ ret = 0;
+ }
+ }
+ dev_put(slave_dev);
+
+ spin_unlock_bh(&eql->queue.lock);
+ }
+
+ return ret;
+}
+
+static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
+{
+ equalizer_t *eql = netdev_priv(dev);
+ slave_t *slave;
+ struct net_device *slave_dev;
+ slave_config_t sc;
+ int ret;
+
+ if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
+ return -EFAULT;
+
+ slave_dev = dev_get_by_name(sc.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
+
+ ret = -EINVAL;
+
+ spin_lock_bh(&eql->queue.lock);
+ if (eql_is_slave(slave_dev)) {
+ slave = __eql_find_slave_dev(&eql->queue, slave_dev);
+ if (slave) {
+ sc.priority = slave->priority;
+ ret = 0;
+ }
+ }
+ spin_unlock_bh(&eql->queue.lock);
+
+ dev_put(slave_dev);
+
+ if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
+{
+ slave_t *slave;
+ equalizer_t *eql;
+ struct net_device *slave_dev;
+ slave_config_t sc;
+ int ret;
+
+ if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
+ return -EFAULT;
+
+ slave_dev = dev_get_by_name(sc.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
+
+ ret = -EINVAL;
+
+ eql = netdev_priv(dev);
+ spin_lock_bh(&eql->queue.lock);
+ if (eql_is_slave(slave_dev)) {
+ slave = __eql_find_slave_dev(&eql->queue, slave_dev);
+ if (slave) {
+ slave->priority = sc.priority;
+ slave->priority_bps = sc.priority;
+ slave->priority_Bps = sc.priority / 8;
+ ret = 0;
+ }
+ }
+ spin_unlock_bh(&eql->queue.lock);
+
+ return ret;
+}
+
+static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
+{
+ equalizer_t *eql;
+ master_config_t mc;
+
+ if (eql_is_master(dev)) {
+ eql = netdev_priv(dev);
+ mc.max_slaves = eql->max_slaves;
+ mc.min_slaves = eql->min_slaves;
+ if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
+{
+ equalizer_t *eql;
+ master_config_t mc;
+
+ if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
+ return -EFAULT;
+
+ if (eql_is_master(dev)) {
+ eql = netdev_priv(dev);
+ eql->max_slaves = mc.max_slaves;
+ eql->min_slaves = mc.min_slaves;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct net_device *dev_eql;
+
+static int __init eql_init_module(void)
+{
+ int err;
+
+ printk(version);
+
+ dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
+ if (!dev_eql)
+ return -ENOMEM;
+
+ err = register_netdev(dev_eql);
+ if (err)
+ free_netdev(dev_eql);
+ return err;
+}
+
+static void __exit eql_cleanup_module(void)
+{
+ unregister_netdev(dev_eql);
+ free_netdev(dev_eql);
+}
+
+module_init(eql_init_module);
+module_exit(eql_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
new file mode 100644
index 000000000000..f1e8150ed2a0
--- /dev/null
+++ b/drivers/net/es3210.c
@@ -0,0 +1,478 @@
+/*
+ es3210.c
+
+ Linux driver for Racal-Interlan ES3210 EISA Network Adapter
+
+ Copyright (C) 1996, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) The existing myriad of Linux 8390 drivers written by Donald Becker.
+
+ 2) Once again Russ Nelson's asm packet driver provided additional info.
+
+ 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
+ Too bad it doesn't work -- see below.
+
+ The ES3210 is an EISA shared memory NS8390 implementation. Note
+ that all memory copies to/from the board must be 32-bit transfers,
+ which rules out using eth_io_copy_and_sum() in this driver.
+
+ Apparently there are two slightly different revisions of the
+ card, since there are two distinct EISA cfg files (!rii0101.cfg
+ and !rii0102.cfg). One has media select in the cfg file and the
+ other doesn't. Hopefully this will work with either.
+
+ That is about all I can tell you about it, having never actually
+ even seen one of these cards. :) Try http://www.interlan.com
+ if you want more info.
+
+ Thanks go to Mark Salazar for testing v0.02 of this driver.
+
+ Bugs, to-fix, etc:
+
+ 1) The EISA cfg ports that are *supposed* to have the IRQ and shared
+ mem values just read 0xff all the time. Hrrmpf. Apparently the
+ same happens with the packet driver as the code for reading
+ these registers is disabled there. In the meantime, boot with:
+ ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
+ shared memory detection. (The i/o port detection is okay.)
+
+ 2) Module support currently untested. Probably works though.
+
+*/
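+
+/*
+ Example of the override described above, with hypothetical values
+ (IRQ 10, shared memory at 0xd0000):
+
+ ether=10,0,0xd0000,eth0
+*/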
+
+static const char version[] =
+ "es3210.c: Driver revision v0.03, 14/09/96\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+static int es_probe1(struct net_device *dev, int ioaddr);
+
+static int es_open(struct net_device *dev);
+static int es_close(struct net_device *dev);
+
+static void es_reset_8390(struct net_device *dev);
+
+static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
+
+#define ES_START_PG 0x00 /* First page of TX buffer */
+#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
+#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
+#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
+#define ES_RESET_PORT 0xc84 /* From the packet driver source */
+#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
+
+#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
+#define ES_ADDR1 0x07
+#define ES_ADDR2 0x01
+
+/*
+ * Two card revisions. EISA IDs are always rev. minor, rev. major, and
+ * then the three vendor letters stored in 5 bits each, with an "a" = 1.
+ * For example: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
+ * config utility determines automagically what config file(s) to use.
+ */
+#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
+#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
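+
+/*
+ * Illustrative only (not part of the driver): the 5-bit packing above
+ * amounts to
+ *
+ * id = ((v[0]-'a'+1) << 10) | ((v[1]-'a'+1) << 5) | (v[2]-'a'+1);
+ *
+ * which gives 0x4929 for "rii". Since the ES_EISA_ID* values store the
+ * raw inl() result, that vendor code shows up byte-swapped as 0x2949
+ * in their low half.
+ */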
+
+#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
+#define ES_CFG2 0xcc1
+#define ES_CFG3 0xcc2
+#define ES_CFG4 0xcc3
+#define ES_CFG5 0xcc4
+#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to ES_DEBUG to get verbose driver info during operation.
+ * Some of these don't do anything yet.
+ */
+
+#define ES_D_PROBE 0x01
+#define ES_D_RX_PKT 0x02
+#define ES_D_TX_PKT 0x04
+#define ED_D_IRQ 0x08
+
+#define ES_DEBUG 0
+
+static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
+static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
+
+/*
+ * Probe for the card. The best way is to read the EISA ID if it
+ * is known. Then we check the prefix of the station address
+ * PROM for a match against the Racal-Interlan assigned value.
+ */
+
+static int __init do_es_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+
+ SET_MODULE_OWNER(dev);
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return es_probe1(dev, ioaddr);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if (!EISA_bus) {
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: Not EISA bus. Not probing high ports.\n");
+#endif
+ return -ENXIO;
+ }
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (es_probe1(dev, ioaddr) == 0)
+ return 0;
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ES_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init es_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_es_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init es_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ unsigned long eisa_id;
+
+ if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
+ return -ENODEV;
+
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
+ printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
+ inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
+ inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
+#endif
+
+
+/* Check the EISA ID of the card. */
+ eisa_id = inl(ioaddr + ES_ID_PORT);
+ if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+/* Check the Racal vendor ID as well. */
+ if (inb(ioaddr + ES_SA_PROM + 0) != ES_ADDR0
+ || inb(ioaddr + ES_SA_PROM + 1) != ES_ADDR1
+ || inb(ioaddr + ES_SA_PROM + 2) != ES_ADDR2 ) {
+ printk("es3210.c: card not found");
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", inb(ioaddr + ES_SA_PROM + i));
+ printk(" (invalid prefix).\n");
+ retval = -ENODEV;
+ goto out;
+ }
+
+ printk("es3210.c: ES3210 rev. %ld at %#x, node", eisa_id>>24, ioaddr);
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i)));
+
+ /* Snarf the interrupt now. */
+ if (dev->irq == 0) {
+ unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
+ unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
+
+ if (hi_irq != 0) {
+ dev->irq = hi_irq_map[hi_irq - 1];
+ } else {
+ int i = 0;
+ while (lo_irq > (1<<i)) i++;
+ dev->irq = lo_irq_map[i];
+ }
+ printk(" using IRQ %d", dev->irq);
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
+ hi_irq, lo_irq, dev->irq);
+#endif
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9; /* Doh! */
+ printk(" assigning IRQ %d", dev->irq);
+ }
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (dev->mem_start == 0) {
+ unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
+ unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
+
+ if (mem_enabled != 0x80) {
+ printk(" shared mem disabled - giving up\n");
+ retval = -ENXIO;
+ goto out1;
+ }
+ dev->mem_start = 0xC0000 + mem_bits*0x4000;
+ printk(" using ");
+ } else {
+ printk(" assigning ");
+ }
+
+ ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
+ if (!ei_status.mem) {
+ printk("ioremap failed - giving up\n");
+ retval = -ENXIO;
+ goto out1;
+ }
+
+ dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
+
+ printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
+
+#if ES_DEBUG & ES_D_PROBE
+ if (inb(ioaddr + ES_CFG5))
+ printk("es3210: Warning - DMA channel enabled, but not used here.\n");
+#endif
+ /* Note, point at the 8390, and not the card... */
+ dev->base_addr = ioaddr + ES_NIC_OFFSET;
+
+ ei_status.name = "ES3210";
+ ei_status.tx_start_page = ES_START_PG;
+ ei_status.rx_start_page = ES_START_PG + TX_PAGES;
+ ei_status.stop_page = ES_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &es_reset_8390;
+ ei_status.block_input = &es_block_input;
+ ei_status.block_output = &es_block_output;
+ ei_status.get_8390_hdr = &es_get_8390_hdr;
+
+ dev->open = &es_open;
+ dev->stop = &es_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
+ return retval;
+}
+
+/*
+ * Reset as per the packet driver method. Judging by the EISA cfg
+ * file, this just toggles the "Board Enable" bits (bit 2 and 0).
+ */
+
+static void es_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ unsigned long end;
+
+ outb(0x04, ioaddr + ES_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
+
+ end = jiffies + 2*HZ/100;
+ while ((signed)(end - jiffies) > 0) continue;
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + ES_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/*
+ * Note: the following three functions make the implicit assumption
+ * that the associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as some multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
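+
+/*
+ * For example (illustrative): a 61-byte count becomes 64 via
+ * (count + 3) & ~3, so every transfer stays a whole number of
+ * doublewords.
+ */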
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+/*
+ * Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via es_get_8390_hdr() above.
+ */
+
+static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
+
+ if (ring_offset + count > ES_STOP_PG*256) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = ES_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
+ } else {
+ /* Packet is in one chunk. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void es_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+static int es_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int es_close(struct net_device *dev)
+{
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ ei_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static struct net_device *dev_es3210[MAX_ES_CARDS];
+static int io[MAX_ES_CARDS];
+static int irq[MAX_ES_CARDS];
+static int mem[MAX_ES_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "memory base address(es)");
+MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ if (do_es_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_es3210[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
+ struct net_device *dev = dev_es3210[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
new file mode 100644
index 000000000000..ccae6ba5f7c5
--- /dev/null
+++ b/drivers/net/eth16i.c
@@ -0,0 +1,1509 @@
+/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
+
+ Written 1994-1999 by Mika Kuoppala
+
+ Copyright (C) 1994-1999 by Mika Kuoppala
+ Based on skeleton.c and heavily on at1700.c by Donald Becker
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as miku@iki.fi
+
+ This driver supports the following cards:
+ - ICL EtherTeam 16i
+ - ICL EtherTeam 32 EISA
+ (Uses true 32-bit transfers rather than 16i compatibility mode)
+
+ Example Module usage:
+ insmod eth16i.o io=0x2a0 mediatype=bnc
+
+ mediatype can be one of the following: bnc,tp,dix,auto,eprom
+
+ 'auto' will try to autoprobe mediatype.
+ 'eprom' will use whatever type is defined in the eprom.
+
+ I have benchmarked the driver with a PII/300MHz as an ftp client
+ and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
+
+ Sources:
+ - skeleton.c a sample network driver core for linux,
+ written by Donald Becker <becker@scyld.com>
+ - at1700.c a driver for Allied Telesis AT1700, written
+ by Donald Becker.
+ - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
+ written by Markku Viima
+ - The Fujitsu MB86965 databook.
+
+ The author thanks the following persons for their valuable assistance:
+ Markku Viima (ICL)
+ Ari Valve (ICL)
+ Donald Becker
+ Kurt Huwig <kurt@huwig.de>
+
+ Revision history:
+
+ Version Date Description
+
+ 0.01 15.12-94 Initial version (card detection)
+ 0.02 23.01-95 Interrupt is now hooked correctly
+ 0.03 01.02-95 Rewrote initialization part
+ 0.04 07.02-95 Base skeleton done...
+ Made a few changes to signature checking
+ to make it a bit more reliable.
+ - fixed bug in tx_buf mapping
+ - fixed bug in initialization (DLC_EN
+ wasn't enabled when initialization
+ was done.)
+ 0.05 08.02-95 If there were more than one packet to send,
+ transmit was jammed due to invalid
+ register write...now fixed
+ 0.06 19.02-95 Rewrote interrupt handling
+ 0.07 13.04-95 Wrote EEPROM read routines
+ Card configuration now set according to
+ data read from EEPROM
+ 0.08 23.06-95 Wrote part that tries to probe used interface
+ port if AUTO is selected
+
+ 0.09 01.09-95 Added module support
+
+ 0.10 04.09-95 Fixed receive packet allocation to work
+ with kernels > 1.3.x
+
+ 0.20 20.09-95 Added support for EtherTeam32 EISA
+
+ 0.21 17.10-95 Removed the unnecessary extern
+ init_etherdev() declaration. Some
+ other cleanups.
+
+ 0.22 22.02-96 Receive buffer was not flushed
+ correctly when faulty packet was
+ received. Now fixed.
+
+ 0.23 26.02-96 Made resetting the adapter
+ more reliable.
+
+ 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
+
+ 0.25 22.05-96 kfree() was missing from cleanup_module.
+
+ 0.26 11.06-96 Sometimes card was not found by
+ check_signature(). Now made more reliable.
+
+ 0.27 23.06-96 Oops. 16 consecutive collisions halted
+ adapter. Now will try to retransmit
+ MAX_COL_16 times before finally giving up.
+
+ 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
+
+ 0.29 29.10-97 Multiple card support for module users
+
+ 0.30 30.10-97 Fixed irq allocation bug.
+ (request_irq moved from probe to open)
+
+ 0.30a 21.08-98 Card detection made more relaxed. Driver
+ had problems with some TCP/IP-PROM boots
+ to find the card. Suggested by
+ Kurt Huwig <kurt@huwig.de>
+
+ 0.31 28.08-98 Media interface port can now be selected
+ with module parameters or kernel
+ boot parameters.
+
+ 0.32 31.08-98 IRQ was never freed if open/close
+ pair wasn't called. Now fixed.
+
+ 0.33 10.09-98 When eth16i_open() was called after
+ eth16i_close() chip never recovered.
+ Now more shallow reset is made on
+ close.
+
+ 0.34 29.06-99 Fixed one bad #ifdef.
+ Changed ioaddr -> io for consistency
+
+ 0.35 01.07-99 transmit,-receive bytes were never
+ updated in stats.
+
+ Bugs:
+ In some cases the media interface autoprobing code doesn't find
+ the correct interface type. In this case you can
+ manually choose the interface type in DOS with E16IC.EXE, which is
+ the configuration software for EtherTeam16i and EtherTeam32 cards.
+ This is also true for the IRQ setting. You cannot use a module
+ parameter to configure the IRQ of the card (yet).
+
+ To do:
+ - Real multicast support
+ - Rewrite the media interface autoprobing code. It's _horrible_!
+ - Possibly merge all the MB86965-specific code into an external
+ module for use by eth16i.c and Donald's at1700.c
+ - IRQ configuration with a module parameter. I will do
+ this when I get enough info about setting the
+ IRQ without the configuration utility.
+*/
+
+static char *version =
+ "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+
+
+/* Few macros */
+#define BIT(a) ( (1 << (a)) )
+#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
+#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
+
+/* This is the I/O address space for Etherteam 16i adapter. */
+#define ETH16I_IO_EXTENT 32
+
+/* Ticks before deciding that transmit has timed out */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/* Maximum loop count when receiving packets */
+#define MAX_RX_LOOP 20
+
+/* Some interrupt masks */
+#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
+#define ETH16I_INTR_OFF 0x0000
+
+/* Buffers header status byte meanings */
+#define PKT_GOOD BIT(5)
+#define PKT_GOOD_RMT BIT(4)
+#define PKT_SHORT BIT(3)
+#define PKT_ALIGN_ERR BIT(2)
+#define PKT_CRC_ERR BIT(1)
+#define PKT_RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit status register (DLCR0) */
+#define TX_STATUS_REG 0
+#define TX_DONE BIT(7)
+#define NET_BUSY BIT(6)
+#define TX_PKT_RCD BIT(5)
+#define CR_LOST BIT(4)
+#define TX_JABBER_ERR BIT(3)
+#define COLLISION BIT(2)
+#define COLLISIONS_16 BIT(1)
+
+/* Receive status register (DLCR1) */
+#define RX_STATUS_REG 1
+#define RX_PKT BIT(7) /* Packet received */
+#define BUS_RD_ERR BIT(6)
+#define SHORT_PKT_ERR BIT(3)
+#define ALIGN_ERR BIT(2)
+#define CRC_ERR BIT(1)
+#define RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit Interrupt Enable Register (DLCR2) */
+#define TX_INTR_REG 2
+#define TX_INTR_DONE BIT(7)
+#define TX_INTR_COL BIT(2)
+#define TX_INTR_16_COL BIT(1)
+
+/* Receive Interrupt Enable Register (DLCR3) */
+#define RX_INTR_REG 3
+#define RX_INTR_RECEIVE BIT(7)
+#define RX_INTR_SHORT_PKT BIT(3)
+#define RX_INTR_CRC_ERR BIT(1)
+#define RX_INTR_BUF_OVERFLOW BIT(0)
+
+/* Transmit Mode Register (DLCR4) */
+#define TRANSMIT_MODE_REG 4
+#define LOOPBACK_CONTROL BIT(1)
+#define CONTROL_OUTPUT BIT(2)
+
+/* Receive Mode Register (DLCR5) */
+#define RECEIVE_MODE_REG 5
+#define RX_BUFFER_EMPTY BIT(6)
+#define ACCEPT_BAD_PACKETS BIT(5)
+#define RECEIVE_SHORT_ADDR BIT(4)
+#define ACCEPT_SHORT_PACKETS BIT(3)
+#define REMOTE_RESET BIT(2)
+
+#define ADDRESS_FILTER_MODE	(BIT(1) | BIT(0))
+#define REJECT_ALL 0
+#define ACCEPT_ALL 3
+#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
+#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
+
+/* Configuration Register 0 (DLCR6) */
+#define CONFIG_REG_0 6
+#define DLC_EN BIT(7)
+#define SRAM_CYCLE_TIME_100NS BIT(6)
+#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
+#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
+#define TBS1 BIT(3)
+#define TBS0 BIT(2)
+#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
+#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
+
+#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
+#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
+#endif
+#define TX_BUF_1x2048 0
+#define TX_BUF_2x2048 1
+#define TX_BUF_2x4098 2
+#define TX_BUF_2x8192 3
+
+/* Configuration Register 1 (DLCR7) */
+#define CONFIG_REG_1 7
+#define POWERUP BIT(5)
+
+/* Transmit start register */
+#define TRANSMIT_START_REG 10
+#define TRANSMIT_START_RB 2
+#define TX_START		BIT(7)	/* Rest of the register bits indicate */
+					/* the number of packets in the tx buffer */
+/* Node ID registers (DLCR8-13) */
+#define NODE_ID_0 8
+#define NODE_ID_RB 0
+
+/* Hash Table registers (HT8-15) */
+#define HASH_TABLE_0 8
+#define HASH_TABLE_RB 1
+
+/* Buffer memory ports */
+#define BUFFER_MEM_PORT_LB 8
+#define DATAPORT BUFFER_MEM_PORT_LB
+#define BUFFER_MEM_PORT_HB 9
+
+/* 16 Collision control register (BMPR11) */
+#define COL_16_REG 11
+#define HALT_ON_16 0x00
+#define RETRANS_AND_HALT_ON_16 0x02
+
+/* Maximum number of attempts to send after 16 consecutive collisions */
+#define MAX_COL_16 10
+
+/* DMA Burst and Transceiver Mode Register (BMPR13) */
+#define TRANSCEIVER_MODE_REG 13
+#define TRANSCEIVER_MODE_RB 2
+#define IO_BASE_UNLOCK BIT(7)
+#define LOWER_SQUELCH_TRESH BIT(6)
+#define LINK_TEST_DISABLE BIT(5)
+#define AUI_SELECT BIT(4)
+#define DIS_AUTO_PORT_SEL BIT(3)
+
+/* Filter Self Receive Register (BMPR14) */
+#define FILTER_SELF_RX_REG 14
+#define SKIP_RX_PACKET BIT(2)
+#define FILTER_SELF_RECEIVE BIT(0)
+
+/* EEPROM Control Register (BMPR 16) */
+#define EEPROM_CTRL_REG 16
+
+/* EEPROM Data Register (BMPR 17) */
+#define EEPROM_DATA_REG 17
+
+/* NMC93CSx6 EEPROM Control Bits */
+#define CS_0 0x00
+#define CS_1 0x20
+#define SK_0 0x00
+#define SK_1 0x40
+#define DI_0 0x00
+#define DI_1 0x80
+
+/* NMC93CSx6 EEPROM Instructions */
+#define EEPROM_READ 0x80
+
+/* NMC93CSx6 EEPROM Addresses */
+#define E_NODEID_0 0x02
+#define E_NODEID_1 0x03
+#define E_NODEID_2 0x04
+#define E_PORT_SELECT 0x14
+ #define E_PORT_BNC 0x00
+ #define E_PORT_DIX 0x01
+ #define E_PORT_TP 0x02
+ #define E_PORT_AUTO 0x03
+ #define E_PORT_FROM_EPROM 0x04
+#define E_PRODUCT_CFG 0x30
+
+
+/* Macro to slow down io between EEPROM clock transitions */
+#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
+
+/* Jumperless Configuration Register (BMPR19) */
+#define JUMPERLESS_CONFIG 19
+
+/* ID ROM registers, writing to them also resets some parts of chip */
+#define ID_ROM_0 24
+#define ID_ROM_7 31
+#define RESET ID_ROM_0
+
+/* This is the I/O address list to be probed when seeking the card */
+static unsigned int eth16i_portlist[] __initdata = {
+ 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
+};
+
+static unsigned int eth32i_portlist[] __initdata = {
+ 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
+ 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0
+};
+
+/* This is the Interrupt lookup table for Eth16i card */
+static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 };
+#define NUM_OF_ISA_IRQS 4
+
+/* This is the Interrupt lookup table for Eth32i card */
+static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
+#define EISA_IRQ_REG 0xc89
+#define NUM_OF_EISA_IRQS 8
+
+static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef ETH16I_DEBUG
+#define ETH16I_DEBUG 0
+#endif
+static unsigned int eth16i_debug = ETH16I_DEBUG;
+
+/* Information for each board */
+
+struct eth16i_local {
+ struct net_device_stats stats;
+ unsigned char tx_started;
+ unsigned char tx_buf_busy;
+ unsigned short tx_queue; /* Number of packets in transmit buffer */
+ unsigned short tx_queue_len;
+ unsigned int tx_buf_size;
+ unsigned long open_time;
+ unsigned long tx_buffered_packets;
+ unsigned long tx_buffered_bytes;
+ unsigned long col_16;
+ spinlock_t lock;
+};
+
+/* Function prototypes */
+
+static int eth16i_probe1(struct net_device *dev, int ioaddr);
+static int eth16i_check_signature(int ioaddr);
+static int eth16i_probe_port(int ioaddr);
+static void eth16i_set_port(int ioaddr, int porttype);
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
+static int eth16i_receive_probe_packet(int ioaddr);
+static int eth16i_get_irq(int ioaddr);
+static int eth16i_read_eeprom(int ioaddr, int offset);
+static int eth16i_read_eeprom_word(int ioaddr);
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
+static int eth16i_open(struct net_device *dev);
+static int eth16i_close(struct net_device *dev);
+static int eth16i_tx(struct sk_buff *skb, struct net_device *dev);
+static void eth16i_rx(struct net_device *dev);
+static void eth16i_timeout(struct net_device *dev);
+static irqreturn_t eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eth16i_reset(struct net_device *dev);
+static void eth16i_timeout(struct net_device *dev);
+static void eth16i_skip_packet(struct net_device *dev);
+static void eth16i_multicast(struct net_device *dev);
+static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
+static void eth16i_initialize(struct net_device *dev, int boot);
+
+#if 0
+static int eth16i_set_irq(struct net_device *dev);
+#endif
+
+#ifdef MODULE
+static ushort eth16i_parse_mediatype(const char* s);
+#endif
+
+static struct net_device_stats *eth16i_get_stats(struct net_device *dev);
+
+static char cardname[] __initdata = "ICL EtherTeam 16i/32";
+
+static int __init do_eth16i_probe(struct net_device *dev)
+{
+ int i;
+ int ioaddr;
+ int base_addr = dev->base_addr;
+
+ SET_MODULE_OWNER(dev);
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "Probing started for %s\n", cardname);
+
+ if(base_addr > 0x1ff) /* Check only single location */
+ return eth16i_probe1(dev, base_addr);
+ else if(base_addr != 0) /* Don't probe at all */
+ return -ENXIO;
+
+ /* Seek card from the ISA io address space */
+ for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++)
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+
+ /* Seek card from the EISA io address space */
+ for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++)
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init eth16i_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_eth16i_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ static unsigned version_printed;
+ int retval;
+
+ /* Let's grab the region */
+ if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname))
+ return -EBUSY;
+
+ /*
+	   The MB86985 chip has one register which holds the io
+	   address at which the chip lies. First read this register and compare
+	   it to our current io address; if they match, then this could
+	   be our chip.
+ */
+
+ if(ioaddr < 0x1000) {
+ if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
+ != ioaddr) {
+ retval = -ENODEV;
+ goto out;
+ }
+ }
+
+ /* Now we will go a bit deeper and try to find the chip's signature */
+
+ if(eth16i_check_signature(ioaddr) != 0) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
+	   Now it seems that we have found an ethernet chip at this particular
+	   ioaddr. The MB86985 chip has the feature that when you read a
+	   certain register it will increase its io base address to the next
+	   configurable slot. Now that we have found the chip, the first thing is
+	   to make sure that the chip's ioaddr will hold still here.
+ */
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
+
+ outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
+ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
+
+ if( (eth16i_debug & version_printed++) == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = ioaddr;
+ dev->irq = eth16i_get_irq(ioaddr);
+
+ /* Try to obtain interrupt vector */
+
+ if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) {
+ printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n",
+ cardname, ioaddr, dev->irq);
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
+ dev->name, cardname, ioaddr, dev->irq);
+
+
+ /* Now we will have to lock the chip's io address */
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
+
+ eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */
+
+	/* Now let's save some energy by shutting down the chip ;) */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+ /* Initialize the device structure */
+ memset(lp, 0, sizeof(struct eth16i_local));
+ dev->open = eth16i_open;
+ dev->stop = eth16i_close;
+ dev->hard_start_xmit = eth16i_tx;
+ dev->get_stats = eth16i_get_stats;
+ dev->set_multicast_list = eth16i_multicast;
+ dev->tx_timeout = eth16i_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ spin_lock_init(&lp->lock);
+ return 0;
+out:
+ release_region(ioaddr, ETH16I_IO_EXTENT);
+ return retval;
+}
+
+
+static void eth16i_initialize(struct net_device *dev, int boot)
+{
+ int ioaddr = dev->base_addr;
+ int i, node_w = 0;
+ unsigned char node_byte = 0;
+
+ /* Setup station address */
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+ for(i = 0 ; i < 3 ; i++) {
+ unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
+ }
+
+ for(i = 0; i < 6; i++) {
+ outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
+ if(boot) {
+ printk("%02x", inb(ioaddr + NODE_ID_0 + i));
+ if(i != 5)
+ printk(":");
+ }
+ }
+
+ /* Now we will set multicast addresses to accept none */
+ eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
+ for(i = 0; i < 8; i++)
+ outb(0x00, ioaddr + HASH_TABLE_0 + i);
+
+ /*
+ Now let's disable the transmitter and receiver, set the buffer ram
+ cycle time, bus width and buffer data path width. Also we shall
+ set transmit buffer size and total buffer size.
+ */
+
+ eth16i_select_regbank(2, ioaddr);
+
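+	/* Build the CONFIG_REG_0 value: the buffer data path width and SRAM
+	   size come from the EEPROM product configuration word, and the
+	   transmit buffer size from ETH16I_TX_BUF_SIZE (shifted into the
+	   TBS1/TBS0 bits). */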
+ node_byte = 0;
+ node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
+
+ if( (node_w & 0xFF00) == 0x0800)
+ node_byte |= BUFFER_WIDTH_8;
+
+ node_byte |= SRAM_BS1;
+
+ if( (node_w & 0x00FF) == 64)
+ node_byte |= SRAM_BS0;
+
+ node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
+
+ outb(node_byte, ioaddr + CONFIG_REG_0);
+
+ /* We shall halt the transmitting, if 16 collisions are detected */
+ outb(HALT_ON_16, ioaddr + COL_16_REG);
+
+#ifdef MODULE
+ /* if_port already set by init_module() */
+#else
+ dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
+ dev->mem_start : E_PORT_FROM_EPROM;
+#endif
+
+ /* Set interface port type */
+ if(boot) {
+ char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+
+ switch(dev->if_port)
+ {
+
+ case E_PORT_FROM_EPROM:
+ dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
+ break;
+
+ case E_PORT_AUTO:
+ dev->if_port = eth16i_probe_port(ioaddr);
+ break;
+
+ case E_PORT_BNC:
+ case E_PORT_TP:
+ case E_PORT_DIX:
+ break;
+ }
+
+ printk(" %s interface.\n", porttype[dev->if_port]);
+
+ eth16i_set_port(ioaddr, dev->if_port);
+ }
+
+ /* Set Receive Mode to normal operation */
+ outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
+}
+
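+/*
+   Probe for the media interface port by trying each port type in turn
+   (BNC, DIX, TP): send a dummy packet out of the port and check whether
+   the transmit completes and the adapter reports the packet as received.
+   Falls back to BNC if no port responds.
+ */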
+static int eth16i_probe_port(int ioaddr)
+{
+ int i;
+ int retcode;
+ unsigned char dummy_packet[64];
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+
+ for(i = 0; i < 6; i++) {
+ dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
+ dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
+ }
+
+ dummy_packet[12] = 0x00;
+ dummy_packet[13] = 0x04;
+ memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14);
+
+ eth16i_select_regbank(2, ioaddr);
+
+ for(i = 0; i < 3; i++) {
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+ eth16i_set_port(ioaddr, i);
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Set port number %d\n", i);
+
+ retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
+ if(retcode == 0) {
+ retcode = eth16i_receive_probe_packet(ioaddr);
+ if(retcode != -1) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
+ return i;
+ }
+ }
+ else {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
+ }
+ }
+
+ if( eth16i_debug > 1)
+ printk(KERN_DEBUG "Using default port\n");
+
+ return E_PORT_BNC;
+}
+
+static void eth16i_set_port(int ioaddr, int porttype)
+{
+ unsigned short temp = 0;
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
+
+ temp |= DIS_AUTO_PORT_SEL;
+
+ switch(porttype) {
+
+ case E_PORT_BNC :
+ temp |= AUI_SELECT;
+ break;
+
+ case E_PORT_TP :
+ break;
+
+ case E_PORT_DIX :
+ temp |= AUI_SELECT;
+ BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
+ break;
+ }
+
+ outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
+ printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
+ inb(ioaddr+TRANSCEIVER_MODE_REG));
+ }
+}
+
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
+{
+ int starttime;
+
+ outb(0xff, ioaddr + TX_STATUS_REG);
+
+ outw(l, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
+
+ starttime = jiffies;
+ outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
+
+ while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int eth16i_receive_probe_packet(int ioaddr)
+{
+ int starttime;
+
+ starttime = jiffies;
+
+ while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+
+ if(eth16i_debug > 1)
+				printk(KERN_DEBUG "Timeout occurred waiting for transmit packet to be received\n");
+ starttime = jiffies;
+ while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ if(eth16i_debug > 1)
+					printk(KERN_DEBUG "Timeout occurred waiting for receive packet\n");
+ return -1;
+ }
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "RECEIVE_PACKET\n");
+ return(0); /* Found receive packet */
+ }
+ }
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
+ printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
+ }
+
+ return(0); /* Return success */
+}
+
+#if 0
+static int eth16i_set_irq(struct net_device* dev)
+{
+ const int ioaddr = dev->base_addr;
+ const int irq = dev->irq;
+ int i = 0;
+
+ if(ioaddr < 0x1000) {
+ while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
+ i++;
+
+ if(i < NUM_OF_ISA_IRQS) {
+ u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ cbyte = (cbyte & 0x3F) | (i << 6);
+ outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
+ return 0;
+ }
+ }
+ else {
+ printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
+ }
+
+ return -1;
+
+}
+#endif
+
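+/*
+   On ISA cards the IRQ is encoded in bits 6-7 of the jumperless
+   configuration register and looked up in eth16i_irqmap. On EISA cards
+   (ioaddr >= 0x1000) the IRQ register is scanned for the first set bit,
+   whose position indexes eth32i_irqmap.
+ */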
+static int __init eth16i_get_irq(int ioaddr)
+{
+ unsigned char cbyte;
+
+ if( ioaddr < 0x1000) {
+ cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+	} else {	/* The card is EISA, so the method of getting the IRQ is different */
+ unsigned short index = 0;
+ cbyte = inb(ioaddr + EISA_IRQ_REG);
+ while( (cbyte & 0x01) == 0) {
+ cbyte = cbyte >> 1;
+ index++;
+ }
+ return( eth32i_irqmap[ index ] );
+ }
+}
+
+static int __init eth16i_check_signature(int ioaddr)
+{
+ int i;
+ unsigned char creg[4] = { 0 };
+
+ for(i = 0; i < 4 ; i++) {
+
+ creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
+
+ if(eth16i_debug > 1)
+ printk("eth16i: read signature byte %x at %x\n",
+ creg[i],
+ ioaddr + TRANSMIT_MODE_REG + i);
+ }
+
+ creg[0] &= 0x0F; /* Mask collision cnr */
+ creg[2] &= 0x7F; /* Mask DCLEN bit */
+
+#if 0
+ /*
+	   This was removed because the card was sometimes left in a state
+	   from which it couldn't be found anymore. If a more strict
+	   check is needed, this still has to be fixed.
+ */
+ if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
+ if(creg[1] != 0x42)
+ return -1;
+ }
+#endif
+
+ if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
+ creg[2] &= 0x40;
+ creg[3] &= 0x03;
+
+ if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
+ return -1;
+ }
+
+ if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
+ return -1;
+
+ if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
+ return -1;
+
+ return 0;
+}
+
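+/*
+   The NMC93CSx6 EEPROM is accessed serially: eth16i_eeprom_cmd() clocks
+   out a start bit and the 8-bit read command/address on the DI line, and
+   eth16i_read_eeprom_word() then clocks in the 16 data bits, most
+   significant bit first, by toggling SK and sampling the data register.
+ */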
+static int eth16i_read_eeprom(int ioaddr, int offset)
+{
+ int data = 0;
+
+ eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
+ outb(CS_1, ioaddr + EEPROM_CTRL_REG);
+ data = eth16i_read_eeprom_word(ioaddr);
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+
+ return(data);
+}
+
+static int eth16i_read_eeprom_word(int ioaddr)
+{
+ int i;
+ int data = 0;
+
+ for(i = 16; i > 0; i--) {
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ data = (data << 1) |
+ ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
+
+ eeprom_slow_io();
+ }
+
+ return(data);
+}
+
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
+{
+ int i;
+
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_0, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_1, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+
+ for(i = 7; i >= 0; i--) {
+ short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
+ outb(cmd, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ }
+}
+
+static int eth16i_open(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ /* Initialize the chip */
+ eth16i_initialize(dev, 0);
+
+ /* Set the transmit buffer size */
+ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
+
+ if(eth16i_debug > 0)
+ printk(KERN_DEBUG "%s: transmit buffer size %d\n",
+ dev->name, lp->tx_buf_size);
+
+ /* Now enable Transmitter and Receiver sections */
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Now switch to register bank 2, for run time operation */
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->open_time = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on interrupts*/
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int eth16i_close(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ eth16i_reset(dev);
+
+ /* Turn off interrupts*/
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ netif_stop_queue(dev);
+
+ lp->open_time = 0;
+
+ /* Disable transmit and receive */
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Reset the chip */
+ /* outb(0xff, ioaddr + RESET); */
+ /* outw(0xffff, ioaddr + TX_STATUS_REG); */
+
+ outb(0x00, ioaddr + CONFIG_REG_1);
+
+ return 0;
+}
+
+static void eth16i_timeout(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ /*
+ If we get here, some higher level has decided that
+ we are broken. There should really be a "kick me"
+ function call instead.
+ */
+
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+ printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
+ dev->name,
+ inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
+ "IRQ conflict" : "network cable problem");
+
+ dev->trans_start = jiffies;
+
+ /* Let's dump all registers */
+ if(eth16i_debug > 0) {
+ printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ dev->name, inb(ioaddr + 0),
+ inb(ioaddr + 1), inb(ioaddr + 2),
+ inb(ioaddr + 3), inb(ioaddr + 4),
+ inb(ioaddr + 5),
+ inb(ioaddr + 6), inb(ioaddr + 7));
+
+ printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
+ dev->name, inb(ioaddr + TRANSMIT_START_REG),
+ inb(ioaddr + COL_16_REG));
+ printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
+ printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
+ printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
+ }
+ lp->stats.tx_errors++;
+ eth16i_reset(dev);
+ dev->trans_start = jiffies;
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+ netif_wake_queue(dev);
+}
+
+static int eth16i_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int status = 0;
+ ushort length = skb->len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ buf = skb->data;
+
+ netif_stop_queue(dev);
+
+ /* Turn off TX interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+	/* We would be better off doing the disable_irq tricks the 3c509 does;
+	   that would make this suck a lot less */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
+ }
+ else {
+ outw(length, ioaddr + DATAPORT);
+
+ if( ioaddr < 0x1000 )
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ else {
+ unsigned char frag = length % 4;
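+			/* EISA path: copy the bulk of the frame as 32-bit words,
+			   then push the 1-3 leftover bytes as one or two 16-bit writes */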
+ outsl(ioaddr + DATAPORT, buf, length >> 2);
+ if( frag != 0 ) {
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
+ if( frag == 3 )
+ outsw(ioaddr + DATAPORT,
+ (buf + (length & 0xFFFC) + 2), 1);
+ }
+ }
+ lp->tx_buffered_packets++;
+ lp->tx_buffered_bytes = length;
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+ }
+ lp->tx_buf_busy = 0;
+
+ if(lp->tx_started == 0) {
+ /* If the transmitter is idle..always trigger a transmit */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ netif_wake_queue(dev);
+ }
+ else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+		/* There is still room for one more packet in the tx buffer */
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+ /* Turn TX interrupts back on */
+ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
+ status = 0;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void eth16i_rx(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int boguscount = MAX_RX_LOOP;
+
+ /* Loop until all packets have been read */
+ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
+
+ /* Read status byte from receive buffer */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ /* Get the size of the packet from receive buffer */
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
+ dev->name,
+ inb(ioaddr + RECEIVE_MODE_REG), status);
+
+ if( !(status & PKT_GOOD) ) {
+ lp->stats.rx_errors++;
+
+ if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
+ lp->stats.rx_length_errors++;
+ eth16i_reset(dev);
+ return;
+ }
+ else {
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ }
+ }
+ else { /* Ok so now we should have a good packet */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 3);
+ if( skb == NULL ) {
+				printk(KERN_WARNING "%s: Couldn't allocate memory for packet (len %d)\n",
+ dev->name, pkt_len);
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ /*
+			   Now let's get the packet out of the buffer.
+			   The size is (pkt_len + 1) >> 1 because we are now reading words
+			   and it has to be even aligned.
+ */
+
+ if(ioaddr < 0x1000)
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ else {
+ unsigned char *buf = skb_put(skb, pkt_len);
+ unsigned char frag = pkt_len % 4;
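+				/* EISA path: read the bulk as 32-bit words; the 1-3
+				   trailing bytes are fetched as one or two 16-bit reads
+				   and copied into place below */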
+
+ insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
+
+ if(frag != 0) {
+ unsigned short rest[2];
+ rest[0] = inw( ioaddr + DATAPORT );
+ if(frag == 3)
+ rest[1] = inw( ioaddr + DATAPORT );
+
+ memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
+ }
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+
+ if( eth16i_debug > 5 ) {
+ int i;
+ printk(KERN_DEBUG "%s: Received packet of length %d.\n",
+ dev->name, pkt_len);
+ for(i = 0; i < 14; i++)
+ printk(KERN_DEBUG " %02x", skb->data[i]);
+ printk(KERN_DEBUG ".\n");
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ } /* else */
+
+ if(--boguscount <= 0)
+ break;
+
+ } /* while */
+}
+
+static irqreturn_t eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct eth16i_local *lp;
+ int ioaddr = 0, status;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* Turn off all interrupts from adapter */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ /* eth16i_tx won't be called */
+ spin_lock(&lp->lock);
+
+ status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
+ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
+
+ if (status)
+ handled = 1;
+
+ if(eth16i_debug > 3)
+ printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
+
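+	/* The status word read from TX_STATUS_REG packs the receive status
+	   (DLCR1) into the high byte and the transmit status (DLCR0) into
+	   the low byte, hence the shifted bit tests below. */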
+ if( status & 0x7f00 ) {
+
+ lp->stats.rx_errors++;
+
+ if(status & (BUS_RD_ERR << 8) )
+ printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
+ if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++;
+ if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++;
+ if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++;
+ if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++;
+ }
+ if( status & 0x001a) {
+
+ lp->stats.tx_errors++;
+
+ if(status & CR_LOST) lp->stats.tx_carrier_errors++;
+ if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++;
+
+#if 0
+ if(status & COLLISION) {
+ lp->stats.collisions +=
+ ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
+ }
+#endif
+ if(status & COLLISIONS_16) {
+ if(lp->col_16 < MAX_COL_16) {
+ lp->col_16++;
+ lp->stats.collisions++;
+ /* Resume transmitting, skip failed packet */
+ outb(0x02, ioaddr + COL_16_REG);
+ }
+ else {
+ printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name);
+ }
+ }
+ }
+
+ if( status & 0x00ff ) { /* Let's check the transmit status reg */
+
+ if(status & TX_DONE) { /* The transmit has been done */
+ lp->stats.tx_packets = lp->tx_buffered_packets;
+ lp->stats.tx_bytes += lp->tx_buffered_bytes;
+ lp->col_16 = 0;
+
+			if(lp->tx_queue) {		  /* Are there still packets? */
+				/* There were packet(s), so start transmitting and also write
+				   how many packets there are to be sent */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ }
+ else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ }
+
+ if( ( status & 0x8000 ) ||
+ ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
+ eth16i_rx(dev); /* We have packet in receive buffer */
+ }
+
+ /* Turn interrupts back on */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+		/* There is still room for one more packet in the tx buffer */
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void eth16i_skip_packet(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
+}
+
+static void eth16i_reset(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ outw(0xffff, ioaddr + TX_STATUS_REG);
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->tx_started = 0;
+ lp->tx_buf_busy = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+}
+
+static void eth16i_multicast(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ dev->flags|=IFF_PROMISC; /* Must do this */
+ outb(3, ioaddr + RECEIVE_MODE_REG);
+ } else {
+ outb(2, ioaddr + RECEIVE_MODE_REG);
+ }
+}
+
+static struct net_device_stats *eth16i_get_stats(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
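+/* The chip's registers are arranged in banks; bits 2-3 of CONFIG_REG_1
+   (DLCR7) select which bank is currently visible in the I/O window. */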
+static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
+{
+ unsigned char data;
+
+ data = inb(ioaddr + CONFIG_REG_1);
+ outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
+}
+
+#ifdef MODULE
+
+static ushort eth16i_parse_mediatype(const char* s)
+{
+ if(!s)
+ return E_PORT_FROM_EPROM;
+
+ if (!strncmp(s, "bnc", 3))
+ return E_PORT_BNC;
+ else if (!strncmp(s, "tp", 2))
+ return E_PORT_TP;
+ else if (!strncmp(s, "dix", 3))
+ return E_PORT_DIX;
+ else if (!strncmp(s, "auto", 4))
+ return E_PORT_AUTO;
+ else
+ return E_PORT_FROM_EPROM;
+}
+
+#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
+
+static struct net_device *dev_eth16i[MAX_ETH16I_CARDS];
+static int io[MAX_ETH16I_CARDS];
+#if 0
+static int irq[MAX_ETH16I_CARDS];
+#endif
+static char* mediatype[MAX_ETH16I_CARDS];
+static int debug = -1;
+
+MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
+MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
+MODULE_LICENSE("GPL");
+
+
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
+
+#if 0
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "eth16i interrupt request number");
+#endif
+
+module_param_array(mediatype, charp, NULL, 0);
+MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
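+
+/* Example usage (hypothetical I/O addresses):
+     insmod eth16i.ko io=0x2a0,0x280 mediatype=bnc,tp debug=1 */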
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+ struct net_device *dev;
+
+ for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
+ dev = alloc_etherdev(sizeof(struct eth16i_local));
+ if (!dev)
+ break;
+
+ dev->base_addr = io[this_dev];
+
+ if(debug != -1)
+ eth16i_debug = debug;
+
+ if(eth16i_debug > 1)
+ printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
+
+ dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
+
+ if(io[this_dev] == 0) {
+ if(this_dev != 0) /* Only autoprobe 1st one */
+ break;
+
+ printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+
+ if (do_eth16i_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_eth16i[found++] = dev;
+ continue;
+ }
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+ }
+		printk(KERN_WARNING "eth16i.c: No Eth16i card found (i/o = 0x%x).\n",
+ io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
+ struct net_device *dev = dev_eth16i[this_dev];
+
+ if(dev->priv) {
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eth16i.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict -prototypes -O6 -c eth16i.c"
+ * tab-width: 8
+ * c-basic-offset: 8
+ * c-indent-level: 8
+ * End:
+ */
+
+/* End of file eth16i.c */
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
new file mode 100644
index 000000000000..dcf969b20be9
--- /dev/null
+++ b/drivers/net/ewrk3.c
@@ -0,0 +1,2007 @@
+/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
+
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE203 Turbo (BNC)
+ DE204 Turbo (TP)
+ DE205 Turbo (TP BNC)
+
+ The driver has been tested on a relatively busy network using the DE205
+ card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
+ (7.8Mb/s) to a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'depca.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ The DE203/4/5 boards all use a new proprietary chip in place of the
+ LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
+ Use the depca.c driver in the standard distribution for the LANCE based
+ cards from DIGITAL; this driver will not work with them.
+
+ The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
+ only makes all the card accesses through I/O transactions and no high
+   (shared) memory is used. This mode incurs a >48% performance penalty
+   and is deprecated in this driver, although it is still allowed for initial
+   setup when the card is hard strapped.
+
+ The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
+ no point in using any mode other than the 2kB mode - their performances
+ are virtually identical, although the driver has been tested in the 2kB
+ and 32kB modes. I would suggest you uncomment the line:
+
+ FORCE_2K_MODE;
+
+ to allow the driver to configure the card as a 2kB card at your current
+ base address, thus leaving more room to clutter your system box with
+ other memory hungry boards.
+
+ As many ISA and EISA cards can be supported under this driver as you
+ wish, limited primarily by the available IRQ lines, rather than by the
+ available I/O addresses (24 ISA, 16 EISA). I have checked different
+ configurations of multiple depca cards and ewrk3 cards and have not
+ found a problem yet (provided you have at least depca.c v0.38) ...
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. All these cards are at
+ {5,10,11,15}.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 1898 to reflect the I/O address and
+ IRQ you're using.
+ 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+   4) if you want to add a new card, go to 5. Otherwise, recompile a
+ kernel with the ewrk3 configuration turned off and reboot.
+ 5) insmod ewrk3.o
+ [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod ewrk3'.
+
+ Promiscuous mode has been turned off in this driver, but all the
+ multicast address bits have been turned on. This improved the send
+ performance on a busy network by about 13%.
+
+ Ioctl's have now been provided (primarily because I wanted to grab some
+ packet size statistics). They are patterned after 'plipconfig.c' from a
+ suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
+ mode, add/delete multicast addresses, change the hardware address, get
+ packet size distribution statistics and muck around with the control and
+ status register. I'll add others if and when the need arises.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 26-aug-94 Initial writing. ALPHA code release.
+ 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
+ LeMAC version calc.,
+ IRQ vector assignments during autoprobe.
+ 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
+ Fixed up MCA hash table algorithm.
+ 0.20 4-sep-94 Added IOCTL functionality.
+ 0.21 14-sep-94 Added I/O mode.
+ 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
+ 0.22 16-sep-94 Added more IOCTLs & tidied up.
+ 0.23 21-sep-94 Added transmit cut through.
+ 0.24 31-oct-94 Added uid checks in some ioctls.
+ 0.30 1-nov-94 BETA code release.
+ 0.31 5-dec-94 Added check/allocate region code.
+ 0.32 16-jan-95 Broadcast packet fix.
+ 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
+ Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ Added verify_area() calls in ewrk3_ioctl() from
+ suggestion by <heiko@colossus.escape.de>.
+ Add new multicasting code.
+ 0.41 20-Jan-96 Fix IRQ set up problem reported by
+ <kenneth@bbs.sas.ntu.ac.sg>.
+ 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+ 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
+ 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
+ 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com>
+ 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com>
+ 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua>
+ ioctl locking, signature search cleanup <akropel1@rochester.rr.com>
+
+ =========================================================================
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+#include "ewrk3.h"
+
+#define DRV_NAME "ewrk3"
+#define DRV_VERSION "0.48"
+
+static char version[] __initdata =
+DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n";
+
+#ifdef EWRK3_DEBUG
+static int ewrk3_debug = EWRK3_DEBUG;
+#else
+static int ewrk3_debug = 1;
+#endif
+
+#define EWRK3_NDA 0xffe0 /* No Device Address */
+
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+#ifndef EWRK3_SIGNATURE
+#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
+#define EWRK3_STRLEN 8
+#endif
+
+#ifndef EWRK3_RAM_BASE_ADDRESSES
+#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
+#endif
+
+/*
+ ** Sets up the I/O area for the autoprobe.
+ */
+#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
+#define EWRK3_IOP_INC 0x20 /* I/O address increment */
+#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+
+#ifndef MAX_NUM_EWRK3S
+#define MAX_NUM_EWRK3S 21
+#endif
+
+#ifndef EWRK3_EISA_IO_PORTS
+#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#endif
+
+#ifndef MAX_EISA_SLOTS
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#endif
+
+#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
+
+/*
+ ** EtherWORKS 3 shared memory window sizes
+ */
+#define IO_ONLY 0x00
+#define SHMEM_2K 0x800
+#define SHMEM_32K 0x8000
+#define SHMEM_64K 0x10000
+
+/*
+ ** EtherWORKS 3 IRQ ENABLE/DISABLE
+ */
+#define ENABLE_IRQs { \
+ icr |= lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs { \
+ icr = inb(EWRK3_ICR);\
+ icr &= ~lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Disable the IRQs */\
+}
+
+/*
+ ** EtherWORKS 3 START/STOP
+ */
+#define START_EWRK3 { \
+ csr = inb(EWRK3_CSR);\
+ csr &= ~(CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_EWRK3 { \
+ csr = (CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
+}
+
+/*
+ ** The EtherWORKS 3 private structure
+ */
+#define EWRK3_PKT_STAT_SZ 16
+#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase EWRK3_PKT_STAT_SZ */
+
+struct ewrk3_stats {
+ u32 bins[EWRK3_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+};
+
+struct ewrk3_private {
+ char adapter_name[80]; /* Name exported to /proc/ioports */
+ u_long shmem_base; /* Shared memory start address */
+ void __iomem *shmem;
+ u_long shmem_length; /* Shared memory window length */
+ struct net_device_stats stats; /* Public stats */
+ struct ewrk3_stats pktStats; /* Private stats counters */
+ u_char irq_mask; /* Adapter IRQ mask bits */
+ u_char mPage; /* Maximum 2kB Page number */
+ u_char lemac; /* Chip rev. level */
+ u_char hard_strapped; /* Don't allow a full open */
+ u_char txc; /* Transmit cut through */
+ void __iomem *mctbl; /* Pointer to the multicast table */
+ u_char led_mask; /* Used to reserve LED access for ethtool */
+ spinlock_t hw_lock;
+};
+
+/*
+ ** Force the EtherWORKS 3 card to be in 2kB MODE
+ */
+#define FORCE_2K_MODE { \
+ shmem_length = SHMEM_2K;\
+ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
+}
+
+/*
+ ** Public Functions
+ */
+static int ewrk3_open(struct net_device *dev);
+static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ewrk3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int ewrk3_close(struct net_device *dev);
+static struct net_device_stats *ewrk3_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops ethtool_ops_203;
+static struct ethtool_ops ethtool_ops;
+
+/*
+ ** Private functions
+ */
+static int ewrk3_hw_init(struct net_device *dev, u_long iobase);
+static void ewrk3_init(struct net_device *dev);
+static int ewrk3_rx(struct net_device *dev);
+static int ewrk3_tx(struct net_device *dev);
+static void ewrk3_timeout(struct net_device *dev);
+
+static void EthwrkSignature(char *name, char *eeprom_image);
+static int DevicePresent(u_long iobase);
+static void SetMulticastFilter(struct net_device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+
+static int Read_EEPROM(u_long iobase, u_char eaddr);
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
+static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType);
+
+static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq);
+static int isa_probe(struct net_device *dev, u_long iobase);
+static int eisa_probe(struct net_device *dev, u_long iobase);
+
+static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12};
+
+static char name[EWRK3_STRLEN + 1];
+static int num_ewrks3s;
+
+/*
+ ** Miscellaneous defines...
+ */
+#define INIT_EWRK3 {\
+ outb(EEPROM_INIT, EWRK3_IOPR);\
+ mdelay(1);\
+}
+
+#ifndef MODULE
+struct net_device * __init ewrk3_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+
+}
+#endif
+
+static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
+{
+ int err;
+
+ dev->base_addr = iobase;
+ dev->irq = irq;
+
+ /* Address PROM pattern */
+ err = isa_probe(dev, iobase);
+ if (err != 0)
+ err = eisa_probe(dev, iobase);
+
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
+
+ return err;
+}
+
+static int __init
+ewrk3_hw_init(struct net_device *dev, u_long iobase)
+{
+ struct ewrk3_private *lp;
+ int i, status = 0;
+ u_long mem_start, shmem_length;
+ u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
+ u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
+
+ /*
+ ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
+ ** This also disables the EISA_ENABLE bit in the EISA Control Register.
+ */
+ if (iobase > 0x400)
+ eisa_cr = inb(EISA_CR);
+ INIT_EWRK3;
+
+ nicsr = inb(EWRK3_CSR);
+
+ icr = inb(EWRK3_ICR);
+ icr &= 0x70;
+ outb(icr, EWRK3_ICR); /* Disable all the IRQs */
+
+ if (nicsr == (CSR_TXD | CSR_RXD))
+ return -ENXIO;
+
+
+ /* Check that the EEPROM is alive and well and not living on Pluto... */
+ for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
+ union {
+ short val;
+ char c[2];
+ } tmp;
+
+ tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
+ eeprom_image[i] = tmp.c[0];
+ eeprom_image[i + 1] = tmp.c[1];
+ chksum += eeprom_image[i] + eeprom_image[i + 1];
+ }
+
+ if (chksum != 0) { /* Bad EEPROM Data! */
+ printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
+ return -ENXIO;
+ }
+
+ EthwrkSignature(name, eeprom_image);
+ if (*name == '\0')
+ return -ENXIO;
+
+ dev->base_addr = iobase;
+
+ if (iobase > 0x400) {
+ outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
+ }
+ lemac = eeprom_image[EEPROM_CHIPVER];
+ cmr = inb(EWRK3_CMR);
+
+ if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
+ ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ hard_strapped = 1;
+ } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
+ /* EISA slot address */
+ printk("%s: %s at %#4lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase >> 12) & 0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ }
+
+ printk(", h/w address ");
+ if (lemac != LeMAC2)
+ DevicePresent(iobase); /* need after EWRK3_INIT */
+ status = get_hw_addr(dev, eeprom_image, lemac);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status) {
+ printk(" which has an EEPROM CRC error.\n");
+ return -ENXIO;
+ }
+
+ if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
+ cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
+ if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
+ cmr |= CMR_RA;
+ if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
+ cmr |= CMR_WB;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
+ cmr |= CMR_POLARITY;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
+ cmr |= CMR_LINK;
+ if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
+ cmr |= CMR_0WS;
+ }
+ if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
+ cmr |= CMR_DRAM;
+ outb(cmr, EWRK3_CMR);
+
+ cr = inb(EWRK3_CR); /* Set up the Control Register */
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
+ if (cr & SETUP_APD)
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
+ cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
+ cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
+ outb(cr, EWRK3_CR);
+
+ /*
+ ** Determine the base address and window length for the EWRK3
+ ** RAM from the memory base register.
+ */
+ mem_start = inb(EWRK3_MBR);
+ shmem_length = 0;
+ if (mem_start != 0) {
+ if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
+ mem_start *= SHMEM_64K;
+ shmem_length = SHMEM_64K;
+ } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
+ mem_start *= SHMEM_32K;
+ shmem_length = SHMEM_32K;
+ } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
+ mem_start = mem_start * SHMEM_2K + 0x80000;
+ shmem_length = SHMEM_2K;
+ } else {
+ return -ENXIO;
+ }
+ }
+ /*
+ ** See the top of this source code for comments about
+ ** uncommenting this line.
+ */
+/* FORCE_2K_MODE; */
+
+ if (hard_strapped) {
+ printk(" is hard strapped.\n");
+ } else if (mem_start) {
+ printk(" has a %dk RAM window", (int) (shmem_length >> 10));
+ printk(" at 0x%.5lx", mem_start);
+ } else {
+ printk(" is in I/O only mode");
+ }
+
+ lp = netdev_priv(dev);
+ lp->shmem_base = mem_start;
+ lp->shmem = ioremap(mem_start, shmem_length);
+ if (!lp->shmem)
+ return -ENOMEM;
+ lp->shmem_length = shmem_length;
+ lp->lemac = lemac;
+ lp->hard_strapped = hard_strapped;
+ lp->led_mask = CR_LED;
+ spin_lock_init(&lp->hw_lock);
+
+ lp->mPage = 64;
+ if (cmr & CMR_DRAM)
+ lp->mPage <<= 1; /* 2 DRAMS on module */
+
+ sprintf(lp->adapter_name, "%s (%s)", name, dev->name);
+
+ lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;
+
+ if (!hard_strapped) {
+ /*
+ ** Enable EWRK3 board interrupts for autoprobing
+ */
+ icr |= ICR_IE; /* Enable interrupts */
+ outb(icr, EWRK3_ICR);
+
+ /* The DMA channel may be passed in on this parameter. */
+ dev->dma = 0;
+
+		/* To auto-IRQ we enable the initialization-done and DMA error
+		   interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ u_char irqnum;
+ unsigned long irq_mask;
+
+
+ irq_mask = probe_irq_on();
+
+ /*
+ ** Trigger a TNE interrupt.
+ */
+ icr |= ICR_TNEM;
+ outb(1, EWRK3_TDQ); /* Write to the TX done queue */
+ outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
+
+ irqnum = irq[((icr & IRQ_SEL) >> 4)];
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if ((dev->irq) && (irqnum == dev->irq)) {
+ printk(" and uses IRQ%d.\n", dev->irq);
+ } else {
+ if (!dev->irq) {
+ printk(" and failed to detect IRQ line.\n");
+ } else if ((irqnum == 1) && (lemac == LeMAC2)) {
+ printk(" and an illegal IRQ line detected.\n");
+ } else {
+ printk(", but incorrect IRQ line detected.\n");
+ }
+ iounmap(lp->shmem);
+ return -ENXIO;
+ }
+
+ DISABLE_IRQs; /* Mask all interrupts */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (ewrk3_debug > 1) {
+ printk(version);
+ }
+ /* The EWRK3-specific entries in the device structure. */
+ dev->open = ewrk3_open;
+ dev->hard_start_xmit = ewrk3_queue_pkt;
+ dev->stop = ewrk3_close;
+ dev->get_stats = ewrk3_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->do_ioctl = ewrk3_ioctl;
+ if (lp->adapter_name[4] == '3')
+ SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
+ else
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->tx_timeout = ewrk3_timeout;
+ dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
+
+ dev->mem_start = 0;
+
+ return 0;
+}
+
+
+static int ewrk3_open(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char icr, csr;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+ if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /*
+ ** Re-initialize the EWRK3...
+ */
+ ewrk3_init(dev);
+
+ if (ewrk3_debug > 1) {
+ printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
+ printk(" physical address: ");
+ for (i = 0; i < 5; i++) {
+ printk("%2.2x:", (u_char) dev->dev_addr[i]);
+ }
+ printk("%2.2x\n", (u_char) dev->dev_addr[i]);
+ if (lp->shmem_length == 0) {
+ printk(" no shared memory, I/O only mode\n");
+ } else {
+ printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
+ printk(" window length: 0x%04lx\n", lp->shmem_length);
+ }
+ printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
+ printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
+ printk(" cr: 0x%02x\n", inb(EWRK3_CR));
+ printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
+ printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
+ printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
+ }
+ netif_start_queue(dev);
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ icr = inb(EWRK3_ICR);
+ ENABLE_IRQs;
+
+ }
+ } else {
+ printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
+ printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
+ return -EINVAL;
+ }
+
+ return status;
+}
+
+/*
+ ** Initialize the EtherWORKS 3 operating conditions
+ */
+static void ewrk3_init(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_char csr, page;
+ u_long iobase = dev->base_addr;
+ int i;
+
+ /*
+ ** Enable any multicasts
+ */
+ set_multicast_list(dev);
+
+ /*
+ ** Set hardware MAC address. Address is initialized from the EEPROM
+ ** during startup but may have since been changed by the user.
+ */
+ for (i=0; i<ETH_ALEN; i++)
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+
+ /*
+ ** Clean out any remaining entries in all the queues here
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+ while (inb(EWRK3_FMQ));
+
+ /*
+ ** Write a clean free memory queue
+ */
+ for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */
+ outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
+ }
+
+ START_EWRK3; /* Enable the TX and/or RX */
+}
+
+/*
+ * Transmit timeout
+ */
+
+static void ewrk3_timeout(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_char icr, csr;
+ u_long iobase = dev->base_addr;
+
+ if (!lp->hard_strapped)
+ {
+ printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
+ dev->name, inb(EWRK3_CSR));
+
+ /*
+ ** Mask all board interrupts
+ */
+ DISABLE_IRQs;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ ewrk3_init(dev);
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ ENABLE_IRQs;
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ }
+}
+
+/*
+ ** Writes a socket buffer to the free page queue
+ */
+static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ void __iomem *buf = NULL;
+ u_char icr;
+ u_char page;
+
+ spin_lock_irq (&lp->hw_lock);
+ DISABLE_IRQs;
+
+ /* if no resources available, exit, request packet be queued */
+ if (inb (EWRK3_FMQC) == 0) {
+ printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
+ dev->name);
+ printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
+ dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
+ inb (EWRK3_FMQC));
+ goto err_out;
+ }
+
+ /*
+ ** Get a free page from the FMQ
+ */
+ if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
+ printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
+ (u_char) page);
+ goto err_out;
+ }
+
+
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb (page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem;
+ outb (page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = (((short) page << 11) & 0x7800) + lp->shmem;
+ outb ((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = (((short) page << 11) & 0xf800) + lp->shmem;
+ outb ((page >> 5), EWRK3_MPR);
+ } else {
+ printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
+ dev->name);
+ BUG ();
+ }
+
+ /*
+ ** Set up the buffer control structures and copy the data from
+ ** the socket buffer to the shared memory.
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ int i;
+ u_char *p = skb->data;
+ outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
+ outb ((char) (skb->len & 0xff), EWRK3_DATA);
+ outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
+ outb ((char) 0x04, EWRK3_DATA);
+ for (i = 0; i < skb->len; i++) {
+ outb (*p++, EWRK3_DATA);
+ }
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ } else {
+ writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */
+ buf += 1;
+ writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */
+ buf += 1;
+ if (lp->txc) {
+ writeb(((skb->len >> 8) & 0xff) | XCT, buf);
+ buf += 1;
+ writeb (0x04, buf); /* index byte */
+ buf += 1;
+ writeb (0x00, (buf + skb->len)); /* Write the XCT flag */
+ memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ memcpy_toio (buf + PRELOAD,
+ skb->data + PRELOAD,
+ skb->len - PRELOAD);
+ writeb (0xff, (buf + skb->len)); /* Write the XCT flag */
+ } else {
+ writeb ((skb->len >> 8) & 0xff, buf);
+ buf += 1;
+ writeb (0x04, buf); /* index byte */
+ buf += 1;
+ memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ }
+ }
+
+ ENABLE_IRQs;
+ spin_unlock_irq (&lp->hw_lock);
+
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+
+ /* Check for free resources: stop Tx queue if there are none */
+ if (inb (EWRK3_FMQC) == 0)
+ netif_stop_queue (dev);
+
+ return 0;
+
+err_out:
+ ENABLE_IRQs;
+ spin_unlock_irq (&lp->hw_lock);
+ return 1;
+}
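The window arithmetic above is compact but easy to misread. As a reading aid only (ewrk3_page_offset is an invented name, not a driver symbol), the sketch below shows how a 2 KB buffer page maps to an offset inside a 32 KB or 64 KB shared-memory window; the remaining high-order page bits (page >> 4 for 32 KB, page >> 5 for 64 KB) are what the driver writes to EWRK3_MPR.

/*
 * Illustration only: map a free-memory-queue page number to its byte
 * offset inside the shared-memory window, mirroring the masks used in
 * ewrk3_queue_pkt() above.  Pages are 2 KB; "window" is 0x8000 (32 KB)
 * or 0x10000 (64 KB).
 */
static unsigned long ewrk3_page_offset(unsigned char page, unsigned long window)
{
	unsigned long byte_addr = (unsigned long) page << 11;	/* page * 2 KB */

	return byte_addr & (window - 1);	/* offset within the window */
}
/* e.g. page 0x13, 32 KB window: offset 0x1800, and EWRK3_MPR gets 0x13 >> 4 = 1 */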
+
+/*
+ ** The EWRK3 interrupt handler.
+ */
+static irqreturn_t ewrk3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct ewrk3_private *lp;
+ u_long iobase;
+ u_char icr, cr, csr;
+
+ lp = netdev_priv(dev);
+ iobase = dev->base_addr;
+
+ /* get the interrupt information */
+ csr = inb(EWRK3_CSR);
+
+ /*
+ ** Mask the EWRK3 board interrupts and turn on the LED
+ */
+ spin_lock(&lp->hw_lock);
+ DISABLE_IRQs;
+
+ cr = inb(EWRK3_CR);
+ cr |= lp->led_mask;
+ outb(cr, EWRK3_CR);
+
+ if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
+ ewrk3_rx(dev);
+
+ if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
+ ewrk3_tx(dev);
+
+ /*
+ ** Now deal with the TX/RX disable flags. These are set when there
+ ** are no more resources. If resources free up then enable these
+ ** interrupts, otherwise mask them - failure to do this will result
+ ** in the system hanging in an interrupt loop.
+ */
+ if (inb(EWRK3_FMQC)) { /* any resources available? */
+ lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */
+ csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */
+ outb(csr, EWRK3_CSR);
+ netif_wake_queue(dev);
+ } else {
+ lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */
+ }
+
+ /* Unmask the EWRK3 board interrupts and turn off the LED */
+ cr &= ~(lp->led_mask);
+ outb(cr, EWRK3_CR);
+ ENABLE_IRQs;
+ spin_unlock(&lp->hw_lock);
+ return IRQ_HANDLED;
+}
+
+/* Called with lp->hw_lock held */
+static int ewrk3_rx(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char page;
+ void __iomem *buf = NULL;
+
+ while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
+ if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = (((short) page << 11) & 0x7800) + lp->shmem;
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = (((short) page << 11) & 0xf800) + lp->shmem;
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n", dev->name);
+ }
+
+ if (!status) {
+ char rx_status;
+ int pkt_len;
+
+ if (lp->shmem_length == IO_ONLY) {
+ rx_status = inb(EWRK3_DATA);
+ pkt_len = inb(EWRK3_DATA);
+ pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
+ } else {
+ rx_status = readb(buf);
+ buf += 1;
+ pkt_len = readw(buf);
+ buf += 3;
+ }
+
+ if (!(rx_status & R_ROK)) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (rx_status & R_DBE)
+ lp->stats.rx_frame_errors++;
+ if (rx_status & R_CRC)
+ lp->stats.rx_crc_errors++;
+ if (rx_status & R_PLL)
+ lp->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ unsigned char *p;
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16-byte align the IP header */
+ p = skb_put(skb, pkt_len);
+
+ if (lp->shmem_length == IO_ONLY) {
+ *p = inb(EWRK3_DATA); /* dummy read */
+ for (i = 0; i < pkt_len; i++) {
+ *p++ = inb(EWRK3_DATA);
+ }
+ } else {
+ memcpy_fromio(p, buf, pkt_len);
+ }
+
+ for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
+ if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
+ lp->pktStats.bins[i]++;
+ i = EWRK3_PKT_STAT_SZ;
+ }
+ }
+ p = skb->data; /* Look at the dest addr */
+ if (p[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s16 *) & p[0] == -1) && (*(s16 *) & p[2] == -1) && (*(s16 *) & p[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s16 *) & p[0] == *(s16 *) & dev->dev_addr[0]) &&
+ (*(s16 *) & p[2] == *(s16 *) & dev->dev_addr[2]) &&
+ (*(s16 *) & p[4] == *(s16 *) & dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ }
+ /*
+ ** Return the received buffer to the free memory queue
+ */
+ outb(page, EWRK3_FMQ);
+ } else {
+ printk("ewrk3_rx(): Illegal page number, page %d\n", page);
+ printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
+ }
+ }
+ return status;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+** Called with lp->hw_lock held
+*/
+static int ewrk3_tx(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char tx_status;
+
+ while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
+ if (tx_status & T_VSTS) { /* The status is valid */
+ if (tx_status & T_TXE) {
+ lp->stats.tx_errors++;
+ if (tx_status & T_NCL)
+ lp->stats.tx_carrier_errors++;
+ if (tx_status & T_LCL)
+ lp->stats.tx_window_errors++;
+ if (tx_status & T_CTU) {
+ if ((tx_status & T_COLL) ^ T_XUR) {
+ lp->pktStats.tx_underruns++;
+ } else {
+ lp->pktStats.excessive_underruns++;
+ }
+ } else if (tx_status & T_COLL) {
+ if ((tx_status & T_COLL) ^ T_XCOLL) {
+ lp->stats.collisions++;
+ } else {
+ lp->pktStats.excessive_collisions++;
+ }
+ }
+ } else {
+ lp->stats.tx_packets++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ewrk3_close(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char icr, csr;
+
+ netif_stop_queue(dev);
+
+ if (ewrk3_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inb(EWRK3_CSR));
+ }
+ /*
+ ** We stop the EWRK3 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_EWRK3;
+
+ /*
+ ** Clean out the TX and RX queues here (note that one entry
+ ** may get added to either the TXD or RX queues if the TX or RX
+ ** just starts processing a packet before the STOP_EWRK3 command
+ ** is received. This will be flushed in the ewrk3_open() call).
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+
+ if (!lp->hard_strapped) {
+ free_irq(dev->irq, dev);
+ }
+ return 0;
+}
+
+static struct net_device_stats *ewrk3_get_stats(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+
+ /* Null body since there is no framing error counter */
+ return &lp->stats;
+}
+
+/*
+ ** Set or clear the multicast filter for this adapter.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char csr;
+
+ csr = inb(EWRK3_CSR);
+
+ if (lp->shmem_length == IO_ONLY) {
+ lp->mctbl = NULL;
+ } else {
+ lp->mctbl = lp->shmem + PAGE0_HTE;
+ }
+
+ csr &= ~(CSR_PME | CSR_MCE);
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ csr |= CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ SetMulticastFilter(dev);
+ csr |= CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ }
+}
+
+/*
+ ** Calculate the hash code and update the logical address filter
+ ** from a list of ethernet multicast addresses.
+ ** Little endian crc one liner from Matt Thomas, DEC.
+ **
+ ** Note that when clearing the table, the broadcast bit must remain asserted
+ ** to receive broadcast messages.
+ */
+static void SetMulticastFilter(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi = dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i;
+ char *addrs, bit, byte;
+ short __iomem *p = lp->mctbl;
+ u16 hashcode;
+ u32 crc;
+
+ spin_lock_irq(&lp->hw_lock);
+
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ } else {
+ outb(0, EWRK3_MPR);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0xff, EWRK3_DATA);
+ } else { /* memset didn't work here */
+ writew(0xffff, p);
+ p++;
+ i++;
+ }
+ }
+ } else {
+ /* Clear table except for broadcast bit */
+ if (lp->shmem_length == IO_ONLY) {
+ for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ outb(0x80, EWRK3_DATA);
+ i++; /* insert the broadcast bit */
+ for (; i < (HASH_TABLE_LEN >> 3); i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ } else {
+ memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
+ writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
+ }
+
+ /* Update table */
+ for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = ether_crc_le(ETH_ALEN, addrs);
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
+
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
+ }
+ }
+ }
+ }
+
+ spin_unlock_irq(&lp->hw_lock);
+}
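The hash described in the comment above is simply the 9 least-significant bits of the little-endian CRC-32 of the address. A standalone sketch that compiles outside the kernel (ether_crc_le_sketch and ewrk3_hash_bucket are stand-in names; the first mirrors what the kernel's ether_crc_le() returns):

#include <stdint.h>

/* Reflected CRC-32 (polynomial 0xEDB88320), no final inversion: this is
 * the value the kernel's ether_crc_le() returns for the address bytes. */
static uint32_t ether_crc_le_sketch(int len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* Filter bucket 0..511; byte in table = bucket >> 3, bit in byte = bucket & 7 */
static unsigned int ewrk3_hash_bucket(const uint8_t addr[6])
{
	return ether_crc_le_sketch(6, addr) & ((1 << 9) - 1);
}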
+
+/*
+ ** ISA bus I/O device probe
+ */
+static int __init isa_probe(struct net_device *dev, u_long ioaddr)
+{
+ int i = num_ewrks3s, maxSlots;
+ int ret = -ENODEV;
+
+ u_long iobase;
+
+ if (ioaddr >= 0x400)
+ goto out;
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EWRK3_IO_BASE; /* Get the first slot address */
+ maxSlots = 24;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i < maxSlots) && (dev != NULL);
+ iobase += EWRK3_IOP_INC, i++)
+ {
+ if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
+ if (DevicePresent(iobase) == 0) {
+ int irq = dev->irq;
+ ret = ewrk3_hw_init(dev, iobase);
+ if (!ret)
+ break;
+ dev->irq = irq;
+ }
+ release_region(iobase, EWRK3_TOTAL_SIZE);
+ }
+ }
+ out:
+
+ return ret;
+}
+
+/*
+ ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+ ** the motherboard.
+ */
+static int __init eisa_probe(struct net_device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ int ret = -ENODEV;
+
+ if (ioaddr < 0x1000)
+ goto out;
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+
+ for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID) == 0) {
+ if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) &&
+ DevicePresent(iobase) == 0) {
+ int irq = dev->irq;
+ ret = ewrk3_hw_init(dev, iobase);
+ if (!ret)
+ break;
+ dev->irq = irq;
+ }
+ release_region(iobase, EWRK3_TOTAL_SIZE);
+ }
+ }
+
+ out:
+ return ret;
+}
+
+
+/*
+ ** Read the EWRK3 EEPROM using this routine
+ */
+static int Read_EEPROM(u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+
+ return inw(EWRK3_EPROM1); /* 16 bits data return */
+}
+
+/*
+ ** Write the EWRK3 EEPROM using this routine
+ */
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+ outw(data, EWRK3_EPROM1); /* write data to register */
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
+ for (i = 0; i < 75000; i++)
+ inb(EWRK3_CSR); /* wait 15msec */
+ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+
+ return 0;
+}
+
+/*
+ ** Look for a particular board name in the on-board EEPROM.
+ */
+static void __init EthwrkSignature(char *name, char *eeprom_image)
+{
+ int i;
+ char *signatures[] = EWRK3_SIGNATURE;
+
+ for (i=0; *signatures[i] != '\0'; i++)
+ if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) )
+ break;
+
+ if (*signatures[i] != '\0') {
+ memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN);
+ name[EWRK3_STRLEN] = '\0';
+ } else
+ name[0] = '\0';
+
+ return;
+}
+
+/*
+ ** Look for a special sequence in the Ethernet station address PROM that
+ ** is common across all EWRK3 products.
+ **
+ ** Search the Ethernet address ROM for the signature. Since the ROM address
+ ** counter can start at an arbitrary point, the search must include the entire
+ ** probe sequence length plus the (length_of_the_signature - 1).
+ ** Stop the search IMMEDIATELY after the signature is found so that the
+ ** PROM address counter is correctly positioned at the start of the
+ ** ethernet address for later read out.
+ */
+
+static int __init DevicePresent(u_long iobase)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ }
+ dev;
+ short sigLength;
+ char data;
+ int i, j, status = 0;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
+ data = inb(EWRK3_APROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) {
+ j = 1;
+ } else {
+ j = 0;
+ }
+ }
+ }
+
+ if (j != sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+ return status;
+}
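Because the PROM address counter may start anywhere, the loop above tracks a partial match and restarts it on a mismatch. A hedged, self-contained rendering of the same scan over an in-memory byte stream (find_signature and its parameters are illustrative, not driver symbols):

/*
 * Illustrative only: returns the number of bytes consumed when the
 * signature is found within the first max_reads bytes of the stream,
 * or -1 otherwise.  Same "track the match, restart on mismatch" logic
 * as DevicePresent(), with inb(EWRK3_APROM) replaced by a buffer read.
 */
static int find_signature(const unsigned char *stream, int max_reads,
			  const unsigned char *sig, int sig_len)
{
	int i, j = 0;

	for (i = 0; j < sig_len && i < max_reads; i++) {
		unsigned char data = stream[i];

		if (data == sig[j])
			j++;				/* still tracking */
		else
			j = (data == sig[0]) ? 1 : 0;	/* restart the match */
	}
	return (j == sig_len) ? i : -1;
}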
+
+static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType)
+{
+ int i, j, k;
+ u_short chksum;
+ u_char crc, lfsr, sd, status = 0;
+ u_long iobase = dev->base_addr;
+ u16 tmp;
+
+ if (chipType == LeMAC2) {
+ for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) {
+ sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
+ outb(dev->dev_addr[j], EWRK3_PAR0 + j);
+ for (k = 0; k < 8; k++, sd >>= 1) {
+ lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
+ crc = (crc >> 1) + lfsr;
+ }
+ }
+ if (crc != eeprom_image[EEPROM_PA_CRC])
+ status = -1;
+ } else {
+ for (i = 0, k = 0; i < ETH_ALEN;) {
+ k <<= 1;
+ if (k > 0xffff)
+ k -= 0xffff;
+
+ k += (u_char) (tmp = inb(EWRK3_APROM));
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+ k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+
+ if (k > 0xffff)
+ k -= 0xffff;
+ }
+ if (k == 0xffff)
+ k = 0;
+ chksum = inb(EWRK3_APROM);
+ chksum |= (inb(EWRK3_APROM) << 8);
+ if (k != chksum)
+ status = -1;
+ }
+
+ return status;
+}
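For the non-LeMAC2 branch, the station address is validated with a rotate-and-add checksum over the three little-endian 16-bit address words, reduced modulo 0xffff and compared with the trailing checksum word. A minimal sketch of that arithmetic, assuming the six address bytes have already been read (the helper name is mine):

/*
 * Sketch of the PROM checksum in the non-LeMAC2 branch above: for each
 * little-endian 16-bit address word, rotate the accumulator (shift left,
 * wrap modulo 0xffff) and add the word, again modulo 0xffff.  The result
 * must equal the 16-bit checksum word that follows the address.
 */
static int ewrk3_prom_csum_ok(const unsigned char addr[6], unsigned short stored)
{
	unsigned int k = 0;
	int i;

	for (i = 0; i < 6; i += 2) {
		k <<= 1;
		if (k > 0xffff)
			k -= 0xffff;
		k += addr[i] | (addr[i + 1] << 8);
		if (k > 0xffff)
			k -= 0xffff;
	}
	if (k == 0xffff)
		k = 0;
	return k == stored;
}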
+
+/*
+ ** Look for a particular board name in the EISA configuration space
+ */
+static int __init EISA_signature(char *name, s32 eisa_id)
+{
+ u_long i;
+ char *signatures[] = EWRK3_SIGNATURE;
+ char ManCode[EWRK3_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ for (i = 0; i < 4; i++) {
+ Eisa.Id[i] = inb(eisa_id + i);
+ }
+
+ ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40);
+ ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40);
+ ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30);
+ ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30);
+ ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30);
+ ManCode[5] = '\0';
+
+ for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name, ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* return the device name string */
+}
+
+static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->fw_version, "%d", fwrev);
+ strcpy(info->bus_info, "N/A");
+ info->eedump_len = EEPROM_MAX;
+}
+
+static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ u8 cr = inb(EWRK3_CR);
+
+ switch (lp->adapter_name[4]) {
+ case '3': /* DE203 */
+ ecmd->supported = SUPPORTED_BNC;
+ ecmd->port = PORT_BNC;
+ break;
+
+ case '4': /* DE204 */
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->port = PORT_TP;
+ break;
+
+ case '5': /* DE205 */
+ ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI;
+ ecmd->autoneg = !(cr & CR_APD);
+ /*
+ ** Port is only valid if autoneg is disabled
+ ** and even then we don't know if AUI is jumpered.
+ */
+ if (!ecmd->autoneg)
+ ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP;
+ break;
+ }
+
+ ecmd->supported |= SUPPORTED_10baseT_Half;
+ ecmd->speed = SPEED_10;
+ ecmd->duplex = DUPLEX_HALF;
+ return 0;
+}
+
+static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ unsigned long flags;
+ u8 cr;
+
+ /* DE205 is the only card with anything to set */
+ if (lp->adapter_name[4] != '5')
+ return -EOPNOTSUPP;
+
+ /* Sanity-check parameters */
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC)
+ return -EINVAL; /* AUI is not software-selectable */
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF)
+ return -EINVAL;
+ if (ecmd->phy_address != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ cr = inb(EWRK3_CR);
+
+ /* If Autoneg is set, change to Auto Port mode */
+ /* Otherwise, disable Auto Port and set port explicitly */
+ if (ecmd->autoneg) {
+ cr &= ~CR_APD;
+ } else {
+ cr |= CR_APD;
+ if (ecmd->port == PORT_TP)
+ cr &= ~CR_PSEL; /* Force TP */
+ else
+ cr |= CR_PSEL; /* Force BNC */
+ }
+
+ /* Commit the changes */
+ outb(cr, EWRK3_CR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ return 0;
+}
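For reference, a minimal sketch of the CR_APD/CR_PSEL encoding applied above for the DE205 (ewrk3_encode_port is an invented helper, not part of the driver); CR_PSEL clear selects TP, set selects BNC:

/*
 * Illustration only: the Control Register value that selects Auto Port
 * mode or forces a port, exactly as ewrk3_set_settings() does above.
 */
static unsigned char ewrk3_encode_port(unsigned char cr, int autoneg, int want_bnc)
{
	if (autoneg)
		return cr & ~CR_APD;		/* Auto Port mode */

	cr |= CR_APD;				/* manual port selection */
	return want_bnc ? (cr | CR_PSEL)	/* force BNC */
			: (cr & ~CR_PSEL);	/* force TP */
}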
+
+static u32 ewrk3_get_link(struct net_device *dev)
+{
+ unsigned long iobase = dev->base_addr;
+ u8 cmr = inb(EWRK3_CMR);
+ /* DE203 has BNC only and link status does not apply */
+ /* On DE204 this is always valid since TP is the only port. */
+ /* On DE205 this reflects TP status even if BNC or AUI is selected. */
+ return !(cmr & CMR_LINK);
+}
+
+static int ewrk3_phys_id(struct net_device *dev, u32 data)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ unsigned long flags;
+ u8 cr;
+ int count;
+
+ /* Toggle LED 4x per second */
+ count = data << 2;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ /* Bail if a PHYS_ID is already in progress */
+ if (lp->led_mask == 0) {
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Prevent ISR from twiddling the LED */
+ lp->led_mask = 0;
+
+ while (count--) {
+ /* Toggle the LED */
+ cr = inb(EWRK3_CR);
+ outb(cr ^ CR_LED, EWRK3_CR);
+
+ /* Wait a little while */
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ msleep(250);
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ /* Exit if we got a signal */
+ if (signal_pending(current))
+ break;
+ }
+
+ lp->led_mask = CR_LED;
+ cr = inb(EWRK3_CR);
+ outb(cr & ~CR_LED, EWRK3_CR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ return signal_pending(current) ? -ERESTARTSYS : 0;
+}
+
+static struct ethtool_ops ethtool_ops_203 = {
+ .get_drvinfo = ewrk3_get_drvinfo,
+ .get_settings = ewrk3_get_settings,
+ .set_settings = ewrk3_set_settings,
+ .phys_id = ewrk3_phys_id,
+};
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = ewrk3_get_drvinfo,
+ .get_settings = ewrk3_get_settings,
+ .set_settings = ewrk3_set_settings,
+ .get_link = ewrk3_get_link,
+ .phys_id = ewrk3_phys_id,
+};
+
+/*
+ ** Perform IOCTL call functions here. Some are privileged operations and the
+ ** CAP_NET_ADMIN capability is checked in those cases.
+ */
+static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ u_char csr;
+ unsigned long flags;
+ union ewrk3_addr {
+ u_char addr[HASH_TABLE_LEN * ETH_ALEN];
+ u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ };
+
+ union ewrk3_addr *tmp;
+
+ /* All we handle are private IOCTLs */
+ if (cmd != EWRK3IOCTL)
+ return -EOPNOTSUPP;
+
+ tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL);
+ if(tmp==NULL)
+ return -ENOMEM;
+
+ switch (ioc->cmd) {
+ case EWRK3_GET_HWADDR: /* Get the hardware address */
+ for (i = 0; i < ETH_ALEN; i++) {
+ tmp->addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+
+ case EWRK3_SET_HWADDR: /* Set the hardware address */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= (CSR_TXD | CSR_RXD);
+ outb(csr, EWRK3_CSR); /* Disable the TX and RX */
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) {
+ status = -EFAULT;
+ break;
+ }
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp->addr[i];
+ outb(tmp->addr[i], EWRK3_PAR0 + i);
+ }
+
+ csr = inb(EWRK3_CSR);
+ csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_PROM: /* Set Promiscuous Mode */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_PME;
+ csr &= ~CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_MCA: /* Get the multicast address table */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ tmp->addr[i] = inb(EWRK3_DATA);
+ }
+ } else {
+ outb(0, EWRK3_MPR);
+ memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3));
+ }
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+
+ break;
+ case EWRK3_SET_MCA: /* Set a multicast address */
+ if (capable(CAP_NET_ADMIN)) {
+ /* tmp->addr can hold at most HASH_TABLE_LEN addresses */
+ if (ioc->len > HASH_TABLE_LEN) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
+ status = -EFAULT;
+ break;
+ }
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_MCA: /* Clear all multicast addresses */
+ if (capable(CAP_NET_ADMIN)) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_MCA_EN: /* Enable multicast addressing */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_MCE;
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_STATS: { /* Get the driver statistics */
+ struct ewrk3_stats *tmp_stats =
+ kmalloc(sizeof(lp->pktStats), GFP_KERNEL);
+ if (!tmp_stats) {
+ status = -ENOMEM;
+ break;
+ }
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ ioc->len = sizeof(lp->pktStats);
+ if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats)))
+ status = -EFAULT;
+ kfree(tmp_stats);
+ break;
+ }
+ case EWRK3_CLR_STATS: /* Zero out the driver statistics */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->hw_lock,flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CSR: /* Get the CSR Register contents */
+ tmp->addr[0] = inb(EWRK3_CSR);
+ ioc->len = 1;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+ case EWRK3_SET_CSR: /* Set the CSR Register contents */
+ if (capable(CAP_NET_ADMIN)) {
+ if (copy_from_user(tmp->addr, ioc->data, 1)) {
+ status = -EFAULT;
+ break;
+ }
+ outb(tmp->addr[0], EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
+ if (capable(CAP_NET_ADMIN)) {
+ for (i = 0; i < (EEPROM_MAX >> 1); i++) {
+ tmp->val[i] = (short) Read_EEPROM(iobase, i);
+ }
+ i = EEPROM_MAX;
+ tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
+ for (j = 0; j < ETH_ALEN; j++) {
+ tmp->addr[i++] = inb(EWRK3_PAR0 + j);
+ }
+ ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
+ if (capable(CAP_NET_ADMIN)) {
+ if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) {
+ status = -EFAULT;
+ break;
+ }
+ for (i = 0; i < (EEPROM_MAX >> 1); i++) {
+ Write_EEPROM(tmp->val[i], iobase, i);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CMR: /* Get the CMR Register contents */
+ tmp->addr[0] = inb(EWRK3_CMR);
+ ioc->len = 1;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+ case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
+ if (capable(CAP_NET_ADMIN)) {
+ lp->txc = 1;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
+ if (capable(CAP_NET_ADMIN)) {
+ lp->txc = 0;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+ kfree(tmp);
+ return status;
+}
+
+#ifdef MODULE
+static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
+static int ndevs;
+static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
+
+/* Up to MAX_NUM_EWRK3S I/O addresses and IRQs may be supplied as module parameters */
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
+
+static __exit void ewrk3_exit_module(void)
+{
+ int i;
+
+ for( i=0; i<ndevs; i++ ) {
+ struct net_device *dev = ewrk3_devs[i];
+ struct ewrk3_private *lp = netdev_priv(dev);
+ ewrk3_devs[i] = NULL;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
+ iounmap(lp->shmem);
+ free_netdev(dev);
+ }
+}
+
+static __init int ewrk3_init_module(void)
+{
+ int i=0;
+
+ while( io[i] && irq[i] ) {
+ struct net_device *dev
+ = alloc_etherdev(sizeof(struct ewrk3_private));
+
+ if (!dev)
+ break;
+
+ if (ewrk3_probe1(dev, io[i], irq[i]) != 0) {
+ free_netdev(dev);
+ break;
+ }
+
+ ewrk3_devs[ndevs++] = dev;
+ i++;
+ }
+
+ return ndevs ? 0 : -EIO;
+}
+
+
+/* Hack for breakage in new module stuff */
+module_exit(ewrk3_exit_module);
+module_init(ewrk3_init_module);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ * End:
+ */
diff --git a/drivers/net/ewrk3.h b/drivers/net/ewrk3.h
new file mode 100644
index 000000000000..fb74bd053672
--- /dev/null
+++ b/drivers/net/ewrk3.h
@@ -0,0 +1,322 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** I/O Address Register Map
+*/
+#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
+#define EWRK3_CR iobase+0x01 /* Control Register */
+#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
+#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
+#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
+#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
+#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
+#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
+#define EWRK3_RQ iobase+0x08 /* Receive Queue */
+#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
+#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
+#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
+#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
+#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
+#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
+#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
+#define EWRK3_DATA iobase+0x10 /* Data Register */
+#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
+#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
+#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
+#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
+#define EWRK3_APROM iobase+0x15 /* Address PROM */
+#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
+#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
+#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
+#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
+#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
+#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
+#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
+#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
+#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
+
+/*
+** Control Page Map
+*/
+#define PAGE0_FMQ 0x000 /* Free Memory Queue */
+#define PAGE0_RQ 0x080 /* Receive Queue */
+#define PAGE0_TQ 0x100 /* Transmit Queue */
+#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
+#define PAGE0_HTE 0x200 /* Hash Table Entries */
+#define PAGE0_RSVD 0x240 /* RESERVED */
+#define PAGE0_USRD 0x600 /* User Data */
+
+/*
+** Control and Status Register bit definitions (EWRK3_CSR)
+*/
+#define CSR_RA 0x80 /* Runt Accept */
+#define CSR_PME 0x40 /* Promiscuous Mode Enable */
+#define CSR_MCE 0x20 /* Multicast Enable */
+#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
+#define CSR_RNE 0x04 /* RX Queue Not Empty */
+#define CSR_TXD 0x02 /* TX Disable */
+#define CSR_RXD 0x01 /* RX Disable */
+
+/*
+** Control Register bit definitions (EWRK3_CR)
+*/
+#define CR_APD 0x80 /* Auto Port Disable */
+#define CR_PSEL 0x40 /* Port Select (0->TP port) */
+#define CR_LBCK 0x20 /* LoopBaCK enable */
+#define CR_FDUP 0x10 /* Full DUPlex enable */
+#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
+#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
+#define CR_LED 0x02 /* LED (1-> turn on) */
+
+/*
+** Interrupt Control Register bit definitions (EWRK3_ICR)
+*/
+#define ICR_IE 0x80 /* Interrupt Enable */
+#define ICR_IS 0x60 /* Interrupt Selected */
+#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
+#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
+#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
+#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
+
+/*
+** Transmit Status Register bit definitions (EWRK3_TSR)
+*/
+#define TSR_NCL 0x80 /* No Carrier Loopback */
+#define TSR_ID 0x40 /* Initially Deferred */
+#define TSR_LCL 0x20 /* Late CoLlision */
+#define TSR_ECL 0x10 /* Excessive CoLlisions */
+#define TSR_RCNTR 0x0f /* Retries CouNTeR */
+
+/*
+** I/O Page Register bit definitions (EWRK3_IOPR)
+*/
+#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
+#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
+#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
+#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
+#define EEPROM_RD 0xe0 /* EEPROM READ command */
+
+/*
+** I/O Base Register bit definitions (EWRK3_IOBR)
+*/
+#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
+#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
+
+/*
+** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
+*/
+#define CMR_RA 0x80 /* Read Ahead */
+#define CMR_WB 0x40 /* Write Behind */
+#define CMR_LINK 0x20 /* 0->TP */
+#define CMR_POLARITY 0x10 /* Informational */
+#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
+#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
+#define CMR_PNP 0x04 /* Plug 'n Play */
+#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
+#define CMR_0WS 0x01 /* Zero Wait State */
+
+/*
+** MAC Receive Status Register bit definitions
+*/
+
+#define R_ROK 0x80 /* Receive OK summary */
+#define R_IAM 0x10 /* Individual Address Match */
+#define R_MCM 0x08 /* MultiCast Match */
+#define R_DBE 0x04 /* Dribble Bit Error */
+#define R_CRC 0x02 /* CRC error */
+#define R_PLL 0x01 /* Phase Lock Lost */
+
+/*
+** MAC Transmit Control Register bit definitions
+*/
+
+#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
+#define TCR_SED 0x20 /* Stop when Error Detected */
+#define TCR_QMODE 0x10 /* Q_MODE */
+#define TCR_LAB 0x08 /* Less Aggressive Backoff */
+#define TCR_PAD 0x04 /* PAD Runt Packets */
+#define TCR_IFC 0x02 /* Insert Frame Check */
+#define TCR_ISA 0x01 /* Insert Source Address */
+
+/*
+** MAC Transmit Status Register bit definitions
+*/
+
+#define T_VSTS 0x80 /* Valid STatuS */
+#define T_CTU 0x40 /* Cut Through Used */
+#define T_SQE 0x20 /* Signal Quality Error */
+#define T_NCL 0x10 /* No Carrier Loopback */
+#define T_LCL 0x08 /* Late Collision */
+#define T_ID 0x04 /* Initially Deferred */
+#define T_COLL 0x03 /* COLLision status */
+#define T_XCOLL 0x03 /* Excessive Collisions */
+#define T_MCOLL 0x02 /* Multiple Collisions */
+#define T_OCOLL 0x01 /* One Collision */
+#define T_NOCOLL 0x00 /* No Collisions */
+#define T_XUR 0x03 /* Excessive Underruns */
+#define T_TXE 0x7f /* TX Errors */
+
+/*
+** EISA Configuration Register bit definitions
+*/
+
+#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
+
+/*
+** EEPROM BYTES
+*/
+#define EEPROM_MEMB 0x00
+#define EEPROM_IOB 0x01
+#define EEPROM_EISA_ID0 0x02
+#define EEPROM_EISA_ID1 0x03
+#define EEPROM_EISA_ID2 0x04
+#define EEPROM_EISA_ID3 0x05
+#define EEPROM_MISC0 0x06
+#define EEPROM_MISC1 0x07
+#define EEPROM_PNAME7 0x08
+#define EEPROM_PNAME6 0x09
+#define EEPROM_PNAME5 0x0a
+#define EEPROM_PNAME4 0x0b
+#define EEPROM_PNAME3 0x0c
+#define EEPROM_PNAME2 0x0d
+#define EEPROM_PNAME1 0x0e
+#define EEPROM_PNAME0 0x0f
+#define EEPROM_SWFLAGS 0x10
+#define EEPROM_HWCAT 0x11
+#define EEPROM_NETMAN2 0x12
+#define EEPROM_REVLVL 0x13
+#define EEPROM_NETMAN0 0x14
+#define EEPROM_NETMAN1 0x15
+#define EEPROM_CHIPVER 0x16
+#define EEPROM_SETUP 0x17
+#define EEPROM_PADDR0 0x18
+#define EEPROM_PADDR1 0x19
+#define EEPROM_PADDR2 0x1a
+#define EEPROM_PADDR3 0x1b
+#define EEPROM_PADDR4 0x1c
+#define EEPROM_PADDR5 0x1d
+#define EEPROM_PA_CRC 0x1e
+#define EEPROM_CHKSUM 0x1f
+
+/*
+** EEPROM bytes for checksumming
+*/
+#define EEPROM_MAX 32 /* bytes */
+
+/*
+** EEPROM MISCELLANEOUS FLAGS
+*/
+#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
+#define READ_AHEAD 0x0080 /* Read Ahead feature */
+#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
+#define IRQ_SEL 0x0060 /* IRQ line selection */
+#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
+#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
+#define WRITE_BEHIND 0x0002 /* Write Behind feature */
+#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
+
+/*
+** EEPROM NETWORK MANAGEMENT FLAGS
+*/
+#define NETMAN_POL 0x04 /* Polarity defeat */
+#define NETMAN_LINK 0x02 /* Link defeat */
+#define NETMAN_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM SW FLAGS
+*/
+#define SW_SQE 0x10 /* Signal Quality Error */
+#define SW_LAB 0x08 /* Less Aggressive Backoff */
+#define SW_INIT 0x04 /* Initialized */
+#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
+#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
+
+/*
+** EEPROM SETUP FLAGS
+*/
+#define SETUP_APD 0x80 /* AutoPort Disable */
+#define SETUP_PS 0x40 /* Port Select */
+#define SETUP_MP 0x20 /* MultiPort */
+#define SETUP_1TP 0x10 /* 1 port, TP */
+#define SETUP_1COAX 0x00 /* 1 port, Coax */
+#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
+
+/*
+** EEPROM MANAGEMENT FLAGS
+*/
+#define MGMT_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM VERSIONS
+*/
+#define LeMAC 0x11
+#define LeMAC2 0x12
+
+/*
+** Miscellaneous
+*/
+
+#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+
+#define HASH_TABLE_LEN 512 /* Bits */
+
+#define XCT 0x80 /* Transmit Cut Through */
+#define PRELOAD 16 /* 4 long words */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define EWRK3IOCTL SIOCDEVPRIVATE
+
+struct ewrk3_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
+#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
+#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
+#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
+#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define EWRK3_MCA_EN 0x09 /* Enable multicast addressing */
+#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
+#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
+#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
+#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
+#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
+#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
+#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
+#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
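A hedged user-space sketch of how an 'ewrk3setup'-style tool might drive this private ioctl; the interface name "eth0", the mirrored struct layout, and the overlay onto ifr_ifru are assumptions made for illustration, matching how ewrk3_ioctl() interprets the ifreq:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

struct ewrk3_ioctl_req {	/* user-space mirror of struct ewrk3_ioctl */
	unsigned short cmd;
	unsigned short len;
	unsigned char *data;
};

int main(void)
{
	unsigned char hwaddr[6];
	struct ewrk3_ioctl_req req = { 0x01 /* EWRK3_GET_HWADDR */, 0, hwaddr };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	memcpy(&ifr.ifr_ifru, &req, sizeof(req));	/* driver reads it from here */
	if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0) {
		memcpy(&req, &ifr.ifr_ifru, sizeof(req));
		printf("hw address: %02x:%02x:%02x:%02x:%02x:%02x (len %u)\n",
		       hwaddr[0], hwaddr[1], hwaddr[2],
		       hwaddr[3], hwaddr[4], hwaddr[5], (unsigned) req.len);
	}
	close(fd);
	return 0;
}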
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
new file mode 100644
index 000000000000..d05e9dd1e140
--- /dev/null
+++ b/drivers/net/fealnx.c
@@ -0,0 +1,2005 @@
+/*
+ Written 1998-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/pci-skeleton.html
+
+ Linux kernel updates:
+
+ Version 2.51, Nov 17, 2001 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+*/
+
+#define DRV_NAME "fealnx"
+#define DRV_VERSION "2.51"
+#define DRV_RELDATE "Nov-17-2001"
+
+static int debug; /* 1-> print debug message */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
+/* Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc. */
+/* Both 'options[]' and 'full_duplex[]' should exist for driver */
+/* interoperability. */
+/* The media type is usually passed in 'options[]'. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+/* Operational parameters that are set at compile time. */
+/* Keep the ring sizes a power of two for compile efficiency. */
+/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
+/* Making the Tx ring too large decreases the effectiveness of channel */
+/* bonding and packet priority. */
+/* There are no ill effects from too-large receive rings. */
+// 88-12-9 modify,
+// #define TX_RING_SIZE 16
+// #define RX_RING_SIZE 32
+#define TX_RING_SIZE 6
+#define RX_RING_SIZE 12
+#define TX_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct fealnx_desc))
+#define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct fealnx_desc))
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
+
+
+/* This driver was written to use PCI memory space; however, some x86 systems
+ work only with I/O space accesses. */
+#ifndef __alpha__
+#define USE_IO_OPS
+#endif
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
+/* This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+MODULE_AUTHOR("Myson or whoever");
+MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
+MODULE_LICENSE("GPL");
+module_param(max_interrupt_work, int, 0);
+//MODULE_PARM(min_pci_latency, "i");
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
+
+#define MIN_REGION_SIZE 136
+
+enum pci_flags_bit {
+ PCI_USES_IO = 1,
+ PCI_USES_MEM = 2,
+ PCI_USES_MASTER = 4,
+ PCI_ADDR0 = 0x10 << 0,
+ PCI_ADDR1 = 0x10 << 1,
+ PCI_ADDR2 = 0x10 << 2,
+ PCI_ADDR3 = 0x10 << 3,
+};
+
+/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+enum chip_capability_flags {
+ HAS_MII_XCVR,
+ HAS_CHIP_XCVR,
+};
+
+/* 89/6/13 add, */
+/* for different PHY */
+enum phy_type_flags {
+ MysonPHY = 1,
+ AhdocPHY = 2,
+ SeeqPHY = 3,
+ MarvellPHY = 4,
+ Myson981 = 5,
+ LevelOnePHY = 6,
+ OtherPHY = 10,
+};
+
+struct chip_info {
+ char *chip_name;
+ int io_size;
+ int flags;
+};
+
+static struct chip_info skel_netdrv_tbl[] = {
+ {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
+ {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR},
+ {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
+};
+
+/* Offsets to the Command and Status Registers. */
+enum fealnx_offsets {
+ PAR0 = 0x0, /* physical address 0-3 */
+ PAR1 = 0x04, /* physical address 4-5 */
+ MAR0 = 0x08, /* multicast address 0-3 */
+ MAR1 = 0x0C, /* multicast address 4-7 */
+ FAR0 = 0x10, /* flow-control address 0-3 */
+ FAR1 = 0x14, /* flow-control address 4-5 */
+ TCRRCR = 0x18, /* receive & transmit configuration */
+ BCR = 0x1C, /* bus command */
+ TXPDR = 0x20, /* transmit polling demand */
+ RXPDR = 0x24, /* receive polling demand */
+ RXCWP = 0x28, /* receive current word pointer */
+ TXLBA = 0x2C, /* transmit list base address */
+ RXLBA = 0x30, /* receive list base address */
+ ISR = 0x34, /* interrupt status */
+ IMR = 0x38, /* interrupt mask */
+ FTH = 0x3C, /* flow control high/low threshold */
+ MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
+ TALLY = 0x44, /* tally counters for crc and mpa */
+ TSR = 0x48, /* tally counter for transmit status */
+ BMCRSR = 0x4c, /* basic mode control and status */
+ PHYIDENTIFIER = 0x50, /* phy identifier */
+ ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
+ partner ability */
+ ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
+ BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ RFCON = 0x00020000, /* receive flow control xon packet */
+ RFCOFF = 0x00010000, /* receive flow control xoff packet */
+ LSCStatus = 0x00008000, /* link status change */
+ ANCStatus = 0x00004000, /* autonegotiation completed */
+ FBE = 0x00002000, /* fatal bus error */
+ FBEMask = 0x00001800, /* mask bit12-11 */
+ ParityErr = 0x00000000, /* parity error */
+ TargetErr = 0x00001000, /* target abort */
+ MasterErr = 0x00000800, /* master error */
+ TUNF = 0x00000400, /* transmit underflow */
+ ROVF = 0x00000200, /* receive overflow */
+ ETI = 0x00000100, /* transmit early int */
+ ERI = 0x00000080, /* receive early int */
+ CNTOVF = 0x00000040, /* counter overflow */
+ RBU = 0x00000020, /* receive buffer unavailable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
+ TI = 0x00000008, /* transmit interrupt */
+ RI = 0x00000004, /* receive interrupt */
+ RxErr = 0x00000002, /* receive error */
+};
+
+/* Bits in the NetworkConfig register, W for writing, R for reading */
+/* FIXME: some names are invented by me. Marked with (name?) */
+/* If you have docs and know bit names, please fix 'em */
+enum rx_mode_bits {
+ CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
+ CR_W_FD = 0x00100000, /* full duplex */
+ CR_W_PS10 = 0x00080000, /* 10 mbit */
+ CR_W_TXEN = 0x00040000, /* tx enable (name?) */
+ CR_W_PS1000 = 0x00010000, /* 1000 mbit */
+ /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
+ CR_W_RXMODEMASK = 0x000000e0,
+ CR_W_PROM = 0x00000080, /* promiscuous mode */
+ CR_W_AB = 0x00000040, /* accept broadcast */
+ CR_W_AM = 0x00000020, /* accept multicast */
+ CR_W_ARP = 0x00000008, /* receive runt pkt */
+ CR_W_ALP = 0x00000004, /* receive long pkt */
+ CR_W_SEP = 0x00000002, /* receive error pkt */
+ CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
+
+ CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
+ CR_R_FD = 0x00100000, /* full duplex detected */
+ CR_R_PS10 = 0x00080000, /* 10 mbit detected */
+ CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct fealnx_desc {
+ s32 status;
+ s32 control;
+ u32 buffer;
+ u32 next_desc;
+ struct fealnx_desc *next_desc_logical;
+ struct sk_buff *skbuff;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+/* Bits in network_desc.status */
+enum rx_desc_status_bits {
+ RXOWN = 0x80000000, /* own bit */
+ FLNGMASK = 0x0fff0000, /* frame length */
+ FLNGShift = 16,
+ MARSTATUS = 0x00004000, /* multicast address received */
+ BARSTATUS = 0x00002000, /* broadcast address received */
+ PHYSTATUS = 0x00001000, /* physical address received */
+ RXFSD = 0x00000800, /* first descriptor */
+ RXLSD = 0x00000400, /* last descriptor */
+ ErrorSummary = 0x80, /* error summary */
+ RUNT = 0x40, /* runt packet received */
+ LONG = 0x20, /* long packet received */
+ FAE = 0x10, /* frame align error */
+ CRC = 0x08, /* crc error */
+ RXER = 0x04, /* receive error */
+};
+
+enum rx_desc_control_bits {
+ RXIC = 0x00800000, /* interrupt control */
+ RBSShift = 0,
+};
+
+enum tx_desc_status_bits {
+ TXOWN = 0x80000000, /* own bit */
+ JABTO = 0x00004000, /* jabber timeout */
+ CSL = 0x00002000, /* carrier sense lost */
+ LC = 0x00001000, /* late collision */
+ EC = 0x00000800, /* excessive collision */
+ UDF = 0x00000400, /* fifo underflow */
+ DFR = 0x00000200, /* deferred */
+ HF = 0x00000100, /* heartbeat fail */
+ NCRMask = 0x000000ff, /* collision retry count */
+ NCRShift = 0,
+};
+
+enum tx_desc_control_bits {
+ TXIC = 0x80000000, /* interrupt control */
+ ETIControl = 0x40000000, /* early transmit interrupt */
+ TXLD = 0x20000000, /* last descriptor */
+ TXFD = 0x10000000, /* first descriptor */
+ CRCEnable = 0x08000000, /* crc control */
+ PADEnable = 0x04000000, /* padding control */
+ RetryTxLC = 0x02000000, /* retry late collision */
+ PKTSMask = 0x3ff800, /* packet size bit21-11 */
+ PKTSShift = 11,
+ TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
+ TBSShift = 0,
+};
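As a hedged illustration only (not an excerpt from the transmit path), a single-buffer frame's control word would combine the flags above with the byte count placed in both the packet-size and buffer-size fields:

/*
 * Hypothetical example, not driver code: control word for a frame carried
 * in a single descriptor of "len" bytes -- interrupt on completion, first
 * and last descriptor, hardware CRC and padding, length in both fields.
 */
static u32 fealnx_tx_control_example(u32 len)
{
	return TXIC | TXLD | TXFD | CRCEnable | PADEnable |
	       ((len << PKTSShift) & PKTSMask) |
	       ((len << TBSShift) & TBSMask);
}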
+
+/* BootROM/EEPROM/MII Management Register */
+#define MASK_MIIR_MII_READ 0x00000000
+#define MASK_MIIR_MII_WRITE 0x00000008
+#define MASK_MIIR_MII_MDO 0x00000004
+#define MASK_MIIR_MII_MDI 0x00000002
+#define MASK_MIIR_MII_MDC 0x00000001
+
+/* ST+OP+PHYAD+REGAD+TA */
+#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
+#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Myson PHY */
+/* ------------------------------------------------------------------------- */
+#define MysonPHYID 0xd0000302
+/* 89-7-27 add, (begin) */
+#define MysonPHYID0 0x0302
+#define StatusRegister 18
+#define SPEED100 0x0400 // bit10
+#define FULLMODE 0x0800 // bit11
+/* 89-7-27 add, (end) */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Seeq 80225 PHY */
+/* ------------------------------------------------------------------------- */
+#define SeeqPHYID0 0x0016
+
+#define MIIRegister18 18
+#define SPD_DET_100 0x80
+#define DPLX_DET_FULL 0x40
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Ahdoc 101 PHY */
+/* ------------------------------------------------------------------------- */
+#define AhdocPHYID0 0x0022
+
+#define DiagnosticReg 18
+#define DPLX_FULL 0x0800
+#define Speed_100 0x0400
+
+/* 89/6/13 add, */
+/* -------------------------------------------------------------------------- */
+/* Constants */
+/* -------------------------------------------------------------------------- */
+#define MarvellPHYID0 0x0141
+#define LevelOnePHYID0 0x0013
+
+#define MII1000BaseTControlReg 9
+#define MII1000BaseTStatusReg 10
+#define SpecificReg 17
+
+/* for 1000BaseT Control Register */
+#define PHYAbletoPerform1000FullDuplex 0x0200
+#define PHYAbletoPerform1000HalfDuplex 0x0100
+#define PHY1000AbilityMask 0x300
+
+// for phy specific status register, marvell phy.
+#define SpeedMask 0x0c000
+#define Speed_1000M 0x08000
+#define Speed_100M 0x4000
+#define Speed_10M 0
+#define Full_Duplex 0x2000
+
+// 89/12/29 add, for phy specific status register, levelone phy, (begin)
+#define LXT1000_100M 0x08000
+#define LXT1000_1000M 0x0c000
+#define LXT1000_Full 0x200
+// 89/12/29 add, for phy specific status register, levelone phy, (end)
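A hedged sketch (marvell_status_example is an invented name) of how the Marvell-specific status constants above decode into speed and duplex; the LevelOne LXT1000_* masks would be handled analogously.

/*
 * Illustration only: decode the Marvell PHY specific status register
 * (SpecificReg, register 17) into speed and duplex with the masks above.
 */
static void marvell_status_example(unsigned int reg17,
				   unsigned int *speed_mbps, int *fdx)
{
	switch (reg17 & SpeedMask) {
	case Speed_1000M:
		*speed_mbps = 1000;
		break;
	case Speed_100M:
		*speed_mbps = 100;
		break;
	default:			/* Speed_10M */
		*speed_mbps = 10;
		break;
	}
	*fdx = (reg17 & Full_Duplex) != 0;
}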
+
+/* for 3-in-1 case, BMCRSR register */
+#define LinkIsUp2 0x00040000
+
+/* for PHY */
+#define LinkIsUp 0x0004
+
+
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct fealnx_desc *rx_ring;
+ struct fealnx_desc *tx_ring;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ spinlock_t lock;
+
+ struct net_device_stats stats;
+
+ /* Media monitoring timer. */
+ struct timer_list timer;
+
+ /* Reset timer */
+ struct timer_list reset_timer;
+ int reset_timer_armed;
+ unsigned long crvalue_sv;
+ unsigned long imrvalue_sv;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int flags;
+ struct pci_dev *pci_dev;
+ unsigned long crvalue;
+ unsigned long bcrvalue;
+ unsigned long imrvalue;
+ struct fealnx_desc *cur_rx;
+ struct fealnx_desc *lack_rxbuf;
+ int really_rx_count;
+ struct fealnx_desc *cur_tx;
+ struct fealnx_desc *cur_tx_copy;
+ int really_tx_count;
+ int free_tx_count;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int linkok;
+ unsigned int line_speed;
+ unsigned int duplexmode;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int PHYType;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* Number of MII PHYs found. */
+ unsigned char phys[2]; /* MII device addresses. */
+ struct mii_if_info mii;
+ void __iomem *mem;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void getlinktype(struct net_device *dev);
+static void getlinkstatus(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void reset_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+static void reset_rx_descriptors(struct net_device *dev);
+static void reset_tx_descriptors(struct net_device *dev);
+
+static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
+ break;
+ }
+}
+
+
+static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
+ == (CR_R_RXSTOP+CR_R_TXSTOP) )
+ break;
+ }
+}
+
+
+static int __devinit fealnx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, option, err, irq;
+ static int card_idx = -1;
+ char boardname[12];
+ void __iomem *ioaddr;
+ unsigned long len;
+ unsigned int chip_id = ent->driver_data;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ card_idx++;
+ sprintf(boardname, "fealnx%d", card_idx);
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+ pci_set_master(pdev);
+
+ len = pci_resource_len(pdev, bar);
+ if (len < MIN_REGION_SIZE) {
+ printk(KERN_ERR "%s: region size %ld too small, aborting\n",
+ boardname, len);
+ return -ENODEV;
+ }
+
+ i = pci_request_regions(pdev, boardname);
+ if (i) return i;
+
+ irq = pdev->irq;
+
+ ioaddr = pci_iomap(pdev, bar, len);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+
+ dev = alloc_etherdev(sizeof(struct netdev_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* read ethernet id */
+ for (i = 0; i < 6; ++i)
+ dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ /* Make certain the descriptor lists are aligned. */
+ np = netdev_priv(dev);
+ np->mem = ioaddr;
+ spin_lock_init(&np->lock);
+ np->pci_dev = pdev;
+ np->flags = skel_netdrv_tbl[chip_id].flags;
+ pci_set_drvdata(pdev, dev);
+ np->mii.dev = dev;
+ np->mii.mdio_read = mdio_read;
+ np->mii.mdio_write = mdio_write;
+ np->mii.phy_id_mask = 0x1f;
+ np->mii.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ np->rx_ring = (struct fealnx_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_rx;
+ }
+ np->tx_ring = (struct fealnx_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ /* find the connected MII xcvrs */
+ if (np->flags == HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ printk(KERN_INFO
+ "%s: MII PHY found at address %d, status "
+ "0x%4.4x.\n", dev->name, phy, mii_status);
+ /* get phy type */
+ {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 2);
+ if (data == SeeqPHYID0)
+ np->PHYType = SeeqPHY;
+ else if (data == AhdocPHYID0)
+ np->PHYType = AhdocPHY;
+ else if (data == MarvellPHYID0)
+ np->PHYType = MarvellPHY;
+ else if (data == MysonPHYID0)
+ np->PHYType = Myson981;
+ else if (data == LevelOnePHYID0)
+ np->PHYType = LevelOnePHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ }
+ }
+
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0) {
+ printk(KERN_WARNING "%s: MII PHY not found -- this device may "
+ "not operate correctly.\n", dev->name);
+ }
+ } else {
+ np->phys[0] = 32;
+/* 89/6/23 add, (begin) */
+ /* get phy type */
+ if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
+ np->PHYType = MysonPHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ np->mii.phy_id = np->phys[0];
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii.full_duplex = 1;
+ np->default_port = option & 15;
+ }
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii.full_duplex = full_duplex[card_idx];
+
+ if (np->mii.full_duplex) {
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+/* 89/6/13 add, (begin) */
+// if (np->PHYType==MarvellPHY)
+ if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 9);
+ data = (data & 0xfcff) | 0x0200;
+ mdio_write(dev, np->phys[0], 9, data);
+ }
+/* 89/6/13 add, (end) */
+ if (np->flags == HAS_MII_XCVR)
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ else
+ iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
+ np->mii.force_media = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->tx_timeout = &tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out_free_tx;
+
+ printk(KERN_INFO "%s: %s at %p, ",
+ dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ return 0;
+
+err_out_free_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_free_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+ return err;
+}
+
+
+static void __devexit fealnx_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ unregister_netdev(dev);
+ pci_iounmap(pdev, np->mem);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ } else
+ printk(KERN_ERR "fealnx: remove for unknown device\n");
+}
+
+
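+/* Bit-bang the preamble of 32 ones and the ST+OP+PHYAD+REGAD+TA command
+   word onto the MDIO/MDC lines; returns the working MIIR value so the caller
+   can keep clocking data bits in (read) or out (write). */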
+static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
+{
+ ulong miir;
+ int i;
+ unsigned int mask, data;
+
+ /* enable MII output */
+ miir = (ulong) ioread32(miiport);
+ miir &= 0xfffffff0;
+
+ miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
+
+ /* send 32 1's preamble */
+ for (i = 0; i < 32; i++) {
+ /* low MDC; MDO is already high (miir) */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ }
+
+ /* calculate ST+OP+PHYAD+REGAD+TA */
+ data = opcode | (phyad << 7) | (regad << 2);
+
+ /* sent out */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+
+ iowrite32(miir, miiport);
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ if (mask == 0x2 && opcode == OP_READ)
+ miir &= ~MASK_MIIR_MII_WRITE;
+ }
+ return miir;
+}
+
+
+static int mdio_read(struct net_device *dev, int phyad, int regad)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask, data;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
+
+ /* read data */
+ mask = 0x8000;
+ data = 0;
+ while (mask) {
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* read MDI */
+ miir = ioread32(miiport);
+ if (miir & MASK_MIIR_MII_MDI)
+ data |= mask;
+
+ /* high MDC, and wait */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ return data & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
+
+ /* write data */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ iowrite32(0x00000001, ioaddr + BCR); /* Reset */
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
+ return -EAGAIN;
+
+ for (i = 0; i < 3; i++)
+ iowrite16(((unsigned short*)dev->dev_addr)[i],
+ ioaddr + PAR0 + i*2);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
+ iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword burst.
+ 586: no burst limit.
+ Burst length 5:3
+ 0 0 0 1
+ 0 0 1 4
+ 0 1 0 8
+ 0 1 1 16
+ 1 0 0 32
+ 1 0 1 64
+ 1 1 0 128
+ 1 1 1 256
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list.
+ FIXME (Ueimor): optimistic for alpha + posted writes ? */
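+ /* For example, bcrvalue 0x10 has bits 5:3 = 010, an 8-longword burst;
+    the commented-out 0x38 (bits 5:3 = 111) would select a 256-longword burst. */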
+#if defined(__powerpc__) || defined(__sparc__)
+// 89/9/1 modify,
+// np->bcrvalue=0x04 | 0x38; /* big-endian, 256 burst length */
+ np->bcrvalue = 0x04 | 0x10; /* big-endian, tx 8 burst length */
+ np->crvalue = 0xe00; /* rx 128 burst length */
+#elif defined(__alpha__) || defined(__x86_64__)
+// 89/9/1 modify,
+// np->bcrvalue=0x38; /* little-endian, 256 burst length */
+ np->bcrvalue = 0x10; /* little-endian, 8 burst length */
+ np->crvalue = 0xe00; /* rx 128 burst length */
+#elif defined(__i386__)
+#if defined(MODULE)
+// 89/9/1 modify,
+// np->bcrvalue=0x38; /* little-endian, 256 burst length */
+ np->bcrvalue = 0x10; /* little-endian, 8 burst length */
+ np->crvalue = 0xe00; /* rx 128 burst length */
+#else
+ /* When not a module we can work around broken '486 PCI boards. */
+#define x86 boot_cpu_data.x86
+// 89/9/1 modify,
+// np->bcrvalue=(x86 <= 4 ? 0x10 : 0x38);
+ np->bcrvalue = 0x10;
+ np->crvalue = (x86 <= 4 ? 0xa00 : 0xe00);
+ if (x86 <= 4)
+ printk(KERN_INFO "%s: This is a 386/486 PCI system, setting burst "
+ "length to %x.\n", dev->name, (x86 <= 4 ? 0x10 : 0x38));
+#endif
+#else
+// 89/9/1 modify,
+// np->bcrvalue=0x38;
+ np->bcrvalue = 0x10;
+ np->crvalue = 0xe00; /* rx 128 burst length */
+#warning Processor architecture undefined!
+#endif
+// 89/12/29 add,
+// 90/1/16 modify,
+// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
+ np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
+ if (np->pci_dev->device == 0x891) {
+ np->bcrvalue |= 0x200; /* set PROG bit */
+ np->crvalue |= CR_W_ENH; /* set enhanced bit */
+ np->imrvalue |= ETI;
+ }
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ iowrite32(0, ioaddr + RXPDR);
+// 89/9/1 modify,
+// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->mii.full_duplex = np->mii.force_media;
+ getlinkstatus(dev);
+ if (np->linkok)
+ getlinktype(dev);
+ __set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = RUN_AT(3 * HZ);
+ np->timer.data = (unsigned long) dev;
+ np->timer.function = &netdev_timer;
+
+ /* timer handler */
+ add_timer(&np->timer);
+
+ init_timer(&np->reset_timer);
+ np->reset_timer.data = (unsigned long) dev;
+ np->reset_timer.function = &reset_timer;
+ np->reset_timer_armed = 0;
+
+ return 0;
+}
+
+
+static void getlinkstatus(struct net_device *dev)
+/* function: Routine will read MII Status Register to get link status. */
+/* input : dev... pointer to the adapter block. */
+/* output : none. */
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int i, DelayTime = 0x1000;
+
+ np->linkok = 0;
+
+ if (np->PHYType == MysonPHY) {
+ for (i = 0; i < DelayTime; ++i) {
+ if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ } else {
+ for (i = 0; i < DelayTime; ++i) {
+ if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ }
+}
+
+
+static void getlinktype(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (np->PHYType == MysonPHY) { /* 3-in-1 case */
+ if (ioread32(np->mem + TCRRCR) & CR_R_FD)
+ np->duplexmode = 2; /* full duplex */
+ else
+ np->duplexmode = 1; /* half duplex */
+ if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
+ np->line_speed = 1; /* 10M */
+ else
+ np->line_speed = 2; /* 100M */
+ } else {
+ if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], MIIRegister18);
+ if (data & SPD_DET_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_DET_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ } else if (np->PHYType == AhdocPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], DiagnosticReg);
+ if (data & Speed_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ }
+/* 89/6/13 add, (begin) */
+ else if (np->PHYType == MarvellPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & Full_Duplex)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == Speed_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == Speed_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+/* 89/6/13 add, (end) */
+/* 89/7/27 add, (begin) */
+ else if (np->PHYType == Myson981) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], StatusRegister);
+
+ if (data & SPEED100)
+ np->line_speed = 2;
+ else
+ np->line_speed = 1;
+
+ if (data & FULLMODE)
+ np->duplexmode = 2;
+ else
+ np->duplexmode = 1;
+ }
+/* 89/7/27 add, (end) */
+/* 89/12/29 add */
+ else if (np->PHYType == LevelOnePHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & LXT1000_Full)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == LXT1000_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == LXT1000_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+ np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
+ if (np->line_speed == 1)
+ np->crvalue |= CR_W_PS10;
+ else if (np->line_speed == 3)
+ np->crvalue |= CR_W_PS1000;
+ if (np->duplexmode == 2)
+ np->crvalue |= CR_W_FD;
+ }
+}
+
+
+/* Take lock before calling this */
+static void allocate_rx_buffers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* allocate skb for rx buffers */
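+ /* np->lack_rxbuf is advanced to the next descriptor without an skb;
+    each refilled descriptor is handed back to the chip via RXOWN. */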
+ while (np->really_rx_count != RX_RING_SIZE) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+
+ while (np->lack_rxbuf->skbuff)
+ np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->lack_rxbuf->skbuff = skb;
+ np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->lack_rxbuf->status = RXOWN;
+ ++np->really_rx_count;
+ }
+}
+
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int old_crvalue = np->crvalue;
+ unsigned int old_linkok = np->linkok;
+ unsigned long flags;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
+ ioread32(ioaddr + TCRRCR));
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->flags == HAS_MII_XCVR) {
+ getlinkstatus(dev);
+ if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
+ getlinktype(dev);
+ if (np->crvalue != old_crvalue) {
+ stop_nic_rxtx(ioaddr, np->crvalue);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+ }
+
+ allocate_rx_buffers(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ np->timer.expires = RUN_AT(10 * HZ);
+ add_timer(&np->timer);
+}
+
+
+/* Take lock before calling */
+/* Reset chip and disable rx, tx and interrupts */
+static void reset_and_disable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int delay=51;
+
+ /* Reset the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0, ioaddr + IMR);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
+ We surely wait too long (address+data phase). Who cares? */
+ while (--delay) {
+ ioread32(ioaddr + BCR);
+ rmb();
+ }
+}
+
+
+/* Take lock before calling */
+/* Restore chip after reset */
+static void enable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ reset_rx_descriptors(dev);
+
+ iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
+ ioaddr + TXLBA);
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ ioaddr + RXLBA);
+
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ iowrite32(0, ioaddr + RXPDR);
+ __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ iowrite32(0, ioaddr + TXPDR);
+}
+
+
+static void reset_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
+
+ spin_lock_irqsave(&np->lock, flags);
+ np->crvalue = np->crvalue_sv;
+ np->imrvalue = np->imrvalue_sv;
+
+ reset_and_disable_rxtx(dev);
+ /* works for me without this:
+ reset_tx_descriptors(dev); */
+ enable_rxtx(dev);
+ netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
+
+ np->reset_timer_armed = 0;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, ioread32(ioaddr + ISR));
+
+ {
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].status);
+ printk("\n");
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ reset_and_disable_rxtx(dev);
+ reset_tx_descriptors(dev);
+ enable_rxtx(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ netif_wake_queue(dev); /* or .._start_.. ?? */
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* initialize rx variables */
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->cur_rx = &np->rx_ring[0];
+ np->lack_rxbuf = np->rx_ring;
+ np->really_rx_count = 0;
+
+ /* initial rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
+ np->rx_ring[i].next_desc = np->rx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
+ np->rx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last rx descriptor */
+ np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+ np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
+
+ /* allocate skb for rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+
+ if (skb == NULL) {
+ np->lack_rxbuf = &np->rx_ring[i];
+ break;
+ }
+
+ ++np->really_rx_count;
+ np->rx_ring[i].skbuff = skb;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_ring[i].status = RXOWN;
+ np->rx_ring[i].control |= RXIC;
+ }
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].status = 0;
+ /* do we need np->tx_ring[i].control = XXX; ?? */
+ np->tx_ring[i].next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
+ np->tx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last tx descriptor */
+ np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ np->cur_tx_copy->skbuff = skb;
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ struct fealnx_desc *next;
+
+ /* for the first descriptor */
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ BPT, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
+
+ /* for the last descriptor */
+ next = np->cur_tx_copy->next_desc_logical;
+ next->skbuff = skb;
+ next->control = TXIC | TXLD | CRCEnable | PADEnable;
+ next->control |= (skb->len << PKTSShift); /* pkt size */
+ next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
+ skb->len - BPT, PCI_DMA_TODEVICE);
+
+ next->status = TXOWN;
+ np->cur_tx_copy->status = TXOWN;
+
+ np->cur_tx_copy = next->next_desc_logical;
+ np->free_tx_count -= 2;
+ } else {
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+ }
+#endif
+
+ if (np->free_tx_count < 2)
+ netif_stop_queue(dev);
+ ++np->really_tx_count;
+ iowrite32(0, np->mem + TXPDR);
+ dev->trans_start = jiffies;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return 0;
+}
+
+
+/* Take lock before calling */
+/* Chip probably hosed tx ring. Clean up. */
+static void reset_tx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur;
+ int i;
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ cur = &np->tx_ring[i];
+ if (cur->skbuff) {
+ pci_unmap_single(np->pci_dev, cur->buffer,
+ cur->skbuff->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(cur->skbuff);
+ /* or dev_kfree_skb_irq(cur->skbuff); ? */
+ cur->skbuff = NULL;
+ }
+ cur->status = 0;
+ cur->control = 0; /* needed? */
+ /* probably not needed. We do it for purely paranoid reasons */
+ cur->next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ cur->next_desc_logical = &np->tx_ring[i + 1];
+ }
+ /* for the last tx descriptor */
+ np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* Take lock and stop rx before calling this */
+static void reset_rx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur = np->cur_rx;
+ int i;
+
+ allocate_rx_buffers(dev);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (cur->skbuff)
+ cur->status = RXOWN;
+ cur = cur->next_desc_logical;
+ }
+
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ np->mem + RXLBA);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ long boguscnt = max_interrupt_work;
+ unsigned int num_tx = 0;
+ int handled = 0;
+
+ spin_lock(&np->lock);
+
+ iowrite32(0, ioaddr + IMR);
+
+ do {
+ u32 intr_status = ioread32(ioaddr + ISR);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status, ioaddr + ISR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
+ intr_status);
+
+ if (!(intr_status & np->imrvalue))
+ break;
+
+ handled = 1;
+
+// 90/1/16 delete,
+//
+// if (intr_status & FBE)
+// { /* fatal error */
+// stop_nic_tx(ioaddr, 0);
+// stop_nic_rx(ioaddr, 0);
+// break;
+// };
+
+ if (intr_status & TUNF)
+ iowrite32(0, ioaddr + TXPDR);
+
+ if (intr_status & CNTOVF) {
+ /* missed pkts */
+ np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ np->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ if (intr_status & (RI | RBU)) {
+ if (intr_status & RI)
+ netdev_rx(dev);
+ else {
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+
+ while (np->really_tx_count) {
+ long tx_status = np->cur_tx->status;
+ long tx_control = np->cur_tx->control;
+
+ if (!(tx_control & TXLD)) { /* this pkt is split across two tx descriptors */
+ struct fealnx_desc *next;
+
+ next = np->cur_tx->next_desc_logical;
+ tx_status = next->status;
+ tx_control = next->control;
+ }
+
+ if (tx_status & TXOWN)
+ break;
+
+ if (!(np->crvalue & CR_W_ENH)) {
+ if (tx_status & (CSL | LC | EC | UDF | HF)) {
+ np->stats.tx_errors++;
+ if (tx_status & EC)
+ np->stats.tx_aborted_errors++;
+ if (tx_status & CSL)
+ np->stats.tx_carrier_errors++;
+ if (tx_status & LC)
+ np->stats.tx_window_errors++;
+ if (tx_status & UDF)
+ np->stats.tx_fifo_errors++;
+ if ((tx_status & HF) && np->mii.full_duplex == 0)
+ np->stats.tx_heartbeat_errors++;
+
+ } else {
+ np->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+
+ np->stats.collisions +=
+ ((tx_status & NCRMask) >> NCRShift);
+ np->stats.tx_packets++;
+ }
+ } else {
+ np->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+ np->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
+ np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(np->cur_tx->skbuff);
+ np->cur_tx->skbuff = NULL;
+ --np->really_tx_count;
+ if (np->cur_tx->control & TXLD) {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ ++np->free_tx_count;
+ } else {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->free_tx_count += 2;
+ }
+ num_tx++;
+ } /* end of while loop */
+
+ if (num_tx && np->free_tx_count >= 2)
+ netif_wake_queue(dev);
+
+ /* read transmit status for enhanced mode only */
+ if (np->crvalue & CR_W_ENH) {
+ long data;
+
+ data = ioread32(ioaddr + TSR);
+ np->stats.tx_errors += (data & 0xff000000) >> 24;
+ np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
+ np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
+ np->stats.collisions += (data & 0x0000ffff);
+ }
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ if (!np->reset_timer_armed) {
+ np->reset_timer_armed = 1;
+ np->reset_timer.expires = RUN_AT(HZ/2);
+ add_timer(&np->reset_timer);
+ stop_nic_rxtx(ioaddr, 0);
+ netif_stop_queue(dev);
+ /* or netif_tx_disable(dev); ?? */
+ /* Prevent other paths from enabling tx,rx,intrs */
+ np->crvalue_sv = np->crvalue;
+ np->imrvalue_sv = np->imrvalue;
+ np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
+ np->imrvalue = 0;
+ }
+
+ break;
+ }
+ } while (1);
+
+ /* read the tally counters */
+ /* missed pkts */
+ np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ spin_unlock(&np->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* While the chip has released the next descriptor (RXOWN clear) and an
+    skb is attached, it's a new packet. Send it up. */
+ while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
+ s32 rx_status = np->cur_rx->status;
+
+ if (np->really_rx_count == 0)
+ break;
+
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
+
+ if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
+ || (rx_status & ErrorSummary)) {
+ if (rx_status & ErrorSummary) { /* there was a fatal error */
+ if (debug)
+ printk(KERN_DEBUG
+ "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, rx_status);
+
+ np->stats.rx_errors++; /* end of a packet. */
+ if (rx_status & (LONG | RUNT))
+ np->stats.rx_length_errors++;
+ if (rx_status & RXER)
+ np->stats.rx_frame_errors++;
+ if (rx_status & CRC)
+ np->stats.rx_crc_errors++;
+ } else {
+ int need_to_reset = 0;
+ int desno = 0;
+
+ if (rx_status & RXFSD) { /* this pkt is too long; it spans more than one rx buffer */
+ struct fealnx_desc *cur;
+
+ /* check whether this packet has been received completely */
+ cur = np->cur_rx;
+ while (desno <= np->really_rx_count) {
+ ++desno;
+ if ((!(cur->status & RXOWN))
+ && (cur->status & RXLSD))
+ break;
+ /* goto next rx descriptor */
+ cur = cur->next_desc_logical;
+ }
+ if (desno > np->really_rx_count)
+ need_to_reset = 1;
+ } else /* RXLSD not found: something is wrong */
+ need_to_reset = 1;
+
+ if (need_to_reset == 0) {
+ int i;
+
+ np->stats.rx_length_errors++;
+
+ /* free all rx descriptors related to this long pkt */
+ for (i = 0; i < desno; ++i) {
+ if (!np->cur_rx->skbuff) {
+ printk(KERN_DEBUG
+ "%s: I'm scared\n", dev->name);
+ break;
+ }
+ np->cur_rx->status = RXOWN;
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ }
+ continue;
+ } else { /* rx error, need to reset this chip */
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ break; /* exit the while loop */
+ }
+ } else { /* this received pkt is ok */
+
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
+
+#ifndef final_version
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, rx_status);
+#endif
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ /* Call copy + cksum if available. */
+
+#if ! defined(__alpha__)
+ eth_copy_and_sum(skb,
+ np->cur_rx->skbuff->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ np->cur_rx->skbuff->tail, pkt_len);
+#endif
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->cur_rx->skbuff, pkt_len);
+ np->cur_rx->skbuff = NULL;
+ --np->really_rx_count;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += pkt_len;
+ }
+
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ } /* end of while loop */
+
+ /* allocate skb for rx buffers */
+ allocate_rx_buffers(dev);
+
+ return 0;
+}
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* The chip only needs to report frames silently dropped. */
+ if (netif_running(dev)) {
+ np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+ np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ return &np->stats;
+}
+
+
+/* for dev->set_multicast_list */
+static void set_rx_mode(struct net_device *dev)
+{
+ spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
+ unsigned long flags;
+ spin_lock_irqsave(lp, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore(lp, flags);
+}
+
+
+/* Take lock before calling */
+static void __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_AB | CR_W_AM;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit;
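+ /* Hash: the complemented top 6 bits of the CRC index a 64-bit
+    filter that is split across MAR0/MAR1 below. */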
+ bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ mc_filter[bit >> 5] |= (1 << bit);
+ }
+ rx_mode = CR_W_AB | CR_W_AM;
+ }
+
+ stop_nic_rxtx(ioaddr, np->crvalue);
+
+ iowrite32(mc_filter[0], ioaddr + MAR0);
+ iowrite32(mc_filter[1], ioaddr + MAR1);
+ np->crvalue &= ~CR_W_RXMODEMASK;
+ np->crvalue |= rx_mode;
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+};
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0x0000, ioaddr + IMR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ del_timer_sync(&np->timer);
+ del_timer_sync(&np->reset_timer);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->rx_ring[i].skbuff;
+
+ np->rx_ring[i].status = 0;
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ np->rx_ring[i].skbuff = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ np->tx_ring[i].skbuff = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static struct pci_device_id fealnx_pci_tbl[] = {
+ {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ {} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
+
+
+static struct pci_driver fealnx_driver = {
+ .name = "fealnx",
+ .id_table = fealnx_pci_tbl,
+ .probe = fealnx_init_one,
+ .remove = __devexit_p(fealnx_remove_one),
+};
+
+static int __init fealnx_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_module_init(&fealnx_driver);
+}
+
+static void __exit fealnx_exit(void)
+{
+ pci_unregister_driver(&fealnx_driver);
+}
+
+module_init(fealnx_init);
+module_exit(fealnx_exit);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
new file mode 100644
index 000000000000..2c7008491378
--- /dev/null
+++ b/drivers/net/fec.c
@@ -0,0 +1,2259 @@
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is specific to the FADS implementation,
+ * since the board contains control registers external to the processor
+ * for the control of the LevelOne LXT970 transceiver. The MPC860T manual
+ * describes connections using the internal parallel port I/O, which
+ * is basically all of Port D.
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire/5270/5271/5272/5274/5275/5280/5282.
+ * Copyright (c) 2001-2004 Greg Ungerer (gerg@snapgear.com)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#if defined(CONFIG_M527x) || defined(CONFIG_M5272) || defined(CONFIG_M528x)
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include "fec.h"
+#else
+#include <asm/8xx_immap.h>
+#include <asm/mpc8xx.h>
+#include "commproc.h"
+#endif
+
+#if defined(CONFIG_FEC2)
+#define FEC_MAX_PORTS 2
+#else
+#define FEC_MAX_PORTS 1
+#endif
+
+/*
+ * Define the fixed address of the FEC hardware.
+ */
+static unsigned int fec_hw[] = {
+#if defined(CONFIG_M5272)
+ (MCF_MBAR + 0x840),
+#elif defined(CONFIG_M527x)
+ (MCF_MBAR + 0x1000),
+ (MCF_MBAR + 0x1800),
+#elif defined(CONFIG_M528x)
+ (MCF_MBAR + 0x1000),
+#else
+ &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
+#endif
+};
+
+static unsigned char fec_mac_default[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero then assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC 0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC 0xf0006000
+#elif defined (CONFIG_MTD_KeyTechnology)
+#define FEC_FLASHMAC 0xffe04000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC 0xf0020000
+#else
+#define FEC_FLASHMAC 0
+#endif
+
+unsigned char *fec_flashmac = (unsigned char *) FEC_FLASHMAC;
+
+/* Forward declarations of some structures to support different PHYs
+*/
+
+typedef struct {
+ uint mii_data;
+ void (*funct)(uint mii_reg, struct net_device *dev);
+} phy_cmd_t;
+
+typedef struct {
+ uint id;
+ char *name;
+
+ const phy_cmd_t *config;
+ const phy_cmd_t *startup;
+ const phy_cmd_t *ack_int;
+ const phy_cmd_t *shutdown;
+} phy_info_t;
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 16 /* Must be power of two */
+#define TX_RING_MOD_MASK 15 /* for this to work */
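+/* With 4 KB pages this yields FEC_ENET_RX_FRPPG = 2, i.e. two 2 KB receive
+ * buffers per page and an RX ring of 2 * 8 = 16 entries.
+ */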
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE 1518
+#define PKT_MINBUF_SIZE 64
+#define PKT_MAXBLR_SIZE 1520
+
+
+/*
+ * The 5270/5271/5280/5282 RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#else
+#define OPT_FRAME_SIZE 0
+#endif
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ volatile fec_t *hwp;
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ ushort skb_cur;
+ ushort skb_dirty;
+
+ /* CPM dual port RAM relative addresses.
+ */
+ cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t *tx_bd_base;
+ cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
+ cbd_t *dirty_tx; /* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ uint tx_full;
+ spinlock_t lock;
+
+ uint phy_id;
+ uint phy_id_done;
+ uint phy_status;
+ uint phy_speed;
+ phy_info_t *phy;
+ struct work_struct phy_task;
+
+ uint sequence_done;
+ uint mii_phy_task_queued;
+
+ uint phy_addr;
+
+ int index;
+ int opened;
+ int link;
+ int old_link;
+ int full_duplex;
+ unsigned char mac_addr[ETH_ALEN];
+};
+
+static int fec_enet_open(struct net_device *dev);
+static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void fec_enet_mii(struct net_device *dev);
+static irqreturn_t fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs);
+static void fec_enet_tx(struct net_device *dev);
+static void fec_enet_rx(struct net_device *dev);
+static int fec_enet_close(struct net_device *dev);
+static struct net_device_stats *fec_enet_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void fec_restart(struct net_device *dev, int duplex);
+static void fec_stop(struct net_device *dev);
+static void fec_set_mac_address(struct net_device *dev);
+
+
+/* MII processing. We keep this as simple as possible. Requests are
+ * placed on the list (if there is room). When the request is finished
+ * by the MII, an optional function may be called.
+ */
+typedef struct mii_list {
+ uint mii_regval;
+ void (*mii_func)(uint val, struct net_device *dev);
+ struct mii_list *mii_next;
+} mii_list_t;
+
+#define NMII 20
+mii_list_t mii_cmds[NMII];
+mii_list_t *mii_free;
+mii_list_t *mii_head;
+mii_list_t *mii_tail;
+
+static int mii_queue(struct net_device *dev, int request,
+ void (*func)(uint, struct net_device *));
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
+ (VAL & 0xffff))
+#define mk_mii_end 0
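+/* For example, mk_mii_read(MII_REG_SR) builds the command 0x60060000 and
+ * mk_mii_write(MII_REG_CR, 0x1200) builds 0x50021200.  A request such as
+ * mii_queue(dev, mk_mii_read(MII_REG_SR), handler) queues the read and calls
+ * "handler" (an illustrative name) back when the transfer completes.
+ */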
+
+/* Transmitter timeout.
+*/
+#define TX_TIMEOUT (2*HZ)
+
+/* Register definitions for the PHY.
+*/
+
+#define MII_REG_CR 0 /* Control Register */
+#define MII_REG_SR 1 /* Status Register */
+#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
+#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
+#define MII_REG_ANAR 4 /* A-N Advertisement Register */
+#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
+#define MII_REG_ANER 6 /* A-N Expansion Register */
+#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
+#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
+#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
+#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
+#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
+#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
+#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
+#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
+#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
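+/* For example, a fully negotiated 100 Mbit full duplex link is represented as
+ * PHY_STAT_ANC | PHY_STAT_LINK | PHY_STAT_100FDX = 0x8500.
+ */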
+
+
+static int
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *fecp;
+ volatile cbd_t *bdp;
+
+ fep = netdev_priv(dev);
+ fecp = (volatile fec_t*)dev->base_addr;
+
+ if (!fep->link) {
+ /* Link is down or autonegotiation is in progress. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = fep->cur_tx;
+
+#ifndef final_version
+ if (bdp->cbd_sc & BD_ENET_TX_READY) {
+ /* Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since dev->tbusy should be set.
+ */
+ printk("%s: tx queue full!.\n", dev->name);
+ return 1;
+ }
+#endif
+
+ /* Clear all of the status flags.
+ */
+ bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer.
+ */
+ bdp->cbd_bufaddr = __pa(skb->data);
+ bdp->cbd_datlen = skb->len;
+
+ /*
+ * On some FEC implementations data must be aligned on
+ * 4-byte boundaries. Use bounce buffers to copy data
+ * and get it aligned. Ugh.
+ */
+ if (bdp->cbd_bufaddr & 0x3) {
+ unsigned int index;
+ index = bdp - fep->tx_bd_base;
+ memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen);
+ bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
+ }
+
+ /* Save skb pointer.
+ */
+ fep->tx_skbuff[fep->skb_cur] = skb;
+
+ fep->stats.tx_bytes += skb->len;
+ fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+
+ /* Push the data cache so the CPM does not get stale memory
+ * data.
+ */
+ flush_dcache_range((unsigned long)skb->data,
+ (unsigned long)skb->data + skb->len);
+
+ spin_lock_irq(&fep->lock);
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+
+ bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+ dev->trans_start = jiffies;
+
+ /* Trigger transmission start */
+ fecp->fec_x_des_active = 0x01000000;
+
+ /* If this was the last BD in the ring, start at the beginning again.
+ */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+ bdp = fep->tx_bd_base;
+ } else {
+ bdp++;
+ }
+
+ if (bdp == fep->dirty_tx) {
+ fep->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ fep->cur_tx = (cbd_t *)bdp;
+
+ spin_unlock_irq(&fep->lock);
+
+ return 0;
+}
+
+static void
+fec_timeout(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ printk("%s: transmit timed out.\n", dev->name);
+ fep->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ cbd_t *bdp;
+
+ printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
+ (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
+ (unsigned long)fep->dirty_tx,
+ (unsigned long)fep->cur_rx);
+
+ bdp = fep->tx_bd_base;
+ printk(" tx: %u buffers\n", TX_RING_SIZE);
+ for (i = 0 ; i < TX_RING_SIZE; i++) {
+ printk(" %08x: %04x %04x %08x\n",
+ (uint) bdp,
+ bdp->cbd_sc,
+ bdp->cbd_datlen,
+ (int) bdp->cbd_bufaddr);
+ bdp++;
+ }
+
+ bdp = fep->rx_bd_base;
+ printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE);
+ for (i = 0 ; i < RX_RING_SIZE; i++) {
+ printk(" %08x: %04x %04x %08x\n",
+ (uint) bdp,
+ bdp->cbd_sc,
+ bdp->cbd_datlen,
+ (int) bdp->cbd_bufaddr);
+ bdp++;
+ }
+ }
+#endif
+ fec_restart(dev, 0);
+ netif_wake_queue(dev);
+}
+
+/* The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ volatile fec_t *fecp;
+ uint int_events;
+ int handled = 0;
+
+ fecp = (volatile fec_t*)dev->base_addr;
+
+ /* Get the interrupt events that caused us to be here.
+ */
+ while ((int_events = fecp->fec_ievent) != 0) {
+ fecp->fec_ievent = int_events;
+
+ /* Handle receive event in its own function.
+ */
+ if (int_events & FEC_ENET_RXF) {
+ handled = 1;
+ fec_enet_rx(dev);
+ }
+
+ /* Transmit OK, or non-fatal error. Update the buffer
+ descriptors. FEC handles all errors, we just discover
+ them as part of the transmit process.
+ */
+ if (int_events & FEC_ENET_TXF) {
+ handled = 1;
+ fec_enet_tx(dev);
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ handled = 1;
+ fec_enet_mii(dev);
+ }
+
+ }
+ return IRQ_RETVAL(handled);
+}
+
+
+static void
+fec_enet_tx(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile cbd_t *bdp;
+ struct sk_buff *skb;
+
+ fep = netdev_priv(dev);
+ spin_lock(&fep->lock);
+ bdp = fep->dirty_tx;
+
+ while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+ if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+
+ skb = fep->tx_skbuff[fep->skb_dirty];
+ /* Check for errors. */
+ if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN |
+ BD_ENET_TX_CSL)) {
+ fep->stats.tx_errors++;
+ if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+ } else {
+ fep->stats.tx_packets++;
+ }
+
+#ifndef final_version
+ if (bdp->cbd_sc & BD_ENET_TX_READY)
+ printk("HEY! Enet xmit interrupt and TX_READY.\n");
+#endif
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (bdp->cbd_sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* Free the sk buffer associated with this last transmit.
+ */
+ dev_kfree_skb_any(skb);
+ fep->tx_skbuff[fep->skb_dirty] = NULL;
+ fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+ /* Update pointer to next buffer descriptor to be transmitted.
+ */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp++;
+
+ /* Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (fep->tx_full) {
+ fep->tx_full = 0;
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+ }
+ fep->dirty_tx = (cbd_t *)bdp;
+ spin_unlock(&fep->lock);
+}
+
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static void
+fec_enet_rx(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *fecp;
+ volatile cbd_t *bdp;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+
+ fep = netdev_priv(dev);
+ fecp = (volatile fec_t*)dev->base_addr;
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+
+#ifndef final_version
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+ printk("FEC ENET: rcv is not +last\n");
+#endif
+
+ if (!fep->opened)
+ goto rx_processing_done;
+
+ /* Check for errors. */
+ if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ fep->stats.rx_length_errors++;
+ }
+ if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */
+ fep->stats.rx_frame_errors++;
+ if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */
+ fep->stats.rx_crc_errors++;
+ if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */
+ fep->stats.rx_fifo_errors++;
+ }
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (bdp->cbd_sc & BD_ENET_RX_CL) {
+ fep->stats.rx_errors++;
+ fep->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ fep->stats.rx_bytes += pkt_len;
+ data = (__u8*)__va(bdp->cbd_bufaddr);
+
+ /* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = dev_alloc_skb(pkt_len-4);
+
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ fep->stats.rx_dropped++;
+ } else {
+ skb->dev = dev;
+ skb_put(skb,pkt_len-4); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)__va(bdp->cbd_bufaddr),
+ pkt_len-4, 0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ rx_processing_done:
+
+ /* Clear the status flags for this buffer.
+ */
+ bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty.
+ */
+ bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+ /* Update BD pointer to next entry.
+ */
+ if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+ bdp = fep->rx_bd_base;
+ else
+ bdp++;
+
+#if 1
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ fecp->fec_r_des_active = 0x01000000;
+#endif
+ } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+ fep->cur_rx = (cbd_t *)bdp;
+
+#if 0
+ /* Doing this here will allow us to process all frames in the
+ * ring before the FEC is allowed to put more there. On a heavily
+ * loaded network, some frames may be lost. Unfortunately, this
+ * increases the interrupt overhead since we can potentially work
+ * our way back to the interrupt return only to come right back
+ * here.
+ */
+ fecp->fec_r_des_active = 0x01000000;
+#endif
+}
+
+
+static void
+fec_enet_mii(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *ep;
+ mii_list_t *mip;
+ uint mii_reg;
+
+ fep = netdev_priv(dev);
+ ep = fep->hwp;
+ mii_reg = ep->fec_mii_data;
+
+ if ((mip = mii_head) == NULL) {
+ printk("MII and no head!\n");
+ return;
+ }
+
+ if (mip->mii_func != NULL)
+ (*(mip->mii_func))(mii_reg, dev);
+
+ mii_head = mip->mii_next;
+ mip->mii_next = mii_free;
+ mii_free = mip;
+
+ if ((mip = mii_head) != NULL)
+ ep->fec_mii_data = mip->mii_regval;
+}
+
+static int
+mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
+{
+ struct fec_enet_private *fep;
+ unsigned long flags;
+ mii_list_t *mip;
+ int retval;
+
+ /* Add PHY address to register command.
+ */
+ fep = netdev_priv(dev);
+ regval |= fep->phy_addr << 23;
+
+ retval = 0;
+
+ save_flags(flags);
+ cli();
+
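+ /* Take an entry from the free list and append it to the pending
+ * list. If the list was empty, start the transaction immediately by
+ * writing the MII data register; otherwise fec_enet_mii() starts the
+ * next request when the current one completes.
+ */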
+ if ((mip = mii_free) != NULL) {
+ mii_free = mip->mii_next;
+ mip->mii_regval = regval;
+ mip->mii_func = func;
+ mip->mii_next = NULL;
+ if (mii_head) {
+ mii_tail->mii_next = mip;
+ mii_tail = mip;
+ }
+ else {
+ mii_head = mii_tail = mip;
+ fep->hwp->fec_mii_data = regval;
+ }
+ }
+ else {
+ retval = 1;
+ }
+
+ restore_flags(flags);
+
+ return(retval);
+}
+
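+/* Walk a mk_mii_end-terminated command table, queueing each MII
+ * transaction together with its (optional) completion callback.
+ */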
+static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
+{
+ int k;
+
+ if(!c)
+ return;
+
+ for(k = 0; (c+k)->mii_data != mk_mii_end; k++) {
+ mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
+ }
+}
+
+static void mii_parse_sr(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
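+ /* Standard MII status register (BMSR) bits: 0x0004 link status,
+ * 0x0010 remote fault, 0x0020 auto-negotiation complete.
+ */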
+ if (mii_reg & 0x0004)
+ *s |= PHY_STAT_LINK;
+ if (mii_reg & 0x0010)
+ *s |= PHY_STAT_FAULT;
+ if (mii_reg & 0x0020)
+ *s |= PHY_STAT_ANC;
+}
+
+static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+ if (mii_reg & 0x1000)
+ *s |= PHY_CONF_ANE;
+ if (mii_reg & 0x4000)
+ *s |= PHY_CONF_LOOP;
+}
+
+static void mii_parse_anar(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_CONF_SPMASK);
+
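+ /* Standard auto-negotiation advertisement (ANAR) ability bits:
+ * 0x0020 10BASE-T half, 0x0040 10BASE-T full, 0x0080 100BASE-TX
+ * half, 0x0100 100BASE-TX full.
+ */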
+ if (mii_reg & 0x0020)
+ *s |= PHY_CONF_10HDX;
+ if (mii_reg & 0x0040)
+ *s |= PHY_CONF_10FDX;
+ if (mii_reg & 0x0080)
+ *s |= PHY_CONF_100HDX;
+ if (mii_reg & 0x0100)
+ *s |= PHY_CONF_100FDX;
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT970 is used by many boards */
+
+#define MII_LXT970_MIRROR 16 /* Mirror register */
+#define MII_LXT970_IER 17 /* Interrupt Enable Register */
+#define MII_LXT970_ISR 18 /* Interrupt Status Register */
+#define MII_LXT970_CONFIG 19 /* Configuration Register */
+#define MII_LXT970_CSR 20 /* Chip Status Register */
+
+static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_STAT_SPMASK);
+
+ if (mii_reg & 0x0800) {
+ if (mii_reg & 0x1000)
+ *s |= PHY_STAT_100FDX;
+ else
+ *s |= PHY_STAT_100HDX;
+ } else {
+ if (mii_reg & 0x1000)
+ *s |= PHY_STAT_10FDX;
+ else
+ *s |= PHY_STAT_10HDX;
+ }
+}
+
+static phy_info_t phy_info_lxt970 = {
+ 0x07810000,
+ "LXT970",
+
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup - enable interrupts */
+ { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* read SR and ISR to acknowledge */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_read(MII_LXT970_ISR), NULL },
+
+ /* find out the current status */
+ { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards */
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR 16 /* Port Control Register */
+#define MII_LXT971_SR2 17 /* Status Register 2 */
+#define MII_LXT971_IER 18 /* Interrupt Enable Register */
+#define MII_LXT971_ISR 19 /* Interrupt Status Register */
+#define MII_LXT971_LCR 20 /* LED Control Register */
+#define MII_LXT971_TCR 30 /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+
+ if (mii_reg & 0x0400) {
+ fep->link = 1;
+ *s |= PHY_STAT_LINK;
+ } else {
+ fep->link = 0;
+ }
+ if (mii_reg & 0x0080)
+ *s |= PHY_STAT_ANC;
+ if (mii_reg & 0x4000) {
+ if (mii_reg & 0x0200)
+ *s |= PHY_STAT_100FDX;
+ else
+ *s |= PHY_STAT_100HDX;
+ } else {
+ if (mii_reg & 0x0200)
+ *s |= PHY_STAT_10FDX;
+ else
+ *s |= PHY_STAT_10HDX;
+ }
+ if (mii_reg & 0x0008)
+ *s |= PHY_STAT_FAULT;
+}
+
+static phy_info_t phy_info_lxt971 = {
+ 0x0001378e,
+ "LXT971",
+
+ (const phy_cmd_t []) { /* config */
+ /* limit to 10MBit because my prototype board
+ * doesn't work with 100. */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup - enable interrupts */
+ { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
+ /* The 971 reports the link as down on the first read
+ * after power-up.
+ * Read here to get a valid value in ack_int. */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* find out the current status */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+ /* we only need to read ISR to acknowledge */
+ { mk_mii_read(MII_LXT971_ISR), NULL },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
+
+/* register definitions */
+
+#define MII_QS6612_MCR 17 /* Mode Control Register */
+#define MII_QS6612_FTR 27 /* Factory Test Register */
+#define MII_QS6612_MCO 28 /* Misc. Control Register */
+#define MII_QS6612_ISR 29 /* Interrupt Source Register */
+#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
+#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
+
+static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_STAT_SPMASK);
+
+ switch((mii_reg >> 2) & 7) {
+ case 1: *s |= PHY_STAT_10HDX; break;
+ case 2: *s |= PHY_STAT_100HDX; break;
+ case 5: *s |= PHY_STAT_10FDX; break;
+ case 6: *s |= PHY_STAT_100FDX; break;
+ }
+}
+
+static phy_info_t phy_info_qs6612 = {
+ 0x00181440,
+ "QS6612",
+
+ (const phy_cmd_t []) { /* config */
+ /* The PHY powers up isolated on the RPX,
+ * so send a command to allow operation.
+ */
+ { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
+
+ /* parse cr and anar to get some info */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup - enable interrupts */
+ { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* we need to read ISR, SR and ANER to acknowledge */
+ { mk_mii_read(MII_QS6612_ISR), NULL },
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_read(MII_REG_ANER), NULL },
+
+ /* read pcr to get info */
+ { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+/* ------------------------------------------------------------------------- */
+/* AMD AM79C874 phy */
+
+/* register definitions for the 874 */
+
+#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
+#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
+#define MII_AM79C874_DR 18 /* Diagnostic Register */
+#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
+#define MII_AM79C874_MCR 21 /* Mode Control Register */
+#define MII_AM79C874_DC 23 /* Disconnect Counter */
+#define MII_AM79C874_REC 24 /* Receive Error Counter */
+
+static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ *s &= ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
+
+ if (mii_reg & 0x0080)
+ *s |= PHY_STAT_ANC;
+ if (mii_reg & 0x0400)
+ *s |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
+ else
+ *s |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
+}
+
+static phy_info_t phy_info_am79c874 = {
+ 0x00022561,
+ "AM79C874",
+
+ (const phy_cmd_t []) { /* config */
+ /* limit to 10MBit because my prototype board
+ * doesn't work with 100. */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup - enable interrupts */
+ { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* find out the current status */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
+ /* we only need to read ISR to acknowledge */
+ { mk_mii_read(MII_AM79C874_ICSR), NULL },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+/* ------------------------------------------------------------------------- */
+/* Kendin KS8721BL phy */
+
+/* register definitions for the 8721 */
+
+#define MII_KS8721BL_RXERCR 21
+#define MII_KS8721BL_ICSR 22
+#define MII_KS8721BL_PHYCR 31
+
+static phy_info_t phy_info_ks8721bl = {
+ 0x00022161,
+ "KS8721BL",
+
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* find out the current status */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ /* we only need to read ISR to acknowledge */
+ { mk_mii_read(MII_KS8721BL_ICSR), NULL },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+/* ------------------------------------------------------------------------- */
+
+static phy_info_t *phy_info[] = {
+ &phy_info_lxt970,
+ &phy_info_lxt971,
+ &phy_info_qs6612,
+ &phy_info_am79c874,
+ &phy_info_ks8721bl,
+ NULL
+};
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_RPXCLASSIC
+static void
+mii_link_interrupt(void *dev_id);
+#else
+static irqreturn_t
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
+#endif
+
+#if defined(CONFIG_M5272)
+
+/*
+ * Code specific to Coldfire 5272 setup.
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ volatile unsigned long *icrp;
+
+ /* Setup interrupt handlers. */
+ if (request_irq(86, fec_enet_interrupt, 0, "fec(RX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RX) IRQ(86)!\n");
+ if (request_irq(87, fec_enet_interrupt, 0, "fec(TX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(TX) IRQ(87)!\n");
+ if (request_irq(88, fec_enet_interrupt, 0, "fec(OTHER)", dev) != 0)
+ printk("FEC: Could not allocate FEC(OTHER) IRQ(88)!\n");
+ if (request_irq(66, mii_link_interrupt, 0, "fec(MII)", dev) != 0)
+ printk("FEC: Could not allocate MII IRQ(66)!\n");
+
+ /* Unmask interrupt at ColdFire 5272 SIM */
+ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3);
+ *icrp = 0x00000ddd;
+ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
+ *icrp = (*icrp & 0x70777777) | 0x0d000000;
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ * See 5272 manual section 11.5.8: MSCR
+ */
+ fep->phy_speed = ((((MCF_CLK / 4) / (2500000 / 10)) + 5) / 10) * 2;
+ fecp->fec_mii_speed = fep->phy_speed;
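+ /* Worked example, purely illustrative, assuming MCF_CLK is 66 MHz:
+ * 66000000 / 4 = 16500000; 16500000 / 250000 = 66;
+ * (66 + 5) / 10 = 7; 7 * 2 = 14, so fec_mii_speed is set to 14.
+ */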
+
+ fec_restart(dev, 0);
+}
+
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[6];
+ int i;
+
+ fecp = fep->hwp;
+
+ if (fec_flashmac) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = fec_flashmac;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ } else {
+ *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
+ *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ for (i=0; i<ETH_ALEN; i++)
+ dev->dev_addr[i] = fep->mac_addr[i] = *iap++;
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default) {
+ dev->dev_addr[ETH_ALEN-1] = fep->mac_addr[ETH_ALEN-1] =
+ iap[ETH_ALEN-1] + fep->index;
+ }
+}
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+ volatile unsigned long *icrp;
+ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
+ *icrp = (*icrp & 0x70777777) | 0x08000000;
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+ volatile unsigned long *icrp;
+ /* Acknowledge the interrupt */
+ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
+ *icrp = (*icrp & 0x77777777) | 0x08000000;
+}
+
+static void __inline__ fec_localhw_setup(void)
+{
+}
+
+/*
+ * Do not need to make region uncached on 5272.
+ */
+static void __inline__ fec_uncache(unsigned long addr)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+#elif defined(CONFIG_M527x) || defined(CONFIG_M528x)
+
+/*
+ * Code specific to Coldfire 5270/5271/5274/5275 and 5280/5282 setups.
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ int b;
+
+ fep = netdev_priv(dev);
+ b = (fep->index) ? 128 : 64;
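+ /* IRQ numbers for the first FEC start at base 64, and at 128 for
+ * the second controller; the individual FEC events occupy offsets
+ * 23..35 above that base, matching the requests below.
+ */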
+
+ /* Setup interrupt handlers. */
+ if (request_irq(b+23, fec_enet_interrupt, 0, "fec(TXF)", dev) != 0)
+ printk("FEC: Could not allocate FEC(TXF) IRQ(%d+23)!\n", b);
+ if (request_irq(b+24, fec_enet_interrupt, 0, "fec(TXB)", dev) != 0)
+ printk("FEC: Could not allocate FEC(TXB) IRQ(%d+24)!\n", b);
+ if (request_irq(b+25, fec_enet_interrupt, 0, "fec(TXFIFO)", dev) != 0)
+ printk("FEC: Could not allocate FEC(TXFIFO) IRQ(%d+25)!\n", b);
+ if (request_irq(b+26, fec_enet_interrupt, 0, "fec(TXCR)", dev) != 0)
+ printk("FEC: Could not allocate FEC(TXCR) IRQ(%d+26)!\n", b);
+
+ if (request_irq(b+27, fec_enet_interrupt, 0, "fec(RXF)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RXF) IRQ(%d+27)!\n", b);
+ if (request_irq(b+28, fec_enet_interrupt, 0, "fec(RXB)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RXB) IRQ(%d+28)!\n", b);
+
+ if (request_irq(b+29, fec_enet_interrupt, 0, "fec(MII)", dev) != 0)
+ printk("FEC: Could not allocate FEC(MII) IRQ(%d+29)!\n", b);
+ if (request_irq(b+30, fec_enet_interrupt, 0, "fec(LC)", dev) != 0)
+ printk("FEC: Could not allocate FEC(LC) IRQ(%d+30)!\n", b);
+ if (request_irq(b+31, fec_enet_interrupt, 0, "fec(HBERR)", dev) != 0)
+ printk("FEC: Could not allocate FEC(HBERR) IRQ(%d+31)!\n", b);
+ if (request_irq(b+32, fec_enet_interrupt, 0, "fec(GRA)", dev) != 0)
+ printk("FEC: Could not allocate FEC(GRA) IRQ(%d+32)!\n", b);
+ if (request_irq(b+33, fec_enet_interrupt, 0, "fec(EBERR)", dev) != 0)
+ printk("FEC: Could not allocate FEC(EBERR) IRQ(%d+33)!\n", b);
+ if (request_irq(b+34, fec_enet_interrupt, 0, "fec(BABT)", dev) != 0)
+ printk("FEC: Could not allocate FEC(BABT) IRQ(%d+34)!\n", b);
+ if (request_irq(b+35, fec_enet_interrupt, 0, "fec(BABR)", dev) != 0)
+ printk("FEC: Could not allocate FEC(BABR) IRQ(%d+35)!\n", b);
+
+ /* Unmask interrupts at ColdFire 5280/5282 interrupt controller */
+ {
+ volatile unsigned char *icrp;
+ volatile unsigned long *imrp;
+ int i;
+
+ b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
+ icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
+ MCFINTC_ICR0);
+ for (i = 23; (i < 36); i++)
+ icrp[i] = 0x23;
+
+ imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
+ MCFINTC_IMRH);
+ *imrp &= ~0x0000000f;
+ imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
+ MCFINTC_IMRL);
+ *imrp &= ~0xff800001;
+ }
+
+#if defined(CONFIG_M528x)
+ /* Set up gpio outputs for MII lines */
+ {
+ volatile unsigned short *gpio_paspar;
+
+ gpio_paspar = (volatile unsigned short *) (MCF_IPSBAR +
+ 0x100056);
+ *gpio_paspar = 0x0f00;
+ }
+#endif
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ * See 5282 manual section 17.5.4.7: MSCR
+ */
+ fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
+ fecp->fec_mii_speed = fep->phy_speed;
+
+ fec_restart(dev, 0);
+}
+
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[6];
+ int i;
+
+ fecp = fep->hwp;
+
+ if (fec_flashmac) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = fec_flashmac;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ } else {
+ *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
+ *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ for (i=0; i<ETH_ALEN; i++)
+ dev->dev_addr[i] = fep->mac_addr[i] = *iap++;
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default) {
+ dev->dev_addr[ETH_ALEN-1] = fep->mac_addr[ETH_ALEN-1] =
+ iap[ETH_ALEN-1] + fep->index;
+ }
+}
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+}
+
+static void __inline__ fec_localhw_setup(void)
+{
+}
+
+/*
+ * Do not need to make region uncached on 527x/528x.
+ */
+static void __inline__ fec_uncache(unsigned long addr)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+#else
+
+/*
+ * Code specific to the MPC860T setup.
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ volatile immap_t *immap;
+
+ immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */
+
+ if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
+ panic("Could not allocate FEC IRQ!");
+
+#ifdef CONFIG_RPXCLASSIC
+ /* Make Port C, bit 15 an input that causes interrupts.
+ */
+ immap->im_ioport.iop_pcpar &= ~0x0001;
+ immap->im_ioport.iop_pcdir &= ~0x0001;
+ immap->im_ioport.iop_pcso &= ~0x0001;
+ immap->im_ioport.iop_pcint |= 0x0001;
+ cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
+
+ /* Make LEDS reflect Link status.
+ */
+ *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
+#endif
+#ifdef CONFIG_FADS
+ if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
+ panic("Could not allocate MII IRQ!");
+#endif
+}
+
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned char *iap, tmpaddr[6];
+ bd_t *bd;
+ int i;
+
+ bd = (bd_t *)__res;
+ iap = bd->bi_enetaddr;
+
+#ifdef CONFIG_RPXCLASSIC
+ /* The Embedded Planet boards have only one MAC address in
+ * the EEPROM, but can have two Ethernet ports. For the
+ * FEC port, we create another address by setting one of
+ * the address bits above something that would have (up to
+ * now) been allocated.
+ */
+ for (i=0; i<6; i++)
+ tmpaddr[i] = *iap++;
+ tmpaddr[3] |= 0x80;
+ iap = tmpaddr;
+#endif
+
+ for (i=0; i<6; i++)
+ dev->dev_addr[i] = fep->mac_addr[i] = *iap++;
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ extern uint _get_IMMR(void);
+ volatile immap_t *immap;
+ volatile fec_t *fecp;
+ bd_t *bd = (bd_t *)__res;
+
+ fecp = fep->hwp;
+ immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */
+
+ /* Configure all of port D for MII.
+ */
+ immap->im_ioport.iop_pdpar = 0x1fff;
+
+ /* Bits moved from Rev. D onward.
+ */
+ if ((_get_IMMR() & 0xffff) < 0x0501)
+ immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */
+ else
+ immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */
+
+ /* Set MII speed to 2.5 MHz
+ */
+ fecp->fec_mii_speed = fep->phy_speed =
+ ((bd->bi_busfreq * 1000000) / 2500000) & 0x7e;
+}
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+
+ /* Enable MII command finished interrupt
+ */
+ fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+}
+
+static void __inline__ fec_localhw_setup(void)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+ fecp->fec_r_hash = PKT_MAXBUF_SIZE;
+ /* Enable big endian and don't care about SDMA FC.
+ */
+ fecp->fec_fun_code = 0x78000000;
+}
+
+static void __inline__ fec_uncache(unsigned long addr)
+{
+ pte_t *pte;
+ pte = va_to_pte(addr);
+ pte_val(*pte) |= _PAGE_NO_CACHE;
+ flush_tlb_page(init_mm.mmap, addr);
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+static void mii_display_status(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ if (!fep->link && !fep->old_link) {
+ /* Link is still down - don't print anything */
+ return;
+ }
+
+ printk("%s: status: ", dev->name);
+
+ if (!fep->link) {
+ printk("link down");
+ } else {
+ printk("link up");
+
+ switch(*s & PHY_STAT_SPMASK) {
+ case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
+ case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
+ case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
+ case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
+ default:
+ printk(", Unknown speed/duplex");
+ }
+
+ if (*s & PHY_STAT_ANC)
+ printk(", auto-negotiation complete");
+ }
+
+ if (*s & PHY_STAT_FAULT)
+ printk(", remote fault");
+
+ printk(".\n");
+}
+
+static void mii_display_config(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile uint *s = &(fep->phy_status);
+
+ /*
+ ** When we get here, phy_task is already removed from
+ ** the workqueue. It is thus safe to reuse it.
+ */
+ fep->mii_phy_task_queued = 0;
+ printk("%s: config: auto-negotiation ", dev->name);
+
+ if (*s & PHY_CONF_ANE)
+ printk("on");
+ else
+ printk("off");
+
+ if (*s & PHY_CONF_100FDX)
+ printk(", 100FDX");
+ if (*s & PHY_CONF_100HDX)
+ printk(", 100HDX");
+ if (*s & PHY_CONF_10FDX)
+ printk(", 10FDX");
+ if (*s & PHY_CONF_10HDX)
+ printk(", 10HDX");
+ if (!(*s & PHY_CONF_SPMASK))
+ printk(", No speed/duplex selected?");
+
+ if (*s & PHY_CONF_LOOP)
+ printk(", loopback enabled");
+
+ printk(".\n");
+
+ fep->sequence_done = 1;
+}
+
+static void mii_relink(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int duplex;
+
+ /*
+ ** When we get here, phy_task is already removed from
+ ** the workqueue. It is thus safe to reuse it.
+ */
+ fep->mii_phy_task_queued = 0;
+ fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
+ mii_display_status(dev);
+ fep->old_link = fep->link;
+
+ if (fep->link) {
+ duplex = 0;
+ if (fep->phy_status
+ & (PHY_STAT_100FDX | PHY_STAT_10FDX))
+ duplex = 1;
+ fec_restart(dev, duplex);
+ }
+ else
+ fec_stop(dev);
+
+#if 0
+ enable_irq(fep->mii_irq);
+#endif
+
+}
+
+/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
+static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ /*
+ ** We cannot queue phy_task twice in the workqueue. It
+ ** would cause an endless loop in the workqueue.
+ ** Fortunately, if the last mii_relink entry has not been
+ ** executed yet, it will do the job for the current interrupt,
+ ** which is just what we want.
+ */
+ if (fep->mii_phy_task_queued)
+ return;
+
+ fep->mii_phy_task_queued = 1;
+ INIT_WORK(&fep->phy_task, (void*)mii_relink, dev);
+ schedule_work(&fep->phy_task);
+}
+
+/* mii_queue_config is called in user context from fec_enet_open */
+static void mii_queue_config(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ if (fep->mii_phy_task_queued)
+ return;
+
+ fep->mii_phy_task_queued = 1;
+ INIT_WORK(&fep->phy_task, (void*)mii_display_config, dev);
+ schedule_work(&fep->phy_task);
+}
+
+
+
+phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
+ { mk_mii_end, } };
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+ { mk_mii_end, } };
+
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ int i;
+
+ fep = netdev_priv(dev);
+ fep->phy_id |= (mii_reg & 0xffff);
+ printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
+
+ for(i = 0; phy_info[i]; i++) {
+ if(phy_info[i]->id == (fep->phy_id >> 4))
+ break;
+ }
+
+ if (phy_info[i])
+ printk(" -- %s\n", phy_info[i]->name);
+ else
+ printk(" -- unknown PHY!\n");
+
+ fep->phy = phy_info[i];
+ fep->phy_id_done = 1;
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID. This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *fecp;
+ uint phytype;
+
+ fep = netdev_priv(dev);
+ fecp = fep->hwp;
+
+ if (fep->phy_addr < 32) {
+ if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
+
+ /* Got first part of ID, now get remainder.
+ */
+ fep->phy_id = phytype << 16;
+ mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
+ mii_discover_phy3);
+ }
+ else {
+ fep->phy_addr++;
+ mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+ mii_discover_phy);
+ }
+ } else {
+ printk("FEC: No PHY device found.\n");
+ /* Disable external MII interface */
+ fecp->fec_mii_speed = fep->phy_speed = 0;
+ fec_disable_phy_intr();
+ }
+}
+
+/* This interrupt occurs when the PHY detects a link change.
+*/
+#ifdef CONFIG_RPXCLASSIC
+static void
+mii_link_interrupt(void *dev_id)
+#else
+static irqreturn_t
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+#endif
+{
+ struct net_device *dev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fec_phy_ack_intr();
+
+#if 0
+ disable_irq(fep->mii_irq); /* disable now, enable later */
+#endif
+
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+
+ return IRQ_HANDLED;
+}
+
+static int
+fec_enet_open(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+ fec_set_mac_address(dev);
+
+ fep->sequence_done = 0;
+ fep->link = 0;
+
+ if (fep->phy) {
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, fep->phy->config);
+ mii_do_cmd(dev, phy_cmd_config); /* display configuration */
+
+ /* FIXME: use netif_carrier_{on,off} ; this polls
+ * until link is up which is wrong... could be
+ * 30 seconds or more we are trapped in here. -jgarzik
+ */
+ while(!fep->sequence_done)
+ schedule();
+
+ mii_do_cmd(dev, fep->phy->startup);
+
+ /* Set the initial link state to true. A lot of hardware
+ * based on this device does not implement a PHY interrupt,
+ * so we are never notified of a link change.
+ */
+ fep->link = 1;
+ } else {
+ fep->link = 1; /* let's just try it and see */
+ /* no phy, go full duplex, it's most likely a hub chip */
+ fec_restart(dev, 1);
+ }
+
+ netif_start_queue(dev);
+ fep->opened = 1;
+ return 0; /* Success */
+}
+
+static int
+fec_enet_close(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ /* Don't know what to do yet.
+ */
+ fep->opened = 0;
+ netif_stop_queue(dev);
+ fec_stop(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ return &fep->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering. Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+#define HASH_BITS 6 /* #bits in hash */
+#define CRC32_POLY 0xEDB88320
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *ep;
+ struct dev_mc_list *dmi;
+ unsigned int i, j, bit, data, crc;
+ unsigned char hash;
+
+ fep = netdev_priv(dev);
+ ep = fep->hwp;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ ep->fec_r_cntrl |= 0x0008;
+ } else {
+
+ ep->fec_r_cntrl &= ~0x0008;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ ep->fec_hash_table_high = 0xffffffff;
+ ep->fec_hash_table_low = 0xffffffff;
+ } else {
+ /* Clear filter and add the addresses in hash register.
+ */
+ ep->fec_hash_table_high = 0;
+ ep->fec_hash_table_low = 0;
+
+ dmi = dev->mc_list;
+
+ for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
+ {
+ /* Only support group multicast for now.
+ */
+ if (!(dmi->dmi_addr[0] & 1))
+ continue;
+
+ /* calculate crc32 value of mac address
+ */
+ crc = 0xffffffff;
+
+ for (i = 0; i < dmi->dmi_addrlen; i++)
+ {
+ data = dmi->dmi_addr[i];
+ for (bit = 0; bit < 8; bit++, data >>= 1)
+ {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
+ }
+ }
+
+ /* only upper 6 bits (HASH_BITS) are used,
+ which point to a specific bit in the hash registers
+ */
+ hash = (crc >> (32 - HASH_BITS)) & 0x3f;
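+ /* Illustrative example: a CRC of 0xf8000000 has upper six bits
+ * 0x3e (62), so bit 30 of fec_hash_table_high gets set
+ * (62 - 32 = 30).
+ */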
+
+ if (hash > 31)
+ ep->fec_hash_table_high |= 1 << (hash - 32);
+ else
+ ep->fec_hash_table_low |= 1 << hash;
+ }
+ }
+ }
+}
+
+/* Set a MAC change in hardware.
+ */
+static void
+fec_set_mac_address(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ volatile fec_t *fecp;
+
+ fep = netdev_priv(dev);
+ fecp = fep->hwp;
+
+ /* Set station address. */
+ fecp->fec_addr_low = fep->mac_addr[3] | (fep->mac_addr[2] << 8) |
+ (fep->mac_addr[1] << 16) | (fep->mac_addr[0] << 24);
+ fecp->fec_addr_high = (fep->mac_addr[5] << 16) |
+ (fep->mac_addr[4] << 24);
+
+}
+
+/* Initialize the FEC Ethernet on 860T (or ColdFire 5272).
+ *
+ * XXX: We need to clean up on failure exits here.
+ */
+int __init fec_enet_init(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned long mem_addr;
+ volatile cbd_t *bdp;
+ cbd_t *cbd_base;
+ volatile fec_t *fecp;
+ int i, j;
+ static int index = 0;
+
+ /* Only allow us to be probed once. */
+ if (index >= FEC_MAX_PORTS)
+ return -ENXIO;
+
+ /* Create an Ethernet device instance.
+ */
+ fecp = (volatile fec_t *) fec_hw[index];
+
+ fep->index = index;
+ fep->hwp = fecp;
+
+ /* Whack a reset. We should wait for this.
+ */
+ fecp->fec_ecntrl = 1;
+ udelay(10);
+
+ /* Clear and enable interrupts */
+ fecp->fec_ievent = 0xffc0;
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+ fecp->fec_hash_table_high = 0;
+ fecp->fec_hash_table_low = 0;
+ fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
+ fecp->fec_ecntrl = 2;
+ fecp->fec_r_des_active = 0x01000000;
+
+ /* Set the Ethernet address. If using multiple Enets on the 8xx,
+ * this needs some work to get unique addresses.
+ *
+ * This is our default MAC address unless the user changes
+ * it via eth_mac_addr (our dev->set_mac_addr handler).
+ */
+ fec_get_mac(dev);
+
+ /* Allocate memory for buffer descriptors.
+ */
+ if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
+ printk("FEC init error. Need more space.\n");
+ printk("FEC initialization failed.\n");
+ return 1;
+ }
+ mem_addr = __get_free_page(GFP_KERNEL);
+ cbd_base = (cbd_t *)mem_addr;
+ /* XXX: missing check for allocation failure */
+
+ fec_uncache(mem_addr);
+
+ /* Set receive and transmit descriptor base.
+ */
+ fep->rx_bd_base = cbd_base;
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
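+ /* Both rings share the single page allocated above: RX_RING_SIZE
+ * receive descriptors first, then TX_RING_SIZE transmit descriptors.
+ */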
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ fep->skb_cur = fep->skb_dirty = 0;
+
+ /* Initialize the receive buffer descriptors.
+ */
+ bdp = fep->rx_bd_base;
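+ /* Each page is carved into FEC_ENET_RX_FRPPG buffers of
+ * FEC_ENET_RX_FRSIZE bytes; every buffer gets its own descriptor,
+ * marked EMPTY so the controller may fill it.
+ */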
+ for (i=0; i<FEC_ENET_RX_PAGES; i++) {
+
+ /* Allocate a page.
+ */
+ mem_addr = __get_free_page(GFP_KERNEL);
+ /* XXX: missing check for allocation failure */
+
+ fec_uncache(mem_addr);
+
+ /* Initialize the BD for every fragment in the page.
+ */
+ for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_bufaddr = __pa(mem_addr);
+ mem_addr += FEC_ENET_RX_FRSIZE;
+ bdp++;
+ }
+ }
+
+ /* Set the last buffer to wrap.
+ */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit.
+ */
+ bdp = fep->tx_bd_base;
+ for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
+ if (j >= FEC_ENET_TX_FRPPG) {
+ mem_addr = __get_free_page(GFP_KERNEL);
+ j = 1;
+ } else {
+ mem_addr += FEC_ENET_TX_FRSIZE;
+ j++;
+ }
+ fep->tx_bounce[i] = (unsigned char *) mem_addr;
+
+ /* Initialize the BD for every fragment in the page.
+ */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap.
+ */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* Set receive and transmit descriptor base.
+ */
+ fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+
+ /* Install our interrupt handlers. This varies depending on
+ * the architecture.
+ */
+ fec_request_intrs(dev);
+
+ dev->base_addr = (unsigned long)fecp;
+
+ /* The FEC Ethernet specific entries in the device structure. */
+ dev->open = fec_enet_open;
+ dev->hard_start_xmit = fec_enet_start_xmit;
+ dev->tx_timeout = fec_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->stop = fec_enet_close;
+ dev->get_stats = fec_enet_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ for (i=0; i<NMII-1; i++)
+ mii_cmds[i].mii_next = &mii_cmds[i+1];
+ mii_free = mii_cmds;
+
+ /* setup MII interface */
+ fec_set_mii(dev, fep);
+
+ printk("%s: FEC ENET Version 0.2, ", dev->name);
+ for (i=0; i<5; i++)
+ printk("%02x:", dev->dev_addr[i]);
+ printk("%02x\n", dev->dev_addr[5]);
+
+ /* Queue up command to detect the PHY and initialize the
+ * remainder of the interface.
+ */
+ fep->phy_id_done = 0;
+ fep->phy_addr = 0;
+ mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
+
+ index++;
+ return 0;
+}
+
+/* This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+static void
+fec_restart(struct net_device *dev, int duplex)
+{
+ struct fec_enet_private *fep;
+ volatile cbd_t *bdp;
+ volatile fec_t *fecp;
+ int i;
+
+ fep = netdev_priv(dev);
+ fecp = fep->hwp;
+
+ /* Whack a reset. We should wait for this.
+ */
+ fecp->fec_ecntrl = 1;
+ udelay(10);
+
+ /* Enable interrupts we wish to service.
+ */
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+
+ /* Clear any outstanding interrupt.
+ */
+ fecp->fec_ievent = 0xffc0;
+ fec_enable_phy_intr();
+
+ /* Set station address.
+ */
+ fecp->fec_addr_low = fep->mac_addr[3] | (fep->mac_addr[2] << 8) |
+ (fep->mac_addr[1] << 16) | (fep->mac_addr[0] << 24);
+ fecp->fec_addr_high = (fep->mac_addr[5] << 16) |
+ (fep->mac_addr[4] << 24);
+
+ for (i=0; i<ETH_ALEN; i++)
+ dev->dev_addr[i] = fep->mac_addr[i];
+
+ /* Reset all multicast.
+ */
+ fecp->fec_hash_table_high = 0;
+ fecp->fec_hash_table_low = 0;
+
+ /* Set maximum receive buffer size.
+ */
+ fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
+
+ fec_localhw_setup();
+
+ /* Set receive and transmit descriptor base.
+ */
+ fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* Reset SKB transmit buffers.
+ */
+ fep->skb_cur = fep->skb_dirty = 0;
+ for (i=0; i<=TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i] != NULL) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+ }
+
+ /* Initialize the receive buffer descriptors.
+ */
+ bdp = fep->rx_bd_base;
+ for (i=0; i<RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page.
+ */
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap.
+ */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit.
+ */
+ bdp = fep->tx_bd_base;
+ for (i=0; i<TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page.
+ */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap.
+ */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* Enable MII mode.
+ */
+ if (duplex) {
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */
+ fecp->fec_x_cntrl = 0x04; /* FD enable */
+ }
+ else {
+ /* MII enable|No Rcv on Xmit */
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
+ fecp->fec_x_cntrl = 0x00;
+ }
+ fep->full_duplex = duplex;
+
+ /* Set MII speed.
+ */
+ fecp->fec_mii_speed = fep->phy_speed;
+
+ /* And last, enable the transmit and receive processing.
+ */
+ fecp->fec_ecntrl = 2;
+ fecp->fec_r_des_active = 0x01000000;
+}
+
+static void
+fec_stop(struct net_device *dev)
+{
+ volatile fec_t *fecp;
+ struct fec_enet_private *fep;
+
+ fep = netdev_priv(dev);
+ fecp = fep->hwp;
+
+ fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
+
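+ /* Busy-wait for the graceful-stop-complete (GRA) event before
+ * resetting, so a frame already in flight is not cut short.
+ */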
+ while(!(fecp->fec_ievent & 0x10000000));
+
+ /* Whack a reset. We should wait for this.
+ */
+ fecp->fec_ecntrl = 1;
+ udelay(10);
+
+ /* Clear outstanding MII command interrupts.
+ */
+ fecp->fec_ievent = FEC_ENET_MII;
+ fec_enable_phy_intr();
+
+ fecp->fec_imask = FEC_ENET_MII;
+ fecp->fec_mii_speed = fep->phy_speed;
+}
+
+static int __init fec_enet_module_init(void)
+{
+ struct net_device *dev;
+ int i, err;
+
+ for (i = 0; (i < FEC_MAX_PORTS); i++) {
+ dev = alloc_etherdev(sizeof(struct fec_enet_private));
+ if (!dev)
+ return -ENOMEM;
+ err = fec_enet_init(dev);
+ if (err) {
+ free_netdev(dev);
+ continue;
+ }
+ if (register_netdev(dev) != 0) {
+ /* XXX: missing cleanup here */
+ free_netdev(dev);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+module_init(fec_enet_module_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
new file mode 100644
index 000000000000..c6e4f979ff5d
--- /dev/null
+++ b/drivers/net/fec.h
@@ -0,0 +1,164 @@
+/****************************************************************************/
+
+/*
+ * fec.h -- Fast Ethernet Controller for Motorola ColdFire 5270,
+ * 5271, 5272, 5274, 5275, 5280 and 5282.
+ *
+ * (C) Copyright 2000-2003, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef FEC_H
+#define FEC_H
+/****************************************************************************/
+
+#if defined(CONFIG_M527x) || defined(CONFIG_M528x)
+/*
+ * Just figures, Motorola would have to change the offsets for
+ * registers in the same peripheral device on different models
+ * of the ColdFire!
+ */
+typedef struct fec {
+ unsigned long fec_reserved0;
+ unsigned long fec_ievent; /* Interrupt event reg */
+ unsigned long fec_imask; /* Interrupt mask reg */
+ unsigned long fec_reserved1;
+ unsigned long fec_r_des_active; /* Receive descriptor reg */
+ unsigned long fec_x_des_active; /* Transmit descriptor reg */
+ unsigned long fec_reserved2[3];
+ unsigned long fec_ecntrl; /* Ethernet control reg */
+ unsigned long fec_reserved3[6];
+ unsigned long fec_mii_data; /* MII manage frame reg */
+ unsigned long fec_mii_speed; /* MII speed control reg */
+ unsigned long fec_reserved4[7];
+ unsigned long fec_mib_ctrlstat; /* MIB control/status reg */
+ unsigned long fec_reserved5[7];
+ unsigned long fec_r_cntrl; /* Receive control reg */
+ unsigned long fec_reserved6[15];
+ unsigned long fec_x_cntrl; /* Transmit Control reg */
+ unsigned long fec_reserved7[7];
+ unsigned long fec_addr_low; /* Low 32bits MAC address */
+ unsigned long fec_addr_high; /* High 16bits MAC address */
+ unsigned long fec_opd; /* Opcode + Pause duration */
+ unsigned long fec_reserved8[10];
+ unsigned long fec_hash_table_high; /* High 32bits hash table */
+ unsigned long fec_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
+ unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_reserved9[7];
+ unsigned long fec_x_wmrk; /* FIFO transmit water mark */
+ unsigned long fec_reserved10;
+ unsigned long fec_r_bound; /* FIFO receive bound reg */
+ unsigned long fec_r_fstart; /* FIFO receive start reg */
+ unsigned long fec_reserved11[11];
+ unsigned long fec_r_des_start; /* Receive descriptor ring */
+ unsigned long fec_x_des_start; /* Transmit descriptor ring */
+ unsigned long fec_r_buff_size; /* Maximum receive buff size */
+} fec_t;
+
+#else
+
+/*
+ * Define device register set address map.
+ */
+typedef struct fec {
+ unsigned long fec_ecntrl; /* Ethernet control reg */
+ unsigned long fec_ievent; /* Interrupt event reg */
+ unsigned long fec_imask; /* Interrupt mask reg */
+ unsigned long fec_ivec; /* Interrupt vec status reg */
+ unsigned long fec_r_des_active; /* Receive descriptor reg */
+ unsigned long fec_x_des_active; /* Transmit descriptor reg */
+ unsigned long fec_reserved1[10];
+ unsigned long fec_mii_data; /* MII manage frame reg */
+ unsigned long fec_mii_speed; /* MII speed control reg */
+ unsigned long fec_reserved2[17];
+ unsigned long fec_r_bound; /* FIFO receive bound reg */
+ unsigned long fec_r_fstart; /* FIFO receive start reg */
+ unsigned long fec_reserved3[4];
+ unsigned long fec_x_wmrk; /* FIFO transmit water mark */
+ unsigned long fec_reserved4;
+ unsigned long fec_x_fstart; /* FIFO transmit start reg */
+ unsigned long fec_reserved5[21];
+ unsigned long fec_r_cntrl; /* Receive control reg */
+ unsigned long fec_max_frm_len; /* Maximum frame length reg */
+ unsigned long fec_reserved6[14];
+ unsigned long fec_x_cntrl; /* Transmit Control reg */
+ unsigned long fec_reserved7[158];
+ unsigned long fec_addr_low; /* Low 32bits MAC address */
+ unsigned long fec_addr_high; /* High 16bits MAC address */
+ unsigned long fec_hash_table_high; /* High 32bits hash table */
+ unsigned long fec_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_r_des_start; /* Receive descriptor ring */
+ unsigned long fec_x_des_start; /* Transmit descriptor ring */
+ unsigned long fec_r_buff_size; /* Maximum receive buff size */
+ unsigned long reserved8[9];
+ unsigned long fec_fifo_ram[112]; /* FIFO RAM buffer */
+} fec_t;
+
+#endif /* CONFIG_M527x || CONFIG_M528x */
+
+
+/*
+ * Define the buffer descriptor structure.
+ */
+typedef struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+} cbd_t;
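+/* The FEC DMA engine reads and writes these descriptors directly in
+ * memory; on platforms where it matters the driver maps the rings
+ * uncached (see fec_uncache() in fec.c).
+ */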
+
+
+/*
+ * The following definitions courtesy of commproc.h, which were
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+*/
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+*/
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+
+
+/****************************************************************************/
+#endif /* FEC_H */
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
new file mode 100644
index 000000000000..db36ac3ea453
--- /dev/null
+++ b/drivers/net/fec_8xx/Kconfig
@@ -0,0 +1,14 @@
+config FEC_8XX
+ tristate "Motorola 8xx FEC driver"
+ depends on NET_ETHERNET && 8xx && (NETTA || NETPHONE)
+ select MII
+
+config FEC_8XX_GENERIC_PHY
+ bool "Support any generic PHY"
+ depends on FEC_8XX
+ default y
+
+config FEC_8XX_DM9161_PHY
+ bool "Support DM9161 PHY"
+ depends on FEC_8XX
+ default n
diff --git a/drivers/net/fec_8xx/Makefile b/drivers/net/fec_8xx/Makefile
new file mode 100644
index 000000000000..70c54f8c48e5
--- /dev/null
+++ b/drivers/net/fec_8xx/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Motorola 8xx FEC ethernet controller
+#
+
+obj-$(CONFIG_FEC_8XX) += fec_8xx.o
+
+fec_8xx-objs := fec_main.o fec_mii.o
+
+# the platform instantiation objects
+ifeq ($(CONFIG_NETTA),y)
+fec_8xx-objs += fec_8xx-netta.o
+endif
diff --git a/drivers/net/fec_8xx/fec_8xx-netta.c b/drivers/net/fec_8xx/fec_8xx-netta.c
new file mode 100644
index 000000000000..29c275e1d566
--- /dev/null
+++ b/drivers/net/fec_8xx/fec_8xx-netta.c
@@ -0,0 +1,153 @@
+/*
+ * FEC instantiation file for NETTA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/commproc.h>
+
+#include "fec_8xx.h"
+
+/*************************************************/
+
+static struct fec_platform_info fec1_info = {
+ .fec_no = 0,
+ .use_mdio = 1,
+ .phy_addr = 8,
+ .fec_irq = SIU_LEVEL1,
+ .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC6,
+ .rx_ring = 128,
+ .tx_ring = 16,
+ .rx_copybreak = 240,
+ .use_napi = 1,
+ .napi_weight = 17,
+};
+
+static struct fec_platform_info fec2_info = {
+ .fec_no = 1,
+ .use_mdio = 1,
+ .phy_addr = 2,
+ .fec_irq = SIU_LEVEL3,
+ .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC7,
+ .rx_ring = 128,
+ .tx_ring = 16,
+ .rx_copybreak = 240,
+ .use_napi = 1,
+ .napi_weight = 17,
+};
+
+static struct net_device *fec1_dev;
+static struct net_device *fec2_dev;
+
+/* XXX custom u-boot & Linux startup needed */
+extern const char *__fw_getenv(const char *var);
+
+/* access ports */
+#define setbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) | (_v))
+#define clrbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) & ~(_v))
+
+#define setbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) | (_v))
+#define clrbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) & ~(_v))
+
+int fec_8xx_platform_init(void)
+{
+ immap_t *immap = (immap_t *)IMAP_ADDR;
+ bd_t *bd = (bd_t *) __res;
+ const char *s;
+ char *e;
+ int i;
+
+ /* use MDC for MII */
+ setbits16(immap->im_ioport.iop_pdpar, 0x0080);
+ clrbits16(immap->im_ioport.iop_pddir, 0x0080);
+
+ /* configure FEC1 pins */
+ setbits16(immap->im_ioport.iop_papar, 0xe810);
+ setbits16(immap->im_ioport.iop_padir, 0x0810);
+ clrbits16(immap->im_ioport.iop_padir, 0xe000);
+
+ setbits32(immap->im_cpm.cp_pbpar, 0x00000001);
+ clrbits32(immap->im_cpm.cp_pbdir, 0x00000001);
+
+ setbits32(immap->im_cpm.cp_cptr, 0x00000100);
+ clrbits32(immap->im_cpm.cp_cptr, 0x00000050);
+
+ clrbits16(immap->im_ioport.iop_pcpar, 0x0200);
+ clrbits16(immap->im_ioport.iop_pcdir, 0x0200);
+ clrbits16(immap->im_ioport.iop_pcso, 0x0200);
+ setbits16(immap->im_ioport.iop_pcint, 0x0200);
+
+ /* configure FEC2 pins */
+ setbits32(immap->im_cpm.cp_pepar, 0x00039620);
+ setbits32(immap->im_cpm.cp_pedir, 0x00039620);
+ setbits32(immap->im_cpm.cp_peso, 0x00031000);
+ clrbits32(immap->im_cpm.cp_peso, 0x00008620);
+
+ setbits32(immap->im_cpm.cp_cptr, 0x00000080);
+ clrbits32(immap->im_cpm.cp_cptr, 0x00000028);
+
+ clrbits16(immap->im_ioport.iop_pcpar, 0x0200);
+ clrbits16(immap->im_ioport.iop_pcdir, 0x0200);
+ clrbits16(immap->im_ioport.iop_pcso, 0x0200);
+ setbits16(immap->im_ioport.iop_pcint, 0x0200);
+
+ /* fill up */
+ fec1_info.sys_clk = bd->bi_intfreq;
+ fec2_info.sys_clk = bd->bi_intfreq;
+
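+ /* Pull the MAC addresses out of the firmware environment; each is
+ * parsed as six hex bytes separated by single non-hex characters
+ * (simple_strtoul() stops at the first non-hex digit).
+ */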
+ s = __fw_getenv("ethaddr");
+ if (s != NULL) {
+ for (i = 0; i < 6; i++) {
+ fec1_info.macaddr[i] = simple_strtoul(s, &e, 16);
+ if (*e)
+ s = e + 1;
+ }
+ }
+
+ s = __fw_getenv("eth1addr");
+ if (s != NULL) {
+ for (i = 0; i < 6; i++) {
+ fec2_info.macaddr[i] = simple_strtoul(s, &e, 16);
+ if (*e)
+ s = e + 1;
+ }
+ }
+
+ fec_8xx_init_one(&fec1_info, &fec1_dev);
+ fec_8xx_init_one(&fec2_info, &fec2_dev);
+
+ return fec1_dev != NULL && fec2_dev != NULL ? 0 : -1;
+}
+
+void fec_8xx_platform_cleanup(void)
+{
+ if (fec2_dev != NULL)
+ fec_8xx_cleanup_one(fec2_dev);
+
+ if (fec1_dev != NULL)
+ fec_8xx_cleanup_one(fec1_dev);
+}
diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h
new file mode 100644
index 000000000000..5af60b0f9208
--- /dev/null
+++ b/drivers/net/fec_8xx/fec_8xx.h
@@ -0,0 +1,218 @@
+#ifndef FEC_8XX_H
+#define FEC_8XX_H
+
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+
+#include <linux/types.h>
+
+/* HW info */
+
+/* CRC polynomial used by the FEC for multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | \
+ ADVERTISE_10HALF | ADVERTISE_CSMA)
+#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | \
+ ADVERTISE_10FULL | MII_ADVERTISE_HALF)
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+/* values for MII phy_status */
+
+#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
+#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
+#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
+#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
+#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
+#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
+#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
+#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
+
+typedef struct phy_info {
+ unsigned int id;
+ const char *name;
+ void (*startup) (struct net_device * dev);
+ void (*shutdown) (struct net_device * dev);
+ void (*ack_int) (struct net_device * dev);
+} phy_info_t;
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
+#define MIN_MTU 46 /* this is data size */
+#define CRC_LEN 4
+
+#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
+#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
+
+/* Must be a multiple of 4 */
+#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE+3) & ~3)
+/* This is needed so that invalidate_xxx won't invalidate too much */
+#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
+
+/* platform interface */
+
+struct fec_platform_info {
+ int fec_no; /* FEC index */
+ int use_mdio; /* use external MII */
+ int phy_addr; /* the phy address */
+ int fec_irq, phy_irq; /* IRQs for the FEC controller and the PHY */
+ int rx_ring, tx_ring; /* number of rx and tx buffers */
+ int sys_clk; /* system clock */
+ __u8 macaddr[6]; /* mac address */
+ int rx_copybreak; /* copy frames smaller than this limit */
+ int use_napi; /* use NAPI */
+ int napi_weight; /* NAPI weight */
+};
+
+/* forward declaration */
+struct fec;
+
+struct fec_enet_private {
+ spinlock_t lock; /* during all ops except TX pckt processing */
+ spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */
+ int fecno;
+ struct fec *fecp;
+ const struct fec_platform_info *fpi;
+ int rx_ring, tx_ring;
+ dma_addr_t ring_mem_addr;
+ void *ring_base;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t *tx_bd_base;
+ cbd_t *dirty_tx; /* ring entries to be free()ed. */
+ cbd_t *cur_rx;
+ cbd_t *cur_tx;
+ int tx_free;
+ struct net_device_stats stats;
+ struct timer_list phy_timer_list;
+ const struct phy_info *phy;
+ unsigned int fec_phy_speed;
+ __u32 msg_enable;
+ struct mii_if_info mii_if;
+};
+
+/***************************************************************************/
+
+void fec_restart(struct net_device *dev, int duplex, int speed);
+void fec_stop(struct net_device *dev);
+
+/***************************************************************************/
+
+int fec_mii_read(struct net_device *dev, int phy_id, int location);
+void fec_mii_write(struct net_device *dev, int phy_id, int location, int value);
+
+int fec_mii_phy_id_detect(struct net_device *dev);
+void fec_mii_startup(struct net_device *dev);
+void fec_mii_shutdown(struct net_device *dev);
+void fec_mii_ack_int(struct net_device *dev);
+
+void fec_mii_link_status_change_check(struct net_device *dev, int init_media);
+
+/***************************************************************************/
+
+#define FEC1_NO 0x00
+#define FEC2_NO 0x01
+#define FEC3_NO 0x02
+
+int fec_8xx_init_one(const struct fec_platform_info *fpi,
+ struct net_device **devp);
+int fec_8xx_cleanup_one(struct net_device *dev);
+
+/***************************************************************************/
+
+#define DRV_MODULE_NAME "fec_8xx"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "0.1"
+#define DRV_MODULE_RELDATE "May 6, 2004"
+
+/***************************************************************************/
+
+int fec_8xx_platform_init(void);
+void fec_8xx_platform_cleanup(void);
+
+/***************************************************************************/
+
+/* FEC access macros */
+#if defined(CONFIG_8xx)
+/* for an 8xx the __raw_xxx accessors are sufficient */
+#define __fec_out32(addr, x) __raw_writel(x, addr)
+#define __fec_out16(addr, x) __raw_writew(x, addr)
+#define __fec_in32(addr) __raw_readl(addr)
+#define __fec_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __fec_out32(addr, x) out_be32(addr, x)
+#define __fec_out16(addr, x) out_be16(addr, x)
+#define __fec_in32(addr) in_be32(addr)
+#define __fec_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define FW(_fecp, _reg, _v) __fec_out32(&(_fecp)->fec_ ## _reg, (_v))
+
+/* read */
+#define FR(_fecp, _reg) __fec_in32(&(_fecp)->fec_ ## _reg)
+
+/* set bits */
+#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
+
+/* clear bits */
+#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
+
+/* buffer descriptor access macros */
+
+/* write */
+#define CBDW_SC(_cbd, _sc) __fec_out16(&(_cbd)->cbd_sc, (_sc))
+#define CBDW_DATLEN(_cbd, _datlen) __fec_out16(&(_cbd)->cbd_datlen, (_datlen))
+#define CBDW_BUFADDR(_cbd, _bufaddr) __fec_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
+
+/* read */
+#define CBDR_SC(_cbd) __fec_in16(&(_cbd)->cbd_sc)
+#define CBDR_DATLEN(_cbd) __fec_in16(&(_cbd)->cbd_datlen)
+#define CBDR_BUFADDR(_cbd) __fec_in32(&(_cbd)->cbd_bufaddr)
+
+/* set bits */
+#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
+
+/* clear bits */
+#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
+
+/***************************************************************************/
+
+#endif
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c
new file mode 100644
index 000000000000..b4f3a9f8a535
--- /dev/null
+++ b/drivers/net/fec_8xx/fec_main.c
@@ -0,0 +1,1275 @@
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * Released under the GPL
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/commproc.h>
+#include <asm/dma-mapping.h>
+
+#include "fec_8xx.h"
+
+/*************************************************/
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/*************************************************/
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(fec_8xx_debug, "i");
+MODULE_PARM_DESC(fec_8xx_debug,
+ "FEC 8xx bitmapped debugging message enable value");
+
+int fec_8xx_debug = -1; /* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */
+
+/*************************************************/
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+
+/*****************************************************************************************/
+
+static void fec_whack_reset(fec_t * fecp)
+{
+ int i;
+
+ /*
+ * Whack a reset. We should wait for this.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
+ for (i = 0;
+ (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY;
+ i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ printk(KERN_WARNING "FEC Reset timeout!\n");
+
+}
+
+/****************************************************************************/
+
+/*
+ * Transmitter timeout.
+ */
+#define TX_TIMEOUT (2*HZ)
+
+/****************************************************************************/
+
+/*
+ * Returns the CRC needed when filling in the hash table for
+ * multicast group filtering.
+ * pAddr must point to a MAC address (6 bytes).
+ */
+static __u32 fec_mulicast_calc_crc(char *pAddr)
+{
+ u8 byte;
+ int byte_count;
+ int bit_count;
+ __u32 crc = 0xffffffff;
+ u8 msb;
+
+ for (byte_count = 0; byte_count < 6; byte_count++) {
+ byte = pAddr[byte_count];
+ for (bit_count = 0; bit_count < 8; bit_count++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 0x1)) {
+ crc ^= FEC_CRC_POLY;
+ }
+ byte >>= 1;
+ }
+ }
+ return (crc);
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering. Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+static void fec_set_multicast_list(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fecp;
+ struct dev_mc_list *pmc;
+ __u32 crc;
+ int temp;
+ __u32 csrVal;
+ int hash_index;
+ __u32 hthi, htlo;
+ unsigned long flags;
+
+
+ if ((dev->flags & IFF_PROMISC) != 0) {
+
+ spin_lock_irqsave(&fep->lock, flags);
+ FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /*
+ * Log any net taps.
+ */
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s: Promiscuous mode enabled.\n", dev->name);
+ return;
+
+ }
+
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ /*
+ * Catch all multicast addresses, set the filter to all 1's.
+ */
+ hthi = 0xffffffffU;
+ htlo = 0xffffffffU;
+ } else {
+ hthi = 0;
+ htlo = 0;
+
+ /*
+ * Now populate the hash table
+ */
+ for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) {
+ crc = fec_mulicast_calc_crc(pmc->dmi_addr);
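+ /*
+ * The low six CRC bits select one of the 64 hash bits:
+ * bit 0 picks the high or low register, and bits 1-5
+ * (bit-reversed below) pick the bit within it.
+ */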
+ temp = (crc & 0x3f) >> 1;
+ hash_index = ((temp & 0x01) << 4) |
+ ((temp & 0x02) << 2) |
+ ((temp & 0x04)) |
+ ((temp & 0x08) >> 2) |
+ ((temp & 0x10) >> 4);
+ csrVal = (1 << hash_index);
+ if (crc & 1)
+ hthi |= csrVal;
+ else
+ htlo |= csrVal;
+ }
+ }
+
+ spin_lock_irqsave(&fep->lock, flags);
+ FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ FW(fecp, hash_table_high, hthi);
+ FW(fecp, hash_table_low, htlo);
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fec_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *mac = addr;
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec *fecp = fep->fecp;
+ int i;
+ __u32 addrhi, addrlo;
+ unsigned long flags;
+
+ /* Copy the new station address into the device structure. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = mac->sa_data[i];
+
+ /*
+ * Set station address.
+ */
+ addrhi = ((__u32) dev->dev_addr[0] << 24) |
+ ((__u32) dev->dev_addr[1] << 16) |
+ ((__u32) dev->dev_addr[2] << 8) |
+ (__u32) dev->dev_addr[3];
+ addrlo = ((__u32) dev->dev_addr[4] << 24) |
+ ((__u32) dev->dev_addr[5] << 16);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return 0;
+}
+
+/*
+ * This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+void fec_restart(struct net_device *dev, int duplex, int speed)
+{
+#ifdef CONFIG_DUET
+ immap_t *immap = (immap_t *) IMAP_ADDR;
+ __u32 cptr;
+#endif
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec *fecp = fep->fecp;
+ const struct fec_platform_info *fpi = fep->fpi;
+ cbd_t *bdp;
+ struct sk_buff *skb;
+ int i;
+ __u32 addrhi, addrlo;
+
+ fec_whack_reset(fep->fecp);
+
+ /*
+ * Set station address.
+ */
+ addrhi = ((__u32) dev->dev_addr[0] << 24) |
+ ((__u32) dev->dev_addr[1] << 16) |
+ ((__u32) dev->dev_addr[2] << 8) |
+ (__u32) dev->dev_addr[3];
+ addrlo = ((__u32) dev->dev_addr[4] << 24) |
+ ((__u32) dev->dev_addr[5] << 16);
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+
+ /*
+ * Reset all multicast.
+ */
+ FW(fecp, hash_table_high, 0);
+ FW(fecp, hash_table_low, 0);
+
+ /*
+ * Set maximum receive buffer size.
+ */
+ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+ FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base)));
+ FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base)));
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->tx_free = fep->tx_ring;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0; i < fep->rx_ring; i++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+ fep->rx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Initialize the receive buffer descriptors.
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ skb = dev_alloc_skb(ENET_RX_FRSIZE);
+ if (skb == NULL) {
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s Memory squeeze, unable to allocate skb\n",
+ dev->name);
+ fep->stats.rx_dropped++;
+ break;
+ }
+ fep->rx_skbuff[i] = skb;
+ skb->dev = dev;
+ CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0); /* zero */
+ CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+ ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+ }
+ /*
+ * if we failed, fill up the remainder
+ */
+ for (; i < fep->rx_ring; i++, bdp++) {
+ fep->rx_skbuff[i] = NULL;
+ CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0; i < fep->tx_ring; i++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * ...and the same for transmit.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ fep->tx_skbuff[i] = NULL;
+ CBDW_BUFADDR(bdp, virt_to_bus(NULL));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * Enable big endian and don't care about SDMA FC.
+ */
+ FW(fecp, fun_code, 0x78000000);
+
+ /*
+ * Set MII speed.
+ */
+ FW(fecp, mii_speed, fep->fec_phy_speed);
+
+ /*
+ * Clear any outstanding interrupt.
+ */
+ FW(fecp, ievent, 0xffc0);
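+ /* program the interrupt vector/level from the SIU irq index */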
+ FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
+
+ /*
+ * adjust to speed (only for DUET & RMII)
+ */
+#ifdef CONFIG_DUET
+ cptr = in_be32(&immap->im_cpm.cp_cptr);
+ switch (fpi->fec_no) {
+ case 0:
+ /*
+ * check if in RMII mode
+ */
+ if ((cptr & 0x100) == 0)
+ break;
+
+ if (speed == 10)
+ cptr |= 0x0000010;
+ else if (speed == 100)
+ cptr &= ~0x0000010;
+ break;
+ case 1:
+ /*
+ * check if in RMII mode
+ */
+ if ((cptr & 0x80) == 0)
+ break;
+
+ if (speed == 10)
+ cptr |= 0x0000008;
+ else if (speed == 100)
+ cptr &= ~0x0000008;
+ break;
+ default:
+ break;
+ }
+ out_be32(&immap->im_cpm.cp_cptr, cptr);
+#endif
+
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ /*
+ * adjust to duplex mode
+ */
+ if (duplex) {
+ FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
+ } else {
+ FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
+ }
+
+ /*
+ * Enable interrupts we wish to service.
+ */
+ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB);
+
+ /*
+ * And last, enable the transmit and receive processing.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+void fec_stop(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fecp;
+ struct sk_buff *skb;
+ int i;
+
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
+ return; /* already down */
+
+ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
+ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
+ i < FEC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s FEC timeout on graceful transmit stop\n",
+ dev->name);
+ /*
+ * Disable the FEC; only the MII (PHY link) interrupt path stays usable.
+ */
+ FW(fecp, imask, 0);
+ FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN);
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0; i < fep->tx_ring; i++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0; i < fep->rx_ring; i++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+ fep->rx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+}
+
+/* common receive function */
+static int fec_enet_rx_common(struct net_device *dev, int *budget)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fecp;
+ const struct fec_platform_info *fpi = fep->fpi;
+ cbd_t *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ __u16 pkt_len, sc;
+ int curidx;
+ int rx_work_limit;
+
+ if (fpi->use_napi) {
+ rx_work_limit = min(dev->quota, *budget);
+
+ if (!netif_running(dev))
+ return 0;
+ }
+
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ /* clear RX status bits for NAPI */
+ if (fpi->use_napi)
+ FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB);
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s rcv is not +last\n",
+ dev->name);
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_crc_errors++;
+
+ skbn = fep->rx_skbuff[curidx];
+ BUG_ON(skbn == NULL);
+
+ } else {
+
+ /* napi, got packet but no quota */
+ if (fpi->use_napi && --rx_work_limit < 0)
+ break;
+
+ skb = fep->rx_skbuff[curidx];
+ BUG_ON(skb == NULL);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
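+ /*
+ * Small frames are copied into a fresh small skb so the
+ * full-size receive buffer can be reused; larger frames
+ * get a brand new full-size skb instead.
+ */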
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = dev_alloc_skb(pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ memcpy(skbn->data, skb->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else
+ skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+ if (skbn != NULL) {
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ if (!fpi->use_napi)
+ netif_rx(skb);
+ else
+ netif_receive_skb(skb);
+ } else {
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s Memory squeeze, dropping packet.\n",
+ dev->name);
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ /*
+ * Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ FW(fecp, r_des_active, 0x01000000);
+ }
+
+ fep->cur_rx = bdp;
+
+ if (fpi->use_napi) {
+ dev->quota -= received;
+ *budget -= received;
+
+ if (rx_work_limit < 0)
+ return 1; /* not done */
+
+ /* done */
+ netif_rx_complete(dev);
+
+ /* enable RX interrupt bits */
+ FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
+ }
+
+ return 0;
+}
+
+static void fec_enet_tx(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ cbd_t *bdp;
+ struct sk_buff *skb;
+ int dirtyidx, do_wake;
+ __u16 sc;
+
+ spin_lock(&fep->lock);
+ bdp = fep->dirty_tx;
+
+ do_wake = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
+
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+ fep->stats.tx_errors++;
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s HEY! Enet xmit interrupt and TX_READY.\n",
+ dev->name);
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ dev_kfree_skb_irq(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (!fep->tx_free++)
+ do_wake = 1;
+ }
+
+ fep->dirty_tx = bdp;
+
+ spin_unlock(&fep->lock);
+
+ if (do_wake && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+}
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct fec_enet_private *fep;
+ const struct fec_platform_info *fpi;
+ fec_t *fecp;
+ __u32 int_events;
+ __u32 int_events_napi;
+
+ if (unlikely(dev == NULL))
+ return IRQ_NONE;
+
+ fep = netdev_priv(dev);
+ fecp = fep->fecp;
+ fpi = fep->fpi;
+
+ /*
+ * Get the interrupt events that caused us to be here.
+ */
+ while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) {
+
+ if (!fpi->use_napi)
+ FW(fecp, ievent, int_events);
+ else {
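+ /* leave RXF/RXB pending; the NAPI poll routine acks them */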
+ int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB);
+ FW(fecp, ievent, int_events_napi);
+ }
+
+ if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
+ FEC_ENET_BABT | FEC_ENET_EBERR)) != 0)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s FEC ERROR(s) 0x%x\n",
+ dev->name, int_events);
+
+ if ((int_events & FEC_ENET_RXF) != 0) {
+ if (!fpi->use_napi)
+ fec_enet_rx_common(dev, NULL);
+ else {
+ if (netif_rx_schedule_prep(dev)) {
+ /* disable rx interrupts */
+ FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
+ __netif_rx_schedule(dev);
+ } else {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s driver bug! interrupt while in poll!\n",
+ dev->name);
+ FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
+ }
+ }
+ }
+
+ if ((int_events & FEC_ENET_TXF) != 0)
+ fec_enet_tx(dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* This interrupt occurs when the PHY detects a link change. */
+static irqreturn_t
+fec_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct fec_enet_private *fep;
+ const struct fec_platform_info *fpi;
+
+ if (unlikely(dev == NULL))
+ return IRQ_NONE;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ if (!fpi->use_mdio)
+ return IRQ_NONE;
+
+ /*
+ * Acknowledge the interrupt if possible. If we have not
+ * found the PHY yet we can't process or acknowledge the
+ * interrupt now. Instead we ignore this interrupt for now,
+ * which we can do since it is edge triggered. It will be
+ * acknowledged later by fec_enet_open().
+ */
+ if (!fep->phy)
+ return IRQ_NONE;
+
+ fec_mii_ack_int(dev);
+ fec_mii_link_status_change_check(dev, 0);
+
+ return IRQ_HANDLED;
+}
+
+
+/**********************************************************************************/
+
+static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fecp;
+ cbd_t *bdp;
+ int curidx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tx_lock, flags);
+
+ /*
+ * Fill in a Tx ring entry
+ */
+ bdp = fep->cur_tx;
+
+ if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ /*
+ * Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since the tx queue should be stopped.
+ */
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s tx queue full!\n", dev->name);
+ return 1;
+ }
+
+ curidx = bdp - fep->tx_bd_base;
+ /*
+ * Clear all of the status flags.
+ */
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
+
+ /*
+ * Save skb pointer.
+ */
+ fep->tx_skbuff[curidx] = skb;
+
+ fep->stats.tx_bytes += skb->len;
+
+ /*
+ * Push the data cache so the CPM does not get stale memory data.
+ */
+ CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, skb->len);
+
+ dev->trans_start = jiffies;
+
+ /*
+ * If this was the last BD in the ring, start at the beginning again.
+ */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ fep->cur_tx++;
+ else
+ fep->cur_tx = fep->tx_bd_base;
+
+ if (!--fep->tx_free)
+ netif_stop_queue(dev);
+
+ /*
+ * Trigger transmission start
+ */
+ CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR |
+ BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ FW(fecp, x_des_active, 0x01000000);
+
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ return 0;
+}
+
+static void fec_timeout(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fep->stats.tx_errors++;
+
+ if (fep->tx_free)
+ netif_wake_queue(dev);
+
+ /* check link status again */
+ fec_mii_link_status_change_check(dev, 0);
+}
+
+static int fec_enet_open(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+ unsigned long flags;
+
+ /* Install our interrupt handler. */
+ if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s Could not allocate FEC IRQ!", dev->name);
+ return -EINVAL;
+ }
+
+ /* Install our phy interrupt handler */
+ if (fpi->phy_irq != -1 &&
+ request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy",
+ dev) != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s Could not allocate PHY IRQ!", dev->name);
+ free_irq(fpi->fec_irq, dev);
+ return -EINVAL;
+ }
+
+ if (fpi->use_mdio) {
+ fec_mii_startup(dev);
+ netif_carrier_off(dev);
+ fec_mii_link_status_change_check(dev, 1);
+ } else {
+ spin_lock_irqsave(&fep->lock, flags);
+ fec_restart(dev, 1, 100); /* XXX this sucks */
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ }
+ return 0;
+}
+
+static int fec_enet_close(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ if (fpi->use_mdio)
+ fec_mii_shutdown(dev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ fec_stop(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /* release any irqs */
+ if (fpi->phy_irq != -1)
+ free_irq(fpi->phy_irq, dev);
+ free_irq(fpi->fec_irq, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ return &fep->stats;
+}
+
+static int fec_enet_poll(struct net_device *dev, int *budget)
+{
+ return fec_enet_rx_common(dev, budget);
+}
+
+/*************************************************************************/
+
+static void fec_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int fec_get_regs_len(struct net_device *dev)
+{
+ return sizeof(fec_t);
+}
+
+static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len < sizeof(fec_t))
+ return;
+
+ regs->version = 0;
+ spin_lock_irqsave(&fep->lock, flags);
+ memcpy_fromio(p, fep->fecp, sizeof(fec_t));
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = mii_ethtool_gset(&fep->mii_if, cmd);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return rc;
+}
+
+static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = mii_ethtool_sset(&fep->mii_if, cmd);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return rc;
+}
+
+static int fec_nway_reset(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ return mii_nway_restart(&fep->mii_if);
+}
+
+static __u32 fec_get_msglevel(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ return fep->msg_enable;
+}
+
+static void fec_set_msglevel(struct net_device *dev, __u32 value)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fep->msg_enable = value;
+}
+
+static struct ethtool_ops fec_ethtool_ops = {
+ .get_drvinfo = fec_get_drvinfo,
+ .get_regs_len = fec_get_regs_len,
+ .get_settings = fec_get_settings,
+ .set_settings = fec_set_settings,
+ .nway_reset = fec_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fec_get_msglevel,
+ .set_msglevel = fec_set_msglevel,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_regs = fec_get_regs,
+};
+
+static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
+ unsigned long flags;
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
+ spin_unlock_irqrestore(&fep->lock, flags);
+ return rc;
+}
+
+int fec_8xx_init_one(const struct fec_platform_info *fpi,
+ struct net_device **devp)
+{
+ immap_t *immap = (immap_t *) IMAP_ADDR;
+ static int fec_8xx_version_printed = 0;
+ struct net_device *dev = NULL;
+ struct fec_enet_private *fep = NULL;
+ fec_t *fecp = NULL;
+ int i;
+ int err = 0;
+ int registered = 0;
+ __u32 siel;
+
+ *devp = NULL;
+
+ switch (fpi->fec_no) {
+ case 0:
+ fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
+ break;
+#ifdef CONFIG_DUET
+ case 1:
+ fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ if (fec_8xx_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ i = sizeof(*fep) + (sizeof(struct sk_buff **) *
+ (fpi->rx_ring + fpi->tx_ring));
+
+ dev = alloc_etherdev(i);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err;
+ }
+ SET_MODULE_OWNER(dev);
+
+ fep = netdev_priv(dev);
+
+ /* partial reset of FEC */
+ fec_whack_reset(fecp);
+
+ /* point rx_skbuff, tx_skbuff */
+ fep->rx_skbuff = (struct sk_buff **)&fep[1];
+ fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+
+ fep->fecp = fecp;
+ fep->fpi = fpi;
+
+ /* init locks */
+ spin_lock_init(&fep->lock);
+ spin_lock_init(&fep->tx_lock);
+
+ /*
+ * Set the Ethernet address.
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = fpi->macaddr[i];
+
+ fep->ring_base = dma_alloc_coherent(NULL,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s dma alloc failed.\n", dev->name);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ fep->rx_bd_base = fep->ring_base;
+ fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+ /* initialize ring size variables */
+ fep->tx_ring = fpi->tx_ring;
+ fep->rx_ring = fpi->rx_ring;
+
+ /* SIU interrupt */
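+ /*
+ * Adjust the SIU edge/level (SIEL) setting for the PHY line,
+ * depending on whether an IRQx or a LEVELx source was given.
+ */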
+ if (fpi->phy_irq != -1 &&
+ (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) {
+
+ siel = in_be32(&immap->im_siu_conf.sc_siel);
+ if ((fpi->phy_irq & 1) == 0)
+ siel |= (0x80000000 >> fpi->phy_irq);
+ else
+ siel &= ~(0x80000000 >> (fpi->phy_irq & ~1));
+ out_be32(&immap->im_siu_conf.sc_siel, siel);
+ }
+
+ /*
+ * The FEC Ethernet specific entries in the device structure.
+ */
+ dev->open = fec_enet_open;
+ dev->hard_start_xmit = fec_enet_start_xmit;
+ dev->tx_timeout = fec_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->stop = fec_enet_close;
+ dev->get_stats = fec_enet_get_stats;
+ dev->set_multicast_list = fec_set_multicast_list;
+ dev->set_mac_address = fec_set_mac_address;
+ if (fpi->use_napi) {
+ dev->poll = fec_enet_poll;
+ dev->weight = fpi->napi_weight;
+ }
+ dev->ethtool_ops = &fec_ethtool_ops;
+ dev->do_ioctl = fec_ioctl;
+
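+ /*
+ * Derive the MII_SPEED divider from the system clock so that
+ * the MDC clock stays at or below 2.5 MHz.
+ */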
+ fep->fec_phy_speed =
+ ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1;
+
+ init_timer(&fep->phy_timer_list);
+
+ /* partial reset of FEC so that only MII works */
+ FW(fecp, mii_speed, fep->fec_phy_speed);
+ FW(fecp, ievent, 0xffc0);
+ FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
+ FW(fecp, imask, 0);
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err != 0)
+ goto err;
+ registered = 1;
+
+ if (fpi->use_mdio) {
+ fep->mii_if.dev = dev;
+ fep->mii_if.mdio_read = fec_mii_read;
+ fep->mii_if.mdio_write = fec_mii_write;
+ fep->mii_if.phy_id_mask = 0x1f;
+ fep->mii_if.reg_num_mask = 0x1f;
+ fep->mii_if.phy_id = fec_mii_phy_id_detect(dev);
+ }
+
+ *devp = dev;
+
+ return 0;
+
+ err:
+ if (dev != NULL) {
+ if (fecp != NULL)
+ fec_whack_reset(fecp);
+
+ if (registered)
+ unregister_netdev(dev);
+
+ if (fep != NULL) {
+ if (fep->ring_base)
+ dma_free_coherent(NULL,
+ (fpi->tx_ring +
+ fpi->rx_ring) *
+ sizeof(cbd_t), fep->ring_base,
+ fep->ring_mem_addr);
+ }
+ free_netdev(dev);
+ }
+ return err;
+}
+
+int fec_8xx_cleanup_one(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fecp;
+ const struct fec_platform_info *fpi = fep->fpi;
+
+ fec_whack_reset(fecp);
+
+ unregister_netdev(dev);
+
+ dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ fep->ring_base, fep->ring_mem_addr);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+/**************************************************************************************/
+/**************************************************************************************/
+/**************************************************************************************/
+
+static int __init fec_8xx_init(void)
+{
+ return fec_8xx_platform_init();
+}
+
+static void __exit fec_8xx_cleanup(void)
+{
+ fec_8xx_platform_cleanup();
+}
+
+/**************************************************************************************/
+/**************************************************************************************/
+/**************************************************************************************/
+
+module_init(fec_8xx_init);
+module_exit(fec_8xx_cleanup);
diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c
new file mode 100644
index 000000000000..803eb095cf8e
--- /dev/null
+++ b/drivers/net/fec_8xx/fec_mii.c
@@ -0,0 +1,380 @@
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * Released under the GPL
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/commproc.h>
+
+/*************************************************/
+
+#include "fec_8xx.h"
+
+/*************************************************/
+
+/* Make MII read/write commands for the FEC.
+*/
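+/*
+ * Management frame layout: ST=01, OP (10 = read, 01 = write),
+ * PHY address in bits 27:23, register in bits 22:18, TA=10,
+ * data in bits 15:0.
+ */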
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+/*************************************************/
+
+/* XXX both FECs use the MII interface of FEC1 */
+static DEFINE_SPINLOCK(fec_mii_lock);
+
+#define FEC_MII_LOOPS 10000
+
+int fec_mii_read(struct net_device *dev, int phy_id, int location)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp;
+ int i, ret = -1;
+ unsigned long flags;
+
+ /* XXX MII interface is only connected to FEC1 */
+ fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
+
+ spin_lock_irqsave(&fec_mii_lock, flags);
+
+ if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) {
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ }
+
+ /* Add PHY address to register command. */
+ FW(fecp, mii_speed, fep->fec_phy_speed);
+ FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ FW(fecp, ievent, FEC_ENET_MII);
+ ret = FR(fecp, mii_data) & 0xffff;
+ }
+
+ spin_unlock_irqrestore(&fec_mii_lock, flags);
+
+ return ret;
+}
+
+void fec_mii_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp;
+ unsigned long flags;
+ int i;
+
+ /* XXX MII interface is only connected to FEC1 */
+ fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
+
+ spin_lock_irqsave(&fec_mii_lock, flags);
+
+ if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) {
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ }
+
+ /* Add PHY address to register command. */
+ FW(fecp, mii_speed, fep->fec_phy_speed); /* always adapt mii speed */
+ FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ FW(fecp, ievent, FEC_ENET_MII);
+
+ spin_unlock_irqrestore(&fec_mii_lock, flags);
+}
+
+/*************************************************/
+
+#ifdef CONFIG_FEC_8XX_GENERIC_PHY
+
+/*
+ * Generic PHY support.
+ * Should work for all PHYs, but link change is detected by polling
+ */
+
+static void generic_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fep->phy_timer_list.expires = jiffies + HZ / 2;
+
+ add_timer(&fep->phy_timer_list);
+
+ fec_mii_link_status_change_check(dev, 0);
+}
+
+static void generic_startup(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
+ fep->phy_timer_list.data = (unsigned long)dev;
+ fep->phy_timer_list.function = generic_timer_callback;
+ add_timer(&fep->phy_timer_list);
+}
+
+static void generic_shutdown(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ del_timer_sync(&fep->phy_timer_list);
+}
+
+#endif
+
+#ifdef CONFIG_FEC_8XX_DM9161_PHY
+
+/* ------------------------------------------------------------------------- */
+/* The Davicom DM9161 is used on the NETTA board */
+
+/* register definitions */
+
+#define MII_DM9161_ACR 16 /* Aux. Config Register */
+#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
+#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
+#define MII_DM9161_INTR 21 /* Interrupt Register */
+#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
+#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
+
+static void dm9161_startup(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
+}
+
+static void dm9161_ack_int(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fec_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
+}
+
+static void dm9161_shutdown(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
+}
+
+#endif
+
+/**********************************************************************************/
+
+static const struct phy_info phy_info[] = {
+#ifdef CONFIG_FEC_8XX_DM9161_PHY
+ {
+ .id = 0x00181b88,
+ .name = "DM9161",
+ .startup = dm9161_startup,
+ .ack_int = dm9161_ack_int,
+ .shutdown = dm9161_shutdown,
+ },
+#endif
+#ifdef CONFIG_FEC_8XX_GENERIC_PHY
+ {
+ .id = 0,
+ .name = "GENERIC",
+ .startup = generic_startup,
+ .shutdown = generic_shutdown,
+ },
+#endif
+};
+
+/**********************************************************************************/
+
+int fec_mii_phy_id_detect(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+ int i, r, start, end, phytype, physubtype;
+ const struct phy_info *phy;
+ int phy_hwid, phy_id;
+
+ /* if no MDIO */
+ if (fpi->use_mdio == 0)
+ return -1;
+
+ phy_hwid = -1;
+ fep->phy = NULL;
+
+ /* auto-detect? */
+ if (fpi->phy_addr == -1) {
+ start = 0;
+ end = 32;
+ } else { /* direct */
+ start = fpi->phy_addr;
+ end = start + 1;
+ }
+
+ for (phy_id = start; phy_id < end; phy_id++) {
+ r = fec_mii_read(dev, phy_id, MII_PHYSID1);
+ if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
+ continue;
+ r = fec_mii_read(dev, phy_id, MII_PHYSID2);
+ if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
+ continue;
+ phy_hwid = (phytype << 16) | physubtype;
+ if (phy_hwid != -1)
+ break;
+ }
+
+ if (phy_hwid == -1) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s No PHY detected!\n", dev->name);
+ return -1;
+ }
+
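+ /*
+ * Look the id up in the table. The low four revision bits
+ * are ignored; an id of 0 matches the generic entry.
+ */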
+ for (i = 0, phy = phy_info; i < sizeof(phy_info) / sizeof(phy_info[0]);
+ i++, phy++)
+ if (phy->id == (phy_hwid >> 4) || phy->id == 0)
+ break;
+
+ if (i >= sizeof(phy_info) / sizeof(phy_info[0])) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s PHY id 0x%08x is not supported!\n",
+ dev->name, phy_hwid);
+ return -1;
+ }
+
+ fep->phy = phy;
+
+ printk(KERN_INFO DRV_MODULE_NAME
+ ": %s Phy @ 0x%x, type %s (0x%08x)\n",
+ dev->name, phy_id, fep->phy->name, phy_hwid);
+
+ return phy_id;
+}
+
+void fec_mii_startup(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+
+ if (!fpi->use_mdio || fep->phy == NULL)
+ return;
+
+ if (fep->phy->startup == NULL)
+ return;
+
+ (*fep->phy->startup) (dev);
+}
+
+void fec_mii_shutdown(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+
+ if (!fpi->use_mdio || fep->phy == NULL)
+ return;
+
+ if (fep->phy->shutdown == NULL)
+ return;
+
+ (*fep->phy->shutdown) (dev);
+}
+
+void fec_mii_ack_int(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ const struct fec_platform_info *fpi = fep->fpi;
+
+ if (!fpi->use_mdio || fep->phy == NULL)
+ return;
+
+ if (fep->phy->ack_int == NULL)
+ return;
+
+ (*fep->phy->ack_int) (dev);
+}
+
+/* helper function */
+static int mii_negotiated(struct mii_if_info *mii)
+{
+ int advert, lpa, val;
+
+ if (!mii_link_ok(mii))
+ return 0;
+
+ val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+ if ((val & BMSR_ANEGCOMPLETE) == 0)
+ return 0;
+
+ advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+ lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+
+ return mii_nway_result(advert & lpa);
+}
+
+void fec_mii_link_status_change_check(struct net_device *dev, int init_media)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned int media;
+ unsigned long flags;
+
+ if (mii_check_media(&fep->mii_if, netif_msg_link(fep), init_media) == 0)
+ return;
+
+ media = mii_negotiated(&fep->mii_if);
+
+ if (netif_carrier_ok(dev)) {
+ spin_lock_irqsave(&fep->lock, flags);
+ fec_restart(dev, !!(media & ADVERTISE_FULL),
+ (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ?
+ 100 : 10);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ netif_start_queue(dev);
+ } else {
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ fec_stop(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ }
+}
diff --git a/drivers/net/fmv18x.c b/drivers/net/fmv18x.c
new file mode 100644
index 000000000000..04c748523471
--- /dev/null
+++ b/drivers/net/fmv18x.c
@@ -0,0 +1,689 @@
+/* fmv18x.c: A network device driver for the Fujitsu FMV-181/182/183/184.
+
+ Original: at1700.c (1993-94 by Donald Becker).
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Modified by Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)
+ Copyright 1994 Fujitsu Laboratories Ltd.
+ Special thanks to:
+ Masayoshi UTAKA (utaka@ace.yk.fujitsu.co.jp)
+ for testing this driver.
+ H. NEGISHI (agy, negishi@sun45.psd.cs.fujitsu.co.jp)
+ for suggestion of some program modification.
+ Masahiro SEKIGUCHI <seki@sysrap.cs.fujitsu.co.jp>
+ for suggestion of some program modification.
+ Kazutoshi MORIOKA (morioka@aurora.oaks.cs.fujitsu.co.jp)
+ for testing this driver.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This is a device driver for the Fujitsu FMV-181/182/183/184, which
+ is a straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ at1700.c
+ The Fujitsu MB86965 datasheet.
+ The Fujitsu FMV-181/182 user's guide
+*/
+
+static const char version[] =
+ "fmv18x.c:v2.2.0 09/24/98 Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define DRV_NAME "fmv18x"
+
+static unsigned fmv18x_probe_list[] __initdata = {
+ 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
+};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ long open_time; /* Useless example local info. */
+ uint tx_started:1; /* Transmission is in progress. */
+ uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
+ uint rx_started:1; /* Packets are Rxing. */
+ uchar tx_queue; /* Number of packets on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+ spinlock_t lock;
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define COL16CNTL 11 /* Control Reg for 16 collisions */
+#define MODE13 13
+/* Fujitsu FMV-18x Card Configuration */
+#define FJ_STATUS0 0x10
+#define FJ_STATUS1 0x11
+#define FJ_CONFIG0 0x12
+#define FJ_CONFIG1 0x13
+#define FJ_MACADDR 0x14 /* 0x14 - 0x19 */
+#define FJ_BUFCNTL 0x1A
+#define FJ_BUFDATA 0x1C
+#define FMV18X_IO_EXTENT 32
+
+/* Index to functions, as function prototypes. */
+
+static int fmv18x_probe1(struct net_device *dev, short ioaddr);
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct net_device *dev);
+static void net_timeout(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+
+static int io = 0x220;
+static int irq;
+
+struct net_device * __init fmv18x_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = fmv18x_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = fmv18x_probe_list; *port; port++)
+ if (fmv18x_probe1(dev, *port) == 0)
+ break;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, FMV18X_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into MAC
+ address check. */
+
+static int __init fmv18x_probe1(struct net_device *dev, short ioaddr)
+{
+ char irqmap[4] = {3, 7, 10, 15};
+ char irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
+ unsigned int i, retval;
+ struct net_local *lp;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+
+ if (!request_region(ioaddr, FMV18X_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+
+ /* Check I/O address configuration and Fujitsu vendor code */
+ if (inb(ioaddr+FJ_MACADDR ) != 0x00
+ || inb(ioaddr+FJ_MACADDR+1) != 0x00
+ || inb(ioaddr+FJ_MACADDR+2) != 0x0e) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Check PnP mode for FMV-183/184/183A/184A. */
+ /* This PnP routine is very poor. IO and IRQ should be known. */
+ if (inb(ioaddr + FJ_STATUS1) & 0x20) {
+ for (i = 0; i < 8; i++) {
+ if (dev->irq == irqmap_pnp[i])
+ break;
+ }
+ if (i == 8) {
+ retval = -ENODEV;
+ goto out;
+ }
+ } else {
+ if (fmv18x_probe_list[inb(ioaddr + FJ_CONFIG0) & 0x07] != ioaddr)
+ return -ENODEV;
+ dev->irq = irqmap[(inb(ioaddr + FJ_CONFIG0)>>6) & 0x03];
+ }
+
+ /* Snarf the interrupt vector now. */
+ retval = request_irq(dev->irq, &net_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on "
+ "IRQ %d.\n", ioaddr, dev->irq);
+ goto out;
+ }
+
+ printk("%s: FMV-18x found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, dev->irq);
+
+ for(i = 0; i < 6; i++) {
+ unsigned char val = inb(ioaddr + FJ_MACADDR + i);
+ printk("%02x", val);
+ dev->dev_addr[i] = val;
+ }
+
+ /* "FJ_STATUS0" 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2/5"};
+ ushort setup_value = inb(ioaddr + FJ_STATUS0);
+
+ switch( setup_value & 0x07 ){
+ case 0x01 /* 10base5 */:
+ case 0x02 /* 10base2 */: dev->if_port = 0x18; break;
+ case 0x04 /* 10baseT */: dev->if_port = 0x08; break;
+ default /* auto-sense*/: dev->if_port = 0x00; break;
+ }
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure (TAMIYA) */
+
+ /* wait for a while */
+ udelay(200);
+
+ /* Set the station address in bank zero. */
+ outb(0x00, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0x04, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0x08, ioaddr + CONFIG_1);
+ outb(dev->if_port, ioaddr + MODE13);
+ outb(0x00, ioaddr + COL16CNTL);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (!dev->priv) {
+ retval = -ENOMEM;
+ goto out_irq;
+ }
+ memset(dev->priv, 0, sizeof(struct net_local));
+ lp = dev->priv;
+ spin_lock_init(&lp->lock);
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->tx_timeout = net_timeout;
+ dev->watchdog_timeo = HZ/10;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ return 0;
+
+out_irq:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, FMV18X_IO_EXTENT);
+ return retval;
+}
+
+
+static int net_open(struct net_device *dev)
+{
+ struct net_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+
+	/* Set configuration register 0 to 32K of 100ns byte-wide memory,
+	   16 bit bus access and two 4K Tx queues, and enable Rx and Tx. */
+ outb(0x5a, ioaddr + CONFIG_0);
+
+ /* Powerup and switch to register bank 2 for the run-time registers. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue_ready = 1;
+ lp->rx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Clear Tx and Rx Status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+ lp->open_time = jiffies;
+
+ netif_start_queue(dev);
+
+ /* Enable the IRQ of the LAN Card */
+ outb(0x80, ioaddr + FJ_CONFIG1);
+
+ /* Enable both Tx and Rx interrupts */
+ outw(0x8182, ioaddr+TX_INTR);
+
+ return 0;
+}
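+
+/* Editorial sketch only -- not part of this driver.  The CONFIG_1 writes in
+   fmv18x_probe1() and net_open() above appear to double as a register-bank
+   select: with 0x00 the station address registers show up at ioaddr + 8,
+   with 0x04 the multicast table, and with 0x08 the run-time mode registers
+   (net_open() writes 0xe8 to pick bank 2 while also powering the chip up).
+   A hypothetical helper making that explicit could look like this: */
+static inline void example_select_bank(int ioaddr, int bank)
+{
+	outb(bank << 2, ioaddr + CONFIG_1);	/* 0x00, 0x04 or 0x08 */
+}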
+
+static void net_timeout(struct net_device *dev)
+{
+ struct net_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+
+ printk(KERN_WARNING "%s: transmit timed out with status %04x, %s?\n", dev->name,
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk(KERN_WARNING "%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ printk(KERN_WARNING "eth card: %04x %04x\n",
+ htons(inw(ioaddr+FJ_STATUS0)),
+ htons(inw(ioaddr+FJ_CONFIG0)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure */
+ net_open(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_wake_queue(dev);
+}
+
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ short length = skb->len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ /* Block a transmit from overlapping. */
+
+ if (length > ETH_FRAME_LEN) {
+ if (net_debug)
+ printk("%s: Attempting to send a large packet (%d bytes).\n",
+ dev->name, length);
+ return 1;
+ }
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ buf = skb->data;
+
+ if (net_debug > 4)
+ printk("%s: Transmitting a packet of length %lu.\n", dev->name,
+ (unsigned long)skb->len);
+	/* We must not start transmitting until the packet has been fully
+	   transferred into the Tx queue.  While running the code below we may
+	   catch a Tx interrupt, so we clear tx_queue_ready to keep the
+	   interrupt routine (net_interrupt) from starting a transmission. */
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->tx_queue_ready = 0;
+ {
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+ }
+ lp->tx_queue_ready = 1;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ } else if (lp->tx_queue_len >= 4096 - 1502) /* No room for a packet */
+ netif_stop_queue(dev);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
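+
+/* Editorial note, not driver code: net_send_packet() above charges each
+   queued frame as its payload length plus the 2-byte length word written to
+   DATAPORT, and stops the queue once tx_queue_len reaches 4096 - 1502, i.e.
+   once another maximum-size frame might no longer fit into one of the two
+   4K Tx queues set up in net_open().  An invented helper mirroring that
+   check: */
+static inline int example_tx_queue_full(const struct net_local *lp)
+{
+	return lp->tx_queue_len >= 4096 - 1502;
+}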
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (lp->rx_started == 0 &&
+ (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
+		/* Got a packet (or several).
+		   net_rx must not run more than once at a time for the same
+		   device.  While it runs we may catch a Tx interrupt, so we
+		   set rx_started to keep the interrupt routine
+		   (net_interrupt) from diving into net_rx again. */
+ lp->rx_started = 1;
+ outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
+ net_rx(dev);
+ outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
+ lp->rx_started = 0;
+ }
+ if (status & 0x00ff) {
+ if (status & 0x02) {
+ /* More than 16 collisions occurred */
+ if (net_debug > 4)
+					printk("%s: 16 collisions occurred during Tx.\n", dev->name);
+ /* Cancel sending a packet. */
+ outb(0x03, ioaddr + COL16CNTL);
+ lp->stats.collisions++;
+ }
+ if (status & 0x82) {
+ spin_lock(&lp->lock);
+ lp->stats.tx_packets++;
+ if (lp->tx_queue && lp->tx_queue_ready) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ spin_unlock(&lp->lock);
+ }
+ }
+ return IRQ_RETVAL(status);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct net_device *dev)
+{
+ struct net_local *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ /* Clear PKT_RDY bit: by agy 19940922 */
+ /* outb(0x80, ioaddr + RX_STATUS); */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The FMV-18x claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+
+ if (net_debug > 5) {
+ int i;
+ printk("%s: Rxed packet of length %d: ", dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ (void)inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5 && i > 0)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ ((struct net_local *)dev->priv)->open_time = 0;
+
+ netif_stop_queue(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+	/* Disable the IRQ of the LAN card. */
+ outb(0x00, ioaddr + FJ_CONFIG1);
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = dev->priv;
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   The card has no real multicast filter, so whenever multicast reception,
+   IFF_ALLMULTI or IFF_PROMISC is requested the receiver is simply switched
+   into promiscuous mode; otherwise the normal receive mode is restored.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (dev->mc_count || dev->flags&(IFF_PROMISC|IFF_ALLMULTI))
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ }
+ else
+ outb(2, ioaddr + RX_MODE); /* Disable promiscuous, use normal mode */
+}
+
+#ifdef MODULE
+static struct net_device *dev_fmv18x;
+
+MODULE_PARM(io, "i");
+MODULE_PARM(irq, "i");
+MODULE_PARM(net_debug, "i");
+MODULE_PARM_DESC(io, "FMV-18X I/O address");
+MODULE_PARM_DESC(irq, "FMV-18X IRQ number");
+MODULE_PARM_DESC(net_debug, "FMV-18X debug level (0-1,5-6)");
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("fmv18x: You should not use auto-probing with insmod!\n");
+ dev_fmv18x = fmv18x_probe(-1);
+ if (IS_ERR(dev_fmv18x))
+ return PTR_ERR(dev_fmv18x);
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_fmv18x);
+ free_irq(dev_fmv18x->irq, dev_fmv18x);
+ release_region(dev_fmv18x->base_addr, FMV18X_IO_EXTENT);
+ free_netdev(dev_fmv18x);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c fmv18x.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
new file mode 100644
index 000000000000..cda48c5d72a9
--- /dev/null
+++ b/drivers/net/forcedeth.c
@@ -0,0 +1,2232 @@
+/*
+ * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
+ *
+ * Note: This driver is a cleanroom reimplementation based on reverse
+ * engineered documentation written by Carl-Daniel Hailfinger
+ * and Andrew de Quincey. It's neither supported nor endorsed
+ * by NVIDIA Corp. Use at your own risk.
+ *
+ * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
+ * trademarks of NVIDIA Corporation in the United States and other
+ * countries.
+ *
+ * Copyright (C) 2003,4 Manfred Spraul
+ * Copyright (C) 2004 Andrew de Quincey (wol support)
+ * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
+ * IRQ rate fixes, bigendian fixes, cleanups, verification)
+ * Copyright (c) 2004 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Changelog:
+ * 0.01: 05 Oct 2003: First release that compiles without warnings.
+ * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
+ * Check all PCI BARs for the register window.
+ * udelay added to mii_rw.
+ * 0.03: 06 Oct 2003: Initialize dev->irq.
+ * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
+ * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
+ * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
+ * irq mask updated
+ * 0.07: 14 Oct 2003: Further irq mask updates.
+ * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
+ * added into irq handler, NULL check for drain_ring.
+ * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
+ * requested interrupt sources.
+ * 0.10: 20 Oct 2003: First cleanup for release.
+ * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
+ * MAC Address init fix, set_multicast cleanup.
+ * 0.12: 23 Oct 2003: Cleanups for release.
+ * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
+ * Set link speed correctly. start rx before starting
+ * tx (nv_start_rx sets the link speed).
+ * 0.14: 25 Oct 2003: Nic dependent irq mask.
+ * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
+ * open.
+ * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
+ * increased to 1628 bytes.
+ * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
+ * the tx length.
+ * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
+ * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
+ * addresses, really stop rx if already running
+ * in nv_start_rx, clean up a bit.
+ * 0.20: 07 Dec 2003: alloc fixes
+ * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
+ * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
+ * on close.
+ * 0.23: 26 Jan 2004: various small cleanups
+ * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
+ * 0.25: 09 Mar 2004: wol support
+ * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
+ * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
+ * added CK804/MCP04 device IDs, code fixes
+ * for registers, link status and other minor fixes.
+ * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
+ * 0.29: 31 Aug 2004: Add backup timer for link change notification.
+ * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
+ * into nv_close, otherwise reenabling for wol can
+ * cause DMA to kfree'd memory.
+ * 0.31: 14 Nov 2004: ethtool support for getting/setting link
+ * capabilities.
+ *
+ * Known bugs:
+ * We suspect that on some hardware no TX done interrupts are generated.
+ * This means recovery from netif_stop_queue only happens if the hw timer
+ * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
+ * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
+ * If your hardware reliably generates tx done interrupts, then you can remove
+ * DEV_NEED_TIMERIRQ from the driver_data flags.
+ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
+ * superfluous timer interrupts from the nic.
+ */
+#define FORCEDETH_VERSION "0.31"
+#define DRV_NAME "forcedeth"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/random.h>
+#include <linux/init.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#if 0
+#define dprintk printk
+#else
+#define dprintk(x...) do { } while (0)
+#endif
+
+
+/*
+ * Hardware access:
+ */
+
+#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */
+#define DEV_IRQMASK_1 0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */
+#define DEV_IRQMASK_2 0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */
+#define DEV_NEED_TIMERIRQ 0x0008 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER 0x0010 /* poll link settings. Relies on the timer irq */
+
+enum {
+ NvRegIrqStatus = 0x000,
+#define NVREG_IRQSTAT_MIIEVENT 0x040
+#define NVREG_IRQSTAT_MASK 0x1ff
+ NvRegIrqMask = 0x004,
+#define NVREG_IRQ_RX_ERROR 0x0001
+#define NVREG_IRQ_RX 0x0002
+#define NVREG_IRQ_RX_NOBUF 0x0004
+#define NVREG_IRQ_TX_ERR 0x0008
+#define NVREG_IRQ_TX2 0x0010
+#define NVREG_IRQ_TIMER 0x0020
+#define NVREG_IRQ_LINK 0x0040
+#define NVREG_IRQ_TX1 0x0100
+#define NVREG_IRQMASK_WANTED_1 0x005f
+#define NVREG_IRQMASK_WANTED_2 0x0147
+#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
+
+ NvRegUnknownSetupReg6 = 0x008,
+#define NVREG_UNKSETUP6_VAL 3
+
+/*
+ * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
+ * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
+ */
+ NvRegPollingInterval = 0x00c,
+#define NVREG_POLL_DEFAULT 970
+ NvRegMisc1 = 0x080,
+#define NVREG_MISC1_HD 0x02
+#define NVREG_MISC1_FORCE 0x3b0f3c
+
+ NvRegTransmitterControl = 0x084,
+#define NVREG_XMITCTL_START 0x01
+ NvRegTransmitterStatus = 0x088,
+#define NVREG_XMITSTAT_BUSY 0x01
+
+ NvRegPacketFilterFlags = 0x8c,
+#define NVREG_PFF_ALWAYS 0x7F0008
+#define NVREG_PFF_PROMISC 0x80
+#define NVREG_PFF_MYADDR 0x20
+
+ NvRegOffloadConfig = 0x90,
+#define NVREG_OFFLOAD_HOMEPHY 0x601
+#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
+ NvRegReceiverControl = 0x094,
+#define NVREG_RCVCTL_START 0x01
+ NvRegReceiverStatus = 0x98,
+#define NVREG_RCVSTAT_BUSY 0x01
+
+ NvRegRandomSeed = 0x9c,
+#define NVREG_RNDSEED_MASK 0x00ff
+#define NVREG_RNDSEED_FORCE 0x7f00
+#define NVREG_RNDSEED_FORCE2 0x2d00
+#define NVREG_RNDSEED_FORCE3 0x7400
+
+ NvRegUnknownSetupReg1 = 0xA0,
+#define NVREG_UNKSETUP1_VAL 0x16070f
+ NvRegUnknownSetupReg2 = 0xA4,
+#define NVREG_UNKSETUP2_VAL 0x16
+ NvRegMacAddrA = 0xA8,
+ NvRegMacAddrB = 0xAC,
+ NvRegMulticastAddrA = 0xB0,
+#define NVREG_MCASTADDRA_FORCE 0x01
+ NvRegMulticastAddrB = 0xB4,
+ NvRegMulticastMaskA = 0xB8,
+ NvRegMulticastMaskB = 0xBC,
+
+ NvRegPhyInterface = 0xC0,
+#define PHY_RGMII 0x10000000
+
+ NvRegTxRingPhysAddr = 0x100,
+ NvRegRxRingPhysAddr = 0x104,
+ NvRegRingSizes = 0x108,
+#define NVREG_RINGSZ_TXSHIFT 0
+#define NVREG_RINGSZ_RXSHIFT 16
+ NvRegUnknownTransmitterReg = 0x10c,
+ NvRegLinkSpeed = 0x110,
+#define NVREG_LINKSPEED_FORCE 0x10000
+#define NVREG_LINKSPEED_10 1000
+#define NVREG_LINKSPEED_100 100
+#define NVREG_LINKSPEED_1000 50
+#define NVREG_LINKSPEED_MASK (0xFFF)
+ NvRegUnknownSetupReg5 = 0x130,
+#define NVREG_UNKSETUP5_BIT31 (1<<31)
+ NvRegUnknownSetupReg3 = 0x13c,
+#define NVREG_UNKSETUP3_VAL1 0x200010
+ NvRegTxRxControl = 0x144,
+#define NVREG_TXRXCTL_KICK 0x0001
+#define NVREG_TXRXCTL_BIT1 0x0002
+#define NVREG_TXRXCTL_BIT2 0x0004
+#define NVREG_TXRXCTL_IDLE 0x0008
+#define NVREG_TXRXCTL_RESET 0x0010
+#define NVREG_TXRXCTL_RXCHECK 0x0400
+ NvRegMIIStatus = 0x180,
+#define NVREG_MIISTAT_ERROR 0x0001
+#define NVREG_MIISTAT_LINKCHANGE 0x0008
+#define NVREG_MIISTAT_MASK 0x000f
+#define NVREG_MIISTAT_MASK2 0x000f
+ NvRegUnknownSetupReg4 = 0x184,
+#define NVREG_UNKSETUP4_VAL 8
+
+ NvRegAdapterControl = 0x188,
+#define NVREG_ADAPTCTL_START 0x02
+#define NVREG_ADAPTCTL_LINKUP 0x04
+#define NVREG_ADAPTCTL_PHYVALID 0x40000
+#define NVREG_ADAPTCTL_RUNNING 0x100000
+#define NVREG_ADAPTCTL_PHYSHIFT 24
+ NvRegMIISpeed = 0x18c,
+#define NVREG_MIISPEED_BIT8 (1<<8)
+#define NVREG_MIIDELAY 5
+ NvRegMIIControl = 0x190,
+#define NVREG_MIICTL_INUSE 0x08000
+#define NVREG_MIICTL_WRITE 0x00400
+#define NVREG_MIICTL_ADDRSHIFT 5
+ NvRegMIIData = 0x194,
+ NvRegWakeUpFlags = 0x200,
+#define NVREG_WAKEUPFLAGS_VAL 0x7770
+#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
+#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
+#define NVREG_WAKEUPFLAGS_D3SHIFT 12
+#define NVREG_WAKEUPFLAGS_D2SHIFT 8
+#define NVREG_WAKEUPFLAGS_D1SHIFT 4
+#define NVREG_WAKEUPFLAGS_D0SHIFT 0
+#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
+#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
+#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
+#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
+
+ NvRegPatternCRC = 0x204,
+ NvRegPatternMask = 0x208,
+ NvRegPowerCap = 0x268,
+#define NVREG_POWERCAP_D3SUPP (1<<30)
+#define NVREG_POWERCAP_D2SUPP (1<<26)
+#define NVREG_POWERCAP_D1SUPP (1<<25)
+ NvRegPowerState = 0x26c,
+#define NVREG_POWERSTATE_POWEREDUP 0x8000
+#define NVREG_POWERSTATE_VALID 0x0100
+#define NVREG_POWERSTATE_MASK 0x0003
+#define NVREG_POWERSTATE_D0 0x0000
+#define NVREG_POWERSTATE_D1 0x0001
+#define NVREG_POWERSTATE_D2 0x0002
+#define NVREG_POWERSTATE_D3 0x0003
+};
+
+/* Big endian: should work, but is untested */
+struct ring_desc {
+ u32 PacketBuffer;
+ u32 FlagLen;
+};
+
+#define FLAG_MASK_V1 0xffff0000
+#define FLAG_MASK_V2 0xffffc000
+#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
+#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
+
+#define NV_TX_LASTPACKET (1<<16)
+#define NV_TX_RETRYERROR (1<<19)
+#define NV_TX_LASTPACKET1 (1<<24)
+#define NV_TX_DEFERRED (1<<26)
+#define NV_TX_CARRIERLOST (1<<27)
+#define NV_TX_LATECOLLISION (1<<28)
+#define NV_TX_UNDERFLOW (1<<29)
+#define NV_TX_ERROR (1<<30)
+#define NV_TX_VALID (1<<31)
+
+#define NV_TX2_LASTPACKET (1<<29)
+#define NV_TX2_RETRYERROR (1<<18)
+#define NV_TX2_LASTPACKET1 (1<<23)
+#define NV_TX2_DEFERRED (1<<25)
+#define NV_TX2_CARRIERLOST (1<<26)
+#define NV_TX2_LATECOLLISION (1<<27)
+#define NV_TX2_UNDERFLOW (1<<28)
+/* error and valid are the same for both */
+#define NV_TX2_ERROR (1<<30)
+#define NV_TX2_VALID (1<<31)
+
+#define NV_RX_DESCRIPTORVALID (1<<16)
+#define NV_RX_MISSEDFRAME (1<<17)
+#define NV_RX_SUBSTRACT1 (1<<18)
+#define NV_RX_ERROR1 (1<<23)
+#define NV_RX_ERROR2 (1<<24)
+#define NV_RX_ERROR3 (1<<25)
+#define NV_RX_ERROR4 (1<<26)
+#define NV_RX_CRCERR (1<<27)
+#define NV_RX_OVERFLOW (1<<28)
+#define NV_RX_FRAMINGERR (1<<29)
+#define NV_RX_ERROR (1<<30)
+#define NV_RX_AVAIL (1<<31)
+
+#define NV_RX2_CHECKSUMMASK (0x1C000000)
+#define NV_RX2_CHECKSUMOK1 (0x10000000)
+#define NV_RX2_CHECKSUMOK2 (0x14000000)
+#define NV_RX2_CHECKSUMOK3 (0x18000000)
+#define NV_RX2_DESCRIPTORVALID (1<<29)
+#define NV_RX2_SUBSTRACT1 (1<<25)
+#define NV_RX2_ERROR1 (1<<18)
+#define NV_RX2_ERROR2 (1<<19)
+#define NV_RX2_ERROR3 (1<<20)
+#define NV_RX2_ERROR4 (1<<21)
+#define NV_RX2_CRCERR (1<<22)
+#define NV_RX2_OVERFLOW (1<<23)
+#define NV_RX2_FRAMINGERR (1<<24)
+/* error and avail are the same for both */
+#define NV_RX2_ERROR (1<<30)
+#define NV_RX2_AVAIL (1<<31)
+
+/* Miscellaneous hardware related defines: */
+#define NV_PCI_REGSZ 0x270
+
+/* various timeout delays: all in usec */
+#define NV_TXRX_RESET_DELAY 4
+#define NV_TXSTOP_DELAY1 10
+#define NV_TXSTOP_DELAY1MAX 500000
+#define NV_TXSTOP_DELAY2 100
+#define NV_RXSTOP_DELAY1 10
+#define NV_RXSTOP_DELAY1MAX 500000
+#define NV_RXSTOP_DELAY2 100
+#define NV_SETUP5_DELAY 5
+#define NV_SETUP5_DELAYMAX 50000
+#define NV_POWERUP_DELAY 5
+#define NV_POWERUP_DELAYMAX 5000
+#define NV_MIIBUSY_DELAY 50
+#define NV_MIIPHY_DELAY 10
+#define NV_MIIPHY_DELAYMAX 10000
+
+#define NV_WAKEUPPATTERNS 5
+#define NV_WAKEUPMASKENTRIES 4
+
+/* General driver defaults */
+#define NV_WATCHDOG_TIMEO (5*HZ)
+
+#define RX_RING 128
+#define TX_RING 64
+/*
+ * If your nic mysteriously hangs then try to reduce the limits
+ * to 1/0: It might be required to set NV_TX_LASTPACKET in the
+ * last valid ring entry. But this would be impossible to
+ * implement - probably a disassembly error.
+ */
+#define TX_LIMIT_STOP 63
+#define TX_LIMIT_START 62
+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
+/* even more slack */
+#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128)
+
+#define OOM_REFILL (1+HZ/20)
+#define POLL_WAIT (1+HZ/100)
+#define LINK_TIMEOUT (3*HZ)
+
+/*
+ * desc_ver values:
+ * This field has two purposes:
+ * - Newer nics use a different ring layout. The layout is selected by
+ * comparing np->desc_ver with DESC_VER_xy.
+ * - It contains bits that are forced on when writing to NvRegTxRxControl.
+ */
+#define DESC_VER_1 0x0
+#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
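+
+/*
+ * Editorial sketch, not part of the driver (the helper name is invented):
+ * combining the mask and DESC_VER defines above, a descriptor's FlagLen
+ * word splits into flag bits and a length field depending on desc_ver.
+ * nv_descr_getlength() below extracts the length; the complementary flag
+ * extraction would be:
+ */
+static inline u32 example_flaglen_flags(u32 flaglen, u32 desc_ver)
+{
+	return flaglen & ((desc_ver == DESC_VER_1) ? FLAG_MASK_V1 : FLAG_MASK_V2);
+}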
+
+/* PHY defines */
+#define PHY_OUI_MARVELL 0x5043
+#define PHY_OUI_CICADA 0x03f1
+#define PHYID1_OUI_MASK 0x03ff
+#define PHYID1_OUI_SHFT 6
+#define PHYID2_OUI_MASK 0xfc00
+#define PHYID2_OUI_SHFT 10
+#define PHY_INIT1 0x0f000
+#define PHY_INIT2 0x0e00
+#define PHY_INIT3 0x01000
+#define PHY_INIT4 0x0200
+#define PHY_INIT5 0x0004
+#define PHY_INIT6 0x02000
+#define PHY_GIGABIT 0x0100
+
+#define PHY_TIMEOUT 0x1
+#define PHY_ERROR 0x2
+
+#define PHY_100 0x1
+#define PHY_1000 0x2
+#define PHY_HALF 0x100
+
+/* FIXME: MII defines that should be added to <linux/mii.h> */
+#define MII_1000BT_CR 0x09
+#define MII_1000BT_SR 0x0a
+#define ADVERTISE_1000FULL 0x0200
+#define ADVERTISE_1000HALF 0x0100
+#define LPA_1000FULL 0x0800
+#define LPA_1000HALF 0x0400
+
+
+/*
+ * SMP locking:
+ * All hardware access under dev->priv->lock, except the performance
+ * critical parts:
+ * - rx is (pseudo-) lockless: it relies on the single-threading provided
+ * by the arch code for interrupts.
+ * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * needs dev->priv->lock :-(
+ * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ */
+
+/* in dev: base, irq */
+struct fe_priv {
+ spinlock_t lock;
+
+ /* General data:
+ * Locking: spin_lock(&np->lock); */
+ struct net_device_stats stats;
+ int in_shutdown;
+ u32 linkspeed;
+ int duplex;
+ int autoneg;
+ int fixed_mode;
+ int phyaddr;
+ int wolenabled;
+ unsigned int phy_oui;
+ u16 gigabit;
+
+ /* General data: RO fields */
+ dma_addr_t ring_addr;
+ struct pci_dev *pci_dev;
+ u32 orig_mac[2];
+ u32 irqmask;
+ u32 desc_ver;
+
+ void __iomem *base;
+
+ /* rx specific fields.
+	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
+ */
+ struct ring_desc *rx_ring;
+ unsigned int cur_rx, refill_rx;
+ struct sk_buff *rx_skbuff[RX_RING];
+ dma_addr_t rx_dma[RX_RING];
+ unsigned int rx_buf_sz;
+ struct timer_list oom_kick;
+ struct timer_list nic_poll;
+
+ /* media detection workaround.
+	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
+ */
+ int need_linktimer;
+ unsigned long link_timeout;
+ /*
+ * tx specific fields.
+ */
+ struct ring_desc *tx_ring;
+ unsigned int next_tx, nic_tx;
+ struct sk_buff *tx_skbuff[TX_RING];
+ dma_addr_t tx_dma[TX_RING];
+ u32 tx_flags;
+};
+
+/*
+ * Maximum number of loops until we assume that a bit in the irq mask
+ * is stuck. Overridable with module param.
+ */
+static int max_interrupt_work = 5;
+
+static inline struct fe_priv *get_nvpriv(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline u8 __iomem *get_hwbase(struct net_device *dev)
+{
+ return get_nvpriv(dev)->base;
+}
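+
+/*
+ * Editorial sketch, not part of the driver: the "SMP locking" comment above
+ * means that slow-path hardware reconfiguration is bracketed by np->lock
+ * with interrupts disabled, while the rx/tx fast paths stay outside it.
+ * nv_set_multicast() below is a real example; the invented helper here only
+ * shows the shape of the pattern.
+ */
+static inline void example_locked_hw_write(struct net_device *dev, u32 val)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	spin_lock_irq(&np->lock);
+	writel(val, base + NvRegPacketFilterFlags);
+	spin_unlock_irq(&np->lock);
+}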
+
+static inline void pci_push(u8 __iomem *base)
+{
+ /* force out pending posted writes */
+ readl(base);
+}
+
+static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
+{
+ return le32_to_cpu(prd->FlagLen)
+ & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
+}
+
+static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
+ int delay, int delaymax, const char *msg)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ pci_push(base);
+ do {
+ udelay(delay);
+ delaymax -= delay;
+ if (delaymax < 0) {
+ if (msg)
+ printk(msg);
+ return 1;
+ }
+ } while ((readl(base + offset) & mask) != target);
+ return 0;
+}
+
+#define MII_READ (-1)
+/* mii_rw: read/write a register on the PHY.
+ *
+ * Caller must guarantee serialization
+ */
+static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 reg;
+ int retval;
+
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+
+ reg = readl(base + NvRegMIIControl);
+ if (reg & NVREG_MIICTL_INUSE) {
+ writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
+ udelay(NV_MIIBUSY_DELAY);
+ }
+
+ reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
+ if (value != MII_READ) {
+ writel(value, base + NvRegMIIData);
+ reg |= NVREG_MIICTL_WRITE;
+ }
+ writel(reg, base + NvRegMIIControl);
+
+ if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
+ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
+ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
+ dev->name, miireg, addr);
+ retval = -1;
+ } else if (value != MII_READ) {
+ /* it was a write operation - fewer failures are detectable */
+ dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
+ dev->name, value, miireg, addr);
+ retval = 0;
+ } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
+ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
+ dev->name, miireg, addr);
+ retval = -1;
+ } else {
+ retval = readl(base + NvRegMIIData);
+ dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
+ dev->name, miireg, addr, retval);
+ }
+
+ return retval;
+}
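+
+/*
+ * Editorial usage note, not driver code: mii_rw() above doubles as read and
+ * write -- passing MII_READ as the value performs a read, any other value a
+ * write.  phy_reset() and nv_update_linkspeed() below use the pattern:
+ *
+ *	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ *	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_RESET);
+ */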
+
+static int phy_reset(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 miicontrol;
+ unsigned int tries = 0;
+
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ miicontrol |= BMCR_RESET;
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+ return -1;
+ }
+
+ /* wait for 500ms */
+ msleep(500);
+
+ /* must wait till reset is deasserted */
+ while (miicontrol & BMCR_RESET) {
+ msleep(10);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ /* FIXME: 100 tries seem excessive */
+ if (tries++ > 100)
+ return -1;
+ }
+ return 0;
+}
+
+static int phy_init(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+
+ /* set advertise register */
+ reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
+ printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+
+ /* get phy interface type */
+ phyinterface = readl(base + NvRegPhyInterface);
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ mii_control_1000 &= ~ADVERTISE_1000HALF;
+ if (phyinterface & PHY_RGMII)
+ mii_control_1000 |= ADVERTISE_1000FULL;
+ else
+ mii_control_1000 &= ~ADVERTISE_1000FULL;
+
+ if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ else
+ np->gigabit = 0;
+
+ /* reset the phy */
+ if (phy_reset(dev)) {
+ printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+
+ /* phy vendor specific configuration */
+ if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
+ phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
+ phy_reserved |= (PHY_INIT3 | PHY_INIT4);
+ if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ phy_reserved |= PHY_INIT5;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ if (np->phy_oui == PHY_OUI_CICADA) {
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
+ phy_reserved |= PHY_INIT6;
+ if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
+ /* restart auto negotiation */
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static void nv_start_rx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
+ /* Already running? Stop it. */
+ if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+ writel(0, base + NvRegReceiverControl);
+ pci_push(base);
+ }
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+ writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+ dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
+ dev->name, np->duplex, np->linkspeed);
+ pci_push(base);
+}
+
+static void nv_stop_rx(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
+ writel(0, base + NvRegReceiverControl);
+ reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+ NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
+ KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
+
+ udelay(NV_RXSTOP_DELAY2);
+ writel(0, base + NvRegLinkSpeed);
+}
+
+static void nv_start_tx(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
+ writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+ pci_push(base);
+}
+
+static void nv_stop_tx(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
+ writel(0, base + NvRegTransmitterControl);
+ reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+ NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
+ KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
+
+ udelay(NV_TXSTOP_DELAY2);
+ writel(0, base + NvRegUnknownTransmitterReg);
+}
+
+static void nv_txrx_reset(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
+ pci_push(base);
+ udelay(NV_TXRX_RESET_DELAY);
+ writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
+ pci_push(base);
+}
+
+/*
+ * nv_get_stats: dev->get_stats function
+ * Get latest stats value from the nic.
+ * Called with read_lock(&dev_base_lock) held for read -
+ * only synchronized against unregister_netdevice.
+ */
+static struct net_device_stats *nv_get_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ /* It seems that the nic always generates interrupts and doesn't
+ * accumulate errors internally. Thus the current values in np->stats
+ * are already up to date.
+ */
+ return &np->stats;
+}
+
+/*
+ * nv_alloc_rx: fill rx ring entries.
+ * Return 1 if the skb allocations failed and the rx engine is left
+ * without available descriptors.
+ */
+static int nv_alloc_rx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ unsigned int refill_rx = np->refill_rx;
+ int nr;
+
+ while (np->cur_rx != refill_rx) {
+ struct sk_buff *skb;
+
+ nr = refill_rx % RX_RING;
+ if (np->rx_skbuff[nr] == NULL) {
+
+ skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+ if (!skb)
+ break;
+
+ skb->dev = dev;
+ np->rx_skbuff[nr] = skb;
+ } else {
+ skb = np->rx_skbuff[nr];
+ }
+ np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
+ PCI_DMA_FROMDEVICE);
+ np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ wmb();
+ np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+ dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
+ dev->name, refill_rx);
+ refill_rx++;
+ }
+ np->refill_rx = refill_rx;
+ if (np->cur_rx - refill_rx == RX_RING)
+ return 1;
+ return 0;
+}
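+
+/*
+ * Editorial note, not driver code: cur_rx and refill_rx are free-running
+ * counters, reduced modulo RX_RING only when used as an index.
+ * nv_init_ring() below starts with cur_rx = RX_RING (128) and refill_rx = 0,
+ * so nv_alloc_rx() tries to attach 128 buffers; once refill_rx catches up
+ * with cur_rx the ring is fully stocked.  If not a single skb could be
+ * allocated, cur_rx - refill_rx stays at RX_RING, nv_alloc_rx() returns 1
+ * and the caller arms the OOM_REFILL timer (see nv_do_rx_refill()).
+ * An invented helper for the derived quantity:
+ */
+static inline unsigned int example_rx_buffers_stocked(struct fe_priv *np)
+{
+	/* descriptors currently marked NV_RX_AVAIL for the nic */
+	return RX_RING - (np->cur_rx - np->refill_rx);
+}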
+
+static void nv_do_rx_refill(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = get_nvpriv(dev);
+
+ disable_irq(dev->irq);
+ if (nv_alloc_rx(dev)) {
+ spin_lock(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock(&np->lock);
+ }
+ enable_irq(dev->irq);
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ np->next_tx = np->nic_tx = 0;
+ for (i = 0; i < TX_RING; i++)
+ np->tx_ring[i].FlagLen = 0;
+
+ np->cur_rx = RX_RING;
+ np->refill_rx = 0;
+ for (i = 0; i < RX_RING; i++)
+ np->rx_ring[i].FlagLen = 0;
+ return nv_alloc_rx(dev);
+}
+
+static void nv_drain_tx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+ for (i = 0; i < TX_RING; i++) {
+ np->tx_ring[i].FlagLen = 0;
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev, np->tx_dma[i],
+ np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = NULL;
+ np->stats.tx_dropped++;
+ }
+ }
+}
+
+static void nv_drain_rx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+ for (i = 0; i < RX_RING; i++) {
+ np->rx_ring[i].FlagLen = 0;
+ wmb();
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ np->rx_skbuff[i]->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+ }
+ }
+}
+
+static void drain_ring(struct net_device *dev)
+{
+ nv_drain_tx(dev);
+ nv_drain_rx(dev);
+}
+
+/*
+ * nv_start_xmit: dev->hard_start_xmit function
+ * Called with dev->xmit_lock held.
+ */
+static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int nr = np->next_tx % TX_RING;
+
+ np->tx_skbuff[nr] = skb;
+ np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
+ PCI_DMA_TODEVICE);
+
+ np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+
+ spin_lock_irq(&np->lock);
+ wmb();
+ np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission.\n",
+ dev->name, np->next_tx);
+ {
+ int j;
+ for (j=0; j<64; j++) {
+ if ((j%16) == 0)
+ dprintk("\n%03x:", j);
+ dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+ }
+ dprintk("\n");
+ }
+
+ np->next_tx++;
+
+ dev->trans_start = jiffies;
+ if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
+ netif_stop_queue(dev);
+ spin_unlock_irq(&np->lock);
+ writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
+ return 0;
+}
+
+/*
+ * nv_tx_done: check for completed packets, release the skbs.
+ *
+ * Caller must own np->lock.
+ */
+static void nv_tx_done(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 Flags;
+ int i;
+
+ while (np->nic_tx != np->next_tx) {
+ i = np->nic_tx % TX_RING;
+
+ Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+
+ dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
+ dev->name, np->nic_tx, Flags);
+ if (Flags & NV_TX_VALID)
+ break;
+ if (np->desc_ver == DESC_VER_1) {
+ if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+ NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+ if (Flags & NV_TX_UNDERFLOW)
+ np->stats.tx_fifo_errors++;
+ if (Flags & NV_TX_CARRIERLOST)
+ np->stats.tx_carrier_errors++;
+ np->stats.tx_errors++;
+ } else {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += np->tx_skbuff[i]->len;
+ }
+ } else {
+ if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+ NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+ if (Flags & NV_TX2_UNDERFLOW)
+ np->stats.tx_fifo_errors++;
+ if (Flags & NV_TX2_CARRIERLOST)
+ np->stats.tx_carrier_errors++;
+ np->stats.tx_errors++;
+ } else {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += np->tx_skbuff[i]->len;
+ }
+ }
+ pci_unmap_single(np->pci_dev, np->tx_dma[i],
+ np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = NULL;
+ np->nic_tx++;
+ }
+ if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+ netif_wake_queue(dev);
+}
+
+/*
+ * nv_tx_timeout: dev->tx_timeout function
+ * Called with dev->xmit_lock held.
+ */
+static void nv_tx_timeout(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
+ readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+
+ spin_lock_irq(&np->lock);
+
+ /* 1) stop tx engine */
+ nv_stop_tx(dev);
+
+ /* 2) check that the packets were not sent already: */
+ nv_tx_done(dev);
+
+ /* 3) if there are dead entries: clear everything */
+ if (np->next_tx != np->nic_tx) {
+ printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
+ nv_drain_tx(dev);
+ np->next_tx = np->nic_tx = 0;
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ netif_wake_queue(dev);
+ }
+
+ /* 4) restart tx engine */
+ nv_start_tx(dev);
+ spin_unlock_irq(&np->lock);
+}
+
+static void nv_rx_process(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 Flags;
+
+ for (;;) {
+ struct sk_buff *skb;
+ int len;
+ int i;
+ if (np->cur_rx - np->refill_rx >= RX_RING)
+ break; /* we scanned the whole ring - do not continue */
+
+ i = np->cur_rx % RX_RING;
+ Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
+ len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
+
+ dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
+ dev->name, np->cur_rx, Flags);
+
+ if (Flags & NV_RX_AVAIL)
+ break; /* still owned by hardware, */
+
+ /*
+ * the packet is for us - immediately tear down the pci mapping.
+ * TODO: check if a prefetch of the first cacheline improves
+ * the performance.
+ */
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ np->rx_skbuff[i]->len,
+ PCI_DMA_FROMDEVICE);
+
+ {
+ int j;
+ dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+ for (j=0; j<64; j++) {
+ if ((j%16) == 0)
+ dprintk("\n%03x:", j);
+ dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
+ }
+ dprintk("\n");
+ }
+ /* look at what we actually got: */
+ if (np->desc_ver == DESC_VER_1) {
+ if (!(Flags & NV_RX_DESCRIPTORVALID))
+ goto next_pkt;
+
+ if (Flags & NV_RX_MISSEDFRAME) {
+ np->stats.rx_missed_errors++;
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX_CRCERR) {
+ np->stats.rx_crc_errors++;
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX_OVERFLOW) {
+ np->stats.rx_over_errors++;
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX_ERROR) {
+ /* framing errors are soft errors, the rest is fatal. */
+ if (Flags & NV_RX_FRAMINGERR) {
+ if (Flags & NV_RX_SUBSTRACT1) {
+ len--;
+ }
+ } else {
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ }
+ } else {
+ if (!(Flags & NV_RX2_DESCRIPTORVALID))
+ goto next_pkt;
+
+ if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) {
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX2_CRCERR) {
+ np->stats.rx_crc_errors++;
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX2_OVERFLOW) {
+ np->stats.rx_over_errors++;
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ if (Flags & NV_RX2_ERROR) {
+ /* framing errors are soft errors, the rest is fatal. */
+ if (Flags & NV_RX2_FRAMINGERR) {
+ if (Flags & NV_RX2_SUBSTRACT1) {
+ len--;
+ }
+ } else {
+ np->stats.rx_errors++;
+ goto next_pkt;
+ }
+ }
+ Flags &= NV_RX2_CHECKSUMMASK;
+ if (Flags == NV_RX2_CHECKSUMOK1 ||
+ Flags == NV_RX2_CHECKSUMOK2 ||
+ Flags == NV_RX2_CHECKSUMOK3) {
+				dprintk(KERN_DEBUG "%s: hw checksum hit.\n", dev->name);
+ np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+				dprintk(KERN_DEBUG "%s: hw checksum miss.\n", dev->name);
+ }
+ }
+ /* got a valid packet - forward it to the network core */
+ skb = np->rx_skbuff[i];
+ np->rx_skbuff[i] = NULL;
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
+ dev->name, np->cur_rx, len, skb->protocol);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += len;
+next_pkt:
+ np->cur_rx++;
+ }
+}
+
+/*
+ * nv_change_mtu: dev->change_mtu function
+ * Called with dev_base_lock held for read.
+ */
+static int nv_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * nv_set_multicast: dev->set_multicast function
+ * Called with dev->xmit_lock held.
+ */
+static void nv_set_multicast(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 addr[2];
+ u32 mask[2];
+ u32 pff;
+
+ memset(addr, 0, sizeof(addr));
+ memset(mask, 0, sizeof(mask));
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ pff = NVREG_PFF_PROMISC;
+ } else {
+ pff = NVREG_PFF_MYADDR;
+
+ if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+ u32 alwaysOff[2];
+ u32 alwaysOn[2];
+
+ alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
+ if (dev->flags & IFF_ALLMULTI) {
+ alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
+ } else {
+ struct dev_mc_list *walk;
+
+ walk = dev->mc_list;
+ while (walk != NULL) {
+ u32 a, b;
+ a = le32_to_cpu(*(u32 *) walk->dmi_addr);
+ b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
+ alwaysOn[0] &= a;
+ alwaysOff[0] &= ~a;
+ alwaysOn[1] &= b;
+ alwaysOff[1] &= ~b;
+ walk = walk->next;
+ }
+ }
+ addr[0] = alwaysOn[0];
+ addr[1] = alwaysOn[1];
+ mask[0] = alwaysOn[0] | alwaysOff[0];
+ mask[1] = alwaysOn[1] | alwaysOff[1];
+ }
+ }
+ addr[0] |= NVREG_MCASTADDRA_FORCE;
+ pff |= NVREG_PFF_ALWAYS;
+ spin_lock_irq(&np->lock);
+ nv_stop_rx(dev);
+ writel(addr[0], base + NvRegMulticastAddrA);
+ writel(addr[1], base + NvRegMulticastAddrB);
+ writel(mask[0], base + NvRegMulticastMaskA);
+ writel(mask[1], base + NvRegMulticastMaskB);
+ writel(pff, base + NvRegPacketFilterFlags);
+ dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
+ dev->name);
+ nv_start_rx(dev);
+ spin_unlock_irq(&np->lock);
+}
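+
+/*
+ * Editorial illustration, not driver code: the alwaysOn/alwaysOff folding in
+ * nv_set_multicast() above keeps the bits set in every list entry (alwaysOn)
+ * and the bits clear in every entry (alwaysOff).  The programmed address is
+ * alwaysOn, and the mask marks the bit positions on which all entries agree;
+ * presumably the hardware then accepts addresses that match the programmed
+ * value in the masked positions (the register semantics are not documented
+ * in this file).  The invented helper below folds one 32-bit word the same
+ * way:
+ */
+static inline void example_multicast_fold(const u32 *entries, int n,
+					  u32 *addr, u32 *mask)
+{
+	u32 always_on = 0xffffffff, always_off = 0xffffffff;
+	int i;
+
+	for (i = 0; i < n; i++) {
+		always_on &= entries[i];
+		always_off &= ~entries[i];
+	}
+	*addr = always_on;
+	*mask = always_on | always_off;
+}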
+
+static int nv_update_linkspeed(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int adv, lpa;
+ int newls = np->linkspeed;
+ int newdup = np->duplex;
+ int mii_status;
+ int retval = 0;
+ u32 control_1000, status_1000, phyreg;
+
+ /* BMSR_LSTATUS is latched, read it twice:
+ * we want the current value.
+ */
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ if (!(mii_status & BMSR_LSTATUS)) {
+ dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
+ dev->name);
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ retval = 0;
+ goto set_speed;
+ }
+
+ if (np->autoneg == 0) {
+ dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
+ dev->name, np->fixed_mode);
+ if (np->fixed_mode & LPA_100FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 1;
+ } else if (np->fixed_mode & LPA_100HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 0;
+ } else if (np->fixed_mode & LPA_10FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 1;
+ } else {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ }
+ retval = 1;
+ goto set_speed;
+ }
+ /* check auto negotiation is complete */
+ if (!(mii_status & BMSR_ANEGCOMPLETE)) {
+ /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ retval = 0;
+ dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
+ goto set_speed;
+ }
+
+ retval = 1;
+ if (np->gigabit == PHY_GIGABIT) {
+ control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
+
+ if ((control_1000 & ADVERTISE_1000FULL) &&
+ (status_1000 & LPA_1000FULL)) {
+ dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
+ dev->name);
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
+ newdup = 1;
+ goto set_speed;
+ }
+ }
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+ dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
+ dev->name, adv, lpa);
+
+ /* FIXME: handle parallel detection properly */
+ lpa = lpa & adv;
+ if (lpa & LPA_100FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 1;
+ } else if (lpa & LPA_100HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 0;
+ } else if (lpa & LPA_10FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 1;
+ } else if (lpa & LPA_10HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ } else {
+ dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ }
+
+set_speed:
+ if (np->duplex == newdup && np->linkspeed == newls)
+ return retval;
+
+ dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
+ dev->name, np->linkspeed, np->duplex, newls, newdup);
+
+ np->duplex = newdup;
+ np->linkspeed = newls;
+
+ if (np->gigabit == PHY_GIGABIT) {
+ phyreg = readl(base + NvRegRandomSeed);
+ phyreg &= ~(0x3FF00);
+ if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+ phyreg |= NVREG_RNDSEED_FORCE3;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+ phyreg |= NVREG_RNDSEED_FORCE2;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_RNDSEED_FORCE;
+ writel(phyreg, base + NvRegRandomSeed);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ return retval;
+}
+
+static void nv_linkchange(struct net_device *dev)
+{
+ if (nv_update_linkspeed(dev)) {
+ if (netif_carrier_ok(dev)) {
+ nv_stop_rx(dev);
+ } else {
+ netif_carrier_on(dev);
+ printk(KERN_INFO "%s: link up.\n", dev->name);
+ }
+ nv_start_rx(dev);
+ } else {
+ if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ nv_stop_rx(dev);
+ }
+ }
+}
+
+static void nv_link_irq(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 miistat;
+
+ miistat = readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
+
+ if (miistat & (NVREG_MIISTAT_LINKCHANGE))
+ nv_linkchange(dev);
+ dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
+}
+
+static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+ int i;
+
+ dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ pci_push(base);
+ dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+ if (!(events & np->irqmask))
+ break;
+
+ if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
+ spin_lock(&np->lock);
+ nv_tx_done(dev);
+ spin_unlock(&np->lock);
+ }
+
+ if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
+ nv_rx_process(dev);
+ if (nv_alloc_rx(dev)) {
+ spin_lock(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock(&np->lock);
+ }
+ }
+
+ if (events & NVREG_IRQ_LINK) {
+ spin_lock(&np->lock);
+ nv_link_irq(dev);
+ spin_unlock(&np->lock);
+ }
+ if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+ spin_lock(&np->lock);
+ nv_linkchange(dev);
+ spin_unlock(&np->lock);
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ }
+ if (events & (NVREG_IRQ_TX_ERR)) {
+ dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+ dev->name, events);
+ }
+ if (events & (NVREG_IRQ_UNKNOWN)) {
+ printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+ dev->name, events);
+ }
+ if (i > max_interrupt_work) {
+ spin_lock(&np->lock);
+ /* disable interrupts on the nic */
+ writel(0, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown)
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
+ spin_unlock(&np->lock);
+ break;
+ }
+
+ }
+ dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
+
+ return IRQ_RETVAL(i);
+}
+
+static void nv_do_nic_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ disable_irq(dev->irq);
+ /* FIXME: Do we need synchronize_irq(dev->irq) here? */
+ /*
+	 * reenable interrupts on the nic; we have to do this before calling
+	 * nv_nic_irq because that may decide to disable them again
+ */
+ writel(np->irqmask, base + NvRegIrqMask);
+ pci_push(base);
+ nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+ enable_irq(dev->irq);
+}
+
+static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ strcpy(info->driver, "forcedeth");
+ strcpy(info->version, FORCEDETH_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ wolinfo->supported = WAKE_MAGIC;
+
+ spin_lock_irq(&np->lock);
+ if (np->wolenabled)
+ wolinfo->wolopts = WAKE_MAGIC;
+ spin_unlock_irq(&np->lock);
+}
+
+static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ spin_lock_irq(&np->lock);
+ if (wolinfo->wolopts == 0) {
+ writel(0, base + NvRegWakeUpFlags);
+ np->wolenabled = 0;
+ }
+ if (wolinfo->wolopts & WAKE_MAGIC) {
+ writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+ np->wolenabled = 1;
+ }
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int adv;
+
+ spin_lock_irq(&np->lock);
+ ecmd->port = PORT_MII;
+ if (!netif_running(dev)) {
+ /* We do not track link speed / duplex setting if the
+ * interface is disabled. Force a link check */
+ nv_update_linkspeed(dev);
+ }
+ switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+ case NVREG_LINKSPEED_10:
+ ecmd->speed = SPEED_10;
+ break;
+ case NVREG_LINKSPEED_100:
+ ecmd->speed = SPEED_100;
+ break;
+ case NVREG_LINKSPEED_1000:
+ ecmd->speed = SPEED_1000;
+ break;
+ }
+ ecmd->duplex = DUPLEX_HALF;
+ if (np->duplex)
+ ecmd->duplex = DUPLEX_FULL;
+
+ ecmd->autoneg = np->autoneg;
+
+ ecmd->advertising = ADVERTISED_MII;
+ if (np->autoneg) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ } else {
+ adv = np->fixed_mode;
+ }
+ if (adv & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (np->autoneg && np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ if (adv & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+
+ ecmd->supported = (SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_MII);
+ if (np->gigabit == PHY_GIGABIT)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->phy_address = np->phyaddr;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_EXTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != np->phyaddr) {
+ /* TODO: support switching between multiple phys. Should be
+ * trivial, but not enabled due to lack of test hardware. */
+ return -EINVAL;
+ }
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 mask;
+
+ mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ if (np->gigabit == PHY_GIGABIT)
+ mask |= ADVERTISED_1000baseT_Full;
+
+ if ((ecmd->advertising & mask) == 0)
+ return -EINVAL;
+
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+		/* Note: autonegotiation disabled with speed 1000 is intentionally
+		 * forbidden - no one should need that. */
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&np->lock);
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ int adv, bmcr;
+
+ np->autoneg = 1;
+
+ /* advertise only what has been requested */
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
+ mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ }
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+ } else {
+ int adv, bmcr;
+
+ np->autoneg = 0;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_10HALF;
+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_10FULL;
+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_100HALF;
+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_100FULL;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+ np->fixed_mode = adv;
+
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
+ mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ }
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
+ if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ bmcr |= BMCR_FULLDPLX;
+ if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ bmcr |= BMCR_SPEED100;
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+ if (netif_running(dev)) {
+ /* Wait a bit and then reconfigure the nic. */
+ udelay(10);
+ nv_linkchange(dev);
+ }
+ }
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = nv_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_wol = nv_get_wol,
+ .set_wol = nv_set_wol,
+ .get_settings = nv_get_settings,
+ .set_settings = nv_set_settings,
+};
+
+static int nv_open(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret, oom, i;
+
+ dprintk(KERN_DEBUG "nv_open: begin\n");
+
+ /* 1) erase previous misconfiguration */
+ /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
+ writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
+ writel(0, base + NvRegMulticastAddrB);
+ writel(0, base + NvRegMulticastMaskA);
+ writel(0, base + NvRegMulticastMaskB);
+ writel(0, base + NvRegPacketFilterFlags);
+
+ writel(0, base + NvRegTransmitterControl);
+ writel(0, base + NvRegReceiverControl);
+
+ writel(0, base + NvRegAdapterControl);
+
+ /* 2) initialize descriptor rings */
+ oom = nv_init_ring(dev);
+
+ writel(0, base + NvRegLinkSpeed);
+ writel(0, base + NvRegUnknownTransmitterReg);
+ nv_txrx_reset(dev);
+ writel(0, base + NvRegUnknownSetupReg6);
+
+ np->in_shutdown = 0;
+
+ /* 3) set mac address */
+ {
+ u32 mac[2];
+
+ mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+ writel(mac[0], base + NvRegMacAddrA);
+ writel(mac[1], base + NvRegMacAddrB);
+ }
+
+ /* 4) give hw rings */
+ writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+
+ /* 5) continue setup */
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
+ writel(np->desc_ver, base + NvRegTxRxControl);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
+ reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+ NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
+ KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
+
+ writel(0, base + NvRegUnknownSetupReg4);
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+
+ /* 6) continue setup */
+ writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
+ writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
+ writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
+ writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+
+ writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
+ get_random_bytes(&i, sizeof(i));
+ writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
+ writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
+ writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
+ writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
+ base + NvRegAdapterControl);
+ writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
+ writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
+ writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
+
+ i = readl(base + NvRegPowerState);
+ if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+ writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
+
+ pci_push(base);
+ udelay(10);
+ writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
+
+ writel(0, base + NvRegIrqMask);
+ pci_push(base);
+ writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ pci_push(base);
+
+ ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
+ if (ret)
+ goto out_drain;
+
+ /* ask for interrupts */
+ writel(np->irqmask, base + NvRegIrqMask);
+
+ spin_lock_irq(&np->lock);
+ writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
+ writel(0, base + NvRegMulticastAddrB);
+ writel(0, base + NvRegMulticastMaskA);
+ writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
+ /* One manual link speed update: Interrupts are enabled, future link
+ * speed changes cause interrupts and are handled by nv_link_irq().
+ */
+ {
+ u32 miistat;
+ miistat = readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
+ }
+ ret = nv_update_linkspeed(dev);
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ netif_start_queue(dev);
+ if (ret) {
+ netif_carrier_on(dev);
+ } else {
+		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ if (oom)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+out_drain:
+ drain_ring(dev);
+ return ret;
+}
+
+static int nv_close(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base;
+
+ spin_lock_irq(&np->lock);
+ np->in_shutdown = 1;
+ spin_unlock_irq(&np->lock);
+ synchronize_irq(dev->irq);
+
+ del_timer_sync(&np->oom_kick);
+ del_timer_sync(&np->nic_poll);
+
+ netif_stop_queue(dev);
+ spin_lock_irq(&np->lock);
+ nv_stop_tx(dev);
+ nv_stop_rx(dev);
+ nv_txrx_reset(dev);
+
+ /* disable interrupts on the nic or we will lock up */
+ base = get_hwbase(dev);
+ writel(0, base + NvRegIrqMask);
+ pci_push(base);
+ dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
+
+ spin_unlock_irq(&np->lock);
+
+ free_irq(dev->irq, dev);
+
+ drain_ring(dev);
+
+ if (np->wolenabled)
+ nv_start_rx(dev);
+
+ /* FIXME: power down nic */
+
+ return 0;
+}
+
+static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct fe_priv *np;
+ unsigned long addr;
+ u8 __iomem *base;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof(struct fe_priv));
+ err = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ np = get_nvpriv(dev);
+ np->pci_dev = pci_dev;
+ spin_lock_init(&np->lock);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pci_dev->dev);
+
+ init_timer(&np->oom_kick);
+ np->oom_kick.data = (unsigned long) dev;
+ np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
+ init_timer(&np->nic_poll);
+ np->nic_poll.data = (unsigned long) dev;
+ np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
+ err, pci_name(pci_dev));
+ goto out_free;
+ }
+
+ pci_set_master(pci_dev);
+
+ err = pci_request_regions(pci_dev, DRV_NAME);
+ if (err < 0)
+ goto out_disable;
+
+ err = -EINVAL;
+ addr = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
+ pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
+ pci_resource_len(pci_dev, i),
+ pci_resource_flags(pci_dev, i));
+ if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
+ pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+ addr = pci_resource_start(pci_dev, i);
+ break;
+ }
+ }
+ if (i == DEVICE_COUNT_RESOURCE) {
+ printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
+ pci_name(pci_dev));
+ goto out_relreg;
+ }
+
+ /* handle different descriptor versions */
+ if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
+ pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
+ pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3)
+ np->desc_ver = DESC_VER_1;
+ else
+ np->desc_ver = DESC_VER_2;
+
+ err = -ENOMEM;
+ np->base = ioremap(addr, NV_PCI_REGSZ);
+ if (!np->base)
+ goto out_relreg;
+ dev->base_addr = (unsigned long)np->base;
+ dev->irq = pci_dev->irq;
+ np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ &np->ring_addr);
+ if (!np->rx_ring)
+ goto out_unmap;
+ np->tx_ring = &np->rx_ring[RX_RING];
+
+ dev->open = nv_open;
+ dev->stop = nv_close;
+ dev->hard_start_xmit = nv_start_xmit;
+ dev->get_stats = nv_get_stats;
+ dev->change_mtu = nv_change_mtu;
+ dev->set_multicast_list = nv_set_multicast;
+ SET_ETHTOOL_OPS(dev, &ops);
+ dev->tx_timeout = nv_tx_timeout;
+ dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
+
+ pci_set_drvdata(pci_dev, dev);
+
+ /* read the mac address */
+ base = get_hwbase(dev);
+ np->orig_mac[0] = readl(base + NvRegMacAddrA);
+ np->orig_mac[1] = readl(base + NvRegMacAddrB);
+
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /*
+		 * Bad MAC address. At least one BIOS sets the MAC address
+		 * to 01:23:45:67:89:ab
+		 */
+		printk(KERN_ERR "%s: Invalid MAC address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ pci_name(pci_dev),
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x6c;
+ get_random_bytes(&dev->dev_addr[3], 3);
+ }
+
+ dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /* disable WOL */
+ writel(0, base + NvRegWakeUpFlags);
+ np->wolenabled = 0;
+
+ if (np->desc_ver == DESC_VER_1) {
+ np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
+ if (id->driver_data & DEV_NEED_LASTPACKET1)
+ np->tx_flags |= NV_TX_LASTPACKET1;
+ } else {
+ np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
+ if (id->driver_data & DEV_NEED_LASTPACKET1)
+ np->tx_flags |= NV_TX2_LASTPACKET1;
+ }
+ if (id->driver_data & DEV_IRQMASK_1)
+ np->irqmask = NVREG_IRQMASK_WANTED_1;
+ if (id->driver_data & DEV_IRQMASK_2)
+ np->irqmask = NVREG_IRQMASK_WANTED_2;
+ if (id->driver_data & DEV_NEED_TIMERIRQ)
+ np->irqmask |= NVREG_IRQ_TIMER;
+ if (id->driver_data & DEV_NEED_LINKTIMER) {
+ dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
+ np->need_linktimer = 1;
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ } else {
+ dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
+ np->need_linktimer = 0;
+ }
+
+ /* find a suitable phy */
+ for (i = 1; i < 32; i++) {
+ int id1, id2;
+
+ spin_lock_irq(&np->lock);
+ id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
+ spin_unlock_irq(&np->lock);
+ if (id1 < 0 || id1 == 0xffff)
+ continue;
+ spin_lock_irq(&np->lock);
+ id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
+ spin_unlock_irq(&np->lock);
+ if (id2 < 0 || id2 == 0xffff)
+ continue;
+
+ id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
+ id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
+ dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
+ pci_name(pci_dev), id1, id2, i);
+ np->phyaddr = i;
+ np->phy_oui = id1 | id2;
+ break;
+ }
+ if (i == 32) {
+ /* PHY in isolate mode? No phy attached and user wants to
+ * test loopback? Very odd, but can be correct.
+ */
+ printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
+ pci_name(pci_dev));
+ }
+
+ if (i != 32) {
+ /* reset it */
+ phy_init(dev);
+ }
+
+ /* set default link speed settings */
+ np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ np->duplex = 0;
+ np->autoneg = 1;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
+ goto out_freering;
+ }
+ printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
+ dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ pci_name(pci_dev));
+
+ return 0;
+
+out_freering:
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ np->rx_ring, np->ring_addr);
+ pci_set_drvdata(pci_dev, NULL);
+out_unmap:
+ iounmap(get_hwbase(dev));
+out_relreg:
+ pci_release_regions(pci_dev);
+out_disable:
+ pci_disable_device(pci_dev);
+out_free:
+ free_netdev(dev);
+out:
+ return err;
+}
+
+static void __devexit nv_remove(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ unregister_netdev(dev);
+
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
+ /* free all structures */
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+ iounmap(get_hwbase(dev));
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ free_netdev(dev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+static struct pci_device_id pci_tbl[] = {
+ { /* nForce Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce2 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce3 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce3 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* nForce3 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* nForce3 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* nForce3 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* CK804 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* CK804 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* MCP04 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ { /* MCP04 Ethernet Controller */
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
+ },
+ {0,},
+};
+
+static struct pci_driver driver = {
+ .name = "forcedeth",
+ .id_table = pci_tbl,
+ .probe = nv_probe,
+ .remove = __devexit_p(nv_remove),
+};
+
+
+static int __init init_nic(void)
+{
+ printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
+ return pci_module_init(&driver);
+}
+
+static void __exit exit_nic(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+
+MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
+MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+module_init(init_nic);
+module_exit(exit_nic);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
new file mode 100644
index 000000000000..b43b2b11aacd
--- /dev/null
+++ b/drivers/net/gianfar.c
@@ -0,0 +1,1849 @@
+/*
+ * drivers/net/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ * This driver is designed for the Triple-speed Ethernet
+ * controllers on the Freescale 8540/8560 integrated processors,
+ * as well as the Fast Ethernet Controller on the 8540.
+ *
+ * The driver is initialized through platform_device. Structures which
+ * define the configuration needed by the board are defined in a
+ * board structure in arch/ppc/platforms (though I do not
+ * discount the possibility that other architectures could one
+ * day be supported). One assumption the driver currently makes
+ * is that the PHY is configured in such a way as to advertise all
+ * capabilities. This is a sensible default, and on certain
+ * PHYs, changing this default encounters substantial errata
+ * issues. Future versions may remove this requirement, but for
+ * now, it is best for the firmware to ensure this is the case.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time have passed.). In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. Without NAPI, the packet(s) will be handled
+ * immediately. Both methods will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
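+
+/*
+ * Illustrative sketch (compiled out, not used by the driver): the receive
+ * path described above boils down to walking a ring of buffer descriptors
+ * until an empty one is found, wrapping back to the start when the last
+ * descriptor's wrap bit is set.  All names below (ex_rxbd, EX_EMPTY,
+ * EX_WRAP, ex_handle_frame, ex_clean_rx_ring) are hypothetical; the real
+ * receive path uses struct rxbd8 and the RXBD_* flags from gianfar.h and
+ * is implemented in gfar_clean_rx_ring() later in this file.
+ */
+#if 0
+struct ex_rxbd {
+	unsigned short status;	/* EX_EMPTY, EX_WRAP, error bits, ... */
+	unsigned short length;	/* frame length, written by the hardware */
+};
+#define EX_EMPTY	0x8000	/* descriptor is owned by the hardware */
+#define EX_WRAP		0x2000	/* last descriptor, wrap back to the start */
+
+static void ex_handle_frame(struct ex_rxbd *bd);	/* hypothetical consumer */
+
+/* Walk the ring from 'cur', handling at most 'budget' full descriptors,
+ * and return how many were processed: stop at the first empty descriptor,
+ * hand each full one to the stack, mark it empty again, and wrap when the
+ * wrap bit is seen. */
+static int ex_clean_rx_ring(struct ex_rxbd *ring, struct ex_rxbd *cur,
+			    int budget)
+{
+	int handled = 0;
+
+	while (!(cur->status & EX_EMPTY) && budget-- > 0) {
+		ex_handle_frame(cur);		/* pass the frame up */
+		cur->status |= EX_EMPTY;	/* give it back to the hardware */
+		handled++;
+
+		if (cur->status & EX_WRAP)	/* advance, wrapping if needed */
+			cur = ring;
+		else
+			cur++;
+	}
+	return handled;
+}
+#endif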
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 1000000
+#undef BRIEF_GFAR_ERRORS
+#undef VERBOSE_GFAR_ERRORS
+
+#ifdef CONFIG_GFAR_NAPI
+#define RECEIVE(x) netif_receive_skb(x)
+#else
+#define RECEIVE(x) netif_rx(x)
+#endif
+
+const char gfar_driver_name[] = "Gianfar Ethernet";
+const char gfar_driver_version[] = "1.1";
+
+int startup_gfar(struct net_device *dev);
+static int gfar_enet_open(struct net_device *dev);
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_timeout(struct net_device *dev);
+static int gfar_close(struct net_device *dev);
+struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+static struct net_device_stats *gfar_get_stats(struct net_device *dev);
+static int gfar_set_mac_address(struct net_device *dev);
+static int gfar_change_mtu(struct net_device *dev, int new_mtu);
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void gfar_phy_change(void *data);
+static void gfar_phy_timer(unsigned long data);
+static void adjust_link(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static int init_phy(struct net_device *dev);
+static int gfar_probe(struct device *device);
+static int gfar_remove(struct device *device);
+void free_skb_resources(struct gfar_private *priv);
+static void gfar_set_multi(struct net_device *dev);
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_poll(struct net_device *dev, int *budget);
+#endif
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
+static void gfar_phy_startup_timer(unsigned long data);
+
+extern struct ethtool_ops gfar_ethtool_ops;
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static int gfar_probe(struct device *device)
+{
+ u32 tempval;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct platform_device *pdev = to_platform_device(device);
+ struct gianfar_platform_data *einfo;
+ struct resource *r;
+ int idx;
+ int err = 0;
+ int dev_ethtool_ops = 0;
+
+ einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
+
+ if (einfo == NULL) {
+ printk(KERN_ERR "gfar %d: Missing additional data!\n",
+ pdev->id);
+
+ return -ENODEV;
+ }
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof (*priv));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = einfo;
+
+ /* fill out IRQ fields */
+ if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
+ priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
+ priv->interruptError = platform_get_irq_byname(pdev, "error");
+ } else {
+ priv->interruptTransmit = platform_get_irq(pdev, 0);
+ }
+
+ /* get a pointer to the register memory */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = (struct gfar *)
+ ioremap(r->start, sizeof (struct gfar));
+
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+
+ /* Set the PHY base address */
+ priv->phyregs = (struct gfar *)
+ ioremap(einfo->phy_reg_addr, sizeof (struct gfar));
+
+ if (priv->phyregs == NULL) {
+ err = -ENOMEM;
+ goto phy_regs_fail;
+ }
+
+ spin_lock_init(&priv->lock);
+
+ dev_set_drvdata(device, dev);
+
+ /* Stop the DMA engine now, in case it was running before */
+ /* (The firmware could have used it, and left it running). */
+ /* To do this, we write Graceful Receive Stop and Graceful */
+ /* Transmit Stop, and then wait until the corresponding bits */
+ /* in IEVENT indicate the stops have completed. */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+
+ /* Reset MAC layer */
+ gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ gfar_write(&priv->regs->maccfg1, tempval);
+
+ /* Initialize MACCFG2. */
+ gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+
+ /* Initialize ECNTRL */
+ gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+ /* Copy the station address into the dev structure, */
+ memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) (priv->regs);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ dev->open = gfar_enet_open;
+ dev->hard_start_xmit = gfar_start_xmit;
+ dev->tx_timeout = gfar_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_GFAR_NAPI
+ dev->poll = gfar_poll;
+ dev->weight = GFAR_DEV_WEIGHT;
+#endif
+ dev->stop = gfar_close;
+ dev->get_stats = gfar_get_stats;
+ dev->change_mtu = gfar_change_mtu;
+ dev->mtu = 1500;
+ dev->set_multicast_list = gfar_set_multi;
+
+ /* Index into the array of possible ethtool
+ * ops to catch all 4 possibilities */
+ if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) == 0)
+ dev_ethtool_ops += 1;
+
+ if((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE) == 0)
+ dev_ethtool_ops += 2;
+
+ dev->ethtool_ops = gfar_op_array[dev_ethtool_ops];
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+#ifdef CONFIG_GFAR_BUFSTASH
+ priv->rx_stash_size = STASH_LENGTH;
+#endif
+ priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
+ priv->txcoalescing = DEFAULT_TX_COALESCE;
+ priv->txcount = DEFAULT_TXCOUNT;
+ priv->txtime = DEFAULT_TXTIME;
+ priv->rxcoalescing = DEFAULT_RX_COALESCE;
+ priv->rxcount = DEFAULT_RXCOUNT;
+ priv->rxtime = DEFAULT_RXTIME;
+
+ err = register_netdev(dev);
+
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ /* Print out the device info */
+ printk(KERN_INFO DEVICE_NAME, dev->name);
+ for (idx = 0; idx < 6; idx++)
+ printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
+ printk("\n");
+
+ /* Even more device info helps when determining which kernel */
+ /* provided which set of benchmarks. Since this is global for all */
+ /* devices, we only print it once */
+#ifdef CONFIG_GFAR_NAPI
+ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
+#else
+ printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
+#endif
+ printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
+ dev->name, priv->rx_ring_size, priv->tx_ring_size);
+
+ return 0;
+
+register_fail:
+ iounmap((void *) priv->phyregs);
+phy_regs_fail:
+ iounmap((void *) priv->regs);
+regs_fail:
+ free_netdev(dev);
+	return err;
+}
+
+static int gfar_remove(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct gfar_private *priv = netdev_priv(dev);
+
+ dev_set_drvdata(device, NULL);
+
+ iounmap((void *) priv->regs);
+ iounmap((void *) priv->phyregs);
+ free_netdev(dev);
+
+ return 0;
+}
+
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, or a negative value on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_info *curphy;
+ unsigned int timeout = PHY_INIT_TIMEOUT;
+ struct gfar *phyregs = priv->phyregs;
+ struct gfar_mii_info *mii_info;
+ int err;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ mii_info = kmalloc(sizeof(struct gfar_mii_info),
+ GFP_KERNEL);
+
+ if(NULL == mii_info) {
+ printk(KERN_ERR "%s: Could not allocate mii_info\n",
+ dev->name);
+ return -ENOMEM;
+ }
+
+ mii_info->speed = SPEED_1000;
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->pause = 0;
+ mii_info->link = 1;
+
+ mii_info->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ mii_info->autoneg = 1;
+
+ spin_lock_init(&mii_info->mdio_lock);
+
+ mii_info->mii_id = priv->einfo->phyid;
+
+ mii_info->dev = dev;
+
+ mii_info->mdio_read = &read_phy_reg;
+ mii_info->mdio_write = &write_phy_reg;
+
+ priv->mii_info = mii_info;
+
+ /* Reset the management interface */
+ gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
+
+ /* Setup the MII Mgmt clock speed */
+ gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
+
+ /* Wait until the bus is free */
+	while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) &&
+			--timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+ printk(KERN_ERR "%s: The MII Bus is stuck!\n",
+ dev->name);
+ err = -1;
+ goto bus_fail;
+ }
+
+ /* get info for this PHY */
+ curphy = get_phy_info(priv->mii_info);
+
+ if (curphy == NULL) {
+ printk(KERN_ERR "%s: No PHY found\n", dev->name);
+ err = -1;
+ goto no_phy;
+ }
+
+ mii_info->phyinfo = curphy;
+
+ /* Run the commands which initialize the PHY */
+ if(curphy->init) {
+ err = curphy->init(priv->mii_info);
+
+ if (err)
+ goto phy_init_fail;
+ }
+
+ return 0;
+
+phy_init_fail:
+no_phy:
+bus_fail:
+ kfree(mii_info);
+
+ return err;
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+
+ /* Init hash registers to zero */
+ gfar_write(&priv->regs->iaddr0, 0);
+ gfar_write(&priv->regs->iaddr1, 0);
+ gfar_write(&priv->regs->iaddr2, 0);
+ gfar_write(&priv->regs->iaddr3, 0);
+ gfar_write(&priv->regs->iaddr4, 0);
+ gfar_write(&priv->regs->iaddr5, 0);
+ gfar_write(&priv->regs->iaddr6, 0);
+ gfar_write(&priv->regs->iaddr7, 0);
+
+ gfar_write(&priv->regs->gaddr0, 0);
+ gfar_write(&priv->regs->gaddr1, 0);
+ gfar_write(&priv->regs->gaddr2, 0);
+ gfar_write(&priv->regs->gaddr3, 0);
+ gfar_write(&priv->regs->gaddr4, 0);
+ gfar_write(&priv->regs->gaddr5, 0);
+ gfar_write(&priv->regs->gaddr6, 0);
+ gfar_write(&priv->regs->gaddr7, 0);
+
+ /* Zero out rctrl */
+ gfar_write(&priv->regs->rctrl, 0x00000000);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset((void *) &(priv->regs->rmon), 0,
+ sizeof (struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
+ gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+ }
+
+ /* Initialize the max receive buffer length */
+ gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+
+#ifdef CONFIG_GFAR_BUFSTASH
+ /* If we are stashing buffers, we need to set the
+ * extraction length to the size of the buffer */
+ gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
+#endif
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+
+ /* Setup Attributes so that snooping is on for rx */
+ gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
+ gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
+
+ /* Assign the TBI an address which won't conflict with the PHYs */
+ gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ unsigned long flags;
+ u32 tempval;
+
+ /* Lock it down */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Tell the kernel the link is down */
+ priv->mii_info->link = 0;
+ adjust_link(dev);
+
+ /* Mask all interrupts */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+ /* Clear all interrupts */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
+ != (DMACTRL_GRS | DMACTRL_GTS)) {
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+ }
+
+ /* Disable Rx and Tx */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
+ /* Clear any pending interrupts */
+ mii_clear_phy_interrupt(priv->mii_info);
+
+ /* Disable PHY Interrupts */
+ mii_configure_phy_interrupt(priv->mii_info,
+ MII_INTERRUPT_DISABLED);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Free the IRQs */
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ free_irq(priv->interruptError, dev);
+ free_irq(priv->interruptTransmit, dev);
+ free_irq(priv->interruptReceive, dev);
+ } else {
+ free_irq(priv->interruptTransmit, dev);
+ }
+
+ if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
+ free_irq(priv->einfo->interruptPHY, dev);
+ } else {
+ del_timer_sync(&priv->phy_info_timer);
+ }
+
+ free_skb_resources(priv);
+
+ dma_free_coherent(NULL,
+ sizeof(struct txbd8)*priv->tx_ring_size
+ + sizeof(struct rxbd8)*priv->rx_ring_size,
+ priv->tx_bd_base,
+ gfar_read(&regs->tbase));
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff */
+void free_skb_resources(struct gfar_private *priv)
+{
+ struct rxbd8 *rxbdp;
+ struct txbd8 *txbdp;
+ int i;
+
+ /* Go through all the buffer descriptors and free their data buffers */
+ txbdp = priv->tx_bd_base;
+
+ for (i = 0; i < priv->tx_ring_size; i++) {
+
+ if (priv->tx_skbuff[i]) {
+ dma_unmap_single(NULL, txbdp->bufPtr,
+ txbdp->length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+ }
+
+ kfree(priv->tx_skbuff);
+
+ rxbdp = priv->rx_bd_base;
+
+ /* rx_skbuff is not guaranteed to be allocated, so only
+ * free it and its contents if it is allocated */
+ if(priv->rx_skbuff != NULL) {
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(NULL, rxbdp->bufPtr,
+ priv->rx_buffer_size
+ + RXBUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+
+ rxbdp->status = 0;
+ rxbdp->length = 0;
+ rxbdp->bufPtr = 0;
+
+ rxbdp++;
+ }
+
+ kfree(priv->rx_skbuff);
+ }
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *dev)
+{
+ struct txbd8 *txbdp;
+ struct rxbd8 *rxbdp;
+ dma_addr_t addr;
+ unsigned long vaddr;
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+ int err = 0;
+
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+ /* Allocate memory for the buffer descriptors */
+ vaddr = (unsigned long) dma_alloc_coherent(NULL,
+ sizeof (struct txbd8) * priv->tx_ring_size +
+ sizeof (struct rxbd8) * priv->rx_ring_size,
+ &addr, GFP_KERNEL);
+
+ if (vaddr == 0) {
+ printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
+ dev->name);
+ return -ENOMEM;
+ }
+
+ priv->tx_bd_base = (struct txbd8 *) vaddr;
+
+ /* enet DMA only understands physical addresses */
+ gfar_write(&regs->tbase, addr);
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
+ vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
+ priv->rx_bd_base = (struct rxbd8 *) vaddr;
+ gfar_write(&regs->rbase, addr);
+
+ /* Setup the skbuff rings */
+ priv->tx_skbuff =
+ (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+ priv->tx_ring_size, GFP_KERNEL);
+
+ if (priv->tx_skbuff == NULL) {
+ printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
+ dev->name);
+ err = -ENOMEM;
+ goto tx_skb_fail;
+ }
+
+ for (i = 0; i < priv->tx_ring_size; i++)
+ priv->tx_skbuff[i] = NULL;
+
+ priv->rx_skbuff =
+ (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+ priv->rx_ring_size, GFP_KERNEL);
+
+ if (priv->rx_skbuff == NULL) {
+ printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
+ dev->name);
+ err = -ENOMEM;
+ goto rx_skb_fail;
+ }
+
+ for (i = 0; i < priv->rx_ring_size; i++)
+ priv->rx_skbuff[i] = NULL;
+
+ /* Initialize some variables in our dev structure */
+ priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
+ priv->cur_rx = priv->rx_bd_base;
+ priv->skb_curtx = priv->skb_dirtytx = 0;
+ priv->skb_currx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = priv->tx_bd_base;
+ for (i = 0; i < priv->tx_ring_size; i++) {
+ txbdp->status = 0;
+ txbdp->length = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status |= TXBD_WRAP;
+
+ rxbdp = priv->rx_bd_base;
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct sk_buff *skb = NULL;
+
+ rxbdp->status = 0;
+
+ skb = gfar_new_skb(dev, rxbdp);
+
+ priv->rx_skbuff[i] = skb;
+
+ rxbdp++;
+ }
+
+ /* Set the last descriptor in the ring to wrap */
+ rxbdp--;
+ rxbdp->status |= RXBD_WRAP;
+
+ /* If the device has multiple interrupts, register for
+ * them. Otherwise, only register for the one */
+ if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ /* Install our interrupt handlers for Error,
+ * Transmit, and Receive */
+ if (request_irq(priv->interruptError, gfar_error,
+ 0, "enet_error", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->interruptError);
+
+ err = -1;
+ goto err_irq_fail;
+ }
+
+ if (request_irq(priv->interruptTransmit, gfar_transmit,
+ 0, "enet_tx", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->interruptTransmit);
+
+ err = -1;
+
+ goto tx_irq_fail;
+ }
+
+ if (request_irq(priv->interruptReceive, gfar_receive,
+ 0, "enet_rx", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
+ dev->name, priv->interruptReceive);
+
+ err = -1;
+ goto rx_irq_fail;
+ }
+ } else {
+ if (request_irq(priv->interruptTransmit, gfar_interrupt,
+ 0, "gfar_interrupt", dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d\n",
+ dev->name, priv->interruptError);
+
+ err = -1;
+ goto err_irq_fail;
+ }
+ }
+
+ /* Set up the PHY change work queue */
+ INIT_WORK(&priv->tq, gfar_phy_change, dev);
+
+ init_timer(&priv->phy_info_timer);
+ priv->phy_info_timer.function = &gfar_phy_startup_timer;
+ priv->phy_info_timer.data = (unsigned long) priv->mii_info;
+ mod_timer(&priv->phy_info_timer, jiffies + HZ);
+
+ /* Configure the coalescing support */
+ if (priv->txcoalescing)
+ gfar_write(&regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&regs->txic, 0);
+
+ if (priv->rxcoalescing)
+ gfar_write(&regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&regs->rxic, 0);
+
+ init_waitqueue_head(&priv->rxcleanupq);
+
+ /* Enable Rx and Tx in MACCFG1 */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ /* Clear THLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+
+ return 0;
+
+rx_irq_fail:
+ free_irq(priv->interruptTransmit, dev);
+tx_irq_fail:
+ free_irq(priv->interruptError, dev);
+err_irq_fail:
+rx_skb_fail:
+ free_skb_resources(priv);
+tx_skb_fail:
+ dma_free_coherent(NULL,
+ sizeof(struct txbd8)*priv->tx_ring_size
+ + sizeof(struct rxbd8)*priv->rx_ring_size,
+ priv->tx_bd_base,
+ gfar_read(&regs->tbase));
+
+ if (priv->mii_info->phyinfo->close)
+ priv->mii_info->phyinfo->close(priv->mii_info);
+
+ kfree(priv->mii_info);
+
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int gfar_enet_open(struct net_device *dev)
+{
+ int err;
+
+ /* Initialize a bunch of registers */
+ init_registers(dev);
+
+ gfar_set_mac_address(dev);
+
+ err = init_phy(dev);
+
+ if(err)
+ return err;
+
+ err = startup_gfar(dev);
+
+ netif_start_queue(dev);
+
+ return err;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *txbdp;
+
+ /* Update transmit stats */
+ priv->stats.tx_bytes += skb->len;
+
+ /* Lock priv now */
+ spin_lock_irq(&priv->lock);
+
+ /* Point at the first free tx descriptor */
+ txbdp = priv->cur_tx;
+
+ /* Clear all but the WRAP status flags */
+ txbdp->status &= TXBD_WRAP;
+
+ /* Set buffer length and pointer */
+ txbdp->length = skb->len;
+ txbdp->bufPtr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+
+ /* Save the skb pointer so we can free it later */
+ priv->tx_skbuff[priv->skb_curtx] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ priv->skb_curtx =
+ (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ /* Flag the BD as interrupt-causing */
+ txbdp->status |= TXBD_INTERRUPT;
+
+ /* Flag the BD as ready to go, last in frame, and */
+ /* in need of CRC */
+ txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
+
+ dev->trans_start = jiffies;
+
+ /* If this was the last BD in the ring, the next one */
+ /* is at the beginning of the ring */
+ if (txbdp->status & TXBD_WRAP)
+ txbdp = priv->tx_bd_base;
+ else
+ txbdp++;
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (txbdp == priv->dirty_tx) {
+ netif_stop_queue(dev);
+
+ priv->stats.tx_fifo_errors++;
+ }
+
+ /* Update the current txbd to the next one */
+ priv->cur_tx = txbdp;
+
+ /* Tell the DMA to go go go */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+
+ /* Unlock priv */
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ stop_gfar(dev);
+
+ /* Shutdown the PHY */
+ if (priv->mii_info->phyinfo->close)
+ priv->mii_info->phyinfo->close(priv->mii_info);
+
+ kfree(priv->mii_info);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/* returns a net_device_stats structure pointer */
+static struct net_device_stats * gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return &(priv->stats);
+}
+
+/* Changes the mac address if the controller is not running. */
+int gfar_set_mac_address(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i;
+ char tmpbuf[MAC_ADDR_LEN];
+ u32 tempval;
+
+	/* Copy the address into the MAC registers backwards, since the */
+	/* station address registers expect the bytes in reverse order */
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
+
+ gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
+
+ tempval = *((u32 *) (tmpbuf + 4));
+
+ gfar_write(&priv->regs->macstnaddr2, tempval);
+
+ return 0;
+}
+
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int tempsize, tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ int oldsize = priv->rx_buffer_size;
+ int frame_size = new_mtu + 18;
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
+ return -EINVAL;
+ }
+
+ tempsize =
+ (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ /* Only stop and start the controller if it isn't already
+ * stopped */
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ stop_gfar(dev);
+
+ priv->rx_buffer_size = tempsize;
+
+ dev->mtu = new_mtu;
+
+ gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length */
+ tempval = gfar_read(&priv->regs->maccfg2);
+
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+ tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ else
+ tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+
+ gfar_write(&priv->regs->maccfg2, tempval);
+
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ startup_gfar(dev);
+
+ return 0;
+}
+
+/* gfar_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem. */
+static void gfar_timeout(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ priv->stats.tx_errors++;
+
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ startup_gfar(dev);
+ }
+
+ netif_schedule(dev);
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp;
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+ /* Lock priv */
+ spin_lock(&priv->lock);
+ bdp = priv->dirty_tx;
+ while ((bdp->status & TXBD_READY) == 0) {
+ /* If dirty_tx and cur_tx are the same, then either the */
+ /* ring is empty or full now (it could only be full in the beginning, */
+ /* obviously). If it is empty, we are done. */
+ if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
+ break;
+
+ priv->stats.tx_packets++;
+
+ /* Deferred means some collisions occurred during transmit, */
+ /* but we eventually sent the packet. */
+ if (bdp->status & TXBD_DEF)
+ priv->stats.collisions++;
+
+ /* Free the sk buffer associated with this TxBD */
+ dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+ priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+ priv->skb_dirtytx =
+ (priv->skb_dirtytx +
+ 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ /* update bdp to point at next bd in the ring (wrapping if necessary) */
+ if (bdp->status & TXBD_WRAP)
+ bdp = priv->tx_bd_base;
+ else
+ bdp++;
+
+ /* Move dirty_tx to be the next bd */
+ priv->dirty_tx = bdp;
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } /* while ((bdp->status & TXBD_READY) == 0) */
+
+ /* If we are coalescing the interrupts, reset the timer */
+ /* Otherwise, clear it */
+ if (priv->txcoalescing)
+ gfar_write(&priv->regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&priv->regs->txic, 0);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+ unsigned int timeout = SKB_ALLOC_TIMEOUT;
+
+ /* We have to allocate the skb, so keep trying till we succeed */
+ while ((!skb) && timeout--)
+ skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+
+ if (skb == NULL)
+ return NULL;
+
+	/* We need the data buffer to be aligned properly. We reserve as
+	 * many bytes as needed to align it; see the note on the alignment
+	 * arithmetic after this function.
+	 */
+ skb_reserve(skb,
+ RXBUF_ALIGNMENT -
+ (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
+
+ skb->dev = dev;
+
+ bdp->bufPtr = dma_map_single(NULL, skb->data,
+ priv->rx_buffer_size + RXBUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ bdp->length = 0;
+
+ /* Mark the buffer empty */
+ bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
+
+ return skb;
+}
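+
+/*
+ * Note on the skb_reserve() arithmetic above (illustrative only, compiled
+ * out): for a power-of-two alignment A, reserving A - (addr & (A - 1))
+ * bytes moves skb->data up to the next multiple of A.  For example, with
+ * A = 64 and skb->data at 0x1008, 64 - (0x1008 & 63) = 56 bytes are
+ * reserved and the data pointer lands on 0x1040.  An already aligned
+ * pointer gets a full A bytes reserved, which is consistent with the
+ * extra RXBUF_ALIGNMENT bytes requested in the allocation above.  The
+ * helper name below is hypothetical.
+ */
+#if 0
+static unsigned int ex_align_reserve(unsigned long addr, unsigned int align)
+{
+	/* align must be a power of two; returns 1..align bytes to reserve */
+	return align - (addr & (align - 1));
+}
+#endif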
+
+static inline void count_errors(unsigned short status, struct gfar_private *priv)
+{
+ struct net_device_stats *stats = &priv->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors
+ * matter */
+ if (status & RXBD_TRUNCATED) {
+ stats->rx_length_errors++;
+
+ estats->rx_trunc++;
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (status & RXBD_LARGE)
+ estats->rx_large++;
+ else
+ estats->rx_short++;
+ }
+ if (status & RXBD_NONOCTET) {
+ stats->rx_frame_errors++;
+ estats->rx_nonoctet++;
+ }
+ if (status & RXBD_CRCERR) {
+ estats->rx_crcerr++;
+ stats->rx_crc_errors++;
+ }
+ if (status & RXBD_OVERRUN) {
+ estats->rx_overrun++;
+ stats->rx_crc_errors++;
+ }
+}
+
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+#ifdef CONFIG_GFAR_NAPI
+ u32 tempval;
+#endif
+
+ /* Clear IEVENT, so rx interrupt isn't called again
+ * because of this interrupt */
+ gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+ /* support NAPI */
+#ifdef CONFIG_GFAR_NAPI
+ if (netif_rx_schedule_prep(dev)) {
+ tempval = gfar_read(&priv->regs->imask);
+ tempval &= IMASK_RX_DISABLED;
+ gfar_write(&priv->regs->imask, tempval);
+
+ __netif_rx_schedule(dev);
+ } else {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
+ dev->name, gfar_read(&priv->regs->ievent),
+ gfar_read(&priv->regs->imask));
+#endif
+ }
+#else
+
+ spin_lock(&priv->lock);
+ gfar_clean_rx_ring(dev, priv->rx_ring_size);
+
+ /* If we are coalescing interrupts, update the timer */
+ /* Otherwise, clear it */
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ /* Just in case we need to wake the ring param changer */
+ priv->rxclean = 1;
+
+ spin_unlock(&priv->lock);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+
+/* gfar_process_frame() -- handle one incoming packet if skb
+ * isn't NULL. */
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int length)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (skb == NULL) {
+#ifdef BRIEF_GFAR_ERRORS
+		printk(KERN_WARNING "%s: Missing skb!\n",
+ dev->name);
+#endif
+ priv->stats.rx_dropped++;
+ priv->extra_stats.rx_skbmissing++;
+ } else {
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Send the packet up the stack */
+ if (RECEIVE(skb) == NET_RX_DROP) {
+ priv->extra_stats.kernel_dropped++;
+ }
+ }
+
+ return 0;
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
+ */
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+{
+ struct rxbd8 *bdp;
+ struct sk_buff *skb;
+ u16 pkt_len;
+ int howmany = 0;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Get the first full descriptor */
+ bdp = priv->cur_rx;
+
+ while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+ skb = priv->rx_skbuff[priv->skb_currx];
+
+ if (!(bdp->status &
+ (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
+ | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+ /* Increment the number of packets */
+ priv->stats.rx_packets++;
+ howmany++;
+
+ /* Remove the FCS from the packet length */
+ pkt_len = bdp->length - 4;
+
+ gfar_process_frame(dev, skb, pkt_len);
+
+ priv->stats.rx_bytes += pkt_len;
+ } else {
+ count_errors(bdp->status, priv);
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ priv->rx_skbuff[priv->skb_currx] = NULL;
+ }
+
+ dev->last_rx = jiffies;
+
+ /* Clear the status flags for this buffer */
+ bdp->status &= ~RXBD_STATS;
+
+ /* Add another skb for the future */
+ skb = gfar_new_skb(dev, bdp);
+ priv->rx_skbuff[priv->skb_currx] = skb;
+
+ /* Update to the next pointer */
+ if (bdp->status & RXBD_WRAP)
+ bdp = priv->rx_bd_base;
+ else
+ bdp++;
+
+ /* update to point at the next skb */
+ priv->skb_currx =
+ (priv->skb_currx +
+ 1) & RX_RING_MOD_MASK(priv->rx_ring_size);
+
+ }
+
+ /* Update the current rxbd pointer to be the next one */
+ priv->cur_rx = bdp;
+
+ /* If no packets have arrived since the
+ * last one we processed, clear the IEVENT RX and
+ * BSY bits so that another interrupt won't be
+ * generated when we set IMASK */
+ if (bdp->status & RXBD_EMPTY)
+ gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+ return howmany;
+}
+
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_poll(struct net_device *dev, int *budget)
+{
+ int howmany;
+ struct gfar_private *priv = netdev_priv(dev);
+ int rx_work_limit = *budget;
+
+ if (rx_work_limit > dev->quota)
+ rx_work_limit = dev->quota;
+
+ howmany = gfar_clean_rx_ring(dev, rx_work_limit);
+
+ dev->quota -= howmany;
+ rx_work_limit -= howmany;
+ *budget -= howmany;
+
+ if (rx_work_limit >= 0) {
+ netif_rx_complete(dev);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+
+ gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+
+ /* If we are coalescing interrupts, update the timer */
+ /* Otherwise, clear it */
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ /* Signal to the ring size changer that it's safe to go */
+ priv->rxclean = 1;
+ }
+
+ return (rx_work_limit < 0) ? 1 : 0;
+}
+#endif
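+
+/* Note on the poll() contract used above: gfar_poll() is handed a budget,
+ * decrements both *budget and dev->quota by the number of frames it cleaned,
+ * and returns 0 when it finishes under budget (after netif_rx_complete() and
+ * re-enabling the rx interrupts in IMASK), or 1 if the budget ran out first
+ * and it wants to be polled again. */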
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&priv->regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, events);
+
+ /* Check for reception */
+ if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
+ gfar_receive(irq, dev_id, regs);
+
+ /* Check for transmit completion */
+ if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
+ gfar_transmit(irq, dev_id, regs);
+
+ /* Update error statistics */
+ if (events & IEVENT_TXE) {
+ priv->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ priv->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ priv->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
+ dev->name);
+#endif
+ priv->stats.tx_dropped++;
+ priv->extra_stats.tx_underrun++;
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+ }
+ }
+ if (events & IEVENT_BSY) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_bsy++;
+
+ gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+ gfar_read(&priv->regs->rstat));
+#endif
+ }
+ if (events & IEVENT_BABR) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_EBERR) {
+ priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+ }
+
+ if (events & IEVENT_BABT) {
+ priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Clear the interrupt */
+ mii_clear_phy_interrupt(priv->mii_info);
+
+ /* Disable PHY interrupts */
+ mii_configure_phy_interrupt(priv->mii_info,
+ MII_INTERRUPT_DISABLED);
+
+ /* Schedule the phy change */
+ schedule_work(&priv->tq);
+
+ return IRQ_HANDLED;
+}
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void gfar_phy_change(void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct gfar_private *priv = netdev_priv(dev);
+ int result = 0;
+
+ /* Delay to give the PHY a chance to change the
+ * register state */
+ msleep(1);
+
+ /* Update the link, speed, duplex */
+ result = priv->mii_info->phyinfo->read_status(priv->mii_info);
+
+ /* Adjust the known status as long as the link
+ * isn't still coming up */
+ if((0 == result) || (priv->mii_info->link == 0))
+ adjust_link(dev);
+
+ /* Reenable interrupts, if needed */
+ if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
+ mii_configure_phy_interrupt(priv->mii_info,
+ MII_INTERRUPT_ENABLED);
+}
+
+/* Called every so often on systems that don't interrupt
+ * the core for PHY changes */
+static void gfar_phy_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tq);
+
+ mod_timer(&priv->phy_info_timer, jiffies +
+ GFAR_PHY_CHANGE_TIME * HZ);
+}
+
+/* Keep trying aneg for some time
+ * If, after GFAR_AN_TIMEOUT seconds, it has not
+ * finished, we switch to forced.
+ * Either way, once the process has completed, we either
+ * request the interrupt, or switch the timer over to
+ * using gfar_phy_timer to check status */
+static void gfar_phy_startup_timer(unsigned long data)
+{
+ int result;
+ static int secondary = GFAR_AN_TIMEOUT;
+ struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
+ struct gfar_private *priv = netdev_priv(mii_info->dev);
+
+ /* Configure the Auto-negotiation */
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* If autonegotiation failed to start, and
+ * we haven't timed out, reset the timer, and return */
+ if (result && secondary--) {
+ mod_timer(&priv->phy_info_timer, jiffies + HZ);
+ return;
+ } else if (result) {
+ /* Couldn't start autonegotiation.
+ * Try switching to forced */
+ mii_info->autoneg = 0;
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* Forcing failed! Give up */
+ if(result) {
+ printk(KERN_ERR "%s: Forcing failed!\n",
+ mii_info->dev->name);
+ return;
+ }
+ }
+
+ /* Kill the timer so it can be restarted */
+ del_timer_sync(&priv->phy_info_timer);
+
+ /* Grab the PHY interrupt, if necessary/possible */
+ if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
+ if (request_irq(priv->einfo->interruptPHY,
+ phy_interrupt,
+ SA_SHIRQ,
+ "phy_interrupt",
+ mii_info->dev) < 0) {
+ printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
+ mii_info->dev->name,
+ priv->einfo->interruptPHY);
+ } else {
+ mii_configure_phy_interrupt(priv->mii_info,
+ MII_INTERRUPT_ENABLED);
+ return;
+ }
+ }
+
+ /* Start the timer again, this time in order to
+ * handle a change in status */
+ init_timer(&priv->phy_info_timer);
+ priv->phy_info_timer.function = &gfar_phy_timer;
+ priv->phy_info_timer.data = (unsigned long) mii_info->dev;
+ mod_timer(&priv->phy_info_timer, jiffies +
+ GFAR_PHY_CHANGE_TIME * HZ);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the priv structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+ struct gfar_mii_info *mii_info = priv->mii_info;
+
+ if (mii_info->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (mii_info->duplex != priv->oldduplex) {
+ if (!(mii_info->duplex)) {
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ gfar_write(&regs->maccfg2, tempval);
+
+ printk(KERN_INFO "%s: Half Duplex\n",
+ dev->name);
+ } else {
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_FULL_DUPLEX;
+ gfar_write(&regs->maccfg2, tempval);
+
+ printk(KERN_INFO "%s: Full Duplex\n",
+ dev->name);
+ }
+
+ priv->oldduplex = mii_info->duplex;
+ }
+
+ if (mii_info->speed != priv->oldspeed) {
+ switch (mii_info->speed) {
+ case 1000:
+ tempval = gfar_read(&regs->maccfg2);
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+ gfar_write(&regs->maccfg2, tempval);
+ break;
+ case 100:
+ case 10:
+ tempval = gfar_read(&regs->maccfg2);
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+ gfar_write(&regs->maccfg2, tempval);
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Ack! Speed (%d) is not 10/100/1000!\n",
+ dev->name, mii_info->speed);
+ break;
+ }
+
+ printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
+ mii_info->speed);
+
+ priv->oldspeed = mii_info->speed;
+ }
+
+ if (!priv->oldlink) {
+ printk(KERN_INFO "%s: Link is up\n", dev->name);
+ priv->oldlink = 1;
+ netif_carrier_on(dev);
+ netif_schedule(dev);
+ }
+ } else {
+ if (priv->oldlink) {
+ printk(KERN_INFO "%s: Link is down\n", dev->name);
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ netif_carrier_off(dev);
+ }
+ }
+}
+
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed) */
+static void gfar_set_multi(struct net_device *dev)
+{
+ struct dev_mc_list *mc_ptr;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 tempval;
+
+ if(dev->flags & IFF_PROMISC) {
+ printk(KERN_INFO "%s: Entering promiscuous mode.\n",
+ dev->name);
+ /* Set RCTRL to PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= RCTRL_PROM;
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Set RCTRL to not PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~(RCTRL_PROM);
+ gfar_write(&regs->rctrl, tempval);
+ }
+
+ if(dev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ gfar_write(&regs->gaddr0, 0xffffffff);
+ gfar_write(&regs->gaddr1, 0xffffffff);
+ gfar_write(&regs->gaddr2, 0xffffffff);
+ gfar_write(&regs->gaddr3, 0xffffffff);
+ gfar_write(&regs->gaddr4, 0xffffffff);
+ gfar_write(&regs->gaddr5, 0xffffffff);
+ gfar_write(&regs->gaddr6, 0xffffffff);
+ gfar_write(&regs->gaddr7, 0xffffffff);
+ } else {
+ /* zero out the hash */
+ gfar_write(&regs->gaddr0, 0x0);
+ gfar_write(&regs->gaddr1, 0x0);
+ gfar_write(&regs->gaddr2, 0x0);
+ gfar_write(&regs->gaddr3, 0x0);
+ gfar_write(&regs->gaddr4, 0x0);
+ gfar_write(&regs->gaddr5, 0x0);
+ gfar_write(&regs->gaddr6, 0x0);
+ gfar_write(&regs->gaddr7, 0x0);
+
+ if(dev->mc_count == 0)
+ return;
+
+ /* Parse the list, and set the appropriate bits */
+ for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ }
+ }
+
+ return;
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255. This means that the 3 most significant bits of the
+ * hash select which gaddr register to use, and the other 5 bits
+ * indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry. A worked example follows the function below. */
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regs = priv->regs;
+ u32 *hash = &regs->gaddr0;
+ u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u8 whichreg = ((result >> 29) & 0x7);
+ u8 whichbit = ((result >> 24) & 0x1f);
+ u32 value = (1 << (31-whichbit));
+
+ tempval = gfar_read(&hash[whichreg]);
+ tempval |= value;
+ gfar_write(&hash[whichreg], tempval);
+
+ return;
+}
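+
+/* Worked example (with a hypothetical CRC value, for illustration only):
+ * if the top byte of the CRC were 0xb5 (10110101b), whichreg would be
+ * 101b = 5 and whichbit 10101b = 21, so the code above sets bit
+ * (31 - 21) = 10 of gaddr5 -- IBM-numbered bit 21 of that register,
+ * i.e. hash-table entry 5 * 32 + 21 = 181. */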
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&priv->regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
+
+ /* Hmm... */
+#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
+ printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ dev->name, events, gfar_read(&priv->regs->imask));
+#endif
+
+ /* Update the error counters */
+ if (events & IEVENT_TXE) {
+ priv->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ priv->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ priv->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
+ dev->name);
+#endif
+ priv->stats.tx_dropped++;
+ priv->extra_stats.tx_underrun++;
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+ }
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_BSY) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_bsy++;
+
+ gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+ gfar_read(&priv->regs->rstat));
+#endif
+ }
+ if (events & IEVENT_BABR) {
+ priv->stats.rx_errors++;
+ priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+ }
+ if (events & IEVENT_EBERR) {
+ priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+ }
+	if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+	}
+
+ if (events & IEVENT_BABT) {
+ priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+ printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+ }
+ return IRQ_HANDLED;
+}
+
+/* Structure for a device driver */
+static struct device_driver gfar_driver = {
+ .name = "fsl-gianfar",
+ .bus = &platform_bus_type,
+ .probe = gfar_probe,
+ .remove = gfar_remove,
+};
+
+static int __init gfar_init(void)
+{
+ return driver_register(&gfar_driver);
+}
+
+static void __exit gfar_exit(void)
+{
+ driver_unregister(&gfar_driver);
+}
+
+module_init(gfar_init);
+module_exit(gfar_exit);
+
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
new file mode 100644
index 000000000000..c2f783a6a9fa
--- /dev/null
+++ b/drivers/net/gianfar.h
@@ -0,0 +1,538 @@
+/*
+ * drivers/net/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ * -Add support for ethtool -s
+ * -Add patch for ethtool phys id
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include "gianfar_phy.h"
+
+/* The maximum number of packets to be handled in one call of gfar_poll */
+#define GFAR_DEV_WEIGHT 64
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+/* The number of bytes which compose a unit for the purpose of
+ * allocating data buffers, i.e. for any given MTU the data buffer
+ * will be the next highest multiple of 512 bytes. */
+#define INCREMENTAL_BUFFER_SIZE 512
+
+
+#define MAC_ADDR_LEN 6
+
+#define PHY_INIT_TIMEOUT 100000
+#define GFAR_PHY_CHANGE_TIME 2
+
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.1, "
+#define DRV_NAME "gfar-enet"
+extern const char gfar_driver_name[];
+extern const char gfar_driver_version[];
+
+/* These need to be powers of 2 for this driver */
+#ifdef CONFIG_GFAR_NAPI
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+#else
+#define DEFAULT_TX_RING_SIZE 64
+#define DEFAULT_RX_RING_SIZE 64
+#endif
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define DEFAULT_RX_BUFFER_SIZE 1536
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
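+/* Since the ring sizes are required to be powers of 2 (gfar_sringparam()
+ * enforces this), wrapping an index is a single AND: e.g. with a 256-entry
+ * rx ring the mask is 0xff, so (255 + 1) & 0xff = 0, which is how skb_currx
+ * walks the ring in gfar_clean_rx_ring(). */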
+#define JUMBO_BUFFER_SIZE 9728
+#define JUMBO_FRAME_SIZE 9600
+
+/* Latency of interface clock in nanoseconds */
+/* Interface clock latency, in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
+
+#define DEFAULT_TX_COALESCE 1
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 400
+
+#define DEFAULT_RX_COALESCE 1
+#define DEFAULT_RXCOUNT 16
+#define DEFAULT_RXTIME 400
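+
+/* Rough arithmetic for the defaults above: one tick is 512 ns at gigabit,
+ * 2560 ns at 100 Mbit and 25600 ns at 10 Mbit, so a DEFAULT_RXTIME of 400
+ * ticks corresponds to roughly 205 us, 1.02 ms and 10.2 ms respectively. */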
+
+#define TBIPA_VALUE 0x1f
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+#define MIIMIND_BUSY 0x00000001
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+
+#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT 0x80000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
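+
+/* For example, with the default coalescing settings mk_ic_value(16, 400)
+ * evaluates to IC_ICEN | (16 << 21) | 400 = 0x80000000 | 0x02000000 | 0x190
+ * = 0x82000190: coalescing enabled, frame threshold 16, timer 400 ticks. */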
+
+#define RCTRL_PROM 0x00000008
+#define RSTAT_CLEAR_RHALT 0x00800000
+
+#define IEVENT_INIT_CLEAR 0xffffffff
+#define IEVENT_BABR 0x80000000
+#define IEVENT_RXC 0x40000000
+#define IEVENT_BSY 0x20000000
+#define IEVENT_EBERR 0x10000000
+#define IEVENT_MSRO 0x04000000
+#define IEVENT_GTSC 0x02000000
+#define IEVENT_BABT 0x01000000
+#define IEVENT_TXC 0x00800000
+#define IEVENT_TXE 0x00400000
+#define IEVENT_TXB 0x00200000
+#define IEVENT_TXF 0x00100000
+#define IEVENT_LC 0x00040000
+#define IEVENT_CRL 0x00020000
+#define IEVENT_XFUN 0x00010000
+#define IEVENT_RXB0 0x00008000
+#define IEVENT_GRSC 0x00000100
+#define IEVENT_RXF0 0x00000080
+#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
+#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_ERR_MASK \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN)
+
+#define IMASK_INIT_CLEAR 0x00000000
+#define IMASK_BABR 0x80000000
+#define IMASK_RXC 0x40000000
+#define IMASK_BSY 0x20000000
+#define IMASK_EBERR 0x10000000
+#define IMASK_MSRO 0x04000000
+#define IMASK_GRSC 0x02000000
+#define IMASK_BABT 0x01000000
+#define IMASK_TXC 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_TXBEN 0x00200000
+#define IMASK_TXFEN 0x00100000
+#define IMASK_LC 0x00040000
+#define IMASK_CRL 0x00020000
+#define IMASK_XFUN 0x00010000
+#define IMASK_RXB0 0x00008000
+#define IMASK_GTSC 0x00000100
+#define IMASK_RXFEN0 0x00000080
+#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT)
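+
+/* Note that IMASK_RX_DISABLED is an AND-mask (every bit set except RXFEN0
+ * and BSY), so "imask &= IMASK_RX_DISABLED" in gfar_receive() turns off only
+ * the receive-frame and busy interrupts and leaves the other sources alone. */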
+
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#ifdef CONFIG_GFAR_BDSTASH
+#define ATTR_BDSTASH 0x00000800
+#else
+#define ATTR_BDSTASH 0x00000000
+#endif
+
+#ifdef CONFIG_GFAR_BUFSTASH
+#define ATTR_BUFSTASH 0x00004000
+#define STASH_LENGTH 64
+#else
+#define ATTR_BUFSTASH 0x00000000
+#endif
+
+#define ATTR_SNOOPING 0x000000c0
+#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \
+ | ATTR_BDSTASH | ATTR_BUFSTASH)
+
+#define ATTRELI_INIT_SETTINGS 0x0
+
+
+/* TxBD status field bits */
+#define TXBD_READY 0x8000
+#define TXBD_PADCRC 0x4000
+#define TXBD_WRAP 0x2000
+#define TXBD_INTERRUPT 0x1000
+#define TXBD_LAST 0x0800
+#define TXBD_CRC 0x0400
+#define TXBD_DEF 0x0200
+#define TXBD_HUGEFRAME 0x0080
+#define TXBD_LATECOLLISION 0x0080
+#define TXBD_RETRYLIMIT 0x0040
+#define TXBD_RETRYCOUNTMASK 0x003c
+#define TXBD_UNDERRUN 0x0002
+
+/* RxBD status field bits */
+#define RXBD_EMPTY 0x8000
+#define RXBD_RO1 0x4000
+#define RXBD_WRAP 0x2000
+#define RXBD_INTERRUPT 0x1000
+#define RXBD_LAST 0x0800
+#define RXBD_FIRST 0x0400
+#define RXBD_MISS 0x0100
+#define RXBD_BROADCAST 0x0080
+#define RXBD_MULTICAST 0x0040
+#define RXBD_LARGE 0x0020
+#define RXBD_NONOCTET 0x0010
+#define RXBD_SHORT 0x0008
+#define RXBD_CRCERR 0x0004
+#define RXBD_OVERRUN 0x0002
+#define RXBD_TRUNCATED 0x0001
+#define RXBD_STATS 0x01ff
+
+struct txbd8
+{
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer length */
+ u32 bufPtr; /* Buffer Pointer */
+};
+
+struct rxbd8
+{
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer Length */
+ u32 bufPtr; /* Buffer Pointer */
+};
+
+struct rmon_mib
+{
+ u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+ u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+ u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+ u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+ u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+ u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+ u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+ u32 rbyt; /* 0x.69c - Receive Byte Counter */
+ u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
+ u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
+ u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
+ u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
+ u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
+ u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
+ u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
+ u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
+ u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
+ u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
+ u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
+ u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
+ u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
+ u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
+ u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
+ u32 rdrp; /* 0x.6dc - Receive Drop Counter */
+ u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */
+ u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
+ u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
+ u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
+ u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
+ u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
+ u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+ u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
+ u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
+ u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
+ u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
+ u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
+ u8 res1[4];
+ u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
+ u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
+ u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
+ u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
+ u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
+ u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
+ u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
+ u32 car1; /* 0x.730 - Carry Register One */
+ u32 car2; /* 0x.734 - Carry Register Two */
+ u32 cam1; /* 0x.738 - Carry Mask Register One */
+ u32 cam2; /* 0x.73c - Carry Mask Register Two */
+};
+
+struct gfar_extra_stats {
+ u64 kernel_dropped;
+ u64 rx_large;
+ u64 rx_short;
+ u64 rx_nonoctet;
+ u64 rx_crcerr;
+ u64 rx_overrun;
+ u64 rx_bsy;
+ u64 rx_babr;
+ u64 rx_trunc;
+ u64 eberr;
+ u64 tx_babt;
+ u64 tx_underrun;
+ u64 rx_skbmissing;
+ u64 tx_timeout;
+};
+
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+
+/* Number of stats in the stats structure (ignore car and cam regs) */
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
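+
+/* With the register layout above (47 u32 counters plus a 4-byte hole,
+ * assuming no padding) sizeof(struct rmon_mib) is 192, so GFAR_RMON_LEN
+ * works out to (192 - 16)/4 = 44 once the car/cam registers are dropped,
+ * and GFAR_STATS_LEN to 44 + 14 = 58, matching the ethtool string table. */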
+
+#define GFAR_INFOSTR_LEN 32
+
+struct gfar_stats {
+ u64 extra[GFAR_EXTRA_STATS_LEN];
+ u64 rmon[GFAR_RMON_LEN];
+};
+
+
+struct gfar {
+ u8 res1[16];
+ u32 ievent; /* 0x.010 - Interrupt Event Register */
+ u32 imask; /* 0x.014 - Interrupt Mask Register */
+ u32 edis; /* 0x.018 - Error Disabled Register */
+ u8 res2[4];
+ u32 ecntrl; /* 0x.020 - Ethernet Control Register */
+ u32 minflr; /* 0x.024 - Minimum Frame Length Register */
+ u32 ptv; /* 0x.028 - Pause Time Value Register */
+ u32 dmactrl; /* 0x.02c - DMA Control Register */
+ u32 tbipa; /* 0x.030 - TBI PHY Address Register */
+ u8 res3[88];
+ u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
+ u8 res4[8];
+ u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
+ u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
+ u8 res5[96];
+ u32 tctrl; /* 0x.100 - Transmit Control Register */
+ u32 tstat; /* 0x.104 - Transmit Status Register */
+ u8 res6[4];
+ u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+ u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+ u8 res7[16];
+ u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */
+ u8 res8[92];
+ u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */
+ u8 res9[124];
+ u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */
+ u8 res10[168];
+ u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */
+ u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */
+ u8 res11[72];
+ u32 rctrl; /* 0x.300 - Receive Control Register */
+ u32 rstat; /* 0x.304 - Receive Status Register */
+ u8 res12[4];
+ u32 rbdlen; /* 0x.30c - RxBD Data Length Register */
+ u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+ u8 res13[16];
+ u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */
+ u8 res14[24];
+ u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
+ u8 res15[64];
+ u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */
+ u8 res16[124];
+ u32 rbase; /* 0x.404 - Receive Descriptor Base Address */
+ u8 res17[248];
+ u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
+ u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
+ u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+ u32 hafdup; /* 0x.50c - Half Duplex Register */
+ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
+ u8 res18[12];
+ u32 miimcfg; /* 0x.520 - MII Management Configuration Register */
+ u32 miimcom; /* 0x.524 - MII Management Command Register */
+ u32 miimadd; /* 0x.528 - MII Management Address Register */
+ u32 miimcon; /* 0x.52c - MII Management Control Register */
+ u32 miimstat; /* 0x.530 - MII Management Status Register */
+ u32 miimind; /* 0x.534 - MII Management Indicator Register */
+ u8 res19[4];
+ u32 ifstat; /* 0x.53c - Interface Status Register */
+ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
+ u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
+ u8 res20[312];
+ struct rmon_mib rmon;
+ u8 res21[192];
+	u32	iaddr0;		/* 0x.800 - Individual address register 0 */
+	u32	iaddr1;		/* 0x.804 - Individual address register 1 */
+	u32	iaddr2;		/* 0x.808 - Individual address register 2 */
+	u32	iaddr3;		/* 0x.80c - Individual address register 3 */
+	u32	iaddr4;		/* 0x.810 - Individual address register 4 */
+	u32	iaddr5;		/* 0x.814 - Individual address register 5 */
+	u32	iaddr6;		/* 0x.818 - Individual address register 6 */
+	u32	iaddr7;		/* 0x.81c - Individual address register 7 */
+ u8 res22[96];
+ u32 gaddr0; /* 0x.880 - Global address register 0 */
+ u32 gaddr1; /* 0x.884 - Global address register 1 */
+ u32 gaddr2; /* 0x.888 - Global address register 2 */
+ u32 gaddr3; /* 0x.88c - Global address register 3 */
+ u32 gaddr4; /* 0x.890 - Global address register 4 */
+ u32 gaddr5; /* 0x.894 - Global address register 5 */
+ u32 gaddr6; /* 0x.898 - Global address register 6 */
+ u32 gaddr7; /* 0x.89c - Global address register 7 */
+ u8 res23[856];
+ u32 attr; /* 0x.bf8 - Attributes Register */
+ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
+ u8 res24[1024];
+
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. rx_bd_base and
+ * tx_bd_base hold the base addresses of the rings, while cur_rx and
+ * cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
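+/* (The wrap itself is driven by the WRAP bit in each descriptor's status
+ * word -- gfar_clean_rx_ring() jumps back to rx_bd_base when it sees
+ * RXBD_WRAP set -- while skb_currx is wrapped arithmetically via
+ * RX_RING_MOD_MASK().) */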
+struct gfar_private {
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff ** tx_skbuff;
+ struct sk_buff ** rx_skbuff;
+
+	/* indices pointing to the next free skb in the skb arrays */
+ u16 skb_curtx;
+ u16 skb_currx;
+
+ /* index of the first skb which hasn't been transmitted
+ * yet. */
+ u16 skb_dirtytx;
+
+ /* Configuration info for the coalescing features */
+ unsigned char txcoalescing;
+ unsigned short txcount;
+ unsigned short txtime;
+ unsigned char rxcoalescing;
+ unsigned short rxcount;
+ unsigned short rxtime;
+
+ /* GFAR addresses */
+ struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */
+ struct txbd8 *tx_bd_base;
+ struct rxbd8 *cur_rx; /* Next free rx ring entry */
+ struct txbd8 *cur_tx; /* Next free ring entry */
+ struct txbd8 *dirty_tx; /* The Ring entry to be freed. */
+ struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
+ struct gfar *phyregs;
+ struct work_struct tq;
+ struct timer_list phy_info_timer;
+ struct net_device_stats stats; /* linux network statistics */
+ struct gfar_extra_stats extra_stats;
+ spinlock_t lock;
+ unsigned int rx_buffer_size;
+ unsigned int rx_stash_size;
+ unsigned int tx_ring_size;
+ unsigned int rx_ring_size;
+ wait_queue_head_t rxcleanupq;
+ unsigned int rxclean;
+
+ /* Info structure initialized by board setup code */
+ unsigned int interruptTransmit;
+ unsigned int interruptReceive;
+ unsigned int interruptError;
+ struct gianfar_platform_data *einfo;
+
+ struct gfar_mii_info *mii_info;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+};
+
+extern inline u32 gfar_read(volatile unsigned *addr)
+{
+ u32 val;
+ val = in_be32(addr);
+ return val;
+}
+
+extern inline void gfar_write(volatile unsigned *addr, u32 val)
+{
+ out_be32(addr, val);
+}
+
+extern struct ethtool_ops *gfar_op_array[];
+
+#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
new file mode 100644
index 000000000000..28046e9e88ba
--- /dev/null
+++ b/drivers/net/gianfar_ethtool.c
@@ -0,0 +1,527 @@
+/*
+ * drivers/net/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <linux/ethtool.h>
+
+#include "gianfar.h"
+
+#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
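+/* e.g. 256 passes (0x100 & 0xff == 0) while 100 fails (100 & 99 == 96);
+ * the (x) != 0 term rejects 0, which would otherwise satisfy the mask test. */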
+
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 * buf);
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-dropped-by-kernel",
+ "rx-large-frame-errors",
+ "rx-short-frame-errors",
+ "rx-non-octet-errors",
+ "rx-crc-errors",
+ "rx-overrun-errors",
+ "rx-busy-errors",
+ "rx-babbling-errors",
+ "rx-truncated-frames",
+ "ethernet-bus-error",
+ "tx-babbling-errors",
+ "tx-underrun-errors",
+ "rx-skb-missing-errors",
+ "tx-timeout-errors",
+ "tx-rx-64-frames",
+ "tx-rx-65-127-frames",
+ "tx-rx-128-255-frames",
+ "tx-rx-256-511-frames",
+ "tx-rx-512-1023-frames",
+ "tx-rx-1024-1518-frames",
+ "tx-rx-1519-1522-good-vlan",
+ "rx-bytes",
+ "rx-packets",
+ "rx-fcs-errors",
+ "receive-multicast-packet",
+ "receive-broadcast-packet",
+ "rx-control-frame-packets",
+ "rx-pause-frame-packets",
+ "rx-unknown-op-code",
+ "rx-alignment-error",
+ "rx-frame-length-error",
+ "rx-code-error",
+ "rx-carrier-sense-error",
+ "rx-undersize-packets",
+ "rx-oversize-packets",
+ "rx-fragmented-frames",
+ "rx-jabber-frames",
+ "rx-dropped-frames",
+ "tx-byte-counter",
+ "tx-packets",
+ "tx-multicast-packets",
+ "tx-broadcast-packets",
+ "tx-pause-control-frames",
+ "tx-deferral-packets",
+ "tx-excessive-deferral-packets",
+ "tx-single-collision-packets",
+ "tx-multiple-collision-packets",
+ "tx-late-collision-packets",
+ "tx-excessive-collision-packets",
+ "tx-total-collision",
+ "reserved",
+ "tx-dropped-frames",
+ "tx-jabber-frames",
+ "tx-fcs-errors",
+ "tx-control-frames",
+ "tx-oversize-frames",
+ "tx-undersize-frames",
+ "tx-fragmented-frames",
+};
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 *rmon = (u32 *) & priv->regs->rmon;
+ u64 *extra = (u64 *) & priv->extra_stats;
+ struct gfar_stats *stats = (struct gfar_stats *) buf;
+
+ for (i = 0; i < GFAR_RMON_LEN; i++) {
+ stats->rmon[i] = (u64) (rmon[i]);
+ }
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+ stats->extra[i] = extra[i];
+ }
+}
+
+/* Returns the number of stats (and their corresponding strings) */
+int gfar_stats_count(struct net_device *dev)
+{
+ return GFAR_STATS_LEN;
+}
+
+void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+void gfar_fill_stats_normon(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 * buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u64 *extra = (u64 *) & priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+ buf[i] = extra[i];
+ }
+}
+
+
+int gfar_stats_count_normon(struct net_device *dev)
+{
+ return GFAR_EXTRA_STATS_LEN;
+}
+/* Fills in the drvinfo structure with some basic info */
+void gfar_gdrvinfo(struct net_device *dev, struct
+ ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ drvinfo->n_stats = GFAR_STATS_LEN;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+/* Return the current settings in the ethtool_cmd structure */
+int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ uint gigabit_support =
+ priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+ SUPPORTED_1000baseT_Full : 0;
+ uint gigabit_advert =
+ priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+ ADVERTISED_1000baseT_Full: 0;
+
+ cmd->supported = (SUPPORTED_10baseT_Half
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | gigabit_support | SUPPORTED_Autoneg);
+
+ /* For now, we always advertise everything */
+ cmd->advertising = (ADVERTISED_10baseT_Half
+ | ADVERTISED_100baseT_Half
+ | ADVERTISED_100baseT_Full
+ | gigabit_advert | ADVERTISED_Autoneg);
+
+ cmd->speed = priv->mii_info->speed;
+ cmd->duplex = priv->mii_info->duplex;
+ cmd->port = PORT_MII;
+ cmd->phy_address = priv->mii_info->mii_id;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = priv->txcount;
+ cmd->maxrxpkt = priv->rxcount;
+
+ return 0;
+}
+
+/* Return the length of the register structure */
+int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 *theregs = (u32 *) priv->regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = theregs[i];
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Convert microseconds to ethernet clock ticks; the tick length
+ * depends on the speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->mii_info->speed) {
+ case 1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case 100:
+ count = GFAR_100_TIME;
+ break;
+ case 10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
+ return ((usecs * 1000 + count - 1) / count);
+}
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->mii_info->speed) {
+ case 1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case 100:
+ count = GFAR_100_TIME;
+ break;
+ case 10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0 */
+ /* if ticks is > 0 */
+ return ((ticks * count) / 1000);
+}
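+
+/* Round-trip example using the constants from gianfar.h: at gigabit a tick
+ * is 512 ns, so gfar_usecs2ticks(priv, 100) = (100000 + 511) / 512 = 196
+ * ticks and gfar_ticks2usecs(priv, 196) = 196 * 512 / 1000 = 100 us again;
+ * the "+ count - 1" in the conversion above rounds up, so any nonzero
+ * request yields at least one tick. */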
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
+ cvals->rx_max_coalesced_frames = priv->rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime);
+ cvals->tx_max_coalesced_frames = priv->txcount;
+
+ cvals->use_adaptive_rx_coalesce = 0;
+ cvals->use_adaptive_tx_coalesce = 0;
+
+ cvals->pkt_rate_low = 0;
+ cvals->rx_coalesce_usecs_low = 0;
+ cvals->rx_max_coalesced_frames_low = 0;
+ cvals->tx_coalesce_usecs_low = 0;
+ cvals->tx_max_coalesced_frames_low = 0;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ cvals->pkt_rate_high = 0;
+ cvals->rx_coalesce_usecs_high = 0;
+ cvals->rx_max_coalesced_frames_high = 0;
+ cvals->tx_coalesce_usecs_high = 0;
+ cvals->tx_max_coalesced_frames_high = 0;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ cvals->rate_sample_interval = 0;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0))
+ priv->rxcoalescing = 0;
+ else
+ priv->rxcoalescing = 1;
+
+ priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
+ priv->rxcount = cvals->rx_max_coalesced_frames;
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0))
+ priv->txcoalescing = 0;
+ else
+ priv->txcoalescing = 1;
+
+ priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
+ priv->txcount = cvals->tx_max_coalesced_frames;
+
+ if (priv->rxcoalescing)
+ gfar_write(&priv->regs->rxic,
+ mk_ic_value(priv->rxcount, priv->rxtime));
+ else
+ gfar_write(&priv->regs->rxic, 0);
+
+ if (priv->txcoalescing)
+ gfar_write(&priv->regs->txic,
+ mk_ic_value(priv->txcount, priv->txtime));
+ else
+ gfar_write(&priv->regs->txic, 0);
+
+ return 0;
+}
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = priv->rx_ring_size;
+ rvals->rx_mini_pending = priv->rx_ring_size;
+ rvals->rx_jumbo_pending = priv->rx_ring_size;
+ rvals->tx_pending = priv->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in
+ * motion. We wait for the ring to be clean before reallocating
+ * the rings. */
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+		printk(KERN_ERR "%s: Ring sizes must be a power of 2\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+		printk(KERN_ERR "%s: Ring sizes must be a power of 2\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ /* Stop the controller so we don't rx any more frames */
+ /* But first, make sure we clear the bits */
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ tempval = gfar_read(&priv->regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&priv->regs->dmactrl, tempval);
+
+ while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+ cpu_relax();
+
+ /* Note that rx is not clean right now */
+ priv->rxclean = 0;
+
+ if (dev->flags & IFF_UP) {
+ /* Tell the driver to process the rest of the frames */
+ gfar_receive(0, (void *) dev, NULL);
+
+ /* Now wait for it to be done */
+ wait_event_interruptible(priv->rxcleanupq, priv->rxclean);
+
+ /* Ok, all packets have been handled. Now we bring it down,
+ * change the ring size, and bring it up */
+
+ stop_gfar(dev);
+ }
+
+ priv->rx_ring_size = rvals->rx_pending;
+ priv->tx_ring_size = rvals->tx_pending;
+
+ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+
+ return err;
+}
+
+struct ethtool_ops gfar_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings,
+ .get_stats_count = gfar_stats_count,
+ .get_ethtool_stats = gfar_fill_stats,
+};
+
+struct ethtool_ops gfar_normon_nocoalesce_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings_normon,
+ .get_stats_count = gfar_stats_count_normon,
+ .get_ethtool_stats = gfar_fill_stats_normon,
+};
+
+struct ethtool_ops gfar_nocoalesce_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings,
+ .get_stats_count = gfar_stats_count,
+ .get_ethtool_stats = gfar_fill_stats,
+};
+
+struct ethtool_ops gfar_normon_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings_normon,
+ .get_stats_count = gfar_stats_count_normon,
+ .get_ethtool_stats = gfar_fill_stats_normon,
+};
+
+struct ethtool_ops *gfar_op_array[] = {
+ &gfar_ethtool_ops,
+ &gfar_normon_ethtool_ops,
+ &gfar_nocoalesce_ethtool_ops,
+ &gfar_normon_nocoalesce_ethtool_ops
+};
diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c
new file mode 100644
index 000000000000..02b16abc89bd
--- /dev/null
+++ b/drivers/net/gianfar_phy.c
@@ -0,0 +1,661 @@
+/*
+ * drivers/net/gianfar_phy.c
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+
+static void config_genmii_advert(struct gfar_mii_info *mii_info);
+static void genmii_setup_forced(struct gfar_mii_info *mii_info);
+static void genmii_restart_aneg(struct gfar_mii_info *mii_info);
+static int gbit_config_aneg(struct gfar_mii_info *mii_info);
+static int genmii_config_aneg(struct gfar_mii_info *mii_info);
+static int genmii_update_link(struct gfar_mii_info *mii_info);
+static int genmii_read_status(struct gfar_mii_info *mii_info);
+u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum);
+void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val);
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regbase = priv->phyregs;
+
+ /* Set the PHY address and the register address we want to write */
+ gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
+
+ /* Write out the value we want */
+ gfar_write(&regbase->miimcon, value);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
+ cpu_relax();
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar *regbase = priv->phyregs;
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
+
+ /* Clear miimcom, and then initiate a read */
+ gfar_write(&regbase->miimcom, 0);
+ gfar_write(&regbase->miimcom, MII_READ_COMMAND);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+ cpu_relax();
+
+ /* Grab the value of the register from miimstat */
+ value = gfar_read(&regbase->miimstat);
+
+ return value;
+}
+
+void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info)
+{
+ if(mii_info->phyinfo->ack_interrupt)
+ mii_info->phyinfo->ack_interrupt(mii_info);
+}
+
+
+void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts)
+{
+ mii_info->interrupts = interrupts;
+ if(mii_info->phyinfo->config_intr)
+ mii_info->phyinfo->config_intr(mii_info);
+}
+
+
+/* Writes MII_ADVERTISE with the appropriate values, after
+ * sanitizing advertise to make sure only supported features
+ * are advertised
+ */
+static void config_genmii_advert(struct gfar_mii_info *mii_info)
+{
+ u32 advertise;
+ u16 adv;
+
+ /* Only allow advertising what this PHY supports */
+ mii_info->advertising &= mii_info->phyinfo->features;
+ advertise = mii_info->advertising;
+
+ /* Setup standard advertisement */
+ adv = phy_read(mii_info, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(mii_info, MII_ADVERTISE, adv);
+}
+
+static void genmii_setup_forced(struct gfar_mii_info *mii_info)
+{
+ u16 ctrl;
+ u32 features = mii_info->phyinfo->features;
+
+ ctrl = phy_read(mii_info, MII_BMCR);
+
+ ctrl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPEED1000|BMCR_ANENABLE);
+ ctrl |= BMCR_RESET;
+
+ switch(mii_info->speed) {
+ case SPEED_1000:
+ if(features & (SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full)) {
+ ctrl |= BMCR_SPEED1000;
+ break;
+ }
+ mii_info->speed = SPEED_100;
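+		/* fall through and try 100 Mbit instead */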
+ case SPEED_100:
+ if (features & (SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full)) {
+ ctrl |= BMCR_SPEED100;
+ break;
+ }
+ mii_info->speed = SPEED_10;
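+		/* fall through and try 10 Mbit instead */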
+ case SPEED_10:
+ if (features & (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full))
+ break;
+ default: /* Unsupported speed! */
+ printk(KERN_ERR "%s: Bad speed!\n",
+ mii_info->dev->name);
+ break;
+ }
+
+ phy_write(mii_info, MII_BMCR, ctrl);
+}
+
+
+/* Enable and Restart Autonegotiation */
+static void genmii_restart_aneg(struct gfar_mii_info *mii_info)
+{
+ u16 ctl;
+
+ ctl = phy_read(mii_info, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(mii_info, MII_BMCR, ctl);
+}
+
+
+static int gbit_config_aneg(struct gfar_mii_info *mii_info)
+{
+ u16 adv;
+ u32 advertise;
+
+ if(mii_info->autoneg) {
+ /* Configure the ADVERTISE register */
+ config_genmii_advert(mii_info);
+ advertise = mii_info->advertising;
+
+ adv = phy_read(mii_info, MII_1000BASETCONTROL);
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write(mii_info, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+static int marvell_config_aneg(struct gfar_mii_info *mii_info)
+{
+	/* The Marvell PHY has an erratum which requires
+ * that certain registers get written in order
+ * to restart autonegotiation */
+ phy_write(mii_info, MII_BMCR, BMCR_RESET);
+
+ phy_write(mii_info, 0x1d, 0x1f);
+ phy_write(mii_info, 0x1e, 0x200c);
+ phy_write(mii_info, 0x1d, 0x5);
+ phy_write(mii_info, 0x1e, 0);
+ phy_write(mii_info, 0x1e, 0x100);
+
+ gbit_config_aneg(mii_info);
+
+ return 0;
+}
+static int genmii_config_aneg(struct gfar_mii_info *mii_info)
+{
+ if (mii_info->autoneg) {
+ config_genmii_advert(mii_info);
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+
+static int genmii_update_link(struct gfar_mii_info *mii_info)
+{
+ u16 status;
+
+ /* Do a fake read */
+ phy_read(mii_info, MII_BMSR);
+
+ /* Read link and autonegotiation status */
+ status = phy_read(mii_info, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ mii_info->link = 0;
+ else
+ mii_info->link = 1;
+
+ /* If we are autonegotiating, and not done,
+ * return an error */
+ if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int genmii_read_status(struct gfar_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ if (mii_info->autoneg) {
+ status = phy_read(mii_info, MII_LPA);
+
+ if (status & (LPA_10FULL | LPA_100FULL))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ if (status & (LPA_100FULL | LPA_100HALF))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+ mii_info->pause = 0;
+ }
+	/* When autonegotiation is off, assume the speed and duplex we
+	 * forced into BMCR are in effect, though magic-aneg shouldn't
+	 * prevent this case from occurring
+	 */
+
+ return 0;
+}
+static int marvell_read_status(struct gfar_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+ status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
+
+#if 0
+ /* If speed and duplex aren't resolved,
+ * return an error. Isn't this handled
+ * by checking aneg?
+ */
+ if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
+ return -EAGAIN;
+#endif
+
+	/* Get the duplex */
+ if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ /* Get the speed */
+ speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
+ switch(speed) {
+ case MII_M1011_PHY_SPEC_STATUS_1000:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_M1011_PHY_SPEC_STATUS_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ mii_info->pause = 0;
+ }
+
+ return 0;
+}
+
+
+static int cis820x_read_status(struct gfar_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+
+ status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
+ if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
+
+ switch (speed) {
+ case MII_CIS8201_AUXCONSTAT_GBIT:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_CIS8201_AUXCONSTAT_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int marvell_ack_interrupt(struct gfar_mii_info *mii_info)
+{
+ /* Clear the interrupts by reading the reg */
+ phy_read(mii_info, MII_M1011_IEVENT);
+
+ return 0;
+}
+
+static int marvell_config_intr(struct gfar_mii_info *mii_info)
+{
+ if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ else
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+ return 0;
+}
+
+static int cis820x_init(struct gfar_mii_info *mii_info)
+{
+ phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
+ MII_CIS8201_AUXCONSTAT_INIT);
+ phy_write(mii_info, MII_CIS8201_EXT_CON1,
+ MII_CIS8201_EXTCON1_INIT);
+
+ return 0;
+}
+
+static int cis820x_ack_interrupt(struct gfar_mii_info *mii_info)
+{
+ phy_read(mii_info, MII_CIS8201_ISTAT);
+
+ return 0;
+}
+
+static int cis820x_config_intr(struct gfar_mii_info *mii_info)
+{
+ if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
+ else
+ phy_write(mii_info, MII_CIS8201_IMASK, 0);
+
+ return 0;
+}
+
+#define DM9161_DELAY 10
+
+static int dm9161_read_status(struct gfar_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ status = phy_read(mii_info, MII_DM9161_SCSR);
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+
+static int dm9161_config_aneg(struct gfar_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ if(0 == priv->resetdone)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void dm9161_timer(unsigned long data)
+{
+ struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
+ struct dm9161_private *priv = mii_info->priv;
+ u16 status = phy_read(mii_info, MII_BMSR);
+
+ if (status & BMSR_ANEGCOMPLETE) {
+ priv->resetdone = 1;
+ } else
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+}
+
+static int dm9161_init(struct gfar_mii_info *mii_info)
+{
+ struct dm9161_private *priv;
+
+ /* Allocate the private data structure */
+ priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
+
+ if (NULL == priv)
+ return -ENOMEM;
+
+ mii_info->priv = priv;
+
+ /* Reset is not done yet */
+ priv->resetdone = 0;
+
+ /* Isolate the PHY */
+ phy_write(mii_info, MII_BMCR, BMCR_ISOLATE);
+
+ /* Do not bypass the scrambler/descrambler */
+ phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
+
+ /* Clear 10BTCSR to default */
+ phy_write(mii_info, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
+
+ /* Reconnect the PHY, and enable Autonegotiation */
+ phy_write(mii_info, MII_BMCR, BMCR_ANENABLE);
+
+ /* Start a timer for DM9161_DELAY seconds to wait
+ * for the PHY to be ready */
+ init_timer(&priv->timer);
+ priv->timer.function = &dm9161_timer;
+ priv->timer.data = (unsigned long) mii_info;
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+
+ return 0;
+}
+
+static void dm9161_close(struct gfar_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ del_timer_sync(&priv->timer);
+ kfree(priv);
+}
+
+#if 0
+static int dm9161_ack_interrupt(struct gfar_mii_info *mii_info)
+{
+ phy_read(mii_info, MII_DM9161_INTR);
+
+ return 0;
+}
+#endif
+
+/* Cicada 820x */
+static struct phy_info phy_info_cis820x = {
+	.phy_id = 0x000fc440,
+	.name = "Cicada Cis8204",
+	.phy_id_mask = 0x000fffc0,
+ .features = MII_GBIT_FEATURES,
+ .init = &cis820x_init,
+ .config_aneg = &gbit_config_aneg,
+ .read_status = &cis820x_read_status,
+ .ack_interrupt = &cis820x_ack_interrupt,
+ .config_intr = &cis820x_config_intr,
+};
+
+static struct phy_info phy_info_dm9161 = {
+ .phy_id = 0x0181b880,
+ .name = "Davicom DM9161E",
+ .phy_id_mask = 0x0ffffff0,
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_marvell = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E1101",
+ .features = MII_GBIT_FEATURES,
+ .config_aneg = &marvell_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+};
+
+static struct phy_info phy_info_genmii = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .features = MII_BASIC_FEATURES,
+ .config_aneg = genmii_config_aneg,
+ .read_status = genmii_read_status,
+};
+
+static struct phy_info *phy_info[] = {
+ &phy_info_cis820x,
+ &phy_info_marvell,
+ &phy_info_dm9161,
+ &phy_info_genmii,
+ NULL
+};
+
+u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum)
+{
+ u16 retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+
+ return retval;
+}
+
+void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ mii_info->mdio_write(mii_info->dev,
+ mii_info->mii_id,
+ regnum, val);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+}
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev.  Returns a struct phy_info describing that PHY.
+ */
+struct phy_info * get_phy_info(struct gfar_mii_info *mii_info)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+ struct net_device *dev = mii_info->dev;
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = phy_read(mii_info, MII_PHYSID1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = phy_read(mii_info, MII_PHYSID2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->phy_id ==
+ (phy_ID & phy_info[i]->phy_id_mask)) {
+ theInfo = phy_info[i];
+ break;
+ }
+
+ /* This shouldn't happen, as we have generic PHY support */
+ if (theInfo == NULL) {
+		printk(KERN_ERR "%s: PHY id %x is not supported!\n",
+				dev->name, phy_ID);
+		return NULL;
+	} else {
+		printk(KERN_INFO "%s: PHY is %s (%x)\n", dev->name,
+				theInfo->name, phy_ID);
+ }
+
+ return theInfo;
+}
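+
+/* Worked example (illustrative, not part of the driver): the Marvell
+ * 88E1101 entry above has .phy_id = 0x01410c00 and .phy_id_mask =
+ * 0xffffff00.  If the PHY's ID registers 2 and 3 were to read back as,
+ * say, 0x01410c62, then
+ *
+ *	0x01410c62 & 0xffffff00 == 0x01410c00
+ *
+ * which equals .phy_id, so get_phy_info() selects phy_info_marvell.
+ * The generic entry uses a mask of 0x00000000 and therefore matches any
+ * ID, which is why it must stay last in the phy_info[] list.
+ */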
diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h
new file mode 100644
index 000000000000..1e9b3abf1e6d
--- /dev/null
+++ b/drivers/net/gianfar_phy.h
@@ -0,0 +1,213 @@
+/*
+ * drivers/net/gianfar_phy.h
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __GIANFAR_PHY_H
+#define __GIANFAR_PHY_H
+
+#define MII_end ((u32)-2)
+#define MII_read ((u32)-1)
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define GFAR_AN_TIMEOUT 2000
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1 0x17
+#define MII_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK 0x19
+#define MII_CIS8201_IMASK_IEN 0x8000
+#define MII_CIS8201_IMASK_SPEED 0x4000
+#define MII_CIS8201_IMASK_LINK 0x2000
+#define MII_CIS8201_IMASK_DUPLEX 0x1000
+#define MII_CIS8201_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT 0x1a
+#define MII_CIS8201_ISTAT_STATUS 0x8000
+#define MII_CIS8201_ISTAT_SPEED 0x4000
+#define MII_CIS8201_ISTAT_LINK 0x2000
+#define MII_CIS8201_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT 0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MII_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
+
+#define MII_M1011_IEVENT 0x13
+#define MII_M1011_IEVENT_CLEAR 0x0000
+
+#define MII_M1011_IMASK 0x12
+#define MII_M1011_IMASK_INIT 0x6400
+#define MII_M1011_IMASK_CLEAR 0x0000
+
+#define MII_DM9161_SCR 0x10
+#define MII_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Specified Configuration and Status Register */
+#define MII_DM9161_SCSR 0x11
+#define MII_DM9161_SCSR_100F 0x8000
+#define MII_DM9161_SCSR_100H 0x4000
+#define MII_DM9161_SCSR_10F 0x2000
+#define MII_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR 0x15
+#define MII_DM9161_INTR_PEND 0x8000
+#define MII_DM9161_INTR_DPLX_MASK 0x0800
+#define MII_DM9161_INTR_SPD_MASK 0x0400
+#define MII_DM9161_INTR_LINK_MASK 0x0200
+#define MII_DM9161_INTR_MASK 0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MII_DM9161_INTR_SPD_CHANGE 0x0008
+#define MII_DM9161_INTR_LINK_CHANGE 0x0004
+#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_STOP \
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR 0x12
+#define MII_DM9161_10BTCSR_INIT 0x7800
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+#define MII_READ_COMMAND 0x00000001
+
+#define MII_INTERRUPT_DISABLED 0x0
+#define MII_INTERRUPT_ENABLED 0x1
+/* Taken from mii_if_info and sungem_phy.h */
+struct gfar_mii_info {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_info *phyinfo;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ u32 advertising;
+ int autoneg;
+ int mii_id;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+
+ /* A lock to ensure that only one thing can read/write
+ * the MDIO bus at a time */
+ spinlock_t mdio_lock;
+
+ /* Provided by ethernet driver */
+ int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
+};
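+
+/* Illustrative sketch (not part of this header): the ethernet driver is
+ * expected to fill in the MDIO accessors, the lock, and the PHY address
+ * before handing this structure to the PHY code, roughly like this
+ * (error handling omitted):
+ *
+ *	struct gfar_mii_info *mii_info;
+ *
+ *	mii_info = kmalloc(sizeof(*mii_info), GFP_KERNEL);
+ *	memset(mii_info, 0, sizeof(*mii_info));
+ *	spin_lock_init(&mii_info->mdio_lock);
+ *	mii_info->dev = dev;
+ *	mii_info->mii_id = phy_addr;
+ *	mii_info->mdio_read = &read_phy_reg;
+ *	mii_info->mdio_write = &write_phy_reg;
+ *	mii_info->phyinfo = get_phy_info(mii_info);
+ *
+ * read_phy_reg()/write_phy_reg() are the accessors declared later in
+ * this header; get_phy_info() reads the ID registers over MDIO, so the
+ * fields above must be valid before it is called.
+ */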
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * phy_id contains a number which identifies the PHY.  During
+ * startup, the driver polls the PHY to find out what its
+ * UID--as defined by registers 2 and 3--is.  The 32-bit result
+ * read from the PHY is ANDed with phy_id_mask to discard any
+ * bits which may change between silicon revisions and are
+ * unimportant to functionality.
+ *
+ * Each entry provides a set of callbacks which take a gfar_mii_info
+ * structure.  Every PHY must implement at least config_aneg and
+ * read_status.
+ */
+struct phy_info {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+
+ /* Called to initialize the PHY */
+ int (*init)(struct gfar_mii_info *mii_info);
+
+	/* Called to suspend the PHY to save power */
+ int (*suspend)(struct gfar_mii_info *mii_info);
+
+ /* Reconfigures autonegotiation (or disables it) */
+ int (*config_aneg)(struct gfar_mii_info *mii_info);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status)(struct gfar_mii_info *mii_info);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt)(struct gfar_mii_info *mii_info);
+
+ /* Enables or disables interrupts */
+ int (*config_intr)(struct gfar_mii_info *mii_info);
+
+ /* Clears up any memory if needed */
+ void (*close)(struct gfar_mii_info *mii_info);
+};
+
+struct phy_info *get_phy_info(struct gfar_mii_info *mii_info);
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
+void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info);
+void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts);
+
+struct dm9161_private {
+ struct timer_list timer;
+ int resetdone;
+};
+
+#endif /* __GIANFAR_PHY_H */
diff --git a/drivers/net/gt64240eth.h b/drivers/net/gt64240eth.h
new file mode 100644
index 000000000000..7e7af0d56587
--- /dev/null
+++ b/drivers/net/gt64240eth.h
@@ -0,0 +1,402 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Patton Electronics Company
+ * Copyright (C) 2002 Momentum Computer
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or support@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Ethernet driver definitions for the MIPS GT96100 Advanced
+ * Communication Controller.
+ *
+ * Modified for the Marvellous GT64240 Retarded Communication Controller.
+ */
+#ifndef _GT64240ETH_H
+#define _GT64240ETH_H
+
+#include <asm/gt64240.h>
+
+#define ETHERNET_PORTS_DIFFERENCE_OFFSETS 0x400
+
+/* Translate those weenie names from the Galileo/VxWorks header files: */
+
+#define GT64240_MRR MAIN_ROUTING_REGISTER
+#define GT64240_CIU_ARBITER_CONFIG COMM_UNIT_ARBITER_CONFIGURATION_REGISTER
+#define GT64240_CIU_ARBITER_CONTROL COMM_UNIT_ARBITER_CONTROL
+#define GT64240_MAIN_LOW_CAUSE LOW_INTERRUPT_CAUSE_REGISTER
+#define GT64240_MAIN_HIGH_CAUSE HIGH_INTERRUPT_CAUSE_REGISTER
+#define GT64240_CPU_LOW_MASK CPU_INTERRUPT_MASK_REGISTER_LOW
+#define GT64240_CPU_HIGH_MASK CPU_INTERRUPT_MASK_REGISTER_HIGH
+#define GT64240_CPU_SELECT_CAUSE CPU_SELECT_CAUSE_REGISTER
+
+#define GT64240_ETH_PHY_ADDR_REG ETHERNET_PHY_ADDRESS_REGISTER
+#define GT64240_ETH_PORT_CONFIG ETHERNET0_PORT_CONFIGURATION_REGISTER
+#define GT64240_ETH_PORT_CONFIG_EXT ETHERNET0_PORT_CONFIGURATION_EXTEND_REGISTER
+#define GT64240_ETH_PORT_COMMAND ETHERNET0_PORT_COMMAND_REGISTER
+#define GT64240_ETH_PORT_STATUS ETHERNET0_PORT_STATUS_REGISTER
+#define GT64240_ETH_IO_SIZE ETHERNET_PORTS_DIFFERENCE_OFFSETS
+#define GT64240_ETH_SMI_REG ETHERNET_SMI_REGISTER
+#define GT64240_ETH_MIB_COUNT_BASE ETHERNET0_MIB_COUNTER_BASE
+#define GT64240_ETH_SDMA_CONFIG ETHERNET0_SDMA_CONFIGURATION_REGISTER
+#define GT64240_ETH_SDMA_COMM ETHERNET0_SDMA_COMMAND_REGISTER
+#define GT64240_ETH_INT_MASK ETHERNET0_INTERRUPT_MASK_REGISTER
+#define GT64240_ETH_INT_CAUSE ETHERNET0_INTERRUPT_CAUSE_REGISTER
+#define GT64240_ETH_CURR_TX_DESC_PTR0 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_TX_DESC_PTR1 ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER1
+#define GT64240_ETH_1ST_RX_DESC_PTR0 ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_RX_DESC_PTR0 ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_HASH_TBL_PTR ETHERNET0_HASH_TABLE_POINTER_REGISTER
+
+/* Turn on NAPI by default */
+
+#define GT64240_NAPI 1
+
+/* Some 64240 settings that SHOULD eventually be set up in the PROM monitor: */
+/* (Board-specific to the DSL3224 Rev A board ONLY!) */
+#define D3224_MPP_CTRL0_SETTING 0x66669900
+#define D3224_MPP_CTRL1_SETTING 0x00000000
+#define D3224_MPP_CTRL2_SETTING 0x00887700
+#define D3224_MPP_CTRL3_SETTING 0x00000044
+#define D3224_GPP_IO_CTRL_SETTING 0x0000e800
+#define D3224_GPP_LEVEL_CTRL_SETTING 0xf001f703
+#define D3224_GPP_VALUE_SETTING 0x00000000
+
+/* Keep the ring sizes a power of two for efficiency. */
+//-#define TX_RING_SIZE 16
+#define TX_RING_SIZE 64 /* TESTING !!! */
+#define RX_RING_SIZE 32
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+#define RX_HASH_TABLE_SIZE 16384
+#define HASH_HOP_NUMBER 12
+
+#define NUM_INTERFACES 3
+
+#define GT64240ETH_TX_TIMEOUT	(HZ/4)
+
+#define MIPS_GT64240_BASE 0xf4000000
+#define GT64240_ETH0_BASE (MIPS_GT64240_BASE + GT64240_ETH_PORT_CONFIG)
+#define GT64240_ETH1_BASE (GT64240_ETH0_BASE + GT64240_ETH_IO_SIZE)
+#define GT64240_ETH2_BASE (GT64240_ETH1_BASE + GT64240_ETH_IO_SIZE)
+
+#if defined(CONFIG_MIPS_DSL3224)
+#define GT64240_ETHER0_IRQ 4
+#define GT64240_ETHER1_IRQ 4
+#else
+#define GT64240_ETHER0_IRQ -1
+#define GT64240_ETHER1_IRQ -1
+#endif
+
+#define REV_GT64240 0x1
+#define REV_GT64240A 0x10
+
+#define GT64240ETH_READ(gp, offset) \
+ GT_READ((gp)->port_offset + (offset))
+
+#define GT64240ETH_WRITE(gp, offset, data) \
+ GT_WRITE((gp)->port_offset + (offset), (data))
+
+#define GT64240ETH_SETBIT(gp, offset, bits) \
+ GT64240ETH_WRITE((gp), (offset), \
+ GT64240ETH_READ((gp), (offset)) | (bits))
+
+#define GT64240ETH_CLRBIT(gp, offset, bits) \
+ GT64240ETH_WRITE((gp), (offset), \
+ GT64240ETH_READ((gp), (offset)) & ~(bits))
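+
+/* Illustrative example (not taken from a driver): enable the port by
+ * setting the pcrEN bit (defined below) in the port configuration
+ * register:
+ *
+ *	GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrEN);
+ */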
+
+#define GT64240_READ(ofs) GT_READ(ofs)
+#define GT64240_WRITE(ofs, data) GT_WRITE((ofs), (data))
+
+/* Bit definitions of the SMI Reg */
+enum {
+ smirDataMask = 0xffff,
+ smirPhyAdMask = 0x1f << 16,
+ smirPhyAdBit = 16,
+ smirRegAdMask = 0x1f << 21,
+ smirRegAdBit = 21,
+ smirOpCode = 1 << 26,
+ smirReadValid = 1 << 27,
+ smirBusy = 1 << 28
+};
+
+/* Bit definitions of the Port Config Reg */
+enum pcr_bits {
+ pcrPM = 1 << 0,
+ pcrRBM = 1 << 1,
+ pcrPBF = 1 << 2,
+ pcrEN = 1 << 7,
+ pcrLPBKMask = 0x3 << 8,
+ pcrLPBKBit = 1 << 8,
+ pcrFC = 1 << 10,
+ pcrHS = 1 << 12,
+ pcrHM = 1 << 13,
+ pcrHDM = 1 << 14,
+ pcrHD = 1 << 15,
+ pcrISLMask = 0x7 << 28,
+ pcrISLBit = 28,
+ pcrACCS = 1 << 31
+};
+
+/* Bit definitions of the Port Config Extend Reg */
+enum pcxr_bits {
+ pcxrIGMP = 1,
+ pcxrSPAN = 2,
+ pcxrPAR = 4,
+ pcxrPRIOtxMask = 0x7 << 3,
+ pcxrPRIOtxBit = 3,
+ pcxrPRIOrxMask = 0x3 << 6,
+ pcxrPRIOrxBit = 6,
+ pcxrPRIOrxOverride = 1 << 8,
+ pcxrDPLXen = 1 << 9,
+ pcxrFCTLen = 1 << 10,
+ pcxrFLP = 1 << 11,
+ pcxrFCTL = 1 << 12,
+ pcxrMFLMask = 0x3 << 14,
+ pcxrMFLBit = 14,
+ pcxrMIBclrMode = 1 << 16,
+ pcxrSpeed = 1 << 18,
+ pcxrSpeeden = 1 << 19,
+ pcxrRMIIen = 1 << 20,
+ pcxrDSCPen = 1 << 21
+};
+
+/* Bit definitions of the Port Command Reg */
+enum pcmr_bits {
+ pcmrFJ = 1 << 15
+};
+
+
+/* Bit definitions of the Port Status Reg */
+enum psr_bits {
+ psrSpeed = 1,
+ psrDuplex = 2,
+ psrFctl = 4,
+ psrLink = 8,
+ psrPause = 1 << 4,
+ psrTxLow = 1 << 5,
+ psrTxHigh = 1 << 6,
+ psrTxInProg = 1 << 7
+};
+
+/* Bit definitions of the SDMA Config Reg */
+enum sdcr_bits {
+ sdcrRCMask = 0xf << 2,
+ sdcrRCBit = 2,
+ sdcrBLMR = 1 << 6,
+ sdcrBLMT = 1 << 7,
+ sdcrPOVR = 1 << 8,
+ sdcrRIFB = 1 << 9,
+ sdcrBSZMask = 0x3 << 12,
+ sdcrBSZBit = 12
+};
+
+/* Bit definitions of the SDMA Command Reg */
+enum sdcmr_bits {
+ sdcmrERD = 1 << 7,
+ sdcmrAR = 1 << 15,
+ sdcmrSTDH = 1 << 16,
+ sdcmrSTDL = 1 << 17,
+ sdcmrTXDH = 1 << 23,
+ sdcmrTXDL = 1 << 24,
+ sdcmrAT = 1 << 31
+};
+
+/* Bit definitions of the Interrupt Cause Reg */
+enum icr_bits {
+ icrRxBuffer = 1,
+ icrTxBufferHigh = 1 << 2,
+ icrTxBufferLow = 1 << 3,
+ icrTxEndHigh = 1 << 6,
+ icrTxEndLow = 1 << 7,
+ icrRxError = 1 << 8,
+ icrTxErrorHigh = 1 << 10,
+ icrTxErrorLow = 1 << 11,
+ icrRxOVR = 1 << 12,
+ icrTxUdr = 1 << 13,
+ icrRxBufferQ0 = 1 << 16,
+ icrRxBufferQ1 = 1 << 17,
+ icrRxBufferQ2 = 1 << 18,
+ icrRxBufferQ3 = 1 << 19,
+ icrRxErrorQ0 = 1 << 20,
+ icrRxErrorQ1 = 1 << 21,
+ icrRxErrorQ2 = 1 << 22,
+ icrRxErrorQ3 = 1 << 23,
+ icrMIIPhySTC = 1 << 28,
+ icrSMIdone = 1 << 29,
+ icrEtherIntSum = 1 << 31
+};
+
+
+/* The Rx and Tx descriptor lists. */
+#ifdef __LITTLE_ENDIAN
+typedef struct {
+ u32 cmdstat;
+ u16 reserved; //-prk21aug01 u32 reserved:16;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u32 buff_ptr;
+ u32 next;
+} gt64240_td_t;
+
+typedef struct {
+ u32 cmdstat;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
+ u32 buff_ptr;
+ u32 next;
+} gt64240_rd_t;
+#elif defined(__BIG_ENDIAN)
+typedef struct {
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u16 reserved; //-prk21aug01 u32 reserved:16;
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt64240_td_t;
+
+typedef struct {
+ u16 buff_sz; //-prk21aug01 u32 buff_sz:16;
+ u16 byte_cnt; //-prk21aug01 u32 byte_cnt:16;
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt64240_rd_t;
+#else
+#error Either __BIG_ENDIAN or __LITTLE_ENDIAN must be defined!
+#endif
+
+
+/* Values for the Tx command-status descriptor entry. */
+enum td_cmdstat {
+ txOwn = 1 << 31,
+ txAutoMode = 1 << 30,
+ txEI = 1 << 23,
+ txGenCRC = 1 << 22,
+ txPad = 1 << 18,
+ txFirst = 1 << 17,
+ txLast = 1 << 16,
+ txErrorSummary = 1 << 15,
+ txReTxCntMask = 0x0f << 10,
+ txReTxCntBit = 10,
+ txCollision = 1 << 9,
+ txReTxLimit = 1 << 8,
+ txUnderrun = 1 << 6,
+ txLateCollision = 1 << 5
+};
+
+
+/* Values for the Rx command-status descriptor entry. */
+enum rd_cmdstat {
+ rxOwn = 1 << 31,
+ rxAutoMode = 1 << 30,
+ rxEI = 1 << 23,
+ rxFirst = 1 << 17,
+ rxLast = 1 << 16,
+ rxErrorSummary = 1 << 15,
+ rxIGMP = 1 << 14,
+ rxHashExpired = 1 << 13,
+ rxMissedFrame = 1 << 12,
+ rxFrameType = 1 << 11,
+ rxShortFrame = 1 << 8,
+ rxMaxFrameLen = 1 << 7,
+ rxOverrun = 1 << 6,
+ rxCollision = 1 << 4,
+ rxCRCError = 1
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ hteValid = 1,
+ hteSkip = 2,
+ hteRD = 4
+};
+
+// The MIB counters
+typedef struct {
+ u32 byteReceived;
+ u32 byteSent;
+ u32 framesReceived;
+ u32 framesSent;
+ u32 totalByteReceived;
+ u32 totalFramesReceived;
+ u32 broadcastFramesReceived;
+ u32 multicastFramesReceived;
+ u32 cRCError;
+ u32 oversizeFrames;
+ u32 fragments;
+ u32 jabber;
+ u32 collision;
+ u32 lateCollision;
+ u32 frames64;
+ u32 frames65_127;
+ u32 frames128_255;
+ u32 frames256_511;
+ u32 frames512_1023;
+ u32 frames1024_MaxSize;
+ u32 macRxError;
+ u32 droppedFrames;
+ u32 outMulticastFrames;
+ u32 outBroadcastFrames;
+ u32 undersizeFrames;
+} mib_counters_t;
+
+
+struct gt64240_private {
+ gt64240_rd_t *rx_ring;
+ gt64240_td_t *tx_ring;
+ // The Rx and Tx rings must be 16-byte aligned
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ char *hash_table;
+ // The Hash Table must be 8-byte aligned
+ dma_addr_t hash_table_dma;
+ int hash_mode;
+
+ // The Rx buffers must be 8-byte aligned
+ char *rx_buff;
+ dma_addr_t rx_buff_dma;
+ // Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
+ // of payload must be 8-byte aligned
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ int rx_next_out; /* The next free ring entry to receive */
+ int tx_next_in; /* The next free ring entry to send */
+ int tx_next_out; /* The last ring entry the ISR processed */
+ int tx_count; /* current # of pkts waiting to be sent in Tx ring */
+ int intr_work_done; /* number of Rx and Tx pkts processed in the isr */
+ int tx_full; /* Tx ring is full */
+
+ mib_counters_t mib;
+ struct net_device_stats stats;
+
+ int io_size;
+ int port_num; // 0 or 1
+ u32 port_offset;
+
+ int phy_addr; // PHY address
+ u32 last_psr; // last value of the port status register
+
+ int options; /* User-settable misc. driver options. */
+ int drv_flags;
+ spinlock_t lock; /* Serialise access to device */
+ struct mii_if_info mii_if;
+
+ u32 msg_enable;
+};
+
+#endif /* _GT64240ETH_H */
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c
new file mode 100644
index 000000000000..666cfbbcf6d9
--- /dev/null
+++ b/drivers/net/gt96100eth.c
@@ -0,0 +1,1569 @@
+/*
+ * Copyright 2000, 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
+ *
+ * Revision history
+ *
+ * 11.11.2001 Moved to 2.4.14, ppopov@mvista.com. Modified driver to add
+ * proper gt96100A support.
+ * 12.05.2001 Moved eth port 0 to irq 3 (mapped to GT_SERINT0 on EV96100A)
+ * in order for both ports to work. Also cleaned up boot
+ * option support (mac address string parsing), fleshed out
+ * gt96100_cleanup_module(), and other general code cleanups
+ * <stevel@mvista.com>.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#define DESC_BE 1
+#define DESC_DATA_BE 1
+
+#define GT96100_DEBUG 2
+
+#include "gt96100eth.h"
+
+// prototypes
+static void* dmaalloc(size_t size, dma_addr_t *dma_handle);
+static void dmafree(size_t size, void *vaddr);
+static void gt96100_delay(int msec);
+static int gt96100_add_hash_entry(struct net_device *dev,
+ unsigned char* addr);
+static void read_mib_counters(struct gt96100_private *gp);
+static int read_MII(int phy_addr, u32 reg);
+static int write_MII(int phy_addr, u32 reg, u16 data);
+static int gt96100_init_module(void);
+static void gt96100_cleanup_module(void);
+static void dump_MII(int dbg_lvl, struct net_device *dev);
+static void dump_tx_desc(int dbg_lvl, struct net_device *dev, int i);
+static void dump_rx_desc(int dbg_lvl, struct net_device *dev, int i);
+static void dump_skb(int dbg_lvl, struct net_device *dev,
+ struct sk_buff *skb);
+static void dump_hw_addr(int dbg_lvl, struct net_device *dev,
+ const char* pfx, unsigned char* addr_str);
+static void update_stats(struct gt96100_private *gp);
+static void abort(struct net_device *dev, u32 abort_bits);
+static void hard_stop(struct net_device *dev);
+static void enable_ether_irq(struct net_device *dev);
+static void disable_ether_irq(struct net_device *dev);
+static int gt96100_probe1(struct pci_dev *pci, int port_num);
+static void reset_tx(struct net_device *dev);
+static void reset_rx(struct net_device *dev);
+static int gt96100_check_tx_consistent(struct gt96100_private *gp);
+static int gt96100_init(struct net_device *dev);
+static int gt96100_open(struct net_device *dev);
+static int gt96100_close(struct net_device *dev);
+static int gt96100_tx(struct sk_buff *skb, struct net_device *dev);
+static int gt96100_rx(struct net_device *dev, u32 status);
+static irqreturn_t gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void gt96100_tx_timeout(struct net_device *dev);
+static void gt96100_set_rx_mode(struct net_device *dev);
+static struct net_device_stats* gt96100_get_stats(struct net_device *dev);
+
+extern char * __init prom_getcmdline(void);
+
+static int max_interrupt_work = 32;
+
+#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
+
+#define RUN_AT(x) (jiffies + (x))
+
+// For reading/writing 32-bit words and half-words from/to DMA memory
+#ifdef DESC_BE
+#define cpu_to_dma32 cpu_to_be32
+#define dma32_to_cpu be32_to_cpu
+#define cpu_to_dma16 cpu_to_be16
+#define dma16_to_cpu be16_to_cpu
+#else
+#define cpu_to_dma32 cpu_to_le32
+#define dma32_to_cpu le32_to_cpu
+#define cpu_to_dma16 cpu_to_le16
+#define dma16_to_cpu le16_to_cpu
+#endif
+
+static char mac0[18] = "00.02.03.04.05.06";
+static char mac1[18] = "00.01.02.03.04.05";
+MODULE_PARM(mac0, "c18");
+MODULE_PARM(mac1, "c18");
+MODULE_PARM_DESC(mac0, "MAC address for GT96100 ethernet port 0");
+MODULE_PARM_DESC(mac1, "MAC address for GT96100 ethernet port 1");
+
+/*
+ * Info for the GT96100 ethernet controller's ports.
+ */
+static struct gt96100_if_t {
+ struct net_device *dev;
+ unsigned int iobase; // IO Base address of this port
+ int irq; // IRQ number of this port
+ char *mac_str;
+} gt96100_iflist[NUM_INTERFACES] = {
+ {
+ NULL,
+ GT96100_ETH0_BASE, GT96100_ETHER0_IRQ,
+ mac0
+ },
+ {
+ NULL,
+ GT96100_ETH1_BASE, GT96100_ETHER1_IRQ,
+ mac1
+ }
+};
+
+static inline const char*
+chip_name(int chip_rev)
+{
+ switch (chip_rev) {
+ case REV_GT96100:
+ return "GT96100";
+ case REV_GT96100A_1:
+ case REV_GT96100A:
+ return "GT96100A";
+ default:
+ return "Unknown GT96100";
+ }
+}
+
+/*
+ DMA memory allocation, derived from pci_alloc_consistent.
+*/
+static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
+{
+ void *ret;
+
+ ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));
+
+ if (ret != NULL) {
+ dma_cache_inv((unsigned long)ret, size);
+ if (dma_handle != NULL)
+ *dma_handle = virt_to_phys(ret);
+
+ /* bump virtual address up to non-cached area */
+ ret = (void*)KSEG1ADDR(ret);
+ }
+
+ return ret;
+}
+
+static void dmafree(size_t size, void *vaddr)
+{
+ vaddr = (void*)KSEG0ADDR(vaddr);
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static void gt96100_delay(int ms)
+{
+ if (in_interrupt())
+ return;
+ else
+ msleep_interruptible(ms);
+}
+
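+/* Parse a MAC address given as a string of the form "00.02.03.04.05.06"
+ * (two hex digits per octet, '.' separated, as in the mac0/mac1 module
+ * parameters above) into dev->dev_addr.
+ */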
+static int
+parse_mac_addr(struct net_device *dev, char* macstr)
+{
+ int i, j;
+ unsigned char result, value;
+
+ for (i=0; i<6; i++) {
+ result = 0;
+ if (i != 5 && *(macstr+2) != '.') {
+			err(__FILE__ ": invalid mac address format: %d %c\n",
+ i, *(macstr+2));
+ return -EINVAL;
+ }
+
+ for (j=0; j<2; j++) {
+ if (isxdigit(*macstr) &&
+ (value = isdigit(*macstr) ? *macstr-'0' :
+ toupper(*macstr)-'A'+10) < 16) {
+ result = result*16 + value;
+ macstr++;
+ } else {
+				err(__FILE__ ": invalid mac address "
+ "character: %c\n", *macstr);
+ return -EINVAL;
+ }
+ }
+
+ macstr++; // step over '.'
+ dev->dev_addr[i] = result;
+ }
+
+ return 0;
+}
+
+
+static int
+read_MII(int phy_addr, u32 reg)
+{
+ int timedout = 20;
+ u32 smir = smirOpCode | (phy_addr << smirPhyAdBit) |
+ (reg << smirRegAdBit);
+
+ // wait for last operation to complete
+ while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
+ // snooze for 1 msec and check again
+ gt96100_delay(1);
+
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
+ return -ENODEV;
+ }
+ }
+
+ GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
+
+ timedout = 20;
+ // wait for read to complete
+ while (!((smir = GT96100_READ(GT96100_ETH_SMI_REG)) & smirReadValid)) {
+ // snooze for 1 msec and check again
+ gt96100_delay(1);
+
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: timeout!!\n", __FUNCTION__);
+ return -ENODEV;
+ }
+ }
+
+ return (int)(smir & smirDataMask);
+}
+
+static void
+dump_tx_desc(int dbg_lvl, struct net_device *dev, int i)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ gt96100_td_t *td = &gp->tx_ring[i];
+
+ dbg(dbg_lvl, "Tx descriptor at 0x%08lx:\n", virt_to_phys(td));
+ dbg(dbg_lvl,
+ " cmdstat=%04x, byte_cnt=%04x, buff_ptr=%04x, next=%04x\n",
+ dma32_to_cpu(td->cmdstat),
+ dma16_to_cpu(td->byte_cnt),
+ dma32_to_cpu(td->buff_ptr),
+ dma32_to_cpu(td->next));
+}
+
+static void
+dump_rx_desc(int dbg_lvl, struct net_device *dev, int i)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ gt96100_rd_t *rd = &gp->rx_ring[i];
+
+ dbg(dbg_lvl, "Rx descriptor at 0x%08lx:\n", virt_to_phys(rd));
+ dbg(dbg_lvl, " cmdstat=%04x, buff_sz=%04x, byte_cnt=%04x, "
+ "buff_ptr=%04x, next=%04x\n",
+ dma32_to_cpu(rd->cmdstat),
+ dma16_to_cpu(rd->buff_sz),
+ dma16_to_cpu(rd->byte_cnt),
+ dma32_to_cpu(rd->buff_ptr),
+ dma32_to_cpu(rd->next));
+}
+
+static int
+write_MII(int phy_addr, u32 reg, u16 data)
+{
+ int timedout = 20;
+ u32 smir = (phy_addr << smirPhyAdBit) |
+ (reg << smirRegAdBit) | data;
+
+ // wait for last operation to complete
+ while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
+ // snooze for 1 msec and check again
+ gt96100_delay(1);
+
+ if (--timedout == 0) {
+ printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
+ return -1;
+ }
+ }
+
+ GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
+ return 0;
+}
+
+static void
+dump_MII(int dbg_lvl, struct net_device *dev)
+{
+ int i, val;
+ struct gt96100_private *gp = netdev_priv(dev);
+
+ if (dbg_lvl <= GT96100_DEBUG) {
+ for (i=0; i<7; i++) {
+ if ((val = read_MII(gp->phy_addr, i)) >= 0)
+ printk("MII Reg %d=%x\n", i, val);
+ }
+ for (i=16; i<21; i++) {
+ if ((val = read_MII(gp->phy_addr, i)) >= 0)
+ printk("MII Reg %d=%x\n", i, val);
+ }
+ }
+}
+
+static void
+dump_hw_addr(int dbg_lvl, struct net_device *dev, const char* pfx,
+ unsigned char* addr_str)
+{
+ int i;
+ char buf[100], octet[5];
+
+ if (dbg_lvl <= GT96100_DEBUG) {
+ strcpy(buf, pfx);
+ for (i = 0; i < 6; i++) {
+ sprintf(octet, "%2.2x%s",
+ addr_str[i], i<5 ? ":" : "\n");
+ strcat(buf, octet);
+ }
+ info("%s", buf);
+ }
+}
+
+
+static void
+dump_skb(int dbg_lvl, struct net_device *dev, struct sk_buff *skb)
+{
+ int i;
+ unsigned char* skbdata;
+
+ if (dbg_lvl <= GT96100_DEBUG) {
+ dbg(dbg_lvl, "%s: skb=%p, skb->data=%p, skb->len=%d\n",
+ __FUNCTION__, skb, skb->data, skb->len);
+
+ skbdata = (unsigned char*)KSEG1ADDR(skb->data);
+
+ for (i=0; i<skb->len; i++) {
+ if (!(i % 16))
+ printk(KERN_DEBUG "\n %3.3x: %2.2x,",
+ i, skbdata[i]);
+ else
+ printk(KERN_DEBUG "%2.2x,", skbdata[i]);
+ }
+ printk(KERN_DEBUG "\n");
+ }
+}
+
+
+static int
+gt96100_add_hash_entry(struct net_device *dev, unsigned char* addr)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ //u16 hashResult, stmp;
+ //unsigned char ctmp, hash_ea[6];
+ u32 tblEntry1, tblEntry0, *tblEntryAddr;
+ int i;
+
+ tblEntry1 = hteValid | hteRD;
+ tblEntry1 |= (u32)addr[5] << 3;
+ tblEntry1 |= (u32)addr[4] << 11;
+ tblEntry1 |= (u32)addr[3] << 19;
+ tblEntry1 |= ((u32)addr[2] & 0x1f) << 27;
+ dbg(3, "%s: tblEntry1=%x\n", __FUNCTION__, tblEntry1);
+ tblEntry0 = ((u32)addr[2] >> 5) & 0x07;
+ tblEntry0 |= (u32)addr[1] << 3;
+ tblEntry0 |= (u32)addr[0] << 11;
+ dbg(3, "%s: tblEntry0=%x\n", __FUNCTION__, tblEntry0);
+
+#if 0
+
+ for (i=0; i<6; i++) {
+ // nibble swap
+ ctmp = nibswap(addr[i]);
+ // invert every nibble
+ hash_ea[i] = ((ctmp&1)<<3) | ((ctmp&8)>>3) |
+ ((ctmp&2)<<1) | ((ctmp&4)>>1);
+ hash_ea[i] |= ((ctmp&0x10)<<3) | ((ctmp&0x80)>>3) |
+ ((ctmp&0x20)<<1) | ((ctmp&0x40)>>1);
+ }
+
+	dump_hw_addr(3, dev, "nib swap/invt addr=", hash_ea);
+
+ if (gp->hash_mode == 0) {
+ hashResult = ((u16)hash_ea[0] & 0xfc) << 7;
+ stmp = ((u16)hash_ea[0] & 0x03) |
+ (((u16)hash_ea[1] & 0x7f) << 2);
+ stmp ^= (((u16)hash_ea[1] >> 7) & 0x01) |
+ ((u16)hash_ea[2] << 1);
+ stmp ^= (u16)hash_ea[3] | (((u16)hash_ea[4] & 1) << 8);
+ hashResult |= stmp;
+ } else {
+ return -1; // don't support hash mode 1
+ }
+
+ dbg(3, "%s: hashResult=%x\n", __FUNCTION__, hashResult);
+
+ tblEntryAddr =
+ (u32 *)(&gp->hash_table[((u32)hashResult & 0x7ff) << 3]);
+
+	dbg(3, "%s: tblEntryAddr=%p\n", __FUNCTION__, tblEntryAddr);
+
+ for (i=0; i<HASH_HOP_NUMBER; i++) {
+ if ((*tblEntryAddr & hteValid) &&
+ !(*tblEntryAddr & hteSkip)) {
+ // This entry is already occupied, go to next entry
+ tblEntryAddr += 2;
+ dbg(3, "%s: skipping to %p\n", __FUNCTION__,
+ tblEntryAddr);
+ } else {
+ memset(tblEntryAddr, 0, 8);
+ tblEntryAddr[1] = cpu_to_dma32(tblEntry1);
+ tblEntryAddr[0] = cpu_to_dma32(tblEntry0);
+ break;
+ }
+ }
+
+ if (i >= HASH_HOP_NUMBER) {
+ err("%s: expired!\n", __FUNCTION__);
+ return -1; // Couldn't find an unused entry
+ }
+
+#else
+
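+	/* Simplified fallback: write the same entry into every bucket of
+	 * the hash table, so this address is accepted no matter which
+	 * bucket the hardware hashes it into. */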
+ tblEntryAddr = (u32 *)gp->hash_table;
+ for (i=0; i<RX_HASH_TABLE_SIZE/4; i+=2) {
+ tblEntryAddr[i+1] = cpu_to_dma32(tblEntry1);
+ tblEntryAddr[i] = cpu_to_dma32(tblEntry0);
+ }
+
+#endif
+
+ return 0;
+}
+
+
+static void
+read_mib_counters(struct gt96100_private *gp)
+{
+ u32* mib_regs = (u32*)&gp->mib;
+ int i;
+
+ for (i=0; i<sizeof(mib_counters_t)/sizeof(u32); i++)
+ mib_regs[i] = GT96100ETH_READ(gp, GT96100_ETH_MIB_COUNT_BASE +
+ i*sizeof(u32));
+}
+
+
+static void
+update_stats(struct gt96100_private *gp)
+{
+ mib_counters_t *mib = &gp->mib;
+ struct net_device_stats *stats = &gp->stats;
+
+ read_mib_counters(gp);
+
+ stats->rx_packets = mib->totalFramesReceived;
+ stats->tx_packets = mib->framesSent;
+ stats->rx_bytes = mib->totalByteReceived;
+ stats->tx_bytes = mib->byteSent;
+ stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
+ //the tx error counters are incremented by the ISR
+ //rx_dropped incremented by gt96100_rx
+ //tx_dropped incremented by gt96100_tx
+ stats->multicast = mib->multicastFramesReceived;
+ // collisions incremented by gt96100_tx_complete
+ stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
+ // The RxError condition means the Rx DMA encountered a
+ // CPU owned descriptor, which, if things are working as
+ // they should, means the Rx ring has overflowed.
+ stats->rx_over_errors = mib->macRxError;
+ stats->rx_crc_errors = mib->cRCError;
+}
+
+static void
+abort(struct net_device *dev, u32 abort_bits)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ int timedout = 100; // wait up to 100 msec for hard stop to complete
+
+ dbg(3, "%s\n", __FUNCTION__);
+
+	// Return if neither the Rx nor the Tx abort bit is set
+ if (!(abort_bits & (sdcmrAR | sdcmrAT)))
+ return;
+
+ // make sure only the Rx/Tx abort bits are set
+ abort_bits &= (sdcmrAR | sdcmrAT);
+
+ spin_lock(&gp->lock);
+
+ // abort any Rx/Tx DMA immediately
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, abort_bits);
+
+ dbg(3, "%s: SDMA comm = %x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));
+
+ // wait for abort to complete
+ while (GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM) & abort_bits) {
+ // snooze for 1 msec and check again
+ gt96100_delay(1);
+
+ if (--timedout == 0) {
+ err("%s: timeout!!\n", __FUNCTION__);
+ break;
+ }
+ }
+
+ spin_unlock(&gp->lock);
+}
+
+
+static void
+hard_stop(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+
+ dbg(3, "%s\n", __FUNCTION__);
+
+ disable_ether_irq(dev);
+
+ abort(dev, sdcmrAR | sdcmrAT);
+
+ // disable port
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, 0);
+}
+
+
+static void
+enable_ether_irq(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ u32 intMask;
+ /*
+ * route ethernet interrupt to GT_SERINT0 for port 0,
+ * GT_INT0 for port 1.
+ */
+ int intr_mask_reg = (gp->port_num == 0) ?
+ GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
+
+ if (gp->chip_rev >= REV_GT96100A_1) {
+ intMask = icrTxBufferLow | icrTxEndLow |
+ icrTxErrorLow | icrRxOVR | icrTxUdr |
+ icrRxBufferQ0 | icrRxErrorQ0 |
+ icrMIIPhySTC | icrEtherIntSum;
+ }
+ else {
+ intMask = icrTxBufferLow | icrTxEndLow |
+ icrTxErrorLow | icrRxOVR | icrTxUdr |
+ icrRxBuffer | icrRxError |
+ icrMIIPhySTC | icrEtherIntSum;
+ }
+
+ // unmask interrupts
+ GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, intMask);
+
+ intMask = GT96100_READ(intr_mask_reg);
+ intMask |= 1<<gp->port_num;
+ GT96100_WRITE(intr_mask_reg, intMask);
+}
+
+static void
+disable_ether_irq(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ u32 intMask;
+ int intr_mask_reg = (gp->port_num == 0) ?
+ GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
+
+ intMask = GT96100_READ(intr_mask_reg);
+ intMask &= ~(1<<gp->port_num);
+ GT96100_WRITE(intr_mask_reg, intMask);
+
+ GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, 0);
+}
+
+
+/*
+ * Init GT96100 ethernet controller driver
+ */
+static int gt96100_init_module(void)
+{
+ struct pci_dev *pci;
+ int i, retval=0;
+ u32 cpuConfig;
+
+ /*
+ * Stupid probe because this really isn't a PCI device
+ */
+ if (!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
+ !(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
+ printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
+ return -ENODEV;
+ }
+
+ cpuConfig = GT96100_READ(GT96100_CPU_INTERF_CONFIG);
+ if (cpuConfig & (1<<12)) {
+ printk(KERN_ERR __FILE__
+ ": must be in Big Endian mode!\n");
+ return -ENODEV;
+ }
+
+ for (i=0; i < NUM_INTERFACES; i++)
+ retval |= gt96100_probe1(pci, i);
+
+ return retval;
+}
+
+static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
+{
+ struct gt96100_private *gp = NULL;
+ struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
+ int phy_addr, phy_id1, phy_id2;
+ u32 phyAD;
+ int retval;
+ unsigned char chip_rev;
+ struct net_device *dev = NULL;
+
+ if (gtif->irq < 0) {
+ printk(KERN_ERR "%s: irq unknown - probing not supported\n",
+ __FUNCTION__);
+ return -ENODEV;
+ }
+
+ pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);
+
+ if (chip_rev >= REV_GT96100A_1) {
+ phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
+ phy_addr = (phyAD >> (5*port_num)) & 0x1f;
+ } else {
+ /*
+		 * not sure what this is about -- probably a GT bug
+ */
+ phy_addr = port_num;
+ phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
+ phyAD &= ~(0x1f << (port_num*5));
+ phyAD |= phy_addr << (port_num*5);
+ GT96100_WRITE(GT96100_ETH_PHY_ADDR_REG, phyAD);
+ }
+
+ // probe for the external PHY
+ if ((phy_id1 = read_MII(phy_addr, 2)) <= 0 ||
+ (phy_id2 = read_MII(phy_addr, 3)) <= 0) {
+ printk(KERN_ERR "%s: no PHY found on MII%d\n", __FUNCTION__, port_num);
+ return -ENODEV;
+ }
+
+ if (!request_region(gtif->iobase, GT96100_ETH_IO_SIZE, "GT96100ETH")) {
+ printk(KERN_ERR "%s: request_region failed\n", __FUNCTION__);
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct gt96100_private));
+	if (!dev) {
+		retval = -ENOMEM;
+		goto out;
+	}
+ gtif->dev = dev;
+
+ /* private struct aligned and zeroed by alloc_etherdev */
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = gtif->iobase;
+ dev->irq = gtif->irq;
+
+ if ((retval = parse_mac_addr(dev, gtif->mac_str))) {
+ err("%s: MAC address parse failed\n", __FUNCTION__);
+ retval = -EINVAL;
+ goto out1;
+ }
+
+ gp = netdev_priv(dev);
+
+ memset(gp, 0, sizeof(*gp)); // clear it
+
+ gp->port_num = port_num;
+ gp->io_size = GT96100_ETH_IO_SIZE;
+ gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
+ gp->phy_addr = phy_addr;
+ gp->chip_rev = chip_rev;
+
+ info("%s found at 0x%x, irq %d\n",
+ chip_name(gp->chip_rev), gtif->iobase, gtif->irq);
+ dump_hw_addr(0, dev, "HW Address ", dev->dev_addr);
+ info("%s chip revision=%d\n", chip_name(gp->chip_rev), gp->chip_rev);
+ info("%s ethernet port %d\n", chip_name(gp->chip_rev), gp->port_num);
+ info("external PHY ID1=0x%04x, ID2=0x%04x\n", phy_id1, phy_id2);
+
+ // Allocate Rx and Tx descriptor rings
+ if (gp->rx_ring == NULL) {
+ // All descriptors in ring must be 16-byte aligned
+ gp->rx_ring = dmaalloc(sizeof(gt96100_rd_t) * RX_RING_SIZE
+ + sizeof(gt96100_td_t) * TX_RING_SIZE,
+ &gp->rx_ring_dma);
+ if (gp->rx_ring == NULL) {
+ retval = -ENOMEM;
+ goto out1;
+ }
+
+ gp->tx_ring = (gt96100_td_t *)(gp->rx_ring + RX_RING_SIZE);
+ gp->tx_ring_dma =
+ gp->rx_ring_dma + sizeof(gt96100_rd_t) * RX_RING_SIZE;
+ }
+
+ // Allocate the Rx Data Buffers
+ if (gp->rx_buff == NULL) {
+ gp->rx_buff = dmaalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ &gp->rx_buff_dma);
+ if (gp->rx_buff == NULL) {
+ retval = -ENOMEM;
+ goto out2;
+ }
+ }
+
+ dbg(3, "%s: rx_ring=%p, tx_ring=%p\n", __FUNCTION__,
+ gp->rx_ring, gp->tx_ring);
+
+ // Allocate Rx Hash Table
+ if (gp->hash_table == NULL) {
+ gp->hash_table = (char*)dmaalloc(RX_HASH_TABLE_SIZE,
+ &gp->hash_table_dma);
+ if (gp->hash_table == NULL) {
+ retval = -ENOMEM;
+ goto out3;
+ }
+ }
+
+ dbg(3, "%s: hash=%p\n", __FUNCTION__, gp->hash_table);
+
+ spin_lock_init(&gp->lock);
+
+ dev->open = gt96100_open;
+ dev->hard_start_xmit = gt96100_tx;
+ dev->stop = gt96100_close;
+ dev->get_stats = gt96100_get_stats;
+ //dev->do_ioctl = gt96100_ioctl;
+ dev->set_multicast_list = gt96100_set_rx_mode;
+ dev->tx_timeout = gt96100_tx_timeout;
+ dev->watchdog_timeo = GT96100ETH_TX_TIMEOUT;
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out4;
+ return 0;
+
+out4:
+	dmafree(RX_HASH_TABLE_SIZE, gp->hash_table);
+out3:
+ dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
+out2:
+ dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
+ + sizeof(gt96100_td_t) * TX_RING_SIZE,
+ gp->rx_ring);
+out1:
+ free_netdev (dev);
+out:
+ release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
+
+ err("%s failed. Returns %d\n", __FUNCTION__, retval);
+ return retval;
+}
+
+
+static void
+reset_tx(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ int i;
+
+ abort(dev, sdcmrAT);
+
+ for (i=0; i<TX_RING_SIZE; i++) {
+ if (gp->tx_skbuff[i]) {
+ if (in_interrupt())
+ dev_kfree_skb_irq(gp->tx_skbuff[i]);
+ else
+ dev_kfree_skb(gp->tx_skbuff[i]);
+ gp->tx_skbuff[i] = NULL;
+ }
+
+ gp->tx_ring[i].cmdstat = 0; // CPU owns
+ gp->tx_ring[i].byte_cnt = 0;
+ gp->tx_ring[i].buff_ptr = 0;
+ gp->tx_ring[i].next =
+ cpu_to_dma32(gp->tx_ring_dma +
+ sizeof(gt96100_td_t) * (i+1));
+ dump_tx_desc(4, dev, i);
+ }
+ /* Wrap the ring. */
+ gp->tx_ring[i-1].next = cpu_to_dma32(gp->tx_ring_dma);
+
+ // setup only the lowest priority TxCDP reg
+ GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR0, gp->tx_ring_dma);
+ GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR1, 0);
+
+	// init Tx indices and pkt counter
+ gp->tx_next_in = gp->tx_next_out = 0;
+ gp->tx_count = 0;
+
+}
+
+static void
+reset_rx(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ int i;
+
+ abort(dev, sdcmrAR);
+
+ for (i=0; i<RX_RING_SIZE; i++) {
+ gp->rx_ring[i].next =
+ cpu_to_dma32(gp->rx_ring_dma +
+ sizeof(gt96100_rd_t) * (i+1));
+ gp->rx_ring[i].buff_ptr =
+ cpu_to_dma32(gp->rx_buff_dma + i*PKT_BUF_SZ);
+ gp->rx_ring[i].buff_sz = cpu_to_dma16(PKT_BUF_SZ);
+ // Give ownership to device, set first and last, enable intr
+ gp->rx_ring[i].cmdstat =
+ cpu_to_dma32((u32)(rxFirst | rxLast | rxOwn | rxEI));
+ dump_rx_desc(4, dev, i);
+ }
+ /* Wrap the ring. */
+ gp->rx_ring[i-1].next = cpu_to_dma32(gp->rx_ring_dma);
+
+ // Setup only the lowest priority RxFDP and RxCDP regs
+ for (i=0; i<4; i++) {
+ if (i == 0) {
+ GT96100ETH_WRITE(gp, GT96100_ETH_1ST_RX_DESC_PTR0,
+ gp->rx_ring_dma);
+ GT96100ETH_WRITE(gp, GT96100_ETH_CURR_RX_DESC_PTR0,
+ gp->rx_ring_dma);
+ } else {
+ GT96100ETH_WRITE(gp,
+ GT96100_ETH_1ST_RX_DESC_PTR0 + i*4,
+ 0);
+ GT96100ETH_WRITE(gp,
+ GT96100_ETH_CURR_RX_DESC_PTR0 + i*4,
+ 0);
+ }
+ }
+
+ // init Rx NextOut index
+ gp->rx_next_out = 0;
+}
+
+
+// Returns 1 if the Tx counter and ring indices are inconsistent
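+// e.g. with TX_RING_SIZE=64, tx_next_in=10, tx_next_out=60:
+//   diff = 10 - 60 = -50, then -50 + 64 = 14,
+// so the state is consistent only if tx_count is also 14.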
+static int
+gt96100_check_tx_consistent(struct gt96100_private *gp)
+{
+ int diff = gp->tx_next_in - gp->tx_next_out;
+
+ diff = diff<0 ? TX_RING_SIZE + diff : diff;
+ diff = gp->tx_count == TX_RING_SIZE ? diff + TX_RING_SIZE : diff;
+
+ return (diff != gp->tx_count);
+}
+
+static int
+gt96100_init(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ u32 tmp;
+ u16 mii_reg;
+
+ dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
+ dbg(3, "%s: scs10_lo=%4x, scs10_hi=%4x\n", __FUNCTION__,
+ GT96100_READ(0x8), GT96100_READ(0x10));
+ dbg(3, "%s: scs32_lo=%4x, scs32_hi=%4x\n", __FUNCTION__,
+ GT96100_READ(0x18), GT96100_READ(0x20));
+
+ // Stop and disable Port
+ hard_stop(dev);
+
+ // Setup CIU Arbiter
+ tmp = GT96100_READ(GT96100_CIU_ARBITER_CONFIG);
+ tmp |= (0x0c << (gp->port_num*2)); // set Ether DMA req priority to hi
+#ifndef DESC_BE
+ tmp &= ~(1<<31); // set desc endianess to little
+#else
+ tmp |= (1<<31);
+#endif
+ GT96100_WRITE(GT96100_CIU_ARBITER_CONFIG, tmp);
+ dbg(3, "%s: CIU Config=%x/%x\n", __FUNCTION__,
+ tmp, GT96100_READ(GT96100_CIU_ARBITER_CONFIG));
+
+ // Set routing.
+ tmp = GT96100_READ(GT96100_ROUTE_MAIN) & (0x3f << 18);
+ tmp |= (0x07 << (18 + gp->port_num*3));
+ GT96100_WRITE(GT96100_ROUTE_MAIN, tmp);
+
+ /* set MII as peripheral func */
+ tmp = GT96100_READ(GT96100_GPP_CONFIG2);
+ tmp |= 0x7fff << (gp->port_num*16);
+ GT96100_WRITE(GT96100_GPP_CONFIG2, tmp);
+
+ /* Set up MII port pin directions */
+ tmp = GT96100_READ(GT96100_GPP_IO2);
+ tmp |= 0x003d << (gp->port_num*16);
+ GT96100_WRITE(GT96100_GPP_IO2, tmp);
+
+ // Set-up hash table
+ memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear it
+ gp->hash_mode = 0;
+ // Add a single entry to hash table - our ethernet address
+ gt96100_add_hash_entry(dev, dev->dev_addr);
+ // Set-up DMA ptr to hash table
+ GT96100ETH_WRITE(gp, GT96100_ETH_HASH_TBL_PTR, gp->hash_table_dma);
+ dbg(3, "%s: Hash Tbl Ptr=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_HASH_TBL_PTR));
+
+ // Setup Tx
+ reset_tx(dev);
+
+ dbg(3, "%s: Curr Tx Desc Ptr0=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0));
+
+ // Setup Rx
+ reset_rx(dev);
+
+ dbg(3, "%s: 1st/Curr Rx Desc Ptr0=%x/%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0),
+ GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0));
+
+ // eth port config register
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
+ pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen);
+
+ mii_reg = read_MII(gp->phy_addr, 0x11); /* int enable register */
+ mii_reg |= 2; /* enable mii interrupt */
+ write_MII(gp->phy_addr, 0x11, mii_reg);
+
+ dbg(3, "%s: PhyAD=%x\n", __FUNCTION__,
+ GT96100_READ(GT96100_ETH_PHY_ADDR_REG));
+
+ // setup DMA
+
+ // We want the Rx/Tx DMA to write/read data to/from memory in
+ // Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
+#ifdef DESC_DATA_BE
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
+ (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
+#else
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
+ sdcrBLMR | sdcrBLMT |
+ (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
+#endif
+ dbg(3, "%s: SDMA Config=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_SDMA_CONFIG));
+
+ // start Rx DMA
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
+ dbg(3, "%s: SDMA Comm=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));
+
+ // enable this port (set hash size to 1/2K)
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS);
+ dbg(3, "%s: Port Config=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG));
+
+ /*
+ * Disable all Type-of-Service queueing. All Rx packets will be
+ * treated normally and will be sent to the lowest priority
+ * queue.
+ *
+ * Disable flow-control for now. FIXME: support flow control?
+ */
+
+ // clear all the MIB ctr regs
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
+ pcxrFCTL | pcxrFCTLen | pcxrFLP |
+ pcxrPRIOrxOverride);
+ read_mib_counters(gp);
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
+ pcxrFCTL | pcxrFCTLen | pcxrFLP |
+ pcxrPRIOrxOverride | pcxrMIBclrMode);
+
+ dbg(3, "%s: Port Config Ext=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG_EXT));
+
+ netif_start_queue(dev);
+
+ dump_MII(4, dev);
+
+ // enable interrupts
+ enable_ether_irq(dev);
+
+ // we should now be receiving frames
+ return 0;
+}
+
+
+static int
+gt96100_open(struct net_device *dev)
+{
+ int retval;
+
+ dbg(2, "%s: dev=%p\n", __FUNCTION__, dev);
+
+ // Initialize and startup the GT-96100 ethernet port
+	if ((retval = gt96100_init(dev))) {
+		err("error in gt96100_init\n");
+		/* the IRQ has not been requested yet, nothing to free */
+		return retval;
+	}
+
+ if ((retval = request_irq(dev->irq, &gt96100_interrupt,
+ SA_SHIRQ, dev->name, dev))) {
+ err("unable to get IRQ %d\n", dev->irq);
+ return retval;
+ }
+
+ dbg(2, "%s: Initialization done.\n", __FUNCTION__);
+
+ return 0;
+}
+
+static int
+gt96100_close(struct net_device *dev)
+{
+ dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
+
+ // stop the device
+ if (netif_device_present(dev)) {
+ netif_stop_queue(dev);
+ hard_stop(dev);
+ }
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+
+static int
+gt96100_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ unsigned long flags;
+ int nextIn;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ nextIn = gp->tx_next_in;
+
+ dbg(3, "%s: nextIn=%d\n", __FUNCTION__, nextIn);
+
+ if (gp->tx_count >= TX_RING_SIZE) {
+ warn("Tx Ring full, pkt dropped.\n");
+ gp->stats.tx_dropped++;
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return 1;
+ }
+
+ if (!(gp->last_psr & psrLink)) {
+ err("%s: Link down, pkt dropped.\n", __FUNCTION__);
+ gp->stats.tx_dropped++;
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return 1;
+ }
+
+ if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) {
+ err("%s: device owns descriptor, pkt dropped.\n", __FUNCTION__);
+ gp->stats.tx_dropped++;
+ // stop the queue, so Tx timeout can fix it
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return 1;
+ }
+
+ // Prepare the Descriptor at tx_next_in
+ gp->tx_skbuff[nextIn] = skb;
+ gp->tx_ring[nextIn].byte_cnt = cpu_to_dma16(skb->len);
+ gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data));
+ // make sure packet gets written back to memory
+ dma_cache_wback_inv((unsigned long)(skb->data), skb->len);
+ // Give ownership to device, set first and last desc, enable interrupt
+ // Setting of ownership bit must be *last*!
+ gp->tx_ring[nextIn].cmdstat =
+ cpu_to_dma32((u32)(txOwn | txGenCRC | txEI |
+ txPad | txFirst | txLast));
+
+ dump_tx_desc(4, dev, nextIn);
+ dump_skb(4, dev, skb);
+
+ // increment tx_next_in with wrap
+ gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
+ // If DMA is stopped, restart
+ if (!(GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow))
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
+ sdcmrERD | sdcmrTXDL);
+
+ // increment count and stop queue if full
+ if (++gp->tx_count == TX_RING_SIZE) {
+ gp->tx_full = 1;
+ netif_stop_queue(dev);
+ dbg(2, "Tx Ring now full, queue stopped.\n");
+ }
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ return 0;
+}
+
+
+static int
+gt96100_rx(struct net_device *dev, u32 status)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int pkt_len, nextOut, cdp;
+ gt96100_rd_t *rd;
+ u32 cmdstat;
+
+ dbg(3, "%s: dev=%p, status=%x\n", __FUNCTION__, dev, status);
+
+ cdp = (GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0)
+ - gp->rx_ring_dma) / sizeof(gt96100_rd_t);
+
+ // Continue until we reach 1st descriptor pointer
+ for (nextOut = gp->rx_next_out; nextOut != cdp;
+ nextOut = (nextOut + 1) % RX_RING_SIZE) {
+
+ if (--gp->intr_work_done == 0)
+ break;
+
+ rd = &gp->rx_ring[nextOut];
+ cmdstat = dma32_to_cpu(rd->cmdstat);
+
+ dbg(4, "%s: Rx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
+ cmdstat, nextOut);
+
+ if (cmdstat & (u32)rxOwn) {
+ //err("%s: device owns descriptor!\n", __FUNCTION__);
+ // DMA is not finished updating descriptor???
+ // Leave and come back later to pick-up where
+ // we left off.
+ break;
+ }
+
+ // Drop this received pkt if there were any errors
+ if (((cmdstat & (u32)(rxErrorSummary)) &&
+ (cmdstat & (u32)(rxFirst))) || (status & icrRxError)) {
+ // update the detailed rx error counters that
+ // are not covered by the MIB counters.
+ if (cmdstat & (u32)rxOverrun)
+ gp->stats.rx_fifo_errors++;
+ cmdstat |= (u32)rxOwn;
+ rd->cmdstat = cpu_to_dma32(cmdstat);
+ continue;
+ }
+
+ /*
+ * Must be first and last (ie only) descriptor of packet. We
+ * ignore (drop) any packets that do not fit in one descriptor.
+ * Every descriptor's receive buffer is large enough to hold
+ * the maximum 802.3 frame size, so a multi-descriptor packet
+ * indicates an error. Most if not all corrupted packets will
+ * have already been dropped by the above check for the
+ * rxErrorSummary status bit.
+ */
+ if (!(cmdstat & (u32)rxFirst) || !(cmdstat & (u32)rxLast)) {
+ if (cmdstat & (u32)rxFirst) {
+ /*
+ * This is the first descriptor of a
+ * multi-descriptor packet. It isn't corrupted
+ * because the above check for rxErrorSummary
+ * would have dropped it already, so what's
+ * the deal with this packet? Good question,
+ * let's dump it out.
+ */
+ err("%s: desc not first and last!\n", __FUNCTION__);
+ dump_rx_desc(0, dev, nextOut);
+ }
+ cmdstat |= (u32)rxOwn;
+ rd->cmdstat = cpu_to_dma32(cmdstat);
+ // continue to drop every descriptor of this packet
+ continue;
+ }
+
+ pkt_len = dma16_to_cpu(rd->byte_cnt);
+
+ /* Create new skb. */
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ err("%s: Memory squeeze, dropping packet.\n", __FUNCTION__);
+ gp->stats.rx_dropped++;
+ cmdstat |= (u32)rxOwn;
+ rd->cmdstat = cpu_to_dma32(cmdstat);
+ continue;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ memcpy(skb_put(skb, pkt_len),
+ &gp->rx_buff[nextOut*PKT_BUF_SZ], pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dump_skb(4, dev, skb);
+
+ netif_rx(skb); /* pass the packet to upper layers */
+ dev->last_rx = jiffies;
+
+ // now we can release ownership of this desc back to device
+ cmdstat |= (u32)rxOwn;
+ rd->cmdstat = cpu_to_dma32(cmdstat);
+ }
+
+ if (nextOut == gp->rx_next_out)
+ dbg(3, "%s: RxCDP did not increment?\n", __FUNCTION__);
+
+ gp->rx_next_out = nextOut;
+ return 0;
+}
+
+
+static void
+gt96100_tx_complete(struct net_device *dev, u32 status)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ int nextOut, cdp;
+ gt96100_td_t *td;
+ u32 cmdstat;
+
+ cdp = (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0)
+ - gp->tx_ring_dma) / sizeof(gt96100_td_t);
+
+ // Continue until we reach the current descriptor pointer
+ for (nextOut = gp->tx_next_out; nextOut != cdp;
+ nextOut = (nextOut + 1) % TX_RING_SIZE) {
+
+ if (--gp->intr_work_done == 0)
+ break;
+
+ td = &gp->tx_ring[nextOut];
+ cmdstat = dma32_to_cpu(td->cmdstat);
+
+ dbg(3, "%s: Tx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
+ cmdstat, nextOut);
+
+ if (cmdstat & (u32)txOwn) {
+ /*
+ * DMA is not finished writing descriptor???
+ * Leave and come back later to pick-up where
+ * we left off.
+ */
+ break;
+ }
+
+ // increment Tx error stats
+ if (cmdstat & (u32)txErrorSummary) {
+ dbg(2, "%s: Tx error, cmdstat = %x\n", __FUNCTION__,
+ cmdstat);
+ gp->stats.tx_errors++;
+ if (cmdstat & (u32)txReTxLimit)
+ gp->stats.tx_aborted_errors++;
+ if (cmdstat & (u32)txUnderrun)
+ gp->stats.tx_fifo_errors++;
+ if (cmdstat & (u32)txLateCollision)
+ gp->stats.tx_window_errors++;
+ }
+
+ if (cmdstat & (u32)txCollision)
+ gp->stats.collisions +=
+ (u32)((cmdstat & txReTxCntMask) >>
+ txReTxCntBit);
+
+ // Wake the queue if the ring was full
+ if (gp->tx_full) {
+ gp->tx_full = 0;
+ if (gp->last_psr & psrLink) {
+ netif_wake_queue(dev);
+ dbg(2, "%s: Tx Ring was full, queue waked\n",
+ __FUNCTION__);
+ }
+ }
+
+ // decrement tx ring buffer count
+ if (gp->tx_count) gp->tx_count--;
+
+ // free the skb
+ if (gp->tx_skbuff[nextOut]) {
+ dbg(3, "%s: good Tx, skb=%p\n", __FUNCTION__,
+ gp->tx_skbuff[nextOut]);
+ dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
+ gp->tx_skbuff[nextOut] = NULL;
+ } else {
+ err("%s: no skb!\n", __FUNCTION__);
+ }
+ }
+
+ gp->tx_next_out = nextOut;
+
+ if (gt96100_check_tx_consistent(gp)) {
+ err("%s: Tx queue inconsistent!\n", __FUNCTION__);
+ }
+
+ if ((status & icrTxEndLow) && gp->tx_count != 0) {
+ // we must restart the DMA
+ dbg(3, "%s: Restarting Tx DMA\n", __FUNCTION__);
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
+ sdcmrERD | sdcmrTXDL);
+ }
+}
+
+
+static irqreturn_t
+gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct gt96100_private *gp = netdev_priv(dev);
+ u32 status;
+ int handled = 0;
+
+ if (dev == NULL) {
+ err("%s: null dev ptr\n", __FUNCTION__);
+ return IRQ_NONE;
+ }
+
+ dbg(3, "%s: entry, icr=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
+
+ spin_lock(&gp->lock);
+
+ gp->intr_work_done = max_interrupt_work;
+
+ while (gp->intr_work_done > 0) {
+
+ status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE);
+ // ACK interrupts
+ GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, ~status);
+
+ if ((status & icrEtherIntSum) == 0 &&
+ !(status & (icrTxBufferLow|icrTxBufferHigh|icrRxBuffer)))
+ break;
+
+ handled = 1;
+
+ if (status & icrMIIPhySTC) {
+ u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS);
+ if (gp->last_psr != psr) {
+ dbg(0, "port status:\n");
+ dbg(0, " %s MBit/s, %s-duplex, "
+ "flow-control %s, link is %s,\n",
+ psr & psrSpeed ? "100":"10",
+ psr & psrDuplex ? "full":"half",
+ psr & psrFctl ? "disabled":"enabled",
+ psr & psrLink ? "up":"down");
+ dbg(0, " TxLowQ is %s, TxHighQ is %s, "
+ "Transmitter is %s\n",
+ psr & psrTxLow ? "running":"stopped",
+ psr & psrTxHigh ? "running":"stopped",
+ psr & psrTxInProg ? "on":"off");
+
+ if ((psr & psrLink) && !gp->tx_full &&
+ netif_queue_stopped(dev)) {
+ dbg(0, "%s: Link up, waking queue.\n",
+ __FUNCTION__);
+ netif_wake_queue(dev);
+ } else if (!(psr & psrLink) &&
+ !netif_queue_stopped(dev)) {
+ dbg(0, "%s: Link down, stopping queue.\n",
+ __FUNCTION__);
+ netif_stop_queue(dev);
+ }
+
+ gp->last_psr = psr;
+ }
+
+ if (--gp->intr_work_done == 0)
+ break;
+ }
+
+ if (status & (icrTxBufferLow | icrTxEndLow))
+ gt96100_tx_complete(dev, status);
+
+ if (status & (icrRxBuffer | icrRxError)) {
+ gt96100_rx(dev, status);
+ }
+
+ // Now check TX errors (RX errors were handled in gt96100_rx)
+ if (status & icrTxErrorLow) {
+ err("%s: Tx resource error\n", __FUNCTION__);
+ if (--gp->intr_work_done == 0)
+ break;
+ }
+
+ if (status & icrTxUdr) {
+ err("%s: Tx underrun error\n", __FUNCTION__);
+ if (--gp->intr_work_done == 0)
+ break;
+ }
+ }
+
+ if (gp->intr_work_done == 0) {
+ // ACK any remaining pending interrupts
+ GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);
+ dbg(3, "%s: hit max work\n", __FUNCTION__);
+ }
+
+ dbg(3, "%s: exit, icr=%x\n", __FUNCTION__,
+ GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
+
+ spin_unlock(&gp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static void
+gt96100_tx_timeout(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ if (!(gp->last_psr & psrLink)) {
+ err("tx_timeout: link down.\n");
+ spin_unlock_irqrestore(&gp->lock, flags);
+ } else {
+ if (gt96100_check_tx_consistent(gp))
+ err("tx_timeout: Tx ring error.\n");
+
+ disable_ether_irq(dev);
+ spin_unlock_irqrestore(&gp->lock, flags);
+ reset_tx(dev);
+ enable_ether_irq(dev);
+
+ netif_wake_queue(dev);
+ }
+}
+
+
+static void
+gt96100_set_rx_mode(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ unsigned long flags;
+ //struct dev_mc_list *mcptr;
+
+ dbg(3, "%s: dev=%p, flags=%x\n", __FUNCTION__, dev, dev->flags);
+
+ // stop the Receiver DMA
+ abort(dev, sdcmrAR);
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+ GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG,
+ pcrEN | pcrHS | pcrPM);
+ }
+
+#if 0
+ /*
+ FIXME: currently multicast doesn't work - need to get hash table
+ working first.
+ */
+ if (dev->mc_count) {
+ // clear hash table
+ memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);
+ // Add our ethernet address
+ gt96100_add_hash_entry(dev, dev->dev_addr);
+
+ for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
+ dump_hw_addr(2, dev, __FUNCTION__ ": addr=",
+ mcptr->dmi_addr);
+ gt96100_add_hash_entry(dev, mcptr->dmi_addr);
+ }
+ }
+#endif
+
+ // restart Rx DMA
+ GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+}
+
+static struct net_device_stats *
+gt96100_get_stats(struct net_device *dev)
+{
+ struct gt96100_private *gp = netdev_priv(dev);
+ unsigned long flags;
+
+ dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
+
+ if (netif_device_present(dev)) {
+ spin_lock_irqsave (&gp->lock, flags);
+ update_stats(gp);
+ spin_unlock_irqrestore (&gp->lock, flags);
+ }
+
+ return &gp->stats;
+}
+
+static void gt96100_cleanup_module(void)
+{
+ int i;
+ for (i=0; i<NUM_INTERFACES; i++) {
+ struct gt96100_if_t *gtif = &gt96100_iflist[i];
+ if (gtif->dev != NULL) {
+ struct gt96100_private *gp = (struct gt96100_private *)
+ netdev_priv(gtif->dev);
+			unregister_netdev(gtif->dev);
+			dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
+			dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
+			dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
+				+ sizeof(gt96100_td_t) * TX_RING_SIZE,
+				gp->rx_ring);
+			/* release the region before gp goes away with the netdev */
+			release_region(gtif->iobase, gp->io_size);
+			free_netdev(gtif->dev);
+ }
+ }
+}
+
+static int __init gt96100_setup(char *options)
+{
+ char *this_opt;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((this_opt = strsep (&options, ",")) != NULL) {
+ if (!*this_opt)
+ continue;
+ if (!strncmp(this_opt, "mac0:", 5)) {
+ memcpy(mac0, this_opt+5, 17);
+ mac0[17]= '\0';
+ } else if (!strncmp(this_opt, "mac1:", 5)) {
+ memcpy(mac1, this_opt+5, 17);
+ mac1[17]= '\0';
+ }
+ }
+
+ return 1;
+}
+
+__setup("gt96100eth=", gt96100_setup);
+
+module_init(gt96100_init_module);
+module_exit(gt96100_cleanup_module);
+
+MODULE_AUTHOR("Steve Longerbeam <stevel@mvista.com>");
+MODULE_DESCRIPTION("GT96100 Ethernet driver");
diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h
new file mode 100644
index 000000000000..2f4bfd4dacbe
--- /dev/null
+++ b/drivers/net/gt96100eth.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2000 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or source@mvista.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * Ethernet driver definitions for the MIPS GT96100 Advanced
+ * Communication Controller.
+ *
+ */
+#ifndef _GT96100ETH_H
+#define _GT96100ETH_H
+
+#include <linux/config.h>
+#include <asm/galileo-boards/gt96100.h>
+
+#define dbg(lvl, format, arg...) \
+	do { \
+		if (lvl <= GT96100_DEBUG) \
+			printk(KERN_DEBUG "%s: " format, dev->name , ## arg); \
+	} while (0)
+#define err(format, arg...) \
+ printk(KERN_ERR "%s: " format, dev->name , ## arg)
+#define info(format, arg...) \
+ printk(KERN_INFO "%s: " format, dev->name , ## arg)
+#define warn(format, arg...) \
+ printk(KERN_WARNING "%s: " format, dev->name , ## arg)
+
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
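+/*
+ * Editorial note (not in the original source): keeping the ring sizes a
+ * power of two lets the compiler turn the wrap-around arithmetic used in
+ * the driver, e.g.
+ *
+ *	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
+ *
+ * into a simple mask -- for non-negative indices it is equivalent to
+ * (nextIn + 1) & (TX_RING_SIZE - 1) -- rather than a division.
+ */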
+
+#define RX_HASH_TABLE_SIZE 16384
+#define HASH_HOP_NUMBER 12
+
+#define NUM_INTERFACES 2
+
+#define GT96100ETH_TX_TIMEOUT HZ/4
+
+#define GT96100_ETH0_BASE (MIPS_GT96100_BASE + GT96100_ETH_PORT_CONFIG)
+#define GT96100_ETH1_BASE (GT96100_ETH0_BASE + GT96100_ETH_IO_SIZE)
+
+#ifdef CONFIG_MIPS_EV96100
+#define GT96100_ETHER0_IRQ 3
+#define GT96100_ETHER1_IRQ 4
+#else
+#define GT96100_ETHER0_IRQ -1
+#define GT96100_ETHER1_IRQ -1
+#endif
+
+#define REV_GT96100 1
+#define REV_GT96100A_1 2
+#define REV_GT96100A 3
+
+#define GT96100ETH_READ(gp, offset) \
+ GT96100_READ((gp->port_offset + offset))
+
+#define GT96100ETH_WRITE(gp, offset, data) \
+ GT96100_WRITE((gp->port_offset + offset), data)
+
+#define GT96100ETH_SETBIT(gp, offset, bits) do { \
+	u32 val = GT96100ETH_READ(gp, offset); val |= (u32)(bits); \
+	GT96100ETH_WRITE(gp, offset, val); } while (0)
+
+#define GT96100ETH_CLRBIT(gp, offset, bits) do { \
+	u32 val = GT96100ETH_READ(gp, offset); val &= (u32)(~(bits)); \
+	GT96100ETH_WRITE(gp, offset, val); } while (0)
+
+
+/* Bit definitions of the SMI Reg */
+enum {
+ smirDataMask = 0xffff,
+ smirPhyAdMask = 0x1f<<16,
+ smirPhyAdBit = 16,
+ smirRegAdMask = 0x1f<<21,
+ smirRegAdBit = 21,
+ smirOpCode = 1<<26,
+ smirReadValid = 1<<27,
+ smirBusy = 1<<28
+};
+
+/* Bit definitions of the Port Config Reg */
+enum pcr_bits {
+ pcrPM = 1,
+ pcrRBM = 2,
+ pcrPBF = 4,
+ pcrEN = 1<<7,
+ pcrLPBKMask = 0x3<<8,
+ pcrLPBKBit = 8,
+ pcrFC = 1<<10,
+ pcrHS = 1<<12,
+ pcrHM = 1<<13,
+ pcrHDM = 1<<14,
+ pcrHD = 1<<15,
+ pcrISLMask = 0x7<<28,
+ pcrISLBit = 28,
+ pcrACCS = 1<<31
+};
+
+/* Bit definitions of the Port Config Extend Reg */
+enum pcxr_bits {
+ pcxrIGMP = 1,
+ pcxrSPAN = 2,
+ pcxrPAR = 4,
+ pcxrPRIOtxMask = 0x7<<3,
+ pcxrPRIOtxBit = 3,
+ pcxrPRIOrxMask = 0x3<<6,
+ pcxrPRIOrxBit = 6,
+ pcxrPRIOrxOverride = 1<<8,
+ pcxrDPLXen = 1<<9,
+ pcxrFCTLen = 1<<10,
+ pcxrFLP = 1<<11,
+ pcxrFCTL = 1<<12,
+ pcxrMFLMask = 0x3<<14,
+ pcxrMFLBit = 14,
+ pcxrMIBclrMode = 1<<16,
+ pcxrSpeed = 1<<18,
+ pcxrSpeeden = 1<<19,
+ pcxrRMIIen = 1<<20,
+ pcxrDSCPen = 1<<21
+};
+
+/* Bit definitions of the Port Command Reg */
+enum pcmr_bits {
+ pcmrFJ = 1<<15
+};
+
+
+/* Bit definitions of the Port Status Reg */
+enum psr_bits {
+ psrSpeed = 1,
+ psrDuplex = 2,
+ psrFctl = 4,
+ psrLink = 8,
+ psrPause = 1<<4,
+ psrTxLow = 1<<5,
+ psrTxHigh = 1<<6,
+ psrTxInProg = 1<<7
+};
+
+/* Bit definitions of the SDMA Config Reg */
+enum sdcr_bits {
+ sdcrRCMask = 0xf<<2,
+ sdcrRCBit = 2,
+ sdcrBLMR = 1<<6,
+ sdcrBLMT = 1<<7,
+ sdcrPOVR = 1<<8,
+ sdcrRIFB = 1<<9,
+ sdcrBSZMask = 0x3<<12,
+ sdcrBSZBit = 12
+};
+
+/* Bit definitions of the SDMA Command Reg */
+enum sdcmr_bits {
+ sdcmrERD = 1<<7,
+ sdcmrAR = 1<<15,
+ sdcmrSTDH = 1<<16,
+ sdcmrSTDL = 1<<17,
+ sdcmrTXDH = 1<<23,
+ sdcmrTXDL = 1<<24,
+ sdcmrAT = 1<<31
+};
+
+/* Bit definitions of the Interrupt Cause Reg */
+enum icr_bits {
+ icrRxBuffer = 1,
+ icrTxBufferHigh = 1<<2,
+ icrTxBufferLow = 1<<3,
+ icrTxEndHigh = 1<<6,
+ icrTxEndLow = 1<<7,
+ icrRxError = 1<<8,
+ icrTxErrorHigh = 1<<10,
+ icrTxErrorLow = 1<<11,
+ icrRxOVR = 1<<12,
+ icrTxUdr = 1<<13,
+ icrRxBufferQ0 = 1<<16,
+ icrRxBufferQ1 = 1<<17,
+ icrRxBufferQ2 = 1<<18,
+ icrRxBufferQ3 = 1<<19,
+ icrRxErrorQ0 = 1<<20,
+ icrRxErrorQ1 = 1<<21,
+ icrRxErrorQ2 = 1<<22,
+ icrRxErrorQ3 = 1<<23,
+ icrMIIPhySTC = 1<<28,
+ icrSMIdone = 1<<29,
+ icrEtherIntSum = 1<<31
+};
+
+
+/* The Rx and Tx descriptor lists. */
+typedef struct {
+#ifdef DESC_BE
+ u16 byte_cnt;
+ u16 reserved;
+#else
+ u16 reserved;
+ u16 byte_cnt;
+#endif
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt96100_td_t __attribute__ ((packed));
+
+typedef struct {
+#ifdef DESC_BE
+ u16 buff_sz;
+ u16 byte_cnt;
+#else
+ u16 byte_cnt;
+ u16 buff_sz;
+#endif
+ u32 cmdstat;
+ u32 next;
+ u32 buff_ptr;
+} gt96100_rd_t __attribute__ ((packed));
+
+
+/* Values for the Tx command-status descriptor entry. */
+enum td_cmdstat {
+ txOwn = 1<<31,
+ txAutoMode = 1<<30,
+ txEI = 1<<23,
+ txGenCRC = 1<<22,
+ txPad = 1<<18,
+ txFirst = 1<<17,
+ txLast = 1<<16,
+ txErrorSummary = 1<<15,
+ txReTxCntMask = 0x0f<<10,
+ txReTxCntBit = 10,
+ txCollision = 1<<9,
+ txReTxLimit = 1<<8,
+ txUnderrun = 1<<6,
+ txLateCollision = 1<<5
+};
+
+
+/* Values for the Rx command-status descriptor entry. */
+enum rd_cmdstat {
+ rxOwn = 1<<31,
+ rxAutoMode = 1<<30,
+ rxEI = 1<<23,
+ rxFirst = 1<<17,
+ rxLast = 1<<16,
+ rxErrorSummary = 1<<15,
+ rxIGMP = 1<<14,
+ rxHashExpired = 1<<13,
+ rxMissedFrame = 1<<12,
+ rxFrameType = 1<<11,
+ rxShortFrame = 1<<8,
+ rxMaxFrameLen = 1<<7,
+ rxOverrun = 1<<6,
+ rxCollision = 1<<4,
+ rxCRCError = 1
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ hteValid = 1,
+ hteSkip = 2,
+ hteRD = 4
+};
+
+// The MIB counters
+typedef struct {
+ u32 byteReceived;
+ u32 byteSent;
+ u32 framesReceived;
+ u32 framesSent;
+ u32 totalByteReceived;
+ u32 totalFramesReceived;
+ u32 broadcastFramesReceived;
+ u32 multicastFramesReceived;
+ u32 cRCError;
+ u32 oversizeFrames;
+ u32 fragments;
+ u32 jabber;
+ u32 collision;
+ u32 lateCollision;
+ u32 frames64;
+ u32 frames65_127;
+ u32 frames128_255;
+ u32 frames256_511;
+ u32 frames512_1023;
+ u32 frames1024_MaxSize;
+ u32 macRxError;
+ u32 droppedFrames;
+ u32 outMulticastFrames;
+ u32 outBroadcastFrames;
+ u32 undersizeFrames;
+} mib_counters_t;
+
+
+struct gt96100_private {
+ gt96100_rd_t* rx_ring;
+ gt96100_td_t* tx_ring;
+ // The Rx and Tx rings must be 16-byte aligned
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ char* hash_table;
+ // The Hash Table must be 8-byte aligned
+ dma_addr_t hash_table_dma;
+ int hash_mode;
+
+ // The Rx buffers must be 8-byte aligned
+ char* rx_buff;
+ dma_addr_t rx_buff_dma;
+ // Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
+ // of payload must be 8-byte aligned
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ int rx_next_out; /* The next free ring entry to receive */
+ int tx_next_in; /* The next free ring entry to send */
+ int tx_next_out; /* The last ring entry the ISR processed */
+ int tx_count; /* current # of pkts waiting to be sent in Tx ring */
+ int intr_work_done; /* number of Rx and Tx pkts processed in the isr */
+ int tx_full; /* Tx ring is full */
+
+ mib_counters_t mib;
+ struct net_device_stats stats;
+
+ int io_size;
+ int port_num; // 0 or 1
+ int chip_rev;
+ u32 port_offset;
+
+ int phy_addr; // PHY address
+ u32 last_psr; // last value of the port status register
+
+ int options; /* User-settable misc. driver options. */
+ int drv_flags;
+ struct timer_list timer;
+ spinlock_t lock; /* Serialise access to device */
+};
+
+#endif
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
new file mode 100644
index 000000000000..3d96714ed3cf
--- /dev/null
+++ b/drivers/net/hamachi.c
@@ -0,0 +1,2024 @@
+/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
+/*
+ Written 1998-2000 by Donald Becker.
+ Updates 2000 by Keith Underwood.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
+ adapter.
+
+ Support and updates available at
+ http://www.scyld.com/network/hamachi.html
+ or
+ http://www.parl.clemson.edu/~keithu/hamachi.html
+
+
+
+ Linux kernel changelog:
+
+ LK1.0.1:
+ - fix lack of pci_dev<->dev association
+ - ethtool support (jgarzik)
+
+*/
+
+#define DRV_NAME "hamachi"
+#define DRV_VERSION "1.01+LK1.0.1"
+#define DRV_RELDATE "5/18/2001"
+
+
+/* A few user-configurable values. */
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+#define final_version
+#define hamachi_debug debug
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+static int mtu;
+/* Default values selected by testing on a dual processor PIII-450 */
+/* These six interrupt control parameters may be set directly when loading the
+ * module, or through the rx_params and tx_params variables
+ */
+static int max_rx_latency = 0x11;
+static int max_rx_gap = 0x05;
+static int min_rx_pkt = 0x18;
+static int max_tx_latency = 0x00;
+static int max_tx_gap = 0x00;
+static int min_tx_pkt = 0x30;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ -Setting to > 1518 causes all frames to be copied
+ -Setting to 0 disables copies
+*/
+static int rx_copybreak;
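+/*
+ * Editorial sketch of the copybreak test (the real code lives in
+ * hamachi_rx() further down and may differ in detail):
+ *
+ *	if (pkt_len < rx_copybreak &&
+ *	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ *		skb_reserve(skb, 2);	// 16-byte align the IP header
+ *		// copy the frame into the small skb, recycle the ring buffer
+ *	} else {
+ *		// hand the full-sized ring skb straight up the stack
+ *	}
+ *
+ * so the default of 0 never copies, and any value > 1518 copies every frame.
+ */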
+
+/* An override for the hardware detection of bus width.
+ Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
+ Add 2 to disable parity detection.
+*/
+static int force32;
+
+
+/* Used to pass the media type, etc.
+ These exist for driver interoperability.
+ No media types are currently defined.
+ - The lower 4 bits are reserved for the media type.
+ - The next three bits may be set to one of the following:
+ 0x00000000 : Autodetect PCI bus
+ 0x00000010 : Force 32 bit PCI bus
+ 0x00000020 : Disable parity detection
+ 0x00000040 : Force 64 bit PCI bus
+ Default is autodetect
+ - The next bit can be used to force half-duplex. This is a bad
+ idea since no known implementations implement half-duplex, and,
+ in general, half-duplex for gigabit ethernet is a bad idea.
+ 0x00000080 : Force half-duplex
+ Default is full-duplex.
+ - In the original driver, the ninth bit could be used to force
+ full-duplex. Maintain that for compatibility
+ 0x00000200 : Force full-duplex
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
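+/*
+ * Editorial worked example (not a recommended setting): options[0]=0x212
+ * decodes as media type 2 (bits 0-3), "force 32 bit PCI bus" (0x10) and
+ * forced full-duplex (0x200), matching the option parsing in
+ * hamachi_init_one() below.
+ */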
+/* The Hamachi chipset supports 3 parameters each for Rx and Tx
+ * interrupt management. Parameters will be loaded as specified into
+ * the TxIntControl and RxIntControl registers.
+ *
+ * The registers are arranged as follows:
+ * 23 - 16 15 - 8 7 - 0
+ * _________________________________
+ * | min_pkt | max_gap | max_latency |
+ * ---------------------------------
+ * min_pkt : The minimum number of packets processed between
+ * interrupts.
+ * max_gap : The maximum inter-packet gap in units of 8.192 us
+ * max_latency : The absolute time between interrupts in units of 8.192 us
+ *
+ */
+static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
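+/*
+ * Editorial worked example: with the defaults above and no rx_params[]
+ * override, hamachi_init_one() packs the Rx control word as
+ *
+ *	(min_rx_pkt << 16) | (max_rx_gap << 8) | max_rx_latency
+ *	= (0x18 << 16) | (0x05 << 8) | 0x11 = 0x00180511
+ *
+ * and hamachi_open() later writes that value to RxIntrCtrl.
+ */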
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings, except for
+ excessive memory usage */
+/* Empirically it appears that the Tx ring needs to be a little bigger
+ for these Gbit adapters or you get into an overrun condition really
+ easily. Also, things appear to work a bit better in back-to-back
+ configurations if the Rx ring is 8 times the size of the Tx ring
+*/
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 512
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc)
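+/* Editorial check: TX_RING_SIZE 64 and RX_RING_SIZE 512 give the 8x
+   Rx-to-Tx ratio suggested above for back-to-back configurations. */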
+
+/*
+ * Enable netdev_ioctl. Added interrupt coalescing parameter adjustment.
+ * 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov>
+ */
+
+/* play with 64-bit addrlen; seems to be a teensy bit slower --pw */
+/* #define ADDRLEN 64 */
+
+/*
+ * RX_CHECKSUM turns on card-generated receive checksum generation for
+ * TCP and UDP packets. Otherwise the upper layers do the calculation.
+ * TX_CHECKSUM won't do anything too useful, even if it works. There's no
+ * easy mechanism by which to tell the TCP/UDP stack that it need not
+ * generate checksums for this device. But if somebody can find a way
+ * to get that to work, most of the card work is in here already.
+ * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
+ */
+#undef TX_CHECKSUM
+#define RX_CHECKSUM
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5*HZ)
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <asm/cache.h>
+
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
+KERN_INFO " Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
+KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
+
+
+/* IP_MF appears to be only defined in <netinet/ip.h>, however,
+ we need it for hardware checksumming support. FYI... some of
+ the definitions in <netinet/ip.h> conflict/duplicate those in
+ other linux headers causing many compiler warnings.
+*/
+#ifndef IP_MF
+ #define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
+#endif
+
+/* Define IP_OFFSET to be IPOPT_OFFSET */
+#ifndef IP_OFFSET
+ #ifdef IPOPT_OFFSET
+ #define IP_OFFSET IPOPT_OFFSET
+ #else
+ #define IP_OFFSET 2
+ #endif
+#endif
+
+#define RUN_AT(x) (jiffies + (x))
+
+/* Condensed bus+endian portability operations. */
+#if ADDRLEN == 64
+#define cpu_to_leXX(addr) cpu_to_le64(addr)
+#else
+#define cpu_to_leXX(addr) cpu_to_le32(addr)
+#endif
+
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Hamachi"
+Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
+66Mhz PCI card.
+
+II. Board-specific settings
+
+No jumpers exist on the board. The chip supports software correction of
+various motherboard wiring errors, however this driver does not support
+that feature.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Hamachi uses a typical descriptor based bus-master architecture.
+The descriptor list is similar to that used by the Digital Tulip.
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+This driver uses a zero-copy receive and transmit scheme similar to that
+used in my other network drivers.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Hamachi as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. Gigabit cards are typically used on generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.
+
+IIIb/c. Transmit/Receive Structure
+
+The Rx and Tx descriptor structures are straightforward, with no historical
+baggage that must be explained. Unlike the awkward DBDMA structure, there
+are no unused fields or option bits that had only one allowable setting.
+
+Two details should be noted about the descriptors: The chip supports both 32
+bit and 64 bit address structures, and the length field is overwritten on
+the receive descriptors. The descriptor length is set in the control word
+for each channel. The development driver uses 32 bit addresses only, however
+64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
+
+IIId. Synchronization
+
+This driver is very similar to my other network drivers.
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'hmp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
+
+IVb. References
+
+Hamachi Engineering Design Specification, 5/15/97
+(Note: This version was marked "Confidential".)
+
+IVc. Errata
+
+None noted.
+
+V. Recent Changes
+
+01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
+ to help avoid some stall conditions -- this needs further research.
+
+01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
+ the Tx ring and is called from hamachi_start_xmit (this used to be
+ called from hamachi_interrupt but it tends to delay execution of the
+ interrupt handler and thus reduce bandwidth by reducing the latency
+ between hamachi_rx()'s). Notably, some modification has been made so
+ that the cleaning loop checks only to make sure that the DescOwn bit
+ isn't set in the status flag since the card is not required
+ to set the entire flag to zero after processing.
+
+01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
+ checked before attempting to add a buffer to the ring. If the ring is full
+ an attempt is made to free any dirty buffers and thus find space for
+ the new buffer or the function returns non-zero which should cause the
+ scheduler to reschedule the buffer later.
+
+01/15/1999 EPK Some adjustments were made to the chip initialization.
+ End-to-end flow control should now be fully active and the interrupt
+ algorithm vars have been changed. These could probably use further tuning.
+
+01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to
+ set the rx and tx latencies for the Hamachi interrupts. If you're having
+ problems with network stalls, try setting these to higher values.
+ Valid values are 0x00 through 0xff.
+
+01/15/1999 EPK In general, the overall bandwidth has increased and
+ latencies are better (sometimes by a factor of 2). Stalls are rare at
+ this point, however there still appears to be a bug somewhere between the
+ hardware and driver. TCP checksum errors under load also appear to be
+ eliminated at this point.
+
+01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the
+ Rx and Tx rings. This appears to have been affecting whether a particular
+ peer-to-peer connection would hang under high load. I believe the Rx
+ rings was typically getting set correctly, but the Tx ring wasn't getting
+ the DescEndRing bit set during initialization. ??? Does this mean the
+ hamachi card is using the DescEndRing in processing even if a particular
+ slot isn't in use -- hypothetically, the card might be searching the
+ entire Tx ring for slots with the DescOwn bit set and then processing
+ them. If the DescEndRing bit isn't set, then it might just wander off
+ through memory until it hits a chunk of data with that bit set
+ and then looping back.
+
+02/09/1999 EPK Added Michel Mueller's fix for the TxDMA Interrupt and
+	Tx-timeout problem (TxCmd and RxCmd need only be set when idle or
+	stopped).
+
+02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
+ (Michel Mueller pointed out the ``permanently busy'' potential
+ problem here).
+
+02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
+
+02/23/1999 EPK Verified that the interrupt status field bits for Tx were
+ incorrectly defined and corrected (as per Michel Mueller).
+
+02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
+	were available before resetting the tbusy and tx_full flags
+ (as per Michel Mueller).
+
+03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
+
+12/31/1999 KDU Cleaned up assorted things and added Don's code to force
+32 bit.
+
+02/20/2000 KDU Some of the control was just plain odd. Cleaned up the
+hamachi_start_xmit() and hamachi_interrupt() code. There is still some
+re-structuring I would like to do.
+
+03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
+parameters on a dual P3-450 setup yielded the new default interrupt
+mitigation parameters. Tx should interrupt VERY infrequently due to
+Eric's scheme. Rx should be more often...
+
+03/13/2000 KDU Added a patch to make the Rx Checksum code interact
+nicely with non-linux machines.
+
+03/13/2000 KDU Experimented with some of the configuration values:
+
+ -It seems that enabling PCI performance commands for descriptors
+ (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
+ performance impact for any of my tests. (ttcp, netpipe, netperf) I will
+ leave them that way until I hear further feedback.
+
+ -Increasing the PCI_LATENCY_TIMER to 130
+ (2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
+ degrade performance. Leaving default at 64 pending further information.
+
+03/14/2000 KDU Further tuning:
+
+ -adjusted boguscnt in hamachi_rx() to depend on interrupt
+ mitigation parameters chosen.
+
+ -Selected a set of interrupt parameters based on some extensive testing.
+ These may change with more testing.
+
+TO DO:
+
+-Consider borrowing from the acenic driver code to check PCI_COMMAND for
+PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in
+that case.
+
+-fix the reset procedure. It doesn't quite work.
+*/
+
+/* A few values that may be tweaked. */
+/* Size of each temporary Rx buffer, calculated as:
+ * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
+ * 2 more because we use skb_reserve.
+ */
+#define PKT_BUF_SZ 1538
+
+/* For now, this is going to be set to the maximum size of an ethernet
+ * packet. Eventually, we may want to make it a variable that is
+ * related to the MTU
+ */
+#define MAX_FRAME_SIZE 1518
+
+/* The rest of these values should never change. */
+
+static void hamachi_timer(unsigned long data);
+
+enum capability_flags {CanHaveMII=1, };
+static struct chip_info {
+ u16 vendor_id, device_id, device_id_mask, pad;
+ const char *name;
+ void (*media_timer)(unsigned long data);
+ int flags;
+} chip_tbl[] = {
+ {0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
+ {0,},
+};
+
+/* Offsets to the Hamachi registers. Various sizes. */
+enum hamachi_offsets {
+ TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
+ RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
+ PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
+ LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E,
+ TxChecksum=0x074, RxChecksum=0x076,
+ TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
+ InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
+ EventStatus=0x08C,
+ MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
+ /* See enum MII_offsets below. */
+ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
+ AddrMode=0x0D0, StationAddr=0x0D2,
+ /* Gigabit AutoNegotiation. */
+ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
+ ANLinkPartnerAbility=0x0EA,
+ EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
+ FIFOcfg=0x0F8,
+};
+
+/* Offsets to the MII-mode registers. */
+enum MII_offsets {
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
+ IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
+ LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
+
+/* The Hamachi Rx and Tx buffer descriptors. */
+struct hamachi_desc {
+ u32 status_n_length;
+#if ADDRLEN == 64
+ u32 pad;
+ u64 addr;
+#else
+ u32 addr;
+#endif
+};
+
+/* Bits in hamachi_desc.status_n_length */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+ DescIntr=0x10000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+#define MII_CNT 4
+struct hamachi_private {
+ /* Descriptor rings first for alignment. Tx requires a second descriptor
+ for status. */
+ struct hamachi_desc *rx_ring;
+ struct hamachi_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ spinlock_t lock;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int duplex_lock:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ struct mii_if_info mii_if; /* MII lib hooks/info */
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ u32 rx_int_var, tx_int_var; /* interrupt control variables */
+ u32 option; /* Hold on to a copy of the options */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+};
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>");
+MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(min_rx_pkt, int, 0);
+module_param(max_rx_gap, int, 0);
+module_param(max_rx_latency, int, 0);
+module_param(min_tx_pkt, int, 0);
+module_param(max_tx_gap, int, 0);
+module_param(max_tx_latency, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(rx_params, int, NULL, 0);
+module_param_array(tx_params, int, NULL, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(force32, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
+MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
+MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
+MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
+MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
+MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
+MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
+MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
+MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
+
+static int read_eeprom(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int hamachi_open(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void hamachi_timer(unsigned long data);
+static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_init_ring(struct net_device *dev);
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int hamachi_rx(struct net_device *dev);
+static inline int hamachi_tx(struct net_device *dev);
+static void hamachi_error(struct net_device *dev, int intr_status);
+static int hamachi_close(struct net_device *dev);
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+static struct ethtool_ops ethtool_ops_no_mii;
+
+static int __devinit hamachi_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct hamachi_private *hmp;
+ int option, i, rx_int_var, tx_int_var, boguscnt;
+ int chip_id = ent->driver_data;
+ int irq;
+ void __iomem *ioaddr;
+ unsigned long base;
+ static int card_idx;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+ int ret = -ENOMEM;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ if (pci_enable_device(pdev)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ base = pci_resource_start(pdev, 0);
+#ifdef __alpha__ /* Really "64 bit addrs" */
+ base |= (pci_resource_start(pdev, 1) << 32);
+#endif
+
+ pci_set_master(pdev);
+
+ i = pci_request_regions(pdev, DRV_NAME);
+ if (i) return i;
+
+ irq = pdev->irq;
+ ioaddr = ioremap(base, 0x400);
+ if (!ioaddr)
+ goto err_out_release;
+
+ dev = alloc_etherdev(sizeof(struct hamachi_private));
+ if (!dev)
+ goto err_out_iounmap;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef TX_CHECKSUM
+ printk("check that skbcopy in ip_queue_xmit isn't happening\n");
+ dev->hard_header_len += 8; /* for cksum tag */
+#endif
+
+	/* The station address comes from the EEPROM; the StationAddr
+	   registers (readb(ioaddr + StationAddr + i)) would be the
+	   alternative source. */
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = read_eeprom(ioaddr, 4 + i);
+
+#if ! defined(final_version)
+ if (hamachi_debug > 4)
+ for (i = 0; i < 0x10; i++)
+ printk("%2.2x%s",
+ read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n");
+#endif
+
+ hmp = netdev_priv(dev);
+ spin_lock_init(&hmp->lock);
+
+ hmp->mii_if.dev = dev;
+ hmp->mii_if.mdio_read = mdio_read;
+ hmp->mii_if.mdio_write = mdio_write;
+ hmp->mii_if.phy_id_mask = 0x1f;
+ hmp->mii_if.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_cleardev;
+ hmp->tx_ring = (struct hamachi_desc *)ring_space;
+ hmp->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ hmp->rx_ring = (struct hamachi_desc *)ring_space;
+ hmp->rx_ring_dma = ring_dma;
+
+ /* Check for options being passed in */
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* If the bus size is misidentified, do the following. */
+ force32 = force32 ? force32 :
+ ((option >= 0) ? ((option & 0x00000070) >> 4) : 0 );
+ if (force32)
+ writeb(force32, ioaddr + VirtualJumpers);
+
+ /* Hmmm, do we really need to reset the chip???. */
+ writeb(0x01, ioaddr + ChipReset);
+
+ /* After a reset, the clock speed measurement of the PCI bus will not
+ * be valid for a moment. Wait for a little while until it is. If
+ * it takes more than 10ms, forget it.
+ */
+ udelay(10);
+ i = readb(ioaddr + PCIClkMeas);
+ for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
+ udelay(10);
+ i = readb(ioaddr + PCIClkMeas);
+ }
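+	/* Editorial note: 1000 polls of udelay(10) is the ~10 ms budget
+	   mentioned in the comment above. */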
+
+ hmp->base = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+ pci_set_drvdata(pdev, dev);
+
+ hmp->chip_id = chip_id;
+ hmp->pci_dev = pdev;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ hmp->option = option;
+ if (option & 0x200)
+ hmp->mii_if.full_duplex = 1;
+ else if (option & 0x080)
+ hmp->mii_if.full_duplex = 0;
+ hmp->default_port = option & 15;
+ if (hmp->default_port)
+ hmp->mii_if.force_media = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ hmp->mii_if.full_duplex = 1;
+
+ /* lock the duplex mode if someone specified a value */
+ if (hmp->mii_if.full_duplex || (option & 0x080))
+ hmp->duplex_lock = 1;
+
+ /* Set interrupt tuning parameters */
+ max_rx_latency = max_rx_latency & 0x00ff;
+ max_rx_gap = max_rx_gap & 0x00ff;
+ min_rx_pkt = min_rx_pkt & 0x00ff;
+ max_tx_latency = max_tx_latency & 0x00ff;
+ max_tx_gap = max_tx_gap & 0x00ff;
+ min_tx_pkt = min_tx_pkt & 0x00ff;
+
+ rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
+ tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
+ hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
+ (min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
+ hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
+ (min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
+
+
+ /* The Hamachi-specific entries in the device structure. */
+ dev->open = &hamachi_open;
+ dev->hard_start_xmit = &hamachi_start_xmit;
+ dev->stop = &hamachi_close;
+ dev->get_stats = &hamachi_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ else
+ SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+ dev->tx_timeout = &hamachi_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ if (mtu)
+ dev->mtu = mtu;
+
+ i = register_netdev(dev);
+ if (i) {
+ ret = i;
+ goto err_out_unmap_rx;
+ }
+
+ printk(KERN_INFO "%s: %s type %x at %p, ",
+ dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
+ ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+ i = readb(ioaddr + PCIClkMeas);
+ printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
+ "%2.2x, LPA %4.4x.\n",
+ dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
+ i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
+ readw(ioaddr + ANLinkPartnerAbility));
+
+ if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff &&
+ mii_status != 0x0000) {
+ hmp->phys[phy_idx++] = phy;
+ hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, hmp->mii_if.advertising);
+ }
+ }
+ hmp->mii_cnt = phy_idx;
+ if (hmp->mii_cnt > 0)
+ hmp->mii_if.phy_id = hmp->phys[0];
+ else
+ memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
+ }
+ /* Configure gigabit autonegotiation. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
+ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
+
+ card_idx++;
+ return 0;
+
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ hmp->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ hmp->tx_ring_dma);
+err_out_cleardev:
+ free_netdev (dev);
+err_out_iounmap:
+ iounmap(ioaddr);
+err_out_release:
+ pci_release_regions(pdev);
+err_out:
+ return ret;
+}
+
+static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+{
+ int bogus_cnt = 1000;
+
+ /* We should check busy first - per docs -KDU */
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
+ writew(location, ioaddr + EEAddr);
+ writeb(0x02, ioaddr + EECmdStatus);
+ bogus_cnt = 1000;
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
+ if (hamachi_debug > 5)
+ printk(" EEPROM status is %2.2x after %d ticks.\n",
+ (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
+ return readb(ioaddr + EEData);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+
+ /* We should check busy first - per docs -KDU */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(0x0001, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return readw(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+
+ /* We should check busy first - per docs -KDU */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
+
+
+static int hamachi_open(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+ u32 rx_int_var, tx_int_var;
+ u16 fifo_info;
+
+ i = request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev);
+ if (i)
+ return i;
+
+ if (hamachi_debug > 1)
+ printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ hamachi_init_ring(dev);
+
+#if ADDRLEN == 64
+ /* writellll anyone ? */
+ writel(cpu_to_le64(hmp->rx_ring_dma), ioaddr + RxPtr);
+ writel(cpu_to_le64(hmp->rx_ring_dma) >> 32, ioaddr + RxPtr + 4);
+ writel(cpu_to_le64(hmp->tx_ring_dma), ioaddr + TxPtr);
+ writel(cpu_to_le64(hmp->tx_ring_dma) >> 32, ioaddr + TxPtr + 4);
+#else
+ writel(cpu_to_le32(hmp->rx_ring_dma), ioaddr + RxPtr);
+ writel(cpu_to_le32(hmp->tx_ring_dma), ioaddr + TxPtr);
+#endif
+
+ /* TODO: It would make sense to organize this as words since the card
+ * documentation does. -KDU
+ */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+	/* Initialize other registers: with so many of them, this will
+	   eventually be converted to an offset/value list. */
+
+ /* Configure the FIFO */
+ fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
+ switch (fifo_info){
+ case 0 :
+ /* No FIFO */
+ writew(0x0000, ioaddr + FIFOcfg);
+ break;
+ case 1 :
+ /* Configure the FIFO for 512K external, 16K used for Tx. */
+ writew(0x0028, ioaddr + FIFOcfg);
+ break;
+ case 2 :
+			/* Configure the FIFO for 1024K external, 32K used for Tx. */
+ writew(0x004C, ioaddr + FIFOcfg);
+ break;
+ case 3 :
+			/* Configure the FIFO for 2048K external, 32K used for Tx. */
+ writew(0x006C, ioaddr + FIFOcfg);
+ break;
+ default :
+ printk(KERN_WARNING "%s: Unsupported external memory config!\n",
+ dev->name);
+ /* Default to no FIFO */
+ writew(0x0000, ioaddr + FIFOcfg);
+ break;
+ }
+
+ if (dev->if_port == 0)
+ dev->if_port = hmp->default_port;
+
+
+ /* Setting the Rx mode will start the Rx process. */
+ /* If someone didn't choose a duplex, default to full-duplex */
+ if (hmp->duplex_lock != 1)
+ hmp->mii_if.full_duplex = 1;
+
+ /* always 1, takes no more time to do it */
+ writew(0x0001, ioaddr + RxChecksum);
+#ifdef TX_CHECKSUM
+ writew(0x0001, ioaddr + TxChecksum);
+#else
+ writew(0x0000, ioaddr + TxChecksum);
+#endif
+ writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
+ writew(0x215F, ioaddr + MACCnfg);
+ writew(0x000C, ioaddr + FrameGap0);
+ /* WHAT?!?!? Why isn't this documented somewhere? -KDU */
+ writew(0x1018, ioaddr + FrameGap1);
+ /* Why do we enable receives/transmits here? -KDU */
+ writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ writel(0x0030FFFF, ioaddr + FlowCtrl);
+ writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */
+
+ /* Enable legacy links. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ /* Initial Link LED to blinking red. */
+ writeb(0x03, ioaddr + LEDCtrl);
+
+ /* Configure interrupt mitigation. This has a great effect on
+	   performance, so system tuning should start here! */
+
+ rx_int_var = hmp->rx_int_var;
+ tx_int_var = hmp->tx_int_var;
+
+ if (hamachi_debug > 1) {
+ printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
+ tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
+ (tx_int_var & 0x00ff0000) >> 16);
+ printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
+ rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
+ (rx_int_var & 0x00ff0000) >> 16);
+ printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
+ }
+
+ writel(tx_int_var, ioaddr + TxIntrCtrl);
+ writel(rx_int_var, ioaddr + RxIntrCtrl);
+
+ set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(0x80878787, ioaddr + InterruptEnable);
+ writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+
+ /* Configure and start the DMA channels. */
+ /* Burst sizes are in the low three bits: size = 4<<(val&7) */
+#if ADDRLEN == 64
+ writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */
+ writew(0x005D, ioaddr + TxDMACtrl);
+#else
+ writew(0x001D, ioaddr + RxDMACtrl);
+ writew(0x001D, ioaddr + TxDMACtrl);
+#endif
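+	/*
+	 * Editorial note: by the "size = 4<<(val&7)" formula above, both the
+	 * 64-bit (0x005D) and 32-bit (0x001D) settings have (val & 7) == 5,
+	 * i.e. they request 4 << 5 == 128-unit bursts; only the upper mode
+	 * bits differ between the two cases.
+	 */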
+ writew(0x0001, ioaddr + RxCmd);
+
+ if (hamachi_debug > 2) {
+ printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
+ dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
+ }
+ /* Set the timer to check for link beat. */
+ init_timer(&hmp->timer);
+ hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+ hmp->timer.data = (unsigned long)dev;
+ hmp->timer.function = &hamachi_timer; /* timer handler */
+ add_timer(&hmp->timer);
+
+ return 0;
+}
+
+static inline int hamachi_tx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+
+ /* Update the dirty pointer until we find an entry that is
+ still owned by the card */
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ break;
+ /* Free the original skb. */
+ skb = hmp->tx_skbuff[entry];
+ if (skb != 0) {
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[entry].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[entry] = NULL;
+ }
+ hmp->tx_ring[entry].status_n_length = 0;
+ if (entry >= TX_RING_SIZE-1)
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
+ cpu_to_le32(DescEndRing);
+ hmp->stats.tx_packets++;
+ }
+
+ return 0;
+}
+
+static void hamachi_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int next_tick = 10*HZ;
+
+ if (hamachi_debug > 2) {
+ printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
+ "%4.4x.\n", dev->name, readw(ioaddr + ANStatus),
+ readw(ioaddr + ANLinkPartnerAbility));
+ printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
+ "%4.4x %4.4x %4.4x.\n", dev->name,
+ readw(ioaddr + 0x0e0),
+ readw(ioaddr + 0x0e2),
+ readw(ioaddr + 0x0e4),
+ readw(ioaddr + 0x0e6),
+ readw(ioaddr + 0x0e8),
+ readw(ioaddr + 0x0eA));
+ }
+ /* We could do something here... nah. */
+ hmp->timer.expires = RUN_AT(next_tick);
+ add_timer(&hmp->timer);
+}
+
+static void hamachi_tx_timeout(struct net_device *dev)
+{
+ int i;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+ {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", hmp->tx_ring[i].status_n_length);
+ printk("\n");
+ }
+
+ /* Reinit the hardware and make sure the Rx and Tx processes
+ are up and running.
+ */
+ dev->if_port = 0;
+ /* The right way to do Reset. -KDU
+ * -Clear OWN bit in all Rx/Tx descriptors
+ * -Wait 50 uS for channels to go idle
+ * -Turn off MAC receiver
+ * -Issue Reset
+ */
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
+
+ /* Presume that all packets in the Tx queue are gone if we have to
+ * re-init the hardware.
+ */
+ for (i = 0; i < TX_RING_SIZE; i++){
+ struct sk_buff *skb;
+
+ if (i >= TX_RING_SIZE - 1)
+ hmp->tx_ring[i].status_n_length = cpu_to_le32(
+ DescEndRing |
+ (hmp->tx_ring[i].status_n_length & 0x0000FFFF));
+ else
+ hmp->tx_ring[i].status_n_length &= 0x0000ffff;
+ skb = hmp->tx_skbuff[i];
+ if (skb){
+ pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[i] = NULL;
+ }
+ }
+
+	udelay(60); /* Wait 60 us just for safety's sake */
+ writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
+
+ writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+ /* Rx packets are also presumed lost; however, we need to make sure a
+	 * ring of buffers is intact. -KDU
+ */
+ for (i = 0; i < RX_RING_SIZE; i++){
+ struct sk_buff *skb = hmp->rx_skbuff[i];
+
+ if (skb){
+ pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
+ hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ hmp->rx_skbuff[i] = NULL;
+ }
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as wrapping the ring. */
+ hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ /* Trigger an immediate transmit demand. */
+ dev->trans_start = jiffies;
+ hmp->stats.tx_errors++;
+
+ /* Restart the chip's Tx/Rx processes . */
+ writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
+ writew(0x0001, ioaddr + TxCmd); /* START Tx */
+ writew(0x0001, ioaddr + RxCmd); /* START Rx */
+
+ netif_wake_queue(dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void hamachi_init_ring(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ int i;
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+
+#if 0
+ /* This is wrong. I'm not sure what the original plan was, but this
+ * is wrong. An MTU of 1 gets you a buffer of 1536, while an MTU
+ * of 1501 gets a buffer of 1533? -KDU
+ */
+ hmp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+#endif
+ /* My attempt at a reasonable correction */
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ * card needs room to do 8 byte alignment, +2 so we can reserve
+ * the first 2 bytes, and +16 gets room for the status word from the
+ * card. -KDU
+ */
+ hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
+ (((dev->mtu+26+7) & ~7) + 2 + 16));
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_skbuff[i] = NULL;
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ /* -2 because it doesn't REALLY have that first 2 bytes -KDU */
+ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ hmp->tx_skbuff[i] = NULL;
+ hmp->tx_ring[i].status_n_length = 0;
+ }
+ /* Mark the last entry of the ring */
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ return;
+}
+
+
+#ifdef TX_CHECKSUM
+#define csum_add(it, val) \
+do { \
+ it += (u16) (val); \
+ if (it & 0xffff0000) { \
+ it &= 0xffff; \
+ ++it; \
+ } \
+} while (0)
+ /* printk("add %04x --> %04x\n", val, it); \ */
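+/*
+ * Note: csum_add() does an end-around-carry add.  Any carry out of bit 15
+ * is folded back into the low 16 bits, as the ones-complement Internet
+ * checksum requires (e.g. 0xffff + 0x0002 becomes 0x0002).
+ */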
+
+/* uh->len already network format, do not swap */
+#define pseudo_csum_udp(sum,ih,uh) do { \
+ sum = 0; \
+ csum_add(sum, (ih)->saddr >> 16); \
+ csum_add(sum, (ih)->saddr & 0xffff); \
+ csum_add(sum, (ih)->daddr >> 16); \
+ csum_add(sum, (ih)->daddr & 0xffff); \
+ csum_add(sum, __constant_htons(IPPROTO_UDP)); \
+ csum_add(sum, (uh)->len); \
+} while (0)
+
+/* swap len */
+#define pseudo_csum_tcp(sum,ih,len) do { \
+ sum = 0; \
+ csum_add(sum, (ih)->saddr >> 16); \
+ csum_add(sum, (ih)->saddr & 0xffff); \
+ csum_add(sum, (ih)->daddr >> 16); \
+ csum_add(sum, (ih)->daddr & 0xffff); \
+ csum_add(sum, __constant_htons(IPPROTO_TCP)); \
+ csum_add(sum, htons(len)); \
+} while (0)
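+/*
+ * Both macros above accumulate the standard IPv4 pseudo-header fields
+ * (source/destination address, protocol, length) that TCP and UDP fold
+ * into their checksums; the payload sum itself is then added by the card,
+ * as set up in hamachi_start_xmit() below.
+ */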
+#endif
+
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ unsigned entry;
+ u16 status;
+
+ /* Ok, now make sure that the queue has space before trying to
+ add another skbuff. if we return non-zero the scheduler
+ should interpret this as a queue full and requeue the buffer
+ for later.
+ */
+ if (hmp->tx_full) {
+ /* We should NEVER reach this point -KDU */
+ printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
+
+ /* Wake the potentially-idle transmit channel. */
+ /* If we don't need to read status, DON'T -KDU */
+ status=readw(hmp->base + TxStatus);
+ if( !(status & 0x0001) || (status & 0x0002))
+ writew(0x0001, hmp->base + TxCmd);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = hmp->cur_tx % TX_RING_SIZE;
+
+ hmp->tx_skbuff[entry] = skb;
+
+#ifdef TX_CHECKSUM
+ {
+ /* tack on checksum tag */
+ u32 tagval = 0;
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+ if (eh->h_proto == __constant_htons(ETH_P_IP)) {
+ struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN);
+ if (ih->protocol == IPPROTO_UDP) {
+ struct udphdr *uh
+ = (struct udphdr *)((char *)ih + ih->ihl*4);
+ u32 offset = ((unsigned char *)uh + 6) - skb->data;
+ u32 pseudo;
+ pseudo_csum_udp(pseudo, ih, uh);
+ pseudo = htons(pseudo);
+ printk("udp cksum was %04x, sending pseudo %04x\n",
+ uh->check, pseudo);
+ uh->check = 0; /* zero out uh->check before card calc */
+ /*
+ * start at 14 (skip ethhdr), store at offset (uh->check),
+ * use pseudo value given.
+ */
+ tagval = (14 << 24) | (offset << 16) | pseudo;
+ } else if (ih->protocol == IPPROTO_TCP) {
+ printk("tcp, no auto cksum\n");
+ }
+ }
+ *(u32 *)skb_push(skb, 8) = tagval;
+ }
+#endif
+
+ hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+
+ /* Hmmmm, could probably put a DescIntr on these, but the way
+ the driver is currently coded makes Tx interrupts unnecessary
+ since the clearing of the Tx ring is handled by the start_xmit
+ routine. This organization helps mitigate the interrupts a
+ bit and probably renders the max_tx_latency param useless.
+
+ Update: Putting a DescIntr bit on all of the descriptors and
+ mitigating interrupt frequency with the tx_min_pkt parameter. -KDU
+ */
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
+ hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescEndRing | DescIntr | skb->len);
+ else
+ hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr | skb->len);
+ hmp->cur_tx++;
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ /* If we don't need to read status, DON'T -KDU */
+ status=readw(hmp->base + TxStatus);
+ if( !(status & 0x0001) || (status & 0x0002))
+ writew(0x0001, hmp->base + TxCmd);
+
+ /* Immediately before returning, let's clear as many entries as we can. */
+ hamachi_tx(dev);
+
+ /* We should kick the bottom half here, since we are not accepting
+ * interrupts with every packet. i.e. realize that Gigabit ethernet
+ * can transmit faster than ordinary machines can load packets;
+ * hence, any packet that got put off because we were in the transmit
+ * routine should IMMEDIATELY get a chance to be re-queued. -KDU
+ */
+ if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
+ netif_wake_queue(dev); /* Typical path */
+ else {
+ hmp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+ dev->trans_start = jiffies;
+
+ if (hamachi_debug > 4) {
+ printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
+ dev->name, hmp->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ long boguscnt = max_interrupt_work;
+ int handled = 0;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+#endif
+
+ spin_lock(&hmp->lock);
+
+ do {
+ u32 intr_status = readl(ioaddr + InterruptClear);
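+		/* Reading InterruptClear returns the pending sources and,
+		   as the register name suggests, acknowledges them; the
+		   loop exits once it reads back zero. */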
+
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ handled = 1;
+
+ if (intr_status & IntrRxDone)
+ hamachi_rx(dev);
+
+ if (intr_status & IntrTxDone){
+ /* This code should RARELY need to execute. After all, this is
+ * a gigabit link, it should consume packets as fast as we put
+ * them in AND we clear the Tx ring in hamachi_start_xmit().
+ */
+ if (hmp->tx_full){
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ break;
+ skb = hmp->tx_skbuff[entry];
+ /* Free the original skb. */
+ if (skb){
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[entry].addr,
+ skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ hmp->tx_skbuff[entry] = NULL;
+ }
+ hmp->tx_ring[entry].status_n_length = 0;
+ if (entry >= TX_RING_SIZE-1)
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
+ cpu_to_le32(DescEndRing);
+ hmp->stats.tx_packets++;
+ }
+ if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
+ /* The ring is no longer full */
+ hmp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ } else {
+ netif_wake_queue(dev);
+ }
+ }
+
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status &
+ (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
+ LinkChange | NegotiationChange | StatsMax))
+ hamachi_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (hamachi_debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, readl(ioaddr + IntrStatus));
+
+#ifndef final_version
+ /* Code that should never be run! Perhaps remove after testing.. */
+ {
+ static int stopit = 10;
+ if (dev->start == 0 && --stopit < 0) {
+ printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+ free_irq(irq, dev);
+ }
+ }
+#endif
+
+ spin_unlock(&hmp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int hamachi_rx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ int entry = hmp->cur_rx % RX_RING_SIZE;
+ int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
+
+ if (hamachi_debug > 4) {
+ printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
+ entry, hmp->rx_ring[entry].status_n_length);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
+ u32 desc_status = le32_to_cpu(desc->status_n_length);
+ u16 data_size = desc_status; /* Implicit truncate */
+ u8 *buf_addr;
+ s32 frame_status;
+
+ if (desc_status & DescOwn)
+ break;
+ pci_dma_sync_single_for_cpu(hmp->pci_dev,
+ desc->addr,
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
+ frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & DescEndPacket)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, hmp->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
+ dev->name,
+ hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length & 0xffff0000,
+ hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length & 0x0000ffff,
+ hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length);
+ hmp->stats.rx_length_errors++;
+ } /* else Omit for prototype errata??? */
+ if (frame_status & 0x00380000) {
+ /* There was an error. */
+ if (hamachi_debug > 2)
+ printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
+ frame_status);
+ hmp->stats.rx_errors++;
+ if (frame_status & 0x00600000) hmp->stats.rx_length_errors++;
+ if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++;
+ if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++;
+ if (frame_status < 0) hmp->stats.rx_dropped++;
+ } else {
+ struct sk_buff *skb;
+ /* Omit CRC */
+ u16 pkt_len = (frame_status & 0x07ff) - 4;
+#ifdef RX_CHECKSUM
+ u32 pfck = *(u32 *) &buf_addr[data_size - 8];
+#endif
+
+
+#ifndef final_version
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+ if (hamachi_debug > 5)
+ printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name,
+ *(s32*)&(buf_addr[data_size - 20]),
+ *(s32*)&(buf_addr[data_size - 16]),
+ *(s32*)&(buf_addr[data_size - 12]),
+ *(s32*)&(buf_addr[data_size - 8]),
+ *(s32*)&(buf_addr[data_size - 4]));
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+#ifdef RX_CHECKSUM
+ printk(KERN_ERR "%s: rx_copybreak non-zero "
+ "not good with RX_CHECKSUM\n", dev->name);
+#endif
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(hmp->pci_dev,
+ hmp->rx_ring[entry].addr,
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ /* Call copy + cksum if available. */
+#if 1 || USE_IP_COPYSUM
+ eth_copy_and_sum(skb,
+ hmp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
+ + entry*sizeof(*desc), pkt_len);
+#endif
+ pci_dma_sync_single_for_device(hmp->pci_dev,
+ hmp->rx_ring[entry].addr,
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(hmp->pci_dev,
+ hmp->rx_ring[entry].addr,
+ hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
+ hmp->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+
+
+#ifdef RX_CHECKSUM
+ /* TCP or UDP on ipv4, DIX encoding */
+ if (pfck>>24 == 0x91 || pfck>>24 == 0x51) {
+ struct iphdr *ih = (struct iphdr *) skb->data;
+ /* Check that IP packet is at least 46 bytes, otherwise,
+ * there may be pad bytes included in the hardware checksum.
+ * This wouldn't happen if everyone padded with 0.
+ */
+ if (ntohs(ih->tot_len) >= 46){
+ /* don't worry about frags */
+ if (!(ih->frag_off & __constant_htons(IP_MF|IP_OFFSET))) {
+ u32 inv = *(u32 *) &buf_addr[data_size - 16];
+ u32 *p = (u32 *) &buf_addr[data_size - 20];
+ register u32 crc, p_r, p_r1;
+
+ if (inv & 4) {
+ inv &= ~4;
+ --p;
+ }
+ p_r = *p;
+ p_r1 = *(p-1);
+ switch (inv) {
+ case 0:
+ crc = (p_r & 0xffff) + (p_r >> 16);
+ break;
+ case 1:
+ crc = (p_r >> 16) + (p_r & 0xffff)
+ + (p_r1 >> 16 & 0xff00);
+ break;
+ case 2:
+ crc = p_r + (p_r1 >> 16);
+ break;
+ case 3:
+ crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
+ break;
+ default: /*NOTREACHED*/ crc = 0;
+ }
+ if (crc & 0xffff0000) {
+ crc &= 0xffff;
+ ++crc;
+ }
+ /* tcp/udp will add in pseudo */
+ skb->csum = ntohs(pfck & 0xffff);
+ if (skb->csum > crc)
+ skb->csum -= crc;
+ else
+ skb->csum += (~crc & 0xffff);
+ /*
+ * could do the pseudo myself and return
+ * CHECKSUM_UNNECESSARY
+ */
+ skb->ip_summed = CHECKSUM_HW;
+ }
+ }
+ }
+#endif /* RX_CHECKSUM */
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ hmp->stats.rx_packets++;
+ }
+ entry = (++hmp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
+ struct hamachi_desc *desc;
+
+ entry = hmp->dirty_rx % RX_RING_SIZE;
+ desc = &(hmp->rx_ring[entry]);
+ if (hmp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+
+ hmp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
+ if (entry >= RX_RING_SIZE-1)
+ desc->status_n_length |= cpu_to_le32(DescOwn |
+ DescEndPacket | DescEndRing | DescIntr);
+ else
+ desc->status_n_length |= cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr);
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* If we don't need to check status, don't. -KDU */
+ if (readw(hmp->base + RxStatus) & 0x0002)
+ writew(0x0001, hmp->base + RxCmd);
+
+ return 0;
+}
+
+/* This is more properly named "uncommon interrupt events", as it covers more
+ than just errors. */
+static void hamachi_error(struct net_device *dev, int intr_status)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ if (intr_status & (LinkChange|NegotiationChange)) {
+ if (hamachi_debug > 1)
+ printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
+ " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
+ dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2),
+ readw(ioaddr + ANLinkPartnerAbility),
+ readl(ioaddr + IntrStatus));
+ if (readw(ioaddr + ANStatus) & 0x20)
+ writeb(0x01, ioaddr + LEDCtrl);
+ else
+ writeb(0x03, ioaddr + LEDCtrl);
+ }
+ if (intr_status & StatsMax) {
+ hamachi_get_stats(dev);
+ /* Read the overflow bits to clear. */
+ readl(ioaddr + 0x370);
+ readl(ioaddr + 0x3F0);
+ }
+ if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone))
+ && hamachi_debug)
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ hmp->stats.tx_fifo_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ hmp->stats.rx_fifo_errors++;
+}
+
+static int hamachi_close(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (hamachi_debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
+ dev->name, readw(ioaddr + TxStatus),
+ readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + InterruptEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(2, ioaddr + RxCmd);
+ writew(2, ioaddr + TxCmd);
+
+#ifdef __i386__
+ if (hamachi_debug > 2) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)hmp->tx_ring_dma);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %c #%d desc. %8.8x %8.8x.\n",
+ readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
+ i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)hmp->rx_ring_dma);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
+ readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
+ i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
+ if (hamachi_debug > 6) {
+ if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
+ u16 *addr = (u16 *)
+ hmp->rx_skbuff[i]->tail;
+ int j;
+
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", addr[j]);
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ del_timer_sync(&hmp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = hmp->rx_skbuff[i];
+		hmp->rx_ring[i].status_n_length = 0;
+		if (skb) {
+			pci_unmap_single(hmp->pci_dev,
+				hmp->rx_ring[i].addr, hmp->rx_buf_sz,
+				PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(skb);
+			hmp->rx_skbuff[i] = NULL;
+		}
+		hmp->rx_ring[i].addr = 0xBADF00D0; /* Poison with an invalid address. */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = hmp->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[i].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[i] = NULL;
+ }
+ }
+
+ writeb(0x00, ioaddr + LEDCtrl);
+
+ return 0;
+}
+
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ /* We should lock this segment of code for SMP eventually, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+ /* Ok, what goes here? This appears to be stuck at 21 packets
+ according to ifconfig. It does get incremented in hamachi_tx(),
+ so I think I'll comment it out here and see if better things
+ happen.
+ */
+ /* hmp->stats.tx_packets = readl(ioaddr + 0x000); */
+
+ hmp->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
+ hmp->stats.tx_bytes = readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */
+ hmp->stats.multicast = readl(ioaddr + 0x320); /* Multicast Rx */
+
+ hmp->stats.rx_length_errors = readl(ioaddr + 0x368); /* Over+Undersized */
+ hmp->stats.rx_over_errors = readl(ioaddr + 0x35C); /* Jabber */
+ hmp->stats.rx_crc_errors = readl(ioaddr + 0x360); /* Jabber */
+ hmp->stats.rx_frame_errors = readl(ioaddr + 0x364); /* Symbol Errs */
+ hmp->stats.rx_missed_errors = readl(ioaddr + 0x36C); /* Dropped */
+
+ return &hmp->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ writew(0x000F, ioaddr + AddrMode);
+ } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ writew(0x000B, ioaddr + AddrMode);
+ } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ struct dev_mc_list *mclist;
+ int i;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ ioaddr + 0x104 + i*8);
+ }
+ /* Clear remaining entries. */
+ for (; i < 64; i++)
+ writel(0, ioaddr + 0x104 + i*8);
+ writew(0x0003, ioaddr + AddrMode);
+ } else { /* Normal, unicast/broadcast-only mode. */
+ writew(0x0001, ioaddr + AddrMode);
+ }
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int hamachi_nway_reset(struct net_device *dev)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 hamachi_get_link(struct net_device *dev)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = hamachi_get_drvinfo,
+ .get_settings = hamachi_get_settings,
+ .set_settings = hamachi_set_settings,
+ .nway_reset = hamachi_nway_reset,
+ .get_link = hamachi_get_link,
+};
+
+static struct ethtool_ops ethtool_ops_no_mii = {
+ .begin = check_if_running,
+ .get_drvinfo = hamachi_get_drvinfo,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
+ u32 *d = (u32 *)&rq->ifr_ifru;
+ /* Should add this check here or an ordinary user can do nasty
+ * things. -KDU
+ *
+ * TODO: Shut down the Rx and Tx engines while doing this.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ writel(d[0], np->base + TxIntrCtrl);
+ writel(d[1], np->base + RxIntrCtrl);
+ printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
+ (u32) readl(np->base + TxIntrCtrl),
+ (u32) readl(np->base + RxIntrCtrl));
+ rc = 0;
+ }
+
+ else {
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+ }
+
+ return rc;
+}
+
+
+static void __devexit hamachi_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct hamachi_private *hmp = netdev_priv(dev);
+
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ hmp->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ hmp->tx_ring_dma);
+ unregister_netdev(dev);
+ iounmap(hmp->base);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_device_id hamachi_pci_tbl[] = {
+ { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl);
+
+static struct pci_driver hamachi_driver = {
+ .name = DRV_NAME,
+ .id_table = hamachi_pci_tbl,
+ .probe = hamachi_init_one,
+ .remove = __devexit_p(hamachi_remove_one),
+};
+
+static int __init hamachi_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_register_driver(&hamachi_driver);
+}
+
+static void __exit hamachi_exit (void)
+{
+ pci_unregister_driver(&hamachi_driver);
+}
+
+
+module_init(hamachi_init);
+module_exit(hamachi_exit);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
new file mode 100644
index 000000000000..067b353e1cbd
--- /dev/null
+++ b/drivers/net/hamradio/6pack.c
@@ -0,0 +1,1051 @@
+/*
+ * 6pack.c This module implements the 6pack protocol for kernel-based
+ * devices like TTY. It interfaces between a raw TTY and the
+ * kernel's AX.25 protocol layers.
+ *
+ * Authors: Andreas Könsgen <ajk@iehk.rwth-aachen.de>
+ * Ralf Baechle DL5RB <ralf@linux-mips.org>
+ *
+ * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by
+ *
+ * Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <net/ax25.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <asm/semaphore.h>
+#include <asm/atomic.h>
+
+#define SIXPACK_VERSION "Revision: 0.3.0"
+
+/* sixpack priority commands */
+#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
+#define SIXP_TX_URUN 0x48 /* transmit overrun */
+#define SIXP_RX_ORUN 0x50 /* receive overrun */
+#define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */
+
+#define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */
+
+/* masks to get certain bits out of the status bytes sent by the TNC */
+
+#define SIXP_CMD_MASK 0xC0
+#define SIXP_CHN_MASK 0x07
+#define SIXP_PRIO_CMD_MASK 0x80
+#define SIXP_STD_CMD_MASK 0x40
+#define SIXP_PRIO_DATA_MASK 0x38
+#define SIXP_TX_MASK 0x20
+#define SIXP_RX_MASK 0x10
+#define SIXP_RX_DCD_MASK 0x18
+#define SIXP_LEDS_ON 0x78
+#define SIXP_LEDS_OFF 0x60
+#define SIXP_CON 0x08
+#define SIXP_STA 0x10
+
+#define SIXP_FOUND_TNC 0xe9
+#define SIXP_CON_ON 0x68
+#define SIXP_DCD_MASK 0x08
+#define SIXP_DAMA_OFF 0
+
+/* default level 2 parameters */
+#define SIXP_TXDELAY			(HZ/4)	/* 0.25 s, in jiffies */
+#define SIXP_PERSIST			50	/* in 256ths */
+#define SIXP_SLOTTIME			(HZ/10)	/* 0.1 s, in jiffies */
+#define SIXP_INIT_RESYNC_TIMEOUT	(3*HZ/2) /* 1.5 s, in jiffies */
+#define SIXP_RESYNC_TIMEOUT		(5*HZ)	/* 5 s, in jiffies */
+
+/* 6pack configuration. */
+#define SIXP_NRUNIT 31 /* MAX number of 6pack channels */
+#define SIXP_MTU 256 /* Default MTU */
+
+enum sixpack_flags {
+ SIXPF_ERROR, /* Parity, etc. error */
+};
+
+struct sixpack {
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char *rbuff; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char *xbuff; /* transmitter buffer */
+ unsigned char *xhead; /* next byte to XMIT */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned char raw_buf[4];
+ unsigned char cooked_buf[400];
+
+ unsigned int rx_count;
+ unsigned int rx_count_cooked;
+
+ /* 6pack interface statistics. */
+ struct net_device_stats stats;
+
+ int mtu; /* Our mtu (to spot changes!) */
+ int buffsize; /* Max buffers sizes */
+
+ unsigned long flags; /* Flag values/ mode etc */
+ unsigned char mode; /* 6pack mode */
+
+ /* 6pack stuff */
+ unsigned char tx_delay;
+ unsigned char persistence;
+ unsigned char slottime;
+ unsigned char duplex;
+ unsigned char led_state;
+ unsigned char status;
+ unsigned char status1;
+ unsigned char status2;
+ unsigned char tx_enable;
+ unsigned char tnc_state;
+
+ struct timer_list tx_t;
+ struct timer_list resync_t;
+ atomic_t refcnt;
+ struct semaphore dead_sem;
+ spinlock_t lock;
+};
+
+#define AX25_6PACK_HEADER_LEN 0
+
+static void sp_start_tx_timer(struct sixpack *);
+static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
+
+/*
+ * perform the persistence/slottime algorithm for CSMA access. If the
+ * persistence check was successful, write the data to the serial driver.
+ * Note that in case of DAMA operation, the data is not sent here.
+ */
+
+static void sp_xmit_on_air(unsigned long channel)
+{
+ struct sixpack *sp = (struct sixpack *) channel;
+ int actual;
+ static unsigned char random;
+
+ random = random * 17 + 41;
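+	/* crude linear-congruential byte sequence: being an unsigned char it
+	   wraps mod 256 and is compared against the persistence parameter
+	   below to decide whether to key the transmitter now */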
+
+ if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) {
+ sp->led_state = 0x70;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ sp->tx_enable = 1;
+ actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2);
+ sp->xleft -= actual;
+ sp->xhead += actual;
+ sp->led_state = 0x60;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ sp->status2 = 0;
+ } else
+ sp_start_tx_timer(sp);
+}
+
+/* ----> 6pack timer interrupt handler and friends. <---- */
+static void sp_start_tx_timer(struct sixpack *sp)
+{
+ int when = sp->slottime;
+
+ del_timer(&sp->tx_t);
+ sp->tx_t.data = (unsigned long) sp;
+ sp->tx_t.function = sp_xmit_on_air;
+ sp->tx_t.expires = jiffies + ((when + 1) * HZ) / 100;
+ add_timer(&sp->tx_t);
+}
+
+/* Encapsulate one AX.25 frame and stuff into a TTY queue. */
+static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
+{
+ unsigned char *msg, *p = icp;
+ int actual, count;
+
+ if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
+ msg = "oversized transmit packet!";
+ goto out_drop;
+ }
+
+ if (p[0] > 5) {
+ msg = "invalid KISS command";
+ goto out_drop;
+ }
+
+ if ((p[0] != 0) && (len > 2)) {
+ msg = "KISS control packet too long";
+ goto out_drop;
+ }
+
+ if ((p[0] == 0) && (len < 15)) {
+ msg = "bad AX.25 packet to transmit";
+ goto out_drop;
+ }
+
+ count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags);
+
+ switch (p[0]) {
+ case 1: sp->tx_delay = p[1];
+ return;
+ case 2: sp->persistence = p[1];
+ return;
+ case 3: sp->slottime = p[1];
+ return;
+ case 4: /* ignored */
+ return;
+ case 5: sp->duplex = p[1];
+ return;
+ }
+
+ if (p[0] != 0)
+ return;
+
+ /*
+ * In case of fullduplex or DAMA operation, we don't take care about the
+ * state of the DCD or of any timers, as the determination of the
+ * correct time to send is the job of the AX.25 layer. We send
+ * immediately after data has arrived.
+ */
+ if (sp->duplex == 1) {
+ sp->led_state = 0x70;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ sp->tx_enable = 1;
+ actual = sp->tty->driver->write(sp->tty, sp->xbuff, count);
+ sp->xleft = count - actual;
+ sp->xhead = sp->xbuff + actual;
+ sp->led_state = 0x60;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ } else {
+ sp->xleft = count;
+ sp->xhead = sp->xbuff;
+ sp->status2 = count;
+ if (sp->duplex == 0)
+ sp_start_tx_timer(sp);
+ }
+
+ return;
+
+out_drop:
+ sp->stats.tx_dropped++;
+ netif_start_queue(sp->dev);
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg);
+}
+
+/* Encapsulate an IP datagram and kick it into a TTY queue. */
+
+static int sp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sixpack *sp = netdev_priv(dev);
+
+ spin_lock_bh(&sp->lock);
+ /* We were not busy, so we are now... :-) */
+ netif_stop_queue(dev);
+ sp->stats.tx_bytes += skb->len;
+ sp_encaps(sp, skb->data, skb->len);
+ spin_unlock_bh(&sp->lock);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sp_open_dev(struct net_device *dev)
+{
+ struct sixpack *sp = netdev_priv(dev);
+
+ if (sp->tty == NULL)
+ return -ENODEV;
+ return 0;
+}
+
+/* Close the low-level part of the 6pack channel. */
+static int sp_close(struct net_device *dev)
+{
+ struct sixpack *sp = netdev_priv(dev);
+
+ spin_lock_bh(&sp->lock);
+ if (sp->tty) {
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags);
+ }
+ netif_stop_queue(dev);
+ spin_unlock_bh(&sp->lock);
+
+ return 0;
+}
+
+/* Return the frame type ID */
+static int sp_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+#ifdef CONFIG_INET
+ if (type != htons(ETH_P_AX25))
+ return ax25_encapsulate(skb, dev, type, daddr, saddr, len);
+#endif
+ return 0;
+}
+
+static struct net_device_stats *sp_get_stats(struct net_device *dev)
+{
+ struct sixpack *sp = netdev_priv(dev);
+ return &sp->stats;
+}
+
+static int sp_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr_ax25 *sa = addr;
+
+ if (sa->sax25_family != AF_AX25)
+ return -EINVAL;
+
+ if (!sa->sax25_ndigis)
+ return -EINVAL;
+
+ spin_lock_irq(&dev->xmit_lock);
+ memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ spin_unlock_irq(&dev->xmit_lock);
+
+ return 0;
+}
+
+static int sp_rebuild_header(struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ return ax25_rebuild_header(skb);
+#else
+ return 0;
+#endif
+}
+
+static void sp_setup(struct net_device *dev)
+{
+ static char ax25_bcast[AX25_ADDR_LEN] =
+ {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
+ static char ax25_test[AX25_ADDR_LEN] =
+ {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
+
+ /* Finish setting up the DEVICE info. */
+ dev->mtu = SIXP_MTU;
+ dev->hard_start_xmit = sp_xmit;
+ dev->open = sp_open_dev;
+ dev->destructor = free_netdev;
+ dev->stop = sp_close;
+ dev->hard_header = sp_header;
+ dev->get_stats = sp_get_stats;
+ dev->set_mac_address = sp_set_mac_address;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
+ dev->type = ARPHRD_AX25;
+ dev->tx_queue_len = 10;
+ dev->rebuild_header = sp_rebuild_header;
+ dev->tx_timeout = NULL;
+
+ /* Only activated in AX.25 mode */
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
+
+ SET_MODULE_OWNER(dev);
+
+ dev->flags = 0;
+}
+
+/* Send one completely decapsulated IP datagram to the IP layer. */
+
+/*
+ * This is the routine that sends the received data to the kernel AX.25.
+ * 'cmd' is the KISS command. For AX.25 data, it is zero.
+ */
+
+static void sp_bump(struct sixpack *sp, char cmd)
+{
+ struct sk_buff *skb;
+ int count;
+ unsigned char *ptr;
+
+ count = sp->rcount + 1;
+
+ sp->stats.rx_bytes += count;
+
+ if ((skb = dev_alloc_skb(count)) == NULL)
+ goto out_mem;
+
+ skb->dev = sp->dev;
+ ptr = skb_put(skb, count);
+ *ptr++ = cmd; /* KISS command */
+
+ memcpy(ptr, sp->cooked_buf + 1, count);
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_AX25);
+ netif_rx(skb);
+ sp->dev->last_rx = jiffies;
+ sp->stats.rx_packets++;
+
+ return;
+
+out_mem:
+ sp->stats.rx_dropped++;
+}
+
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data, because the tty
+ * layer provides no locking at all - thus one cpu could be running
+ * sixpack_receive_buf while another calls sixpack_close, which zeroes
+ * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
+ * best way to fix this is to use a rwlock in the tty struct, but for now we
+ * use a single global rwlock for all ttys using this line discipline.
+ */
+static DEFINE_RWLOCK(disc_data_lock);
+
+static struct sixpack *sp_get(struct tty_struct *tty)
+{
+ struct sixpack *sp;
+
+ read_lock(&disc_data_lock);
+ sp = tty->disc_data;
+ if (sp)
+ atomic_inc(&sp->refcnt);
+ read_unlock(&disc_data_lock);
+
+ return sp;
+}
+
+static void sp_put(struct sixpack *sp)
+{
+ if (atomic_dec_and_test(&sp->refcnt))
+ up(&sp->dead_sem);
+}
+
+/*
+ * Called by the TTY driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void sixpack_write_wakeup(struct tty_struct *tty)
+{
+ struct sixpack *sp = sp_get(tty);
+ int actual;
+
+ if (!sp)
+ return;
+ if (sp->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sp->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ sp->tx_enable = 0;
+ netif_wake_queue(sp->dev);
+ goto out;
+ }
+
+ if (sp->tx_enable) {
+ actual = tty->driver->write(tty, sp->xhead, sp->xleft);
+ sp->xleft -= actual;
+ sp->xhead += actual;
+ }
+
+out:
+ sp_put(sp);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int sixpack_receive_room(struct tty_struct *tty)
+{
+ return 65536; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of 6pack data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing.
+ */
+static void sixpack_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, char *fp, int count)
+{
+ struct sixpack *sp;
+ unsigned char buf[512];
+ int count1;
+
+ if (!count)
+ return;
+
+ sp = sp_get(tty);
+ if (!sp)
+ return;
+
+	/* Clamp count so that sixpack_decode() below never walks past the
+	   end of the local buffer. */
+	if (count > sizeof(buf))
+		count = sizeof(buf);
+	memcpy(buf, cp, count);
+
+ /* Read the characters out of the buffer */
+
+ count1 = count;
+ while (count) {
+ count--;
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
+ sp->stats.rx_errors++;
+ continue;
+ }
+ }
+ sixpack_decode(sp, buf, count1);
+
+ sp_put(sp);
+ if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+ && tty->driver->unthrottle)
+ tty->driver->unthrottle(tty);
+}
+
+/*
+ * Try to resync the TNC. Called by the resync timer defined in
+ * decode_prio_command
+ */
+
+#define TNC_UNINITIALIZED 0
+#define TNC_UNSYNC_STARTUP 1
+#define TNC_UNSYNCED 2
+#define TNC_IN_SYNC 3
+
+static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
+{
+ char *msg;
+
+ switch (new_tnc_state) {
+ default: /* gcc oh piece-o-crap ... */
+ case TNC_UNSYNC_STARTUP:
+ msg = "Synchronizing with TNC";
+ break;
+ case TNC_UNSYNCED:
+		msg = "Lost synchronization with TNC";
+ break;
+ case TNC_IN_SYNC:
+ msg = "Found TNC";
+ break;
+ }
+
+ sp->tnc_state = new_tnc_state;
+ printk(KERN_INFO "%s: %s\n", sp->dev->name, msg);
+}
+
+static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
+{
+ int old_tnc_state = sp->tnc_state;
+
+ if (old_tnc_state != new_tnc_state)
+ __tnc_set_sync_state(sp, new_tnc_state);
+}
+
+static void resync_tnc(unsigned long channel)
+{
+ struct sixpack *sp = (struct sixpack *) channel;
+ static char resync_cmd = 0xe8;
+
+ /* clear any data that might have been received */
+
+ sp->rx_count = 0;
+ sp->rx_count_cooked = 0;
+
+ /* reset state machine */
+
+ sp->status = 1;
+ sp->status1 = 1;
+ sp->status2 = 0;
+
+ /* resync the TNC */
+
+ sp->led_state = 0x60;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ sp->tty->driver->write(sp->tty, &resync_cmd, 1);
+
+
+ /* Start resync timer again -- the TNC might be still absent */
+
+ del_timer(&sp->resync_t);
+ sp->resync_t.data = (unsigned long) sp;
+ sp->resync_t.function = resync_tnc;
+ sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
+ add_timer(&sp->resync_t);
+}
+
+static inline int tnc_init(struct sixpack *sp)
+{
+ unsigned char inbyte = 0xe8;
+
+ tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP);
+
+ sp->tty->driver->write(sp->tty, &inbyte, 1);
+
+ del_timer(&sp->resync_t);
+ sp->resync_t.data = (unsigned long) sp;
+ sp->resync_t.function = resync_tnc;
+ sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
+ add_timer(&sp->resync_t);
+
+ return 0;
+}
+
+/*
+ * Open the high-level part of the 6pack channel.
+ * This function is called by the TTY module when the
+ * 6pack line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free 6pack channel...
+ */
+static int sixpack_open(struct tty_struct *tty)
+{
+ char *rbuff = NULL, *xbuff = NULL;
+ struct net_device *dev;
+ struct sixpack *sp;
+ unsigned long len;
+ int err = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
+ if (!dev) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sp = netdev_priv(dev);
+ sp->dev = dev;
+
+ spin_lock_init(&sp->lock);
+ atomic_set(&sp->refcnt, 1);
+ init_MUTEX_LOCKED(&sp->dead_sem);
+
+ /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
+
+ len = dev->mtu * 2;
+
+ rbuff = kmalloc(len + 4, GFP_KERNEL);
+ xbuff = kmalloc(len + 4, GFP_KERNEL);
+
+ if (rbuff == NULL || xbuff == NULL) {
+ err = -ENOBUFS;
+ goto out_free;
+ }
+
+ spin_lock_bh(&sp->lock);
+
+ sp->tty = tty;
+
+ sp->rbuff = rbuff;
+ sp->xbuff = xbuff;
+
+ sp->mtu = AX25_MTU + 73;
+ sp->buffsize = len;
+ sp->rcount = 0;
+ sp->rx_count = 0;
+ sp->rx_count_cooked = 0;
+ sp->xleft = 0;
+
+ sp->flags = 0; /* Clear ESCAPE & ERROR flags */
+
+ sp->duplex = 0;
+ sp->tx_delay = SIXP_TXDELAY;
+ sp->persistence = SIXP_PERSIST;
+ sp->slottime = SIXP_SLOTTIME;
+ sp->led_state = 0x60;
+ sp->status = 1;
+ sp->status1 = 1;
+ sp->status2 = 0;
+ sp->tx_enable = 0;
+
+ netif_start_queue(dev);
+
+ init_timer(&sp->tx_t);
+ init_timer(&sp->resync_t);
+
+ spin_unlock_bh(&sp->lock);
+
+ /* Done. We have linked the TTY line to a channel. */
+ tty->disc_data = sp;
+
+ /* Now we're ready to register. */
+ if (register_netdev(dev))
+ goto out_free;
+
+ tnc_init(sp);
+
+ return 0;
+
+out_free:
+ kfree(xbuff);
+ kfree(rbuff);
+
+ if (dev)
+ free_netdev(dev);
+
+out:
+ return err;
+}
+
+
+/*
+ * Close down a 6pack channel.
+ * This means flushing out any pending queues, and then restoring the
+ * TTY line discipline to what it was before it got hooked to 6pack
+ * (which usually is TTY again).
+ */
+static void sixpack_close(struct tty_struct *tty)
+{
+ struct sixpack *sp;
+
+ write_lock(&disc_data_lock);
+ sp = tty->disc_data;
+ tty->disc_data = NULL;
+ write_unlock(&disc_data_lock);
+	if (!sp)
+ return;
+
+ /*
+ * We have now ensured that nobody can start using ap from now on, but
+ * we have to wait for all existing users to finish.
+ */
+ if (!atomic_dec_and_test(&sp->refcnt))
+ down(&sp->dead_sem);
+
+ unregister_netdev(sp->dev);
+
+ del_timer(&sp->tx_t);
+ del_timer(&sp->resync_t);
+
+ /* Free all 6pack frame buffers. */
+ kfree(sp->rbuff);
+ kfree(sp->xbuff);
+}
+
+/* Perform I/O control on an active 6pack channel. */
+static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct sixpack *sp = sp_get(tty);
+	struct net_device *dev;
+	unsigned int tmp, err;
+
+	if (!sp)
+		return -ENXIO;
+	dev = sp->dev;
+
+ switch(cmd) {
+ case SIOCGIFNAME:
+ err = copy_to_user((void __user *) arg, dev->name,
+ strlen(dev->name) + 1) ? -EFAULT : 0;
+ break;
+
+ case SIOCGIFENCAP:
+ err = put_user(0, (int __user *) arg);
+ break;
+
+ case SIOCSIFENCAP:
+ if (get_user(tmp, (int __user *) arg)) {
+ err = -EFAULT;
+ break;
+ }
+
+ sp->mode = tmp;
+ dev->addr_len = AX25_ADDR_LEN;
+ dev->hard_header_len = AX25_KISS_HEADER_LEN +
+ AX25_MAX_HEADER_LEN + 3;
+ dev->type = ARPHRD_AX25;
+
+ err = 0;
+ break;
+
+ case SIOCSIFHWADDR: {
+ char addr[AX25_ADDR_LEN];
+
+ if (copy_from_user(&addr,
+ (void __user *) arg, AX25_ADDR_LEN)) {
+ err = -EFAULT;
+ break;
+ }
+
+ spin_lock_irq(&dev->xmit_lock);
+ memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
+ spin_unlock_irq(&dev->xmit_lock);
+
+ err = 0;
+ break;
+ }
+
+ /* Allow stty to read, but not set, the serial port */
+ case TCGETS:
+ case TCGETA:
+ err = n_tty_ioctl(tty, (struct file *) file, cmd, arg);
+ break;
+
+ default:
+ err = -ENOIOCTLCMD;
+ }
+
+ sp_put(sp);
+
+ return err;
+}
+
+static struct tty_ldisc sp_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "6pack",
+ .open = sixpack_open,
+ .close = sixpack_close,
+ .ioctl = sixpack_ioctl,
+ .receive_buf = sixpack_receive_buf,
+ .receive_room = sixpack_receive_room,
+ .write_wakeup = sixpack_write_wakeup,
+};
+
+/* Initialize 6pack control device -- register 6pack line discipline */
+
+static char msg_banner[] __initdata = KERN_INFO \
+ "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
+static char msg_regfail[] __initdata = KERN_ERR \
+ "6pack: can't register line discipline (err = %d)\n";
+
+static int __init sixpack_init_driver(void)
+{
+ int status;
+
+ printk(msg_banner);
+
+ /* Register the provided line protocol discipline */
+ if ((status = tty_register_ldisc(N_6PACK, &sp_ldisc)) != 0)
+ printk(msg_regfail, status);
+
+ return status;
+}
+
+static const char msg_unregfail[] __exitdata = KERN_ERR \
+ "6pack: can't unregister line discipline (err = %d)\n";
+
+static void __exit sixpack_exit_driver(void)
+{
+ int ret;
+
+ if ((ret = tty_register_ldisc(N_6PACK, NULL)))
+ printk(msg_unregfail, ret);
+}
+
+/* encode an AX.25 packet into 6pack */
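+/*
+ * Every three payload bytes are spread across four transmitted bytes that
+ * each carry six data bits (hence "6pack").  The KISS command byte in
+ * tx_buf[0] is replaced by tx_delay, and a checksum byte is appended so
+ * that the payload bytes sum to SIXP_CHKSUM (0xff) modulo 256.
+ */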
+
+static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
+ int length, unsigned char tx_delay)
+{
+ int count = 0;
+ unsigned char checksum = 0, buf[400];
+ int raw_count = 0;
+
+ tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK;
+ tx_buf_raw[raw_count++] = SIXP_SEOF;
+
+ buf[0] = tx_delay;
+ for (count = 1; count < length; count++)
+ buf[count] = tx_buf[count];
+
+ for (count = 0; count < length; count++)
+ checksum += buf[count];
+ buf[length] = (unsigned char) 0xff - checksum;
+
+ for (count = 0; count <= length; count++) {
+ if ((count % 3) == 0) {
+ tx_buf_raw[raw_count++] = (buf[count] & 0x3f);
+ tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30);
+ } else if ((count % 3) == 1) {
+ tx_buf_raw[raw_count++] |= (buf[count] & 0x0f);
+ tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c);
+ } else {
+ tx_buf_raw[raw_count++] |= (buf[count] & 0x03);
+ tx_buf_raw[raw_count++] = (buf[count] >> 2);
+ }
+ }
+ if ((length % 3) != 2)
+ raw_count++;
+ tx_buf_raw[raw_count++] = SIXP_SEOF;
+ return raw_count;
+}
+
+/* decode 4 sixpack-encoded bytes into 3 data bytes */
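+/* (each raw byte carries its six data bits in the low bits; this is the
+   inverse of the packing done by encode_sixpack above) */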
+
+static void decode_data(struct sixpack *sp, unsigned char inbyte)
+{
+ unsigned char *buf;
+
+ if (sp->rx_count != 3) {
+ sp->raw_buf[sp->rx_count++] = inbyte;
+
+ return;
+ }
+
+ buf = sp->raw_buf;
+ sp->cooked_buf[sp->rx_count_cooked++] =
+ buf[0] | ((buf[1] << 2) & 0xc0);
+ sp->cooked_buf[sp->rx_count_cooked++] =
+ (buf[1] & 0x0f) | ((buf[2] << 2) & 0xf0);
+ sp->cooked_buf[sp->rx_count_cooked++] =
+ (buf[2] & 0x03) | (inbyte << 2);
+ sp->rx_count = 0;
+}
+
+/* identify and execute a 6pack priority command byte */
+
+static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
+{
+ unsigned char channel;
+ int actual;
+
+ channel = cmd & SIXP_CHN_MASK;
+ if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
+
+ /* RX and DCD flags can only be set in the same prio command,
+ if the DCD flag has been set without the RX flag in the previous
+ prio command. If DCD has not been set before, something in the
+ transmission has gone wrong. In this case, RX and DCD are
+ cleared in order to prevent the decode_data routine from
+ reading further data that might be corrupt. */
+
+ if (((sp->status & SIXP_DCD_MASK) == 0) &&
+ ((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) {
+ if (sp->status != 1)
+ printk(KERN_DEBUG "6pack: protocol violation\n");
+ else
+ sp->status = 0;
+			cmd &= ~SIXP_RX_DCD_MASK;	/* bitwise, not logical, NOT */
+ }
+ sp->status = cmd & SIXP_PRIO_DATA_MASK;
+ } else { /* output watchdog char if idle */
+ if ((sp->status2 != 0) && (sp->duplex == 1)) {
+ sp->led_state = 0x70;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ sp->tx_enable = 1;
+ actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2);
+ sp->xleft -= actual;
+ sp->xhead += actual;
+ sp->led_state = 0x60;
+ sp->status2 = 0;
+
+ }
+ }
+
+ /* needed to trigger the TNC watchdog */
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+
+ /* if the state byte has been received, the TNC is present,
+ so the resync timer can be reset. */
+
+ if (sp->tnc_state == TNC_IN_SYNC) {
+ del_timer(&sp->resync_t);
+ sp->resync_t.data = (unsigned long) sp;
+ sp->resync_t.function = resync_tnc;
+ sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
+ add_timer(&sp->resync_t);
+ }
+
+ sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
+}
+
+/* identify and execute a standard 6pack command byte */
+
+static void decode_std_command(struct sixpack *sp, unsigned char cmd)
+{
+ unsigned char checksum = 0, rest = 0, channel;
+ short i;
+
+ channel = cmd & SIXP_CHN_MASK;
+ switch (cmd & SIXP_CMD_MASK) { /* normal command */
+ case SIXP_SEOF:
+ if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
+ if ((sp->status & SIXP_RX_DCD_MASK) ==
+ SIXP_RX_DCD_MASK) {
+ sp->led_state = 0x68;
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ }
+ } else {
+ sp->led_state = 0x60;
+ /* fill trailing bytes with zeroes */
+ sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+ rest = sp->rx_count;
+ if (rest != 0)
+ for (i = rest; i <= 3; i++)
+ decode_data(sp, 0);
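+			/* the zero padding above completed a full group of
+			   three cooked bytes; discard the cooked bytes that
+			   are not real data */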
+ if (rest == 2)
+ sp->rx_count_cooked -= 2;
+ else if (rest == 3)
+ sp->rx_count_cooked -= 1;
+ for (i = 0; i < sp->rx_count_cooked; i++)
+ checksum += sp->cooked_buf[i];
+ if (checksum != SIXP_CHKSUM) {
+ printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum);
+ } else {
+ sp->rcount = sp->rx_count_cooked-2;
+ sp_bump(sp, 0);
+ }
+ sp->rx_count_cooked = 0;
+ }
+ break;
+ case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
+ break;
+ case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n");
+ break;
+ case SIXP_RX_BUF_OVL:
+ printk(KERN_DEBUG "6pack: RX buffer overflow\n");
+ }
+}
+
+/* decode a 6pack packet */
+
+static void
+sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count)
+{
+ unsigned char inbyte;
+ int count1;
+
+ for (count1 = 0; count1 < count; count1++) {
+ inbyte = pre_rbuff[count1];
+ if (inbyte == SIXP_FOUND_TNC) {
+ tnc_set_sync_state(sp, TNC_IN_SYNC);
+ del_timer(&sp->resync_t);
+ }
+ if ((inbyte & SIXP_PRIO_CMD_MASK) != 0)
+ decode_prio_command(sp, inbyte);
+ else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
+ decode_std_command(sp, inbyte);
+ else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+ decode_data(sp, inbyte);
+ }
+}
+
+MODULE_AUTHOR("Ralf Baechle DO1GRB <ralf@linux-mips.org>");
+MODULE_DESCRIPTION("6pack driver for AX.25");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_6PACK);
+
+module_init(sixpack_init_driver);
+module_exit(sixpack_exit_driver);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
new file mode 100644
index 000000000000..34068f81d45e
--- /dev/null
+++ b/drivers/net/hamradio/Kconfig
@@ -0,0 +1,191 @@
+config MKISS
+ tristate "Serial port KISS driver"
+ depends on AX25 && BROKEN_ON_SMP
+ ---help---
+ KISS is a protocol used for the exchange of data between a computer
+ and a Terminal Node Controller (a small embedded system commonly
+ used for networking over AX.25 amateur radio connections; it
+ connects the computer's serial port with the radio's microphone
+ input and speaker output).
+
+ Although KISS is less advanced than the 6pack protocol, it has
+ the advantage that it is already supported by most modern TNCs
+ without the need for a firmware upgrade.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mkiss.
+
+config 6PACK
+ tristate "Serial port 6PACK driver"
+ depends on AX25 && BROKEN_ON_SMP
+ ---help---
+ 6pack is a transmission protocol for the data exchange between your
+ PC and your TNC (the Terminal Node Controller acts as a kind of
+ modem connecting your computer's serial port to your radio's
+ microphone input and speaker output). This protocol can be used as
+ an alternative to KISS for networking over AX.25 amateur radio
+ connections, but it has some extended functionality.
+
+ Note that this driver is still experimental and might cause
+ problems. For details about the features and the usage of the
+ driver, read <file:Documentation/networking/6pack.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 6pack.
+
+config BPQETHER
+ tristate "BPQ Ethernet driver"
+ depends on AX25
+ help
+ AX.25 is the protocol used for computer communication over amateur
+ radio. If you say Y here, you will be able to send and receive AX.25
+ traffic over Ethernet (also called "BPQ AX.25"), which could be
+ useful if some other computer on your local network has a direct
+ amateur radio connection.
+
+config DMASCC
+ tristate "High-speed (DMA) SCC driver for AX.25"
+ depends on ISA && AX25 && BROKEN_ON_SMP
+ ---help---
+ This is a driver for high-speed SCC boards, i.e. those supporting
+ DMA on one port. You usually use those boards to connect your
+ computer to an amateur radio modem (such as the WA4DSY 56kbps
+ modem), in order to send and receive AX.25 packet radio network
+ traffic.
+
+ Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis
+ PackeTwin, and S5SCC/DMA boards. They are detected automatically.
+ If you have one of these cards, say Y here and read the AX25-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ This driver can operate multiple boards simultaneously. If you
+ compile it as a module (by saying M instead of Y), it will be called
+ dmascc. If you don't pass any parameter to the driver, all
+ possible I/O addresses are probed. This could irritate other devices
+ that are currently not in use. You may specify the list of addresses
+ to be probed by "dmascc=addr1,addr2,..." (when compiled into the
+ kernel image) or "io=addr1,addr2,..." (when loaded as a module). The
+ network interfaces will be called dmascc0 and dmascc1 for the board
+ detected first, dmascc2 and dmascc3 for the second one, and so on.
+
+ Before you configure each interface with ifconfig, you MUST set
+ certain parameters, such as channel access timing, clock mode, and
+ DMA channel. This is accomplished with a small utility program,
+ dmascc_cfg, available at
+ <http://cacofonix.nt.tuwien.ac.at/~oe1kib/Linux/>. Please be sure to
+ get at least version 1.27 of dmascc_cfg, as older versions will not
+ work with the current driver.
+
+config SCC
+ tristate "Z8530 SCC driver"
+ depends on ISA && AX25
+ ---help---
+ These cards are used to connect your Linux box to an amateur radio
+ in order to communicate with other computers. If you want to use
+ this, read <file:Documentation/networking/z8530drv.txt> and the
+ AX25-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Also make sure to say Y
+ to "Amateur Radio AX.25 Level 2" support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called scc.
+
+config SCC_DELAY
+ bool "additional delay for PA0HZP OptoSCC compatible boards"
+ depends on SCC
+ help
+ Say Y here if you experience problems with the SCC driver not
+ working properly; please read
+ <file:Documentation/networking/z8530drv.txt> for details.
+
+ If unsure, say N.
+
+config SCC_TRXECHO
+ bool "support for TRX that feedback the tx signal to rx"
+ depends on SCC
+ help
+ Some transmitters feed the transmitted signal back to the receive
+ line. Say Y here to foil this by explicitly disabling the receiver
+ during data transmission.
+
+ If in doubt, say Y.
+
+config BAYCOM_SER_FDX
+ tristate "BAYCOM ser12 fullduplex driver for AX.25"
+ depends on AX25
+ select CRC_CCITT
+ ---help---
+ This is one of two drivers for Baycom style simple amateur radio
+ modems that connect to a serial interface. The driver supports the
+ ser12 design in full-duplex mode. In addition, it allows the
+ baudrate to be set between 300 and 4800 baud (however not all modems
+ support all baudrates). This is the preferred driver. The next
+ driver, "BAYCOM ser12 half-duplex driver for AX.25" is the old
+ driver and still provided in case this driver does not work with
+ your serial interface chip. To configure the driver, use the sethdlc
+ utility available in the standard ax25 utilities package. For
+ information on the modems, see <http://www.baycom.de/> and
+ <file:Documentation/networking/baycom.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called baycom_ser_fdx. This is recommended.
+
+config BAYCOM_SER_HDX
+ tristate "BAYCOM ser12 halfduplex driver for AX.25"
+ depends on AX25
+ select CRC_CCITT
+ ---help---
+ This is one of two drivers for Baycom style simple amateur radio
+ modems that connect to a serial interface. The driver supports the
+ ser12 design in half-duplex mode. This is the old driver. It is
+ still provided in case your serial interface chip does not work with
+ the full-duplex driver. This driver is deprecated. To configure
+ the driver, use the sethdlc utility available in the standard ax25
+ utilities package. For information on the modems, see
+ <http://www.baycom.de/> and
+ <file:Documentation/networking/baycom.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called baycom_ser_hdx. This is recommended.
+
+config BAYCOM_PAR
+ tristate "BAYCOM picpar and par96 driver for AX.25"
+ depends on PARPORT && AX25
+ select CRC_CCITT
+ ---help---
+ This is a driver for Baycom style simple amateur radio modems that
+ connect to a parallel interface. The driver supports the picpar and
+ par96 designs. To configure the driver, use the sethdlc utility
+ available in the standard ax25 utilities package. For information on
+ the modems, see <http://www.baycom.de/> and the file
+ <file:Documentation/networking/baycom.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called baycom_par. This is recommended.
+
+config BAYCOM_EPP
+ tristate "BAYCOM epp driver for AX.25"
+ depends on PARPORT && AX25 && !64BIT
+ select CRC_CCITT
+ ---help---
+ This is a driver for Baycom style simple amateur radio modems that
+ connect to a parallel interface. The driver supports the EPP
+ designs. To configure the driver, use the sethdlc utility available
+ in the standard ax25 utilities package. For information on the
+ modems, see <http://www.baycom.de/> and the file
+ <file:Documentation/networking/baycom.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called baycom_epp. This is recommended.
+
+config YAM
+ tristate "YAM driver for AX.25"
+ depends on AX25
+ help
+ The YAM is a modem for packet radio which connects to the serial
+ port and includes some of the functions of a Terminal Node
+ Controller. If you have one of those, say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called yam.
+
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
new file mode 100644
index 000000000000..9def86704a91
--- /dev/null
+++ b/drivers/net/hamradio/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the Linux AX.25 and HFMODEM device drivers.
+#
+#
+# 19971130 Moved the amateur radio related network drivers from
+# drivers/net/ to drivers/hamradio for easier maintenance.
+# Joerg Reuter DL1BKE <jreuter@yaina.de>
+#
+# 20000806 Rewritten to use lists instead of if-statements.
+# Christoph Hellwig <hch@infradead.org>
+#
+
+obj-$(CONFIG_DMASCC) += dmascc.o
+obj-$(CONFIG_SCC) += scc.o
+obj-$(CONFIG_MKISS) += mkiss.o
+obj-$(CONFIG_6PACK) += 6pack.o
+obj-$(CONFIG_YAM) += yam.o
+obj-$(CONFIG_BPQETHER) += bpqether.o
+obj-$(CONFIG_BAYCOM_SER_FDX) += baycom_ser_fdx.o hdlcdrv.o
+obj-$(CONFIG_BAYCOM_SER_HDX) += baycom_ser_hdx.o hdlcdrv.o
+obj-$(CONFIG_BAYCOM_PAR) += baycom_par.o hdlcdrv.o
+obj-$(CONFIG_BAYCOM_EPP) += baycom_epp.o hdlcdrv.o
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
new file mode 100644
index 000000000000..e8cb87d906fc
--- /dev/null
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -0,0 +1,1382 @@
+/*****************************************************************************/
+
+/*
+ * baycom_epp.c -- baycom epp radio modem driver.
+ *
+ * Copyright (C) 1998-2000
+ * Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ * History:
+ * 0.1 xx.xx.1998 Initial version by Matthias Welwarsky (dg2fef)
+ * 0.2 21.04.1998 Massive rework by Thomas Sailer
+ * Integrated FPGA EPP modem configuration routines
+ * 0.3 11.05.1998 Took FPGA config out and moved it into a separate program
+ * 0.4 26.07.1999 Adapted to new lowlevel parport driver interface
+ * 0.5 03.08.1999 adapt to Linus' new __setup/__initcall
+ * removed some pre-2.2 kernel compatibility cruft
+ * 0.6 10.08.1999 Check if parport can do SPP and is safe to access during interrupt contexts
+ * 0.7 12.02.2000 adapted to softnet driver interface
+ *
+ */
+
+/*****************************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/parport.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+#include <linux/if_arp.h>
+#include <linux/kmod.h>
+#include <linux/hdlcdrv.h>
+#include <linux/baycom.h>
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+/* prototypes for ax25_encapsulate and ax25_rebuild_header */
+#include <net/ax25.h>
+#endif /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+#include <linux/crc-ccitt.h>
+
+/* --------------------------------------------------------------------- */
+
+#define BAYCOM_DEBUG
+#define BAYCOM_MAGIC 19730510
+
+/* --------------------------------------------------------------------- */
+
+static const char paranoia_str[] = KERN_ERR
+ "baycom_epp: bad magic number for hdlcdrv_state struct in routine %s\n";
+
+static const char bc_drvname[] = "baycom_epp";
+static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
+KERN_INFO "baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n";
+
+/* --------------------------------------------------------------------- */
+
+#define NR_PORTS 4
+
+static struct net_device *baycom_device[NR_PORTS];
+
+/* --------------------------------------------------------------------- */
+
+/* EPP status register */
+#define EPP_DCDBIT 0x80
+#define EPP_PTTBIT 0x08
+#define EPP_NREF 0x01
+#define EPP_NRAEF 0x02
+#define EPP_NRHF 0x04
+#define EPP_NTHF 0x20
+#define EPP_NTAEF 0x10
+#define EPP_NTEF EPP_PTTBIT
+
+/* EPP control register */
+#define EPP_TX_FIFO_ENABLE 0x10
+#define EPP_RX_FIFO_ENABLE 0x08
+#define EPP_MODEM_ENABLE 0x20
+#define EPP_LEDS 0xC0
+#define EPP_IRQ_ENABLE 0x10
+
+/* LPT registers */
+#define LPTREG_ECONTROL 0x402
+#define LPTREG_CONFIGB 0x401
+#define LPTREG_CONFIGA 0x400
+#define LPTREG_EPPDATA 0x004
+#define LPTREG_EPPADDR 0x003
+#define LPTREG_CONTROL 0x002
+#define LPTREG_STATUS 0x001
+#define LPTREG_DATA 0x000
+
+/* LPT control register */
+#define LPTCTRL_PROGRAM 0x04 /* 0 to reprogram */
+#define LPTCTRL_WRITE 0x01
+#define LPTCTRL_ADDRSTB 0x08
+#define LPTCTRL_DATASTB 0x02
+#define LPTCTRL_INTEN 0x10
+
+/* LPT status register */
+#define LPTSTAT_SHIFT_NINTR 6
+#define LPTSTAT_WAIT 0x80
+#define LPTSTAT_NINTR (1<<LPTSTAT_SHIFT_NINTR)
+#define LPTSTAT_PE 0x20
+#define LPTSTAT_DONE 0x10
+#define LPTSTAT_NERROR 0x08
+#define LPTSTAT_EPPTIMEOUT 0x01
+
+/* LPT data register */
+#define LPTDATA_SHIFT_TDI 0
+#define LPTDATA_SHIFT_TMS 2
+#define LPTDATA_TDI (1<<LPTDATA_SHIFT_TDI)
+#define LPTDATA_TCK 0x02
+#define LPTDATA_TMS (1<<LPTDATA_SHIFT_TMS)
+#define LPTDATA_INITBIAS 0x80
+
+
+/* EPP modem config/status bits */
+#define EPP_DCDBIT 0x80
+#define EPP_PTTBIT 0x08
+#define EPP_RXEBIT 0x01
+#define EPP_RXAEBIT 0x02
+#define EPP_RXHFULL 0x04
+
+#define EPP_NTHF 0x20
+#define EPP_NTAEF 0x10
+#define EPP_NTEF EPP_PTTBIT
+
+#define EPP_TX_FIFO_ENABLE 0x10
+#define EPP_RX_FIFO_ENABLE 0x08
+#define EPP_MODEM_ENABLE 0x20
+#define EPP_LEDS 0xC0
+#define EPP_IRQ_ENABLE 0x10
+
+/* Xilinx 4k JTAG instructions */
+#define XC4K_IRLENGTH 3
+#define XC4K_EXTEST 0
+#define XC4K_PRELOAD 1
+#define XC4K_CONFIGURE 5
+#define XC4K_BYPASS 7
+
+#define EPP_CONVENTIONAL 0
+#define EPP_FPGA 1
+#define EPP_FPGAEXTSTATUS 2
+
+#define TXBUFFER_SIZE ((HDLCDRV_MAXFLEN*6/5)+8)
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Information that needs to be kept for each board.
+ */
+
+struct baycom_state {
+ int magic;
+
+ struct pardevice *pdev;
+ unsigned int work_running;
+ struct work_struct run_work;
+ unsigned int modem;
+ unsigned int bitrate;
+ unsigned char stat;
+
+ struct {
+ unsigned int intclk;
+ unsigned int fclk;
+ unsigned int bps;
+ unsigned int extmodem;
+ unsigned int loopback;
+ } cfg;
+
+ struct hdlcdrv_channel_params ch_params;
+
+ struct {
+ unsigned int bitbuf, bitstream, numbits, state;
+ unsigned char *bufptr;
+ int bufcnt;
+ unsigned char buf[TXBUFFER_SIZE];
+ } hdlcrx;
+
+ struct {
+ int calibrate;
+ int slotcnt;
+ int flags;
+ enum { tx_idle = 0, tx_keyup, tx_data, tx_tail } state;
+ unsigned char *bufptr;
+ int bufcnt;
+ unsigned char buf[TXBUFFER_SIZE];
+ } hdlctx;
+
+ struct net_device_stats stats;
+ unsigned int ptt_keyed;
+ struct sk_buff *skb; /* next transmit packet */
+
+#ifdef BAYCOM_DEBUG
+ struct debug_vals {
+ unsigned long last_jiffies;
+ unsigned cur_intcnt;
+ unsigned last_intcnt;
+ int cur_pllcorr;
+ int last_pllcorr;
+ unsigned int mod_cycles;
+ unsigned int demod_cycles;
+ } debug_vals;
+#endif /* BAYCOM_DEBUG */
+};
+
+/* --------------------------------------------------------------------- */
+
+#define KISS_VERBOSE
+
+/* --------------------------------------------------------------------- */
+
+#define PARAM_TXDELAY 1
+#define PARAM_PERSIST 2
+#define PARAM_SLOTTIME 3
+#define PARAM_TXTAIL 4
+#define PARAM_FULLDUP 5
+#define PARAM_HARDWARE 6
+#define PARAM_RETURN 255
+
+/* --------------------------------------------------------------------- */
+/*
+ * the CRC routines are stolen from WAMPES
+ * by Dieter Deyke
+ */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if 0
+static inline void append_crc_ccitt(unsigned char *buffer, int len)
+{
+ unsigned int crc = 0xffff;
+
+ for (;len>0;len--)
+ crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buffer++) & 0xff];
+ crc ^= 0xffff;
+ *buffer++ = crc;
+ *buffer++ = crc >> 8;
+}
+#endif
+
+/*---------------------------------------------------------------------------*/
+
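+/*
+ * A frame whose two appended FCS bytes are included in the CRC-CCITT
+ * computation always leaves the fixed residue 0xf0b8; that is what is
+ * tested here.
+ */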
+static inline int check_crc_ccitt(const unsigned char *buf, int cnt)
+{
+ return (crc_ccitt(0xffff, buf, cnt) & 0xffff) == 0xf0b8;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static inline int calc_crc_ccitt(const unsigned char *buf, int cnt)
+{
+ return (crc_ccitt(0xffff, buf, cnt) ^ 0xffff) & 0xffff;
+}
+
+/* ---------------------------------------------------------------------- */
+
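+/*
+ * convert a time given in units of 10 ms into the number of 8-bit HDLC
+ * flags needed to fill it at the current bit rate:
+ * (tenms / 100) s * bitrate bit/s / 8 bit = tenms * bitrate / 800
+ */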
+#define tenms_to_flags(bc,tenms) ((tenms * bc->bitrate) / 800)
+
+/* --------------------------------------------------------------------- */
+
+static inline void baycom_int_freq(struct baycom_state *bc)
+{
+#ifdef BAYCOM_DEBUG
+ unsigned long cur_jiffies = jiffies;
+ /*
+ * measure the interrupt frequency
+ */
+ bc->debug_vals.cur_intcnt++;
+ if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ bc->debug_vals.last_jiffies = cur_jiffies;
+ bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
+ bc->debug_vals.cur_intcnt = 0;
+ bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
+ bc->debug_vals.cur_pllcorr = 0;
+ }
+#endif /* BAYCOM_DEBUG */
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * eppconfig_path should be settable via /proc/sys.
+ */
+
+static char eppconfig_path[256] = "/usr/sbin/eppfpga";
+
+static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL };
+
+/* eppconfig: called during ifconfig up to configure the modem */
+static int eppconfig(struct baycom_state *bc)
+{
+ char modearg[256];
+ char portarg[16];
+ char *argv[] = { eppconfig_path, "-s", "-p", portarg, "-m", modearg,
+ NULL };
+
+ /* set up arguments */
+ sprintf(modearg, "%sclk,%smodem,fclk=%d,bps=%d,divider=%d%s,extstat",
+ bc->cfg.intclk ? "int" : "ext",
+ bc->cfg.extmodem ? "ext" : "int", bc->cfg.fclk, bc->cfg.bps,
+ (bc->cfg.fclk + 8 * bc->cfg.bps) / (16 * bc->cfg.bps),
+ bc->cfg.loopback ? ",loopback" : "");
+ sprintf(portarg, "%ld", bc->pdev->port->base);
+ printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
+
+ return call_usermodehelper(eppconfig_path, argv, envp, 1);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void epp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+}
+
+/* ---------------------------------------------------------------------- */
+
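+/*
+ * Transmit frames whose leading KISS type byte is non-zero are channel
+ * parameter commands rather than data: data[0] selects the parameter
+ * (PARAM_TXDELAY, PARAM_PERSIST, ...) and data[1] carries its new value.
+ */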
+static inline void do_kiss_params(struct baycom_state *bc,
+ unsigned char *data, unsigned long len)
+{
+
+#ifdef KISS_VERBOSE
+#define PKP(a,b) printk(KERN_INFO "baycom_epp: channel params: " a "\n", b)
+#else /* KISS_VERBOSE */
+#define PKP(a,b)
+#endif /* KISS_VERBOSE */
+
+ if (len < 2)
+ return;
+ switch(data[0]) {
+ case PARAM_TXDELAY:
+ bc->ch_params.tx_delay = data[1];
+ PKP("TX delay = %ums", 10 * bc->ch_params.tx_delay);
+ break;
+ case PARAM_PERSIST:
+ bc->ch_params.ppersist = data[1];
+ PKP("p persistence = %u", bc->ch_params.ppersist);
+ break;
+ case PARAM_SLOTTIME:
+ bc->ch_params.slottime = data[1];
+ PKP("slot time = %ums", bc->ch_params.slottime);
+ break;
+ case PARAM_TXTAIL:
+ bc->ch_params.tx_tail = data[1];
+ PKP("TX tail = %ums", bc->ch_params.tx_tail);
+ break;
+ case PARAM_FULLDUP:
+ bc->ch_params.fulldup = !!data[1];
+ PKP("%s duplex", bc->ch_params.fulldup ? "full" : "half");
+ break;
+ default:
+ break;
+ }
+#undef PKP
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * high performance HDLC encoder
+ * yes, it's ugly, but generates pretty good code
+ */
+
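+/*
+ * ENCODEITERA(j) tests whether bit position j of the freshly shifted-in
+ * byte completes a run of five consecutive 1 bits; if so ENCODEITERB(j)
+ * inserts a 0 bit into the output (HDLC zero stuffing) so that frame data
+ * can never imitate the 0x7e flag, and adjusts the bit count accordingly.
+ */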
+#define ENCODEITERA(j) \
+({ \
+ if (!(notbitstream & (0x1f0 << j))) \
+ goto stuff##j; \
+ encodeend##j: ; \
+})
+
+#define ENCODEITERB(j) \
+({ \
+ stuff##j: \
+ bitstream &= ~(0x100 << j); \
+ bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) | \
+ ((bitbuf & ~(((2 << j) << numbit) - 1)) << 1); \
+ numbit++; \
+ notbitstream = ~bitstream; \
+ goto encodeend##j; \
+})
+
+
+static void encode_hdlc(struct baycom_state *bc)
+{
+ struct sk_buff *skb;
+ unsigned char *wp, *bp;
+ int pkt_len;
+ unsigned bitstream, notbitstream, bitbuf, numbit, crc;
+ unsigned char crcarr[2];
+
+ if (bc->hdlctx.bufcnt > 0)
+ return;
+ skb = bc->skb;
+ if (!skb)
+ return;
+ bc->skb = NULL;
+ pkt_len = skb->len-1; /* strip KISS byte */
+ wp = bc->hdlctx.buf;
+ bp = skb->data+1;
+ crc = calc_crc_ccitt(bp, pkt_len);
+ crcarr[0] = crc;
+ crcarr[1] = crc >> 8;
+ *wp++ = 0x7e;
+ bitstream = bitbuf = numbit = 0;
+ while (pkt_len > -2) {
+ bitstream >>= 8;
+ bitstream |= ((unsigned int)*bp) << 8;
+ bitbuf |= ((unsigned int)*bp) << numbit;
+ notbitstream = ~bitstream;
+ bp++;
+ pkt_len--;
+ if (!pkt_len)
+ bp = crcarr;
+ ENCODEITERA(0);
+ ENCODEITERA(1);
+ ENCODEITERA(2);
+ ENCODEITERA(3);
+ ENCODEITERA(4);
+ ENCODEITERA(5);
+ ENCODEITERA(6);
+ ENCODEITERA(7);
+ goto enditer;
+ ENCODEITERB(0);
+ ENCODEITERB(1);
+ ENCODEITERB(2);
+ ENCODEITERB(3);
+ ENCODEITERB(4);
+ ENCODEITERB(5);
+ ENCODEITERB(6);
+ ENCODEITERB(7);
+ enditer:
+ numbit += 8;
+ while (numbit >= 8) {
+ *wp++ = bitbuf;
+ bitbuf >>= 8;
+ numbit -= 8;
+ }
+ }
+ bitbuf |= 0x7e7e << numbit;
+ numbit += 16;
+ while (numbit >= 8) {
+ *wp++ = bitbuf;
+ bitbuf >>= 8;
+ numbit -= 8;
+ }
+ bc->hdlctx.bufptr = bc->hdlctx.buf;
+ bc->hdlctx.bufcnt = wp - bc->hdlctx.buf;
+ dev_kfree_skb(skb);
+ bc->stats.tx_packets++;
+}
+
+/* ---------------------------------------------------------------------- */
+
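+/*
+ * simple 16-bit linear congruential generator; it only drives the
+ * p-persistence decision in transmit(), so statistical quality is
+ * uncritical
+ */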
+static unsigned short random_seed;
+
+static inline unsigned short random_num(void)
+{
+ random_seed = 28629 * random_seed + 157;
+ return random_seed;
+}
+
+/* ---------------------------------------------------------------------- */
+
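+/*
+ * Feed up to cnt bytes into the modem's transmit FIFO. The channel access
+ * state machine runs tx_idle -> tx_keyup (tx_delay worth of 0x7e flags) ->
+ * tx_data -> tx_tail; in half-duplex mode keying up is deferred while the
+ * channel is busy and then gated by the slot time / p-persistence test.
+ * A pending calibrate request transmits all-zero bytes instead.
+ */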
+static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
+{
+ struct parport *pp = bc->pdev->port;
+ unsigned char tmp[128];
+ int i, j;
+
+ if (bc->hdlctx.state == tx_tail && !(stat & EPP_PTTBIT))
+ bc->hdlctx.state = tx_idle;
+ if (bc->hdlctx.state == tx_idle && bc->hdlctx.calibrate <= 0) {
+ if (bc->hdlctx.bufcnt <= 0)
+ encode_hdlc(bc);
+ if (bc->hdlctx.bufcnt <= 0)
+ return 0;
+ if (!bc->ch_params.fulldup) {
+ if (!(stat & EPP_DCDBIT)) {
+ bc->hdlctx.slotcnt = bc->ch_params.slottime;
+ return 0;
+ }
+ if ((--bc->hdlctx.slotcnt) > 0)
+ return 0;
+ bc->hdlctx.slotcnt = bc->ch_params.slottime;
+ if ((random_num() % 256) > bc->ch_params.ppersist)
+ return 0;
+ }
+ }
+ if (bc->hdlctx.state == tx_idle && bc->hdlctx.bufcnt > 0) {
+ bc->hdlctx.state = tx_keyup;
+ bc->hdlctx.flags = tenms_to_flags(bc, bc->ch_params.tx_delay);
+ bc->ptt_keyed++;
+ }
+ while (cnt > 0) {
+ switch (bc->hdlctx.state) {
+ case tx_keyup:
+ i = min_t(int, cnt, bc->hdlctx.flags);
+ cnt -= i;
+ bc->hdlctx.flags -= i;
+ if (bc->hdlctx.flags <= 0)
+ bc->hdlctx.state = tx_data;
+ memset(tmp, 0x7e, sizeof(tmp));
+ while (i > 0) {
+ j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
+ if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
+ return -1;
+ i -= j;
+ }
+ break;
+
+ case tx_data:
+ if (bc->hdlctx.bufcnt <= 0) {
+ encode_hdlc(bc);
+ if (bc->hdlctx.bufcnt <= 0) {
+ bc->hdlctx.state = tx_tail;
+ bc->hdlctx.flags = tenms_to_flags(bc, bc->ch_params.tx_tail);
+ break;
+ }
+ }
+ i = min_t(int, cnt, bc->hdlctx.bufcnt);
+ bc->hdlctx.bufcnt -= i;
+ cnt -= i;
+ if (i != pp->ops->epp_write_data(pp, bc->hdlctx.bufptr, i, 0))
+ return -1;
+ bc->hdlctx.bufptr += i;
+ break;
+
+ case tx_tail:
+ encode_hdlc(bc);
+ if (bc->hdlctx.bufcnt > 0) {
+ bc->hdlctx.state = tx_data;
+ break;
+ }
+ i = min_t(int, cnt, bc->hdlctx.flags);
+ if (i) {
+ cnt -= i;
+ bc->hdlctx.flags -= i;
+ memset(tmp, 0x7e, sizeof(tmp));
+ while (i > 0) {
+ j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
+ if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
+ return -1;
+ i -= j;
+ }
+ break;
+ }
+
+ default: /* fall through */
+ if (bc->hdlctx.calibrate <= 0)
+ return 0;
+ i = min_t(int, cnt, bc->hdlctx.calibrate);
+ cnt -= i;
+ bc->hdlctx.calibrate -= i;
+ memset(tmp, 0, sizeof(tmp));
+ while (i > 0) {
+ j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
+ if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
+ return -1;
+ i -= j;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void do_rxpacket(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned char *cp;
+ unsigned pktlen;
+
+ if (bc->hdlcrx.bufcnt < 4)
+ return;
+ if (!check_crc_ccitt(bc->hdlcrx.buf, bc->hdlcrx.bufcnt))
+ return;
+ pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */
+ if (!(skb = dev_alloc_skb(pktlen))) {
+ printk("%s: memory squeeze, dropping packet\n", dev->name);
+ bc->stats.rx_dropped++;
+ return;
+ }
+ skb->dev = dev;
+ cp = skb_put(skb, pktlen);
+ *cp++ = 0; /* KISS kludge */
+ memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
+ skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ bc->stats.rx_packets++;
+}
+
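+/*
+ * DECODEITERA(j) checks bit position j for either a candidate flag/abort
+ * (six or more consecutive 1 bits) or a stuffed 0 bit following five 1
+ * bits; DECODEITERB(j) then removes stuffed bits, resets the receiver on
+ * an abort and, on a complete 0x7e flag, hands the assembled buffer to
+ * do_rxpacket().
+ */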
+#define DECODEITERA(j) \
+({ \
+ if (!(notbitstream & (0x0fc << j))) /* flag or abort */ \
+ goto flgabrt##j; \
+ if ((bitstream & (0x1f8 << j)) == (0xf8 << j)) /* stuffed bit */ \
+ goto stuff##j; \
+ enditer##j: ; \
+})
+
+#define DECODEITERB(j) \
+({ \
+ flgabrt##j: \
+ if (!(notbitstream & (0x1fc << j))) { /* abort received */ \
+ state = 0; \
+ goto enditer##j; \
+ } \
+ if ((bitstream & (0x1fe << j)) != (0x0fc << j)) /* flag received */ \
+ goto enditer##j; \
+ if (state) \
+ do_rxpacket(dev); \
+ bc->hdlcrx.bufcnt = 0; \
+ bc->hdlcrx.bufptr = bc->hdlcrx.buf; \
+ state = 1; \
+ numbits = 7-j; \
+ goto enditer##j; \
+ stuff##j: \
+ numbits--; \
+ bitbuf = (bitbuf & ((~0xff) << j)) | ((bitbuf & ~((~0xff) << j)) << 1); \
+ goto enditer##j; \
+})
+
+static int receive(struct net_device *dev, int cnt)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct parport *pp = bc->pdev->port;
+ unsigned int bitbuf, notbitstream, bitstream, numbits, state;
+ unsigned char tmp[128];
+ unsigned char *cp;
+ int cnt2, ret = 0;
+
+ numbits = bc->hdlcrx.numbits;
+ state = bc->hdlcrx.state;
+ bitstream = bc->hdlcrx.bitstream;
+ bitbuf = bc->hdlcrx.bitbuf;
+ while (cnt > 0) {
+ cnt2 = (cnt > sizeof(tmp)) ? sizeof(tmp) : cnt;
+ cnt -= cnt2;
+ if (cnt2 != pp->ops->epp_read_data(pp, tmp, cnt2, 0)) {
+ ret = -1;
+ break;
+ }
+ cp = tmp;
+ for (; cnt2 > 0; cnt2--, cp++) {
+ bitstream >>= 8;
+ bitstream |= (*cp) << 8;
+ bitbuf >>= 8;
+ bitbuf |= (*cp) << 8;
+ numbits += 8;
+ notbitstream = ~bitstream;
+ DECODEITERA(0);
+ DECODEITERA(1);
+ DECODEITERA(2);
+ DECODEITERA(3);
+ DECODEITERA(4);
+ DECODEITERA(5);
+ DECODEITERA(6);
+ DECODEITERA(7);
+ goto enddec;
+ DECODEITERB(0);
+ DECODEITERB(1);
+ DECODEITERB(2);
+ DECODEITERB(3);
+ DECODEITERB(4);
+ DECODEITERB(5);
+ DECODEITERB(6);
+ DECODEITERB(7);
+ enddec:
+ while (state && numbits >= 8) {
+ if (bc->hdlcrx.bufcnt >= TXBUFFER_SIZE) {
+ state = 0;
+ } else {
+ *(bc->hdlcrx.bufptr)++ = bitbuf >> (16-numbits);
+ bc->hdlcrx.bufcnt++;
+ numbits -= 8;
+ }
+ }
+ }
+ }
+ bc->hdlcrx.numbits = numbits;
+ bc->hdlcrx.state = state;
+ bc->hdlcrx.bitstream = bitstream;
+ bc->hdlcrx.bitbuf = bitbuf;
+ return ret;
+}
+
+/* --------------------------------------------------------------------- */
+
+#ifdef __i386__
+#include <asm/msr.h>
+#define GETTICK(x) \
+({ \
+ if (cpu_has_tsc) \
+ rdtscl(x); \
+})
+#else /* __i386__ */
+#define GETTICK(x)
+#endif /* __i386__ */
+
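+/*
+ * The "bottom half" of the driver: scheduled as delayed work once per
+ * jiffy, it polls the EPP status register, determines how much data and
+ * space the modem FIFOs have (read exactly on the FPGA modem with extended
+ * status, estimated from the FIFO flags otherwise) and calls transmit()
+ * and receive() accordingly.
+ */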
+static void epp_bh(struct net_device *dev)
+{
+ struct baycom_state *bc;
+ struct parport *pp;
+ unsigned char stat;
+ unsigned char tmp[2];
+ unsigned int time1 = 0, time2 = 0, time3 = 0;
+ int cnt, cnt2;
+
+ bc = netdev_priv(dev);
+ if (!bc->work_running)
+ return;
+ baycom_int_freq(bc);
+ pp = bc->pdev->port;
+ /* update status */
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ bc->stat = stat;
+ bc->debug_vals.last_pllcorr = stat;
+ GETTICK(time1);
+ if (bc->modem == EPP_FPGAEXTSTATUS) {
+ /* get input count */
+ tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE|1;
+ if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
+ goto epptimeout;
+ if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
+ goto epptimeout;
+ cnt = tmp[0] | (tmp[1] << 8);
+ cnt &= 0x7fff;
+ /* get output count */
+ tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE|2;
+ if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
+ goto epptimeout;
+ if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
+ goto epptimeout;
+ cnt2 = tmp[0] | (tmp[1] << 8);
+ cnt2 = 16384 - (cnt2 & 0x7fff);
+ /* return to normal */
+ tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE;
+ if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
+ goto epptimeout;
+ if (transmit(bc, cnt2, stat))
+ goto epptimeout;
+ GETTICK(time2);
+ if (receive(dev, cnt))
+ goto epptimeout;
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ bc->stat = stat;
+ } else {
+ /* try to tx */
+ switch (stat & (EPP_NTAEF|EPP_NTHF)) {
+ case EPP_NTHF:
+ cnt = 2048 - 256;
+ break;
+
+ case EPP_NTAEF:
+ cnt = 2048 - 1793;
+ break;
+
+ case 0:
+ cnt = 0;
+ break;
+
+ default:
+ cnt = 2048 - 1025;
+ break;
+ }
+ if (transmit(bc, cnt, stat))
+ goto epptimeout;
+ GETTICK(time2);
+ /* do receiver */
+ while ((stat & (EPP_NRAEF|EPP_NRHF)) != EPP_NRHF) {
+ switch (stat & (EPP_NRAEF|EPP_NRHF)) {
+ case EPP_NRAEF:
+ cnt = 1025;
+ break;
+
+ case 0:
+ cnt = 1793;
+ break;
+
+ default:
+ cnt = 256;
+ break;
+ }
+ if (receive(dev, cnt))
+ goto epptimeout;
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ }
+ cnt = 0;
+ if (bc->bitrate < 50000)
+ cnt = 256;
+ else if (bc->bitrate < 100000)
+ cnt = 128;
+ while (cnt > 0 && stat & EPP_NREF) {
+ if (receive(dev, 1))
+ goto epptimeout;
+ cnt--;
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ }
+ }
+ GETTICK(time3);
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.mod_cycles = time2 - time1;
+ bc->debug_vals.demod_cycles = time3 - time2;
+#endif /* BAYCOM_DEBUG */
+ schedule_delayed_work(&bc->run_work, 1);
+ if (!bc->skb)
+ netif_wake_queue(dev);
+ return;
+ epptimeout:
+ printk(KERN_ERR "%s: EPP timeout!\n", bc_drvname);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * ===================== network driver interface =========================
+ */
+
+static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+
+ if (skb->data[0] != 0) {
+ do_kiss_params(bc, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ if (bc->skb)
+ return -1;
+ /* strip KISS byte */
+ if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ netif_stop_queue(dev);
+ bc->skb = skb;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = (struct sockaddr *)addr;
+
+ /* addr is an AX.25 shifted ASCII mac address */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static struct net_device_stats *baycom_get_stats(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+
+ /*
+ * Get the current statistics. This may be called with the
+ * card open or closed.
+ */
+ return &bc->stats;
+}
+
+/* --------------------------------------------------------------------- */
+
+static void epp_wakeup(void *handle)
+{
+ struct net_device *dev = (struct net_device *)handle;
+ struct baycom_state *bc = netdev_priv(dev);
+
+ printk(KERN_DEBUG "baycom_epp: %s: why am I being woken up?\n", dev->name);
+ if (!parport_claim(bc->pdev))
+ printk(KERN_DEBUG "baycom_epp: %s: I'm broken.\n", dev->name);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int epp_open(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct parport *pp = parport_find_base(dev->base_addr);
+ unsigned int i, j;
+ unsigned char tmp[128];
+ unsigned char stat;
+ unsigned long tstart;
+
+ if (!pp) {
+ printk(KERN_ERR "%s: parport at 0x%lx unknown\n", bc_drvname, dev->base_addr);
+ return -ENXIO;
+ }
+#if 0
+ if (pp->irq < 0) {
+ printk(KERN_ERR "%s: parport at 0x%lx has no irq\n", bc_drvname, pp->base);
+ parport_put_port(pp);
+ return -ENXIO;
+ }
+#endif
+ if ((~pp->modes) & (PARPORT_MODE_TRISTATE | PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
+ printk(KERN_ERR "%s: parport at 0x%lx cannot be used\n",
+ bc_drvname, pp->base);
+ parport_put_port(pp);
+ return -EIO;
+ }
+ memset(&bc->modem, 0, sizeof(bc->modem));
+ bc->pdev = parport_register_device(pp, dev->name, NULL, epp_wakeup,
+ epp_interrupt, PARPORT_DEV_EXCL, dev);
+ parport_put_port(pp);
+ if (!bc->pdev) {
+ printk(KERN_ERR "%s: cannot register parport at 0x%lx\n", bc_drvname, pp->base);
+ return -ENXIO;
+ }
+ if (parport_claim(bc->pdev)) {
+ printk(KERN_ERR "%s: parport at 0x%lx busy\n", bc_drvname, pp->base);
+ parport_unregister_device(bc->pdev);
+ return -EBUSY;
+ }
+ dev->irq = /*pp->irq*/ 0;
+ INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+ bc->work_running = 1;
+ bc->modem = EPP_CONVENTIONAL;
+ if (eppconfig(bc))
+ printk(KERN_INFO "%s: no FPGA detected, assuming conventional EPP modem\n", bc_drvname);
+ else
+ bc->modem = /*EPP_FPGA*/ EPP_FPGAEXTSTATUS;
+ parport_write_control(pp, LPTCTRL_PROGRAM); /* prepare EPP mode; we aren't using interrupts */
+ /* reset the modem */
+ tmp[0] = 0;
+ tmp[1] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE;
+ if (pp->ops->epp_write_addr(pp, tmp, 2, 0) != 2)
+ goto epptimeout;
+ /* autoprobe baud rate */
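+ /*
+ * drain the receive FIFO for roughly a third of a second (plus whatever
+ * is left over afterwards) and derive the bit rate from the number of
+ * bytes read: 8 bits per byte divided by the elapsed time
+ */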
+ tstart = jiffies;
+ i = 0;
+ while ((signed)(jiffies-tstart-HZ/3) < 0) {
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) {
+ schedule();
+ continue;
+ }
+ if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
+ goto epptimeout;
+ if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
+ goto epptimeout;
+ i += 256;
+ }
+ for (j = 0; j < 256; j++) {
+ if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
+ goto epptimeout;
+ if (!(stat & EPP_NREF))
+ break;
+ if (pp->ops->epp_read_data(pp, tmp, 1, 0) != 1)
+ goto epptimeout;
+ i++;
+ }
+ tstart = jiffies - tstart;
+ bc->bitrate = i * (8 * HZ) / tstart;
+ j = 1;
+ i = bc->bitrate >> 3;
+ while (j < 7 && i > 150) {
+ j++;
+ i >>= 1;
+ }
+ printk(KERN_INFO "%s: autoprobed bitrate: %d int divider: %d int rate: %d\n",
+ bc_drvname, bc->bitrate, j, bc->bitrate >> (j+2));
+ tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE/*|j*/;
+ if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
+ goto epptimeout;
+ /*
+ * initialise hdlc variables
+ */
+ bc->hdlcrx.state = 0;
+ bc->hdlcrx.numbits = 0;
+ bc->hdlctx.state = tx_idle;
+ bc->hdlctx.bufcnt = 0;
+ bc->hdlctx.slotcnt = bc->ch_params.slottime;
+ bc->hdlctx.calibrate = 0;
+ /* start the bottom half stuff */
+ schedule_delayed_work(&bc->run_work, 1);
+ netif_start_queue(dev);
+ return 0;
+
+ epptimeout:
+ printk(KERN_ERR "%s: epp timeout during bitrate probe\n", bc_drvname);
+ parport_write_control(pp, 0); /* reset the adapter */
+ parport_release(bc->pdev);
+ parport_unregister_device(bc->pdev);
+ return -EIO;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int epp_close(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct parport *pp = bc->pdev->port;
+ unsigned char tmp[1];
+
+ bc->work_running = 0;
+ flush_scheduled_work();
+ bc->stat = EPP_DCDBIT;
+ tmp[0] = 0;
+ pp->ops->epp_write_addr(pp, tmp, 1, 0);
+ parport_write_control(pp, 0); /* reset the adapter */
+ parport_release(bc->pdev);
+ parport_unregister_device(bc->pdev);
+ if (bc->skb)
+ dev_kfree_skb(bc->skb);
+ bc->skb = NULL;
+ printk(KERN_INFO "%s: close epp at iobase 0x%lx irq %u\n",
+ bc_drvname, dev->base_addr, dev->irq);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_setmode(struct baycom_state *bc, const char *modestr)
+{
+ const char *cp;
+
+ if (strstr(modestr,"intclk"))
+ bc->cfg.intclk = 1;
+ if (strstr(modestr,"extclk"))
+ bc->cfg.intclk = 0;
+ if (strstr(modestr,"intmodem"))
+ bc->cfg.extmodem = 0;
+ if (strstr(modestr,"extmodem"))
+ bc->cfg.extmodem = 1;
+ if (strstr(modestr,"noloopback"))
+ bc->cfg.loopback = 0;
+ if (strstr(modestr,"loopback"))
+ bc->cfg.loopback = 1;
+ if ((cp = strstr(modestr,"fclk="))) {
+ bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
+ if (bc->cfg.fclk < 1000000)
+ bc->cfg.fclk = 1000000;
+ if (bc->cfg.fclk > 25000000)
+ bc->cfg.fclk = 25000000;
+ }
+ if ((cp = strstr(modestr,"bps="))) {
+ bc->cfg.bps = simple_strtoul(cp+4, NULL, 0);
+ if (bc->cfg.bps < 1000)
+ bc->cfg.bps = 1000;
+ if (bc->cfg.bps > 1500000)
+ bc->cfg.bps = 1500000;
+ }
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct hdlcdrv_ioctl hi;
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -ENOIOCTLCMD;
+
+ if (copy_from_user(&hi, ifr->ifr_data, sizeof(hi)))
+ return -EFAULT;
+ switch (hi.cmd) {
+ default:
+ return -ENOIOCTLCMD;
+
+ case HDLCDRVCTL_GETCHANNELPAR:
+ hi.data.cp.tx_delay = bc->ch_params.tx_delay;
+ hi.data.cp.tx_tail = bc->ch_params.tx_tail;
+ hi.data.cp.slottime = bc->ch_params.slottime;
+ hi.data.cp.ppersist = bc->ch_params.ppersist;
+ hi.data.cp.fulldup = bc->ch_params.fulldup;
+ break;
+
+ case HDLCDRVCTL_SETCHANNELPAR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+ bc->ch_params.tx_delay = hi.data.cp.tx_delay;
+ bc->ch_params.tx_tail = hi.data.cp.tx_tail;
+ bc->ch_params.slottime = hi.data.cp.slottime;
+ bc->ch_params.ppersist = hi.data.cp.ppersist;
+ bc->ch_params.fulldup = hi.data.cp.fulldup;
+ bc->hdlctx.slotcnt = 1;
+ return 0;
+
+ case HDLCDRVCTL_GETMODEMPAR:
+ hi.data.mp.iobase = dev->base_addr;
+ hi.data.mp.irq = dev->irq;
+ hi.data.mp.dma = dev->dma;
+ hi.data.mp.dma2 = 0;
+ hi.data.mp.seriobase = 0;
+ hi.data.mp.pariobase = 0;
+ hi.data.mp.midiiobase = 0;
+ break;
+
+ case HDLCDRVCTL_SETMODEMPAR:
+ if ((!capable(CAP_SYS_RAWIO)) || netif_running(dev))
+ return -EACCES;
+ dev->base_addr = hi.data.mp.iobase;
+ dev->irq = /*hi.data.mp.irq*/0;
+ dev->dma = /*hi.data.mp.dma*/0;
+ return 0;
+
+ case HDLCDRVCTL_GETSTAT:
+ hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT);
+ hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT);
+ hi.data.cs.ptt_keyed = bc->ptt_keyed;
+ hi.data.cs.tx_packets = bc->stats.tx_packets;
+ hi.data.cs.tx_errors = bc->stats.tx_errors;
+ hi.data.cs.rx_packets = bc->stats.rx_packets;
+ hi.data.cs.rx_errors = bc->stats.rx_errors;
+ break;
+
+ case HDLCDRVCTL_OLDGETSTAT:
+ hi.data.ocs.ptt = !!(bc->stat & EPP_PTTBIT);
+ hi.data.ocs.dcd = !(bc->stat & EPP_DCDBIT);
+ hi.data.ocs.ptt_keyed = bc->ptt_keyed;
+ break;
+
+ case HDLCDRVCTL_CALIBRATE:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ bc->hdlctx.calibrate = hi.data.calibrate * bc->bitrate / 8;
+ return 0;
+
+ case HDLCDRVCTL_DRIVERNAME:
+ strncpy(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername));
+ break;
+
+ case HDLCDRVCTL_GETMODE:
+ sprintf(hi.data.modename, "%sclk,%smodem,fclk=%d,bps=%d%s",
+ bc->cfg.intclk ? "int" : "ext",
+ bc->cfg.extmodem ? "ext" : "int", bc->cfg.fclk, bc->cfg.bps,
+ bc->cfg.loopback ? ",loopback" : "");
+ break;
+
+ case HDLCDRVCTL_SETMODE:
+ if (!capable(CAP_NET_ADMIN) || netif_running(dev))
+ return -EACCES;
+ hi.data.modename[sizeof(hi.data.modename)-1] = '\0';
+ return baycom_setmode(bc, hi.data.modename);
+
+ case HDLCDRVCTL_MODELIST:
+ strncpy(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x",
+ sizeof(hi.data.modename));
+ break;
+
+ case HDLCDRVCTL_MODEMPARMASK:
+ return HDLCDRV_PARMASK_IOBASE;
+
+ }
+ if (copy_to_user(ifr->ifr_data, &hi, sizeof(hi)))
+ return -EFAULT;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+static void baycom_probe(struct net_device *dev)
+{
+ static char ax25_bcast[AX25_ADDR_LEN] = {
+ 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1
+ };
+ static char ax25_nocall[AX25_ADDR_LEN] = {
+ 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1
+ };
+ const struct hdlcdrv_channel_params dflt_ch_params = {
+ 20, 2, 10, 40, 0
+ };
+ struct baycom_state *bc;
+
+ /*
+ * not a real probe! only initialize data structures
+ */
+ bc = netdev_priv(dev);
+ /*
+ * initialize the baycom_state struct
+ */
+ bc->ch_params = dflt_ch_params;
+ bc->ptt_keyed = 0;
+
+ /*
+ * initialize the device struct
+ */
+ dev->open = epp_open;
+ dev->stop = epp_close;
+ dev->do_ioctl = baycom_ioctl;
+ dev->hard_start_xmit = baycom_send_packet;
+ dev->get_stats = baycom_get_stats;
+
+ /* Fill in the fields of the device structure */
+ bc->skb = NULL;
+
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+#else /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+#endif /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+ dev->set_mac_address = baycom_set_mac_address;
+
+ dev->type = ARPHRD_AX25; /* AF_AX25 device */
+ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
+ dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
+ dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_nocall, AX25_ADDR_LEN);
+ dev->tx_queue_len = 16;
+
+ /* New style flags */
+ dev->flags = 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * command line settable parameters
+ */
+static const char *mode[NR_PORTS] = { "", };
+static int iobase[NR_PORTS] = { 0x378, };
+
+module_param_array(mode, charp, NULL, 0);
+MODULE_PARM_DESC(mode, "baycom operating mode");
+module_param_array(iobase, int, NULL, 0);
+MODULE_PARM_DESC(iobase, "baycom io base address");
+
+MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
+MODULE_DESCRIPTION("Baycom epp amateur radio modem driver");
+MODULE_LICENSE("GPL");
+
+/* --------------------------------------------------------------------- */
+
+static void __init baycom_epp_dev_setup(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+
+ /*
+ * initialize part of the baycom_state struct
+ */
+ bc->magic = BAYCOM_MAGIC;
+ bc->cfg.fclk = 19666600;
+ bc->cfg.bps = 9600;
+ /*
+ * initialize part of the device struct
+ */
+ baycom_probe(dev);
+}
+
+static int __init init_baycomepp(void)
+{
+ int i, found = 0;
+ char set_hw = 1;
+
+ printk(bc_drvinfo);
+ /*
+ * register net devices
+ */
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev;
+
+ dev = alloc_netdev(sizeof(struct baycom_state), "bce%d",
+ baycom_epp_dev_setup);
+
+ if (!dev) {
+ printk(KERN_WARNING "bce%d : out of memory\n", i);
+ return found ? 0 : -ENOMEM;
+ }
+
+ sprintf(dev->name, "bce%d", i);
+ dev->base_addr = iobase[i];
+
+ if (!mode[i])
+ set_hw = 0;
+ if (!set_hw)
+ iobase[i] = 0;
+
+ if (register_netdev(dev)) {
+ printk(KERN_WARNING "%s: cannot register net device %s\n", bc_drvname, dev->name);
+ free_netdev(dev);
+ break;
+ }
+ if (set_hw && baycom_setmode(netdev_priv(dev), mode[i]))
+ set_hw = 0;
+ baycom_device[i] = dev;
+ found++;
+ }
+
+ return found ? 0 : -ENXIO;
+}
+
+static void __exit cleanup_baycomepp(void)
+{
+ int i;
+
+ for(i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = baycom_device[i];
+
+ if (dev) {
+ struct baycom_state *bc = netdev_priv(dev);
+ if (bc->magic == BAYCOM_MAGIC) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ } else
+ printk(paranoia_str, "cleanup_module");
+ }
+ }
+}
+
+module_init(init_baycomepp);
+module_exit(cleanup_baycomepp);
+
+/* --------------------------------------------------------------------- */
+
+#ifndef MODULE
+
+/*
+ * format: baycom_epp=io,mode
+ * mode: fpga config options
+ */
+
+static int __init baycom_epp_setup(char *str)
+{
+ static unsigned __initdata nr_dev = 0;
+ int ints[2];
+
+ if (nr_dev >= NR_PORTS)
+ return 0;
+ str = get_options(str, 2, ints);
+ if (ints[0] < 1)
+ return 0;
+ mode[nr_dev] = str;
+ iobase[nr_dev] = ints[1];
+ nr_dev++;
+ return 1;
+}
+
+__setup("baycom_epp=", baycom_epp_setup);
+
+#endif /* MODULE */
+/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
new file mode 100644
index 000000000000..612ad452bee0
--- /dev/null
+++ b/drivers/net/hamradio/baycom_par.c
@@ -0,0 +1,576 @@
+/*****************************************************************************/
+
+/*
+ * baycom_par.c -- baycom par96 and picpar radio modem driver.
+ *
+ * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ * Supported modems
+ *
+ * par96: This is a modem for 9600 baud FSK compatible to the G3RUH standard.
+ * The modem does all the filtering and regenerates the receiver clock.
+ * Data is transferred from and to the PC via a shift register.
+ * The shift register is filled with 16 bits and an interrupt is
+ * signalled. The PC then empties the shift register in a burst. This
+ * modem connects to the parallel port, hence the name. The modem
+ * leaves the implementation of the HDLC protocol and the scrambler
+ * polynomial to the PC. This modem is no longer available (at least
+ * from Baycom) and has been replaced by the PICPAR modem (see below).
+ * You may however still build one from the schematics published in
+ * cq-DL :-).
+ *
+ * picpar: This is a redesign of the par96 modem by Henning Rech, DF9IC. The
+ * modem is protocol compatible to par96, but uses only three low
+ * power ICs and can therefore be fed from the parallel port and
+ * does not require an additional power supply. It features
+ * built in DCD circuitry. The driver should therefore be configured
+ * for hardware DCD.
+ *
+ *
+ * Command line options (insmod command line)
+ *
+ * mode driver mode string. Valid choices are par96 and picpar.
+ * iobase base address of the port; common values are 0x378, 0x278, 0x3bc
+ *
+ *
+ * History:
+ * 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
+ * 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
+ * 0.3 26.04.1997 init code/data tagged
+ * 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
+ * 0.5 11.11.1997 split into separate files for ser12/par96
+ * 0.6 03.08.1999 adapt to Linus' new __setup/__initcall
+ * removed some pre-2.2 kernel compatibility cruft
+ * 0.7 10.08.1999 Check if parport can do SPP and is safe to access during interrupt contexts
+ * 0.8 12.02.2000 adapted to softnet driver interface
+ * removed direct parport access, uses parport driver methods
+ * 0.9 03.07.2000 fix interface name handling
+ */
+
+/*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/hdlcdrv.h>
+#include <linux/baycom.h>
+#include <linux/parport.h>
+#include <linux/bitops.h>
+
+#include <asm/bug.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* --------------------------------------------------------------------- */
+
+#define BAYCOM_DEBUG
+
+/*
+ * modem options; bit mask
+ */
+#define BAYCOM_OPTIONS_SOFTDCD 1
+
+/* --------------------------------------------------------------------- */
+
+static const char bc_drvname[] = "baycom_par";
+static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
+KERN_INFO "baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n";
+
+/* --------------------------------------------------------------------- */
+
+#define NR_PORTS 4
+
+static struct net_device *baycom_device[NR_PORTS];
+
+/* --------------------------------------------------------------------- */
+
+#define PAR96_BURSTBITS 16
+#define PAR96_BURST 4
+#define PAR96_PTT 2
+#define PAR96_TXBIT 1
+#define PAR96_ACK 0x40
+#define PAR96_RXBIT 0x20
+#define PAR96_DCD 0x10
+#define PAR97_POWER 0xf8
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Information that needs to be kept for each board.
+ */
+
+struct baycom_state {
+ struct hdlcdrv_state hdrv;
+
+ struct pardevice *pdev;
+ unsigned int options;
+
+ struct modem_state {
+ short arb_divider;
+ unsigned char flags;
+ unsigned int shreg;
+ struct modem_state_par96 {
+ int dcd_count;
+ unsigned int dcd_shreg;
+ unsigned long descram;
+ unsigned long scram;
+ } par96;
+ } modem;
+
+#ifdef BAYCOM_DEBUG
+ struct debug_vals {
+ unsigned long last_jiffies;
+ unsigned cur_intcnt;
+ unsigned last_intcnt;
+ int cur_pllcorr;
+ int last_pllcorr;
+ } debug_vals;
+#endif /* BAYCOM_DEBUG */
+};
+
+/* --------------------------------------------------------------------- */
+
+static void __inline__ baycom_int_freq(struct baycom_state *bc)
+{
+#ifdef BAYCOM_DEBUG
+ unsigned long cur_jiffies = jiffies;
+ /*
+ * measure the interrupt frequency
+ */
+ bc->debug_vals.cur_intcnt++;
+ if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ bc->debug_vals.last_jiffies = cur_jiffies;
+ bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
+ bc->debug_vals.cur_intcnt = 0;
+ bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
+ bc->debug_vals.cur_pllcorr = 0;
+ }
+#endif /* BAYCOM_DEBUG */
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== PAR96 specific routines =========================
+ */
+
+#define PAR96_DESCRAM_TAP1 0x20000
+#define PAR96_DESCRAM_TAP2 0x01000
+#define PAR96_DESCRAM_TAP3 0x00001
+
+#define PAR96_DESCRAM_TAPSH1 17
+#define PAR96_DESCRAM_TAPSH2 12
+#define PAR96_DESCRAM_TAPSH3 0
+
+#define PAR96_SCRAM_TAP1 0x20000 /* X^17 */
+#define PAR96_SCRAM_TAPN 0x00021 /* X^0+X^5 */
+
+/* --------------------------------------------------------------------- */
+
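+/*
+ * Shift PAR96_BURSTBITS transmit bits out to the modem: each bit is run
+ * through the software scrambler (the modem is G3RUH compatible and leaves
+ * scrambling to the PC, see the header comment above) and then strobed out
+ * with a write_data()/write_data(... | PAR96_BURST) pair.
+ */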
+static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
+{
+ int i;
+ unsigned int data = hdlcdrv_getbits(&bc->hdrv);
+ struct parport *pp = bc->pdev->port;
+
+ for(i = 0; i < PAR96_BURSTBITS; i++, data >>= 1) {
+ unsigned char val = PAR97_POWER;
+ bc->modem.par96.scram = ((bc->modem.par96.scram << 1) |
+ (bc->modem.par96.scram & 1));
+ if (!(data & 1))
+ bc->modem.par96.scram ^= 1;
+ if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 1))
+ bc->modem.par96.scram ^=
+ (PAR96_SCRAM_TAPN << 1);
+ if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 2))
+ val |= PAR96_TXBIT;
+ pp->ops->write_data(pp, val);
+ pp->ops->write_data(pp, val | PAR96_BURST);
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
+static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc)
+{
+ int i;
+ unsigned int data, mask, mask2, descx;
+ struct parport *pp = bc->pdev->port;
+
+ /*
+ * do receiver; differential decode and descramble on the fly
+ */
+ for(data = i = 0; i < PAR96_BURSTBITS; i++) {
+ bc->modem.par96.descram = (bc->modem.par96.descram << 1);
+ if (pp->ops->read_status(pp) & PAR96_RXBIT)
+ bc->modem.par96.descram |= 1;
+ descx = bc->modem.par96.descram ^
+ (bc->modem.par96.descram >> 1);
+ /* now the diff decoded data is inverted in descram */
+ pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT);
+ descx ^= ((descx >> PAR96_DESCRAM_TAPSH1) ^
+ (descx >> PAR96_DESCRAM_TAPSH2));
+ data >>= 1;
+ if (!(descx & 1))
+ data |= 0x8000;
+ pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT | PAR96_BURST);
+ }
+ hdlcdrv_putbits(&bc->hdrv, data);
+ /*
+ * do DCD algorithm
+ */
+ if (bc->options & BAYCOM_OPTIONS_SOFTDCD) {
+ bc->modem.par96.dcd_shreg = (bc->modem.par96.dcd_shreg >> 16)
+ | (data << 16);
+ /* search for flags and set the dcd counter appropriately */
+ for(mask = 0x1fe00, mask2 = 0xfc00, i = 0;
+ i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1)
+ if ((bc->modem.par96.dcd_shreg & mask) == mask2)
+ bc->modem.par96.dcd_count = HDLCDRV_MAXFLEN+4;
+ /* check for abort/noise sequences */
+ for(mask = 0x1fe00, mask2 = 0x1fe00, i = 0;
+ i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1)
+ if (((bc->modem.par96.dcd_shreg & mask) == mask2) &&
+ (bc->modem.par96.dcd_count >= 0))
+ bc->modem.par96.dcd_count -= HDLCDRV_MAXFLEN-10;
+ /* decrement and set the dcd variable */
+ if (bc->modem.par96.dcd_count >= 0)
+ bc->modem.par96.dcd_count -= 2;
+ hdlcdrv_setdcd(&bc->hdrv, bc->modem.par96.dcd_count > 0);
+ } else {
+ hdlcdrv_setdcd(&bc->hdrv, !!(pp->ops->read_status(pp) & PAR96_DCD));
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
+static void par96_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct baycom_state *bc = netdev_priv(dev);
+
+ baycom_int_freq(bc);
+ /*
+ * check if transmitter active
+ */
+ if (hdlcdrv_ptt(&bc->hdrv))
+ par96_tx(dev, bc);
+ else {
+ par96_rx(dev, bc);
+ if (--bc->modem.arb_divider <= 0) {
+ bc->modem.arb_divider = 6;
+ local_irq_enable();
+ hdlcdrv_arbitrate(dev, &bc->hdrv);
+ }
+ }
+ local_irq_enable();
+ hdlcdrv_transmitter(dev, &bc->hdrv);
+ hdlcdrv_receiver(dev, &bc->hdrv);
+ local_irq_disable();
+}
+
+/* --------------------------------------------------------------------- */
+
+static void par96_wakeup(void *handle)
+{
+ struct net_device *dev = (struct net_device *)handle;
+ struct baycom_state *bc = netdev_priv(dev);
+
+ printk(KERN_DEBUG "baycom_par: %s: why am I being woken up?\n", dev->name);
+ if (!parport_claim(bc->pdev))
+ printk(KERN_DEBUG "baycom_par: %s: I'm broken.\n", dev->name);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int par96_open(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct parport *pp;
+
+ if (!dev || !bc)
+ return -ENXIO;
+ pp = parport_find_base(dev->base_addr);
+ if (!pp) {
+ printk(KERN_ERR "baycom_par: parport at 0x%lx unknown\n", dev->base_addr);
+ return -ENXIO;
+ }
+ if (pp->irq < 0) {
+ printk(KERN_ERR "baycom_par: parport at 0x%lx has no irq\n", pp->base);
+ parport_put_port(pp);
+ return -ENXIO;
+ }
+ if ((~pp->modes) & (PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
+ printk(KERN_ERR "baycom_par: parport at 0x%lx cannot be used\n", pp->base);
+ parport_put_port(pp);
+ return -ENXIO;
+ }
+ memset(&bc->modem, 0, sizeof(bc->modem));
+ bc->hdrv.par.bitrate = 9600;
+ bc->pdev = parport_register_device(pp, dev->name, NULL, par96_wakeup,
+ par96_interrupt, PARPORT_DEV_EXCL, dev);
+ parport_put_port(pp);
+ if (!bc->pdev) {
+ printk(KERN_ERR "baycom_par: cannot register parport at 0x%lx\n", dev->base_addr);
+ return -ENXIO;
+ }
+ if (parport_claim(bc->pdev)) {
+ printk(KERN_ERR "baycom_par: parport at 0x%lx busy\n", pp->base);
+ parport_unregister_device(bc->pdev);
+ return -EBUSY;
+ }
+ pp = bc->pdev->port;
+ dev->irq = pp->irq;
+ pp->ops->data_forward(pp);
+ bc->hdrv.par.bitrate = 9600;
+ pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER); /* switch off PTT */
+ pp->ops->enable_irq(pp);
+ printk(KERN_INFO "%s: par96 at iobase 0x%lx irq %u options 0x%x\n",
+ bc_drvname, dev->base_addr, dev->irq, bc->options);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int par96_close(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ struct parport *pp;
+
+ if (!dev || !bc)
+ return -EINVAL;
+ pp = bc->pdev->port;
+ /* disable interrupt */
+ pp->ops->disable_irq(pp);
+ /* switch off PTT */
+ pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER);
+ parport_release(bc->pdev);
+ parport_unregister_device(bc->pdev);
+ printk(KERN_INFO "%s: close par96 at iobase 0x%lx irq %u\n",
+ bc_drvname, dev->base_addr, dev->irq);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== hdlcdrv driver interface =========================
+ */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd);
+
+/* --------------------------------------------------------------------- */
+
+static struct hdlcdrv_ops par96_ops = {
+ .drvname = bc_drvname,
+ .drvinfo = bc_drvinfo,
+ .open = par96_open,
+ .close = par96_close,
+ .ioctl = baycom_ioctl
+};
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_setmode(struct baycom_state *bc, const char *modestr)
+{
+ if (!strncmp(modestr, "picpar", 6))
+ bc->options = 0;
+ else if (!strncmp(modestr, "par96", 5))
+ bc->options = BAYCOM_OPTIONS_SOFTDCD;
+ else
+ bc->options = !!strchr(modestr, '*');
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd)
+{
+ struct baycom_state *bc;
+ struct baycom_ioctl bi;
+
+ if (!dev)
+ return -EINVAL;
+
+ bc = netdev_priv(dev);
+ BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -ENOIOCTLCMD;
+ switch (hi->cmd) {
+ default:
+ break;
+
+ case HDLCDRVCTL_GETMODE:
+ strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_SETMODE:
+ if (netif_running(dev) || !capable(CAP_NET_ADMIN))
+ return -EACCES;
+ hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
+ return baycom_setmode(bc, hi->data.modename);
+
+ case HDLCDRVCTL_MODELIST:
+ strcpy(hi->data.modename, "par96,picpar");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_MODEMPARMASK:
+ return HDLCDRV_PARMASK_IOBASE;
+
+ }
+
+ if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ return -EFAULT;
+ switch (bi.cmd) {
+ default:
+ return -ENOIOCTLCMD;
+
+#ifdef BAYCOM_DEBUG
+ case BAYCOMCTL_GETDEBUG:
+ bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
+ bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
+ bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
+ break;
+#endif /* BAYCOM_DEBUG */
+
+ }
+ if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ return -EFAULT;
+ return 0;
+
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * command line settable parameters
+ */
+static const char *mode[NR_PORTS] = { "picpar", };
+static int iobase[NR_PORTS] = { 0x378, };
+
+module_param_array(mode, charp, NULL, 0);
+MODULE_PARM_DESC(mode, "baycom operating mode; eg. par96 or picpar");
+module_param_array(iobase, int, NULL, 0);
+MODULE_PARM_DESC(iobase, "baycom io base address");
+
+MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
+MODULE_DESCRIPTION("Baycom par96 and picpar amateur radio modem driver");
+MODULE_LICENSE("GPL");
+
+/* --------------------------------------------------------------------- */
+
+static int __init init_baycompar(void)
+{
+ int i, found = 0;
+ char set_hw = 1;
+
+ printk(bc_drvinfo);
+ /*
+ * register net devices
+ */
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev;
+ struct baycom_state *bc;
+ char ifname[IFNAMSIZ];
+
+ sprintf(ifname, "bcp%d", i);
+
+ if (!mode[i])
+ set_hw = 0;
+ if (!set_hw)
+ iobase[i] = 0;
+
+ dev = hdlcdrv_register(&par96_ops,
+ sizeof(struct baycom_state),
+ ifname, iobase[i], 0, 0);
+ if (IS_ERR(dev))
+ break;
+
+ bc = netdev_priv(dev);
+ if (set_hw && baycom_setmode(bc, mode[i]))
+ set_hw = 0;
+ found++;
+ baycom_device[i] = dev;
+ }
+
+ if (!found)
+ return -ENXIO;
+ return 0;
+}
+
+static void __exit cleanup_baycompar(void)
+{
+ int i;
+
+ for(i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = baycom_device[i];
+
+ if (dev)
+ hdlcdrv_unregister(dev);
+ }
+}
+
+module_init(init_baycompar);
+module_exit(cleanup_baycompar);
+
+/* --------------------------------------------------------------------- */
+
+#ifndef MODULE
+
+/*
+ * format: baycom_par=io,mode
+ * mode: par96,picpar
+ */
+
+static int __init baycom_par_setup(char *str)
+{
+ static unsigned nr_dev;
+ int ints[2];
+
+ if (nr_dev >= NR_PORTS)
+ return 0;
+ str = get_options(str, 2, ints);
+ if (ints[0] < 1)
+ return 0;
+ mode[nr_dev] = str;
+ iobase[nr_dev] = ints[1];
+ nr_dev++;
+ return 1;
+}
+
+__setup("baycom_par=", baycom_par_setup);
+
+#endif /* MODULE */
+/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
new file mode 100644
index 000000000000..25f270b05378
--- /dev/null
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -0,0 +1,704 @@
+/*****************************************************************************/
+
+/*
+ * baycom_ser_fdx.c -- baycom ser12 fullduplex radio modem driver.
+ *
+ * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ * Supported modems
+ *
+ * ser12: This is a very simple 1200 baud AFSK modem. The modem consists only
+ * of a modulator/demodulator chip, usually a TI TCM3105. The computer
+ * is responsible for regenerating the receiver bit clock, as well as
+ * for handling the HDLC protocol. The modem connects to a serial port,
+ * hence the name. Since the serial port is not used as an async serial
+ * port, the kernel driver for serial ports cannot be used, and this
+ * driver only supports standard serial hardware (8250, 16450, 16550A)
+ *
+ * This modem usually draws its supply current out of the otherwise unused
+ * TXD pin of the serial port. Thus a continuous stream of 0x00-bytes
+ * is transmitted to achieve a positive supply voltage.
+ *
+ * hsk: This is a 4800 baud FSK modem, designed for TNC use. It works fine
+ * in 'baycom-mode' :-) In contrast to the TCM3105 modem, power is
+ * externally supplied. So there's no need to provide the 0x00-byte-stream
+ * when receiving or idle, which drastically reduces interrupt load.
+ *
+ * Command line options (insmod command line)
+ *
+ * mode ser# hardware DCD
+ * ser#* software DCD
+ * ser#+ hardware DCD, inverted signal at DCD pin
+ * '#' denotes the baud rate / 100, eg. ser12* is '1200 baud, soft DCD'
+ * iobase base address of the port; common values are 0x3f8, 0x2f8, 0x3e8, 0x2e8
+ * baud baud rate (between 300 and 4800)
+ * irq interrupt line of the port; common values are 4,3
+ *
+ *
+ * History:
+ * 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
+ * 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
+ * 0.3 26.04.1997 init code/data tagged
+ * 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
+ * 0.5 11.11.1997 ser12/par96 split into separate files
+ * 0.6 24.01.1998 Thorsten Kranzkowski, dl8bcu and Thomas Sailer:
+ * reduced interrupt load in transmit case
+ * reworked receiver
+ * 0.7 03.08.1999 adapt to Linus' new __setup/__initcall
+ * 0.8 10.08.1999 use module_init/module_exit
+ * 0.9 12.02.2000 adapted to softnet driver interface
+ * 0.10 03.07.2000 fix interface name handling
+ */
+
+/*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/hdlcdrv.h>
+#include <linux/baycom.h>
+
+/* --------------------------------------------------------------------- */
+
+#define BAYCOM_DEBUG
+
+/* --------------------------------------------------------------------- */
+
+static const char bc_drvname[] = "baycom_ser_fdx";
+static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
+KERN_INFO "baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+
+/* --------------------------------------------------------------------- */
+
+#define NR_PORTS 4
+
+static struct net_device *baycom_device[NR_PORTS];
+
+/* --------------------------------------------------------------------- */
+
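+/* 8250/16550 register offsets; DLL/DLM overlay RBR/THR and IER when DLAB is set */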
+#define RBR(iobase) (iobase+0)
+#define THR(iobase) (iobase+0)
+#define IER(iobase) (iobase+1)
+#define IIR(iobase) (iobase+2)
+#define FCR(iobase) (iobase+2)
+#define LCR(iobase) (iobase+3)
+#define MCR(iobase) (iobase+4)
+#define LSR(iobase) (iobase+5)
+#define MSR(iobase) (iobase+6)
+#define SCR(iobase) (iobase+7)
+#define DLL(iobase) (iobase+0)
+#define DLM(iobase) (iobase+1)
+
+#define SER12_EXTENT 8
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Information that needs to be kept for each board.
+ */
+
+struct baycom_state {
+ struct hdlcdrv_state hdrv;
+
+ unsigned int baud, baud_us, baud_arbdiv, baud_uartdiv, baud_dcdtimeout;
+ int opt_dcd;
+
+ struct modem_state {
+ unsigned char flags;
+ unsigned char ptt;
+ unsigned int shreg;
+ struct modem_state_ser12 {
+ unsigned char tx_bit;
+ unsigned char last_rxbit;
+ int dcd_sum0, dcd_sum1, dcd_sum2;
+ int dcd_time;
+ unsigned int pll_time;
+ unsigned int txshreg;
+ } ser12;
+ } modem;
+
+#ifdef BAYCOM_DEBUG
+ struct debug_vals {
+ unsigned long last_jiffies;
+ unsigned cur_intcnt;
+ unsigned last_intcnt;
+ int cur_pllcorr;
+ int last_pllcorr;
+ } debug_vals;
+#endif /* BAYCOM_DEBUG */
+};
+
+/* --------------------------------------------------------------------- */
+
+static inline void baycom_int_freq(struct baycom_state *bc)
+{
+#ifdef BAYCOM_DEBUG
+ unsigned long cur_jiffies = jiffies;
+ /*
+ * measure the interrupt frequency
+ */
+ bc->debug_vals.cur_intcnt++;
+ if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ bc->debug_vals.last_jiffies = cur_jiffies;
+ bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
+ bc->debug_vals.cur_intcnt = 0;
+ bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
+ bc->debug_vals.cur_pllcorr = 0;
+ }
+#endif /* BAYCOM_DEBUG */
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== SER12 specific routines =========================
+ */
+
+/* --------------------------------------------------------------------- */
+
+static inline void ser12_set_divisor(struct net_device *dev,
+ unsigned int divisor)
+{
+ outb(0x81, LCR(dev->base_addr)); /* DLAB = 1 */
+ outb(divisor, DLL(dev->base_addr));
+ outb(divisor >> 8, DLM(dev->base_addr));
+ outb(0x01, LCR(dev->base_addr)); /* word length = 6 */
+ /*
+ * make sure the next interrupt is generated;
+ * 0 must be used to power the modem; the modem draws its
+ * power from the TxD line
+ */
+ outb(0x00, THR(dev->base_addr));
+ /*
+ * it is important not to set the divider while transmitting;
+	 * this reportedly makes some UARTs generate interrupts
+	 * in the hundreds of thousands per second range
+ * Reported by: Ignacio.Arenaza@studi.epfl.ch (Ignacio Arenaza Nuno)
+ */
+}
+
+/* --------------------------------------------------------------------- */
+
+#if 0
+static inline unsigned int hweight16(unsigned int w)
+ __attribute__ ((unused));
+static inline unsigned int hweight8(unsigned int w)
+ __attribute__ ((unused));
+
+static inline unsigned int hweight16(unsigned int w)
+{
+ unsigned short res = (w & 0x5555) + ((w >> 1) & 0x5555);
+ res = (res & 0x3333) + ((res >> 2) & 0x3333);
+ res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
+ return (res & 0x00FF) + ((res >> 8) & 0x00FF);
+}
+
+static inline unsigned int hweight8(unsigned int w)
+{
+ unsigned short res = (w & 0x55) + ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timeval *tv, unsigned char curs)
+{
+ int timediff;
+ int bdus8 = bc->baud_us >> 3;
+ int bdus4 = bc->baud_us >> 2;
+ int bdus2 = bc->baud_us >> 1;
+
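+	/*
+	 * software bit clock: for every full bit period elapsed since the
+	 * last PLL timestamp, advance pll_time by one bit time and shift a
+	 * bit into the receive shift register; the transition handling
+	 * below re-centres pll_time and feeds the software DCD counters
+	 */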
+ timediff = 1000000 + tv->tv_usec - bc->modem.ser12.pll_time;
+ while (timediff >= 500000)
+ timediff -= 1000000;
+ while (timediff >= bdus2) {
+ timediff -= bc->baud_us;
+ bc->modem.ser12.pll_time += bc->baud_us;
+ bc->modem.ser12.dcd_time--;
+ /* first check if there is room to add a bit */
+ if (bc->modem.shreg & 1) {
+ hdlcdrv_putbits(&bc->hdrv, (bc->modem.shreg >> 1) ^ 0xffff);
+ bc->modem.shreg = 0x10000;
+ }
+ /* add a one bit */
+ bc->modem.shreg >>= 1;
+ }
+ if (bc->modem.ser12.dcd_time <= 0) {
+ if (!bc->opt_dcd)
+ hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
+ bc->modem.ser12.dcd_sum1 +
+ bc->modem.ser12.dcd_sum2) < 0);
+ bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
+ bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
+ bc->modem.ser12.dcd_sum0 = 2; /* slight bias */
+ bc->modem.ser12.dcd_time += 120;
+ }
+ if (bc->modem.ser12.last_rxbit != curs) {
+ bc->modem.ser12.last_rxbit = curs;
+ bc->modem.shreg |= 0x10000;
+ /* adjust the PLL */
+ if (timediff > 0)
+ bc->modem.ser12.pll_time += bdus8;
+ else
+ bc->modem.ser12.pll_time += 1000000 - bdus8;
+ /* update DCD */
+ if (abs(timediff) > bdus4)
+ bc->modem.ser12.dcd_sum0 += 4;
+ else
+ bc->modem.ser12.dcd_sum0--;
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.cur_pllcorr = timediff;
+#endif /* BAYCOM_DEBUG */
+ }
+ while (bc->modem.ser12.pll_time >= 1000000)
+ bc->modem.ser12.pll_time -= 1000000;
+}
+
+/* --------------------------------------------------------------------- */
+
+static irqreturn_t ser12_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct baycom_state *bc = netdev_priv(dev);
+ struct timeval tv;
+ unsigned char iir, msr;
+ unsigned int txcount = 0;
+
+ if (!bc || bc->hdrv.magic != HDLCDRV_MAGIC)
+ return IRQ_NONE;
+ /* fast way out for shared irq */
+ if ((iir = inb(IIR(dev->base_addr))) & 1)
+ return IRQ_NONE;
+ /* get current time */
+ do_gettimeofday(&tv);
+ msr = inb(MSR(dev->base_addr));
+ /* delta DCD */
+ if ((msr & 8) && bc->opt_dcd)
+ hdlcdrv_setdcd(&bc->hdrv, !((msr ^ bc->opt_dcd) & 0x80));
+ do {
+ switch (iir & 6) {
+ case 6:
+ inb(LSR(dev->base_addr));
+ break;
+
+ case 4:
+ inb(RBR(dev->base_addr));
+ break;
+
+ case 2:
+ /*
+ * make sure the next interrupt is generated;
+ * 0 must be used to power the modem; the modem draws its
+ * power from the TxD line
+ */
+ outb(0x00, THR(dev->base_addr));
+ baycom_int_freq(bc);
+ txcount++;
+ /*
+ * first output the last bit (!) then call HDLC transmitter,
+ * since this may take quite long
+ */
+ if (bc->modem.ptt)
+ outb(0x0e | (!!bc->modem.ser12.tx_bit), MCR(dev->base_addr));
+ else
+ outb(0x0d, MCR(dev->base_addr)); /* transmitter off */
+ break;
+
+ default:
+ msr = inb(MSR(dev->base_addr));
+ /* delta DCD */
+ if ((msr & 8) && bc->opt_dcd)
+ hdlcdrv_setdcd(&bc->hdrv, !((msr ^ bc->opt_dcd) & 0x80));
+ break;
+ }
+ iir = inb(IIR(dev->base_addr));
+ } while (!(iir & 1));
+ ser12_rx(dev, bc, &tv, msr & 0x10); /* CTS */
+ if (bc->modem.ptt && txcount) {
+ if (bc->modem.ser12.txshreg <= 1) {
+ bc->modem.ser12.txshreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv);
+ if (!hdlcdrv_ptt(&bc->hdrv)) {
+ ser12_set_divisor(dev, 115200/100/8);
+ bc->modem.ptt = 0;
+ goto end_transmit;
+ }
+ }
+ bc->modem.ser12.tx_bit = !(bc->modem.ser12.tx_bit ^ (bc->modem.ser12.txshreg & 1));
+ bc->modem.ser12.txshreg >>= 1;
+ }
+ end_transmit:
+ local_irq_enable();
+ if (!bc->modem.ptt && txcount) {
+ hdlcdrv_arbitrate(dev, &bc->hdrv);
+ if (hdlcdrv_ptt(&bc->hdrv)) {
+ ser12_set_divisor(dev, bc->baud_uartdiv);
+ bc->modem.ser12.txshreg = 1;
+ bc->modem.ptt = 1;
+ }
+ }
+ hdlcdrv_transmitter(dev, &bc->hdrv);
+ hdlcdrv_receiver(dev, &bc->hdrv);
+ local_irq_disable();
+ return IRQ_HANDLED;
+}
+
+/* --------------------------------------------------------------------- */
+
+enum uart { c_uart_unknown, c_uart_8250,
+ c_uart_16450, c_uart_16550, c_uart_16550A};
+static const char *uart_str[] = {
+ "unknown", "8250", "16450", "16550", "16550A"
+};
+
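+/*
+ * probe the UART type: check basic UART behaviour via MCR loopback,
+ * read the FIFO status bits in IIR to tell a 16550A/16550 from a 16450,
+ * and use the scratch register to distinguish a 16450 from an 8250
+ */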
+static enum uart ser12_check_uart(unsigned int iobase)
+{
+ unsigned char b1,b2,b3;
+ enum uart u;
+ enum uart uart_tab[] =
+ { c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A };
+
+ b1 = inb(MCR(iobase));
+ outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
+ b2 = inb(MSR(iobase));
+ outb(0x1a, MCR(iobase));
+ b3 = inb(MSR(iobase)) & 0xf0;
+ outb(b1, MCR(iobase)); /* restore old values */
+ outb(b2, MSR(iobase));
+ if (b3 != 0x90)
+ return c_uart_unknown;
+ inb(RBR(iobase));
+ inb(RBR(iobase));
+ outb(0x01, FCR(iobase)); /* enable FIFOs */
+ u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
+ if (u == c_uart_16450) {
+ outb(0x5a, SCR(iobase));
+ b1 = inb(SCR(iobase));
+ outb(0xa5, SCR(iobase));
+ b2 = inb(SCR(iobase));
+ if ((b1 != 0x5a) || (b2 != 0xa5))
+ u = c_uart_8250;
+ }
+ return u;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int ser12_open(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ enum uart u;
+
+ if (!dev || !bc)
+ return -ENXIO;
+ if (!dev->base_addr || dev->base_addr > 0x1000-SER12_EXTENT ||
+ dev->irq < 2 || dev->irq > 15)
+ return -ENXIO;
+ if (bc->baud < 300 || bc->baud > 4800)
+ return -EINVAL;
+ if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
+		printk(KERN_WARNING "BAYCOM_SER_FDX: I/O port 0x%04lx busy\n",
+ dev->base_addr);
+ return -EACCES;
+ }
+ memset(&bc->modem, 0, sizeof(bc->modem));
+ bc->hdrv.par.bitrate = bc->baud;
+ bc->baud_us = 1000000/bc->baud;
+ bc->baud_uartdiv = (115200/8)/bc->baud;
+ if ((u = ser12_check_uart(dev->base_addr)) == c_uart_unknown){
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EIO;
+ }
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
+ "baycom_ser_fdx", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+ }
+ /*
+ * set the SIO to 6 Bits/character; during receive,
+ * the baud rate is set to produce 100 ints/sec
+ * to feed the channel arbitration process,
+ * during transmit to baud ints/sec to run
+ * the transmitter
+ */
+ ser12_set_divisor(dev, 115200/100/8);
+ /*
+ * enable transmitter empty interrupt and modem status interrupt
+ */
+ outb(0x0a, IER(dev->base_addr));
+ /*
+ * make sure the next interrupt is generated;
+ * 0 must be used to power the modem; the modem draws its
+ * power from the TxD line
+ */
+ outb(0x00, THR(dev->base_addr));
+ hdlcdrv_setdcd(&bc->hdrv, 0);
+ printk(KERN_INFO "%s: ser_fdx at iobase 0x%lx irq %u baud %u uart %s\n",
+ bc_drvname, dev->base_addr, dev->irq, bc->baud, uart_str[u]);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int ser12_close(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+
+ if (!dev || !bc)
+ return -EINVAL;
+ /*
+ * disable interrupts
+ */
+ outb(0, IER(dev->base_addr));
+ outb(1, MCR(dev->base_addr));
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, SER12_EXTENT);
+ printk(KERN_INFO "%s: close ser_fdx at iobase 0x%lx irq %u\n",
+ bc_drvname, dev->base_addr, dev->irq);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== hdlcdrv driver interface =========================
+ */
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd);
+
+/* --------------------------------------------------------------------- */
+
+static struct hdlcdrv_ops ser12_ops = {
+ .drvname = bc_drvname,
+ .drvinfo = bc_drvinfo,
+ .open = ser12_open,
+ .close = ser12_close,
+ .ioctl = baycom_ioctl,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_setmode(struct baycom_state *bc, const char *modestr)
+{
+ unsigned int baud;
+
+ if (!strncmp(modestr, "ser", 3)) {
+ baud = simple_strtoul(modestr+3, NULL, 10);
+ if (baud >= 3 && baud <= 48)
+ bc->baud = baud*100;
+ }
+ if (strchr(modestr, '*'))
+ bc->opt_dcd = 0;
+ else if (strchr(modestr, '+'))
+ bc->opt_dcd = -1;
+ else
+ bc->opt_dcd = 1;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd)
+{
+ struct baycom_state *bc;
+ struct baycom_ioctl bi;
+
+ if (!dev)
+ return -EINVAL;
+
+ bc = netdev_priv(dev);
+ BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -ENOIOCTLCMD;
+ switch (hi->cmd) {
+ default:
+ break;
+
+ case HDLCDRVCTL_GETMODE:
+ sprintf(hi->data.modename, "ser%u", bc->baud / 100);
+ if (bc->opt_dcd <= 0)
+ strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : "+");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_SETMODE:
+ if (netif_running(dev) || !capable(CAP_NET_ADMIN))
+ return -EACCES;
+ hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
+ return baycom_setmode(bc, hi->data.modename);
+
+ case HDLCDRVCTL_MODELIST:
+ strcpy(hi->data.modename, "ser12,ser3,ser24");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_MODEMPARMASK:
+ return HDLCDRV_PARMASK_IOBASE | HDLCDRV_PARMASK_IRQ;
+
+ }
+
+ if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ return -EFAULT;
+ switch (bi.cmd) {
+ default:
+ return -ENOIOCTLCMD;
+
+#ifdef BAYCOM_DEBUG
+ case BAYCOMCTL_GETDEBUG:
+ bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
+ bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
+ bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
+ break;
+#endif /* BAYCOM_DEBUG */
+
+ }
+ if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ return -EFAULT;
+ return 0;
+
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * command line settable parameters
+ */
+static char *mode[NR_PORTS] = { "ser12*", };
+static int iobase[NR_PORTS] = { 0x3f8, };
+static int irq[NR_PORTS] = { 4, };
+static int baud[NR_PORTS] = { [0 ... NR_PORTS-1] = 1200 };
+
+module_param_array(mode, charp, NULL, 0);
+MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD");
+module_param_array(iobase, int, NULL, 0);
+MODULE_PARM_DESC(iobase, "baycom io base address");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "baycom irq number");
+module_param_array(baud, int, NULL, 0);
+MODULE_PARM_DESC(baud, "baycom baud rate (300 to 4800)");
+
+MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
+MODULE_DESCRIPTION("Baycom ser12 full duplex amateur radio modem driver");
+MODULE_LICENSE("GPL");
+
+/* --------------------------------------------------------------------- */
+
+static int __init init_baycomserfdx(void)
+{
+ int i, found = 0;
+ char set_hw = 1;
+
+ printk(bc_drvinfo);
+ /*
+ * register net devices
+ */
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev;
+ struct baycom_state *bc;
+ char ifname[IFNAMSIZ];
+
+ sprintf(ifname, "bcsf%d", i);
+
+ if (!mode[i])
+ set_hw = 0;
+ if (!set_hw)
+ iobase[i] = irq[i] = 0;
+
+ dev = hdlcdrv_register(&ser12_ops,
+ sizeof(struct baycom_state),
+ ifname, iobase[i], irq[i], 0);
+ if (IS_ERR(dev))
+ break;
+
+ bc = netdev_priv(dev);
+ if (set_hw && baycom_setmode(bc, mode[i]))
+ set_hw = 0;
+ bc->baud = baud[i];
+ found++;
+ baycom_device[i] = dev;
+ }
+
+ if (!found)
+ return -ENXIO;
+ return 0;
+}
+
+static void __exit cleanup_baycomserfdx(void)
+{
+ int i;
+
+ for(i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = baycom_device[i];
+ if (dev)
+ hdlcdrv_unregister(dev);
+ }
+}
+
+module_init(init_baycomserfdx);
+module_exit(cleanup_baycomserfdx);
+
+/* --------------------------------------------------------------------- */
+
+#ifndef MODULE
+
+/*
+ * format: baycom_ser_fdx=io,irq,mode
+ * mode: ser# hardware DCD
+ * ser#* software DCD
+ * ser#+ hardware DCD, inverted signal at DCD pin
+ * '#' denotes the baud rate / 100, eg. ser12* is '1200 baud, soft DCD'
+ */
+
+static int __init baycom_ser_fdx_setup(char *str)
+{
+ static unsigned nr_dev;
+ int ints[4];
+
+ if (nr_dev >= NR_PORTS)
+ return 0;
+ str = get_options(str, 4, ints);
+ if (ints[0] < 2)
+ return 0;
+ mode[nr_dev] = str;
+ iobase[nr_dev] = ints[1];
+ irq[nr_dev] = ints[2];
+ if (ints[0] >= 3)
+ baud[nr_dev] = ints[3];
+ nr_dev++;
+ return 1;
+}
+
+__setup("baycom_ser_fdx=", baycom_ser_fdx_setup);
+
+#endif /* MODULE */
+/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
new file mode 100644
index 000000000000..eead85d00962
--- /dev/null
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -0,0 +1,740 @@
+/*****************************************************************************/
+
+/*
+ * baycom_ser_hdx.c -- baycom ser12 halfduplex radio modem driver.
+ *
+ * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ * Supported modems
+ *
+ * ser12: This is a very simple 1200 baud AFSK modem. The modem consists only
+ * of a modulator/demodulator chip, usually a TI TCM3105. The computer
+ * is responsible for regenerating the receiver bit clock, as well as
+ * for handling the HDLC protocol. The modem connects to a serial port,
+ * hence the name. Since the serial port is not used as an async serial
+ * port, the kernel driver for serial ports cannot be used, and this
+ * driver only supports standard serial hardware (8250, 16450, 16550A)
+ *
+ *
+ * Command line options (insmod command line)
+ *
+ * mode ser12 hardware DCD
+ * ser12* software DCD
+ * ser12@ hardware/software DCD, i.e. no explicit DCD signal but hardware
+ * mutes audio input to the modem
+ * ser12+ hardware DCD, inverted signal at DCD pin
+ * iobase base address of the port; common values are 0x3f8, 0x2f8, 0x3e8, 0x2e8
+ * irq interrupt line of the port; common values are 4,3
+ *
+ *
+ * History:
+ * 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
+ * 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
+ * 0.3 26.04.1997 init code/data tagged
+ * 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
+ * 0.5 11.11.1997 ser12/par96 split into separate files
+ * 0.6 14.04.1998 cleanups
+ * 0.7 03.08.1999 adapt to Linus' new __setup/__initcall
+ * 0.8 10.08.1999 use module_init/module_exit
+ * 0.9 12.02.2000 adapted to softnet driver interface
+ * 0.10 03.07.2000 fix interface name handling
+ */
+
+/*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/hdlcdrv.h>
+#include <linux/baycom.h>
+
+/* --------------------------------------------------------------------- */
+
+#define BAYCOM_DEBUG
+
+/* --------------------------------------------------------------------- */
+
+static const char bc_drvname[] = "baycom_ser_hdx";
+static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
+KERN_INFO "baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+
+/* --------------------------------------------------------------------- */
+
+#define NR_PORTS 4
+
+static struct net_device *baycom_device[NR_PORTS];
+
+/* --------------------------------------------------------------------- */
+
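+/* 8250/16550 register offsets; DLL/DLM overlay RBR/THR and IER when DLAB is set */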
+#define RBR(iobase) (iobase+0)
+#define THR(iobase) (iobase+0)
+#define IER(iobase) (iobase+1)
+#define IIR(iobase) (iobase+2)
+#define FCR(iobase) (iobase+2)
+#define LCR(iobase) (iobase+3)
+#define MCR(iobase) (iobase+4)
+#define LSR(iobase) (iobase+5)
+#define MSR(iobase) (iobase+6)
+#define SCR(iobase) (iobase+7)
+#define DLL(iobase) (iobase+0)
+#define DLM(iobase) (iobase+1)
+
+#define SER12_EXTENT 8
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Information that needs to be kept for each board.
+ */
+
+struct baycom_state {
+ struct hdlcdrv_state hdrv;
+
+ int opt_dcd;
+
+ struct modem_state {
+ short arb_divider;
+ unsigned char flags;
+ unsigned int shreg;
+ struct modem_state_ser12 {
+ unsigned char tx_bit;
+ int dcd_sum0, dcd_sum1, dcd_sum2;
+ unsigned char last_sample;
+ unsigned char last_rxbit;
+ unsigned int dcd_shreg;
+ unsigned int dcd_time;
+ unsigned int bit_pll;
+ unsigned char interm_sample;
+ } ser12;
+ } modem;
+
+#ifdef BAYCOM_DEBUG
+ struct debug_vals {
+ unsigned long last_jiffies;
+ unsigned cur_intcnt;
+ unsigned last_intcnt;
+ int cur_pllcorr;
+ int last_pllcorr;
+ } debug_vals;
+#endif /* BAYCOM_DEBUG */
+};
+
+/* --------------------------------------------------------------------- */
+
+static inline void baycom_int_freq(struct baycom_state *bc)
+{
+#ifdef BAYCOM_DEBUG
+ unsigned long cur_jiffies = jiffies;
+ /*
+ * measure the interrupt frequency
+ */
+ bc->debug_vals.cur_intcnt++;
+ if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ bc->debug_vals.last_jiffies = cur_jiffies;
+ bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
+ bc->debug_vals.cur_intcnt = 0;
+ bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
+ bc->debug_vals.cur_pllcorr = 0;
+ }
+#endif /* BAYCOM_DEBUG */
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== SER12 specific routines =========================
+ */
+
+static inline void ser12_set_divisor(struct net_device *dev,
+ unsigned char divisor)
+{
+ outb(0x81, LCR(dev->base_addr)); /* DLAB = 1 */
+ outb(divisor, DLL(dev->base_addr));
+ outb(0, DLM(dev->base_addr));
+ outb(0x01, LCR(dev->base_addr)); /* word length = 6 */
+ /*
+ * make sure the next interrupt is generated;
+ * 0 must be used to power the modem; the modem draws its
+ * power from the TxD line
+ */
+ outb(0x00, THR(dev->base_addr));
+ /*
+ * it is important not to set the divider while transmitting;
+	 * this reportedly makes some UARTs generate interrupts
+	 * in the hundreds of thousands per second range
+ * Reported by: Ignacio.Arenaza@studi.epfl.ch (Ignacio Arenaza Nuno)
+ */
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * must call the TX arbitrator every 10ms
+ */
+#define SER12_ARB_DIVIDER(bc) (bc->opt_dcd ? 24 : 36)
+
+#define SER12_DCD_INTERVAL(bc) (bc->opt_dcd ? 12 : 240)
+
+static inline void ser12_tx(struct net_device *dev, struct baycom_state *bc)
+{
+ /* one interrupt per channel bit */
+ ser12_set_divisor(dev, 12);
+ /*
+ * first output the last bit (!) then call HDLC transmitter,
+ * since this may take quite long
+ */
+ outb(0x0e | (!!bc->modem.ser12.tx_bit), MCR(dev->base_addr));
+ if (bc->modem.shreg <= 1)
+ bc->modem.shreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv);
+ bc->modem.ser12.tx_bit = !(bc->modem.ser12.tx_bit ^
+ (bc->modem.shreg & 1));
+ bc->modem.shreg >>= 1;
+}
+
+/* --------------------------------------------------------------------- */
+
+static inline void ser12_rx(struct net_device *dev, struct baycom_state *bc)
+{
+ unsigned char cur_s;
+ /*
+ * do demodulator
+ */
+ cur_s = inb(MSR(dev->base_addr)) & 0x10; /* the CTS line */
+ hdlcdrv_channelbit(&bc->hdrv, cur_s);
+ bc->modem.ser12.dcd_shreg = (bc->modem.ser12.dcd_shreg << 1) |
+ (cur_s != bc->modem.ser12.last_sample);
+ bc->modem.ser12.last_sample = cur_s;
+ if(bc->modem.ser12.dcd_shreg & 1) {
+ if (!bc->opt_dcd) {
+ unsigned int dcdspos, dcdsneg;
+
+ dcdspos = dcdsneg = 0;
+ dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1);
+ if (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe))
+ dcdspos += 2;
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1);
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1);
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1);
+
+ bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg;
+ } else
+ bc->modem.ser12.dcd_sum0--;
+ }
+ if(!bc->modem.ser12.dcd_time) {
+ hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
+ bc->modem.ser12.dcd_sum1 +
+ bc->modem.ser12.dcd_sum2) < 0);
+ bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
+ bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
+ /* offset to ensure DCD off on silent input */
+ bc->modem.ser12.dcd_sum0 = 2;
+ bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc);
+ }
+ bc->modem.ser12.dcd_time--;
+ if (!bc->opt_dcd) {
+ /*
+ * PLL code for the improved software DCD algorithm
+ */
+ if (bc->modem.ser12.interm_sample) {
+ /*
+ * intermediate sample; set timing correction to normal
+ */
+ ser12_set_divisor(dev, 4);
+ } else {
+ /*
+ * do PLL correction and call HDLC receiver
+ */
+ switch (bc->modem.ser12.dcd_shreg & 7) {
+ case 1: /* transition too late */
+ ser12_set_divisor(dev, 5);
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.cur_pllcorr++;
+#endif /* BAYCOM_DEBUG */
+ break;
+ case 4: /* transition too early */
+ ser12_set_divisor(dev, 3);
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.cur_pllcorr--;
+#endif /* BAYCOM_DEBUG */
+ break;
+ default:
+ ser12_set_divisor(dev, 4);
+ break;
+ }
+ bc->modem.shreg >>= 1;
+ if (bc->modem.ser12.last_sample ==
+ bc->modem.ser12.last_rxbit)
+ bc->modem.shreg |= 0x10000;
+ bc->modem.ser12.last_rxbit =
+ bc->modem.ser12.last_sample;
+ }
+ if (++bc->modem.ser12.interm_sample >= 3)
+ bc->modem.ser12.interm_sample = 0;
+ /*
+ * DCD stuff
+ */
+ if (bc->modem.ser12.dcd_shreg & 1) {
+ unsigned int dcdspos, dcdsneg;
+
+ dcdspos = dcdsneg = 0;
+ dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1);
+ dcdspos += (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe))
+ << 1;
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1);
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1);
+ dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1);
+
+ bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg;
+ }
+ } else {
+ /*
+ * PLL algorithm for the hardware squelch DCD algorithm
+ */
+ if (bc->modem.ser12.interm_sample) {
+ /*
+ * intermediate sample; set timing correction to normal
+ */
+ ser12_set_divisor(dev, 6);
+ } else {
+ /*
+ * do PLL correction and call HDLC receiver
+ */
+ switch (bc->modem.ser12.dcd_shreg & 3) {
+ case 1: /* transition too late */
+ ser12_set_divisor(dev, 7);
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.cur_pllcorr++;
+#endif /* BAYCOM_DEBUG */
+ break;
+ case 2: /* transition too early */
+ ser12_set_divisor(dev, 5);
+#ifdef BAYCOM_DEBUG
+ bc->debug_vals.cur_pllcorr--;
+#endif /* BAYCOM_DEBUG */
+ break;
+ default:
+ ser12_set_divisor(dev, 6);
+ break;
+ }
+ bc->modem.shreg >>= 1;
+ if (bc->modem.ser12.last_sample ==
+ bc->modem.ser12.last_rxbit)
+ bc->modem.shreg |= 0x10000;
+ bc->modem.ser12.last_rxbit =
+ bc->modem.ser12.last_sample;
+ }
+ bc->modem.ser12.interm_sample = !bc->modem.ser12.interm_sample;
+ /*
+ * DCD stuff
+ */
+ bc->modem.ser12.dcd_sum0 -= (bc->modem.ser12.dcd_shreg & 1);
+ }
+ outb(0x0d, MCR(dev->base_addr)); /* transmitter off */
+ if (bc->modem.shreg & 1) {
+ hdlcdrv_putbits(&bc->hdrv, bc->modem.shreg >> 1);
+ bc->modem.shreg = 0x10000;
+ }
+ if(!bc->modem.ser12.dcd_time) {
+ if (bc->opt_dcd & 1)
+ hdlcdrv_setdcd(&bc->hdrv, !((inb(MSR(dev->base_addr)) ^ bc->opt_dcd) & 0x80));
+ else
+ hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
+ bc->modem.ser12.dcd_sum1 +
+ bc->modem.ser12.dcd_sum2) < 0);
+ bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
+ bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
+ /* offset to ensure DCD off on silent input */
+ bc->modem.ser12.dcd_sum0 = 2;
+ bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc);
+ }
+ bc->modem.ser12.dcd_time--;
+}
+
+/* --------------------------------------------------------------------- */
+
+static irqreturn_t ser12_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct baycom_state *bc = netdev_priv(dev);
+ unsigned char iir;
+
+ if (!dev || !bc || bc->hdrv.magic != HDLCDRV_MAGIC)
+ return IRQ_NONE;
+ /* fast way out */
+ if ((iir = inb(IIR(dev->base_addr))) & 1)
+ return IRQ_NONE;
+ baycom_int_freq(bc);
+ do {
+ switch (iir & 6) {
+ case 6:
+ inb(LSR(dev->base_addr));
+ break;
+
+ case 4:
+ inb(RBR(dev->base_addr));
+ break;
+
+ case 2:
+ /*
+ * check if transmitter active
+ */
+ if (hdlcdrv_ptt(&bc->hdrv))
+ ser12_tx(dev, bc);
+ else {
+ ser12_rx(dev, bc);
+ bc->modem.arb_divider--;
+ }
+ outb(0x00, THR(dev->base_addr));
+ break;
+
+ default:
+ inb(MSR(dev->base_addr));
+ break;
+ }
+ iir = inb(IIR(dev->base_addr));
+ } while (!(iir & 1));
+ if (bc->modem.arb_divider <= 0) {
+ bc->modem.arb_divider = SER12_ARB_DIVIDER(bc);
+ local_irq_enable();
+ hdlcdrv_arbitrate(dev, &bc->hdrv);
+ }
+ local_irq_enable();
+ hdlcdrv_transmitter(dev, &bc->hdrv);
+ hdlcdrv_receiver(dev, &bc->hdrv);
+ local_irq_disable();
+ return IRQ_HANDLED;
+}
+
+/* --------------------------------------------------------------------- */
+
+enum uart { c_uart_unknown, c_uart_8250,
+ c_uart_16450, c_uart_16550, c_uart_16550A};
+static const char *uart_str[] = {
+ "unknown", "8250", "16450", "16550", "16550A"
+};
+
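+/*
+ * probe the UART type: check basic UART behaviour via MCR loopback,
+ * read the FIFO status bits in IIR to tell a 16550A/16550 from a 16450,
+ * and use the scratch register to distinguish a 16450 from an 8250
+ */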
+static enum uart ser12_check_uart(unsigned int iobase)
+{
+ unsigned char b1,b2,b3;
+ enum uart u;
+ enum uart uart_tab[] =
+ { c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A };
+
+ b1 = inb(MCR(iobase));
+ outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
+ b2 = inb(MSR(iobase));
+ outb(0x1a, MCR(iobase));
+ b3 = inb(MSR(iobase)) & 0xf0;
+ outb(b1, MCR(iobase)); /* restore old values */
+ outb(b2, MSR(iobase));
+ if (b3 != 0x90)
+ return c_uart_unknown;
+ inb(RBR(iobase));
+ inb(RBR(iobase));
+ outb(0x01, FCR(iobase)); /* enable FIFOs */
+ u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
+ if (u == c_uart_16450) {
+ outb(0x5a, SCR(iobase));
+ b1 = inb(SCR(iobase));
+ outb(0xa5, SCR(iobase));
+ b2 = inb(SCR(iobase));
+ if ((b1 != 0x5a) || (b2 != 0xa5))
+ u = c_uart_8250;
+ }
+ return u;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int ser12_open(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+ enum uart u;
+
+ if (!dev || !bc)
+ return -ENXIO;
+ if (!dev->base_addr || dev->base_addr > 0x1000-SER12_EXTENT ||
+ dev->irq < 2 || dev->irq > 15)
+ return -ENXIO;
+ if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser12"))
+ return -EACCES;
+ memset(&bc->modem, 0, sizeof(bc->modem));
+ bc->hdrv.par.bitrate = 1200;
+ if ((u = ser12_check_uart(dev->base_addr)) == c_uart_unknown) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EIO;
+ }
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
+ "baycom_ser12", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+ }
+ /*
+ * enable transmitter empty interrupt
+ */
+ outb(2, IER(dev->base_addr));
+ /*
+ * set the SIO to 6 Bits/character and 19200 or 28800 baud, so that
+ * we get exactly (hopefully) 2 or 3 interrupts per radio symbol,
+ * depending on the usage of the software DCD routine
+ */
+ ser12_set_divisor(dev, bc->opt_dcd ? 6 : 4);
+ printk(KERN_INFO "%s: ser12 at iobase 0x%lx irq %u uart %s\n",
+ bc_drvname, dev->base_addr, dev->irq, uart_str[u]);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int ser12_close(struct net_device *dev)
+{
+ struct baycom_state *bc = netdev_priv(dev);
+
+ if (!dev || !bc)
+ return -EINVAL;
+ /*
+ * disable interrupts
+ */
+ outb(0, IER(dev->base_addr));
+ outb(1, MCR(dev->base_addr));
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, SER12_EXTENT);
+ printk(KERN_INFO "%s: close ser12 at iobase 0x%lx irq %u\n",
+ bc_drvname, dev->base_addr, dev->irq);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== hdlcdrv driver interface =========================
+ */
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd);
+
+/* --------------------------------------------------------------------- */
+
+static struct hdlcdrv_ops ser12_ops = {
+ .drvname = bc_drvname,
+ .drvinfo = bc_drvinfo,
+ .open = ser12_open,
+ .close = ser12_close,
+ .ioctl = baycom_ioctl,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_setmode(struct baycom_state *bc, const char *modestr)
+{
+ if (strchr(modestr, '*'))
+ bc->opt_dcd = 0;
+ else if (strchr(modestr, '+'))
+ bc->opt_dcd = -1;
+ else if (strchr(modestr, '@'))
+ bc->opt_dcd = -2;
+ else
+ bc->opt_dcd = 1;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+ struct hdlcdrv_ioctl *hi, int cmd)
+{
+ struct baycom_state *bc;
+ struct baycom_ioctl bi;
+
+ if (!dev)
+ return -EINVAL;
+
+ bc = netdev_priv(dev);
+ BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -ENOIOCTLCMD;
+ switch (hi->cmd) {
+ default:
+ break;
+
+ case HDLCDRVCTL_GETMODE:
+ strcpy(hi->data.modename, "ser12");
+ if (bc->opt_dcd <= 0)
+ strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_SETMODE:
+ if (netif_running(dev) || !capable(CAP_NET_ADMIN))
+ return -EACCES;
+ hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
+ return baycom_setmode(bc, hi->data.modename);
+
+ case HDLCDRVCTL_MODELIST:
+ strcpy(hi->data.modename, "ser12");
+ if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ return -EFAULT;
+ return 0;
+
+ case HDLCDRVCTL_MODEMPARMASK:
+ return HDLCDRV_PARMASK_IOBASE | HDLCDRV_PARMASK_IRQ;
+
+ }
+
+ if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ return -EFAULT;
+ switch (bi.cmd) {
+ default:
+ return -ENOIOCTLCMD;
+
+#ifdef BAYCOM_DEBUG
+ case BAYCOMCTL_GETDEBUG:
+ bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
+ bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
+ bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
+ break;
+#endif /* BAYCOM_DEBUG */
+
+ }
+ if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ return -EFAULT;
+ return 0;
+
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * command line settable parameters
+ */
+static char *mode[NR_PORTS] = { "ser12*", };
+static int iobase[NR_PORTS] = { 0x3f8, };
+static int irq[NR_PORTS] = { 4, };
+
+module_param_array(mode, charp, NULL, 0);
+MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD");
+module_param_array(iobase, int, NULL, 0);
+MODULE_PARM_DESC(iobase, "baycom io base address");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "baycom irq number");
+
+MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
+MODULE_DESCRIPTION("Baycom ser12 half duplex amateur radio modem driver");
+MODULE_LICENSE("GPL");
+
+/* --------------------------------------------------------------------- */
+
+static int __init init_baycomserhdx(void)
+{
+ int i, found = 0;
+ char set_hw = 1;
+
+ printk(bc_drvinfo);
+ /*
+ * register net devices
+ */
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev;
+ struct baycom_state *bc;
+ char ifname[IFNAMSIZ];
+
+ sprintf(ifname, "bcsh%d", i);
+
+ if (!mode[i])
+ set_hw = 0;
+ if (!set_hw)
+ iobase[i] = irq[i] = 0;
+
+ dev = hdlcdrv_register(&ser12_ops,
+ sizeof(struct baycom_state),
+ ifname, iobase[i], irq[i], 0);
+ if (IS_ERR(dev))
+ break;
+
+ bc = netdev_priv(dev);
+ if (set_hw && baycom_setmode(bc, mode[i]))
+ set_hw = 0;
+ found++;
+ baycom_device[i] = dev;
+ }
+
+ if (!found)
+ return -ENXIO;
+ return 0;
+}
+
+static void __exit cleanup_baycomserhdx(void)
+{
+ int i;
+
+ for(i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = baycom_device[i];
+
+ if (dev)
+ hdlcdrv_unregister(dev);
+ }
+}
+
+module_init(init_baycomserhdx);
+module_exit(cleanup_baycomserhdx);
+
+/* --------------------------------------------------------------------- */
+
+#ifndef MODULE
+
+/*
+ * format: baycom_ser_hdx=io,irq,mode
+ * mode: ser12 hardware DCD
+ * ser12* software DCD
+ * ser12@ hardware/software DCD, i.e. no explicit DCD signal but hardware
+ * mutes audio input to the modem
+ * ser12+ hardware DCD, inverted signal at DCD pin
+ */
+
+static int __init baycom_ser_hdx_setup(char *str)
+{
+ static unsigned nr_dev;
+ int ints[3];
+
+ if (nr_dev >= NR_PORTS)
+ return 0;
+ str = get_options(str, 3, ints);
+ if (ints[0] < 2)
+ return 0;
+ mode[nr_dev] = str;
+ iobase[nr_dev] = ints[1];
+ irq[nr_dev] = ints[2];
+ nr_dev++;
+ return 1;
+}
+
+__setup("baycom_ser_hdx=", baycom_ser_hdx_setup);
+
+#endif /* MODULE */
+/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
new file mode 100644
index 000000000000..ef1a359e2273
--- /dev/null
+++ b/drivers/net/hamradio/bpqether.c
@@ -0,0 +1,643 @@
+/*
+ * G8BPQ compatible "AX.25 via ethernet" driver release 004
+ *
+ * This code REQUIRES 2.0.0 or higher/ NET3.029
+ *
+ * This module:
+ * This module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is a "pseudo" network driver to allow AX.25 over Ethernet
+ * using G8BPQ encapsulation. It has been extracted from the protocol
+ * implementation because
+ *
+ * - things got unreadable within the protocol stack
+ * - to cure the protocol stack from "feature-ism"
+ * - a protocol implementation shouldn't need to know on
+ * which hardware it is running
+ * - user-level programs like the AX.25 utilities shouldn't
+ * need to know about the hardware.
+ * - IP over ethernet encapsulated AX.25 was impossible
+ * - rxecho.c did not work
+ * - to have room for extensions
+ *	- it just deserves to "live" as its own driver
+ *
+ * This driver can use any ethernet destination address, and can be
+ * limited to accept frames from one dedicated ethernet card only.
+ *
+ * Note that the driver sets up the BPQ devices automagically on
+ * startup or (if started before the "insmod" of an ethernet device)
+ * on "ifconfig up". It hopefully will remove the BPQ on "rmmod"ing
+ * the ethernet device (in fact: as soon as another ethernet or bpq
+ * device gets "ifconfig"ured).
+ *
+ * I have heard that several people are thinking of experiments
+ * with highspeed packet radio using existing ethernet cards.
+ * Well, this driver is prepared for this purpose, just add
+ * your tx key control and a txdelay / tailtime algorithm,
+ * probably some buffering, and /voila/...
+ *
+ * History
+ * BPQ 001 Joerg(DL1BKE) Extracted BPQ code from AX.25
+ * protocol stack and added my own
+ * yet existing patches
+ * BPQ 002 Joerg(DL1BKE) Scan network device list on
+ * startup.
+ * BPQ 003 Joerg(DL1BKE) Ethernet destination address
+ * and accepted source address
+ * can be configured by an ioctl()
+ * call.
+ * Fixed to match Linux networking
+ * changes - 2.1.15.
+ * BPQ 004 Joerg(DL1BKE) Fixed to not lock up on ifconfig.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/net.h>
+#include <net/ax25.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+
+#include <net/ip.h>
+#include <net/arp.h>
+
+#include <linux/bpqether.h>
+
+static char banner[] __initdata = KERN_INFO "AX.25: bpqether driver version 004\n";
+
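+/* AX.25 addresses store each callsign character shifted left by one bit */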
+static unsigned char ax25_bcast[AX25_ADDR_LEN] =
+ {'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
+static unsigned char ax25_defaddr[AX25_ADDR_LEN] =
+ {'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
+
+static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
+
+static char bpq_eth_addr[6];
+
+static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *);
+static int bpq_device_event(struct notifier_block *, unsigned long, void *);
+static const char *bpq_print_ethaddr(const unsigned char *);
+
+static struct packet_type bpq_packet_type = {
+ .type = __constant_htons(ETH_P_BPQ),
+ .func = bpq_rcv,
+};
+
+static struct notifier_block bpq_dev_notifier = {
+ .notifier_call =bpq_device_event,
+};
+
+
+struct bpqdev {
+ struct list_head bpq_list; /* list of bpq devices chain */
+ struct net_device *ethdev; /* link to ethernet device */
+ struct net_device *axdev; /* bpq device (bpq#) */
+ struct net_device_stats stats; /* some statistics */
+ char dest_addr[6]; /* ether destination address */
+ char acpt_addr[6]; /* accept ether frames from this address only */
+};
+
+static LIST_HEAD(bpq_devices);
+
+
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Get the ethernet device for a BPQ device
+ */
+static inline struct net_device *bpq_get_ether_dev(struct net_device *dev)
+{
+ struct bpqdev *bpq = netdev_priv(dev);
+
+ return bpq ? bpq->ethdev : NULL;
+}
+
+/*
+ * Get the BPQ device for the ethernet device
+ */
+static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
+{
+ struct bpqdev *bpq;
+
+ list_for_each_entry(bpq, &bpq_devices, bpq_list) {
+ if (bpq->ethdev == dev)
+ return bpq->axdev;
+ }
+ return NULL;
+}
+
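+/* an ethernet device, for our purposes, is any ARPHRD_ETHER interface except "dummy" */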
+static inline int dev_is_ethdev(struct net_device *dev)
+{
+ return (
+ dev->type == ARPHRD_ETHER
+ && strncmp(dev->name, "dummy", 5)
+ );
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Receive an AX.25 frame via an ethernet interface.
+ */
+static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
+{
+ int len;
+ char * ptr;
+ struct ethhdr *eth;
+ struct bpqdev *bpq;
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
+ goto drop;
+
+ rcu_read_lock();
+ dev = bpq_get_ax25_dev(dev);
+
+ if (dev == NULL || !netif_running(dev))
+ goto drop_unlock;
+
+ /*
+ * if we want to accept frames from just one ethernet device
+ * we check the source address of the sender.
+ */
+
+ bpq = netdev_priv(dev);
+
+ eth = eth_hdr(skb);
+
+ if (!(bpq->acpt_addr[0] & 0x01) &&
+ memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
+ goto drop_unlock;
+
+ if (skb_cow(skb, sizeof(struct ethhdr)))
+ goto drop_unlock;
+
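+	/*
+	 * the first two bytes are a little-endian length field, written by
+	 * the transmit side as payload length + 5 (see bpq_xmit)
+	 */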
+ len = skb->data[0] + skb->data[1] * 256 - 5;
+
+ skb_pull(skb, 2); /* Remove the length bytes */
+ skb_trim(skb, len); /* Set the length of the data */
+
+ bpq->stats.rx_packets++;
+ bpq->stats.rx_bytes += len;
+
+ ptr = skb_push(skb, 1);
+ *ptr = 0;
+
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+unlock:
+
+ rcu_read_unlock();
+
+ return 0;
+drop_unlock:
+ kfree_skb(skb);
+ goto unlock;
+
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Send an AX.25 frame via an ethernet interface
+ */
+static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *newskb;
+ unsigned char *ptr;
+ struct bpqdev *bpq;
+ int size;
+
+ /*
+ * Just to be *really* sure not to send anything if the interface
+	 * is down; the ethernet device may have gone away.
+ */
+ if (!netif_running(dev)) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ skb_pull(skb, 1);
+ size = skb->len;
+
+ /*
+ * The AX.25 code leaves enough room for the ethernet header, but
+ * sendto() does not.
+ */
+ if (skb_headroom(skb) < AX25_BPQ_HEADER_LEN) { /* Ough! */
+ if ((newskb = skb_realloc_headroom(skb, AX25_BPQ_HEADER_LEN)) == NULL) {
+ printk(KERN_WARNING "bpqether: out of memory\n");
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ if (skb->sk != NULL)
+ skb_set_owner_w(newskb, skb->sk);
+
+ kfree_skb(skb);
+ skb = newskb;
+ }
+
+ skb->protocol = htons(ETH_P_AX25);
+
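+	/* prepend the BPQ length field: payload length + 5, little endian */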
+ ptr = skb_push(skb, 2);
+
+ *ptr++ = (size + 5) % 256;
+ *ptr++ = (size + 5) / 256;
+
+ bpq = netdev_priv(dev);
+
+ if ((dev = bpq_get_ether_dev(dev)) == NULL) {
+ bpq->stats.tx_dropped++;
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ skb->dev = dev;
+ skb->nh.raw = skb->data;
+ dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
+ bpq->stats.tx_packets++;
+ bpq->stats.tx_bytes+=skb->len;
+
+ dev_queue_xmit(skb);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+/*
+ * Statistics
+ */
+static struct net_device_stats *bpq_get_stats(struct net_device *dev)
+{
+ struct bpqdev *bpq = netdev_priv(dev);
+
+ return &bpq->stats;
+}
+
+/*
+ * Set AX.25 callsign
+ */
+static int bpq_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = (struct sockaddr *)addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* Ioctl commands
+ *
+ * SIOCSBPQETHOPT reserved for enhancements
+ * SIOCSBPQETHADDR set the destination and accepted
+ * source ethernet address (broadcast
+ * or multicast: accept all)
+ */
+static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct bpq_ethaddr __user *ethaddr = ifr->ifr_data;
+ struct bpqdev *bpq = netdev_priv(dev);
+ struct bpq_req req;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case SIOCSBPQETHOPT:
+ if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
+ return -EFAULT;
+ switch (req.cmd) {
+ case SIOCGBPQETHPARAM:
+ case SIOCSBPQETHPARAM:
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case SIOCSBPQETHADDR:
+ if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN))
+ return -EFAULT;
+ if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN))
+ return -EFAULT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
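/*
 * A hedged user-space sketch of the SIOCSBPQETHADDR request handled above.
 * The ioctl number and structure are redeclared here for illustration and
 * assumed to match the definitions earlier in this file; "bpq0" and the
 * destination address are placeholders.  Setting a broadcast/multicast
 * accept address (low bit of the first byte set) makes the driver accept
 * frames from any source, as in bpq_rcv().
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/sockios.h>

#define SIOCSBPQETHADDR (SIOCDEVPRIVATE + 1)	/* assumed value */

struct bpq_ethaddr {				/* assumed to mirror the driver */
	unsigned char destination[ETH_ALEN];
	unsigned char accept[ETH_ALEN];
};

int main(void)
{
	unsigned char dest[ETH_ALEN]  = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct bpq_ethaddr addr;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memcpy(addr.destination, dest, ETH_ALEN);
	memcpy(addr.accept, bcast, ETH_ALEN);	/* accept from everyone */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bpq0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&addr;
	if (ioctl(fd, SIOCSBPQETHADDR, &ifr) < 0)	/* needs CAP_NET_ADMIN */
		perror("SIOCSBPQETHADDR");
	return 0;
}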
+
+/*
+ * open/close a device
+ */
+static int bpq_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int bpq_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Proc filesystem
+ */
+static const char * bpq_print_ethaddr(const unsigned char *e)
+{
+ static char buf[18];
+
+ sprintf(buf, "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
+ e[0], e[1], e[2], e[3], e[4], e[5]);
+
+ return buf;
+}
+
+static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ int i = 1;
+ struct bpqdev *bpqdev;
+
+ rcu_read_lock();
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+	list_for_each_entry(bpqdev, &bpq_devices, bpq_list) {
+		if (i == *pos)
+			return bpqdev;
+		++i;
+	}
+ return NULL;
+}
+
+static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct list_head *p;
+
+ ++*pos;
+
+ if (v == SEQ_START_TOKEN)
+ p = bpq_devices.next;
+ else
+ p = ((struct bpqdev *)v)->bpq_list.next;
+
+ return (p == &bpq_devices) ? NULL
+ : list_entry(p, struct bpqdev, bpq_list);
+}
+
+static void bpq_seq_stop(struct seq_file *seq, void *v)
+{
+ rcu_read_unlock();
+}
+
+
+static int bpq_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq,
+ "dev ether destination accept from\n");
+ else {
+ const struct bpqdev *bpqdev = v;
+
+ seq_printf(seq, "%-5s %-10s %s ",
+ bpqdev->axdev->name, bpqdev->ethdev->name,
+ bpq_print_ethaddr(bpqdev->dest_addr));
+
+ seq_printf(seq, "%s\n",
+ (bpqdev->acpt_addr[0] & 0x01) ? "*"
+ : bpq_print_ethaddr(bpqdev->acpt_addr));
+
+ }
+ return 0;
+}
+
+static struct seq_operations bpq_seqops = {
+ .start = bpq_seq_start,
+ .next = bpq_seq_next,
+ .stop = bpq_seq_stop,
+ .show = bpq_seq_show,
+};
+
+static int bpq_info_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &bpq_seqops);
+}
+
+static struct file_operations bpq_info_fops = {
+ .owner = THIS_MODULE,
+ .open = bpq_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+
+/* ------------------------------------------------------------------------ */
+
+
+static void bpq_setup(struct net_device *dev)
+{
+
+ dev->hard_start_xmit = bpq_xmit;
+ dev->open = bpq_open;
+ dev->stop = bpq_close;
+ dev->set_mac_address = bpq_set_mac_address;
+ dev->get_stats = bpq_get_stats;
+ dev->do_ioctl = bpq_ioctl;
+ dev->destructor = free_netdev;
+
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_defaddr, AX25_ADDR_LEN);
+
+ dev->flags = 0;
+
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+#endif
+
+ dev->type = ARPHRD_AX25;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
+ dev->mtu = AX25_DEF_PACLEN;
+ dev->addr_len = AX25_ADDR_LEN;
+
+}
+
+/*
+ * Setup a new device.
+ */
+static int bpq_new_device(struct net_device *edev)
+{
+ int err;
+ struct net_device *ndev;
+ struct bpqdev *bpq;
+
+ ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d",
+ bpq_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+
+ bpq = netdev_priv(ndev);
+ dev_hold(edev);
+ bpq->ethdev = edev;
+ bpq->axdev = ndev;
+
+ memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
+ memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+
+ err = dev_alloc_name(ndev, ndev->name);
+ if (err < 0)
+ goto error;
+
+ err = register_netdevice(ndev);
+ if (err)
+ goto error;
+
+ /* List protected by RTNL */
+ list_add_rcu(&bpq->bpq_list, &bpq_devices);
+ return 0;
+
+ error:
+ dev_put(edev);
+ free_netdev(ndev);
+ return err;
+
+}
+
+static void bpq_free_device(struct net_device *ndev)
+{
+ struct bpqdev *bpq = netdev_priv(ndev);
+
+ dev_put(bpq->ethdev);
+ list_del_rcu(&bpq->bpq_list);
+
+ unregister_netdevice(ndev);
+}
+
+/*
+ * Handle device status changes.
+ */
+static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+
+ if (!dev_is_ethdev(dev))
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+
+ switch (event) {
+ case NETDEV_UP: /* new ethernet device -> new BPQ interface */
+ if (bpq_get_ax25_dev(dev) == NULL)
+ bpq_new_device(dev);
+ break;
+
+ case NETDEV_DOWN: /* ethernet device closed -> close BPQ interface */
+ if ((dev = bpq_get_ax25_dev(dev)) != NULL)
+ dev_close(dev);
+ break;
+
+ case NETDEV_UNREGISTER: /* ethernet device removed -> free BPQ interface */
+ if ((dev = bpq_get_ax25_dev(dev)) != NULL)
+ bpq_free_device(dev);
+ break;
+ default:
+ break;
+ }
+ rcu_read_unlock();
+
+ return NOTIFY_DONE;
+}
+
+
+/* ------------------------------------------------------------------------ */
+
+/*
+ * Initialize driver. To be called from af_ax25 if not compiled as a
+ * module
+ */
+static int __init bpq_init_driver(void)
+{
+#ifdef CONFIG_PROC_FS
+ if (!proc_net_fops_create("bpqether", S_IRUGO, &bpq_info_fops)) {
+ printk(KERN_ERR
+ "bpq: cannot create /proc/net/bpqether entry.\n");
+ return -ENOENT;
+ }
+#endif /* CONFIG_PROC_FS */
+
+ dev_add_pack(&bpq_packet_type);
+
+ register_netdevice_notifier(&bpq_dev_notifier);
+
+ printk(banner);
+
+ return 0;
+}
+
+static void __exit bpq_cleanup_driver(void)
+{
+ struct bpqdev *bpq;
+
+ dev_remove_pack(&bpq_packet_type);
+
+ unregister_netdevice_notifier(&bpq_dev_notifier);
+
+ proc_net_remove("bpqether");
+
+ rtnl_lock();
+ while (!list_empty(&bpq_devices)) {
+ bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list);
+ bpq_free_device(bpq->axdev);
+ }
+ rtnl_unlock();
+}
+
+MODULE_AUTHOR("Joerg Reuter DL1BKE <jreuter@yaina.de>");
+MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet");
+MODULE_LICENSE("GPL");
+module_init(bpq_init_driver);
+module_exit(bpq_cleanup_driver);
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
new file mode 100644
index 000000000000..f3269b70a8c5
--- /dev/null
+++ b/drivers/net/hamradio/dmascc.c
@@ -0,0 +1,1493 @@
+/*
+ * Driver for high-speed SCC boards (those with DMA support)
+ * Copyright (C) 1997-2000 Klaus Kudielka
+ *
+ * S5SCC/DMA support by Janko Koleznik S52HI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <net/ax25.h>
+#include "z8530.h"
+
+
+/* Number of buffers per channel */
+
+#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
+#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
+#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
+
+
+/* Cards supported */
+
+#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
+ 0, 8, 1843200, 3686400 }
+#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
+ 0, 8, 3686400, 7372800 }
+#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
+ 0, 4, 6144000, 6144000 }
+#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
+ 0, 8, 4915200, 9830400 }
+
+#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
+
+#define TMR_0_HZ 25600 /* Frequency of timer 0 */
+
+#define TYPE_PI 0
+#define TYPE_PI2 1
+#define TYPE_TWIN 2
+#define TYPE_S5 3
+#define NUM_TYPES 4
+
+#define MAX_NUM_DEVS 32
+
+
+/* SCC chips supported */
+
+#define Z8530 0
+#define Z85C30 1
+#define Z85230 2
+
+#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
+
+
+/* I/O registers */
+
+/* 8530 registers relative to card base */
+#define SCCB_CMD 0x00
+#define SCCB_DATA 0x01
+#define SCCA_CMD 0x02
+#define SCCA_DATA 0x03
+
+/* 8253/8254 registers relative to card base */
+#define TMR_CNT0 0x00
+#define TMR_CNT1 0x01
+#define TMR_CNT2 0x02
+#define TMR_CTRL 0x03
+
+/* Additional PI/PI2 registers relative to card base */
+#define PI_DREQ_MASK 0x04
+
+/* Additional PackeTwin registers relative to card base */
+#define TWIN_INT_REG 0x08
+#define TWIN_CLR_TMR1 0x09
+#define TWIN_CLR_TMR2 0x0a
+#define TWIN_SPARE_1 0x0b
+#define TWIN_DMA_CFG 0x08
+#define TWIN_SERIAL_CFG 0x09
+#define TWIN_DMA_CLR_FF 0x0a
+#define TWIN_SPARE_2 0x0b
+
+
+/* PackeTwin I/O register values */
+
+/* INT_REG */
+#define TWIN_SCC_MSK 0x01
+#define TWIN_TMR1_MSK 0x02
+#define TWIN_TMR2_MSK 0x04
+#define TWIN_INT_MSK 0x07
+
+/* SERIAL_CFG */
+#define TWIN_DTRA_ON 0x01
+#define TWIN_DTRB_ON 0x02
+#define TWIN_EXTCLKA 0x04
+#define TWIN_EXTCLKB 0x08
+#define TWIN_LOOPA_ON 0x10
+#define TWIN_LOOPB_ON 0x20
+#define TWIN_EI 0x80
+
+/* DMA_CFG */
+#define TWIN_DMA_HDX_T1 0x08
+#define TWIN_DMA_HDX_R1 0x0a
+#define TWIN_DMA_HDX_T3 0x14
+#define TWIN_DMA_HDX_R3 0x16
+#define TWIN_DMA_FDX_T3R1 0x1b
+#define TWIN_DMA_FDX_T1R3 0x1d
+
+
+/* Status values */
+
+#define IDLE 0
+#define TX_HEAD 1
+#define TX_DATA 2
+#define TX_PAUSE 3
+#define TX_TAIL 4
+#define RTS_OFF 5
+#define WAIT 6
+#define DCD_ON 7
+#define RX_ON 8
+#define DCD_OFF 9
+
+
+/* Ioctls */
+
+#define SIOCGSCCPARAM SIOCDEVPRIVATE
+#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
+
+
+/* Data types */
+
+struct scc_param {
+ int pclk_hz; /* frequency of BRG input (don't change) */
+ int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
+ int nrzi; /* 0 (nrz), 1 (nrzi) */
+ int clocks; /* see dmascc_cfg documentation */
+ int txdelay; /* [1/TMR_0_HZ] */
+ int txtimeout; /* [1/HZ] */
+ int txtail; /* [1/TMR_0_HZ] */
+ int waittime; /* [1/TMR_0_HZ] */
+ int slottime; /* [1/TMR_0_HZ] */
+ int persist; /* 1 ... 256 */
+ int dma; /* -1 (disable), 0, 1, 3 */
+ int txpause; /* [1/TMR_0_HZ] */
+ int rtsoff; /* [1/TMR_0_HZ] */
+ int dcdon; /* [1/TMR_0_HZ] */
+ int dcdoff; /* [1/TMR_0_HZ] */
+};
+
+struct scc_hardware {
+ char *name;
+ int io_region;
+ int io_delta;
+ int io_size;
+ int num_devs;
+ int scc_offset;
+ int tmr_offset;
+ int tmr_hz;
+ int pclk_hz;
+};
+
+struct scc_priv {
+ int type;
+ int chip;
+ struct net_device *dev;
+ struct scc_info *info;
+ struct net_device_stats stats;
+ int channel;
+ int card_base, scc_cmd, scc_data;
+ int tmr_cnt, tmr_ctrl, tmr_mode;
+ struct scc_param param;
+ char rx_buf[NUM_RX_BUF][BUF_SIZE];
+ int rx_len[NUM_RX_BUF];
+ int rx_ptr;
+ struct work_struct rx_work;
+ int rx_head, rx_tail, rx_count;
+ int rx_over;
+ char tx_buf[NUM_TX_BUF][BUF_SIZE];
+ int tx_len[NUM_TX_BUF];
+ int tx_ptr;
+ int tx_head, tx_tail, tx_count;
+ int state;
+ unsigned long tx_start;
+ int rr0;
+ spinlock_t *register_lock; /* Per scc_info */
+ spinlock_t ring_lock;
+};
+
+struct scc_info {
+ int irq_used;
+ int twin_serial_cfg;
+ struct net_device *dev[2];
+ struct scc_priv priv[2];
+ struct scc_info *next;
+ spinlock_t register_lock; /* Per device register lock */
+};
+
+
+/* Function declarations */
+static int setup_adapter(int card_base, int type, int n) __init;
+
+static void write_scc(struct scc_priv *priv, int reg, int val);
+static void write_scc_data(struct scc_priv *priv, int val, int fast);
+static int read_scc(struct scc_priv *priv, int reg);
+static int read_scc_data(struct scc_priv *priv);
+
+static int scc_open(struct net_device *dev);
+static int scc_close(struct net_device *dev);
+static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *scc_get_stats(struct net_device *dev);
+static int scc_set_mac_address(struct net_device *dev, void *sa);
+
+static inline void tx_on(struct scc_priv *priv);
+static inline void rx_on(struct scc_priv *priv);
+static inline void rx_off(struct scc_priv *priv);
+static void start_timer(struct scc_priv *priv, int t, int r15);
+static inline unsigned char random(void);
+
+static inline void z8530_isr(struct scc_info *info);
+static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
+static void rx_isr(struct scc_priv *priv);
+static void special_condition(struct scc_priv *priv, int rc);
+static void rx_bh(void *arg);
+static void tx_isr(struct scc_priv *priv);
+static void es_isr(struct scc_priv *priv);
+static void tm_isr(struct scc_priv *priv);
+
+
+/* Initialization variables */
+
+static int io[MAX_NUM_DEVS] __initdata = { 0, };
+
+/* Beware! hw[] is also used in cleanup_module(). */
+static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
+static char ax25_broadcast[7] __initdata =
+ { 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1,
+'0' << 1 };
+static char ax25_test[7] __initdata =
+ { 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1,
+'1' << 1 };
+
+
+/* Global variables */
+
+static struct scc_info *first;
+static unsigned long rand;
+
+
+MODULE_AUTHOR("Klaus Kudielka");
+MODULE_DESCRIPTION("Driver for high-speed SCC boards");
+MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
+MODULE_LICENSE("GPL");
+
+static void __exit dmascc_exit(void)
+{
+ int i;
+ struct scc_info *info;
+
+ while (first) {
+ info = first;
+
+ /* Unregister devices */
+ for (i = 0; i < 2; i++)
+ unregister_netdev(info->dev[i]);
+
+ /* Reset board */
+ if (info->priv[0].type == TYPE_TWIN)
+ outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
+ write_scc(&info->priv[0], R9, FHWRES);
+ release_region(info->dev[0]->base_addr,
+ hw[info->priv[0].type].io_size);
+
+ for (i = 0; i < 2; i++)
+ free_netdev(info->dev[i]);
+
+ /* Free memory */
+ first = info->next;
+ kfree(info);
+ }
+}
+
+#ifndef MODULE
+void __init dmascc_setup(char *str, int *ints)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
+ io[i] = ints[i + 1];
+}
+#endif
+
+static int __init dmascc_init(void)
+{
+ int h, i, j, n;
+ int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
+ t1[MAX_NUM_DEVS];
+ unsigned t_val;
+ unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
+ counting[MAX_NUM_DEVS];
+
+ /* Initialize random number generator */
+ rand = jiffies;
+ /* Cards found = 0 */
+ n = 0;
+ /* Warning message */
+ if (!io[0])
+ printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
+
+ /* Run autodetection for each card type */
+ for (h = 0; h < NUM_TYPES; h++) {
+
+ if (io[0]) {
+ /* User-specified I/O address regions */
+ for (i = 0; i < hw[h].num_devs; i++)
+ base[i] = 0;
+ for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
+ j = (io[i] -
+ hw[h].io_region) / hw[h].io_delta;
+ if (j >= 0 && j < hw[h].num_devs
+ && hw[h].io_region +
+ j * hw[h].io_delta == io[i]) {
+ base[j] = io[i];
+ }
+ }
+ } else {
+ /* Default I/O address regions */
+ for (i = 0; i < hw[h].num_devs; i++) {
+ base[i] =
+ hw[h].io_region + i * hw[h].io_delta;
+ }
+ }
+
+ /* Check valid I/O address regions */
+ for (i = 0; i < hw[h].num_devs; i++)
+ if (base[i]) {
+ if (!request_region
+ (base[i], hw[h].io_size, "dmascc"))
+ base[i] = 0;
+ else {
+ tcmd[i] =
+ base[i] + hw[h].tmr_offset +
+ TMR_CTRL;
+ t0[i] =
+ base[i] + hw[h].tmr_offset +
+ TMR_CNT0;
+ t1[i] =
+ base[i] + hw[h].tmr_offset +
+ TMR_CNT1;
+ }
+ }
+
+ /* Start timers */
+ for (i = 0; i < hw[h].num_devs; i++)
+ if (base[i]) {
+ /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
+ outb(0x36, tcmd[i]);
+ outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
+ t0[i]);
+ outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
+ t0[i]);
+ /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
+ outb(0x70, tcmd[i]);
+ outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
+ outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
+ start[i] = jiffies;
+ delay[i] = 0;
+ counting[i] = 1;
+ /* Timer 2: LSB+MSB, Mode 0 */
+ outb(0xb0, tcmd[i]);
+ }
+ time = jiffies;
+ /* Wait until counter registers are loaded */
+ udelay(2000000 / TMR_0_HZ);
+
+ /* Timing loop */
+ while (jiffies - time < 13) {
+ for (i = 0; i < hw[h].num_devs; i++)
+ if (base[i] && counting[i]) {
+ /* Read back Timer 1: latch; read LSB; read MSB */
+ outb(0x40, tcmd[i]);
+ t_val =
+ inb(t1[i]) + (inb(t1[i]) << 8);
+ /* Also check whether counter did wrap */
+ if (t_val == 0
+ || t_val > TMR_0_HZ / HZ * 10)
+ counting[i] = 0;
+ delay[i] = jiffies - start[i];
+ }
+ }
+
+ /* Evaluate measurements */
+ for (i = 0; i < hw[h].num_devs; i++)
+ if (base[i]) {
+ if ((delay[i] >= 9 && delay[i] <= 11) &&
+ /* Ok, we have found an adapter */
+ (setup_adapter(base[i], h, n) == 0))
+ n++;
+ else
+ release_region(base[i],
+ hw[h].io_size);
+ }
+
+ } /* NUM_TYPES */
+
+ /* If any adapter was successfully initialized, return ok */
+ if (n)
+ return 0;
+
+ /* If no adapter found, return error */
+ printk(KERN_INFO "dmascc: no adapters found\n");
+ return -EIO;
+}
+
+module_init(dmascc_init);
+module_exit(dmascc_exit);
+
+static void dev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_AX25;
+ dev->hard_header_len = 73;
+ dev->mtu = 1500;
+ dev->addr_len = 7;
+ dev->tx_queue_len = 64;
+ memcpy(dev->broadcast, ax25_broadcast, 7);
+ memcpy(dev->dev_addr, ax25_test, 7);
+}
+
+static int __init setup_adapter(int card_base, int type, int n)
+{
+ int i, irq, chip;
+ struct scc_info *info;
+ struct net_device *dev;
+ struct scc_priv *priv;
+ unsigned long time;
+ unsigned int irqs;
+ int tmr_base = card_base + hw[type].tmr_offset;
+ int scc_base = card_base + hw[type].scc_offset;
+ char *chipnames[] = CHIPNAMES;
+
+ /* Allocate memory */
+ info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
+ if (!info) {
+ printk(KERN_ERR "dmascc: "
+ "could not allocate memory for %s at %#3x\n",
+ hw[type].name, card_base);
+ goto out;
+ }
+
+ /* Initialize what is necessary for write_scc and write_scc_data */
+ memset(info, 0, sizeof(struct scc_info));
+
+ info->dev[0] = alloc_netdev(0, "", dev_setup);
+ if (!info->dev[0]) {
+ printk(KERN_ERR "dmascc: "
+ "could not allocate memory for %s at %#3x\n",
+ hw[type].name, card_base);
+ goto out1;
+ }
+
+ info->dev[1] = alloc_netdev(0, "", dev_setup);
+ if (!info->dev[1]) {
+ printk(KERN_ERR "dmascc: "
+ "could not allocate memory for %s at %#3x\n",
+ hw[type].name, card_base);
+ goto out2;
+ }
+ spin_lock_init(&info->register_lock);
+
+ priv = &info->priv[0];
+ priv->type = type;
+ priv->card_base = card_base;
+ priv->scc_cmd = scc_base + SCCA_CMD;
+ priv->scc_data = scc_base + SCCA_DATA;
+ priv->register_lock = &info->register_lock;
+
+ /* Reset SCC */
+ write_scc(priv, R9, FHWRES | MIE | NV);
+
+ /* Determine type of chip by enabling SDLC/HDLC enhancements */
+ write_scc(priv, R15, SHDLCE);
+ if (!read_scc(priv, R15)) {
+ /* WR7' not present. This is an ordinary Z8530 SCC. */
+ chip = Z8530;
+ } else {
+ /* Put one character in TX FIFO */
+ write_scc_data(priv, 0, 0);
+ if (read_scc(priv, R0) & Tx_BUF_EMP) {
+ /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
+ chip = Z85230;
+ } else {
+ /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
+ chip = Z85C30;
+ }
+ }
+ write_scc(priv, R15, 0);
+
+ /* Start IRQ auto-detection */
+ irqs = probe_irq_on();
+
+ /* Enable interrupts */
+ if (type == TYPE_TWIN) {
+ outb(0, card_base + TWIN_DMA_CFG);
+ inb(card_base + TWIN_CLR_TMR1);
+ inb(card_base + TWIN_CLR_TMR2);
+ info->twin_serial_cfg = TWIN_EI;
+ outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
+ } else {
+ write_scc(priv, R15, CTSIE);
+ write_scc(priv, R0, RES_EXT_INT);
+ write_scc(priv, R1, EXT_INT_ENAB);
+ }
+
+ /* Start timer */
+ outb(1, tmr_base + TMR_CNT1);
+ outb(0, tmr_base + TMR_CNT1);
+
+ /* Wait and detect IRQ */
+ time = jiffies;
+ while (jiffies - time < 2 + HZ / TMR_0_HZ);
+ irq = probe_irq_off(irqs);
+
+ /* Clear pending interrupt, disable interrupts */
+ if (type == TYPE_TWIN) {
+ inb(card_base + TWIN_CLR_TMR1);
+ } else {
+ write_scc(priv, R1, 0);
+ write_scc(priv, R15, 0);
+ write_scc(priv, R0, RES_EXT_INT);
+ }
+
+ if (irq <= 0) {
+ printk(KERN_ERR
+ "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
+ hw[type].name, card_base, irq);
+ goto out3;
+ }
+
+ /* Set up data structures */
+ for (i = 0; i < 2; i++) {
+ dev = info->dev[i];
+ priv = &info->priv[i];
+ priv->type = type;
+ priv->chip = chip;
+ priv->dev = dev;
+ priv->info = info;
+ priv->channel = i;
+ spin_lock_init(&priv->ring_lock);
+ priv->register_lock = &info->register_lock;
+ priv->card_base = card_base;
+ priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
+ priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
+ priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
+ priv->tmr_ctrl = tmr_base + TMR_CTRL;
+ priv->tmr_mode = i ? 0xb0 : 0x70;
+ priv->param.pclk_hz = hw[type].pclk_hz;
+ priv->param.brg_tc = -1;
+ priv->param.clocks = TCTRxCP | RCRTxCP;
+ priv->param.persist = 256;
+ priv->param.dma = -1;
+ INIT_WORK(&priv->rx_work, rx_bh, priv);
+ dev->priv = priv;
+ sprintf(dev->name, "dmascc%i", 2 * n + i);
+ SET_MODULE_OWNER(dev);
+ dev->base_addr = card_base;
+ dev->irq = irq;
+ dev->open = scc_open;
+ dev->stop = scc_close;
+ dev->do_ioctl = scc_ioctl;
+ dev->hard_start_xmit = scc_send_packet;
+ dev->get_stats = scc_get_stats;
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+ dev->set_mac_address = scc_set_mac_address;
+ }
+ if (register_netdev(info->dev[0])) {
+ printk(KERN_ERR "dmascc: could not register %s\n",
+ info->dev[0]->name);
+ goto out3;
+ }
+ if (register_netdev(info->dev[1])) {
+ printk(KERN_ERR "dmascc: could not register %s\n",
+ info->dev[1]->name);
+ goto out4;
+ }
+
+
+ info->next = first;
+ first = info;
+ printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
+ hw[type].name, chipnames[chip], card_base, irq);
+ return 0;
+
+ out4:
+ unregister_netdev(info->dev[0]);
+ out3:
+ if (info->priv[0].type == TYPE_TWIN)
+ outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
+ write_scc(&info->priv[0], R9, FHWRES);
+ free_netdev(info->dev[1]);
+ out2:
+ free_netdev(info->dev[0]);
+ out1:
+ kfree(info);
+ out:
+ return -1;
+}
+
+
+/* Driver functions */
+
+static void write_scc(struct scc_priv *priv, int reg, int val)
+{
+ unsigned long flags;
+ switch (priv->type) {
+ case TYPE_S5:
+ if (reg)
+ outb(reg, priv->scc_cmd);
+ outb(val, priv->scc_cmd);
+ return;
+ case TYPE_TWIN:
+ if (reg)
+ outb_p(reg, priv->scc_cmd);
+ outb_p(val, priv->scc_cmd);
+ return;
+ default:
+ spin_lock_irqsave(priv->register_lock, flags);
+ outb_p(0, priv->card_base + PI_DREQ_MASK);
+ if (reg)
+ outb_p(reg, priv->scc_cmd);
+ outb_p(val, priv->scc_cmd);
+ outb(1, priv->card_base + PI_DREQ_MASK);
+ spin_unlock_irqrestore(priv->register_lock, flags);
+ return;
+ }
+}
+
+
+static void write_scc_data(struct scc_priv *priv, int val, int fast)
+{
+ unsigned long flags;
+ switch (priv->type) {
+ case TYPE_S5:
+ outb(val, priv->scc_data);
+ return;
+ case TYPE_TWIN:
+ outb_p(val, priv->scc_data);
+ return;
+ default:
+ if (fast)
+ outb_p(val, priv->scc_data);
+ else {
+ spin_lock_irqsave(priv->register_lock, flags);
+ outb_p(0, priv->card_base + PI_DREQ_MASK);
+ outb_p(val, priv->scc_data);
+ outb(1, priv->card_base + PI_DREQ_MASK);
+ spin_unlock_irqrestore(priv->register_lock, flags);
+ }
+ return;
+ }
+}
+
+
+static int read_scc(struct scc_priv *priv, int reg)
+{
+ int rc;
+ unsigned long flags;
+ switch (priv->type) {
+ case TYPE_S5:
+ if (reg)
+ outb(reg, priv->scc_cmd);
+ return inb(priv->scc_cmd);
+ case TYPE_TWIN:
+ if (reg)
+ outb_p(reg, priv->scc_cmd);
+ return inb_p(priv->scc_cmd);
+ default:
+ spin_lock_irqsave(priv->register_lock, flags);
+ outb_p(0, priv->card_base + PI_DREQ_MASK);
+ if (reg)
+ outb_p(reg, priv->scc_cmd);
+ rc = inb_p(priv->scc_cmd);
+ outb(1, priv->card_base + PI_DREQ_MASK);
+ spin_unlock_irqrestore(priv->register_lock, flags);
+ return rc;
+ }
+}
+
+
+static int read_scc_data(struct scc_priv *priv)
+{
+ int rc;
+ unsigned long flags;
+ switch (priv->type) {
+ case TYPE_S5:
+ return inb(priv->scc_data);
+ case TYPE_TWIN:
+ return inb_p(priv->scc_data);
+ default:
+ spin_lock_irqsave(priv->register_lock, flags);
+ outb_p(0, priv->card_base + PI_DREQ_MASK);
+ rc = inb_p(priv->scc_data);
+ outb(1, priv->card_base + PI_DREQ_MASK);
+ spin_unlock_irqrestore(priv->register_lock, flags);
+ return rc;
+ }
+}
+
+
+static int scc_open(struct net_device *dev)
+{
+ struct scc_priv *priv = dev->priv;
+ struct scc_info *info = priv->info;
+ int card_base = priv->card_base;
+
+ /* Request IRQ if not already used by other channel */
+ if (!info->irq_used) {
+ if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
+ return -EAGAIN;
+ }
+ }
+ info->irq_used++;
+
+ /* Request DMA if required */
+ if (priv->param.dma >= 0) {
+ if (request_dma(priv->param.dma, "dmascc")) {
+ if (--info->irq_used == 0)
+ free_irq(dev->irq, info);
+ return -EAGAIN;
+ } else {
+ unsigned long flags = claim_dma_lock();
+ clear_dma_ff(priv->param.dma);
+ release_dma_lock(flags);
+ }
+ }
+
+ /* Initialize local variables */
+ priv->rx_ptr = 0;
+ priv->rx_over = 0;
+ priv->rx_head = priv->rx_tail = priv->rx_count = 0;
+ priv->state = IDLE;
+ priv->tx_head = priv->tx_tail = priv->tx_count = 0;
+ priv->tx_ptr = 0;
+
+ /* Reset channel */
+ write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
+ /* X1 clock, SDLC mode */
+ write_scc(priv, R4, SDLC | X1CLK);
+ /* DMA */
+ write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
+ /* 8 bit RX char, RX disable */
+ write_scc(priv, R3, Rx8);
+ /* 8 bit TX char, TX disable */
+ write_scc(priv, R5, Tx8);
+ /* SDLC address field */
+ write_scc(priv, R6, 0);
+ /* SDLC flag */
+ write_scc(priv, R7, FLAG);
+ switch (priv->chip) {
+ case Z85C30:
+ /* Select WR7' */
+ write_scc(priv, R15, SHDLCE);
+ /* Auto EOM reset */
+ write_scc(priv, R7, AUTOEOM);
+ write_scc(priv, R15, 0);
+ break;
+ case Z85230:
+ /* Select WR7' */
+ write_scc(priv, R15, SHDLCE);
+ /* The following bits are set (see 2.5.2.1):
+ - Automatic EOM reset
+ - Interrupt request if RX FIFO is half full
+ This bit should be ignored in DMA mode (according to the
+ documentation), but actually isn't. The receiver doesn't work if
+ it is set. Thus, we have to clear it in DMA mode.
+ - Interrupt/DMA request if TX FIFO is completely empty
+ a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
+ compatibility).
+ b) If cleared, DMA requests may follow each other very quickly,
+ filling up the TX FIFO.
+ Advantage: TX works even in case of high bus latency.
+ Disadvantage: Edge-triggered DMA request circuitry may miss
+ a request. No more data is delivered, resulting
+ in a TX FIFO underrun.
+ Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
+ The PackeTwin doesn't. I don't know about the PI, but let's
+ assume it behaves like the PI2.
+ */
+ if (priv->param.dma >= 0) {
+ if (priv->type == TYPE_TWIN)
+ write_scc(priv, R7, AUTOEOM | TXFIFOE);
+ else
+ write_scc(priv, R7, AUTOEOM);
+ } else {
+ write_scc(priv, R7, AUTOEOM | RXFIFOH);
+ }
+ write_scc(priv, R15, 0);
+ break;
+ }
+ /* Preset CRC, NRZ(I) encoding */
+ write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
+
+ /* Configure baud rate generator */
+ if (priv->param.brg_tc >= 0) {
+ /* Program BR generator */
+ write_scc(priv, R12, priv->param.brg_tc & 0xFF);
+ write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
+ /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
+ PackeTwin, not connected on the PI2); set DPLL source to BRG */
+ write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
+ /* Enable DPLL */
+ write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
+ } else {
+ /* Disable BR generator */
+ write_scc(priv, R14, DTRREQ | BRSRC);
+ }
+
+ /* Configure clocks */
+ if (priv->type == TYPE_TWIN) {
+ /* Disable external TX clock receiver */
+ outb((info->twin_serial_cfg &=
+ ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
+ card_base + TWIN_SERIAL_CFG);
+ }
+ write_scc(priv, R11, priv->param.clocks);
+ if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
+ /* Enable external TX clock receiver */
+ outb((info->twin_serial_cfg |=
+ (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
+ card_base + TWIN_SERIAL_CFG);
+ }
+
+ /* Configure PackeTwin */
+ if (priv->type == TYPE_TWIN) {
+ /* Assert DTR, enable interrupts */
+ outb((info->twin_serial_cfg |= TWIN_EI |
+ (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
+ card_base + TWIN_SERIAL_CFG);
+ }
+
+ /* Read current status */
+ priv->rr0 = read_scc(priv, R0);
+ /* Enable DCD interrupt */
+ write_scc(priv, R15, DCDIE);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+static int scc_close(struct net_device *dev)
+{
+ struct scc_priv *priv = dev->priv;
+ struct scc_info *info = priv->info;
+ int card_base = priv->card_base;
+
+ netif_stop_queue(dev);
+
+ if (priv->type == TYPE_TWIN) {
+ /* Drop DTR */
+ outb((info->twin_serial_cfg &=
+ (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
+ card_base + TWIN_SERIAL_CFG);
+ }
+
+ /* Reset channel, free DMA and IRQ */
+ write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
+ if (priv->param.dma >= 0) {
+ if (priv->type == TYPE_TWIN)
+ outb(0, card_base + TWIN_DMA_CFG);
+ free_dma(priv->param.dma);
+ }
+ if (--info->irq_used == 0)
+ free_irq(dev->irq, info);
+
+ return 0;
+}
+
+
+static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct scc_priv *priv = dev->priv;
+
+ switch (cmd) {
+ case SIOCGSCCPARAM:
+ if (copy_to_user
+ (ifr->ifr_data, &priv->param,
+ sizeof(struct scc_param)))
+ return -EFAULT;
+ return 0;
+ case SIOCSSCCPARAM:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (netif_running(dev))
+ return -EAGAIN;
+ if (copy_from_user
+ (&priv->param, ifr->ifr_data,
+ sizeof(struct scc_param)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+
+static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct scc_priv *priv = dev->priv;
+ unsigned long flags;
+ int i;
+
+ /* Temporarily stop the scheduler feeding us packets */
+ netif_stop_queue(dev);
+
+ /* Transfer data to DMA buffer */
+ i = priv->tx_head;
+ memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
+ priv->tx_len[i] = skb->len - 1;
+
+ /* Clear interrupts while we touch our circular buffers */
+
+ spin_lock_irqsave(&priv->ring_lock, flags);
+ /* Move the ring buffer's head */
+ priv->tx_head = (i + 1) % NUM_TX_BUF;
+ priv->tx_count++;
+
+ /* If we just filled up the last buffer, leave queue stopped.
+ The higher layers must wait until we have a DMA buffer
+ to accept the data. */
+ if (priv->tx_count < NUM_TX_BUF)
+ netif_wake_queue(dev);
+
+ /* Set new TX state */
+ if (priv->state == IDLE) {
+ /* Assert RTS, start timer */
+ priv->state = TX_HEAD;
+ priv->tx_start = jiffies;
+ write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
+ write_scc(priv, R15, 0);
+ start_timer(priv, priv->param.txdelay, 0);
+ }
+
+ /* Turn interrupts back on and free buffer */
+ spin_unlock_irqrestore(&priv->ring_lock, flags);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
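/*
 * A minimal sketch of the ring-buffer convention used by the TX and RX
 * paths above: the producer advances "head", the consumer advances "tail",
 * and the explicit "count" distinguishes a full ring from an empty one even
 * though head == tail in both cases (which is why scc_send_packet() only
 * wakes the queue while count < NUM_TX_BUF).  Names are illustrative only.
 */
#include <assert.h>

#define RING_SLOTS 2			/* stands in for NUM_TX_BUF */

struct ring {
	int head, tail, count;
};

static int ring_put(struct ring *r)	/* scc_send_packet() side */
{
	if (r->count == RING_SLOTS)
		return 0;		/* full: leave the queue stopped */
	r->head = (r->head + 1) % RING_SLOTS;
	r->count++;
	return 1;
}

static int ring_get(struct ring *r)	/* es_isr() / rx_bh() side */
{
	if (r->count == 0)
		return 0;		/* empty */
	r->tail = (r->tail + 1) % RING_SLOTS;
	r->count--;
	return 1;
}

int main(void)
{
	struct ring r = { 0, 0, 0 };

	assert(ring_put(&r) && ring_put(&r));
	assert(!ring_put(&r));		/* full, yet head == tail == 0 */
	assert(ring_get(&r) && ring_get(&r));
	assert(!ring_get(&r));		/* empty, head == tail again */
	return 0;
}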
+
+
+static struct net_device_stats *scc_get_stats(struct net_device *dev)
+{
+ struct scc_priv *priv = dev->priv;
+
+ return &priv->stats;
+}
+
+
+static int scc_set_mac_address(struct net_device *dev, void *sa)
+{
+ memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
+ dev->addr_len);
+ return 0;
+}
+
+
+static inline void tx_on(struct scc_priv *priv)
+{
+ int i, n;
+ unsigned long flags;
+
+ if (priv->param.dma >= 0) {
+ n = (priv->chip == Z85230) ? 3 : 1;
+ /* Program DMA controller */
+ flags = claim_dma_lock();
+ set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
+ set_dma_addr(priv->param.dma,
+ (int) priv->tx_buf[priv->tx_tail] + n);
+ set_dma_count(priv->param.dma,
+ priv->tx_len[priv->tx_tail] - n);
+ release_dma_lock(flags);
+ /* Enable TX underrun interrupt */
+ write_scc(priv, R15, TxUIE);
+ /* Configure DREQ */
+ if (priv->type == TYPE_TWIN)
+ outb((priv->param.dma ==
+ 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
+ priv->card_base + TWIN_DMA_CFG);
+ else
+ write_scc(priv, R1,
+ EXT_INT_ENAB | WT_FN_RDYFN |
+ WT_RDY_ENAB);
+ /* Write first byte(s) */
+ spin_lock_irqsave(priv->register_lock, flags);
+ for (i = 0; i < n; i++)
+ write_scc_data(priv,
+ priv->tx_buf[priv->tx_tail][i], 1);
+ enable_dma(priv->param.dma);
+ spin_unlock_irqrestore(priv->register_lock, flags);
+ } else {
+ write_scc(priv, R15, TxUIE);
+ write_scc(priv, R1,
+ EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
+ tx_isr(priv);
+ }
+ /* Reset EOM latch if we do not have the AUTOEOM feature */
+ if (priv->chip == Z8530)
+ write_scc(priv, R0, RES_EOM_L);
+}
+
+
+static inline void rx_on(struct scc_priv *priv)
+{
+ unsigned long flags;
+
+ /* Clear RX FIFO */
+ while (read_scc(priv, R0) & Rx_CH_AV)
+ read_scc_data(priv);
+ priv->rx_over = 0;
+ if (priv->param.dma >= 0) {
+ /* Program DMA controller */
+ flags = claim_dma_lock();
+ set_dma_mode(priv->param.dma, DMA_MODE_READ);
+ set_dma_addr(priv->param.dma,
+ (int) priv->rx_buf[priv->rx_head]);
+ set_dma_count(priv->param.dma, BUF_SIZE);
+ release_dma_lock(flags);
+ enable_dma(priv->param.dma);
+ /* Configure PackeTwin DMA */
+ if (priv->type == TYPE_TWIN) {
+ outb((priv->param.dma ==
+ 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
+ priv->card_base + TWIN_DMA_CFG);
+ }
+ /* Sp. cond. intr. only, ext int enable, RX DMA enable */
+ write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
+ WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
+ } else {
+ /* Reset current frame */
+ priv->rx_ptr = 0;
+ /* Intr. on all Rx characters and Sp. cond., ext int enable */
+ write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
+ WT_FN_RDYFN);
+ }
+ write_scc(priv, R0, ERR_RES);
+ write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
+}
+
+
+static inline void rx_off(struct scc_priv *priv)
+{
+ /* Disable receiver */
+ write_scc(priv, R3, Rx8);
+ /* Disable DREQ / RX interrupt */
+ if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
+ outb(0, priv->card_base + TWIN_DMA_CFG);
+ else
+ write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
+ /* Disable DMA */
+ if (priv->param.dma >= 0)
+ disable_dma(priv->param.dma);
+}
+
+
+static void start_timer(struct scc_priv *priv, int t, int r15)
+{
+ unsigned long flags;
+
+ outb(priv->tmr_mode, priv->tmr_ctrl);
+ if (t == 0) {
+ tm_isr(priv);
+ } else if (t > 0) {
+ save_flags(flags);
+ cli();
+ outb(t & 0xFF, priv->tmr_cnt);
+ outb((t >> 8) & 0xFF, priv->tmr_cnt);
+ if (priv->type != TYPE_TWIN) {
+ write_scc(priv, R15, r15 | CTSIE);
+ priv->rr0 |= CTS;
+ }
+ restore_flags(flags);
+ }
+}
+
+
+static inline unsigned char random(void)
+{
+ /* See "Numerical Recipes in C", second edition, p. 284 */
+ rand = rand * 1664525L + 1013904223L;
+ return (unsigned char) (rand >> 24);
+}
+
+static inline void z8530_isr(struct scc_info *info)
+{
+ int is, i = 100;
+
+ while ((is = read_scc(&info->priv[0], R3)) && i--) {
+ if (is & CHARxIP) {
+ rx_isr(&info->priv[0]);
+ } else if (is & CHATxIP) {
+ tx_isr(&info->priv[0]);
+ } else if (is & CHAEXT) {
+ es_isr(&info->priv[0]);
+ } else if (is & CHBRxIP) {
+ rx_isr(&info->priv[1]);
+ } else if (is & CHBTxIP) {
+ tx_isr(&info->priv[1]);
+ } else {
+ es_isr(&info->priv[1]);
+ }
+ write_scc(&info->priv[0], R0, RES_H_IUS);
+ }
+ if (i < 0) {
+ printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
+ is);
+ }
+ /* Ok, no interrupts pending from this 8530. The INT line should
+ be inactive now. */
+}
+
+
+static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct scc_info *info = dev_id;
+
+ spin_lock(info->priv[0].register_lock);
+ /* At this point interrupts are enabled, and the interrupt under service
+ is already acknowledged, but masked off.
+
+ Interrupt processing: We loop until we know that the IRQ line is
+ low. If another positive edge occurs afterwards during the ISR,
+ another interrupt will be triggered by the interrupt controller
+ as soon as the IRQ level is enabled again (see asm/irq.h).
+
+ Bottom-half handlers will be processed after scc_isr(). This is
+ important, since we only have small ringbuffers and want new data
+ to be fetched/delivered immediately. */
+
+ if (info->priv[0].type == TYPE_TWIN) {
+ int is, card_base = info->priv[0].card_base;
+ while ((is = ~inb(card_base + TWIN_INT_REG)) &
+ TWIN_INT_MSK) {
+ if (is & TWIN_SCC_MSK) {
+ z8530_isr(info);
+ } else if (is & TWIN_TMR1_MSK) {
+ inb(card_base + TWIN_CLR_TMR1);
+ tm_isr(&info->priv[0]);
+ } else {
+ inb(card_base + TWIN_CLR_TMR2);
+ tm_isr(&info->priv[1]);
+ }
+ }
+ } else
+ z8530_isr(info);
+ spin_unlock(info->priv[0].register_lock);
+ return IRQ_HANDLED;
+}
+
+
+static void rx_isr(struct scc_priv *priv)
+{
+ if (priv->param.dma >= 0) {
+ /* Check special condition and perform error reset. See 2.4.7.5. */
+ special_condition(priv, read_scc(priv, R1));
+ write_scc(priv, R0, ERR_RES);
+ } else {
+ /* Check special condition for each character. Error reset not necessary.
+ Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
+ int rc;
+ while (read_scc(priv, R0) & Rx_CH_AV) {
+ rc = read_scc(priv, R1);
+ if (priv->rx_ptr < BUF_SIZE)
+				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
+					read_scc_data(priv);
+ else {
+ priv->rx_over = 2;
+ read_scc_data(priv);
+ }
+ special_condition(priv, rc);
+ }
+ }
+}
+
+
+static void special_condition(struct scc_priv *priv, int rc)
+{
+ int cb;
+ unsigned long flags;
+
+ /* See Figure 2-15. Only overrun and EOF need to be checked. */
+
+ if (rc & Rx_OVR) {
+ /* Receiver overrun */
+ priv->rx_over = 1;
+ if (priv->param.dma < 0)
+ write_scc(priv, R0, ERR_RES);
+ } else if (rc & END_FR) {
+ /* End of frame. Get byte count */
+ if (priv->param.dma >= 0) {
+ flags = claim_dma_lock();
+ cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
+ 2;
+ release_dma_lock(flags);
+ } else {
+ cb = priv->rx_ptr - 2;
+ }
+ if (priv->rx_over) {
+ /* We had an overrun */
+ priv->stats.rx_errors++;
+ if (priv->rx_over == 2)
+ priv->stats.rx_length_errors++;
+ else
+ priv->stats.rx_fifo_errors++;
+ priv->rx_over = 0;
+ } else if (rc & CRC_ERR) {
+ /* Count invalid CRC only if packet length >= minimum */
+ if (cb >= 15) {
+ priv->stats.rx_errors++;
+ priv->stats.rx_crc_errors++;
+ }
+ } else {
+ if (cb >= 15) {
+ if (priv->rx_count < NUM_RX_BUF - 1) {
+ /* Put good frame in FIFO */
+ priv->rx_len[priv->rx_head] = cb;
+					priv->rx_head =
+						(priv->rx_head + 1) % NUM_RX_BUF;
+ priv->rx_count++;
+ schedule_work(&priv->rx_work);
+ } else {
+ priv->stats.rx_errors++;
+ priv->stats.rx_over_errors++;
+ }
+ }
+ }
+ /* Get ready for new frame */
+ if (priv->param.dma >= 0) {
+ flags = claim_dma_lock();
+ set_dma_addr(priv->param.dma,
+ (int) priv->rx_buf[priv->rx_head]);
+ set_dma_count(priv->param.dma, BUF_SIZE);
+ release_dma_lock(flags);
+ } else {
+ priv->rx_ptr = 0;
+ }
+ }
+}
+
+
+static void rx_bh(void *arg)
+{
+ struct scc_priv *priv = arg;
+ int i = priv->rx_tail;
+ int cb;
+ unsigned long flags;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ spin_lock_irqsave(&priv->ring_lock, flags);
+ while (priv->rx_count) {
+ spin_unlock_irqrestore(&priv->ring_lock, flags);
+ cb = priv->rx_len[i];
+ /* Allocate buffer */
+ skb = dev_alloc_skb(cb + 1);
+ if (skb == NULL) {
+ /* Drop packet */
+ priv->stats.rx_dropped++;
+ } else {
+ /* Fill buffer */
+ data = skb_put(skb, cb + 1);
+ data[0] = 0;
+ memcpy(&data[1], priv->rx_buf[i], cb);
+ skb->dev = priv->dev;
+			skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ priv->dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += cb;
+ }
+ spin_lock_irqsave(&priv->ring_lock, flags);
+ /* Move tail */
+ priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
+ priv->rx_count--;
+ }
+ spin_unlock_irqrestore(&priv->ring_lock, flags);
+}
+
+
+static void tx_isr(struct scc_priv *priv)
+{
+ int i = priv->tx_tail, p = priv->tx_ptr;
+
+ /* Suspend TX interrupts if we don't want to send anything.
+ See Figure 2-22. */
+ if (p == priv->tx_len[i]) {
+ write_scc(priv, R0, RES_Tx_P);
+ return;
+ }
+
+ /* Write characters */
+ while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
+ write_scc_data(priv, priv->tx_buf[i][p++], 0);
+ }
+
+ /* Reset EOM latch of Z8530 */
+ if (!priv->tx_ptr && p && priv->chip == Z8530)
+ write_scc(priv, R0, RES_EOM_L);
+
+ priv->tx_ptr = p;
+}
+
+
+static void es_isr(struct scc_priv *priv)
+{
+ int i, rr0, drr0, res;
+ unsigned long flags;
+
+ /* Read status, reset interrupt bit (open latches) */
+ rr0 = read_scc(priv, R0);
+ write_scc(priv, R0, RES_EXT_INT);
+ drr0 = priv->rr0 ^ rr0;
+ priv->rr0 = rr0;
+
+ /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
+ it might have already been cleared again by AUTOEOM. */
+ if (priv->state == TX_DATA) {
+ /* Get remaining bytes */
+ i = priv->tx_tail;
+ if (priv->param.dma >= 0) {
+ disable_dma(priv->param.dma);
+ flags = claim_dma_lock();
+ res = get_dma_residue(priv->param.dma);
+ release_dma_lock(flags);
+ } else {
+ res = priv->tx_len[i] - priv->tx_ptr;
+ priv->tx_ptr = 0;
+ }
+ /* Disable DREQ / TX interrupt */
+ if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
+ outb(0, priv->card_base + TWIN_DMA_CFG);
+ else
+ write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
+ if (res) {
+ /* Update packet statistics */
+ priv->stats.tx_errors++;
+ priv->stats.tx_fifo_errors++;
+ /* Other underrun interrupts may already be waiting */
+ write_scc(priv, R0, RES_EXT_INT);
+ write_scc(priv, R0, RES_EXT_INT);
+ } else {
+ /* Update packet statistics */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += priv->tx_len[i];
+ /* Remove frame from FIFO */
+ priv->tx_tail = (i + 1) % NUM_TX_BUF;
+ priv->tx_count--;
+ /* Inform upper layers */
+ netif_wake_queue(priv->dev);
+ }
+ /* Switch state */
+ write_scc(priv, R15, 0);
+ if (priv->tx_count &&
+ (jiffies - priv->tx_start) < priv->param.txtimeout) {
+ priv->state = TX_PAUSE;
+ start_timer(priv, priv->param.txpause, 0);
+ } else {
+ priv->state = TX_TAIL;
+ start_timer(priv, priv->param.txtail, 0);
+ }
+ }
+
+ /* DCD transition */
+ if (drr0 & DCD) {
+ if (rr0 & DCD) {
+ switch (priv->state) {
+ case IDLE:
+ case WAIT:
+ priv->state = DCD_ON;
+ write_scc(priv, R15, 0);
+ start_timer(priv, priv->param.dcdon, 0);
+ }
+ } else {
+ switch (priv->state) {
+ case RX_ON:
+ rx_off(priv);
+ priv->state = DCD_OFF;
+ write_scc(priv, R15, 0);
+ start_timer(priv, priv->param.dcdoff, 0);
+ }
+ }
+ }
+
+ /* CTS transition */
+ if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
+ tm_isr(priv);
+
+}
+
+
+static void tm_isr(struct scc_priv *priv)
+{
+ switch (priv->state) {
+ case TX_HEAD:
+ case TX_PAUSE:
+ tx_on(priv);
+ priv->state = TX_DATA;
+ break;
+ case TX_TAIL:
+ write_scc(priv, R5, TxCRC_ENAB | Tx8);
+ priv->state = RTS_OFF;
+ if (priv->type != TYPE_TWIN)
+ write_scc(priv, R15, 0);
+ start_timer(priv, priv->param.rtsoff, 0);
+ break;
+ case RTS_OFF:
+ write_scc(priv, R15, DCDIE);
+ priv->rr0 = read_scc(priv, R0);
+ if (priv->rr0 & DCD) {
+ priv->stats.collisions++;
+ rx_on(priv);
+ priv->state = RX_ON;
+ } else {
+ priv->state = WAIT;
+ start_timer(priv, priv->param.waittime, DCDIE);
+ }
+ break;
+ case WAIT:
+ if (priv->tx_count) {
+ priv->state = TX_HEAD;
+ priv->tx_start = jiffies;
+ write_scc(priv, R5,
+ TxCRC_ENAB | RTS | TxENAB | Tx8);
+ write_scc(priv, R15, 0);
+ start_timer(priv, priv->param.txdelay, 0);
+ } else {
+ priv->state = IDLE;
+ if (priv->type != TYPE_TWIN)
+ write_scc(priv, R15, DCDIE);
+ }
+ break;
+ case DCD_ON:
+ case DCD_OFF:
+ write_scc(priv, R15, DCDIE);
+ priv->rr0 = read_scc(priv, R0);
+ if (priv->rr0 & DCD) {
+ rx_on(priv);
+ priv->state = RX_ON;
+ } else {
+ priv->state = WAIT;
+ start_timer(priv,
+ random() / priv->param.persist *
+ priv->param.slottime, DCDIE);
+ }
+ break;
+ }
+}
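/*
 * A worked sketch of the channel-access backoff computed in the DCD_ON /
 * DCD_OFF branch of tm_isr() above: random() yields 0..255, so
 * random() / persist is an integer number of slots (always 0 with the
 * default persist = 256, i.e. transmit immediately), and each slot lasts
 * param.slottime ticks of the 25600 Hz timer 0.  The parameter values
 * below are illustrative only.
 */
#include <stdio.h>

#define TMR_0_HZ 25600

int main(void)
{
	int persist  = 64;	/* example p-persistence, valid range 1..256 */
	int slottime = 256;	/* example slot: 256 / 25600 s = 10 ms */
	int r;

	for (r = 0; r < 256; r += 64) {
		int ticks = r / persist * slottime;

		printf("random() = %3d -> wait %4d ticks (%d ms)\n",
		       r, ticks, ticks * 1000 / TMR_0_HZ);
	}
	return 0;
}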
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
new file mode 100644
index 000000000000..b89959a596d7
--- /dev/null
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -0,0 +1,817 @@
+/*****************************************************************************/
+
+/*
+ * hdlcdrv.c -- HDLC packet radio network driver.
+ *
+ * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ * The driver was derived from Donald Becker's skeleton.c
+ * Written 1993-94 by Donald Becker.
+ *
+ * History:
+ * 0.1 21.09.1996 Started
+ * 18.10.1996 Changed to new user space access routines
+ * (copy_{to,from}_user)
+ * 0.2 21.11.1996 various small changes
+ * 0.3 03.03.1997 fixed (hopefully) IP not working with ax.25 as a module
+ * 0.4 16.04.1997 init code/data tagged
+ * 0.5 30.07.1997 made HDLC buffers bigger (solves a problem with the
+ * soundmodem driver)
+ * 0.6 05.04.1998 add spinlocks
+ * 0.7 03.08.1999 removed some old compatibility cruft
+ * 0.8 12.02.2000 adapted to softnet driver interface
+ */
+
+/*****************************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/hdlcdrv.h>
+/* prototypes for ax25_encapsulate and ax25_rebuild_header */
+#include <net/ax25.h>
+
+/* make genksyms happy */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/crc-ccitt.h>
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * The name of the card. It is used for messages and in the requests for
+ * I/O regions, IRQs and DMA channels.
+ */
+
+static char ax25_bcast[AX25_ADDR_LEN] =
+{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
+static char ax25_nocall[AX25_ADDR_LEN] =
+{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
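/*
 * The two addresses above are AX.25 "shifted ASCII": each of the seven
 * bytes is the corresponding character of the padded callsign shifted left
 * by one bit, so "QST   0" yields ax25_bcast and "LINUX 1" yields
 * ax25_nocall.  A small sketch of that encoding; the helper name is
 * illustrative only.
 */
static void ax25_shift_call(unsigned char out[7], const char *padded7)
{
	int i;

	for (i = 0; i < 7; i++)
		out[i] = (unsigned char)padded7[i] << 1;
}
/* ax25_shift_call(buf, "QST   0") reproduces ax25_bcast byte for byte */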
+
+/* --------------------------------------------------------------------- */
+
+#define KISS_VERBOSE
+
+/* --------------------------------------------------------------------- */
+
+#define PARAM_TXDELAY 1
+#define PARAM_PERSIST 2
+#define PARAM_SLOTTIME 3
+#define PARAM_TXTAIL 4
+#define PARAM_FULLDUP 5
+#define PARAM_HARDWARE 6
+#define PARAM_RETURN 255
+
+/* --------------------------------------------------------------------- */
+/*
+ * the CRC routines are stolen from WAMPES
+ * by Dieter Deyke
+ */
+
+
+/*---------------------------------------------------------------------------*/
+
+static inline void append_crc_ccitt(unsigned char *buffer, int len)
+{
+ unsigned int crc = crc_ccitt(0xffff, buffer, len) ^ 0xffff;
+ *buffer++ = crc;
+ *buffer++ = crc >> 8;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static inline int check_crc_ccitt(const unsigned char *buf, int cnt)
+{
+ return (crc_ccitt(0xffff, buf, cnt) & 0xffff) == 0xf0b8;
+}
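/*
 * The 0xf0b8 value tested above is the standard HDLC/X.25 FCS residue:
 * running the CRC over a frame plus its appended, complemented CRC always
 * yields it.  A self-contained sketch using a bitwise form of the CRC
 * computed by the kernel's crc_ccitt() (LSB-first, polynomial 0x8408,
 * initial value 0xffff); the sample payload is arbitrary.
 */
#include <stdio.h>

static unsigned short crc16_x25(unsigned short crc,
				const unsigned char *buf, int len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	unsigned char frame[16] = { 'A' << 1, 'X' << 1, '2' << 1, '5' << 1 };
	int len = 4;
	unsigned short crc = crc16_x25(0xffff, frame, len) ^ 0xffff;

	frame[len]     = crc;		/* low byte first, as in append_crc_ccitt() */
	frame[len + 1] = crc >> 8;
	printf("residue = 0x%04x\n",
	       crc16_x25(0xffff, frame, len + 2));	/* prints 0xf0b8 */
	return 0;
}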
+
+/*---------------------------------------------------------------------------*/
+
+#if 0
+static int calc_crc_ccitt(const unsigned char *buf, int cnt)
+{
+ unsigned int crc = 0xffff;
+
+ for (; cnt > 0; cnt--)
+ crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff];
+ crc ^= 0xffff;
+ return (crc & 0xffff);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+#define tenms_to_2flags(s,tenms) ((tenms * s->par.bitrate) / 100 / 16)
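/*
 * Worked example for the macro above, with illustrative numbers: the
 * argument is a time in units of 10 ms and the result counts 16-bit
 * "double flags" (the transmitter below queues 0x7e7e per unit).  At
 * 1200 bit/s, tx_delay = 25 (250 ms) gives 25 * 1200 / 100 / 16 = 18
 * double flags, i.e. 36 flag bytes occupying 288 bits or 240 ms.
 */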
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The HDLC routines
+ */
+
+static int hdlc_rx_add_bytes(struct hdlcdrv_state *s, unsigned int bits,
+ int num)
+{
+ int added = 0;
+
+ while (s->hdlcrx.rx_state && num >= 8) {
+ if (s->hdlcrx.len >= sizeof(s->hdlcrx.buffer)) {
+ s->hdlcrx.rx_state = 0;
+ return 0;
+ }
+ *s->hdlcrx.bp++ = bits >> (32-num);
+ s->hdlcrx.len++;
+ num -= 8;
+ added += 8;
+ }
+ return added;
+}
+
+static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
+{
+ struct sk_buff *skb;
+ int pkt_len;
+ unsigned char *cp;
+
+ if (s->hdlcrx.len < 4)
+ return;
+ if (!check_crc_ccitt(s->hdlcrx.buffer, s->hdlcrx.len))
+ return;
+ pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */
+ if (!(skb = dev_alloc_skb(pkt_len))) {
+ printk("%s: memory squeeze, dropping packet\n", dev->name);
+ s->stats.rx_dropped++;
+ return;
+ }
+ skb->dev = dev;
+ cp = skb_put(skb, pkt_len);
+ *cp++ = 0; /* KISS kludge */
+ memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
+ skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ s->stats.rx_packets++;
+}
+
+void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s)
+{
+ int i;
+ unsigned int mask1, mask2, mask3, mask4, mask5, mask6, word;
+
+ if (!s || s->magic != HDLCDRV_MAGIC)
+ return;
+ if (test_and_set_bit(0, &s->hdlcrx.in_hdlc_rx))
+ return;
+
+ while (!hdlcdrv_hbuf_empty(&s->hdlcrx.hbuf)) {
+ word = hdlcdrv_hbuf_get(&s->hdlcrx.hbuf);
+
+#ifdef HDLCDRV_DEBUG
+ hdlcdrv_add_bitbuffer_word(&s->bitbuf_hdlc, word);
+#endif /* HDLCDRV_DEBUG */
+ s->hdlcrx.bitstream >>= 16;
+ s->hdlcrx.bitstream |= word << 16;
+ s->hdlcrx.bitbuf >>= 16;
+ s->hdlcrx.bitbuf |= word << 16;
+ s->hdlcrx.numbits += 16;
+ for(i = 15, mask1 = 0x1fc00, mask2 = 0x1fe00, mask3 = 0x0fc00,
+ mask4 = 0x1f800, mask5 = 0xf800, mask6 = 0xffff;
+ i >= 0;
+ i--, mask1 <<= 1, mask2 <<= 1, mask3 <<= 1, mask4 <<= 1,
+ mask5 <<= 1, mask6 = (mask6 << 1) | 1) {
+ if ((s->hdlcrx.bitstream & mask1) == mask1)
+ s->hdlcrx.rx_state = 0; /* abort received */
+ else if ((s->hdlcrx.bitstream & mask2) == mask3) {
+ /* flag received */
+ if (s->hdlcrx.rx_state) {
+				hdlc_rx_add_bytes(s, s->hdlcrx.bitbuf << (8 + i),
+						  s->hdlcrx.numbits - 8 - i);
+ hdlc_rx_flag(dev, s);
+ }
+ s->hdlcrx.len = 0;
+ s->hdlcrx.bp = s->hdlcrx.buffer;
+ s->hdlcrx.rx_state = 1;
+ s->hdlcrx.numbits = i;
+ } else if ((s->hdlcrx.bitstream & mask4) == mask5) {
+ /* stuffed bit */
+ s->hdlcrx.numbits--;
+ s->hdlcrx.bitbuf = (s->hdlcrx.bitbuf & (~mask6)) |
+ ((s->hdlcrx.bitbuf & mask6) << 1);
+ }
+ }
+ s->hdlcrx.numbits -= hdlc_rx_add_bytes(s, s->hdlcrx.bitbuf,
+ s->hdlcrx.numbits);
+ }
+ clear_bit(0, &s->hdlcrx.in_hdlc_rx);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline void do_kiss_params(struct hdlcdrv_state *s,
+ unsigned char *data, unsigned long len)
+{
+
+#ifdef KISS_VERBOSE
+#define PKP(a,b) printk(KERN_INFO "hdlcdrv.c: channel params: " a "\n", b)
+#else /* KISS_VERBOSE */
+#define PKP(a,b)
+#endif /* KISS_VERBOSE */
+
+ if (len < 2)
+ return;
+ switch(data[0]) {
+ case PARAM_TXDELAY:
+ s->ch_params.tx_delay = data[1];
+ PKP("TX delay = %ums", 10 * s->ch_params.tx_delay);
+ break;
+ case PARAM_PERSIST:
+ s->ch_params.ppersist = data[1];
+ PKP("p persistence = %u", s->ch_params.ppersist);
+ break;
+ case PARAM_SLOTTIME:
+ s->ch_params.slottime = data[1];
+ PKP("slot time = %ums", s->ch_params.slottime);
+ break;
+ case PARAM_TXTAIL:
+ s->ch_params.tx_tail = data[1];
+ PKP("TX tail = %ums", s->ch_params.tx_tail);
+ break;
+ case PARAM_FULLDUP:
+ s->ch_params.fulldup = !!data[1];
+ PKP("%s duplex", s->ch_params.fulldup ? "full" : "half");
+ break;
+ default:
+ break;
+ }
+#undef PKP
+}
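/*
 * A minimal sketch of the control frames consumed above: when the first
 * byte handed to hdlcdrv_send_packet() later in this file is non-zero it
 * is interpreted as a KISS parameter command and the second byte as its
 * value, e.g. a TX delay of 300 ms (the unit is 10 ms) or roughly 25%
 * p-persistence.  The values are illustrative only.
 */
static const unsigned char kiss_set_txdelay[2]  = { PARAM_TXDELAY, 30 };
static const unsigned char kiss_set_ppersist[2] = { PARAM_PERSIST, 64 };
/* do_kiss_params(s, kiss_set_txdelay, 2) would set tx_delay to 30 (300 ms) */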
+
+/* ---------------------------------------------------------------------- */
+
+void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
+{
+ unsigned int mask1, mask2, mask3;
+ int i;
+ struct sk_buff *skb;
+ int pkt_len;
+
+ if (!s || s->magic != HDLCDRV_MAGIC)
+ return;
+ if (test_and_set_bit(0, &s->hdlctx.in_hdlc_tx))
+ return;
+ for (;;) {
+ if (s->hdlctx.numbits >= 16) {
+ if (hdlcdrv_hbuf_full(&s->hdlctx.hbuf)) {
+ clear_bit(0, &s->hdlctx.in_hdlc_tx);
+ return;
+ }
+ hdlcdrv_hbuf_put(&s->hdlctx.hbuf, s->hdlctx.bitbuf);
+ s->hdlctx.bitbuf >>= 16;
+ s->hdlctx.numbits -= 16;
+ }
+ switch (s->hdlctx.tx_state) {
+ default:
+ clear_bit(0, &s->hdlctx.in_hdlc_tx);
+ return;
+ case 0:
+ case 1:
+ if (s->hdlctx.numflags) {
+ s->hdlctx.numflags--;
+ s->hdlctx.bitbuf |=
+ 0x7e7e << s->hdlctx.numbits;
+ s->hdlctx.numbits += 16;
+ break;
+ }
+ if (s->hdlctx.tx_state == 1) {
+ clear_bit(0, &s->hdlctx.in_hdlc_tx);
+ return;
+ }
+ if (!(skb = s->skb)) {
+ int flgs = tenms_to_2flags(s, s->ch_params.tx_tail);
+ if (flgs < 2)
+ flgs = 2;
+ s->hdlctx.tx_state = 1;
+ s->hdlctx.numflags = flgs;
+ break;
+ }
+ s->skb = NULL;
+ netif_wake_queue(dev);
+ pkt_len = skb->len-1; /* strip KISS byte */
+ if (pkt_len >= HDLCDRV_MAXFLEN || pkt_len < 2) {
+ s->hdlctx.tx_state = 0;
+ s->hdlctx.numflags = 1;
+ dev_kfree_skb_irq(skb);
+ break;
+ }
+ memcpy(s->hdlctx.buffer, skb->data+1, pkt_len);
+ dev_kfree_skb_irq(skb);
+ s->hdlctx.bp = s->hdlctx.buffer;
+ append_crc_ccitt(s->hdlctx.buffer, pkt_len);
+ s->hdlctx.len = pkt_len+2; /* the appended CRC */
+ s->hdlctx.tx_state = 2;
+ s->hdlctx.bitstream = 0;
+ s->stats.tx_packets++;
+ break;
+ case 2:
+ if (!s->hdlctx.len) {
+ s->hdlctx.tx_state = 0;
+ s->hdlctx.numflags = 1;
+ break;
+ }
+ s->hdlctx.len--;
+ s->hdlctx.bitbuf |= *s->hdlctx.bp <<
+ s->hdlctx.numbits;
+ s->hdlctx.bitstream >>= 8;
+ s->hdlctx.bitstream |= (*s->hdlctx.bp++) << 16;
+ mask1 = 0x1f000;
+ mask2 = 0x10000;
+ mask3 = 0xffffffff >> (31-s->hdlctx.numbits);
+ s->hdlctx.numbits += 8;
+ for(i = 0; i < 8; i++, mask1 <<= 1, mask2 <<= 1,
+ mask3 = (mask3 << 1) | 1) {
+ if ((s->hdlctx.bitstream & mask1) != mask1)
+ continue;
+ s->hdlctx.bitstream &= ~mask2;
+ s->hdlctx.bitbuf =
+ (s->hdlctx.bitbuf & mask3) |
+ ((s->hdlctx.bitbuf &
+ (~mask3)) << 1);
+ s->hdlctx.numbits++;
+ mask3 = (mask3 << 1) | 1;
+ }
+ break;
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void start_tx(struct net_device *dev, struct hdlcdrv_state *s)
+{
+ s->hdlctx.tx_state = 0;
+ s->hdlctx.numflags = tenms_to_2flags(s, s->ch_params.tx_delay);
+ s->hdlctx.bitbuf = s->hdlctx.bitstream = s->hdlctx.numbits = 0;
+ hdlcdrv_transmitter(dev, s);
+ s->hdlctx.ptt = 1;
+ s->ptt_keyed++;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static unsigned short random_seed;
+
+static inline unsigned short random_num(void)
+{
+ random_seed = 28629 * random_seed + 157;
+ return random_seed;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
+{
+ if (!s || s->magic != HDLCDRV_MAGIC || s->hdlctx.ptt || !s->skb)
+ return;
+ if (s->ch_params.fulldup) {
+ start_tx(dev, s);
+ return;
+ }
+ if (s->hdlcrx.dcd) {
+ s->hdlctx.slotcnt = s->ch_params.slottime;
+ return;
+ }
+ if ((--s->hdlctx.slotcnt) > 0)
+ return;
+ s->hdlctx.slotcnt = s->ch_params.slottime;
+ if ((random_num() % 256) > s->ch_params.ppersist)
+ return;
+ start_tx(dev, s);
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * ===================== network driver interface =========================
+ */
+
+static int hdlcdrv_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hdlcdrv_state *sm = netdev_priv(dev);
+
+ if (skb->data[0] != 0) {
+ do_kiss_params(sm, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ if (sm->skb)
+ return -1;
+ netif_stop_queue(dev);
+ sm->skb = skb;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = (struct sockaddr *)addr;
+
+ /* addr is an AX.25 shifted ASCII mac address */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev)
+{
+ struct hdlcdrv_state *sm = netdev_priv(dev);
+
+ /*
+ * Get the current statistics. This may be called with the
+ * card open or closed.
+ */
+ return &sm->stats;
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int hdlcdrv_open(struct net_device *dev)
+{
+ struct hdlcdrv_state *s = netdev_priv(dev);
+ int i;
+
+ if (!s->ops || !s->ops->open)
+ return -ENODEV;
+
+ /*
+ * initialise some variables
+ */
+ s->opened = 1;
+ s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0;
+ s->hdlcrx.in_hdlc_rx = 0;
+ s->hdlcrx.rx_state = 0;
+
+ s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0;
+ s->hdlctx.in_hdlc_tx = 0;
+ s->hdlctx.tx_state = 1;
+ s->hdlctx.numflags = 0;
+ s->hdlctx.bitstream = s->hdlctx.bitbuf = s->hdlctx.numbits = 0;
+ s->hdlctx.ptt = 0;
+ s->hdlctx.slotcnt = s->ch_params.slottime;
+ s->hdlctx.calibrate = 0;
+
+ i = s->ops->open(dev);
+ if (i)
+ return i;
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+/*
+ * The inverse routine to hdlcdrv_open().
+ */
+
+static int hdlcdrv_close(struct net_device *dev)
+{
+ struct hdlcdrv_state *s = netdev_priv(dev);
+ int i = 0;
+
+ netif_stop_queue(dev);
+
+ if (s->ops && s->ops->close)
+ i = s->ops->close(dev);
+ if (s->skb)
+ dev_kfree_skb(s->skb);
+ s->skb = NULL;
+ s->opened = 0;
+ return i;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct hdlcdrv_state *s = netdev_priv(dev);
+ struct hdlcdrv_ioctl bi;
+
+ if (cmd != SIOCDEVPRIVATE) {
+ if (s->ops && s->ops->ioctl)
+ return s->ops->ioctl(dev, ifr, &bi, cmd);
+ return -ENOIOCTLCMD;
+ }
+ if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ return -EFAULT;
+
+ switch (bi.cmd) {
+ default:
+ if (s->ops && s->ops->ioctl)
+ return s->ops->ioctl(dev, ifr, &bi, cmd);
+ return -ENOIOCTLCMD;
+
+ case HDLCDRVCTL_GETCHANNELPAR:
+ bi.data.cp.tx_delay = s->ch_params.tx_delay;
+ bi.data.cp.tx_tail = s->ch_params.tx_tail;
+ bi.data.cp.slottime = s->ch_params.slottime;
+ bi.data.cp.ppersist = s->ch_params.ppersist;
+ bi.data.cp.fulldup = s->ch_params.fulldup;
+ break;
+
+ case HDLCDRVCTL_SETCHANNELPAR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+ s->ch_params.tx_delay = bi.data.cp.tx_delay;
+ s->ch_params.tx_tail = bi.data.cp.tx_tail;
+ s->ch_params.slottime = bi.data.cp.slottime;
+ s->ch_params.ppersist = bi.data.cp.ppersist;
+ s->ch_params.fulldup = bi.data.cp.fulldup;
+ s->hdlctx.slotcnt = 1;
+ return 0;
+
+ case HDLCDRVCTL_GETMODEMPAR:
+ bi.data.mp.iobase = dev->base_addr;
+ bi.data.mp.irq = dev->irq;
+ bi.data.mp.dma = dev->dma;
+ bi.data.mp.dma2 = s->ptt_out.dma2;
+ bi.data.mp.seriobase = s->ptt_out.seriobase;
+ bi.data.mp.pariobase = s->ptt_out.pariobase;
+ bi.data.mp.midiiobase = s->ptt_out.midiiobase;
+ break;
+
+ case HDLCDRVCTL_SETMODEMPAR:
+ if ((!capable(CAP_SYS_RAWIO)) || netif_running(dev))
+ return -EACCES;
+ dev->base_addr = bi.data.mp.iobase;
+ dev->irq = bi.data.mp.irq;
+ dev->dma = bi.data.mp.dma;
+ s->ptt_out.dma2 = bi.data.mp.dma2;
+ s->ptt_out.seriobase = bi.data.mp.seriobase;
+ s->ptt_out.pariobase = bi.data.mp.pariobase;
+ s->ptt_out.midiiobase = bi.data.mp.midiiobase;
+ return 0;
+
+ case HDLCDRVCTL_GETSTAT:
+ bi.data.cs.ptt = hdlcdrv_ptt(s);
+ bi.data.cs.dcd = s->hdlcrx.dcd;
+ bi.data.cs.ptt_keyed = s->ptt_keyed;
+ bi.data.cs.tx_packets = s->stats.tx_packets;
+ bi.data.cs.tx_errors = s->stats.tx_errors;
+ bi.data.cs.rx_packets = s->stats.rx_packets;
+ bi.data.cs.rx_errors = s->stats.rx_errors;
+ break;
+
+ case HDLCDRVCTL_OLDGETSTAT:
+ bi.data.ocs.ptt = hdlcdrv_ptt(s);
+ bi.data.ocs.dcd = s->hdlcrx.dcd;
+ bi.data.ocs.ptt_keyed = s->ptt_keyed;
+ break;
+
+ case HDLCDRVCTL_CALIBRATE:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+ return 0;
+
+ case HDLCDRVCTL_GETSAMPLES:
+#ifndef HDLCDRV_DEBUG
+ return -EPERM;
+#else /* HDLCDRV_DEBUG */
+ if (s->bitbuf_channel.rd == s->bitbuf_channel.wr)
+ return -EAGAIN;
+ bi.data.bits =
+ s->bitbuf_channel.buffer[s->bitbuf_channel.rd];
+ s->bitbuf_channel.rd = (s->bitbuf_channel.rd+1) %
+ sizeof(s->bitbuf_channel.buffer);
+ break;
+#endif /* HDLCDRV_DEBUG */
+
+ case HDLCDRVCTL_GETBITS:
+#ifndef HDLCDRV_DEBUG
+ return -EPERM;
+#else /* HDLCDRV_DEBUG */
+ if (s->bitbuf_hdlc.rd == s->bitbuf_hdlc.wr)
+ return -EAGAIN;
+ bi.data.bits =
+ s->bitbuf_hdlc.buffer[s->bitbuf_hdlc.rd];
+ s->bitbuf_hdlc.rd = (s->bitbuf_hdlc.rd+1) %
+ sizeof(s->bitbuf_hdlc.buffer);
+ break;
+#endif /* HDLCDRV_DEBUG */
+
+ case HDLCDRVCTL_DRIVERNAME:
+ if (s->ops && s->ops->drvname) {
+ strncpy(bi.data.drivername, s->ops->drvname,
+ sizeof(bi.data.drivername));
+ break;
+ }
+ bi.data.drivername[0] = '\0';
+ break;
+
+ }
+ if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ return -EFAULT;
+ return 0;
+
+}
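+
+/*
+ * Illustrative user-space sketch (not part of the original driver), assuming
+ * the struct hdlcdrv_ioctl layout from <linux/hdlcdrv.h>: reading the channel
+ * parameters of an hdlcdrv-based interface (the interface name below is just
+ * an example) looks roughly like this:
+ *
+ *	struct ifreq ifr;
+ *	struct hdlcdrv_ioctl bi;
+ *
+ *	strcpy(ifr.ifr_name, "bcsf0");
+ *	ifr.ifr_data = (char *)&bi;
+ *	bi.cmd = HDLCDRVCTL_GETCHANNELPAR;
+ *	if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
+ *		printf("txdelay %d\n", bi.data.cp.tx_delay);
+ */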
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Initialize fields in hdlcdrv
+ */
+static void hdlcdrv_setup(struct net_device *dev)
+{
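+	/*
+	 * Positional initializer below: per struct hdlcdrv_channel_params in
+	 * <linux/hdlcdrv.h> the values are tx_delay, tx_tail, slottime,
+	 * ppersist and fulldup (cf. the GETCHANNELPAR ioctl handler above).
+	 */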
+ static const struct hdlcdrv_channel_params dflt_ch_params = {
+ 20, 2, 10, 40, 0
+ };
+ struct hdlcdrv_state *s = netdev_priv(dev);
+
+ /*
+ * initialize the hdlcdrv_state struct
+ */
+ s->ch_params = dflt_ch_params;
+ s->ptt_keyed = 0;
+
+ spin_lock_init(&s->hdlcrx.hbuf.lock);
+ s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0;
+ s->hdlcrx.in_hdlc_rx = 0;
+ s->hdlcrx.rx_state = 0;
+
+ spin_lock_init(&s->hdlctx.hbuf.lock);
+ s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0;
+ s->hdlctx.in_hdlc_tx = 0;
+ s->hdlctx.tx_state = 1;
+ s->hdlctx.numflags = 0;
+ s->hdlctx.bitstream = s->hdlctx.bitbuf = s->hdlctx.numbits = 0;
+ s->hdlctx.ptt = 0;
+ s->hdlctx.slotcnt = s->ch_params.slottime;
+ s->hdlctx.calibrate = 0;
+
+#ifdef HDLCDRV_DEBUG
+ s->bitbuf_channel.rd = s->bitbuf_channel.wr = 0;
+ s->bitbuf_channel.shreg = 0x80;
+
+ s->bitbuf_hdlc.rd = s->bitbuf_hdlc.wr = 0;
+ s->bitbuf_hdlc.shreg = 0x80;
+#endif /* HDLCDRV_DEBUG */
+
+ /*
+ * initialize the device struct
+ */
+ dev->open = hdlcdrv_open;
+ dev->stop = hdlcdrv_close;
+ dev->do_ioctl = hdlcdrv_ioctl;
+ dev->hard_start_xmit = hdlcdrv_send_packet;
+ dev->get_stats = hdlcdrv_get_stats;
+
+ /* Fill in the fields of the device structure */
+
+ s->skb = NULL;
+
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+#else /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+#endif /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+ dev->set_mac_address = hdlcdrv_set_mac_address;
+
+ dev->type = ARPHRD_AX25; /* AF_AX25 device */
+ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
+	dev->mtu = AX25_DEF_PACLEN;        /* AX.25 default packet length */
+ dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_nocall, AX25_ADDR_LEN);
+ dev->tx_queue_len = 16;
+}
+
+/* --------------------------------------------------------------------- */
+struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
+ unsigned int privsize, const char *ifname,
+ unsigned int baseaddr, unsigned int irq,
+ unsigned int dma)
+{
+ struct net_device *dev;
+ struct hdlcdrv_state *s;
+ int err;
+
+ BUG_ON(ops == NULL);
+
+ if (privsize < sizeof(struct hdlcdrv_state))
+ privsize = sizeof(struct hdlcdrv_state);
+
+ dev = alloc_netdev(privsize, ifname, hdlcdrv_setup);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * initialize part of the hdlcdrv_state struct
+ */
+ s = netdev_priv(dev);
+ s->magic = HDLCDRV_MAGIC;
+ s->ops = ops;
+ dev->base_addr = baseaddr;
+ dev->irq = irq;
+ dev->dma = dma;
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ printk(KERN_WARNING "hdlcdrv: cannot register net "
+ "device %s\n", dev->name);
+ free_netdev(dev);
+ dev = ERR_PTR(err);
+ }
+ return dev;
+}
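+
+/*
+ * Illustrative sketch (not part of the original driver): a modem driver built
+ * on top of this core (the baycom_* serial/parallel drivers are among the
+ * in-tree users) embeds struct hdlcdrv_state at the start of its private data
+ * and registers itself roughly like this (all "mydrv" names are hypothetical):
+ *
+ *	struct mydrv_state {
+ *		struct hdlcdrv_state hdrv;	-- must be the first member
+ *		int mydrv_private;
+ *	};
+ *
+ *	dev = hdlcdrv_register(&mydrv_ops, sizeof(struct mydrv_state),
+ *			       "mydrv0", iobase, irq, dma);
+ *	if (IS_ERR(dev))
+ *		return PTR_ERR(dev);
+ */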
+
+/* --------------------------------------------------------------------- */
+
+void hdlcdrv_unregister(struct net_device *dev)
+{
+ struct hdlcdrv_state *s = netdev_priv(dev);
+
+ BUG_ON(s->magic != HDLCDRV_MAGIC);
+
+ if (s->opened && s->ops->close)
+ s->ops->close(dev);
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+/* --------------------------------------------------------------------- */
+
+EXPORT_SYMBOL(hdlcdrv_receiver);
+EXPORT_SYMBOL(hdlcdrv_transmitter);
+EXPORT_SYMBOL(hdlcdrv_arbitrate);
+EXPORT_SYMBOL(hdlcdrv_register);
+EXPORT_SYMBOL(hdlcdrv_unregister);
+
+/* --------------------------------------------------------------------- */
+
+static int __init hdlcdrv_init_driver(void)
+{
+ printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
+ printk(KERN_INFO "hdlcdrv: version 0.8 compiled " __TIME__ " " __DATE__ "\n");
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static void __exit hdlcdrv_cleanup_driver(void)
+{
+ printk(KERN_INFO "hdlcdrv: cleanup\n");
+}
+
+/* --------------------------------------------------------------------- */
+
+MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
+MODULE_DESCRIPTION("Packet Radio network interface HDLC encoder/decoder");
+MODULE_LICENSE("GPL");
+module_init(hdlcdrv_init_driver);
+module_exit(hdlcdrv_cleanup_driver);
+
+/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
new file mode 100644
index 000000000000..d9ea080aea0f
--- /dev/null
+++ b/drivers/net/hamradio/mkiss.c
@@ -0,0 +1,951 @@
+/*
+ * MKISS Driver
+ *
+ * This module:
+ * This module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This module implements the AX.25 protocol for kernel-based
+ * devices such as TTYs. It interfaces between a raw TTY and the
+ * kernel's AX.25 protocol layers, just like slip.c.
+ * AX.25 had to be separated from slip.c once slip.c was no
+ * longer a static kernel device but a loadable module.
+ * This approach clears the way to implement other KISS variants
+ * like mkiss, smack, g8bpq ..... so far only mkiss is implemented.
+ *
+ * Hans Alblas <hans@esrac.ele.tue.nl>
+ *
+ * History
+ * Jonathan (G4KLX) Fixed to match Linux networking changes - 2.1.15.
+ * Matthias (DG2FEF) Added support for FlexNet CRC (on special request)
+ * Fixed bug in ax25_close(): dev_lock_wait() was
+ * called twice, causing a deadlock.
+ * Jeroen (PE1RXQ) Removed old MKISS_MAGIC stuff and calls to
+ * MOD_*_USE_COUNT
+ * Remove cli() and fix rtnl lock usage.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/ax25.h>
+
+#include "mkiss.h"
+
+#ifdef CONFIG_INET
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#endif
+
+static char banner[] __initdata = KERN_INFO "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
+
+typedef struct ax25_ctrl {
+ struct ax_disp ctrl; /* */
+	struct ax_disp	ctrl;	/* KISS channel state */
+} ax25_ctrl_t;
+
+static ax25_ctrl_t **ax25_ctrls;
+
+int ax25_maxdev = AX25_MAXDEV; /* Can be overridden with insmod! */
+
+static struct tty_ldisc ax_ldisc;
+
+static int ax25_init(struct net_device *);
+static int kiss_esc(unsigned char *, unsigned char *, int);
+static int kiss_esc_crc(unsigned char *, unsigned char *, unsigned short, int);
+static void kiss_unesc(struct ax_disp *, unsigned char);
+
+/*---------------------------------------------------------------------------*/
+
+static const unsigned short Crc_flex_table[] = {
+ 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
+ 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
+ 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
+ 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
+ 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
+ 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
+ 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
+ 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
+ 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
+ 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
+ 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
+ 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
+ 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
+ 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
+ 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
+ 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
+ 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
+ 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
+ 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
+ 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
+ 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
+ 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
+ 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
+ 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
+ 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
+ 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
+ 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
+ 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
+ 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
+ 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
+ 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
+ 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
+};
+
+/*---------------------------------------------------------------------------*/
+
+static unsigned short calc_crc_flex(unsigned char *cp, int size)
+{
+ unsigned short crc = 0xffff;
+
+ while (size--)
+ crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
+
+ return crc;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int check_crc_flex(unsigned char *cp, int size)
+{
+ unsigned short crc = 0xffff;
+
+ if (size < 3)
+ return -1;
+
+ while (size--)
+ crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
+
+ if ((crc & 0xffff) != 0x7070)
+ return -1;
+
+ return 0;
+}
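+
+/*
+ * Illustrative note (not part of the original driver): on transmit the two
+ * CRC bytes returned by calc_crc_flex() are appended high byte first (see
+ * kiss_esc_crc() below).  Running the same table over the data plus those
+ * two bytes always leaves the constant residue 0x7070 that check_crc_flex()
+ * tests for above.
+ */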
+
+/*---------------------------------------------------------------------------*/
+
+/* Find a free channel, and link in this `tty' line. */
+static inline struct ax_disp *ax_alloc(void)
+{
+ ax25_ctrl_t *axp=NULL;
+ int i;
+
+ for (i = 0; i < ax25_maxdev; i++) {
+ axp = ax25_ctrls[i];
+
+ /* Not allocated ? */
+ if (axp == NULL)
+ break;
+
+ /* Not in use ? */
+ if (!test_and_set_bit(AXF_INUSE, &axp->ctrl.flags))
+ break;
+ }
+
+ /* Sorry, too many, all slots in use */
+ if (i >= ax25_maxdev)
+ return NULL;
+
+	/* If no channels are available, allocate one */
+	if (axp == NULL && (ax25_ctrls[i] = kmalloc(sizeof(ax25_ctrl_t), GFP_KERNEL)) != NULL)
+		axp = ax25_ctrls[i];
+
+	/* Still no channel? Then the allocation above failed. */
+	if (axp == NULL)
+		return NULL;
+
+	memset(axp, 0, sizeof(ax25_ctrl_t));
+
+ /* Initialize channel control data */
+ set_bit(AXF_INUSE, &axp->ctrl.flags);
+ sprintf(axp->dev.name, "ax%d", i++);
+ axp->ctrl.tty = NULL;
+ axp->dev.base_addr = i;
+ axp->dev.priv = (void *)&axp->ctrl;
+ axp->dev.next = NULL;
+ axp->dev.init = ax25_init;
+
+ if (axp != NULL) {
+ /*
+ * register device so that it can be ifconfig'ed
+ * ax25_init() will be called as a side-effect
+ * SIDE-EFFECT WARNING: ax25_init() CLEARS axp->ctrl !
+ */
+ if (register_netdev(&axp->dev) == 0) {
+ /* (Re-)Set the INUSE bit. Very Important! */
+ set_bit(AXF_INUSE, &axp->ctrl.flags);
+ axp->ctrl.dev = &axp->dev;
+ axp->dev.priv = (void *) &axp->ctrl;
+
+ return &axp->ctrl;
+ } else {
+ clear_bit(AXF_INUSE,&axp->ctrl.flags);
+ printk(KERN_ERR "mkiss: ax_alloc() - register_netdev() failure.\n");
+ }
+ }
+
+ return NULL;
+}
+
+/* Free an AX25 channel. */
+static inline void ax_free(struct ax_disp *ax)
+{
+ /* Free all AX25 frame buffers. */
+ if (ax->rbuff)
+ kfree(ax->rbuff);
+ ax->rbuff = NULL;
+ if (ax->xbuff)
+ kfree(ax->xbuff);
+ ax->xbuff = NULL;
+ if (!test_and_clear_bit(AXF_INUSE, &ax->flags))
+ printk(KERN_ERR "mkiss: %s: ax_free for already free unit.\n", ax->dev->name);
+}
+
+static void ax_changedmtu(struct ax_disp *ax)
+{
+ struct net_device *dev = ax->dev;
+ unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
+ int len;
+
+ len = dev->mtu * 2;
+
+ /*
+	 * allow for the arrival of larger UDP packets, even if we say not to
+	 * accept them; this also works around SunOS sending 512-byte packets
+	 * even with an MSS of 128
+ */
+ if (len < 576 * 2)
+ len = 576 * 2;
+
+ xbuff = kmalloc(len + 4, GFP_ATOMIC);
+ rbuff = kmalloc(len + 4, GFP_ATOMIC);
+
+ if (xbuff == NULL || rbuff == NULL) {
+ printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, MTU change cancelled.\n",
+ ax->dev->name);
+ dev->mtu = ax->mtu;
+ if (xbuff != NULL)
+ kfree(xbuff);
+ if (rbuff != NULL)
+ kfree(rbuff);
+ return;
+ }
+
+ spin_lock_bh(&ax->buflock);
+
+ oxbuff = ax->xbuff;
+ ax->xbuff = xbuff;
+ orbuff = ax->rbuff;
+ ax->rbuff = rbuff;
+
+ if (ax->xleft) {
+ if (ax->xleft <= len) {
+ memcpy(ax->xbuff, ax->xhead, ax->xleft);
+ } else {
+ ax->xleft = 0;
+ ax->tx_dropped++;
+ }
+ }
+
+ ax->xhead = ax->xbuff;
+
+ if (ax->rcount) {
+ if (ax->rcount <= len) {
+ memcpy(ax->rbuff, orbuff, ax->rcount);
+ } else {
+ ax->rcount = 0;
+ ax->rx_over_errors++;
+ set_bit(AXF_ERROR, &ax->flags);
+ }
+ }
+
+ ax->mtu = dev->mtu + 73;
+ ax->buffsize = len;
+
+ spin_unlock_bh(&ax->buflock);
+
+ if (oxbuff != NULL)
+ kfree(oxbuff);
+ if (orbuff != NULL)
+ kfree(orbuff);
+}
+
+
+/* Set the "sending" flag. This must be atomic. */
+static inline void ax_lock(struct ax_disp *ax)
+{
+ netif_stop_queue(ax->dev);
+}
+
+
+/* Clear the "sending" flag. This must be atomic. */
+static inline void ax_unlock(struct ax_disp *ax)
+{
+ netif_start_queue(ax->dev);
+}
+
+/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
+static void ax_bump(struct ax_disp *ax)
+{
+ struct sk_buff *skb;
+ int count;
+
+ spin_lock_bh(&ax->buflock);
+ if (ax->rbuff[0] > 0x0f) {
+ if (ax->rbuff[0] & 0x20) {
+ ax->crcmode = CRC_MODE_FLEX;
+			if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
+				ax->rx_errors++;
+				spin_unlock_bh(&ax->buflock);
+				return;
+			}
+			ax->rcount -= 2;
+			/* dl9sau bugfix: the trailing two FlexNet CRC bytes
+			 * will not be passed to the kernel, so we have to
+			 * correct the kissparm signature, because it
+			 * indicates a CRC although none is present
+			 */
+			*ax->rbuff &= ~0x20;
+ }
+ }
+ spin_unlock_bh(&ax->buflock);
+
+ count = ax->rcount;
+
+ if ((skb = dev_alloc_skb(count)) == NULL) {
+ printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", ax->dev->name);
+ ax->rx_dropped++;
+ return;
+ }
+
+ skb->dev = ax->dev;
+ spin_lock_bh(&ax->buflock);
+ memcpy(skb_put(skb,count), ax->rbuff, count);
+ spin_unlock_bh(&ax->buflock);
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_AX25);
+ netif_rx(skb);
+ ax->dev->last_rx = jiffies;
+ ax->rx_packets++;
+ ax->rx_bytes+=count;
+}
+
+/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
+static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
+{
+ unsigned char *p;
+ int actual, count;
+
+ if (ax->mtu != ax->dev->mtu + 73) /* Someone has been ifconfigging */
+ ax_changedmtu(ax);
+
+ if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
+ len = ax->mtu;
+ printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
+ ax->tx_dropped++;
+ ax_unlock(ax);
+ return;
+ }
+
+ p = icp;
+
+ spin_lock_bh(&ax->buflock);
+ switch (ax->crcmode) {
+ unsigned short crc;
+
+ case CRC_MODE_FLEX:
+ *p |= 0x20;
+ crc = calc_crc_flex(p, len);
+ count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ break;
+
+ default:
+ count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ break;
+ }
+
+ ax->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
+ ax->tx_packets++;
+ ax->tx_bytes+=actual;
+ ax->dev->trans_start = jiffies;
+ ax->xleft = count - actual;
+ ax->xhead = ax->xbuff + actual;
+
+ spin_unlock_bh(&ax->buflock);
+}
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void ax25_write_wakeup(struct tty_struct *tty)
+{
+ int actual;
+ struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
+ return;
+ if (ax->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet
+ */
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ netif_wake_queue(ax->dev);
+ return;
+ }
+
+ actual = tty->driver->write(tty, ax->xhead, ax->xleft);
+ ax->xleft -= actual;
+ ax->xhead += actual;
+}
+
+/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
+static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ax_disp *ax = netdev_priv(dev);
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
+ return 1;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ /*
+		 * Maybe we should check the transmitter timeout here?
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ if (jiffies - dev->trans_start < 20 * HZ) {
+ /* 20 sec timeout not reached */
+ return 1;
+ }
+
+ printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
+ (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ?
+ "bad line quality" : "driver error");
+
+ ax->xleft = 0;
+ ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ ax_unlock(ax);
+ }
+
+ /* We were not busy, so we are now... :-) */
+ if (skb != NULL) {
+ ax_lock(ax);
+ ax_encaps(ax, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+
+/* Return the frame type ID */
+static int ax_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+ void *daddr, void *saddr, unsigned len)
+{
+#ifdef CONFIG_INET
+ if (type != htons(ETH_P_AX25))
+ return ax25_encapsulate(skb, dev, type, daddr, saddr, len);
+#endif
+ return 0;
+}
+
+
+static int ax_rebuild_header(struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ return ax25_rebuild_header(skb);
+#else
+ return 0;
+#endif
+}
+
+#endif /* CONFIG_{AX25,AX25_MODULE} */
+
+/* Open the low-level part of the AX25 channel. Easy! */
+static int ax_open(struct net_device *dev)
+{
+ struct ax_disp *ax = netdev_priv(dev);
+ unsigned long len;
+
+ if (ax->tty == NULL)
+ return -ENODEV;
+
+ /*
+ * Allocate the frame buffers:
+ *
+ * rbuff Receive buffer.
+ * xbuff Transmit buffer.
+ */
+ len = dev->mtu * 2;
+
+ /*
+	 * allow for the arrival of larger UDP packets, even if we say not to
+	 * accept them; this also works around SunOS sending 512-byte packets
+	 * even with an MSS of 128
+ */
+ if (len < 576 * 2)
+ len = 576 * 2;
+
+ if ((ax->rbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
+ goto norbuff;
+
+ if ((ax->xbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
+ goto noxbuff;
+
+ ax->mtu = dev->mtu + 73;
+ ax->buffsize = len;
+ ax->rcount = 0;
+ ax->xleft = 0;
+
+ ax->flags &= (1 << AXF_INUSE); /* Clear ESCAPE & ERROR flags */
+
+ spin_lock_init(&ax->buflock);
+
+ netif_start_queue(dev);
+ return 0;
+
+noxbuff:
+ kfree(ax->rbuff);
+
+norbuff:
+ return -ENOMEM;
+}
+
+
+/* Close the low-level part of the AX25 channel. Easy! */
+static int ax_close(struct net_device *dev)
+{
+ struct ax_disp *ax = netdev_priv(dev);
+
+ if (ax->tty == NULL)
+ return -EBUSY;
+
+ ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int ax25_receive_room(struct tty_struct *tty)
+{
+ return 65536; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of data has been received, which can now be decapsulated
+ * and sent on to the AX.25 layer for further processing.
+ */
+static void ax25_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+{
+ struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+
+ if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
+ return;
+
+ /*
+ * Argh! mtu change time! - costs us the packet part received
+ * at the change
+ */
+ if (ax->mtu != ax->dev->mtu + 73)
+ ax_changedmtu(ax);
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp != NULL && *fp++) {
+ if (!test_and_set_bit(AXF_ERROR, &ax->flags))
+ ax->rx_errors++;
+ cp++;
+ continue;
+ }
+
+ kiss_unesc(ax, *cp++);
+ }
+}
+
+static int ax25_open(struct tty_struct *tty)
+{
+ struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+ int err;
+
+ /* First make sure we're not already connected. */
+ if (ax && ax->magic == AX25_MAGIC)
+ return -EEXIST;
+
+ /* OK. Find a free AX25 channel to use. */
+ if ((ax = ax_alloc()) == NULL)
+ return -ENFILE;
+
+ ax->tty = tty;
+ tty->disc_data = ax;
+
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ /* Restore default settings */
+ ax->dev->type = ARPHRD_AX25;
+
+ /* Perform the low-level AX25 initialization. */
+ if ((err = ax_open(ax->dev)))
+ return err;
+
+ /* Done. We have linked the TTY line to a channel. */
+ return ax->dev->base_addr;
+}
+
+static void ax25_close(struct tty_struct *tty)
+{
+ struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (ax == NULL || ax->magic != AX25_MAGIC)
+ return;
+
+ unregister_netdev(ax->dev);
+
+ tty->disc_data = NULL;
+ ax->tty = NULL;
+
+ ax_free(ax);
+}
+
+
+static struct net_device_stats *ax_get_stats(struct net_device *dev)
+{
+ static struct net_device_stats stats;
+ struct ax_disp *ax = netdev_priv(dev);
+
+ memset(&stats, 0, sizeof(struct net_device_stats));
+
+ stats.rx_packets = ax->rx_packets;
+ stats.tx_packets = ax->tx_packets;
+ stats.rx_bytes = ax->rx_bytes;
+ stats.tx_bytes = ax->tx_bytes;
+ stats.rx_dropped = ax->rx_dropped;
+ stats.tx_dropped = ax->tx_dropped;
+ stats.tx_errors = ax->tx_errors;
+ stats.rx_errors = ax->rx_errors;
+ stats.rx_over_errors = ax->rx_over_errors;
+
+ return &stats;
+}
+
+
+/************************************************************************
+ * STANDARD ENCAPSULATION *
+ ************************************************************************/
+
+static int kiss_esc(unsigned char *s, unsigned char *d, int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c;
+
+ /*
+ * Send an initial END character to flush out any
+ * data that may have accumulated in the receiver
+ * due to line noise.
+ */
+
+ *ptr++ = END;
+
+ while (len-- > 0) {
+ switch (c = *s++) {
+ case END:
+ *ptr++ = ESC;
+ *ptr++ = ESC_END;
+ break;
+ case ESC:
+ *ptr++ = ESC;
+ *ptr++ = ESC_ESC;
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
+ }
+
+ *ptr++ = END;
+
+ return ptr - d;
+}
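+
+/*
+ * Illustrative example (not part of the original driver): with END = 0xC0,
+ * ESC = 0xDB, ESC_END = 0xDC and ESC_ESC = 0xDD (see mkiss.h), the three
+ * input bytes
+ *
+ *	0x00 0xC0 0xDB
+ *
+ * leave kiss_esc() as
+ *
+ *	0xC0  0x00  0xDB 0xDC  0xDB 0xDD  0xC0
+ *
+ * i.e. the frame is delimited by END markers and any in-band END/ESC byte is
+ * replaced by the two-byte sequence ESC ESC_END or ESC ESC_ESC.
+ */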
+
+/*
+ * MW:
+ * OK, it's ugly, but tell me a better solution that avoids copying the
+ * packet to a temporary buffer :-)
+ */
+static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c=0;
+
+ *ptr++ = END;
+ while (len > 0) {
+ if (len > 2)
+ c = *s++;
+ else if (len > 1)
+ c = crc >> 8;
+ else if (len > 0)
+ c = crc & 0xff;
+
+ len--;
+
+ switch (c) {
+ case END:
+ *ptr++ = ESC;
+ *ptr++ = ESC_END;
+ break;
+ case ESC:
+ *ptr++ = ESC;
+ *ptr++ = ESC_ESC;
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
+ }
+ *ptr++ = END;
+ return ptr - d;
+}
+
+static void kiss_unesc(struct ax_disp *ax, unsigned char s)
+{
+ switch (s) {
+ case END:
+ /* drop keeptest bit = VSV */
+ if (test_bit(AXF_KEEPTEST, &ax->flags))
+ clear_bit(AXF_KEEPTEST, &ax->flags);
+
+ if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
+ ax_bump(ax);
+
+ clear_bit(AXF_ESCAPE, &ax->flags);
+ ax->rcount = 0;
+ return;
+
+ case ESC:
+ set_bit(AXF_ESCAPE, &ax->flags);
+ return;
+ case ESC_ESC:
+ if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
+ s = ESC;
+ break;
+ case ESC_END:
+ if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
+ s = END;
+ break;
+ }
+
+ spin_lock_bh(&ax->buflock);
+ if (!test_bit(AXF_ERROR, &ax->flags)) {
+ if (ax->rcount < ax->buffsize) {
+ ax->rbuff[ax->rcount++] = s;
+ spin_unlock_bh(&ax->buflock);
+ return;
+ }
+
+ ax->rx_over_errors++;
+ set_bit(AXF_ERROR, &ax->flags);
+ }
+ spin_unlock_bh(&ax->buflock);
+}
+
+
+static int ax_set_mac_address(struct net_device *dev, void __user *addr)
+{
+ if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
+ return -EFAULT;
+ return 0;
+}
+
+static int ax_set_dev_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
+
+ return 0;
+}
+
+
+/* Perform I/O control on an active ax25 channel. */
+static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
+{
+ struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+ unsigned int tmp;
+
+ /* First make sure we're connected. */
+ if (ax == NULL || ax->magic != AX25_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ if (copy_to_user(arg, ax->dev->name, strlen(ax->dev->name) + 1))
+ return -EFAULT;
+ return 0;
+
+ case SIOCGIFENCAP:
+ return put_user(4, (int __user *)arg);
+
+ case SIOCSIFENCAP:
+ if (get_user(tmp, (int __user *)arg))
+ return -EFAULT;
+ ax->mode = tmp;
+ ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
+ ax->dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3;
+ ax->dev->type = ARPHRD_AX25;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return ax_set_mac_address(ax->dev, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int ax_open_dev(struct net_device *dev)
+{
+ struct ax_disp *ax = netdev_priv(dev);
+
+ if (ax->tty == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+
+/* Initialize the driver. Called by network startup. */
+static int ax25_init(struct net_device *dev)
+{
+ struct ax_disp *ax = netdev_priv(dev);
+
+ static char ax25_bcast[AX25_ADDR_LEN] =
+ {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
+ static char ax25_test[AX25_ADDR_LEN] =
+ {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
+
+ if (ax == NULL) /* Allocation failed ?? */
+ return -ENODEV;
+
+ /* Set up the "AX25 Control Block". (And clear statistics) */
+ memset(ax, 0, sizeof (struct ax_disp));
+ ax->magic = AX25_MAGIC;
+ ax->dev = dev;
+
+ /* Finish setting up the DEVICE info. */
+ dev->mtu = AX_MTU;
+ dev->hard_start_xmit = ax_xmit;
+ dev->open = ax_open_dev;
+ dev->stop = ax_close;
+ dev->get_stats = ax_get_stats;
+ dev->set_mac_address = ax_set_dev_mac_address;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_AX25;
+ dev->tx_queue_len = 10;
+ dev->hard_header = ax_header;
+ dev->rebuild_header = ax_rebuild_header;
+
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+
+ return 0;
+}
+
+
+/* ******************************************************************** */
+/* * Init MKISS driver * */
+/* ******************************************************************** */
+
+static int __init mkiss_init_driver(void)
+{
+ int status;
+
+ printk(banner);
+
+ if (ax25_maxdev < 4)
+ ax25_maxdev = 4; /* Sanity */
+
+ if ((ax25_ctrls = kmalloc(sizeof(void *) * ax25_maxdev, GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "mkiss: Can't allocate ax25_ctrls[] array!\n");
+ return -ENOMEM;
+ }
+
+ /* Clear the pointer array, we allocate devices when we need them */
+ memset(ax25_ctrls, 0, sizeof(void*) * ax25_maxdev); /* Pointers */
+
+ /* Fill in our line protocol discipline, and register it */
+ ax_ldisc.magic = TTY_LDISC_MAGIC;
+ ax_ldisc.name = "mkiss";
+ ax_ldisc.open = ax25_open;
+ ax_ldisc.close = ax25_close;
+ ax_ldisc.ioctl = (int (*)(struct tty_struct *, struct file *,
+ unsigned int, unsigned long))ax25_disp_ioctl;
+ ax_ldisc.receive_buf = ax25_receive_buf;
+ ax_ldisc.receive_room = ax25_receive_room;
+ ax_ldisc.write_wakeup = ax25_write_wakeup;
+
+ if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0) {
+ printk(KERN_ERR "mkiss: can't register line discipline (err = %d)\n", status);
+ kfree(ax25_ctrls);
+ }
+ return status;
+}
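+
+/*
+ * Illustrative note (not part of the original driver): the line discipline
+ * registered above is normally attached to a serial port from user space,
+ * e.g. by running a tool such as kissattach from the ax25-tools package on
+ * /dev/ttySx; that is what eventually calls ax25_open() through the tty
+ * layer and creates the ax0, ax1, ... network interfaces.
+ */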
+
+static void __exit mkiss_exit_driver(void)
+{
+ int i;
+
+ for (i = 0; i < ax25_maxdev; i++) {
+ if (ax25_ctrls[i]) {
+			/*
+			 * VSV: if the device is no longer running, it was
+			 * already unregistered during close processing.
+			 */
+ if (netif_running(&ax25_ctrls[i]->dev))
+ unregister_netdev(&ax25_ctrls[i]->dev);
+ kfree(ax25_ctrls[i]);
+ }
+ }
+
+ kfree(ax25_ctrls);
+ ax25_ctrls = NULL;
+
+ if ((i = tty_register_ldisc(N_AX25, NULL)))
+ printk(KERN_ERR "mkiss: can't unregister line discipline (err = %d)\n", i);
+}
+
+MODULE_AUTHOR("Hans Albas PE1AYX <hans@esrac.ele.tue.nl>");
+MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
+MODULE_PARM(ax25_maxdev, "i");
+MODULE_PARM_DESC(ax25_maxdev, "number of MKISS devices");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_AX25);
+module_init(mkiss_init_driver);
+module_exit(mkiss_exit_driver);
+
diff --git a/drivers/net/hamradio/mkiss.h b/drivers/net/hamradio/mkiss.h
new file mode 100644
index 000000000000..4ab700478598
--- /dev/null
+++ b/drivers/net/hamradio/mkiss.h
@@ -0,0 +1,62 @@
+/****************************************************************************
+ * Defines for the Multi-KISS driver.
+ ****************************************************************************/
+
+#define AX25_MAXDEV	16		/* Maximum number of AX.25 channels;
+					   can be overridden at load time via
+					   the ax25_maxdev module parameter */
+#define AX_MTU 236
+
+/* SLIP/KISS protocol characters. */
+#define END 0300 /* indicates end of frame */
+#define ESC 0333 /* indicates byte stuffing */
+#define ESC_END 0334 /* ESC ESC_END means END 'data' */
+#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
+
+struct ax_disp {
+ int magic;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char *rbuff; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char *xbuff; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next byte to XMIT */
+ int xleft; /* bytes left in XMIT queue */
+
+ /* SLIP interface statistics. */
+ unsigned long rx_packets; /* inbound frames counter */
+ unsigned long tx_packets; /* outbound frames counter */
+ unsigned long rx_bytes; /* inbound bytes counter */
+ unsigned long tx_bytes; /* outbound bytes counter */
+ unsigned long rx_errors; /* Parity, etc. errors */
+ unsigned long tx_errors; /* Planned stuff */
+ unsigned long rx_dropped; /* No memory for skb */
+	unsigned long	tx_dropped;	/* dropped during an MTU change */
+	unsigned long	rx_over_errors;	/* frame bigger than the buffer */
+
+ /* Detailed SLIP statistics. */
+ int mtu; /* Our mtu (to spot changes!) */
+ int buffsize; /* Max buffers sizes */
+
+
+ unsigned long flags; /* Flag values/ mode etc */
+ /* long req'd: used by set_bit --RR */
+#define AXF_INUSE 0 /* Channel in use */
+#define AXF_ESCAPE 1 /* ESC received */
+#define AXF_ERROR 2 /* Parity, etc. error */
+#define AXF_KEEPTEST 3 /* Keepalive test flag */
+#define AXF_OUTWAIT	4	/* an output packet is waiting */
+
+ int mode;
+ int crcmode; /* MW: for FlexNet, SMACK etc. */
+#define CRC_MODE_NONE 0
+#define CRC_MODE_FLEX 1
+#define CRC_MODE_SMACK 2
+ spinlock_t buflock; /* lock for rbuf and xbuf */
+};
+
+#define AX25_MAGIC 0x5316
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
new file mode 100644
index 000000000000..ce9e7af020da
--- /dev/null
+++ b/drivers/net/hamradio/scc.c
@@ -0,0 +1,2191 @@
+#define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
+
+#define VERSION "3.0"
+
+/*
+ * Please use z8530drv-utils-3.0 with this version.
+ * ------------------
+ *
+ * You can find a subset of the documentation in
+ * Documentation/networking/z8530drv.txt.
+ */
+
+/*
+ ********************************************************************
+ * SCC.C - Linux driver for Z8530 based HDLC cards for AX.25 *
+ ********************************************************************
+
+
+ ********************************************************************
+
+ Copyright (c) 1993, 2000 Joerg Reuter DL1BKE
+
+ portions (c) 1993 Guido ten Dolle PE1NNZ
+
+ ********************************************************************
+
+ The driver and the programs in the archive are UNDER CONSTRUCTION.
+	The code is likely to fail, and so could your kernel --- or even
+	a whole network.
+
+ This driver is intended for Amateur Radio use. If you are running it
+ for commercial purposes, please drop me a note. I am nosy...
+
+ ...BUT:
+
+ ! You m u s t recognize the appropriate legislations of your country !
+ ! before you connect a radio to the SCC board and start to transmit or !
+ ! receive. The GPL allows you to use the d r i v e r, NOT the RADIO! !
+
+ For non-Amateur-Radio use please note that you might need a special
+ allowance/licence from the designer of the SCC Board and/or the
+ MODEM.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the (modified) GNU General Public License
+ delivered with the Linux kernel source.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should find a copy of the GNU General Public License in
+ /usr/src/linux/COPYING;
+
+ ********************************************************************
+
+
+ Incomplete history of z8530drv:
+ -------------------------------
+
+ 1994-09-13 started to write the driver, rescued most of my own
+ code (and Hans Alblas' memory buffer pool concept) from
+ an earlier project "sccdrv" which was initiated by
+ Guido ten Dolle. Not much of the old driver survived,
+ though. The first version I put my hands on was sccdrv1.3
+ from August 1993. The memory buffer pool concept
+ appeared in an unauthorized sccdrv version (1.5) from
+ August 1994.
+
+ 1995-01-31 changed copyright notice to GPL without limitations.
+
+ .
+ . <SNIP>
+ .
+
+ 1996-10-05 New semester, new driver...
+
+ * KISS TNC emulator removed (TTY driver)
+ * Source moved to drivers/net/
+ * Includes Z8530 defines from drivers/net/z8530.h
+ * Uses sk_buffer memory management
+ * Reduced overhead of /proc/net/z8530drv output
+	* Streamlined quite a lot of things
+ * Invents brand new bugs... ;-)
+
+	The move to version number 3.0 reflects these changes.
+ You can use 'kissbridge' if you need a KISS TNC emulator.
+
+ 1996-12-13 Fixed for Linux networking changes. (G4KLX)
+ 1997-01-08 Fixed the remaining problems.
+ 1997-04-02 Hopefully fixed the problems with the new *_timer()
+ routines, added calibration code.
+ 1997-10-12 Made SCC_DELAY a CONFIG option, added CONFIG_SCC_TRXECHO
+ 1998-01-29 Small fix to avoid lock-up on initialization
+ 1998-09-29 Fixed the "grouping" bugs, tx_inhibit works again,
+			now using dev->tx_queue_len instead of MAXQUEUE.
+	1998-10-21	Postponed the spinlock changes; they would need a lot of
+			testing that I currently don't have time for. Softdcd doesn't
+			work.
+ 1998-11-04 Softdcd does not work correctly in DPLL mode, in fact it
+ never did. The DPLL locks on noise, the SYNC unit sees
+ flags that aren't... Restarting the DPLL does not help
+			either; it resynchronizes too slowly and the first received
+ frame gets lost.
+ 2000-02-13 Fixed for new network driver interface changes, still
+ does TX timeouts itself since it uses its own queue
+ scheme.
+
+ Thanks to all who contributed to this driver with ideas and bug
+ reports!
+
+ NB -- if you find errors, change something, please let me know
+ first before you distribute it... And please don't touch
+ the version number. Just replace my callsign in
+ "v3.0.dl1bke" with your own. Just to avoid confusion...
+
+ If you want to add your modification to the linux distribution
+ please (!) contact me first.
+
+ New versions of the driver will be announced on the linux-hams
+ mailing list on vger.kernel.org. To subscribe send an e-mail
+ to majordomo@vger.kernel.org with the following line in
+ the body of the mail:
+
+ subscribe linux-hams
+
+ The content of the "Subject" field will be ignored.
+
+ vy 73,
+ Joerg Reuter ampr-net: dl1bke@db0pra.ampr.org
+ AX-25 : DL1BKE @ DB0ABH.#BAY.DEU.EU
+ Internet: jreuter@yaina.de
+ www : http://yaina.de/jreuter
+*/
+
+/* ----------------------------------------------------------------------- */
+
+#undef  SCC_LDELAY		/* slow it down even a bit more */
+#undef  SCC_DONT_CHECK		/* don't check whether the specified SCCs are actually present */
+
+#define SCC_MAXCHIPS	4       /* maximum number of supported chips */
+#define SCC_BUFSIZE 384 /* must not exceed 4096 */
+#undef SCC_DEBUG
+
+#define SCC_DEFAULT_CLOCK 4915200
+ /* default pclock if nothing is specified */
+
+/* ----------------------------------------------------------------------- */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/init.h>
+#include <linux/scc.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+
+#include <net/ax25.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "z8530.h"
+
+static char banner[] __initdata = KERN_INFO "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
+
+static void t_dwait(unsigned long);
+static void t_txdelay(unsigned long);
+static void t_tail(unsigned long);
+static void t_busy(unsigned long);
+static void t_maxkeyup(unsigned long);
+static void t_idle(unsigned long);
+static void scc_tx_done(struct scc_channel *);
+static void scc_start_tx_timer(struct scc_channel *, void (*)(unsigned long), unsigned long);
+static void scc_start_maxkeyup(struct scc_channel *);
+static void scc_start_defer(struct scc_channel *);
+
+static void z8530_init(void);
+
+static void init_channel(struct scc_channel *scc);
+static void scc_key_trx (struct scc_channel *scc, char tx);
+static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
+static void scc_init_timer(struct scc_channel *scc);
+
+static int scc_net_alloc(const char *name, struct scc_channel *scc);
+static void scc_net_setup(struct net_device *dev);
+static int scc_net_open(struct net_device *dev);
+static int scc_net_close(struct net_device *dev);
+static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
+static int scc_net_tx(struct sk_buff *skb, struct net_device *dev);
+static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_net_set_mac_address(struct net_device *dev, void *addr);
+static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
+
+static unsigned char SCC_DriverName[] = "scc";
+
+static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
+
+static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS]; /* information per channel */
+
+static struct scc_ctrl {
+ io_port chan_A;
+ io_port chan_B;
+ int irq;
+} SCC_ctrl[SCC_MAXCHIPS+1];
+
+static unsigned char Driver_Initialized;
+static int Nchips;
+static io_port Vector_Latch;
+
+
+/* ******************************************************************** */
+/* * Port Access Functions * */
+/* ******************************************************************** */
+
+/* These provide interrupt-safe 2-step access to the Z8530 registers */
+
+static DEFINE_SPINLOCK(iolock); /* Guards paired accesses */
+
+static inline unsigned char InReg(io_port port, unsigned char reg)
+{
+ unsigned long flags;
+ unsigned char r;
+
+ spin_lock_irqsave(&iolock, flags);
+#ifdef SCC_LDELAY
+ Outb(port, reg);
+ udelay(SCC_LDELAY);
+ r=Inb(port);
+ udelay(SCC_LDELAY);
+#else
+ Outb(port, reg);
+ r=Inb(port);
+#endif
+ spin_unlock_irqrestore(&iolock, flags);
+ return r;
+}
+
+static inline void OutReg(io_port port, unsigned char reg, unsigned char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iolock, flags);
+#ifdef SCC_LDELAY
+ Outb(port, reg); udelay(SCC_LDELAY);
+ Outb(port, val); udelay(SCC_LDELAY);
+#else
+ Outb(port, reg);
+ Outb(port, val);
+#endif
+ spin_unlock_irqrestore(&iolock, flags);
+}
+
+static inline void wr(struct scc_channel *scc, unsigned char reg,
+ unsigned char val)
+{
+ OutReg(scc->ctrl, reg, (scc->wreg[reg] = val));
+}
+
+static inline void or(struct scc_channel *scc, unsigned char reg, unsigned char val)
+{
+ OutReg(scc->ctrl, reg, (scc->wreg[reg] |= val));
+}
+
+static inline void cl(struct scc_channel *scc, unsigned char reg, unsigned char val)
+{
+ OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
+}
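+
+/*
+ * Illustrative note (not part of the original driver): most Z8530 write
+ * registers cannot be read back, so scc->wreg[] keeps a shadow copy of the
+ * last value written.  wr()/or()/cl() update both the shadow and the chip,
+ * e.g.
+ *
+ *	or(scc, R3, RxENABLE);		set the Rx enable bit
+ *	cl(scc, R3, RxENABLE);		clear it again later
+ *
+ * so a later read-modify-write always starts from the correct value.
+ */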
+
+/* ******************************************************************** */
+/* * Some useful macros * */
+/* ******************************************************************** */
+
+static inline void scc_discard_buffers(struct scc_channel *scc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ if (scc->tx_buff != NULL)
+ {
+ dev_kfree_skb(scc->tx_buff);
+ scc->tx_buff = NULL;
+ }
+
+ while (skb_queue_len(&scc->tx_queue))
+ dev_kfree_skb(skb_dequeue(&scc->tx_queue));
+
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+
+
+/* ******************************************************************** */
+/* * Interrupt Service Routines * */
+/* ******************************************************************** */
+
+
+/* ----> subroutines for the interrupt handlers <---- */
+
+static inline void scc_notify(struct scc_channel *scc, int event)
+{
+ struct sk_buff *skb;
+ char *bp;
+
+ if (scc->kiss.fulldup != KISS_DUPLEX_OPTIMA)
+ return;
+
+ skb = dev_alloc_skb(2);
+ if (skb != NULL)
+ {
+ bp = skb_put(skb, 2);
+ *bp++ = PARAM_HWEVENT;
+ *bp++ = event;
+ scc_net_rx(scc, skb);
+ } else
+ scc->stat.nospace++;
+}
+
+static inline void flush_rx_FIFO(struct scc_channel *scc)
+{
+ int k;
+
+ for (k=0; k<3; k++)
+ Inb(scc->data);
+
+ if(scc->rx_buff != NULL) /* did we receive something? */
+ {
+ scc->stat.rxerrs++; /* then count it as an error */
+ dev_kfree_skb_irq(scc->rx_buff);
+ scc->rx_buff = NULL;
+ }
+}
+
+static void start_hunt(struct scc_channel *scc)
+{
+ if ((scc->modem.clocksrc != CLK_EXTERNAL))
+ OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]); /* DPLL: enter search mode */
+ or(scc,R3,ENT_HM|RxENABLE); /* enable the receiver, hunt mode */
+}
+
+/* ----> four different interrupt handlers for Tx, Rx, changing of */
+/* DCD/CTS and Rx/Tx errors */
+
+/* Transmitter interrupt handler */
+static inline void scc_txint(struct scc_channel *scc)
+{
+ struct sk_buff *skb;
+
+ scc->stat.txints++;
+ skb = scc->tx_buff;
+
+ /* send first octet */
+
+ if (skb == NULL)
+ {
+ skb = skb_dequeue(&scc->tx_queue);
+ scc->tx_buff = skb;
+ netif_wake_queue(scc->dev);
+
+ if (skb == NULL)
+ {
+ scc_tx_done(scc);
+ Outb(scc->ctrl, RES_Tx_P);
+ return;
+ }
+
+ if (skb->len == 0) /* Paranoia... */
+ {
+ dev_kfree_skb_irq(skb);
+ scc->tx_buff = NULL;
+ scc_tx_done(scc);
+ Outb(scc->ctrl, RES_Tx_P);
+ return;
+ }
+
+ scc->stat.tx_state = TXS_ACTIVE;
+
+ OutReg(scc->ctrl, R0, RES_Tx_CRC);
+ /* reset CRC generator */
+ or(scc,R10,ABUNDER); /* re-install underrun protection */
+ Outb(scc->data,*skb->data); /* send byte */
+ skb_pull(skb, 1);
+
+ if (!scc->enhanced) /* reset EOM latch */
+ Outb(scc->ctrl,RES_EOM_L);
+ return;
+ }
+
+ /* End Of Frame... */
+
+ if (skb->len == 0)
+ {
+ Outb(scc->ctrl, RES_Tx_P); /* reset pending int */
+ cl(scc, R10, ABUNDER); /* send CRC */
+ dev_kfree_skb_irq(skb);
+ scc->tx_buff = NULL;
+ scc->stat.tx_state = TXS_NEWFRAME; /* next frame... */
+ return;
+ }
+
+ /* send octet */
+
+ Outb(scc->data,*skb->data);
+ skb_pull(skb, 1);
+}
+
+
+/* External/Status interrupt handler */
+static inline void scc_exint(struct scc_channel *scc)
+{
+ unsigned char status,changes,chg_and_stat;
+
+ scc->stat.exints++;
+
+ status = InReg(scc->ctrl,R0);
+ changes = status ^ scc->status;
+ chg_and_stat = changes & status;
+
+ /* ABORT: generated whenever DCD drops while receiving */
+
+ if (chg_and_stat & BRK_ABRT) /* Received an ABORT */
+ flush_rx_FIFO(scc);
+
+ /* HUNT: software DCD; on = waiting for SYNC, off = receiving frame */
+
+ if ((changes & SYNC_HUNT) && scc->kiss.softdcd)
+ {
+ if (status & SYNC_HUNT)
+ {
+ scc->dcd = 0;
+ flush_rx_FIFO(scc);
+ if ((scc->modem.clocksrc != CLK_EXTERNAL))
+ OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]); /* DPLL: enter search mode */
+ } else {
+ scc->dcd = 1;
+ }
+
+ scc_notify(scc, scc->dcd? HWEV_DCD_OFF:HWEV_DCD_ON);
+ }
+
+ /* DCD: on = start to receive packet, off = ABORT condition */
+ /* (a successfully received packet generates a special condition int) */
+
+ if((changes & DCD) && !scc->kiss.softdcd) /* DCD input changed state */
+ {
+ if(status & DCD) /* DCD is now ON */
+ {
+ start_hunt(scc);
+ scc->dcd = 1;
+ } else { /* DCD is now OFF */
+ cl(scc,R3,ENT_HM|RxENABLE); /* disable the receiver */
+ flush_rx_FIFO(scc);
+ scc->dcd = 0;
+ }
+
+ scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
+ }
+
+#ifdef notdef
+ /* CTS: use external TxDelay (what's that good for?!)
+ * Anyway: If we _could_ use it (BayCom USCC uses CTS for
+ * own purposes) we _should_ use the "autoenable" feature
+ * of the Z8530 and not this interrupt...
+ */
+
+ if (chg_and_stat & CTS) /* CTS is now ON */
+ {
+ if (scc->kiss.txdelay == 0) /* zero TXDELAY = wait for CTS */
+ scc_start_tx_timer(scc, t_txdelay, 0);
+ }
+#endif
+
+ if (scc->stat.tx_state == TXS_ACTIVE && (status & TxEOM))
+ {
+ scc->stat.tx_under++; /* oops, an underrun! count 'em */
+ Outb(scc->ctrl, RES_EXT_INT); /* reset ext/status interrupts */
+
+ if (scc->tx_buff != NULL)
+ {
+ dev_kfree_skb_irq(scc->tx_buff);
+ scc->tx_buff = NULL;
+ }
+
+ or(scc,R10,ABUNDER);
+ scc_start_tx_timer(scc, t_txdelay, 0); /* restart transmission */
+ }
+
+ scc->status = status;
+ Outb(scc->ctrl,RES_EXT_INT);
+}
+
+
+/* Receiver interrupt handler */
+static inline void scc_rxint(struct scc_channel *scc)
+{
+ struct sk_buff *skb;
+
+ scc->stat.rxints++;
+
+ if((scc->wreg[5] & RTS) && scc->kiss.fulldup == KISS_DUPLEX_HALF)
+ {
+ Inb(scc->data); /* discard char */
+ or(scc,R3,ENT_HM); /* enter hunt mode for next flag */
+ return;
+ }
+
+ skb = scc->rx_buff;
+
+ if (skb == NULL)
+ {
+ skb = dev_alloc_skb(scc->stat.bufsize);
+ if (skb == NULL)
+ {
+ scc->dev_stat.rx_dropped++;
+ scc->stat.nospace++;
+ Inb(scc->data);
+ or(scc, R3, ENT_HM);
+ return;
+ }
+
+ scc->rx_buff = skb;
+ *(skb_put(skb, 1)) = 0; /* KISS data */
+ }
+
+ if (skb->len >= scc->stat.bufsize)
+ {
+#ifdef notdef
+ printk(KERN_DEBUG "z8530drv: oops, scc_rxint() received huge frame...\n");
+#endif
+ dev_kfree_skb_irq(skb);
+ scc->rx_buff = NULL;
+ Inb(scc->data);
+ or(scc, R3, ENT_HM);
+ return;
+ }
+
+ *(skb_put(skb, 1)) = Inb(scc->data);
+}
+
+
+/* Receive Special Condition interrupt handler */
+static inline void scc_spint(struct scc_channel *scc)
+{
+ unsigned char status;
+ struct sk_buff *skb;
+
+ scc->stat.spints++;
+
+ status = InReg(scc->ctrl,R1); /* read receiver status */
+
+ Inb(scc->data); /* throw away Rx byte */
+ skb = scc->rx_buff;
+
+ if(status & Rx_OVR) /* receiver overrun */
+ {
+ scc->stat.rx_over++; /* count them */
+ or(scc,R3,ENT_HM); /* enter hunt mode for next flag */
+
+ if (skb != NULL)
+ dev_kfree_skb_irq(skb);
+ scc->rx_buff = skb = NULL;
+ }
+
+ if(status & END_FR && skb != NULL) /* end of frame */
+ {
+ /* CRC okay, frame ends on 8 bit boundary and received something ? */
+
+ if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
+ {
+ /* ignore last received byte (first of the CRC bytes) */
+ skb_trim(skb, skb->len-1);
+ scc_net_rx(scc, skb);
+ scc->rx_buff = NULL;
+ scc->stat.rxframes++;
+ } else { /* a bad frame */
+ dev_kfree_skb_irq(skb);
+ scc->rx_buff = NULL;
+ scc->stat.rxerrs++;
+ }
+ }
+
+ Outb(scc->ctrl,ERR_RES);
+}
+
+
+/* ----> interrupt service routine for the Z8530 <---- */
+
+static void scc_isr_dispatch(struct scc_channel *scc, int vector)
+{
+ spin_lock(&scc->lock);
+ switch (vector & VECTOR_MASK)
+ {
+ case TXINT: scc_txint(scc); break;
+ case EXINT: scc_exint(scc); break;
+ case RXINT: scc_rxint(scc); break;
+ case SPINT: scc_spint(scc); break;
+ }
+ spin_unlock(&scc->lock);
+}
+
+/* If the card has a latch for the interrupt vector (like the PA0HZP card)
+ use it to get the number of the chip that generated the int.
+ If not: poll all defined chips.
+ */
+
+#define SCC_IRQTIMEOUT 30000
+
+static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned char vector;
+ struct scc_channel *scc;
+ struct scc_ctrl *ctrl;
+ int k;
+
+ if (Vector_Latch)
+ {
+ for(k=0; k < SCC_IRQTIMEOUT; k++)
+ {
+ Outb(Vector_Latch, 0); /* Generate INTACK */
+
+ /* Read the vector */
+ if((vector=Inb(Vector_Latch)) >= 16 * Nchips) break;
+ if (vector & 0x01) break;
+
+ scc=&SCC_Info[vector >> 3 ^ 0x01];
+ if (!scc->dev) break;
+
+ scc_isr_dispatch(scc, vector);
+
+ OutReg(scc->ctrl,R0,RES_H_IUS); /* Reset Highest IUS */
+ }
+
+ if (k == SCC_IRQTIMEOUT)
+ printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?\n");
+
+ return IRQ_HANDLED;
+ }
+
+ /* Find the SCC generating the interrupt by polling all attached SCCs
+ * reading RR3A (the interrupt pending register)
+ */
+
+ ctrl = SCC_ctrl;
+ while (ctrl->chan_A)
+ {
+ if (ctrl->irq != irq)
+ {
+ ctrl++;
+ continue;
+ }
+
+ scc = NULL;
+ for (k = 0; InReg(ctrl->chan_A,R3) && k < SCC_IRQTIMEOUT; k++)
+ {
+ vector=InReg(ctrl->chan_B,R2); /* Read the vector */
+ if (vector & 0x01) break;
+
+ scc = &SCC_Info[vector >> 3 ^ 0x01];
+ if (!scc->dev) break;
+
+ scc_isr_dispatch(scc, vector);
+ }
+
+ if (k == SCC_IRQTIMEOUT)
+ {
+ printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?!\n");
+ break;
+ }
+
+ /* This looks weird and it is. At least the BayCom USCC doesn't
+ * use the Interrupt Daisy Chain, thus we'll have to start
+ * all over again to be sure not to miss an interrupt from
+ * (any of) the other chip(s)...
+ * Honestly, the situation *is* braindamaged...
+ */
+
+ if (scc != NULL)
+ {
+ OutReg(scc->ctrl,R0,RES_H_IUS);
+ ctrl = SCC_ctrl;
+ } else
+ ctrl++;
+ }
+ return IRQ_HANDLED;
+}
+
+
+
+/* ******************************************************************** */
+/* * Init Channel */
+/* ******************************************************************** */
+
+
+/* ----> set SCC channel speed <---- */
+
+static inline void set_brg(struct scc_channel *scc, unsigned int tc)
+{
+ cl(scc,R14,BRENABL); /* disable baudrate generator */
+ wr(scc,R12,tc & 255); /* brg rate LOW */
+ wr(scc,R13,tc >> 8); /* brg rate HIGH */
+ or(scc,R14,BRENABL); /* enable baudrate generator */
+}
+
+static inline void set_speed(struct scc_channel *scc)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&scc->lock, flags);
+
+ if (scc->modem.speed > 0) /* paranoia... */
+ set_brg(scc, (unsigned) (scc->clock / (scc->modem.speed * 64)) - 2);
+
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
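+
+/*
+ * Illustrative arithmetic (not part of the original driver): assuming
+ * scc->clock is the default PCLK of 4915200 Hz (SCC_DEFAULT_CLOCK above)
+ * and scc->modem.speed is 1200 bit/s, set_speed() programs the baud rate
+ * generator with the time constant
+ *
+ *	tc = 4915200 / (1200 * 64) - 2 = 64 - 2 = 62
+ *
+ * which set_brg() splits into R12 (low byte) and R13 (high byte).
+ */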
+
+
+/* ----> initialize a SCC channel <---- */
+
+static inline void init_brg(struct scc_channel *scc)
+{
+ wr(scc, R14, BRSRC); /* BRG source = PCLK */
+ OutReg(scc->ctrl, R14, SSBR|scc->wreg[R14]); /* DPLL source = BRG */
+ OutReg(scc->ctrl, R14, SNRZI|scc->wreg[R14]); /* DPLL NRZI mode */
+}
+
+/*
+ * Initialization according to the Z8530 manual (SGS-Thomson's version):
+ *
+ * 1. Modes and constants
+ *
+ * WR9 11000000 chip reset
+ * WR4 XXXXXXXX Tx/Rx control, async or sync mode
+ * WR1 0XX00X00 select W/REQ (optional)
+ * WR2 XXXXXXXX program interrupt vector
+ * WR3 XXXXXXX0 select Rx control
+ * WR5 XXXX0XXX select Tx control
+ * WR6 XXXXXXXX sync character
+ * WR7 XXXXXXXX sync character
+ * WR9 000X0XXX select interrupt control
+ * WR10 XXXXXXXX miscellaneous control (optional)
+ * WR11 XXXXXXXX clock control
+ * WR12 XXXXXXXX time constant lower byte (optional)
+ * WR13 XXXXXXXX time constant upper byte (optional)
+ * WR14 XXXXXXX0 miscellaneous control
+ * WR14 XXXSSSSS commands (optional)
+ *
+ * 2. Enables
+ *
+ * WR14 000SSSS1 baud rate enable
+ * WR3 SSSSSSS1 Rx enable
+ * WR5 SSSS1SSS Tx enable
+ * WR0 10000000 reset Tx CRG (optional)
+ * WR1 XSS00S00 DMA enable (optional)
+ *
+ * 3. Interrupt status
+ *
+ * WR15 XXXXXXXX enable external/status
+ * WR0 00010000 reset external status
+ * WR0 00010000 reset external status twice
+ * WR1 SSSXXSXX enable Rx, Tx and Ext/status
+ * WR9 000SXSSS enable master interrupt enable
+ *
+ * 1 = set to one, 0 = reset to zero
+ * X = user defined, S = same as previous init
+ *
+ *
+ * Note that the implementation differs in some points from above scheme.
+ *
+ */
+
+static void init_channel(struct scc_channel *scc)
+{
+ del_timer(&scc->tx_t);
+ del_timer(&scc->tx_wdog);
+
+ disable_irq(scc->irq);
+
+ wr(scc,R4,X1CLK|SDLC); /* *1 clock, SDLC mode */
+ wr(scc,R1,0); /* no W/REQ operation */
+ wr(scc,R3,Rx8|RxCRC_ENAB); /* RX 8 bits/char, CRC, disabled */
+ wr(scc,R5,Tx8|DTR|TxCRC_ENAB); /* TX 8 bits/char, disabled, DTR */
+ wr(scc,R6,0); /* SDLC address zero (not used) */
+ wr(scc,R7,FLAG); /* SDLC flag value */
+ wr(scc,R9,VIS); /* vector includes status */
+ wr(scc,R10,(scc->modem.nrz? NRZ : NRZI)|CRCPS|ABUNDER); /* abort on underrun, preset CRC generator, NRZ(I) */
+ wr(scc,R14, 0);
+
+
+/* set clock sources:
+
+ CLK_DPLL: normal halfduplex operation
+
+ RxClk: use DPLL
+ TxClk: use DPLL
+ TRxC mode DPLL output
+
+ CLK_EXTERNAL: external clocking (G3RUH or DF9IC modem)
+
+ BayCom: others:
+
+ TxClk = pin RTxC TxClk = pin TRxC
+ RxClk = pin TRxC RxClk = pin RTxC
+
+
+ CLK_DIVIDER:
+ RxClk = use DPLL
+ TxClk = pin RTxC
+
+ BayCom: others:
+ pin TRxC = DPLL pin TRxC = BRG
+ (RxClk * 1) (RxClk * 32)
+*/
+
+
+ switch(scc->modem.clocksrc)
+ {
+ case CLK_DPLL:
+ wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
+ init_brg(scc);
+ break;
+
+ case CLK_DIVIDER:
+ wr(scc, R11, ((scc->brand & BAYCOM)? TRxCDP : TRxCBR) | RCDPLL|TCRTxCP|TRxCOI);
+ init_brg(scc);
+ break;
+
+ case CLK_EXTERNAL:
+ wr(scc, R11, (scc->brand & BAYCOM)? RCTRxCP|TCRTxCP : RCRTxCP|TCTRxCP);
+ OutReg(scc->ctrl, R14, DISDPLL);
+ break;
+
+ }
+
+ set_speed(scc); /* set baudrate */
+
+ if(scc->enhanced)
+ {
+ or(scc,R15,SHDLCE|FIFOE); /* enable FIFO, SDLC/HDLC Enhancements (From now R7 is R7') */
+ wr(scc,R7,AUTOEOM);
+ }
+
+ if(scc->kiss.softdcd || (InReg(scc->ctrl,R0) & DCD))
+ /* DCD is now ON */
+ {
+ start_hunt(scc);
+ }
+
+ /* enable ABORT, DCD & SYNC/HUNT interrupts */
+
+ wr(scc,R15, BRKIE|TxUIE|(scc->kiss.softdcd? SYNCIE:DCDIE));
+
+ Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
+ Outb(scc->ctrl,RES_EXT_INT); /* must be done twice */
+
+ or(scc,R1,INT_ALL_Rx|TxINT_ENAB|EXT_INT_ENAB); /* enable interrupts */
+
+ scc->status = InReg(scc->ctrl,R0); /* read initial status */
+
+ or(scc,R9,MIE); /* master interrupt enable */
+
+ scc_init_timer(scc);
+
+ enable_irq(scc->irq);
+}
+
+
+
+
+/* ******************************************************************** */
+/* * SCC timer functions * */
+/* ******************************************************************** */
+
+
+/* ----> scc_key_trx sets the time constant for the baudrate
+ generator and keys the transmitter <---- */
+
+static void scc_key_trx(struct scc_channel *scc, char tx)
+{
+ unsigned int time_const;
+
+ if (scc->brand & PRIMUS)
+ Outb(scc->ctrl + 4, scc->option | (tx? 0x80 : 0));
+
+ if (scc->modem.speed < 300)
+ scc->modem.speed = 1200;
+
+ time_const = (unsigned) (scc->clock / (scc->modem.speed * (tx? 2:64))) - 2;
+
+ disable_irq(scc->irq);
+
+ if (tx)
+ {
+ or(scc, R1, TxINT_ENAB); /* t_maxkeyup may have reset these */
+ or(scc, R15, TxUIE);
+ }
+
+ if (scc->modem.clocksrc == CLK_DPLL)
+ { /* force simplex operation */
+ if (tx)
+ {
+#ifdef CONFIG_SCC_TRXECHO
+ cl(scc, R3, RxENABLE|ENT_HM); /* switch off receiver */
+ cl(scc, R15, DCDIE|SYNCIE); /* No DCD changes, please */
+#endif
+ set_brg(scc, time_const); /* reprogram baudrate generator */
+
+ /* DPLL -> Rx clk, BRG -> Tx CLK, TRxC mode output, TRxC = BRG */
+ wr(scc, R11, RCDPLL|TCBR|TRxCOI|TRxCBR);
+
+ /* By popular demand: tx_inhibit */
+ if (scc->kiss.tx_inhibit)
+ {
+ or(scc,R5, TxENAB);
+ scc->wreg[R5] |= RTS;
+ } else {
+ or(scc,R5,RTS|TxENAB); /* set the RTS line and enable TX */
+ }
+ } else {
+ cl(scc,R5,RTS|TxENAB);
+
+ set_brg(scc, time_const); /* reprogram baudrate generator */
+
+ /* DPLL -> Rx clk, DPLL -> Tx CLK, TRxC mode output, TRxC = DPLL */
+ wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
+
+#ifndef CONFIG_SCC_TRXECHO
+ if (scc->kiss.softdcd)
+#endif
+ {
+ or(scc,R15, scc->kiss.softdcd? SYNCIE:DCDIE);
+ start_hunt(scc);
+ }
+ }
+ } else {
+ if (tx)
+ {
+#ifdef CONFIG_SCC_TRXECHO
+ if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
+ {
+ cl(scc, R3, RxENABLE);
+ cl(scc, R15, DCDIE|SYNCIE);
+ }
+#endif
+
+ if (scc->kiss.tx_inhibit)
+ {
+ or(scc,R5, TxENAB);
+ scc->wreg[R5] |= RTS;
+ } else {
+ or(scc,R5,RTS|TxENAB); /* enable tx */
+ }
+ } else {
+ cl(scc,R5,RTS|TxENAB); /* disable tx */
+
+ if ((scc->kiss.fulldup == KISS_DUPLEX_HALF) &&
+#ifndef CONFIG_SCC_TRXECHO
+ scc->kiss.softdcd)
+#else
+ 1)
+#endif
+ {
+ or(scc, R15, scc->kiss.softdcd? SYNCIE:DCDIE);
+ start_hunt(scc);
+ }
+ }
+ }
+
+ enable_irq(scc->irq);
+}
+
+
+/* ----> SCC timer interrupt handler and friends. <---- */
+
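+/* 'when' is given in units of 10 ms: 0 runs the handler immediately,
+ * TIMER_OFF leaves the timer disabled, anything else fires after
+ * (when * HZ) / 100 jiffies.
+ */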
+static void __scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
+{
+ del_timer(&scc->tx_t);
+
+ if (when == 0)
+ {
+ handler((unsigned long) scc);
+ } else
+ if (when != TIMER_OFF)
+ {
+ scc->tx_t.data = (unsigned long) scc;
+ scc->tx_t.function = handler;
+ scc->tx_t.expires = jiffies + (when*HZ)/100;
+ add_timer(&scc->tx_t);
+ }
+}
+
+static void scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ __scc_start_tx_timer(scc, handler, when);
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+static void scc_start_defer(struct scc_channel *scc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ del_timer(&scc->tx_wdog);
+
+ if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
+ {
+ scc->tx_wdog.data = (unsigned long) scc;
+ scc->tx_wdog.function = t_busy;
+ scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
+ add_timer(&scc->tx_wdog);
+ }
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+static void scc_start_maxkeyup(struct scc_channel *scc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ del_timer(&scc->tx_wdog);
+
+ if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
+ {
+ scc->tx_wdog.data = (unsigned long) scc;
+ scc->tx_wdog.function = t_maxkeyup;
+ scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
+ add_timer(&scc->tx_wdog);
+ }
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+/*
+ * This is called from scc_txint() when there are no more frames to send.
+ * Not exactly a timer function, but it is a close friend of the family...
+ */
+
+static void scc_tx_done(struct scc_channel *scc)
+{
+ /*
+ * trx remains keyed in fulldup mode 2 until t_idle expires.
+ */
+
+ switch (scc->kiss.fulldup)
+ {
+ case KISS_DUPLEX_LINK:
+ scc->stat.tx_state = TXS_IDLE2;
+ if (scc->kiss.idletime != TIMER_OFF)
+ scc_start_tx_timer(scc, t_idle, scc->kiss.idletime*100);
+ break;
+ case KISS_DUPLEX_OPTIMA:
+ scc_notify(scc, HWEV_ALL_SENT);
+ break;
+ default:
+ scc->stat.tx_state = TXS_BUSY;
+ scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
+ }
+
+ netif_wake_queue(scc->dev);
+}
+
+
+static unsigned char Rand = 17;
+
+static inline int is_grouped(struct scc_channel *scc)
+{
+ int k;
+ struct scc_channel *scc2;
+ unsigned char grp1, grp2;
+
+ grp1 = scc->kiss.group;
+
+ for (k = 0; k < (Nchips * 2); k++)
+ {
+ scc2 = &SCC_Info[k];
+ grp2 = scc2->kiss.group;
+
+ if (scc2 == scc || !(scc2->dev && grp2))
+ continue;
+
+ if ((grp1 & 0x3f) == (grp2 & 0x3f))
+ {
+ if ( (grp1 & TXGROUP) && (scc2->wreg[R5] & RTS) )
+ return 1;
+
+ if ( (grp1 & RXGROUP) && scc2->dcd )
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* DWAIT and SLOTTIME expired
+ *
+ * fulldup == 0: DCD is active or Rand > P-persistence: start t_busy timer
+ * else key trx and start txdelay
+ * fulldup == 1: key trx and start txdelay
+ * fulldup == 2: mintime expired, reset status or key trx and start txdelay
+ */
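+
+/* Example of the p-persistence decision below: Rand is an 8-bit value
+ * updated by a crude pseudo-random recurrence, and the transmitter is keyed
+ * only when kiss.persist >= Rand, i.e. with a probability of roughly
+ * (persist + 1) / 256 per slottime -- e.g. persist = 64 gives about a 25%
+ * chance per slot.
+ */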
+
+static void t_dwait(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+
+ if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */
+ {
+ if (skb_queue_len(&scc->tx_queue) == 0) /* nothing to send */
+ {
+ scc->stat.tx_state = TXS_IDLE;
+ netif_wake_queue(scc->dev); /* t_maxkeyup locked it. */
+ return;
+ }
+
+ scc->stat.tx_state = TXS_BUSY;
+ }
+
+ if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
+ {
+ Rand = Rand * 17 + 31;
+
+ if (scc->dcd || (scc->kiss.persist) < Rand || (scc->kiss.group && is_grouped(scc)) )
+ {
+ scc_start_defer(scc);
+ scc_start_tx_timer(scc, t_dwait, scc->kiss.slottime);
+ return ;
+ }
+ }
+
+ if ( !(scc->wreg[R5] & RTS) )
+ {
+ scc_key_trx(scc, TX_ON);
+ scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+ } else {
+ scc_start_tx_timer(scc, t_txdelay, 0);
+ }
+}
+
+
+/* TXDELAY expired
+ *
+ * kick transmission by a fake scc_txint(scc), start 'maxkeyup' watchdog.
+ */
+
+static void t_txdelay(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+
+ scc_start_maxkeyup(scc);
+
+ if (scc->tx_buff == NULL)
+ {
+ disable_irq(scc->irq);
+ scc_txint(scc);
+ enable_irq(scc->irq);
+ }
+}
+
+
+/* TAILTIME expired
+ *
+ * switch off transmitter. If we were stopped by Maxkeyup restart
+ * transmission after 'mintime' seconds
+ */
+
+static void t_tail(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ del_timer(&scc->tx_wdog);
+ scc_key_trx(scc, TX_OFF);
+ spin_unlock_irqrestore(&scc->lock, flags);
+
+ if (scc->stat.tx_state == TXS_TIMEOUT) /* we had a timeout? */
+ {
+ scc->stat.tx_state = TXS_WAIT;
+ scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
+ return;
+ }
+
+ scc->stat.tx_state = TXS_IDLE;
+ netif_wake_queue(scc->dev);
+}
+
+
+/* BUSY timeout
+ *
+ * throw away send buffers if DCD remains active too long.
+ */
+
+static void t_busy(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+
+ del_timer(&scc->tx_t);
+ netif_stop_queue(scc->dev); /* don't pile on the wabbit! */
+
+ scc_discard_buffers(scc);
+ scc->stat.txerrs++;
+ scc->stat.tx_state = TXS_IDLE;
+
+ netif_wake_queue(scc->dev);
+}
+
+/* MAXKEYUP timeout
+ *
+ * this is our watchdog.
+ */
+
+static void t_maxkeyup(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ /*
+ * let things settle down before we start to
+ * accept new data.
+ */
+
+ netif_stop_queue(scc->dev);
+ scc_discard_buffers(scc);
+
+ del_timer(&scc->tx_t);
+
+ cl(scc, R1, TxINT_ENAB); /* force an ABORT, but don't */
+ cl(scc, R15, TxUIE); /* count it. */
+ OutReg(scc->ctrl, R0, RES_Tx_P);
+
+ spin_unlock_irqrestore(&scc->lock, flags);
+
+ scc->stat.txerrs++;
+ scc->stat.tx_state = TXS_TIMEOUT;
+ scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
+}
+
+/* IDLE timeout
+ *
+ * in fulldup mode 2 it keys down the transmitter after 'idle' seconds
+ * of inactivity. We will not restart transmission before 'mintime'
+ * expires.
+ */
+
+static void t_idle(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+
+ del_timer(&scc->tx_wdog);
+
+ scc_key_trx(scc, TX_OFF);
+ if(scc->kiss.mintime)
+ scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
+ scc->stat.tx_state = TXS_WAIT;
+}
+
+static void scc_init_timer(struct scc_channel *scc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ scc->stat.tx_state = TXS_IDLE;
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+
+/* ******************************************************************** */
+/* * Set/get L1 parameters * */
+/* ******************************************************************** */
+
+
+/*
+ * this will set the "hardware" parameters through KISS commands or ioctl()
+ */
+
+#define CAST(x) (unsigned long)(x)
+
+static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
+{
+ switch (cmd)
+ {
+ case PARAM_TXDELAY: scc->kiss.txdelay=arg; break;
+ case PARAM_PERSIST: scc->kiss.persist=arg; break;
+ case PARAM_SLOTTIME: scc->kiss.slottime=arg; break;
+ case PARAM_TXTAIL: scc->kiss.tailtime=arg; break;
+ case PARAM_FULLDUP: scc->kiss.fulldup=arg; break;
+ case PARAM_DTR: break; /* does someone need this? */
+ case PARAM_GROUP: scc->kiss.group=arg; break;
+ case PARAM_IDLE: scc->kiss.idletime=arg; break;
+ case PARAM_MIN: scc->kiss.mintime=arg; break;
+ case PARAM_MAXKEY: scc->kiss.maxkeyup=arg; break;
+ case PARAM_WAIT: scc->kiss.waittime=arg; break;
+ case PARAM_MAXDEFER: scc->kiss.maxdefer=arg; break;
+ case PARAM_TX: scc->kiss.tx_inhibit=arg; break;
+
+ case PARAM_SOFTDCD:
+ scc->kiss.softdcd=arg;
+ if (arg)
+ {
+ or(scc, R15, SYNCIE);
+ cl(scc, R15, DCDIE);
+ start_hunt(scc);
+ } else {
+ or(scc, R15, DCDIE);
+ cl(scc, R15, SYNCIE);
+ }
+ break;
+
+ case PARAM_SPEED:
+ if (arg < 256)
+ scc->modem.speed=arg*100;
+ else
+ scc->modem.speed=arg;
+
+ if (scc->stat.tx_state == 0) /* only switch baudrate on rx... ;-) */
+ set_speed(scc);
+ break;
+
+ case PARAM_RTS:
+ if ( !(scc->wreg[R5] & RTS) )
+ {
+ if (arg != TX_OFF)
+ scc_key_trx(scc, TX_ON);
+ scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+ } else {
+ if (arg == TX_OFF)
+ {
+ scc->stat.tx_state = TXS_BUSY;
+ scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
+ }
+ }
+ break;
+
+ case PARAM_HWEVENT:
+ scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
+ break;
+
+ default: return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+
+static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
+{
+ switch (cmd)
+ {
+ case PARAM_TXDELAY: return CAST(scc->kiss.txdelay);
+ case PARAM_PERSIST: return CAST(scc->kiss.persist);
+ case PARAM_SLOTTIME: return CAST(scc->kiss.slottime);
+ case PARAM_TXTAIL: return CAST(scc->kiss.tailtime);
+ case PARAM_FULLDUP: return CAST(scc->kiss.fulldup);
+ case PARAM_SOFTDCD: return CAST(scc->kiss.softdcd);
+ case PARAM_DTR: return CAST((scc->wreg[R5] & DTR)? 1:0);
+ case PARAM_RTS: return CAST((scc->wreg[R5] & RTS)? 1:0);
+ case PARAM_SPEED: return CAST(scc->modem.speed);
+ case PARAM_GROUP: return CAST(scc->kiss.group);
+ case PARAM_IDLE: return CAST(scc->kiss.idletime);
+ case PARAM_MIN: return CAST(scc->kiss.mintime);
+ case PARAM_MAXKEY: return CAST(scc->kiss.maxkeyup);
+ case PARAM_WAIT: return CAST(scc->kiss.waittime);
+ case PARAM_MAXDEFER: return CAST(scc->kiss.maxdefer);
+ case PARAM_TX: return CAST(scc->kiss.tx_inhibit);
+ default: return NO_SUCH_PARAM;
+ }
+
+}
+
+#undef CAST
+
+/* ******************************************************************* */
+/* * Send calibration pattern * */
+/* ******************************************************************* */
+
+static void scc_stop_calibrate(unsigned long channel)
+{
+ struct scc_channel *scc = (struct scc_channel *) channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ del_timer(&scc->tx_wdog);
+ scc_key_trx(scc, TX_OFF);
+ wr(scc, R6, 0);
+ wr(scc, R7, FLAG);
+ Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
+ Outb(scc->ctrl,RES_EXT_INT);
+
+ netif_wake_queue(scc->dev);
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+
+static void
+scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->lock, flags);
+ netif_stop_queue(scc->dev);
+ scc_discard_buffers(scc);
+
+ del_timer(&scc->tx_wdog);
+
+ scc->tx_wdog.data = (unsigned long) scc;
+ scc->tx_wdog.function = scc_stop_calibrate;
+ scc->tx_wdog.expires = jiffies + HZ*duration;
+ add_timer(&scc->tx_wdog);
+
+ /* This doesn't seem to work. Why not? */
+ wr(scc, R6, 0);
+ wr(scc, R7, pattern);
+
+ /*
+ * Don't know if this works.
+ * Damn, where is my Z8530 programming manual...?
+ */
+
+ Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
+ Outb(scc->ctrl,RES_EXT_INT);
+
+ scc_key_trx(scc, TX_ON);
+ spin_unlock_irqrestore(&scc->lock, flags);
+}
+
+/* ******************************************************************* */
+/* * Init channel structures, special HW, etc... * */
+/* ******************************************************************* */
+
+/*
+ * Reset the Z8530s and setup special hardware
+ */
+
+static void z8530_init(void)
+{
+ struct scc_channel *scc;
+ int chip, k;
+ unsigned long flags;
+ char *flag;
+
+
+ printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
+
+ flag=" ";
+ for (k = 0; k < NR_IRQS; k++)
+ if (Ivec[k].used)
+ {
+ printk("%s%d", flag, k);
+ flag=",";
+ }
+ printk("\n");
+
+
+ /* reset and pre-init all chips in the system */
+ for (chip = 0; chip < Nchips; chip++)
+ {
+ scc=&SCC_Info[2*chip];
+ if (!scc->ctrl) continue;
+
+ /* Special SCC cards */
+
+ if(scc->brand & EAGLE) /* this is an EAGLE card */
+ Outb(scc->special,0x08); /* enable interrupt on the board */
+
+ if(scc->brand & (PC100 | PRIMUS)) /* this is a PC100/PRIMUS card */
+ Outb(scc->special,scc->option); /* set the MODEM mode (0x22) */
+
+
+ /* Reset and pre-init Z8530 */
+
+ spin_lock_irqsave(&scc->lock, flags);
+
+ Outb(scc->ctrl, 0);
+ OutReg(scc->ctrl,R9,FHWRES); /* force hardware reset */
+ udelay(100); /* give it 'a bit' more time than required */
+ wr(scc, R2, chip*16); /* interrupt vector */
+ wr(scc, R9, VIS); /* vector includes status */
+ spin_unlock_irqrestore(&scc->lock, flags);
+ }
+
+
+ Driver_Initialized = 1;
+}
+
+/*
+ * Allocate device structure, err, instance, and register driver
+ */
+
+static int scc_net_alloc(const char *name, struct scc_channel *scc)
+{
+ int err;
+ struct net_device *dev;
+
+ dev = alloc_netdev(0, name, scc_net_setup);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->priv = scc;
+ scc->dev = dev;
+ spin_lock_init(&scc->lock);
+ init_timer(&scc->tx_t);
+ init_timer(&scc->tx_wdog);
+
+ err = register_netdevice(dev);
+ if (err) {
+ printk(KERN_ERR "%s: can't register network device (%d)\n",
+ name, err);
+ free_netdev(dev);
+ scc->dev = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+
+
+/* ******************************************************************** */
+/* * Network driver methods * */
+/* ******************************************************************** */
+
+static unsigned char ax25_bcast[AX25_ADDR_LEN] =
+{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
+static unsigned char ax25_nocall[AX25_ADDR_LEN] =
+{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
+
+/* ----> Initialize device <----- */
+
+static void scc_net_setup(struct net_device *dev)
+{
+ SET_MODULE_OWNER(dev);
+ dev->tx_queue_len = 16; /* should be enough... */
+
+ dev->open = scc_net_open;
+ dev->stop = scc_net_close;
+
+ dev->hard_start_xmit = scc_net_tx;
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+ dev->set_mac_address = scc_net_set_mac_address;
+ dev->get_stats = scc_net_get_stats;
+ dev->do_ioctl = scc_net_ioctl;
+ dev->tx_timeout = NULL;
+
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_nocall, AX25_ADDR_LEN);
+
+ dev->flags = 0;
+
+ dev->type = ARPHRD_AX25;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
+ dev->mtu = AX25_DEF_PACLEN;
+ dev->addr_len = AX25_ADDR_LEN;
+
+}
+
+/* ----> open network device <---- */
+
+static int scc_net_open(struct net_device *dev)
+{
+ struct scc_channel *scc = (struct scc_channel *) dev->priv;
+
+ if (!scc->init)
+ return -EINVAL;
+
+ scc->tx_buff = NULL;
+ skb_queue_head_init(&scc->tx_queue);
+
+ init_channel(scc);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* ----> close network device <---- */
+
+static int scc_net_close(struct net_device *dev)
+{
+ struct scc_channel *scc = (struct scc_channel *) dev->priv;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&scc->lock, flags);
+ Outb(scc->ctrl,0); /* Make sure pointer is written */
+ wr(scc,R1,0); /* disable interrupts */
+ wr(scc,R3,0);
+ spin_unlock_irqrestore(&scc->lock, flags);
+
+ del_timer_sync(&scc->tx_t);
+ del_timer_sync(&scc->tx_wdog);
+
+ scc_discard_buffers(scc);
+
+ return 0;
+}
+
+/* ----> receive frame, called from scc_rxint() <---- */
+
+static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
+{
+ if (skb->len == 0) {
+ dev_kfree_skb_irq(skb);
+ return;
+ }
+
+ scc->dev_stat.rx_packets++;
+ scc->dev_stat.rx_bytes += skb->len;
+
+ skb->dev = scc->dev;
+ skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+
+ netif_rx(skb);
+ scc->dev->last_rx = jiffies;
+ return;
+}
+
+/* ----> transmit frame <---- */
+
+static int scc_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct scc_channel *scc = (struct scc_channel *) dev->priv;
+ unsigned long flags;
+ char kisscmd;
+
+ if (skb->len > scc->stat.bufsize || skb->len < 2) {
+ scc->dev_stat.tx_dropped++; /* bogus frame */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ scc->dev_stat.tx_packets++;
+ scc->dev_stat.tx_bytes += skb->len;
+ scc->stat.txframes++;
+
+ kisscmd = *skb->data & 0x1f;
+ skb_pull(skb, 1);
+
+ if (kisscmd) {
+ scc_set_param(scc, kisscmd, *skb->data);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&scc->lock, flags);
+
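+	/* drop the oldest queued frame if the queue is already full */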
+ if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
+ struct sk_buff *skb_del;
+ skb_del = skb_dequeue(&scc->tx_queue);
+ dev_kfree_skb(skb_del);
+ }
+ skb_queue_tail(&scc->tx_queue, skb);
+ dev->trans_start = jiffies;
+
+
+ /*
+ * Start transmission if the trx state is idle or
+ * t_idle hasn't expired yet. Use dwait/persistence/slottime
+ * algorithm for normal halfduplex operation.
+ */
+
+ if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
+ scc->stat.tx_state = TXS_BUSY;
+ if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
+ __scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
+ else
+ __scc_start_tx_timer(scc, t_dwait, 0);
+ }
+ spin_unlock_irqrestore(&scc->lock, flags);
+ return 0;
+}
+
+/* ----> ioctl functions <---- */
+
+/*
+ * SIOCSCCCFG - configure driver arg: (struct scc_hw_config *) arg
+ * SIOCSCCINI - initialize driver arg: ---
+ * SIOCSCCCHANINI - initialize channel arg: (struct scc_modem *) arg
+ * SIOCSCCSMEM - set memory arg: (struct scc_mem_config *) arg
+ * SIOCSCCGKISS - get level 1 parameter arg: (struct scc_kiss_cmd *) arg
+ * SIOCSCCSKISS - set level 1 parameter arg: (struct scc_kiss_cmd *) arg
+ * SIOCSCCGSTAT - get driver status arg: (struct scc_stat *) arg
+ * SIOCSCCCAL - send calib. pattern arg: (struct scc_calibrate *) arg
+ */
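+
+/*
+ * Illustrative user-space sketch (not part of the driver): reading one KISS
+ * parameter via SIOCSCCGKISS. Error handling is omitted and the interface
+ * name "scc0" is just an example.
+ *
+ *	struct ifreq ifr;
+ *	struct scc_kiss_cmd kiss_cmd;
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	strcpy(ifr.ifr_name, "scc0");
+ *	kiss_cmd.command = PARAM_TXDELAY;
+ *	ifr.ifr_data = (void *) &kiss_cmd;
+ *	ioctl(fd, SIOCSCCGKISS, &ifr);   -- kiss_cmd.param now holds the value
+ */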
+
+static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct scc_kiss_cmd kiss_cmd;
+ struct scc_mem_config memcfg;
+ struct scc_hw_config hwcfg;
+ struct scc_calibrate cal;
+ struct scc_channel *scc = (struct scc_channel *) dev->priv;
+ int chan;
+ unsigned char device_name[IFNAMSIZ];
+ void __user *arg = ifr->ifr_data;
+
+
+ if (!Driver_Initialized)
+ {
+ if (cmd == SIOCSCCCFG)
+ {
+ int found = 1;
+
+ if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (!arg) return -EFAULT;
+
+ if (Nchips >= SCC_MAXCHIPS)
+ return -EINVAL;
+
+ if (copy_from_user(&hwcfg, arg, sizeof(hwcfg)))
+ return -EFAULT;
+
+ if (hwcfg.irq == 2) hwcfg.irq = 9;
+
+ if (hwcfg.irq < 0 || hwcfg.irq >= NR_IRQS)
+ return -EINVAL;
+
+ if (!Ivec[hwcfg.irq].used && hwcfg.irq)
+ {
+ if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT, "AX.25 SCC", NULL))
+ printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
+ else
+ Ivec[hwcfg.irq].used = 1;
+ }
+
+ if (hwcfg.vector_latch && !Vector_Latch) {
+ if (!request_region(hwcfg.vector_latch, 1, "scc vector latch"))
+ printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%lx\n, disabled.", hwcfg.vector_latch);
+ else
+ Vector_Latch = hwcfg.vector_latch;
+ }
+
+ if (hwcfg.clock == 0)
+ hwcfg.clock = SCC_DEFAULT_CLOCK;
+
+#ifndef SCC_DONT_CHECK
+
+ if(request_region(hwcfg.ctrl_a, 1, "scc-probe"))
+ {
+ disable_irq(hwcfg.irq);
+ Outb(hwcfg.ctrl_a, 0);
+ OutReg(hwcfg.ctrl_a, R9, FHWRES);
+ udelay(100);
+ OutReg(hwcfg.ctrl_a,R13,0x55); /* is this chip really there? */
+ udelay(5);
+
+ if (InReg(hwcfg.ctrl_a,R13) != 0x55)
+ found = 0;
+ enable_irq(hwcfg.irq);
+ release_region(hwcfg.ctrl_a, 1);
+ }
+ else
+ found = 0;
+#endif
+
+ if (found)
+ {
+ SCC_Info[2*Nchips ].ctrl = hwcfg.ctrl_a;
+ SCC_Info[2*Nchips ].data = hwcfg.data_a;
+ SCC_Info[2*Nchips ].irq = hwcfg.irq;
+ SCC_Info[2*Nchips+1].ctrl = hwcfg.ctrl_b;
+ SCC_Info[2*Nchips+1].data = hwcfg.data_b;
+ SCC_Info[2*Nchips+1].irq = hwcfg.irq;
+
+ SCC_ctrl[Nchips].chan_A = hwcfg.ctrl_a;
+ SCC_ctrl[Nchips].chan_B = hwcfg.ctrl_b;
+ SCC_ctrl[Nchips].irq = hwcfg.irq;
+ }
+
+
+ for (chan = 0; chan < 2; chan++)
+ {
+ sprintf(device_name, "%s%i", SCC_DriverName, 2*Nchips+chan);
+
+ SCC_Info[2*Nchips+chan].special = hwcfg.special;
+ SCC_Info[2*Nchips+chan].clock = hwcfg.clock;
+ SCC_Info[2*Nchips+chan].brand = hwcfg.brand;
+ SCC_Info[2*Nchips+chan].option = hwcfg.option;
+ SCC_Info[2*Nchips+chan].enhanced = hwcfg.escc;
+
+#ifdef SCC_DONT_CHECK
+ printk(KERN_INFO "%s: data port = 0x%3.3x control port = 0x%3.3x\n",
+ device_name,
+ SCC_Info[2*Nchips+chan].data,
+ SCC_Info[2*Nchips+chan].ctrl);
+
+#else
+ printk(KERN_INFO "%s: data port = 0x%3.3lx control port = 0x%3.3lx -- %s\n",
+ device_name,
+ chan? hwcfg.data_b : hwcfg.data_a,
+ chan? hwcfg.ctrl_b : hwcfg.ctrl_a,
+ found? "found" : "missing");
+#endif
+
+ if (found)
+ {
+ request_region(SCC_Info[2*Nchips+chan].ctrl, 1, "scc ctrl");
+ request_region(SCC_Info[2*Nchips+chan].data, 1, "scc data");
+ if (Nchips+chan != 0 &&
+ scc_net_alloc(device_name,
+ &SCC_Info[2*Nchips+chan]))
+ return -EINVAL;
+ }
+ }
+
+ if (found) Nchips++;
+
+ return 0;
+ }
+
+ if (cmd == SIOCSCCINI)
+ {
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (Nchips == 0)
+ return -EINVAL;
+
+ z8530_init();
+ return 0;
+ }
+
+ return -EINVAL; /* confuse the user */
+ }
+
+ if (!scc->init)
+ {
+ if (cmd == SIOCSCCCHANINI)
+ {
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (!arg) return -EINVAL;
+
+ scc->stat.bufsize = SCC_BUFSIZE;
+
+ if (copy_from_user(&scc->modem, arg, sizeof(struct scc_modem)))
+ return -EINVAL;
+
+ /* default KISS Params */
+
+ if (scc->modem.speed < 4800)
+ {
+ scc->kiss.txdelay = 36; /* 360 ms */
+ scc->kiss.persist = 42; /* ~17% persistence */ /* was 25 */
+ scc->kiss.slottime = 16; /* 160 ms */
+ scc->kiss.tailtime = 4; /* minimal reasonable value */
+ scc->kiss.fulldup = 0; /* CSMA */
+ scc->kiss.waittime = 50; /* 500 ms */
+ scc->kiss.maxkeyup = 10; /* 10 s */
+ scc->kiss.mintime = 3; /* 3 s */
+ scc->kiss.idletime = 30; /* 30 s */
+ scc->kiss.maxdefer = 120; /* 2 min */
+ scc->kiss.softdcd = 0; /* hardware dcd */
+ } else {
+ scc->kiss.txdelay = 10; /* 100 ms */
+ scc->kiss.persist = 64; /* 25% persistence */ /* was 25 */
+ scc->kiss.slottime = 8; /* 80 ms */
+ scc->kiss.tailtime = 1; /* minimal reasonable value */
+ scc->kiss.fulldup = 0; /* CSMA */
+ scc->kiss.waittime = 50; /* 500 ms */
+ scc->kiss.maxkeyup = 7; /* 7 s */
+ scc->kiss.mintime = 3; /* 3 s */
+ scc->kiss.idletime = 30; /* 30 s */
+ scc->kiss.maxdefer = 120; /* 2 min */
+ scc->kiss.softdcd = 0; /* hardware dcd */
+ }
+
+ scc->tx_buff = NULL;
+ skb_queue_head_init(&scc->tx_queue);
+ scc->init = 1;
+
+ return 0;
+ }
+
+ return -EINVAL;
+ }
+
+ switch(cmd)
+ {
+ case SIOCSCCRESERVED:
+ return -ENOIOCTLCMD;
+
+ case SIOCSCCSMEM:
+ if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
+ return -EINVAL;
+ scc->stat.bufsize = memcfg.bufsize;
+ return 0;
+
+ case SIOCSCCGSTAT:
+ if (!arg || copy_to_user(arg, &scc->stat, sizeof(scc->stat)))
+ return -EINVAL;
+ return 0;
+
+ case SIOCSCCGKISS:
+ if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
+ return -EINVAL;
+ kiss_cmd.param = scc_get_param(scc, kiss_cmd.command);
+ if (copy_to_user(arg, &kiss_cmd, sizeof(kiss_cmd)))
+ return -EINVAL;
+ return 0;
+
+ case SIOCSCCSKISS:
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
+ return -EINVAL;
+ return scc_set_param(scc, kiss_cmd.command, kiss_cmd.param);
+
+ case SIOCSCCCAL:
+ if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (!arg || copy_from_user(&cal, arg, sizeof(cal)) || cal.time == 0)
+ return -EINVAL;
+
+ scc_start_calibrate(scc, cal.time, cal.pattern);
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+
+ }
+
+ return -EINVAL;
+}
+
+/* ----> set interface callsign <---- */
+
+static int scc_net_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = (struct sockaddr *) addr;
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ return 0;
+}
+
+/* ----> get statistics <---- */
+
+static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
+{
+ struct scc_channel *scc = (struct scc_channel *) dev->priv;
+
+ scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
+ scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
+ scc->dev_stat.rx_fifo_errors = scc->stat.rx_over;
+ scc->dev_stat.tx_fifo_errors = scc->stat.tx_under;
+
+ return &scc->dev_stat;
+}
+
+/* ******************************************************************** */
+/* * dump statistics to /proc/net/z8530drv * */
+/* ******************************************************************** */
+
+#ifdef CONFIG_PROC_FS
+
+static inline struct scc_channel *scc_net_seq_idx(loff_t pos)
+{
+ int k;
+
+ for (k = 0; k < Nchips*2; ++k) {
+ if (!SCC_Info[k].init)
+ continue;
+ if (pos-- == 0)
+ return &SCC_Info[k];
+ }
+ return NULL;
+}
+
+static void *scc_net_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? scc_net_seq_idx(*pos - 1) : SEQ_START_TOKEN;
+
+}
+
+static void *scc_net_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ unsigned k;
+ struct scc_channel *scc = v;
+ ++*pos;
+
+ for (k = (v == SEQ_START_TOKEN) ? 0 : (scc - SCC_Info)+1;
+ k < Nchips*2; ++k) {
+ if (SCC_Info[k].init)
+ return &SCC_Info[k];
+ }
+ return NULL;
+}
+
+static void scc_net_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int scc_net_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "z8530drv-"VERSION"\n");
+ } else if (!Driver_Initialized) {
+ seq_puts(seq, "not initialized\n");
+ } else if (!Nchips) {
+ seq_puts(seq, "chips missing\n");
+ } else {
+ const struct scc_channel *scc = v;
+ const struct scc_stat *stat = &scc->stat;
+ const struct scc_kiss *kiss = &scc->kiss;
+
+
+ /* dev data ctrl irq clock brand enh vector special option
+ * baud nrz clocksrc softdcd bufsize
+ * rxints txints exints spints
+ * rcvd rxerrs over / xmit txerrs under / nospace bufsize
+ * txd pers slot tail ful wait min maxk idl defr txof grp
+ * W ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
+ * R ## ## XX ## ## ## ## ## XX ## ## ## ## ## ## ##
+ */
+
+ seq_printf(seq, "%s\t%3.3lx %3.3lx %d %lu %2.2x %d %3.3lx %3.3lx %d\n",
+ scc->dev->name,
+ scc->data, scc->ctrl, scc->irq, scc->clock, scc->brand,
+ scc->enhanced, Vector_Latch, scc->special,
+ scc->option);
+ seq_printf(seq, "\t%lu %d %d %d %d\n",
+ scc->modem.speed, scc->modem.nrz,
+ scc->modem.clocksrc, kiss->softdcd,
+ stat->bufsize);
+ seq_printf(seq, "\t%lu %lu %lu %lu\n",
+ stat->rxints, stat->txints, stat->exints, stat->spints);
+ seq_printf(seq, "\t%lu %lu %d / %lu %lu %d / %d %d\n",
+ stat->rxframes, stat->rxerrs, stat->rx_over,
+ stat->txframes, stat->txerrs, stat->tx_under,
+ stat->nospace, stat->tx_state);
+
+#define K(x) kiss->x
+ seq_printf(seq, "\t%d %d %d %d %d %d %d %d %d %d %d %d\n",
+ K(txdelay), K(persist), K(slottime), K(tailtime),
+ K(fulldup), K(waittime), K(mintime), K(maxkeyup),
+ K(idletime), K(maxdefer), K(tx_inhibit), K(group));
+#undef K
+#ifdef SCC_DEBUG
+ {
+ int reg;
+
+ seq_printf(seq, "\tW ");
+ for (reg = 0; reg < 16; reg++)
+ seq_printf(seq, "%2.2x ", scc->wreg[reg]);
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "\tR %2.2x %2.2x XX ", InReg(scc->ctrl,R0), InReg(scc->ctrl,R1));
+ for (reg = 3; reg < 8; reg++)
+ seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
+ seq_printf(seq, "XX ");
+ for (reg = 9; reg < 16; reg++)
+ seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
+ seq_printf(seq, "\n");
+ }
+#endif
+ seq_putc(seq, '\n');
+ }
+
+ return 0;
+}
+
+static struct seq_operations scc_net_seq_ops = {
+ .start = scc_net_seq_start,
+ .next = scc_net_seq_next,
+ .stop = scc_net_seq_stop,
+ .show = scc_net_seq_show,
+};
+
+
+static int scc_net_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scc_net_seq_ops);
+}
+
+static struct file_operations scc_net_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = scc_net_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+
+/* ******************************************************************** */
+/* * Init SCC driver * */
+/* ******************************************************************** */
+
+static int __init scc_init_driver (void)
+{
+ char devname[IFNAMSIZ];
+
+ printk(banner);
+
+ sprintf(devname,"%s0", SCC_DriverName);
+
+ rtnl_lock();
+ if (scc_net_alloc(devname, SCC_Info)) {
+ rtnl_unlock();
+ printk(KERN_ERR "z8530drv: cannot initialize module\n");
+ return -EIO;
+ }
+ rtnl_unlock();
+
+ proc_net_fops_create("z8530drv", 0, &scc_net_seq_fops);
+
+ return 0;
+}
+
+static void __exit scc_cleanup_driver(void)
+{
+ io_port ctrl;
+ int k;
+ struct scc_channel *scc;
+ struct net_device *dev;
+
+ if (Nchips == 0 && (dev = SCC_Info[0].dev))
+ {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+
+ /* Guard against chip prattle */
+ local_irq_disable();
+
+ for (k = 0; k < Nchips; k++)
+ if ( (ctrl = SCC_ctrl[k].chan_A) )
+ {
+ Outb(ctrl, 0);
+ OutReg(ctrl,R9,FHWRES); /* force hardware reset */
+ udelay(50);
+ }
+
+ /* To unload, the ports must be closed first, so that no real IRQ is still pending */
+ for (k=0; k < NR_IRQS ; k++)
+ if (Ivec[k].used) free_irq(k, NULL);
+
+ local_irq_enable();
+
+ /* Now clean up */
+ for (k = 0; k < Nchips*2; k++)
+ {
+ scc = &SCC_Info[k];
+ if (scc->ctrl)
+ {
+ release_region(scc->ctrl, 1);
+ release_region(scc->data, 1);
+ }
+ if (scc->dev)
+ {
+ unregister_netdev(scc->dev);
+ free_netdev(scc->dev);
+ }
+ }
+
+
+ if (Vector_Latch)
+ release_region(Vector_Latch, 1);
+
+ proc_net_remove("z8530drv");
+}
+
+MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
+MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
+MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio");
+MODULE_LICENSE("GPL");
+module_init(scc_init_driver);
+module_exit(scc_cleanup_driver);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
new file mode 100644
index 000000000000..fd7b00fe38e5
--- /dev/null
+++ b/drivers/net/hamradio/yam.c
@@ -0,0 +1,1218 @@
+/*****************************************************************************/
+
+/*
+ * yam.c -- YAM radio modem driver.
+ *
+ * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr)
+ * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ * History:
+ * 0.0 F1OAT 06.06.98 Begin of work with baycom.c source code V 0.3
+ * 0.1 F1OAT 07.06.98 Add timer polling routine for channel arbitration
+ * 0.2 F6FBB 08.06.98 Added delay after FPGA programming
+ * 0.3 F6FBB 29.07.98 Delayed PTT implementation for dupmode=2
+ * 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistence
+ * 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics
+ * 0.6 F6FBB 25.08.98 Added 1200Bds format
+ * 0.7 F6FBB 12.09.98 Added to the kernel configuration
+ * 0.8 F6FBB 14.10.98 Fixed slottime/persistence timing bug
+ * OK1ZIA 2.09.01 Fixed "kfree_skb on hard IRQ"
+ * using dev_kfree_skb_any(). (important in 2.4 kernel)
+ *
+ */
+
+/*****************************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+/* prototypes for ax25_encapsulate and ax25_rebuild_header */
+#include <net/ax25.h>
+#endif /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+
+/* make genksyms happy */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+#include <linux/init.h>
+
+#include <linux/yam.h>
+#include "yam9600.h"
+#include "yam1200.h"
+
+/* --------------------------------------------------------------------- */
+
+static const char yam_drvname[] = "yam";
+static char yam_drvinfo[] __initdata = KERN_INFO "YAM driver version 0.8 by F1OAT/F6FBB\n";
+
+/* --------------------------------------------------------------------- */
+
+#define YAM_9600 1
+#define YAM_1200 2
+
+#define NR_PORTS 4
+#define YAM_MAGIC 0xF10A7654
+
+/* Transmitter states */
+
+#define TX_OFF 0
+#define TX_HEAD 1
+#define TX_DATA 2
+#define TX_CRC1 3
+#define TX_CRC2 4
+#define TX_TAIL 5
+
+#define YAM_MAX_FRAME 1024
+
+#define DEFAULT_BITRATE 9600 /* bps */
+#define DEFAULT_HOLDD 10 /* sec */
+#define DEFAULT_TXD 300 /* ms */
+#define DEFAULT_TXTAIL 10 /* ms */
+#define DEFAULT_SLOT 100 /* ms */
+#define DEFAULT_PERS 64 /* 0->255 */
+
+struct yam_port {
+ int magic;
+ int bitrate;
+ int baudrate;
+ int iobase;
+ int irq;
+ int dupmode;
+
+ struct net_device *dev;
+
+ /* Stats section */
+
+ struct net_device_stats stats;
+
+ int nb_rxint;
+ int nb_mdint;
+
+ /* Parameters section */
+
+ int txd; /* tx delay */
+ int holdd; /* duplex ptt delay */
+ int txtail; /* txtail delay */
+ int slot; /* slottime */
+ int pers; /* persistence */
+
+ /* Tx section */
+
+ int tx_state;
+ int tx_count;
+ int slotcnt;
+ unsigned char tx_buf[YAM_MAX_FRAME];
+ int tx_len;
+ int tx_crcl, tx_crch;
+ struct sk_buff_head send_queue; /* Packets awaiting transmission */
+
+ /* Rx section */
+
+ int dcd;
+ unsigned char rx_buf[YAM_MAX_FRAME];
+ int rx_len;
+ int rx_crcl, rx_crch;
+};
+
+struct yam_mcs {
+ unsigned char bits[YAM_FPGA_SIZE];
+ int bitrate;
+ struct yam_mcs *next;
+};
+
+static struct net_device *yam_devs[NR_PORTS];
+
+static struct yam_mcs *yam_data;
+
+static char ax25_bcast[7] =
+{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1};
+static char ax25_test[7] =
+{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1};
+
+static struct timer_list yam_timer = TIMER_INITIALIZER(NULL, 0, 0);
+
+/* --------------------------------------------------------------------- */
+
+#define RBR(iobase) (iobase+0)
+#define THR(iobase) (iobase+0)
+#define IER(iobase) (iobase+1)
+#define IIR(iobase) (iobase+2)
+#define FCR(iobase) (iobase+2)
+#define LCR(iobase) (iobase+3)
+#define MCR(iobase) (iobase+4)
+#define LSR(iobase) (iobase+5)
+#define MSR(iobase) (iobase+6)
+#define SCR(iobase) (iobase+7)
+#define DLL(iobase) (iobase+0)
+#define DLM(iobase) (iobase+1)
+
+#define YAM_EXTENT 8
+
+/* Interrupt Identification Register Bit Masks */
+#define IIR_NOPEND 1
+#define IIR_MSR 0
+#define IIR_TX 2
+#define IIR_RX 4
+#define IIR_LSR 6
+#define IIR_TIMEOUT 12 /* Fifo mode only */
+
+#define IIR_MASK 0x0F
+
+/* Interrupt Enable Register Bit Masks */
+#define IER_RX 1 /* enable rx interrupt */
+#define IER_TX 2 /* enable tx interrupt */
+#define IER_LSR 4 /* enable line status interrupts */
+#define IER_MSR 8 /* enable modem status interrupts */
+
+/* Modem Control Register Bit Masks */
+#define MCR_DTR 0x01 /* DTR output */
+#define MCR_RTS 0x02 /* RTS output */
+#define MCR_OUT1 0x04 /* OUT1 output (not accessible in RS232) */
+#define MCR_OUT2 0x08 /* Master Interrupt enable (must be set on PCs) */
+#define MCR_LOOP 0x10 /* Loopback enable */
+
+/* Modem Status Register Bit Masks */
+#define MSR_DCTS 0x01 /* Delta CTS input */
+#define MSR_DDSR 0x02 /* Delta DSR */
+#define MSR_DRIN 0x04 /* Delta RI */
+#define MSR_DDCD 0x08 /* Delta DCD */
+#define MSR_CTS 0x10 /* CTS input */
+#define MSR_DSR 0x20 /* DSR input */
+#define MSR_RING 0x40 /* RI input */
+#define MSR_DCD 0x80 /* DCD input */
+
+/* line status register bit mask */
+#define LSR_RXC 0x01
+#define LSR_OE 0x02
+#define LSR_PE 0x04
+#define LSR_FE 0x08
+#define LSR_BREAK 0x10
+#define LSR_THRE 0x20
+#define LSR_TSRE 0x40
+
+/* Line Control Register Bit Masks */
+#define LCR_DLAB 0x80
+#define LCR_BREAK 0x40
+#define LCR_PZERO 0x28
+#define LCR_PEVEN 0x18
+#define LCR_PODD 0x08
+#define LCR_STOP1 0x00
+#define LCR_STOP2 0x04
+#define LCR_BIT5 0x00
+#define LCR_BIT6 0x02
+#define LCR_BIT7 0x01
+#define LCR_BIT8 0x03
+
+/* YAM Modem <-> UART Port mapping */
+
+#define TX_RDY MSR_DCTS /* transmitter ready to send */
+#define RX_DCD MSR_DCD /* carrier detect */
+#define RX_FLAG MSR_RING /* hdlc flag received */
+#define FPGA_DONE MSR_DSR /* FPGA is configured */
+#define PTT_ON (MCR_RTS|MCR_OUT2) /* activate PTT */
+#define PTT_OFF (MCR_DTR|MCR_OUT2) /* release PTT */
+
+#define ENABLE_RXINT IER_RX /* enable uart rx interrupt during rx */
+#define ENABLE_TXINT IER_MSR /* enable uart ms interrupt during tx */
+#define ENABLE_RTXINT (IER_RX|IER_MSR) /* full duplex operations */
+
+
+/*************************************************************************
+* CRC Tables
+************************************************************************/
+
+static const unsigned char chktabl[256] =
+{0x00, 0x89, 0x12, 0x9b, 0x24, 0xad, 0x36, 0xbf, 0x48, 0xc1, 0x5a, 0xd3, 0x6c, 0xe5, 0x7e,
+ 0xf7, 0x81, 0x08, 0x93, 0x1a, 0xa5, 0x2c, 0xb7, 0x3e, 0xc9, 0x40, 0xdb, 0x52, 0xed, 0x64,
+ 0xff, 0x76, 0x02, 0x8b, 0x10, 0x99, 0x26, 0xaf, 0x34, 0xbd, 0x4a, 0xc3, 0x58, 0xd1, 0x6e,
+ 0xe7, 0x7c, 0xf5, 0x83, 0x0a, 0x91, 0x18, 0xa7, 0x2e, 0xb5, 0x3c, 0xcb, 0x42, 0xd9, 0x50,
+ 0xef, 0x66, 0xfd, 0x74, 0x04, 0x8d, 0x16, 0x9f, 0x20, 0xa9, 0x32, 0xbb, 0x4c, 0xc5, 0x5e,
+ 0xd7, 0x68, 0xe1, 0x7a, 0xf3, 0x85, 0x0c, 0x97, 0x1e, 0xa1, 0x28, 0xb3, 0x3a, 0xcd, 0x44,
+ 0xdf, 0x56, 0xe9, 0x60, 0xfb, 0x72, 0x06, 0x8f, 0x14, 0x9d, 0x22, 0xab, 0x30, 0xb9, 0x4e,
+ 0xc7, 0x5c, 0xd5, 0x6a, 0xe3, 0x78, 0xf1, 0x87, 0x0e, 0x95, 0x1c, 0xa3, 0x2a, 0xb1, 0x38,
+ 0xcf, 0x46, 0xdd, 0x54, 0xeb, 0x62, 0xf9, 0x70, 0x08, 0x81, 0x1a, 0x93, 0x2c, 0xa5, 0x3e,
+ 0xb7, 0x40, 0xc9, 0x52, 0xdb, 0x64, 0xed, 0x76, 0xff, 0x89, 0x00, 0x9b, 0x12, 0xad, 0x24,
+ 0xbf, 0x36, 0xc1, 0x48, 0xd3, 0x5a, 0xe5, 0x6c, 0xf7, 0x7e, 0x0a, 0x83, 0x18, 0x91, 0x2e,
+ 0xa7, 0x3c, 0xb5, 0x42, 0xcb, 0x50, 0xd9, 0x66, 0xef, 0x74, 0xfd, 0x8b, 0x02, 0x99, 0x10,
+ 0xaf, 0x26, 0xbd, 0x34, 0xc3, 0x4a, 0xd1, 0x58, 0xe7, 0x6e, 0xf5, 0x7c, 0x0c, 0x85, 0x1e,
+ 0x97, 0x28, 0xa1, 0x3a, 0xb3, 0x44, 0xcd, 0x56, 0xdf, 0x60, 0xe9, 0x72, 0xfb, 0x8d, 0x04,
+ 0x9f, 0x16, 0xa9, 0x20, 0xbb, 0x32, 0xc5, 0x4c, 0xd7, 0x5e, 0xe1, 0x68, 0xf3, 0x7a, 0x0e,
+ 0x87, 0x1c, 0x95, 0x2a, 0xa3, 0x38, 0xb1, 0x46, 0xcf, 0x54, 0xdd, 0x62, 0xeb, 0x70, 0xf9,
+ 0x8f, 0x06, 0x9d, 0x14, 0xab, 0x22, 0xb9, 0x30, 0xc7, 0x4e, 0xd5, 0x5c, 0xe3, 0x6a, 0xf1,
+ 0x78};
+static const unsigned char chktabh[256] =
+{0x00, 0x11, 0x23, 0x32, 0x46, 0x57, 0x65, 0x74, 0x8c, 0x9d, 0xaf, 0xbe, 0xca, 0xdb, 0xe9,
+ 0xf8, 0x10, 0x01, 0x33, 0x22, 0x56, 0x47, 0x75, 0x64, 0x9c, 0x8d, 0xbf, 0xae, 0xda, 0xcb,
+ 0xf9, 0xe8, 0x21, 0x30, 0x02, 0x13, 0x67, 0x76, 0x44, 0x55, 0xad, 0xbc, 0x8e, 0x9f, 0xeb,
+ 0xfa, 0xc8, 0xd9, 0x31, 0x20, 0x12, 0x03, 0x77, 0x66, 0x54, 0x45, 0xbd, 0xac, 0x9e, 0x8f,
+ 0xfb, 0xea, 0xd8, 0xc9, 0x42, 0x53, 0x61, 0x70, 0x04, 0x15, 0x27, 0x36, 0xce, 0xdf, 0xed,
+ 0xfc, 0x88, 0x99, 0xab, 0xba, 0x52, 0x43, 0x71, 0x60, 0x14, 0x05, 0x37, 0x26, 0xde, 0xcf,
+ 0xfd, 0xec, 0x98, 0x89, 0xbb, 0xaa, 0x63, 0x72, 0x40, 0x51, 0x25, 0x34, 0x06, 0x17, 0xef,
+ 0xfe, 0xcc, 0xdd, 0xa9, 0xb8, 0x8a, 0x9b, 0x73, 0x62, 0x50, 0x41, 0x35, 0x24, 0x16, 0x07,
+ 0xff, 0xee, 0xdc, 0xcd, 0xb9, 0xa8, 0x9a, 0x8b, 0x84, 0x95, 0xa7, 0xb6, 0xc2, 0xd3, 0xe1,
+ 0xf0, 0x08, 0x19, 0x2b, 0x3a, 0x4e, 0x5f, 0x6d, 0x7c, 0x94, 0x85, 0xb7, 0xa6, 0xd2, 0xc3,
+ 0xf1, 0xe0, 0x18, 0x09, 0x3b, 0x2a, 0x5e, 0x4f, 0x7d, 0x6c, 0xa5, 0xb4, 0x86, 0x97, 0xe3,
+ 0xf2, 0xc0, 0xd1, 0x29, 0x38, 0x0a, 0x1b, 0x6f, 0x7e, 0x4c, 0x5d, 0xb5, 0xa4, 0x96, 0x87,
+ 0xf3, 0xe2, 0xd0, 0xc1, 0x39, 0x28, 0x1a, 0x0b, 0x7f, 0x6e, 0x5c, 0x4d, 0xc6, 0xd7, 0xe5,
+ 0xf4, 0x80, 0x91, 0xa3, 0xb2, 0x4a, 0x5b, 0x69, 0x78, 0x0c, 0x1d, 0x2f, 0x3e, 0xd6, 0xc7,
+ 0xf5, 0xe4, 0x90, 0x81, 0xb3, 0xa2, 0x5a, 0x4b, 0x79, 0x68, 0x1c, 0x0d, 0x3f, 0x2e, 0xe7,
+ 0xf6, 0xc4, 0xd5, 0xa1, 0xb0, 0x82, 0x93, 0x6b, 0x7a, 0x48, 0x59, 0x2d, 0x3c, 0x0e, 0x1f,
+ 0xf7, 0xe6, 0xd4, 0xc5, 0xb1, 0xa0, 0x92, 0x83, 0x7b, 0x6a, 0x58, 0x49, 0x3d, 0x2c, 0x1e,
+ 0x0f};
+
+/*************************************************************************
+* FPGA functions
+************************************************************************/
+
+static void delay(int ms)
+{
+ unsigned long timeout = jiffies + ((ms * HZ) / 1000);
+ while (time_before(jiffies, timeout))
+ cpu_relax();
+}
+
+/*
+ * reset FPGA
+ */
+
+static void fpga_reset(int iobase)
+{
+ outb(0, IER(iobase));
+ outb(LCR_DLAB | LCR_BIT5, LCR(iobase));
+ outb(1, DLL(iobase));
+ outb(0, DLM(iobase));
+
+ outb(LCR_BIT5, LCR(iobase));
+ inb(LSR(iobase));
+ inb(MSR(iobase));
+ /* turn off FPGA supply voltage */
+ outb(MCR_OUT1 | MCR_OUT2, MCR(iobase));
+ delay(100);
+ /* turn on FPGA supply voltage again */
+ outb(MCR_DTR | MCR_RTS | MCR_OUT1 | MCR_OUT2, MCR(iobase));
+ delay(100);
+}
+
+/*
+ * send one byte to FPGA
+ */
+
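+/* A sketch of the load protocol as implemented below: each data bit is
+ * presented on RTS (MSB first, with DTR held high) and a dummy byte written
+ * to THR provides the serial clock; fpga_download() later checks FPGA_DONE
+ * (the DSR input) to see whether configuration succeeded.
+ */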
+static int fpga_write(int iobase, unsigned char wrd)
+{
+ unsigned char bit;
+ int k;
+ unsigned long timeout = jiffies + HZ / 10;
+
+ for (k = 0; k < 8; k++) {
+ bit = (wrd & 0x80) ? (MCR_RTS | MCR_DTR) : MCR_DTR;
+ outb(bit | MCR_OUT1 | MCR_OUT2, MCR(iobase));
+ wrd <<= 1;
+ outb(0xfc, THR(iobase));
+ while ((inb(LSR(iobase)) & LSR_TSRE) == 0)
+ if (time_after(jiffies, timeout))
+ return -1;
+ }
+
+ return 0;
+}
+
+static unsigned char *add_mcs(unsigned char *bits, int bitrate)
+{
+ struct yam_mcs *p;
+
+ /* If it already exists, replace the bit data */
+ p = yam_data;
+ while (p) {
+ if (p->bitrate == bitrate) {
+ memcpy(p->bits, bits, YAM_FPGA_SIZE);
+ return p->bits;
+ }
+ p = p->next;
+ }
+
+ /* Allocate a new mcs */
+ if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING "YAM: no memory to allocate mcs\n");
+ return NULL;
+ }
+ memcpy(p->bits, bits, YAM_FPGA_SIZE);
+ p->bitrate = bitrate;
+ p->next = yam_data;
+ yam_data = p;
+
+ return p->bits;
+}
+
+static unsigned char *get_mcs(int bitrate)
+{
+ struct yam_mcs *p;
+
+ p = yam_data;
+ while (p) {
+ if (p->bitrate == bitrate)
+ return p->bits;
+ p = p->next;
+ }
+
+ /* Load predefined mcs data */
+ switch (bitrate) {
+ case 1200:
+ return add_mcs(bits_1200, bitrate);
+ default:
+ return add_mcs(bits_9600, bitrate);
+ }
+}
+
+/*
+ * download bitstream to FPGA
+ * data is contained in bits[] array in yam1200.h resp. yam9600.h
+ */
+
+static int fpga_download(int iobase, int bitrate)
+{
+ int i, rc;
+ unsigned char *pbits;
+
+ pbits = get_mcs(bitrate);
+ if (pbits == NULL)
+ return -1;
+
+ fpga_reset(iobase);
+ for (i = 0; i < YAM_FPGA_SIZE; i++) {
+ if (fpga_write(iobase, pbits[i])) {
+ printk(KERN_ERR "yam: error in write cycle\n");
+ return -1; /* write... */
+ }
+ }
+
+ fpga_write(iobase, 0xFF);
+ rc = inb(MSR(iobase)); /* check DONE signal */
+
+ /* Needed for some hardware */
+ delay(50);
+
+ return (rc & MSR_DSR) ? 0 : -1;
+}
+
+
+/************************************************************************
+* Serial port init
+************************************************************************/
+
+static void yam_set_uart(struct net_device *dev)
+{
+ struct yam_port *yp = netdev_priv(dev);
+ int divisor = 115200 / yp->baudrate;
+
+ outb(0, IER(dev->base_addr));
+ outb(LCR_DLAB | LCR_BIT8, LCR(dev->base_addr));
+ outb(divisor, DLL(dev->base_addr));
+ outb(0, DLM(dev->base_addr));
+ outb(LCR_BIT8, LCR(dev->base_addr));
+ outb(PTT_OFF, MCR(dev->base_addr));
+ outb(0x00, FCR(dev->base_addr));
+
+ /* Flush pending irq */
+
+ inb(RBR(dev->base_addr));
+ inb(MSR(dev->base_addr));
+
+ /* Enable rx irq */
+
+ outb(ENABLE_RTXINT, IER(dev->base_addr));
+}
+
+
+/* --------------------------------------------------------------------- */
+
+enum uart {
+ c_uart_unknown, c_uart_8250,
+ c_uart_16450, c_uart_16550, c_uart_16550A
+};
+
+static const char *uart_str[] =
+{"unknown", "8250", "16450", "16550", "16550A"};
+
+static enum uart yam_check_uart(unsigned int iobase)
+{
+ unsigned char b1, b2, b3;
+ enum uart u;
+ enum uart uart_tab[] =
+ {c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A};
+
+ b1 = inb(MCR(iobase));
+ outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
+ b2 = inb(MSR(iobase));
+ outb(0x1a, MCR(iobase));
+ b3 = inb(MSR(iobase)) & 0xf0;
+ outb(b1, MCR(iobase)); /* restore old values */
+ outb(b2, MSR(iobase));
+ if (b3 != 0x90)
+ return c_uart_unknown;
+ inb(RBR(iobase));
+ inb(RBR(iobase));
+ outb(0x01, FCR(iobase)); /* enable FIFOs */
+ u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
+ if (u == c_uart_16450) {
+ outb(0x5a, SCR(iobase));
+ b1 = inb(SCR(iobase));
+ outb(0xa5, SCR(iobase));
+ b2 = inb(SCR(iobase));
+ if ((b1 != 0x5a) || (b2 != 0xa5))
+ u = c_uart_8250;
+ }
+ return u;
+}
+
+/******************************************************************************
+* Rx Section
+******************************************************************************/
+static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
+{
+ if (yp->dcd && yp->rx_len >= 3 && yp->rx_len < YAM_MAX_FRAME) {
+ int pkt_len = yp->rx_len - 2 + 1; /* -CRC + kiss */
+ struct sk_buff *skb;
+
+ if ((yp->rx_crch & yp->rx_crcl) != 0xFF) {
+ /* Bad crc */
+ } else {
+ if (!(skb = dev_alloc_skb(pkt_len))) {
+ printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
+ ++yp->stats.rx_dropped;
+ } else {
+ unsigned char *cp;
+ skb->dev = dev;
+ cp = skb_put(skb, pkt_len);
+ *cp++ = 0; /* KISS kludge */
+ memcpy(cp, yp->rx_buf, pkt_len - 1);
+ skb->protocol = htons(ETH_P_AX25);
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ++yp->stats.rx_packets;
+ }
+ }
+ }
+ yp->rx_len = 0;
+ yp->rx_crcl = 0x21;
+ yp->rx_crch = 0xf3;
+}
+
+static inline void yam_rx_byte(struct net_device *dev, struct yam_port *yp, unsigned char rxb)
+{
+ if (yp->rx_len < YAM_MAX_FRAME) {
+ unsigned char c = yp->rx_crcl;
+ yp->rx_crcl = (chktabl[c] ^ yp->rx_crch);
+ yp->rx_crch = (chktabh[c] ^ rxb);
+ yp->rx_buf[yp->rx_len++] = rxb;
+ }
+}
+
+/********************************************************************************
+* TX Section
+********************************************************************************/
+
+static void ptt_on(struct net_device *dev)
+{
+ outb(PTT_ON, MCR(dev->base_addr));
+}
+
+static void ptt_off(struct net_device *dev)
+{
+ outb(PTT_OFF, MCR(dev->base_addr));
+}
+
+static int yam_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct yam_port *yp = netdev_priv(dev);
+
+ skb_queue_tail(&yp->send_queue, skb);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+static void yam_start_tx(struct net_device *dev, struct yam_port *yp)
+{
+ if ((yp->tx_state == TX_TAIL) || (yp->txd == 0))
+ yp->tx_count = 1;
+ else
+ yp->tx_count = (yp->bitrate * yp->txd) / 8000;
+ yp->tx_state = TX_HEAD;
+ ptt_on(dev);
+}
+
+static unsigned short random_seed;
+
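+/* simple linear congruential generator; yam_arbitrate() compares its low
+ * byte against the persistence parameter for p-persistent CSMA */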
+static inline unsigned short random_num(void)
+{
+ random_seed = 28629 * random_seed + 157;
+ return random_seed;
+}
+
+static void yam_arbitrate(struct net_device *dev)
+{
+ struct yam_port *yp = netdev_priv(dev);
+
+ if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF ||
+ skb_queue_empty(&yp->send_queue))
+ return;
+ /* tx_state is TX_OFF and there is data to send */
+
+ if (yp->dupmode) {
+ /* Full duplex mode, don't wait */
+ yam_start_tx(dev, yp);
+ return;
+ }
+ if (yp->dcd) {
+ /* DCD on, wait slotime ... */
+ yp->slotcnt = yp->slot / 10;
+ return;
+ }
+ /* Is slottime passed ? */
+ if ((--yp->slotcnt) > 0)
+ return;
+
+ yp->slotcnt = yp->slot / 10;
+
+ /* is random > persist ? */
+ if ((random_num() % 256) > yp->pers)
+ return;
+
+ yam_start_tx(dev, yp);
+}
+
+static void yam_dotimer(unsigned long dummy)
+{
+ int i;
+
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = yam_devs[i];
+ if (dev && netif_running(dev))
+ yam_arbitrate(dev);
+ }
+ yam_timer.expires = jiffies + HZ / 100;
+ add_timer(&yam_timer);
+}
+
+static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
+{
+ struct sk_buff *skb;
+ unsigned char b, temp;
+
+ switch (yp->tx_state) {
+ case TX_OFF:
+ break;
+ case TX_HEAD:
+ if (--yp->tx_count <= 0) {
+ if (!(skb = skb_dequeue(&yp->send_queue))) {
+ ptt_off(dev);
+ yp->tx_state = TX_OFF;
+ break;
+ }
+ yp->tx_state = TX_DATA;
+ if (skb->data[0] != 0) {
+/* do_kiss_params(s, skb->data, skb->len); */
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ yp->tx_len = skb->len - 1; /* strip KISS byte */
+ if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ memcpy(yp->tx_buf, skb->data + 1, yp->tx_len);
+ dev_kfree_skb_any(skb);
+ yp->tx_count = 0;
+ yp->tx_crcl = 0x21;
+ yp->tx_crch = 0xf3;
+ yp->tx_state = TX_DATA;
+ }
+ break;
+ case TX_DATA:
+ b = yp->tx_buf[yp->tx_count++];
+ outb(b, THR(dev->base_addr));
+ temp = yp->tx_crcl;
+ yp->tx_crcl = chktabl[temp] ^ yp->tx_crch;
+ yp->tx_crch = chktabh[temp] ^ b;
+ if (yp->tx_count >= yp->tx_len) {
+ yp->tx_state = TX_CRC1;
+ }
+ break;
+ case TX_CRC1:
+ yp->tx_crch = chktabl[yp->tx_crcl] ^ yp->tx_crch;
+ yp->tx_crcl = chktabh[yp->tx_crcl] ^ chktabl[yp->tx_crch] ^ 0xff;
+ outb(yp->tx_crcl, THR(dev->base_addr));
+ yp->tx_state = TX_CRC2;
+ break;
+ case TX_CRC2:
+ outb(chktabh[yp->tx_crch] ^ 0xFF, THR(dev->base_addr));
+ if (skb_queue_empty(&yp->send_queue)) {
+ yp->tx_count = (yp->bitrate * yp->txtail) / 8000;
+ if (yp->dupmode == 2)
+ yp->tx_count += (yp->bitrate * yp->holdd) / 8;
+ if (yp->tx_count == 0)
+ yp->tx_count = 1;
+ yp->tx_state = TX_TAIL;
+ } else {
+ yp->tx_count = 1;
+ yp->tx_state = TX_HEAD;
+ }
+ ++yp->stats.tx_packets;
+ break;
+ case TX_TAIL:
+ if (--yp->tx_count <= 0) {
+ yp->tx_state = TX_OFF;
+ ptt_off(dev);
+ }
+ break;
+ }
+}
+
+/***********************************************************************************
+* ISR routine
+************************************************************************************/
+
+static irqreturn_t yam_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev;
+ struct yam_port *yp;
+ unsigned char iir;
+ int counter = 100;
+ int i;
+ int handled = 0;
+
+ for (i = 0; i < NR_PORTS; i++) {
+ dev = yam_devs[i];
+ yp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ while ((iir = IIR_MASK & inb(IIR(dev->base_addr))) != IIR_NOPEND) {
+ unsigned char msr = inb(MSR(dev->base_addr));
+ unsigned char lsr = inb(LSR(dev->base_addr));
+ unsigned char rxb;
+
+ handled = 1;
+
+ if (lsr & LSR_OE)
+ ++yp->stats.rx_fifo_errors;
+
+ yp->dcd = (msr & RX_DCD) ? 1 : 0;
+
+ if (--counter <= 0) {
+ printk(KERN_ERR "%s: too many irq iir=%d\n",
+ dev->name, iir);
+ goto out;
+ }
+ if (msr & TX_RDY) {
+ ++yp->nb_mdint;
+ yam_tx_byte(dev, yp);
+ }
+ if (lsr & LSR_RXC) {
+ ++yp->nb_rxint;
+ rxb = inb(RBR(dev->base_addr));
+ if (msr & RX_FLAG)
+ yam_rx_flag(dev, yp);
+ else
+ yam_rx_byte(dev, yp, rxb);
+ }
+ }
+ }
+out:
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void *yam_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL;
+}
+
+static void *yam_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL;
+}
+
+static void yam_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int yam_seq_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = v;
+ const struct yam_port *yp = netdev_priv(dev);
+
+ seq_printf(seq, "Device %s\n", dev->name);
+ seq_printf(seq, " Up %d\n", netif_running(dev));
+ seq_printf(seq, " Speed %u\n", yp->bitrate);
+ seq_printf(seq, " IoBase 0x%x\n", yp->iobase);
+ seq_printf(seq, " BaudRate %u\n", yp->baudrate);
+ seq_printf(seq, " IRQ %u\n", yp->irq);
+ seq_printf(seq, " TxState %u\n", yp->tx_state);
+ seq_printf(seq, " Duplex %u\n", yp->dupmode);
+ seq_printf(seq, " HoldDly %u\n", yp->holdd);
+ seq_printf(seq, " TxDelay %u\n", yp->txd);
+ seq_printf(seq, " TxTail %u\n", yp->txtail);
+ seq_printf(seq, " SlotTime %u\n", yp->slot);
+ seq_printf(seq, " Persist %u\n", yp->pers);
+ seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets);
+ seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets);
+ seq_printf(seq, " TxInt %u\n", yp->nb_mdint);
+ seq_printf(seq, " RxInt %u\n", yp->nb_rxint);
+ seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors);
+ seq_printf(seq, "\n");
+ return 0;
+}
+
+static struct seq_operations yam_seqops = {
+ .start = yam_seq_start,
+ .next = yam_seq_next,
+ .stop = yam_seq_stop,
+ .show = yam_seq_show,
+};
+
+static int yam_info_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &yam_seqops);
+}
+
+static struct file_operations yam_info_fops = {
+ .owner = THIS_MODULE,
+ .open = yam_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif
+
+
+/* --------------------------------------------------------------------- */
+
+static struct net_device_stats *yam_get_stats(struct net_device *dev)
+{
+ struct yam_port *yp;
+
+ if (!dev)
+ return NULL;
+
+ yp = netdev_priv(dev);
+ if (yp->magic != YAM_MAGIC)
+ return NULL;
+
+ /*
+ * Get the current statistics. This may be called with the
+ * card open or closed.
+ */
+ return &yp->stats;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int yam_open(struct net_device *dev)
+{
+ struct yam_port *yp = netdev_priv(dev);
+ enum uart u;
+ int i;
+ int ret=0;
+
+ printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);
+
+ if (!dev || !yp->bitrate)
+ return -ENXIO;
+ if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
+ dev->irq < 2 || dev->irq > 15) {
+ return -ENXIO;
+ }
+ if (!request_region(dev->base_addr, YAM_EXTENT, dev->name))
+ {
+		printk(KERN_ERR "%s: iobase 0x%lx busy\n", dev->name, dev->base_addr);
+ return -EACCES;
+ }
+ if ((u = yam_check_uart(dev->base_addr)) == c_uart_unknown) {
+ printk(KERN_ERR "%s: cannot find uart type\n", dev->name);
+ ret = -EIO;
+ goto out_release_base;
+ }
+ if (fpga_download(dev->base_addr, yp->bitrate)) {
+ printk(KERN_ERR "%s: cannot init FPGA\n", dev->name);
+ ret = -EIO;
+ goto out_release_base;
+ }
+ outb(0, IER(dev->base_addr));
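+	/* keep UART interrupts masked (IER = 0) until the handler is installed */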
+ if (request_irq(dev->irq, yam_interrupt, SA_INTERRUPT | SA_SHIRQ, dev->name, dev)) {
+ printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
+ ret = -EBUSY;
+ goto out_release_base;
+ }
+
+ yam_set_uart(dev);
+
+ netif_start_queue(dev);
+
+ yp->slotcnt = yp->slot / 10;
+
+	/* Reset overrun counters for all ports - FPGA programming causes overruns */
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = yam_devs[i];
+ struct yam_port *yp = netdev_priv(dev);
+ inb(LSR(dev->base_addr));
+ yp->stats.rx_fifo_errors = 0;
+ }
+
+ printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
+ uart_str[u]);
+ return 0;
+
+out_release_base:
+ release_region(dev->base_addr, YAM_EXTENT);
+ return ret;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int yam_close(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct yam_port *yp = netdev_priv(dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * disable interrupts
+ */
+ outb(0, IER(dev->base_addr));
+ outb(1, MCR(dev->base_addr));
+ /* Remove IRQ handler if last */
+ free_irq(dev->irq,dev);
+ release_region(dev->base_addr, YAM_EXTENT);
+ netif_stop_queue(dev);
+ while ((skb = skb_dequeue(&yp->send_queue)))
+ dev_kfree_skb(skb);
+
+ printk(KERN_INFO "%s: close yam at iobase 0x%lx irq %u\n",
+ yam_drvname, dev->base_addr, dev->irq);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct yam_port *yp = netdev_priv(dev);
+ struct yamdrv_ioctl_cfg yi;
+ struct yamdrv_ioctl_mcs *ym;
+ int ioctl_cmd;
+
+ if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
+ return -EFAULT;
+
+ if (yp->magic != YAM_MAGIC)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -EINVAL;
+
+ switch (ioctl_cmd) {
+
+ case SIOCYAMRESERVED:
+ return -EINVAL; /* unused */
+
+ case SIOCYAMSMCS:
+ if (netif_running(dev))
+ return -EINVAL; /* Cannot change this parameter when up */
+ if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
+ return -ENOBUFS;
+ ym->bitrate = 9600;
+ if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
+ kfree(ym);
+ return -EFAULT;
+ }
+ if (ym->bitrate > YAM_MAXBITRATE) {
+ kfree(ym);
+ return -EINVAL;
+ }
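+		/* store the uploaded FPGA bit image for this bitrate; it is
+		   released again in yam_cleanup_driver() */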
+ add_mcs(ym->bits, ym->bitrate);
+ kfree(ym);
+ break;
+
+ case SIOCYAMSCFG:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
+
+ if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
+ return -EINVAL; /* Cannot change this parameter when up */
+ if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
+ return -EINVAL; /* Cannot change this parameter when up */
+ if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
+ return -EINVAL; /* Cannot change this parameter when up */
+ if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
+ return -EINVAL; /* Cannot change this parameter when up */
+
+ if (yi.cfg.mask & YAM_IOBASE) {
+ yp->iobase = yi.cfg.iobase;
+ dev->base_addr = yi.cfg.iobase;
+ }
+ if (yi.cfg.mask & YAM_IRQ) {
+ if (yi.cfg.irq > 15)
+ return -EINVAL;
+ yp->irq = yi.cfg.irq;
+ dev->irq = yi.cfg.irq;
+ }
+ if (yi.cfg.mask & YAM_BITRATE) {
+ if (yi.cfg.bitrate > YAM_MAXBITRATE)
+ return -EINVAL;
+ yp->bitrate = yi.cfg.bitrate;
+ }
+ if (yi.cfg.mask & YAM_BAUDRATE) {
+ if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
+ return -EINVAL;
+ yp->baudrate = yi.cfg.baudrate;
+ }
+ if (yi.cfg.mask & YAM_MODE) {
+ if (yi.cfg.mode > YAM_MAXMODE)
+ return -EINVAL;
+ yp->dupmode = yi.cfg.mode;
+ }
+ if (yi.cfg.mask & YAM_HOLDDLY) {
+ if (yi.cfg.holddly > YAM_MAXHOLDDLY)
+ return -EINVAL;
+ yp->holdd = yi.cfg.holddly;
+ }
+ if (yi.cfg.mask & YAM_TXDELAY) {
+ if (yi.cfg.txdelay > YAM_MAXTXDELAY)
+ return -EINVAL;
+ yp->txd = yi.cfg.txdelay;
+ }
+ if (yi.cfg.mask & YAM_TXTAIL) {
+ if (yi.cfg.txtail > YAM_MAXTXTAIL)
+ return -EINVAL;
+ yp->txtail = yi.cfg.txtail;
+ }
+ if (yi.cfg.mask & YAM_PERSIST) {
+ if (yi.cfg.persist > YAM_MAXPERSIST)
+ return -EINVAL;
+ yp->pers = yi.cfg.persist;
+ }
+ if (yi.cfg.mask & YAM_SLOTTIME) {
+ if (yi.cfg.slottime > YAM_MAXSLOTTIME)
+ return -EINVAL;
+ yp->slot = yi.cfg.slottime;
+ yp->slotcnt = yp->slot / 10;
+ }
+ break;
+
+ case SIOCYAMGCFG:
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+ yi.cfg.bitrate = yp->bitrate;
+ yi.cfg.baudrate = yp->baudrate;
+ yi.cfg.mode = yp->dupmode;
+ yi.cfg.txdelay = yp->txd;
+ yi.cfg.holddly = yp->holdd;
+ yi.cfg.txtail = yp->txtail;
+ yi.cfg.persist = yp->pers;
+ yi.cfg.slottime = yp->slot;
+ if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int yam_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = (struct sockaddr *) addr;
+
+ /* addr is an AX.25 shifted ASCII mac address */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static void yam_setup(struct net_device *dev)
+{
+ struct yam_port *yp = netdev_priv(dev);
+
+ yp->magic = YAM_MAGIC;
+ yp->bitrate = DEFAULT_BITRATE;
+ yp->baudrate = DEFAULT_BITRATE * 2;
+ yp->iobase = 0;
+ yp->irq = 0;
+ yp->dupmode = 0;
+ yp->holdd = DEFAULT_HOLDD;
+ yp->txd = DEFAULT_TXD;
+ yp->txtail = DEFAULT_TXTAIL;
+ yp->slot = DEFAULT_SLOT;
+ yp->pers = DEFAULT_PERS;
+ yp->dev = dev;
+
+ dev->base_addr = yp->iobase;
+ dev->irq = yp->irq;
+ SET_MODULE_OWNER(dev);
+
+ dev->open = yam_open;
+ dev->stop = yam_close;
+ dev->do_ioctl = yam_ioctl;
+ dev->hard_start_xmit = yam_send_packet;
+ dev->get_stats = yam_get_stats;
+
+ skb_queue_head_init(&yp->send_queue);
+
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ dev->hard_header = ax25_encapsulate;
+ dev->rebuild_header = ax25_rebuild_header;
+#else /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+#endif /* CONFIG_AX25 || CONFIG_AX25_MODULE */
+
+ dev->set_mac_address = yam_set_mac_address;
+
+ dev->type = ARPHRD_AX25; /* AF_AX25 device */
+ dev->hard_header_len = 73; /* We do digipeaters now */
+ dev->mtu = 256; /* AX25 is the default */
+ dev->addr_len = 7; /* sizeof an ax.25 address */
+ memcpy(dev->broadcast, ax25_bcast, 7);
+ memcpy(dev->dev_addr, ax25_test, 7);
+
+}
+
+static int __init yam_init_driver(void)
+{
+ struct net_device *dev;
+ int i, err;
+ char name[IFNAMSIZ];
+
+ printk(yam_drvinfo);
+
+ for (i = 0; i < NR_PORTS; i++) {
+ sprintf(name, "yam%d", i);
+
+ dev = alloc_netdev(sizeof(struct yam_port), name,
+ yam_setup);
+ if (!dev) {
+			printk(KERN_ERR "yam: cannot allocate net device %s\n",
+			       name);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = register_netdev(dev);
+		if (err) {
+			printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
+			free_netdev(dev);
+			goto error;
+		}
+ yam_devs[i] = dev;
+
+ }
+
+ yam_timer.function = yam_dotimer;
+ yam_timer.expires = jiffies + HZ / 100;
+ add_timer(&yam_timer);
+
+ proc_net_fops_create("yam", S_IRUGO, &yam_info_fops);
+ return 0;
+ error:
+ while (--i >= 0) {
+ unregister_netdev(yam_devs[i]);
+ free_netdev(yam_devs[i]);
+ }
+ return err;
+}
+
+/* --------------------------------------------------------------------- */
+
+static void __exit yam_cleanup_driver(void)
+{
+ struct yam_mcs *p;
+ int i;
+
+ del_timer(&yam_timer);
+ for (i = 0; i < NR_PORTS; i++) {
+ struct net_device *dev = yam_devs[i];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+
+ while (yam_data) {
+ p = yam_data;
+ yam_data = yam_data->next;
+ kfree(p);
+ }
+
+ proc_net_remove("yam");
+}
+
+/* --------------------------------------------------------------------- */
+
+MODULE_AUTHOR("Frederic Rible F1OAT frible@teaser.fr");
+MODULE_DESCRIPTION("Yam amateur radio modem driver");
+MODULE_LICENSE("GPL");
+
+module_init(yam_init_driver);
+module_exit(yam_cleanup_driver);
+
+/* --------------------------------------------------------------------- */
+
diff --git a/drivers/net/hamradio/yam1200.h b/drivers/net/hamradio/yam1200.h
new file mode 100644
index 000000000000..53ca8a3903a7
--- /dev/null
+++ b/drivers/net/hamradio/yam1200.h
@@ -0,0 +1,343 @@
+/*
+ *
+ * File yam1k2b5.mcs converted to h format by mcs2h
+ *
+ * (C) F6FBB 1998
+ *
+ * Tue Aug 25 20:24:08 1998
+ *
+ */
+
+static unsigned char bits_1200[]= {
+0xff,0xf2,0x00,0xa5,0xad,0xff,0xfe,0x9f,0xff,0xef,0xf3,0xcb,0xff,0xdb,0xfc,0xf2,
+0xff,0xf6,0xff,0x3c,0xbf,0xfd,0xbf,0xdf,0x6e,0x3f,0x6f,0xf1,0x7d,0xb4,0xfd,0xbf,
+0xdf,0x6f,0x3f,0x6f,0xf7,0x0b,0xff,0xdb,0xfd,0xf2,0xff,0xf6,0xff,0xff,0xff,0xff,
+0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xdf,0xff,0xff,0xff,0xef,0xff,0xff,0xff,
+0xfd,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xbf,
+0xff,0xff,0xf7,0xff,0xff,0xfb,0xff,0xff,0xff,0xfc,0xff,0xfe,0xff,0xff,0xff,0xf0,
+0x5f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf1,0xff,0xff,0xfe,0x7f,0xbf,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xff,0xfb,0xff,0xff,0xff,0xf0,0x9f,
+0xff,0xff,0xff,0xfe,0xff,0xfd,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xf7,0xff,
+0xff,0xff,0xfb,0xff,0xfb,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xf7,0xff,0xff,0xfb,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xef,0xff,0xf0,0x5f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xef,0xff,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xbf,0xff,0xff,0xdf,0xf7,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,
+0xff,0xff,0xff,0xfd,0xff,0xbf,0xf1,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xfb,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x6f,0xff,0xff,0xff,
+0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xf7,0xff,0xff,0xf1,0xff,0xff,0xf7,0xbf,0xe7,0xff,0xff,0xff,0xff,0xfb,
+0xff,0xff,0xff,0xff,0xff,0xff,0x77,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xdb,
+0xff,0xff,0xf5,0xa5,0xfd,0x4b,0x6e,0xef,0x33,0x32,0xdd,0xd3,0x4a,0xd6,0x92,0xfe,
+0xb3,0x3f,0xbd,0xf1,0xfa,0xdb,0xfe,0xf7,0xf6,0x96,0xbd,0xbd,0xff,0xbd,0xff,0xed,
+0x7f,0x6b,0x7f,0xfb,0xdf,0xfe,0xfb,0xfe,0x90,0xcf,0xff,0xff,0xff,0xfe,0xbe,0xef,
+0xff,0xff,0xdb,0x5f,0xf6,0xff,0xf6,0x8f,0xfd,0xa5,0xdd,0xff,0xff,0xff,0xff,0x6f,
+0x7f,0xdb,0xf1,0xfc,0xbf,0xff,0x6f,0xff,0xef,0xfc,0x5b,0x5d,0xda,0xdf,0xf4,0xff,
+0xf2,0xff,0xfd,0xbf,0xff,0xff,0xff,0xd0,0x1f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,
+0xff,0xfb,0xef,0xb7,0xfc,0x33,0xff,0xfb,0xff,0x04,0x6a,0xf3,0x3c,0x36,0xff,0xf0,
+0x0f,0xf1,0x0f,0xff,0xff,0xff,0xf3,0x15,0x72,0x0f,0xf1,0x6f,0xff,0xfe,0x94,0x3f,
+0xff,0xff,0xff,0x7b,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf0,
+0xf7,0xef,0xb7,0xfc,0x33,0xff,0xff,0xff,0x04,0x6a,0xf3,0x3c,0x36,0xff,0xf0,0x0f,
+0xf1,0x0f,0xff,0xff,0xff,0xf3,0x15,0x73,0x8f,0xf2,0x6f,0xff,0xfe,0x94,0x3f,0xff,
+0xff,0xff,0x7d,0x9f,0xff,0xf0,0x0f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0x9e,
+0xff,0xfc,0xef,0xd3,0xfb,0xff,0x7f,0xf5,0x5f,0xfe,0x59,0xff,0xff,0xff,0xfc,0xf1,
+0xfe,0x7f,0xff,0xff,0xfa,0x17,0xff,0xe7,0xef,0xef,0xff,0xff,0x3f,0xf1,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xf5,0xff,0xbf,0xff,0xfc,0xea,
+0xff,0xf0,0xff,0xff,0xbf,0xf9,0x3f,0xb1,0xef,0xff,0xd7,0xff,0xfb,0xff,0xf0,0xff,
+0xff,0xf3,0xff,0xdf,0xff,0x7b,0xff,0xfd,0xff,0xf6,0xff,0xbf,0xff,0xff,0xbf,0xff,
+0xff,0xff,0xda,0xf0,0xff,0xff,0xff,0xff,0xfe,0xf2,0xc0,0x01,0x00,0x00,0x02,0x02,
+0x02,0x02,0x00,0x40,0x40,0x40,0x10,0x00,0x00,0x00,0x20,0x00,0x00,0x01,0x00,0x00,
+0x00,0x00,0x00,0x00,0x19,0x00,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,
+0x00,0x3c,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xfb,0xff,0xfd,0xff,
+0xff,0x7f,0xff,0xff,0xbf,0xff,0xef,0xff,0xff,0xfd,0xff,0xff,0xf1,0xff,0xdf,0xff,
+0xff,0xff,0xff,0xff,0xff,0xbf,0xfe,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xdf,
+0xdb,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xf0,0xbf,0xdf,0xff,0x7f,0xff,0xff,0xff,0xff,
+0xdf,0xdf,0xff,0xef,0xff,0x9e,0xef,0xff,0xff,0x7f,0xff,0xf1,0xef,0xff,0xff,0xff,
+0xf7,0xfa,0xbf,0xff,0xff,0xfe,0x47,0xef,0xff,0xbd,0xf6,0xff,0xff,0xdf,0xf5,0xf0,
+0xf0,0xef,0xff,0xff,0xff,0xfe,0xf8,0x30,0x00,0x00,0x00,0x04,0x00,0x01,0x02,0x08,
+0x16,0x00,0x00,0x00,0x80,0x00,0x01,0x02,0x00,0x80,0x01,0x0c,0x02,0x00,0x00,0x01,
+0x00,0x00,0x20,0x00,0x00,0x06,0x00,0x20,0x00,0x10,0x00,0x14,0x00,0x04,0xc1,0xf0,
+0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xff,0x7f,
+0xec,0xff,0xff,0xfa,0xff,0xbf,0xff,0x6f,0xff,0xe1,0xff,0xff,0xff,0xff,0xbd,0xfe,
+0x46,0xff,0xef,0x7f,0xcd,0xdf,0xff,0xff,0xfd,0xff,0xbd,0xff,0x7f,0x7f,0xf0,0x4f,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x0f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0xa4,0xbc,0xcd,0x6d,0x6b,0x6f,0x5b,0xdc,0x33,
+0x5a,0xf6,0xf7,0xf6,0xb3,0x3f,0xbd,0xc1,0xfa,0x5a,0xf6,0xf6,0xb6,0xf7,0xff,0xbd,
+0xbb,0x3c,0xce,0xcf,0x34,0xef,0x33,0xbb,0xcc,0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,
+0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,0xf6,0xd6,0xff,0xfd,0xfd,0xbf,0xff,0xad,
+0xbf,0xf9,0x7f,0x6f,0xfc,0xdb,0xf1,0xfd,0xbf,0xff,0x6f,0xff,0xff,0xda,0xdb,0xfc,
+0xdb,0xff,0x76,0x8f,0xf6,0xff,0xcd,0xab,0xfe,0xfb,0xff,0xd0,0xff,0xff,0xff,0xff,
+0xfe,0xff,0x9f,0xff,0xf4,0x20,0xaf,0x6d,0x0b,0xc1,0x7b,0xff,0xff,0xff,0xcb,0xff,
+0x3f,0xf0,0xef,0x7f,0x0f,0xf1,0xc3,0x3c,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0x0b,
+0x1d,0x6a,0x64,0x05,0x6b,0x99,0x01,0xff,0xfd,0xef,0xf0,0x2f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xf4,0x00,0x2f,0xcc,0x0b,0xc3,0x7f,0xff,0xff,0xff,0x0a,0xdf,0xbf,
+0xfd,0x7f,0xff,0xff,0xf1,0xc3,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x4a,0x0e,
+0x96,0x64,0x02,0x97,0x99,0x10,0xff,0xff,0xff,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xfe,0x84,0xf9,0xd5,0x27,0xf1,0x7f,0xff,0xf8,0xeb,0xdf,0xf3,0xcf,0x3f,
+0x1f,0xff,0xf7,0x11,0xff,0xcf,0xff,0xfe,0x67,0xff,0xff,0xff,0xff,0xc4,0xff,0xff,
+0xb3,0xa1,0xff,0xf9,0xe0,0xff,0xff,0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xf5,0xff,
+0xff,0xfb,0x7f,0xe0,0xff,0xc7,0xfe,0x7f,0x3f,0xff,0xfd,0x77,0x8d,0x7f,0x0f,0xff,
+0xc3,0xff,0xf1,0xbf,0x8f,0xcf,0xff,0xff,0xdd,0x7b,0xff,0xf6,0xfa,0xf7,0xff,0x40,
+0x9f,0xf9,0x7f,0xd8,0xff,0xff,0xfa,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,
+0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x10,0x00,0x00,0x10,
+0x00,0x01,0x00,0x10,0x20,0x20,0x00,0x00,0x10,0x00,0x04,0x01,0x05,0x00,0x00,0x00,
+0x00,0x40,0x40,0x00,0x00,0x3c,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,
+0xff,0xff,0xfe,0x7f,0x7f,0xff,0xef,0xff,0xff,0xdf,0xff,0xff,0xdf,0xff,0xef,0xf7,
+0xf1,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xf7,0xff,0xff,0xff,0xfc,0xfd,0xff,0x7f,
+0x7e,0xff,0xff,0xff,0xdb,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xf0,0xbb,0xff,0xff,0xff,
+0xff,0xff,0xfe,0xeb,0xfd,0x6f,0xff,0xf7,0xfe,0xf5,0x7f,0xff,0xff,0x7f,0xbf,0xb1,
+0xff,0xff,0x9f,0xbf,0xfb,0xff,0xfe,0xff,0xfe,0xff,0xf7,0xeb,0xdf,0xbf,0x5f,0xdd,
+0xff,0xdb,0xfd,0xd0,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xf8,0x30,0x20,0x00,0x42,0x00,
+0x00,0x00,0x30,0x18,0x04,0x08,0x09,0x21,0x82,0x80,0x02,0x00,0x08,0x00,0x01,0x00,
+0x00,0x00,0x0c,0x20,0x10,0x00,0x11,0x00,0x44,0x84,0x00,0x20,0x20,0x84,0x80,0x00,
+0x00,0x00,0xc1,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xf7,0xff,0xfb,0xdd,0xf9,0xff,
+0xda,0xff,0xdc,0xdd,0xfc,0xfb,0xff,0xbf,0xfb,0x3e,0xd7,0x96,0xfe,0x61,0xf7,0xff,
+0x7f,0xff,0x3f,0xfd,0xff,0xdf,0xcf,0xf7,0xdf,0xf7,0xbf,0xfd,0xff,0xfe,0xef,0xef,
+0xfe,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf3,0xbd,0xfd,0x4b,0x74,0xcf,
+0x73,0x5b,0xcb,0x3b,0xdf,0xfe,0xf7,0xfe,0xd3,0x75,0xac,0xa1,0xfb,0xdf,0xfe,0xf7,
+0x76,0x96,0xb5,0x24,0xbd,0xa5,0xad,0x49,0x2f,0x69,0x2b,0x52,0x5b,0xbd,0xff,0xff,
+0xf0,0xcf,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,0xf6,0xfe,0xff,0xcc,
+0xa7,0xfb,0xad,0xff,0x7f,0x6f,0xff,0x6d,0x7f,0xdb,0xf1,0xfd,0xbf,0xff,0x6f,0xff,
+0x6f,0xff,0xdb,0xff,0xdb,0xff,0xf6,0x97,0xf6,0xff,0xb5,0xb5,0xff,0xff,0xff,0xd0,
+0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0xa5,0xbc,0x43,0xfc,0x7c,0x03,0xe7,
+0xff,0xff,0x20,0xff,0xff,0xff,0xcc,0xfd,0x7d,0xf1,0xff,0xff,0xff,0xff,0xd5,0x59,
+0xba,0x56,0x66,0x6a,0xad,0x9a,0xa9,0x9a,0x97,0xa5,0xaa,0xbb,0xff,0xff,0xf0,0x0f,
+0xff,0xff,0xff,0xfe,0xfe,0xfb,0xff,0xfd,0xf7,0xfd,0x43,0xff,0xfd,0x6b,0xe7,0xff,
+0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0x3f,0xf1,0xff,0xff,0xff,0xff,0xd5,0x59,0xb5,
+0xa6,0x66,0x6a,0xad,0x9a,0xa9,0x99,0x6b,0x5a,0xaa,0xff,0xff,0xb7,0xf0,0x3f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0x9c,0xf7,0xfd,0xd2,0x41,0xff,0xff,0xf2,0x7f,
+0x8f,0xff,0xff,0x3d,0xf3,0xff,0x17,0xf1,0xff,0xff,0xff,0xff,0xff,0x7f,0xdf,0xfc,
+0x8f,0x38,0xff,0xef,0x23,0xff,0xfb,0xf7,0xc8,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,
+0xff,0xfe,0xf5,0x7f,0xff,0xfd,0xff,0xe4,0xff,0xeb,0xff,0xcf,0xbf,0xfa,0xff,0xab,
+0xef,0xff,0xfb,0xff,0xf3,0xfd,0x61,0xff,0xff,0xff,0xff,0xfa,0xff,0xfb,0xfd,0x0d,
+0xff,0xfe,0xff,0x43,0x7f,0xfe,0xbf,0xd0,0xfd,0xff,0xfa,0xf0,0x3f,0xff,0xff,0xff,
+0xfe,0xf3,0xc0,0x00,0x00,0x00,0x02,0x00,0x02,0x01,0x00,0x60,0xc0,0x40,0x00,0x00,
+0x00,0x00,0x34,0x04,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x88,0x00,
+0x00,0x03,0x00,0x00,0x40,0x00,0x40,0x00,0x00,0x3c,0xf0,0x3f,0xff,0xff,0xff,0xfe,
+0xfd,0x3f,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0x7f,0xbf,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xf7,0xf1,0xff,0xff,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xfd,0xff,
+0xff,0xff,0xff,0xfe,0xfe,0x5f,0xff,0xff,0xcb,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xf0,
+0xff,0xff,0xfd,0xff,0xef,0xe3,0xde,0xee,0xd9,0xc5,0x93,0xff,0xff,0xfe,0xfe,0xff,
+0xfb,0xee,0xfe,0xf1,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xbf,0xf7,0xff,0xff,0x7f,
+0xaf,0xbd,0xdf,0xdf,0xfb,0xf3,0xf3,0xf0,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xf8,0x34,
+0x00,0x06,0x61,0x00,0x18,0x01,0xa0,0x05,0x17,0x00,0x20,0x05,0x28,0x20,0x00,0x00,
+0x05,0x00,0x41,0x00,0x00,0x40,0x00,0x09,0x00,0x01,0x20,0x86,0x82,0x08,0x40,0x03,
+0x80,0x30,0x70,0x08,0x14,0x02,0xc1,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,
+0xff,0xff,0xbd,0xef,0xfb,0xff,0xff,0xfb,0x9c,0x7f,0xef,0xdf,0xff,0xbf,0xeb,0xde,
+0xff,0xc1,0x7f,0xff,0xfb,0x7f,0xff,0xff,0xff,0x5f,0xff,0xff,0xff,0xdf,0xbf,0xef,
+0x3f,0xf7,0x8f,0xef,0x7f,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xbd,
+0xdf,0xef,0x7d,0x6d,0x2b,0x5a,0x5d,0xd2,0xdf,0xf6,0x92,0xb6,0xb2,0xb3,0xac,0xa1,
+0xfb,0xdf,0xfe,0xf1,0xee,0xf5,0xf6,0xbc,0x6b,0xbd,0x7d,0xaf,0x1a,0xef,0x5f,0x6b,
+0xc6,0xff,0xff,0xff,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,
+0xf6,0xff,0xf6,0xb7,0xfd,0xad,0xfd,0xbf,0xf3,0x6f,0xff,0x6f,0xff,0xdb,0xd1,0xfd,
+0xbf,0xff,0x6f,0xf5,0x6b,0xbc,0x5b,0x3c,0xda,0xef,0x16,0xaf,0x16,0xff,0xcd,0xab,
+0xff,0x6f,0xff,0xd0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfc,0xbf,0xff,0xff,
+0xff,0x6c,0x03,0x10,0xc1,0xf3,0xff,0xf3,0x3a,0xf3,0xca,0xff,0xaf,0xf1,0xff,0xff,
+0xff,0xff,0xd9,0x96,0xa6,0x65,0xa6,0x66,0x6a,0x95,0x69,0x69,0x6a,0x5a,0x5a,0xff,
+0xff,0x5f,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xbf,0xff,0xff,0xff,
+0xea,0x0f,0x50,0xc3,0xf3,0x7f,0xff,0xf3,0xf3,0xc3,0xff,0xaf,0xf1,0xff,0xff,0xff,
+0xff,0xd9,0x96,0xa6,0x65,0xa6,0x66,0x6a,0x95,0x69,0x69,0x6a,0x5a,0x5a,0xff,0xff,
+0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xd7,0xff,0xff,0x5f,0xc1,
+0x3f,0xf7,0x5e,0xf5,0xce,0x9e,0x5f,0x3f,0x17,0xff,0xf3,0xe1,0xff,0xff,0xff,0xff,
+0xd8,0xff,0xfa,0xfe,0x67,0xff,0xfe,0xbf,0x5a,0xff,0xff,0xaf,0xf5,0xff,0xff,0xff,
+0xf0,0x2f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xfd,0xff,0xf7,0xff,0xfd,0x4e,0x3d,
+0x3f,0xe7,0x0b,0xbf,0x8f,0xf9,0xff,0xeb,0xe3,0xff,0xe1,0xff,0xff,0xfc,0xff,0xc7,
+0x9f,0xff,0x3e,0x39,0xe5,0xff,0xcf,0x9b,0xf9,0xff,0xff,0xc5,0xff,0xff,0xfa,0xf0,
+0x5f,0xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,
+0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x20,0x00,0x20,
+0x00,0x01,0x10,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0xf0,0x4f,
+0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xbf,
+0x3f,0xff,0xff,0xbf,0xff,0xff,0xff,0xfb,0xf1,0xff,0xff,0xff,0xff,0xf7,0xff,0xf7,
+0xff,0xed,0xff,0xfb,0xfe,0xff,0x7f,0xff,0x7f,0xdf,0xff,0xff,0xdd,0xf0,0x3f,0xff,
+0xff,0xff,0xfe,0xf0,0xff,0xff,0xf3,0xff,0xf7,0xff,0xfe,0x5f,0xff,0xf7,0xff,0xff,
+0xdf,0xff,0xff,0xff,0xf7,0xfe,0x7b,0xf1,0xff,0xfd,0xfd,0xff,0xdf,0xdf,0xff,0x7d,
+0x73,0xf9,0xff,0xc3,0x7e,0xfe,0xff,0xef,0xd7,0xff,0xcf,0xd0,0xf0,0x6f,0xff,0xff,
+0xff,0xfe,0xf8,0x30,0x00,0x00,0x40,0x04,0x00,0x01,0x41,0x20,0x00,0x04,0x00,0x02,
+0xd5,0x09,0x00,0x02,0x80,0x02,0x01,0x00,0x00,0x00,0x0a,0x04,0x00,0x07,0x00,0x01,
+0x50,0x01,0x80,0x02,0x61,0x40,0x41,0x0c,0x14,0x08,0xc1,0xf0,0x9f,0xff,0xff,0xff,
+0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xdf,0xcb,0x5f,0xfe,0xef,0xff,0xfe,
+0xff,0x3f,0xff,0x7f,0xfd,0xc1,0xff,0xff,0x7f,0xff,0xdf,0xfd,0xfc,0xfd,0xf7,0xee,
+0xff,0xff,0x4e,0xff,0xdf,0xcf,0xdb,0xeb,0xff,0xff,0xf0,0x1f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0x7f,
+0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,
+0xf7,0xfb,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0x7f,0xff,0xff,0xff,0x7f,0xff,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xdd,0xff,
+0xff,0xff,0xa5,0xff,0x6f,0x6b,0xe9,0x6f,0xda,0xca,0xfb,0xdd,0xee,0xf7,0xf6,0xb2,
+0xb3,0xa4,0xa1,0x5b,0x5b,0xf6,0xd7,0xf4,0xf7,0x7b,0xbd,0xbd,0xad,0xcf,0xef,0x7f,
+0x6b,0x7f,0x3b,0xdf,0xdb,0xff,0xff,0x30,0xcf,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,
+0xff,0xff,0xff,0xf6,0xfe,0x96,0xff,0xfd,0xb5,0xfd,0xbf,0xad,0x7f,0xff,0x6f,0xff,
+0xde,0xd1,0xad,0xad,0xe9,0xff,0xf1,0xec,0xef,0xde,0x3f,0xcb,0xff,0xf6,0xff,0x32,
+0xff,0xc5,0xbd,0xff,0xff,0xff,0xd0,0xbf,0xff,0xff,0xff,0xfe,0xfe,0xfb,0xff,0xf4,
+0x28,0xbf,0xff,0xfd,0xfb,0xd3,0xff,0xff,0x42,0xff,0xff,0xff,0xea,0xb3,0xfc,0xc3,
+0xc1,0xff,0x33,0xff,0xc0,0x15,0x6b,0x70,0xff,0xf0,0xf2,0x4f,0xff,0xfc,0x3e,0x97,
+0x3c,0xff,0xff,0xfd,0xef,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0x78,
+0xbf,0xff,0xfd,0xf3,0xef,0x55,0xff,0x7e,0xff,0xff,0xff,0xea,0xb3,0xfc,0xc3,0xc1,
+0xff,0x33,0xff,0xc0,0x15,0x6f,0xff,0x0f,0xf0,0xf0,0x0f,0xff,0xfc,0x3d,0x6b,0xc3,
+0xff,0xff,0xfe,0xf7,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfc,0xff,
+0xff,0x23,0xf8,0x7f,0xff,0x4e,0xff,0xff,0xff,0xfb,0xf9,0x17,0xff,0xf6,0xf1,0xff,
+0xcf,0xef,0xff,0xff,0x13,0xdf,0xe6,0x2f,0xc7,0xff,0xff,0xe7,0xc1,0xfd,0xff,0xfe,
+0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xff,0xfe,0xae,0xff,
+0xff,0x7f,0x3b,0x3f,0xfc,0x7f,0xfc,0xef,0xff,0xfc,0xe2,0x7b,0xff,0xf1,0xfd,0xed,
+0xef,0xff,0xff,0x35,0x73,0xff,0xff,0xfe,0xfa,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,
+0xff,0xfa,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x80,0x00,0x00,0x40,0x00,0x00,0x00,0x0c,0x04,0x01,0x40,0x40,0x00,
+0x00,0x30,0x28,0x04,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00,0x00,
+0x38,0xf0,0x0f,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xfb,0xff,0x7f,
+0xff,0xff,0x9f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xdf,0xdf,0xff,
+0xff,0xff,0xff,0xed,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xbf,0xbf,0xff,0xff,0xc3,
+0xf0,0x3f,0xff,0xff,0xff,0xfe,0xf0,0xbf,0xfd,0xff,0xbf,0xff,0xff,0xfd,0xff,0xff,
+0xff,0xff,0xff,0xfd,0x7b,0xff,0x7f,0xff,0xbd,0xff,0xf1,0xef,0xff,0xff,0xfd,0xdf,
+0xfd,0xfb,0xff,0xff,0xbf,0xbe,0xff,0xcd,0x7f,0xfc,0xf7,0xf7,0x6f,0xbf,0xd8,0xf0,
+0xef,0xff,0xff,0xff,0xfe,0xf8,0x30,0x00,0x00,0x00,0x04,0x00,0x00,0xa0,0x00,0x00,
+0xc0,0x00,0x00,0x20,0x34,0x00,0x00,0x00,0x0c,0x81,0x00,0x20,0xa4,0x20,0x00,0x10,
+0x08,0x04,0x48,0x08,0x00,0x40,0x93,0x00,0x10,0x00,0x38,0x18,0x20,0xc1,0xf0,0x3f,
+0xff,0xff,0xff,0xfe,0xff,0xfb,0xff,0xff,0xb9,0xdf,0xfe,0xb3,0xff,0xff,0xe7,0xfd,
+0xff,0xff,0x3b,0xff,0x7f,0xff,0xbf,0xff,0xc1,0xff,0xfc,0xff,0xff,0x3f,0x77,0xfe,
+0xfe,0xcf,0xff,0xbf,0xfd,0xbf,0xff,0xfe,0xed,0xf2,0xfd,0xf7,0xff,0xf0,0x2f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xbf,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xf3,0xad,0xcf,0xef,0x70,0xc9,0x73,0x3b,0xdf,0x5b,0x4a,
+0xf6,0xb7,0xfe,0xd7,0xf5,0xbc,0xc1,0x33,0xca,0xd6,0xb7,0x6e,0xf7,0xfb,0xbd,0xc5,
+0x24,0xcf,0x6f,0x2f,0x4d,0x2b,0xba,0x5a,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,
+0xfe,0xbf,0xff,0xff,0xff,0xff,0xf6,0xf6,0xd7,0xff,0xff,0xad,0xbd,0xff,0xff,0xff,
+0xef,0xf7,0x7f,0xfc,0x5b,0xb1,0xfd,0xbd,0x75,0x6f,0xef,0x6a,0xfd,0x5b,0xfb,0xdb,
+0x3a,0xbf,0x8e,0x9f,0xff,0xbf,0xfd,0xff,0x6f,0xff,0xd0,0x6f,0xff,0xff,0xff,0xfe,
+0xff,0xbb,0xff,0xf0,0x3f,0xff,0xff,0xfd,0xfb,0x7f,0xde,0xff,0xff,0x5a,0xd6,0xbf,
+0xd8,0x2a,0xbf,0xbf,0xf1,0xe5,0xff,0xcc,0xc0,0xa9,0x70,0xff,0xf3,0x3c,0x3c,0xfd,
+0x57,0xfd,0x98,0x03,0x00,0xc3,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0x3d,0xbf,0xff,0xfd,0xfb,0xff,0xdb,0xff,0xff,0x0f,0xfc,0x3f,0xd8,
+0x2a,0xbf,0xbf,0xf1,0xef,0xff,0xcc,0xc0,0x96,0xbe,0xff,0xf3,0x3f,0xff,0xfd,0x57,
+0xfd,0x99,0x0f,0xff,0xc3,0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xff,0xff,
+0xff,0xf1,0xe7,0xff,0xff,0xf3,0x8e,0x7b,0xff,0xa8,0xff,0xdf,0x7f,0x8e,0x78,0x73,
+0xff,0xf1,0x51,0x62,0xff,0xfc,0x4b,0xff,0xf3,0xff,0x7e,0xcf,0xf9,0xff,0xfd,0xff,
+0xff,0x7f,0xff,0xe0,0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,
+0xfb,0xfd,0xae,0xff,0xfc,0xfe,0x6f,0x3f,0xf8,0xfd,0x77,0xaf,0xfe,0x37,0xfe,0x7b,
+0xff,0xb1,0x8c,0xff,0xef,0xfd,0xf8,0xe7,0xbf,0xff,0xf1,0xfe,0x3e,0xf7,0xfe,0x95,
+0x3e,0xbf,0xff,0xff,0xff,0xfa,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,0x00,
+0x01,0x04,0x00,0x00,0x00,0x00,0x80,0x02,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x08,
+0x41,0x80,0x10,0x00,0x00,0x08,0x10,0x84,0x00,0x0c,0x04,0x02,0x61,0x00,0x00,0x81,
+0x00,0x00,0x00,0x00,0x3d,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,
+0xff,0xff,0x7f,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
+0x7f,0xbf,0xf7,0x7f,0xef,0xff,0xef,0xff,0xf7,0xfd,0xff,0xff,0xfd,0x7f,0xff,0xbe,
+0xdf,0xff,0xff,0xd9,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf0,0xbb,0xff,0x7f,0xfb,0xff,
+0xfb,0xff,0xbf,0xff,0xf3,0x7f,0xfb,0xfd,0xeb,0x7f,0xdf,0xfa,0xff,0xde,0xf0,0xed,
+0xff,0xb1,0xf7,0xf9,0x1f,0xb5,0x5b,0xfe,0x7e,0xf7,0xbe,0xfd,0x7f,0x5f,0xb5,0xf7,
+0xff,0xff,0xd0,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf8,0x30,0x01,0x00,0x07,0x42,0x01,
+0x00,0x6a,0x18,0x50,0x80,0x00,0x00,0x02,0x40,0x01,0x01,0x20,0x01,0x01,0x24,0x14,
+0x21,0x10,0x02,0x08,0x07,0x08,0x00,0x40,0x10,0x80,0x58,0x00,0x84,0x80,0x18,0x10,
+0x40,0xc1,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xff,0xdb,0xb7,0xf3,
+0xdf,0x7c,0xf8,0x74,0xff,0xff,0x6f,0x7d,0x3f,0x7e,0xec,0x7f,0xc1,0xf5,0xff,0xcf,
+0x6f,0x9f,0xf9,0xdf,0xbe,0xe5,0xe7,0xff,0xd7,0xf3,0xdd,0xfb,0xff,0xfc,0xff,0xbf,
+0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xf0,0x2f,0xff,0xff,0xff,0xfe,0xd7,0xff,0xff,0xff,0xb4,0xcf,0xef,0x77,0x6f,0x73,
+0x3a,0x4a,0x3a,0xcb,0xd4,0xf7,0x2e,0xd6,0xbd,0xbd,0xa1,0x3b,0xdf,0xd6,0xf7,0xee,
+0xd3,0x35,0xbd,0xfb,0xbd,0xce,0xeb,0x2b,0x4d,0x2f,0xbb,0xda,0xff,0xff,0xfe,0xb0,
+0x5f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdf,0x5f,0x36,0xaf,0x3f,0xed,0xb7,
+0xf5,0xfd,0xf3,0x2b,0xef,0x77,0xff,0xfb,0xda,0xb1,0xbd,0xa3,0x77,0x69,0x7f,0x4f,
+0xff,0xdb,0xfa,0x5b,0xff,0xf2,0xfe,0xff,0x96,0xff,0xff,0xfe,0xdf,0xff,0xd0,0xaf,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0x8f,0xfd,0x40,0x6f,0x9e,0x83,0x5a,0x0f,
+0xfa,0xc3,0xff,0xff,0xfc,0xe9,0x7f,0xf3,0x01,0xd0,0x00,0xfe,0xbf,0xcd,0x3f,0xf0,
+0xef,0xfc,0xc5,0x0c,0x3f,0xfd,0x68,0x0b,0xff,0xff,0xff,0xfe,0xdf,0xf0,0xff,0xff,
+0xff,0xff,0xfe,0xff,0xbb,0xff,0xfd,0x85,0xff,0xd4,0x6f,0x9f,0xc3,0x5a,0x0f,0xff,
+0xff,0xff,0xff,0xfc,0xe9,0x7f,0xf3,0x01,0xf0,0xfb,0xc2,0xbf,0xfc,0x00,0x37,0xef,
+0xfc,0xcd,0xbc,0x3f,0xff,0x0c,0xbf,0xff,0xff,0xff,0xff,0xff,0xf0,0x5f,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xff,0xd9,0xf7,0xd1,0xb7,0x7e,0x7f,0xf1,0xe4,0xfd,0xff,
+0xfb,0xfb,0xff,0x5f,0xff,0x7f,0xb1,0xbc,0x0f,0x67,0xeb,0xb8,0x3f,0xff,0xe2,0xff,
+0xe9,0xff,0xfd,0xe3,0xff,0x3f,0x9f,0xc2,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,
+0xfe,0xf5,0x7f,0xff,0xf0,0x3f,0xbc,0xff,0xd5,0xf5,0xce,0x3f,0xfe,0xff,0xfe,0x6d,
+0xff,0xf1,0xbf,0x7b,0xff,0xf1,0xfd,0xff,0x4f,0xff,0x87,0xff,0xae,0xff,0xb1,0xf8,
+0xfe,0xff,0xff,0x78,0x01,0xb9,0xff,0xff,0xff,0xfa,0xf0,0x2f,0xff,0xff,0xff,0xfe,
+0xf3,0xc0,0x00,0x00,0x00,0x04,0x02,0x13,0x02,0x00,0x80,0x40,0x00,0x90,0x10,0x00,
+0x10,0x00,0x02,0x00,0x01,0x20,0x80,0x12,0x10,0x00,0x40,0x08,0x00,0x04,0x00,0x00,
+0x02,0x00,0x01,0x40,0x00,0x80,0x00,0x00,0x3c,0xf0,0xef,0xff,0xff,0xff,0xfe,0xfd,
+0x1f,0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0x7f,0xff,0x7f,0xf7,0xdf,0xf7,0xff,
+0xf7,0xfb,0xeb,0xd1,0xff,0xff,0xff,0xff,0xef,0xf7,0xff,0xff,0xfb,0xff,0xfe,0xff,
+0xff,0x7e,0xff,0xfb,0xff,0xff,0xff,0xdb,0xf0,0xff,0xff,0xff,0xff,0xfe,0xf0,0xff,
+0xff,0xb7,0xeb,0xf7,0xdf,0xff,0xfe,0xf5,0x6b,0xe7,0xed,0xf7,0x3e,0xec,0xff,0x54,
+0xef,0x6f,0xf1,0xf5,0xaf,0x6f,0xf6,0xfd,0xff,0xdd,0x7b,0xff,0xef,0xbf,0x7f,0xff,
+0xff,0xf7,0xff,0xf3,0x5f,0xf7,0xd0,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xf8,0x30,0x00,
+0x80,0x40,0x04,0x00,0x81,0x2c,0x04,0x24,0x00,0x02,0x01,0xc8,0x02,0x00,0x02,0x24,
+0x00,0x01,0xb4,0x42,0xdc,0x44,0x02,0x15,0x90,0x02,0x03,0x48,0x39,0x10,0x02,0x24,
+0xa0,0xba,0x00,0x00,0x40,0xc1,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xfe,0xfc,0xf7,0xf0,0xee,0xb6,0x5d,0xfd,0xf5,0xff,0xdb,0xf7,0x7f,0x7f,0xbe,0xff,
+0xc1,0xfe,0xbf,0xfa,0xfa,0x5f,0xff,0xad,0xff,0xef,0xff,0x7f,0xdf,0x7f,0xfe,0xbf,
+0xb7,0x94,0xbf,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xd7,0xff,0xff,0xfb,0xb5,0xff,
+0xef,0x7c,0xeb,0x2b,0x52,0x5b,0x3b,0xda,0xd4,0xf3,0x36,0x96,0xb5,0xbd,0xf1,0xfb,
+0xda,0xee,0xf6,0xfe,0xd3,0x35,0xbd,0xdf,0xad,0xcf,0xef,0x7e,0xcd,0x6b,0xbb,0xdf,
+0xff,0xff,0xfd,0xb0,0xef,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xd3,0x5f,0xf6,
+0xff,0xf6,0xff,0xfd,0xad,0xfd,0xff,0x7f,0xef,0xff,0x6f,0x7f,0xdb,0xf1,0xa5,0xa3,
+0x7f,0x6f,0x6b,0x4f,0xff,0xdb,0xfb,0xcb,0xff,0xf6,0xff,0xf4,0xd7,0xfd,0xbf,0xfe,
+0xdf,0xff,0xd0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdf,0xff,0xff,0xff,
+0x3f,0x7f,0xfc,0xe5,0xff,0x20,0xfe,0xff,0xff,0xdf,0x7f,0xff,0xf1,0x7f,0xff,0xfe,
+0xff,0xf0,0x7c,0x3d,0x4f,0xf3,0xc3,0x3f,0xff,0xff,0x6f,0xc3,0xff,0x0f,0xff,0xff,
+0xaf,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xb7,0xe0,0x0f,0xff,0xff,0x2b,
+0xff,0x7d,0xbf,0xff,0xdf,0xff,0xff,0xf8,0x9f,0x7f,0xff,0xf1,0x55,0xff,0xff,0xff,
+0xfd,0x7c,0x3c,0xff,0xf3,0xc3,0x3f,0xff,0xff,0xef,0xc3,0xff,0xdf,0xff,0xff,0xff,
+0xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xef,0xff,0xff,0x9f,0xbf,0x7f,
+0xf9,0x19,0x47,0x8e,0xe7,0x9f,0x3f,0x17,0xff,0xfc,0x81,0xc1,0x7e,0xf3,0xd9,0xf9,
+0x73,0xdf,0xf4,0x7f,0xfa,0xff,0xff,0xff,0xfb,0x7f,0x77,0xc7,0xff,0xff,0xff,0xf0,
+0x2f,0xff,0xff,0xff,0xfe,0xf5,0xf7,0xff,0xfb,0xff,0xf7,0x3f,0xfc,0xbf,0x3e,0x3f,
+0xec,0xff,0x81,0xaf,0xfe,0x4f,0xf3,0xbb,0xff,0xf0,0x7e,0xff,0x6f,0xff,0x87,0xff,
+0xbb,0xff,0xd5,0xfc,0xff,0x7f,0xfc,0x6f,0xff,0xef,0xe7,0xff,0xff,0xfa,0xf0,0x3f,
+0xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,
+0x00,0x30,0x10,0x60,0x20,0x00,0x08,0x00,0x01,0x20,0x80,0x00,0x10,0x00,0x04,0x00,
+0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x80,0x40,0x00,0x08,0x20,0x3c,0xf0,0x6f,0xff,
+0xff,0xff,0xfe,0xf5,0xbf,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0x7f,0xfe,0x3f,0xff,
+0xff,0xff,0xff,0xff,0xef,0xff,0xff,0xf1,0xdf,0xdf,0xff,0xff,0xff,0x7f,0xdf,0xff,
+0xfd,0xbd,0xff,0xff,0xff,0xfb,0xdf,0xff,0xff,0xff,0xff,0x5b,0xf0,0xff,0xff,0xff,
+0xff,0xfe,0xf0,0xbf,0xbf,0xbf,0xff,0xf7,0xfb,0xff,0xfe,0xee,0xfa,0xff,0xff,0xff,
+0x3d,0x3b,0xff,0xff,0xfe,0xfb,0xf1,0xff,0xbf,0x7b,0xff,0xff,0xef,0xff,0xbf,0xff,
+0xff,0xff,0xff,0xff,0xfe,0xff,0xf7,0xef,0xff,0xfb,0xd0,0xf0,0xdf,0xff,0xff,0xff,
+0xfe,0xf8,0x30,0x00,0x00,0x00,0x00,0x00,0x0b,0x10,0x05,0x01,0x00,0x08,0x00,0x02,
+0x01,0x01,0x00,0x00,0x10,0x01,0xc8,0x08,0x00,0x00,0x00,0x00,0x42,0x02,0x00,0x00,
+0x00,0x80,0x02,0x00,0x00,0x40,0x24,0x80,0x00,0xc1,0xf0,0x3f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xf7,0xfd,0xf7,0xfa,0xef,0xee,0xf9,0xfd,0xff,0xf7,0xfe,0xbf,
+0x1f,0xfd,0x9e,0xfd,0xd1,0xef,0xff,0xf7,0x7f,0x9f,0xff,0xef,0xff,0xf6,0xff,0xfe,
+0xfe,0x7b,0xff,0xbd,0xff,0x7e,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xff,0xff,
+0xff,0xf7,0xff,0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xdf,0xfd,0xff,0xff,0xdf,0xff,
+0xff,0x5f,0xf1,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xef,0xff,
+0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x3f,0xfb,0xff,0xff,0xef,0xfb,0xfd,
+0xff,0xf1,0xff,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xf7,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0xff,0xff,0xe7,0xff,
+0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcf,0xff,0xfb,0xff,0xfb,0xf1,
+0xff,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7b,0xff,0xff,0xff,0x7f,0xff,0xf1,0xff,
+0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xef,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0x57,0xff,0xfe,0xbf,0xfb,0xf1,0xff,0xff,
+0xfd,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xd7,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdb,0xff,0xdb,0xfd,
+0xf6,0xff,0xf6,0xff,0x3c,0xbc,0xbc,0xbf,0xdf,0x6f,0xef,0x2f,0xf1,0x3c,0xbf,0xbc,
+0xbf,0xdf,0x6f,0xff,0x6f,0xf7,0xdb,0xff,0xdb,0xfd,0xf6,0xff,0xf6,0xff,0xff,0xff,
+0x01,0xe2,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff };
diff --git a/drivers/net/hamradio/yam9600.h b/drivers/net/hamradio/yam9600.h
new file mode 100644
index 000000000000..5ed1fe6ff43e
--- /dev/null
+++ b/drivers/net/hamradio/yam9600.h
@@ -0,0 +1,343 @@
+/*
+ *
+ * File yam111.mcs converted to h format by mcs2h
+ *
+ * (C) F6FBB 1998
+ *
+ * Tue Aug 25 20:23:03 1998
+ *
+ */
+
+static unsigned char bits_9600[]= {
+0xff,0xf2,0x00,0xa5,0xad,0xff,0xfe,0x9f,0xff,0xef,0xfb,0xcb,0xff,0xdb,0xfe,0xf2,
+0xff,0xf6,0xff,0x9c,0xbf,0xfd,0xbf,0xef,0x2e,0x3f,0x6f,0xf1,0xfd,0xb4,0xfd,0xbf,
+0xff,0x6f,0xff,0x6f,0xff,0x0b,0xff,0xdb,0xff,0xf2,0xff,0xf6,0xff,0xff,0xff,0xff,
+0xf0,0x6f,0xff,0xff,0xff,0xfe,0xff,0xfd,0xdf,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,
+0xfb,0xff,0xff,0xf7,0xff,0xff,0xff,0xfe,0xff,0x7f,0xf1,0xff,0xfe,0xff,0xbf,0xbf,
+0xff,0xff,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xfe,0xff,0xfe,0xff,0xff,0xff,0xf0,
+0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xbf,0xff,0xff,0xff,0xf7,
+0xff,0xff,0xf7,0xef,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0x7e,0xff,0xff,
+0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xf0,0xdf,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xef,0xff,0xf3,0xfb,0xfe,0xff,0xf1,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xdf,0xff,0xf0,0x7f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xdf,0xff,0xff,0xff,0xf7,0xf1,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xf0,0x0f,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf5,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,
+0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xef,0xff,0x7f,0xff,0xef,
+0xff,0xef,0xff,0x7f,0xef,0xf1,0xff,0xef,0xff,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xbd,0xff,0xef,0x7f,0xef,0x7f,0xfb,0xdf,0xd3,0x5a,0xfe,0xd7,0xd6,
+0xf7,0x7f,0xbd,0xf1,0xbb,0x5d,0xd6,0xf7,0xfe,0x96,0xff,0xbd,0xaf,0xad,0xbf,0xef,
+0x7f,0x6b,0x7f,0xfb,0xd6,0xfe,0xf7,0xff,0x10,0xef,0xff,0xff,0xff,0xfe,0xbe,0xef,
+0xff,0xff,0xdb,0xff,0xf6,0xff,0xf6,0xff,0xfd,0xbf,0xfd,0xbf,0xff,0x7f,0xff,0x7f,
+0xdf,0xdb,0xf1,0xfd,0x35,0xff,0x6f,0xff,0x6f,0xff,0xdb,0xff,0xcb,0xff,0xf6,0xff,
+0xf2,0xfd,0xfd,0xbf,0xff,0xff,0xff,0xd0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x55,0xff,0xcc,0xc0,0x3f,0xff,
+0xff,0xf1,0x24,0xf0,0xff,0xff,0xcf,0xef,0x3f,0xff,0xf0,0xff,0xff,0xff,0xfc,0x3f,
+0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x55,0xff,0xcc,0xc0,0x3f,0xff,0xff,
+0xf1,0x00,0xf0,0xff,0xff,0xcf,0xdf,0xff,0xff,0xf0,0xff,0xff,0xff,0xfc,0x3f,0xff,
+0xff,0xff,0x7d,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xfe,0x7f,0xdf,0xff,0xff,0xff,0xf1,
+0xff,0xcf,0xff,0xf3,0xff,0x97,0xff,0xff,0x8f,0xe7,0xff,0xff,0xfc,0x71,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xf5,0xff,0xbf,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xe3,0xf7,0xef,0xff,0xff,0xfc,0x7b,0xff,0xf1,0x3f,
+0xff,0xef,0xff,0xcf,0xe3,0xe3,0xff,0xff,0xff,0xff,0x3f,0xff,0xff,0xff,0xbf,0xff,
+0xbf,0xff,0xda,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xf2,0xc0,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,
+0x01,0x3c,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0x9f,0xff,
+0xff,0xff,0xf7,0xff,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xdb,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xf0,0xbb,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xfb,0xdf,0xbf,0xf1,0xfe,0xfd,0xf7,0xff,
+0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x77,0xfd,0xf2,
+0xf0,0x1f,0xff,0xff,0xff,0xfe,0xf8,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,
+0x00,0x00,0x00,0x02,0x00,0x90,0x00,0x00,0x00,0x0c,0x01,0x00,0x00,0x04,0x24,0x00,
+0x40,0x01,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x01,0xc0,0xf0,
+0x4f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xbf,0xff,0xff,0x6f,0xff,0xdf,0xff,0xd1,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xff,0xff,0xdf,0xff,0xfb,0xff,0xfb,0xef,0xff,0xff,0xee,0xff,0xff,0x7f,0xf0,0xdf,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x8f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xf5,0xad,0xff,0x69,0x2a,0xed,0x6b,0xfb,0xdf,0x3a,
+0xdc,0xf4,0x96,0xee,0xb3,0x3d,0x35,0xc1,0xbb,0xdd,0xfe,0xf6,0xfe,0xd6,0xb5,0xad,
+0xbf,0xa5,0xad,0x49,0x2f,0x4f,0x2b,0xda,0x5f,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,
+0xff,0xfe,0xbf,0xff,0xff,0xfb,0x5b,0xf7,0xf6,0xff,0xf6,0xff,0xfd,0xbf,0xfd,0xa5,
+0xf3,0x6f,0xf3,0x6e,0xfa,0x7b,0xd1,0xfd,0xb5,0x77,0x6f,0xe9,0x6f,0xff,0xdb,0xfb,
+0xdb,0xdf,0xf6,0xff,0xf6,0xff,0xfd,0x3f,0xfe,0xf7,0xff,0xd0,0x4f,0xff,0xff,0xff,
+0xfe,0xff,0x9f,0xff,0xff,0x0f,0xff,0xc0,0x3f,0x9c,0x03,0xff,0xff,0x8b,0xa5,0xfe,
+0x80,0x3e,0xc2,0xbf,0xac,0xb1,0x24,0xff,0xff,0xff,0xff,0xff,0xff,0x0f,0xff,0xa3,
+0xff,0xfd,0x6b,0xff,0xff,0xf0,0xa5,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0x0f,0xff,0xc0,0x3f,0xd4,0x6b,0xff,0xff,0xdb,0xff,0xfe,0x86,
+0xbf,0xc2,0xbf,0x30,0xa1,0x24,0xff,0xff,0xff,0xff,0xcc,0xff,0x0f,0xff,0xa3,0xff,
+0x05,0x6b,0xff,0xff,0xf0,0xa5,0xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xfb,0xc7,0xff,0xc4,0xff,0xff,0x7f,0xff,0xec,0xfe,0x7f,0xdf,0xd8,0xb9,
+0x47,0xfc,0x36,0xc1,0xdf,0xff,0xff,0xf9,0xff,0xf3,0xff,0xf7,0xff,0xfc,0xff,0xfd,
+0x3f,0xff,0xff,0xff,0x3f,0xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xf5,0xff,
+0xff,0xff,0xff,0xfe,0xff,0xff,0x7e,0xbd,0x3f,0xff,0x2b,0xfe,0x2f,0xf5,0xa3,0xfc,
+0x5b,0xfe,0x61,0x9f,0x7f,0xef,0xff,0xff,0xa7,0xfb,0xff,0xff,0xfa,0xfe,0xff,0x33,
+0xf1,0xff,0xbf,0xff,0xff,0xff,0xfa,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x30,0x24,0x04,
+0x00,0x01,0x00,0x80,0x40,0x00,0x08,0x00,0x00,0x00,0x02,0x01,0x01,0x00,0x02,0x00,
+0x00,0x00,0x00,0x00,0x01,0x3d,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xfd,0xbd,0xff,0xfd,
+0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xff,0x7f,0xf6,0xef,0xbf,0xf7,0xff,0x73,0xeb,
+0xf1,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0xf9,0xff,0xfd,0xfe,0xff,0xff,
+0xff,0xff,0xff,0xff,0xd9,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xf0,0xbf,0x7f,0xff,0xff,
+0xff,0x7f,0xff,0xff,0xde,0xff,0xff,0xef,0xdd,0xde,0x77,0xf2,0xfb,0xed,0xe7,0xf1,
+0x73,0xfd,0xfd,0xdf,0xff,0x7d,0xbe,0xdf,0xff,0xfb,0xff,0xef,0xff,0xef,0xff,0xff,
+0xff,0xff,0xff,0xd0,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf8,0x30,0x20,0x02,0x00,0x22,
+0x40,0xc0,0x00,0x00,0x00,0x08,0x00,0x02,0x41,0x02,0x12,0x00,0x21,0x87,0x81,0x00,
+0x00,0x80,0x04,0x0b,0x28,0x01,0xb0,0x00,0x82,0x00,0x40,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0xc1,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,
+0xf7,0xff,0xfe,0x7f,0xed,0x79,0xff,0xde,0xeb,0x7f,0x74,0xf7,0xf7,0xe1,0xf9,0xff,
+0xf6,0x5f,0x7f,0xff,0xff,0xff,0xd7,0xdb,0xef,0xff,0xbb,0xff,0xff,0xff,0xcc,0xff,
+0xff,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xf0,0x0f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0x3d,0xcd,0x49,0x7f,0x6f,
+0x2b,0xba,0x5c,0xd2,0xda,0xf6,0xf3,0x3e,0xf7,0xff,0xbd,0xf1,0xfa,0xdf,0xfe,0xf7,
+0xcc,0xf6,0xbb,0xa5,0xb3,0xad,0xbf,0x6f,0x7d,0x6f,0x6b,0xdb,0xdf,0xbd,0xff,0xfe,
+0xb0,0x5f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xfb,0xdb,0x57,0xf6,0xfe,0x9f,0xd5,
+0xb7,0xff,0xaf,0xe5,0x3f,0xff,0xff,0x6f,0xff,0xdb,0xf1,0xfd,0xbf,0xff,0x6f,0x69,
+0x6c,0xdf,0xda,0xdf,0xcb,0xff,0xf6,0xff,0x76,0xfd,0xfd,0xbf,0xff,0xff,0xff,0xd0,
+0x3f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xbd,0x08,0x03,0x89,0x4f,0x5a,
+0x0f,0xf0,0xff,0xf8,0xbf,0xff,0xff,0xff,0xff,0xf1,0x5a,0xff,0xff,0xff,0xff,0xf3,
+0xfa,0xa0,0xf0,0xf2,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xff,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfc,0xfd,0x00,0x6b,0xff,0xff,0x5a,0x0f,
+0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0x5a,0xff,0xff,0xff,0xff,0xb3,0xf5,
+0x50,0xf0,0xf0,0xff,0xff,0xff,0xd7,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x7f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0xbc,0xff,0xe4,0xe7,0x71,0xff,0xf9,0xc4,0xf4,
+0x7f,0x7f,0xcf,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xfb,0xf7,0x73,0xbf,0x14,
+0xff,0xe6,0xff,0xff,0xe1,0x7d,0xff,0xff,0xe7,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,
+0xff,0xfe,0xf5,0xff,0xff,0xfe,0xd2,0xfa,0xff,0xc4,0xf4,0x5c,0xbf,0xfa,0xff,0xff,
+0xec,0x7e,0xbf,0xff,0xff,0xff,0xf1,0xff,0xff,0xef,0xff,0xff,0x6b,0xdb,0xff,0xdf,
+0xf9,0xfb,0xbf,0xff,0xf1,0xff,0xbf,0xff,0xff,0xff,0xfb,0xf0,0xbf,0xff,0xff,0xff,
+0xfe,0xf3,0xc0,0x00,0x02,0x00,0x00,0x00,0x00,0x82,0x00,0x00,0x00,0x00,0x80,0x00,
+0x00,0x00,0x00,0x40,0x00,0x01,0x00,0x00,0x00,0x01,0x08,0x20,0x00,0x00,0x00,0x00,
+0x01,0x00,0x01,0x00,0x00,0x80,0x02,0x00,0x01,0x3c,0xf0,0x5f,0xff,0xff,0xff,0xfe,
+0xfd,0xbf,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0x7f,0xff,0xdf,0xff,0xef,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xf7,0xff,0xfb,0xff,0xfd,0xff,
+0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xc3,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xf0,
+0xff,0xdf,0xff,0xff,0xf7,0x23,0xff,0xff,0xfd,0xff,0xef,0xff,0xfe,0x7f,0x7d,0xf7,
+0xfe,0xff,0x7f,0x71,0xff,0xfb,0x7f,0xff,0xff,0xff,0x6e,0xfd,0xf7,0xfd,0xff,0xbf,
+0xff,0xbf,0xf9,0xfd,0xff,0xdf,0xef,0xf0,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xf8,0x30,
+0x40,0x01,0x00,0x83,0x00,0x00,0x00,0x0c,0x06,0x08,0x04,0x26,0x26,0x00,0x00,0x06,
+0x03,0x00,0x01,0x00,0x00,0x00,0x00,0x04,0x00,0x70,0x08,0x80,0x00,0x20,0x01,0x20,
+0x00,0x02,0x00,0x30,0x00,0x00,0xc1,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,
+0xff,0xff,0x7b,0x3f,0xf7,0xff,0xd7,0xfe,0xfe,0xfb,0xfe,0x3b,0xfe,0xbd,0xff,0x2f,
+0xff,0x71,0xff,0xfb,0x7f,0xe7,0xff,0xf9,0xef,0xff,0xd7,0xfa,0xff,0xb7,0xbb,0xfe,
+0xff,0xff,0x74,0xff,0xf7,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xb5,
+0xbd,0x6f,0x7c,0xeb,0x7f,0xfb,0xdb,0xd3,0x4b,0xee,0xd6,0xf6,0xb7,0xfd,0xac,0xa1,
+0xfb,0xdf,0xfe,0xf7,0xf4,0x96,0xbd,0xb4,0xc5,0xa5,0xaf,0x6f,0x69,0x4f,0x7f,0xba,
+0xdb,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,
+0xf6,0xff,0xf6,0xff,0xbd,0xbf,0xa5,0xbf,0xff,0x7d,0x7f,0xef,0xff,0xfb,0xf1,0xfd,
+0xbf,0xff,0x6f,0xff,0x6b,0x7a,0xdb,0xff,0xdb,0xdf,0xf6,0xfe,0xb6,0xfd,0xfd,0xbf,
+0xfe,0xf7,0xff,0xd0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xf4,0x2f,0xff,
+0xfc,0x43,0x6b,0xff,0xff,0xff,0x0d,0xff,0xfc,0x33,0x3f,0xf0,0x5f,0xf1,0xff,0xff,
+0xff,0xff,0xf9,0xde,0xf0,0x4c,0xfe,0x77,0xaf,0xff,0xff,0xef,0xff,0xf0,0xff,0xdb,
+0xff,0x5f,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,0xfe,0xf7,0xff,0xf0,0x2f,0xff,0xfd,
+0x43,0x7f,0xff,0xff,0xf1,0x0f,0xff,0xfc,0x33,0x3f,0xff,0xaf,0xf1,0xff,0xff,0xff,
+0xff,0xf6,0xd7,0xff,0xbc,0xfd,0xbd,0xff,0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,
+0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xfb,0xf1,
+0xbf,0xff,0xf9,0xfd,0xcf,0xf2,0x70,0xff,0x1f,0x9f,0xf3,0xf1,0xff,0xff,0xff,0xff,
+0xfc,0xf7,0xff,0x13,0x9f,0xfc,0xff,0xff,0x84,0xf7,0xff,0xff,0x47,0xff,0xff,0xff,
+0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xff,0xf1,0xfc,0xff,0xfe,0xfe,0x79,
+0x3f,0xff,0x1d,0x46,0xcf,0xff,0xcf,0xfc,0x7b,0xff,0xf1,0xff,0xff,0xff,0xff,0xed,
+0xf3,0xab,0xff,0xcb,0xff,0xf8,0xff,0xfc,0xf5,0xff,0xbf,0xff,0xff,0xff,0xfa,0xf0,
+0x8f,0xff,0xff,0xff,0xfe,0xf3,0xc2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,
+0x00,0x00,0x20,0x00,0x20,0x00,0x00,0x04,0x08,0x01,0x00,0x00,0x00,0x00,0x00,0x20,
+0x0c,0x00,0x00,0x04,0x01,0x00,0x01,0x00,0x00,0x80,0x00,0x00,0x01,0x3c,0xf0,0x7f,
+0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,
+0xdf,0xff,0xff,0xf7,0xff,0xff,0xff,0xef,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,
+0xff,0xdf,0xff,0xff,0xfb,0xf7,0x7f,0xff,0xfe,0xff,0xff,0xbf,0xdb,0xf0,0xff,0xff,
+0xff,0xff,0xfe,0xf0,0xff,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0x7f,0xf7,0xff,
+0xbf,0xbf,0xcf,0xff,0xff,0xff,0x3e,0xf1,0x7f,0xff,0xff,0xef,0xff,0xff,0xff,0xfe,
+0xff,0xfd,0xff,0xbf,0xbd,0xfe,0xff,0xfb,0xf7,0xdf,0xfb,0xd0,0xf0,0x9f,0xff,0xff,
+0xff,0xfe,0xf8,0x30,0x20,0x00,0x40,0x01,0x80,0xc0,0x30,0x00,0x00,0x20,0x00,0x10,
+0x50,0x88,0x20,0x00,0x00,0x13,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,
+0x00,0x00,0x01,0x80,0x08,0x00,0x00,0xa0,0x00,0x10,0xc1,0xf0,0xef,0xff,0xff,0xff,
+0xfe,0xfd,0xef,0x7f,0xff,0xff,0xbf,0xff,0xf7,0xff,0xef,0xfb,0xfd,0x77,0xef,0xbf,
+0xf7,0x7f,0xff,0xff,0xbf,0xd1,0x7f,0xff,0xff,0xf7,0xff,0xff,0xff,0xff,0xaf,0xff,
+0xdf,0xf7,0xfb,0xff,0xfd,0xff,0xfc,0xff,0xfd,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,
+0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0x3f,0xff,0xff,0xff,0xfe,0xdd,0xff,
+0xff,0xff,0xa5,0xfd,0x6f,0x7d,0x6d,0x7f,0x52,0xdf,0x5a,0x4b,0xee,0xb6,0xee,0xf2,
+0xbb,0xac,0xa1,0x5b,0x4d,0xd6,0xf7,0xfe,0xb2,0xbd,0x35,0xb5,0xb5,0xdd,0x6f,0x7f,
+0xe9,0x5f,0x52,0xdf,0xbd,0xff,0xff,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,
+0xff,0xdb,0xfe,0xf6,0xff,0xf6,0xff,0xfd,0xbf,0xfd,0xb5,0xbf,0xf9,0x7f,0x6f,0xff,
+0xdb,0xf1,0xfd,0xbf,0xff,0x6f,0xff,0x69,0x7f,0xdb,0xff,0xd3,0xff,0xf6,0xfe,0xf2,
+0xff,0xad,0xbf,0xff,0xff,0xff,0xd0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf5,
+0x30,0x0f,0xff,0xff,0xfd,0x6b,0xca,0xff,0xf0,0x0f,0xd6,0xbf,0xcf,0x3f,0xff,0xff,
+0xf1,0xff,0xff,0xff,0xca,0xfe,0xbf,0xff,0xf0,0x05,0xaf,0x0f,0xff,0xfc,0xf0,0xcf,
+0xf0,0xff,0xff,0xff,0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf5,0x30,
+0x0f,0xff,0xff,0xfc,0x3f,0xca,0xff,0x0f,0x0f,0xd6,0xbf,0xff,0xff,0xf5,0x5f,0xf1,
+0xff,0x8b,0xff,0xc3,0xff,0xff,0xff,0xff,0xff,0xff,0x0f,0xff,0xfc,0xf0,0xcf,0xf0,
+0xff,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xcf,0xff,
+0xff,0xbf,0x9f,0x3f,0xfe,0xfc,0xff,0x4f,0xff,0xff,0xff,0xff,0xff,0xf7,0xf1,0xff,
+0xdf,0xfe,0x7e,0x3f,0x9f,0xf4,0xfc,0x7f,0xfc,0xff,0xff,0x3f,0xff,0x3f,0xfe,0x3f,
+0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xfb,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xbf,0xfb,0xff,0xf8,0xed,0xff,0x8f,0xff,0xbb,0xff,0xb1,0xf3,0xef,
+0x8f,0xf7,0xff,0xff,0xdb,0xff,0xff,0xff,0xef,0xbf,0xfd,0x79,0xbf,0xbf,0xff,0xff,
+0xff,0xfb,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,0x00,0x04,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0x04,0x08,0x08,0x01,0x01,0x00,0x90,
+0x00,0x00,0x00,0x04,0x00,0x08,0x00,0x00,0x00,0x00,0x08,0x00,0x04,0x00,0x00,0x01,
+0x3c,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0x9f,0xff,0xaf,0xdf,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,
+0xbf,0xef,0xff,0xff,0xff,0xed,0xff,0xff,0xff,0xef,0xff,0xbf,0xff,0xff,0xff,0xc3,
+0xf0,0x3f,0xff,0xff,0xff,0xfe,0xf0,0xff,0xfd,0xff,0xff,0xff,0xfb,0xff,0xbb,0xff,
+0xff,0xff,0x7f,0xf6,0xff,0x7f,0xfb,0xfd,0xed,0xff,0xf1,0xff,0xfe,0x7f,0xff,0xff,
+0xff,0x5f,0xff,0xf7,0xff,0x7e,0xff,0xfd,0xff,0xef,0xff,0xff,0xff,0xef,0xf0,0xf0,
+0x8f,0xff,0xff,0xff,0xfe,0xf8,0x30,0x80,0x00,0x04,0x00,0x00,0x40,0x02,0x00,0x03,
+0x00,0x05,0x04,0x20,0x00,0x00,0x01,0xd0,0x00,0x81,0x00,0x20,0x04,0x04,0x00,0x00,
+0x81,0x04,0x08,0x80,0x10,0x00,0xc0,0x00,0x00,0x00,0x20,0x00,0x08,0xc1,0xf0,0x6f,
+0xff,0xff,0xff,0xfe,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0xf3,0xfd,0xff,0xed,0xfc,
+0xff,0xff,0x9f,0xfb,0xfd,0xff,0xff,0xff,0xf1,0xff,0xff,0x7f,0xfb,0x3e,0xff,0x9f,
+0xff,0xff,0xff,0xff,0xfd,0xf9,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xf0,0x6f,0xff,
+0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xcf,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xfd,0xbd,0xff,0xef,0x7c,0xeb,0x7f,0xfb,0xdb,0xfa,0xdc,
+0xee,0xf7,0xf6,0xd7,0xf5,0x2d,0xa1,0xbb,0xdd,0xee,0xf7,0x54,0xf7,0xfb,0x2c,0xb5,
+0xb4,0xbd,0x6b,0x6f,0xef,0x6f,0xbb,0xdf,0xff,0xff,0xff,0xf0,0x1f,0xff,0xff,0xff,
+0xfe,0xbf,0xff,0xff,0xff,0xfb,0xff,0xf6,0xff,0xf6,0xff,0xfd,0xbf,0xff,0xbf,0xef,
+0x6f,0xff,0x6f,0xfa,0xdb,0xf1,0xc5,0xbd,0xf5,0x6f,0xff,0x6f,0xca,0xdb,0xff,0xdb,
+0xfb,0xf6,0x97,0xf6,0xff,0xfd,0xbf,0xfe,0xf7,0xff,0xd0,0x9f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8b,0x7f,0xff,0xff,0xe7,0x63,0xff,0xff,
+0xff,0xfc,0x77,0xdf,0xf1,0xdb,0xff,0xd6,0xa8,0x3f,0xff,0xff,0x08,0x2f,0xf0,0xff,
+0xc3,0xff,0xeb,0xff,0xff,0xff,0xff,0xff,0x5f,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8b,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xfc,0xff,0xcf,0xf1,0xdb,0xff,0xd6,0xa8,0x3f,0xff,0xff,0x08,0x2f,0xf0,0xff,0xc3,
+0xff,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xf5,0xbf,0xff,0xca,0xff,0x9f,0xff,0xfa,0xb9,0xe7,
+0x9f,0xf3,0x81,0xff,0xff,0xfc,0x73,0xd7,0xff,0xff,0x77,0xff,0xfd,0xff,0xfc,0xff,
+0xff,0xff,0xff,0xcf,0xff,0xff,0xff,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,
+0xff,0xf7,0xde,0xff,0xfe,0x7e,0xff,0xbf,0xff,0xbf,0xf1,0xb3,0xff,0xff,0xe3,0xfb,
+0xff,0xe1,0x1f,0x7f,0xff,0xf8,0x78,0xff,0xfb,0x1e,0xff,0xf7,0xfe,0xe7,0xff,0xff,
+0xff,0xbf,0xff,0xff,0xff,0xfa,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x50,0x00,0x00,0x00,0x04,0x00,
+0x01,0x80,0x40,0x40,0x20,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
+0x80,0x00,0x00,0x01,0x3c,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xfb,0xff,
+0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xf7,0xf1,
+0xfd,0xff,0xff,0xff,0xdf,0xff,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0xff,
+0xff,0xff,0xff,0xdb,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xf0,0xff,0xdf,0xff,0xff,0x7f,
+0xff,0xff,0xff,0xbe,0xd7,0xff,0xed,0xbd,0x7e,0xbf,0xfe,0xf6,0x7f,0xbf,0x71,0xff,
+0xff,0xda,0xff,0xf9,0xff,0xbf,0x7f,0xfe,0xff,0x6f,0x7f,0xff,0xff,0xff,0xff,0xff,
+0x7f,0xff,0xd0,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xf8,0x30,0x42,0x00,0x00,0x00,0x00,
+0x80,0xc1,0x00,0x00,0x90,0x00,0xc4,0x00,0x00,0x12,0x20,0x43,0x22,0x81,0x84,0x00,
+0x00,0x14,0x00,0x01,0x00,0x08,0x80,0x00,0x02,0x00,0x02,0x00,0x04,0x02,0x00,0x00,
+0x10,0xc1,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xdd,0xfe,0xff,
+0xb6,0x76,0xe5,0xbc,0xf9,0xf7,0xaf,0x5f,0xbf,0xfc,0xdf,0xcf,0xf1,0xff,0xef,0x79,
+0xff,0xbd,0xff,0xef,0xff,0xff,0xf7,0x6f,0x5f,0xff,0xff,0xfd,0xef,0xef,0xbf,0xff,
+0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xf0,0xff,0xff,0xff,0xff,0xfe,0xdb,0xff,0xff,0xfd,0x2d,0xff,0x69,0x2a,0xef,0x77,
+0xbb,0xdd,0x5a,0xdf,0xf6,0xf6,0xd6,0xf7,0x7d,0xbd,0xd1,0xb2,0x4a,0xd6,0xb2,0xbe,
+0x97,0xf5,0xbd,0xb3,0xad,0xff,0xef,0x7f,0x69,0x6b,0xfb,0xdf,0xff,0xff,0xff,0xf0,
+0x2f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,0xf6,0xfe,0x9f,0xd4,0xbf,
+0xed,0xaf,0xff,0x6b,0x6f,0xf7,0xff,0xdd,0xdb,0x31,0xfd,0xbf,0xff,0x6f,0x7f,0xff,
+0xff,0xdb,0xff,0xcb,0xdf,0xf6,0xff,0xf6,0xff,0xfd,0xbf,0xfe,0xf7,0xff,0xd0,0x8f,
+0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0x1f,0xff,0x46,0x2f,0x9f,0xff,0xff,0xff,
+0xa5,0xff,0xff,0xff,0xdf,0xb7,0xff,0xff,0xf1,0xff,0xff,0xff,0xf7,0xe9,0x6a,0xbf,
+0xff,0xff,0xfd,0xff,0xff,0xfd,0x55,0x57,0xff,0xff,0xff,0xff,0xaf,0xf0,0x4f,0xff,
+0xff,0xff,0xfe,0xfe,0xdf,0xff,0xfd,0x1f,0xff,0x46,0x2f,0x9f,0xff,0xff,0xff,0xa5,
+0xff,0xff,0xff,0xc0,0x37,0xff,0xff,0xf1,0x99,0x8e,0xdc,0x7f,0xe9,0x6a,0xbf,0xff,
+0xf0,0x0f,0xff,0xff,0xfd,0x55,0x57,0xff,0xff,0xff,0xff,0xff,0xf0,0x0f,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xff,0x07,0xff,0xc0,0xbe,0xff,0xff,0xcf,0xef,0x9f,0xff,
+0xff,0xfb,0xff,0xe7,0xff,0xff,0xa1,0xe3,0xce,0x3c,0x58,0x3f,0xf3,0xff,0xfd,0xef,
+0xf9,0xff,0xff,0xf7,0xf1,0x7f,0xff,0xcb,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,
+0xfe,0xf5,0x7f,0xff,0xf0,0xff,0xfe,0xff,0xc4,0x75,0xe7,0xb9,0xff,0xff,0xff,0xef,
+0xff,0xc7,0x37,0x3b,0xff,0xf0,0x13,0x9e,0x0f,0xf4,0xff,0xfe,0xfb,0xff,0xff,0xf9,
+0xfc,0xff,0xff,0xff,0xff,0xbf,0xff,0xff,0xff,0xfa,0xf0,0xef,0xff,0xff,0xff,0xfe,
+0xf3,0xc0,0x01,0x00,0x00,0x02,0x00,0x02,0x22,0x00,0x00,0xc0,0x40,0x00,0x40,0x00,
+0x04,0x08,0x04,0x0a,0x01,0x01,0x10,0x20,0x20,0x00,0x00,0x04,0x08,0x08,0x04,0x00,
+0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x01,0x3c,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xfd,
+0x3f,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0xff,0x7f,0xff,0x7f,0xff,0xcf,0x9d,0xff,
+0xff,0xf7,0xfd,0xf1,0xff,0xff,0xff,0xee,0xbf,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdb,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xf0,0xff,
+0xff,0xff,0xf7,0xf7,0xff,0xff,0xfe,0xbf,0xf7,0xff,0xff,0x5b,0xff,0xbf,0xf7,0xff,
+0xfd,0x7f,0x71,0xfd,0xff,0xed,0xf7,0xfe,0xef,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,
+0xff,0xff,0xef,0xff,0x7f,0xff,0xd0,0xf0,0xff,0xff,0xff,0xff,0xfe,0xf8,0x30,0x11,
+0x00,0x48,0x60,0x40,0x82,0x60,0x24,0x60,0x00,0xcc,0x00,0x80,0x04,0x01,0x00,0x00,
+0x14,0x01,0x0c,0x04,0x00,0x30,0x00,0x00,0x00,0x08,0x08,0x00,0x01,0x00,0xc2,0x00,
+0x00,0x02,0x00,0x80,0x00,0xc1,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
+0xf7,0x7b,0xff,0xf3,0xeb,0xbf,0xff,0xf7,0xff,0xff,0xff,0xe7,0x5d,0x3f,0xff,0xf6,
+0xd1,0xfd,0xff,0xeb,0xf7,0x3d,0xff,0xff,0xff,0x5f,0xff,0x7f,0x7f,0xf3,0xff,0xff,
+0xef,0xfd,0xbf,0xff,0xff,0xf0,0x5f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf5,0xb5,0xdf,
+0x6f,0x7d,0x69,0x7f,0xfb,0xdf,0x52,0x5f,0xf6,0xf7,0xfe,0xf6,0xf3,0xbd,0xb1,0xda,
+0xcd,0xfe,0xf6,0xee,0xd2,0xbd,0xa5,0xaf,0xbd,0xff,0x6f,0x7c,0xeb,0x2b,0xfa,0xda,
+0xff,0xfe,0xdf,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdb,0xff,0xf6,
+0xff,0xf6,0xff,0xbd,0xbf,0xcd,0xbf,0xeb,0x6f,0xf7,0x6f,0xdf,0xdb,0x51,0xfd,0xbd,
+0xff,0x6f,0xff,0x6f,0xfb,0x5b,0xff,0xdb,0xff,0xf6,0xfe,0xf6,0xfd,0xfd,0xbf,0xfe,
+0xf7,0xff,0xd0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfa,0x50,0xff,0xff,0xff,
+0xf0,0x6f,0xff,0xff,0xf0,0x96,0xff,0xff,0xc6,0x2b,0xff,0xff,0xf1,0xfc,0xff,0xff,
+0xf7,0xdb,0xc3,0xff,0x00,0xff,0xff,0xff,0xff,0xff,0xc1,0x4f,0xc3,0xff,0xff,0xff,
+0xaf,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf5,0xa0,0xff,0xff,0xff,0xf0,
+0x6f,0xff,0xff,0xf0,0x96,0xff,0xff,0xc6,0x2b,0xff,0xff,0xf1,0x5a,0xff,0xff,0xff,
+0xf3,0xc3,0xff,0x00,0xff,0xff,0xff,0xff,0xff,0xc1,0x4f,0xc3,0xff,0xff,0xff,0xff,
+0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0x9f,0xf0,0x7f,
+0xff,0xf9,0xfc,0x4f,0xf3,0xff,0x27,0xeb,0xff,0xfc,0x81,0xfc,0x7f,0xfe,0x7b,0xff,
+0xf7,0xff,0x12,0x7f,0xff,0xff,0xff,0xff,0x18,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,
+0x7f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xff,0xdf,0xfe,0xff,0xfc,0x7e,0x7f,0xbf,
+0xff,0xff,0xaf,0xef,0xff,0xdf,0xdf,0xfb,0xff,0xf1,0xc3,0xfe,0x6f,0xf1,0xcf,0x3f,
+0xfb,0xff,0xff,0xcf,0xfe,0xff,0xff,0xfe,0x7f,0xbf,0xff,0xff,0xbf,0xfa,0xf0,0xdf,
+0xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x01,0x00,0x00,
+0x20,0x00,0x01,0x00,0x10,0x00,0x00,0x00,0x01,0x00,0x02,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x02,0x00,0x00,0x80,0x00,0x02,0x80,0x00,0x02,0x3c,0xf0,0x2f,0xff,
+0xff,0xff,0xfe,0xfd,0xbf,0xff,0xfb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xf5,0xf1,0xff,0x7f,0xff,0xff,0xff,0xff,0xef,0xff,
+0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xdb,0xf0,0x2f,0xff,0xff,
+0xff,0xfe,0xf0,0xff,0xff,0xff,0xfb,0xff,0xbf,0xff,0xff,0xff,0xff,0xf7,0xbf,0xfb,
+0xff,0xff,0xff,0xdf,0xf7,0xff,0xf1,0xf7,0xbf,0xfb,0xff,0xff,0xff,0x7f,0xde,0xff,
+0xff,0xff,0xff,0xff,0xff,0xed,0xf7,0xff,0xff,0x7f,0xd0,0xf0,0x3f,0xff,0xff,0xff,
+0xfe,0xf8,0x30,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x00,0xe0,0x00,0x00,0x80,
+0x20,0x01,0x01,0x92,0x00,0x01,0x01,0x00,0xe0,0x1c,0x60,0x20,0x30,0x08,0x08,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x00,0xc1,0xf0,0x6f,0xff,0xff,0xff,0xfe,
+0xff,0xff,0xff,0xff,0xff,0xdb,0xfe,0xff,0xff,0xdf,0xff,0xfc,0x7f,0xfb,0xbf,0xff,
+0xff,0xff,0xff,0xff,0xf1,0xf6,0xff,0xf7,0x7e,0x3f,0xff,0x7f,0xff,0xff,0xff,0xf7,
+0xff,0xff,0xff,0xed,0xff,0xdf,0xff,0xb7,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,
+0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xbf,0xff,0xdf,
+0x57,0xef,0xf1,0xfd,0xfe,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xfb,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,
+0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xdf,0xff,
+0xff,0xf1,0xfd,0xff,0x7f,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xfe,0xff,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xf7,0xfd,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbf,0xff,0xff,0xff,0xff,0xff,
+0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
+0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xfb,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0x6f,0xff,0xfe,0xbf,0xff,0xf1,0xff,
+0xf7,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,
+0xff,0xff,0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xfb,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0x57,0xff,0xfd,0xbf,0xff,0xf1,0xff,0xef,
+0xfe,0xff,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,
+0xde,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdb,0xff,0xdb,0xfd,
+0xf6,0xff,0xf6,0xff,0x3c,0xbc,0xbc,0xbf,0xdf,0x6f,0xe7,0x2f,0xf1,0x3c,0xbf,0xfd,
+0xbf,0xdf,0x6f,0xff,0x6f,0xf7,0xdb,0xff,0xdb,0xfd,0xf6,0xff,0xf6,0xff,0xff,0xff,
+0x02,0x01,0xdf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+0xff,0xff,0xff,0xff,0xff,0xff };
diff --git a/drivers/net/hamradio/z8530.h b/drivers/net/hamradio/z8530.h
new file mode 100644
index 000000000000..8bef548572aa
--- /dev/null
+++ b/drivers/net/hamradio/z8530.h
@@ -0,0 +1,245 @@
+
+/* 8530 Serial Communications Controller Register definitions */
+#define FLAG 0x7e
+
+/* Write Register 0 */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
+#define INT_ERR_Rx 0x18 /* Int on error only */
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define RxENABLE 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+
+/* Write Register 4 */
+
+#define PAR_ENA 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xC0 /* x64 clock mode */
+
+/* Write Register 5 */
+
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define BRENABL 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define ZCIE 2 /* Zero count IE */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC_HUNT 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define CRC_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10 (misc status bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+/* Z85C30/Z85230 Enhanced SCC register definitions */
+
+/* Write Register 7' (SDLC/HDLC Programmable Enhancements) */
+#define AUTOTXF 0x01 /* Auto Tx Flag */
+#define AUTOEOM 0x02 /* Auto EOM Latch Reset */
+#define AUTORTS 0x04 /* Auto RTS */
+#define TXDNRZI 0x08 /* TxD Pulled High in SDLC NRZI mode */
+#define RXFIFOH 0x08 /* Z85230: Int on RX FIFO half full */
+#define FASTDTR 0x10 /* Fast DTR/REQ Mode */
+#define CRCCBCR 0x20 /* CRC Check Bytes Completely Received */
+#define TXFIFOE 0x20 /* Z85230: Int on TX FIFO completely empty */
+#define EXTRDEN 0x40 /* Extended Read Enabled */
+
+/* Write Register 15 (external/status interrupt control) */
+#define SHDLCE 1 /* SDLC/HDLC Enhancements Enable */
+#define FIFOE 4 /* FIFO Enable */
+
+/* Read Register 6 (frame status FIFO) */
+#define BCLSB 0xff /* LSB of 14 bits count */
+
+/* Read Register 7 (frame status FIFO) */
+#define BCMSB 0x3f /* MSB of 14 bits count */
+#define FDA 0x40 /* FIFO Data Available Status */
+#define FOS 0x80 /* FIFO Overflow Status */
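
The write registers defined above are not reached as separate I/O locations: the 8530 exposes a single control port per channel, and a write register is selected by first writing its index (R1..R15, i.e. the register-select bits of WR0) to that port and then writing the value. A minimal sketch of that two-step access, assuming a kernel context with <asm/io.h> and a hypothetical control-port address scc_ctrl (the helper name wr_scc is illustrative, not part of this header):

	#include <asm/io.h>

	/* Select a Z8530 write register, then write its new value. */
	static inline void wr_scc(unsigned long scc_ctrl, int reg, int val)
	{
		outb(reg, scc_ctrl);	/* load the register pointer via WR0 */
		outb(val, scc_ctrl);	/* the next write lands in that register */
	}

	/* e.g. enable the receiver for 8 bits/character using the WR3 bits: */
	/* wr_scc(scc_ctrl, R3, Rx8 | RxENABLE); */

The read registers work the same way: write the index to the control port, then read the value back from that port.
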
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
new file mode 100644
index 000000000000..4834314b676d
--- /dev/null
+++ b/drivers/net/hp-plus.c
@@ -0,0 +1,495 @@
+/* hp-plus.c: An HP PCLAN/plus ethernet driver for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+static const char version[] =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/string.h> /* Important -- this inlines word moves. */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "hp-plus"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] __initdata =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears continuous to the driver. (c.f. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hardware parameters. */
+ LAN_Page = 4, /* Transceiver selection, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
+ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
+ MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
+
+static int hpp_probe1(struct net_device *dev, int ioaddr);
+
+static void hpp_reset_8390(struct net_device *dev);
+static int hpp_open(struct net_device *dev);
+static int hpp_close(struct net_device *dev);
+static void hpp_mem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_mem_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hpp_io_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_io_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* Probe a list of addresses for an HP LAN+ adaptor.
+ This routine is almost boilerplate. */
+
+static int __init do_hpp_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hpp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; hpplus_portlist[i]; i++) {
+ if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: hpp_close() handles free_irq */
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init hp_plus_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_hpp_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/* Do the interesting part of the probe at a single address. */
+static int __init hpp_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ unsigned char checksum = 0;
+ const char name[] = "HP-PC-LAN+";
+ int mem_start;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check for the HP+ signature, 50 48 0x 53. */
+ if (inw(ioaddr + HP_ID) != 0x4850
+ || (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s at %#3x,", dev->name, name, ioaddr);
+
+ /* Retrieve and checksum the station address. */
+ outw(MAC_Page, ioaddr + HP_PAGING);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ unsigned char inval = inb(ioaddr + 8 + i);
+ dev->dev_addr[i] = inval;
+ checksum += inval;
+ printk(" %2.2x", inval);
+ }
+ checksum += inb(ioaddr + 14);
+
+ if (checksum != 0xff) {
+ printk(" bad checksum %2.2x.\n", checksum);
+ retval = -ENODEV;
+ goto out;
+ } else {
+ /* Point at the Software Configuration Flags. */
+ outw(ID_Page, ioaddr + HP_PAGING);
+ printk(" ID %4.4x", inw(ioaddr + 12));
+ }
+
+ /* Read the IRQ line. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ {
+ int irq = inb(ioaddr + 13) & 0x0f;
+ int option = inw(ioaddr + HPP_OPTION);
+
+ dev->irq = irq;
+ if (option & MemEnable) {
+ mem_start = inw(ioaddr + 9) << 8;
+ printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
+ } else {
+ mem_start = 0;
+ printk(", IRQ %d, programmed-I/O mode.\n", irq);
+ }
+ }
+
+ /* Set the wrap registers for string I/O reads. */
+ outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+
+ dev->open = &hpp_open;
+ dev->stop = &hpp_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ ei_status.name = name;
+ ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
+ ei_status.stop_page = HP_STOP_PG;
+
+ ei_status.reset_8390 = &hpp_reset_8390;
+ ei_status.block_input = &hpp_io_block_input;
+ ei_status.block_output = &hpp_io_block_output;
+ ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
+
+ /* Check if the memory_enable flag is set in the option register. */
+ if (mem_start) {
+ ei_status.block_input = &hpp_mem_block_input;
+ ei_status.block_output = &hpp_mem_block_output;
+ ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
+ dev->mem_start = mem_start;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
+ dev->mem_end = ei_status.rmem_end
+ = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
+ }
+
+ outw(Perf_Page, ioaddr + HP_PAGING);
+ NS8390_init(dev, 0);
+ /* Leave the 8390 and HP chip reset. */
+ outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
+
+ return 0;
+out:
+ release_region(ioaddr, HP_IO_EXTENT);
+ return retval;
+}
+
+static int
+hpp_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg;
+ int retval;
+
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
+ return retval;
+ }
+
+ /* Reset the 8390 and HP chip. */
+ option_reg = inw(ioaddr + HPP_OPTION);
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ udelay(5);
+ /* Unreset the board and enable interrupts. */
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ /* Set the wrap registers for programmed-I/O operation. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Select the operational page. */
+ outw(Perf_Page, ioaddr + HP_PAGING);
+
+ ei_open(dev);
+ return 0;
+}
+
+static int
+hpp_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ free_irq(dev->irq, dev);
+ ei_close(dev);
+ outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
+ ioaddr + HPP_OPTION);
+
+ return 0;
+}
+
+static void
+hpp_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ /* Pause a few cycles for the hardware reset to take place. */
+ udelay(5);
+ ei_status.txing = 0;
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ udelay(5);
+
+
+ if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+ Note that transfer with the EtherTwist+ must be on word boundaries. */
+
+static void
+hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. */
+
+static void
+hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ char *buf = skb->data;
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inw(ioaddr + HP_DATAPORT);
+}
+
+/* The corresponding shared memory versions of the above 2 functions. */
+
+static void
+hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ isa_memcpy_fromio(hdr, dev->mem_start, sizeof(struct e8390_pkt_hdr));
+ outw(option_reg, ioaddr + HPP_OPTION);
+ hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
+}
+
+static void
+hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+
+ /* Caution: this relies on get_8390_hdr() rounding up count!
+ Also note that we *can't* use eth_io_copy_and_sum() because
+ it will not always copy "count" bytes (e.g. padded IP). */
+
+ isa_memcpy_fromio(skb->data, dev->mem_start, count);
+ outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+/* A special note: we *must* always transfer >=16 bit words.
+ It's always safe to round up, so we do. */
+static void
+hpp_io_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
+ return;
+}
+
+static void
+hpp_mem_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ isa_memcpy_toio(dev->mem_start, buf, (count + 3) & ~3);
+ outw(option_reg, ioaddr + HPP_OPTION);
+
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
+static struct net_device *dev_hpp[MAX_HPP_CARDS];
+static int io[MAX_HPP_CARDS];
+static int irq[MAX_HPP_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O port address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
+MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_hpp_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_hpp[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct net_device *dev = dev_hpp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
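
The 8-bit-wide internal buffer called out in the comments above is why every programmed-I/O transfer in hp-plus.c works on whole 16-bit or 32-bit words: hpp_io_block_output() rounds the byte count up to dwords for outsl(), and hpp_io_block_input() fetches a trailing odd byte with one extra inw(). A rough illustration of that rounding rule, not taken from the driver itself (dataport is a hypothetical port address here):

	#include <asm/io.h>

	/* Push 'count' bytes through a word-wide dataport. Odd counts are
	 * rounded up to the next word, so up to one padding byte past the
	 * buffer may be transferred -- the same trade-off the driver makes
	 * with its (count+3)>>2 rounding for outsl(). */
	static void pio_write_words(unsigned long dataport,
				    const unsigned char *buf, int count)
	{
		outsw(dataport, buf, (count + 1) >> 1);
	}
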
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
new file mode 100644
index 000000000000..026888611d6f
--- /dev/null
+++ b/drivers/net/hp.c
@@ -0,0 +1,464 @@
+/* hp.c: An HP LAN ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a driver for the HP PC-LAN adaptors.
+
+ Sources:
+ The Crynwr packet driver.
+*/
+
+static const char version[] =
+ "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "hp"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hppclan_portlist[] __initdata =
+{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
+
+#define HP_IO_EXTENT 32
+
+#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
+#define HP_ID 0x07
+#define HP_CONFIGURE 0x08 /* Configuration register. */
+#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
+#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
+#define HP_DATAON 0x10 /* Turn on dataport */
+#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
+#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
+
+static int hp_probe1(struct net_device *dev, int ioaddr);
+
+static int hp_open(struct net_device *dev);
+static int hp_close(struct net_device *dev);
+static void hp_reset_8390(struct net_device *dev);
+static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hp_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void hp_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+static void hp_init_card(struct net_device *dev);
+
+/* The map from IRQ number to HP_CONFIGURE register setting. */
+/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
+static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
+
+
+/* Probe for an HP LAN adaptor.
+ Also initialize the card and fill in STATION_ADDR with the station
+ address. */
+
+static int __init do_hp_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; hppclan_portlist[i]; i++) {
+ if (hp_probe1(dev, hppclan_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init hp_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_hp_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init hp_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval, board_id, wordmode;
+ const char *name;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check for the HP physical address, 08 00 09 xx xx xx. */
+ /* This really isn't good enough: we may pick up HP LANCE boards
+ also! Avoid the lance 0x5757 signature. */
+ if (inb(ioaddr) != 0x08
+ || inb(ioaddr+1) != 0x00
+ || inb(ioaddr+2) != 0x09
+ || inb(ioaddr+14) == 0x57) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Set up the parameters based on the board ID.
+ If you have additional mappings, please mail them to me -djb. */
+ if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
+ name = "HP27247";
+ wordmode = 1;
+ } else {
+ name = "HP27250";
+ wordmode = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Snarf the interrupt now. Someday this could be moved to open(). */
+ if (dev->irq < 2) {
+ int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+ if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
+ unsigned long cookie = probe_irq_on();
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
+ && request_irq (irq, ei_interrupt, 0, DRV_NAME, dev) == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+ }
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ printk(" no free IRQ lines.\n");
+ retval = -EBUSY;
+ goto out;
+ }
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out;
+ }
+ }
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+ dev->open = &hp_open;
+ dev->stop = &hp_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ ei_status.name = name;
+ ei_status.word16 = wordmode;
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES;
+ ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
+
+ ei_status.reset_8390 = &hp_reset_8390;
+ ei_status.get_8390_hdr = &hp_get_8390_hdr;
+ ei_status.block_input = &hp_block_input;
+ ei_status.block_output = &hp_block_output;
+ hp_init_card(dev);
+
+ return 0;
+out:
+ release_region(ioaddr, HP_IO_EXTENT);
+ return retval;
+}
+
+static int
+hp_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int
+hp_close(struct net_device *dev)
+{
+ ei_close(dev);
+ return 0;
+}
+
+static void
+hp_reset_8390(struct net_device *dev)
+{
+ int hp_base = dev->base_addr - NIC_OFFSET;
+ int saved_config = inb_p(hp_base + HP_CONFIGURE);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+ outb_p(0x00, hp_base + HP_CONFIGURE);
+ ei_status.txing = 0;
+ /* Pause just a few cycles for the hardware reset to take place. */
+ udelay(5);
+
+ outb_p(saved_config, hp_base + HP_CONFIGURE);
+ udelay(5);
+
+ if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+static void
+hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+
+ if (ei_status.word16)
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+ The HP LAN doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+ if (ei_status.word16) {
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
+ } else {
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ /* Check only the lower 8 bits so we can ignore ring wrap. */
+ if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
+ printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+static void
+hp_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work. */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0xff, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+#define NE_CMD 0x00
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ inb_p(0x61);
+ inb_p(0x61);
+#endif
+
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base);
+ if (ei_status.word16) {
+ /* Use the 'rep' sequence for 16 bit boards. */
+ outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
+ } else {
+ outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
+
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ if ((start_page << 8) + count != addr)
+ printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
+ dev->name, (start_page << 8) + count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+/* This function resets the ethercard if something screws up. */
+static void
+hp_init_card(struct net_device *dev)
+{
+ int irq = dev->irq;
+ NS8390_init(dev, 0);
+ outb_p(irqmap[irq&0x0f] | HP_RUN,
+ dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+#ifdef MODULE
+#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
+static struct net_device *dev_hp[MAX_HP_CARDS];
+static int io[MAX_HP_CARDS];
+static int irq[MAX_HP_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_hp_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_hp[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct net_device *dev = dev_hp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
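
The IRQ autoprobe in hp_probe1() above leans on the kernel's probe_irq_on()/probe_irq_off() pair: start watching the unclaimed interrupt lines, tickle the card so it raises an interrupt (the two HP_CONFIGURE writes), then ask which line actually fired. Reduced to its bare pattern, with a hypothetical poke_card() standing in for the device-specific trigger:

	#include <linux/interrupt.h>

	static void poke_card(void)
	{
		/* device-specific: make the hardware raise its interrupt,
		 * e.g. the HP_RUN twiddle of HP_CONFIGURE seen above */
	}

	static int guess_irq(void)
	{
		unsigned long cookie = probe_irq_on();	/* watch unclaimed IRQ lines */

		poke_card();
		return probe_irq_off(cookie);	/* >0: line that fired; 0 or <0: none or ambiguous */
	}

hp_probe1() additionally tests each candidate with a dummy request_irq() call first, intended to skip lines that are already claimed.
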
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
new file mode 100644
index 000000000000..acb170152bbd
--- /dev/null
+++ b/drivers/net/hp100.c
@@ -0,0 +1,3115 @@
+/*
+** hp100.c
+** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
+**
+** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $
+**
+** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
+** Extended for new busmaster capable chipsets by
+** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
+**
+** Maintained by: Jaroslav Kysela <perex@suse.cz>
+**
+** This driver has only been tested with
+** -- HP J2585B 10/100 Mbit/s PCI Busmaster
+** -- HP J2585A 10/100 Mbit/s PCI
+** -- HP J2970 10 Mbit/s PCI Combo 10base-T/BNC
+** -- HP J2973 10 Mbit/s PCI 10base-T
+** -- HP J2573 10/100 ISA
+** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA
+** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI
+**
+** but it should also work with the other CASCADE based adapters.
+**
+** TODO:
+** - J2573 seems to hang sometimes when in shared memory mode.
+** - Mode for Priority TX
+** - Check PCI registers, performance might be improved?
+** - To reduce interrupt load in busmaster, one could switch off
+** the interrupts that are used to refill the queues whenever the
+** queues are filled up to more than a certain threshold.
+** - some updates for EISA version of card
+**
+**
+** This code is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This code is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+** 1.57c -> 1.58
+** - used indent to change coding-style
+** - added KTI DP-200 EISA ID
+** - ioremap is also used for low (<1MB) memory (multi-architecture support)
+**
+** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+** - release resources on failure in init_module
+**
+** 1.57 -> 1.57b - Jean II
+** - fix spinlocks, SMP is now working !
+**
+** 1.56 -> 1.57
+** - updates for new PCI interface for 2.1 kernels
+**
+** 1.55 -> 1.56
+** - removed printk in misc. interrupt and update statistics to allow
+** monitoring of card status
+** - timing changes in xmit routines, relogin to 100VG hub added when
+** driver does reset
+** - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+** - fixed bad initialization in init_module
+** - added Compex FreedomLine adapter
+** - some fixes in card initialization
+**
+** 1.53 -> 1.54
+** - added hardware multicast filter support (doesn't work)
+** - little changes in hp100_sense_lan routine
+** - added support for Coax and AUI (J2970)
+** - fix for multiple cards and hp100_mode parameter (insmod)
+** - fix for shared IRQ
+**
+** 1.52 -> 1.53
+** - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B /* Trace */
+#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER /* Need to be debugged... */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA 0
+#define HP100_BUS_EISA 1
+#define HP100_BUS_PCI 2
+
+#define HP100_REGION_SIZE 0x20 /* for ioports */
+#define HP100_SIG_LEN 8 /* same as EISA_SIG_LEN */
+
+#define HP100_MAX_PACKET_SIZE (1536+4)
+#define HP100_MIN_PACKET_SIZE 60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% onboard memory on the card are used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't enable transmit outgoing packets as priority */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_private {
+ spinlock_t lock;
+ char id[HP100_SIG_LEN];
+ u_short chip;
+ u_short soft_model;
+ u_int memory_size;
+ u_int virt_memory_size;
+ u_short rx_ratio; /* 1 - 99 */
+ u_short priority_tx; /* != 0 - priority tx */
+ u_short mode; /* PIO, Shared Mem or Busmaster */
+ u_char bus;
+ struct pci_dev *pci_dev;
+ short mem_mapped; /* memory mapped access */
+ void __iomem *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
+ unsigned long mem_ptr_phys; /* physical memory mapped area */
+ short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
+ int hub_status; /* was login to hub successful? */
+ u_char mac1_mode;
+ u_char mac2_mode;
+ u_char hash_bytes[8];
+ struct net_device_stats stats;
+
+ /* Rings for busmaster mode: */
+ hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
+ hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */
+ hp100_ring_t *txrhead; /* Head (oldest) index into txring */
+ hp100_ring_t *txrtail; /* Tail (newest) index into txring */
+
+ hp100_ring_t rxring[MAX_RX_PDL];
+ hp100_ring_t txring[MAX_TX_PDL];
+
+ u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
+ u_long whatever_offset; /* Offset to bus/phys/dma address */
+	int rxrcommit;		/* # Rx PDLs committed to adapter */
+	int txrcommit;		/* # Tx PDLs committed to adapter */
+};
+
+/*
+ * variables
+ */
+static const char *hp100_isa_tbl[] = {
+ "HWPF150", /* HP J2573 rev A */
+ "HWP1950", /* HP J2573 */
+};
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id hp100_eisa_tbl[] = {
+ { "HWPF180" }, /* HP J2577 rev A */
+ { "HWP1920" }, /* HP 27248B */
+ { "HWP1940" }, /* HP J2577 */
+ { "HWP1990" }, /* HP J2577 */
+ { "CPX0301" }, /* ReadyLink ENET100-VG4 */
+ { "CPX0401" }, /* FreedomLine 100/VG */
+ { "" } /* Mandatory final entry ! */
+};
+MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
+#endif
+
+#ifdef CONFIG_PCI
+static struct pci_device_id hp100_pci_tbl[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,},
+/* {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
+#endif
+
+static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+static int hp100_mode = 1;
+
+module_param(hp100_rx_ratio, int, 0);
+module_param(hp100_priority_tx, int, 0);
+module_param(hp100_mode, int, 0);
+
+/*
+ * prototypes
+ */
+
+static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
+ struct pci_dev *pci_dev);
+
+
+static int hp100_open(struct net_device *dev);
+static int hp100_close(struct net_device *dev);
+static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int hp100_start_xmit_bm(struct sk_buff *skb,
+ struct net_device *dev);
+static void hp100_rx(struct net_device *dev);
+static struct net_device_stats *hp100_get_stats(struct net_device *dev);
+static void hp100_misc_interrupt(struct net_device *dev);
+static void hp100_update_stats(struct net_device *dev);
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr);
+static void hp100_set_multicast_list(struct net_device *dev);
+static irqreturn_t hp100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void hp100_start_interface(struct net_device *dev);
+static void hp100_stop_interface(struct net_device *dev);
+static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr);
+static int hp100_sense_lan(struct net_device *dev);
+static int hp100_login_to_vg_hub(struct net_device *dev,
+ u_short force_relogin);
+static int hp100_down_vg_link(struct net_device *dev);
+static void hp100_cascade_reset(struct net_device *dev, u_short enable);
+static void hp100_BM_shutdown(struct net_device *dev);
+static void hp100_mmuinit(struct net_device *dev);
+static void hp100_init_pdls(struct net_device *dev);
+static int hp100_init_rxpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u_int * pdlptr);
+static int hp100_init_txpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u_int * pdlptr);
+static void hp100_rxfill(struct net_device *dev);
+static void hp100_hwinit(struct net_device *dev);
+static void hp100_clean_txring(struct net_device *dev);
+#ifdef HP100_DEBUG
+static void hp100_RegisterDump(struct net_device *dev);
+#endif
+
+/* Conversion to new PCI API :
+ * Convert an address in a kernel buffer to a bus/phys/dma address.
+ * This works *only* for memory fragments that are part of lp->page_vaddr_algn,
+ * because that page was DMA-allocated via pci_alloc_consistent(),
+ * so we just need to "retrieve" the original mapping to a bus/phys/dma
+ * address - Jean II */
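+/* Note: lp->whatever_offset is computed in hp100_probe1() as the difference
+ * between the bus address returned by pci_alloc_consistent() and the page's
+ * aligned virtual address, so adding it to any pointer inside that page yields
+ * the corresponding bus/phys/dma address. */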
+static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ return ((u_long) ptr) + lp->whatever_offset;
+}
+
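+/* pdl_map_data() maps an rx data buffer (MAX_ETHER_SIZE bytes, device-to-host)
+ * for DMA; the returned bus address is what hp100_build_rx_pdl() writes into
+ * the PDL fragment address field. */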
+static inline u_int pdl_map_data(struct hp100_private *lp, void *data)
+{
+ return pci_map_single(lp->pci_dev, data,
+ MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* TODO: This function should not really be needed in a good design... */
+static void wait(void)
+{
+ mdelay(1);
+}
+
+/*
+ * probe functions
+ * These functions should - if possible - avoid doing write operations
+ * since this could cause problems when the card is not installed.
+ */
+
+/*
+ * Read board id and convert to string.
+ * Effectively same code as decode_eisa_sig
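+ *
+ * The 3-letter vendor code is packed 5 bits per letter into the first two
+ * signature bytes, followed by a 16-bit revision; for example, the signature
+ * bytes 0x22 0xf0 decode to the vendor code "HWP".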
+ */
+static __devinit const char *hp100_read_id(int ioaddr)
+{
+ int i;
+ static char str[HP100_SIG_LEN];
+ unsigned char sig[4], sum;
+ unsigned short rev;
+
+ hp100_page(ID_MAC_ADDR);
+ sum = 0;
+ for (i = 0; i < 4; i++) {
+ sig[i] = hp100_inb(BOARD_ID + i);
+ sum += sig[i];
+ }
+
+ sum += hp100_inb(BOARD_ID + i);
+ if (sum != 0xff)
+ return NULL; /* bad checksum */
+
+ str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
+ str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
+ str[2] = (sig[1] & 0x1f) + ('A' - 1);
+ rev = (sig[2] << 8) | sig[3];
+ sprintf(str + 3, "%04X", rev);
+
+ return str;
+}
+
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+{
+ const char *sig;
+ int i;
+
+ if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+ goto err;
+
+ if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) {
+ release_region(ioaddr, HP100_REGION_SIZE);
+ goto err;
+ }
+
+ sig = hp100_read_id(ioaddr);
+ release_region(ioaddr, HP100_REGION_SIZE);
+
+ if (sig == NULL)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
+ if (!strcmp(hp100_isa_tbl[i], sig))
+ break;
+
+ }
+
+ if (i < ARRAY_SIZE(hp100_isa_tbl))
+ return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+ err:
+ return -ENODEV;
+
+}
+/*
+ * Probe for ISA board.
+ * EISA and PCI are handled by device infrastructure.
+ */
+
+static int __init hp100_isa_probe(struct net_device *dev, int addr)
+{
+ int err = -ENODEV;
+
+ /* Probe for a specific ISA address */
+ if (addr > 0xff && addr < 0x400)
+ err = hp100_isa_probe1(dev, addr);
+
+ else if (addr != 0)
+ err = -ENXIO;
+
+ else {
+		/* Probe all possible ISA port regions */
+ for (addr = 0x100; addr < 0x400; addr += 0x20) {
+ err = hp100_isa_probe1(dev, addr);
+ if (!err)
+ break;
+ }
+ }
+ return err;
+}
+
+
+#ifndef MODULE
+struct net_device * __init hp100_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ SET_MODULE_OWNER(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4200, TRACE);
+ printk("hp100: %s: probe\n", dev->name);
+#endif
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ err = hp100_isa_probe(dev, dev->base_addr);
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+ out1:
+ release_region(dev->base_addr, HP100_REGION_SIZE);
+ out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
+ u_char bus, struct pci_dev *pci_dev)
+{
+ int i;
+ int err = -ENODEV;
+ const char *eid;
+ u_int chip;
+ u_char uc;
+ u_int memory_size = 0, virt_memory_size = 0;
+ u_short local_mode, lsw;
+ short mem_mapped;
+ unsigned long mem_ptr_phys;
+ void __iomem *mem_ptr_virt;
+ struct hp100_private *lp;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4201, TRACE);
+ printk("hp100: %s: probe1\n", dev->name);
+#endif
+
+ /* memory region for programmed i/o */
+ if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+ goto out1;
+
+ if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE)
+ goto out2;
+
+ chip = hp100_inw(PAGING) & HP100_CHIPID_MASK;
+#ifdef HP100_DEBUG
+ if (chip == HP100_CHIPID_SHASTA)
+ printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if (chip == HP100_CHIPID_RAINIER)
+ printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if (chip == HP100_CHIPID_LASSEN)
+ printk("hp100: %s: Lassen Chip detected.\n", dev->name);
+ else
+ printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip);
+#endif
+
+ dev->base_addr = ioaddr;
+
+ eid = hp100_read_id(ioaddr);
+ if (eid == NULL) { /* bad checksum? */
+ printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr);
+ goto out2;
+ }
+
+ hp100_page(ID_MAC_ADDR);
+ for (i = uc = 0; i < 7; i++)
+ uc += hp100_inb(LAN_ADDR + i);
+ if (uc != 0xff) {
+		printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x\n", ioaddr);
+ err = -EIO;
+ goto out2;
+ }
+
+	/* Make sure that all registers are correctly updated... */
+
+ hp100_load_eeprom(dev, ioaddr);
+ wait();
+
+ /*
+ * Determine driver operation mode
+ *
+ * Use the variable "hp100_mode" upon insmod or as kernel parameter to
+ * force driver modes:
+ * hp100_mode=1 -> default, use busmaster mode if configured.
+ * hp100_mode=2 -> enable shared memory mode
+ * hp100_mode=3 -> force use of i/o mapped mode.
+ * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
+ */
+
+ /*
+ * LSW values:
+ * 0x2278 -> J2585B, PnP shared memory mode
+ * 0x2270 -> J2585B, shared memory mode, 0xdc000
+ * 0xa23c -> J2585B, I/O mapped mode
+ * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
+ * 0x2220 -> EISA HP, I/O (Shasta Chip)
+ * 0x2260 -> EISA HP, BusMaster (Shasta Chip)
+ */
+
+#if 0
+ local_mode = 0x2270;
+ hp100_outw(0xfefe, OPTION_LSW);
+ hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW);
+#endif
+
+	/* The hp100_mode value may be used in the future by another card */
+ local_mode = hp100_mode;
+ if (local_mode < 1 || local_mode > 4)
+ local_mode = 1; /* default */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: original LSW = 0x%x\n", dev->name,
+ hp100_inw(OPTION_LSW));
+#endif
+
+ if (local_mode == 3) {
+ hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: IO mapped mode forced.\n");
+ } else if (local_mode == 2) {
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: Shared memory mode requested.\n");
+ } else if (local_mode == 4) {
+ if (chip == HP100_CHIPID_LASSEN) {
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ printk("hp100: Busmaster mode requested.\n");
+ }
+ local_mode = 1;
+ }
+
+ if (local_mode == 1) { /* default behaviour */
+ lsw = hp100_inw(OPTION_LSW);
+
+ if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) &&
+ (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: IO_EN bit is set on card.\n", dev->name);
+#endif
+ local_mode = 3;
+ } else if (chip == HP100_CHIPID_LASSEN &&
+ (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) {
+ /* Conversion to new PCI API :
+ * I don't have the doc, but I assume that the card
+ * can map the full 32bit address space.
+ * Also, we can have EISA Busmaster cards (not tested),
+ * so beware !!! - Jean II */
+ if((bus == HP100_BUS_PCI) &&
+ (pci_set_dma_mask(pci_dev, 0xffffffff))) {
+				/* Gracefully fall back to shared memory */
+ goto busmasterfail;
+ }
+ printk("hp100: Busmaster mode enabled.\n");
+ hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+ } else {
+ busmasterfail:
+#ifdef HP100_DEBUG
+ printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name);
+ printk("hp100: %s: Trying shared memory mode.\n", dev->name);
+#endif
+ /* In this case, try shared memory mode */
+ local_mode = 2;
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
+ }
+ }
+#ifdef HP100_DEBUG
+ printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW));
+#endif
+
+	/* Check for shared memory on the card and remap it if necessary */
+ hp100_page(HW_MAP);
+ mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0);
+ mem_ptr_phys = 0UL;
+ mem_ptr_virt = NULL;
+ memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07));
+ virt_memory_size = 0;
+
+ /* For memory mapped or busmaster mode, we want the memory address */
+ if (mem_mapped || (local_mode == 1)) {
+ mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16));
+ mem_ptr_phys &= ~0x1fff; /* 8k alignment */
+
+ if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) {
+ printk("hp100: Can only use programmed i/o mode.\n");
+ mem_ptr_phys = 0;
+ mem_mapped = 0;
+ local_mode = 3; /* Use programmed i/o */
+ }
+
+ /* We do not need access to shared memory in busmaster mode */
+ /* However in slave mode we need to remap high (>1GB) card memory */
+ if (local_mode != 1) { /* = not busmaster */
+			/* We try with smaller memory sizes if ioremap fails */
+ for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) {
+ if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys);
+#endif
+ } else {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt);
+#endif
+ break;
+ }
+ }
+
+ if (mem_ptr_virt == NULL) { /* all ioremap tries failed */
+ printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n");
+ local_mode = 3;
+ virt_memory_size = 0;
+ }
+ }
+ }
+
+ if (local_mode == 3) { /* io mapped forced */
+ mem_mapped = 0;
+ mem_ptr_phys = 0;
+ mem_ptr_virt = NULL;
+ printk("hp100: Using (slow) programmed i/o mode.\n");
+ }
+
+ /* Initialise the "private" data structure for this card. */
+ lp = netdev_priv(dev);
+
+ spin_lock_init(&lp->lock);
+ strlcpy(lp->id, eid, HP100_SIG_LEN);
+ lp->chip = chip;
+ lp->mode = local_mode;
+ lp->bus = bus;
+ lp->pci_dev = pci_dev;
+ lp->priority_tx = hp100_priority_tx;
+ lp->rx_ratio = hp100_rx_ratio;
+ lp->mem_ptr_phys = mem_ptr_phys;
+ lp->mem_ptr_virt = mem_ptr_virt;
+ hp100_page(ID_MAC_ADDR);
+ lp->soft_model = hp100_inb(SOFT_MODEL);
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset(&lp->hash_bytes, 0x00, 8);
+
+ dev->base_addr = ioaddr;
+
+ lp->memory_size = memory_size;
+ lp->virt_memory_size = virt_memory_size;
+ lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */
+
+ dev->open = hp100_open;
+ dev->stop = hp100_close;
+
+ if (lp->mode == 1) /* busmaster */
+ dev->hard_start_xmit = hp100_start_xmit_bm;
+ else
+ dev->hard_start_xmit = hp100_start_xmit;
+
+ dev->get_stats = hp100_get_stats;
+ dev->set_multicast_list = &hp100_set_multicast_list;
+
+ /* Ask the card for which IRQ line it is configured */
+ if (bus == HP100_BUS_PCI) {
+ dev->irq = pci_dev->irq;
+ } else {
+ hp100_page(HW_MAP);
+ dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK;
+ if (dev->irq == 2)
+ dev->irq = 9;
+ }
+
+ if (lp->mode == 1) /* busmaster */
+ dev->dma = 4;
+
+ /* Ask the card for its MAC address and store it for later use. */
+ hp100_page(ID_MAC_ADDR);
+ for (i = uc = 0; i < 6; i++)
+ dev->dev_addr[i] = hp100_inb(LAN_ADDR + i);
+
+ /* Reset statistics (counters) */
+ hp100_clear_stats(lp, ioaddr);
+
+	/* If busmaster mode is wanted, a DMA-capable memory area is needed for
+	 * the rx and tx PDLs.
+	 * PCI cards can access the whole PC memory, therefore GFP_DMA is not
+	 * needed for the allocation of the memory area.
+	 */
+
+	/* TODO: We do not need this with old cards, where PDLs are stored
+	 * in the card's shared memory area. But currently, busmaster has been
+	 * implemented/tested only with the Lassen chip anyway... */
+ if (lp->mode == 1) { /* busmaster */
+ dma_addr_t page_baddr;
+		/* Get physically contiguous memory for TX & RX PDLs */
+ /* Conversion to new PCI API :
+		 * Pages are always aligned and zeroed, no need to do it ourselves.
+ * Doc says should be OK for EISA bus as well - Jean II */
+ if ((lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr)) == NULL) {
+ err = -ENOMEM;
+ goto out2;
+ }
+ lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE);
+#endif
+ lp->rxrcommit = lp->txrcommit = 0;
+ lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+ lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ }
+
+ /* Initialise the card. */
+	/* (I'm not really sure if it's a good idea to do this during probing, but
+	 * this way it is assured that the LAN connection type can be sensed
+	 * correctly.)
+	 */
+ hp100_hwinit(dev);
+
+ /* Try to find out which kind of LAN the card is connected to. */
+ lp->lan_type = hp100_sense_lan(dev);
+
+	/* Print out a message about what we think we have probed. */
+ printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq);
+ switch (bus) {
+ case HP100_BUS_EISA:
+ printk("EISA");
+ break;
+ case HP100_BUS_PCI:
+ printk("PCI");
+ break;
+ default:
+ printk("ISA");
+ break;
+ }
+ printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio);
+
+ if (lp->mode == 2) { /* memory mapped */
+ printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys,
+ (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? (u_long) lp->memory_size : 16 * 1024)) - 1);
+ if (mem_ptr_virt)
+ printk(" (virtual base %p)", mem_ptr_virt);
+ printk(".\n");
+
+ /* Set for info when doing ifconfig */
+ dev->mem_start = mem_ptr_phys;
+ dev->mem_end = mem_ptr_phys + lp->memory_size;
+ }
+
+ printk("hp100: ");
+ if (lp->lan_type != HP100_LAN_ERR)
+ printk("Adapter is attached to ");
+ switch (lp->lan_type) {
+ case HP100_LAN_100:
+ printk("100Mb/s Voice Grade AnyLAN network.\n");
+ break;
+ case HP100_LAN_10:
+ printk("10Mb/s network (10baseT).\n");
+ break;
+ case HP100_LAN_COAX:
+ printk("10Mb/s network (coax).\n");
+ break;
+ default:
+ printk("Warning! Link down.\n");
+ }
+
+ return 0;
+out2:
+ release_region(ioaddr, HP100_REGION_SIZE);
+out1:
+ return -ENODEV;
+}
+
+/* This procedure puts the card into a stable init state */
+static void hp100_hwinit(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4202, TRACE);
+ printk("hp100: %s: hwinit\n", dev->name);
+#endif
+
+ /* Initialise the card. -------------------------------------------- */
+
+ /* Clear all pending Ints and disable Ints */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */
+
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) {
+ hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */
+ wait();
+ } else {
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_cascade_reset(dev, 1);
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+ }
+
+ /* Initiate EEPROM reload */
+ hp100_load_eeprom(dev, 0);
+
+ wait();
+
+ /* Go into reset again. */
+ hp100_cascade_reset(dev, 1);
+
+ /* Set Option Registers to a safe state */
+ hp100_outw(HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN |
+ HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB |
+ HP100_FAKE_INT |
+ HP100_INT_EN |
+ HP100_MEM_EN |
+ HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw(HP100_TRI_INT |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+
+ hp100_outb(HP100_PRIORITY_TX |
+ HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+ /* TODO: Configure MMU for Ram Test. */
+ /* TODO: Ram Test. */
+
+	/* Re-check if the adapter is still at the same i/o location. */
+	/* (If the base i/o in the eeprom has been changed but the */
+	/* registers have not been changed, a reload of the eeprom */
+	/* would move the adapter to the address stored in the eeprom.) */
+
+ /* TODO: Code to implement. */
+
+ /* Until here it was code from HWdiscover procedure. */
+ /* Next comes code from mmuinit procedure of SCO BM driver which is
+ * called from HWconfigure in the SCO driver. */
+
+	/* Initialise MMU, switch on Busmaster Mode if configured, initialise
+ * multicast filter...
+ */
+ hp100_mmuinit(dev);
+
+ /* We don't turn the interrupts on here - this is done by start_interface. */
+ wait(); /* TODO: Do we really need this? */
+
+ /* Enable Hardware (e.g. unreset) */
+ hp100_cascade_reset(dev, 0);
+
+ /* ------- initialisation complete ----------- */
+
+ /* Finally try to log in the Hub if there may be a VG connection. */
+ if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR))
+ hp100_login_to_vg_hub(dev, 0); /* relogin */
+
+}
+
+
+/*
+ * mmuinit - Reinitialise Cascade MMU and MAC settings.
+ * Note: Must already be in reset and leaves card in reset.
+ */
+static void hp100_mmuinit(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4203, TRACE);
+ printk("hp100: %s: mmuinit\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+ printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name);
+ return;
+ }
+#endif
+
+ /* Make sure IRQs are masked off and ack'ed. */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+ /*
+ * Enable Hardware
+ * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
+ * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
+ * - Clear Priority, Advance Pkt and Xmit Cmd
+ */
+
+ hp100_outw(HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN | HP100_RESET_HB |
+ HP100_IO_EN |
+ HP100_FAKE_INT |
+ HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) { /* busmaster */
+ hp100_outw(HP100_BM_WRITE |
+ HP100_BM_READ |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+ } else if (lp->mode == 2) { /* memory mapped */
+ hp100_outw(HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ } else if (lp->mode == 3) { /* i/o mapped mode */
+ hp100_outw(HP100_MMAP_DIS | HP100_SET_HB |
+ HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ }
+
+ hp100_page(HW_MAP);
+ hp100_outb(0, EARLYRXCFG);
+ hp100_outw(0, EARLYTXCFG);
+
+ /*
+ * Enable Bus Master mode
+ */
+ if (lp->mode == 1) { /* busmaster */
+ /* Experimental: Set some PCI configuration bits */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */
+ hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */
+
+ /* PCI Bus failures should result in a Misc. Interrupt */
+ hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2);
+
+ hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW);
+ hp100_page(HW_MAP);
+ /* Use Burst Mode and switch on PAGE_CK */
+ hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM);
+ if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA))
+ hp100_orb(HP100_BM_PAGE_CK, BM);
+ hp100_orb(HP100_BM_MASTER, BM);
+ } else { /* not busmaster */
+
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ }
+
+ /*
+ * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
+ */
+ hp100_page(MMU_CFG);
+ if (lp->mode == 1) { /* only needed for Busmaster */
+ int xmit_stop, recv_stop;
+
+ if ((lp->chip == HP100_CHIPID_RAINIER)
+ || (lp->chip == HP100_CHIPID_SHASTA)) {
+ int pdl_stop;
+
+ /*
+			 * Each pdl is 508 bytes long. (63 frags * 8 bytes for address and
+			 * length, plus 4 bytes for the header). We will leave NUM_RXPDLS * 508 (rounded
+ * to the next higher 1k boundary) bytes for the rx-pdl's
+ * Note: For non-etr chips the transmit stop register must be
+ * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
+ */
+ pdl_stop = lp->memory_size;
+ xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff);
+ recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff);
+ hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
+#endif
+ } else {
+ /* ETR chip (Lassen) in busmaster mode */
+ xmit_stop = (lp->memory_size) - 1;
+ recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff);
+ }
+
+ hp100_outw(xmit_stop >> 4, TX_MEM_STOP);
+ hp100_outw(recv_stop >> 4, RX_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: TX_STOP = 0x%x\n", dev->name, xmit_stop >> 4);
+ printk("hp100: %s: RX_STOP = 0x%x\n", dev->name, recv_stop >> 4);
+#endif
+ } else {
+ /* Slave modes (memory mapped and programmed io) */
+ hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP);
+ hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP));
+ printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP));
+#endif
+ }
+
+ /* Write MAC address into page 1 */
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 6; i++)
+ hp100_outb(dev->dev_addr[i], MAC_ADDR + i);
+
+ /* Zero the multicast hash registers */
+ for (i = 0; i < 8; i++)
+ hp100_outb(0x0, HASH_BYTE0 + i);
+
+ /* Set up MAC defaults */
+ hp100_page(MAC_CTRL);
+
+ /* Go to LAN Page and zero all filter bits */
+ /* Zero accept error, accept multicast, accept broadcast and accept */
+ /* all directed packet bits */
+ hp100_andb(~(HP100_RX_EN |
+ HP100_TX_EN |
+ HP100_ACC_ERRORED |
+ HP100_ACC_MC |
+ HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1);
+
+ hp100_outb(0x00, MAC_CFG_2);
+
+ /* Zero the frame format bit. This works around a training bug in the */
+ /* new hubs. */
+ hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */
+
+ if (lp->priority_tx)
+ hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW);
+ else
+ hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW);
+
+ hp100_outb(HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+ /* If busmaster, initialize the PDLs */
+ if (lp->mode == 1)
+ hp100_init_pdls(dev);
+
+	/* Go to performance page and initialize isr and imr registers */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+}
+
+/*
+ * open/close functions
+ */
+
+static int hp100_open(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4204, TRACE);
+ printk("hp100: %s: open\n", dev->name);
+#endif
+
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if (request_irq(dev->irq, hp100_interrupt,
+ lp->bus == HP100_BUS_PCI || lp->bus ==
+ HP100_BUS_EISA ? SA_SHIRQ : SA_INTERRUPT,
+ "hp100", dev)) {
+ printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ lp->lan_type = hp100_sense_lan(dev);
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset(&lp->hash_bytes, 0x00, 8);
+
+ hp100_stop_interface(dev);
+
+ hp100_hwinit(dev);
+
+ hp100_start_interface(dev); /* sets mac modes, enables interrupts */
+
+ return 0;
+}
+
+/* The close function is called when the interface is to be brought down */
+static int hp100_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4205, TRACE);
+ printk("hp100: %s: close\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all IRQs */
+
+ hp100_stop_interface(dev);
+
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+
+ netif_stop_queue(dev);
+
+ free_irq(dev->irq, dev);
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: close LSW = 0x%x\n", dev->name,
+ hp100_inw(OPTION_LSW));
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Configure the PDL Rx rings and LAN
+ */
+static void hp100_init_pdls(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+ u_int *pageptr; /* Warning : increment by 4 - Jean II */
+ int i;
+
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4206, TRACE);
+ printk("hp100: %s: init pdls\n", dev->name);
+#endif
+
+ if (0 == lp->page_vaddr_algn)
+ printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name);
+ else {
+		/* pageptr shall point into the DMA accessible memory region. */
+		/* We use this pointer to track the upper limit of allocated */
+		/* memory in the allocated page. */
+ /* note: align the pointers to the pci cache line size */
+ memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */
+ pageptr = lp->page_vaddr_algn;
+
+ lp->rxrcommit = 0;
+ ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+
+ /* Initialise Rx Ring */
+ for (i = MAX_RX_PDL - 1; i >= 0; i--) {
+ lp->rxring[i].next = ringptr;
+ ringptr = &(lp->rxring[i]);
+ pageptr += hp100_init_rxpdl(dev, ringptr, pageptr);
+ }
+
+ /* Initialise Tx Ring */
+ lp->txrcommit = 0;
+ ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ for (i = MAX_TX_PDL - 1; i >= 0; i--) {
+ lp->txring[i].next = ringptr;
+ ringptr = &(lp->txring[i]);
+ pageptr += hp100_init_txpdl(dev, ringptr, pageptr);
+ }
+ }
+}
+
+
+/* These functions "format" the entries in the pdl structure */
+/* They return how much memory the fragments need. */
+static int hp100_init_rxpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u32 * pdlptr)
+{
+ /* pdlptr is starting address for this pdl */
+
+ if (0 != (((unsigned long) pdlptr) & 0xf))
+ printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n",
+ dev->name, (unsigned long) pdlptr);
+
+ ringptr->pdl = pdlptr + 1;
+ ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
+ ringptr->skb = (void *) NULL;
+
+ /*
+ * Write address and length of first PDL Fragment (which is used for
+	 * Write address and length of the first PDL fragment (which is used for
+	 * storing the RX header).
+ * store this information. (PDH is at offset 0x04)
+ */
+ /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
+
+ *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */
+ *(pdlptr + 3) = 4; /* Length Frag 1 */
+
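+	/* The value returned below is the size of one rx PDL in 32-bit words
+	 * (MAX_RX_FRAG address/length pairs plus two extra words), rounded up to
+	 * a multiple of four words so that the next PDL stays 16-byte aligned;
+	 * the caller advances its page pointer by this amount. */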
+ return ((((MAX_RX_FRAG * 2 + 2) + 3) / 4) * 4);
+}
+
+
+static int hp100_init_txpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u32 * pdlptr)
+{
+ if (0 != (((unsigned long) pdlptr) & 0xf))
+ printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr);
+
+ ringptr->pdl = pdlptr; /* +1; */
+ ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
+ ringptr->skb = (void *) NULL;
+
+ return ((((MAX_TX_FRAG * 2 + 2) + 3) / 4) * 4);
+}
+
+/*
+ * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes
+ * for possible odd word alignment rounding up to next dword and set PDL
+ * address for fragment#2
+ * Returns: 0 if unable to allocate skb_buff
+ * 1 if successful
+ */
+static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
+ struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+#ifdef HP100_DEBUG_BM
+ u_int *p;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4207, TRACE);
+ printk("hp100: %s: build rx pdl\n", dev->name);
+#endif
+
+ /* Allocate skb buffer of maximum size */
+ /* Note: This depends on the alloc_skb functions allocating more
+	 * space than requested, i.e. aligning to 16 bytes */
+
+ ringptr->skb = dev_alloc_skb(((MAX_ETHER_SIZE + 2 + 3) / 4) * 4);
+
+ if (NULL != ringptr->skb) {
+ /*
+ * Reserve 2 bytes at the head of the buffer to land the IP header
+ * on a long word boundary (According to the Network Driver section
+ * in the Linux KHG, this should help to increase performance.)
+ */
+ skb_reserve(ringptr->skb, 2);
+
+ ringptr->skb->dev = dev;
+ ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
+
+ /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
+ /* Note: 1st Fragment is used for the 4 byte packet status
+ * (receive header). Its PDL entries are set up by init_rxpdl. So
+ * here we only have to set up the PDL fragment entries for the data
+ * part. Those 4 bytes will be stored in the DMA memory region
+ * directly before the PDL.
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
+ dev->name, (u_int) ringptr->pdl,
+ ((MAX_ETHER_SIZE + 2 + 3) / 4) * 4,
+ (unsigned int) ringptr->skb->data);
+#endif
+
+ /* Conversion to new PCI API : map skbuf data to PCI bus.
+ * Doc says it's OK for EISA as well - Jean II */
+ ringptr->pdl[0] = 0x00020000; /* Write PDH */
+ ringptr->pdl[3] = pdl_map_data(netdev_priv(dev),
+ ringptr->skb->data);
+ ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */
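+		/* Resulting rx PDL layout (32-bit words), as set up here and in
+		 * hp100_init_rxpdl():
+		 *   pdl[0]         PDH, fragment count 2
+		 *   pdl[1], pdl[2] fragment 1: the 4-byte receive header, stored in
+		 *                  the word just before the PDH
+		 *   pdl[3], pdl[4] fragment 2: the packet data in the skb buffer
+		 */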
+
+#ifdef HP100_DEBUG_BM
+ for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
+ printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
+#endif
+ return (1);
+ }
+ /* else: */
+ /* alloc_skb failed (no memory) -> still can receive the header
+	/* alloc_skb failed (no memory) -> we can still receive the header
+	 * fragment into PDL memory. Make the PDL safe by clearing msgptr and
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl);
+#endif
+
+ ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
+
+ return (0);
+}
+
+/*
+ * hp100_rxfill - attempt to fill the Rx Ring with empty skb's
+ *
+ * Makes assumption that skb's are always contiguous memory areas and
+ * therefore PDLs contain only 2 physical fragments.
+ * - While the number of Rx PDLs with buffers is less than maximum
+ * a. Get a maximum packet size skb
+ * b. Put the physical address of the buffer into the PDL.
+ * c. Output physical address of PDL to adapter.
+ */
+static void hp100_rxfill(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4208, TRACE);
+ printk("hp100: %s: rxfill\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+
+ while (lp->rxrcommit < MAX_RX_PDL) {
+ /*
+ ** Attempt to get a buffer and build a Rx PDL.
+ */
+ ringptr = lp->rxrtail;
+ if (0 == hp100_build_rx_pdl(ringptr, dev)) {
+ return; /* None available, return */
+ }
+
+ /* Hand this PDL over to the card */
+ /* Note: This needs performance page selected! */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
+ dev->name, lp->rxrcommit, (u_int) ringptr->pdl,
+ (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]);
+#endif
+
+ hp100_outl((u32) ringptr->pdl_paddr, RX_PDA);
+
+ lp->rxrcommit += 1;
+ lp->rxrtail = ringptr->next;
+ }
+}
+
+/*
+ * BM_shutdown - shutdown bus mastering and leave chip in reset state
+ */
+
+static void hp100_BM_shutdown(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ unsigned long time;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4209, TRACE);
+ printk("hp100: %s: bm shutdown\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */
+
+ /* Ensure Interrupts are off */
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+ /* Disable all MAC activity */
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
+
+ /* If cascade MMU is not already in reset */
+ if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+ /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
+ * MMU pointers will not be reset out from underneath
+ */
+ hp100_page(MAC_CTRL);
+ for (time = 0; time < 5000; time++) {
+ if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE))
+ break;
+ }
+
+ /* Shutdown algorithm depends on the generation of Cascade */
+ if (lp->chip == HP100_CHIPID_LASSEN) { /* ETR shutdown/reset */
+ /* Disable Busmaster mode and wait for bit to go to zero. */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ /* 100 ms timeout */
+ for (time = 0; time < 32000; time++) {
+ if (0 == (hp100_inb(BM) & HP100_BM_MASTER))
+ break;
+ }
+ } else { /* Shasta or Rainier Shutdown/Reset */
+ /* To ensure all bus master inloading activity has ceased,
+ * wait for no Rx PDAs or no Rx packets on card.
+ */
+ hp100_page(PERFORMANCE);
+ /* 100 ms timeout */
+ for (time = 0; time < 10000; time++) {
+ /* RX_PDL: PDLs not executed. */
+ /* RX_PKT_CNT: RX'd packets on card. */
+ if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0))
+ break;
+ }
+
+ if (time >= 10000)
+ printk("hp100: %s: BM shutdown error.\n", dev->name);
+
+ /* To ensure all bus master outloading activity has ceased,
+ * wait until the Tx PDA count goes to zero or no more Tx space
+ * available in the Tx region of the card.
+ */
+ /* 100 ms timeout */
+ for (time = 0; time < 10000; time++) {
+ if ((0 == hp100_inb(TX_PKT_CNT)) &&
+ (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE)))
+ break;
+ }
+
+ /* Disable Busmaster mode */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ } /* end of shutdown procedure for non-etr parts */
+
+ hp100_cascade_reset(dev, 1);
+ }
+ hp100_page(PERFORMANCE);
+ /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
+ /* Busmaster mode should be shut down now. */
+}
+
+static int hp100_check_lan(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+
+ if (lp->lan_type < 0) { /* no LAN type detected yet? */
+ hp100_stop_interface(dev);
+ if ((lp->lan_type = hp100_sense_lan(dev)) < 0) {
+ printk("hp100: %s: no connection found - check wire\n", dev->name);
+ hp100_start_interface(dev); /* 10Mb/s RX packets maybe handled */
+ return -EIO;
+ }
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */
+ hp100_start_interface(dev);
+ }
+ return 0;
+}
+
+/*
+ * transmit functions
+ */
+
+/* tx function for busmaster mode */
+static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4210, TRACE);
+ printk("hp100: %s: start_xmit_bm\n", dev->name);
+#endif
+
+ if (skb == NULL) {
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ /* Get Tx ring tail pointer */
+ if (lp->txrtail->next == lp->txrhead) {
+ /* No memory. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
+#endif
+ /* not waited long enough since last tx? */
+ if (jiffies - dev->trans_start < HZ)
+ return -EAGAIN;
+
+ if (hp100_check_lan(dev))
+ return -EIO;
+
+ if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+ hp100_stop_interface(dev);
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ i = hp100_sense_lan(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (i == HP100_LAN_ERR)
+ printk("hp100: %s: link down detected\n", dev->name);
+ else if (lp->lan_type != i) { /* cable change! */
+ /* it's very hard - all network settings must be changed!!! */
+ printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+ lp->lan_type = i;
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ printk("hp100: %s: interface reset\n", dev->name);
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ }
+ }
+
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ /*
+ * we have to turn int's off before modifying this, otherwise
+ * a tx_pdl_cleanup could occur at the same time
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ ringptr = lp->txrtail;
+ lp->txrtail = ringptr->next;
+
+ /* Check whether packet has minimal packet size */
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ ringptr->skb = skb;
+ ringptr->pdl[0] = ((1 << 16) | i); /* PDH: 1 Fragment & length */
+ if (lp->chip == HP100_CHIPID_SHASTA) {
+		/* TODO: Could someone who has the EISA card please check if this works? */
+ ringptr->pdl[2] = i;
+ } else { /* Lassen */
+ /* In the PDL, don't use the padded size but the real packet size: */
+ ringptr->pdl[2] = skb->len; /* 1st Frag: Length of frag */
+ }
+ /* Conversion to new PCI API : map skbuf data to PCI bus.
+ * Doc says it's OK for EISA as well - Jean II */
+ ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE)); /* 1st Frag: Adr. of data */
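+	/* Resulting tx PDL (32-bit words): pdl[0] = PDH with fragment count 1 and
+	 * the (possibly padded) total length, pdl[1] = bus address of the packet
+	 * data, pdl[2] = fragment length (also used later to unmap the buffer in
+	 * hp100_clean_txring()). */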
+
+ /* Hand this PDL to the card. */
+ hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
+
+ lp->txrcommit++;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Update statistics */
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of committed packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+static void hp100_clean_txring(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int donecount;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4211, TRACE);
+ printk("hp100: %s: clean txring\n", dev->name);
+#endif
+
+ /* How many PDLs have been transmitted? */
+ donecount = (lp->txrcommit) - hp100_inb(TX_PDL);
+
+#ifdef HP100_DEBUG
+ if (donecount > MAX_TX_PDL)
+		printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
+#endif
+
+ for (; 0 != donecount; donecount--) {
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
+ dev->name, (u_int) lp->txrhead->skb->data,
+ lp->txrcommit, hp100_inb(TX_PDL), donecount);
+#endif
+ /* Conversion to new PCI API : NOP */
+ pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->txrhead->skb);
+ lp->txrhead->skb = (void *) NULL;
+ lp->txrhead = lp->txrhead->next;
+ lp->txrcommit--;
+ }
+}
+
+/* tx function for slave modes */
+static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4212, TRACE);
+ printk("hp100: %s: start_xmit\n", dev->name);
+#endif
+
+ if (skb == NULL) {
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (hp100_check_lan(dev))
+ return -EIO;
+
+ /* If there is not enough free memory on the card... */
+ i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
+ if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
+#endif
+ /* not waited long enough since last failed tx try? */
+ if (jiffies - dev->trans_start < HZ) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: trans_start timing problem\n",
+ dev->name);
+#endif
+ return -EAGAIN;
+ }
+ if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+ hp100_stop_interface(dev);
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ i = hp100_sense_lan(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (i == HP100_LAN_ERR)
+ printk("hp100: %s: link down detected\n", dev->name);
+ else if (lp->lan_type != i) { /* cable change! */
+				/* it's very hard - all network settings must be changed!!! */
+ printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+ lp->lan_type = i;
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ printk("hp100: %s: interface reset\n", dev->name);
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ mdelay(1);
+ }
+ }
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: busy\n", dev->name);
+#endif
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off();
+ val = hp100_inw(IRQ_STATUS);
+	/* Ack / clear the TX_COMPLETE interrupt - this interrupt is set
+ * when the current packet being transmitted on the wire is completed. */
+ hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS);
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",
+ dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
+#endif
+
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ hp100_outw(i, DATA32); /* tell card the total packet length */
+ hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */
+
+ if (lp->mode == 2) { /* memory mapped */
+ if (lp->mem_ptr_virt) { /* high pci memory was remapped */
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
+ if (!ok_flag)
+ memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
+ } else {
+ /* Note: The J2585B needs alignment to 32bits here! */
+ isa_memcpy_toio(lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3);
+ if (!ok_flag)
+ isa_memset_io(lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len);
+ }
+ } else { /* programmed i/o */
+ outsl(ioaddr + HP100_REG_DATA32, skb->data,
+ (skb->len + 3) >> 2);
+ if (!ok_flag)
+ for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
+ hp100_outl(0, DATA32);
+ }
+
+ hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */
+
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb_any(skb);
+
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: end\n", dev->name);
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when a "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transferring
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+static void hp100_rx(struct net_device *dev)
+{
+ int packets, pkt_len;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ u_int header;
+ struct sk_buff *skb;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4213, TRACE);
+ printk("hp100: %s: rx\n", dev->name);
+#endif
+
+	/* First get an indication of a received lan packet. */
+	/* RX_PKT_CNT indicates the number of packets which have been fully */
+	/* received onto the card but have not yet been fully transferred off the card. */
+ packets = hp100_inb(RX_PKT_CNT);
+#ifdef HP100_DEBUG_RX
+ if (packets > 1)
+ printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets);
+#endif
+
+ while (packets-- > 0) {
+ /* If ADV_NXT_PKT is still set, we have to wait until the card has */
+ /* really advanced to the next packet. */
+ for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) {
+#ifdef HP100_DEBUG_RX
+ printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets);
+#endif
+ }
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+ if (lp->mode == 2) { /* memory mapped mode */
+ if (lp->mem_ptr_virt) /* if memory was remapped */
+ header = readl(lp->mem_ptr_virt);
+ else
+ header = isa_readl(lp->mem_ptr_phys);
+ } else /* programmed i/o */
+ header = hp100_inl(DATA32);
+
+ pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
+
+#ifdef HP100_DEBUG_RX
+ printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name, header & HP100_PKT_LEN_MASK,
+ (header >> 16) & 0xfff8, (header >> 16) & 7);
+#endif
+
+ /* Now we allocate the skb and transfer the data into it. */
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) { /* Not enough memory->drop packet */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
+ dev->name, pkt_len);
+#endif
+ lp->stats.rx_dropped++;
+ } else { /* skb successfully allocated */
+
+ u_char *ptr;
+
+ skb_reserve(skb,2);
+ skb->dev = dev;
+
+ /* ptr to start of the sk_buff data area */
+ skb_put(skb, pkt_len);
+ ptr = skb->data;
+
+ /* Now transfer the data from the card into that area */
+ if (lp->mode == 2) {
+ if (lp->mem_ptr_virt)
+ memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
+ /* Note alignment to 32bit transfers */
+ else
+ isa_memcpy_fromio(ptr, lp->mem_ptr_phys, pkt_len);
+ } else /* io mapped */
+ insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef HP100_DEBUG_RX
+ printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ dev->name, ptr[0], ptr[1], ptr[2], ptr[3],
+ ptr[4], ptr[5], ptr[6], ptr[7], ptr[8],
+ ptr[9], ptr[10], ptr[11]);
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+
+ /* Indicate the card that we have got the packet */
+ hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW);
+
+ switch (header & 0x00070000) {
+ case (HP100_MULTI_ADDR_HASH << 16):
+ case (HP100_MULTI_ADDR_NO_HASH << 16):
+ lp->stats.multicast++;
+ break;
+ }
+ } /* end of while(there are packets) loop */
+#ifdef HP100_DEBUG_RX
+ printk("hp100_rx: %s: end\n", dev->name);
+#endif
+}
+
+/*
+ * Receive Function for Busmaster Mode
+ */
+static void hp100_rx_bm(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ptr;
+ u_int header;
+ int pkt_len;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4214, TRACE);
+ printk("hp100: %s: rx_bm\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if (0 == lp->rxrcommit) {
+ printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
+ return;
+ } else
+ /* RX_PKT_CNT states how many PDLs are currently formatted and available to
+		 * the card's BM engine */
+ if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
+			printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
+ dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
+ lp->rxrcommit);
+ return;
+ }
+#endif
+
+ while ((lp->rxrcommit > hp100_inb(RX_PDL))) {
+ /*
+		 * The packet was received into the pdl pointed to by lp->rxrhead
+		 * (the oldest pdl in the ring).
+ */
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+
+ ptr = lp->rxrhead;
+
+ header = *(ptr->pdl - 1);
+ pkt_len = (header & HP100_PKT_LEN_MASK);
+
+ /* Conversion to new PCI API : NOP */
+ pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name, (u_int) (ptr->pdl - 1), (u_int) header,
+ pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7);
+ printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
+ dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL),
+ hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl),
+ (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4));
+#endif
+
+ if ((pkt_len >= MIN_ETHER_SIZE) &&
+ (pkt_len <= MAX_ETHER_SIZE)) {
+ if (ptr->skb == NULL) {
+ printk("hp100: %s: rx_bm: skb null\n", dev->name);
+ /* can happen if we only allocated room for the pdh due to memory shortage. */
+ lp->stats.rx_dropped++;
+ } else {
+ skb_trim(ptr->skb, pkt_len); /* Shorten it */
+ ptr->skb->protocol =
+ eth_type_trans(ptr->skb, dev);
+
+ netif_rx(ptr->skb); /* Up and away... */
+
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+
+ switch (header & 0x00070000) {
+ case (HP100_MULTI_ADDR_HASH << 16):
+ case (HP100_MULTI_ADDR_NO_HASH << 16):
+ lp->stats.multicast++;
+ break;
+ }
+ } else {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len);
+#endif
+ if (ptr->skb != NULL)
+ dev_kfree_skb_any(ptr->skb);
+ lp->stats.rx_errors++;
+ }
+
+ lp->rxrhead = lp->rxrhead->next;
+
+ /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
+ if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) {
+ /* No space for skb, header can still be received. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
+#endif
+ return;
+ } else { /* successfully allocated new PDL - put it in ringlist at tail. */
+ hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA);
+ lp->rxrtail = lp->rxrtail->next;
+ }
+
+ }
+}
+
+/*
+ * statistics
+ */
+static struct net_device_stats *hp100_get_stats(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4215, TRACE);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ hp100_update_stats(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return &(lp->stats);
+}
+
+static void hp100_update_stats(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4216, TRACE);
+ printk("hp100: %s: update-stats\n", dev->name);
+#endif
+
+ /* Note: Statistics counters clear when read. */
+ hp100_page(MAC_CTRL);
+ val = hp100_inw(DROPPED) & 0x0fff;
+ lp->stats.rx_errors += val;
+ lp->stats.rx_over_errors += val;
+ val = hp100_inb(CRC);
+ lp->stats.rx_errors += val;
+ lp->stats.rx_crc_errors += val;
+ val = hp100_inb(ABORT);
+ lp->stats.tx_errors += val;
+ lp->stats.tx_aborted_errors += val;
+ hp100_page(PERFORMANCE);
+}
+
+static void hp100_misc_interrupt(struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4216, TRACE);
+ printk("hp100: %s: misc_interrupt\n", dev->name);
+#endif
+
+	/* Account a misc. error interrupt as both an rx and a tx error. */
+ lp->stats.rx_errors++;
+ lp->stats.tx_errors++;
+}
+
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
+{
+ unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4217, TRACE);
+	printk("hp100: clear_stats\n");
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_page(MAC_CTRL); /* get all statistics bytes */
+ hp100_inw(DROPPED);
+ hp100_inb(CRC);
+ hp100_inb(ABORT);
+ hp100_page(PERFORMANCE);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * multicast setup
+ */
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void hp100_set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4218, TRACE);
+ printk("hp100: %s: set_mc_list\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off();
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
+
+ if (dev->flags & IFF_PROMISC) {
+ lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
+ lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
+ memset(&lp->hash_bytes, 0xff, 8);
+ } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
+ lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
+#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
+ if (dev->flags & IFF_ALLMULTI) {
+ /* set hash filter to receive all multicast packets */
+ memset(&lp->hash_bytes, 0xff, 8);
+ } else {
+ int i, j, idx;
+ u_char *addrs;
+ struct dev_mc_list *dmi;
+
+ memset(&lp->hash_bytes, 0x00, 8);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+#endif
+ for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ addrs = dmi->dmi_addr;
+ if ((*addrs & 0x01) == 0x01) { /* multicast address? */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast = %02x:%02x:%02x:%02x:%02x:%02x, ",
+ dev->name, addrs[0], addrs[1], addrs[2],
+ addrs[3], addrs[4], addrs[5]);
+#endif
+					for (j = idx = 0; j < 6; j++) {
+						idx ^= *addrs++ & 0x3f;
+#ifdef HP100_DEBUG
+						printk(":%02x:", idx);
+#endif
+					}
+#ifdef HP100_DEBUG
+ printk("idx = %i\n", idx);
+#endif
+ lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
+ }
+ }
+ }
+#else
+ memset(&lp->hash_bytes, 0xff, 8);
+#endif
+ } else {
+ lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */
+ lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
+ memset(&lp->hash_bytes, 0x00, 8);
+ }
+
+ if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) ||
+ (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) {
+ int i;
+
+ hp100_outb(lp->mac2_mode, MAC_CFG_2);
+ hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1); /* clear mac1 mode bits */
+ hp100_orb(lp->mac1_mode, MAC_CFG_1); /* and set the new mode */
+
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 8; i++)
+ hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->mac1_mode, lp->mac2_mode,
+ lp->hash_bytes[0], lp->hash_bytes[1],
+ lp->hash_bytes[2], lp->hash_bytes[3],
+ lp->hash_bytes[4], lp->hash_bytes[5],
+ lp->hash_bytes[6], lp->hash_bytes[7]);
+#endif
+
+ if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+ }
+ } else {
+ int i;
+ u_char old_hash_bytes[8];
+
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 8; i++)
+ old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i);
+ if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) {
+ for (i = 0; i < 8; i++)
+ hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->hash_bytes[0],
+ lp->hash_bytes[1], lp->hash_bytes[2],
+ lp->hash_bytes[3], lp->hash_bytes[4],
+ lp->hash_bytes[5], lp->hash_bytes[6],
+ lp->hash_bytes[7]);
+#endif
+
+ if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+ }
+ }
+ }
+
+ hp100_page(MAC_CTRL);
+ hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1); /* enable tx */
+
+ hp100_page(PERFORMANCE);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/*
+ * hardware interrupt handling
+ */
+
+static irqreturn_t hp100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct hp100_private *lp = netdev_priv(dev);
+
+ int ioaddr;
+ u_int val;
+
+ if (dev == NULL)
+ return IRQ_NONE;
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ hp100_ints_off();
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4219, TRACE);
+#endif
+
+ /* hp100_page( PERFORMANCE ); */
+ val = hp100_inw(IRQ_STATUS);
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
+ dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT),
+ hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL));
+#endif
+
+ if (val == 0) { /* might be a shared interrupt */
+ spin_unlock(&lp->lock);
+ hp100_ints_on();
+ return IRQ_NONE;
+ }
+ /* We're only interested in those interrupts we really enabled. */
+ /* val &= hp100_inw( IRQ_MASK ); */
+
+ /*
+ * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
+ * is considered executed whenever the RX_PDL data structure is no longer
+ * needed.
+ */
+ if (val & HP100_RX_PDL_FILL_COMPL) {
+ if (lp->mode == 1)
+ hp100_rx_bm(dev);
+ else {
+ printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
+ }
+ }
+
+ /*
+ * The RX_PACKET interrupt is set when the receive packet counter is
+ * non-zero. We use this interrupt for receiving in slave mode. In
+ * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
+ * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
+ * we somehow have missed a rx_pdl_fill_compl interrupt.
+ */
+
+ if (val & HP100_RX_PACKET) { /* Receive Packet Counter is non-zero */
+ if (lp->mode != 1) /* non busmaster */
+ hp100_rx(dev);
+ else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
+ /* Shouldn't happen - maybe we missed a RX_PDL_FILL_COMPL interrupt? */
+ hp100_rx_bm(dev);
+ }
+ }
+
+ /*
+ * Acknowledge that we have noticed the interrupt and thereby allow the next one.
+ * Note that this is now done after the slave rx function, since first
+ * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
+ * on the J2573.
+ */
+ hp100_outw(val, IRQ_STATUS);
+
+ /*
+ * RX_ERROR is set when a packet is dropped due to no memory resources on
+ * the card or when a RCV_ERR occurs.
+ * TX_ERROR is set when a TX_ABORT condition occurs in the MAC; it exists
+ * only in the 802.3 MAC and happens when 16 collisions occur during a TX.
+ */
+ if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
+#endif
+ hp100_update_stats(dev);
+ if (lp->mode == 1) {
+ hp100_rxfill(dev);
+ hp100_clean_txring(dev);
+ }
+ }
+
+ /*
+ * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
+ */
+ if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO)))
+ hp100_rxfill(dev);
+
+ /*
+ * The HP100_TX_COMPLETE interrupt occurs when transmission of a packet
+ * on the wire has completed.
+ */
+ if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE)))
+ hp100_clean_txring(dev);
+
+ /*
+ * MISC_ERROR is set when either the LAN link goes down or a bus error
+ * is detected.
+ */
+ if (val & HP100_MISC_ERROR) { /* New for J2585B */
+#ifdef HP100_DEBUG_IRQ
+ printk
+ ("hp100: %s: Misc. Error Interrupt - Check cabling.\n",
+ dev->name);
+#endif
+ if (lp->mode == 1) {
+ hp100_clean_txring(dev);
+ hp100_rxfill(dev);
+ }
+ hp100_misc_interrupt(dev);
+ }
+
+ spin_unlock(&lp->lock);
+ hp100_ints_on();
+ return IRQ_HANDLED;
+}
+
+/*
+ * some misc functions
+ */
+
+static void hp100_start_interface(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4220, TRACE);
+ printk("hp100: %s: hp100_start_interface\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Ensure the adapter has no interrupt pending before enabling the IRQ */
+ /* line to be active on the bus (i.e. before un-tri-stating it) */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */
+ hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB,
+ OPTION_LSW);
+ /* Un-tri-state the interrupt line. TODO: check whether shared interrupts can be supported. */
+ hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) {
+ /* Make sure BM bit is set... */
+ hp100_page(HW_MAP);
+ hp100_orb(HP100_BM_MASTER, BM);
+ hp100_rxfill(dev);
+ } else if (lp->mode == 2) {
+ /* Enable memory mapping. Note: Don't do this when busmaster. */
+ hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+ }
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+ /* enable a few interrupts: */
+ if (lp->mode == 1) { /* busmaster mode */
+ hp100_outw(HP100_RX_PDL_FILL_COMPL |
+ HP100_RX_PDA_ZERO | HP100_RX_ERROR |
+ /* HP100_RX_PACKET | */
+ /* HP100_RX_EARLY_INT | */ HP100_SET_HB |
+ /* HP100_TX_PDA_ZERO | */
+ HP100_TX_COMPLETE |
+ /* HP100_MISC_ERROR | */
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+ } else {
+ hp100_outw(HP100_RX_PACKET |
+ HP100_RX_ERROR | HP100_SET_HB |
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+ }
+
+ /* Note : before hp100_set_multicast_list(), because it will play with
+ * spinlock itself... Jean II */
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Enable MAC Tx and RX, set MAC modes, ... */
+ hp100_set_multicast_list(dev);
+}
+
+static void hp100_stop_interface(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ u_int val;
+
+#ifdef HP100_DEBUG_B
+ printk("hp100: %s: hp100_stop_interface\n", dev->name);
+ hp100_outw(0x4221, TRACE);
+#endif
+
+ if (lp->mode == 1)
+ hp100_BM_shutdown(dev);
+ else {
+ /* Note: MMAP_DIS will be reenabled by start_interface */
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB |
+ HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB,
+ OPTION_LSW);
+ val = hp100_inw(OPTION_LSW);
+
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+
+ if (!(val & HP100_HW_RST))
+ return; /* if the card is in reset, return immediately ... */
+ /* ... otherwise busy-wait until rx/tx are idle */
+ for (val = 0; val < 6000; val++)
+ if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) {
+ hp100_page(PERFORMANCE);
+ return;
+ }
+ printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name);
+ hp100_page(PERFORMANCE);
+ }
+}
+
+static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr)
+{
+ int i;
+ int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4222, TRACE);
+#endif
+
+ hp100_page(EEPROM_CTRL);
+ hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL);
+ hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL);
+ for (i = 0; i < 10000; i++)
+ if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD))
+ return;
+ printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name);
+}
+
+/* Sense connection status.
+ * return values: LAN_10 - Connected to 10Mbit/s network
+ * LAN_100 - Connected to 100Mbit/s network
+ * LAN_COAX - AUI/BNC connector selected
+ * LAN_ERR - not connected or 100Mbit/s Hub down
+ */
+static int hp100_sense_lan(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u_short val_VG, val_10;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4223, TRACE);
+#endif
+
+ hp100_page(MAC_CTRL);
+ val_10 = hp100_inb(10_LAN_CFG_1);
+ val_VG = hp100_inb(VG_LAN_CFG_1);
+ hp100_page(PERFORMANCE);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n",
+ dev->name, val_VG, val_10);
+#endif
+
+ if (val_10 & HP100_LINK_BEAT_ST) /* 10Mb connection is active */
+ return HP100_LAN_10;
+
+ if (val_10 & HP100_AUI_ST) { /* do we have BNC or AUI onboard? */
+ /*
+ * This can be overridden by the DOS configuration utility, so if it has
+ * no effect, you may need to download that utility from HP and set the
+ * card back to "auto detect".
+ */
+ val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
+ hp100_page(MAC_CTRL);
+ hp100_outb(val_10, 10_LAN_CFG_1);
+ hp100_page(PERFORMANCE);
+ return HP100_LAN_COAX;
+ }
+
+ /* These cards don't have a 100 Mbit connector */
+ if (!strcmp(lp->id, "HWP1920") ||
+ (lp->pci_dev &&
+ lp->pci_dev->vendor == PCI_VENDOR_ID_HP &&
+ (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A ||
+ lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A)))
+ return HP100_LAN_ERR;
+
+ if (val_VG & HP100_LINK_CABLE_ST) /* Can hear the hub's tones. */
+ return HP100_LAN_100;
+ return HP100_LAN_ERR;
+}
+
+static int hp100_down_vg_link(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long time;
+ long savelan, newlan;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4224, TRACE);
+ printk("hp100: %s: down_vg_link\n", dev->name);
+#endif
+
+ hp100_page(MAC_CTRL);
+ time = jiffies + (HZ / 4);
+ do {
+ if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+ if (time_after_eq(jiffies, time)) /* no signal->no logout */
+ return 0;
+
+ /* Drop the VG Link by clearing the link up cmd and load addr. */
+
+ hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1);
+ hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1);
+
+ /* Conditionally stall for >250ms on Link-Up Status (to go down) */
+ time = jiffies + (HZ / 2);
+ do {
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+#ifdef HP100_DEBUG
+ if (time_after_eq(jiffies, time))
+ printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
+#endif
+
+ /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
+ /* logout under traffic (even though all the status bits are cleared), */
+ /* do this workaround to get the Rev 1 MAC in its idle state */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Reset VG MAC to ensure it leaves the logoff state even if */
+ /* the Hub is still emitting tones */
+ hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
+ udelay(1500); /* wait for >1ms */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */
+ udelay(1500);
+ }
+
+ /* New: For Lassen, switch to the 10 Mbps MAC briefly to clear the training ACK */
+ /* and get the VG MAC fully reset. This is not required with later chips. */
+ /* Note: It will take between 1 and 2 seconds for the VG MAC to be */
+ /* selected again! This is left to the connect-hub function to */
+ /* perform if desired. */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Have to write to 10 and 100VG control registers simultaneously */
+ savelan = newlan = hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */
+ newlan &= ~(HP100_VG_SEL << 16);
+ newlan |= (HP100_DOT3_MAC) << 8;
+ hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */
+ hp100_outl(newlan, 10_LAN_CFG_1);
+
+ /* Conditionally stall for 5sec on VG selected. */
+ time = jiffies + (HZ * 5);
+ do {
+ if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+ hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
+ hp100_outl(savelan, 10_LAN_CFG_1);
+ }
+
+ time = jiffies + (3 * HZ); /* Timeout 3s */
+ do {
+ if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+ if (time_before_eq(time, jiffies)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: down_vg_link: timeout\n", dev->name);
+#endif
+ return -EIO;
+ }
+
+ time = jiffies + (2 * HZ); /* This seems to take a while.... */
+ do {
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+ return 0;
+}
+
+static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ u_short val = 0;
+ unsigned long time;
+ int startst;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4225, TRACE);
+ printk("hp100: %s: login_to_vg_hub\n", dev->name);
+#endif
+
+ /* Initiate a login sequence iff VG MAC is enabled and either Load Address
+ * bit is zero or the force relogin flag is set (e.g. due to MAC address or
+ * promiscuous mode change)
+ */
+ hp100_page(MAC_CTRL);
+ startst = hp100_inb(VG_LAN_CFG_1);
+ if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Start training\n", dev->name);
+#endif
+
+ /* Ensure VG Reset bit is 1 (i.e., do not reset) */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* If Lassen AND auto-select-mode AND VG tones were sensed on */
+ /* entry then temporarily put them into force 100Mbit mode */
+ if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST))
+ hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2);
+
+ /* Drop the VG link by zeroing Link Up Command and Load Address */
+ hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1);
+
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Bring down the link\n", dev->name);
+#endif
+
+ /* Wait for link to drop */
+ time = jiffies + (HZ / 10);
+ do {
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+
+ /* Start an addressed training and optionally request promiscuous port */
+ if ((dev->flags) & IFF_PROMISC) {
+ hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2);
+ if (lp->chip == HP100_CHIPID_LASSEN)
+ hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+ } else {
+ hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2);
+ /* For ETR parts we need to reset the promiscuous bit in the training
+ * register, otherwise promiscuous mode won't be disabled.
+ */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+ }
+ }
+
+ /* With ETR parts, frame format request bits can be set. */
+ if (lp->chip == HP100_CHIPID_LASSEN)
+ hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
+
+ hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* Note: Next wait could be omitted for Hood and earlier chips under */
+ /* certain circumstances */
+ /* TODO: check if hood/earlier and skip wait. */
+
+ /* Wait either a short timeout for VG tones or a long one for login. */
+ /* Wait for the card hardware to signal that link cable status is ok... */
+ hp100_page(MAC_CTRL);
+ time = jiffies + (1 * HZ); /* 1 sec timeout for cable st */
+ do {
+ if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+ break;
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_before(jiffies, time));
+
+ if (time_after_eq(jiffies, time)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name);
+#endif
+ } else {
+#ifdef HP100_DEBUG_TRAINING
+ printk
+ ("hp100: %s: HUB tones detected. Trying to train.\n",
+ dev->name);
+#endif
+
+ time = jiffies + (2 * HZ); /* again a timeout */
+ do {
+ val = hp100_inb(VG_LAN_CFG_1);
+ if ((val & (HP100_LINK_UP_ST))) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Passed training.\n", dev->name);
+#endif
+ break;
+ }
+ if (!in_interrupt()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ } while (time_after(time, jiffies));
+ }
+
+ /* If LINK_UP_ST is set, then we are logged into the hub. */
+ if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Successfully logged into the HUB.\n", dev->name);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ val = hp100_inw(TRAIN_ALLOW);
+ printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ",
+ dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre");
+ printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre");
+ printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3");
+ }
+#endif
+ } else {
+ /* If LINK_UP_ST is not set, login was not successful */
+ printk("hp100: %s: Problem logging into the HUB.\n", dev->name);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Check allowed Register to find out why there is a problem. */
+ val = hp100_inw(TRAIN_ALLOW); /* won't work on non-ETR card */
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
+#endif
+ if (val & HP100_MALLOW_ACCDENIED)
+ printk("hp100: %s: HUB access denied.\n", dev->name);
+ if (val & HP100_MALLOW_CONFIGURE)
+ printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
+ if (val & HP100_MALLOW_DUPADDR)
+ printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
+ }
+ }
+
+ /* If we have put the chip into forced 100 Mbit mode earlier, go back */
+ /* to auto-select mode */
+
+ if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) {
+ hp100_page(MAC_CTRL);
+ hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2);
+ }
+
+ val = hp100_inb(VG_LAN_CFG_1);
+
+ /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
+ hp100_page(PERFORMANCE);
+ hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
+
+ if (val & HP100_LINK_UP_ST)
+ return 0; /* login was ok */
+ else {
+ printk("hp100: %s: Training failed.\n", dev->name);
+ hp100_down_vg_link(dev);
+ return -EIO;
+ }
+ }
+ /* No forced relogin and the link is already up -> no training needed. */
+ return -EIO;
+}
+
+static void hp100_cascade_reset(struct net_device *dev, u_short enable)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4226, TRACE);
+ printk("hp100: %s: cascade_reset\n", dev->name);
+#endif
+
+ if (enable) {
+ hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Lassen requires a PCI transmit fifo reset */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+ hp100_orb(HP100_PCI_RESET, PCICTRL2);
+ /* Wait for min. 300 ns */
+ /* we can't use jiffies here, because it may be */
+ /* that we have disabled the timer... */
+ udelay(400);
+ hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+ hp100_page(PERFORMANCE);
+ }
+ } else { /* bring out of reset */
+ hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW);
+ udelay(400);
+ hp100_page(PERFORMANCE);
+ }
+}
+
+#ifdef HP100_DEBUG
+void hp100_RegisterDump(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int Page;
+ int Register;
+
+ /* Dump common registers */
+ printk("hp100: %s: Cascade Register Dump\n", dev->name);
+ printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID));
+ printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING));
+ printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW));
+ printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW));
+
+ /* Dump paged registers */
+ for (Page = 0; Page < 8; Page++) {
+ /* Dump registers */
+ printk("page: 0x%.2x\n", Page);
+ outw(Page, ioaddr + 0x02);
+ for (Register = 0x8; Register < 0x22; Register += 2) {
+ /* Display Register contents except data port */
+ if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) {
+ printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register));
+ }
+ }
+ }
+ hp100_page(PERFORMANCE);
+}
+#endif
+
+
+static void cleanup_dev(struct net_device *d)
+{
+ struct hp100_private *p = netdev_priv(d);
+
+ unregister_netdev(d);
+ release_region(d->base_addr, HP100_REGION_SIZE);
+
+ if (p->mode == 1) /* busmaster */
+ pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
+ p->page_vaddr_algn,
+ virt_to_whatever(d, p->page_vaddr_algn));
+ if (p->mem_ptr_virt)
+ iounmap(p->mem_ptr_virt);
+
+ free_netdev(d);
+}
+
+#ifdef CONFIG_EISA
+static int __init hp100_eisa_probe (struct device *gendev)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+ struct eisa_device *edev = to_eisa_device(gendev);
+ int err;
+
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &edev->dev);
+
+ err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL);
+ if (err)
+ goto out1;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
+ dev->base_addr);
+#endif
+ gendev->driver_data = dev;
+ return 0;
+ out2:
+ release_region(dev->base_addr, HP100_REGION_SIZE);
+ out1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit hp100_eisa_remove (struct device *gendev)
+{
+ struct net_device *dev = gendev->driver_data;
+ cleanup_dev(dev);
+ return 0;
+}
+
+static struct eisa_driver hp100_eisa_driver = {
+ .id_table = hp100_eisa_tbl,
+ .driver = {
+ .name = "hp100",
+ .probe = hp100_eisa_probe,
+ .remove = __devexit_p (hp100_eisa_remove),
+ }
+};
+#endif
+
+#ifdef CONFIG_PCI
+static int __devinit hp100_pci_probe (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ int ioaddr;
+ u_short pci_command;
+ int err;
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct hp100_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto out0;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if (!(pci_command & PCI_COMMAND_IO)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name);
+#endif
+ pci_command |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+
+ if (!(pci_command & PCI_COMMAND_MASTER)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name);
+#endif
+ pci_command |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+
+ ioaddr = pci_resource_start(pdev, 0);
+ err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev);
+ if (err)
+ goto out1;
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr);
+#endif
+ pci_set_drvdata(pdev, dev);
+ return 0;
+ out2:
+ release_region(dev->base_addr, HP100_REGION_SIZE);
+ out1:
+ free_netdev(dev);
+ out0:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit hp100_pci_remove (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ cleanup_dev(dev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_driver hp100_pci_driver = {
+ .name = "hp100",
+ .id_table = hp100_pci_tbl,
+ .probe = hp100_pci_probe,
+ .remove = __devexit_p(hp100_pci_remove),
+};
+#endif
+
+/*
+ * module section
+ */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>, "
+ "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>");
+MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters");
+
+/*
+ * Note: to register three isa devices, use:
+ * option hp100 hp100_port=0,0,0
+ * to register one card at io 0x280 as eth239, use:
+ * option hp100 hp100_port=0x280
+ */
+#if defined(MODULE) && defined(CONFIG_ISA)
+#define HP100_DEVICES 5
+/* Parameters set by insmod */
+static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 };
+module_param_array(hp100_port, int, NULL, 0);
+
+/* List of devices */
+static struct net_device *hp100_devlist[HP100_DEVICES];
+
+static int __init hp100_isa_init(void)
+{
+ struct net_device *dev;
+ int i, err, cards = 0;
+
+ /* Don't autoprobe ISA bus */
+ if (hp100_port[0] == 0)
+ return -ENODEV;
+
+ /* Loop on all possible base addresses */
+ for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
+ dev = alloc_etherdev(sizeof(struct hp100_private));
+ if (!dev) {
+ printk(KERN_WARNING "hp100: no memory for network device\n");
+ while (cards > 0)
+ cleanup_dev(hp100_devlist[--cards]);
+
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+
+ err = hp100_isa_probe(dev, hp100_port[i]);
+ if (!err) {
+ err = register_netdev(dev);
+ if (!err)
+ hp100_devlist[cards++] = dev;
+ else
+ release_region(dev->base_addr, HP100_REGION_SIZE);
+ }
+
+ if (err)
+ free_netdev(dev);
+ }
+
+ return cards > 0 ? 0 : -ENODEV;
+}
+
+static void __exit hp100_isa_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < HP100_DEVICES; i++) {
+ struct net_device *dev = hp100_devlist[i];
+ if (dev)
+ cleanup_dev(dev);
+ }
+}
+#else
+#define hp100_isa_init() (0)
+#define hp100_isa_cleanup() do { } while(0)
+#endif
+
+static int __init hp100_module_init(void)
+{
+ int err;
+
+ err = hp100_isa_init();
+ if (err && err != -ENODEV)
+ goto out;
+#ifdef CONFIG_EISA
+ err = eisa_driver_register(&hp100_eisa_driver);
+ if (err && err != -ENODEV)
+ goto out2;
+#endif
+#ifdef CONFIG_PCI
+ err = pci_module_init(&hp100_pci_driver);
+ if (err && err != -ENODEV)
+ goto out3;
+#endif
+ out:
+ return err;
+ out3:
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&hp100_eisa_driver);
+ out2:
+#endif
+ hp100_isa_cleanup();
+ goto out;
+}
+
+
+static void __exit hp100_module_exit(void)
+{
+ hp100_isa_cleanup();
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&hp100_eisa_driver);
+#endif
+#ifdef CONFIG_PCI
+ pci_unregister_driver (&hp100_pci_driver);
+#endif
+}
+
+module_init(hp100_module_init)
+module_exit(hp100_module_exit)
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp100.c"
+ * c-indent-level: 2
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/hp100.h b/drivers/net/hp100.h
new file mode 100644
index 000000000000..236d945987af
--- /dev/null
+++ b/drivers/net/hp100.h
@@ -0,0 +1,615 @@
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $
+ *
+ * Authors: Jaroslav Kysela, <perex@pf.jcu.cz>
+ * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ */
+
+/****************************************************************************
+ * Hardware Constants
+ ****************************************************************************/
+
+/*
+ * Page Identifiers
+ * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
+#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
+#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
+#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
+
+
+/* Registers that are present on all pages */
+
+#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
+#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
+#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
+ /* W: (16),3:0 Switch pages */
+#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
+#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
+
+/* Page 0 - Performance */
+
+#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
+#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
+#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */
+/* Note: For 32 bit systems, fragment len and offset registers are available */
+/* at offset 0x28 and 0x2c, where they can be written as 32bit values. */
+#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
+#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
+#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
+#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */
+#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */
+#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
+#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
+#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */
+#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */
+#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */
+ /* which point to a PDL */
+#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */
+#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */
+#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */
+#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */
+#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */
+#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
+#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */
+
+/* Page 1 - MAC Address/Hash Table */
+
+#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
+#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
+
+/* Page 2 - Hardware Mapping */
+
+#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
+#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
+#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
+#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
+#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
+#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
+
+/* New on Page 2 for ETR chips: */
+#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */
+#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */
+#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */
+#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */
+#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */
+#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */
+#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */
+#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
+#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
+
+/* Page 3 - EEPROM/Boot ROM */
+
+#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
+#define HP100_REG_BOOTROM_CTRL 0x0a
+
+/* Page 4 - LAN Configuration (MAC_CTRL) */
+
+#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */
+#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */
+#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
+#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
+#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts cant fit in mem */
+#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
+#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
+#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */
+#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */
+
+/* Page 5 - MMU */
+
+#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
+#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
+#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */
+#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */
+
+/* Page 6 - Card ID/Physical LAN Address */
+
+#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
+#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
+#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
+#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
+#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
+
+/* Page 7 - MMU Current Pointers */
+
+#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */
+#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */
+#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_RPDLSTART 0x10
+#define HP100_REG_PTR_RPDLEND 0x12
+#define HP100_REG_PTR_RINGPTRS 0x14
+#define HP100_REG_PTR_MEMDEBUG 0x1a
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
+ */
+#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */
+
+/*
+ * Hardware ID Register 2 & Paging Register
+ * (Always available, PAGING, Offset 0x02)
+ * Bits 15:4 are for the Chip ID
+ */
+#define HP100_CHIPID_MASK 0xFFF0
+#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */
+ /* EISA BM/SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM, */
+ /* PCI SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */
+ /* LRF supported */
+
+/*
+ * Option Registers I and II
+ * (Always available, OPTION_LSW, Offset 0x04-0x05)
+ */
+#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */
+#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */
+ /* system mem. before Rx interrupt */
+#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */
+ /* MMAP_DIS must be 0 and MEM_EN */
+ /* must be 1 for memory-mapped */
+ /* mode to be enabled */
+#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN 0x0040 /* Config program set this to */
+ /* 0:Disable, 1:Enable mem map. */
+ /* See MMAP_DIS. */
+#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */
+#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */
+#define HP100_FAKE_INT 0x0008 /* 1:int */
+#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */
+#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
+ /* NIC reset on 0 to 1 transition */
+
+/*
+ * Option Register III
+ * (Always available, OPTION_MSW, Offset 0x06)
+ */
+#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */
+ /* h/w will set to 0 when done */
+#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */
+ /* will set to 0 when done */
+
+/*
+ * Interrupt Status Registers I and II
+ * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
+ * Note: With old chips, these registers clear when 1 is written to them;
+ * with new chips this depends on the setting of CLR_ISMODE
+ */
+#define HP100_RX_EARLY_INT 0x2000
+#define HP100_RX_PDA_ZERO 0x1000
+#define HP100_RX_PDL_FILL_COMPL 0x0800
+#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */
+#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
+#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error */
+#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * Xmit Memory Free Count
+ * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
+ */
+#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */
+#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */
+
+/*
+ * IRQ Channel
+ * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
+ */
+#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */
+#define HP100_IRQ_SCRAMBLE 0x40
+#define HP100_BOND_HP 0x20
+#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
+ /* (Only valid on EISA cards) */
+#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */
+
+/*
+ * SRAM Parameters
+ * (Page HW_MAP, SRAM, Offset 0x0e)
+ */
+#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits) */
+
+/*
+ * Bus Master Register
+ * (Page HW_MAP, BM, Offset 0x0f)
+ */
+#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (tx) */
+#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (rx) */
+#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */
+#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in */
+ /* an EISA system */
+#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */
+
+
+/*
+ * Mode Control Register I
+ * (Page HW_MAP, MODECTRL1, Offset0x10)
+ */
+#define HP100_TX_DUALQ 0x10
+ /* If set and BM -> dual tx pda queues */
+#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */
+ /* interrupts on read (etr only?) */
+#define HP100_EE_NOLOAD 0x04 /* Status of whether resources will */
+ /* be loaded from the EEPROM */
+#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */
+#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */
+ /* first three data elements of a PDL */
+ /* on the first access. */
+#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */
+
+/*
+ * Mode Control Register II
+ * (Page HW_MAP, MODECTRL2, Offset0x11)
+ */
+#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */
+ /* certain resources */
+#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */
+#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */
+ /* written back to system mem */
+#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */
+ /* interrupt */
+
+/*
+ * PCI Configuration and Control Register I
+ * (Page HW_MAP, PCICTRL1, Offset 0x12)
+ */
+#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */
+#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */
+ /* bios */
+#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */
+ /* simultaneously with PCI decodes */
+#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */
+#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */
+
+/*
+ * PCI Configuration and Control Register II
+ * (Page HW_MAP, PCICTRL2, Offset 0x13)
+ */
+#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */
+#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */
+#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */
+#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */
+#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */
+ /* pci stop if cascade not ready */
+#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity */
+#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */
+
+/*
+ * Early TX Configuration and Control Register
+ * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
+ */
+#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */
+#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */
+#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */
+#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */
+#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */
+
+/*
+ * Early RX Configuration and Control Register
+ * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
+ */
+#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */
+#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */
+#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the
+ * early rx circuit will start the
+ * dma of received packet into system
+ * memory for BM */
+
+/*
+ * Serial Devices Control Register
+ * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
+ */
+#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */
+ /* When it goes back to 0, load is */
+ /* complete. This should take ~600us. */
+
+/*
+ * 10MB LAN Control and Configuration Register I
+ * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
+ */
+#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */
+#define HP100_AUI_SEL 0x20 /* Status of AUI selection */
+#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */
+ /* been reversed */
+#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */
+
+/*
+ * 10 MB LAN Control and Configuration Register II
+ * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
+ */
+#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */
+ /* after Tx.Only used for AUI. */
+#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR full duplex enabled */
+#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. unless Autosel */
+
+/*
+ * MAC Selection, use with MAC10_SEL bits
+ */
+#define HP100_AUTO_SEL_10 0x0 /* Auto select */
+#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
+#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
+
+/*
+ * 100MB LAN Training Register
+ * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
+ */
+#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */
+#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */
+ /* promiscuous */
+#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */
+ /* be a cascaded repeater */
+
+/*
+ * 100MB LAN Control and Configuration Register
+ * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
+ */
+#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */
+ /* from hub */
+#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */
+ /* after LinkUp Cmd is given and set */
+ /* when training has completed. */
+#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ * MAC Configuration Register I
+ * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
+ */
+#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */
+#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */
+#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
+#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
+#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2 0x00
+#define HP100_MAC1MODE3 HP100_MAC1MODE2 | HP100_ACC_BC
+#define HP100_MAC1MODE4 HP100_MAC1MODE3 | HP100_ACC_MC
+#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6 HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+ a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+ runt packets, and keep the CRC bytes. */
+#define HP100_MAC1MODE7 HP100_MAC1MODE6 | HP100_ACC_ERRORED
+
+/*
+ * MAC Configuration Register II
+ * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
+ */
+#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
+ /* transceiver */
+#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring
+ * group addr that matches NA mask */
+#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
+ /* The length will reflect this. */
+#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional
+ * addrs that match FA mask (page1) */
+#define HP100_MAC2MODEMASK 0x02
+#define HP100_MAC2MODE1 0x00
+#define HP100_MAC2MODE2 0x00
+#define HP100_MAC2MODE3 0x00
+#define HP100_MAC2MODE4 0x00
+#define HP100_MAC2MODE5 0x00
+#define HP100_MAC2MODE6 0x00
+#define HP100_MAC2MODE7 HP100_KEEP_CRC
+
+/*
+ * MAC Configuration Register III
+ * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
+ */
+#define HP100_PACKET_PACE 0x03 /* Packet Pacing:
+ * 00: No packet pacing
+ * 01: 8 to 16 uS delay
+ * 10: 16 to 32 uS delay
+ * 11: 32 to 64 uS delay
+ */
+#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and
+ * TCP/IP Checksumming enabled. */
+#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */
+
+/*
+ * MAC Configuration Register IV
+ * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
+ */
+#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL
+ * Signal, 1=100VG, 0=10Mbit sel. */
+#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion
+ * of the Misc. Interrupt */
+
+/*
+ * 100 MB LAN Training Request/Allowed Registers
+ * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
+ */
+#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be
+ * a cascaded repeater
+ * 0: ... wants to be a DTE */
+#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */
+#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */
+#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */
+#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */
+#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an
+ * end node is allowed */
+#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format
+ * 00: 802.3 format will be used
+ * 10: 802.5 format will be used */
+#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */
+#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */
+#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */
+#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */
+ /* protocol of repeater */
+
+/* ****************************************************************************** */
+
+/*
+ * Set/Reset bits
+ */
+#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */
+#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */
+#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */
+#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */
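+
+/*
+ * Usage sketch (illustrative, inferred from how the driver code above uses
+ * these bits): bit 0 of each byte acts as a set/clear command for the other
+ * masked bits in that byte, e.g.
+ *
+ *   hp100_outw(HP100_INT_EN | HP100_SET_LB, OPTION_LSW);   enables interrupts
+ *   hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); disables them
+ *
+ * which is what the hp100_ints_on()/hp100_ints_off() macros below expand to.
+ */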
+
+/*
+ * Misc. Constants
+ */
+#define HP100_LAN_100 100 /* lan_type value for VG */
+#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
+#define HP100_LAN_COAX 9 /* lan_type value for Coax */
+#define HP100_LAN_ERR (-1) /* lan_type value for link down */
+
+/*
+ * Bus Master Data Structures ----------------------------------------------
+ */
+
+#define MAX_RX_PDL 30 /* Card limit = 31 */
+#define MAX_RX_FRAG 2 /* Don't need more... */
+#define MAX_TX_PDL 29
+#define MAX_TX_FRAG 2 /* Limit = 31 */
+
+/* Define total PDL area size in bytes (should be 4096) */
+/* This is the size of kernel (dma) memory that will be allocated. */
+#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16
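+/* Worked example (for illustration): with the defaults above each PDL takes
+ * MAX_RX_FRAG*8+4+4 = MAX_TX_FRAG*8+4+4 = 24 bytes, so MAX_RINGSIZE evaluates
+ * to 24*30 + 24*29 + 16 = 1432 bytes, well within the 4096 bytes mentioned
+ * above even after the extra 0x0f bytes allowed for alignment. */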
+
+/* Ethernet Packet Sizes */
+#define MIN_ETHER_SIZE 60
+#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */
+ /* skb buffer when busmastering */
+
+/* Tx or Rx Ring Entry */
+typedef struct hp100_ring {
+ u_int *pdl; /* Address of PDLs PDH, dword before
+ * this address is used for rx hdr */
+ u_int pdl_paddr; /* Physical address of PDL */
+ struct sk_buff *skb;
+ struct hp100_ring *next;
+} hp100_ring_t;
+
+
+
+/* Mask for Header Descriptor */
+#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */
+
+
+/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
+ bit in the MAC Configuration Register 1 is set. */
+#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
+ /* marker */
+#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
+ /* Length Reg. */
+#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
+
+/*
+ * macros
+ */
+
+#define hp100_inb( reg ) \
+ inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+ inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+ inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+ outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+ outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+ outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+#define hp100_page( page ) \
+ outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+ outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+ outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+ outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+ outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
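+
+/*
+ * Usage sketch (illustrative): with a local `ioaddr' holding the card's base
+ * address, driver code first selects a register page and then accesses the
+ * paged registers by symbolic name, e.g.
+ *
+ *   hp100_page(MAC_CTRL);
+ *   val = hp100_inb(VG_LAN_CFG_1);
+ *
+ * which the preprocessor expands to
+ *
+ *   outw(HP100_PAGE_MAC_CTRL, ioaddr + HP100_REG_PAGING);
+ *   val = inb(ioaddr + HP100_REG_VG_LAN_CFG_1);
+ */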
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
new file mode 100644
index 000000000000..08703d6f934c
--- /dev/null
+++ b/drivers/net/hplance.c
@@ -0,0 +1,231 @@
+/* hplance.c : the Linux/hp300/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/dio.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#include "hplance.h"
+
+/* We have 16384 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
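+/* With these log2 values the generic 7990 code presumably builds 1 << 1 = 2 Tx
+ * and 1 << 3 = 8 Rx ring buffers, i.e. the same counts NetBSD uses. */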
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct hplance_private {
+ struct lance_private lance;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int __devinit hplance_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent);
+static void __devinit hplance_init(struct net_device *dev,
+ struct dio_dev *d);
+static void __devexit hplance_remove_one(struct dio_dev *d);
+static void hplance_writerap(void *priv, unsigned short value);
+static void hplance_writerdp(void *priv, unsigned short value);
+static unsigned short hplance_readrdp(void *priv);
+static int hplance_open(struct net_device *dev);
+static int hplance_close(struct net_device *dev);
+
+static struct dio_device_id hplance_dio_tbl[] = {
+ { DIO_ID_LAN },
+ { 0 }
+};
+
+static struct dio_driver hplance_driver = {
+ .name = "hplance",
+ .id_table = hplance_dio_tbl,
+ .probe = hplance_init_one,
+ .remove = __devexit_p(hplance_remove_one),
+};
+
+/* Find all the HP Lance boards and initialise them... */
+static int __devinit hplance_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof(struct hplance_private));
+ if (!dev)
+ goto out;
+
+ err = -EBUSY;
+ if (!request_mem_region(dio_resource_start(d),
+ dio_resource_len(d), d->name))
+ goto out_free_netdev;
+
+ hplance_init(dev, d);
+ err = register_netdev(dev);
+ if (err)
+ goto out_release_mem_region;
+
+ dio_set_drvdata(d, dev);
+ return 0;
+
+ out_release_mem_region:
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ out_free_netdev:
+ free_netdev(dev);
+ out:
+ return err;
+}
+
+static void __devexit hplance_remove_one(struct dio_dev *d)
+{
+ struct net_device *dev = dio_get_drvdata(d);
+
+ unregister_netdev(dev);
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ free_netdev(dev);
+}
+
+/* Initialise a single lance board at the given DIO device */
+static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
+{
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ printk(KERN_INFO "%s: %s; select code %d, addr", dev->name, d->name, d->scode);
+
+ /* reset the board */
+ out_8(va+DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->open = &hplance_open;
+ dev->stop = &hplance_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = lance_poll;
+#endif
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+ dev->dma = 0;
+
+ for (i=0; i<6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ printk("%c%2.2x", i == 0 ? ' ' : ':', dev->dev_addr[i]);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = 0; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+ printk(", irq %d\n", lp->lance.irq);
+}
+
+/* This is disgusting. We have to check the DIO status register for ack every
+ * time we read or write the LANCE registers.
+ */
+static void hplance_writerap(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static void hplance_writerdp(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static unsigned short hplance_readrdp(void *priv)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+static int hplance_open(struct net_device *dev)
+{
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
+
+ return 0;
+}
+
+static int hplance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+	out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at board level */
+ lance_close(dev);
+ return 0;
+}
+
+int __init hplance_init_module(void)
+{
+ return dio_module_init(&hplance_driver);
+}
+
+void __exit hplance_cleanup_module(void)
+{
+ dio_unregister_driver(&hplance_driver);
+}
+
+module_init(hplance_init_module);
+module_exit(hplance_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/hplance.h b/drivers/net/hplance.h
new file mode 100644
index 000000000000..04aee9e0376a
--- /dev/null
+++ b/drivers/net/hplance.h
@@ -0,0 +1,26 @@
+/* Random defines and structures for the HP Lance driver.
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ */
+
+/* Registers */
+#define HPLANCE_ID 0x01 /* DIO register: ID byte */
+#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */
+
+/* Control and status bits for the status register */
+#define LE_IE 0x80 /* interrupt enable */
+#define LE_IR 0x40 /* interrupt requested */
+#define LE_LOCK 0x08 /* lock status register */
+#define LE_ACK 0x04 /* ack of lock */
+#define LE_JAB 0x02 /* loss of tx clock (???) */
+/* We can also extract the IPL from the status register with the standard
+ * DIO_IPL(hplance) macro, or using dio_scodetoipl()
+ */
+
+/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg,
+ * memory and NVRAM:
+ */
+#define HPLANCE_IDOFF 0 /* board baseaddr */
+#define HPLANCE_REGOFF 0x4000 /* lance registers */
+#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */
+#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
new file mode 100644
index 000000000000..6e0ca7340a8f
--- /dev/null
+++ b/drivers/net/hydra.c
@@ -0,0 +1,256 @@
+/* New Hydra driver using generic 8390 core */
+/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */
+
+/* This file is subject to the terms and conditions of the GNU General */
+/* Public License. See the file COPYING in the main directory of the */
+/* Linux distribution for more details. */
+
+/* Peter De Schrijver (p2@mind.be) */
+/* Oldenburg 2000 */
+
+/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */
+/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */
+/* and 10BASE-2 (thin coax) and AUI connectors. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <linux/zorro.h>
+
+#include "8390.h"
+
+#define NE_EN0_DCFG (0x0e*2)
+
+#define NESM_START_PG 0x0 /* First page of TX buffer */
+#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define HYDRA_NIC_BASE 0xffe1
+#define HYDRA_ADDRPROM 0xffc0
+#define HYDRA_VERSION "v3.0alpha"
+
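+/* Swap the two bytes of a 16-bit value (used when reading the 8390 packet header from board RAM) */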
+#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
+
+
+static int __devinit hydra_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static int __devinit hydra_init(struct zorro_dev *z);
+static int hydra_open(struct net_device *dev);
+static int hydra_close(struct net_device *dev);
+static void hydra_reset_8390(struct net_device *dev);
+static void hydra_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void hydra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hydra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void __devexit hydra_remove_one(struct zorro_dev *z);
+
+static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
+ { 0 }
+};
+
+static struct zorro_driver hydra_driver = {
+ .name = "hydra",
+ .id_table = hydra_zorro_tbl,
+ .probe = hydra_init_one,
+ .remove = __devexit_p(hydra_remove_one),
+};
+
+static int __devinit hydra_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ int err;
+
+ if (!request_mem_region(z->resource.start, 0x10000, "Hydra"))
+ return -EBUSY;
+ if ((err = hydra_init(z))) {
+ release_mem_region(z->resource.start, 0x10000);
+		return err;
+ }
+ return 0;
+}
+
+static int __devinit hydra_init(struct zorro_dev *z)
+{
+ struct net_device *dev;
+ unsigned long board = ZTWO_VADDR(z->resource.start);
+ unsigned long ioaddr = board+HYDRA_NIC_BASE;
+ const char name[] = "NE2000";
+ int start_page, stop_page;
+ int j;
+ int err;
+
+ static u32 hydra_offsets[16] = {
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+ };
+
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+
+ for(j = 0; j < ETHER_ADDR_LEN; j++)
+ dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+
+ /* We must set the 8390 for word mode. */
+ z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+ if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, "Hydra Ethernet",
+ dev)) {
+ free_netdev(dev);
+ return -EAGAIN;
+ }
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.bigendian = 1;
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = &hydra_reset_8390;
+ ei_status.block_input = &hydra_block_input;
+ ei_status.block_output = &hydra_block_output;
+ ei_status.get_8390_hdr = &hydra_get_8390_hdr;
+ ei_status.reg_offset = hydra_offsets;
+ dev->open = &hydra_open;
+ dev->stop = &hydra_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ NS8390_init(dev, 0);
+
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ free_netdev(dev);
+ return err;
+ }
+
+ zorro_set_drvdata(z, dev);
+
+ printk(KERN_INFO "%s: Hydra at 0x%08lx, address "
+ "%02x:%02x:%02x:%02x:%02x:%02x (hydra.c " HYDRA_VERSION ")\n",
+ dev->name, z->resource.start, dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ return 0;
+}
+
+static int hydra_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int hydra_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+static void hydra_reset_8390(struct net_device *dev)
+{
+ printk(KERN_INFO "Hydra hw reset not there\n");
+}
+
+static void hydra_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ short *ptrs;
+ unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) +
+ ((ring_page - NESM_START_PG)<<8);
+ ptrs = (short *)hdr;
+
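+	/* Read the two 16-bit header words from board RAM, byte-swapping each (see WORDSWAP above) */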
+ *(ptrs++) = z_readw(hdr_start);
+ *((short *)hdr) = WORDSWAP(*((short *)hdr));
+ hdr_start += 2;
+ *(ptrs++) = z_readw(hdr_start);
+ *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1));
+}
+
+static void hydra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long nic_base = dev->base_addr;
+ unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
+ unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8);
+
+ if (count&1)
+ count++;
+
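+	/* If the transfer runs past the end of the receive ring, copy it in two pieces, wrapping back to the start of the buffer memory */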
+ if (xfer_start+count > mem_base + (NESM_STOP_PG<<8)) {
+ int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start;
+
+ z_memcpy_fromio(skb->data,xfer_start,semi_count);
+ count -= semi_count;
+ z_memcpy_fromio(skb->data+semi_count, mem_base, count);
+ } else
+ z_memcpy_fromio(skb->data, xfer_start,count);
+
+}
+
+static void hydra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned long nic_base = dev->base_addr;
+ unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
+
+ if (count&1)
+ count++;
+
+ z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
+}
+
+static void __devexit hydra_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000);
+ free_netdev(dev);
+}
+
+static int __init hydra_init_module(void)
+{
+ return zorro_module_init(&hydra_driver);
+}
+
+static void __exit hydra_cleanup_module(void)
+{
+ zorro_unregister_driver(&hydra_driver);
+}
+
+module_init(hydra_init_module);
+module_exit(hydra_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/hydra.h b/drivers/net/hydra.h
new file mode 100644
index 000000000000..37414146258d
--- /dev/null
+++ b/drivers/net/hydra.h
@@ -0,0 +1,177 @@
+/* $Linux: hydra.h,v 1.0 1994/10/26 02:03:47 cgd Exp $ */
+
+/*
+ * Copyright (c) 1994 Timo Rossi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Timo Rossi
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The Hydra Systems card uses the National Semiconductor
+ * 8390 NIC (Network Interface Controller) chip, located
+ * at card base address + 0xffe1. NIC registers are accessible
+ * only at odd byte addresses, so the register offsets must
+ * be multiplied by two.
+ *
+ * Card address PROM is located at card base + 0xffc0 (even byte addresses)
+ *
+ * RAM starts at the card base address, and is 16K or 64K.
+ * The current Amiga NetBSD hydra driver is hardwired for 16K.
+ * It seems that the RAM should be accessed as words or longwords only.
+ *
+ */
+
+/* adapted for Linux by Topi Kanerva 03/29/95
+ with original author's permission */
+
+#define HYDRA_NIC_BASE 0xffe1
+
+/* Page0 registers */
+
+#define NIC_CR 0 /* Command register */
+#define NIC_PSTART (1*2) /* Page start (write) */
+#define NIC_PSTOP (2*2) /* Page stop (write) */
+#define NIC_BNDRY (3*2) /* Boundary pointer */
+#define NIC_TSR (4*2) /* Transmit status (read) */
+#define NIC_TPSR (4*2) /* Transmit page start (write) */
+#define NIC_NCR (5*2) /* Number of collisions, read */
+#define NIC_TBCR0 (5*2) /* Transmit byte count low (write) */
+#define NIC_FIFO (6*2) /* FIFO reg. (read) */
+#define NIC_TBCR1 (6*2) /* Transmit byte count high (write) */
+#define NIC_ISR (7*2) /* Interrupt status register */
+#define NIC_RBCR0 (0xa*2) /* Remote byte count low (write) */
+#define NIC_RBCR1 (0xb*2) /* Remote byte count high (write) */
+#define NIC_RSR (0xc*2) /* Receive status (read) */
+#define NIC_RCR (0xc*2) /* Receive config (write) */
+#define NIC_CNTR0 (0xd*2) /* Frame alignment error count (read) */
+#define NIC_TCR (0xd*2) /* Transmit config (write) */
+#define NIC_CNTR1 (0xe*2) /* CRC error counter (read) */
+#define NIC_DCR (0xe*2) /* Data config (write) */
+#define NIC_CNTR2 (0xf*2) /* missed packet counter (read) */
+#define NIC_IMR (0xf*2) /* Interrupt mask reg. (write) */
+
+/* Page1 registers */
+
+#define NIC_PAR0 (1*2) /* Physical address */
+#define NIC_PAR1 (2*2)
+#define NIC_PAR2 (3*2)
+#define NIC_PAR3 (4*2)
+#define NIC_PAR4 (5*2)
+#define NIC_PAR5 (6*2)
+#define NIC_CURR (7*2) /* Current RX ring-buffer page */
+#define NIC_MAR0 (8*2) /* Multicast address */
+#define NIC_MAR1 (9*2)
+#define NIC_MAR2 (0xa*2)
+#define NIC_MAR3 (0xb*2)
+#define NIC_MAR4 (0xc*2)
+#define NIC_MAR5 (0xd*2)
+#define NIC_MAR6 (0xe*2)
+#define NIC_MAR7 (0xf*2)
+
+/* Command register definitions */
+
+#define CR_STOP 0x01 /* Stop -- software reset command */
+#define CR_START 0x02 /* Start */
+#define CR_TXP 0x04 /* Transmit packet */
+
+#define CR_RD0 0x08 /* Remote DMA cmd */
+#define CR_RD1 0x10
+#define CR_RD2 0x20
+
+#define CR_NODMA CR_RD2
+
+#define CR_PS0 0x40 /* Page select */
+#define CR_PS1 0x80
+
+#define CR_PAGE0 0
+#define CR_PAGE1 CR_PS0
+#define CR_PAGE2 CR_PS1
+
+/* Interrupt status reg. definitions */
+
+#define ISR_PRX 0x01 /* Packet received without errors */
+#define ISR_PTX 0x02 /* Packet transmitted without errors */
+#define ISR_RXE 0x04 /* Receive error */
+#define ISR_TXE 0x08 /* Transmit error */
+#define ISR_OVW 0x10 /* Ring buffer overrun */
+#define ISR_CNT 0x20 /* Counter overflow */
+#define ISR_RDC 0x40 /* Remote DMA compile */
+#define ISR_RST 0x80 /* Reset status */
+
+/* Data config reg. definitions */
+
+#define DCR_WTS 0x01 /* Word transfer select */
+#define DCR_BOS 0x02 /* Byte order select */
+#define DCR_LAS 0x04 /* Long address select */
+#define DCR_LS 0x08 /* Loopback select */
+#define DCR_AR 0x10 /* Auto-init remote */
+#define DCR_FT0 0x20 /* FIFO threshold select */
+#define DCR_FT1 0x40
+
+/* Transmit config reg. definitions */
+
+#define TCR_CRC 0x01 /* Inhibit CRC */
+#define TCR_LB0 0x02 /* Loopback control */
+#define TCR_LB1 0x04
+#define TCR_ATD 0x08 /* Auto transmit disable */
+#define TCR_OFST 0x10 /* Collision offset enable */
+
+/* Transmit status reg. definitions */
+
+#define TSR_PTX 0x01 /* Packet transmitted */
+#define TSR_COL 0x04 /* Transmit collided */
+#define TSR_ABT 0x08 /* Transmit aborted */
+#define TSR_CRS 0x10 /* Carrier sense lost */
+#define TSR_FU 0x20 /* FIFO underrun */
+#define TSR_CDH 0x40 /* CD Heartbeat */
+#define TSR_OWC 0x80 /* Out of Window Collision */
+
+/* Receiver config register definitions */
+
+#define RCR_SEP 0x01 /* Save errored packets */
+#define RCR_AR 0x02 /* Accept runt packets */
+#define RCR_AB 0x04 /* Accept broadcast */
+#define RCR_AM 0x08 /* Accept multicast */
+#define RCR_PRO 0x10 /* Promiscuous mode */
+#define RCR_MON 0x20 /* Monitor mode */
+
+/* Receiver status register definitions */
+
+#define RSR_PRX 0x01 /* Packet received without error */
+#define RSR_CRC 0x02 /* CRC error */
+#define RSR_FAE 0x04 /* Frame alignment error */
+#define RSR_FO 0x08 /* FIFO overrun */
+#define RSR_MPA 0x10 /* Missed packet */
+#define RSR_PHY 0x20 /* Physical address */
+#define RSR_DIS 0x40 /* Received disabled */
+#define RSR_DFR 0x80 /* Deferring (jabber) */
+
+/* Hydra System card address PROM offset */
+
+#define HYDRA_ADDRPROM 0xffc0
+
+
diff --git a/drivers/net/ibm_emac/Makefile b/drivers/net/ibm_emac/Makefile
new file mode 100644
index 000000000000..7f583a333c24
--- /dev/null
+++ b/drivers/net/ibm_emac/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the IBM PPC4xx EMAC controllers
+#
+
+obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
+
+ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o
+
+# Only need this if you want to see additional debug messages
+ifeq ($(CONFIG_IBM_EMAC_ERRMSG), y)
+ibm_emac-objs += ibm_emac_debug.o
+endif
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
new file mode 100644
index 000000000000..15d5a0e82862
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -0,0 +1,267 @@
+/*
+ * ibm_emac.h
+ *
+ *
+ * Armin Kuster akuster@mvista.com
+ * June, 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_H_
+#define _IBM_EMAC_H_
+/* General defines needed for the driver */
+
+/* Emac */
+typedef struct emac_regs {
+ u32 em0mr0;
+ u32 em0mr1;
+ u32 em0tmr0;
+ u32 em0tmr1;
+ u32 em0rmr;
+ u32 em0isr;
+ u32 em0iser;
+ u32 em0iahr;
+ u32 em0ialr;
+ u32 em0vtpid;
+ u32 em0vtci;
+ u32 em0ptr;
+ u32 em0iaht1;
+ u32 em0iaht2;
+ u32 em0iaht3;
+ u32 em0iaht4;
+ u32 em0gaht1;
+ u32 em0gaht2;
+ u32 em0gaht3;
+ u32 em0gaht4;
+ u32 em0lsah;
+ u32 em0lsal;
+ u32 em0ipgvr;
+ u32 em0stacr;
+ u32 em0trtr;
+ u32 em0rwmr;
+} emac_t;
+
+/* MODE REG 0 */
+#define EMAC_M0_RXI 0x80000000
+#define EMAC_M0_TXI 0x40000000
+#define EMAC_M0_SRST 0x20000000
+#define EMAC_M0_TXE 0x10000000
+#define EMAC_M0_RXE 0x08000000
+#define EMAC_M0_WKE 0x04000000
+
+/* MODE Reg 1 */
+#define EMAC_M1_FDE 0x80000000
+#define EMAC_M1_ILE 0x40000000
+#define EMAC_M1_VLE 0x20000000
+#define EMAC_M1_EIFC 0x10000000
+#define EMAC_M1_APP 0x08000000
+#define EMAC_M1_AEMI 0x02000000
+#define EMAC_M1_IST 0x01000000
+#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */
+#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */
+#define EMAC_M1_MF_100MBPS 0x00400000
+#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */
+#define EMAC_M1_TR 0x00008000
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_M1_RFS_8K 0x00200000
+#define EMAC_M1_RFS_4K 0x00180000
+#define EMAC_M1_RFS_2K 0x00100000
+#define EMAC_M1_RFS_1K 0x00080000
+#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */
+#define EMAC_M1_TX_FIFO_8K 0x00040000
+#define EMAC_M1_TX_FIFO_4K 0x00030000
+#define EMAC_M1_TX_FIFO_2K 0x00020000
+#define EMAC_M1_TX_FIFO_1K 0x00010000
+#define EMAC_M1_TX_TR 0x00008000
+#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */
+#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Up to 9K frames */
+#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66 MHz */
+#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83 MHz */
+#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100 MHz */
+#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100 MHz+ */
+#else /* CONFIG_IBM_EMAC4 */
+#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */
+#define EMAC_M1_RFS_2K 0x00200000
+#define EMAC_M1_RFS_1K 0x00100000
+#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */
+#define EMAC_M1_TX_FIFO_1K 0x00040000
+#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0's for single packet */
+#define EMAC_M1_TR1_DEPEND 0x00004000
+#define EMAC_M1_TR1_MULTI 0x00002000
+#define EMAC_M1_JUMBO_ENABLE 0x00001000
+#endif /* CONFIG_IBM_EMAC4 */
+#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \
+ EMAC_M1_APP | \
+ EMAC_M1_TR | EMAC_M1_VLE)
+
+/* Transmit Mode Register 0 */
+#define EMAC_TMR0_GNP0 0x80000000
+#define EMAC_TMR0_GNP1 0x40000000
+#define EMAC_TMR0_GNPD 0x20000000
+#define EMAC_TMR0_FC 0x10000000
+#define EMAC_TMR0_TFAE_2_32 0x00000001
+#define EMAC_TMR0_TFAE_4_64 0x00000002
+#define EMAC_TMR0_TFAE_8_128 0x00000003
+#define EMAC_TMR0_TFAE_16_256 0x00000004
+#define EMAC_TMR0_TFAE_32_512 0x00000005
+#define EMAC_TMR0_TFAE_64_1024 0x00000006
+#define EMAC_TMR0_TFAE_128_2048 0x00000007
+
+/* Receive Mode Register */
+#define EMAC_RMR_SP 0x80000000
+#define EMAC_RMR_SFCS 0x40000000
+#define EMAC_RMR_ARRP 0x20000000
+#define EMAC_RMR_ARP 0x10000000
+#define EMAC_RMR_AROP 0x08000000
+#define EMAC_RMR_ARPI 0x04000000
+#define EMAC_RMR_PPP 0x02000000
+#define EMAC_RMR_PME 0x01000000
+#define EMAC_RMR_PMME 0x00800000
+#define EMAC_RMR_IAE 0x00400000
+#define EMAC_RMR_MIAE 0x00200000
+#define EMAC_RMR_BAE 0x00100000
+#define EMAC_RMR_MAE 0x00080000
+#define EMAC_RMR_RFAF_2_32 0x00000001
+#define EMAC_RMR_RFAF_4_64 0x00000002
+#define EMAC_RMR_RFAF_8_128 0x00000003
+#define EMAC_RMR_RFAF_16_256 0x00000004
+#define EMAC_RMR_RFAF_32_512 0x00000005
+#define EMAC_RMR_RFAF_64_1024 0x00000006
+#define EMAC_RMR_RFAF_128_2048 0x00000007
+#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE)
+
+/* Interrupt Status & enable Regs */
+#define EMAC_ISR_OVR 0x02000000
+#define EMAC_ISR_PP 0x01000000
+#define EMAC_ISR_BP 0x00800000
+#define EMAC_ISR_RP 0x00400000
+#define EMAC_ISR_SE 0x00200000
+#define EMAC_ISR_ALE 0x00100000
+#define EMAC_ISR_BFCS 0x00080000
+#define EMAC_ISR_PTLE 0x00040000
+#define EMAC_ISR_ORE 0x00020000
+#define EMAC_ISR_IRE 0x00010000
+#define EMAC_ISR_DBDM 0x00000200
+#define EMAC_ISR_DB0 0x00000100
+#define EMAC_ISR_SE0 0x00000080
+#define EMAC_ISR_TE0 0x00000040
+#define EMAC_ISR_DB1 0x00000020
+#define EMAC_ISR_SE1 0x00000010
+#define EMAC_ISR_TE1 0x00000008
+#define EMAC_ISR_MOS 0x00000002
+#define EMAC_ISR_MOF 0x00000001
+
+/* STA CONTROL REG */
+#define EMAC_STACR_OC 0x00008000
+#define EMAC_STACR_PHYE 0x00004000
+#define EMAC_STACR_WRITE 0x00002000
+#define EMAC_STACR_READ 0x00001000
+#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50 MHz */
+#define EMAC_STACR_CLK_66MHZ 0x00000400
+#define EMAC_STACR_CLK_100MHZ 0x00000C00
+
+/* Transmit Request Threshold Register */
+#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */
+#define EMAC_TRTR_1024 0x0f000000
+#define EMAC_TRTR_512 0x07000000
+#define EMAC_TRTR_256 0x03000000
+#define EMAC_TRTR_192 0x10000000
+#define EMAC_TRTR_128 0x01000000
+
+#define EMAC_TX_CTRL_GFCS 0x0200
+#define EMAC_TX_CTRL_GP 0x0100
+#define EMAC_TX_CTRL_ISA 0x0080
+#define EMAC_TX_CTRL_RSA 0x0040
+#define EMAC_TX_CTRL_IVT 0x0020
+#define EMAC_TX_CTRL_RVT 0x0010
+#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */
+#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */
+
+#define EMAC_TX_CTRL_DFLT ( \
+ MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
+
+/* madmal transmit status / Control bits */
+#define EMAC_TX_ST_BFCS 0x0200
+#define EMAC_TX_ST_BPP 0x0100
+#define EMAC_TX_ST_LCS 0x0080
+#define EMAC_TX_ST_ED 0x0040
+#define EMAC_TX_ST_EC 0x0020
+#define EMAC_TX_ST_LC 0x0010
+#define EMAC_TX_ST_MC 0x0008
+#define EMAC_TX_ST_SC 0x0004
+#define EMAC_TX_ST_UR 0x0002
+#define EMAC_TX_ST_SQE 0x0001
+
+/* madmal receive status / Control bits */
+#define EMAC_RX_ST_OE 0x0200
+#define EMAC_RX_ST_PP 0x0100
+#define EMAC_RX_ST_BP 0x0080
+#define EMAC_RX_ST_RP 0x0040
+#define EMAC_RX_ST_SE 0x0020
+#define EMAC_RX_ST_AE 0x0010
+#define EMAC_RX_ST_BFCS 0x0008
+#define EMAC_RX_ST_PTL 0x0004
+#define EMAC_RX_ST_ORE 0x0002
+#define EMAC_RX_ST_IRE 0x0001
+#define EMAC_BAD_RX_PACKET 0x02ff
+#define EMAC_CSUM_VER_ERROR 0x0003
+
+/* identify a bad rx packet dependent on emac features */
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_IS_BAD_RX_PACKET(desc) \
+ (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \
+ ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
+ ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
+#else
+#define EMAC_IS_BAD_RX_PACKET(desc) \
+ (desc & EMAC_BAD_RX_PACKET)
+#endif
+
+/* SoC implementation specific EMAC register defaults */
+#if defined(CONFIG_440GP)
+#define EMAC_RWMR_DEFAULT 0x80009000
+#define EMAC_TMR0_DEFAULT 0x00000000
+#define EMAC_TMR1_DEFAULT 0xf8640000
+#elif defined(CONFIG_440GX)
+#define EMAC_RWMR_DEFAULT 0x1000a200
+#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
+#define EMAC_TMR1_DEFAULT 0xa00f0000
+#elif defined(CONFIG_440SP)
+#define EMAC_RWMR_DEFAULT 0x08002000
+#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
+#define EMAC_TMR1_DEFAULT 0xf8200000
+#else
+#define EMAC_RWMR_DEFAULT 0x0f002000
+#define EMAC_TMR0_DEFAULT 0x00000000
+#define EMAC_TMR1_DEFAULT 0x380f0000
+#endif /* CONFIG_440GP */
+
+/* Revision specific EMAC register defaults */
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
+ EMAC_M1_OPB_CLK_83 | \
+ EMAC_M1_TX_MWSW)
+#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
+ EMAC_RMR_RFAF_128_2048)
+#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
+ EMAC_TMR0_DEFAULT)
+#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
+#else /* !CONFIG_IBM_EMAC4 */
+#define EMAC_M1_DEFAULT EMAC_M1_BASE
+#define EMAC_RMR_DEFAULT EMAC_RMR_BASE
+#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0
+#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
+#endif /* CONFIG_IBM_EMAC4 */
+
+#endif
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
new file mode 100644
index 000000000000..ab44358ddbfc
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -0,0 +1,2012 @@
+/*
+ * ibm_emac_core.c
+ *
+ * Ethernet driver for the built in ethernet on the IBM 4xx PowerPC
+ * processors.
+ *
+ * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ * Based on original work by
+ *
+ * Armin Kuster <akuster@mvista.com>
+ * Johnnie Peters <jpeters@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ * TODO
+ * - Check for races in the "remove" code path
+ * - Add some Power Management to the MAC and the PHY
+ * - Audit the remaining non-rewritten code (--BenH)
+ * - Clean up message display using the msglevel mechanism
+ * - Address all errata
+ * - Audit all register update paths to ensure they
+ * are being written post soft reset if required.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/ocp.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include "ibm_emac_core.h"
+
+//#define MDIO_DEBUG(fmt) printk fmt
+#define MDIO_DEBUG(fmt)
+
+//#define LINK_DEBUG(fmt) printk fmt
+#define LINK_DEBUG(fmt)
+
+//#define PKT_DEBUG(fmt) printk fmt
+#define PKT_DEBUG(fmt)
+
+#define DRV_NAME "emac"
+#define DRV_VERSION "2.0"
+#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
+#define DRV_DESC "IBM EMAC Ethernet driver"
+
+/*
+ * When mdio_idx >= 0, contains a list of emac ocp_devs
+ * that have had their initialization deferred until the
+ * common MDIO controller has been initialized.
+ */
+LIST_HEAD(emac_init_list);
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+static int skb_res = SKB_RES;
+module_param(skb_res, int, 0444);
+MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
+ "The 405 handles a misaligned IP header fine but\n"
+ "this can help if you are routing to a tunnel or a\n"
+ "device that needs aligned data. 0..2");
+
+#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
+
+static unsigned int rgmii_enable[] = {
+ RGMII_RTBI,
+ RGMII_RGMII,
+ RGMII_TBI,
+ RGMII_GMII
+};
+
+static unsigned int rgmii_speed_mask[] = {
+ RGMII_MII2_SPDMASK,
+ RGMII_MII3_SPDMASK
+};
+
+static unsigned int rgmii_speed100[] = {
+ RGMII_MII2_100MB,
+ RGMII_MII3_100MB
+};
+
+static unsigned int rgmii_speed1000[] = {
+ RGMII_MII2_1000MB,
+ RGMII_MII3_1000MB
+};
+
+#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev))
+
+static unsigned int zmii_enable[][4] = {
+ {ZMII_SMII0, ZMII_RMII0, ZMII_MII0,
+ ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)},
+ {ZMII_SMII1, ZMII_RMII1, ZMII_MII1,
+ ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
+ {ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
+ ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
+ {ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
+};
+
+static unsigned int mdi_enable[] = {
+ ZMII_MDI0,
+ ZMII_MDI1,
+ ZMII_MDI2,
+ ZMII_MDI3
+};
+
+static unsigned int zmii_speed = 0x0;
+static unsigned int zmii_speed100[] = {
+ ZMII_MII0_100MB,
+ ZMII_MII1_100MB,
+ ZMII_MII2_100MB,
+ ZMII_MII3_100MB
+};
+
+/* Since multiple EMACs share MDIO lines in various ways, we need
+ * to avoid re-using the same PHY ID in cases where the arch didn't
+ * set up precise phy_map entries.
+ */
+static u32 busy_phy_map = 0;
+
+/* If EMACs share a common MDIO device, this points to it */
+static struct net_device *mdio_ndev = NULL;
+
+struct emac_def_dev {
+ struct list_head link;
+ struct ocp_device *ocpdev;
+ struct ibm_ocp_mal *mal;
+};
+
+static struct net_device_stats *emac_stats(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ return &fep->stats;
+}
+
+static int
+emac_init_rgmii(struct ocp_device *rgmii_dev, int input, int phy_mode)
+{
+ struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(rgmii_dev);
+ const char *mode_name[] = { "RTBI", "RGMII", "TBI", "GMII" };
+ int mode = -1;
+
+ if (!rgmii) {
+ rgmii = kmalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL);
+
+ if (rgmii == NULL) {
+ printk(KERN_ERR
+ "rgmii%d: Out of memory allocating RGMII structure!\n",
+ rgmii_dev->def->index);
+ return -ENOMEM;
+ }
+
+ memset(rgmii, 0, sizeof(*rgmii));
+
+ rgmii->base =
+ (struct rgmii_regs *)ioremap(rgmii_dev->def->paddr,
+ sizeof(*rgmii->base));
+ if (rgmii->base == NULL) {
+ printk(KERN_ERR
+ "rgmii%d: Cannot ioremap bridge registers!\n",
+ rgmii_dev->def->index);
+
+ kfree(rgmii);
+ return -ENOMEM;
+ }
+ ocp_set_drvdata(rgmii_dev, rgmii);
+ }
+
+ if (phy_mode) {
+ switch (phy_mode) {
+ case PHY_MODE_GMII:
+ mode = GMII;
+ break;
+ case PHY_MODE_TBI:
+ mode = TBI;
+ break;
+ case PHY_MODE_RTBI:
+ mode = RTBI;
+ break;
+ case PHY_MODE_RGMII:
+ default:
+ mode = RGMII;
+ }
+ rgmii->base->fer &= ~RGMII_FER_MASK(input);
+ rgmii->base->fer |= rgmii_enable[mode] << (4 * input);
+ } else {
+ switch ((rgmii->base->fer & RGMII_FER_MASK(input)) >> (4 *
+ input)) {
+ case RGMII_RTBI:
+ mode = RTBI;
+ break;
+ case RGMII_RGMII:
+ mode = RGMII;
+ break;
+ case RGMII_TBI:
+ mode = TBI;
+ break;
+ case RGMII_GMII:
+ mode = GMII;
+ }
+ }
+
+ /* Set mode to RGMII if nothing valid is detected */
+ if (mode < 0)
+ mode = RGMII;
+
+ printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n",
+ rgmii_dev->def->index, input, mode_name[mode]);
+
+ rgmii->mode[input] = mode;
+ rgmii->users++;
+
+ return 0;
+}
+
+static void
+emac_rgmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
+{
+ struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
+ unsigned int rgmii_speed;
+
+ rgmii_speed = in_be32(&rgmii->base->ssr);
+
+ rgmii_speed &= ~rgmii_speed_mask[input];
+
+ if (speed == 1000)
+ rgmii_speed |= rgmii_speed1000[input];
+ else if (speed == 100)
+ rgmii_speed |= rgmii_speed100[input];
+
+ out_be32(&rgmii->base->ssr, rgmii_speed);
+}
+
+static void emac_close_rgmii(struct ocp_device *ocpdev)
+{
+ struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
+ BUG_ON(!rgmii || rgmii->users == 0);
+
+ if (!--rgmii->users) {
+ ocp_set_drvdata(ocpdev, NULL);
+ iounmap((void *)rgmii->base);
+ kfree(rgmii);
+ }
+}
+
+static int emac_init_zmii(struct ocp_device *zmii_dev, int input, int phy_mode)
+{
+ struct ibm_ocp_zmii *zmii = ZMII_PRIV(zmii_dev);
+ const char *mode_name[] = { "SMII", "RMII", "MII" };
+ int mode = -1;
+
+ if (!zmii) {
+ zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
+ if (zmii == NULL) {
+ printk(KERN_ERR
+ "zmii%d: Out of memory allocating ZMII structure!\n",
+ zmii_dev->def->index);
+ return -ENOMEM;
+ }
+ memset(zmii, 0, sizeof(*zmii));
+
+ zmii->base =
+ (struct zmii_regs *)ioremap(zmii_dev->def->paddr,
+ sizeof(*zmii->base));
+ if (zmii->base == NULL) {
+ printk(KERN_ERR
+ "zmii%d: Cannot ioremap bridge registers!\n",
+ zmii_dev->def->index);
+
+ kfree(zmii);
+ return -ENOMEM;
+ }
+ ocp_set_drvdata(zmii_dev, zmii);
+ }
+
+ if (phy_mode) {
+ switch (phy_mode) {
+ case PHY_MODE_MII:
+ mode = MII;
+ break;
+ case PHY_MODE_RMII:
+ mode = RMII;
+ break;
+ case PHY_MODE_SMII:
+ default:
+ mode = SMII;
+ }
+ zmii->base->fer &= ~ZMII_FER_MASK(input);
+ zmii->base->fer |= zmii_enable[input][mode];
+ } else {
+ switch ((zmii->base->fer & ZMII_FER_MASK(input)) << (4 * input)) {
+ case ZMII_MII0:
+ mode = MII;
+ break;
+ case ZMII_RMII0:
+ mode = RMII;
+ break;
+ case ZMII_SMII0:
+ mode = SMII;
+ }
+ }
+
+ /* Set mode to SMII if nothing valid is detected */
+ if (mode < 0)
+ mode = SMII;
+
+ printk(KERN_NOTICE "zmii%d: input %d in %s mode\n",
+ zmii_dev->def->index, input, mode_name[mode]);
+
+ zmii->mode[input] = mode;
+ zmii->users++;
+
+ return 0;
+}
+
+static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input)
+{
+ u32 mask;
+ struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
+
+ mask = in_be32(&zmii->base->fer);
+ mask &= zmii_enable[input][MDI]; /* turn all non enabled MDI's off */
+ mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
+ out_be32(&zmii->base->fer, mask);
+}
+
+static void
+emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
+{
+ struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
+
+ if (speed == 100)
+ zmii_speed |= zmii_speed100[input];
+ else
+ zmii_speed &= ~zmii_speed100[input];
+
+ out_be32(&zmii->base->ssr, zmii_speed);
+}
+
+static void emac_close_zmii(struct ocp_device *ocpdev)
+{
+ struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
+ BUG_ON(!zmii || zmii->users == 0);
+
+ if (!--zmii->users) {
+ ocp_set_drvdata(ocpdev, NULL);
+ iounmap((void *)zmii->base);
+ kfree(zmii);
+ }
+}
+
+int emac_phy_read(struct net_device *dev, int mii_id, int reg)
+{
+ int count;
+ uint32_t stacr;
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+
+ MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id,
+ reg));
+
+ /* Enable proper ZMII port */
+ if (fep->zmii_dev)
+ emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
+
+ /* Use the EMAC that has the MDIO port */
+ if (fep->mdio_dev) {
+ dev = fep->mdio_dev;
+ fep = dev->priv;
+ emacp = fep->emacp;
+ }
+
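+	/* Wait (bounded by MDIO_DELAY) for any previous MDIO operation to complete, i.e. for the OC bit to be set */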
+ count = 0;
+ while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
+ && (count++ < MDIO_DELAY))
+ udelay(1);
+ MDIO_DEBUG((" (count was %d)\n", count));
+
+ if ((stacr & EMAC_STACR_OC) == 0) {
+ printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name);
+ return -1;
+ }
+
+ /* Clear the speed bits and make a read request to the PHY */
+ stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
+ stacr |= ((mii_id & 0x1F) << 5);
+
+ out_be32(&emacp->em0stacr, stacr);
+
+ count = 0;
+ while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
+ && (count++ < MDIO_DELAY))
+ udelay(1);
+ MDIO_DEBUG((" (count was %d)\n", count));
+
+ if ((stacr & EMAC_STACR_OC) == 0) {
+ printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
+ return -1;
+ }
+
+ /* Check for a read error */
+ if (stacr & EMAC_STACR_PHYE) {
+ MDIO_DEBUG(("EMAC MDIO PHY error !\n"));
+ return -1;
+ }
+
+ MDIO_DEBUG((" -> 0x%x\n", stacr >> 16));
+
+ return (stacr >> 16);
+}
+
+void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
+{
+ int count;
+ uint32_t stacr;
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+
+ MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n",
+ dev->name, mii_id, reg, data));
+
+ /* Enable proper ZMII port */
+ if (fep->zmii_dev)
+ emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
+
+ /* Use the EMAC that has the MDIO port */
+ if (fep->mdio_dev) {
+ dev = fep->mdio_dev;
+ fep = dev->priv;
+ emacp = fep->emacp;
+ }
+
+ count = 0;
+ while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
+ && (count++ < MDIO_DELAY))
+ udelay(1);
+ MDIO_DEBUG((" (count was %d)\n", count));
+
+ if ((stacr & EMAC_STACR_OC) == 0) {
+		printk(KERN_WARNING "%s: PHY write timeout #1!\n", dev->name);
+ return;
+ }
+
+	/* Clear the speed bits and make a write request to the PHY */
+
+ stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
+ stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16);
+
+ out_be32(&emacp->em0stacr, stacr);
+
+ count = 0;
+ while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
+ && (count++ < MDIO_DELAY))
+ udelay(1);
+ MDIO_DEBUG((" (count was %d)\n", count));
+
+ if ((stacr & EMAC_STACR_OC) == 0)
+ printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
+
+ /* Check for a write error */
+ if ((stacr & EMAC_STACR_PHYE) != 0) {
+ MDIO_DEBUG(("EMAC MDIO PHY error !\n"));
+ }
+}
+
+static void emac_txeob_dev(void *param, u32 chanmask)
+{
+ struct net_device *dev = param;
+ struct ocp_enet_private *fep = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt));
+
+ while (fep->tx_cnt &&
+ !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {
+
+ if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) {
+ /* Tell the system the transmit completed. */
+ dma_unmap_single(&fep->ocpdev->dev,
+ fep->tx_desc[fep->ack_slot].data_ptr,
+ fep->tx_desc[fep->ack_slot].data_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);
+
+ if (fep->tx_desc[fep->ack_slot].ctrl &
+ (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC))
+ fep->stats.collisions++;
+ }
+
+ fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL;
+ if (++fep->ack_slot == NUM_TX_BUFF)
+ fep->ack_slot = 0;
+
+ fep->tx_cnt--;
+ }
+ if (fep->tx_cnt < NUM_TX_BUFF)
+ netif_wake_queue(dev);
+
+ PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt));
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+/*
+ Fill/Re-fill the rx chain with valid ctrl/ptrs.
+  This function will fill from rx_slot up to the parameter 'end'.
+ So to completely fill the chain pre-set rx_slot to 0 and
+ pass in an end of 0.
+ */
+static void emac_rx_fill(struct net_device *dev, int end)
+{
+ int i;
+ struct ocp_enet_private *fep = dev->priv;
+
+ i = fep->rx_slot;
+ do {
+ /* We don't want the 16 bytes skb_reserve done by dev_alloc_skb,
+		 * it breaks our cache line alignment. However, we still allocate
+ * +16 so that we end up allocating the exact same size as
+ * dev_alloc_skb() would do.
+ * Also, because of the skb_res, the max DMA size we give to EMAC
+		 * is slightly wrong, causing it to potentially DMA 2 more bytes
+		 * from a broken/oversized packet. These 16 bytes make sure
+		 * we don't walk on somebody else's toes with that.
+ */
+ fep->rx_skb[i] =
+ alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);
+
+ if (fep->rx_skb[i] == NULL) {
+ /* Keep rx_slot here, the next time clean/fill is called
+			 * we will try again before the MAL wraps back here.
+ * If the MAL tries to use this descriptor with
+ * the EMPTY bit off it will cause the
+ * rxde interrupt. That is where we will
+ * try again to allocate an sk_buff.
+ */
+ break;
+
+ }
+
+ if (skb_res)
+ skb_reserve(fep->rx_skb[i], skb_res);
+
+ /* We must NOT dma_map_single the cache line right after the
+ * buffer, so we must crop our sync size to account for the
+ * reserved space
+ */
+ fep->rx_desc[i].data_ptr =
+ (unsigned char *)dma_map_single(&fep->ocpdev->dev,
+ (void *)fep->rx_skb[i]->
+ data,
+ fep->rx_buffer_size -
+ skb_res, DMA_FROM_DEVICE);
+
+ /*
+ * Some 4xx implementations use the previously
+ * reserved bits in data_len to encode the MS
+		 * 4 bits of a 36-bit physical address (ERPN).
+ * This must be initialized.
+ */
+ fep->rx_desc[i].data_len = 0;
+ fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
+ (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+
+ } while ((i = (i + 1) % NUM_RX_BUFF) != end);
+
+ fep->rx_slot = i;
+}
+
+static void
+emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb)
+{
+ struct ocp_enet_private *fep = dev->priv;
+
+ /* Exit if interface has no TAH engine */
+ if (!fep->tah_dev) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* Check for TCP/UDP/IP csum error */
+ if (ctrl & EMAC_CSUM_VER_ERROR) {
+ /* Let the stack verify checksum errors */
+ skb->ip_summed = CHECKSUM_NONE;
+/* adapter->hw_csum_err++; */
+ } else {
+ /* Csum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+/* adapter->hw_csum_good++; */
+ }
+}
+
+static int emac_rx_clean(struct net_device *dev)
+{
+ int i, b, bnum = 0, buf[6];
+ int error, frame_length;
+ struct ocp_enet_private *fep = dev->priv;
+ unsigned short ctrl;
+
+ i = fep->rx_slot;
+
+ PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot));
+
+ do {
+ if (fep->rx_skb[i] == NULL)
+			continue;	/* we have already handled the packet but have failed to alloc */
+ /*
+		   Since rx_desc is in uncached memory we don't keep reading it directly;
+		   we pull out a local copy of ctrl and do the checks on the copy.
+ */
+ ctrl = fep->rx_desc[i].ctrl;
+ if (ctrl & MAL_RX_CTRL_EMPTY)
+ break; /*we don't have any more ready packets */
+
+ if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
+ fep->stats.rx_errors++;
+ fep->stats.rx_dropped++;
+
+ if (ctrl & EMAC_RX_ST_OE)
+ fep->stats.rx_fifo_errors++;
+ if (ctrl & EMAC_RX_ST_AE)
+ fep->stats.rx_frame_errors++;
+ if (ctrl & EMAC_RX_ST_BFCS)
+ fep->stats.rx_crc_errors++;
+ if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
+ EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
+ fep->stats.rx_length_errors++;
+ } else {
+ if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
+ (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
+ /* Single descriptor packet */
+ emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
+ /* Send the skb up the chain. */
+ frame_length = fep->rx_desc[i].data_len - 4;
+ skb_put(fep->rx_skb[i], frame_length);
+ fep->rx_skb[i]->dev = dev;
+ fep->rx_skb[i]->protocol =
+ eth_type_trans(fep->rx_skb[i], dev);
+ error = netif_rx(fep->rx_skb[i]);
+
+ if ((error == NET_RX_DROP) ||
+ (error == NET_RX_BAD)) {
+ fep->stats.rx_dropped++;
+ } else {
+ fep->stats.rx_packets++;
+ fep->stats.rx_bytes += frame_length;
+ }
+ fep->rx_skb[i] = NULL;
+ } else {
+ /* Multiple descriptor packet */
+ if (ctrl & MAL_RX_CTRL_FIRST) {
+ if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].
+ ctrl & MAL_RX_CTRL_EMPTY)
+ break;
+ bnum = 0;
+ buf[bnum] = i;
+ ++bnum;
+ continue;
+ }
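+				/* Middle descriptor: neither FIRST nor LAST set; remember it and keep collecting the rest of the frame */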
+ if (((ctrl & MAL_RX_CTRL_FIRST) !=
+ MAL_RX_CTRL_FIRST) &&
+ ((ctrl & MAL_RX_CTRL_LAST) !=
+ MAL_RX_CTRL_LAST)) {
+ if (fep->rx_desc[(i + 1) %
+ NUM_RX_BUFF].ctrl &
+ MAL_RX_CTRL_EMPTY) {
+ i = buf[0];
+ break;
+ }
+ buf[bnum] = i;
+ ++bnum;
+ continue;
+ }
+ if (ctrl & MAL_RX_CTRL_LAST) {
+ buf[bnum] = i;
+ ++bnum;
+ skb_put(fep->rx_skb[buf[0]],
+ fep->rx_desc[buf[0]].data_len);
+ for (b = 1; b < bnum; b++) {
+ /*
+ * MAL is braindead, we need
+ * to copy the remainder
+ * of the packet from the
+ * latter descriptor buffers
+ * to the first skb. Then
+ * dispose of the source
+ * skbs.
+ *
+ * Once the stack is fixed
+ * to handle frags on most
+ * protocols we can generate
+ * a fragmented skb with
+ * no copies.
+ */
+ memcpy(fep->rx_skb[buf[0]]->
+ data +
+ fep->rx_skb[buf[0]]->len,
+ fep->rx_skb[buf[b]]->
+ data,
+ fep->rx_desc[buf[b]].
+ data_len);
+ skb_put(fep->rx_skb[buf[0]],
+ fep->rx_desc[buf[b]].
+ data_len);
+ dma_unmap_single(&fep->ocpdev->
+ dev,
+ fep->
+ rx_desc[buf
+ [b]].
+ data_ptr,
+ fep->
+ rx_desc[buf
+ [b]].
+ data_len,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(fep->
+ rx_skb[buf[b]]);
+ }
+ emac_rx_csum(dev, ctrl,
+ fep->rx_skb[buf[0]]);
+
+ fep->rx_skb[buf[0]]->dev = dev;
+ fep->rx_skb[buf[0]]->protocol =
+ eth_type_trans(fep->rx_skb[buf[0]],
+ dev);
+ error = netif_rx(fep->rx_skb[buf[0]]);
+
+ if ((error == NET_RX_DROP)
+ || (error == NET_RX_BAD)) {
+ fep->stats.rx_dropped++;
+ } else {
+ fep->stats.rx_packets++;
+ fep->stats.rx_bytes +=
+ fep->rx_skb[buf[0]]->len;
+ }
+ for (b = 0; b < bnum; b++)
+ fep->rx_skb[buf[b]] = NULL;
+ }
+ }
+ }
+ } while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot);
+
+ PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot));
+
+ return i;
+}
+
+static void emac_rxeob_dev(void *param, u32 chanmask)
+{
+ struct net_device *dev = param;
+ struct ocp_enet_private *fep = dev->priv;
+ unsigned long flags;
+ int n;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ if ((n = emac_rx_clean(dev)) != fep->rx_slot)
+ emac_rx_fill(dev, n);
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+/*
+ * This interrupt should never occur; we don't program
+ * the MAL for continuous mode.
+ */
+static void emac_txde_dev(void *param, u32 chanmask)
+{
+ struct net_device *dev = param;
+ struct ocp_enet_private *fep = dev->priv;
+
+ printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name);
+
+ emac_mac_dump(dev);
+ emac_mal_dump(dev);
+
+ /* Reenable the transmit channel */
+ mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
+}
+
+/*
+ * This interrupt should be very rare at best. This occurs when
+ * the hardware has a problem with the receive descriptors. The manual
+ * states that it occurs when the hardware finds that the receive descriptor
+ * empty bit is not set. The recovery mechanism will be to
+ * traverse through the descriptors, handle any that are marked to be
+ * handled and reinitialize each along the way. At that point the driver
+ * will be restarted.
+ */
+static void emac_rxde_dev(void *param, u32 chanmask)
+{
+ struct net_device *dev = param;
+ struct ocp_enet_private *fep = dev->priv;
+ unsigned long flags;
+
+ if (net_ratelimit()) {
+ printk(KERN_WARNING "%s: receive descriptor error\n",
+ fep->ndev->name);
+
+ emac_mac_dump(dev);
+ emac_mal_dump(dev);
+ emac_desc_dump(dev);
+ }
+
+ /* Disable RX channel */
+ spin_lock_irqsave(&fep->lock, flags);
+ mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+
+ /* For now, charge the error against all emacs */
+ fep->stats.rx_errors++;
+
+ /* so do we have any good packets still? */
+ emac_rx_clean(dev);
+
+ /* When the interface is restarted it resets processing to the
+ * first descriptor in the table.
+ */
+
+ fep->rx_slot = 0;
+ emac_rx_fill(dev, 0);
+
+ set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask);
+ set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask);
+
+ /* Reenable the receive channels */
+ mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static irqreturn_t
+emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+ unsigned long tmp_em0isr;
+
+ /* EMAC interrupt */
+ tmp_em0isr = in_be32(&emacp->em0isr);
+ if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
+ /* This error is a hard transmit error - could retransmit */
+ fep->stats.tx_errors++;
+
+ /* Reenable the transmit channel */
+ mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
+
+ } else {
+ fep->stats.rx_errors++;
+ }
+
+ if (tmp_em0isr & EMAC_ISR_RP)
+ fep->stats.rx_length_errors++;
+ if (tmp_em0isr & EMAC_ISR_ALE)
+ fep->stats.rx_frame_errors++;
+ if (tmp_em0isr & EMAC_ISR_BFCS)
+ fep->stats.rx_crc_errors++;
+ if (tmp_em0isr & EMAC_ISR_PTLE)
+ fep->stats.rx_length_errors++;
+ if (tmp_em0isr & EMAC_ISR_ORE)
+ fep->stats.rx_length_errors++;
+ if (tmp_em0isr & EMAC_ISR_TE0)
+ fep->stats.tx_aborted_errors++;
+
+ emac_err_dump(dev, tmp_em0isr);
+
+ out_be32(&emacp->em0isr, tmp_em0isr);
+
+ return IRQ_HANDLED;
+}
+
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short ctrl;
+ unsigned long flags;
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+ int len = skb->len;
+ unsigned int offset = 0, size, f, tx_slot_first;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ len -= skb->data_len;
+
+ if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) {
+ PKT_DEBUG(("emac_start_xmit() stopping queue\n"));
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+ return -EBUSY;
+ }
+
+ tx_slot_first = fep->tx_slot;
+
+ while (len) {
+ size = min(len, DESC_BUF_SIZE);
+
+ fep->tx_desc[fep->tx_slot].data_len = (short)size;
+ fep->tx_desc[fep->tx_slot].data_ptr =
+ (unsigned char *)dma_map_single(&fep->ocpdev->dev,
+ (void *)((unsigned int)skb->
+ data + offset),
+ size, DMA_TO_DEVICE);
+
+ ctrl = EMAC_TX_CTRL_DFLT;
+ if (fep->tx_slot != tx_slot_first)
+ ctrl |= MAL_TX_CTRL_READY;
+ if ((NUM_TX_BUFF - 1) == fep->tx_slot)
+ ctrl |= MAL_TX_CTRL_WRAP;
+ if (!nr_frags && (len == size)) {
+ ctrl |= MAL_TX_CTRL_LAST;
+ fep->tx_skb[fep->tx_slot] = skb;
+ }
+ if (skb->ip_summed == CHECKSUM_HW)
+ ctrl |= EMAC_TX_CTRL_TAH_CSUM;
+
+ fep->tx_desc[fep->tx_slot].ctrl = ctrl;
+
+ len -= size;
+ offset += size;
+
+ /* Bump tx count */
+ if (++fep->tx_cnt == NUM_TX_BUFF)
+ netif_stop_queue(dev);
+
+ /* Next descriptor */
+ if (++fep->tx_slot == NUM_TX_BUFF)
+ fep->tx_slot = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = 0;
+
+ while (len) {
+ size = min(len, DESC_BUF_SIZE);
+
+ dma_map_page(&fep->ocpdev->dev,
+ frag->page,
+ frag->page_offset + offset,
+ size, DMA_TO_DEVICE);
+
+ ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
+ if ((NUM_TX_BUFF - 1) == fep->tx_slot)
+ ctrl |= MAL_TX_CTRL_WRAP;
+ if ((f == (nr_frags - 1)) && (len == size)) {
+ ctrl |= MAL_TX_CTRL_LAST;
+ fep->tx_skb[fep->tx_slot] = skb;
+ }
+
+ if (skb->ip_summed == CHECKSUM_HW)
+ ctrl |= EMAC_TX_CTRL_TAH_CSUM;
+
+ fep->tx_desc[fep->tx_slot].data_len = (short)size;
+ fep->tx_desc[fep->tx_slot].data_ptr =
+ (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) +
+ frag->page_offset + offset);
+ fep->tx_desc[fep->tx_slot].ctrl = ctrl;
+
+ len -= size;
+ offset += size;
+
+ /* Bump tx count */
+ if (++fep->tx_cnt == NUM_TX_BUFF)
+ netif_stop_queue(dev);
+
+ /* Next descriptor */
+ if (++fep->tx_slot == NUM_TX_BUFF)
+ fep->tx_slot = 0;
+ }
+ }
+
+ /*
+ * Deferred set READY on first descriptor of packet to
+ * avoid TX MAL race.
+ */
+ fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;
+
+ /* Send the packet out. */
+ out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);
+
+ fep->stats.tx_packets++;
+ fep->stats.tx_bytes += skb->len;
+
+	PKT_DEBUG(("emac_start_xmit() exit\n"));
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return 0;
+}
+
+static int emac_adjust_to_link(struct ocp_enet_private *fep)
+{
+ emac_t *emacp = fep->emacp;
+ unsigned long mode_reg;
+ int full_duplex, speed;
+
+ full_duplex = 0;
+ speed = SPEED_10;
+
+ /* set mode register 1 defaults */
+ mode_reg = EMAC_M1_DEFAULT;
+
+ /* Read link mode on PHY */
+ if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) {
+ /* If an error occurred, we don't deal with it yet */
+ full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL);
+ speed = fep->phy_mii.speed;
+ }
+
+
+ /* set speed (default is 10Mb) */
+ switch (speed) {
+ case SPEED_1000:
+ mode_reg |= EMAC_M1_RFS_16K;
+ if (fep->rgmii_dev) {
+ struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(fep->rgmii_dev);
+
+ if ((rgmii->mode[fep->rgmii_input] == RTBI)
+ || (rgmii->mode[fep->rgmii_input] == TBI))
+ mode_reg |= EMAC_M1_MF_1000GPCS;
+ else
+ mode_reg |= EMAC_M1_MF_1000MBPS;
+
+ emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
+ 1000);
+ }
+ break;
+ case SPEED_100:
+ mode_reg |= EMAC_M1_MF_100MBPS | EMAC_M1_RFS_4K;
+ if (fep->rgmii_dev)
+ emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
+ 100);
+ if (fep->zmii_dev)
+ emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
+ 100);
+ break;
+ case SPEED_10:
+ default:
+ mode_reg = (mode_reg & ~EMAC_M1_MF_100MBPS) | EMAC_M1_RFS_4K;
+ if (fep->rgmii_dev)
+ emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
+ 10);
+ if (fep->zmii_dev)
+ emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
+ 10);
+ }
+
+ if (full_duplex)
+ mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
+ else
+ mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);
+
+ LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n",
+ fep->ndev->name, speed, full_duplex, fep->opened));
+
+ printk(KERN_INFO "%s: Speed: %d, %s duplex.\n",
+ fep->ndev->name, speed, full_duplex ? "Full" : "Half");
+ if (fep->opened)
+ out_be32(&emacp->em0mr1, mode_reg);
+
+ return 0;
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ocp_enet_private *fep = ndev->priv;
+ emac_t *emacp = fep->emacp;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ /* set the high address */
+ out_be32(&emacp->em0iahr,
+ (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);
+
+ /* set the low address */
+ out_be32(&emacp->em0ialr,
+ (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
+ | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
+
+ return 0;
+}
+
+static int emac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ int old_mtu = dev->mtu;
+ unsigned long mode_reg;
+ emac_t *emacp = fep->emacp;
+ u32 em0mr0;
+ int i, full;
+ unsigned long flags;
+
+ if ((new_mtu < EMAC_MIN_MTU) || (new_mtu > EMAC_MAX_MTU)) {
+ printk(KERN_ERR
+ "emac: Invalid MTU setting, MTU must be between %d and %d\n",
+ EMAC_MIN_MTU, EMAC_MAX_MTU);
+ return -EINVAL;
+ }
+
+ if (old_mtu != new_mtu && netif_running(dev)) {
+ /* Stop rx engine */
+ em0mr0 = in_be32(&emacp->em0mr0);
+ out_be32(&emacp->em0mr0, em0mr0 & ~EMAC_M0_RXE);
+
+ /* Wait for descriptors to be empty */
+ do {
+ full = 0;
+ for (i = 0; i < NUM_RX_BUFF; i++)
+ if (!(fep->rx_desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
+ printk(KERN_NOTICE
+ "emac: RX ring is still full\n");
+ full = 1;
+ }
+ } while (full);
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+
+ /* Destroy all old rx skbs */
+ for (i = 0; i < NUM_RX_BUFF; i++) {
+ dma_unmap_single(&fep->ocpdev->dev,
+ fep->rx_desc[i].data_ptr,
+ fep->rx_desc[i].data_len,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(fep->rx_skb[i]);
+ fep->rx_skb[i] = NULL;
+ }
+
+ /* Set new rx_buffer_size, jumbo cap, and advertise new mtu */
+ mode_reg = in_be32(&emacp->em0mr1);
+ if (new_mtu > ENET_DEF_MTU_SIZE) {
+ mode_reg |= EMAC_M1_JUMBO_ENABLE;
+ fep->rx_buffer_size = EMAC_MAX_FRAME;
+ } else {
+ mode_reg &= ~EMAC_M1_JUMBO_ENABLE;
+ fep->rx_buffer_size = ENET_DEF_BUF_SIZE;
+ }
+ dev->mtu = new_mtu;
+ out_be32(&emacp->em0mr1, mode_reg);
+
+ /* Re-init rx skbs */
+ fep->rx_slot = 0;
+ emac_rx_fill(dev, 0);
+
+ /* Restart the rx engine */
+ mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+ out_be32(&emacp->em0mr0, em0mr0 | EMAC_M0_RXE);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+ }
+
+ return 0;
+}
+
+static void __emac_set_multicast_list(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+ u32 rmr = in_be32(&emacp->em0rmr);
+
+ /* First clear all special bits, they can be set later */
+ rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE);
+
+ if (dev->flags & IFF_PROMISC) {
+ rmr |= EMAC_RMR_PME;
+ } else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
+ /*
+ * Must be setting up to use multicast
+ * Now check for promiscuous multicast
+ */
+ rmr |= EMAC_RMR_PMME;
+ } else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {
+ unsigned short em0gaht[4] = { 0, 0, 0, 0 };
+ struct dev_mc_list *dmi;
+
+ /* Need to hash on the multicast address. */
+ for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
+ unsigned long mc_crc;
+ unsigned int bit_number;
+
+ mc_crc = ether_crc(6, (char *)dmi->dmi_addr);
+ bit_number = 63 - (mc_crc >> 26); /* MSB: 0 LSB: 63 */
+ em0gaht[bit_number >> 4] |=
+ 0x8000 >> (bit_number & 0x0f);
+ }
+ emacp->em0gaht1 = em0gaht[0];
+ emacp->em0gaht2 = em0gaht[1];
+ emacp->em0gaht3 = em0gaht[2];
+ emacp->em0gaht4 = em0gaht[3];
+
+ /* Turn on multicast addressing */
+ rmr |= EMAC_RMR_MAE;
+ }
+ out_be32(&emacp->em0rmr, rmr);
+}
+
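+/*
+ * Illustrative sketch (not part of the original driver): how one multicast
+ * address ends up as a single bit in the four 16-bit EM0GAHTx group address
+ * hash tables programmed in __emac_set_multicast_list() above:
+ *
+ *	u32 crc  = ether_crc(6, addr);		- 32-bit CRC of the MAC
+ *	int bit  = 63 - (crc >> 26);		- top 6 CRC bits select 0..63
+ *	int reg  = bit >> 4;			- which of em0gaht1..em0gaht4
+ *	u16 mask = 0x8000 >> (bit & 0x0f);	- bit within that register
+ */
+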
+static int emac_init_tah(struct ocp_enet_private *fep)
+{
+ tah_t *tahp;
+
+ /* Initialize TAH and enable checksum verification */
+ tahp = (tah_t *) ioremap(fep->tah_dev->def->paddr, sizeof(*tahp));
+
+ if (tahp == NULL) {
+ printk(KERN_ERR "tah%d: Cannot ioremap TAH registers!\n",
+ fep->tah_dev->def->index);
+
+ return -ENOMEM;
+ }
+
+ out_be32(&tahp->tah_mr, TAH_MR_SR);
+
+ /* wait for reset to complete */
+ while (in_be32(&tahp->tah_mr) & TAH_MR_SR) ;
+
+	/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
+ out_be32(&tahp->tah_mr,
+ TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
+ TAH_MR_DIG);
+
+	iounmap(tahp);
+
+ return 0;
+}
+
+static void emac_init_rings(struct net_device *dev)
+{
+ struct ocp_enet_private *ep = dev->priv;
+ int loop;
+
+ ep->tx_desc = (struct mal_descriptor *)((char *)ep->mal->tx_virt_addr +
+ (ep->mal_tx_chan *
+ MAL_DT_ALIGN));
+ ep->rx_desc =
+ (struct mal_descriptor *)((char *)ep->mal->rx_virt_addr +
+ (ep->mal_rx_chan * MAL_DT_ALIGN));
+
+ /* Fill in the transmit descriptor ring. */
+ for (loop = 0; loop < NUM_TX_BUFF; loop++) {
+ if (ep->tx_skb[loop]) {
+ dma_unmap_single(&ep->ocpdev->dev,
+ ep->tx_desc[loop].data_ptr,
+ ep->tx_desc[loop].data_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(ep->tx_skb[loop]);
+ }
+ ep->tx_skb[loop] = NULL;
+ ep->tx_desc[loop].ctrl = 0;
+ ep->tx_desc[loop].data_len = 0;
+ ep->tx_desc[loop].data_ptr = NULL;
+ }
+ ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;
+
+ /* Format the receive descriptor ring. */
+ ep->rx_slot = 0;
+ /* Default is MTU=1500 + Ethernet overhead */
+ ep->rx_buffer_size = dev->mtu + ENET_HEADER_SIZE + ENET_FCS_SIZE;
+ emac_rx_fill(dev, 0);
+ if (ep->rx_slot != 0) {
+ printk(KERN_ERR
+		       "%s: Not enough memory for RX chain during open?\n",
+		       dev->name);
+		/* We couldn't fill the ring at startup?  We could clean up
+		 * and fail to open, but for now we try to carry on.  It may
+		 * be a sign of a bad NUM_RX_BUFF value.
+		 */
+ }
+
+ ep->tx_cnt = 0;
+ ep->tx_slot = 0;
+ ep->ack_slot = 0;
+}
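+
+/* Note: the descriptor rings set up above are not allocated here; they live
+ * in the MAL's DMA-coherent block at mal->{tx,rx}_virt_addr plus
+ * (channel number * MAL_DT_ALIGN), which mal_probe() allocates.
+ */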
+
+static void emac_reset_configure(struct ocp_enet_private *fep)
+{
+ emac_t *emacp = fep->emacp;
+ int i;
+
+ mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
+ mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+
+ /*
+ * Check for a link, some PHYs don't provide a clock if
+ * no link is present. Some EMACs will not come out of
+ * soft reset without a PHY clock present.
+ */
+ if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
+ /* Reset the EMAC */
+ out_be32(&emacp->em0mr0, EMAC_M0_SRST);
+ udelay(20);
+ for (i = 0; i < 100; i++) {
+ if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (i >= 100) {
+ printk(KERN_ERR "%s: Cannot reset EMAC\n",
+ fep->ndev->name);
+ return;
+ }
+ }
+
+ /* Switch IRQs off for now */
+ out_be32(&emacp->em0iser, 0);
+
+ /* Configure MAL rx channel */
+ mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG);
+
+ /* set the high address */
+ out_be32(&emacp->em0iahr,
+ (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);
+
+ /* set the low address */
+ out_be32(&emacp->em0ialr,
+ (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
+ | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
+
+ /* Adjust to link */
+ if (netif_carrier_ok(fep->ndev))
+ emac_adjust_to_link(fep);
+
+ /* enable broadcast/individual address and RX FIFO defaults */
+ out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT);
+
+ /* set transmit request threshold register */
+ out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT);
+
+ /* Reconfigure multicast */
+ __emac_set_multicast_list(fep->ndev);
+
+ /* Set receiver/transmitter defaults */
+ out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT);
+ out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT);
+ out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT);
+
+ /* set frame gap */
+ out_be32(&emacp->em0ipgvr, CONFIG_IBM_EMAC_FGAP);
+
+ /* set VLAN Tag Protocol Identifier */
+ out_be32(&emacp->em0vtpid, 0x8100);
+
+ /* Init ring buffers */
+ emac_init_rings(fep->ndev);
+}
+
+static void emac_kick(struct ocp_enet_private *fep)
+{
+ emac_t *emacp = fep->emacp;
+ unsigned long emac_ier;
+
+ emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
+ EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
+ EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
+
+ out_be32(&emacp->em0iser, emac_ier);
+
+ /* enable all MAL transmit and receive channels */
+ mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
+ mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+
+ /* set transmit and receive enable */
+ out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE);
+}
+
+static void
+emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep)
+{
+ u32 advertise;
+ int autoneg;
+ int forced_speed;
+ int forced_duplex;
+
+ /* Default advertise */
+ advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full;
+ autoneg = fep->want_autoneg;
+ forced_speed = fep->phy_mii.speed;
+ forced_duplex = fep->phy_mii.duplex;
+
+ /* Setup link parameters */
+ if (ep) {
+ if (ep->autoneg == AUTONEG_ENABLE) {
+ advertise = ep->advertising;
+ autoneg = 1;
+ } else {
+ autoneg = 0;
+ forced_speed = ep->speed;
+ forced_duplex = ep->duplex;
+ }
+ }
+
+ /* Configure PHY & start aneg */
+ fep->want_autoneg = autoneg;
+ if (autoneg) {
+ LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n",
+ fep->ndev->name, advertise));
+ fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise);
+ } else {
+ LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n",
+ fep->ndev->name, forced_speed, forced_duplex));
+ fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed,
+ forced_duplex);
+ }
+ fep->timer_ticks = 0;
+ mod_timer(&fep->link_timer, jiffies + HZ);
+}
+
+static void emac_link_timer(unsigned long data)
+{
+ struct ocp_enet_private *fep = (struct ocp_enet_private *)data;
+ int link;
+
+ if (fep->going_away)
+ return;
+
+ spin_lock_irq(&fep->lock);
+
+ link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii);
+ LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link));
+
+ if (link == netif_carrier_ok(fep->ndev)) {
+ if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10)
+ emac_start_link(fep, NULL);
+ goto out;
+ }
+ printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name,
+ link ? "Up" : "Down");
+ if (link) {
+ netif_carrier_on(fep->ndev);
+ /* Chip needs a full reset on config change. That sucks, so I
+ * should ultimately move that to some tasklet to limit
+ * latency peaks caused by this code
+ */
+ emac_reset_configure(fep);
+ if (fep->opened)
+ emac_kick(fep);
+ } else {
+ fep->timer_ticks = 0;
+ netif_carrier_off(fep->ndev);
+ }
+ out:
+ mod_timer(&fep->link_timer, jiffies + HZ);
+ spin_unlock_irq(&fep->lock);
+}
+
+static void emac_set_multicast_list(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+
+ spin_lock_irq(&fep->lock);
+ __emac_set_multicast_list(dev);
+ spin_unlock_irq(&fep->lock);
+}
+
+static int emac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ocp_enet_private *fep = ndev->priv;
+
+ cmd->supported = fep->phy_mii.def->features;
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = fep->mii_phy_addr;
+ spin_lock_irq(&fep->lock);
+ cmd->autoneg = fep->want_autoneg;
+ cmd->speed = fep->phy_mii.speed;
+ cmd->duplex = fep->phy_mii.duplex;
+ spin_unlock_irq(&fep->lock);
+ return 0;
+}
+
+static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ocp_enet_private *fep = ndev->priv;
+ unsigned long features = fep->phy_mii.def->features;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ return -EINVAL;
+ if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_DISABLE)
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_HALF &&
+ (features & SUPPORTED_10baseT_Half) == 0)
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ (features & SUPPORTED_10baseT_Full) == 0)
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_HALF &&
+ (features & SUPPORTED_100baseT_Half) == 0)
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ (features & SUPPORTED_100baseT_Full) == 0)
+ return -EINVAL;
+ break;
+ case SPEED_1000:
+ if (cmd->duplex == DUPLEX_HALF &&
+ (features & SUPPORTED_1000baseT_Half) == 0)
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ (features & SUPPORTED_1000baseT_Full) == 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ } else if ((features & SUPPORTED_Autoneg) == 0)
+ return -EINVAL;
+ spin_lock_irq(&fep->lock);
+ emac_start_link(fep, cmd);
+ spin_unlock_irq(&fep->lock);
+ return 0;
+}
+
+static void
+emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ struct ocp_enet_private *fep = ndev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "IBM EMAC %d", fep->ocpdev->def->index);
+ info->regdump_len = 0;
+}
+
+static int emac_nway_reset(struct net_device *ndev)
+{
+ struct ocp_enet_private *fep = ndev->priv;
+
+ if (!fep->want_autoneg)
+ return -EINVAL;
+ spin_lock_irq(&fep->lock);
+ emac_start_link(fep, NULL);
+ spin_unlock_irq(&fep->lock);
+ return 0;
+}
+
+static u32 emac_get_link(struct net_device *ndev)
+{
+ return netif_carrier_ok(ndev);
+}
+
+static struct ethtool_ops emac_ethtool_ops = {
+ .get_settings = emac_get_settings,
+ .set_settings = emac_set_settings,
+ .get_drvinfo = emac_get_drvinfo,
+ .nway_reset = emac_nway_reset,
+ .get_link = emac_get_link
+};
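+
+/* For reference: a plain "ethtool ethN" query from userspace lands in
+ * emac_get_settings() above, while "ethtool -s ethN speed 100 duplex full
+ * autoneg off" goes through emac_set_settings(), which checks the request
+ * against the PHY's supported features before calling emac_start_link().
+ */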
+
+static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ uint *data = (uint *) & rq->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data[0] = fep->mii_phy_addr;
+ /* Fall through */
+ case SIOCGMIIREG:
+ data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
+ return 0;
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int emac_open(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ int rc;
+
+ spin_lock_irq(&fep->lock);
+
+ fep->opened = 1;
+ netif_carrier_off(dev);
+
+ /* Reset & configure the chip */
+ emac_reset_configure(fep);
+
+ spin_unlock_irq(&fep->lock);
+
+ /* Request our interrupt lines */
+ rc = request_irq(dev->irq, emac_mac_irq, 0, "IBM EMAC MAC", dev);
+ if (rc != 0) {
+		printk(KERN_ERR "%s: request_irq %d failed\n", dev->name, dev->irq);
+ goto bail;
+ }
+ /* Kick the chip rx & tx channels into life */
+ spin_lock_irq(&fep->lock);
+ emac_kick(fep);
+ spin_unlock_irq(&fep->lock);
+
+ netif_start_queue(dev);
+ bail:
+ return rc;
+}
+
+static int emac_close(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ emac_t *emacp = fep->emacp;
+
+ /* XXX Stop IRQ emitting here */
+ spin_lock_irq(&fep->lock);
+ fep->opened = 0;
+ mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
+ mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ /*
+ * Check for a link, some PHYs don't provide a clock if
+ * no link is present. Some EMACs will not come out of
+ * soft reset without a PHY clock present.
+ */
+ if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
+ out_be32(&emacp->em0mr0, EMAC_M0_SRST);
+ udelay(10);
+
+		if (in_be32(&emacp->em0mr0) & EMAC_M0_SRST) {
+			/* Not sure what to do here; hopefully it clears before another open */
+ printk(KERN_ERR
+ "%s: Phy SoftReset didn't clear, no link?\n",
+ dev->name);
+ }
+ }
+
+ /* Free the irq's */
+ free_irq(dev->irq, dev);
+
+ spin_unlock_irq(&fep->lock);
+
+ return 0;
+}
+
+static void emac_remove(struct ocp_device *ocpdev)
+{
+ struct net_device *dev = ocp_get_drvdata(ocpdev);
+ struct ocp_enet_private *ep = dev->priv;
+
+ /* FIXME: locking, races, ... */
+ ep->going_away = 1;
+ ocp_set_drvdata(ocpdev, NULL);
+ if (ep->rgmii_dev)
+ emac_close_rgmii(ep->rgmii_dev);
+ if (ep->zmii_dev)
+ emac_close_zmii(ep->zmii_dev);
+
+ unregister_netdev(dev);
+ del_timer_sync(&ep->link_timer);
+ mal_unregister_commac(ep->mal, &ep->commac);
+ iounmap((void *)ep->emacp);
+	free_netdev(dev);
+}
+
+struct mal_commac_ops emac_commac_ops = {
+ .txeob = &emac_txeob_dev,
+ .txde = &emac_txde_dev,
+ .rxeob = &emac_rxeob_dev,
+ .rxde = &emac_rxde_dev,
+};
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int emac_netpoll(struct net_device *ndev)
+{
+ emac_rxeob_dev((void *)ndev, 0);
+ emac_txeob_dev((void *)ndev, 0);
+ return 0;
+}
+#endif
+
+static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
+{
+ int deferred_init = 0;
+ int rc = 0, i;
+ struct net_device *ndev;
+ struct ocp_enet_private *ep;
+ struct ocp_func_emac_data *emacdata;
+ int commac_reg = 0;
+ u32 phy_map;
+
+ emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
+ if (!emacdata) {
+ printk(KERN_ERR "emac%d: Missing additional data!\n",
+ ocpdev->def->index);
+ return -ENODEV;
+ }
+
+ /* Allocate our net_device structure */
+ ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
+ if (ndev == NULL) {
+ printk(KERN_ERR
+ "emac%d: Could not allocate ethernet device.\n",
+ ocpdev->def->index);
+ return -ENOMEM;
+ }
+ ep = ndev->priv;
+ ep->ndev = ndev;
+ ep->ocpdev = ocpdev;
+ ndev->irq = ocpdev->def->irq;
+ ep->wol_irq = emacdata->wol_irq;
+ if (emacdata->mdio_idx >= 0) {
+ if (emacdata->mdio_idx == ocpdev->def->index) {
+ /* Set the common MDIO net_device */
+ mdio_ndev = ndev;
+ deferred_init = 1;
+ }
+ ep->mdio_dev = mdio_ndev;
+ } else {
+ ep->mdio_dev = ndev;
+ }
+
+ ocp_set_drvdata(ocpdev, ndev);
+
+ spin_lock_init(&ep->lock);
+
+	/* Fill out MAL information and register commac */
+ ep->mal = mal;
+ ep->mal_tx_chan = emacdata->mal_tx_chan;
+ ep->mal_rx_chan = emacdata->mal_rx_chan;
+ ep->commac.ops = &emac_commac_ops;
+ ep->commac.dev = ndev;
+ ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan);
+ ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan);
+ rc = mal_register_commac(ep->mal, &ep->commac);
+ if (rc != 0)
+ goto bail;
+ commac_reg = 1;
+
+ /* Map our MMIOs */
+ ep->emacp = (emac_t *) ioremap(ocpdev->def->paddr, sizeof(emac_t));
+
+ /* Check if we need to attach to a ZMII */
+ if (emacdata->zmii_idx >= 0) {
+ ep->zmii_input = emacdata->zmii_mux;
+ ep->zmii_dev =
+ ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII,
+ emacdata->zmii_idx);
+ if (ep->zmii_dev == NULL)
+ printk(KERN_WARNING
+			       "emac%d: ZMII %d requested but not found!\n",
+ ocpdev->def->index, emacdata->zmii_idx);
+ else if ((rc =
+ emac_init_zmii(ep->zmii_dev, ep->zmii_input,
+ emacdata->phy_mode)) != 0)
+ goto bail;
+ }
+
+ /* Check if we need to attach to a RGMII */
+ if (emacdata->rgmii_idx >= 0) {
+ ep->rgmii_input = emacdata->rgmii_mux;
+ ep->rgmii_dev =
+ ocp_find_device(OCP_ANY_ID, OCP_FUNC_RGMII,
+ emacdata->rgmii_idx);
+ if (ep->rgmii_dev == NULL)
+ printk(KERN_WARNING
+			       "emac%d: RGMII %d requested but not found!\n",
+ ocpdev->def->index, emacdata->rgmii_idx);
+ else if ((rc =
+ emac_init_rgmii(ep->rgmii_dev, ep->rgmii_input,
+ emacdata->phy_mode)) != 0)
+ goto bail;
+ }
+
+ /* Check if we need to attach to a TAH */
+ if (emacdata->tah_idx >= 0) {
+ ep->tah_dev =
+ ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH,
+ emacdata->tah_idx);
+ if (ep->tah_dev == NULL)
+ printk(KERN_WARNING
+			       "emac%d: TAH %d requested but not found!\n",
+ ocpdev->def->index, emacdata->tah_idx);
+ else if ((rc = emac_init_tah(ep)) != 0)
+ goto bail;
+ }
+
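+	/* If this EMAC provides the shared MDIO bus, initialize any EMACs
+	 * that emac_probe() queued on emac_init_list while waiting for it.
+	 */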
+ if (deferred_init) {
+ if (!list_empty(&emac_init_list)) {
+ struct list_head *entry;
+ struct emac_def_dev *ddev;
+
+ list_for_each(entry, &emac_init_list) {
+ ddev =
+ list_entry(entry, struct emac_def_dev,
+ link);
+ emac_init_device(ddev->ocpdev, ddev->mal);
+ }
+ }
+ }
+
+ /* Init link monitoring timer */
+ init_timer(&ep->link_timer);
+ ep->link_timer.function = emac_link_timer;
+ ep->link_timer.data = (unsigned long)ep;
+ ep->timer_ticks = 0;
+
+ /* Fill up the mii_phy structure */
+ ep->phy_mii.dev = ndev;
+ ep->phy_mii.mdio_read = emac_phy_read;
+ ep->phy_mii.mdio_write = emac_phy_write;
+ ep->phy_mii.mode = emacdata->phy_mode;
+
+ /* Find PHY */
+ phy_map = emacdata->phy_map | busy_phy_map;
+ for (i = 0; i <= 0x1f; i++, phy_map >>= 1) {
+ if ((phy_map & 0x1) == 0) {
+ int val = emac_phy_read(ndev, i, MII_BMCR);
+ if (val != 0xffff && val != -1)
+ break;
+ }
+ }
+ if (i == 0x20) {
+ printk(KERN_WARNING "emac%d: Can't find PHY.\n",
+ ocpdev->def->index);
+ rc = -ENODEV;
+ goto bail;
+ }
+ busy_phy_map |= 1 << i;
+ ep->mii_phy_addr = i;
+ rc = mii_phy_probe(&ep->phy_mii, i);
+ if (rc) {
+ printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n",
+ ocpdev->def->index);
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Setup initial PHY config & startup aneg */
+ if (ep->phy_mii.def->ops->init)
+ ep->phy_mii.def->ops->init(&ep->phy_mii);
+ netif_carrier_off(ndev);
+ if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
+ ep->want_autoneg = 1;
+ emac_start_link(ep, NULL);
+
+ /* read the MAC Address */
+ for (i = 0; i < 6; i++)
+ ndev->dev_addr[i] = emacdata->mac_addr[i];
+
+ /* Fill in the driver function table */
+ ndev->open = &emac_open;
+ ndev->hard_start_xmit = &emac_start_xmit;
+ ndev->stop = &emac_close;
+ ndev->get_stats = &emac_stats;
+ if (emacdata->jumbo)
+ ndev->change_mtu = &emac_change_mtu;
+ ndev->set_mac_address = &emac_set_mac_address;
+ ndev->set_multicast_list = &emac_set_multicast_list;
+ ndev->do_ioctl = &emac_ioctl;
+ SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+ if (emacdata->tah_idx >= 0)
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ ndev->poll_controller = emac_netpoll;
+#endif
+
+ SET_MODULE_OWNER(ndev);
+
+ rc = register_netdev(ndev);
+ if (rc != 0)
+ goto bail;
+
+	printk(KERN_INFO "%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ndev->name,
+ ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
+ ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
+ printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
+ ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);
+
+ bail:
+ if (rc && commac_reg)
+ mal_unregister_commac(ep->mal, &ep->commac);
+ if (rc && ndev)
+		free_netdev(ndev);
+
+ return rc;
+}
+
+static int emac_probe(struct ocp_device *ocpdev)
+{
+ struct ocp_device *maldev;
+ struct ibm_ocp_mal *mal;
+ struct ocp_func_emac_data *emacdata;
+
+ emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
+ if (emacdata == NULL) {
+		printk(KERN_ERR "emac%d: Missing additional data!\n",
+ ocpdev->def->index);
+ return -ENODEV;
+ }
+
+ /* Get the MAL device */
+ maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
+ if (maldev == NULL) {
+		printk(KERN_ERR "emac%d: no MAL device\n", ocpdev->def->index);
+ return -ENODEV;
+ }
+ /*
+ * Get MAL driver data, it must be here due to link order.
+ * When the driver is modularized, symbol dependencies will
+ * ensure the MAL driver is already present if built as a
+ * module.
+ */
+ mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
+ if (mal == NULL) {
+		printk(KERN_ERR "emac%d: no MAL driver data\n", ocpdev->def->index);
+ return -ENODEV;
+ }
+
+ /* If we depend on another EMAC for MDIO, wait for it to show up */
+ if (emacdata->mdio_idx >= 0 &&
+ (emacdata->mdio_idx != ocpdev->def->index) && !mdio_ndev) {
+ struct emac_def_dev *ddev;
+ /* Add this index to the deferred init table */
+		ddev = kmalloc(sizeof(struct emac_def_dev), GFP_KERNEL);
+		if (!ddev)
+			return -ENOMEM;
+ ddev->ocpdev = ocpdev;
+ ddev->mal = mal;
+ list_add_tail(&ddev->link, &emac_init_list);
+ } else {
+ emac_init_device(ocpdev, mal);
+ }
+
+ return 0;
+}
+
+/* Structure for a device driver */
+static struct ocp_device_id emac_ids[] = {
+ {.vendor = OCP_ANY_ID,.function = OCP_FUNC_EMAC},
+ {.vendor = OCP_VENDOR_INVALID}
+};
+
+static struct ocp_driver emac_driver = {
+ .name = "emac",
+ .id_table = emac_ids,
+
+ .probe = emac_probe,
+ .remove = emac_remove,
+};
+
+static int __init emac_init(void)
+{
+ printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n");
+ printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n");
+
+ if (skb_res > 2) {
+ printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n",
+ skb_res);
+ skb_res = 2;
+ }
+
+ return ocp_register_driver(&emac_driver);
+}
+
+static void __exit emac_exit(void)
+{
+ ocp_unregister_driver(&emac_driver);
+}
+
+module_init(emac_init);
+module_exit(emac_exit);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
new file mode 100644
index 000000000000..97e6e1ea8c89
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -0,0 +1,146 @@
+/*
+ * ibm_emac_core.h
+ *
+ * Ethernet driver for the built-in Ethernet on the IBM 405 PowerPC
+ * processor.
+ *
+ * Armin Kuster akuster@mvista.com
+ * Sept, 2001
+ *
+ * Original driver
+ * Johnnie Peters
+ * jpeters@mvista.com
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_CORE_H_
+#define _IBM_EMAC_CORE_H_
+
+#include <linux/netdevice.h>
+#include <asm/ocp.h>
+#include <asm/mmu.h> /* For phys_addr_t */
+
+#include "ibm_emac.h"
+#include "ibm_emac_phy.h"
+#include "ibm_emac_rgmii.h"
+#include "ibm_emac_zmii.h"
+#include "ibm_emac_mal.h"
+#include "ibm_emac_tah.h"
+
+#ifndef CONFIG_IBM_EMAC_TXB
+#define NUM_TX_BUFF 64
+#define NUM_RX_BUFF 64
+#else
+#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
+#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
+#endif
+
+/* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+#ifndef CONFIG_IBM_EMAC_SKBRES
+#define SKB_RES 2
+#else
+#define SKB_RES CONFIG_IBM_EMAC_SKBRES
+#endif
+
+/* Note about alignment: alloc_skb() returns a cache line
+ * aligned buffer. However, dev_alloc_skb() will add 16 more
+ * bytes and "reserve" them, so our buffer will actually end
+ * on a half cache line. What we do is use alloc_skb() directly,
+ * allocating 16 more bytes to match the total amount allocated
+ * by dev_alloc_skb(), but we don't reserve them.
+ */
+#define MAX_NUM_BUF_DESC 255
+#define DESC_BUF_SIZE 4080 /* max 4096-16 */
+#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16)
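+
+/* For reference: 4080 / 16 = 255, i.e. DESC_BUF_SIZE_REG is the same buffer
+ * size expressed in the 16-byte units that mal_set_rcbs() writes into the
+ * MAL receive channel buffer size (RCBS) registers.
+ */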
+
+/* Transmitter timeout. */
+#define TX_TIMEOUT (2*HZ)
+
+/* MDIO latency delay */
+#define MDIO_DELAY 250
+
+/* Power management shift registers */
+#define IBM_CPM_EMMII 0 /* Shift value for MII */
+#define IBM_CPM_EMRX 1 /* Shift value for recv */
+#define IBM_CPM_EMTX 2 /* Shift value for MAC */
+#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX))
+
+#define ENET_HEADER_SIZE 14
+#define ENET_FCS_SIZE 4
+#define ENET_DEF_MTU_SIZE 1500
+#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE)
+#define EMAC_MIN_FRAME 64
+#define EMAC_MAX_FRAME 9018
+#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
+#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
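+
+/* Worked out: EMAC_MIN_MTU = 64 - 14 - 4 = 46 bytes and
+ * EMAC_MAX_MTU = 9018 - 14 - 4 = 9000 bytes, the range enforced by
+ * emac_change_mtu().
+ */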
+
+#ifdef CONFIG_IBM_EMAC_ERRMSG
+void emac_serr_dump_0(struct net_device *dev);
+void emac_serr_dump_1(struct net_device *dev);
+void emac_err_dump(struct net_device *dev, int em0isr);
+void emac_phy_dump(struct net_device *);
+void emac_desc_dump(struct net_device *);
+void emac_mac_dump(struct net_device *);
+void emac_mal_dump(struct net_device *);
+#else
+#define emac_serr_dump_0(dev) do { } while (0)
+#define emac_serr_dump_1(dev) do { } while (0)
+#define emac_err_dump(dev,x) do { } while (0)
+#define emac_phy_dump(dev) do { } while (0)
+#define emac_desc_dump(dev) do { } while (0)
+#define emac_mac_dump(dev) do { } while (0)
+#define emac_mal_dump(dev) do { } while (0)
+#endif
+
+struct ocp_enet_private {
+ struct sk_buff *tx_skb[NUM_TX_BUFF];
+ struct sk_buff *rx_skb[NUM_RX_BUFF];
+ struct mal_descriptor *tx_desc;
+ struct mal_descriptor *rx_desc;
+ struct mal_descriptor *rx_dirty;
+ struct net_device_stats stats;
+ int tx_cnt;
+ int rx_slot;
+ int dirty_rx;
+ int tx_slot;
+ int ack_slot;
+ int rx_buffer_size;
+
+ struct mii_phy phy_mii;
+ int mii_phy_addr;
+ int want_autoneg;
+ int timer_ticks;
+ struct timer_list link_timer;
+ struct net_device *mdio_dev;
+
+ struct ocp_device *rgmii_dev;
+ int rgmii_input;
+
+ struct ocp_device *zmii_dev;
+ int zmii_input;
+
+ struct ibm_ocp_mal *mal;
+ int mal_tx_chan, mal_rx_chan;
+ struct mal_commac commac;
+
+ struct ocp_device *tah_dev;
+
+ int opened;
+ int going_away;
+ int wol_irq;
+ emac_t *emacp;
+ struct ocp_device *ocpdev;
+ struct net_device *ndev;
+ spinlock_t lock;
+};
+#endif /* _IBM_EMAC_CORE_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
new file mode 100644
index 000000000000..c8512046cf84
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_debug.c
@@ -0,0 +1,224 @@
+/*
+ * ibm_emac_debug.c
+ *
+ * This has all the debug routines that were in *_enet.c
+ *
+ * Armin Kuster akuster@mvista.com
+ * April , 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include "ibm_emac_mal.h"
+#include "ibm_emac_zmii.h"
+#include "ibm_emac_core.h"
+
+extern int emac_phy_read(struct net_device *dev, int mii_id, int reg);
+
+void emac_phy_dump(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ unsigned long i;
+ uint data;
+
+ printk(KERN_DEBUG " Prepare for Phy dump....\n");
+ for (i = 0; i < 0x1A; i++) {
+ data = emac_phy_read(dev, fep->mii_phy_addr, i);
+ printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
+ if (i == 0x07)
+ i = 0x0f;
+ }
+}
+
+void emac_desc_dump(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ int curr_slot;
+
+ printk(KERN_DEBUG
+ "dumping the receive descriptors: current slot is %d\n",
+ fep->rx_slot);
+ for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) {
+ printk(KERN_DEBUG
+ "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
+ curr_slot, fep->rx_desc[curr_slot].ctrl,
+ fep->rx_desc[curr_slot].data_len,
+ (unsigned int)fep->rx_desc[curr_slot].data_ptr);
+ }
+}
+
+void emac_mac_dump(struct net_device *dev)
+{
+ struct ocp_enet_private *fep = dev->priv;
+ volatile emac_t *emacp = fep->emacp;
+
+ printk(KERN_DEBUG "EMAC DEBUG ********** \n");
+ printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0));
+ printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1));
+ printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0));
+ printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1));
+ printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr));
+ printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr));
+ printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser));
+ printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr));
+ printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr));
+ printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n",
+ in_be32(&emacp->em0vtpid));
+}
+
+void emac_mal_dump(struct net_device *dev)
+{
+ struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+
+ printk(KERN_DEBUG " MAL DEBUG ********** \n");
+ printk(KERN_DEBUG " MCR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
+ printk(KERN_DEBUG " ESR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
+ printk(KERN_DEBUG " IER ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
+#ifdef CONFIG_40x
+ printk(KERN_DEBUG " DBR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
+#endif /* CONFIG_40x */
+ printk(KERN_DEBUG " TXCASR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
+ printk(KERN_DEBUG " TXCARR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
+ printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
+ printk(KERN_DEBUG " TXDEIR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
+ printk(KERN_DEBUG " RXCASR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
+ printk(KERN_DEBUG " RXCARR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
+ printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
+ printk(KERN_DEBUG " RXDEIR ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
+ printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
+ printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
+ printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
+ printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
+ printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
+ printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
+ printk(KERN_DEBUG " RCBS0 ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
+ printk(KERN_DEBUG " RCBS1 ==> 0x%x\n",
+ (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
+}
+
+void emac_serr_dump_0(struct net_device *dev)
+{
+ struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+ unsigned long int mal_error, plb_error, plb_addr;
+
+ mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+ printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n",
+ (mal_error & 0x40000000) ? "Receive" :
+ "Transmit", (mal_error & 0x3e000000) >> 25);
+ printk(KERN_DEBUG " ----- latched error -----\n");
+ if (mal_error & MALESR_DE)
+ printk(KERN_DEBUG " DE: descriptor error\n");
+ if (mal_error & MALESR_OEN)
+ printk(KERN_DEBUG " ONE: OPB non-fullword error\n");
+ if (mal_error & MALESR_OTE)
+ printk(KERN_DEBUG " OTE: OPB timeout error\n");
+ if (mal_error & MALESR_OSE)
+ printk(KERN_DEBUG " OSE: OPB slave error\n");
+
+ if (mal_error & MALESR_PEIN) {
+ plb_error = mfdcr(DCRN_PLB0_BESR);
+ printk(KERN_DEBUG
+ " PEIN: PLB error, PLB0_BESR is 0x%x\n",
+ (unsigned int)plb_error);
+ plb_addr = mfdcr(DCRN_PLB0_BEAR);
+ printk(KERN_DEBUG
+ " PEIN: PLB error, PLB0_BEAR is 0x%x\n",
+ (unsigned int)plb_addr);
+ }
+}
+
+void emac_serr_dump_1(struct net_device *dev)
+{
+ struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+ int mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+
+ printk(KERN_DEBUG " ----- cumulative errors -----\n");
+ if (mal_error & MALESR_DEI)
+ printk(KERN_DEBUG " DEI: descriptor error interrupt\n");
+ if (mal_error & MALESR_ONEI)
+ printk(KERN_DEBUG " OPB non-fullword error interrupt\n");
+ if (mal_error & MALESR_OTEI)
+ printk(KERN_DEBUG " OTEI: timeout error interrupt\n");
+ if (mal_error & MALESR_OSEI)
+ printk(KERN_DEBUG " OSEI: slave error interrupt\n");
+ if (mal_error & MALESR_PBEI)
+ printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n");
+}
+
+void emac_err_dump(struct net_device *dev, int em0isr)
+{
+ printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);
+
+ if (em0isr & EMAC_ISR_OVR)
+ printk(KERN_DEBUG " OVR: overrun\n");
+ if (em0isr & EMAC_ISR_PP)
+ printk(KERN_DEBUG " PP: control pause packet\n");
+ if (em0isr & EMAC_ISR_BP)
+ printk(KERN_DEBUG " BP: packet error\n");
+ if (em0isr & EMAC_ISR_RP)
+ printk(KERN_DEBUG " RP: runt packet\n");
+ if (em0isr & EMAC_ISR_SE)
+ printk(KERN_DEBUG " SE: short event\n");
+ if (em0isr & EMAC_ISR_ALE)
+ printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n");
+ if (em0isr & EMAC_ISR_BFCS)
+ printk(KERN_DEBUG " BFCS: bad FCS\n");
+ if (em0isr & EMAC_ISR_PTLE)
+ printk(KERN_DEBUG " PTLE: oversized packet\n");
+ if (em0isr & EMAC_ISR_ORE)
+ printk(KERN_DEBUG
+ " ORE: packet length field > max allowed LLC\n");
+ if (em0isr & EMAC_ISR_IRE)
+ printk(KERN_DEBUG " IRE: In Range error\n");
+ if (em0isr & EMAC_ISR_DBDM)
+ printk(KERN_DEBUG " DBDM: xmit error or SQE\n");
+ if (em0isr & EMAC_ISR_DB0)
+ printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n");
+ if (em0isr & EMAC_ISR_SE0)
+ printk(KERN_DEBUG
+ " SE0: Signal Quality Error test failure from TX channel 0\n");
+ if (em0isr & EMAC_ISR_TE0)
+ printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n");
+ if (em0isr & EMAC_ISR_DB1)
+ printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel \n");
+ if (em0isr & EMAC_ISR_SE1)
+ printk(KERN_DEBUG
+ " SE1: Signal Quality Error test failure from TX channel 1\n");
+ if (em0isr & EMAC_ISR_TE1)
+ printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n");
+ if (em0isr & EMAC_ISR_MOS)
+ printk(KERN_DEBUG " MOS\n");
+ if (em0isr & EMAC_ISR_MOF)
+ printk(KERN_DEBUG " MOF\n");
+
+ emac_mac_dump(dev);
+ emac_mal_dump(dev);
+}
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
new file mode 100644
index 000000000000..e59f57f363ca
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -0,0 +1,463 @@
+/*
+ * ibm_emac_mal.c
+ *
+ * Armin Kuster akuster@mvista.com
+ * June, 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/ocp.h>
+
+#include "ibm_emac_mal.h"
+
+/* Locking: Should we share a lock with the client?  The client could provide
+ * a lock pointer (optionally) in the commac structure... I don't think this
+ * is really necessary though.
+ */
+
+/* This lock protects the commac list. On today's UP implementations, it's
+ * really only used as IRQ protection in mal_{register,unregister}_commac()
+ */
+static DEFINE_RWLOCK(mal_list_lock);
+
+int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&mal_list_lock, flags);
+
+ /* Don't let multiple commacs claim the same channel */
+ if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
+ (mal->rx_chan_mask & commac->rx_chan_mask)) {
+ write_unlock_irqrestore(&mal_list_lock, flags);
+ return -EBUSY;
+ }
+
+ mal->tx_chan_mask |= commac->tx_chan_mask;
+ mal->rx_chan_mask |= commac->rx_chan_mask;
+
+ list_add(&commac->list, &mal->commac);
+
+ write_unlock_irqrestore(&mal_list_lock, flags);
+
+ return 0;
+}
+
+int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&mal_list_lock, flags);
+
+ mal->tx_chan_mask &= ~commac->tx_chan_mask;
+ mal->rx_chan_mask &= ~commac->rx_chan_mask;
+
+ list_del_init(&commac->list);
+
+ write_unlock_irqrestore(&mal_list_lock, flags);
+
+ return 0;
+}
+
+int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
+{
+ switch (channel) {
+ case 0:
+ set_mal_dcrn(mal, DCRN_MALRCBS0, size);
+ break;
+#ifdef DCRN_MALRCBS1
+ case 1:
+ set_mal_dcrn(mal, DCRN_MALRCBS1, size);
+ break;
+#endif
+#ifdef DCRN_MALRCBS2
+ case 2:
+ set_mal_dcrn(mal, DCRN_MALRCBS2, size);
+ break;
+#endif
+#ifdef DCRN_MALRCBS3
+ case 3:
+ set_mal_dcrn(mal, DCRN_MALRCBS3, size);
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ibm_ocp_mal *mal = dev_instance;
+ unsigned long mal_error;
+
+ /*
+	 * This SERR applies to one of the devices on the MAL; here we charge
+ * it against the first EMAC registered for the MAL.
+ */
+
+ mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+
+ printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
+ "MAL" /* FIXME: get the name right */ , mal_error);
+
+ /* FIXME: decipher error */
+	/* FIXME: distribute to commacs, if possible */
+
+ /* Clear the error status register */
+ set_mal_dcrn(mal, DCRN_MALESR, mal_error);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ibm_ocp_mal *mal = dev_instance;
+ struct list_head *l;
+ unsigned long isr;
+
+ isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
+ set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);
+
+ read_lock(&mal_list_lock);
+ list_for_each(l, &mal->commac) {
+ struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+ if (isr & mc->tx_chan_mask) {
+ mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
+ }
+ }
+ read_unlock(&mal_list_lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ibm_ocp_mal *mal = dev_instance;
+ struct list_head *l;
+ unsigned long isr;
+
+ isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
+ set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);
+
+ read_lock(&mal_list_lock);
+ list_for_each(l, &mal->commac) {
+ struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+ if (isr & mc->rx_chan_mask) {
+ mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
+ }
+ }
+ read_unlock(&mal_list_lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ibm_ocp_mal *mal = dev_instance;
+ struct list_head *l;
+ unsigned long deir;
+
+ deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);
+
+ /* FIXME: print which MAL correctly */
+ printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
+ "MAL", deir);
+
+ read_lock(&mal_list_lock);
+ list_for_each(l, &mal->commac) {
+ struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+ if (deir & mc->tx_chan_mask) {
+ mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
+ }
+ }
+ read_unlock(&mal_list_lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This interrupt should be very rare at best. This occurs when
+ * the hardware has a problem with the receive descriptors. The manual
+ * states that it occurs when the hardware tries to use a receive descriptor
+ * whose empty bit is not set. The recovery mechanism will be to
+ * traverse through the descriptors, handle any that are marked to be
+ * handled and reinitialize each along the way. At that point the driver
+ * will be restarted.
+ */
+static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ibm_ocp_mal *mal = dev_instance;
+ struct list_head *l;
+ unsigned long deir;
+
+ deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);
+
+ /*
+	 * This check really is needed; this case was encountered in stress testing.
+ */
+ if (deir == 0)
+ return IRQ_HANDLED;
+
+ /* FIXME: print which MAL correctly */
+ printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
+ "MAL", deir);
+
+ read_lock(&mal_list_lock);
+ list_for_each(l, &mal->commac) {
+ struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+ if (deir & mc->rx_chan_mask) {
+ mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
+ }
+ }
+ read_unlock(&mal_list_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __init mal_probe(struct ocp_device *ocpdev)
+{
+ struct ibm_ocp_mal *mal = NULL;
+ struct ocp_func_mal_data *maldata;
+ int err = 0;
+
+ maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
+ if (maldata == NULL) {
+		printk(KERN_ERR "mal%d: Missing additional data!\n",
+ ocpdev->def->index);
+ return -ENODEV;
+ }
+
+ mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
+ if (mal == NULL) {
+ printk(KERN_ERR
+		       "mal%d: Out of memory allocating MAL structure!\n",
+ ocpdev->def->index);
+ return -ENOMEM;
+ }
+ memset(mal, 0, sizeof(*mal));
+
+ switch (ocpdev->def->index) {
+ case 0:
+ mal->dcrbase = DCRN_MAL_BASE;
+ break;
+#ifdef DCRN_MAL1_BASE
+ case 1:
+ mal->dcrbase = DCRN_MAL1_BASE;
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ /**************************/
+
+ INIT_LIST_HEAD(&mal->commac);
+
+ set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
+ set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
+
+ set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */
+ /* FIXME: Add delay */
+
+ /* Set the MAL configuration register */
+ set_mal_dcrn(mal, DCRN_MALCR,
+ MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
+ MALCR_PLBLT_DEFAULT);
+
+ /* It would be nice to allocate buffers separately for each
+ * channel, but we can't because the channels share the upper
+	 * 13 bits of address lines. Each channel's buffer must also
+	 * be 4k aligned, so we allocate 4k for each channel. This is
+	 * inefficient; FIXME: do better, if possible */
+ mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
+ MAL_DT_ALIGN *
+ maldata->num_tx_chans,
+ &mal->tx_phys_addr, GFP_KERNEL);
+ if (mal->tx_virt_addr == NULL) {
+ printk(KERN_ERR
+		       "mal%d: Out of memory allocating MAL descriptors!\n",
+ ocpdev->def->index);
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* God, oh, god, I hate DCRs */
+ set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
+#ifdef DCRN_MALTXCTP1R
+ if (maldata->num_tx_chans > 1)
+ set_mal_dcrn(mal, DCRN_MALTXCTP1R,
+ mal->tx_phys_addr + MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP1R */
+#ifdef DCRN_MALTXCTP2R
+ if (maldata->num_tx_chans > 2)
+ set_mal_dcrn(mal, DCRN_MALTXCTP2R,
+ mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP2R */
+#ifdef DCRN_MALTXCTP3R
+ if (maldata->num_tx_chans > 3)
+ set_mal_dcrn(mal, DCRN_MALTXCTP3R,
+ mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP3R */
+#ifdef DCRN_MALTXCTP4R
+ if (maldata->num_tx_chans > 4)
+ set_mal_dcrn(mal, DCRN_MALTXCTP4R,
+ mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP4R */
+#ifdef DCRN_MALTXCTP5R
+ if (maldata->num_tx_chans > 5)
+ set_mal_dcrn(mal, DCRN_MALTXCTP5R,
+ mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP5R */
+#ifdef DCRN_MALTXCTP6R
+ if (maldata->num_tx_chans > 6)
+ set_mal_dcrn(mal, DCRN_MALTXCTP6R,
+ mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP6R */
+#ifdef DCRN_MALTXCTP7R
+ if (maldata->num_tx_chans > 7)
+ set_mal_dcrn(mal, DCRN_MALTXCTP7R,
+ mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
+#endif /* DCRN_MALTXCTP7R */
+
+ mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
+ MAL_DT_ALIGN *
+ maldata->num_rx_chans,
+					       &mal->rx_phys_addr, GFP_KERNEL);
+	if (mal->rx_virt_addr == NULL) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+ set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
+#ifdef DCRN_MALRXCTP1R
+ if (maldata->num_rx_chans > 1)
+ set_mal_dcrn(mal, DCRN_MALRXCTP1R,
+ mal->rx_phys_addr + MAL_DT_ALIGN);
+#endif /* DCRN_MALRXCTP1R */
+#ifdef DCRN_MALRXCTP2R
+ if (maldata->num_rx_chans > 2)
+ set_mal_dcrn(mal, DCRN_MALRXCTP2R,
+ mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
+#endif /* DCRN_MALRXCTP2R */
+#ifdef DCRN_MALRXCTP3R
+ if (maldata->num_rx_chans > 3)
+ set_mal_dcrn(mal, DCRN_MALRXCTP3R,
+ mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
+#endif /* DCRN_MALRXCTP3R */
+
+ err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
+ if (err)
+ goto fail;
+ err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal);
+ if (err)
+ goto fail;
+ err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ if (err)
+ goto fail;
+ err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
+ if (err)
+ goto fail;
+ err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ if (err)
+ goto fail;
+
+ set_mal_dcrn(mal, DCRN_MALIER,
+ MALIER_DE | MALIER_NE | MALIER_TE |
+ MALIER_OPBE | MALIER_PLBE);
+
+ /* Advertise me to the rest of the world */
+ ocp_set_drvdata(ocpdev, mal);
+
+ printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
+ ocpdev->def->index, maldata->num_tx_chans,
+ maldata->num_rx_chans);
+
+ return 0;
+
+ fail:
+ /* FIXME: dispose requested IRQs ! */
+ if (err && mal)
+ kfree(mal);
+ return err;
+}
+
+static void __exit mal_remove(struct ocp_device *ocpdev)
+{
+ struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
+ struct ocp_func_mal_data *maldata = ocpdev->def->additions;
+
+ BUG_ON(!maldata);
+
+ ocp_set_drvdata(ocpdev, NULL);
+
+ /* FIXME: shut down the MAL, deal with dependency with emac */
+ free_irq(maldata->serr_irq, mal);
+ free_irq(maldata->txde_irq, mal);
+ free_irq(maldata->txeob_irq, mal);
+ free_irq(maldata->rxde_irq, mal);
+ free_irq(maldata->rxeob_irq, mal);
+
+ if (mal->tx_virt_addr)
+ dma_free_coherent(&ocpdev->dev,
+ MAL_DT_ALIGN * maldata->num_tx_chans,
+ mal->tx_virt_addr, mal->tx_phys_addr);
+
+ if (mal->rx_virt_addr)
+ dma_free_coherent(&ocpdev->dev,
+ MAL_DT_ALIGN * maldata->num_rx_chans,
+ mal->rx_virt_addr, mal->rx_phys_addr);
+
+ kfree(mal);
+}
+
+/* Structure for a device driver */
+static struct ocp_device_id mal_ids[] = {
+ {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
+ {.vendor = OCP_VENDOR_INVALID}
+};
+
+static struct ocp_driver mal_driver = {
+ .name = "mal",
+ .id_table = mal_ids,
+
+ .probe = mal_probe,
+ .remove = mal_remove,
+};
+
+static int __init init_mals(void)
+{
+ int rc;
+
+ rc = ocp_register_driver(&mal_driver);
+ if (rc < 0) {
+ ocp_unregister_driver(&mal_driver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit exit_mals(void)
+{
+ ocp_unregister_driver(&mal_driver);
+}
+
+module_init(init_mals);
+module_exit(exit_mals);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
new file mode 100644
index 000000000000..dd9f0dabc6e0
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -0,0 +1,131 @@
+#ifndef _IBM_EMAC_MAL_H
+#define _IBM_EMAC_MAL_H
+
+#include <linux/list.h>
+
+#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */
+
+#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan))
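+
+/* Channels are numbered from the most significant bit (PowerPC bit 0), so
+ * MAL_CHAN_MASK(0) == 0x80000000 and MAL_CHAN_MASK(1) == 0x40000000; these
+ * masks are tested directly against MALTX/RXEOBISR and written to the
+ * CASR/CARR registers.
+ */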
+
+/* MAL Buffer Descriptor structure */
+struct mal_descriptor {
+ unsigned short ctrl; /* MAL / Commac status control bits */
+ short data_len; /* Max length is 4K-1 (12 bits) */
+ unsigned char *data_ptr; /* pointer to actual data buffer */
+} __attribute__ ((packed));
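+
+/* On 32-bit PowerPC this packed descriptor is 8 bytes (2 + 2 + 4), so one
+ * MAL_DT_ALIGN (4K) descriptor table holds up to 512 of them per channel;
+ * the driver only uses NUM_TX_BUFF/NUM_RX_BUFF of those slots.
+ */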
+
+/* the following defines are for the MadMAL status and control registers. */
+/* MADMAL transmit and receive status/control bits */
+#define MAL_RX_CTRL_EMPTY 0x8000
+#define MAL_RX_CTRL_WRAP 0x4000
+#define MAL_RX_CTRL_CM 0x2000
+#define MAL_RX_CTRL_LAST 0x1000
+#define MAL_RX_CTRL_FIRST 0x0800
+#define MAL_RX_CTRL_INTR 0x0400
+
+#define MAL_TX_CTRL_READY 0x8000
+#define MAL_TX_CTRL_WRAP 0x4000
+#define MAL_TX_CTRL_CM 0x2000
+#define MAL_TX_CTRL_LAST 0x1000
+#define MAL_TX_CTRL_INTR 0x0400
+
+struct mal_commac_ops {
+ void (*txeob) (void *dev, u32 chanmask);
+ void (*txde) (void *dev, u32 chanmask);
+ void (*rxeob) (void *dev, u32 chanmask);
+ void (*rxde) (void *dev, u32 chanmask);
+};
+
+struct mal_commac {
+ struct mal_commac_ops *ops;
+ void *dev;
+ u32 tx_chan_mask, rx_chan_mask;
+ struct list_head list;
+};
+
+struct ibm_ocp_mal {
+ int dcrbase;
+
+ struct list_head commac;
+ u32 tx_chan_mask, rx_chan_mask;
+
+ dma_addr_t tx_phys_addr;
+ struct mal_descriptor *tx_virt_addr;
+
+ dma_addr_t rx_phys_addr;
+ struct mal_descriptor *rx_virt_addr;
+};
+
+#define GET_MAL_STANZA(base,dcrn) \
+ case base: \
+ x = mfdcr(dcrn(base)); \
+ break;
+
+#define SET_MAL_STANZA(base,dcrn, val) \
+ case base: \
+ mtdcr(dcrn(base), (val)); \
+ break;
+
+#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
+#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
+
+#ifdef DCRN_MAL1_BASE
+#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
+#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
+#else /* ! DCRN_MAL1_BASE */
+#define GET_MAL1_STANZA(dcrn)
+#define SET_MAL1_STANZA(dcrn,val)
+#endif
+
+#define get_mal_dcrn(mal, dcrn) ({ \
+ u32 x; \
+ switch ((mal)->dcrbase) { \
+ GET_MAL0_STANZA(dcrn) \
+ GET_MAL1_STANZA(dcrn) \
+ default: \
+ x = 0; \
+ BUG(); \
+ } \
+x; })
+
+#define set_mal_dcrn(mal, dcrn, val) do { \
+ switch ((mal)->dcrbase) { \
+ SET_MAL0_STANZA(dcrn,val) \
+ SET_MAL1_STANZA(dcrn,val) \
+ default: \
+ BUG(); \
+ } } while (0)
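+
+/* Example (assuming the DCRN_* names are function-like macros taking the DCR
+ * base, as the stanzas above imply): with mal->dcrbase == DCRN_MAL_BASE,
+ * get_mal_dcrn(mal, DCRN_MALESR) expands to mfdcr(DCRN_MALESR(DCRN_MAL_BASE)).
+ */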
+
+static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
+{
+ set_mal_dcrn(mal, DCRN_MALTXCASR,
+ get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
+}
+
+static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal,
+ u32 chanmask)
+{
+ set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask);
+}
+
+static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
+{
+ set_mal_dcrn(mal, DCRN_MALRXCASR,
+ get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
+}
+
+static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal,
+ u32 chanmask)
+{
+ set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask);
+}
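+
+/* The CASR/CARR registers used above behave as a set/reset pair: enabling
+ * ORs the channel mask into the channel active set register, while disabling
+ * just writes the mask to the channel active reset register, so no
+ * read-modify-write is needed to stop a channel.
+ */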
+
+extern int mal_register_commac(struct ibm_ocp_mal *mal,
+ struct mal_commac *commac);
+extern int mal_unregister_commac(struct ibm_ocp_mal *mal,
+ struct mal_commac *commac);
+
+extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel,
+ unsigned long size);
+
+#endif /* _IBM_EMAC_MAL_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
new file mode 100644
index 000000000000..14213f090e91
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -0,0 +1,298 @@
+/*
+ * ibm_emac_phy.c
+ *
+ * PHY drivers for the ibm ocp ethernet driver. Borrowed
+ * from sungem_phy.c, though I only kept the generic MII
+ * driver for now.
+ *
+ * This file should be shared with other drivers or eventually
+ * merged as the "low level" part of miilib
+ *
+ * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#include "ibm_emac_phy.h"
+
+static int reset_one_mii_phy(struct mii_phy *phy, int phy_id)
+{
+ u16 val;
+ int limit = 10000;
+
+ val = __phy_read(phy, phy_id, MII_BMCR);
+ val &= ~BMCR_ISOLATE;
+ val |= BMCR_RESET;
+ __phy_write(phy, phy_id, MII_BMCR, val);
+
+ udelay(100);
+
+ while (limit--) {
+ val = __phy_read(phy, phy_id, MII_BMCR);
+ if ((val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ if ((val & BMCR_ISOLATE) && limit > 0)
+ __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
+
+ return (limit <= 0);
+}
+
+static int cis8201_init(struct mii_phy *phy)
+{
+ u16 epcr;
+
+ epcr = phy_read(phy, MII_CIS8201_EPCR);
+ epcr &= ~EPCR_MODE_MASK;
+
+ switch (phy->mode) {
+ case PHY_MODE_TBI:
+ epcr |= EPCR_TBI_MODE;
+ break;
+ case PHY_MODE_RTBI:
+ epcr |= EPCR_RTBI_MODE;
+ break;
+ case PHY_MODE_GMII:
+ epcr |= EPCR_GMII_MODE;
+ break;
+ case PHY_MODE_RGMII:
+ default:
+ epcr |= EPCR_RGMII_MODE;
+ }
+
+ phy_write(phy, MII_CIS8201_EPCR, epcr);
+
+ return 0;
+}
+
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ u16 ctl, adv;
+
+ phy->autoneg = 1;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+ phy->advertising = advertise;
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ u16 ctl;
+
+ phy->autoneg = 0;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ return -EINVAL;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+ u16 status;
+
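+	/* BMSR latches link-down events; read it twice so a stale
+	 * latched value doesn't hide the current link state.
+	 */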
+ (void)phy_read(phy, MII_BMSR);
+ status = phy_read(phy, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ return 0;
+ if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return 0;
+ return 1;
+}
+
+#define MII_CIS8201_ACSR 0x1c
+#define ACSR_DUPLEX_STATUS 0x0020
+#define ACSR_SPEED_1000BASET 0x0010
+#define ACSR_SPEED_100BASET 0x0008
+
+static int cis8201_read_link(struct mii_phy *phy)
+{
+ u16 acsr;
+
+ if (phy->autoneg) {
+ acsr = phy_read(phy, MII_CIS8201_ACSR);
+
+ if (acsr & ACSR_DUPLEX_STATUS)
+ phy->duplex = DUPLEX_FULL;
+ else
+ phy->duplex = DUPLEX_HALF;
+ if (acsr & ACSR_SPEED_1000BASET) {
+ phy->speed = SPEED_1000;
+ } else if (acsr & ACSR_SPEED_100BASET)
+ phy->speed = SPEED_100;
+ else
+ phy->speed = SPEED_10;
+ phy->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+ u16 lpa;
+
+ if (phy->autoneg) {
+ lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
+
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+
+ if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ phy->speed = SPEED_100;
+ if (lpa & LPA_100FULL)
+ phy->duplex = DUPLEX_FULL;
+ } else if (lpa & LPA_10FULL)
+ phy->duplex = DUPLEX_FULL;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+/* CIS8201 phy ops */
+static struct mii_phy_ops cis8201_phy_ops = {
+	.init		= cis8201_init,
+	.setup_aneg	= genmii_setup_aneg,
+	.setup_forced	= genmii_setup_forced,
+	.poll_link	= genmii_poll_link,
+	.read_link	= cis8201_read_link
+};
+
+/* Generic implementation for most 10/100 PHYs */
+static struct mii_phy_ops generic_phy_ops = {
+	.setup_aneg	= genmii_setup_aneg,
+	.setup_forced	= genmii_setup_forced,
+	.poll_link	= genmii_poll_link,
+	.read_link	= genmii_read_link
+};
+
+static struct mii_phy_def cis8201_phy_def = {
+	.phy_id		= 0x000fc410,
+	.phy_id_mask	= 0x000ffff0,
+	.name		= "CIS8201 Gigabit Ethernet",
+	.features	= MII_GBIT_FEATURES,
+	.magic_aneg	= 0,
+	.ops		= &cis8201_phy_ops
+};
+
+static struct mii_phy_def genmii_phy_def = {
+	.phy_id		= 0x00000000,
+	.phy_id_mask	= 0x00000000,
+	.name		= "Generic MII",
+	.features	= MII_BASIC_FEATURES,
+	.magic_aneg	= 0,
+	.ops		= &generic_phy_ops
+};
+
+static struct mii_phy_def *mii_phy_table[] = {
+ &cis8201_phy_def,
+ &genmii_phy_def,
+ NULL
+};
+
+int mii_phy_probe(struct mii_phy *phy, int mii_id)
+{
+ int rc;
+ u32 id;
+ struct mii_phy_def *def;
+ int i;
+
+ phy->autoneg = 0;
+ phy->advertising = 0;
+ phy->mii_id = mii_id;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->pause = 0;
+
+ /* Take PHY out of isolate mode and reset it. */
+ rc = reset_one_mii_phy(phy, mii_id);
+ if (rc)
+ return -ENODEV;
+
+ /* Read ID and find matching entry */
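+ /* (the low 4 revision bits are masked off so one table entry matches all silicon revisions) */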
+ id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2))
+ & 0xfffffff0;
+ for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
+ if ((id & def->phy_id_mask) == def->phy_id)
+ break;
+ /* Should never be NULL (we have a generic entry), but... */
+ if (def == NULL)
+ return -ENODEV;
+
+ phy->def = def;
+
+ /* Setup default advertising */
+ phy->advertising = def->features;
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.h b/drivers/net/ibm_emac/ibm_emac_phy.h
new file mode 100644
index 000000000000..61afbea96563
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_phy.h
@@ -0,0 +1,137 @@
+
+/*
+ * ibm_emac_phy.h
+ *
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * February 2003
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * This file basically duplicates sungem_phy.{c,h} with different PHYs
+ * supported. I'm looking into merging that into a single mii layer more
+ * flexible than mii.c
+ */
+
+#ifndef _IBM_EMAC_PHY_H_
+#define _IBM_EMAC_PHY_H_
+
+/*
+ * PHY mode settings
+ * Used for multi-mode capable PHYs
+ */
+#define PHY_MODE_NA 0
+#define PHY_MODE_MII 1
+#define PHY_MODE_RMII 2
+#define PHY_MODE_SMII 3
+#define PHY_MODE_RGMII 4
+#define PHY_MODE_TBI 5
+#define PHY_MODE_GMII 6
+#define PHY_MODE_RTBI 7
+#define PHY_MODE_SGMII 8
+
+/*
+ * PHY specific registers/values
+ */
+
+/* CIS8201 */
+#define MII_CIS8201_EPCR 0x17
+#define EPCR_MODE_MASK 0x3000
+#define EPCR_GMII_MODE 0x0000
+#define EPCR_RGMII_MODE 0x1000
+#define EPCR_TBI_MODE 0x2000
+#define EPCR_RTBI_MODE 0x3000
+
+struct mii_phy;
+
+/* Operations supported by any kind of PHY */
+struct mii_phy_ops {
+ int (*init) (struct mii_phy * phy);
+ int (*suspend) (struct mii_phy * phy, int wol_options);
+ int (*setup_aneg) (struct mii_phy * phy, u32 advertise);
+ int (*setup_forced) (struct mii_phy * phy, int speed, int fd);
+ int (*poll_link) (struct mii_phy * phy);
+ int (*read_link) (struct mii_phy * phy);
+};
+
+/* Structure used to statically define an mii/gii based PHY */
+struct mii_phy_def {
+ u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
+ u32 phy_id_mask; /* Significant bits */
+ u32 features; /* Ethtool SUPPORTED_* defines */
+ int magic_aneg; /* Autoneg does all speed test for us */
+ const char *name;
+ const struct mii_phy_ops *ops;
+};
+
+/* An instance of a PHY, partially borrowed from mii_if_info */
+struct mii_phy {
+ struct mii_phy_def *def;
+ int advertising;
+ int mii_id;
+
+ /* 1: autoneg enabled, 0: disabled */
+ int autoneg;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* PHY mode - if needed */
+ int mode;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
+ int val);
+};
+
+/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
+ * filled, the remaining fields will be filled on return
+ */
+extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
+
+static inline int __phy_read(struct mii_phy *phy, int id, int reg)
+{
+ return phy->mdio_read(phy->dev, id, reg);
+}
+
+static inline void __phy_write(struct mii_phy *phy, int id, int reg, int val)
+{
+ phy->mdio_write(phy->dev, id, reg, val);
+}
+
+static inline int phy_read(struct mii_phy *phy, int reg)
+{
+ return phy->mdio_read(phy->dev, phy->mii_id, reg);
+}
+
+static inline void phy_write(struct mii_phy *phy, int reg, int val)
+{
+ phy->mdio_write(phy->dev, phy->mii_id, reg, val);
+}
+
+#endif /* _IBM_EMAC_PHY_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
new file mode 100644
index 000000000000..49f188f4ea6e
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -0,0 +1,65 @@
+/*
+ * Defines for the IBM RGMII bridge
+ *
+ * Based on ocp_zmii.h/ibm_emac_zmii.h
+ * Armin Kuster akuster@mvista.com
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_RGMII_H_
+#define _IBM_EMAC_RGMII_H_
+
+#include <linux/config.h>
+
+/* RGMII bridge */
+typedef struct rgmii_regs {
+ u32 fer; /* Function enable register */
+ u32 ssr; /* Speed select register */
+} rgmii_t;
+
+#define RGMII_INPUTS 4
+
+/* RGMII device */
+struct ibm_ocp_rgmii {
+ struct rgmii_regs *base;
+ int mode[RGMII_INPUTS];
+ int users; /* number of EMACs using this RGMII bridge */
+};
+
+/* Functional Enable Reg */
+#define RGMII_FER_MASK(x) (0x00000007 << (4 * (x)))
+#define RGMII_RTBI 0x00000004
+#define RGMII_RGMII 0x00000005
+#define RGMII_TBI 0x00000006
+#define RGMII_GMII 0x00000007
+
+/* Speed Selection reg */
+
+#define RGMII_SP2_100 0x00000002
+#define RGMII_SP2_1000 0x00000004
+#define RGMII_SP3_100 0x00000200
+#define RGMII_SP3_1000 0x00000400
+
+#define RGMII_MII2_SPDMASK 0x00000007
+#define RGMII_MII3_SPDMASK 0x00000700
+
+#define RGMII_MII2_100MB (RGMII_SP2_100 & ~RGMII_SP2_1000)
+#define RGMII_MII2_1000MB (RGMII_SP2_1000 & ~RGMII_SP2_100)
+#define RGMII_MII2_10MB (~(RGMII_SP2_100 | RGMII_SP2_1000))
+#define RGMII_MII3_100MB (RGMII_SP3_100 & ~RGMII_SP3_1000)
+#define RGMII_MII3_1000MB (RGMII_SP3_1000 & ~RGMII_SP3_100)
+#define RGMII_MII3_10MB (~(RGMII_SP3_100 | RGMII_SP3_1000))
+
+#define RTBI 0
+#define RGMII 1
+#define TBI 2
+#define GMII 3
+
+#endif /* _IBM_EMAC_RGMII_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
new file mode 100644
index 000000000000..ecfc69805521
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -0,0 +1,48 @@
+/*
+ * Defines for the IBM TAH
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_TAH_H
+#define _IBM_EMAC_TAH_H
+
+/* TAH */
+typedef struct tah_regs {
+ u32 tah_revid;
+ u32 pad[3];
+ u32 tah_mr;
+ u32 tah_ssr0;
+ u32 tah_ssr1;
+ u32 tah_ssr2;
+ u32 tah_ssr3;
+ u32 tah_ssr4;
+ u32 tah_ssr5;
+ u32 tah_tsr;
+} tah_t;
+
+/* TAH engine */
+#define TAH_MR_CVR 0x80000000
+#define TAH_MR_SR 0x40000000
+#define TAH_MR_ST_256 0x01000000
+#define TAH_MR_ST_512 0x02000000
+#define TAH_MR_ST_768 0x03000000
+#define TAH_MR_ST_1024 0x04000000
+#define TAH_MR_ST_1280 0x05000000
+#define TAH_MR_ST_1536 0x06000000
+#define TAH_MR_TFS_16KB 0x00000000
+#define TAH_MR_TFS_2KB 0x00200000
+#define TAH_MR_TFS_4KB 0x00400000
+#define TAH_MR_TFS_6KB 0x00600000
+#define TAH_MR_TFS_8KB 0x00800000
+#define TAH_MR_TFS_10KB 0x00a00000
+#define TAH_MR_DTFP 0x00100000
+#define TAH_MR_DIG 0x00080000
+
+#endif /* _IBM_EMAC_TAH_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
new file mode 100644
index 000000000000..6f6cd2a39e38
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -0,0 +1,93 @@
+/*
+ * ocp_zmii.h
+ *
+ * Defines for the IBM ZMII bridge
+ *
+ * Armin Kuster akuster@mvista.com
+ * Dec, 2001
+ *
+ * Copyright 2001 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_ZMII_H_
+#define _IBM_EMAC_ZMII_H_
+
+#include <linux/config.h>
+
+/* ZMII bridge registers */
+struct zmii_regs {
+ u32 fer; /* Function enable reg */
+ u32 ssr; /* Speed select reg */
+ u32 smiirs; /* SMII status reg */
+};
+
+#define ZMII_INPUTS 4
+
+/* ZMII device */
+struct ibm_ocp_zmii {
+ struct zmii_regs *base;
+ int mode[ZMII_INPUTS];
+ int users; /* number of EMACs using this ZMII bridge */
+};
+
+/* Functional Enable Reg */
+
+#define ZMII_FER_MASK(x) (0xf0000000 >> (4 * (x)))
+
+#define ZMII_MDI0 0x80000000
+#define ZMII_SMII0 0x40000000
+#define ZMII_RMII0 0x20000000
+#define ZMII_MII0 0x10000000
+#define ZMII_MDI1 0x08000000
+#define ZMII_SMII1 0x04000000
+#define ZMII_RMII1 0x02000000
+#define ZMII_MII1 0x01000000
+#define ZMII_MDI2 0x00800000
+#define ZMII_SMII2 0x00400000
+#define ZMII_RMII2 0x00200000
+#define ZMII_MII2 0x00100000
+#define ZMII_MDI3 0x00080000
+#define ZMII_SMII3 0x00040000
+#define ZMII_RMII3 0x00020000
+#define ZMII_MII3 0x00010000
+
+/* Speed Selection reg */
+
+#define ZMII_SCI0 0x40000000
+#define ZMII_FSS0 0x20000000
+#define ZMII_SP0 0x10000000
+#define ZMII_SCI1 0x04000000
+#define ZMII_FSS1 0x02000000
+#define ZMII_SP1 0x01000000
+#define ZMII_SCI2 0x00400000
+#define ZMII_FSS2 0x00200000
+#define ZMII_SP2 0x00100000
+#define ZMII_SCI3 0x00040000
+#define ZMII_FSS3 0x00020000
+#define ZMII_SP3 0x00010000
+
+#define ZMII_MII0_100MB ZMII_SP0
+#define ZMII_MII0_10MB ~ZMII_SP0
+#define ZMII_MII1_100MB ZMII_SP1
+#define ZMII_MII1_10MB ~ZMII_SP1
+#define ZMII_MII2_100MB ZMII_SP2
+#define ZMII_MII2_10MB ~ZMII_SP2
+#define ZMII_MII3_100MB ZMII_SP3
+#define ZMII_MII3_10MB ~ZMII_SP3
+
+/* SMII Status reg */
+
+#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */
+#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */
+
+#define SMII 0
+#define RMII 1
+#define MII 2
+#define MDI 3
+
+#endif /* _IBM_EMAC_ZMII_H_ */
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
new file mode 100644
index 000000000000..01ad904215a1
--- /dev/null
+++ b/drivers/net/ibmlana.c
@@ -0,0 +1,1080 @@
+/*
+net-3-driver for the IBM LAN Adapter/A
+
+This is an extension to the Linux operating system, and is covered by the
+same GNU General Public License that covers that work.
+
+Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
+ alfred.arnold@lancom.de)
+
+This driver is based both on the SK_MCA driver, which is itself based on the
+SK_G16 and 3C523 driver.
+
+paper sources:
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ Hans-Peter Messmer for the basic Microchannel stuff
+
+ 'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
+ for help on Ethernet driver programming
+
+ 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National
+ Semiconductor for info on the MAC chip
+
+ 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
+ Document Number SC30-3661-00' by IBM for info on the adapter itself
+
+ Also see http://www.natsemi.com/
+
+special acknowledgements to:
+ - Bob Eager for helping me out with documentation from IBM
+ - Jim Shorney for his endless patience with me while I was using
+ him as a beta tester to trace down the address filter bug ;-)
+
+ Missing things:
+
+ -> set debug level via ioctl instead of compile-time switches
+ -> I didn't follow the development of the 2.1.x kernels, so my
+ assumptions about which things changed with which kernel version
+ are probably nonsense
+
+History:
+ Nov 6th, 1999
+ startup from SK_MCA driver
+ Dec 6th, 1999
+ finally got docs about the card. A big thank you to Bob Eager!
+ Dec 12th, 1999
+ first packet received
+ Dec 13th, 1999
+ recv queue done, tcpdump works
+ Dec 15th, 1999
+ transmission part works
+ Dec 28th, 1999
+ added usage of the isa_functions for Linux 2.3 . Things should
+ still work with 2.0.x....
+ Jan 28th, 2000
+ in Linux 2.2.13, the version.h file mysteriously didn't get
+ included. Added a workaround for this. Furthermore, it now
+ not only compiles as a module ;-)
+ Jan 30th, 2000
+ newer kernels automatically probe more than one board, so the
+ 'startslot' as a variable is also needed here
+ Apr 12th, 2000
+ the interrupt mask register is not set 'hard' instead of individually
+ setting registers, since this seems to set bits that shouldn't be
+ set
+ May 21st, 2000
+ reset interrupt status immediately after CAM load
+ add a recovery delay after releasing the chip's reset line
+ May 24th, 2000
+ finally found the bug in the address filter setup - damned signed
+ chars!
+ June 1st, 2000
+ corrected version codes, added support for the latest 2.3 changes
+ Oct 28th, 2002
+ cleaned up for the 2.5 tree <alan@redhat.com>
+
+ *************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/mca-legacy.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+
+#define _IBM_LANA_DRIVER_
+#include "ibmlana.h"
+
+#undef DEBUG
+
+#define DRV_NAME "ibmlana"
+
+/* ------------------------------------------------------------------------
+ * global static data - no more than this, since we can handle multiple boards and
+ * have to pack all state info into the device struct!
+ * ------------------------------------------------------------------------ */
+
+static char *MediaNames[Media_Count] = {
+ "10BaseT", "10Base5", "Unknown", "10Base2"
+};
+
+/* ------------------------------------------------------------------------
+ * private subfunctions
+ * ------------------------------------------------------------------------ */
+
+#ifdef DEBUG
+ /* dump all registers */
+
+static void dumpregs(struct net_device *dev)
+{
+ int z;
+
+ for (z = 0; z < 160; z += 2) {
+ if (!(z & 15))
+ printk("REGS: %04x:", z);
+ printk(" %04x", inw(dev->base_addr + z));
+ if ((z & 15) == 14)
+ printk("\n");
+ }
+}
+
+/* dump parts of shared memory - only needed during debugging */
+
+static void dumpmem(struct net_device *dev, u32 start, u32 len)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int z;
+
+ printk("Address %04x:\n", start);
+ for (z = 0; z < len; z++) {
+ if ((z & 15) == 0)
+ printk("%04x:", z);
+ printk(" %02x", readb(priv->base + start + z));
+ if ((z & 15) == 15)
+ printk("\n");
+ }
+ if ((z & 15) != 0)
+ printk("\n");
+}
+
+/* print exact time - ditto */
+
+static void PrTime(void)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec);
+}
+#endif /* DEBUG */
+
+/* deduce resources out of POS registers */
+
+static void getaddrs(int slot, int *base, int *memlen, int *iobase,
+ int *irq, ibmlana_medium * medium)
+{
+ u_char pos0, pos1;
+
+ pos0 = mca_read_stored_pos(slot, 2);
+ pos1 = mca_read_stored_pos(slot, 3);
+
+ *base = 0xc0000 + ((pos1 & 0xf0) << 9);
+ *memlen = (pos1 & 0x01) ? 0x8000 : 0x4000;
+ *iobase = (pos0 & 0xe0) << 7;
+ switch (pos0 & 0x06) {
+ case 0:
+ *irq = 5;
+ break;
+ case 2:
+ *irq = 15;
+ break;
+ case 4:
+ *irq = 10;
+ break;
+ case 6:
+ *irq = 11;
+ break;
+ }
+ *medium = (pos0 & 0x18) >> 3;
+}
+
+/* wait until (reg & mask) == value or the timeout (in jiffies) expires;
+ returns 1 on success, 0 on timeout */
+
+static int wait_timeout(struct net_device *dev, int regoffs, u16 mask,
+ u16 value, int timeout)
+{
+ unsigned long fin = jiffies + timeout;
+
+ while (time_before(jiffies,fin))
+ if ((inw(dev->base_addr + regoffs) & mask) == value)
+ return 1;
+
+ return 0;
+}
+
+
+/* reset the whole board */
+
+static void ResetBoard(struct net_device *dev)
+{
+ unsigned char bcmval;
+
+ /* read original board control value */
+
+ bcmval = inb(dev->base_addr + BCMREG);
+
+ /* set reset bit for a while */
+
+ bcmval |= BCMREG_RESET;
+ outb(bcmval, dev->base_addr + BCMREG);
+ udelay(10);
+ bcmval &= ~BCMREG_RESET;
+ outb(bcmval, dev->base_addr + BCMREG);
+
+ /* switch over to RAM again */
+
+ bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN;
+ outb(bcmval, dev->base_addr + BCMREG);
+}
+
+/* calculate RAM layout & set up descriptors in RAM */
+
+static void InitDscrs(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ u32 addr, baddr, raddr;
+ int z;
+ tda_t tda;
+ rda_t rda;
+ rra_t rra;
+
+ /* initialize RAM */
+
+ memset_io(priv->base, 0xaa,
+ dev->mem_end - dev->mem_start);
+
+ /* setup n TX descriptors - independent of RAM size */
+
+ priv->tdastart = addr = 0;
+ priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT;
+ for (z = 0; z < TXBUFCNT; z++) {
+ tda.status = 0;
+ tda.config = 0;
+ tda.length = 0;
+ tda.fragcount = 1;
+ tda.startlo = baddr;
+ tda.starthi = 0;
+ tda.fraglength = 0;
+ if (z == TXBUFCNT - 1)
+ tda.link = priv->tdastart;
+ else
+ tda.link = addr + sizeof(tda_t);
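+ /* bit 0 of the link field is the EOL flag; every TDA is terminated so the
+ SONIC sends one frame per TXP command and StartTx re-arms the next one */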
+ tda.link |= 1;
+ memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
+ addr += sizeof(tda_t);
+ baddr += PKTSIZE;
+ }
+
+ /* calculate how many receive buffers fit into remaining memory */
+
+ priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE);
+
+ /* calculate receive addresses */
+
+ priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
+ priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
+ priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
+
+ for (z = 0; z < priv->rxbufcnt; z++) {
+ rra.startlo = baddr;
+ rra.starthi = 0;
+ rra.cntlo = PKTSIZE >> 1;
+ rra.cnthi = 0;
+ memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t));
+
+ rda.status = 0;
+ rda.length = 0;
+ rda.startlo = 0;
+ rda.starthi = 0;
+ rda.seqno = 0;
+ if (z < priv->rxbufcnt - 1)
+ rda.link = addr + sizeof(rda_t);
+ else
+ rda.link = 1;
+ rda.inuse = 1;
+ memcpy_toio(priv->base + addr, &rda, sizeof(rda_t));
+
+ baddr += PKTSIZE;
+ raddr += sizeof(rra_t);
+ addr += sizeof(rda_t);
+ }
+
+ /* initialize current pointers */
+
+ priv->nextrxdescr = 0;
+ priv->lastrxdescr = priv->rxbufcnt - 1;
+ priv->nexttxdescr = 0;
+ priv->currtxdescr = 0;
+ priv->txusedcnt = 0;
+ memset(priv->txused, 0, sizeof(priv->txused));
+}
+
+/* set up Rx + Tx descriptors in SONIC */
+
+static int InitSONIC(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* set up start & end of resource area */
+
+ outw(0, dev->base_addr + SONIC_URRA);
+ outw(priv->rrastart, dev->base_addr + SONIC_RSA);
+ outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA);
+ outw(priv->rrastart, dev->base_addr + SONIC_RRP);
+ outw(priv->rrastart, dev->base_addr + SONIC_RWP);
+
+ /* set EOBC so that only one packet goes into one buffer */
+
+ outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC);
+
+ /* let SONIC read the first RRA descriptor */
+
+ outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG);
+ if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) {
+ printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.", dev->name);
+ return 0;
+ }
+
+ /* point SONIC to the first RDA */
+
+ outw(0, dev->base_addr + SONIC_URDA);
+ outw(priv->rdastart, dev->base_addr + SONIC_CRDA);
+
+ /* set upper half of TDA address */
+
+ outw(0, dev->base_addr + SONIC_UTDA);
+
+ return 1;
+}
+
+/* stop SONIC so we can reinitialize it */
+
+static void StopSONIC(struct net_device *dev)
+{
+ /* disable interrupts */
+
+ outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG);
+ outb(0, dev->base_addr + SONIC_IMREG);
+
+ /* reset the SONIC */
+
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+}
+
+/* initialize card and SONIC for proper operation */
+
+static void putcam(camentry_t * cams, int *camcnt, char *addr)
+{
+ camentry_t *pcam = cams + (*camcnt);
+ u8 *uaddr = (u8 *) addr;
+
+ pcam->index = *camcnt;
+ pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0];
+ pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2];
+ pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4];
+ (*camcnt)++;
+}
+
+static void InitBoard(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int camcnt;
+ camentry_t cams[16];
+ u32 cammask;
+ struct dev_mc_list *mcptr;
+ u16 rcrval;
+
+ /* reset the SONIC */
+
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+
+ /* clear all spurious interrupts */
+
+ outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG);
+
+ /* set up the SONIC's bus interface - constant for this adapter -
+ must be done while the SONIC is in reset */
+
+ outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG);
+ outw(0, dev->base_addr + SONIC_DCREG2);
+
+ /* remove reset from the SONIC */
+
+ outw(0, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+
+ /* data sheet requires URRA to be programmed before setting up the CAM contents */
+
+ outw(0, dev->base_addr + SONIC_URRA);
+
+ /* program the CAM entry 0 to the device address */
+
+ camcnt = 0;
+ putcam(cams, &camcnt, dev->dev_addr);
+
+ /* start putting the multicast addresses into the CAM list. Stop if
+ it is full. */
+
+ for (mcptr = dev->mc_list; mcptr != NULL; mcptr = mcptr->next) {
+ putcam(cams, &camcnt, mcptr->dmi_addr);
+ if (camcnt == 16)
+ break;
+ }
+
+ /* calculate CAM mask */
+
+ cammask = (1 << camcnt) - 1;
+
+ /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */
+
+ memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt);
+ memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask));
+
+#ifdef DEBUG
+ printk("CAM setup:\n");
+ dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask));
+#endif
+
+ outw(0, dev->base_addr + SONIC_CAMPTR);
+ outw(camcnt, dev->base_addr + SONIC_CAMCNT);
+ outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG);
+ if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) {
+ printk(KERN_ERR "%s:SONIC did not respond on LCAM command - giving up.", dev->name);
+ return;
+ } else {
+ /* clear interrupt condition */
+
+ outw(ISREG_LCD, dev->base_addr + SONIC_ISREG);
+
+#ifdef DEBUG
+ printk("Loading CAM done, address pointers %04x:%04x\n",
+ inw(dev->base_addr + SONIC_URRA),
+ inw(dev->base_addr + SONIC_CAMPTR));
+ {
+ int z;
+
+ printk("\n-->CAM: PTR %04x CNT %04x\n",
+ inw(dev->base_addr + SONIC_CAMPTR),
+ inw(dev->base_addr + SONIC_CAMCNT));
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ for (z = 0; z < camcnt; z++) {
+ outw(z, dev->base_addr + SONIC_CAMEPTR);
+ printk("Entry %d: %04x %04x %04x\n", z,
+ inw(dev->base_addr + SONIC_CAMADDR0),
+ inw(dev->base_addr + SONIC_CAMADDR1),
+ inw(dev->base_addr + SONIC_CAMADDR2));
+ }
+ outw(0, dev->base_addr + SONIC_CMDREG);
+ }
+#endif
+ }
+
+ rcrval = RCREG_BRD | RCREG_LB_NONE;
+
+ /* if still multicast addresses left or ALLMULTI is set, set the multicast
+ enable bit */
+
+ if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL))
+ rcrval |= RCREG_AMC;
+
+ /* promiscuous mode? */
+
+ if (dev->flags & IFF_PROMISC)
+ rcrval |= RCREG_PRO;
+
+ /* program receive mode */
+
+ outw(rcrval, dev->base_addr + SONIC_RCREG);
+#ifdef DEBUG
+ printk("\nRCRVAL: %04x\n", rcrval);
+#endif
+
+ /* set up descriptors in shared memory + feed them into SONIC registers */
+
+ InitDscrs(dev);
+ if (!InitSONIC(dev))
+ return;
+
+ /* reset all pending interrupts */
+
+ outw(0xffff, dev->base_addr + SONIC_ISREG);
+
+ /* enable transmitter + receiver interrupts */
+
+ outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG);
+ outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG);
+
+ /* turn on card interrupts */
+
+ outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG);
+
+#ifdef DEBUG
+ printk("Register dump after initialization:\n");
+ dumpregs(dev);
+#endif
+}
+
+/* start transmission of a descriptor */
+
+static void StartTx(struct net_device *dev, int descr)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int addr;
+
+ addr = priv->tdastart + (descr * sizeof(tda_t));
+
+ /* put descriptor address into SONIC */
+
+ outw(addr, dev->base_addr + SONIC_CTDA);
+
+ /* trigger transmitter */
+
+ priv->currtxdescr = descr;
+ outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG);
+}
+
+/* ------------------------------------------------------------------------
+ * interrupt handler(s)
+ * ------------------------------------------------------------------------ */
+
+/* receive buffer area exhausted */
+
+static void irqrbe_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* point the SONIC back to the RRA start */
+
+ outw(priv->rrastart, dev->base_addr + SONIC_RRP);
+ outw(priv->rrastart, dev->base_addr + SONIC_RWP);
+}
+
+/* receive interrupt */
+
+static void irqrx_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ rda_t rda;
+ u32 rdaaddr, lrdaaddr;
+
+ /* loop until ... */
+
+ while (1) {
+ /* read descriptor that was next to be filled by SONIC */
+
+ rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
+ lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
+ memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
+
+ /* iron out upper word halves of fields we use - SONIC will duplicate
+ bits 0..15 to 16..31 */
+
+ rda.status &= 0xffff;
+ rda.length &= 0xffff;
+ rda.startlo &= 0xffff;
+
+ /* stop if the SONIC still owns it, i.e. there is no data for us */
+
+ if (rda.inuse)
+ break;
+
+ /* good packet? */
+
+ else if (rda.status & RCREG_PRX) {
+ struct sk_buff *skb;
+
+ /* fetch buffer */
+
+ skb = dev_alloc_skb(rda.length + 2);
+ if (skb == NULL)
+ priv->stat.rx_dropped++;
+ else {
+ /* copy out data */
+
+ memcpy_fromio(skb_put(skb, rda.length),
+ priv->base +
+ rda.startlo, rda.length);
+
+ /* set up skb fields */
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* bookkeeping */
+ dev->last_rx = jiffies;
+ priv->stat.rx_packets++;
+ priv->stat.rx_bytes += rda.length;
+
+ /* pass to the upper layers */
+ netif_rx(skb);
+ }
+ }
+
+ /* otherwise check error status bits and increase statistics */
+
+ else {
+ priv->stat.rx_errors++;
+ if (rda.status & RCREG_FAER)
+ priv->stat.rx_frame_errors++;
+ if (rda.status & RCREG_CRCR)
+ priv->stat.rx_crc_errors++;
+ }
+
+ /* descriptor processed, will become new last descriptor in queue */
+
+ rda.link = 1;
+ rda.inuse = 1;
+ memcpy_toio(priv->base + rdaaddr, &rda,
+ sizeof(rda_t));
+
+ /* set up link and EOL = 0 in currently last descriptor. Only write
+ the link field since the SONIC may currently already access the
+ other fields. */
+
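+ /* the link field lives at byte offset 20 in rda_t (five u32 fields precede it) */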
+ memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);
+
+ /* advance indices */
+
+ priv->lastrxdescr = priv->nextrxdescr;
+ if ((++priv->nextrxdescr) >= priv->rxbufcnt)
+ priv->nextrxdescr = 0;
+ }
+}
+
+/* transmit interrupt */
+
+static void irqtx_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ tda_t tda;
+
+ /* fetch descriptor (we forgot the size ;-) */
+ memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
+
+ /* update statistics */
+ priv->stat.tx_packets++;
+ priv->stat.tx_bytes += tda.length;
+
+ /* update our pointers */
+ priv->txused[priv->currtxdescr] = 0;
+ priv->txusedcnt--;
+
+ /* if there are more descriptors present in RAM, start them */
+ if (priv->txusedcnt > 0)
+ StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
+
+ /* tell the upper layer we can go on transmitting */
+ netif_wake_queue(dev);
+}
+
+static void irqtxerr_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ tda_t tda;
+
+ /* fetch descriptor to check status */
+ memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
+
+ /* update statistics */
+ priv->stat.tx_errors++;
+ if (tda.status & (TCREG_NCRS | TCREG_CRSL))
+ priv->stat.tx_carrier_errors++;
+ if (tda.status & TCREG_EXC)
+ priv->stat.tx_aborted_errors++;
+ if (tda.status & TCREG_OWC)
+ priv->stat.tx_window_errors++;
+ if (tda.status & TCREG_FU)
+ priv->stat.tx_fifo_errors++;
+
+ /* update our pointers */
+ priv->txused[priv->currtxdescr] = 0;
+ priv->txusedcnt--;
+
+ /* if there are more descriptors present in RAM, start them */
+ if (priv->txusedcnt > 0)
+ StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
+
+ /* tell the upper layer we can go on transmitting */
+ netif_wake_queue(dev);
+}
+
+/* general interrupt entry */
+
+static irqreturn_t irq_handler(int irq, void *device, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) device;
+ u16 ival;
+
+ /* in case the interrupt is not meant for us... */
+ if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND))
+ return IRQ_NONE;
+
+ /* loop through the interrupt bits until everything is clear */
+ while (1) {
+ ival = inw(dev->base_addr + SONIC_ISREG);
+
+ if (ival & ISREG_RBE) {
+ irqrbe_handler(dev);
+ outw(ISREG_RBE, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_PKTRX) {
+ irqrx_handler(dev);
+ outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_TXDN) {
+ irqtx_handler(dev);
+ outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_TXER) {
+ irqtxerr_handler(dev);
+ outw(ISREG_TXER, dev->base_addr + SONIC_ISREG);
+ }
+ if (!(ival & (ISREG_RBE | ISREG_PKTRX | ISREG_TXDN | ISREG_TXER)))
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------------------
+ * driver methods
+ * ------------------------------------------------------------------------ */
+
+/* MCA info */
+
+static int ibmlana_getinfo(char *buf, int slot, void *d)
+{
+ int len = 0, i;
+ struct net_device *dev = (struct net_device *) d;
+ ibmlana_priv *priv;
+
+ /* can't say anything about an uninitialized device... */
+
+ if (dev == NULL)
+ return len;
+ priv = netdev_priv(dev);
+
+ /* print info */
+
+ len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
+ len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr);
+ len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1);
+ len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
+ len += sprintf(buf + len, "Device: %s\n", dev->name);
+ len += sprintf(buf + len, "MAC address:");
+ for (i = 0; i < 6; i++)
+ len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
+ buf[len++] = '\n';
+ buf[len] = 0;
+
+ return len;
+}
+
+/* open driver. Means also initialization and start of the SONIC */
+
+static int ibmlana_open(struct net_device *dev)
+{
+ int result;
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* register resources - only necessary for IRQ */
+
+ result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+ if (result != 0) {
+ printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
+ return result;
+ }
+ dev->irq = priv->realirq;
+
+ /* set up the card and SONIC */
+ InitBoard(dev);
+
+ /* initialize operational flags */
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* close driver. Shut down board and free allocated resources */
+
+static int ibmlana_close(struct net_device *dev)
+{
+ /* turn off board */
+
+ /* release resources */
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+ dev->irq = 0;
+ return 0;
+}
+
+/* transmit a block. */
+
+static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int retval = 0, tmplen, addr;
+ unsigned long flags;
+ tda_t tda;
+ int baddr;
+
+ /* find out if there are free slots for a frame to transmit. If not,
+ the upper layer is in deep desperation and we simply ignore the frame. */
+
+ if (priv->txusedcnt >= TXBUFCNT) {
+ retval = -EIO;
+ priv->stat.tx_dropped++;
+ goto tx_done;
+ }
+
+ /* copy the frame data into the next free transmit buffer - pad short frames */
+ tmplen = skb->len;
+ if (tmplen < 60)
+ tmplen = 60;
+ baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
+ memcpy_toio(priv->base + baddr, skb->data, skb->len);
+
+ /* copy filler into RAM - in case we had to pad the frame.
+ We fill a bit more than necessary, but that does no harm
+ since the buffer is far larger...
+ Sorry Linus for the filler string but I couldn't resist ;-) */
+
+ if (tmplen > skb->len) {
+ char *fill = "NetBSD is a nice OS too! ";
+ unsigned int destoffs = skb->len, l = strlen(fill);
+
+ while (destoffs < tmplen) {
+ memcpy_toio(priv->base + baddr + destoffs, fill, l);
+ destoffs += l;
+ }
+ }
+
+ /* set up the new frame descriptor */
+ addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
+ memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
+ tda.length = tda.fraglength = tmplen;
+ memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
+
+ /* if there were no active descriptors, trigger the SONIC */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->txusedcnt++;
+ priv->txused[priv->nexttxdescr] = 1;
+
+ /* are all transmission slots used up ? */
+ if (priv->txusedcnt >= TXBUFCNT)
+ netif_stop_queue(dev);
+
+ if (priv->txusedcnt == 1)
+ StartTx(dev, priv->nexttxdescr);
+ priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+tx_done:
+ dev_kfree_skb(skb);
+ return retval;
+}
+
+/* return pointer to Ethernet statistics */
+
+static struct net_device_stats *ibmlana_stats(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ return &priv->stat;
+}
+
+/* switch receiver mode. */
+
+static void ibmlana_set_multicast_list(struct net_device *dev)
+{
+ /* first stop the SONIC... */
+ StopSONIC(dev);
+ /* ...then reinit it with the new flags */
+ InitBoard(dev);
+}
+
+/* ------------------------------------------------------------------------
+ * hardware check
+ * ------------------------------------------------------------------------ */
+
+static int startslot; /* counts through slots when probing multiple devices */
+
+static int ibmlana_probe(struct net_device *dev)
+{
+ int slot, z;
+ int base = 0, irq = 0, iobase = 0, memlen = 0;
+ ibmlana_priv *priv;
+ ibmlana_medium medium;
+
+ SET_MODULE_OWNER(dev);
+
+ /* can't work without an MCA bus ;-) */
+ if (MCA_bus == 0)
+ return -ENODEV;
+
+ base = dev->mem_start;
+ irq = dev->irq;
+
+ for (slot = startslot; (slot = mca_find_adapter(IBM_LANA_ID, slot)) != -1; slot++) {
+ /* deduce card addresses */
+ getaddrs(slot, &base, &memlen, &iobase, &irq, &medium);
+
+ /* slot already in use ? */
+ if (mca_is_adapter_used(slot))
+ continue;
+ /* were we looking for something different ? */
+ if (dev->irq && dev->irq != irq)
+ continue;
+ if (dev->mem_start && dev->mem_start != base)
+ continue;
+ /* found something that matches */
+ break;
+ }
+
+ /* nothing found ? */
+ if (slot == -1)
+ return (base != 0 || irq != 0) ? -ENXIO : -ENODEV;
+
+ /* announce success */
+ printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1);
+
+ /* try to obtain I/O range */
+ if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) {
+ printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase);
+ startslot = slot + 1;
+ return -EBUSY;
+ }
+
+ priv = netdev_priv(dev);
+ priv->slot = slot;
+ priv->realirq = irq;
+ priv->medium = medium;
+ spin_lock_init(&priv->lock);
+
+
+ /* set base + irq for this device (irq not allocated so far) */
+
+ dev->irq = 0;
+ dev->mem_start = base;
+ dev->mem_end = base + memlen;
+ dev->base_addr = iobase;
+
+ priv->base = ioremap(base, memlen);
+ if (!priv->base) {
+ printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME);
+ startslot = slot + 1;
+ release_region(iobase, IBM_LANA_IORANGE);
+ return -EBUSY;
+ }
+
+ /* make procfs entries */
+ mca_set_adapter_name(slot, "IBM LAN Adapter/A");
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmlana_getinfo, dev);
+
+ mca_mark_as_used(slot);
+
+ /* set methods */
+
+ dev->open = ibmlana_open;
+ dev->stop = ibmlana_close;
+ dev->hard_start_xmit = ibmlana_tx;
+ dev->do_ioctl = NULL;
+ dev->get_stats = ibmlana_stats;
+ dev->set_multicast_list = ibmlana_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+
+ /* copy out MAC address */
+
+ for (z = 0; z < sizeof(dev->dev_addr); z++)
+ dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
+
+ /* print config */
+
+ printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
+ "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
+ dev->name, priv->realirq, dev->base_addr,
+ dev->mem_start, dev->mem_end - 1,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
+
+ /* reset board */
+
+ ResetBoard(dev);
+
+ /* next probe will start at next slot */
+
+ startslot = slot + 1;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------
+ * modularization support
+ * ------------------------------------------------------------------------ */
+
+#ifdef MODULE
+
+#define DEVMAX 5
+
+static struct net_device *moddevs[DEVMAX];
+static int irq;
+static int io;
+
+module_param(irq, int, 0);
+module_param(io, int, 0);
+MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
+MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ int z;
+
+ startslot = 0;
+ for (z = 0; z < DEVMAX; z++) {
+ struct net_device *dev = alloc_etherdev(sizeof(ibmlana_priv));
+ if (!dev)
+ break;
+ dev->irq = irq;
+ dev->base_addr = io;
+ if (ibmlana_probe(dev)) {
+ free_netdev(dev);
+ break;
+ }
+ if (register_netdev(dev)) {
+ ibmlana_priv *priv = netdev_priv(dev);
+ release_region(dev->base_addr, IBM_LANA_IORANGE);
+ mca_mark_as_unused(priv->slot);
+ mca_set_adapter_name(priv->slot, "");
+ mca_set_adapter_procfn(priv->slot, NULL, NULL);
+ iounmap(priv->base);
+ free_netdev(dev);
+ break;
+ }
+ moddevs[z] = dev;
+ }
+ return (z > 0) ? 0 : -EIO;
+}
+
+void cleanup_module(void)
+{
+ int z;
+ for (z = 0; z < DEVMAX; z++) {
+ struct net_device *dev = moddevs[z];
+ if (dev) {
+ ibmlana_priv *priv = netdev_priv(dev);
+ unregister_netdev(dev);
+ /*DeinitBoard(dev); */
+ release_region(dev->base_addr, IBM_LANA_IORANGE);
+ mca_mark_as_unused(priv->slot);
+ mca_set_adapter_name(priv->slot, "");
+ mca_set_adapter_procfn(priv->slot, NULL, NULL);
+ iounmap(priv->base);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h
new file mode 100644
index 000000000000..458ee226e537
--- /dev/null
+++ b/drivers/net/ibmlana.h
@@ -0,0 +1,279 @@
+#ifndef _IBM_LANA_INCLUDE_
+#define _IBM_LANA_INCLUDE_
+
+#ifdef _IBM_LANA_DRIVER_
+
+/* maximum packet size */
+
+#define PKTSIZE 1524
+
+/* number of transmit buffers */
+
+#define TXBUFCNT 4
+
+/* Adapter ID's */
+#define IBM_LANA_ID 0xffe0
+
+/* media enumeration - defined in a way that it fits onto the LAN/A's
+ POS registers... */
+
+typedef enum {
+ Media_10BaseT, Media_10Base5,
+ Media_Unknown, Media_10Base2, Media_Count
+} ibmlana_medium;
+
+/* private structure */
+
+typedef struct {
+ unsigned int slot; /* MCA-Slot-# */
+ struct net_device_stats stat; /* packet statistics */
+ int realirq; /* memorizes actual IRQ, even when
+ currently not allocated */
+ ibmlana_medium medium; /* physical connector */
+ u32 tdastart, txbufstart, /* addresses */
+ rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt;
+ int nextrxdescr, /* next rx descriptor to be used */
+ lastrxdescr, /* last free rx descriptor */
+ nexttxdescr, /* next tx descriptor to be used */
+ currtxdescr, /* tx descriptor currently tx'ed */
+ txused[TXBUFCNT]; /* busy flags */
+ void __iomem *base;
+ spinlock_t lock;
+} ibmlana_priv;
+
+/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
+ a full 64K I/O range... */
+
+#define IBM_LANA_IORANGE 0xa0
+
+/* Command Register: */
+
+#define SONIC_CMDREG 0x00
+#define CMDREG_HTX 0x0001 /* halt transmission */
+#define CMDREG_TXP 0x0002 /* start transmission */
+#define CMDREG_RXDIS 0x0004 /* disable receiver */
+#define CMDREG_RXEN 0x0008 /* enable receiver */
+#define CMDREG_STP 0x0010 /* stop timer */
+#define CMDREG_ST 0x0020 /* start timer */
+#define CMDREG_RST 0x0080 /* software reset */
+#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */
+#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */
+
+/* Data Configuration Register */
+
+#define SONIC_DCREG 0x02
+#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */
+#define DCREG_LBR 0x2000 /* Latched Bus Retry */
+#define DCREG_PO1 0x1000 /* Programmable Outputs */
+#define DCREG_PO0 0x0800
+#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */
+#define DCREG_USR1 0x0200 /* User Definable Pins */
+#define DCREG_USR0 0x0100
+#define DCREG_WC0 0x0000 /* 0..3 Wait States */
+#define DCREG_WC1 0x0040
+#define DCREG_WC2 0x0080
+#define DCREG_WC3 0x00c0
+#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */
+#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */
+#define DCREG_BMS 0x0010 /* Block Mode Select */
+#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */
+#define DCREG_RFT8 0x0004
+#define DCREG_RFT16 0x0008
+#define DCREG_RFT24 0x000c
+#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */
+#define DCREG_TFT16 0x0001
+#define DCREG_TFT24 0x0002
+#define DCREG_TFT28 0x0003
+
+/* Receive Control Register */
+
+#define SONIC_RCREG 0x04
+#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
+#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
+#define RCREG_BRD 0x2000 /* accept broadcasts */
+#define RCREG_PRO 0x1000 /* promiscuous mode */
+#define RCREG_AMC 0x0800 /* accept all multicasts */
+#define RCREG_LB_NONE 0x0000 /* no loopback */
+#define RCREG_LB_MAC 0x0200 /* MAC loopback */
+#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */
+#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */
+#define RCREG_MC 0x0100 /* Multicast received */
+#define RCREG_BC 0x0080 /* Broadcast received */
+#define RCREG_LPKT 0x0040 /* last packet in RBA */
+#define RCREG_CRS 0x0020 /* carrier sense present */
+#define RCREG_COL 0x0010 /* recv'd packet with collision */
+#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */
+#define RCREG_FAER 0x0004 /* recv'd packet with inv. framing */
+#define RCREG_LBK 0x0002 /* recv'd loopback packet */
+#define RCREG_PRX 0x0001 /* recv'd packet is OK */
+
+/* Transmit Control Register */
+
+#define SONIC_TCREG 0x06
+#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */
+#define TCREG_POWC 0x4000 /* timer start out of window detect */
+#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
+#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
+#define TCREG_EXD 0x0400 /* excessive deferral occurred */
+#define TCREG_DEF 0x0200 /* single deferral occurred */
+#define TCREG_NCRS 0x0100 /* no carrier detected */
+#define TCREG_CRSL 0x0080 /* carrier lost */
+#define TCREG_EXC 0x0040 /* excessive collisions occurred */
+#define TCREG_OWC 0x0020 /* out of window collision occurred */
+#define TCREG_PMB 0x0008 /* packet monitored bad */
+#define TCREG_FU 0x0004 /* FIFO underrun */
+#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
+#define TCREG_PTX 0x0001 /* packet transmitted OK */
+
+/* Interrupt Mask Register */
+
+#define SONIC_IMREG 0x08
+#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
+#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
+#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
+#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
+#define IMREG_PRXEN 0x0400 /* interrupt when packet received */
+#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */
+#define IMREG_TXEREN 0x0100 /* interrupt when send failed */
+#define IMREG_TCEN 0x0080 /* interrupt when timer completed */
+#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */
+#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */
+#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */
+#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */
+#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */
+#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */
+#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */
+
+/* Interrupt Status Register */
+
+#define SONIC_ISREG 0x0a
+#define ISREG_BR 0x4000 /* bus retry occurred */
+#define ISREG_HBL 0x2000 /* heartbeat lost */
+#define ISREG_LCD 0x1000 /* CAM loaded */
+#define ISREG_PINT 0x0800 /* PINT in TDA set */
+#define ISREG_PKTRX 0x0400 /* packet received */
+#define ISREG_TXDN 0x0200 /* packet was sent */
+#define ISREG_TXER 0x0100 /* send failed */
+#define ISREG_TC 0x0080 /* timer completed */
+#define ISREG_RDE 0x0040 /* RDA exhausted */
+#define ISREG_RBE 0x0020 /* RBA exhausted */
+#define ISREG_RBAE 0x0010 /* RBA too short for received frame */
+#define ISREG_CRC 0x0008 /* CRC counter rolls over */
+#define ISREG_FAE 0x0004 /* FAE counter rolls over */
+#define ISREG_MP 0x0002 /* MP counter rolls over */
+#define ISREG_RFO 0x0001 /* Rx FIFO overflows */
+
+#define SONIC_UTDA 0x0c /* current transmit descr address */
+#define SONIC_CTDA 0x0e
+
+#define SONIC_URDA 0x1a /* current receive descr address */
+#define SONIC_CRDA 0x1c
+
+#define SONIC_CRBA0 0x1e /* current receive buffer address */
+#define SONIC_CRBA1 0x20
+
+#define SONIC_RBWC0 0x22 /* word count in receive buffer */
+#define SONIC_RBWC1 0x24
+
+#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */
+
+#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */
+
+#define SONIC_RSA 0x2a /* start of receive resource area */
+
+#define SONIC_REA 0x2c /* end of receive resource area */
+
+#define SONIC_RRP 0x2e /* resource read pointer */
+
+#define SONIC_RWP 0x30 /* resource write pointer */
+
+#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */
+
+#define SONIC_CAMADDR2 0x44 /* CAM address ports */
+#define SONIC_CAMADDR1 0x46
+#define SONIC_CAMADDR0 0x48
+
+#define SONIC_CAMPTR 0x4c /* lower address of CDA */
+
+#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */
+
+/* Data Configuration Register 2 */
+
+#define SONIC_DCREG2 0x7e
+#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */
+#define DCREG2_EXPO2 0x4000
+#define DCREG2_EXPO1 0x2000
+#define DCREG2_EXPO0 0x1000
+#define DCREG2_HD 0x0800 /* heartbeat disable */
+#define DCREG2_JD 0x0200 /* jabber timer disable */
+#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */
+#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */
+#define DCREG2_PH 0x0010 /* HOLD request timing */
+#define DCREG2_PCM 0x0004 /* packet compress when matched */
+#define DCREG2_PCNM 0x0002 /* packet compress when not matched */
+#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */
+
+/* Board Control Register: Enable RAM, Interrupts... */
+
+#define BCMREG 0x80
+#define BCMREG_RAMEN 0x80 /* switch over to RAM */
+#define BCMREG_IPEND 0x40 /* interrupt pending ? */
+#define BCMREG_RESET 0x08 /* reset board */
+#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */
+#define BCMREG_RAMWIN 0x02 /* enable RAM window */
+#define BCMREG_IEN 0x01 /* interrupt enable */
+
+/* MAC Address PROM */
+
+#define MACADDRPROM 0x92
+
+/* structure of a CAM entry */
+
+typedef struct {
+ u32 index; /* pointer into CAM area */
+ u32 addr0; /* address part (bits 0..15 used) */
+ u32 addr1;
+ u32 addr2;
+} camentry_t;
+
+/* structure of a receive resource */
+
+typedef struct {
+ u32 startlo; /* start address (bits 0..15 used) */
+ u32 starthi;
+ u32 cntlo; /* size in 16-bit quantities */
+ u32 cnthi;
+} rra_t;
+
+/* structure of a receive descriptor */
+
+typedef struct {
+ u32 status; /* packet status */
+ u32 length; /* length in bytes */
+ u32 startlo; /* start address */
+ u32 starthi;
+ u32 seqno; /* frame sequence */
+ u32 link; /* pointer to next descriptor */
+ /* bit 0 = EOL */
+ u32 inuse; /* !=0 --> free for SONIC to write */
+} rda_t;
+
+/* structure of a transmit descriptor */
+
+typedef struct {
+ u32 status; /* transmit status */
+ u32 config; /* value for TCR */
+ u32 length; /* total length */
+ u32 fragcount; /* number of fragments */
+ u32 startlo; /* start address of fragment */
+ u32 starthi;
+ u32 fraglength; /* length of this fragment */
+ /* more address/length triplets may */
+ /* follow here */
+ u32 link; /* pointer to next descriptor */
+ /* bit 0 = EOL */
+} tda_t;
+
+#endif /* _IBM_LANA_DRIVER_ */
+
+#endif /* _IBM_LANA_INCLUDE_ */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
new file mode 100644
index 000000000000..c39b0609742a
--- /dev/null
+++ b/drivers/net/ibmveth.c
@@ -0,0 +1,1175 @@
+/**************************************************************************/
+/* */
+/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
+/* Copyright (C) 2003 IBM Corp. */
+/* Originally written by Dave Larson (larson1@us.ibm.com) */
+/* Maintained by Santiago Leon (santil@us.ibm.com) */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the Free Software */
+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
+/* USA */
+/* */
+/* This module contains the implementation of a virtual ethernet device */
+/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
+/* option of the RS/6000 Platform Architecture to interface with virtual */
+/* ethernet NICs that are presented to the partition by the hypervisor. */
+/* */
+/**************************************************************************/
+/*
+ TODO:
+ - remove frag processing code - no longer needed
+ - add support for sysfs
+ - possibly remove procfs support
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <linux/proc_fs.h>
+#include <asm/semaphore.h>
+#include <asm/hvcall.h>
+#include <asm/atomic.h>
+#include <asm/iommu.h>
+#include <asm/vio.h>
+#include <asm/uaccess.h>
+#include <linux/seq_file.h>
+
+#include "ibmveth.h"
+
+#define DEBUG 1
+
+#define ibmveth_printk(fmt, args...) \
+ printk(KERN_INFO "%s: " fmt, __FILE__, ## args)
+
+#define ibmveth_error_printk(fmt, args...) \
+ printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
+
+#ifdef DEBUG
+#define ibmveth_debug_printk_no_adapter(fmt, args...) \
+ printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
+#define ibmveth_debug_printk(fmt, args...) \
+ printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
+#define ibmveth_assert(expr) \
+ if(!(expr)) { \
+ printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
+ BUG(); \
+ }
+#else
+#define ibmveth_debug_printk_no_adapter(fmt, args...)
+#define ibmveth_debug_printk(fmt, args...)
+#define ibmveth_assert(expr)
+#endif
+
+static int ibmveth_open(struct net_device *dev);
+static int ibmveth_close(struct net_device *dev);
+static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int ibmveth_poll(struct net_device *dev, int *budget);
+static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
+static void ibmveth_set_multicast_list(struct net_device *dev);
+static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
+static void ibmveth_proc_register_driver(void);
+static void ibmveth_proc_unregister_driver(void);
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+
+#ifdef CONFIG_PROC_FS
+#define IBMVETH_PROC_DIR "ibmveth"
+static struct proc_dir_entry *ibmveth_proc_dir;
+#endif
+
+static const char ibmveth_driver_name[] = "ibmveth";
+static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.03"
+
+MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
+MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ibmveth_driver_version);
+
+/* simple methods of getting data from the current rxq entry */
+static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
+{
+ return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
+}
+
+static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
+{
+ return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
+}
+
+static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
+{
+ return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
+}
+
+static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
+{
+ return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+}
+
+/* setup the initial settings for a buffer pool */
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+{
+ pool->size = pool_size;
+ pool->index = pool_index;
+ pool->buff_size = buff_size;
+ pool->threshold = pool_size / 2;
+}
+
+/* allocate and set up a buffer pool - called during open */
+static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+
+ if(!pool->free_map) {
+ return -1;
+ }
+
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ if(!pool->dma_addr) {
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
+
+ if(!pool->skbuff) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ memset(pool->skbuff, 0, sizeof(void*) * pool->size);
+ memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
+
+ for(i = 0; i < pool->size; ++i) {
+ pool->free_map[i] = i;
+ }
+
+ atomic_set(&pool->available, 0);
+ pool->producer_index = 0;
+ pool->consumer_index = 0;
+
+ return 0;
+}
+
+/* replenish the buffers for a pool. note that we don't need to
+ * skb_reserve these since they are used for incoming...
+ */
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+{
+ u32 i;
+ u32 count = pool->size - atomic_read(&pool->available);
+ u32 buffers_added = 0;
+
+ mb();
+
+ for(i = 0; i < count; ++i) {
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ union ibmveth_buf_desc desc;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
+
+ skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+
+ if(!skb) {
+ ibmveth_debug_printk("replenish: unable to allocate skb\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
+
+ free_index = pool->consumer_index++ % pool->size;
+ index = pool->free_map[free_index];
+
+ ibmveth_assert(index != IBM_VETH_INVALID_MAP);
+ ibmveth_assert(pool->skbuff[index] == NULL);
+
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+
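+		/* the correlator is written into the start of the buffer and
+		 * reported back in the rx queue entry: pool index in the
+		 * upper 32 bits, buffer index in the lower 32 bits */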
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64*)skb->data = correlator;
+
+ desc.desc = 0;
+ desc.fields.valid = 1;
+ desc.fields.length = pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+
+ if(lpar_rc != H_Success) {
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->skbuff[index] = NULL;
+ pool->consumer_index--;
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ } else {
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
+ }
+ }
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+}
+
+/* check if replenishing is needed. */
+static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
+{
+ return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
+ (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
+ (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
+}
+
+/* schedule the replenish work if we need replenishing and it isn't already running */
+static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
+{
+ if(ibmveth_is_replenishing_needed(adapter) &&
+ (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
+ schedule_work(&adapter->replenish_task);
+ }
+}
+
+/* replenish task routine - runs from a workqueue */
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+{
+ adapter->replenish_task_cycles++;
+
+ ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
+ ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
+ ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+
+ adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+
+ atomic_inc(&adapter->not_replenishing);
+
+ ibmveth_schedule_replenishing(adapter);
+}
+
+/* empty and free a buffer pool - also used to do cleanup in error paths */
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ if(pool->free_map) {
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ }
+
+ if(pool->skbuff && pool->dma_addr) {
+ for(i = 0; i < pool->size; ++i) {
+ struct sk_buff *skb = pool->skbuff[i];
+ if(skb) {
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[i],
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ pool->skbuff[i] = NULL;
+ }
+ }
+ }
+
+ if(pool->dma_addr) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+ }
+
+ if(pool->skbuff) {
+ kfree(pool->skbuff);
+ pool->skbuff = NULL;
+ }
+}
+
+/* remove a buffer from a pool */
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
+{
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ unsigned int free_index;
+ struct sk_buff *skb;
+
+ ibmveth_assert(pool < IbmVethNumBufferPools);
+ ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+
+ skb = adapter->rx_buff_pool[pool].skbuff[index];
+
+ ibmveth_assert(skb != NULL);
+
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+
+ free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+ adapter->rx_buff_pool[pool].free_map[free_index] = index;
+
+ mb();
+
+ atomic_dec(&(adapter->rx_buff_pool[pool].available));
+}
+
+/* get the current buffer on the rx queue */
+static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
+{
+ u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+
+ ibmveth_assert(pool < IbmVethNumBufferPools);
+ ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+
+ return adapter->rx_buff_pool[pool].skbuff[index];
+}
+
+/* recycle the current buffer on the rx queue */
+static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+{
+ u32 q_index = adapter->rx_queue.index;
+ u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ union ibmveth_buf_desc desc;
+ unsigned long lpar_rc;
+
+ ibmveth_assert(pool < IbmVethNumBufferPools);
+ ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+
+ desc.desc = 0;
+ desc.fields.valid = 1;
+ desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
+ desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
+
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+
+ if(lpar_rc != H_Success) {
+ ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ }
+
+ if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+}
+
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
+{
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+
+ if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+}
+
+static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+{
+ if(adapter->buffer_list_addr != NULL) {
+ if(!dma_mapping_error(adapter->buffer_list_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->buffer_list_addr);
+ adapter->buffer_list_addr = NULL;
+ }
+
+ if(adapter->filter_list_addr != NULL) {
+ if(!dma_mapping_error(adapter->filter_list_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->filter_list_addr);
+ adapter->filter_list_addr = NULL;
+ }
+
+ if(adapter->rx_queue.queue_addr != NULL) {
+ if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->rx_queue.queue_addr);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+}
+
+static int ibmveth_open(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev->priv;
+ u64 mac_address = 0;
+ int rxq_entries;
+ unsigned long lpar_rc;
+ int rc;
+ union ibmveth_buf_desc rxq_desc;
+
+ ibmveth_debug_printk("open starting\n");
+
+ rxq_entries =
+ adapter->rx_buff_pool[0].size +
+ adapter->rx_buff_pool[1].size +
+ adapter->rx_buff_pool[2].size + 1;
+
+ adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+
+ if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
+ ibmveth_cleanup(adapter);
+ return -ENOMEM;
+ }
+
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
+
+ if(!adapter->rx_queue.queue_addr) {
+ ibmveth_error_printk("unable to allocate rx queue pages\n");
+ ibmveth_cleanup(adapter);
+ return -ENOMEM;
+ }
+
+ adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if((dma_mapping_error(adapter->buffer_list_dma) ) ||
+ (dma_mapping_error(adapter->filter_list_dma)) ||
+ (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+ ibmveth_error_printk("unable to map filter or buffer list pages\n");
+ ibmveth_cleanup(adapter);
+ return -ENOMEM;
+ }
+
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.num_slots = rxq_entries;
+ adapter->rx_queue.toggle = 1;
+
+ if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
+ ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
+ ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
+ {
+ ibmveth_error_printk("unable to allocate buffer pools\n");
+ ibmveth_cleanup(adapter);
+ return -ENOMEM;
+ }
+
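+	/* the 6 address bytes land in the high-order bytes of the (big
+	 * endian) u64; shift them down into the low 48 bits before handing
+	 * the MAC to the hypervisor */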
+ memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
+ mac_address = mac_address >> 16;
+
+ rxq_desc.desc = 0;
+ rxq_desc.fields.valid = 1;
+ rxq_desc.fields.length = adapter->rx_queue.queue_len;
+ rxq_desc.fields.address = adapter->rx_queue.queue_dma;
+
+ ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
+ ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+
+
+ lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
+ adapter->buffer_list_dma,
+ rxq_desc.desc,
+ adapter->filter_list_dma,
+ mac_address);
+
+ if(lpar_rc != H_Success) {
+ ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
+ ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
+ adapter->buffer_list_dma,
+ adapter->filter_list_dma,
+ rxq_desc.desc,
+ mac_address);
+ ibmveth_cleanup(adapter);
+ return -ENONET;
+ }
+
+ ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
+ if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
+ ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_isLongBusy(rc) || (rc == H_Busy));
+
+ ibmveth_cleanup(adapter);
+ return rc;
+ }
+
+ netif_start_queue(netdev);
+
+ ibmveth_debug_printk("scheduling initial replenish cycle\n");
+ ibmveth_schedule_replenishing(adapter);
+
+ ibmveth_debug_printk("open complete\n");
+
+ return 0;
+}
+
+static int ibmveth_close(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev->priv;
+ long lpar_rc;
+
+ ibmveth_debug_printk("close starting\n");
+
+ netif_stop_queue(netdev);
+
+ free_irq(netdev->irq, netdev);
+
+ cancel_delayed_work(&adapter->replenish_task);
+ flush_scheduled_work();
+
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
+
+ if(lpar_rc != H_Success)
+ {
+ ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
+ lpar_rc);
+ }
+
+ adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+
+ ibmveth_cleanup(adapter);
+
+ ibmveth_debug_printk("close complete\n");
+
+ return 0;
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 1;
+ return 0;
+}
+
+static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
+ strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
+ strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
+}
+
+static u32 netdev_get_link(struct net_device *dev) {
+ return 1;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .get_link = netdev_get_link,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+};
+
+static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
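+/* offset of an address within a 4K page */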
+#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
+
+static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev->priv;
+ union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
+ unsigned long lpar_rc;
+ int nfrags = 0, curfrag;
+ unsigned long correlator;
+ unsigned int retry_count;
+
+ if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
+ adapter->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ memset(&desc, 0, sizeof(desc));
+
+ /* nfrags = number of frags after the initial fragment */
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ if(nfrags)
+ adapter->tx_multidesc_send++;
+
+ /* map the initial fragment */
+ desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
+ desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+ desc[0].fields.length, DMA_TO_DEVICE);
+ desc[0].fields.valid = 1;
+
+ if(dma_mapping_error(desc[0].fields.address)) {
+ ibmveth_error_printk("tx: unable to map initial fragment\n");
+ adapter->tx_map_failed++;
+ adapter->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ curfrag = nfrags;
+
+ /* map fragments past the initial portion if there are any */
+ while(curfrag--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
+ desc[curfrag+1].fields.address
+ = dma_map_single(&adapter->vdev->dev,
+ page_address(frag->page) + frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ desc[curfrag+1].fields.length = frag->size;
+ desc[curfrag+1].fields.valid = 1;
+
+ if(dma_mapping_error(desc[curfrag+1].fields.address)) {
+ ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
+ adapter->tx_map_failed++;
+ adapter->stats.tx_dropped++;
+ /* Free all the mappings we just created */
+ while(curfrag < nfrags) {
+ dma_unmap_single(&adapter->vdev->dev,
+ desc[curfrag+1].fields.address,
+ desc[curfrag+1].fields.length,
+ DMA_TO_DEVICE);
+ curfrag++;
+ }
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ /* send the frame. Arbitrarily set retrycount to 1024 */
+ correlator = 0;
+ retry_count = 1024;
+ do {
+ lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
+ desc[0].desc,
+ desc[1].desc,
+ desc[2].desc,
+ desc[3].desc,
+ desc[4].desc,
+ desc[5].desc,
+ correlator);
+ } while ((lpar_rc == H_Busy) && (retry_count--));
+
+ if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
+ int i;
+ ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
+ for(i = 0; i < 6; i++) {
+			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
+ desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
+ }
+ adapter->tx_send_failed++;
+ adapter->stats.tx_dropped++;
+ } else {
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ }
+
+ do {
+ dma_unmap_single(&adapter->vdev->dev,
+ desc[nfrags].fields.address,
+ desc[nfrags].fields.length, DMA_TO_DEVICE);
+ } while(--nfrags >= 0);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int ibmveth_poll(struct net_device *netdev, int *budget)
+{
+ struct ibmveth_adapter *adapter = netdev->priv;
+ int max_frames_to_process = netdev->quota;
+ int frames_processed = 0;
+ int more_work = 1;
+ unsigned long lpar_rc;
+
+ restart_poll:
+ do {
+ struct net_device *netdev = adapter->netdev;
+
+ if(ibmveth_rxq_pending_buffer(adapter)) {
+ struct sk_buff *skb;
+
+ rmb();
+
+ if(!ibmveth_rxq_buffer_valid(adapter)) {
+ wmb(); /* suggested by larson1 */
+ adapter->rx_invalid_buffer++;
+ ibmveth_debug_printk("recycling invalid buffer\n");
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ int length = ibmveth_rxq_frame_length(adapter);
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+ ibmveth_rxq_harvest_buffer(adapter);
+
+ skb_reserve(skb, offset);
+ skb_put(skb, length);
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_receive_skb(skb); /* send it up */
+
+ adapter->stats.rx_packets++;
+ adapter->stats.rx_bytes += length;
+ frames_processed++;
+ }
+ } else {
+ more_work = 0;
+ }
+ } while(more_work && (frames_processed < max_frames_to_process));
+
+ ibmveth_schedule_replenishing(adapter);
+
+ if(more_work) {
+ /* more work to do - return that we are not done yet */
+ netdev->quota -= frames_processed;
+ *budget -= frames_processed;
+ return 1;
+ }
+
+ /* we think we are done - reenable interrupts, then check once more to make sure we are done */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+
+ ibmveth_assert(lpar_rc == H_Success);
+
+ netif_rx_complete(netdev);
+
+ if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
+ {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+ ibmveth_assert(lpar_rc == H_Success);
+ more_work = 1;
+ goto restart_poll;
+ }
+
+ netdev->quota -= frames_processed;
+ *budget -= frames_processed;
+
+ /* we really are done */
+ return 0;
+}
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *netdev = dev_instance;
+ struct ibmveth_adapter *adapter = netdev->priv;
+ unsigned long lpar_rc;
+
+ if(netif_rx_schedule_prep(netdev)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+ ibmveth_assert(lpar_rc == H_Success);
+ __netif_rx_schedule(netdev);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
+{
+ struct ibmveth_adapter *adapter = dev->priv;
+ return &adapter->stats;
+}
+
+static void ibmveth_set_multicast_list(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev->priv;
+ unsigned long lpar_rc;
+
+ if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering,
+ 0);
+ if(lpar_rc != H_Success) {
+ ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
+ }
+ } else {
+ struct dev_mc_list *mclist = netdev->mc_list;
+ int i;
+ /* clear the filter table & disable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering |
+ IbmVethMcastClearFilterTable,
+ 0);
+ if(lpar_rc != H_Success) {
+ ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
+ }
+ /* add the addresses to the filter table */
+ for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+			/* add the multicast address to the filter table */
+ unsigned long mcast_addr = 0;
+ memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastAddFilter,
+ mcast_addr);
+ if(lpar_rc != H_Success) {
+ ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
+ }
+ }
+
+ /* re-enable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableFiltering,
+ 0);
+ if(lpar_rc != H_Success) {
+ ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
+ }
+ }
+}
+
+static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > (1<<20)))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
+ int rc;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+
+ unsigned char *mac_addr_p;
+ unsigned int *mcastFilterSize_p;
+
+
+ ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
+ if(!mac_addr_p) {
+ printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
+ "attribute\n", __FILE__, __LINE__);
+ return 0;
+ }
+
+ mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
+ if(!mcastFilterSize_p) {
+ printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
+ "VETH_MCAST_FILTER_SIZE attribute\n",
+ __FILE__, __LINE__);
+ return 0;
+ }
+
+ netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
+
+ if(!netdev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(netdev);
+
+ adapter = netdev->priv;
+	memset(adapter, 0, sizeof(*adapter));
+ dev->dev.driver_data = netdev;
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+ adapter->mcastFilterSize= *mcastFilterSize_p;
+
+ /* Some older boxes running PHYP non-natively have an OF that
+	   returns an 8-byte local-mac-address field (and the first
+ 2 bytes have to be ignored) while newer boxes' OF return
+ a 6-byte field. Note that IEEE 1275 specifies that
+ local-mac-address must be a 6-byte field.
+ The RPA doc specifies that the first byte must be 10b, so
+ we'll just look for it to solve this 8 vs. 6 byte field issue */
+
+ if ((*mac_addr_p & 0x3) != 0x02)
+ mac_addr_p += 2;
+
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+ adapter->liobn = dev->iommu_table->it_index;
+
+ netdev->irq = dev->irq;
+ netdev->open = ibmveth_open;
+ netdev->poll = ibmveth_poll;
+ netdev->weight = 16;
+ netdev->stop = ibmveth_close;
+ netdev->hard_start_xmit = ibmveth_start_xmit;
+ netdev->get_stats = ibmveth_get_stats;
+ netdev->set_multicast_list = ibmveth_set_multicast_list;
+ netdev->do_ioctl = ibmveth_ioctl;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
+ netdev->change_mtu = ibmveth_change_mtu;
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
+ memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+
+ ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
+
+ INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
+
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+
+ atomic_set(&adapter->not_replenishing, 1);
+
+ ibmveth_debug_printk("registering netdev...\n");
+
+ rc = register_netdev(netdev);
+
+ if(rc) {
+ ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
+ free_netdev(netdev);
+ return rc;
+ }
+
+ ibmveth_debug_printk("registered\n");
+
+ ibmveth_proc_register_adapter(adapter);
+
+ return 0;
+}
+
+static int __devexit ibmveth_remove(struct vio_dev *dev)
+{
+ struct net_device *netdev = dev->dev.driver_data;
+ struct ibmveth_adapter *adapter = netdev->priv;
+
+ unregister_netdev(netdev);
+
+ ibmveth_proc_unregister_adapter(adapter);
+
+ free_netdev(netdev);
+ return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+static void ibmveth_proc_register_driver(void)
+{
+ ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net);
+ if (ibmveth_proc_dir) {
+ SET_MODULE_OWNER(ibmveth_proc_dir);
+ }
+}
+
+static void ibmveth_proc_unregister_driver(void)
+{
+ remove_proc_entry(IBMVETH_PROC_DIR, proc_net);
+}
+
+static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ if (*pos == 0) {
+ return (void *)1;
+ } else {
+ return NULL;
+ }
+}
+
+static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void ibmveth_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ibmveth_seq_show(struct seq_file *seq, void *v)
+{
+ struct ibmveth_adapter *adapter = seq->private;
+ char *current_mac = ((char*) &adapter->netdev->dev_addr);
+	char *firmware_mac = ((char*) &adapter->mac_addr);
+
+ seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
+
+ seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
+ seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
+ seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ current_mac[0], current_mac[1], current_mac[2],
+ current_mac[3], current_mac[4], current_mac[5]);
+ seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ firmware_mac[0], firmware_mac[1], firmware_mac[2],
+ firmware_mac[3], firmware_mac[4], firmware_mac[5]);
+
+ seq_printf(seq, "\nAdapter Statistics:\n");
+ seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
+ seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
+ seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
+	seq_printf(seq, "  dma_map_single failures: %ld\n", adapter->tx_map_failed);
+ seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed);
+ seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles);
+ seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem);
+ seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
+ seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
+ seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
+
+ return 0;
+}
+static struct seq_operations ibmveth_seq_ops = {
+ .start = ibmveth_seq_start,
+ .next = ibmveth_seq_next,
+ .stop = ibmveth_seq_stop,
+ .show = ibmveth_seq_show,
+};
+
+static int ibmveth_proc_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ struct proc_dir_entry *proc;
+ int rc;
+
+ rc = seq_open(file, &ibmveth_seq_ops);
+ if (!rc) {
+ /* recover the pointer buried in proc_dir_entry data */
+ seq = file->private_data;
+ proc = PDE(inode);
+ seq->private = proc->data;
+ }
+ return rc;
+}
+
+static struct file_operations ibmveth_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ibmveth_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
+{
+ struct proc_dir_entry *entry;
+ if (ibmveth_proc_dir) {
+ entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+ if (!entry) {
+ ibmveth_error_printk("Cannot create adapter proc entry");
+ } else {
+ entry->data = (void *) adapter;
+ entry->proc_fops = &ibmveth_proc_fops;
+ SET_MODULE_OWNER(entry);
+ }
+ }
+ return;
+}
+
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
+{
+ if (ibmveth_proc_dir) {
+ remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+ }
+}
+
+#else /* CONFIG_PROC_FS */
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
+{
+}
+
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
+{
+}
+static void ibmveth_proc_register_driver(void)
+{
+}
+
+static void ibmveth_proc_unregister_driver(void)
+{
+}
+#endif /* CONFIG_PROC_FS */
+
+static struct vio_device_id ibmveth_device_table[] __devinitdata= {
+ { "network", "IBM,l-lan"},
+ { 0,}
+};
+
+MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+
+static struct vio_driver ibmveth_driver = {
+ .name = (char *)ibmveth_driver_name,
+ .id_table = ibmveth_device_table,
+ .probe = ibmveth_probe,
+ .remove = ibmveth_remove
+};
+
+static int __init ibmveth_module_init(void)
+{
+ ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
+
+ ibmveth_proc_register_driver();
+
+ return vio_register_driver(&ibmveth_driver);
+}
+
+static void __exit ibmveth_module_exit(void)
+{
+ vio_unregister_driver(&ibmveth_driver);
+ ibmveth_proc_unregister_driver();
+}
+
+module_init(ibmveth_module_init);
+module_exit(ibmveth_module_exit);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
new file mode 100644
index 000000000000..51a470da9686
--- /dev/null
+++ b/drivers/net/ibmveth.h
@@ -0,0 +1,158 @@
+/**************************************************************************/
+/* */
+/* IBM eServer i/pSeries Virtual Ethernet Device Driver                  */
+/* Copyright (C) 2003 IBM Corp. */
+/* Dave Larson (larson1@us.ibm.com) */
+/* Santiago Leon (santil@us.ibm.com) */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the Free Software */
+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
+/* USA */
+/* */
+/**************************************************************************/
+
+#ifndef _IBMVETH_H
+#define _IBMVETH_H
+
+#define IbmVethMaxSendFrags 6
+
+/* constants for H_MULTICAST_CTRL */
+#define IbmVethMcastReceptionModifyBit 0x80000UL
+#define IbmVethMcastReceptionEnableBit 0x20000UL
+#define IbmVethMcastFilterModifyBit 0x40000UL
+#define IbmVethMcastFilterEnableBit 0x10000UL
+
+#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
+#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit)
+#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
+#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
+#define IbmVethMcastAddFilter 0x1UL
+#define IbmVethMcastRemoveFilter 0x2UL
+#define IbmVethMcastClearFilterTable 0x3UL
+
+/* hcall numbers */
+#define H_VIO_SIGNAL 0x104
+#define H_REGISTER_LOGICAL_LAN 0x114
+#define H_FREE_LOGICAL_LAN 0x118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
+#define H_SEND_LOGICAL_LAN 0x120
+#define H_MULTICAST_CTRL 0x130
+#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
+
+/* hcall macros */
+#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
+ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
+
+#define h_free_logical_lan(ua) \
+ plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)
+
+#define h_add_logical_lan_buffer(ua, buf) \
+ plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+
+#define h_send_logical_lan(ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator) \
+ plpar_hcall_8arg_2ret(H_SEND_LOGICAL_LAN, ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator, &correlator)
+
+#define h_multicast_ctrl(ua, cmd, mac) \
+ plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
+
+#define h_change_logical_lan_mac(ua, mac) \
+ plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
+
+#define IbmVethNumBufferPools 3
+#define IbmVethPool0DftSize (1024 * 2)
+#define IbmVethPool1DftSize (1024 * 4)
+#define IbmVethPool2DftSize (1024 * 10)
+#define IbmVethPool0DftCnt 256
+#define IbmVethPool1DftCnt 256
+#define IbmVethPool2DftCnt 256
+
+#define IBM_VETH_INVALID_MAP ((u16)0xffff)
+
+struct ibmveth_buff_pool {
+ u32 size;
+ u32 index;
+ u32 buff_size;
+ u32 threshold;
+ atomic_t available;
+ u32 consumer_index;
+ u32 producer_index;
+ u16 *free_map;
+ dma_addr_t *dma_addr;
+ struct sk_buff **skbuff;
+};
+
+struct ibmveth_rx_q {
+ u64 index;
+ u64 num_slots;
+ u64 toggle;
+ dma_addr_t queue_dma;
+ u32 queue_len;
+ struct ibmveth_rx_q_entry *queue_addr;
+};
+
+struct ibmveth_adapter {
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct net_device_stats stats;
+ unsigned int mcastFilterSize;
+ unsigned long mac_addr;
+ unsigned long liobn;
+ void * buffer_list_addr;
+ void * filter_list_addr;
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
+ struct ibmveth_rx_q rx_queue;
+ atomic_t not_replenishing;
+
+ /* helper tasks */
+ struct work_struct replenish_task;
+
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_multidesc_send;
+ u64 tx_linearized;
+ u64 tx_linearize_failed;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+};
+
+struct ibmveth_buf_desc_fields {
+ u32 valid : 1;
+ u32 toggle : 1;
+ u32 reserved : 6;
+ u32 length : 24;
+ u32 address;
+};
+
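+/* a descriptor is either handled as a single 64-bit value (desc), which is
+ * what the hcall macros take, or built up through the individual bit fields */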
+union ibmveth_buf_desc {
+ u64 desc;
+ struct ibmveth_buf_desc_fields fields;
+};
+
+struct ibmveth_rx_q_entry {
+ u16 toggle : 1;
+ u16 valid : 1;
+ u16 reserved : 14;
+ u16 offset;
+ u32 length;
+ u64 correlator;
+};
+
+#endif /* _IBMVETH_H */
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
new file mode 100644
index 000000000000..d520b5920d6c
--- /dev/null
+++ b/drivers/net/ioc3-eth.c
@@ -0,0 +1,1653 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Driver for SGI's IOC3 based Ethernet cards as found on PCI cards.
+ *
+ * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
+ *
+ * References:
+ * o IOC3 ASIC specification 4.51, 1996-04-18
+ * o IEEE 802.3 specification, 2000 edition
+ * o DP38840A Specification, National Semiconductor, March 1997
+ *
+ * To do:
+ *
+ * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
+ * o Handle allocation failures in ioc3_init_rings().
+ * o Use prefetching for large packets. What is a good lower limit for
+ * prefetching?
+ * o We're probably allocating a bit too much memory.
+ * o Use hardware checksums.
+ * o Convert to using a IOC3 meta driver.
+ * o  Which PHYs might possibly be attached to the IOC3 in real life,
+ * which workarounds are required for them? Do we ever have Lucent's?
+ * o For the 2.5 branch kill the mii-tool ioctls.
+ */
+
+#define IOC3_NAME "ioc3-eth"
+#define IOC3_VERSION "2.6.3-3"
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+#ifdef CONFIG_SERIAL_8250
+#include <linux/serial.h>
+#include <asm/serial.h>
+#define IOC3_BAUD (22000000 / (3*16))
+#define IOC3_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+
+#include <asm/byteorder.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn0/addrs.h>
+#include <asm/sn/sn0/hubni.h>
+#include <asm/sn/sn0/hubio.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/sn/sn0/ip27.h>
+#include <asm/pci/bridge.h>
+
+/*
+ * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
+ * value must be a power of two.
+ */
+#define RX_BUFFS 64
+
+#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
+
+/* Private per NIC data of the driver. */
+struct ioc3_private {
+ struct ioc3 *regs;
+ unsigned long *rxr; /* pointer to receiver ring */
+ struct ioc3_etxd *txr;
+ struct sk_buff *rx_skbs[512];
+ struct sk_buff *tx_skbs[128];
+ struct net_device_stats stats;
+ int rx_ci; /* RX consumer index */
+ int rx_pi; /* RX producer index */
+ int tx_ci; /* TX consumer index */
+ int tx_pi; /* TX producer index */
+ int txqlen;
+ u32 emcr, ehar_h, ehar_l;
+ spinlock_t ioc3_lock;
+ struct mii_if_info mii;
+ struct pci_dev *pdev;
+
+ /* Members used by autonegotiation */
+ struct timer_list ioc3_timer;
+};
+
+static inline struct net_device *priv_netdev(struct ioc3_private *dev)
+{
+ return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
+}
+
+static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ioc3_set_multicast_list(struct net_device *dev);
+static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void ioc3_timeout(struct net_device *dev);
+static inline unsigned int ioc3_hash(const unsigned char *addr);
+static inline void ioc3_stop(struct ioc3_private *ip);
+static void ioc3_init(struct net_device *dev);
+
+static const char ioc3_str[] = "IOC3 Ethernet";
+static struct ethtool_ops ioc3_ethtool_ops;
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+
+#define IOC3_CACHELINE 128UL
+
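+/* number of bytes needed to round addr up to the next IOC3_CACHELINE
+   boundary (zero if it is already aligned) */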
+static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
+{
+ return (~addr + 1) & (IOC3_CACHELINE - 1UL);
+}
+
+static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
+ unsigned int gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
+ if (likely(skb)) {
+ int offset = aligned_rx_skb_addr((unsigned long) skb->data);
+ if (offset)
+ skb_reserve(skb, offset);
+ }
+
+ return skb;
+}
+
+static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
+{
+#ifdef CONFIG_SGI_IP27
+ vdev <<= 58; /* Shift to PCI64_ATTR_VIRTUAL */
+
+ return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
+ ((unsigned long)ptr & TO_PHYS_MASK);
+#else
+ return virt_to_bus(ptr);
+#endif
+}
+
+/* BEWARE:  The IOC3 documentation gives the size of rx buffers as
+   1644 while it's actually 1664.  This one was nasty to track down ... */
+#define RX_OFFSET 10
+#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
+
+/* DMA barrier to separate cached and uncached accesses. */
+#define BARRIER() \
+ __asm__("sync" ::: "memory")
+
+
+#define IOC3_SIZE 0x100000
+
+/*
+ * IOC3 is a big endian device
+ *
+ * Unorthodox but makes code using these macros more readable - the pointer
+ * to the IOC3's memory-mapped registers is expected as struct ioc3 * ioc3
+ * in the environment.
+ */
+#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
+#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
+#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
+#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
+#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
+#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
+#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
+#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
+#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
+#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
+#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
+#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
+#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
+#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
+#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
+#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
+#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
+#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
+#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
+#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
+#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
+#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
+#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
+#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
+#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
+#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
+#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
+#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
+#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
+#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
+#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
+#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
+#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
+#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
+#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
+#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
+#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
+#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
+#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
+#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
+#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
+#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
+
+static inline u32 mcr_pack(u32 pulse, u32 sample)
+{
+ return (pulse << 10) | (sample << 2);
+}
+
+static int nic_wait(struct ioc3 *ioc3)
+{
+ u32 mcr;
+
+ do {
+ mcr = ioc3_r_mcr();
+ } while (!(mcr & 2));
+
+ return mcr & 1;
+}
+
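+/* send a reset pulse to the iButton and sample its presence pulse */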
+static int nic_reset(struct ioc3 *ioc3)
+{
+ int presence;
+
+ ioc3_w_mcr(mcr_pack(500, 65));
+ presence = nic_wait(ioc3);
+
+ ioc3_w_mcr(mcr_pack(0, 500));
+ nic_wait(ioc3);
+
+ return presence;
+}
+
+static inline int nic_read_bit(struct ioc3 *ioc3)
+{
+ int result;
+
+ ioc3_w_mcr(mcr_pack(6, 13));
+ result = nic_wait(ioc3);
+ ioc3_w_mcr(mcr_pack(0, 100));
+ nic_wait(ioc3);
+
+ return result;
+}
+
+static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
+{
+ if (bit)
+ ioc3_w_mcr(mcr_pack(6, 110));
+ else
+ ioc3_w_mcr(mcr_pack(80, 30));
+
+ nic_wait(ioc3);
+}
+
+/*
+ * Read a byte from an iButton device
+ */
+static u32 nic_read_byte(struct ioc3 *ioc3)
+{
+ u32 result = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ result = (result >> 1) | (nic_read_bit(ioc3) << 7);
+
+ return result;
+}
+
+/*
+ * Write a byte to an iButton device
+ */
+static void nic_write_byte(struct ioc3 *ioc3, int byte)
+{
+ int i, bit;
+
+ for (i = 8; i; i--) {
+ bit = byte & 1;
+ byte >>= 1;
+
+ nic_write_bit(ioc3, bit);
+ }
+}
+
+static u64 nic_find(struct ioc3 *ioc3, int *last)
+{
+ int a, b, index, disc;
+ u64 address = 0;
+
+ nic_reset(ioc3);
+ /* Search ROM. */
+ nic_write_byte(ioc3, 0xf0);
+
+ /* Algorithm from ``Book of iButton Standards''. */
+ for (index = 0, disc = 0; index < 64; index++) {
+ a = nic_read_bit(ioc3);
+ b = nic_read_bit(ioc3);
+
+ if (a && b) {
+ printk("NIC search failed (not fatal).\n");
+ *last = 0;
+ return 0;
+ }
+
+ if (!a && !b) {
+ if (index == *last) {
+ address |= 1UL << index;
+ } else if (index > *last) {
+ address &= ~(1UL << index);
+ disc = index;
+ } else if ((address & (1UL << index)) == 0)
+ disc = index;
+ nic_write_bit(ioc3, address & (1UL << index));
+ continue;
+ } else {
+ if (a)
+ address |= 1UL << index;
+ else
+ address &= ~(1UL << index);
+ nic_write_bit(ioc3, a);
+ continue;
+ }
+ }
+
+ *last = disc;
+
+ return address;
+}
+
+static int nic_init(struct ioc3 *ioc3)
+{
+ const char *type;
+ u8 crc;
+ u8 serial[6];
+ int save = 0, i;
+
+ type = "unknown";
+
+ while (1) {
+ u64 reg;
+ reg = nic_find(ioc3, &save);
+
+ switch (reg & 0xff) {
+ case 0x91:
+ type = "DS1981U";
+ break;
+ default:
+ if (save == 0) {
+ /* Let the caller try again. */
+ return -1;
+ }
+ continue;
+ }
+
+ nic_reset(ioc3);
+
+ /* Match ROM. */
+ nic_write_byte(ioc3, 0x55);
+ for (i = 0; i < 8; i++)
+ nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
+
+ reg >>= 8; /* Shift out type. */
+ for (i = 0; i < 6; i++) {
+ serial[i] = reg & 0xff;
+ reg >>= 8;
+ }
+ crc = reg & 0xff;
+ break;
+ }
+
+ printk("Found %s NIC", type);
+ if (type != "unknown") {
+ printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
+ " CRC %02x", serial[0], serial[1], serial[2],
+ serial[3], serial[4], serial[5], crc);
+ }
+ printk(".\n");
+
+ return 0;
+}
+
+/*
+ * Read the NIC (Number-In-a-Can) device used to store the MAC address on
+ * SN0 / SN00 nodeboards and PCI cards.
+ */
+static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+ u8 nic[14];
+ int tries = 2; /* There may be some problem with the battery? */
+ int i;
+
+ ioc3_w_gpcr_s(1 << 21);
+
+ while (tries--) {
+ if (!nic_init(ioc3))
+ break;
+ udelay(500);
+ }
+
+ if (tries < 0) {
+ printk("Failed to read MAC address\n");
+ return;
+ }
+
+ /* Read Memory. */
+ nic_write_byte(ioc3, 0xf0);
+ nic_write_byte(ioc3, 0x00);
+ nic_write_byte(ioc3, 0x00);
+
+ for (i = 13; i >= 0; i--)
+ nic[i] = nic_read_byte(ioc3);
+
+ for (i = 2; i < 8; i++)
+ priv_netdev(ip)->dev_addr[i - 2] = nic[i];
+}
+
+/*
+ * Ok, this is hosed by design. It's necessary to know what machine the
+ * NIC is in in order to know how to read the NIC address. We also have
+ * to know if it's a PCI card or a NIC on the node board ...
+ */
+static void ioc3_get_eaddr(struct ioc3_private *ip)
+{
+ int i;
+
+
+ ioc3_get_eaddr_nic(ip);
+
+ printk("Ethernet address is ");
+ for (i = 0; i < 6; i++) {
+ printk("%02x", priv_netdev(ip)->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+ printk(".\n");
+}
+
+static void __ioc3_set_mac_address(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
+ ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
+}
+
+static int ioc3_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ spin_lock_irq(&ip->ioc3_lock);
+ __ioc3_set_mac_address(dev);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return 0;
+}
+
+/*
+ * Caller must hold the ioc3_lock even for MII readers.  This is also
+ * used to protect the transmitter side but it's low contention.
+ */
+static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ while (ioc3_r_micr() & MICR_BUSY);
+ ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
+ while (ioc3_r_micr() & MICR_BUSY);
+
+ return ioc3_r_micr() & MIDR_DATA_MASK;
+}
+
+static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ while (ioc3_r_micr() & MICR_BUSY);
+ ioc3_w_midr_w(data);
+ ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
+ while (ioc3_r_micr() & MICR_BUSY);
+}
+
+static int ioc3_mii_init(struct ioc3_private *ip);
+
+static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ ip->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+ return &ip->stats;
+}
+
+#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM
+
+static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
+{
+ struct ethhdr *eh = eth_hdr(skb);
+ uint32_t csum, ehsum;
+ unsigned int proto;
+ struct iphdr *ih;
+ uint16_t *ew;
+ unsigned char *cp;
+
+ /*
+ * Did hardware handle the checksum at all? The cases we can handle
+ * are:
+ *
+ * - TCP and UDP checksums of IPv4 only.
+ * - IPv6 would be doable but we keep that for later ...
+ * - Only unfragmented packets. Did somebody already tell you
+ * fragmentation is evil?
+ * - don't care about packet size. Worst case when processing a
+ * malformed packet we'll try to access the packet at ip header +
+ * 64 bytes which is still inside the skb. Even in the unlikely
+ * case where the checksum is right the higher layers will still
+ * drop the packet as appropriate.
+ */
+ if (eh->h_proto != ntohs(ETH_P_IP))
+ return;
+
+ ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
+ if (ih->frag_off & htons(IP_MF | IP_OFFSET))
+ return;
+
+ proto = ih->protocol;
+ if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
+ return;
+
+ /* Same as tx - compute csum of pseudo header */
+ csum = hwsum +
+ (ih->tot_len - (ih->ihl << 2)) +
+ htons((uint16_t)ih->protocol) +
+ (ih->saddr >> 16) + (ih->saddr & 0xffff) +
+ (ih->daddr >> 16) + (ih->daddr & 0xffff);
+
+ /* Sum up ethernet dest addr, src addr and protocol */
+ ew = (uint16_t *) eh;
+ ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
+
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+
+ csum += 0xffff ^ ehsum;
+
+ /* In the next step we also subtract the 1's complement
+ checksum of the trailing ethernet CRC. */
+ cp = (char *)eh + len; /* points at trailing CRC */
+ if (len & 1) {
+ csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
+ csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
+ } else {
+ csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
+ csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
+ }
+
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+
+ if (csum == 0xffff)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+#endif /* CONFIG_SGI_IOC3_ETH_HW_RX_CSUM */
+
+static inline void ioc3_rx(struct ioc3_private *ip)
+{
+ struct sk_buff *skb, *new_skb;
+ struct ioc3 *ioc3 = ip->regs;
+ int rx_entry, n_entry, len;
+ struct ioc3_erxbuf *rxb;
+ unsigned long *rxr;
+ u32 w0, err;
+
+ rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rx_entry = ip->rx_ci; /* RX consume index */
+ n_entry = ip->rx_pi;
+
+ skb = ip->rx_skbs[rx_entry];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ w0 = be32_to_cpu(rxb->w0);
+
+ while (w0 & ERXBUF_V) {
+ err = be32_to_cpu(rxb->err); /* It's valid ... */
+ if (err & ERXBUF_GOODPKT) {
+ len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
+ skb_trim(skb, len);
+ skb->protocol = eth_type_trans(skb, priv_netdev(ip));
+
+ new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!new_skb) {
+ /* Ouch, drop packet and just recycle packet
+ to keep the ring filled. */
+ ip->stats.rx_dropped++;
+ new_skb = skb;
+ goto next;
+ }
+
+#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM
+ ioc3_tcpudp_checksum(skb, w0 & ERXBUF_IPCKSUM_MASK,len);
+#endif
+
+ netif_rx(skb);
+
+ ip->rx_skbs[rx_entry] = NULL; /* Poison */
+
+ new_skb->dev = priv_netdev(ip);
+
+ /* Because we reserve afterwards. */
+ skb_put(new_skb, (1664 + RX_OFFSET));
+ rxb = (struct ioc3_erxbuf *) new_skb->data;
+ skb_reserve(new_skb, RX_OFFSET);
+
+ priv_netdev(ip)->last_rx = jiffies;
+ ip->stats.rx_packets++; /* Statistics */
+ ip->stats.rx_bytes += len;
+ } else {
+ /* The frame is invalid and the skb never
+ reached the network layer so we can just
+ recycle it. */
+ new_skb = skb;
+ ip->stats.rx_errors++;
+ }
+ if (err & ERXBUF_CRCERR) /* Statistics */
+ ip->stats.rx_crc_errors++;
+ if (err & ERXBUF_FRAMERR)
+ ip->stats.rx_frame_errors++;
+next:
+ ip->rx_skbs[n_entry] = new_skb;
+ rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
+ rxb->w0 = 0; /* Clear valid flag */
+ n_entry = (n_entry + 1) & 511; /* Update erpir */
+
+ /* Now go on to the next ring entry. */
+ rx_entry = (rx_entry + 1) & 511;
+ skb = ip->rx_skbs[rx_entry];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ w0 = be32_to_cpu(rxb->w0);
+ }
+ ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
+ ip->rx_pi = n_entry;
+ ip->rx_ci = rx_entry;
+}
+
+static inline void ioc3_tx(struct ioc3_private *ip)
+{
+ unsigned long packets, bytes;
+ struct ioc3 *ioc3 = ip->regs;
+ int tx_entry, o_entry;
+ struct sk_buff *skb;
+ u32 etcir;
+
+ spin_lock(&ip->ioc3_lock);
+ etcir = ioc3_r_etcir();
+
+ tx_entry = (etcir >> 7) & 127;
+ o_entry = ip->tx_ci;
+ packets = 0;
+ bytes = 0;
+
+ while (o_entry != tx_entry) {
+ packets++;
+ skb = ip->tx_skbs[o_entry];
+ bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ ip->tx_skbs[o_entry] = NULL;
+
+ o_entry = (o_entry + 1) & 127; /* Next */
+
+ etcir = ioc3_r_etcir(); /* More pkts sent? */
+ tx_entry = (etcir >> 7) & 127;
+ }
+
+ ip->stats.tx_packets += packets;
+ ip->stats.tx_bytes += bytes;
+ ip->txqlen -= packets;
+
+ if (ip->txqlen < 128)
+ netif_wake_queue(priv_netdev(ip));
+
+ ip->tx_ci = o_entry;
+ spin_unlock(&ip->ioc3_lock);
+}
+
+/*
+ * Deal with fatal IOC3 errors.  This condition might be caused by a hardware
+ * or software problem, so we should try to recover more gracefully if this
+ * ever happens.  In theory we might be flooded with such error interrupts if
+ * something really goes wrong, so we might also consider taking the
+ * interface down.
+ */
+static void ioc3_error(struct ioc3_private *ip, u32 eisr)
+{
+ struct net_device *dev = priv_netdev(ip);
+ unsigned char *iface = dev->name;
+
+ spin_lock(&ip->ioc3_lock);
+
+ if (eisr & EISR_RXOFLO)
+ printk(KERN_ERR "%s: RX overflow.\n", iface);
+ if (eisr & EISR_RXBUFOFLO)
+ printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
+ if (eisr & EISR_RXMEMERR)
+ printk(KERN_ERR "%s: RX PCI error.\n", iface);
+ if (eisr & EISR_RXPARERR)
+ printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
+ if (eisr & EISR_TXBUFUFLO)
+ printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
+ if (eisr & EISR_TXMEMERR)
+ printk(KERN_ERR "%s: TX PCI error.\n", iface);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+ ioc3_mii_init(ip);
+
+ netif_wake_queue(dev);
+
+ spin_unlock(&ip->ioc3_lock);
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)_dev;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR;
+ u32 eisr;
+
+ eisr = ioc3_r_eisr() & enabled;
+
+ ioc3_w_eisr(eisr);
+ (void) ioc3_r_eisr(); /* Flush */
+
+ if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
+ EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
+ ioc3_error(ip, eisr);
+ if (eisr & EISR_RXTIMERINT)
+ ioc3_rx(ip);
+ if (eisr & EISR_TXEXPLICIT)
+ ioc3_tx(ip);
+
+ return IRQ_HANDLED;
+}
+
+static inline void ioc3_setup_duplex(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+
+ if (ip->mii.full_duplex) {
+ ioc3_w_etcsr(ETCSR_FD);
+ ip->emcr |= EMCR_DUPLEX;
+ } else {
+ ioc3_w_etcsr(ETCSR_HD);
+ ip->emcr &= ~EMCR_DUPLEX;
+ }
+ ioc3_w_emcr(ip->emcr);
+}
+
+static void ioc3_timer(unsigned long data)
+{
+ struct ioc3_private *ip = (struct ioc3_private *) data;
+
+ /* Print the link status if it has changed */
+ mii_check_media(&ip->mii, 1, 0);
+ ioc3_setup_duplex(ip);
+
+ ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
+ add_timer(&ip->ioc3_timer);
+}
+
+/*
+ * Try to find a PHY. There is no apparent relation between the MII addresses
+ * in the SGI documentation and what we find in reality, so we simply probe
+ * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
+ * onboard IOC3s has the special oddity that probing doesn't seem to find it
+ * yet the interface seems to work fine, so if probing fails we for now will
+ * simply default to PHY 31 instead of bailing out.
+ */
+static int ioc3_mii_init(struct ioc3_private *ip)
+{
+ struct net_device *dev = priv_netdev(ip);
+ int i, found = 0, res = 0;
+ int ioc3_phy_workaround = 1;
+ u16 word;
+
+ for (i = 0; i < 32; i++) {
+ word = ioc3_mdio_read(dev, i, MII_PHYSID1);
+
+ if (word != 0xffff && word != 0x0000) {
+ found = 1;
+ break; /* Found a PHY */
+ }
+ }
+
+ if (!found) {
+ if (ioc3_phy_workaround)
+ i = 31;
+ else {
+ ip->mii.phy_id = -1;
+ res = -ENODEV;
+ goto out;
+ }
+ }
+
+ ip->mii.phy_id = i;
+ ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ ip->ioc3_timer.data = (unsigned long) ip;
+ ip->ioc3_timer.function = &ioc3_timer;
+ add_timer(&ip->ioc3_timer);
+
+out:
+ return res;
+}
+
+static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = ip->rx_ci; i & 15; i++) {
+ ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
+ ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
+ }
+ ip->rx_pi &= 511;
+ ip->rx_ci &= 511;
+
+ for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
+ struct ioc3_erxbuf *rxb;
+ skb = ip->rx_skbs[i];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb->w0 = 0;
+ }
+}
+
+static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i=0; i < 128; i++) {
+ skb = ip->tx_skbs[i];
+ if (skb) {
+ ip->tx_skbs[i] = NULL;
+ dev_kfree_skb_any(skb);
+ }
+ ip->txr[i].cmd = 0;
+ }
+ ip->tx_pi = 0;
+ ip->tx_ci = 0;
+}
+
+static void ioc3_free_rings(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int rx_entry, n_entry;
+
+ if (ip->txr) {
+ ioc3_clean_tx_ring(ip);
+ free_pages((unsigned long)ip->txr, 2);
+ ip->txr = NULL;
+ }
+
+ if (ip->rxr) {
+ n_entry = ip->rx_ci;
+ rx_entry = ip->rx_pi;
+
+ while (n_entry != rx_entry) {
+ skb = ip->rx_skbs[n_entry];
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ n_entry = (n_entry + 1) & 511;
+ }
+ free_page((unsigned long)ip->rxr);
+ ip->rxr = NULL;
+ }
+}
+
+static void ioc3_alloc_rings(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3_erxbuf *rxb;
+ unsigned long *rxr;
+ int i;
+
+ if (ip->rxr == NULL) {
+ /* Allocate and initialize rx ring. 4kb = 512 entries */
+ ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
+ rxr = (unsigned long *) ip->rxr;
+ if (!rxr)
+ printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
+
+ /* Now the rx buffers. The RX ring may be larger but
+ we only allocate 16 buffers for now. Need to tune
+ this for performance and memory later. */
+ for (i = 0; i < RX_BUFFS; i++) {
+ struct sk_buff *skb;
+
+ skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ show_free_areas();
+ continue;
+ }
+
+ ip->rx_skbs[i] = skb;
+ skb->dev = dev;
+
+ /* Because we reserve afterwards. */
+ skb_put(skb, (1664 + RX_OFFSET));
+ rxb = (struct ioc3_erxbuf *) skb->data;
+ rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
+ skb_reserve(skb, RX_OFFSET);
+ }
+ ip->rx_ci = 0;
+ ip->rx_pi = RX_BUFFS;
+ }
+
+ if (ip->txr == NULL) {
+ /* Allocate and initialize tx rings. 16kb = 128 bufs. */
+ ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
+ if (!ip->txr)
+ printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
+ ip->tx_pi = 0;
+ ip->tx_ci = 0;
+ }
+}
+
+static void ioc3_init_rings(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ unsigned long ring;
+
+ ioc3_free_rings(ip);
+ ioc3_alloc_rings(dev);
+
+ ioc3_clean_rx_ring(ip);
+ ioc3_clean_tx_ring(ip);
+
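+ /* The consume/produce registers hold byte offsets; RX ring entries are
+ 8 bytes and TX descriptors 128 bytes each, hence the << 3 and << 7
+ shifts below. */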
+ /* Now the rx ring base, consume & produce registers. */
+ ring = ioc3_map(ip->rxr, 0);
+ ioc3_w_erbr_h(ring >> 32);
+ ioc3_w_erbr_l(ring & 0xffffffff);
+ ioc3_w_ercir(ip->rx_ci << 3);
+ ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
+
+ ring = ioc3_map(ip->txr, 0);
+
+ ip->txqlen = 0; /* nothing queued */
+
+ /* Now the tx ring base, consume & produce registers. */
+ ioc3_w_etbr_h(ring >> 32);
+ ioc3_w_etbr_l(ring & 0xffffffff);
+ ioc3_w_etpir(ip->tx_pi << 7);
+ ioc3_w_etcir(ip->tx_ci << 7);
+ (void) ioc3_r_etcir(); /* Flush */
+}
+
+static inline void ioc3_ssram_disc(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+ volatile u32 *ssram0 = &ioc3->ssram[0x0000];
+ volatile u32 *ssram1 = &ioc3->ssram[0x4000];
+ unsigned int pattern = 0x5555;
+
+ /* Assume the larger size SSRAM and enable parity checking */
+ ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
+
+ *ssram0 = pattern;
+ *ssram1 = ~pattern & IOC3_SSRAM_DM;
+
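+ /* If only the smaller 64 KB SSRAM is fitted, the two test locations
+ alias each other and the patterns won't read back intact. */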
+ if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
+ (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
+ /* set ssram size to 64 KB */
+ ip->emcr = EMCR_RAMPAR;
+ ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
+ } else
+ ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
+}
+
+static void ioc3_init(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ del_timer(&ip->ioc3_timer); /* Kill if running */
+
+ ioc3_w_emcr(EMCR_RST); /* Reset */
+ (void) ioc3_r_emcr(); /* Flush WB */
+ udelay(4); /* Give it time ... */
+ ioc3_w_emcr(0);
+ (void) ioc3_r_emcr();
+
+ /* Misc registers */
+#ifdef CONFIG_SGI_IP27
+ ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
+#else
+ ioc3_w_erbar(0); /* Let PCI API get it right */
+#endif
+ (void) ioc3_r_etcdc(); /* Clear on read */
+ ioc3_w_ercsr(15); /* RX low watermark */
+ ioc3_w_ertr(0); /* Interrupt immediately */
+ __ioc3_set_mac_address(dev);
+ ioc3_w_ehar_h(ip->ehar_h);
+ ioc3_w_ehar_l(ip->ehar_l);
+ ioc3_w_ersr(42); /* XXX should be random */
+
+ ioc3_init_rings(dev);
+
+ ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
+ EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
+ ioc3_w_emcr(ip->emcr);
+ ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR);
+ (void) ioc3_r_eier();
+}
+
+static inline void ioc3_stop(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+
+ ioc3_w_emcr(0); /* Shutup */
+ ioc3_w_eier(0); /* Disable interrupts */
+ (void) ioc3_r_eier(); /* Flush */
+}
+
+static int ioc3_open(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+
+ return -EAGAIN;
+ }
+
+ ip->ehar_h = 0;
+ ip->ehar_l = 0;
+ ioc3_init(dev);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int ioc3_close(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ del_timer(&ip->ioc3_timer);
+
+ netif_stop_queue(dev);
+
+ ioc3_stop(ip);
+ free_irq(dev->irq, dev);
+
+ ioc3_free_rings(ip);
+ return 0;
+}
+
+/*
+ * MENET cards have four IOC3 chips, which are attached to two sets of
+ * PCI slot resources each: the primary connections are on slots
+ * 0..3 and the secondaries are on 4..7
+ *
+ * All four ethernets are brought out to connectors; six serial ports
+ * (a pair from each of the first three IOC3s) are brought out to
+ * MiniDINs; all other subdevices are left swinging in the wind, so leave
+ * them disabled.
+ */
+static inline int ioc3_is_menet(struct pci_dev *pdev)
+{
+ struct pci_dev *dev;
+
+ return pdev->bus->parent == NULL
+ && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(0, 0)))
+ && dev->vendor == PCI_VENDOR_ID_SGI
+ && dev->device == PCI_DEVICE_ID_SGI_IOC3
+ && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(1, 0)))
+ && dev->vendor == PCI_VENDOR_ID_SGI
+ && dev->device == PCI_DEVICE_ID_SGI_IOC3
+ && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(2, 0)))
+ && dev->vendor == PCI_VENDOR_ID_SGI
+ && dev->device == PCI_DEVICE_ID_SGI_IOC3;
+}
+
+#ifdef CONFIG_SERIAL_8250
+/*
+ * Note about serial ports and consoles:
+ * For console output, everyone uses the IOC3 UARTA (offset 0x178)
+ * connected to the master node (look in ip27_setup_console() and
+ * ip27prom_console_write()).
+ *
+ * For serial (/dev/ttyS0 etc), we cannot have hardcoded serial port
+ * addresses on a partitioned machine. Since we currently use the ioc3
+ * serial ports, we use dynamic serial port discovery that the serial.c
+ * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
+ * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
+ * than UARTB's, although UARTA on o200s has traditionally been known as
+ * port 0. So, we just use one serial port from each ioc3 (since the
+ * serial driver adds addresses to get to higher ports).
+ *
+ * The first one to do a register_console becomes the preferred console
+ * (if there is no kernel command line console= directive). /dev/console
+ * (ie 5, 1) is then "aliased" into the device number returned by the
+ * "device" routine referred to in this console structure
+ * (ip27prom_console_dev).
+ *
+ * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
+ * around ioc3 oddities in this respect.
+ *
+ * The IOC3 serials use a 22MHz clock rate with an additional divider by 3.
+ * (IOC3_BAUD = (22000000 / (3*16)))
+ */
+
+static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
+{
+ struct serial_struct req;
+
+ /*
+ * We need to recognize and special-case the fourth MENET serial as it
+ * does not have a SuperIO chip attached to it; attempting to access it
+ * will result in bus errors. We call something a MENET if PCI slots
+ * 0, 1, 2 and 3 of a master PCI bus all have an IOC3 in them. This is
+ * paranoid, but since we want to avoid blowing up on a PCI box that
+ * happens to have four IOC3 cards shoehorned into it, it's arguably
+ * not paranoid enough ...
+ */
+ if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
+ return;
+
+ /* Register to interrupt zero because we share the interrupt with
+ the serial driver which we don't properly support yet. */
+ memset(&req, 0, sizeof(req));
+ req.irq = 0;
+ req.flags = IOC3_COM_FLAGS;
+ req.io_type = SERIAL_IO_MEM;
+ req.iomem_reg_shift = 0;
+ req.baud_base = IOC3_BAUD;
+
+ req.iomem_base = (unsigned char *) &ioc3->sregs.uarta;
+ register_serial(&req);
+
+ req.iomem_base = (unsigned char *) &ioc3->sregs.uartb;
+ register_serial(&req);
+}
+#endif
+
+static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int sw_physid1, sw_physid2;
+ struct net_device *dev = NULL;
+ struct ioc3_private *ip;
+ struct ioc3 *ioc3;
+ unsigned long ioc3_base, ioc3_size;
+ u32 vendor, model, rev;
+ int err, pci_using_dac;
+
+ /* Configure DMA attributes. */
+ err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+ if (!err) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
+ if (err < 0) {
+ printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto out;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, 0xffffffffULL);
+ if (err) {
+ printk(KERN_ERR "%s: No usable DMA configuration, "
+ "aborting.\n", pci_name(pdev));
+ goto out;
+ }
+ pci_using_dac = 0;
+ }
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct ioc3_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto out_disable;
+ }
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ err = pci_request_regions(pdev, "ioc3");
+ if (err)
+ goto out_free;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ip = netdev_priv(dev);
+
+ dev->irq = pdev->irq;
+
+ ioc3_base = pci_resource_start(pdev, 0);
+ ioc3_size = pci_resource_len(pdev, 0);
+ ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
+ if (!ioc3) {
+ printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_res;
+ }
+ ip->regs = ioc3;
+
+#ifdef CONFIG_SERIAL_8250
+ ioc3_serial_probe(pdev, ioc3);
+#endif
+
+ spin_lock_init(&ip->ioc3_lock);
+ init_timer(&ip->ioc3_timer);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+
+ ip->pdev = pdev;
+
+ ip->mii.phy_id_mask = 0x1f;
+ ip->mii.reg_num_mask = 0x1f;
+ ip->mii.dev = dev;
+ ip->mii.mdio_read = ioc3_mdio_read;
+ ip->mii.mdio_write = ioc3_mdio_write;
+
+ ioc3_mii_init(ip);
+
+ if (ip->mii.phy_id == -1) {
+ printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto out_stop;
+ }
+
+ ioc3_ssram_disc(ip);
+ ioc3_get_eaddr(ip);
+
+ /* The IOC3-specific entries in the device structure. */
+ dev->open = ioc3_open;
+ dev->hard_start_xmit = ioc3_start_xmit;
+ dev->tx_timeout = ioc3_timeout;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->stop = ioc3_close;
+ dev->get_stats = ioc3_get_stats;
+ dev->do_ioctl = ioc3_ioctl;
+ dev->set_multicast_list = ioc3_set_multicast_list;
+ dev->set_mac_address = ioc3_set_mac_address;
+ dev->ethtool_ops = &ioc3_ethtool_ops;
+#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM
+ dev->features = NETIF_F_IP_CSUM;
+#endif
+
+ ioc3_setup_duplex(ip);
+ sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
+ sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_stop;
+
+ mii_check_media(&ip->mii, 1, 1);
+
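+ /* Decode the PHY vendor OUI, model and revision from the MII PHYSID
+ registers. */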
+ vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
+ model = (sw_physid2 >> 4) & 0x3f;
+ rev = sw_physid2 & 0xf;
+ printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
+ "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
+ printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
+ ip->emcr & EMCR_BUFSIZ ? 128 : 64);
+
+ return 0;
+
+out_stop:
+ ioc3_stop(ip);
+ ioc3_free_rings(ip);
+out_res:
+ pci_release_regions(pdev);
+out_free:
+ free_netdev(dev);
+out_disable:
+ /*
+ * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ * such a weird device ...
+ */
+out:
+ return err;
+}
+
+static void __devexit ioc3_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ unregister_netdev(dev);
+ iounmap(ioc3);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ /*
+ * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ * such a weird device ...
+ */
+}
+
+static struct pci_device_id ioc3_pci_tbl[] = {
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
+
+static struct pci_driver ioc3_driver = {
+ .name = "ioc3-eth",
+ .id_table = ioc3_pci_tbl,
+ .probe = ioc3_probe,
+ .remove = __devexit_p(ioc3_remove_one),
+};
+
+static int __init ioc3_init_module(void)
+{
+ return pci_module_init(&ioc3_driver);
+}
+
+static void __exit ioc3_cleanup_module(void)
+{
+ pci_unregister_driver(&ioc3_driver);
+}
+
+static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long data;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ unsigned int len;
+ struct ioc3_etxd *desc;
+ uint32_t w0 = 0;
+ int produce;
+
+#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM
+ /*
+ * The IOC3 has fairly simple-minded checksumming hardware which simply
+ * adds up the 1's complement checksum of the entire packet and
+ * inserts it into the transmit packet at an offset specified in the
+ * descriptor. This means we have to manually compensate for the
+ * MAC header, which should not be summed, and for the TCP/UDP
+ * pseudo-header.
+ */
+ if (skb->ip_summed == CHECKSUM_HW) {
+ int proto = ntohs(skb->nh.iph->protocol);
+ unsigned int csoff;
+ struct iphdr *ih = skb->nh.iph;
+ uint32_t csum, ehsum;
+ uint16_t *eh;
+
+ /* The MAC header. skb->mac seems the logical approach
+ to find the MAC header - except it's a NULL pointer ... */
+ eh = (uint16_t *) skb->data;
+
+ /* Sum up dest addr, src addr and protocol */
+ ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
+
+ /* Fold ehsum. We can't use csum_fold(), which also negates ... */
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+
+ /* Skip the IP header; its sum is always zero and was
+ already filled in by ip_output.c */
+ csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
+ ih->tot_len - (ih->ihl << 2),
+ proto, 0xffff ^ ehsum);
+
+ csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
+ csum = (csum & 0xffff) + (csum >> 16);
+
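+ /* Tell the hardware to insert the checksum at the transport
+ header's checksum field. */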
+ csoff = ETH_HLEN + (ih->ihl << 2);
+ if (proto == IPPROTO_UDP) {
+ csoff += offsetof(struct udphdr, check);
+ skb->h.uh->check = csum;
+ }
+ if (proto == IPPROTO_TCP) {
+ csoff += offsetof(struct tcphdr, check);
+ skb->h.th->check = csum;
+ }
+
+ w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
+ }
+#endif /* CONFIG_SGI_IOC3_ETH_HW_TX_CSUM */
+
+ spin_lock_irq(&ip->ioc3_lock);
+
+ data = (unsigned long) skb->data;
+ len = skb->len;
+
+ produce = ip->tx_pi;
+ desc = &ip->txr[produce];
+
+ if (len <= 104) {
+ /* Short packet, let's copy it directly into the ring. */
+ memcpy(desc->data, skb->data, skb->len);
+ if (len < ETH_ZLEN) {
+ /* Very short packet, pad with zeros at the end. */
+ memset(desc->data + len, 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
+ desc->bufcnt = cpu_to_be32(len);
+ } else if ((data ^ (data + len - 1)) & 0x4000) {
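+ /* The buffer spans a 16 KB boundary (bit 14 differs between the
+ first and the last byte), so describe it with two buffer pointers. */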
+ unsigned long b2 = (data | 0x3fffUL) + 1UL;
+ unsigned long s1 = b2 - data;
+ unsigned long s2 = data + len - b2;
+
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
+ ETXD_B1V | ETXD_B2V | w0);
+ desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
+ (s2 << ETXD_B2CNT_SHIFT));
+ desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
+ } else {
+ /* Normal sized packet that doesn't cross a page boundary. */
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
+ desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
+ desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ }
+
+ BARRIER();
+
+ dev->trans_start = jiffies;
+ ip->tx_skbs[produce] = skb; /* Remember skb */
+ produce = (produce + 1) & 127;
+ ip->tx_pi = produce;
+ ioc3_w_etpir(produce << 7); /* Fire ... */
+
+ ip->txqlen++;
+
+ if (ip->txqlen >= 127)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return 0;
+}
+
+static void ioc3_timeout(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+
+ spin_lock_irq(&ip->ioc3_lock);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+ ioc3_mii_init(ip);
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Given a multicast ethernet address, this routine calculates the
+ * address's bit index in the logical address filter mask
+ */
+
+static inline unsigned int ioc3_hash(const unsigned char *addr)
+{
+ unsigned int temp = 0;
+ u32 crc;
+ int bits;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
+ for (bits = 6; --bits >= 0; ) {
+ temp <<= 1;
+ temp |= (crc & 0x1);
+ crc >>= 1;
+ }
+
+ return temp;
+}
+
+static void ioc3_get_drvinfo (struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ strcpy (info->driver, IOC3_NAME);
+ strcpy (info->version, IOC3_VERSION);
+ strcpy (info->bus_info, pci_name(ip->pdev));
+}
+
+static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_ethtool_gset(&ip->mii, cmd);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_ethtool_sset(&ip->mii, cmd);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static int ioc3_nway_reset(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_nway_restart(&ip->mii);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static u32 ioc3_get_link(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_link_ok(&ip->mii);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static struct ethtool_ops ioc3_ethtool_ops = {
+ .get_drvinfo = ioc3_get_drvinfo,
+ .get_settings = ioc3_get_settings,
+ .set_settings = ioc3_set_settings,
+ .nway_reset = ioc3_nway_reset,
+ .get_link = ioc3_get_link,
+};
+
+static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static void ioc3_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ u64 ehar = 0;
+ int i;
+
+ netif_stop_queue(dev); /* Lock out others. */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ ip->emcr |= EMCR_PROMISC;
+ ioc3_w_emcr(ip->emcr);
+ (void) ioc3_r_emcr();
+ } else {
+ ip->emcr &= ~EMCR_PROMISC;
+ ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
+ (void) ioc3_r_emcr();
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ /* Too many for hashing to make sense or we want all
+ multicast packets anyway, so skip computing all the
+ hashes and just accept all packets. */
+ ip->ehar_h = 0xffffffff;
+ ip->ehar_l = 0xffffffff;
+ } else {
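+ /* Build the 64-bit multicast hash filter; the upper and lower
+ 32 bits end up in EHAR_H and EHAR_L below. */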
+ for (i = 0; i < dev->mc_count; i++) {
+ char *addr = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if (!(*addr & 1))
+ continue;
+
+ ehar |= (1UL << ioc3_hash(addr));
+ }
+ ip->ehar_h = ehar >> 32;
+ ip->ehar_l = ehar & 0xffffffff;
+ }
+ ioc3_w_ehar_h(ip->ehar_h);
+ ioc3_w_ehar_l(ip->ehar_l);
+ }
+
+ netif_wake_queue(dev); /* Let us get going again. */
+}
+
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_init(ioc3_init_module);
+module_exit(ioc3_cleanup_module);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
new file mode 100644
index 000000000000..a464841da49f
--- /dev/null
+++ b/drivers/net/irda/Kconfig
@@ -0,0 +1,404 @@
+
+menu "Infrared-port device drivers"
+ depends on IRDA!=n
+
+comment "SIR device drivers"
+
+config IRTTY_SIR
+ tristate "IrTTY (uses Linux serial driver)"
+ depends on IRDA
+ help
+ Say Y here if you want to build support for the IrTTY line
+ discipline. To compile it as a module, choose M here: the module
+ will be called irtty-sir. IrTTY makes it possible to use Linux's
+ own serial driver for all IrDA ports that are 16550 compatible.
+ Most IrDA chips are 16550 compatible so you should probably say Y
+ to this option. Using IrTTY will however limit the speed of the
+ connection to 115200 bps (IrDA SIR mode).
+
+ If unsure, say Y.
+
+comment "Dongle support"
+
+config DONGLE
+ bool "Serial dongle support"
+ depends on IRTTY_SIR
+ help
+ Say Y here if you have an infrared device that connects to your
+ computer's serial port. These devices are called dongles. Then say Y
+ or M to the driver for your particular dongle below.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial dongles.
+
+config ESI_DONGLE
+ tristate "ESI JetEye PC dongle"
+ depends on DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Extended Systems
+ JetEye PC dongle. To compile it as a module, choose M here. The ESI
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for ESI
+ dongles you will have to start irattach like this:
+ "irattach -d esi".
+
+config ACTISYS_DONGLE
+ tristate "ACTiSYS IR-220L and IR220L+ dongle"
+ depends on DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-220L and
+ IR220L+ dongles. To compile it as a module, choose M here. The
+ ACTiSYS dongles attach to the normal 9-pin serial port connector,
+ and can currently only be used by IrTTY. To activate support for
+ ACTiSYS dongles you will have to start irattach like this:
+ "irattach -d actisys" or "irattach -d actisys+".
+
+config TEKRAM_DONGLE
+ tristate "Tekram IrMate 210B dongle"
+ depends on DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Tekram IrMate 210B
+ dongle. To compile it as a module, choose M here. The Tekram dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Tekram
+ dongles you will have to start irattach like this:
+ "irattach -d tekram".
+
+config LITELINK_DONGLE
+ tristate "Parallax LiteLink dongle"
+ depends on DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Parallax Litelink
+ dongle. To compile it as a module, choose M here. The Parallax
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Parallax
+ dongles you will have to start irattach like this:
+ "irattach -d litelink".
+
+config MA600_DONGLE
+ tristate "Mobile Action MA600 dongle"
+ depends on DONGLE && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the Mobile Action MA600
+ dongle. To compile it as a module, choose M here. The MA600 dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. The driver should also support
+ the MA620 USB version of the dongle, if the integrated USB-to-RS232
+ converter is supported by usbserial. To activate support for
+ MA600 dongle you will have to start irattach like this:
+ "irattach -d ma600".
+
+config GIRBIL_DONGLE
+ tristate "Greenwich GIrBIL dongle"
+ depends on DONGLE && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the Greenwich GIrBIL
+ dongle. If you want to compile it as a module, choose M here.
+ The Greenwich dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for Greenwich dongles you will have to start irattach
+ like this: "irattach -d girbil".
+
+config MCP2120_DONGLE
+ tristate "Microchip MCP2120"
+ depends on DONGLE && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the Microchip MCP2120
+ dongle. If you want to compile it as a module, choose M here.
+ The MCP2120 dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for MCP2120 dongles you will have to start irattach
+ like this: "irattach -d mcp2120".
+
+ You must build this dongle yourself. For more information see:
+ <http://www.eyetap.org/~tangf/irda_sir_linux.html>
+
+config OLD_BELKIN_DONGLE
+ tristate "Old Belkin dongle"
+ depends on DONGLE && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the Adaptec Airport 1000
+ and 2000 dongles. If you want to compile it as a module, choose
+ M here. Some information is contained in the comments
+ at the top of <file:drivers/net/irda/old_belkin.c>.
+
+config ACT200L_DONGLE
+ tristate "ACTiSYS IR-200L dongle"
+ depends on DONGLE && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-200L
+ dongle. If you want to compile it as a module, choose M here.
+ The ACTiSYS IR-200L dongle attaches to the normal 9-pin serial
+ port connector, and can currently only be used by IrTTY.
+ To activate support for ACTiSYS IR-200L dongle you will have to
+ start irattach like this: "irattach -d act200l".
+
+comment "Old SIR device drivers"
+
+config IRPORT_SIR
+ tristate "IrPORT (IrDA serial driver)"
+ depends on IRDA && BROKEN_ON_SMP
+ ---help---
+ Say Y here if you want to build support for the IrPORT IrDA device
+ driver. To compile it as a module, choose M here: the module will be
+ called irport. IrPORT can be used instead of IrTTY and sometimes
+ this can be better. One example is if your IrDA port does not
+ have echo-canceling, which will work OK with IrPORT since this
+ driver works in half-duplex mode only. You don't need to use
+ irattach with IrPORT; you just insert the module the same way as FIR
+ drivers (insmod irport io=0x3e8 irq=11). Notice that IrPORT is a
+ SIR device driver which means that speed is limited to 115200 bps.
+
+ If unsure, say Y.
+
+comment "Old Serial dongle support"
+
+config DONGLE_OLD
+ bool "Old Serial dongle support"
+ depends on (IRTTY_OLD || IRPORT_SIR) && BROKEN_ON_SMP
+ help
+ Say Y here if you have an infrared device that connects to your
+ computer's serial port. These devices are called dongles. Then say Y
+ or M to the driver for your particular dongle below.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial dongles.
+
+config ESI_DONGLE_OLD
+ tristate "ESI JetEye PC dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Extended Systems
+ JetEye PC dongle. To compile it as a module, choose M here. The ESI
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for ESI
+ dongles you will have to start irattach like this:
+ "irattach -d esi".
+
+config ACTISYS_DONGLE_OLD
+ tristate "ACTiSYS IR-220L and IR220L+ dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-220L and
+ IR220L+ dongles. To compile it as a module, choose M here. The
+ ACTiSYS dongles attach to the normal 9-pin serial port connector,
+ and can currently only be used by IrTTY. To activate support for
+ ACTiSYS dongles you will have to start irattach like this:
+ "irattach -d actisys" or "irattach -d actisys+".
+
+config TEKRAM_DONGLE_OLD
+ tristate "Tekram IrMate 210B dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Tekram IrMate 210B
+ dongle. To compile it as a module, choose M here. The Tekram dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Tekram
+ dongles you will have to start irattach like this:
+ "irattach -d tekram".
+
+config GIRBIL_DONGLE_OLD
+ tristate "Greenwich GIrBIL dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Greenwich GIrBIL
+ dongle. To compile it as a module, choose M here. The Greenwich
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Greenwich
+ dongles you will have to insert "irattach -d girbil" in the
+ /etc/irda/drivers script.
+
+config LITELINK_DONGLE_OLD
+ tristate "Parallax LiteLink dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Parallax Litelink
+ dongle. To compile it as a module, choose M here. The Parallax
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Parallax
+ dongles you will have to start irattach like this:
+ "irattach -d litelink".
+
+config MCP2120_DONGLE_OLD
+ tristate "Microchip MCP2120"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Microchip MCP2120
+ dongle. To compile it as a module, choose M here. The MCP2120 dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for MCP2120
+ dongles you will have to insert "irattach -d mcp2120" in the
+ /etc/irda/drivers script.
+
+ You must build this dongle yourself. For more information see:
+ <http://www.eyetap.org/~tangf/irda_sir_linux.html>
+
+config OLD_BELKIN_DONGLE_OLD
+ tristate "Old Belkin dongle"
+ depends on DONGLE_OLD && IRDA
+ help
+ Say Y here if you want to build support for the Adaptec Airport 1000
+ and 2000 dongles. To compile it as a module, choose M here: the module
+ will be called old_belkin. Some information is contained in the
+ comments at the top of <file:drivers/net/irda/old_belkin.c>.
+
+config ACT200L_DONGLE_OLD
+ tristate "ACTiSYS IR-200L dongle (EXPERIMENTAL)"
+ depends on DONGLE_OLD && EXPERIMENTAL && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-200L
+ dongle. To compile it as a module, choose M here. The ACTiSYS
+ IR-200L dongle attaches to the normal 9-pin serial port connector,
+ and can currently only be used by IrTTY. To activate support for
+ ACTiSYS IR-200L dongles you will have to start irattach like this:
+ "irattach -d act200l".
+
+config MA600_DONGLE_OLD
+ tristate "Mobile Action MA600 dongle (EXPERIMENTAL)"
+ depends on DONGLE_OLD && EXPERIMENTAL && IRDA
+ ---help---
+ Say Y here if you want to build support for the Mobile Action MA600
+ dongle. To compile it as a module, choose M here. The MA600 dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be tested on IrCOMM. To activate support for MA600
+ dongles you will have to insert "irattach -d ma600" in the
+ /etc/irda/drivers script. Note: irutils 0.9.15 requires no
+ modification; irutils 0.9.9 needs modification. For more
+ information and a pre-compiled module, see
+ <http://engsvr.ust.hk/~eetwl95/ma600.html>
+
+config EP7211_IR
+ tristate "EP7211 I/R support"
+ depends on DONGLE_OLD && ARCH_EP7211 && IRDA
+
+comment "FIR device drivers"
+
+config USB_IRDA
+ tristate "IrDA USB dongles"
+ depends on IRDA && USB
+ ---help---
+ Say Y here if you want to build support for the USB IrDA FIR Dongle
+ device driver. To compile it as a module, choose M here: the module
+ will be called irda-usb. IrDA-USB support the various IrDA USB
+ dongles available and most of their peculiarities. Those dongles
+ plug into the USB port of your computer, are plug and play, and
+ support SIR and FIR (4Mbps) speeds. On the other hand, those
+ dongles tend to be less efficient than a FIR chipset.
+
+ Please note that the driver is still experimental. And of course,
+ you will need both USB and IrDA support in your kernel...
+
+config SIGMATEL_FIR
+ tristate "SigmaTel STIr4200 bridge (EXPERIMENTAL)"
+ depends on IRDA && USB && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Say Y here if you want to build support for the SigmaTel STIr4200
+ USB IrDA FIR bridge device driver.
+
+ USB bridges based on the SigmaTel STIr4200 don't conform to the
+ IrDA-USB device class specification, and therefore need their
+ own specific driver. Those dongles support SIR and FIR (4Mbps)
+ speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ stir4200.
+
+config NSC_FIR
+ tristate "NSC PC87108/PC87338"
+ depends on IRDA && ISA
+ help
+ Say Y here if you want to build support for the NSC PC87108 and
+ PC87338 IrDA chipsets. This driver supports SIR,
+ MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ nsc-ircc.
+
+config WINBOND_FIR
+ tristate "Winbond W83977AF (IR)"
+ depends on IRDA && ISA
+ help
+ Say Y here if you want to build IrDA support for the Winbond
+ W83977AF super-io chipset. This driver should be used for the IrDA
+ chipset in the Corel NetWinder. The driver supports SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ w83977af_ir.
+
+config TOSHIBA_FIR
+ tristate "Toshiba Type-O IR Port"
+ depends on IRDA && PCI && !64BIT
+ help
+ Say Y here if you want to build support for the Toshiba Type-O IR
+ and Donau oboe chipsets. These chipsets are used by the Toshiba
+ Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
+ To compile it as a module, choose M here: the module will be called
+ donauboe.
+
+config AU1000_FIR
+ tristate "Alchemy Au1000 SIR/FIR"
+ depends on MIPS_AU1000 && IRDA
+
+config SMC_IRCC_FIR
+ tristate "SMSC IrCC (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && IRDA && ISA
+ help
+ Say Y here if you want to build support for the SMC Infrared
+ Communications Controller. It is used in a wide variety of
+ laptops (Fujitsu, Sony, Compaq and some Toshiba).
+ To compile it as a module, choose M here: the module will be called
+ smsc-ircc2.
+
+config ALI_FIR
+ tristate "ALi M5123 FIR (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && IRDA && ISA
+ help
+ Say Y here if you want to build support for the ALi M5123 FIR
+ Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
+ M1535, M1535D, M1535+, M1535D South Bridge. This driver supports
+ SIR, MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ ali-ircc.
+
+config VLSI_FIR
+ tristate "VLSI 82C147 SIR/MIR/FIR (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && IRDA && PCI
+ help
+ Say Y here if you want to build support for the VLSI 82C147
+ PCI-IrDA Controller. This controller is used by the HP OmniBook 800
+ and 5500 notebooks. The driver provides support for SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ vlsi_ir.
+
+config SA1100_FIR
+ tristate "SA1100 Internal IR"
+ depends on ARCH_SA1100 && IRDA
+
+config VIA_FIR
+ tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
+ depends on IRDA && ISA && PCI
+ help
+ Say Y here if you want to build support for the VIA VT8231
+ and VIA VT1211 IrDA controllers, found on motherboards using
+ those VIA chipsets. To use this controller, you will need
+ to plug a specific 5-pin FIR IrDA dongle into the dedicated
+ motherboard connector. The driver provides support for SIR, MIR
+ and FIR (4Mbps) speeds.
+
+ You will need to specify the 'dongle_id' module parameter to
+ indicate the FIR dongle attached to the controller.
+
+ To compile it as a module, choose M here: the module will be called
+ via-ircc.
+
+endmenu
+
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
new file mode 100644
index 000000000000..29a8bd812b21
--- /dev/null
+++ b/drivers/net/irda/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile for the Linux IrDA infrared port device drivers.
+#
+# 9 Aug 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+# Old SIR drivers
+obj-$(CONFIG_IRPORT_SIR) += irport.o
+# FIR drivers
+obj-$(CONFIG_USB_IRDA) += irda-usb.o
+obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o
+obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
+obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
+obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
+obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
+obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
+obj-$(CONFIG_ALI_FIR) += ali-ircc.o
+obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
+obj-$(CONFIG_VIA_FIR) += via-ircc.o
+# Old dongle drivers for old SIR drivers
+obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o
+obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o
+obj-$(CONFIG_ACTISYS_DONGLE_OLD) += actisys.o
+obj-$(CONFIG_GIRBIL_DONGLE_OLD) += girbil.o
+obj-$(CONFIG_LITELINK_DONGLE_OLD) += litelink.o
+obj-$(CONFIG_OLD_BELKIN_DONGLE_OLD) += old_belkin.o
+obj-$(CONFIG_MCP2120_DONGLE_OLD) += mcp2120.o
+obj-$(CONFIG_ACT200L_DONGLE_OLD) += act200l.o
+obj-$(CONFIG_MA600_DONGLE_OLD) += ma600.o
+obj-$(CONFIG_EP7211_IR) += ep7211_ir.o
+obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+# New SIR drivers
+obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
+# New dongles drivers for new SIR drivers
+obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
+obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
+obj-$(CONFIG_ACTISYS_DONGLE) += actisys-sir.o
+obj-$(CONFIG_LITELINK_DONGLE) += litelink-sir.o
+obj-$(CONFIG_GIRBIL_DONGLE) += girbil-sir.o
+obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
+obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
+obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
+obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
+
+# The SIR helper module
+sir-dev-objs := sir_core.o sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
new file mode 100644
index 000000000000..d8b89c74aabd
--- /dev/null
+++ b/drivers/net/irda/act200l-sir.c
@@ -0,0 +1,257 @@
+/*********************************************************************
+ *
+ * Filename: act200l.c
+ * Version: 0.8
+ * Description: Implementation for the ACTiSYS ACT-IR200L dongle
+ * Status: Experimental.
+ * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ * Created at: Fri Aug 3 17:35:42 2001
+ * Modified at: Fri Aug 17 10:22:40 2001
+ * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ *
+ * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int act200l_reset(struct sir_dev *dev);
+static int act200l_open(struct sir_dev *dev);
+static int act200l_close(struct sir_dev *dev);
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Register 0: Control register #1 */
+#define ACT200L_REG0 0x00
+#define ACT200L_TXEN 0x01 /* Enable transmitter */
+#define ACT200L_RXEN 0x02 /* Enable receiver */
+
+/* Register 1: Control register #2 */
+#define ACT200L_REG1 0x10
+#define ACT200L_LODB 0x01 /* Load new baud rate count value */
+#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
+
+/* Register 4: Output Power register */
+#define ACT200L_REG4 0x40
+#define ACT200L_OP0 0x01 /* Enable LED1C output */
+#define ACT200L_OP1 0x02 /* Enable LED2C output */
+#define ACT200L_BLKR 0x04
+
+/* Register 5: Receive Mode register */
+#define ACT200L_REG5 0x50
+#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
+
+/* Register 6: Receive Sensitivity register #1 */
+#define ACT200L_REG6 0x60
+#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
+#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
+
+/* Register 7: Receive Sensitivity register #2 */
+#define ACT200L_REG7 0x70
+#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
+
+/* Register 8,9: Baud Rate Divider register #1,#2 */
+#define ACT200L_REG8 0x80
+#define ACT200L_REG9 0x90
+
+#define ACT200L_2400 0x5f
+#define ACT200L_9600 0x17
+#define ACT200L_19200 0x0b
+#define ACT200L_38400 0x05
+#define ACT200L_57600 0x03
+#define ACT200L_115200 0x01
+
+/* Register 13: Control register #3 */
+#define ACT200L_REG13 0xd0
+#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
+
+/* Register 15: Status register */
+#define ACT200L_REG15 0xf0
+
+/* Register 21: Control register #4 */
+#define ACT200L_REG21 0x50
+#define ACT200L_EXCK 0x02 /* Disable clock output driver */
+#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
+
+static struct dongle_driver act200l = {
+ .owner = THIS_MODULE,
+ .driver_name = "ACTiSYS ACT-IR200L",
+ .type = IRDA_ACT200L_DONGLE,
+ .open = act200l_open,
+ .close = act200l_close,
+ .reset = act200l_reset,
+ .set_speed = act200l_change_speed,
+};
+
+static int __init act200l_sir_init(void)
+{
+ return irda_register_dongle(&act200l);
+}
+
+static void __exit act200l_sir_cleanup(void)
+{
+ irda_unregister_dongle(&act200l);
+}
+
+static int act200l_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Power on the dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int act200l_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function act200l_change_speed (dev, speed)
+ *
+ * Set the speed for the ACTiSYS ACT-IR200L type dongle.
+ *
+ */
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 control[3];
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
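+ /* Registers 8 and 9 take the low and the high nibble of the
+ baud rate divider value, respectively. */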
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
+ break;
+ case 19200:
+ control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
+ break;
+ case 38400:
+ control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
+ break;
+ case 57600:
+ control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
+ break;
+ case 115200:
+ control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
+ break;
+ }
+ control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 3);
+ msleep(5);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = speed;
+ return ret;
+}
+
+/*
+ * Function act200l_reset (driver)
+ *
+ * Reset the ACTiSYS ACT-IR200L type dongle.
+ */
+
+#define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int act200l_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
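+ /* Initialisation sequence: touch the shadow registers for the clock
+ setup, program the receive and output power options, then enable
+ transmitter and receiver. */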
+ u8 control[9] = {
+ ACT200L_REG15,
+ ACT200L_REG13 | ACT200L_SHDW,
+ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
+ ACT200L_REG13,
+ ACT200L_REG7 | ACT200L_ENPOS,
+ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
+ ACT200L_REG5 | ACT200L_RWIDL,
+ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
+ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
+ };
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset the dongle : set RTS low for 25 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ state = ACT200L_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case ACT200L_STATE_WAIT1_RESET:
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better to wait a short while */
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, sizeof(control));
+ state = ACT200L_STATE_WAIT2_RESET;
+ delay = 15;
+ break;
+
+ case ACT200L_STATE_WAIT2_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n", __FUNCTION__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
+MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
+
+module_init(act200l_sir_init);
+module_exit(act200l_sir_cleanup);
diff --git a/drivers/net/irda/act200l.c b/drivers/net/irda/act200l.c
new file mode 100644
index 000000000000..756cd44e857a
--- /dev/null
+++ b/drivers/net/irda/act200l.c
@@ -0,0 +1,297 @@
+/*********************************************************************
+ *
+ * Filename: act200l.c
+ * Version: 0.8
+ * Description: Implementation for the ACTiSYS ACT-IR200L dongle
+ * Status: Experimental.
+ * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ * Created at: Fri Aug 3 17:35:42 2001
+ * Modified at: Fri Aug 17 10:22:40 2001
+ * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ *
+ * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+static int act200l_reset(struct irda_task *task);
+static void act200l_open(dongle_t *self, struct qos_info *qos);
+static void act200l_close(dongle_t *self);
+static int act200l_change_speed(struct irda_task *task);
+
+/* Register 0: Control register #1 */
+#define ACT200L_REG0 0x00
+#define ACT200L_TXEN 0x01 /* Enable transmitter */
+#define ACT200L_RXEN 0x02 /* Enable receiver */
+
+/* Register 1: Control register #2 */
+#define ACT200L_REG1 0x10
+#define ACT200L_LODB 0x01 /* Load new baud rate count value */
+#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
+
+/* Register 4: Output Power register */
+#define ACT200L_REG4 0x40
+#define ACT200L_OP0 0x01 /* Enable LED1C output */
+#define ACT200L_OP1 0x02 /* Enable LED2C output */
+#define ACT200L_BLKR 0x04
+
+/* Register 5: Receive Mode register */
+#define ACT200L_REG5 0x50
+#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
+
+/* Register 6: Receive Sensitivity register #1 */
+#define ACT200L_REG6 0x60
+#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
+#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
+
+/* Register 7: Receive Sensitivity register #2 */
+#define ACT200L_REG7 0x70
+#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
+
+/* Register 8,9: Baud Rate Divider register #1,#2 */
+#define ACT200L_REG8 0x80
+#define ACT200L_REG9 0x90
+
+#define ACT200L_2400 0x5f
+#define ACT200L_9600 0x17
+#define ACT200L_19200 0x0b
+#define ACT200L_38400 0x05
+#define ACT200L_57600 0x03
+#define ACT200L_115200 0x01
+
+/* Register 13: Control register #3 */
+#define ACT200L_REG13 0xd0
+#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
+
+/* Register 15: Status register */
+#define ACT200L_REG15 0xf0
+
+/* Register 21: Control register #4 */
+#define ACT200L_REG21 0x50
+#define ACT200L_EXCK 0x02 /* Disable clock output driver */
+#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
+
+static struct dongle_reg dongle = {
+ .type = IRDA_ACT200L_DONGLE,
+ .open = act200l_open,
+ .close = act200l_close,
+ .reset = act200l_reset,
+ .change_speed = act200l_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init act200l_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit act200l_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void act200l_open(dongle_t *self, struct qos_info *qos)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Power on the dongle */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+}
+
+static void act200l_close(dongle_t *self)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Power off the dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function act200l_change_speed (dev, speed)
+ *
+ * Set the speed for the ACTiSYS ACT-IR200L type dongle.
+ *
+ */
+static int act200l_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ __u8 control[3];
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ self->speed_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ if (irda_task_execute(self, act200l_reset, NULL, task,
+ (void *) speed))
+ {
+ /* Dongle need more time to reset */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* Give reset 1 sec to finish */
+ ret = msecs_to_jiffies(1000);
+ }
+ break;
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), resetting dongle timed out!\n",
+ __FUNCTION__);
+ ret = -1;
+ break;
+ case IRDA_TASK_CHILD_DONE:
+ /* Clear DTR and set RTS to enter command mode */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+
+ switch (speed) {
+ case 9600:
+ default:
+ control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
+ break;
+ case 19200:
+ control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
+ break;
+ case 38400:
+ control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
+ break;
+ case 57600:
+ control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
+ break;
+ case 115200:
+ control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
+ break;
+ }
+ control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
+
+ /* Write control bytes */
+ self->write(self->dev, control, 3);
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+ ret = msecs_to_jiffies(5);
+ break;
+ case IRDA_TASK_WAIT:
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function act200l_reset (driver)
+ *
+ * Reset the ACTiSYS ACT-IR200L type dongle.
+ */
+static int act200l_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u8 control[9] = {
+ ACT200L_REG15,
+ ACT200L_REG13 | ACT200L_SHDW,
+ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
+ ACT200L_REG13,
+ ACT200L_REG7 | ACT200L_ENPOS,
+ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
+ ACT200L_REG5 | ACT200L_RWIDL,
+ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
+ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
+ };
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ self->reset_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Power on the dongle */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+ ret = msecs_to_jiffies(50);
+ break;
+ case IRDA_TASK_WAIT1:
+ /* Reset the dongle : set RTS low for 25 ms */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+ ret = msecs_to_jiffies(50);
+ break;
+ case IRDA_TASK_WAIT2:
+ /* Clear DTR and set RTS to enter command mode */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+
+ /* Write control bytes */
+ self->write(self->dev, control, 9);
+ irda_task_next_state(task, IRDA_TASK_WAIT3);
+ ret = msecs_to_jiffies(15);
+ break;
+ case IRDA_TASK_WAIT3:
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
+MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize ACTiSYS ACT-IR200L module
+ *
+ */
+module_init(act200l_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup ACTiSYS ACT-IR200L module
+ *
+ */
+module_exit(act200l_cleanup);
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
new file mode 100644
index 000000000000..9715ab5572e9
--- /dev/null
+++ b/drivers/net/irda/actisys-sir.c
@@ -0,0 +1,246 @@
+/*********************************************************************
+ *
+ * Filename: actisys.c
+ * Version: 1.1
+ * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
+ * dongles
+ * Status: Beta.
+ * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
+ * Jean Tourrilhes <jt@hpl.hp.com> (new version)
+ * Martin Diehl <mad@mdiehl.de> (new version for sir_dev)
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:13 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999 Jean Tourrilhes
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * Changelog
+ *
+ * 0.8 -> 0.9999 - Jean
+ * o New initialisation procedure : much safer and correct
+ * o New procedure to change speed : much faster and simpler
+ * o Other cleanups & comments
+ * Thanks to Lichen Wang @ Actisys for his excellent help...
+ *
+ * 1.0 -> 1.1 - Martin Diehl
+ * modified for new sir infrastructure
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/*
+ * Define the timing of the pulses we send to the dongle (to reset it, and
+ * to toggle speeds). Basically, the limit here is the propagation speed of
+ * the signals through the serial port, the dongle being much faster. Any
+ * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
+ * go through cleanly. If you are on the wild side, you can try to lower
+ * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
+ */
+#define MIN_DELAY 10 /* 10 us to be on the conservative side */
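+/*
+ * A rough sanity check of the figure above: at 115200 baud one bit lasts
+ * 1 s / 115200 ~= 8.7 us, which is presumably where the "8.5 us" estimate
+ * comes from, so the 10 us MIN_DELAY stays safely above one bit time.
+ */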
+
+static int actisys_open(struct sir_dev *);
+static int actisys_close(struct sir_dev *);
+static int actisys_change_speed(struct sir_dev *, unsigned);
+static int actisys_reset(struct sir_dev *);
+
+/* These are the baudrates supported, in the order available */
+/* Note : the 220L doesn't support 38400, but we will fix that below */
+static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
+
+#define MAX_SPEEDS (sizeof(baud_rates)/sizeof(baud_rates[0]))
+
+static struct dongle_driver act220l = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L",
+ .type = IRDA_ACTISYS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static struct dongle_driver act220l_plus = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L+",
+ .type = IRDA_ACTISYS_PLUS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static int __init actisys_sir_init(void)
+{
+ int ret;
+
+ /* First, register an Actisys 220L dongle */
+ ret = irda_register_dongle(&act220l);
+ if (ret < 0)
+ return ret;
+
+ /* Now, register an Actisys 220L+ dongle */
+ ret = irda_register_dongle(&act220l_plus);
+ if (ret < 0) {
+ irda_unregister_dongle(&act220l);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit actisys_sir_cleanup(void)
+{
+ /* We have to remove both dongles */
+ irda_unregister_dongle(&act220l_plus);
+ irda_unregister_dongle(&act220l);
+}
+
+static int actisys_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* Remove support for 38400 if this is not a 220L+ dongle */
+ if (dev->dongle_drv->type == IRDA_ACTISYS_DONGLE)
+ qos->baud_rate.bits &= ~IR_38400;
+
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int actisys_close(struct sir_dev *dev)
+{
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function actisys_change_speed (dev, speed)
+ *
+ * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
+ * To cycle through the available baud rates, pulse RTS low for a few us.
+ *
+ * First, we reset the dongle to always start from a known state.
+ * Then, we cycle through the speeds by pulsing RTS low and then up.
+ *	The dongle allows us to pulse quite fast, so we can set the speed in one
+ * go, which is much faster (< 100 us) and less complex than what is found
+ * in some other dongle drivers...
+ *	Note that even if the new speed is the same as the current speed,
+ * we reassert the speed. This makes sure that things are all right,
+ * and it's fast anyway...
+ *	By the way, this function will work for both types of dongles,
+ * because the additional speed is at the end of the sequence...
+ */
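+/*
+ * Worked example: after a reset the dongle sits at 9600 (baud_rates[0]), so
+ * asking for 57600 (baud_rates[2]) makes the loop below emit two RTS pulses
+ * before it matches the requested speed and breaks out.
+ */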
+static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int i = 0;
+
+ IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__,
+ speed, dev->speed);
+
+	/* The dongle was already reset by the irda_request state machine,
+	 * so we are in a known state (dongle default)
+ */
+
+ /*
+ * Now, we can set the speed requested. Send RTS pulses until we
+ * reach the target speed
+ */
+ for (i = 0; i < MAX_SPEEDS; i++) {
+ if (speed == baud_rates[i]) {
+ dev->speed = speed;
+ break;
+ }
+ /* Set RTS low for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(MIN_DELAY);
+
+ /* Set RTS high for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(MIN_DELAY);
+ }
+
+ /* Check if life is sweet... */
+ if (i >= MAX_SPEEDS) {
+ actisys_reset(dev);
+ ret = -EINVAL; /* This should not happen */
+ }
+
+	/* Enough work, let's get out of here... */
+ return ret;
+}
+
+/*
+ * Function actisys_reset (dev)
+ *
+ * Reset the Actisys type dongle. Warning, this function must only be
+ * called with a process context!
+ *
+ * We need to do two things in this function :
+ * o first make sure that the dongle is in a state where it can operate
+ *	o second put the dongle in a known state
+ *
+ *	The dongle is powered from the RTS and DTR lines. In the dongle, there
+ * is a big capacitor to accommodate the current spikes. This capacitor
+ * takes at least 50 ms to charge. In theory, the BIOS sets those lines
+ * up, so by the time we arrive here we should be set. It doesn't hurt
+ * to be on the conservative side, so we will wait...
+ * <Martin : move above comment to irda_config_fsm>
+ * Then, we set the speed to 9600 b/s to get in a known state (see in
+ * change_speed for details). It is needed because the IrDA stack
+ * will have tried to set the speed immediately after our first return,
+ * that is, before we can be sure the dongle is up and running.
+ */
+
+static int actisys_reset(struct sir_dev *dev)
+{
+ /* Reset the dongle : set DTR low for 10 us */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = 9600; /* That's the default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
+MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
+
+module_init(actisys_sir_init);
+module_exit(actisys_sir_cleanup);
diff --git a/drivers/net/irda/actisys.c b/drivers/net/irda/actisys.c
new file mode 100644
index 000000000000..b2e31f4a384c
--- /dev/null
+++ b/drivers/net/irda/actisys.c
@@ -0,0 +1,288 @@
+/*********************************************************************
+ *
+ * Filename: actisys.c
+ * Version: 1.0
+ * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
+ * dongles
+ * Status: Beta.
+ * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
+ * Jean Tourrilhes <jt@hpl.hp.com> (new version)
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Fri Dec 17 09:10:43 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999 Jean Tourrilhes
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * Changelog
+ *
+ * 0.8 -> 0.9999 - Jean
+ * o New initialisation procedure : much safer and correct
+ *	o New procedure to change speed : much faster and simpler
+ * o Other cleanups & comments
+ * Thanks to Lichen Wang @ Actisys for his excellent help...
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+/*
+ * Define the timing of the pulses we send to the dongle (to reset it, and
+ * to toggle speeds). Basically, the limit here is the propagation speed of
+ * the signals through the serial port, the dongle being much faster. Any
+ * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
+ * go through cleanly. If you are on the wild side, you can try to lower
+ * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
+ */
+#define MIN_DELAY 10 /* 10 us to be on the conservative side */
+
+static int actisys_change_speed(struct irda_task *task);
+static int actisys_reset(struct irda_task *task);
+static void actisys_open(dongle_t *self, struct qos_info *qos);
+static void actisys_close(dongle_t *self);
+
+/* These are the baudrates supported, in the order available */
+/* Note : the 220L doesn't support 38400, but we will fix that below */
+static __u32 baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
+#define MAX_SPEEDS 5
+
+static struct dongle_reg dongle = {
+ .type = IRDA_ACTISYS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .change_speed = actisys_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static struct dongle_reg dongle_plus = {
+ .type = IRDA_ACTISYS_PLUS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .change_speed = actisys_change_speed,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function actisys_init (void)
+ *
+ *	There are two models of Actisys dongle we are dealing with,
+ * the 220L and 220L+. At this point, only irattach knows which
+ * kind the user has requested (it was an argument on the irattach
+ * command line).
+ * So, we register a dongle of each sort and let irattach
+ * pick the right one...
+ */
+static int __init actisys_init(void)
+{
+ int ret;
+
+ /* First, register an Actisys 220L dongle */
+ ret = irda_device_register_dongle(&dongle);
+ if (ret < 0)
+ return ret;
+ /* Now, register an Actisys 220L+ dongle */
+ ret = irda_device_register_dongle(&dongle_plus);
+ if (ret < 0) {
+ irda_device_unregister_dongle(&dongle);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit actisys_cleanup(void)
+{
+ /* We have to remove both dongles */
+ irda_device_unregister_dongle(&dongle);
+ irda_device_unregister_dongle(&dongle_plus);
+}
+
+static void actisys_open(dongle_t *self, struct qos_info *qos)
+{
+ /* Power on the dongle */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* Remove support for 38400 if this is not a 220L+ dongle */
+ if (self->issue->type == IRDA_ACTISYS_DONGLE)
+ qos->baud_rate.bits &= ~IR_38400;
+
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+}
+
+static void actisys_close(dongle_t *self)
+{
+ /* Power off the dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function actisys_change_speed (task)
+ *
+ * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
+ * To cycle through the available baud rates, pulse RTS low for a few us.
+ *
+ * First, we reset the dongle to always start from a known state.
+ * Then, we cycle through the speeds by pulsing RTS low and then up.
+ *	The dongle allows us to pulse quite fast, so we can set the speed in one
+ * go, which is much faster (< 100 us) and less complex than what is found
+ * in some other dongle drivers...
+ *	Note that even if the new speed is the same as the current speed,
+ * we reassert the speed. This makes sure that things are all right,
+ * and it's fast anyway...
+ *	By the way, this function will work for both types of dongles,
+ * because the additional speed is at the end of the sequence...
+ */
+static int actisys_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param; /* Target speed */
+ int ret = 0;
+ int i = 0;
+
+ IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__, speed,
+ self->speed);
+
+	/* Go to a known state by resetting the dongle */
+
+ /* Reset the dongle : set DTR low for 10 us */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode (we are now at 9600 b/s) */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /*
+ * Now, we can set the speed requested. Send RTS pulses until we
+ * reach the target speed
+ */
+ for (i=0; i<MAX_SPEEDS; i++) {
+ if (speed == baud_rates[i]) {
+ self->speed = baud_rates[i];
+ break;
+ }
+ /* Make sure previous pulse is finished */
+ udelay(MIN_DELAY);
+
+ /* Set RTS low for 10 us */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+ udelay(MIN_DELAY);
+
+ /* Set RTS high for 10 us */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ }
+
+ /* Check if life is sweet... */
+ if (i >= MAX_SPEEDS)
+ ret = -1; /* This should not happen */
+
+	/* Enough work, let's get out of here... */
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return ret;
+}
+
+/*
+ * Function actisys_reset (task)
+ *
+ * Reset the Actisys type dongle. Warning, this function must only be
+ * called with a process context!
+ *
+ * We need to do two things in this function :
+ * o first make sure that the dongle is in a state where it can operate
+ *	o second put the dongle in a known state
+ *
+ *	The dongle is powered from the RTS and DTR lines. In the dongle, there
+ * is a big capacitor to accommodate the current spikes. This capacitor
+ * takes at least 50 ms to charge. In theory, the BIOS sets those lines
+ * up, so by the time we arrive here we should be set. It doesn't hurt
+ * to be on the conservative side, so we will wait...
+ * Then, we set the speed to 9600 b/s to get in a known state (see in
+ * change_speed for details). It is needed because the IrDA stack
+ * will have tried to set the speed immediately after our first return,
+ * that is, before we can be sure the dongle is up and running.
+ */
+static int actisys_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ int ret = 0;
+
+ IRDA_ASSERT(task != NULL, return -1;);
+
+ self->reset_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Set both DTR & RTS to power up the dongle */
+ /* In theory redundant with power up in actisys_open() */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep 50 ms to make sure capacitor is charged */
+ ret = msecs_to_jiffies(50);
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+ break;
+ case IRDA_TASK_WAIT:
+ /* Reset the dongle : set DTR low for 10 us */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ self->speed = 9600; /* That's the default */
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
+MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
+
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Actisys module
+ *
+ */
+module_init(actisys_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Actisys module
+ *
+ */
+module_exit(actisys_cleanup);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
new file mode 100644
index 000000000000..9bf34681d3df
--- /dev/null
+++ b/drivers/net/irda/ali-ircc.c
@@ -0,0 +1,2277 @@
+/*********************************************************************
+ *
+ * Filename:      ali-ircc.c
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:55PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Modified at:   2003/11/6 to add support for ALi south-bridge chipset M1563
+ * Modified by: Clear Zhang <clear_zhang@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "ali-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+static char *driver_name = "ali-ircc";
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+/* Use BIOS settings by default, but user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0 };
+
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
+
+/* These are the currently known ALi south-bridge chipsets; the only difference
+ * is that the M1543C doesn't support the HP HSDL-3600
+ */
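+/*
+ * Each entry below appears to be: chip name, the two candidate configuration
+ * ports, the two magic bytes written to enter configuration mode (entr1 and
+ * entr2), the chip-ID register index and the expected ID value, and finally
+ * the probe and init helpers (see how ali_ircc_init() uses these fields).
+ */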
+static ali_chip_t chips[] =
+{
+ { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
+ { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
+ { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
+ { NULL }
+};
+
+/* Max 4 instances for now */
+static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+
+/* Dongle Types */
+static char *dongle_types[] = {
+ "TFDS6000",
+ "HP HSDL-3600",
+ "HP HSDL-1100",
+ "No dongle connected",
+};
+
+/* Some prototypes */
+static int ali_ircc_open(int i, chipio_t *info);
+
+static int ali_ircc_close(struct ali_ircc_cb *self);
+
+static int ali_ircc_setup(chipio_t *info);
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
+static int ali_ircc_net_open(struct net_device *dev);
+static int ali_ircc_net_close(struct net_device *dev);
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
+static void ali_ircc_suspend(struct ali_ircc_cb *self);
+static void ali_ircc_wakeup(struct ali_ircc_cb *self);
+static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
+
+/* SIR function */
+static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+
+/* FIR function */
+static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
+
+/* My Function */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info);
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
+
+/* ALi chip function */
+static void SIR2FIR(int iobase);
+static void FIR2SIR(int iobase);
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
+
+/*
+ * Function ali_ircc_init ()
+ *
+ *    Initialize chip. Find out what kinds of chips we are dealing with
+ *    and the addresses of their configuration registers
+ */
+static int __init ali_ircc_init(void)
+{
+ ali_chip_t *chip;
+ chipio_t info;
+ int ret = -ENODEV;
+ int cfg, cfg_base;
+ int reg, revision;
+ int i = 0;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ /* Probe for all the ALi chipsets we know about */
+ for (chip= chips; chip->name; chip++, i++)
+ {
+ IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg=0; cfg<2; cfg++)
+ {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Chip Identification Register */
+ outb(chip->cid_index, cfg_base);
+ reg = inb(cfg_base+1);
+
+ if (reg == chip->cid_value)
+ {
+ IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base);
+
+ outb(0x1F, cfg_base);
+ revision = inb(cfg_base+1);
+ IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__,
+ chip->name, revision);
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 2000)
+ {
+ chip->init(chip, &info);
+ }
+ else
+ {
+ chip->probe(chip, &info);
+ }
+
+ if (ali_ircc_open(i, &info) == 0)
+ ret = 0;
+ i++;
+ }
+ else
+ {
+ IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base);
+ }
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+ }
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ return ret;
+}
+
+/*
+ * Function ali_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit ali_ircc_cleanup(void)
+{
+ int i;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ pm_unregister_all(ali_ircc_pmproc);
+
+ for (i=0; i < 4; i++) {
+ if (dev_self[i])
+ ali_ircc_close(dev_self[i]);
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+}
+
+/*
+ * Function ali_ircc_open (int i, chipio_t *inf)
+ *
+ * Open driver instance
+ *
+ */
+static int ali_ircc_open(int i, chipio_t *info)
+{
+ struct net_device *dev;
+ struct ali_ircc_cb *self;
+ struct pm_dev *pmdev;
+ int dongle_id;
+ int err;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ /* Set FIR FIFO and DMA Threshold */
+ if ((ali_ircc_setup(info)) == -1)
+ return -1;
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (dev == NULL) {
+ IRDA_ERROR("%s(), can't allocate memory for control block!\n",
+ __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ self = dev->priv;
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->index = i;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */
+ self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
+ self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
+ IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__,
+ self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+	/* The only value we must override is the baudrate */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
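+	/* 14384 presumably comes from the formula above with a 2048 byte
+	 * max data size and a 7 frame window: (2048 + 6) * 7 + 6 = 14384 */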
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
+
+ self->tx_buff.head =
+ dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+
+ /* Keep track of module usage */
+ SET_MODULE_OWNER(dev);
+
+ /* Override the network functions we need to use */
+ dev->hard_start_xmit = ali_ircc_sir_hard_xmit;
+ dev->open = ali_ircc_net_open;
+ dev->stop = ali_ircc_net_close;
+ dev->do_ioctl = ali_ircc_net_ioctl;
+ dev->get_stats = ali_ircc_net_get_stats;
+
+ err = register_netdev(dev);
+ if (err) {
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ goto err_out4;
+ }
+ IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+
+ /* Check dongle id */
+ dongle_id = ali_ircc_read_dongle_id(i, info);
+ IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, driver_name, dongle_types[dongle_id]);
+
+ self->io.dongle_id = dongle_id;
+
+ pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, ali_ircc_pmproc);
+ if (pmdev)
+ pmdev->data = self;
+
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+
+ return 0;
+
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ dev_self[i] = NULL;
+ free_netdev(dev);
+ return err;
+}
+
+
+/*
+ * Function ali_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit ali_ircc_close(struct ali_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_43 (chip, info)
+ *
+ * Initialize the ALi M1543 chip.
+ */
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
+{
+	/* All controller information, like I/O address, DMA channel and IRQ,
+	 * is set by the BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_53 (chip, info)
+ *
+ * Initialize the ALi M1535 chip.
+ */
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
+{
+	/* All controller information, like I/O address, DMA channel and IRQ,
+	 * is set by the BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_probe_53 (chip, info)
+ *
+ * Probes for the ALi M1535D or M1535
+ */
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int hi, low, reg;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read address control register */
+ outb(0x60, cfg_base);
+ hi = inb(cfg_base+1);
+ outb(0x61, cfg_base);
+ low = inb(cfg_base+1);
+ info->fir_base = (hi<<8) + low;
+
+ info->sir_base = info->fir_base;
+
+ IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base);
+
+ /* Read IRQ control register */
+ outb(0x70, cfg_base);
+ reg = inb(cfg_base+1);
+ info->irq = reg & 0x0f;
+ IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
+
+ /* Read DMA channel */
+ outb(0x74, cfg_base);
+ reg = inb(cfg_base+1);
+ info->dma = reg & 0x07;
+
+ if(info->dma == 0x04)
+ IRDA_WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__);
+ else
+ IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
+
+ /* Read Enabled Status */
+ outb(0x30, cfg_base);
+ reg = inb(cfg_base+1);
+ info->enabled = (reg & 0x80) && (reg & 0x01);
+ IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled);
+
+ /* Read Power Status */
+ outb(0x22, cfg_base);
+ reg = inb(cfg_base+1);
+ info->suspended = (reg & 0x20);
+ IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_setup (info)
+ *
+ * Set FIR FIFO and DMA Threshold
+ * Returns non-negative on success.
+ *
+ */
+static int ali_ircc_setup(chipio_t *info)
+{
+ unsigned char tmp;
+ int version;
+ int iobase = info->fir_base;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ /* Locking comments :
+ * Most operations here need to be protected. We are called before
+ * the device instance is created in ali_ircc_open(), therefore
+ * nobody can bother us - Jean II */
+
+ /* Switch to FIR space */
+ SIR2FIR(iobase);
+
+ /* Master Reset */
+ outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
+
+ /* Read FIR ID Version Register */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+FIR_ID_VR);
+
+ /* Should be 0x00 in the M1535/M1535D */
+ if(version != 0x00)
+ {
+ IRDA_ERROR("%s, Wrong chip version %02x\n", driver_name, version);
+ return -1;
+ }
+
+ // IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, info->cfg_base);
+
+ /* Set FIR FIFO Threshold Register */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+
+ /* Set FIR DMA Threshold Register */
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* CRC enable */
+ switch_bank(iobase, BANK2);
+ outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
+
+	/* The NDIS driver sets the TX length here (BANK2 Alias 3, Alias 4) */
+
+ /* Switch to Bank 0 */
+ switch_bank(iobase, BANK0);
+
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &=~0x20; // disable SIP
+ tmp |= 0x80; // these two steps make RX mode
+ tmp &= 0xbf;
+ outb(tmp, iobase+FIR_LCR_B);
+
+ /* Disable Interrupt */
+ outb(0x00, iobase+FIR_IER);
+
+
+ /* Switch to SIR space */
+ FIR2SIR(iobase);
+
+ IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", driver_name);
+
+ /* Enable receive interrupts */
+ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
+ // Turn on the interrupts in ali_ircc_net_open
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_read_dongle_id (int index, info)
+ *
+ * Try to read dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info)
+{
+ int dongle_id, reg;
+ int cfg_base = info->cfg_base;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ /* Enter Configuration */
+ outb(chips[i].entr1, cfg_base);
+ outb(chips[i].entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Dongle ID */
+ outb(0xf0, cfg_base);
+ reg = inb(cfg_base+1);
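+	/* The two dongle ID bits appear to live in bits 7 and 5 of this
+	 * register: bit 7 becomes bit 1 and bit 5 becomes bit 0 of dongle_id,
+	 * which then indexes dongle_types[] */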
+ dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
+ IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__,
+ dongle_id, dongle_types[dongle_id]);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+
+ return dongle_id;
+}
+
+/*
+ * Function ali_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct ali_ircc_cb *self;
+ int ret;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ if (!dev) {
+ IRDA_WARNING("%s: irq %d for unknown device.\n", driver_name, irq);
+ return IRQ_NONE;
+ }
+
+ self = (struct ali_ircc_cb *) dev->priv;
+
+ spin_lock(&self->lock);
+
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ ret = ali_ircc_fir_interrupt(self);
+ else
+ ret = ali_ircc_sir_interrupt(self);
+
+ spin_unlock(&self->lock);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+ return ret;
+}
+/*
+ * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
+{
+ __u8 eir, OldMessageCount;
+ int iobase, tmp;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ self->InterruptID = inb(iobase+FIR_IIR);
+ self->BusStatus = inb(iobase+FIR_BSR);
+
+ OldMessageCount = (self->LineStatus + 1) & 0x07;
+ self->LineStatus = inb(iobase+FIR_LSR);
+ //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
+	eir = self->InterruptID & self->ier; /* Keep only the interrupts we are interested in */
+
+ IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID);
+ IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus);
+ IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier);
+ IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ /* Tx or Rx Interrupt */
+
+ if (eir & IIR_EOM)
+ {
+ if (self->io.direction == IO_XMIT) /* TX */
+ {
+ IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__);
+
+ if(ali_ircc_dma_xmit_complete(self))
+ {
+ if (irda_device_txqueue_empty(self->netdev))
+ {
+ /* Prepare for receive */
+ ali_ircc_dma_receive(self);
+ self->ier = IER_EOM;
+ }
+ }
+ else
+ {
+ self->ier = IER_EOM;
+ }
+
+ }
+ else /* RX */
+ {
+ IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__);
+
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__);
+ }
+
+ if (ali_ircc_dma_receive_complete(self))
+ {
+ IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__);
+
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__);
+
+ self->ier = IER_EOM | IER_TIMER;
+ }
+
+ }
+ }
+ /* Timer Interrupt */
+ else if (eir & IIR_TIMER)
+ {
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__);
+ }
+ /* Disable Timer */
+ switch_bank(iobase, BANK1);
+ tmp = inb(iobase+FIR_CR);
+ outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Interrupt on EOM */
+ self->ier = IER_EOM;
+
+ }
+ else /* Rx */
+ {
+ if(ali_ircc_dma_receive_complete(self))
+ {
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ self->ier = IER_EOM | IER_TIMER;
+ }
+ }
+ }
+
+ /* Restore Interrupt */
+ SetCOMInterrupts(self, TRUE);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__);
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function ali_ircc_sir_interrupt (irq, self, eir)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int iir, lsr;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase+UART_IIR) & UART_IIR_ID;
+ if (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase+UART_LSR);
+
+ IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__,
+ iir, lsr, iobase);
+
+ switch (iir)
+ {
+ case UART_IIR_RLSI:
+ IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ ali_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ {
+ /* Transmitter ready for data */
+ ali_ircc_sir_write_wakeup(self);
+ }
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
+ break;
+ }
+
+ }
+
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+
+ return IRQ_RETVAL(iir);
+}
+
+
+/*
+ * Function ali_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+ inb(iobase+UART_RX));
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
+ break;
+ }
+ } while (inb(iobase+UART_LSR) & UART_LSR_DR);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+/*
+ * Function ali_ircc_sir_write_wakeup (tty)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0)
+ {
+ /* Write data left in transmit buffer */
+ actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ }
+ else
+ {
+ if (self->new_speed)
+ {
+ /* We must wait until all data are gone */
+ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
+ IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __FUNCTION__ );
+
+ IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __FUNCTION__ , self->new_speed);
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+
+ // benjamin 2000/11/10 06:32PM
+ if (self->io.speed > 115200)
+ {
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __FUNCTION__ );
+
+ self->ier = IER_EOM;
+ // SetCOMInterrupts(self, TRUE);
+ return;
+ }
+ }
+ else
+ {
+ netif_wake_queue(self->netdev);
+ }
+
+ self->stats.tx_packets++;
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase+UART_IER);
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
+{
+ struct net_device *dev = self->netdev;
+ int iobase;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_DEBUG(2, "%s(), setting speed = %d \n", __FUNCTION__ , baud);
+
+ /* This function *must* be called with irq off and spin-lock.
+ * - Jean II */
+
+ iobase = self->io.fir_base;
+
+ SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
+
+ /* Go to MIR, FIR Speed */
+ if (baud > 115200)
+ {
+
+
+ ali_ircc_fir_change_speed(self, baud);
+
+ /* Install FIR xmit handler*/
+ dev->hard_start_xmit = ali_ircc_fir_hard_xmit;
+
+		/* Enable Interrupt */
+ self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
+
+		/* Be ready for incoming frames */
+ ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
+ }
+ /* Go to SIR Speed */
+ else
+ {
+ ali_ircc_sir_change_speed(self, baud);
+
+ /* Install SIR xmit handler*/
+ dev->hard_start_xmit = ali_ircc_sir_hard_xmit;
+ }
+
+
+ SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
+
+ netif_wake_queue(self->netdev);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
+{
+
+ int iobase;
+ struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct net_device *dev;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __FUNCTION__ ,self->io.speed,baud);
+
+ /* Come from SIR speed */
+ if(self->io.speed <=115200)
+ {
+ SIR2FIR(iobase);
+ }
+
+ /* Update accounting for new speed */
+ self->io.speed = baud;
+
+ // Set Dongle Speed mode
+ ali_ircc_change_dongle_speed(self, baud);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+/*
+ * Function ali_sir_change_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
+{
+ struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ unsigned long flags;
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __FUNCTION__ , speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /* Come from MIR or FIR speed */
+ if(self->io.speed >115200)
+ {
+ // Set Dongle Speed mode first
+ ali_ircc_change_dongle_speed(self, speed);
+
+ FIR2SIR(iobase);
+ }
+
+	// Clear Line and Auxiliary status registers  2000/11/24 11:47AM
+
+ inb(iobase+UART_LSR);
+ inb(iobase+UART_SCR);
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ spin_lock_irqsave(&self->lock, flags);
+
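+	/* Usual 16550-style divisor latch value: base rate 115200 divided by
+	 * the requested speed, e.g. 9600 -> 12, 115200 -> 1 */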
+ divisor = 115200/speed;
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
+	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ if (self->io.speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase+UART_DLM);
+ outb(lcr, iobase+UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
+
+	/* Without this, the connection will be broken after coming back from FIR
+	   speed, but with this, the SIR connection is harder to establish */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
+{
+
+ struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ int iobase,dongle_id;
+ int tmp = 0;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
+ dongle_id = self->io.dongle_id;
+
+ /* We are already locked, no need to do it again */
+
+ IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __FUNCTION__ , dongle_types[dongle_id], speed);
+
+ switch_bank(iobase, BANK2);
+ tmp = inb(iobase+FIR_IRDA_CR);
+
+ /* IBM type dongle */
+ if(dongle_id == 0)
+ {
+ if(speed == 4000000)
+ {
+ // __ __
+ // SD/MODE __| |__ __
+ // __ __
+ // IRTX __ __| |__
+ // T1 T2 T3 T4 T5
+
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T3 -> SD/MODE:1 IRTX:1
+ tmp |= 0x0b;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T4 -> SD/MODE:0 IRTX:1
+ tmp &= ~0x08;
+ tmp |= 0x03;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T5 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ else /* speed <=1152000 */
+ {
+ // __
+ // SD/MODE __| |__
+ //
+ // IRTX ________
+ // T1 T2 T3
+
+			/* MIR 1152000, 576000 */
+ if (speed==1152000)
+ {
+ tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
+ }
+ else
+ {
+ tmp &=~0x80; //HDLC 0.576Mbps
+ tmp |= 0x20; //HDLC=1,
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+			/* MIR 1152000, 576000 */
+
+ //switch_bank(iobase, BANK2);
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T3 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ }
+	else if (dongle_id == 1) /* HP HSDL-3600 */
+ {
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+	else /* HP HSDL-1100 */
+ {
+ if(speed <= 115200) /* SIR */
+ {
+
+ tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ else /* MIR FIR */
+ {
+
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+ tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+/*
+ * Function ali_ircc_sir_write (driver)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__ );
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+
+ actual++;
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ return actual;
+}
+
+/*
+ * Function ali_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int ali_ircc_net_open(struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = (struct ali_ircc_cb *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Request IRQ and install Interrupt Handler */
+ if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
+ {
+ IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+ }
+
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
+ self->io.dma);
+ free_irq(self->io.irq, self);
+ return -EAGAIN;
+ }
+
+	/* Turn on interrupts */
+ outb(UART_IER_RDI , iobase+UART_IER);
+
+ /* Ready to play! */
+ netif_start_queue(dev); //benjamin by irport
+
+ /* Give self a hardware name */
+ sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int ali_ircc_net_close(struct net_device *dev)
+{
+
+ struct ali_ircc_cb *self;
+ //int iobase;
+
+ IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = (struct ali_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_fir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame
+ *
+ */
+static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+ int mtt, diff;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ self = (struct ali_ircc_cb *) dev->priv;
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ self->stats.tx_bytes += skb->len;
+
+ memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
+ skb->len);
+
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1)
+ {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+
+ if (mtt)
+ {
+ /* Check how much time we have used already */
+ do_gettimeofday(&self->now);
+
+ diff = self->now.tv_usec - self->stamp.tv_usec;
+ /* self->stamp is set from ali_ircc_dma_receive_complete() */
+
+ IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __FUNCTION__ , diff);
+
+ if (diff < 0)
+ diff += 1000000;
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff)
+ {
+ mtt -= diff;
+
+ /*
+ * Use timer if delay larger than 1000 us, and
+				* Use the hardware timer if the delay is larger than 500 us, and
+				* use udelay() for smaller values, which should
+				* be acceptable
+ if (mtt > 500)
+ {
+ /* Adjust for timer resolution */
+					mtt = (mtt+250) / 500; 	/* round to the nearest 500 us timer tick */
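+					/* e.g. mtt = 800 us -> (800 + 250) / 500 = 2,
+					 * which selects the 1 ms timer below */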
+
+ IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __FUNCTION__ , mtt);
+
+ /* Setup timer */
+ if (mtt == 1) /* 500 us */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
+ }
+ else if (mtt == 2) /* 1 ms */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
+ }
+ else /* > 2ms -> 4ms */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
+ }
+
+
+ /* Start timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ self->ier = IER_TIMER;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Timer will take care of the rest */
+ goto out;
+ }
+ else
+ udelay(mtt);
+			} // if (mtt > diff)
+ }// if (mtt)
+
+ /* Enable EOM interrupt */
+ self->ier = IER_EOM;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Transmit frame */
+ ali_ircc_dma_xmit(self);
+ } // if (self->tx_fifo.len == 1)
+
+ out:
+
+ /* Not busy transmitting anymore if window is not full */
+ if (self->tx_fifo.free < MAX_TX_WINDOW)
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ switch_bank(iobase, BANK0);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ return 0;
+}
+
+
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+ unsigned char FIFO_OPTI, Hi, Lo;
+
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ iobase = self->io.fir_base;
+
+	/* FIFO threshold; this method comes from the NDIS5 code */
+
+ if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
+ FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
+ else
+ FIFO_OPTI = TX_FIFO_Threshold;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ self->io.direction = IO_XMIT;
+
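+	/* Hand the physical address of this queued frame to the DMA engine:
+	 * tx_buff_dma is the base of the coherent buffer and the frame's
+	 * offset within tx_buff.head is added to it */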
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Reset Tx FIFO */
+ switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ /* Set Tx FIFO threshold */
+ if (self->fifo_opti_buf!=FIFO_OPTI)
+ {
+ switch_bank(iobase, BANK1);
+ outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
+ self->fifo_opti_buf=FIFO_OPTI;
+ }
+
+ /* Set Tx DMA threshold */
+ switch_bank(iobase, BANK1);
+ outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Set max Tx frame size */
+ Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
+ Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
+ switch_bank(iobase, BANK2);
+ outb(Hi, iobase+FIR_TX_DSR_HI);
+ outb(Lo, iobase+FIR_TX_DSR_LO);
+
+	/* Disable SIP, disable Brick Wall (we don't support it in TX mode), change to TX mode */
+ switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &= ~0x20; // Disable SIP
+ outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
+ IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
+
+ outb(0, iobase+FIR_LSR);
+
+ /* Enable DMA and Burst Mode */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int ret = TRUE;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ iobase = self->io.fir_base;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Check for underrun! */
+ switch_bank(iobase, BANK0);
+ if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
+
+ {
+ IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__);
+ self->stats.tx_errors++;
+ self->stats.tx_fifo_errors++;
+ }
+ else
+ {
+ self->stats.tx_packets++;
+ }
+
+ /* Check if we need to change the speed */
+ if (self->new_speed)
+ {
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ }
+ else
+ { /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames */
+ if (self->tx_fifo.free < MAX_TX_WINDOW) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ switch_bank(iobase, BANK0);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ return ret;
+}
+
+/*
+ * Function ali_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Reset Message Count */
+ switch_bank(iobase, BANK0);
+ outb(0x07, iobase+FIR_LSR);
+
+ self->rcvFramesOverflow = FALSE;
+
+ self->LineStatus = inb(iobase+FIR_LSR) ;
+
+ /* Reset Rx FIFO info */
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO */
+ // switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+	/* Set Receive Mode, Brick Wall */
+ //switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
+
+ /* Set Rx Threshold */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Enable DMA and Burst Mode */
+ // switch_bank(iobase, BANK1);
+ outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ return 0;
+}
+
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status, MessageCount;
+ int len, i, iobase, val;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ st_fifo = &self->st_fifo;
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ MessageCount = inb(iobase+ FIR_LSR)&0x07;
+
+ if (MessageCount > 0)
+		IRDA_DEBUG(0, "%s(), Message count = %d\n", __FUNCTION__ , MessageCount);
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Bank 0 */
+ switch_bank(iobase, BANK0);
+ status = inb(iobase+FIR_LSR);
+
+ switch_bank(iobase, BANK2);
+ len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
+ len = len << 8;
+ len |= inb(iobase+FIR_RX_DSR_LO);
+
+ IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __FUNCTION__ , len);
+ IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __FUNCTION__ , status);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__ );
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
+ {
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __FUNCTION__ );
+
+ /* Skip frame */
+ self->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & LSR_FIFO_UR)
+ {
+ self->stats.rx_frame_errors++;
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __FUNCTION__ );
+ }
+ if (status & LSR_FRAME_ERROR)
+ {
+ self->stats.rx_frame_errors++;
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __FUNCTION__ );
+ }
+
+ if (status & LSR_CRC_ERROR)
+ {
+ self->stats.rx_crc_errors++;
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __FUNCTION__ );
+ }
+
+ if(self->rcvFramesOverflow)
+ {
+ self->stats.rx_frame_errors++;
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __FUNCTION__ );
+ }
+ if(len == 0)
+ {
+ self->stats.rx_frame_errors++;
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __FUNCTION__ );
+ }
+ }
+ else
+ {
+
+ if (st_fifo->pending_bytes < 32)
+ {
+ switch_bank(iobase, BANK0);
+ val = inb(iobase+FIR_BSR);
+ if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
+ {
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __FUNCTION__ );
+
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 500 us
+ */
+
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM
+
+ /* Enable Timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ do_gettimeofday(&self->stamp);
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL)
+ {
+ IRDA_WARNING("%s(), memory squeeze, "
+ "dropping frame.\n",
+ __FUNCTION__);
+ self->stats.rx_dropped++;
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+			/* Copy frame without CRC; CRC is removed by hardware */
+ skb_put(skb, len);
+ memcpy(skb->data, self->rx_buff.data, len);
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ self->netdev->last_rx = jiffies;
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ return TRUE;
+}
+
+
+
+/*
+ * Function ali_ircc_sir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return 0;);
+
+ self = (struct ali_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.sir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, iobase+UART_IER);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return 0;
+}
+
+
+/*
+ * Function ali_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __FUNCTION__ );
+ /*
+ * This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+ if (!in_interrupt() && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ali_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __FUNCTION__ );
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __FUNCTION__ );
+ /* This is protected */
+ irq->ifr_receiving = ali_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200)
+ {
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK1);
+ if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
+ {
+ /* We are receiving something */
+ IRDA_DEBUG(1, "%s(), We are receiving something\n", __FUNCTION__ );
+ status = TRUE;
+ }
+ switch_bank(iobase, BANK0);
+ }
+ else
+ {
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return status;
+}
+
+static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
+{
+ struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return &self->stats;
+}
+
+static void ali_ircc_suspend(struct ali_ircc_cb *self)
+{
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ IRDA_MESSAGE("%s, Suspending\n", driver_name);
+
+ if (self->io.suspended)
+ return;
+
+ ali_ircc_net_close(self->netdev);
+
+ self->io.suspended = 1;
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void ali_ircc_wakeup(struct ali_ircc_cb *self)
+{
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ if (!self->io.suspended)
+ return;
+
+ ali_ircc_net_open(self->netdev);
+
+ IRDA_MESSAGE("%s, Waking up\n", driver_name);
+
+ self->io.suspended = 0;
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
+{
+ struct ali_ircc_cb *self = (struct ali_ircc_cb*) dev->data;
+
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ ali_ircc_suspend(self);
+ break;
+ case PM_RESUME:
+ ali_ircc_wakeup(self);
+ break;
+ }
+ }
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+
+ return 0;
+}
+
+
+/* ALi Chip Function */
+
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
+{
+
+ unsigned char newMask;
+
+ int iobase = self->io.fir_base; /* or sir_base */
+
+ IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __FUNCTION__ , enable);
+
+	/* Enable the interrupts we wish to use */
+ if (enable){
+ if (self->io.direction == IO_XMIT)
+ {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_THRI | UART_IER_RDI;
+ }
+ }
+ else {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_RDI;
+ }
+ }
+ }
+ else /* Disable all the interrupts */
+ {
+ newMask = 0x00;
+
+ }
+
+	// SIR and FIR have different registers
+ if (self->io.speed > 115200)
+ {
+ switch_bank(iobase, BANK0);
+ outb(newMask, iobase+FIR_IER);
+ }
+ else
+ outb(newMask, iobase+UART_IER);
+
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void SIR2FIR(int iobase)
+{
+ //unsigned char tmp;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x28, iobase+UART_MCR);
+ outb(0x68, iobase+UART_MCR);
+ outb(0x88, iobase+UART_MCR);
+
+ outb(0x60, iobase+FIR_MCR); /* Master Reset */
+ outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
+
+ //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */
+ //tmp |= 0x20;
+ //outb(tmp, iobase+FIR_LCR_B);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+static void FIR2SIR(int iobase)
+{
+ unsigned char val;
+
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x20, iobase+FIR_MCR); /* IRQ to low */
+ outb(0x00, iobase+UART_IER);
+
+ outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */
+ outb(0x00, iobase+UART_FCR);
+ outb(0x07, iobase+UART_FCR);
+
+ val = inb(iobase+UART_RX);
+ val = inb(iobase+UART_LSR);
+ val = inb(iobase+UART_MSR);
+
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+}
+
+MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
+MODULE_DESCRIPTION("ALi FIR Controller Driver");
+MODULE_LICENSE("GPL");
+
+
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+
+module_init(ali_ircc_init);
+module_exit(ali_ircc_cleanup);
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
new file mode 100644
index 000000000000..e489c6661ee8
--- /dev/null
+++ b/drivers/net/irda/ali-ircc.h
@@ -0,0 +1,231 @@
+/*********************************************************************
+ *
+ * Filename: ali-ircc.h
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:56PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef ALI_IRCC_H
+#define ALI_IRCC_H
+
+#include <linux/time.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* SIR Register */
+/* Uses the definitions from linux/serial_reg.h */
+
+/* FIR Register */
+#define BANK0 0x20
+#define BANK1 0x21
+#define BANK2 0x22
+#define BANK3 0x23
+
+#define FIR_MCR 0x07 /* Master Control Register */
+
+/* Bank 0 */
+#define FIR_DR 0x00 /* Alias 0, FIR Data Register (R/W) */
+#define FIR_IER 0x01 /* Alias 1, FIR Interrupt Enable Register (R/W) */
+#define FIR_IIR 0x02 /* Alias 2, FIR Interrupt Identification Register (Read only) */
+#define FIR_LCR_A 0x03 /* Alias 3, FIR Line Control Register A (R/W) */
+#define FIR_LCR_B 0x04 /* Alias 4, FIR Line Control Register B (R/W) */
+#define FIR_LSR 0x05 /* Alias 5, FIR Line Status Register (R/W) */
+#define FIR_BSR 0x06 /* Alias 6, FIR Bus Status Register (Read only) */
+
+
+ /* Alias 1 */
+ #define IER_FIFO 0x10 /* FIR FIFO Interrupt Enable */
+ #define IER_TIMER 0x20 /* Timer Interrupt Enable */
+ #define IER_EOM 0x40 /* End of Message Interrupt Enable */
+ #define IER_ACT 0x80 /* Active Frame Interrupt Enable */
+
+ /* Alias 2 */
+ #define IIR_FIFO 0x10 /* FIR FIFO Interrupt */
+ #define IIR_TIMER 0x20 /* Timer Interrupt */
+ #define IIR_EOM 0x40 /* End of Message Interrupt */
+ #define IIR_ACT 0x80 /* Active Frame Interrupt */
+
+ /* Alias 3 */
+ #define LCR_A_FIFO_RESET 0x80 /* FIFO Reset */
+
+ /* Alias 4 */
+ #define LCR_B_BW 0x10 /* Brick Wall */
+ #define LCR_B_SIP 0x20 /* SIP Enable */
+ #define LCR_B_TX_MODE 0x40 /* Transmit Mode */
+ #define LCR_B_RX_MODE 0x80 /* Receive Mode */
+
+ /* Alias 5 */
+ #define LSR_FIR_LSA 0x00 /* FIR Line Status Address */
+ #define LSR_FRAME_ABORT 0x08 /* Frame Abort */
+ #define LSR_CRC_ERROR 0x10 /* CRC Error */
+ #define LSR_SIZE_ERROR 0x20 /* Size Error */
+ #define LSR_FRAME_ERROR 0x40 /* Frame Error */
+ #define LSR_FIFO_UR 0x80 /* FIFO Underrun */
+ #define LSR_FIFO_OR 0x80 /* FIFO Overrun */
+
+ /* Alias 6 */
+ #define BSR_FIFO_NOT_EMPTY 0x80 /* FIFO Not Empty */
+
+/* Bank 1 */
+#define FIR_CR 0x00 /* Alias 0, FIR Configuration Register (R/W) */
+#define FIR_FIFO_TR 0x01 /* Alias 1, FIR FIFO Threshold Register (R/W) */
+#define FIR_DMA_TR 0x02 /* Alias 2, FIR DMA Threshold Register (R/W) */
+#define FIR_TIMER_IIR 0x03 /* Alias 3, FIR Timer interrupt interval register (W/O) */
+#define FIR_FIFO_FR 0x03 /* Alias 3, FIR FIFO Flag register (R/O) */
+#define FIR_FIFO_RAR 0x04 /* Alias 4, FIR FIFO Read Address register (R/O) */
+#define FIR_FIFO_WAR 0x05 /* Alias 5, FIR FIFO Write Address register (R/O) */
+#define FIR_TR		0x06	/* Alias 6, Test Register (W/O) */
+
+ /* Alias 0 */
+ #define CR_DMA_EN 0x01 /* DMA Enable */
+ #define CR_DMA_BURST 0x02 /* DMA Burst Mode */
+ #define CR_TIMER_EN 0x08 /* Timer Enable */
+
+ /* Alias 3 */
+ #define TIMER_IIR_500 0x00 /* 500 us */
+ #define TIMER_IIR_1ms 0x01 /* 1 ms */
+ #define TIMER_IIR_2ms 0x02 /* 2 ms */
+ #define TIMER_IIR_4ms 0x03 /* 4 ms */
+
+/* Bank 2 */
+#define FIR_IRDA_CR 0x00 /* Alias 0, IrDA Control Register (R/W) */
+#define FIR_BOF_CR 0x01 /* Alias 1, BOF Count Register (R/W) */
+#define FIR_BW_CR 0x02 /* Alias 2, Brick Wall Count Register (R/W) */
+#define FIR_TX_DSR_HI 0x03 /* Alias 3, TX Data Size Register (high) (R/W) */
+#define FIR_TX_DSR_LO 0x04 /* Alias 4, TX Data Size Register (low) (R/W) */
+#define FIR_RX_DSR_HI 0x05 /* Alias 5, RX Data Size Register (high) (R/W) */
+#define FIR_RX_DSR_LO 0x06 /* Alias 6, RX Data Size Register (low) (R/W) */
+
+ /* Alias 0 */
+ #define IRDA_CR_HDLC1152 0x80 /* 1.152Mbps HDLC Select */
+	#define IRDA_CR_CRC	0x40	/* CRC Select. */
+ #define IRDA_CR_HDLC 0x20 /* HDLC select. */
+ #define IRDA_CR_HP_MODE 0x10 /* HP mode (read only) */
+ #define IRDA_CR_SD_ST 0x08 /* SD/MODE State. */
+ #define IRDA_CR_FIR_SIN 0x04 /* FIR SIN Select. */
+ #define IRDA_CR_ITTX_0 0x02 /* SOUT State. IRTX force to 0 */
+ #define IRDA_CR_ITTX_1 0x03 /* SOUT State. IRTX force to 1 */
+
+/* Bank 3 */
+#define FIR_ID_VR 0x00 /* Alias 0, FIR ID Version Register (R/O) */
+#define FIR_MODULE_CR 0x01 /* Alias 1, FIR Module Control Register (R/W) */
+#define FIR_IO_BASE_HI 0x02 /* Alias 2, FIR Higher I/O Base Address Register (R/O) */
+#define FIR_IO_BASE_LO 0x03 /* Alias 3, FIR Lower I/O Base Address Register (R/O) */
+#define FIR_IRQ_CR 0x04 /* Alias 4, FIR IRQ Channel Register (R/O) */
+#define FIR_DMA_CR 0x05 /* Alias 5, FIR DMA Channel Register (R/O) */
+
+struct ali_chip {
+ char *name;
+ int cfg[2];
+ unsigned char entr1;
+ unsigned char entr2;
+ unsigned char cid_index;
+ unsigned char cid_value;
+ int (*probe)(struct ali_chip *chip, chipio_t *info);
+ int (*init)(struct ali_chip *chip, chipio_t *info);
+};
+typedef struct ali_chip ali_chip_t;
+
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+#define TX_FIFO_Threshold 8
+#define RX_FIFO_Threshold 1
+#define TX_DMA_Threshold 1
+#define RX_DMA_Threshold 1
+
+/* For storing entries in the status FIFO */
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
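+/*
+ * Entries are appended at 'tail' as the hardware reports completed frames
+ * and consumed from 'head' in ali_ircc_dma_receive_complete() above;
+ * 'pending_bytes' tracks how many received bytes those queued entries
+ * still occupy in the DMA buffer.
+ */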
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+	int len;                /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+	int len;                /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
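+/*
+ * As used by ali_ircc_dma_xmit_complete(): up to MAX_TX_WINDOW frames are
+ * queued back-to-back, 'ptr' advances and the next frame is started until
+ * 'len' reaches zero, at which point the whole FIFO is reset and 'tail'
+ * rewinds to the start of the DMA buffer.
+ */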
+
+/* Private data for each instance */
+struct ali_ircc_cb {
+
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+
+	struct irlap_cb *irlap;   /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ __u8 InterruptID; /* Interrupt ID */
+ __u8 BusStatus; /* Bus Status */
+ __u8 LineStatus; /* Line Status */
+
+ unsigned char rcvFramesOverflow;
+
+ struct timeval stamp;
+ struct timeval now;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ unsigned char fifo_opti_buf;
+
+ struct pm_dev *dev;
+};
+
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+FIR_MCR);
+}
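+/*
+ * The FIR register aliases share I/O addresses, so callers select a bank
+ * first (e.g. switch_bank(iobase, BANK1) before touching FIR_CR) and switch
+ * back to BANK0 when done, as ali_ircc_dma_xmit() does.
+ */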
+
+#endif /* ALI_IRCC_H */
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
new file mode 100644
index 000000000000..7a31d4659ed6
--- /dev/null
+++ b/drivers/net/irda/au1000_ircc.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * Au1000 IrDA driver.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef AU1000_IRCC_H
+#define AU1000_IRCC_H
+
+#include <linux/time.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <asm/io.h>
+
+#define NUM_IR_IFF 1
+#define NUM_IR_DESC 64
+#define RING_SIZE_4 0x0
+#define RING_SIZE_16 0x3
+#define RING_SIZE_64 0xF
+#define MAX_NUM_IR_DESC 64
+#define MAX_BUF_SIZE 2048
+
+#define BPS_115200 0
+#define BPS_57600 1
+#define BPS_38400 2
+#define BPS_19200 5
+#define BPS_9600 11
+#define BPS_2400 47
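+/* These look like the PHY divisor values programmed via IR_WRITE_PHY_CONFIG
+ * in au1k_irda_set_speed() (e.g. 11<<10 for 9600 baud, 1<<10 for 57600). */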
+
+/* Ring descriptor flags */
+#define AU_OWN (1<<7) /* tx,rx */
+
+#define IR_DIS_CRC (1<<6) /* tx */
+#define IR_BAD_CRC (1<<5) /* tx */
+#define IR_NEED_PULSE (1<<4) /* tx */
+#define IR_FORCE_UNDER (1<<3) /* tx */
+#define IR_DISABLE_TX (1<<2) /* tx */
+#define IR_HW_UNDER (1<<0) /* tx */
+#define IR_TX_ERROR (IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
+
+#define IR_PHY_ERROR (1<<6) /* rx */
+#define IR_CRC_ERROR (1<<5) /* rx */
+#define IR_MAX_LEN (1<<4) /* rx */
+#define IR_FIFO_OVER (1<<3) /* rx */
+#define IR_SIR_ERROR (1<<2) /* rx */
+#define IR_RX_ERROR (IR_PHY_ERROR|IR_CRC_ERROR| \
+ IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
+
+typedef struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+} db_dest_t;
+
+
+typedef struct ring_desc {
+ u8 count_0; /* 7:0 */
+ u8 count_1; /* 12:8 */
+ u8 reserved;
+ u8 flags;
+ u8 addr_0; /* 7:0 */
+ u8 addr_1; /* 15:8 */
+ u8 addr_2; /* 23:16 */
+ u8 addr_3; /* 31:24 */
+} ring_dest_t;
+
+
+/* Private data for each instance */
+struct au1k_private {
+
+ db_dest_t *pDBfree;
+ db_dest_t db[2*NUM_IR_DESC];
+ volatile ring_dest_t *rx_ring[NUM_IR_DESC];
+ volatile ring_dest_t *tx_ring[NUM_IR_DESC];
+ db_dest_t *rx_db_inuse[NUM_IR_DESC];
+ db_dest_t *tx_db_inuse[NUM_IR_DESC];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ iobuff_t rx_buff;
+
+ struct net_device *netdev;
+ struct net_device_stats stats;
+
+ struct timeval stamp;
+ struct timeval now;
+ struct qos_info qos;
+ struct irlap_cb *irlap;
+
+ u8 open;
+ u32 speed;
+ u32 newspeed;
+
+ u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
+ struct timer_list timer;
+
+ spinlock_t lock; /* For serializing operations */
+ struct pm_dev *dev;
+};
+#endif /* AU1000_IRCC_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
new file mode 100644
index 000000000000..e6b1985767c2
--- /dev/null
+++ b/drivers/net/irda/au1k_ir.c
@@ -0,0 +1,851 @@
+/*
+ * Alchemy Semi Au1000 IrDA driver
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/au1000.h>
+#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
+#include <asm/pb1000.h>
+#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+#include <asm/db1x00.h>
+#else
+#error au1k_ir: unsupported board
+#endif
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include "au1000_ircc.h"
+
+static int au1k_irda_net_init(struct net_device *);
+static int au1k_irda_start(struct net_device *);
+static int au1k_irda_stop(struct net_device *dev);
+static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
+static int au1k_irda_rx(struct net_device *);
+static void au1k_irda_interrupt(int, void *, struct pt_regs *);
+static void au1k_tx_timeout(struct net_device *);
+static struct net_device_stats *au1k_irda_stats(struct net_device *);
+static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
+static int au1k_irda_set_speed(struct net_device *dev, int speed);
+
+static void *dma_alloc(size_t, dma_addr_t *);
+static void dma_free(void *, size_t);
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static struct net_device *ir_devs[NUM_IR_IFF];
+static char version[] __devinitdata =
+ "au1k_ircc:1.2 ppopov@mvista.com\n";
+
+#define RUN_AT(x) (jiffies + (x))
+
+#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+static BCSR * const bcsr = (BCSR *)0xAE000000;
+#endif
+
+static DEFINE_SPINLOCK(ir_lock);
+
+/*
+ * IrDA peripheral bug. You have to read the register
+ * twice to get the right value.
+ */
+u32 read_ir_reg(u32 addr)
+{
+ readl(addr);
+ return readl(addr);
+}
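+/* Register reads elsewhere in this driver go through read_ir_reg() so the
+ * double-read workaround is applied consistently (see, e.g., au1k_init()
+ * and au1k_irda_set_speed() below). */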
+
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static db_dest_t *GetFreeDB(struct au1k_private *aup)
+{
+ db_dest_t *pDB;
+ pDB = aup->pDBfree;
+
+ if (pDB) {
+ aup->pDBfree = pDB->pnext;
+ }
+ return pDB;
+}
+
+static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
+{
+ db_dest_t *pDBfree = aup->pDBfree;
+ if (pDBfree)
+ pDBfree->pnext = pDB;
+ aup->pDBfree = pDB;
+}
+
+
+/*
+ DMA memory allocation, derived from pci_alloc_consistent.
+  However, the Au1000 data cache is coherent (when programmed to be),
+  so we return a KSEG0 address, not KSEG1.
+*/
+static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *) __get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ ret = (void *)KSEG0ADDR(ret);
+ }
+ return ret;
+}
+
+
+static void dma_free(void *vaddr, size_t size)
+{
+ vaddr = (void *)KSEG0ADDR(vaddr);
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+
+static void
+setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+{
+ int i;
+ for (i=0; i<NUM_IR_DESC; i++) {
+ aup->rx_ring[i] = (volatile ring_dest_t *)
+ (rx_base + sizeof(ring_dest_t)*i);
+ }
+ for (i=0; i<NUM_IR_DESC; i++) {
+ aup->tx_ring[i] = (volatile ring_dest_t *)
+ (tx_base + sizeof(ring_dest_t)*i);
+ }
+}
+
+static int au1k_irda_init(void)
+{
+ static unsigned version_printed = 0;
+ struct au1k_private *aup;
+ struct net_device *dev;
+ int err;
+
+ if (version_printed++ == 0) printk(version);
+
+ dev = alloc_irdadev(sizeof(struct au1k_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
+ err = au1k_irda_net_init(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ ir_devs[0] = dev;
+ printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ return 0;
+
+out1:
+ aup = netdev_priv(dev);
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2*NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
+ kfree(aup->rx_buff.head);
+out:
+ free_netdev(dev);
+ return err;
+}
+
+static int au1k_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static int au1k_irda_net_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ int i, retval = 0, err;
+ db_dest_t *pDB, *pDBfree;
+ dma_addr_t temp;
+
+ err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+ if (err)
+ goto out1;
+
+ dev->open = au1k_irda_start;
+ dev->hard_start_xmit = au1k_irda_hard_xmit;
+ dev->stop = au1k_irda_stop;
+ dev->get_stats = au1k_irda_stats;
+ dev->do_ioctl = au1k_irda_ioctl;
+ dev->tx_timeout = au1k_tx_timeout;
+
+ irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override is the baudrate */
+ aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000 |(IR_4000000 << 8);
+
+ aup->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&aup->qos);
+
+ retval = -ENOMEM;
+
+ /* Tx ring follows rx ring + 512 bytes */
+ /* we need a 1k aligned buffer */
+ aup->rx_ring[0] = (ring_dest_t *)
+ dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
+ if (!aup->rx_ring[0])
+ goto out2;
+
+ /* allocate the data buffers */
+ aup->db[0].vaddr =
+ (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
+ if (!aup->db[0].vaddr)
+ goto out3;
+
+ setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
+ pDBfree = NULL;
+ pDB = aup->db;
+ for (i=0; i<(2*NUM_IR_DESC); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr =
+ (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ /* attach a data buffer to each descriptor */
+ for (i=0; i<NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) goto out;
+ aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
+ aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
+ aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i=0; i<NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) goto out;
+ aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
+ aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
+ aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
+ aup->tx_ring[i]->count_0 = 0;
+ aup->tx_ring[i]->count_1 = 0;
+ aup->tx_ring[i]->flags = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+ /* power on */
+ bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
+ bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
+ au_sync();
+#endif
+
+ return 0;
+
+out3:
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
+out2:
+ kfree(aup->rx_buff.head);
+out1:
+ printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
+ return retval;
+}
+
+
+static int au1k_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ int i;
+ u32 control;
+ u32 ring_address;
+
+ /* bring the device out of reset */
+ control = 0xe; /* coherent, clock enable, one half system clock */
+
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ control |= 1;
+#endif
+ aup->tx_head = 0;
+ aup->tx_tail = 0;
+ aup->rx_head = 0;
+
+ for (i=0; i<NUM_IR_DESC; i++) {
+ aup->rx_ring[i]->flags = AU_OWN;
+ }
+
+ writel(control, IR_INTERFACE_CONFIG);
+ au_sync_delay(10);
+
+ writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
+ au_sync_delay(1);
+
+ writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
+
+ ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
+ writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
+ writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
+
+ writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
+
+ writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
+ writel(0, IR_RING_ADDR_CMPR);
+
+ au1k_irda_set_speed(dev, 9600);
+ return 0;
+}
+
+static int au1k_irda_start(struct net_device *dev)
+{
+ int retval;
+ char hwname[32];
+ struct au1k_private *aup = netdev_priv(dev);
+
+ if ((retval = au1k_init(dev))) {
+ printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
+ return retval;
+ }
+
+ if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
+ 0, dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+ if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
+ 0, dev->name, dev))) {
+ free_irq(AU1000_IRDA_TX_INT, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
+ /* Give self a hardware name */
+ sprintf(hwname, "Au1000 SIR/FIR");
+ aup->irlap = irlap_open(dev, &aup->qos, hwname);
+ netif_start_queue(dev);
+
+ writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
+
+ aup->timer.expires = RUN_AT((3*HZ));
+ aup->timer.data = (unsigned long)dev;
+ return 0;
+}
+
+static int au1k_irda_stop(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+
+ /* disable interrupts */
+ writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
+ writel(0, IR_CONFIG_1);
+ writel(0, IR_INTERFACE_CONFIG); /* disable clock */
+ au_sync();
+
+ if (aup->irlap) {
+ irlap_close(aup->irlap);
+ aup->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+ del_timer(&aup->timer);
+
+ /* disable the interrupt */
+ free_irq(AU1000_IRDA_TX_INT, dev);
+ free_irq(AU1000_IRDA_RX_INT, dev);
+ return 0;
+}
+
+static void __exit au1k_irda_exit(void)
+{
+ struct net_device *dev = ir_devs[0];
+ struct au1k_private *aup = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2*NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
+ kfree(aup->rx_buff.head);
+ free_netdev(dev);
+}
+
+
+static inline void
+update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &aup->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & IR_TX_ERROR) {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+}
+
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile ring_dest_t *ptxd;
+
+ ptxd = aup->tx_ring[aup->tx_tail];
+ while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+ update_tx_stats(dev, ptxd->flags,
+ ptxd->count_1<<8 | ptxd->count_0);
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ au_sync();
+
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+ ptxd = aup->tx_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+
+ if (aup->tx_tail == aup->tx_head) {
+ if (aup->newspeed) {
+ au1k_irda_set_speed(dev, aup->newspeed);
+ aup->newspeed = 0;
+ }
+ else {
+ writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
+ IR_CONFIG_1);
+ au_sync();
+ writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
+ IR_CONFIG_1);
+ writel(0, IR_RING_PROMPT);
+ au_sync();
+ }
+ }
+}
+
+
+/*
+ * Au1000 transmit routine.
+ */
+static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+ volatile ring_dest_t *ptxd;
+ u32 len;
+
+ u32 flags;
+ db_dest_t *pDB;
+
+ if (speed != aup->speed && speed != -1) {
+ aup->newspeed = speed;
+ }
+
+ if ((skb->len == 0) && (aup->newspeed)) {
+ if (aup->tx_tail == aup->tx_head) {
+ au1k_irda_set_speed(dev, speed);
+ aup->newspeed = 0;
+ }
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ ptxd = aup->tx_ring[aup->tx_head];
+ flags = ptxd->flags;
+
+ if (flags & AU_OWN) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ }
+ else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+
+#if 0
+ if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
+ printk("tx warning: rx byte cnt %x\n",
+ read_ir_reg(IR_RX_BYTE_CNT));
+ }
+#endif
+
+ if (aup->speed == 4000000) {
+ /* FIR */
+ memcpy((void *)pDB->vaddr, skb->data, skb->len);
+ ptxd->count_0 = skb->len & 0xff;
+ ptxd->count_1 = (skb->len >> 8) & 0xff;
+
+ }
+ else {
+ /* SIR */
+ len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
+ ptxd->count_0 = len & 0xff;
+ ptxd->count_1 = (len >> 8) & 0xff;
+ ptxd->flags |= IR_DIS_CRC;
+ au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
+ }
+ ptxd->flags |= AU_OWN;
+ au_sync();
+
+ writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
+ writel(0, IR_RING_PROMPT);
+ au_sync();
+
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+
+static inline void
+update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &aup->stats;
+
+ ps->rx_packets++;
+
+ if (status & IR_RX_ERROR) {
+ ps->rx_errors++;
+ if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
+ ps->rx_missed_errors++;
+ if (status & IR_MAX_LEN)
+ ps->rx_length_errors++;
+ if (status & IR_CRC_ERROR)
+ ps->rx_crc_errors++;
+ }
+ else
+ ps->rx_bytes += count;
+}
+
+/*
+ * Au1000 receive routine.
+ */
+static int au1k_irda_rx(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct sk_buff *skb;
+ volatile ring_dest_t *prxd;
+ u32 flags, count;
+ db_dest_t *pDB;
+
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ while (!(flags & AU_OWN)) {
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ count = prxd->count_1<<8 | prxd->count_0;
+ if (!(flags & IR_RX_ERROR)) {
+ /* good frame */
+ update_rx_stats(dev, flags, count);
+ skb=alloc_skb(count+1,GFP_ATOMIC);
+			if (skb == NULL) {
+				aup->stats.rx_dropped++;
+				/* don't spin on this descriptor; retry on the next interrupt */
+				break;
+			}
+ skb_reserve(skb, 1);
+ if (aup->speed == 4000000)
+ skb_put(skb, count);
+ else
+ skb_put(skb, count-2);
+ memcpy(skb->data, (void *)pDB->vaddr, count-2);
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ prxd->count_0 = 0;
+ prxd->count_1 = 0;
+ }
+ prxd->flags |= AU_OWN;
+ aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+ writel(0, IR_RING_PROMPT);
+ au_sync();
+
+ /* next descriptor */
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+ dev->last_rx = jiffies;
+
+ }
+ return 0;
+}
+
+
+void au1k_irda_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+
+ if (dev == NULL) {
+		printk(KERN_ERR "au1k_irda: isr: null dev ptr\n");
+ return;
+ }
+
+ writel(0, IR_INT_CLEAR); /* ack irda interrupts */
+
+ au1k_irda_rx(dev);
+ au1k_tx_ack(dev);
+}
+
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value. The transmitter is probably hung.
+ */
+static void au1k_tx_timeout(struct net_device *dev)
+{
+ u32 speed;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: tx timeout\n", dev->name);
+ speed = aup->speed;
+ aup->speed = 0;
+ au1k_irda_set_speed(dev, speed);
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int
+au1k_irda_set_speed(struct net_device *dev, int speed)
+{
+ unsigned long flags;
+ struct au1k_private *aup = netdev_priv(dev);
+ u32 control;
+ int ret = 0, timeout = 10, i;
+ volatile ring_dest_t *ptxd;
+#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+ unsigned long irda_resets;
+#endif
+
+ if (speed == aup->speed)
+ return ret;
+
+ spin_lock_irqsave(&ir_lock, flags);
+
+ /* disable PHY first */
+ writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
+
+ /* disable RX/TX */
+ writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
+ IR_CONFIG_1);
+ au_sync_delay(1);
+ while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
+ mdelay(1);
+ if (!timeout--) {
+ printk(KERN_ERR "%s: rx/tx disable timeout\n",
+ dev->name);
+ break;
+ }
+ }
+
+ /* disable DMA */
+ writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
+ au_sync_delay(1);
+
+ /*
+	 * After we disable tx/rx, the index pointers
+ * go back to zero.
+ */
+ aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+ for (i=0; i<NUM_IR_DESC; i++) {
+ ptxd = aup->tx_ring[i];
+ ptxd->flags = 0;
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ }
+
+ for (i=0; i<NUM_IR_DESC; i++) {
+ ptxd = aup->rx_ring[i];
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ ptxd->flags = AU_OWN;
+ }
+
+ if (speed == 4000000) {
+#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+ bcsr->resets |= BCSR_RESETS_FIR_SEL;
+#else /* Pb1000 and Pb1100 */
+ writel(1<<13, CPLD_AUX1);
+#endif
+ }
+ else {
+#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
+ bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
+#else /* Pb1000 and Pb1100 */
+ writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
+#endif
+ }
+
+ switch (speed) {
+ case 9600:
+ writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
+ writel(IR_SIR_MODE, IR_CONFIG_1);
+ break;
+ case 19200:
+ writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
+ writel(IR_SIR_MODE, IR_CONFIG_1);
+ break;
+ case 38400:
+ writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
+ writel(IR_SIR_MODE, IR_CONFIG_1);
+ break;
+ case 57600:
+ writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
+ writel(IR_SIR_MODE, IR_CONFIG_1);
+ break;
+ case 115200:
+ writel(12<<5, IR_WRITE_PHY_CONFIG);
+ writel(IR_SIR_MODE, IR_CONFIG_1);
+ break;
+ case 4000000:
+ writel(0xF, IR_WRITE_PHY_CONFIG);
+ writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
+ break;
+ default:
+ printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ ret = -EINVAL;
+ break;
+ }
+
+ aup->speed = speed;
+ writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
+ au_sync();
+
+ control = read_ir_reg(IR_ENABLE);
+ writel(0, IR_RING_PROMPT);
+ au_sync();
+
+ if (control & (1<<14)) {
+ printk(KERN_ERR "%s: configuration error\n", dev->name);
+ }
+ else {
+ if (control & (1<<11))
+ printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ if (control & (1<<12))
+ printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ if (control & (1<<13))
+ printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ if (control & (1<<10))
+ printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ if (control & (1<<9))
+ printk(KERN_DEBUG "%s RX enabled\n", dev->name);
+ }
+
+ spin_unlock_irqrestore(&ir_lock, flags);
+ return ret;
+}
+
+static int
+au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct au1k_private *aup = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (aup->open)
+ ret = au1k_irda_set_speed(dev,
+ rq->ifr_baudrate);
+ else {
+ printk(KERN_ERR "%s ioctl: !netif_running\n",
+ dev->name);
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+
+static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ return &aup->stats;
+}
+
+MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
+MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
+
+module_init(au1k_irda_init);
+module_exit(au1k_irda_exit);
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
new file mode 100644
index 000000000000..0a08c539c051
--- /dev/null
+++ b/drivers/net/irda/donauboe.c
@@ -0,0 +1,1789 @@
+/*****************************************************************
+ *
+ * Filename: donauboe.c
+ * Version: 2.17
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Documentation: http://libxg.free.fr/irda/lib-irda.html
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: Paul Bristow <paul.bristow@technologist.com>
+ * Modified: Mon Nov 11 19:10:05 1999
+ * Modified: James McKenzie <james@fishsoup.dhs.org>
+ * Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
+ * Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
+ * Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
+ * Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
+ * Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
+ * Modified: 2.15 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
+ * Modified: 2.18 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.18 ven jan 10 03:14:16 2003 Change probe default options
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ ********************************************************************/
+
+/* Look at toshoboe.h (currently in include/net/irda) for details of */
+/* Where to get documentation on the chip */
+
+
+static char *rcsid =
+ "$Id: donauboe.c V2.18 ven jan 10 03:14:16 2003$";
+
+/* See below for a description of the logic in this driver */
+
+/* User serviceable parts */
+/* USE_PROBE Create the code which probes the chip and does a few tests */
+/* do_probe module parameter Enable this code */
+/* Probe code is very useful for understanding how the hardware works */
+/* Use it with various combinations of TT_LEN, RX_LEN */
+/* Strongly recommended; disable it if the probe fails on your machine */
+/* and send me <james@fishsoup.dhs.org> the output of dmesg */
+#define USE_PROBE 1
+#undef USE_PROBE
+
+/* Trace Transmit ring, interrupts, Receive ring or not ? */
+#define PROBE_VERBOSE 1
+
+/* Debug option, examine sent and received raw data */
+/* Irdadump is better, but does not see all packets. Enable it if you want. */
+#undef DUMP_PACKETS
+
+/* MIR mode has not been tested. Some behaviour is different */
+/* Seems to work against an Ericsson R520 for me. -Martin */
+#define USE_MIR
+
+/* Schedule back-to-back hardware transmits wherever possible; otherwise */
+/* we need an interrupt for every frame. Unset this if oboe works for a */
+/* bit and then hangs */
+#define OPTIMIZE_TX
+
+/* Set the number of slots in the rings */
+/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
+/* these */
+
+#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
+#define TX_SLOTS 8
+#define RX_SLOTS 8
+
+
+/* Less user serviceable parts below here */
+
+/* Test, Transmit and receive buffer sizes, adjust at your peril */
+/* remarks: nfs usually needs 1k blocks */
+/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
+/* remarks: test accepts large blocks. Standard is 0x80 */
+/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
+/* When 3 or more slots are needed for each test packet, */
+/* data received in the first slots is overwritten, even */
+/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
+#define TT_LEN 0x80
+#define TX_LEN 0xc00
+#define RX_LEN 0xc04
+/* Real transmitted length (SIR mode) is about 14+(2%*TX_LEN) bytes */
+/* longer than the user-defined length (see async_wrap_skb) and is less than 4K */
+/* Real received length (max RX_LEN) differs from the user-defined */
+/* length only by the CRC (2 or 4 bytes) */
+#define BUF_SAFETY 0x7a
+#define RX_BUF_SZ (RX_LEN)
+#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
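+/* For the default TX_LEN of 0xc00 (3072) bytes the SIR wrapping overhead */
+/* above works out to roughly 14 + 2%*3072 = ~75 bytes, comfortably within */
+/* BUF_SAFETY (0x7a = 122 bytes) */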
+
+
+/* Logic of the netdev part of this driver */
+
+/* The RX ring is filled with buffers, when a packet arrives */
+/* it is DMA'd into the buffer which is marked used and RxDone called */
+/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
+/* the packet off upstairs */
+
+/* The transmitter on the oboe chip can work in one of two modes */
+/* for each ring->tx[] the transmitter can either */
+/* a) transmit the packet, leave the transmitter enabled and proceed to */
+/* the next ring */
+/* OR */
+/* b) transmit the packet, switch off the transmitter and issue TxDone */
+
+/* All packets are entered into the ring in mode b), if the ring was */
+/* empty the transmitter is started. */
+
+/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
+/* more than one packet, all but the last are set to mode a) [HOWEVER */
+/* the hardware may not notice this, this is why we start in mode b) ] */
+/* then restart the transmitter */
+
+/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
+/* if the ring isn't empty */
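+/* (in pseudocode: if OPTIMIZE_TX and the ring holds more than one packet, */
+/* set all but the last used slot to mode a) and restart; otherwise just */
+/* restart the transmitter if the ring is not empty) */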
+
+/* Speed changes are delayed until the TxRing is empty */
+/* mtt is handled by generating packets with bad CRCs, before the data */
+
+/* TODO: */
+/* check the mtt works ok */
+/* finish the watchdog */
+
+/* No user serviceable parts below here */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+//#include <net/irda/irmod.h>
+//#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/crc.h>
+
+#include "donauboe.h"
+
+#define INB(port) inb_p(port)
+#define OUTB(val,port) outb_p(val,port)
+#define OUTBP(val,port) outb_p(val,port)
+
+#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
+
+#if PROBE_VERBOSE
+#define PROBE_DEBUG(args...) (printk (args))
+#else
+#define PROBE_DEBUG(args...) ;
+#endif
+
+/* Set the DMA to be byte at a time */
+#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
+#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
+#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
+
+static struct pci_device_id toshoboe_pci_tbl[] = {
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
+
+#define DRIVER_NAME "toshoboe"
+static char *driver_name = DRIVER_NAME;
+
+static int max_baud = 4000000;
+#ifdef USE_PROBE
+static int do_probe = 0;
+#endif
+
+
+/**********************************************************************/
+static int
+toshoboe_checkfcs (unsigned char *buf, int len)
+{
+ int i;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ fcs.value = INIT_FCS;
+
+ for (i = 0; i < len; ++i)
+ fcs.value = irda_fcs (fcs.value, *(buf++));
+
+ return (fcs.value == GOOD_FCS);
+}
+
+/***********************************************************************/
+/* Generic chip handling code */
+#ifdef DUMP_PACKETS
+static unsigned char dump[50];
+static void
+_dumpbufs (unsigned char *data, int len, char tete)
+{
+  int i, j;
+  char head = tete;
+
+  for (i = 0; i < len; i += 16)
+    {
+      for (j = 0; j < 16 && i + j < len; j++)
+        sprintf (&dump[3 * j], "%02x.", data[i + j]);
+      dump[3 * j] = 0;
+      IRDA_DEBUG (2, "%c%s\n", head, dump);
+      head = '+';
+    }
+}
+#endif
+
+#ifdef USE_PROBE
+/* Dump the registers */
+static void
+toshoboe_dumpregs (struct toshoboe_cb *self)
+{
+ __u32 ringbase;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ ringbase = INB (OBOE_RING_BASE0) << 10;
+ ringbase |= INB (OBOE_RING_BASE1) << 18;
+ ringbase |= INB (OBOE_RING_BASE2) << 26;
+
+ printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
+ printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
+ self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
+ self->int_sip);
+ printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
+ INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
+ printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
+ INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
+ printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
+ INB (OBOE_CONFIG1), INB (OBOE_STATUS));
+ printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
+ INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
+ INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
+ printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
+ INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
+ INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
+ printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
+ INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
+ INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
+
+ if (self->ring)
+ {
+ int i;
+ ringbase = virt_to_bus (self->ring);
+ printk (KERN_ERR "Ring at %08x:\n", ringbase);
+ printk (KERN_ERR "RX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ printk ("\n");
+ printk (KERN_ERR "TX:");
+      for (i = 0; i < TX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ printk ("\n");
+ }
+}
+#endif
+
+/*Don't let the chip look at memory */
+static void
+toshoboe_disablebm (struct toshoboe_cb *self)
+{
+ __u8 command;
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_byte (self->pdev, PCI_COMMAND, command);
+
+}
+
+/* Shutdown the chip and point the taskfile reg somewhere else */
+static void
+toshoboe_stopchip (struct toshoboe_cb *self)
+{
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ /*Disable interrupts */
+ OUTB (0x0, OBOE_IER);
+ /*Disable DMA, Disable Rx, Disable Tx */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ /*Disable SIR MIR FIR, Tx and Rx */
+ OUTB (0x00, OBOE_ENABLEH);
+ /*Point the ring somewhere safe */
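+  /* (all ones parks the ring base at 0xfffffc00, the top of the 32-bit address space) */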
+ OUTB (0x3f, OBOE_RING_BASE2);
+ OUTB (0xff, OBOE_RING_BASE1);
+ OUTB (0xff, OBOE_RING_BASE0);
+
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Why */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*switch it off */
+ OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
+
+ toshoboe_disablebm (self);
+}
+
+/* Transmitter initialization */
+static void
+toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
+{
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+}
+
+/*Set the baud rate */
+static void
+toshoboe_setbaud (struct toshoboe_cb *self)
+{
+ __u16 pconfig = 0;
+ __u8 config0l = 0;
+
+ IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed);
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+#ifdef USE_MIR
+ case 1152000:
+#endif
+ case 4000000:
+ break;
+ default:
+
+ printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
+ self->speed);
+ return;
+ }
+
+ switch (self->speed)
+ {
+ /* For SIR the preamble is done by adding XBOFs */
+ /* to the packet */
+ /* set to filtered SIR mode, filter looks for BOF and EOF */
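+      /* For these SIR rates the BAUD field works out to (115200 / baud) - 1, */
+      /* with the pulse width code fixed at 25 */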
+ case 2400:
+ pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 4800:
+ pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 9600:
+ pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 19200:
+ pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 38400:
+ pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 57600:
+ pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 115200:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ default:
+ /*Set to packet based reception */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ break;
+ }
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ config0l = OBOE_CONFIG0L_ENSIR;
+ if (self->async)
+ {
+ /*Set to character based reception */
+ /*System will lock if MAXLEN=0 */
+ /*so have to be careful */
+ OUTB (0x01, OBOE_MAXLENH);
+ OUTB (0x01, OBOE_MAXLENL);
+ OUTB (0x00, OBOE_MAXLENH);
+ }
+ else
+ {
+ /*Set to packet based reception */
+ config0l |= OBOE_CONFIG0L_ENSIRF;
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ }
+ break;
+
+#ifdef USE_MIR
+ /* MIR mode */
+ /* Set for 16 bit CRC and enable MIR */
+ /* Preamble now handled by the chip */
+ case 1152000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
+ pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
+ break;
+#endif
+ /* FIR mode */
+ /* Set for 32 bit CRC and enable FIR */
+ /* Preamble handled by the chip */
+ case 4000000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+      /* Documentation says 14, but Toshiba uses 15 in their drivers */
+ pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_ENFIR;
+ break;
+ }
+
+ /* Copy into new PHY config buffer */
+ OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
+ OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
+ OUTB (config0l, OBOE_CONFIG0L);
+
+ /* Now make OBOE copy from new PHY to current PHY */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+
+ /* speed change executed */
+ self->new_speed = 0;
+ self->io.speed = self->speed;
+}
+
+/*Let the chip look at memory */
+static void
+toshoboe_enablebm (struct toshoboe_cb *self)
+{
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ pci_set_master (self->pdev);
+}
+
+/*setup the ring */
+static void
+toshoboe_initring (struct toshoboe_cb *self)
+{
+ int i;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->ring->tx[i].len = 0;
+ self->ring->tx[i].control = 0x00;
+ self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->ring->rx[i].len = RX_LEN;
+ self->ring->rx[i].len = 0;
+ self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
+ self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
+ }
+}
+
+static void
+toshoboe_resetptrs (struct toshoboe_cb *self)
+{
+  /* Can reset pointers by twiddling DMA */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
+ self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+}
+
+/* Called in locked state */
+static void
+toshoboe_initptrs (struct toshoboe_cb *self)
+{
+
+ /* spin_lock_irqsave(self->spinlock, flags); */
+ /* save_flags (flags); */
+
+  /* Can reset pointers by twiddling DMA */
+ toshoboe_resetptrs (self);
+
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->txpending = 0;
+
+ /* spin_unlock_irqrestore(self->spinlock, flags); */
+ /* restore_flags (flags); */
+}
+
+/* Wake the chip up and get it looking at the rings */
+/* Called in locked state */
+static void
+toshoboe_startchip (struct toshoboe_cb *self)
+{
+ __u32 physaddr;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ toshoboe_initring (self);
+ toshoboe_enablebm (self);
+ OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
+ OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
+
+ /* Stop the clocks */
+ OUTB (0, OBOE_ENABLEH);
+
+ /*Set size of rings */
+ OUTB (RING_SIZE, OBOE_RING_SIZE);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Enable ints */
+ OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
+ OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
+
+  /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Set the maximum packet length to 0xfff (4095) */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+ /*Shutdown DMA */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+
+ /*Find out where the rings live */
+ physaddr = virt_to_bus (self->ring);
+
+ IRDA_ASSERT ((physaddr & 0x3ff) == 0,
+               printk (KERN_ERR DRIVER_NAME ": ring not correctly aligned\n");
+ return;);
+
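+  /* A10-A17 go in BASE0, A18-A25 in BASE1, A26-A31 in BASE2; */
+  /* A0-A9 are assumed zero, hence the 1 kB alignment check above */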
+ OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
+ OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
+ OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
+
+  /*Enable DMA controller in byte mode and RX */
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+
+ /* Start up the clocks */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*set to sensible speed */
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+}
+
+static void
+toshoboe_isntstuck (struct toshoboe_cb *self)
+{
+}
+
+static void
+toshoboe_checkstuck (struct toshoboe_cb *self)
+{
+ unsigned long flags;
+
+ if (0)
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ /* This will reset the chip completely */
+ printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
+
+ toshoboe_stopchip (self);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+}
+
+/*Generate packet of about mtt us long */
+static int
+toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
+{
+ int xbofs;
+
+ xbofs = ((int) (mtt/100)) * (int) (self->speed);
+ xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
+ xbofs++;
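+  /* e.g. mtt = 1000 us at 115200 baud: (1000/100) * 115200 / 80000 = 14, */
+  /* plus one is 15 XBOF bytes, roughly 1 ms of line time at 8 bits per byte */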
+
+ IRDA_DEBUG (2, DRIVER_NAME
+ ": generated mtt of %d bytes for %d us at %d baud\n"
+ , xbofs,mtt,self->speed);
+
+ if (xbofs > TX_LEN)
+ {
+ printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
+ xbofs, TX_LEN);
+ xbofs = TX_LEN;
+ }
+
+  /* XBOFs will do for SIR, MIR and FIR; SIR mode doesn't generate a checksum anyway */
+ memset (buf, XBOF, xbofs);
+
+ return xbofs;
+}
+
+static int toshoboe_invalid_dev(int irq)
+{
+ printk (KERN_WARNING DRIVER_NAME ": irq %d for unknown device.\n", irq);
+ return 1;
+}
+
+#ifdef USE_PROBE
+/***********************************************************************/
+/* Probe code */
+
+static void
+toshoboe_dumptx (struct toshoboe_cb *self)
+{
+ int i;
+ PROBE_DEBUG(KERN_WARNING "TX:");
+  for (i = 0; i < TX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ PROBE_DEBUG(" [%d]\n",self->speed);
+}
+
+static void
+toshoboe_dumprx (struct toshoboe_cb *self, int score)
+{
+ int i;
+ PROBE_DEBUG(" %d\nRX:",score);
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ PROBE_DEBUG("\n");
+}
+
+static inline int
+stuff_byte (__u8 byte, __u8 * buf)
+{
+ switch (byte)
+ {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
+ case CE:
+ /* Insert transparently coded */
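+      /* e.g. a raw BOF (0xc0) goes out as CE followed by 0xc0 ^ IRDA_TRANS (0xe0), */
+      /* per the IrDA SIR wrapper definitions */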
+ buf[0] = CE; /* Send link escape */
+ buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
+ return 2;
+ /* break; */
+ default:
+ /* Non-special value, no transparency required */
+ buf[0] = byte;
+ return 1;
+ /* break; */
+ }
+}
+
+static irqreturn_t
+toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
+ __u8 irqstat;
+
+ if (self == NULL && toshoboe_invalid_dev(irq))
+ return IRQ_NONE;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp;
+
+ self->int_tx++;
+ PROBE_DEBUG("T");
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ self->int_tx+=100;
+ PROBE_DEBUG("S");
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+ }
+ }
+
+ if (irqstat & OBOE_INT_RXDONE) {
+ self->int_rx++;
+ PROBE_DEBUG("R"); }
+ if (irqstat & OBOE_INT_TXUNDER) {
+ self->int_txunder++;
+ PROBE_DEBUG("U"); }
+ if (irqstat & OBOE_INT_RXOVER) {
+ self->int_rxover++;
+ PROBE_DEBUG("O"); }
+ if (irqstat & OBOE_INT_SIP) {
+ self->int_sip++;
+ PROBE_DEBUG("I"); }
+ return IRQ_HANDLED;
+}
+
+static int
+toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
+{
+ int i;
+ int len = 0;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ if (fir)
+ {
+ memset (buf, 0, TT_LEN);
+ return (TT_LEN);
+ }
+
+ fcs.value = INIT_FCS;
+
+ memset (buf, XBOF, 10);
+ len += 10;
+ buf[len++] = BOF;
+
+ for (i = 0; i < TT_LEN; ++i)
+ {
+ len += stuff_byte (i, buf + len);
+ fcs.value = irda_fcs (fcs.value, i);
+ }
+
+ len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
+ len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
+ buf[len++] = EOF;
+ len++;
+ return len;
+}
+
+static int
+toshoboe_probefail (struct toshoboe_cb *self, char *msg)
+{
+  printk (KERN_ERR DRIVER_NAME ": probe(%d) failed %s\n", self->speed, msg);
+ toshoboe_dumpregs (self);
+ toshoboe_stopchip (self);
+ free_irq (self->io.irq, (void *) self);
+ return 0;
+}
+
+static int
+toshoboe_numvalidrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if ((self->ring->rx[i].control & 0xe0) == 0)
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_numrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_probe (struct toshoboe_cb *self)
+{
+ int i, j, n;
+#ifdef USE_MIR
+ int bauds[] = { 9600, 115200, 4000000, 1152000 };
+#else
+ int bauds[] = { 9600, 115200, 4000000 };
+#endif
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (request_irq (self->io.irq, toshoboe_probeinterrupt,
+ self->io.irqflags, "toshoboe", (void *) self))
+ {
+ printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
+ self->io.irq);
+ return 0;
+ }
+
+ /* test 1: SIR filter and back to back */
+
+ for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j)
+ {
+ int fir = (j > 1);
+ toshoboe_stopchip (self);
+
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ /*Address is already setup */
+ toshoboe_startchip (self);
+ self->int_rx = self->int_tx = 0;
+ self->speed = bauds[j];
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->ring->tx[self->txs].control =
+/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
+/* MIR: all received data is stored in one slot */
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ | OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ toshoboe_dumptx (self);
+ /* Turn on TX and RX and loopback */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ n = fir ? 1 : 4;
+ while (toshoboe_numvalidrcvs (self) != n)
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "filter test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+
+ n = fir ? 203 : 102;
+ while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "interrupt test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ }
+
+ /* test 2: SIR in char at a time */
+
+ toshoboe_stopchip (self);
+ self->int_rx = self->int_tx = 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->async = 1;
+ self->speed = 115200;
+ toshoboe_setbaud (self);
+ self->ring->tx[self->txs].control =
+ OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
+ self->ring->tx[self->txs].len = 4;
+
+ ((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
+ ((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
+ ((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
+ ((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
+ toshoboe_dumptx (self);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ while (toshoboe_numvalidrcvs (self) != 4)
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async test");
+ udelay (100);
+ i++;
+ }
+
+ while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async interrupt test");
+ udelay (100);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ self->async = 0;
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_stopchip (self);
+
+ free_irq (self->io.irq, (void *) self);
+
+ printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
+
+ return 1;
+}
+#endif
+
+/******************************************************************/
+/* Netdev style code */
+
+/* Transmit something */
+static int
+toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ __s32 speed;
+ int mtt, len, ctl;
+ unsigned long flags;
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+
+ self = (struct toshoboe_cb *) dev->priv;
+
+ IRDA_ASSERT (self != NULL, return 0; );
+
+ IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__
+ ,skb->len,self->txpending,INB (OBOE_ENABLEH));
+ if (!cb->magic) {
+ IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic);
+#ifdef DUMP_PACKETS
+ _dumpbufs(skb->data,skb->len,'>');
+#endif
+ }
+
+ /* change speed pending, wait for its execution */
+ if (self->new_speed)
+ return -EBUSY;
+
+ /* device stopped (apm) wait for restart */
+ if (self->stopped)
+ return -EBUSY;
+
+ toshoboe_checkstuck (self);
+
+ dev->trans_start = jiffies;
+
+ /* Check if we need to change the speed */
+ /* But not now. Wait after transmission if mtt not required */
+ speed=irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1))
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending || skb->len)
+ {
+ self->new_speed = speed;
+ IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
+ __FUNCTION__, speed);
+ /* if no data, that's all! */
+ if (!skb->len)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return 0;
+ }
+ /* True packet, go on, but */
+ /* do not accept anything before change speed execution */
+ netif_stop_queue(dev);
+ /* ready to process TxDone interrupt */
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+ else
+ {
+ /* idle and no data, change speed now */
+ self->speed = speed;
+ toshoboe_setbaud (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return 0;
+ }
+
+ }
+
+ if ((mtt = irda_get_mtt(skb)))
+ {
+ /* This is fair since the queue should be empty anyway */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -EBUSY;
+ }
+
+ /* If in SIR mode we need to generate a string of XBOFs */
+ /* In MIR and FIR we need to generate a string of data */
+ /* which we will add a wrong checksum to */
+
+ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
+ IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__
+ ,skb->len,mtt,self->txpending);
+ if (mtt)
+ {
+ self->ring->tx[self->txs].len = mtt & 0xfff;
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
+ }
+#ifdef USE_MIR
+ else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC;
+ }
+#endif
+ self->ring->tx[self->txs].control = ctl;
+
+ OUTB (0x0, OBOE_ENABLEH);
+ /* It is only a timer. Do not send mtt packet outside! */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ }
+ else
+ {
+ printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+
+#ifdef DUMP_PACKETS
+_dumpbufs(skb->data,skb->len,'>');
+#endif
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__
+ ,skb->len, self->ring->tx[self->txs].control, self->txpending);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -EBUSY;
+ }
+
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
+ {
+ len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
+ }
+ else
+ {
+ len = skb->len;
+ memcpy (self->tx_bufs[self->txs], skb->data, len);
+ }
+ self->ring->tx[self->txs].len = len & 0x0fff;
+
+ /*Sometimes the HW doesn't see us assert RTCENTX in the interrupt code */
+  /*so to play safe we guarantee that the last packet to be transmitted */
+ /*has RTCENTX set */
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_SIP ;
+ }
+ self->ring->tx[self->txs].control = ctl;
+
+ /* If transmitter is idle start in one-shot mode */
+
+ if (!self->txpending)
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+
+ return 0;
+}
+
+/*interrupt handler */
+static irqreturn_t
+toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
+ __u8 irqstat;
+ struct sk_buff *skb = NULL;
+
+ if (self == NULL && toshoboe_invalid_dev(irq))
+ return IRQ_NONE;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ toshoboe_isntstuck (self);
+
+/* Txdone */
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp, txpc;
+ int i;
+
+ txp = self->txpending;
+ self->txpending = 0;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
+ self->txpending++;
+ }
+ IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__
+ ,irqstat,txp,self->txpending);
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+
+ /* Got anything queued ? start it together */
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txpc = txp;
+#ifdef OPTIMIZE_TX
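+          /* Walk the chain of HW-owned slots, clearing RTCENTX on every slot */
+          /* that has another queued behind it, so only the last one raises TxDone */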
+ while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txp = txpc;
+ txpc++;
+ txpc %= TX_SLOTS;
+ self->stats.tx_packets++;
+ if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
+ }
+ self->stats.tx_packets--;
+#else
+ self->stats.tx_packets++;
+#endif
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ }
+
+ if ((!self->txpending) && (self->new_speed))
+ {
+ self->speed = self->new_speed;
+ IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
+ __FUNCTION__, self->speed);
+ toshoboe_setbaud (self);
+ }
+
+ /* Tell network layer that we want more frames */
+ if (!self->new_speed)
+ netif_wake_queue(self->netdev);
+ }
+
+ if (irqstat & OBOE_INT_RXDONE)
+ {
+ while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
+ {
+ int len = self->ring->rx[self->rxs].len;
+ skb = NULL;
+ IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__
+ ,len,self->ring->rx[self->rxs].control);
+
+#ifdef DUMP_PACKETS
+_dumpbufs(self->rx_bufs[self->rxs],len,'<');
+#endif
+
+ if (self->ring->rx[self->rxs].control == 0)
+ {
+ __u8 enable = INB (OBOE_ENABLEH);
+
+ /* In SIR mode we need to check the CRC as this */
+ /* hasn't been done by the hardware */
+ if (enable & OBOE_ENABLEH_SIRON)
+ {
+ if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
+ len = 0;
+ /*Trim off the CRC */
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+
+#ifdef USE_MIR
+ else if (enable & OBOE_ENABLEH_MIRON)
+ {
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+#endif
+ else if (enable & OBOE_ENABLEH_FIRON)
+ {
+ if (len > 3)
+ len -= 4; /*FIXME: check this */
+ else
+ len = 0;
+ IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+ else
+ IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable);
+
+ if (len)
+ {
+ skb = dev_alloc_skb (len + 1);
+ if (skb)
+ {
+ skb_reserve (skb, 1);
+
+ skb_put (skb, len);
+ memcpy (skb->data, self->rx_bufs[self->rxs], len);
+
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons (ETH_P_IRDA);
+ }
+ else
+ {
+ printk (KERN_INFO
+ "%s(), memory squeeze, dropping frame.\n",
+ __FUNCTION__);
+ }
+ }
+ }
+ else
+ {
+ /* TODO: =========================================== */
+ /* if OBOE_CTL_RX_LENGTH, our buffers are too small */
+ /* (MIR or FIR) data is lost. */
+              /* (SIR) data is split across several slots. */
+              /* We would have to join all the received buffers */
+              /* into one large buffer before checking the CRC. */
+ IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__
+ ,len,self->ring->rx[self->rxs].control);
+ }
+
+ self->ring->rx[self->rxs].len = 0x0;
+ self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
+
+ self->rxs++;
+ self->rxs %= RX_SLOTS;
+
+ if (skb)
+ netif_rx (skb);
+
+ }
+ }
+
+ if (irqstat & OBOE_INT_TXUNDER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
+ }
+ if (irqstat & OBOE_INT_RXOVER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
+ }
+/* This must be useful for something... */
+ if (irqstat & OBOE_INT_SIP)
+ {
+ self->int_sip++;
+ IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__
+ ,self->int_sip,irqstat,self->txpending);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static int
+toshoboe_net_open (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+ self = (struct toshoboe_cb *) dev->priv;
+
+ IRDA_ASSERT (self != NULL, return 0; );
+
+ if (self->async)
+ return -EBUSY;
+
+ if (self->stopped)
+ return 0;
+
+ if (request_irq (self->io.irq, toshoboe_interrupt,
+ SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
+ {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open (dev, &self->qos, driver_name);
+
+ self->irdad = 1;
+
+ return 0;
+}
+
+static int
+toshoboe_net_close (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+ self = (struct toshoboe_cb *) dev->priv;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close (self->irlap);
+ self->irlap = NULL;
+
+ self->irdad = 0;
+
+ free_irq (self->io.irq, (void *) self);
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ return 0;
+}
+
+/*
+ * Function toshoboe_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int
+toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+
+ self = dev->priv;
+
+ IRDA_ASSERT (self != NULL, return -1; );
+
+ IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ switch (cmd)
+ {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ /* This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+      IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld)\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
+      if (!in_interrupt () && !capable (CAP_NET_ADMIN))
+        {
+          ret = -EPERM;
+          break;
+        }
+
+ /* self->speed=irq->ifr_baudrate; */
+ /* toshoboe_setbaud(self); */
+ /* Just change speed once - inserted by Paul Bristow */
+ self->new_speed = irq->ifr_baudrate;
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
+      if (!capable (CAP_NET_ADMIN))
+        {
+          ret = -EPERM;
+          break;
+        }
+ irda_device_set_media_busy (self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
+ IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
+ break;
+ default:
+ IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return ret;
+
+}
+
+MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
+MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
+MODULE_LICENSE("GPL");
+
+module_param (max_baud, int, 0);
+MODULE_PARM_DESC(max_baud, "Maximum baud rate");
+
+#ifdef USE_PROBE
+module_param (do_probe, bool, 0);
+MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
+#endif
+
+static void
+toshoboe_close (struct pci_dev *pci_dev)
+{
+ int i;
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT (self != NULL, return; );
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ kfree (self->tx_bufs[i]);
+ self->tx_bufs[i] = NULL;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ kfree (self->rx_bufs[i]);
+ self->rx_bufs[i] = NULL;
+ }
+
+ unregister_netdev(self->netdev);
+
+ kfree (self->ringbuf);
+ self->ringbuf = NULL;
+ self->ring = NULL;
+
+ free_netdev(self->netdev);
+}
+
+static int
+toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
+{
+ struct toshoboe_cb *self;
+ struct net_device *dev;
+ int i = 0;
+ int ok = 0;
+ int err;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if ((err=pci_enable_device(pci_dev)))
+ return err;
+
+ dev = alloc_irdadev(sizeof (struct toshoboe_cb));
+ if (dev == NULL)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
+ "IrDA control block\n");
+ return -ENOMEM;
+ }
+
+ self = dev->priv;
+ self->netdev = dev;
+ self->pdev = pci_dev;
+ self->base = pci_resource_start(pci_dev,0);
+
+ self->io.fir_base = self->base;
+ self->io.fir_ext = OBOE_IO_EXTENT;
+ self->io.irq = pci_dev->irq;
+ self->io.irqflags = SA_SHIRQ | SA_INTERRUPT;
+
+ self->speed = self->io.speed = 9600;
+ self->async = 0;
+
+ /* Lock the port that we need */
+ if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
+ ,self->io.fir_base);
+ err = -EBUSY;
+ goto freeself;
+ }
+
+ spin_lock_init(&self->spinlock);
+
+ irda_init_max_qos_capabilies (&self->qos);
+ self->qos.baud_rate.bits = 0;
+
+ if (max_baud >= 2400)
+ self->qos.baud_rate.bits |= IR_2400;
+ /*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
+ if (max_baud >= 9600)
+ self->qos.baud_rate.bits |= IR_9600;
+ if (max_baud >= 19200)
+ self->qos.baud_rate.bits |= IR_19200;
+ if (max_baud >= 115200)
+ self->qos.baud_rate.bits |= IR_115200;
+#ifdef USE_MIR
+ if (max_baud >= 1152000)
+ {
+ self->qos.baud_rate.bits |= IR_1152000;
+ }
+#endif
+ if (max_baud >= 4000000)
+ {
+ self->qos.baud_rate.bits |= (IR_4000000 << 8);
+ }
+
+ /*FIXME: work this out... */
+ self->qos.min_turn_time.bits = 0xff;
+
+ irda_qos_bits_to_value (&self->qos);
+
+ /* Allocate twice the size to guarantee alignment */
+ self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL);
+ if (!self->ringbuf)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
+ err = -ENOMEM;
+ goto freeregion;
+ }
+
+#if (BITS_PER_LONG == 64)
+#error broken on 64-bit: casts pointer to 32-bit, and then back to pointer.
+#endif
+
+ /*We need to align the taskfile on a taskfile size boundary */
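+  /* (round up to the next OBOE_RING_LEN boundary; the doubled allocation */
+  /* above guarantees the aligned ring still lies inside the buffer) */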
+ {
+ unsigned long addr;
+
+ addr = (__u32) self->ringbuf;
+ addr &= ~(OBOE_RING_LEN - 1);
+ addr += OBOE_RING_LEN;
+ self->ring = (struct OboeRing *) addr;
+ }
+
+ memset (self->ring, 0, OBOE_RING_LEN);
+ self->io.mem_base = (__u32) self->ring;
+
+ ok = 1;
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
+ if (!self->tx_bufs[i])
+ ok = 0;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
+ if (!self->rx_bufs[i])
+ ok = 0;
+ }
+
+ if (!ok)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+
+#ifdef USE_PROBE
+ if (do_probe)
+ if (!toshoboe_probe (self))
+ {
+ err = -ENODEV;
+ goto freebufs;
+ }
+#endif
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pci_dev->dev);
+ dev->hard_start_xmit = toshoboe_hard_xmit;
+ dev->open = toshoboe_net_open;
+ dev->stop = toshoboe_net_close;
+ dev->do_ioctl = toshoboe_net_ioctl;
+
+ err = register_netdev(dev);
+ if (err)
+ {
+ printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+ printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
+
+ pci_set_drvdata(pci_dev,self);
+
+ printk (KERN_INFO DRIVER_NAME ": Using multiple tasks, version %s\n", rcsid);
+
+ return 0;
+
+freebufs:
+ for (i = 0; i < TX_SLOTS; ++i)
+ if (self->tx_bufs[i])
+ kfree (self->tx_bufs[i]);
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (self->rx_bufs[i])
+ kfree (self->rx_bufs[i]);
+ kfree(self->ringbuf);
+
+freeregion:
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+freeself:
+ free_netdev(dev);
+
+ return err;
+}
+
+static int
+toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ unsigned long flags;
+ int i = 10;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (!self || self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+/* Flush all packets */
+ while ((i--) && (self->txpending))
+ udelay (10000);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_stopchip (self);
+ self->stopped = 1;
+ self->txpending = 0;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static int
+toshoboe_wakeup (struct pci_dev *pci_dev)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (!self || !self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_startchip (self);
+ self->stopped = 0;
+
+ netif_wake_queue(self->netdev);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static struct pci_driver donauboe_pci_driver = {
+ .name = "donauboe",
+ .id_table = toshoboe_pci_tbl,
+ .probe = toshoboe_open,
+ .remove = toshoboe_close,
+ .suspend = toshoboe_gotosleep,
+ .resume = toshoboe_wakeup
+};
+
+static int __init
+donauboe_init (void)
+{
+ return pci_module_init(&donauboe_pci_driver);
+}
+
+static void __exit
+donauboe_cleanup (void)
+{
+ pci_unregister_driver(&donauboe_pci_driver);
+}
+
+module_init(donauboe_init);
+module_exit(donauboe_cleanup);
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
new file mode 100644
index 000000000000..2ab173d9a0e4
--- /dev/null
+++ b/drivers/net/irda/donauboe.h
@@ -0,0 +1,363 @@
+/*********************************************************************
+ *
+ * Filename: toshoboe.h
+ * Version: 2.16
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ * IrDA chip set list from Toshiba Computer Engineering Corp.
+ * model		method	maker	controller	Version
+ * Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
+ * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 7020CT FIR,SIR ? ?
+ *
+ * Satell. 4090XCDT FIR,SIR ? ?
+ *
+ * Libretto 100CT FIR,SIR Toshiba Oboe
+ * Libretto 1000CT FIR,SIR Toshiba Oboe
+ *
+ * TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
+ * TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
+ * TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
+ * TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
+ *
+ ********************************************************************/
+
+/* The documentation for this chip is allegedly released */
+/* However I have not seen it, nor have I managed to contact */
+/* anyone who has. HOWEVER the chip bears a striking resemblance */
+/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
+/* the documentation for this is freely available at */
+/* http://www.toshiba.com/taec/components/Generic/TMPR3922.shtml */
+/* The mapping between the registers in that document and the */
+/* Registers in the 701 oboe chip are as follows */
+
+
+/* 3922 reg 701 regs, by bit numbers */
+/* 7- 0 15- 8 24-16 31-25 */
+/* $28 0x0 0x1 */
+/* $2c SEE NOTE 1 */
+/* $30 0x6 0x7 */
+/* $34 0x8 0x9 SEE NOTE 2 */
+/* $38 0x10 0x11 */
+/* $3C 0xe SEE NOTE 3 */
+/* $40 0x12 0x13 */
+/* $44 0x14 0x15 */
+/* $48 0x16 0x17 */
+/* $4c 0x18 0x19 */
+/* $50 0x1a 0x1b */
+
+/* FIXME: could be 0x1b 0x1a here */
+
+/* $54 0x1d 0x1c */
+/* $5C 0xf SEE NOTE 4 */
+/* $130 SEE NOTE 5 */
+/* $134 SEE NOTE 6 */
+/* */
+/* NOTES: */
+/* 1. The pointer to ring is packed in most unceremoniously */
+/* 701 Register Address bits (A9-A0 must be zero) */
+/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
+/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
+/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
+/* */
+/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
+/* documentation would suggest that a write of 0x1 to 0x8 */
+/* would be more appropriate. */
+/* */
+/* 3. This assignment is tenuous at best, register 0xe seems to */
+/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
+/* if either of the lower two bits are set the chip seems to */
+/* switch off */
+/* */
+/* 4. Bits 7-4 seem to be different; bit 4 seems just to be a generic */
+/* receiver busy flag */
+/* */
+/* 5. and 6. The IER and ISR have a different bit assignment */
+/* The lower three bits of both read back as ones */
+/* ISR is register 0xc, IER is register 0xd */
+/* 7 6 5 4 3 2 1 0 */
+/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* TxDone xmit done (generated only if the generate interrupt bit */
+/* is set in the ring) */
+/* RxDone recv completed (or other recv condition if you set it */
+/* up */
+/* TxUnder underflow in Transmit FIFO */
+/* RxOver overflow in Recv FIFO */
+/* SipRcv received serial gap (or other condition you set) */
+/* Interrupts are enabled by writing a one to the IER register */
+/* Interrupts are cleared by writing a one to the ISR register */
+/* */
+/* 6. The remaining registers: 0x6 and 0x3 appear to be */
+/* reserved parts of 16 or 32 bit registers; the remainder */
+/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
+/* the Unicast Filter register at $58. */
+/* */
+/* 7. While the core obviously expects 32 bit accesses all the */
+/* M$ drivers do 8 bit accesses, in fact the Miniport ones */
+/* write and read back the byte several times (why?) */
+
+
+#ifndef TOSHOBOE_H
+#define TOSHOBOE_H
+
+/* Registers */
+
+#define OBOE_IO_EXTENT 0x1f
+
+/*Receive and transmit slot pointers */
+#define OBOE_REG(i) (i+(self->base))
+#define OBOE_RXSLOT OBOE_REG(0x0)
+#define OBOE_TXSLOT OBOE_REG(0x1)
+#define OBOE_SLOT_MASK 0x3f
+
+#define OBOE_TXRING_OFFSET 0x200
+#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
+
+/*pointer to the ring */
+#define OBOE_RING_BASE0 OBOE_REG(0x4)
+#define OBOE_RING_BASE1 OBOE_REG(0x5)
+#define OBOE_RING_BASE2 OBOE_REG(0x2)
+#define OBOE_RING_BASE3 OBOE_REG(0x3)
+
+/*Number of slots in the ring */
+#define OBOE_RING_SIZE OBOE_REG(0x7)
+#define OBOE_RING_SIZE_RX4 0x00
+#define OBOE_RING_SIZE_RX8 0x01
+#define OBOE_RING_SIZE_RX16 0x03
+#define OBOE_RING_SIZE_RX32 0x07
+#define OBOE_RING_SIZE_RX64 0x0f
+#define OBOE_RING_SIZE_TX4 0x00
+#define OBOE_RING_SIZE_TX8 0x10
+#define OBOE_RING_SIZE_TX16 0x30
+#define OBOE_RING_SIZE_TX32 0x70
+#define OBOE_RING_SIZE_TX64 0xf0
+
+#define OBOE_RING_MAX_SIZE 64
+
+/*Causes the gubbins to re-examine the ring */
+#define OBOE_PROMPT OBOE_REG(0x9)
+#define OBOE_PROMPT_BIT 0x1
+
+/* Interrupt Status Register */
+#define OBOE_ISR OBOE_REG(0xc)
+/* Interrupt Enable Register */
+#define OBOE_IER OBOE_REG(0xd)
+/* Interrupt bits for IER and ISR */
+#define OBOE_INT_TXDONE 0x80
+#define OBOE_INT_RXDONE 0x40
+#define OBOE_INT_TXUNDER 0x20
+#define OBOE_INT_RXOVER 0x10
+#define OBOE_INT_SIP 0x08
+#define OBOE_INT_MASK 0xf8
+
+/*Reset Register */
+#define OBOE_CONFIG1 OBOE_REG(0xe)
+#define OBOE_CONFIG1_RST 0x01
+#define OBOE_CONFIG1_DISABLE 0x02
+#define OBOE_CONFIG1_4 0x08
+#define OBOE_CONFIG1_8 0x08
+
+#define OBOE_CONFIG1_ON 0x8
+#define OBOE_CONFIG1_RESET 0xf
+#define OBOE_CONFIG1_OFF 0xe
+
+#define OBOE_STATUS OBOE_REG(0xf)
+#define OBOE_STATUS_RXBUSY 0x10
+#define OBOE_STATUS_FIRRX 0x04
+#define OBOE_STATUS_MIRRX 0x02
+#define OBOE_STATUS_SIRRX 0x01
+
+
+/*Speed control registers */
+#define OBOE_CONFIG0L OBOE_REG(0x10)
+#define OBOE_CONFIG0H OBOE_REG(0x11)
+
+#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
+#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
+#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
+#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
+#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
+#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
+
+#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
+#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
+#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
+#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
+#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
+#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
+#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
+#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
+
+#define OBOE_BOF OBOE_REG(0x12)
+#define OBOE_EOF OBOE_REG(0x13)
+
+#define OBOE_ENABLEL OBOE_REG(0x14)
+#define OBOE_ENABLEH OBOE_REG(0x15)
+
+#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
+#define OBOE_ENABLEH_CONFIGERR 0x40
+#define OBOE_ENABLEH_FIRON 0x20
+#define OBOE_ENABLEH_MIRON 0x10
+#define OBOE_ENABLEH_SIRON 0x08
+#define OBOE_ENABLEH_ENTX 0x04
+#define OBOE_ENABLEH_ENRX 0x02
+#define OBOE_ENABLEH_CRC16 0x01
+
+#define OBOE_ENABLEL_BROADCAST 0x01
+
+#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
+#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
+
+#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
+#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
+
+#define OBOE_PCONFIGH_BAUDMASK 0xfc
+#define OBOE_PCONFIGH_WIDTHMASK 0x04
+#define OBOE_PCONFIGL_WIDTHMASK 0xe0
+#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
+
+#define OBOE_PCONFIG_BAUDMASK 0xfc00
+#define OBOE_PCONFIG_BAUDSHIFT 10
+#define OBOE_PCONFIG_WIDTHMASK 0x04e0
+#define OBOE_PCONFIG_WIDTHSHIFT 5
+#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
+#define OBOE_PCONFIG_PREAMBLESHIFT 0
+
+#define OBOE_MAXLENL OBOE_REG(0x1a)
+#define OBOE_MAXLENH OBOE_REG(0x1b)
+
+#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
+#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
+
+/* The PCI ID of the OBOE chip */
+#ifndef PCI_DEVICE_ID_FIR701
+#define PCI_DEVICE_ID_FIR701 0x0701
+#endif
+
+#ifndef PCI_DEVICE_ID_FIRD01
+#define PCI_DEVICE_ID_FIRD01 0x0d01
+#endif
+
+struct OboeSlot
+{
+  __u16 len; /*Twelve bits of packet length */
+ __u8 unused;
+ __u8 control; /*Slot control/status see below */
+ __u32 address; /*Slot buffer address */
+}
+__attribute__ ((packed));
+
+#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
+
+struct OboeRing
+{
+ struct OboeSlot rx[OBOE_NTASKS];
+ struct OboeSlot tx[OBOE_NTASKS];
+};
+
+#define OBOE_RING_LEN (sizeof(struct OboeRing))
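+/* 64 RX + 64 TX slots of 8 packed bytes each: 0x400 bytes in total; the ring */
+/* base must be aligned to this (the driver checks (physaddr & 0x3ff) == 0) */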
+
+
+#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
+#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
+#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
+#define OBOE_CTL_TX_SIP 0x10 /*W Generate an SIP after transmission */
+#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
+#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
+ /* After this slot is processed */
+#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
+
+
+#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
+#define OBOE_CTL_RX_PHYERR 0x40 /*R Decoder error on reception */
+#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
+#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
+#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
+#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
+#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
+
+
+struct toshoboe_cb
+{
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+ struct tty_driver ttydev;
+
+  struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ __u32 flags; /* Interface flags */
+
+ struct pci_dev *pdev; /*PCI device */
+ int base; /*IO base */
+
+
+ int txpending; /*how many tx's are pending */
+ int txs, rxs; /*Which slots are we at */
+
+ int irdad; /*Driver under control of netdev end */
+ int async; /*Driver under control of async end */
+
+
+ int stopped; /*Stopped by some or other APM stuff */
+
+ int filter; /*In SIR mode do we want to receive
+ frames or byte ranges */
+
+ void *ringbuf; /*The ring buffer */
+ struct OboeRing *ring; /*The ring */
+
+ void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
+ void *rx_bufs[OBOE_RING_MAX_SIZE];
+
+
+ int speed; /*Current setting of the speed */
+ int new_speed; /*Set to request a speed change */
+
+/* The spinlock protect critical parts of the driver.
+ * Locking is done like this :
+ * spin_lock_irqsave(&self->spinlock, flags);
+ * Releasing the lock :
+ * spin_unlock_irqrestore(&self->spinlock, flags);
+ */
+ spinlock_t spinlock;
+ /* Used for the probe and diagnostics code */
+ int int_rx;
+ int int_tx;
+ int int_txunder;
+ int int_rxover;
+ int int_sip;
+};
+
+
+#endif
diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c
new file mode 100644
index 000000000000..31896262d21c
--- /dev/null
+++ b/drivers/net/irda/ep7211_ir.c
@@ -0,0 +1,122 @@
+/*
+ * IR port driver for the Cirrus Logic EP7211 processor.
+ *
+ * Copyright 2001, Blue Mug Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 1 ms */
+
+static void ep7211_ir_open(dongle_t *self, struct qos_info *qos);
+static void ep7211_ir_close(dongle_t *self);
+static int ep7211_ir_change_speed(struct irda_task *task);
+static int ep7211_ir_reset(struct irda_task *task);
+
+static struct dongle_reg dongle = {
+ .type = IRDA_EP7211_IR,
+ .open = ep7211_ir_open,
+ .close = ep7211_ir_close,
+ .reset = ep7211_ir_reset,
+ .change_speed = ep7211_ir_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
+{
+ unsigned int syscon1, flags;
+
+ save_flags(flags); cli();
+
+ /* Turn on the SIR encoder. */
+ syscon1 = clps_readl(SYSCON1);
+ syscon1 |= SYSCON1_SIREN;
+ clps_writel(syscon1, SYSCON1);
+
+ /* XXX: We should disable modem status interrupts on the first
+ UART (interrupt #14). */
+
+ restore_flags(flags);
+}
+
+static void ep7211_ir_close(dongle_t *self)
+{
+ unsigned int syscon1, flags;
+
+ save_flags(flags); cli();
+
+ /* Turn off the SIR encoder. */
+ syscon1 = clps_readl(SYSCON1);
+ syscon1 &= ~SYSCON1_SIREN;
+ clps_writel(syscon1, SYSCON1);
+
+ /* XXX: If we've disabled the modem status interrupts, we should
+ reset them back to their original state. */
+
+ restore_flags(flags);
+}
+
+/*
+ * Function ep7211_ir_change_speed (task)
+ *
+ * Change speed of the EP7211 I/R port. We don't really have to do anything
+ * for the EP7211 as long as the rate is being changed at the serial port
+ * level.
+ */
+static int ep7211_ir_change_speed(struct irda_task *task)
+{
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ return 0;
+}
+
+/*
+ * Function ep7211_ir_reset (task)
+ *
+ * Reset the EP7211 I/R. We don't really have to do anything.
+ *
+ */
+static int ep7211_ir_reset(struct irda_task *task)
+{
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ return 0;
+}
+
+/*
+ * Function ep7211_ir_init(void)
+ *
+ * Initialize EP7211 I/R module
+ *
+ */
+static int __init ep7211_ir_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+/*
+ * Function ep7211_ir_cleanup(void)
+ *
+ * Cleanup EP7211 I/R module
+ *
+ */
+static void __exit ep7211_ir_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+MODULE_AUTHOR("Jon McClintock <jonm@bluemug.com>");
+MODULE_DESCRIPTION("EP7211 I/R driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-8"); /* IRDA_EP7211_IR */
+
+module_init(ep7211_ir_init);
+module_exit(ep7211_ir_cleanup);
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
new file mode 100644
index 000000000000..a908df7c4b9d
--- /dev/null
+++ b/drivers/net/irda/esi-sir.c
@@ -0,0 +1,159 @@
+/*********************************************************************
+ *
+ * Filename: esi.c
+ * Version: 1.6
+ * Description: Driver for the Extended Systems JetEye PC dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 21 18:54:38 1998
+ * Modified at: Sun Oct 27 22:01:04 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int esi_open(struct sir_dev *);
+static int esi_close(struct sir_dev *);
+static int esi_change_speed(struct sir_dev *, unsigned);
+static int esi_reset(struct sir_dev *);
+
+static struct dongle_driver esi = {
+ .owner = THIS_MODULE,
+ .driver_name = "JetEye PC ESI-9680 PC",
+ .type = IRDA_ESI_DONGLE,
+ .open = esi_open,
+ .close = esi_close,
+ .reset = esi_reset,
+ .set_speed = esi_change_speed,
+};
+
+static int __init esi_sir_init(void)
+{
+ return irda_register_dongle(&esi);
+}
+
+static void __exit esi_sir_cleanup(void)
+{
+ irda_unregister_dongle(&esi);
+}
+
+static int esi_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power up and set dongle to 9600 baud */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int esi_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function esi_change_speed (task)
+ *
+ * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
+ * Apparently (see old esi-driver) no delays are needed here...
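+ * The dongle decodes the speed from DTR/RTS: 9600 = RTS only, 19200 = DTR only,
+ * 115200 = both asserted.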
+ *
+ */
+static int esi_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int dtr, rts;
+
+ switch (speed) {
+ case 19200:
+ dtr = TRUE;
+ rts = FALSE;
+ break;
+ case 115200:
+ dtr = rts = TRUE;
+ break;
+ default:
+ ret = -EINVAL;
+ speed = 9600;
+ /* fall through */
+ case 9600:
+ dtr = FALSE;
+ rts = TRUE;
+ break;
+ }
+
+ /* Change speed of dongle */
+ sirdev_set_dtr_rts(dev, dtr, rts);
+ dev->speed = speed;
+
+ return ret;
+}
+
+/*
+ * Function esi_reset (task)
+ *
+ * Reset dongle;
+ *
+ */
+static int esi_reset(struct sir_dev *dev)
+{
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Hm, the old esi-driver left the dongle unpowered relying on
+ * the following speed change to repower. This might work for
+ * the esi because we only need the modem lines. However, now the
+ * general rule is reset must bring the dongle to some working
+ * well-known state because speed change might write to registers.
+ * The old esi-driver didn't add any delay here - let's hope it's fine.
+ */
+
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
+
+module_init(esi_sir_init);
+module_exit(esi_sir_cleanup);
+
diff --git a/drivers/net/irda/esi.c b/drivers/net/irda/esi.c
new file mode 100644
index 000000000000..d3a61af6402d
--- /dev/null
+++ b/drivers/net/irda/esi.c
@@ -0,0 +1,149 @@
+/*********************************************************************
+ *
+ * Filename: esi.c
+ * Version: 1.5
+ * Description: Driver for the Extended Systems JetEye PC dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 21 18:54:38 1998
+ * Modified at: Fri Dec 17 09:14:04 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+static void esi_open(dongle_t *self, struct qos_info *qos);
+static void esi_close(dongle_t *self);
+static int esi_change_speed(struct irda_task *task);
+static int esi_reset(struct irda_task *task);
+
+static struct dongle_reg dongle = {
+ .type = IRDA_ESI_DONGLE,
+ .open = esi_open,
+ .close = esi_close,
+ .reset = esi_reset,
+ .change_speed = esi_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init esi_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit esi_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void esi_open(dongle_t *self, struct qos_info *qos)
+{
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+}
+
+static void esi_close(dongle_t *dongle)
+{
+ /* Power off dongle */
+ dongle->set_dtr_rts(dongle->dev, FALSE, FALSE);
+}
+
+/*
+ * Function esi_change_speed (task)
+ *
+ * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
+ *
+ */
+static int esi_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ int dtr, rts;
+
+ switch (speed) {
+ case 19200:
+ dtr = TRUE;
+ rts = FALSE;
+ break;
+ case 115200:
+ dtr = rts = TRUE;
+ break;
+ case 9600:
+ default:
+ dtr = FALSE;
+ rts = TRUE;
+ break;
+ }
+
+ /* Change speed of dongle */
+ self->set_dtr_rts(self->dev, dtr, rts);
+ self->speed = speed;
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+/*
+ * Function esi_reset (task)
+ *
+ * Reset dongle;
+ *
+ */
+static int esi_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize ESI module
+ *
+ */
+module_init(esi_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup ESI module
+ *
+ */
+module_exit(esi_cleanup);
+
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
new file mode 100644
index 000000000000..0d2fe87fb9b7
--- /dev/null
+++ b/drivers/net/irda/girbil-sir.c
@@ -0,0 +1,258 @@
+/*********************************************************************
+ *
+ * Filename: girbil.c
+ * Version: 1.2
+ * Description: Implementation for the Greenwich GIrBIL dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 6 21:02:33 1999
+ * Modified at: Fri Dec 17 09:13:20 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int girbil_reset(struct sir_dev *dev);
+static int girbil_open(struct sir_dev *dev);
+static int girbil_close(struct sir_dev *dev);
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Control register 1 */
+#define GIRBIL_TXEN 0x01 /* Enable transmitter */
+#define GIRBIL_RXEN 0x02 /* Enable receiver */
+#define GIRBIL_ECAN	0x04 /* Cancel self emitted data */
+#define GIRBIL_ECHO 0x08 /* Echo control characters */
+
+/* LED Current Register (0x2) */
+#define GIRBIL_HIGH 0x20
+#define GIRBIL_MEDIUM 0x21
+#define GIRBIL_LOW 0x22
+
+/* Baud register (0x3) */
+#define GIRBIL_2400 0x30
+#define GIRBIL_4800 0x31
+#define GIRBIL_9600 0x32
+#define GIRBIL_19200 0x33
+#define GIRBIL_38400 0x34
+#define GIRBIL_57600 0x35
+#define GIRBIL_115200 0x36
+
+/* Mode register (0x4) */
+#define GIRBIL_IRDA 0x40
+#define GIRBIL_ASK 0x41
+
+/* Control register 2 (0x5) */
+#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
+
+static struct dongle_driver girbil = {
+ .owner = THIS_MODULE,
+ .driver_name = "Greenwich GIrBIL",
+ .type = IRDA_GIRBIL_DONGLE,
+ .open = girbil_open,
+ .close = girbil_close,
+ .reset = girbil_reset,
+ .set_speed = girbil_change_speed,
+};
+
+static int __init girbil_sir_init(void)
+{
+ return irda_register_dongle(&girbil);
+}
+
+static void __exit girbil_sir_cleanup(void)
+{
+ irda_unregister_dongle(&girbil);
+}
+
+static int girbil_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int girbil_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function girbil_change_speed (dev, speed)
+ *
+ * Set the speed for the Girbil type dongle.
+ *
+ */
+
+#define GIRBIL_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* dongle already reset - port and dongle at default speed */
+
+ switch(state) {
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ /* Set DTR and Clear RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better wait a little while */
+
+ ret = 0;
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = GIRBIL_9600;
+ break;
+ case 19200:
+ control[0] = GIRBIL_19200;
+ break;
+ case 38400:
+ control[0] = GIRBIL_38400;
+ break;
+ case 57600:
+ control[0] = GIRBIL_57600;
+ break;
+ case 115200:
+ control[0] = GIRBIL_115200;
+ break;
+ }
+ control[1] = GIRBIL_LOAD;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+
+ dev->speed = speed;
+
+ state = GIRBIL_STATE_WAIT_SPEED;
+ delay = 100;
+ break;
+
+ case GIRBIL_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ udelay(25); /* better wait a little while */
+ break;
+
+ default:
+ IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
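+
+/* For reference, a minimal sketch of the baud-code selection done in
+ * the speed state above, pulled out as a helper (the name is
+ * illustrative only); unsupported rates fall back to GIRBIL_9600,
+ * like the default case of the switch.
+ */
+static inline u8 girbil_speed_code(unsigned speed)
+{
+	switch (speed) {
+	case 19200:	return GIRBIL_19200;
+	case 38400:	return GIRBIL_38400;
+	case 57600:	return GIRBIL_57600;
+	case 115200:	return GIRBIL_115200;
+	case 9600:
+	default:	return GIRBIL_9600;
+	}
+}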
+
+/*
+ * Function girbil_reset (driver)
+ *
+ * This function resets the girbil dongle.
+ *
+ * Algorithm:
+ * 0. set RTS, and wait at least 5 ms
+ * 1. clear RTS
+ */
+
+
+#define GIRBIL_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET + 1)
+#define GIRBIL_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET + 2)
+#define GIRBIL_STATE_WAIT3_RESET (SIRDEV_STATE_DONGLE_RESET + 3)
+
+static int girbil_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ /* Sleep at least 5 ms */
+ delay = 20;
+ state = GIRBIL_STATE_WAIT1_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT1_RESET:
+ /* Set DTR and clear RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT2_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT2_RESET:
+ /* Write control byte */
+ sirdev_raw_write(dev, &control, 1);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT3_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT3_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+
+ default:
+ IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
+
+module_init(girbil_sir_init);
+module_exit(girbil_sir_cleanup);
diff --git a/drivers/net/irda/girbil.c b/drivers/net/irda/girbil.c
new file mode 100644
index 000000000000..248aeb0c726c
--- /dev/null
+++ b/drivers/net/irda/girbil.c
@@ -0,0 +1,250 @@
+/*********************************************************************
+ *
+ * Filename: girbil.c
+ * Version: 1.2
+ * Description: Implementation for the Greenwich GIrBIL dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 6 21:02:33 1999
+ * Modified at: Fri Dec 17 09:13:20 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+static int girbil_reset(struct irda_task *task);
+static void girbil_open(dongle_t *self, struct qos_info *qos);
+static void girbil_close(dongle_t *self);
+static int girbil_change_speed(struct irda_task *task);
+
+/* Control register 1 */
+#define GIRBIL_TXEN 0x01 /* Enable transmitter */
+#define GIRBIL_RXEN 0x02 /* Enable receiver */
+#define GIRBIL_ECAN	0x04 /* Cancel self emitted data */
+#define GIRBIL_ECHO 0x08 /* Echo control characters */
+
+/* LED Current Register (0x2) */
+#define GIRBIL_HIGH 0x20
+#define GIRBIL_MEDIUM 0x21
+#define GIRBIL_LOW 0x22
+
+/* Baud register (0x3) */
+#define GIRBIL_2400 0x30
+#define GIRBIL_4800 0x31
+#define GIRBIL_9600 0x32
+#define GIRBIL_19200 0x33
+#define GIRBIL_38400 0x34
+#define GIRBIL_57600 0x35
+#define GIRBIL_115200 0x36
+
+/* Mode register (0x4) */
+#define GIRBIL_IRDA 0x40
+#define GIRBIL_ASK 0x41
+
+/* Control register 2 (0x5) */
+#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
+
+static struct dongle_reg dongle = {
+ .type = IRDA_GIRBIL_DONGLE,
+ .open = girbil_open,
+ .close = girbil_close,
+ .reset = girbil_reset,
+ .change_speed = girbil_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init girbil_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit girbil_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void girbil_open(dongle_t *self, struct qos_info *qos)
+{
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+}
+
+static void girbil_close(dongle_t *self)
+{
+ /* Power off dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function girbil_change_speed (dev, speed)
+ *
+ * Set the speed for the Girbil type dongle.
+ *
+ */
+static int girbil_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ __u8 control[2];
+ int ret = 0;
+
+ self->speed_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Need to reset the dongle and go to 9600 bps before
+ programming */
+ if (irda_task_execute(self, girbil_reset, NULL, task,
+ (void *) speed))
+ {
+ /* Dongle needs more time to reset */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* Give reset 1 sec to finish */
+ ret = msecs_to_jiffies(1000);
+ }
+ break;
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), resetting dongle timed out!\n",
+ __FUNCTION__);
+ ret = -1;
+ break;
+ case IRDA_TASK_CHILD_DONE:
+ /* Set DTR and Clear RTS to enter command mode */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+
+ switch (speed) {
+ case 9600:
+ default:
+ control[0] = GIRBIL_9600;
+ break;
+ case 19200:
+ control[0] = GIRBIL_19200;
+ break;
+ case 38400:
+ control[0] = GIRBIL_38400;
+ break;
+ case 57600:
+ control[0] = GIRBIL_57600;
+ break;
+ case 115200:
+ control[0] = GIRBIL_115200;
+ break;
+ }
+ control[1] = GIRBIL_LOAD;
+
+ /* Write control bytes */
+ self->write(self->dev, control, 2);
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+ ret = msecs_to_jiffies(100);
+ break;
+ case IRDA_TASK_WAIT:
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function girbil_reset (driver)
+ *
+ * This function resets the girbil dongle.
+ *
+ * Algorithm:
+ * 0. set RTS, and wait at least 5 ms
+ * 1. clear RTS
+ */
+static int girbil_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
+ int ret = 0;
+
+ self->reset_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Reset dongle */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+ /* Sleep at least 5 ms */
+ ret = msecs_to_jiffies(20);
+ break;
+ case IRDA_TASK_WAIT1:
+ /* Set DTR and clear RTS to enter command mode */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+ ret = msecs_to_jiffies(20);
+ break;
+ case IRDA_TASK_WAIT2:
+ /* Write control byte */
+ self->write(self->dev, &control, 1);
+ irda_task_next_state(task, IRDA_TASK_WAIT3);
+ ret = msecs_to_jiffies(20);
+ break;
+ case IRDA_TASK_WAIT3:
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Girbil module
+ *
+ */
+module_init(girbil_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Girbil module
+ *
+ */
+module_exit(girbil_cleanup);
+
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
new file mode 100644
index 000000000000..46e0022d3258
--- /dev/null
+++ b/drivers/net/irda/irda-usb.c
@@ -0,0 +1,1602 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.c
+ * Version: 0.9b
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * IMPORTANT NOTE
+ * --------------
+ *
+ * As of kernel 2.5.20, this is the state of compliance and testing of
+ * this driver (irda-usb) with regards to the USB low level drivers...
+ *
+ * This driver has been tested SUCCESSFULLY with the following drivers :
+ * o usb-uhci-hcd (For Intel/Via USB controllers)
+ * o uhci-hcd (Alternate/JE driver for Intel/Via USB controllers)
+ * o ohci-hcd (For other USB controllers)
+ *
+ * This driver has NOT been tested with the following drivers :
+ * o ehci-hcd (USB 2.0 controllers)
+ *
+ * Note that all HCD drivers do URB_ZERO_PACKET and timeout properly,
+ * so we don't have to worry about that anymore.
+ * One common problem is the failure to set the address on the dongle,
+ * but this happens before the driver gets loaded...
+ *
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/usb.h>
+
+#include "irda-usb.h"
+
+/*------------------------------------------------------------------*/
+
+static int qos_mtt_bits = 0;
+
+/* These are the currently known IrDA USB dongles. Add new dongles here */
+static struct usb_device_id dongles[] = {
+ /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */
+ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */
+ { USB_DEVICE(0x4428, 0x012), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* KC Technology Inc., KC-180 USB IrDA Device */
+ { USB_DEVICE(0x50f, 0x180), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */
+ { USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_APP_SPEC,
+ .bInterfaceSubClass = USB_CLASS_IRDA,
+ .driver_info = IUC_DEFAULT, },
+ { }, /* The end */
+};
+
+/*
+ * Important note :
+ * Devices based on the SigmaTel chipset (0x66f, 0x4200) are not designed
+ * using the "USB-IrDA specification" (yes, there exist such a thing), and
+ * therefore not supported by this driver (don't add them above).
+ * There is a Linux driver, stir4200, that support those USB devices.
+ * Jean II
+ */
+
+MODULE_DEVICE_TABLE(usb, dongles);
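+
+/* For reference, a new well-behaved dongle would get one more line in
+ * the table above, of this form (the vendor/product IDs here are made
+ * up) :
+ *
+ *	{ USB_DEVICE(0x1234, 0x5678), .driver_info = IUC_DEFAULT },
+ *
+ * plus whatever IUC_* capability flags its firmware quirks require.
+ */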
+
+/*------------------------------------------------------------------*/
+
+static struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf);
+static void irda_usb_disconnect(struct usb_interface *intf);
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self);
+static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *dev);
+static int irda_usb_open(struct irda_usb_cb *self);
+static void irda_usb_close(struct irda_usb_cb *self);
+static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
+static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
+static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
+static int irda_usb_net_open(struct net_device *dev);
+static int irda_usb_net_close(struct net_device *dev);
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void irda_usb_net_timeout(struct net_device *dev);
+static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
+
+/************************ TRANSMIT ROUTINES ************************/
+/*
+ * Receive packets from the IrDA stack and send them on the USB pipe.
+ * Handle speed change, timeout and lots of ugliness...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_build_header(self, skb, header)
+ *
+ * Builds USB-IrDA outbound header
+ *
+ * When we send an IrDA frame over a USB pipe, we add a 1 byte header
+ * to it. This function creates this header with the proper values.
+ *
+ * Important note : the USB-IrDA spec 1.0 says very clearly in chapter 5.4.2.2
+ * that the setting of the link speed and xbof number in this outbound header
+ * should be applied *AFTER* the frame has been sent.
+ * Unfortunately, some devices are not compliant with that... It seems that
+ * reading the spec is far too difficult...
+ * Jean II
+ */
+static void irda_usb_build_header(struct irda_usb_cb *self,
+ __u8 *header,
+ int force)
+{
+ /* Set the negotiated link speed */
+ if (self->new_speed != -1) {
+ /* Hum... Ugly hack :-(
+ * Some devices are not compliant with the spec and change
+ * parameters *before* sending the frame. - Jean II
+ */
+ if ((self->capability & IUC_SPEED_BUG) &&
+ (!force) && (self->speed != -1)) {
+ /* No speed and xbofs change here
+ * (we'll do it later in the write callback) */
+ IRDA_DEBUG(2, "%s(), not changing speed yet\n", __FUNCTION__);
+ *header = 0;
+ return;
+ }
+
+ IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, self->new_speed);
+ self->speed = self->new_speed;
+ /* We will do ` self->new_speed = -1; ' in the completion
+ * handler just in case the current URB fails - Jean II */
+
+ switch (self->speed) {
+ case 2400:
+ *header = SPEED_2400;
+ break;
+ default:
+ case 9600:
+ *header = SPEED_9600;
+ break;
+ case 19200:
+ *header = SPEED_19200;
+ break;
+ case 38400:
+ *header = SPEED_38400;
+ break;
+ case 57600:
+ *header = SPEED_57600;
+ break;
+ case 115200:
+ *header = SPEED_115200;
+ break;
+ case 576000:
+ *header = SPEED_576000;
+ break;
+ case 1152000:
+ *header = SPEED_1152000;
+ break;
+ case 4000000:
+ *header = SPEED_4000000;
+ self->new_xbofs = 0;
+ break;
+ }
+ } else
+ /* No change */
+ *header = 0;
+
+ /* Set the negotiated additional XBOFS */
+ if (self->new_xbofs != -1) {
+ IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __FUNCTION__, self->new_xbofs);
+ self->xbofs = self->new_xbofs;
+ /* We will do ` self->new_xbofs = -1; ' in the completion
+ * handler just in case the current URB fails - Jean II */
+
+ switch (self->xbofs) {
+ case 48:
+ *header |= 0x10;
+ break;
+ case 28:
+ case 24: /* USB spec 1.0 says 24 */
+ *header |= 0x20;
+ break;
+ default:
+ case 12:
+ *header |= 0x30;
+ break;
+ case 5: /* Bug in IrLAP spec? (should be 6) */
+ case 6:
+ *header |= 0x40;
+ break;
+ case 3:
+ *header |= 0x50;
+ break;
+ case 2:
+ *header |= 0x60;
+ break;
+ case 1:
+ *header |= 0x70;
+ break;
+ case 0:
+ *header |= 0x80;
+ break;
+ }
+ }
+}
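+
+/* For reference, a minimal sketch of the xbofs half of the encoding
+ * above, as a helper (the name is illustrative only) : the extra-BOF
+ * count goes in the high nibble of the one-byte header, the SPEED_*
+ * code in the low nibble. Counts not listed use 0x30 (12 xbofs), like
+ * the default case above.
+ */
+static inline __u8 irda_usb_xbofs_nibble(int xbofs)
+{
+	switch (xbofs) {
+	case 48:	return 0x10;
+	case 28:
+	case 24:	return 0x20;	/* USB spec 1.0 says 24 */
+	case 6:
+	case 5:		return 0x40;
+	case 3:		return 0x50;
+	case 2:		return 0x60;
+	case 1:		return 0x70;
+	case 0:		return 0x80;
+	case 12:
+	default:	return 0x30;
+	}
+}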
+
+/*------------------------------------------------------------------*/
+/*
+ * Send a command to change the speed of the dongle
+ * Need to be called with spinlock on.
+ */
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
+{
+ __u8 *frame;
+ struct urb *urb;
+ int ret;
+
+ IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __FUNCTION__,
+ self->new_speed, self->new_xbofs);
+
+ /* Grab the speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
+ return;
+ }
+
+ /* Allocate the fake frame */
+ frame = self->speed_buff;
+
+ /* Set the new speed and xbofs in this fake frame */
+ irda_usb_build_header(self, frame, 1);
+
+ /* Submit the 0 length IrDA frame to trigger new speed settings */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ frame, IRDA_USB_SPEED_MTU,
+ speed_bulk_callback, self);
+ urb->transfer_buffer_length = USB_IRDA_HEADER;
+ urb->transfer_flags = URB_ASYNC_UNLINK;
+
+ /* Irq disabled -> GFP_ATOMIC */
+ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
+ IRDA_WARNING("%s(), failed Speed URB\n", __FUNCTION__);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Speed URB callback
+ * Now, we can only get called for the speed URB.
+ */
+static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct irda_usb_cb *self = urb->context;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+ /* We should always be called for the speed URB */
+ IRDA_ASSERT(urb == self->speed_urb, return;);
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer.
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* New speed and xbofs are now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* Allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Send an IrDA frame to the USB dongle (for transmission)
+ */
+static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct irda_usb_cb *self = netdev->priv;
+ struct urb *urb = self->tx_urb;
+ unsigned long flags;
+ s32 speed;
+ s16 xbofs;
+ int res, mtt;
+ int err = 1; /* Failed */
+
+ IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name);
+
+ netif_stop_queue(netdev);
+
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if the device is still there.
+ * We need to check self->present under the spinlock because
+ * irda_usb_disconnect() is synchronous - Jean II */
+ if (!self->present) {
+ IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__);
+ goto drop;
+ }
+
+ /* Check if we need to change the number of xbofs */
+ xbofs = irda_get_next_xbofs(skb);
+ if ((xbofs != self->xbofs) && (xbofs != -1)) {
+ self->new_xbofs = xbofs;
+ }
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->speed) && (speed != -1)) {
+ /* Set the desired speed */
+ self->new_speed = speed;
+
+ /* Check for empty frame */
+ if (!skb->len) {
+ /* IrLAP send us an empty frame to make us change the
+ * speed. Changing speed with the USB adapter is in
+ * fact sending an empty frame to the adapter, so we
+ * could just let the present function do its job.
+ * However, we would wait for min turn time,
+ * do an extra memcpy and increment packet counters...
+ * Jean II */
+ irda_usb_change_speed_xbofs(self);
+ netdev->trans_start = jiffies;
+ /* Will netif_wake_queue() in callback */
+ err = 0; /* No error */
+ goto drop;
+ }
+ }
+
+ if (urb->status != 0) {
+ IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
+ goto drop;
+ }
+
+ /* Make sure there is room for IrDA-USB header. The actual
+ * allocation will be done lower in skb_push().
+ * Also, we don't use skb_cow() directly, because it requires
+ * headroom >= 16, which forces unnecessary copies - Jean II */
+ if (skb_headroom(skb) < USB_IRDA_HEADER) {
+ IRDA_DEBUG(0, "%s(), Insufficient skb headroom.\n", __FUNCTION__);
+ if (skb_cow(skb, USB_IRDA_HEADER)) {
+ IRDA_WARNING("%s(), failed skb_cow() !!!\n", __FUNCTION__);
+ goto drop;
+ }
+ }
+
+ /* Change setting for next frame */
+ irda_usb_build_header(self, skb_push(skb, USB_IRDA_HEADER), 0);
+
+ /* FIXME: Make macro out of this one */
+ ((struct irda_skb_cb *)skb->cb)->context = self;
+
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ skb->data, IRDA_SKB_MAX_MTU,
+ write_bulk_callback, skb);
+ urb->transfer_buffer_length = skb->len;
+ /* Note : unlink *must* be Asynchronous because of the code in
+ * irda_usb_net_timeout() -> call in irq - Jean II */
+ urb->transfer_flags = URB_ASYNC_UNLINK;
+ /* This flag (URB_ZERO_PACKET) indicates that what we send is not
+ * a continuous stream of data but separate packets.
+ * In this case, the USB layer will insert an empty USB frame (TD)
+ * after each of our packets that is exact multiple of the frame size.
+ * This is how the dongle will detect the end of packet - Jean II */
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
+ /* Generate min turn time. FIXME: can we do better than this? */
+ /* Trying to compute a turnaround time at this level is like trying to
+ * measure a processor clock cycle with a wrist-watch - approximate at best...
+ *
+ * What we know is the last time we received a frame over USB.
+ * Due to latency over USB that depend on the USB load, we don't
+ * know when this frame was received over IrDA (a few ms before ?)
+ * Then, same story for our outgoing frame...
+ *
+ * In theory, the USB dongle is supposed to handle the turnaround
+ * by itself (spec 1.0, chapter 4, page 6). Who knows ??? That's
+ * why this code is enabled only for dongles that don't meet
+ * the spec.
+ * Jean II */
+ if (self->capability & IUC_NO_TURN) {
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int diff;
+ do_gettimeofday(&self->now);
+ diff = self->now.tv_usec - self->stamp.tv_usec;
+#ifdef IU_USB_MIN_RTT
+ /* Factor in USB delays -> Get rid of udelay() that
+ * would be lost in the noise - Jean II */
+ diff += IU_USB_MIN_RTT;
+#endif /* IU_USB_MIN_RTT */
+ /* If the usec counter did wraparound, the diff will
+ * go negative (tv_usec is a long), so we need to
+ * correct it by one second. Jean II */
+ if (diff < 0)
+ diff += 1000000;
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+ if (mtt > 1000)
+ mdelay(mtt/1000);
+ else
+ udelay(mtt);
+ }
+ }
+ }
+
+ /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
+ if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
+ IRDA_WARNING("%s(), failed Tx URB\n", __FUNCTION__);
+ self->stats.tx_errors++;
+ /* Let USB recover : We will catch that in the watchdog */
+ /*netif_start_queue(netdev);*/
+ } else {
+ /* Increment packet stats */
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += skb->len;
+
+ netdev->trans_start = jiffies;
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return 0;
+
+drop:
+ /* Drop silently the skb and exit */
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return err; /* Usually 1 */
+}
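+
+/* For reference, a minimal sketch of the turnaround computation done
+ * above, as a helper (the name is illustrative only) : given the
+ * reception timestamp of the last frame and the required min turn
+ * time in usec, return how much of it is still left to wait. The
+ * one-second correction handles tv_usec wrapping, as in
+ * irda_usb_hard_xmit().
+ */
+static inline int irda_usb_residual_mtt(const struct timeval *stamp,
+					const struct timeval *now, int mtt)
+{
+	int diff = now->tv_usec - stamp->tv_usec;
+
+	if (diff < 0)
+		diff += 1000000;	/* usec counter wrapped around */
+
+	return (mtt > diff) ? mtt - diff : 0;
+}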
+
+/*------------------------------------------------------------------*/
+/*
+ * Note : this function will be called only for tx_urb...
+ */
+static void write_bulk_callback(struct urb *urb, struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct sk_buff *skb = urb->context;
+ struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+ /* We should always be called for the Tx URB */
+ IRDA_ASSERT(urb == self->tx_urb, return;);
+
+ /* Free up the skb */
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer,
+ * and we could go in recursion and blow the kernel stack...
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* Make sure we read self->present properly */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* If the network is closed, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+ /* If changes to speed or xbofs is pending... */
+ if ((self->new_speed != -1) || (self->new_xbofs != -1)) {
+ if ((self->new_speed != self->speed) ||
+ (self->new_xbofs != self->xbofs)) {
+ /* We haven't changed speed yet (because of
+ * IUC_SPEED_BUG), so do it now - Jean II */
+ IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__);
+ irda_usb_change_speed_xbofs(self);
+ } else {
+ /* New speed and xbofs are now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+ /* Done, waiting for next packet */
+ netif_wake_queue(self->netdev);
+ }
+ } else {
+ /* Otherwise, allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog timer from the network layer.
+ * After a predetermined timeout, if we don't give confirmation that
+ * the packet has been sent (i.e. no call to netif_wake_queue()),
+ * the network layer will call this function.
+ * Note that the URBs we submit also have a timeout. When the URB timeout
+ * expires, the normal URB callback is called (write_bulk_callback()).
+ */
+static void irda_usb_net_timeout(struct net_device *netdev)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = netdev->priv;
+ struct urb *urb;
+ int done = 0; /* If we have made any progress */
+
+ IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__);
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Protect us from USB callbacks, net Tx and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* self->present *MUST* be read under spinlock */
+ if (!self->present) {
+ IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+ /* Check speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, we will come back here.
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNABORTED: /* -103 */
+ case -ECONNRESET: /* -104 */
+ case -ETIMEDOUT: /* -110 */
+ case -ENOENT: /* -2 (urb unlinked by us) */
+ default: /* ??? - Play safe */
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+
+ /* Check Tx URB */
+ urb = self->tx_urb;
+ if (urb->status != 0) {
+ struct sk_buff *skb = urb->context;
+
+ IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+
+ /* Increase error count */
+ self->stats.tx_errors++;
+
+#ifdef IU_BUG_KICK_TIMEOUT
+ /* Can't be a bad idea to reset the speed ;-) - Jean II */
+ if(self->new_speed == -1)
+ self->new_speed = self->speed;
+ if(self->new_xbofs == -1)
+ self->new_xbofs = self->xbofs;
+ irda_usb_change_speed_xbofs(self);
+#endif /* IU_BUG_KICK_TIMEOUT */
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, because urb->status will
+ * be -ENOENT. We will fix that at the next watchdog,
+ * leaving more time to USB to recover...
+ * Also, we are in interrupt, so we need to have
+ * URB_ASYNC_UNLINK to work properly...
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNABORTED: /* -103 */
+ case -ECONNRESET: /* -104 */
+ case -ETIMEDOUT: /* -110 */
+ case -ENOENT: /* -2 (urb unlinked by us) */
+ default: /* ??? - Play safe */
+ if(skb != NULL) {
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+ }
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Maybe we need a reset */
+ /* Note : Some drivers seem to use a usb_set_interface() when they
+ * need to reset the hardware. Hum...
+ */
+
+ /* if(done == 0) */
+}
+
+/************************* RECEIVE ROUTINES *************************/
+/*
+ * Receive packets from the USB layer stack and pass them to the IrDA stack.
+ * Try to work around USB failures...
+ */
+
+/*
+ * Note :
+ * Some of you may have noticed that most dongles have an interrupt in pipe
+ * that we don't use. Here is the little secret...
+ * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic
+ * in every USB frame. This is unnecessary overhead.
+ * The interrupt in pipe will generate an event every time a packet is
+ * received. Reading an interrupt pipe adds minimal overhead, but has some
+ * latency (~1ms).
+ * If we are connected (speed != 9600), we want to minimise latency, so
+ * we just always hang the Rx URB and ignore the interrupt.
+ * If we are not connected (speed == 9600), there is usually no Rx traffic,
+ * and we want to minimise the USB overhead. In this case we should wait
+ * on the interrupt pipe and hang the Rx URB only when an interrupt is
+ * received.
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Submit a Rx URB to the USB layer to handle reception of a frame
+ * Mostly called by the completion callback of the previous URB.
+ *
+ * Jean II
+ */
+static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struct urb *urb)
+{
+ struct irda_skb_cb *cb;
+ int ret;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* This should never happen */
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(urb != NULL, return;);
+
+ /* Save ourselves in the skb */
+ cb = (struct irda_skb_cb *) skb->cb;
+ cb->context = self;
+
+ /* Reinitialize URB */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
+ skb->data, skb->truesize,
+ irda_usb_receive, skb);
+ /* Note : unlink *must* be synchronous because of the code in
+ * irda_usb_net_close() -> free the skb - Jean II */
+ urb->status = 0;
+
+ /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ /* If this ever happens, we are in deep s***.
+ * Basically, the Rx path will stop... */
+ IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
+ __FUNCTION__, ret);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_receive(urb)
+ *
+ * Called by the USB subsystem when a frame has been received
+ *
+ */
+static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct sk_buff *newskb;
+ struct sk_buff *dataskb;
+ int docopy;
+
+ IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* If the network is closed or the device gone, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__);
+ /* Don't re-submit the URB : will stall the Rx path */
+ return;
+ }
+
+ /* Check the status */
+ if (urb->status != 0) {
+ switch (urb->status) {
+ case -EILSEQ:
+ self->stats.rx_errors++;
+ self->stats.rx_crc_errors++;
+ break;
+ case -ECONNRESET: /* -104 */
+ IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
+ /* uhci_cleanup_unlink() is going to kill the Rx
+ * URB just after we return. No problem, at this
+ * point the URB will be idle ;-) - Jean II */
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
+ break;
+ }
+ goto done;
+ }
+
+ /* Check for empty frames */
+ if (urb->actual_length <= USB_IRDA_HEADER) {
+ IRDA_WARNING("%s(), empty frame!\n", __FUNCTION__);
+ goto done;
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ do_gettimeofday(&self->stamp);
+
+ /* Check if we need to copy the data to a new skb or not.
+ * For most frames, we use ZeroCopy and pass the already
+ * allocated skb up the stack.
+ * If the frame is small, it is more efficient to copy it
+ * to save memory (copy will be fast anyway - that's
+ * called Rx-copy-break). Jean II */
+ docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD);
+
+ /* Allocate a new skb */
+ newskb = dev_alloc_skb(docopy ? urb->actual_length : IRDA_SKB_MAX_MTU);
+ if (!newskb) {
+ self->stats.rx_dropped++;
+ /* We could deliver the current skb, but this would stall
+ * the Rx path. Better drop the packet... Jean II */
+ goto done;
+ }
+
+ /* Make sure the IP header gets aligned (IrDA header is 5 bytes) */
+ /* But IrDA-USB header is 1 byte. Jean II */
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+
+ if(docopy) {
+ /* Copy packet, so we can recycle the original */
+ memcpy(newskb->data, skb->data, urb->actual_length);
+ /* Deliver this new skb */
+ dataskb = newskb;
+ /* And hook the old skb to the URB
+ * Note : we don't need to "clean up" the old skb,
+ * as we never touched it. Jean II */
+ } else {
+ /* We are using ZeroCopy. Deliver old skb */
+ dataskb = skb;
+ /* And hook the new skb to the URB */
+ skb = newskb;
+ }
+
+ /* Set proper length on skb & remove USB-IrDA header */
+ skb_put(dataskb, urb->actual_length);
+ skb_pull(dataskb, USB_IRDA_HEADER);
+
+ /* Ask the networking layer to queue the packet for the IrDA stack */
+ dataskb->dev = self->netdev;
+ dataskb->mac.raw = dataskb->data;
+ dataskb->protocol = htons(ETH_P_IRDA);
+ netif_rx(dataskb);
+
+ /* Keep stats up to date */
+ self->stats.rx_bytes += dataskb->len;
+ self->stats.rx_packets++;
+ self->netdev->last_rx = jiffies;
+
+done:
+ /* Note : at this point, the URB we've just received (urb)
+ * is still referenced by the USB layer. For example, if we
+ * have received a -ECONNRESET, uhci_cleanup_unlink() will
+ * continue to process it (in fact, cleaning it up).
+ * If we were to submit this URB, disaster would ensue.
+ * Therefore, we submit our idle URB, and put this URB in our
+ * idle slot....
+ * Jean II */
+ /* Note : with this scheme, we could submit the idle URB before
+ * processing the Rx URB. Another time... Jean II */
+
+ /* Submit the idle URB to replace the URB we've just received */
+ irda_usb_submit(self, skb, self->idle_rx_urb);
+ /* Recycle Rx URB : Now, the idle URB is the present one */
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+}
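+
+/* For reference, a minimal sketch of the Rx-copy-break decision used
+ * above, as a helper (the name is illustrative only) : frames below
+ * the threshold are copied into a right-sized skb, anything at or
+ * above it is delivered zero-copy and the fresh skb is hooked back
+ * onto the URB.
+ */
+static inline int irda_usb_rx_copybreak(unsigned int len)
+{
+	return len < IRDA_RX_COPY_THRESHOLD;
+}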
+
+/*------------------------------------------------------------------*/
+/*
+ * Callback from IrDA layer. IrDA wants to know if we have
+ * started receiving anything.
+ */
+static int irda_usb_is_receiving(struct irda_usb_cb *self)
+{
+ /* Note : because of the way UHCI works, it's almost impossible
+ * to get this info. The controller DMAs directly to memory and
+ * signals only when the whole frame is finished. Knowing whether the
+ * first TD of the URB has been filled or not seems like hard work...
+ *
+ * The other solution would be to use the "receiving" command
+ * on the default descriptor with a usb_control_msg(), but that
+ * would add USB traffic and would return result only in the
+ * next USB frame (~1ms).
+ *
+ * I've been told that current dongles send status info on their
+ * interrupt endpoint, and that's what the Windows driver uses
+ * to know this info. Unfortunately, this is not yet in the spec...
+ *
+ * Jean II
+ */
+
+ return 0; /* For now */
+}
+
+/********************** IRDA DEVICE CALLBACKS **********************/
+/*
+ * Main calls from the IrDA/Network subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ * We only deal with the IrDA side of the business, the USB side will
+ * be dealt with below...
+ */
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ *
+ * Note : don't mess with self->netopen - Jean II
+ */
+static int irda_usb_net_open(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ char hwname[16];
+ int i;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = (struct irda_usb_cb *) netdev->priv;
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* Can only open the device if it's there */
+ if(!self->present) {
+ IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
+ return -1;
+ }
+
+ /* Initialise default speed and xbofs value
+ * (IrLAP will change that soon) */
+ self->speed = -1;
+ self->xbofs = -1;
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* To do *before* submitting Rx urbs and starting net Tx queue
+ * Jean II */
+ self->netopen = 1;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ * Note : will send immediately a speed change...
+ */
+ sprintf(hwname, "usb#%d", self->usbdev->devnum);
+ self->irlap = irlap_open(netdev, &self->qos, hwname);
+ IRDA_ASSERT(self->irlap != NULL, return -1;);
+
+ /* Allow IrLAP to send data to us */
+ netif_start_queue(netdev);
+
+ /* We submit all the Rx URBs except for one that we keep idle.
+ * Need to be initialised before submitting the other URBs, because
+ * in some cases as soon as we submit the URBs the USB layer
+ * will trigger a dummy receive - Jean II */
+ self->idle_rx_urb = self->rx_urb[IU_MAX_ACTIVE_RX_URBS];
+ self->idle_rx_urb->context = NULL;
+
+ /* Now that we can pass data to IrLAP, allow the USB layer
+ * to send us some data... */
+ for (i = 0; i < IU_MAX_ACTIVE_RX_URBS; i++) {
+ struct sk_buff *skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!skb) {
+ /* If this ever happens, we are in deep s***.
+ * Basically, we can't start the Rx path... */
+ IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
+ __FUNCTION__);
+ return -1;
+ }
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+ irda_usb_submit(self, skb, self->rx_urb[i]);
+ }
+
+ /* Ready to play !!! */
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_close (self)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int irda_usb_net_close(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ int i;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = (struct irda_usb_cb *) netdev->priv;
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* Clear this flag *before* unlinking the urbs and *before*
+ * stopping the network Tx queue - Jean II */
+ self->netopen = 0;
+
+ /* Stop network Tx queue */
+ netif_stop_queue(netdev);
+
+ /* Deallocate all the Rx path buffers (URBs and skb) */
+ for (i = 0; i < IU_MAX_RX_URBS; i++) {
+ struct urb *urb = self->rx_urb[i];
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ /* Cancel the receive command */
+ usb_kill_urb(urb);
+ /* The skb is ours, free it */
+ if(skb) {
+ dev_kfree_skb(skb);
+ urb->context = NULL;
+ }
+ }
+ /* Cancel Tx and speed URB - need to be synchronous to avoid races */
+ self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
+ usb_kill_urb(self->tx_urb);
+ self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
+ usb_kill_urb(self->speed_urb);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ unsigned long flags;
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct irda_usb_cb *self;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = dev->priv;
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+ /* Check if the device is still there */
+ if(self->present) {
+ /* Set the desired speed */
+ self->new_speed = irq->ifr_baudrate;
+ irda_usb_change_speed_xbofs(self);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Check if the IrDA stack is still there */
+ if(self->netopen)
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = irda_usb_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Get device stats (for /proc/net/dev and ifconfig)
+ */
+static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
+{
+ struct irda_usb_cb *self = dev->priv;
+ return &self->stats;
+}
+
+/********************* IRDA CONFIG SUBROUTINES *********************/
+/*
+ * Various subroutines dealing with IrDA and network stuff we use to
+ * configure and initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set proper values in the IrDA QOS structure
+ */
+static inline void irda_usb_init_qos(struct irda_usb_cb *self)
+{
+ struct irda_class_desc *desc;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ desc = self->irda_desc;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* See spec section 7.2 for meaning.
+ * Values are little endian (as is most USB stuff); the IrDA stack
+ * uses them in native order (see parameters.c). - Jean II */
+ self->qos.baud_rate.bits = le16_to_cpu(desc->wBaudRate);
+ self->qos.min_turn_time.bits = desc->bmMinTurnaroundTime;
+ self->qos.additional_bofs.bits = desc->bmAdditionalBOFs;
+ self->qos.window_size.bits = desc->bmWindowSize;
+ self->qos.data_size.bits = desc->bmDataSize;
+
+ IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
+ __FUNCTION__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
+
+ /* Don't always trust what the dongle tells us */
+ if(self->capability & IUC_SIR_ONLY)
+ self->qos.baud_rate.bits &= 0x00ff;
+ if(self->capability & IUC_SMALL_PKT)
+ self->qos.data_size.bits = 0x07;
+ if(self->capability & IUC_NO_WINDOW)
+ self->qos.window_size.bits = 0x01;
+ if(self->capability & IUC_MAX_WINDOW)
+ self->qos.window_size.bits = 0x7f;
+ if(self->capability & IUC_MAX_XBOFS)
+ self->qos.additional_bofs.bits = 0x01;
+
+#if 1
+ /* Module parameter can override the min turn time */
+ if (qos_mtt_bits)
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+#endif
+ /*
+ * Note : most of those values apply only for the receive path,
+ * the transmit path will be set differently - Jean II
+ */
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Initialise the network side of the irda-usb instance
+ * Called when a new USB instance is registered in irda_usb_probe()
+ */
+static inline int irda_usb_open(struct irda_usb_cb *self)
+{
+ struct net_device *netdev = self->netdev;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ irda_usb_init_qos(self);
+
+ /* Override the network functions we need to use */
+ netdev->hard_start_xmit = irda_usb_hard_xmit;
+ netdev->tx_timeout = irda_usb_net_timeout;
+ netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */
+ netdev->open = irda_usb_net_open;
+ netdev->stop = irda_usb_net_close;
+ netdev->get_stats = irda_usb_net_get_stats;
+ netdev->do_ioctl = irda_usb_net_ioctl;
+
+ return register_netdev(netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Cleanup the network side of the irda-usb instance
+ * Called when a USB instance is removed in irda_usb_disconnect()
+ */
+static inline void irda_usb_close(struct irda_usb_cb *self)
+{
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Remove the speed buffer */
+ if (self->speed_buff != NULL) {
+ kfree(self->speed_buff);
+ self->speed_buff = NULL;
+ }
+}
+
+/********************** USB CONFIG SUBROUTINES **********************/
+/*
+ * Various subroutines dealing with USB stuff we use to configure and
+ * initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_parse_endpoints(self, endpoint, ennum)
+ *
+ * Parse the various endpoints and find the one we need.
+ *
+ * The endpoints are the pipes used to communicate with the USB device.
+ * The spec defines 2 endpoints of type bulk transfer, one in, and one out.
+ * These are used to pass frames back and forth with the dongle.
+ * Most dongles also have an interrupt endpoint, which will probably be
+ * documented in the next spec...
+ */
+static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_host_endpoint *endpoint, int ennum)
+{
+ int i; /* Endpoint index in table */
+
+ /* Init : no endpoints */
+ self->bulk_in_ep = 0;
+ self->bulk_out_ep = 0;
+ self->bulk_int_ep = 0;
+
+ /* Let's look at all those endpoints */
+ for(i = 0; i < ennum; i++) {
+ /* All those variables will get optimised by the compiler,
+ * so let's aim for clarity... - Jean II */
+ __u8 ep; /* Endpoint address */
+ __u8 dir; /* Endpoint direction */
+ __u8 attr; /* Endpoint attribute */
+ __u16 psize; /* Endpoint max packet size in bytes */
+
+ /* Get endpoint address, direction and attribute */
+ ep = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ dir = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ attr = endpoint[i].desc.bmAttributes;
+ psize = le16_to_cpu(endpoint[i].desc.wMaxPacketSize);
+
+ /* Is it a bulk endpoint ??? */
+ if(attr == USB_ENDPOINT_XFER_BULK) {
+ /* We need to find an IN and an OUT */
+ if(dir == USB_DIR_IN) {
+ /* This is our Rx endpoint */
+ self->bulk_in_ep = ep;
+ } else {
+ /* This is our Tx endpoint */
+ self->bulk_out_ep = ep;
+ self->bulk_out_mtu = psize;
+ }
+ } else {
+ if((attr == USB_ENDPOINT_XFER_INT) &&
+ (dir == USB_DIR_IN)) {
+ /* This is our interrupt endpoint */
+ self->bulk_int_ep = ep;
+ } else {
+ IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __FUNCTION__, ep);
+ }
+ }
+ }
+
+ IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
+ __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
+	/* Legal bulk packet sizes are 8, 16, 32 or 64 bytes; this driver expects 64 */
+ IRDA_ASSERT(self->bulk_out_mtu == 64, ;);
+
+ return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
+}
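+
+/*------------------------------------------------------------------*/
+/*
+ * Hedged illustration (not part of the call flow above) : the endpoint
+ * addresses stored by irda_usb_parse_endpoints() are what the Rx/Tx
+ * paths later turn into pipe handles, using the standard kernel helpers
+ * usb_rcvbulkpipe()/usb_sndbulkpipe(), roughly as sketched below.
+ */
+#if 0
+static void irda_usb_pipes_sketch(struct irda_usb_cb *self)
+{
+	unsigned int rx_pipe = usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep);
+	unsigned int tx_pipe = usb_sndbulkpipe(self->usbdev, self->bulk_out_ep);
+
+	/* Such pipes would typically be handed to usb_fill_bulk_urb()
+	 * when submitting the Rx and Tx URBs. */
+	(void) rx_pipe;
+	(void) tx_pipe;
+}
+#endif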
+
+#ifdef IU_DUMP_CLASS_DESC
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_dump_class_desc(desc)
+ *
+ * Prints out the contents of the IrDA class descriptor
+ *
+ */
+static inline void irda_usb_dump_class_desc(struct irda_class_desc *desc)
+{
+ /* Values are little endian */
+ printk("bLength=%x\n", desc->bLength);
+ printk("bDescriptorType=%x\n", desc->bDescriptorType);
+ printk("bcdSpecRevision=%x\n", le16_to_cpu(desc->bcdSpecRevision));
+ printk("bmDataSize=%x\n", desc->bmDataSize);
+ printk("bmWindowSize=%x\n", desc->bmWindowSize);
+ printk("bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime);
+ printk("wBaudRate=%x\n", le16_to_cpu(desc->wBaudRate));
+ printk("bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs);
+ printk("bIrdaRateSniff=%x\n", desc->bIrdaRateSniff);
+ printk("bMaxUnicastList=%x\n", desc->bMaxUnicastList);
+}
+#endif /* IU_DUMP_CLASS_DESC */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_find_class_desc(intf)
+ *
+ * Returns instance of IrDA class descriptor, or NULL if not found
+ *
+ * The class descriptor is some extra info that IrDA USB devices will
+ * offer to us, describing their IrDA characteristics. We will use that in
+ * irda_usb_init_qos()
+ */
+static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf)
+{
+ struct usb_device *dev = interface_to_usbdev (intf);
+ struct irda_class_desc *desc;
+ int ret;
+
+ desc = kmalloc(sizeof (*desc), GFP_KERNEL);
+ if (desc == NULL)
+ return NULL;
+ memset(desc, 0, sizeof(*desc));
+
+ /* USB-IrDA class spec 1.0:
+ * 6.1.3: Standard "Get Descriptor" Device Request is not
+ * appropriate to retrieve class-specific descriptor
+ * 6.2.5: Class Specific "Get Class Descriptor" Interface Request
+ * is mandatory and returns the USB-IrDA class descriptor
+ */
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev,0),
+ IU_REQ_GET_CLASS_DESC,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, intf->altsetting->desc.bInterfaceNumber, desc,
+ sizeof(*desc), 500);
+
+ IRDA_DEBUG(1, "%s(), ret=%d\n", __FUNCTION__, ret);
+ if (ret < sizeof(*desc)) {
+ IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
+ (ret<0) ? "failed" : "too short", ret);
+ }
+ else if (desc->bDescriptorType != USB_DT_IRDA) {
+ IRDA_WARNING("usb-irda: bad class_descriptor type\n");
+ }
+ else {
+#ifdef IU_DUMP_CLASS_DESC
+ irda_usb_dump_class_desc(desc);
+#endif /* IU_DUMP_CLASS_DESC */
+
+ return desc;
+ }
+ kfree(desc);
+ return NULL;
+}
+
+/*********************** USB DEVICE CALLBACKS ***********************/
+/*
+ * Main calls from the USB subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ * The USB layer protects us from reentrancy (via the BKL), so we don't need
+ * to use a spinlock in there... Jean II
+ */
+static int irda_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *net;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct irda_usb_cb *self = NULL;
+ struct usb_host_interface *interface;
+ struct irda_class_desc *irda_desc;
+ int ret = -ENOMEM;
+ int i; /* Driver instance index / Rx URB index */
+
+	/* Note : the probe makes sure to call us only for devices that
+	 * match the list of dongles (top of the file). So, we
+	 * don't need to check if the dongle is really ours.
+	 * Jean II */
+
+ IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ net = alloc_irdadev(sizeof(*self));
+ if (!net)
+ goto err_out;
+
+ SET_MODULE_OWNER(net);
+ SET_NETDEV_DEV(net, &intf->dev);
+ self = net->priv;
+ self->netdev = net;
+ spin_lock_init(&self->lock);
+
+ /* Create all of the needed urbs */
+ for (i = 0; i < IU_MAX_RX_URBS; i++) {
+ self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->rx_urb[i]) {
+ goto err_out_1;
+ }
+ }
+ self->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->tx_urb) {
+ goto err_out_1;
+ }
+ self->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->speed_urb) {
+ goto err_out_2;
+ }
+
+ /* Is this really necessary? (no, except maybe for broken devices) */
+ if (usb_reset_configuration (dev) < 0) {
+ err("reset_configuration failed");
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Is this really necessary? */
+	/* Note : some drivers hardcode the interface number, some others
+	 * specify an alternate setting, but very few drivers do it like this.
+	 * Jean II */
+ ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
+ IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret);
+ switch (ret) {
+ case 0:
+ break;
+ case -EPIPE: /* -EPIPE = -32 */
+ /* Martin Diehl says if we get a -EPIPE we should
+ * be fine and we don't need to do a usb_clear_halt().
+ * - Jean II */
+ IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __FUNCTION__);
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), Unknown error %d\n", __FUNCTION__, ret);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Find our endpoints */
+ interface = intf->cur_altsetting;
+ if(!irda_usb_parse_endpoints(self, interface->endpoint,
+ interface->desc.bNumEndpoints)) {
+ IRDA_ERROR("%s(), Bogus endpoints...\n", __FUNCTION__);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Find IrDA class descriptor */
+ irda_desc = irda_usb_find_class_desc(intf);
+ ret = -ENODEV;
+ if (irda_desc == NULL)
+ goto err_out_3;
+
+ self->irda_desc = irda_desc;
+ self->present = 1;
+ self->netopen = 0;
+ self->capability = id->driver_info;
+ self->usbdev = dev;
+ self->usbintf = intf;
+
+ /* Allocate the buffer for speed changes */
+ /* Don't change this buffer size and allocation without doing
+ * some heavy and complete testing. Don't ask why :-(
+ * Jean II */
+ self->speed_buff = (char *) kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
+ if (self->speed_buff == NULL)
+ goto err_out_3;
+
+ memset(self->speed_buff, 0, IRDA_USB_SPEED_MTU);
+
+ ret = irda_usb_open(self);
+ if (ret)
+ goto err_out_4;
+
+ IRDA_MESSAGE("IrDA: Registered device %s\n", net->name);
+ usb_set_intfdata(intf, self);
+ return 0;
+
+err_out_4:
+ kfree(self->speed_buff);
+err_out_3:
+ /* Free all urbs that we may have created */
+ usb_free_urb(self->speed_urb);
+err_out_2:
+ usb_free_urb(self->tx_urb);
+err_out_1:
+ for (i = 0; i < IU_MAX_RX_URBS; i++) {
+ if (self->rx_urb[i])
+ usb_free_urb(self->rx_urb[i]);
+ }
+ free_netdev(net);
+err_out:
+ return ret;
+}
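+
+/*------------------------------------------------------------------*/
+/*
+ * Hedged illustration of how the dongle match feeds irda_usb_probe()
+ * above : entries in the dongles[] id table (top of this file) carry
+ * IUC_* capability flags in .driver_info, which probe copies into
+ * self->capability. The vendor/product IDs below are made up for the
+ * example only and do not describe a real dongle.
+ */
+#if 0
+static struct usb_device_id dongles_example[] = {
+	/* Hypothetical dongle that mishandles speed changes and large
+	 * Rx windows */
+	{ USB_DEVICE(0x1234, 0x5678),
+	  .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+	{ }, /* The end */
+};
+#endif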
+
+/*------------------------------------------------------------------*/
+/*
+ * The current irda-usb device is removed, the USB layer tells us
+ * to shut it down...
+ * One of the constraints is that when we exit this function,
+ * we can no longer use the usb_device. Gone. Destroyed. kfree().
+ * Most other subsystems allow you to destroy the instance at a time
+ * that is convenient to you, or to postpone it to a later date, but
+ * not the USB subsystem.
+ * So, we must make bloody sure that everything gets deactivated.
+ * Jean II
+ */
+static void irda_usb_disconnect(struct usb_interface *intf)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ usb_set_intfdata(intf, NULL);
+ if (!self)
+ return;
+
+ /* Make sure that the Tx path is not executing. - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+
+	/* Oops! We are not there any more.
+	 * This will stop/deactivate the Tx path. - Jean II */
+ self->present = 0;
+
+	/* We need to have irqs enabled to unlink the URBs. That's OK,
+ * at this point the Tx path is gone - Jean II */
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Hum... Check if networking is still active (avoid races) */
+ if((self->netopen) || (self->irlap)) {
+ /* Accept no more transmissions */
+ /*netif_device_detach(self->netdev);*/
+ netif_stop_queue(self->netdev);
+ /* Stop all the receive URBs */
+ for (i = 0; i < IU_MAX_RX_URBS; i++)
+ usb_kill_urb(self->rx_urb[i]);
+ /* Cancel Tx and speed URB.
+ * Toggle flags to make sure it's synchronous. */
+ self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
+ usb_kill_urb(self->tx_urb);
+ self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
+ usb_kill_urb(self->speed_urb);
+ }
+
+ /* Cleanup the device stuff */
+ irda_usb_close(self);
+ /* No longer attached to USB bus */
+ self->usbdev = NULL;
+ self->usbintf = NULL;
+
+ /* Clean up our urbs */
+ for (i = 0; i < IU_MAX_RX_URBS; i++)
+ usb_free_urb(self->rx_urb[i]);
+ /* Clean up Tx and speed URB */
+ usb_free_urb(self->tx_urb);
+ usb_free_urb(self->speed_urb);
+
+ /* Free self and network device */
+ free_netdev(self->netdev);
+ IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __FUNCTION__);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .owner = THIS_MODULE,
+ .name = "irda-usb",
+ .probe = irda_usb_probe,
+ .disconnect = irda_usb_disconnect,
+ .id_table = dongles,
+};
+
+/************************* MODULE CALLBACKS *************************/
+/*
+ * Deal with module insertion/removal
+ * Mostly tell USB about our existence
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Module insertion
+ */
+static int __init usb_irda_init(void)
+{
+ int ret;
+
+ ret = usb_register(&irda_driver);
+ if (ret < 0)
+ return ret;
+
+ IRDA_MESSAGE("USB IrDA support registered\n");
+ return 0;
+}
+module_init(usb_irda_init);
+
+/*------------------------------------------------------------------*/
+/*
+ * Module removal
+ */
+static void __exit usb_irda_cleanup(void)
+{
+ /* Deregister the driver and remove all pending instances */
+ usb_deregister(&irda_driver);
+}
+module_exit(usb_irda_cleanup);
+
+/*------------------------------------------------------------------*/
+/*
+ * Module parameters
+ */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+MODULE_AUTHOR("Roman Weissgaerber <weissg@vienna.at>, Dag Brattli <dag@brattli.net> and Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
new file mode 100644
index 000000000000..bd8f66542322
--- /dev/null
+++ b/drivers/net/irda/irda-usb.h
@@ -0,0 +1,163 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.h
+ * Version: 0.9b
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2001, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2000, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include <linux/time.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> /* struct irlap_cb */
+
+#define RX_COPY_THRESHOLD 200
+#define IRDA_USB_MAX_MTU 2051
+#define IRDA_USB_SPEED_MTU 64 /* Weird, but works like this */
+
+/* Maximum number of active URBs on the Rx path
+ * This is the number of buffers we keep between the USB hardware and the
+ * IrDA stack.
+ *
+ * Note : the network layer also queues the packets between us and the
+ * IrDA stack, and is actually pretty fast and efficient in doing that.
+ * Therefore, we don't need to have a large number of URBs, and we can
+ * live perfectly happily with only one. We certainly don't need to keep the
+ * full IrTTP window around here...
+ * I repeat for those who have trouble understanding : 1 URB is plenty
+ * good enough to handle back-to-back (brickwalled) frames. I tried it,
+ * it works (it's the hardware that has trouble doing it).
+ *
+ * Having 2 URBs would allow the USB stack to process one URB while we take
+ * care of the other and then swap the URBs...
+ * On the other hand, increasing the number of URBs has penalties
+ * in terms of latency and will interact with the link management in IrLAP...
+ * Jean II */
+#define IU_MAX_ACTIVE_RX_URBS 1 /* Don't touch !!! */
+
+/* When a Rx URB is passed back to us, we can't reuse it immediately,
+ * because it may still be referenced by the USB layer. Therefore we
+ * need to keep one extra URB in the Rx path.
+ * Jean II */
+#define IU_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + 1)
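+
+/* Hedged sketch of the rotation the two defines above allow (illustration
+ * only, not the driver's actual Rx completion path) : a spare URB is
+ * resubmitted first so reception never stalls, then the URB that just
+ * completed is parked as the new spare. resubmit_rx_urb() is a
+ * hypothetical helper standing in for the real submission code in
+ * irda-usb.c. */
+#if 0
+static void irda_usb_rx_rotate_sketch(struct irda_usb_cb *self,
+				      struct urb *done_urb)
+{
+	resubmit_rx_urb(self, self->idle_rx_urb);	/* hypothetical helper */
+	self->idle_rx_urb = done_urb;			/* recycled on next completion */
+}
+#endif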
+
+/* Various ugly stuff to try to work around generic problems */
+/* Send speed command in case of timeout, just for trying to get things sane */
+#define IU_BUG_KICK_TIMEOUT
+/* Show the USB class descriptor */
+#undef IU_DUMP_CLASS_DESC
+/* Assume a minimum round trip latency for USB transfers (in us)...
+ * USB transfers are done in the next USB slot if there is no traffic
+ * (1/19 msec) and run at 12 Mb/s :
+ * Waiting for slot + tx = (53us + 16us) * 2 = 137us minimum.
+ * Rx notification will only be done at the end of the USB frame period :
+ * OHCI : frame period = 1ms
+ * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
+ * EHCI : frame period = 125us */
+#define IU_USB_MIN_RTT 500 /* This should be safe in most cases */
+
+/* Inbound header */
+#define MEDIA_BUSY 0x80
+
+#define SPEED_2400 0x01
+#define SPEED_9600 0x02
+#define SPEED_19200 0x03
+#define SPEED_38400 0x04
+#define SPEED_57600 0x05
+#define SPEED_115200 0x06
+#define SPEED_576000 0x07
+#define SPEED_1152000 0x08
+#define SPEED_4000000 0x09
+
+/* Basic capabilities */
+#define IUC_DEFAULT 0x00 /* Basic device compliant with 1.0 spec */
+/* Main bugs */
+#define IUC_SPEED_BUG 0x01 /* Device doesn't set speed after the frame */
+#define IUC_NO_WINDOW 0x02 /* Device doesn't behave with big Rx window */
+#define IUC_NO_TURN 0x04 /* Device doesn't do turnaround by itself */
+/* Not currently used */
+#define IUC_SIR_ONLY 0x08 /* Device doesn't behave at FIR speeds */
+#define IUC_SMALL_PKT 0x10 /* Device doesn't behave with big Rx packets */
+#define IUC_MAX_WINDOW 0x20 /* Device underestimates the Rx window */
+#define IUC_MAX_XBOFS 0x40 /* Device needs more xbofs than advertised */
+
+/* USB class definitions */
+#define USB_IRDA_HEADER 0x01
+#define USB_CLASS_IRDA 0x02 /* USB_CLASS_APP_SPEC subclass */
+#define USB_DT_IRDA 0x21
+
+struct irda_class_desc {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u16 bcdSpecRevision;
+ __u8 bmDataSize;
+ __u8 bmWindowSize;
+ __u8 bmMinTurnaroundTime;
+ __u16 wBaudRate;
+ __u8 bmAdditionalBOFs;
+ __u8 bIrdaRateSniff;
+ __u8 bMaxUnicastList;
+} __attribute__ ((packed));
+
+/* class specific interface request to get the IrDA-USB class descriptor
+ * (6.2.5, USB-IrDA class spec 1.0) */
+
+#define IU_REQ_GET_CLASS_DESC 0x06
+
+struct irda_usb_cb {
+ struct irda_class_desc *irda_desc;
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct usb_interface *usbintf; /* init: probe_irda */
+ int netopen; /* Device is active for network */
+ int present; /* Device is present on the bus */
+ __u32 capability; /* Capability of the hardware */
+ __u8 bulk_in_ep; /* Rx Endpoint assignments */
+ __u8 bulk_out_ep; /* Tx Endpoint assignments */
+ __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
+ __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
+
+ wait_queue_head_t wait_q; /* for timeouts */
+
+ struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
+ struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
+ struct urb *tx_urb; /* URB used to send data frames */
+ struct urb *speed_urb; /* URB used to send speed commands */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdev. */
+ struct net_device_stats stats;
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos;
+ hashbin_t *tx_list; /* Queued transmit skb's */
+ char *speed_buff; /* Buffer for speed changes */
+
+ struct timeval stamp;
+ struct timeval now;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u16 xbofs; /* Current xbofs setting */
+ __s16 new_xbofs; /* xbofs we need to set */
+ __u32 speed; /* Current speed */
+ __s32 new_speed; /* speed we need to set */
+};
+
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
new file mode 100644
index 000000000000..5971315f3fa0
--- /dev/null
+++ b/drivers/net/irda/irport.c
@@ -0,0 +1,1146 @@
+/*********************************************************************
+ *
+ * Filename: irport.c
+ * Version: 1.0
+ * Description: Half duplex serial port SIR driver for IrDA.
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 3 13:49:59 1997
+ * Modified at: Fri Jan 28 20:22:38 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: serial.c by Linus Torvalds
+ *
+ * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 2000-2003 Jean Tourrilhes, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver is meant to be a small half duplex serial driver to be
+ * used for IR chipsets that have a UART (16550) compatibility mode.
+ * Eventually it will replace irtty, because irtty has some
+ * problems that are hard to get around when we don't have control
+ * over the serial driver. This driver may also be used by FIR
+ * drivers to handle SIR mode for them.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/serial_reg.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include "irport.h"
+
+#define IO_EXTENT 8
+
+/*
+ * Currently you'll need to set these values using insmod like this:
+ * insmod irport io=0x3e8 irq=11
+ */
+static unsigned int io[] = { ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0 };
+
+static unsigned int qos_mtt_bits = 0x03;
+
+static struct irport_cb *dev_self[] = { NULL, NULL, NULL, NULL};
+static char *driver_name = "irport";
+
+static inline void irport_write_wakeup(struct irport_cb *self);
+static inline int irport_write(int iobase, int fifo_size, __u8 *buf, int len);
+static inline void irport_receive(struct irport_cb *self);
+
+static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+static inline int irport_is_receiving(struct irport_cb *self);
+static int irport_set_dtr_rts(struct net_device *dev, int dtr, int rts);
+static int irport_raw_write(struct net_device *dev, __u8 *buf, int len);
+static struct net_device_stats *irport_net_get_stats(struct net_device *dev);
+static int irport_change_speed_complete(struct irda_task *task);
+static void irport_timeout(struct net_device *dev);
+
+static irqreturn_t irport_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs);
+static int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev);
+static void irport_change_speed(void *priv, __u32 speed);
+static int irport_net_open(struct net_device *dev);
+static int irport_net_close(struct net_device *dev);
+
+static struct irport_cb *
+irport_open(int i, unsigned int iobase, unsigned int irq)
+{
+ struct net_device *dev;
+ struct irport_cb *self;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ /* Lock the port that we need */
+ if (!request_region(iobase, IO_EXTENT, driver_name)) {
+ IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
+ __FUNCTION__, iobase);
+ goto err_out1;
+ }
+
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct irport_cb));
+ if (!dev) {
+ IRDA_ERROR("%s(), can't allocate memory for "
+ "irda device!\n", __FUNCTION__);
+ goto err_out2;
+ }
+
+ self = dev->priv;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->priv = self;
+ self->index = i;
+
+ /* Initialize IO */
+ self->io.sir_base = iobase;
+ self->io.sir_ext = IO_EXTENT;
+ self->io.irq = irq;
+ self->io.fifo_size = 16; /* 16550A and compatible */
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200;
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Bootstrap ZeroCopy Rx */
+ self->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ self->rx_buff.skb = __dev_alloc_skb(self->rx_buff.truesize,
+ GFP_KERNEL);
+ if (self->rx_buff.skb == NULL) {
+ IRDA_ERROR("%s(), can't allocate memory for "
+ "receive buffer!\n", __FUNCTION__);
+ goto err_out3;
+ }
+ skb_reserve(self->rx_buff.skb, 1);
+ self->rx_buff.head = self->rx_buff.skb->data;
+ /* No need to memset the buffer, unless you are really pedantic */
+
+	/* Finish setting up the Rx buffer descriptor */
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Specify how much memory we want */
+ self->tx_buff.truesize = 4000;
+
+ /* Allocate memory if needed */
+ if (self->tx_buff.truesize > 0) {
+ self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize,
+ GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ IRDA_ERROR("%s(), can't allocate memory for "
+ "transmit buffer!\n", __FUNCTION__);
+ goto err_out4;
+ }
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+ }
+ self->tx_buff.data = self->tx_buff.head;
+
+ self->netdev = dev;
+ /* Keep track of module usage */
+ SET_MODULE_OWNER(dev);
+
+ /* May be overridden by piggyback drivers */
+ self->interrupt = irport_interrupt;
+ self->change_speed = irport_change_speed;
+
+ /* Override the network functions we need to use */
+ dev->hard_start_xmit = irport_hard_xmit;
+ dev->tx_timeout = irport_timeout;
+ dev->watchdog_timeo = HZ; /* Allow time enough for speed change */
+ dev->open = irport_net_open;
+ dev->stop = irport_net_close;
+ dev->get_stats = irport_net_get_stats;
+ dev->do_ioctl = irport_net_ioctl;
+
+ /* Make ifconfig display some details */
+ dev->base_addr = iobase;
+ dev->irq = irq;
+
+ if (register_netdev(dev)) {
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ goto err_out5;
+ }
+ IRDA_MESSAGE("IrDA: Registered device %s (irport io=0x%X irq=%d)\n",
+ dev->name, iobase, irq);
+
+ return self;
+ err_out5:
+ kfree(self->tx_buff.head);
+ err_out4:
+ kfree_skb(self->rx_buff.skb);
+ err_out3:
+ free_netdev(dev);
+ dev_self[i] = NULL;
+ err_out2:
+ release_region(iobase, IO_EXTENT);
+ err_out1:
+ return NULL;
+}
+
+static int irport_close(struct irport_cb *self)
+{
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* We are not using any dongle anymore! */
+ if (self->dongle)
+ irda_device_dongle_cleanup(self->dongle);
+ self->dongle = NULL;
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the IO-port that this driver is using */
+ IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
+ __FUNCTION__, self->io.sir_base);
+ release_region(self->io.sir_base, self->io.sir_ext);
+
+ if (self->tx_buff.head)
+ kfree(self->tx_buff.head);
+
+ if (self->rx_buff.skb)
+ kfree_skb(self->rx_buff.skb);
+ self->rx_buff.skb = NULL;
+
+ /* Remove ourselves */
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static void irport_stop(struct irport_cb *self)
+{
+ int iobase;
+
+ iobase = self->io.sir_base;
+
+ /* We can't lock, we may be called from a FIR driver - Jean II */
+
+ /* We are not transmitting any more */
+ self->transmitting = 0;
+
+ /* Reset UART */
+ outb(0, iobase+UART_MCR);
+
+ /* Turn off interrupts */
+ outb(0, iobase+UART_IER);
+}
+
+static void irport_start(struct irport_cb *self)
+{
+ int iobase;
+
+ iobase = self->io.sir_base;
+
+ irport_stop(self);
+
+ /* We can't lock, we may be called from a FIR driver - Jean II */
+
+ /* Initialize UART */
+ outb(UART_LCR_WLEN8, iobase+UART_LCR); /* Reset DLAB */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
+
+	/* Turn on interrupts */
+ outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, iobase+UART_IER);
+}
+
+/*
+ * Function irport_probe (void)
+ *
+ * Start IO port
+ *
+ */
+int irport_probe(int iobase)
+{
+ IRDA_DEBUG(4, "%s(), iobase=%#x\n", __FUNCTION__, iobase);
+
+ return 0;
+}
+
+/*
+ * Function irport_get_fcr (speed)
+ *
+ * Compute value of fcr
+ *
+ */
+static inline unsigned int irport_get_fcr(__u32 speed)
+{
+ unsigned int fcr; /* FIFO control reg */
+
+ /* Enable fifos */
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
+	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ if (speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ //fcr |= UART_FCR_TRIGGER_14;
+ fcr |= UART_FCR_TRIGGER_8;
+
+ return(fcr);
+}
+
+/*
+ * Function irport_change_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ * This function should be called with irqs off and the spin-lock held.
+ */
+static void irport_change_speed(void *priv, __u32 speed)
+{
+ struct irport_cb *self = (struct irport_cb *) priv;
+ int iobase;
+ unsigned int fcr; /* FIFO control reg */
+ unsigned int lcr; /* Line control reg */
+ int divisor;
+
+ IRDA_ASSERT(self != NULL, return;);
+ IRDA_ASSERT(speed != 0, return;);
+
+ IRDA_DEBUG(1, "%s(), Setting speed to: %d - iobase=%#x\n",
+ __FUNCTION__, speed, self->io.sir_base);
+
+ /* We can't lock, we may be called from a FIR driver - Jean II */
+
+ iobase = self->io.sir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Turn off interrupts */
+ outb(0, iobase+UART_IER);
+
+ divisor = SPEED_MAX/speed;
+
+ /* Get proper fifo configuration */
+ fcr = irport_get_fcr(speed);
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase+UART_DLM);
+ outb(lcr, iobase+UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
+
+	/* Turn on interrupts */
+	/* Doing that here would generate a fatal interrupt storm,
+	 * so people calling us will do it properly - Jean II */
+ //outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
+}
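+
+/*
+ * Worked example of the divisor computation above (illustration only,
+ * never called) : with SPEED_MAX = 115200 the usual 16550 divisors
+ * come out as expected.
+ */
+#if 0
+static void irport_divisor_example(void)
+{
+	int d;
+
+	d = SPEED_MAX / 9600;	/* = 12 -> DLL = 12, DLM = 0 */
+	d = SPEED_MAX / 38400;	/* = 3 */
+	d = SPEED_MAX / 115200;	/* = 1 */
+	(void) d;
+}
+#endif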
+
+/*
+ * Function __irport_change_speed (instance, state, param)
+ *
+ * State machine for changing speed of the device. We do it this way since
+ * we cannot use schedule_timeout() when we are in interrupt context
+ *
+ */
+int __irport_change_speed(struct irda_task *task)
+{
+ struct irport_cb *self;
+ __u32 speed = (__u32) task->param;
+ unsigned long flags = 0;
+ int wasunlocked = 0;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+
+ self = (struct irport_cb *) task->instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* Locking notes : this function may be called from irq context with
+ * spinlock, via irport_write_wakeup(), or from non-interrupt without
+ * spinlock (from the task timer). Yuck !
+	 * This is ugly, and unsafe if the spinlock is not already acquired.
+	 * This will be fixed when irda-task gets rewritten.
+ * Jean II */
+ if (!spin_is_locked(&self->lock)) {
+ spin_lock_irqsave(&self->lock, flags);
+ wasunlocked = 1;
+ }
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ case IRDA_TASK_WAIT:
+ /* Are we ready to change speed yet? */
+ if (self->tx_buff.len > 0) {
+ task->state = IRDA_TASK_WAIT;
+
+ /* Try again later */
+ ret = msecs_to_jiffies(20);
+ break;
+ }
+
+ if (self->dongle)
+ irda_task_next_state(task, IRDA_TASK_CHILD_INIT);
+ else
+ irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
+ break;
+ case IRDA_TASK_CHILD_INIT:
+ /* Go to default speed */
+ self->change_speed(self->priv, 9600);
+
+ /* Change speed of dongle */
+ if (irda_task_execute(self->dongle,
+ self->dongle->issue->change_speed,
+ NULL, task, (void *) speed))
+ {
+			/* Dongle needs more time to change its speed */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* Give dongle 1 sec to finish */
+ ret = msecs_to_jiffies(1000);
+ } else
+ /* Child finished immediately */
+ irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
+ break;
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), changing speed of dongle timed out!\n", __FUNCTION__);
+ ret = -1;
+ break;
+ case IRDA_TASK_CHILD_DONE:
+ /* Finally we are ready to change the speed */
+ self->change_speed(self->priv, speed);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ ret = -1;
+ break;
+ }
+	/* Put things back in the state we found them - Jean II */
+ if(wasunlocked) {
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Function irport_change_speed_complete (task)
+ *
+ * Called when the change speed operation completes
+ *
+ */
+static int irport_change_speed_complete(struct irda_task *task)
+{
+ struct irport_cb *self;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ self = (struct irport_cb *) task->instance;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_ASSERT(self->netdev != NULL, return -1;);
+
+ /* Finished changing speed, so we are not busy any longer */
+ /* Signal network layer so it can try to send the frame */
+
+ netif_wake_queue(self->netdev);
+
+ return 0;
+}
+
+/*
+ * Function irport_timeout (struct net_device *dev)
+ *
+ * The networking layer thinks we timed out.
+ *
+ */
+
+static void irport_timeout(struct net_device *dev)
+{
+ struct irport_cb *self;
+ int iobase;
+ int iir, lsr;
+ unsigned long flags;
+
+ self = (struct irport_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return;);
+ iobase = self->io.sir_base;
+
+ IRDA_WARNING("%s: transmit timed out, jiffies = %ld, trans_start = %ld\n",
+ dev->name, jiffies, dev->trans_start);
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Debug what's happening... */
+
+ /* Get interrupt status */
+ lsr = inb(iobase+UART_LSR);
+ /* Read interrupt register */
+ iir = inb(iobase+UART_IIR);
+ IRDA_DEBUG(0, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __FUNCTION__, iir, lsr, iobase);
+
+ IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%d\n",
+ __FUNCTION__, self->transmitting, self->tx_buff.len,
+ self->tx_buff.data - self->tx_buff.head);
+
+ /* Now, restart the port */
+ irport_start(self);
+ self->change_speed(self->priv, self->io.speed);
+ /* This will re-enable irqs */
+ outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function irport_wait_hw_transmitter_finish ()
+ *
+ * Wait for the real end of HW transmission
+ *
+ * The UART is a strict FIFO, and we get called only when we have finished
+ * pushing data to the FIFO, so the maximum amount of time we must wait
+ * is only for the FIFO to drain out.
+ *
+ * We use a simple calibrated loop. We may need to adjust the loop
+ * delay (udelay) to balance I/O traffic and latency. And we also need to
+ * adjust the maximum timeout.
+ * It would probably be better to wait for the proper interrupt,
+ * but it doesn't seem to be available.
+ *
+ * We can't use jiffies or kernel timers because :
+ * 1) We are called from the interrupt handler, which disables softirqs,
+ * so jiffies won't be increased
+ * 2) Jiffies granularity is usually very coarse (10ms), and we don't
+ * want to wait that long to detect stuck hardware.
+ * Jean II
+ */
+
+static void irport_wait_hw_transmitter_finish(struct irport_cb *self)
+{
+ int iobase;
+ int count = 1000; /* 1 ms */
+
+ iobase = self->io.sir_base;
+
+ /* Calibrated busy loop */
+ while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT))
+ udelay(1);
+
+ if(count == 0)
+ IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
+}
+
+/*
+ * Function irport_hard_xmit (struct sk_buff *skb, struct net_device *dev)
+ *
+ * Transmits the current frame until FIFO is full, then
+ * waits until the next transmit interrupt, and continues until the
+ * frame is transmitted.
+ */
+static int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct irport_cb *self;
+ unsigned long flags;
+ int iobase;
+ s32 speed;
+
+ IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return 0;);
+
+ self = (struct irport_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.sir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /*
+ * We send frames one by one in SIR mode (no
+ * pipelining), so at this point, if we were sending
+ * a previous frame, we just received the interrupt
+ * telling us it is finished (UART_IIR_THRI).
+ * Therefore, waiting for the transmitter to really
+ * finish draining the fifo won't take too long.
+ * And the interrupt handler is not expected to run.
+ * - Jean II */
+ irport_wait_hw_transmitter_finish(self);
+ /* Better go there already locked - Jean II */
+ irda_task_execute(self, __irport_change_speed,
+ irport_change_speed_complete,
+ NULL, (void *) speed);
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->stats.tx_bytes += self->tx_buff.len;
+
+ /* We are transmitting */
+ self->transmitting = 1;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, iobase+UART_IER);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irport_write (driver)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ * Called only from irport_write_wakeup()
+ */
+static inline int irport_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+ /* Fill FIFO with current frame */
+ while ((actual < fifo_size) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+
+ actual++;
+ }
+
+ return actual;
+}
+
+/*
+ * Function irport_write_wakeup (self)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ * Called only from irport_interrupt()
+ * Make sure this function is *not* called while we are receiving,
+ * otherwise we will reset the fifo and lose data :-(
+ */
+static inline void irport_write_wakeup(struct irport_cb *self)
+{
+ int actual = 0;
+ int iobase;
+ unsigned int fcr;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0) {
+ /* Write data left in transmit buffer */
+ actual = irport_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ } else {
+ /*
+		 * Now the serial buffer is almost free and we can start
+		 * transmission of another packet. But first we must check
+		 * if we need to change the speed of the hardware.
+ */
+ if (self->new_speed) {
+ irport_wait_hw_transmitter_finish(self);
+ irda_task_execute(self, __irport_change_speed,
+ irport_change_speed_complete,
+ NULL, (void *) self->new_speed);
+ self->new_speed = 0;
+ } else {
+ /* Tell network layer that we want more frames */
+ netif_wake_queue(self->netdev);
+ }
+ self->stats.tx_packets++;
+
+ /*
+ * Reset Rx FIFO to make sure that all reflected transmit data
+ * is discarded. This is needed for half duplex operation
+ */
+ fcr = irport_get_fcr(self->io.speed);
+ fcr |= UART_FCR_CLEAR_RCVR;
+ outb(fcr, iobase+UART_FCR);
+
+ /* Finished transmitting */
+ self->transmitting = 0;
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase+UART_IER);
+
+ IRDA_DEBUG(1, "%s() : finished Tx\n", __FUNCTION__);
+ }
+}
+
+/*
+ * Function irport_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ * Called only from irport_interrupt()
+ */
+static inline void irport_receive(struct irport_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+ inb(iobase+UART_RX));
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
+ break;
+ }
+ } while (inb(iobase+UART_LSR) & UART_LSR_DR);
+}
+
+/*
+ * Function irport_interrupt (irq, dev_id, regs)
+ *
+ * Interrupt handler
+ */
+static irqreturn_t irport_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct irport_cb *self;
+ int boguscount = 0;
+ int iobase;
+ int iir, lsr;
+ int handled = 0;
+
+ if (!dev) {
+ IRDA_WARNING("%s() irq %d for unknown device.\n", __FUNCTION__, irq);
+ return IRQ_NONE;
+ }
+ self = (struct irport_cb *) dev->priv;
+
+ spin_lock(&self->lock);
+
+ iobase = self->io.sir_base;
+
+	/* Cut'n'paste interrupt routine from serial.c
+	 * This version tries to minimise latency and I/O operations.
+	 * Simplified and modified to enforce half duplex operation.
+	 * - Jean II */
+
+	/* Check status even if the iir reg is cleared; more robust and
+	 * eliminates a read on the I/O bus - Jean II */
+ do {
+ /* Get interrupt status ; Clear interrupt */
+ lsr = inb(iobase+UART_LSR);
+
+ /* Are we receiving or transmitting ? */
+ if(!self->transmitting) {
+ /* Received something ? */
+ if (lsr & UART_LSR_DR)
+ irport_receive(self);
+ } else {
+ /* Room in Tx fifo ? */
+ if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
+ irport_write_wakeup(self);
+ }
+
+ /* A bit hackish, but working as expected... Jean II */
+ if(lsr & (UART_LSR_THRE | UART_LSR_TEMT | UART_LSR_DR))
+ handled = 1;
+
+		/* Make sure we don't stay here too long */
+ if (boguscount++ > 10) {
+ IRDA_WARNING("%s() irq handler looping : lsr=%02x\n",
+ __FUNCTION__, lsr);
+ break;
+ }
+
+ /* Read interrupt register */
+ iir = inb(iobase+UART_IIR);
+
+		/* Enable this debug only when no other debug options are set and at low
+ * bit rates, otherwise it may cause Rx overruns (lsr=63).
+ * - Jean II */
+ IRDA_DEBUG(6, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __FUNCTION__, iir, lsr, iobase);
+
+ /* As long as interrupt pending... */
+ } while ((iir & UART_IIR_NO_INT) == 0);
+
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Function irport_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ *
+ */
+static int irport_net_open(struct net_device *dev)
+{
+ struct irport_cb *self;
+ int iobase;
+ char hwname[16];
+ unsigned long flags;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct irport_cb *) dev->priv;
+
+ iobase = self->io.sir_base;
+
+ if (request_irq(self->io.irq, self->interrupt, 0, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&self->lock, flags);
+ /* Init uart */
+ irport_start(self);
+	/* Set 9600 baud by default, including at the dongle */
+ irda_task_execute(self, __irport_change_speed,
+ irport_change_speed_complete,
+ NULL, (void *) 9600);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+
+ /* Give self a hardware name */
+ sprintf(hwname, "SIR @ 0x%03x", self->io.sir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ /* Ready to play! */
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Function irport_net_close (self)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int irport_net_close(struct net_device *dev)
+{
+ struct irport_cb *self;
+ int iobase;
+ unsigned long flags;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct irport_cb *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.sir_base;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ spin_lock_irqsave(&self->lock, flags);
+ irport_stop(self);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ free_irq(self->io.irq, dev);
+
+ return 0;
+}
+
+/*
+ * Function irport_is_receiving (self)
+ *
+ * Returns true if we are currently receiving data
+ *
+ */
+static inline int irport_is_receiving(struct irport_cb *self)
+{
+ return (self->rx_buff.state != OUTSIDE_FRAME);
+}
+
+/*
+ * Function irport_set_dtr_rts (dev, dtr, rts)
+ *
+ * This function can be used by dongles etc. to set or reset the status
+ * of the dtr and rts lines
+ */
+static int irport_set_dtr_rts(struct net_device *dev, int dtr, int rts)
+{
+ struct irport_cb *self = dev->priv;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.sir_base;
+
+ if (dtr)
+ dtr = UART_MCR_DTR;
+ if (rts)
+ rts = UART_MCR_RTS;
+
+ outb(dtr|rts|UART_MCR_OUT2, iobase+UART_MCR);
+
+ return 0;
+}
+
+static int irport_raw_write(struct net_device *dev, __u8 *buf, int len)
+{
+ struct irport_cb *self = (struct irport_cb *) dev->priv;
+ int actual = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.sir_base;
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ IRDA_DEBUG( 0, "%s(), failed, fifo not empty!\n", __FUNCTION__);
+ return -1;
+ }
+
+ /* Fill FIFO with current frame */
+ while (actual < len) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+ actual++;
+ }
+
+ return actual;
+}
+
+/*
+ * Function irport_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct irport_cb *self;
+ dongle_t *dongle;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ irda_task_execute(self, __irport_change_speed, NULL,
+ NULL, (void *) irq->ifr_baudrate);
+ break;
+ case SIOCSDONGLE: /* Set dongle */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* Locking :
+ * irda_device_dongle_init() can't be locked.
+ * irda_task_execute() doesn't need to be locked.
+ * Jean II
+ */
+
+ /* Initialize dongle */
+ dongle = irda_device_dongle_init(dev, irq->ifr_dongle);
+ if (!dongle)
+ break;
+
+ dongle->set_mode = NULL;
+ dongle->read = NULL;
+ dongle->write = irport_raw_write;
+ dongle->set_dtr_rts = irport_set_dtr_rts;
+
+ /* Now initialize the dongle! */
+ dongle->issue->open(dongle, &self->qos);
+
+ /* Reset dongle */
+ irda_task_execute(dongle, dongle->issue->reset, NULL, NULL,
+ NULL);
+
+ /* Make dongle available to driver only now to avoid
+ * race conditions - Jean II */
+ self->dongle = dongle;
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = irport_is_receiving(self);
+ break;
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* No real need to lock... */
+ spin_lock_irqsave(&self->lock, flags);
+ irport_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *irport_net_get_stats(struct net_device *dev)
+{
+ struct irport_cb *self = (struct irport_cb *) dev->priv;
+
+ return &self->stats;
+}
+
+static int __init irport_init(void)
+{
+ int i;
+
+	/* Check the index first so we never read past the end of io[] */
+	for (i=0; (i < 4) && (io[i] < 2000); i++) {
+ if (irport_open(i, io[i], irq[i]) != NULL)
+ return 0;
+ }
+ /*
+ * Maybe something failed, but we can still be usable for FIR drivers
+ */
+ return 0;
+}
+
+/*
+ * Function irport_cleanup ()
+ *
+ * Close all configured ports
+ *
+ */
+static void __exit irport_cleanup(void)
+{
+ int i;
+
+ IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
+
+ for (i=0; i < 4; i++) {
+ if (dev_self[i])
+ irport_close(dev_self[i]);
+ }
+}
+
+MODULE_PARM(io, "1-4i");
+MODULE_PARM_DESC(io, "Base I/O addresses");
+MODULE_PARM(irq, "1-4i");
+MODULE_PARM_DESC(irq, "IRQ lines");
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Half duplex serial driver for IrDA SIR mode");
+MODULE_LICENSE("GPL");
+
+module_init(irport_init);
+module_exit(irport_cleanup);
+
diff --git a/drivers/net/irda/irport.h b/drivers/net/irda/irport.h
new file mode 100644
index 000000000000..fc89c8c3dd7f
--- /dev/null
+++ b/drivers/net/irda/irport.h
@@ -0,0 +1,80 @@
+/*********************************************************************
+ *
+ * Filename: irport.h
+ * Version: 0.1
+ * Description: Serial driver for IrDA
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 3 13:49:59 1997
+ * Modified at: Fri Jan 14 10:21:10 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997, 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef IRPORT_H
+#define IRPORT_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <net/irda/irda_device.h>
+
+#define SPEED_DEFAULT 9600
+#define SPEED_MAX 115200
+
+/*
+ * These are the supported serial types.
+ */
+#define PORT_UNKNOWN 0
+#define PORT_8250 1
+#define PORT_16450 2
+#define PORT_16550 3
+#define PORT_16550A 4
+#define PORT_CIRRUS 5
+#define PORT_16650 6
+#define PORT_MAX 6
+
+#define FRAME_MAX_SIZE 2048
+
+struct irport_cb {
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+
+ struct irlap_cb *irlap; /* The link layer we are attached to */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+
+ struct qos_info qos; /* QoS capabilities for this device */
+ dongle_t *dongle; /* Dongle driver */
+
+ __u32 flags; /* Interface flags */
+ __u32 new_speed;
+ int mode;
+ int index; /* Instance index */
+ int transmitting; /* Are we transmitting ? */
+
+ spinlock_t lock; /* For serializing operations */
+
+ /* For piggyback drivers */
+ void *priv;
+ void (*change_speed)(void *priv, __u32 speed);
+ int (*interrupt)(int irq, void *dev_id, struct pt_regs *regs);
+};
+
+#endif /* IRPORT_H */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
new file mode 100644
index 000000000000..7d23aa375908
--- /dev/null
+++ b/drivers/net/irda/irtty-sir.c
@@ -0,0 +1,642 @@
+/*********************************************************************
+ *
+ * Filename: irtty-sir.c
+ * Version: 2.0
+ * Description: IrDA line discipline implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:18:38 1997
+ * Modified at: Sun Oct 27 22:13:30 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ * Sources: slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+#include "irtty-sir.h"
+
+static int qos_mtt_bits = 0x03; /* 5 ms or more */
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+/* ------------------------------------------------------- */
+
+/* device configuration callbacks always invoked with irda-thread context */
+
+/* Find out how many chars we have in the buffers below us.
+ * This is allowed to lie, i.e. return fewer chars than we
+ * actually have. The returned value is used to determine
+ * how long the irda thread should wait before doing the
+ * real blocking wait_until_sent()
+ */
+
+static int irtty_chars_in_buffer(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ return priv->tty->driver->chars_in_buffer(priv->tty);
+}
+
+/* Wait (sleep) until the underlying hardware has finished transmission,
+ * i.e. the hardware buffers are drained.
+ * This must block and not return before all characters are really sent.
+ *
+ * If the tty sits on top of a 16550A-like uart, there are typically
+ * up to 16 bytes in the fifo - e.g. 9600 bps 8N1 needs 16.7 msec
+ *
+ * With usbserial the uart-fifo is basically replaced by the converter's
+ * outgoing endpoint buffer, which can usually hold 64 bytes (at least).
+ * With pl2303 it appears we are safe with 60msec here.
+ *
+ * I really wish all serial drivers would provide a
+ * correct implementation of wait_until_sent()
+ */
+
+#define USBSERIAL_TX_DONE_DELAY 60
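+
+/* Hedged sketch (never called by the driver) of the drain-time rule of
+ * thumb used in the comment above : at 8N1 each byte costs 10 bit times,
+ * so 16 bytes at 9600 bps take roughly 16 * 10 * 1000 / 9600 = ~16.7 msec,
+ * matching the figure quoted there. */
+#if 0
+static unsigned int irtty_drain_time_msec_sketch(unsigned int bytes,
+						 unsigned int baud)
+{
+	/* round up to stay on the safe side */
+	return (bytes * 10 * 1000 + baud - 1) / baud;
+}
+#endif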
+
+static void irtty_wait_until_sent(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ tty = priv->tty;
+ if (tty->driver->wait_until_sent) {
+ lock_kernel();
+ tty->driver->wait_until_sent(tty, msecs_to_jiffies(100));
+ unlock_kernel();
+ }
+ else {
+ msleep(USBSERIAL_TX_DONE_DELAY);
+ }
+}
+
+/*
+ * Function irtty_change_speed (dev, speed)
+ *
+ * Change the speed of the serial port.
+ *
+ * This may sleep in set_termios (e.g. in the usbserial driver) and must
+ * therefore not be called from interrupt/timer/tasklet context.
+ * All such invocations are now deferred to kIrDAd so we can sleep there.
+ */
+
+static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ struct termios old_termios;
+ int cflag;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+
+ lock_kernel();
+ old_termios = *(tty->termios);
+ cflag = tty->termios->c_cflag;
+
+ cflag &= ~CBAUD;
+
+ IRDA_DEBUG(2, "%s(), Setting speed to %d\n", __FUNCTION__, speed);
+
+ switch (speed) {
+ case 1200:
+ cflag |= B1200;
+ break;
+ case 2400:
+ cflag |= B2400;
+ break;
+ case 4800:
+ cflag |= B4800;
+ break;
+ case 19200:
+ cflag |= B19200;
+ break;
+ case 38400:
+ cflag |= B38400;
+ break;
+ case 57600:
+ cflag |= B57600;
+ break;
+ case 115200:
+ cflag |= B115200;
+ break;
+ case 9600:
+ default:
+ cflag |= B9600;
+ break;
+ }
+
+ tty->termios->c_cflag = cflag;
+ if (tty->driver->set_termios)
+ tty->driver->set_termios(tty, &old_termios);
+ unlock_kernel();
+
+ priv->io.speed = speed;
+
+ return 0;
+}
+
+/*
+ * Function irtty_set_dtr_rts (dev, dtr, rts)
+ *
+ * This function can be used by dongles etc. to set or reset the status
+ * of the dtr and rts lines
+ */
+
+static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ struct sirtty_cb *priv = dev->priv;
+ int set = 0;
+ int clear = 0;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ if (rts)
+ set |= TIOCM_RTS;
+ else
+ clear |= TIOCM_RTS;
+ if (dtr)
+ set |= TIOCM_DTR;
+ else
+ clear |= TIOCM_DTR;
+
+ /*
+ * We can't use ioctl() because it expects a non-null file structure,
+ * and we don't have that here.
+	 * This function is not yet defined for all tty drivers, so
+ * let's be careful... Jean II
+ */
+ IRDA_ASSERT(priv->tty->driver->tiocmset != NULL, return -1;);
+ priv->tty->driver->tiocmset(priv->tty, NULL, set, clear);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+/* called from sir_dev when there is more data to send
+ * context is either netdev->hard_xmit or some transmit-completion bh
+ * i.e. we are under spinlock here and must not sleep.
+ */
+
+static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ int writelen;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+ if (!tty->driver->write)
+ return 0;
+ tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ if (tty->driver->write_room) {
+ writelen = tty->driver->write_room(tty);
+ if (writelen > len)
+ writelen = len;
+ }
+ else
+ writelen = len;
+ return tty->driver->write(tty, ptr, writelen);
+}
+
+/* ------------------------------------------------------- */
+
+/* irda line discipline callbacks */
+
+/*
+ * Function irtty_receive_buf( tty, cp, count)
+ *
+ * Handle the 'receiver data ready' interrupt. This function is called
+ * by the 'tty_io' module in the kernel when a block of IrDA data has
+ * been received, which can now be decapsulated and delivered for
+ * further processing
+ *
+ * calling context depends on underlying driver and tty->low_latency!
+ * for example (low_latency: 1 / 0):
+ * serial.c: uart-interrupt / softint
+ * usbserial: urb-complete-interrupt / softint
+ */
+
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int i;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ if (unlikely(count==0)) /* yes, this happens */
+ return;
+
+ dev = priv->dev;
+ if (!dev) {
+ IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ /*
+ * Characters received with a parity error, etc?
+ */
+ if (fp && *fp++) {
+ IRDA_DEBUG(0, "Framing or parity error!\n");
+ sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
+ return;
+ }
+ }
+
+ sirdev_receive(dev, cp, count);
+}
+
+/*
+ * Function irtty_receive_room (tty)
+ *
+ * Used by the TTY to find out how much data we can receive at a time
+ *
+ */
+static int irtty_receive_room(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return 0;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return 0;);
+
+ return 65536; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * Function irtty_write_wakeup (tty)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void irtty_write_wakeup(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ if (priv->dev)
+ sirdev_write_complete(priv->dev);
+}
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_stop_receiver (tty, stop)
+ *
+ */
+
+static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
+{
+ struct termios old_termios;
+ int cflag;
+
+ lock_kernel();
+ old_termios = *(tty->termios);
+ cflag = tty->termios->c_cflag;
+
+ if (stop)
+ cflag &= ~CREAD;
+ else
+ cflag |= CREAD;
+
+ tty->termios->c_cflag = cflag;
+ if (tty->driver->set_termios)
+ tty->driver->set_termios(tty, &old_termios);
+ unlock_kernel();
+}
+
+/*****************************************************************/
+
+/* serialize ldisc open/close with sir_dev */
+static DECLARE_MUTEX(irtty_sem);
+
+/* notifier from sir_dev when irda% device gets opened (ifup) */
+
+static int irtty_start_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ down(&irtty_sem);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ up(&irtty_sem);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ if (tty->driver->start)
+ tty->driver->start(tty);
+ /* Make sure we can receive more data */
+ irtty_stop_receiver(tty, FALSE);
+
+ up(&irtty_sem);
+ return 0;
+}
+
+/* notifier from sir_dev when irda% device gets closed (ifdown) */
+
+static int irtty_stop_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ down(&irtty_sem);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ up(&irtty_sem);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ /* Make sure we don't receive more data */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->driver->stop)
+ tty->driver->stop(tty);
+
+ up(&irtty_sem);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+static struct sir_driver sir_tty_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = "sir_tty",
+ .start_dev = irtty_start_dev,
+ .stop_dev = irtty_stop_dev,
+ .do_write = irtty_do_write,
+ .chars_in_buffer = irtty_chars_in_buffer,
+ .wait_until_sent = irtty_wait_until_sent,
+ .set_speed = irtty_change_speed,
+ .set_dtr_rts = irtty_set_dtr_rts,
+};
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_ioctl (tty, file, cmd, arg)
+ *
+ * The Swiss army knife of system calls :-)
+ *
+ */
+static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct irtty_info { char name[6]; } info;
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int err = 0;
+
+ IRDA_ASSERT(priv != NULL, return -ENODEV;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
+
+ IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __FUNCTION__, cmd);
+
+ dev = priv->dev;
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ switch (cmd) {
+ case TCGETS:
+ case TCGETA:
+ err = n_tty_ioctl(tty, file, cmd, arg);
+ break;
+
+ case IRTTY_IOCTDONGLE:
+ /* this call blocks for completion */
+ err = sirdev_set_dongle(dev, (IRDA_DONGLE) arg);
+ break;
+
+ case IRTTY_IOCGET:
+ IRDA_ASSERT(dev->netdev != NULL, return -1;);
+
+ memset(&info, 0, sizeof(info));
+ strncpy(info.name, dev->netdev->name, sizeof(info.name)-1);
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ err = -EFAULT;
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+ return err;
+}
+
+
+/*
+ * Function irtty_open(tty)
+ *
+ * This function is called by the TTY module when the IrDA line
+ * discipline is called for. Because we are sure the tty line exists,
+ * we only have to link it to a free IrDA channel.
+ */
+static int irtty_open(struct tty_struct *tty)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv;
+ int ret = 0;
+
+ /* Module stuff handled via irda_ldisc.owner - Jean II */
+
+ /* First make sure we're not already connected. */
+ if (tty->disc_data != NULL) {
+ priv = tty->disc_data;
+ if (priv && priv->magic == IRTTY_MAGIC) {
+ ret = -EEXIST;
+ goto out;
+ }
+ tty->disc_data = NULL; /* ### */
+ }
+
+ /* stop the underlying driver */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->driver->stop)
+ tty->driver->stop(tty);
+
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ /* apply mtt override */
+ sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
+
+ /* get a sir device instance for this driver */
+ dev = sirdev_get_instance(&sir_tty_drv, tty->name);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* allocate private device info block */
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_put;
+ memset(priv, 0, sizeof(*priv));
+
+ priv->magic = IRTTY_MAGIC;
+ priv->tty = tty;
+ priv->dev = dev;
+
+ /* serialize with start_dev - in case we were racing with ifup */
+ down(&irtty_sem);
+
+ dev->priv = priv;
+ tty->disc_data = priv;
+
+ up(&irtty_sem);
+
+ IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name);
+
+ return 0;
+
+out_put:
+ sirdev_put_instance(dev);
+out:
+ return ret;
+}
+
+/*
+ * Function irtty_close (tty)
+ *
+ * Close down an IrDA channel. This means flushing out any pending queues,
+ * and then restoring the TTY line discipline to what it was before it got
+ * hooked to IrDA (which usually is TTY again).
+ */
+static void irtty_close(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ /* Hm, with a dongle attached the dongle driver wants
+ * to close the dongle - which requires the use of
+ * some tty write and/or termios or ioctl operations.
+ * Are we allowed to call those when already requested
+ * to shutdown the ldisc?
+ * If not, we should somehow mark the dev as stale.
+ * The question remains how to close the dongle in this case...
+ * For now let's assume we are allowed to issue tty driver calls
+ * until we return here from the ldisc close. I'm just wondering
+ * how this behaves with hotpluggable serial hardware like an
+ * rs232-pcmcia card or usb-serial...
+ *
+ * priv->tty = NULL?;
+ */
+
+ /* we are dead now */
+ tty->disc_data = NULL;
+
+ sirdev_put_instance(priv->dev);
+
+ /* Stop tty */
+ irtty_stop_receiver(tty, TRUE);
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ if (tty->driver->stop)
+ tty->driver->stop(tty);
+
+ kfree(priv);
+
+ IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __FUNCTION__, tty->name);
+}
+
+/* ------------------------------------------------------- */
+
+static struct tty_ldisc irda_ldisc = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "irda",
+ .flags = 0,
+ .open = irtty_open,
+ .close = irtty_close,
+ .read = NULL,
+ .write = NULL,
+ .ioctl = irtty_ioctl,
+ .poll = NULL,
+ .receive_buf = irtty_receive_buf,
+ .receive_room = irtty_receive_room,
+ .write_wakeup = irtty_write_wakeup,
+ .owner = THIS_MODULE,
+};
+
+/* ------------------------------------------------------- */
+
+static int __init irtty_sir_init(void)
+{
+ int err;
+
+ if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
+ IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n",
+ err);
+ return err;
+}
+
+static void __exit irtty_sir_cleanup(void)
+{
+ int err;
+
+ if ((err = tty_register_ldisc(N_IRDA, NULL))) {
+ IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
+ __FUNCTION__, err);
+ }
+}
+
+module_init(irtty_sir_init);
+module_exit(irtty_sir_cleanup);
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("IrDA TTY device driver");
+MODULE_ALIAS_LDISC(N_IRDA);
+MODULE_LICENSE("GPL");
+
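
The line discipline above is only the byte pump between the tty and the generic SIR core it registers with; the protocol work happens behind the sirdev_* calls. The following is an illustrative sketch of that split, not code taken from the patch - it reuses the struct sir_driver fields and sirdev_* entry points shown above, while the driver name "my_sir", my_do_write() and my_attach() are made-up placeholders.

    #include <linux/module.h>
    #include <net/irda/irda.h>
    #include "sir-dev.h"

    /* illustrative only - mirrors the pattern of irtty-sir.c above */
    static int my_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
    {
    	/* push up to len bytes towards the hardware, return how many were taken */
    	return len;
    }

    static struct sir_driver my_sir_drv = {
    	.owner		= THIS_MODULE,
    	.driver_name	= "my_sir",		/* placeholder name */
    	.do_write	= my_do_write,		/* SIR core -> hardware */
    	/* .start_dev, .stop_dev, .set_speed, .set_dtr_rts as needed */
    };

    static int my_attach(const char *name)
    {
    	/* obtain a sir_dev instance (it carries the irda network device) */
    	struct sir_dev *dev = sirdev_get_instance(&my_sir_drv, name);

    	if (!dev)
    		return -ENODEV;

    	/* received bytes are fed upwards with sirdev_receive(dev, buf, count);
    	 * transmit completion is reported with sirdev_write_complete(dev);
    	 * sirdev_put_instance(dev) tears everything down again. */
    	return 0;
    }
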
diff --git a/drivers/net/irda/irtty-sir.h b/drivers/net/irda/irtty-sir.h
new file mode 100644
index 000000000000..b132d8f6eb13
--- /dev/null
+++ b/drivers/net/irda/irtty-sir.h
@@ -0,0 +1,34 @@
+/*********************************************************************
+ *
+ * sir_tty.h: definitions for the irtty_sir client driver (formerly irtty)
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRTTYSIR_H
+#define IRTTYSIR_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> // chipio_t
+
+#define IRTTY_IOC_MAGIC 'e'
+#define IRTTY_IOCTDONGLE _IO(IRTTY_IOC_MAGIC, 1)
+#define IRTTY_IOCGET _IOR(IRTTY_IOC_MAGIC, 2, struct irtty_info)
+#define IRTTY_IOC_MAXNR 2
+
+struct sirtty_cb {
+ magic_t magic;
+
+ struct sir_dev *dev;
+ struct tty_struct *tty;
+
+ chipio_t io; /* IrDA controller information */
+};
+
+#endif
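
The two IRTTY_* ioctls above are meant to be driven from user space once the "irda" line discipline has been pushed onto a serial port with the standard TIOCSETD tty ioctl. A rough user-space sketch of that sequence follows; it is illustrative only - the device path, the dongle type value 5 (IRDA_LITELINK_DONGLE, per the module alias further below) and the locally repeated definitions are assumptions, and error handling is trimmed.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #define N_IRDA 11				/* ldisc number from <linux/tty.h> */

    struct irtty_info { char name[6]; };	/* mirrors the driver's struct */
    #define IRTTY_IOCTDONGLE _IO('e', 1)
    #define IRTTY_IOCGET	 _IOR('e', 2, struct irtty_info)

    int main(void)
    {
    	int ldisc = N_IRDA;
    	struct irtty_info info;
    	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* example port */

    	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
    		return 1;		/* attaching the ldisc runs irtty_open() */

    	ioctl(fd, IRTTY_IOCTDONGLE, 5);	/* select a dongle type, e.g. litelink */

    	if (ioctl(fd, IRTTY_IOCGET, &info) == 0)
    		printf("tty is bound to %s\n", info.name);

    	pause();			/* keep the ldisc attached */
    	return 0;
    }
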
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
new file mode 100644
index 000000000000..73261c54bbfd
--- /dev/null
+++ b/drivers/net/irda/litelink-sir.c
@@ -0,0 +1,209 @@
+/*********************************************************************
+ *
+ * Filename: litelink.c
+ * Version: 1.1
+ * Description: Driver for the Parallax LiteLink dongle
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri May 7 12:50:33 1999
+ * Modified at: Fri Dec 17 09:14:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+/*
+ * Modified at: Thu Jan 15 2003
+ * Modified by: Eugene Crosser <crosser@average.org>
+ *
+ * Convert to "new" IRDA infrastructure for kernel 2.6
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 10 ms */
+
+static int litelink_open(struct sir_dev *dev);
+static int litelink_close(struct sir_dev *dev);
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed);
+static int litelink_reset(struct sir_dev *dev);
+
+/* These are the baud rates supported - 9600 must be the last one! */
+static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
+
+static struct dongle_driver litelink = {
+ .owner = THIS_MODULE,
+ .driver_name = "Parallax LiteLink",
+ .type = IRDA_LITELINK_DONGLE,
+ .open = litelink_open,
+ .close = litelink_close,
+ .reset = litelink_reset,
+ .set_speed = litelink_change_speed,
+};
+
+static int __init litelink_sir_init(void)
+{
+ return irda_register_dongle(&litelink);
+}
+
+static void __exit litelink_sir_cleanup(void)
+{
+ irda_unregister_dongle(&litelink);
+}
+
+static int litelink_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power up dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int litelink_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function litelink_change_speed (dev, speed)
+ *
+ * Change speed of the Litelink dongle. To cycle through the available
+ * baud rates, pulse RTS low for a few ms.
+ */
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int i;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* dongle already reset by irda-thread - current speed (dongle and
+ * port) is the default speed (115200 for litelink!)
+ */
+
+ /* Cycle through available baud rates until we reach the correct one */
+ for (i = 0; baud_rates[i] != speed; i++) {
+
+ /* end-of-list reached due to invalid speed request */
+ if (baud_rates[i] == 9600)
+ break;
+
+ /* Set DTR, clear RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+ }
+
+ dev->speed = baud_rates[i];
+
+ /* an invalid baud rate should not happen - but if it does, we return
+ * -EINVAL and leave the dongle configured for 9600 so the stack has a
+ * chance to recover
+ */
+
+ return (dev->speed == speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function litelink_reset (dev)
+ *
+ * Reset the Litelink type dongle.
+ *
+ */
+static int litelink_reset(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* probably the power-up can be dropped here, but with only
+ * 15 usec delay it's not worth the risk unless somebody with
+ * the hardware confirms it doesn't break anything...
+ */
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Clear RTS to reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* This dongle's speed defaults to 115200 bps */
+ dev->speed = 115200;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Parallax Litelink dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Litelink module
+ *
+ */
+module_init(litelink_sir_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Litelink module
+ *
+ */
+module_exit(litelink_sir_cleanup);
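
litelink-sir.c above is already close to the minimal shape of a dongle driver for the new SIR layer. For reference, the bare registration skeleton it follows looks roughly like the sketch below - "example_dongle" and its callbacks are hypothetical names, and a real driver would supply its own dongle type together with a matching MODULE_ALIAS("irda-dongle-<type>") so it can be loaded by that number.

    #include <linux/module.h>
    #include <net/irda/irda.h>
    #include "sir-dev.h"

    /* hypothetical skeleton - mirrors litelink-sir.c/ma600-sir.c above */
    static int example_open(struct sir_dev *dev)
    {
    	/* power the dongle via sirdev_set_dtr_rts(), then advertise the
    	 * supported speeds in dev->qos and call irda_qos_bits_to_value() */
    	return 0;
    }

    static int example_close(struct sir_dev *dev) { return 0; }
    static int example_reset(struct sir_dev *dev) { return 0; }
    static int example_set_speed(struct sir_dev *dev, unsigned speed) { return 0; }

    static struct dongle_driver example_dongle = {
    	.owner		= THIS_MODULE,
    	.driver_name	= "Example dongle",
    	.type		= IRDA_LITELINK_DONGLE,	/* a real driver uses its own id */
    	.open		= example_open,
    	.close		= example_close,
    	.reset		= example_reset,
    	.set_speed	= example_set_speed,
    };

    static int __init example_init(void)
    {
    	return irda_register_dongle(&example_dongle);
    }

    static void __exit example_cleanup(void)
    {
    	irda_unregister_dongle(&example_dongle);
    }

    module_init(example_init);
    module_exit(example_cleanup);
    MODULE_LICENSE("GPL");
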
diff --git a/drivers/net/irda/litelink.c b/drivers/net/irda/litelink.c
new file mode 100644
index 000000000000..7db11431d0f4
--- /dev/null
+++ b/drivers/net/irda/litelink.c
@@ -0,0 +1,179 @@
+/*********************************************************************
+ *
+ * Filename: litelink.c
+ * Version: 1.1
+ * Description: Driver for the Parallax LiteLink dongle
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri May 7 12:50:33 1999
+ * Modified at: Fri Dec 17 09:14:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 10 ms */
+
+static void litelink_open(dongle_t *self, struct qos_info *qos);
+static void litelink_close(dongle_t *self);
+static int litelink_change_speed(struct irda_task *task);
+static int litelink_reset(struct irda_task *task);
+
+/* These are the baudrates supported */
+static __u32 baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
+
+static struct dongle_reg dongle = {
+ .type = IRDA_LITELINK_DONGLE,
+ .open = litelink_open,
+ .close = litelink_close,
+ .reset = litelink_reset,
+ .change_speed = litelink_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init litelink_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit litelink_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void litelink_open(dongle_t *self, struct qos_info *qos)
+{
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+}
+
+static void litelink_close(dongle_t *self)
+{
+ /* Power off dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function litelink_change_speed (task)
+ *
+ * Change speed of the Litelink dongle. To cycle through the available
+ * baud rates, pulse RTS low for a few ms.
+ */
+static int litelink_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ int i;
+
+ /* Clear RTS to reset dongle */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Cycle through available baud rates until we reach the correct one */
+ for (i=0; i<5 && baud_rates[i] != speed; i++) {
+ /* Set DTR, clear RTS */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Set DTR, Set RTS */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+ }
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+/*
+ * Function litelink_reset (task)
+ *
+ * Reset the Litelink type dongle.
+ *
+ */
+static int litelink_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+
+ /* Power on dongle */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Clear RTS to reset dongle */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* This dongle's speed defaults to 115200 bps */
+ self->speed = 115200;
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Parallax Litelink dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Litelink module
+ *
+ */
+module_init(litelink_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Litelink module
+ *
+ */
+module_exit(litelink_cleanup);
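
The file above (and ma600.c and mcp2120.c further below) still uses the older dongle_reg interface, where each operation is driven through an irda_task state machine. Judging from these three drivers, a callback returns 0 once its final step is done, a positive jiffies count when it wants to be re-invoked after that delay, and a negative value on error. A condensed, hypothetical sketch of that shape (example_reset is a made-up name; the convention mirrors litelink.c/ma600.c):

    #include <net/irda/irda.h>
    #include <net/irda/irda_device.h>	/* dongle_t, irda_task, as in litelink.c */

    static int example_reset(struct irda_task *task)
    {
    	dongle_t *self = (dongle_t *) task->instance;

    	switch (task->state) {
    	case IRDA_TASK_INIT:
    		self->set_dtr_rts(self->dev, TRUE, TRUE);	/* power on */
    		irda_task_next_state(task, IRDA_TASK_WAIT1);
    		return msecs_to_jiffies(10);	/* re-run this task in ~10 ms */
    	case IRDA_TASK_WAIT1:
    		irda_task_next_state(task, IRDA_TASK_DONE);
    		return 0;			/* finished */
    	default:
    		irda_task_next_state(task, IRDA_TASK_DONE);
    		return -1;			/* error */
    	}
    }
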
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
new file mode 100644
index 000000000000..ebed168b7da6
--- /dev/null
+++ b/drivers/net/irda/ma600-sir.c
@@ -0,0 +1,264 @@
+/*********************************************************************
+ *
+ * Filename: ma600.c
+ * Version: 0.1
+ * Description: Implementation of the MA600 dongle
+ * Status: Experimental.
+ * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
+ * Created at: Sat Jun 10 20:02:35 2000
+ * Modified at: Sat Aug 16 09:34:13 2003
+ * Modified by: Martin Diehl <mad@mdiehl.de> (modified for new sir_dev)
+ *
+ * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
+ * information on the MA600 dongle
+ *
+ * Copyright (c) 2000 Leung, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int ma600_open(struct sir_dev *);
+static int ma600_close(struct sir_dev *);
+static int ma600_change_speed(struct sir_dev *, unsigned);
+static int ma600_reset(struct sir_dev *);
+
+/* control byte for MA600 */
+#define MA600_9600 0x00
+#define MA600_19200 0x01
+#define MA600_38400 0x02
+#define MA600_57600 0x03
+#define MA600_115200 0x04
+#define MA600_DEV_ID1 0x05
+#define MA600_DEV_ID2 0x06
+#define MA600_2400 0x08
+
+static struct dongle_driver ma600 = {
+ .owner = THIS_MODULE,
+ .driver_name = "MA600",
+ .type = IRDA_MA600_DONGLE,
+ .open = ma600_open,
+ .close = ma600_close,
+ .reset = ma600_reset,
+ .set_speed = ma600_change_speed,
+};
+
+
+static int __init ma600_sir_init(void)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ return irda_register_dongle(&ma600);
+}
+
+static void __exit ma600_sir_cleanup(void)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ irda_unregister_dongle(&ma600);
+}
+
+/*
+ Power on:
+ (0) Clear RTS and DTR for 1 second
+ (1) Set RTS and DTR for 1 second
+ (2) 9600 bps now
+ Note: assume RTS, DTR are clear before
+*/
+static int ma600_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Explicitly set the speeds we can accept */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
+ |IR_57600|IR_115200;
+ /* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int ma600_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+static __u8 get_control_byte(__u32 speed)
+{
+ __u8 byte;
+
+ switch (speed) {
+ default:
+ case 115200:
+ byte = MA600_115200;
+ break;
+ case 57600:
+ byte = MA600_57600;
+ break;
+ case 38400:
+ byte = MA600_38400;
+ break;
+ case 19200:
+ byte = MA600_19200;
+ break;
+ case 9600:
+ byte = MA600_9600;
+ break;
+ case 2400:
+ byte = MA600_2400;
+ break;
+ }
+
+ return byte;
+}
+
+/*
+ * Function ma600_change_speed (dev, speed)
+ *
+ * Set the speed for the MA600 type dongle. The dongle has already been
+ * reset to a known state (dongle default) by the irda thread.
+ *
+ * Algorithm
+ * 1. Reset (already done by irda thread state machine)
+ * 2. clear RTS, set DTR and wait for 1 ms
+ * 3. send Control Byte to the MA600 through TXD to set new baud rate
+ * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
+ * it takes about 10 msec)
+ * 4. set RTS, set DTR (return to NORMAL operation)
+ * 5. wait at least 10 ms; the new setting (baud rate, etc.) takes
+ * effect hereafter
+ */
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 byte;
+
+ IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__,
+ speed, dev->speed);
+
+ /* dongle already reset, dongle and port at default speed (9600) */
+
+ /* Set RTS low for 1 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ mdelay(1);
+
+ /* Write control byte */
+ byte = get_control_byte(speed);
+ sirdev_raw_write(dev, &byte, sizeof(byte));
+
+ /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud*/
+ msleep(15); /* old ma600 uses 15ms */
+
+#if 1
+ /* read-back of the control byte. ma600 is the first dongle driver
+ * which uses this so there might be some unidentified issues.
+ * Disable this in case of problems with readback.
+ */
+
+ sirdev_raw_read(dev, &byte, sizeof(byte));
+ if (byte != get_control_byte(speed)) {
+ IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
+ __FUNCTION__, (unsigned) byte,
+ (unsigned) get_control_byte(speed));
+ return -1;
+ }
+ else
+ IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__);
+#endif
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10ms */
+ msleep(10);
+
+ /* dongle is now switched to the new speed */
+ dev->speed = speed;
+
+ return 0;
+}
+
+/*
+ * Function ma600_reset (dev)
+ *
+ * This function resets the ma600 dongle.
+ *
+ * Algorithm:
+ * 0. DTR=0, RTS=1 and wait 10 ms
+ * 1. DTR=1, RTS=1 and wait 10 ms
+ * 2. 9600 bps now
+ */
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+int ma600_reset(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Reset the dongle : set DTR low for 10 ms */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ msleep(10);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ msleep(10);
+
+ dev->speed = 9600; /* That's the dongle-default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
+MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
+
+module_init(ma600_sir_init);
+module_exit(ma600_sir_cleanup);
+
diff --git a/drivers/net/irda/ma600.c b/drivers/net/irda/ma600.c
new file mode 100644
index 000000000000..f5e6836667fd
--- /dev/null
+++ b/drivers/net/irda/ma600.c
@@ -0,0 +1,354 @@
+/*********************************************************************
+ *
+ * Filename: ma600.c
+ * Version: 0.1
+ * Description: Implementation of the MA600 dongle
+ * Status: Experimental.
+ * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
+ * Created at: Sat Jun 10 20:02:35 2000
+ * Modified at:
+ * Modified by:
+ *
+ * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
+ * information on the MA600 dongle
+ *
+ * Copyright (c) 2000 Leung, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+/* define this macro for release version */
+//#define NDEBUG
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#ifndef NDEBUG
+ #undef IRDA_DEBUG
+ #define IRDA_DEBUG(n, args...) (printk(KERN_DEBUG args))
+
+ #undef ASSERT
+ #define ASSERT(expr, func) \
+ if(!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n",\
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ func}
+#endif
+
+/* convert hex value to ascii hex */
+static const char hexTbl[] = "0123456789ABCDEF";
+
+
+static void ma600_open(dongle_t *self, struct qos_info *qos);
+static void ma600_close(dongle_t *self);
+static int ma600_change_speed(struct irda_task *task);
+static int ma600_reset(struct irda_task *task);
+
+/* control byte for MA600 */
+#define MA600_9600 0x00
+#define MA600_19200 0x01
+#define MA600_38400 0x02
+#define MA600_57600 0x03
+#define MA600_115200 0x04
+#define MA600_DEV_ID1 0x05
+#define MA600_DEV_ID2 0x06
+#define MA600_2400 0x08
+
+static struct dongle_reg dongle = {
+ .type = IRDA_MA600_DONGLE,
+ .open = ma600_open,
+ .close = ma600_close,
+ .reset = ma600_reset,
+ .change_speed = ma600_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init ma600_init(void)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit ma600_cleanup(void)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ irda_device_unregister_dongle(&dongle);
+}
+
+/*
+ Power on:
+ (0) Clear RTS and DTR for 1 second
+ (1) Set RTS and DTR for 1 second
+ (2) 9600 bps now
+ Note: assume RTS, DTR are clear before
+*/
+static void ma600_open(dongle_t *self, struct qos_info *qos)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
+ |IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
+ irda_qos_bits_to_value(qos);
+
+ //self->set_dtr_rts(self->dev, FALSE, FALSE);
+ // should wait 1 second
+
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ // should wait 1 second
+}
+
+static void ma600_close(dongle_t *self)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+static __u8 get_control_byte(__u32 speed)
+{
+ __u8 byte;
+
+ switch (speed) {
+ default:
+ case 115200:
+ byte = MA600_115200;
+ break;
+ case 57600:
+ byte = MA600_57600;
+ break;
+ case 38400:
+ byte = MA600_38400;
+ break;
+ case 19200:
+ byte = MA600_19200;
+ break;
+ case 9600:
+ byte = MA600_9600;
+ break;
+ case 2400:
+ byte = MA600_2400;
+ break;
+ }
+
+ return byte;
+}
+
+/*
+ * Function ma600_change_speed (dev, state, speed)
+ *
+ * Set the speed for the MA600 type dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. Reset
+ * 2. clear RTS, set DTR and wait for 1ms
+ * 3. send Control Byte to the MA600 through TXD to set new baud rate
+ * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
+ * it takes about 10 msec)
+ * 4. set RTS, set DTR (return to NORMAL Operation)
+ * 5. wait at least 10 ms, new setting (baud rate, etc) takes effect here
+ * after
+ */
+static int ma600_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ static __u8 byte;
+ __u8 byte_echo;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ ASSERT(task != NULL, return -1;);
+
+ if (self->speed_task && self->speed_task != task) {
+ IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__);
+ return msecs_to_jiffies(10);
+ } else {
+ self->speed_task = task;
+ }
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ case IRDA_TASK_CHILD_INIT:
+ /*
+ * Need to reset the dongle and go to 9600 bps before
+ * programming
+ */
+ if (irda_task_execute(self, ma600_reset, NULL, task,
+ (void *) speed)) {
+ /* Dongle need more time to reset */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* give 1 second to finish */
+ ret = msecs_to_jiffies(1000);
+ } else {
+ irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
+ }
+ break;
+
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), resetting dongle timed out!\n",
+ __FUNCTION__);
+ ret = -1;
+ break;
+
+ case IRDA_TASK_CHILD_DONE:
+ /* Set DTR, Clear RTS */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+
+ ret = msecs_to_jiffies(1); /* Sleep 1 ms */
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+ break;
+
+ case IRDA_TASK_WAIT:
+ speed = (__u32) task->param;
+ byte = get_control_byte(speed);
+
+ /* Write control byte */
+ self->write(self->dev, &byte, sizeof(byte));
+
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+
+ /* Wait at least 10 ms */
+ ret = msecs_to_jiffies(15);
+ break;
+
+ case IRDA_TASK_WAIT1:
+ /* Read control byte echo */
+ self->read(self->dev, &byte_echo, sizeof(byte_echo));
+
+ if(byte != byte_echo) {
+ /* if control byte != echo, I don't know what to do */
+ printk(KERN_WARNING "%s() control byte written != read!\n", __FUNCTION__);
+ printk(KERN_WARNING "control byte = 0x%c%c\n",
+ hexTbl[(byte>>4)&0x0f], hexTbl[byte&0x0f]);
+ printk(KERN_WARNING "byte echo = 0x%c%c\n",
+ hexTbl[(byte_echo>>4) & 0x0f],
+ hexTbl[byte_echo & 0x0f]);
+ #ifndef NDEBUG
+ } else {
+ IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__);
+ #endif
+ }
+
+ /* Set DTR, Set RTS */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+
+ /* Wait at least 10 ms */
+ ret = msecs_to_jiffies(10);
+ break;
+
+ case IRDA_TASK_WAIT2:
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ break;
+
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function ma600_reset (driver)
+ *
+ * This function resets the ma600 dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * Algorithm:
+ * 0. DTR=0, RTS=1 and wait 10 ms
+ * 1. DTR=1, RTS=1 and wait 10 ms
+ * 2. 9600 bps now
+ */
+int ma600_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ ASSERT(task != NULL, return -1;);
+
+ if (self->reset_task && self->reset_task != task) {
+ IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__);
+ return msecs_to_jiffies(10);
+ } else
+ self->reset_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Clear DTR and Set RTS */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+ ret = msecs_to_jiffies(10); /* Sleep 10 ms */
+ break;
+ case IRDA_TASK_WAIT1:
+ /* Set DTR and RTS */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+ ret = msecs_to_jiffies(10); /* Sleep 10 ms */
+ break;
+ case IRDA_TASK_WAIT2:
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
+MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize MA600 module
+ *
+ */
+module_init(ma600_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup MA600 module
+ *
+ */
+module_exit(ma600_cleanup);
+
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
new file mode 100644
index 000000000000..67bd016e4df8
--- /dev/null
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -0,0 +1,230 @@
+/*********************************************************************
+ *
+ *
+ * Filename: mcp2120.c
+ * Version: 1.0
+ * Description: Implementation for the MCP2120 (Microchip)
+ * Status: Experimental.
+ * Author: Felix Tang (tangf@eyetap.org)
+ * Created at: Sun Mar 31 19:32:12 EST 2002
+ * Based on code by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 2002 Felix Tang, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int mcp2120_reset(struct sir_dev *dev);
+static int mcp2120_open(struct sir_dev *dev);
+static int mcp2120_close(struct sir_dev *dev);
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
+
+#define MCP2120_9600 0x87
+#define MCP2120_19200 0x8B
+#define MCP2120_38400 0x85
+#define MCP2120_57600 0x83
+#define MCP2120_115200 0x81
+
+#define MCP2120_COMMIT 0x11
+
+static struct dongle_driver mcp2120 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Microchip MCP2120",
+ .type = IRDA_MCP2120_DONGLE,
+ .open = mcp2120_open,
+ .close = mcp2120_close,
+ .reset = mcp2120_reset,
+ .set_speed = mcp2120_change_speed,
+};
+
+static int __init mcp2120_sir_init(void)
+{
+ return irda_register_dongle(&mcp2120);
+}
+
+static void __exit mcp2120_sir_cleanup(void)
+{
+ irda_unregister_dongle(&mcp2120);
+}
+
+static int mcp2120_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* no explicit power-on seems to be required here; the reset switches it on anyway */
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ return 0;
+}
+
+static int mcp2120_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ /* reset and inhibit mcp2120 */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ // sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function mcp2120_change_speed (dev, speed)
+ *
+ * Set the speed for the MCP2120.
+ *
+ */
+
+#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
+
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+ /* Set DTR to enter command mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(500);
+
+ ret = 0;
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = MCP2120_9600;
+ //printk("mcp2120 9600\n");
+ break;
+ case 19200:
+ control[0] = MCP2120_19200;
+ //printk("mcp2120 19200\n");
+ break;
+ case 38400:
+ control[0] = MCP2120_38400;
+ //printk("mcp2120 38400\n");
+ break;
+ case 57600:
+ control[0] = MCP2120_57600;
+ //printk("mcp2120 57600\n");
+ break;
+ case 115200:
+ control[0] = MCP2120_115200;
+ //printk("mcp2120 115200\n");
+ break;
+ }
+ control[1] = MCP2120_COMMIT;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+ dev->speed = speed;
+
+ state = MCP2120_STATE_WAIT_SPEED;
+ delay = 100;
+ //printk("mcp2120_change_speed: dongle_speed\n");
+ break;
+
+ case MCP2120_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ //printk("mcp2120_change_speed: mcp_wait\n");
+ break;
+
+ default:
+ IRDA_ERROR("%s(), undefine state %d\n", __FUNCTION__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function mcp2120_reset (driver)
+ *
+ * This function resets the mcp2120 dongle.
+ *
+ * Info: -set RTS to reset mcp2120
+ * -set DTR to set mcp2120 software command mode
+ * -mcp2120 defaults to 9600 baud after reset
+ *
+ * Algorithm:
+ * 0. Set RTS to reset mcp2120.
+ * 1. Clear RTS and wait for device reset timer of 30 ms (max).
+ *
+ */
+
+#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int mcp2120_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ //printk("mcp2120_reset: dongle_reset\n");
+ /* Reset dongle by setting RTS*/
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ state = MCP2120_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT1_RESET:
+ //printk("mcp2120_reset: mcp2120_wait1\n");
+ /* clear RTS and wait for at least 30 ms. */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ state = MCP2120_STATE_WAIT2_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT2_RESET:
+ //printk("mcp2120_reset mcp2120_wait2\n");
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ break;
+
+ default:
+ IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
+MODULE_DESCRIPTION("Microchip MCP2120");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
+
+module_init(mcp2120_sir_init);
+module_exit(mcp2120_sir_cleanup);
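
Unlike litelink-sir.c and ma600-sir.c, which simply sleep, the driver above splits its dongle operations across dev->fsm.substate steps and returns a positive delay after which the sir_dev state machine calls it again (0 means the sequence is complete, a negative value reports an error). A stripped-down, hypothetical sketch of that convention - EXAMPLE_STATE_WAIT and example_reset are made-up names, everything else mirrors mcp2120_reset() above:

    #include <linux/module.h>
    #include <net/irda/irda.h>
    #include "sir-dev.h"

    /* hypothetical example of the substate/delay convention of mcp2120-sir.c */
    #define EXAMPLE_STATE_WAIT	(SIRDEV_STATE_DONGLE_RESET + 1)

    static int example_reset(struct sir_dev *dev)
    {
    	unsigned state = dev->fsm.substate;
    	unsigned delay = 0;
    	int ret = 0;

    	switch (state) {
    	case SIRDEV_STATE_DONGLE_RESET:
    		sirdev_set_dtr_rts(dev, TRUE, TRUE);	/* start the reset pulse */
    		state = EXAMPLE_STATE_WAIT;
    		delay = 50;				/* come back after this delay */
    		break;

    	case EXAMPLE_STATE_WAIT:
    		sirdev_set_dtr_rts(dev, FALSE, FALSE);	/* back to normal mode */
    		break;					/* delay stays 0: finished */

    	default:
    		ret = -EINVAL;
    		break;
    	}

    	dev->fsm.substate = state;
    	return (delay > 0) ? delay : ret;
    }
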
diff --git a/drivers/net/irda/mcp2120.c b/drivers/net/irda/mcp2120.c
new file mode 100644
index 000000000000..5e6199eeef4f
--- /dev/null
+++ b/drivers/net/irda/mcp2120.c
@@ -0,0 +1,240 @@
+/*********************************************************************
+ *
+ *
+ * Filename: mcp2120.c
+ * Version: 1.0
+ * Description: Implementation for the MCP2120 (Microchip)
+ * Status: Experimental.
+ * Author: Felix Tang (tangf@eyetap.org)
+ * Created at: Sun Mar 31 19:32:12 EST 2002
+ * Based on code by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 2002 Felix Tang, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+static int mcp2120_reset(struct irda_task *task);
+static void mcp2120_open(dongle_t *self, struct qos_info *qos);
+static void mcp2120_close(dongle_t *self);
+static int mcp2120_change_speed(struct irda_task *task);
+
+#define MCP2120_9600 0x87
+#define MCP2120_19200 0x8B
+#define MCP2120_38400 0x85
+#define MCP2120_57600 0x83
+#define MCP2120_115200 0x81
+
+#define MCP2120_COMMIT 0x11
+
+static struct dongle_reg dongle = {
+ .type = IRDA_MCP2120_DONGLE,
+ .open = mcp2120_open,
+ .close = mcp2120_close,
+ .reset = mcp2120_reset,
+ .change_speed = mcp2120_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init mcp2120_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit mcp2120_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void mcp2120_open(dongle_t *self, struct qos_info *qos)
+{
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01;
+}
+
+static void mcp2120_close(dongle_t *self)
+{
+ /* Power off dongle */
+ /* reset and inhibit mcp2120 */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ //self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function mcp2120_change_speed (dev, speed)
+ *
+ * Set the speed for the MCP2120.
+ *
+ */
+static int mcp2120_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ __u8 control[2];
+ int ret = 0;
+
+ self->speed_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ /* Need to reset the dongle and go to 9600 bps before
+ programming */
+ //printk("Dmcp2120_change_speed irda_task_init\n");
+ if (irda_task_execute(self, mcp2120_reset, NULL, task,
+ (void *) speed))
+ {
+ /* Dongle need more time to reset */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* Give reset 1 sec to finish */
+ ret = msecs_to_jiffies(1000);
+ }
+ break;
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), resetting dongle timed out!\n",
+ __FUNCTION__);
+ ret = -1;
+ break;
+ case IRDA_TASK_CHILD_DONE:
+ /* Set DTR to enter command mode */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+ udelay(500);
+
+ switch (speed) {
+ case 9600:
+ default:
+ control[0] = MCP2120_9600;
+ //printk("mcp2120 9600\n");
+ break;
+ case 19200:
+ control[0] = MCP2120_19200;
+ //printk("mcp2120 19200\n");
+ break;
+ case 38400:
+ control[0] = MCP2120_38400;
+ //printk("mcp2120 38400\n");
+ break;
+ case 57600:
+ control[0] = MCP2120_57600;
+ //printk("mcp2120 57600\n");
+ break;
+ case 115200:
+ control[0] = MCP2120_115200;
+ //printk("mcp2120 115200\n");
+ break;
+ }
+ control[1] = MCP2120_COMMIT;
+
+ /* Write control bytes */
+ self->write(self->dev, control, 2);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+ ret = msecs_to_jiffies(100);
+ //printk("mcp2120_change_speed irda_child_done\n");
+ break;
+ case IRDA_TASK_WAIT:
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ //printk("mcp2120_change_speed irda_task_wait\n");
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function mcp2120_reset (driver)
+ *
+ * This function resets the mcp2120 dongle.
+ *
+ * Info: -set RTS to reset mcp2120
+ * -set DTR to set mcp2120 software command mode
+ * -mcp2120 defaults to 9600 baud after reset
+ *
+ * Algorithm:
+ * 0. Set RTS to reset mcp2120.
+ * 1. Clear RTS and wait for device reset timer of 30 ms (max).
+ *
+ */
+
+
+static int mcp2120_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ int ret = 0;
+
+ self->reset_task = task;
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ //printk("mcp2120_reset irda_task_init\n");
+ /* Reset dongle by setting RTS*/
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+ ret = msecs_to_jiffies(50);
+ break;
+ case IRDA_TASK_WAIT1:
+ //printk("mcp2120_reset irda_task_wait1\n");
+ /* clear RTS and wait for at least 30 ms. */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+ ret = msecs_to_jiffies(50);
+ break;
+ case IRDA_TASK_WAIT2:
+ //printk("mcp2120_reset irda_task_wait2\n");
+ /* Go back to normal mode */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
+MODULE_DESCRIPTION("Microchip MCP2120");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize MCP2120 module
+ *
+ */
+module_init(mcp2120_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup MCP2120 module
+ *
+ */
+module_exit(mcp2120_cleanup);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
new file mode 100644
index 000000000000..805714ec9a8a
--- /dev/null
+++ b/drivers/net/irda/nsc-ircc.c
@@ -0,0 +1,2222 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.c
+ * Version: 1.0
+ * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets
+ * Status: Stable.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Nov 7 21:43:15 1998
+ * Modified at: Wed Mar 1 11:29:34 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the BSR register on entry, and restore it on exit.
+ * It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb(iobase+BSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb(bank, iobase+BSR);
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in w83977af_ir.c since the implementations are quite
+ * similar.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "nsc-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+static char *driver_name = "nsc-ircc";
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id;
+
+/* Use BIOS settings by default, but the user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0, 0 };
+
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+
+/* These are the known NSC chips */
+static nsc_chip_t chips[] = {
+/* Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
+ { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
+ nsc_ircc_probe_108, nsc_ircc_init_108 },
+ { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
+ nsc_ircc_probe_338, nsc_ircc_init_338 },
+ /* Contributed by Steffen Pingel - IBM X40 */
+ { "PC8738x", { 0x164e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* Contributed by Jan Frey - IBM A30/A31 */
+ { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ { NULL }
+};
+
+/* Max 4 instances for now */
+static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+
+static char *dongle_types[] = {
+ "Differential serial interface",
+ "Differential serial interface",
+ "Reserved",
+ "Reserved",
+ "Sharp RY5HD01",
+ "Reserved",
+ "Single-ended serial interface",
+ "Consumer-IR only",
+ "HP HSDL-2300, HP HSDL-3600/HSDL-3610",
+ "IBM31T1100 or Temic TFDS6000/TFDS6500",
+ "Reserved",
+ "Reserved",
+ "HP HSDL-1100/HSDL-2100",
+ "HP HSDL-1100/HSDL-2100",
+ "Supports SIR Mode only",
+ "No dongle connected",
+};
+
+/* Some prototypes */
+static int nsc_ircc_open(int i, chipio_t *info);
+static int nsc_ircc_close(struct nsc_ircc_cb *self);
+static int nsc_ircc_setup(chipio_t *info);
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
+static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
+static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
+static int nsc_ircc_read_dongle_id (int iobase);
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
+
+static int nsc_ircc_net_open(struct net_device *dev);
+static int nsc_ircc_net_close(struct net_device *dev);
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
+static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
+
+/*
+ * Function nsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init nsc_ircc_init(void)
+{
+ chipio_t info;
+ nsc_chip_t *chip;
+ int ret = -ENODEV;
+ int cfg_base;
+ int cfg, id;
+ int reg;
+ int i = 0;
+
+ /* Probe for all the NSC chipsets we know about */
+ for (chip=chips; chip->name ; chip++) {
+ IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
+ chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg=0; cfg<3; cfg++) {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+ /* Read index register */
+ reg = inb(cfg_base);
+ if (reg == 0xff) {
+ IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base);
+ continue;
+ }
+
+ /* Read chip identification register */
+ outb(chip->cid_index, cfg_base);
+ id = inb(cfg_base+1);
+ if ((id & chip->cid_mask) == chip->cid_value) {
+ IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
+ __FUNCTION__, chip->name, id & ~chip->cid_mask);
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 0x2000) {
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(i, &info) == 0)
+ ret = 0;
+ i++;
+ } else {
+ IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
+ }
+ }
+
+ }
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit nsc_ircc_cleanup(void)
+{
+ int i;
+
+ pm_unregister_all(nsc_ircc_pmproc);
+
+ for (i=0; i < 4; i++) {
+ if (dev_self[i])
+ nsc_ircc_close(dev_self[i]);
+ }
+}
+
+/*
+ * Function nsc_ircc_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int __init nsc_ircc_open(int i, chipio_t *info)
+{
+ struct net_device *dev;
+ struct nsc_ircc_cb *self;
+ struct pm_dev *pmdev;
+ void *ret;
+ int err;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
+ info->cfg_base);
+
+ if ((nsc_ircc_setup(info)) == -1)
+ return -1;
+
+ IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);
+
+ dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
+ if (dev == NULL) {
+ IRDA_ERROR("%s(), can't allocate memory for "
+ "control block!\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ self = dev->priv;
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->index = i;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 32;
+
+ /* Reserve the ioports that we need */
+ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
+ if (!ret) {
+ IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
+ __FUNCTION__, self->io.fir_base);
+ err = -ENODEV;
+ goto out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* The only value we must override is the baudrate */
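+ /* Note: IR_4000000 lives in the second byte of the 16-bit baud-rate
+ * bit field, hence the << 8 below */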
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out2;
+
+ }
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
+
+ self->tx_buff.head =
+ dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out3;
+ }
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = nsc_ircc_hard_xmit_sir;
+ dev->open = nsc_ircc_net_open;
+ dev->stop = nsc_ircc_net_close;
+ dev->do_ioctl = nsc_ircc_net_ioctl;
+ dev->get_stats = nsc_ircc_net_get_stats;
+
+ err = register_netdev(dev);
+ if (err) {
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ goto out4;
+ }
+ IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+
+ /* Check if user has supplied a valid dongle id or not */
+ if ((dongle_id <= 0) ||
+ (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) {
+ dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
+
+ IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
+ dongle_types[dongle_id]);
+ } else {
+ IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
+ dongle_types[dongle_id]);
+ }
+
+ self->io.dongle_id = dongle_id;
+ nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
+
+ pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc);
+ if (pmdev)
+ pmdev->data = self;
+
+ return 0;
+ out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ out1:
+ free_netdev(dev);
+ dev_self[i] = NULL;
+ return err;
+}
+
+/*
+ * Function nsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
+ __FUNCTION__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_108 (chip, info)
+ *
+ * Initialize the NSC '108 chip
+ *
+ */
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ __u8 temp=0;
+
+ outb(2, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x00, cfg_base+1); /* Disable device */
+
+ /* Base Address and Interrupt Control Register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ switch (info->fir_base) {
+ case 0x3e8: outb(0x14, cfg_base+1); break;
+ case 0x2e8: outb(0x15, cfg_base+1); break;
+ case 0x3f8: outb(0x16, cfg_base+1); break;
+ case 0x2f8: outb(0x17, cfg_base+1); break;
+ default: IRDA_ERROR("%s(), invalid base_address", __FUNCTION__);
+ }
+
+ /* Control Signal Routing Register (CSRT) */
+ switch (info->irq) {
+ case 3: temp = 0x01; break;
+ case 4: temp = 0x02; break;
+ case 5: temp = 0x03; break;
+ case 7: temp = 0x04; break;
+ case 9: temp = 0x05; break;
+ case 11: temp = 0x06; break;
+ case 15: temp = 0x07; break;
+ default: IRDA_ERROR("%s(), invalid irq", __FUNCTION__);
+ }
+ outb(CFG_108_CSRT, cfg_base);
+
+ switch (info->dma) {
+ case 0: outb(0x08+temp, cfg_base+1); break;
+ case 1: outb(0x10+temp, cfg_base+1); break;
+ case 3: outb(0x18+temp, cfg_base+1); break;
+ default: IRDA_ERROR("%s(), invalid dma", __FUNCTION__);
+ }
+
+ outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x03, cfg_base+1); /* Enable device */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_108 (chip, info)
+ *
+ * Probe the '108 chip and read back the I/O base, IRQ and DMA
+ * configuration set up by the BIOS
+ *
+ */
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg;
+
+ /* Read address and interrupt control register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x03) {
+ case 0:
+ info->fir_base = 0x3e8;
+ break;
+ case 1:
+ info->fir_base = 0x2e8;
+ break;
+ case 2:
+ info->fir_base = 0x3f8;
+ break;
+ case 3:
+ info->fir_base = 0x2f8;
+ break;
+ }
+ info->sir_base = info->fir_base;
+ IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__,
+ info->fir_base);
+
+ /* Read control signals routing register (CSRT) */
+ outb(CFG_108_CSRT, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x07) {
+ case 0:
+ info->irq = -1;
+ break;
+ case 1:
+ info->irq = 3;
+ break;
+ case 2:
+ info->irq = 4;
+ break;
+ case 3:
+ info->irq = 5;
+ break;
+ case 4:
+ info->irq = 7;
+ break;
+ case 5:
+ info->irq = 9;
+ break;
+ case 6:
+ info->irq = 11;
+ break;
+ case 7:
+ info->irq = 15;
+ break;
+ }
+ IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
+
+ /* Currently we only read Rx DMA but it will also be used for Tx */
+ switch ((reg >> 3) & 0x03) {
+ case 0:
+ info->dma = -1;
+ break;
+ case 1:
+ info->dma = 0;
+ break;
+ case 2:
+ info->dma = 1;
+ break;
+ case 3:
+ info->dma = 3;
+ break;
+ }
+ IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
+
+ /* Read mode control register (MCTL) */
+ outb(CFG_108_MCTL, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = reg & 0x01;
+ info->suspended = !((reg >> 1) & 0x01);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_338 (chip, info)
+ *
+ * Initialize the NSC '338 chip. Remember that the 87338 needs two
+ * consecutive writes to the data registers while CPU interrupts are
+ * disabled. The 97338 does not require this, but it shouldn't do any
+ * harm if we do it anyway.
+ */
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info)
+{
+ /* No init yet */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_338 (chip, info)
+ *
+ *
+ * Probe the '338 chip and read back the I/O base, IRQ and DMA
+ * configuration set up by the BIOS (legacy or PnP mode)
+ *
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg, com = 0;
+ int pnp;
+
+ /* Read function enable register (FER) */
+ outb(CFG_338_FER, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = (reg >> 2) & 0x01;
+
+ /* Check if we are in Legacy or PnP mode */
+ outb(CFG_338_PNP0, cfg_base);
+ reg = inb(cfg_base+1);
+
+ pnp = (reg >> 3) & 0x01;
+ if (pnp) {
+ IRDA_DEBUG(2, "(), Chip is in PnP mode\n");
+ outb(0x46, cfg_base);
+ reg = (inb(cfg_base+1) & 0xfe) << 2;
+
+ outb(0x47, cfg_base);
+ reg |= ((inb(cfg_base+1) & 0xfc) << 8);
+
+ info->fir_base = reg;
+ } else {
+ /* Read function address register (FAR) */
+ outb(CFG_338_FAR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch ((reg >> 4) & 0x03) {
+ case 0:
+ info->fir_base = 0x3f8;
+ break;
+ case 1:
+ info->fir_base = 0x2f8;
+ break;
+ case 2:
+ com = 3;
+ break;
+ case 3:
+ com = 4;
+ break;
+ }
+
+ if (com) {
+ switch ((reg >> 6) & 0x03) {
+ case 0:
+ if (com == 3)
+ info->fir_base = 0x3e8;
+ else
+ info->fir_base = 0x2e8;
+ break;
+ case 1:
+ if (com == 3)
+ info->fir_base = 0x338;
+ else
+ info->fir_base = 0x238;
+ break;
+ case 2:
+ if (com == 3)
+ info->fir_base = 0x2e8;
+ else
+ info->fir_base = 0x2e0;
+ break;
+ case 3:
+ if (com == 3)
+ info->fir_base = 0x220;
+ else
+ info->fir_base = 0x228;
+ break;
+ }
+ }
+ }
+ info->sir_base = info->fir_base;
+
+ /* Read PnP register 1 (PNP1) */
+ outb(CFG_338_PNP1, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->irq = reg >> 4;
+
+ /* Read PnP register 3 (PNP3) */
+ outb(CFG_338_PNP3, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->dma = (reg & 0x07) - 1;
+
+ /* Read power and test register (PTR) */
+ outb(CFG_338_PTR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->suspended = reg & 0x01;
+
+ return 0;
+}
+
+
+/*
+ * Function nsc_ircc_init_39x (chip, info)
+ *
+ * Now that we know it's a '39x (see probe below), we need to
+ * configure it so we can use it.
+ *
+ * The NSC '39x is a Super I/O chip with a "bank" architecture: the
+ * configuration of each function (serial, parallel, floppy...) lives
+ * in a different bank (Logical Device Number).
+ * The base address, irq and dma configuration registers are common
+ * to all functionalities (index 0x30 to 0x7F).
+ * There is only one configuration register specific to the
+ * serial port, CFG_39X_SPC.
+ * JeanII
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
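+ *
+ * Access pattern used below: write a register index to cfg_base and
+ * read/write its value at cfg_base+1, after selecting the logical
+ * device with CFG_39X_LDN.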
+ */
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int enabled;
+
+ /* User is sure about his config... accept it. */
+ IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
+ "io=0x%04x, irq=%d, dma=%d\n",
+ __FUNCTION__, info->fir_base, info->irq, info->dma);
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_39x (chip, info)
+ *
+ * Test if we really have a '39x chip at the given address
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
+ */
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg1, reg2, irq, irqt, dma1, dma2;
+ int enabled, susp;
+
+ IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
+ __FUNCTION__, cfg_base);
+
+ /* This function should be executed with irq off to avoid
+ * another driver messing with the Super I/O bank - Jean II */
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+ /* Read info about SP2; store it in the info struct */
+ outb(CFG_39X_BASEH, cfg_base);
+ reg1 = inb(cfg_base+1);
+ outb(CFG_39X_BASEL, cfg_base);
+ reg2 = inb(cfg_base+1);
+ info->fir_base = (reg1 << 8) | reg2;
+
+ outb(CFG_39X_IRQNUM, cfg_base);
+ irq = inb(cfg_base+1);
+ outb(CFG_39X_IRQSEL, cfg_base);
+ irqt = inb(cfg_base+1);
+ info->irq = irq;
+
+ outb(CFG_39X_DMA0, cfg_base);
+ dma1 = inb(cfg_base+1);
+ outb(CFG_39X_DMA1, cfg_base);
+ dma2 = inb(cfg_base+1);
+ info->dma = dma1 -1;
+
+ outb(CFG_39X_ACT, cfg_base);
+ info->enabled = enabled = inb(cfg_base+1) & 0x01;
+
+ outb(CFG_39X_SPC, cfg_base);
+ susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
+
+ IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __FUNCTION__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_setup (info)
+ *
+ * Returns non-negative on success.
+ *
+ */
+static int nsc_ircc_setup(chipio_t *info)
+{
+ int version;
+ int iobase = info->fir_base;
+
+ /* Read the Module ID */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+MID);
+
+ IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
+ __FUNCTION__, driver_name, version);
+
+ /* Should be 0x2? */
+ if (0x20 != (version & 0xf0)) {
+ IRDA_ERROR("%s, Wrong chip version %02x\n",
+ driver_name, version);
+ return -1;
+ }
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_EXT_SL, iobase+ECR1);
+ switch_bank(iobase, BANK0);
+
+ /* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
+ outb(0x03, iobase+LCR); /* 8 bit word length */
+ outb(MCR_SIR, iobase+MCR); /* Start at SIR-mode, also clears LSR*/
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* IRCR2: FEND_MD is not set */
+ switch_bank(iobase, BANK5);
+ outb(0x02, iobase+4);
+
+ /* Make sure that some defaults are OK */
+ switch_bank(iobase, BANK6);
+ outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
+ outb(0x0a, iobase+1); /* Set MIR pulse width */
+ outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
+ outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */
+
+ /* Enable receive interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_RXHDL_IE, iobase+IER);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_read_dongle_id (iobase)
+ *
+ * Try to read the dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int nsc_ircc_read_dongle_id (int iobase)
+{
+ int dongle_id;
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
+ outb(0x00, iobase+7);
+
+ /* ID0, 1, and 2 are pulled up/down very slowly */
+ udelay(50);
+
+ /* IRCFG1: read the ID bits */
+ dongle_id = inb(iobase+4) & 0x0f;
+
+#ifdef BROKEN_DONGLE_ID
+ if (dongle_id == 0x0a)
+ dongle_id = 0x09;
+#endif
+ /* Go back to bank 0 before returning */
+ switch_bank(iobase, BANK0);
+
+ outb(bank, iobase+BSR);
+
+ return dongle_id;
+}
+
+/*
+ * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
+ *
+ * This function initializes the dongle for the transceiver that is
+ * used. This procedure needs to be executed once after
+ * power-on/reset. It also needs to be used whenever you suspect that
+ * the dongle is changed.
+ */
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
+{
+ int bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved, but this is what the Thinkpad reports */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ IRDA_DEBUG(0, "%s(), %s\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ /*
+ * Set irsl0 as input, irsl[1-2] as output, and separate
+ * inputs are used for SIR and MIR/FIR
+ */
+ outb(0x48, iobase+7);
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0F: /* No dongle connected */
+ IRDA_DEBUG(0, "%s(), %s\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
+ __FUNCTION__, dongle_id);
+ }
+
+ /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
+ outb(0x00, iobase+4);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+} /* nsc_ircc_init_dongle_interface */
+
+/*
+ * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ *
+ * Change the speed of the attached dongle
+ *
+ */
+static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
+{
+ __u8 bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG1: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ IRDA_DEBUG(0, "%s(), %s\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ outb(0x00, iobase+4);
+ if (speed > 115200)
+ outb(0x01, iobase+4);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x01, iobase+4);
+
+ if (speed == 4000000) {
+ /* There was a cli() there, but we now are already
+ * under spin_lock_irqsave() - JeanII */
+ outb(0x81, iobase+4);
+ outb(0x80, iobase+4);
+ } else
+ outb(0x00, iobase+4);
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ break;
+ case 0x0F: /* No dongle connected */
+ IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
+ __FUNCTION__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), invalid data_rate\n", __FUNCTION__);
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with irq off and spin-lock.
+ */
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev = self->netdev;
+ __u8 mcr = MCR_SIR;
+ int iobase;
+ __u8 bank;
+ __u8 ier; /* Interrupt enable register */
+
+ IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Select Bank 2 */
+ switch_bank(iobase, BANK2);
+
+ outb(0x00, iobase+BGDH);
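+ /* Baud generator divisor = 115200 / speed (e.g. 0x0c = 12 for 9600) */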
+ switch (speed) {
+ case 9600: outb(0x0c, iobase+BGDL); break;
+ case 19200: outb(0x06, iobase+BGDL); break;
+ case 38400: outb(0x03, iobase+BGDL); break;
+ case 57600: outb(0x02, iobase+BGDL); break;
+ case 115200: outb(0x01, iobase+BGDL); break;
+ case 576000:
+ switch_bank(iobase, BANK5);
+
+ /* IRCR2: MDRS is set */
+ outb(inb(iobase+4) | 0x04, iobase+4);
+
+ mcr = MCR_MIR;
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
+ break;
+ case 1152000:
+ mcr = MCR_MIR;
+ IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__);
+ break;
+ case 4000000:
+ mcr = MCR_FIR;
+ IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__);
+ break;
+ default:
+ mcr = MCR_FIR;
+ IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
+ __FUNCTION__, speed);
+ break;
+ }
+
+ /* Set appropriate speed mode */
+ switch_bank(iobase, BANK0);
+ outb(mcr | MCR_TX_DFR, iobase+MCR);
+
+ /* Give some hints to the transceiver */
+ nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, BANK0);
+ outb(0x00, iobase+FCR);
+ outb(FCR_FIFO_EN, iobase+FCR);
+ outb(FCR_RXTH| /* Set Rx FIFO threshold */
+ FCR_TXTH| /* Set Tx FIFO threshold */
+ FCR_TXSR| /* Reset Tx FIFO */
+ FCR_RXSR| /* Reset Rx FIFO */
+ FCR_FIFO_EN, /* Enable FIFOs */
+ iobase+FCR);
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, BANK0);
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->hard_start_xmit = nsc_ircc_hard_xmit_fir;
+ ier = IER_SFIF_IE;
+ nsc_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->hard_start_xmit = nsc_ircc_hard_xmit_sir;
+ ier = IER_RXHDL_IE;
+ }
+ /* Set our current interrupt mask */
+ outb(ier, iobase+IER);
+
+ /* Restore BSR */
+ outb(bank, iobase+BSR);
+
+ /* Make sure interrupt handlers keep the proper interrupt mask */
+ return ier;
+}
+
+/*
+ * Function nsc_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+
+ self = (struct nsc_ircc_cb *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we just sent a frame, we get called before
+ * the last bytes get out (because of the SIR FIFO).
+ * If this is the case, let interrupt handler change
+ * the speed itself... Jean II */
+ if (self->io.direction == IO_RECV) {
+ nsc_ircc_change_speed(self, speed);
+ /* TODO : For SIR->SIR, the next packet
+ * may get corrupted - Jean II */
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Queue will be restarted after speed change
+ * to make sure packets get through the
+ * proper xmit handler - Jean II */
+ }
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ self->tx_buff.data = self->tx_buff.head;
+
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->stats.tx_bytes += self->tx_buff.len;
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, BANK0);
+ outb(IER_TXLDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+ int mtt, diff;
+
+ self = (struct nsc_ircc_cb *) dev->priv;
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we are currently transmitting, defer to
+ * interrupt handler. - Jean II */
+ if(self->tx_fifo.len == 0) {
+ nsc_ircc_change_speed(self, speed);
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Keep queue stopped :
+ * the speed change operation may change the
+ * xmit handler, and we want to make sure
+ * the next packet gets through the proper
+ * Tx path, so block the Tx queue until
+ * the speed change has been done.
+ * Jean II */
+ }
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else {
+ /* Change speed after current frame */
+ self->new_speed = speed;
+ }
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ self->stats.tx_bytes += skb->len;
+
+ memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
+ skb->len);
+
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1) {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ /* Check how much time we have used already */
+ do_gettimeofday(&self->now);
+ diff = self->now.tv_usec - self->stamp.tv_usec;
+ if (diff < 0)
+ diff += 1000000;
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+
+ /*
+ * Use timer if delay larger than 125 us, and
+ * use udelay for smaller values which should
+ * be acceptable
+ */
+ if (mtt > 125) {
+ /* Adjust for timer resolution */
+ mtt = mtt / 125;
+
+ /* Setup timer */
+ switch_bank(iobase, BANK4);
+ outb(mtt & 0xff, iobase+TMRL);
+ outb((mtt >> 8) & 0x0f, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_TMR_IE, iobase+IER);
+
+ /* Timer will take care of the rest */
+ goto out;
+ } else
+ udelay(mtt);
+ }
+ }
+ /* Enable DMA interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_DMA_IE, iobase+IER);
+
+ /* Transmit frame */
+ nsc_ircc_dma_xmit(self, iobase);
+ }
+ out:
+ /* Not busy transmitting anymore if window is not full,
+ * and if we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0))
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_dma_xmit (self, iobase)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
+{
+ int bsr;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ self->io.direction = IO_XMIT;
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
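+ /* DMA address = bus address of tx_buff plus the offset of this
+ * frame within the coherent buffer */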
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Enable DMA and SIR interaction pulse */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_pio_write (iobase, buf, len, fifo_size)
+ *
+ * Transmit data using PIO. Returns the number of bytes that actually
+ * got transferred
+ *
+ */
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 bank;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ switch_bank(iobase, BANK0);
+ if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
+ IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
+ __FUNCTION__);
+
+ /* FIFO may still be filled to the Tx interrupt threshold */
+ fifo_size -= 17;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase+TXD);
+ }
+
+ IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
+ __FUNCTION__, fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return actual;
+}
+
+/*
+ * Function nsc_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bank;
+ int ret = TRUE;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Check for underrun! */
+ if (inb(iobase+ASCR) & ASCR_TXUR) {
+ self->stats.tx_errors++;
+ self->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 into it */
+ outb(ASCR_TXUR, iobase+ASCR);
+ } else {
+ self->stats.tx_packets++;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ } else {
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames and
+ * that we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bsr;
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO. This will also flush the ST_FIFO */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_dma_receive_complete (self)
+ *
+ * Finished with receiving frames
+ *
+ *
+ */
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status;
+ __u8 bank;
+ int len;
+
+ st_fifo = &self->st_fifo;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Read all entries in status FIFO */
+ switch_bank(iobase, BANK5);
+ while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) {
+ /* We must empty the status FIFO no matter what */
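+ /* Frame length: low byte in RFLFL, upper 5 bits in RFLFH */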
+ len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__);
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+ /* Try to process all entries in status FIFO */
+ while (st_fifo->len > 0) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FRM_ST_ERR_MSK) {
+ if (status & FRM_ST_LOST_FR) {
+ /* Add number of lost frames to stats */
+ self->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FRM_ST_MAX_LEN)
+ self->stats.rx_length_errors++;
+
+ if (status & FRM_ST_PHY_ERR)
+ self->stats.rx_frame_errors++;
+
+ if (status & FRM_ST_BAD_CRC)
+ self->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FRM_ST_OVR1)
+ self->stats.rx_fifo_errors++;
+
+ if (status & FRM_ST_OVR2)
+ self->stats.rx_fifo_errors++;
+ } else {
+ /*
+ * First we must make sure that the frame we
+ * want to deliver is all in main memory. If we
+ * cannot tell, then we check if the Rx FIFO is
+ * empty. If not then we will have to take a nap
+ * and try again later.
+ */
+ if (st_fifo->pending_bytes < self->io.fifo_size) {
+ switch_bank(iobase, BANK0);
+ if (inb(iobase+LSR) & LSR_RXDA) {
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 125 us
+ */
+ switch_bank(iobase, BANK4);
+ outb(0x02, iobase+TMRL); /* x 125 us */
+ outb(0x00, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ do_gettimeofday(&self->stamp);
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL) {
+ IRDA_WARNING("%s(), memory squeeze, "
+ "dropping frame.\n",
+ __FUNCTION__);
+ self->stats.rx_dropped++;
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
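+ /* SIR/MIR frames end in a 16-bit CRC, FIR frames in a 32-bit
+ * CRC, hence len-2 vs len-4 below */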
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len-2);
+ memcpy(skb->data, self->rx_buff.data, len-2);
+ } else {
+ skb_put(skb, len-4);
+ memcpy(skb->data, self->rx_buff.data, len-4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ self->netdev->last_rx = jiffies;
+ }
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return TRUE;
+}
+
+/*
+ * Function nsc_ircc_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
+{
+ __u8 byte;
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase+RXD);
+ async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+ byte);
+ } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
+}
+
+/*
+ * Function nsc_ircc_sir_interrupt (self, eir)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
+{
+ int actual;
+
+ /* Check if transmit FIFO is low on data */
+ if (eir & EIR_TXLDL_EV) {
+ /* Write data left in transmit buffer */
+ actual = nsc_ircc_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0)
+ self->ier = IER_TXLDL_IE;
+ else {
+
+ self->stats.tx_packets++;
+ netif_wake_queue(self->netdev);
+ self->ier = IER_TXEMP_IE;
+ }
+
+ }
+ /* Check if transmission has completed */
+ if (eir & EIR_TXEMP_EV) {
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ self->ier = IER_RXHDL_IE;
+ /* Check if we need to change the speed?
+ * Need to be after self->io.direction to avoid race with
+ * nsc_ircc_hard_xmit_sir() - Jean II */
+ if (self->new_speed) {
+ IRDA_DEBUG(2, "%s(), Changing speed!\n", __FUNCTION__);
+ self->ier = nsc_ircc_change_speed(self,
+ self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+
+ /* Check if we are going to FIR */
+ if (self->io.speed > 115200) {
+ /* No need to do anymore SIR stuff */
+ return;
+ }
+ }
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (eir & EIR_RXHDL_EV) {
+ nsc_ircc_pio_receive(self);
+
+ /* Keep receiving */
+ self->ier = IER_RXHDL_IE;
+ }
+}
+
+/*
+ * Function nsc_ircc_fir_interrupt (self, eir)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
+ int eir)
+{
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Status FIFO event*/
+ if (eir & EIR_SFIF_EV) {
+ /* Check if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ /* Wait for next status FIFO interrupt */
+ self->ier = IER_SFIF_IE;
+ } else {
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ } else if (eir & EIR_TMR_EV) { /* Timer finished */
+ /* Disable timer */
+ switch_bank(iobase, BANK4);
+ outb(0, iobase+IRCR1);
+
+ /* Clear timer event */
+ switch_bank(iobase, BANK0);
+ outb(ASCR_CTE, iobase+ASCR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Interrupt on DMA */
+ self->ier = IER_DMA_IE;
+ } else {
+ /* Check (again) if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ self->ier = IER_SFIF_IE;
+ } else {
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ }
+ } else if (eir & EIR_DMA_EV) {
+ /* Finished with all transmissions? */
+ if (nsc_ircc_dma_xmit_complete(self)) {
+ if(self->new_speed != 0) {
+ /* As we stop the Tx queue, the speed change
+ * needs to be done when the Tx fifo is
+ * empty. Ask for a Tx done interrupt */
+ self->ier = IER_TXEMP_IE;
+ } else {
+ /* Check if there are more frames to be
+ * transmitted */
+ if (irda_device_txqueue_empty(self->netdev)) {
+ /* Prepare for receive */
+ nsc_ircc_dma_receive(self);
+ self->ier = IER_SFIF_IE;
+ } else
+ IRDA_WARNING("%s(), potential "
+ "Tx queue lockup !\n",
+ __FUNCTION__);
+ }
+ } else {
+ /* Not finished yet, so interrupt on DMA again */
+ self->ier = IER_DMA_IE;
+ }
+ } else if (eir & EIR_TXEMP_EV) {
+ /* The Tx FIFO has totally drained out, so now we can change
+ * the speed... - Jean II */
+ self->ier = nsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+ /* Note : nsc_ircc_change_speed() restarted Rx fifo */
+ }
+
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct nsc_ircc_cb *self;
+ __u8 bsr, eir;
+ int iobase;
+
+ if (!dev) {
+ IRDA_WARNING("%s: irq %d for unknown device.\n",
+ driver_name, irq);
+ return IRQ_NONE;
+ }
+ self = (struct nsc_ircc_cb *) dev->priv;
+
+ spin_lock(&self->lock);
+
+ iobase = self->io.fir_base;
+
+ bsr = inb(iobase+BSR); /* Save current bank */
+
+ switch_bank(iobase, BANK0);
+ self->ier = inb(iobase+IER);
+ eir = inb(iobase+EIR) & self->ier; /* Mask out the interesting ones */
+
+ outb(0, iobase+IER); /* Disable interrupts */
+
+ if (eir) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ nsc_ircc_fir_interrupt(self, iobase, eir);
+ else
+ nsc_ircc_sir_interrupt(self, eir);
+ }
+
+ outb(self->ier, iobase+IER); /* Restore interrupts */
+ outb(bsr, iobase+BSR); /* Restore bank register */
+
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function nsc_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+ __u8 bank;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ bank = inb(iobase+BSR);
+ switch_bank(iobase, BANK2);
+ if ((inb(iobase+RXFLV) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(bank, iobase+BSR);
+ } else
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return status;
+}
+
+/*
+ * Function nsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int nsc_ircc_net_open(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+ __u8 bank;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct nsc_ircc_cb *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
+ IRDA_WARNING("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ IRDA_WARNING("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* turn on interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int nsc_ircc_net_close(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ __u8 bank;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = (struct nsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ iobase = self->io.fir_base;
+
+ disable_dma(self->io.dma);
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ /* This is already protected */
+ irq->ifr_receiving = nsc_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self = (struct nsc_ircc_cb *) dev->priv;
+
+ return &self->stats;
+}
+
+static void nsc_ircc_suspend(struct nsc_ircc_cb *self)
+{
+ IRDA_MESSAGE("%s, Suspending\n", driver_name);
+
+ if (self->io.suspended)
+ return;
+
+ nsc_ircc_net_close(self->netdev);
+
+ self->io.suspended = 1;
+}
+
+static void nsc_ircc_wakeup(struct nsc_ircc_cb *self)
+{
+ if (!self->io.suspended)
+ return;
+
+ nsc_ircc_setup(&self->io);
+ nsc_ircc_net_open(self->netdev);
+
+ IRDA_MESSAGE("%s, Waking up\n", driver_name);
+
+ self->io.suspended = 0;
+}
+
+static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
+{
+ struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data;
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ nsc_ircc_suspend(self);
+ break;
+ case PM_RESUME:
+ nsc_ircc_wakeup(self);
+ break;
+ }
+ }
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("NSC IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+module_param(dongle_id, int, 0);
+MODULE_PARM_DESC(dongle_id, "Type-id of used dongle");
+
+module_init(nsc_ircc_init);
+module_exit(nsc_ircc_cleanup);
+
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
new file mode 100644
index 000000000000..6edf7e514624
--- /dev/null
+++ b/drivers/net/irda/nsc-ircc.h
@@ -0,0 +1,280 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Nov 13 14:37:40 1998
+ * Modified at: Sun Jan 23 17:47:00 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef NSC_IRCC_H
+#define NSC_IRCC_H
+
+#include <linux/time.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Config registers for the '108 */
+#define CFG_108_BAIC 0x00
+#define CFG_108_CSRT 0x01
+#define CFG_108_MCTL 0x02
+
+/* Config registers for the '338 */
+#define CFG_338_FER 0x00
+#define CFG_338_FAR 0x01
+#define CFG_338_PTR 0x02
+#define CFG_338_PNP0 0x1b
+#define CFG_338_PNP1 0x1c
+#define CFG_338_PNP3 0x4f
+
+/* Config registers for the '39x (in the logical device bank) */
+#define CFG_39X_LDN 0x07 /* Logical device number (Super I/O bank) */
+#define CFG_39X_SIOCF1 0x21 /* SuperI/O Config */
+#define CFG_39X_ACT 0x30 /* Device activation */
+#define CFG_39X_BASEH 0x60 /* Device base address (high bits) */
+#define CFG_39X_BASEL 0x61 /* Device base address (low bits) */
+#define CFG_39X_IRQNUM 0x70 /* Interrupt number & wake up enable */
+#define CFG_39X_IRQSEL 0x71 /* Interrupt select (edge/level + polarity) */
+#define CFG_39X_DMA0 0x74 /* DMA 0 configuration */
+#define CFG_39X_DMA1 0x75 /* DMA 1 configuration */
+#define CFG_39X_SPC 0xF0 /* Serial port configuration register */
+
+/* Flags for configuration register CRF0 */
+#define APEDCRC 0x02
+#define ENBNKSEL 0x01
+
+/* Set 0 */
+#define TXD 0x00 /* Transmit data port */
+#define RXD 0x00 /* Receive data port */
+
+/* Register 1 */
+#define IER 0x01 /* Interrupt Enable Register*/
+#define IER_RXHDL_IE 0x01 /* Receiver high data level interrupt */
+#define IER_TXLDL_IE 0x02 /* Transmitter low data level interrupt */
+#define IER_LS_IE 0x04 /* Link Status Interrupt */
+#define IER_ETXURI 0x04 /* Tx underrun */
+#define IER_DMA_IE 0x10 /* DMA finished interrupt */
+#define IER_TXEMP_IE 0x20
+#define IER_SFIF_IE 0x40 /* Frame status FIFO intr */
+#define IER_TMR_IE 0x80 /* Timer event */
+
+#define FCR 0x02 /* (write only) */
+#define FCR_FIFO_EN 0x01 /* Enable FIFO's */
+#define FCR_RXSR 0x02 /* Rx FIFO soft reset */
+#define FCR_TXSR 0x04 /* Tx FIFO soft reset */
+#define FCR_RXTH 0x40 /* Rx FIFO threshold (set to 16) */
+#define FCR_TXTH 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define EIR 0x02 /* (read only) */
+#define EIR_RXHDL_EV 0x01
+#define EIR_TXLDL_EV 0x02
+#define EIR_LS_EV 0x04
+#define EIR_DMA_EV 0x10
+#define EIR_TXEMP_EV 0x20
+#define EIR_SFIF_EV 0x40
+#define EIR_TMR_EV 0x80
+
+#define LCR 0x03 /* Link control register */
+#define LCR_WLS_8 0x03 /* 8 bits */
+
+#define BSR 0x03 /* Bank select register */
+#define BSR_BKSE 0x80
+#define BANK0 LCR_WLS_8 /* Must make sure that we set 8N1 */
+#define BANK1 0x80
+#define BANK2 0xe0
+#define BANK3 0xe4
+#define BANK4 0xe8
+#define BANK5 0xec
+#define BANK6 0xf0
+#define BANK7 0xf4
+
+#define MCR 0x04 /* Mode Control Register */
+#define MCR_MODE_MASK ~(0xd0)
+#define MCR_UART 0x00
+#define MCR_RESERVED 0x20
+#define MCR_SHARP_IR 0x40
+#define MCR_SIR 0x60
+#define MCR_MIR 0x80
+#define MCR_FIR 0xa0
+#define MCR_CEIR 0xb0
+#define MCR_IR_PLS 0x10
+#define MCR_DMA_EN 0x04
+#define MCR_EN_IRQ 0x08
+#define MCR_TX_DFR 0x08
+
+#define LSR 0x05 /* Link status register */
+#define LSR_RXDA 0x01 /* Receiver data available */
+#define LSR_TXRDY 0x20 /* Transmitter ready */
+#define LSR_TXEMP 0x40 /* Transmitter empty */
+
+#define ASCR 0x07 /* Auxiliary Status and Control Register */
+#define ASCR_RXF_TOUT 0x01 /* Rx FIFO timeout */
+#define ASCR_FEND_INF 0x02 /* Frame end bytes in rx FIFO */
+#define ASCR_S_EOT 0x04 /* Set end of transmission */
+#define ASCT_RXBSY 0x20 /* Rx busy */
+#define ASCR_TXUR 0x40 /* Transmitter underrun */
+#define ASCR_CTE 0x80 /* Clear timer event */
+
+/* Bank 2 */
+#define BGDL 0x00 /* Baud Generator Divisor Port (Low Byte) */
+#define BGDH 0x01 /* Baud Generator Divisor Port (High Byte) */
+
+#define ECR1 0x02 /* Extended Control Register 1 */
+#define ECR1_EXT_SL 0x01 /* Extended Mode Select */
+#define ECR1_DMANF 0x02 /* DMA Fairness */
+#define ECR1_DMATH 0x04 /* DMA Threshold */
+#define ECR1_DMASWP 0x08 /* DMA Swap */
+
+#define EXCR2 0x04
+#define EXCR2_TFSIZ 0x01 /* Tx FIFO size = 32 */
+#define EXCR2_RFSIZ 0x04 /* Rx FIFO size = 32 */
+
+#define TXFLV 0x06 /* Tx FIFO level */
+#define RXFLV 0x07 /* Rx FIFO level */
+
+/* Bank 3 */
+#define MID 0x00
+
+/* Bank 4 */
+#define TMRL 0x00 /* Timer low byte */
+#define TMRH 0x01 /* Timer high byte */
+#define IRCR1 0x02 /* Infrared control register 1 */
+#define IRCR1_TMR_EN 0x01 /* Timer enable */
+
+#define TFRLL 0x04
+#define TFRLH 0x05
+#define RFRLL 0x06
+#define RFRLH 0x07
+
+/* Bank 5 */
+#define IRCR2 0x04 /* Infrared control register 2 */
+#define IRCR2_MDRS 0x04 /* MIR data rate select */
+#define IRCR2_FEND_MD 0x20 /* */
+
+#define FRM_ST 0x05 /* Frame status FIFO */
+#define FRM_ST_VLD 0x80 /* Frame status FIFO data valid */
+#define FRM_ST_ERR_MSK 0x5f
+#define FRM_ST_LOST_FR 0x40 /* Frame lost */
+#define FRM_ST_MAX_LEN 0x10 /* Max frame len exceeded */
+#define FRM_ST_PHY_ERR 0x08 /* Physical layer error */
+#define FRM_ST_BAD_CRC 0x04
+#define FRM_ST_OVR1 0x02 /* Rx FIFO overrun */
+#define FRM_ST_OVR2 0x01 /* Frame status FIFO overrun */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Bank 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Bank 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* NSC chip information */
+struct nsc_chip {
+ char *name; /* Name of chipset */
+ int cfg[3]; /* Config registers */
+ u_int8_t cid_index; /* Chip identification index reg */
+ u_int8_t cid_value; /* Chip identification expected value */
+ u_int8_t cid_mask; /* Chip identification revision mask */
+
+ /* Functions for probing and initializing the specific chip */
+ int (*probe)(struct nsc_chip *chip, chipio_t *info);
+ int (*init)(struct nsc_chip *chip, chipio_t *info);
+};
+typedef struct nsc_chip nsc_chip_t;
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+/* Private data for each instance */
+struct nsc_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ struct timeval stamp;
+ struct timeval now;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ struct pm_dev *dev;
+};
+
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+BSR);
+}
+
+#endif /* NSC_IRCC_H */
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
new file mode 100644
index 000000000000..8c22c7374a23
--- /dev/null
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -0,0 +1,156 @@
+/*********************************************************************
+ *
+ * Filename: old_belkin.c
+ * Version: 1.1
+ * Description: Driver for the Belkin (old) SmartBeam dongle
+ * Status: Experimental...
+ * Author: Jean Tourrilhes <jt@hpl.hp.com>
+ * Created at: 22/11/99
+ * Modified at: Fri Dec 17 09:13:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+// #include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+/*
+ * Belkin is selling a dongle called the SmartBeam.
+ * In fact, there are two hardware versions of this dongle, of course with
+ * the same name and looking exactly the same (grrr...).
+ * I guess that I've got the old one, because inside I don't have
+ * a jumper for IrDA/ASK...
+ *
+ * As far as I can make out from the info on their web site, the old dongle
+ * supports only 9600 b/s, which makes our life much simpler as far as
+ * the driver is concerned, but you might not like it very much ;-)
+ * The new SmartBeam does 115 kb/s, and I've not tested it...
+ *
+ * Belkin claims that the correct driver for the old dongle (in Windows)
+ * is the generic Parallax 9500a driver, but the Linux LiteLink driver
+ * fails for me (probably because Linux-IrDA doesn't do rate fallback),
+ * so I created this really dumb driver...
+ *
+ * In fact, this driver doesn't do much. The only thing it does is to
+ * prevent Linux-IrDA from using any speed other than 9600 b/s ;-) This
+ * driver is called "old_belkin" so that when the new SmartBeam is supported
+ * its driver can be called "belkin" instead of "new_belkin".
+ *
+ * Note : this driver was written without any info/help from Belkin,
+ * so a lot of info here might be totally wrong. Blame me ;-)
+ */
+
+static int old_belkin_open(struct sir_dev *dev);
+static int old_belkin_close(struct sir_dev *dev);
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed);
+static int old_belkin_reset(struct sir_dev *dev);
+
+static struct dongle_driver old_belkin = {
+ .owner = THIS_MODULE,
+ .driver_name = "Old Belkin SmartBeam",
+ .type = IRDA_OLD_BELKIN_DONGLE,
+ .open = old_belkin_open,
+ .close = old_belkin_close,
+ .reset = old_belkin_reset,
+ .set_speed = old_belkin_change_speed,
+};
+
+static int __init old_belkin_sir_init(void)
+{
+ return irda_register_dongle(&old_belkin);
+}
+
+static void __exit old_belkin_sir_cleanup(void)
+{
+ irda_unregister_dongle(&old_belkin);
+}
+
+static int old_belkin_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Not too fast, please... */
+ qos->baud_rate.bits &= IR_9600;
+ /* Needs at least 10 ms (totally wild guess, can probably do better) */
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int old_belkin_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function old_belkin_change_speed (task)
+ *
+ * With only one speed available, not much to do...
+ */
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ dev->speed = 9600;
+ return (speed==dev->speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function old_belkin_reset (task)
+ *
+ * Reset the Old-Belkin type dongle.
+ *
+ */
+static int old_belkin_reset(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* This dongle's speed "defaults" to 9600 bps ;-) */
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
+
+module_init(old_belkin_sir_init);
+module_exit(old_belkin_sir_cleanup);
diff --git a/drivers/net/irda/old_belkin.c b/drivers/net/irda/old_belkin.c
new file mode 100644
index 000000000000..26f81fd28371
--- /dev/null
+++ b/drivers/net/irda/old_belkin.c
@@ -0,0 +1,164 @@
+/*********************************************************************
+ *
+ * Filename: old_belkin.c
+ * Version: 1.1
+ * Description: Driver for the Belkin (old) SmartBeam dongle
+ * Status: Experimental...
+ * Author: Jean Tourrilhes <jt@hpl.hp.com>
+ * Created at: 22/11/99
+ * Modified at: Fri Dec 17 09:13:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+/*
+ * Belkin is selling a dongle called the SmartBeam.
+ * In fact, there are two hardware versions of this dongle, of course with
+ * the same name and looking exactly the same (grrr...).
+ * I guess that I've got the old one, because inside I don't have
+ * a jumper for IrDA/ASK...
+ *
+ * As far as I can make out from the info on their web site, the old dongle
+ * supports only 9600 b/s, which makes our life much simpler as far as
+ * the driver is concerned, but you might not like it very much ;-)
+ * The new SmartBeam does 115 kb/s, and I've not tested it...
+ *
+ * Belkin claims that the correct driver for the old dongle (in Windows)
+ * is the generic Parallax 9500a driver, but the Linux LiteLink driver
+ * fails for me (probably because Linux-IrDA doesn't do rate fallback),
+ * so I created this really dumb driver...
+ *
+ * In fact, this driver doesn't do much. The only thing it does is to
+ * prevent Linux-IrDA from using any speed other than 9600 b/s ;-) This
+ * driver is called "old_belkin" so that when the new SmartBeam is supported
+ * its driver can be called "belkin" instead of "new_belkin".
+ *
+ * Note : this driver was written without any info/help from Belkin,
+ * so a lot of info here might be totally wrong. Blame me ;-)
+ */
+
+/* Let's guess */
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+
+static void old_belkin_open(dongle_t *self, struct qos_info *qos);
+static void old_belkin_close(dongle_t *self);
+static int old_belkin_change_speed(struct irda_task *task);
+static int old_belkin_reset(struct irda_task *task);
+
+/* These are the baudrates supported */
+/* static __u32 baud_rates[] = { 9600 }; */
+
+static struct dongle_reg dongle = {
+ .type = IRDA_OLD_BELKIN_DONGLE,
+ .open = old_belkin_open,
+ .close = old_belkin_close,
+ .reset = old_belkin_reset,
+ .change_speed = old_belkin_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init old_belkin_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit old_belkin_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void old_belkin_open(dongle_t *self, struct qos_info *qos)
+{
+ /* Not too fast, please... */
+ qos->baud_rate.bits &= IR_9600;
+ /* Needs at least 10 ms (totally wild guess, can probably do better) */
+ qos->min_turn_time.bits = 0x01;
+}
+
+static void old_belkin_close(dongle_t *self)
+{
+ /* Power off dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+}
+
+/*
+ * Function old_belkin_change_speed (task)
+ *
+ * With only one speed available, not much to do...
+ */
+static int old_belkin_change_speed(struct irda_task *task)
+{
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+/*
+ * Function old_belkin_reset (task)
+ *
+ * Reset the Old-Belkin type dongle.
+ *
+ */
+static int old_belkin_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+
+ /* Power on dongle */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* This dongle's speed "defaults" to 9600 bps ;-) */
+ self->speed = 9600;
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+
+ return 0;
+}
+
+MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Old-Belkin module
+ *
+ */
+module_init(old_belkin_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Old-Belkin module
+ *
+ */
+module_exit(old_belkin_cleanup);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
new file mode 100644
index 000000000000..89f5096cab74
--- /dev/null
+++ b/drivers/net/irda/sa1100_ir.c
@@ -0,0 +1,1045 @@
+/*
+ * linux/drivers/net/irda/sa1100_ir.c
+ *
+ * Copyright (C) 2000-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver for the StrongARM SA1100 embedded microprocessor
+ *
+ * Note that we don't have to worry about the SA1111's DMA bugs in here,
+ * so we use the straightforward dma_map_* functions with a null pointer.
+ *
+ * This driver takes one kernel command line parameter, sa1100ir=, with
+ * the following options:
+ * max_rate:baudrate - set the maximum baud rate
+ * power_level:level - set the transmitter power level
+ * tx_lpm:0|1 - set transmit low power mode
+ */
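+/*
+ * For example (illustrative only; the module name is assumed to be
+ * sa1100_ir and the parameter names are those of the module_param()
+ * declarations below):
+ *
+ *     modprobe sa1100_ir max_rate=115200 power_level=3 tx_lpm=0
+ */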
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/hardware.h>
+#include <asm/mach/irda.h>
+
+static int power_level = 3;
+static int tx_lpm;
+static int max_rate = 4000000;
+
+struct sa1100_irda {
+ unsigned char hscr0;
+ unsigned char utcr4;
+ unsigned char power;
+ unsigned char open;
+
+ int speed;
+ int newspeed;
+
+ struct sk_buff *txskb;
+ struct sk_buff *rxskb;
+ dma_addr_t txbuf_dma;
+ dma_addr_t rxbuf_dma;
+ dma_regs_t *txdma;
+ dma_regs_t *rxdma;
+
+ struct net_device_stats stats;
+ struct device *dev;
+ struct irda_platform_data *pdata;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+#define IS_FIR(si) ((si)->speed >= 4000000)
+
+#define HPSIR_MAX_RXLEN 2047
+
+/*
+ * Allocate and map the receive buffer, unless it is already allocated.
+ */
+static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
+{
+ if (si->rxskb)
+ return 0;
+
+ si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
+
+ if (!si->rxskb) {
+ printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Align any IP headers that may be contained
+ * within the frame.
+ */
+ skb_reserve(si->rxskb, 1);
+
+ si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
+ HPSIR_MAX_RXLEN,
+ DMA_FROM_DEVICE);
+ return 0;
+}
+
+/*
+ * We want to get here as soon as possible, and get the receiver setup.
+ * We use the existing buffer.
+ */
+static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
+{
+ if (!si->rxskb) {
+ printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
+ return;
+ }
+
+ /*
+ * First empty receive FIFO
+ */
+ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
+
+ /*
+ * Enable the DMA, receiver and receive interrupt.
+ */
+ sa1100_clear_dma(si->rxdma);
+ sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN);
+ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
+{
+ unsigned long flags;
+ int brd, ret = -EINVAL;
+
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+ brd = 3686400 / (16 * speed) - 1;
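+ /* (Worked example: 115200 baud gives 3686400 / (16 * 115200) - 1 = 1,
+  * and 9600 baud gives 23.) */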
+
+ /*
+ * Stop the receive DMA.
+ */
+ if (IS_FIR(si))
+ sa1100_stop_dma(si->rxdma);
+
+ local_irq_save(flags);
+
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+
+ Ser2UTCR1 = brd >> 8;
+ Ser2UTCR2 = brd;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ si->speed = speed;
+
+ local_irq_restore(flags);
+ ret = 0;
+ break;
+
+ case 4000000:
+ local_irq_save(flags);
+
+ si->hscr0 = 0;
+
+ Ser2HSSR0 = 0xff;
+ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
+ Ser2UTCR3 = 0;
+
+ si->speed = speed;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ sa1100_irda_rx_alloc(si);
+ sa1100_irda_rx_dma_start(si);
+
+ local_irq_restore(flags);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Control the power state of the IrDA transmitter.
+ * State:
+ * 0 - off
+ * 1 - short range, lowest power
+ * 2 - medium range, medium power
+ * 3 - maximum range, high power
+ *
+ * Currently, only assabet is known to support this.
+ */
+static int
+__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret = 0;
+ if (si->pdata->set_power)
+ ret = si->pdata->set_power(si->dev, state);
+ return ret;
+}
+
+static inline int
+sa1100_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret;
+
+ ret = __sa1100_irda_set_power(si, state);
+ if (ret == 0)
+ si->power = state;
+
+ return ret;
+}
+
+static int sa1100_irda_startup(struct sa1100_irda *si)
+{
+ int ret;
+
+ /*
+ * Ensure that the ports for this device are setup correctly.
+ */
+ if (si->pdata->startup)
+ si->pdata->startup(si->dev);
+
+ /*
+ * Configure PPC for IRDA - we want to drive TXD2 low.
+ * We also want to drive this pin low during sleep.
+ */
+ PPSR &= ~PPC_TXD2;
+ PSDR &= ~PPC_TXD2;
+ PPDR |= PPC_TXD2;
+
+ /*
+ * Enable HP-SIR modulation, and ensure that the port is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+ Ser2UTCR4 = si->utcr4;
+ Ser2UTCR0 = UTCR0_8BitData;
+ Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+
+ ret = sa1100_irda_set_speed(si, si->speed = 9600);
+ if (ret) {
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ }
+
+ return ret;
+}
+
+static void sa1100_irda_shutdown(struct sa1100_irda *si)
+{
+ /*
+ * Stop all DMA activity.
+ */
+ sa1100_stop_dma(si->rxdma);
+ sa1100_stop_dma(si->txdma);
+
+ /* Disable the port. */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend the IrDA interface.
+ */
+static int sa1100_irda_suspend(struct device *_dev, u32 state, u32 level)
+{
+ struct net_device *dev = dev_get_drvdata(_dev);
+ struct sa1100_irda *si;
+
+ if (!dev || level != SUSPEND_DISABLE)
+ return 0;
+
+ si = dev->priv;
+ if (si->open) {
+ /*
+ * Stop the transmit queue
+ */
+ netif_device_detach(dev);
+ disable_irq(dev->irq);
+ sa1100_irda_shutdown(si);
+ __sa1100_irda_set_power(si, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Resume the IrDA interface.
+ */
+static int sa1100_irda_resume(struct device *_dev, u32 level)
+{
+ struct net_device *dev = dev_get_drvdata(_dev);
+ struct sa1100_irda *si;
+
+ if (!dev || level != RESUME_ENABLE)
+ return 0;
+
+ si = dev->priv;
+ if (si->open) {
+ /*
+ * If we missed a speed change, initialise at the new speed
+ * directly. It is debatable whether this is actually
+ * required, but in the interests of continuing from where
+ * we left off it is desirable. The converse argument is
+ * that we should re-negotiate at 9600 baud again.
+ */
+ if (si->newspeed) {
+ si->speed = si->newspeed;
+ si->newspeed = 0;
+ }
+
+ sa1100_irda_startup(si);
+ __sa1100_irda_set_power(si, si->power);
+ enable_irq(dev->irq);
+
+ /*
+ * This automatically wakes up the queue
+ */
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define sa1100_irda_suspend NULL
+#define sa1100_irda_resume NULL
+#endif
+
+/*
+ * HP-SIR format interrupt service routines.
+ */
+static void sa1100_irda_hpsir_irq(struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+ int status;
+
+ status = Ser2UTSR0;
+
+ /*
+ * Deal with any receive errors first. The bytes in error may be
+ * the only bytes in the receive FIFO, so we do this first.
+ */
+ while (status & UTSR0_EIF) {
+ int stat, data;
+
+ stat = Ser2UTSR1;
+ data = Ser2UTDR;
+
+ if (stat & (UTSR1_FRE | UTSR1_ROR)) {
+ si->stats.rx_errors++;
+ if (stat & UTSR1_FRE)
+ si->stats.rx_frame_errors++;
+ if (stat & UTSR1_ROR)
+ si->stats.rx_fifo_errors++;
+ } else
+ async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+
+ status = Ser2UTSR0;
+ }
+
+ /*
+ * We must clear certain bits.
+ */
+ Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
+
+ if (status & UTSR0_RFS) {
+ /*
+ * There are at least 4 bytes in the FIFO. Read 3 bytes
+ * and leave the rest to the block below.
+ */
+ async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+ }
+
+ if (status & (UTSR0_RFS | UTSR0_RID)) {
+ /*
+ * Fifo contains more than 1 character.
+ */
+ do {
+ async_unwrap_char(dev, &si->stats, &si->rx_buff,
+ Ser2UTDR);
+ } while (Ser2UTSR1 & UTSR1_RNE);
+
+ dev->last_rx = jiffies;
+ }
+
+ if (status & UTSR0_TFS && si->tx_buff.len) {
+ /*
+ * Transmitter FIFO is not full
+ */
+ do {
+ Ser2UTDR = *si->tx_buff.data++;
+ si->tx_buff.len -= 1;
+ } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
+
+ if (si->tx_buff.len == 0) {
+ si->stats.tx_packets++;
+ si->stats.tx_bytes += si->tx_buff.data -
+ si->tx_buff.head;
+
+ /*
+ * We need to ensure that the transmitter has
+ * finished.
+ */
+ do
+ rmb();
+ while (Ser2UTSR1 & UTSR1_TBY);
+
+ /*
+ * Ok, we've finished transmitting. Now enable
+ * the receiver. Sometimes we get a receive IRQ
+ * immediately after a transmit...
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ if (si->newspeed) {
+ sa1100_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
+{
+ struct sk_buff *skb = si->rxskb;
+ dma_addr_t dma_addr;
+ unsigned int len, stat, data;
+
+ if (!skb) {
+ printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
+ return;
+ }
+
+ /*
+ * Get the current data position.
+ */
+ dma_addr = sa1100_get_dma_pos(si->rxdma);
+ len = dma_addr - si->rxbuf_dma;
+ if (len > HPSIR_MAX_RXLEN)
+ len = HPSIR_MAX_RXLEN;
+ dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE);
+
+ do {
+ /*
+ * Read Status, and then Data.
+ */
+ stat = Ser2HSSR1;
+ rmb();
+ data = Ser2HSDR;
+
+ if (stat & (HSSR1_CRE | HSSR1_ROR)) {
+ si->stats.rx_errors++;
+ if (stat & HSSR1_CRE)
+ si->stats.rx_crc_errors++;
+ if (stat & HSSR1_ROR)
+ si->stats.rx_frame_errors++;
+ } else
+ skb->data[len++] = data;
+
+ /*
+ * If we hit the end of frame, there's
+ * no point in continuing.
+ */
+ if (stat & HSSR1_EOF)
+ break;
+ } while (Ser2HSSR0 & HSSR0_EIF);
+
+ if (stat & HSSR1_EOF) {
+ si->rxskb = NULL;
+
+ skb_put(skb, len);
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ si->stats.rx_packets++;
+ si->stats.rx_bytes += len;
+
+ /*
+ * Before we pass the buffer up, allocate a new one.
+ */
+ sa1100_irda_rx_alloc(si);
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ } else {
+ /*
+ * Remap the buffer.
+ */
+ si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
+ HPSIR_MAX_RXLEN,
+ DMA_FROM_DEVICE);
+ }
+}
+
+/*
+ * FIR format interrupt service routine. We only have to
+ * handle RX events; transmit events go via the TX DMA handler.
+ *
+ * No matter what, we disable RX, process, and then restart RX.
+ */
+static void sa1100_irda_fir_irq(struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+
+ /*
+ * Stop RX DMA
+ */
+ sa1100_stop_dma(si->rxdma);
+
+ /*
+ * Framing error - we throw away the packet completely.
+ * Clearing RXE flushes the error conditions and data
+ * from the fifo.
+ */
+ if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
+ si->stats.rx_errors++;
+
+ if (Ser2HSSR0 & HSSR0_FRE)
+ si->stats.rx_frame_errors++;
+
+ /*
+ * Clear out the DMA...
+ */
+ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
+
+ /*
+ * Clear selected status bits now, so we
+ * don't miss them next time around.
+ */
+ Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
+ }
+
+ /*
+ * Deal with any receive errors. Any of the lowest
+ * 8 bytes in the FIFO may contain an error. We must read
+ * them one by one. The "error" could even be the end of
+ * packet!
+ */
+ if (Ser2HSSR0 & HSSR0_EIF)
+ sa1100_irda_fir_error(si, dev);
+
+ /*
+ * No matter what happens, we must restart reception.
+ */
+ sa1100_irda_rx_dma_start(si);
+}
+
+static irqreturn_t sa1100_irda_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ if (IS_FIR(((struct sa1100_irda *)dev->priv)))
+ sa1100_irda_fir_irq(dev);
+ else
+ sa1100_irda_hpsir_irq(dev);
+ return IRQ_HANDLED;
+}
+
+/*
+ * TX DMA completion handler.
+ */
+static void sa1100_irda_txdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = dev->priv;
+ struct sk_buff *skb = si->txskb;
+
+ si->txskb = NULL;
+
+ /*
+ * Wait for the transmission to complete. Unfortunately,
+ * the hardware doesn't give us an interrupt to indicate
+ * "end of frame".
+ */
+ do
+ rmb();
+ while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
+
+ /*
+ * Clear the transmit underrun bit.
+ */
+ Ser2HSSR0 = HSSR0_TUR;
+
+ /*
+ * Do we need to change speed? Note that we're lazy
+ * here - we don't free the old rxskb. We don't need
+ * to allocate a buffer either.
+ */
+ if (si->newspeed) {
+ sa1100_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+
+ /*
+ * Start reception. This disables the transmitter for
+ * us. This will be using the existing RX buffer.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ /*
+ * Account and free the packet.
+ */
+ if (skb) {
+ dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
+ si->stats.tx_packets ++;
+ si->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ }
+
+ /*
+ * Make sure that the TX queue is available for sending
+ * (for retries). TX has priority over RX at all times.
+ */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+ int speed = irda_get_next_speed(skb);
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1)
+ si->newspeed = speed;
+
+ /*
+ * If this is an empty frame, we can bypass a lot.
+ */
+ if (skb->len == 0) {
+ if (si->newspeed) {
+ si->newspeed = 0;
+ sa1100_irda_set_speed(si, speed);
+ }
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (!IS_FIR(si)) {
+ netif_stop_queue(dev);
+
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+ si->tx_buff.truesize);
+
+ /*
+ * Set the transmit interrupt enable. This will fire
+ * off an interrupt immediately. Note that we disable
+ * the receiver so we won't get spurious characters
+ * received.
+ */
+ Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
+
+ dev_kfree_skb(skb);
+ } else {
+ int mtt = irda_get_mtt(skb);
+
+ /*
+ * We must not be transmitting...
+ */
+ if (si->txskb)
+ BUG();
+
+ netif_stop_queue(dev);
+
+ si->txskb = skb;
+ si->txbuf_dma = dma_map_single(si->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+
+ sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);
+
+ /*
+ * If we have a mean turn-around time, impose the specified
+ * delay. We could shorten this by timing from
+ * the point we received the packet.
+ */
+ if (mtt)
+ udelay(mtt);
+
+ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
+ }
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int
+sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct sa1100_irda *si = dev->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (si->open) {
+ ret = sa1100_irda_set_speed(si,
+ rq->ifr_baudrate);
+ } else {
+ printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = IS_FIR(si) ? 0
+ : si->rx_buff.state != OUTSIDE_FRAME;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+ return &si->stats;
+}
+
+static int sa1100_irda_start(struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+ int err;
+
+ si->speed = 9600;
+
+ err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq;
+
+ err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
+ NULL, NULL, &si->rxdma);
+ if (err)
+ goto err_rx_dma;
+
+ err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
+ sa1100_irda_txdma_irq, dev, &si->txdma);
+ if (err)
+ goto err_tx_dma;
+
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(dev->irq);
+
+ /*
+ * Setup the serial port for the specified speed.
+ */
+ err = sa1100_irda_startup(si);
+ if (err)
+ goto err_startup;
+
+ /*
+ * Open a new IrLAP layer instance.
+ */
+ si->irlap = irlap_open(dev, &si->qos, "sa1100");
+ err = -ENOMEM;
+ if (!si->irlap)
+ goto err_irlap;
+
+ /*
+ * Now enable the interrupt and start the queue
+ */
+ si->open = 1;
+ sa1100_set_power(si, power_level); /* low power mode */
+ enable_irq(dev->irq);
+ netif_start_queue(dev);
+ return 0;
+
+err_irlap:
+ si->open = 0;
+ sa1100_irda_shutdown(si);
+err_startup:
+ sa1100_free_dma(si->txdma);
+err_tx_dma:
+ sa1100_free_dma(si->rxdma);
+err_rx_dma:
+ free_irq(dev->irq, dev);
+err_irq:
+ return err;
+}
+
+static int sa1100_irda_stop(struct net_device *dev)
+{
+ struct sa1100_irda *si = dev->priv;
+
+ disable_irq(dev->irq);
+ sa1100_irda_shutdown(si);
+
+ /*
+ * If we have been doing DMA receive, make sure we
+ * tidy that up cleanly.
+ */
+ if (si->rxskb) {
+ dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(si->rxskb);
+ si->rxskb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+ si->open = 0;
+
+ /*
+ * Free resources
+ */
+ sa1100_free_dma(si->txdma);
+ sa1100_free_dma(si->rxdma);
+ free_irq(dev->irq, dev);
+
+ sa1100_set_power(si, 0);
+
+ return 0;
+}
+
+static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static int sa1100_irda_probe(struct device *_dev)
+{
+ struct platform_device *pdev = to_platform_device(_dev);
+ struct net_device *dev;
+ struct sa1100_irda *si;
+ unsigned int baudrate_mask;
+ int err;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_1;
+ err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_2;
+ err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_3;
+
+ dev = alloc_irdadev(sizeof(struct sa1100_irda));
+ if (!dev)
+ goto err_mem_4;
+
+ si = dev->priv;
+ si->dev = &pdev->dev;
+ si->pdata = pdev->dev.platform_data;
+
+ /*
+ * Initialise the HP-SIR buffers
+ */
+ err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
+ if (err)
+ goto err_mem_5;
+ err = sa1100_irda_init_iobuf(&si->tx_buff, 4000);
+ if (err)
+ goto err_mem_5;
+
+ dev->hard_start_xmit = sa1100_irda_hard_xmit;
+ dev->open = sa1100_irda_start;
+ dev->stop = sa1100_irda_stop;
+ dev->do_ioctl = sa1100_irda_ioctl;
+ dev->get_stats = sa1100_irda_stats;
+ dev->irq = IRQ_Ser2ICP;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ /*
+ * We support original IrDA up to 115k2, and 4 Mbps FIR when
+ * max_rate allows it. Min Turn Time set to 1ms or greater.
+ */
+ baudrate_mask = IR_9600;
+
+ switch (max_rate) { /* each case deliberately falls through to enable the lower rates too */
+ case 4000000: baudrate_mask |= IR_4000000 << 8;
+ case 115200: baudrate_mask |= IR_115200;
+ case 57600: baudrate_mask |= IR_57600;
+ case 38400: baudrate_mask |= IR_38400;
+ case 19200: baudrate_mask |= IR_19200;
+ }
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 7;
+
+ irda_qos_bits_to_value(&si->qos);
+
+ si->utcr4 = UTCR4_HPSIR;
+ if (tx_lpm)
+ si->utcr4 |= UTCR4_Z1_6us;
+
+ /*
+ * Initially enable HP-SIR modulation, and ensure that the port
+ * is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2UTCR4 = si->utcr4;
+ Ser2HSCR0 = HSCR0_UART;
+
+ err = register_netdev(dev);
+ if (err == 0)
+ dev_set_drvdata(&pdev->dev, dev);
+
+ if (err) {
+ err_mem_5:
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ err_mem_4:
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ err_mem_3:
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ err_mem_2:
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+ }
+ err_mem_1:
+ return err;
+}
+
+static int sa1100_irda_remove(struct device *_dev)
+{
+ struct net_device *dev = dev_get_drvdata(_dev);
+
+ if (dev) {
+ struct sa1100_irda *si = dev->priv;
+ unregister_netdev(dev);
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ }
+
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+
+ return 0;
+}
+
+static struct device_driver sa1100ir_driver = {
+ .name = "sa11x0-ir",
+ .bus = &platform_bus_type,
+ .probe = sa1100_irda_probe,
+ .remove = sa1100_irda_remove,
+ .suspend = sa1100_irda_suspend,
+ .resume = sa1100_irda_resume,
+};
+
+static int __init sa1100_irda_init(void)
+{
+ /*
+ * Limit power level to a sensible range.
+ */
+ if (power_level < 1)
+ power_level = 1;
+ if (power_level > 3)
+ power_level = 3;
+
+ return driver_register(&sa1100ir_driver);
+}
+
+static void __exit sa1100_irda_exit(void)
+{
+ driver_unregister(&sa1100ir_driver);
+}
+
+module_init(sa1100_irda_init);
+module_exit(sa1100_irda_exit);
+module_param(power_level, int, 0);
+module_param(tx_lpm, int, 0);
+module_param(max_rate, int, 0);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
+MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
new file mode 100644
index 000000000000..f0b8bc3637e5
--- /dev/null
+++ b/drivers/net/irda/sir-dev.h
@@ -0,0 +1,202 @@
+/*********************************************************************
+ *
+ * sir.h: include file for irda-sir device abstraction layer
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_SIR_H
+#define IRDA_SIR_H
+
+#include <linux/netdevice.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> // iobuff_t
+
+/* FIXME: unify irda_request with sir_fsm! */
+
+struct irda_request {
+ struct list_head lh_request;
+ unsigned long pending;
+ void (*func)(void *);
+ void *data;
+ struct timer_list timer;
+};
+
+struct sir_fsm {
+ struct semaphore sem;
+ struct irda_request rq;
+ unsigned state, substate;
+ int param;
+ int result;
+};
+
+#define SIRDEV_STATE_WAIT_TX_COMPLETE 0x0100
+
+/* substates for wait_tx_complete */
+#define SIRDEV_STATE_WAIT_XMIT 0x0101
+#define SIRDEV_STATE_WAIT_UNTIL_SENT 0x0102
+#define SIRDEV_STATE_TX_DONE 0x0103
+
+#define SIRDEV_STATE_DONGLE_OPEN 0x0300
+
+/* 0x0301-0x03ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_CLOSE 0x0400
+
+/* 0x0401-0x04ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_SET_DTR_RTS 0x0500
+
+#define SIRDEV_STATE_SET_SPEED 0x0700
+#define SIRDEV_STATE_DONGLE_CHECK 0x0800
+#define SIRDEV_STATE_DONGLE_RESET 0x0900
+
+/* 0x0901-0x09ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_SPEED 0x0a00
+/* 0x0a01-0x0aff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_PORT_SPEED 0x0b00
+#define SIRDEV_STATE_DONE 0x0c00
+#define SIRDEV_STATE_ERROR 0x0d00
+#define SIRDEV_STATE_COMPLETE 0x0e00
+
+#define SIRDEV_STATE_DEAD 0xffff
+
+
+struct sir_dev;
+
+struct dongle_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ IRDA_DONGLE type;
+
+ int (*open)(struct sir_dev *dev);
+ int (*close)(struct sir_dev *dev);
+ int (*reset)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+
+ struct list_head dongle_list;
+};
+
+struct sir_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ int qos_mtt_bits;
+
+ int (*chars_in_buffer)(struct sir_dev *dev);
+ void (*wait_until_sent)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+ int (*set_dtr_rts)(struct sir_dev *dev, int dtr, int rts);
+
+ int (*do_write)(struct sir_dev *dev, const unsigned char *ptr, size_t len);
+
+ int (*start_dev)(struct sir_dev *dev);
+ int (*stop_dev)(struct sir_dev *dev);
+};
+
+
+/* exported */
+
+extern int irda_register_dongle(struct dongle_driver *new);
+extern int irda_unregister_dongle(struct dongle_driver *drv);
+
+extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
+extern int sirdev_put_instance(struct sir_dev *self);
+
+extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+extern void sirdev_write_complete(struct sir_dev *dev);
+extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+
+/* low level helpers for SIR device/dongle setup */
+extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+
+/* not exported */
+
+extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+extern int sirdev_put_dongle(struct sir_dev *self);
+
+extern void sirdev_enable_rx(struct sir_dev *dev);
+extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+extern int __init irda_thread_create(void);
+extern void __exit irda_thread_join(void);
+
+/* inline helpers */
+
+static inline int sirdev_schedule_speed(struct sir_dev *dev, unsigned speed)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
+}
+
+static inline int sirdev_schedule_dongle_open(struct sir_dev *dev, int dongle_id)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_OPEN, dongle_id);
+}
+
+static inline int sirdev_schedule_dongle_close(struct sir_dev *dev)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_CLOSE, 0);
+}
+
+static inline int sirdev_schedule_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int dtrrts;
+
+ dtrrts = ((dtr) ? 0x02 : 0x00) | ((rts) ? 0x01 : 0x00); /* bit 1 = DTR, bit 0 = RTS */
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_DTR_RTS, dtrrts);
+}
+
+#if 0
+static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_MODE, mode);
+}
+#endif
+
+
+struct sir_dev {
+ struct net_device *netdev;
+ struct net_device_stats stats;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ char hwname[32];
+
+ struct sir_fsm fsm;
+ atomic_t enable_rx;
+ int raw_tx;
+ spinlock_t tx_lock;
+
+ u32 new_speed;
+ u32 flags;
+
+ unsigned speed;
+
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ struct sk_buff *tx_skb;
+
+ const struct dongle_driver * dongle_drv;
+ const struct sir_driver * drv;
+ void *priv;
+
+};
+
+#endif /* IRDA_SIR_H */
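+
+/*
+ * Usage sketch (illustrative only, appended for clarity): a lower-level
+ * port driver plugs into this layer by filling in a struct sir_driver and
+ * requesting an instance. Everything prefixed "example_" is made up here
+ * and is not an existing driver.
+ */
+static int example_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
+{
+ /* hand the bytes to the real hardware here; return how many were accepted */
+ return len;
+}
+
+static struct sir_driver example_sir_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "example-sir",
+ .qos_mtt_bits = 0x07,
+ .do_write = example_do_write,
+};
+
+/*
+ * The port driver would then typically call, from its attach path,
+ * sirdev_get_instance(&example_sir_driver, "example"), feed received bytes
+ * to sirdev_receive(), signal tx progress with sirdev_write_complete(), and
+ * finally call sirdev_put_instance() on removal.
+ */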
diff --git a/drivers/net/irda/sir_core.c b/drivers/net/irda/sir_core.c
new file mode 100644
index 000000000000..a49f910c835b
--- /dev/null
+++ b/drivers/net/irda/sir_core.c
@@ -0,0 +1,56 @@
+/*********************************************************************
+ *
+ * sir_core.c: module core for irda-sir abstraction layer
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/***************************************************************************/
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
+
+/***************************************************************************/
+
+EXPORT_SYMBOL(irda_register_dongle);
+EXPORT_SYMBOL(irda_unregister_dongle);
+
+EXPORT_SYMBOL(sirdev_get_instance);
+EXPORT_SYMBOL(sirdev_put_instance);
+
+EXPORT_SYMBOL(sirdev_set_dongle);
+EXPORT_SYMBOL(sirdev_write_complete);
+EXPORT_SYMBOL(sirdev_receive);
+
+EXPORT_SYMBOL(sirdev_raw_write);
+EXPORT_SYMBOL(sirdev_raw_read);
+EXPORT_SYMBOL(sirdev_set_dtr_rts);
+
+static int __init sir_core_init(void)
+{
+ return irda_thread_create();
+}
+
+static void __exit sir_core_exit(void)
+{
+ irda_thread_join();
+}
+
+module_init(sir_core_init);
+module_exit(sir_core_exit);
+
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
new file mode 100644
index 000000000000..efc5a8870565
--- /dev/null
+++ b/drivers/net/irda/sir_dev.c
@@ -0,0 +1,677 @@
+/*********************************************************************
+ *
+ * sir_dev.c: irda sir network device
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+/***************************************************************************/
+
+void sirdev_enable_rx(struct sir_dev *dev)
+{
+ if (unlikely(atomic_read(&dev->enable_rx)))
+ return;
+
+ /* flush rx-buffer - should also help in case of problems with echo cancellation */
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->rx_buff.len = 0;
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ atomic_set(&dev->enable_rx, 1);
+}
+
+static int sirdev_is_receiving(struct sir_dev *dev)
+{
+ if (!atomic_read(&dev->enable_rx))
+ return 0;
+
+ return (dev->rx_buff.state != OUTSIDE_FRAME);
+}
+
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ int err;
+
+ IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
+
+ err = sirdev_schedule_dongle_open(dev, type);
+ if (unlikely(err))
+ return err;
+ down(&dev->fsm.sem); /* block until config change completed */
+ err = dev->fsm.result;
+ up(&dev->fsm.sem);
+ return err;
+}
+
+/* used by dongle drivers for dongle programming */
+
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
+{
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(len > dev->tx_buff.truesize))
+ return -ENOSPC;
+
+ spin_lock_irqsave(&dev->tx_lock, flags); /* serialize with other tx operations */
+ while (dev->tx_buff.len > 0) { /* wait until tx idle */
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ memcpy(dev->tx_buff.data, buf, len);
+ dev->tx_buff.len = len;
+
+ ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+ if (ret > 0) {
+ IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
+
+ dev->tx_buff.data += ret;
+ dev->tx_buff.len -= ret;
+ dev->raw_tx = 1;
+ ret = len; /* all data is going to be sent */
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ return ret;
+}
+
+/* seems some dongle drivers may need this */
+
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
+{
+ int count;
+
+ if (atomic_read(&dev->enable_rx))
+ return -EIO; /* fail if we expect irda-frames */
+
+ count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
+
+ if (count > 0) {
+ memcpy(buf, dev->rx_buff.data, count);
+ dev->rx_buff.data += count;
+ dev->rx_buff.len -= count;
+ }
+
+ /* remaining stuff gets flushed when re-enabling normal rx */
+
+ return count;
+}
+
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int ret = -ENXIO;
+ if (dev->drv->set_dtr_rts != 0)
+ ret = dev->drv->set_dtr_rts(dev, dtr, rts);
+ return ret;
+}
+
+/**********************************************************************/
+
+/* called from client driver - likely with bh-context - to indicate
+ * it made some progress with transmission. Hence we send the next
+ * chunk, if any, or complete the skb otherwise
+ */
+
+void sirdev_write_complete(struct sir_dev *dev)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int actual = 0;
+ int err;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
+ __FUNCTION__, dev->tx_buff.len);
+
+ if (likely(dev->tx_buff.len > 0)) {
+ /* Write data left in transmit buffer */
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual>0)) {
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual<0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ IRDA_ERROR("%s: drv->do_write failed (%d)\n",
+ __FUNCTION__, actual);
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ }
+ dev->tx_buff.len = 0;
+ }
+ if (dev->tx_buff.len > 0)
+ goto done; /* more data to send later */
+ }
+
+ if (unlikely(dev->raw_tx != 0)) {
+ /* in raw mode we are just done now after the buffer was sent
+ * completely. Since this was requested by some dongle driver
+ * running under the control of the irda-thread we must take
+ * care here not to re-enable the queue. The queue will be
+ * restarted when the irda-thread has completed the request.
+ */
+
+ IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
+ dev->raw_tx = 0;
+ goto done; /* no post-frame handling in raw mode */
+ }
+
+ /* we have finished now sending this skb.
+ * update statistics and free the skb.
+ * finally we check and trigger a pending speed change, if any.
+ * if not we switch to rx mode and wake the queue for further
+ * packets.
+ * note the scheduled speed request blocks until the lower
+ * client driver and the corresponding hardware have really
+ * finished sending all data (e.g. xmit fifo drained)
+ * before the speed change gets finally done and the queue
+ * re-activated.
+ */
+
+ IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
+
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (unlikely(dev->new_speed > 0)) {
+ IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
+ err = sirdev_schedule_speed(dev, dev->new_speed);
+ if (unlikely(err)) {
+ /* should never happen
+ * forget the speed change and hope the stack recovers
+ */
+ IRDA_ERROR("%s - schedule speed change failed: %d\n",
+ __FUNCTION__, err);
+ netif_wake_queue(dev->netdev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion dev->new_speed gets cleared,
+ * rx-reenabled and the queue restarted
+ */
+ }
+ else {
+ sirdev_enable_rx(dev);
+ netif_wake_queue(dev->netdev);
+ }
+
+done:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+/* called from client driver - likely with bh-context - to give us
+ * some more received bytes. We put them into the rx-buffer,
+ * normally unwrapping and building LAP-skb's (unless rx disabled)
+ */
+
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
+{
+ if (!dev || !dev->netdev) {
+ IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+ return -1;
+ }
+
+ if (!dev->irlap) {
+ IRDA_WARNING("%s - too early: %p / %zd!\n",
+ __FUNCTION__, cp, count);
+ return -1;
+ }
+
+ if (cp==NULL) {
+ /* error already at lower level receive
+ * just update stats and set media busy
+ */
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ dev->stats.rx_dropped++;
+ IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
+ return 0;
+ }
+
+ /* Read the characters into the buffer */
+ if (likely(atomic_read(&dev->enable_rx))) {
+ while (count--)
+ /* Unwrap and destuff one byte */
+ async_unwrap_char(dev->netdev, &dev->stats,
+ &dev->rx_buff, *cp++);
+ } else {
+ while (count--) {
+ /* rx not enabled: save the raw bytes and never
+ * trigger any netif_rx. The received bytes are flushed
+ * later when we re-enable rx but might be read meanwhile
+ * by the dongle driver.
+ */
+ dev->rx_buff.data[dev->rx_buff.len++] = *cp++;
+
+ /* What should we do when the buffer is full? */
+ if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
+ dev->rx_buff.len = 0;
+ }
+ }
+
+ return 0;
+}
+
+/**********************************************************************/
+
+/* callbacks from network layer */
+
+static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
+{
+ struct sir_dev *dev = ndev->priv;
+
+ return (dev) ? &dev->stats : NULL;
+}
+
+static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sir_dev *dev = ndev->priv;
+ unsigned long flags;
+ int actual = 0;
+ int err;
+ s32 speed;
+
+ IRDA_ASSERT(dev != NULL, return 0;);
+
+ netif_stop_queue(ndev);
+
+ IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
+
+ speed = irda_get_next_speed(skb);
+ if ((speed != dev->speed) && (speed != -1)) {
+ if (!skb->len) {
+ err = sirdev_schedule_speed(dev, speed);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ /* Failed to initiate the speed change, likely the fsm
+ * is still busy (pretty unlikely, but...)
+ * We refuse to accept the skb and return with the queue
+ * stopped so the network layer will retry after the
+ * fsm completes and wakes the queue.
+ */
+ return 1;
+ }
+ else if (unlikely(err)) {
+ /* other fatal error - forget the speed change and
+ * hope the stack will recover somehow
+ */
+ netif_start_queue(ndev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion the queue gets restarted
+ */
+
+ dev_kfree_skb_any(skb);
+ return 0;
+ } else
+ dev->new_speed = speed;
+ }
+
+ /* Init tx buffer*/
+ dev->tx_buff.data = dev->tx_buff.head;
+
+ /* Check problems */
+ if(spin_is_locked(&dev->tx_lock)) {
+ IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
+ }
+
+ /* serialize with write completion */
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
+
+ /* transmission will start now - disable receive.
+ * if we are just in the middle of an incoming frame,
+ * treat it as collision. probably it's a good idea to
+ * reset the rx_buf OUTSIDE_FRAME in this case too?
+ */
+ atomic_set(&dev->enable_rx, 0);
+ if (unlikely(sirdev_is_receiving(dev)))
+ dev->stats.collisions++;
+
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual > 0)) {
+ dev->tx_skb = skb;
+ ndev->trans_start = jiffies;
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual < 0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ IRDA_ERROR("%s: drv->do_write failed (%d)\n",
+ __FUNCTION__, actual);
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ netif_wake_queue(ndev);
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return 0;
+}
+
+/* called from network layer with rtnl hold */
+
+static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct sir_dev *dev = ndev->priv;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSDONGLE: /* Set dongle */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = sirdev_is_receiving(dev);
+ break;
+
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMODE:
+#if 0
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_mode(dev, irq->ifr_mode);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+#endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* ----------------------------------------------------------------------------- */
+
+#define SIRBUF_ALLOCSIZE 4269 /* worst case size of a wrapped IrLAP frame */
+
+static int sirdev_alloc_buffers(struct sir_dev *dev)
+{
+ dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
+ dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+
+ /* Bootstrap ZeroCopy Rx */
+ dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
+ if (dev->rx_buff.skb == NULL)
+ return -ENOMEM;
+ skb_reserve(dev->rx_buff.skb, 1);
+ dev->rx_buff.head = dev->rx_buff.skb->data;
+
+ dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
+ if (dev->tx_buff.head == NULL) {
+ kfree_skb(dev->rx_buff.skb);
+ dev->rx_buff.skb = NULL;
+ dev->rx_buff.head = NULL;
+ return -ENOMEM;
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->tx_buff.len = 0;
+ dev->rx_buff.len = 0;
+
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ return 0;
+};
+
+static void sirdev_free_buffers(struct sir_dev *dev)
+{
+ if (dev->rx_buff.skb)
+ kfree_skb(dev->rx_buff.skb);
+ if (dev->tx_buff.head)
+ kfree(dev->tx_buff.head);
+ dev->rx_buff.head = dev->tx_buff.head = NULL;
+ dev->rx_buff.skb = NULL;
+}
+
+static int sirdev_open(struct net_device *ndev)
+{
+ struct sir_dev *dev = ndev->priv;
+ const struct sir_driver *drv = dev->drv;
+
+ if (!drv)
+ return -ENODEV;
+
+ /* increase the reference count of the driver module before doing serious stuff */
+ if (!try_module_get(drv->owner))
+ return -ESTALE;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ if (sirdev_alloc_buffers(dev))
+ goto errout_dec;
+
+ if (!dev->drv->start_dev || dev->drv->start_dev(dev))
+ goto errout_free;
+
+ sirdev_enable_rx(dev);
+ dev->raw_tx = 0;
+
+ netif_start_queue(ndev);
+ dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
+ if (!dev->irlap)
+ goto errout_stop;
+
+ netif_wake_queue(ndev);
+
+ IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
+
+ return 0;
+
+errout_stop:
+ atomic_set(&dev->enable_rx, 0);
+ if (dev->drv->stop_dev)
+ dev->drv->stop_dev(dev);
+errout_free:
+ sirdev_free_buffers(dev);
+errout_dec:
+ module_put(drv->owner);
+ return -EAGAIN;
+}
+
+static int sirdev_close(struct net_device *ndev)
+{
+ struct sir_dev *dev = ndev->priv;
+ const struct sir_driver *drv;
+
+// IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+
+ netif_stop_queue(ndev);
+
+ down(&dev->fsm.sem); /* block on pending config completion */
+
+ atomic_set(&dev->enable_rx, 0);
+
+ if (unlikely(!dev->irlap))
+ goto out;
+ irlap_close(dev->irlap);
+ dev->irlap = NULL;
+
+ drv = dev->drv;
+ if (unlikely(!drv || !dev->priv))
+ goto out;
+
+ if (drv->stop_dev)
+ drv->stop_dev(dev);
+
+ sirdev_free_buffers(dev);
+ module_put(drv->owner);
+
+out:
+ dev->speed = 0;
+ up(&dev->fsm.sem);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------- */
+
+struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
+{
+ struct net_device *ndev;
+ struct sir_dev *dev;
+
+ IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
+
+ /* instead of adding tests to protect against drv->do_write==NULL
+ * at several places we refuse to create a sir_dev instance for
+ * drivers which don't implement do_write.
+ */
+ if (!drv || !drv->do_write)
+ return NULL;
+
+ /*
+ * Allocate new instance of the device
+ */
+ ndev = alloc_irdadev(sizeof(*dev));
+ if (ndev == NULL) {
+ IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
+ goto out;
+ }
+ dev = ndev->priv;
+
+ irda_init_max_qos_capabilies(&dev->qos);
+ dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
+ irda_qos_bits_to_value(&dev->qos);
+
+ strncpy(dev->hwname, name, sizeof(dev->hwname)-1);
+
+ atomic_set(&dev->enable_rx, 0);
+ dev->tx_skb = NULL;
+
+ spin_lock_init(&dev->tx_lock);
+ init_MUTEX(&dev->fsm.sem);
+
+ INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
+ dev->fsm.rq.pending = 0;
+ init_timer(&dev->fsm.rq.timer);
+
+ dev->drv = drv;
+ dev->netdev = ndev;
+
+ SET_MODULE_OWNER(ndev);
+
+ /* Override the network functions we need to use */
+ ndev->hard_start_xmit = sirdev_hard_xmit;
+ ndev->open = sirdev_open;
+ ndev->stop = sirdev_close;
+ ndev->get_stats = sirdev_get_stats;
+ ndev->do_ioctl = sirdev_ioctl;
+
+ if (register_netdev(ndev)) {
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ goto out_freenetdev;
+ }
+
+ return dev;
+
+out_freenetdev:
+ free_netdev(ndev);
+out:
+ return NULL;
+}
+
+int sirdev_put_instance(struct sir_dev *dev)
+{
+ int err = 0;
+
+ IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+
+ atomic_set(&dev->enable_rx, 0);
+
+ netif_carrier_off(dev->netdev);
+ netif_device_detach(dev->netdev);
+
+ if (dev->dongle_drv)
+ err = sirdev_schedule_dongle_close(dev);
+ if (err)
+ IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
+
+ sirdev_close(dev->netdev);
+
+ down(&dev->fsm.sem);
+ dev->fsm.state = SIRDEV_STATE_DEAD; /* mark stale */
+ dev->dongle_drv = NULL;
+ dev->priv = NULL;
+ up(&dev->fsm.sem);
+
+ /* Remove netdevice */
+ unregister_netdev(dev->netdev);
+
+ free_netdev(dev->netdev);
+
+ return 0;
+}
+
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
new file mode 100644
index 000000000000..c5b76746e72b
--- /dev/null
+++ b/drivers/net/irda/sir_dongle.c
@@ -0,0 +1,134 @@
+/*********************************************************************
+ *
+ * sir_dongle.c: manager for serial dongle protocol drivers
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/kmod.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/**************************************************************************
+ *
+ * dongle registration and attachment
+ *
+ */
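+/* Dongle protocol drivers register here (typically from their module init)
+ * with a struct dongle_driver keyed by IRDA_DONGLE type; sirdev_get_dongle()
+ * below resolves a requested type to the matching driver and pins its module
+ * for as long as the dongle stays attached to a sir_dev.
+ */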
+
+static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
+static DECLARE_MUTEX(dongle_list_lock); /* protects the list */
+
+int irda_register_dongle(struct dongle_driver *new)
+{
+ struct list_head *entry;
+ struct dongle_driver *drv;
+
+ IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
+ __FUNCTION__, new->driver_name, new->type);
+
+ down(&dongle_list_lock);
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (new->type == drv->type) {
+ up(&dongle_list_lock);
+ return -EEXIST;
+ }
+ }
+ list_add(&new->dongle_list, &dongle_list);
+ up(&dongle_list_lock);
+ return 0;
+}
+
+int irda_unregister_dongle(struct dongle_driver *drv)
+{
+ down(&dongle_list_lock);
+ list_del(&drv->dongle_list);
+ up(&dongle_list_lock);
+ return 0;
+}
+
+int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ struct list_head *entry;
+ const struct dongle_driver *drv = NULL;
+ int err = -EINVAL;
+
+#ifdef CONFIG_KMOD
+ request_module("irda-dongle-%d", type);
+#endif
+
+ if (dev->dongle_drv != NULL)
+ return -EBUSY;
+
+ /* serialize access to the list of registered dongles */
+ down(&dongle_list_lock);
+
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (drv->type == type)
+ break;
+ else
+ drv = NULL;
+ }
+
+ if (!drv) {
+ err = -ENODEV;
+ goto out_unlock; /* no such dongle */
+ }
+
+ /* handling of SMP races with dongle module removal - three cases:
+ * 1) dongle driver was already unregistered - then we haven't found the
+ * requested dongle above and are already out here
+ * 2) the module is already marked deleted but the driver is still
+ * registered - then the try_module_get() below will fail
+ * 3) the try_module_get() below succeeds before the module is marked
+ * deleted - then sys_delete_module() fails and prevents the removal
+ * because the module is in use.
+ */
+
+ if (!try_module_get(drv->owner)) {
+ err = -ESTALE;
+ goto out_unlock; /* rmmod already pending */
+ }
+ dev->dongle_drv = drv;
+
+ if (!drv->open || (err=drv->open(dev))!=0)
+ goto out_reject; /* failed to open driver */
+
+ up(&dongle_list_lock);
+ return 0;
+
+out_reject:
+ dev->dongle_drv = NULL;
+ module_put(drv->owner);
+out_unlock:
+ up(&dongle_list_lock);
+ return err;
+}
+
+int sirdev_put_dongle(struct sir_dev *dev)
+{
+ const struct dongle_driver *drv = dev->dongle_drv;
+
+ if (drv) {
+ if (drv->close)
+ drv->close(dev); /* close this dongle instance */
+
+ dev->dongle_drv = NULL; /* unlink the dongle driver */
+ module_put(drv->owner);/* decrement driver's module refcount */
+ }
+
+ return 0;
+}
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
new file mode 100644
index 000000000000..18cea1099530
--- /dev/null
+++ b/drivers/net/irda/sir_kthread.c
@@ -0,0 +1,502 @@
+/*********************************************************************
+ *
+ * sir_kthread.c: dedicated thread to process scheduled
+ * sir device setup requests
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/**************************************************************************
+ *
+ * kIrDAd kernel thread and config state machine
+ *
+ */
+
+struct irda_request_queue {
+ struct list_head request_list;
+ spinlock_t lock;
+ task_t *thread;
+ struct completion exit;
+ wait_queue_head_t kick, done;
+ atomic_t num_pending;
+};
+
+static struct irda_request_queue irda_rq_queue;
+
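+/* Queue a request for kIrDAd unless it is already pending; the atomic
+ * pending bit makes double-queueing harmless. Returns 1 if the request
+ * was queued, 0 if it was already pending.
+ */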
+static int irda_queue_request(struct irda_request *rq)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (!test_and_set_bit(0, &rq->pending)) {
+ spin_lock_irqsave(&irda_rq_queue.lock, flags);
+ list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
+ wake_up(&irda_rq_queue.kick);
+ atomic_inc(&irda_rq_queue.num_pending);
+ spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
+ ret = 1;
+ }
+ return ret;
+}
+
+static void irda_request_timer(unsigned long data)
+{
+ struct irda_request *rq = (struct irda_request *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irda_rq_queue.lock, flags);
+ list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
+ wake_up(&irda_rq_queue.kick);
+ spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
+}
+
+static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
+{
+ int ret = 0;
+ struct timer_list *timer = &rq->timer;
+
+ if (!test_and_set_bit(0, &rq->pending)) {
+ timer->expires = jiffies + delay;
+ timer->function = irda_request_timer;
+ timer->data = (unsigned long)rq;
+ atomic_inc(&irda_rq_queue.num_pending);
+ add_timer(timer);
+ ret = 1;
+ }
+ return ret;
+}
+
+static void run_irda_queue(void)
+{
+ unsigned long flags;
+ struct list_head *entry, *tmp;
+ struct irda_request *rq;
+
+ spin_lock_irqsave(&irda_rq_queue.lock, flags);
+ list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
+ rq = list_entry(entry, struct irda_request, lh_request);
+ list_del_init(entry);
+ spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
+
+ clear_bit(0, &rq->pending);
+ rq->func(rq->data);
+
+ if (atomic_dec_and_test(&irda_rq_queue.num_pending))
+ wake_up(&irda_rq_queue.done);
+
+ spin_lock_irqsave(&irda_rq_queue.lock, flags);
+ }
+ spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
+}
+
+static int irda_thread(void *startup)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ daemonize("kIrDAd");
+
+ irda_rq_queue.thread = current;
+
+ complete((struct completion *)startup);
+
+ while (irda_rq_queue.thread != NULL) {
+
+ /* We use TASK_INTERRUPTIBLE rather than
+ * TASK_UNINTERRUPTIBLE. Andrew Morton made this
+ * change; he told me that it is safe because "signal
+ * blocking is now handled in daemonize()", and added
+ * that the problem is that "uninterruptible sleep
+ * contributes to load average", making users worry.
+ * Jean II */
+ set_task_state(current, TASK_INTERRUPTIBLE);
+ add_wait_queue(&irda_rq_queue.kick, &wait);
+ if (list_empty(&irda_rq_queue.request_list))
+ schedule();
+ else
+ __set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&irda_rq_queue.kick, &wait);
+
+ /* make swsusp happy with our thread */
+ if (current->flags & PF_FREEZE)
+ refrigerator(PF_FREEZE);
+
+ run_irda_queue();
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
+ reparent_to_init();
+#endif
+ complete_and_exit(&irda_rq_queue.exit, 0);
+ /* never reached */
+ return 0;
+}
+
+
+static void flush_irda_queue(void)
+{
+ if (atomic_read(&irda_rq_queue.num_pending)) {
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (!list_empty(&irda_rq_queue.request_list))
+ run_irda_queue();
+
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&irda_rq_queue.done, &wait);
+ if (atomic_read(&irda_rq_queue.num_pending))
+ schedule();
+ else
+ __set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&irda_rq_queue.done, &wait);
+ }
+}
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
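+/* Returns 0 once the transmitter has drained (TX_DONE), a negative errno on
+ * an undefined substate, or a positive delay in msec after which the caller
+ * should reschedule this handler to check again.
+ */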
+
+static int irda_tx_complete_fsm(struct sir_dev *dev)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ unsigned next_state, delay;
+ unsigned bytes_left;
+
+ do {
+ next_state = fsm->substate; /* default: stay in current substate */
+ delay = 0;
+
+ switch(fsm->substate) {
+
+ case SIRDEV_STATE_WAIT_XMIT:
+ if (dev->drv->chars_in_buffer)
+ bytes_left = dev->drv->chars_in_buffer(dev);
+ else
+ bytes_left = 0;
+ if (!bytes_left) {
+ next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+ break;
+ }
+
+ if (dev->speed > 115200)
+ delay = (bytes_left*8*10000) / (dev->speed/100);
+ else if (dev->speed > 0)
+ delay = (bytes_left*10*10000) / (dev->speed/100);
+ else
+ delay = 0;
+ /* expected delay (usec) until remaining bytes are sent */
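+ /* e.g. 14 bytes pending at 115200 bps (SIR, 10 bits/byte):
+ * 14*10*10000 / (115200/100) ~= 1215 usec, which is rounded
+ * up to a 2 msec sleep below.
+ */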
+ if (delay < 100) {
+ udelay(delay);
+ delay = 0;
+ break;
+ }
+ /* sleep some longer delay (msec) */
+ delay = (delay+999) / 1000;
+ break;
+
+ case SIRDEV_STATE_WAIT_UNTIL_SENT:
+ /* block until the underlying hardware buffers are empty */
+ if (dev->drv->wait_until_sent)
+ dev->drv->wait_until_sent(dev);
+ next_state = SIRDEV_STATE_TX_DONE;
+ break;
+
+ case SIRDEV_STATE_TX_DONE:
+ return 0;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ fsm->substate = next_state;
+ } while (delay == 0);
+ return delay;
+}
+
+/*
+ * Function irda_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * However, kIrDAd is shared by all sir_dev devices, so we had better not sleep
+ * there too long. Instead, for longer delays we start a timer to reschedule us later.
+ * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
+ * Both must be unlocked/restarted on completion - but only on final exit.
+ */
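+/* Typical flow for a speed change request:
+ * SET_SPEED -> DONGLE_CHECK (wait for tx drain) -> [DONGLE_RESET ->
+ * DONGLE_SPEED, only if a dongle is attached] -> PORT_SPEED -> DONE -> COMPLETE
+ */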
+
+static void irda_config_fsm(void *data)
+{
+ struct sir_dev *dev = data;
+ struct sir_fsm *fsm = &dev->fsm;
+ int next_state;
+ int ret = -1;
+ unsigned delay;
+
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+
+ do {
+ IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
+ __FUNCTION__, fsm->state, fsm->substate);
+
+ next_state = fsm->state;
+ delay = 0;
+
+ switch(fsm->state) {
+
+ case SIRDEV_STATE_DONGLE_OPEN:
+ if (dev->dongle_drv != NULL) {
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+
+ /* Initialize dongle */
+ ret = sirdev_get_dongle(dev, fsm->param);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ /* Dongles are powered through the modem control lines which
+ * were just set during open. Before resetting, let's wait for
+ * the power to stabilize. This is what some dongle drivers did
+ * in open before, while others didn't - should be safe anyway.
+ */
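+ /* The 50 below is in msec (the fsm delay unit); fsm->param selects
+ * 9600 bps as the speed programmed right after the dongle reset.
+ */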
+
+ delay = 50;
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+
+ fsm->param = 9600;
+
+ break;
+
+ case SIRDEV_STATE_DONGLE_CLOSE:
+ /* shouldn't we just treat this as success? */
+ if (dev->dongle_drv == NULL) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_DTR_RTS:
+ ret = sirdev_set_dtr_rts(dev,
+ (fsm->param&0x02) ? TRUE : FALSE,
+ (fsm->param&0x01) ? TRUE : FALSE);
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_SPEED:
+ fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+ next_state = SIRDEV_STATE_DONGLE_CHECK;
+ break;
+
+ case SIRDEV_STATE_DONGLE_CHECK:
+ ret = irda_tx_complete_fsm(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ if ((delay=ret) != 0)
+ break;
+
+ if (dev->dongle_drv) {
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+ }
+ else {
+ dev->speed = fsm->param;
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_RESET:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->reset(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0) {
+ /* set serial port according to dongle default speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+ next_state = SIRDEV_STATE_DONGLE_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+ if (dev->dongle_drv->set_speed) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0)
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ break;
+
+ case SIRDEV_STATE_PORT_SPEED:
+ /* Finally we are ready to change the serial port speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ dev->new_speed = 0;
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_DONE:
+ /* Signal network layer so it can send more frames */
+ netif_wake_queue(dev->netdev);
+ next_state = SIRDEV_STATE_COMPLETE;
+ break;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ fsm->result = -EINVAL;
+ /* fall thru */
+
+ case SIRDEV_STATE_ERROR:
+ IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+
+#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
+ netif_stop_queue(dev->netdev);
+#else
+ netif_wake_queue(dev->netdev);
+#endif
+ /* fall thru */
+
+ case SIRDEV_STATE_COMPLETE:
+ /* config change finished, so we are not busy any longer */
+ sirdev_enable_rx(dev);
+ up(&fsm->sem);
+ return;
+ }
+ fsm->state = next_state;
+ } while(!delay);
+
+ irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
+}
+
+/* schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ int xmit_was_down;
+
+ IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+
+ if (down_trylock(&fsm->sem)) {
+ if (in_interrupt() || in_atomic() || irqs_disabled()) {
+ IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+ return -EWOULDBLOCK;
+ } else
+ down(&fsm->sem);
+ }
+
+ if (fsm->state == SIRDEV_STATE_DEAD) {
+ /* race with sirdev_close should never happen */
+ IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+ up(&fsm->sem);
+ return -ESTALE; /* or better EPIPE? */
+ }
+
+ xmit_was_down = netif_queue_stopped(dev->netdev);
+ netif_stop_queue(dev->netdev);
+ atomic_set(&dev->enable_rx, 0);
+
+ fsm->state = initial_state;
+ fsm->param = param;
+ fsm->result = 0;
+
+ INIT_LIST_HEAD(&fsm->rq.lh_request);
+ fsm->rq.pending = 0;
+ fsm->rq.func = irda_config_fsm;
+ fsm->rq.data = dev;
+
+ if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
+ atomic_set(&dev->enable_rx, 1);
+ if (!xmit_was_down)
+ netif_wake_queue(dev->netdev);
+ up(&fsm->sem);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+int __init irda_thread_create(void)
+{
+ struct completion startup;
+ int pid;
+
+ spin_lock_init(&irda_rq_queue.lock);
+ irda_rq_queue.thread = NULL;
+ INIT_LIST_HEAD(&irda_rq_queue.request_list);
+ init_waitqueue_head(&irda_rq_queue.kick);
+ init_waitqueue_head(&irda_rq_queue.done);
+ atomic_set(&irda_rq_queue.num_pending, 0);
+
+ init_completion(&startup);
+ pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
+ if (pid <= 0)
+ return -EAGAIN;
+ else
+ wait_for_completion(&startup);
+
+ return 0;
+}
+
+void __exit irda_thread_join(void)
+{
+ if (irda_rq_queue.thread) {
+ flush_irda_queue();
+ init_completion(&irda_rq_queue.exit);
+ irda_rq_queue.thread = NULL;
+ wake_up(&irda_rq_queue.kick);
+ wait_for_completion(&irda_rq_queue.exit);
+ }
+}
+
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
new file mode 100644
index 000000000000..10125a1dba22
--- /dev/null
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -0,0 +1,2396 @@
+/*********************************************************************
+ * $Id: smsc-ircc2.c,v 1.19.2.5 2002/10/27 11:34:26 dip Exp $
+ *
+ * Description: Driver for the SMC Infrared Communications Controller
+ * Status: Experimental.
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ * Created at:
+ * Modified at:
+ * Modified by:
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ * Copyright (c) 2002 Jean Tourrilhes
+ *
+ *
+ * Based on smc-ircc.c:
+ *
+ * Copyright (c) 2001 Stefani Seibold
+ * Copyright (c) 1999-2001 Dag Brattli
+ * Copyright (c) 1998-1999 Thomas Davis,
+ *
+ * and irport.c:
+ *
+ * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "smsc-ircc2.h"
+#include "smsc-sio.h"
+
+/* Types */
+
+struct smsc_transceiver {
+ char *name;
+ void (*set_for_speed)(int fir_base, u32 speed);
+ int (*probe)(int fir_base);
+};
+typedef struct smsc_transceiver smsc_transceiver_t;
+
+#if 0
+struct smc_chip {
+ char *name;
+ u16 flags;
+ u8 devid;
+ u8 rev;
+};
+typedef struct smc_chip smc_chip_t;
+#endif
+
+struct smsc_chip {
+ char *name;
+ #if 0
+ u8 type;
+ #endif
+ u16 flags;
+ u8 devid;
+ u8 rev;
+};
+typedef struct smsc_chip smsc_chip_t;
+
+struct smsc_chip_address {
+ unsigned int cfg_base;
+ unsigned int type;
+};
+typedef struct smsc_chip_address smsc_chip_address_t;
+
+/* Private data for each instance */
+struct smsc_ircc_cb {
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+ struct irlap_cb    *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ __u32 flags; /* Interface flags */
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ int transceiver;
+ struct pm_dev *pmdev;
+};
+
+/* Constants */
+
+static const char *driver_name = "smsc-ircc2";
+#define DIM(x) (sizeof(x)/(sizeof(*(x))))
+#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
+#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
+#define SMSC_IRCC2_C_NET_TIMEOUT 0
+#define SMSC_IRCC2_C_SIR_STOP 0
+
+/* Prototypes */
+
+static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base);
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq);
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase);
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase);
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
+static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
+static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs);
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase);
+static void smsc_ircc_change_speed(void *priv, u32 speed);
+static void smsc_ircc_set_sir_speed(void *priv, u32 speed);
+static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
+static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
+#if SMSC_IRCC2_C_SIR_STOP
+static void smsc_ircc_sir_stop(struct smsc_ircc_cb *self);
+#endif
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self);
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static int smsc_ircc_net_open(struct net_device *dev);
+static int smsc_ircc_net_close(struct net_device *dev);
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#if SMSC_IRCC2_C_NET_TIMEOUT
+static void smsc_ircc_timeout(struct net_device *dev);
+#endif
+static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
+static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
+
+/* Probing */
+static int __init smsc_ircc_look_for_chips(void);
+static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type);
+static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
+
+/* Transceivers specific functions */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
+
+/* Power Management */
+
+static void smsc_ircc_suspend(struct smsc_ircc_cb *self);
+static void smsc_ircc_wakeup(struct smsc_ircc_cb *self);
+static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
+
+
+/* Transceivers for SMSC-ircc */
+
+static smsc_transceiver_t smsc_transceivers[]=
+{
+ { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800},
+ { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select},
+ { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc},
+ { NULL, NULL}
+};
+#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (DIM(smsc_transceivers)-1)
+
+/* SMC SuperIO chipsets definitions */
+
+#define KEY55_1 0 /* SuperIO Configuration mode with Key <0x55> */
+#define KEY55_2 1 /* SuperIO Configuration mode with Key <0x55,0x55> */
+#define NoIRDA 2 /* SuperIO Chip has no IRDA Port */
+#define SIR 0 /* SuperIO Chip has only slow IRDA */
+#define FIR 4 /* SuperIO Chip has fast IRDA */
+#define SERx4 8 /* SuperIO Chip supports 115.2 kBaud * 4 = 460.8 kBaud */
+
+static smsc_chip_t __initdata fdc_chips_flat[]=
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
+ { "37C665GT", KEY55_2|NoIRDA, 0x65, 0x01 },
+ { "37C665GT", KEY55_2|NoIRDA, 0x66, 0x01 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x03, 0x02 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x04, 0x02 }, /* ID? */
+ { "37C78", KEY55_2|NoIRDA, 0x78, 0x00 },
+ { "37N769", KEY55_1|FIR|SERx4, 0x28, 0x00 },
+ { "37N869", KEY55_1|FIR|SERx4, 0x29, 0x00 },
+ { NULL }
+};
+
+static smsc_chip_t __initdata fdc_chips_paged[]=
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
+ { "37B77X", KEY55_1|SIR|SERx4, 0x43, 0x00 },
+ { "37B78X", KEY55_1|SIR|SERx4, 0x44, 0x00 },
+ { "37B80X", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37C67X", KEY55_1|FIR|SERx4, 0x40, 0x00 },
+ { "37C93X", KEY55_2|SIR|SERx4, 0x02, 0x01 },
+ { "37C93XAPM", KEY55_1|SIR|SERx4, 0x30, 0x01 },
+ { "37C93XFR", KEY55_2|FIR|SERx4, 0x03, 0x01 },
+ { "37M707", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37M81X", KEY55_1|SIR|SERx4, 0x4d, 0x00 },
+ { "37N958FR", KEY55_1|FIR|SERx4, 0x09, 0x04 },
+ { "37N971", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
+ { "37N972", KEY55_1|FIR|SERx4, 0x0b, 0x00 },
+ { NULL }
+};
+
+static smsc_chip_t __initdata lpc_chips_flat[]=
+{
+ /* Base address 0x2E or 0x4E */
+ { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
+ { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 },
+ { NULL }
+};
+
+static smsc_chip_t __initdata lpc_chips_paged[]=
+{
+ /* Base address 0x2E or 0x4E */
+ { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
+ { "47B37X", KEY55_1|SIR|SERx4, 0x52, 0x00 },
+ { "47M10X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M120", KEY55_1|NoIRDA|SERx4, 0x5c, 0x00 },
+ { "47M13X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M14X", KEY55_1|SIR|SERx4, 0x5f, 0x00 },
+ { "47N252", KEY55_1|FIR|SERx4, 0x0e, 0x00 },
+ { "47S42X", KEY55_1|SIR|SERx4, 0x57, 0x00 },
+ { NULL }
+};
+
+#define SMSCSIO_TYPE_FDC 1
+#define SMSCSIO_TYPE_LPC 2
+#define SMSCSIO_TYPE_FLAT 4
+#define SMSCSIO_TYPE_PAGED 8
+
+static smsc_chip_address_t __initdata possible_addresses[]=
+{
+ {0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
+ {0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
+ {0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
+ {0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
+ {0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
+ {0,0}
+};
+
+/* Globals */
+
+static struct smsc_ircc_cb *dev_self[] = { NULL, NULL};
+
+static int ircc_irq=255;
+static int ircc_dma=255;
+static int ircc_fir=0;
+static int ircc_sir=0;
+static int ircc_cfg=0;
+static int ircc_transceiver=0;
+
+static unsigned short dev_count=0;
+
+static inline void register_bank(int iobase, int bank)
+{
+ outb(((inb(iobase+IRCC_MASTER) & 0xf0) | (bank & 0x07)),
+ iobase+IRCC_MASTER);
+}
+
+
+/*******************************************************************************
+ *
+ *
+ * SMSC-ircc stuff
+ *
+ *
+ *******************************************************************************/
+
+/*
+ * Function smsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init smsc_ircc_init(void)
+{
+ int ret=-ENODEV;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ dev_count=0;
+
+ if ((ircc_fir>0)&&(ircc_sir>0)) {
+ IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
+ IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
+
+ if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq) == 0)
+ return 0;
+
+ return -ENODEV;
+ }
+
+ /* try user provided configuration register base address */
+ if (ircc_cfg>0) {
+ IRDA_MESSAGE(" Overriding configuration address 0x%04x\n",
+ ircc_cfg);
+ if (!smsc_superio_fdc(ircc_cfg))
+ ret = 0;
+ if (!smsc_superio_lpc(ircc_cfg))
+ ret = 0;
+ }
+
+ if(smsc_ircc_look_for_chips()>0) ret = 0;
+
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_open (firbase, sirbase, dma, irq)
+ *
+ * Try to open driver instance
+ *
+ */
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+{
+ struct smsc_ircc_cb *self;
+ struct net_device *dev;
+ int err;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ err = smsc_ircc_present(fir_base, sir_base);
+ if(err)
+ goto err_out;
+
+ err = -ENOMEM;
+ if (dev_count >= DIM(dev_self)) {
+ IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__);
+ goto err_out1;
+ }
+
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
+ if (!dev) {
+ IRDA_WARNING("%s() can't allocate net device\n", __FUNCTION__);
+ goto err_out1;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ dev->hard_start_xmit = smsc_ircc_hard_xmit_sir;
+#if SMSC_IRCC2_C_NET_TIMEOUT
+ dev->tx_timeout = smsc_ircc_timeout;
+ dev->watchdog_timeo = HZ*2; /* Allow enough time for speed change */
+#endif
+ dev->open = smsc_ircc_net_open;
+ dev->stop = smsc_ircc_net_close;
+ dev->do_ioctl = smsc_ircc_net_ioctl;
+ dev->get_stats = smsc_ircc_net_get_stats;
+
+ self = dev->priv;
+ self->netdev = dev;
+
+ /* Make ifconfig display some details */
+ dev->base_addr = self->io.fir_base = fir_base;
+ dev->irq = self->io.irq = irq;
+
+ /* Need to store self somewhere */
+ dev_self[dev_count++] = self;
+ spin_lock_init(&self->lock);
+
+ self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
+ self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
+
+ self->rx_buff.head =
+ dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
+ driver_name);
+ goto err_out2;
+ }
+
+ self->tx_buff.head =
+ dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
+ driver_name);
+ goto err_out3;
+ }
+
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
+
+ smsc_ircc_setup_qos(self);
+
+ smsc_ircc_init_chip(self);
+
+ if(ircc_transceiver > 0 &&
+ ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
+ self->transceiver = ircc_transceiver;
+ else
+ smsc_ircc_probe_transceiver(self);
+
+ err = register_netdev(self->netdev);
+ if(err) {
+ IRDA_ERROR("%s, Network device registration failed!\n",
+ driver_name);
+ goto err_out4;
+ }
+
+ self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, smsc_ircc_pmproc);
+ if (self->pmdev)
+ self->pmdev->data = self;
+
+ IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+
+ return 0;
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ free_netdev(self->netdev);
+ dev_self[--dev_count] = NULL;
+ err_out1:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ err_out:
+ return err;
+}
+
+/*
+ * Function smsc_ircc_present(fir_base, sir_base)
+ *
+ * Check the smsc-ircc chip presence
+ *
+ */
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
+{
+ unsigned char low, high, chip, config, dma, irq, version;
+
+ if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
+ __FUNCTION__, fir_base);
+ goto out1;
+ }
+
+ if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
+ __FUNCTION__, sir_base);
+ goto out2;
+ }
+
+ register_bank(fir_base, 3);
+
+ high = inb(fir_base+IRCC_ID_HIGH);
+ low = inb(fir_base+IRCC_ID_LOW);
+ chip = inb(fir_base+IRCC_CHIP_ID);
+ version = inb(fir_base+IRCC_VERSION);
+ config = inb(fir_base+IRCC_INTERFACE);
+ dma = config & IRCC_INTERFACE_DMA_MASK;
+ irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
+ if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
+ IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
+ __FUNCTION__, fir_base);
+ goto out3;
+ }
+ IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
+ "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+ chip & 0x0f, version, fir_base, sir_base, dma, irq);
+
+ return 0;
+ out3:
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ out2:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ out1:
+ return -ENODEV;
+}
+
+/*
+ * Function smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq)
+ *
+ * Setup I/O
+ *
+ */
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
+ unsigned int fir_base, unsigned int sir_base,
+ u8 dma, u8 irq)
+{
+ unsigned char config, chip_dma, chip_irq;
+
+ register_bank(fir_base, 3);
+ config = inb(fir_base+IRCC_INTERFACE);
+ chip_dma = config & IRCC_INTERFACE_DMA_MASK;
+ chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
+ self->io.fir_base = fir_base;
+ self->io.sir_base = sir_base;
+ self->io.fir_ext = SMSC_IRCC2_FIR_CHIP_IO_EXTENT;
+ self->io.sir_ext = SMSC_IRCC2_SIR_CHIP_IO_EXTENT;
+ self->io.fifo_size = SMSC_IRCC2_FIFO_SIZE;
+ self->io.speed = SMSC_IRCC2_C_IRDA_FALLBACK_SPEED;
+
+ if (irq < 255) {
+ if (irq != chip_irq)
+ IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
+ driver_name, chip_irq, irq);
+ self->io.irq = irq;
+ }
+ else
+ self->io.irq = chip_irq;
+
+ if (dma < 255) {
+ if (dma != chip_dma)
+ IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
+ driver_name, chip_dma, dma);
+ self->io.dma = dma;
+ }
+ else
+ self->io.dma = chip_dma;
+
+}
+
+/*
+ * Function smsc_ircc_setup_qos(self)
+ *
+ * Setup qos
+ *
+ */
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
+{
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
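+ /* the 4 Mb/s capability bit sits in the upper byte of the 16 bit
+ * rate field, hence the << 8 above
+ */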
+
+ self->qos.min_turn_time.bits = SMSC_IRCC2_MIN_TURN_TIME;
+ self->qos.window_size.bits = SMSC_IRCC2_WINDOW_SIZE;
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*
+ * Function smsc_ircc_init_chip(self)
+ *
+ * Init chip
+ *
+ */
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
+{
+ int iobase, ir_mode, ctrl, fast;
+
+ IRDA_ASSERT( self != NULL, return; );
+ iobase = self->io.fir_base;
+
+ ir_mode = IRCC_CFGA_IRDA_SIR_A;
+ ctrl = 0;
+ fast = 0;
+
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER);
+ outb(0x00, iobase+IRCC_MASTER);
+
+ register_bank(iobase, 1);
+ outb(((inb(iobase+IRCC_SCE_CFGA) & 0x87) | ir_mode),
+ iobase+IRCC_SCE_CFGA);
+
+#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ iobase+IRCC_SCE_CFGB);
+#else
+ outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ iobase+IRCC_SCE_CFGB);
+#endif
+ (void) inb(iobase+IRCC_FIFO_THRESHOLD);
+ outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase+IRCC_FIFO_THRESHOLD);
+
+ register_bank(iobase, 4);
+ outb((inb(iobase+IRCC_CONTROL) & 0x30) | ctrl, iobase+IRCC_CONTROL);
+
+ register_bank(iobase, 0);
+ outb(fast, iobase+IRCC_LCR_A);
+
+ smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+
+ /* Power on device */
+ outb(0x00, iobase+IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else {
+ /* Make sure we are the only one touching
+ * self->io.speed and the hardware - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = smsc_ircc_is_receiving(self);
+ break;
+ #if 0
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ smsc_ircc_sir_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ break;
+ #endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) dev->priv;
+
+ return &self->stats;
+}
+
+#if SMSC_IRCC2_C_NET_TIMEOUT
+/*
+ * Function smsc_ircc_timeout (struct net_device *dev)
+ *
+ * The networking timeout management.
+ *
+ */
+
+static void smsc_ircc_timeout(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+
+ self = (struct smsc_ircc_cb *) dev->priv;
+
+ IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
+ dev->name, self->io.speed);
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_sir_start(self);
+ smsc_ircc_change_speed(self, self->io.speed);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+#endif
+
+/*
+ * Function smsc_ircc_hard_xmit_sir (struct sk_buff *skb, struct net_device *dev)
+ *
+ * Transmits the current frame until FIFO is full, then
+ * waits until the next transmit interrupt, and continues until the
+ * frame is transmitted.
+ */
+int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ s32 speed;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return 0;);
+
+ self = (struct smsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.sir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /*
+ * We send frames one by one in SIR mode (no
+ * pipelining), so at this point, if we were sending
+ * a previous frame, we just received the interrupt
+ * telling us it is finished (UART_IIR_THRI).
+ * Therefore, waiting for the transmitter to really
+ * finish draining the fifo won't take too long.
+ * And the interrupt handler is not expected to run.
+ * - Jean II */
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else {
+ self->new_speed = speed;
+ }
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, iobase+UART_IER);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_fir_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ int fir_base, ir_mode, ctrl, fast;
+
+ IRDA_ASSERT(self != NULL, return;);
+ fir_base = self->io.fir_base;
+
+ self->io.speed = speed;
+
+ switch(speed) {
+ default:
+ case 576000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_CRC;
+ fast = 0;
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
+ break;
+ case 1152000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_1152 | IRCC_CRC;
+ fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
+ IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
+ __FUNCTION__);
+ break;
+ case 4000000:
+ ir_mode = IRCC_CFGA_IRDA_4PPM;
+ ctrl = IRCC_CRC;
+ fast = IRCC_LCR_A_FAST;
+ IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
+ __FUNCTION__);
+ break;
+ }
+ #if 0
+ Now in transceiver!
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast, fir_base+IRCC_LCR_A);
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base+IRCC_SCE_CFGA);
+
+ register_bank(fir_base, 4);
+ outb((inb(fir_base+IRCC_CONTROL) & 0x30) | ctrl, fir_base+IRCC_CONTROL);
+}
+
+/*
+ * Function smsc_ircc_fir_start(self)
+ *
+ * Switch the chip into FIR mode: install the FIR transmit handler,
+ * reset the FIFO and enable the FIR interrupts
+ *
+ */
+static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+
+ fir_base = self->io.fir_base;
+
+ /* Reset everything */
+
+ /* Install FIR transmit handler */
+ dev->hard_start_xmit = smsc_ircc_hard_xmit_fir;
+
+ /* Clear FIFO */
+ outb(inb(fir_base+IRCC_LCR_A)|IRCC_LCR_A_FIFO_RESET, fir_base+IRCC_LCR_A);
+
+ /* Enable interrupt */
+ /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);*/
+
+ register_bank(fir_base, 1);
+
+ /* Select the TX/RX interface */
+#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ fir_base+IRCC_SCE_CFGB);
+#else
+ outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ fir_base+IRCC_SCE_CFGB);
+#endif
+ (void) inb(fir_base+IRCC_FIFO_THRESHOLD);
+
+ /* Enable SCE interrupts */
+ outb(0, fir_base+IRCC_MASTER);
+ register_bank(fir_base, 0);
+ outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, fir_base+IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_fir_stop(self)
+ *
+ * Stop FIR operation on the chip
+ *
+ */
+static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
+{
+ int fir_base;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ fir_base = self->io.fir_base;
+ register_bank(fir_base, 0);
+ /*outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);*/
+ outb(inb(fir_base+IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base+IRCC_LCR_B);
+}
+
+
+/*
+ * Function smsc_ircc_change_speed(self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler. - Jean II
+ */
+static void smsc_ircc_change_speed(void *priv, u32 speed)
+{
+ struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
+ struct net_device *dev;
+ int iobase;
+ int last_speed_was_sir;
+
+ IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
+
+ #if 0
+ /* Temp Hack */
+ speed= 1152000;
+ self->io.speed = speed;
+ last_speed_was_sir = 0;
+ smsc_ircc_fir_start(self);
+ #endif
+
+ if(self->io.speed == 0)
+ smsc_ircc_sir_start(self);
+
+ #if 0
+ if(!last_speed_was_sir) speed = self->io.speed;
+ #endif
+
+ if(self->io.speed != speed) smsc_ircc_set_transceiver_for_speed(self, speed);
+
+ self->io.speed = speed;
+
+ if(speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ if(!last_speed_was_sir) {
+ smsc_ircc_fir_stop(self);
+ smsc_ircc_sir_start(self);
+ }
+ smsc_ircc_set_sir_speed(self, speed);
+ }
+ else {
+ if(last_speed_was_sir) {
+ #if SMSC_IRCC2_C_SIR_STOP
+ smsc_ircc_sir_stop(self);
+ #endif
+ smsc_ircc_fir_start(self);
+ }
+ smsc_ircc_set_fir_speed(self, speed);
+
+ #if 0
+ self->tx_buff.len = 10;
+ self->tx_buff.data = self->tx_buff.head;
+
+ smsc_ircc_dma_xmit(self, iobase, 4000);
+ #endif
+ /* Be ready for incoming frames */
+ smsc_ircc_dma_receive(self, iobase);
+ }
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function smsc_ircc_set_sir_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+void smsc_ircc_set_sir_speed(void *priv, __u32 speed)
+{
+ struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+ IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ iobase = self->io.sir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Turn off interrupts */
+ outb(0, iobase+UART_IER);
+
+ divisor = SMSC_IRCC2_MAX_SIR_SPEED/speed;
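+ /* The divisor is derived from the highest SIR rate; assuming the usual
+ * 115200 bps ceiling this gives e.g. 12 for 9600 bps and 1 for 115200 bps.
+ */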
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+ * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
+ * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
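+ /* Trigger level 1 raises the Rx interrupt after every received byte
+ * (lowest latency); level 14 batches up to 14 bytes per interrupt at
+ * 38400 bps and above.
+ */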
+ if (self->io.speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase+UART_DLM);
+ outb(lcr, iobase+UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
+
+ /* Turn on interrupts */
+ outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, iobase+UART_IER);
+
+ IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
+}
+
+
+/*
+ * Function smsc_ircc_hard_xmit_fir (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ s32 speed;
+ int iobase;
+ int mtt;
+
+ IRDA_ASSERT(dev != NULL, return 0;);
+ self = (struct smsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed after this frame */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /* Note : you should make sure that speed changes
+ * are not going to corrupt any outgoing frame.
+ * Look at nsc-ircc for the gory details - Jean II */
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ memcpy(self->tx_buff.head, skb->data, skb->len);
+
+ self->tx_buff.len = skb->len;
+ self->tx_buff.data = self->tx_buff.head;
+
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int bofs;
+
+ /*
+ * Compute how many BOFs (STA or PA's) we need to waste the
+ * min turn time given the speed of the link.
+ */
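+ /* e.g. mtt = 1000 usec at 4 Mb/s: 1000 * (4000000/1000) / 8000 = 500
+ * extra BOF bytes, i.e. roughly 1 msec of air time before the payload.
+ */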
+ bofs = mtt * (self->io.speed / 1000) / 8000;
+ if (bofs > 4095)
+ bofs = 4095;
+
+ smsc_ircc_dma_xmit(self, iobase, bofs);
+ } else {
+ /* Transmit frame */
+ smsc_ircc_dma_xmit(self, iobase, 0);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_dma_xmit (self, iobase)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs)
+{
+ u8 ctrl;
+
+ IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+#if 1
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase+IRCC_LCR_B);
+#endif
+ register_bank(iobase, 1);
+ outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase+IRCC_SCE_CFGB);
+
+ self->io.direction = IO_XMIT;
+
+ /* Set BOF additional count for generating the min turn time */
+ register_bank(iobase, 4);
+ outb(bofs & 0xff, iobase+IRCC_BOF_COUNT_LO);
+ ctrl = inb(iobase+IRCC_CONTROL) & 0xf0;
+ outb(ctrl | ((bofs >> 8) & 0x0f), iobase+IRCC_BOF_COUNT_HI);
+
+ /* Set max Tx frame size */
+ outb(self->tx_buff.len >> 8, iobase+IRCC_TX_SIZE_HI);
+ outb(self->tx_buff.len & 0xff, iobase+IRCC_TX_SIZE_LO);
+
+ /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
+
+ /* Enable burst mode chip Tx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB);
+
+ /* Setup DMA controller (must be done after enabling chip DMA) */
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ /* Enable interrupt */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER);
+
+ /* Enable transmit */
+ outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase+IRCC_LCR_B);
+}
+
+/*
+ * Function smsc_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase)
+{
+ IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+#if 0
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase+IRCC_LCR_B);
+#endif
+ register_bank(self->io.fir_base, 1);
+ outb(inb(self->io.fir_base+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ self->io.fir_base+IRCC_SCE_CFGB);
+
+ /* Check for underrun! */
+ register_bank(iobase, 0);
+ if (inb(iobase+IRCC_LSR) & IRCC_LSR_UNDERRUN) {
+ self->stats.tx_errors++;
+ self->stats.tx_fifo_errors++;
+
+ /* Reset error condition */
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_ERROR_RESET, iobase+IRCC_MASTER);
+ outb(0x00, iobase+IRCC_MASTER);
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.len;
+ }
+
+ /* Check if it's time to change the speed */
+ if (self->new_speed) {
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ netif_wake_queue(self->netdev);
+}
+
+/*
+ * Function smsc_ircc_dma_receive(self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase)
+{
+#if 0
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase+IRCC_SCE_CFGB);
+#endif
+
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase+IRCC_LCR_B);
+
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase+IRCC_SCE_CFGB);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Set max Rx frame size */
+ register_bank(iobase, 4);
+ outb((2050 >> 8) & 0x0f, iobase+IRCC_RX_SIZE_HI);
+ outb(2050 & 0xff, iobase+IRCC_RX_SIZE_LO);
+
+ /* Setup DMA controller */
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable burst mode chip Rx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB);
+
+ /* Enable interrupt */
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER);
+
+
+ /* Enable receiver */
+ register_bank(iobase, 0);
+ outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
+ iobase+IRCC_LCR_B);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_dma_receive_complete(self, iobase)
+ *
+ * Finished with receiving frames
+ *
+ */
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase)
+{
+ struct sk_buff *skb;
+ int len, msgcnt, lsr;
+
+ register_bank(iobase, 0);
+
+ IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+#if 0
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase+IRCC_LCR_B);
+#endif
+ register_bank(iobase, 0);
+ outb(inb(iobase+IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase+IRCC_LSAR);
+ lsr= inb(iobase+IRCC_LSR);
+ msgcnt = inb(iobase+IRCC_LCR_B) & 0x08;
+
+ IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
+ get_dma_residue(self->io.dma));
+
+ len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
+
+ /* Look for errors
+ */
+
+ if(lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
+ self->stats.rx_errors++;
+ if(lsr & IRCC_LSR_FRAME_ERROR) self->stats.rx_frame_errors++;
+ if(lsr & IRCC_LSR_CRC_ERROR) self->stats.rx_crc_errors++;
+ if(lsr & IRCC_LSR_SIZE_ERROR) self->stats.rx_length_errors++;
+ if(lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN)) self->stats.rx_length_errors++;
+ return;
+ }
+ /* Remove CRC */
+ if (self->io.speed < 4000000)
+ len -= 2;
+ else
+ len -= 4;
+
+ if ((len < 2) || (len > 2050)) {
+ IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
+ return;
+ }
+ IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);
+
+ skb = dev_alloc_skb(len+1);
+ if (!skb) {
+ IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
+ __FUNCTION__);
+ return;
+ }
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ memcpy(skb_put(skb, len), self->rx_buff.data, len);
+ self->stats.rx_packets++;
+ self->stats.rx_bytes += len;
+
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+}
+
+/*
+ * Function smsc_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+ inb(iobase+UART_RX));
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
+ break;
+ }
+ } while (inb(iobase+UART_LSR) & UART_LSR_DR);
+}
+
+
+/*
+ * Function smsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct smsc_ircc_cb *self;
+ int iobase, iir, lcra, lsr;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "%s: irq %d for unknown device.\n",
+ driver_name, irq);
+ goto irq_ret;
+ }
+ self = (struct smsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return IRQ_NONE;);
+
+ /* Serialise the interrupt handler across CPUs and stop the Tx path */
+ spin_lock(&self->lock);
+
+ /* Check if we should use the SIR interrupt handler */
+ if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ ret = smsc_ircc_interrupt_sir(dev);
+ goto irq_ret_unlock;
+ }
+
+ iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+ iir = inb(iobase+IRCC_IIR);
+ if (iir == 0)
+ goto irq_ret_unlock;
+ ret = IRQ_HANDLED;
+
+ /* Disable interrupts */
+ outb(0, iobase+IRCC_IER);
+ lcra = inb(iobase+IRCC_LCR_A);
+ lsr = inb(iobase+IRCC_LSR);
+
+ IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);
+
+ if (iir & IRCC_IIR_EOM) {
+ if (self->io.direction == IO_RECV)
+ smsc_ircc_dma_receive_complete(self, iobase);
+ else
+ smsc_ircc_dma_xmit_complete(self, iobase);
+
+ smsc_ircc_dma_receive(self, iobase);
+ }
+
+ if (iir & IRCC_IIR_ACTIVE_FRAME) {
+ /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/
+ }
+
+ /* Enable interrupts again */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, iobase+IRCC_IER);
+
+ irq_ret_unlock:
+ spin_unlock(&self->lock);
+ irq_ret:
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_interrupt_sir (dev)
+ *
+ * Interrupt handler for SIR modes
+ */
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = dev->priv;
+ int boguscount = 0;
+ int iobase;
+ int iir, lsr;
+
+ /* Already locked coming here in smsc_ircc_interrupt() */
+ /*spin_lock(&self->lock);*/
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase+UART_IIR) & UART_IIR_ID;
+ if (iir == 0)
+ return IRQ_NONE;
+ while (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase+UART_LSR);
+
+ IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __FUNCTION__, iir, lsr, iobase);
+
+ switch (iir) {
+ case UART_IIR_RLSI:
+ IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ smsc_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ /* Transmitter ready for data */
+ smsc_ircc_sir_write_wakeup(self);
+ break;
+ default:
+ IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
+ __FUNCTION__, iir);
+ break;
+ }
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 100)
+ break;
+
+ iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ }
+ /*spin_unlock(&self->lock);*/
+ return IRQ_HANDLED;
+}
+
+
+#if 0 /* unused */
+/*
+ * Function ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ int status = FALSE;
+ /* int iobase; */
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__,
+ get_dma_residue(self->io.dma));
+
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ return status;
+}
+#endif /* unused */
+
+
+/*
+ * Function smsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int smsc_ircc_net_open(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ int iobase;
+ char hwname[16];
+ unsigned long flags;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct smsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&self->lock, flags);
+ /*smsc_ircc_sir_start(self);*/
+ self->io.speed = 0;
+ smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Give self a hardware name */
+ /* It would be cool to offer the chip revision here - Jean II */
+ sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ smsc_ircc_net_close(dev);
+
+ IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
+ __FUNCTION__, self->io.dma);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int smsc_ircc_net_close(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ int iobase;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct smsc_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ free_irq(self->io.irq, dev);
+
+ disable_dma(self->io.dma);
+
+ free_dma(self->io.dma);
+
+ return 0;
+}
+
+
+static void smsc_ircc_suspend(struct smsc_ircc_cb *self)
+{
+ IRDA_MESSAGE("%s, Suspending\n", driver_name);
+
+ if (self->io.suspended)
+ return;
+
+ smsc_ircc_net_close(self->netdev);
+
+ self->io.suspended = 1;
+}
+
+static void smsc_ircc_wakeup(struct smsc_ircc_cb *self)
+{
+ if (!self->io.suspended)
+ return;
+
+ /* The code was doing a "cli()" here, but this can't be right.
+ * If you need protection, do it in net_open with a spinlock
+ * or give a good reason. - Jean II */
+
+ smsc_ircc_net_open(self->netdev);
+
+ IRDA_MESSAGE("%s, Waking up\n", driver_name);
+}
+
+static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
+{
+ struct smsc_ircc_cb *self = (struct smsc_ircc_cb*) dev->data;
+ if (self) {
+ switch (rqst) {
+ case PM_SUSPEND:
+ smsc_ircc_suspend(self);
+ break;
+ case PM_RESUME:
+ smsc_ircc_wakeup(self);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
+{
+ int iobase;
+ unsigned long flags;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ if (self->pmdev)
+ pm_unregister(self->pmdev);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Make sure the irq handler is not executing */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Stop interrupts */
+ register_bank(iobase, 0);
+ outb(0, iobase+IRCC_IER);
+ outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER);
+ outb(0x00, iobase+IRCC_MASTER);
+#if 0
+ /* Reset to SIR mode */
+ register_bank(iobase, 1);
+ outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase+IRCC_SCE_CFGA);
+ outb(IRCC_CFGB_IR, iobase+IRCC_SCE_CFGB);
+#endif
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Release the PORTS that this driver is using */
+ IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
+ self->io.fir_base);
+
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
+ self->io.sir_base);
+
+ release_region(self->io.sir_base, self->io.sir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static void __exit smsc_ircc_cleanup(void)
+{
+ int i;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ for (i=0; i < 2; i++) {
+ if (dev_self[i])
+ smsc_ircc_close(dev_self[i]);
+ }
+}
+
+/*
+ * Start SIR operations
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
+ */
+void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base, sir_base;
+
+ IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev= self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+ dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir;
+
+ fir_base = self->io.fir_base;
+ sir_base = self->io.sir_base;
+
+ /* Reset everything */
+ outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);
+
+ #if SMSC_IRCC2_C_SIR_STOP
+ /*smsc_ircc_sir_stop(self);*/
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base+IRCC_SCE_CFGA);
+
+ /* Initialize UART */
+ outb(UART_LCR_WLEN8, sir_base+UART_LCR); /* Reset DLAB */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base+UART_MCR);
+
+ /* Turn on interrupts */
+ outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base+UART_IER);
+
+ IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__);
+
+ outb(0x00, fir_base+IRCC_MASTER);
+}
+
+#if SMSC_IRCC2_C_SIR_STOP
+void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ iobase = self->io.sir_base;
+
+ /* Reset UART */
+ outb(0, iobase+UART_MCR);
+
+ /* Turn off interrupts */
+ outb(0, iobase+UART_IER);
+}
+#endif
+
+/*
+ * Function smsc_ircc_sir_write_wakeup (self)
+ *
+ * Called by the SIR interrupt handler when there's room for more data.
+ * If we have more packets to send, we send them here.
+ *
+ */
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+ int fcr;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ IRDA_DEBUG(4, "%s\n", __FUNCTION__);
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0) {
+ /* Write data left in transmit buffer */
+ actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ } else {
+
+ /*if (self->tx_buff.len ==0) {*/
+
+ /*
+ * Now the serial buffer is almost free and we can start
+ * transmission of another packet. But first we must check
+ * whether we need to change the speed of the hardware.
+ */
+ if (self->new_speed) {
+ IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
+ __FUNCTION__, self->new_speed);
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ } else {
+ /* Tell network layer that we want more frames */
+ netif_wake_queue(self->netdev);
+ }
+ self->stats.tx_packets++;
+
+ if(self->io.speed <= 115200) {
+ /*
+ * Reset Rx FIFO to make sure that all reflected transmit data
+ * is discarded. This is needed for half duplex operation
+ */
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
+ if (self->io.speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ outb(fcr, iobase+UART_FCR);
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase+UART_IER);
+ }
+ }
+}
+
+/*
+ * Function smsc_ircc_sir_write (iobase, fifo_size, buf, len)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__);
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+ actual++;
+ }
+ return actual;
+}
+
+/*
+ * Function smsc_ircc_is_receiving (self)
+ *
+ * Returns true if we are currently receiving data
+ *
+ */
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ return (self->rx_buff.state != OUTSIDE_FRAME);
+}
+
+
+/*
+ * Function smsc_ircc_probe_transceiver(self)
+ *
+ * Tries to find the transceiver in use
+ *
+ */
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
+{
+ unsigned int i;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ for(i=0; smsc_transceivers[i].name!=NULL; i++)
+ if((*smsc_transceivers[i].probe)(self->io.fir_base)) {
+ IRDA_MESSAGE(" %s transceiver found\n",
+ smsc_transceivers[i].name);
+ self->transceiver= i+1;
+ return;
+ }
+ IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
+ smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+
+ self->transceiver= SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
+}
+
+
+/*
+ * Function smsc_ircc_set_transceiver_for_speed(self, speed)
+ *
+ * Set the transceiver according to the speed
+ *
+ */
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ unsigned int trx;
+
+ trx = self->transceiver;
+ if(trx>0) (*smsc_transceivers[trx-1].set_for_speed)(self->io.fir_base, speed);
+}
+
+/*
+ * Function smsc_ircc_sir_wait_hw_transmitter_finish ()
+ *
+ * Wait for the real end of HW transmission
+ *
+ * The UART is a strict FIFO, and we get called only when we have finished
+ * pushing data to the FIFO, so the maximum amount of time we must wait
+ * is only for the FIFO to drain out.
+ *
+ * We use a simple calibrated loop. We may need to adjust the loop
+ * delay (udelay) to balance I/O traffic and latency. And we also need to
+ * adjust the maximum timeout.
+ * It would probably be better to wait for the proper interrupt,
+ * but it doesn't seem to be available.
+ *
+ * We can't use jiffies or kernel timers because :
+ * 1) We are called from the interrupt handler, which disables softirqs,
+ * so jiffies won't be increased
+ * 2) Jiffies granularity is usually very coarse (10ms), and we don't
+ * want to wait that long to detect stuck hardware.
+ * Jean II
+ */
+
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
+{
+ int iobase;
+ int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
+
+ iobase = self->io.sir_base;
+
+ /* Calibrated busy loop */
+ while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT))
+ udelay(1);
+
+ if (count < 0)
+ IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
+}
+
+
+/* PROBING
+ *
+ *
+ */
+
+static int __init smsc_ircc_look_for_chips(void)
+{
+ smsc_chip_address_t *address;
+ char *type;
+ unsigned int cfg_base, found;
+
+ found = 0;
+ address = possible_addresses;
+
+ while(address->cfg_base){
+ cfg_base = address->cfg_base;
+
+ /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/
+
+ if( address->type & SMSCSIO_TYPE_FDC){
+ type = "FDC";
+ if((address->type) & SMSCSIO_TYPE_FLAT) {
+ if(!smsc_superio_flat(fdc_chips_flat,cfg_base, type)) found++;
+ }
+ if((address->type) & SMSCSIO_TYPE_PAGED) {
+ if(!smsc_superio_paged(fdc_chips_paged,cfg_base, type)) found++;
+ }
+ }
+ if( address->type & SMSCSIO_TYPE_LPC){
+ type = "LPC";
+ if((address->type) & SMSCSIO_TYPE_FLAT) {
+ if(!smsc_superio_flat(lpc_chips_flat,cfg_base,type)) found++;
+ }
+ if((address->type) & SMSCSIO_TYPE_PAGED) {
+ if(!smsc_superio_paged(lpc_chips_paged,cfg_base,type)) found++;
+ }
+ }
+ address++;
+ }
+ return found;
+}
+
+/*
+ * Function smsc_superio_flat (chip, base, type)
+ *
+ * Try to get the configuration of an SMC SuperIO chip with a flat register model
+ *
+ */
+static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfgbase, char *type)
+{
+ unsigned short firbase, sirbase;
+ u8 mode, dma, irq;
+ int ret = -ENODEV;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type)==NULL)
+ return ret;
+
+ outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
+ mode = inb(cfgbase+1);
+
+ /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/
+
+ if(!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
+ IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__);
+
+ outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
+ sirbase = inb(cfgbase+1) << 2;
+
+ /* FIR iobase */
+ outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
+ firbase = inb(cfgbase+1) << 3;
+
+ /* DMA */
+ outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
+ dma = inb(cfgbase+1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
+
+ /* IRQ */
+ outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
+ irq = inb(cfgbase+1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
+
+ IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode);
+
+ if (firbase) {
+ if (smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
+ ret=0;
+ }
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfgbase);
+
+ return ret;
+}
+
+/*
+ * Function smsc_superio_paged (chip, base, type)
+ *
+ * Try to get the configuration of an SMC SuperIO chip with a paged register model
+ *
+ */
+static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type)
+{
+ unsigned short fir_io, sir_io;
+ int ret = -ENODEV;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ if (smsc_ircc_probe(cfg_base,0x20,chips,type)==NULL)
+ return ret;
+
+ /* Select logical device (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base + 1);
+
+ /* SIR iobase */
+ outb(0x60, cfg_base);
+ sir_io = inb(cfg_base + 1) << 8;
+ outb(0x61, cfg_base);
+ sir_io |= inb(cfg_base + 1);
+
+ /* Read FIR base */
+ outb(0x62, cfg_base);
+ fir_io = inb(cfg_base + 1) << 8;
+ outb(0x63, cfg_base);
+ fir_io |= inb(cfg_base + 1);
+ outb(0x2b, cfg_base); /* ??? */
+
+ if (fir_io) {
+ if (smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
+ ret=0;
+ }
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ return ret;
+}
+
+
+static int __init smsc_access(unsigned short cfg_base,unsigned char reg)
+{
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ outb(reg, cfg_base);
+
+ if (inb(cfg_base)!=reg)
+ return -1;
+
+ return 0;
+}
+
+static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type)
+{
+ u8 devid,xdevid,rev;
+
+ IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+
+ /* Leave configuration */
+
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ if (inb(cfg_base) == SMSCSIO_CFGEXITKEY) /* not a smc superio chip */
+ return NULL;
+
+ outb(reg, cfg_base);
+
+ xdevid=inb(cfg_base+1);
+
+ /* Enter configuration */
+
+ outb(SMSCSIO_CFGACCESSKEY, cfg_base);
+
+ #if 0
+ if (smsc_access(cfg_base,0x55)) /* send second key and check */
+ return NULL;
+ #endif
+
+ /* probe device ID */
+
+ if (smsc_access(cfg_base,reg))
+ return NULL;
+
+ devid=inb(cfg_base+1);
+
+ if (devid==0) /* typical value for unused port */
+ return NULL;
+
+ if (devid==0xff) /* typical value for unused port */
+ return NULL;
+
+ /* probe revision ID */
+
+ if (smsc_access(cfg_base,reg+1))
+ return NULL;
+
+ rev=inb(cfg_base+1);
+
+ if (rev>=128) /* revision IDs this high make no sense */
+ return NULL;
+
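+ /* If the device ID register reads the same outside and inside
+ * configuration mode, we are most likely not talking to a real
+ * configuration port, so treat this as a false positive. */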
+ if (devid==xdevid) /* protection against false positives */
+ return NULL;
+
+ /* Check for expected device ID; are there others? */
+
+ while(chip->devid!=devid) {
+
+ chip++;
+
+ if (chip->name==NULL)
+ return NULL;
+ }
+
+ IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",devid,rev,cfg_base,type,chip->name);
+
+ if (chip->rev>rev){
+ IRDA_MESSAGE("Revision higher than expected\n");
+ return NULL;
+ }
+
+ if (chip->flags&NoIRDA)
+ IRDA_MESSAGE("chipset does not support IRDA\n");
+
+ return chip;
+}
+
+static int __init smsc_superio_fdc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
+ __FUNCTION__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(fdc_chips_flat,cfg_base,"FDC")
+ ||!smsc_superio_paged(fdc_chips_paged,cfg_base,"FDC"))
+ ret = 0;
+
+ release_region(cfg_base, 2);
+ }
+
+ return ret;
+}
+
+static int __init smsc_superio_lpc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
+ __FUNCTION__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(lpc_chips_flat,cfg_base,"LPC")
+ ||!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC"))
+ ret = 0;
+ release_region(cfg_base, 2);
+ }
+ return ret;
+}
+
+/************************************************
+ *
+ * Transceivers specific functions
+ *
+ ************************************************/
+
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_atc(fir_base, speed)
+ *
+ * Program transceiver through smsc-ircc ATC circuitry
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
+{
+ unsigned long jiffies_now, jiffies_timeout;
+ u8 val;
+
+ jiffies_now= jiffies;
+ jiffies_timeout= jiffies+SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
+
+ /* ATC */
+ register_bank(fir_base, 4);
+ outb((inb(fir_base+IRCC_ATC) & IRCC_ATC_MASK) |IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE, fir_base+IRCC_ATC);
+ while((val=(inb(fir_base+IRCC_ATC) & IRCC_ATC_nPROGREADY)) && !time_after(jiffies, jiffies_timeout));
+ if(val)
+ IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__,
+ inb(fir_base+IRCC_ATC));
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_atc(fir_base)
+ *
+ * Probe transceiver smsc-ircc ATC circuitry
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch(speed)
+ {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = IRCC_LCR_A_FAST;
+ break;
+
+ }
+ register_bank(fir_base, 0);
+ outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch(speed)
+ {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
+ break;
+
+ }
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
+{
+ return 0;
+}
+
+
+module_init(smsc_ircc_init);
+module_exit(smsc_ircc_cleanup);
+
+MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
+MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
+MODULE_LICENSE("GPL");
+
+module_param(ircc_dma, int, 0);
+MODULE_PARM_DESC(ircc_dma, "DMA channel");
+module_param(ircc_irq, int, 0);
+MODULE_PARM_DESC(ircc_irq, "IRQ line");
+module_param(ircc_fir, int, 0);
+MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
+module_param(ircc_sir, int, 0);
+MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
+module_param(ircc_cfg, int, 0);
+MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
+module_param(ircc_transceiver, int, 0);
+MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
new file mode 100644
index 000000000000..458611cc0d40
--- /dev/null
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -0,0 +1,194 @@
+/*********************************************************************
+ * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $
+ *
+ * Description: Definitions for the SMC IrCC chipset
+ * Status: Experimental.
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ *
+ * Based on smc-ircc.h:
+ *
+ * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999, Thomas Davis <tadavis@jps.net>
+ * All Rights Reserved
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef SMSC_IRCC2_H
+#define SMSC_IRCC2_H
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Master Control Register */
+#define IRCC_MASTER 0x07
+#define IRCC_MASTER_POWERDOWN 0x80
+#define IRCC_MASTER_RESET 0x40
+#define IRCC_MASTER_INT_EN 0x20
+#define IRCC_MASTER_ERROR_RESET 0x10
+
+/* Register block 0 */
+
+/* Interrupt Identification */
+#define IRCC_IIR 0x01
+#define IRCC_IIR_ACTIVE_FRAME 0x80
+#define IRCC_IIR_EOM 0x40
+#define IRCC_IIR_RAW_MODE 0x20
+#define IRCC_IIR_FIFO 0x10
+
+/* Interrupt Enable */
+#define IRCC_IER 0x02
+#define IRCC_IER_ACTIVE_FRAME 0x80
+#define IRCC_IER_EOM 0x40
+#define IRCC_IER_RAW_MODE 0x20
+#define IRCC_IER_FIFO 0x10
+
+/* Line Status Register */
+#define IRCC_LSR 0x03
+#define IRCC_LSR_UNDERRUN 0x80
+#define IRCC_LSR_OVERRUN 0x40
+#define IRCC_LSR_FRAME_ERROR 0x20
+#define IRCC_LSR_SIZE_ERROR 0x10
+#define IRCC_LSR_CRC_ERROR 0x80
+#define IRCC_LSR_FRAME_ABORT 0x40
+
+/* Line Status Address Register */
+#define IRCC_LSAR 0x03
+#define IRCC_LSAR_ADDRESS_MASK 0x07
+
+/* Line Control Register A */
+#define IRCC_LCR_A 0x04
+#define IRCC_LCR_A_FIFO_RESET 0x80
+#define IRCC_LCR_A_FAST 0x40
+#define IRCC_LCR_A_GP_DATA 0x20
+#define IRCC_LCR_A_RAW_TX 0x10
+#define IRCC_LCR_A_RAW_RX 0x08
+#define IRCC_LCR_A_ABORT 0x04
+#define IRCC_LCR_A_DATA_DONE 0x02
+
+/* Line Control Register B */
+#define IRCC_LCR_B 0x05
+#define IRCC_LCR_B_SCE_DISABLED 0x00
+#define IRCC_LCR_B_SCE_TRANSMIT 0x40
+#define IRCC_LCR_B_SCE_RECEIVE 0x80
+#define IRCC_LCR_B_SCE_UNDEFINED 0xc0
+#define IRCC_LCR_B_SIP_ENABLE 0x20
+#define IRCC_LCR_B_BRICK_WALL 0x10
+
+/* Bus Status Register */
+#define IRCC_BSR 0x06
+#define IRCC_BSR_NOT_EMPTY 0x80
+#define IRCC_BSR_FIFO_FULL 0x40
+#define IRCC_BSR_TIMEOUT 0x20
+
+/* Register block 1 */
+
+#define IRCC_FIFO_THRESHOLD 0x02
+
+#define IRCC_SCE_CFGA 0x00
+#define IRCC_CFGA_AUX_IR 0x80
+#define IRCC_CFGA_HALF_DUPLEX 0x04
+#define IRCC_CFGA_TX_POLARITY 0x02
+#define IRCC_CFGA_RX_POLARITY 0x01
+
+#define IRCC_CFGA_COM 0x00
+#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
+#define IRCC_CFGA_IRDA_SIR_A 0x08
+#define IRCC_CFGA_ASK_SIR 0x10
+#define IRCC_CFGA_IRDA_SIR_B 0x18
+#define IRCC_CFGA_IRDA_HDLC 0x20
+#define IRCC_CFGA_IRDA_4PPM 0x28
+#define IRCC_CFGA_CONSUMER 0x30
+#define IRCC_CFGA_RAW_IR 0x38
+#define IRCC_CFGA_OTHER 0x40
+
+#define IRCC_IR_HDLC 0x04
+#define IRCC_IR_4PPM 0x01
+#define IRCC_IR_CONSUMER 0x02
+
+#define IRCC_SCE_CFGB 0x01
+#define IRCC_CFGB_LOOPBACK 0x20
+#define IRCC_CFGB_LPBCK_TX_CRC 0x10
+#define IRCC_CFGB_NOWAIT 0x08
+#define IRCC_CFGB_STRING_MOVE 0x04
+#define IRCC_CFGB_DMA_BURST 0x02
+#define IRCC_CFGB_DMA_ENABLE 0x01
+
+#define IRCC_CFGB_MUX_COM 0x00
+#define IRCC_CFGB_MUX_IR 0x40
+#define IRCC_CFGB_MUX_AUX 0x80
+#define IRCC_CFGB_MUX_INACTIVE 0xc0
+
+/* Register block 3 - Identification Registers! */
+#define IRCC_ID_HIGH 0x00 /* 0x10 */
+#define IRCC_ID_LOW 0x01 /* 0xB8 */
+#define IRCC_CHIP_ID 0x02 /* 0xF1 */
+#define IRCC_VERSION 0x03 /* 0x01 */
+#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
+
+/* Register block 4 - IrDA */
+#define IRCC_CONTROL 0x00
+#define IRCC_BOF_COUNT_LO 0x01 /* Low byte */
+#define IRCC_BOF_COUNT_HI 0x00 /* High nibble (bit 0-3) */
+#define IRCC_BRICKWALL_CNT_LO 0x02 /* Low byte */
+#define IRCC_BRICKWALL_CNT_HI 0x03 /* High nibble (bit 4-7) */
+#define IRCC_TX_SIZE_LO 0x04 /* Low byte */
+#define IRCC_TX_SIZE_HI 0x03 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_HI 0x05 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_LO 0x06 /* Low byte */
+
+#define IRCC_1152 0x80
+#define IRCC_CRC 0x40
+
+/* Register block 5 - IrDA */
+#define IRCC_ATC 0x00
+#define IRCC_ATC_nPROGREADY 0x80
+#define IRCC_ATC_SPEED 0x40
+#define IRCC_ATC_ENABLE 0x20
+#define IRCC_ATC_MASK 0xE0
+
+
+#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
+
+#define IRCC_SCE_TX_DELAY_TIMER 0x02
+
+/*
+ * Other definitions
+ */
+
+#define SMSC_IRCC2_MAX_SIR_SPEED 115200
+#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_FIFO_SIZE 16
+#define SMSC_IRCC2_FIFO_THRESHOLD 64
+/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+#define SMSC_IRCC2_RX_BUFF_TRUESIZE 14384
+#define SMSC_IRCC2_TX_BUFF_TRUESIZE 14384
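+/* As a worked example of the formula above: assuming the usual 2048-byte
+ * maximum IrDA payload and the window size of 7 defined just below,
+ * (2048 + 6) * 7 + 6 = 14384 bytes, which matches the truesize values. */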
+#define SMSC_IRCC2_MIN_TURN_TIME 0x07
+#define SMSC_IRCC2_WINDOW_SIZE 0x07
+/* Maximum wait for hw transmitter to finish */
+#define SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US 1000 /* 1 ms */
+/* Maximum wait for ATC transceiver programming to finish */
+#define SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES 1
+#endif /* SMSC_IRCC2_H */
diff --git a/drivers/net/irda/smsc-sio.h b/drivers/net/irda/smsc-sio.h
new file mode 100644
index 000000000000..59e20e653ebe
--- /dev/null
+++ b/drivers/net/irda/smsc-sio.h
@@ -0,0 +1,100 @@
+#ifndef SMSC_SIO_H
+#define SMSC_SIO_H
+
+/******************************************
+ Keys. They should work with every SMSC SIO
+ ******************************************/
+
+#define SMSCSIO_CFGACCESSKEY 0x55
+#define SMSCSIO_CFGEXITKEY 0xaa
+
+/*****************************
+ * Generic SIO Flat (!?) *
+ *****************************/
+
+/* Register 0x0d */
+#define SMSCSIOFLAT_DEVICEID_REG 0x0d
+
+/* Register 0x0c */
+#define SMSCSIOFLAT_UARTMODE0C_REG 0x0c
+#define SMSCSIOFLAT_UART2MODE_MASK 0x38
+#define SMSCSIOFLAT_UART2MODE_VAL_COM 0x00
+#define SMSCSIOFLAT_UART2MODE_VAL_IRDA 0x08
+#define SMSCSIOFLAT_UART2MODE_VAL_ASKIR 0x10
+
+/* Register 0x25 */
+#define SMSCSIOFLAT_UART2BASEADDR_REG 0x25
+
+/* Register 0x2b */
+#define SMSCSIOFLAT_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define SMSCSIOFLAT_FIRDMASELECT_REG 0x2c
+#define SMSCSIOFLAT_FIRDMASELECT_MASK 0x0f
+
+/* Register 0x28 */
+#define SMSCSIOFLAT_UARTIRQSELECT_REG 0x28
+#define SMSCSIOFLAT_UART2IRQSELECT_MASK 0x0f
+#define SMSCSIOFLAT_UART1IRQSELECT_MASK 0xf0
+#define SMSCSIOFLAT_UARTIRQSELECT_VAL_NONE 0x00
+
+
+/*********************
+ * LPC47N227 *
+ *********************/
+
+#define LPC47N227_CFGACCESSKEY 0x55
+#define LPC47N227_CFGEXITKEY 0xaa
+
+/* Register 0x00 */
+#define LPC47N227_FDCPOWERVALIDCONF_REG 0x00
+#define LPC47N227_FDCPOWER_MASK 0x08
+#define LPC47N227_VALID_MASK 0x80
+
+/* Register 0x02 */
+#define LPC47N227_UART12POWER_REG 0x02
+#define LPC47N227_UART1POWERDOWN_MASK 0x08
+#define LPC47N227_UART2POWERDOWN_MASK 0x80
+
+/* Register 0x07 */
+#define LPC47N227_APMBOOTDRIVE_REG 0x07
+#define LPC47N227_PARPORT2AUTOPWRDOWN_MASK 0x10 /* auto power-down enabled if set */
+#define LPC47N227_UART2AUTOPWRDOWN_MASK 0x20 /* auto power-down enabled if set */
+#define LPC47N227_UART1AUTOPWRDOWN_MASK 0x40 /* auto power-down enabled if set */
+
+/* Register 0x0c */
+#define LPC47N227_UARTMODE0C_REG 0x0c
+#define LPC47N227_UART2MODE_MASK 0x38
+#define LPC47N227_UART2MODE_VAL_COM 0x00
+#define LPC47N227_UART2MODE_VAL_IRDA 0x08
+#define LPC47N227_UART2MODE_VAL_ASKIR 0x10
+
+/* Register 0x0d */
+#define LPC47N227_DEVICEID_REG 0x0d
+#define LPC47N227_DEVICEID_DEFVAL 0x5a
+
+/* Register 0x0e */
+#define LPC47N227_REVISIONID_REG 0x0e
+
+/* Register 0x25 */
+#define LPC47N227_UART2BASEADDR_REG 0x25
+
+/* Register 0x28 */
+#define LPC47N227_UARTIRQSELECT_REG 0x28
+#define LPC47N227_UART2IRQSELECT_MASK 0x0f
+#define LPC47N227_UART1IRQSELECT_MASK 0xf0
+#define LPC47N227_UARTIRQSELECT_VAL_NONE 0x00
+
+/* Register 0x2b */
+#define LPC47N227_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define LPC47N227_FIRDMASELECT_REG 0x2c
+#define LPC47N227_FIRDMASELECT_MASK 0x0f
+#define LPC47N227_FIRDMASELECT_VAL_DMA1 0x01 /* 47n227 has three dma channels */
+#define LPC47N227_FIRDMASELECT_VAL_DMA2 0x02
+#define LPC47N227_FIRDMASELECT_VAL_DMA3 0x03
+#define LPC47N227_FIRDMASELECT_VAL_NONE 0x0f
+
+
+#endif
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
new file mode 100644
index 000000000000..83c605e8824c
--- /dev/null
+++ b/drivers/net/irda/stir4200.c
@@ -0,0 +1,1184 @@
+/*****************************************************************************
+*
+* Filename: stir4200.c
+* Version: 0.4
+* Description: Irda SigmaTel USB Dongle
+* Status: Experimental
+* Author: Stephen Hemminger <shemminger@osdl.org>
+*
+* Based on earlier driver by Paul Stewart <stewart@parc.com>
+*
+* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * This dongle does no framing, and requires polling to receive the
+ * data. The STIr4200 has bulk in and out endpoints just like
+ * usb-irda devices, but the data it sends and receives is raw; like
+ * irtty, it needs to call the wrap and unwrap functions to add and
+ * remove SOF/BOF and escape characters to/from the frame.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <net/irda/irda.h>
+#include <net/irda/irlap.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200");
+MODULE_LICENSE("GPL");
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+static int rx_sensitivity = 1; /* FIR 0..4, SIR 0..6 */
+module_param(rx_sensitivity, int, 0);
+MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-6, 0 is most sensitive)");
+
+static int tx_power = 0; /* 0 = highest ... 3 = lowest */
+module_param(tx_power, int, 0);
+MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)");
+
+#define STIR_IRDA_HEADER 4
+#define CTRL_TIMEOUT 100 /* milliseconds */
+#define TRANSMIT_TIMEOUT 200 /* milliseconds */
+#define STIR_FIFO_SIZE 4096
+#define FIFO_REGS_SIZE 3
+
+enum FirChars {
+ FIR_CE = 0x7d,
+ FIR_XBOF = 0x7f,
+ FIR_EOF = 0x7e,
+};
+
+enum StirRequests {
+ REQ_WRITE_REG = 0x00,
+ REQ_READ_REG = 0x01,
+ REQ_READ_ROM = 0x02,
+ REQ_WRITE_SINGLE = 0x03,
+};
+
+/* Register offsets */
+enum StirRegs {
+ REG_RSVD=0,
+ REG_MODE,
+ REG_PDCLK,
+ REG_CTRL1,
+ REG_CTRL2,
+ REG_FIFOCTL,
+ REG_FIFOLSB,
+ REG_FIFOMSB,
+ REG_DPLL,
+ REG_IRDIG,
+ REG_TEST=15,
+};
+
+enum StirModeMask {
+ MODE_FIR = 0x80,
+ MODE_SIR = 0x20,
+ MODE_ASK = 0x10,
+ MODE_FASTRX = 0x08,
+ MODE_FFRSTEN = 0x04,
+ MODE_NRESET = 0x02,
+ MODE_2400 = 0x01,
+};
+
+enum StirPdclkMask {
+ PDCLK_4000000 = 0x02,
+ PDCLK_115200 = 0x09,
+ PDCLK_57600 = 0x13,
+ PDCLK_38400 = 0x1D,
+ PDCLK_19200 = 0x3B,
+ PDCLK_9600 = 0x77,
+ PDCLK_2400 = 0xDF,
+};
+
+enum StirCtrl1Mask {
+ CTRL1_SDMODE = 0x80,
+ CTRL1_RXSLOW = 0x40,
+ CTRL1_TXPWD = 0x10,
+ CTRL1_RXPWD = 0x08,
+ CTRL1_SRESET = 0x01,
+};
+
+enum StirCtrl2Mask {
+ CTRL2_SPWIDTH = 0x08,
+ CTRL2_REVID = 0x03,
+};
+
+enum StirFifoCtlMask {
+ FIFOCTL_EOF = 0x80,
+ FIFOCTL_UNDER = 0x40,
+ FIFOCTL_OVER = 0x20,
+ FIFOCTL_DIR = 0x10,
+ FIFOCTL_CLR = 0x08,
+ FIFOCTL_EMPTY = 0x04,
+ FIFOCTL_RXERR = 0x02,
+ FIFOCTL_TXERR = 0x01,
+};
+
+enum StirDiagMask {
+ IRDIG_RXHIGH = 0x80,
+ IRDIG_RXLOW = 0x40,
+};
+
+enum StirTestMask {
+ TEST_PLLDOWN = 0x80,
+ TEST_LOOPIR = 0x40,
+ TEST_LOOPUSB = 0x20,
+ TEST_TSTENA = 0x10,
+ TEST_TSTOSC = 0x0F,
+};
+
+struct stir_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct net_device_stats stats; /* network statistics */
+ struct qos_info qos;
+ unsigned speed; /* Current speed */
+
+ wait_queue_head_t thr_wait; /* transmit thread wakeup */
+ struct completion thr_exited;
+ pid_t thr_pid;
+
+ struct sk_buff *tx_pending;
+ void *io_buf; /* transmit/receive buffer */
+ __u8 *fifo_status;
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ struct timeval rx_time;
+ int receiving;
+ struct urb *rx_urb;
+};
+
+
+/* These are the currently known USB ids */
+static struct usb_device_id dongles[] = {
+ /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */
+ { USB_DEVICE(0x066f, 0x4200) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+/* Send control message to set dongle register */
+static int write_reg(struct stir_cb *stir, __u16 reg, __u8 value)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ pr_debug("%s: write reg %d = 0x%x\n",
+ stir->netdev->name, reg, value);
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ REQ_WRITE_SINGLE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
+ value, reg, NULL, 0,
+ CTRL_TIMEOUT);
+}
+
+/* Send control message to read multiple registers */
+static inline int read_reg(struct stir_cb *stir, __u16 reg,
+ __u8 *data, __u16 count)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ REQ_READ_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, count,
+ CTRL_TIMEOUT);
+}
+
+static inline int isfir(u32 speed)
+{
+ return (speed == 4000000);
+}
+
+/*
+ * Prepare a FIR IrDA frame for transmission to the USB dongle. The
+ * FIR transmit frame is documented in the datasheet. It consists of
+ * a two byte 0x55 0xAA sequence, two little-endian length bytes, a
+ * sequence of exactly 16 XBOF bytes of 0x7F, two BOF bytes of 0x7E,
+ * then the data escaped as follows:
+ *
+ * 0x7D -> 0x7D 0x5D
+ * 0x7E -> 0x7D 0x5E
+ * 0x7F -> 0x7D 0x5F
+ *
+ * Then, 4 bytes of little endian (stuffed) FCS follow, then two
+ * trailing EOF bytes of 0x7E.
+ */
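+/* For example, a payload of 0x01 0x7e 0x02 is escaped to
+ * 0x01 0x7d 0x5e 0x02 before the FCS and trailing EOFs are appended;
+ * a hypothetical 5-byte payload in which neither the data nor the FCS
+ * needs escaping therefore wraps to 4 + 16 + 2 + 5 + 4 + 2 = 33 bytes
+ * (header, XBOFs, BOFs, data, FCS, EOFs). */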
+static inline __u8 *stuff_fir(__u8 *p, __u8 c)
+{
+ switch(c) {
+ case 0x7d:
+ case 0x7e:
+ case 0x7f:
+ *p++ = 0x7d;
+ c ^= IRDA_TRANS;
+ /* fall through */
+ default:
+ *p++ = c;
+ }
+ return p;
+}
+
+/* Take raw data in skb and put it wrapped into buf */
+static unsigned wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ __u8 *ptr = buf;
+ __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
+ __u16 wraplen;
+ int i;
+
+ /* Header */
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+
+ ptr = buf + STIR_IRDA_HEADER;
+ memset(ptr, 0x7f, 16);
+ ptr += 16;
+
+ /* BOF */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Address / Control / Information */
+ for (i = 0; i < skb->len; i++)
+ ptr = stuff_fir(ptr, skb->data[i]);
+
+ /* FCS */
+ ptr = stuff_fir(ptr, fcs & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 8) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 16) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 24) & 0xff);
+
+ /* EOFs */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Total length, minus the header */
+ wraplen = (ptr - buf) - STIR_IRDA_HEADER;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
+
+static unsigned wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
+{
+ __u16 wraplen;
+
+ wraplen = async_wrap_skb(skb, buf + STIR_IRDA_HEADER,
+ STIR_FIFO_SIZE - STIR_IRDA_HEADER);
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
+
+/*
+ * Frame is fully formed in the rx_buff, so check the CRC,
+ * pass it up to IrLAP and set up for the next receive.
+ */
+static void fir_eof(struct stir_cb *stir)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int len = rx_buff->len - 4;
+ struct sk_buff *skb, *nskb;
+ __u32 fcs;
+
+ if (unlikely(len <= 0)) {
+ pr_debug("%s: short frame len %d\n",
+ stir->netdev->name, len);
+
+ ++stir->stats.rx_errors;
+ ++stir->stats.rx_length_errors;
+ return;
+ }
+
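+ /* The FCS is the complemented little-endian CRC-32 of the payload,
+ * computed the same way as on the transmit side in wrap_fir_skb(). */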
+ fcs = ~(crc32_le(~0, rx_buff->data, len));
+ if (fcs != le32_to_cpu(get_unaligned((u32 *)(rx_buff->data+len)))) {
+ pr_debug("crc error calc 0x%x len %d\n", fcs, len);
+ stir->stats.rx_errors++;
+ stir->stats.rx_crc_errors++;
+ return;
+ }
+
+ /* if frame is short then just copy it */
+ if (len < IRDA_RX_COPY_THRESHOLD) {
+ nskb = dev_alloc_skb(len + 1);
+ if (unlikely(!nskb)) {
+ ++stir->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = nskb;
+ memcpy(nskb->data, rx_buff->data, len);
+ } else {
+ nskb = dev_alloc_skb(rx_buff->truesize);
+ if (unlikely(!nskb)) {
+ ++stir->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = rx_buff->skb;
+ rx_buff->skb = nskb;
+ rx_buff->head = nskb->data;
+ }
+
+ skb_put(skb, len);
+
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = stir->netdev;
+
+ netif_rx(skb);
+
+ stir->stats.rx_packets++;
+ stir->stats.rx_bytes += len;
+
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+}
+
+/* Unwrap FIR stuffed data and bump it to IrLAP */
+static void stir_fir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ __u8 byte = bytes[i];
+
+ switch(rx_buff->state) {
+ case OUTSIDE_FRAME:
+ /* ignore garbage till start of frame */
+ if (unlikely(byte != FIR_EOF))
+ continue;
+ /* Now receiving frame */
+ rx_buff->state = BEGIN_FRAME;
+
+ /* Time to initialize receive buffer */
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+ continue;
+
+ case LINK_ESCAPE:
+ if (byte == FIR_EOF) {
+ pr_debug("%s: got EOF after escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ }
+ rx_buff->state = INSIDE_FRAME;
+ byte ^= IRDA_TRANS;
+ break;
+
+ case BEGIN_FRAME:
+ /* ignore multiple BOF/EOF */
+ if (byte == FIR_EOF)
+ continue;
+ rx_buff->state = INSIDE_FRAME;
+ rx_buff->in_frame = TRUE;
+
+ /* fall through */
+ case INSIDE_FRAME:
+ switch(byte) {
+ case FIR_CE:
+ rx_buff->state = LINK_ESCAPE;
+ continue;
+ case FIR_XBOF:
+ /* 0x7f is not used in this framing */
+ pr_debug("%s: got XBOF without escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ case FIR_EOF:
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ fir_eof(stir);
+ continue;
+ }
+ break;
+ }
+
+ /* add byte to rx buffer */
+ if (unlikely(rx_buff->len >= rx_buff->truesize)) {
+ pr_debug("%s: fir frame exceeds %d\n",
+ stir->netdev->name, rx_buff->truesize);
+ ++stir->stats.rx_over_errors;
+ goto error_recovery;
+ }
+
+ rx_buff->data[rx_buff->len++] = byte;
+ continue;
+
+ frame_error:
+ ++stir->stats.rx_frame_errors;
+
+ error_recovery:
+ ++stir->stats.rx_errors;
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ }
+}
+
+/* Unwrap SIR stuffed data and bump it up to IrLAP */
+static void stir_sir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ async_unwrap_char(stir->netdev, &stir->stats,
+ &stir->rx_buff, bytes[i]);
+}
+
+static inline void unwrap_chars(struct stir_cb *stir,
+ const __u8 *bytes, int length)
+{
+ if (isfir(stir->speed))
+ stir_fir_chars(stir, bytes, length);
+ else
+ stir_sir_chars(stir, bytes, length);
+}
+
+/* Mode parameters for each speed */
+static const struct {
+ unsigned speed;
+ __u8 pdclk;
+} stir_modes[] = {
+ { 2400, PDCLK_2400 },
+ { 9600, PDCLK_9600 },
+ { 19200, PDCLK_19200 },
+ { 38400, PDCLK_38400 },
+ { 57600, PDCLK_57600 },
+ { 115200, PDCLK_115200 },
+ { 4000000, PDCLK_4000000 },
+};
+
+
+/*
+ * Setup chip for speed.
+ * Called at startup to initialize the chip
+ * and on speed changes.
+ *
+ * Note: writing multiple registers doesn't appear to work
+ */
+static int change_speed(struct stir_cb *stir, unsigned speed)
+{
+ int i, err;
+ __u8 mode;
+
+ for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) {
+ if (speed == stir_modes[i].speed)
+ goto found;
+ }
+
+ warn("%s: invalid speed %d", stir->netdev->name, speed);
+ return -EINVAL;
+
+ found:
+ pr_debug("speed change from %d to %d\n", stir->speed, speed);
+
+ /* Reset modulator */
+ err = write_reg(stir, REG_CTRL1, CTRL1_SRESET);
+ if (err)
+ goto out;
+
+ /* Undocumented magic to tweak the DPLL */
+ err = write_reg(stir, REG_DPLL, 0x15);
+ if (err)
+ goto out;
+
+ /* Set clock */
+ err = write_reg(stir, REG_PDCLK, stir_modes[i].pdclk);
+ if (err)
+ goto out;
+
+ mode = MODE_NRESET | MODE_FASTRX;
+ if (isfir(speed))
+ mode |= MODE_FIR | MODE_FFRSTEN;
+ else
+ mode |= MODE_SIR;
+
+ if (speed == 2400)
+ mode |= MODE_2400;
+
+ err = write_reg(stir, REG_MODE, mode);
+ if (err)
+ goto out;
+
+ /* This resets TEMIC style transceiver if any. */
+ err = write_reg(stir, REG_CTRL1,
+ CTRL1_SDMODE | (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ err = write_reg(stir, REG_CTRL1, (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ /* Reset sensitivity */
+ err = write_reg(stir, REG_CTRL2, (rx_sensitivity & 7) << 5);
+ out:
+ stir->speed = speed;
+ return err;
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static int stir_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ skb = xchg(&stir->tx_pending, skb);
+ wake_up(&stir->thr_wait);
+
+ /* this should never happen unless there is a stop/wakeup problem */
+ if (unlikely(skb)) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ }
+
+ return 0;
+}
+
+/*
+ * Wait for the transmit FIFO to have space for next data
+ *
+ * If space < 0 then wait till FIFO completely drains.
+ * FYI: can take up to 13 seconds at 2400 baud.
+ */
+static int fifo_txwait(struct stir_cb *stir, int space)
+{
+ int err;
+ unsigned long count, status;
+
+ /* Read FIFO status and count */
+ for(;;) {
+ err = read_reg(stir, REG_FIFOCTL, stir->fifo_status,
+ FIFO_REGS_SIZE);
+ if (unlikely(err != FIFO_REGS_SIZE)) {
+ warn("%s: FIFO register read error: %d",
+ stir->netdev->name, err);
+
+ return err;
+ }
+
+ status = stir->fifo_status[0];
+ count = (unsigned)(stir->fifo_status[2] & 0x1f) << 8
+ | stir->fifo_status[1];
+
+ pr_debug("fifo status 0x%lx count %lu\n", status, count);
+
+ /* error when receive/transmit fifo gets confused */
+ if (status & FIFOCTL_RXERR) {
+ stir->stats.rx_fifo_errors++;
+ stir->stats.rx_errors++;
+ break;
+ }
+
+ if (status & FIFOCTL_TXERR) {
+ stir->stats.tx_fifo_errors++;
+ stir->stats.tx_errors++;
+ break;
+ }
+
+ /* is fifo receiving already, or empty */
+ if (!(status & FIFOCTL_DIR)
+ || (status & FIFOCTL_EMPTY))
+ return 0;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* shutting down? */
+ if (!netif_running(stir->netdev)
+ || !netif_device_present(stir->netdev))
+ return -ESHUTDOWN;
+
+ /* only waiting for some space */
+ if (space >= 0 && STIR_FIFO_SIZE - 4 > space + count)
+ return 0;
+
+ /* estimate transfer time for remaining chars */
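+ /* count bytes * 8 bits, scaled by 1000/speed (bits/s) to get
+ * milliseconds; e.g. 1000 bytes at 115200 baud sleeps roughly 69 ms */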
+ msleep((count * 8000) / stir->speed);
+ }
+
+ err = write_reg(stir, REG_FIFOCTL, FIFOCTL_CLR);
+ if (err)
+ return err;
+ err = write_reg(stir, REG_FIFOCTL, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+
+/* Wait for turnaround delay before starting transmit. */
+static void turnaround_delay(const struct stir_cb *stir, long us)
+{
+ long ticks;
+ struct timeval now;
+
+ if (us <= 0)
+ return;
+
+ do_gettimeofday(&now);
+ if (now.tv_sec - stir->rx_time.tv_sec > 0)
+ us -= USEC_PER_SEC;
+ us -= now.tv_usec - stir->rx_time.tv_usec;
+ if (us < 10)
+ return;
+
+ ticks = us / (1000000 / HZ);
+ if (ticks > 0) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1 + ticks);
+ } else
+ udelay(us);
+}
+
+/*
+ * Start receiver by submitting a request to the receive pipe.
+ * If nothing is available it will return after rx_interval.
+ */
+static int receive_start(struct stir_cb *stir)
+{
+ /* reset state */
+ stir->receiving = 1;
+
+ stir->rx_buff.in_frame = FALSE;
+ stir->rx_buff.state = OUTSIDE_FRAME;
+
+ stir->rx_urb->status = 0;
+ return usb_submit_urb(stir->rx_urb, GFP_KERNEL);
+}
+
+/* Stop all pending receive URBs */
+static void receive_stop(struct stir_cb *stir)
+{
+ stir->receiving = 0;
+ usb_kill_urb(stir->rx_urb);
+
+ if (stir->rx_buff.in_frame)
+ stir->stats.collisions++;
+}
+/*
+ * Wrap data in socket buffer and send it.
+ */
+static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
+{
+ unsigned wraplen;
+ int first_frame = 0;
+
+ /* if receiving, need to turnaround */
+ if (stir->receiving) {
+ receive_stop(stir);
+ turnaround_delay(stir, irda_get_mtt(skb));
+ first_frame = 1;
+ }
+
+ if (isfir(stir->speed))
+ wraplen = wrap_fir_skb(skb, stir->io_buf);
+ else
+ wraplen = wrap_sir_skb(skb, stir->io_buf);
+
+ /* check for space available in fifo */
+ if (!first_frame)
+ fifo_txwait(stir, wraplen);
+
+ stir->stats.tx_packets++;
+ stir->stats.tx_bytes += skb->len;
+ stir->netdev->trans_start = jiffies;
+ pr_debug("send %d (%d)\n", skb->len, wraplen);
+
+ if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
+ stir->io_buf, wraplen,
+ NULL, TRANSMIT_TIMEOUT))
+ stir->stats.tx_errors++;
+}
+
+/*
+ * Transmit state machine thread
+ */
+static int stir_transmit_thread(void *arg)
+{
+ struct stir_cb *stir = arg;
+ struct net_device *dev = stir->netdev;
+ struct sk_buff *skb;
+
+ daemonize("%s", dev->name);
+ allow_signal(SIGTERM);
+
+ while (netif_running(dev)
+ && netif_device_present(dev)
+ && !signal_pending(current))
+ {
+#ifdef CONFIG_PM
+ /* if suspending, then power off and wait */
+ if (unlikely(current->flags & PF_FREEZE)) {
+ if (stir->receiving)
+ receive_stop(stir);
+ else
+ fifo_txwait(stir, -1);
+
+ write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
+
+ refrigerator(PF_FREEZE);
+
+ if (change_speed(stir, stir->speed))
+ break;
+ }
+#endif
+
+ /* anything to send? */
+ skb = xchg(&stir->tx_pending, NULL);
+ if (skb) {
+ unsigned new_speed = irda_get_next_speed(skb);
+ netif_wake_queue(dev);
+
+ if (skb->len > 0)
+ stir_send(stir, skb);
+ dev_kfree_skb(skb);
+
+ if ((new_speed != -1) && (stir->speed != new_speed)) {
+ if (fifo_txwait(stir, -1) ||
+ change_speed(stir, new_speed))
+ break;
+ }
+ continue;
+ }
+
+ /* nothing to send? start receiving */
+ if (!stir->receiving
+ && irda_device_txqueue_empty(dev)) {
+ /* Wait otherwise chip gets confused. */
+ if (fifo_txwait(stir, -1))
+ break;
+
+ if (unlikely(receive_start(stir))) {
+ if (net_ratelimit())
+ info("%s: receive usb submit failed",
+ stir->netdev->name);
+ stir->receiving = 0;
+ msleep(10);
+ continue;
+ }
+ }
+
+ /* sleep if nothing to send */
+ wait_event_interruptible(stir->thr_wait, stir->tx_pending);
+ }
+
+ complete_and_exit (&stir->thr_exited, 0);
+}
+
+
+/*
+ * USB bulk receive completion callback.
+ * Wakes up every ms (usb round trip) with wrapped
+ * data.
+ */
+static void stir_rcv_irq(struct urb *urb, struct pt_regs *regs)
+{
+ struct stir_cb *stir = urb->context;
+ int err;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(stir->netdev))
+ return;
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0)
+ return;
+
+ if (urb->actual_length > 0) {
+ pr_debug("receive %d\n", urb->actual_length);
+ unwrap_chars(stir, urb->transfer_buffer,
+ urb->actual_length);
+
+ stir->netdev->last_rx = jiffies;
+ do_gettimeofday(&stir->rx_time);
+ }
+
+ /* the kernel thread is stopping the receiver, don't resubmit */
+ if (!stir->receiving)
+ return;
+
+ /* resubmit existing urb */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* in case of error, the kernel thread will restart us */
+ if (err) {
+ warn("%s: usb receive submit error: %d",
+ stir->netdev->name, err);
+ stir->receiving = 0;
+ wake_up(&stir->thr_wait);
+ }
+}
+
+/*
+ * Function stir_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int stir_net_open(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+ int err;
+ char hwname[16];
+
+ err = usb_clear_halt(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1));
+ if (err)
+ goto err_out1;
+ err = usb_clear_halt(stir->usbdev, usb_rcvbulkpipe(stir->usbdev, 2));
+ if (err)
+ goto err_out1;
+
+ err = change_speed(stir, 9600);
+ if (err)
+ goto err_out1;
+
+ err = -ENOMEM;
+
+ /* Initialize for SIR/FIR to copy data directly into skb. */
+ stir->receiving = 0;
+ stir->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ stir->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!stir->rx_buff.skb)
+ goto err_out1;
+
+ skb_reserve(stir->rx_buff.skb, 1);
+ stir->rx_buff.head = stir->rx_buff.skb->data;
+ do_gettimeofday(&stir->rx_time);
+
+ stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!stir->rx_urb)
+ goto err_out2;
+
+ stir->io_buf = kmalloc(STIR_FIFO_SIZE, GFP_KERNEL);
+ if (!stir->io_buf)
+ goto err_out3;
+
+ usb_fill_bulk_urb(stir->rx_urb, stir->usbdev,
+ usb_rcvbulkpipe(stir->usbdev, 2),
+ stir->io_buf, STIR_FIFO_SIZE,
+ stir_rcv_irq, stir);
+
+ stir->fifo_status = kmalloc(FIFO_REGS_SIZE, GFP_KERNEL);
+ if (!stir->fifo_status)
+ goto err_out4;
+
+ /*
+ * Now that everything should be initialized properly,
+ * open a new IrLAP layer instance to take care of us...
+ * Note: it will immediately send a speed change...
+ */
+ sprintf(hwname, "usb#%d", stir->usbdev->devnum);
+ stir->irlap = irlap_open(netdev, &stir->qos, hwname);
+ if (!stir->irlap) {
+ err("stir4200: irlap_open failed");
+ goto err_out5;
+ }
+
+ /* Start kernel thread for transmit. */
+ stir->thr_pid = kernel_thread(stir_transmit_thread, stir,
+ CLONE_FS|CLONE_FILES);
+ if (stir->thr_pid < 0) {
+ err = stir->thr_pid;
+ err("stir4200: unable to start kernel thread");
+ goto err_out6;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+ err_out6:
+ irlap_close(stir->irlap);
+ err_out5:
+ kfree(stir->fifo_status);
+ err_out4:
+ kfree(stir->io_buf);
+ err_out3:
+ usb_free_urb(stir->rx_urb);
+ err_out2:
+ kfree_skb(stir->rx_buff.skb);
+ err_out1:
+ return err;
+}
+
+/*
+ * Function stir_net_close (stir)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int stir_net_close(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Kill transmit thread */
+ kill_proc(stir->thr_pid, SIGTERM, 1);
+ wait_for_completion(&stir->thr_exited);
+ kfree(stir->fifo_status);
+
+ /* Mop up receive urb's */
+ usb_kill_urb(stir->rx_urb);
+
+ kfree(stir->io_buf);
+ usb_free_urb(stir->rx_urb);
+ kfree_skb(stir->rx_buff.skb);
+
+ /* Stop and remove instance of IrLAP */
+ if (stir->irlap)
+ irlap_close(stir->irlap);
+
+ stir->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct stir_cb *stir = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(stir->netdev))
+ ret = change_speed(stir, irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(stir->netdev))
+ irda_device_set_media_busy(stir->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = stir->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/*
+ * Get device stats (for /proc/net/dev and ifconfig)
+ */
+static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+ return &stir->stats;
+}
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and if
+ * so start handling it.
+ * Note: it might be worth protecting this function with a global
+ * spinlock... Or not, because maybe USB already deals with that...
+ */
+static int stir_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct stir_cb *stir = NULL;
+ struct net_device *net;
+ int ret = -ENOMEM;
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*stir));
+ if(!net)
+ goto err_out1;
+
+ SET_MODULE_OWNER(net);
+ SET_NETDEV_DEV(net, &intf->dev);
+ stir = netdev_priv(net);
+ stir->netdev = net;
+ stir->usbdev = dev;
+
+ ret = usb_reset_configuration(dev);
+ if (ret != 0) {
+ err("stir4200: usb reset configuration failed");
+ goto err_out2;
+ }
+
+ printk(KERN_INFO "SigmaTel STIr4200 IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&stir->qos);
+
+ /* That's the Rx capability. */
+ stir->qos.baud_rate.bits &= IR_2400 | IR_9600 | IR_19200 |
+ IR_38400 | IR_57600 | IR_115200 |
+ (IR_4000000 << 8);
+ stir->qos.min_turn_time.bits &= qos_mtt_bits;
+ irda_qos_bits_to_value(&stir->qos);
+
+ init_completion (&stir->thr_exited);
+ init_waitqueue_head (&stir->thr_wait);
+
+ /* Override the network functions we need to use */
+ net->hard_start_xmit = stir_hard_xmit;
+ net->open = stir_net_open;
+ net->stop = stir_net_close;
+ net->get_stats = stir_net_get_stats;
+ net->do_ioctl = stir_net_ioctl;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto err_out2;
+
+ info("IrDA: Registered SigmaTel device %s", net->name);
+
+ usb_set_intfdata(intf, stir);
+
+ return 0;
+
+err_out2:
+ free_netdev(net);
+err_out1:
+ return ret;
+}
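
The baud_rate.bits mask set in stir_probe() above packs the advertised rates into a 16-bit field: the low byte holds the SIR/MIR rates and the high byte the FIR rates, which is why 4 Mb/s is written as (IR_4000000 << 8). Assuming the usual <net/irda/qos.h> bit assignments (IR_2400 = 0x01 up through IR_115200 = 0x20, and IR_4000000 = 0x01 in the upper byte), the mask works out to:

    IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 = 0x003f
    IR_4000000 << 8                                                = 0x0100
    resulting capability mask                                      = 0x013f

i.e. all the SIR rates plus 4 Mb/s FIR, with nothing in between.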
+
+/*
+ * The current device has been removed; the USB layer tells us to shut it down...
+ */
+static void stir_disconnect(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ if (!stir)
+ return;
+
+ unregister_netdev(stir->netdev);
+ free_netdev(stir->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* Power management suspend, so power off the transmitter/receiver */
+static int stir_suspend(struct usb_interface *intf, u32 state)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_detach(stir->netdev);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int stir_resume(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_attach(stir->netdev);
+
+ /* receiver restarted when send thread wakes up */
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .owner = THIS_MODULE,
+ .name = "stir4200",
+ .probe = stir_probe,
+ .disconnect = stir_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = stir_suspend,
+ .resume = stir_resume,
+#endif
+};
+
+/*
+ * Module insertion
+ */
+static int __init stir_init(void)
+{
+ return usb_register(&irda_driver);
+}
+module_init(stir_init);
+
+/*
+ * Module removal
+ */
+static void __exit stir_cleanup(void)
+{
+ /* Deregister the driver and remove all pending instances */
+ usb_deregister(&irda_driver);
+}
+module_exit(stir_cleanup);
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
new file mode 100644
index 000000000000..0dd6bc7af3f2
--- /dev/null
+++ b/drivers/net/irda/tekram-sir.c
@@ -0,0 +1,232 @@
+/*********************************************************************
+ *
+ * Filename: tekram.c
+ * Version: 1.3
+ * Description: Implementation of the Tekram IrMate IR-210B dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:38 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int tekram_delay = 150; /* default is 150 ms */
+module_param(tekram_delay, int, 0);
+MODULE_PARM_DESC(tekram_delay, "tekram dongle write complete delay");
+
+static int tekram_open(struct sir_dev *);
+static int tekram_close(struct sir_dev *);
+static int tekram_change_speed(struct sir_dev *, unsigned);
+static int tekram_reset(struct sir_dev *);
+
+#define TEKRAM_115200 0x00
+#define TEKRAM_57600 0x01
+#define TEKRAM_38400 0x02
+#define TEKRAM_19200 0x03
+#define TEKRAM_9600 0x04
+
+#define TEKRAM_PW 0x10 /* Pulse select bit */
+
+static struct dongle_driver tekram = {
+ .owner = THIS_MODULE,
+ .driver_name = "Tekram IR-210B",
+ .type = IRDA_TEKRAM_DONGLE,
+ .open = tekram_open,
+ .close = tekram_close,
+ .reset = tekram_reset,
+ .set_speed = tekram_change_speed,
+};
+
+static int __init tekram_sir_init(void)
+{
+ if (tekram_delay < 1 || tekram_delay > 500)
+ tekram_delay = 200;
+ IRDA_DEBUG(1, "%s - using %d ms delay\n",
+ tekram.driver_name, tekram_delay);
+ return irda_register_dongle(&tekram);
+}
+
+static void __exit tekram_sir_cleanup(void)
+{
+ irda_unregister_dongle(&tekram);
+}
+
+static int tekram_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int tekram_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function tekram_change_speed (dev, speed)
+ *
+ * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. clear DTR
+ * 2. set RTS, and wait at least 7 us
+ * 3. send the Control Byte to the IR-210 through TXD to set the new baud rate
+ * 4. wait until the stop bit of the Control Byte has been sent (for 9600 baud
+ *    this takes about 100 msec)
+ *
+ *    [why 100 msec? sending 1 byte (10 bits) takes only about 1.05 msec
+ *    - presumably this compensates for delays in the tty layer]
+ *
+ * 5. clear RTS (return to NORMAL operation)
+ * 6. wait at least 50 us; the new setting (baud rate, etc.) takes effect
+ *    hereafter
+ */
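
A quick sanity check on the bracketed question above: with 8N1 framing the control byte occupies 10 bit times, so at 9600 baud it needs only 10 / 9600 s ≈ 1.04 ms on the wire. The 100-150 ms actually waited (see the tekram_delay module parameter) is therefore presumably there to absorb tty buffering and scheduling latency rather than the transmission time itself.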
+
+#define TEKRAM_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TEKRAM_PW|TEKRAM_9600;
+ break;
+ case 19200:
+ byte = TEKRAM_PW|TEKRAM_19200;
+ break;
+ case 38400:
+ byte = TEKRAM_PW|TEKRAM_38400;
+ break;
+ case 57600:
+ byte = TEKRAM_PW|TEKRAM_57600;
+ break;
+ case 115200:
+ byte = TEKRAM_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TEKRAM_STATE_WAIT_SPEED;
+ delay = tekram_delay;
+ break;
+
+ case TEKRAM_STATE_WAIT_SPEED:
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(50);
+ break;
+
+ default:
+ IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function tekram_reset (driver)
+ *
+ * This function resets the tekram dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * Algorithm:
+ * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
+ * 1. clear RTS
+ * 2. set DTR, and wait at least 1 ms
+ * 3. clear DTR to SPACE state, wait at least 50 us for further
+ * operation
+ */
+
+static int tekram_reset(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Clear DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Should sleep 1 ms */
+ msleep(1);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 50 us */
+ udelay(75);
+
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
+
+module_init(tekram_sir_init);
+module_exit(tekram_sir_cleanup);
diff --git a/drivers/net/irda/tekram.c b/drivers/net/irda/tekram.c
new file mode 100644
index 000000000000..8f6258221cb0
--- /dev/null
+++ b/drivers/net/irda/tekram.c
@@ -0,0 +1,282 @@
+/*********************************************************************
+ *
+ * Filename: tekram.c
+ * Version: 1.2
+ * Description: Implementation of the Tekram IrMate IR-210B dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Fri Dec 17 09:13:09 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+static void tekram_open(dongle_t *self, struct qos_info *qos);
+static void tekram_close(dongle_t *self);
+static int tekram_change_speed(struct irda_task *task);
+static int tekram_reset(struct irda_task *task);
+
+#define TEKRAM_115200 0x00
+#define TEKRAM_57600 0x01
+#define TEKRAM_38400 0x02
+#define TEKRAM_19200 0x03
+#define TEKRAM_9600 0x04
+
+#define TEKRAM_PW 0x10 /* Pulse select bit */
+
+static struct dongle_reg dongle = {
+ .type = IRDA_TEKRAM_DONGLE,
+ .open = tekram_open,
+ .close = tekram_close,
+ .reset = tekram_reset,
+ .change_speed = tekram_change_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init tekram_init(void)
+{
+ return irda_device_register_dongle(&dongle);
+}
+
+static void __exit tekram_cleanup(void)
+{
+ irda_device_unregister_dongle(&dongle);
+}
+
+static void tekram_open(dongle_t *self, struct qos_info *qos)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+}
+
+static void tekram_close(dongle_t *self)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ /* Power off dongle */
+ self->set_dtr_rts(self->dev, FALSE, FALSE);
+
+ if (self->reset_task)
+ irda_task_delete(self->reset_task);
+ if (self->speed_task)
+ irda_task_delete(self->speed_task);
+}
+
+/*
+ * Function tekram_change_speed (task)
+ *
+ * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. clear DTR
+ * 2. set RTS, and wait at least 7 us
+ * 3. send the Control Byte to the IR-210 through TXD to set the new baud rate
+ * 4. wait until the stop bit of the Control Byte has been sent (for 9600 baud
+ *    this takes about 100 msec)
+ * 5. clear RTS (return to NORMAL operation)
+ * 6. wait at least 50 us; the new setting (baud rate, etc.) takes effect
+ *    hereafter
+ */
+static int tekram_change_speed(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ __u32 speed = (__u32) task->param;
+ __u8 byte;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ IRDA_ASSERT(task != NULL, return -1;);
+
+ if (self->speed_task && self->speed_task != task) {
+ IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__ );
+ return msecs_to_jiffies(10);
+ } else
+ self->speed_task = task;
+
+ switch (speed) {
+ default:
+ case 9600:
+ byte = TEKRAM_PW|TEKRAM_9600;
+ break;
+ case 19200:
+ byte = TEKRAM_PW|TEKRAM_19200;
+ break;
+ case 38400:
+ byte = TEKRAM_PW|TEKRAM_38400;
+ break;
+ case 57600:
+ byte = TEKRAM_PW|TEKRAM_57600;
+ break;
+ case 115200:
+ byte = TEKRAM_115200;
+ break;
+ }
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ case IRDA_TASK_CHILD_INIT:
+ /*
+ * Need to reset the dongle and go to 9600 bps before
+ * programming
+ */
+ if (irda_task_execute(self, tekram_reset, NULL, task,
+ (void *) speed))
+ {
+ /* Dongle needs more time to reset */
+ irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
+
+ /* Give reset 1 sec to finish */
+ ret = msecs_to_jiffies(1000);
+ } else
+ irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
+ break;
+ case IRDA_TASK_CHILD_WAIT:
+ IRDA_WARNING("%s(), resetting dongle timed out!\n",
+ __FUNCTION__);
+ ret = -1;
+ break;
+ case IRDA_TASK_CHILD_DONE:
+ /* Set DTR, Clear RTS */
+ self->set_dtr_rts(self->dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ self->write(self->dev, &byte, 1);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT);
+
+ /* Wait at least 100 ms */
+ ret = msecs_to_jiffies(150);
+ break;
+ case IRDA_TASK_WAIT:
+ /* Set DTR, Set RTS */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->speed_task = NULL;
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function tekram_reset (driver)
+ *
+ * This function resets the tekram dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * Algorithm:
+ * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
+ * 1. clear RTS
+ * 2. set DTR, and wait at least 1 ms
+ * 3. clear DTR to SPACE state, wait at least 50 us for further
+ * operation
+ */
+int tekram_reset(struct irda_task *task)
+{
+ dongle_t *self = (dongle_t *) task->instance;
+ int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+
+ IRDA_ASSERT(task != NULL, return -1;);
+
+ if (self->reset_task && self->reset_task != task) {
+ IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__ );
+ return msecs_to_jiffies(10);
+ } else
+ self->reset_task = task;
+
+ /* Power off dongle */
+ //self->set_dtr_rts(self->dev, FALSE, FALSE);
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ switch (task->state) {
+ case IRDA_TASK_INIT:
+ irda_task_next_state(task, IRDA_TASK_WAIT1);
+
+ /* Sleep 50 ms */
+ ret = msecs_to_jiffies(50);
+ break;
+ case IRDA_TASK_WAIT1:
+ /* Clear DTR, Set RTS */
+ self->set_dtr_rts(self->dev, FALSE, TRUE);
+
+ irda_task_next_state(task, IRDA_TASK_WAIT2);
+
+ /* Should sleep 1 ms */
+ ret = msecs_to_jiffies(1);
+ break;
+ case IRDA_TASK_WAIT2:
+ /* Set DTR, Set RTS */
+ self->set_dtr_rts(self->dev, TRUE, TRUE);
+
+ /* Wait at least 50 us */
+ udelay(75);
+
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ break;
+ default:
+ IRDA_ERROR("%s(), unknown state %d\n",
+ __FUNCTION__, task->state);
+ irda_task_next_state(task, IRDA_TASK_DONE);
+ self->reset_task = NULL;
+ ret = -1;
+ }
+ return ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Tekram module
+ *
+ */
+module_init(tekram_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Tekram module
+ *
+ */
+module_exit(tekram_cleanup);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
new file mode 100644
index 000000000000..8bafb455c102
--- /dev/null
+++ b/drivers/net/irda/via-ircc.c
@@ -0,0 +1,1676 @@
+/********************************************************************
+ Filename: via-ircc.c
+ Version: 1.0
+ Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ Author: VIA Technologies,inc
+ Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer).
+F02 Oct/28/02: Add SB device ID for 3147 and 3177.
+ Comment :
+ Jul/09/2002 : only two kinds of dongle are implemented currently.
+ Oct/02/2002 : works on VT8231 and VT8233.
+ Aug/06/2003 : changed driver format to a PCI driver.
+
+2004-02-16: <sda@bdit.de>
+- Removed unneeded 'legacy' PCI stuff.
+- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
+- On speed change from the core, don't send the SIR frame with the new speed.
+  Use the current speed and change speeds later.
+- Make module-param dongle_id actually work.
+- New dongle_id 17 (0x11): TFDS4500. Single-ended SIR only.
+  Tested with home-grown PCB on EPIA boards.
+- Code cleanup.
+
+ ********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "via-ircc.h"
+
+#define VIA_MODULE_NAME "via-ircc"
+#define CHIP_IO_EXTENT 0x40
+
+static char *driver_name = VIA_MODULE_NAME;
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id = 0; /* default: probe */
+
+/* We can't guess the type of the connected dongle; the user *must* supply it. */
+module_param(dongle_id, int, 0);
+
+/* FIXME : we should not need this, because instances should be automatically
+ * managed by the PCI layer. Especially that we seem to only be using the
+ * first entry. Jean II */
+/* Max 4 instances for now */
+static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+
+/* Some prototypes */
+static int via_ircc_open(int i, chipio_t * info, unsigned int id);
+static int via_ircc_close(struct via_ircc_cb *self);
+static int via_ircc_dma_receive(struct via_ircc_cb *self);
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase);
+static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static void via_hw_init(struct via_ircc_cb *self);
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
+static irqreturn_t via_ircc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs);
+static int via_ircc_is_receiving(struct via_ircc_cb *self);
+static int via_ircc_read_dongle_id(int iobase);
+
+static int via_ircc_net_open(struct net_device *dev);
+static int via_ircc_net_close(struct net_device *dev);
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+static struct net_device_stats *via_ircc_net_get_stats(struct net_device
+ *dev);
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id);
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
+static void hwreset(struct via_ircc_cb *self);
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
+static int upload_rxdata(struct via_ircc_cb *self, int iobase);
+static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
+static void __devexit via_remove_one (struct pci_dev *pdev);
+
+/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
+static void iodelay(int udelay)
+{
+ u8 data;
+ int i;
+
+ for (i = 0; i < udelay; i++) {
+ data = inb(0x80);
+ }
+}
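
The FIXME above suggests using udelay() instead of dummy port reads. A minimal sketch of that replacement (illustrative only; the name iodelay_us is made up here, and very long waits such as the 1500 passed by the FIR xmit path below would arguably be better served by mdelay()):

	/* Hypothetical drop-in for iodelay(): busy-wait the requested number of
	 * microseconds with udelay() rather than reading port 0x80. */
	static inline void iodelay_us(int usecs)
	{
		udelay(usecs);		/* from <linux/delay.h>, already included */
	}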
+
+static struct pci_device_id via_pci_tbl[] = {
+ { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
+ { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
+ { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
+ { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
+ { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci,via_pci_tbl);
+
+
+static struct pci_driver via_driver = {
+ .name = VIA_MODULE_NAME,
+ .id_table = via_pci_tbl,
+ .probe = via_init_one,
+ .remove = __devexit_p(via_remove_one),
+};
+
+
+/*
+ * Function via_ircc_init ()
+ *
+ * Initialize the chip. Just find out the chip type and resources.
+ */
+static int __init via_ircc_init(void)
+{
+ int rc;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ rc = pci_register_driver(&via_driver);
+ if (rc < 0) {
+ IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
+ __FUNCTION__, rc);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ int rc;
+ u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
+ u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
+ chipio_t info;
+
+ IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __FUNCTION__, id->device);
+
+ rc = pci_enable_device (pcidev);
+ if (rc) {
+ IRDA_DEBUG(0, "%s(): error rc = %d\n", __FUNCTION__, rc);
+ return -ENODEV;
+ }
+
+ // Check which south bridge / super-I/O is present
+ if ( ReadLPCReg(0x20) != 0x3C )
+ Chipset=0x3096;
+ else
+ Chipset=0x3076;
+
+ if (Chipset==0x3076) {
+ IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __FUNCTION__);
+
+ WriteLPCReg(7,0x0c );
+ temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
+ if((temp&0x01)==1) { // BIOS close or no FIR
+ WriteLPCReg(0x1d, 0x82 );
+ WriteLPCReg(0x23,0x18);
+ temp=ReadLPCReg(0xF0);
+ if((temp&0x01)==0) {
+ temp=(ReadLPCReg(0x74)&0x03); //DMA
+ FirDRQ0=temp + 4;
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2;
+ FirDRQ1=temp + 4;
+ } else {
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
+ FirDRQ0=temp + 4;
+ FirDRQ1=FirDRQ0;
+ }
+ FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
+ FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
+ FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
+ FirIOBase=FirIOBase ;
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ pci_read_config_byte(pcidev,0x40,&bTmp);
+ pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
+ pci_read_config_byte(pcidev,0x42,&bTmp);
+ pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
+ pci_write_config_byte(pcidev,0x5a,0xc0);
+ WriteLPCReg(0x28, 0x70 );
+ if (via_ircc_open(0, &info,0x3076) == 0)
+ rc=0;
+ } else
+ rc = -ENODEV; //IR not turned on
+ } else { //Not VT1211
+ IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __FUNCTION__);
+
+ pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
+ if((bTmp&0x01)==1) { // BIOS enable FIR
+ //Enable Double DMA clock
+ pci_read_config_byte(pcidev,0x42,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
+ pci_read_config_byte(pcidev,0x40,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
+ pci_read_config_byte(pcidev,0x44,&oldPCI_44);
+ pci_write_config_byte(pcidev,0x44,0x4e);
+ //---------- read configuration from Function0 of south bridge
+ if((bTmp&0x02)==0) {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4;
+ pci_read_config_byte(pcidev,0x44,&bTmp1);
+ FirDRQ1 = (bTmp1 & 0xc0) >> 6;
+ } else {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
+ FirDRQ1=0;
+ }
+ pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
+ FirIRQ = bTmp1 & 0x0f;
+
+ pci_read_config_byte(pcidev,0x69,&bTmp);
+ FirIOBase = bTmp << 8;//high byte
+ pci_read_config_byte(pcidev,0x68,&bTmp);
+ FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
+ //-------------------------
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ if (via_ircc_open(0, &info,0x3096) == 0)
+ rc=0;
+ } else
+ rc = -ENODEV; //IR not turned on !!!!!
+ }//Not VT1211
+
+ IRDA_DEBUG(2, "%s(): End - rc = %d\n", __FUNCTION__, rc);
+ return rc;
+}
+
+/*
+ * Function via_ircc_clean ()
+ *
+ * Close all configured chips
+ *
+ */
+static void via_ircc_clean(void)
+{
+ int i;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ for (i=0; i < 4; i++) {
+ if (dev_self[i])
+ via_ircc_close(dev_self[i]);
+ }
+}
+
+static void __devexit via_remove_one (struct pci_dev *pdev)
+{
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
+ * to get our driver instance and call directly via_ircc_close().
+ * See vlsi_ir for details...
+ * Jean II */
+ via_ircc_clean();
+
+ /* FIXME : This should be in via_ircc_close(), because here we may
+ * theoretically disable still-configured devices :-( - Jean II */
+ pci_disable_device(pdev);
+}
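
A minimal sketch of the per-device approach the FIXMEs above suggest, i.e. using pci_set_drvdata()/pci_get_drvdata() instead of the global dev_self[] array. This is illustrative only and assumes a hypothetical helper that returns the freshly opened struct via_ircc_cb (the real via_ircc_open() above returns an int):

	/* Illustrative only: keep the instance in the struct pci_dev. */
	static int via_init_one_drvdata(struct pci_dev *pcidev,
					const struct pci_device_id *id)
	{
		struct via_ircc_cb *self = hypothetical_via_ircc_open(pcidev);

		if (!self)
			return -ENODEV;

		pci_set_drvdata(pcidev, self);
		return 0;
	}

	static void via_remove_one_drvdata(struct pci_dev *pcidev)
	{
		struct via_ircc_cb *self = pci_get_drvdata(pcidev);

		via_ircc_close(self);
		pci_set_drvdata(pcidev, NULL);
		pci_disable_device(pcidev);
	}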
+
+static void __exit via_ircc_cleanup(void)
+{
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ /* FIXME : This should be redundant, as pci_unregister_driver()
+ * should call via_remove_one() on each device.
+ * Jean II */
+ via_ircc_clean();
+
+ /* Cleanup all instances of the driver */
+ pci_unregister_driver (&via_driver);
+}
+
+/*
+ * Function via_ircc_open (i, info, id)
+ *
+ * Open driver instance
+ *
+ */
+static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
+{
+ struct net_device *dev;
+ struct via_ircc_cb *self;
+ int err;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ /* Allocate new instance of the driver */
+ dev = alloc_irdadev(sizeof(struct via_ircc_cb));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ self = dev->priv;
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* FIXME : We should store our driver instance in the PCI layer,
+ * using pci_set_drvdata(), not in this array.
+ * See vlsi_ir for details... - Jean II */
+ /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->index = i;
+ /* Initialize Resource */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.dma2 = info->dma2;
+ self->io.fifo_size = 32;
+ self->chip_id = id;
+ self->st_fifo.len = 0;
+ self->RxDataReady = 0;
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
+ IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
+ __FUNCTION__, self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* Check if user has supplied the dongle id or not */
+ if (!dongle_id)
+ dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
+ self->io.dongle_id = dongle_id;
+
+ /* The only value we must override is the baudrate */
+ /* Maximum speeds and capabilities are dongle-dependent. */
+ switch( self->io.dongle_id ){
+ case 0x0d:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
+ IR_576000 | IR_1152000 | (IR_4000000 << 8);
+ break;
+ default:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ break;
+ }
+
+ /* Following was used for testing:
+ *
+ * self->qos.baud_rate.bits = IR_9600;
+ *
+ * It is no good, as it prohibits (error-prone) speed changes.
+ */
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384 + 2048;
+ self->tx_buff.truesize = 14384 + 2048;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
+
+ self->tx_buff.head =
+ dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Keep track of module usage */
+ SET_MODULE_OWNER(dev);
+
+ /* Override the network functions we need to use */
+ dev->hard_start_xmit = via_ircc_hard_xmit_sir;
+ dev->open = via_ircc_net_open;
+ dev->stop = via_ircc_net_close;
+ dev->do_ioctl = via_ircc_net_ioctl;
+ dev->get_stats = via_ircc_net_get_stats;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out4;
+
+ IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
+
+ /* Initialise the hardware..
+ */
+ self->io.speed = 9600;
+ via_hw_init(self);
+ return 0;
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ free_netdev(dev);
+ dev_self[i] = NULL;
+ return err;
+}
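
The 14384 figure above is consistent with the formula in the comment, assuming the usual IrLAP maxima of a 2048-byte data size and a 7-frame window (neither number is spelled out in this file): (2048 + 6) * 7 + 6 = 14384, with the additional 2048 bytes acting as headroom.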
+
+/*
+ * Function via_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int via_ircc_close(struct via_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ ResetChip(iobase, 5); //hardware reset.
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
+ __FUNCTION__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ dev_self[self->index] = NULL;
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+/*
+ * Function via_hw_init(self)
+ *
+ * Put the chip into its default state (SIR mode, 9600 baud).
+ *
+ * Formerly via_ircc_setup
+ */
+static void via_hw_init(struct via_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ // FIFO Init
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+ EnTXFIFOReadyInt(iobase, OFF);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ if (ReadLPCReg(0x20) == 0x3c)
+ WriteLPCReg(0xF0, 0); // for VT1211
+ /* Int Init */
+ EnRXSpecInt(iobase, ON);
+
+ /* The following is basically hwreset */
+ /* If this is the case, why not just call hwreset() ? Jean II */
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+
+ self->io.speed = 9600;
+ self->st_fifo.len = 0;
+
+ via_ircc_change_dongle_speed(iobase, self->io.speed,
+ self->io.dongle_id);
+
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+}
+
+/*
+ * Function via_ircc_read_dongle_id (void)
+ *
+ */
+static int via_ircc_read_dongle_id(int iobase)
+{
+ int dongle_id = 9; /* Default to IBM */
+
+ IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
+ return dongle_id;
+}
+
+/*
+ * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ * Change the speed of the attached dongle.
+ * Only two types of dongle are currently implemented.
+ */
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id)
+{
+ u8 mode = 0;
+
+ /* speed is unused, as we use IsSIROn()/IsMIROn() */
+ speed = speed;
+
+ IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
+ __FUNCTION__, speed, iobase, dongle_id);
+
+ switch (dongle_id) {
+
+ /* Note: The dongle_id's listed here are derived from
+ * nsc-ircc.c */
+
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ UseOneRX(iobase, ON); // use one RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if (IsSIROn(iobase)) { //sir
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ udelay(1000);
+ SlowIRRXLowActive(iobase, OFF);
+ } else {
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ }
+ }
+ }
+ break;
+
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF); // invert RX pin
+
+ EnRX2(iobase, ON);
+ EnGPIOtoRX2(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+ }
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ // TX On
+ WriteTX(iobase, ON);
+ udelay(20);
+ // Mode select OFF
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // TX Off
+ WriteTX(iobase, OFF);
+ }
+ }
+ break;
+
+ case 0x0d:
+ UseOneRX(iobase, OFF); // use two RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+ SlowIRRXLowActive(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //sir to rx2
+ } else { // fir mir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //fir to rx
+ }
+ break;
+
+ case 0x11: /* Temic TFDS4500 */
+
+ IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __FUNCTION__);
+
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, ON); // invert RX pin
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if( IsSIROn(iobase) ){ //sir
+
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+
+ } else{
+ IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __FUNCTION__);
+ }
+ break;
+
+ case 0x0ff: /* Vishay */
+ if (IsSIROn(iobase))
+ mode = 0;
+ else if (IsMIROn(iobase))
+ mode = 1;
+ else if (IsFIROn(iobase))
+ mode = 2;
+ else if (IsVFIROn(iobase))
+ mode = 5; //VFIR-16
+ SI_SetMode(iobase, mode);
+ break;
+
+ default:
+ IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
+ __FUNCTION__, dongle_id);
+ }
+}
+
+/*
+ * Function via_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev = self->netdev;
+ u16 iobase;
+ u8 value = 0, bTmp;
+
+ iobase = self->io.fir_base;
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+ IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __FUNCTION__, speed);
+
+ WriteReg(iobase, I_ST_CT_0, 0x0);
+
+ /* Controller mode selection */
+ switch (speed) {
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ value = (115200/speed)-1;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 576000:
+ /* FIXME: this can't be right, as it's the same as 115200,
+ * and 576000 is MIR, not SIR. */
+ value = 0;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 1152000:
+ value = 0;
+ SetMIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ case 4000000:
+ value = 0;
+ SetFIR(iobase, ON);
+ SetPulseWidth(iobase, 0);
+ SetSendPreambleCount(iobase, 14);
+ CRC16(iobase, OFF);
+ EnTXCRC(iobase, ON);
+ break;
+ case 16000000:
+ value = 0;
+ SetVFIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ default:
+ value = 0;
+ break;
+ }
+
+ /* Set baudrate to 0x19[2..7] */
+ bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ bTmp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, bTmp);
+
+ /* Some dongles may need to be informed about speed changes. */
+ via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO size to 64 */
+ SetFIFO(iobase, 64);
+
+ /* Enable IR */
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ // EnTXFIFOHalfLevelInt(iobase,ON);
+
+ /* Enable some interrupts so we can receive frames */
+ //EnAllInt(iobase,ON);
+
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->hard_start_xmit = via_ircc_hard_xmit_fir;
+ via_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->hard_start_xmit = via_ircc_hard_xmit_sir;
+ }
+ netif_wake_queue(dev);
+}
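
For the SIR rates the divisor programmed into bits 2..7 of I_CF_H_1 above is value = (115200 / speed) - 1. A small worked table of that arithmetic (pure integer math, no hardware access; illustrative only):

	/* SIR baud-rate divisors as computed in via_ircc_change_speed() above. */
	static const struct { unsigned int speed, divisor; } sir_divisors[] = {
		{   2400, (115200 /   2400) - 1 },	/* 47 */
		{   9600, (115200 /   9600) - 1 },	/* 11 */
		{  19200, (115200 /  19200) - 1 },	/*  5 */
		{  38400, (115200 /  38400) - 1 },	/*  2 */
		{  57600, (115200 /  57600) - 1 },	/*  1 */
		{ 115200, (115200 / 115200) - 1 },	/*  0 */
	};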
+
+/*
+ * Function via_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ u16 iobase;
+ __u32 speed;
+
+ self = (struct via_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len =
+ async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->stats.tx_bytes += self->tx_buff.len;
+ /* Send this frame with old speed */
+ SetBaudRate(iobase, self->io.speed);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
+
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ SetSendByte(iobase, self->tx_buff.len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ u16 iobase;
+ __u32 speed;
+ unsigned long flags;
+
+ self = (struct via_ircc_cb *) dev->priv;
+ iobase = self->io.fir_base;
+
+ if (self->st_fifo.len)
+ return 0;
+ if (self->chip_id == 0x3076)
+ iodelay(1500);
+ else
+ udelay(1500);
+ netif_stop_queue(dev);
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+
+ self->tx_fifo.tail += skb->len;
+ self->stats.tx_bytes += skb->len;
+ memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
+ skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+//F01 if (self->tx_fifo.len == 1) {
+ via_ircc_dma_xmit(self, iobase);
+//F01 }
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return 0;
+
+}
+
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
+{
+ EnTXDMA(iobase, OFF);
+ self->io.direction = IO_XMIT;
+ EnPhys(iobase, ON);
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
+ IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+ __FUNCTION__, self->tx_fifo.ptr,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ self->tx_fifo.len);
+
+ SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+ return 0;
+
+}
+
+/*
+ * Function via_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
+{
+ int iobase;
+ int ret = TRUE;
+ u8 Tx_status;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ iobase = self->io.fir_base;
+ /* Disable DMA */
+// DisableDmaChannel(self->io.dma);
+ /* Check for underrun! */
+ /* Clear bit, by writing 1 into it */
+ Tx_status = GetTXStatus(iobase);
+ if (Tx_status & 0x08) {
+ self->stats.tx_errors++;
+ self->stats.tx_fifo_errors++;
+ hwreset(self);
+// how to clear underrun ?
+ } else {
+ self->stats.tx_packets++;
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ }
+ /* Check if we need to change the speed */
+ if (self->new_speed) {
+ via_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ if (IsFIROn(iobase)) {
+ if (self->tx_fifo.len) {
+ self->tx_fifo.len--;
+ self->tx_fifo.ptr++;
+ }
+ }
+ IRDA_DEBUG(1,
+ "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+ __FUNCTION__,
+ self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+/* F01_S
+ // Any frames to be sent back-to-back?
+ if (self->tx_fifo.len) {
+ // Not finished yet!
+ via_ircc_dma_xmit(self, iobase);
+ ret = FALSE;
+ } else {
+F01_E*/
+ // Reset Tx FIFO info
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+//F01 }
+
+ // Make sure we have room for more frames
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
+ // Not busy transmitting anymore
+ // Tell the network layer, that we can accept more frames
+ netif_wake_queue(self->netdev);
+//F01 }
+ return ret;
+}
+
+/*
+ * Function via_ircc_dma_receive (self)
+ *
+ * Set up the hardware to receive a frame.
+ *
+ */
+static int via_ircc_dma_receive(struct via_ircc_cb *self)
+{
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ self->RxDataReady = 0;
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ EnPhys(iobase, ON);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, ON);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, ON);
+ irda_setup_dma(self->io.dma2, self->rx_buff_dma,
+ self->rx_buff.truesize, DMA_RX_MODE);
+ TXStart(iobase, OFF);
+ RXStart(iobase, ON);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_dma_receive_complete (self)
+ *
+ * The controller has finished receiving frames,
+ * and this routine is called by the ISR
+ *
+ */
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len, i;
+ u8 status = 0;
+
+ iobase = self->io.fir_base;
+ st_fifo = &self->st_fifo;
+
+ if (self->io.speed < 4000000) { //Speed below FIR
+ len = GetRecvByte(iobase, self);
+ skb = dev_alloc_skb(len + 1);
+ if (skb == NULL)
+ return FALSE;
+ // Make sure IP header gets aligned
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 2);
+ if (self->chip_id == 0x3076) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] = self->rx_buff.data[i * 2];
+ } else {
+ if (self->chip_id == 0x3096) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] =
+ self->rx_buff.data[i];
+ }
+ }
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ return TRUE;
+ }
+
+ else { //FIR mode
+ len = GetRecvByte(iobase, self);
+ if (len == 0)
+ return TRUE; //interrupt only; data may have been moved by the Rx timer
+ if (((len - 4) < 2) || ((len - 4) > 2048)) {
+ IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
+ __FUNCTION__, len, RxCurCount(iobase, self),
+ self->RxLastCount);
+ hwreset(self);
+ return FALSE;
+ }
+ IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+ __FUNCTION__,
+ st_fifo->len, len - 4, RxCurCount(iobase, self));
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ self->RxDataReady = 0;
+
+ // There may be up to MAX_RX_WINDOW packets received by
+ // receive_complete before the timer IRQ
+/* F01_S
+ if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
+ RXStart(iobase,ON);
+ SetTimer(iobase,4);
+ }
+ else {
+F01_E */
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+//F01_S
+ // Take the next entry out of the status fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if the frame size, data ptr, or skb ptr is wrong, then get the next
+ * entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL)
+ || (self->rx_buff.data == NULL) || (len < 6)) {
+ self->stats.rx_dropped++;
+ return TRUE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+
+ memcpy(skb->data, self->rx_buff.data, len - 4);
+ IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__,
+ len - 4, self->rx_buff.data);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+
+//F01_E
+ } //FIR
+ return TRUE;
+
+}
+
+/*
+ * If a frame was received but no interrupt was raised, use this routine to upload the frame.
+ */
+static int upload_rxdata(struct via_ircc_cb *self, int iobase)
+{
+ struct sk_buff *skb;
+ int len;
+ struct st_fifo *st_fifo;
+ st_fifo = &self->st_fifo;
+
+ len = GetRecvByte(iobase, self);
+
+ IRDA_DEBUG(2, "%s(): len=%x\n", __FUNCTION__, len);
+
+ skb = dev_alloc_skb(len + 1);
+ if ((skb == NULL) || ((len - 4) < 2)) {
+ self->stats.rx_dropped++;
+ return FALSE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4 + 1);
+ memcpy(skb->data, self->rx_buff.data, len - 4 + 1);
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
+ RXStart(iobase, ON);
+ } else {
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ }
+ return TRUE;
+}
+
+/*
+ * Implements back-to-back receive; use this routine to upload data.
+ */
+
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len;
+ u8 status;
+
+ st_fifo = &self->st_fifo;
+
+ if (CkRxRecv(iobase, self)) {
+ // if still receiving, return and don't upload the frame yet
+ self->RetryCount = 0;
+ SetTimer(iobase, 20);
+ self->RxDataReady++;
+ return FALSE;
+ } else
+ self->RetryCount++;
+
+ if ((self->RetryCount >= 1) ||
+ ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize)
+ || (st_fifo->len >= (MAX_RX_WINDOW))) {
+ while (st_fifo->len > 0) { //upload frame
+ // Take the next entry out of the status fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if frame size, data ptr, or skb ptr are wrong,
+ * then get next entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL)
+ || (self->rx_buff.data == NULL) || (len < 6)) {
+ self->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+ memcpy(skb->data, self->rx_buff.data, len - 4);
+
+ IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__,
+ len - 4, st_fifo->head);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->stats.rx_bytes += len;
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ } //while
+ self->RetryCount = 0;
+
+ IRDA_DEBUG(2,
+ "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+ __FUNCTION__,
+ GetHostStatus(iobase), GetRXStatus(iobase));
+
+ /*
+ * if a frame has been completely received by this routine, then
+ * upload it.
+ */
+ if ((GetRXStatus(iobase) & 0x10)
+ && (RxCurCount(iobase, self) != self->RxLastCount)) {
+ upload_rxdata(self, iobase);
+ if (irda_device_txqueue_empty(self->netdev))
+ via_ircc_dma_receive(self);
+ }
+ } // timer detect complete
+ else
+ SetTimer(iobase, 4);
+ return TRUE;
+
+}
+
+
+
+/*
+ * Function via_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t via_ircc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct via_ircc_cb *self;
+ int iobase;
+ u8 iHostIntType, iRxIntType, iTxIntType;
+
+ if (!dev) {
+ IRDA_WARNING("%s: irq %d for unknown device.\n", driver_name,
+ irq);
+ return IRQ_NONE;
+ }
+ self = (struct via_ircc_cb *) dev->priv;
+ iobase = self->io.fir_base;
+ spin_lock(&self->lock);
+ iHostIntType = GetHostStatus(iobase);
+
+ IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
+ __FUNCTION__, iHostIntType,
+ (iHostIntType & 0x40) ? "Timer" : "",
+ (iHostIntType & 0x20) ? "Tx" : "",
+ (iHostIntType & 0x10) ? "Rx" : "",
+ (iHostIntType & 0x0e) >> 1);
+
+ if ((iHostIntType & 0x40) != 0) { //Timer Event
+ self->EventFlag.TimeOut++;
+ ClearTimerInt(iobase, 1);
+ if (self->io.direction == IO_XMIT) {
+ via_ircc_dma_xmit(self, iobase);
+ }
+ if (self->io.direction == IO_RECV) {
+ /*
+ * frame-ready has been held too long; must reset.
+ */
+ if (self->RxDataReady > 30) {
+ hwreset(self);
+ if (irda_device_txqueue_empty(self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else { // call this to upload frame.
+ RxTimerHandler(self, iobase);
+ }
+ } //RECV
+ } //Timer Event
+ if ((iHostIntType & 0x20) != 0) { //Tx Event
+ iTxIntType = GetTXStatus(iobase);
+
+ IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
+ __FUNCTION__, iTxIntType,
+ (iTxIntType & 0x08) ? "FIFO underr." : "",
+ (iTxIntType & 0x04) ? "EOM" : "",
+ (iTxIntType & 0x02) ? "FIFO ready" : "",
+ (iTxIntType & 0x01) ? "Early EOM" : "");
+
+ if (iTxIntType & 0x4) {
+ self->EventFlag.EOMessage++; // read and will auto clean
+ if (via_ircc_dma_xmit_complete(self)) {
+ if (irda_device_txqueue_empty
+ (self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else {
+ self->EventFlag.Unknown++;
+ }
+ } //EOP
+ } //Tx Event
+ //----------------------------------------
+ if ((iHostIntType & 0x10) != 0) { //Rx Event
+ /* Check if DMA has finished */
+ iRxIntType = GetRXStatus(iobase);
+
+ IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
+ __FUNCTION__, iRxIntType,
+ (iRxIntType & 0x80) ? "PHY err." : "",
+ (iRxIntType & 0x40) ? "CRC err" : "",
+ (iRxIntType & 0x20) ? "FIFO overr." : "",
+ (iRxIntType & 0x10) ? "EOF" : "",
+ (iRxIntType & 0x08) ? "RxData" : "",
+ (iRxIntType & 0x02) ? "RxMaxLen" : "",
+ (iRxIntType & 0x01) ? "SIR bad" : "");
+ if (!iRxIntType)
+ IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __FUNCTION__);
+
+ if (iRxIntType & 0x10) {
+ if (via_ircc_dma_receive_complete(self, iobase)) {
+//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
+ via_ircc_dma_receive(self);
+ }
+ } // No ERR
+ else { //ERR
+ IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+ __FUNCTION__, iRxIntType, iHostIntType,
+ RxCurCount(iobase, self),
+ self->RxLastCount);
+
+ if (iRxIntType & 0x20) { //FIFO OverRun ERR
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ } else { //PHY,CRC ERR
+
+ if (iRxIntType != 0x08)
+ hwreset(self); //F01
+ }
+ via_ircc_dma_receive(self);
+ } //ERR
+
+ } //Rx Event
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(iHostIntType);
+}
+
+static void hwreset(struct via_ircc_cb *self)
+{
+ int iobase;
+ iobase = self->io.fir_base;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ /* Restore speed. */
+ via_ircc_change_speed(self, self->io.speed);
+
+ self->st_fifo.len = 0;
+}
+
+/*
+ * Function via_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int via_ircc_is_receiving(struct via_ircc_cb *self)
+{
+ int status = FALSE;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ iobase = self->io.fir_base;
+ if (CkRxRecv(iobase, self))
+ status = TRUE;
+
+ IRDA_DEBUG(2, "%s(): status=%x....\n", __FUNCTION__, status);
+
+ return status;
+}
+
+
+/*
+ * Function via_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int via_ircc_net_open(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct via_ircc_cb *) dev->priv;
+ self->stats.rx_packets = 0;
+ IRDA_ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+ if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
+ IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
+ self->io.dma);
+ free_irq(self->io.irq, self);
+ return -EAGAIN;
+ }
+ if (self->io.dma2 != self->io.dma) {
+ if (request_dma(self->io.dma2, dev->name)) {
+ IRDA_WARNING("%s, unable to allocate dma2=%d\n",
+ driver_name, self->io.dma2);
+ free_irq(self->io.irq, self);
+ return -EAGAIN;
+ }
+ }
+
+
+ /* turn on interrupts */
+ EnAllInt(iobase, ON);
+ EnInternalLoop(iobase, OFF);
+ EnExternalLoop(iobase, OFF);
+
+	/* Start the receive DMA */
+ via_ircc_dma_receive(self);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ sprintf(hwname, "VIA @ 0x%x", iobase);
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ self->RxLastCount = 0;
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int via_ircc_net_close(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+
+ IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct via_ircc_cb *) dev->priv;
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ iobase = self->io.fir_base;
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ DisableDmaChannel(self->io.dma);
+
+ /* Disable interrupts */
+ EnAllInt(iobase, OFF);
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = dev->priv;
+ IRDA_ASSERT(self != NULL, return -1;);
+ IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name,
+ cmd);
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->lock, flags);
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ via_ircc_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = via_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+static struct net_device_stats *via_ircc_net_get_stats(struct net_device
+ *dev)
+{
+ struct via_ircc_cb *self = (struct via_ircc_cb *) dev->priv;
+
+ return &self->stats;
+}
+
+MODULE_AUTHOR("VIA Technologies,inc");
+MODULE_DESCRIPTION("VIA IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+module_init(via_ircc_init);
+module_exit(via_ircc_cleanup);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
new file mode 100644
index 000000000000..204b1b34ffc7
--- /dev/null
+++ b/drivers/net/irda/via-ircc.h
@@ -0,0 +1,853 @@
+/*********************************************************************
+ *
+ * Filename: via-ircc.h
+ * Version: 1.0
+ * Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ * Author: VIA Technologies, inc
+ * Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ * Comment:
+ * jul/08/2002 : Rx buffer length should use Rx ring ptr.
+ * Oct/28/2002 : Add SB id for 3147 and 3177.
+ * jul/09/2002 : only two kinds of dongle are implemented currently.
+ * Oct/02/2002 : work on VT8231 and VT8233.
+ * Aug/06/2003 : change driver format to pci driver.
+ ********************************************************************/
+#ifndef via_IRCC_H
+#define via_IRCC_H
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW + 2];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+	int len;		/* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW + 2]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+	int len;		/* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+
+struct eventflag // for keeping track of Interrupt Events
+{
+ //--------tx part
+ unsigned char TxFIFOUnderRun;
+ unsigned char EOMessage;
+ unsigned char TxFIFOReady;
+ unsigned char EarlyEOM;
+ //--------rx part
+ unsigned char PHYErr;
+ unsigned char CRCErr;
+ unsigned char RxFIFOOverRun;
+ unsigned char EOPacket;
+ unsigned char RxAvail;
+ unsigned char TooLargePacket;
+ unsigned char SIRBad;
+ //--------unknown
+ unsigned char Unknown;
+ //----------
+ unsigned char TimeOut;
+ unsigned char RxDMATC;
+ unsigned char TxDMATC;
+};
+
+/* Private data for each instance */
+struct via_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ struct timeval stamp;
+ struct timeval now;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 flags; /* Interface flags */
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ struct eventflag EventFlag;
+ struct pm_dev *dev;
+ unsigned int chip_id; /* to remember chip id */
+ unsigned int RetryCount;
+ unsigned int RxDataReady;
+ unsigned int RxLastCount;
+};
+
+
+//---------I=Infrared, H=Host, M=Misc, T=Tx, R=Rx, ST=Status,
+// CF=Config, CT=Control, L=Low, H=High, C=Count
+#define I_CF_L_0 0x10
+#define I_CF_H_0 0x11
+#define I_SIR_BOF 0x12
+#define I_SIR_EOF 0x13
+#define I_ST_CT_0 0x15
+#define I_ST_L_1 0x16
+#define I_ST_H_1 0x17
+#define I_CF_L_1 0x18
+#define I_CF_H_1 0x19
+#define I_CF_L_2 0x1a
+#define I_CF_H_2 0x1b
+#define I_CF_3 0x1e
+#define H_CT 0x20
+#define H_ST 0x21
+#define M_CT 0x22
+#define TX_CT_1 0x23
+#define TX_CT_2 0x24
+#define TX_ST 0x25
+#define RX_CT 0x26
+#define RX_ST 0x27
+#define RESET 0x28
+#define P_ADDR 0x29
+#define RX_C_L 0x2a
+#define RX_C_H 0x2b
+#define RX_P_L 0x2c
+#define RX_P_H 0x2d
+#define TX_C_L 0x2e
+#define TX_C_H 0x2f
+#define TIMER 0x32
+#define I_CF_4 0x33
+#define I_T_C_L 0x34
+#define I_T_C_H 0x35
+#define VERSION 0x3f
+//-------------------------------
+#define StartAddr 0x10 // the first register address
+#define EndAddr 0x3f // the last register address
+#define GetBit(val,bit) val = (unsigned char) ((val>>bit) & 0x1)
+ // Returns the bit
+#define SetBit(val,bit) val= (unsigned char ) (val | (0x1 << bit))
+ // Sets bit to 1
+#define ResetBit(val,bit) val= (unsigned char ) (val & ~(0x1 << bit))
+ // Sets bit to 0
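+// Illustrative usage, not from the original source: all three macros
+// write their result back into the first argument, e.g.
+//	__u8 v = 0x01;
+//	SetBit(v, 3);	// v is now 0x09
+//	GetBit(v, 3);	// v is now 0x01 - v is replaced by the bit value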
+
+#define OFF 0
+#define ON 1
+#define DMA_TX_MODE 0x08
+#define DMA_RX_MODE 0x04
+
+#define DMA1 0
+#define DMA2 0xc0
+#define MASK1 DMA1+0x0a
+#define MASK2 DMA2+0x14
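+// MASK1/MASK2 resolve to 0x0a and 0xd4, the single-mask registers of the
+// two legacy 8237-style ISA DMA controllers; writing 0x04 plus the
+// channel's index within its controller masks that channel, which is what
+// DisableDmaChannel() below relies on.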
+
+#define Clk_bit 0x40
+#define Tx_bit 0x01
+#define Rd_Valid 0x08
+#define RxBit 0x08
+
+static void DisableDmaChannel(unsigned int channel)
+{
+ switch (channel) { // 8 Bit DMA channels DMAC1
+ case 0:
+ outb(4, MASK1); //mask channel 0
+ break;
+ case 1:
+ outb(5, MASK1); //Mask channel 1
+ break;
+ case 2:
+ outb(6, MASK1); //Mask channel 2
+ break;
+ case 3:
+ outb(7, MASK1); //Mask channel 3
+ break;
+ case 5:
+ outb(5, MASK2); //Mask channel 5
+ break;
+ case 6:
+ outb(6, MASK2); //Mask channel 6
+ break;
+ case 7:
+ outb(7, MASK2); //Mask channel 7
+ break;
+ default:
+ break;
+ }; //Switch
+}
+
+static unsigned char ReadLPCReg(int iRegNum)
+{
+ unsigned char iVal;
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ iVal = inb(0x2f);
+ outb(0xaa, 0x2e);
+
+ return iVal;
+}
+
+static void WriteLPCReg(int iRegNum, unsigned char iVal)
+{
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ outb(iVal, 0x2f);
+ outb(0xAA, 0x2e);
+}
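+
+/* ReadLPCReg()/WriteLPCReg() above drive the conventional Super I/O
+ * configuration index/data port pair at 0x2e/0x2f; the double 0x87 write
+ * is assumed to be the enter-configuration key and 0xaa the exit key of
+ * the LPC super-I/O used with these chipsets.
+ */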
+
+static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
+{
+ return ((__u8) inb(BaseAddr + iRegNum));
+}
+
+static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
+{
+ outb(iVal, BaseAddr + iRegNum);
+}
+
+static int WriteRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos, unsigned char value)
+{
+ __u8 Rtemp, Wtemp;
+
+ if (BitPos > 7) {
+ return -1;
+ }
+ if ((RegNum < StartAddr) || (RegNum > EndAddr))
+ return -1;
+ Rtemp = ReadReg(BaseAddr, RegNum);
+ if (value == 0)
+ Wtemp = ResetBit(Rtemp, BitPos);
+ else {
+ if (value == 1)
+ Wtemp = SetBit(Rtemp, BitPos);
+ else
+ return -1;
+ }
+ WriteReg(BaseAddr, RegNum, Wtemp);
+ return 0;
+}
+
+static __u8 CheckRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos)
+{
+ __u8 temp;
+
+ if (BitPos > 7)
+ return 0xff;
+ if ((RegNum < StartAddr) || (RegNum > EndAddr)) {
+// printf("what is the register %x!\n",RegNum);
+ }
+ temp = ReadReg(BaseAddr, RegNum);
+ return GetBit(temp, BitPos);
+}
+
+static void SetMaxRxPacketSize(__u16 iobase, __u16 size)
+{
+ __u16 low, high;
+ if ((size & 0xe000) == 0) {
+ low = size & 0x00ff;
+ high = (size & 0x1f00) >> 8;
+ WriteReg(iobase, I_CF_L_2, low);
+ WriteReg(iobase, I_CF_H_2, high);
+
+ }
+
+}
+
+//for both Rx and Tx
+
+static void SetFIFO(__u16 iobase, __u16 value)
+{
+ switch (value) {
+ case 128:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 1);
+ break;
+ case 64:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ case 32:
+ WriteRegBit(iobase, 0x11, 0, 1);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ default:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ }
+
+}
+
+#define CRC16(BaseAddr,val)	WriteRegBit(BaseAddr,I_CF_L_0,7,val)	//0 selects 32-bit CRC
+/*
+#define SetVFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,5,val)
+#define SetFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,6,val)
+#define SetMIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,5,val)
+#define SetSIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,4,val)
+*/
+#define SIRFilter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,3,val)
+#define Filter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,2,val)
+#define InvertTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,1,val)
+#define InvertRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,0,val)
+//****************************I_CF_H_0
+#define EnableTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,4,val)
+#define EnableRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,3,val)
+#define EnableDMA(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,2,val)
+#define SIRRecvAny(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,1,val)
+#define DiableTrans(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,0,val)
+//***************************I_SIR_BOF,I_SIR_EOF
+#define SetSIRBOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_BOF,val)
+#define SetSIREOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_EOF,val)
+#define GetSIRBOF(BaseAddr) ReadReg(BaseAddr,I_SIR_BOF)
+#define GetSIREOF(BaseAddr) ReadReg(BaseAddr,I_SIR_EOF)
+//*******************I_ST_CT_0
+#define EnPhys(BaseAddr,val) WriteRegBit(BaseAddr,I_ST_CT_0,7,val)
+#define IsModeError(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,6) //RO
+#define IsVFIROn(BaseAddr) CheckRegBit(BaseAddr,0x14,0) //RO for VT1211 only
+#define IsFIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,5) //RO
+#define IsMIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,4) //RO
+#define IsSIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,3) //RO
+#define IsEnableTX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,2) //RO
+#define IsEnableRX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,1) //RO
+#define Is16CRC(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,0) //RO
+//***************************I_CF_3
+#define DisableAdjacentPulseWidth(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,5,val) //1 disable
+#define DisablePulseWidthAdjust(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,4,val) //1 disable
+#define UseOneRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,1,val) //0 use two RX
+#define SlowIRRXLowActive(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,0,val) //0 show RX high=1 in SIR
+//***************************H_CT
+#define EnAllInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,7,val)
+#define TXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,6,val)
+#define RXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,5,val)
+#define ClearRXInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,4,val) // 1 clear
+//*****************H_ST
+#define IsRXInt(BaseAddr) CheckRegBit(BaseAddr,H_ST,4)
+#define GetIntIndentify(BaseAddr) ((ReadReg(BaseAddr,H_ST)&0xf1) >>1)
+#define IsHostBusy(BaseAddr) CheckRegBit(BaseAddr,H_ST,0)
+#define GetHostStatus(BaseAddr) ReadReg(BaseAddr,H_ST) //RO
+//**************************M_CT
+#define EnTXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,7,val)
+#define EnRXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,6,val)
+#define SwapDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,5,val)
+#define EnInternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,4,val)
+#define EnExternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,3,val)
+//**************************TX_CT_1
+#define EnTXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,4,val) //half empty int (1 half)
+#define EnTXFIFOUnderrunEOMInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,5,val)
+#define EnTXFIFOReadyInt(BaseAddr,val)	WriteRegBit(BaseAddr,TX_CT_1,6,val)	//interrupt when the threshold (set by bit 4) is reached
+//**************************TX_CT_2
+#define ForceUnderrun(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,7,val) // force an underrun int
+#define EnTXCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,6,val) //1 for FIR,MIR...0 (not SIR)
+#define ForceBADCRC(BaseAddr,val)	WriteRegBit(BaseAddr,TX_CT_2,5,val)	//force a bad CRC
+#define SendSIP(BaseAddr,val)	WriteRegBit(BaseAddr,TX_CT_2,4,val)	//send an indication pulse to prevent SIR disturbance
+#define ClearEnTX(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,3,val) // opposite to EnTX
+//*****************TX_ST
+#define GetTXStatus(BaseAddr) ReadReg(BaseAddr,TX_ST) //RO
+//**************************RX_CT
+#define EnRXSpecInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,0,val)
+#define EnRXFIFOReadyInt(BaseAddr,val)	WriteRegBit(BaseAddr,RX_CT,1,val)	//enable interrupt when the threshold (set by bit 7) is reached
+#define EnRXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,7,val) //enable int when (1) half full...or (0) just not full
+//*****************RX_ST
+#define GetRXStatus(BaseAddr) ReadReg(BaseAddr,RX_ST) //RO
+//***********************P_ADDR
+#define SetPacketAddr(BaseAddr,addr) WriteReg(BaseAddr,P_ADDR,addr)
+//***********************I_CF_4
+#define EnGPIOtoRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,7,val)
+#define EnTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,1,val)
+#define ClearTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,0,val)
+//***********************I_T_C_L
+#define WriteGIO(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,7,val)
+#define ReadGIO(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,7)
+#define ReadRX(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,3) //RO
+#define WriteTX(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,0,val)
+//***********************I_T_C_H
+#define EnRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_H,7,val)
+#define ReadRX2(BaseAddr) CheckRegBit(BaseAddr,I_T_C_H,7)
+//**********************Version
+#define GetFIRVersion(BaseAddr) ReadReg(BaseAddr,VERSION)
+
+
+static void SetTimer(__u16 iobase, __u8 count)
+{
+ EnTimerInt(iobase, OFF);
+ WriteReg(iobase, TIMER, count);
+ EnTimerInt(iobase, ON);
+}
+
+
+static void SetSendByte(__u16 iobase, __u32 count)
+{
+ __u32 low, high;
+
+ if ((count & 0xf000) == 0) {
+ low = count & 0x00ff;
+ high = (count & 0x0f00) >> 8;
+ WriteReg(iobase, TX_C_L, low);
+ WriteReg(iobase, TX_C_H, high);
+ }
+}
+
+static void ResetChip(__u16 iobase, __u8 type)
+{
+ __u8 value;
+
+ value = (type + 2) << 4;
+ WriteReg(iobase, RESET, type);
+}
+
+static int CkRxRecv(__u16 iobase, struct via_ircc_cb *self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0, wTmp_new = 0;
+
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ udelay(10);
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp_new = (wTmp1 << 8) | low;
+ if (wTmp_new != wTmp)
+ return 1;
+ else
+ return 0;
+
+}
+
+static __u16 RxCurCount(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ return wTmp;
+}
+
+/* This routine may only be used in receive_complete,
+ * since it updates the last count.
+ */
+
+static __u16 GetRecvByte(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp, wTmp1, ret;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+
+
+ if (wTmp >= self->RxLastCount)
+ ret = wTmp - self->RxLastCount;
+ else
+ ret = (0x8000 - self->RxLastCount) + wTmp;
+ self->RxLastCount = wTmp;
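+	/* Worked example (illustrative, assuming the counter wraps at 0x8000):
+	 * a previous count of 0x7ff0 and a new reading of 0x0010 give
+	 * (0x8000 - 0x7ff0) + 0x0010 = 0x20 bytes received.
+	 */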
+
+/* RX_P is actually more like RX_C
+ low=ReadReg(iobase,RX_C_L);
+ high=ReadReg(iobase,RX_C_H);
+
+ if(!(high&0xe000)) {
+ temp=(high<<8)+low;
+ return temp;
+ }
+ else return 0;
+*/
+ return ret;
+}
+
+static void Sdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x20; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+static void Tdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x50; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+
+static void ActClk(__u16 iobase, __u8 value)
+{
+ __u8 bTmp;
+ bTmp = ReadReg(iobase, 0x34);
+ if (value)
+ WriteReg(iobase, 0x34, bTmp | Clk_bit);
+ else
+ WriteReg(iobase, 0x34, bTmp & ~Clk_bit);
+}
+
+static void ClkTx(__u16 iobase, __u8 Clk, __u8 Tx)
+{
+ __u8 bTmp;
+
+ bTmp = ReadReg(iobase, 0x34);
+ if (Clk == 0)
+ bTmp &= ~Clk_bit;
+ else {
+ if (Clk == 1)
+ bTmp |= Clk_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+ Sdelay(1);
+ if (Tx == 0)
+ bTmp &= ~Tx_bit;
+ else {
+ if (Tx == 1)
+ bTmp |= Tx_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+}
+
+static void Wr_Byte(__u16 iobase, __u8 data)
+{
+ __u8 bData = data;
+// __u8 btmp;
+ int i;
+
+ ClkTx(iobase, 0, 1);
+
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+
+	for (i = 0; i < 8; i++) {	// shift out the 8 data bits, LSB first
+
+ if ((bData >> i) & 0x01) {
+ ClkTx(iobase, 0, 1); //bit data = 1;
+ } else {
+			ClkTx(iobase, 0, 0);	//bit data = 0;
+ }
+ Tdelay(2);
+ Sdelay(1);
+ ActClk(iobase, 1); //clk hi
+ Tdelay(1);
+ }
+}
+
+static __u8 Rd_Indx(__u16 iobase, __u8 addr, __u8 index)
+{
+ __u8 data = 0, bTmp, data_bit;
+ int i;
+
+ bTmp = addr | (index << 1) | 0;
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ Wr_Byte(iobase, bTmp);
+ Sdelay(1);
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ for (i = 0; i < 10; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ ClkTx(iobase, 0, 1);
+ Tdelay(1);
+ bTmp = ReadReg(iobase, 0x34);
+ if (!(bTmp & Rd_Valid))
+ break;
+ }
+ if (!(bTmp & Rd_Valid)) {
+ for (i = 0; i < 8; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ bTmp = ReadReg(iobase, 0x34);
+ data_bit = 1 << i;
+ if (bTmp & RxBit)
+ data |= data_bit;
+ else
+ data &= ~data_bit;
+ Tdelay(2);
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ bTmp = ReadReg(iobase, 0x34);
+ }
+ for (i = 0; i < 1; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 3; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ return data;
+}
+
+static void Wr_Indx(__u16 iobase, __u8 addr, __u8 index, __u8 data)
+{
+ int i;
+ __u8 bTmp;
+
+ ClkTx(iobase, 0, 0);
+ udelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ bTmp = addr | (index << 1) | 1;
+ Wr_Byte(iobase, bTmp);
+ Wr_Byte(iobase, data);
+ for (i = 0; i < 2; i++) {
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void ResetDongle(__u16 iobase)
+{
+ int i;
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 30; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void SetSITmode(__u16 iobase)
+{
+
+ __u8 bTmp;
+
+ bTmp = ReadLPCReg(0x28);
+ WriteLPCReg(0x28, bTmp | 0x10); //select ITMOFF
+ bTmp = ReadReg(iobase, 0x35);
+ WriteReg(iobase, 0x35, bTmp | 0x40); // Driver ITMOFF
+ WriteReg(iobase, 0x28, bTmp | 0x80); // enable All interrupt
+}
+
+static void SI_SetMode(__u16 iobase, int mode)
+{
+ //__u32 dTmp;
+ __u8 bTmp;
+
+ WriteLPCReg(0x28, 0x70); // S/W Reset
+ SetSITmode(iobase);
+ ResetDongle(iobase);
+ udelay(10);
+ Wr_Indx(iobase, 0x40, 0x0, 0x17); //RX ,APEN enable,Normal power
+ Wr_Indx(iobase, 0x40, 0x1, mode); //Set Mode
+ Wr_Indx(iobase, 0x40, 0x2, 0xff); //Set power to FIR VFIR > 1m
+ bTmp = Rd_Indx(iobase, 0x40, 1);
+}
+
+static void InitCard(__u16 iobase)
+{
+ ResetChip(iobase, 5);
+ WriteReg(iobase, I_ST_CT_0, 0x00); // open CHIP on
+ SetSIRBOF(iobase, 0xc0); // hardware default value
+ SetSIREOF(iobase, 0xc1);
+}
+
+static void CommonInit(__u16 iobase)
+{
+// EnTXCRC(iobase,0);
+ SwapDMA(iobase, OFF);
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+// EnTXFIFOReadyInt(iobase,ON);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+// WriteLPCReg(0xF0,0); //(if VT1211 then do this)
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+ EnRXSpecInt(iobase, ON);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+ EnableDMA(iobase, ON);
+}
+
+static void SetBaudRate(__u16 iobase, __u32 rate)
+{
+ __u8 value = 11, temp;
+
+ if (IsSIROn(iobase)) {
+ switch (rate) {
+ case (__u32) (2400L):
+ value = 47;
+ break;
+ case (__u32) (9600L):
+ value = 11;
+ break;
+ case (__u32) (19200L):
+ value = 5;
+ break;
+ case (__u32) (38400L):
+ value = 2;
+ break;
+ case (__u32) (57600L):
+ value = 1;
+ break;
+ case (__u32) (115200L):
+ value = 0;
+ break;
+ default:
+ break;
+ };
+	} else if (IsMIROn(iobase)) {
+		value = 0;	// automatically fixed at 1.152 Mb/s
+	} else if (IsFIROn(iobase)) {
+		value = 0;	// automatically fixed at 4 Mb/s
+	}
+ temp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ temp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, temp);
+}
+
+static void SetPulseWidth(__u16 iobase, __u8 width)
+{
+ __u8 temp, temp1, temp2;
+
+ temp = (ReadReg(iobase, I_CF_L_1) & 0x1f);
+ temp1 = (ReadReg(iobase, I_CF_H_1) & 0xfc);
+ temp2 = (width & 0x07) << 5;
+ temp |= temp2;
+ temp2 = (width & 0x18) >> 3;
+ temp1 |= temp2;
+ WriteReg(iobase, I_CF_L_1, temp);
+ WriteReg(iobase, I_CF_H_1, temp1);
+}
+
+static void SetSendPreambleCount(__u16 iobase, __u8 count)
+{
+ __u8 temp;
+
+ temp = ReadReg(iobase, I_CF_L_1) & 0xe0;
+ temp |= count;
+ WriteReg(iobase, I_CF_L_1, temp);
+
+}
+
+static void SetVFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, val);
+}
+
+static void SetFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 6, val);
+}
+
+static void SetMIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 5, val);
+}
+
+static void SetSIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 4, val);
+}
+
+#endif /* via_IRCC_H */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
new file mode 100644
index 000000000000..35fad8171a01
--- /dev/null
+++ b/drivers/net/irda/vlsi_ir.c
@@ -0,0 +1,1912 @@
+/*********************************************************************
+ *
+ * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "vlsi_ir"
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
+#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/********************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#include "vlsi_ir.h"
+
+/********************************************************/
+
+static /* const */ char drivername[] = DRIVER_NAME;
+
+static struct pci_device_id vlsi_irda_table [] = {
+ {
+ .class = PCI_CLASS_WIRELESS_IRDA << 8,
+ .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
+ .vendor = PCI_VENDOR_ID_VLSI,
+ .device = PCI_DEVICE_ID_VLSI_82C147,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
+
+/********************************************************/
+
+/* clksrc: which clock source to be used
+ * 0: auto - try PLL, fallback to 40MHz XCLK
+ * 1: on-chip 48MHz PLL
+ * 2: external 48MHz XCLK
+ * 3: external 40MHz XCLK (HP OB-800)
+ */
+
+static int clksrc = 0; /* default is 0(auto) */
+module_param(clksrc, int, 0);
+MODULE_PARM_DESC(clksrc, "clock input source selection");
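+/* For illustration (assuming the driver is built as the "vlsi_ir" module):
+ * the 40MHz XCLK of an OB-800 could be forced at load time with something
+ * like "modprobe vlsi_ir clksrc=3".
+ */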
+
+/* ringsize: size of the tx and rx descriptor rings
+ * independent for tx and rx
+ * specify as ringsize=tx[,rx]
+ * allowed values: 4, 8, 16, 32, 64
+ * Due to the IrDA 1.x max. allowed window size=7,
+ * there should be no gain when using rings larger than 8
+ */
+
+static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
+module_param_array(ringsize, int, NULL, 0);
+MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
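+/* For illustration (hypothetical values): "ringsize=16,8" would allocate
+ * 16 tx and 8 rx descriptors, while a single value such as "ringsize=16"
+ * only overrides the tx size and leaves rx at its default of 8.
+ */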
+
+/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
+ * 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
+ * 1: nominal 3/16 bittime width
+ * note: IrDA compliant peer devices should be happy regardless
+ * which one is used. Primary goal is to save some power
+ * on the sender's side - at 9.6kbaud for example the short
+ * pulse width saves more than 90% of the transmitted IR power.
+ */
+
+static int sirpulse = 1; /* default is 3/16 bittime */
+module_param(sirpulse, int, 0);
+MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
+
+/* qos_mtt_bits: encoded min-turn-time value we require the peer device
+ * to use before transmitting to us. "Type 1" (per-station)
+ * bitfield according to IrLAP definition (section 6.6.8)
+ * Don't know which transceiver is used by my OB800 - the
+ *		pretty common HP HDLS-1100 requires 1 msec - so let's use this.
+ */
+
+static int qos_mtt_bits = 0x07; /* default is 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
+
+/********************************************************/
+
+static void vlsi_reg_debug(unsigned iobase, const char *s)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s: ", s);
+ for (i = 0; i < 0x20; i++)
+ printk("%02x", (unsigned)inb((iobase+i)));
+ printk("\n");
+}
+
+static void vlsi_ring_debug(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+
+ printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
+ atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
+ printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
+ __FUNCTION__, (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+/********************************************************/
+
+/* needed regardless of CONFIG_PROC_FS */
+static struct proc_dir_entry *vlsi_proc_root = NULL;
+
+#ifdef CONFIG_PROC_FS
+
+static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
+{
+ unsigned iobase = pci_resource_start(pdev, 0);
+ unsigned i;
+
+ seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n",
+ PCIDEV_NAME(pdev), (int)pdev->vendor, (int)pdev->device);
+ seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
+ seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
+ pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
+ seq_printf(seq, "hw registers: ");
+ for (i = 0; i < 0x20; i++)
+ seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
+ seq_printf(seq, "\n");
+}
+
+static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ u8 byte;
+ u16 word;
+ unsigned delta1, delta2;
+ struct timeval now;
+ unsigned iobase = ndev->base_addr;
+
+ seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
+ netif_device_present(ndev) ? "attached" : "detached",
+ netif_running(ndev) ? "running" : "not running",
+ netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
+ netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
+
+ if (!netif_running(ndev))
+ return;
+
+ seq_printf(seq, "\nhw-state:\n");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
+ seq_printf(seq, "IRMISC:%s%s%s uart%s",
+ (byte&IRMISC_IRRAIL) ? " irrail" : "",
+ (byte&IRMISC_IRPD) ? " irpd" : "",
+ (byte&IRMISC_UARTTST) ? " uarttest" : "",
+ (byte&IRMISC_UARTEN) ? "@" : " disabled\n");
+ if (byte&IRMISC_UARTEN) {
+ seq_printf(seq, "0x%s\n",
+ (byte&2) ? ((byte&1) ? "3e8" : "2e8")
+ : ((byte&1) ? "3f8" : "2f8"));
+ }
+ pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
+ seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
+ (byte&CLKCTL_PD_INV) ? "powered" : "down",
+ (byte&CLKCTL_LOCK) ? " locked" : "",
+ (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
+ (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
+ (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
+ seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
+
+ byte = inb(iobase+VLSI_PIO_IRINTR);
+ seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
+ (byte&IRINTR_ACTEN) ? " ACTEN" : "",
+ (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
+ (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
+ (byte&IRINTR_OE_EN) ? " OE_EN" : "",
+ (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
+ (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
+ (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
+ (byte&IRINTR_OE_INT) ? " OE_INT" : "");
+ word = inw(iobase+VLSI_PIO_RINGPTR);
+ seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
+ word = inw(iobase+VLSI_PIO_RINGBASE);
+ seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
+ ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
+ word = inw(iobase+VLSI_PIO_RINGSIZE);
+ seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
+ RINGSIZE_TO_TXSIZE(word));
+
+ word = inw(iobase+VLSI_PIO_IRCFG);
+ seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (word&IRCFG_LOOP) ? " LOOP" : "",
+ (word&IRCFG_ENTX) ? " ENTX" : "",
+ (word&IRCFG_ENRX) ? " ENRX" : "",
+ (word&IRCFG_MSTR) ? " MSTR" : "",
+ (word&IRCFG_RXANY) ? " RXANY" : "",
+ (word&IRCFG_CRC16) ? " CRC16" : "",
+ (word&IRCFG_FIR) ? " FIR" : "",
+ (word&IRCFG_MIR) ? " MIR" : "",
+ (word&IRCFG_SIR) ? " SIR" : "",
+ (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
+ (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
+ (word&IRCFG_TXPOL) ? " TXPOL" : "",
+ (word&IRCFG_RXPOL) ? " RXPOL" : "");
+ word = inw(iobase+VLSI_PIO_IRENABLE);
+ seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
+ (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
+ (word&IRENABLE_CFGER) ? " CFGERR" : "",
+ (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
+ (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
+ (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
+ (word&IRENABLE_ENTXST) ? " ENTXST" : "",
+ (word&IRENABLE_ENRXST) ? " ENRXST" : "",
+ (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
+ word = inw(iobase+VLSI_PIO_PHYCTL);
+ seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_NPHYCTL);
+ seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_MAXPKT);
+ seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
+ word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
+
+ seq_printf(seq, "\nsw-state:\n");
+ seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
+ (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
+ do_gettimeofday(&now);
+ if (now.tv_usec >= idev->last_rx.tv_usec) {
+ delta2 = now.tv_usec - idev->last_rx.tv_usec;
+ delta1 = 0;
+ }
+ else {
+ delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
+ delta1 = 1;
+ }
+ seq_printf(seq, "last rx: %lu.%06u sec\n",
+ now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
+
+ seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
+ idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
+ idev->stats.rx_dropped);
+ seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
+ idev->stats.rx_over_errors, idev->stats.rx_length_errors,
+ idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
+ seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
+ idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
+ idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
+
+}
+
+static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i, j;
+ int h, t;
+
+ seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ h = atomic_read(&r->head) & r->mask;
+ t = atomic_read(&r->tail) & r->mask;
+ seq_printf(seq, "head = %d / tail = %d ", h, t);
+ if (h == t)
+ seq_printf(seq, "(empty)\n");
+ else {
+ if (((t+1)&r->mask) == h)
+ seq_printf(seq, "(full)\n");
+ else
+ seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
+ rd = &r->rd[h];
+ j = (unsigned) rd_get_count(rd);
+ seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
+ h, (unsigned)rd_get_status(rd), j);
+ if (j > 0) {
+ seq_printf(seq, " data:");
+ if (j > 20)
+ j = 20;
+ for (i = 0; i < j; i++)
+ seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]);
+ seq_printf(seq, "\n");
+ }
+ }
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ seq_printf(seq, "> ring descr %u: ", i);
+ seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n",
+ (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+static int vlsi_seq_show(struct seq_file *seq, void *v)
+{
+ struct net_device *ndev = seq->private;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ unsigned long flags;
+
+ seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
+ seq_printf(seq, "clksrc: %s\n",
+ (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
+ : ((clksrc==1)?"48MHz PLL":"autodetect"));
+ seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
+ ringsize[0], ringsize[1]);
+ seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
+ seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
+
+ spin_lock_irqsave(&idev->lock, flags);
+ if (idev->pdev != NULL) {
+ vlsi_proc_pdev(seq, idev->pdev);
+
+ if (idev->pdev->current_state == 0)
+ vlsi_proc_ndev(seq, ndev);
+ else
+ seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
+ idev->resume_ok);
+ if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
+ seq_printf(seq, "\n--------- RX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->rx_ring);
+ seq_printf(seq, "\n--------- TX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->tx_ring);
+ }
+ }
+ seq_printf(seq, "\n");
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return 0;
+}
+
+static int vlsi_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vlsi_seq_show, PDE(inode)->data);
+}
+
+static struct file_operations vlsi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = vlsi_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define VLSI_PROC_FOPS (&vlsi_proc_fops)
+
+#else /* CONFIG_PROC_FS */
+#define VLSI_PROC_FOPS NULL
+#endif
+
+/********************************************************/
+
+static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
+ unsigned size, unsigned len, int dir)
+{
+ struct vlsi_ring *r;
+ struct ring_descr *rd;
+ unsigned i, j;
+ dma_addr_t busaddr;
+
+ if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
+ return NULL;
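+	/* (power-of-two test, e.g. size=8: 7&8 == 0 passes, size=12: 11&12 == 8 fails) */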
+
+ r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
+ if (!r)
+ return NULL;
+ memset(r, 0, sizeof(*r));
+
+ r->pdev = pdev;
+ r->dir = dir;
+ r->len = len;
+ r->rd = (struct ring_descr *)(r+1);
+ r->mask = size - 1;
+ r->size = size;
+ atomic_set(&r->head, 0);
+ atomic_set(&r->tail, 0);
+
+ for (i = 0; i < size; i++) {
+ rd = r->rd + i;
+ memset(rd, 0, sizeof(*rd));
+ rd->hw = hwmap + i;
+ rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+ if (rd->buf == NULL
+ || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+ if (rd->buf) {
+				IRDA_ERROR("%s: failed to create PCI-MAP for %p\n",
+ __FUNCTION__, rd->buf);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ for (j = 0; j < i; j++) {
+ rd = r->rd + j;
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(pdev, busaddr, len, dir);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ kfree(r);
+ return NULL;
+ }
+ rd_set_addr_status(rd, busaddr, 0);
+ /* initially, the dma buffer is owned by the CPU */
+ rd->skb = NULL;
+ }
+ return r;
+}
+
+static int vlsi_free_ring(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+ dma_addr_t busaddr;
+
+ for (i = 0; i < r->size; i++) {
+ rd = r->rd + i;
+ if (rd->skb)
+ dev_kfree_skb_any(rd->skb);
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
+ if (rd->buf)
+ kfree(rd->buf);
+ }
+ kfree(r);
+ return 0;
+}
+
+static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
+{
+ char *ringarea;
+ struct ring_descr_hw *hwmap;
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
+ if (!ringarea) {
+ IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
+ __FUNCTION__);
+ goto out;
+ }
+ memset(ringarea, 0, HW_RING_AREA_SIZE);
+
+ hwmap = (struct ring_descr_hw *)ringarea;
+ idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
+ XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (idev->rx_ring == NULL)
+ goto out_unmap;
+
+ hwmap += MAX_RING_DESCR;
+ idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
+ XFER_BUF_SIZE, PCI_DMA_TODEVICE);
+ if (idev->tx_ring == NULL)
+ goto out_free_rx;
+
+ idev->virtaddr = ringarea;
+ return 0;
+
+out_free_rx:
+ vlsi_free_ring(idev->rx_ring);
+out_unmap:
+ idev->rx_ring = idev->tx_ring = NULL;
+ pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
+ idev->busaddr = 0;
+out:
+ return -ENOMEM;
+}
+
+static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
+{
+ vlsi_free_ring(idev->rx_ring);
+ vlsi_free_ring(idev->tx_ring);
+ idev->rx_ring = idev->tx_ring = NULL;
+
+ if (idev->busaddr)
+ pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ return 0;
+}
+
+/********************************************************/
+
+static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int crclen, len = 0;
+ struct sk_buff *skb;
+ int ret = 0;
+ struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
+ vlsi_irda_dev_t *idev = ndev->priv;
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_RX_ERROR) {
+ if (status & RD_RX_OVER)
+ ret |= VLSI_RX_OVER;
+ if (status & RD_RX_LENGTH)
+ ret |= VLSI_RX_LENGTH;
+ if (status & RD_RX_PHYERR)
+ ret |= VLSI_RX_FRAME;
+ if (status & RD_RX_CRCERR)
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+
+ len = rd_get_count(rd);
+ crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
+ len -= crclen; /* remove trailing CRC */
+ if (len <= 0) {
+ IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */
+
+ /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
+ * endian-adjustment there just in place will dirty a cache line
+ * which belongs to the map and thus we must be sure it will
+ * get flushed before giving the buffer back to hardware.
+		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
+ */
+ le16_to_cpus(rd->buf+len);
+ if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
+ IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__);
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+ }
+
+ if (!rd->skb) {
+ IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ skb = rd->skb;
+ rd->skb = NULL;
+ skb->dev = ndev;
+ memcpy(skb_put(skb,len), rd->buf, len);
+ skb->mac.raw = skb->data;
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+ ndev->last_rx = jiffies;
+
+done:
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ /* buffer still owned by CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static void vlsi_fill_rx(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+
+ for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
+ if (rd_is_active(rd)) {
+ IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
+ __FUNCTION__);
+ vlsi_ring_debug(r);
+ break;
+ }
+ if (!rd->skb) {
+ rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
+ if (rd->skb) {
+ skb_reserve(rd->skb,1);
+ rd->skb->protocol = htons(ETH_P_IRDA);
+ }
+ else
+ break; /* probably not worth logging? */
+ }
+ /* give dma buffer back to busmaster */
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ rd_activate(rd);
+ }
+}
+
+static void vlsi_rx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ idev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ idev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ idev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ idev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ idev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ idev->stats.rx_packets++;
+ idev->stats.rx_bytes += ret;
+ }
+ }
+
+ do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
+
+ vlsi_fill_rx(r);
+
+ if (ring_first(r) == NULL) {
+ /* we are in big trouble, if this should ever happen */
+ IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__);
+ vlsi_ring_debug(r);
+ }
+ else
+ outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
+{
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ if (rd_get_count(rd)) {
+ IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__);
+ ret = -VLSI_RX_DROP;
+ }
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ }
+ else
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ idev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ idev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ idev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ idev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ idev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ idev->stats.rx_packets++;
+ idev->stats.rx_bytes += ret;
+ }
+ }
+}
+
+/********************************************************/
+
+static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int len;
+ int ret;
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_TX_UNDRN)
+ ret = VLSI_TX_FIFO;
+ else
+ ret = 0;
+ rd_set_status(rd, 0);
+
+ if (rd->skb) {
+ len = rd->skb->len;
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ else /* tx-skb already freed? - should never happen */
+ len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
+
+ rd_set_count(rd, 0);
+ /* dma buffer still owned by the CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
+{
+ u16 nphyctl;
+ u16 config;
+ unsigned mode;
+ int ret;
+ int baudrate;
+ int fifocnt;
+
+ baudrate = idev->new_baud;
+ IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
+ if (baudrate == 4000000) {
+ mode = IFF_FIR;
+ config = IRCFG_FIR;
+ nphyctl = PHYCTL_FIR;
+ }
+ else if (baudrate == 1152000) {
+ mode = IFF_MIR;
+ config = IRCFG_MIR | IRCFG_CRC16;
+ nphyctl = PHYCTL_MIR(clksrc==3);
+ }
+ else {
+ mode = IFF_SIR;
+ config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
+ switch(baudrate) {
+ default:
+ IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
+ __FUNCTION__, baudrate);
+ baudrate = 9600;
+ /* fallthru */
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
+ break;
+ }
+ }
+ config |= IRCFG_MSTR | IRCFG_ENRX;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+ }
+
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(config, iobase+VLSI_PIO_IRCFG);
+ outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
+ wmb();
+ outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
+ mb();
+
+ udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
+
+ /* read back settings for validation */
+
+ config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
+
+ if (mode == IFF_FIR)
+ config ^= IRENABLE_FIR_ON;
+ else if (mode == IFF_MIR)
+ config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
+ else
+ config ^= IRENABLE_SIR_ON;
+
+ if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
+ IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__,
+ (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
+ ret = -1;
+ }
+ else {
+ if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
+ IRDA_WARNING("%s: failed to apply baudrate %d\n",
+ __FUNCTION__, baudrate);
+ ret = -1;
+ }
+ else {
+ idev->mode = mode;
+ idev->baud = baudrate;
+ idev->new_baud = 0;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ vlsi_reg_debug(iobase,__FUNCTION__);
+
+ return ret;
+}
+
+static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned long flags;
+ unsigned iobase = ndev->base_addr;
+ u8 status;
+ u16 config;
+ int mtt;
+ int len, speed;
+ struct timeval now, ready;
+ char *msg = NULL;
+
+ speed = irda_get_next_speed(skb);
+ spin_lock_irqsave(&idev->lock, flags);
+ if (speed != -1 && speed != idev->baud) {
+ netif_stop_queue(ndev);
+ idev->new_baud = speed;
+ status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
+ }
+ else
+ status = 0;
+
+ if (skb->len == 0) {
+ /* handle zero packets - should be speed change */
+ if (status == 0) {
+ msg = "bogus zero-length packet";
+ goto drop_unlock;
+ }
+
+ /* due to the completely asynch tx operation we might have
+ * IrLAP racing with the hardware here, f.e. if the controller
+ * is just sending the last packet with current speed while
+ * the LAP is already switching the speed using synchronous
+ * len=0 packet. Immediate execution would lead to hw lockup
+ * requiring a powercycle to reset. Good candidate to trigger
+ * this is the final UA:RSP packet after receiving a DISC:CMD
+ * when getting the LAP down.
+ * Note that we are not protected by the queue_stop approach
+ * because the final UA:RSP arrives _without_ request to apply
+ * new-speed-after-this-packet - hence the driver doesn't know
+ * this was the last packet and doesn't stop the queue. So the
+ * forced switch to default speed from LAP gets through as fast
+ * as only some 10 usec later while the UA:RSP is still processed
+ * by the hardware and we would get screwed.
+ */
+
+ if (ring_first(idev->tx_ring) == NULL) {
+ /* no race - tx-ring already empty */
+ vlsi_set_baud(idev, iobase);
+ netif_wake_queue(ndev);
+ }
+		/* else: keep the speed change pending like it would
+		 * for any len>0 packet. tx completion interrupt
+		 * will apply it when the tx ring becomes empty.
+		 */
+ spin_unlock_irqrestore(&idev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ /* sanity checks - simply drop the packet */
+
+ rd = ring_last(r);
+ if (!rd) {
+ msg = "ring full, but queue wasn't stopped";
+ goto drop_unlock;
+ }
+
+ if (rd_is_active(rd)) {
+ msg = "entry still owned by hw";
+ goto drop_unlock;
+ }
+
+ if (!rd->buf) {
+ msg = "tx ring entry without pci buffer";
+ goto drop_unlock;
+ }
+
+ if (rd->skb) {
+ msg = "ring entry with old skb still attached";
+ goto drop_unlock;
+ }
+
+ /* no need for serialization or interrupt disable during mtt */
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ if ((mtt = irda_get_mtt(skb)) > 0) {
+
+ ready.tv_usec = idev->last_rx.tv_usec + mtt;
+ ready.tv_sec = idev->last_rx.tv_sec;
+ if (ready.tv_usec >= 1000000) {
+ ready.tv_usec -= 1000000;
+ ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
+ }
+ for(;;) {
+ do_gettimeofday(&now);
+ if (now.tv_sec > ready.tv_sec
+ || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
+ break;
+ udelay(100);
+ /* must not sleep here - we are called under xmit_lock! */
+ }
+ }
+
+ /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
+ * after subsequent tx-completion
+ */
+
+ if (idev->mode == IFF_SIR) {
+ status |= RD_TX_DISCRC; /* no hw-crc creation */
+ len = async_wrap_skb(skb, rd->buf, r->len);
+
+ /* Some rare worst case situation in SIR mode might lead to
+ * potential buffer overflow. The wrapper detects this, returns
+ * with a shortened frame (without FCS/EOF) but doesn't provide
+ * any error indication about the invalid packet which we are
+ * going to transmit.
+		 * Therefore we log if the buffer got filled to the point where the
+ * wrapper would abort, i.e. when there are less than 5 bytes left to
+ * allow appending the FCS/EOF.
+ */
+
+ if (len >= r->len-5)
+ IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
+ __FUNCTION__);
+ }
+ else {
+ /* hw deals with MIR/FIR mode wrapping */
+ status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
+ len = skb->len;
+ if (len > r->len) {
+ msg = "frame exceeds tx buffer length";
+ goto drop;
+ }
+ else
+ memcpy(rd->buf, skb->data, len);
+ }
+
+ rd->skb = skb; /* remember skb for tx-complete stats */
+
+ rd_set_count(rd, len);
+ rd_set_status(rd, status); /* not yet active! */
+
+ /* give dma buffer back to busmaster-hw (flush caches to make
+ * CPU-driven changes visible from the pci bus).
+ */
+
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+
+/* Switching to TX mode here races with the controller
+ * which may stop TX at any time when fetching an inactive descriptor
+ * or one with CLR_ENTX set. So we switch on TX only, if TX was not running
+ * _after_ the new descriptor was activated on the ring. This ensures
+ * we will either find TX already stopped or we can be sure, there
+ * will be a TX-complete interrupt even if the chip stopped doing
+ * TX just after we found it still running. The ISR will then find
+ * the non-empty ring and restart TX processing. The enclosing
+ * spinlock provides the correct serialization to prevent race with isr.
+ */
+
+ spin_lock_irqsave(&idev->lock,flags);
+
+ rd_activate(rd);
+
+ if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+ }
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ mb();
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ wmb();
+ outw(0, iobase+VLSI_PIO_PROMPT);
+ }
+ ndev->trans_start = jiffies;
+
+ if (ring_put(r) == NULL) {
+ netif_stop_queue(ndev);
+ IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__);
+ }
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&idev->lock, flags);
+drop:
+ IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg);
+ dev_kfree_skb_any(skb);
+ idev->stats.tx_errors++;
+ idev->stats.tx_dropped++;
+ /* Don't even think about returning NET_XMIT_DROP (=1) here!
+ * In fact any retval!=0 causes the packet scheduler to requeue the
+ * packet for later retry of transmission - which isn't exactly
+ * what we want after we've just called dev_kfree_skb_any ;-)
+ */
+ return 0;
+}
+
+static void vlsi_tx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned iobase;
+ int ret;
+ u16 config;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ idev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ idev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ idev->stats.tx_packets++;
+ idev->stats.tx_bytes += ret;
+ }
+ }
+
+ iobase = ndev->base_addr;
+
+ if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
+ vlsi_set_baud(idev, iobase);
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ if (rd == NULL) /* tx ring empty: re-enable rx */
+ outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
+
+ else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
+ __FUNCTION__, fifocnt);
+ }
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ }
+
+ outw(0, iobase+VLSI_PIO_PROMPT);
+
+ if (netif_queue_stopped(ndev) && !idev->new_baud) {
+ netif_wake_queue(ndev);
+ IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__);
+ }
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
+{
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
+ ret = -VLSI_TX_DROP;
+ }
+ else
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ idev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ idev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ idev->stats.tx_packets++;
+ idev->stats.tx_bytes += ret;
+ }
+ }
+
+}
+
+/********************************************************/
+
+static int vlsi_start_clock(struct pci_dev *pdev)
+{
+ u8 clkctl, lock;
+ int i, count;
+
+ if (clksrc < 2) { /* auto or PLL: try PLL */
+ clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* procedure to detect PLL lock synchronisation:
+ * after 0.5 msec initial delay we expect to find 3 PLL lock
+ * indications within 10 msec for successful PLL detection.
+ */
+ udelay(500);
+ count = 0;
+ for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
+ if (lock&CLKCTL_LOCK) {
+ if (++count >= 3)
+ break;
+ }
+ udelay(50);
+ }
+ if (count < 3) {
+ if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
+ IRDA_ERROR("%s: no PLL or failed to lock!\n",
+ __FUNCTION__);
+ clkctl = CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ return -1;
+ }
+ else /* was: clksrc=0(auto) */
+ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
+
+ IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
+ __FUNCTION__, clksrc);
+ }
+ else
+ clksrc = 1; /* got successful PLL lock */
+ }
+
+ if (clksrc != 1) {
+ /* we get here if either no PLL detected in auto-mode or
+ an external clock source was explicitly specified */
+
+ clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
+ if (clksrc == 3)
+ clkctl |= CLKCTL_XCKSEL;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* no way to test for working XCLK */
+ }
+ else
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+
+ /* ok, now going to connect the chip with the clock source */
+
+ clkctl &= ~CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ return 0;
+}
+
+static void vlsi_stop_clock(struct pci_dev *pdev)
+{
+ u8 clkctl;
+
+ /* disconnect chip from clock source */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+ clkctl |= CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* disable all clock sources */
+ clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+}
+
+/********************************************************/
+
+/* writing all-zero to the VLSI PCI IO register area seems to prevent
+ * some occasional situations where the hardware fails (symptoms
+ * appear as stalled tx/rx state machines, i.e. everything ok for
+ * receive or transmit but hw makes no progress or is unable to access
+ * the bus memory locations).
+ * Best place to call this is immediately after/before the internal clock
+ * gets started/stopped.
+ */
+
+static inline void vlsi_clear_regs(unsigned iobase)
+{
+ unsigned i;
+ const unsigned chip_io_extent = 32;
+
+ for (i = 0; i < chip_io_extent; i += sizeof(u16))
+ outw(0, iobase + i);
+}
+
+static int vlsi_init_chip(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev = ndev->priv;
+ unsigned iobase;
+ u16 ptr;
+
+ /* start the clock and clean the registers */
+
+ if (vlsi_start_clock(pdev)) {
+ IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__);
+ return -1;
+ }
+ iobase = ndev->base_addr;
+ vlsi_clear_regs(iobase);
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
+
+ outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
+
+ /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
+
+ outw(0, iobase+VLSI_PIO_IRCFG);
+ wmb();
+
+ outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
+
+ outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
+
+ outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
+ iobase+VLSI_PIO_RINGSIZE);
+
+ ptr = inw(iobase+VLSI_PIO_RINGPTR);
+ atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+ atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
+
+ vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
+ wmb();
+
+	/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
+	 * basically every received pulse fires an ACTIVITY-INT
+	 * leading to >>1000 INTs per second instead of a few tens
+	 */
+
+ outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
+
+ return 0;
+}
+
+static int vlsi_start_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ u8 byte;
+
+ /* we don't use the legacy UART, disable its address decoding */
+
+ pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
+ byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
+ pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
+
+ /* enable PCI busmaster access to our 16MB page */
+
+ pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
+ pci_set_master(pdev);
+
+ if (vlsi_init_chip(pdev) < 0) {
+ pci_disable_device(pdev);
+ return -1;
+ }
+
+ vlsi_fill_rx(idev->rx_ring);
+
+ do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+
+ outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
+
+ return 0;
+}
+
+static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idev->lock,flags);
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
+
+ /* disable and w/c irqs */
+ outb(0, iobase+VLSI_PIO_IRINTR);
+ wmb();
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ vlsi_unarm_tx(idev);
+ vlsi_unarm_rx(idev);
+
+ vlsi_clear_regs(iobase);
+ vlsi_stop_clock(pdev);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+
+ return &idev->stats;
+}
+
+static void vlsi_tx_timeout(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+
+ vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
+ vlsi_ring_debug(idev->tx_ring);
+
+ if (netif_running(ndev))
+ netif_stop_queue(ndev);
+
+ vlsi_stop_hw(idev);
+
+ /* now simply restart the whole thing */
+
+ if (!idev->new_baud)
+ idev->new_baud = idev->baud; /* keep current baudrate */
+
+ if (vlsi_start_hw(idev))
+ IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
+ __FUNCTION__, PCIDEV_NAME(idev->pdev), ndev->name);
+ else
+ netif_start_queue(ndev);
+}
+
+static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ unsigned long flags;
+ u16 fifocnt;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&idev->lock, flags);
+ idev->new_baud = irq->ifr_baudrate;
+ /* when called from userland there might be a minor race window here
+ * if the stack tries to change speed concurrently - which would be
+ * pretty strange anyway with the userland having full control...
+ */
+ vlsi_set_baud(idev, ndev->base_addr);
+ spin_unlock_irqrestore(&idev->lock, flags);
+ break;
+ case SIOCSMEDIABUSY:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(ndev, TRUE);
+ break;
+ case SIOCGRECEIVING:
+		/* the best we can do: check whether there are any bytes in the rx fifo.
+		 * The trustworthy window (in case some data arrives just afterwards)
+		 * may be as short as 1 usec or so at 4 Mbps.
+		 */
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
+ break;
+ default:
+ IRDA_WARNING("%s: notsupp - cmd=%04x\n",
+ __FUNCTION__, cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/********************************************************/
+
+static irqreturn_t vlsi_interrupt(int irq, void *dev_instance,
+ struct pt_regs *regs)
+{
+ struct net_device *ndev = dev_instance;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ unsigned iobase;
+ u8 irintr;
+ int boguscount = 5;
+ unsigned long flags;
+ int handled = 0;
+
+ iobase = ndev->base_addr;
+ spin_lock_irqsave(&idev->lock,flags);
+ do {
+ irintr = inb(iobase+VLSI_PIO_IRINTR);
+ mb();
+ outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
+
+ if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
+ break;
+
+ handled = 1;
+
+ if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
+ break; /* nothing todo if only activity */
+
+ if (irintr&IRINTR_RPKTINT)
+ vlsi_rx_interrupt(ndev);
+
+ if (irintr&IRINTR_TPKTINT)
+ vlsi_tx_interrupt(ndev);
+
+ } while (--boguscount > 0);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ if (boguscount <= 0)
+ IRDA_MESSAGE("%s: too much work in interrupt!\n",
+ __FUNCTION__);
+ return IRQ_RETVAL(handled);
+}
+
+/********************************************************/
+
+static int vlsi_open(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ int err = -EAGAIN;
+ char hwname[32];
+
+ if (pci_request_regions(idev->pdev, drivername)) {
+ IRDA_WARNING("%s: io resource busy\n", __FUNCTION__);
+ goto errout;
+ }
+ ndev->base_addr = pci_resource_start(idev->pdev,0);
+ ndev->irq = idev->pdev->irq;
+
+	/* on some rare occasions the chip apparently comes up with
+	 * IRQs pending. We had better w/c pending IRQs and disable them all
+	 */
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+ if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
+ drivername, ndev)) {
+ IRDA_WARNING("%s: couldn't get IRQ: %d\n",
+ __FUNCTION__, ndev->irq);
+ goto errout_io;
+ }
+
+ if ((err = vlsi_create_hwif(idev)) != 0)
+ goto errout_irq;
+
+ sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
+ idev->irlap = irlap_open(ndev,&idev->qos,hwname);
+ if (!idev->irlap)
+ goto errout_free_ring;
+
+ do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+
+ idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
+
+ if ((err = vlsi_start_hw(idev)) != 0)
+ goto errout_close_irlap;
+
+ netif_start_queue(ndev);
+
+ IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name);
+
+ return 0;
+
+errout_close_irlap:
+ irlap_close(idev->irlap);
+errout_free_ring:
+ vlsi_destroy_hwif(idev);
+errout_irq:
+ free_irq(ndev->irq,ndev);
+errout_io:
+ pci_release_regions(idev->pdev);
+errout:
+ return err;
+}
+
+static int vlsi_close(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+
+ netif_stop_queue(ndev);
+
+ if (idev->irlap)
+ irlap_close(idev->irlap);
+ idev->irlap = NULL;
+
+ vlsi_stop_hw(idev);
+
+ vlsi_destroy_hwif(idev);
+
+ free_irq(ndev->irq,ndev);
+
+ pci_release_regions(idev->pdev);
+
+ IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name);
+
+ return 0;
+}
+
+static int vlsi_irda_init(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct pci_dev *pdev = idev->pdev;
+
+ SET_MODULE_OWNER(ndev);
+
+ ndev->irq = pdev->irq;
+ ndev->base_addr = pci_resource_start(pdev,0);
+
+	/* PCI busmastering
+	 * see the include file for details on why we need these 2 masks, in this order!
+	 */
+
+ if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
+ || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
+ IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__);
+ return -1;
+ }
+
+ irda_init_max_qos_capabilies(&idev->qos);
+
+ /* the VLSI82C147 does not support 576000! */
+
+ idev->qos.baud_rate.bits = IR_2400 | IR_9600
+ | IR_19200 | IR_38400 | IR_57600 | IR_115200
+ | IR_1152000 | (IR_4000000 << 8);
+
+ idev->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&idev->qos);
+
+ /* currently no public media definitions for IrDA */
+
+ ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
+ ndev->if_port = IF_PORT_UNKNOWN;
+
+ ndev->open = vlsi_open;
+ ndev->stop = vlsi_close;
+ ndev->get_stats = vlsi_get_stats;
+ ndev->hard_start_xmit = vlsi_hard_start_xmit;
+ ndev->do_ioctl = vlsi_ioctl;
+ ndev->tx_timeout = vlsi_tx_timeout;
+ ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static int __devinit
+vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ vlsi_irda_dev_t *idev;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ else
+ pdev->current_state = 0; /* hw must be running now */
+
+ IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
+ drivername, PCIDEV_NAME(pdev));
+
+ if ( !pci_resource_start(pdev,0)
+ || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
+		IRDA_ERROR("%s: bar 0 invalid\n", __FUNCTION__);
+ goto out_disable;
+ }
+
+ ndev = alloc_irdadev(sizeof(*idev));
+ if (ndev==NULL) {
+ IRDA_ERROR("%s: Unable to allocate device memory.\n",
+ __FUNCTION__);
+ goto out_disable;
+ }
+
+ idev = ndev->priv;
+
+ spin_lock_init(&idev->lock);
+ init_MUTEX(&idev->sem);
+ down(&idev->sem);
+ idev->pdev = pdev;
+
+ if (vlsi_irda_init(ndev) < 0)
+ goto out_freedev;
+
+ if (register_netdev(ndev) < 0) {
+ IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__);
+ goto out_freedev;
+ }
+
+ if (vlsi_proc_root != NULL) {
+ struct proc_dir_entry *ent;
+
+ ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
+ if (!ent) {
+ IRDA_WARNING("%s: failed to create proc entry\n",
+ __FUNCTION__);
+ } else {
+ ent->data = ndev;
+ ent->proc_fops = VLSI_PROC_FOPS;
+ ent->size = 0;
+ }
+ idev->proc_entry = ent;
+ }
+ IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
+
+ pci_set_drvdata(pdev, ndev);
+ up(&idev->sem);
+
+ return 0;
+
+out_freedev:
+ up(&idev->sem);
+ free_netdev(ndev);
+out_disable:
+ pci_disable_device(pdev);
+out:
+ pci_set_drvdata(pdev, NULL);
+ return -ENODEV;
+}
+
+static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ IRDA_ERROR("%s: lost netdevice?\n", drivername);
+ return;
+ }
+
+ unregister_netdev(ndev);
+
+ idev = ndev->priv;
+ down(&idev->sem);
+ if (idev->proc_entry) {
+ remove_proc_entry(ndev->name, vlsi_proc_root);
+ idev->proc_entry = NULL;
+ }
+ up(&idev->sem);
+
+ free_netdev(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ IRDA_MESSAGE("%s: %s removed\n", drivername, PCIDEV_NAME(pdev));
+}
+
+#ifdef CONFIG_PM
+
+/* The controller doesn't provide PCI PM capabilities as defined by the PCI specs.
+ * Some of the Linux PCI-PM code however depends on them, for example in
+ * pci_set_power_state(). So we have to take care to perform the required
+ * operations on our own (particularly reflecting pdev->current_state),
+ * otherwise we might get cheated by pci-pm.
+ */
+
+
+static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (state < 1 || state > 3 ) {
+ IRDA_ERROR("%s - %s: invalid pm state request: %u\n",
+ __FUNCTION__, PCIDEV_NAME(pdev), state);
+ return 0;
+ }
+ if (!ndev) {
+ IRDA_ERROR("%s - %s: no netdevice \n",
+ __FUNCTION__, PCIDEV_NAME(pdev));
+ return 0;
+ }
+ idev = ndev->priv;
+ down(&idev->sem);
+ if (pdev->current_state != 0) { /* already suspended */
+ if (state > pdev->current_state) { /* simply go deeper */
+ pci_set_power_state(pdev,state);
+ pdev->current_state = state;
+ }
+ else
+ IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, PCIDEV_NAME(pdev), pdev->current_state, state);
+ up(&idev->sem);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ vlsi_stop_hw(idev);
+ pci_save_state(pdev);
+ if (!idev->new_baud)
+ /* remember speed settings to restore on resume */
+ idev->new_baud = idev->baud;
+ }
+
+ pci_set_power_state(pdev,state);
+ pdev->current_state = state;
+ idev->resume_ok = 1;
+ up(&idev->sem);
+ return 0;
+}
+
+static int vlsi_irda_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ IRDA_ERROR("%s - %s: no netdevice \n",
+ __FUNCTION__, PCIDEV_NAME(pdev));
+ return 0;
+ }
+ idev = ndev->priv;
+ down(&idev->sem);
+ if (pdev->current_state == 0) {
+ up(&idev->sem);
+ IRDA_WARNING("%s - %s: already resumed\n",
+ __FUNCTION__, PCIDEV_NAME(pdev));
+ return 0;
+ }
+
+ pci_set_power_state(pdev, 0);
+ pdev->current_state = 0;
+
+ if (!idev->resume_ok) {
+ /* should be obsolete now - but used to happen due to:
+ * - pci layer initially setting pdev->current_state = 4 (unknown)
+ * - pci layer did not walk the save_state-tree (might be APM problem)
+ * so we could not refuse to suspend from undefined state
+ * - vlsi_irda_suspend detected invalid state and refused to save
+ * configuration for resume - but was too late to stop suspending
+ * - vlsi_irda_resume got screwed when trying to resume from garbage
+ *
+ * now we explicitly set pdev->current_state = 0 after enabling the
+ * device and independently resume_ok should catch any garbage config.
+ */
+ IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
+ up(&idev->sem);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ pci_restore_state(pdev);
+ vlsi_start_hw(idev);
+ netif_device_attach(ndev);
+ }
+ idev->resume_ok = 0;
+ up(&idev->sem);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/*********************************************************/
+
+static struct pci_driver vlsi_irda_driver = {
+ .name = drivername,
+ .id_table = vlsi_irda_table,
+ .probe = vlsi_irda_probe,
+ .remove = __devexit_p(vlsi_irda_remove),
+#ifdef CONFIG_PM
+ .suspend = vlsi_irda_suspend,
+ .resume = vlsi_irda_resume,
+#endif
+};
+
+#define PROC_DIR ("driver/" DRIVER_NAME)
+
+static int __init vlsi_mod_init(void)
+{
+ int i, ret;
+
+ if (clksrc < 0 || clksrc > 3) {
+ IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
+ return -1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ switch(ringsize[i]) {
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+			IRDA_WARNING("%s: invalid %s ringsize %d, using default=8\n", drivername, (i)?"rx":"tx", ringsize[i]);
+ ringsize[i] = 8;
+ break;
+ }
+ }
+
+ sirpulse = !!sirpulse;
+
+ /* create_proc_entry returns NULL if !CONFIG_PROC_FS.
+ * Failure to create the procfs entry is handled like running
+ * without procfs - it's not required for the driver to work.
+ */
+ vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL);
+ if (vlsi_proc_root) {
+ /* protect registered procdir against module removal.
+ * Because we are in the module init path there's no race
+ * window after create_proc_entry (and no barrier needed).
+ */
+ vlsi_proc_root->owner = THIS_MODULE;
+ }
+
+ ret = pci_module_init(&vlsi_irda_driver);
+
+ if (ret && vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+ return ret;
+
+}
+
+static void __exit vlsi_mod_exit(void)
+{
+ pci_unregister_driver(&vlsi_irda_driver);
+ if (vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+}
+
+module_init(vlsi_mod_init);
+module_exit(vlsi_mod_exit);
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
new file mode 100644
index 000000000000..414694abf588
--- /dev/null
+++ b/drivers/net/irda/vlsi_ir.h
@@ -0,0 +1,798 @@
+
+/*********************************************************************
+ *
+ * vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Version: 0.5
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#ifndef IRDA_VLSI_FIR_H
+#define IRDA_VLSI_FIR_H
+
+/* ================================================================
+ * compatibility stuff
+ */
+
+/* definitions not present in pci_ids.h */
+
+#ifndef PCI_CLASS_WIRELESS_IRDA
+#define PCI_CLASS_WIRELESS_IRDA 0x0d00
+#endif
+
+#ifndef PCI_CLASS_SUBCLASS_MASK
+#define PCI_CLASS_SUBCLASS_MASK 0xffff
+#endif
+
+/* in recent 2.5 interrupt handlers have non-void return value */
+#ifndef IRQ_RETVAL
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+
+/* some stuff needs to check the kernel version. Not all 2.5 stuff was present
+ * in early 2.5.x - the test is merely to separate 2.4 from 2.5
+ */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+
+/* PDE() introduced in 2.5.4 */
+#ifdef CONFIG_PROC_FS
+#define PDE(inode) ((inode)->u.generic_ip)
+#endif
+
+/* irda crc16 calculation exported in 2.5.42 */
+#define irda_calc_crc16(fcs,buf,len) (GOOD_FCS)
+
+/* we use this for unified pci device name access */
+#define PCIDEV_NAME(pdev) ((pdev)->name)
+
+#else /* 2.5 or later */
+
+/* recent 2.5/2.6 stores pci device names at varying places ;-) */
+#ifdef CONFIG_PCI_NAMES
+/* human readable name */
+#define PCIDEV_NAME(pdev) ((pdev)->pretty_name)
+#else
+/* whatever we get from the associated struct device - bus:slot:dev.fn id */
+#define PCIDEV_NAME(pdev) (pci_name(pdev))
+#endif
+
+#endif
+
+/* ================================================================ */
+
+/* non-standard PCI registers */
+
+enum vlsi_pci_regs {
+ VLSI_PCI_CLKCTL = 0x40, /* chip clock input control */
+ VLSI_PCI_MSTRPAGE = 0x41, /* addr [31:24] for all busmaster cycles */
+ VLSI_PCI_IRMISC = 0x42 /* mainly legacy UART related */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_CLKCTL: Clock Control Register (u8, rw) */
+
+/* Three possible clock sources: either on-chip 48MHz PLL or
+ * external clock applied to EXTCLK pin. External clock may
+ * be either 48MHz or 40MHz, which is indicated by XCKSEL.
+ * CLKSTP controls whether the selected clock source gets
+ * connected to the IrDA block.
+ *
+ * On my HP OB-800 the BIOS sets the external 40MHz clock as source
+ * when IrDA is enabled and I've never detected any PLL lock success.
+ * Apparently the 14.3...MHz OSC input required for the PLL to work
+ * is not connected and the 40MHz EXTCLK is provided externally.
+ * At least this is what makes the driver work for me.
+ */
+
+enum vlsi_pci_clkctl {
+
+ /* PLL control */
+
+ CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
+ * i.e. PLL is powered, if PD_INV set */
+ CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
+
+ /* clock source selection */
+
+ CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
+ CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
+
+ /* IrDA block control */
+
+ CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
+ CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
+ * is detected, PD_INV gets set(?) and CLKSTP cleared */
+};
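+
+/* a condensed, illustrative sketch (taken from vlsi_start_clock() in
+ * vlsi_ir.c) of selecting the external 40MHz clock and then connecting
+ * it to the IrDA block:
+ *
+ *	clkctl = CLKCTL_EXTCLK | CLKCTL_XCKSEL | CLKCTL_CLKSTP;
+ *	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ *	clkctl &= ~CLKCTL_CLKSTP;
+ *	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ */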
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_MSTRPAGE: Master Page Register (u8, rw) and busmastering stuff */
+
+#define DMA_MASK_USED_BY_HW 0xffffffff
+#define DMA_MASK_MSTRPAGE 0x00ffffff
+#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
+
+ /* PCI busmastering is somewhat special for this guy - in short:
+ *
+ * We select to operate using fixed MSTRPAGE=0, use ISA DMA
+ * address restrictions to make the PCI BM api aware of this,
+ * but ensure the hardware is dealing with real 32bit access.
+ *
+ * In detail:
+ * The chip executes normal 32bit busmaster cycles, i.e.
+ * drives all 32 address lines. These addresses however are
+ * composed of [0:23] taken from various busaddr-pointers
+ * and [24:31] taken from the MSTRPAGE register in the VLSI82C147
+ * config space. Therefore _all_ busmastering must be
+ * targeted to/from one single 16MB (busaddr-) superpage!
+ * The point is to make sure all the allocations for memory
+ * locations with busmaster access (ring descriptors, buffers)
+ * are indeed bus-mappable to the same 16MB range (for x86 this
+ * means they must reside in the same 16MB physical memory address
+ * range). The only constraint we have which supports "several objects
+ * mappable to common 16MB range" paradigm is the old ISA DMA
+ * restriction to the first 16MB of physical address range.
+ * Hence the approach here is to enable PCI busmaster support using
+ * the correct 32bit dma-mask used by the chip. Afterwards the device's
+ * dma-mask gets restricted to 24bit, which must be honoured somehow by
+ * all allocations for memory areas to be exposed to the chip ...
+ *
+ * Note:
+ * Don't be surprised to get "Setting latency timer..." messages every
+ * time when PCI busmastering is enabled for the chip.
+ * The chip has its PCI latency timer RO fixed at 0 - which is not a
+ * problem here, because it is never requesting _burst_ transactions.
+ */
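+
+/* a minimal sketch of the resulting mask setup - this just mirrors what
+ * vlsi_irda_init() in vlsi_ir.c does and is shown here only to illustrate
+ * the two masks and their required order (first the real 32bit mask the
+ * hardware uses, then the restrictive 24bit mask for the allocations):
+ *
+ *	if (pci_set_dma_mask(pdev, DMA_MASK_USED_BY_HW)
+ *	    || pci_set_dma_mask(pdev, DMA_MASK_MSTRPAGE))
+ *		return -1;	(BM-DMA address limitations)
+ */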
+
+/* ------------------------------------------ */
+
+/* VLSI_PCIIRMISC: IR Miscellaneous Register (u8, rw) */
+
+/* legacy UART emulation - not used by this driver - would require:
+ * (see below for some register-value definitions)
+ *
+ * - IRMISC_UARTEN must be set to enable UART address decoding
+ * - IRMISC_UARTSEL configured
+ * - IRCFG_MASTER must be cleared
+ * - IRCFG_SIR must be set
+ * - IRENABLE_PHYANDCLOCK must be asserted 0->1 (and hence IRENABLE_SIR_ON)
+ */
+
+enum vlsi_pci_irmisc {
+
+ /* IR transceiver control */
+
+ IRMISC_IRRAIL = 0x40, /* (ro?) IR rail power indication (and control?)
+ * 0=3.3V / 1=5V. Probably set during power-on?
+ * unclear - not touched by driver */
+ IRMISC_IRPD = 0x08, /* transceiver power down, if set */
+
+ /* legacy UART control */
+
+ IRMISC_UARTTST = 0x80, /* UART test mode - "always write 0" */
+ IRMISC_UARTEN = 0x04, /* enable UART address decoding */
+
+ /* bits [1:0] IRMISC_UARTSEL to select legacy UART address */
+
+ IRMISC_UARTSEL_3f8 = 0x00,
+ IRMISC_UARTSEL_2f8 = 0x01,
+ IRMISC_UARTSEL_3e8 = 0x02,
+ IRMISC_UARTSEL_2e8 = 0x03
+};
+
+/* ================================================================ */
+
+/* registers mapped to 32 byte PCI IO space */
+
+/* note: better access all registers at the indicated u8/u16 size
+ * although some of them contain only 1 byte of information.
+ * some of them (particularly PROMPT and IRCFG) ignore
+ * access when using the wrong addressing mode!
+ */
+
+enum vlsi_pio_regs {
+ VLSI_PIO_IRINTR = 0x00, /* interrupt enable/request (u8, rw) */
+ VLSI_PIO_RINGPTR = 0x02, /* rx/tx ring pointer (u16, ro) */
+ VLSI_PIO_RINGBASE = 0x04, /* [23:10] of ring address (u16, rw) */
+ VLSI_PIO_RINGSIZE = 0x06, /* rx/tx ring size (u16, rw) */
+ VLSI_PIO_PROMPT = 0x08, /* triggers ring processing (u16, wo) */
+ /* 0x0a-0x0f: reserved / duplicated UART regs */
+ VLSI_PIO_IRCFG = 0x10, /* configuration select (u16, rw) */
+ VLSI_PIO_SIRFLAG = 0x12, /* BOF/EOF for filtered SIR (u16, ro) */
+ VLSI_PIO_IRENABLE = 0x14, /* enable and status register (u16, rw/ro) */
+ VLSI_PIO_PHYCTL = 0x16, /* physical layer current status (u16, ro) */
+ VLSI_PIO_NPHYCTL = 0x18, /* next physical layer select (u16, rw) */
+ VLSI_PIO_MAXPKT = 0x1a, /* [11:0] max len for packet receive (u16, rw) */
+ VLSI_PIO_RCVBCNT = 0x1c /* current receive-FIFO byte count (u16, ro) */
+ /* 0x1e-0x1f: reserved / duplicated UART regs */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRINTR: Interrupt Register (u8, rw) */
+
+/* enable-bits:
+ * 1 = enable / 0 = disable
+ * interrupt condition bits:
+ * set according to corresponding interrupt source
+ * (regardless of the state of the enable bits)
+ * enable bit status indicates whether interrupt gets raised
+ * write-to-clear
+ * note: RPKTINT and TPKTINT behave differently in legacy UART mode (which we don't use :-)
+ */
+
+enum vlsi_pio_irintr {
+ IRINTR_ACTEN = 0x80, /* activity interrupt enable */
+ IRINTR_ACTIVITY = 0x40, /* activity monitor (traffic detected) */
+ IRINTR_RPKTEN = 0x20, /* receive packet interrupt enable*/
+	IRINTR_RPKTINT = 0x10,	/* rx-packet transferred from fifo to memory finished */
+ IRINTR_TPKTEN = 0x08, /* transmit packet interrupt enable */
+ IRINTR_TPKTINT = 0x04, /* last bit of tx-packet+crc shifted to ir-pulser */
+ IRINTR_OE_EN = 0x02, /* UART rx fifo overrun error interrupt enable */
+ IRINTR_OE_INT = 0x01 /* UART rx fifo overrun error (read LSR to clear) */
+};
+
+/* we use this mask to check whether the (shared PCI) interrupt is ours */
+
+#define IRINTR_INT_MASK (IRINTR_ACTIVITY|IRINTR_RPKTINT|IRINTR_TPKTINT)
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGPTR: Ring Pointer Read-Back Register (u16, ro) */
+
+/* _both_ ring pointers are indices relative to the _entire_ rx,tx-ring!
+ * i.e. the referenced descriptor is located
+ * at RINGBASE + PTR * sizeof(descr) for rx and tx
+ * therefore, the tx-pointer has offset MAX_RING_DESCR
+ */
+
+#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
+
+#define RINGPTR_RX_MASK (MAX_RING_DESCR-1)
+#define RINGPTR_TX_MASK ((MAX_RING_DESCR-1)<<8)
+
+#define RINGPTR_GET_RX(p) ((p)&RINGPTR_RX_MASK)
+#define RINGPTR_GET_TX(p) (((p)&RINGPTR_TX_MASK)>>8)
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGBASE: Ring Pointer Base Address Register (u16, ro) */
+
+/* Contains [23:10] part of the ring base (bus-) address
+ * which must be 1k-aligned. [31:24] is taken from
+ * VLSI_PCI_MSTRPAGE above.
+ * The controller initiates non-burst PCI BM cycles to
+ * fetch and update the descriptors in the ring.
+ * Once fetched, the descriptor remains cached onchip
+ * until it gets closed and updated due to the ring
+ * processing state machine.
+ * The entire ring area is split in rx and tx areas with each
+ * area consisting of 64 descriptors of 8 bytes each.
+ * The rx(tx) ring is located at ringbase+0 (ringbase+64*8).
+ */
+
+#define BUS_TO_RINGBASE(p) (((p)>>10)&0x3fff)
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGSIZE: Ring Size Register (u16, rw) */
+
+/* bit mask to indicate the ring size to be used for rx and tx.
+ * possible values encoded bits
+ * 4 0000
+ * 8 0001
+ * 16 0011
+ * 32 0111
+ * 64 1111
+ * located at [15:12] for tx and [11:8] for rx ([7:0] unused)
+ *
+ * note: probably a good idea to have IRCFG_MSTR cleared when writing
+ * this so the state machines are stopped and the RINGPTR is reset!
+ */
+
+#define SIZE_TO_BITS(num) ((((num)-1)>>2)&0x0f)
+#define TX_RX_TO_RINGSIZE(tx,rx) ((SIZE_TO_BITS(tx)<<12)|(SIZE_TO_BITS(rx)<<8))
+#define RINGSIZE_TO_RXSIZE(rs) ((((rs)&0x0f00)>>6)+4)
+#define RINGSIZE_TO_TXSIZE(rs) ((((rs)&0xf000)>>10)+4)
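+
+/* worked example for the encoding above, using a tx=rx ring size of 8
+ * (the default vlsi_mod_init falls back to):
+ *
+ *	SIZE_TO_BITS(8)        = ((8-1)>>2) & 0x0f = 0x1	(table row "8 -> 0001")
+ *	TX_RX_TO_RINGSIZE(8,8) = (0x1<<12) | (0x1<<8) = 0x1100
+ */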
+
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PROMPT: Ring Prompting Register (u16, write-to-start) */
+
+/* writing any value kicks the ring processing state machines
+ * for both tx, rx rings as follows:
+ * - active rings (currently owning an active descriptor)
+ * ignore the prompt and continue
+ * - idle rings fetch the next descr from the ring and start
+ * their processing
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRCFG: IR Config Register (u16, rw) */
+
+/* notes:
+ * - no more than one of the SIR/MIR/FIR bits may be set at any time
+ * - SIR, MIR, FIR and CRC16 select the configuration which will
+ * be applied on next 0->1 transition of IRENABLE_PHYANDCLOCK (see below).
+ * - besides allowing the PCI interface to execute busmaster cycles
+ * and therefore the ring SM to operate, the MSTR bit has side-effects:
+ * when MSTR is cleared, the RINGPTR's get reset and the legacy UART mode
+ * (in contrast to busmaster access mode) gets enabled.
+ * - clearing ENRX or setting ENTX while data is received may stall the
+ * receive fifo until ENRX reenabled _and_ another packet arrives
+ * - SIRFILT means the chip performs the required unwrapping of hardware
+ * headers (XBOF's, BOF/EOF) and un-escaping in the _receive_ direction.
+ * Only the resulting IrLAP payload is copied to the receive buffers -
+ * but with the 16bit FCS still included. The question remains whether it
+ * was already checked or whether we should do it before passing the packet to IrLAP.
+ */
+
+enum vlsi_pio_ircfg {
+ IRCFG_LOOP = 0x4000, /* enable loopback test mode */
+ IRCFG_ENTX = 0x1000, /* transmit enable */
+ IRCFG_ENRX = 0x0800, /* receive enable */
+ IRCFG_MSTR = 0x0400, /* master enable */
+ IRCFG_RXANY = 0x0200, /* receive any packet */
+ IRCFG_CRC16 = 0x0080, /* 16bit (not 32bit) CRC select for MIR/FIR */
+ IRCFG_FIR = 0x0040, /* FIR 4PPM encoding mode enable */
+ IRCFG_MIR = 0x0020, /* MIR HDLC encoding mode enable */
+ IRCFG_SIR = 0x0010, /* SIR encoding mode enable */
+ IRCFG_SIRFILT = 0x0008, /* enable SIR decode filter (receiver unwrapping) */
+ IRCFG_SIRTEST = 0x0004, /* allow SIR decode filter when not in SIR mode */
+ IRCFG_TXPOL = 0x0002, /* invert tx polarity when set */
+ IRCFG_RXPOL = 0x0001 /* invert rx polarity when set */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_SIRFLAG: SIR Flag Register (u16, ro) */
+
+/* register contains hardcoded BOF=0xc0 at [7:0] and EOF=0xc1 at [15:8]
+ * which is used for unwrapping received frames in SIR decode-filter mode
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRENABLE: IR Enable Register (u16, rw/ro) */
+
+/* notes:
+ * - IREN acts as gate for latching the configured IR mode information
+ * from IRCFG and IRPHYCTL when IREN=reset and applying them when
+ * IREN gets set afterwards.
+ * - ENTXST reflects IRCFG_ENTX
+ * - ENRXST = IRCFG_ENRX && (!IRCFG_ENTX || IRCFG_LOOP)
+ */
+
+enum vlsi_pio_irenable {
+ IRENABLE_PHYANDCLOCK = 0x8000, /* enable IR phy and gate the mode config (rw) */
+ IRENABLE_CFGER = 0x4000, /* mode configuration error (ro) */
+ IRENABLE_FIR_ON = 0x2000, /* FIR on status (ro) */
+ IRENABLE_MIR_ON = 0x1000, /* MIR on status (ro) */
+ IRENABLE_SIR_ON = 0x0800, /* SIR on status (ro) */
+ IRENABLE_ENTXST = 0x0400, /* transmit enable status (ro) */
+ IRENABLE_ENRXST = 0x0200, /* Receive enable status (ro) */
+ IRENABLE_CRC16_ON = 0x0100 /* 16bit (not 32bit) CRC enabled status (ro) */
+};
+
+#define IRENABLE_MASK 0xff00 /* Read mask */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
+
+/* read-back of the currently applied physical layer status.
+ * applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_PHYANDCLOCK
+ * contents identical to VLSI_PIO_NPHYCTL (see below)
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
+
+/* latched during IRENABLE_PHYANDCLOCK=0 and applied at 0-1 transition
+ *
+ * consists of BAUD[15:10], PLSWID[9:5] and PREAMB[4:0] bits defined as follows:
+ *
+ * SIR-mode: BAUD = (115.2kHz / baudrate) - 1
+ * PLSWID = (pulsetime * freq / (BAUD+1)) - 1
+ * where pulsetime is the requested IrPHY pulse width
+ * and freq is 8(16)MHz for 40(48)MHz primary input clock
+ * PREAMB: don't care for SIR
+ *
+ * The nominal SIR pulse width is 3/16 bit time so we have PLSWID=12
+ * fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
+ * IrPHY also allows shorter pulses down to the nominal pulse duration
+ * at 115.2kbaud (minus some tolerance) which is 1.41 usec.
+ * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
+ * we get the minimum acceptable PLSWID values according to the VLSI
+ * specification, which provides 1.5 usec pulse width for all speeds (except
+ * for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
+ * reduces the transceiver power which drains the battery. At 9.6kbaud for
+ * example this amounts to more than 90% battery power saving!
+ *
+ * MIR-mode: BAUD = 0
+ * PLSWID = 9(10) for 40(48) MHz input clock
+ * to get nominal MIR pulse width
+ * PREAMB = 1
+ *
+ * FIR-mode: BAUD = 0
+ * PLSWID: don't care
+ * PREAMB = 15
+ */
+
+#define PHYCTL_BAUD_SHIFT 10
+#define PHYCTL_BAUD_MASK 0xfc00
+#define PHYCTL_PLSWID_SHIFT 5
+#define PHYCTL_PLSWID_MASK 0x03e0
+#define PHYCTL_PREAMB_SHIFT 0
+#define PHYCTL_PREAMB_MASK 0x001f
+
+#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
+#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
+#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
+
+#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
+ | (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
+ | (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
+
+#define BAUD_BITS(br) ((115200/(br))-1)
+
+static inline unsigned
+calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
+{
+ unsigned tmp;
+
+	if (widthselect)	/* nominal 3/16 pulse width */
+ return (clockselect) ? 12 : 24;
+
+ tmp = ((clockselect) ? 12 : 24) / (BAUD_BITS(baudrate)+1);
+
+ /* intermediate result of integer division needed here */
+
+ return (tmp>0) ? (tmp-1) : 0;
+}
+
+#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
+#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
+#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
+
+/* quite ugly, I know. But implementing these calculations here avoids
+ * having magic numbers in the code and allows some playing with pulsewidths
+ * without risk of violating the standards.
+ * FWIW, here is the table for reference:
+ *
+ * baudrate BAUD min-PLSWID nom-PLSWID PREAMB
+ * 2400 47 0(0) 12(24) 0
+ * 9600 11 0(0) 12(24) 0
+ * 19200 5 1(2) 12(24) 0
+ * 38400 2 3(6) 12(24) 0
+ * 57600 1 5(10) 12(24) 0
+ * 115200 0 11(22) 12(24) 0
+ * MIR 0 - 9(10) 1
+ * FIR 0 - 0 15
+ *
+ * note: x(y) means x-value for 40MHz / y-value for 48MHz primary input clock
+ */
+
+/* ------------------------------------------ */
+
+
+/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
+
+/* maximum acceptable length for received packets */
+
+/* hw imposed limitation - register uses only [11:0] */
+#define MAX_PACKET_LENGTH 0x0fff
+
+/* IrLAP I-field (apparently not defined elsewhere) */
+#define IRDA_MTU 2048
+
+/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
+#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
+
+/* the buffers we use to exchange frames with the hardware need to be
+ * larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
+ * appended and, in SIR mode, a lot of frame wrapping bytes. The worst
+ * case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
+ * needing to be escaped to provide transparency. Furthermore, the peer
+ * might ask for quite a number of additional XBOFs:
+ * up to 115+48 XBOFS 163
+ * regular BOF 1
+ * A-field 1
+ * C-field 1
+ * I-field, IRDA_MTU, all escaped 4096
+ * FCS (16 bit at SIR, escaped) 4
+ * EOF 1
+ * AFAICS nothing in IrLAP guarantees the A/C fields not to need escaping
+ * (e.g. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there) so in the
+ * worst case (with A and C escaped as well) we have 4269 bytes total frame size.
+ * However, the VLSI uses only 12 bits for all buffer length values,
+ * which limits the maximum usable buffer size to <= 4095.
+ * Note this is not a limitation in the receive case because we use
+ * the SIR filtering mode where the hw unwraps the frame and only the
+ * bare packet+fcs is stored into the buffer - in contrast to the SIR
+ * tx case where we have to pass frame-wrapped packets to the hw.
+ * If this would ever become an issue in real life, the only workaround
+ * I see would be using the legacy UART emulation in SIR mode.
+ */
+
+#define XFER_BUF_SIZE MAX_PACKET_LENGTH
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
+
+/* receive packet counter gets incremented on every non-filtered
+ * byte which was put in the receive fifo and reset for each
+ * new packet. Used to decide whether we are just in the middle
+ * of receiving
+ */
+
+/* better apply the [11:0] mask when reading, as some docs say the
+ * reserved [15:12] would return 1 when reading - which is wrong AFAICS
+ */
+#define RCVBCNT_MASK 0x0fff
+
+/******************************************************************/
+
+/* descriptors for rx/tx ring
+ *
+ * accessed by hardware - don't change!
+ *
+ * the descriptor is owned by hardware when the ACTIVE status bit
+ * is set, and nothing (besides reading status to test the bit)
+ * shall be done. The bit gets cleared by hw when the descriptor
+ * gets closed. Premature reaping of descriptors owned by the chip
+ * can be achieved by disabling IRCFG_MSTR
+ *
+ * Attention: Writing addr overwrites status!
+ *
+ * ### FIXME: depends on endianness (but there ain't no non-i586 ob800 ;-)
+ */
+
+struct ring_descr_hw {
+ volatile u16 rd_count; /* tx/rx count [11:0] */
+ u16 reserved;
+ union {
+ u32 addr; /* [23:0] of the buffer's busaddress */
+ struct {
+ u8 addr_res[3];
+ volatile u8 status; /* descriptor status */
+ } rd_s __attribute__((packed));
+	} rd_u __attribute__((packed));
+} __attribute__ ((packed));
+
+#define rd_addr rd_u.addr
+#define rd_status rd_u.rd_s.status
+
+/* ring descriptor status bits */
+
+#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
+
+/* TX ring descriptor status */
+
+#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
+#define RD_TX_BADCRC 0x20 /* force a bad CRC */
+#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
+#define RD_TX_FRCEUND 0x08 /* force underrun */
+#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
+#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
+
+/* RX ring descriptor status */
+
+#define RD_RX_PHYERR 0x40 /* physical encoding error */
+#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
+#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
+#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
+#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
+
+#define RD_RX_ERROR 0x7c /* any error in received frame */
+
+/* the memory required to hold the 2 descriptor rings */
+#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
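+/* i.e. 2 rings * 64 descriptors * 8 packed bytes = 1 kByte */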
+
+/******************************************************************/
+
+/* sw-ring descriptors consist of a bus-mapped transfer buffer with
+ * an associated skb and a pointer to the hw entry descriptor
+ */
+
+struct ring_descr {
+ struct ring_descr_hw *hw;
+ struct sk_buff *skb;
+ void *buf;
+};
+
+/* wrappers for operations on hw-exposed ring descriptors;
+ * access to the hw-part of the descriptors must use these.
+ */
+
+static inline int rd_is_active(struct ring_descr *rd)
+{
+ return ((rd->hw->rd_status & RD_ACTIVE) != 0);
+}
+
+static inline void rd_activate(struct ring_descr *rd)
+{
+ rd->hw->rd_status |= RD_ACTIVE;
+}
+
+static inline void rd_set_status(struct ring_descr *rd, u8 s)
+{
+ rd->hw->rd_status = s; /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
+{
+ /* order is important for two reasons:
+	 * - overlaid: writing addr overwrites status
+ * - we want to write status last so we have valid address in
+ * case status has RD_ACTIVE set
+ */
+
+ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
+ IRDA_ERROR("%s: pci busaddr inconsistency!\n", __FUNCTION__);
+ dump_stack();
+ return;
+ }
+
+ a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
+ * to status - just in case MSTRPAGE_VALUE!=0
+ */
+ rd->hw->rd_addr = cpu_to_le32(a);
+ wmb();
+ rd_set_status(rd, s); /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_count(struct ring_descr *rd, u16 c)
+{
+ rd->hw->rd_count = cpu_to_le16(c);
+}
+
+static inline u8 rd_get_status(struct ring_descr *rd)
+{
+ return rd->hw->rd_status;
+}
+
+static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
+{
+ dma_addr_t a;
+
+ a = le32_to_cpu(rd->hw->rd_addr);
+ return (a & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
+}
+
+static inline u16 rd_get_count(struct ring_descr *rd)
+{
+ return le16_to_cpu(rd->hw->rd_count);
+}
+
+/******************************************************************/
+
+/* sw descriptor rings for rx, tx:
+ *
+ * operations follow producer-consumer paradigm, with the hw
+ * in the middle doing the processing.
+ * ring size must be power of two.
+ *
+ * producer advances r->tail after inserting for processing
+ * consumer advances r->head after removing processed rd
+ * ring is empty if head==tail / full if (tail+1)==head
+ */
+
+struct vlsi_ring {
+ struct pci_dev *pdev;
+ int dir;
+ unsigned len;
+ unsigned size;
+ unsigned mask;
+ atomic_t head, tail;
+ struct ring_descr *rd;
+};
+
+/* ring processing helpers */
+
+static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+{
+ int t;
+
+ t = atomic_read(&r->tail) & r->mask;
+ return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
+}
+
+static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+{
+ atomic_inc(&r->tail);
+ return ring_last(r);
+}
+
+static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+{
+ int h;
+
+ h = atomic_read(&r->head) & r->mask;
+ return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+}
+
+static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+{
+ atomic_inc(&r->head);
+ return ring_first(r);
+}
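+
+/* a condensed, illustrative sketch of how the tx path in vlsi_ir.c uses
+ * these helpers (producer side - the tx-complete interrupt acts as the
+ * consumer via ring_first()/ring_get()):
+ *
+ *	rd = ring_last(r);		(next free descriptor, NULL if ring full)
+ *	rd_set_count(rd, len);
+ *	rd_set_status(rd, status);	(prepared, but not yet active)
+ *	rd_activate(rd);		(pass ownership to the hardware)
+ *	if (ring_put(r) == NULL)	(tail advanced - ring is full now)
+ *		netif_stop_queue(ndev);
+ */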
+
+/******************************************************************/
+
+/* our private compound VLSI-PCI-IRDA device information */
+
+typedef struct vlsi_irda_dev {
+ struct pci_dev *pdev;
+ struct net_device_stats stats;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ unsigned mode;
+ int baud, new_baud;
+
+ dma_addr_t busaddr;
+ void *virtaddr;
+ struct vlsi_ring *tx_ring, *rx_ring;
+
+ struct timeval last_rx;
+
+ spinlock_t lock;
+ struct semaphore sem;
+
+ u8 resume_ok;
+ struct proc_dir_entry *proc_entry;
+
+} vlsi_irda_dev_t;
+
+/********************************************************/
+
+/* the remapped error flags we use for returning from frame
+ * post-processing in vlsi_process_tx/rx() after it was completed
+ * by the hardware. These functions either return the >=0 number
+ * of transferred bytes in case of success or the negative (-)
+ * of the or'ed error flags.
+ */
+
+#define VLSI_TX_DROP 0x0001
+#define VLSI_TX_FIFO 0x0002
+
+#define VLSI_RX_DROP 0x0100
+#define VLSI_RX_OVER 0x0200
+#define VLSI_RX_LENGTH 0x0400
+#define VLSI_RX_FRAME 0x0800
+#define VLSI_RX_CRC 0x1000
+
+/********************************************************/
+
+#endif /* IRDA_VLSI_FIR_H */
+
diff --git a/drivers/net/irda/w83977af.h b/drivers/net/irda/w83977af.h
new file mode 100644
index 000000000000..04476c2e9121
--- /dev/null
+++ b/drivers/net/irda/w83977af.h
@@ -0,0 +1,53 @@
+#ifndef W83977AF_H
+#define W83977AF_H
+
+#define W977_EFIO_BASE 0x370
+#define W977_EFIO2_BASE 0x3f0
+#define W977_DEVICE_IR 0x06
+
+
+/*
+ * Enter extended function mode
+ */
+static inline void w977_efm_enter(unsigned int efio)
+{
+ outb(0x87, efio);
+ outb(0x87, efio);
+}
+
+/*
+ * Select a device to configure
+ */
+
+static inline void w977_select_device(__u8 devnum, unsigned int efio)
+{
+ outb(0x07, efio);
+ outb(devnum, efio+1);
+}
+
+/*
+ * Write a byte to a register
+ */
+static inline void w977_write_reg(__u8 reg, __u8 value, unsigned int efio)
+{
+ outb(reg, efio);
+ outb(value, efio+1);
+}
+
+/*
+ * read a byte from a register
+ */
+static inline __u8 w977_read_reg(__u8 reg, unsigned int efio)
+{
+ outb(reg, efio);
+ return inb(efio+1);
+}
+
+/*
+ * Exit extended function mode
+ */
+static inline void w977_efm_exit(unsigned int efio)
+{
+ outb(0xAA, efio);
+}
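+
+/*
+ * Typical use of the helpers above (this is the device-deactivation
+ * sequence from w83977af_close() in w83977af_ir.c):
+ *
+ *	w977_efm_enter(efio);
+ *	w977_select_device(W977_DEVICE_IR, efio);
+ *	w977_write_reg(0x30, 0x00, efio);
+ *	w977_efm_exit(efio);
+ */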
+#endif
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
new file mode 100644
index 000000000000..0ea65c4c6f85
--- /dev/null
+++ b/drivers/net/irda/w83977af_ir.c
@@ -0,0 +1,1379 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.c
+ * Version: 1.0
+ * Description: FIR driver for the Winbond W83977AF Super I/O chip
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Wed Nov 4 11:46:16 1998
+ * Modified at: Fri Jan 28 12:10:59 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999 Rebel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
+ * warranty for any of this software. This material is provided "AS-IS"
+ * and at no charge.
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in pc87108.c since the implementations are quite
+ * similar.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the BSR register on entry and restore it on exit.
+ * It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb( iobase+BSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb( bank, iobase+BSR);
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include "w83977af.h"
+#include "w83977af_ir.h"
+
+#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
+#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
+#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
+#endif
+#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */
+#define CONFIG_USE_W977_PNP /* Currently needed */
+#define PIO_MAX_SPEED 115200
+
+static char *driver_name = "w83977af_ir";
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+#define CHIP_IO_EXTENT 8
+
+static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
+#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
+static unsigned int irq[] = { 6, 0, 0, 0 };
+#else
+static unsigned int irq[] = { 11, 0, 0, 0 };
+#endif
+static unsigned int dma[] = { 1, 0, 0, 0 };
+static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
+static unsigned int efio = W977_EFIO_BASE;
+
+static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
+
+/* Some prototypes */
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma);
+static int w83977af_close(struct w83977af_ir *self);
+static int w83977af_probe(int iobase, int irq, int dma);
+static int w83977af_dma_receive(struct w83977af_ir *self);
+static int w83977af_dma_receive_complete(struct w83977af_ir *self);
+static int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
+static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
+static int w83977af_is_receiving(struct w83977af_ir *self);
+
+static int w83977af_net_open(struct net_device *dev);
+static int w83977af_net_close(struct net_device *dev);
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);
+
+/*
+ * Function w83977af_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init w83977af_init(void)
+{
+ int i;
+
+ IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+
+	for (i=0; (i < 4) && (io[i] < 2000); i++) {
+ if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/*
+ * Function w83977af_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit w83977af_cleanup(void)
+{
+ int i;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+
+ for (i=0; i < 4; i++) {
+ if (dev_self[i])
+ w83977af_close(dev_self[i]);
+ }
+}
+
+/*
+ * Function w83977af_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma)
+{
+ struct net_device *dev;
+ struct w83977af_ir *self;
+ int err;
+
+ IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+
+ /* Lock the port that we need */
+ if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
+ IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
+ __FUNCTION__ , iobase);
+ return -ENODEV;
+ }
+
+ if (w83977af_probe(iobase, irq, dma) == -1) {
+ err = -1;
+ goto err_out;
+ }
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct w83977af_ir));
+ if (dev == NULL) {
+ printk( KERN_ERR "IrDA: Can't allocate memory for "
+ "IrDA control block!\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ self = dev->priv;
+ spin_lock_init(&self->lock);
+
+ /* Initialize IO */
+ self->io.fir_base = iobase;
+ self->io.irq = irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = dma;
+ self->io.fifo_size = 32;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+	/* The only value we must override is the baudrate */
+
+ /* FIXME: The HP HDLS-1100 does not support 1152000! */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+
+ /* The HP HDLS-1100 needs 1 ms according to the specs */
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 4000;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
+
+ self->tx_buff.head =
+ dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+ self->netdev = dev;
+
+ /* Keep track of module usage */
+ SET_MODULE_OWNER(dev);
+
+ /* Override the network functions we need to use */
+ dev->hard_start_xmit = w83977af_hard_xmit;
+ dev->open = w83977af_net_open;
+ dev->stop = w83977af_net_close;
+ dev->do_ioctl = w83977af_net_ioctl;
+ dev->get_stats = w83977af_net_get_stats;
+
+ err = register_netdev(dev);
+ if (err) {
+		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ goto err_out3;
+ }
+ IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+
+ return 0;
+err_out3:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+err_out2:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+err_out1:
+ free_netdev(dev);
+err_out:
+ release_region(iobase, CHIP_IO_EXTENT);
+ return err;
+}
+
+/*
+ * Function w83977af_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int w83977af_close(struct w83977af_ir *self)
+{
+ int iobase;
+
+ IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+
+ iobase = self->io.fir_base;
+
+#ifdef CONFIG_USE_W977_PNP
+ /* enter PnP configuration mode */
+ w977_efm_enter(efio);
+
+ w977_select_device(W977_DEVICE_IR, efio);
+
+ /* Deactivate device */
+ w977_write_reg(0x30, 0x00, efio);
+
+ w977_efm_exit(efio);
+#endif /* CONFIG_USE_W977_PNP */
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
+ __FUNCTION__ , self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+int w83977af_probe( int iobase, int irq, int dma)
+{
+ int version;
+ int i;
+
+ for (i=0; i < 2; i++) {
+ IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ );
+#ifdef CONFIG_USE_W977_PNP
+ /* Enter PnP configuration mode */
+ w977_efm_enter(efbase[i]);
+
+ w977_select_device(W977_DEVICE_IR, efbase[i]);
+
+ /* Configure PnP port, IRQ, and DMA channel */
+ w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
+ w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
+
+ w977_write_reg(0x70, irq, efbase[i]);
+#ifdef CONFIG_ARCH_NETWINDER
+ /* Netwinder uses 1 higher than Linux */
+ w977_write_reg(0x74, dma+1, efbase[i]);
+#else
+ w977_write_reg(0x74, dma, efbase[i]);
+#endif /*CONFIG_ARCH_NETWINDER */
+ w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
+
+ /* Set append hardware CRC, enable IR bank selection */
+ w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
+
+ /* Activate device */
+ w977_write_reg(0x30, 0x01, efbase[i]);
+
+ w977_efm_exit(efbase[i]);
+#endif /* CONFIG_USE_W977_PNP */
+ /* Disable Advanced mode */
+ switch_bank(iobase, SET2);
+		outb(0x00, iobase+2);
+
+ /* Turn on UART (global) interrupts */
+ switch_bank(iobase, SET0);
+ outb(HCR_EN_IRQ, iobase+HCR);
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, SET2);
+ outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
+
+ /* Set default IR-mode */
+ switch_bank(iobase, SET0);
+ outb(HCR_SIR, iobase+HCR);
+
+ /* Read the Advanced IR ID */
+ switch_bank(iobase, SET3);
+ version = inb(iobase+AUID);
+
+		/* The upper nibble of the version should be 0x1 */
+ if (0x10 == (version & 0xf0)) {
+ efio = efbase[i];
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
+ UFR_EN_FIFO,iobase+UFR);
+
+ /* Receiver frame length */
+ switch_bank(iobase, SET4);
+ outb(2048 & 0xff, iobase+6);
+ outb((2048 >> 8) & 0x1f, iobase+7);
+
+ /*
+ * Init HP HSDL-1100 transceiver.
+ *
+			 * Set IRX_MSL since we have two receive paths, IRRX
+			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
+			 * be an input pin used for IRRXH.
+ *
+ * IRRX pin 37 connected to receiver
+ * IRTX pin 38 connected to transmitter
+ * FIRRX pin 39 connected to receiver (IRSL0)
+ * CIRRX pin 40 connected to pin 37
+ */
+ switch_bank(iobase, SET7);
+ outb(0x40, iobase+7);
+
+ IRDA_MESSAGE("W83977AF (IR) driver loaded. "
+ "Version: 0x%02x\n", version);
+
+ return 0;
+ } else {
+			/* Try next extended function register address */
+			IRDA_DEBUG(0, "%s(), Wrong chip version\n", __FUNCTION__);
+ }
+ }
+ return -1;
+}
+
+void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
+{
+ int ir_mode = HCR_SIR;
+ int iobase;
+ __u8 set;
+
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ set = inb(iobase+SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase+ICR);
+
+ /* Select Set 2 */
+ switch_bank(iobase, SET2);
+ outb(0x00, iobase+ABHL);
+
+ switch (speed) {
+ case 9600: outb(0x0c, iobase+ABLL); break;
+ case 19200: outb(0x06, iobase+ABLL); break;
+ case 38400: outb(0x03, iobase+ABLL); break;
+ case 57600: outb(0x02, iobase+ABLL); break;
+ case 115200: outb(0x01, iobase+ABLL); break;
+ case 576000:
+ ir_mode = HCR_MIR_576;
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
+ break;
+ case 1152000:
+ ir_mode = HCR_MIR_1152;
+ IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
+ break;
+ case 4000000:
+ ir_mode = HCR_FIR;
+ IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
+ break;
+ default:
+ ir_mode = HCR_FIR;
+ IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
+ break;
+ }
+
+ /* Set speed mode */
+ switch_bank(iobase, SET0);
+ outb(ir_mode, iobase+HCR);
+
+ /* set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+
+ /* set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(0x00, iobase+UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase+UFR);
+
+ netif_wake_queue(self->netdev);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, SET0);
+ if (speed > PIO_MAX_SPEED) {
+ outb(ICR_EFSFI, iobase+ICR);
+ w83977af_dma_receive(self);
+ } else
+ outb(ICR_ERBRI, iobase+ICR);
+
+ /* Restore SSR */
+ outb(set, iobase+SSR);
+}
+
+/*
+ * Function w83977af_hard_xmit (skb, dev)
+ *
+ * Sets up a DMA transfer to send the current frame.
+ *
+ */
+int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ __s32 speed;
+ int iobase;
+ __u8 set;
+ int mtt;
+
+ self = (struct w83977af_ir *) dev->priv;
+
+ iobase = self->io.fir_base;
+
+ IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
+ (int) skb->len);
+
+ /* Lock transmit buffer */
+ netif_stop_queue(dev);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ w83977af_change_speed(self, speed);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ return 0;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Decide if we should use PIO or DMA transfer */
+ if (self->io.speed > PIO_MAX_SPEED) {
+ self->tx_buff.data = self->tx_buff.head;
+ memcpy(self->tx_buff.data, skb->data, skb->len);
+ self->tx_buff.len = skb->len;
+
+ mtt = irda_get_mtt(skb);
+#ifdef CONFIG_USE_INTERNAL_TIMER
+ if (mtt > 50) {
+			/* Adjust for the 1 ms timer resolution, rounding up */
+			mtt = mtt / 1000 + 1;
+
+ /* Setup timer */
+ switch_bank(iobase, SET4);
+ outb(mtt & 0xff, iobase+TMRL);
+ outb((mtt >> 8) & 0x0f, iobase+TMRH);
+
+ /* Start timer */
+ outb(IR_MSL_EN_TMR, iobase+IR_MSL);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ switch_bank(iobase, SET0);
+ outb(ICR_ETMRI, iobase+ICR);
+ } else {
+#endif
+ IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
+ if (mtt)
+ udelay(mtt);
+
+ /* Enable DMA interrupt */
+ switch_bank(iobase, SET0);
+ outb(ICR_EDMAI, iobase+ICR);
+ w83977af_dma_write(self, iobase);
+#ifdef CONFIG_USE_INTERNAL_TIMER
+ }
+#endif
+ } else {
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, SET0);
+ outb(ICR_ETXTHI, iobase+ICR);
+ }
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+
+ /* Restore set register */
+ outb(set, iobase+SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_dma_write (self, iobase)
+ *
+ * Send frame using DMA
+ *
+ */
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
+{
+ __u8 set;
+#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
+ unsigned long flags;
+ __u8 hcr;
+#endif
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, SET2);
+ outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
+#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
+ spin_lock_irqsave(&self->lock, flags);
+
+ disable_dma(self->io.dma);
+ clear_dma_ff(self->io.dma);
+ set_dma_mode(self->io.dma, DMA_MODE_READ);
+ set_dma_addr(self->io.dma, self->tx_buff_dma);
+ set_dma_count(self->io.dma, self->tx_buff.len);
+#else
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_MODE_WRITE);
+#endif
+ self->io.direction = IO_XMIT;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
+ hcr = inb(iobase+HCR);
+ outb(hcr | HCR_EN_DMA, iobase+HCR);
+ enable_dma(self->io.dma);
+ spin_unlock_irqrestore(&self->lock, flags);
+#else
+ outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
+#endif
+
+ /* Restore set register */
+ outb(set, iobase+SSR);
+}
+
+/*
+ * Function w83977af_pio_write (iobase, buf, len, fifo_size)
+ *
+ *    Fill the transmit FIFO with data from buf using PIO, and return
+ *    the number of bytes actually written
+ *
+ */
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 set;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+
+ /* Save current bank */
+ set = inb(iobase+SSR);
+
+ switch_bank(iobase, SET0);
+ if (!(inb_p(iobase+USR) & USR_TSRE)) {
+ IRDA_DEBUG(4,
+ "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ );
+
+ fifo_size -= 17;
+ IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
+ __FUNCTION__ , fifo_size);
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase+TBR);
+ }
+
+ IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
+ __FUNCTION__ , fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(set, iobase+SSR);
+
+ return actual;
+}
+
+/*
+ * Function w83977af_dma_xmit_complete (self)
+ *
+ *    The transfer of a frame is finished, so do the necessary cleanup
+ *
+ *
+ */
+static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+
+ IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+
+	/* Check for underrun! */
+ if (inb(iobase+AUDR) & AUDR_UNDR) {
+ IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );
+
+ self->stats.tx_errors++;
+ self->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 to it */
+ outb(AUDR_UNDR, iobase+AUDR);
+ } else
+ self->stats.tx_packets++;
+
+
+ if (self->new_speed) {
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Unlock tx_buff and request another frame */
+ /* Tell the network layer, that we want more frames */
+ netif_wake_queue(self->netdev);
+
+ /* Restore set */
+ outb(set, iobase+SSR);
+}
+
+/*
+ * Function w83977af_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+int w83977af_dma_receive(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+ unsigned long flags;
+ __u8 hcr;
+#endif
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
+
+ iobase= self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, SET2);
+ outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
+ iobase+ADCR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+ spin_lock_irqsave(&self->lock, flags);
+
+ disable_dma(self->io.dma);
+ clear_dma_ff(self->io.dma);
+ set_dma_mode(self->io.dma, DMA_MODE_READ);
+ set_dma_addr(self->io.dma, self->rx_buff_dma);
+ set_dma_count(self->io.dma, self->rx_buff.truesize);
+#else
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_MODE_READ);
+#endif
+ /*
+	 * Reset the Rx FIFO. This will also flush the ST_FIFO. It's very
+	 * important that we don't reset the Tx FIFO, since it might not
+	 * have finished transmitting yet.
+ */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
+ self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+ hcr = inb(iobase+HCR);
+ outb(hcr | HCR_EN_DMA, iobase+HCR);
+ enable_dma(self->io.dma);
+ spin_unlock_irqrestore(&self->lock, flags);
+#else
+ outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
+#endif
+ /* Restore set */
+ outb(set, iobase+SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_dma_receive_complete (self)
+ *
+ * Finished with receiving a frame
+ *
+ */
+int w83977af_dma_receive_complete(struct w83977af_ir *self)
+{
+ struct sk_buff *skb;
+ struct st_fifo *st_fifo;
+ int len;
+ int iobase;
+ __u8 set;
+ __u8 status;
+
+ IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
+
+ st_fifo = &self->st_fifo;
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ iobase = self->io.fir_base;
+
+ /* Read status FIFO */
+ switch_bank(iobase, SET5);
+ while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
+ st_fifo->entries[st_fifo->tail].status = status;
+
+ st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
+
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ while (st_fifo->len) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FS_FO_ERR_MSK) {
+ if (status & FS_FO_LST_FR) {
+ /* Add number of lost frames to stats */
+ self->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FS_FO_MX_LEX)
+ self->stats.rx_length_errors++;
+
+ if (status & FS_FO_PHY_ERR)
+ self->stats.rx_frame_errors++;
+
+ if (status & FS_FO_CRC_ERR)
+ self->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FS_FO_RX_OV)
+ self->stats.rx_fifo_errors++;
+
+ if (status & FS_FO_FSF_OV)
+ self->stats.rx_fifo_errors++;
+
+ } else {
+ /* Check if we have transferred all data to memory */
+ switch_bank(iobase, SET0);
+ if (inb(iobase+USR) & USR_RDR) {
+#ifdef CONFIG_USE_INTERNAL_TIMER
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+
+ /* Restore set register */
+ outb(set, iobase+SSR);
+
+ return FALSE; /* I'll be back! */
+#else
+ udelay(80); /* Should be enough!? */
+#endif
+ }
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL) {
+ printk(KERN_INFO
+ "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
+ /* Restore set register */
+ outb(set, iobase+SSR);
+
+ return FALSE;
+ }
+
+ /* Align to 20 bytes */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len-2);
+ memcpy(skb->data, self->rx_buff.data, len-2);
+ } else {
+ skb_put(skb, len-4);
+ memcpy(skb->data, self->rx_buff.data, len-4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ self->netdev->last_rx = jiffies;
+ }
+ }
+ /* Restore set register */
+ outb(set, iobase+SSR);
+
+ return TRUE;
+}
+
+/*
+ * Function w83977af_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void w83977af_pio_receive(struct w83977af_ir *self)
+{
+ __u8 byte = 0x00;
+ int iobase;
+
+ IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase+RBR);
+ async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+ byte);
+ } while (inb(iobase+USR) & USR_RDR); /* Data available */
+}
+
+/*
+ * Function w83977af_sir_interrupt (self, isr)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
+{
+ int actual;
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);
+
+ iobase = self->io.fir_base;
+ /* Transmit FIFO low on data */
+ if (isr & ISR_TXTH_I) {
+ /* Write data left in transmit buffer */
+ actual = w83977af_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0) {
+ new_icr |= ICR_ETXTHI;
+ } else {
+ set = inb(iobase+SSR);
+ switch_bank(iobase, SET0);
+ outb(AUDR_SFEND, iobase+AUDR);
+ outb(set, iobase+SSR);
+
+ self->stats.tx_packets++;
+
+ /* Feed me more packets */
+ netif_wake_queue(self->netdev);
+ new_icr |= ICR_ETBREI;
+ }
+ }
+ /* Check if transmission has completed */
+ if (isr & ISR_TXEMP_I) {
+		/* Check if we need to change the speed */
+ if (self->new_speed) {
+ IRDA_DEBUG(2,
+ "%s(), Changing speed!\n", __FUNCTION__ );
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ new_icr |= ICR_ERBRI;
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (isr & ISR_RXTH_I) {
+ w83977af_pio_receive(self);
+
+ /* Keep receiving */
+ new_icr |= ICR_ERBRI;
+ }
+ return new_icr;
+}
+
+/*
+ * Function w83977af_fir_interrupt (self, isr)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
+{
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ iobase = self->io.fir_base;
+ set = inb(iobase+SSR);
+
+ /* End of frame detected in FIFO */
+ if (isr & (ISR_FEND_I|ISR_FSF_I)) {
+ if (w83977af_dma_receive_complete(self)) {
+
+ /* Wait for next status FIFO interrupt */
+ new_icr |= ICR_EFSFI;
+ } else {
+ /* DMA not finished yet */
+
+ /* Set timer value, resolution 1 ms */
+ switch_bank(iobase, SET4);
+ outb(0x01, iobase+TMRL); /* 1 ms */
+ outb(0x00, iobase+TMRH);
+
+ /* Start timer */
+ outb(IR_MSL_EN_TMR, iobase+IR_MSL);
+
+ new_icr |= ICR_ETMRI;
+ }
+ }
+ /* Timer finished */
+ if (isr & ISR_TMR_I) {
+ /* Disable timer */
+ switch_bank(iobase, SET4);
+ outb(0, iobase+IR_MSL);
+
+ /* Clear timer event */
+ /* switch_bank(iobase, SET0); */
+/* outb(ASCR_CTE, iobase+ASCR); */
+
+ /* Check if this is a TX timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ w83977af_dma_write(self, iobase);
+
+ new_icr |= ICR_EDMAI;
+ } else {
+ /* Check if DMA has now finished */
+ w83977af_dma_receive_complete(self);
+
+ new_icr |= ICR_EFSFI;
+ }
+ }
+ /* Finished with DMA */
+ if (isr & ISR_DMA_I) {
+ w83977af_dma_xmit_complete(self);
+
+ /* Check if there are more frames to be transmitted */
+ /* if (irda_device_txqueue_empty(self)) { */
+
+ /* Prepare for receive
+ *
+ * ** Netwinder Tx DMA likes that we do this anyway **
+ */
+ w83977af_dma_receive(self);
+ new_icr = ICR_EFSFI;
+ /* } */
+ }
+
+ /* Restore set */
+ outb(set, iobase+SSR);
+
+ return new_icr;
+}
+
+/*
+ * Function w83977af_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t w83977af_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct w83977af_ir *self;
+ __u8 set, icr, isr;
+ int iobase;
+
+ if (!dev) {
+ printk(KERN_WARNING "%s: irq %d for unknown device.\n",
+ driver_name, irq);
+ return IRQ_NONE;
+ }
+ self = (struct w83977af_ir *) dev->priv;
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ set = inb(iobase+SSR);
+ switch_bank(iobase, SET0);
+
+ icr = inb(iobase+ICR);
+ isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
+
+ outb(0, iobase+ICR); /* Disable interrupts */
+
+ if (isr) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > PIO_MAX_SPEED )
+ icr = w83977af_fir_interrupt(self, isr);
+ else
+ icr = w83977af_sir_interrupt(self, isr);
+ }
+
+ outb(icr, iobase+ICR); /* Restore (new) interrupts */
+ outb(set, iobase+SSR); /* Restore bank register */
+ return IRQ_RETVAL(isr);
+}
+
+/*
+ * Function w83977af_is_receiving (self)
+ *
+ *    Return TRUE if we are currently receiving a frame
+ *
+ */
+static int w83977af_is_receiving(struct w83977af_ir *self)
+{
+ int status = FALSE;
+ int iobase;
+ __u8 set;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ set = inb(iobase+SSR);
+ switch_bank(iobase, SET2);
+ if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(set, iobase+SSR);
+ } else
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ return status;
+}
+
+/*
+ * Function w83977af_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int w83977af_net_open(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ char hwname[32];
+ __u8 set;
+
+ IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = (struct w83977af_ir *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
+ (void *) dev)) {
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+		free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Enable some interrupts so we can receive frames again */
+ switch_bank(iobase, SET0);
+ if (self->io.speed > 115200) {
+ outb(ICR_EFSFI, iobase+ICR);
+ w83977af_dma_receive(self);
+ } else
+ outb(ICR_ERBRI, iobase+ICR);
+
+ /* Restore bank register */
+ outb(set, iobase+SSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int w83977af_net_close(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ __u8 set;
+
+ IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = (struct w83977af_ir *) dev->priv;
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Save current set */
+ set = inb(iobase+SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase+ICR);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(set, iobase+SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct w83977af_ir *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = dev->priv;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ w83977af_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = w83977af_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
+{
+ struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;
+
+ return &self->stats;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
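+
+/*
+ * Usage sketch (hypothetical example values, not the driver's defaults):
+ * the parameters declared above let the probe addresses, IRQ lines and
+ * minimum turn time be overridden at load time, e.g.
+ *
+ *	modprobe w83977af_ir io=0x180 irq=11 qos_mtt_bits=0x07
+ *
+ * io and irq are arrays, so comma-separated lists may be given for
+ * multiple chips. The module name is assumed from the source file name;
+ * adjust the values to match how the SuperIO chip is wired on your board.
+ */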
+
+/*
+ * Function init_module (void)
+ *
+ *
+ *
+ */
+module_init(w83977af_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ *
+ *
+ */
+module_exit(w83977af_cleanup);
diff --git a/drivers/net/irda/w83977af_ir.h b/drivers/net/irda/w83977af_ir.h
new file mode 100644
index 000000000000..0b7661deafee
--- /dev/null
+++ b/drivers/net/irda/w83977af_ir.h
@@ -0,0 +1,199 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Thu Nov 19 13:55:34 1998
+ * Modified at: Tue Jan 11 13:08:19 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef W83977AF_IR_H
+#define W83977AF_IR_H
+
+#include <asm/io.h>
+#include <linux/types.h>
+
+/* Flags for configuration register CRF0 */
+#define ENBNKSEL 0x01
+#define APEDCRC 0x02
+#define TXW4C 0x04
+#define RXW4C 0x08
+
+/* Bank 0 */
+#define RBR 0x00 /* Receiver buffer register */
+#define TBR 0x00 /* Transmitter buffer register */
+
+#define ICR 0x01 /* Interrupt configuration register */
+#define ICR_ERBRI 0x01 /* Receiver buffer register interrupt */
+#define ICR_ETBREI 0x02 /* Transmitter buffer register empty interrupt */
+#define ICR_EUSRI 0x04 /* IR status interrupt */
+#define ICR_EHSRI 0x04
+#define ICR_ETXURI 0x04 /* Tx underrun */
+#define ICR_EDMAI 0x10 /* DMA interrupt */
+#define ICR_ETXTHI 0x20 /* Transmitter threshold interrupt */
+#define ICR_EFSFI 0x40 /* Frame status FIFO interrupt */
+#define ICR_ETMRI 0x80 /* Timer interrupt */
+
+#define UFR 0x02 /* FIFO control register */
+#define UFR_EN_FIFO 0x01 /* Enable FIFO's */
+#define UFR_RXF_RST 0x02 /* Reset Rx FIFO */
+#define UFR_TXF_RST 0x04 /* Reset Tx FIFO */
+#define UFR_RXTL 0x80 /* Rx FIFO threshold (set to 16) */
+#define UFR_TXTL 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define ISR 0x02 /* Interrupt status register */
+#define ISR_RXTH_I 0x01 /* Receive threshold interrupt */
+#define ISR_TXEMP_I 0x02 /* Transmitter empty interrupt */
+#define ISR_FEND_I 0x04
+#define ISR_DMA_I 0x10
+#define ISR_TXTH_I 0x20 /* Transmitter threshold interrupt */
+#define ISR_FSF_I 0x40
+#define ISR_TMR_I 0x80 /* Timer interrupt */
+
+#define UCR 0x03 /* Uart control register */
+#define UCR_DLS8 0x03 /* 8N1 */
+
+#define SSR 0x03 /* Sets select register */
+#define SET0 UCR_DLS8 /* Make sure we keep 8N1 */
+#define SET1 (0x80|UCR_DLS8) /* Make sure we keep 8N1 */
+#define SET2 0xE0
+#define SET3 0xE4
+#define SET4 0xE8
+#define SET5 0xEC
+#define SET6 0xF0
+#define SET7 0xF4
+
+#define HCR 0x04
+#define HCR_MODE_MASK ~(0xD0)
+#define HCR_SIR 0x60
+#define HCR_MIR_576 0x20
+#define HCR_MIR_1152 0x80
+#define HCR_FIR 0xA0
+#define HCR_EN_DMA 0x04
+#define HCR_EN_IRQ 0x08
+#define HCR_TX_WT 0x08
+
+#define USR 0x05 /* IR status register */
+#define USR_RDR 0x01 /* Receive data ready */
+#define USR_TSRE 0x40 /* Transmitter empty? */
+
+#define AUDR 0x07
+#define AUDR_SFEND 0x08 /* Set a frame end */
+#define AUDR_RXBSY 0x20 /* Rx busy */
+#define AUDR_UNDR 0x40 /* Transmitter underrun */
+
+/* Set 2 */
+#define ABLL 0x00 /* Advanced baud rate divisor latch (low byte) */
+#define ABHL 0x01 /* Advanced baud rate divisor latch (high byte) */
+
+#define ADCR1 0x02
+#define ADCR1_ADV_SL 0x01
+#define ADCR1_D_CHSW 0x08 /* The specs are wrong; it's bit 3, not 4 */
+#define ADCR1_DMA_F 0x02
+
+#define ADCR2 0x04
+#define ADCR2_TXFS32 0x01
+#define ADCR2_RXFS32 0x04
+
+#define RXFDTH 0x07
+
+/* Set 3 */
+#define AUID 0x00
+
+/* Set 4 */
+#define TMRL 0x00 /* Timer value register (low byte) */
+#define TMRH 0x01 /* Timer value register (high byte) */
+
+#define IR_MSL 0x02 /* Infrared mode select */
+#define IR_MSL_EN_TMR 0x01 /* Enable timer */
+
+#define TFRLL 0x04 /* Transmitter frame length (low byte) */
+#define TFRLH 0x05 /* Transmitter frame length (high byte) */
+#define RFRLL 0x06 /* Receiver frame length (low byte) */
+#define RFRLH 0x07 /* Receiver frame length (high byte) */
+
+/* Set 5 */
+
+#define FS_FO 0x05 /* Frame status FIFO */
+#define FS_FO_FSFDR 0x80 /* Frame status FIFO data ready */
+#define FS_FO_LST_FR 0x40 /* Frame lost */
+#define FS_FO_MX_LEX 0x10 /* Max frame len exceeded */
+#define FS_FO_PHY_ERR 0x08 /* Physical layer error */
+#define FS_FO_CRC_ERR 0x04
+#define FS_FO_RX_OV 0x02 /* Receive overrun */
+#define FS_FO_FSF_OV 0x01 /* Frame status FIFO overrun */
+#define FS_FO_ERR_MSK 0x5f /* Error mask */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Set 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Set 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[10];
+ int head;
+ int tail;
+ int len;
+};
+
+/* Private data for each instance */
+struct w83977af_ir {
+ struct st_fifo st_fifo;
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+
+	struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ /* Note : currently locking is *very* incomplete, but this
+ * will get you started. Check in nsc-ircc.c for a proper
+ * locking strategy. - Jean II */
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+};
+
+static inline void switch_bank( int iobase, int set)
+{
+ outb(set, iobase+SSR);
+}
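+
+/*
+ * The chip multiplexes its registers into banks selected through SSR.
+ * A minimal sketch of the access pattern used throughout w83977af_ir.c
+ * (save the current bank, switch, access a register, restore):
+ *
+ *	__u8 set = inb(iobase+SSR);	save the current bank
+ *	switch_bank(iobase, SET0);	select bank 0
+ *	outb(0, iobase+ICR);		e.g. mask all interrupts
+ *	outb(set, iobase+SSR);		restore the previous bank
+ */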
+
+#endif
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
new file mode 100644
index 000000000000..50bebb55e9ee
--- /dev/null
+++ b/drivers/net/isa-skeleton.c
@@ -0,0 +1,724 @@
+/* isa-skeleton.c: A network driver outline for linux.
+ *
+ * Written 1993-94 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The author may be reached as becker@scyld.com, or C/O
+ * Scyld Computing Corporation
+ * 410 Severn Ave., Suite 210
+ * Annapolis MD 21403
+ *
+ * This file is an outline for writing a network device driver for
+ * the Linux operating system.
+ *
+ * To write (or understand) a driver, have a look at the "loopback.c" file to
+ * get a feel of what is going on, and then use the code below as a skeleton
+ * for the new driver.
+ *
+ */
+
+static const char *version =
+ "isa-skeleton.c:v1.51 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+/*
+ * Sources:
+ * List your sources of programming information to document that
+ * the driver is your own creation, and give due credit to others
+ * that contributed to the work. Remember that GNU project code
+ * cannot use proprietary or trade secret information. Interface
+ * definitions are generally considered non-copyrightable to the
+ * extent that the same names and structures must be used to be
+ * compatible.
+ *
+ * Finally, keep in mind that the Linux kernel has an API, not an
+ * ABI. Proprietary object-code-only distributions are not permitted
+ * under the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+/*
+ * The name of the card. It is used for messages and in the requests for
+ * I/O regions, IRQs and DMA channels.
+ */
+static const char* cardname = "netcard";
+
+/* First, a few definitions that the brave might change. */
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int netcard_portlist[] __initdata =
+ { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/* use 0 for production, 1 for verification, 2 or more for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define NETCARD_IO_EXTENT 32
+
+#define MY_TX_TIMEOUT ((400*HZ)/1000) /* 400 ms, expressed in jiffies */
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ long open_time; /* Useless example local info. */
+
+ /* Tx control lock. This protects the transmit buffer ring
+ * state along with the "tx full" state of the driver. This
+ * means all netif_queue flow control actions are protected
+ * by this lock as well.
+ */
+ spinlock_t lock;
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0x42
+#define SA_ADDR2 0x65
+
+/* Index to functions, as function prototypes. */
+
+static int netcard_probe1(struct net_device *dev, int ioaddr);
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void net_tx_timeout(struct net_device *dev);
+
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) 1
+static void hardware_send_packet(short ioaddr, char *buf, int length);
+static void chipset_init(struct net_device *dev, int startp);
+
+/*
+ * Check for a network adaptor of this type, and return '0' iff one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+static int __init do_netcard_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return netcard_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (netcard_probe1(dev, ioaddr) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+#ifdef jumpered_dma
+ free_dma(dev->dma);
+#endif
+#ifdef jumpered_interrupts
+ free_irq(dev->irq, dev);
+#endif
+ release_region(dev->base_addr, NETCARD_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init netcard_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_netcard_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/*
+ * This is the real probe routine. Linux has a history of friendly device
+ * probes on the ISA bus. A good device probe avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ */
+static int __init netcard_probe1(struct net_device *dev, int ioaddr)
+{
+ struct net_local *np;
+ static unsigned version_printed;
+ int i;
+ int err = -ENODEV;
+
+ /* Grab the region so that no one else tries to probe our ioports. */
+ if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
+ return -EBUSY;
+
+ /*
+ * For ethernet adaptors the first three octets of the station address
+	 * contain the manufacturer's unique code. That might be a good probe
+ * method. Ideally you would add additional checks.
+ */
+ if (inb(ioaddr + 0) != SA_ADDR0
+ || inb(ioaddr + 1) != SA_ADDR1
+ || inb(ioaddr + 2) != SA_ADDR2)
+ goto out;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_DEBUG "%s", version);
+
+ printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cardname, ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ err = -EAGAIN;
+#ifdef jumpered_interrupts
+ /*
+ * If this board has jumpered interrupts, allocate the interrupt
+ * vector now. There is no point in waiting since no other device
+ * can use the interrupt, and this marks the irq as busy. Jumpered
+ * interrupts are typically not reported by the boards, and we must
+	 * use autoIRQ to find them.
+ */
+
+ if (dev->irq == -1)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ unsigned long irq_mask = probe_irq_on();
+ /* Trigger an interrupt here. */
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (net_debug >= 2)
+ printk(" autoirq is %d", dev->irq);
+ } else if (dev->irq == 2)
+ /*
+ * Fixup for users that don't know that IRQ 2 is really
+ * IRQ9, or don't know which one to set.
+ */
+ dev->irq = 9;
+
+ {
+ int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev);
+ if (irqval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name, dev->irq, irqval);
+ goto out;
+ }
+ }
+#endif /* jumpered interrupt */
+#ifdef jumpered_dma
+ /*
+ * If we use a jumpered DMA channel, that should be probed for and
+ * allocated here as well. See lance.c for an example.
+ */
+ if (dev->dma == 0) {
+ if (request_dma(dev->dma, cardname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ goto out1;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else {
+ short dma_status, new_dma_status;
+
+ /* Read the DMA channel status registers. */
+ dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ /* Trigger a DMA request, perhaps pause a bit. */
+ outw(0x1234, ioaddr + 8);
+ /* Re-read the DMA status registers. */
+ new_dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ /*
+		 * Eliminate the old and floating requests,
+		 * and DMA4, the cascade channel.
+ */
+ new_dma_status ^= dma_status;
+ new_dma_status &= ~0x10;
+ for (i = 7; i > 0; i--)
+ if (test_bit(i, &new_dma_status)) {
+ dev->dma = i;
+ break;
+ }
+ if (i <= 0) {
+ printk("DMA probe failed.\n");
+ goto out1;
+ }
+ if (request_dma(dev->dma, cardname)) {
+ printk("probed DMA %d allocation failed.\n", dev->dma);
+ goto out1;
+ }
+ }
+#endif /* jumpered DMA */
+
+ np = netdev_priv(dev);
+ spin_lock_init(&np->lock);
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->tx_timeout = &net_tx_timeout;
+ dev->watchdog_timeo = MY_TX_TIMEOUT;
+ return 0;
+out1:
+#ifdef jumpered_interrupts
+ free_irq(dev->irq, dev);
+#endif
+out:
+	release_region(ioaddr, NETCARD_IO_EXTENT);
+ return err;
+}
+
+static void net_tx_timeout(struct net_device *dev)
+{
+ struct net_local *np = netdev_priv(dev);
+
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+
+ /* Try to restart the adaptor. */
+ chipset_init(dev, 1);
+
+ np->stats.tx_errors++;
+
+ /* If we have space available to accept new transmit
+ * requests, wake up the queueing layer. This would
+ * be the case if the chipset_init() call above just
+ * flushes out the tx queue and empties it.
+ *
+ * If instead, the tx queue is retained then the
+ * netif_wake_queue() call should be placed in the
+ * TX completion interrupt handler of the driver instead
+ * of here.
+ */
+ if (!tx_full(dev))
+ netif_wake_queue(dev);
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+net_open(struct net_device *dev)
+{
+ struct net_local *np = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ /*
+	 * This is used if the interrupt line can be turned off (shared).
+ * See 3c503.c for an example of selecting the IRQ at config-time.
+ */
+ if (request_irq(dev->irq, &net_interrupt, 0, cardname, dev)) {
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(dev->dma, cardname)) {
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ chipset_init(dev, 1);
+ outb(0x00, ioaddr);
+ np->open_time = jiffies;
+
+	/* We are now ready to accept transmit requests from
+	 * the queueing layer of the networking stack.
+ */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *np = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* If some error occurs while trying to transmit this
+ * packet, you should return '1' from this function.
+ * In such a case you _may not_ do anything to the
+ * SKB, it is still owned by the network queueing
+ * layer when an error is returned. This means you
+ * may not modify any SKB fields, you may not free
+ * the SKB, etc.
+ */
+
+#if TX_RING
+ /* This is the most common case for modern hardware.
+ * The spinlock protects this code from the TX complete
+ * hardware interrupt handler. Queue flow control is
+ * thus managed under this lock as well.
+ */
+ spin_lock_irq(&np->lock);
+
+ add_to_tx_ring(np, skb, length);
+ dev->trans_start = jiffies;
+
+ /* If we just used up the very last entry in the
+ * TX ring on this device, tell the queueing
+ * layer to send no more.
+ */
+ if (tx_full(dev))
+ netif_stop_queue(dev);
+
+ /* When the TX completion hw interrupt arrives, this
+ * is when the transmit statistics are updated.
+ */
+
+ spin_unlock_irq(&np->lock);
+#else
+ /* This is the case for older hardware which takes
+ * a single transmit buffer at a time, and it is
+ * just written to the device via PIO.
+ *
+ * No spin locking is needed since there is no TX complete
+ * event. If by chance your card does have a TX complete
+ * hardware IRQ then you may need to utilize np->lock here.
+ */
+ hardware_send_packet(ioaddr, buf, length);
+ np->stats.tx_bytes += skb->len;
+
+ dev->trans_start = jiffies;
+
+ /* You might need to clean up and record Tx statistics here. */
+ if (inw(ioaddr) == /*RU*/81)
+ np->stats.tx_aborted_errors++;
+ dev_kfree_skb (skb);
+#endif
+
+ return 0;
+}
+
+#if TX_RING
+/* This handles TX complete events posted by the device
+ * via interrupts.
+ */
+void net_tx(struct net_device *dev)
+{
+ struct net_local *np = netdev_priv(dev);
+ int entry;
+
+ /* This protects us from concurrent execution of
+ * our dev->hard_start_xmit function above.
+ */
+ spin_lock(&np->lock);
+
+ entry = np->tx_old;
+ while (tx_entry_is_sent(np, entry)) {
+ struct sk_buff *skb = np->skbs[entry];
+
+ np->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq (skb);
+
+ entry = next_tx_entry(np, entry);
+ }
+ np->tx_old = entry;
+
+ /* If we had stopped the queue due to a "tx full"
+ * condition, and space has now been made available,
+ * wake up the queue.
+ */
+ if (netif_queue_stopped(dev) && ! tx_full(dev))
+ netif_wake_queue(dev);
+
+ spin_unlock(&np->lock);
+}
+#endif
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *np;
+ int ioaddr, status;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+
+ np = netdev_priv(dev);
+ status = inw(ioaddr + 0);
+
+ if (status == 0)
+ goto out;
+ handled = 1;
+
+ if (status & RX_INTR) {
+ /* Got a packet(s). */
+ net_rx(dev);
+ }
+#if TX_RING
+ if (status & TX_INTR) {
+ /* Transmit complete. */
+ net_tx(dev);
+ np->stats.tx_packets++;
+ netif_wake_queue(dev);
+ }
+#endif
+ if (status & COUNTERS_INTR) {
+ /* Increment the appropriate 'localstats' field. */
+ np->stats.tx_window_errors++;
+ }
+out:
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int boguscount = 10;
+
+ do {
+ int status = inw(ioaddr);
+ int pkt_len = inw(ioaddr);
+
+ if (pkt_len == 0) /* Read all the frames? */
+ break; /* Done for now */
+
+ if (status & 0x40) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ lp->stats.rx_bytes+=pkt_len;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start,
+ pkt_len);
+ /* or */
+ insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ } while (--boguscount);
+
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx here. */
+
+ disable_dma(dev->dma);
+
+ /* If not IRQ or DMA jumpered, free up the line. */
+ outw(0x00, ioaddr+0); /* Release the physical interrupt line. */
+
+ free_irq(dev->irq, dev);
+ free_dma(dev->dma);
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+
+ /* Update the statistics from the device registers. */
+ lp->stats.rx_missed_errors = inw(ioaddr+1);
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Enable promiscuous mode */
+ outw(MULTICAST|PROMISC, ioaddr);
+ }
+ else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+ {
+ /* Disable promiscuous mode, use normal mode. */
+ hardware_set_filter(NULL);
+
+ outw(MULTICAST, ioaddr);
+ }
+ else if(dev->mc_count)
+ {
+ /* Walk the address list, and load the filter */
+ hardware_set_filter(dev->mc_list);
+
+ outw(MULTICAST, ioaddr);
+ }
+ else
+ outw(0, ioaddr);
+}
+
+#ifdef MODULE
+
+static struct net_device *this_device;
+static int io = 0x300;
+static int irq;
+static int dma;
+static int mem;
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int result;
+
+ if (io == 0)
+ printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
+ cardname);
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return -ENOMEM;
+
+ /* Copy the parameters from insmod into the device structure. */
+ dev->base_addr = io;
+ dev->irq = irq;
+ dev->dma = dma;
+ dev->mem_start = mem;
+	if (do_netcard_probe(dev) == 0) {
+		if (register_netdev(dev) == 0) {
+			this_device = dev;
+			return 0;
+		}
+		cleanup_card(dev);
+	}
+ free_netdev(dev);
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(this_device);
+ cleanup_card(this_device);
+ free_netdev(this_device);
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command:
+ * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -Wwrite-strings
+ * -Wredundant-decls -O2 -m486 -c skeleton.c
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
new file mode 100644
index 000000000000..855f8b2cf13b
--- /dev/null
+++ b/drivers/net/iseries_veth.c
@@ -0,0 +1,1422 @@
+/* File veth.c created by Kyle A. Lucke on Mon Aug 7 2000. */
+/*
+ * IBM eServer iSeries Virtual Ethernet Device Driver
+ * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
+ * Substantially cleaned up by:
+ * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ *
+ * This module implements the virtual ethernet device for iSeries LPAR
+ * Linux. It uses hypervisor message passing to implement an
+ * ethernet-like network device communicating between partitions on
+ * the iSeries.
+ *
+ * The iSeries LPAR hypervisor currently allows for up to 16 different
+ * virtual ethernets. These are all dynamically configurable on
+ * OS/400 partitions, but dynamic configuration is not supported under
+ * Linux yet. An ethXX network device will be created for each
+ * virtual ethernet this partition is connected to.
+ *
+ * - This driver is responsible for routing packets to and from other
+ * partitions. The MAC addresses used by the virtual ethernets
+ *   contain meaning and must not be modified.
+ *
+ * - Having 2 virtual ethernets to the same remote partition DOES NOT
+ * double the available bandwidth. The 2 devices will share the
+ * available hypervisor bandwidth.
+ *
+ * - If you send a packet to your own MAC address, it will just be
+ *   dropped; you won't get it on the receive side.
+ *
+ * - Multicast is implemented by sending the frame to every
+ * other partition. It is the responsibility of the receiving
+ * partition to filter the addresses desired.
+ *
+ * Tunable parameters:
+ *
+ * VETH_NUMBUFFERS: This compile time option defaults to 120. It
+ * controls how much memory Linux will allocate per remote partition
+ * it is communicating with. It can be thought of as the maximum
+ * number of packets outstanding to a remote partition at a time.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <asm/iSeries/mf.h>
+#include <asm/iSeries/iSeries_pci.h>
+#include <asm/uaccess.h>
+
+#include <asm/iSeries/HvLpConfig.h>
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iommu.h>
+#include <asm/vio.h>
+
+#include "iseries_veth.h"
+
+MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
+MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define VETH_NUMBUFFERS (120)
+#define VETH_ACKTIMEOUT (1000000) /* microseconds */
+#define VETH_MAX_MCAST (12)
+
+#define VETH_MAX_MTU (9000)
+
+#if VETH_NUMBUFFERS < 10
+#define ACK_THRESHOLD (1)
+#elif VETH_NUMBUFFERS < 20
+#define ACK_THRESHOLD (4)
+#elif VETH_NUMBUFFERS < 40
+#define ACK_THRESHOLD (10)
+#else
+#define ACK_THRESHOLD (20)
+#endif
+
+#define VETH_STATE_SHUTDOWN (0x0001)
+#define VETH_STATE_OPEN (0x0002)
+#define VETH_STATE_RESET (0x0004)
+#define VETH_STATE_SENTMON (0x0008)
+#define VETH_STATE_SENTCAPS (0x0010)
+#define VETH_STATE_GOTCAPACK (0x0020)
+#define VETH_STATE_GOTCAPS (0x0040)
+#define VETH_STATE_SENTCAPACK (0x0080)
+#define VETH_STATE_READY (0x0100)
+
+struct veth_msg {
+ struct veth_msg *next;
+ struct VethFramesData data;
+ int token;
+ unsigned long in_use;
+ struct sk_buff *skb;
+ struct device *dev;
+};
+
+struct veth_lpar_connection {
+ HvLpIndex remote_lp;
+ struct work_struct statemachine_wq;
+ struct veth_msg *msgs;
+ int num_events;
+ struct VethCapData local_caps;
+
+ struct timer_list ack_timer;
+
+ spinlock_t lock;
+ unsigned long state;
+ HvLpInstanceId src_inst;
+ HvLpInstanceId dst_inst;
+ struct VethLpEvent cap_event, cap_ack_event;
+ u16 pending_acks[VETH_MAX_ACKS_PER_MSG];
+ u32 num_pending_acks;
+
+ int num_ack_events;
+ struct VethCapData remote_caps;
+ u32 ack_timeout;
+
+ spinlock_t msg_stack_lock;
+ struct veth_msg *msg_stack_head;
+};
+
+struct veth_port {
+ struct device *dev;
+ struct net_device_stats stats;
+ u64 mac_addr;
+ HvLpIndexMap lpar_map;
+
+ spinlock_t pending_gate;
+ struct sk_buff *pending_skb;
+ HvLpIndexMap pending_lpmask;
+
+ rwlock_t mcast_gate;
+ int promiscuous;
+ int all_mcast;
+ int num_mcast;
+ u64 mcast_addr[VETH_MAX_MCAST];
+};
+
+static HvLpIndex this_lp;
+static struct veth_lpar_connection *veth_cnx[HVMAXARCHITECTEDLPS]; /* = 0 */
+static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
+
+static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
+static void veth_flush_pending(struct veth_lpar_connection *cnx);
+static void veth_receive(struct veth_lpar_connection *, struct VethLpEvent *);
+static void veth_timed_ack(unsigned long connectionPtr);
+
+/*
+ * Utility functions
+ */
+
+#define veth_printk(prio, fmt, args...) \
+ printk(prio "%s: " fmt, __FILE__, ## args)
+
+#define veth_error(fmt, args...) \
+ printk(KERN_ERR "(%s:%3.3d) ERROR: " fmt, __FILE__, __LINE__ , ## args)
+
+static inline void veth_stack_push(struct veth_lpar_connection *cnx,
+ struct veth_msg *msg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->msg_stack_lock, flags);
+ msg->next = cnx->msg_stack_head;
+ cnx->msg_stack_head = msg;
+ spin_unlock_irqrestore(&cnx->msg_stack_lock, flags);
+}
+
+static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
+{
+ unsigned long flags;
+ struct veth_msg *msg;
+
+ spin_lock_irqsave(&cnx->msg_stack_lock, flags);
+ msg = cnx->msg_stack_head;
+ if (msg)
+ cnx->msg_stack_head = cnx->msg_stack_head->next;
+ spin_unlock_irqrestore(&cnx->msg_stack_lock, flags);
+ return msg;
+}
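+
+/* cnx->msg_stack_head is a simple LIFO free list of the connection's
+ * transmit message slots, protected by msg_stack_lock. */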
+
+static inline HvLpEvent_Rc
+veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
+ HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
+ u64 token,
+ u64 data1, u64 data2, u64 data3, u64 data4, u64 data5)
+{
+ return HvCallEvent_signalLpEventFast(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ subtype, ackind, acktype,
+ cnx->src_inst,
+ cnx->dst_inst,
+ token, data1, data2, data3,
+ data4, data5);
+}
+
+static inline HvLpEvent_Rc veth_signaldata(struct veth_lpar_connection *cnx,
+ u16 subtype, u64 token, void *data)
+{
+ u64 *p = (u64 *) data;
+
+ return veth_signalevent(cnx, subtype, HvLpEvent_AckInd_NoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ token, p[0], p[1], p[2], p[3], p[4]);
+}
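+
+/* A VirtualLan LP event carries exactly five u64s of payload, which is
+ * why veth_signaldata() passes the 40-byte caps/frames/acks structures
+ * as p[0]..p[4]. */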
+
+struct veth_allocation {
+ struct completion c;
+ int num;
+};
+
+static void veth_complete_allocation(void *parm, int number)
+{
+ struct veth_allocation *vc = (struct veth_allocation *)parm;
+
+ vc->num = number;
+ complete(&vc->c);
+}
+
+static int veth_allocate_events(HvLpIndex rlp, int number)
+{
+ struct veth_allocation vc = { COMPLETION_INITIALIZER(vc.c), 0 };
+
+ mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan,
+ sizeof(struct VethLpEvent), number,
+ &veth_complete_allocation, &vc);
+ wait_for_completion(&vc.c);
+
+ return vc.num;
+}
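+
+/* veth_allocate_events() blocks until the hypervisor callback reports
+ * how many LP events were actually granted; callers compare the return
+ * value against the number they asked for. */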
+
+/*
+ * LPAR connection code
+ */
+
+static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
+{
+ schedule_work(&cnx->statemachine_wq);
+}
+
+static void veth_take_cap(struct veth_lpar_connection *cnx,
+ struct VethLpEvent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ /* Receiving caps may mean the other end has just come up, so
+ * we need to reload the instance ID of the far end */
+ cnx->dst_inst =
+ HvCallEvent_getTargetLpInstanceId(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan);
+
+ if (cnx->state & VETH_STATE_GOTCAPS) {
+ veth_error("Received a second capabilities from lpar %d\n",
+ cnx->remote_lp);
+ event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable;
+ HvCallEvent_ackLpEvent((struct HvLpEvent *) event);
+ } else {
+ memcpy(&cnx->cap_event, event, sizeof(cnx->cap_event));
+ cnx->state |= VETH_STATE_GOTCAPS;
+ veth_kick_statemachine(cnx);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
+ struct VethLpEvent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ if (cnx->state & VETH_STATE_GOTCAPACK) {
+ veth_error("Received a second capabilities ack from lpar %d\n",
+ cnx->remote_lp);
+ } else {
+ memcpy(&cnx->cap_ack_event, event,
+ sizeof(cnx->cap_ack_event));
+ cnx->state |= VETH_STATE_GOTCAPACK;
+ veth_kick_statemachine(cnx);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_take_monitor_ack(struct veth_lpar_connection *cnx,
+ struct VethLpEvent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ veth_printk(KERN_DEBUG, "Monitor ack returned for lpar %d\n",
+ cnx->remote_lp);
+ cnx->state |= VETH_STATE_RESET;
+ veth_kick_statemachine(cnx);
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_handle_ack(struct VethLpEvent *event)
+{
+ HvLpIndex rlp = event->base_event.xTargetLp;
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+
+ BUG_ON(! cnx);
+
+ switch (event->base_event.xSubtype) {
+ case VethEventTypeCap:
+ veth_take_cap_ack(cnx, event);
+ break;
+ case VethEventTypeMonitor:
+ veth_take_monitor_ack(cnx, event);
+ break;
+ default:
+ veth_error("Unknown ack type %d from lpar %d\n",
+ event->base_event.xSubtype, rlp);
+ };
+}
+
+static void veth_handle_int(struct VethLpEvent *event)
+{
+ HvLpIndex rlp = event->base_event.xSourceLp;
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+ unsigned long flags;
+ int i;
+
+ BUG_ON(! cnx);
+
+ switch (event->base_event.xSubtype) {
+ case VethEventTypeCap:
+ veth_take_cap(cnx, event);
+ break;
+ case VethEventTypeMonitor:
+ /* do nothing... this'll hang out here til we're dead,
+ * and the hypervisor will return it for us. */
+ break;
+ case VethEventTypeFramesAck:
+ spin_lock_irqsave(&cnx->lock, flags);
+ for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) {
+ u16 msgnum = event->u.frames_ack_data.token[i];
+
+ if (msgnum < VETH_NUMBUFFERS)
+ veth_recycle_msg(cnx, cnx->msgs + msgnum);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ veth_flush_pending(cnx);
+ break;
+ case VethEventTypeFrames:
+ veth_receive(cnx, event);
+ break;
+ default:
+ veth_error("Unknown interrupt type %d from lpar %d\n",
+ event->base_event.xSubtype, rlp);
+ };
+}
+
+static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
+{
+ struct VethLpEvent *veth_event = (struct VethLpEvent *)event;
+
+ if (event->xFlags.xFunction == HvLpEvent_Function_Ack)
+ veth_handle_ack(veth_event);
+ else if (event->xFlags.xFunction == HvLpEvent_Function_Int)
+ veth_handle_int(veth_event);
+}
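+
+/* veth_handle_event() is registered in veth_module_init() as the single
+ * handler for HvLpEvent_Type_VirtualLan; acks of our monitor/cap events
+ * and interrupts (caps, frames, frame acks) from every remote lpar are
+ * dispatched from here. */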
+
+static int veth_process_caps(struct veth_lpar_connection *cnx)
+{
+ struct VethCapData *remote_caps = &cnx->remote_caps;
+ int num_acks_needed;
+
+ /* Convert timer to jiffies */
+ cnx->ack_timeout = remote_caps->ack_timeout * HZ / 1000000;
+
+ if ( (remote_caps->num_buffers == 0)
+ || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG)
+ || (remote_caps->ack_threshold == 0)
+ || (cnx->ack_timeout == 0) ) {
+ veth_error("Received incompatible capabilities from lpar %d\n",
+ cnx->remote_lp);
+ return HvLpEvent_Rc_InvalidSubtypeData;
+ }
+
+ num_acks_needed = (remote_caps->num_buffers
+ / remote_caps->ack_threshold) + 1;
+
+ /* FIXME: locking on num_ack_events? */
+ if (cnx->num_ack_events < num_acks_needed) {
+ int num;
+
+ num = veth_allocate_events(cnx->remote_lp,
+ num_acks_needed-cnx->num_ack_events);
+ if (num > 0)
+ cnx->num_ack_events += num;
+
+ if (cnx->num_ack_events < num_acks_needed) {
+ veth_error("Couldn't allocate enough ack events for lpar %d\n",
+ cnx->remote_lp);
+
+ return HvLpEvent_Rc_BufferNotAvailable;
+ }
+ }
+
+
+ return HvLpEvent_Rc_Good;
+}
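+
+/* For instance, if the remote advertises the same defaults we use
+ * (120 buffers, ack threshold 20), num_acks_needed works out to
+ * 120/20 + 1 = 7 ack events reserved for that partition. */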
+
+/* FIXME: The gotos here are a bit dubious */
+static void veth_statemachine(void *p)
+{
+ struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+ int rlp = cnx->remote_lp;
+ int rc;
+
+ spin_lock_irq(&cnx->lock);
+
+ restart:
+ if (cnx->state & VETH_STATE_RESET) {
+ int i;
+
+ del_timer(&cnx->ack_timer);
+
+ if (cnx->state & VETH_STATE_OPEN)
+ HvCallEvent_closeLpEventPath(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan);
+
+ /* reset ack data */
+ memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
+ cnx->num_pending_acks = 0;
+
+ cnx->state &= ~(VETH_STATE_RESET | VETH_STATE_SENTMON
+ | VETH_STATE_OPEN | VETH_STATE_SENTCAPS
+ | VETH_STATE_GOTCAPACK | VETH_STATE_GOTCAPS
+ | VETH_STATE_SENTCAPACK | VETH_STATE_READY);
+
+ /* Clean up any leftover messages */
+ if (cnx->msgs)
+ for (i = 0; i < VETH_NUMBUFFERS; ++i)
+ veth_recycle_msg(cnx, cnx->msgs + i);
+ spin_unlock_irq(&cnx->lock);
+ veth_flush_pending(cnx);
+ spin_lock_irq(&cnx->lock);
+ if (cnx->state & VETH_STATE_RESET)
+ goto restart;
+ }
+
+ if (cnx->state & VETH_STATE_SHUTDOWN)
+ /* It's all over, do nothing */
+ goto out;
+
+ if ( !(cnx->state & VETH_STATE_OPEN) ) {
+ if (! cnx->msgs || (cnx->num_events < (2 + VETH_NUMBUFFERS)) )
+ goto cant_cope;
+
+ HvCallEvent_openLpEventPath(rlp, HvLpEvent_Type_VirtualLan);
+ cnx->src_inst =
+ HvCallEvent_getSourceLpInstanceId(rlp,
+ HvLpEvent_Type_VirtualLan);
+ cnx->dst_inst =
+ HvCallEvent_getTargetLpInstanceId(rlp,
+ HvLpEvent_Type_VirtualLan);
+ cnx->state |= VETH_STATE_OPEN;
+ }
+
+ if ( (cnx->state & VETH_STATE_OPEN)
+ && !(cnx->state & VETH_STATE_SENTMON) ) {
+ rc = veth_signalevent(cnx, VethEventTypeMonitor,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_DeferredAck,
+ 0, 0, 0, 0, 0, 0);
+
+ if (rc == HvLpEvent_Rc_Good) {
+ cnx->state |= VETH_STATE_SENTMON;
+ } else {
+ if ( (rc != HvLpEvent_Rc_PartitionDead)
+ && (rc != HvLpEvent_Rc_PathClosed) )
+ veth_error("Error sending monitor to "
+ "lpar %d, rc=%x\n",
+ rlp, (int) rc);
+
+ /* Oh well, hope we get a cap from the other
+ * end and do better when that kicks us */
+ goto out;
+ }
+ }
+
+ if ( (cnx->state & VETH_STATE_OPEN)
+ && !(cnx->state & VETH_STATE_SENTCAPS)) {
+ u64 *rawcap = (u64 *)&cnx->local_caps;
+
+ rc = veth_signalevent(cnx, VethEventTypeCap,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ 0, rawcap[0], rawcap[1], rawcap[2],
+ rawcap[3], rawcap[4]);
+
+ if (rc == HvLpEvent_Rc_Good) {
+ cnx->state |= VETH_STATE_SENTCAPS;
+ } else {
+ if ( (rc != HvLpEvent_Rc_PartitionDead)
+ && (rc != HvLpEvent_Rc_PathClosed) )
+ veth_error("Error sending caps to "
+ "lpar %d, rc=%x\n",
+ rlp, (int) rc);
+ /* Oh well, hope we get a cap from the other
+ * end and do better when that kicks us */
+ goto out;
+ }
+ }
+
+ if ((cnx->state & VETH_STATE_GOTCAPS)
+ && !(cnx->state & VETH_STATE_SENTCAPACK)) {
+ struct VethCapData *remote_caps = &cnx->remote_caps;
+
+ memcpy(remote_caps, &cnx->cap_event.u.caps_data,
+ sizeof(*remote_caps));
+
+ spin_unlock_irq(&cnx->lock);
+ rc = veth_process_caps(cnx);
+ spin_lock_irq(&cnx->lock);
+
+ /* We dropped the lock, so recheck for anything which
+ * might mess us up */
+ if (cnx->state & (VETH_STATE_RESET|VETH_STATE_SHUTDOWN))
+ goto restart;
+
+ cnx->cap_event.base_event.xRc = rc;
+ HvCallEvent_ackLpEvent((struct HvLpEvent *)&cnx->cap_event);
+ if (rc == HvLpEvent_Rc_Good)
+ cnx->state |= VETH_STATE_SENTCAPACK;
+ else
+ goto cant_cope;
+ }
+
+ if ((cnx->state & VETH_STATE_GOTCAPACK)
+ && (cnx->state & VETH_STATE_GOTCAPS)
+ && !(cnx->state & VETH_STATE_READY)) {
+ if (cnx->cap_ack_event.base_event.xRc == HvLpEvent_Rc_Good) {
+ /* Start the ACK timer */
+ cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
+ add_timer(&cnx->ack_timer);
+ cnx->state |= VETH_STATE_READY;
+ } else {
+ veth_printk(KERN_ERR, "Caps rejected (rc=%d) by "
+ "lpar %d\n",
+ cnx->cap_ack_event.base_event.xRc,
+ rlp);
+ goto cant_cope;
+ }
+ }
+
+ out:
+ spin_unlock_irq(&cnx->lock);
+ return;
+
+ cant_cope:
+ /* FIXME: we get here if something happens we really can't
+ * cope with. The link will never work once we get here, and
+ * all we can do is not lock the rest of the system up */
+ veth_error("Badness on connection to lpar %d (state=%04lx) "
+ " - shutting down\n", rlp, cnx->state);
+ cnx->state |= VETH_STATE_SHUTDOWN;
+ spin_unlock_irq(&cnx->lock);
+}
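+
+/* Normal bring-up walks the connection through OPEN -> SENTMON ->
+ * SENTCAPS, then (once GOTCAPS and GOTCAPACK are both set and the
+ * remote accepted our capabilities) SENTCAPACK and finally READY, at
+ * which point the ack timer is running and frames may flow. */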
+
+static int veth_init_connection(u8 rlp)
+{
+ struct veth_lpar_connection *cnx;
+ struct veth_msg *msgs;
+ int i;
+
+ if ( (rlp == this_lp)
+ || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
+ return 0;
+
+ cnx = kmalloc(sizeof(*cnx), GFP_KERNEL);
+ if (! cnx)
+ return -ENOMEM;
+ memset(cnx, 0, sizeof(*cnx));
+
+ cnx->remote_lp = rlp;
+ spin_lock_init(&cnx->lock);
+ INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+ init_timer(&cnx->ack_timer);
+ cnx->ack_timer.function = veth_timed_ack;
+ cnx->ack_timer.data = (unsigned long) cnx;
+ memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
+
+ veth_cnx[rlp] = cnx;
+
+ msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL);
+ if (! msgs) {
+ veth_error("Can't allocate buffers for lpar %d\n", rlp);
+ return -ENOMEM;
+ }
+
+ cnx->msgs = msgs;
+ memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg));
+ spin_lock_init(&cnx->msg_stack_lock);
+
+ for (i = 0; i < VETH_NUMBUFFERS; i++) {
+ msgs[i].token = i;
+ veth_stack_push(cnx, msgs + i);
+ }
+
+ cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS);
+
+ if (cnx->num_events < (2 + VETH_NUMBUFFERS)) {
+ veth_error("Can't allocate events for lpar %d, only got %d\n",
+ rlp, cnx->num_events);
+ return -ENOMEM;
+ }
+
+ cnx->local_caps.num_buffers = VETH_NUMBUFFERS;
+ cnx->local_caps.ack_threshold = ACK_THRESHOLD;
+ cnx->local_caps.ack_timeout = VETH_ACKTIMEOUT;
+
+ return 0;
+}
+
+static void veth_stop_connection(u8 rlp)
+{
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+
+ if (! cnx)
+ return;
+
+ spin_lock_irq(&cnx->lock);
+ cnx->state |= VETH_STATE_RESET | VETH_STATE_SHUTDOWN;
+ veth_kick_statemachine(cnx);
+ spin_unlock_irq(&cnx->lock);
+
+ flush_scheduled_work();
+
+ /* FIXME: not sure if this is necessary - will already have
+ * been deleted by the state machine, just want to make sure
+ * it's not running any more */
+ del_timer_sync(&cnx->ack_timer);
+
+ if (cnx->num_events > 0)
+ mf_deallocate_lp_events(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ cnx->num_events,
+ NULL, NULL);
+ if (cnx->num_ack_events > 0)
+ mf_deallocate_lp_events(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ cnx->num_ack_events,
+ NULL, NULL);
+}
+
+static void veth_destroy_connection(u8 rlp)
+{
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+
+ if (! cnx)
+ return;
+
+ kfree(cnx->msgs);
+ kfree(cnx);
+ veth_cnx[rlp] = NULL;
+}
+
+/*
+ * net_device code
+ */
+
+static int veth_open(struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *) dev->priv;
+
+ memset(&port->stats, 0, sizeof (port->stats));
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int veth_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static struct net_device_stats *veth_get_stats(struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *) dev->priv;
+
+ return &port->stats;
+}
+
+static int veth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void veth_set_multicast_list(struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *) dev->priv;
+ unsigned long flags;
+
+ write_lock_irqsave(&port->mcast_gate, flags);
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ port->promiscuous = 1;
+ } else if ( (dev->flags & IFF_ALLMULTI)
+ || (dev->mc_count > VETH_MAX_MCAST) ) {
+ port->all_mcast = 1;
+ } else {
+ struct dev_mc_list *dmi = dev->mc_list;
+ int i;
+
+ /* Update table */
+ port->num_mcast = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ u8 *addr = dmi->dmi_addr;
+ u64 xaddr = 0;
+
+ if (addr[0] & 0x01) {/* multicast address? */
+ memcpy(&xaddr, addr, ETH_ALEN);
+ port->mcast_addr[port->num_mcast] = xaddr;
+ port->num_mcast++;
+ }
+ dmi = dmi->next;
+ }
+ }
+
+ write_unlock_irqrestore(&port->mcast_gate, flags);
+}
+
+static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, "veth", sizeof(info->driver) - 1);
+ info->driver[sizeof(info->driver) - 1] = '\0';
+ strncpy(info->version, "1.0", sizeof(info->version) - 1);
+}
+
+static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = (SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_1000baseT_Full
+ | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = 0;
+ ecmd->speed = SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->maxtxpkt = 120;
+ ecmd->maxrxpkt = 120;
+ return 0;
+}
+
+static u32 veth_get_link(struct net_device *dev)
+{
+ return 1;
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = veth_get_drvinfo,
+ .get_settings = veth_get_settings,
+ .get_link = veth_get_link,
+};
+
+static void veth_tx_timeout(struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *)dev->priv;
+ struct net_device_stats *stats = &port->stats;
+ unsigned long flags;
+ int i;
+
+ stats->tx_errors++;
+
+ spin_lock_irqsave(&port->pending_gate, flags);
+
+ printk(KERN_WARNING "%s: Tx timeout! Resetting lp connections: %08x\n",
+ dev->name, port->pending_lpmask);
+
+ /* If we've timed out the queue must be stopped, which should
+ * only ever happen when there is a pending packet. */
+ WARN_ON(! port->pending_lpmask);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ struct veth_lpar_connection *cnx = veth_cnx[i];
+
+ if (! (port->pending_lpmask & (1<<i)))
+ continue;
+
+ /* If we're pending on it, we must be connected to it,
+ * so we should certainly have a structure for it. */
+ BUG_ON(! cnx);
+
+ /* Theoretically we could be kicking a connection
+ * which doesn't deserve it, but in practice if we've
+ * had a Tx timeout, the pending_lpmask will have
+ * exactly one bit set - the connection causing the
+ * problem. */
+ spin_lock(&cnx->lock);
+ cnx->state |= VETH_STATE_RESET;
+ veth_kick_statemachine(cnx);
+ spin_unlock(&cnx->lock);
+ }
+
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+}
+
+static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
+{
+ struct net_device *dev;
+ struct veth_port *port;
+ int i, rc;
+
+ dev = alloc_etherdev(sizeof (struct veth_port));
+ if (! dev) {
+ veth_error("Unable to allocate net_device structure!\n");
+ return NULL;
+ }
+
+ port = (struct veth_port *) dev->priv;
+
+ spin_lock_init(&port->pending_gate);
+ rwlock_init(&port->mcast_gate);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ HvLpVirtualLanIndexMap map;
+
+ if (i == this_lp)
+ continue;
+ map = HvLpConfig_getVirtualLanIndexMapForLp(i);
+ if (map & (0x8000 >> vlan))
+ port->lpar_map |= (1 << i);
+ }
+ port->dev = vdev;
+
+ dev->dev_addr[0] = 0x02;
+ dev->dev_addr[1] = 0x01;
+ dev->dev_addr[2] = 0xff;
+ dev->dev_addr[3] = vlan;
+ dev->dev_addr[4] = 0xff;
+ dev->dev_addr[5] = this_lp;
+
+ dev->mtu = VETH_MAX_MTU;
+
+ memcpy(&port->mac_addr, dev->dev_addr, 6);
+
+ dev->open = veth_open;
+ dev->hard_start_xmit = veth_start_xmit;
+ dev->stop = veth_close;
+ dev->get_stats = veth_get_stats;
+ dev->change_mtu = veth_change_mtu;
+ dev->set_mac_address = NULL;
+ dev->set_multicast_list = veth_set_multicast_list;
+ SET_ETHTOOL_OPS(dev, &ops);
+
+ dev->watchdog_timeo = 2 * (VETH_ACKTIMEOUT * HZ / 1000000);
+ dev->tx_timeout = veth_tx_timeout;
+
+ SET_NETDEV_DEV(dev, vdev);
+
+ rc = register_netdev(dev);
+ if (rc != 0) {
+ veth_printk(KERN_ERR,
+ "Failed to register ethernet device for vlan %d\n",
+ vlan);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ veth_printk(KERN_DEBUG, "%s attached to iSeries vlan %d (lpar_map=0x%04x)\n",
+ dev->name, vlan, port->lpar_map);
+
+ return dev;
+}
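+
+/* The MAC address built above is locally administered and encodes the
+ * vlan number in byte 3 and our lpar index in byte 5
+ * (02:01:ff:<vlan>:ff:<lpar>). The transmit path routes unicast frames
+ * by the lpar index in byte 5 of the destination address, which is why
+ * set_mac_address is not supported. */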
+
+/*
+ * Tx path
+ */
+
+static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
+ struct net_device *dev)
+{
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+ struct veth_port *port = (struct veth_port *) dev->priv;
+ HvLpEvent_Rc rc;
+ u32 dma_address, dma_length;
+ struct veth_msg *msg = NULL;
+ int err = 0;
+ unsigned long flags;
+
+ if (! cnx) {
+ port->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&cnx->lock, flags);
+
+ if (! (cnx->state & VETH_STATE_READY))
+ goto drop;
+
+ if ((skb->len - 14) > VETH_MAX_MTU)
+ goto drop;
+
+ msg = veth_stack_pop(cnx);
+
+ if (! msg) {
+ err = 1;
+ goto drop;
+ }
+
+ dma_length = skb->len;
+ dma_address = dma_map_single(port->dev, skb->data,
+ dma_length, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dma_address))
+ goto recycle_and_drop;
+
+ /* Is it really necessary to check the length and address
+ * fields of the first entry here? */
+ msg->skb = skb;
+ msg->dev = port->dev;
+ msg->data.addr[0] = dma_address;
+ msg->data.len[0] = dma_length;
+ msg->data.eofmask = 1 << VETH_EOF_SHIFT;
+ set_bit(0, &(msg->in_use));
+ rc = veth_signaldata(cnx, VethEventTypeFrames, msg->token, &msg->data);
+
+ if (rc != HvLpEvent_Rc_Good)
+ goto recycle_and_drop;
+
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ return 0;
+
+ recycle_and_drop:
+ msg->skb = NULL;
+ /* in_use must be set so that veth_recycle_msg() will recycle
+ * the slot, even if we got here because the mapping failed */
+ set_bit(0, &msg->in_use);
+ veth_recycle_msg(cnx, msg);
+ drop:
+ port->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ return err;
+}
+
+static HvLpIndexMap veth_transmit_to_many(struct sk_buff *skb,
+ HvLpIndexMap lpmask,
+ struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *) dev->priv;
+ int i;
+ int rc;
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ if ((lpmask & (1 << i)) == 0)
+ continue;
+
+ rc = veth_transmit_to_one(skb_get(skb), i, dev);
+ if (! rc)
+ lpmask &= ~(1<<i);
+ }
+
+ if (! lpmask) {
+ port->stats.tx_packets++;
+ port->stats.tx_bytes += skb->len;
+ }
+
+ return lpmask;
+}
+
+static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned char *frame = skb->data;
+ struct veth_port *port = (struct veth_port *) dev->priv;
+ unsigned long flags;
+ HvLpIndexMap lpmask;
+
+ if (! (frame[0] & 0x01)) {
+ /* unicast packet */
+ HvLpIndex rlp = frame[5];
+
+ if ( ! ((1 << rlp) & port->lpar_map) ) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ lpmask = 1 << rlp;
+ } else {
+ lpmask = port->lpar_map;
+ }
+
+ spin_lock_irqsave(&port->pending_gate, flags);
+
+ lpmask = veth_transmit_to_many(skb, lpmask, dev);
+
+ if (! lpmask) {
+ dev_kfree_skb(skb);
+ } else {
+ if (port->pending_skb) {
+ veth_error("%s: Tx while skb was pending!\n",
+ dev->name);
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+ return 1;
+ }
+
+ port->pending_skb = skb;
+ port->pending_lpmask = lpmask;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+
+ return 0;
+}
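+
+/* If any target lpar had no free message slot, the skb is parked as
+ * port->pending_skb along with the mask of lpars still owed a copy and
+ * the queue is stopped; veth_flush_pending() retries it once frame acks
+ * return buffers to the stack. */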
+
+static void veth_recycle_msg(struct veth_lpar_connection *cnx,
+ struct veth_msg *msg)
+{
+ u32 dma_address, dma_length;
+
+ if (test_and_clear_bit(0, &msg->in_use)) {
+ dma_address = msg->data.addr[0];
+ dma_length = msg->data.len[0];
+
+ dma_unmap_single(msg->dev, dma_address, dma_length,
+ DMA_TO_DEVICE);
+
+ if (msg->skb) {
+ dev_kfree_skb_any(msg->skb);
+ msg->skb = NULL;
+ }
+
+ memset(&msg->data, 0, sizeof(msg->data));
+ veth_stack_push(cnx, msg);
+ } else
+ if (cnx->state & VETH_STATE_OPEN)
+ veth_error("Bogus frames ack from lpar %d (#%d)\n",
+ cnx->remote_lp, msg->token);
+}
+
+static void veth_flush_pending(struct veth_lpar_connection *cnx)
+{
+ int i;
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
+ struct net_device *dev = veth_dev[i];
+ struct veth_port *port;
+ unsigned long flags;
+
+ if (! dev)
+ continue;
+
+ port = (struct veth_port *)dev->priv;
+
+ if (! (port->lpar_map & (1<<cnx->remote_lp)))
+ continue;
+
+ spin_lock_irqsave(&port->pending_gate, flags);
+ if (port->pending_skb) {
+ port->pending_lpmask =
+ veth_transmit_to_many(port->pending_skb,
+ port->pending_lpmask,
+ dev);
+ if (! port->pending_lpmask) {
+ dev_kfree_skb_any(port->pending_skb);
+ port->pending_skb = NULL;
+ netif_wake_queue(dev);
+ }
+ }
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+ }
+}
+
+/*
+ * Rx path
+ */
+
+static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr)
+{
+ int wanted = 0;
+ int i;
+ unsigned long flags;
+
+ if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) )
+ return 1;
+
+ if (! (((char *) &mac_addr)[0] & 0x01))
+ return 0;
+
+ read_lock_irqsave(&port->mcast_gate, flags);
+
+ if (port->promiscuous || port->all_mcast) {
+ wanted = 1;
+ goto out;
+ }
+
+ for (i = 0; i < port->num_mcast; ++i) {
+ if (port->mcast_addr[i] == mac_addr) {
+ wanted = 1;
+ break;
+ }
+ }
+
+ out:
+ read_unlock_irqrestore(&port->mcast_gate, flags);
+
+ return wanted;
+}
+
+struct dma_chunk {
+ u64 addr;
+ u64 size;
+};
+
+#define VETH_MAX_PAGES_PER_FRAME ( (VETH_MAX_MTU+PAGE_SIZE-2)/PAGE_SIZE + 1 )
+
+static inline void veth_build_dma_list(struct dma_chunk *list,
+ unsigned char *p, unsigned long length)
+{
+ unsigned long done;
+ int i = 1;
+
+ /* FIXME: skbs are contiguous in real addresses. Do we
+ * really need to break it into PAGE_SIZE chunks, or can we do
+ * it just at the granularity of iSeries real->absolute
+ * mapping? Indeed, given the way the allocator works, can we
+ * count on them being absolutely contiguous? */
+ list[0].addr = ISERIES_HV_ADDR(p);
+ list[0].size = min(length,
+ PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
+
+ done = list[0].size;
+ while (done < length) {
+ list[i].addr = ISERIES_HV_ADDR(p + done);
+ list[i].size = min(length-done, PAGE_SIZE);
+ done += list[i].size;
+ i++;
+ }
+}
+
+static void veth_flush_acks(struct veth_lpar_connection *cnx)
+{
+ HvLpEvent_Rc rc;
+
+ rc = veth_signaldata(cnx, VethEventTypeFramesAck,
+ 0, &cnx->pending_acks);
+
+ if (rc != HvLpEvent_Rc_Good)
+ veth_error("Error 0x%x acking frames from lpar %d!\n",
+ (unsigned)rc, cnx->remote_lp);
+
+ cnx->num_pending_acks = 0;
+ memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks));
+}
+
+static void veth_receive(struct veth_lpar_connection *cnx,
+ struct VethLpEvent *event)
+{
+ struct VethFramesData *senddata = &event->u.frames_data;
+ int startchunk = 0;
+ int nchunks;
+ unsigned long flags;
+ HvLpDma_Rc rc;
+
+ do {
+ u16 length = 0;
+ struct sk_buff *skb;
+ struct dma_chunk local_list[VETH_MAX_PAGES_PER_FRAME];
+ struct dma_chunk remote_list[VETH_MAX_FRAMES_PER_MSG];
+ u64 dest;
+ HvLpVirtualLanIndex vlan;
+ struct net_device *dev;
+ struct veth_port *port;
+
+ /* FIXME: do we need this? */
+ memset(local_list, 0, sizeof(local_list));
+ memset(remote_list, 0, sizeof(remote_list));
+
+ /* a 0 address marks the end of the valid entries */
+ if (senddata->addr[startchunk] == 0)
+ break;
+
+ /* make sure that we have at least 1 EOF entry in the
+ * remaining entries */
+ if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) {
+ veth_error("missing EOF frag in event "
+ "eofmask=0x%x startchunk=%d\n",
+ (unsigned) senddata->eofmask, startchunk);
+ break;
+ }
+
+ /* build list of chunks in this frame */
+ nchunks = 0;
+ do {
+ remote_list[nchunks].addr =
+ (u64) senddata->addr[startchunk+nchunks] << 32;
+ remote_list[nchunks].size =
+ senddata->len[startchunk+nchunks];
+ length += remote_list[nchunks].size;
+ } while (! (senddata->eofmask &
+ (1 << (VETH_EOF_SHIFT + startchunk + nchunks++))));
+
+ /* length == total length of all chunks */
+ /* nchunks == # of chunks in this frame */
+
+ if ((length - ETH_HLEN) > VETH_MAX_MTU) {
+ veth_error("Received oversize frame from lpar %d "
+ "(length=%d)\n", cnx->remote_lp, length);
+ continue;
+ }
+
+ skb = alloc_skb(length, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ veth_build_dma_list(local_list, skb->data, length);
+
+ rc = HvCallEvent_dmaBufList(HvLpEvent_Type_VirtualLan,
+ event->base_event.xSourceLp,
+ HvLpDma_Direction_RemoteToLocal,
+ cnx->src_inst,
+ cnx->dst_inst,
+ HvLpDma_AddressType_RealAddress,
+ HvLpDma_AddressType_TceIndex,
+ ISERIES_HV_ADDR(&local_list),
+ ISERIES_HV_ADDR(&remote_list),
+ length);
+ if (rc != HvLpDma_Rc_Good) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ vlan = skb->data[9];
+
+ if (vlan >= HVMAXARCHITECTEDVIRTUALLANS) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ dev = veth_dev[vlan];
+ if (! dev)
+ /* Some earlier versions of the driver sent
+ broadcasts down all connections, even to
+ lpars that weren't on the relevant vlan.
+ So ignore packets belonging to a vlan we're
+ not on. */
+ continue;
+
+ port = (struct veth_port *)dev->priv;
+ dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
+
+ if (! port) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+ if (! veth_frame_wanted(port, dest)) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ skb_put(skb, length);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx(skb); /* send it up */
+ port->stats.rx_packets++;
+ port->stats.rx_bytes += length;
+ } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG);
+
+ /* Ack it */
+ spin_lock_irqsave(&cnx->lock, flags);
+ BUG_ON(cnx->num_pending_acks > VETH_MAX_ACKS_PER_MSG);
+
+ cnx->pending_acks[cnx->num_pending_acks++] =
+ event->base_event.xCorrelationToken;
+
+ if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold)
+ || (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
+ veth_flush_acks(cnx);
+
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_timed_ack(unsigned long ptr)
+{
+ struct veth_lpar_connection *cnx = (struct veth_lpar_connection *) ptr;
+ unsigned long flags;
+
+ /* Ack all the events */
+ spin_lock_irqsave(&cnx->lock, flags);
+ if (cnx->num_pending_acks > 0)
+ veth_flush_acks(cnx);
+
+ /* Reschedule the timer */
+ cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
+ add_timer(&cnx->ack_timer);
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static int veth_remove(struct vio_dev *vdev)
+{
+ int i = vdev->unit_address;
+ struct net_device *dev;
+
+ dev = veth_dev[i];
+ if (dev != NULL) {
+ veth_dev[i] = NULL;
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ return 0;
+}
+
+static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ int i = vdev->unit_address;
+ struct net_device *dev;
+
+ dev = veth_probe_one(i, &vdev->dev);
+ if (dev == NULL) {
+ veth_remove(vdev);
+ return 1;
+ }
+ veth_dev[i] = dev;
+
+ /* Start the state machine on each connection, to commence
+ * link negotiation */
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++)
+ if (veth_cnx[i])
+ veth_kick_statemachine(veth_cnx[i]);
+
+ return 0;
+}
+
+/**
+ * veth_device_table: Used by vio.c to match devices that we
+ * support.
+ */
+static struct vio_device_id veth_device_table[] __devinitdata = {
+ { "vlan", "" },
+ { NULL, NULL }
+};
+MODULE_DEVICE_TABLE(vio, veth_device_table);
+
+static struct vio_driver veth_driver = {
+ .name = "iseries_veth",
+ .id_table = veth_device_table,
+ .probe = veth_probe,
+ .remove = veth_remove
+};
+
+/*
+ * Module initialization/cleanup
+ */
+
+void __exit veth_module_cleanup(void)
+{
+ int i;
+
+ vio_unregister_driver(&veth_driver);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
+ veth_stop_connection(i);
+
+ HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
+
+ /* Hypervisor callbacks may have scheduled more work while we
+ * were destroying connections. Now that we've disconnected from
+ * the hypervisor make sure everything's finished. */
+ flush_scheduled_work();
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
+ veth_destroy_connection(i);
+
+}
+module_exit(veth_module_cleanup);
+
+int __init veth_module_init(void)
+{
+ int i;
+ int rc;
+
+ this_lp = HvLpConfig_getLpIndex_outline();
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
+ rc = veth_init_connection(i);
+ if (rc != 0) {
+ veth_module_cleanup();
+ return rc;
+ }
+ }
+
+ HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
+ &veth_handle_event);
+
+ return vio_register_driver(&veth_driver);
+}
+module_init(veth_module_init);
diff --git a/drivers/net/iseries_veth.h b/drivers/net/iseries_veth.h
new file mode 100644
index 000000000000..d9370f79b83e
--- /dev/null
+++ b/drivers/net/iseries_veth.h
@@ -0,0 +1,46 @@
+/* File veth.h created by Kyle A. Lucke on Mon Aug 7 2000. */
+
+#ifndef _ISERIES_VETH_H
+#define _ISERIES_VETH_H
+
+#define VethEventTypeCap (0)
+#define VethEventTypeFrames (1)
+#define VethEventTypeMonitor (2)
+#define VethEventTypeFramesAck (3)
+
+#define VETH_MAX_ACKS_PER_MSG (20)
+#define VETH_MAX_FRAMES_PER_MSG (6)
+
+struct VethFramesData {
+ u32 addr[VETH_MAX_FRAMES_PER_MSG];
+ u16 len[VETH_MAX_FRAMES_PER_MSG];
+ u32 eofmask;
+};
+#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
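+
+/* The eofmask marks which addr[]/len[] entries end a frame: the EOF bit
+ * for entry i is bit (VETH_EOF_SHIFT + i), so with 6 frames per message
+ * the mask occupies the top 6 bits of the u32. */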
+
+struct VethFramesAckData {
+ u16 token[VETH_MAX_ACKS_PER_MSG];
+};
+
+struct VethCapData {
+ u8 caps_version;
+ u8 rsvd1;
+ u16 num_buffers;
+ u16 ack_threshold;
+ u16 rsvd2;
+ u32 ack_timeout;
+ u32 rsvd3;
+ u64 rsvd4[3];
+};
+
+struct VethLpEvent {
+ struct HvLpEvent base_event;
+ union {
+ struct VethCapData caps_data;
+ struct VethFramesData frames_data;
+ struct VethFramesAckData frames_ack_data;
+ } u;
+
+};
+
+#endif /* _ISERIES_VETH_H */
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
new file mode 100644
index 000000000000..7c7aff1ea7d5
--- /dev/null
+++ b/drivers/net/ixgb/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+#
+# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The full GNU General Public License is included in this distribution in the
+# file called LICENSE.
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/10GbE driver
+#
+
+obj-$(CONFIG_IXGB) += ixgb.o
+
+ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
new file mode 100644
index 000000000000..26c4f15f7fc0
--- /dev/null
+++ b/drivers/net/ixgb/ixgb.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_H_
+#define _IXGB_H_
+
+#include <linux/stddef.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#endif
+
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+struct ixgb_adapter;
+#include "ixgb_hw.h"
+#include "ixgb_ee.h"
+#include "ixgb_ids.h"
+
+#ifdef _DEBUG_DRIVER_
+#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args)
+#else
+#define IXGB_DBG(args...)
+#endif
+
+#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
+
+/* TX/RX descriptor defines */
+#define DEFAULT_TXD 256
+#define MAX_TXD 4096
+#define MIN_TXD 64
+
+/* The hardware cannot reliably support more than 512 descriptors owned
+ * by the hardware descriptor cache; otherwise the ring may become
+ * unreliable under heavy receive load */
+/* #define DEFAULT_RXD 1024 */
+/* #define MAX_RXD 4096 */
+#define DEFAULT_RXD 512
+#define MAX_RXD 512
+#define MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGB_RXBUFFER_2048 2048
+#define IXGB_RXBUFFER_4096 4096
+#define IXGB_RXBUFFER_8192 8192
+#define IXGB_RXBUFFER_16384 16384
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define IXGB_TX_QUEUE_WAKE 16
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+/* only works for sizes that are powers of 2 */
+#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgb_buffer {
+ struct sk_buff *skb;
+ uint64_t dma;
+ unsigned long time_stamp;
+ uint16_t length;
+ uint16_t next_to_watch;
+};
+
+struct ixgb_desc_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct ixgb_buffer *buffer_info;
+};
+
+#define IXGB_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
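+
+/* IXGB_DESC_UNUSED counts free descriptors allowing for ring wraparound;
+ * e.g. with count = 256, next_to_clean = 10 and next_to_use = 200 it
+ * yields 256 + 10 - 200 - 1 = 65. */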
+
+#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
+#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
+#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
+
+/* board specific private data structure */
+
+struct ixgb_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ uint32_t bd_number;
+ uint32_t rx_buffer_len;
+ uint32_t part_num;
+ uint16_t link_speed;
+ uint16_t link_duplex;
+ spinlock_t tx_lock;
+ atomic_t irq_sem;
+ struct work_struct tx_timeout_task;
+
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* TX */
+ struct ixgb_desc_ring tx_ring;
+ unsigned long timeo_start;
+ uint32_t tx_cmd_type;
+ uint64_t hw_csum_tx_good;
+ uint64_t hw_csum_tx_error;
+ uint32_t tx_int_delay;
+ boolean_t tx_int_delay_enable;
+ boolean_t detect_tx_hung;
+
+ /* RX */
+ struct ixgb_desc_ring rx_ring;
+ uint64_t hw_csum_rx_error;
+ uint64_t hw_csum_rx_good;
+ uint32_t rx_int_delay;
+ boolean_t rx_csum;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgb_hw.h */
+ struct ixgb_hw hw;
+ struct ixgb_hw_stats stats;
+#ifdef CONFIG_PCI_MSI
+ boolean_t have_msi;
+#endif
+};
+#endif /* _IXGB_H_ */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
new file mode 100644
index 000000000000..653e99f919ce
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -0,0 +1,774 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgb_hw.h"
+#include "ixgb_ee.h"
+/* Local prototypes */
+static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
+
+static void ixgb_shift_out_bits(struct ixgb_hw *hw,
+ uint16_t data,
+ uint16_t count);
+static void ixgb_standby_eeprom(struct ixgb_hw *hw);
+
+static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
+
+static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd_reg - EECD's current value
+ *****************************************************************************/
+static void
+ixgb_raise_clock(struct ixgb_hw *hw,
+ uint32_t *eecd_reg)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd_reg = *eecd_reg | IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ udelay(50);
+ return;
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd_reg - EECD's current value
+ *****************************************************************************/
+static void
+ixgb_lower_clock(struct ixgb_hw *hw,
+ uint32_t *eecd_reg)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ udelay(50);
+ return;
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void
+ixgb_shift_out_bits(struct ixgb_hw *hw,
+ uint16_t data,
+ uint16_t count)
+{
+ uint32_t eecd_reg;
+ uint32_t mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd_reg &= ~IXGB_EECD_DI;
+
+ if(data & mask)
+ eecd_reg |= IXGB_EECD_DI;
+
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ udelay(50);
+
+ ixgb_raise_clock(hw, &eecd_reg);
+ ixgb_lower_clock(hw, &eecd_reg);
+
+ mask = mask >> 1;
+
+ } while(mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd_reg &= ~IXGB_EECD_DI;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ return;
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static uint16_t
+ixgb_shift_in_bits(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+ uint32_t i;
+ uint16_t data;
+
+ /* In order to read a register from the EEPROM, we need to shift 16 bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the "DO"
+ * bit. During this "shifting in" process the "DI" bit should always be
+ * clear.
+ */
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ data = 0;
+
+ for(i = 0; i < 16; i++) {
+ data = data << 1;
+ ixgb_raise_clock(hw, &eecd_reg);
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_DI);
+ if(eecd_reg & IXGB_EECD_DO)
+ data |= 1;
+
+ ixgb_lower_clock(hw, &eecd_reg);
+ }
+
+ return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static void
+ixgb_setup_eeprom(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Clear SK and DI */
+ eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ /* Set CS */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ return;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_standby_eeprom(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Deselect EEPROM */
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+
+ /* Clock high */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+
+ /* Select EEPROM */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+
+ /* Clock low */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+ return;
+}
+
+/******************************************************************************
+ * Raises then lowers the EEPROM's clock pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_clock_eeprom(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Rising edge of clock */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+
+ /* Falling edge of clock */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ udelay(50);
+ return;
+}
+
+/******************************************************************************
+ * Terminates a command by lowering the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_cleanup_eeprom(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
+
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ ixgb_clock_eeprom(hw);
+ return;
+}
+
+/******************************************************************************
+ * Waits for the EEPROM to finish the current command.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * The command is done when the EEPROM's data out pin goes high.
+ *
+ * Returns:
+ * TRUE: EEPROM data pin is high before timeout.
+ * FALSE: Time expired.
+ *****************************************************************************/
+static boolean_t
+ixgb_wait_eeprom_command(struct ixgb_hw *hw)
+{
+ uint32_t eecd_reg;
+ uint32_t i;
+
+ /* Toggle the CS line. This in effect tells the EEPROM to actually execute
+ * the command in question.
+ */
+ ixgb_standby_eeprom(hw);
+
+ /* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for(i = 0; i < 200; i++) {
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ if(eecd_reg & IXGB_EECD_DO)
+ return (TRUE);
+
+ udelay(50);
+ }
+ ASSERT(0);
+ return (FALSE);
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *
+ * Returns:
+ * TRUE: Checksum is valid
+ * FALSE: Checksum is not valid.
+ *****************************************************************************/
+boolean_t
+ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
+{
+ uint16_t checksum = 0;
+ uint16_t i;
+
+ for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
+ checksum += ixgb_read_eeprom(hw, i);
+
+ if(checksum == (uint16_t) EEPROM_SUM)
+ return (TRUE);
+ else
+ return (FALSE);
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+void
+ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
+{
+ uint16_t checksum = 0;
+ uint16_t i;
+
+ for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
+ checksum += ixgb_read_eeprom(hw, i);
+
+ checksum = (uint16_t) EEPROM_SUM - checksum;
+
+ ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
+ return;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * reg - offset within the EEPROM to be written to
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If ixgb_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *
+ *****************************************************************************/
+void
+ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ /* Prepare the EEPROM for writing */
+ ixgb_setup_eeprom(hw);
+
+ /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
+ * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
+ */
+ ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
+
+ /* Prepare the EEPROM */
+ ixgb_standby_eeprom(hw);
+
+ /* Send the Write command (3-bit opcode + 6-bit addr) */
+ ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
+ ixgb_shift_out_bits(hw, offset, 6);
+
+ /* Send the data */
+ ixgb_shift_out_bits(hw, data, 16);
+
+ ixgb_wait_eeprom_command(hw);
+
+ /* Recover from write */
+ ixgb_standby_eeprom(hw);
+
+ /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
+ * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
+ * mode.
+ */
+ ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
+
+ /* Done with writing */
+ ixgb_cleanup_eeprom(hw);
+
+ /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
+ ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+
+ return;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of 16 bit word in the EEPROM to read
+ *
+ * Returns:
+ * The 16-bit value read from the eeprom
+ *****************************************************************************/
+uint16_t
+ixgb_read_eeprom(struct ixgb_hw *hw,
+ uint16_t offset)
+{
+ uint16_t data;
+
+ /* Prepare the EEPROM for reading */
+ ixgb_setup_eeprom(hw);
+
+ /* Send the READ command (opcode + addr) */
+ ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
+ /*
+ * We have a 64 word EEPROM, so there are 6 address bits
+ */
+ ixgb_shift_out_bits(hw, offset, 6);
+
+ /* Read the data */
+ data = ixgb_shift_in_bits(hw);
+
+ /* End this read operation */
+ ixgb_standby_eeprom(hw);
+
+ return (data);
+}
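+
+/* A complete read is therefore: select the part (ixgb_setup_eeprom),
+ * shift out the 3-bit READ opcode and a 6-bit word address, clock 16
+ * data bits back in, and return the EEPROM to standby.  The write path
+ * wraps the same sequence in EWEN/EWDS commands to enable and disable
+ * writes. */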
+
+/******************************************************************************
+ * Reads eeprom and stores data in shared structure.
+ * Validates eeprom checksum and eeprom signature.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * TRUE: if eeprom read is successful
+ * FALSE: otherwise.
+ *****************************************************************************/
+boolean_t
+ixgb_get_eeprom_data(struct ixgb_hw *hw)
+{
+ uint16_t i;
+ uint16_t checksum = 0;
+ struct ixgb_ee_map_type *ee_map;
+
+ DEBUGFUNC("ixgb_get_eeprom_data");
+
+ ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
+ uint16_t ee_data;
+ ee_data = ixgb_read_eeprom(hw, i);
+ checksum += ee_data;
+ hw->eeprom[i] = le16_to_cpu(ee_data);
+ }
+
+ if (checksum != (uint16_t) EEPROM_SUM) {
+ DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ /* clear the init_ctrl_reg_1 to signify that the cache is
+ * invalidated */
+ ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+ return (FALSE);
+ }
+
+ if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
+ != le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
+ DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
+
+/******************************************************************************
+ * Local function to check if the eeprom signature is good.
+ * If the cached signature is not valid, calls ixgb_get_eeprom_data to
+ * re-read the eeprom.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * TRUE: eeprom signature was good and the eeprom read was successful
+ * FALSE: otherwise.
+ ******************************************************************************/
+static boolean_t
+ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
+ == le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
+ return (TRUE);
+ } else {
+ return ixgb_get_eeprom_data(hw);
+ }
+}
+
+/******************************************************************************
+ * return a word from the eeprom
+ *
+ * hw - Struct containing variables accessed by shared code
+ * index - Offset of eeprom word
+ *
+ * Returns:
+ * Word at indexed offset in eeprom, if valid, 0 otherwise.
+ ******************************************************************************/
+uint16_t
+ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
+{
+
+ if ((index < IXGB_EEPROM_SIZE) &&
+ (ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
+ return(hw->eeprom[index]);
+ }
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the mac address from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise
+ *
+ * Returns: None.
+ ******************************************************************************/
+void
+ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
+ uint8_t *mac_addr)
+{
+ int i;
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ DEBUGFUNC("ixgb_get_ee_mac_addr");
+
+ if (ixgb_check_and_get_eeprom_data(hw) == TRUE) {
+ for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
+ mac_addr[i] = ee_map->mac_addr[i];
+ DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
+ }
+ }
+}
+
+/******************************************************************************
+ * return the compatibility flags from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * compatibility flags if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_compatibility(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->compatibility);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Printed Board Assembly number from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * PBA number if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint32_t
+ixgb_get_ee_pba_number(struct ixgb_hw *hw)
+{
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
+ | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Initialization Control Word 1 from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->init_ctrl_reg_1);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Initialization Control Word 2 from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->init_ctrl_reg_2);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Subsystem Id from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Subsystem Id if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->subsystem_id);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Sub Vendor Id from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->subvendor_id);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Device Id from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Device Id if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_device_id(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->device_id);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Vendor Id from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Vendor Id if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->vendor_id);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the Software Defined Pins Register from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * SDP Register if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint16_t
+ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->swdpins_reg);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the D3 Power Management Bits from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint8_t
+ixgb_get_ee_d3_power(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->d3_power);
+
+ return(0);
+}
+
+/******************************************************************************
+ * return the D0 Power Management Bits from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+uint8_t
+ixgb_get_ee_d0_power(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+ return(ee_map->d0_power);
+
+ return(0);
+}
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
new file mode 100644
index 000000000000..5190aa8761a2
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_EE_H_
+#define _IXGB_EE_H_
+
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
+
+#define IXGB_ETH_LENGTH_OF_ADDRESS 6
+
+/* EEPROM Commands */
+#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM MAP (Word Offsets) */
+#define EEPROM_IA_1_2_REG 0x0000
+#define EEPROM_IA_3_4_REG 0x0001
+#define EEPROM_IA_5_6_REG 0x0002
+#define EEPROM_COMPATIBILITY_REG 0x0003
+#define EEPROM_PBA_1_2_REG 0x0008
+#define EEPROM_PBA_3_4_REG 0x0009
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_SUBSYS_ID_REG 0x000B
+#define EEPROM_SUBVEND_ID_REG 0x000C
+#define EEPROM_DEVICE_ID_REG 0x000D
+#define EEPROM_VENDOR_ID_REG 0x000E
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDPINS_REG 0x0020
+#define EEPROM_CIRCUIT_CTRL_REG 0x0021
+#define EEPROM_D0_D3_POWER_REG 0x0022
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+
+#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
+#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
+#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
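+/* Equivalently, the checksum word at offset 0x3F is expected to hold 0xBABA
+ * minus the 16-bit sum of words 0x00 through 0x3E; ixgb_update_eeprom_checksum
+ * (declared below) is the routine expected to maintain that invariant. */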
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+
+/* EEPROM structure */
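+/* The field layout below mirrors the word-offset map above: the MAC address
+ * occupies words 0x00-0x02, init_ctrl_reg_1 sits at word 0x0A, and the struct
+ * spans exactly IXGB_EEPROM_SIZE (64) 16-bit words, ending with the checksum
+ * at word 0x3F. */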
+struct ixgb_ee_map_type {
+ uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
+ uint16_t compatibility;
+ uint16_t reserved1[4];
+ uint32_t pba_number;
+ uint16_t init_ctrl_reg_1;
+ uint16_t subsystem_id;
+ uint16_t subvendor_id;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t init_ctrl_reg_2;
+ uint16_t oem_reserved[16];
+ uint16_t swdpins_reg;
+ uint16_t circuit_ctrl_reg;
+ uint8_t d3_power;
+ uint8_t d0_power;
+ uint16_t reserved2[28];
+ uint16_t checksum;
+};
+
+/* EEPROM Functions */
+uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg);
+
+boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
+
+void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
+
+void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data);
+
+#endif /* IXGB_EE_H */
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
new file mode 100644
index 000000000000..aea10e8aaa72
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -0,0 +1,704 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgb */
+
+#include "ixgb.h"
+
+#include <asm/uaccess.h>
+
+extern char ixgb_driver_name[];
+extern char ixgb_driver_version[];
+
+extern int ixgb_up(struct ixgb_adapter *adapter);
+extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
+extern void ixgb_reset(struct ixgb_adapter *adapter);
+extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+
+struct ixgb_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
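+/* IXGB_STAT(m) expands to the sizeof_stat and stat_offset initializers for
+ * member m of struct ixgb_adapter, so each entry in the table below records
+ * both the width and the byte offset of the counter it exports. */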
+#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \
+ offsetof(struct ixgb_adapter, m)
+static struct ixgb_stats ixgb_gstrings_stats[] = {
+ {"rx_packets", IXGB_STAT(net_stats.rx_packets)},
+ {"tx_packets", IXGB_STAT(net_stats.tx_packets)},
+ {"rx_bytes", IXGB_STAT(net_stats.rx_bytes)},
+ {"tx_bytes", IXGB_STAT(net_stats.tx_bytes)},
+ {"rx_errors", IXGB_STAT(net_stats.rx_errors)},
+ {"tx_errors", IXGB_STAT(net_stats.tx_errors)},
+ {"rx_dropped", IXGB_STAT(net_stats.rx_dropped)},
+ {"tx_dropped", IXGB_STAT(net_stats.tx_dropped)},
+ {"multicast", IXGB_STAT(net_stats.multicast)},
+ {"collisions", IXGB_STAT(net_stats.collisions)},
+
+/* { "rx_length_errors", IXGB_STAT(net_stats.rx_length_errors) }, */
+ {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)},
+ {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)},
+ {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)},
+ {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)},
+ {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)},
+ {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)},
+ {"tx_carrier_errors", IXGB_STAT(net_stats.tx_carrier_errors)},
+ {"tx_fifo_errors", IXGB_STAT(net_stats.tx_fifo_errors)},
+ {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
+ {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
+ {"tx_deferred_ok", IXGB_STAT(stats.dc)},
+ {"rx_long_length_errors", IXGB_STAT(stats.roc)},
+ {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
+#ifdef NETIF_F_TSO
+ {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
+ {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
+#endif
+ {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
+ {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
+ {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
+ {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
+ {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
+ {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
+ {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
+ {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
+};
+
+#define IXGB_STATS_LEN \
+ sizeof(ixgb_gstrings_stats) / sizeof(struct ixgb_stats)
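+/* IXGB_STATS_LEN is the element count of ixgb_gstrings_stats; it is the
+ * value ixgb_get_stats_count() reports to ethtool below. */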
+
+static int
+ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if(netif_carrier_ok(adapter->netdev)) {
+ ecmd->speed = SPEED_10000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+static int
+ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ if(ecmd->autoneg == AUTONEG_ENABLE ||
+ ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
+ return -EINVAL;
+
+ if(netif_running(adapter->netdev)) {
+ ixgb_down(adapter, TRUE);
+ ixgb_reset(adapter);
+ ixgb_up(adapter);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+static void
+ixgb_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ pause->autoneg = AUTONEG_DISABLE;
+
+ if(hw->fc.type == ixgb_fc_rx_pause)
+ pause->rx_pause = 1;
+ else if(hw->fc.type == ixgb_fc_tx_pause)
+ pause->tx_pause = 1;
+ else if(hw->fc.type == ixgb_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int
+ixgb_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ if(pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if(pause->rx_pause && pause->tx_pause)
+ hw->fc.type = ixgb_fc_full;
+ else if(pause->rx_pause && !pause->tx_pause)
+ hw->fc.type = ixgb_fc_rx_pause;
+ else if(!pause->rx_pause && pause->tx_pause)
+ hw->fc.type = ixgb_fc_tx_pause;
+ else if(!pause->rx_pause && !pause->tx_pause)
+ hw->fc.type = ixgb_fc_none;
+
+ if(netif_running(adapter->netdev)) {
+ ixgb_down(adapter, TRUE);
+ ixgb_up(adapter);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+static uint32_t
+ixgb_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ return adapter->rx_csum;
+}
+
+static int
+ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ adapter->rx_csum = data;
+
+ if(netif_running(netdev)) {
+ ixgb_down(adapter,TRUE);
+ ixgb_up(adapter);
+ } else
+ ixgb_reset(adapter);
+ return 0;
+}
+
+static uint32_t
+ixgb_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int
+ixgb_set_tso(struct net_device *netdev, uint32_t data)
+{
+ if(data)
+ netdev->features |= NETIF_F_TSO;
+ else
+ netdev->features &= ~NETIF_F_TSO;
+ return 0;
+}
+#endif /* NETIF_F_TSO */
+
+#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
+
+static int
+ixgb_get_regs_len(struct net_device *netdev)
+{
+#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t)
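+	/* 136 = 5 general + 4 interrupt + 12 receive + 32 RAR + 10 transmit +
+	 * 13 physical + 60 statistics registers, matching the dump sequence
+	 * in ixgb_get_regs() below. */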
+ return IXGB_REG_DUMP_LEN;
+}
+
+static void
+ixgb_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+ uint32_t *reg = p;
+ uint32_t *reg_start = reg;
+ uint8_t i;
+
+ regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
+
+ /* General Registers */
+ *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
+ *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
+ *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
+ *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
+ *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
+
+ /* Interrupt */
+ *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
+ *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
+ *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
+ *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
+
+ /* Receive */
+ *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
+ *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
+ *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
+ *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
+ *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
+ *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
+ *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
+ *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
+ *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
+ *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
+ *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
+ *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
+
+ for (i = 0; i < IXGB_RAR_ENTRIES; i++) {
+ *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
+ *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
+ }
+
+ /* Transmit */
+ *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
+ *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
+ *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
+ *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
+ *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
+ *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
+ *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
+ *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
+ *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
+ *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
+
+ /* Physical */
+ *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
+ *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
+ *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
+ *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
+ *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
+ *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
+ *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
+ *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
+ *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
+ *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
+ *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
+ *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
+ *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
+
+ /* Statistics */
+ *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
+ *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
+ *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
+ *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
+ *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
+ *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
+ *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
+ *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
+ *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
+ *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
+ *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
+ *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
+ *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
+ *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
+ *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
+ *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
+ *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
+ *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
+ *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
+ *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
+ *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
+ *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
+ *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
+ *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
+ *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
+ *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
+ *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
+ *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
+ *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
+ *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
+ *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
+ *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
+ *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
+ *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
+ *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
+ *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
+ *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
+ *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
+ *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
+ *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
+ *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
+ *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
+ *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
+ *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
+ *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
+ *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
+ *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
+ *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
+ *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
+ *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
+ *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
+ *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
+ *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
+ *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
+ *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
+ *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
+ *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
+ *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
+
+ regs->len = (reg - reg_start) * sizeof(uint32_t);
+}
+
+static int
+ixgb_get_eeprom_len(struct net_device *netdev)
+{
+ /* return size in bytes */
+ return (IXGB_EEPROM_SIZE << 1);
+}
+
+static int
+ixgb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *bytes)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+ uint16_t *eeprom_buff;
+ int i, max_len, first_word, last_word;
+ int ret_val = 0;
+
+ if(eeprom->len == 0) {
+ ret_val = -EINVAL;
+ goto geeprom_error;
+ }
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ max_len = ixgb_get_eeprom_len(netdev);
+
+ if(eeprom->offset > eeprom->offset + eeprom->len) {
+ ret_val = -EINVAL;
+ goto geeprom_error;
+ }
+
+ if((eeprom->offset + eeprom->len) > max_len)
+ eeprom->len = (max_len - eeprom->offset);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(uint16_t) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if(!eeprom_buff)
+ return -ENOMEM;
+
+	/* note: the eeprom is known to be good because the driver loaded */
+ for(i = 0; i <= (last_word - first_word); i++) {
+ eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
+ }
+
+ memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+geeprom_error:
+ return ret_val;
+}
+
+static int
+ixgb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *bytes)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+ uint16_t *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word;
+ uint16_t i;
+
+ if(eeprom->len == 0)
+ return -EINVAL;
+
+ if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = ixgb_get_eeprom_len(netdev);
+
+ if(eeprom->offset > eeprom->offset + eeprom->len)
+ return -EINVAL;
+
+ if((eeprom->offset + eeprom->len) > max_len)
+ eeprom->len = (max_len - eeprom->offset);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if(!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if(eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
+ ptr++;
+ }
+ if((eeprom->offset + eeprom->len) & 1) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word]
+ = ixgb_read_eeprom(hw, last_word);
+ }
+
+ memcpy(ptr, bytes, eeprom->len);
+ for(i = 0; i <= (last_word - first_word); i++)
+ ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
+
+ /* Update the checksum over the first part of the EEPROM if needed */
+ if(first_word <= EEPROM_CHECKSUM_REG)
+ ixgb_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return 0;
+}
+
+static void
+ixgb_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ strncpy(drvinfo->driver, ixgb_driver_name, 32);
+ strncpy(drvinfo->version, ixgb_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = IXGB_STATS_LEN;
+ drvinfo->regdump_len = ixgb_get_regs_len(netdev);
+ drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
+}
+
+static void
+ixgb_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+
+ ring->rx_max_pending = MAX_RXD;
+ ring->tx_max_pending = MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int
+ixgb_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
+ int err;
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if(netif_running(adapter->netdev))
+ ixgb_down(adapter,TRUE);
+
+ rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
+ rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
+ IXGB_ROUNDUP(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
+ txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
+ IXGB_ROUNDUP(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if(netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ if((err = ixgb_setup_rx_resources(adapter)))
+ goto err_setup_rx;
+ if((err = ixgb_setup_tx_resources(adapter)))
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ rx_new = adapter->rx_ring;
+ tx_new = adapter->tx_ring;
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ ixgb_free_rx_resources(adapter);
+ ixgb_free_tx_resources(adapter);
+ adapter->rx_ring = rx_new;
+ adapter->tx_ring = tx_new;
+ if((err = ixgb_up(adapter)))
+ return err;
+ }
+
+ return 0;
+err_setup_tx:
+ ixgb_free_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ ixgb_up(adapter);
+ return err;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define IXGB_ID_INTERVAL (HZ/4)
+
+/* bit defines for adapter->led_status */
+#define IXGB_LED_ON 0
+
+static void
+ixgb_led_blink_callback(unsigned long data)
+{
+ struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
+
+ if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
+ ixgb_led_off(&adapter->hw);
+ else
+ ixgb_led_on(&adapter->hw);
+
+ mod_timer(&adapter->blink_timer, jiffies + IXGB_ID_INTERVAL);
+}
+
+static int
+ixgb_phys_id(struct net_device *netdev, uint32_t data)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
+
+ if(!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = ixgb_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
+ }
+
+ mod_timer(&adapter->blink_timer, jiffies);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if(data)
+ schedule_timeout(data * HZ);
+ else
+ schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+
+ del_timer_sync(&adapter->blink_timer);
+ ixgb_led_off(&adapter->hw);
+ clear_bit(IXGB_LED_ON, &adapter->led_status);
+
+ return 0;
+}
+
+static int
+ixgb_get_stats_count(struct net_device *netdev)
+{
+ return IXGB_STATS_LEN;
+}
+
+static void
+ixgb_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ int i;
+
+ ixgb_update_stats(adapter);
+ for(i = 0; i < IXGB_STATS_LEN; i++) {
+ char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
+ data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
+ sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+ }
+}
+
+static void
+ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+{
+ int i;
+
+ switch(stringset) {
+ case ETH_SS_STATS:
+ for(i=0; i < IXGB_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ixgb_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+struct ethtool_ops ixgb_ethtool_ops = {
+ .get_settings = ixgb_get_settings,
+ .set_settings = ixgb_set_settings,
+ .get_drvinfo = ixgb_get_drvinfo,
+ .get_regs_len = ixgb_get_regs_len,
+ .get_regs = ixgb_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgb_get_eeprom_len,
+ .get_eeprom = ixgb_get_eeprom,
+ .set_eeprom = ixgb_set_eeprom,
+ .get_ringparam = ixgb_get_ringparam,
+ .set_ringparam = ixgb_set_ringparam,
+ .get_pauseparam = ixgb_get_pauseparam,
+ .set_pauseparam = ixgb_set_pauseparam,
+ .get_rx_csum = ixgb_get_rx_csum,
+ .set_rx_csum = ixgb_set_rx_csum,
+ .get_tx_csum = ixgb_get_tx_csum,
+ .set_tx_csum = ixgb_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgb_set_tso,
+#endif
+ .get_strings = ixgb_get_strings,
+ .phys_id = ixgb_phys_id,
+ .get_stats_count = ixgb_get_stats_count,
+ .get_ethtool_stats = ixgb_get_ethtool_stats,
+};
+
+void ixgb_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
new file mode 100644
index 000000000000..69329c73095a
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -0,0 +1,1202 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ixgb_hw.c
+ * Shared functions for accessing and configuring the adapter
+ */
+
+#include "ixgb_hw.h"
+#include "ixgb_ids.h"
+
+/* Local function prototypes */
+
+static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr);
+
+static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
+
+static void ixgb_get_bus_info(struct ixgb_hw *hw);
+
+static boolean_t ixgb_link_reset(struct ixgb_hw *hw);
+
+static void ixgb_optics_reset(struct ixgb_hw *hw);
+
+static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
+
+uint32_t ixgb_mac_reset(struct ixgb_hw *hw);
+
+uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
+{
+ uint32_t ctrl_reg;
+
+ ctrl_reg = IXGB_CTRL0_RST |
+ IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
+ IXGB_CTRL0_SDP2_DIR |
+ IXGB_CTRL0_SDP1_DIR |
+ IXGB_CTRL0_SDP0_DIR |
+ IXGB_CTRL0_SDP3 | /* Initial value 1101 */
+ IXGB_CTRL0_SDP2 |
+ IXGB_CTRL0_SDP0;
+
+#ifdef HP_ZX1
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
+#else
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+#endif
+
+ /* Delay a few ms just to allow the reset to complete */
+ msec_delay(IXGB_DELAY_AFTER_RESET);
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+#ifdef DBG
+ /* Make sure the self-clearing global reset bit did self clear */
+ ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
+#endif
+
+ if (hw->phy_type == ixgb_phy_type_txn17401) {
+ ixgb_optics_reset(hw);
+ }
+
+ return ctrl_reg;
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+boolean_t
+ixgb_adapter_stop(struct ixgb_hw *hw)
+{
+ uint32_t ctrl_reg;
+ uint32_t icr_reg;
+
+ DEBUGFUNC("ixgb_adapter_stop");
+
+	/* If we are stopped or resetting, exit gracefully and wait to be
+ * started again before accessing the hardware.
+ */
+ if(hw->adapter_stopped) {
+ DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ return FALSE;
+ }
+
+ /* Set the Adapter Stopped flag so other driver functions stop
+ * touching the Hardware.
+ */
+ hw->adapter_stopped = TRUE;
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
+ IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ msec_delay(IXGB_DELAY_BEFORE_RESET);
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ ctrl_reg = ixgb_mac_reset(hw);
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr_reg = IXGB_READ_REG(hw, ICR);
+
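+	/* A nonzero result means the self-clearing reset bit was still set
+	 * when CTRL0 was read back after the reset delay. */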
+ return (ctrl_reg & IXGB_CTRL0_RST);
+}
+
+
+/******************************************************************************
+ * Identifies the vendor of the optics module on the adapter. The SR adapters
+ * support two different types of XPAK optics, so it is necessary to determine
+ * which optics are present before applying any optics-specific workarounds.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *
+ * Returns: the vendor of the XPAK optics module.
+ *****************************************************************************/
+static ixgb_xpak_vendor
+ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
+{
+ uint32_t i;
+ uint16_t vendor_name[5];
+ ixgb_xpak_vendor xpak_vendor;
+
+ DEBUGFUNC("ixgb_identify_xpak_vendor");
+
+ /* Read the first few bytes of the vendor string from the XPAK NVR
+ * registers. These are standard XENPAK/XPAK registers, so all XPAK
+ * devices should implement them. */
+ for (i = 0; i < 5; i++) {
+ vendor_name[i] = ixgb_read_phy_reg(hw,
+ MDIO_PMA_PMD_XPAK_VENDOR_NAME
+ + i, IXGB_PHY_ADDRESS,
+ MDIO_PMA_PMD_DID);
+ }
+
+ /* Determine the actual vendor */
+ if (vendor_name[0] == 'I' &&
+ vendor_name[1] == 'N' &&
+ vendor_name[2] == 'T' &&
+ vendor_name[3] == 'E' && vendor_name[4] == 'L') {
+ xpak_vendor = ixgb_xpak_vendor_intel;
+ } else {
+ xpak_vendor = ixgb_xpak_vendor_infineon;
+ }
+
+ return (xpak_vendor);
+}
+
+/******************************************************************************
+ * Determine the physical layer module on the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code. The device_id
+ * field must be (correctly) populated before calling this routine.
+ *
+ * Returns: the phy type of the adapter.
+ *****************************************************************************/
+static ixgb_phy_type
+ixgb_identify_phy(struct ixgb_hw *hw)
+{
+ ixgb_phy_type phy_type;
+ ixgb_xpak_vendor xpak_vendor;
+
+ DEBUGFUNC("ixgb_identify_phy");
+
+ /* Infer the transceiver/phy type from the device id */
+ switch (hw->device_id) {
+ case IXGB_DEVICE_ID_82597EX:
+ DEBUGOUT("Identified TXN17401 optics\n");
+ phy_type = ixgb_phy_type_txn17401;
+ break;
+
+ case IXGB_DEVICE_ID_82597EX_SR:
+ /* The SR adapters carry two different types of XPAK optics
+ * modules; read the vendor identifier to determine the exact
+ * type of optics. */
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if (xpak_vendor == ixgb_xpak_vendor_intel) {
+ DEBUGOUT("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ DEBUGOUT("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
+ case IXGB_DEVICE_ID_82597EX_LR:
+ DEBUGOUT("Identified G6104 optics\n");
+ phy_type = ixgb_phy_type_g6104;
+ break;
+ default:
+ DEBUGOUT("Unknown physical layer module\n");
+ phy_type = ixgb_phy_type_unknown;
+ break;
+ }
+
+ return (phy_type);
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Resets the controller.
+ * Reads and validates the EEPROM.
+ * Initializes the receive address registers.
+ * Initializes the multicast table.
+ * Clears all on-chip counters.
+ * Calls routine to setup flow control settings.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ *
+ * Returns:
+ * TRUE if successful,
+ * FALSE if unrecoverable problems were encountered.
+ *****************************************************************************/
+boolean_t
+ixgb_init_hw(struct ixgb_hw *hw)
+{
+ uint32_t i;
+ uint32_t ctrl_reg;
+ boolean_t status;
+
+ DEBUGFUNC("ixgb_init_hw");
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ ctrl_reg = ixgb_mac_reset(hw);
+
+ DEBUGOUT("Issuing an EE reset to MAC\n");
+#ifdef HP_ZX1
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#else
+ IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#endif
+
+ /* Delay a few ms just to allow the reset to complete */
+ msec_delay(IXGB_DELAY_AFTER_EE_RESET);
+
+ if (ixgb_get_eeprom_data(hw) == FALSE) {
+ return(FALSE);
+ }
+
+ /* Use the device id to determine the type of phy/transceiver. */
+ hw->device_id = ixgb_get_ee_device_id(hw);
+ hw->phy_type = ixgb_identify_phy(hw);
+
+ /* Setup the receive addresses.
+ * Receive Address Registers (RARs 0 - 15).
+ */
+ ixgb_init_rx_addrs(hw);
+
+ /*
+ * Check that a valid MAC address has been set.
+ * If it is not valid, we fail hardware init.
+ */
+ if (!mac_addr_valid(hw->curr_mac_addr)) {
+ DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ return(FALSE);
+ }
+
+ /* tell the routines in this file they can access hardware again */
+ hw->adapter_stopped = FALSE;
+
+ /* Fill in the bus_info structure */
+ ixgb_get_bus_info(hw);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+ /* Zero out the VLAN Filter Table Array */
+ ixgb_clear_vfta(hw);
+
+ /* Zero all of the hardware counters */
+ ixgb_clear_hw_cntrs(hw);
+
+ /* Call a subroutine to setup flow control. */
+ status = ixgb_setup_fc(hw);
+
+ /* 82597EX errata: Call check-for-link in case lane deskew is locked */
+ ixgb_check_for_link(hw);
+
+ return (status);
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+void
+ixgb_init_rx_addrs(struct ixgb_hw *hw)
+{
+ uint32_t i;
+
+ DEBUGFUNC("ixgb_init_rx_addrs");
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (!mac_addr_valid(hw->curr_mac_addr)) {
+
+ /* Get the MAC address from the eeprom for later reference */
+ ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
+
+ DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
+ hw->curr_mac_addr[0],
+ hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n",
+ hw->curr_mac_addr[3],
+ hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ } else {
+
+ /* Setup the receive address. */
+ DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+ DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+ hw->curr_mac_addr[0],
+ hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n",
+ hw->curr_mac_addr[3],
+ hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+
+ ixgb_rar_set(hw, hw->curr_mac_addr, 0);
+ }
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ * Updates the MAC's list of multicast addresses.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr_list - the list of new multicast addresses
+ * mc_addr_count - number of addresses
+ * pad - number of bytes between addresses in the list
+ *
+ * The given list replaces any existing list. Clears the last 15 receive
+ * address registers and the multicast table. Uses receive address registers
+ * for the first 15 multicast addresses, and hashes the rest into the
+ * multicast table.
+ *****************************************************************************/
+void
+ixgb_mc_addr_list_update(struct ixgb_hw *hw,
+ uint8_t *mc_addr_list,
+ uint32_t mc_addr_count,
+ uint32_t pad)
+{
+ uint32_t hash_value;
+ uint32_t i;
+ uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
+
+ DEBUGFUNC("ixgb_mc_addr_list_update");
+
+ /* Set the new number of MC addresses that we are being requested to use. */
+ hw->num_mc_addrs = mc_addr_count;
+
+ /* Clear RAR[1-15] */
+ DEBUGOUT(" Clearing RAR[1-15]\n");
+ for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+
+ /* Clear the MTA */
+ DEBUGOUT(" Clearing MTA\n");
+ for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ }
+
+ /* Add the new addresses */
+ for(i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
+ 1],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
+ 2],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
+ 3],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
+ 4],
+ mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
+ 5]);
+
+		/* Place this multicast address in the RAR if there is room,
+		 * else put it in the MTA
+		 */
+ if(rar_used_count < IXGB_RAR_ENTRIES) {
+ ixgb_rar_set(hw,
+ mc_addr_list +
+ (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
+ rar_used_count);
+ DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ rar_used_count++;
+ } else {
+ hash_value = ixgb_hash_mc_addr(hw,
+ mc_addr_list +
+ (i *
+ (IXGB_ETH_LENGTH_OF_ADDRESS
+ + pad)));
+
+ DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+
+ ixgb_mta_set(hw, hash_value);
+ }
+ }
+
+ DEBUGOUT("MC Update Complete\n");
+ return;
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * Returns:
+ * The hash value
+ *****************************************************************************/
+static uint32_t
+ixgb_hash_mc_addr(struct ixgb_hw *hw,
+ uint8_t *mc_addr)
+{
+ uint32_t hash_value = 0;
+
+ DEBUGFUNC("ixgb_hash_mc_addr");
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB - According to H/W docs */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ break;
+ case 1: /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ break;
+ case 2: /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ break;
+ case 3: /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ break;
+ default:
+ /* Invalid mc_filter_type, what should we do? */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ return (hash_value);
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+static void
+ixgb_mta_set(struct ixgb_hw *hw,
+ uint32_t hash_value)
+{
+ uint32_t hash_bit, hash_reg;
+ uint32_t mta_reg;
+
+ /* The MTA is a register array of 128 32-bit registers.
+ * It is treated like an array of 4096 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+	 * register is determined by the lower 5 bits of the value.
+ */
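+	/* For example, hash_value 0x563 (as computed for the sample address
+	 * in ixgb_hash_mc_addr above) selects MTA register 0x2B, bit 3. */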
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+
+ mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+ mta_reg |= (1 << hash_bit);
+
+ IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
+
+ return;
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+ixgb_rar_set(struct ixgb_hw *hw,
+ uint8_t *addr,
+ uint32_t index)
+{
+ uint32_t rar_low, rar_high;
+
+ DEBUGFUNC("ixgb_rar_set");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((uint32_t) addr[0] |
+ ((uint32_t)addr[1] << 8) |
+ ((uint32_t)addr[2] << 16) |
+ ((uint32_t)addr[3] << 24));
+
+ rar_high = ((uint32_t) addr[4] |
+ ((uint32_t)addr[5] << 8) |
+ IXGB_RAH_AV);
+
+ IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ return;
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+ixgb_write_vfta(struct ixgb_hw *hw,
+ uint32_t offset,
+ uint32_t value)
+{
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ return;
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_clear_vfta(struct ixgb_hw *hw)
+{
+ uint32_t offset;
+
+ for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+ return;
+}
+
+/******************************************************************************
+ * Configures the flow control settings based on SW configuration.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+
+boolean_t
+ixgb_setup_fc(struct ixgb_hw *hw)
+{
+ uint32_t ctrl_reg;
+ uint32_t pap_reg = 0; /* by default, assume no pause time */
+ boolean_t status = TRUE;
+
+ DEBUGFUNC("ixgb_setup_fc");
+
+ /* Get the current control reg 0 settings */
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
+ ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+
+ /* The possible values of the "flow_control" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.type) {
+ case ixgb_fc_none: /* 0 */
+ /* Set CMDC bit to disable Rx Flow control */
+ ctrl_reg |= (IXGB_CTRL0_CMDC);
+ break;
+ case ixgb_fc_rx_pause: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled.
+ */
+ ctrl_reg |= (IXGB_CTRL0_RPE);
+ break;
+ case ixgb_fc_tx_pause: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ ctrl_reg |= (IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ case ixgb_fc_full: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ default:
+ /* We should never get here. The value should be 0-3. */
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* Write the new settings */
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+
+ if (pap_reg != 0) {
+ IXGB_WRITE_REG(hw, PAP, pap_reg);
+ }
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if(!(hw->fc.type & ixgb_fc_tx_pause)) {
+ IXGB_WRITE_REG(hw, FCRTL, 0);
+ IXGB_WRITE_REG(hw, FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of XON
+ * frames. */
+ if(hw->fc.send_xon) {
+ IXGB_WRITE_REG(hw, FCRTL,
+ (hw->fc.low_water | IXGB_FCRTL_XONE));
+ } else {
+ IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
+ }
+ IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
+ }
+ return (status);
+}
+
+/******************************************************************************
+ * Reads a word from a device over the Management Data Interface (MDI) bus.
+ * This interface is used to manage Physical layer devices.
+ *
+ * hw - Struct containing variables accessed by hw code
+ * reg_address - Offset of device register being read.
+ * phy_address - Address of device on MDI.
+ *
+ * Returns: Data word (16 bits) from MDI device.
+ *
+ * The 82597EX has support for several MDI access methods. This routine
+ * uses the new protocol MDI Single Command and Address Operation.
+ * This requires that first an address cycle command is sent, followed by a
+ * read command.
+ *****************************************************************************/
+uint16_t
+ixgb_read_phy_reg(struct ixgb_hw *hw,
+ uint32_t reg_address,
+ uint32_t phy_address,
+ uint32_t device_type)
+{
+ uint32_t i;
+ uint32_t data;
+ uint32_t command = 0;
+
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /**************************************************************
+ ** Check every 10 usec to see if the address cycle completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** This may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
+
+ for(i = 0; i < 10; i++)
+ {
+ udelay(10);
+
+ command = IXGB_READ_REG(hw, MSCA);
+
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Address cycle complete, setup and write the read command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /**************************************************************
+ ** Check every 10 usec to see if the read command completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** The read may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
+
+ for(i = 0; i < 10; i++)
+ {
+ udelay(10);
+
+ command = IXGB_READ_REG(hw, MSCA);
+
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Operation is complete, get the data from the MDIO Read/Write Data
+ * register and return.
+ */
+ data = IXGB_READ_REG(hw, MSRWD);
+ data >>= IXGB_MSRWD_READ_DATA_SHIFT;
+ return((uint16_t) data);
+}
+
+/******************************************************************************
+ * Writes a word to a device over the Management Data Interface (MDI) bus.
+ * This interface is used to manage Physical layer devices.
+ *
+ * hw - Struct containing variables accessed by hw code
+ * reg_address - Offset of device register being written.
+ * phy_address - Address of device on MDI.
+ * device_type - Also known as the Device ID or DID.
+ * data - 16-bit value to be written
+ *
+ * Returns: void.
+ *
+ * The 82597EX has support for several MDI access methods. This routine
+ * uses the new protocol MDI Single Command and Address Operation.
+ * This requires that first an address cycle command is sent, followed by a
+ * write command.
+ *****************************************************************************/
+void
+ixgb_write_phy_reg(struct ixgb_hw *hw,
+ uint32_t reg_address,
+ uint32_t phy_address,
+ uint32_t device_type,
+ uint16_t data)
+{
+ uint32_t i;
+ uint32_t command = 0;
+
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+
+ /* Put the data in the MDIO Read/Write Data register */
+ IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /**************************************************************
+ ** Check every 10 usec to see if the address cycle completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** This may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
+
+ for(i = 0; i < 10; i++)
+ {
+ udelay(10);
+
+ command = IXGB_READ_REG(hw, MSCA);
+
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Address cycle complete, setup and write the write command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /**************************************************************
+ ** Check every 10 usec to see if the write command completed
+ ** The COMMAND bit will clear when the operation is complete.
+ ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
+ ** from the CPU Write to the Ready bit assertion.
+ **************************************************************/
+
+ for(i = 0; i < 10; i++)
+ {
+ udelay(10);
+
+ command = IXGB_READ_REG(hw, MSCA);
+
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Operation is complete, return. */
+}
+
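+/* Illustrative sketch, not part of the original driver: the four polling
+ * loops in ixgb_read_phy_reg() and ixgb_write_phy_reg() repeat the same
+ * "wait for IXGB_MSCA_MDI_COMMAND to self-clear" pattern.  A helper along
+ * these lines could factor it out; the name and return convention are
+ * invented for illustration only.
+ */
+#if 0
+static boolean_t
+example_mdi_wait_for_completion(struct ixgb_hw *hw)
+{
+	uint32_t command;
+	uint32_t i;
+
+	/* The operation may take up to 64 usecs; poll every 10 usecs and
+	 * give up after roughly 100 usecs.
+	 */
+	for (i = 0; i < 10; i++) {
+		udelay(10);
+		command = IXGB_READ_REG(hw, MSCA);
+		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+			return TRUE;
+	}
+	return FALSE;	/* the command bit never cleared */
+}
+#endif
+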
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by hw code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+void
+ixgb_check_for_link(struct ixgb_hw *hw)
+{
+ uint32_t status_reg;
+ uint32_t xpcss_reg;
+
+ DEBUGFUNC("ixgb_check_for_link");
+
+ xpcss_reg = IXGB_READ_REG(hw, XPCSS);
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ hw->link_up = TRUE;
+ } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ hw->link_up = ixgb_link_reset(hw);
+ } else {
+ /*
+ * 82597EX errata. Since the lane deskew problem may prevent
+ * link, reset the link before reporting link down.
+ */
+ hw->link_up = ixgb_link_reset(hw);
+ }
+ /* Anything else for 10 Gig?? */
+}
+
+/******************************************************************************
+ * Check for a bad link condition that may have occurred.
+ * The indication is that the RFC / LFC registers may be incrementing
+ * continually. A full adapter reset is required to recover.
+ *
+ * hw - Struct containing variables accessed by hw code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
+{
+ uint32_t newLFC, newRFC;
+ boolean_t bad_link_returncode = FALSE;
+
+ if (hw->phy_type == ixgb_phy_type_txn17401) {
+ newLFC = IXGB_READ_REG(hw, LFC);
+ newRFC = IXGB_READ_REG(hw, RFC);
+ if ((hw->lastLFC + 250 < newLFC)
+ || (hw->lastRFC + 250 < newRFC)) {
+ DEBUGOUT
+ ("BAD LINK! too many LFC/RFC since last check\n");
+ bad_link_returncode = TRUE;
+ }
+ hw->lastLFC = newLFC;
+ hw->lastRFC = newRFC;
+ }
+
+ return bad_link_returncode;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
+{
+ volatile uint32_t temp_reg;
+
+ DEBUGFUNC("ixgb_clear_hw_cntrs");
+
+ /* if we are stopped or resetting exit gracefully */
+ if(hw->adapter_stopped) {
+ DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ return;
+ }
+
+ temp_reg = IXGB_READ_REG(hw, TPRL);
+ temp_reg = IXGB_READ_REG(hw, TPRH);
+ temp_reg = IXGB_READ_REG(hw, GPRCL);
+ temp_reg = IXGB_READ_REG(hw, GPRCH);
+ temp_reg = IXGB_READ_REG(hw, BPRCL);
+ temp_reg = IXGB_READ_REG(hw, BPRCH);
+ temp_reg = IXGB_READ_REG(hw, MPRCL);
+ temp_reg = IXGB_READ_REG(hw, MPRCH);
+ temp_reg = IXGB_READ_REG(hw, UPRCL);
+ temp_reg = IXGB_READ_REG(hw, UPRCH);
+ temp_reg = IXGB_READ_REG(hw, VPRCL);
+ temp_reg = IXGB_READ_REG(hw, VPRCH);
+ temp_reg = IXGB_READ_REG(hw, JPRCL);
+ temp_reg = IXGB_READ_REG(hw, JPRCH);
+ temp_reg = IXGB_READ_REG(hw, GORCL);
+ temp_reg = IXGB_READ_REG(hw, GORCH);
+ temp_reg = IXGB_READ_REG(hw, TORL);
+ temp_reg = IXGB_READ_REG(hw, TORH);
+ temp_reg = IXGB_READ_REG(hw, RNBC);
+ temp_reg = IXGB_READ_REG(hw, RUC);
+ temp_reg = IXGB_READ_REG(hw, ROC);
+ temp_reg = IXGB_READ_REG(hw, RLEC);
+ temp_reg = IXGB_READ_REG(hw, CRCERRS);
+ temp_reg = IXGB_READ_REG(hw, ICBC);
+ temp_reg = IXGB_READ_REG(hw, ECBC);
+ temp_reg = IXGB_READ_REG(hw, MPC);
+ temp_reg = IXGB_READ_REG(hw, TPTL);
+ temp_reg = IXGB_READ_REG(hw, TPTH);
+ temp_reg = IXGB_READ_REG(hw, GPTCL);
+ temp_reg = IXGB_READ_REG(hw, GPTCH);
+ temp_reg = IXGB_READ_REG(hw, BPTCL);
+ temp_reg = IXGB_READ_REG(hw, BPTCH);
+ temp_reg = IXGB_READ_REG(hw, MPTCL);
+ temp_reg = IXGB_READ_REG(hw, MPTCH);
+ temp_reg = IXGB_READ_REG(hw, UPTCL);
+ temp_reg = IXGB_READ_REG(hw, UPTCH);
+ temp_reg = IXGB_READ_REG(hw, VPTCL);
+ temp_reg = IXGB_READ_REG(hw, VPTCH);
+ temp_reg = IXGB_READ_REG(hw, JPTCL);
+ temp_reg = IXGB_READ_REG(hw, JPTCH);
+ temp_reg = IXGB_READ_REG(hw, GOTCL);
+ temp_reg = IXGB_READ_REG(hw, GOTCH);
+ temp_reg = IXGB_READ_REG(hw, TOTL);
+ temp_reg = IXGB_READ_REG(hw, TOTH);
+ temp_reg = IXGB_READ_REG(hw, DC);
+ temp_reg = IXGB_READ_REG(hw, PLT64C);
+ temp_reg = IXGB_READ_REG(hw, TSCTC);
+ temp_reg = IXGB_READ_REG(hw, TSCTFC);
+ temp_reg = IXGB_READ_REG(hw, IBIC);
+ temp_reg = IXGB_READ_REG(hw, RFC);
+ temp_reg = IXGB_READ_REG(hw, LFC);
+ temp_reg = IXGB_READ_REG(hw, PFRC);
+ temp_reg = IXGB_READ_REG(hw, PFTC);
+ temp_reg = IXGB_READ_REG(hw, MCFRC);
+ temp_reg = IXGB_READ_REG(hw, MCFTC);
+ temp_reg = IXGB_READ_REG(hw, XONRXC);
+ temp_reg = IXGB_READ_REG(hw, XONTXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFRXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFTXC);
+ temp_reg = IXGB_READ_REG(hw, RJC);
+ return;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_led_on(struct ixgb_hw *hw)
+{
+ uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* To turn on the LED, clear software-definable pin 0 (SDP0). */
+ ctrl0_reg &= ~IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+ return;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_led_off(struct ixgb_hw *hw)
+{
+ uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* To turn off the LED, set software-definable pin 0 (SDP0). */
+ ctrl0_reg |= IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+ return;
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_get_bus_info(struct ixgb_hw *hw)
+{
+ uint32_t status_reg;
+
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
+ ixgb_bus_type_pcix : ixgb_bus_type_pci;
+
+ if (hw->bus.type == ixgb_bus_type_pci) {
+ hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
+ ixgb_bus_speed_66 : ixgb_bus_speed_33;
+ } else {
+ switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
+ case IXGB_STATUS_PCIX_SPD_66:
+ hw->bus.speed = ixgb_bus_speed_66;
+ break;
+ case IXGB_STATUS_PCIX_SPD_100:
+ hw->bus.speed = ixgb_bus_speed_100;
+ break;
+ case IXGB_STATUS_PCIX_SPD_133:
+ hw->bus.speed = ixgb_bus_speed_133;
+ break;
+ default:
+ hw->bus.speed = ixgb_bus_speed_reserved;
+ break;
+ }
+ }
+
+ hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
+ ixgb_bus_width_64 : ixgb_bus_width_32;
+
+ return;
+}
+
+/******************************************************************************
+ * Tests a MAC address to ensure it is a valid Individual Address
+ *
+ * mac_addr - pointer to MAC address.
+ *
+ *****************************************************************************/
+boolean_t
+mac_addr_valid(uint8_t *mac_addr)
+{
+ boolean_t is_valid = TRUE;
+ DEBUGFUNC("mac_addr_valid");
+
+ /* Make sure it is not a multicast address */
+ if (IS_MULTICAST(mac_addr)) {
+ DEBUGOUT("MAC address is multicast\n");
+ is_valid = FALSE;
+ }
+ /* Not a broadcast address */
+ else if (IS_BROADCAST(mac_addr)) {
+ DEBUGOUT("MAC address is broadcast\n");
+ is_valid = FALSE;
+ }
+ /* Reject the zero address */
+ else if (mac_addr[0] == 0 &&
+ mac_addr[1] == 0 &&
+ mac_addr[2] == 0 &&
+ mac_addr[3] == 0 &&
+ mac_addr[4] == 0 &&
+ mac_addr[5] == 0) {
+ DEBUGOUT("MAC address is all zeros\n");
+ is_valid = FALSE;
+ }
+ return (is_valid);
+}
+
+/******************************************************************************
+ * Resets the 10GbE link. Waits the settle time and returns the state of
+ * the link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+boolean_t
+ixgb_link_reset(struct ixgb_hw *hw)
+{
+ boolean_t link_status = FALSE;
+ uint8_t wait_retries = MAX_RESET_ITERATIONS;
+ uint8_t lrst_retries = MAX_RESET_ITERATIONS;
+
+ do {
+ /* Reset the link */
+ IXGB_WRITE_REG(hw, CTRL0,
+ IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
+
+ /* Wait for link-up and lane re-alignment */
+ do {
+ udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
+ link_status =
+ ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
+ && (IXGB_READ_REG(hw, XPCSS) &
+ IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
+ } while (!link_status && --wait_retries);
+
+ } while (!link_status && --lrst_retries);
+
+ return link_status;
+}
+
+/******************************************************************************
+ * Resets the 10GbE optics module.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_optics_reset(struct ixgb_hw *hw)
+{
+ if (hw->phy_type == ixgb_phy_type_txn17401) {
+ uint16_t mdio_reg;
+
+ ixgb_write_phy_reg(hw,
+ MDIO_PMA_PMD_CR1,
+ IXGB_PHY_ADDRESS,
+ MDIO_PMA_PMD_DID,
+ MDIO_PMA_PMD_CR1_RESET);
+
+ mdio_reg = ixgb_read_phy_reg( hw,
+ MDIO_PMA_PMD_CR1,
+ IXGB_PHY_ADDRESS,
+ MDIO_PMA_PMD_DID);
+ }
+
+ return;
+}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
new file mode 100644
index 000000000000..97898efe7cc8
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -0,0 +1,847 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_HW_H_
+#define _IXGB_HW_H_
+
+#include "ixgb_osdep.h"
+
+/* Enums */
+typedef enum {
+ ixgb_mac_unknown = 0,
+ ixgb_82597,
+ ixgb_num_macs
+} ixgb_mac_type;
+
+/* Types of physical layer modules */
+typedef enum {
+ ixgb_phy_type_unknown = 0,
+ ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17401 /* 1310nm, SM fiber, XENPAK transceiver */
+} ixgb_phy_type;
+
+/* XPAK transceiver vendors, for the SR adapters */
+typedef enum {
+ ixgb_xpak_vendor_intel,
+ ixgb_xpak_vendor_infineon
+} ixgb_xpak_vendor;
+
+/* Media Types */
+typedef enum {
+ ixgb_media_type_unknown = 0,
+ ixgb_media_type_fiber = 1,
+ ixgb_num_media_types
+} ixgb_media_type;
+
+/* Flow Control Settings */
+typedef enum {
+ ixgb_fc_none = 0,
+ ixgb_fc_rx_pause = 1,
+ ixgb_fc_tx_pause = 2,
+ ixgb_fc_full = 3,
+ ixgb_fc_default = 0xFF
+} ixgb_fc_type;
+
+/* PCI bus types */
+typedef enum {
+ ixgb_bus_type_unknown = 0,
+ ixgb_bus_type_pci,
+ ixgb_bus_type_pcix
+} ixgb_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ ixgb_bus_speed_unknown = 0,
+ ixgb_bus_speed_33,
+ ixgb_bus_speed_66,
+ ixgb_bus_speed_100,
+ ixgb_bus_speed_133,
+ ixgb_bus_speed_reserved
+} ixgb_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ ixgb_bus_width_unknown = 0,
+ ixgb_bus_width_32,
+ ixgb_bus_width_64
+} ixgb_bus_width;
+
+#define IXGB_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
+
+#define SPEED_10000 10000
+#define FULL_DUPLEX 2
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
+
+#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
+#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
+#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
+
+#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
+ /* NOTE: this is MICROSECONDS */
+#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
+
+/* General Registers */
+#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
+#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
+#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
+#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
+#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
+
+/* Interrupt */
+#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
+#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
+#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
+#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
+
+/* Receive */
+#define IXGB_RCTL 0x00100 /* RX Control - RW */
+#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
+#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
+#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
+#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
+#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
+#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
+#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
+#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
+#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
+#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
+#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
+#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
+#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
+#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
+#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
+#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
+#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Transmit */
+#define IXGB_TCTL 0x00600 /* TX Control - RW */
+#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
+#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
+#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
+#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
+#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
+#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
+#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
+#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
+#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
+#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
+
+/* Physical */
+#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
+#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
+#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
+#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
+#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
+#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
+#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
+#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
+#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
+#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
+#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
+#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
+#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
+
+/* Wake-up */
+#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
+#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
+#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
+#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
+#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
+
+/* Statistics */
+#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
+#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
+#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
+#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
+#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
+#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
+#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
+#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
+#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
+#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
+#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
+#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
+#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
+#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
+#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
+#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
+#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
+#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
+#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
+#define IXGB_RUC 0x02050 /* Receive Undersize Count */
+#define IXGB_ROC 0x02058 /* Receive Oversize Count */
+#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
+#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
+#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
+#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
+#define IXGB_MPC 0x02080 /* Missed Packets Count */
+#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
+#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
+#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
+#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
+#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
+#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
+#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
+#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
+#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
+#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
+#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
+#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
+#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
+#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
+#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
+#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
+#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
+#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
+#define IXGB_DC 0x02148 /* Defer Count */
+#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
+#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
+#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
+#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
+#define IXGB_RFC 0x02188 /* Remote Fault Count */
+#define IXGB_LFC 0x02190 /* Local Fault Count */
+#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
+#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
+#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
+#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
+#define IXGB_XONRXC 0x021B8 /* XON Received Count */
+#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
+#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
+#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
+#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
+
+/* CTRL0 Bit Masks */
+#define IXGB_CTRL0_LRST 0x00000008
+#define IXGB_CTRL0_JFE 0x00000010
+#define IXGB_CTRL0_XLE 0x00000020
+#define IXGB_CTRL0_MDCS 0x00000040
+#define IXGB_CTRL0_CMDC 0x00000080
+#define IXGB_CTRL0_SDP0 0x00040000
+#define IXGB_CTRL0_SDP1 0x00080000
+#define IXGB_CTRL0_SDP2 0x00100000
+#define IXGB_CTRL0_SDP3 0x00200000
+#define IXGB_CTRL0_SDP0_DIR 0x00400000
+#define IXGB_CTRL0_SDP1_DIR 0x00800000
+#define IXGB_CTRL0_SDP2_DIR 0x01000000
+#define IXGB_CTRL0_SDP3_DIR 0x02000000
+#define IXGB_CTRL0_RST 0x04000000
+#define IXGB_CTRL0_RPE 0x08000000
+#define IXGB_CTRL0_TPE 0x10000000
+#define IXGB_CTRL0_VME 0x40000000
+
+/* CTRL1 Bit Masks */
+#define IXGB_CTRL1_GPI0_EN 0x00000001
+#define IXGB_CTRL1_GPI1_EN 0x00000002
+#define IXGB_CTRL1_GPI2_EN 0x00000004
+#define IXGB_CTRL1_GPI3_EN 0x00000008
+#define IXGB_CTRL1_SDP4 0x00000010
+#define IXGB_CTRL1_SDP5 0x00000020
+#define IXGB_CTRL1_SDP6 0x00000040
+#define IXGB_CTRL1_SDP7 0x00000080
+#define IXGB_CTRL1_SDP4_DIR 0x00000100
+#define IXGB_CTRL1_SDP5_DIR 0x00000200
+#define IXGB_CTRL1_SDP6_DIR 0x00000400
+#define IXGB_CTRL1_SDP7_DIR 0x00000800
+#define IXGB_CTRL1_EE_RST 0x00002000
+#define IXGB_CTRL1_RO_DIS 0x00020000
+#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
+#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
+#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
+#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
+#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
+
+/* STATUS Bit Masks */
+#define IXGB_STATUS_LU 0x00000002
+#define IXGB_STATUS_AIP 0x00000004
+#define IXGB_STATUS_TXOFF 0x00000010
+#define IXGB_STATUS_XAUIME 0x00000020
+#define IXGB_STATUS_RES 0x00000040
+#define IXGB_STATUS_RIS 0x00000080
+#define IXGB_STATUS_RIE 0x00000100
+#define IXGB_STATUS_RLF 0x00000200
+#define IXGB_STATUS_RRF 0x00000400
+#define IXGB_STATUS_PCI_SPD 0x00000800
+#define IXGB_STATUS_BUS64 0x00001000
+#define IXGB_STATUS_PCIX_MODE 0x00002000
+#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
+#define IXGB_STATUS_PCIX_SPD_66 0x00000000
+#define IXGB_STATUS_PCIX_SPD_100 0x00004000
+#define IXGB_STATUS_PCIX_SPD_133 0x00008000
+#define IXGB_STATUS_REV_ID_MASK 0x000F0000
+#define IXGB_STATUS_REV_ID_SHIFT 16
+
+/* EECD Bit Masks */
+#define IXGB_EECD_SK 0x00000001
+#define IXGB_EECD_CS 0x00000002
+#define IXGB_EECD_DI 0x00000004
+#define IXGB_EECD_DO 0x00000008
+#define IXGB_EECD_FWE_MASK 0x00000030
+#define IXGB_EECD_FWE_DIS 0x00000010
+#define IXGB_EECD_FWE_EN 0x00000020
+
+/* MFS */
+#define IXGB_MFS_SHIFT 16
+
+/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
+#define IXGB_INT_TXDW 0x00000001
+#define IXGB_INT_TXQE 0x00000002
+#define IXGB_INT_LSC 0x00000004
+#define IXGB_INT_RXSEQ 0x00000008
+#define IXGB_INT_RXDMT0 0x00000010
+#define IXGB_INT_RXO 0x00000040
+#define IXGB_INT_RXT0 0x00000080
+#define IXGB_INT_AUTOSCAN 0x00000200
+#define IXGB_INT_GPI0 0x00000800
+#define IXGB_INT_GPI1 0x00001000
+#define IXGB_INT_GPI2 0x00002000
+#define IXGB_INT_GPI3 0x00004000
+
+/* RCTL Bit Masks */
+#define IXGB_RCTL_RXEN 0x00000002
+#define IXGB_RCTL_SBP 0x00000004
+#define IXGB_RCTL_UPE 0x00000008
+#define IXGB_RCTL_MPE 0x00000010
+#define IXGB_RCTL_RDMTS_MASK 0x00000300
+#define IXGB_RCTL_RDMTS_1_2 0x00000000
+#define IXGB_RCTL_RDMTS_1_4 0x00000100
+#define IXGB_RCTL_RDMTS_1_8 0x00000200
+#define IXGB_RCTL_MO_MASK 0x00003000
+#define IXGB_RCTL_MO_47_36 0x00000000
+#define IXGB_RCTL_MO_46_35 0x00001000
+#define IXGB_RCTL_MO_45_34 0x00002000
+#define IXGB_RCTL_MO_43_32 0x00003000
+#define IXGB_RCTL_MO_SHIFT 12
+#define IXGB_RCTL_BAM 0x00008000
+#define IXGB_RCTL_BSIZE_MASK 0x00030000
+#define IXGB_RCTL_BSIZE_2048 0x00000000
+#define IXGB_RCTL_BSIZE_4096 0x00010000
+#define IXGB_RCTL_BSIZE_8192 0x00020000
+#define IXGB_RCTL_BSIZE_16384 0x00030000
+#define IXGB_RCTL_VFE 0x00040000
+#define IXGB_RCTL_CFIEN 0x00080000
+#define IXGB_RCTL_CFI 0x00100000
+#define IXGB_RCTL_RPDA_MASK 0x00600000
+#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
+#define IXGB_RCTL_MC_ONLY 0x00400000
+#define IXGB_RCTL_CFF 0x00800000
+#define IXGB_RCTL_SECRC 0x04000000
+#define IXGB_RDT_FPDB 0x80000000
+
+#define IXGB_RCTL_IDLE_RX_UNIT 0
+
+/* FCRTL Bit Masks */
+#define IXGB_FCRTL_XONE 0x80000000
+
+/* RXDCTL Bit Masks */
+#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
+#define IXGB_RXDCTL_PTHRESH_SHIFT 0
+#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
+#define IXGB_RXDCTL_HTHRESH_SHIFT 9
+#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
+#define IXGB_RXDCTL_WTHRESH_SHIFT 18
+
+/* RAIDC Bit Masks */
+#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
+#define IXGB_RAIDC_DELAY_MASK 0x000FF800
+#define IXGB_RAIDC_DELAY_SHIFT 11
+#define IXGB_RAIDC_POLL_MASK 0x1FF00000
+#define IXGB_RAIDC_POLL_SHIFT 20
+#define IXGB_RAIDC_RXT_GATE 0x40000000
+#define IXGB_RAIDC_EN 0x80000000
+
+#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
+#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
+#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
+#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
+
+/* RXCSUM Bit Masks */
+#define IXGB_RXCSUM_IPOFL 0x00000100
+#define IXGB_RXCSUM_TUOFL 0x00000200
+
+/* RAH Bit Masks */
+#define IXGB_RAH_ASEL_MASK 0x00030000
+#define IXGB_RAH_ASEL_DEST 0x00000000
+#define IXGB_RAH_ASEL_SRC 0x00010000
+#define IXGB_RAH_AV 0x80000000
+
+/* TCTL Bit Masks */
+#define IXGB_TCTL_TCE 0x00000001
+#define IXGB_TCTL_TXEN 0x00000002
+#define IXGB_TCTL_TPDE 0x00000004
+
+#define IXGB_TCTL_IDLE_TX_UNIT 0
+
+/* TXDCTL Bit Masks */
+#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
+#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
+#define IXGB_TXDCTL_HTHRESH_SHIFT 8
+#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
+#define IXGB_TXDCTL_WTHRESH_SHIFT 16
+
+/* TSPMT Bit Masks */
+#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
+#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
+#define IXGB_TSPMT_TSPBP_SHIFT 16
+
+/* PAP Bit Masks */
+#define IXGB_PAP_TXPC_MASK 0x0000FFFF
+#define IXGB_PAP_TXPV_MASK 0x000F0000
+#define IXGB_PAP_TXPV_10G 0x00000000
+#define IXGB_PAP_TXPV_1G 0x00010000
+#define IXGB_PAP_TXPV_2G 0x00020000
+#define IXGB_PAP_TXPV_3G 0x00030000
+#define IXGB_PAP_TXPV_4G 0x00040000
+#define IXGB_PAP_TXPV_5G 0x00050000
+#define IXGB_PAP_TXPV_6G 0x00060000
+#define IXGB_PAP_TXPV_7G 0x00070000
+#define IXGB_PAP_TXPV_8G 0x00080000
+#define IXGB_PAP_TXPV_9G 0x00090000
+#define IXGB_PAP_TXPV_WAN 0x000F0000
+
+/* PCSC1 Bit Masks */
+#define IXGB_PCSC1_LOOPBACK 0x00004000
+
+/* PCSC2 Bit Masks */
+#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
+#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
+
+/* PCSS1 Bit Masks */
+#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
+#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
+
+/* PCSS2 Bit Masks */
+#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
+#define IXGB_PCSS2_DEV_PRES 0x00004000
+#define IXGB_PCSS2_TX_LF 0x00000800
+#define IXGB_PCSS2_RX_LF 0x00000400
+#define IXGB_PCSS2_10GBW 0x00000004
+#define IXGB_PCSS2_10GBX 0x00000002
+#define IXGB_PCSS2_10GBR 0x00000001
+
+/* XPCSS Bit Masks */
+#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
+#define IXGB_XPCSS_PATTERN_TEST 0x00000800
+#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
+#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
+#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
+#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
+
+/* XPCSTC Bit Masks */
+#define IXGB_XPCSTC_BERT_TRIG 0x00200000
+#define IXGB_XPCSTC_BERT_SST 0x00100000
+#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
+#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
+#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
+#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
+#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
+
+/* MSCA bit Masks */
+/* New Protocol Address */
+#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
+#define IXGB_MSCA_NP_ADDR_SHIFT 0
+/* Either Device Type or Register Address, depending on ST_CODE */
+#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
+#define IXGB_MSCA_DEV_TYPE_SHIFT 16
+#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
+#define IXGB_MSCA_PHY_ADDR_SHIFT 21
+#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
+/* OP_CODE == 00, Address cycle, New Protocol */
+/* OP_CODE == 01, Write operation */
+/* OP_CODE == 10, Read operation */
+/* OP_CODE == 11, Read, auto increment, New Protocol */
+#define IXGB_MSCA_ADDR_CYCLE 0x00000000
+#define IXGB_MSCA_WRITE 0x04000000
+#define IXGB_MSCA_READ 0x08000000
+#define IXGB_MSCA_READ_AUTOINC 0x0C000000
+#define IXGB_MSCA_OP_CODE_SHIFT 26
+#define IXGB_MSCA_ST_CODE_MASK 0x30000000
+/* ST_CODE == 00, New Protocol */
+/* ST_CODE == 01, Old Protocol */
+#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
+#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
+#define IXGB_MSCA_ST_CODE_SHIFT 28
+/* Initiate command, self-clearing when command completes */
+#define IXGB_MSCA_MDI_COMMAND 0x40000000
+/* MDI In Progress Enable. */
+#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
+
+/* MSRWD bit masks */
+#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGB_MSRWD_READ_DATA_SHIFT 16
+
+/* Definitions for the optics devices on the MDIO bus. */
+#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
+
+/* Standard five-bit Device IDs. See IEEE 802.3ae, clause 45 */
+#define MDIO_PMA_PMD_DID 0x01
+#define MDIO_WIS_DID 0x02
+#define MDIO_PCS_DID 0x03
+#define MDIO_XGXS_DID 0x04
+
+/* Standard PMA/PMD registers and bit definitions. */
+/* Note: This is a very limited set of definitions, */
+/* only implemented features are defined. */
+#define MDIO_PMA_PMD_CR1 0x0000
+#define MDIO_PMA_PMD_CR1_RESET 0x8000
+
+#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
+
+/* Vendor-specific MDIO registers */
+#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
+#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
+
+#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
+#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
+#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
+
+/* Layout of a single receive descriptor. The controller assumes that this
+ * structure is packed into 16 bytes, which is a safe assumption with most
+ * compilers. However, some compilers may insert padding between the fields,
+ * in which case the structure must be packed in some compiler-specific
+ * manner. */
+struct ixgb_rx_desc {
+ uint64_t buff_addr;
+ uint16_t length;
+ uint16_t reserved;
+ uint8_t status;
+ uint8_t errors;
+ uint16_t special;
+};
+
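+/* Illustrative sketch, not part of the original driver: the layout comment
+ * above depends on struct ixgb_rx_desc occupying exactly 16 bytes.  A
+ * compile-time check in the spirit of the kernel's BUILD_BUG_ON() could make
+ * that assumption explicit; the macro below is a generic negative-array-size
+ * trick written only for illustration.
+ */
+#if 0
+#define IXGB_SIZE_CHECK(cond) extern char ixgb_size_check[(cond) ? 1 : -1]
+IXGB_SIZE_CHECK(sizeof(struct ixgb_rx_desc) == 16);
+#endif
+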
+#define IXGB_RX_DESC_STATUS_DD 0x01
+#define IXGB_RX_DESC_STATUS_EOP 0x02
+#define IXGB_RX_DESC_STATUS_IXSM 0x04
+#define IXGB_RX_DESC_STATUS_VP 0x08
+#define IXGB_RX_DESC_STATUS_TCPCS 0x20
+#define IXGB_RX_DESC_STATUS_IPCS 0x40
+#define IXGB_RX_DESC_STATUS_PIF 0x80
+
+#define IXGB_RX_DESC_ERRORS_CE 0x01
+#define IXGB_RX_DESC_ERRORS_SE 0x02
+#define IXGB_RX_DESC_ERRORS_P 0x08
+#define IXGB_RX_DESC_ERRORS_TCPE 0x20
+#define IXGB_RX_DESC_ERRORS_IPE 0x40
+#define IXGB_RX_DESC_ERRORS_RXE 0x80
+
+#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
+
+/* Layout of a single transmit descriptor. The controller assumes that this
+ * structure is packed into 16 bytes, which is a safe assumption with most
+ * compilers. However, some compilers may insert padding between the fields,
+ * in which case the structure must be packed in some compiler-specific
+ * manner. */
+struct ixgb_tx_desc {
+ uint64_t buff_addr;
+ uint32_t cmd_type_len;
+ uint8_t status;
+ uint8_t popts;
+ uint16_t vlan;
+};
+
+#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
+#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
+#define IXGB_TX_DESC_TYPE_SHIFT 20
+#define IXGB_TX_DESC_CMD_MASK 0xFF000000
+#define IXGB_TX_DESC_CMD_SHIFT 24
+#define IXGB_TX_DESC_CMD_EOP 0x01000000
+#define IXGB_TX_DESC_CMD_TSE 0x04000000
+#define IXGB_TX_DESC_CMD_RS 0x08000000
+#define IXGB_TX_DESC_CMD_VLE 0x40000000
+#define IXGB_TX_DESC_CMD_IDE 0x80000000
+
+#define IXGB_TX_DESC_TYPE 0x00100000
+
+#define IXGB_TX_DESC_STATUS_DD 0x01
+
+#define IXGB_TX_DESC_POPTS_IXSM 0x01
+#define IXGB_TX_DESC_POPTS_TXSM 0x02
+#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
+
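+/* Illustrative sketch, not part of the original driver: cmd_type_len packs
+ * the 20-bit buffer length, the descriptor type and the command bits into a
+ * single 32-bit field.  The helper below shows one way the masks above fit
+ * together; the function name and the particular command bits chosen are for
+ * illustration only.
+ */
+#if 0
+static uint32_t
+example_tx_cmd_type_len(uint32_t length)
+{
+	/* last buffer of the packet, report status on completion */
+	return (length & IXGB_TX_DESC_LENGTH_MASK) |
+	       IXGB_TX_DESC_TYPE |
+	       IXGB_TX_DESC_CMD_EOP |
+	       IXGB_TX_DESC_CMD_RS;
+}
+#endif
+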
+struct ixgb_context_desc {
+ uint8_t ipcss;
+ uint8_t ipcso;
+ uint16_t ipcse;
+ uint8_t tucss;
+ uint8_t tucso;
+ uint16_t tucse;
+ uint32_t cmd_type_len;
+ uint8_t status;
+ uint8_t hdr_len;
+ uint16_t mss;
+};
+
+#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
+#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
+#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
+#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
+#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
+
+#define IXGB_CONTEXT_DESC_TYPE 0x00000000
+
+#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
+
+/* Filters */
+#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
+
+#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
+#define ENET_HEADER_SIZE 14
+#define ENET_FCS_LENGTH 4
+#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
+#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
+#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
+#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* Phy Addresses */
+#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
+#define IXGB_XAUII_PHY_ADDR 0x1 /* XAUI transceiver phy address */
+#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
+
+/* This structure takes a 64k flash and maps it for identification commands */
+struct ixgb_flash_buffer {
+ uint8_t manufacturer_id;
+ uint8_t device_id;
+ uint8_t filler1[0x2AA8];
+ uint8_t cmd2;
+ uint8_t filler2[0x2AAA];
+ uint8_t cmd1;
+ uint8_t filler3[0xAAAA];
+};
+
+/*
+ * This is a little-endian specific check.
+ */
+#define IS_MULTICAST(Address) \
+ (boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01))
+
+/*
+ * Check whether an address is broadcast.
+ */
+#define IS_BROADCAST(Address) \
+ ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff)))
+
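+/* Illustrative note, not part of the original driver: because the all-ones
+ * broadcast address also has the group bit set in its first octet, it
+ * satisfies IS_MULTICAST() as well as IS_BROADCAST().  A minimal sketch of
+ * the two macros in use, assuming ASSERT() comes from ixgb_osdep.h:
+ */
+#if 0
+static void
+example_address_checks(void)
+{
+	uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
+	uint8_t bcast[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+	ASSERT(IS_MULTICAST(mcast));
+	ASSERT(!IS_BROADCAST(mcast));
+	ASSERT(IS_MULTICAST(bcast));	/* broadcast sets the group bit too */
+	ASSERT(IS_BROADCAST(bcast));
+}
+#endif
+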
+/* Flow control parameters */
+struct ixgb_fc {
+ uint32_t high_water; /* Flow Control High-water */
+ uint32_t low_water; /* Flow Control Low-water */
+ uint16_t pause_time; /* Flow Control Pause timer */
+ boolean_t send_xon; /* Flow control send XON */
+ ixgb_fc_type type; /* Type of flow control */
+};
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* Phy definitions */
+#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
+#define IXGB_MAX_PHY_ADDRESS 31
+#define IXGB_MAX_PHY_DEV_TYPE 31
+
+/* Bus parameters */
+struct ixgb_bus {
+ ixgb_bus_speed speed;
+ ixgb_bus_width width;
+ ixgb_bus_type type;
+};
+
+struct ixgb_hw {
+ uint8_t __iomem *hw_addr;/* Base Address of the hardware */
+ void *back; /* Pointer to OS-dependent struct */
+ struct ixgb_fc fc; /* Flow control parameters */
+ struct ixgb_bus bus; /* Bus parameters */
+ uint32_t phy_id; /* Phy Identifier */
+ uint32_t phy_addr; /* XGMII address of Phy */
+ ixgb_mac_type mac_type; /* Identifier for MAC controller */
+ ixgb_phy_type phy_type; /* Transceiver/phy identifier */
+ uint32_t max_frame_size; /* Maximum frame size supported */
+ uint32_t mc_filter_type; /* Multicast filter hash type */
+ uint32_t num_mc_addrs; /* Number of current Multicast addrs */
+ uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
+ uint32_t num_tx_desc; /* Number of Transmit descriptors */
+ uint32_t num_rx_desc; /* Number of Receive descriptors */
+ uint32_t rx_buffer_size; /* Size of Receive buffer */
+ boolean_t link_up; /* TRUE if link is valid */
+ boolean_t adapter_stopped; /* State of adapter */
+ uint16_t device_id; /* device id from PCI configuration space */
+ uint16_t vendor_id; /* vendor id from PCI configuration space */
+ uint8_t revision_id; /* revision id from PCI configuration space */
+ uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
+ uint16_t subsystem_id; /* subsystem id from PCI configuration space */
+ uint32_t bar0; /* Base Address registers */
+ uint32_t bar1;
+ uint32_t bar2;
+ uint32_t bar3;
+ uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */
+ uint16_t eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
+ unsigned long io_base; /* Our I/O mapped location */
+ uint32_t lastLFC;
+ uint32_t lastRFC;
+};
+
+/* Statistics reported by the hardware */
+struct ixgb_hw_stats {
+ uint64_t tprl;
+ uint64_t tprh;
+ uint64_t gprcl;
+ uint64_t gprch;
+ uint64_t bprcl;
+ uint64_t bprch;
+ uint64_t mprcl;
+ uint64_t mprch;
+ uint64_t uprcl;
+ uint64_t uprch;
+ uint64_t vprcl;
+ uint64_t vprch;
+ uint64_t jprcl;
+ uint64_t jprch;
+ uint64_t gorcl;
+ uint64_t gorch;
+ uint64_t torl;
+ uint64_t torh;
+ uint64_t rnbc;
+ uint64_t ruc;
+ uint64_t roc;
+ uint64_t rlec;
+ uint64_t crcerrs;
+ uint64_t icbc;
+ uint64_t ecbc;
+ uint64_t mpc;
+ uint64_t tptl;
+ uint64_t tpth;
+ uint64_t gptcl;
+ uint64_t gptch;
+ uint64_t bptcl;
+ uint64_t bptch;
+ uint64_t mptcl;
+ uint64_t mptch;
+ uint64_t uptcl;
+ uint64_t uptch;
+ uint64_t vptcl;
+ uint64_t vptch;
+ uint64_t jptcl;
+ uint64_t jptch;
+ uint64_t gotcl;
+ uint64_t gotch;
+ uint64_t totl;
+ uint64_t toth;
+ uint64_t dc;
+ uint64_t plt64c;
+ uint64_t tsctc;
+ uint64_t tsctfc;
+ uint64_t ibic;
+ uint64_t rfc;
+ uint64_t lfc;
+ uint64_t pfrc;
+ uint64_t pftc;
+ uint64_t mcfrc;
+ uint64_t mcftc;
+ uint64_t xonrxc;
+ uint64_t xontxc;
+ uint64_t xoffrxc;
+ uint64_t xofftxc;
+ uint64_t rjc;
+};
+
+/* Function Prototypes */
+extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw);
+extern boolean_t ixgb_init_hw(struct ixgb_hw *hw);
+extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw);
+extern void ixgb_init_rx_addrs(struct ixgb_hw *hw);
+extern void ixgb_check_for_link(struct ixgb_hw *hw);
+extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
+extern boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
+extern void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
+extern boolean_t mac_addr_valid(uint8_t *mac_addr);
+
+extern uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
+ uint32_t reg_addr,
+ uint32_t phy_addr,
+ uint32_t device_type);
+
+extern void ixgb_write_phy_reg(struct ixgb_hw *hw,
+ uint32_t reg_addr,
+ uint32_t phy_addr,
+ uint32_t device_type,
+ uint16_t data);
+
+extern void ixgb_rar_set(struct ixgb_hw *hw,
+ uint8_t *addr,
+ uint32_t index);
+
+
+/* Filters (multicast, vlan, receive) */
+extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
+ uint8_t *mc_addr_list,
+ uint32_t mc_addr_count,
+ uint32_t pad);
+
+/* Vfta functions */
+extern void ixgb_write_vfta(struct ixgb_hw *hw,
+ uint32_t offset,
+ uint32_t value);
+
+extern void ixgb_clear_vfta(struct ixgb_hw *hw);
+
+/* Access functions to eeprom data */
+void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
+uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
+uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
+uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
+uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
+uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
+boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
+uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
+
+/* Everything else */
+void ixgb_led_on(struct ixgb_hw *hw);
+void ixgb_led_off(struct ixgb_hw *hw);
+void ixgb_write_pci_cfg(struct ixgb_hw *hw,
+ uint32_t reg,
+ uint16_t * value);
+
+
+#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
new file mode 100644
index 000000000000..aee207eaa287
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -0,0 +1,48 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_IDS_H_
+#define _IXGB_IDS_H_
+
+/**********************************************************************
+** The Device and Vendor IDs for 10 Gigabit MACs
+**********************************************************************/
+
+#define INTEL_VENDOR_ID 0x8086
+#define INTEL_SUBVENDOR_ID 0x8086
+
+
+#define IXGB_DEVICE_ID_82597EX 0x1048
+#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
+#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
+#define IXGB_SUBDEVICE_ID_A11F 0xA11F
+#define IXGB_SUBDEVICE_ID_A01F 0xA01F
+
+#endif /* #ifndef _IXGB_IDS_H_ */
+
+/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
new file mode 100644
index 000000000000..7d26623d8592
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -0,0 +1,2166 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgb.h"
+
+/* Change Log
+ * 1.0.88 01/05/05
+ * - include fix to the condition that determines when to quit NAPI - Robert Olsson
+ * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
+ * 1.0.84 10/26/04
+ * - reset buffer_info->dma in Tx resource cleanup logic
+ * 1.0.83 10/12/04
+ * - sparse cleanup - shemminger@osdl.org
+ * - fix tx resource cleanup logic
+ */
+
+char ixgb_driver_name[] = "ixgb";
+char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
+
+#ifndef CONFIG_IXGB_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
+char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+
+/* ixgb_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgb_pci_tbl[] = {
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
+
+/* Local Function Prototypes */
+
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
+
+static int ixgb_init_module(void);
+static void ixgb_exit_module(void);
+static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit ixgb_remove(struct pci_dev *pdev);
+static int ixgb_sw_init(struct ixgb_adapter *adapter);
+static int ixgb_open(struct net_device *netdev);
+static int ixgb_close(struct net_device *netdev);
+static void ixgb_configure_tx(struct ixgb_adapter *adapter);
+static void ixgb_configure_rx(struct ixgb_adapter *adapter);
+static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
+static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
+static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
+static void ixgb_set_multi(struct net_device *netdev);
+static void ixgb_watchdog(unsigned long data);
+static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
+static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
+static int ixgb_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
+static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+#ifdef CONFIG_IXGB_NAPI
+static int ixgb_clean(struct net_device *netdev, int *budget);
+static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
+ int *work_done, int work_to_do);
+#else
+static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
+#endif
+static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
+static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp);
+static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
+static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
+static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
+
+static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
+ void *ptr);
+static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void ixgb_netpoll(struct net_device *dev);
+#endif
+
+struct notifier_block ixgb_notifier_reboot = {
+ .notifier_call = ixgb_notify_reboot,
+ .next = NULL,
+ .priority = 0
+};
+
+/* Exported from other modules */
+
+extern void ixgb_check_options(struct ixgb_adapter *adapter);
+
+static struct pci_driver ixgb_driver = {
+ .name = ixgb_driver_name,
+ .id_table = ixgb_pci_tbl,
+ .probe = ixgb_probe,
+ .remove = __devexit_p(ixgb_remove),
+ /* Power Management Hooks */
+ .suspend = NULL,
+ .resume = NULL
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
+MODULE_LICENSE("GPL");
+
+/* some defines for controlling descriptor fetches in h/w */
+#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefetch below this */
+#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
+ pushed this many descriptors from head */
+#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
+
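+/* Illustrative sketch, not part of the original driver: these defaults are
+ * eventually shifted into the RXDCTL register with the *_SHIFT values from
+ * ixgb_hw.h.  The helper name below is invented for illustration; the real
+ * programming happens in the receive configuration path.
+ */
+#if 0
+static uint32_t
+example_rxdctl_value(void)
+{
+	return (RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT) |
+	       (RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT) |
+	       (RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT);
+}
+#endif
+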
+/**
+ * ixgb_init_module - Driver Registration Routine
+ *
+ * ixgb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init
+ixgb_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ ixgb_driver_string, ixgb_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgb_copyright);
+
+ ret = pci_module_init(&ixgb_driver);
+ if(ret >= 0) {
+ register_reboot_notifier(&ixgb_notifier_reboot);
+ }
+ return ret;
+}
+
+module_init(ixgb_init_module);
+
+/**
+ * ixgb_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgb_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit
+ixgb_exit_module(void)
+{
+ unregister_reboot_notifier(&ixgb_notifier_reboot);
+ pci_unregister_driver(&ixgb_driver);
+}
+
+module_exit(ixgb_exit_module);
+
+/**
+ * ixgb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static inline void
+ixgb_irq_disable(struct ixgb_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
+ IXGB_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * ixgb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static inline void
+ixgb_irq_enable(struct ixgb_adapter *adapter)
+{
+ if(atomic_dec_and_test(&adapter->irq_sem)) {
+ IXGB_WRITE_REG(&adapter->hw, IMS,
+ IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
+ IXGB_INT_RXO | IXGB_INT_LSC);
+ IXGB_WRITE_FLUSH(&adapter->hw);
+ }
+}
+
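+/* Illustrative note, not part of the original driver: irq_sem makes the
+ * disable/enable pair nestable.  Interrupts are unmasked again only when
+ * every ixgb_irq_disable() has been matched by an ixgb_irq_enable().  The
+ * sketch below assumes irq_sem was zero beforehand (interrupts currently
+ * enabled); the counter's initialization lives elsewhere in the driver.
+ */
+#if 0
+static void
+example_nested_irq_masking(struct ixgb_adapter *adapter)
+{
+	ixgb_irq_disable(adapter);	/* irq_sem 0 -> 1, IMC masks everything */
+	ixgb_irq_disable(adapter);	/* irq_sem 1 -> 2, still masked */
+	ixgb_irq_enable(adapter);	/* irq_sem 2 -> 1, not yet unmasked */
+	ixgb_irq_enable(adapter);	/* irq_sem 1 -> 0, IMS unmasks */
+}
+#endif
+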
+int
+ixgb_up(struct ixgb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+
+ ixgb_set_multi(netdev);
+
+ ixgb_restore_vlan(adapter);
+
+ ixgb_configure_tx(adapter);
+ ixgb_setup_rctl(adapter);
+ ixgb_configure_rx(adapter);
+ ixgb_alloc_rx_buffers(adapter);
+
+#ifdef CONFIG_PCI_MSI
+ {
+ boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
+ IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
+ adapter->have_msi = TRUE;
+
+ if (!pcix)
+ adapter->have_msi = FALSE;
+ else if((err = pci_enable_msi(adapter->pdev))) {
+ printk (KERN_ERR
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = FALSE;
+ /* proceed to try to request regular interrupt */
+ }
+ }
+
+#endif
+ if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
+ SA_SHIRQ | SA_SAMPLE_RANDOM,
+ netdev->name, netdev)))
+ return err;
+
+ /* disable interrupts and get the hardware into a known state */
+ IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
+ if((hw->max_frame_size != max_frame) ||
+ (hw->max_frame_size !=
+ (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
+
+ hw->max_frame_size = max_frame;
+
+ IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
+
+ if(hw->max_frame_size >
+ IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
+ uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
+
+ if(!(ctrl0 & IXGB_CTRL0_JFE)) {
+ ctrl0 |= IXGB_CTRL0_JFE;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0);
+ }
+ }
+ }
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ ixgb_irq_enable(adapter);
+
+#ifdef CONFIG_IXGB_NAPI
+ netif_poll_enable(netdev);
+#endif
+ return 0;
+}
+
+void
+ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ ixgb_irq_disable(adapter);
+ free_irq(adapter->pdev->irq, netdev);
+#ifdef CONFIG_PCI_MSI
+ if(adapter->have_msi == TRUE)
+ pci_disable_msi(adapter->pdev);
+
+#endif
+ if(kill_watchdog)
+ del_timer_sync(&adapter->watchdog_timer);
+#ifdef CONFIG_IXGB_NAPI
+ netif_poll_disable(netdev);
+#endif
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ ixgb_reset(adapter);
+ ixgb_clean_tx_ring(adapter);
+ ixgb_clean_rx_ring(adapter);
+}
+
+void
+ixgb_reset(struct ixgb_adapter *adapter)
+{
+
+ ixgb_adapter_stop(&adapter->hw);
+ if(!ixgb_init_hw(&adapter->hw))
+ IXGB_DBG("ixgb_init_hw failed.\n");
+}
+
+/**
+ * ixgb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgb_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+
+static int __devinit
+ixgb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct ixgb_adapter *adapter;
+ static int cards_found = 0;
+ unsigned long mmio_start;
+ int mmio_len;
+ int pci_using_dac;
+ int i;
+ int err;
+
+ if((err = pci_enable_device(pdev)))
+ return err;
+
+ if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ pci_using_dac = 1;
+ } else {
+ if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ IXGB_ERR("No usable DMA configuration, aborting\n");
+ return err;
+ }
+ pci_using_dac = 0;
+ }
+
+ if((err = pci_request_regions(pdev, ixgb_driver_name)))
+ return err;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
+ if(!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev->priv;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+
+ mmio_start = pci_resource_start(pdev, BAR_0);
+ mmio_len = pci_resource_len(pdev, BAR_0);
+
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if(!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ for(i = BAR_1; i <= BAR_5; i++) {
+ if(pci_resource_len(pdev, i) == 0)
+ continue;
+ if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ adapter->hw.io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+
+ netdev->open = &ixgb_open;
+ netdev->stop = &ixgb_close;
+ netdev->hard_start_xmit = &ixgb_xmit_frame;
+ netdev->get_stats = &ixgb_get_stats;
+ netdev->set_multicast_list = &ixgb_set_multi;
+ netdev->set_mac_address = &ixgb_set_mac;
+ netdev->change_mtu = &ixgb_change_mtu;
+ ixgb_set_ethtool_ops(netdev);
+ netdev->tx_timeout = &ixgb_tx_timeout;
+ netdev->watchdog_timeo = HZ;
+#ifdef CONFIG_IXGB_NAPI
+ netdev->poll = &ixgb_clean;
+ netdev->weight = 64;
+#endif
+ netdev->vlan_rx_register = ixgb_vlan_rx_register;
+ netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = ixgb_netpoll;
+#endif
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+ netdev->base_addr = adapter->hw.io_base;
+
+ adapter->bd_number = cards_found;
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* setup the private structure */
+
+ if((err = ixgb_sw_init(adapter)))
+ goto err_sw_init;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+#ifdef NETIF_F_TSO
+ netdev->features |= NETIF_F_TSO;
+#endif
+
+ if(pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ /* make sure the EEPROM is good */
+
+ if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+
+ if(!is_valid_ether_addr(netdev->dev_addr)) {
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgb_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->tx_timeout_task,
+ (void (*)(void *))ixgb_tx_timeout_task, netdev);
+
+ if((err = register_netdev(netdev)))
+ goto err_register;
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
+ netdev->name);
+ ixgb_check_options(adapter);
+ /* reset the hardware with the new settings */
+
+ ixgb_reset(adapter);
+
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+ return err;
+}
+
+/**
+ * ixgb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit
+ixgb_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ unregister_netdev(netdev);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+}
+
+/**
+ * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit
+ixgb_sw_init(struct ixgb_adapter *adapter)
+{
+ struct ixgb_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+
+ adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
+
+ hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+
+ if((hw->device_id == IXGB_DEVICE_ID_82597EX)
+ ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
+ ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ hw->mac_type = ixgb_82597;
+ else {
+ /* should never have loaded on this device */
+ printk(KERN_ERR "ixgb: unsupported device id\n");
+ }
+
+ /* enable flow control to be programmed */
+ hw->fc.send_xon = 1;
+
+ atomic_set(&adapter->irq_sem, 1);
+ spin_lock_init(&adapter->tx_lock);
+
+ return 0;
+}
+
+/**
+ * ixgb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int
+ixgb_open(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ int err;
+
+ /* allocate transmit descriptors */
+
+ if((err = ixgb_setup_tx_resources(adapter)))
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+
+ if((err = ixgb_setup_rx_resources(adapter)))
+ goto err_setup_rx;
+
+ if((err = ixgb_up(adapter)))
+ goto err_up;
+
+ return 0;
+
+err_up:
+ ixgb_free_rx_resources(adapter);
+err_setup_rx:
+ ixgb_free_tx_resources(adapter);
+err_setup_tx:
+ ixgb_reset(adapter);
+
+ return err;
+}
+
+/**
+ * ixgb_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0; this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int
+ixgb_close(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ ixgb_down(adapter, TRUE);
+
+ ixgb_free_tx_resources(adapter);
+ ixgb_free_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgb_buffer) * txdr->count;
+ txdr->buffer_info = vmalloc(size);
+ if(!txdr->buffer_info) {
+ return -ENOMEM;
+ }
+ memset(txdr->buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
+ IXGB_ROUNDUP(txdr->size, 4096);
+
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if(!txdr->desc) {
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+
+ return 0;
+}
+
+/**
+ * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void
+ixgb_configure_tx(struct ixgb_adapter *adapter)
+{
+ uint64_t tdba = adapter->tx_ring.dma;
+ uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
+ uint32_t tctl;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ /* Setup the Base and Length of the Tx Descriptor Ring
+ * tx_ring.dma can be either a 32 or 64 bit value
+ */
+
+ IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+ IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
+
+ IXGB_WRITE_REG(hw, TDLEN, tdlen);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ IXGB_WRITE_REG(hw, TDH, 0);
+ IXGB_WRITE_REG(hw, TDT, 0);
+
+ /* don't set up txdctl, it induces performance problems if configured
+ * incorrectly */
+ /* Set the Tx Interrupt Delay register */
+
+ IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
+ IXGB_WRITE_REG(hw, TCTL, tctl);
+
+ /* Setup Transmit Descriptor Settings for this adapter */
+ adapter->tx_cmd_type =
+ IXGB_TX_DESC_TYPE
+ | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
+}
+
+/**
+ * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+int
+ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgb_buffer) * rxdr->count;
+ rxdr->buffer_info = vmalloc(size);
+ if(!rxdr->buffer_info) {
+ return -ENOMEM;
+ }
+ memset(rxdr->buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
+ IXGB_ROUNDUP(rxdr->size, 4096);
+
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+ if(!rxdr->desc) {
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+
+ return 0;
+}
+
+/**
+ * ixgb_setup_rctl - configure the receive control register
+ * @adapter: Board private structure
+ **/
+
+static void
+ixgb_setup_rctl(struct ixgb_adapter *adapter)
+{
+ uint32_t rctl;
+
+ rctl = IXGB_READ_REG(&adapter->hw, RCTL);
+
+ rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
+
+ rctl |=
+ IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
+ IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
+ (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
+
+ rctl |= IXGB_RCTL_SECRC;
+
+ switch (adapter->rx_buffer_len) {
+ case IXGB_RXBUFFER_2048:
+ default:
+ rctl |= IXGB_RCTL_BSIZE_2048;
+ break;
+ case IXGB_RXBUFFER_4096:
+ rctl |= IXGB_RCTL_BSIZE_4096;
+ break;
+ case IXGB_RXBUFFER_8192:
+ rctl |= IXGB_RCTL_BSIZE_8192;
+ break;
+ case IXGB_RXBUFFER_16384:
+ rctl |= IXGB_RCTL_BSIZE_16384;
+ break;
+ }
+
+ IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void
+ixgb_configure_rx(struct ixgb_adapter *adapter)
+{
+ uint64_t rdba = adapter->rx_ring.dma;
+ uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
+ struct ixgb_hw *hw = &adapter->hw;
+ uint32_t rctl;
+ uint32_t rxcsum;
+ uint32_t rxdctl;
+
+ /* make sure receives are disabled while setting up the descriptors */
+
+ rctl = IXGB_READ_REG(hw, RCTL);
+ IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
+
+ /* set the Receive Delay Timer Register */
+
+ IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+
+ IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+ IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
+
+ IXGB_WRITE_REG(hw, RDLEN, rdlen);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers */
+ IXGB_WRITE_REG(hw, RDH, 0);
+ IXGB_WRITE_REG(hw, RDT, 0);
+
+ /* set up pre-fetching of receive buffers so we get some before we
+ * run out (default hardware behavior is to run out before fetching
+ * more). This sets up to fetch if HTHRESH rx descriptors are avail
+ * and the descriptors in hw cache are below PTHRESH. This avoids
+ * the hardware behavior of fetching <=512 descriptors in a single
+ * burst that pre-empts all other activity, usually causing fifo
+ * overflows. */
+ /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
+ rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
+ RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
+ RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
+ IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ if(adapter->rx_csum == TRUE) {
+ rxcsum = IXGB_READ_REG(hw, RXCSUM);
+ rxcsum |= IXGB_RXCSUM_TUOFL;
+ IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+}
+
+/**
+ * ixgb_free_tx_resources - Free Tx Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+ixgb_free_tx_resources(struct ixgb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgb_clean_tx_ring(adapter);
+
+ vfree(adapter->tx_ring.buffer_info);
+ adapter->tx_ring.buffer_info = NULL;
+
+ pci_free_consistent(pdev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
+
+ adapter->tx_ring.desc = NULL;
+}
+
+static inline void
+ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
+ struct ixgb_buffer *buffer_info)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ if(buffer_info->dma) {
+ pci_unmap_page(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ if(buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * ixgb_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for(i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct ixgb_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ IXGB_WRITE_REG(&adapter->hw, TDH, 0);
+ IXGB_WRITE_REG(&adapter->hw, TDT, 0);
+}
+
+/**
+ * ixgb_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+ixgb_free_rx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgb_clean_rx_ring(adapter);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgb_clean_rx_ring - Free Rx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct ixgb_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+
+ for(i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if(buffer_info->skb) {
+
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(buffer_info->skb);
+
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct ixgb_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ IXGB_WRITE_REG(&adapter->hw, RDH, 0);
+ IXGB_WRITE_REG(&adapter->hw, RDT, 0);
+}
+
+/**
+ * ixgb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+ixgb_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct sockaddr *addr = p;
+
+ if(!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
+
+ return 0;
+}
+
+/**
+ * ixgb_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void
+ixgb_set_multi(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+ struct dev_mc_list *mc_ptr;
+ uint32_t rctl;
+ int i;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = IXGB_READ_REG(hw, RCTL);
+
+ if(netdev->flags & IFF_PROMISC) {
+ rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ } else if(netdev->flags & IFF_ALLMULTI) {
+ rctl |= IXGB_RCTL_MPE;
+ rctl &= ~IXGB_RCTL_UPE;
+ } else {
+ rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ }
+
+ if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ rctl |= IXGB_RCTL_MPE;
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+ } else {
+ uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
+
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+
+ for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
+ i++, mc_ptr = mc_ptr->next)
+ memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
+ mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+
+ ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ }
+}
+
+/**
+ * ixgb_watchdog - Timer Call-back
+ * @data: pointer to the board private structure cast into an unsigned long
+ **/
+
+static void
+ixgb_watchdog(unsigned long data)
+{
+ struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+
+ ixgb_check_for_link(&adapter->hw);
+
+ if (ixgb_check_for_bad_link(&adapter->hw)) {
+ /* force the reset path */
+ netif_stop_queue(netdev);
+ }
+
+ if(adapter->hw.link_up) {
+ if(!netif_carrier_ok(netdev)) {
+ printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
+ netdev->name, 10000, "Full Duplex");
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ } else {
+ if(netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ printk(KERN_INFO
+ "ixgb: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ }
+ }
+
+ ixgb_update_stats(adapter);
+
+ if(!netif_carrier_ok(netdev)) {
+ if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ schedule_work(&adapter->tx_timeout_task);
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = TRUE;
+
+ /* generate an interrupt to force clean up of any stragglers */
+ IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+#define IXGB_TX_FLAGS_CSUM 0x00000001
+#define IXGB_TX_FLAGS_VLAN 0x00000002
+#define IXGB_TX_FLAGS_TSO 0x00000004
+
+static inline int
+ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+ struct ixgb_context_desc *context_desc;
+ unsigned int i;
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ uint16_t ipcse, tucse, mss;
+ int err;
+
+ if(likely(skb_shinfo(skb)->tso_size)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ mss = skb_shinfo(skb)->tso_size;
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+ skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+ skb->nh.iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcss = skb->nh.raw - skb->data;
+ ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
+ ipcse = skb->h.raw - skb->data - 1;
+ tucss = skb->h.raw - skb->data;
+ tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+ tucse = 0;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+
+ context_desc->ipcss = ipcss;
+ context_desc->ipcso = ipcso;
+ context_desc->ipcse = cpu_to_le16(ipcse);
+ context_desc->tucss = tucss;
+ context_desc->tucso = tucso;
+ context_desc->tucse = cpu_to_le16(tucse);
+ context_desc->mss = cpu_to_le16(mss);
+ context_desc->hdr_len = hdr_len;
+ context_desc->status = 0;
+ context_desc->cmd_type_len = cpu_to_le32(
+ IXGB_CONTEXT_DESC_TYPE
+ | IXGB_CONTEXT_DESC_CMD_TSE
+ | IXGB_CONTEXT_DESC_CMD_IP
+ | IXGB_CONTEXT_DESC_CMD_TCP
+ | IXGB_CONTEXT_DESC_CMD_RS
+ | IXGB_CONTEXT_DESC_CMD_IDE
+ | (skb->len - (hdr_len)));
+
+ if(++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return 1;
+ }
+#endif
+
+ return 0;
+}
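+
+/* For a plain untagged Ethernet/IPv4/TCP frame with no IP or TCP options
+ * (an assumed example, using the standard header sizes), the offsets above
+ * work out to: ipcss = 14 (start of the IP header), ipcso = 24 (IP checksum
+ * field), ipcse = 33 (last byte of the IP header), tucss = 34 (start of the
+ * TCP header), tucso = 50 (TCP checksum field) and hdr_len = 54, so the
+ * hardware prepends those 54 header bytes to each segment it generates. */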
+
+static inline boolean_t
+ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+ struct ixgb_context_desc *context_desc;
+ unsigned int i;
+ uint8_t css, cso;
+
+ if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ css = skb->h.raw - skb->data;
+ cso = (skb->h.raw + skb->csum) - skb->data;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+
+ context_desc->tucss = css;
+ context_desc->tucso = cso;
+ context_desc->tucse = 0;
+ /* zero out any previously existing data in one instruction */
+ *(uint32_t *)&(context_desc->ipcss) = 0;
+ context_desc->status = 0;
+ context_desc->hdr_len = 0;
+ context_desc->mss = 0;
+ context_desc->cmd_type_len =
+ cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
+ | IXGB_TX_DESC_CMD_RS
+ | IXGB_TX_DESC_CMD_IDE);
+
+ if(++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+#define IXGB_MAX_TXD_PWR 14
+#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
+
+static inline int
+ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
+ unsigned int first)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_buffer *buffer_info;
+ int len = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+ len -= skb->data_len;
+
+ i = tx_ring->next_to_use;
+
+ while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+
+ len -= size;
+ offset += size;
+ count++;
+ if(++i == tx_ring->count) i = 0;
+ }
+
+ for(f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = 0;
+
+ while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev,
+ frag->page,
+ frag->page_offset + offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+
+ len -= size;
+ offset += size;
+ count++;
+ if(++i == tx_ring->count) i = 0;
+ }
+ }
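+ /* step back to the last descriptor actually used: the skb is freed
+ * along with its final fragment, and the first descriptor records
+ * which descriptor to watch for completion (EOP) */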
+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+}
+
+static inline void
+ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_tx_desc *tx_desc = NULL;
+ struct ixgb_buffer *buffer_info;
+ uint32_t cmd_type_len = adapter->tx_cmd_type;
+ uint8_t status = 0;
+ uint8_t popts = 0;
+ unsigned int i;
+
+ if(tx_flags & IXGB_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
+ popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
+ }
+
+ if(tx_flags & IXGB_TX_FLAGS_CSUM)
+ popts |= IXGB_TX_DESC_POPTS_TXSM;
+
+ if(tx_flags & IXGB_TX_FLAGS_VLAN) {
+ cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
+ }
+
+ i = tx_ring->next_to_use;
+
+ while(count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = IXGB_TX_DESC(*tx_ring, i);
+ tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->cmd_type_len =
+ cpu_to_le32(cmd_type_len | buffer_info->length);
+ tx_desc->status = status;
+ tx_desc->popts = popts;
+ tx_desc->vlan = cpu_to_le16(vlan_id);
+
+ if(++i == tx_ring->count) i = 0;
+ }
+
+ tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
+ | IXGB_TX_DESC_CMD_RS );
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ IXGB_WRITE_REG(&adapter->hw, TDT, i);
+}
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
+ (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
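+
+/* Worked example (assuming PAGE_SIZE is 4096): TXD_USE_COUNT(S) charges one
+ * descriptor per 16 KB of data plus one for any remainder, so
+ * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) = 1 and TXD_USE_COUNT(PAGE_SIZE) = 1,
+ * giving DESC_NEEDED = MAX_SKB_FRAGS + 2 as the worst-case number of
+ * descriptors reserved for a single frame. */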
+
+static int
+ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ unsigned long flags;
+ int vlan_id = 0;
+ int tso;
+
+ if(skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&adapter->tx_lock, flags);
+ if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+ if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= IXGB_TX_FLAGS_VLAN;
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+
+ first = adapter->tx_ring.next_to_use;
+
+ tso = ixgb_tso(adapter, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGB_TX_FLAGS_TSO;
+ else if(ixgb_tx_csum(adapter, skb))
+ tx_flags |= IXGB_TX_FLAGS_CSUM;
+
+ ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
+ tx_flags);
+
+ netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+/**
+ * ixgb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void
+ixgb_tx_timeout(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void
+ixgb_tx_timeout_task(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ ixgb_down(adapter, TRUE);
+ ixgb_up(adapter);
+}
+
+/**
+ * ixgb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *
+ixgb_get_stats(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+ixgb_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+
+
+ if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
+ || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
+ IXGB_ERR("Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
+ || (max_frame <= IXGB_RXBUFFER_2048)) {
+ adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
+
+ } else if(max_frame <= IXGB_RXBUFFER_4096) {
+ adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
+
+ } else if(max_frame <= IXGB_RXBUFFER_8192) {
+ adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
+
+ } else {
+ adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if(old_max_frame != max_frame && netif_running(netdev)) {
+
+ ixgb_down(adapter, TRUE);
+ ixgb_up(adapter);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgb_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+
+void
+ixgb_update_stats(struct ixgb_adapter *adapter)
+{
+ adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
+ adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
+ adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
+ adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
+ adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
+ adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
+ adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
+ adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
+ adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
+ adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
+ adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
+ adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
+ adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
+ adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
+ adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
+ adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
+ adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
+ adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
+ adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
+ adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
+ adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
+ adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
+ adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
+ adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
+ adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
+ adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
+ adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
+ adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
+ adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
+ adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
+ adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
+ adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
+ adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
+ adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
+ adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
+ adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
+ adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
+ adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
+ adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
+ adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
+ adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
+ adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
+ adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
+ adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
+ adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
+ adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
+ adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
+ adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
+ adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
+ adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
+ adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
+ adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
+ adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
+ adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
+ adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
+ adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
+ adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
+ adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
+ adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
+ adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
+
+ /* Fill out the OS statistics structure */
+
+ adapter->net_stats.rx_packets = adapter->stats.gprcl;
+ adapter->net_stats.tx_packets = adapter->stats.gptcl;
+ adapter->net_stats.rx_bytes = adapter->stats.gorcl;
+ adapter->net_stats.tx_bytes = adapter->stats.gotcl;
+ adapter->net_stats.multicast = adapter->stats.mprcl;
+ adapter->net_stats.collisions = 0;
+
+ /* ignore RLEC as it reports errors for padded (<64bytes) frames
+ * with a length in the type/len field */
+ adapter->net_stats.rx_errors =
+ /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
+ adapter->stats.ruc +
+ adapter->stats.roc /*+ adapter->stats.rlec */ +
+ adapter->stats.icbc +
+ adapter->stats.ecbc + adapter->stats.mpc;
+
+ adapter->net_stats.rx_dropped = adapter->stats.mpc;
+
+ /* see above
+ * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+ */
+
+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+ adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
+ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+ adapter->net_stats.rx_over_errors = adapter->stats.mpc;
+
+ adapter->net_stats.tx_errors = 0;
+ adapter->net_stats.rx_frame_errors = 0;
+ adapter->net_stats.tx_aborted_errors = 0;
+ adapter->net_stats.tx_carrier_errors = 0;
+ adapter->net_stats.tx_fifo_errors = 0;
+ adapter->net_stats.tx_heartbeat_errors = 0;
+ adapter->net_stats.tx_window_errors = 0;
+}
+
+#define IXGB_MAX_INTR 10
+/**
+ * ixgb_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @regs: CPU registers structure
+ **/
+
+static irqreturn_t
+ixgb_intr(int irq, void *data, struct pt_regs *regs)
+{
+ struct net_device *netdev = data;
+ struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_hw *hw = &adapter->hw;
+ uint32_t icr = IXGB_READ_REG(hw, ICR);
+#ifndef CONFIG_IXGB_NAPI
+ unsigned int i;
+#endif
+
+ if(unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+ if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ }
+
+#ifdef CONFIG_IXGB_NAPI
+ if(netif_rx_schedule_prep(netdev)) {
+
+ /* Disable interrupts and register for poll. The flush
+ of the posted write is intentionally left out.
+ */
+
+ atomic_inc(&adapter->irq_sem);
+ IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
+ __netif_rx_schedule(netdev);
+ }
+#else
+ /* yes, that is actually a & and it is meant to make sure that
+ * every pass through this for loop checks both receive and
+ * transmit queues for completed descriptors, intended to
+ * avoid starvation issues and assist tx/rx fairness. */
+ for(i = 0; i < IXGB_MAX_INTR; i++)
+ if(!ixgb_clean_rx_irq(adapter) &
+ !ixgb_clean_tx_irq(adapter))
+ break;
+#endif
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_IXGB_NAPI
+/**
+ * ixgb_clean - NAPI Rx polling callback
+ * @netdev: network interface device structure
+ * @budget: remaining Rx work quota, decremented by the amount of work done
+ **/
+
+static int
+ixgb_clean(struct net_device *netdev, int *budget)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ int work_to_do = min(*budget, netdev->quota);
+ int tx_cleaned;
+ int work_done = 0;
+
+ tx_cleaned = ixgb_clean_tx_irq(adapter);
+ ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ /* if no Tx and not enough Rx work done, exit the polling mode */
+ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
+ netif_rx_complete(netdev);
+ ixgb_irq_enable(adapter);
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+/**
+ * ixgb_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct ixgb_tx_desc *tx_desc, *eop_desc;
+ struct ixgb_buffer *buffer_info;
+ unsigned int i, eop;
+ boolean_t cleaned = FALSE;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IXGB_TX_DESC(*tx_ring, eop);
+
+ while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
+
+ for(cleaned = FALSE; !cleaned; ) {
+ tx_desc = IXGB_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (tx_desc->popts
+ & (IXGB_TX_DESC_POPTS_TXSM |
+ IXGB_TX_DESC_POPTS_IXSM))
+ adapter->hw_csum_tx_good++;
+
+ ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+
+ *(uint32_t *)&(tx_desc->status) = 0;
+
+ cleaned = (i == eop);
+ if(++i == tx_ring->count) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IXGB_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ spin_lock(&adapter->tx_lock);
+ if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+ (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
+
+ netif_wake_queue(netdev);
+ }
+ spin_unlock(&adapter->tx_lock);
+
+ if(adapter->detect_tx_hung) {
+ /* detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and movement of i */
+ adapter->detect_tx_hung = FALSE;
+ if(tx_ring->buffer_info[i].dma &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+ && !(IXGB_READ_REG(&adapter->hw, STATUS) &
+ IXGB_STATUS_TXOFF))
+ netif_stop_queue(netdev);
+ }
+
+ return cleaned;
+}
+
+/**
+ * ixgb_rx_checksum - Receive Checksum Offload for 82597.
+ * @adapter: board private structure
+ * @rx_desc: receive descriptor
+ * @sk_buff: socket buffer with received data
+ **/
+
+static inline void
+ixgb_rx_checksum(struct ixgb_adapter *adapter,
+ struct ixgb_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ /* Ignore Checksum bit is set OR
+ * TCP Checksum has not been calculated
+ */
+ if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
+ (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* At this point we know the hardware did the TCP checksum */
+ /* now look at the TCP checksum error bit */
+ if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
+ /* let the stack verify checksum errors */
+ skb->ip_summed = CHECKSUM_NONE;
+ adapter->hw_csum_rx_error++;
+ } else {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+ }
+}
+
+/**
+ * ixgb_clean_rx_irq - Send received data up the network stack
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+#ifdef CONFIG_IXGB_NAPI
+ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
+#else
+ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
+#endif
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgb_rx_desc *rx_desc, *next_rxd;
+ struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
+ struct sk_buff *skb, *next_skb;
+ uint32_t length;
+ unsigned int i, j;
+ boolean_t cleaned = FALSE;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+
+#ifdef CONFIG_IXGB_NAPI
+ if(*work_done >= work_to_do)
+ break;
+
+ (*work_done)++;
+#endif
+ skb = buffer_info->skb;
+ prefetch(skb->data);
+
+ if(++i == rx_ring->count) i = 0;
+ next_rxd = IXGB_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ if((j = i + 1) == rx_ring->count) j = 0;
+ next2_buffer = &rx_ring->buffer_info[j];
+ prefetch(next2_buffer);
+
+ next_buffer = &rx_ring->buffer_info[i];
+ next_skb = next_buffer->skb;
+ prefetch(next_skb);
+
+
+ cleaned = TRUE;
+
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ length = le16_to_cpu(rx_desc->length);
+
+ if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
+
+ /* All receives must fit into a single buffer */
+
+ IXGB_DBG("Receive packet consumed multiple buffers "
+ "length<%x>\n", length);
+
+ dev_kfree_skb_irq(skb);
+ rx_desc->status = 0;
+ buffer_info->skb = NULL;
+
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ continue;
+ }
+
+ if (unlikely(rx_desc->errors
+ & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
+ | IXGB_RX_DESC_ERRORS_P |
+ IXGB_RX_DESC_ERRORS_RXE))) {
+
+ dev_kfree_skb_irq(skb);
+ rx_desc->status = 0;
+ buffer_info->skb = NULL;
+
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ continue;
+ }
+
+ /* Good Receive */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ ixgb_rx_checksum(adapter, rx_desc, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+#ifdef CONFIG_IXGB_NAPI
+ if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special) &
+ IXGB_RX_DESC_SPECIAL_VLAN_MASK);
+ } else {
+ netif_receive_skb(skb);
+ }
+#else /* CONFIG_IXGB_NAPI */
+ if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+ vlan_hwaccel_rx(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special) &
+ IXGB_RX_DESC_SPECIAL_VLAN_MASK);
+ } else {
+ netif_rx(skb);
+ }
+#endif /* CONFIG_IXGB_NAPI */
+ netdev->last_rx = jiffies;
+
+ rx_desc->status = 0;
+ buffer_info->skb = NULL;
+
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+
+ rx_ring->next_to_clean = i;
+
+ ixgb_alloc_rx_buffers(adapter);
+
+ return cleaned;
+}
+
+/**
+ * ixgb_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ **/
+
+static void
+ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgb_rx_desc *rx_desc;
+ struct ixgb_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ int num_group_tail_writes;
+ long cleancount;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+ cleancount = IXGB_DESC_UNUSED(rx_ring);
+
+ num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
+
+ /* leave one descriptor unused */
+ while(--cleancount > 0) {
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
+
+ skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+
+ if(unlikely(!skb)) {
+ /* Better luck next round */
+ break;
+ }
+
+ /* Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed.
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ skb->dev = netdev;
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+ buffer_info->dma =
+ pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+
+ if((i & ~(num_group_tail_writes - 1)) == i) {
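+ /* only write the hardware tail pointer when i is aligned to
+ * num_group_tail_writes (the mask assumes a power-of-two batch
+ * size), so tail updates are batched rather than issued per
+ * descriptor */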
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ IXGB_WRITE_REG(&adapter->hw, RDT, i);
+ }
+
+ if(++i == rx_ring->count) i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
+ *
+ * @param netdev network interface device structure
+ * @param grp indicates to enable or disable tagging/stripping
+ **/
+static void
+ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ uint32_t ctrl, rctl;
+
+ ixgb_irq_disable(adapter);
+ adapter->vlgrp = grp;
+
+ if(grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl |= IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+
+ /* enable VLAN receive filtering */
+
+ rctl = IXGB_READ_REG(&adapter->hw, RCTL);
+ rctl |= IXGB_RCTL_VFE;
+ rctl &= ~IXGB_RCTL_CFIEN;
+ IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
+ } else {
+ /* disable VLAN tag insert/strip */
+
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl &= ~IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+
+ /* disable VLAN filtering */
+
+ rctl = IXGB_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~IXGB_RCTL_VFE;
+ IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
+ }
+
+ ixgb_irq_enable(adapter);
+}
+
+static void
+ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ uint32_t vfta, index;
+
+ /* add VID to filter table */
+
+ index = (vid >> 5) & 0x7F;
+ vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ ixgb_write_vfta(&adapter->hw, index, vfta);
+}
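+
+/* e.g. for vid 100: index = (100 >> 5) & 0x7F = 3 and the bit is
+ * 100 & 0x1F = 4, so bit 4 of VFTA[3] is set here (and cleared again
+ * in ixgb_vlan_rx_kill_vid below). */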
+
+static void
+ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
+{
+ struct ixgb_adapter *adapter = netdev->priv;
+ uint32_t vfta, index;
+
+ ixgb_irq_disable(adapter);
+
+ if(adapter->vlgrp)
+ adapter->vlgrp->vlan_devices[vid] = NULL;
+
+ ixgb_irq_enable(adapter);
+
+ /* remove VID from filter table*/
+
+ index = (vid >> 5) & 0x7F;
+ vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ ixgb_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void
+ixgb_restore_vlan(struct ixgb_adapter *adapter)
+{
+ ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if(adapter->vlgrp) {
+ uint16_t vid;
+ for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if(!adapter->vlgrp->vlan_devices[vid])
+ continue;
+ ixgb_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+/**
+ * ixgb_notify_reboot - handles OS notification of reboot event.
+ * @param nb notifier block, unused
+ * @param event Event being passed to driver to act upon
+ * @param p A pointer to our net device
+ **/
+static int
+ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
+{
+ struct pci_dev *pdev = NULL;
+
+ switch(event) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (pci_dev_driver(pdev) == &ixgb_driver)
+ ixgb_suspend(pdev, 3);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * ixgb_suspend - driver suspend function called from notify.
+ * @param pdev pci device structure for the adapter being suspended
+ * @param state power state to enter
+ **/
+static int
+ixgb_suspend(struct pci_dev *pdev, uint32_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if(netif_running(netdev))
+ ixgb_down(adapter, TRUE);
+
+ pci_save_state(pdev);
+
+ state = (state > 0) ? 3 : 0;
+ pci_set_power_state(pdev, state);
+ msec_delay(200);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void ixgb_netpoll(struct net_device *dev)
+{
+ struct ixgb_adapter *adapter = dev->priv;
+ disable_irq(adapter->pdev->irq);
+ ixgb_intr(adapter->pdev->irq, dev, NULL);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/* ixgb_main.c */
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
new file mode 100644
index 000000000000..9eba92891901
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* glue for the OS independent part of ixgb
+ * includes register access macros
+ */
+
+#ifndef _IXGB_OSDEP_H_
+#define _IXGB_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifndef msec_delay
+#define msec_delay(x) do { if(in_interrupt()) { \
+ /* Don't mdelay in interrupt context! */ \
+ BUG(); \
+ } else { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } } while(0)
+#endif
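+
+/* msec_delay() sleeps rather than busy-waits when called from process
+ * context; for example ixgb_suspend() calls msec_delay(200) after
+ * changing the device power state. */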
+
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+
+typedef enum {
+#undef FALSE
+ FALSE = 0,
+#undef TRUE
+ TRUE = 1
+} boolean_t;
+
+#undef ASSERT
+#define ASSERT(x) if(!(x)) BUG()
+#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
+
+#ifdef DBG
+#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#define IXGB_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + IXGB_##reg)))
+
+#define IXGB_READ_REG(a, reg) ( \
+ readl((a)->hw_addr + IXGB_##reg))
+
+#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
+
+#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
+
+#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
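+
+/* Typical usage, e.g. masking all interrupts and flushing the posted
+ * write as ixgb_irq_disable() does:
+ *
+ * IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
+ * IXGB_WRITE_FLUSH(&adapter->hw);
+ */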
+
+#define IXGB_MEMCPY memcpy
+
+#endif /* _IXGB_OSDEP_H_ */
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
new file mode 100644
index 000000000000..8a83dfdf746d
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -0,0 +1,476 @@
+/*******************************************************************************
+
+
+ Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IXGB_MAX_NIC 8
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
+#define IXGB_PARAM(X, desc) \
+ static int __devinitdata X[IXGB_MAX_NIC+1] = IXGB_PARAM_INIT; \
+ static int num_##X = 0; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
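+
+/* For reference, IXGB_PARAM(TxDescriptors, "Number of transmit descriptors")
+ * expands to roughly:
+ *
+ * static int __devinitdata TxDescriptors[IXGB_MAX_NIC+1] =
+ * { [0 ... IXGB_MAX_NIC] = OPTION_UNSET };
+ * static int num_TxDescriptors = 0;
+ * module_param_array_named(TxDescriptors, TxDescriptors, int,
+ * &num_TxDescriptors, 0);
+ * MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
+ *
+ * i.e. one value per supported board, with unset entries falling back to
+ * the option defaults applied in ixgb_check_options(). */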
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 64-4096
+ *
+ * Default Value: 256
+ */
+
+IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 64-4096
+ *
+ * Default Value: 1024
+ */
+
+IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+
+IXGB_PARAM(FlowControl, "Flow Control setting");
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ * on 82597 based NICs
+ *
+ * Default Value: 1
+ */
+
+IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 0.8192 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 32
+ */
+
+IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+
+/* Receive Interrupt Delay in units of 0.8192 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 72
+ */
+
+IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
+
+/* Receive Flow control high threshold (when we send a pause frame)
+ * (FCRTH)
+ *
+ * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
+ *
+ * Default Value: 196,608 (0x30000)
+ */
+
+IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
+
+/* Receive Flow control low threshold (when we send a resume frame)
+ * (FCRTL)
+ *
+ * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
+ * must be less than high threshold by at least 8 bytes
+ *
+ * Default Value: 163,840 (0x28000)
+ */
+
+IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
+
+/* Flow control request timeout (how long to pause the link partner's tx)
+ * (PAP 15:0)
+ *
+ * Valid Range: 1 - 65535
+ *
+ * Default Value: 256 (0x100)
+ */
+
+IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
+
+/* Interrupt Delay Enable
+ *
+ * Valid Range: 0, 1
+ *
+ * - 0 - disables transmit interrupt delay
+ * - 1 - enables transmit interrupt delay
+ *
+ * Default Value: 1
+ */
+
+IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
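+
+/* Parameters are given per adapter as comma-separated lists at module load
+ * time, indexed by board number; an illustrative invocation:
+ *
+ * modprobe ixgb TxDescriptors=1024,512 FlowControl=1
+ *
+ * would give the first board 1024 transmit descriptors and Rx-only flow
+ * control, the second board 512 transmit descriptors, and leave every
+ * other setting at its default. */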
+
+
+#define DEFAULT_TIDV 32
+#define MAX_TIDV 0xFFFF
+#define MIN_TIDV 0
+
+#define DEFAULT_RDTR 72
+#define MAX_RDTR 0xFFFF
+#define MIN_RDTR 0
+
+#define XSUMRX_DEFAULT OPTION_ENABLED
+
+#define FLOW_CONTROL_FULL ixgb_fc_full
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+#define DEFAULT_FCRTL 0x28000
+#define DEFAULT_FCRTH 0x30000
+#define MIN_FCRTL 0
+#define MAX_FCRTL 0x3FFE8
+#define MIN_FCRTH 8
+#define MAX_FCRTH 0x3FFF0
+
+#define DEFAULT_FCPAUSE 0x100 /* this may be too long */
+#define MIN_FCPAUSE 1
+#define MAX_FCPAUSE 0xffff
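+
+/* Note: the PAP value is presumed (not confirmed here) to be expressed in
+ * IEEE 802.3x pause quanta of 512 bit times, which would make the 0x100
+ * default pause the link partner for roughly 13 microseconds at 10 Gb/s.
+ */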
+
+struct ixgb_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct ixgb_opt_list {
+ int i;
+ char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit
+ixgb_validate_option(int *value, struct ixgb_option *opt)
+{
+ if(*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ printk(KERN_INFO "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ printk(KERN_INFO "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct ixgb_opt_list *ent;
+
+ for(i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if(*value == ent->i) {
+ if(ent->str[0] != '\0')
+ printk(KERN_INFO "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ printk(KERN_INFO "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
+
+/**
+ * ixgb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit
+ixgb_check_options(struct ixgb_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if(bd >= IXGB_MAX_NIC) {
+ printk(KERN_NOTICE
+ "Warning: no configuration for board #%i\n", bd);
+ printk(KERN_NOTICE "Using defaults for all values\n");
+ }
+
+ { /* Transmit Descriptor Count */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TXD),
+ .def = DEFAULT_TXD,
+ .arg = { .r = { .min = MIN_TXD,
+ .max = MAX_TXD}}
+ };
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+ if(num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ ixgb_validate_option(&tx_ring->count, &opt);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ IXGB_ROUNDUP(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+ }
+ { /* Receive Descriptor Count */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RXD),
+ .def = DEFAULT_RXD,
+ .arg = { .r = { .min = MIN_RXD,
+ .max = MAX_RXD}}
+ };
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+
+ if(num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ ixgb_validate_option(&rx_ring->count, &opt);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ IXGB_ROUNDUP(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+ }
+ { /* Receive Checksum Offload Enable */
+ struct ixgb_option opt = {
+ .type = enable_option,
+ .name = "Receive Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if(num_XsumRX > bd) {
+ int rx_csum = XsumRX[bd];
+ ixgb_validate_option(&rx_csum, &opt);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ struct ixgb_opt_list fc_list[] =
+ {{ ixgb_fc_none, "Flow Control Disabled" },
+ { ixgb_fc_rx_pause,"Flow Control Receive Only" },
+ { ixgb_fc_tx_pause,"Flow Control Transmit Only" },
+ { ixgb_fc_full, "Flow Control Enabled" },
+ { ixgb_fc_default, "Flow Control Hardware Default" }};
+
+ struct ixgb_option opt = {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = ixgb_fc_full,
+ .arg = { .l = { .nr = LIST_LEN(fc_list),
+ .p = fc_list }}
+ };
+
+ if(num_FlowControl > bd) {
+ int fc = FlowControl[bd];
+ ixgb_validate_option(&fc, &opt);
+ adapter->hw.fc.type = fc;
+ } else {
+ adapter->hw.fc.type = opt.def;
+ }
+ }
+ { /* Receive Flow Control High Threshold */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Rx Flow Control High Threshold",
+ .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
+ .def = DEFAULT_FCRTH,
+ .arg = { .r = { .min = MIN_FCRTH,
+ .max = MAX_FCRTH}}
+ };
+
+ if(num_RxFCHighThresh > bd) {
+ adapter->hw.fc.high_water = RxFCHighThresh[bd];
+ ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
+ } else {
+ adapter->hw.fc.high_water = opt.def;
+ }
+ if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
+ printk (KERN_INFO
+ "Ignoring RxFCHighThresh when no RxFC\n");
+ }
+ { /* Receive Flow Control Low Threshold */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Rx Flow Control Low Threshold",
+ .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
+ .def = DEFAULT_FCRTL,
+ .arg = { .r = { .min = MIN_FCRTL,
+ .max = MAX_FCRTL}}
+ };
+
+ if(num_RxFCLowThresh > bd) {
+ adapter->hw.fc.low_water = RxFCLowThresh[bd];
+ ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
+ } else {
+ adapter->hw.fc.low_water = opt.def;
+ }
+ if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
+ printk (KERN_INFO
+ "Ignoring RxFCLowThresh when no RxFC\n");
+ }
+ { /* Flow Control Pause Time Request*/
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Flow Control Pause Time Request",
+ .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
+ .def = DEFAULT_FCPAUSE,
+ .arg = { .r = { .min = MIN_FCPAUSE,
+ .max = MAX_FCPAUSE}}
+ };
+
+ if(num_FCReqTimeout > bd) {
+ int pause_time = FCReqTimeout[bd];
+ ixgb_validate_option(&pause_time, &opt);
+ adapter->hw.fc.pause_time = pause_time;
+ } else {
+ adapter->hw.fc.pause_time = opt.def;
+ }
+ if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
+ printk (KERN_INFO
+ "Ignoring FCReqTimeout when no RxFC\n");
+ }
+ /* high low and spacing check for rx flow control thresholds */
+ if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
+ /* high must be greater than low */
+ if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
+ /* set defaults */
+ printk (KERN_INFO
+ "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
+ "Using Defaults\n");
+ adapter->hw.fc.high_water = DEFAULT_FCRTH;
+ adapter->hw.fc.low_water = DEFAULT_FCRTL;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RDTR,
+ .max = MAX_RDTR}}
+ };
+
+ if(num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ ixgb_validate_option(&adapter->rx_int_delay, &opt);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TIDV,
+ .max = MAX_TIDV}}
+ };
+
+ if(num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ ixgb_validate_option(&adapter->tx_int_delay, &opt);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+
+ { /* Transmit Interrupt Delay Enable */
+ struct ixgb_option opt = {
+ .type = enable_option,
+ .name = "Tx Interrupt Delay Enable",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if(num_IntDelayEnable > bd) {
+ int ide = IntDelayEnable[bd];
+ ixgb_validate_option(&ide, &opt);
+ adapter->tx_int_delay_enable = ide;
+ } else {
+ adapter->tx_int_delay_enable = opt.def;
+ }
+ }
+}
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
new file mode 100644
index 000000000000..7fec613e1675
--- /dev/null
+++ b/drivers/net/jazzsonic.c
@@ -0,0 +1,381 @@
+/*
+ * sonic.c
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on Mips Jazz
+ * systems (Acer Pica-61, Mips Magnum 4000, Olivetti M700 and
+ * perhaps others, too)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/jazz.h>
+#include <asm/jazzdma.h>
+
+static char jazz_sonic_string[] = "jazzsonic";
+static struct platform_device *jazz_sonic_device;
+
+#define SONIC_MEM_SIZE 0x100
+
+#define SREGS_PAD(n) u16 n;
+
+#include "sonic.h"
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) (*((volatile unsigned int *)base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+do { \
+ *((volatile unsigned int *)base_addr+(reg)) = (val); \
+} while (0)
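+
+/* For example, SONIC_WRITE(SONIC_CMD, SONIC_CR_RST) in sonic_probe1() below
+ * performs a word-sized store at word offset SONIC_CMD from base_addr,
+ * putting the chip into software reset before the MAC address is read out.
+ */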
+
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * Base address and interrupt of the SONIC controller on JAZZ boards
+ */
+static struct {
+ unsigned int port;
+ unsigned int irq;
+} sonic_portlist[] = { {JAZZ_ETHERNET_BASE, JAZZ_ETHERNET_IRQ}, {0, 0}};
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+ 0x04, /* Mips Magnum 4000 */
+ 0xffff /* end of list */
+};
+
+static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
+ unsigned int irq)
+{
+ static unsigned version_printed;
+ unsigned int silicon_revision;
+ unsigned int val;
+ struct sonic_local *lp;
+ int err = -ENODEV;
+ int i;
+
+ if (!request_mem_region(base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
+ return -EBUSY;
+ /*
+ * get the Silicon Revision ID. If this is one of the known
+ * ones, assume that we found a SONIC ethernet controller at
+ * the expected location.
+ */
+ silicon_revision = SONIC_READ(SONIC_SR);
+ if (sonic_debug > 1)
+ printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+ i = 0;
+ while (known_revisions[i] != 0xffff
+ && known_revisions[i] != silicon_revision)
+ i++;
+
+ if (known_revisions[i] == 0xffff) {
+ printk("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
+ goto out;
+ }
+
+ if (sonic_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: Sonic ethernet found at 0x%08lx, ", dev->name, base_addr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = base_addr;
+ dev->irq = irq;
+
+ /*
+ * Put the sonic into software reset, then
+ * retrieve and print the ethernet address.
+ */
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP,0);
+ for (i=0; i<3; i++) {
+ val = SONIC_READ(SONIC_CAP0-i);
+ dev->dev_addr[i*2] = val;
+ dev->dev_addr[i*2+1] = val >> 8;
+ }
+
+ printk("HW Address ");
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i<5)
+ printk(":");
+ }
+
+ printk(" IRQ %d\n", irq);
+
+ err = -ENOMEM;
+
+ /* Initialize the device structure. */
+ if (dev->priv == NULL) {
+ /*
+ * the memory must be located in the same 64kb segment
+ */
+ lp = NULL;
+ i = 0;
+ do {
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if ((unsigned long) lp >> 16
+ != ((unsigned long)lp + sizeof(*lp) ) >> 16) {
+ /* FIXME, free the memory later */
+ kfree(lp);
+ lp = NULL;
+ }
+ } while (lp == NULL && i++ < 20);
+
+ if (lp == NULL) {
+ printk("%s: couldn't allocate memory for descriptors\n",
+ dev->name);
+ goto out;
+ }
+
+ memset(lp, 0, sizeof(struct sonic_local));
+
+ /* get the virtual dma address */
+ lp->cda_laddr = vdma_alloc(CPHYSADDR(lp),sizeof(*lp));
+ if (lp->cda_laddr == ~0UL) {
+ printk("%s: couldn't get DMA page entry for "
+ "descriptors\n", dev->name);
+ goto out1;
+ }
+
+ lp->tda_laddr = lp->cda_laddr + sizeof (lp->cda);
+ lp->rra_laddr = lp->tda_laddr + sizeof (lp->tda);
+ lp->rda_laddr = lp->rra_laddr + sizeof (lp->rra);
+
+ /* allocate receive buffer area */
+ /* FIXME, maybe we should use skbs */
+ lp->rba = kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL);
+ if (!lp->rba) {
+ printk("%s: couldn't allocate receive buffers\n",
+ dev->name);
+ goto out2;
+ }
+
+ /* get virtual dma address */
+ lp->rba_laddr = vdma_alloc(CPHYSADDR(lp->rba),
+ SONIC_NUM_RRS * SONIC_RBSIZE);
+ if (lp->rba_laddr == ~0UL) {
+ printk("%s: couldn't get DMA page entry for receive "
+ "buffers\n",dev->name);
+ goto out3;
+ }
+
+ /* now convert pointer to KSEG1 pointer */
+ lp->rba = (char *)KSEG1ADDR(lp->rba);
+ flush_cache_all();
+ dev->priv = (struct sonic_local *)KSEG1ADDR(lp);
+ }
+
+ lp = (struct sonic_local *)dev->priv;
+ dev->open = sonic_open;
+ dev->stop = sonic_close;
+ dev->hard_start_xmit = sonic_send_packet;
+ dev->get_stats = sonic_get_stats;
+ dev->set_multicast_list = &sonic_multicast_list;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT,0xffff);
+ SONIC_WRITE(SONIC_FAET,0xffff);
+ SONIC_WRITE(SONIC_MPT,0xffff);
+
+ return 0;
+out3:
+ kfree(lp->rba);
+out2:
+ vdma_free(lp->cda_laddr);
+out1:
+ kfree(lp);
+out:
+ release_region(base_addr, SONIC_MEM_SIZE);
+ return err;
+}
+
+/*
+ * Probe for a SONIC ethernet controller on a Mips Jazz board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+static int __init jazz_sonic_probe(struct device *device)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ unsigned long base_addr;
+ int err = 0;
+ int i;
+
+ /*
+ * Don't probe if we're not running on a Jazz board.
+ */
+ if (mips_machgroup != MACH_GROUP_JAZZ)
+ return -ENODEV;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ return -ENOMEM;
+
+ netdev_boot_setup_check(dev);
+ base_addr = dev->base_addr;
+
+ if (base_addr >= KSEG0) { /* Check a single specified location. */
+ err = sonic_probe1(dev, base_addr, dev->irq);
+ } else if (base_addr != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (i = 0; sonic_portlist[i].port; i++) {
+ int io = sonic_portlist[i].port;
+ if (sonic_probe1(dev, io, sonic_portlist[i].irq) == 0)
+ break;
+ }
+ if (!sonic_portlist[i].port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+
+ return 0;
+
+out1:
+ lp = dev->priv;
+ vdma_free(lp->rba_laddr);
+ kfree(lp->rba);
+ vdma_free(lp->cda_laddr);
+ kfree(lp);
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+/*
+ * SONIC uses a normal IRQ
+ */
+#define sonic_request_irq request_irq
+#define sonic_free_irq free_irq
+
+#define sonic_chiptomem(x) KSEG1ADDR(vdma_log2phys(x))
+
+#include "sonic.c"
+
+static int __devexit jazz_sonic_device_remove (struct device *device)
+{
+ struct net_device *dev = device->driver_data;
+
+ unregister_netdev (dev);
+ release_region (dev->base_addr, SONIC_MEM_SIZE);
+ free_netdev (dev);
+
+ return 0;
+}
+
+static struct device_driver jazz_sonic_driver = {
+ .name = jazz_sonic_string,
+ .bus = &platform_bus_type,
+ .probe = jazz_sonic_probe,
+ .remove = __devexit_p(jazz_sonic_device_remove),
+};
+
+static void jazz_sonic_platform_release (struct device *device)
+{
+ struct platform_device *pldev;
+
+ /* free device */
+ pldev = to_platform_device (device);
+ kfree (pldev);
+}
+
+static int __init jazz_sonic_init_module(void)
+{
+ struct platform_device *pldev;
+
+ if (driver_register(&jazz_sonic_driver)) {
+ printk(KERN_ERR "Driver registration failed\n");
+ return -ENOMEM;
+ }
+
+ jazz_sonic_device = NULL;
+
+ if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
+ goto out_unregister;
+ }
+
+ memset(pldev, 0, sizeof (*pldev));
+ pldev->name = jazz_sonic_string;
+ pldev->id = 0;
+ pldev->dev.release = jazz_sonic_platform_release;
+ jazz_sonic_device = pldev;
+
+ if (platform_device_register (pldev)) {
+ kfree(pldev);
+ jazz_sonic_device = NULL;
+ }
+
+ return 0;
+
+out_unregister:
+ platform_device_unregister(pldev);
+
+ return -ENOMEM;
+}
+
+static void __exit jazz_sonic_cleanup_module(void)
+{
+ driver_unregister(&jazz_sonic_driver);
+
+ if (jazz_sonic_device) {
+ platform_device_unregister(jazz_sonic_device);
+ jazz_sonic_device = NULL;
+ }
+}
+
+module_init(jazz_sonic_init_module);
+module_exit(jazz_sonic_cleanup_module);
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
new file mode 100644
index 000000000000..dec557fb6a99
--- /dev/null
+++ b/drivers/net/lance.c
@@ -0,0 +1,1308 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Andrey V. Savochkin:
+ - alignment problem with 1.3.* kernel and some minor changes.
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+ v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
+ before unregister_netdev() which caused NULL pointer
+ reference later in the chain (in rtnetlink_fill_ifinfo())
+ -- Mika Kuoppala <miku@iki.fi>
+
+ Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
+ the 2.1 version of the old driver - Alan Cox
+
+ Get rid of check_region, check kmalloc return in lance_probe1
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+
+ Reworked detection, added support for Racal InterLan EtherBlaster cards
+ Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
+*/
+
+static const char version[] = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
+static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
+static int __init do_lance_probe(struct net_device *dev);
+
+
+static struct card {
+ char id_offset14;
+ char id_offset15;
+} cards[] = {
+ { //"normal"
+ .id_offset14 = 0x57,
+ .id_offset15 = 0x57,
+ },
+ { //NI6510EB
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x44,
+ },
+ { //Racal InterLan EtherBlaster
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x49,
+ },
+};
+#define NUM_CARDS 3
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with boards that use the faster
+bus-master mode, rather than the shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the buffer length (rather than being directly the buffer length) for
+implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
+ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIB. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
+ That translates to 4 and 4 (16 == 2^^4).
+ This is a compile-time option for efficiency.
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
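+
+/* Illustration (hypothetical override, not a recommendation): building with
+ *   -DLANCE_LOG_TX_BUFFERS=5 -DLANCE_LOG_RX_BUFFERS=5
+ * would give 32-entry rings; the *_RING_LEN_BITS macros below shift the same
+ * log2 counts into the top bits of the init-block ring pointer words.
+ */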
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+#define TX_TIMEOUT 20
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ s32 base;
+ s16 buf_length; /* This length is 2s complement (negative)! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ s32 base;
+ s16 length; /* Length is 2s complement (negative)! */
+ s16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring; /* Tx and Rx ring base pointers */
+ u32 tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ struct net_device_stats stats;
+ unsigned char chip_version; /* See lance_chip_type. */
+ spinlock_t devlock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCnet/PCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct net_device *dev);
+static void lance_init_ring(struct net_device *dev, int mode);
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int lance_rx(struct net_device *dev);
+static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int lance_close(struct net_device *dev);
+static struct net_device_stats *lance_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev);
+
+
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ kfree(lp->tx_bounce_buffs);
+ kfree((void*)lp->rx_buffs);
+ kfree(lp);
+}
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+
+static struct net_device *dev_lance[MAX_CARDS];
+static int io[MAX_CARDS];
+static int dma[MAX_CARDS];
+static int irq[MAX_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
+MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
+MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
+MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
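+
+/* Example load (values are illustrative only -- use whatever matches the
+ * card's jumpers or EEPROM):
+ *
+ *   modprobe lance io=0x300,0x320 irq=5,0 dma=5,0
+ *
+ * io= is required, as init_module() below enforces; an irq or dma of 0 lets
+ * lance_probe1() attempt auto-detection where the chip supports it.
+ */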
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) /* only complain once */
+ break;
+ printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ dev = alloc_etherdev(0);
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->dma = dma[this_dev];
+ if (do_lance_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_lance[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found != 0)
+ return 0;
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_lance[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
+   board probes, now that kmalloc() can allocate ISA DMA-able regions.
+ This also allows the LANCE driver to be used as a module.
+ */
+static int __init do_lance_probe(struct net_device *dev)
+{
+ int *port, result;
+
+ if (high_memory <= phys_to_virt(16*1024*1024))
+ lance_need_isa_bounce_buffers = 0;
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+ struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
+ "lance-probe");
+
+ if (r) {
+ /* Detect the card with minimal I/O reads */
+ char offset14 = inb(ioaddr + 14);
+ int card;
+ for (card = 0; card < NUM_CARDS; ++card)
+ if (cards[card].id_offset14 == offset14)
+ break;
+ if (card < NUM_CARDS) {/*yes, the first byte matches*/
+ char offset15 = inb(ioaddr + 15);
+ for (card = 0; card < NUM_CARDS; ++card)
+ if ((cards[card].id_offset14 == offset14) &&
+ (cards[card].id_offset15 == offset15))
+ break;
+ }
+ if (card < NUM_CARDS) { /*Signature OK*/
+ result = lance_probe1(dev, ioaddr, 0, 0);
+ if (!result) {
+ struct lance_private *lp = dev->priv;
+ int ver = lp->chip_version;
+
+ r->name = chip_table[ver].name;
+ return 0;
+ }
+ }
+ release_region(ioaddr, LANCE_TOTAL_SIZE);
+ }
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init lance_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lance_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ long dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version; /* Already printed version info. */
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ if (isa_readw(0x000f0102) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (isa_readl(0x000f00f1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80
+ && ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
+ && inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+ /* The Un-Reset is only needed for the real NE2100, and will
+ confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+ /* We can't allocate dev->priv from alloc_etherdev() because it must be
+ an ISA DMA-able region. */
+ SET_MODULE_OWNER(dev);
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ dev->base_addr = ioaddr;
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+ lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+ if(lp==NULL)
+ return -ENODEV;
+ if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->rx_buffs)
+ goto out_lp;
+ if (lance_need_isa_bounce_buffers) {
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->tx_bounce_buffs)
+ goto out_rx;
+ } else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+ spin_lock_init(&lp->devlock);
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ err = -ENODEV;
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ unsigned long irq_mask;
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ irq_mask = probe_irq_on();
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ goto out_tx;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ goto out_tx;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+
+ flags=claim_dma_lock();
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ release_dma_lock(flags);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ goto out_tx;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(40);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ goto out_dma;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = lance_open;
+ dev->hard_start_xmit = lance_start_xmit;
+ dev->stop = lance_close;
+ dev->get_stats = lance_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->tx_timeout = lance_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 0;
+out_dma:
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+out_tx:
+ kfree(lp->tx_bounce_buffs);
+out_rx:
+ kfree((void*)lp->rx_buffs);
+out_lp:
+ kfree(lp);
+ return err;
+}
+
+
+static int
+lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ unsigned long flags=claim_dma_lock();
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ release_dma_lock(flags);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) isa_virt_to_bus(lp->tx_ring),
+ (u32) isa_virt_to_bus(lp->rx_ring),
+ (u32) isa_virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ netif_start_queue (dev);
+
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_ring(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct net_device *dev, int gfp)
+{
+ struct lance_private *lp = dev->priv;
+ int i;
+
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb) {
+ skb->dev = dev;
+ rx_buff = skb->tail;
+ } else
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ lp->rx_ring[i].base = 0;
+ else
+ lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
+
+static void
+lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = dev->priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ int ioaddr = dev->base_addr;
+
+ outw (0, ioaddr + LANCE_ADDR);
+ printk ("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw (ioaddr + LANCE_DATA));
+ outw (0x0004, ioaddr + LANCE_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ if (lance_debug > 3) {
+ int i;
+ printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk ("\n");
+ }
+#endif
+ lance_restart (dev, 0x0043, 1);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The old LANCE chips don't automatically pad buffers to min. size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ goto out;
+ lp->tx_ring[entry].length = -ETH_ZLEN;
+ }
+ else
+ lp->tx_ring[entry].length = -skb->len;
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ lp->stats.tx_bytes += skb->len;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)isa_virt_to_bus(skb->data));
+ memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ dev_kfree_skb(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ dev->trans_start = jiffies;
+
+ if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+out:
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ return 0;
+}
+
+/* The LANCE interrupt handler. */
+static irqreturn_t
+lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
+
+ spin_lock (&lp->devlock);
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+ /* There was a major error, log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ lp->stats.tx_errors++;
+ if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x1000) lp->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
+ dirty_tx, lp->cur_tx,
+ netif_queue_stopped(dev) ? "yes" : "no");
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ /* if the ring is no longer full, accept more packets */
+ if (netif_queue_stopped(dev) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
+ netif_wake_queue (dev);
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+static int
+lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net3. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+lance_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = dev->priv;
+
+ netif_stop_queue (dev);
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ {
+ unsigned long flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+ free_irq(dev->irq, dev);
+
+ lance_purge_ring(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ }
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
new file mode 100644
index 000000000000..5e263fcba669
--- /dev/null
+++ b/drivers/net/lasi_82596.c
@@ -0,0 +1,1607 @@
+/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
+   munged into HPPA boxen.
+
+ This driver is based upon 82596.c, original credits are below...
+ but there were too many hoops which HP wants jumped through to
+ keep this code in there in a sane manner.
+
+ 3 primary sources of the mess --
+ 1) hppa needs *lots* of cacheline flushing to keep this kind of
+ MMIO running.
+
+ 2) The 82596 needs to see all of its pointers as their physical
+ address. Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+ 3) The implementation HP is using seems to be significantly pickier
+   about when and how the command and RX units are started.  Some
+ command ordering was changed.
+
+ Examination of the mach driver leads one to believe that there
+ might be a saner way to pull this off... anyone who feels like a
+ full rewrite can be my guest.
+
+ Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+ 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
+ 03/02/2000 changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+  980825:  Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain, when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/pdc.h>
+#include <asm/cache.h>
+#include <asm/parisc-device.h>
+
+#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x,y) if (i596_debug & (x)) { y; }
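+
+/* For example, i596_debug = DEB_PROBE | DEB_INIT (0x0003) keeps just the
+ * probe and initialisation messages, while DEB_ANY turns everything on. */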
+
+
+#define CHECK_WBACK(addr,len) \
+ do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+
+#define CHECK_INV(addr,len) \
+ do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+
+#define CHECK_WBACK_INV(addr,len) \
+ do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
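+
+/* Typical usage, as seen throughout this file: CHECK_WBACK() before handing
+ * a descriptor the CPU has just written to the 82596, CHECK_INV() before the
+ * CPU reads data the 82596 may have written, and CHECK_WBACK_INV() when both
+ * directions are possible.  For instance, i596_add_cmd() below does:
+ *
+ *	lp->scb.command = CUC_START;
+ *	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+ *	CA(dev);
+ */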
+
+
+#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
+#define PA_CPU_PORT_L_ACCESS 4
+#define PA_CHANNEL_ATTENTION 8
+
+
+/*
+ * Define various macros for Channel Attention, word swapping etc., dependent
+ * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
+ */
+
+#ifdef __BIG_ENDIAN
+#define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define ISCP_BUSY 0x00010000
+#define MACH_IS_APRICOT 0
+#else
+#define WSWAPrfd(x) ((struct i596_rfd *)(x))
+#define WSWAPrbd(x) ((struct i596_rbd *)(x))
+#define WSWAPiscp(x) ((struct i596_iscp *)(x))
+#define WSWAPscb(x) ((struct i596_scb *)(x))
+#define WSWAPcmd(x) ((struct i596_cmd *)(x))
+#define WSWAPtbd(x) ((struct i596_tbd *)(x))
+#define WSWAPchar(x) ((char *)(x))
+#define ISCP_BUSY 0x0001
+#define MACH_IS_APRICOT 1
+#endif
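+
+/* Purely illustrative (this helper is not part of the driver): on big-endian
+ * hosts the WSWAP* macros above simply exchange the two 16-bit halves of a
+ * 32-bit bus address, so 0x12345678 becomes 0x56781234.  Written out as a
+ * function the same transformation would be:
+ *
+ *	static inline u32 wswap32(u32 x)
+ *	{
+ *		return ((u32)(x) << 16) | ((u32)(x) >> 16);
+ *	}
+ */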
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(i596_debug, "i");
+MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define MAX_DRIVERS 4 /* max count of drivers */
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define I596_NULL ((u32)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT 5
+
+#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ u32 ca;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ dma_addr_t next;
+ dma_addr_t data;
+ u32 cache_pad[5]; /* Total 32 bytes... */
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ dma_addr_t b_next; /* Address from i596 viewpoint */
+};
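+
+/* Illustrative sketch of the chaining described above (see i596_add_cmd()
+ * and virt_to_dma() further down): appending a command does
+ *
+ *	lp->cmd_tail->v_next = cmd;
+ *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp, &cmd->status));
+ *
+ * so the CPU follows v_next while the 82596 always sees the address of the
+ * next command's status field. */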
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ dma_addr_t tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+ dma_addr_t dma_addr;
+#ifdef __LP64__
+ u32 cache_pad[6]; /* Total 64 bytes... */
+#else
+ u32 cache_pad[1]; /* Total 32 bytes... */
+#endif
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ dma_addr_t b_next; /* Address from i596 viewpoint */
+ dma_addr_t rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+#ifndef __LP64__
+ u32 cache_pad[2]; /* Total 32 bytes... */
+#endif
+};
+
+struct i596_rbd {
+ /* hardware data */
+ unsigned short count;
+ unsigned short zero1;
+ dma_addr_t b_next;
+ dma_addr_t b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ /* driver data */
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ dma_addr_t b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+ /* Total 32 bytes... */
+#ifdef __LP64__
+ u32 cache_pad[4];
+#endif
+};
+
+/* These values are chosen so struct i596_private fits in one page... */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ dma_addr_t cmd;
+ dma_addr_t rfd;
+ u32 crc_err;
+ u32 align_err;
+ u32 resource_err;
+ u32 over_err;
+ u32 rcvdt_err;
+ u32 short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ u32 stat;
+ dma_addr_t scb;
+};
+
+struct i596_scp {
+ u32 sysbus;
+ u32 pad;
+ dma_addr_t iscp;
+};
+
+struct i596_private {
+ volatile struct i596_scp scp __attribute__((aligned(32)));
+ volatile struct i596_iscp iscp __attribute__((aligned(32)));
+ volatile struct i596_scb scb __attribute__((aligned(32)));
+ struct sa_cmd sa_cmd __attribute__((aligned(32)));
+ struct cf_cmd cf_cmd __attribute__((aligned(32)));
+ struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
+ struct mc_cmd mc_cmd __attribute__((aligned(32)));
+ struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
+ u32 stat;
+ int last_restart;
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ u32 last_cmd;
+ struct net_device_stats stats;
+ int next_tx_cmd;
+ int options;
+ spinlock_t lock;
+ dma_addr_t dma_addr;
+ struct device *dev;
+};
+
+static char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int i596_close(struct net_device *dev);
+static struct net_device_stats *i596_get_stats(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 100;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+
+static inline void CA(struct net_device *dev)
+{
+ gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
+}
+
+
+static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
+{
+ struct i596_private *lp = dev->priv;
+
+ u32 v = (u32) (c) | (u32) (x);
+ u16 a, b;
+
+ if (lp->options & OPT_SWAP_PORT) {
+ a = v >> 16;
+ b = v & 0xffff;
+ } else {
+ a = v & 0xffff;
+ b = v >> 16;
+ }
+
+ gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+ udelay(1);
+ gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+}
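+
+/* Worked example (hypothetical address, for illustration only): with
+ * OPT_SWAP_PORT set, MPU_PORT(dev, PORT_ALTSCP, 0x12345670) forms
+ * v = 0x12345672 and writes 0x1234 before 0x5672, so the most significant
+ * word reaches the port first, as the PORT_* comment above requires. */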
+
+
+static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+ while (--delcnt && lp->iscp.stat) {
+ udelay(10);
+ CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+ }
+ if (!delcnt) {
+ printk("%s: %s, iscp.stat %04x, didn't clear\n",
+ dev->name, str, lp->iscp.stat);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+ while (--delcnt && lp->scb.command) {
+ udelay(10);
+ CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+ }
+ if (!delcnt) {
+ printk("%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
+ &lp->scp, lp->scp.sysbus, lp->scp.iscp);
+ printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
+ &lp->iscp, lp->iscp.stat, lp->iscp.scb);
+ printk("scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %08x, .rfd = %08x\n",
+ &lp->scb, lp->scb.status, lp->scb.command,
+ lp->scb.cmd, lp->scb.rfd);
+ printk(" errors: crc %x, align %x, resource %x,"
+ " over %x, rcvdt %x, short %x\n",
+ lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
+ lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
+ cmd = lp->cmd_head;
+ while (cmd != NULL) {
+ printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
+ cmd, cmd->status, cmd->command, cmd->b_next);
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk("rfd_head = %p\n", rfd);
+ do {
+ printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+ " count %04x\n",
+ rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
+ rfd->count);
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk("rbd_head = %p\n", rbd);
+ do {
+ printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
+ rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+ CHECK_INV(lp, sizeof(struct i596_private));
+}
+
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x28] = 1;
+ pcc2[0x2b] = 0x1d;
+ printk("%s: Error interrupt\n", dev->name);
+ i596_display_data(dev);
+}
+#endif
+
+#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
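+
+/* That is, virt_to_dma(lp, &lp->scb) yields lp->dma_addr plus the offset of
+ * scb within struct i596_private -- the address the 82596 must use for a
+ * field the CPU reaches through lp. */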
+
+static inline void init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ dma_addr_t dma_addr;
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
+
+ if (skb == NULL)
+ panic("%s: alloc_skb() failed", __FILE__);
+ skb_reserve(skb, 2);
+ dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ skb->dev = dev;
+ rbd->v_next = rbd+1;
+ rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
+ rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->tail;
+ rbd->b_data = WSWAPchar(dma_addr);
+ rbd->size = PKT_BUF_SZ;
+ }
+ lp->rbd_head = lp->rbds;
+ rbd = lp->rbds + rx_ring_size - 1;
+ rbd->v_next = lp->rbds;
+ rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
+ rfd->cmd = CMD_FLEX;
+ }
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
+ rfd = lp->rfds;
+ rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
+ rfd->v_prev = lp->rfds + rx_ring_size - 1;
+ rfd = lp->rfds + rx_ring_size - 1;
+ rfd->v_next = lp->rfds;
+ rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+
+ CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+}
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dma_unmap_single(lp->dev,
+ (dma_addr_t)WSWAPchar(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb(rbd->skb);
+ }
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ lp->rfds[i].rbd = I596_NULL;
+ lp->rfds[i].cmd = CMD_FLEX;
+ }
+ lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
+ lp->rbd_head = lp->rbds;
+ lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
+
+ CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+}
+
+
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ unsigned long flags;
+
+ disable_irq(dev->irq); /* disable IRQs from LAN */
+ DEB(DEB_INIT,
+ printk("RESET 82596 port: %p (with IRQ %d disabled)\n",
+ (void*)(dev->base_addr + PA_I82596_RESET),
+ dev->irq));
+
+ gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
+ udelay(100); /* Wait 100us - seems to help */
+
+ /* change the scp address */
+
+ lp->last_cmd = jiffies;
+
+
+ lp->scp.sysbus = 0x0000006c;
+ lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
+ lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
+ lp->iscp.stat = ISCP_BUSY;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = NULL;
+ lp->scb.cmd = I596_NULL;
+
+ DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
+
+ CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
+ CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+
+ MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
+
+ CA(dev);
+
+ if (wait_istat(dev, lp, 1000, "initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+
+ lp->scb.command = 0;
+ CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+
+ enable_irq(dev->irq); /* enable IRQs from LAN */
+
+ DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
+ memcpy(lp->cf_cmd.i596_config, init_setup, 14);
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+
+ DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ lp->sa_cmd.cmd.command = CmdSASetup;
+ CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+ i596_add_cmd(dev, &lp->sa_cmd.cmd);
+
+ DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
+ lp->tdr_cmd.cmd.command = CmdTDR;
+ CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+ i596_add_cmd(dev, &lp->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed;
+ }
+ DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
+ lp->scb.command = RX_START;
+ lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
+ CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+
+ CA(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
+ goto failed;
+ DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
+
+ return 0;
+
+failed:
+ printk("%s: Failed to initialise 82596\n", dev->name);
+ MPU_PORT(dev, PORT_RESET, 0);
+ return -1;
+}
+
+
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ CHECK_INV(rfd, sizeof(struct i596_rfd));
+ while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr) {
+ rbd = lp->rbd_head;
+ CHECK_INV(rbd, sizeof(struct i596_rbd));
+ }
+ else {
+ printk("%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = NULL;
+ }
+ DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
+ /* a good frame */
+ int pkt_len = rbd->count & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+ dma_addr_t dma_addr;
+
+ dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
+ /* Get fresh skbuff to replace filled one. */
+ newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+ skb_reserve(newskb, 2);
+
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ newskb->dev = dev;
+ dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ rbd->v_data = newskb->tail;
+ rbd->b_data = WSWAPchar(dma_addr);
+ CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+ }
+ else
+ skb = dev_alloc_skb(pkt_len + 2);
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ else {
+ skb->dev = dev;
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
+ dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
+ }
+ skb->len = pkt_len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes+=pkt_len;
+ }
+ }
+ else {
+ DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ lp->stats.rx_errors++;
+ if ((rfd->stat) & 0x0001)
+ lp->stats.collisions++;
+ if ((rfd->stat) & 0x0080)
+ lp->stats.rx_length_errors++;
+ if ((rfd->stat) & 0x0100)
+ lp->stats.rx_over_errors++;
+ if ((rfd->stat) & 0x0200)
+ lp->stats.rx_fifo_errors++;
+ if ((rfd->stat) & 0x0400)
+ lp->stats.rx_frame_errors++;
+ if ((rfd->stat) & 0x0800)
+ lp->stats.rx_crc_errors++;
+ if ((rfd->stat) & 0x1000)
+ lp->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != NULL && (rbd->count & 0x4000)) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+ rfd->count = 0;
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = CMD_FLEX;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
+ CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+ rfd = lp->rfd_head;
+ CHECK_INV(rfd, sizeof(struct i596_rfd));
+ }
+
+ DEB(DEB_RXFRAME, printk("frames %d\n", frames));
+
+ return 0;
+}
+
+
+static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+ dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ }
+ CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+ }
+
+ wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
+ lp->scb.cmd = I596_NULL;
+ CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+}
+
+
+static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET, printk("i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev, lp, 100, "i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ /* FIXME: this command might cause an lpmc */
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+ CA(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev,lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = dev->priv;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->v_next = NULL;
+ cmd->b_next = I596_NULL;
+ CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
+ CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
+ lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
+ lp->scb.command = CUC_START;
+ CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+ CA(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk("%s: command unit timed out, status resetting.\n", dev->name);
+#if 1
+ i596_reset(dev, lp);
+#endif
+ }
+}
+
+#if 0
+/* this function makes a perfectly adequate probe... but we have a
+ device list */
+static int i596_test(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ volatile int *tint;
+ u32 data;
+
+ tint = (volatile int *)(&(lp->scp));
+ data = virt_to_dma(lp,tint);
+
+ tint[1] = -1;
+ CHECK_WBACK(tint,PAGE_SIZE);
+
+ MPU_PORT(dev, 1, data);
+
+ for(data = 1000000; data; data--) {
+ CHECK_INV(tint,PAGE_SIZE);
+ if(tint[1] != -1)
+ break;
+
+ }
+
+ printk("i596_test result %d\n", tint[1]);
+
+}
+#endif
+
+
+static int i596_open(struct net_device *dev)
+{
+ DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
+ printk("%s: IRQ %d not free\n", dev->name, dev->irq);
+ goto out;
+ }
+
+ init_rx_bufs(dev);
+
+ if (init_i596_mem(dev)) {
+ printk("%s: Failed to init memory\n", dev->name);
+ goto out_remove_rx_bufs;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+
+out_remove_rx_bufs:
+ remove_rx_bufs(dev);
+ free_irq(dev->irq, dev);
+out:
+ return -EAGAIN;
+}
+
+static void i596_tx_timeout (struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ lp->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ DEB(DEB_ERRORS, printk("Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS, printk("Kicking board.\n"));
+ lp->scb.command = CUC_START | RX_START;
+ CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+ CA (dev);
+ lp->last_restart = lp->stats.tx_packets;
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+ dev->trans_start = jiffies;
+
+ DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
+ skb->len, skb->data));
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
+ dev->name));
+ lp->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX | CmdTx;
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = EOF | length;
+
+ tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ tbd->data = WSWAPchar(tx_cmd->dma_addr);
+
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
+ CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
+ CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ int i;
+
+ printk("i596 0x%p, ", add);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", add[i + 6]);
+ printk(" -->");
+ for (i = 0; i < 6; i++)
+ printk(" %02X", add[i]);
+ printk(" %02X%02X, %s\n", add[12], add[13], str);
+}
+
+
+#define LAN_PROM_ADDR 0xF0810000
+
+static int __devinit i82596_probe(struct net_device *dev,
+ struct device *gen_dev)
+{
+ int i;
+ struct i596_private *lp;
+ char eth_addr[6];
+ dma_addr_t dma_addr;
+
+	/* This lot is to ensure things have been cache line aligned. */
+ if (sizeof(struct i596_rfd) != 32) {
+ printk("82596: sizeof(struct i596_rfd) = %d\n",
+ (int)sizeof(struct i596_rfd));
+ return -ENODEV;
+ }
+ if ((sizeof(struct i596_rbd) % 32) != 0) {
+ printk("82596: sizeof(struct i596_rbd) = %d\n",
+ (int)sizeof(struct i596_rbd));
+ return -ENODEV;
+ }
+ if ((sizeof(struct tx_cmd) % 32) != 0) {
+ printk("82596: sizeof(struct tx_cmd) = %d\n",
+ (int)sizeof(struct tx_cmd));
+ return -ENODEV;
+ }
+ if (sizeof(struct i596_tbd) != 32) {
+ printk("82596: sizeof(struct i596_tbd) = %d\n",
+ (int)sizeof(struct i596_tbd));
+ return -ENODEV;
+ }
+#ifndef __LP64__
+ if (sizeof(struct i596_private) > 4096) {
+ printk("82596: sizeof(struct i596_private) = %d\n",
+ (int)sizeof(struct i596_private));
+ return -ENODEV;
+ }
+#endif
+
+ if (!dev->base_addr || !dev->irq)
+ return -ENODEV;
+
+ if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
+ for (i=0; i < 6; i++) {
+ eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ }
+ printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+ }
+
+ dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
+ sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
+ if (!dev->mem_start) {
+ printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = eth_addr[i];
+
+ /* The 82596-specific entries in the device structure. */
+ dev->open = i596_open;
+ dev->stop = i596_close;
+ dev->hard_start_xmit = i596_start_xmit;
+ dev->get_stats = i596_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->tx_timeout = i596_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->priv = (void *)(dev->mem_start);
+
+ lp = dev->priv;
+ memset(lp, 0, sizeof(struct i596_private));
+
+ lp->scb.command = 0;
+ lp->scb.cmd = I596_NULL;
+ lp->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+ lp->dma_addr = dma_addr;
+ lp->dev = gen_dev;
+
+ CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+
+ i = register_netdev(dev);
+ if (i) {
+ lp = dev->priv;
+ dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
+ (void *)dev->mem_start, lp->dma_addr);
+ return i;
+ };
+
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+ for (i = 0; i < 6; i++)
+ DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
+ DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+ DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
+ dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
+
+ return 0;
+}
+
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ unsigned short status, ack_cmd = 0;
+
+ if (dev == NULL) {
+ printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
+ return IRQ_NONE;
+ }
+
+ lp = dev->priv;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ status = lp->scb.status;
+
+ DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, irq, status));
+
+ ack_cmd = status & 0xf000;
+
+ if (!ack_cmd) {
+ DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
+ spin_unlock (&lp->lock);
+ return IRQ_NONE;
+ }
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ if ((status & 0x8000))
+ DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+
+ while (lp->cmd_head != NULL) {
+ CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+ if (!(lp->cmd_head->status & STAT_C))
+ break;
+
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
+ lp->cmd_head->status, lp->cmd_head->command));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if ((ptr->status) & STAT_OK) {
+ DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
+ } else {
+ lp->stats.tx_errors++;
+ if ((ptr->status) & 0x0020)
+ lp->stats.collisions++;
+ if (!((ptr->status) & 0x0040))
+ lp->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800)
+ lp->stats.collisions++;
+ if ((ptr->status) & 0x1000)
+ lp->stats.tx_aborted_errors++;
+ }
+ dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = ((struct tdr_cmd *)ptr)->status;
+
+ if (status & 0x8000) {
+ DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
+ } else {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n", dev->name);
+
+ DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+ /* Zap command so set_multicast_list() knows it is free */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+ lp->last_cmd = jiffies;
+ }
+
+ /* This mess is arranging that only the last of any outstanding
+ * commands has the interrupt bit set. Should probably really
+ * only add to the cmd queue when the CU is stopped.
+ */
+ ptr = lp->cmd_head;
+ while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
+ struct i596_cmd *prev = ptr;
+
+ ptr->command &= 0x1fff;
+ ptr = ptr->v_next;
+ CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+ }
+
+ if ((lp->cmd_head != NULL))
+ ack_cmd |= CUC_START;
+ lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
+ CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ ack_cmd |= RX_START;
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ lp->scb.command = ack_cmd;
+ CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+
+ /* DANGER: I suspect that some kind of interrupt
+ acknowledgement aside from acking the 82596 might be needed
+ here... but it's running acceptably without */
+
+ CA(dev);
+
+ wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
+ DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev, lp, 100, "close1 timed out");
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+
+ CA(dev);
+
+ wait_cmd(dev, lp, 100, "close2 timed out");
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT,i596_display_data(dev));
+ i596_cleanup_cmd(dev,lp);
+
+ disable_irq(dev->irq);
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *
+ i596_get_stats(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = dev->priv;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+ if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ if (lp->cf_cmd.cmd.command)
+ printk("%s: config change request already queued\n",
+ dev->name);
+ else {
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+ }
+ }
+
+ cnt = dev->mc_count;
+ if (cnt > MAX_MC_CNT)
+ {
+ cnt = MAX_MC_CNT;
+		printk("%s: Only %d multicast addresses supported\n",
+ dev->name, cnt);
+ }
+
+ if (dev->mc_count > 0) {
+ struct dev_mc_list *dmi;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ cmd = &lp->mc_cmd;
+ cmd->cmd.command = CmdMulticastList;
+ cmd->mc_cnt = dev->mc_count * 6;
+ cp = cmd->mc_addrs;
+ for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
+ memcpy(cp, dmi->dmi_addr, 6);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
+ }
+ CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
+
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
+static int debug = -1;
+
+static int num_drivers;
+static struct net_device *netdevs[MAX_DRIVERS];
+
+static int __devinit
+lan_init_chip(struct parisc_device *dev)
+{
+ struct net_device *netdevice;
+ int retval;
+
+ if (num_drivers >= MAX_DRIVERS) {
+ /* max count of possible i82596 drivers reached */
+ return -ENOMEM;
+ }
+
+ if (num_drivers == 0)
+ printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
+
+ if (!dev->irq) {
+ printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+ __FILE__, dev->hpa);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);
+
+ netdevice = alloc_etherdev(0);
+ if (!netdevice)
+ return -ENOMEM;
+
+ netdevice->base_addr = dev->hpa;
+ netdevice->irq = dev->irq;
+
+ retval = i82596_probe(netdevice, &dev->dev);
+ if (retval) {
+ free_netdev(netdevice);
+ return -ENODEV;
+ }
+
+ if (dev->id.sversion == 0x72) {
+ ((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
+ }
+
+ netdevs[num_drivers++] = netdevice;
+
+ return retval;
+}
+
+
+static struct parisc_device_id lan_tbl[] = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, lan_tbl);
+
+static struct parisc_driver lan_driver = {
+ .name = "Apricot",
+ .id_table = lan_tbl,
+ .probe = lan_init_chip,
+};
+
+static int __devinit lasi_82596_init(void)
+{
+ if (debug >= 0)
+ i596_debug = debug;
+ return register_parisc_driver(&lan_driver);
+}
+
+module_init(lasi_82596_init);
+
+static void __exit lasi_82596_exit(void)
+{
+ int i;
+
+ for (i=0; i<MAX_DRIVERS; i++) {
+ struct i596_private *lp;
+ struct net_device *netdevice;
+
+ netdevice = netdevs[i];
+ if (!netdevice)
+ continue;
+
+ unregister_netdev(netdevice);
+
+ lp = netdevice->priv;
+ dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
+ (void *)netdevice->mem_start, lp->dma_addr);
+ free_netdev(netdevice);
+ }
+ num_drivers = 0;
+
+ unregister_parisc_driver(&lan_driver);
+}
+
+module_exit(lasi_82596_exit);
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
new file mode 100644
index 000000000000..179a97c0af69
--- /dev/null
+++ b/drivers/net/lne390.c
@@ -0,0 +1,458 @@
+/*
+ lne390.c
+
+ Linux driver for Mylex LNE390 EISA Network Adapter
+
+ Copyright (C) 1996-1998, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) Based upon framework of es3210 driver.
+ 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
+ 3) Russ Nelson's asm packet driver provided additional info.
+ 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
+
+ The LNE390 is an EISA shared memory NS8390 implementation. Note
+ that all memory copies to/from the board must be 32bit transfers.
+ There are two versions of the card: the lne390a and the lne390b.
+ Going by the EISA cfg files, the "a" has jumpers to select between
+ BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU.
+ The shared memory address selection is also slightly different.
+	Note that shared memory addresses > 1MB are supported with this driver.
+
+ You can try <http://www.mylex.com> if you want more info, as I've
+ never even seen one of these cards. :)
+
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01
+ - get rid of check_region
+ - no need to check if dev == NULL in lne390_probe1
+*/
+
+static const char *version =
+ "lne390.c: Driver revision v0.99.1, 01/09/2000\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "lne390"
+
+static int lne390_probe1(struct net_device *dev, int ioaddr);
+
+static int lne390_open(struct net_device *dev);
+static int lne390_close(struct net_device *dev);
+
+static void lne390_reset_8390(struct net_device *dev);
+
+static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
+
+#define LNE390_START_PG 0x00 /* First page of TX buffer */
+#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */
+#define LNE390_IO_EXTENT 0x20
+#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */
+#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */
+#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
+
+#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */
+#define LNE390_ADDR1 0x80
+#define LNE390_ADDR2 0xe5
+
+#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */
+#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */
+
+#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
+#define LNE390_CFG2 0xc90
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to LNE390_DEBUG to get verbose driver info during operation.
+ * Currently only the probe one is implemented.
+ */
+
+#define LNE390_D_PROBE 0x01
+#define LNE390_D_RX_PKT 0x02
+#define LNE390_D_TX_PKT 0x04
+#define LNE390_D_IRQ 0x08
+
+#define LNE390_DEBUG 0
+
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
+static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
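+
+/* How these tables are used in lne390_probe1() below: the low three bits of
+ * LNE390_CFG2 index the map and the entry is scaled by 64kB, so e.g. an "A"
+ * card with mem_reg == 0 has its shared memory at 0xff * 0x10000 = 0xff0000. */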
+
+/*
+ * Probe for the card. The best way is to read the EISA ID if it
+ * is known. Then we can check the prefix of the station address
+ * PROM for a match against the value assigned to Mylex.
+ */
+
+static int __init do_lne390_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+ int ret;
+
+ SET_MODULE_OWNER(dev);
+
+ if (ioaddr > 0x1ff) { /* Check a single specified location. */
+ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+ ret = lne390_probe1(dev, ioaddr);
+ if (ret)
+ release_region(ioaddr, LNE390_IO_EXTENT);
+ return ret;
+ }
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if (!EISA_bus) {
+#if LNE390_DEBUG & LNE390_D_PROBE
+ printk("lne390-debug: Not an EISA bus. Not probing high ports.\n");
+#endif
+ return -ENXIO;
+ }
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
+ continue;
+ if (lne390_probe1(dev, ioaddr) == 0)
+ return 0;
+ release_region(ioaddr, LNE390_IO_EXTENT);
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, LNE390_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init lne390_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lne390_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init lne390_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, revision, ret;
+ unsigned long eisa_id;
+
+ if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
+
+#if LNE390_DEBUG & LNE390_D_PROBE
+ printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT));
+ printk("lne390-debug: config regs: %#x %#x\n",
+ inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2));
+#endif
+
+
+/* Check the EISA ID of the card. */
+ eisa_id = inl(ioaddr + LNE390_ID_PORT);
+ if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) {
+ return -ENODEV;
+ }
+
+	revision = (eisa_id >> 24) & 0x01;	/* 0 = rev A, 1 = rev B */
+
+#if 0
+/* Check the Mylex vendor ID as well. Not really required. */
+ if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0
+ || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
+ || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
+ printk("lne390.c: card not found");
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
+ printk(" (invalid prefix).\n");
+ return -ENODEV;
+ }
+#endif
+
+ printk("lne390.c: LNE390%X in EISA slot %d, address", 0xa+revision, ioaddr/0x1000);
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i)));
+ printk(".\nlne390.c: ");
+
+ /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
+ if (dev->irq == 0) {
+ unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3;
+ dev->irq = irq_map[irq_reg & 0x07];
+ printk("using");
+ } else {
+ /* This is useless unless we reprogram the card here too */
+ if (dev->irq == 2) dev->irq = 9; /* Doh! */
+ printk("assigning");
+ }
+ printk(" IRQ %d,", dev->irq);
+
+ if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return ret;
+ }
+
+ if (dev->mem_start == 0) {
+ unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07;
+
+ if (revision) /* LNE390B */
+ dev->mem_start = shmem_mapB[mem_reg] * 0x10000;
+ else /* LNE390A */
+ dev->mem_start = shmem_mapA[mem_reg] * 0x10000;
+ printk(" using ");
+ } else {
+ /* Should check for value in shmem_map and reprogram the card to use it */
+ dev->mem_start &= 0xfff0000;
+ printk(" assigning ");
+ }
+
+ printk("%dkB memory at physical address %#lx\n",
+ LNE390_STOP_PG/4, dev->mem_start);
+
+ /*
+ BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ the card mem within the region covered by `normal' RAM !!!
+
+ ioremap() will fail in that case.
+ */
+ ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n");
+ printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n");
+ printk(KERN_ERR "lne390.c: Driver NOT installed.\n");
+ ret = -EAGAIN;
+ goto cleanup;
+ }
+ printk("lne390.c: remapped %dkB card memory to virtual address %p\n",
+ LNE390_STOP_PG/4, ei_status.mem);
+
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256;
+
+ /* The 8390 offset is zero for the LNE390 */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "LNE390";
+ ei_status.tx_start_page = LNE390_START_PG;
+ ei_status.rx_start_page = LNE390_START_PG + TX_PAGES;
+ ei_status.stop_page = LNE390_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &lne390_reset_8390;
+ ei_status.block_input = &lne390_block_input;
+ ei_status.block_output = &lne390_block_output;
+ ei_status.get_8390_hdr = &lne390_get_8390_hdr;
+
+ dev->open = &lne390_open;
+ dev->stop = &lne390_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+cleanup:
+ free_irq(dev->irq, dev);
+ return ret;
+}
+
+/*
+ * Reset as per the packet driver method. Judging by the EISA cfg
+ * file, this just toggles the "Board Enable" bits (bits 2 and 0).
+ */
+
+static void lne390_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ outb(0x04, ioaddr + LNE390_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name);
+
+ mdelay(2);
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + LNE390_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/*
+ * Note: the following three functions rely on the implicit assumption
+ * that the associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as a multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
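+
+/* For example, a count of 61 becomes (61 + 3) & ~3 == 64, keeping every
+ * shared memory copy a whole number of 32-bit transfers as the board needs. */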
+
+/*
+ * Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via lne390_get_8390_hdr() above.
+ */
+
+static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8);
+
+ if (ring_offset + count > (LNE390_STOP_PG<<8)) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = (LNE390_STOP_PG<<8) - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + (TX_PAGES<<8), count);
+ } else {
+ /* Packet is in one chunk. */
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+}
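+
+/* Worked example of the wrap case above (illustrative numbers): with
+ * LNE390_STOP_PG = 0x80 the ring ends at offset 0x8000, so a 1500 byte
+ * packet starting at ring_offset 0x7e00 is copied as semi_count =
+ * 0x8000 - 0x7e00 = 512 bytes followed by the remaining 988 bytes taken
+ * from the start of the receive area at TX_PAGES << 8. */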
+
+static void lne390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+static int lne390_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int lne390_close(struct net_device *dev)
+{
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ ei_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
+static struct net_device *dev_lne[MAX_LNE_CARDS];
+static int io[MAX_LNE_CARDS];
+static int irq[MAX_LNE_CARDS];
+static int mem[MAX_LNE_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "memory base address(es)");
+MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ if (do_lne390_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_lne[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
+ struct net_device *dev = dev_lne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
new file mode 100644
index 000000000000..2ffc31708d5f
--- /dev/null
+++ b/drivers/net/loopback.c
@@ -0,0 +1,233 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Pseudo-driver for the loopback interface.
+ *
+ * Version: @(#)loopback.c 1.0.4b 08/16/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@scyld.com>
+ *
+ * Alan Cox : Fixed oddments for NET3.014
+ * Alan Cox : Rejig for NET3.029 snap #3
+ * Alan Cox : Fixed NET3.029 bugs and sped up
+ * Larry McVoy : Tiny tweak to double performance
+ * Alan Cox : Backed out LMV's tweak - the linux mm
+ * can't take it...
+ * Michael Griffith: Don't bother computing the checksums
+ * on packets received on the loopback
+ * interface.
+ * Alexey Kuznetsov: Potential hang under some extreme
+ * cases removed.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/if_arp.h> /* For ARPHRD_ETHER */
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
+
+#define LOOPBACK_OVERHEAD (128 + MAX_HEADER + 16 + 16)
+
+/* KISS: just allocate small chunks and copy bits.
+ *
+ * So, in fact, this is documentation, explaining what we expect
+ * of a large-send capable device, modulo the TCP checksum, which is
+ * ignored for loopback.
+ */
+
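+/*
+ * emulate_large_send_offload() splits a TSO skb into tso_size-sized chunks,
+ * copies the IP/TCP headers into each new skb, fixes up the IP id, total
+ * length and checksum and the TCP sequence number for every segment, and
+ * then hands each piece to netif_rx().
+ */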
+static void emulate_large_send_offload(struct sk_buff *skb)
+{
+ struct iphdr *iph = skb->nh.iph;
+ struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
+ unsigned int doffset = (iph->ihl + th->doff) * 4;
+ unsigned int mtu = skb_shinfo(skb)->tso_size + doffset;
+ unsigned int offset = 0;
+ u32 seq = ntohl(th->seq);
+ u16 id = ntohs(iph->id);
+
+ while (offset + doffset < skb->len) {
+ unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
+ struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
+
+ if (!nskb)
+ break;
+ skb_reserve(nskb, 32);
+ nskb->mac.raw = nskb->data - 14;
+ nskb->nh.raw = nskb->data;
+ iph = nskb->nh.iph;
+ memcpy(nskb->data, skb->nh.raw, doffset);
+ if (skb_copy_bits(skb,
+ doffset + offset,
+ nskb->data + doffset,
+ frag_size))
+ BUG();
+ skb_put(nskb, doffset + frag_size);
+ nskb->ip_summed = CHECKSUM_UNNECESSARY;
+ nskb->dev = skb->dev;
+ nskb->priority = skb->priority;
+ nskb->protocol = skb->protocol;
+ nskb->dst = dst_clone(skb->dst);
+ memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+ nskb->pkt_type = skb->pkt_type;
+
+ th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4);
+ iph->tot_len = htons(frag_size + doffset);
+ iph->id = htons(id);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
+ th->seq = htonl(seq);
+ if (offset + doffset + frag_size < skb->len)
+ th->fin = th->psh = 0;
+ netif_rx(nskb);
+ offset += frag_size;
+ seq += frag_size;
+ id++;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+/*
+ * The higher levels take care of making this non-reentrant (it's
+ * called with bh's disabled).
+ */
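+/*
+ * (The per-CPU statistics and the NETIF_F_LLTX flag set on loopback_dev
+ * below presumably allow this path to run without a per-device xmit lock.)
+ */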
+static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *lb_stats;
+
+ skb_orphan(skb);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->dev=dev;
+#ifndef LOOPBACK_MUST_CHECKSUM
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+
+ if (skb_shinfo(skb)->tso_size) {
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
+ BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
+
+ emulate_large_send_offload(skb);
+ return 0;
+ }
+
+ dev->last_rx = jiffies;
+
+ lb_stats = &per_cpu(loopback_stats, get_cpu());
+ lb_stats->rx_bytes += skb->len;
+ lb_stats->tx_bytes += skb->len;
+ lb_stats->rx_packets++;
+ lb_stats->tx_packets++;
+ put_cpu();
+
+ netif_rx(skb);
+
+ return(0);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct net_device_stats *stats = dev->priv;
+ int i;
+
+ if (!stats) {
+ return NULL;
+ }
+
+ memset(stats, 0, sizeof(struct net_device_stats));
+
+ for (i=0; i < NR_CPUS; i++) {
+ struct net_device_stats *lb_stats;
+
+ if (!cpu_possible(i))
+ continue;
+ lb_stats = &per_cpu(loopback_stats, i);
+ stats->rx_bytes += lb_stats->rx_bytes;
+ stats->tx_bytes += lb_stats->tx_bytes;
+ stats->rx_packets += lb_stats->rx_packets;
+ stats->tx_packets += lb_stats->tx_packets;
+ }
+
+ return stats;
+}
+
+static u32 loopback_get_link(struct net_device *dev)
+{
+ return 1;
+}
+
+static struct ethtool_ops loopback_ethtool_ops = {
+ .get_link = loopback_get_link,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+};
+
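+/*
+ * The generous MTU of (16 * 1024) + 20 + 20 + 12 presumably leaves room for
+ * a 16 kB payload plus a 20-byte IP header, a 20-byte TCP header and 12
+ * bytes of TCP options.
+ */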
+struct net_device loopback_dev = {
+ .name = "lo",
+ .mtu = (16 * 1024) + 20 + 20 + 12,
+ .hard_start_xmit = loopback_xmit,
+ .hard_header = eth_header,
+ .hard_header_cache = eth_header_cache,
+ .header_cache_update = eth_header_cache_update,
+ .hard_header_len = ETH_HLEN, /* 14 */
+ .addr_len = ETH_ALEN, /* 6 */
+ .tx_queue_len = 0,
+ .type = ARPHRD_LOOPBACK, /* 0x0001*/
+ .rebuild_header = eth_rebuild_header,
+ .flags = IFF_LOOPBACK,
+ .features = NETIF_F_SG|NETIF_F_FRAGLIST
+ |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA
+ |NETIF_F_LLTX,
+ .ethtool_ops = &loopback_ethtool_ops,
+};
+
+/* Set up and register the LOOPBACK device. */
+int __init loopback_init(void)
+{
+ struct net_device_stats *stats;
+
+ /* Can survive without statistics */
+ stats = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
+ if (stats) {
+ memset(stats, 0, sizeof(struct net_device_stats));
+ loopback_dev.priv = stats;
+ loopback_dev.get_stats = &get_stats;
+ }
+
+ return register_netdev(&loopback_dev);
+};
+
+EXPORT_SYMBOL(loopback_dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
new file mode 100644
index 000000000000..6139f06d7d2b
--- /dev/null
+++ b/drivers/net/lp486e.c
@@ -0,0 +1,1352 @@
+/* Intel Professional Workstation/panther ethernet driver */
+/* lp486e.c: A panther 82596 ethernet driver for linux. */
+/*
+ History and copyrights:
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and
+ distributed according to the terms of the GNU General Public License
+ as modified by SRC, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Apricot
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+ Professional Workstation
+ Derived from apricot.c by Ard van Breemen
+ <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
+
+ Credits:
+ Thanks to Murphy Software BV for letting me write this in their time.
+ Well, actually, I get paid doing this...
+ (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
+ more information on the Professional Workstation)
+
+ Present version
+ aeb@cwi.nl
+*/
+/*
+ There are currently two motherboards that I know of in the
+ Professional Workstation. The only one I am familiar with is the
+ Intel Panther motherboard. -- ard
+*/
+/*
+The PWS is equipped with an Intel 82596. This is a very intelligent controller
+which runs its own microcode. Communication with the host processor is done
+through linked lists of commands and buffers in the host processor's memory.
+A complete description of the 82596 is available from Intel; search for
+a file called "29021806.pdf", which describes the chip itself in full.
+To use it on the PWS, some additions are needed regarding generation of
+the PORT and CA signals, and the interrupt glue needed for a PC.
+I/O map:
+PORT SIZE ACTION MEANING
+0xCB0 2 WRITE Lower 16 bits for PORT command
+0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
+0xCB4 1 WRITE Generation of CA signal
+0xCB8 1 WRITE Clear interrupt glue
+All other communication is through memory!
+*/
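+/*
+ * The PORT(), CA() and CLEAR_INT() helpers further down implement this
+ * I/O map.
+ */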
+
+#define SLOW_DOWN_IO udelay(5)
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define DRV_NAME "lp486e"
+
+/* debug print flags */
+#define LOG_SRCDST 0x80000000
+#define LOG_STATINT 0x40000000
+#define LOG_STARTINT 0x20000000
+
+#define i596_debug debug
+
+static int i596_debug = 0;
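+/* Note: i596_debug expands to "debug" (see the #define above), so the
+ * module_param(debug, ...) declaration at the bottom of this file controls
+ * this variable. */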
+
+static const char * const medianame[] = {
+ "10baseT", "AUI",
+ "10baseT-FD", "AUI-FD",
+};
+
+#define LP486E_TOTAL_SIZE 16
+
+#define I596_NULL (0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOP = 0,
+ CmdIASetup = 1,
+ CmdConfigure = 2,
+ CmdMulticastList = 3,
+ CmdTx = 4,
+ CmdTDR = 5,
+ CmdDump = 6,
+ CmdDiagnose = 7
+};
+
+#if 0
+static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
+ "Tx", "TDR", "Dump", "Diagnose" };
+#endif
+
+/* Status word bits */
+#define STAT_CX 0x8000 /* The CU finished executing a command
+ with the Interrupt bit set */
+#define STAT_FR 0x4000 /* The RU finished receiving a frame */
+#define STAT_CNA 0x2000 /* The CU left the active state */
+#define STAT_RNR 0x1000 /* The RU left the active state */
+#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
+#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
+ 2: active, 3-7: unused */
+#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
+ 2: no resources, 4: ready,
+ 10: no resources due to no more RBDs,
+ 12: no more RBDs, other: unused */
+#define STAT_T 0x0008 /* Bus throttle timers loaded */
+#define STAT_ZERO 0x0807 /* Always zero */
+
+#if 0
+static char *CUstates[8] = {
+ "idle", "suspended", "active", 0, 0, 0, 0, 0
+};
+static char *RUstates[16] = {
+ "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
+ 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
+};
+
+static void
+i596_out_status(int status) {
+ int bad = 0;
+ char *s;
+
+ printk("status %4.4x:", status);
+ if (status == 0xffff)
+ printk(" strange..\n");
+ else {
+ if (status & STAT_CX)
+ printk(" CU done");
+ if (status & STAT_CNA)
+ printk(" CU stopped");
+ if (status & STAT_FR)
+ printk(" got a frame");
+ if (status & STAT_RNR)
+ printk(" RU stopped");
+ if (status & STAT_T)
+ printk(" throttled");
+ if (status & STAT_ZERO)
+ bad = 1;
+ s = CUstates[(status & STAT_CUS) >> 8];
+ if (!s)
+ bad = 1;
+ else
+ printk(" CU(%s)", s);
+ s = RUstates[(status & STAT_RUS) >> 4];
+ if (!s)
+ bad = 1;
+ else
+ printk(" RU(%s)", s);
+ if (bad)
+ printk(" bad status");
+ printk("\n");
+ }
+}
+#endif
+
+/* Command word bits */
+#define ACK_CX 0x8000
+#define ACK_FR 0x4000
+#define ACK_CNA 0x2000
+#define ACK_RNR 0x1000
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+typedef u32 phys_addr;
+
+static inline phys_addr
+va_to_pa(void *x) {
+ return x ? virt_to_bus(x) : I596_NULL;
+}
+
+static inline void *
+pa_to_va(phys_addr x) {
+ return (x == I596_NULL) ? NULL : bus_to_virt(x);
+}
+
+/* status bits for cmd */
+#define CMD_STAT_C 0x8000 /* CU command complete */
+#define CMD_STAT_B 0x4000 /* CU command in progress */
+#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
+#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
+
+struct i596_cmd { /* 8 bytes */
+ unsigned short status;
+ unsigned short command;
+ phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
+ phys_addr pa_data; /* va_to_pa(char *data) */
+ struct sk_buff *skb;
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
+ unsigned short size;
+ unsigned short pad;
+};
+
+/* status bits for rfd */
+#define RFD_STAT_C 0x8000 /* Frame reception complete */
+#define RFD_STAT_B 0x4000 /* Frame reception in progress */
+#define RFD_STAT_OK 0x2000 /* Frame received without errors */
+#define RFD_STATUS 0x1fff
+#define RFD_LENGTH_ERR 0x1000
+#define RFD_CRC_ERR 0x0800
+#define RFD_ALIGN_ERR 0x0400
+#define RFD_NOBUFS_ERR 0x0200
+#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
+#define RFD_SHORT_FRAME_ERR 0x0080
+#define RFD_NOEOP_ERR 0x0040
+#define RFD_TRUNC_ERR 0x0020
+#define RFD_MULTICAST 0x0002 /* 0: destination had our address
+ 1: destination was broadcast/multicast */
+#define RFD_COLLISION 0x0001
+
+/* receive frame descriptor */
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
+ phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
+ unsigned short count;
+ unsigned short size;
+ char data[1532];
+};
+
+#define RBD_EL 0x8000
+#define RBD_P 0x4000
+#define RBD_SIZEMASK 0x3fff
+#define RBD_EOF 0x8000
+#define RBD_F 0x4000
+
+/* receive buffer descriptor */
+struct i596_rbd {
+ unsigned short size;
+ unsigned short pad;
+ phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
+ phys_addr pa_data; /* va_to_pa(char *data) */
+ phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
+
+ /* Driver private part */
+ struct sk_buff *skb;
+};
+
+#define RX_RING_SIZE 64
+#define RX_SKBSIZE (ETH_FRAME_LEN+10)
+#define RX_RBD_SIZE 32
+
+/* System Control Block - 40 bytes */
+struct i596_scb {
+ u16 status; /* 0 */
+ u16 command; /* 2 */
+ phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
+ phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
+ u32 crc_err; /* 12 */
+ u32 align_err; /* 16 */
+ u32 resource_err; /* 20 */
+ u32 over_err; /* 24 */
+ u32 rcvdt_err; /* 28 */
+ u32 short_err; /* 32 */
+ u16 t_on; /* 36 */
+ u16 t_off; /* 38 */
+};
+
+/* Intermediate System Configuration Pointer - 8 bytes */
+struct i596_iscp {
+ u32 busy; /* 0 */
+ phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
+};
+
+/* System Configuration Pointer - 12 bytes */
+struct i596_scp {
+ u32 sysbus; /* 0 */
+ u32 pad; /* 4 */
+ phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
+};
+
+/* Selftest and dump results - needs 16-byte alignment */
+/*
+ * The size of the dump area is 304 bytes. When the dump is executed
+ * by the Port command an extra word will be appended to the dump area.
+ * The extra word is a copy of the Dump status word (containing the
+ * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
+ */
+struct i596_dump {
+ u16 dump[153]; /* (304 = 130h) + 2 bytes */
+};
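+/* The (currently compiled-out) i596_port_do() helper below can issue the
+ * Selftest or Dump PORT command and print the resulting area. */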
+
+struct i596_private { /* aligned to a 16-byte boundary */
+ struct i596_scp scp; /* 0 - needs 16-byte alignment */
+ struct i596_iscp iscp; /* 12 */
+ struct i596_scb scb; /* 20 */
+ u32 dummy; /* 60 */
+ struct i596_dump dump; /* 64 - needs 16-byte alignment */
+
+ struct i596_cmd set_add;
+ char eth_addr[8]; /* directly follows set_add */
+
+ struct i596_cmd set_conf;
+ char i596_config[16]; /* directly follows set_conf */
+
+ struct i596_cmd tdr;
+ unsigned long tdr_stat; /* directly follows tdr */
+
+ int last_restart;
+ struct i596_rbd *rbd_list;
+ struct i596_rbd *rbd_tail;
+ struct i596_rfd *rx_tail;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct net_device_stats stats;
+ spinlock_t cmd_lock;
+};
+
+static char init_setup[14] = {
+ 0x8E, /* length 14 bytes, prefetch on */
+ 0xC8, /* default: fifo to 8, monitor off */
+ 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
+ 0x2E, /* (default is 0x26)
+ No source address insertion, 8 byte preamble */
+ 0x00, /* default priority and backoff */
+ 0x60, /* default interframe spacing */
+ 0x00, /* default slot time LSB */
+ 0xf2, /* default slot time and nr of retries */
+ 0x00, /* default various bits
+ (0: promiscuous mode, 1: broadcast disable,
+ 2: encoding mode, 3: transmit on no CRS,
+ 4: no CRC insertion, 5: CRC type,
+ 6: bit stuffing, 7: padding) */
+ 0x00, /* default carrier sense and collision detect */
+ 0x40, /* default minimum frame length */
+ 0xff, /* (default is 0xff, and that is what apricot.c has;
+ elp486.c has 0xfb: Enable crc append in memory.) */
+ 0x00, /* default: not full duplex */
+ 0x7f /* (default is 0x3f) multi IA */
+};
+
+static int i596_open(struct net_device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int i596_close(struct net_device *dev);
+static struct net_device_stats *i596_get_stats(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void print_eth(char *);
+static void set_multicast_list(struct net_device *dev);
+static void i596_tx_timeout(struct net_device *dev);
+
+static int
+i596_timeout(struct net_device *dev, char *msg, int ct) {
+ struct i596_private *lp;
+ int boguscnt = ct;
+
+ lp = (struct i596_private *) dev->priv;
+ while (lp->scb.command) {
+ if (--boguscnt == 0) {
+ printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
+ dev->name, msg,
+ lp->scb.status, lp->scb.command);
+ return 1;
+ }
+ udelay(5);
+ barrier();
+ }
+ return 0;
+}
+
+static inline int
+init_rx_bufs(struct net_device *dev, int num) {
+ struct i596_private *lp;
+ struct i596_rfd *rfd;
+ int i;
+ // struct i596_rbd *rbd;
+
+ lp = (struct i596_private *) dev->priv;
+ lp->scb.pa_rfd = I596_NULL;
+
+ for (i = 0; i < num; i++) {
+ rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
+ if (rfd == NULL)
+ break;
+
+ rfd->stat = 0;
+ rfd->pa_rbd = I596_NULL;
+ rfd->count = 0;
+ rfd->size = 1532;
+ if (i == 0) {
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail = rfd;
+ } else {
+ rfd->cmd = 0;
+ }
+ rfd->pa_next = lp->scb.pa_rfd;
+ lp->scb.pa_rfd = va_to_pa(rfd);
+ lp->rx_tail->pa_next = lp->scb.pa_rfd;
+ }
+
+#if 0
+ for (i = 0; i<RX_RBD_SIZE; i++) {
+ rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
+ if (rbd) {
+ rbd->pad = 0;
+ rbd->count = 0;
+ rbd->skb = dev_alloc_skb(RX_SKB_SIZE);
+ if (!rbd->skb) {
+ printk("dev_alloc_skb failed");
+ }
+ rbd->next = rfd->rbd;
+ if (i) {
+ rfd->rbd->prev = rbd;
+ rbd->size = RX_SKB_SIZE;
+ } else {
+ rbd->size = (RX_SKB_SIZE | RBD_EL);
+ lp->rbd_tail = rbd;
+ }
+
+ rfd->rbd = rbd;
+ } else {
+ printk("Could not kmalloc rbd\n");
+ }
+ }
+ lp->rbd_tail->next = rfd->rbd;
+#endif
+ return (i);
+}
+
+static inline void
+remove_rx_bufs(struct net_device *dev) {
+ struct i596_private *lp;
+ struct i596_rfd *rfd;
+
+ lp = (struct i596_private *) dev->priv;
+ lp->rx_tail->pa_next = I596_NULL;
+
+ do {
+ rfd = pa_to_va(lp->scb.pa_rfd);
+ lp->scb.pa_rfd = rfd->pa_next;
+ kfree(rfd);
+ } while (rfd != lp->rx_tail);
+
+ lp->rx_tail = NULL;
+
+#if 0
+ for (lp->rbd_list) {
+ }
+#endif
+}
+
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_DUMP 0x03 /* dump */
+
+#define IOADDR 0xcb0 /* real constant */
+#define IRQ 10 /* default IRQ - can be changed by ECU */
+
+/* The 82596 requires two 16-bit write cycles for a port command */
+static inline void
+PORT(phys_addr a, unsigned int cmd) {
+ if (a & 0xf)
+ printk("lp486e.c: PORT: address not aligned\n");
+ outw(((a & 0xffff) | cmd), IOADDR);
+ outw(((a>>16) & 0xffff), IOADDR+2);
+}
+
+static inline void
+CA(void) {
+ outb(0, IOADDR+4);
+ udelay(8);
+}
+
+static inline void
+CLEAR_INT(void) {
+ outb(0, IOADDR+8);
+}
+
+#define SIZE(x) (sizeof(x)/sizeof((x)[0]))
+
+#if 0
+/* selftest or dump */
+static void
+i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
+ struct i596_private *lp = dev->priv;
+ u16 *outp;
+ int i, m;
+
+ memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
+ outp = &(lp->dump.dump[0]);
+
+ PORT(va_to_pa(outp), portcmd);
+ mdelay(30); /* random, unmotivated */
+
+ printk("lp486e i82596 %s result:\n", cmdname);
+ for (m = SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
+ ;
+ for (i = 0; i < m; i++) {
+ printk(" %04x", lp->dump.dump[i]);
+ if (i%8 == 7)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+static int
+i596_scp_setup(struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+ int boguscnt;
+
+ /* Setup SCP, ISCP, SCB */
+ /*
+ * sysbus bits:
+ * only a single byte is significant - here 0x44
+ * 0x80: big endian mode (details depend on stepping)
+ * 0x40: 1
+ * 0x20: interrupt pin is active low
+ * 0x10: lock function disabled
+ * 0x08: external triggering of bus throttle timers
+ * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
+ * 0x01: unused
+ */
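+	/* Here 0x44 = 0x40 (the bit documented above as always 1) | 0x04
+	 * (linear mode). */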
+ lp->scp.sysbus = 0x00440000; /* linear mode */
+ lp->scp.pad = 0; /* must be zero */
+ lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
+
+ /*
+ * The CPU sets the ISCP to 1 before it gives the first CA()
+ */
+ lp->iscp.busy = 0x0001;
+ lp->iscp.pa_scb = va_to_pa(&(lp->scb));
+
+ lp->scb.command = 0;
+ lp->scb.status = 0;
+ lp->scb.pa_cmd = I596_NULL;
+ /* lp->scb.pa_rfd has been initialised already */
+
+ lp->last_cmd = jiffies;
+ lp->cmd_backlog = 0;
+ lp->cmd_head = NULL;
+
+ /*
+ * Reset the 82596.
+ * We need to wait 10 systemclock cycles, and
+ * 5 serial clock cycles.
+ */
+ PORT(0, PORT_RESET); /* address part ignored */
+ udelay(100);
+
+ /*
+ * Before the CA signal is asserted, the default SCP address
+ * (0x00fffff4) can be changed to a 16-byte aligned value
+ */
+ PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
+
+ /*
+ * The initialization procedure begins when a
+ * Channel Attention signal is asserted after a reset.
+ */
+
+ CA();
+
+ /*
+ * The ISCP busy is cleared by the 82596 after the SCB address is read.
+ */
+ boguscnt = 100;
+ while (lp->iscp.busy) {
+ if (--boguscnt == 0) {
+ /* No i82596 present? */
+ printk("%s: i82596 initialization timed out\n",
+ dev->name);
+ return 1;
+ }
+ udelay(5);
+ barrier();
+ }
+ /* I find here boguscnt==100, so no delay was required. */
+
+ return 0;
+}
+
+static int
+init_i596(struct net_device *dev) {
+ struct i596_private *lp;
+
+ if (i596_scp_setup(dev))
+ return 1;
+
+ lp = (struct i596_private *) dev->priv;
+ lp->scb.command = 0;
+
+ memcpy ((void *)lp->i596_config, init_setup, 14);
+ lp->set_conf.command = CmdConfigure;
+ i596_add_cmd(dev, (void *)&lp->set_conf);
+
+ memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
+ lp->set_add.command = CmdIASetup;
+ i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+
+ lp->tdr.command = CmdTDR;
+ i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+
+ if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
+ return 1;
+
+ lp->scb.command = RX_START;
+ CA();
+
+ barrier();
+
+ if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
+ return 1;
+
+ return 0;
+}
+
+/* Receive a single frame */
+static inline int
+i596_rx_one(struct net_device *dev, struct i596_private *lp,
+ struct i596_rfd *rfd, int *frames) {
+
+ if (rfd->stat & RFD_STAT_OK) {
+ /* a good frame */
+ int pkt_len = (rfd->count & 0x3fff);
+ struct sk_buff *skb = dev_alloc_skb(pkt_len);
+
+ (*frames)++;
+
+ if (rfd->cmd & CMD_EOL)
+ printk("Received on EOL\n");
+
+ if (skb == NULL) {
+ printk ("%s: i596_rx Memory squeeze, "
+ "dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return 1;
+ }
+
+ skb->dev = dev;
+ memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ } else {
+#if 0
+ printk("Frame reception error status %04x\n",
+ rfd->stat);
+#endif
+ lp->stats.rx_errors++;
+ if (rfd->stat & RFD_COLLISION)
+ lp->stats.collisions++;
+ if (rfd->stat & RFD_SHORT_FRAME_ERR)
+ lp->stats.rx_length_errors++;
+ if (rfd->stat & RFD_DMA_ERR)
+ lp->stats.rx_over_errors++;
+ if (rfd->stat & RFD_NOBUFS_ERR)
+ lp->stats.rx_fifo_errors++;
+ if (rfd->stat & RFD_ALIGN_ERR)
+ lp->stats.rx_frame_errors++;
+ if (rfd->stat & RFD_CRC_ERR)
+ lp->stats.rx_crc_errors++;
+ if (rfd->stat & RFD_LENGTH_ERR)
+ lp->stats.rx_length_errors++;
+ }
+ rfd->stat = rfd->count = 0;
+ return 0;
+}
+
+static int
+i596_rx(struct net_device *dev) {
+ struct i596_private *lp = (struct i596_private *) dev->priv;
+ struct i596_rfd *rfd;
+ int frames = 0;
+
+ while (1) {
+ rfd = pa_to_va(lp->scb.pa_rfd);
+ if (!rfd) {
+ printk(KERN_ERR "i596_rx: NULL rfd?\n");
+ return 0;
+ }
+#if 1
+ if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
+ printk("SF:%p-%04x\n", rfd, rfd->stat);
+#endif
+ if (!(rfd->stat & RFD_STAT_C))
+ break; /* next one not ready */
+ if (i596_rx_one(dev, lp, rfd, &frames))
+ break; /* out of memory */
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail->cmd = 0;
+ lp->rx_tail = rfd;
+ lp->scb.pa_rfd = rfd->pa_next;
+ barrier();
+ }
+
+ return frames;
+}
+
+static void
+i596_cleanup_cmd(struct net_device *dev) {
+ struct i596_private *lp;
+ struct i596_cmd *cmd;
+
+ lp = (struct i596_private *) dev->priv;
+ while (lp->cmd_head) {
+ cmd = (struct i596_cmd *)lp->cmd_head;
+
+ lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
+ lp->cmd_backlog--;
+
+ switch ((cmd->command) & 0x7) {
+ case CmdTx: {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
+ struct i596_tbd * tx_cmd_tbd;
+ tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
+
+ dev_kfree_skb_any(tx_cmd_tbd->skb);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)tx_cmd);
+ netif_wake_queue(dev);
+ break;
+ }
+ case CmdMulticastList: {
+ // unsigned short count = *((unsigned short *) (ptr + 1));
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)cmd);
+ break;
+ }
+ default: {
+ cmd->pa_next = I596_NULL;
+ break;
+ }
+ }
+ barrier();
+ }
+
+ if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
+ ;
+
+ lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
+}
+
+static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
+
+ if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
+ ;
+
+ netif_stop_queue(dev);
+
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA();
+ barrier();
+
+ /* wait for shutdown */
+ if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
+ ;
+
+ i596_cleanup_cmd(dev);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ /*dev_kfree_skb(skb, FREE_WRITE);*/
+ init_i596(dev);
+}
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
+ struct i596_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->pa_next = I596_NULL;
+
+ spin_lock_irqsave(&lp->cmd_lock, flags);
+
+ if (lp->cmd_head) {
+ lp->cmd_tail->pa_next = va_to_pa(cmd);
+ } else {
+ lp->cmd_head = cmd;
+ if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
+ ;
+ lp->scb.pa_cmd = va_to_pa(cmd);
+ lp->scb.command = CUC_START;
+ CA();
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
+ spin_unlock_irqrestore(&lp->cmd_lock, flags);
+
+ if (lp->cmd_backlog > 16) {
+ int tickssofar = jiffies - lp->last_cmd;
+ if (tickssofar < HZ/4)
+ return;
+
+ printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ int i;
+
+ i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ, dev->name, dev);
+ if (i) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return i;
+ }
+
+ if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
+ printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
+
+ if (i < 4) {
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+ netif_start_queue(dev);
+ init_i596(dev);
+ return 0; /* Always succeed */
+}
+
+static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+ struct tx_cmd *tx_cmd;
+ short length;
+
+ length = skb->len;
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ dev->trans_start = jiffies;
+
+ tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
+ if (tx_cmd == NULL) {
+ printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.tx_dropped++;
+ dev_kfree_skb (skb);
+ } else {
+ struct i596_tbd *tx_cmd_tbd;
+ tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
+ tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
+ tx_cmd_tbd->pa_next = I596_NULL;
+
+ tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tx_cmd_tbd->pad = 0;
+ tx_cmd_tbd->size = (EOF | length);
+
+ tx_cmd_tbd->pa_data = va_to_pa (skb->data);
+ tx_cmd_tbd->skb = skb;
+
+ if (i596_debug & LOG_SRCDST)
+ print_eth (skb->data);
+
+ i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
+
+ lp->stats.tx_packets++;
+ }
+
+ return 0;
+}
+
+static void
+i596_tx_timeout (struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
+ lp->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ printk ("Resetting board.\n");
+
+ /* Shutdown and restart */
+ i596_reset (dev, lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ printk ("Kicking board.\n");
+ lp->scb.command = (CUC_START | RX_START);
+ CA();
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ netif_wake_queue(dev);
+}
+
+static void print_eth(char *add)
+{
+ int i;
+
+ printk ("Dest ");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i]);
+ printk ("\n");
+
+ printk ("Source");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i+6]);
+ printk ("\n");
+
+ printk ("type %2.2X%2.2X\n",
+ (unsigned char) add[12], (unsigned char) add[13]);
+}
+
+static int __init lp486e_probe(struct net_device *dev) {
+ struct i596_private *lp;
+ unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
+ unsigned char *bios;
+ int i, j;
+ int ret = -ENOMEM;
+ static int probed;
+
+ if (probed)
+ return -ENODEV;
+ probed++;
+
+ if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
+ printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
+ return -EBUSY;
+ }
+
+ lp = (struct i596_private *) dev->priv;
+ spin_lock_init(&lp->cmd_lock);
+
+ /*
+ * Do we really have this thing?
+ */
+ if (i596_scp_setup(dev)) {
+ ret = -ENODEV;
+ goto err_out_kfree;
+ }
+
+ dev->base_addr = IOADDR;
+ dev->irq = IRQ;
+
+
+ /*
+ * How do we find the ethernet address? I don't know.
+ * One possibility is to look at the EISA configuration area
+ * [0xe8000-0xe9fff]. This contains the ethernet address
+ * but not at a fixed address - things depend on setup options.
+ *
+ * If we find no address, or the wrong address, use
+ * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
+ * with the value found in the BIOS setup.
+ */
+ bios = bus_to_virt(0xe8000);
+ for (j = 0; j < 0x2000; j++) {
+ if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
+ printk("%s: maybe address at BIOS 0x%x:",
+ dev->name, 0xe8000+j);
+ for (i = 0; i < 6; i++) {
+ eth_addr[i] = bios[i+j];
+ printk(" %2.2X", eth_addr[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
+ dev->name, dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
+ printk("\n");
+
+ /* The LP486E-specific entries in the device structure. */
+ dev->open = &i596_open;
+ dev->stop = &i596_close;
+ dev->hard_start_xmit = &i596_start_xmit;
+ dev->get_stats = &i596_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->watchdog_timeo = 5*HZ;
+ dev->tx_timeout = i596_tx_timeout;
+
+#if 0
+ /* selftest reports 0x320925ae - don't know what that means */
+ i596_port_do(dev, PORT_SELFTEST, "selftest");
+ i596_port_do(dev, PORT_DUMP, "dump");
+#endif
+ return 0;
+
+err_out_kfree:
+ release_region(IOADDR, LP486E_TOTAL_SIZE);
+ return ret;
+}
+
+static inline void
+i596_handle_CU_completion(struct net_device *dev,
+ struct i596_private *lp,
+ unsigned short status,
+ unsigned short *ack_cmdp) {
+ struct i596_cmd *cmd;
+ int frames_out = 0;
+ int commands_done = 0;
+ int cmd_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->cmd_lock, flags);
+ cmd = lp->cmd_head;
+
+ while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
+ cmd = lp->cmd_head;
+
+ lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
+ lp->cmd_backlog--;
+
+ commands_done++;
+ cmd_val = cmd->command & 0x7;
+#if 0
+ printk("finished CU %s command (%d)\n",
+ CUcmdnames[cmd_val], cmd_val);
+#endif
+ switch (cmd_val) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tx_cmd_tbd;
+
+ tx_cmd = (struct tx_cmd *) cmd;
+ tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
+
+ frames_out++;
+ if (cmd->status & CMD_STAT_OK) {
+ if (i596_debug)
+ print_eth(pa_to_va(tx_cmd_tbd->pa_data));
+ } else {
+ lp->stats.tx_errors++;
+ if (i596_debug)
+ printk("transmission failure:%04x\n",
+ cmd->status);
+ if (cmd->status & 0x0020)
+ lp->stats.collisions++;
+ if (!(cmd->status & 0x0040))
+ lp->stats.tx_heartbeat_errors++;
+ if (cmd->status & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ if (cmd->status & 0x0800)
+ lp->stats.collisions++;
+ if (cmd->status & 0x1000)
+ lp->stats.tx_aborted_errors++;
+ }
+ dev_kfree_skb_irq(tx_cmd_tbd->skb);
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)tx_cmd);
+ netif_wake_queue(dev);
+ break;
+ }
+
+ case CmdMulticastList:
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)cmd);
+ break;
+
+ case CmdTDR:
+ {
+ unsigned long status = *((unsigned long *) (cmd + 1));
+ if (status & 0x8000) {
+ if (i596_debug)
+ printk("%s: link ok.\n", dev->name);
+ } else {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n",
+ dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n",
+ dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n",
+ dev->name);
+ printk("%s: Time %ld.\n",
+ dev->name, status & 0x07ff);
+ }
+ }
+ default:
+ cmd->pa_next = I596_NULL;
+ lp->last_cmd = jiffies;
+
+ }
+ barrier();
+ }
+
+ cmd = lp->cmd_head;
+ while (cmd && (cmd != lp->cmd_tail)) {
+ cmd->command &= 0x1fff;
+ cmd = pa_to_va(cmd->pa_next);
+ barrier();
+ }
+
+ if (lp->cmd_head)
+ *ack_cmdp |= CUC_START;
+ lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
+ spin_unlock_irqrestore(&lp->cmd_lock, flags);
+}
+
+static irqreturn_t
+i596_interrupt (int irq, void *dev_instance, struct pt_regs *regs) {
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct i596_private *lp;
+ unsigned short status, ack_cmd = 0;
+ int frames_in = 0;
+
+ lp = (struct i596_private *) dev->priv;
+
+ /*
+ * The 82596 examines the command, performs the required action,
+ * and then clears the SCB command word.
+ */
+ if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
+ ;
+
+ /*
+ * The status word indicates the status of the 82596.
+ * It is modified only by the 82596.
+ *
+ * [So, we must not clear it. I find often status 0xffff,
+ * which is not one of the values allowed by the docs.]
+ */
+ status = lp->scb.status;
+#if 0
+ if (i596_debug) {
+ printk("%s: i596 interrupt, ", dev->name);
+ i596_out_status(status);
+ }
+#endif
+ /* Impossible, but it happens - perhaps when we get
+ a receive interrupt but scb.pa_rfd is I596_NULL. */
+ if (status == 0xffff) {
+ printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
+ goto out;
+ }
+
+ ack_cmd = (status & STAT_ACK);
+
+ if (status & (STAT_CX | STAT_CNA))
+ i596_handle_CU_completion(dev, lp, status, &ack_cmd);
+
+ if (status & (STAT_FR | STAT_RNR)) {
+ /* Restart the receive unit when it got inactive somehow */
+ if ((status & STAT_RNR) && netif_running(dev))
+ ack_cmd |= RX_START;
+
+ if (status & STAT_FR) {
+ frames_in = i596_rx(dev);
+ if (!frames_in)
+ printk("receive frame reported, but no frames\n");
+ }
+ }
+
+ /* acknowledge the interrupt */
+ /*
+ if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
+ ack_cmd |= CUC_START;
+ */
+
+ if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
+ ;
+
+ lp->scb.command = ack_cmd;
+
+ CLEAR_INT();
+ CA();
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+
+ netif_stop_queue(dev);
+
+ if (i596_debug)
+ printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status);
+
+ lp->scb.command = (CUC_ABORT | RX_ABORT);
+ CA();
+
+ i596_cleanup_cmd(dev);
+
+ if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
+ ;
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+static struct net_device_stats * i596_get_stats(struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev) {
+ struct i596_private *lp = dev->priv;
+ struct i596_cmd *cmd;
+
+ if (i596_debug > 1)
+ printk ("%s: set multicast list %d\n",
+ dev->name, dev->mc_count);
+
+ if (dev->mc_count > 0) {
+ struct dev_mc_list *dmi;
+ char *cp;
+ cmd = (struct i596_cmd *)kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ if (cmd == NULL) {
+ printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
+ return;
+ }
+ cmd->command = CmdMulticastList;
+ *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ cp = ((char *)(cmd + 1))+2;
+ for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
+ memcpy(cp, dmi,6);
+ cp += 6;
+ }
+ if (i596_debug & LOG_SRCDST)
+ print_eth (((char *)(cmd + 1)) + 2);
+ i596_add_cmd(dev, cmd);
+ } else {
+ if (lp->set_conf.pa_next != I596_NULL) {
+ return;
+ }
+ if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (dev->flags & IFF_ALLMULTI)
+ dev->flags |= IFF_PROMISC;
+ lp->i596_config[8] &= ~0x01;
+ } else {
+ lp->i596_config[8] |= 0x01;
+ }
+
+ i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ }
+}
+
+MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
+MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_lp486e;
+static int full_duplex;
+static int options;
+static int io = IOADDR;
+static int irq = IRQ;
+
+module_param(debug, int, 0);
+//module_param(max_interrupt_work, int, 0);
+//module_param(reverse_probe, int, 0);
+//module_param(rx_copybreak, int, 0);
+module_param(options, int, 0);
+module_param(full_duplex, int, 0);
+
+static int __init lp486e_init_module(void) {
+ int err;
+ struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->base_addr = io;
+ err = lp486e_probe(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+ err = register_netdev(dev);
+ if (err) {
+ release_region(dev->base_addr, LP486E_TOTAL_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ dev_lp486e = dev;
+ full_duplex = 0;
+ options = 0;
+ return 0;
+}
+
+static void __exit lp486e_cleanup_module(void) {
+ unregister_netdev(dev_lp486e);
+ release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
+ free_netdev(dev_lp486e);
+}
+
+module_init(lp486e_init_module);
+module_exit(lp486e_cleanup_module);
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
new file mode 100644
index 000000000000..ce5761816a64
--- /dev/null
+++ b/drivers/net/mac8390.c
@@ -0,0 +1,757 @@
+/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike)
+ Ethernet cards on Linux */
+/* Based on the former daynaport.c driver, by Alan Cox. Some code
+ taken from or inspired by skeleton.c by Donald Becker, acenic.c by
+ Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference. */
+
+/* 2000-02-28: support added for Dayna and Kinetics cards by
+ A.G.deWijn@phys.uu.nl */
+/* 2000-04-04: support added for Dayna2 by bart@etpmod.phys.tue.nl */
+/* 2001-04-18: support for DaynaPort E/LC-M by rayk@knightsmanor.org */
+/* 2001-05-15: support for Cabletron ported from old daynaport driver
+ * and fixed access to Sonic Sys card which masquerades as a Farallon
+ * by rayk@knightsmanor.org */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/nubus.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/hwtest.h>
+#include <asm/macints.h>
+
+#include "8390.h"
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
+#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+
+/* Unfortunately it seems we have to hardcode these for the moment. */
+/* Shouldn't the card know about this? Does anyone know where to read
+   it off the card? Do we trust the data provided by the card? */
+
+#define DAYNA_8390_BASE 0x80000
+#define DAYNA_8390_MEM 0x00000
+
+#define KINETICS_8390_BASE 0x80000
+#define KINETICS_8390_MEM 0x00000
+
+#define CABLETRON_8390_BASE 0x90000
+#define CABLETRON_8390_MEM 0x00000
+
+enum mac8390_type {
+ MAC8390_NONE = -1,
+ MAC8390_APPLE,
+ MAC8390_ASANTE,
+ MAC8390_FARALLON, /* Apple, Asante, and Farallon are all compatible */
+ MAC8390_CABLETRON,
+ MAC8390_DAYNA,
+ MAC8390_INTERLAN,
+ MAC8390_KINETICS,
+ MAC8390_FOCUS,
+ MAC8390_SONICSYS,
+ MAC8390_DAYNA2,
+ MAC8390_DAYNA3,
+};
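+/* The cardname[], word16[] and useresources[] tables below are indexed by
+ * enum mac8390_type, so their entries must stay in the order of the enum
+ * above. */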
+
+static const char * cardname[] = {
+ "apple",
+ "asante",
+ "farallon",
+ "cabletron",
+ "dayna",
+ "interlan",
+ "kinetics",
+ "focus",
+ "sonic systems",
+ "dayna2",
+ "dayna_lc",
+};
+
+static int word16[] = {
+ 1, /* apple */
+ 1, /* asante */
+ 1, /* farallon */
+ 1, /* cabletron */
+ 0, /* dayna */
+ 1, /* interlan */
+ 0, /* kinetics */
+ 1, /* focus (??) */
+ 1, /* sonic systems */
+ 1, /* dayna2 */
+ 1, /* dayna-lc */
+};
+
+/* on which cards do we use NuBus resources? */
+static int useresources[] = {
+ 1, /* apple */
+ 1, /* asante */
+ 1, /* farallon */
+ 0, /* cabletron */
+ 0, /* dayna */
+ 0, /* interlan */
+ 0, /* kinetics */
+ 0, /* focus (??) */
+ 1, /* sonic systems */
+ 1, /* dayna2 */
+ 1, /* dayna-lc */
+};
+
+static char version[] __initdata =
+ "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+
+extern enum mac8390_type mac8390_ident(struct nubus_dev * dev);
+extern int mac8390_memsize(unsigned long membase);
+extern int mac8390_memtest(struct net_device * dev);
+static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+ enum mac8390_type type);
+
+static int mac8390_open(struct net_device * dev);
+static int mac8390_close(struct net_device * dev);
+static void mac8390_no_reset(struct net_device *dev);
+
+/* Sane (32-bit chunk memory read/write) - Apple/Asante/Farallon do this*/
+static void sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void sane_block_input(struct net_device * dev, int count,
+ struct sk_buff * skb, int ring_offset);
+static void sane_block_output(struct net_device * dev, int count,
+ const unsigned char * buf, const int start_page);
+
+/* dayna_memcpy to and from card */
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
+ int from, int count);
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count);
+
+/* Dayna - Dayna/Kinetics use this */
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
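+/* On these NuBus cards the shared memory is apparently mapped straight into
+ * the processor's address space, so plain memcpy() stands in for the
+ * memcpy_{from,to}io() helpers here. */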
+
+/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void word_memcpy_tocard(void *tp, const void *fp, int count);
+static void word_memcpy_fromcard(void *tp, const void *fp, int count);
+
+enum mac8390_type __init mac8390_ident(struct nubus_dev * dev)
+{
+ if (dev->dr_sw == NUBUS_DRSW_ASANTE)
+ return MAC8390_ASANTE;
+ if (dev->dr_sw == NUBUS_DRSW_FARALLON)
+ return MAC8390_FARALLON;
+ if (dev->dr_sw == NUBUS_DRSW_KINETICS)
+ return MAC8390_KINETICS;
+ if (dev->dr_sw == NUBUS_DRSW_DAYNA)
+ return MAC8390_DAYNA;
+ if (dev->dr_sw == NUBUS_DRSW_DAYNA2)
+ return MAC8390_DAYNA2;
+ if (dev->dr_sw == NUBUS_DRSW_DAYNA_LC)
+ return MAC8390_DAYNA3;
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ return MAC8390_NONE;
+}
+
+int __init mac8390_memsize(unsigned long membase)
+{
+ unsigned long flags;
+ int i, j;
+
+ local_irq_save(flags);
+ /* Check up to 32K in 4K increments */
+ for (i = 0; i < 8; i++) {
+ volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+
+ /* Unwriteable - we have a fully decoded card and the
+ RAM end located */
+ if (hwreg_present(m) == 0)
+ break;
+
+ /* write a distinctive byte */
+ *m = 0xA5A0 | i;
+ /* check that we read back what we wrote */
+ if (*m != (0xA5A0 | i))
+ break;
+
+ /* check for partial decode and wrap */
+ for (j = 0; j < i; j++) {
+ volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ if (*p != (0xA5A0 | j))
+ break;
+ }
+ }
+ local_irq_restore(flags);
+ /* in any case, we stopped once we tried one block too many,
+ or once we reached 32K */
+ return i * 0x1000;
+}
+
+struct net_device * __init mac8390_probe(int unit)
+{
+ struct net_device *dev;
+ volatile unsigned short *i;
+ int version_disp = 0;
+ struct nubus_dev * ndev = NULL;
+ int err = -ENODEV;
+
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ static unsigned int slots;
+
+ enum mac8390_type cardtype;
+
+ /* probably should check for Nubus instead */
+
+ if (!MACH_IS_MAC)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ SET_MODULE_OWNER(dev);
+
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ /* Have we seen it already? */
+ if (slots & (1<<ndev->board->slot))
+ continue;
+ slots |= 1<<ndev->board->slot;
+
+ if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ continue;
+
+ if (version_disp == 0) {
+ version_disp = 1;
+ printk(version);
+ }
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
+
+ /* Get some Nubus info - we will trust the card's idea
+ of where its memory and registers are. */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ printk(KERN_ERR "%s: Unable to get Nubus functional"
+ " directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ continue;
+ }
+
+ /* Get the MAC address */
+ if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
+ printk(KERN_INFO "%s: Couldn't get MAC address!\n",
+ dev->name);
+ continue;
+ } else {
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ /* Some Sonic Sys cards masquerade as Farallon */
+ if (cardtype == MAC8390_FARALLON &&
+ dev->dev_addr[0] == 0x0 &&
+ dev->dev_addr[1] == 0x40 &&
+ dev->dev_addr[2] == 0x10) {
+ /* This is really Sonic Sys card */
+ cardtype = MAC8390_SONICSYS;
+ }
+ }
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
+ printk(KERN_ERR "%s: Memory offset resource"
+ " for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ continue;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
+ printk(KERN_INFO "%s: Memory length resource"
+ " for slot %X not found"
+ ", probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr =
+ (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start =
+ (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end =
+ dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr =
+ (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start =
+ (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
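+				/* 0x21 == E8390_NODMA | E8390_PAGE0 | E8390_STOP */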
+ dev->mem_end =
+ dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ printk(KERN_ERR "Card type %s is"
+ " unsupported, sorry\n",
+ cardname[cardtype]);
+ continue;
+ }
+ }
+
+ /* Do the nasty 8390 stuff */
+ if (!mac8390_initdev(dev, ndev, cardtype))
+ break;
+ }
+
+ if (!ndev)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+ return dev;
+
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+#ifdef MODULE
+MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others");
+MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* overkill, of course */
+static struct net_device *dev_mac8390[15];
+int init_module(void)
+{
+ int i;
+ for (i = 0; i < 15; i++) {
+ struct net_device *dev = mac8390_probe(-1);
+ if (IS_ERR(dev))
+ break;
+		dev_mac8390[i] = dev;
+ }
+ if (!i) {
+		printk(KERN_NOTICE "mac8390.c: No usable cards found, driver NOT installed.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int i;
+ for (i = 0; i < 15; i++) {
+		struct net_device *dev = dev_mac8390[i];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
+
+static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+ enum mac8390_type type)
+{
+ static u32 fwrd4_offsets[16]={
+ 0, 4, 8, 12,
+ 16, 20, 24, 28,
+ 32, 36, 40, 44,
+ 48, 52, 56, 60
+ };
+ static u32 back4_offsets[16]={
+ 60, 56, 52, 48,
+ 44, 40, 36, 32,
+ 28, 24, 20, 16,
+ 12, 8, 4, 0
+ };
+ static u32 fwrd2_offsets[16]={
+ 0, 2, 4, 6,
+ 8, 10, 12, 14,
+ 16, 18, 20, 22,
+ 24, 26, 28, 30
+ };
+
+ int access_bitmode;
+
+ /* Now fill in our stuff */
+ dev->open = &mac8390_open;
+ dev->stop = &mac8390_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ /* GAR, ei_status is actually a macro even though it looks global */
+ ei_status.name = cardname[type];
+ ei_status.word16 = word16[type];
+
+ /* Cabletron's TX/RX buffers are backwards */
+ if (type == MAC8390_CABLETRON) {
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ } else {
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
+ }
+
+ /* Fill in model-specific information and functions */
+ switch(type) {
+ case MAC8390_SONICSYS:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 0;
+ break;
+ case MAC8390_FARALLON:
+ case MAC8390_APPLE:
+ case MAC8390_ASANTE:
+ case MAC8390_DAYNA2:
+ case MAC8390_DAYNA3:
+ /* 32 bit card, register map is reversed */
+ /* sane */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
+ case MAC8390_CABLETRON:
+ /* 16 bit card, register map is short forward */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = fwrd2_offsets;
+ access_bitmode = 0;
+ break;
+ case MAC8390_DAYNA:
+ case MAC8390_KINETICS:
+ /* 16 bit memory */
+ /* dayna and similar */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &dayna_block_input;
+ ei_status.block_output = &dayna_block_output;
+ ei_status.get_8390_hdr = &dayna_get_8390_hdr;
+ ei_status.reg_offset = fwrd4_offsets;
+ access_bitmode = 0;
+ break;
+ default:
+ printk(KERN_ERR "Card type %s is unsupported, sorry\n", cardname[type]);
+ return -ENODEV;
+ }
+
+ NS8390_init(dev, 0);
+
+ /* Good, done, now spit out some messages */
+ printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
+ printk(KERN_INFO "MAC ");
+ {
+ int i;
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+ }
+ printk(" IRQ %d, shared memory at %#lx-%#lx, %d-bit access.\n",
+ dev->irq, dev->mem_start, dev->mem_end-1,
+ access_bitmode?32:16);
+ return 0;
+}
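+
+/*
+ * Editorial sketch (not part of the original driver): the three reg_offset
+ * tables above describe how the sixteen 8390 registers are laid out in each
+ * card's address space -- spaced every 4 bytes, every 4 bytes in reverse
+ * order, or every 2 bytes.  Assuming the 8390 core resolves a register
+ * through ei_status.reg_offset, an access conceptually reduces to:
+ */
+#if 0 /* illustrative only */
+static inline unsigned long mac8390_reg_addr(struct net_device *dev, int reg)
+{
+ /* hypothetical helper: card base address plus the per-card offset */
+ return dev->base_addr + ei_status.reg_offset[reg];
+}
+#endif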
+
+static int mac8390_open(struct net_device *dev)
+{
+ ei_open(dev);
+ if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) {
+ printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int mac8390_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ ei_close(dev);
+ return 0;
+}
+
+static void mac8390_no_reset(struct net_device *dev)
+{
+ ei_status.txing = 0;
+ if (ei_debug > 1)
+ printk("reset not supported\n");
+ return;
+}
+
+/* dayna_memcpy_fromio/dayna_memcpy_toio */
+/* directly from daynaport.c by Alan Cox */
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+{
+ volatile unsigned short *ptr;
+ unsigned short *target=to;
+ from<<=1; /* word, skip overhead */
+ ptr=(unsigned short *)(dev->mem_start+from);
+ /* Leading byte? */
+ if (from&2) {
+ *(char *)target = *((char *)ptr - 1);
+ target = (unsigned short *)((char *)target + 1);
+ ptr++;
+ count--;
+ }
+ while(count>=2)
+ {
+ *target++=*ptr++; /* Copy and */
+ ptr++; /* skip cruft */
+ count-=2;
+ }
+ /* Trailing byte? */
+ if(count)
+ {
+ /* Big endian */
+ unsigned short v=*ptr;
+ *((char *)target)=v>>8;
+ }
+}
+
+static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+{
+ volatile unsigned short *ptr;
+ const unsigned short *src=from;
+ to<<=1; /* word, skip overhead */
+ ptr=(unsigned short *)(dev->mem_start+to);
+ /* Leading byte? */
+ if (to&2) { /* avoid a byte write (stomps on other data) */
+ ptr[-1] = (ptr[-1]&0xFF00) | *(const unsigned char *)src;
+ src = (const unsigned short *)((const unsigned char *)src + 1);
+ ptr++;
+ count--;
+ }
+ while(count>=2)
+ {
+ *ptr++=*src++; /* Copy and */
+ ptr++; /* skip cruft */
+ count-=2;
+ }
+ /* Trailing byte? */
+ if(count)
+ {
+ /* Big endian */
+ unsigned short v=*src;
+ /* card doesn't like byte writes */
+ *ptr=(*ptr&0x00FF)|(v&0xFF00);
+ }
+}
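+
+/*
+ * Editorial sketch (not from the original source): the Dayna-style cards
+ * expose their 16-bit buffer RAM on alternating words of the host window,
+ * which is why the two helpers above double the card offset ("<<=1") and
+ * step the pointer twice per word copied.  A plain host-side model of that
+ * addressing rule, with card_window a hypothetical pointer to the slot
+ * space, would look like:
+ */
+#if 0 /* illustrative only */
+static void dayna_model_fromcard(u16 *to, const volatile u16 *card_window,
+ int card_word_off, int words)
+{
+ const volatile u16 *p = card_window + 2 * card_word_off;
+ while (words--) {
+ *to++ = *p; /* copy the useful word... */
+ p += 2; /* ...then skip the unused half of the longword */
+ }
+}
+#endif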
+
+/* sane block input/output */
+static void sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+ memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4);
+ /* Fix endianness */
+ hdr->count = swab16(hdr->count);
+}
+
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base + dev->mem_start;
+
+ if (xfer_start + count > ei_status.rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ } else {
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ }
+}
+
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ memcpy_toio((char *)dev->mem_start + shmem, buf, count);
+}
+
+/* dayna block input/output */
+static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+
+ dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ /* Fix endianness */
+ hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+}
+
+static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base+dev->mem_start;
+
+ /* Note: the offset math is done in card memory space, which maps
+ one 16-bit word per host longword onto our space. */
+
+ if (xfer_start + count > ei_status.rmem_end)
+ {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
+ count -= semi_count;
+ dayna_memcpy_fromcard(dev, skb->data + semi_count,
+ ei_status.rmem_start - dev->mem_start,
+ count);
+ }
+ else
+ {
+ dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
+ }
+}
+
+static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ dayna_memcpy_tocard(dev, shmem, buf, count);
+}
+
+/* Cabletron block I/O */
+static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+ word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ /* Register endianism - fix here rather than 8390.c */
+ hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
+}
+
+static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base+dev->mem_start;
+
+ if (xfer_start + count > ei_status.rmem_end)
+ {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
+ xfer_base, semi_count);
+ count -= semi_count;
+ word_memcpy_fromcard(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
+ }
+ else
+ {
+ word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
+ xfer_base, count);
+ }
+}
+
+static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count);
+}
+
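+/*
+ * Word-at-a-time copies used by the "slow sane" (Cabletron-style) block I/O
+ * above: the byte count is rounded up to a whole number of 16-bit transfers
+ * and the data is moved one short at a time.
+ */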
+static void word_memcpy_tocard(void *tp, const void *fp, int count)
+{
+ volatile unsigned short *to = tp;
+ const unsigned short *from = fp;
+
+ count++;
+ count/=2;
+
+ while(count--)
+ *to++=*from++;
+}
+
+static void word_memcpy_fromcard(void *tp, const void *fp, int count)
+{
+ unsigned short *to = tp;
+ const volatile unsigned short *from = fp;
+
+ count++;
+ count/=2;
+
+ while(count--)
+ *to++=*from++;
+}
+
+
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
new file mode 100644
index 000000000000..f65b0db111b8
--- /dev/null
+++ b/drivers/net/mac89x0.c
@@ -0,0 +1,666 @@
+/* mac89x0.c: A Crystal Semiconductor CS89[02]0 driver for linux. */
+/*
+ Written 1996 by Russell Nelson, with reference to skeleton.c
+ written 1993-1994 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached at nelson@crynwr.com, Crynwr
+ Software, 11 Grant St., Potsdam, NY 13676
+
+ Changelog:
+
+ Mike Cruse : mcruse@cti-ltd.com
+ : Changes for Linux 2.0 compatibility.
+ : Added dev_id parameter in net_interrupt(),
+ : request_irq() and free_irq(). Just NULL for now.
+
+ Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
+ : in net_open() and net_close() so kerneld would know
+ : that the module is in use and wouldn't eject the
+ : driver prematurely.
+
+ Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
+ : as an example. Disabled autoprobing in init_module(),
+ : not a good thing to do to other devices while Linux
+ : is running from all accounts.
+
+ Alan Cox : Removed 1.2 support, added 2.1 extra counters.
+
+ David Huggins-Daines <dhd@debian.org>
+
+ Split this off into mac89x0.c, and gutted it of all parts which are
+ not relevant to the existing CS8900 cards on the Macintosh
+ (i.e. basically the Daynaport CS and LC cards). To be precise:
+
+ * Removed all the media-detection stuff, because these cards are
+ TP-only.
+
+ * Lobotomized the ISA interrupt bogosity, because these cards use
+ a hardwired NuBus interrupt and a magic ISAIRQ value in the card.
+
+ * Basically eliminated everything not relevant to getting the
+ cards minimally functioning on the Macintosh.
+
+ I might add that these cards are badly designed even from the Mac
+ standpoint, in that Dayna, in their infinite wisdom, used NuBus slot
+ I/O space and NuBus interrupts for these cards, but neglected to
+ provide anything even remotely resembling a NuBus ROM. Therefore we
+ have to probe for them in a brain-damaged ISA-like fashion.
+
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+ check kmalloc and release the allocated memory on failure in
+ mac89x0_probe and in init_module
+ use local_irq_{save,restore}(flags) in net_get_stat, not just
+ local_irq_{dis,en}able()
+*/
+
+static char *version =
+"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
+
+/* ======================= configure the driver here ======================= */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+/* ======================= end of configuration ======================= */
+
+
+/* Always include 'config.h' first in case the user wants to turn on
+ or override something. */
+#include <linux/module.h>
+
+#define PRINTK(x) printk x
+
+/*
+ Sources:
+
+ Crynwr packet driver epktisa.
+
+ Crystal Semiconductor data sheets.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/nubus.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/hwtest.h>
+#include <asm/macints.h>
+
+#include "cs89x0.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ int chip_type; /* one of: CS8900, CS8920, CS8920M */
+ char chip_revision; /* revision letter of the chip ('A'...) */
+ int send_cmd; /* the proper command used to send a packet. */
+ int rx_mode;
+ int curr_rx_cfg;
+ int send_underrun; /* keep track of how many underruns in a row we get */
+ struct sk_buff *skb;
+};
+
+/* Index to functions, as function prototypes. */
+
+#if 0
+extern void reset_chip(struct net_device *dev);
+#endif
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void set_multicast_list(struct net_device *dev);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static int set_mac_address(struct net_device *dev, void *addr);
+
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) 1
+
+/* For reading/writing registers ISA-style */
+static inline int
+readreg_io(struct net_device *dev, int portno)
+{
+ nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
+ return swab16(nubus_readw(dev->base_addr + DATA_PORT));
+}
+
+static inline void
+writereg_io(struct net_device *dev, int portno, int value)
+{
+ nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
+ nubus_writew(swab16(value), dev->base_addr + DATA_PORT);
+}
+
+/* These are for reading/writing registers in shared memory */
+static inline int
+readreg(struct net_device *dev, int portno)
+{
+ return swab16(nubus_readw(dev->mem_start + portno));
+}
+
+static inline void
+writereg(struct net_device *dev, int portno, int value)
+{
+ nubus_writew(swab16(value), dev->mem_start + portno);
+}
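+
+/*
+ * Editorial usage sketch (not part of the original driver): every register
+ * access goes through one of the helpers above, with swab16() correcting
+ * the byte order between the little-endian CS89x0 registers and the
+ * big-endian 68k.  For example, a hypothetical helper checking for an
+ * EEPROM the way mac89x0_probe() does below:
+ */
+#if 0 /* illustrative only */
+static void example_check_eeprom(struct net_device *dev)
+{
+ if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0)
+ printk(KERN_DEBUG "%s: no usable EEPROM\n", dev->name);
+}
+#endif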
+
+/* Probe for the CS8900 card in slot E. We won't bother looking
+ anywhere else until we have a really good reason to do so. */
+struct net_device * __init mac89x0_probe(int unit)
+{
+ struct net_device *dev;
+ static int once_is_enough;
+ struct net_local *lp;
+ static unsigned version_printed;
+ int i, slot;
+ unsigned rev_type = 0;
+ unsigned long ioaddr;
+ unsigned short sig;
+ int err = -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (once_is_enough)
+ goto out;
+ once_is_enough = 1;
+
+ /* We might have to parameterize this later */
+ slot = 0xE;
+ /* Get out now if there's a real NuBus card in slot E */
+ if (nubus_find_slot(slot, NULL) != NULL)
+ goto out;
+
+ /* The pseudo-ISA bits always live at offset 0x300 (gee,
+ wonder why...) */
+ ioaddr = (unsigned long)
+ nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
+ {
+ unsigned long flags;
+ int card_present;
+
+ local_irq_save(flags);
+ card_present = hwreg_present((void*) ioaddr+4)
+ && hwreg_present((void*) ioaddr + DATA_PORT);
+ local_irq_restore(flags);
+
+ if (!card_present)
+ goto out;
+ }
+
+ nubus_writew(0, ioaddr + ADD_PORT);
+ sig = nubus_readw(ioaddr + DATA_PORT);
+ if (sig != swab16(CHIP_EISA_ID_SIG))
+ goto out;
+
+ /* Initialize the net_device structure. */
+ lp = netdev_priv(dev);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+ dev->mem_start = (unsigned long)
+ nubus_slot_addr(slot) | (((slot&0xf) << 20) + MMIOBASE);
+ dev->mem_end = dev->mem_start + 0x1000;
+
+ /* Turn on shared memory */
+ writereg_io(dev, PP_BusCTL, MEMORY_ON);
+
+ /* get the chip type */
+ rev_type = readreg(dev, PRODUCT_ID_ADD);
+ lp->chip_type = rev_type &~ REVISON_BITS;
+ lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
+
+ /* Check the chip type and revision in order to set the correct send command:
+ CS8920 revision C and CS8900 revision F can use the faster send. */
+ lp->send_cmd = TX_AFTER_381;
+ if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
+ lp->send_cmd = TX_NOW;
+ if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
+ lp->send_cmd = TX_NOW;
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx",
+ dev->name,
+ lp->chip_type==CS8900?'0':'2',
+ lp->chip_type==CS8920M?"M":"",
+ lp->chip_revision,
+ dev->base_addr);
+
+ /* Try to read the MAC address */
+ if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
+ printk("\nmac89x0: No EEPROM, giving up now.\n");
+ goto out1;
+ } else {
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ /* Big-endian (why??!) */
+ unsigned short s = readreg(dev, PP_IA + i);
+ dev->dev_addr[i] = s >> 8;
+ dev->dev_addr[i+1] = s & 0xff;
+ }
+ }
+
+ dev->irq = SLOT2IRQ(slot);
+ printk(" IRQ %d ADDR ", dev->irq);
+
+ /* print the ethernet address. */
+ for (i = 0; i < ETH_ALEN; i++)
+ printk("%2.2x%s", dev->dev_addr[i],
+ ((i < ETH_ALEN-1) ? ":" : ""));
+ printk("\n");
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->set_mac_address = &set_mac_address;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ nubus_writew(0, dev->base_addr + ADD_PORT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+#if 0
+/* This is useful for something, but I don't know what yet. */
+void __init reset_chip(struct net_device *dev)
+{
+ int reset_start_time;
+
+ writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
+
+ /* wait 30 ms */
+ msleep_interruptible(30);
+
+ /* Wait until the chip is reset */
+ reset_start_time = jiffies;
+ while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
+ ;
+}
+#endif
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int i;
+
+ /* Disable the interrupt for now */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
+
+ /* Grab the interrupt */
+ if (request_irq(dev->irq, &net_interrupt, 0, "cs89x0", dev))
+ return -EAGAIN;
+
+ /* Set up the IRQ - Apparently magic */
+ if (lp->chip_type == CS8900)
+ writereg(dev, PP_CS8900_ISAINT, 0);
+ else
+ writereg(dev, PP_CS8920_ISAINT, 0);
+
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ /* Turn on both receive and transmit operations */
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
+
+ /* Receive only error free packets addressed to this card */
+ lp->rx_mode = 0;
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
+
+ lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
+
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
+
+ writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
+ TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
+
+ writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
+ TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
+
+ /* now that we've got our act together, enable everything */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (net_debug > 3)
+ printk("%s: sent %d byte packet of type %x\n",
+ dev->name, skb->len,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8)
+ | skb->data[ETH_ALEN+ETH_ALEN+1]);
+
+ /* keep the upload from being interrupted, since we
+ ask the chip to start transmitting before the
+ whole packet has been completely uploaded. */
+ local_irq_save(flags);
+
+ /* initiate a transmit sequence */
+ writereg(dev, PP_TxCMD, lp->send_cmd);
+ writereg(dev, PP_TxLength, skb->len);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
+ /* Gasp! It hasn't. But that shouldn't happen since
+ we're waiting for TxOk, so return 1 and requeue this packet. */
+ local_irq_restore(flags);
+ return 1;
+ }
+
+ /* Write the contents of the packet */
+ memcpy_toio(dev->mem_start + PP_TxFrame, skb->data, skb->len+1);
+
+ local_irq_restore(flags);
+ dev->trans_start = jiffies;
+ }
+ dev_kfree_skb (skb);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* we MUST read all the events out of the ISQ, otherwise we'll never
+ get interrupted again. As a consequence, we can't have any limit
+ on the number of times we loop in the interrupt handler. The
+ hardware guarantees that eventually we'll run out of events. Of
+ course, if you're on a slow machine, and packets are arriving
+ faster than you can read them off, you're screwed. Hasta la
+ vista, baby! */
+ while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) {
+ if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
+ switch(status & ISQ_EVENT_MASK) {
+ case ISQ_RECEIVER_EVENT:
+ /* Got a packet(s). */
+ net_rx(dev);
+ break;
+ case ISQ_TRANSMITTER_EVENT:
+ lp->stats.tx_packets++;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ if ((status & TX_OK) == 0) lp->stats.tx_errors++;
+ if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++;
+ if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++;
+ if (status & TX_LATE_COL) lp->stats.tx_window_errors++;
+ if (status & TX_16_COL) lp->stats.tx_aborted_errors++;
+ break;
+ case ISQ_BUFFER_EVENT:
+ if (status & READY_FOR_TX) {
+ /* we tried to transmit a packet earlier,
+ but inexplicably ran out of buffers.
+ That shouldn't happen since we only ever
+ load one packet. Shrug. Do the right
+ thing anyway. */
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ if (status & TX_UNDERRUN) {
+ if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
+ lp->send_underrun++;
+ if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
+ else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
+ }
+ break;
+ case ISQ_RX_MISS_EVENT:
+ lp->stats.rx_missed_errors += (status >>6);
+ break;
+ case ISQ_TX_COL_EVENT:
+ lp->stats.collisions += (status >>6);
+ break;
+ }
+ }
+ dev->interrupt = 0;
+ return IRQ_HANDLED;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int status, length;
+
+ status = readreg(dev, PP_RxStatus);
+ if ((status & RX_OK) == 0) {
+ lp->stats.rx_errors++;
+ if (status & RX_RUNT) lp->stats.rx_length_errors++;
+ if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++;
+ if ((status & RX_CRC_ERROR) &&
+ !(status & (RX_EXTRA_DATA|RX_RUNT)))
+ /* per str 172 */
+ lp->stats.rx_crc_errors++;
+ if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
+ return;
+ }
+
+ length = readreg(dev, PP_RxLength);
+ /* Malloc up new buffer. */
+ skb = alloc_skb(length, GFP_ATOMIC);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length);
+ skb->dev = dev;
+
+ memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length);
+
+ if (net_debug > 3)printk("%s: received %d byte packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8)
+ | skb->data[ETH_ALEN+ETH_ALEN+1]);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += length;
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+
+ writereg(dev, PP_RxCFG, 0);
+ writereg(dev, PP_TxCFG, 0);
+ writereg(dev, PP_BufCFG, 0);
+ writereg(dev, PP_BusCTL, 0);
+
+ netif_stop_queue(dev);
+
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Update the statistics from the device registers. */
+ lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+ lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+ local_irq_restore(flags);
+
+ return &lp->stats;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ lp->rx_mode = RX_ALL_ACCEPT;
+ }
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+ /* The multicast-accept list is initialized to accept-all, and we
+ rely on higher-level filtering for now. */
+ lp->rx_mode = RX_MULTCAST_ACCEPT;
+ }
+ else
+ lp->rx_mode = 0;
+
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
+
+ /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
+ (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
+}
+
+
+static int set_mac_address(struct net_device *dev, void *addr)
+{
+ int i;
+ if (dev->start)
+ return -EBUSY;
+ printk("%s: Setting MAC address to ", dev->name);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = ((unsigned char *)addr)[i]);
+ printk(".\n");
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ return 0;
+}
+
+#ifdef MODULE
+
+static struct net_device *dev_cs89x0;
+static int debug;
+
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
+MODULE_LICENSE("GPL");
+
+int
+init_module(void)
+{
+ net_debug = debug;
+ dev_cs89x0 = mac89x0_probe(-1);
+ if (IS_ERR(dev_cs89x0)) {
+ printk(KERN_WARNING "mac89x0.c: No card found\n");
+ return PTR_ERR(dev_cs89x0);
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_cs89x0);
+ nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT);
+ free_netdev(dev_cs89x0);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o mac89x0.o mac89x0.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 8
+ * tab-width: 8
+ * End:
+ *
+ */
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
new file mode 100644
index 000000000000..6ed2d7dbd44c
--- /dev/null
+++ b/drivers/net/mace.c
@@ -0,0 +1,1053 @@
+/*
+ * Network device driver for the MACE ethernet controller on
+ * Apple Powermacs. Assumes it's under a DBDMA controller.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/macio.h>
+
+#include "mace.h"
+
+static int port_aaui = -1;
+
+#define N_RX_RING 8
+#define N_TX_RING 6
+#define MAX_TX_ACTIVE 1
+#define NCMDS_TX 1 /* dma commands per element in tx ring */
+#define RX_BUFLEN (ETH_FRAME_LEN + 8)
+#define TX_TIMEOUT HZ /* 1 second */
+
+/* Chip rev needs workaround on HW & multicast addr change */
+#define BROKEN_ADDRCHG_REV 0x0941
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+struct mace_data {
+ volatile struct mace __iomem *mace;
+ volatile struct dbdma_regs __iomem *tx_dma;
+ int tx_dma_intr;
+ volatile struct dbdma_regs __iomem *rx_dma;
+ int rx_dma_intr;
+ volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
+ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
+ struct sk_buff *rx_bufs[N_RX_RING];
+ int rx_fill;
+ int rx_empty;
+ struct sk_buff *tx_bufs[N_TX_RING];
+ int tx_fill;
+ int tx_empty;
+ unsigned char maccc;
+ unsigned char tx_fullup;
+ unsigned char tx_active;
+ unsigned char tx_bad_runt;
+ struct net_device_stats stats;
+ struct timer_list tx_timeout;
+ int timeout_active;
+ int port_aaui;
+ int chipid;
+ struct macio_dev *mdev;
+ spinlock_t lock;
+};
+
+/*
+ * Number of bytes of private data per MACE: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES (sizeof(struct mace_data) \
+ + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
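+
+/*
+ * Editorial note (assumption, not from the original source): DBDMA_ALIGN()
+ * from asm/dbdma.h is expected to round the command area up to a 16-byte
+ * boundary, roughly ((unsigned long)(p) + 15) & ~15UL.  The "+ 3" above
+ * therefore covers one branch command per ring plus one spare 16-byte
+ * dbdma_cmd slot consumed by that rounding.
+ */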
+
+static int bitrev(int);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *mace_stats(struct net_device *dev);
+static void mace_set_multicast(struct net_device *dev);
+static void mace_reset(struct net_device *dev);
+static int mace_set_address(struct net_device *dev, void *addr);
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
+static void mace_set_timeout(struct net_device *dev);
+static void mace_tx_timeout(unsigned long data);
+static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
+static inline void mace_clean_rings(struct mace_data *mp);
+static void __mace_set_address(struct net_device *dev, void *addr);
+
+/*
+ * If we can't get a skbuff when we need it, we use this area for DMA.
+ */
+static unsigned char *dummy_buf;
+
+/* Bit-reverse one byte of an ethernet hardware address. */
+static inline int
+bitrev(int b)
+{
+ int d = 0, i;
+
+ for (i = 0; i < 8; ++i, b >>= 1)
+ d = (d << 1) | (b & 1);
+ return d;
+}
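+
+/*
+ * Example: bitrev(0x50) == 0x0a and bitrev(0x01) == 0x80.  mace_probe()
+ * below uses this to un-reverse MAC addresses that some firmware appears
+ * to store bit-reversed in the device tree.
+ */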
+
+
+static int __devinit mace_probe(struct macio_dev *mdev, const struct of_match *match)
+{
+ struct device_node *mace = macio_get_of_node(mdev);
+ struct net_device *dev;
+ struct mace_data *mp;
+ unsigned char *addr;
+ int j, rev, rc = -EBUSY;
+
+ if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
+ printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
+ mace->full_name);
+ return -ENODEV;
+ }
+
+ addr = get_property(mace, "mac-address", NULL);
+ if (addr == NULL) {
+ addr = get_property(mace, "local-mac-address", NULL);
+ if (addr == NULL) {
+ printk(KERN_ERR "Can't get mac-address for MACE %s\n",
+ mace->full_name);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * lazy allocate the driver-wide dummy buffer. (Note that we
+ * never have more than one MACE in the system anyway)
+ */
+ if (dummy_buf == NULL) {
+ dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
+ if (dummy_buf == NULL) {
+ printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (macio_request_resources(mdev, "mace")) {
+ printk(KERN_ERR "MACE: can't request IO resources !\n");
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev) {
+ printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
+ rc = -ENOMEM;
+ goto err_release;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+
+ mp = dev->priv;
+ mp->mdev = mdev;
+ macio_set_drvdata(mdev, dev);
+
+ dev->base_addr = macio_resource_start(mdev, 0);
+ mp->mace = ioremap(dev->base_addr, 0x1000);
+ if (mp->mace == NULL) {
+ printk(KERN_ERR "MACE: can't map IO resources !\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ dev->irq = macio_irq(mdev, 0);
+
+ rev = addr[0] == 0 && addr[1] == 0xA0;
+ for (j = 0; j < 6; ++j) {
+ dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
+ }
+ mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
+ in_8(&mp->mace->chipid_lo);
+
+ mp->maccc = ENXMT | ENRCV;
+
+ mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
+ if (mp->tx_dma == NULL) {
+ printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
+ rc = -ENOMEM;
+ goto err_unmap_io;
+ }
+ mp->tx_dma_intr = macio_irq(mdev, 1);
+
+ mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
+ if (mp->rx_dma == NULL) {
+ printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
+ rc = -ENOMEM;
+ goto err_unmap_tx_dma;
+ }
+ mp->rx_dma_intr = macio_irq(mdev, 2);
+
+ mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
+ mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
+
+ memset(&mp->stats, 0, sizeof(mp->stats));
+ memset((char *) mp->tx_cmds, 0,
+ (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
+ init_timer(&mp->tx_timeout);
+ spin_lock_init(&mp->lock);
+ mp->timeout_active = 0;
+
+ if (port_aaui >= 0)
+ mp->port_aaui = port_aaui;
+ else {
+ /* Apple Network Server uses the AAUI port */
+ if (machine_is_compatible("AAPL,ShinerESB"))
+ mp->port_aaui = 1;
+ else {
+#ifdef CONFIG_MACE_AAUI_PORT
+ mp->port_aaui = 1;
+#else
+ mp->port_aaui = 0;
+#endif
+ }
+ }
+
+ dev->open = mace_open;
+ dev->stop = mace_close;
+ dev->hard_start_xmit = mace_xmit_start;
+ dev->get_stats = mace_stats;
+ dev->set_multicast_list = mace_set_multicast;
+ dev->set_mac_address = mace_set_address;
+
+ /*
+ * Most of what is below could be moved to mace_open()
+ */
+ mace_reset(dev);
+
+ rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
+ goto err_unmap_rx_dma;
+ }
+ rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
+ goto err_free_irq;
+ }
+ rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);
+ goto err_free_tx_irq;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
+ goto err_free_rx_irq;
+ }
+
+ printk(KERN_INFO "%s: MACE at", dev->name);
+ for (j = 0; j < 6; ++j) {
+ printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
+ }
+ printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff);
+
+ return 0;
+
+ err_free_rx_irq:
+ free_irq(macio_irq(mdev, 2), dev);
+ err_free_tx_irq:
+ free_irq(macio_irq(mdev, 1), dev);
+ err_free_irq:
+ free_irq(macio_irq(mdev, 0), dev);
+ err_unmap_rx_dma:
+ iounmap(mp->rx_dma);
+ err_unmap_tx_dma:
+ iounmap(mp->tx_dma);
+ err_unmap_io:
+ iounmap(mp->mace);
+ err_free:
+ free_netdev(dev);
+ err_release:
+ macio_release_resources(mdev);
+
+ return rc;
+}
+
+static int __devexit mace_remove(struct macio_dev *mdev)
+{
+ struct net_device *dev = macio_get_drvdata(mdev);
+ struct mace_data *mp;
+
+ BUG_ON(dev == NULL);
+
+ macio_set_drvdata(mdev, NULL);
+
+ mp = dev->priv;
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(mp->tx_dma_intr, dev);
+ free_irq(mp->rx_dma_intr, dev);
+
+ iounmap(mp->rx_dma);
+ iounmap(mp->tx_dma);
+ iounmap(mp->mace);
+
+ free_netdev(dev);
+
+ macio_release_resources(mdev);
+
+ return 0;
+}
+
+static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
+{
+ int i;
+
+ out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);
+
+ /*
+ * Yes this looks peculiar, but apparently it needs to be this
+ * way on some machines.
+ */
+ for (i = 200; i > 0; --i)
+ if (ld_le32(&dma->control) & RUN)
+ udelay(1);
+}
+
+static void mace_reset(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ int i;
+
+ /* soft-reset the chip */
+ i = 200;
+ while (--i) {
+ out_8(&mb->biucc, SWRST);
+ if (in_8(&mb->biucc) & SWRST) {
+ udelay(10);
+ continue;
+ }
+ break;
+ }
+ if (!i) {
+ printk(KERN_ERR "mace: cannot reset chip!\n");
+ return;
+ }
+
+ out_8(&mb->imr, 0xff); /* disable all intrs for now */
+ i = in_8(&mb->ir);
+ out_8(&mb->maccc, 0); /* turn off tx, rx */
+
+ out_8(&mb->biucc, XMTSP_64);
+ out_8(&mb->utr, RTRD);
+ out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
+ out_8(&mb->rcvfc, 0);
+
+ /* load up the hardware address */
+ __mace_set_address(dev, dev->dev_addr);
+
+ /* clear the multicast filter */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, LOGADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | LOGADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ out_8(&mb->ladrf, 0);
+
+ /* done changing address */
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+
+ if (mp->port_aaui)
+ out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
+ else
+ out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
+}
+
+static void __mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ unsigned char *p = addr;
+ int i;
+
+ /* load up the hardware address */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, PHYADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | PHYADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 6; ++i)
+ out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+}
+
+static int mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ __mace_set_address(dev, addr);
+
+ /* note: setting ADDRCHG clears ENRCV */
+ out_8(&mb->maccc, mp->maccc);
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 0;
+}
+
+static inline void mace_clean_rings(struct mace_data *mp)
+{
+ int i;
+
+ /* free some skb's */
+ for (i = 0; i < N_RX_RING; ++i) {
+ if (mp->rx_bufs[i] != 0) {
+ dev_kfree_skb(mp->rx_bufs[i]);
+ mp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = mp->tx_empty; i != mp->tx_fill; ) {
+ dev_kfree_skb(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ }
+}
+
+static int mace_open(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp;
+ int i;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ /* reset the chip */
+ mace_reset(dev);
+
+ /* initialize list of sk_buffs for receiving and set up recv dma */
+ mace_clean_rings(mp);
+ memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
+ cp = mp->rx_cmds;
+ for (i = 0; i < N_RX_RING - 1; ++i) {
+ skb = dev_alloc_skb(RX_BUFLEN + 2);
+ if (skb == 0) {
+ data = dummy_buf;
+ } else {
+ skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */
+ data = skb->data;
+ }
+ mp->rx_bufs[i] = skb;
+ st_le16(&cp->req_count, RX_BUFLEN);
+ st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
+ st_le32(&cp->phy_addr, virt_to_bus(data));
+ cp->xfer_status = 0;
+ ++cp;
+ }
+ mp->rx_bufs[i] = NULL;
+ st_le16(&cp->command, DBDMA_STOP);
+ mp->rx_fill = i;
+ mp->rx_empty = 0;
+
+ /* Put a branch back to the beginning of the receive command list */
+ ++cp;
+ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
+ st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));
+
+ /* start rx dma */
+ out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
+ out_le32(&rd->control, (RUN << 16) | RUN);
+
+ /* put a branch at the end of the tx command list */
+ cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
+ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
+ st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));
+
+ /* reset tx dma */
+ out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
+ out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
+ mp->tx_fill = 0;
+ mp->tx_empty = 0;
+ mp->tx_fullup = 0;
+ mp->tx_active = 0;
+ mp->tx_bad_runt = 0;
+
+ /* turn it on! */
+ out_8(&mb->maccc, mp->maccc);
+ /* enable all interrupts except receive interrupts */
+ out_8(&mb->imr, RCVINT);
+
+ return 0;
+}
+
+static int mace_close(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+
+ /* disable rx and tx */
+ out_8(&mb->maccc, 0);
+ out_8(&mb->imr, 0xff); /* disable all intrs */
+
+ /* disable rx and tx dma */
+ st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+
+ mace_clean_rings(mp);
+
+ return 0;
+}
+
+static inline void mace_set_timeout(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+
+ if (mp->timeout_active)
+ del_timer(&mp->tx_timeout);
+ mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
+ mp->tx_timeout.function = mace_tx_timeout;
+ mp->tx_timeout.data = (unsigned long) dev;
+ add_timer(&mp->tx_timeout);
+ mp->timeout_active = 1;
+}
+
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp, *np;
+ unsigned long flags;
+ int fill, next, len;
+
+ /* see if there's a free slot in the tx ring */
+ spin_lock_irqsave(&mp->lock, flags);
+ fill = mp->tx_fill;
+ next = fill + 1;
+ if (next >= N_TX_RING)
+ next = 0;
+ if (next == mp->tx_empty) {
+ netif_stop_queue(dev);
+ mp->tx_fullup = 1;
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 1; /* can't take it at the moment */
+ }
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ /* partially fill in the dma command block */
+ len = skb->len;
+ if (len > ETH_FRAME_LEN) {
+ printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
+ len = ETH_FRAME_LEN;
+ }
+ mp->tx_bufs[fill] = skb;
+ cp = mp->tx_cmds + NCMDS_TX * fill;
+ st_le16(&cp->req_count, len);
+ st_le32(&cp->phy_addr, virt_to_bus(skb->data));
+
+ np = mp->tx_cmds + NCMDS_TX * next;
+ out_le16(&np->command, DBDMA_STOP);
+
+ /* poke the tx dma channel */
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->tx_fill = next;
+ if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
+ ++mp->tx_active;
+ mace_set_timeout(dev);
+ }
+ if (++next >= N_TX_RING)
+ next = 0;
+ if (next == mp->tx_empty)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *mace_stats(struct net_device *dev)
+{
+ struct mace_data *p = (struct mace_data *) dev->priv;
+
+ return &p->stats;
+}
+
+static void mace_set_multicast(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ int i, j;
+ u32 crc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->maccc &= ~PROM;
+ if (dev->flags & IFF_PROMISC) {
+ mp->maccc |= PROM;
+ } else {
+ unsigned char multicast_filter[8];
+ struct dev_mc_list *dmi = dev->mc_list;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0xff;
+ } else {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0;
+ for (i = 0; i < dev->mc_count; i++) {
+ crc = ether_crc_le(6, dmi->dmi_addr);
+ j = crc >> 26; /* bit number in multicast_filter */
+ multicast_filter[j >> 3] |= 1 << (j & 7);
+ dmi = dmi->next;
+ }
+ }
+#if 0
+ printk("Multicast filter :");
+ for (i = 0; i < 8; i++)
+ printk("%02x ", multicast_filter[i]);
+ printk("\n");
+#endif
+
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, LOGADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | LOGADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ out_8(&mb->ladrf, multicast_filter[i]);
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+ }
+ /* reset maccc */
+ out_8(&mb->maccc, mp->maccc);
+ spin_unlock_irqrestore(&mp->lock, flags);
+}
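+
+/*
+ * Editorial sketch (not part of the original driver): the multicast hash
+ * above takes the top six bits of the little-endian CRC of each address,
+ * giving a bit index 0..63 into the 8-byte logical address filter.  Pulled
+ * out into a hypothetical helper, the bucket computation is just:
+ */
+#if 0 /* illustrative only */
+static inline int mace_hash_bit(const unsigned char *addr)
+{
+ u32 crc = ether_crc_le(6, addr);
+ return crc >> 26; /* bit number in ladrf, 0..63 */
+}
+#endif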
+
+static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
+{
+ volatile struct mace __iomem *mb = mp->mace;
+ static int mace_babbles, mace_jabbers;
+
+ if (intr & MPCO)
+ mp->stats.rx_missed_errors += 256;
+ mp->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */
+ if (intr & RNTPCO)
+ mp->stats.rx_length_errors += 256;
+ mp->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
+ if (intr & CERR)
+ ++mp->stats.tx_heartbeat_errors;
+ if (intr & BABBLE)
+ if (mace_babbles++ < 4)
+ printk(KERN_DEBUG "mace: babbling transmitter\n");
+ if (intr & JABBER)
+ if (mace_jabbers++ < 4)
+ printk(KERN_DEBUG "mace: jabbering transceiver\n");
+}
+
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp;
+ int intr, fs, i, stat, x;
+ int xcount, dstat;
+ unsigned long flags;
+ /* static int mace_last_fs, mace_last_xcount; */
+
+ spin_lock_irqsave(&mp->lock, flags);
+ intr = in_8(&mb->ir); /* read interrupt register */
+ in_8(&mb->xmtrc); /* get retries */
+ mace_handle_misc_intrs(mp, intr);
+
+ i = mp->tx_empty;
+ while (in_8(&mb->pr) & XMTSV) {
+ del_timer(&mp->tx_timeout);
+ mp->timeout_active = 0;
+ /*
+ * Clear any interrupt indication associated with this status
+ * word. This appears to unlatch any error indication from
+ * the DMA controller.
+ */
+ intr = in_8(&mb->ir);
+ if (intr != 0)
+ mace_handle_misc_intrs(mp, intr);
+ if (mp->tx_bad_runt) {
+ fs = in_8(&mb->xmtfs);
+ mp->tx_bad_runt = 0;
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT);
+ continue;
+ }
+ dstat = ld_le32(&td->status);
+ /* stop DMA controller */
+ out_le32(&td->control, RUN << 16);
+ /*
+ * xcount is the number of complete frames which have been
+ * written to the fifo but for which status has not been read.
+ */
+ xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
+ if (xcount == 0 || (dstat & DEAD)) {
+ /*
+ * If a packet was aborted before the DMA controller has
+ * finished transferring it, it seems that there are 2 bytes
+ * which are stuck in some buffer somewhere. These will get
+ * transmitted as soon as we read the frame status (which
+ * reenables the transmit data transfer request). Turning
+ * off the DMA controller and/or resetting the MACE doesn't
+ * help. So we disable auto-padding and FCS transmission
+ * so the two bytes will only be a runt packet which should
+ * be ignored by other stations.
+ */
+ out_8(&mb->xmtfc, DXMTFCS);
+ }
+ fs = in_8(&mb->xmtfs);
+ if ((fs & XMTSV) == 0) {
+ printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
+ fs, xcount, dstat);
+ mace_reset(dev);
+ /*
+ * XXX mace likes to hang the machine after an xmtfs error.
+ * This is hard to reproduce; resetting *may* help.
+ */
+ }
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ stat = ld_le16(&cp->xfer_status);
+ if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
+ /*
+ * Check whether there were in fact 2 bytes written to
+ * the transmit FIFO.
+ */
+ udelay(1);
+ x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
+ if (x != 0) {
+ /* there were two bytes with an end-of-packet indication */
+ mp->tx_bad_runt = 1;
+ mace_set_timeout(dev);
+ } else {
+ /*
+ * Either there weren't the two bytes buffered up, or they
+ * didn't have an end-of-packet indication.
+ * We flush the transmit FIFO just in case (by setting the
+ * XMTFWU bit with the transmitter disabled).
+ */
+ out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
+ out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
+ udelay(1);
+ out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT);
+ }
+ }
+ /* dma should have finished */
+ if (i == mp->tx_fill) {
+ printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
+ fs, xcount, dstat);
+ continue;
+ }
+ /* Update stats */
+ if (fs & (UFLO|LCOL|LCAR|RTRY)) {
+ ++mp->stats.tx_errors;
+ if (fs & LCAR)
+ ++mp->stats.tx_carrier_errors;
+ if (fs & (UFLO|LCOL|RTRY))
+ ++mp->stats.tx_aborted_errors;
+ } else {
+ mp->stats.tx_bytes += mp->tx_bufs[i]->len;
+ ++mp->stats.tx_packets;
+ }
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
+ --mp->tx_active;
+ if (++i >= N_TX_RING)
+ i = 0;
+#if 0
+ mace_last_fs = fs;
+ mace_last_xcount = xcount;
+#endif
+ }
+
+ if (i != mp->tx_empty) {
+ mp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ }
+ mp->tx_empty = i;
+ i += mp->tx_active;
+ if (i >= N_TX_RING)
+ i -= N_TX_RING;
+ if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
+ do {
+ /* set up the next one */
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ ++mp->tx_active;
+ if (++i >= N_TX_RING)
+ i = 0;
+ } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
+ out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
+ mace_set_timeout(dev);
+ }
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void mace_tx_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->timeout_active = 0;
+ if (mp->tx_active == 0 && !mp->tx_bad_runt)
+ goto out;
+
+ /* update various counters */
+ mace_handle_misc_intrs(mp, in_8(&mb->ir));
+
+ cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
+
+ /* turn off both tx and rx and reset the chip */
+ out_8(&mb->maccc, 0);
+ printk(KERN_ERR "mace: transmit timeout - resetting\n");
+ dbdma_reset(td);
+ mace_reset(dev);
+
+ /* restart rx dma */
+ cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ dbdma_reset(rd);
+ out_le16(&cp->xfer_status, 0);
+ out_le32(&rd->cmdptr, virt_to_bus(cp));
+ out_le32(&rd->control, (RUN << 16) | RUN);
+
+ /* fix up the transmit side */
+ i = mp->tx_empty;
+ mp->tx_active = 0;
+ ++mp->stats.tx_errors;
+ if (mp->tx_bad_runt) {
+ mp->tx_bad_runt = 0;
+ } else if (i != mp->tx_fill) {
+ dev_kfree_skb(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ mp->tx_empty = i;
+ }
+ mp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (i != mp->tx_fill) {
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->cmdptr, virt_to_bus(cp));
+ out_le32(&td->control, (RUN << 16) | RUN);
+ ++mp->tx_active;
+ mace_set_timeout(dev);
+ }
+
+ /* turn it back on */
+ out_8(&mb->imr, RCVINT);
+ out_8(&mb->maccc, mp->maccc);
+
+out:
+ spin_unlock_irqrestore(&mp->lock, flags);
+}
+
+static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_cmd *cp, *np;
+ int i, nb, stat, next;
+ struct sk_buff *skb;
+ unsigned frame_status;
+ static int mace_lost_status;
+ unsigned char *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ for (i = mp->rx_empty; i != mp->rx_fill; ) {
+ cp = mp->rx_cmds + i;
+ stat = ld_le16(&cp->xfer_status);
+ if ((stat & ACTIVE) == 0) {
+ next = i + 1;
+ if (next >= N_RX_RING)
+ next = 0;
+ np = mp->rx_cmds + next;
+ if (next != mp->rx_fill
+ && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
+ printk(KERN_DEBUG "mace: lost a status word\n");
+ ++mace_lost_status;
+ } else
+ break;
+ }
+ nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
+ out_le16(&cp->command, DBDMA_STOP);
+ /* got a packet, have a look at it */
+ skb = mp->rx_bufs[i];
+ if (skb == 0) {
+ ++mp->stats.rx_dropped;
+ } else if (nb > 8) {
+ data = skb->data;
+ frame_status = (data[nb-3] << 8) + data[nb-4];
+ if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
+ ++mp->stats.rx_errors;
+ if (frame_status & RS_OFLO)
+ ++mp->stats.rx_over_errors;
+ if (frame_status & RS_FRAMERR)
+ ++mp->stats.rx_frame_errors;
+ if (frame_status & RS_FCSERR)
+ ++mp->stats.rx_crc_errors;
+ } else {
+ /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
+ * FCS on frames with 802.3 headers. This means that Ethernet
+ * frames have 8 extra octets at the end, while 802.3 frames
+ * have only 4. We need to correctly account for this. */
+ if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
+ nb -= 4;
+ else /* Ethernet header; mace includes FCS */
+ nb -= 8;
+ skb_put(skb, nb);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ mp->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ mp->rx_bufs[i] = NULL;
+ ++mp->stats.rx_packets;
+ }
+ } else {
+ ++mp->stats.rx_errors;
+ ++mp->stats.rx_length_errors;
+ }
+
+ /* advance to next */
+ if (++i >= N_RX_RING)
+ i = 0;
+ }
+ mp->rx_empty = i;
+
+ i = mp->rx_fill;
+ for (;;) {
+ next = i + 1;
+ if (next >= N_RX_RING)
+ next = 0;
+ if (next == mp->rx_empty)
+ break;
+ cp = mp->rx_cmds + i;
+ skb = mp->rx_bufs[i];
+ if (skb == 0) {
+ skb = dev_alloc_skb(RX_BUFLEN + 2);
+ if (skb != 0) {
+ skb_reserve(skb, 2);
+ mp->rx_bufs[i] = skb;
+ }
+ }
+ st_le16(&cp->req_count, RX_BUFLEN);
+ data = skb? skb->data: dummy_buf;
+ st_le32(&cp->phy_addr, virt_to_bus(data));
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
+#if 0
+ if ((ld_le32(&rd->status) & ACTIVE) != 0) {
+ out_le32(&rd->control, (PAUSE << 16) | PAUSE);
+ while ((in_le32(&rd->status) & ACTIVE) != 0)
+ ;
+ }
+#endif
+ i = next;
+ }
+ if (i != mp->rx_fill) {
+ out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
+ mp->rx_fill = i;
+ }
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct of_match mace_match[] =
+{
+ {
+ .name = "mace",
+ .type = OF_ANY_MATCH,
+ .compatible = OF_ANY_MATCH
+ },
+ {},
+};
+
+static struct macio_driver mace_driver =
+{
+ .name = "mace",
+ .match_table = mace_match,
+ .probe = mace_probe,
+ .remove = mace_remove,
+};
+
+
+static int __init mace_init(void)
+{
+ return macio_register_driver(&mace_driver);
+}
+
+static void __exit mace_cleanup(void)
+{
+ macio_unregister_driver(&mace_driver);
+
+ if (dummy_buf) {
+ kfree(dummy_buf);
+ dummy_buf = NULL;
+ }
+}
+
+MODULE_AUTHOR("Paul Mackerras");
+MODULE_DESCRIPTION("PowerMac MACE driver.");
+MODULE_PARM(port_aaui, "i");
+MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
+MODULE_LICENSE("GPL");
+
+module_init(mace_init);
+module_exit(mace_cleanup);
diff --git a/drivers/net/mace.h b/drivers/net/mace.h
new file mode 100644
index 000000000000..30b7ec0cedb5
--- /dev/null
+++ b/drivers/net/mace.h
@@ -0,0 +1,173 @@
+/*
+ * mace.h - definitions for the registers in the Am79C940 MACE
+ * (Medium Access Control for Ethernet) controller.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define REG(x) volatile unsigned char x; char x ## _pad[15]
+
+struct mace {
+ REG(rcvfifo); /* receive FIFO */
+ REG(xmtfifo); /* transmit FIFO */
+ REG(xmtfc); /* transmit frame control */
+ REG(xmtfs); /* transmit frame status */
+ REG(xmtrc); /* transmit retry count */
+ REG(rcvfc); /* receive frame control */
+ REG(rcvfs); /* receive frame status (4 bytes) */
+ REG(fifofc); /* FIFO frame count */
+ REG(ir); /* interrupt register */
+ REG(imr); /* interrupt mask register */
+ REG(pr); /* poll register */
+ REG(biucc); /* bus interface unit config control */
+ REG(fifocc); /* FIFO configuration control */
+ REG(maccc); /* medium access control config control */
+ REG(plscc); /* phys layer signalling config control */
+ REG(phycc); /* physical configuration control */
+ REG(chipid_lo); /* chip ID, lsb */
+ REG(chipid_hi); /* chip ID, msb */
+ REG(iac); /* internal address config */
+ REG(reg19);
+ REG(ladrf); /* logical address filter (8 bytes) */
+ REG(padr); /* physical address (6 bytes) */
+ REG(reg22);
+ REG(reg23);
+ REG(mpc); /* missed packet count (clears when read) */
+ REG(reg25);
+ REG(rntpc); /* runt packet count (clears when read) */
+ REG(rcvcc); /* recv collision count (clears when read) */
+ REG(reg28);
+ REG(utr); /* user test reg */
+ REG(reg30);
+ REG(reg31);
+};
+
+/* Bits in XMTFC */
+#define DRTRY 0x80 /* don't retry transmission after collision */
+#define DXMTFCS 0x08 /* don't append FCS to transmitted frame */
+#define AUTO_PAD_XMIT 0x01 /* auto-pad short packets on transmission */
+
+/* Bits in XMTFS: only valid when XMTSV is set in PR and XMTFS */
+#define XMTSV 0x80 /* transmit status (i.e. XMTFS) valid */
+#define UFLO 0x40 /* underflow - xmit fifo ran dry */
+#define LCOL 0x20 /* late collision (transmission aborted) */
+#define MORE 0x10 /* 2 or more retries needed to xmit frame */
+#define ONE 0x08 /* 1 retry needed to xmit frame */
+#define DEFER 0x04 /* MACE had to defer xmission (enet busy) */
+#define LCAR 0x02 /* loss of carrier (transmission aborted) */
+#define RTRY 0x01 /* too many retries (transmission aborted) */
+
+/* Bits in XMTRC: only valid when XMTSV is set in PR (and XMTFS) */
+#define EXDEF 0x80 /* had to defer for excessive time */
+#define RETRY_MASK 0x0f /* number of retries (0 - 15) */
+
+/* Bits in RCVFC */
+#define LLRCV 0x08 /* low latency receive: early DMA request */
+#define M_RBAR 0x04 /* sets function of EAM/R pin */
+#define AUTO_STRIP_RCV 0x01 /* auto-strip short LLC frames on recv */
+
+/*
+ * Bits in RCVFS. After a frame is received, four bytes of status
+ * are automatically read from this register and appended to the frame
+ * data in memory. These are:
+ * Byte 0 and 1: message byte count and frame status
+ * Byte 2: runt packet count
+ * Byte 3: receive collision count
+ */
+#define RS_OFLO 0x8000 /* receive FIFO overflowed */
+#define RS_CLSN 0x4000 /* received frame suffered (late) collision */
+#define RS_FRAMERR 0x2000 /* framing error flag */
+#define RS_FCSERR 0x1000 /* frame had FCS error */
+#define RS_COUNT 0x0fff /* mask for byte count field */
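+/*
+ * Example: an appended status word of 0x1040 decodes as RS_FCSERR set
+ * (the frame had a bad FCS) with a byte count of 0x040 in RS_COUNT.
+ */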
+
+/* Bits (fields) in FIFOFC */
+#define RCVFC_SH 4 /* receive frame count in FIFO */
+#define RCVFC_MASK 0x0f
+#define XMTFC_SH 0 /* transmit frame count in FIFO */
+#define XMTFC_MASK 0x0f
+
+/*
+ * Bits in IR and IMR. The IR clears itself when read.
+ * Setting a bit in the IMR will disable the corresponding interrupt.
+ */
+#define JABBER 0x80 /* jabber error - 10baseT xmission too long */
+#define BABBLE 0x40 /* babble - xmitter xmitting for too long */
+#define CERR 0x20 /* collision err - no SQE test (heartbeat) */
+#define RCVCCO 0x10 /* RCVCC overflow */
+#define RNTPCO 0x08 /* RNTPC overflow */
+#define MPCO 0x04 /* MPC overflow */
+#define RCVINT 0x02 /* receive interrupt */
+#define XMTINT 0x01 /* transmitter interrupt */
+
+/* Bits in PR */
+#define XMTSV 0x80 /* XMTFS valid (same as in XMTFS) */
+#define TDTREQ 0x40 /* set when xmit fifo is requesting data */
+#define RDTREQ 0x20 /* set when recv fifo requests data xfer */
+
+/* Bits in BIUCC */
+#define BSWP 0x40 /* byte swap, i.e. big-endian bus */
+#define XMTSP_4 0x00 /* start xmitting when 4 bytes in FIFO */
+#define XMTSP_16 0x10 /* start xmitting when 16 bytes in FIFO */
+#define XMTSP_64 0x20 /* start xmitting when 64 bytes in FIFO */
+#define XMTSP_112 0x30 /* start xmitting when 112 bytes in FIFO */
+#define SWRST 0x01 /* software reset */
+
+/* Bits in FIFOCC */
+#define XMTFW_8 0x00 /* xmit fifo watermark = 8 words free */
+#define XMTFW_16 0x40 /* 16 words free */
+#define XMTFW_32 0x80 /* 32 words free */
+#define RCVFW_16 0x00 /* recv fifo watermark = 16 bytes avail */
+#define RCVFW_32 0x10 /* 32 bytes avail */
+#define RCVFW_64 0x20 /* 64 bytes avail */
+#define XMTFWU 0x08 /* xmit fifo watermark update enable */
+#define RCVFWU 0x04 /* recv fifo watermark update enable */
+#define XMTBRST 0x02 /* enable transmit burst mode */
+#define RCVBRST 0x01 /* enable receive burst mode */
+
+/* Bits in MACCC */
+#define PROM 0x80 /* promiscuous mode */
+#define DXMT2PD 0x40 /* disable xmit two-part deferral algorithm */
+#define EMBA 0x20 /* enable modified backoff algorithm */
+#define DRCVPA 0x08 /* disable receiving physical address */
+#define DRCVBC 0x04 /* disable receiving broadcasts */
+#define ENXMT 0x02 /* enable transmitter */
+#define ENRCV 0x01 /* enable receiver */
+
+/* Bits in PLSCC */
+#define XMTSEL 0x08 /* select DO+/DO- state when idle */
+#define PORTSEL_AUI 0x00 /* select AUI port */
+#define PORTSEL_10T 0x02 /* select 10Base-T port */
+#define PORTSEL_DAI 0x04 /* select DAI port */
+#define PORTSEL_GPSI 0x06 /* select GPSI port */
+#define ENPLSIO 0x01 /* enable optional PLS I/O pins */
+
+/* Bits in PHYCC */
+#define LNKFL 0x80 /* reports 10Base-T link failure */
+#define DLNKTST 0x40 /* disable 10Base-T link test */
+#define REVPOL 0x20 /* 10Base-T receiver polarity reversed */
+#define DAPC 0x10 /* disable auto receiver polarity correction */
+#define LRT 0x08 /* low receive threshold for long links */
+#define ASEL 0x04 /* auto-select AUI or 10Base-T port */
+#define RWAKE 0x02 /* remote wake function */
+#define AWAKE 0x01 /* auto wake function */
+
+/* Bits in IAC */
+#define ADDRCHG 0x80 /* request address change */
+#define PHYADDR 0x04 /* access physical address */
+#define LOGADDR 0x02 /* access multicast filter */
+
+/* Bits in UTR */
+#define RTRE 0x80 /* reserved test register enable. DON'T SET. */
+#define RTRD 0x40 /* reserved test register disable. Sticky */
+#define RPAC 0x20 /* accept runt packets */
+#define FCOLL 0x10 /* force collision */
+#define RCVFCSE 0x08 /* receive FCS enable */
+#define LOOP_NONE 0x00 /* no loopback */
+#define LOOP_EXT 0x02 /* external loopback */
+#define LOOP_INT 0x04 /* internal loopback, excludes MENDEC */
+#define LOOP_MENDEC 0x06 /* internal loopback, includes MENDEC */
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
new file mode 100644
index 000000000000..79a6fc139757
--- /dev/null
+++ b/drivers/net/macmace.c
@@ -0,0 +1,710 @@
+/*
+ * Driver for the Macintosh 68K onboard MACE controller with PSC
+ * driven DMA. The MACE driver code is derived from mace.c. The
+ * Mac68k theory of operation is courtesy of the MacBSD wizards.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1998 Alan Cox <alan@redhat.com>
+ *
+ * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_psc.h>
+#include <asm/page.h>
+#include "mace.h"
+
+#define N_TX_RING 1
+#define N_RX_RING 8
+#define N_RX_PAGES ((N_RX_RING * 0x0800 + PAGE_SIZE - 1) / PAGE_SIZE)
+#define TX_TIMEOUT HZ
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+/* The MACE is simply wired down on a Mac68K box */
+
+#define MACE_BASE (void *)(0x50F1C000)
+#define MACE_PROM (void *)(0x50F08001)
+
+struct mace_data {
+ volatile struct mace *mace;
+ volatile unsigned char *tx_ring;
+ volatile unsigned char *tx_ring_phys;
+ volatile unsigned char *rx_ring;
+ volatile unsigned char *rx_ring_phys;
+ int dma_intr;
+ struct net_device_stats stats;
+ int rx_slot, rx_tail;
+ int tx_slot, tx_sloti, tx_count;
+};
+
+struct mace_frame {
+ u16 len;
+ u16 status;
+ u16 rntpc;
+ u16 rcvcc;
+ u32 pad1;
+ u32 pad2;
+ u8 data[1];
+ /* And frame continues.. */
+};
+
+#define PRIV_BYTES sizeof(struct mace_data)
+
+extern void psc_debug_dump(void);
+
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *mace_stats(struct net_device *dev);
+static void mace_set_multicast(struct net_device *dev);
+static int mace_set_address(struct net_device *dev, void *addr);
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs);
+static void mace_tx_timeout(struct net_device *dev);
+
+/* Bit-reverse one byte of an ethernet hardware address. */
+
+static int bitrev(int b)
+{
+ int d = 0, i;
+
+ for (i = 0; i < 8; ++i, b >>= 1) {
+ d = (d << 1) | (b & 1);
+ }
+
+ return d;
+}
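+/* For example, bitrev(0x01) == 0x80 and bitrev(0x88) == 0x11. */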
+
+/*
+ * Load a receive DMA channel with a base address and ring length
+ */
+
+static void mace_load_rxdma_base(struct net_device *dev, int set)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+
+ psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
+ psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
+ psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
+ psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
+ mp->rx_tail = 0;
+}
+
+/*
+ * Reset the receive DMA subsystem
+ */
+
+static void mace_rxdma_reset(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mace = mp->mace;
+ u8 maccc = mace->maccc;
+
+ mace->maccc = maccc & ~ENRCV;
+
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ mace_load_rxdma_base(dev, 0x00);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ mace_load_rxdma_base(dev, 0x10);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+ mace->maccc = maccc;
+ mp->rx_slot = 0;
+
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
+}
+
+/*
+ * Reset the transmit DMA subsystem
+ */
+
+static void mace_txdma_reset(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mace = mp->mace;
+ u8 maccc;
+
+ psc_write_word(PSC_ENETWR_CTL, 0x8800);
+
+ maccc = mace->maccc;
+ mace->maccc = maccc & ~ENXMT;
+
+ mp->tx_slot = mp->tx_sloti = 0;
+ mp->tx_count = N_TX_RING;
+
+ psc_write_word(PSC_ENETWR_CTL, 0x0400);
+ mace->maccc = maccc;
+}
+
+/*
+ * Disable DMA
+ */
+
+static void mace_dma_off(struct net_device *dev)
+{
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ psc_write_word(PSC_ENETRD_CTL, 0x1000);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);
+
+ psc_write_word(PSC_ENETWR_CTL, 0x8800);
+ psc_write_word(PSC_ENETWR_CTL, 0x1000);
+ psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
+ psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
+}
+
+/*
+ * Not really much of a probe. The hardware table tells us if this
+ * model of Macintrash has a MACE (AV macintoshes)
+ */
+
+struct net_device *mace_probe(int unit)
+{
+ int j;
+ struct mace_data *mp;
+ unsigned char *addr;
+ struct net_device *dev;
+ unsigned char checksum = 0;
+ static int found = 0;
+ int err;
+
+ if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
+ return ERR_PTR(-ENODEV);
+
+ found = 1; /* prevent 'finding' one on every device probe */
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ mp = (struct mace_data *) dev->priv;
+ dev->base_addr = (u32)MACE_BASE;
+ mp->mace = (volatile struct mace *) MACE_BASE;
+
+ dev->irq = IRQ_MAC_MACE;
+ mp->dma_intr = IRQ_MAC_MACE_DMA;
+
+ /*
+ * The PROM contains 8 bytes which total 0xFF when XOR'd
+ * together. Due to the usual peculiar Apple brain damage
+ * the bytes are spaced out on 16-byte boundaries and the
+ * bits within each byte are reversed.
+ */
+
+ addr = (void *)MACE_PROM;
+
+ for (j = 0; j < 6; ++j) {
+ u8 v=bitrev(addr[j<<4]);
+ checksum ^= v;
+ dev->dev_addr[j] = v;
+ }
+ for (; j < 8; ++j) {
+ checksum ^= bitrev(addr[j<<4]);
+ }
+
+ if (checksum != 0xFF) {
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ memset(&mp->stats, 0, sizeof(mp->stats));
+
+ dev->open = mace_open;
+ dev->stop = mace_close;
+ dev->hard_start_xmit = mace_xmit_start;
+ dev->tx_timeout = mace_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->get_stats = mace_stats;
+ dev->set_multicast_list = mace_set_multicast;
+ dev->set_mac_address = mace_set_address;
+
+ printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]);
+ for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]);
+ printk("\n");
+
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * Load the address on a mace controller.
+ */
+
+static int mace_set_address(struct net_device *dev, void *addr)
+{
+ unsigned char *p = addr;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+ int i;
+ unsigned long flags;
+ u8 maccc;
+
+ local_irq_save(flags);
+
+ maccc = mb->maccc;
+
+ /* load up the hardware address */
+ mb->iac = ADDRCHG | PHYADDR;
+ while ((mb->iac & ADDRCHG) != 0);
+
+ for (i = 0; i < 6; ++i) {
+ mb->padr = dev->dev_addr[i] = p[i];
+ }
+
+ mb->maccc = maccc;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * Open the Macintosh MACE. Most of this is playing with the DMA
+ * engine. The ethernet chip is quite friendly.
+ */
+
+static int mace_open(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+#if 0
+ int i;
+
+ i = 200;
+ while (--i) {
+ mb->biucc = SWRST;
+ if (mb->biucc & SWRST) {
+ udelay(10);
+ continue;
+ }
+ break;
+ }
+ if (!i) {
+ printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
+ return -EAGAIN;
+ }
+#endif
+
+ mb->biucc = XMTSP_64;
+ mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
+ mb->xmtfc = AUTO_PAD_XMIT;
+ mb->plscc = PORTSEL_AUI;
+ /* mb->utr = RTRD; */
+
+ if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Allocate the DMA ring buffers */
+
+ mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
+ mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);
+
+ if (mp->tx_ring==NULL || mp->rx_ring==NULL) {
+ if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
+ if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
+ free_irq(dev->irq, dev);
+ free_irq(mp->dma_intr, dev);
+ printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
+ return -ENOMEM;
+ }
+
+ mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
+ mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);
+
+ /* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */
+
+ kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
+ kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);
+
+ mace_dma_off(dev);
+
+ /* Not sure what these do */
+
+ psc_write_word(PSC_ENETWR_CTL, 0x9000);
+ psc_write_word(PSC_ENETRD_CTL, 0x9000);
+ psc_write_word(PSC_ENETWR_CTL, 0x0400);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+#if 0
+ /* load up the hardware address */
+
+ mb->iac = ADDRCHG | PHYADDR;
+
+ while ((mb->iac & ADDRCHG) != 0);
+
+ for (i = 0; i < 6; ++i)
+ mb->padr = dev->dev_addr[i];
+
+ /* clear the multicast filter */
+ mb->iac = ADDRCHG | LOGADDR;
+
+ while ((mb->iac & ADDRCHG) != 0);
+
+ for (i = 0; i < 8; ++i)
+ mb->ladrf = 0;
+
+ mb->plscc = PORTSEL_GPSI + ENPLSIO;
+
+ mb->maccc = ENXMT | ENRCV;
+ mb->imr = RCVINT;
+#endif
+
+ mace_rxdma_reset(dev);
+ mace_txdma_reset(dev);
+
+ return 0;
+}
+
+/*
+ * Shut down the mace and its interrupt channel
+ */
+
+static int mace_close(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+
+ mb->maccc = 0; /* disable rx and tx */
+ mb->imr = 0xFF; /* disable all irqs */
+ mace_dma_off(dev); /* disable rx and tx dma */
+
+ free_irq(dev->irq, dev);
+ free_irq(IRQ_MAC_MACE_DMA, dev);
+
+ free_pages((u32) mp->rx_ring, N_RX_PAGES);
+ free_pages((u32) mp->tx_ring, 0);
+
+ return 0;
+}
+
+/*
+ * Transmit a frame
+ */
+
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+
+ /* Stop the queue if the buffer is full */
+
+ if (!mp->tx_count) {
+ netif_stop_queue(dev);
+ return 1;
+ }
+ mp->tx_count--;
+
+ mp->stats.tx_packets++;
+ mp->stats.tx_bytes += skb->len;
+
+ /* We need to copy into our xmit buffer to take care of alignment and caching issues */
+
+ memcpy((void *) mp->tx_ring, skb->data, skb->len);
+
+ /* load the Tx DMA and fire it off */
+
+ psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
+ psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
+ psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);
+
+ mp->tx_slot ^= 0x10;
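+ /* The two PSC DMA register sets sit 0x10 apart; flipping tx_slot
+ queues the next transmit on the other set. */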
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *mace_stats(struct net_device *dev)
+{
+ struct mace_data *p = (struct mace_data *) dev->priv;
+ return &p->stats;
+}
+
+static void mace_set_multicast(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+ int i, j;
+ u32 crc;
+ u8 maccc;
+
+ maccc = mb->maccc;
+ mb->maccc &= ~PROM;
+
+ if (dev->flags & IFF_PROMISC) {
+ mb->maccc |= PROM;
+ } else {
+ unsigned char multicast_filter[8];
+ struct dev_mc_list *dmi = dev->mc_list;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 8; i++) {
+ multicast_filter[i] = 0xFF;
+ }
+ } else {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0;
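+ /* Hash each multicast address: the top 6 bits of the
+ little-endian CRC select one of the 64 filter bits;
+ bit j goes into byte j >> 3 at bit position j & 7. */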
+ for (i = 0; i < dev->mc_count; i++) {
+ crc = ether_crc_le(6, dmi->dmi_addr);
+ j = crc >> 26; /* bit number in multicast_filter */
+ multicast_filter[j >> 3] |= 1 << (j & 7);
+ dmi = dmi->next;
+ }
+ }
+
+ mb->iac = ADDRCHG | LOGADDR;
+ while (mb->iac & ADDRCHG);
+
+ for (i = 0; i < 8; ++i) {
+ mb->ladrf = multicast_filter[i];
+ }
+ }
+
+ mb->maccc = maccc;
+}
+
+/*
+ * Miscellaneous interrupts are handled here. We may end up
+ * having to bash the chip on the head for bad errors
+ */
+
+static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
+{
+ volatile struct mace *mb = mp->mace;
+ static int mace_babbles, mace_jabbers;
+
+ if (intr & MPCO) {
+ mp->stats.rx_missed_errors += 256;
+ }
+ mp->stats.rx_missed_errors += mb->mpc; /* reading clears it */
+
+ if (intr & RNTPCO) {
+ mp->stats.rx_length_errors += 256;
+ }
+ mp->stats.rx_length_errors += mb->rntpc; /* reading clears it */
+
+ if (intr & CERR) {
+ ++mp->stats.tx_heartbeat_errors;
+ }
+ if (intr & BABBLE) {
+ if (mace_babbles++ < 4) {
+ printk(KERN_DEBUG "mace: babbling transmitter\n");
+ }
+ }
+ if (intr & JABBER) {
+ if (mace_jabbers++ < 4) {
+ printk(KERN_DEBUG "mace: jabbering transceiver\n");
+ }
+ }
+}
+
+/*
+ * A transmit error has occurred. (We kick the transmit side from
+ * the DMA completion)
+ */
+
+static void mace_xmit_error(struct net_device *dev)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+ u8 xmtfs, xmtrc;
+
+ xmtfs = mb->xmtfs;
+ xmtrc = mb->xmtrc;
+
+ if (xmtfs & XMTSV) {
+ if (xmtfs & UFLO) {
+ printk("%s: DMA underrun.\n", dev->name);
+ mp->stats.tx_errors++;
+ mp->stats.tx_fifo_errors++;
+ mace_txdma_reset(dev);
+ }
+ if (xmtfs & RTRY) {
+ mp->stats.collisions++;
+ }
+ }
+}
+
+/*
+ * A receive interrupt occurred.
+ */
+
+static void mace_recv_interrupt(struct net_device *dev)
+{
+/* struct mace_data *mp = (struct mace_data *) dev->priv; */
+/* volatile struct mace *mb = mp->mace; */
+}
+
+/*
+ * Process the chip interrupt
+ */
+
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ volatile struct mace *mb = mp->mace;
+ u8 ir;
+
+ ir = mb->ir;
+ mace_handle_misc_intrs(mp, ir);
+
+ if (ir & XMTINT) {
+ mace_xmit_error(dev);
+ }
+ if (ir & RCVINT) {
+ mace_recv_interrupt(dev);
+ }
+ return IRQ_HANDLED;
+}
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+/* struct mace_data *mp = (struct mace_data *) dev->priv; */
+/* volatile struct mace *mb = mp->mace; */
+}
+
+/*
+ * Handle a newly arrived frame
+ */
+
+static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
+{
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ struct sk_buff *skb;
+
+ if (mf->status & RS_OFLO) {
+ printk("%s: fifo overflow.\n", dev->name);
+ mp->stats.rx_errors++;
+ mp->stats.rx_fifo_errors++;
+ }
+ if (mf->status&(RS_CLSN|RS_FRAMERR|RS_FCSERR))
+ mp->stats.rx_errors++;
+
+ if (mf->status&RS_CLSN) {
+ mp->stats.collisions++;
+ }
+ if (mf->status&RS_FRAMERR) {
+ mp->stats.rx_frame_errors++;
+ }
+ if (mf->status&RS_FCSERR) {
+ mp->stats.rx_crc_errors++;
+ }
+
+ skb = dev_alloc_skb(mf->len+2);
+ if (!skb) {
+ mp->stats.rx_dropped++;
+ return;
+ }
+ skb_reserve(skb,2);
+ memcpy(skb_put(skb, mf->len), mf->data, mf->len);
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ mp->stats.rx_packets++;
+ mp->stats.rx_bytes += mf->len;
+}
+
+/*
+ * The PSC has passed us a DMA interrupt event.
+ */
+
+static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = (struct mace_data *) dev->priv;
+ int left, head;
+ u16 status;
+ u32 baka;
+
+ /* Not sure what this does */
+
+ while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
+ if (!(baka & 0x60000000)) return IRQ_NONE;
+
+ /*
+ * Process the read queue
+ */
+
+ status = psc_read_word(PSC_ENETRD_CTL);
+
+ if (status & 0x2000) {
+ mace_rxdma_reset(dev);
+ } else if (status & 0x0100) {
+ psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);
+
+ left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
+ head = N_RX_RING - left;
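+ /* The length register reports how many ring slots are still
+ unfilled, so N_RX_RING - left is one past the last 2 KB
+ frame slot the hardware has written. */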
+
+ /* Loop through the ring buffer and process new packets */
+
+ while (mp->rx_tail < head) {
+ mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
+ mp->rx_tail++;
+ }
+
+ /* If we're out of buffers in this ring then switch to */
+ /* the other set, otherwise just reactivate this one. */
+
+ if (!left) {
+ mace_load_rxdma_base(dev, mp->rx_slot);
+ mp->rx_slot ^= 0x10;
+ } else {
+ psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
+ }
+ }
+
+ /*
+ * Process the write queue
+ */
+
+ status = psc_read_word(PSC_ENETWR_CTL);
+
+ if (status & 0x2000) {
+ mace_txdma_reset(dev);
+ } else if (status & 0x0100) {
+ psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
+ mp->tx_sloti ^= 0x10;
+ mp->tx_count++;
+ netif_wake_queue(dev);
+ }
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
new file mode 100644
index 000000000000..be28c65de729
--- /dev/null
+++ b/drivers/net/macsonic.c
@@ -0,0 +1,657 @@
+/*
+ * macsonic.c
+ *
+ * (C) 1998 Alan Cox
+ *
+ * Debugging Andreas Ehliar, Michael Schmitz
+ *
+ * Based on code
+ * (C) 1996 by Thomas Bogendoerfer (tsbogend@bigbug.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the Mac onboard Sonic ethernet chip.
+ *
+ * 98/12/21 MSch: judged from tests on Q800, it's basically working,
+ * but eating up both receive and transmit resources
+ * and duplicating packets. Needs more testing.
+ *
+ * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed.
+ *
+ * 00/10/31 sammy@oh.verio.com: Updated driver for 2.4 kernels, fixed problems
+ * on centris.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/nubus.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/hwtest.h>
+#include <asm/dma.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_via.h>
+
+#define SREGS_PAD(n) u16 n;
+
+#include "sonic.h"
+
+#define SONIC_READ(reg) \
+ nubus_readl(base_addr+(reg))
+#define SONIC_WRITE(reg,val) \
+ nubus_writel((val), base_addr+(reg))
+#define sonic_read(dev, reg) \
+ nubus_readl((dev)->base_addr+(reg))
+#define sonic_write(dev, reg, val) \
+ nubus_writel((val), (dev)->base_addr+(reg))
+
+
+static int sonic_debug;
+static int sonic_version_printed;
+
+static int reg_offset;
+
+extern int mac_onboard_sonic_probe(struct net_device* dev);
+extern int mac_nubus_sonic_probe(struct net_device* dev);
+
+/* For onboard SONIC */
+#define ONBOARD_SONIC_REGISTERS 0x50F0A000
+#define ONBOARD_SONIC_PROM_BASE 0x50f08000
+
+enum macsonic_type {
+ MACSONIC_DUODOCK,
+ MACSONIC_APPLE,
+ MACSONIC_APPLE16,
+ MACSONIC_DAYNA,
+ MACSONIC_DAYNALINK
+};
+
+/* For the built-in SONIC in the Duo Dock */
+#define DUODOCK_SONIC_REGISTERS 0xe10000
+#define DUODOCK_SONIC_PROM_BASE 0xe12000
+
+/* For Apple-style NuBus SONIC */
+#define APPLE_SONIC_REGISTERS 0
+#define APPLE_SONIC_PROM_BASE 0x40000
+
+/* Daynalink LC SONIC */
+#define DAYNALINK_PROM_BASE 0x400000
+
+/* For Dayna-style NuBus SONIC (haven't seen one yet) */
+#define DAYNA_SONIC_REGISTERS 0x180000
+/* This is what OpenBSD says. However, this is definitely in NuBus
+ ROM space so we should be able to get it by walking the NuBus
+ resource directories */
+#define DAYNA_SONIC_MAC_ADDR 0xffe004
+
+#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
+
+struct net_device * __init macsonic_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ SET_MODULE_OWNER(dev);
+
+ /* This will catch fatal stuff like -ENOMEM as well as success */
+ err = mac_onboard_sonic_probe(dev);
+ if (err == 0)
+ goto found;
+ if (err != -ENODEV)
+ goto out;
+ err = mac_nubus_sonic_probe(dev);
+ if (err)
+ goto out;
+found:
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ kfree(dev->priv);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * For reversing the PROM address
+ */
+
+static unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
+ 1, 9, 5, 13, 3, 11, 7, 15};
+
+static inline void bit_reverse_addr(unsigned char addr[6])
+{
+ int i;
+
+ for(i = 0; i < 6; i++)
+ addr[i] = ((nibbletab[addr[i] & 0xf] << 4) |
+ nibbletab[(addr[i] >> 4) &0xf]);
+}
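+/*
+ * Example: bit_reverse_addr() turns 08:00:07:12:34:56 into
+ * 10:00:e0:48:2c:6a, reversing the bit order within each byte.
+ */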
+
+int __init macsonic_init(struct net_device* dev)
+{
+ struct sonic_local* lp = NULL;
+ int i;
+
+ /* Allocate the entire chunk of memory for the descriptors.
+ Note that this cannot cross a 64K boundary. */
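+ /* The check below accepts the chunk only when the low 16 bits of
+ the end address are >= those of the start address, i.e. the
+ allocation did not wrap across a 64K boundary. */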
+ for (i = 0; i < 20; i++) {
+ unsigned long desc_base, desc_top;
+ if((lp = kmalloc(sizeof(struct sonic_local), GFP_KERNEL | GFP_DMA)) == NULL) {
+ printk(KERN_ERR "%s: couldn't allocate descriptor buffers\n", dev->name);
+ return -ENOMEM;
+ }
+
+ desc_base = (unsigned long) lp;
+ desc_top = desc_base + sizeof(struct sonic_local);
+ if ((desc_top & 0xffff) >= (desc_base & 0xffff))
+ break;
+ /* Hmm. try again (FIXME: does this actually work?) */
+ kfree(lp);
+ printk(KERN_DEBUG
+ "%s: didn't get continguous chunk [%08lx - %08lx], trying again\n",
+ dev->name, desc_base, desc_top);
+ }
+
+ if (lp == NULL) {
+ printk(KERN_ERR "%s: tried 20 times to allocate descriptor buffers, giving up.\n",
+ dev->name);
+ return -ENOMEM;
+ }
+
+ dev->priv = lp;
+
+#if 0
+ /* this code is only here as a curiosity... mainly, where the
+ fuck did SONIC_BUS_SCALE come from, and what was it supposed
+ to do? the normal allocation works great for 32-bit stuff. */
+
+ /* Now set up the pointers to point to the appropriate places */
+ lp->cda = lp->sonic_desc;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+#endif
+
+ memset(lp, 0, sizeof(struct sonic_local));
+
+ lp->cda_laddr = (unsigned int)&(lp->cda);
+ lp->tda_laddr = (unsigned int)lp->tda;
+ lp->rra_laddr = (unsigned int)lp->rra;
+ lp->rda_laddr = (unsigned int)lp->rda;
+
+ /* FIXME, maybe we should use skbs */
+ if ((lp->rba = (char *)
+ kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL | GFP_DMA)) == NULL) {
+ printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name);
+ dev->priv = NULL;
+ kfree(lp);
+ return -ENOMEM;
+ }
+
+ lp->rba_laddr = (unsigned int)lp->rba;
+
+ {
+ int rs, ds;
+
+ /* almost always 12*4096, but let's not take chances */
+ rs = ((SONIC_NUM_RRS * SONIC_RBSIZE + 4095) / 4096) * 4096;
+ /* almost always under a page, but let's not take chances */
+ ds = ((sizeof(struct sonic_local) + 4095) / 4096) * 4096;
+ kernel_set_cachemode(lp->rba, rs, IOMAP_NOCACHE_SER);
+ kernel_set_cachemode(lp, ds, IOMAP_NOCACHE_SER);
+ }
+
+#if 0
+ flush_cache_all();
+#endif
+
+ dev->open = sonic_open;
+ dev->stop = sonic_close;
+ dev->hard_start_xmit = sonic_send_packet;
+ dev->get_stats = sonic_get_stats;
+ dev->set_multicast_list = &sonic_multicast_list;
+
+ /*
+ * clear tally counter
+ */
+ sonic_write(dev, SONIC_CRCT, 0xffff);
+ sonic_write(dev, SONIC_FAET, 0xffff);
+ sonic_write(dev, SONIC_MPT, 0xffff);
+
+ return 0;
+}
+
+int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
+{
+ const int prom_addr = ONBOARD_SONIC_PROM_BASE;
+ int i;
+
+ /* On NuBus boards we can sometimes look in the ROM resources.
+ No such luck for comm-slot/onboard. */
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = SONIC_READ_PROM(i);
+
+ /* Most of the time, the address is bit-reversed. The NetBSD
+ source has a rather long and detailed historical account of
+ why this is so. */
+ if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
+ memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x05\x02", 3))
+ bit_reverse_addr(dev->dev_addr);
+ else
+ return 0;
+
+ /* If we still have what seems to be a bogus address, we'll
+ look in the CAM. The top entry should be ours. */
+ /* Danger! This only works if MacOS has already initialized
+ the card... */
+ if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
+ memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x05\x02", 3))
+ {
+ unsigned short val;
+
+ printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n");
+
+ sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
+ sonic_write(dev, SONIC_CEP, 15);
+
+ val = sonic_read(dev, SONIC_CAP2);
+ dev->dev_addr[5] = val >> 8;
+ dev->dev_addr[4] = val & 0xff;
+ val = sonic_read(dev, SONIC_CAP1);
+ dev->dev_addr[3] = val >> 8;
+ dev->dev_addr[2] = val & 0xff;
+ val = sonic_read(dev, SONIC_CAP0);
+ dev->dev_addr[1] = val >> 8;
+ dev->dev_addr[0] = val & 0xff;
+
+ printk(KERN_INFO "HW Address from CAM 15: ");
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+ printk("\n");
+ } else return 0;
+
+ if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
+ memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x05\x02", 3))
+ {
+ /*
+ * Still nonsense ... messed up someplace!
+ */
+ printk(KERN_ERR "macsonic: ERROR (INVALID MAC)\n");
+ return -EIO;
+ } else return 0;
+}
+
+int __init mac_onboard_sonic_probe(struct net_device* dev)
+{
+ /* Bwahahaha */
+ static int once_is_more_than_enough;
+ int i;
+ int dma_bitmode;
+
+ if (once_is_more_than_enough)
+ return -ENODEV;
+ once_is_more_than_enough = 1;
+
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
+
+ if (macintosh_config->ether_type != MAC_ETHER_SONIC)
+ {
+ printk("none.\n");
+ return -ENODEV;
+ }
+
+ /* Bogus probing on the models which may or may not have
+ Ethernet (BTW, the Ethernet *is* always at the same
+ address, and nothing else lives there, at least if Apple's
+ documentation is to be believed) */
+ if (macintosh_config->ident == MAC_MODEL_Q630 ||
+ macintosh_config->ident == MAC_MODEL_P588 ||
+ macintosh_config->ident == MAC_MODEL_C610) {
+ unsigned long flags;
+ int card_present;
+
+ local_irq_save(flags);
+ card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
+ local_irq_restore(flags);
+
+ if (!card_present) {
+ printk("none.\n");
+ return -ENODEV;
+ }
+ }
+
+ printk("yes\n");
+
+ /* Danger! My arms are flailing wildly! You *must* set this
+ before using sonic_read() */
+
+ dev->base_addr = ONBOARD_SONIC_REGISTERS;
+ if (via_alt_mapping)
+ dev->irq = IRQ_AUTO_3;
+ else
+ dev->irq = IRQ_NUBUS_9;
+
+ if (!sonic_version_printed) {
+ printk(KERN_INFO "%s", version);
+ sonic_version_printed = 1;
+ }
+ printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
+ dev->name, dev->base_addr);
+
+ /* Now do a song and dance routine in an attempt to determine
+ the bus width */
+
+ /* The PowerBook's SONIC is 16 bit always. */
+ if (macintosh_config->ident == MAC_MODEL_PB520) {
+ reg_offset = 0;
+ dma_bitmode = 0;
+ } else if (macintosh_config->ident == MAC_MODEL_C610) {
+ reg_offset = 0;
+ dma_bitmode = 1;
+ } else {
+ /* Some of the comm-slot cards are 16 bit. But some
+ of them are not. The 32-bit cards use offset 2 and
+ pad with zeroes or sometimes ones (I think...)
+ Therefore, if we try offset 0 and get a silicon
+ revision of 0, we assume 16 bit. */
+ int sr;
+
+ /* Technically this is not necessary since we zeroed
+ it above */
+ reg_offset = 0;
+ dma_bitmode = 0;
+ sr = sonic_read(dev, SONIC_SR);
+ if (sr == 0 || sr == 0xffff) {
+ reg_offset = 2;
+ /* 83932 is 0x0004, 83934 is 0x0100 or 0x0101 */
+ sr = sonic_read(dev, SONIC_SR);
+ dma_bitmode = 1;
+
+ }
+ printk(KERN_INFO
+ "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
+ dev->name, sr, dma_bitmode?32:16, reg_offset);
+ }
+
+
+ /* this carries my sincere apologies -- by the time I got to updating
+ the driver, support for "reg_offsets" appears nowhere in the sonic
+ code, going back for over a year. Fortunately, my Mac doesn't seem
+ to use whatever this was.
+
+ If you know how this is supposed to be implemented, either fix it,
+ or contact me (sammy@oh.verio.com) to explain what it is. --Sam */
+
+ if(reg_offset) {
+ printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
+ return -ENODEV;
+ }
+
+ /* Software reset, then initialize control registers. */
+ sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
+ sonic_write(dev, SONIC_DCR, SONIC_DCR_BMS |
+ SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_EXBUS |
+ (dma_bitmode ? SONIC_DCR_DW : 0));
+
+ /* This *must* be written back to in order to restore the
+ extended programmable output bits */
+ sonic_write(dev, SONIC_DCR2, 0);
+
+ /* Clear *and* disable interrupts to be on the safe side */
+ sonic_write(dev, SONIC_ISR,0x7fff);
+ sonic_write(dev, SONIC_IMR,0);
+
+ /* Now look for the MAC address. */
+ if (mac_onboard_sonic_ethernet_addr(dev) != 0)
+ return -ENODEV;
+
+ printk(KERN_INFO "MAC ");
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+
+ printk(" IRQ %d\n", dev->irq);
+
+ /* Shared init code */
+ return macsonic_init(dev);
+}
+
+int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev,
+ unsigned long prom_addr,
+ int id)
+{
+ int i;
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = SONIC_READ_PROM(i);
+ /* For now we are going to assume that they're all bit-reversed */
+ bit_reverse_addr(dev->dev_addr);
+
+ return 0;
+}
+
+int __init macsonic_ident(struct nubus_dev* ndev)
+{
+ if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ return MACSONIC_DAYNALINK;
+ if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
+ ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ /* There has to be a better way to do this... */
+ if (strstr(ndev->board->name, "DuoDock"))
+ return MACSONIC_DUODOCK;
+ else
+ return MACSONIC_APPLE;
+ }
+ return -1;
+}
+
+int __init mac_nubus_sonic_probe(struct net_device* dev)
+{
+ static int slots;
+ struct nubus_dev* ndev = NULL;
+ unsigned long base_addr, prom_addr;
+ u16 sonic_dcr;
+ int id;
+ int i;
+ int dma_bitmode;
+
+ /* Find the first SONIC that hasn't been initialized already */
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
+ NUBUS_TYPE_ETHERNET, ndev)) != NULL)
+ {
+ /* Have we seen it already? */
+ if (slots & (1<<ndev->board->slot))
+ continue;
+ slots |= 1<<ndev->board->slot;
+
+ /* Is it one of ours? */
+ if ((id = macsonic_ident(ndev)) != -1)
+ break;
+ }
+
+ if (ndev == NULL)
+ return -ENODEV;
+
+ switch (id) {
+ case MACSONIC_DUODOCK:
+ base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1
+ | SONIC_DCR_TFT0;
+ reg_offset = 2;
+ dma_bitmode = 1;
+ break;
+ case MACSONIC_APPLE:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
+ reg_offset = 0;
+ dma_bitmode = 1;
+ break;
+ case MACSONIC_APPLE16:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_EXBUS
+ | SONIC_DCR_RFT1 | SONIC_DCR_TFT0
+ | SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ reg_offset = 0;
+ dma_bitmode = 0;
+ break;
+ case MACSONIC_DAYNALINK:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
+ sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0
+ | SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ reg_offset = 0;
+ dma_bitmode = 0;
+ break;
+ case MACSONIC_DAYNA:
+ base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
+ sonic_dcr = SONIC_DCR_BMS
+ | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
+ reg_offset = 0;
+ dma_bitmode = 0;
+ break;
+ default:
+ printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
+ return -ENODEV;
+ }
+
+ /* Danger! My arms are flailing wildly! You *must* set this
+ before using sonic_read() */
+ dev->base_addr = base_addr;
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+
+ if (!sonic_version_printed) {
+ printk(KERN_INFO "%s", version);
+ sonic_version_printed = 1;
+ }
+ printk(KERN_INFO "%s: %s in slot %X\n",
+ dev->name, ndev->board->name, ndev->board->slot);
+ printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
+ dev->name, sonic_read(dev, SONIC_SR), dma_bitmode?32:16, reg_offset);
+
+ if(reg_offset) {
+ printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
+ return -ENODEV;
+ }
+
+ /* Software reset, then initialize control registers. */
+ sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
+ sonic_write(dev, SONIC_DCR, sonic_dcr
+ | (dma_bitmode ? SONIC_DCR_DW : 0));
+
+ /* Clear *and* disable interrupts to be on the safe side */
+ sonic_write(dev, SONIC_ISR,0x7fff);
+ sonic_write(dev, SONIC_IMR,0);
+
+ /* Now look for the MAC address. */
+ if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
+ return -ENODEV;
+
+ printk(KERN_INFO "MAC ");
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+ printk(" IRQ %d\n", dev->irq);
+
+ /* Shared init code */
+ return macsonic_init(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_macsonic;
+
+MODULE_PARM(sonic_debug, "i");
+MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
+
+int
+init_module(void)
+{
+ dev_macsonic = macsonic_probe(-1);
+ if (IS_ERR(dev_macsonic)) {
+ printk(KERN_WARNING "macsonic.c: No card found\n");
+ return PTR_ERR(dev_macsonic);
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_macsonic);
+ kfree(dev_macsonic->priv);
+ free_netdev(dev_macsonic);
+}
+#endif /* MODULE */
+
+
+#define vdma_alloc(foo, bar) ((u32)foo)
+#define vdma_free(baz)
+#define sonic_chiptomem(bat) (bat)
+#define PHYSADDR(quux) (quux)
+#define CPHYSADDR(quux) (quux)
+
+#define sonic_request_irq request_irq
+#define sonic_free_irq free_irq
+
+#include "sonic.c"
+
+/*
+ * Local variables:
+ * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o macsonic.o macsonic.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 8
+ * tab-width: 8
+ * End:
+ *
+ */
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
new file mode 100644
index 000000000000..e23655f5049f
--- /dev/null
+++ b/drivers/net/meth.c
@@ -0,0 +1,843 @@
+/*
+ * meth.c -- O2 Builtin 10/100 Ethernet driver
+ *
+ * Copyright (C) 2001-2003 Ilya Volynets
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h> /* mark_bh */
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/device.h> /* struct device, et al */
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/ip.h> /* struct iphdr */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/skbuff.h>
+#include <linux/mii.h> /* MII definitions */
+
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+#include <asm/io.h>
+#include <asm/checksum.h>
+#include <asm/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include "meth.h"
+
+#ifndef MFE_DEBUG
+#define MFE_DEBUG 0
+#endif
+
+#if MFE_DEBUG>=1
+#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args)
+#define MFE_RX_DEBUG 2
+#else
+#define DPRINTK(str,args...)
+#define MFE_RX_DEBUG 0
+#endif
+
+
+static const char *meth_str="SGI O2 Fast Ethernet";
+MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
+MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
+
+#define HAVE_TX_TIMEOUT
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
+#define TX_TIMEOUT (400*HZ/1000)
+
+#ifdef HAVE_TX_TIMEOUT
+static int timeout = TX_TIMEOUT;
+MODULE_PARM(timeout, "i");
+#endif
+
+/*
+ * This structure is private to each device. It is used to pass
+ * packets in and out, so there is a place for a packet.
+ */
+struct meth_private {
+ struct net_device_stats stats;
+ /* in-memory copy of MAC Control register */
+ unsigned long mac_ctrl;
+ /* in-memory copy of DMA Control register */
+ unsigned long dma_ctrl;
+ /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
+ unsigned long phy_addr;
+ tx_packet *tx_ring;
+ dma_addr_t tx_ring_dma;
+ struct sk_buff *tx_skbs[TX_RING_ENTRIES];
+ dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
+ unsigned long tx_read, tx_write, tx_count;
+
+ rx_packet *rx_ring[RX_RING_ENTRIES];
+ dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
+ struct sk_buff *rx_skbs[RX_RING_ENTRIES];
+ unsigned long rx_write;
+
+ spinlock_t meth_lock;
+};
+
+static void meth_tx_timeout(struct net_device *dev);
+static irqreturn_t meth_interrupt(int irq, void *dev_id, struct pt_regs *pregs);
+
+/* global, initialized in ip32-setup.c */
+char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};
+
+static inline void load_eaddr(struct net_device *dev)
+{
+ int i;
+ DPRINTK("Loading MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (int)o2meth_eaddr[0]&0xFF,(int)o2meth_eaddr[1]&0xFF,(int)o2meth_eaddr[2]&0xFF,
+ (int)o2meth_eaddr[3]&0xFF,(int)o2meth_eaddr[4]&0xFF,(int)o2meth_eaddr[5]&0xFF);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = o2meth_eaddr[i];
+ mace->eth.mac_addr = (*(unsigned long*)o2meth_eaddr) >> 16;
+}
+
+/*
+ * Waits for BUSY status of mdio bus to clear
+ */
+#define WAIT_FOR_PHY(___rval) \
+ while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \
+ udelay(25); \
+ }
+/*read phy register, return value read */
+static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
+{
+ unsigned long rval;
+ WAIT_FOR_PHY(rval);
+ mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
+ udelay(25);
+ mace->eth.phy_trans_go = 1;
+ udelay(25);
+ WAIT_FOR_PHY(rval);
+ return rval & MDIO_DATA_MASK;
+}
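+/* For instance, mdio_read(priv, 2) and mdio_read(priv, 3) fetch the PHY
+ * identifier registers, which mdio_probe() below combines to recognise
+ * the attached PHY type. */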
+
+static int mdio_probe(struct meth_private *priv)
+{
+ int i;
+ unsigned long p2, p3;
+ /* check if phy is detected already */
+ if(priv->phy_addr>=0&&priv->phy_addr<32)
+ return 0;
+ spin_lock(&priv->meth_lock);
+ for (i=0;i<32;++i){
+ priv->phy_addr=i;
+ p2=mdio_read(priv,2);
+ p3=mdio_read(priv,3);
+#if MFE_DEBUG>=2
+ switch ((p2<<12)|(p3>>4)){
+ case PHY_QS6612X:
+ DPRINTK("PHY is QS6612X\n");
+ break;
+ case PHY_ICS1889:
+ DPRINTK("PHY is ICS1889\n");
+ break;
+ case PHY_ICS1890:
+ DPRINTK("PHY is ICS1890\n");
+ break;
+ case PHY_DP83840:
+ DPRINTK("PHY is DP83840\n");
+ break;
+ }
+#endif
+ if(p2!=0xffff&&p2!=0x0000){
+ DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4));
+ break;
+ }
+ }
+ spin_unlock(&priv->meth_lock);
+ if(priv->phy_addr<32) {
+ return 0;
+ }
+ DPRINTK("Oopsie! PHY is not known!\n");
+ priv->phy_addr=-1;
+ return -ENODEV;
+}
+
+static void meth_check_link(struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ unsigned long mii_advertising = mdio_read(priv, 4);
+ unsigned long mii_partner = mdio_read(priv, 5);
+ unsigned long negotiated = mii_advertising & mii_partner;
+ unsigned long duplex, speed;
+
+ if (mii_partner == 0xffff)
+ return;
+
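+ /*
+ * These masks follow the standard MII autonegotiation bits:
+ * 0x0380 covers the 100 Mbit abilities (100BASE-TX half/full duplex
+ * and 100BASE-T4), 0x0100 is 100BASE-TX full duplex and 0x0040 is
+ * 10BASE-T full duplex.
+ */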
+ speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
+ duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
+ METH_PHY_FDX : 0;
+
+ if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
+ DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
+ if (duplex)
+ priv->mac_ctrl |= METH_PHY_FDX;
+ else
+ priv->mac_ctrl &= ~METH_PHY_FDX;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ }
+
+ if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
+ DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
+ if (duplex)
+ priv->mac_ctrl |= METH_100MBIT;
+ else
+ priv->mac_ctrl &= ~METH_100MBIT;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ }
+}
+
+
+static int meth_init_tx_ring(struct meth_private *priv)
+{
+ /* Init TX ring */
+ priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
+ if (!priv->tx_ring)
+ return -ENOMEM;
+ memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+ priv->tx_count = priv->tx_read = priv->tx_write = 0;
+ mace->eth.tx_ring_base = priv->tx_ring_dma;
+ /* Now init skb save area */
+ memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
+ memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
+ return 0;
+}
+
+static int meth_init_rx_ring(struct meth_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
+ /* 8-byte status vector + 3-quad padding + 2-byte padding,
+ * to put the data on a 64-bit aligned boundary */
+ skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
+ priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
+ /* I'll need to re-sync it after each RX */
+ priv->rx_ring_dmas[i] =
+ dma_map_single(NULL, priv->rx_ring[i],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ mace->eth.rx_fifo = priv->rx_ring_dmas[i];
+ }
+ priv->rx_write = 0;
+ return 0;
+}
+static void meth_free_tx_ring(struct meth_private *priv)
+{
+ int i;
+
+ /* Remove any pending skb */
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
+ if (priv->tx_skbs[i])
+ dev_kfree_skb(priv->tx_skbs[i]);
+ priv->tx_skbs[i] = NULL;
+ }
+ dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
+ priv->tx_ring_dma);
+}
+
+/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
+static void meth_free_rx_ring(struct meth_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ dma_unmap_single(NULL, priv->rx_ring_dmas[i],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_ring[i] = 0;
+ priv->rx_ring_dmas[i] = 0;
+ kfree_skb(priv->rx_skbs[i]);
+ }
+}
+
+int meth_reset(struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+
+ /* Reset card */
+ mace->eth.mac_ctrl = SGI_MAC_RESET;
+ udelay(1);
+ mace->eth.mac_ctrl = 0;
+ udelay(25);
+
+ /* Load ethernet address */
+ load_eaddr(dev);
+ /* Should load some "errata", but later */
+
+ /* Check for device */
+ if (mdio_probe(priv) < 0) {
+ DPRINTK("Unable to find PHY\n");
+ return -ENODEV;
+ }
+
+ /* Initial mode: 10 | Half-duplex | Accept normal packets */
+ priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
+ if (dev->flags & IFF_PROMISC)
+ priv->mac_ctrl |= METH_PROMISC;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+
+ /* Autonegotiate speed and duplex mode */
+ meth_check_link(dev);
+
+ /* Now set dma control, but don't enable DMA, yet */
+ priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
+ (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ return 0;
+}
+
+/*============End Helper Routines=====================*/
+
+/*
+ * Open and close
+ */
+static int meth_open(struct net_device *dev)
+{
+ struct meth_private *priv = dev->priv;
+ int ret;
+
+ priv->phy_addr = -1; /* No PHY is known yet... */
+
+ /* Initialize the hardware */
+ ret = meth_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Allocate the ring buffers */
+ ret = meth_init_tx_ring(priv);
+ if (ret < 0)
+ return ret;
+ ret = meth_init_rx_ring(priv);
+ if (ret < 0)
+ goto out_free_tx_ring;
+
+ ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ goto out_free_rx_ring;
+ }
+
+ /* Start DMA */
+ priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
+ METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ DPRINTK("About to start queue\n");
+ netif_start_queue(dev);
+
+ return 0;
+
+out_free_rx_ring:
+ meth_free_rx_ring(priv);
+out_free_tx_ring:
+ meth_free_tx_ring(priv);
+
+ return ret;
+}
+
+static int meth_release(struct net_device *dev)
+{
+ struct meth_private *priv = dev->priv;
+
+ DPRINTK("Stopping queue\n");
+ netif_stop_queue(dev); /* can't transmit any more */
+ /* shut down DMA */
+ priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
+ METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ free_irq(dev->irq, dev);
+ meth_free_tx_ring(priv);
+ meth_free_rx_ring(priv);
+
+ return 0;
+}
+
+/*
+ * Receive a packet: retrieve, encapsulate and pass over to upper levels
+ */
+static void meth_rx(struct net_device* dev, unsigned long int_status)
+{
+ struct sk_buff *skb;
+ unsigned long status;
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
+
+ spin_lock(&priv->meth_lock);
+ priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ spin_unlock(&priv->meth_lock);
+
+ if (int_status & METH_INT_RX_UNDERFLOW) {
+ fifo_rptr = (fifo_rptr - 1) & 0x0f;
+ }
+ while (priv->rx_write != fifo_rptr) {
+ dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ status = priv->rx_ring[priv->rx_write]->status.raw;
+#if MFE_DEBUG
+ if (!(status & METH_RX_ST_VALID)) {
+ DPRINTK("Not received? status=%016lx\n",status);
+ }
+#endif
+ if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
+ int len = (status & 0xffff) - 4; /* omit CRC */
+ /* length sanity check */
+ if (len < 60 || len > 1518) {
+ printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n",
+ dev->name, priv->rx_write,
+ priv->rx_ring[priv->rx_write]->status.raw);
+ priv->stats.rx_errors++;
+ priv->stats.rx_length_errors++;
+ skb = priv->rx_skbs[priv->rx_write];
+ } else {
+ skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC | GFP_DMA);
+ if (!skb) {
+ /* Ouch! No memory! Drop packet on the floor */
+ DPRINTK("No mem: dropping packet\n");
+ priv->stats.rx_dropped++;
+ skb = priv->rx_skbs[priv->rx_write];
+ } else {
+ struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
+ /* 8-byte status vector + 3-quad padding + 2-byte padding,
+ * to put the data on a 64-bit aligned boundary */
+ skb_reserve(skb, METH_RX_HEAD);
+ /* Write metadata, and then pass to the receive level */
+ skb_put(skb_c, len);
+ priv->rx_skbs[priv->rx_write] = skb;
+ skb_c->dev = dev;
+ skb_c->protocol = eth_type_trans(skb_c, dev);
+ dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+ netif_rx(skb_c);
+ }
+ }
+ } else {
+ priv->stats.rx_errors++;
+ skb=priv->rx_skbs[priv->rx_write];
+#if MFE_DEBUG>0
+ printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status);
+ if(status&METH_RX_ST_RCV_CODE_VIOLATION)
+ printk(KERN_WARNING "Receive Code Violation\n");
+ if(status&METH_RX_ST_CRC_ERR)
+ printk(KERN_WARNING "CRC error\n");
+ if(status&METH_RX_ST_INV_PREAMBLE_CTX)
+ printk(KERN_WARNING "Invalid Preamble Context\n");
+ if(status&METH_RX_ST_LONG_EVT_SEEN)
+ printk(KERN_WARNING "Long Event Seen...\n");
+ if(status&METH_RX_ST_BAD_PACKET)
+ printk(KERN_WARNING "Bad Packet\n");
+ if(status&METH_RX_ST_CARRIER_EVT_SEEN)
+ printk(KERN_WARNING "Carrier Event Seen\n");
+#endif
+ }
+ priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
+ priv->rx_ring[priv->rx_write]->status.raw = 0;
+ priv->rx_ring_dmas[priv->rx_write] =
+ dma_map_single(NULL, priv->rx_ring[priv->rx_write],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
+ ADVANCE_RX_PTR(priv->rx_write);
+ }
+ spin_lock(&priv->meth_lock);
+ /* In case there was underflow, and Rx DMA was disabled */
+ priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ mace->eth.int_stat = METH_INT_RX_THRESHOLD;
+ spin_unlock(&priv->meth_lock);
+}
+
+static int meth_tx_full(struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+
+ return (priv->tx_count >= TX_RING_ENTRIES - 1);
+}
+
+static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
+{
+ struct meth_private *priv = dev->priv;
+ unsigned long status;
+ struct sk_buff *skb;
+ unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
+
+ spin_lock(&priv->meth_lock);
+
+ /* Stop DMA notification */
+ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ while (priv->tx_read != rptr) {
+ skb = priv->tx_skbs[priv->tx_read];
+ status = priv->tx_ring[priv->tx_read].header.raw;
+#if MFE_DEBUG>=1
+ if (priv->tx_read == priv->tx_write)
+ DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr);
+#endif
+ if (status & METH_TX_ST_DONE) {
+ if (status & METH_TX_ST_SUCCESS){
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ } else {
+ priv->stats.tx_errors++;
+#if MFE_DEBUG>=1
+ DPRINTK("TX error: status=%016lx <",status);
+ if(status & METH_TX_ST_SUCCESS)
+ printk(" SUCCESS");
+ if(status & METH_TX_ST_TOOLONG)
+ printk(" TOOLONG");
+ if(status & METH_TX_ST_UNDERRUN)
+ printk(" UNDERRUN");
+ if(status & METH_TX_ST_EXCCOLL)
+ printk(" EXCCOLL");
+ if(status & METH_TX_ST_DEFER)
+ printk(" DEFER");
+ if(status & METH_TX_ST_LATECOLL)
+ printk(" LATECOLL");
+ printk(" >\n");
+#endif
+ }
+ } else {
+ DPRINTK("RPTR points us here, but packet not done?\n");
+ break;
+ }
+ dev_kfree_skb_irq(skb);
+ priv->tx_skbs[priv->tx_read] = NULL;
+ priv->tx_ring[priv->tx_read].header.raw = 0;
+ priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
+ priv->tx_count--;
+ }
+
+ /* wake up queue if it was stopped */
+ if (netif_queue_stopped(dev) && !meth_tx_full(dev)) {
+ netif_wake_queue(dev);
+ }
+
+ mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
+ spin_unlock(&priv->meth_lock);
+}
+
+static void meth_error(struct net_device* dev, unsigned status)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+
+ printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
+ /* check for errors too... */
+ if (status & (METH_INT_TX_LINK_FAIL))
+ printk(KERN_WARNING "meth: link failure\n");
+ /* Should I do full reset in this case? */
+ if (status & (METH_INT_MEM_ERROR))
+ printk(KERN_WARNING "meth: memory error\n");
+ if (status & (METH_INT_TX_ABORT))
+ printk(KERN_WARNING "meth: aborted\n");
+ if (status & (METH_INT_RX_OVERFLOW))
+ printk(KERN_WARNING "meth: Rx overflow\n");
+ if (status & (METH_INT_RX_UNDERFLOW)) {
+ printk(KERN_WARNING "meth: Rx underflow\n");
+ spin_lock(&priv->meth_lock);
+ mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
+ /* more underflow interrupts will be delivered,
+ * effectively throwing us into an infinite loop.
+ * Thus I stop processing Rx in this case. */
+ priv->dma_ctrl &= ~METH_DMA_RX_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ DPRINTK("Disabled meth Rx DMA temporarily\n");
+ spin_unlock(&priv->meth_lock);
+ }
+ mace->eth.int_stat = METH_INT_ERROR;
+}
+
+/*
+ * The typical interrupt entry point
+ */
+static irqreturn_t meth_interrupt(int irq, void *dev_id, struct pt_regs *pregs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ unsigned long status;
+
+ status = mace->eth.int_stat;
+ while (status & 0xff) {
+ /* First handle errors - if we get Rx underflow,
+ * Rx DMA will be disabled, and Rx handler will reenable
+		 * it. I don't think it's possible to get Rx underflow
+		 * without getting an Rx interrupt. */
+ if (status & METH_INT_ERROR) {
+ meth_error(dev, status);
+ }
+ if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
+ /* a transmission is over: free the skb */
+ meth_tx_cleanup(dev, status);
+ }
+ if (status & METH_INT_RX_THRESHOLD) {
+ if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
+ break;
+ /* send it to meth_rx for handling */
+ meth_rx(dev, status);
+ }
+ status = mace->eth.int_stat;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Transmits packets that fit into TX descriptor (are <=120B)
+ */
+static void meth_tx_short_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
+ /* maybe I should set whole thing to 0 first... */
+ memcpy(desc->data.dt + (120 - len), skb->data, skb->len);
+ if (skb->len < len)
+ memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
+}
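+
+/*
+ * Worked example for the short-TX encoding above (illustrative; the frame
+ * size is assumed): for a 60-byte frame (ETH_ZLEN), len = 60, so header.raw
+ * carries data_len = len - 1 = 59 and data_offset = 128 - 60 = 68.  The
+ * payload is copied to data.dt + 60, i.e. byte 8 + 60 = 68 of the 128-byte
+ * descriptor, matching data_offset, so the frame ends exactly at the
+ * descriptor boundary (byte 127).
+ */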
+#define TX_CATBUF1 BIT(25)
+static void meth_tx_1page_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
+ int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
+ int buffer_len = skb->len - unaligned_len;
+ dma_addr_t catbuf;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);
+
+ /* unaligned part */
+ if (unaligned_len) {
+ memcpy(desc->data.dt + (120 - unaligned_len),
+ skb->data, unaligned_len);
+ desc->header.raw |= (128 - unaligned_len) << 16;
+ }
+
+ /* first page */
+ catbuf = dma_map_single(NULL, buffer_data, buffer_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
+ desc->data.cat_buf[0].form.len = buffer_len - 1;
+}
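+
+/*
+ * Alignment sketch for the catbuf path above (the address is assumed for
+ * illustration): with skb->data ending in 0x...05, buffer_data rounds up to
+ * the next 8-byte boundary, so unaligned_len = 3.  Those 3 bytes are copied
+ * inline at data.dt + 117 (data_offset = 125), and the remaining
+ * skb->len - 3 bytes are DMA-mapped; start_addr stores the bus address >> 3,
+ * apparently in 8-byte units (note the 3-bit pad in tx_cat_ptr).
+ */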
+#define TX_CATBUF2 BIT(26)
+static void meth_tx_2page_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
+ void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
+ int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
+ int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
+ int buffer2_len = skb->len - buffer1_len - unaligned_len;
+ dma_addr_t catbuf1, catbuf2;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
+ /* unaligned part */
+ if (unaligned_len){
+ memcpy(desc->data.dt + (120 - unaligned_len),
+ skb->data, unaligned_len);
+ desc->header.raw |= (128 - unaligned_len) << 16;
+ }
+
+ /* first page */
+ catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
+ desc->data.cat_buf[0].form.len = buffer1_len - 1;
+ /* second page */
+ catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
+ desc->data.cat_buf[1].form.len = buffer2_len - 1;
+}
+
+static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
+{
+ /* Remember the skb, so we can free it at interrupt time */
+ priv->tx_skbs[priv->tx_write] = skb;
+ if (skb->len <= 120) {
+ /* Whole packet fits into descriptor */
+ meth_tx_short_prepare(priv, skb);
+ } else if (PAGE_ALIGN((unsigned long)skb->data) !=
+ PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
+ /* Packet crosses page boundary */
+ meth_tx_2page_prepare(priv, skb);
+ } else {
+ /* Packet is in one page */
+ meth_tx_1page_prepare(priv, skb);
+ }
+ priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
+ mace->eth.tx_info = priv->tx_write;
+ priv->tx_count++;
+}
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ /* Stop DMA notification */
+ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ meth_add_to_tx_ring(priv, skb);
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ /* If TX ring is full, tell the upper layer to stop sending packets */
+ if (meth_tx_full(dev)) {
+ printk(KERN_DEBUG "TX full: stopping\n");
+ netif_stop_queue(dev);
+ }
+
+ /* Restart DMA notification */
+ priv->dma_ctrl |= METH_DMA_TX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void meth_tx_timeout(struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
+
+ /* Protect against concurrent rx interrupts */
+ spin_lock_irqsave(&priv->meth_lock,flags);
+
+ /* Try to reset the interface. */
+ meth_reset(dev);
+
+ priv->stats.tx_errors++;
+
+ /* Clear all rings */
+ meth_free_tx_ring(priv);
+ meth_free_rx_ring(priv);
+ meth_init_tx_ring(priv);
+ meth_init_rx_ring(priv);
+
+ /* Restart dma */
+ priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ /* Enable interrupt */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+
+ return;
+}
+
+/*
+ * Ioctl commands
+ */
+static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ /* XXX Not yet implemented */
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * Return statistics to the caller
+ */
+static struct net_device_stats *meth_stats(struct net_device *dev)
+{
+ struct meth_private *priv = (struct meth_private *) dev->priv;
+ return &priv->stats;
+}
+
+/*
+ * The init function.
+ */
+static struct net_device *meth_init(void)
+{
+ struct net_device *dev;
+ struct meth_private *priv;
+ int ret;
+
+ dev = alloc_etherdev(sizeof(struct meth_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->open = meth_open;
+ dev->stop = meth_release;
+ dev->hard_start_xmit = meth_tx;
+ dev->do_ioctl = meth_ioctl;
+ dev->get_stats = meth_stats;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = meth_tx_timeout;
+ dev->watchdog_timeo = timeout;
+#endif
+ dev->irq = MACE_ETHERNET_IRQ;
+ dev->base_addr = (unsigned long)&mace->eth;
+
+ priv = (struct meth_private *) dev->priv;
+ spin_lock_init(&priv->meth_lock);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ free_netdev(dev);
+ return ERR_PTR(ret);
+ }
+
+ printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
+ dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
+	return dev;
+}
+
+static struct net_device *meth_dev;
+
+static int __init meth_init_module(void)
+{
+ meth_dev = meth_init();
+ if (IS_ERR(meth_dev))
+ return PTR_ERR(meth_dev);
+ return 0;
+}
+
+static void __exit meth_exit_module(void)
+{
+ unregister_netdev(meth_dev);
+ free_netdev(meth_dev);
+}
+
+module_init(meth_init_module);
+module_exit(meth_exit_module);
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
new file mode 100644
index 000000000000..84960dae2a22
--- /dev/null
+++ b/drivers/net/meth.h
@@ -0,0 +1,246 @@
+
+/*
+ * snull.h -- definitions for the network module
+ *
+ * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
+ * Copyright (C) 2001 O'Reilly & Associates
+ *
+ * The source code in this file can be freely used, adapted,
+ * and redistributed in source or binary form, so long as an
+ * acknowledgment appears in derived source files. The citation
+ * should list that the code comes from the book "Linux Device
+ * Drivers" by Alessandro Rubini and Jonathan Corbet, published
+ * by O'Reilly & Associates. No warranty is attached;
+ * we cannot take responsibility for errors or fitness for use.
+ */
+
+/* version dependencies have been confined to a separate file */
+
+/* Tunable parameters */
+#define TX_RING_ENTRIES 64 /* 64-512?*/
+
+#define RX_RING_ENTRIES 16 /* Do not change */
+/* Internal constants */
+#define TX_RING_BUFFER_SIZE (TX_RING_ENTRIES*sizeof(tx_packet))
+#define RX_BUFFER_SIZE 1546 /* ethernet packet size */
+#define METH_RX_BUFF_SIZE 4096
+#define METH_RX_HEAD 34 /* status + 3 quad garbage-fill + 2 byte zero-pad */
+#define RX_BUFFER_OFFSET (sizeof(rx_status_vector)+2) /* status vector + 2 bytes of padding */
+#define RX_BUCKET_SIZE 256
+
+#undef BIT
+#define BIT(x) (1UL << (x))
+
+/* For more detailed explanations of what each field means,
+   see Nick's great comments on the #defines below (or the docs, if
+   you are lucky enough to get hold of them :)*/
+
+/* tx status vector is written over tx command header upon
+ dma completion. */
+
+typedef struct tx_status_vector {
+ u64 sent:1; /* always set to 1...*/
+ u64 pad0:34;/* always set to 0 */
+ u64 flags:9; /*I'm too lazy to specify each one separately at the moment*/
+ u64 col_retry_cnt:4; /*collision retry count*/
+ u64 len:16; /*Transmit length in bytes*/
+} tx_status_vector;
+
+/*
+ * Each packet is 128 bytes long.
+ * It consists of a header, 0-3 concatenation
+ * buffer pointers and up to 120 data bytes.
+ */
+typedef struct tx_packet_hdr {
+ u64 pad1:36; /*should be filled with 0 */
+ u64 cat_ptr3_valid:1, /*Concatenation pointer valid flags*/
+ cat_ptr2_valid:1,
+ cat_ptr1_valid:1;
+ u64 tx_int_flag:1; /*Generate TX interrupt when packet has been sent*/
+ u64 term_dma_flag:1; /*Terminate transmit DMA on transmit abort conditions*/
+ u64 data_offset:7; /*Starting byte offset in ring data block*/
+ u64 data_len:16; /*Length of valid data in bytes-1*/
+} tx_packet_hdr;
+typedef union tx_cat_ptr {
+ struct {
+ u64 pad2:16; /* should be 0 */
+ u64 len:16; /*length of buffer data - 1*/
+ u64 start_addr:29; /*Physical starting address*/
+ u64 pad1:3; /* should be zero */
+ } form;
+ u64 raw;
+} tx_cat_ptr;
+
+typedef struct tx_packet {
+ union {
+ tx_packet_hdr header;
+ tx_status_vector res;
+ u64 raw;
+ }header;
+ union {
+ tx_cat_ptr cat_buf[3];
+ char dt[120];
+ } data;
+} tx_packet;
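+
+/*
+ * Layout sketch of the 128-byte TX descriptor, as used by the
+ * meth_tx_*_prepare() helpers in meth.c (summary, not a hardware spec):
+ *   bytes 0..7    header (tx_packet_hdr, overwritten by tx_status_vector
+ *                 once the hardware completes the packet)
+ *   bytes 8..127  either inline data for short packets (right-justified so
+ *                 the frame ends at byte 127), or up to 3 tx_cat_ptr entries
+ *                 plus an inline copy of the unaligned head of the buffer.
+ */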
+
+typedef union rx_status_vector {
+ volatile struct {
+ u64 pad1:1;/*fill it with ones*/
+ u64 pad2:15;/*fill with 0*/
+ u64 ip_chk_sum:16;
+ u64 seq_num:5;
+ u64 mac_addr_match:1;
+ u64 mcast_addr_match:1;
+ u64 carrier_event_seen:1;
+ u64 bad_packet:1;
+ u64 long_event_seen:1;
+ u64 invalid_preamble:1;
+ u64 broadcast:1;
+ u64 multicast:1;
+ u64 crc_error:1;
+ u64 huh:1;/*???*/
+ u64 rx_code_violation:1;
+ u64 rx_len:16;
+ } parsed;
+ volatile u64 raw;
+} rx_status_vector;
+
+typedef struct rx_packet {
+ rx_status_vector status;
+ u64 pad[3]; /* For whatever reason, there needs to be a 4 double-word offset */
+ u16 pad2;
+ char buf[METH_RX_BUFF_SIZE-sizeof(rx_status_vector)-3*sizeof(u64)-sizeof(u16)];/* data */
+} rx_packet;
+
+#define TX_INFO_RPTR 0x00FF0000
+#define TX_INFO_WPTR 0x000000FF
+
+ /* Bits in METH_MAC */
+
+#define SGI_MAC_RESET BIT(0) /* 0: MAC110 active in run mode, 1: Global reset signal to MAC110 core is active */
+#define METH_PHY_FDX BIT(1) /* 0: Disable full duplex, 1: Enable full duplex */
+#define METH_PHY_LOOP BIT(2) /* 0: Normal operation, follows 10/100mbit and M10T/MII select, 1: loops internal MII bus */
+ /* selects ignored */
+#define METH_100MBIT BIT(3) /* 0: 10meg mode, 1: 100meg mode */
+#define METH_PHY_MII BIT(4) /* 0: MII selected, 1: SIA selected */
+ /* Note: when loopback is set this bit becomes collision control. Setting this bit will */
+ /* cause a collision to be reported. */
+
+ /* Bits 5 and 6 are used to determine the Destination address filter mode */
+#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
+#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
+#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
+#define METH_PROMISC 0x60 /* 11: Promiscuous mode */
+
+#define METH_PHY_LINK_FAIL BIT(7) /* 0: Link failure detection disabled, 1: Hardware scans for link failure in PHY */
+
+#define METH_MAC_IPG 0x1ffff00
+
+#define METH_DEFAULT_IPG ((17<<15) | (11<<22) | (21<<8))
+ /* 0x172e5c00 */ /* 23, 23, 23 */ /*0x54A9500 *//*21,21,21*/
+ /* Bits 8 through 14 are used to determine Inter-Packet Gap between "Back to Back" packets */
+ /* The gap depends on the clock speed of the link, 80ns per increment for 100baseT, 800ns */
+ /* per increment for 10BaseT */
+
+ /* Bits 15 through 21 are used to determine IPGR1 */
+
+ /* Bits 22 through 28 are used to determine IPGR2 */
+
+#define METH_REV_SHIFT 29 /* Bits 29 through 31 are used to determine the revision */
+ /* 000: Initial revision */
+ /* 001: First revision, Improved TX concatenation */
+
+
+/* DMA control bits */
+#define METH_RX_OFFSET_SHIFT 12 /* Bits 12:14 of DMA control register indicate starting offset of packet data for RX operation */
+#define METH_RX_DEPTH_SHIFT 4 /* Bits 8:4 define RX fifo depth -- when # of RX fifo entries != depth, interrupt is generated */
+
+#define METH_DMA_TX_EN BIT(1) /* enable TX DMA */
+#define METH_DMA_TX_INT_EN BIT(0) /* enable TX Buffer Empty interrupt */
+#define METH_DMA_RX_EN BIT(15) /* Enable RX */
+#define METH_DMA_RX_INT_EN BIT(9) /* Enable interrupt on RX packet */
+
+/* RX FIFO MCL Info bits */
+#define METH_RX_FIFO_WPTR(x) (((x)>>16)&0xf)
+#define METH_RX_FIFO_RPTR(x) (((x)>>8)&0xf)
+#define METH_RX_FIFO_DEPTH(x) ((x)&0x1f)
+
+/* RX status bits */
+
+#define METH_RX_ST_VALID BIT(63)
+#define METH_RX_ST_RCV_CODE_VIOLATION BIT(16)
+#define METH_RX_ST_DRBL_NBL BIT(17)
+#define METH_RX_ST_CRC_ERR BIT(18)
+#define METH_RX_ST_MCAST_PKT BIT(19)
+#define METH_RX_ST_BCAST_PKT BIT(20)
+#define METH_RX_ST_INV_PREAMBLE_CTX BIT(21)
+#define METH_RX_ST_LONG_EVT_SEEN BIT(22)
+#define METH_RX_ST_BAD_PACKET BIT(23)
+#define METH_RX_ST_CARRIER_EVT_SEEN BIT(24)
+#define METH_RX_ST_MCAST_FILTER_MATCH BIT(25)
+#define METH_RX_ST_PHYS_ADDR_MATCH BIT(26)
+
+#define METH_RX_STATUS_ERRORS \
+ ( \
+ METH_RX_ST_RCV_CODE_VIOLATION| \
+ METH_RX_ST_CRC_ERR| \
+ METH_RX_ST_INV_PREAMBLE_CTX| \
+ METH_RX_ST_LONG_EVT_SEEN| \
+ METH_RX_ST_BAD_PACKET| \
+ METH_RX_ST_CARRIER_EVT_SEEN \
+ )
+ /* Bits in METH_INT */
+ /* Write _1_ to corresponding bit to clear */
+#define METH_INT_TX_EMPTY BIT(0) /* 0: No interrupt pending, 1: The TX ring buffer is empty */
+#define METH_INT_TX_PKT BIT(1) /* 0: No interrupt pending */
+ /* 1: A TX message had the INT request bit set, the packet has been sent. */
+#define METH_INT_TX_LINK_FAIL BIT(2) /* 0: No interrupt pending, 1: PHY has reported a link failure */
+#define METH_INT_MEM_ERROR BIT(3) /* 0: No interrupt pending */
+ /* 1: A memory error occurred during DMA, DMA stopped, Fatal */
+#define METH_INT_TX_ABORT BIT(4) /* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */
+#define METH_INT_RX_THRESHOLD BIT(5) /* 0: No interrupt pending, 1: Selected receive threshold condition Valid */
+#define METH_INT_RX_UNDERFLOW BIT(6) /* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */
+#define METH_INT_RX_OVERFLOW BIT(7) /* 0: No interrupt pending, 1: DMA FIFO Overflow, DMA stopped, FATAL */
+
+/*#define METH_INT_RX_RPTR_MASK 0x0001F00*/ /* Bits 8 through 12 alias of RX read-pointer */
+#define METH_INT_RX_RPTR_MASK 0x0000F00 /* Bits 8 through 11 alias of RX read-pointer - so, is Rx FIFO 16 or 32 entry?*/
+
+ /* Bits 13 through 15 are always 0. */
+
+#define METH_INT_TX_RPTR_MASK 0x1FF0000 /* Bits 16 through 24 alias of TX read-pointer */
+
+#define METH_INT_RX_SEQ_MASK 0x2E000000 /* Bits 25 through 29 are the starting seq number for the message at the */
+
+ /* top of the queue */
+
+#define METH_INT_ERROR (METH_INT_TX_LINK_FAIL| \
+ METH_INT_MEM_ERROR| \
+ METH_INT_TX_ABORT| \
+ METH_INT_RX_OVERFLOW| \
+ METH_INT_RX_UNDERFLOW)
+
+#define METH_INT_MCAST_HASH BIT(30) /* If RX DMA is enabled the hash select logic output is latched here */
+
+/* TX status bits */
+#define METH_TX_ST_DONE BIT(63) /* TX complete */
+#define METH_TX_ST_SUCCESS BIT(23) /* Packet was transmitted successfully */
+#define METH_TX_ST_TOOLONG BIT(24) /* TX abort due to excessive length */
+#define METH_TX_ST_UNDERRUN BIT(25) /* TX abort due to underrun (?) */
+#define METH_TX_ST_EXCCOLL BIT(26) /* TX abort due to excess collisions */
+#define METH_TX_ST_DEFER BIT(27) /* TX abort due to excess deferrals */
+#define METH_TX_ST_LATECOLL BIT(28) /* TX abort due to late collision */
+
+
+/* Tx command header bits */
+#define METH_TX_CMD_INT_EN BIT(24) /* Generate TX interrupt when packet is sent */
+
+/* Phy MDIO interface busy flag */
+#define MDIO_BUSY BIT(16)
+#define MDIO_DATA_MASK 0xFFFF
+/* PHY defines */
+#define PHY_QS6612X 0x0181441 /* Quality TX */
+#define PHY_ICS1889 0x0015F41 /* ICS FX */
+#define PHY_ICS1890 0x0015F42 /* ICS TX */
+#define PHY_DP83840 0x20005C0 /* National TX */
+
+#define ADVANCE_RX_PTR(x) x=(x+1)&(RX_RING_ENTRIES-1)
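+
+/* The mask-based wrap above (and the analogous TX-ring arithmetic in meth.c)
+ * relies on RX_RING_ENTRIES and TX_RING_ENTRIES being powers of two:
+ * e.g. with RX_RING_ENTRIES = 16, (15 + 1) & 15 == 0, so the pointer wraps
+ * back to the first entry. */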
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
new file mode 100644
index 000000000000..c33cb3dc942b
--- /dev/null
+++ b/drivers/net/mii.c
@@ -0,0 +1,398 @@
+/*
+
+ mii.c: MII interface library
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2001,2002 Jeff Garzik
+
+ Various code came from myson803.c and other files by
+ Donald Becker. Copyright:
+
+ Written 1998-2002 by Donald Becker.
+
+ This software may be used and distributed according
+ to the terms of the GNU General Public License (GPL),
+ incorporated herein by reference. Drivers based on
+ or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice.
+ This file is not a complete program and may only be
+ used when the entire operating system is licensed
+ under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 advert, bmcr, lpa, nego;
+ u32 advert2 = 0, bmcr2 = 0, lpa2 = 0;
+
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+ if (mii->supports_gmii)
+ ecmd->supported |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+
+ /* only supports twisted-pair */
+ ecmd->port = PORT_MII;
+
+ /* only supports internal transceiver */
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ /* this isn't fully supported at higher layers */
+ ecmd->phy_address = mii->phy_id;
+
+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+
+ if (advert & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (advert2 & ADVERTISE_1000HALF)
+ ecmd->advertising |= ADVERTISED_1000baseT_Half;
+ if (advert2 & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ if (mii->supports_gmii) {
+ bmcr2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+ lpa2 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
+ }
+ if (bmcr & BMCR_ANENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ nego = mii_nway_result(advert & lpa);
+ if ((bmcr2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &
+ (lpa2 >> 2))
+ ecmd->speed = SPEED_1000;
+ else if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if ((lpa2 & LPA_1000FULL) || nego == LPA_100FULL ||
+ nego == LPA_10FULL) {
+ ecmd->duplex = DUPLEX_FULL;
+ mii->full_duplex = 1;
+ } else {
+ ecmd->duplex = DUPLEX_HALF;
+ mii->full_duplex = 0;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
+ (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
+ (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+
+ if (ecmd->speed != SPEED_10 &&
+ ecmd->speed != SPEED_100 &&
+ ecmd->speed != SPEED_1000)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != mii->phy_id)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ if ((ecmd->speed == SPEED_1000) && (!mii->supports_gmii))
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+ u32 advert2 = 0, tmp2 = 0;
+
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (mii->supports_gmii) {
+ advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+ tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ }
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ tmp |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ tmp |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ tmp |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ tmp |= ADVERTISE_100FULL;
+ if (mii->supports_gmii) {
+ if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+ tmp2 |= ADVERTISE_1000HALF;
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ tmp2 |= ADVERTISE_1000FULL;
+ }
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+ if ((mii->supports_gmii) && (advert2 != tmp2))
+ mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off auto negotiation, set speed and duplexity */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ecmd->speed == SPEED_1000)
+ tmp |= BMCR_SPEED1000;
+ else if (ecmd->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (ecmd->duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else
+ mii->full_duplex = 0;
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
+int mii_link_ok (struct mii_if_info *mii)
+{
+ /* first, a dummy read, needed to latch some MII phys */
+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+ return 1;
+ return 0;
+}
+
+int mii_nway_restart (struct mii_if_info *mii)
+{
+ int bmcr;
+ int r = -EINVAL;
+
+ /* if autoneg is off, it's an error */
+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr |= BMCR_ANRESTART;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+ r = 0;
+ }
+
+ return r;
+}
+
+void mii_check_link (struct mii_if_info *mii)
+{
+ int cur_link = mii_link_ok(mii);
+ int prev_link = netif_carrier_ok(mii->dev);
+
+ if (cur_link && !prev_link)
+ netif_carrier_on(mii->dev);
+ else if (prev_link && !cur_link)
+ netif_carrier_off(mii->dev);
+}
+
+unsigned int mii_check_media (struct mii_if_info *mii,
+ unsigned int ok_to_print,
+ unsigned int init_media)
+{
+ unsigned int old_carrier, new_carrier;
+ int advertise, lpa, media, duplex;
+ int lpa2 = 0;
+
+ /* if forced media, go no further */
+ if (mii->force_media)
+ return 0; /* duplex did not change */
+
+ /* check current and old link status */
+ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
+ new_carrier = (unsigned int) mii_link_ok(mii);
+
+ /* if carrier state did not change, this is a "bounce",
+ * just exit as everything is already set correctly
+ */
+ if ((!init_media) && (old_carrier == new_carrier))
+ return 0; /* duplex did not change */
+
+ /* no carrier, nothing much to do */
+ if (!new_carrier) {
+ netif_carrier_off(mii->dev);
+ if (ok_to_print)
+ printk(KERN_INFO "%s: link down\n", mii->dev->name);
+ return 0; /* duplex did not change */
+ }
+
+ /*
+ * we have carrier, see who's on the other end
+ */
+ netif_carrier_on(mii->dev);
+
+ /* get MII advertise and LPA values */
+ if ((!init_media) && (mii->advertising))
+ advertise = mii->advertising;
+ else {
+ advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+ mii->advertising = advertise;
+ }
+ lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+ if (mii->supports_gmii)
+ lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+ /* figure out media and duplex from advertise and LPA values */
+ media = mii_nway_result(lpa & advertise);
+ duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+ if (lpa2 & LPA_1000FULL)
+ duplex = 1;
+
+ if (ok_to_print)
+ printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
+ mii->dev->name,
+ lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" :
+ media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
+ duplex ? "full" : "half",
+ lpa);
+
+ if ((init_media) || (mii->full_duplex != duplex)) {
+ mii->full_duplex = duplex;
+ return 1; /* duplex changed */
+ }
+
+ return 0; /* duplex did not change */
+}
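+
+/*
+ * Worked example for the duplex resolution above (register values assumed
+ * for illustration): with advertise == lpa == 0x01e1 (10/100, half and full),
+ * mii_nway_result(lpa & advertise) picks the best common mode, 100baseT
+ * full duplex, so media & ADVERTISE_FULL is non-zero and the link is
+ * reported as "100Mbps, full-duplex".
+ */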
+
+int generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_chg_out)
+{
+ int rc = 0;
+ unsigned int duplex_changed = 0;
+
+ if (duplex_chg_out)
+ *duplex_chg_out = 0;
+
+ mii_data->phy_id &= mii_if->phy_id_mask;
+ mii_data->reg_num &= mii_if->reg_num_mask;
+
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ mii_data->phy_id = mii_if->phy_id;
+ /* fall through */
+
+ case SIOCGMIIREG:
+ mii_data->val_out =
+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num);
+ break;
+
+ case SIOCSMIIREG: {
+ u16 val = mii_data->val_in;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == mii_if->phy_id) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR: {
+ unsigned int new_duplex = 0;
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ mii_if->force_media = 0;
+ else
+ mii_if->force_media = 1;
+ if (mii_if->force_media &&
+ (val & BMCR_FULLDPLX))
+ new_duplex = 1;
+ if (mii_if->full_duplex != new_duplex) {
+ duplex_changed = 1;
+ mii_if->full_duplex = new_duplex;
+ }
+ break;
+ }
+ case MII_ADVERTISE:
+ mii_if->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num, val);
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+ *duplex_chg_out = 1;
+
+ return rc;
+}
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION ("MII hardware support library");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(mii_link_ok);
+EXPORT_SYMBOL(mii_nway_restart);
+EXPORT_SYMBOL(mii_ethtool_gset);
+EXPORT_SYMBOL(mii_ethtool_sset);
+EXPORT_SYMBOL(mii_check_link);
+EXPORT_SYMBOL(mii_check_media);
+EXPORT_SYMBOL(generic_mii_ioctl);
+
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
new file mode 100644
index 000000000000..d6de213720f4
--- /dev/null
+++ b/drivers/net/mv643xx_eth.c
@@ -0,0 +1,3033 @@
+/*
+ * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
+ * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 rabeeh@galileo.co.il
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ * written by Manish Lachwani (lachwani@pmc-sierra.com)
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (C) 2004-2005 MontaVista Software, Inc.
+ * Dale Farnsworth <dale@farnsworth.org>
+ *
+ * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
+ * <sjhill@realitydiluted.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include "mv643xx_eth.h"
+
+/*
+ * The first part is the high level driver of the gigE ethernet ports.
+ */
+
+/* Constants */
+#define VLAN_HLEN 4
+#define FCS_LEN 4
+#define WRAP (NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN)
+#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
+
+#define INT_CAUSE_UNMASK_ALL 0x0007ffff
+#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
+#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
+#define INT_CAUSE_MASK_ALL 0x00000000
+#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
+#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
+#endif
+
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
+#else
+#define MAX_DESCS_PER_SKB 1
+#endif
+
+#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
+#define PHY_WAIT_MICRO_SECONDS 10
+
+/* Static function declarations */
+static int eth_port_link_is_up(unsigned int eth_port_num);
+static void eth_port_uc_addr_get(struct net_device *dev,
+ unsigned char *MacAddr);
+static int mv643xx_eth_real_open(struct net_device *);
+static int mv643xx_eth_real_stop(struct net_device *);
+static int mv643xx_eth_change_mtu(struct net_device *, int);
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
+static void eth_port_init_mac_tables(unsigned int eth_port_num);
+#ifdef MV643XX_NAPI
+static int mv643xx_poll(struct net_device *dev, int *budget);
+#endif
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
+static int ethernet_phy_detect(unsigned int eth_port_num);
+static struct ethtool_ops mv643xx_ethtool_ops;
+
+static char mv643xx_driver_name[] = "mv643xx_eth";
+static char mv643xx_driver_version[] = "1.0";
+
+static void __iomem *mv643xx_eth_shared_base;
+
+/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
+static spinlock_t mv643xx_eth_phy_lock = SPIN_LOCK_UNLOCKED;
+
+static inline u32 mv_read(int offset)
+{
+ void __iomem *reg_base;
+
+ reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
+
+ return readl(reg_base + offset);
+}
+
+static inline void mv_write(int offset, u32 data)
+{
+ void __iomem *reg_base;
+
+ reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
+ writel(data, reg_base + offset);
+}
+
+/*
+ * Changes the MTU (maximum transmission unit) of the gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ * new mtu size
+ * Output : 0 upon success, -EINVAL upon failure
+ */
+static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ if ((new_mtu > 9500) || (new_mtu < 64)) {
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ /*
+ * Stop then re-open the interface. This will allocate RX skb's with
+ * the new MTU.
+ * There is a possible danger that the open will not succeed if
+ * memory is exhausted, which would cause the open to fail.
+ */
+ if (netif_running(dev)) {
+ if (mv643xx_eth_real_stop(dev))
+ printk(KERN_ERR
+ "%s: Fatal error on stopping device\n",
+ dev->name);
+ if (mv643xx_eth_real_open(dev))
+ printk(KERN_ERR
+ "%s: Fatal error on opening device\n",
+ dev->name);
+ }
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 0;
+}
+
+/*
+ * mv643xx_eth_rx_task
+ *
+ * Fills / refills RX queue on a certain gigabit ethernet port
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv643xx_eth_rx_task(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mv643xx_private *mp = netdev_priv(dev);
+ struct pkt_info pkt_info;
+ struct sk_buff *skb;
+
+ if (test_and_set_bit(0, &mp->rx_task_busy))
+ panic("%s: Error in test_set_bit / clear_bit", dev->name);
+
+ while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
+ skb = dev_alloc_skb(RX_SKB_SIZE);
+ if (!skb)
+ break;
+ mp->rx_ring_skbs++;
+ pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
+ pkt_info.byte_cnt = RX_SKB_SIZE;
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ pkt_info.return_info = skb;
+ if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
+ printk(KERN_ERR
+ "%s: Error allocating RX Ring\n", dev->name);
+ break;
+ }
+ skb_reserve(skb, 2);
+ }
+ clear_bit(0, &mp->rx_task_busy);
+ /*
+ * If the RX ring is empty of skbs, set a timer to try allocating
+ * again at a later time.
+ */
+ if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
+ printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
+ /* After 100mSec */
+ mp->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&mp->timeout);
+ mp->rx_timer_flag = 1;
+ }
+#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
+ else {
+ /* Return interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
+ INT_CAUSE_UNMASK_ALL);
+ }
+#endif
+}
+
+/*
+ * mv643xx_eth_rx_task_timer_wrapper
+ *
+ * Timer routine to wake up RX queue filling task. This function is
+ * used only when the RX queue is empty and all skb allocations have
+ * failed (due to an out-of-memory event).
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ mp->rx_timer_flag = 0;
+ mv643xx_eth_rx_task((void *)data);
+}
+
+/*
+ * mv643xx_eth_update_mac_address
+ *
+ * Update the MAC address of the port in the address table
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv643xx_eth_update_mac_address(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+
+ eth_port_init_mac_tables(port_num);
+ memcpy(mp->port_mac_addr, dev->dev_addr, 6);
+ eth_port_uc_addr_set(port_num, mp->port_mac_addr);
+}
+
+/*
+ * mv643xx_eth_set_rx_mode
+ *
+ * Change from promiscuous to regular rx mode
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void mv643xx_eth_set_rx_mode(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ u32 config_reg;
+
+ config_reg = ethernet_get_config_reg(mp->port_num);
+ if (dev->flags & IFF_PROMISC)
+ config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
+ else
+ config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
+ ethernet_set_config_reg(mp->port_num, config_reg);
+}
+
+/*
+ * mv643xx_eth_set_mac_address
+ *
+ * Change the interface's mac address.
+ * No special hardware handling is needed because the interface is always
+ * put in promiscuous mode.
+ *
+ * Input : pointer to ethernet interface network device structure and
+ * a pointer to the designated entry to be added to the cache.
+ * Output : zero upon success, negative upon failure
+ */
+static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ /* +2 is for the offset of the HW addr type */
+ dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ mv643xx_eth_update_mac_address(dev);
+ return 0;
+}
+
+/*
+ * mv643xx_eth_tx_timeout
+ *
+ * Called upon a timeout on transmitting a packet
+ *
+ * Input : pointer to ethernet interface network device structure.
+ * Output : N/A
+ */
+static void mv643xx_eth_tx_timeout(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout\n", dev->name);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&mp->tx_timeout_task);
+}
+
+/*
+ * mv643xx_eth_tx_timeout_task
+ *
+ * Actual routine to reset the adapter when a timeout on Tx has occurred
+ */
+static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ netif_device_detach(dev);
+ eth_port_reset(mp->port_num);
+ eth_port_start(mp);
+ netif_device_attach(dev);
+}
+
+/*
+ * mv643xx_eth_free_tx_queue
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : 0 if it was able to release an skb, nonzero otherwise
+ */
+static int mv643xx_eth_free_tx_queue(struct net_device *dev,
+ unsigned int eth_int_cause_ext)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ struct pkt_info pkt_info;
+ int released = 1;
+
+ if (!(eth_int_cause_ext & (BIT0 | BIT8)))
+ return released;
+
+ spin_lock(&mp->lock);
+
+ /* Check only queue 0 */
+ while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
+ if (pkt_info.cmd_sts & BIT0) {
+ printk("%s: Error in TX\n", dev->name);
+ stats->tx_errors++;
+ }
+
+ /*
+ * If return_info is non-zero, release the skb.  return_info is
+ * zero only for the intermediate descriptors of a scatter/gather
+ * packet, where only the last descriptor releases the whole
+ * chain.
+ */
+ if (pkt_info.return_info) {
+ if (skb_shinfo(pkt_info.return_info)->nr_frags)
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_irq(pkt_info.return_info);
+ released = 0;
+
+ /*
+ * Decrement the number of outstanding skbs counter on
+ * the TX queue.
+ */
+ if (mp->tx_ring_skbs == 0)
+ panic("ERROR - TX outstanding SKBs"
+ " counter is corrupted");
+ mp->tx_ring_skbs--;
+ } else
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
+ }
+
+ spin_unlock(&mp->lock);
+
+ return released;
+}
+
+/*
+ * mv643xx_eth_receive
+ *
+ * This function forwards packets received from the port's
+ * queues to the kernel core or FastRoutes them to another interface.
+ *
+ * Input : dev - a pointer to the required interface
+ * max - maximum number to receive (0 means unlimited)
+ *
+ * Output : number of served packets
+ */
+#ifdef MV643XX_NAPI
+static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
+#else
+static int mv643xx_eth_receive_queue(struct net_device *dev)
+#endif
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+ struct pkt_info pkt_info;
+
+#ifdef MV643XX_NAPI
+ while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
+#else
+ while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
+#endif
+ mp->rx_ring_skbs--;
+ received_packets++;
+#ifdef MV643XX_NAPI
+ budget--;
+#endif
+ /* Update statistics. Note byte count includes 4 byte CRC count */
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_info.byte_cnt;
+ skb = pkt_info.return_info;
+ /*
+ * If a packet was received without the first/last descriptor bits
+ * set, or with the error summary bit set, the packet needs to be dropped.
+ */
+ if (((pkt_info.cmd_sts
+ & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
+ (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
+ || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
+ stats->rx_dropped++;
+ if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
+ ETH_RX_LAST_DESC)) !=
+ (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Received packet spread "
+ "on multiple descriptors\n",
+ dev->name);
+ }
+ if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
+ stats->rx_errors++;
+
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, pkt_info.byte_cnt - 4);
+ skb->dev = dev;
+
+ if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum = htons(
+ (pkt_info.cmd_sts & 0x0007fff8) >> 3);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef MV643XX_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif
+ }
+ }
+
+ return received_packets;
+}
+
+/*
+ * mv643xx_eth_int_handler
+ *
+ * Main interrupt handler for the gigabit ethernet ports
+ *
+ * Input : irq - irq number (not used)
+ * dev_id - a pointer to the required interface's data structure
+ * regs - not used
+ * Output : N/A
+ */
+
+static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct mv643xx_private *mp = netdev_priv(dev);
+ u32 eth_int_cause, eth_int_cause_ext = 0;
+ unsigned int port_num = mp->port_num;
+
+ /* Read interrupt cause registers */
+ eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
+ INT_CAUSE_UNMASK_ALL;
+
+ if (eth_int_cause & BIT1)
+ eth_int_cause_ext = mv_read(
+ MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
+ INT_CAUSE_UNMASK_ALL_EXT;
+
+#ifdef MV643XX_NAPI
+ if (!(eth_int_cause & 0x0007fffd)) {
+ /* Don't ack the Rx interrupt */
+#endif
+ /*
+ * Clear the specific ethernet port interrupt registers by
+ * acknowledging the relevant bits.
+ */
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
+ ~eth_int_cause);
+ if (eth_int_cause_ext != 0x0)
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
+ (port_num), ~eth_int_cause_ext);
+
+ /* UDP change : We may need this */
+ if ((eth_int_cause_ext & 0x0000ffff) &&
+ (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
+ (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+ netif_wake_queue(dev);
+#ifdef MV643XX_NAPI
+ } else {
+ if (netif_rx_schedule_prep(dev)) {
+ /* Mask all the interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
+ (port_num), 0);
+ __netif_rx_schedule(dev);
+ }
+#else
+ if (eth_int_cause & (BIT2 | BIT11))
+ mv643xx_eth_receive_queue(dev, 0);
+
+ /*
+ * After forwarded received packets to upper layer, add a task
+ * in an interrupts enabled context that refills the RX ring
+ * with skb's.
+ */
+#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
+ /* Unmask all interrupts on ethernet port */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_MASK_ALL);
+ queue_task(&mp->rx_task, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#else
+ mp->rx_task.func(dev);
+#endif
+#endif
+ }
+ /* PHY status changed */
+ if (eth_int_cause_ext & (BIT16 | BIT20)) {
+ if (eth_port_link_is_up(port_num)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ /* Start TX queue */
+ mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG
+ (port_num), 1);
+ } else {
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ }
+ }
+
+ /*
+ * If no real interrupt occurred, exit.
+ * This can happen when using the gigE interrupt coalescing mechanism.
+ */
+ if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+#ifdef MV643XX_COAL
+
+/*
+ * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
+ *
+ * DESCRIPTION:
+ * This routine sets the RX coalescing interrupt mechanism parameter.
+ * The parameter is a timeout counter that counts in 64 t_clk
+ * chunks; when the timeout expires, a maskable interrupt is
+ * asserted.
+ * The parameter is calculated from the t_clk of the MV-643xx chip
+ * and the required interrupt delay in usec.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet port number
+ * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
+ * unsigned int delay Delay in usec
+ *
+ * OUTPUT:
+ * Interrupt coalescing mechanism value is set in MV-643xx chip.
+ *
+ * RETURN:
+ * The interrupt coalescing value set in the gigE port.
+ *
+ */
+static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
+ unsigned int t_clk, unsigned int delay)
+{
+ unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+
+ /* Set RX Coalescing mechanism */
+ mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
+ ((coal & 0x3fff) << 8) |
+ (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
+ & 0xffc000ff));
+
+ return coal;
+}
+#endif
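+
+/*
+ * Worked example for the coalescing formula above (the 20 usec delay is
+ * assumed for illustration; the driver actually passes MV643XX_RX_COAL):
+ * with t_clk = 133000000 and delay = 20,
+ * coal = (133000000 / 1000000 * 20) / 64 = 41, i.e. a timeout of about
+ * 41 * 64 = 2624 t_clk cycles (~20 usec).
+ */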
+
+/*
+ * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
+ *
+ * DESCRIPTION:
+ * This routine sets the TX coalescing interrupt mechanism parameter.
+ * The parameter is a timeout counter that counts in 64 t_clk
+ * chunks; when the timeout expires, a maskable interrupt is
+ * asserted.
+ * The parameter is calculated from the t_clk frequency of the
+ * MV-643xx chip and the required interrupt delay in usec.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet port number
+ * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
+ * unsigned int delay Delay in uSeconds
+ *
+ * OUTPUT:
+ * Interrupt coalescing mechanism value is set in MV-643xx chip.
+ *
+ * RETURN:
+ * The interrupt coalescing value set in the gigE port.
+ *
+ */
+static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
+ unsigned int t_clk, unsigned int delay)
+{
+ unsigned int coal;
+ coal = ((t_clk / 1000000) * delay) / 64;
+ /* Set TX Coalescing mechanism */
+ mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
+ coal << 4);
+ return coal;
+}
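+
+/*
+ * The TX path uses the same ((t_clk / 1000000) * delay) / 64 formula as the
+ * RX example above; the resulting count is simply written shifted left by 4
+ * into the TX FIFO urgent threshold register.
+ */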
+
+/*
+ * mv643xx_eth_open
+ *
+ * This function is called when opening the network device. The function
+ * should initialize all the hardware, initialize the cyclic Rx/Tx
+ * descriptor chains and buffers, and allocate an IRQ for the network
+ * device.
+ *
+ * Input : a pointer to the network device structure
+ *
+ * Output : zero on success, nonzero on failure.
+ */
+
+static int mv643xx_eth_open(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ int err;
+
+ spin_lock_irq(&mp->lock);
+
+ err = request_irq(dev->irq, mv643xx_eth_int_handler,
+ SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
+
+ if (err) {
+ printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
+ port_num);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ if (mv643xx_eth_real_open(dev)) {
+ printk("%s: Error opening interface\n", dev->name);
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ spin_unlock_irq(&mp->lock);
+
+ return 0;
+
+out_free:
+ free_irq(dev->irq, dev);
+
+out:
+ spin_unlock_irq(&mp->lock);
+
+ return err;
+}
+
+/*
+ * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
+ *
+ * DESCRIPTION:
+ * This function prepares a Rx chained list of descriptors and packet
+ * buffers in the form of a ring. The routine must be called after the
+ * port initialization routine and before the port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ *
+ * OUTPUT:
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Rx descriptors and buffers.
+ *
+ * RETURN:
+ * None.
+ */
+static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
+{
+ volatile struct eth_rx_desc *p_rx_desc;
+ int rx_desc_num = mp->rx_ring_size;
+ int i;
+
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
+ }
+
+ /* Save Rx desc pointer to driver struct. */
+ mp->rx_curr_desc_q = 0;
+ mp->rx_used_desc_q = 0;
+
+ mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
+
+ /* Add the queue to the list of RX queues of this port */
+ mp->port_rx_queue_command |= 1;
+}
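+
+/*
+ * Chaining sketch for the loop above (a ring of 4 descriptors is assumed
+ * purely for illustration): descriptors 0..2 get next_desc_ptr =
+ * rx_desc_dma + (i + 1) * sizeof(struct eth_rx_desc), while descriptor 3
+ * wraps via the modulo back to rx_desc_dma, closing the ring that the
+ * SDMA engine walks.
+ */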
+
+/*
+ * ether_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory.
+ *
+ * DESCRIPTION:
+ * This function prepares a Tx chained list of descriptors and packet
+ * buffers in the form of a ring. The routine must be called after the
+ * port initialization routine and before the port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ *
+ * OUTPUT:
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Tx descriptors and buffers.
+ *
+ * RETURN:
+ * None.
+ */
+static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
+{
+ int tx_desc_num = mp->tx_ring_size;
+ struct eth_tx_desc *p_tx_desc;
+ int i;
+
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
+ }
+
+ mp->tx_curr_desc_q = 0;
+ mp->tx_used_desc_q = 0;
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ mp->tx_first_desc_q = 0;
+#endif
+
+ mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
+
+ /* Add the queue to the list of Tx queues of this port */
+ mp->port_tx_queue_command |= 1;
+}
+
+/* Helper function for mv643xx_eth_open */
+static int mv643xx_eth_real_open(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ unsigned int size;
+
+ /* Stop RX Queues */
+ mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
+
+ /* Clear the ethernet port interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+ /* Unmask RX buffer and TX end interrupt */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL);
+
+ /* Unmask phy and link status changes interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL_EXT);
+
+ /* Set the MAC Address */
+ memcpy(mp->port_mac_addr, dev->dev_addr, 6);
+
+ eth_port_init(mp);
+
+ INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);
+
+ memset(&mp->timeout, 0, sizeof(struct timer_list));
+ mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
+ mp->timeout.data = (unsigned long)dev;
+
+ mp->rx_task_busy = 0;
+ mp->rx_timer_flag = 0;
+
+ /* Allocate RX and TX skb rings */
+ mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
+ GFP_KERNEL);
+ if (!mp->rx_skb) {
+ printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
+ GFP_KERNEL);
+ if (!mp->tx_skb) {
+ printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
+ kfree(mp->rx_skb);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX ring */
+ mp->tx_ring_skbs = 0;
+ size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
+ mp->tx_desc_area_size = size;
+
+ if (mp->tx_sram_size) {
+ mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
+ mp->tx_sram_size);
+ mp->tx_desc_dma = mp->tx_sram_addr;
+ } else
+ mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
+ &mp->tx_desc_dma,
+ GFP_KERNEL);
+
+ if (!mp->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ kfree(mp->rx_skb);
+ kfree(mp->tx_skb);
+ return -ENOMEM;
+ }
+ BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
+ memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
+
+ ether_init_tx_desc_ring(mp);
+
+ /* Allocate RX ring */
+ mp->rx_ring_skbs = 0;
+ size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
+ mp->rx_desc_area_size = size;
+
+ if (mp->rx_sram_size) {
+ mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
+ mp->rx_sram_size);
+ mp->rx_desc_dma = mp->rx_sram_addr;
+ } else
+ mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
+ &mp->rx_desc_dma,
+ GFP_KERNEL);
+
+ if (!mp->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
+ dev->name, size);
+ printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
+ dev->name);
+ if (mp->rx_sram_size)
+ iounmap(mp->p_rx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->tx_desc_area_size,
+ mp->p_tx_desc_area, mp->tx_desc_dma);
+ kfree(mp->rx_skb);
+ kfree(mp->tx_skb);
+ return -ENOMEM;
+ }
+ memset((void *)mp->p_rx_desc_area, 0, size);
+
+ ether_init_rx_desc_ring(mp);
+
+ mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */
+
+ eth_port_start(mp);
+
+ /* Interrupt Coalescing */
+
+#ifdef MV643XX_COAL
+ mp->rx_int_coal =
+ eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
+#endif
+
+ mp->tx_int_coal =
+ eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void mv643xx_eth_free_tx_rings(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ unsigned int curr;
+
+ /* Stop Tx Queues */
+ mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
+
+ /* Free outstanding skb's on TX rings */
+ for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
+ if (mp->tx_skb[curr]) {
+ dev_kfree_skb(mp->tx_skb[curr]);
+ mp->tx_ring_skbs--;
+ }
+ }
+ if (mp->tx_ring_skbs)
+ printk("%s: Error on Tx descriptor free - could not free %d"
+ " descriptors\n", dev->name, mp->tx_ring_skbs);
+
+ /* Free TX ring */
+ if (mp->tx_sram_size)
+ iounmap(mp->p_tx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->tx_desc_area_size,
+ mp->p_tx_desc_area, mp->tx_desc_dma);
+}
+
+static void mv643xx_eth_free_rx_rings(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+ int curr;
+
+ /* Stop RX Queues */
+ mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) {
+ if (mp->rx_skb[curr]) {
+ dev_kfree_skb(mp->rx_skb[curr]);
+ mp->rx_ring_skbs--;
+ }
+ }
+
+ if (mp->rx_ring_skbs)
+ printk(KERN_ERR
+ "%s: Error in freeing Rx Ring. %d skb's still"
+ " stuck in RX Ring - ignoring them\n", dev->name,
+ mp->rx_ring_skbs);
+ /* Free RX ring */
+ if (mp->rx_sram_size)
+ iounmap(mp->p_rx_desc_area);
+ else
+ dma_free_coherent(NULL, mp->rx_desc_area_size,
+ mp->p_rx_desc_area, mp->rx_desc_dma);
+}
+
+/*
+ * mv643xx_eth_stop
+ *
+ * This function is used when closing the network device.
+ * It updates the hardware, releases all memory that holds buffers and
+ * descriptors, and releases the IRQ.
+ * Input : a pointer to the device structure
+ * Output : zero on success, nonzero on failure
+ */
+
+/* Helper function for mv643xx_eth_stop */
+
+static int mv643xx_eth_real_stop(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int port_num = mp->port_num;
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ mv643xx_eth_free_tx_rings(dev);
+ mv643xx_eth_free_rx_rings(dev);
+
+ eth_port_reset(mp->port_num);
+
+ /* Disable ethernet port interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+ /* Mask RX buffer and TX end interrupt */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
+
+ /* Mask phy and link status changes interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+
+ return 0;
+}
+
+static int mv643xx_eth_stop(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ spin_lock_irq(&mp->lock);
+
+ mv643xx_eth_real_stop(dev);
+
+ spin_unlock_irq(&mp->lock);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+#ifdef MV643XX_NAPI
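+/*
+ * mv643xx_tx
+ *
+ * Reclaims completed Tx descriptors: unmaps each transmitted buffer,
+ * frees the associated skb (if any) and wakes the transmit queue once
+ * enough descriptors are available again.
+ *
+ * Input : dev - a pointer to the required interface
+ */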
+static void mv643xx_tx(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ struct pkt_info pkt_info;
+
+ while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
+ if (pkt_info.return_info) {
+ if (skb_shinfo(pkt_info.return_info)->nr_frags)
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_irq(pkt_info.return_info);
+
+ if (mp->tx_ring_skbs)
+ mp->tx_ring_skbs--;
+ } else
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt, DMA_TO_DEVICE);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
+ netif_wake_queue(dev);
+}
+
+/*
+ * mv643xx_poll
+ *
+ * This function is used in case of NAPI: it processes received packets
+ * up to the given budget (periodically reclaiming Tx descriptors when
+ * MV643XX_TX_FAST_REFILL is enabled) and re-enables the port interrupts
+ * once the Rx ring has been drained.
+ */
+static int mv643xx_poll(struct net_device *dev, int *budget)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ int done = 1, orig_budget, work_done;
+ unsigned int port_num = mp->port_num;
+ unsigned long flags;
+
+#ifdef MV643XX_TX_FAST_REFILL
+ if (++mp->tx_clean_threshold > 5) {
+ spin_lock_irqsave(&mp->lock, flags);
+ mv643xx_tx(dev);
+ mp->tx_clean_threshold = 0;
+ spin_unlock_irqrestore(&mp->lock, flags);
+ }
+#endif
+
+ if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+ != (u32) mp->rx_used_desc_q) {
+ orig_budget = *budget;
+ if (orig_budget > dev->quota)
+ orig_budget = dev->quota;
+ work_done = mv643xx_eth_receive_queue(dev, orig_budget);
+ mp->rx_task.func(dev);
+ *budget -= work_done;
+ dev->quota -= work_done;
+ if (work_done >= orig_budget)
+ done = 0;
+ }
+
+ if (done) {
+ spin_lock_irqsave(&mp->lock, flags);
+ __netif_rx_complete(dev);
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL);
+ mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+ INT_CAUSE_UNMASK_ALL_EXT);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ }
+
+ return done ? 0 : 1;
+}
+#endif
+
+/*
+ * mv643xx_eth_start_xmit
+ *
+ * This function queues a packet in the Tx descriptor ring of the
+ * required port.
+ *
+ * Input : skb - a pointer to socket buffer
+ * dev - a pointer to the required port
+ *
+ * Output : zero upon success
+ */
+static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &mp->stats;
+ ETH_FUNC_RET_STATUS status;
+ unsigned long flags;
+ struct pkt_info pkt_info;
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_ERR
+ "%s: Tried sending packet when interface is stopped\n",
+ dev->name);
+ return 1;
+ }
+
+ /* This is a hard error, log it. */
+ if ((mp->tx_ring_size - mp->tx_ring_skbs) <=
+ (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ printk(KERN_ERR
+ "%s: Bug in mv643xx_eth - Trying to transmit when"
+ " queue full !\n", dev->name);
+ return 1;
+ }
+
+ /* Paranoid check - this shouldn't happen */
+ if (skb == NULL) {
+ stats->tx_dropped++;
+ printk(KERN_ERR "mv643xx_eth paranoid check failed\n");
+ return 1;
+ }
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ /* Update packet info data structure -- DMA owned, first last */
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ if (!skb_shinfo(skb)->nr_frags) {
+linear:
+ if (skb->ip_summed != CHECKSUM_HW) {
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
+ pkt_info.l4i_chk = 0;
+ } else {
+ u32 ipheader = skb->nh.iph->ihl << 11;
+
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |
+ ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM | ipheader;
+ /* CPU already calculated pseudo header checksum. */
+ if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ pkt_info.cmd_sts |= ETH_UDP_FRAME;
+ pkt_info.l4i_chk = skb->h.uh->check;
+ } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ pkt_info.l4i_chk = skb->h.th->check;
+ else {
+ printk(KERN_ERR
+ "%s: chksum proto != TCP or UDP\n",
+ dev->name);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 1;
+ }
+ }
+ pkt_info.byte_cnt = skb->len;
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ pkt_info.return_info = skb;
+ mp->tx_ring_skbs++;
+ status = eth_port_send(mp, &pkt_info);
+ if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
+ printk(KERN_ERR "%s: Error on transmitting packet\n",
+ dev->name);
+ stats->tx_bytes += pkt_info.byte_cnt;
+ } else {
+ unsigned int frag;
+ u32 ipheader;
+
+ /* Since hardware can't handle unaligned fragments smaller
+ * than 9 bytes, if we find any, we linearize the skb
+ * and start again. When I've seen it, it's always been
+ * the first frag (probably near the end of the page),
+ * but we check all frags to be safe.
+ */
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *fragp;
+
+ fragp = &skb_shinfo(skb)->frags[frag];
+ if (fragp->size <= 8 && fragp->page_offset & 0x7) {
+ skb_linearize(skb, GFP_ATOMIC);
+ printk(KERN_DEBUG "%s: unaligned tiny fragment"
+ "%d of %d, fixed\n",
+ dev->name, frag,
+ skb_shinfo(skb)->nr_frags);
+ goto linear;
+ }
+ }
+
+ /* first frag which is skb header */
+ pkt_info.byte_cnt = skb_headlen(skb);
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ pkt_info.l4i_chk = 0;
+ pkt_info.return_info = NULL;
+ pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
+
+ if (skb->ip_summed == CHECKSUM_HW) {
+ ipheader = skb->nh.iph->ihl << 11;
+ pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM | ipheader;
+ /* CPU already calculated pseudo header checksum. */
+ if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ pkt_info.cmd_sts |= ETH_UDP_FRAME;
+ pkt_info.l4i_chk = skb->h.uh->check;
+ } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ pkt_info.l4i_chk = skb->h.th->check;
+ else {
+ printk(KERN_ERR
+ "%s: chksum proto != TCP or UDP\n",
+ dev->name);
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 1;
+ }
+ }
+
+ status = eth_port_send(mp, &pkt_info);
+ if (status != ETH_OK) {
+ if ((status == ETH_ERROR))
+ printk(KERN_ERR
+ "%s: Error on transmitting packet\n",
+ dev->name);
+ if (status == ETH_QUEUE_FULL)
+ printk(KERN_ERR "Tx queue is full\n");
+ if (status == ETH_QUEUE_LAST_RESOURCE)
+ printk(KERN_ERR "Tx resource error\n");
+ }
+ stats->tx_bytes += pkt_info.byte_cnt;
+
+ /* Check for the remaining frags */
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ pkt_info.l4i_chk = 0x0000;
+ pkt_info.cmd_sts = 0x00000000;
+
+ /* Last Frag enables interrupt and frees the skb */
+ if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
+ pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
+ ETH_TX_LAST_DESC;
+ pkt_info.return_info = skb;
+ mp->tx_ring_skbs++;
+ } else {
+ pkt_info.return_info = NULL;
+ }
+ pkt_info.l4i_chk = 0;
+ pkt_info.byte_cnt = this_frag->size;
+
+ pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page,
+ this_frag->page_offset,
+ this_frag->size,
+ DMA_TO_DEVICE);
+
+ status = eth_port_send(mp, &pkt_info);
+
+ if (status != ETH_OK) {
+ if ((status == ETH_ERROR))
+ printk(KERN_ERR "%s: Error on "
+ "transmitting packet\n",
+ dev->name);
+
+ if (status == ETH_QUEUE_LAST_RESOURCE)
+ printk(KERN_ERR "Tx resource error\n");
+
+ if (status == ETH_QUEUE_FULL)
+ printk(KERN_ERR "Tx queue is full\n");
+ }
+ stats->tx_bytes += pkt_info.byte_cnt;
+ }
+ }
+#else
+ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
+ ETH_TX_LAST_DESC;
+ pkt_info.l4i_chk = 0;
+ pkt_info.byte_cnt = skb->len;
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ pkt_info.return_info = skb;
+ mp->tx_ring_skbs++;
+ status = eth_port_send(mp, &pkt_info);
+ if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
+ printk(KERN_ERR "%s: Error on transmitting packet\n",
+ dev->name);
+ stats->tx_bytes += pkt_info.byte_cnt;
+#endif
+
+ /* Check if TX queue can handle another skb. If not, then
+ * signal higher layers to stop requesting TX
+ */
+ if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+ /*
+ * Stop getting skb's from upper layers.
+ * Getting skb's from upper layers will be enabled again after
+ * packets are released.
+ */
+ netif_stop_queue(dev);
+
+ /* Update statistics and start of transmission time */
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0; /* success */
+}
+
+/*
+ * mv643xx_eth_get_stats
+ *
+ * Returns a pointer to the interface statistics.
+ *
+ * Input : dev - a pointer to the required interface
+ *
+ * Output : a pointer to the interface's statistics
+ */
+
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ return &mp->stats;
+}
+
+/*
+ * mv643xx_eth_probe
+ *
+ * First function called after registering the network device.
+ * Its purpose is to initialize the device as an ethernet device,
+ * fill the ethernet device structure with pointers to functions,
+ * and set the MAC address of the interface.
+ *
+ * Input : struct device *
+ * Output : -ENOMEM on failure, 0 on success
+ */
+static int mv643xx_eth_probe(struct device *ddev)
+{
+ struct platform_device *pdev = to_platform_device(ddev);
+ struct mv643xx_eth_platform_data *pd;
+ int port_num = pdev->id;
+ struct mv643xx_private *mp;
+ struct net_device *dev;
+ u8 *p;
+ struct resource *res;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mv643xx_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(ddev, dev);
+
+ mp = netdev_priv(dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+
+ mp->port_num = port_num;
+
+ dev->open = mv643xx_eth_open;
+ dev->stop = mv643xx_eth_stop;
+ dev->hard_start_xmit = mv643xx_eth_start_xmit;
+ dev->get_stats = mv643xx_eth_get_stats;
+ dev->set_mac_address = mv643xx_eth_set_mac_address;
+ dev->set_multicast_list = mv643xx_eth_set_rx_mode;
+
+ /* Tx timeout handler (watchdog) */
+ dev->tx_timeout = mv643xx_eth_tx_timeout;
+#ifdef MV643XX_NAPI
+ dev->poll = mv643xx_poll;
+ dev->weight = 64;
+#endif
+
+ dev->watchdog_timeo = 2 * HZ;
+ dev->tx_queue_len = mp->tx_ring_size;
+ dev->base_addr = 0;
+ dev->change_mtu = mv643xx_eth_change_mtu;
+ SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);
+
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+#ifdef MAX_SKB_FRAGS
+ /*
+ * Zero copy can only work if we use Discovery II memory. Else, we will
+ * have to map the buffers to ISA memory which is only 16 MB
+ */
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
+#endif
+#endif
+
+ /* Configure the timeout task */
+ INIT_WORK(&mp->tx_timeout_task,
+ (void (*)(void *))mv643xx_eth_tx_timeout_task, dev);
+
+ spin_lock_init(&mp->lock);
+
+ /* set default config values */
+ eth_port_uc_addr_get(dev, dev->dev_addr);
+ mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE;
+ mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
+ mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
+ mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
+ mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
+ mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
+
+ pd = pdev->dev.platform_data;
+ if (pd) {
+ if (pd->mac_addr != NULL)
+ memcpy(dev->dev_addr, pd->mac_addr, 6);
+
+ if (pd->phy_addr || pd->force_phy_addr)
+ ethernet_phy_set(port_num, pd->phy_addr);
+
+ if (pd->port_config || pd->force_port_config)
+ mp->port_config = pd->port_config;
+
+ if (pd->port_config_extend || pd->force_port_config_extend)
+ mp->port_config_extend = pd->port_config_extend;
+
+ if (pd->port_sdma_config || pd->force_port_sdma_config)
+ mp->port_sdma_config = pd->port_sdma_config;
+
+ if (pd->port_serial_control || pd->force_port_serial_control)
+ mp->port_serial_control = pd->port_serial_control;
+
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
+
+ if (pd->tx_queue_size)
+ mp->tx_ring_size = pd->tx_queue_size;
+
+ if (pd->tx_sram_size) {
+ mp->tx_sram_size = pd->tx_sram_size;
+ mp->tx_sram_addr = pd->tx_sram_addr;
+ }
+
+ if (pd->rx_sram_size) {
+ mp->rx_sram_size = pd->rx_sram_size;
+ mp->rx_sram_addr = pd->rx_sram_addr;
+ }
+ }
+
+ err = ethernet_phy_detect(port_num);
+ if (err) {
+ pr_debug("MV643xx ethernet port %d: "
+ "No PHY detected at addr %d\n",
+ port_num, ethernet_phy_get(port_num));
+ goto out;
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ p = dev->dev_addr;
+ printk(KERN_NOTICE
+ "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);
+
+ if (dev->features & NETIF_F_SG)
+ printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
+
+ if (dev->features & NETIF_F_IP_CSUM)
+ printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
+ dev->name);
+
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON\n", dev->name);
+#endif
+
+#ifdef MV643XX_COAL
+ printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON\n",
+ dev->name);
+#endif
+
+#ifdef MV643XX_NAPI
+ printk(KERN_NOTICE "%s: RX NAPI Enabled\n", dev->name);
+#endif
+
+ return 0;
+
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+static int mv643xx_eth_remove(struct device *ddev)
+{
+ struct net_device *dev = dev_get_drvdata(ddev);
+
+ unregister_netdev(dev);
+ flush_scheduled_work();
+
+ free_netdev(dev);
+ dev_set_drvdata(ddev, NULL);
+ return 0;
+}
+
+static int mv643xx_eth_shared_probe(struct device *ddev)
+{
+ struct platform_device *pdev = to_platform_device(ddev);
+ struct resource *res;
+
+ printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ mv643xx_eth_shared_base = ioremap(res->start,
+ MV643XX_ETH_SHARED_REGS_SIZE);
+ if (mv643xx_eth_shared_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+
+}
+
+static int mv643xx_eth_shared_remove(struct device *ddev)
+{
+ iounmap(mv643xx_eth_shared_base);
+ mv643xx_eth_shared_base = NULL;
+
+ return 0;
+}
+
+static struct device_driver mv643xx_eth_driver = {
+ .name = MV643XX_ETH_NAME,
+ .bus = &platform_bus_type,
+ .probe = mv643xx_eth_probe,
+ .remove = mv643xx_eth_remove,
+};
+
+static struct device_driver mv643xx_eth_shared_driver = {
+ .name = MV643XX_ETH_SHARED_NAME,
+ .bus = &platform_bus_type,
+ .probe = mv643xx_eth_shared_probe,
+ .remove = mv643xx_eth_shared_remove,
+};
+
+/*
+ * mv643xx_init_module
+ *
+ * Registers the network drivers into the Linux kernel
+ *
+ * Input : N/A
+ *
+ * Output : N/A
+ */
+static int __init mv643xx_init_module(void)
+{
+ int rc;
+
+ rc = driver_register(&mv643xx_eth_shared_driver);
+ if (!rc) {
+ rc = driver_register(&mv643xx_eth_driver);
+ if (rc)
+ driver_unregister(&mv643xx_eth_shared_driver);
+ }
+ return rc;
+}
+
+/*
+ * mv643xx_cleanup_module
+ *
+ * Unregisters the network drivers from the Linux kernel
+ *
+ * Input : N/A
+ *
+ * Output : N/A
+ */
+static void __exit mv643xx_cleanup_module(void)
+{
+ driver_unregister(&mv643xx_eth_driver);
+ driver_unregister(&mv643xx_eth_shared_driver);
+}
+
+module_init(mv643xx_init_module);
+module_exit(mv643xx_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
+ " and Dale Farnsworth");
+MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
+
+/*
+ * The second part is the low level driver of the gigE ethernet ports.
+ */
+
+/*
+ * Marvell's Gigabit Ethernet controller low level driver
+ *
+ * DESCRIPTION:
+ * This file introduces the low level API to Marvell's Gigabit Ethernet
+ * controller. This Gigabit Ethernet Controller driver API controls
+ * 1) Operations (i.e. port init, start, reset etc.).
+ * 2) Data flow (i.e. port send, receive etc.).
+ * Each Gigabit Ethernet port is controlled via
+ * struct mv643xx_private.
+ * This struct includes user configuration information as well as
+ * driver internal data needed for its operations.
+ *
+ * Supported Features:
+ * - This low level driver is OS independent. Allocating memory for
+ * the descriptor rings and buffers is not within the scope of
+ * this driver.
+ * - The user is free from Rx/Tx queue management.
+ * - This low level driver introduces a functional API that enables
+ * the user to operate Marvell's Gigabit Ethernet Controller in a
+ * convenient way.
+ * - Simple Gigabit Ethernet port operation API.
+ * - Simple Gigabit Ethernet port data flow API.
+ * - Data flow and operation API support per queue functionality.
+ * - Support cached descriptors for better performance.
+ * - Enable access to all four DRAM banks and internal SRAM memory
+ * spaces.
+ * - PHY access and control API.
+ * - Port control register configuration API.
+ * - Full control over Unicast and Multicast MAC configurations.
+ *
+ * Operation flow:
+ *
+ * Initialization phase
+ * This phase completes the initialization of the
+ * mv643xx_private struct.
+ * User information regarding port configuration has to be set
+ * prior to calling the port initialization routine.
+ *
+ * In this phase any port Tx/Rx activity is halted, MIB counters
+ * are cleared, the PHY address is set according to the user
+ * parameter and access to DRAM and internal SRAM memory spaces
+ * is enabled.
+ *
+ * Driver ring initialization
+ * Allocating memory for the descriptor rings and buffers is not
+ * within the scope of this driver. Thus, the user is required to
+ * allocate memory for the descriptor rings and buffers. These
+ * memory parameters are used by the Rx and Tx ring initialization
+ * routines in order to form the descriptor linked list into a
+ * ring.
+ * Note: Pay special attention to alignment issues when using
+ * cached descriptors/buffers. In this phase the driver stores
+ * information in the mv643xx_private struct regarding each queue
+ * ring.
+ *
+ * Driver start
+ * This phase prepares the Ethernet port for Rx and Tx activity.
+ * It uses the information stored in the mv643xx_private struct to
+ * initialize the various port registers.
+ *
+ * Data flow:
+ * All packet references to/from the driver are done using
+ * struct pkt_info.
+ * This struct is a unified struct used with Rx and Tx operations.
+ * This way the user is not required to be familiar with either
+ * the Tx or the Rx descriptor structures.
+ * The driver's descriptor rings are managed by indexes.
+ * These indexes control the ring resources and are used to
+ * indicate a SW resource error:
+ * 'current'
+ * This index points to the current available resource for use. For
+ * example in Rx process this index will point to the descriptor
+ * that will be passed to the user upon calling the receive
+ * routine. In Tx process, this index will point to the descriptor
+ * that will be assigned with the user packet info and transmitted.
+ * 'used'
+ * This index points to the descriptor that needs to restore its
+ * resources. For example in Rx process, using the Rx buffer return
+ * API will attach the buffer returned in packet info to the
+ * descriptor pointed by 'used'. In Tx process, using the Tx
+ * descriptor return will merely return the user packet info with
+ * the command status of the transmitted buffer pointed by the
+ * 'used' index. Nevertheless, it is essential to use this routine
+ * to update the 'used' index.
+ * 'first'
+ * This index supports Tx Scatter-Gather. It points to the first
+ * descriptor of a packet assembled from multiple buffers. For
+ * example, when a Tx resource error occurs in the middle of such
+ * a packet, the 'curr' index gets the value of 'first' to
+ * indicate that the ring returned to its state before trying to
+ * transmit this packet.
+ *
+ * Receive operation:
+ * The eth_port_receive API fills the packet information struct,
+ * passed by the caller, with received information from the
+ * 'current' SDMA descriptor.
+ * It is the user's responsibility to return this resource back
+ * to the Rx descriptor ring to enable the reuse of this resource.
+ * Returning an Rx resource is done using the eth_rx_return_buff
+ * API.
+ *
+ * Transmit operation:
+ * The eth_port_send API supports Scatter-Gather, which enables
+ * sending a packet spanned over multiple buffers. The packet
+ * info structures put into the Tx descriptor ring by the user
+ * are transmitted only once the descriptor carrying the 'LAST'
+ * bit in its command status field has been queued. This API
+ * also considers restrictions regarding buffer alignment and
+ * sizes.
+ * The user must return a Tx resource after ensuring the buffer
+ * has been transmitted to enable the Tx ring indexes to update.
+ *
+ * BOARD LAYOUT
+ * This device is on-board. No jumper diagram is necessary.
+ *
+ * EXTERNAL INTERFACE
+ *
+ * Prior to calling the initialization routine eth_port_init() the user
+ * must set the following fields in the mv643xx_private struct:
+ * port_num User Ethernet port number.
+ * port_mac_addr[6] User defined port MAC address.
+ * port_config User port configuration value.
+ * port_config_extend User port config extend value.
+ * port_sdma_config User port SDMA config value.
+ * port_serial_control User port serial control value.
+ *
+ * This driver data flow is done using the struct pkt_info which
+ * is a unified struct for Rx and Tx operations:
+ *
+ * byte_cnt Tx/Rx descriptor buffer byte count.
+ * l4i_chk CPU provided TCP Checksum. For Tx operation
+ * only.
+ * cmd_sts Tx/Rx descriptor command status.
+ * buf_ptr Tx/Rx descriptor buffer pointer.
+ * return_info Tx/Rx user resource return information.
+ */
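+
+/*
+ * Example (illustrative only, mirroring mv643xx_eth_start_xmit above):
+ * transmitting a single buffer packet through this low level API.
+ *
+ *   struct pkt_info pkt_info;
+ *
+ *   pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
+ *                      ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
+ *   pkt_info.l4i_chk = 0;
+ *   pkt_info.byte_cnt = skb->len;
+ *   pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
+ *                                     DMA_TO_DEVICE);
+ *   pkt_info.return_info = skb;
+ *   status = eth_port_send(mp, &pkt_info);
+ *
+ * Once the buffer has been transmitted, eth_tx_return_desc() hands the
+ * same return_info pointer back so that the skb can be unmapped and
+ * freed.
+ */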
+
+/* defines */
+/* SDMA command macros */
+#define ETH_ENABLE_TX_QUEUE(eth_port) \
+ mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
+
+/* locals */
+
+/* PHY routines */
+static int ethernet_phy_get(unsigned int eth_port_num);
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
+
+/* Ethernet Port routines */
+static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
+ int option);
+
+/*
+ * eth_port_init - Initialize the Ethernet port driver
+ *
+ * DESCRIPTION:
+ * This function prepares the ethernet port to start its activity:
+ * 1) Completes the ethernet port driver struct initialization toward
+ * the port start routine.
+ * 2) Resets the device to a quiescent state in case of warm reboot.
+ * 3) Enables SDMA access to all four DRAM banks as well as internal
+ * SRAM.
+ * 4) Cleans MAC tables. The reset status of those tables is unknown.
+ * 5) Sets the PHY address.
+ * Note: Call this routine prior to eth_port_start routine and after
+ * setting user values in the user fields of Ethernet port control
+ * struct.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet port control struct
+ *
+ * OUTPUT:
+ * See description.
+ *
+ * RETURN:
+ * None.
+ */
+static void eth_port_init(struct mv643xx_private *mp)
+{
+ mp->port_rx_queue_command = 0;
+ mp->port_tx_queue_command = 0;
+
+ mp->rx_resource_err = 0;
+ mp->tx_resource_err = 0;
+
+ eth_port_reset(mp->port_num);
+
+ eth_port_init_mac_tables(mp->port_num);
+
+ ethernet_phy_reset(mp->port_num);
+}
+
+/*
+ * eth_port_start - Start the Ethernet port activity.
+ *
+ * DESCRIPTION:
+ * This routine prepares the Ethernet port for Rx and Tx activity:
+ * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue
+ * whose descriptor ring has been initialized (using
+ * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
+ * 2. Initialize and enable the Ethernet configuration port by writing to
+ * the port's configuration and command registers.
+ * 3. Initialize and enable the SDMA by writing to the SDMA's
+ * configuration and command registers. After completing these steps,
+ * the ethernet port SDMA can start to perform Rx and Tx activities.
+ *
+ * Note: Each Rx and Tx queue descriptor's list must be initialized prior
+ * to calling this function (use ether_init_tx_desc_ring for Tx queues
+ * and ether_init_rx_desc_ring for Rx queues).
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet port control struct
+ *
+ * OUTPUT:
+ * Ethernet port is ready to receive and transmit.
+ *
+ * RETURN:
+ * None.
+ */
+static void eth_port_start(struct mv643xx_private *mp)
+{
+ unsigned int port_num = mp->port_num;
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = mp->tx_curr_desc_q;
+ mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+ (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = mp->rx_curr_desc_q;
+ mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+ (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
+
+ /* Add the assigned Ethernet address to the port's address table */
+ eth_port_uc_addr_set(port_num, mp->port_mac_addr);
+
+ /* Assign port configuration and command. */
+ mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config);
+
+ mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
+ mp->port_config_extend);
+
+
+ /* Increase the Rx side buffer size if supporting GigE */
+ if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
+ mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
+ (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17));
+ else
+ mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
+ mp->port_serial_control);
+
+ mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
+ mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) |
+ MV643XX_ETH_SERIAL_PORT_ENABLE);
+
+ /* Assign port SDMA configuration */
+ mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
+ mp->port_sdma_config);
+
+ /* Enable port Rx. */
+ mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+ mp->port_rx_queue_command);
+}
+
+/*
+ * eth_port_uc_addr_set - This function sets the port Unicast address.
+ *
+ * DESCRIPTION:
+ * This function sets the port Ethernet MAC address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Port number.
+ * char * p_addr Address to be set
+ *
+ * OUTPUT:
+ * Sets the MAC address low and high registers. Also calls
+ * eth_port_uc_addr() to set the unicast table with the proper
+ * information.
+ *
+ * RETURN:
+ * N/A.
+ *
+ */
+static void eth_port_uc_addr_set(unsigned int eth_port_num,
+ unsigned char *p_addr)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ mac_l = (p_addr[4] << 8) | (p_addr[5]);
+ mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
+ (p_addr[3] << 0);
+
+ mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
+ mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
+
+ /* Accept frames of this address */
+ eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
+
+ return;
+}
+
+/*
+ * eth_port_uc_addr_get - This function retrieves the port Unicast address
+ * (MAC address) from the ethernet hw registers.
+ *
+ * DESCRIPTION:
+ * This function retrieves the port Ethernet MAC address.
+ *
+ * INPUT:
+ * struct net_device *dev Network device.
+ * unsigned char *p_addr Pointer where the MAC address is stored.
+ *
+ * OUTPUT:
+ * Copy the MAC address to the location pointed to by p_addr.
+ *
+ * RETURN:
+ * N/A.
+ *
+ */
+static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
+ mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));
+
+ p_addr[0] = (mac_h >> 24) & 0xff;
+ p_addr[1] = (mac_h >> 16) & 0xff;
+ p_addr[2] = (mac_h >> 8) & 0xff;
+ p_addr[3] = mac_h & 0xff;
+ p_addr[4] = (mac_l >> 8) & 0xff;
+ p_addr[5] = mac_l & 0xff;
+}
+
+/*
+ * eth_port_uc_addr - This function sets the port unicast address table
+ *
+ * DESCRIPTION:
+ * This function locates the proper entry in the Unicast table for the
+ * specified MAC nibble and sets its properties according to function
+ * parameters.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Port number.
+ * unsigned char uc_nibble Unicast MAC Address last nibble.
+ * int option 0 = Add, 1 = remove address.
+ *
+ * OUTPUT:
+ * This function adds/removes MAC addresses from the port unicast address
+ * table.
+ *
+ * RETURN:
+ * 1 if the operation succeeded.
+ * 0 if the option parameter is invalid.
+ *
+ */
+static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
+ int option)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ uc_nibble = (0xf & uc_nibble);
+ tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
+ reg_offset = uc_nibble % 4; /* Entry offset within the above register */
+
+ switch (option) {
+ case REJECT_MAC_ADDR:
+ /* Clear accepts frame bit at given unicast DA table entry */
+ unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset));
+
+ unicast_reg &= ~(0x01 << (8 * reg_offset));
+
+ mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset), unicast_reg);
+ break;
+
+ case ACCEPT_MAC_ADDR:
+ /* Set accepts frame bit at unicast DA filter table entry */
+ unicast_reg =
+ mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset));
+
+ unicast_reg |= (0x01 << (8 * reg_offset));
+
+ mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + tbl_offset), unicast_reg);
+
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
+ *
+ * DESCRIPTION:
+ * Go through all the DA filter tables (Unicast, Special Multicast &
+ * Other Multicast) and set each entry to 0.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * Multicast and Unicast packets are rejected.
+ *
+ * RETURN:
+ * None.
+ */
+static void eth_port_init_mac_tables(unsigned int eth_port_num)
+{
+ int table_index;
+
+ /* Clear DA filter unicast table (Ex_dFUT) */
+ for (table_index = 0; table_index <= 0xC; table_index += 4)
+ mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+
+ for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+ /* Clear DA filter special multicast table (Ex_dFSMT) */
+ mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+ /* Clear DA filter other multicast table (Ex_dFOMT) */
+ mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index), 0);
+ }
+}
+
+/*
+ * eth_clear_mib_counters - Clear all MIB counters
+ *
+ * DESCRIPTION:
+ * This function clears all MIB counters of a specific ethernet port.
+ * A read from the MIB counter will reset the counter.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * After reading all MIB counters, the counters are reset.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void eth_clear_mib_counters(unsigned int eth_port_num)
+{
+ int i;
+
+ /* Perform dummy reads from MIB counters */
+ for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
+ i += 4)
+ mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
+}
+
+static inline u32 read_mib(struct mv643xx_private *mp, int offset)
+{
+ return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
+}
+
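+/*
+ * eth_update_mib_counters - Copy the hardware MIB counters into software
+ *
+ * DESCRIPTION:
+ * Reads the MIB counters of the given port into mp->mib_counters.
+ * The 64 bit octet counters are accumulated from their low/high
+ * register pairs; the 32 bit counters are read straight into the
+ * struct, whose field offsets mirror the MIB register offsets.
+ */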
+static void eth_update_mib_counters(struct mv643xx_private *mp)
+{
+ struct mv643xx_mib_counters *p = &mp->mib_counters;
+ int offset;
+
+ p->good_octets_received +=
+ read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
+ p->good_octets_received +=
+ (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;
+
+ for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
+ offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
+ offset += 4)
+ *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+
+ p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
+ p->good_octets_sent +=
+ (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;
+
+ for (offset = ETH_MIB_GOOD_FRAMES_SENT;
+ offset <= ETH_MIB_LATE_COLLISION;
+ offset += 4)
+ *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+}
+
+/*
+ * ethernet_phy_detect - Detect whether a phy is present
+ *
+ * DESCRIPTION:
+ * This function tests whether there is a PHY present on
+ * the specified port.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * None
+ *
+ * RETURN:
+ * 0 on success
+ * -ENODEV on failure
+ *
+ */
+static int ethernet_phy_detect(unsigned int port_num)
+{
+ unsigned int phy_reg_data0;
+ int auto_neg;
+
+ eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
+ auto_neg = phy_reg_data0 & 0x1000;
+ phy_reg_data0 ^= 0x1000; /* invert auto_neg */
+ eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
+
+ eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
+ if ((phy_reg_data0 & 0x1000) == auto_neg)
+ return -ENODEV; /* change didn't take */
+
+ phy_reg_data0 ^= 0x1000;
+ eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
+ return 0;
+}
+
+/*
+ * ethernet_phy_get - Get the ethernet port PHY address.
+ *
+ * DESCRIPTION:
+ * This routine returns the given ethernet port PHY address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * PHY address.
+ *
+ */
+static int ethernet_phy_get(unsigned int eth_port_num)
+{
+ unsigned int reg_data;
+
+ reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+
+ return ((reg_data >> (5 * eth_port_num)) & 0x1f);
+}
+
+/*
+ * ethernet_phy_set - Set the ethernet port PHY address.
+ *
+ * DESCRIPTION:
+ * This routine sets the given ethernet port PHY address.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * int phy_addr PHY address.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * eth_port_num;
+
+ reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
+}
+
+/*
+ * ethernet_phy_reset - Reset Ethernet port PHY.
+ *
+ * DESCRIPTION:
+ * This routine utilizes the SMI interface to reset the ethernet port PHY.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * The PHY is reset.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void ethernet_phy_reset(unsigned int eth_port_num)
+{
+ unsigned int phy_reg_data;
+
+ /* Reset the PHY */
+ eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
+ phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
+ eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
+}
+
+/*
+ * eth_port_reset - Reset Ethernet port
+ *
+ * DESCRIPTION:
+ * This routine resets the chip by aborting any SDMA engine activity and
+ * clearing the MIB counters. The Receiver and the Transmit unit are in
+ * idle state after this command is performed and the port is disabled.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * Channel activity is halted.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void eth_port_reset(unsigned int port_num)
+{
+ unsigned int reg_data;
+
+ /* Stop Tx port activity. Check port Tx activity. */
+ reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num));
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
+ (reg_data << 8));
+
+ /* Wait for all Tx activity to terminate. */
+ /* Check port cause register that all Tx queues are stopped */
+ while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
+ & 0xFF)
+ udelay(10);
+ }
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+ (reg_data << 8));
+
+ /* Wait for all Rx activity to terminate. */
+ /* Check port cause register that all Rx queues are stopped */
+ while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
+ & 0xFF)
+ udelay(10);
+ }
+
+ /* Clear all MIB counters */
+ eth_clear_mib_counters(port_num);
+
+ /* Reset the Enable bit in the Configuration Register */
+ reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+ reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
+ mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+}
+
+/*
+ * ethernet_set_config_reg - Set specified bits in configuration register.
+ *
+ * DESCRIPTION:
+ * This function sets specified bits in the given ethernet
+ * configuration register.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int value 32 bit value.
+ *
+ * OUTPUT:
+ * The set bits in the value parameter are set in the configuration
+ * register.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void ethernet_set_config_reg(unsigned int eth_port_num,
+ unsigned int value)
+{
+ unsigned int eth_config_reg;
+
+ eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num));
+ eth_config_reg |= value;
+ mv_write(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num), eth_config_reg);
+}
+
+static int eth_port_autoneg_supported(unsigned int eth_port_num)
+{
+ unsigned int phy_reg_data0;
+
+ eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0);
+
+ return phy_reg_data0 & 0x1000;
+}
+
+static int eth_port_link_is_up(unsigned int eth_port_num)
+{
+ unsigned int phy_reg_data1;
+
+ eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1);
+
+ if (eth_port_autoneg_supported(eth_port_num)) {
+ if (phy_reg_data1 & 0x20) /* auto-neg complete */
+ return 1;
+ } else if (phy_reg_data1 & 0x4) /* link up */
+ return 1;
+
+ return 0;
+}
+
+/*
+ * ethernet_get_config_reg - Get the port configuration register
+ *
+ * DESCRIPTION:
+ * This function returns the configuration register value of the given
+ * ethernet port.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ *
+ * OUTPUT:
+ * None.
+ *
+ * RETURN:
+ * Port configuration register value.
+ */
+static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
+{
+ unsigned int eth_config_reg;
+
+ eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_EXTEND_REG
+ (eth_port_num));
+ return eth_config_reg;
+}
+
+/*
+ * eth_port_read_smi_reg - Read PHY registers
+ *
+ * DESCRIPTION:
+ * This routine utilizes the SMI interface to interact with the PHY in
+ * order to perform PHY register read.
+ *
+ * INPUT:
+ * unsigned int port_num Ethernet Port number.
+ * unsigned int phy_reg PHY register address offset.
+ * unsigned int *value Register value buffer.
+ *
+ * OUTPUT:
+ * Write the value of a specified PHY register into given buffer.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void eth_port_read_smi_reg(unsigned int port_num,
+ unsigned int phy_reg, unsigned int *value)
+{
+ int phy_addr = ethernet_phy_get(port_num);
+ unsigned long flags;
+ int i;
+
+ /* the SMI register is a shared resource */
+ spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+
+ /* wait for the SMI register to become available */
+ for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv643xx PHY busy timeout, port %d\n", port_num);
+ goto out;
+ }
+ udelay(PHY_WAIT_MICRO_SECONDS);
+ }
+
+ mv_write(MV643XX_ETH_SMI_REG,
+ (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
+
+ /* now wait for the data to be valid */
+ for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv643xx PHY read timeout, port %d\n", port_num);
+ goto out;
+ }
+ udelay(PHY_WAIT_MICRO_SECONDS);
+ }
+
+ *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
+out:
+ spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+}
+
+/*
+ * eth_port_write_smi_reg - Write to PHY registers
+ *
+ * DESCRIPTION:
+ * This routine utilizes the SMI interface to interact with the PHY in
+ * order to perform writes to PHY registers.
+ *
+ * INPUT:
+ * unsigned int eth_port_num Ethernet Port number.
+ * unsigned int phy_reg PHY register address offset.
+ * unsigned int value Register value.
+ *
+ * OUTPUT:
+ * Write the given value to the specified PHY register.
+ *
+ * RETURN:
+ * None.
+ *
+ */
+static void eth_port_write_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg, unsigned int value)
+{
+ int phy_addr;
+ int i;
+ unsigned long flags;
+
+ phy_addr = ethernet_phy_get(eth_port_num);
+
+ /* the SMI register is a shared resource */
+ spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+
+ /* wait for the SMI register to become available */
+ for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk("mv643xx PHY busy timeout, port %d\n",
+ eth_port_num);
+ goto out;
+ }
+ udelay(PHY_WAIT_MICRO_SECONDS);
+ }
+
+ mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+ ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+out:
+ spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+}
+
+/*
+ * eth_port_send - Send an Ethernet packet
+ *
+ * DESCRIPTION:
+ * This routine sends a given packet described by the p_pkt_info
+ * parameter. It supports transmitting a packet spanned over multiple
+ * buffers. The routine updates the 'curr' and 'first' indexes according
+ * to the packet segment passed to the routine. In case the packet
+ * segment is first, the 'first' index is updated. In any case, the
+ * 'curr' index is updated. If the routine gets into a Tx resource error
+ * it assigns the 'curr' index as
+ * 'first'. This way the function can abort Tx process of multiple
+ * descriptors per packet.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Tx ring 'curr' and 'first' indexes are updated.
+ *
+ * RETURN:
+ * ETH_QUEUE_FULL in case of Tx resource error.
+ * ETH_ERROR in case the routine can not access Tx desc ring.
+ * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
+ * ETH_OK otherwise.
+ *
+ */
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+/*
+ * Modified to include the first descriptor pointer in case of SG
+ */
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info)
+{
+ int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
+ struct eth_tx_desc *current_descriptor;
+ struct eth_tx_desc *first_descriptor;
+ u32 command;
+
+ /* Do not process Tx ring in case of Tx ring resource error */
+ if (mp->tx_resource_err)
+ return ETH_QUEUE_FULL;
+
+ /*
+ * The hardware requires that each buffer that is <= 8 bytes
+ * in length must be aligned on an 8 byte boundary.
+ */
+ if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
+ printk(KERN_ERR
+ "mv643xx_eth port %d: packet size <= 8 problem\n",
+ mp->port_num);
+ return ETH_ERROR;
+ }
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_curr = mp->tx_curr_desc_q;
+ tx_desc_used = mp->tx_used_desc_q;
+
+ current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
+
+ tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
+
+ current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
+ current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
+ current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
+ mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
+
+ command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
+ ETH_BUFFER_OWNED_BY_DMA;
+ if (command & ETH_TX_FIRST_DESC) {
+ tx_first_desc = tx_desc_curr;
+ mp->tx_first_desc_q = tx_first_desc;
+ first_descriptor = current_descriptor;
+ mp->tx_first_command = command;
+ } else {
+ tx_first_desc = mp->tx_first_desc_q;
+ first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
+ BUG_ON(first_descriptor == NULL);
+ current_descriptor->cmd_sts = command;
+ }
+
+ if (command & ETH_TX_LAST_DESC) {
+ wmb();
+ first_descriptor->cmd_sts = mp->tx_first_command;
+
+ wmb();
+ ETH_ENABLE_TX_QUEUE(mp->port_num);
+
+ /*
+ * Finish Tx packet. Update first desc in case of Tx resource
+ * error
+ */
+ tx_first_desc = tx_next_desc;
+ mp->tx_first_desc_q = tx_first_desc;
+ }
+
+ /* Check for ring index overlap in the Tx desc ring */
+ if (tx_next_desc == tx_desc_used) {
+ mp->tx_resource_err = 1;
+ mp->tx_curr_desc_q = tx_first_desc;
+
+ return ETH_QUEUE_LAST_RESOURCE;
+ }
+
+ mp->tx_curr_desc_q = tx_next_desc;
+
+ return ETH_OK;
+}
+#else
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info)
+{
+ int tx_desc_curr;
+ int tx_desc_used;
+ struct eth_tx_desc *current_descriptor;
+ unsigned int command_status;
+
+ /* Do not process Tx ring in case of Tx ring resource error */
+ if (mp->tx_resource_err)
+ return ETH_QUEUE_FULL;
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_curr = mp->tx_curr_desc_q;
+ tx_desc_used = mp->tx_used_desc_q;
+ current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
+
+ command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
+ current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
+ current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
+ mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;
+
+ /* Set last desc with DMA ownership and interrupt enable. */
+ wmb();
+ current_descriptor->cmd_sts = command_status |
+ ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
+
+ wmb();
+ ETH_ENABLE_TX_QUEUE(mp->port_num);
+
+ /* Finish Tx packet. Update first desc in case of Tx resource error */
+ tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;
+
+ /* Update the current descriptor */
+ mp->tx_curr_desc_q = tx_desc_curr;
+
+ /* Check for ring index overlap in the Tx desc ring */
+ if (tx_desc_curr == tx_desc_used) {
+ mp->tx_resource_err = 1;
+ return ETH_QUEUE_LAST_RESOURCE;
+ }
+
+ return ETH_OK;
+}
+#endif
+
+/*
+ * eth_tx_return_desc - Free all used Tx descriptors
+ *
+ * DESCRIPTION:
+ * This routine returns the transmitted packet information to the caller.
+ * It uses the 'first' index to support Tx desc return in case a transmit
+ * of a packet spanned over multiple buffers is still in process.
+ * In case the Tx queue was in "resource error" condition, where there are
+ * no available Tx resources, the function resets the resource error flag.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Tx ring 'first' and 'used' indexes are updated.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Tx desc ring.
+ * ETH_RETRY in case there is transmission in process.
+ * ETH_END_OF_JOB if the routine has nothing to release.
+ * ETH_OK otherwise.
+ *
+ */
+static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info)
+{
+ int tx_desc_used;
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ int tx_busy_desc = mp->tx_first_desc_q;
+#else
+ int tx_busy_desc = mp->tx_curr_desc_q;
+#endif
+ struct eth_tx_desc *p_tx_desc_used;
+ unsigned int command_status;
+
+ /* Get the Tx Desc ring indexes */
+ tx_desc_used = mp->tx_used_desc_q;
+
+ p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
+
+ /* Sanity check */
+ if (p_tx_desc_used == NULL)
+ return ETH_ERROR;
+
+ /* Stop release. About to overlap the current available Tx descriptor */
+ if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
+ return ETH_END_OF_JOB;
+
+ command_status = p_tx_desc_used->cmd_sts;
+
+ /* Still transmitting... */
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+ return ETH_RETRY;
+
+ /* Pass the packet information to the caller */
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
+ mp->tx_skb[tx_desc_used] = NULL;
+
+ /* Update the next descriptor to release. */
+ mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;
+
+ /* Any Tx return cancels the Tx resource error status */
+ mp->tx_resource_err = 0;
+
+ return ETH_OK;
+}
+
+/*
+ * eth_port_receive - Get received information from Rx ring.
+ *
+ * DESCRIPTION:
+ * This routine returns the received data to the caller. There is no
+ * data copying during routine operation. All information is returned
+ * using pointer to packet information struct passed from the caller.
+ * If the routine exhausts Rx ring resources then the resource error flag
+ * is set.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info User packet buffer.
+ *
+ * OUTPUT:
+ * Rx ring current and used indexes are updated.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Rx desc ring.
+ * ETH_QUEUE_FULL if Rx ring resources are exhausted.
+ * ETH_END_OF_JOB if there is no received data.
+ * ETH_OK otherwise.
+ */
+static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info)
+{
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ volatile struct eth_rx_desc *p_rx_desc;
+ unsigned int command_status;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (mp->rx_resource_err)
+ return ETH_QUEUE_FULL;
+
+ /* Get the Rx Desc ring 'curr and 'used' indexes */
+ rx_curr_desc = mp->rx_curr_desc_q;
+ rx_used_desc = mp->rx_used_desc_q;
+
+ p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
+
+ /* The following parameters are used to save readings from memory */
+ command_status = p_rx_desc->cmd_sts;
+ rmb();
+
+ /* Nothing to receive... */
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+ return ETH_END_OF_JOB;
+
+ p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
+ p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
+ p_pkt_info->l4i_chk = p_rx_desc->buf_size;
+
+ /* Clean the return info field to indicate that the packet has been */
+ /* moved to the upper layers */
+ mp->rx_skb[rx_curr_desc] = NULL;
+
+ /* Update current index in data structure */
+ rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
+ mp->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ mp->rx_resource_err = 1;
+
+ return ETH_OK;
+}
+
+/*
+ * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
+ *
+ * DESCRIPTION:
+ * This routine returns a Rx buffer back to the Rx ring. It retrieves the
+ * next 'used' descriptor and attaches the returned buffer to it.
+ * In case the Rx ring was in "resource error" condition, where there are
+ * no available Rx resources, the function resets the resource error flag.
+ *
+ * INPUT:
+ * struct mv643xx_private *mp Ethernet Port Control struct.
+ * struct pkt_info *p_pkt_info Information on returned buffer.
+ *
+ * OUTPUT:
+ * New available Rx resource in Rx descriptor ring.
+ *
+ * RETURN:
+ * ETH_ERROR in case the routine can not access Rx desc ring.
+ * ETH_OK otherwise.
+ */
+static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info)
+{
+ int used_rx_desc; /* Where to return Rx resource */
+ volatile struct eth_rx_desc *p_used_rx_desc;
+
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = mp->rx_used_desc_q;
+ p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
+
+ p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
+ p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
+ mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
+
+ /* Flush the write pipe */
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts =
+ ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ mp->rx_resource_err = 0;
+
+ return ETH_OK;
+}
+
+/************* Begin ethtool support *************************/
+
+struct mv643xx_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
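+/*
+ * MV643XX_STAT(m) expands to the {sizeof_stat, stat_offset} pair for member
+ * m of struct mv643xx_private, used to build the ethtool statistics table
+ * below.
+ */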
+#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
+ offsetof(struct mv643xx_private, m)
+
+static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
+ { "rx_packets", MV643XX_STAT(stats.rx_packets) },
+ { "tx_packets", MV643XX_STAT(stats.tx_packets) },
+ { "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
+ { "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
+ { "rx_errors", MV643XX_STAT(stats.rx_errors) },
+ { "tx_errors", MV643XX_STAT(stats.tx_errors) },
+ { "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
+ { "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
+ { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
+ { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
+ { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
+ { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
+ { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
+ { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
+ { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
+ { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
+ { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
+ { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
+ { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
+ { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
+ { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
+ { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
+ { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
+ { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
+ { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
+ { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
+ { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
+ { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
+ { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
+ { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
+ { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
+ { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
+ { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
+ { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
+ { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
+ { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
+ { "collision", MV643XX_STAT(mib_counters.collision) },
+ { "late_collision", MV643XX_STAT(mib_counters.late_collision) },
+};
+
+#define MV643XX_STATS_LEN \
+ sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)
+
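+/*
+ * mv643xx_get_settings
+ *
+ * Reports the port's link capabilities and current speed/duplex to ethtool,
+ * derived from the port serial control and port status registers and from
+ * the PHY's auto-negotiation setting.
+ */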
+static int
+mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct mv643xx_private *mp = netdev->priv;
+ int port_num = mp->port_num;
+ int autoneg = eth_port_autoneg_supported(port_num);
+ int mode_10_bit;
+ int auto_duplex;
+ int half_duplex = 0;
+ int full_duplex = 0;
+ int auto_speed;
+ int speed_10 = 0;
+ int speed_100 = 0;
+ int speed_1000 = 0;
+
+ u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+ u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num));
+
+ mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT;
+
+ if (mode_10_bit) {
+ ecmd->supported = SUPPORTED_10baseT_Half;
+ } else {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ (autoneg ? SUPPORTED_Autoneg : 0) |
+ SUPPORTED_TP);
+
+ auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX);
+ auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII);
+
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (autoneg) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+
+ if (auto_duplex) {
+ half_duplex = 1;
+ full_duplex = 1;
+ } else {
+ if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE)
+ full_duplex = 1;
+ else
+ half_duplex = 1;
+ }
+
+ if (auto_speed) {
+ speed_10 = 1;
+ speed_100 = 1;
+ speed_1000 = 1;
+ } else {
+ if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
+ speed_1000 = 1;
+ else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100)
+ speed_100 = 1;
+ else
+ speed_10 = 1;
+ }
+
+			if (speed_10 && half_duplex)
+				ecmd->advertising |= ADVERTISED_10baseT_Half;
+			if (speed_10 && full_duplex)
+				ecmd->advertising |= ADVERTISED_10baseT_Full;
+			if (speed_100 && half_duplex)
+				ecmd->advertising |= ADVERTISED_100baseT_Half;
+			if (speed_100 && full_duplex)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (speed_1000)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = ethernet_phy_get(port_num);
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(netdev)) {
+ if (mode_10_bit)
+ ecmd->speed = SPEED_10;
+ else {
+ if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000)
+ ecmd->speed = SPEED_1000;
+ else if (psr & MV643XX_ETH_PORT_STATUS_MII_100)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ }
+
+ if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static void
+mv643xx_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, mv643xx_driver_name, 32);
+ strncpy(drvinfo->version, mv643xx_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, "mv643xx", 32);
+ drvinfo->n_stats = MV643XX_STATS_LEN;
+}
+
+static int
+mv643xx_get_stats_count(struct net_device *netdev)
+{
+ return MV643XX_STATS_LEN;
+}
+
+static void
+mv643xx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct mv643xx_private *mp = netdev->priv;
+ int i;
+
+ eth_update_mib_counters(mp);
+
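+	/*
+	 * Each table entry carries the byte offset of its counter within
+	 * struct mv643xx_private and the counter's size; read the value
+	 * through that offset as a u64 or u32 accordingly, widening
+	 * everything to the u64 slots ethtool expects.
+	 */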
+ for(i = 0; i < MV643XX_STATS_LEN; i++) {
+ char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
+ data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
+ sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+ }
+}
+
+static void
+mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+{
+ int i;
+
+ switch(stringset) {
+ case ETH_SS_STATS:
+ for (i=0; i < MV643XX_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mv643xx_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static struct ethtool_ops mv643xx_ethtool_ops = {
+ .get_settings = mv643xx_get_settings,
+ .get_drvinfo = mv643xx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_strings = mv643xx_get_strings,
+ .get_stats_count = mv643xx_get_stats_count,
+ .get_ethtool_stats = mv643xx_get_ethtool_stats,
+};
+
+/************* End ethtool support *************************/
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
new file mode 100644
index 000000000000..57c4f8fbfdb6
--- /dev/null
+++ b/drivers/net/mv643xx_eth.h
@@ -0,0 +1,438 @@
+#ifndef __MV643XX_ETH_H__
+#define __MV643XX_ETH_H__
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/mv643xx.h>
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+/*
+ * The first part is the high level driver of the gigE ethernet ports.
+ */
+
+/* Checksum offload for Tx works for most packets, but
+ * fails if previous packet sent did not use hw csum
+ */
+#undef MV643XX_CHECKSUM_OFFLOAD_TX
+#define MV643XX_NAPI
+#define MV643XX_TX_FAST_REFILL
+#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */
+#undef MV643XX_COAL
+
+/*
+ * Number of RX / TX descriptors on RX / TX rings.
+ * Note that allocating RX descriptors means allocating both the RX ring
+ * AND a preallocated RX buffer (skb) for each descriptor.
+ * For TX, only the descriptor ring is allocated; there are no
+ * preallocated TX buffers (skb's are allocated by higher layers).
+ */
+
+/* Default TX ring size is 1000 descriptors */
+#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
+
+/* Default RX ring size is 400 descriptors */
+#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
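+/*
+ * For scale: each eth_rx_desc / eth_tx_desc defined below is 16 bytes,
+ * so the defaults above amount to roughly 1000 * 16 = 16000 bytes of TX
+ * descriptor memory and 400 * 16 = 6400 bytes of RX descriptor memory,
+ * plus one preallocated receive skb per RX descriptor.
+ */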
+
+#define MV643XX_TX_COAL 100
+#ifdef MV643XX_COAL
+#define MV643XX_RX_COAL 100
+#endif
+
+/*
+ * The second part is the low level driver of the gigE ethernet ports.
+ */
+
+/*
+ * Header File for : MV-643xx network interface header
+ *
+ * DESCRIPTION:
+ *	This header file contains macros, typedefs and function declarations
+ *	for the Marvell Gigabit Ethernet Controller.
+ *
+ * DEPENDENCIES:
+ * None.
+ *
+ */
+
+/* MAC accept/reject macros */
+#define ACCEPT_MAC_ADDR 0
+#define REJECT_MAC_ADDR 1
+
+/* Buffer offset from buffer pointer */
+#define RX_BUF_OFFSET 0x2
+
+/* Gigabit Ethernet Unit Global Registers */
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
+#define ETH_MIB_FRAMES_64_OCTETS 0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT 0x40
+#define ETH_MIB_EXCESSIVE_COLLISION 0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
+#define ETH_MIB_FC_SENT 0x54
+#define ETH_MIB_GOOD_FC_RECEIVED 0x58
+#define ETH_MIB_BAD_FC_RECEIVED 0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
+#define ETH_MIB_OVERSIZE_RECEIVED 0x68
+#define ETH_MIB_JABBER_RECEIVED 0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
+#define ETH_MIB_BAD_CRC_EVENT 0x74
+#define ETH_MIB_COLLISION 0x78
+#define ETH_MIB_LATE_COLLISION 0x7c
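+/*
+ * Note that good octets received/sent are the only 64-bit counters: the
+ * hardware splits each of them across a LOW/HIGH register pair
+ * (0x0/0x4 and 0x38/0x3c), which is why struct mv643xx_mib_counters
+ * below declares them as u64 while every other counter is a u32.
+ */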
+
+/* Port serial status reg (PSR) */
+#define ETH_INTERFACE_GMII_MII 0
+#define ETH_INTERFACE_PCM BIT0
+#define ETH_LINK_IS_DOWN 0
+#define ETH_LINK_IS_UP BIT1
+#define ETH_PORT_AT_HALF_DUPLEX 0
+#define ETH_PORT_AT_FULL_DUPLEX BIT2
+#define ETH_RX_FLOW_CTRL_DISABLED 0
+#define ETH_RX_FLOW_CTRL_ENBALED BIT3
+#define ETH_GMII_SPEED_100_10 0
+#define ETH_GMII_SPEED_1000 BIT4
+#define ETH_MII_SPEED_10 0
+#define ETH_MII_SPEED_100 BIT5
+#define ETH_NO_TX 0
+#define ETH_TX_IN_PROGRESS BIT7
+#define ETH_BYPASS_NO_ACTIVE 0
+#define ETH_BYPASS_ACTIVE BIT8
+#define ETH_PORT_NOT_AT_PARTITION_STATE 0
+#define ETH_PORT_AT_PARTITION_STATE BIT9
+#define ETH_PORT_TX_FIFO_NOT_EMPTY 0
+#define ETH_PORT_TX_FIFO_EMPTY BIT10
+
+#define ETH_DEFAULT_RX_BPDU_QUEUE_3 (BIT23 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_4 BIT24
+#define ETH_DEFAULT_RX_BPDU_QUEUE_5 (BIT24 | BIT22)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_6 (BIT24 | BIT23)
+#define ETH_DEFAULT_RX_BPDU_QUEUE_7 (BIT24 | BIT23 | BIT22)
+
+/* SMI reg */
+#define ETH_SMI_BUSY		BIT28	/* Operation is in progress	*/
+#define ETH_SMI_READ_VALID	BIT27	/* Read data is valid		*/
+#define ETH_SMI_OPCODE_WRITE	0	/* 0 - Write, 1 - Read		*/
+#define ETH_SMI_OPCODE_READ	BIT26	/* 0 - Write, 1 - Read		*/
+
+/* SDMA command status fields macros */
+
+/* Tx & Rx descriptors status */
+#define ETH_ERROR_SUMMARY (BIT0)
+
+/* Tx & Rx descriptors command */
+#define ETH_BUFFER_OWNED_BY_DMA (BIT31)
+
+/* Tx descriptors status */
+#define ETH_LC_ERROR (0 )
+#define ETH_UR_ERROR (BIT1 )
+#define ETH_RL_ERROR (BIT2 )
+#define ETH_LLC_SNAP_FORMAT (BIT9 )
+
+/* Rx descriptors status */
+#define ETH_CRC_ERROR (0 )
+#define ETH_OVERRUN_ERROR (BIT1 )
+#define ETH_MAX_FRAME_LENGTH_ERROR (BIT2 )
+#define ETH_RESOURCE_ERROR ((BIT2 | BIT1))
+#define ETH_VLAN_TAGGED (BIT19)
+#define ETH_BPDU_FRAME (BIT20)
+#define ETH_TCP_FRAME_OVER_IP_V_4 (0 )
+#define ETH_UDP_FRAME_OVER_IP_V_4 (BIT21)
+#define ETH_OTHER_FRAME_TYPE (BIT22)
+#define ETH_LAYER_2_IS_ETH_V_2 (BIT23)
+#define ETH_FRAME_TYPE_IP_V_4 (BIT24)
+#define ETH_FRAME_HEADER_OK (BIT25)
+#define ETH_RX_LAST_DESC (BIT26)
+#define ETH_RX_FIRST_DESC (BIT27)
+#define ETH_UNKNOWN_DESTINATION_ADDR (BIT28)
+#define ETH_RX_ENABLE_INTERRUPT (BIT29)
+#define ETH_LAYER_4_CHECKSUM_OK (BIT30)
+
+/* Rx descriptors byte count */
+#define ETH_FRAME_FRAGMENTED (BIT2)
+
+/* Tx descriptors command */
+#define ETH_LAYER_4_CHECKSUM_FIRST_DESC (BIT10)
+#define ETH_FRAME_SET_TO_VLAN (BIT15)
+#define ETH_TCP_FRAME (0 )
+#define ETH_UDP_FRAME (BIT16)
+#define ETH_GEN_TCP_UDP_CHECKSUM (BIT17)
+#define ETH_GEN_IP_V_4_CHECKSUM (BIT18)
+#define ETH_ZERO_PADDING (BIT19)
+#define ETH_TX_LAST_DESC (BIT20)
+#define ETH_TX_FIRST_DESC (BIT21)
+#define ETH_GEN_CRC (BIT22)
+#define ETH_TX_ENABLE_INTERRUPT (BIT23)
+#define ETH_AUTO_MODE (BIT30)
+
+/* typedefs */
+
+typedef enum _eth_func_ret_status {
+ ETH_OK, /* Returned as expected. */
+ ETH_ERROR, /* Fundamental error. */
+ ETH_RETRY, /* Could not process request. Try later.*/
+ ETH_END_OF_JOB, /* Ring has nothing to process. */
+ ETH_QUEUE_FULL, /* Ring resource error. */
+	ETH_QUEUE_LAST_RESOURCE	/* Ring resources are about to be exhausted. */
+} ETH_FUNC_RET_STATUS;
+
+typedef enum _eth_target {
+ ETH_TARGET_DRAM,
+ ETH_TARGET_DEVICE,
+ ETH_TARGET_CBS,
+ ETH_TARGET_PCI0,
+ ETH_TARGET_PCI1
+} ETH_TARGET;
+
+/* These are for big-endian machines. Little endian needs different
+ * definitions.
+ */
+#if defined(__BIG_ENDIAN)
+struct eth_rx_desc {
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 cmd_sts; /* Descriptor command status */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+};
+
+struct eth_tx_desc {
+ u16 byte_cnt; /* buffer byte count */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u32 cmd_sts; /* Command/status field */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+};
+
+#elif defined(__LITTLE_ENDIAN)
+struct eth_rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 buf_size; /* Buffer size */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct eth_tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
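+/*
+ * The two variants describe the same hardware descriptor: within each
+ * 8-byte half of the descriptor the field order is simply reversed,
+ * presumably so that the controller's fixed in-memory layout is
+ * preserved regardless of CPU byte order.
+ */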
+
+/* Unified struct for both Rx and Tx operations.  The user is not	*/
+/* required to be familiar with either Tx or Rx descriptors.		*/
+struct pkt_info {
+ unsigned short byte_cnt; /* Descriptor buffer byte count */
+ unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
+ unsigned int cmd_sts; /* Descriptor command status */
+ dma_addr_t buf_ptr; /* Descriptor buffer pointer */
+ struct sk_buff *return_info; /* User resource return information */
+};
+
+/* Ethernet port specific information */
+
+struct mv643xx_mib_counters {
+ u64 good_octets_received;
+ u32 bad_octets_received;
+ u32 internal_mac_transmit_err;
+ u32 good_frames_received;
+ u32 bad_frames_received;
+ u32 broadcast_frames_received;
+ u32 multicast_frames_received;
+ u32 frames_64_octets;
+ u32 frames_65_to_127_octets;
+ u32 frames_128_to_255_octets;
+ u32 frames_256_to_511_octets;
+ u32 frames_512_to_1023_octets;
+ u32 frames_1024_to_max_octets;
+ u64 good_octets_sent;
+ u32 good_frames_sent;
+ u32 excessive_collision;
+ u32 multicast_frames_sent;
+ u32 broadcast_frames_sent;
+ u32 unrec_mac_control_received;
+ u32 fc_sent;
+ u32 good_fc_received;
+ u32 bad_fc_received;
+ u32 undersize_received;
+ u32 fragments_received;
+ u32 oversize_received;
+ u32 jabber_received;
+ u32 mac_receive_error;
+ u32 bad_crc_event;
+ u32 collision;
+ u32 late_collision;
+};
+
+struct mv643xx_private {
+ int port_num; /* User Ethernet port number */
+ u8 port_mac_addr[6]; /* User defined port MAC address.*/
+ u32 port_config; /* User port configuration value*/
+ u32 port_config_extend; /* User port config extend value*/
+ u32 port_sdma_config; /* User port SDMA config value */
+ u32 port_serial_control; /* User port serial control value */
+ u32 port_tx_queue_command; /* Port active Tx queues summary*/
+ u32 port_rx_queue_command; /* Port active Rx queues summary*/
+
+ u32 rx_sram_addr; /* Base address of rx sram area */
+ u32 rx_sram_size; /* Size of rx sram area */
+ u32 tx_sram_addr; /* Base address of tx sram area */
+ u32 tx_sram_size; /* Size of tx sram area */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+ int tx_resource_err; /* Tx ring resource error flag */
+
+	/* Tx/Rx ring management index fields. For driver use */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ int tx_first_desc_q;
+ u32 tx_first_command;
+#endif
+
+#ifdef MV643XX_TX_FAST_REFILL
+ u32 tx_clean_threshold;
+#endif
+
+ struct eth_rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ unsigned int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct eth_tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ unsigned int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ /*
+ * Former struct mv643xx_eth_priv members start here
+ */
+ struct net_device_stats stats;
+ struct mv643xx_mib_counters mib_counters;
+ spinlock_t lock;
+ /* Size of Tx Ring per queue */
+ unsigned int tx_ring_size;
+	/* Number of SKBs outstanding on the Tx queue */
+ unsigned int tx_ring_skbs;
+ /* Size of Rx Ring per queue */
+ unsigned int rx_ring_size;
+	/* Number of SKBs allocated to the Rx Ring per queue */
+ unsigned int rx_ring_skbs;
+
+ /*
+ * rx_task used to fill RX ring out of bottom half context
+ */
+ struct work_struct rx_task;
+
+ /*
+ * Used in case RX Ring is empty, which can be caused when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ long rx_task_busy __attribute__ ((aligned(SMP_CACHE_BYTES)));
+ unsigned rx_timer_flag;
+
+ u32 rx_int_coal;
+ u32 tx_int_coal;
+};
+
+/* ethernet.h API list */
+
+/* Port operation control routines */
+static void eth_port_init(struct mv643xx_private *mp);
+static void eth_port_reset(unsigned int eth_port_num);
+static void eth_port_start(struct mv643xx_private *mp);
+
+static void ethernet_set_config_reg(unsigned int eth_port_num,
+ unsigned int value);
+static unsigned int ethernet_get_config_reg(unsigned int eth_port_num);
+
+/* Port MAC address routines */
+static void eth_port_uc_addr_set(unsigned int eth_port_num,
+ unsigned char *p_addr);
+
+/* PHY and MIB routines */
+static void ethernet_phy_reset(unsigned int eth_port_num);
+
+static void eth_port_write_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg, unsigned int value);
+
+static void eth_port_read_smi_reg(unsigned int eth_port_num,
+ unsigned int phy_reg, unsigned int *value);
+
+static void eth_clear_mib_counters(unsigned int eth_port_num);
+
+/* Port data flow control routines */
+static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info);
+static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
+ struct pkt_info *p_pkt_info);
+
+#endif /* __MV643XX_ETH_H__ */
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
new file mode 100644
index 000000000000..56a82d8ee8f5
--- /dev/null
+++ b/drivers/net/mvme147.c
@@ -0,0 +1,203 @@
+/* mvme147.c : the Linux/mvme147/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/mvme147hw.h>
+
+/* We have 16384 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
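+/* With the log values above the driver ends up with 2^1 = 2 Tx and
+ * 2^3 = 8 Rx buffers (presumably expanded to 1 << log by the generic
+ * 7990 code), matching the NetBSD split noted above.
+ */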
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct m147lance_private {
+ struct lance_private lance;
+ unsigned long ram;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int m147lance_open(struct net_device *dev);
+static int m147lance_close(struct net_device *dev);
+static void m147lance_writerap(struct lance_private *lp, unsigned short value);
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
+static unsigned short m147lance_readrdp(struct lance_private *lp);
+
+typedef void (*writerap_t)(void *, unsigned short);
+typedef void (*writerdp_t)(void *, unsigned short);
+typedef unsigned short (*readrdp_t)(void *);
+
+/* Initialise the one and only on-board 7990 */
+struct net_device * __init mvme147lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int called;
+ static const char name[] = "MVME147 LANCE";
+ struct m147lance_private *lp;
+ u_long *addr;
+ u_long address;
+ int err;
+
+ if (!MACH_IS_MVME147 || called)
+ return ERR_PTR(-ENODEV);
+ called++;
+
+ dev = alloc_etherdev(sizeof(struct m147lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ SET_MODULE_OWNER(dev);
+
+ /* Fill the dev fields */
+ dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
+ dev->open = &m147lance_open;
+ dev->stop = &m147lance_close;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+ dev->tx_timeout = &lance_tx_timeout;
+ dev->dma = 0;
+
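+	/* Build the station address: the word read from ETHERNET_ADDRESS is
+	 * shifted down a byte at a time, the first shift discarding its
+	 * least-significant byte, and the next three bytes fill dev_addr[5],
+	 * [4] and [3] behind the fixed 08:00:3e vendor prefix.
+	 */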
+ addr=(u_long *)ETHERNET_ADDRESS;
+ address = *addr;
+ dev->dev_addr[0]=0x08;
+ dev->dev_addr[1]=0x00;
+ dev->dev_addr[2]=0x3e;
+ address=address>>8;
+ dev->dev_addr[5]=address&0xff;
+ address=address>>8;
+ dev->dev_addr[4]=address&0xff;
+ address=address>>8;
+ dev->dev_addr[3]=address&0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, dev->base_addr, MVME147_LANCE_IRQ,
+ dev->dev_addr[0],
+ dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ lp = (struct m147lance_private *)dev->priv;
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
+ if (!lp->ram)
+ {
+ printk("%s: No memory for LANCE buffers\n", dev->name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.base = dev->base_addr;
+ lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
+ lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = MVME147_LANCE_IRQ;
+ lp->lance.writerap = (writerap_t)m147lance_writerap;
+ lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
+ lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_pages(lp->ram, 3);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+
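+/* Register access helpers: RAP (register address port) selects which
+ * LANCE CSR a subsequent RDP (register data port) access reads or
+ * writes.  On the MVME147 both ports are big-endian 16-bit registers,
+ * hence the out_be16()/in_be16() accessors.
+ */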
+static void m147lance_writerap(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RAP, value);
+}
+
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RDP, value);
+}
+
+static unsigned short m147lance_readrdp(struct lance_private *lp)
+{
+ return in_be16(lp->base + LANCE_RDP);
+}
+
+static int m147lance_open(struct net_device *dev)
+{
+ int status;
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+
+ return 0;
+}
+
+static int m147lance_close(struct net_device *dev)
+{
+	/* disable interrupts at board level */
+ m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ lance_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_mvme147_lance;
+int init_module(void)
+{
+ dev_mvme147_lance = mvme147lance_probe(-1);
+ if (IS_ERR(dev_mvme147_lance))
+ return PTR_ERR(dev_mvme147_lance);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct m147lance_private *lp = dev_mvme147_lance->priv;
+ unregister_netdev(dev_mvme147_lance);
+ free_pages(lp->ram, 3);
+ free_netdev(dev_mvme147_lance);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/myri_code.h b/drivers/net/myri_code.h
new file mode 100644
index 000000000000..851eba8a3e00
--- /dev/null
+++ b/drivers/net/myri_code.h
@@ -0,0 +1,6287 @@
+/* This is the Myrinet MCP code for LANai4.x */
+/* Generated by cat $MYRI_HOME/lib/lanai/mcp4.dat > myri_code4.h */
+
+static unsigned int lanai4_code_off = 0x0000; /* half-word offset */
+static unsigned char lanai4_code[76256] __initdata = {
+0xF2,0x0E,
+0xFE,0x00, 0xC2,0x90, 0x00,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x01,0x4C, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x2A,0x6C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x2C,0x10, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x05,0x3C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x03, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x29,0xE0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x2B,0x84, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x2C,0x1C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x0A,0xBC, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x02, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x2A,0xF8, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x04, 0x4A,0x9C, 0x85,0x16, 0x00,0x00, 0x20,0x3A, 0x00,0x01, 0xEE,0x00,
+0x01,0x01, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x01,0x00, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x01,0x2D, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0xF4,0x82, 0x00,0x12, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x01,0xE0, 0xB4,0xBA,
+0x68,0x02, 0xE0,0x00, 0x01,0xE0, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x3B,0x64, 0xF5,0x84,
+0x4F,0x54, 0xF7,0x05, 0x7A,0x10, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x01,0x99, 0x97,0x2A,
+0x00,0x20, 0x95,0xAA, 0x00,0x1C, 0xF6,0x06, 0x4A,0x98, 0x26,0xAC, 0x00,0x01, 0x77,0x35,
+0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0x07,0x38, 0x00,0x0C, 0xA4,0xBA,
+0x60,0x02, 0x00,0x00, 0x00,0x01, 0x94,0xAA, 0x00,0x10, 0xC7,0x38, 0x60,0x00, 0x87,0x3A,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x97,0x2A, 0x00,0x14, 0xF7,0x04, 0x4A,0x9C, 0x00,0x00,
+0x00,0x01, 0x27,0x38, 0x00,0x01, 0xC0,0x2E, 0x72,0x00, 0xD7,0x00, 0x0A,0x01, 0xE0,0x00,
+0x01,0xD0, 0xF7,0x05, 0x7A,0x18, 0x95,0xAA, 0x00,0x1C, 0xF6,0x06, 0x4A,0x98, 0x06,0xAC,
+0x00,0x01, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0x07,0x38,
+0x00,0x0C, 0xA4,0xBA, 0x60,0x02, 0x00,0x00, 0x00,0x01, 0x94,0xAA, 0x00,0x10, 0xC7,0x38,
+0x60,0x00, 0x87,0x3A, 0x00,0x04, 0xF0,0x05, 0x7A,0x18, 0x97,0x2A, 0x00,0x14, 0xF5,0x05,
+0x79,0xD8, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x01,0xF4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x38, 0xF7,0x04,
+0x7A,0x10, 0xF6,0x84, 0x3B,0x64, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x02,0x4C, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x02,0x4C, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x02,0x85, 0xF4,0x82, 0x00,0x00, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02,
+0x00,0x12, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x02,0x74, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF3,0x06, 0x2A,0x6C, 0xF3,0x05, 0x2C,0x10, 0xE0,0x00, 0x05,0x28, 0xF0,0x05,
+0x7A,0x18, 0xF3,0x84, 0x79,0xD8, 0xF6,0x84, 0x4A,0xA0, 0x23,0x14, 0x00,0x20, 0x93,0x16,
+0xFF,0xC4, 0x84,0x1E, 0x00,0x10, 0x96,0x96, 0xFF,0xD4, 0xF7,0x04, 0x4A,0x9C, 0x94,0x16,
+0xFF,0xE0, 0x85,0x1E, 0x00,0x14, 0xC0,0x36, 0x72,0x00, 0xEC,0x00, 0x03,0x6C, 0x95,0x16,
+0xFF,0xE4, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF3,0x06,
+0x4A,0x98, 0xC6,0xB8, 0x30,0x00, 0x06,0xB4, 0x00,0x0C, 0xC5,0x84, 0x00,0x00, 0x87,0x36,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x42,0x00, 0xE6,0x00, 0x02,0xFC, 0xC6,0x24,
+0x00,0x00, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0xE6,0x00,
+0x03,0x00, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x03,0x0D, 0x00,0x00, 0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0x36, 0x00,0x00, 0x87,0x16,
+0xFF,0xE0, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x72,0x00, 0xE2,0x00, 0x03,0x48, 0xF5,0x02,
+0x00,0x00, 0xC0,0x32, 0x72,0x00, 0xE6,0x00, 0x03,0x50, 0x20,0x2A, 0x00,0x00, 0x86,0xB6,
+0x00,0x04, 0x87,0x16, 0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0x03,0x51, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00,
+0x03,0x61, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0x03,0x70, 0x20,0x26, 0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00,
+0x03,0xA5, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xD8, 0xE0,0x00, 0x04,0x18, 0x96,0x96,
+0xFF,0xDC, 0x27,0x14, 0x00,0x2C, 0x97,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0xC4, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0xCC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0xCC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x04,0x15, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xD8, 0x96,0x96, 0xFF,0xDC, 0xF7,0x05, 0x4A,0xA0, 0xE0,0x00, 0x04,0x1C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x04,0x2C, 0xF4,0x82,
+0x00,0x01, 0xE0,0x00, 0x04,0x84, 0xF4,0x82, 0x00,0x00, 0x86,0x96, 0xFF,0xD8, 0x00,0x00,
+0x00,0x01, 0x77,0x35, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF6,0x86,
+0x42,0xC8, 0xA6,0x3A, 0x68,0x02, 0xC7,0x38, 0x68,0x00, 0x75,0x39, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x05,0xB8, 0x00,0x02, 0x86,0xAE, 0x00,0x00, 0x07,0x38, 0x00,0x04, 0x97,0x16,
+0xFF,0xEC, 0xC6,0x30, 0x57,0xC0, 0x76,0x30, 0xFF,0xF0, 0x96,0x16, 0xFF,0xF4, 0x75,0xAD,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC6,0xB4, 0x5F,0xC0, 0x76,0xB4, 0xFF,0xF0, 0x96,0x96,
+0xFF,0xF0, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0x05,0x25, 0xF3,0x06, 0x29,0xE0, 0x86,0x96,
+0xFF,0xF0, 0xF5,0x82, 0x00,0x00, 0xC7,0x34, 0x68,0x00, 0xC4,0x9C, 0x72,0x00, 0xC0,0x2E,
+0x6A,0x00, 0xEC,0x00, 0x04,0xF0, 0xC5,0x24, 0x00,0x00, 0xC6,0x2C, 0x00,0x00, 0x87,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xA6,0xB2, 0x70,0x02, 0x05,0xAC, 0x00,0x01, 0xC7,0x30,
+0x70,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4,
+0xFF,0xF0, 0xF6,0xAB, 0x28,0x00, 0x05,0x28, 0x00,0x02, 0x87,0x16, 0xFF,0xF0, 0x00,0x00,
+0x00,0x01, 0xC0,0x2E, 0x72,0x00, 0xEC,0x00, 0x04,0xB1, 0x06,0x30, 0x00,0x02, 0xF3,0x02,
+0x00,0x03, 0xF3,0x05, 0x76,0xF4, 0x87,0x16, 0xFF,0xF0, 0x86,0x9E, 0x00,0x04, 0xC7,0x38,
+0x70,0x00, 0xC7,0x38, 0x48,0x00, 0xC6,0xB4, 0x70,0x00, 0x87,0x16, 0xFF,0xF4, 0x06,0xB4,
+0x00,0x20, 0x97,0x02, 0xFF,0x6C, 0x94,0x82, 0xFF,0x50, 0x96,0x82, 0xFF,0x58, 0xF3,0x06,
+0x29,0xE0, 0xF3,0x05, 0x2C,0x10, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x04, 0x7A,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x05,0xCD, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x7A,0x10, 0xF6,0x84, 0x3B,0x64, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x05,0xCD, 0xF5,0x86, 0x4A,0x98, 0xF6,0x04, 0x79,0xD8, 0xF6,0x84, 0x4F,0x54, 0x00,0x00,
+0x00,0x01, 0x96,0xB2, 0x00,0x1C, 0x06,0xB4, 0x00,0x01, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0x07,0x38, 0x00,0x0C, 0xA5,0x3A, 0x58,0x02, 0x00,0x00,
+0x00,0x01, 0x95,0x32, 0x00,0x10, 0xC7,0x38, 0x58,0x00, 0x87,0x3A, 0x00,0x04, 0xF0,0x05,
+0x7A,0x18, 0x97,0x32, 0x00,0x14, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x01,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x05,0xFC, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02,
+0x00,0x12, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x05,0xF4, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF5,0x06, 0x2A,0x6C, 0xF5,0x05, 0x2C,0x10, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96, 0x00,0x00, 0xF7,0x04, 0x75,0xEC, 0x85,0x2E,
+0x00,0x20, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x06,0xCC, 0xF5,0x05, 0x7A,0x08, 0xF7,0x04,
+0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x06,0xCC, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x7A,0x08, 0xF6,0x84, 0x3B,0x64, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x06,0xCC, 0x00,0x00, 0x00,0x01, 0x87,0x2E, 0x00,0x1C, 0xF6,0x84, 0x4F,0x54, 0xF7,0x05,
+0x7A,0x00, 0xC7,0x34, 0x72,0x00, 0x20,0x3A, 0x00,0x00, 0xEE,0x00, 0x06,0x8D, 0xF5,0x02,
+0x00,0x01, 0xE0,0x00, 0x06,0x90, 0xF5,0x05, 0x79,0xF8, 0xF0,0x85, 0x79,0xF8, 0xF6,0x84,
+0x7A,0x00, 0xC7,0x38, 0x70,0x00, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x79,0xF8, 0xF6,0x85,
+0x79,0xE8, 0xC7,0x38, 0x70,0x00, 0xC6,0x34, 0x70,0x00, 0xF7,0x04, 0x4A,0x9C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x00, 0x06,0xCC, 0xF6,0x05, 0x79,0xF0, 0x20,0x36,
+0x00,0x00, 0xEC,0x00, 0x06,0xF8, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02,
+0x00,0x13, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x07,0x38, 0xB5,0x3A, 0x68,0x02, 0xE0,0x00,
+0x07,0x38, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x4A,0x9C, 0x00,0x00, 0x00,0x01, 0xC0,0x32,
+0x72,0x00, 0xEE,0x00, 0x07,0x19, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x4A,0x9C, 0xE0,0x00,
+0x07,0x28, 0xF7,0x05, 0x79,0xF0, 0x20,0x32, 0x00,0x00, 0xEC,0x00, 0x07,0x28, 0x00,0x00,
+0x00,0x01, 0xF0,0x85, 0x79,0xF0, 0xF5,0x85, 0x79,0xE0, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x07,0x4C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x38, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x07,0xA4, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x07,0xA4, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x7A,0x08, 0xF6,0x84, 0x3B,0x64, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x07,0xD5, 0xF4,0x02,
+0x00,0x00, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x13, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x07,0xCC, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xE0,0x00, 0x0A,0xA4, 0xF3,0x06,
+0x2B,0x84, 0xF6,0x84, 0x79,0xE8, 0xF6,0x06, 0x4A,0x98, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF6,0x84, 0x79,0xE0, 0x07,0x38, 0x00,0x0C, 0xA3,0x3A,
+0x60,0x02, 0xC3,0xB4, 0x00,0x00, 0x93,0x36, 0x00,0x10, 0xC7,0x38, 0x60,0x00, 0x87,0x3A,
+0x00,0x04, 0x23,0x14, 0x00,0x20, 0x93,0x16, 0xFF,0xC4, 0x97,0x36, 0x00,0x14, 0x84,0x9E,
+0x00,0x10, 0xF6,0x84, 0x4A,0xA0, 0x94,0x96, 0xFF,0xE0, 0x96,0x96, 0xFF,0xD4, 0x85,0x1E,
+0x00,0x14, 0xF7,0x04, 0x4A,0x9C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x00,
+0x08,0xEC, 0x95,0x16, 0xFF,0xE4, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39,
+0x00,0x02, 0xC6,0xB8, 0x60,0x00, 0x06,0xB4, 0x00,0x0C, 0xC5,0x84, 0x00,0x00, 0x87,0x36,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x08,0x7C, 0xC6,0x20,
+0x00,0x00, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0xE6,0x00,
+0x08,0x80, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x08,0x8D, 0x00,0x00, 0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0x36, 0x00,0x00, 0x87,0x16,
+0xFF,0xE0, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x72,0x00, 0xE2,0x00, 0x08,0xC8, 0xF5,0x02,
+0x00,0x00, 0xC0,0x32, 0x72,0x00, 0xE6,0x00, 0x08,0xD0, 0x20,0x2A, 0x00,0x00, 0x86,0xB6,
+0x00,0x04, 0x87,0x16, 0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0x08,0xD1, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00,
+0x08,0xE1, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0x08,0xF0, 0x20,0x22, 0x00,0x00, 0xF4,0x02, 0x00,0x01, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x09,0x25, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xD8, 0xE0,0x00, 0x09,0x98, 0x96,0x96,
+0xFF,0xDC, 0x27,0x14, 0x00,0x2C, 0x97,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0xC4, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0xCC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0xCC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x09,0x95, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xD8, 0x96,0x96, 0xFF,0xDC, 0xF7,0x05, 0x4A,0xA0, 0xE0,0x00, 0x09,0x9C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x09,0xAC, 0xF4,0x82,
+0x00,0x01, 0xE0,0x00, 0x0A,0x04, 0xF4,0x82, 0x00,0x00, 0x86,0x96, 0xFF,0xD8, 0x00,0x00,
+0x00,0x01, 0x77,0x35, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF6,0x86,
+0x42,0xC8, 0xA6,0x3A, 0x68,0x02, 0xC7,0x38, 0x68,0x00, 0x75,0x39, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x05,0xB8, 0x00,0x02, 0x86,0xAE, 0x00,0x00, 0x07,0x38, 0x00,0x04, 0x97,0x16,
+0xFF,0xEC, 0xC6,0x30, 0x57,0xC0, 0x76,0x30, 0xFF,0xF0, 0x96,0x16, 0xFF,0xF4, 0x75,0xAD,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC6,0xB4, 0x5F,0xC0, 0x76,0xB4, 0xFF,0xF0, 0x96,0x96,
+0xFF,0xF0, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0x0A,0xA5, 0xF3,0x06, 0x2A,0xF8, 0x86,0x96,
+0xFF,0xF0, 0xF5,0x82, 0x00,0x00, 0xC7,0x34, 0x68,0x00, 0xC4,0x9C, 0x72,0x00, 0xC0,0x2E,
+0x6A,0x00, 0xEC,0x00, 0x0A,0x70, 0xC5,0x24, 0x00,0x00, 0xC6,0x2C, 0x00,0x00, 0x87,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xA6,0xB2, 0x70,0x02, 0x05,0xAC, 0x00,0x01, 0xC7,0x30,
+0x70,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4,
+0xFF,0xF0, 0xF6,0xAB, 0x28,0x00, 0x05,0x28, 0x00,0x02, 0x87,0x16, 0xFF,0xF0, 0x00,0x00,
+0x00,0x01, 0xC0,0x2E, 0x72,0x00, 0xEC,0x00, 0x0A,0x31, 0x06,0x30, 0x00,0x02, 0xF3,0x02,
+0x00,0x02, 0xF3,0x05, 0x76,0xF4, 0x87,0x16, 0xFF,0xF0, 0x86,0x9E, 0x00,0x04, 0xC7,0x38,
+0x70,0x00, 0xC7,0x38, 0x48,0x00, 0xC6,0xB4, 0x70,0x00, 0x87,0x16, 0xFF,0xF4, 0x06,0xB4,
+0x00,0x20, 0x97,0x02, 0xFF,0x6C, 0x94,0x82, 0xFF,0x50, 0x96,0x82, 0xFF,0x58, 0xF3,0x06,
+0x2A,0xF8, 0xF3,0x05, 0x2C,0x1C, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF6,0x84, 0x79,0xE8, 0xF7,0x04, 0x79,0xF8, 0x00,0x00, 0x00,0x01, 0xC6,0xB4,
+0x70,0x00, 0xF7,0x04, 0x7A,0x20, 0xF6,0x85, 0x79,0xE8, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x7A,0x20, 0xF7,0x04, 0x79,0xF0, 0xF6,0x04, 0x7A,0x20, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x0B,0x2C, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x82, 0x00,0x13, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x0B,0x20, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x86,
+0x2B,0x84, 0xE0,0x00, 0x0B,0x38, 0xF5,0x85, 0x2C,0x1C, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x07,0x4C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x06, 0x2C,0x10, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x29,0xE0, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x2C,0x10, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x2A,0x6C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x2C,0x1C, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x2A,0xF8, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x2C,0x1C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x2B,0x84, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF0,0x05,
+0x2D,0x38, 0xF0,0x05, 0x2D,0x3C, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x18, 0xFF,0x85, 0x2E,0xDC, 0xF7,0x06, 0x0C,0x3E, 0xC7,0x7C,
+0x74,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x14,0x29, 0x97,0x16, 0xFF,0xF4, 0x47,0x38,
+0xFF,0xFB, 0xF6,0x84, 0x6F,0x50, 0xCF,0xB8, 0x00,0x00, 0x83,0x96, 0xFF,0xF4, 0xF7,0x02,
+0x00,0x3F, 0xC3,0x9C, 0x6D,0x80, 0xC7,0x1C, 0x74,0x00, 0x20,0x3A, 0x00,0x3F, 0xE2,0x00,
+0x12,0x60, 0x93,0x96, 0xFF,0xF4, 0x77,0x39, 0x00,0x02, 0xF6,0x82, 0x0C,0x5C, 0xA6,0xB6,
+0x70,0x02, 0x00,0x00, 0x00,0x01, 0xC1,0x34, 0x00,0x00, 0x00,0x00, 0x12,0x60, 0x00,0x00,
+0x12,0x60, 0x00,0x00, 0x0D,0x68, 0x00,0x00, 0x0D,0x68, 0x00,0x00, 0x0D,0x5C, 0x00,0x00,
+0x0D,0x5C, 0x00,0x00, 0x0D,0x68, 0x00,0x00, 0x0D,0x68, 0x00,0x00, 0x12,0x50, 0x00,0x00,
+0x12,0x50, 0x00,0x00, 0x12,0x3C, 0x00,0x00, 0x12,0x3C, 0x00,0x00, 0x0D,0xE0, 0x00,0x00,
+0x0D,0xE0, 0x00,0x00, 0x12,0x3C, 0x00,0x00, 0x12,0x3C, 0x00,0x00, 0x0D,0xE8, 0x00,0x00,
+0x0D,0xF4, 0x00,0x00, 0x0E,0x00, 0x00,0x00, 0x0E,0x20, 0x00,0x00, 0x0E,0x40, 0x00,0x00,
+0x0E,0x60, 0x00,0x00, 0x0E,0x80, 0x00,0x00, 0x0E,0xA0, 0x00,0x00, 0x0E,0xC0, 0x00,0x00,
+0x0E,0xC8, 0x00,0x00, 0x0E,0xD0, 0x00,0x00, 0x12,0x28, 0x00,0x00, 0x0E,0xD8, 0x00,0x00,
+0x0E,0xF4, 0x00,0x00, 0x0F,0x10, 0x00,0x00, 0x12,0x28, 0x00,0x00, 0x0F,0x18, 0x00,0x00,
+0x0F,0x18, 0x00,0x00, 0x0F,0x24, 0x00,0x00, 0x0F,0x24, 0x00,0x00, 0x0F,0x44, 0x00,0x00,
+0x0F,0x44, 0x00,0x00, 0x0F,0x64, 0x00,0x00, 0x0F,0x64, 0x00,0x00, 0x0F,0x84, 0x00,0x00,
+0x0F,0x84, 0x00,0x00, 0x0F,0x8C, 0x00,0x00, 0x0F,0x8C, 0x00,0x00, 0x0F,0x94, 0x00,0x00,
+0x0F,0x94, 0x00,0x00, 0x0F,0xB0, 0x00,0x00, 0x0F,0xB0, 0x00,0x00, 0x0F,0xB8, 0x00,0x00,
+0x0F,0xD8, 0x00,0x00, 0x0F,0xF8, 0x00,0x00, 0x10,0x2C, 0x00,0x00, 0x10,0x60, 0x00,0x00,
+0x10,0x94, 0x00,0x00, 0x10,0xC8, 0x00,0x00, 0x10,0xFC, 0x00,0x00, 0x11,0x30, 0x00,0x00,
+0x11,0x4C, 0x00,0x00, 0x11,0x68, 0x00,0x00, 0x12,0x14, 0x00,0x00, 0x11,0x84, 0x00,0x00,
+0x11,0xB4, 0x00,0x00, 0x11,0xE4, 0x00,0x00, 0x12,0x14, 0xF3,0x82, 0x00,0x06, 0xE0,0x00,
+0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF6,0x02, 0x00,0x05, 0x20,0x32, 0x00,0x14, 0xE6,0x00,
+0x0D,0xB5, 0x27,0x00, 0x00,0x10, 0x20,0x3A, 0x00,0x01, 0xE2,0x00, 0x0D,0xB5, 0xF7,0x06,
+0x2D,0xCC, 0xF6,0x84, 0x2E,0xCC, 0x00,0x00, 0x00,0x01, 0x75,0xB5, 0x00,0x02, 0xB6,0x2E,
+0x70,0x02, 0x06,0xB4, 0x00,0x01, 0xF6,0x85, 0x2E,0xCC, 0x86,0x02, 0xFF,0x34, 0xF7,0x06,
+0x2E,0x4C, 0x20,0x36, 0x00,0x1F, 0xE2,0x00, 0x0D,0xB5, 0xB6,0x2E, 0x70,0x02, 0xF0,0x05,
+0x2E,0xCC, 0xF7,0x04, 0x2D,0x58, 0x00,0x00, 0x00,0x01, 0x87,0x3A, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x87,0x3A, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xC1,0x38,
+0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x60, 0x00,0x00, 0x00,0x01, 0xE0,0x00,
+0x12,0x40, 0xF3,0x82, 0x00,0x06, 0xF3,0x82, 0x00,0x0B, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x07, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x06, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x06, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x12,0x40, 0xF3,0x82, 0x00,0x0B, 0xE0,0x00, 0x12,0x40, 0xF3,0x82, 0x00,0x07, 0xE0,0x00,
+0x12,0x2C, 0xF3,0x82, 0x00,0x0B, 0xF3,0x82, 0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82,
+0x00,0x06, 0xF3,0x82, 0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82, 0x00,0x06, 0xE0,0x00,
+0x12,0x2C, 0xF3,0x82, 0x00,0x0B, 0xF3,0x82, 0x00,0x14, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x06, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82, 0x00,0x14, 0xE0,0x00, 0x12,0x2C, 0xF3,0x82,
+0x00,0x14, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82, 0x00,0x06, 0xE0,0x00,
+0x12,0x2C, 0xF3,0x82, 0x00,0x14, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x0B, 0xE0,0x00,
+0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x07, 0xE0,0x00,
+0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x0B, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x06, 0xE0,0x00, 0x12,0x54, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x06, 0xE0,0x00,
+0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x0B, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x14, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0xE0,0x00, 0x12,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82, 0x00,0x0B, 0xF3,0x82, 0x00,0x14, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x12,0x40, 0xF3,0x82, 0x00,0x07, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x2C, 0xF3,0x82,
+0x00,0x0B, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x0B, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82,
+0x00,0x06, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x40, 0xF3,0x82,
+0x00,0x06, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x12,0x2C, 0xF3,0x82, 0x00,0x0B, 0xF7,0x04,
+0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38, 0x00,0x08, 0xE0,0x00, 0x13,0xCC, 0xF7,0x05,
+0x35,0x44, 0xF3,0x82, 0x00,0x14, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x07, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x05, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0x90,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x77,0x9C, 0x00,0x14, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0x12,0x9D, 0xF7,0x06, 0x04,0x00, 0xF7,0x04, 0x6F,0x5C, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x6F,0x5C, 0xF7,0x04, 0x6F,0x5C, 0xE0,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x83,0x96, 0xFF,0xF4, 0xF7,0x06, 0x04,0x00, 0xC0,0x1E, 0x74,0x00, 0xE6,0x00,
+0x14,0x29, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x2E,0xD0, 0xF6,0x84, 0x35,0x24, 0x07,0x38,
+0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x14,0x05, 0xF7,0x05, 0x2E,0xD0, 0xF7,0x04,
+0xE0,0x14, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x14,0x05, 0xF6,0x82,
+0x00,0x00, 0xF6,0x85, 0xE0,0x14, 0xF7,0x04, 0x2E,0xD8, 0xC5,0x34, 0x00,0x00, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x2E,0xD8, 0x20,0x2A, 0x00,0x02, 0xEE,0x00, 0x13,0xCC, 0xF6,0x82,
+0x00,0x00, 0xF6,0x84, 0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00, 0x13,0xA0, 0x05,0xB4, 0x00,0x08, 0x95,0x93,
+0xFF,0xFC, 0x95,0x16, 0xFF,0xE8, 0x95,0x96, 0xFF,0xE4, 0x96,0x96, 0xFF,0xE0, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x64, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xE8, 0x85,0x96,
+0xFF,0xE4, 0x86,0x96, 0xFF,0xE0, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x13,0x90, 0xF7,0x02,
+0x00,0x00, 0x86,0x36, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00,
+0x13,0x75, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x36, 0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00, 0x13,0x90, 0xF7,0x02,
+0x00,0x00, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38,
+0x6A,0x00, 0xC7,0x38, 0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x12,0x00, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04,
+0x6F,0x4C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x13,0xC0, 0x07,0x34,
+0x14,0x94, 0xF3,0x84, 0x6F,0x44, 0xE0,0x00, 0x13,0xC4, 0xF3,0x85, 0x35,0x28, 0xF7,0x05,
+0x35,0x28, 0xE0,0x00, 0x12,0xE8, 0x05,0x28, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0x14,0x29, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0xF0,0x05, 0x35,0x24, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x82, 0x00,0x0D, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x14,0x28, 0xB3,0xBA, 0x68,0x02, 0xE0,0x00, 0x14,0x28, 0xF0,0x05,
+0x2D,0x38, 0xF7,0x04, 0xE0,0x10, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x14,0x29, 0xF7,0x02, 0x00,0x00, 0xF7,0x05, 0xE0,0x10, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x02,0x98, 0x97,0x93, 0xFF,0xFC, 0xF4,0x84, 0x2D,0x38, 0xF7,0x04, 0x2D,0x3C, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x0C,0x09, 0xF6,0x86, 0x2C,0x28, 0x77,0x39,
+0x00,0x02, 0xA5,0x3A, 0x68,0x02, 0x00,0x00, 0x00,0x01, 0x20,0x2A, 0x00,0x14, 0xE6,0x00,
+0x14,0x91, 0x27,0x28, 0x00,0x15, 0x20,0x3A, 0x00,0x01, 0xE2,0x00, 0x14,0x91, 0xF7,0x06,
+0x2D,0xCC, 0xF6,0x84, 0x2E,0xCC, 0x86,0x02, 0xFF,0x34, 0x75,0xB5, 0x00,0x02, 0xB5,0x2E,
+0x70,0x02, 0x06,0xB4, 0x00,0x01, 0xF6,0x85, 0x2E,0xCC, 0xF7,0x06, 0x2E,0x4C, 0x20,0x36,
+0x00,0x1F, 0xE2,0x00, 0x14,0x91, 0xB6,0x2E, 0x70,0x02, 0xF0,0x05, 0x2E,0xCC, 0xF7,0x06,
+0x2D,0x44, 0x76,0xA9, 0x00,0x02, 0xA7,0x36, 0x70,0x02, 0x00,0x00, 0x00,0x01, 0x87,0x3A,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x87,0x36, 0x00,0x04, 0x94,0x96,
+0xFF,0xEC, 0x07,0x88, 0x00,0x08, 0xC1,0x38, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x2D,0x3C, 0x84,0x96, 0xFF,0xEC, 0x07,0x38, 0x00,0x01, 0x20,0x3A, 0x00,0x44, 0xE6,0x00,
+0x14,0x2C, 0xF7,0x05, 0x2D,0x3C, 0xE0,0x00, 0x14,0x2C, 0xF0,0x05, 0x2D,0x3C, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x84,0x16, 0x00,0x00, 0xF7,0x02,
+0x00,0x00, 0x85,0x96, 0x00,0x04, 0x20,0x3A, 0x00,0x21, 0xEE,0x00, 0x15,0x34, 0x95,0xA2,
+0x00,0x00, 0xF6,0x06, 0x23,0x38, 0x07,0x20, 0x00,0x84, 0xC6,0xA0, 0x00,0x00, 0x96,0x3A,
+0x00,0x04, 0x27,0x38, 0x00,0x04, 0xC0,0x3A, 0x6A,0x00, 0xEC,0x00, 0x15,0x20, 0x00,0x00,
+0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x96,
+0x00,0x00, 0x87,0x16, 0x00,0x04, 0xF6,0x04, 0x2D,0x40, 0x97,0x36, 0x00,0x00, 0x97,0x36,
+0x00,0x04, 0x07,0x30, 0x00,0x01, 0xF7,0x05, 0x2D,0x40, 0x96,0x36, 0x00,0x08, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x20,0x2A, 0x00,0x14, 0xE6,0x00, 0x15,0xD9, 0x27,0x28, 0x00,0x15, 0x20,0x3A,
+0x00,0x01, 0xE2,0x00, 0x15,0xD9, 0xF7,0x06, 0x2D,0xCC, 0xF6,0x84, 0x2E,0xCC, 0x86,0x02,
+0xFF,0x34, 0x75,0xB5, 0x00,0x02, 0xB5,0x2E, 0x70,0x02, 0x06,0xB4, 0x00,0x01, 0xF6,0x85,
+0x2E,0xCC, 0xF7,0x06, 0x2E,0x4C, 0x20,0x36, 0x00,0x1F, 0xE2,0x00, 0x15,0xD9, 0xB6,0x2E,
+0x70,0x02, 0xF0,0x05, 0x2E,0xCC, 0xF6,0x86, 0x2D,0x44, 0x77,0x29, 0x00,0x02, 0xA6,0xBA,
+0x68,0x02, 0x00,0x00, 0x00,0x01, 0x86,0xB6, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x87,0x3A, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xC1,0x38,
+0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x87,0x16, 0x00,0x00, 0x86,0x96, 0x00,0x04, 0xF6,0x06, 0x2D,0x44, 0x76,0xB5,
+0x00,0x02, 0x85,0xBA, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xB5,0xB6, 0x60,0x02, 0xC6,0xB4,
+0x70,0x00, 0x85,0x96, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x95,0xB6, 0x00,0x04, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x87,0x32, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x0F, 0x86,0xB2,
+0x00,0x00, 0xC5,0x38, 0x00,0x00, 0xEE,0x00, 0x16,0xB4, 0xC5,0xB4, 0x00,0x00, 0x20,0x36,
+0x00,0x0F, 0xEE,0x00, 0x16,0xB4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEC,0x00,
+0x16,0xB5, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xEC,0x00, 0x16,0xD0, 0x00,0x00,
+0x00,0x01, 0x87,0x32, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x32,
+0x00,0x0C, 0x87,0x32, 0x00,0x0C, 0xE0,0x00, 0x16,0xD8, 0xF4,0x02, 0x00,0x00, 0xC0,0x2A,
+0x5A,0x00, 0x44,0x0C, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x2E,0xE0, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x18,0x2C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x09, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x2E,0xE0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x34,0x58, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02,
+0x00,0x0C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x2F,0x6C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x3F,0x94, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x0B, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x2F,0xF8, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02,
+0x3B,0x84, 0x97,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x0B, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x32,0x28, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x26,0xE4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x13, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x30,0x84, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x26,0xA0, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02,
+0x00,0x11, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x31,0x10, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x18,0x2C, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x09, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x31,0x9C, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF0,0x05,
+0x7A,0x78, 0xF0,0x05, 0x32,0xE8, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x50, 0xF7,0x04, 0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x18,0x55, 0xF6,0x86, 0x71,0xC4, 0xE0,0x00, 0x18,0x6C, 0xF6,0x02,
+0x00,0x00, 0xF7,0x04, 0x71,0xD4, 0x00,0x00, 0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38,
+0x68,0x00, 0x86,0x3A, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0xF6,0x05, 0x32,0xC4, 0x86,0xB2,
+0x00,0x08, 0x07,0x01, 0x80,0x00, 0xC5,0xB4, 0x74,0x00, 0xF5,0x85, 0x32,0xD0, 0x87,0x32,
+0x00,0x18, 0xF6,0x86, 0x6F,0x44, 0x77,0x39, 0x00,0x02, 0xA7,0x3A, 0x68,0x02, 0x20,0x2E,
+0x00,0x00, 0xF7,0x05, 0x32,0xC0, 0x07,0x38, 0x09,0xD8, 0x86,0xB2, 0x00,0x04, 0xF7,0x05,
+0x32,0xCC, 0xE6,0x00, 0x19,0x41, 0xF6,0x85, 0x32,0xC8, 0xF7,0x04, 0x71,0x98, 0xF6,0x84,
+0x7A,0x78, 0x27,0x38, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x19,0x10, 0xF7,0x05,
+0x71,0x98, 0xF7,0x04, 0x76,0xFC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x18,0xE8, 0xF3,0x02, 0x00,0x11, 0xF3,0x06, 0x32,0xD4, 0xF3,0x05, 0x76,0xFC, 0xE0,0x00,
+0x18,0xF8, 0xF7,0x02, 0x00,0x01, 0xF3,0x05, 0x76,0xF8, 0xF3,0x06, 0x32,0xD4, 0xF3,0x05,
+0x77,0x00, 0xF7,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x19,0x14, 0xF3,0x02,
+0x00,0x01, 0xF3,0x06, 0x31,0x10, 0xE0,0x00, 0x26,0x8C, 0xF3,0x05, 0x32,0xD4, 0xF3,0x02,
+0x00,0x01, 0xF3,0x05, 0x7A,0x78, 0xF3,0x06, 0x30,0x84, 0xF3,0x05, 0x32,0xD4, 0xF3,0x04,
+0x32,0xC4, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x06,0x10, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x26,0x8C, 0x00,0x00, 0x00,0x01, 0xF3,0x02,
+0x00,0x00, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x1C,0xB9, 0x93,0x16, 0xFF,0xE4, 0x87,0x32,
+0x00,0x08, 0x86,0x96, 0xFF,0xE4, 0xC3,0x04, 0x00,0x00, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0x19,0x84, 0x20,0x36, 0x00,0x00, 0x87,0x32, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x32,0x00, 0xE6,0x00, 0x19,0x84, 0x20,0x36, 0x00,0x00, 0xF6,0x82, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0x1C,0xB8, 0xF3,0x02, 0x00,0x00, 0xF7,0x04, 0x32,0xC0, 0x93,0x16,
+0xFF,0xAC, 0xF5,0x84, 0x32,0xC4, 0x86,0x3A, 0x14,0x28, 0x03,0xB8, 0x14,0x20, 0x04,0x2C,
+0x00,0x08, 0x86,0xBA, 0x14,0x24, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x6A,0x00, 0xEC,0x00,
+0x1A,0x70, 0x96,0x16, 0xFF,0xEC, 0x77,0x31, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x77,0x39,
+0x00,0x02, 0xC6,0x38, 0x38,0x00, 0x06,0x30, 0x00,0x0C, 0x86,0xB2, 0x00,0x00, 0x87,0x2E,
+0x00,0x08, 0x85,0x16, 0xFF,0xAC, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x1A,0x00, 0xC4,0x84,
+0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x2E, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x1A,0x04, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A,
+0x00,0x00, 0xE6,0x00, 0x1A,0x11, 0x00,0x00, 0x00,0x01, 0xF4,0x82, 0x00,0x00, 0x86,0xB2,
+0x00,0x00, 0x87,0x22, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0x1A,0x4C, 0xF5,0x82, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x1A,0x54, 0x20,0x2E,
+0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x22, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x1A,0x55, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0x1A,0x65, 0x20,0x26, 0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26,
+0x00,0x00, 0xE6,0x00, 0x1A,0x70, 0xF3,0x02, 0x00,0x01, 0x93,0x16, 0xFF,0xAC, 0x83,0x16,
+0xFF,0xAC, 0x00,0x00, 0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x1A,0xB1, 0xF6,0x02,
+0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4,
+0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xE0,0x00, 0x1B,0x18, 0x96,0x96, 0xFF,0xF4, 0x27,0x14,
+0x00,0x14, 0x97,0x13, 0xFF,0xFC, 0x94,0x13, 0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x93,0x96,
+0xFF,0xBC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0xBC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x1B,0x15, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xF0, 0x96,0x96, 0xFF,0xF4, 0x97,0x1E, 0x00,0x08, 0xE0,0x00, 0x1B,0x1C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x1C,0xB8, 0xF3,0x02,
+0x00,0x00, 0xF6,0x04, 0x32,0xC0, 0x93,0x16, 0xFF,0xAC, 0x86,0xB2, 0x14,0x28, 0x03,0xB0,
+0x14,0x20, 0x04,0x30, 0x14,0x8C, 0x87,0x32, 0x14,0x24, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xEC,0x00, 0x1C,0x04, 0x96,0x96, 0xFF,0xEC, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0xC5,0xB8, 0x38,0x00, 0x05,0xAC, 0x00,0x0C, 0x86,0xAE,
+0x00,0x00, 0x87,0x32, 0x14,0x8C, 0x85,0x16, 0xFF,0xAC, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x1B,0x94, 0xC4,0x84, 0x00,0x00, 0x86,0xAE, 0x00,0x04, 0x87,0x32, 0x14,0x90, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x1B,0x98, 0x20,0x2A, 0x00,0x00, 0xF5,0x02,
+0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0x1B,0xA5, 0x00,0x00, 0x00,0x01, 0xF4,0x82,
+0x00,0x00, 0x86,0xAE, 0x00,0x00, 0x87,0x22, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x1B,0xE0, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x1B,0xE8, 0x20,0x32, 0x00,0x00, 0x86,0xAE, 0x00,0x04, 0x87,0x22, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x1B,0xE9, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x1B,0xF9, 0x20,0x26, 0x00,0x00, 0xF4,0x82,
+0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0x1C,0x04, 0xF3,0x02, 0x00,0x01, 0x93,0x16,
+0xFF,0xAC, 0x83,0x16, 0xFF,0xAC, 0x00,0x00, 0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00,
+0x1C,0x45, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xE0,0x00, 0x1C,0xAC, 0x96,0x96,
+0xFF,0xF4, 0x27,0x14, 0x00,0x14, 0x97,0x13, 0xFF,0xFC, 0x94,0x13, 0xFF,0xFC, 0x93,0x93,
+0xFF,0xFC, 0x93,0x96, 0xFF,0xBC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0xBC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x1C,0xA9, 0xF6,0x02,
+0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4,
+0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0x96,0x96, 0xFF,0xF4, 0x97,0x1E, 0x00,0x08, 0xE0,0x00,
+0x1C,0xB0, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x1E,0x15, 0xF3,0x02, 0x00,0x01, 0xF6,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x85,0xB6,
+0x0E,0xF4, 0x86,0x36, 0x0E,0xF8, 0x20,0x2E, 0x00,0x10, 0xE2,0x00, 0x1C,0xDC, 0x20,0x32,
+0x00,0x10, 0xE2,0x00, 0x1C,0xF9, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x0F,0x00, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x0F,0x00, 0x87,0x36, 0x0F,0x00, 0xE0,0x00,
+0x1D,0x24, 0xF7,0x02, 0x00,0x00, 0x07,0x30, 0x00,0x01, 0xC0,0x3A, 0x5A,0x00, 0xE6,0x00,
+0x1D,0x1D, 0xF6,0x82, 0x00,0x00, 0x20,0x32, 0x00,0x10, 0xE6,0x00, 0x1D,0x20, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0x1D,0x24, 0xC7,0x34, 0x00,0x00, 0xF6,0x82, 0x00,0x01, 0xC7,0x34,
+0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x1E,0x14, 0xF3,0x02, 0x00,0x01, 0xF3,0x04,
+0x32,0xCC, 0x00,0x00, 0x00,0x01, 0x93,0x16, 0xFF,0xDC, 0x93,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x43,0x68, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x1D,0xFC, 0xF3,0x02, 0x00,0x00, 0x83,0x16, 0xFF,0xDC, 0x00,0x00, 0x00,0x01, 0x86,0x1A,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x1D,0x91, 0x76,0xB1,
+0x00,0x02, 0x87,0x1A, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x1A,
+0x00,0x0C, 0x87,0x1A, 0x00,0x0C, 0xE0,0x00, 0x1D,0xFC, 0xF3,0x02, 0x00,0x00, 0xF3,0x02,
+0x00,0x4C, 0x93,0x13, 0xFF,0xFC, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38,
+0x6A,0x00, 0x83,0x16, 0xFF,0xDC, 0xC7,0x38, 0x60,0x00, 0xC7,0x38, 0x30,0x00, 0x07,0x38,
+0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0xF3,0x06, 0x7A,0x28, 0x93,0x13, 0xFF,0xFC, 0x96,0x16,
+0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xB4, 0x00,0x00, 0x00,0x01, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00,
+0x1D,0xEC, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x83,0x16, 0xFF,0xDC, 0x00,0x00,
+0x00,0x01, 0x96,0x1A, 0x00,0x00, 0xF3,0x02, 0x00,0x01, 0x93,0x16, 0xFF,0xD4, 0x83,0x16,
+0xFF,0xD4, 0x00,0x00, 0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x1E,0x18, 0xF3,0x02,
+0x00,0x01, 0x93,0x16, 0xFF,0xE4, 0x83,0x16, 0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0x1F,0x35, 0xF6,0x82, 0x0C,0xAB, 0xF7,0x04, 0x32,0xB4, 0x83,0x16,
+0xFF,0xD4, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x32,0xB4, 0xF7,0x04, 0x32,0xB4, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0x1E,0x70, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xC0, 0xF3,0x06,
+0xE0,0x30, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0x1E,0x70, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x32,0xE8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x32,0xE8, 0xF7,0x04,
+0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x1E,0xAD, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x1E,0xAC, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84,
+0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0x1E,0xC8, 0xF7,0x05,
+0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05,
+0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00,
+0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x25,0xD9, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04,
+0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x25,0x79, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x25,0x78, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x25,0x78, 0x00,0x00, 0x00,0x01, 0xE0,0x00, 0x25,0xDC, 0xF3,0x06,
+0x31,0x9C, 0xF0,0x05, 0x32,0xE8, 0xF7,0x04, 0x32,0xC0, 0xF6,0x04, 0x6F,0x54, 0x96,0xBA,
+0x00,0x04, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x1F,0x60, 0xF3,0x02, 0x00,0x0C, 0xF3,0x02,
+0x00,0x01, 0xF3,0x05, 0x6F,0x54, 0xE0,0x00, 0x1F,0x68, 0xF7,0x02, 0x00,0x01, 0xF3,0x05,
+0x6F,0x58, 0xF7,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x1F,0x7C, 0xF3,0x06,
+0x2F,0x6C, 0xE0,0x00, 0x26,0x8C, 0xF3,0x05, 0x32,0xD4, 0xF5,0x84, 0x7A,0x70, 0x24,0x94,
+0x00,0x10, 0x20,0x2E, 0x00,0x01, 0xE6,0x00, 0x22,0x84, 0xF5,0x85, 0x7A,0xA0, 0xF7,0x02,
+0x00,0x01, 0xF6,0x04, 0x32,0xC8, 0xF7,0x05, 0x7A,0x70, 0xF7,0x04, 0x32,0xC4, 0xF6,0x84,
+0x32,0xC0, 0xF6,0x05, 0x7A,0x2C, 0x90,0x02, 0xFF,0x80, 0x90,0x02, 0xFF,0x38, 0xF5,0x84,
+0x7A,0x28, 0x07,0x38, 0x00,0x24, 0x95,0x82, 0xFF,0x3C, 0x97,0x02, 0xFF,0x40, 0x96,0x02,
+0xFF,0x44, 0x87,0x36, 0x14,0x10, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36,
+0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF0,0x05, 0x6F,0x50, 0xF7,0x04, 0x32,0xB8, 0x95,0x96,
+0xFF,0xEC, 0xC7,0x38, 0x60,0x00, 0xF7,0x05, 0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0xF3,0x06,
+0x2F,0xF8, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0xF3,0x05,
+0x32,0xD4, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x20,0x34, 0x00,0x00,
+0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x20,0x25, 0x00,0x00, 0x00,0x01, 0xF7,0x06,
+0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x26,0x8C, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x32,0xE4, 0xFF,0x82, 0x00,0x10, 0xF5,0x84, 0x6F,0x58, 0x07,0x38,
+0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x20,0x90, 0xF7,0x05, 0x32,0xE4, 0xF7,0x04,
+0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x20,0x84, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xE0,0x00, 0x20,0x94, 0xF3,0x05, 0x6F,0x58, 0xF0,0x05,
+0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E, 0x00,0x04, 0x87,0x2E,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00, 0x21,0xC0, 0x00,0x00,
+0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x03,0x2C, 0x0E,0xF4, 0x93,0x16, 0xFF,0xCC, 0xF7,0x05,
+0x7A,0x68, 0x93,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xB8, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x21,0x7C, 0x00,0x00, 0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x32,
+0x00,0x10, 0xE2,0x00, 0x21,0x19, 0xF3,0x02, 0x00,0x4C, 0x87,0x2E, 0x0F,0x00, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E, 0x0F,0x00, 0xE0,0x00,
+0x21,0x7C, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06, 0x7A,0x28, 0x93,0x13,
+0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38,
+0x6A,0x00, 0x83,0x16, 0xFF,0xCC, 0xC7,0x38, 0x60,0x00, 0xC7,0x38, 0x30,0x00, 0x07,0x38,
+0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x96,0x16, 0xFF,0xB4, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16, 0xFF,0xB4, 0x85,0x96,
+0xFF,0xB8, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00, 0x21,0x78, 0x00,0x00,
+0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04, 0x32,0xC0, 0xF3,0x06,
+0xE0,0x30, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0x21,0xC0, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0x21,0xC1, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0xF7,0x04,
+0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x21,0xFD, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x21,0xFC, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84,
+0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0x22,0x18, 0xF7,0x05,
+0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05,
+0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00,
+0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x25,0xD9, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04,
+0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x25,0x79, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x25,0x78, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x25,0x78, 0x00,0x00, 0x00,0x01, 0xE0,0x00, 0x25,0xDC, 0xF3,0x06,
+0x31,0x9C, 0xF0,0x05, 0x7A,0x88, 0x90,0x02, 0xFF,0x38, 0xF0,0x05, 0x6F,0x50, 0x90,0x02,
+0xFF,0x80, 0xF7,0x04, 0x32,0xC4, 0xF3,0x06, 0x32,0x28, 0xF3,0x05, 0x32,0xD4, 0xF6,0x04,
+0x32,0xC8, 0xF6,0x84, 0x7A,0x2C, 0xF5,0x02, 0x00,0x00, 0x07,0x38, 0x00,0x24, 0xF7,0x05,
+0x7A,0x98, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x22,0xD5, 0xF6,0x05, 0x7A,0x90, 0xC0,0x2A,
+0x5A,0x00, 0xE6,0x00, 0x26,0x20, 0xC0,0x32, 0x6A,0x00, 0xEE,0x00, 0x26,0x21, 0x00,0x00,
+0x00,0x01, 0xF6,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x14,0x10, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36, 0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF7,0x04,
+0x32,0xB8, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0xF7,0x05, 0x32,0xB8, 0xF7,0x04,
+0x32,0xBC, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x32,0xBC, 0xF7,0x04,
+0x32,0xBC, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x23,0x45, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0x07,0x38, 0x00,0x01, 0xE0,0x00, 0x23,0x48, 0xF7,0x05,
+0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0xF5,0x84, 0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E,
+0x00,0x21, 0xE2,0x00, 0x23,0x8C, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x23,0x80, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02,
+0x00,0x22, 0xE0,0x00, 0x23,0x90, 0xF3,0x05, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84,
+0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E, 0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00, 0x24,0xBC, 0x00,0x00, 0x00,0x01, 0x87,0x02,
+0xFF,0x38, 0x03,0x2C, 0x0E,0xF4, 0x93,0x16, 0xFF,0xC4, 0xF7,0x05, 0x7A,0x68, 0x93,0x13,
+0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93,
+0xFF,0xFC, 0x85,0x96, 0xFF,0xB8, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x24,0x78, 0x00,0x00,
+0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00,
+0x24,0x15, 0xF3,0x02, 0x00,0x4C, 0x87,0x2E, 0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E, 0x0F,0x00, 0xE0,0x00, 0x24,0x78, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06, 0x7A,0x28, 0x93,0x13, 0xFF,0xFC, 0x76,0xB1,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x83,0x16,
+0xFF,0xC4, 0xC7,0x38, 0x60,0x00, 0xC7,0x38, 0x30,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13,
+0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x96,0x16, 0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16, 0xFF,0xB4, 0x85,0x96, 0xFF,0xB8, 0x06,0x30,
+0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00, 0x24,0x74, 0x00,0x00, 0x00,0x01, 0xF6,0x02,
+0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04, 0x32,0xC0, 0xF3,0x06, 0xE0,0x30, 0xC0,0x3A,
+0x32,0x00, 0xE6,0x00, 0x24,0xBC, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00,
+0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x24,0xBD, 0x00,0x00,
+0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x24,0xF9, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04,
+0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0xF3,0x02, 0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x24,0xF8, 0xB3,0x3A,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38,
+0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0x25,0x14, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05,
+0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36,
+0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0x25,0xD9, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x25,0x79, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x25,0x78, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x25,0xD1, 0x00,0x00, 0x00,0x01, 0xF5,0x84, 0x76,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x2E,
+0x00,0x21, 0xE2,0x00, 0x25,0xC4, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x25,0xB0, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02,
+0x00,0x22, 0xF3,0x05, 0x76,0xF8, 0xF3,0x04, 0x77,0x00, 0xE0,0x00, 0x25,0xC8, 0xF3,0x05,
+0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xE0,0x00, 0x25,0xD8, 0xF0,0x05, 0x7A,0x78, 0xE0,0x00,
+0x25,0xDC, 0xF3,0x06, 0x31,0x9C, 0xF3,0x06, 0x2E,0xE0, 0xF3,0x05, 0x32,0xD4, 0xF7,0x04,
+0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x26,0x8C, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x09, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x26,0x8C, 0xB3,0x3A, 0x68,0x02, 0xE0,0x00, 0x26,0x8C, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x7A,0x90, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xEE,0x00, 0x26,0x41, 0xC5,0xB4,
+0x00,0x00, 0xC7,0x38, 0x5A,0x00, 0xE0,0x00, 0x26,0x48, 0xF7,0x05, 0x7A,0x90, 0xC5,0xB8,
+0x00,0x00, 0xF0,0x05, 0x7A,0x90, 0xF6,0x84, 0x7A,0x88, 0xF7,0x06, 0x7A,0x28, 0x76,0x35,
+0x00,0x03, 0xA7,0x32, 0x70,0x02, 0x06,0xB4, 0x00,0x01, 0x97,0x16, 0xFF,0xEC, 0x84,0xA6,
+0xFF,0xFC, 0xF7,0x06, 0x7A,0x2C, 0xF3,0x04, 0x7A,0x98, 0x94,0x82, 0xFF,0x3C, 0x93,0x02,
+0xFF,0x40, 0x95,0x82, 0xFF,0x44, 0xB5,0xB2, 0x70,0x02, 0xF7,0x04, 0x7A,0x98, 0xF6,0x85,
+0x7A,0x88, 0xC7,0x38, 0x58,0x00, 0xF7,0x05, 0x7A,0x98, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x02, 0x00,0x01, 0xF7,0x05, 0x7A,0x78, 0xF7,0x06,
+0x30,0x84, 0xF7,0x05, 0x32,0xD4, 0xF7,0x04, 0x32,0xC4, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x06,0x10, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x50, 0xF7,0x04,
+0x32,0xD0, 0xF3,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x2A,0x71, 0x93,0x16,
+0xFF,0xE4, 0xF6,0x84, 0x32,0xC4, 0x86,0x16, 0xFF,0xE4, 0x87,0x36, 0x00,0x08, 0xC3,0x04,
+0x00,0x00, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0x27,0x3C, 0x20,0x32, 0x00,0x00, 0x87,0x36,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0x27,0x3C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x2A,0x70, 0xF3,0x02,
+0x00,0x00, 0xF7,0x04, 0x32,0xC0, 0x93,0x16, 0xFF,0xAC, 0xF5,0x84, 0x32,0xC4, 0x86,0x3A,
+0x14,0x28, 0x03,0xB8, 0x14,0x20, 0x04,0x2C, 0x00,0x08, 0x86,0xBA, 0x14,0x24, 0x00,0x00,
+0x00,0x01, 0xC0,0x32, 0x6A,0x00, 0xEC,0x00, 0x28,0x28, 0x96,0x16, 0xFF,0xEC, 0x77,0x31,
+0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x02, 0xC6,0x38, 0x38,0x00, 0x06,0x30,
+0x00,0x0C, 0x86,0xB2, 0x00,0x00, 0x87,0x2E, 0x00,0x08, 0x85,0x16, 0xFF,0xAC, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x27,0xB8, 0xC4,0x84, 0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x2E,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x27,0xBC, 0x20,0x2A,
+0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0x27,0xC9, 0x00,0x00,
+0x00,0x01, 0xF4,0x82, 0x00,0x00, 0x86,0xB2, 0x00,0x00, 0x87,0x22, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x28,0x04, 0xF5,0x82, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x28,0x0C, 0x20,0x2E, 0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x22,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x28,0x0D, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x28,0x1D, 0x20,0x26,
+0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0x28,0x28, 0xF3,0x02,
+0x00,0x01, 0x93,0x16, 0xFF,0xAC, 0x83,0x16, 0xFF,0xAC, 0x00,0x00, 0x00,0x01, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0x28,0x69, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00,
+0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xE0,0x00,
+0x28,0xD0, 0x96,0x96, 0xFF,0xF4, 0x27,0x14, 0x00,0x14, 0x97,0x13, 0xFF,0xFC, 0x94,0x13,
+0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x93,0x96, 0xFF,0xBC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xBC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x28,0xCD, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0x96,0x96, 0xFF,0xF4, 0x97,0x1E,
+0x00,0x08, 0xE0,0x00, 0x28,0xD4, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0x2A,0x70, 0xF3,0x02, 0x00,0x00, 0xF6,0x04, 0x32,0xC0, 0x93,0x16,
+0xFF,0xAC, 0x86,0xB2, 0x14,0x28, 0x03,0xB0, 0x14,0x20, 0x04,0x30, 0x14,0x8C, 0x87,0x32,
+0x14,0x24, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x00, 0x29,0xBC, 0x96,0x96,
+0xFF,0xEC, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xC5,0xB8,
+0x38,0x00, 0x05,0xAC, 0x00,0x0C, 0x86,0xAE, 0x00,0x00, 0x87,0x32, 0x14,0x8C, 0x85,0x16,
+0xFF,0xAC, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x29,0x4C, 0xC4,0x84, 0x00,0x00, 0x86,0xAE,
+0x00,0x04, 0x87,0x32, 0x14,0x90, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x29,0x50, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00,
+0x29,0x5D, 0x00,0x00, 0x00,0x01, 0xF4,0x82, 0x00,0x00, 0x86,0xAE, 0x00,0x00, 0x87,0x22,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x29,0x98, 0xF6,0x02,
+0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x29,0xA0, 0x20,0x32, 0x00,0x00, 0x86,0xAE,
+0x00,0x04, 0x87,0x22, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0x29,0xA1, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x29,0xB1, 0x20,0x26, 0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00,
+0x29,0xBC, 0xF3,0x02, 0x00,0x01, 0x93,0x16, 0xFF,0xAC, 0x83,0x16, 0xFF,0xAC, 0x00,0x00,
+0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x29,0xFD, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xF0, 0xE0,0x00, 0x2A,0x64, 0x96,0x96, 0xFF,0xF4, 0x27,0x14, 0x00,0x14, 0x97,0x13,
+0xFF,0xFC, 0x94,0x13, 0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x93,0x96, 0xFF,0xBC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xBC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x2A,0x61, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00,
+0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x38,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0x96,0x96,
+0xFF,0xF4, 0x97,0x1E, 0x00,0x08, 0xE0,0x00, 0x2A,0x68, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x2B,0xCD, 0xF3,0x02, 0x00,0x01, 0xF6,0x84,
+0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x85,0xB6, 0x0E,0xF4, 0x86,0x36, 0x0E,0xF8, 0x20,0x2E,
+0x00,0x10, 0xE2,0x00, 0x2A,0x94, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x2A,0xB1, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x0F,0x00, 0x87,0x36, 0x0F,0x00, 0xE0,0x00, 0x2A,0xDC, 0xF7,0x02, 0x00,0x00, 0x07,0x30,
+0x00,0x01, 0xC0,0x3A, 0x5A,0x00, 0xE6,0x00, 0x2A,0xD5, 0xF6,0x82, 0x00,0x00, 0x20,0x32,
+0x00,0x10, 0xE6,0x00, 0x2A,0xD8, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x2A,0xDC, 0xC7,0x34,
+0x00,0x00, 0xF6,0x82, 0x00,0x01, 0xC7,0x34, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x2B,0xCC, 0xF3,0x02, 0x00,0x01, 0xF3,0x04, 0x32,0xCC, 0x00,0x00, 0x00,0x01, 0x93,0x16,
+0xFF,0xDC, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x43,0x68, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x2B,0xB4, 0xF3,0x02, 0x00,0x00, 0x83,0x16,
+0xFF,0xDC, 0x00,0x00, 0x00,0x01, 0x86,0x1A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x32,
+0x00,0x10, 0xE2,0x00, 0x2B,0x49, 0x76,0xB1, 0x00,0x02, 0x87,0x1A, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x1A, 0x00,0x0C, 0x87,0x1A, 0x00,0x0C, 0xE0,0x00,
+0x2B,0xB4, 0xF3,0x02, 0x00,0x00, 0xF3,0x02, 0x00,0x4C, 0x93,0x13, 0xFF,0xFC, 0xC6,0xB4,
+0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x83,0x16, 0xFF,0xDC, 0xC7,0x38,
+0x60,0x00, 0xC7,0x38, 0x30,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0xF3,0x06,
+0x7A,0x28, 0x93,0x13, 0xFF,0xFC, 0x96,0x16, 0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16, 0xFF,0xB4, 0x00,0x00, 0x00,0x01, 0x06,0x30,
+0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00, 0x2B,0xA4, 0x00,0x00, 0x00,0x01, 0xF6,0x02,
+0x00,0x00, 0x83,0x16, 0xFF,0xDC, 0x00,0x00, 0x00,0x01, 0x96,0x1A, 0x00,0x00, 0xF3,0x02,
+0x00,0x01, 0x93,0x16, 0xFF,0xD4, 0x83,0x16, 0xFF,0xD4, 0x00,0x00, 0x00,0x01, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0x2B,0xD0, 0xF3,0x02, 0x00,0x01, 0x93,0x16, 0xFF,0xE4, 0x83,0x16,
+0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x2C,0xED, 0xF6,0x82,
+0x0C,0xAB, 0xF7,0x04, 0x32,0xB4, 0x83,0x16, 0xFF,0xD4, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x32,0xB4, 0xF7,0x04, 0x32,0xB4, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x2C,0x28, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x32,0xC0, 0xF3,0x06, 0xE0,0x30, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0x2C,0x28, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xE8, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x32,0xE8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x2C,0x65, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02,
+0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x2C,0x64, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xE6,0x00, 0x2C,0x80, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84,
+0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C,
+0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0x33,0x91, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x33,0x31, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x33,0x30, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x33,0x30, 0x00,0x00,
+0x00,0x01, 0xE0,0x00, 0x33,0x94, 0xF3,0x06, 0x31,0x9C, 0xF0,0x05, 0x32,0xE8, 0xF7,0x04,
+0x32,0xC0, 0xF6,0x04, 0x6F,0x54, 0x96,0xBA, 0x00,0x04, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x2D,0x18, 0xF3,0x02, 0x00,0x0C, 0xF3,0x02, 0x00,0x01, 0xF3,0x05, 0x6F,0x54, 0xE0,0x00,
+0x2D,0x20, 0xF7,0x02, 0x00,0x01, 0xF3,0x05, 0x6F,0x58, 0xF7,0x02, 0x00,0x00, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x2D,0x34, 0xF3,0x06, 0x2F,0x6C, 0xE0,0x00, 0x34,0x44, 0xF3,0x05,
+0x32,0xD4, 0xF5,0x84, 0x7A,0x70, 0x24,0x94, 0x00,0x10, 0x20,0x2E, 0x00,0x01, 0xE6,0x00,
+0x30,0x3C, 0xF5,0x85, 0x7A,0xA0, 0xF7,0x02, 0x00,0x01, 0xF6,0x04, 0x32,0xC8, 0xF7,0x05,
+0x7A,0x70, 0xF7,0x04, 0x32,0xC4, 0xF6,0x84, 0x32,0xC0, 0xF6,0x05, 0x7A,0x2C, 0x90,0x02,
+0xFF,0x80, 0x90,0x02, 0xFF,0x38, 0xF5,0x84, 0x7A,0x28, 0x07,0x38, 0x00,0x24, 0x95,0x82,
+0xFF,0x3C, 0x97,0x02, 0xFF,0x40, 0x96,0x02, 0xFF,0x44, 0x87,0x36, 0x14,0x10, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36, 0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF0,0x05,
+0x6F,0x50, 0xF7,0x04, 0x32,0xB8, 0x95,0x96, 0xFF,0xEC, 0xC7,0x38, 0x60,0x00, 0xF7,0x05,
+0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0xF3,0x06, 0x2F,0xF8, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0xF3,0x05, 0x32,0xD4, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E,
+0x74,0x00, 0xE6,0x00, 0x2D,0xEC, 0x00,0x00, 0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00,
+0x2D,0xDD, 0x00,0x00, 0x00,0x01, 0xF7,0x06, 0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A,
+0x00,0x10, 0xE6,0x00, 0x34,0x44, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xE4, 0xFF,0x82,
+0x00,0x10, 0xF5,0x84, 0x6F,0x58, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x2E,0x48, 0xF7,0x05, 0x32,0xE4, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x2E,0x3C, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xE0,0x00,
+0x2E,0x4C, 0xF3,0x05, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00,
+0x00,0x01, 0x90,0x2E, 0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x2F,0x78, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x03,0x2C,
+0x0E,0xF4, 0x93,0x16, 0xFF,0xCC, 0xF7,0x05, 0x7A,0x68, 0x93,0x13, 0xFF,0xFC, 0x95,0x96,
+0xFF,0xB8, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96,
+0xFF,0xB8, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x2F,0x34, 0x00,0x00, 0x00,0x01, 0x86,0x2E,
+0x0E,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x2E,0xD1, 0xF3,0x02,
+0x00,0x4C, 0x87,0x2E, 0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E,
+0x0F,0x00, 0x87,0x2E, 0x0F,0x00, 0xE0,0x00, 0x2F,0x34, 0x00,0x00, 0x00,0x01, 0x93,0x13,
+0xFF,0xFC, 0xF3,0x06, 0x7A,0x28, 0x93,0x13, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4,
+0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x83,0x16, 0xFF,0xCC, 0xC7,0x38,
+0x60,0x00, 0xC7,0x38, 0x30,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96,
+0xFF,0xB8, 0x96,0x16, 0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93,
+0xFF,0xFC, 0x86,0x16, 0xFF,0xB4, 0x85,0x96, 0xFF,0xB8, 0x06,0x30, 0x00,0x01, 0x20,0x32,
+0x00,0x11, 0xE6,0x00, 0x2F,0x30, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E,
+0x0E,0xF8, 0xF7,0x04, 0x32,0xC0, 0xF3,0x06, 0xE0,0x30, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0x2F,0x78, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8,
+0x00,0x1E, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x2F,0x79, 0x00,0x00, 0x00,0x01, 0x0F,0x81,
+0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x2F,0xB5, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02,
+0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x2F,0xB4, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xE6,0x00, 0x2F,0xD0, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84,
+0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C,
+0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0x33,0x91, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x33,0x31, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x33,0x30, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x33,0x30, 0x00,0x00,
+0x00,0x01, 0xE0,0x00, 0x33,0x94, 0xF3,0x06, 0x31,0x9C, 0xF0,0x05, 0x7A,0x88, 0x90,0x02,
+0xFF,0x38, 0xF0,0x05, 0x6F,0x50, 0x90,0x02, 0xFF,0x80, 0xF7,0x04, 0x32,0xC4, 0xF3,0x06,
+0x32,0x28, 0xF3,0x05, 0x32,0xD4, 0xF6,0x04, 0x32,0xC8, 0xF6,0x84, 0x7A,0x2C, 0xF5,0x02,
+0x00,0x00, 0x07,0x38, 0x00,0x24, 0xF7,0x05, 0x7A,0x98, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x30,0x8D, 0xF6,0x05, 0x7A,0x90, 0xC0,0x2A, 0x5A,0x00, 0xE6,0x00, 0x33,0xD8, 0xC0,0x32,
+0x6A,0x00, 0xEE,0x00, 0x33,0xD9, 0x00,0x00, 0x00,0x01, 0xF6,0x84, 0x32,0xC0, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x14,0x10, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36,
+0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF7,0x04, 0x32,0xB8, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0xF7,0x05, 0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x30,0xFD, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0x31,0x00, 0xF7,0x05, 0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0xF5,0x84,
+0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x31,0x44, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x31,0x38, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xE0,0x00, 0x31,0x48, 0xF3,0x05,
+0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E,
+0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0x32,0x74, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x03,0x2C, 0x0E,0xF4, 0x93,0x16,
+0xFF,0xC4, 0xF7,0x05, 0x7A,0x68, 0x93,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xB8, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x32,0x30, 0x00,0x00, 0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x31,0xCD, 0xF3,0x02, 0x00,0x4C, 0x87,0x2E,
+0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E,
+0x0F,0x00, 0xE0,0x00, 0x32,0x30, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06,
+0x7A,0x28, 0x93,0x13, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35,
+0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x83,0x16, 0xFF,0xC4, 0xC7,0x38, 0x60,0x00, 0xC7,0x38,
+0x30,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xB8, 0x96,0x16,
+0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xB4, 0x85,0x96, 0xFF,0xB8, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00,
+0x32,0x2C, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04,
+0x32,0xC0, 0xF3,0x06, 0xE0,0x30, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0x32,0x74, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x00, 0x32,0x75, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04,
+0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04,
+0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x32,0xB1, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0A, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x32,0xB0, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x32,0xCC, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04,
+0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84,
+0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x33,0x91, 0xF7,0x05,
+0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x33,0x31, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x33,0x30, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x33,0x89, 0x00,0x00, 0x00,0x01, 0xF5,0x84,
+0x76,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x33,0x7C, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x33,0x68, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xF3,0x05, 0x76,0xF8, 0xF3,0x04,
+0x77,0x00, 0xE0,0x00, 0x33,0x80, 0xF3,0x05, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xE0,0x00,
+0x33,0x90, 0xF0,0x05, 0x7A,0x78, 0xE0,0x00, 0x33,0x94, 0xF3,0x06, 0x31,0x9C, 0xF3,0x06,
+0x2E,0xE0, 0xF3,0x05, 0x32,0xD4, 0xF7,0x04, 0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x34,0x44, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02,
+0x00,0x09, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x34,0x44, 0xB3,0x3A, 0x68,0x02, 0xE0,0x00,
+0x34,0x44, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x7A,0x90, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xEE,0x00, 0x33,0xF9, 0xC5,0xB4, 0x00,0x00, 0xC7,0x38, 0x5A,0x00, 0xE0,0x00,
+0x34,0x00, 0xF7,0x05, 0x7A,0x90, 0xC5,0xB8, 0x00,0x00, 0xF0,0x05, 0x7A,0x90, 0xF6,0x84,
+0x7A,0x88, 0xF7,0x06, 0x7A,0x28, 0x76,0x35, 0x00,0x03, 0xA7,0x32, 0x70,0x02, 0x06,0xB4,
+0x00,0x01, 0x97,0x16, 0xFF,0xEC, 0x84,0xA6, 0xFF,0xFC, 0xF7,0x06, 0x7A,0x2C, 0xF3,0x04,
+0x7A,0x98, 0x94,0x82, 0xFF,0x3C, 0x93,0x02, 0xFF,0x40, 0x95,0x82, 0xFF,0x44, 0xB5,0xB2,
+0x70,0x02, 0xF7,0x04, 0x7A,0x98, 0xF6,0x85, 0x7A,0x88, 0xC7,0x38, 0x58,0x00, 0xF7,0x05,
+0x7A,0x98, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x20, 0xF5,0x84, 0x7A,0x70, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x01, 0xE6,0x00,
+0x37,0x6C, 0xF5,0x85, 0x7A,0xA0, 0xF7,0x02, 0x00,0x01, 0xF6,0x04, 0x32,0xC8, 0xF7,0x05,
+0x7A,0x70, 0xF7,0x04, 0x32,0xC4, 0xF6,0x84, 0x32,0xC0, 0xF6,0x05, 0x7A,0x2C, 0x90,0x02,
+0xFF,0x80, 0x90,0x02, 0xFF,0x38, 0xF5,0x84, 0x7A,0x28, 0x07,0x38, 0x00,0x24, 0x95,0x82,
+0xFF,0x3C, 0x97,0x02, 0xFF,0x40, 0x96,0x02, 0xFF,0x44, 0x87,0x36, 0x14,0x10, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36, 0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF0,0x05,
+0x6F,0x50, 0xF7,0x04, 0x32,0xB8, 0x95,0x96, 0xFF,0xF4, 0xC7,0x38, 0x60,0x00, 0xF7,0x05,
+0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0xF4,0x86, 0x2F,0xF8, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0xF4,0x85, 0x32,0xD4, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E,
+0x74,0x00, 0xE6,0x00, 0x35,0x1C, 0x00,0x00, 0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00,
+0x35,0x0D, 0x00,0x00, 0x00,0x01, 0xF7,0x06, 0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A,
+0x00,0x10, 0xE6,0x00, 0x3B,0x70, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xE4, 0xFF,0x82,
+0x00,0x10, 0xF5,0x84, 0x6F,0x58, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x35,0x78, 0xF7,0x05, 0x32,0xE4, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x35,0x6C, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00,
+0x35,0x7C, 0xF4,0x85, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00,
+0x00,0x01, 0x90,0x2E, 0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x36,0xA8, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x04,0xAC,
+0x0E,0xF4, 0x94,0x96, 0xFF,0xEC, 0xF7,0x05, 0x7A,0x68, 0x94,0x93, 0xFF,0xFC, 0x95,0x96,
+0xFF,0xDC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96,
+0xFF,0xDC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x36,0x64, 0x00,0x00, 0x00,0x01, 0x86,0x2E,
+0x0E,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x36,0x01, 0xF4,0x82,
+0x00,0x4C, 0x87,0x2E, 0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E,
+0x0F,0x00, 0x87,0x2E, 0x0F,0x00, 0xE0,0x00, 0x36,0x64, 0x00,0x00, 0x00,0x01, 0x94,0x93,
+0xFF,0xFC, 0xF4,0x86, 0x7A,0x28, 0x94,0x93, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4,
+0x60,0x00, 0x77,0x35, 0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x84,0x96, 0xFF,0xEC, 0xC7,0x38,
+0x60,0x00, 0xC7,0x38, 0x48,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96,
+0xFF,0xDC, 0x96,0x16, 0xFF,0xD8, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93,
+0xFF,0xFC, 0x86,0x16, 0xFF,0xD8, 0x85,0x96, 0xFF,0xDC, 0x06,0x30, 0x00,0x01, 0x20,0x32,
+0x00,0x11, 0xE6,0x00, 0x36,0x60, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E,
+0x0E,0xF8, 0xF7,0x04, 0x32,0xC0, 0xF4,0x86, 0xE0,0x30, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00,
+0x36,0xA8, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8,
+0x00,0x1E, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x36,0xA9, 0x00,0x00, 0x00,0x01, 0x0F,0x81,
+0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x36,0xE5, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x0A, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x36,0xE4, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF7,0x04, 0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xE6,0x00, 0x37,0x00, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84,
+0x71,0xD4, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C,
+0x00,0x01, 0xF6,0x84, 0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0x3A,0xC1, 0xF7,0x05, 0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x3A,0x61, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x3A,0x60, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x3A,0x60, 0x00,0x00,
+0x00,0x01, 0xE0,0x00, 0x3A,0xC4, 0xF4,0x86, 0x31,0x9C, 0xF0,0x05, 0x7A,0x88, 0x90,0x02,
+0xFF,0x38, 0xF0,0x05, 0x6F,0x50, 0x90,0x02, 0xFF,0x80, 0xF7,0x04, 0x32,0xC4, 0xF4,0x86,
+0x32,0x28, 0xF4,0x85, 0x32,0xD4, 0xF6,0x04, 0x32,0xC8, 0xF6,0x84, 0x7A,0x2C, 0xF5,0x02,
+0x00,0x00, 0x07,0x38, 0x00,0x24, 0xF7,0x05, 0x7A,0x98, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x37,0xBD, 0xF6,0x05, 0x7A,0x90, 0xC0,0x2A, 0x5A,0x00, 0xE6,0x00, 0x3B,0x08, 0xC0,0x32,
+0x6A,0x00, 0xEE,0x00, 0x3B,0x09, 0x00,0x00, 0x00,0x01, 0xF6,0x84, 0x32,0xC0, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x14,0x10, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36,
+0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF7,0x04, 0x32,0xB8, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0xF7,0x05, 0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x38,0x2D, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0x38,0x30, 0xF7,0x05, 0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0xF5,0x84,
+0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x38,0x74, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x38,0x68, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00, 0x38,0x78, 0xF4,0x85,
+0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E,
+0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0x39,0xA4, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x04,0xAC, 0x0E,0xF4, 0x94,0x96,
+0xFF,0xE4, 0xF7,0x05, 0x7A,0x68, 0x94,0x93, 0xFF,0xFC, 0x95,0x96, 0xFF,0xDC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xDC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x39,0x60, 0x00,0x00, 0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x38,0xFD, 0xF4,0x82, 0x00,0x4C, 0x87,0x2E,
+0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E,
+0x0F,0x00, 0xE0,0x00, 0x39,0x60, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0xF4,0x86,
+0x7A,0x28, 0x94,0x93, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35,
+0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x84,0x96, 0xFF,0xE4, 0xC7,0x38, 0x60,0x00, 0xC7,0x38,
+0x48,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xDC, 0x96,0x16,
+0xFF,0xD8, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xD8, 0x85,0x96, 0xFF,0xDC, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00,
+0x39,0x5C, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04,
+0x32,0xC0, 0xF4,0x86, 0xE0,0x30, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x39,0xA4, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x00, 0x39,0xA5, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04,
+0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04,
+0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x39,0xE1, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x0A, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x39,0xE0, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x39,0xFC, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04,
+0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84,
+0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x3A,0xC1, 0xF7,0x05,
+0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x3A,0x61, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x3A,0x60, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x3A,0xB9, 0x00,0x00, 0x00,0x01, 0xF5,0x84,
+0x76,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x3A,0xAC, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3A,0x98, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xF4,0x85, 0x76,0xF8, 0xF4,0x84,
+0x77,0x00, 0xE0,0x00, 0x3A,0xB0, 0xF4,0x85, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xE0,0x00,
+0x3A,0xC0, 0xF0,0x05, 0x7A,0x78, 0xE0,0x00, 0x3A,0xC4, 0xF4,0x86, 0x31,0x9C, 0xF4,0x86,
+0x2E,0xE0, 0xF4,0x85, 0x32,0xD4, 0xF7,0x04, 0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x3B,0x70, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x09, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3B,0x70, 0xB4,0xBA, 0x68,0x02, 0xE0,0x00,
+0x3B,0x70, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x7A,0x90, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xEE,0x00, 0x3B,0x29, 0xC5,0xB4, 0x00,0x00, 0xC7,0x38, 0x5A,0x00, 0xE0,0x00,
+0x3B,0x30, 0xF7,0x05, 0x7A,0x90, 0xC5,0xB8, 0x00,0x00, 0xF0,0x05, 0x7A,0x90, 0xF7,0x04,
+0x7A,0x88, 0xF6,0x86, 0x7A,0x28, 0x76,0x39, 0x00,0x03, 0xA6,0xB2, 0x68,0x02, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x7A,0x88, 0xF7,0x04, 0x7A,0x98, 0x96,0x96, 0xFF,0xF4, 0x96,0x82,
+0xFF,0x3C, 0xF4,0x84, 0x7A,0x98, 0xF6,0x86, 0x7A,0x2C, 0xC7,0x38, 0x58,0x00, 0x94,0x82,
+0xFF,0x40, 0x95,0x82, 0xFF,0x44, 0xB5,0xB2, 0x68,0x02, 0xF7,0x05, 0x7A,0x98, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x18, 0xF5,0x04,
+0x7A,0x88, 0xF7,0x06, 0x7A,0x2C, 0xF5,0x84, 0x7A,0x90, 0x76,0xA9, 0x00,0x03, 0xA6,0xB6,
+0x70,0x02, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x3B,0xCD, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x7A,0xA0, 0x00,0x00, 0x00,0x01, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00, 0x3F,0x18, 0xC0,0x2E,
+0x6A,0x00, 0xEE,0x00, 0x3F,0x19, 0x00,0x00, 0x00,0x01, 0xF6,0x84, 0x32,0xC0, 0xF6,0x04,
+0x32,0xC8, 0x87,0x36, 0x14,0x10, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x97,0x36,
+0x14,0x10, 0x87,0x36, 0x14,0x18, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x18, 0x87,0x36, 0x14,0x18, 0xF7,0x04, 0x32,0xB8, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0xF7,0x05, 0x32,0xB8, 0xF7,0x04, 0x32,0xBC, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x32,0xBC, 0xF7,0x04, 0x32,0xBC, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0x3C,0x3D, 0xF6,0x82, 0x00,0x00, 0xF7,0x04, 0x32,0xE0, 0xF6,0x85, 0x7A,0x70, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0x3C,0x40, 0xF7,0x05, 0x32,0xE0, 0xF5,0x05, 0x7A,0x70, 0xF5,0x84,
+0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x3C,0x84, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3C,0x78, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00, 0x3C,0x88, 0xF4,0x85,
+0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E,
+0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0x3D,0xB4, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x04,0xAC, 0x0E,0xF4, 0x94,0x96,
+0xFF,0xEC, 0xF7,0x05, 0x7A,0x68, 0x94,0x93, 0xFF,0xFC, 0x95,0x96, 0xFF,0xE4, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xE4, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x3D,0x70, 0x00,0x00, 0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x3D,0x0D, 0xF4,0x82, 0x00,0x4C, 0x87,0x2E,
+0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E,
+0x0F,0x00, 0xE0,0x00, 0x3D,0x70, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0xF4,0x86,
+0x7A,0x28, 0x94,0x93, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35,
+0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x84,0x96, 0xFF,0xEC, 0xC7,0x38, 0x60,0x00, 0xC7,0x38,
+0x48,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xE4, 0x96,0x16,
+0xFF,0xE0, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xE0, 0x85,0x96, 0xFF,0xE4, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00,
+0x3D,0x6C, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04,
+0x32,0xC0, 0xF4,0x86, 0xE0,0x30, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x3D,0xB4, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x00, 0x3D,0xB5, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04,
+0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04,
+0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x3D,0xF1, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x0A, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x3D,0xF0, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x3E,0x0C, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04,
+0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84,
+0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x3E,0xD1, 0xF7,0x05,
+0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x3E,0x71, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x3E,0x70, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x3E,0xC9, 0x00,0x00, 0x00,0x01, 0xF5,0x84,
+0x76,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x3E,0xBC, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3E,0xA8, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xF4,0x85, 0x76,0xF8, 0xF4,0x84,
+0x77,0x00, 0xE0,0x00, 0x3E,0xC0, 0xF4,0x85, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xE0,0x00,
+0x3E,0xD0, 0xF0,0x05, 0x7A,0x78, 0xE0,0x00, 0x3E,0xD4, 0xF4,0x86, 0x31,0x9C, 0xF4,0x86,
+0x2E,0xE0, 0xF4,0x85, 0x32,0xD4, 0xF7,0x04, 0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x3F,0x80, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x09, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3F,0x80, 0xB4,0xBA, 0x68,0x02, 0xE0,0x00,
+0x3F,0x80, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x7A,0x90, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xEE,0x00, 0x3F,0x39, 0xC5,0xB4, 0x00,0x00, 0xC7,0x38, 0x5A,0x00, 0xE0,0x00,
+0x3F,0x40, 0xF7,0x05, 0x7A,0x90, 0xC5,0xB8, 0x00,0x00, 0xF0,0x05, 0x7A,0x90, 0xF7,0x04,
+0x7A,0x88, 0xF6,0x86, 0x7A,0x28, 0x76,0x39, 0x00,0x03, 0xA6,0xB2, 0x68,0x02, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x7A,0x88, 0xF7,0x04, 0x7A,0x98, 0x96,0x96, 0xFF,0xF4, 0x96,0x82,
+0xFF,0x3C, 0xF4,0x84, 0x7A,0x98, 0xF6,0x86, 0x7A,0x2C, 0xC7,0x38, 0x58,0x00, 0x94,0x82,
+0xFF,0x40, 0x95,0x82, 0xFF,0x44, 0xB5,0xB2, 0x68,0x02, 0xF7,0x05, 0x7A,0x98, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x10, 0xF5,0x84,
+0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x3F,0xE4, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x3F,0xD8, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02, 0x00,0x22, 0xE0,0x00, 0x3F,0xE8, 0xF5,0x05,
+0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF5,0x84, 0x32,0xC0, 0x00,0x00, 0x00,0x01, 0x90,0x2E,
+0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0x41,0x14, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x38, 0x05,0x2C, 0x0E,0xF4, 0x95,0x16,
+0xFF,0xF4, 0xF7,0x05, 0x7A,0x68, 0x95,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xEC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x43,0xA0, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xEC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x40,0xD0, 0x00,0x00, 0x00,0x01, 0x86,0x2E, 0x0E,0xF8, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x40,0x6D, 0xF5,0x02, 0x00,0x4C, 0x87,0x2E,
+0x0F,0x00, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x0F,0x00, 0x87,0x2E,
+0x0F,0x00, 0xE0,0x00, 0x40,0xD0, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0xF5,0x06,
+0x7A,0x28, 0x95,0x13, 0xFF,0xFC, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35,
+0x00,0x04, 0xC7,0x38, 0x6A,0x00, 0x85,0x16, 0xFF,0xF4, 0xC7,0x38, 0x60,0x00, 0xC7,0x38,
+0x50,0x00, 0x07,0x38, 0x00,0x10, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xEC, 0x96,0x16,
+0xFF,0xE8, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xE8, 0x85,0x96, 0xFF,0xEC, 0x06,0x30, 0x00,0x01, 0x20,0x32, 0x00,0x11, 0xE6,0x00,
+0x40,0xCC, 0x00,0x00, 0x00,0x01, 0xF6,0x02, 0x00,0x00, 0x96,0x2E, 0x0E,0xF8, 0xF7,0x04,
+0x32,0xC0, 0xF5,0x06, 0xE0,0x30, 0xC0,0x3A, 0x52,0x00, 0xE6,0x00, 0x41,0x14, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1E, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x00, 0x41,0x15, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04,
+0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04,
+0x79,0xC8, 0xF7,0x04, 0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x41,0x51, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02, 0x00,0x0A, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x41,0x50, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x71,0xD4, 0xF6,0x84, 0x71,0xCC, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x41,0x6C, 0xF7,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD4, 0xF6,0x84, 0x71,0xD4, 0xF7,0x04,
+0x71,0xD0, 0xF0,0x05, 0x71,0xC4, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xF6,0x84,
+0x32,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x42,0x31, 0xF7,0x05,
+0x71,0xC8, 0xF7,0x04, 0x71,0x98, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x41,0xD1, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x41,0xD0, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x42,0x29, 0x00,0x00, 0x00,0x01, 0xF5,0x84,
+0x76,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x42,0x1C, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x42,0x08, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02, 0x00,0x22, 0xF5,0x05, 0x76,0xF8, 0xF5,0x04,
+0x77,0x00, 0xE0,0x00, 0x42,0x20, 0xF5,0x05, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xE0,0x00,
+0x42,0x30, 0xF0,0x05, 0x7A,0x78, 0xE0,0x00, 0x42,0x34, 0xF5,0x06, 0x31,0x9C, 0xF5,0x06,
+0x2E,0xE0, 0xF5,0x05, 0x32,0xD4, 0xF7,0x04, 0x71,0xC8, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x42,0x74, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02,
+0x00,0x09, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x42,0x74, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06,
+0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x2E,0xE0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x32,0xD4, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x2F,0x6C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x2F,0xF8, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x30,0x84, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x31,0x10, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x32,0xD4, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x31,0x9C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x32,0xD4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x32,0x28, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x87,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x86,0xBA, 0x00,0x00, 0x87,0x3A, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0x44,0x0C, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x86,0x2E,
+0x00,0x00, 0x86,0xAE, 0x00,0x04, 0x20,0x32, 0x00,0x10, 0xE2,0x00, 0x43,0xD0, 0x00,0x00,
+0x00,0x01, 0x20,0x36, 0x00,0x10, 0xE2,0x00, 0x43,0xED, 0x07,0x34, 0x00,0x01, 0x87,0x2E,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x00,0x0C, 0x87,0x2E,
+0x00,0x0C, 0xE0,0x00, 0x44,0x14, 0xF4,0x02, 0x00,0x00, 0xC0,0x3A, 0x62,0x00, 0xE6,0x00,
+0x44,0x11, 0xF4,0x02, 0x00,0x00, 0x20,0x36, 0x00,0x10, 0xE6,0x00, 0x44,0x14, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x44,0x14, 0x00,0x00, 0x00,0x01, 0xF4,0x02,
+0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x02,
+0x00,0x01, 0xF7,0x05, 0x35,0x24, 0xF7,0x04, 0x6F,0x44, 0x00,0x00, 0x00,0x01, 0xF7,0x05,
+0x35,0x28, 0xF7,0x06, 0x32,0xF4, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x35,0x30, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02,
+0x45,0x04, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x0D, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x32,0xF4, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x4A,0x04, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x0F, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x33,0x80, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x4E,0xEC, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02,
+0x00,0x08, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x34,0x0C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x57,0x64, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x02, 0x00,0x07, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x34,0x98, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x14, 0xF7,0x04,
+0x75,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x45,0x2D, 0xF6,0x86,
+0x75,0xF8, 0xE0,0x00, 0x45,0x44, 0xF7,0x02, 0x00,0x00, 0xF7,0x04, 0x76,0x04, 0x00,0x00,
+0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x87,0x3A, 0x00,0x18, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x45,0x5C, 0xF7,0x05, 0x35,0x48, 0xF4,0x86,
+0x33,0x80, 0xE0,0x00, 0x49,0xF0, 0xF4,0x85, 0x35,0x30, 0xF7,0x04, 0x6F,0x54, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x45,0x80, 0xF4,0x82, 0x00,0x08, 0xF4,0x82,
+0x00,0x01, 0xF4,0x85, 0x6F,0x54, 0xE0,0x00, 0x45,0x88, 0xF7,0x02, 0x00,0x01, 0xF4,0x85,
+0x6F,0x58, 0xF7,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x45,0xA0, 0xF4,0x82,
+0x00,0x04, 0xF4,0x86, 0x34,0x0C, 0xE0,0x00, 0x49,0xF0, 0xF4,0x85, 0x35,0x30, 0xF6,0x84,
+0x35,0x48, 0xF6,0x04, 0x35,0x2C, 0xF4,0xB7, 0x28,0x00, 0x07,0x34, 0x00,0x02, 0xF4,0x82,
+0x00,0x01, 0xF4,0xBB, 0x28,0x00, 0x87,0x32, 0x00,0x8C, 0xF4,0x82, 0x00,0x01, 0x97,0x36,
+0x00,0x18, 0x87,0x32, 0x00,0x90, 0xF4,0x85, 0x6F,0x50, 0x97,0x36, 0x00,0x04, 0x84,0xB2,
+0x00,0x84, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x10, 0x84,0xB2, 0x00,0x88, 0x00,0x00,
+0x00,0x01, 0x94,0xB6, 0x00,0x14, 0x84,0xB6, 0x00,0x10, 0x00,0x00, 0x00,0x01, 0x94,0xB6,
+0x00,0x08, 0x84,0xB6, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x0C, 0x84,0xB2,
+0x00,0x98, 0x00,0x00, 0x00,0x01, 0xF4,0x85, 0x35,0x54, 0xF4,0x82, 0x00,0x01, 0x94,0x82,
+0xFF,0x80, 0xF5,0x04, 0x35,0x54, 0xF4,0x86, 0x34,0x98, 0xF4,0x85, 0x35,0x30, 0x95,0x02,
+0xFF,0x38, 0x85,0xB2, 0x00,0x00, 0x06,0xB4, 0x00,0x24, 0x95,0x82, 0xFF,0x3C, 0x96,0x82,
+0xFF,0x40, 0x87,0x32, 0x00,0x04, 0xF6,0x85, 0x35,0x50, 0x97,0x02, 0xFF,0x44, 0x86,0xB2,
+0x00,0x04, 0xF0,0x05, 0x35,0x4C, 0xF7,0x04, 0x35,0x40, 0x95,0x16, 0xFF,0xF4, 0x95,0x96,
+0xFF,0xF4, 0xC7,0x38, 0x68,0x00, 0xF7,0x05, 0x35,0x40, 0xF5,0x84, 0x35,0x28, 0x86,0xB2,
+0x00,0x04, 0x87,0x2E, 0x14,0x14, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x97,0x2E,
+0x14,0x14, 0x87,0x32, 0x00,0x80, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xEE,0x00,
+0x49,0xF0, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x46,0xA4, 0x00,0x00,
+0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x46,0x95, 0x00,0x00, 0x00,0x01, 0xF7,0x06,
+0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x49,0xF0, 0x00,0x00,
+0x00,0x01, 0xFF,0x82, 0x00,0x10, 0x86,0x82, 0xFF,0x38, 0xF7,0x04, 0x35,0x58, 0xF5,0x84,
+0x6F,0x58, 0xF6,0x85, 0x35,0x54, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x47,0x08, 0xF7,0x05, 0x35,0x58, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x46,0xFC, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00,
+0x47,0x0C, 0xF4,0x85, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x94, 0xC4,0x84, 0x00,0x00, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00,
+0x47,0x71, 0x00,0x00, 0x00,0x01, 0x86,0x36, 0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00,
+0x00,0x01, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04, 0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4,
+0x00,0x0F, 0x70,0x3E, 0xFF,0xE1, 0x07,0x38, 0x00,0x24, 0xE6,0x00, 0x47,0x69, 0xC6,0x38,
+0x60,0x00, 0x06,0xB4, 0x00,0x01, 0xC7,0x04, 0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84,
+0x35,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x0F, 0xE2,0x00, 0x47,0xBD, 0x07,0x38, 0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00,
+0x47,0xD0, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38,
+0x00,0x08, 0xE0,0x00, 0x49,0x68, 0xF7,0x05, 0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00,
+0x47,0xCC, 0x00,0x00, 0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04,
+0x35,0x3C, 0xF6,0x84, 0x35,0x28, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04,
+0x35,0x3C, 0x87,0x36, 0x14,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x1C, 0xF7,0x04, 0x76,0x04, 0x86,0xB6, 0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38,
+0x00,0x01, 0xF6,0x84, 0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x48,0x1C, 0xF7,0x05, 0x76,0x04, 0xF0,0x05, 0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04,
+0x76,0x08, 0xF0,0x05, 0x75,0xFC, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0x48,0x81, 0xF7,0x05, 0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF4,0x86,
+0x72,0x18, 0xC0,0x3A, 0x4A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x48,0x81, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x0E, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x48,0x80, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02,
+0x00,0x00, 0x20,0x2A, 0x00,0x02, 0xEE,0x00, 0x49,0x68, 0xF6,0x82, 0x00,0x00, 0xF6,0x84,
+0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x49,0x3C, 0x05,0xB4, 0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16,
+0xFF,0xEC, 0x95,0x96, 0xFF,0xE8, 0x96,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x5E,0xDC, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96,
+0xFF,0xE4, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x49,0x2C, 0xF7,0x02, 0x00,0x00, 0x86,0x36,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00, 0x49,0x11, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00, 0x49,0x2C, 0xF7,0x02, 0x00,0x00, 0x76,0xB1,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38,
+0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x47,0xA8, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x49,0x5C, 0x07,0x34, 0x14,0x94, 0xF4,0x84,
+0x6F,0x44, 0xE0,0x00, 0x49,0x60, 0xF4,0x85, 0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00,
+0x48,0x84, 0x05,0x28, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x49,0xA1, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x0D, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x49,0xA8, 0xB4,0xBA, 0x68,0x02, 0xE0,0x00, 0x49,0xA8, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82,
+0x00,0x01, 0xF4,0x85, 0x35,0x24, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x44, 0xF4,0x86,
+0x32,0xF4, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x49,0xF0, 0xF4,0x85, 0x35,0x30, 0xF7,0x04,
+0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0x49,0xF1, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x14, 0xF7,0x04,
+0x75,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x4A,0x2D, 0xF6,0x86,
+0x75,0xF8, 0xE0,0x00, 0x4A,0x40, 0xF6,0x82, 0x00,0x00, 0xF7,0x04, 0x76,0x04, 0x00,0x00,
+0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x86,0xBA, 0x00,0x18, 0xF7,0x04,
+0x6F,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x4A,0x64, 0xF6,0x85,
+0x35,0x48, 0xF4,0x82, 0x00,0x01, 0xF4,0x85, 0x6F,0x54, 0xE0,0x00, 0x4A,0x70, 0xF7,0x02,
+0x00,0x01, 0xF4,0x82, 0x00,0x08, 0xF4,0x85, 0x6F,0x58, 0xF7,0x02, 0x00,0x00, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x4A,0x88, 0xF4,0x82, 0x00,0x04, 0xF4,0x86, 0x34,0x0C, 0xE0,0x00,
+0x4E,0xD8, 0xF4,0x85, 0x35,0x30, 0xF6,0x84, 0x35,0x48, 0xF6,0x04, 0x35,0x2C, 0xF4,0xB7,
+0x28,0x00, 0x07,0x34, 0x00,0x02, 0xF4,0x82, 0x00,0x01, 0xF4,0xBB, 0x28,0x00, 0x87,0x32,
+0x00,0x8C, 0xF4,0x82, 0x00,0x01, 0x97,0x36, 0x00,0x18, 0x87,0x32, 0x00,0x90, 0xF4,0x85,
+0x6F,0x50, 0x97,0x36, 0x00,0x04, 0x84,0xB2, 0x00,0x84, 0x00,0x00, 0x00,0x01, 0x94,0xB6,
+0x00,0x10, 0x84,0xB2, 0x00,0x88, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x14, 0x84,0xB6,
+0x00,0x10, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x08, 0x84,0xB6, 0x00,0x14, 0x00,0x00,
+0x00,0x01, 0x94,0xB6, 0x00,0x0C, 0x84,0xB2, 0x00,0x98, 0x00,0x00, 0x00,0x01, 0xF4,0x85,
+0x35,0x54, 0xF4,0x82, 0x00,0x01, 0x94,0x82, 0xFF,0x80, 0xF5,0x04, 0x35,0x54, 0xF4,0x86,
+0x34,0x98, 0xF4,0x85, 0x35,0x30, 0x95,0x02, 0xFF,0x38, 0x85,0xB2, 0x00,0x00, 0x06,0xB4,
+0x00,0x24, 0x95,0x82, 0xFF,0x3C, 0x96,0x82, 0xFF,0x40, 0x87,0x32, 0x00,0x04, 0xF6,0x85,
+0x35,0x50, 0x97,0x02, 0xFF,0x44, 0x86,0xB2, 0x00,0x04, 0xF0,0x05, 0x35,0x4C, 0xF7,0x04,
+0x35,0x40, 0x95,0x16, 0xFF,0xF4, 0x95,0x96, 0xFF,0xF4, 0xC7,0x38, 0x68,0x00, 0xF7,0x05,
+0x35,0x40, 0xF5,0x84, 0x35,0x28, 0x86,0xB2, 0x00,0x04, 0x87,0x2E, 0x14,0x14, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x97,0x2E, 0x14,0x14, 0x87,0x32, 0x00,0x80, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xEE,0x00, 0x4E,0xD8, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E,
+0x74,0x00, 0xE6,0x00, 0x4B,0x8C, 0x00,0x00, 0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00,
+0x4B,0x7D, 0x00,0x00, 0x00,0x01, 0xF7,0x06, 0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A,
+0x00,0x10, 0xE6,0x00, 0x4E,0xD8, 0x00,0x00, 0x00,0x01, 0xFF,0x82, 0x00,0x10, 0x86,0x82,
+0xFF,0x38, 0xF7,0x04, 0x35,0x58, 0xF5,0x84, 0x6F,0x58, 0xF6,0x85, 0x35,0x54, 0x07,0x38,
+0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x4B,0xF0, 0xF7,0x05, 0x35,0x58, 0xF7,0x04,
+0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x4B,0xE4, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00, 0x4B,0xF4, 0xF4,0x85, 0x6F,0x58, 0xF0,0x05,
+0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x94, 0xC4,0x84,
+0x00,0x00, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x4C,0x59, 0x00,0x00, 0x00,0x01, 0x86,0x36,
+0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00, 0x00,0x01, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04,
+0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xC6,0xB4,
+0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4, 0x00,0x0F, 0x70,0x3E, 0xFF,0xE1, 0x07,0x38,
+0x00,0x24, 0xE6,0x00, 0x4C,0x51, 0xC6,0x38, 0x60,0x00, 0x06,0xB4, 0x00,0x01, 0xC7,0x04,
+0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84, 0x35,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x0F, 0xE2,0x00, 0x4C,0xA5, 0x07,0x38,
+0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00, 0x4C,0xB8, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38, 0x00,0x08, 0xE0,0x00, 0x4E,0x50, 0xF7,0x05,
+0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x4C,0xB4, 0x00,0x00, 0x00,0x01, 0xF7,0x02,
+0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04, 0x35,0x3C, 0xF6,0x84, 0x35,0x28, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04, 0x35,0x3C, 0x87,0x36, 0x14,0x1C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x14,0x1C, 0xF7,0x04, 0x76,0x04, 0x86,0xB6,
+0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38, 0x00,0x01, 0xF6,0x84, 0x76,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0x4D,0x04, 0xF7,0x05, 0x76,0x04, 0xF0,0x05,
+0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04, 0x76,0x08, 0xF0,0x05, 0x75,0xFC, 0xC0,0x36,
+0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x4D,0x69, 0xF7,0x05,
+0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF4,0x86, 0x72,0x18, 0xC0,0x3A, 0x4A,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x4D,0x69, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04,
+0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0xF4,0x82, 0x00,0x0E, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x4D,0x68, 0xB4,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02, 0x00,0x00, 0x20,0x2A, 0x00,0x02, 0xEE,0x00,
+0x4E,0x50, 0xF6,0x82, 0x00,0x00, 0xF6,0x84, 0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00, 0x4E,0x24, 0x05,0xB4,
+0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16, 0xFF,0xEC, 0x95,0x96, 0xFF,0xE8, 0x96,0x96,
+0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x5E,0xDC, 0x97,0x93, 0xFF,0xFC, 0x85,0x16,
+0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96, 0xFF,0xE4, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x4E,0x14, 0xF7,0x02, 0x00,0x00, 0x86,0x36, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32,
+0x00,0x0F, 0xE2,0x00, 0x4D,0xF9, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00,
+0x4E,0x14, 0xF7,0x02, 0x00,0x00, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35,
+0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38, 0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C,
+0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x4C,0x90, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84,
+0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x4E,0x44, 0x07,0x34, 0x14,0x94, 0xF4,0x84, 0x6F,0x44, 0xE0,0x00, 0x4E,0x48, 0xF4,0x85,
+0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00, 0x4D,0x6C, 0x05,0x28, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0x4E,0x89, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00,
+0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x0D, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x4E,0x90, 0xB4,0xBA, 0x68,0x02, 0xE0,0x00,
+0x4E,0x90, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x01, 0xF4,0x85, 0x35,0x24, 0xF6,0x84,
+0x35,0x28, 0xF7,0x04, 0x6F,0x44, 0xF4,0x86, 0x32,0xF4, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x4E,0xD8, 0xF4,0x85, 0x35,0x30, 0xF7,0x04, 0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8,
+0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x4E,0xD9, 0x00,0x00, 0x00,0x01, 0x0F,0x81,
+0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x14, 0xF6,0x84, 0x35,0x48, 0xF6,0x04, 0x35,0x2C, 0xF4,0x82,
+0x00,0x04, 0xF4,0xB7, 0x28,0x00, 0x07,0x34, 0x00,0x02, 0xF4,0x82, 0x00,0x01, 0xF4,0xBB,
+0x28,0x00, 0x87,0x32, 0x00,0x8C, 0xF4,0x82, 0x00,0x01, 0x97,0x36, 0x00,0x18, 0x87,0x32,
+0x00,0x90, 0xF4,0x85, 0x6F,0x50, 0x97,0x36, 0x00,0x04, 0x84,0xB2, 0x00,0x84, 0x00,0x00,
+0x00,0x01, 0x94,0xB6, 0x00,0x10, 0x84,0xB2, 0x00,0x88, 0x00,0x00, 0x00,0x01, 0x94,0xB6,
+0x00,0x14, 0x84,0xB6, 0x00,0x10, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x08, 0x84,0xB6,
+0x00,0x14, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x00,0x0C, 0x84,0xB2, 0x00,0x98, 0x00,0x00,
+0x00,0x01, 0xF4,0x85, 0x35,0x54, 0xF4,0x82, 0x00,0x01, 0x94,0x82, 0xFF,0x80, 0xF5,0x04,
+0x35,0x54, 0xF4,0x86, 0x34,0x98, 0xF4,0x85, 0x35,0x30, 0x95,0x02, 0xFF,0x38, 0x85,0xB2,
+0x00,0x00, 0x06,0xB4, 0x00,0x24, 0x95,0x82, 0xFF,0x3C, 0x96,0x82, 0xFF,0x40, 0x87,0x32,
+0x00,0x04, 0xF6,0x85, 0x35,0x50, 0x97,0x02, 0xFF,0x44, 0x86,0xB2, 0x00,0x04, 0xF0,0x05,
+0x35,0x4C, 0xF7,0x04, 0x35,0x40, 0x95,0x16, 0xFF,0xF4, 0x95,0x96, 0xFF,0xF4, 0xC7,0x38,
+0x68,0x00, 0xF7,0x05, 0x35,0x40, 0xF5,0x84, 0x35,0x28, 0x86,0xB2, 0x00,0x04, 0x87,0x2E,
+0x14,0x14, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x97,0x2E, 0x14,0x14, 0x87,0x32,
+0x00,0x80, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xEE,0x00, 0x53,0x4C, 0xF7,0x06,
+0x0C,0x3E, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x50,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x7E,
+0x74,0x00, 0xE6,0x00, 0x4F,0xF1, 0x00,0x00, 0x00,0x01, 0xF7,0x06, 0x0C,0x3E, 0xC7,0x7C,
+0x74,0x00, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x53,0x4C, 0x00,0x00, 0x00,0x01, 0xFF,0x82,
+0x00,0x10, 0x86,0x82, 0xFF,0x38, 0xF7,0x04, 0x35,0x58, 0xF5,0x84, 0x6F,0x58, 0xF6,0x85,
+0x35,0x54, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00, 0x50,0x64, 0xF7,0x05,
+0x35,0x58, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x50,0x58, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x22, 0xE0,0x00, 0x50,0x68, 0xF4,0x85,
+0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x94, 0xC4,0x84, 0x00,0x00, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x00, 0x50,0xCD, 0x00,0x00,
+0x00,0x01, 0x86,0x36, 0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00, 0x00,0x01, 0x76,0xB4,
+0xFF,0xF0, 0xF7,0x04, 0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39, 0x00,0x00, 0x97,0x16,
+0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4, 0x00,0x0F, 0x70,0x3E,
+0xFF,0xE1, 0x07,0x38, 0x00,0x24, 0xE6,0x00, 0x50,0xC5, 0xC6,0x38, 0x60,0x00, 0x06,0xB4,
+0x00,0x01, 0xC7,0x04, 0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84, 0x35,0x44, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x0F, 0xE2,0x00,
+0x51,0x19, 0x07,0x38, 0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x36, 0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00, 0x51,0x2C, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38, 0x00,0x08, 0xE0,0x00,
+0x52,0xC4, 0xF7,0x05, 0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x51,0x28, 0x00,0x00,
+0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04, 0x35,0x3C, 0xF6,0x84,
+0x35,0x28, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04, 0x35,0x3C, 0x87,0x36,
+0x14,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x14,0x1C, 0xF7,0x04,
+0x76,0x04, 0x86,0xB6, 0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38, 0x00,0x01, 0xF6,0x84,
+0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0x51,0x78, 0xF7,0x05,
+0x76,0x04, 0xF0,0x05, 0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04, 0x76,0x08, 0xF0,0x05,
+0x75,0xFC, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0x51,0xDD, 0xF7,0x05, 0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF4,0x86, 0x72,0x18, 0xC0,0x3A,
+0x4A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x51,0xDD, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x0E, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x51,0xDC, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02, 0x00,0x00, 0x20,0x2A,
+0x00,0x02, 0xEE,0x00, 0x52,0xC4, 0xF6,0x82, 0x00,0x00, 0xF6,0x84, 0x35,0x28, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0x52,0x98, 0x05,0xB4, 0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16, 0xFF,0xEC, 0x95,0x96,
+0xFF,0xE8, 0x96,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x5E,0xDC, 0x97,0x93,
+0xFF,0xFC, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96, 0xFF,0xE4, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x52,0x88, 0xF7,0x02, 0x00,0x00, 0x86,0x36, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00, 0x52,0x6D, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x14, 0x87,0x36,
+0x00,0x14, 0xE0,0x00, 0x52,0x88, 0xF7,0x02, 0x00,0x00, 0x76,0xB1, 0x00,0x02, 0xC6,0xB4,
+0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38, 0x60,0x00, 0x07,0x38,
+0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x51,0x04, 0xF7,0x05,
+0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x52,0xB8, 0x07,0x34, 0x14,0x94, 0xF4,0x84, 0x6F,0x44, 0xE0,0x00,
+0x52,0xBC, 0xF4,0x85, 0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00, 0x51,0xE0, 0x05,0x28,
+0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x52,0xFD, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04,
+0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0xF4,0x82, 0x00,0x0D, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x53,0x04, 0xB4,0xBA,
+0x68,0x02, 0xE0,0x00, 0x53,0x04, 0xF0,0x05, 0x2D,0x38, 0xF4,0x82, 0x00,0x01, 0xF4,0x85,
+0x35,0x24, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x44, 0xF4,0x86, 0x32,0xF4, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x53,0x4C, 0xF4,0x85, 0x35,0x30, 0xF7,0x04, 0xE0,0x18, 0x00,0x00,
+0x00,0x01, 0x77,0xB8, 0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x53,0x4D, 0x00,0x00,
+0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x14, 0xF4,0x84, 0x35,0x54, 0xF6,0x84,
+0x35,0x4C, 0xF5,0x84, 0x35,0x2C, 0x94,0x82, 0xFF,0x38, 0x76,0xB5, 0x00,0x03, 0xA5,0x2E,
+0x68,0x02, 0x00,0x00, 0x00,0x01, 0x95,0x02, 0xFF,0x3C, 0xF3,0x84, 0x35,0x50, 0xC6,0xAC,
+0x68,0x00, 0x93,0x82, 0xFF,0x40, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x97,0x02,
+0xFF,0x44, 0x86,0x36, 0x00,0x04, 0xF7,0x04, 0x35,0x40, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0xF7,0x05, 0x35,0x40, 0xF6,0x04, 0x35,0x28, 0x86,0xB6, 0x00,0x04, 0x87,0x32,
+0x14,0x14, 0x94,0x96, 0xFF,0xF4, 0xC7,0x38, 0x68,0x00, 0x97,0x32, 0x14,0x14, 0x87,0x2E,
+0x00,0x80, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xEE,0x00, 0x57,0x50, 0x95,0x16,
+0xFF,0xF4, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x54,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x53,0xF5, 0x00,0x00, 0x00,0x01, 0xF7,0x06,
+0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x57,0x50, 0x00,0x00,
+0x00,0x01, 0xFF,0x82, 0x00,0x10, 0x86,0x82, 0xFF,0x38, 0xF7,0x04, 0x35,0x58, 0xF5,0x84,
+0x6F,0x58, 0xF6,0x85, 0x35,0x54, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x54,0x68, 0xF7,0x05, 0x35,0x58, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x54,0x5C, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x82, 0x00,0x22, 0xE0,0x00,
+0x54,0x6C, 0xF3,0x85, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x94, 0xC3,0x84, 0x00,0x00, 0xC0,0x3A, 0x3A,0x00, 0xE6,0x00,
+0x54,0xD1, 0x00,0x00, 0x00,0x01, 0x86,0x36, 0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00,
+0x00,0x01, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04, 0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4,
+0x00,0x0F, 0x70,0x3E, 0xFF,0xE1, 0x07,0x38, 0x00,0x24, 0xE6,0x00, 0x54,0xC9, 0xC6,0x38,
+0x60,0x00, 0x06,0xB4, 0x00,0x01, 0xC7,0x04, 0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84,
+0x35,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x0F, 0xE2,0x00, 0x55,0x1D, 0x07,0x38, 0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00,
+0x55,0x30, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38,
+0x00,0x08, 0xE0,0x00, 0x56,0xC8, 0xF7,0x05, 0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00,
+0x55,0x2C, 0x00,0x00, 0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04,
+0x35,0x3C, 0xF6,0x84, 0x35,0x28, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04,
+0x35,0x3C, 0x87,0x36, 0x14,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x1C, 0xF7,0x04, 0x76,0x04, 0x86,0xB6, 0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38,
+0x00,0x01, 0xF6,0x84, 0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x55,0x7C, 0xF7,0x05, 0x76,0x04, 0xF0,0x05, 0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04,
+0x76,0x08, 0xF0,0x05, 0x75,0xFC, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0x55,0xE1, 0xF7,0x05, 0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF3,0x86,
+0x72,0x18, 0xC0,0x3A, 0x3A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x55,0xE1, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x82, 0x00,0x0E, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x55,0xE0, 0xB3,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02,
+0x00,0x00, 0x20,0x2A, 0x00,0x02, 0xEE,0x00, 0x56,0xC8, 0xF6,0x82, 0x00,0x00, 0xF6,0x84,
+0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x56,0x9C, 0x05,0xB4, 0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16,
+0xFF,0xEC, 0x95,0x96, 0xFF,0xE8, 0x96,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x5E,0xDC, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96,
+0xFF,0xE4, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x56,0x8C, 0xF7,0x02, 0x00,0x00, 0x86,0x36,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00, 0x56,0x71, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00, 0x56,0x8C, 0xF7,0x02, 0x00,0x00, 0x76,0xB1,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38,
+0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x55,0x08, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x56,0xBC, 0x07,0x34, 0x14,0x94, 0xF3,0x84,
+0x6F,0x44, 0xE0,0x00, 0x56,0xC0, 0xF3,0x85, 0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00,
+0x55,0xE4, 0x05,0x28, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x57,0x01, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x82, 0x00,0x0D, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x57,0x08, 0xB3,0xBA, 0x68,0x02, 0xE0,0x00, 0x57,0x08, 0xF0,0x05, 0x2D,0x38, 0xF3,0x82,
+0x00,0x01, 0xF3,0x85, 0x35,0x24, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x44, 0xF3,0x86,
+0x32,0xF4, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x57,0x50, 0xF3,0x85, 0x35,0x30, 0xF7,0x04,
+0xE0,0x18, 0x00,0x00, 0x00,0x01, 0x77,0xB8, 0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0x57,0x51, 0x00,0x00, 0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x14, 0x87,0x02,
+0xFF,0x38, 0xF3,0x84, 0x35,0x2C, 0xF7,0x05, 0x35,0x54, 0x87,0x1E, 0x00,0x80, 0xF5,0x04,
+0x35,0x4C, 0x27,0x38, 0x00,0x01, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00, 0x5A,0x4C, 0x00,0x00,
+0x00,0x01, 0xF5,0x84, 0x6F,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x57,0xD8, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x57,0xCC, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xE0,0x00,
+0x57,0xDC, 0xF3,0x05, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x94, 0xC3,0x04, 0x00,0x00, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0x58,0x41, 0x00,0x00, 0x00,0x01, 0x86,0x36, 0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00,
+0x00,0x01, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04, 0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4,
+0x00,0x0F, 0x70,0x3E, 0xFF,0xE1, 0x07,0x38, 0x00,0x24, 0xE6,0x00, 0x58,0x39, 0xC6,0x38,
+0x60,0x00, 0x06,0xB4, 0x00,0x01, 0xC7,0x04, 0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84,
+0x35,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x0F, 0xE2,0x00, 0x58,0x8D, 0x07,0x38, 0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00,
+0x58,0xA0, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38,
+0x00,0x08, 0xE0,0x00, 0x5A,0x38, 0xF7,0x05, 0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00,
+0x58,0x9C, 0x00,0x00, 0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04,
+0x35,0x3C, 0xF6,0x84, 0x35,0x28, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04,
+0x35,0x3C, 0x87,0x36, 0x14,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x1C, 0xF7,0x04, 0x76,0x04, 0x86,0xB6, 0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38,
+0x00,0x01, 0xF6,0x84, 0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x58,0xEC, 0xF7,0x05, 0x76,0x04, 0xF0,0x05, 0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04,
+0x76,0x08, 0xF0,0x05, 0x75,0xFC, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0x59,0x51, 0xF7,0x05, 0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF3,0x06,
+0x72,0x18, 0xC0,0x3A, 0x32,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x59,0x51, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0E, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x59,0x50, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02,
+0x00,0x00, 0x20,0x2A, 0x00,0x02, 0xEE,0x00, 0x5A,0x38, 0xF6,0x82, 0x00,0x00, 0xF6,0x84,
+0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x5A,0x0C, 0x05,0xB4, 0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16,
+0xFF,0xEC, 0x95,0x96, 0xFF,0xE8, 0x96,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x5E,0xDC, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96,
+0xFF,0xE4, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x59,0xFC, 0xF7,0x02, 0x00,0x00, 0x86,0x36,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00, 0x59,0xE1, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00, 0x59,0xFC, 0xF7,0x02, 0x00,0x00, 0x76,0xB1,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38,
+0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x58,0x78, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x5A,0x2C, 0x07,0x34, 0x14,0x94, 0xF3,0x04,
+0x6F,0x44, 0xE0,0x00, 0x5A,0x30, 0xF3,0x05, 0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00,
+0x59,0x54, 0x05,0x28, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x5D,0xC4, 0xF3,0x02,
+0x00,0x01, 0xE0,0x00, 0x5D,0xF0, 0x00,0x00, 0x00,0x01, 0x77,0x29, 0x00,0x03, 0xC7,0x1C,
+0x70,0x00, 0x87,0x3A, 0x00,0x04, 0x05,0x28, 0x00,0x01, 0x76,0xA9, 0x00,0x03, 0xF4,0x84,
+0x35,0x54, 0xF6,0x04, 0x35,0x50, 0x94,0x82, 0xFF,0x38, 0xA4,0x1E, 0x68,0x02, 0xC6,0x30,
+0x70,0x00, 0x94,0x02, 0xFF,0x3C, 0x96,0x02, 0xFF,0x40, 0xC6,0x9C, 0x68,0x00, 0x87,0x36,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x97,0x02, 0xFF,0x44, 0x85,0xB6, 0x00,0x04, 0xF7,0x04,
+0x35,0x40, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x58,0x00, 0xF7,0x05, 0x35,0x40, 0x85,0xB6,
+0x00,0x04, 0xF5,0x05, 0x35,0x4C, 0xF6,0x84, 0x35,0x28, 0xF6,0x05, 0x35,0x50, 0x87,0x36,
+0x14,0x14, 0x94,0x96, 0xFF,0xF4, 0xC7,0x38, 0x58,0x00, 0x97,0x36, 0x14,0x14, 0x87,0x1E,
+0x00,0x80, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xEE,0x00, 0x5E,0x3C, 0x94,0x16,
+0xFF,0xF4, 0xF7,0x06, 0x0C,0x3E, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x5A,0xF4, 0x00,0x00,
+0x00,0x01, 0xC0,0x7E, 0x74,0x00, 0xE6,0x00, 0x5A,0xE5, 0x00,0x00, 0x00,0x01, 0xF7,0x06,
+0x0C,0x3E, 0xC7,0x7C, 0x74,0x00, 0x20,0x3A, 0x00,0x10, 0xE6,0x00, 0x5E,0x3C, 0x00,0x00,
+0x00,0x01, 0xFF,0x82, 0x00,0x10, 0x86,0x82, 0xFF,0x38, 0xF7,0x04, 0x35,0x58, 0xF5,0x84,
+0x6F,0x58, 0xF6,0x85, 0x35,0x54, 0x07,0x38, 0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x00,
+0x5B,0x58, 0xF7,0x05, 0x35,0x58, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x5B,0x4C, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xE0,0x00,
+0x5B,0x5C, 0xF3,0x05, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF6,0x84, 0x35,0x2C, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x94, 0xC3,0x04, 0x00,0x00, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0x5B,0xC1, 0x00,0x00, 0x00,0x01, 0x86,0x36, 0x00,0x94, 0xF6,0x84, 0x35,0x54, 0x00,0x00,
+0x00,0x01, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04, 0x35,0x54, 0x96,0x96, 0xFF,0xF4, 0x47,0x39,
+0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF7,0x04, 0x35,0x48, 0x77,0xB4,
+0x00,0x0F, 0x70,0x3E, 0xFF,0xE1, 0x07,0x38, 0x00,0x24, 0xE6,0x00, 0x5B,0xB9, 0xC6,0x38,
+0x60,0x00, 0x06,0xB4, 0x00,0x01, 0xC7,0x04, 0x6E,0x00, 0xF7,0x33, 0x28,0x00, 0xF6,0x84,
+0x35,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x0F, 0xE2,0x00, 0x5C,0x0D, 0x07,0x38, 0x00,0x01, 0x87,0x36, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x00,0x0C, 0x87,0x36, 0x00,0x0C, 0xE0,0x00,
+0x5C,0x20, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x35,0x28, 0xF6,0x82, 0x00,0x01, 0x07,0x38,
+0x00,0x08, 0xE0,0x00, 0x5D,0xB8, 0xF7,0x05, 0x35,0x44, 0x20,0x3A, 0x00,0x10, 0xE6,0x00,
+0x5C,0x1C, 0x00,0x00, 0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x97,0x36, 0x00,0x04, 0xF7,0x04,
+0x35,0x3C, 0xF6,0x84, 0x35,0x28, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x35,0x3C, 0xF7,0x04,
+0x35,0x3C, 0x87,0x36, 0x14,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x14,0x1C, 0xF7,0x04, 0x76,0x04, 0x86,0xB6, 0x14,0x1C, 0xF6,0x04, 0x75,0xFC, 0x07,0x38,
+0x00,0x01, 0xF6,0x84, 0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00,
+0x5C,0x6C, 0xF7,0x05, 0x76,0x04, 0xF0,0x05, 0x76,0x04, 0xF6,0x84, 0x76,0x04, 0xF7,0x04,
+0x76,0x08, 0xF0,0x05, 0x75,0xFC, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0x5C,0xD1, 0xF7,0x05, 0x75,0xF8, 0xF7,0x04, 0x76,0x48, 0xF3,0x06,
+0x72,0x18, 0xC0,0x3A, 0x32,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x5C,0xD1, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0E, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x5C,0xD0, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02,
+0x00,0x00, 0x20,0x2A, 0x00,0x02, 0xEE,0x00, 0x5D,0xB8, 0xF6,0x82, 0x00,0x00, 0xF6,0x84,
+0x35,0x28, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x02, 0xE6,0x00, 0x5D,0x8C, 0x05,0xB4, 0x00,0x08, 0x95,0x93, 0xFF,0xFC, 0x95,0x16,
+0xFF,0xEC, 0x95,0x96, 0xFF,0xE8, 0x96,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x5E,0xDC, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xE8, 0x86,0x96,
+0xFF,0xE4, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x5D,0x7C, 0xF7,0x02, 0x00,0x00, 0x86,0x36,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x0F, 0xE2,0x00, 0x5D,0x61, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36,
+0x00,0x14, 0x87,0x36, 0x00,0x14, 0xE0,0x00, 0x5D,0x7C, 0xF7,0x02, 0x00,0x00, 0x76,0xB1,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0x77,0x35, 0x00,0x05, 0xC7,0x38, 0x6A,0x00, 0xC7,0x38,
+0x60,0x00, 0x07,0x38, 0x00,0x10, 0xC7,0x2C, 0x70,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x5B,0xF8, 0xF7,0x05, 0x35,0x2C, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x4C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x5D,0xAC, 0x07,0x34, 0x14,0x94, 0xF3,0x04,
+0x6F,0x44, 0xE0,0x00, 0x5D,0xB0, 0xF3,0x05, 0x35,0x28, 0xF7,0x05, 0x35,0x28, 0xE0,0x00,
+0x5C,0xD4, 0x05,0x28, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x5D,0xF1, 0xF3,0x02,
+0x00,0x01, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0D, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x5D,0xF4, 0xB3,0x3A, 0x68,0x02, 0xE0,0x00, 0x5D,0xF4, 0xF0,0x05, 0x2D,0x38, 0xF3,0x05,
+0x35,0x24, 0xF6,0x84, 0x35,0x28, 0xF7,0x04, 0x6F,0x44, 0xF3,0x06, 0x32,0xF4, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x5E,0x3C, 0xF3,0x05, 0x35,0x30, 0xF7,0x04, 0xE0,0x18, 0x00,0x00,
+0x00,0x01, 0x77,0xB8, 0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00, 0x5E,0x3D, 0x00,0x00,
+0x00,0x01, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x35,0x30, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x32,0xF4, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x35,0x30, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x33,0x80, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x35,0x30, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x34,0x0C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x35,0x30, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x34,0x98, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x86,0x16, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x32, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x0F, 0x86,0xB2, 0x00,0x00, 0xC5,0x38, 0x00,0x00, 0xEE,0x00,
+0x5F,0x2C, 0xC5,0xB4, 0x00,0x00, 0x20,0x36, 0x00,0x0F, 0xEE,0x00, 0x5F,0x2C, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEC,0x00, 0x5F,0x2D, 0x00,0x00, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xEC,0x00, 0x5F,0x48, 0x00,0x00, 0x00,0x01, 0x87,0x32, 0x00,0x0C, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x32, 0x00,0x0C, 0x87,0x32, 0x00,0x0C, 0xE0,0x00,
+0x5F,0x50, 0xF4,0x02, 0x00,0x00, 0xC0,0x2A, 0x5A,0x00, 0x44,0x0C, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x86,
+0x35,0x60, 0x96,0x93, 0xFF,0xFC, 0xF6,0x86, 0x42,0x30, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82, 0x66,0xF8, 0x96,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x17, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x35,0x60, 0x96,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82,
+0x69,0x80, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x18, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86,
+0x35,0x60, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x82, 0x6B,0x50, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x16, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x86, 0x35,0x60, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82, 0x61,0x78, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1F, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x35,0x60, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82, 0x62,0x7C, 0x96,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x20, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x35,0x60, 0x96,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82,
+0x66,0xF8, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x17, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86,
+0x35,0xEC, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x82, 0x69,0x80, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x18, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x86, 0x35,0xEC, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82, 0x6B,0x50, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x16, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x35,0xEC, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82, 0x61,0x78, 0x96,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x1F, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x35,0xEC, 0x96,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF6,0x82,
+0x62,0x7C, 0x96,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x20, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86,
+0x35,0xEC, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x04, 0xE0,0x28, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x61,0x15, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x28, 0xE0,0x00, 0x61,0x18, 0x77,0x39,
+0x00,0x02, 0xF7,0x02, 0x00,0xF0, 0xF7,0x05, 0x42,0x28, 0xF7,0x06, 0x40,0x8A, 0xF0,0x3B,
+0x28,0x00, 0xF7,0x06, 0x40,0x8C, 0xF0,0x3B, 0x28,0x00, 0xF7,0x02, 0x00,0x00, 0xF7,0x05,
+0x7A,0xC0, 0xF7,0x05, 0x7A,0xB8, 0xF7,0x05, 0x7A,0xB0, 0xF7,0x05, 0x7A,0xC8, 0xF6,0x82,
+0xC3,0x50, 0x96,0x93, 0xFF,0xFC, 0xF6,0x82, 0x00,0x16, 0x96,0x93, 0xFF,0xFC, 0xF6,0x86,
+0x42,0x30, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x04,
+0x6F,0x34, 0x00,0x00, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x61,0xED, 0x76,0xB1,
+0x00,0x1E, 0x87,0x32, 0x00,0x00, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x20,0x3A, 0x00,0x07, 0xE6,0x00, 0x61,0xEC, 0x06,0xB0, 0x00,0x02, 0x87,0x36,
+0x00,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x20,0x3A, 0x00,0x01, 0xE6,0x00, 0x61,0xEC, 0xF5,0x06, 0x35,0xEC, 0xF7,0x04,
+0x42,0x30, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x62,0x11, 0xF5,0x82, 0x00,0x00, 0xF7,0x04, 0x42,0xA0, 0xF6,0x06,
+0x42,0xA2, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00, 0x62,0x68, 0xF7,0x33, 0x28,0x00, 0x87,0x32,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xF7,0x05, 0xE0,0x00, 0x86,0xB2, 0x00,0x08, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x62,0x3C, 0xF6,0x85, 0xE0,0x04, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0x62,0x40, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0x62,0x65, 0xF6,0x06, 0x42,0xA2, 0xF7,0x04, 0x42,0xA0, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0xF0,0x05, 0x42,0x28, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x04, 0x42,0x3C, 0xF6,0x84, 0x6F,0x34, 0x07,0x38, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0x62,0xB1, 0xF7,0x05, 0x42,0x3C, 0x87,0x36, 0x00,0x00, 0xF5,0x9E,
+0x00,0x02, 0xC0,0x3A, 0x5A,0x00, 0xE6,0x00, 0x62,0xBD, 0xF5,0x86, 0x35,0xEC, 0xF7,0x04,
+0x42,0xA0, 0xE0,0x00, 0x62,0xDC, 0xF6,0x06, 0x42,0xA2, 0xF7,0x04, 0x42,0x30, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x5A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x62,0xF9, 0xF6,0x06, 0x42,0xA4, 0xF7,0x04, 0x42,0xA4, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00,
+0x63,0x0C, 0xF7,0x33, 0x28,0x00, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x63,0x20, 0x97,0x93, 0xFF,0xFC, 0xF0,0x05, 0x42,0x28, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x20, 0x83,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x87,0x1A, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x63,0x6C, 0xF7,0x02, 0x00,0x00, 0x83,0x9A, 0x00,0x1C, 0x00,0x00, 0x00,0x01, 0xF3,0x85,
+0x7A,0xC0, 0x84,0x9A, 0x00,0x14, 0xF7,0x05, 0x7A,0xC8, 0xF4,0x85, 0x7A,0xB0, 0xF7,0x05,
+0x7A,0xB8, 0x83,0x16, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x86,0x9A, 0x00,0x14, 0xF7,0x04,
+0x7A,0xB0, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x63,0xD0, 0xF6,0x02,
+0x00,0x00, 0x86,0x9A, 0x00,0x1C, 0xF7,0x04, 0x7A,0xC0, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x63,0xD0, 0x00,0x00, 0x00,0x01, 0x86,0x9A, 0x00,0x18, 0xF7,0x04,
+0x7A,0xB8, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x63,0xD0, 0x00,0x00,
+0x00,0x01, 0x86,0x9A, 0x00,0x20, 0xF7,0x04, 0x7A,0xC8, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x20,0x3A, 0x00,0x64, 0xEE,0x00, 0x63,0xD9, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x64,0x58, 0x00,0x00, 0x00,0x01, 0x83,0x96,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x1E, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x64,0x3C, 0xF7,0x02, 0x00,0x00, 0xF7,0x05, 0x40,0x80, 0xF7,0x05,
+0x40,0x84, 0xF6,0x84, 0x6E,0x50, 0xF4,0x82, 0xFF,0xFF, 0x83,0x1E, 0x00,0x0C, 0xF4,0x85,
+0x4F,0x54, 0x93,0x36, 0x00,0x10, 0x83,0x9E, 0x00,0x10, 0x84,0x96, 0x00,0x00, 0x93,0xB6,
+0x00,0x14, 0x84,0xA6, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x94,0xB6, 0x1D,0xDC, 0xF6,0x82,
+0x00,0x64, 0xF6,0x85, 0x4A,0x98, 0xF7,0x05, 0x4A,0x9C, 0x83,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x87,0x1A, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEE,0x00,
+0x64,0x7C, 0xF3,0x82, 0x00,0x00, 0xF7,0x04, 0x42,0xA4, 0xF6,0x06, 0x42,0xA6, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0x66,0xE4, 0xF7,0x33, 0x28,0x00, 0x93,0x96, 0xFF,0xF4, 0x84,0x16,
+0x00,0x00, 0xF4,0x86, 0x42,0xC8, 0x94,0x96, 0xFF,0xEC, 0xF3,0x02, 0x00,0x0C, 0x93,0x16,
+0xFF,0xE4, 0x83,0x96, 0x00,0x00, 0x84,0x96, 0xFF,0xF4, 0x87,0x1E, 0x00,0x20, 0x00,0x00,
+0x00,0x01, 0xC0,0x26, 0x72,0x00, 0xEC,0x00, 0x66,0x48, 0xF3,0x86, 0x4A,0x98, 0x84,0xA2,
+0x00,0x24, 0x83,0x16, 0xFF,0xE4, 0xC5,0x04, 0x00,0x00, 0xB4,0x9A, 0x38,0x02, 0xC7,0x18,
+0x38,0x00, 0x83,0x22, 0x00,0x28, 0x83,0x96, 0xFF,0xF4, 0x84,0x96, 0xFF,0xE4, 0x93,0x3A,
+0x00,0x04, 0x93,0xBA, 0x00,0x08, 0xF6,0x04, 0xE0,0x00, 0xF3,0x06, 0x4A,0x98, 0xA6,0xA6,
+0x30,0x02, 0xF5,0x82, 0x00,0x00, 0xC0,0x32, 0x6A,0x00, 0xE6,0x00, 0x65,0x10, 0xC6,0x38,
+0x00,0x00, 0xF6,0x84, 0xE0,0x04, 0x87,0x32, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x65,0x14, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0x65,0x21, 0x00,0x00, 0x00,0x01, 0xF5,0x02, 0x00,0x00, 0xF6,0x84,
+0xE0,0x00, 0x87,0x32, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0x65,0x5C, 0xF5,0x82, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x65,0x64, 0x20,0x2E,
+0x00,0x00, 0xF6,0x84, 0xE0,0x04, 0x87,0x32, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x65,0x65, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0x65,0x75, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A,
+0x00,0x00, 0xE6,0x00, 0x65,0x88, 0x00,0x00, 0x00,0x01, 0x83,0x96, 0xFF,0xF4, 0x00,0x00,
+0x00,0x01, 0xF3,0x85, 0x4F,0x54, 0x87,0x22, 0x00,0x2C, 0x76,0xA1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x05,0xA0, 0x00,0x2E, 0x76,0x2D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xF4,0x82,
+0x00,0x00, 0x94,0x96, 0xFF,0xDC, 0x83,0x16, 0xFF,0xEC, 0x20,0x26, 0x00,0x07, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x06,0x98,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xE2,0x00, 0x66,0x1C, 0xF7,0x37,
+0x28,0x00, 0x85,0x16, 0xFF,0xEC, 0x85,0x96, 0xFF,0xDC, 0x00,0x00, 0x00,0x01, 0xC7,0x2C,
+0x40,0x00, 0x86,0xBA, 0x00,0x30, 0x06,0x28, 0x00,0x04, 0x05,0x28, 0x00,0x02, 0x05,0xAC,
+0x00,0x02, 0x83,0x96, 0xFF,0xDC, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x03,0x9C,
+0x00,0x01, 0x93,0x96, 0xFF,0xDC, 0x20,0x1E, 0x00,0x07, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4,
+0xFF,0xF0, 0xE2,0x00, 0x65,0xE1, 0xF6,0xB3, 0x28,0x00, 0x04,0x20, 0x00,0x1C, 0x84,0x96,
+0xFF,0xEC, 0x83,0x16, 0xFF,0xE4, 0x83,0x96, 0xFF,0xF4, 0x04,0xA4, 0x00,0x14, 0x94,0x96,
+0xFF,0xEC, 0x03,0x18, 0x00,0x0C, 0x93,0x16, 0xFF,0xE4, 0x03,0x9C, 0x00,0x01, 0xE0,0x00,
+0x64,0x94, 0x93,0x96, 0xFF,0xF4, 0x84,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x26,
+0x00,0x20, 0x00,0x00, 0x00,0x01, 0xF7,0x05, 0x4A,0x9C, 0x85,0xA6, 0x00,0x20, 0xF7,0x04,
+0x7A,0xB8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x7A,0xB8, 0xF7,0x04,
+0x7A,0xB8, 0xF6,0x84, 0x7A,0xC8, 0x86,0x26, 0x00,0x18, 0xC6,0xB4, 0x58,0x00, 0x87,0x26,
+0x00,0x1C, 0x00,0x00, 0x00,0x01, 0x27,0x38, 0x00,0x01, 0xC0,0x32, 0x72,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x66,0xE5, 0xF6,0x85, 0x7A,0xC8, 0x83,0x26,
+0x00,0x08, 0xF7,0x04, 0x6E,0x50, 0xF3,0x05, 0x3B,0x64, 0x83,0xA6, 0x00,0x08, 0xF6,0x82,
+0x00,0x00, 0x93,0xBA, 0x1D,0xDC, 0x84,0xA6, 0x00,0x0C, 0x83,0x16, 0x00,0x00, 0x94,0xBA,
+0x00,0x10, 0x83,0x1A, 0x00,0x10, 0xF6,0x85, 0x7A,0xC8, 0x93,0x3A, 0x00,0x14, 0xF7,0x02,
+0x00,0x01, 0xF7,0x05, 0x40,0x84, 0xF6,0x85, 0x7A,0xC0, 0xF6,0x85, 0x7A,0xB8, 0xF6,0x85,
+0x7A,0xB0, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x08, 0xF3,0x84, 0x6F,0x34, 0x00,0x00, 0x00,0x01, 0x87,0x1E, 0x00,0x18, 0xF6,0x84,
+0xE0,0x1C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x00, 0x67,0x29, 0xF7,0x02,
+0x00,0x01, 0xF7,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x67,0xE8, 0xF5,0x82,
+0x00,0x01, 0xF7,0x04, 0xE0,0x1C, 0x86,0x9E, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x67,0xE9, 0xC5,0x84,
+0x00,0x00, 0x86,0x9E, 0x00,0x10, 0xF7,0x04, 0xE0,0x00, 0xF6,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x67,0x88, 0x05,0x1C, 0x00,0x10, 0x86,0x9E, 0x00,0x14, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x67,0x8C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x67,0x99, 0x00,0x00,
+0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0xAA, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x67,0xD4, 0xF6,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x67,0xDC, 0x20,0x32, 0x00,0x00, 0x86,0xAA, 0x00,0x04, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x67,0xDD, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x67,0xED, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x68,0x10, 0xF6,0x06,
+0x42,0x9C, 0xF7,0x04, 0x42,0x9C, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0xF7,0x04,
+0x75,0xF4, 0x75,0xAC, 0xFF,0xE1, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x68,0x45, 0x95,0x96,
+0xFF,0xF4, 0xF7,0x04, 0x42,0x98, 0xF6,0x06, 0x42,0x98, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x87,0x1E, 0x00,0x20, 0x04,0x1C, 0x00,0x20, 0x76,0xA1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A, 0x00,0x08, 0xEE,0x00,
+0x68,0xC4, 0xF3,0x06, 0x15,0x54, 0xF5,0x02, 0x00,0x00, 0x05,0x9C, 0x00,0x22, 0xC4,0xAC,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x87,0x22, 0x00,0x00, 0x76,0xA1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC0,0x2A, 0x72,0x00, 0xEC,0x00,
+0x68,0xC0, 0xC6,0xA4, 0x60,0x00, 0xA7,0x26, 0x60,0x02, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x05,0x28, 0x00,0x01, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xE8, 0xF7,0x2F,
+0x68,0x00, 0x05,0xAC, 0x00,0x01, 0xE0,0x00, 0x68,0x78, 0x06,0x30, 0x00,0x02, 0xF3,0x06,
+0x15,0x54, 0x93,0x13, 0xFF,0xFC, 0xF7,0x04, 0xE0,0x24, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x04, 0xE0,0x1C, 0x00,0x00, 0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0xF3,0x06,
+0xE0,0x00, 0x93,0x13, 0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0xF3,0x02, 0x00,0x01, 0x93,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xEE,0x64, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x69,0x28, 0xF6,0x06, 0x42,0x9E, 0xF7,0x04, 0x42,0x9C, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x83,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0x69,0x6C, 0xF3,0x06, 0x35,0xEC, 0xF7,0x04, 0x42,0x30, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x32,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x69,0x6D, 0xF0,0x05, 0x42,0x28, 0xF3,0x06, 0x35,0x60, 0xF3,0x05, 0x42,0x30, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x04, 0xF5,0x04, 0x6F,0x34, 0xF7,0x04,
+0x42,0x40, 0x86,0x2A, 0x00,0x18, 0x07,0x38, 0x00,0x01, 0xF6,0x84, 0xE0,0x1C, 0xF7,0x05,
+0x42,0x40, 0xC0,0x36, 0x62,0x00, 0xEC,0x00, 0x69,0xB5, 0xF7,0x02, 0x00,0x01, 0xF7,0x02,
+0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x6A,0x80, 0xF7,0x02, 0x00,0x01, 0xF7,0x04,
+0xE0,0x1C, 0x86,0xAA, 0x00,0x18, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x6A,0x7D, 0xC5,0x84, 0x00,0x00, 0x86,0xAA,
+0x00,0x10, 0xF7,0x04, 0xE0,0x00, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x6A,0x14, 0x04,0xA8, 0x00,0x10, 0x86,0xAA, 0x00,0x14, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x6A,0x18, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x6A,0x25, 0x00,0x00, 0x00,0x01, 0xF5,0x82,
+0x00,0x00, 0x86,0xA6, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x6A,0x60, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x6A,0x68, 0x20,0x32, 0x00,0x00, 0x86,0xA6, 0x00,0x04, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x6A,0x69, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x6A,0x81, 0xC7,0x2C, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0xE0,0x00, 0x6A,0x80, 0xC7,0x2C, 0x00,0x00, 0xC7,0x04, 0x00,0x00, 0x20,0x3A,
+0x00,0x00, 0xEE,0x00, 0x6B,0x3D, 0xF6,0x86, 0x40,0x8A, 0xF7,0x04, 0x40,0x88, 0x76,0xB5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6B,0x3C, 0xF6,0x82, 0x00,0x00, 0xF6,0x85, 0x40,0x80, 0xF6,0x85,
+0x40,0x84, 0x96,0x93, 0xFF,0xFC, 0x96,0x93, 0xFF,0xFC, 0xF7,0x04, 0xE0,0x1C, 0x00,0x00,
+0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0xF3,0x86, 0xE0,0x00, 0x93,0x93, 0xFF,0xFC, 0x95,0x13,
+0xFF,0xFC, 0xF3,0x82, 0x00,0x02, 0x93,0x93, 0xFF,0xFC, 0x96,0x96, 0xFF,0xF4, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xEE,0x64, 0x97,0x93, 0xFF,0xFC, 0xF4,0x05, 0x40,0x84, 0x86,0x96,
+0xFF,0xF4, 0xF7,0x04, 0x6E,0x50, 0xF3,0x86, 0x35,0xEC, 0xF6,0x85, 0x40,0x90, 0xF6,0x85,
+0x40,0x94, 0x87,0x3A, 0x1D,0xDC, 0xF6,0x85, 0x42,0x28, 0xF7,0x05, 0x3B,0x64, 0xF7,0x04,
+0x42,0x30, 0xF4,0x05, 0x40,0x80, 0xC0,0x3A, 0x3A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6B,0x3D, 0xF3,0x86, 0x35,0x60, 0xF3,0x85, 0x42,0x30, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF4,0x86, 0x42,0x30, 0x94,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x6D,0xD9, 0xF5,0x82, 0x00,0x00, 0xF7,0x04, 0x40,0x8C, 0xF6,0x06, 0x40,0x8C, 0x76,0x31,
+0x00,0x1E, 0xF6,0x84, 0x42,0x28, 0x76,0x30, 0xFF,0xE5, 0x06,0xB4, 0x00,0x01, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x6B,0xC8, 0xF6,0x85,
+0x42,0x28, 0xF7,0x04, 0x40,0x88, 0xF6,0x86, 0x40,0x8A, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6D,0x0D, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x40,0x8C, 0xF6,0x86, 0x40,0x8C, 0x76,0xB5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6C,0x35, 0xF6,0x06, 0x40,0x8A, 0xF7,0x04, 0x40,0x88, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6C,0x34, 0xF4,0x86, 0x36,0x78, 0xF7,0x04, 0x42,0x44, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x4A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6C,0x35, 0xF4,0x82, 0x00,0x01, 0xF4,0xB3, 0x28,0x00, 0xE0,0x00, 0x6D,0x10, 0xF0,0x05,
+0x42,0x2C, 0xF7,0x04, 0x40,0x8C, 0xF5,0x06, 0x40,0x8C, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6C,0xC1, 0xF6,0x06, 0x40,0x8A, 0xF7,0x04, 0x40,0x88, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6C,0xC1, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x2C, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x20,0x3A, 0x00,0x09, 0xEE,0x00, 0x6D,0x11, 0xF7,0x05, 0x42,0x2C, 0xF0,0x2B,
+0x28,0x00, 0xF0,0x33, 0x28,0x00, 0xF5,0x82, 0x00,0x01, 0xF7,0x04, 0x42,0x94, 0xF6,0x06,
+0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00, 0x6D,0x10, 0xF7,0x33, 0x28,0x00, 0xF7,0x04,
+0x40,0x8C, 0xF6,0x86, 0x40,0x8C, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x6D,0x14, 0x20,0x2E,
+0x00,0x00, 0xF7,0x04, 0x40,0x88, 0xF6,0x06, 0x40,0x8A, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6D,0x15, 0x20,0x2E, 0x00,0x00, 0xF0,0x33, 0x28,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0x6D,0xB5, 0xF4,0x86, 0x35,0xEC, 0xF7,0x04, 0x42,0x30, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x4A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x6D,0x59, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x28, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6D,0x79, 0xF6,0x82, 0x00,0x3C, 0xF6,0x84, 0xE0,0x28, 0xE0,0x00,
+0x6D,0x78, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0xE0,0x28, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x6D,0x79, 0xF6,0x82, 0x00,0xF0, 0xF7,0x04, 0xE0,0x28, 0x00,0x00,
+0x00,0x01, 0x76,0xB9, 0x00,0x02, 0xF7,0x04, 0x42,0x28, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x6A,0x00, 0xEC,0x00, 0x6D,0xB5, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0xF0,0x05,
+0x42,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x19, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x6D,0xB4, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF4,0x82, 0xC3,0x50, 0x94,0x93, 0xFF,0xFC, 0xF4,0x82, 0x00,0x16, 0x94,0x93,
+0xFF,0xFC, 0xF4,0x86, 0x42,0x30, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x04, 0xF5,0x86, 0x36,0x78, 0x95,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x42,0x44, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x82, 0x74,0x18, 0x95,0x93, 0xFF,0xFC, 0xF5,0x82, 0x00,0x19, 0x95,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x36,0x78, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x74,0xAC, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1D, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x37,0x04, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x78,0x00, 0x95,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x37,0x04, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82,
+0x78,0xFC, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1A, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x37,0x90, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x82, 0x80,0xD8, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x37,0x90, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x81,0x74, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1D, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x38,0x1C, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x87,0x74, 0x95,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x38,0x1C, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82,
+0x94,0xF8, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x39,0x34, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x82, 0x8A,0x00, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x39,0x34, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x8E,0x08, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1A, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x39,0x34, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x96,0x9C, 0x95,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x1E, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x38,0xA8, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82,
+0x9B,0x2C, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x38,0xA8, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x82, 0xA2,0xDC, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1E, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x3A,0xD8, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0x9E,0x54, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1B, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x3A,0xD8, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0xA3,0xC0, 0x95,0x93,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x39,0xC0, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82,
+0xA7,0x64, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1E, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x39,0xC0, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x82, 0xAA,0x04, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82, 0x00,0x1B, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x39,0xC0, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x82, 0xAE,0xF8, 0x95,0x93, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x3A,0x4C, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x50, 0xF0,0x3B,
+0x28,0x00, 0xF7,0x06, 0x40,0x88, 0xF0,0x3B, 0x28,0x00, 0xF6,0x02, 0x00,0x00, 0xF6,0x05,
+0x40,0x80, 0xF6,0x05, 0x40,0x84, 0xF7,0x06, 0x3B,0x70, 0xF6,0x3B, 0x28,0x00, 0xF7,0x06,
+0x3B,0x72, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82, 0xCA,0x20, 0xF5,0x85, 0x3B,0x74, 0xF7,0x06,
+0x3B,0x78, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06, 0x3B,0x7A, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82,
+0xB1,0x94, 0xF5,0x85, 0x3B,0x7C, 0xF7,0x06, 0x3B,0x80, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06,
+0x3B,0x82, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82, 0xC7,0x54, 0xF5,0x85, 0x3B,0x84, 0xF7,0x06,
+0x3B,0x88, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06, 0x3B,0x8A, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82,
+0xBE,0xF8, 0xF5,0x85, 0x3B,0x8C, 0xF7,0x06, 0x3B,0x90, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06,
+0x3B,0x92, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82, 0xC8,0xF8, 0xF5,0x85, 0x3B,0x94, 0xF7,0x06,
+0x3B,0x98, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06, 0x3B,0x9A, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82,
+0xC5,0xD8, 0xF5,0x85, 0x3B,0x9C, 0xF7,0x06, 0x3B,0xA0, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06,
+0x3B,0xA2, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82, 0xC7,0x70, 0xF5,0x85, 0x3B,0xA4, 0xF7,0x06,
+0x3B,0xA8, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06, 0x3B,0xAA, 0xF0,0xBB, 0x28,0x00, 0xF5,0x82,
+0xC1,0xB4, 0xF5,0x85, 0x3B,0xAC, 0x96,0x16, 0xFF,0xF4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD5,0x40, 0x97,0x93, 0xFF,0xFC, 0xF6,0x84, 0x6E,0x50, 0x86,0x16, 0xFF,0xF4, 0x00,0x00,
+0x00,0x01, 0x96,0x36, 0x1D,0xDC, 0xF6,0x05, 0x3B,0x64, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x30, 0x25,0x94, 0x00,0x20, 0xF0,0x2F,
+0x28,0x00, 0x26,0x14, 0x00,0x38, 0xF0,0x33, 0x28,0x00, 0x90,0x13, 0xFF,0xFC, 0xF7,0x04,
+0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x96,0x13, 0xFF,0xFC, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF5,0xF4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x72,0x1D, 0xF5,0x02, 0x17,0x70, 0xF7,0x04, 0x42,0x54, 0x00,0x00,
+0x00,0x01, 0x27,0x38, 0x00,0x01, 0xF7,0x05, 0x42,0x54, 0x95,0x13, 0xFF,0xFC, 0xF5,0x02,
+0x00,0x1B, 0x95,0x13, 0xFF,0xFC, 0xF5,0x06, 0x42,0x44, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04, 0xE0,0x04, 0x86,0x16, 0x00,0x00, 0xF6,0x82,
+0x00,0xFF, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38, 0x6C,0x00, 0xF7,0x33, 0x28,0x00, 0xF7,0x06,
+0xE0,0x06, 0x87,0x3A, 0x00,0x00, 0x06,0xB0, 0x00,0x02, 0xF7,0x37, 0x28,0x00, 0xF6,0x84,
+0x3B,0x64, 0x07,0x30, 0x00,0x04, 0xF6,0xBB, 0x28,0x00, 0x87,0x02, 0xFF,0x34, 0x06,0x30,
+0x00,0x06, 0xF7,0x33, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x30, 0x26,0x14, 0x00,0x20, 0xF0,0x33, 0x28,0x00, 0x27,0x14,
+0x00,0x38, 0xF0,0x3B, 0x28,0x00, 0x97,0x13, 0xFF,0xFC, 0x90,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x96,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xF3,0x38, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x73,0x19, 0xF5,0x82, 0x17,0x70, 0xF7,0x04, 0x42,0x54, 0x00,0x00, 0x00,0x01, 0x27,0x38,
+0x00,0x01, 0xF7,0x05, 0x42,0x54, 0x95,0x93, 0xFF,0xFC, 0xF5,0x82, 0x00,0x1B, 0x95,0x93,
+0xFF,0xFC, 0xF5,0x86, 0x42,0x44, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x40, 0x26,0x14, 0x00,0x20, 0x96,0x16, 0xFF,0xC4, 0xF0,0x33,
+0x28,0x00, 0x90,0x13, 0xFF,0xFC, 0x96,0x13, 0xFF,0xFC, 0x26,0x14, 0x00,0x38, 0x96,0x16,
+0xFF,0xBC, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93,
+0xFF,0xFC, 0x90,0x13, 0xFF,0xFC, 0xF7,0x04, 0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13,
+0xFF,0xFC, 0x86,0x16, 0xFF,0xBC, 0x00,0x00, 0x00,0x01, 0x96,0x13, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xC4, 0x00,0x00, 0x00,0x01, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xF5,0xF4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x73,0xE5, 0xF6,0x02,
+0x17,0x70, 0xF7,0x04, 0x42,0x54, 0x00,0x00, 0x00,0x01, 0x27,0x38, 0x00,0x01, 0xF7,0x05,
+0x42,0x54, 0x96,0x13, 0xFF,0xFC, 0xF6,0x02, 0x00,0x1B, 0x96,0x13, 0xFF,0xFC, 0xF6,0x06,
+0x42,0x44, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x04, 0xF5,0x82, 0x00,0x00, 0xF5,0x85, 0x40,0x80, 0x95,0x96, 0xFF,0xF4, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCB,0x50, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xF4, 0xF5,0x02,
+0x00,0x64, 0xF5,0x05, 0x3B,0xB4, 0xF7,0x04, 0x42,0x50, 0xF4,0x86, 0x42,0x50, 0x76,0xA5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF6,0x04, 0x4F,0x5C, 0xF4,0x02, 0x00,0x06, 0xF4,0x05,
+0x42,0x54, 0xF5,0x85, 0x3B,0x6C, 0xF5,0x85, 0x3B,0xB8, 0x95,0x32, 0x00,0x00, 0x95,0xB2,
+0x00,0x04, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x27,
+0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x71,0xB0, 0x97,0x93, 0xFF,0xFC, 0xF4,0x06,
+0x37,0x04, 0xF4,0x05, 0x42,0x44, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x50, 0xF7,0x04, 0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF6,0x04, 0x6F,0x34, 0xC7,0x38, 0x6F,0xC0, 0x86,0xB2,
+0x00,0x0C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x77,0xEC, 0xC5,0x04,
+0x00,0x00, 0x86,0xB2, 0x00,0x10, 0xF7,0x04, 0xE0,0x00, 0xF3,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x75,0x18, 0x04,0xB0, 0x00,0x10, 0x86,0xB2, 0x00,0x14, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x75,0x1C, 0x20,0x1A,
+0x00,0x00, 0xF3,0x02, 0x00,0x01, 0x20,0x1A, 0x00,0x00, 0xE6,0x00, 0x75,0x29, 0x00,0x00,
+0x00,0x01, 0xF5,0x02, 0x00,0x00, 0x86,0xA6, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x75,0x64, 0xF6,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x75,0x6C, 0x20,0x32, 0x00,0x00, 0x86,0xA6, 0x00,0x04, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x75,0x6D, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x75,0x7D, 0x20,0x2A,
+0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0x77,0xEC, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x26,0x14,
+0x00,0x20, 0xF0,0x33, 0x28,0x00, 0x04,0xA0, 0x00,0x02, 0xF0,0x27, 0x28,0x00, 0xF5,0x82,
+0x00,0x00, 0x23,0x94, 0x00,0x22, 0xF5,0x9F, 0x28,0x00, 0x03,0xA0, 0x00,0x1A, 0x93,0x96,
+0xFF,0xD4, 0x25,0x94, 0x00,0x22, 0x85,0xAE, 0x00,0x00, 0x77,0xAD, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC5,0xAC, 0x7F,0xC0, 0x75,0xAD, 0xFF,0xF0, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x06,0xA4, 0x00,0x02, 0x23,0x14, 0x00,0x1E, 0x75,0x15, 0x00,0x1E, 0xF5,0x9F,
+0x28,0x00, 0xF3,0x84, 0xE0,0x00, 0x75,0x28, 0xFF,0xE5, 0x93,0xA2, 0x00,0x1C, 0xF5,0x84,
+0xE0,0x04, 0x73,0x99, 0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0xAC, 0x73,0x95,
+0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0xCC, 0x23,0x94, 0x00,0x42, 0x95,0xA2,
+0x00,0x20, 0x87,0x16, 0xFF,0xE0, 0x75,0x95, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x95,0x96,
+0xFF,0xB4, 0x75,0x95, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x95,0x96, 0xFF,0xC4, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0xF4,0x84, 0x4F,0x58, 0x87,0x1A,
+0x00,0x00, 0xC4,0xA0, 0x4A,0x00, 0x74,0xA4, 0xFF,0xFA, 0xC5,0xA4, 0x00,0x00, 0xF5,0x9F,
+0x28,0x00, 0x83,0x96, 0xFF,0xAC, 0x23,0x14, 0x00,0x1A, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x85,0x96, 0xFF,0xB4, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x83,0x96, 0xFF,0xCC, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xE8, 0x23,0x14, 0x00,0x16, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x76,0x19,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x85,0x96, 0xFF,0xC4, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xF0, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF3,0x82,
+0x00,0x02, 0xF3,0xA3, 0x28,0x00, 0x04,0x20, 0x00,0x18, 0x25,0x94, 0x00,0x22, 0x85,0xAE,
+0x00,0x00, 0x77,0xAD, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC5,0xAC, 0x7F,0xC0, 0x75,0xAD,
+0xFF,0xF0, 0x83,0x96, 0xFF,0xD4, 0xF5,0xA3, 0x28,0x00, 0xF4,0x9F, 0x28,0x00, 0x25,0x94,
+0x00,0x42, 0x85,0xAE, 0x00,0x00, 0x77,0xAD, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC5,0xAC,
+0x7F,0xC0, 0x75,0xAD, 0xFF,0xF0, 0x44,0xAD, 0x00,0x00, 0x94,0x93, 0xFF,0xFC, 0xF7,0x86,
+0xE0,0x00, 0x97,0x93, 0xFF,0xFC, 0xF3,0x84, 0x4F,0x5C, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0xD8, 0x97,0x93, 0xFF,0xFC, 0xF0,0x05, 0x40,0x84, 0xF7,0x86,
+0xE0,0x00, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD5,0xA0, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x04, 0x6E,0x50, 0xF4,0x05, 0x40,0x84, 0x87,0x3A, 0x1D,0xDC, 0x00,0x00,
+0x00,0x01, 0xF7,0x05, 0x3B,0x64, 0xF5,0x86, 0x36,0x78, 0xF5,0x85, 0x42,0x44, 0xF3,0x86,
+0x35,0x60, 0xF3,0x85, 0x42,0x30, 0xF5,0x86, 0x42,0x44, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x1F,0x48, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x86, 0x42,0x44, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x78,0x89, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xEE,0x00, 0x78,0x51, 0xF6,0x06, 0x42,0x50, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x71,0xB0, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x78,0x88, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x50, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF5,0x82, 0x00,0x06, 0xF5,0x85,
+0x42,0x54, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x72,0xAC, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86,
+0x37,0x90, 0xF5,0x85, 0x42,0x44, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF6,0x06, 0x36,0x78, 0xF6,0x05, 0x42,0x44, 0xF7,0x02, 0x00,0x00, 0xF7,0x05,
+0x40,0x80, 0xF7,0x05, 0x40,0x94, 0xF6,0x84, 0x6E,0x50, 0xF7,0x05, 0x40,0x90, 0x97,0x36,
+0x1D,0xDC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x02,
+0x00,0x01, 0xF7,0x05, 0x40,0x80, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0xA8, 0xF7,0x04, 0x42,0x50, 0xF5,0x86, 0x42,0x50, 0x76,0xAD,
+0x00,0x1E, 0xF4,0x84, 0x6F,0x34, 0x76,0xB4, 0xFF,0xE5, 0x94,0x96, 0xFF,0xC4, 0xC7,0x38,
+0x6F,0xC0, 0x86,0xA6, 0x00,0x0C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x79,0x55, 0xF6,0x06, 0x42,0x9A, 0xF7,0x04, 0x42,0x98, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0xF7,0x04, 0x42,0x50, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x85,0x16,
+0xFF,0xC4, 0xC7,0x38, 0x6F,0xC0, 0x86,0xAA, 0x00,0x0C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x80,0xA8, 0xF6,0x06, 0x42,0x9A, 0x87,0x2A, 0x00,0x10, 0x86,0x2A,
+0x00,0x1C, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x79,0xA8, 0xF6,0x82, 0x00,0x00, 0x87,0x2A,
+0x00,0x14, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x79,0xAC, 0x20,0x36,
+0x00,0x00, 0xF6,0x82, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0x7A,0x05, 0x24,0x94,
+0x00,0x20, 0x94,0x96, 0xFF,0xBC, 0x85,0x16, 0xFF,0xC4, 0xF0,0x27, 0x28,0x00, 0x05,0x28,
+0x00,0x10, 0x95,0x16, 0xFF,0xB4, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x72,0x50, 0x97,0x93, 0xFF,0xFC, 0x84,0x96, 0xFF,0xB4, 0x00,0x00, 0x00,0x01, 0x94,0x93,
+0xFF,0xFC, 0x85,0x16, 0xFF,0xBC, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xF9,0x34, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x80,0xC4, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x80,0x6C, 0x00,0x00, 0x00,0x01, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x25,0x94, 0x00,0x20, 0xF0,0x2F,
+0x28,0x00, 0x04,0xA0, 0x00,0x02, 0x94,0x96, 0xFF,0x5C, 0xF0,0x27, 0x28,0x00, 0xF4,0x82,
+0x00,0x00, 0x25,0x14, 0x00,0x5A, 0xF4,0xAB, 0x28,0x00, 0x07,0x20, 0x00,0x1A, 0x25,0x14,
+0x00,0x5A, 0x85,0x2A, 0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC5,0x28,
+0x7F,0xC0, 0x75,0x29, 0xFF,0xF0, 0x75,0xAD, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x23,0x14,
+0x00,0x1E, 0x76,0x19, 0x00,0x1E, 0xF5,0x3B, 0x28,0x00, 0xF4,0x84, 0xE0,0x00, 0x76,0x30,
+0xFF,0xE5, 0x94,0xA2, 0x00,0x1C, 0xF5,0x04, 0xE0,0x04, 0x84,0x96, 0xFF,0x5C, 0x95,0x22,
+0x00,0x20, 0x87,0x16, 0xFF,0xE0, 0x06,0xA4, 0x00,0x02, 0x75,0x15, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x95,0x16, 0xFF,0x54, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96,
+0xFF,0x9C, 0x75,0x15, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x95,0x16, 0xFF,0x94, 0x74,0x95,
+0x00,0x1E, 0x85,0x16, 0xFF,0x5C, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x8C, 0x84,0x96,
+0xFF,0x54, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x85,0x16, 0xFF,0x9C, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x14, 0x00,0x1A, 0x76,0x19,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x84,0x96, 0xFF,0x94, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0xE8, 0x23,0x14, 0x00,0x16, 0x76,0x19, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x85,0x16, 0xFF,0x8C, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xF0, 0x06,0xB4, 0x00,0x02, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF4,0x82, 0x00,0x02, 0xF4,0xA3,
+0x28,0x00, 0x25,0x14, 0x00,0x5A, 0x85,0x2A, 0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC5,0x28, 0x7F,0xC0, 0x75,0x29, 0xFF,0xF0, 0x07,0x20, 0x00,0x18, 0xF5,0x3B,
+0x28,0x00, 0x94,0x16, 0xFF,0xAC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93,
+0xFF,0xFC, 0x26,0x14, 0x00,0x38, 0x24,0x94, 0x00,0x5A, 0x84,0xA6, 0x00,0x00, 0x77,0xA5,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC4,0xA4, 0x7F,0xC0, 0x74,0xA5, 0xFF,0xF0, 0x05,0xA0,
+0x00,0x02, 0x06,0xAC, 0x00,0x02, 0x23,0x94, 0x00,0x36, 0x75,0x1D, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x07,0x20, 0x00,0x1A, 0xF4,0xB3, 0x28,0x00, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x95,0x16, 0xFF,0x54, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96,
+0xFF,0x5C, 0x75,0x15, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x95,0x16, 0xFF,0x7C, 0x74,0x95,
+0x00,0x1E, 0x85,0x16, 0xFF,0xC4, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x74, 0x85,0x2A,
+0x00,0x34, 0x24,0x94, 0x00,0x5A, 0x95,0x16, 0xFF,0x84, 0x84,0xA6, 0x00,0x00, 0x77,0xA5,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC4,0xA4, 0x7F,0xC0, 0x74,0xA5, 0xFF,0xF0, 0x25,0x14,
+0x00,0x5A, 0xF4,0xAF, 0x28,0x00, 0x85,0x2A, 0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC5,0x28, 0x7F,0xC0, 0x75,0x29, 0xFF,0xF0, 0x84,0x96, 0xFF,0xC4, 0xF5,0x3B,
+0x28,0x00, 0x84,0xA6, 0x00,0x10, 0x85,0x16, 0xFF,0xC4, 0x94,0xA2, 0x00,0x1C, 0x85,0x2A,
+0x00,0x14, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x6C, 0x95,0x22,
+0x00,0x20, 0x87,0x16, 0xFF,0xC8, 0x85,0x16, 0xFF,0x54, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x84,0x96, 0xFF,0x5C, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0xCC, 0x23,0x94, 0x00,0x32, 0x76,0x1D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x85,0x16,
+0xFF,0x7C, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1E,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xD0, 0x23,0x94, 0x00,0x2E, 0x76,0x1D,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x84,0x96, 0xFF,0x74, 0x85,0x16,
+0xFF,0x6C, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xD4, 0x23,0x94, 0x00,0x2A, 0x76,0x1D, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1E,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x16, 0xFF,0xD8, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x57,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF4,0x82, 0x00,0x02, 0xF4,0xA3, 0x28,0x00, 0x07,0x20,
+0x00,0x18, 0x25,0x14, 0x00,0x7A, 0x85,0x2A, 0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC5,0x28, 0x7F,0xC0, 0x75,0x29, 0xFF,0xF0, 0x84,0x96, 0xFF,0xC4, 0xF5,0x3B,
+0x28,0x00, 0x87,0x26, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x24, 0xF7,0x04,
+0x4F,0x58, 0xE6,0x00, 0x7E,0xF9, 0x94,0x16, 0xFF,0x54, 0xC7,0x20, 0x72,0x00, 0xF6,0x84,
+0x6E,0x50, 0x86,0x26, 0x00,0x2C, 0x77,0x38, 0xFF,0xFA, 0x25,0x14, 0x00,0x5A, 0x84,0x2A,
+0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC4,0x20, 0x7F,0xC0, 0x74,0x21,
+0xFF,0xF0, 0x47,0x39, 0x00,0x00, 0x86,0xB6, 0x1D,0xDC, 0x77,0x39, 0x00,0x02, 0xC0,0x32,
+0x6A,0x00, 0x46,0x8C, 0x00,0x01, 0xD6,0x80, 0x0A,0x68, 0x20,0x36, 0x00,0x00, 0xF6,0x86,
+0x40,0x98, 0xE6,0x00, 0x7E,0xC0, 0xC3,0xB8, 0x68,0x00, 0xC5,0x84, 0x00,0x00, 0x86,0xA6,
+0x00,0x24, 0xF7,0x04, 0xE0,0x00, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x7E,0x54, 0x03,0x24, 0x00,0x24, 0x86,0xA6, 0x00,0x28, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x7E,0x58, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x7E,0x65, 0x00,0x00, 0x00,0x01, 0xF5,0x82,
+0x00,0x00, 0x86,0x9A, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x7E,0xA0, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x7E,0xA8, 0x20,0x32, 0x00,0x00, 0x86,0x9A, 0x00,0x04, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x7E,0xA9, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x7E,0xB9, 0x20,0x2E, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x7E,0xC5, 0x00,0x00, 0x00,0x01, 0xF4,0x02,
+0x00,0x01, 0xF7,0x04, 0x4F,0x58, 0xF4,0x1F, 0x28,0x00, 0x84,0x96, 0xFF,0x54, 0x85,0x16,
+0xFF,0xC4, 0xF6,0x86, 0x40,0x9A, 0xC7,0x24, 0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0x86,0x2A,
+0x00,0x30, 0x47,0x39, 0x00,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0xE0,0x00,
+0x7F,0x4C, 0xF6,0x3B, 0x28,0x00, 0x84,0x96, 0xFF,0x54, 0xF6,0x06, 0x40,0x98, 0xC7,0x24,
+0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0xC6,0xB8, 0x00,0x00, 0x46,0xB5, 0x00,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x60,0x00, 0xF5,0x02, 0x00,0x01, 0xF5,0x37, 0x28,0x00, 0x47,0x39,
+0x00,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x60,0x00, 0x24,0x94, 0x00,0x5A, 0x84,0xA6,
+0x00,0x00, 0x77,0xA5, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC4,0xA4, 0x7F,0xC0, 0x74,0xA5,
+0xFF,0xF0, 0x07,0x38, 0x00,0x02, 0xF4,0xBB, 0x28,0x00, 0xF7,0x04, 0x4F,0x58, 0x85,0x16,
+0xFF,0x54, 0x84,0x96, 0xFF,0xAC, 0xC6,0xA8, 0x72,0x00, 0x76,0xB4, 0xFF,0xFA, 0x06,0x24,
+0x00,0x1A, 0xF6,0xB3, 0x28,0x00, 0xC7,0x24, 0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0x06,0xA8,
+0x00,0x1A, 0xF7,0x37, 0x28,0x00, 0x47,0x39, 0x00,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x24,
+0x00,0x1C, 0x97,0x13, 0xFF,0xFC, 0xF5,0x04, 0x4F,0x5C, 0x00,0x00, 0x00,0x01, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x4F,0x58, 0x84,0x96, 0xFF,0x54, 0x00,0x00, 0x00,0x01, 0xC7,0x24, 0x72,0x00, 0x77,0x38,
+0xFF,0xFA, 0x47,0x39, 0x00,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x24, 0x00,0x1C, 0x97,0x13,
+0xFF,0xFC, 0xF5,0x04, 0x4F,0x5C, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x78,0xD8, 0x97,0x93, 0xFF,0xFC, 0xF6,0x84, 0x6E,0x50, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x1D,0xDC, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x36, 0x1D,0xDC, 0x87,0x36,
+0x1D,0xDC, 0xF0,0x05, 0x40,0x84, 0xF4,0x86, 0xE0,0x00, 0x94,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD5,0xA0, 0x97,0x93, 0xFF,0xFC, 0xF4,0x05, 0x40,0x84, 0xF7,0x04,
+0x6E,0x50, 0xF0,0x05, 0x42,0x5C, 0x87,0x3A, 0x1D,0xDC, 0xF6,0x86, 0x2C,0x28, 0xF7,0x05,
+0x3B,0x64, 0xF7,0x04, 0x2D,0x38, 0xF5,0x06, 0x3A,0x4C, 0xF5,0x05, 0x42,0x44, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82, 0x00,0x1C, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x80,0x60, 0xB4,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x06,
+0x35,0xEC, 0xE0,0x00, 0x80,0x8C, 0xF5,0x05, 0x42,0x30, 0x20,0x32, 0x00,0x01, 0xE6,0x00,
+0x80,0xC4, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93,
+0xFF,0xFC, 0xF4,0x86, 0x35,0x60, 0xF4,0x85, 0x42,0x30, 0xF5,0x06, 0x42,0x44, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1F,0x48, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x80,0xC4, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x98, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x86,
+0x42,0x44, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x81,0x61, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEE,0x00, 0x81,0x29, 0xF6,0x06,
+0x42,0x50, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x72,0xAC, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x81,0x60, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x50, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF5,0x82, 0x00,0x06, 0xF5,0x85, 0x42,0x54, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x73,0x4C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x86, 0x38,0x1C, 0xF5,0x85, 0x42,0x44, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x58, 0xF7,0x04,
+0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF6,0x04,
+0x6F,0x34, 0xC7,0x38, 0x6F,0xC0, 0x86,0xB2, 0x00,0x0C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x82,0x50, 0xF4,0x82, 0x00,0x00, 0xC5,0x04, 0x00,0x00, 0x86,0xB2,
+0x00,0x10, 0xF7,0x04, 0xE0,0x00, 0xC5,0xA4, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x81,0xE4, 0x04,0x30, 0x00,0x10, 0x86,0xB2, 0x00,0x14, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x81,0xE8, 0x20,0x2E, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x81,0xF5, 0x00,0x00, 0x00,0x01, 0xF5,0x02,
+0x00,0x00, 0x86,0xA2, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0x82,0x30, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0x82,0x38, 0x20,0x32, 0x00,0x00, 0x86,0xA2, 0x00,0x04, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x82,0x39, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x82,0x49, 0x20,0x2A, 0x00,0x00, 0xF5,0x02,
+0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0x82,0x59, 0x20,0x26, 0x00,0x00, 0xF4,0x82,
+0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0x87,0x60, 0x00,0x00, 0x00,0x01, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x07,0x20, 0x00,0x02, 0xF0,0x3B,
+0x28,0x00, 0xF7,0x04, 0x4F,0x58, 0xF4,0x05, 0x3B,0xB0, 0x06,0xA0, 0x00,0x14, 0xC7,0x20,
+0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0xF7,0x37, 0x28,0x00, 0x06,0xA0, 0x00,0x16, 0xF7,0x37,
+0x28,0x00, 0xF3,0x02, 0x00,0x01, 0xF3,0x23, 0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x26,0x14, 0x00,0x20, 0xF0,0x33, 0x28,0x00, 0x04,0xA0,
+0x00,0x02, 0xF0,0x27, 0x28,0x00, 0xF3,0x02, 0x00,0x00, 0x23,0x94, 0x00,0x2A, 0xF3,0x1F,
+0x28,0x00, 0x07,0x20, 0x00,0x1A, 0x23,0x94, 0x00,0x2A, 0x83,0x9E, 0x00,0x00, 0x77,0x9D,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x9C, 0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x06,0xA4, 0x00,0x02, 0x75,0x15, 0x00,0x1E, 0xF3,0xBB,
+0x28,0x00, 0xF3,0x04, 0xE0,0x00, 0x75,0x28, 0xFF,0xE5, 0x93,0x22, 0x00,0x1C, 0xF3,0x84,
+0xE0,0x04, 0x23,0x14, 0x00,0x1E, 0x93,0x16, 0xFF,0xA4, 0x75,0x99, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0x73,0x15, 0x00,0x1E, 0x73,0x18, 0xFF,0xE5, 0x93,0x16, 0xFF,0xCC, 0x83,0x16,
+0xFF,0xA4, 0x93,0xA2, 0x00,0x20, 0x87,0x16, 0xFF,0xE0, 0x73,0x95, 0x00,0x1E, 0x73,0x9C,
+0xFF,0xE5, 0x93,0x96, 0xFF,0xAC, 0x73,0x95, 0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x93,0x96,
+0xFF,0xC4, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x94, 0x00,0x1A, 0x93,0x96, 0xFF,0xA4, 0x76,0x1D,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE8, 0x23,0x14,
+0x00,0x16, 0x93,0x16, 0xFF,0xA4, 0x76,0x19, 0x00,0x1E, 0x83,0x96, 0xFF,0xAC, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x93,0x16,
+0xFF,0xA4, 0x76,0x19, 0x00,0x1E, 0x83,0x96, 0xFF,0xCC, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16,
+0xFF,0xF0, 0x83,0x16, 0xFF,0xC4, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x37,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF3,0x82, 0x00,0x02, 0xF3,0xA3, 0x28,0x00, 0x23,0x14,
+0x00,0x2A, 0x83,0x1A, 0x00,0x00, 0x77,0x99, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x18,
+0x7F,0xC0, 0x73,0x19, 0xFF,0xF0, 0x07,0x20, 0x00,0x18, 0xF3,0x3B, 0x28,0x00, 0x94,0x16,
+0xFF,0xDC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x07,0x20,
+0x00,0x02, 0x23,0x94, 0x00,0x2A, 0x83,0x9E, 0x00,0x00, 0x77,0x9D, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC3,0x9C, 0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x24,0x80, 0x00,0x07, 0x05,0x20,
+0x00,0x0A, 0xF3,0xBB, 0x28,0x00, 0x20,0x26, 0x00,0x07, 0xEE,0x00, 0x84,0xE0, 0x06,0x28,
+0x00,0x0E, 0x86,0xB2, 0x00,0x00, 0x77,0x31, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x75,0xB1,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x05,0x28, 0x00,0x02, 0x04,0xA4, 0x00,0x01, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0xFF,0x00, 0xC6,0xB4, 0x74,0x00, 0xF6,0xB3,
+0x28,0x00, 0x87,0x32, 0x00,0x00, 0xF3,0x02, 0x00,0xFF, 0xC7,0x38, 0x5F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xC7,0x38, 0x34,0x00, 0xE0,0x00, 0x84,0x88, 0xF7,0x33, 0x28,0x00, 0x05,0x20,
+0x00,0x26, 0x86,0x2A, 0x00,0x00, 0x76,0xA9, 0x00,0x1E, 0xF5,0x84, 0x4F,0x58, 0x76,0xB4,
+0xFF,0xE5, 0x83,0x96, 0xFF,0xDC, 0xF3,0x02, 0x00,0xFF, 0x94,0x16, 0xFF,0xBC, 0xC7,0x1C,
+0x5A,0x00, 0x77,0x38, 0xFF,0xFA, 0xC6,0x30, 0x6F,0xC0, 0x76,0x31, 0xFF,0xF0, 0x47,0x39,
+0x00,0x00, 0xC7,0x38, 0x34,0x00, 0xF6,0x82, 0xFF,0x00, 0xC6,0x30, 0x6C,0x00, 0xC7,0x38,
+0x60,0x00, 0xF6,0x84, 0x3B,0x6C, 0xF7,0x2B, 0x28,0x00, 0xC5,0xA0, 0x5A,0x00, 0x75,0xAC,
+0xFF,0xFA, 0x83,0x16, 0xFF,0xDC, 0x07,0x34, 0x00,0x01, 0xF7,0x05, 0x3B,0x6C, 0x07,0x20,
+0x00,0x3A, 0xF6,0xBB, 0x28,0x00, 0x07,0x20, 0x00,0x36, 0xF0,0x3B, 0x28,0x00, 0xF3,0x82,
+0x00,0x03, 0xF3,0xA3, 0x28,0x00, 0x07,0x18, 0x00,0x1A, 0xF5,0xBB, 0x28,0x00, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x07,0x20, 0x00,0x02, 0xF0,0x3B,
+0x28,0x00, 0x24,0x80, 0x00,0x07, 0x05,0x20, 0x00,0x0A, 0x20,0x26, 0x00,0x07, 0xEE,0x00,
+0x85,0xD4, 0x06,0x28, 0x00,0x0E, 0x86,0xB2, 0x00,0x00, 0x77,0x31, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0x75,0xB1, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x05,0x28, 0x00,0x02, 0x04,0xA4,
+0x00,0x01, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0xFF,0x00, 0xC6,0xB4,
+0x74,0x00, 0xF6,0xB3, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0xF3,0x82, 0x00,0xFF, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38, 0x3C,0x00, 0xE0,0x00, 0x85,0x7C, 0xF7,0x33,
+0x28,0x00, 0x05,0xA0, 0x00,0x26, 0x86,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC5,0x20, 0x00,0x00, 0x24,0x00, 0x00,0x07, 0xF3,0x02, 0x00,0x01, 0x93,0x16,
+0xFF,0xA4, 0xF7,0x04, 0x4F,0x58, 0x83,0x96, 0xFF,0xBC, 0x24,0x80, 0x00,0x0E, 0xC7,0x1C,
+0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0xC6,0x30, 0x6F,0xC0, 0x76,0x31, 0xFF,0xF0, 0x47,0x39,
+0x00,0x00, 0xF6,0x82, 0x00,0xFF, 0xC7,0x38, 0x6C,0x00, 0xF6,0x82, 0xFF,0x00, 0xC6,0x30,
+0x6C,0x00, 0xC7,0x38, 0x60,0x00, 0xF6,0x84, 0x3B,0x6C, 0xF7,0x2F, 0x28,0x00, 0x07,0x34,
+0x00,0x01, 0xF7,0x05, 0x3B,0x6C, 0x07,0x28, 0x00,0x3A, 0xF6,0xBB, 0x28,0x00, 0x07,0x28,
+0x00,0x36, 0xF0,0x3B, 0x28,0x00, 0xF3,0x02, 0x00,0x03, 0xF3,0x2B, 0x28,0x00, 0x20,0x22,
+0x00,0x07, 0xEE,0x00, 0x86,0x94, 0xC6,0x28, 0x48,0x00, 0x06,0x30, 0x00,0x26, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x04,0xA4, 0x00,0x02, 0x04,0x20,
+0x00,0x01, 0x83,0x96, 0xFF,0xA4, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xF6,0x82,
+0xFF,0x00, 0xC7,0x38, 0x6C,0x00, 0xC7,0x1C, 0x70,0x00, 0xE0,0x00, 0x86,0x50, 0xF7,0x33,
+0x28,0x00, 0x06,0x28, 0x00,0x26, 0x86,0xB2, 0x00,0x00, 0x77,0x31, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0xFF,0x00, 0xC6,0xB4,
+0x74,0x00, 0xF6,0xB3, 0x28,0x00, 0x95,0x13, 0xFF,0xFC, 0xF3,0x04, 0x3B,0xB0, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x95,0x16, 0xFF,0xB4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD4,0x2C, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xB4, 0xF0,0x05, 0x40,0x7C, 0x83,0x96,
+0xFF,0xBC, 0x23,0x00, 0x00,0x07, 0xF3,0x05, 0x42,0x58, 0xF7,0x04, 0x42,0x50, 0xF6,0x06,
+0x42,0x50, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF3,0x06, 0x39,0x34, 0xF3,0x05,
+0x42,0x44, 0xF5,0x05, 0x40,0x74, 0xF3,0x85, 0x42,0x60, 0xF3,0x82, 0x00,0x06, 0xF3,0x85,
+0x42,0x54, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xF6,0x84, 0x2D,0x38, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x06,0x34, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0xF7,0x06,
+0x2C,0x28, 0x76,0xB5, 0x00,0x02, 0xF3,0x82, 0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0x87,0x4C, 0xB3,0xB6, 0x70,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x06, 0x42,0x44, 0x93,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1F,0x48, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x48, 0xF3,0x86,
+0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x89,0xED, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEE,0x00, 0x87,0xC9, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x73,0x4C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x89,0xEC, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93,
+0xFF,0xFC, 0x26,0x14, 0x00,0x20, 0xF0,0x33, 0x28,0x00, 0x05,0xA0, 0x00,0x02, 0xF0,0x2F,
+0x28,0x00, 0xF3,0x82, 0x00,0x00, 0x24,0x94, 0x00,0x22, 0xF3,0xA7, 0x28,0x00, 0x04,0xA0,
+0x00,0x1A, 0x94,0x96, 0xFF,0xD4, 0x23,0x94, 0x00,0x22, 0x83,0x9E, 0x00,0x00, 0x77,0x9D,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x9C, 0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x06,0xAC, 0x00,0x02, 0x23,0x14, 0x00,0x1E, 0x75,0x19,
+0x00,0x1E, 0xF3,0xA7, 0x28,0x00, 0xF4,0x84, 0xE0,0x00, 0x75,0x28, 0xFF,0xE5, 0x94,0xA2,
+0x00,0x1C, 0xF3,0x84, 0xE0,0x04, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96,
+0xFF,0xB4, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0xCC, 0x84,0x96,
+0xFF,0xB4, 0x93,0xA2, 0x00,0x20, 0x87,0x16, 0xFF,0xE0, 0x73,0x95, 0x00,0x1E, 0x73,0x9C,
+0xFF,0xE5, 0x93,0x96, 0xFF,0xBC, 0x73,0x95, 0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96,
+0xFF,0xC4, 0x83,0x96, 0xFF,0xBC, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F,
+0x28,0x00, 0xF5,0x84, 0x4F,0x58, 0x87,0x1A, 0x00,0x00, 0xC5,0xA0, 0x5A,0x00, 0x75,0xAC,
+0xFF,0xFA, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x14, 0x00,0x1A, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x45,0xAD, 0x00,0x00, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x84,0x96, 0xFF,0xCC, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0xE8, 0x23,0x14, 0x00,0x16, 0x76,0x19, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x83,0x96, 0xFF,0xC4, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xF0, 0x06,0xB4, 0x00,0x02, 0xC7,0x38,
+0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF4,0x82, 0x00,0x02, 0xF4,0xA3,
+0x28,0x00, 0x04,0x20, 0x00,0x18, 0x23,0x94, 0x00,0x22, 0x83,0x9E, 0x00,0x00, 0x77,0x9D,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x9C, 0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x84,0x96,
+0xFF,0xD4, 0xF3,0xA3, 0x28,0x00, 0xF3,0x82, 0x00,0x01, 0xF3,0xA7, 0x28,0x00, 0x95,0x93,
+0xFF,0xFC, 0xF4,0x86, 0xE0,0x00, 0x94,0x93, 0xFF,0xFC, 0xF3,0x84, 0x4F,0x5C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0xD8, 0x97,0x93, 0xFF,0xFC, 0xF4,0x86,
+0x36,0x78, 0xF4,0x85, 0x42,0x44, 0xF0,0x05, 0x40,0x84, 0xF6,0x84, 0x4F,0x5C, 0xF7,0x02,
+0x00,0x64, 0x97,0x36, 0x00,0x00, 0x90,0x36, 0x00,0x04, 0xF7,0x02, 0x00,0x01, 0xF7,0x05,
+0x40,0x84, 0xF3,0x86, 0x35,0xEC, 0xF3,0x85, 0x42,0x30, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x90, 0xF7,0x04, 0x42,0x60, 0xF5,0x02,
+0x00,0x00, 0x05,0xB8, 0x00,0x18, 0xF6,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x32,
+0x00,0x07, 0xEE,0x00, 0x8A,0x70, 0xC7,0x30, 0x60,0x00, 0xC7,0x38, 0x58,0x00, 0x07,0x38,
+0x00,0x0E, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB4, 0x74,0x00, 0xC0,0x36,
+0x52,0x00, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x8A,0x71, 0x07,0x30, 0x00,0x01, 0xE0,0x00, 0x8A,0x18, 0xF7,0x05, 0x42,0x58, 0xF4,0x04,
+0x42,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x22, 0x00,0x07, 0xEE,0x00, 0x8D,0x94, 0x24,0x94,
+0x00,0x36, 0xF6,0x04, 0x42,0x60, 0x25,0x14, 0x00,0x38, 0x23,0x94, 0x00,0x20, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x30,
+0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B,
+0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x34, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x32, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x30, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2E, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2C, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2A, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94,
+0x00,0x28, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x94,0x13, 0xFF,0xFC, 0x95,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0x7C, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x04, 0x42,0x60, 0x24,0x94, 0x00,0x7E, 0x25,0x14, 0x00,0x80, 0x23,0x94,
+0x00,0x68, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x7C, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x7A, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x78, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x76, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x74, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x72, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x24,0x94, 0x00,0x70, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x95,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0x74, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD2,0x58, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x74, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x42,0x58, 0x23,0x94, 0x00,0x50, 0xC7,0x00, 0x72,0x00, 0x97,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0x6C, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCF,0x24, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x6C, 0xF6,0x86, 0x42,0x50, 0x93,0x93, 0xFF,0xFC, 0xF3,0x84,
+0x42,0x58, 0x76,0xB5, 0x00,0x1E, 0x93,0x93, 0xFF,0xFC, 0xF7,0x04, 0x42,0x50, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x83,0x96,
+0xFF,0x7C, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xF3,0x38, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x8D,0x95, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x42,0x58, 0xF7,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x07, 0xEE,0x00,
+0x8D,0xD4, 0xF3,0x82, 0x17,0x70, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x82, 0x00,0x1C, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0x8D,0xF4, 0xB3,0xBA, 0x68,0x02, 0xE0,0x00, 0x8D,0xF4, 0xF0,0x05,
+0x2D,0x38, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x1B, 0x93,0x93, 0xFF,0xFC, 0xF3,0x86,
+0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x88, 0xF7,0x04, 0x42,0x50, 0xF6,0x86, 0x42,0x50, 0x76,0xB5, 0x00,0x1E, 0xF3,0x84,
+0x6F,0x34, 0x76,0xB4, 0xFF,0xE5, 0x93,0x96, 0xFF,0xC4, 0xC7,0x38, 0x6F,0xC0, 0x86,0x9E,
+0x00,0x0C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x8E,0x65, 0xF6,0x06,
+0x42,0xA0, 0xF7,0x04, 0x42,0xA0, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00, 0x94,0xE4, 0xF7,0x33,
+0x28,0x00, 0xF6,0x04, 0x42,0x60, 0x24,0x94, 0x00,0x36, 0x85,0x16, 0xFF,0xC4, 0x23,0x94,
+0x00,0x38, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x85,0x2A, 0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x95,0x16, 0xFF,0xBC, 0xF7,0x1F, 0x28,0x00, 0x87,0x32,
+0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0x85,0x16, 0xFF,0xC4, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x34, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x32, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x30, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2E, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2C, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2A, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94,
+0x00,0x28, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x2A, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x27,0x14, 0x00,0x20, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xC4, 0x00,0x00,
+0x00,0x01, 0x87,0x1E, 0x00,0x10, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0x8F,0xF0, 0xF6,0x82, 0x00,0x00, 0x87,0x1E, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0x8F,0xF4, 0x20,0x36, 0x00,0x00, 0xF6,0x82, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0x90,0x41, 0x00,0x00, 0x00,0x01, 0x85,0x16, 0xFF,0xC4, 0x00,0x00,
+0x00,0x01, 0x05,0x28, 0x00,0x10, 0x95,0x16, 0xFF,0xB4, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x72,0x50, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xB4, 0x27,0x14,
+0x00,0x20, 0x93,0x93, 0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xF9,0x34, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x94,0xE4, 0x00,0x00, 0x00,0x01, 0x85,0x16,
+0xFF,0xBC, 0x00,0x00, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0x94,0xBC, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0xF5,0x02,
+0x00,0x00, 0x23,0x94, 0x00,0x62, 0xF5,0x1F, 0x28,0x00, 0x75,0x95, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0x06,0x20, 0x00,0x02, 0x06,0xB0, 0x00,0x02, 0x23,0x14, 0x00,0x1E, 0x73,0x99,
+0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0x74, 0x75,0x15, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x95,0x16, 0xFF,0x7C, 0x73,0x95, 0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96,
+0xFF,0x8C, 0x85,0x16, 0xFF,0xC4, 0x73,0x95, 0x00,0x1E, 0x93,0x96, 0xFF,0x84, 0x85,0x2A,
+0x00,0x34, 0x23,0x94, 0x00,0x62, 0x95,0x16, 0xFF,0xAC, 0xF0,0x33, 0x28,0x00, 0x05,0x20,
+0x00,0x1A, 0x95,0x16, 0xFF,0x94, 0x83,0x9E, 0x00,0x00, 0x77,0x9D, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC3,0x9C, 0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x74,0x95, 0x00,0x1E, 0xF3,0xAB,
+0x28,0x00, 0x85,0x16, 0xFF,0xC4, 0x74,0xA4, 0xFF,0xE5, 0x85,0x2A, 0x00,0x10, 0x83,0x96,
+0xFF,0xC4, 0x95,0x22, 0x00,0x1C, 0x83,0x9E, 0x00,0x14, 0x85,0x16, 0xFF,0x84, 0x93,0xA2,
+0x00,0x20, 0x87,0x16, 0xFF,0xE0, 0x75,0x28, 0xFF,0xE5, 0x95,0x16, 0xFF,0x84, 0xF3,0x84,
+0x4F,0x58, 0x85,0x16, 0xFF,0x74, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x93,0x96, 0xFF,0xA4, 0xC0,0x22, 0x3A,0x00, 0x83,0x96,
+0xFF,0x7C, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x14, 0x00,0x1A, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE8, 0x23,0x14, 0x00,0x16, 0x76,0x19,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x85,0x16, 0xFF,0x8C, 0x83,0x96,
+0xFF,0x84, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x76,0x19, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x16, 0xFF,0xF0, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF5,0x02, 0x00,0x02, 0xF5,0x23, 0x28,0x00, 0x23,0x94,
+0x00,0x52, 0x83,0x9E, 0x00,0x00, 0x77,0x9D, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x9C,
+0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x03,0x20, 0x00,0x18, 0xE6,0x00, 0x92,0x30, 0xF3,0x9B,
+0x28,0x00, 0xF7,0x04, 0x42,0x70, 0xE0,0x00, 0x92,0x9C, 0xF6,0x06, 0x42,0x72, 0x85,0x16,
+0xFF,0xC4, 0x00,0x00, 0x00,0x01, 0x86,0xAA, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0x07,0x34,
+0x00,0x07, 0x20,0x3A, 0x00,0x0E, 0xE2,0x00, 0x92,0x94, 0xC7,0x34, 0x68,0x00, 0xF5,0x84,
+0x42,0x60, 0xF3,0x82, 0x00,0xFF, 0xC7,0x2C, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA,
+0x00,0x00, 0x97,0x16, 0xFF,0x74, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xC6,0xB4, 0x3C,0x00, 0x20,0x36, 0x00,0x00, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0x92,0xC9, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x42,0x74, 0xF6,0x06, 0x42,0x74, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0x94,0xE4, 0x00,0x00, 0x00,0x01, 0x85,0x16, 0xFF,0xA4, 0x83,0x96, 0xFF,0x74, 0xC7,0x20,
+0x52,0x00, 0x74,0xB8, 0xFF,0xFA, 0xC6,0x24, 0x00,0x00, 0x87,0x1E, 0x00,0x00, 0x76,0x9D,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC5,0xAC, 0x52,0x00, 0x75,0xAC, 0xFF,0xFA, 0x46,0x31,
+0x00,0x00, 0xF5,0x02, 0x00,0xFF, 0xC6,0x30, 0x54,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xF6,0x82, 0xFF,0x00, 0xC7,0x38, 0x6C,0x00, 0xC6,0x30, 0x70,0x00, 0xF6,0x1F,
+0x28,0x00, 0x83,0x96, 0xFF,0x94, 0x85,0x16, 0xFF,0xC4, 0xF5,0x9F, 0x28,0x00, 0x87,0x2A,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x24, 0xE6,0x00, 0x94,0x69, 0xF6,0x86,
+0x40,0x98, 0xF7,0x04, 0x6E,0x50, 0x86,0x2A, 0x00,0x2C, 0xC6,0xA4, 0x00,0x00, 0x23,0x94,
+0x00,0x62, 0x84,0x9E, 0x00,0x00, 0x77,0x9D, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC4,0xA4,
+0x7F,0xC0, 0x74,0xA5, 0xFF,0xF0, 0x46,0xB5, 0x00,0x00, 0x87,0x3A, 0x1D,0xDC, 0x76,0xB5,
+0x00,0x02, 0xC0,0x32, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A,
+0x00,0x00, 0xF7,0x06, 0x40,0x98, 0xE6,0x00, 0x94,0x34, 0xC3,0x34, 0x70,0x00, 0xC5,0x84,
+0x00,0x00, 0x86,0xAA, 0x00,0x24, 0xF7,0x04, 0xE0,0x00, 0xF6,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0x05,0x28, 0x00,0x24, 0xE6,0x00, 0x93,0xC4, 0x95,0x16, 0xFF,0x74, 0x83,0x96,
+0xFF,0xC4, 0x00,0x00, 0x00,0x01, 0x86,0x9E, 0x00,0x28, 0xF7,0x04, 0xE0,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x93,0xC8, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x93,0xD5, 0x00,0x00, 0x00,0x01, 0xF5,0x82,
+0x00,0x00, 0x85,0x16, 0xFF,0x74, 0xF7,0x04, 0xE0,0x00, 0x86,0xAA, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x94,0x14, 0xF6,0x02, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0x94,0x1C, 0x20,0x32, 0x00,0x00, 0x86,0xAA, 0x00,0x04, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0x94,0x1D, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0x94,0x2D, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0x94,0x39, 0x00,0x00,
+0x00,0x01, 0xF4,0x82, 0x00,0x01, 0xF7,0x04, 0x4F,0x58, 0xF4,0x9B, 0x28,0x00, 0x83,0x96,
+0xFF,0xC4, 0xF6,0x86, 0x40,0x9A, 0xC7,0x20, 0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0x86,0x1E,
+0x00,0x30, 0x47,0x39, 0x00,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0xE0,0x00,
+0x94,0xE4, 0xF6,0x3B, 0x28,0x00, 0x47,0x25, 0x00,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38,
+0x68,0x00, 0xF5,0x02, 0x00,0x01, 0xF5,0x3B, 0x28,0x00, 0x07,0x38, 0x00,0x02, 0x23,0x94,
+0x00,0x62, 0x83,0x9E, 0x00,0x00, 0x77,0x9D, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x9C,
+0x7F,0xC0, 0x73,0x9D, 0xFF,0xF0, 0x25,0x14, 0x00,0x62, 0xF3,0xBB, 0x28,0x00, 0x85,0x2A,
+0x00,0x00, 0x77,0xA9, 0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC5,0x28, 0x7F,0xC0, 0x75,0x29,
+0xFF,0xF0, 0xE0,0x00, 0x94,0xE4, 0xF5,0x1B, 0x28,0x00, 0x83,0x96, 0xFF,0xBC, 0x00,0x00,
+0x00,0x01, 0x20,0x1E, 0x00,0x01, 0xE6,0x00, 0x94,0xE4, 0x00,0x00, 0x00,0x01, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x35,0x60, 0xF5,0x05,
+0x42,0x30, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x06,
+0x42,0x44, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0x96,0x89, 0x00,0x00, 0x00,0x01, 0xF6,0x84,
+0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xEE,0x00, 0x95,0x8D, 0xF5,0x86,
+0x42,0x50, 0xF7,0x04, 0x42,0x50, 0x76,0x2D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x26,0xB4,
+0x00,0x01, 0xF6,0x85, 0x42,0x54, 0x25,0x00, 0x00,0x07, 0xF5,0x05, 0x42,0x58, 0xF6,0x84,
+0x2D,0x38, 0xC7,0x38, 0x67,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x2F,
+0x28,0x00, 0x06,0x34, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0xF7,0x06, 0x2C,0x28, 0x76,0xB5,
+0x00,0x02, 0xF5,0x02, 0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x96,0x88, 0xB5,0x36,
+0x70,0x02, 0xE0,0x00, 0x96,0x88, 0xF0,0x05, 0x2D,0x38, 0xF5,0x04, 0x42,0x60, 0x00,0x00,
+0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xB2,0x84, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x84, 0x4F,0x58, 0x00,0x00, 0x00,0x01, 0x07,0x34, 0x00,0x40, 0xC0,0x22,
+0x72,0x00, 0xE6,0x00, 0x95,0xEC, 0xF6,0x06, 0x42,0x76, 0xF7,0x04, 0x42,0x74, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x96,0x88, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x60, 0x00,0x00,
+0x00,0x01, 0xC0,0x22, 0x72,0x00, 0xE6,0x00, 0x96,0x24, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0xF5,0x04, 0x3B,0xB0, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD4,0x2C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x96,0x40, 0x00,0x00,
+0x00,0x01, 0xC0,0x22, 0x6A,0x00, 0xE6,0x00, 0x96,0x71, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCC,0x60, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0xF5,0x04,
+0x40,0x74, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xBE,0xF8, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x96,0x88, 0x00,0x00, 0x00,0x01, 0xF5,0x04,
+0x40,0x74, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xC1,0xB4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x70, 0xF6,0x04, 0x6F,0x34, 0xF7,0x04, 0x42,0x64, 0x86,0xB2,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0x9B,0x18, 0x06,0xB0,
+0x00,0x02, 0x87,0x36, 0x00,0x00, 0xF4,0x04, 0x40,0x7C, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC0,0x3A, 0x42,0x00, 0xE6,0x00,
+0x9B,0x18, 0x24,0x94, 0x00,0x36, 0xF6,0x04, 0x40,0x74, 0x23,0x94, 0x00,0x38, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x30,
+0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x34, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x32, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x30, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2E, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2C, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2A, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94,
+0x00,0x28, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x94,0x13, 0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x27,0x14,
+0x00,0x20, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0xF5,0x04,
+0x40,0x74, 0x94,0x16, 0xFF,0xC4, 0x07,0x20, 0x00,0x02, 0xF0,0x3B, 0x28,0x00, 0x24,0x80,
+0x00,0x07, 0xF4,0x02, 0x00,0xFF, 0x83,0x96, 0xFF,0xC4, 0x95,0x16, 0xFF,0xBC, 0x03,0x1C,
+0x00,0x0A, 0x20,0x26, 0x00,0x07, 0xEE,0x00, 0x98,0xA8, 0x06,0x18, 0x00,0x0E, 0x86,0xB2,
+0x00,0x00, 0x77,0x31, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x75,0xB1, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0x03,0x18, 0x00,0x02, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02,
+0xFF,0x00, 0xC6,0xB4, 0x74,0x00, 0xF6,0xB3, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x04,0xA4,
+0x00,0x01, 0xC7,0x38, 0x5F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38, 0x44,0x00, 0xE0,0x00,
+0x98,0x54, 0xF7,0x33, 0x28,0x00, 0x85,0x16, 0xFF,0xC4, 0x74,0x95, 0x00,0x1E, 0x74,0xA4,
+0xFF,0xE5, 0x83,0x96, 0xFF,0xC4, 0x23,0x14, 0x00,0x1E, 0x74,0x19, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0x05,0x28, 0x00,0x26, 0x95,0x16, 0xFF,0x8C, 0x85,0xAA, 0x00,0x00, 0x76,0xA9,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x03,0x9C, 0x00,0x02, 0x93,0x96, 0xFF,0xB4, 0x06,0x1C,
+0x00,0x02, 0x73,0x95, 0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0xAC, 0x73,0x95,
+0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0x9C, 0x83,0x96, 0xFF,0xBC, 0x75,0x15,
+0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x95,0x16, 0xFF,0x94, 0x75,0x15, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x95,0x16, 0xFF,0xA4, 0x85,0x16, 0xFF,0xC4, 0xC5,0xAC, 0x6F,0xC0, 0x75,0xAD,
+0xFF,0xF0, 0xF5,0x05, 0x42,0x60, 0xF5,0x04, 0x4F,0x58, 0xF6,0x82, 0x00,0xFF, 0xC7,0x1C,
+0x52,0x00, 0x77,0x38, 0xFF,0xFA, 0x47,0x39, 0x00,0x00, 0xC7,0x38, 0x6C,0x00, 0xF6,0x82,
+0xFF,0x00, 0xC5,0xAC, 0x6C,0x00, 0xC7,0x38, 0x58,0x00, 0x83,0x96, 0xFF,0x8C, 0xF5,0x84,
+0x3B,0x6C, 0x85,0x16, 0xFF,0xB4, 0xF7,0x1F, 0x28,0x00, 0x87,0x16, 0xFF,0xE0, 0x06,0xAC,
+0x00,0x01, 0xF6,0x85, 0x3B,0x6C, 0x83,0x96, 0xFF,0xC4, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0xF5,0x04, 0x4F,0x58, 0x87,0x1A, 0x00,0x00, 0xC0,0x1E,
+0x52,0x00, 0xC7,0x38, 0x47,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x14, 0x00,0x1A, 0x76,0x99, 0x00,0x1E, 0x83,0x96,
+0xFF,0x94, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0x30, 0x00,0x02, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x16, 0xFF,0xE8, 0x23,0x14,
+0x00,0x16, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x85,0x16, 0xFF,0xAC, 0x83,0x96,
+0xFF,0xA4, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x06,0x30, 0x00,0x02, 0x85,0x16, 0xFF,0x9C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x16, 0xFF,0xEC, 0x23,0x14,
+0x00,0x12, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0x30, 0x00,0x02, 0x83,0x96,
+0xFF,0xC4, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x16,
+0xFF,0xF0, 0x06,0x30, 0x00,0x02, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33,
+0x28,0x00, 0x07,0x1C, 0x00,0x3A, 0xF5,0xBB, 0x28,0x00, 0x07,0x1C, 0x00,0x36, 0xF0,0x3B,
+0x28,0x00, 0xF5,0x02, 0x00,0x03, 0xE6,0x00, 0x9A,0xA4, 0xF5,0x1F, 0x28,0x00, 0xF7,0x04,
+0x42,0x78, 0xF6,0x06, 0x42,0x78, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x9B,0x18, 0x00,0x00,
+0x00,0x01, 0xF3,0x86, 0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x1F,0x48, 0x97,0x93, 0xFF,0xFC, 0x25,0x00, 0x00,0x07, 0xF5,0x05, 0x42,0x58, 0xF7,0x04,
+0x42,0x50, 0xF6,0x06, 0x42,0x50, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF3,0x82,
+0x00,0x06, 0xF3,0x85, 0x42,0x54, 0xF5,0x06, 0x39,0x34, 0xF5,0x05, 0x42,0x44, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xF6,0x84, 0x2D,0x38, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x06,0x34, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0xF7,0x06, 0x2C,0x28, 0x76,0xB5,
+0x00,0x02, 0xF3,0x82, 0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0x9B,0x18, 0xB3,0xB6,
+0x70,0x02, 0xF0,0x05, 0x2D,0x38, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x78, 0xF3,0x86, 0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0x9E,0x41, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xEE,0x00, 0x9D,0x85, 0x24,0x94, 0x00,0x36, 0xF6,0x04, 0x40,0x74, 0x25,0x14,
+0x00,0x38, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x34, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x32, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x30, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2E, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2C, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94, 0x00,0x2A, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x24,0x94, 0x00,0x28, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0xF3,0x84, 0x40,0x7C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x95,0x13, 0xFF,0xFC, 0x23,0x94, 0x00,0x20, 0x93,0x96,
+0xFF,0x94, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x23,0x94,
+0x00,0x68, 0x93,0x96, 0xFF,0x8C, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x8C, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x90,0x13, 0xFF,0xFC, 0x23,0x94, 0x00,0x50, 0x93,0x96, 0xFF,0x84, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCF,0x24, 0x97,0x93, 0xFF,0xFC, 0x87,0x02,
+0xFF,0x34, 0x00,0x00, 0x00,0x01, 0xF7,0x05, 0x42,0x64, 0xF3,0x84, 0x40,0x7C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0x84, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF7,0xC8, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0x9D,0x5D, 0xF3,0x82, 0x17,0x70, 0xF7,0x04, 0x42,0x54, 0x00,0x00,
+0x00,0x01, 0x27,0x38, 0x00,0x01, 0xF7,0x05, 0x42,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x1B, 0x93,0x93, 0xFF,0xFC, 0xF3,0x86, 0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0x9E,0x40, 0x00,0x00,
+0x00,0x01, 0xF5,0x04, 0x40,0x7C, 0xF4,0x84, 0x40,0x74, 0xC7,0x28, 0x50,0x00, 0xC7,0x24,
+0x70,0x00, 0x05,0xB8, 0x00,0x26, 0x86,0xAE, 0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x77,0xB4, 0x00,0x08, 0x70,0x3E, 0xFF,0xE8, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xF7,0x04, 0x4F,0x58, 0xE6,0x00,
+0x9D,0xFD, 0xF6,0x02, 0x00,0xFF, 0xF7,0x04, 0x42,0x78, 0xF6,0x06, 0x42,0x7A, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0x9E,0x40, 0x00,0x00, 0x00,0x01, 0x86,0xAE, 0x00,0x00, 0x77,0x2D,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02,
+0x00,0x01, 0xC7,0x38, 0x64,0x00, 0xF6,0x02, 0xFF,0x00, 0xC6,0xB4, 0x64,0x00, 0xC7,0x38,
+0x68,0x00, 0xF7,0x2F, 0x28,0x00, 0x07,0x28, 0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0x94,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xBE,0xF8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0xD8, 0xF3,0x86,
+0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xA2,0xC9, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEE,0x00, 0xA0,0x35, 0x24,0x94,
+0x00,0x36, 0xF6,0x04, 0x40,0x74, 0x25,0x14, 0x00,0x38, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0xB1,
+0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x32,
+0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x34, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x32, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x30, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x2E, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x2C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x2A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94, 0x00,0x28, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0xF3,0x84, 0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x95,0x13,
+0xFF,0xFC, 0x23,0x94, 0x00,0x20, 0x93,0x96, 0xFF,0x4C, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x23,0x94, 0x00,0x50, 0x93,0x96, 0xFF,0x44, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x87,0x02,
+0xFF,0x34, 0x00,0x00, 0x00,0x01, 0xF7,0x05, 0x42,0x64, 0xF3,0x84, 0x40,0x7C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0x44, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0xE0,0x00, 0xA2,0x80, 0x93,0x93,
+0xFF,0xFC, 0xF4,0x04, 0x40,0x7C, 0xF6,0x04, 0x40,0x74, 0xF3,0x82, 0x00,0x00, 0xC7,0x20,
+0x40,0x00, 0xC7,0x30, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA, 0x00,0x00, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x77,0xB4, 0x00,0x08, 0x70,0x3E,
+0xFF,0xE8, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xA0,0xAD, 0x93,0x96, 0xFF,0x3C, 0xF7,0x04, 0x42,0xA0, 0xF6,0x06, 0x42,0xA0, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0xA2,0xC8, 0x00,0x00, 0x00,0x01, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x24,0x94, 0x00,0x7E, 0x25,0x14,
+0x00,0x80, 0x23,0x94, 0x00,0x68, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x7C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x7A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x78, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x76, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x74, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x24,0x94,
+0x00,0x72, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94, 0x00,0x70, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x94,0x13,
+0xFF,0xFC, 0x95,0x13, 0xFF,0xFC, 0x93,0x96, 0xFF,0x34, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x34, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x23,0x94, 0x00,0xB0, 0x93,0x96, 0xFF,0x2C, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0x2C, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x3C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x23,0x94, 0x00,0x98, 0x93,0x96, 0xFF,0x24, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCF,0x24, 0x97,0x93, 0xFF,0xFC, 0xF3,0x82,
+0x00,0x06, 0xF3,0x85, 0x42,0x54, 0x87,0x02, 0xFF,0x34, 0xF3,0x86, 0x38,0xA8, 0xF3,0x85,
+0x42,0x44, 0xF7,0x05, 0x42,0x64, 0xF3,0x84, 0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0x24, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x34, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xF7,0xC8, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0xA2,0xA9, 0xF3,0x82, 0x17,0x70, 0xF7,0x04, 0x42,0x54, 0x00,0x00, 0x00,0x01, 0x27,0x38,
+0x00,0x01, 0xF7,0x05, 0x42,0x54, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0x1B, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x86, 0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF6,0x04, 0x6F,0x34, 0xF7,0x04, 0x42,0x64, 0x86,0xB2, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xA3,0xAC, 0x06,0xB0, 0x00,0x02, 0x87,0x36,
+0x00,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0xF6,0x84,
+0x40,0x7C, 0x77,0x39, 0xFF,0xF0, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x00, 0xA3,0xAC, 0xC7,0x34,
+0x68,0x00, 0xF5,0x84, 0x40,0x74, 0xF6,0x04, 0x4F,0x58, 0x00,0x00, 0x00,0x01, 0xC6,0x2C,
+0x62,0x00, 0x76,0x30, 0xFF,0xFA, 0xC5,0xAC, 0x70,0x00, 0x05,0xAC, 0x00,0x26, 0x86,0xAE,
+0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x46,0x31, 0x00,0x00, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0x30, 0x74,0x00, 0xF7,0x02,
+0xFF,0x00, 0xC6,0xB4, 0x74,0x00, 0xC6,0x30, 0x68,0x00, 0xF6,0x2F, 0x28,0x00, 0xF5,0x06,
+0x42,0x44, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1F,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x04, 0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0xF5,0x04, 0x40,0x74, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xBE,0xF8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x80, 0xF7,0x04, 0x42,0x58, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xA3,0xF4, 0x20,0x3A, 0x00,0x07, 0xF5,0x02,
+0x00,0x01, 0xF5,0x05, 0x42,0x58, 0xF7,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x07, 0xEE,0x00, 0xA6,0xF0, 0x23,0x94, 0x00,0x1E, 0xF6,0x04, 0x42,0x60, 0x23,0x14,
+0x00,0x66, 0xF4,0x84, 0x40,0x78, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x04,0xA4, 0x00,0x02, 0x74,0x25, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0x31, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x95,0x16,
+0xFF,0x7C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x25,0x14, 0x00,0x20, 0x95,0x16,
+0xFF,0x94, 0xF7,0x2B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x85,0x16, 0xFF,0x7C, 0x05,0xA4,
+0x00,0x02, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x23,0x94,
+0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x25,0x14, 0x00,0x50, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x1A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x18, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x16, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x14, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x12, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x10, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0x26,0x14, 0x00,0x68, 0xC7,0x38,
+0x47,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x62, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x60, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x5E, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x5C, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x23,0x14,
+0x00,0x5A, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x05,0xAC,
+0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x23,0x14, 0x00,0x58, 0x75,0xAD, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x96,0x13,
+0xFF,0xFC, 0x95,0x16, 0xFF,0x8C, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0x8C, 0x00,0x00, 0x00,0x01, 0x95,0x13,
+0xFF,0xFC, 0xF5,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x25,0x14,
+0x00,0x38, 0x95,0x16, 0xFF,0x84, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCF,0x24, 0x97,0x93, 0xFF,0xFC, 0xF5,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x95,0x13,
+0xFF,0xFC, 0xF5,0x04, 0x42,0x64, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x85,0x16,
+0xFF,0x84, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x85,0x16, 0xFF,0x94, 0x00,0x00,
+0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF7,0xC8, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xA6,0xF1, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x58, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x42,0x58, 0xF7,0x04,
+0x42,0x58, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x07, 0xEE,0x00, 0xA7,0x30, 0xF5,0x02,
+0x17,0x70, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02, 0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00,
+0xA7,0x50, 0xB5,0x3A, 0x68,0x02, 0xE0,0x00, 0xA7,0x50, 0xF0,0x05, 0x2D,0x38, 0x95,0x13,
+0xFF,0xFC, 0xF5,0x02, 0x00,0x1B, 0x95,0x13, 0xFF,0xFC, 0xF5,0x06, 0x42,0x44, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x30, 0xF6,0x04,
+0x6F,0x34, 0xF7,0x04, 0x42,0x64, 0x86,0xB2, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0xA9,0xF0, 0x07,0x30, 0x00,0x02, 0x86,0x3A, 0x00,0x00, 0xF5,0x82,
+0x00,0x00, 0xF6,0x84, 0x40,0x7C, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0x30,
+0x77,0xC0, 0xF7,0x04, 0x40,0x74, 0xC6,0xB4, 0x68,0x00, 0x76,0x31, 0xFF,0xF0, 0xC6,0x00,
+0x62,0x00, 0x96,0x16, 0xFF,0xF4, 0xC7,0x38, 0x68,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x77,0xB4,
+0x00,0x08, 0x70,0x3E, 0xFF,0xE8, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xA8,0x34, 0xF6,0x02, 0x00,0xFF, 0x83,0x16, 0xFF,0xF4, 0x83,0x96,
+0xFF,0xF4, 0xF7,0x04, 0x40,0x78, 0xC6,0x98, 0x38,0x00, 0xC7,0x38, 0x68,0x00, 0x07,0x38,
+0x00,0x26, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xC6,0xB4, 0x64,0x00, 0xC0,0x36, 0x5A,0x00, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xA8,0x3D, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xA8,0x75, 0xF6,0x06,
+0x42,0x7C, 0xF7,0x04, 0x42,0x7C, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x78,0x9C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0xA9,0xF0, 0x00,0x00,
+0x00,0x01, 0xF3,0x04, 0x42,0x60, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCC,0x60, 0x97,0x93, 0xFF,0xFC, 0xF4,0x04, 0x40,0x78, 0xF7,0x04,
+0x4F,0x58, 0xF5,0x04, 0x40,0x74, 0xF3,0x84, 0x40,0x7C, 0xF3,0x04, 0x40,0x7C, 0xC6,0x20,
+0x72,0x00, 0x76,0x30, 0xFF,0xFA, 0xC5,0x9C, 0x30,0x00, 0xC5,0xA8, 0x58,0x00, 0x05,0xAC,
+0x00,0x26, 0x86,0xAE, 0x00,0x00, 0x74,0xAD, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x73,0xAD,
+0x00,0x1E, 0x73,0x9C, 0xFF,0xE5, 0x93,0x96, 0xFF,0xD4, 0xC5,0x28, 0x72,0x00, 0x75,0x28,
+0xFF,0xFA, 0x83,0x16, 0xFF,0xF4, 0x83,0x96, 0xFF,0xF4, 0x46,0x31, 0x00,0x00, 0x45,0x29,
+0x00,0x00, 0xC7,0x18, 0x38,0x00, 0xC4,0x20, 0x70,0x00, 0x04,0x20, 0x00,0x26, 0x73,0x21,
+0x00,0x1E, 0xC6,0xB4, 0x4F,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF4,0x82, 0x00,0xFF, 0xC6,0x30,
+0x4C,0x00, 0xF3,0x82, 0xFF,0x00, 0xC6,0xB4, 0x3C,0x00, 0xC6,0x30, 0x68,0x00, 0xF6,0x2F,
+0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x73,0x18, 0xFF,0xE5, 0x93,0x16, 0xFF,0xCC, 0x83,0x16,
+0xFF,0xD4, 0x83,0x96, 0xFF,0xF4, 0xC5,0x28, 0x4C,0x00, 0xC7,0x38, 0x37,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x76,0x9D, 0x00,0x10, 0x76,0xB5, 0xFF,0xF8, 0xC7,0x38, 0x4C,0x00, 0xC6,0xB4,
+0x70,0x00, 0xF6,0xAF, 0x28,0x00, 0x87,0x22, 0x00,0x00, 0x76,0xA1, 0x00,0x1E, 0x83,0x16,
+0xFF,0xCC, 0xF3,0x82, 0xFF,0x00, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x37,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xC7,0x38, 0x3C,0x00, 0xC5,0x28, 0x70,0x00, 0xF5,0x23, 0x28,0x00, 0x87,0x22,
+0x00,0x00, 0xF3,0x04, 0x40,0x7C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x73,0x19,
+0x00,0x10, 0x93,0x16, 0xFF,0xEC, 0x73,0x99, 0xFF,0xF8, 0xC7,0x38, 0x4C,0x00, 0xC7,0x1C,
+0x70,0x00, 0x97,0x16, 0xFF,0xDC, 0x23,0x14, 0x00,0x22, 0x83,0x1A, 0x00,0x00, 0x77,0x99,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC3,0x18, 0x7F,0xC0, 0x73,0x19, 0xFF,0xF0, 0xF3,0x23,
+0x28,0x00, 0xF3,0x86, 0x42,0x44, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x1F,0x48, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04, 0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0xF3,0x04, 0x40,0x74, 0x00,0x00, 0x00,0x01, 0x93,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xBE,0xF8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x98, 0xF3,0x06,
+0x42,0x44, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x20,0xE4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xAE,0xE5, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x42,0x54, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEE,0x00, 0xAD,0x89, 0x27,0x38,
+0x00,0x01, 0xF7,0x05, 0x42,0x54, 0x23,0x94, 0x00,0x1E, 0xF6,0x04, 0x42,0x60, 0x24,0x94,
+0x00,0x66, 0x94,0x96, 0xFF,0x64, 0xF3,0x04, 0x40,0x78, 0x24,0x94, 0x00,0x20, 0x94,0x96,
+0xFF,0x94, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x03,0x18, 0x00,0x02, 0x93,0x16, 0xFF,0x74, 0x74,0x19, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0x05,0x98, 0x00,0x02, 0x06,0x30, 0x00,0x02, 0x75,0x31, 0x00,0x1E, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0x28,
+0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x23,0x94,
+0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x85,0x16, 0xFF,0x64, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x1A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x18, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x16, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x14, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x12, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F,
+0x28,0x00, 0x23,0x94, 0x00,0x10, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x26,0x14, 0x00,0x68, 0xC7,0x38,
+0x47,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x23,0x14,
+0x00,0x64, 0x93,0x16, 0xFF,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x24,0x94, 0x00,0x62, 0x94,0x96, 0xFF,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E,
+0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x25,0x14, 0x00,0x60, 0x95,0x16, 0xFF,0x64, 0x05,0xAC,
+0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x23,0x14, 0x00,0x5E, 0x93,0x16,
+0xFF,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x24,0x94,
+0x00,0x5C, 0x94,0x96, 0xFF,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x25,0x14, 0x00,0x5A, 0x95,0x16, 0xFF,0x64, 0x05,0xAC, 0x00,0x02, 0x87,0x2E,
+0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x24,0x94, 0x00,0x50, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x23,0x14, 0x00,0x58, 0x05,0xAC,
+0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x93,0x16, 0xFF,0x64, 0x75,0xAD, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x96,0x13,
+0xFF,0xFC, 0x94,0x96, 0xFF,0x8C, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0x8C, 0x23,0x14, 0x00,0x38, 0x95,0x13,
+0xFF,0xFC, 0x27,0x80, 0x00,0x07, 0x97,0x93, 0xFF,0xFC, 0x93,0x16, 0xFF,0x84, 0x93,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCF,0x24, 0x97,0x93, 0xFF,0xFC, 0x27,0x80,
+0x00,0x07, 0xF7,0x85, 0x42,0x58, 0x27,0x80, 0x00,0x07, 0x97,0x93, 0xFF,0xFC, 0xF4,0x84,
+0x42,0x64, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0x84, 0x00,0x00,
+0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0x93,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF7,0xC8, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xAD,0x5D, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x58, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x42,0x58, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF4,0x82,
+0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0xAE,0xE4, 0xB4,0xBA, 0x68,0x02, 0xE0,0x00,
+0xAE,0xE4, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x40,0x78, 0xF5,0x84, 0x4F,0x58, 0x07,0x38,
+0x00,0x16, 0x86,0xBA, 0x00,0x00, 0xF4,0x06, 0x3B,0x90, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4, 0xFF,0xF0, 0x76,0x35, 0x00,0x06, 0xA7,0x2E,
+0x60,0x02, 0xC5,0x2C, 0x60,0x00, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x40,0x00, 0x07,0x38,
+0x00,0x02, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x73,0xB7, 0xFF,0xF0, 0xEE,0x00, 0xAE,0x55, 0x95,0x16, 0xFF,0x64, 0xA7,0x2E,
+0x60,0x02, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x40,0x00, 0x86,0xBA, 0x00,0x04, 0x23,0x14,
+0x00,0x88, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xA6,0xAA, 0x68,0x02, 0x77,0x1D, 0x00,0x03, 0xC7,0x38, 0x68,0x00, 0x27,0x38,
+0x00,0x08, 0x85,0x3A, 0x00,0x04, 0x84,0xBA, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x95,0x1A,
+0x00,0x04, 0x94,0x9A, 0x00,0x00, 0x85,0x96, 0xFF,0x7C, 0xE0,0x00, 0xAE,0x78, 0x00,0x00,
+0x00,0x01, 0x84,0x96, 0xFF,0x64, 0xA7,0x2E, 0x60,0x02, 0x76,0xA5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38,
+0x40,0x00, 0x85,0xBA, 0x00,0x04, 0x85,0x16, 0xFF,0x64, 0xF6,0x06, 0x3B,0x90, 0x87,0x2A,
+0x00,0x00, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xA6,0xBA, 0x60,0x02, 0x20,0x1E, 0x00,0x00, 0xC7,0x38,
+0x60,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0xEE,0x00,
+0xAE,0xC9, 0x76,0xB5, 0xFF,0xF0, 0x83,0x16, 0xFF,0x78, 0x00,0x00, 0x00,0x01, 0x77,0x19,
+0xFF,0xF0, 0xC6,0xB8, 0x68,0x00, 0x84,0x96, 0xFF,0x64, 0x00,0x00, 0x00,0x01, 0xC7,0x24,
+0x68,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xC1,0x2C, 0x00,0x00, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x10, 0xF7,0x04, 0x40,0x84, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xAF,0x3C, 0xF6,0x06, 0x42,0xB8, 0xF7,0x04, 0x42,0xB8, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF3,0x06, 0x36,0x78, 0xF3,0x05, 0x42,0x44, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0xF7,0x04, 0x4F,0x5C, 0xF3,0x84,
+0x42,0x5C, 0x83,0x3A, 0x00,0x04, 0xC4,0x38, 0x00,0x00, 0x93,0x16, 0xFF,0xEC, 0x77,0x1D,
+0x00,0x01, 0xC7,0x38, 0x38,0x00, 0x77,0x39, 0x00,0x02, 0x04,0xB8, 0x00,0x0C, 0x83,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xC0,0x1E, 0x32,0x00, 0xEC,0x00, 0xB0,0x70, 0xC5,0x04,
+0x00,0x00, 0xA6,0xA2, 0x48,0x02, 0xF7,0x04, 0xE0,0x00, 0xF5,0x82, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0xAF,0xA8, 0xC6,0x20, 0x48,0x00, 0x86,0xB2, 0x00,0x04, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xAF,0xAC, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xAF,0xB9, 0x00,0x00,
+0x00,0x01, 0xF5,0x02, 0x00,0x00, 0x86,0xB2, 0x00,0x00, 0xF7,0x04, 0xE0,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xAF,0xF4, 0xF5,0x82, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0xAF,0xFC, 0x20,0x2E, 0x00,0x00, 0x86,0xB2, 0x00,0x04, 0xF7,0x04,
+0xE0,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xAF,0xFD, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xB0,0x0D, 0x20,0x2A,
+0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0xB0,0x59, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x7A,0xD0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xB0,0x64, 0xC7,0x20, 0x48,0x00, 0x87,0x3A, 0x00,0x08, 0xF6,0x06, 0x40,0x98, 0x77,0x39,
+0x00,0x02, 0xA6,0xBA, 0x60,0x02, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36, 0x00,0x00, 0xE6,0x00,
+0xB0,0x64, 0x00,0x00, 0x00,0x01, 0x04,0xA4, 0x00,0x0C, 0xE0,0x00, 0xAF,0x60, 0x03,0x9C,
+0x00,0x01, 0x83,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xC0,0x1E, 0x32,0x00, 0xEC,0x00,
+0xB1,0x04, 0xF3,0x06, 0x36,0x78, 0xF6,0x84, 0x4F,0x5C, 0x77,0x1D, 0x00,0x01, 0xC7,0x38,
+0x38,0x00, 0x77,0x39, 0x00,0x02, 0x07,0x38, 0x00,0x0C, 0xC6,0xB4, 0x70,0x00, 0x87,0x36,
+0x00,0x08, 0xF6,0x84, 0x4F,0x58, 0x77,0x39, 0x00,0x06, 0xC6,0xB4, 0x70,0x00, 0x96,0x93,
+0xFF,0xFC, 0x93,0x96, 0xFF,0xF4, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xFA,0x98, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xF6,0x84, 0x42,0x6C, 0x83,0x96, 0xFF,0xF4, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0xC7,0x1C, 0x70,0x00, 0xF7,0x05, 0x42,0x5C, 0x06,0xB4,
+0x00,0x01, 0xF7,0x04, 0x2D,0x38, 0xF6,0x85, 0x42,0x6C, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x1C, 0x20,0x32,
+0x00,0x44, 0xE6,0x00, 0xB1,0x08, 0xB3,0x3A, 0x68,0x02, 0xE0,0x00, 0xB1,0x08, 0xF0,0x05,
+0x2D,0x38, 0xF3,0x05, 0x42,0x44, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF4,0x02, 0x00,0x00, 0xC5,0xA0, 0x00,0x00, 0xF6,0x82, 0x07,0x70, 0xF7,0x04,
+0x6E,0x50, 0x20,0x36, 0x00,0x00, 0xE6,0x00, 0xB1,0x6D, 0x06,0x38, 0x00,0x1C, 0x87,0x32,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC4,0x20, 0x70,0x00, 0xC0,0x22, 0x72,0x00, 0xE4,0x00,
+0xB1,0x5D, 0x00,0x00, 0x00,0x01, 0x05,0xAC, 0x00,0x01, 0x26,0xB4, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0xB1,0x40, 0x06,0x30, 0x00,0x04, 0xC4,0x20, 0x58,0x00, 0xC0,0x22,
+0x5A,0x00, 0xE4,0x00, 0xB1,0x81, 0x00,0x00, 0x00,0x01, 0x04,0x20, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x78,0xD8, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xB1,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x04, 0x40,0x94, 0x00,0x00, 0x00,0x01, 0xC0,0x22, 0x72,0x00, 0xE6,0x00,
+0xB1,0xED, 0xF4,0x05, 0x40,0x90, 0xF7,0x04, 0x6E,0x50, 0x00,0x00, 0x00,0x01, 0x86,0xBA,
+0x1D,0xDC, 0xF5,0x82, 0x00,0x01, 0x06,0xB4, 0x00,0x01, 0x96,0xBA, 0x1D,0xDC, 0x87,0x3A,
+0x1D,0xDC, 0xE0,0x00, 0xB1,0xF0, 0xF5,0x85, 0x7A,0xD0, 0xF0,0x05, 0x7A,0xD0, 0xF5,0x84,
+0x40,0x90, 0xF0,0x05, 0x40,0x84, 0xF5,0x85, 0x40,0x94, 0xF5,0x86, 0xE0,0x00, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD5,0xA0, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x6E,0x50, 0xF4,0x05, 0x40,0x84, 0x85,0xBA, 0x1D,0xDC, 0x00,0x00, 0x00,0x01, 0xF5,0x85,
+0x3B,0x64, 0xF5,0x84, 0xE0,0x00, 0xF0,0x05, 0x42,0x5C, 0x95,0xBA, 0x00,0x10, 0xF5,0x84,
+0xE0,0x04, 0xF6,0x86, 0x2C,0x28, 0x95,0xBA, 0x00,0x14, 0xF7,0x04, 0x2D,0x38, 0xF5,0x86,
+0x3A,0x4C, 0xF5,0x85, 0x42,0x44, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0xF5,0x82, 0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0xB2,0x68, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x86, 0x35,0xEC, 0xF5,0x85, 0x42,0x30, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0xC8, 0xF3,0x02,
+0x00,0x00, 0x93,0x16, 0xFF,0x94, 0x24,0x80, 0x00,0x08, 0x94,0x96, 0xFF,0x84, 0x23,0x80,
+0x00,0x07, 0x83,0x16, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0x93,0x16, 0xFF,0x54, 0x20,0x1E,
+0x00,0x07, 0xEE,0x00, 0xB5,0x64, 0xC7,0x1C, 0x38,0x00, 0x84,0x96, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC7,0x24, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA, 0x00,0x00, 0xF5,0x84,
+0x4F,0x58, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB6, 0x74,0x00, 0xE6,0x00, 0xB3,0x2D, 0x20,0x36,
+0x00,0x01, 0xE6,0x00, 0xB3,0x2D, 0x77,0x35, 0x00,0x06, 0xA6,0xBA, 0x58,0x02, 0xC7,0x38,
+0x58,0x00, 0x76,0x39, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC6,0xB4, 0x67,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0x20,0x36, 0x00,0x02, 0xE6,0x00, 0xB3,0x31, 0xC6,0xB8, 0x00,0x00, 0xC7,0x2C,
+0x00,0x00, 0xE0,0x00, 0xB3,0x30, 0xC6,0xB8, 0x00,0x00, 0xF6,0x84, 0x4F,0x58, 0xF7,0x04,
+0x4F,0x58, 0xC5,0x34, 0x00,0x00, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00, 0xB5,0x5D, 0x00,0x00,
+0x00,0x01, 0xF6,0x84, 0x3B,0xBC, 0xF3,0x02, 0x00,0x00, 0x93,0x16, 0xFF,0x3C, 0x04,0x28,
+0x00,0x1C, 0xF7,0x04, 0x3B,0xB8, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x00,
+0xB4,0x40, 0x96,0x96, 0xFF,0xAC, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39,
+0x00,0x02, 0xF4,0x86, 0x3B,0xB4, 0xC6,0x38, 0x48,0x00, 0x06,0x30, 0x00,0x0C, 0xC3,0x04,
+0x00,0x00, 0x93,0x16, 0xFF,0x34, 0x86,0xB2, 0x00,0x00, 0x87,0x2A, 0x00,0x1C, 0x85,0x96,
+0xFF,0x3C, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xB3,0xC0, 0x20,0x2E, 0x00,0x00, 0x86,0xB2,
+0x00,0x04, 0x87,0x2A, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xB3,0xC0, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0xB3,0xD1, 0x00,0x00, 0x00,0x01, 0xF4,0x82, 0x00,0x00, 0x94,0x96, 0xFF,0x34, 0x86,0xB2,
+0x00,0x00, 0x87,0x22, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0xB4,0x0C, 0xF5,0x82, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xB4,0x14, 0x20,0x2E,
+0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x22, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0xB4,0x15, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0xB4,0x25, 0x00,0x00, 0x00,0x01, 0xF3,0x02, 0x00,0x01, 0x93,0x16,
+0xFF,0x34, 0x84,0x96, 0xFF,0x34, 0x00,0x00, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00,
+0xB4,0x40, 0x00,0x00, 0x00,0x01, 0xF3,0x02, 0x00,0x01, 0x93,0x16, 0xFF,0x3C, 0x84,0x96,
+0xFF,0x3C, 0x00,0x00, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0xB4,0x81, 0xF6,0x02,
+0x00,0x01, 0x87,0x16, 0xFF,0xAC, 0xF3,0x06, 0x3B,0xB4, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4,
+0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6,
+0x00,0x00, 0x97,0x16, 0xFF,0xB0, 0xE0,0x00, 0xB4,0xF4, 0x96,0x96, 0xFF,0xB4, 0x27,0x14,
+0x00,0x54, 0x97,0x13, 0xFF,0xFC, 0x94,0x13, 0xFF,0xFC, 0xF4,0x86, 0x3B,0xB4, 0x94,0x93,
+0xFF,0xFC, 0x93,0x96, 0xFF,0x4C, 0x95,0x16, 0xFF,0x44, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0x85,0x16, 0xFF,0x44, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xB4,0xF1, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xAC, 0xF3,0x06,
+0x3B,0xB4, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xB0, 0x96,0x96,
+0xFF,0xB4, 0xF7,0x05, 0x3B,0xBC, 0xE0,0x00, 0xB4,0xF8, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0xB5,0x2D, 0x27,0x14, 0x00,0x08, 0x84,0x96,
+0xFF,0x54, 0x00,0x00, 0x00,0x01, 0xC7,0x24, 0x70,0x00, 0x83,0x16, 0xFF,0xB4, 0x04,0xA4,
+0x00,0x04, 0x94,0x96, 0xFF,0x54, 0x84,0x96, 0xFF,0x94, 0x93,0x3A, 0xFF,0xC0, 0x04,0xA4,
+0x00,0x01, 0xE0,0x00, 0xB5,0x54, 0x94,0x96, 0xFF,0x94, 0x83,0x16, 0xFF,0x54, 0x00,0x00,
+0x00,0x01, 0xC7,0x18, 0x70,0x00, 0xF4,0x84, 0x4F,0x58, 0x03,0x18, 0x00,0x04, 0x93,0x16,
+0xFF,0x54, 0x83,0x16, 0xFF,0x94, 0x94,0xBA, 0xFF,0xC0, 0x03,0x18, 0x00,0x01, 0x93,0x16,
+0xFF,0x94, 0x95,0x16, 0xFF,0x3C, 0x93,0x96, 0xFF,0x8C, 0xE0,0x00, 0xB2,0xB0, 0x03,0x9C,
+0x00,0x01, 0x84,0x96, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00,
+0xB5,0x84, 0xF3,0x82, 0x00,0x01, 0xF4,0x04, 0x4F,0x58, 0xE0,0x00, 0xBE,0xE4, 0x00,0x00,
+0x00,0x01, 0x83,0x16, 0xFF,0xB8, 0x84,0x96, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0xC0,0x1E,
+0x4A,0x00, 0xEC,0x00, 0xB5,0xCC, 0x93,0x16, 0xFF,0x7C, 0x26,0x94, 0x00,0x04, 0x87,0x36,
+0xFF,0xC0, 0x83,0x16, 0xFF,0x7C, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00,
+0xBB,0x98, 0x03,0x9C, 0x00,0x01, 0x84,0x96, 0xFF,0x94, 0x00,0x00, 0x00,0x01, 0xC0,0x1E,
+0x4A,0x00, 0xEC,0x00, 0xB5,0xA1, 0x06,0xB4, 0x00,0x04, 0xF4,0x04, 0x4F,0x58, 0x83,0x16,
+0xFF,0x7C, 0x00,0x00, 0x00,0x01, 0xC0,0x1A, 0x42,0x00, 0xE6,0x00, 0xBA,0x2D, 0xF4,0x82,
+0x00,0x00, 0x94,0x96, 0xFF,0x74, 0x23,0x80, 0x00,0x07, 0x20,0x1E, 0x00,0x07, 0xEE,0x00,
+0xB7,0x48, 0xC7,0x1C, 0x38,0x00, 0x83,0x16, 0xFF,0x7C, 0x00,0x00, 0x00,0x01, 0xC7,0x18,
+0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA, 0x00,0x00, 0xF5,0x84, 0x4F,0x58, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02,
+0x00,0xFF, 0xC6,0xB6, 0x74,0x00, 0xE6,0x00, 0xB6,0x69, 0x20,0x36, 0x00,0x01, 0xE6,0x00,
+0xB6,0x69, 0x77,0x35, 0x00,0x06, 0xA6,0xBA, 0x58,0x02, 0xC7,0x38, 0x58,0x00, 0x76,0x39,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC6,0xB4, 0x67,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36,
+0x00,0x02, 0xE6,0x00, 0xB6,0x6D, 0xC6,0xB8, 0x00,0x00, 0xC7,0x2C, 0x00,0x00, 0xE0,0x00,
+0xB6,0x6C, 0xC6,0xB8, 0x00,0x00, 0xF6,0x84, 0x4F,0x58, 0xF7,0x04, 0x4F,0x58, 0xC5,0x34,
+0x00,0x00, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00, 0xB7,0x41, 0xC5,0x84, 0x00,0x00, 0x84,0x96,
+0xFF,0x74, 0x86,0xAA, 0x00,0x1C, 0x83,0x16, 0xFF,0x3C, 0xF6,0x02, 0x00,0x00, 0x04,0xA4,
+0x00,0x01, 0x94,0x96, 0xFF,0x74, 0x87,0x1A, 0x00,0x1C, 0x04,0xA8, 0x00,0x1C, 0x94,0x96,
+0xFF,0x34, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xB6,0xCC, 0x04,0x18, 0x00,0x1C, 0x86,0xAA,
+0x00,0x20, 0x87,0x1A, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xB6,0xD0, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0xB6,0xDD, 0x00,0x00, 0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x83,0x16, 0xFF,0x34, 0x87,0x22,
+0x00,0x00, 0x86,0x9A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0xB7,0x1C, 0xF6,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xB7,0x24, 0x20,0x32,
+0x00,0x00, 0x86,0x9A, 0x00,0x04, 0x87,0x22, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0xB7,0x25, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0xB7,0x35, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0xB7,0x40, 0x00,0x00, 0x00,0x01, 0x93,0x96, 0xFF,0x84, 0xE0,0x00,
+0xB5,0xEC, 0x03,0x9C, 0x00,0x01, 0x84,0x96, 0xFF,0x74, 0x83,0x16, 0xFF,0x94, 0x00,0x00,
+0x00,0x01, 0xC0,0x26, 0x32,0x00, 0xE6,0x00, 0xBB,0x98, 0x23,0x00, 0x00,0x08, 0x84,0x96,
+0xFF,0x84, 0x00,0x00, 0x00,0x01, 0xC0,0x26, 0x32,0x00, 0xE6,0x00, 0xBB,0x99, 0xF6,0x02,
+0x00,0x00, 0xF6,0x84, 0x40,0x7C, 0xF7,0x04, 0x40,0x74, 0xC6,0xB4, 0x68,0x00, 0xC7,0x38,
+0x68,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x77,0xB4, 0x00,0x08, 0x70,0x3E, 0xFF,0xE8, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xB8,0x04, 0xF5,0x82,
+0x00,0xFF, 0x84,0x96, 0xFF,0x84, 0x83,0x16, 0xFF,0x8C, 0x00,0x00, 0x00,0x01, 0xC7,0x24,
+0x32,0x00, 0x84,0x96, 0xFF,0x7C, 0xC7,0x38, 0x70,0x00, 0xC7,0x24, 0x70,0x00, 0x07,0x38,
+0x00,0x26, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xC6,0xB4, 0x5C,0x00, 0xC0,0x36, 0x62,0x00, 0x47,0x0C,
+0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xB8,0x0D, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0xBB,0x98, 0x23,0x80,
+0x00,0x07, 0x20,0x1E, 0x00,0x07, 0xEE,0x00, 0xB8,0xC8, 0xC7,0x1C, 0x38,0x00, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC7,0x18, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA,
+0x00,0x00, 0xF5,0x84, 0x4F,0x58, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB6, 0x74,0x00, 0xE6,0x00,
+0xB8,0x91, 0x20,0x36, 0x00,0x01, 0xE6,0x00, 0xB8,0x91, 0x77,0x35, 0x00,0x06, 0xA6,0xBA,
+0x58,0x02, 0xC7,0x38, 0x58,0x00, 0x76,0x39, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC6,0xB4,
+0x67,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36, 0x00,0x02, 0xE6,0x00, 0xB8,0x95, 0xC6,0xB8,
+0x00,0x00, 0xC7,0x2C, 0x00,0x00, 0xE0,0x00, 0xB8,0x94, 0xC6,0xB8, 0x00,0x00, 0xF6,0x84,
+0x4F,0x58, 0xF7,0x04, 0x4F,0x58, 0xC5,0x34, 0x00,0x00, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00,
+0xB8,0xC1, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x93,0x96, 0xFF,0x4C, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCC,0x60, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0xE0,0x00,
+0xB8,0x14, 0x03,0x9C, 0x00,0x01, 0x84,0x96, 0xFF,0x84, 0x83,0x16, 0xFF,0x8C, 0xF3,0x84,
+0x40,0x7C, 0xF5,0x04, 0x40,0x74, 0xC4,0xA4, 0x32,0x00, 0x94,0x96, 0xFF,0x34, 0x83,0x16,
+0xFF,0x34, 0xC5,0x9C, 0x38,0x00, 0xC5,0xA8, 0x58,0x00, 0x05,0xAC, 0x00,0x26, 0x86,0xAE,
+0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x74,0x2D, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0x73,0x9D, 0x00,0x10, 0x73,0x9D, 0xFF,0xF8, 0xC4,0xA4, 0x30,0x00, 0x94,0x96,
+0xFF,0x3C, 0x83,0x16, 0xFF,0x7C, 0xC6,0xB4, 0x77,0xC0, 0xC4,0x98, 0x48,0x00, 0x94,0x96,
+0xFF,0x3C, 0x04,0xA4, 0x00,0x26, 0x94,0x96, 0xFF,0x3C, 0x73,0x25, 0x00,0x1E, 0x73,0x18,
+0xFF,0xE5, 0x93,0x16, 0xFF,0x6C, 0x74,0xA5, 0x00,0x1E, 0x94,0x96, 0xFF,0x64, 0x74,0xA4,
+0xFF,0xE5, 0x94,0x96, 0xFF,0x64, 0x83,0x16, 0xFF,0x7C, 0xF4,0x84, 0x4F,0x58, 0x76,0xB5,
+0xFF,0xF0, 0xC6,0x18, 0x4A,0x00, 0x76,0x30, 0xFF,0xFA, 0x46,0x31, 0x00,0x00, 0xF3,0x02,
+0x00,0xFF, 0xC6,0x30, 0x34,0x00, 0xF4,0x82, 0xFF,0x00, 0xC6,0xB4, 0x4C,0x00, 0xC6,0x30,
+0x68,0x00, 0xF6,0x2F, 0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x83,0x16, 0xFF,0x34, 0xC7,0x38,
+0x47,0xC0, 0x77,0x39, 0xFF,0xF0, 0x73,0x19, 0x00,0x10, 0x93,0x16, 0xFF,0x34, 0x74,0x99,
+0xFF,0xF8, 0xF3,0x02, 0x00,0xFF, 0xC7,0x38, 0x34,0x00, 0xC7,0x24, 0x70,0x00, 0x97,0x16,
+0xFF,0x34, 0x24,0x94, 0x00,0xCA, 0x84,0xA6, 0x00,0x00, 0x77,0xA5, 0x00,0x1E, 0x77,0xBC,
+0xFF,0xE5, 0xC4,0xA4, 0x7F,0xC0, 0x74,0xA5, 0xFF,0xF0, 0x83,0x16, 0xFF,0x3C, 0xF4,0xAF,
+0x28,0x00, 0xF4,0x84, 0x4F,0x58, 0x87,0x1A, 0x00,0x00, 0xC5,0x28, 0x4A,0x00, 0x75,0x28,
+0xFF,0xFA, 0x83,0x16, 0xFF,0x6C, 0x45,0x29, 0x00,0x00, 0xF4,0x82, 0x00,0xFF, 0xC5,0x28,
+0x4C,0x00, 0x84,0x96, 0xFF,0x3C, 0xC7,0x38, 0x37,0xC0, 0x77,0x39, 0xFF,0xF0, 0xF3,0x02,
+0xFF,0x00, 0xC7,0x38, 0x34,0x00, 0xC5,0x28, 0x70,0x00, 0xF5,0x27, 0x28,0x00, 0x87,0x26,
+0x00,0x00, 0x83,0x16, 0xFF,0x64, 0x84,0x16, 0xFF,0x7C, 0xC7,0x38, 0x37,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xF4,0x82, 0x00,0xFF, 0xC7,0x38, 0x4C,0x00, 0x83,0x16, 0xFF,0x3C, 0xC3,0x9C,
+0x70,0x00, 0xE0,0x00, 0xBE,0xE4, 0xF3,0x9B, 0x28,0x00, 0xF7,0x04, 0x40,0x7C, 0xF6,0x04,
+0x40,0x74, 0xC7,0x38, 0x70,0x00, 0xC7,0x30, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x77,0xB4,
+0x00,0x08, 0x70,0x3E, 0xFF,0xE8, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xBA,0x7D, 0x25,0x80, 0x00,0x07, 0xE0,0x00, 0xBE,0xE4, 0x04,0x20,
+0x00,0x40, 0xE0,0x00, 0xBA,0xD8, 0xC4,0x2C, 0x00,0x00, 0xC7,0x30, 0x42,0x00, 0x84,0x96,
+0x00,0x00, 0x75,0x38, 0xFF,0xFA, 0x06,0x24, 0x00,0x0A, 0x20,0x2E, 0x00,0x07, 0xEE,0x00,
+0xBA,0xD4, 0x07,0x30, 0x00,0x0E, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB4,
+0x74,0x00, 0x47,0x29, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xBA,0x74, 0x06,0x30, 0x00,0x02, 0xE0,0x00, 0xBA,0x8C, 0x05,0xAC,
+0x00,0x01, 0xF4,0x02, 0x00,0x08, 0x07,0x20, 0x00,0x07, 0x20,0x3A, 0x00,0x0E, 0xE2,0x00,
+0xBB,0xA4, 0xC5,0xA0, 0x40,0x00, 0x83,0x16, 0x00,0x00, 0xF5,0x04, 0x40,0x7C, 0xF4,0x82,
+0x00,0xFF, 0xF6,0x04, 0x4F,0x58, 0xC5,0x98, 0x58,0x00, 0x05,0xAC, 0x00,0x26, 0x86,0xAE,
+0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0x18, 0x62,0x00, 0x76,0x30,
+0xFF,0xFA, 0x46,0x31, 0x00,0x00, 0xC6,0x30, 0x4C,0x00, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0x77,0x29, 0x00,0x10, 0x77,0x39, 0xFF,0xF8, 0xC6,0xB4, 0x4C,0x00, 0xC7,0x38,
+0x68,0x00, 0xF7,0x2F, 0x28,0x00, 0xF5,0x84, 0x40,0x74, 0xC5,0x28, 0x50,0x00, 0xC5,0xAC,
+0x50,0x00, 0x05,0xAC, 0x00,0x26, 0x86,0xAE, 0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0x75,0x2D, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xF7,0x02, 0xFF,0x00, 0xC6,0xB4, 0x74,0x00, 0xC6,0x30, 0x68,0x00, 0xF6,0x2F,
+0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x76,0xA1, 0x00,0x10, 0x76,0xB5, 0xFF,0xF8, 0xC7,0x38,
+0x57,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38, 0x4C,0x00, 0xC6,0xB4, 0x70,0x00, 0xE0,0x00,
+0xBB,0xF8, 0xF6,0xAF, 0x28,0x00, 0xF4,0x04, 0x4F,0x58, 0xE0,0x00, 0xBE,0xE4, 0x04,0x20,
+0x00,0x40, 0xF6,0x04, 0x4F,0x58, 0x83,0x16, 0x00,0x00, 0xF7,0x04, 0x40,0x7C, 0xF5,0x84,
+0x40,0x74, 0xC6,0x18, 0x62,0x00, 0x76,0x30, 0xFF,0xFA, 0xC7,0x38, 0x70,0x00, 0xC5,0xAC,
+0x70,0x00, 0x05,0xAC, 0x00,0x26, 0x86,0xAE, 0x00,0x00, 0x77,0x2D, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0x46,0x31, 0x00,0x00, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02,
+0x00,0xFF, 0xC6,0x30, 0x74,0x00, 0xF7,0x02, 0xFF,0x00, 0xC6,0xB4, 0x74,0x00, 0xC6,0x30,
+0x68,0x00, 0xF6,0x2F, 0x28,0x00, 0x23,0x80, 0x00,0x07, 0x20,0x1E, 0x00,0x07, 0xEE,0x00,
+0xBE,0xE0, 0xC7,0x1C, 0x38,0x00, 0x84,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC7,0x24,
+0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA, 0x00,0x00, 0xF5,0x84, 0x4F,0x58, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02,
+0x00,0xFF, 0xC6,0xB6, 0x74,0x00, 0xE6,0x00, 0xBC,0x79, 0x20,0x36, 0x00,0x01, 0xE6,0x00,
+0xBC,0x79, 0x77,0x35, 0x00,0x06, 0xA6,0xBA, 0x58,0x02, 0xC7,0x38, 0x58,0x00, 0x76,0x39,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC6,0xB4, 0x67,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36,
+0x00,0x02, 0xE6,0x00, 0xBC,0x7D, 0xC6,0xB8, 0x00,0x00, 0xC7,0x2C, 0x00,0x00, 0xE0,0x00,
+0xBC,0x7C, 0xC6,0xB8, 0x00,0x00, 0xF6,0x84, 0x4F,0x58, 0xF7,0x04, 0x4F,0x58, 0xC5,0x34,
+0x00,0x00, 0xC0,0x2A, 0x72,0x00, 0xE6,0x00, 0xBE,0xD9, 0x06,0xA8, 0x00,0x1C, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x96,0x93, 0xFF,0xFC, 0xF4,0x86,
+0x3B,0xB4, 0x94,0x93, 0xFF,0xFC, 0x93,0x96, 0xFF,0x4C, 0x95,0x16, 0xFF,0x44, 0x96,0x96,
+0xFF,0x40, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93, 0xFF,0xFC, 0xF3,0x04,
+0x4F,0x5C, 0xF4,0x82, 0x00,0x00, 0x94,0x96, 0xFF,0x5C, 0x86,0x96, 0xFF,0x40, 0x83,0x96,
+0xFF,0x4C, 0x85,0x16, 0xFF,0x44, 0x93,0x16, 0xFF,0x34, 0x86,0x1A, 0x00,0x08, 0x96,0x96,
+0xFF,0x3C, 0x87,0x1A, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x72,0x00, 0xEC,0x00,
+0xBD,0xB8, 0x96,0x16, 0xFF,0x9C, 0x77,0x31, 0x00,0x01, 0xC7,0x38, 0x60,0x00, 0x77,0x39,
+0x00,0x02, 0xC6,0x38, 0x30,0x00, 0x06,0x30, 0x00,0x0C, 0x86,0xB2, 0x00,0x00, 0x87,0x2A,
+0x00,0x1C, 0x85,0x96, 0xFF,0x5C, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xBD,0x40, 0xC4,0x04,
+0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x2A, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0xBD,0x44, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E,
+0x00,0x00, 0xE6,0x00, 0xBD,0x51, 0x00,0x00, 0x00,0x01, 0xF4,0x02, 0x00,0x00, 0x83,0x16,
+0xFF,0x3C, 0x86,0xB2, 0x00,0x00, 0x87,0x1A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0xBD,0x90, 0xF5,0x82, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xBD,0x98, 0x20,0x2E, 0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x1A, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xBD,0x99, 0x20,0x2E, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xBD,0xA9, 0x20,0x22, 0x00,0x00, 0xF4,0x02,
+0x00,0x01, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xBD,0xB8, 0x00,0x00, 0x00,0x01, 0xF4,0x82,
+0x00,0x01, 0x94,0x96, 0xFF,0x5C, 0x83,0x16, 0xFF,0x5C, 0x00,0x00, 0x00,0x01, 0x20,0x1A,
+0x00,0x00, 0xE6,0x00, 0xBD,0xF9, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0x9C, 0x84,0x96,
+0xFF,0x34, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x48,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xA0, 0xE0,0x00,
+0xBE,0x70, 0x96,0x96, 0xFF,0xA4, 0x27,0x14, 0x00,0x64, 0x97,0x13, 0xFF,0xFC, 0x83,0x16,
+0xFF,0x3C, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x84,0x96, 0xFF,0x34, 0x00,0x00,
+0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x93,0x96, 0xFF,0x4C, 0x95,0x16, 0xFF,0x44, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0x85,0x16,
+0xFF,0x44, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xBE,0x71, 0xF6,0x02, 0x00,0x00, 0x87,0x16,
+0xFF,0x9C, 0x83,0x16, 0xFF,0x34, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xA0, 0x96,0x96, 0xFF,0xA4, 0x97,0x1A, 0x00,0x08, 0xF6,0x02, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0xBE,0x99, 0xF6,0x06, 0x42,0x9C, 0xF7,0x04, 0x42,0x9C, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0xF7,0x04, 0x4F,0x58, 0x00,0x00, 0x00,0x01, 0xC7,0x28,
+0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0x47,0x39, 0x00,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x28,
+0x00,0x1C, 0x97,0x13, 0xFF,0xFC, 0xF4,0x84, 0x4F,0x5C, 0x00,0x00, 0x00,0x01, 0x94,0x93,
+0xFF,0xFC, 0x93,0x96, 0xFF,0x4C, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x23,0x40, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x4C, 0xE0,0x00, 0xBB,0xFC, 0x03,0x9C, 0x00,0x01, 0x84,0x16,
+0x00,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x60, 0x85,0x16, 0x00,0x00, 0x86,0x16, 0x00,0x04, 0x06,0xA8, 0x00,0x18, 0xC7,0x30,
+0x60,0x00, 0xC5,0xB8, 0x68,0x00, 0x20,0x32, 0x00,0x07, 0xEE,0x00, 0xBF,0x64, 0x07,0x2C,
+0x00,0x0E, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB4, 0x74,0x00, 0x20,0x36,
+0x00,0x00, 0x47,0x0C, 0x00,0x01, 0xD7,0x00, 0x0A,0x70, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xBF,0x61, 0x05,0xAC, 0x00,0x02, 0xE0,0x00, 0xBF,0x18, 0x06,0x30, 0x00,0x01, 0x20,0x32,
+0x00,0x07, 0xEE,0x00, 0xC0,0x4C, 0x06,0xA8, 0x00,0x16, 0xF5,0x05, 0x40,0x74, 0xF6,0x05,
+0x40,0x7C, 0xF3,0x02, 0x00,0x06, 0xF3,0x05, 0x42,0x54, 0x96,0x13, 0xFF,0xFC, 0x05,0x28,
+0x00,0x02, 0x95,0x16, 0xFF,0xC4, 0x95,0x13, 0xFF,0xFC, 0x23,0x94, 0x00,0x20, 0x93,0x96,
+0xFF,0xBC, 0x93,0x93, 0xFF,0xFC, 0x96,0x16, 0xFF,0xAC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x84,0x96, 0xFF,0xC4, 0x23,0x14, 0x00,0x38, 0x94,0x93,
+0xFF,0xFC, 0x93,0x16, 0xFF,0xB4, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x87,0x02, 0xFF,0x34, 0x86,0x16, 0xFF,0xAC, 0xF7,0x05,
+0x42,0x64, 0x96,0x13, 0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0xB4, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x84,0x96, 0xFF,0xBC, 0x00,0x00, 0x00,0x01, 0x94,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF7,0xC8, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xC0,0x1D, 0xF3,0x06, 0x3A,0xD8, 0xF7,0x04, 0x42,0x54, 0x00,0x00,
+0x00,0x01, 0x27,0x38, 0x00,0x01, 0xF7,0x05, 0x42,0x54, 0xF3,0x05, 0x42,0x44, 0xF3,0x82,
+0x17,0x70, 0x93,0x93, 0xFF,0xFC, 0xF4,0x82, 0x00,0x1B, 0x94,0x93, 0xFF,0xFC, 0xF3,0x06,
+0x42,0x44, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0xC1,0xA0, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0xF5,0x84,
+0x4F,0x58, 0xF4,0x06, 0x3B,0x70, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x76,0x39, 0x00,0x06, 0xA7,0x2E, 0x60,0x02, 0xC5,0x2C,
+0x60,0x00, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x40,0x00, 0x07,0x38, 0x00,0x02, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB7,
+0xFF,0xF0, 0xEE,0x00, 0xC1,0x15, 0x96,0x96, 0xFF,0x9C, 0xA7,0x2E, 0x60,0x02, 0x76,0xA9,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x83,0x96, 0xFF,0x9C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x40,0x00, 0x86,0xBA, 0x00,0x04, 0x24,0x94,
+0x00,0x60, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xA6,0xAA, 0x68,0x02, 0x77,0x1D, 0x00,0x03, 0xC7,0x38, 0x68,0x00, 0x27,0x38,
+0x00,0x08, 0x83,0xBA, 0x00,0x04, 0x83,0x3A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x93,0xA6,
+0x00,0x04, 0x93,0x26, 0x00,0x00, 0x85,0x96, 0xFF,0xA4, 0xE0,0x00, 0xC1,0x38, 0x23,0x00,
+0x00,0x07, 0xA7,0x2E, 0x60,0x02, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x40,0x00, 0x85,0xBA,
+0x00,0x04, 0x23,0x00, 0x00,0x07, 0x93,0x13, 0xFF,0xFC, 0x87,0x2A, 0x00,0x00, 0x76,0xA9,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x83,0x96, 0xFF,0x9C, 0xF6,0x06, 0x3B,0x70, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xA6,0xBA, 0x60,0x02, 0x20,0x1E,
+0x00,0x00, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0xEE,0x00, 0xC1,0x8D, 0x76,0xB5, 0xFF,0xF0, 0x84,0x96, 0xFF,0xA0, 0x00,0x00,
+0x00,0x01, 0x77,0x25, 0xFF,0xF0, 0xC6,0xB8, 0x68,0x00, 0xC7,0x28, 0x68,0x00, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xC1,0x2C, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x70, 0x25,0x00,
+0x00,0x07, 0x20,0x2A, 0x00,0x07, 0xEE,0x00, 0xC3,0xB8, 0xC7,0x28, 0x50,0x00, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC7,0x18, 0x70,0x00, 0x07,0x38, 0x00,0x26, 0x86,0xBA,
+0x00,0x00, 0xF5,0x84, 0x4F,0x58, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB6, 0x74,0x00, 0xE6,0x00,
+0xC2,0x3D, 0x20,0x36, 0x00,0x01, 0xE6,0x00, 0xC2,0x3D, 0x77,0x35, 0x00,0x06, 0xA6,0xBA,
+0x58,0x02, 0xC7,0x38, 0x58,0x00, 0x76,0x39, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC6,0xB4,
+0x67,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36, 0x00,0x02, 0xE6,0x00, 0xC2,0x4D, 0xC0,0x3A,
+0x5A,0x00, 0xE0,0x00, 0xC2,0x48, 0xC7,0x2C, 0x00,0x00, 0xF7,0x04, 0x4F,0x58, 0xF5,0x84,
+0x4F,0x58, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x5A,0x00, 0xE6,0x00, 0xC3,0xB1, 0xF4,0x86,
+0x3B,0x90, 0x83,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x06,0x9C, 0x00,0x16, 0x87,0x36,
+0x00,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0x76,0x39, 0x00,0x06, 0xA7,0x2E, 0x60,0x02, 0xC5,0x2C, 0x60,0x00, 0x76,0xA9,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xC7,0x38, 0x48,0x00, 0x07,0x38, 0x00,0x02, 0x86,0xBA, 0x00,0x00, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB7, 0xFF,0xF0, 0xEE,0x00,
+0xC3,0x21, 0x96,0x96, 0xFF,0x8C, 0xA7,0x2E, 0x60,0x02, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x83,0x16, 0xFF,0x8C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xC7,0x38, 0x48,0x00, 0x86,0xBA, 0x00,0x04, 0x24,0x94, 0x00,0x70, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xA6,0xAA,
+0x68,0x02, 0x77,0x19, 0x00,0x03, 0xC7,0x38, 0x68,0x00, 0x27,0x38, 0x00,0x08, 0x83,0xBA,
+0x00,0x04, 0x83,0x3A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x93,0xA6, 0x00,0x04, 0x93,0x26,
+0x00,0x00, 0x86,0x16, 0xFF,0x94, 0xE0,0x00, 0xC3,0x44, 0x00,0x00, 0x00,0x01, 0xA7,0x2E,
+0x60,0x02, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF3,0x06, 0x3B,0x90, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x30,0x00, 0x86,0x3A,
+0x00,0x04, 0x87,0x2A, 0x00,0x00, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x83,0x96,
+0xFF,0x8C, 0xF4,0x86, 0x3B,0x90, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xA6,0xBA, 0x48,0x02, 0x20,0x1E, 0x00,0x00, 0xC7,0x38, 0x48,0x00, 0x77,0x39,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0xEE,0x00, 0xC3,0x95, 0x76,0xB5,
+0xFF,0xF0, 0x83,0x16, 0xFF,0x90, 0x00,0x00, 0x00,0x01, 0x77,0x19, 0xFF,0xF0, 0xC6,0xB8,
+0x68,0x00, 0xC7,0x28, 0x68,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xC1,0x30,
+0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0xC5,0xC4, 0x00,0x00, 0x00,0x01, 0xE0,0x00,
+0xC1,0xC4, 0x05,0x28, 0x00,0x01, 0x83,0x96, 0x00,0x00, 0xF4,0x82, 0x00,0x06, 0xF4,0x85,
+0x42,0x54, 0xF6,0x04, 0x42,0x60, 0x25,0x14, 0x00,0x1E, 0x23,0x14, 0x00,0x20, 0x93,0x16,
+0xFF,0xAC, 0xF3,0x85, 0x40,0x78, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x1A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x18, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x16, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x14, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14,
+0x00,0x12, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x25,0x14, 0x00,0x10, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x07,0x1C,
+0x00,0x02, 0x97,0x13, 0xFF,0xFC, 0x23,0x94, 0x00,0x50, 0x93,0x96, 0xFF,0xA4, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD2,0x58, 0x97,0x93, 0xFF,0xFC, 0x84,0x96,
+0xFF,0xA4, 0x23,0x14, 0x00,0x38, 0x94,0x93, 0xFF,0xFC, 0x27,0x80, 0x00,0x07, 0x97,0x93,
+0xFF,0xFC, 0x93,0x16, 0xFF,0x9C, 0x93,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCF,0x24, 0x97,0x93, 0xFF,0xFC, 0x87,0x02, 0xFF,0x34, 0x27,0x80, 0x00,0x07, 0xF7,0x85,
+0x42,0x58, 0xF7,0x05, 0x42,0x64, 0x27,0x80, 0x00,0x07, 0x97,0x93, 0xFF,0xFC, 0x97,0x13,
+0xFF,0xFC, 0x83,0x96, 0xFF,0x9C, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x84,0x96,
+0xFF,0xAC, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xF5,0xF4, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xC5,0x95, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x42,0x58, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x42,0x58, 0xF7,0x04, 0x2D,0x38, 0xF3,0x06, 0x39,0xC0, 0xF3,0x05, 0x42,0x44, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x82,
+0x00,0x1C, 0x20,0x32, 0x00,0x44, 0xE6,0x00, 0xC5,0xC4, 0xB3,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x25,0x00,
+0x00,0x07, 0xF7,0x04, 0x40,0x74, 0xF6,0x84, 0x4F,0x58, 0xF6,0x04, 0x42,0x60, 0xC7,0x38,
+0x6A,0x00, 0x75,0xB8, 0xFF,0xFA, 0x06,0x30, 0x00,0x0A, 0x20,0x2A, 0x00,0x07, 0xEE,0x00,
+0xC6,0x48, 0x07,0x30, 0x00,0x0E, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB4,
+0x74,0x00, 0x47,0x2D, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xC6,0x4C, 0xC3,0x28, 0x00,0x00, 0x06,0x30, 0x00,0x02, 0xE0,0x00,
+0xC5,0xFC, 0x05,0x28, 0x00,0x01, 0xF3,0x02, 0x00,0x08, 0xC5,0x18, 0x30,0x00, 0xF3,0x84,
+0x42,0x60, 0xF6,0x04, 0x4F,0x58, 0xF7,0x04, 0x40,0x7C, 0xF4,0x84, 0x40,0x74, 0xC5,0x1C,
+0x50,0x00, 0x05,0x28, 0x00,0x26, 0x85,0xAA, 0x00,0x00, 0x74,0x29, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0xC6,0x1C, 0x62,0x00, 0x76,0x30, 0xFF,0xFA, 0xC6,0xB8, 0x70,0x00, 0xC4,0xA4,
+0x68,0x00, 0x04,0xA4, 0x00,0x26, 0x76,0xA5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x77,0x39,
+0x00,0x10, 0x77,0x39, 0xFF,0xF8, 0x46,0x31, 0x00,0x00, 0xC5,0xAC, 0x47,0xC0, 0x75,0xAD,
+0xFF,0xF0, 0xF4,0x02, 0x00,0xFF, 0xC5,0xAC, 0x44,0x00, 0xC7,0x38, 0x58,0x00, 0xF7,0x2B,
+0x28,0x00, 0x87,0x26, 0x00,0x00, 0x75,0xA5, 0x00,0x1E, 0xC6,0x30, 0x44,0x00, 0x75,0xAC,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xF6,0x82, 0xFF,0x00, 0xC7,0x38,
+0x6C,0x00, 0xC6,0x30, 0x70,0x00, 0xF6,0x27, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0x76,0x99,
+0x00,0x10, 0x76,0xB5, 0xFF,0xF8, 0xC7,0x38, 0x5F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38,
+0x44,0x00, 0xC6,0xB4, 0x70,0x00, 0xF6,0xA7, 0x28,0x00, 0x93,0x93, 0xFF,0xFC, 0xF3,0x84,
+0x3B,0xB0, 0x00,0x00, 0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD4,0x2C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04, 0x40,0x7C, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0xF3,0x84, 0x40,0x74, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xBE,0xF8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06,
+0x42,0x30, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x35,0x60, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x30, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x35,0xEC, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x36,0x78, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x37,0x04, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x37,0x90, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x38,0x1C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x38,0xA8, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x39,0x34, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x39,0xC0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x3A,0x4C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x42,0x44, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x3A,0xD8, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96,
+0x00,0x00, 0xF5,0x06, 0x3B,0x90, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38,
+0x50,0x00, 0x07,0x38, 0x00,0x02, 0x86,0xBA, 0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0x37, 0xFF,0xF0, 0xEE,0x00, 0xC9,0x95, 0x00,0x00,
+0x00,0x01, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x50,0x00, 0x86,0xBA,
+0x00,0x04, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xA6,0xAE, 0x68,0x02, 0x77,0x31, 0x00,0x03, 0xC7,0x38, 0x68,0x00, 0x27,0x38,
+0x00,0x08, 0x84,0xBA, 0x00,0x04, 0x84,0x3A, 0x00,0x00, 0xE0,0x00, 0xC9,0xB4, 0xC5,0x24,
+0x00,0x00, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x50,0x00, 0x85,0x3A,
+0x00,0x04, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x20,0x32,
+0x00,0x00, 0xF6,0x06, 0x3B,0x90, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xA6,0xBA, 0x60,0x02, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0xEE,0x00, 0xC9,0xF9, 0x76,0xB5, 0xFF,0xF0, 0x77,0x21,
+0xFF,0xF0, 0xC6,0xB8, 0x68,0x00, 0xC7,0x2C, 0x68,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xC1,0x28, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96, 0x00,0x00, 0xF5,0x06, 0x3B,0x70, 0x87,0x2E,
+0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x77,0x39, 0x00,0x03, 0xC7,0x38, 0x50,0x00, 0x07,0x38, 0x00,0x02, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0x37,
+0xFF,0xF0, 0xEE,0x00, 0xCA,0xBD, 0x00,0x00, 0x00,0x01, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xC7,0x38, 0x50,0x00, 0x86,0xBA, 0x00,0x04, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xA6,0xAE, 0x68,0x02, 0x77,0x31,
+0x00,0x03, 0xC7,0x38, 0x68,0x00, 0x27,0x38, 0x00,0x08, 0x84,0xBA, 0x00,0x04, 0x84,0x3A,
+0x00,0x00, 0xE0,0x00, 0xCA,0xDC, 0xC5,0x24, 0x00,0x00, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xC7,0x38, 0x50,0x00, 0x85,0x3A, 0x00,0x04, 0x83,0x96, 0x00,0x04, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x20,0x32, 0x00,0x00, 0x93,0x93, 0xFF,0xFC, 0x87,0x2E,
+0x00,0x00, 0xF6,0x06, 0x3B,0x70, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0x39,
+0x00,0x03, 0xA6,0xBA, 0x60,0x02, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0xEE,0x00, 0xCB,0x29, 0x76,0xB5, 0xFF,0xF0, 0x77,0x21,
+0xFF,0xF0, 0xC6,0xB8, 0x68,0x00, 0xC7,0x2C, 0x68,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xC1,0x28, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x04, 0x4F,0x58, 0xF5,0x82, 0x00,0x02, 0x06,0x28,
+0x00,0x80, 0x20,0x2E, 0x00,0x62, 0xEE,0x00, 0xCB,0x90, 0x07,0x30, 0x00,0x40, 0xF0,0x33,
+0x28,0x00, 0xC6,0xB8, 0x52,0x00, 0x76,0xB4, 0xFF,0xFA, 0x06,0x30, 0x00,0x14, 0xF6,0xB3,
+0x28,0x00, 0xC6,0x38, 0x00,0x00, 0xE0,0x00, 0xCB,0x64, 0x05,0xAC, 0x00,0x01, 0xF7,0x04,
+0x4F,0x58, 0x00,0x00, 0x00,0x01, 0x06,0xB8, 0x18,0xD4, 0xF4,0x82, 0x00,0x01, 0xF4,0xB7,
+0x28,0x00, 0x07,0x38, 0x18,0xC0, 0xF0,0x3B, 0x28,0x00, 0xF7,0x06, 0x42,0xC0, 0xF4,0x82,
+0x00,0x02, 0xF4,0xBB, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF6,0x84, 0x42,0xC0, 0xF6,0x06, 0x42,0xC0, 0x77,0x31, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0x75,0xB1, 0x00,0x1E, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04,
+0x4F,0x58, 0x76,0xB5, 0x00,0x06, 0xC4,0x38, 0x68,0x00, 0x87,0x22, 0x00,0x14, 0x76,0xA1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33,
+0x28,0x00, 0xF7,0x04, 0x42,0xC0, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0x20,0x3A, 0x00,0x01, 0xE6,0x00, 0xCC,0x4C, 0xF6,0x06, 0x42,0x90, 0xF7,0x04,
+0x42,0x90, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x04, 0x85,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x95,0x16, 0xFF,0xF4, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCD,0x00, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xF4, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0xCC,0xBC, 0xF5,0x86, 0x42,0xC0, 0xF7,0x04, 0x42,0x90, 0xF6,0x06, 0x42,0x92, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xCC,0xEC, 0xF7,0x33, 0x28,0x00, 0xF0,0x2B, 0x28,0x00, 0xF6,0x84,
+0x42,0xC0, 0x77,0x2D, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x06,0x28, 0x00,0x14, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB4, 0xFF,0xF0, 0xF7,0x04, 0x4F,0x58, 0xF6,0xB3, 0x28,0x00, 0xC7,0x28,
+0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0xF7,0x2F, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x96, 0x00,0x00, 0xF7,0x04, 0x4F,0x58, 0xF4,0x02,
+0x00,0x00, 0xC6,0xB4, 0x72,0x00, 0x77,0x34, 0xFF,0xFA, 0x27,0x38, 0x00,0x02, 0x20,0x3A,
+0x00,0x61, 0xF7,0x02, 0x00,0x3F, 0xE2,0x00, 0xCD,0x40, 0xC6,0xB4, 0x74,0x00, 0x20,0x36,
+0x00,0x00, 0xE6,0x00, 0xCD,0x40, 0x00,0x00, 0x00,0x01, 0xF4,0x02, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16, 0x00,0x00, 0x87,0x16,
+0x00,0x08, 0x85,0x96, 0x00,0x04, 0xC5,0x30, 0x70,0x00, 0xC0,0x32, 0x52,0x00, 0xE6,0x00,
+0xCD,0xA1, 0x00,0x00, 0x00,0x01, 0x86,0xB2, 0x00,0x00, 0x77,0x31, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xE8, 0xF6,0xAF, 0x68,0x00, 0x06,0x30,
+0x00,0x01, 0xC0,0x32, 0x52,0x00, 0xE6,0x00, 0xCD,0x78, 0x05,0xAC, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x84,0x96,
+0x00,0x00, 0x84,0x16, 0x00,0x04, 0x85,0x96, 0x00,0x08, 0x86,0xA6, 0x00,0x00, 0x77,0x25,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x75,0x35, 0xFF,0xF0, 0x20,0x2A,
+0x00,0x10, 0xE2,0x00, 0xCE,0x0D, 0xF6,0x06, 0x42,0x8E, 0xF5,0x02, 0x00,0x10, 0xF7,0x04,
+0x42,0x8C, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x20,0x2E, 0x00,0x01, 0xE6,0x00,
+0xCE,0x70, 0x20,0x2A, 0x00,0x00, 0xEE,0x00, 0xCE,0x71, 0x07,0x24, 0x00,0x02, 0x25,0x28,
+0x00,0x01, 0xA5,0xBA, 0x50,0x02, 0x86,0x22, 0x00,0x00, 0x76,0xA1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x50,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC5,0xAC,
+0x77,0xC0, 0xC6,0x30, 0x6F,0xC0, 0x76,0x31, 0xFF,0xF0, 0x75,0xAD, 0xFF,0xE8, 0xF6,0x82,
+0x00,0xFF, 0xF7,0x02, 0xF1,0x54, 0x75,0xAD, 0x00,0x02, 0xA7,0x2E, 0x70,0x02, 0xC6,0x30,
+0x6C,0x00, 0xC6,0x30, 0x75,0x80, 0xF6,0x23, 0x28,0x00, 0x24,0x20, 0x00,0x02, 0x25,0xA8,
+0x00,0x01, 0xF3,0x02, 0xF2,0x46, 0x03,0xA4, 0x00,0x02, 0xC4,0xAC, 0x38,0x00, 0x25,0x2C,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xEC,0x00, 0xCF,0x11, 0x00,0x00, 0x00,0x01, 0xE6,0x00,
+0xCE,0xA0, 0xC7,0x1C, 0x50,0x00, 0xE0,0x00, 0xCE,0xB4, 0xF6,0x02, 0x00,0x00, 0xA6,0x9E,
+0x50,0x02, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0x35,
+0xFF,0xE8, 0x86,0xA6, 0x00,0x00, 0x77,0x25, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0x25,0x28,
+0x00,0x02, 0x25,0xAC, 0x00,0x02, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xE8, 0x77,0x31,
+0x00,0x04, 0xC7,0x38, 0x62,0x00, 0x77,0x39, 0x00,0x01, 0xC7,0x38, 0x30,0x00, 0xC6,0xB4,
+0x68,0x00, 0xC6,0xB4, 0x70,0x00, 0x06,0xB4, 0x00,0x0E, 0x87,0x36, 0x00,0x00, 0x24,0xA4,
+0x00,0x02, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0xE0,0x00, 0xCE,0x84, 0x24,0x20, 0x00,0x02, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16, 0x00,0x08, 0x83,0x16,
+0x00,0x04, 0x83,0x96, 0x00,0x00, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x05,0x9C, 0x00,0x02, 0x74,0x9D, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x74,0x1D,
+0x00,0x1E, 0x06,0x30, 0x00,0x02, 0x75,0x31, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0x28, 0xFF,0xE5, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x05,0xAC, 0x00,0x02, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x74,0x20,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x1F,
+0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x04,0x9C, 0x00,0x02, 0xC7,0x38, 0x47,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x25,0x38, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xEE,0x00, 0xD0,0xBD, 0x26,0x28,
+0x00,0x01, 0xA7,0x26, 0x60,0x02, 0xC6,0xA4, 0x60,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC5,0xA4, 0x50,0x00, 0xC5,0x30, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xE8, 0xE0,0x00, 0xD0,0x88, 0xF7,0x2F, 0x68,0x00, 0x07,0x1C, 0x00,0x02, 0xF3,0x3B,
+0x68,0x00, 0xC4,0x1C, 0x00,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x86,0x16, 0x00,0x04, 0x84,0x16, 0x00,0x00, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x05,0xA0, 0x00,0x02, 0x74,0xA1, 0x00,0x1E, 0x74,0xA4,
+0xFF,0xE5, 0x06,0x30, 0x00,0x02, 0x75,0x31, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0x28, 0xFF,0xE5, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x06,0xA0, 0x00,0x02, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2F, 0x28,0x00, 0x87,0x22,
+0x00,0x00, 0x76,0x21, 0x00,0x1E, 0x85,0x96, 0x00,0x08, 0xC7,0x38, 0x4F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xC6,0xB4, 0x70,0x00, 0xF5,0xB7, 0x68,0x00, 0x87,0x22, 0x00,0x00, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x23,
+0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x20, 0x27,0x14, 0x00,0x20, 0xF0,0x3B, 0x28,0x00, 0x84,0x96, 0x00,0x04, 0xF5,0x02,
+0x00,0x00, 0x86,0xA6, 0x00,0x00, 0x76,0x25, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x04,0x24,
+0x00,0x02, 0xC6,0xB4, 0x67,0xC0, 0x76,0xB4, 0xFF,0xF0, 0xF6,0xBB, 0x28,0x00, 0x87,0x26,
+0x00,0x00, 0x76,0xA5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0xC0,0x2A, 0x72,0x00, 0xEC,0x00, 0xD2,0xF8, 0x76,0xA5, 0x00,0x1E, 0x87,0x26,
+0x00,0x00, 0x76,0xB4, 0xFF,0xE5, 0x06,0x28, 0x00,0x01, 0x25,0x94, 0x00,0x1E, 0xC5,0xAC,
+0x50,0x00, 0xC5,0x30, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38,
+0x52,0x00, 0xA6,0xA2, 0x70,0x02, 0xC7,0x20, 0x70,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xE8, 0xC6,0x80, 0x6A,0x00, 0xE0,0x00,
+0xD2,0x90, 0xF6,0xAF, 0x68,0x00, 0x87,0x16, 0xFF,0xE0, 0x76,0x15, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x83,0x96, 0x00,0x00, 0x23,0x14, 0x00,0x1E, 0x75,0x99, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0x75,0x15, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x74,0x95, 0x00,0x1E, 0x74,0xA4,
+0xFF,0xE5, 0x74,0x15, 0x00,0x1E, 0x74,0x20, 0xFF,0xE5, 0x06,0x9C, 0x00,0x02, 0x73,0x95,
+0x00,0x1E, 0x93,0x96, 0xFF,0xDC, 0xC7,0x38, 0x67,0xC0, 0x83,0x96, 0x00,0x00, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x83,0x96, 0xFF,0xDC, 0x87,0x1A, 0x00,0x00, 0x73,0x9C,
+0xFF,0xE5, 0x93,0x96, 0xFF,0xDC, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE4, 0x23,0x14, 0x00,0x1A, 0x76,0x19,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xE8, 0x23,0x14,
+0x00,0x16, 0x76,0x19, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0xEC, 0x23,0x14, 0x00,0x12, 0x76,0x19, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x47,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0x84,0x16, 0x00,0x00, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x16, 0xFF,0xF0, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x3F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x86,0x16, 0x00,0x00, 0x84,0x16, 0x00,0x04, 0xF6,0x84, 0x4F,0x58, 0x87,0x32,
+0x00,0x14, 0x03,0x30, 0x00,0x14, 0x75,0x19, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0xC3,0xA0,
+0x6A,0x00, 0x73,0x9C, 0xFF,0xFA, 0x04,0xA0, 0x00,0x14, 0x75,0xA5, 0x00,0x1E, 0xC6,0x30,
+0x6A,0x00, 0x76,0x30, 0xFF,0xFA, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0xF3,0x9B, 0x28,0x00, 0x07,0x20, 0x00,0x16, 0xF6,0x3B, 0x28,0x00, 0x87,0x22,
+0x00,0x14, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x77,0x39,
+0x00,0x06, 0xC6,0xB4, 0x70,0x00, 0x06,0xB4, 0x00,0x16, 0xF3,0xB7, 0x28,0x00, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16, 0x00,0x00, 0xF5,0x84,
+0x4F,0x58, 0x05,0x30, 0x00,0x16, 0x87,0x2A, 0x00,0x00, 0x76,0xA9, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x77,0x39, 0x00,0x06, 0xC4,0x2C,
+0x70,0x00, 0xC0,0x22, 0x62,0x00, 0xE6,0x00, 0xD5,0x29, 0x06,0xA0, 0x00,0x16, 0x87,0x36,
+0x00,0x00, 0xC6,0x30, 0x5A,0x00, 0x76,0x30, 0xFF,0xFA, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x77,0x39, 0x00,0x06, 0x76,0xB8,
+0xFF,0xFA, 0xF6,0xAB, 0x28,0x00, 0xC7,0x2C, 0x70,0x00, 0x07,0x38, 0x00,0x14, 0xE0,0x00,
+0xD5,0x2C, 0xF6,0x3B, 0x28,0x00, 0xC4,0x2C, 0x00,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x4F,0x84, 0x47,0x38, 0xFF,0xFC, 0xF7,0x05,
+0x6F,0x30, 0xF6,0x86, 0x50,0x5C, 0x46,0xB4, 0xFF,0xFC, 0xF6,0x85, 0x6E,0x50, 0xF7,0x06,
+0x6E,0x7C, 0x47,0x38, 0xFF,0xFC, 0xF7,0x05, 0x6E,0x54, 0x07,0x34, 0x19,0x1C, 0xF7,0x05,
+0x4F,0x5C, 0xF7,0x02, 0x00,0x64, 0x97,0x36, 0x19,0x1C, 0xF7,0x02, 0x00,0x00, 0x97,0x36,
+0x19,0x20, 0x06,0xB4, 0x00,0x1C, 0xF6,0x85, 0x4F,0x58, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x90, 0xF3,0x02, 0xFF,0xFF, 0xF3,0x05,
+0x4F,0x54, 0xF3,0x82, 0x00,0x00, 0x93,0x96, 0xFF,0xAC, 0x23,0x14, 0x00,0x20, 0x93,0x16,
+0xFF,0x9C, 0x23,0x94, 0x00,0x38, 0x93,0x96, 0xFF,0x94, 0x83,0x16, 0xFF,0xAC, 0xF7,0x04,
+0x4F,0x5C, 0xF3,0x82, 0x00,0x0C, 0x93,0x96, 0xFF,0x74, 0x93,0x16, 0xFF,0x8C, 0x87,0x3A,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x97,0x16, 0xFF,0xA4, 0x83,0x16, 0xFF,0xAC, 0x83,0x96,
+0xFF,0xA4, 0x00,0x00, 0x00,0x01, 0xC0,0x1A, 0x3A,0x00, 0xEC,0x00, 0xDB,0x78, 0xF3,0x02,
+0x04,0xBC, 0xF7,0x04, 0x4F,0x5C, 0x83,0x16, 0xFF,0x74, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x30,0x00, 0x87,0x3A, 0x00,0x08, 0xF6,0x84, 0x4F,0x58, 0x77,0x39, 0x00,0x06, 0xC4,0xB4,
+0x70,0x00, 0x94,0x93, 0xFF,0xFC, 0x94,0x96, 0xFF,0x7C, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCD,0x00, 0x97,0x93, 0xFF,0xFC, 0x84,0x96, 0xFF,0x7C, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0xD6,0x54, 0xC5,0x04, 0x00,0x00, 0xF7,0x04, 0x42,0x88, 0xE0,0x00, 0xD8,0x7C, 0xF6,0x06,
+0x42,0x88, 0xF6,0x04, 0x4F,0x5C, 0x83,0x96, 0x00,0x00, 0x83,0x16, 0xFF,0x74, 0x86,0x9E,
+0x00,0x00, 0xA7,0x32, 0x30,0x02, 0xF5,0x82, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xD6,0x94, 0xC6,0x30, 0x30,0x00, 0x86,0x9E, 0x00,0x04, 0x87,0x32, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xD6,0x98, 0x20,0x2E, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xD6,0xA5, 0x00,0x00, 0x00,0x01, 0xF5,0x02,
+0x00,0x00, 0x83,0x96, 0x00,0x00, 0x87,0x32, 0x00,0x00, 0x86,0x9E, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xD6,0xE4, 0xF5,0x82, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0xE6,0x00, 0xD6,0xEC, 0x20,0x2E, 0x00,0x00, 0x86,0x9E, 0x00,0x04, 0x87,0x32,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xD6,0xED, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xD6,0xFD, 0x20,0x2A,
+0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0xD7,0x28, 0x04,0xA4,
+0x00,0x02, 0x83,0x16, 0xFF,0xAC, 0xF7,0x06, 0x42,0xC8, 0x83,0x96, 0xFF,0x8C, 0xF3,0x05,
+0x4F,0x54, 0xC7,0x1C, 0x70,0x00, 0xF0,0x3B, 0x28,0x00, 0x07,0x38, 0x00,0x02, 0xE0,0x00,
+0xDB,0x50, 0xF0,0x3B, 0x28,0x00, 0x94,0x96, 0xFF,0x6C, 0x87,0x26, 0x00,0x00, 0x76,0xA5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x83,0x16, 0xFF,0x6C, 0x83,0x96, 0xFF,0x9C, 0x24,0x94,
+0x00,0x1E, 0x06,0x18, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0x1D,
+0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x1A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x18, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x16, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x14, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x24,0x94, 0x00,0x12, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x94, 0x00,0x10, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x87,0x16, 0xFF,0xE0, 0xF6,0x82, 0xFF,0xFC, 0xC7,0x38, 0x57,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x03, 0xC4,0xB8, 0x6C,0x00, 0x20,0x26, 0x00,0x10, 0xE2,0x00,
+0xD8,0x9D, 0xF6,0x06, 0x42,0x8A, 0xF7,0x04, 0x42,0x88, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF4,0x02, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xDB,0xA0, 0xF7,0x33, 0x28,0x00, 0x83,0x16, 0xFF,0x6C, 0x25,0x14,
+0x00,0x36, 0x83,0x96, 0xFF,0x94, 0x87,0x1A, 0x00,0x00, 0x76,0x99, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x06,0x18, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x1F, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x34, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x32, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x30, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x2E, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x2C, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x2A, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x28, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x26,0xA4, 0x00,0x02, 0x74,0xA4, 0xFF,0xFF, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B,
+0x28,0x00, 0x90,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0x8C, 0xF7,0x06, 0x42,0xCC, 0xC7,0x18,
+0x70,0x00, 0xC7,0x38, 0x68,0x00, 0x97,0x13, 0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0x94,0x96,
+0xFF,0x7C, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0x6C, 0x24,0x14, 0x00,0x4E, 0x25,0x14, 0x00,0x50, 0x83,0x16, 0xFF,0x8C, 0x84,0x96,
+0xFF,0x7C, 0x87,0x1E, 0x00,0x00, 0x76,0x9D, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x1C,
+0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0x29, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x4C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x4A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x48, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x46, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x44, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x42, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x14, 0x00,0x40, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x86,0x96,
+0xFF,0xB0, 0xF6,0x06, 0x42,0xC8, 0xC6,0x18, 0x60,0x00, 0xF7,0x02, 0x00,0x03, 0xC6,0xB4,
+0x57,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xC6,0xB4, 0x74,0x00, 0xF7,0x02, 0x00,0x04, 0xC7,0x38,
+0x6A,0x00, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0xF4,0xB3, 0x28,0x00, 0x83,0x96,
+0xFF,0x8C, 0x83,0x16, 0xFF,0x74, 0x03,0x9C, 0x00,0x14, 0x93,0x96, 0xFF,0x8C, 0x03,0x18,
+0x00,0x0C, 0x83,0x96, 0xFF,0xAC, 0x93,0x16, 0xFF,0x74, 0x03,0x9C, 0x00,0x01, 0xE0,0x00,
+0xD5,0xEC, 0x93,0x96, 0xFF,0xAC, 0x93,0x13, 0xFF,0xFC, 0xF3,0x84, 0x4F,0x5C, 0x00,0x00,
+0x00,0x01, 0x93,0x93, 0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93, 0xFF,0xFC, 0xF4,0x02, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x01,0xA0, 0xF5,0x02,
+0x00,0x00, 0xF3,0x84, 0x6E,0x50, 0xF6,0x02, 0x00,0x1C, 0x20,0x2A, 0x00,0x63, 0xEE,0x00,
+0xDC,0x08, 0xC5,0x9C, 0x60,0x00, 0xA6,0x9E, 0x60,0x02, 0x77,0x2D, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x20,0x36, 0x00,0x03, 0xE6,0x00,
+0xDB,0xFC, 0x07,0x2C, 0x00,0x36, 0xF0,0x3B, 0x28,0x00, 0x06,0x30, 0x00,0x40, 0xE0,0x00,
+0xDB,0xCC, 0x05,0x28, 0x00,0x01, 0xF5,0x84, 0x4F,0x5C, 0x00,0x00, 0x00,0x01, 0x86,0xAE,
+0x00,0x08, 0xF4,0x02, 0x00,0x00, 0x87,0x2E, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xEC,0x00, 0xDC,0xF0, 0x96,0x96, 0xFF,0xEC, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0xC6,0x38, 0x58,0x00, 0x06,0x30, 0x00,0x0C, 0xC3,0x84,
+0x00,0x00, 0x83,0x16, 0x00,0x00, 0x86,0xB2, 0x00,0x00, 0x87,0x1A, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xDC,0x7C, 0xC5,0x20, 0x00,0x00, 0x86,0xB2,
+0x00,0x04, 0x87,0x1A, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xDC,0x80, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00,
+0xDC,0x8D, 0x00,0x00, 0x00,0x01, 0xF3,0x82, 0x00,0x00, 0x84,0x96, 0x00,0x00, 0x86,0xB2,
+0x00,0x00, 0x87,0x26, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0xDC,0xCC, 0xF5,0x02, 0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xDC,0xD4, 0x20,0x2A,
+0x00,0x00, 0x86,0xB2, 0x00,0x04, 0x87,0x26, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x00, 0xDC,0xD5, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A,
+0x00,0x00, 0xE6,0x00, 0xDC,0xE5, 0x20,0x1E, 0x00,0x00, 0xF3,0x82, 0x00,0x01, 0x20,0x1E,
+0x00,0x00, 0xE6,0x00, 0xDC,0xF4, 0x20,0x22, 0x00,0x00, 0xF4,0x02, 0x00,0x01, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xDD,0x29, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xEC, 0x00,0x00,
+0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x58,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xF0, 0xE0,0x00,
+0xDD,0x98, 0x96,0x96, 0xFF,0xF4, 0x27,0x14, 0x00,0x14, 0x97,0x13, 0xFF,0xFC, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x95,0x93, 0xFF,0xFC, 0x95,0x96,
+0xFE,0x70, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x85,0x96,
+0xFE,0x70, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xDD,0x95, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x58,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xF0, 0x96,0x96, 0xFF,0xF4, 0x97,0x2E, 0x00,0x08, 0xE0,0x00, 0xDD,0x9C, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0xDD,0xB0, 0xF4,0x82,
+0x00,0x00, 0xF7,0x04, 0x42,0x7C, 0xE0,0x00, 0xE0,0x9C, 0xF6,0x06, 0x42,0x7E, 0x94,0x96,
+0xFF,0x44, 0x87,0x16, 0xFF,0xF4, 0xF6,0x04, 0x4F,0x58, 0x77,0x39, 0x00,0x06, 0xC7,0x30,
+0x70,0x00, 0x97,0x16, 0xFF,0x54, 0x06,0xB8, 0x00,0x1A, 0x87,0x36, 0x00,0x00, 0x83,0x16,
+0xFF,0x54, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x93,0x13,
+0xFF,0xFC, 0x77,0x38, 0xFF,0xF0, 0x77,0x39, 0x00,0x06, 0xC6,0x30, 0x70,0x00, 0x96,0x16,
+0xFF,0x4C, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0x00, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xDE,0x35, 0xF3,0x02, 0x00,0x01, 0x84,0x96, 0xFF,0x4C, 0x00,0x00,
+0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0x00, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xDE,0x38, 0x00,0x00, 0x00,0x01, 0xF3,0x02,
+0x00,0x01, 0x93,0x16, 0xFF,0x44, 0x84,0x96, 0xFF,0x44, 0x00,0x00, 0x00,0x01, 0x20,0x26,
+0x00,0x00, 0xE6,0x00, 0xDE,0x59, 0xF6,0x06, 0x42,0xA4, 0xF7,0x04, 0x42,0xA4, 0xE0,0x00,
+0xE0,0xA0, 0x76,0xB1, 0x00,0x1E, 0x83,0x16, 0xFF,0x4C, 0x86,0x16, 0xFF,0x4C, 0x87,0x1A,
+0x00,0x00, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x20,0x3A, 0x00,0x02, 0xE6,0x00, 0xDE,0x85, 0x00,0x00, 0x00,0x01, 0xF6,0x04,
+0x4F,0x58, 0xF5,0x84, 0x4F,0x58, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x5A,0x00, 0xE6,0x00,
+0xE0,0x25, 0x00,0x00, 0x00,0x01, 0x84,0x96, 0xFF,0x4C, 0x00,0x00, 0x00,0x01, 0x06,0xA4,
+0x00,0x1A, 0x87,0x36, 0x00,0x00, 0x83,0x16, 0xFF,0x54, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x77,0x39, 0x00,0x06, 0xC7,0x2C,
+0x70,0x00, 0xC0,0x3A, 0x32,0x00, 0xE6,0x00, 0xDE,0xDD, 0xF6,0x06, 0x42,0x80, 0xF7,0x04,
+0x42,0x80, 0xE0,0x00, 0xE0,0xA0, 0x76,0xB1, 0x00,0x1E, 0x26,0x14, 0x00,0x30, 0xF0,0x33,
+0x28,0x00, 0x87,0x16, 0xFF,0xD0, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x84,0x96,
+0xFF,0x4C, 0x23,0x14, 0x00,0x2E, 0x93,0x16, 0xFE,0x64, 0x75,0x99, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0x75,0x15, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x73,0x15, 0x00,0x1E, 0x73,0x18,
+0xFF,0xE5, 0x93,0x16, 0xFF,0x34, 0x83,0x16, 0xFE,0x64, 0x04,0x24, 0x00,0x02, 0x06,0xA0,
+0x00,0x02, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x3C, 0x74,0x95,
+0x00,0x1E, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x2C, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xD4, 0x24,0x94,
+0x00,0x2A, 0x94,0x96, 0xFE,0x64, 0x76,0x25, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xD8, 0x23,0x14, 0x00,0x26, 0x93,0x16, 0xFE,0x64, 0x76,0x19,
+0x00,0x1E, 0x84,0x96, 0xFF,0x3C, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x84,0x96,
+0xFF,0x34, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0xDC, 0x23,0x14, 0x00,0x22, 0x93,0x16, 0xFE,0x64, 0x76,0x19,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xE0, 0x83,0x16, 0xFF,0x2C, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xE0,0x00, 0xEA,0xA0, 0xF7,0x37,
+0x28,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCB,0xCC, 0x97,0x93, 0xFF,0xFC, 0x06,0xA0,
+0x00,0x02, 0xF7,0x04, 0x4F,0x58, 0xF0,0x37, 0x28,0x00, 0x06,0xA0, 0x00,0x14, 0x94,0x16,
+0xFF,0x24, 0xC7,0x20, 0x72,0x00, 0x77,0x38, 0xFF,0xFA, 0xF7,0x37, 0x28,0x00, 0x06,0xA0,
+0x00,0x16, 0xF7,0x37, 0x28,0x00, 0xF4,0x82, 0x00,0x01, 0xF4,0xA3, 0x28,0x00, 0x94,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0x00, 0x97,0x93, 0xFF,0xFC, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xE0,0xBC, 0x26,0x94, 0x00,0x48, 0xF7,0x04, 0x42,0x80, 0xE0,0x00,
+0xE0,0x9C, 0xF6,0x06, 0x42,0x82, 0x86,0x96, 0xFE,0xF4, 0xE0,0x00, 0xE2,0x94, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x42,0x84, 0xF6,0x06, 0x42,0x84, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF4,0x02, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xEA,0xA4, 0xF7,0x33, 0x28,0x00, 0x83,0x16, 0xFF,0x4C, 0x75,0x15,
+0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x93,0x16, 0xFF,0x1C, 0x07,0x18, 0x00,0x36, 0xF4,0x82,
+0x00,0x01, 0xF4,0xBB, 0x28,0x00, 0xF0,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xB8, 0x76,0xB5,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x04,0x18, 0x00,0x02, 0x06,0x20, 0x00,0x02, 0x23,0x14,
+0x00,0x46, 0x93,0x16, 0xFF,0x14, 0x75,0x99, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x74,0x95,
+0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFF,0x0C, 0x73,0x15, 0x00,0x1E, 0x73,0x18,
+0xFF,0xE5, 0x93,0x16, 0xFF,0x04, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96,
+0xFE,0xFC, 0x23,0x00, 0x00,0x07, 0x93,0x16, 0xFE,0xF4, 0x84,0x96, 0xFF,0x1C, 0x83,0x16,
+0xFF,0x14, 0x04,0xA4, 0x00,0x0A, 0x94,0x96, 0xFE,0x7C, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0xF6,0x84, 0x4F,0x58, 0x84,0x96, 0xFF,0x54, 0x87,0x1A,
+0x00,0x00, 0xC6,0xA4, 0x6A,0x00, 0x74,0x34, 0xFF,0xFA, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x16, 0xFF,0xBC, 0x23,0x14,
+0x00,0x42, 0x93,0x16, 0xFF,0x14, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0x30,
+0x00,0x02, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x16, 0xFF,0xC0, 0x24,0x94, 0x00,0x3E, 0x94,0x96, 0xFF,0x14, 0x76,0xA5,
+0x00,0x1E, 0x83,0x16, 0xFF,0x0C, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x37,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0x06,0x30, 0x00,0x02, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x16,
+0xFF,0xC4, 0x24,0x94, 0x00,0x3A, 0x94,0x96, 0xFF,0x14, 0x76,0xA5, 0x00,0x1E, 0x83,0x16,
+0xFF,0x04, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33,
+0x28,0x00, 0x87,0x26, 0x00,0x00, 0x06,0x30, 0x00,0x02, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x87,0x16, 0xFF,0xC8, 0x84,0x96, 0xFE,0xFC, 0x06,0x30,
+0x00,0x02, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x83,0x16,
+0xFE,0xF4, 0x00,0x00, 0x00,0x01, 0x20,0x1A, 0x00,0x07, 0xEE,0x00, 0xE2,0x94, 0xF6,0x82,
+0x00,0x08, 0x84,0x96, 0xFE,0x7C, 0x00,0x00, 0x00,0x01, 0x07,0x24, 0x00,0x0E, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xF7,0x02, 0x00,0xFF, 0xC6,0xB4, 0x74,0x00, 0x47,0x21, 0x00,0x00, 0xC0,0x36,
+0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xE0,0x88, 0x04,0xA4,
+0x00,0x02, 0x94,0x96, 0xFE,0x7C, 0x03,0x18, 0x00,0x01, 0xE0,0x00, 0xE2,0x30, 0x93,0x16,
+0xFE,0xF4, 0x83,0x16, 0xFF,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x18, 0x00,0x38, 0xF6,0xBB,
+0x28,0x00, 0x93,0x13, 0xFF,0xFC, 0x84,0x96, 0xFF,0x24, 0x00,0x00, 0x00,0x01, 0x94,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD4,0x2C, 0x97,0x93, 0xFF,0xFC, 0x23,0x14,
+0x00,0x78, 0x93,0x16, 0xFE,0xBC, 0x84,0x96, 0x00,0x00, 0x23,0x14, 0x00,0xA8, 0x86,0xA6,
+0x00,0x04, 0x87,0x26, 0x00,0x00, 0x93,0x16, 0xFE,0x9C, 0xC6,0xB4, 0x70,0x00, 0x96,0x96,
+0xFE,0xEC, 0xF7,0x02, 0x00,0x01, 0xC7,0x34, 0x74,0x00, 0x97,0x16, 0xFE,0xE4, 0x84,0x96,
+0xFF,0x24, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD4,0xB4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04, 0x4F,0x58, 0x00,0x00, 0x00,0x01, 0xC0,0x22,
+0x72,0x00, 0xE6,0x00, 0xEA,0xA1, 0x94,0x16, 0xFF,0x1C, 0x86,0xA2, 0x00,0x38, 0x77,0x21,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xF3,0x02, 0x00,0x00, 0x93,0x16, 0xFE,0xD4, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0x96,0x96, 0xFE,0xDC, 0x84,0x96, 0xFE,0xD4, 0x00,0x00,
+0x00,0x01, 0x20,0x26, 0x00,0x0E, 0xEE,0x00, 0xE2,0xF0, 0xF3,0x02, 0x00,0x0F, 0x93,0x13,
+0xFF,0xFC, 0x83,0x16, 0xFE,0xEC, 0x00,0x00, 0x00,0x01, 0xC7,0x18, 0x48,0x00, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x27,0xE8, 0x97,0x93, 0xFF,0xFC, 0xC3,0xA0,
+0x00,0x00, 0x84,0x96, 0xFE,0xE4, 0x00,0x00, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00,
+0xE3,0x8D, 0x23,0x9C, 0x00,0x07, 0xC3,0x80, 0x3A,0x00, 0xC7,0x1C, 0x38,0x00, 0x83,0x16,
+0xFF,0x1C, 0xF4,0x82, 0x00,0xFF, 0xF6,0x04, 0x4F,0x58, 0xC7,0x18, 0x70,0x00, 0x07,0x38,
+0x00,0x26, 0x86,0xBA, 0x00,0x00, 0x97,0x16, 0xFE,0xC4, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xC6,0xB4, 0x4C,0x00, 0x76,0xB5,
+0x00,0x06, 0xC3,0x30, 0x68,0x00, 0x07,0x30, 0x00,0x40, 0xC0,0x1A, 0x72,0x00, 0xE6,0x00,
+0xE4,0x0D, 0x93,0x16, 0xFE,0xCC, 0x93,0x13, 0xFF,0xFC, 0x93,0x96, 0xFE,0x74, 0x96,0x16,
+0xFE,0x6C, 0x96,0x96, 0xFE,0x68, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0x00, 0x97,0x93,
+0xFF,0xFC, 0x83,0x96, 0xFE,0x74, 0x86,0x16, 0xFE,0x6C, 0x86,0x96, 0xFE,0x68, 0x20,0x22,
+0x00,0x00, 0xE6,0x00, 0xE0,0x95, 0x00,0x00, 0x00,0x01, 0xF5,0x84, 0x4F,0x58, 0x84,0x96,
+0xFE,0xCC, 0x07,0x2C, 0x00,0x40, 0xC0,0x26, 0x72,0x00, 0xE6,0x00, 0xEA,0x8D, 0x00,0x00,
+0x00,0x01, 0xA7,0x32, 0x68,0x02, 0x76,0xA5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x86,0x16,
+0xFE,0xCC, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A, 0x00,0x02, 0xE6,0x00,
+0xE4,0x51, 0xC0,0x32, 0x5A,0x00, 0xC6,0x2C, 0x00,0x00, 0xC0,0x32, 0x5A,0x00, 0xE6,0x00,
+0xE6,0xE5, 0x25,0x14, 0x00,0x76, 0x83,0x16, 0xFF,0x1C, 0x84,0x96, 0xFE,0xBC, 0x06,0x18,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x83,0x16,
+0xFE,0xDC, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x74, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x72, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x70, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x6E, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x6C, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x6A, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x25,0x14, 0x00,0x68, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0xC7,0x1C, 0x32,0x00, 0x97,0x13,
+0xFF,0xFC, 0x94,0x93, 0xFF,0xFC, 0x26,0x14, 0x00,0x60, 0x96,0x13, 0xFF,0xFC, 0x96,0x16,
+0xFE,0x6C, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x87,0x16,
+0xFF,0xA0, 0x86,0x16, 0xFE,0x6C, 0x84,0x96, 0xFE,0xCC, 0x23,0x14, 0x00,0x5E, 0x93,0x16,
+0xFE,0x5C, 0x75,0x99, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x74,0x15, 0x00,0x1E, 0x74,0x20,
+0xFF,0xE5, 0x73,0x15, 0x00,0x1E, 0x73,0x18, 0xFF,0xE5, 0x93,0x16, 0xFE,0xAC, 0x83,0x16,
+0xFE,0x5C, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x05,0x24, 0x00,0x02, 0x06,0xA8,
+0x00,0x02, 0x74,0x95, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFE,0xB4, 0x74,0x95,
+0x00,0x1E, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x87,0x1A,
+0x00,0x00, 0x74,0xA4, 0xFF,0xE5, 0x94,0x96, 0xFE,0xA4, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xA4, 0x24,0x94,
+0x00,0x5A, 0x94,0x96, 0xFE,0x5C, 0x76,0x25, 0x00,0x1E, 0x83,0x16, 0xFE,0xB4, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x26,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16, 0xFF,0xA8, 0x24,0x94, 0x00,0x56, 0x94,0x96,
+0xFE,0x5C, 0x76,0x25, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x47,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0xAC, 0x23,0x14, 0x00,0x52, 0x93,0x16, 0xFE,0x5C, 0x76,0x19, 0x00,0x1E, 0x84,0x96,
+0xFE,0xAC, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1A, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0xB0, 0x83,0x16, 0xFE,0xA4, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xE0,0x00, 0xEA,0x8C, 0xF7,0x37,
+0x28,0x00, 0x84,0x96, 0xFE,0xCC, 0x00,0x00, 0x00,0x01, 0x04,0xA4, 0x00,0x36, 0x94,0x96,
+0xFE,0x5C, 0x87,0x26, 0x00,0x00, 0x76,0xA5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A, 0x00,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xEA,0x8D, 0x00,0x00, 0x00,0x01, 0x83,0x16, 0xFE,0xCC, 0x84,0x96,
+0xFF,0x1C, 0x06,0x18, 0x00,0x3A, 0x85,0xB2, 0x00,0x00, 0x07,0x24, 0x00,0x3A, 0x86,0xBA,
+0x00,0x00, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC5,0xAC, 0x67,0xC0, 0xC6,0xB4, 0x77,0xC0, 0x75,0xAD, 0xFF,0xF0, 0x76,0xB5,
+0xFF,0xF0, 0xC0,0x2E, 0x6A,0x00, 0xEC,0x00, 0xE7,0x64, 0xF5,0x02, 0x00,0x02, 0xF5,0x02,
+0x00,0x01, 0x83,0x16, 0xFF,0x1C, 0x00,0x00, 0x00,0x01, 0x07,0x18, 0x00,0x36, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0x20,0x36, 0x00,0x02, 0xE6,0x00, 0xE7,0x9C, 0x00,0x00, 0x00,0x01, 0x20,0x2A,
+0x00,0x01, 0xE6,0x00, 0xEA,0x8D, 0x00,0x00, 0x00,0x01, 0x84,0x96, 0xFE,0x5C, 0x83,0x16,
+0xFF,0x1C, 0xF5,0x27, 0x28,0x00, 0x06,0x18, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x25,0x14, 0x00,0xA6, 0x84,0x96, 0xFE,0x9C, 0x83,0x16,
+0xFE,0xDC, 0x06,0x30, 0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0xA4, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0xA2, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0xA0, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x9E, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x9C, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x25,0x14, 0x00,0x9A, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32,
+0x00,0x00, 0x25,0x14, 0x00,0x98, 0x76,0x31, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B, 0x28,0x00, 0xC7,0x1C, 0x32,0x00, 0x97,0x13,
+0xFF,0xFC, 0x94,0x93, 0xFF,0xFC, 0x26,0x14, 0x00,0x90, 0x96,0x13, 0xFF,0xFC, 0x96,0x16,
+0xFE,0x6C, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xD0,0xDC, 0x97,0x93, 0xFF,0xFC, 0x87,0x16,
+0xFF,0x70, 0x86,0x16, 0xFE,0x6C, 0x84,0x96, 0xFE,0xCC, 0x23,0x94, 0x00,0x8E, 0x75,0x9D,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0x73,0x15, 0x00,0x1E, 0x73,0x18, 0xFF,0xE5, 0x93,0x16,
+0xFE,0x94, 0x74,0x15, 0x00,0x1E, 0x74,0x20, 0xFF,0xE5, 0x73,0x15, 0x00,0x1E, 0x73,0x18,
+0xFF,0xE5, 0x93,0x16, 0xFE,0x84, 0x83,0x16, 0xFE,0x94, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x05,0x24, 0x00,0x02, 0x06,0xA8, 0x00,0x02, 0x74,0x95, 0x00,0x1E, 0x74,0xA4,
+0xFF,0xE5, 0x94,0x96, 0xFE,0x8C, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x2B,
+0x28,0x00, 0x84,0x96, 0xFE,0xC4, 0x87,0x1E, 0x00,0x00, 0x75,0x25, 0x00,0x1E, 0xC7,0x38,
+0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0x74, 0x23,0x94, 0x00,0x8A, 0x76,0x1D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x84,0x96,
+0xFE,0x8C, 0x75,0x28, 0xFF,0xE5, 0xC7,0x38, 0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37,
+0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x83,0x16, 0xFE,0x84, 0xC7,0x38,
+0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4, 0x00,0x02, 0x87,0x16,
+0xFF,0x78, 0x23,0x94, 0x00,0x86, 0x76,0x1D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x47,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1E, 0x00,0x00, 0x06,0xB4,
+0x00,0x02, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x06,0xB4,
+0x00,0x02, 0x87,0x16, 0xFF,0x7C, 0x23,0x94, 0x00,0x82, 0x76,0x1D, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1E,
+0x00,0x00, 0x06,0xB4, 0x00,0x02, 0x84,0x96, 0xFE,0xC4, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x16, 0xFF,0x80, 0x06,0xB4, 0x00,0x02, 0xC7,0x38,
+0x37,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x26, 0x00,0x00, 0xF3,0x02,
+0x00,0xFF, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xE8, 0xC6,0xB8, 0x34,0x00, 0xF7,0x02,
+0x00,0x80, 0xC7,0x34, 0x74,0x00, 0x77,0x39, 0x00,0x10, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A,
+0x00,0x00, 0xE6,0x00, 0xEA,0x61, 0x27,0x00, 0x01,0x00, 0xC6,0xB4, 0x75,0x80, 0x84,0x96,
+0xFE,0xCC, 0x00,0x00, 0x00,0x01, 0x07,0x24, 0x00,0x38, 0xF6,0xBB, 0x28,0x00, 0x94,0x93,
+0xFF,0xFC, 0x83,0x16, 0xFF,0x24, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xD4,0x2C, 0x97,0x93, 0xFF,0xFC, 0x84,0x96, 0xFE,0xD4, 0x00,0x00,
+0x00,0x01, 0x04,0xA4, 0x00,0x01, 0xE0,0x00, 0xE3,0x3C, 0x94,0x96, 0xFE,0xD4, 0xF4,0x02,
+0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16,
+0x00,0x08, 0x86,0x96, 0x00,0x0C, 0xF5,0x02, 0xFF,0xFC, 0x85,0x96, 0x00,0x04, 0x84,0x16,
+0x00,0x10, 0xF4,0x84, 0xE0,0x00, 0x07,0x30, 0x00,0x02, 0x94,0xB2, 0x00,0x10, 0xF4,0x84,
+0xE0,0x04, 0x06,0xB4, 0x00,0x03, 0x94,0xB2, 0x00,0x14, 0xF4,0x84, 0xE0,0x1C, 0xC6,0xB4,
+0x54,0x00, 0x94,0xB2, 0x00,0x18, 0xF4,0x82, 0x00,0x05, 0xF4,0xB3, 0x28,0x00, 0xF4,0x82,
+0x00,0x01, 0xF4,0xBB, 0x28,0x00, 0x27,0x34, 0x00,0x08, 0x97,0x32, 0x00,0x04, 0x86,0x16,
+0x00,0x00, 0x07,0x2C, 0x00,0x03, 0xC7,0x38, 0x54,0x00, 0xC6,0xB8, 0x68,0x00, 0x96,0x93,
+0xFF,0xFC, 0xC6,0x30, 0x72,0x00, 0x96,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x03, 0xC5,0xAC,
+0x74,0x00, 0xF7,0x02, 0x00,0x04, 0xC7,0x38, 0x5A,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xC1,0x20, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x14, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x18, 0x87,0x16, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0x83,0xBA, 0x00,0x00, 0x84,0x96, 0x00,0x00, 0x93,0x96, 0xFF,0xF0, 0xF3,0x84,
+0x6E,0x54, 0x87,0x3A, 0x00,0x04, 0x93,0x96, 0xFF,0xEC, 0x97,0x16, 0xFF,0xF4, 0x90,0x13,
+0xFF,0xFC, 0x27,0x1C, 0x00,0x02, 0x97,0x13, 0xFF,0xFC, 0x07,0x24, 0x00,0x20, 0x97,0x13,
+0xFF,0xFC, 0x94,0x96, 0xFF,0xE4, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93,
+0xFF,0xFC, 0x84,0x96, 0xFF,0xE4, 0x83,0x96, 0x00,0x08, 0x87,0x26, 0x00,0x18, 0x85,0x16,
+0xFF,0xEC, 0xC0,0x3A, 0x3A,0x00, 0xEE,0x00, 0xEC,0x7C, 0xF5,0x82, 0x00,0x01, 0x87,0x26,
+0x00,0x18, 0x83,0x96, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0xC0,0x1E, 0x72,0x00, 0xE6,0x00,
+0xEC,0x7C, 0xC5,0x84, 0x00,0x00, 0x86,0xA6, 0x00,0x10, 0x87,0x16, 0xFF,0xF0, 0xF6,0x02,
+0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xEC,0x1C, 0x04,0x24, 0x00,0x10, 0x86,0xA6,
+0x00,0x14, 0x87,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x00,
+0xEC,0x20, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0xEC,0x2D, 0x00,0x00, 0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0xA2, 0x00,0x00, 0x87,0x16,
+0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xEC,0x68, 0xF6,0x02,
+0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x00, 0xEC,0x70, 0x20,0x32, 0x00,0x00, 0x86,0xA2,
+0x00,0x04, 0x87,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00,
+0xEC,0x71, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0xEC,0x81, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00,
+0xEC,0xAC, 0xF7,0x02, 0x00,0x01, 0xF7,0x04, 0x42,0x9C, 0xF6,0x06, 0x42,0x9C, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0xF7,0x02, 0x00,0x01, 0x97,0x2A, 0x00,0x08, 0x83,0xA6,
+0x00,0x0C, 0x77,0x2C, 0xFF,0xE1, 0x93,0xAA, 0x00,0x0C, 0x97,0x2A, 0x00,0x1C, 0x83,0xA6,
+0x00,0x1C, 0xF7,0x04, 0x6E,0x50, 0x93,0xAA, 0x00,0x20, 0x83,0xBA, 0x1D,0xDC, 0xF6,0x82,
+0x00,0x00, 0x93,0xAA, 0x00,0x2C, 0x83,0x96, 0x00,0x0C, 0xC5,0xB4, 0x00,0x00, 0x93,0xAA,
+0x00,0x30, 0x83,0xBA, 0x00,0x10, 0xC6,0x34, 0x00,0x00, 0x93,0xAA, 0x00,0x24, 0x87,0x3A,
+0x00,0x14, 0x00,0x00, 0x00,0x01, 0x97,0x2A, 0x00,0x28, 0x20,0x36, 0x00,0x1F, 0xEE,0x00,
+0xED,0x1C, 0xC7,0x30, 0x50,0x00, 0x07,0x38, 0x00,0x34, 0x95,0xBA, 0x00,0x00, 0x06,0x30,
+0x00,0x04, 0xE0,0x00, 0xEC,0xFC, 0x06,0xB4, 0x00,0x01, 0x83,0x96, 0x00,0x10, 0x76,0xA5,
+0x00,0x1E, 0x93,0x93, 0xFF,0xFC, 0xF3,0x82, 0x00,0xB4, 0x93,0x93, 0xFF,0xFC, 0x95,0x13,
+0xFF,0xFC, 0x87,0x26, 0x00,0x20, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xEA,0xB8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x14, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x18, 0x87,0x16,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x86,0x3A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x96,0x16,
+0xFF,0xF0, 0x87,0x3A, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x97,0x16, 0xFF,0xF4, 0xF6,0x02,
+0x1D,0xE0, 0x96,0x13, 0xFF,0xFC, 0x86,0x16, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x96,0x13,
+0xFF,0xFC, 0xF6,0x04, 0x6E,0x50, 0x00,0x00, 0x00,0x01, 0x96,0x13, 0xFF,0xFC, 0x26,0x14,
+0x00,0x10, 0x96,0x16, 0xFF,0xEC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x26,0xF8, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x84, 0x6E,0x50, 0xF6,0x02, 0x00,0x00, 0x87,0x36, 0x1D,0xD8, 0x96,0x16,
+0xFF,0xE4, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF6,0x86, 0x42,0xC0, 0xF7,0x37, 0x28,0x00, 0x86,0x16, 0xFF,0xEC, 0x00,0x00,
+0x00,0x01, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xDB,0xB4, 0x97,0x93,
+0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xEE,0x4D, 0x00,0x00, 0x00,0x01, 0x86,0x16,
+0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xD5,0xA0, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00, 0xEE,0x4D, 0x00,0x00,
+0x00,0x01, 0xF6,0x02, 0x00,0x01, 0x96,0x16, 0xFF,0xE4, 0x84,0x16, 0xFF,0xE4, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x96, 0x00,0x04, 0x86,0x16,
+0x00,0x00, 0x87,0x36, 0x00,0x08, 0x85,0x96, 0x00,0x08, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xEE,0x99, 0x20,0x3A, 0x00,0x03, 0xE6,0x00, 0xEE,0xE9, 0xF4,0x02, 0x00,0x00, 0xE0,0x00,
+0xEF,0x0C, 0x00,0x00, 0x00,0x01, 0x77,0xB0, 0x00,0x1F, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0xEF,0x0D, 0xF4,0x02, 0x00,0x00, 0x85,0x16, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x95,0x13,
+0xFF,0xFC, 0x85,0x16, 0x00,0x10, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x85,0x16,
+0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x95,0x93, 0xFF,0xFC, 0x96,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xEB,0x60, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0xEF,0x0C, 0x00,0x00, 0x00,0x01, 0x77,0xB0, 0x00,0x1E, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0xEF,0x0D, 0x00,0x00, 0x00,0x01, 0x95,0x93, 0xFF,0xFC, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xED,0x74, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x18, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x18, 0xF4,0x82, 0x00,0x00, 0x86,0x96,
+0x00,0x00, 0xF6,0x04, 0x4A,0xA0, 0x23,0x94, 0x00,0x10, 0x84,0x36, 0x00,0x00, 0x96,0x16,
+0xFF,0xE4, 0xF7,0x04, 0x4A,0x9C, 0x94,0x16, 0xFF,0xF0, 0x85,0x36, 0x00,0x04, 0xC0,0x32,
+0x72,0x00, 0xEC,0x00, 0xF0,0x14, 0x95,0x16, 0xFF,0xF4, 0x77,0x31, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0x77,0x39, 0x00,0x02, 0xF3,0x06, 0x4A,0x98, 0xC6,0xB8, 0x30,0x00, 0x06,0xB4,
+0x00,0x0C, 0xC5,0x84, 0x00,0x00, 0x87,0x36, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x42,0x00, 0xE6,0x00, 0xEF,0xA4, 0xC6,0x24, 0x00,0x00, 0x87,0x36, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0xE6,0x00, 0xEF,0xA8, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00, 0xEF,0xB5, 0x00,0x00, 0x00,0x01, 0xF5,0x82,
+0x00,0x00, 0x86,0x36, 0x00,0x00, 0x87,0x16, 0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC0,0x32,
+0x72,0x00, 0xE2,0x00, 0xEF,0xF0, 0xF5,0x02, 0x00,0x00, 0xC0,0x32, 0x72,0x00, 0xE6,0x00,
+0xEF,0xF8, 0x20,0x2A, 0x00,0x00, 0x86,0xB6, 0x00,0x04, 0x87,0x16, 0xFF,0xF4, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x00, 0xEF,0xF9, 0x20,0x2A, 0x00,0x00, 0xF5,0x02,
+0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x00, 0xF0,0x09, 0x20,0x2E, 0x00,0x00, 0xF5,0x82,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x00, 0xF0,0x18, 0x20,0x26, 0x00,0x00, 0xF4,0x82,
+0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x00, 0xF0,0x4D, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xE4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xE8, 0xE0,0x00, 0xF0,0xB0, 0x96,0x96, 0xFF,0xEC, 0x27,0x14, 0x00,0x1C, 0x97,0x13,
+0xFF,0xFC, 0x93,0x93, 0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x20,0x22, 0x00,0x00, 0xE6,0x00,
+0xF0,0xAD, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xE4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xE8, 0x96,0x96, 0xFF,0xEC, 0xF7,0x05,
+0x4A,0xA0, 0xE0,0x00, 0xF0,0xB4, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32,
+0x00,0x00, 0xE6,0x00, 0xF1,0x21, 0xF4,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xE8, 0xF6,0x06,
+0x42,0xC8, 0x76,0xB9, 0x00,0x02, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xA7,0x36,
+0x60,0x02, 0x83,0x16, 0x00,0x04, 0xC6,0xB4, 0x60,0x00, 0x76,0x35, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x05,0x34, 0x00,0x02, 0x75,0xA9, 0x00,0x1E, 0xC7,0x38, 0x67,0xC0, 0x77,0x38,
+0xFF,0xF0, 0x97,0x1A, 0x00,0x00, 0x87,0x2A, 0x00,0x00, 0x75,0xAC, 0xFF,0xE5, 0x83,0x16,
+0x00,0x08, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x97,0x1A, 0x00,0x00, 0x83,0x16,
+0x00,0x0C, 0x06,0xB4, 0x00,0x04, 0xE0,0x00, 0xF1,0x24, 0x96,0x9A, 0x00,0x00, 0xF4,0x02,
+0x00,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x10, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0xB9,0x00, 0x00,0x00, 0xBA,0x00, 0x00,0x00,
+0xBB,0x00, 0x00,0x00, 0xBC,0x00, 0x00,0x00, 0xBD,0x00, 0x00,0x00, 0xBE,0x00, 0x00,0x00,
+0xBF,0x00, 0x00,0x00, 0x80,0x00, 0x00,0x00, 0x81,0x00, 0x00,0x00, 0x82,0x00, 0x00,0x00,
+0x83,0x00, 0x00,0x00, 0x84,0x00, 0x00,0x00, 0x85,0x00, 0x00,0x00, 0x86,0x00, 0x00,0x00,
+0x87,0x00, 0xB9,0xB9, 0xB9,0xBA, 0xB9,0xBB, 0xB9,0xBC, 0xB9,0xBD, 0xB9,0xBE, 0xB9,0xBF,
+0xB9,0x80, 0xB9,0x81, 0xB9,0x82, 0xB9,0x83, 0xB9,0x84, 0xB9,0x85, 0xB9,0x86, 0xB9,0x87,
+0xBA,0xB9, 0xBA,0xBA, 0xBA,0xBB, 0xBA,0xBC, 0xBA,0xBD, 0xBA,0xBE, 0xBA,0xBF, 0xBA,0x80,
+0xBA,0x81, 0xBA,0x82, 0xBA,0x83, 0xBA,0x84, 0xBA,0x85, 0xBA,0x86, 0xBA,0x87, 0xBB,0xB9,
+0xBB,0xBA, 0xBB,0xBB, 0xBB,0xBC, 0xBB,0xBD, 0xBB,0xBE, 0xBB,0xBF, 0xBB,0x80, 0xBB,0x81,
+0xBB,0x82, 0xBB,0x83, 0xBB,0x84, 0xBB,0x85, 0xBB,0x86, 0xBB,0x87, 0xBC,0xB9, 0xBC,0xBA,
+0xBC,0xBB, 0xBC,0xBC, 0xBC,0xBD, 0xBC,0xBE, 0xBC,0xBF, 0xBC,0x80, 0xBC,0x81, 0xBC,0x82,
+0xBC,0x83, 0xBC,0x84, 0xBC,0x85, 0xBC,0x86, 0xBC,0x87, 0xBD,0xB9, 0xBD,0xBA, 0xBD,0xBB,
+0xBD,0xBC, 0xBD,0xBD, 0xBD,0xBE, 0xBD,0xBF, 0xBD,0x80, 0xBD,0x81, 0xBD,0x82, 0xBD,0x83,
+0xBD,0x84, 0xBD,0x85, 0xBD,0x86, 0xBD,0x87, 0xBE,0xB9, 0xBE,0xBA, 0xBE,0xBB, 0xBE,0xBC,
+0xBE,0xBD, 0xBE,0xBE, 0xBE,0xBF, 0xBE,0x80, 0xBE,0x81, 0xBE,0x82, 0xBE,0x83, 0xBE,0x84,
+0xBE,0x85, 0xBE,0x86, 0xBE,0x87, 0xBF,0xB9, 0xBF,0xBA, 0xBF,0xBB, 0xBF,0xBC, 0xBF,0xBD,
+0xBF,0xBE, 0xBF,0xBF, 0xBF,0x80, 0xBF,0x81, 0xBF,0x82, 0xBF,0x83, 0xBF,0x84, 0xBF,0x85,
+0xBF,0x86, 0xBF,0x87, 0x80,0xB9, 0x80,0xBA, 0x80,0xBB, 0x80,0xBC, 0x80,0xBD, 0x80,0xBE,
+0x80,0xBF, 0x80,0x80, 0x80,0x81, 0x80,0x82, 0x80,0x83, 0x80,0x84, 0x80,0x85, 0x80,0x86,
+0x80,0x87, 0x81,0xB9, 0x81,0xBA, 0x81,0xBB, 0x81,0xBC, 0x81,0xBD, 0x81,0xBE, 0x81,0xBF,
+0x81,0x80, 0x81,0x81, 0x81,0x82, 0x81,0x83, 0x81,0x84, 0x81,0x85, 0x81,0x86, 0x81,0x87,
+0x82,0xB9, 0x82,0xBA, 0x82,0xBB, 0x82,0xBC, 0x82,0xBD, 0x82,0xBE, 0x82,0xBF, 0x82,0x80,
+0x82,0x81, 0x82,0x82, 0x82,0x83, 0x82,0x84, 0x82,0x85, 0x82,0x86, 0x82,0x87, 0x83,0xB9,
+0x83,0xBA, 0x83,0xBB, 0x83,0xBC, 0x83,0xBD, 0x83,0xBE, 0x83,0xBF, 0x83,0x80, 0x83,0x81,
+0x83,0x82, 0x83,0x83, 0x83,0x84, 0x83,0x85, 0x83,0x86, 0x83,0x87, 0x84,0xB9, 0x84,0xBA,
+0x84,0xBB, 0x84,0xBC, 0x84,0xBD, 0x84,0xBE, 0x84,0xBF, 0x84,0x80, 0x84,0x81, 0x84,0x82,
+0x84,0x83, 0x84,0x84, 0x84,0x85, 0x84,0x86, 0x84,0x87, 0x85,0xB9, 0x85,0xBA, 0x85,0xBB,
+0x85,0xBC, 0x85,0xBD, 0x85,0xBE, 0x85,0xBF, 0x85,0x80, 0x85,0x81, 0x85,0x82, 0x85,0x83,
+0x85,0x84, 0x85,0x85, 0x85,0x86, 0x85,0x87, 0x86,0xB9, 0x86,0xBA, 0x86,0xBB, 0x86,0xBC,
+0x86,0xBD, 0x86,0xBE, 0x86,0xBF, 0x86,0x80, 0x86,0x81, 0x86,0x82, 0x86,0x83, 0x86,0x84,
+0x86,0x85, 0x86,0x86, 0x86,0x87, 0x87,0xB9, 0x87,0xBA, 0x87,0xBB, 0x87,0xBC, 0x87,0xBD,
+0x87,0xBE, 0x87,0xBF, 0x87,0x80, 0x87,0x81, 0x87,0x82, 0x87,0x83, 0x87,0x84, 0x87,0x85,
+0x87,0x86, 0x87,0x87, 0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x18, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xF3,0x7D, 0xF6,0x06, 0x42,0x96, 0xF7,0x04, 0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF4,0x02, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xF5,0xE0, 0xF7,0x33, 0x28,0x00, 0xF3,0x84, 0x6F,0x30, 0x90,0x13,
+0xFF,0xFC, 0x27,0x1C, 0x00,0x02, 0x97,0x13, 0xFF,0xFC, 0x83,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x93,0x96, 0xFF,0xEC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xEC, 0xF7,0x02, 0x00,0x00, 0x97,0x1E,
+0x00,0x08, 0x83,0x16, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x93,0x1E, 0x00,0x0C, 0x83,0x16,
+0x00,0x08, 0x04,0x9C, 0x00,0x22, 0x93,0x1E, 0x00,0x1C, 0x83,0x16, 0x00,0x0C, 0x93,0x96,
+0xFF,0xF4, 0x87,0x1A, 0x00,0x00, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x06,0x18,
+0x00,0x02, 0x75,0xB1, 0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0x06,0x9C, 0x00,0x20, 0xF7,0x37, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x96,0x96,
+0xFF,0xE4, 0x75,0x35, 0x00,0x1E, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x24, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x26, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x28, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x2A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x2C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x04,0x9C, 0x00,0x2E, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x04,0x9C, 0x00,0x30, 0x76,0x31,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27,
+0x28,0x00, 0x87,0x1E, 0x00,0x20, 0x75,0x28, 0xFF,0xE5, 0xC7,0x38, 0x57,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x20,0x3A, 0x00,0x08, 0xEE,0x00, 0xF5,0x98, 0xF3,0x06, 0x14,0xD8, 0x83,0x16,
+0xFF,0xE4, 0x87,0x1E, 0x00,0x20, 0x76,0x99, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x25,0xB8, 0x00,0x01, 0xC4,0xAC, 0x58,0x00, 0x04,0x24,
+0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xEC,0x00, 0xF5,0x95, 0xF5,0x02, 0x00,0x00, 0x83,0x16,
+0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0x06,0x18, 0x00,0x02, 0xA7,0x32, 0x58,0x02, 0xC6,0xB0,
+0x58,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xE8, 0xC6,0xB0, 0x40,0x00, 0x77,0xB8, 0x00,0x18, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x00,
+0xF5,0x7D, 0xF7,0x37, 0x68,0x00, 0xF5,0x02, 0xFF,0xFF, 0xC7,0x30, 0x48,0x00, 0xF5,0x3B,
+0x68,0x00, 0x24,0xA4, 0x00,0x02, 0x24,0x20, 0x00,0x02, 0xE0,0x00, 0xF5,0x34, 0x25,0xAC,
+0x00,0x01, 0xF3,0x06, 0x14,0xD8, 0x93,0x13, 0xFF,0xFC, 0xF3,0x02, 0x00,0x34, 0x93,0x13,
+0xFF,0xFC, 0x83,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x1A, 0x00,0x00, 0x76,0x99, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xEA,0xB8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x10, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x10, 0xF7,0x04,
+0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xF6,0x39, 0xF6,0x06,
+0x42,0x96, 0xF7,0x04, 0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF4,0x02,
+0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00,
+0xF7,0x48, 0xF7,0x33, 0x28,0x00, 0xF5,0x04, 0x6F,0x30, 0x00,0x00, 0x00,0x01, 0x95,0x16,
+0xFF,0xF4, 0x90,0x13, 0xFF,0xFC, 0x27,0x28, 0x00,0x02, 0x97,0x13, 0xFF,0xFC, 0x85,0x96,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0x00,0x04, 0xF6,0x02, 0x00,0x00, 0x86,0xAA,
+0x00,0x00, 0x77,0x29, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0xF7,0x02, 0x00,0x01, 0xC0,0x36, 0x74,0x00, 0xE6,0x00, 0xF6,0x99, 0x96,0x96,
+0xFF,0xEC, 0xC6,0x38, 0x00,0x00, 0x96,0x13, 0xFF,0xFC, 0x85,0x96, 0xFF,0xEC, 0x85,0x16,
+0xFF,0xF4, 0x47,0x2C, 0xFF,0xFE, 0x07,0x38, 0x00,0x02, 0xC7,0x28, 0x72,0x00, 0x97,0x13,
+0xFF,0xFC, 0x85,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0xFF,0xF4, 0xF7,0x02,
+0x00,0x02, 0x97,0x2A, 0x00,0x08, 0x85,0x96, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x95,0xAA,
+0x00,0x0C, 0x85,0x96, 0x00,0x0C, 0x00,0x00, 0x00,0x01, 0x95,0xAA, 0x00,0x1C, 0xF5,0x06,
+0x14,0xD8, 0x95,0x13, 0xFF,0xFC, 0xF5,0x82, 0x00,0x20, 0x95,0x93, 0xFF,0xFC, 0x85,0x16,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x85,0x96, 0x00,0x00, 0x85,0x16,
+0xFF,0xEC, 0x87,0x2E, 0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38, 0x50,0x00, 0x97,0x13, 0xFF,0xFC, 0x85,0x96,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xEA,0xB8, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x10, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x85,0x96, 0x00,0x00, 0x85,0x16, 0x00,0x04, 0x87,0x16, 0x00,0x08, 0xF6,0x02,
+0xFF,0xFC, 0x06,0xA8, 0x00,0x03, 0xC6,0xB4, 0x64,0x00, 0x07,0x38, 0x00,0x03, 0xC7,0x38,
+0x64,0x00, 0xC7,0x34, 0x70,0x00, 0x97,0x13, 0xFF,0xFC, 0xC5,0xAC, 0x6A,0x00, 0x95,0x93,
+0xFF,0xFC, 0xF7,0x02, 0x00,0x03, 0xC5,0x28, 0x74,0x00, 0xF7,0x02, 0x00,0x04, 0xC7,0x38,
+0x52,0x00, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x14,0xD8, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x10, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x10, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xF8,0x0D, 0xF6,0x06, 0x42,0x96, 0xF7,0x04, 0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF4,0x02, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xF9,0x20, 0xF7,0x33, 0x28,0x00, 0xF5,0x04, 0x6F,0x30, 0x00,0x00,
+0x00,0x01, 0x95,0x16, 0xFF,0xF4, 0x90,0x13, 0xFF,0xFC, 0x27,0x28, 0x00,0x02, 0x97,0x13,
+0xFF,0xFC, 0x85,0x96, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x95,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x85,0x16, 0x00,0x04, 0xF6,0x02,
+0x00,0x00, 0x86,0xAA, 0x00,0x00, 0x77,0x29, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB5, 0xFF,0xF0, 0xF7,0x02, 0x00,0x01, 0xC0,0x36, 0x74,0x00, 0xE6,0x00,
+0xF8,0x6D, 0x96,0x96, 0xFF,0xEC, 0xC6,0x38, 0x00,0x00, 0x96,0x13, 0xFF,0xFC, 0x85,0x96,
+0xFF,0xEC, 0x85,0x16, 0xFF,0xF4, 0x47,0x2C, 0xFF,0xFE, 0x07,0x38, 0x00,0x02, 0xC7,0x28,
+0x72,0x00, 0x97,0x13, 0xFF,0xFC, 0x85,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x85,0x16,
+0xFF,0xF4, 0xF5,0x82, 0x00,0x06, 0xF5,0xAB, 0x28,0x00, 0x85,0x96, 0x00,0x08, 0x07,0x28,
+0x00,0x02, 0x95,0xAA, 0x00,0x04, 0x05,0x14, 0x00,0x0E, 0x85,0x2A, 0x00,0x00, 0x77,0xA9,
+0x00,0x1E, 0x77,0xBC, 0xFF,0xE5, 0xC5,0x28, 0x7F,0xC0, 0x75,0x29, 0xFF,0xF0, 0xF5,0x3B,
+0x28,0x00, 0xF5,0x86, 0x14,0xD8, 0x95,0x93, 0xFF,0xFC, 0xF5,0x02, 0x00,0x08, 0x95,0x13,
+0xFF,0xFC, 0x85,0x96, 0x00,0x00, 0x85,0x16, 0xFF,0xEC, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0xC7,0x38,
+0x50,0x00, 0x97,0x13, 0xFF,0xFC, 0x85,0x96, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xF7,0x5C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x10, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x08, 0xF7,0x04,
+0x75,0xEC, 0x83,0x96, 0x00,0x04, 0x20,0x3A, 0x00,0x00, 0xE6,0x00, 0xFA,0x64, 0xF6,0x06,
+0x42,0x96, 0xF5,0x04, 0x6F,0x30, 0x90,0x13, 0xFF,0xFC, 0x27,0x28, 0x00,0x02, 0x97,0x13,
+0xFF,0xFC, 0x83,0x16, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0xF4, 0x95,0x16, 0xFF,0xF0, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93,
+0xFF,0xFC, 0x85,0x16, 0xFF,0xF0, 0xF3,0x02, 0x00,0x07, 0x83,0x96, 0xFF,0xF4, 0xF3,0x2B,
+0x28,0x00, 0x07,0x28, 0x00,0x02, 0xF3,0x02, 0x00,0x01, 0xF3,0x3B, 0x28,0x00, 0x87,0x1E,
+0x00,0x00, 0x76,0x9D, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x05,0x9C, 0x00,0x02, 0x76,0x2D,
+0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0x74,0x9D, 0x00,0x1E, 0x74,0xA4, 0xFF,0xE5, 0x04,0x1C,
+0x00,0x06, 0x83,0x16, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0x06,0xA8,
+0x00,0x04, 0xF7,0x37, 0x28,0x00, 0x87,0x2E, 0x00,0x00, 0x06,0xA8, 0x00,0x06, 0x75,0xA1,
+0x00,0x1E, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x1E,
+0x00,0x04, 0x75,0xAC, 0xFF,0xE5, 0x06,0xA8, 0x00,0x08, 0x76,0x19, 0x00,0x1E, 0xC7,0x38,
+0x4F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0x87,0x22, 0x00,0x00, 0x06,0xA8,
+0x00,0x0A, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x37, 0x28,0x00, 0xF3,0x06,
+0x14,0xD8, 0x93,0x13, 0xFF,0xFC, 0xF3,0x02, 0x00,0x0C, 0x93,0x13, 0xFF,0xFC, 0x83,0x16,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x1A, 0x00,0x00, 0x76,0x30, 0xFF,0xE5, 0xC7,0x38,
+0x67,0xC0, 0x77,0x39, 0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xF7,0x5C, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00, 0xFA,0x84, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xF4,0x02,
+0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xF7,0x33,
+0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x48, 0xF7,0x04, 0x75,0xEC, 0x85,0x96, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x00,
+0xFD,0x98, 0xF6,0x06, 0x42,0x96, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0x24,0x14, 0x00,0x1E, 0x06,0x2C, 0x00,0x02, 0x75,0x31,
+0x00,0x1E, 0x24,0x94, 0x00,0x20, 0x75,0x28, 0xFF,0xE5, 0xF3,0x84, 0x6E,0x50, 0xC7,0x38,
+0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x32, 0x00,0x00, 0x93,0x96,
+0xFF,0xC4, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x1C, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x1A, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x18, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x16, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x14, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x24,0x14,
+0x00,0x12, 0x06,0x30, 0x00,0x02, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x06,0x30,
+0x00,0x02, 0x87,0x32, 0x00,0x00, 0x24,0x14, 0x00,0x10, 0x76,0x31, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x23, 0x28,0x00, 0x90,0x13,
+0xFF,0xFC, 0x27,0x1C, 0x00,0x02, 0x97,0x13, 0xFF,0xFC, 0x94,0x93, 0xFF,0xFC, 0x95,0x96,
+0xFF,0xBC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xCD,0xB8, 0x97,0x93, 0xFF,0xFC, 0x85,0x96,
+0xFF,0xBC, 0x23,0x14, 0x00,0x36, 0x24,0x94, 0x00,0x38, 0x73,0xA5, 0x00,0x1E, 0x73,0x9C,
+0xFF,0xE5, 0xF4,0x04, 0x42,0xC0, 0xF6,0x86, 0x42,0xC0, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0x87,0x2E, 0x00,0x00, 0x76,0x2D, 0x00,0x1E, 0x76,0x30, 0xFF,0xE5, 0xC4,0x20,
+0x6F,0xC0, 0x74,0x20, 0xFF,0xF0, 0x05,0xAC, 0x00,0x02, 0x75,0x2D, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0xC7,0x38, 0x67,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x27, 0x28,0x00, 0x87,0x2E,
+0x00,0x00, 0xF6,0x04, 0x6E,0x50, 0xC7,0x38, 0x57,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x34, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x32, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x30, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x2E, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x2C, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x23,0x14, 0x00,0x2A, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x76,0xAD,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x05,0xAC, 0x00,0x02, 0x87,0x2E, 0x00,0x00, 0x23,0x14, 0x00,0x28, 0x75,0xAD,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC7,0x38, 0x5F,0xC0, 0x77,0x38, 0xFF,0xF0, 0xF7,0x1B,
+0x28,0x00, 0x87,0x16, 0xFF,0xC8, 0xF6,0x82, 0x00,0x03, 0xC7,0x38, 0x3F,0xC0, 0x96,0xB2,
+0x00,0x08, 0x06,0xB0, 0x1D,0xD8, 0xF4,0x37, 0x28,0x00, 0xF3,0x86, 0x14,0xD8, 0x93,0x93,
+0xFF,0xFC, 0xF3,0x82, 0x1D,0xE0, 0x93,0x93, 0xFF,0xFC, 0x96,0x13, 0xFF,0xFC, 0x77,0x39,
+0xFF,0xF0, 0x97,0x13, 0xFF,0xFC, 0x83,0x96, 0xFF,0xC4, 0x00,0x00, 0x00,0x01, 0x93,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xEA,0xB8, 0x97,0x93, 0xFF,0xFC, 0xE0,0x00,
+0xFD,0xB8, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0x94, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xF4,0x02, 0x00,0x00, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x86,0x16, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x32, 0x00,0x00, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A,
+0x00,0x06, 0xE6,0x00, 0xFE,0x21, 0xF5,0x82, 0x00,0x1E, 0xF7,0x04, 0x42,0xA8, 0xF6,0x06,
+0x42,0xA8, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39,
+0xFF,0xF0, 0x07,0x38, 0x00,0x01, 0xE0,0x00, 0xFE,0x34, 0xF7,0x33, 0x28,0x00, 0xF6,0x05,
+0x6F,0x34, 0x95,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x04, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x16,
+0x00,0x00, 0x85,0x96, 0x00,0x04, 0x87,0x32, 0x00,0x00, 0x76,0xB1, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x20,0x3A, 0x00,0x07, 0xE6,0x00,
+0xFE,0x9D, 0xF4,0x02, 0x00,0x00, 0xF7,0x04, 0x42,0xA8, 0xF6,0x06, 0x42,0xAA, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xE0,0x00, 0xFF,0x1C, 0xF7,0x33, 0x28,0x00, 0x07,0x30, 0x00,0x02, 0x86,0xBA,
+0x00,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5,
+0xFF,0xF0, 0x20,0x36, 0x00,0x01, 0xE6,0x00, 0xFE,0xD5, 0xF6,0x05, 0x6F,0x34, 0x20,0x36,
+0x00,0x02, 0xE6,0x00, 0xFE,0xE5, 0xF5,0x02, 0x00,0x20, 0xE0,0x00, 0xFE,0xFC, 0xF6,0x06,
+0x42,0xAC, 0x20,0x2E, 0x00,0x0C, 0xE6,0x00, 0xFF,0x1C, 0xF4,0x02, 0x00,0x00, 0xF5,0x02,
+0x00,0x1F, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0xFF,0x1C, 0xF4,0x02, 0x00,0x01, 0xF7,0x04, 0x42,0xAC, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0xF4,0x02, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x86,0x96, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x04, 0xF6,0x02, 0x00,0x00, 0x07,0x38, 0x00,0x08, 0x97,0x36, 0x00,0x04, 0x87,0x36,
+0x00,0x08, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xEC,0x00, 0xFF,0x7D, 0xF6,0x85,
+0x6F,0x34, 0x87,0x36, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x03, 0xEE,0x00,
+0xFF,0x80, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x00,
+0xFF,0xBD, 0xF6,0x06, 0x42,0xAE, 0xF7,0x04, 0x6F,0x34, 0x00,0x00, 0x00,0x01, 0x87,0x3A,
+0x00,0x08, 0xF6,0x82, 0xFF,0xEC, 0x77,0x39, 0x00,0x02, 0xA7,0x3A, 0x68,0x02, 0x00,0x00,
+0x00,0x01, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x00, 0xFF,0xD8, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x42,0xAC, 0x76,0xB1,
+0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x07,0x38,
+0x00,0x01, 0xF7,0x33, 0x28,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x17, 0x00,0x00,
+0x00,0x1A, 0x00,0x00, 0x00,0x1D, 0x00,0x00, 0x00,0x18, 0x00,0x00, 0x00,0x00, 0x56,0x65,
+0x72,0x73, 0x69,0x6F, 0x6E,0x53, 0x74,0x72, 0x69,0x6E, 0x67,0x3A, 0x20,0x6D, 0x63,0x70,
+0x2D,0x6C, 0x34,0x76, 0x33,0x20, 0x33,0x2E, 0x30,0x38, 0x63,0x20, 0x44,0x65, 0x63,0x20,
+0x31,0x31, 0x20,0x31, 0x39,0x39, 0x36,0x20, 0x31,0x33, 0x3A,0x30, 0x36,0x3A, 0x31,0x36,
+0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04, 0xE0,0x0C, 0xFF,0x02,
+0x00,0x00, 0x97,0x02, 0xFF,0x84, 0xF7,0x06, 0x0C,0x3E, 0xCF,0xFC, 0x75,0x80, 0xF6,0x02,
+0x00,0x02, 0x96,0x02, 0xFF,0x8C, 0x90,0x02, 0xFF,0x88, 0xF7,0x04, 0xE0,0x20, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x00,0x74, 0xF6,0x82, 0x00,0x00, 0xF6,0x82,
+0x00,0x03, 0x96,0x82, 0xFF,0x98, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x0C, 0xF5,0x02, 0x14,0x94, 0xF5,0x05, 0x7B,0x00, 0xF5,0x0E,
+0xF0,0x14, 0xF5,0x05, 0x7B,0x08, 0xF7,0x06, 0xE0,0x00, 0xF6,0x86, 0x7B,0x68, 0xC7,0x38,
+0x6A,0x00, 0xF7,0x05, 0x7A,0xF0, 0xF5,0x02, 0x00,0x4C, 0xF6,0x82, 0x00,0x00, 0x20,0x36,
+0x00,0x02, 0xEE,0x01, 0x01,0x24, 0xF5,0x05, 0x7A,0xF8, 0xC5,0xB4, 0x00,0x00, 0xC6,0x34,
+0x00,0x00, 0xF7,0x06, 0xE0,0x30, 0xC7,0x2C, 0x70,0x00, 0xF5,0x06, 0x6F,0x44, 0xB7,0x32,
+0x50,0x02, 0x90,0x13, 0xFF,0xFC, 0x97,0x13, 0xFF,0xFC, 0x95,0x96, 0xFF,0xF4, 0x96,0x16,
+0xFF,0xF0, 0x96,0x96, 0xFF,0xEC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x03,0x1C, 0x97,0x93,
+0xFF,0xFC, 0x85,0x96, 0xFF,0xF4, 0x86,0x16, 0xFF,0xF0, 0x86,0x96, 0xFF,0xEC, 0x05,0xAC,
+0x14,0x94, 0x06,0xB4, 0x00,0x01, 0x20,0x36, 0x00,0x02, 0xEE,0x01, 0x00,0xD5, 0x06,0x30,
+0x00,0x04, 0xF5,0x02, 0x00,0x22, 0xF5,0x05, 0x6F,0x58, 0xF0,0x05, 0x6F,0x54, 0xF0,0x05,
+0x6F,0x50, 0xF0,0x05, 0x2D,0x40, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x29,0x58, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02,
+0x00,0x03, 0xF7,0x05, 0xE0,0x08, 0xF7,0x04, 0x7A,0xD8, 0xF6,0x02, 0x00,0x01, 0x96,0x02,
+0xFF,0x94, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x01,0x91, 0xF7,0x06, 0x7A,0xE8, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x03,0xDC, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x7A,0xE8, 0xF6,0x02,
+0x00,0x05, 0xF6,0x3B, 0x28,0x00, 0xF7,0x06, 0x7A,0xE0, 0x86,0x82, 0xFF,0x44, 0xF6,0x02,
+0x00,0x03, 0x20,0x36, 0x00,0x00, 0xE6,0x01, 0x01,0xC9, 0xF6,0x3B, 0x28,0x00, 0xF7,0x04,
+0x6F,0x64, 0x86,0x82, 0xFF,0x44, 0x07,0x38, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x01,
+0x01,0xB0, 0xF7,0x05, 0x6F,0x64, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x00,0x34, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x00,0x8C, 0x97,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x44,0x28, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0xF0, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x0C,0x60, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x04,0x08, 0x97,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x00,0x20, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x0B,0xD8, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1D,0x68, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0x50, 0x97,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x5F,0x68, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x6D,0xEC, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x21,0xD0, 0x97,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x22,0x2C, 0x97,0x93, 0xFF,0xFC, 0x90,0x02,
+0xFF,0x94, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x0B,0xFC, 0x97,0x93, 0xFF,0xFC, 0xF4,0x02,
+0x00,0x00, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x08, 0xF6,0x02, 0x00,0x00, 0xC5,0xB0, 0x00,0x00, 0x20,0x32, 0x00,0x02, 0xEE,0x01,
+0x03,0x08, 0xF5,0x06, 0x6F,0x44, 0xA6,0xAE, 0x50,0x02, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x01, 0xE6,0x01, 0x02,0xFC, 0xF5,0x02,
+0x00,0x02, 0x95,0x13, 0xFF,0xFC, 0x96,0x93, 0xFF,0xFC, 0x95,0x96, 0xFF,0xF4, 0x96,0x16,
+0xFF,0xF0, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x03,0x1C, 0x97,0x93, 0xFF,0xFC, 0x86,0x16,
+0xFF,0xF0, 0x85,0x96, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x05,0xAC, 0x00,0x04, 0xE0,0x01,
+0x02,0xAC, 0x06,0x30, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x87,0x16, 0x00,0x00, 0xF6,0x02, 0x00,0x00, 0xF6,0x82, 0x00,0x08, 0x96,0x3A,
+0x00,0x08, 0x96,0x3A, 0x00,0x0C, 0x96,0x3A, 0x09,0xD8, 0x96,0x3A, 0x09,0xDC, 0x96,0x3A,
+0x0E,0xF4, 0x96,0x3A, 0x0E,0xF8, 0x96,0xBA, 0x14,0x20, 0x96,0x3A, 0x14,0x24, 0x90,0xBA,
+0x14,0x8C, 0x86,0x96, 0x00,0x04, 0x90,0xBA, 0x14,0x90, 0x96,0xBA, 0x00,0x00, 0x96,0x3A,
+0x00,0x04, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96,
+0x00,0x00, 0x87,0x16, 0x00,0x08, 0x86,0x16, 0x00,0x04, 0x77,0x38, 0xFF,0xFF, 0xC5,0x30,
+0x70,0x00, 0xC0,0x32, 0x52,0x00, 0xE4,0x01, 0x03,0xC9, 0x00,0x00, 0x00,0x01, 0x87,0x2E,
+0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0xC0,0x32, 0x52,0x00, 0xE4,0x01,
+0x03,0xA0, 0x05,0xAC, 0x00,0x02, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x02, 0x00,0x01, 0xE0,0x01, 0x03,0xE8, 0xF7,0x05, 0x7A,0xD8, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x02,
+0x00,0x0A, 0xF5,0x05, 0x71,0xCC, 0xF0,0x05, 0x71,0xD4, 0xF0,0x05, 0x71,0xD0, 0xF0,0x05,
+0x71,0xC4, 0xF5,0x02, 0x00,0x01, 0xF6,0x82, 0x00,0x00, 0x20,0x36, 0x00,0x0A, 0xEC,0x01,
+0x04,0x64, 0xF5,0x05, 0x71,0xC8, 0xF5,0x8A, 0x1E,0x00, 0xF6,0x06, 0x71,0xC4, 0x47,0x2C,
+0xFF,0xFC, 0x97,0x32, 0x00,0x18, 0x06,0x30, 0x00,0x04, 0x06,0xB4, 0x00,0x01, 0xF7,0x04,
+0x71,0xCC, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x01, 0x04,0x41, 0x05,0xAC,
+0x21,0x4C, 0xF0,0x05, 0x71,0x98, 0xF5,0x06, 0x6F,0x68, 0x95,0x13, 0xFF,0xFC, 0xF5,0x06,
+0x7B,0x18, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x06, 0x05,0xD4, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x05, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x06, 0x6F,0x68, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x0B,0x70, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x06, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x6F,0x68, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x0B,0xA0, 0x95,0x13,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x05, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x70,0x80, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06,
+0x0B,0x70, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x06, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06,
+0x70,0x80, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x06, 0x05,0x58, 0x95,0x13, 0xFF,0xFC, 0xF5,0x02, 0x00,0x0A, 0x95,0x13,
+0xFF,0xFC, 0xF5,0x06, 0x71,0x0C, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x05,0x58, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04,
+0x71,0xC4, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x05,0x7D, 0xF6,0x86,
+0x71,0xC4, 0xE0,0x01, 0x05,0x94, 0xF7,0x02, 0x00,0x00, 0xF7,0x04, 0x71,0xD0, 0x00,0x00,
+0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x87,0x3A, 0x00,0x18, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x05,0xAC, 0xF7,0x05, 0x7B,0x10, 0xF6,0x06,
+0x71,0x0C, 0xE0,0x01, 0x05,0xC0, 0xF6,0x05, 0x7B,0x18, 0xF6,0x06, 0x6F,0x68, 0xF6,0x05,
+0x7B,0x18, 0x97,0x02, 0xFF,0x48, 0x07,0x38, 0x21,0x28, 0x97,0x02, 0xFF,0x4C, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x10, 0x86,0x82,
+0xFF,0x48, 0xF4,0x86, 0x6F,0x68, 0xF4,0x85, 0x7B,0x18, 0xF5,0x04, 0x7B,0x10, 0x26,0xB4,
+0x00,0x02, 0x85,0xB6, 0x00,0x00, 0x87,0x2A, 0x00,0x00, 0x76,0x29, 0x00,0x1E, 0x76,0x30,
+0xFF,0xE5, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC5,0xAC, 0x6F,0xC0, 0xC7,0x38,
+0x67,0xC0, 0x77,0x39, 0xFF,0xF0, 0x77,0xB8, 0x00,0x10, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x06,0x45, 0x75,0xAC, 0xFF,0xF0, 0xF7,0x04, 0x71,0xAC, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x71,0xAC, 0xF7,0x04, 0x71,0xAC, 0xE0,0x01, 0x08,0xC4, 0xF7,0x02,
+0x00,0x01, 0x77,0x2C, 0xFF,0xF8, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x06,0x71, 0x76,0xA9,
+0x00,0x1E, 0xF7,0x04, 0x71,0xA8, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x71,0xA8, 0xF7,0x04, 0x71,0xA8, 0xE0,0x01, 0x08,0xC4, 0xF7,0x02, 0x00,0x01, 0x87,0x2A,
+0x00,0x00, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x27,0x38,
+0x00,0x04, 0x20,0x3A, 0x00,0x03, 0xE2,0x01, 0x08,0xA4, 0x00,0x00, 0x00,0x01, 0x77,0x39,
+0x00,0x02, 0xF6,0x86, 0x06,0xA4, 0xA6,0xB6, 0x70,0x02, 0x00,0x00, 0x00,0x01, 0xC1,0x34,
+0x00,0x00, 0x00,0x01, 0x06,0xB4, 0x00,0x01, 0x07,0x7C, 0x00,0x01, 0x07,0xEC, 0x00,0x01,
+0x08,0x44, 0x87,0x2A, 0x00,0x04, 0xC4,0x84, 0x00,0x00, 0xC0,0x3A, 0x4A,0x00, 0xE6,0x01,
+0x06,0xD8, 0x00,0x00, 0x00,0x01, 0x87,0x02, 0xFF,0x48, 0x00,0x00, 0x00,0x01, 0xC7,0x38,
+0x52,0x00, 0x97,0x2A, 0x00,0x04, 0x87,0x2A, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x21,0x00, 0xEE,0x01, 0x07,0x3C, 0xF6,0x02, 0x00,0x00, 0x86,0xAA, 0x00,0x04, 0x87,0x02,
+0xFF,0x48, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x52,0x00, 0x27,0x38, 0x00,0x28, 0xC0,0x36,
+0x72,0x00, 0xE6,0x01, 0x07,0x3C, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x1D, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x01, 0x07,0x3C, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x17, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x01, 0x07,0x3D, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x16, 0x70,0x3E,
+0xFF,0xE1, 0xE6,0x01, 0x07,0x44, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x01, 0x08,0x88, 0x00,0x00, 0x00,0x01, 0x87,0x2A, 0x00,0x18, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xEE,0x01, 0x08,0xC1, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x71,0xA4, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x71,0xA4, 0xF7,0x04,
+0x71,0xA4, 0xE0,0x01, 0x08,0xC4, 0xF7,0x02, 0x00,0x01, 0x87,0x2A, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x21,0x00, 0xEE,0x01, 0x07,0xE0, 0xF6,0x02, 0x00,0x00, 0x86,0xAA,
+0x00,0x04, 0x87,0x02, 0xFF,0x48, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x52,0x00, 0x27,0x38,
+0x00,0x0C, 0xC0,0x36, 0x72,0x00, 0xE6,0x01, 0x07,0xE0, 0x00,0x00, 0x00,0x01, 0x77,0xFC,
+0x00,0x1D, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01, 0x07,0xE0, 0x00,0x00, 0x00,0x01, 0x77,0xFC,
+0x00,0x17, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01, 0x07,0xE1, 0x00,0x00, 0x00,0x01, 0x77,0xFC,
+0x00,0x16, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01, 0x08,0x80, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0xE0,0x01, 0x08,0x80, 0x20,0x32, 0x00,0x00, 0x87,0x02, 0xFF,0x48, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x52,0x00, 0x27,0x38, 0x00,0x04, 0x20,0x3A, 0x00,0x08, 0xE6,0x01,
+0x08,0x38, 0xF6,0x82, 0x00,0x00, 0x77,0xFC, 0x00,0x1D, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x38, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x17, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x39, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x16, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x80, 0x20,0x36, 0x00,0x00, 0xF6,0x82, 0x00,0x01, 0xE0,0x01, 0x08,0x80, 0x20,0x36,
+0x00,0x00, 0xF7,0x02, 0x00,0x00, 0x77,0xFC, 0x00,0x1D, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x78, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x17, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x79, 0x00,0x00, 0x00,0x01, 0x77,0xFC, 0x00,0x16, 0x70,0x3E, 0xFF,0xE1, 0xE6,0x01,
+0x08,0x80, 0x20,0x3A, 0x00,0x00, 0xF7,0x02, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x08,0xC1, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x71,0xA0, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x71,0xA0, 0xF7,0x04, 0x71,0xA0, 0xE0,0x01, 0x08,0xC4, 0xF7,0x02,
+0x00,0x01, 0xF7,0x04, 0x71,0x9C, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x71,0x9C, 0xF7,0x04, 0x71,0x9C, 0xE0,0x01, 0x08,0xC4, 0xF7,0x02, 0x00,0x01, 0xF7,0x02,
+0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x09,0x68, 0x00,0x00, 0x00,0x01, 0xF6,0x84,
+0x7B,0x10, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x76,0xB5, 0x00,0x1E, 0x76,0xB4,
+0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x39, 0xFF,0xF0, 0x27,0x38, 0x00,0x04, 0x20,0x3A,
+0x00,0x03, 0xE2,0x01, 0x0B,0x50, 0x77,0x39, 0x00,0x02, 0xF6,0x86, 0x09,0x0C, 0xA6,0xB6,
+0x70,0x02, 0x00,0x00, 0x00,0x01, 0xC1,0x34, 0x00,0x00, 0x00,0x01, 0x09,0x1C, 0x00,0x01,
+0x0A,0xE0, 0x00,0x01, 0x0A,0xAC, 0x00,0x01, 0x0B,0x14, 0xF7,0x04, 0x71,0xD0, 0xF6,0x04,
+0x71,0xCC, 0x06,0xB8, 0x00,0x01, 0xC0,0x36, 0x62,0x00, 0xE6,0x01, 0x09,0x38, 0xC7,0x34,
+0x00,0x00, 0xF7,0x02, 0x00,0x00, 0xF5,0x84, 0x71,0xD4, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x5A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x09,0x85, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x71,0xB0, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x71,0xB0, 0xF7,0x04, 0x71,0xB0, 0xF7,0x04, 0x71,0xB4, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x71,0xB4, 0xF7,0x04, 0x71,0xB4, 0xE0,0x01, 0x0B,0x50, 0x00,0x00,
+0x00,0x01, 0xF4,0x84, 0x71,0xC8, 0xF6,0x85, 0x71,0xD0, 0x94,0x96, 0xFF,0xF4, 0xF4,0x84,
+0x7B,0x10, 0xC0,0x36, 0x62,0x00, 0xE6,0x01, 0x09,0xA4, 0x94,0x96, 0xFF,0xEC, 0xF0,0x05,
+0x71,0xD0, 0xF7,0x04, 0x71,0xD0, 0xF0,0x05, 0x71,0xC8, 0x84,0x96, 0xFF,0xEC, 0xC0,0x3A,
+0x5A,0x00, 0x47,0x0C, 0x00,0x01, 0xF7,0x05, 0x71,0xC4, 0x87,0x26, 0x00,0x08, 0x00,0x00,
+0x00,0x01, 0x70,0x3A, 0xFF,0xE1, 0xE6,0x01, 0x09,0xE1, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x71,0x98, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x71,0x98, 0x84,0x96,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x01, 0x0A,0x71, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x05,0x58, 0x97,0x93, 0xFF,0xFC, 0xF6,0x02,
+0x00,0x09, 0x20,0x32, 0x00,0x14, 0xE6,0x01, 0x0A,0x4D, 0x27,0x00, 0x00,0x0C, 0x20,0x3A,
+0x00,0x01, 0xE2,0x01, 0x0A,0x4D, 0xF7,0x06, 0x2D,0xCC, 0xF6,0x84, 0x2E,0xCC, 0x00,0x00,
+0x00,0x01, 0x75,0xB5, 0x00,0x02, 0xB6,0x2E, 0x70,0x02, 0x06,0xB4, 0x00,0x01, 0xF6,0x85,
+0x2E,0xCC, 0x86,0x02, 0xFF,0x34, 0xF7,0x06, 0x2E,0x4C, 0x20,0x36, 0x00,0x1F, 0xE2,0x01,
+0x0A,0x4D, 0xB6,0x2E, 0x70,0x02, 0xF0,0x05, 0x2E,0xCC, 0xF7,0x04, 0x2D,0x68, 0x00,0x00,
+0x00,0x01, 0x87,0x3A, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x3A, 0x00,0x28, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xC1,0x38, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0xF7,0x04,
+0x71,0xBC, 0x84,0x96, 0xFF,0xEC, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x71,0xBC, 0xF7,0x04,
+0x71,0xBC, 0x86,0xA6, 0x00,0x04, 0x84,0x96, 0xFF,0xF4, 0xF7,0x04, 0x71,0xB8, 0x20,0x26,
+0x00,0x00, 0xC7,0x38, 0x68,0x00, 0xF7,0x05, 0x71,0xB8, 0xE6,0x01, 0x0B,0x51, 0x00,0x00,
+0x00,0x01, 0xE0,0x01, 0x0B,0x5C, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x71,0xC0, 0x00,0x00,
+0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x71,0xC0, 0xF7,0x04, 0x71,0xC0, 0xF4,0x84,
+0x7B,0x10, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0xFD,0xCC, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01, 0x0B,0x50, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x71,0xC0, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x71,0xC0, 0xF7,0x04,
+0x71,0xC0, 0xF4,0x84, 0x7B,0x10, 0x00,0x00, 0x00,0x01, 0x94,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0xFF,0x30, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01, 0x0B,0x50, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x71,0xC0, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x71,0xC0, 0xF7,0x04, 0x71,0xC0, 0xF6,0x84, 0x7B,0x10, 0x87,0x02, 0xFF,0x48, 0x00,0x00,
+0x00,0x01, 0xC7,0x38, 0x6A,0x00, 0x27,0x38, 0x00,0x04, 0x97,0x13, 0xFF,0xFC, 0x96,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0xFE,0x48, 0x97,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x05,0x58, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x70,0x80, 0xF7,0x05, 0x7B,0x18, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x05,0x58, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x6F,0x68, 0xF7,0x05, 0x7B,0x18, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x05,0x58, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x7B,0x18, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x6F,0x68, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x7B,0x18, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x6F,0xF4, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x7B,0x18, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x70,0x80, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x7B,0x18, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x71,0x0C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF5,0x02, 0x00,0x04, 0xF5,0x05, 0x76,0x00, 0xF0,0x05,
+0x76,0x08, 0xF0,0x05, 0x76,0x04, 0xF0,0x05, 0x75,0xF8, 0xF5,0x02, 0x00,0x01, 0xF6,0x82,
+0x00,0x00, 0x20,0x36, 0x00,0x04, 0xEC,0x01, 0x0C,0xBC, 0xF5,0x05, 0x75,0xFC, 0xF5,0x8E,
+0x6A,0xF8, 0xF6,0x06, 0x75,0xF8, 0x47,0x2C, 0xFF,0xFC, 0x97,0x32, 0x00,0x18, 0x06,0x30,
+0x00,0x04, 0x06,0xB4, 0x00,0x01, 0xF7,0x04, 0x76,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xEC,0x01, 0x0C,0x99, 0x05,0xAC, 0x21,0x4C, 0xF5,0x06, 0x72,0x18, 0x95,0x13,
+0xFF,0xFC, 0xF5,0x06, 0x76,0x48, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x15,0x48, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x0D,0xF4, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x0E, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x72,0x18, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x0D,0xF4, 0x95,0x13,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x0E, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x72,0xA4, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06,
+0x13,0x2C, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x01, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06,
+0x73,0x30, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x06, 0x16,0xC8, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82, 0x00,0x01, 0x97,0x93,
+0xFF,0xFC, 0xF5,0x06, 0x73,0xBC, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x18,0x00, 0x95,0x13, 0xFF,0xFC, 0xF7,0x82,
+0x00,0x10, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x74,0x48, 0x95,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x16,0x40, 0x95,0x13,
+0xFF,0xFC, 0xF7,0x82, 0x00,0x10, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06, 0x74,0xD4, 0x95,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF5,0x06,
+0x13,0x2C, 0x95,0x13, 0xFF,0xFC, 0xF5,0x02, 0x00,0x12, 0x95,0x13, 0xFF,0xFC, 0xF5,0x06,
+0x75,0x60, 0x95,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93,
+0xFF,0xFC, 0xF0,0x05, 0x75,0xF0, 0xF0,0x05, 0x75,0xEC, 0xF0,0x05, 0x75,0xF4, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x38, 0xF7,0x04,
+0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x0E,0x28, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x0E,0x3D, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x15,0xD0, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x01, 0x13,0x18, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x75,0xFC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x0E,0x59, 0xF6,0x86, 0x75,0xF8, 0xE0,0x01,
+0x0E,0x6C, 0xF6,0x82, 0x00,0x00, 0xF7,0x04, 0x76,0x08, 0x00,0x00, 0x00,0x01, 0x77,0x39,
+0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x86,0xBA, 0x00,0x18, 0xF7,0x04, 0x76,0xFC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x0E,0x90, 0xF6,0x85, 0x76,0x60, 0xF3,0x06,
+0x76,0x48, 0xF3,0x05, 0x76,0xFC, 0xE0,0x01, 0x0E,0xA4, 0xF7,0x02, 0x00,0x01, 0xF3,0x02,
+0x00,0x10, 0xF3,0x05, 0x76,0xF8, 0xF3,0x06, 0x76,0x48, 0xF3,0x05, 0x77,0x00, 0xF7,0x02,
+0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x13,0x15, 0xF3,0x06, 0x74,0x48, 0xF7,0x04,
+0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x0E,0xD8, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x0E,0xED, 0x00,0x00, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x16,0x40, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x01, 0x13,0x18, 0x00,0x00, 0x00,0x01, 0xF6,0x84, 0x76,0x60, 0x00,0x00,
+0x00,0x01, 0x87,0x36, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x70,0x3A, 0xFF,0xE1, 0xE6,0x01,
+0x0F,0x21, 0xF4,0x82, 0x00,0x00, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x00,0xBC, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01, 0x13,0x14, 0xF3,0x06, 0x75,0x60, 0xC3,0xB4,
+0x00,0x00, 0x84,0x1E, 0x00,0x10, 0xF6,0x84, 0x4A,0xA0, 0x23,0x14, 0x00,0x20, 0x93,0x16,
+0xFF,0xC4, 0x94,0x16, 0xFF,0xE0, 0x96,0x96, 0xFF,0xD4, 0x85,0x1E, 0x00,0x14, 0xF7,0x04,
+0x4A,0x9C, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x01, 0x10,0x0C, 0x95,0x16,
+0xFF,0xE4, 0x77,0x35, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF3,0x06,
+0x4A,0x98, 0xC6,0xB8, 0x30,0x00, 0x06,0xB4, 0x00,0x0C, 0xC5,0x84, 0x00,0x00, 0x87,0x36,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x42,0x00, 0xE6,0x01, 0x0F,0x9C, 0xC6,0x24,
+0x00,0x00, 0x87,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0xE6,0x01,
+0x0F,0xA0, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x01,
+0x0F,0xAD, 0x00,0x00, 0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0x36, 0x00,0x00, 0x87,0x16,
+0xFF,0xE0, 0x00,0x00, 0x00,0x01, 0xC0,0x32, 0x72,0x00, 0xE2,0x01, 0x0F,0xE8, 0xF5,0x02,
+0x00,0x00, 0xC0,0x32, 0x72,0x00, 0xE6,0x01, 0x0F,0xF0, 0x20,0x2A, 0x00,0x00, 0x86,0xB6,
+0x00,0x04, 0x87,0x16, 0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x01,
+0x0F,0xF1, 0x20,0x2A, 0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x01,
+0x10,0x01, 0x20,0x2E, 0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x01,
+0x10,0x10, 0x20,0x26, 0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x01,
+0x10,0x45, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9,
+0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4,
+0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xD8, 0xE0,0x01, 0x10,0xB8, 0x96,0x96,
+0xFF,0xDC, 0x27,0x14, 0x00,0x2C, 0x97,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0xC4, 0x00,0x00,
+0x00,0x01, 0x93,0x13, 0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x93,0x96,
+0xFF,0xCC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96,
+0xFF,0xCC, 0x20,0x22, 0x00,0x00, 0xE6,0x01, 0x10,0xB5, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5,
+0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16,
+0xFF,0xD8, 0x96,0x96, 0xFF,0xDC, 0xF7,0x05, 0x4A,0xA0, 0xE0,0x01, 0x10,0xBC, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x01, 0x10,0xCC, 0xF4,0x82,
+0x00,0x01, 0xE0,0x01, 0x11,0x24, 0xF4,0x82, 0x00,0x00, 0x86,0x96, 0xFF,0xD8, 0x00,0x00,
+0x00,0x01, 0x77,0x35, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF6,0x86,
+0x42,0xC8, 0xA6,0x3A, 0x68,0x02, 0xC7,0x38, 0x68,0x00, 0x75,0x39, 0x00,0x1E, 0x75,0x28,
+0xFF,0xE5, 0x05,0xB8, 0x00,0x02, 0x86,0xAE, 0x00,0x00, 0x07,0x38, 0x00,0x04, 0x97,0x16,
+0xFF,0xEC, 0xC6,0x30, 0x57,0xC0, 0x76,0x30, 0xFF,0xF0, 0x96,0x16, 0xFF,0xF4, 0x75,0xAD,
+0x00,0x1E, 0x75,0xAC, 0xFF,0xE5, 0xC6,0xB4, 0x5F,0xC0, 0x76,0xB4, 0xFF,0xF0, 0x96,0x96,
+0xFF,0xF0, 0x20,0x26, 0x00,0x00, 0xE6,0x01, 0x11,0x38, 0xF5,0x82, 0x00,0x00, 0xE0,0x01,
+0x11,0xCC, 0xF6,0x02, 0x00,0x00, 0x86,0x96, 0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC7,0x34,
+0x68,0x00, 0xC4,0x9C, 0x72,0x00, 0xC0,0x2E, 0x6A,0x00, 0xEC,0x01, 0x11,0x98, 0xC5,0x24,
+0x00,0x00, 0xC6,0x2C, 0x00,0x00, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xA6,0xB2,
+0x70,0x02, 0x05,0xAC, 0x00,0x01, 0xC7,0x30, 0x70,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38,
+0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB4, 0xFF,0xF0, 0xF6,0xAB, 0x28,0x00, 0x05,0x28,
+0x00,0x02, 0x87,0x16, 0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC0,0x2E, 0x72,0x00, 0xEC,0x01,
+0x11,0x59, 0x06,0x30, 0x00,0x02, 0xF3,0x02, 0x00,0x01, 0xF3,0x05, 0x76,0xF4, 0xF6,0x02,
+0x00,0x01, 0x87,0x16, 0xFF,0xF0, 0x86,0x9E, 0x00,0x04, 0xC7,0x38, 0x70,0x00, 0xC7,0x38,
+0x48,0x00, 0xC6,0xB4, 0x70,0x00, 0x87,0x16, 0xFF,0xF4, 0x06,0xB4, 0x00,0x20, 0x97,0x02,
+0xFF,0x6C, 0x94,0x82, 0xFF,0x50, 0x96,0x82, 0xFF,0x58, 0x20,0x32, 0x00,0x00, 0xE6,0x01,
+0x13,0x10, 0x00,0x00, 0x00,0x01, 0xF7,0x04, 0x76,0x5C, 0xF5,0x84, 0x76,0xF8, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x76,0x5C, 0xF7,0x04, 0x76,0x5C, 0x20,0x2E, 0x00,0x21, 0xE2,0x01,
+0x12,0x30, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x01,
+0x12,0x1C, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xF3,0x05,
+0x76,0xF8, 0xF3,0x04, 0x77,0x00, 0xE0,0x01, 0x12,0x34, 0xF3,0x05, 0x76,0xFC, 0xF0,0x05,
+0x76,0xFC, 0xF7,0x04, 0x75,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x12,0x71, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0F, 0x20,0x32,
+0x00,0x44, 0xE6,0x01, 0x12,0x70, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04,
+0x76,0x08, 0xF6,0x84, 0x76,0x00, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x01,
+0x12,0x8C, 0xF7,0x05, 0x76,0x08, 0xF0,0x05, 0x76,0x08, 0xF6,0x84, 0x76,0x08, 0xF7,0x04,
+0x76,0x04, 0xF0,0x05, 0x75,0xF8, 0xF6,0x06, 0x75,0xF8, 0xC0,0x36, 0x72,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x12,0xB9, 0xF7,0x05, 0x75,0xFC, 0xE0,0x01,
+0x12,0xC8, 0xF7,0x02, 0x00,0x00, 0x77,0x35, 0x00,0x02, 0xC7,0x38, 0x60,0x00, 0x87,0x3A,
+0x00,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x13,0x09, 0xF7,0x05,
+0x76,0x60, 0xF7,0x04, 0x2D,0x38, 0xF3,0x06, 0x72,0xA4, 0xF3,0x05, 0x76,0x48, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02,
+0x00,0x0E, 0x20,0x32, 0x00,0x44, 0xE6,0x01, 0x13,0x18, 0xB3,0x3A, 0x68,0x02, 0xE0,0x01,
+0x13,0x18, 0xF0,0x05, 0x2D,0x38, 0xE0,0x01, 0x13,0x14, 0xF3,0x06, 0x72,0x18, 0xF3,0x06,
+0x73,0x30, 0xF3,0x05, 0x76,0x48, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x04, 0x76,0x60, 0x00,0x00, 0x00,0x01, 0x86,0xBA, 0x00,0x04, 0xF7,0x04,
+0x76,0x54, 0x00,0x00, 0x00,0x01, 0xC7,0x38, 0x68,0x00, 0xF7,0x05, 0x76,0x54, 0xF7,0x04,
+0x76,0x58, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x76,0x58, 0xF7,0x04,
+0x75,0xF8, 0xF6,0x84, 0x76,0x58, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x13,0x9D, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02, 0x00,0x0F, 0x20,0x32, 0x00,0x44, 0xE6,0x01,
+0x13,0x9C, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x76,0x08, 0xF6,0x84,
+0x76,0x00, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x01, 0x13,0xB8, 0xF7,0x05,
+0x76,0x08, 0xF0,0x05, 0x76,0x08, 0xF7,0x04, 0x76,0x08, 0xF6,0x84, 0x76,0x04, 0xF0,0x05,
+0x75,0xF8, 0xF5,0x84, 0x76,0xF8, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x2E,
+0x00,0x21, 0xE2,0x01, 0x14,0x14, 0xF7,0x05, 0x75,0xFC, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86,
+0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32,
+0x00,0x44, 0xE6,0x01, 0x14,0x00, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x02,
+0x00,0x22, 0xF5,0x05, 0x76,0xF8, 0xF5,0x04, 0x77,0x00, 0xE0,0x01, 0x14,0x18, 0xF5,0x05,
+0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xF7,0x04, 0x75,0xEC, 0xF5,0x06, 0x72,0x18, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x14,0x40, 0xF5,0x05, 0x76,0x48, 0xF7,0x04, 0x75,0xF0, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x14,0x55, 0x00,0x00, 0x00,0x01, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x15,0xD0, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01, 0x14,0xC4, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xFC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x14,0x71, 0xF6,0x86, 0x75,0xF8, 0xE0,0x01, 0x14,0x88, 0xF7,0x02, 0x00,0x00, 0xF7,0x04,
+0x76,0x08, 0x00,0x00, 0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x87,0x3A,
+0x00,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x14,0xC5, 0xF7,0x05,
+0x76,0x60, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02, 0x00,0x0E, 0x20,0x32, 0x00,0x44, 0xE6,0x01,
+0x14,0xBC, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF5,0x06, 0x72,0xA4, 0xF5,0x05,
+0x76,0x48, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04,
+0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x15,0x40, 0xF4,0x02,
+0x00,0x00, 0x86,0x96, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xF6,0x85, 0x75,0xEC, 0x86,0x96,
+0x00,0x08, 0x00,0x00, 0x00,0x01, 0xF6,0x85, 0x7B,0x38, 0x86,0x96, 0x00,0x00, 0xF7,0x04,
+0x76,0x48, 0xF6,0x85, 0x7B,0x30, 0xF6,0x86, 0x72,0x18, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x15,0x41, 0xF4,0x02, 0x00,0x01, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x15,0xD0, 0x97,0x93, 0xFF,0xFC, 0xF4,0x02, 0x00,0x01, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04, 0x75,0xF4, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x15,0xBC, 0xF4,0x02, 0x00,0x00, 0x86,0x96,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xF6,0x85, 0x75,0xF0, 0x86,0x96, 0x00,0x08, 0x00,0x00,
+0x00,0x01, 0xF6,0x85, 0x7B,0x48, 0x86,0x96, 0x00,0x00, 0xF7,0x04, 0x76,0x48, 0xF6,0x85,
+0x7B,0x40, 0xF6,0x86, 0x72,0x18, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x15,0xBD, 0xF4,0x02, 0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x15,0xD0, 0x97,0x93, 0xFF,0xFC, 0xF4,0x02, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04, 0x76,0xFC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x15,0xFC, 0xF6,0x82, 0x00,0x10, 0xF6,0x86, 0x76,0x48, 0xF6,0x85,
+0x76,0xFC, 0xE0,0x01, 0x16,0x0C, 0xF7,0x02, 0x00,0x01, 0xF6,0x85, 0x76,0xF8, 0xF6,0x86,
+0x76,0x48, 0xF6,0x85, 0x77,0x00, 0xF7,0x02, 0x00,0x00, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x16,0x20, 0xF6,0x86, 0x74,0xD4, 0xE0,0x01, 0x16,0x2C, 0xF6,0x85, 0x76,0x48, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x16,0x40, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x04, 0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0xE6,0x01, 0x16,0x85, 0xF7,0x02, 0x00,0x01, 0xF7,0x05, 0x75,0xF4, 0xF6,0x84,
+0x7B,0x48, 0xF7,0x05, 0x76,0xF4, 0xF7,0x04, 0x7B,0x40, 0xC6,0xB0, 0x68,0x00, 0x26,0xB4,
+0x00,0x04, 0x97,0x02, 0xFF,0x6C, 0x96,0x02, 0xFF,0x50, 0xE0,0x01, 0x16,0xA8, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x75,0xEC, 0xF6,0x84, 0x7B,0x38, 0xF5,0x82, 0x00,0x01, 0xF5,0x85,
+0x76,0xF4, 0xF6,0x04, 0x7B,0x30, 0xC6,0xB8, 0x68,0x00, 0x26,0xB4, 0x00,0x04, 0x96,0x02,
+0xFF,0x6C, 0x97,0x02, 0xFF,0x50, 0x96,0x82, 0xFF,0x58, 0xF5,0x86, 0x73,0xBC, 0xF5,0x85,
+0x76,0x48, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04,
+0x7B,0x28, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0xF7,0x05, 0x7B,0x28, 0xF7,0x04,
+0x75,0xF4, 0xF6,0x84, 0x7B,0x28, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x17,0x21, 0x00,0x00,
+0x00,0x01, 0xF0,0x05, 0x75,0xF4, 0xF7,0x04, 0x75,0xEC, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x17,0x25, 0xF0,0x05, 0x75,0xF0, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x16,0x40, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01, 0x17,0xEC, 0x00,0x00, 0x00,0x01, 0xF0,0x05,
+0x75,0xEC, 0xF7,0x04, 0x75,0xFC, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01,
+0x17,0x41, 0xF6,0x86, 0x75,0xF8, 0xE0,0x01, 0x17,0x58, 0xF7,0x02, 0x00,0x00, 0xF7,0x04,
+0x76,0x08, 0x00,0x00, 0x00,0x01, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x87,0x3A,
+0x00,0x18, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x17,0x95, 0xF7,0x05,
+0x76,0x60, 0xF7,0x04, 0x2D,0x38, 0xF6,0x86, 0x2C,0x28, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF5,0x02, 0x00,0x0E, 0x20,0x32, 0x00,0x44, 0xE6,0x01,
+0x17,0x8C, 0xB5,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xE0,0x01, 0x17,0x98, 0xF5,0x06,
+0x72,0xA4, 0xF5,0x06, 0x72,0x18, 0xF5,0x05, 0x76,0x48, 0xF5,0x84, 0x76,0xF8, 0x00,0x00,
+0x00,0x01, 0x20,0x2E, 0x00,0x21, 0xE2,0x01, 0x17,0xE8, 0xF6,0x86, 0x2C,0x28, 0xF7,0x04,
+0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39,
+0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x01, 0x17,0xD4, 0xB5,0xBA, 0x68,0x02, 0xF0,0x05,
+0x2D,0x38, 0xF5,0x02, 0x00,0x22, 0xF5,0x05, 0x76,0xF8, 0xF5,0x04, 0x77,0x00, 0xE0,0x01,
+0x17,0xEC, 0xF5,0x05, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x38, 0xF7,0x04, 0x75,0xEC, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x18,0x34, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x75,0xF0, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x18,0x49, 0x00,0x00,
+0x00,0x01, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x16,0x40, 0x97,0x93, 0xFF,0xFC, 0xE0,0x01,
+0x1C,0x74, 0x00,0x00, 0x00,0x01, 0xF6,0x84, 0x76,0x60, 0x00,0x00, 0x00,0x01, 0x87,0x36,
+0x00,0x08, 0x00,0x00, 0x00,0x01, 0x70,0x3A, 0xFF,0xE1, 0xE6,0x01, 0x18,0x7D, 0xF4,0x82,
+0x00,0x00, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x00,0xBC, 0x97,0x93,
+0xFF,0xFC, 0xE0,0x01, 0x1C,0x70, 0xF3,0x06, 0x75,0x60, 0xC3,0xB4, 0x00,0x00, 0x84,0x1E,
+0x00,0x10, 0xF6,0x84, 0x4A,0xA0, 0x23,0x14, 0x00,0x20, 0x93,0x16, 0xFF,0xC4, 0x94,0x16,
+0xFF,0xE0, 0x96,0x96, 0xFF,0xD4, 0x85,0x1E, 0x00,0x14, 0xF7,0x04, 0x4A,0x9C, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xEC,0x01, 0x19,0x68, 0x95,0x16, 0xFF,0xE4, 0x77,0x35,
+0x00,0x01, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF3,0x06, 0x4A,0x98, 0xC6,0xB8,
+0x30,0x00, 0x06,0xB4, 0x00,0x0C, 0xC5,0x84, 0x00,0x00, 0x87,0x36, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0xC0,0x3A, 0x42,0x00, 0xE6,0x01, 0x18,0xF8, 0xC6,0x24, 0x00,0x00, 0x87,0x36,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x52,0x00, 0xE6,0x01, 0x18,0xFC, 0x20,0x32,
+0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x01, 0x19,0x09, 0x00,0x00,
+0x00,0x01, 0xF5,0x82, 0x00,0x00, 0x86,0x36, 0x00,0x00, 0x87,0x16, 0xFF,0xE0, 0x00,0x00,
+0x00,0x01, 0xC0,0x32, 0x72,0x00, 0xE2,0x01, 0x19,0x44, 0xF5,0x02, 0x00,0x00, 0xC0,0x32,
+0x72,0x00, 0xE6,0x01, 0x19,0x4C, 0x20,0x2A, 0x00,0x00, 0x86,0xB6, 0x00,0x04, 0x87,0x16,
+0xFF,0xE4, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE2,0x01, 0x19,0x4D, 0x20,0x2A,
+0x00,0x00, 0xF5,0x02, 0x00,0x01, 0x20,0x2A, 0x00,0x00, 0xE6,0x01, 0x19,0x5D, 0x20,0x2E,
+0x00,0x00, 0xF5,0x82, 0x00,0x01, 0x20,0x2E, 0x00,0x00, 0xE6,0x01, 0x19,0x6C, 0x20,0x26,
+0x00,0x00, 0xF4,0x82, 0x00,0x01, 0x20,0x26, 0x00,0x00, 0xE6,0x01, 0x19,0xA1, 0xF6,0x02,
+0x00,0x01, 0x87,0x16, 0xFF,0xD4, 0xF3,0x06, 0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4,
+0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4, 0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6,
+0x00,0x00, 0x97,0x16, 0xFF,0xD8, 0xE0,0x01, 0x1A,0x14, 0x96,0x96, 0xFF,0xDC, 0x27,0x14,
+0x00,0x2C, 0x97,0x13, 0xFF,0xFC, 0x83,0x16, 0xFF,0xC4, 0x00,0x00, 0x00,0x01, 0x93,0x13,
+0xFF,0xFC, 0xF3,0x06, 0x4A,0x98, 0x93,0x13, 0xFF,0xFC, 0x93,0x96, 0xFF,0xCC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x83,0x96, 0xFF,0xCC, 0x20,0x22,
+0x00,0x00, 0xE6,0x01, 0x1A,0x11, 0xF6,0x02, 0x00,0x01, 0x87,0x16, 0xFF,0xD4, 0xF3,0x06,
+0x4A,0x98, 0x76,0xB9, 0x00,0x01, 0xC6,0xB4, 0x70,0x00, 0x76,0xB5, 0x00,0x02, 0xC6,0xB4,
+0x30,0x00, 0x06,0xB4, 0x00,0x14, 0x86,0xB6, 0x00,0x00, 0x97,0x16, 0xFF,0xD8, 0x96,0x96,
+0xFF,0xDC, 0xF7,0x05, 0x4A,0xA0, 0xE0,0x01, 0x1A,0x18, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x00, 0x20,0x32, 0x00,0x00, 0xE6,0x01, 0x1A,0x28, 0xF4,0x82, 0x00,0x01, 0xE0,0x01,
+0x1A,0x80, 0xF4,0x82, 0x00,0x00, 0x86,0x96, 0xFF,0xD8, 0x00,0x00, 0x00,0x01, 0x77,0x35,
+0x00,0x02, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0x00,0x02, 0xF6,0x86, 0x42,0xC8, 0xA6,0x3A,
+0x68,0x02, 0xC7,0x38, 0x68,0x00, 0x75,0x39, 0x00,0x1E, 0x75,0x28, 0xFF,0xE5, 0x05,0xB8,
+0x00,0x02, 0x86,0xAE, 0x00,0x00, 0x07,0x38, 0x00,0x04, 0x97,0x16, 0xFF,0xEC, 0xC6,0x30,
+0x57,0xC0, 0x76,0x30, 0xFF,0xF0, 0x96,0x16, 0xFF,0xF4, 0x75,0xAD, 0x00,0x1E, 0x75,0xAC,
+0xFF,0xE5, 0xC6,0xB4, 0x5F,0xC0, 0x76,0xB4, 0xFF,0xF0, 0x96,0x96, 0xFF,0xF0, 0x20,0x26,
+0x00,0x00, 0xE6,0x01, 0x1A,0x94, 0xF5,0x82, 0x00,0x00, 0xE0,0x01, 0x1B,0x28, 0xF6,0x02,
+0x00,0x00, 0x86,0x96, 0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC7,0x34, 0x68,0x00, 0xC4,0x9C,
+0x72,0x00, 0xC0,0x2E, 0x6A,0x00, 0xEC,0x01, 0x1A,0xF4, 0xC5,0x24, 0x00,0x00, 0xC6,0x2C,
+0x00,0x00, 0x87,0x16, 0xFF,0xEC, 0x00,0x00, 0x00,0x01, 0xA6,0xB2, 0x70,0x02, 0x05,0xAC,
+0x00,0x01, 0xC7,0x30, 0x70,0x00, 0x77,0x39, 0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4,
+0x77,0xC0, 0x76,0xB4, 0xFF,0xF0, 0xF6,0xAB, 0x28,0x00, 0x05,0x28, 0x00,0x02, 0x87,0x16,
+0xFF,0xF0, 0x00,0x00, 0x00,0x01, 0xC0,0x2E, 0x72,0x00, 0xEC,0x01, 0x1A,0xB5, 0x06,0x30,
+0x00,0x02, 0xF3,0x02, 0x00,0x01, 0xF3,0x05, 0x76,0xF4, 0xF6,0x02, 0x00,0x01, 0x87,0x16,
+0xFF,0xF0, 0x86,0x9E, 0x00,0x04, 0xC7,0x38, 0x70,0x00, 0xC7,0x38, 0x48,0x00, 0xC6,0xB4,
+0x70,0x00, 0x87,0x16, 0xFF,0xF4, 0x06,0xB4, 0x00,0x20, 0x97,0x02, 0xFF,0x6C, 0x94,0x82,
+0xFF,0x50, 0x96,0x82, 0xFF,0x58, 0x20,0x32, 0x00,0x00, 0xE6,0x01, 0x1C,0x6C, 0x00,0x00,
+0x00,0x01, 0xF7,0x04, 0x76,0x5C, 0xF5,0x84, 0x76,0xF8, 0x07,0x38, 0x00,0x01, 0xF7,0x05,
+0x76,0x5C, 0xF7,0x04, 0x76,0x5C, 0x20,0x2E, 0x00,0x21, 0xE2,0x01, 0x1B,0x8C, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x32, 0x00,0x44, 0xE6,0x01, 0x1B,0x78, 0xB5,0xBA,
+0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF3,0x02, 0x00,0x22, 0xF3,0x05, 0x76,0xF8, 0xF3,0x04,
+0x77,0x00, 0xE0,0x01, 0x1B,0x90, 0xF3,0x05, 0x76,0xFC, 0xF0,0x05, 0x76,0xFC, 0xF7,0x04,
+0x75,0xF8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x1B,0xCD, 0xF6,0x86,
+0x2C,0x28, 0xF7,0x04, 0x2D,0x38, 0x00,0x00, 0x00,0x01, 0x06,0x38, 0x00,0x01, 0xF6,0x05,
+0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0F, 0x20,0x32, 0x00,0x44, 0xE6,0x01,
+0x1B,0xCC, 0xB3,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0xF7,0x04, 0x76,0x08, 0xF6,0x84,
+0x76,0x00, 0x07,0x38, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0xE6,0x01, 0x1B,0xE8, 0xF7,0x05,
+0x76,0x08, 0xF0,0x05, 0x76,0x08, 0xF6,0x84, 0x76,0x08, 0xF7,0x04, 0x76,0x04, 0xF0,0x05,
+0x75,0xF8, 0xF6,0x06, 0x75,0xF8, 0xC0,0x36, 0x72,0x00, 0x47,0x0C, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x1C,0x15, 0xF7,0x05, 0x75,0xFC, 0xE0,0x01, 0x1C,0x24, 0xF7,0x02,
+0x00,0x00, 0x77,0x35, 0x00,0x02, 0xC7,0x38, 0x60,0x00, 0x87,0x3A, 0x00,0x18, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x1C,0x65, 0xF7,0x05, 0x76,0x60, 0xF7,0x04,
+0x2D,0x38, 0xF3,0x06, 0x72,0xA4, 0xF3,0x05, 0x76,0x48, 0xF6,0x86, 0x2C,0x28, 0x06,0x38,
+0x00,0x01, 0xF6,0x05, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0xF3,0x02, 0x00,0x0E, 0x20,0x32,
+0x00,0x44, 0xE6,0x01, 0x1C,0x74, 0xB3,0x3A, 0x68,0x02, 0xE0,0x01, 0x1C,0x74, 0xF0,0x05,
+0x2D,0x38, 0xE0,0x01, 0x1C,0x70, 0xF3,0x06, 0x72,0x18, 0xF3,0x06, 0x73,0x30, 0xF3,0x05,
+0x76,0x48, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06,
+0x76,0x48, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x72,0x18, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x76,0x48, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x72,0xA4, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x76,0x48, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x73,0x30, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x76,0x48, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x73,0xBC, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06,
+0x76,0x48, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06, 0x74,0x48, 0x97,0x13, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x76,0x48, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x74,0xD4, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x76,0x48, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x75,0x60, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x86,
+0x76,0x68, 0x96,0x93, 0xFF,0xFC, 0xF6,0x86, 0x77,0x04, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93, 0xFF,0xFC, 0xF6,0x86, 0x1D,0xD4, 0x96,0x93,
+0xFF,0xFC, 0x90,0x13, 0xFF,0xFC, 0xF6,0x86, 0x76,0x68, 0x96,0x93, 0xFF,0xFC, 0x07,0x88,
+0x00,0x08, 0xE0,0x00, 0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF7,0x02, 0x00,0x22, 0xF7,0x05,
+0x76,0xF4, 0xF7,0x05, 0x76,0xF8, 0xF0,0x05, 0x76,0xFC, 0xF0,0x05, 0x77,0x00, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04, 0x76,0xF4, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x22, 0xE6,0x01, 0x1E,0x01, 0x00,0x00, 0x00,0x01, 0x97,0x13,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x84, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x77,0x04, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x76,0x68, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x86, 0x78,0x10, 0x96,0x93, 0xFF,0xFC, 0xF6,0x86,
+0x78,0xA4, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF6,0x86, 0x1F,0xBC, 0x96,0x93, 0xFF,0xFC, 0xF6,0x82, 0x00,0x14, 0x96,0x93,
+0xFF,0xFC, 0xF6,0x86, 0x78,0x10, 0x96,0x93, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0xF0,0x05, 0x78,0x9C, 0x90,0x02, 0xFF,0x34, 0xF7,0x02,
+0x7F,0xFF, 0xF7,0x05, 0x78,0xA0, 0x97,0x02, 0xFF,0x30, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF6,0x04, 0x78,0x9C, 0x87,0x16, 0x00,0x00, 0x84,0x96,
+0x00,0x08, 0xF5,0x86, 0x77,0x10, 0x87,0x3A, 0x00,0x08, 0xF6,0x86, 0x21,0x8C, 0x75,0x39,
+0x00,0x04, 0x77,0x39, 0x00,0x02, 0xA7,0x3A, 0x68,0x02, 0x20,0x32, 0x00,0x00, 0xC6,0xA8,
+0x58,0x00, 0x84,0x16, 0x00,0x04, 0xC6,0x30, 0x75,0x80, 0x94,0x36, 0x00,0x04, 0xB4,0xAA,
+0x58,0x02, 0x87,0x36, 0x00,0x08, 0xF6,0x05, 0x78,0x9C, 0x07,0x38, 0x00,0x01, 0xE6,0x01,
+0x1F,0x2D, 0x97,0x36, 0x00,0x08, 0x87,0x02, 0xFF,0x30, 0x00,0x00, 0x00,0x01, 0xC0,0x3A,
+0x4A,0x00, 0xEE,0x01, 0x1F,0x35, 0x00,0x00, 0x00,0x01, 0xF4,0x85, 0x78,0xA0, 0x94,0x82,
+0xFF,0x30, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x87,0x2E, 0x00,0x08, 0xF6,0x86, 0x21,0x8C, 0x77,0x39,
+0x00,0x02, 0xA7,0x3A, 0x68,0x02, 0xF6,0x04, 0x78,0x9C, 0xC7,0x04, 0x76,0x00, 0x86,0xAE,
+0x00,0x08, 0xC6,0x30, 0x74,0x00, 0xF7,0x06, 0x77,0x10, 0xF6,0x05, 0x78,0x9C, 0x76,0xB5,
+0x00,0x04, 0xC6,0xB4, 0x70,0x00, 0x87,0x36, 0x00,0x08, 0x20,0x32, 0x00,0x00, 0x07,0x38,
+0x00,0x01, 0xE6,0x01, 0x1F,0xA8, 0x97,0x36, 0x00,0x08, 0xF7,0x02, 0x7F,0xFF, 0xF7,0x05,
+0x78,0xA0, 0x97,0x02, 0xFF,0x30, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0x22,0x10, 0x00,0x08, 0xF7,0x04, 0x78,0x9C, 0x00,0x00, 0x00,0x01, 0x20,0x3A,
+0x00,0x00, 0xE6,0x01, 0x20,0xD1, 0xF6,0x02, 0x7F,0xFF, 0x96,0x16, 0xFF,0xF4, 0xF6,0x84,
+0x2D,0x40, 0xF6,0x06, 0x77,0x10, 0x26,0xB4, 0x00,0x01, 0x77,0x35, 0x00,0x04, 0xC4,0xB8,
+0x60,0x00, 0xC3,0x38, 0x00,0x00, 0x74,0x35, 0x00,0x02, 0xF6,0x06, 0x77,0x10, 0xC0,0x26,
+0x62,0x00, 0xEC,0x01, 0x20,0xC1, 0xF6,0x06, 0x21,0x8C, 0xF3,0x84, 0x78,0x9C, 0xA7,0x22,
+0x60,0x02, 0x00,0x00, 0x00,0x01, 0xC0,0x1E, 0x74,0x00, 0xE6,0x01, 0x20,0xB1, 0x00,0x00,
+0x00,0x01, 0x86,0xA6, 0x00,0x00, 0xF7,0x04, 0x78,0xA0, 0x00,0x00, 0x00,0x01, 0xC6,0xB4,
+0x72,0x00, 0x20,0x36, 0x00,0x00, 0xEE,0x01, 0x20,0x98, 0x96,0xA6, 0x00,0x00, 0xF7,0x04,
+0x2D,0x38, 0xF6,0x06, 0x77,0x10, 0xC5,0x18, 0x60,0x00, 0xF6,0x86, 0x2C,0x28, 0x86,0x2A,
+0x00,0x04, 0x05,0xB8, 0x00,0x01, 0xF5,0x85, 0x2D,0x38, 0x77,0x39, 0x00,0x02, 0x20,0x2E,
+0x00,0x44, 0xE6,0x01, 0x20,0x70, 0xB6,0x3A, 0x68,0x02, 0xF0,0x05, 0x2D,0x38, 0x86,0x2A,
+0x00,0x08, 0x00,0x00, 0x00,0x01, 0x96,0x2A, 0x00,0x0C, 0xF6,0x06, 0x21,0x8C, 0xA7,0x22,
+0x60,0x02, 0x00,0x00, 0x00,0x01, 0xC7,0x04, 0x76,0x00, 0xC7,0x1C, 0x74,0x00, 0xE0,0x01,
+0x20,0xB0, 0xF7,0x05, 0x78,0x9C, 0x86,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x62,0x00, 0xEC,0x01, 0x20,0xB0, 0x00,0x00, 0x00,0x01, 0x96,0x96, 0xFF,0xF4, 0x24,0xA4,
+0x00,0x10, 0x23,0x18, 0x00,0x10, 0xE0,0x01, 0x1F,0xFC, 0x24,0x20, 0x00,0x04, 0x86,0x16,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0xF6,0x05, 0x78,0xA0, 0x96,0x02, 0xFF,0x30, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x87,0x16, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x87,0x3A, 0x00,0x08, 0xF6,0x86, 0x77,0x10, 0x77,0x39, 0x00,0x04, 0xC7,0x38,
+0x68,0x00, 0x86,0xBA, 0x00,0x0C, 0x87,0x3A, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0x44,0x0C, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x04, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF7,0x02, 0x00,0x0F, 0x20,0x3A, 0x00,0x00, 0xEC,0x01, 0x21,0x5D, 0xF6,0x86,
+0x77,0x18, 0x90,0x36, 0x00,0x00, 0x27,0x38, 0x00,0x01, 0xC6,0x04, 0x00,0x00, 0xC0,0x3A,
+0x62,0x00, 0xE6,0x01, 0x21,0x44, 0x06,0xB4, 0x00,0x10, 0xF6,0x06, 0x78,0xA4, 0x96,0x13,
+0xFF,0xFC, 0xF6,0x06, 0x78,0x10, 0x96,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x01, 0x00,0x00,
+0x00,0x02, 0x00,0x00, 0x00,0x04, 0x00,0x00, 0x00,0x08, 0x00,0x00, 0x00,0x10, 0x00,0x00,
+0x00,0x20, 0x00,0x00, 0x00,0x40, 0x00,0x00, 0x00,0x80, 0x00,0x00, 0x01,0x00, 0x00,0x00,
+0x02,0x00, 0x00,0x00, 0x04,0x00, 0x00,0x00, 0x08,0x00, 0x00,0x00, 0x10,0x00, 0x00,0x00,
+0x20,0x00, 0x00,0x00, 0x40,0x00, 0x00,0x00, 0x80,0x00, 0x00,0x00, 0x00,0x00, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x78,0xB0, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x79,0xCC, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x15,0x48, 0x97,0x93,
+0xFF,0xFC, 0xF7,0x06, 0x22,0x2C, 0x97,0x13, 0xFF,0xFC, 0xF7,0x02, 0x00,0x15, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x78,0xB0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x16,0x1C, 0x97,0x93, 0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14,
+0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90,
+0x00,0x08, 0xF6,0x84, 0x6F,0x44, 0x00,0x00, 0x00,0x01, 0x87,0x36, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x20,0x3A, 0x00,0x02, 0xE6,0x01, 0x22,0x70, 0xF6,0x02, 0x00,0x00, 0x87,0x36,
+0x0E,0xF4, 0x86,0xB6, 0x0E,0xF8, 0x00,0x00, 0x00,0x01, 0xC0,0x3A, 0x6A,0x00, 0x47,0x0C,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x22,0x78, 0x20,0x32, 0x00,0x00, 0xF6,0x02,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x01, 0x22,0x94, 0x00,0x00, 0x00,0x01, 0xF7,0x04,
+0x32,0xE8, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x22,0xB1, 0xF5,0x82,
+0x03,0xE8, 0x0F,0x81, 0x40,0x00, 0xF7,0x04, 0x79,0xC8, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0xF7,0x05, 0x79,0xC8, 0xF7,0x04, 0x79,0xC8, 0xF5,0x82, 0x03,0xE8, 0x95,0x93,
+0xFF,0xFC, 0xF5,0x82, 0x00,0x15, 0x95,0x93, 0xFF,0xFC, 0xF5,0x86, 0x79,0xCC, 0x95,0x93,
+0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x1E,0xC0, 0x97,0x93, 0xFF,0xFC, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x06, 0x79,0xCC, 0x97,0x13,
+0xFF,0xFC, 0xF7,0x06, 0x78,0xB0, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00,
+0x14,0xF4, 0x97,0x93, 0xFF,0xFC, 0xF7,0x06, 0x79,0xCC, 0x97,0x13, 0xFF,0xFC, 0xF7,0x06,
+0x79,0x3C, 0x97,0x13, 0xFF,0xFC, 0x07,0x88, 0x00,0x08, 0xE0,0x00, 0x14,0xF4, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC1,0x3C, 0x00,0x00, 0x02,0x10, 0x00,0x04, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x0C, 0x85,0x96, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x86,0xAE, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x01,
+0x23,0x84, 0x27,0x14, 0x00,0x0C, 0x87,0x2E, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x01, 0x97,0x2E, 0x00,0x04, 0x87,0x2E, 0x00,0x04, 0xE0,0x01, 0x24,0x34, 0x96,0x96,
+0xFF,0xF4, 0x97,0x13, 0xFF,0xFC, 0x85,0x16, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x95,0x13,
+0xFF,0xFC, 0x95,0x93, 0xFF,0xFC, 0x95,0x96, 0xFF,0xEC, 0x07,0x88, 0x00,0x08, 0xE0,0x01,
+0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xEC, 0x20,0x22, 0x00,0x00, 0xE6,0x01,
+0x24,0x34, 0x00,0x00, 0x00,0x01, 0x86,0xAE, 0x00,0x04, 0x86,0x16, 0xFF,0xF4, 0x00,0x00,
+0x00,0x01, 0xC0,0x36, 0x62,0x00, 0xEE,0x01, 0x24,0x21, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0xC6,0xB8, 0x58,0x00, 0x77,0x31, 0x00,0x01, 0xC7,0x38,
+0x60,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x58,0x00, 0x85,0x36, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x95,0x36, 0x00,0x0C, 0x85,0x36, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x95,0x36,
+0x00,0x10, 0x85,0x36, 0x00,0x08, 0x00,0x00, 0x00,0x01, 0x95,0x36, 0x00,0x14, 0x26,0xB4,
+0x00,0x0C, 0xC0,0x36, 0x72,0x00, 0xEE,0x01, 0x23,0xEC, 0x00,0x00, 0x00,0x01, 0x87,0x2E,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x07,0x38, 0x00,0x01, 0x97,0x2E, 0x00,0x04, 0x87,0x2E,
+0x00,0x04, 0x86,0x96, 0xFF,0xF4, 0x85,0x16, 0x00,0x04, 0x77,0x35, 0x00,0x01, 0xC7,0x38,
+0x68,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x2C, 0x70,0x00, 0x85,0x2A, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x95,0x3A, 0x00,0x0C, 0x85,0x16, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x85,0x2A,
+0x00,0x04, 0x00,0x00, 0x00,0x01, 0x95,0x3A, 0x00,0x10, 0x85,0x16, 0x00,0x08, 0xF4,0x02,
+0x00,0x01, 0x95,0x3A, 0x00,0x14, 0x96,0xAE, 0x00,0x08, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x0C, 0x85,0x96, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x84,0x2E, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x20,0x22, 0x00,0x00, 0xE6,0x01,
+0x25,0x55, 0x27,0x14, 0x00,0x0C, 0x97,0x13, 0xFF,0xFC, 0x85,0x16, 0x00,0x04, 0x00,0x00,
+0x00,0x01, 0x95,0x13, 0xFF,0xFC, 0x95,0x93, 0xFF,0xFC, 0x95,0x96, 0xFF,0xEC, 0x07,0x88,
+0x00,0x08, 0xE0,0x01, 0x25,0x68, 0x97,0x93, 0xFF,0xFC, 0x85,0x96, 0xFF,0xEC, 0x20,0x22,
+0x00,0x00, 0xE6,0x01, 0x25,0x55, 0x00,0x00, 0x00,0x01, 0x86,0x16, 0xFF,0xF4, 0x00,0x00,
+0x00,0x01, 0x20,0x32, 0x00,0x00, 0xEE,0x01, 0x25,0x45, 0x77,0x31, 0x00,0x01, 0xC6,0xAC,
+0x00,0x00, 0xC7,0x38, 0x60,0x00, 0x77,0x39, 0x00,0x02, 0xC7,0x38, 0x58,0x00, 0x85,0x36,
+0x00,0x18, 0x00,0x00, 0x00,0x01, 0x95,0x36, 0x00,0x0C, 0x85,0x36, 0x00,0x1C, 0x00,0x00,
+0x00,0x01, 0x95,0x36, 0x00,0x10, 0x85,0x36, 0x00,0x20, 0x00,0x00, 0x00,0x01, 0x95,0x36,
+0x00,0x14, 0x06,0xB4, 0x00,0x0C, 0xC0,0x36, 0x72,0x00, 0xEC,0x01, 0x25,0x11, 0x00,0x00,
+0x00,0x01, 0x87,0x2E, 0x00,0x04, 0xF4,0x02, 0x00,0x01, 0x27,0x38, 0x00,0x01, 0x97,0x2E,
+0x00,0x04, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x08, 0x83,0x96, 0x00,0x04, 0x83,0x16, 0x00,0x00, 0xC5,0x00, 0x00,0x00, 0x84,0x1A,
+0x00,0x04, 0xC4,0xA8, 0x00,0x00, 0x94,0x16, 0xFF,0xF4, 0xC0,0x26, 0x42,0x00, 0xE6,0x01,
+0x26,0xD1, 0x00,0x00, 0x00,0x01, 0x83,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0xC0,0x2A,
+0x32,0x00, 0xE6,0x01, 0x26,0xD1, 0xC7,0x20, 0x4A,0x00, 0x95,0x16, 0xFF,0xF4, 0x76,0xB8,
+0xFF,0xE1, 0xC7,0x38, 0x68,0x00, 0x77,0x39, 0xFF,0xFF, 0xC5,0x24, 0x70,0x00, 0x77,0x29,
+0x00,0x01, 0xC7,0x38, 0x50,0x00, 0x77,0x39, 0x00,0x02, 0x83,0x16, 0x00,0x00, 0x86,0x9E,
+0x00,0x00, 0xC5,0xB8, 0x30,0x00, 0x05,0xAC, 0x00,0x0C, 0x87,0x2E, 0x00,0x00, 0xC6,0x00,
+0x00,0x00, 0xC0,0x36, 0x72,0x00, 0xE6,0x01, 0x26,0x10, 0x20,0x32, 0x00,0x00, 0x86,0x9E,
+0x00,0x04, 0x87,0x2E, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36, 0x72,0x00, 0xE6,0x01,
+0x26,0x10, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32, 0x00,0x00, 0xE6,0x01,
+0x26,0x25, 0x00,0x00, 0x00,0x01, 0xC7,0x00, 0x00,0x00, 0xE0,0x01, 0x26,0x78, 0x20,0x3A,
+0x00,0x00, 0x86,0x9E, 0x00,0x00, 0x87,0x2E, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x01, 0x26,0x5C, 0x00,0x00, 0x00,0x01, 0xE6,0x01, 0x26,0x64, 0x20,0x32,
+0x00,0x00, 0x86,0x9E, 0x00,0x04, 0x87,0x2E, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC0,0x36,
+0x72,0x00, 0xE2,0x01, 0x26,0x65, 0x20,0x32, 0x00,0x00, 0xF6,0x02, 0x00,0x01, 0x20,0x32,
+0x00,0x00, 0x47,0x04, 0xFF,0xFF, 0xE6,0x01, 0x26,0x79, 0x20,0x3A, 0x00,0x00, 0xF7,0x02,
+0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x26,0xB1, 0x20,0x3A, 0x00,0x00, 0xEE,0x01,
+0x26,0xA0, 0x20,0x3A, 0x00,0x01, 0x43,0x04, 0xFF,0xFF, 0xC0,0x3A, 0x32,0x00, 0xE6,0x01,
+0x26,0xC9, 0xC0,0x26, 0x42,0x00, 0xE0,0x01, 0x25,0x90, 0x00,0x00, 0x00,0x01, 0xE6,0x01,
+0x26,0xC1, 0xC0,0x26, 0x42,0x00, 0xE0,0x01, 0x25,0x90, 0x00,0x00, 0x00,0x01, 0x83,0x16,
+0x00,0x08, 0xF4,0x02, 0x00,0x01, 0xE0,0x01, 0x26,0xE0, 0x95,0x1A, 0x00,0x00, 0xE0,0x01,
+0x25,0x8C, 0xC4,0xA8, 0x00,0x00, 0xE0,0x01, 0x25,0x8C, 0xC4,0x28, 0x00,0x00, 0x83,0x16,
+0x00,0x08, 0x00,0x00, 0x00,0x01, 0x94,0x1A, 0x00,0x00, 0xC4,0x00, 0x00,0x00, 0x87,0x96,
+0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x85,0x96,
+0x00,0x04, 0x84,0x16, 0x00,0x00, 0x84,0x96, 0x00,0x08, 0xF7,0x02, 0x00,0x03, 0xC6,0xA0,
+0x4D,0x80, 0xC6,0xB6, 0x74,0x00, 0xE6,0x01, 0x27,0x71, 0xC6,0x20, 0x00,0x00, 0x20,0x36,
+0x00,0x02, 0xE6,0x01, 0x27,0xA0, 0xC5,0x20, 0x48,0x00, 0xC7,0x20, 0x48,0x00, 0x27,0x38,
+0x00,0x02, 0xC0,0x22, 0x72,0x00, 0xE2,0x01, 0x27,0x9C, 0xC5,0x38, 0x00,0x00, 0x87,0x2E,
+0x00,0x00, 0x76,0xAD, 0x00,0x1E, 0x76,0xB4, 0xFF,0xE5, 0xC7,0x38, 0x6F,0xC0, 0x77,0x38,
+0xFF,0xF0, 0xF7,0x33, 0x28,0x00, 0x06,0x30, 0x00,0x02, 0xC0,0x32, 0x52,0x00, 0xE2,0x01,
+0x27,0x41, 0x05,0xAC, 0x00,0x02, 0xE0,0x01, 0x27,0xA0, 0xC5,0x20, 0x48,0x00, 0xC7,0x20,
+0x48,0x00, 0x27,0x38, 0x00,0x04, 0xC0,0x22, 0x72,0x00, 0xE2,0x01, 0x27,0xA0, 0xC5,0x20,
+0x48,0x00, 0x83,0xAD, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0x93,0xB1, 0x00,0x04, 0xC0,0x32,
+0x72,0x00, 0xE2,0x01, 0x27,0x85, 0x00,0x00, 0x00,0x01, 0xC5,0x20, 0x48,0x00, 0xC0,0x32,
+0x52,0x00, 0xE4,0x01, 0x27,0xD5, 0x00,0x00, 0x00,0x01, 0x86,0xAE, 0x00,0x00, 0x77,0x2D,
+0x00,0x1E, 0x77,0x38, 0xFF,0xE5, 0xC6,0xB4, 0x77,0xC0, 0x76,0xB5, 0xFF,0xE8, 0xF6,0xB3,
+0x68,0x00, 0x06,0x30, 0x00,0x01, 0xC0,0x32, 0x52,0x00, 0xE4,0x01, 0x27,0xAC, 0x05,0xAC,
+0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x0C, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x84,0x16,
+0x00,0x00, 0x86,0x96, 0x00,0x04, 0x00,0x00, 0x00,0x01, 0xC7,0x22, 0x6D,0x80, 0xE6,0x01,
+0x28,0x10, 0x20,0x36, 0x00,0x00, 0xE0,0x01, 0x28,0x74, 0xC4,0x38, 0x00,0x00, 0xF7,0x02,
+0x00,0x01, 0xEE,0x01, 0x28,0x41, 0xF6,0x02, 0x00,0x00, 0x76,0xB5, 0x00,0x01, 0x20,0x36,
+0x00,0x00, 0xEE,0x01, 0x28,0x1C, 0x77,0x39, 0x00,0x01, 0xE0,0x01, 0x28,0x44, 0x20,0x22,
+0x00,0x00, 0x74,0x21, 0x00,0x01, 0x77,0x38, 0xFF,0xFF, 0x06,0x30, 0x00,0x01, 0x20,0x22,
+0x00,0x00, 0xEE,0x01, 0x28,0x34, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x28,0x71, 0x00,0x00,
+0x00,0x01, 0xC0,0x22, 0x6A,0x00, 0xE4,0x01, 0x28,0x64, 0x00,0x00, 0x00,0x01, 0xC4,0x20,
+0x6A,0x00, 0x77,0x3A, 0xFF,0xFF, 0xE6,0x01, 0x28,0x54, 0x76,0xB4, 0xFF,0xFF, 0xD4,0x20,
+0x07,0x62, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x08, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10,
+0x00,0x04, 0xE0,0x01, 0x28,0xCC, 0xF7,0x06, 0x29,0xDC, 0x86,0xBA, 0x00,0x00, 0x00,0x00,
+0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x01, 0x28,0xC9, 0x00,0x00, 0x00,0x01, 0x97,0x16,
+0xFF,0xF4, 0x07,0x88, 0x00,0x08, 0xC1,0x34, 0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x16,
+0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x27,0x38, 0x00,0x04, 0xF6,0x06, 0x29,0xE0, 0xC0,0x3A,
+0x62,0x00, 0xE4,0x01, 0x28,0x9D, 0x00,0x00, 0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96,
+0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93,
+0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0x22,0x10, 0x00,0x04, 0xE0,0x01, 0x29,0x34, 0xF7,0x06,
+0x29,0x98, 0x86,0xBA, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x20,0x36, 0x00,0x00, 0xE6,0x01,
+0x29,0x31, 0x00,0x00, 0x00,0x01, 0x97,0x16, 0xFF,0xF4, 0x07,0x88, 0x00,0x08, 0xC1,0x34,
+0x00,0x00, 0x97,0x93, 0xFF,0xFC, 0x87,0x16, 0xFF,0xF4, 0x00,0x00, 0x00,0x01, 0x07,0x38,
+0x00,0x04, 0xF6,0x06, 0x29,0xE0, 0xC0,0x3A, 0x62,0x00, 0xE4,0x01, 0x29,0x04, 0x00,0x00,
+0x00,0x01, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x92,0x93, 0xFF,0xFC, 0x02,0x90, 0x00,0x08, 0xF7,0x04,
+0x7B,0x50, 0x00,0x00, 0x00,0x01, 0x20,0x3A, 0x00,0x00, 0xE6,0x01, 0x29,0x84, 0xF6,0x82,
+0x00,0x01, 0xF6,0x85, 0x7B,0x50, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x28,0xF0, 0x97,0x93,
+0xFF,0xFC, 0x87,0x96, 0xFF,0xFC, 0x82,0x96, 0xFF,0xF8, 0x02,0x14, 0x00,0x00, 0x01,0x3C,
+0x00,0x00, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x0B,0x4C, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x42,0x88, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x5E,0x50, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0xC7,0xA8, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x0B,0xD0, 0x00,0x00, 0x00,0x00, 0x00,0x01,
+0x1C,0x88, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x1E,0x14, 0x00,0x00, 0x00,0x00, 0x00,0x01,
+0x21,0x2C, 0x00,0x00, 0x00,0x00, 0x00,0x01, 0x22,0xE4, 0x00,0x00, 0x00,0x00, } ;
+
+
+/* This is the LANai data */
+
+static unsigned int lanai4_data_off = 0x94F0; /* half-word offset */
+static unsigned char lanai4_data[20472] __initdata = {
+0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x01,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00, 0x00,0x00,
+0x00,0x00, 0x00,0x00, 0x00,0x00, } ;
+
+
+#ifdef SYMBOL_DEFINES_COMPILED
+/* These are half-word addresses - NOT byte offsets */
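+/* Illustrative note (added, not generated firmware output): a half-word is
+ * two bytes, so a symbol's byte offset is its value doubled, e.g. 0xBD08
+ * below would correspond to byte offset 0x17A10. */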
+#define MYRI_GenerateMapVersion 0xBD08
+#define MYRI_MoreToGenerate 0xBD0C
+#define MYRI_GenerateMessage 0xBCEC
+#define MYRI_RelayMapVersion 0xBD04
+#define MYRI_RelayStart 0xBD00
+#define MYRI_RelayDirection 0xBCFC
+#define MYRI_RelayIndex 0xBCF4
+#define MYRI_RelayStop 0xBCF8
+#define MYRI_RelayMessage 0xBCF0
+#define MYRI_BroadcastRelayMessagesSent 0xBD10
+#define MYRI_SendMultiplexGrabbed 0xBD3C
+#define MYRI_HostReceiveItem 0xBD14
+#define MYRI_HostReceiveNumScatters 0xBD50
+#define MYRI_HostReceiveScatterIndex 0xBD44
+#define MYRI_HostReceiveScatterPointer 0xBD4C
+#define MYRI_HostReceiveScatterLength 0xBD48
+#define MYRI_HostReceiveChecksum 0xBD40
+#define MYRI_RouteNumFragments 0xBD60
+#define MYRI_RouteFragment 0xBD5C
+#define MYRI_RouteStamp 0xBD58
+#define MYRI_RoutesSoFar 0xBD64
+#define MYRI_MapVersionChanged 0xBD68
+#define MYRI_map_c 0xBD54
+#define MYRI_pack_list 0x789C
+#define MYRI_pack_table 0x78BA
+#define MYRI_msg2event 0x7FF6
+#define MYRI_VersionString 0xB7A0
+#define MYRI_SizeofChannel 0xBD80
+#define MYRI_EndOfQueueSpace 0xBD84
+#define MYRI_FreeCode 0xBD78
+#define MYRI_FreeData 0xBD7C
+#define MYRI_Asserting 0xBD6C
+#define MYRI_Version 0xBD74
+#define MYRI_Processor 0xBD70
+#define MYRI_NetReceive 0xBD8C
+#define MYRI_NetReceiveBuffer 0xBD88
+#define MYRI_MapLength 0xBD9C
+#define MYRI_MapSendAlignment 0xBD98
+#define MYRI_HostReplyLength 0xBDA4
+#define MYRI_HostReplySendAlignment 0xBDA0
+#define MYRI_MapSendMessages 0xBD94
+#define MYRI_bitmask 0x90C6
+#define MYRI_HostSendFull 0x99C0
+#define MYRI_HostReplies 0xA04C
+#define MYRI_Interrupts 0xBCE4
+#define MYRI_NetReceiveDrops 0xB8DA
+#define MYRI_SendMultiplexDoneEvent 0xBB7A
+#define MYRI_bEvents 0x96E6
+#define MYRI_bHostReceiveShortcuts 0x9972
+#define MYRI_the_map 0xB728
+#define MYRI_Channels 0xB7A2
+#define MYRI_bEventIndex 0x9766
+#define MYRI_NoBuffersChannel0 0x9974
+#define MYRI_bShakes 0x976A
+#define MYRI_MAPPER 0xA122
+#define MYRI_CORE_timeout_counter 0xA114
+#define MYRI_the_map_is_valid 0xA040
+#define MYRI_bBadScatters 0x9970
+#define MYRI_HostReceiveChannel 0x9960
+#define MYRI_send_space 0xA7B0
+#define MYRI_MapReceiveMessages 0xB8E0
+#define MYRI_wakeup_mask 0xBC4E
+#define MYRI_NetSendBuffer 0xBB30
+#define MYRI_HostReceiveMulticast 0x9968
+#define MYRI_HostSendChannel 0x9A94
+#define MYRI_NetReceiveDmaDone 0xB7FA
+#define MYRI_HostSendChecksum 0x9AAA
+#define MYRI_HostReceiveScatter 0x9914
+#define MYRI_compares 0x9DC8
+#define MYRI_NetSendQueue 0xBAFC
+#define MYRI_HostTable 0xA54C
+#define MYRI_map_h 0x9DB4
+#define MYRI_HostSendBytes 0x9AA0
+#define MYRI_L3_end_loaded_memory 0xBDB4
+#define MYRI_NetSendBytes 0xBB2A
+#define MYRI_map_space 0xA81C
+#define MYRI_MAP_ACK 0xA045
+#define MYRI_NetReceiveMisroutes 0xB8D6
+#define MYRI_HostReceiveBytes 0x995C
+#define MYRI_BroadcastRelayIdle 0x95C2
+#define MYRI_HostReceiveIdle 0x9770
+#define MYRI_timing 0xBC08
+#define MYRI_HostReceive 0x996A
+#define MYRI_routeHandleMessage 0x7732
+#define MYRI_Freses 0xB7AE
+#define MYRI_NextToPut 0x969C
+#define MYRI_HostSendIdle 0x997A
+#define MYRIedata 0xBCEC
+#define MYRI_SendingHostReply 0xBAFA
+#define MYRI_timing_period 0xBC50
+#define MYRI_debug 0xA138
+#define MYRI_NetSendBusy 0xB998
+#define MYRI_routeInitialize 0x6AA0
+#define MYRI_HostReceiveQueue 0x9966
+#define MYRI_bWakes 0x9768
+#define MYRI_NetReceiveBadLengths 0xB8D0
+#define MYRI_NetReceiveQueue 0xB8E2
+#define MYRI_MapBuffer 0xBAF6
+#define MYRI_MapChecksum 0xA048
+#define MYRI_the_routes_are_valid 0xA042
+#define MYRI_MAPPER_probe_stamp 0xA132
+#define MYRI_memory 0xA7AC
+#define MYRI_Events 0x9614
+#define MYRI_switches 0x9DB6
+#define MYRI_Hosts 0xA7AE
+#define MYRI_HostReceiveGoingToBroadcast 0x98CE
+#define MYRI_NetSendIdle 0xB90C
+#define MYRI_host_reply_space 0xB72C
+#define MYRI_HostReceiveWaitingToBroadcast 0x9888
+#define MYRI_bSetRoutes 0xA11E
+#define MYRI_bSends 0x976C
+#define MYRI_BroadcastGenerateIdle 0x9536
+#define MYRI_WatchdogOff 0xBC9E
+#define MYRI_TIMER 0xBC52
+#define MYRI_SendMultiplex 0xBB82
+#define MYRI_HostReplyBuffer 0xBAF8
+#define MYRI_the_new_switch 0xA130
+#define MYRI_current_switch 0xA03A
+#define MYRI_memcpy 0x937C
+#define MYRI_server 0x9AF6
+#define MYRI_WatchdogOn 0xBC58
+#define MYRI_NetSendContinuing 0xB952
+#define MYRI_NetReceiveBadChannels 0xB8D2
+#define MYRI_SendMultiplexFreeMachine 0xBB80
+#define MYRI_NetReceiveFlush 0xB840
+#define MYRI_NetSendBroadcasting 0xBAB0
+#define MYRI_looking_for_a_loopback 0x9D6C
+#define MYRI_HostSendGatherPointer 0x9AA8
+#define MYRI_HostSendItem 0x9A96
+#define MYRI_MAP_REQ 0xA046
+#define MYRI_memory_free_list 0xA160
+#define MYRIend 0xBDB4
+#define MYRI_MapVersion 0x9DB2
+#define MYRI_client 0x9AB0
+#define MYRI_HostReceiveBroadcasting 0x9842
+#define MYRIetext 0x94F0
+#define MYRI_NetSendMapBusy 0xB9DE
+#define MYRI_bRouteMessages 0xA134
+#define MYRI_DmaDirection 0xB7A8
+#define MYRI_SendMultiplexFreeEvent 0xBB7C
+#define MYRI_idle_mapper 0x9B3C
+#define MYRI_NetSend 0xBB24
+#define MYRI_current_port 0xA03E
+#define MYRI_HostConnectedSwitches 0x9DDA
+#define MYRI_try_loopback 0x9B82
+#define MYRI_the_host_reply_message 0xB72A
+#define MYRI_MAPPER_queue 0x9DD8
+#define MYRI_bHostSendShortcuts 0x9AAC
+#define MYRI_BroadcastGenerate 0x9608
+#define MYRI_DmaFreeEvent 0xB7AC
+#define MYRI_host_timeout_counter 0xA116
+#define MYRI_NetReceiveDma 0xB7B4
+#define MYRI_MAPPER_try_port 0xA12C
+#define MYRI_bcopy 0x66AA
+#define MYRI_queue_h 0xBB32
+#define MYRI_bUpdates 0xA120
+#define MYRI_SendMultiplexMachine 0xBB7E
+#define MYRI_MAPPER_repeat 0xA12A
+#define MYRI_the_return_port 0xA038
+#define MYRI_NetReceiveFull 0xB886
+#define MYRI_MyHostTableIndex 0xA7AA
+#define MYRI_CORE 0xA118
+#define MYRI_I_have_a_map 0xA044
+#define MYRI_bWaiting 0xB7B0
+#define MYRI_NetReceiveOverflows 0xB8D8
+#define MYRI_NumMachines 0x96A0
+#define MYRI_MapMessagesSentCounter 0xB79C
+#define MYRI_HostReceiveBuffer 0x9962
+#define MYRI_HostSendDma 0x9A4C
+#define MYRI_NextToGet 0x969E
+#define MYRI_HostReceiveDmaBusy 0x97B6
+#define MYRI_bUpdateMessages 0xA136
+#define MYRI_try_the_switch 0x9C0E
+#define MYRI_BroadcastRelay 0x960E
+#define MYRI_the_msg 0xB798
+#define MYRI_bEventsRTC 0x9726
+#define MYRI_HostReceiveMessages 0x995E
+#define MYRI_CURRENT_MSG 0xB79A
+#define MYRI_MAPPER_phase 0xA128
+#define MYRI_HostReceiveDma 0x97FC
+#define MYRI_Watchdog 0xBCE6
+#define MYRI_HostSendEmpty 0x9A92
+#define MYRI_abort 0x63C6
+#define MYRI_SendMultiplexIdle 0xBB34
+#define MYRI_looking_for_a_switch 0x9C54
+#define MYRI_NetSendMessages 0xBB2C
+#define MYRI_updating 0x9D26
+#define MYRI_BroadcastGenerateSending 0x94F0
+#define MYRI_HostSendBuffer 0x9AA4
+#define MYRI_HostSendMessages 0x9A9E
+#define MYRI_BroadcastRelaySending 0x957C
+#define MYRI_HostSendDmaBusy 0x9A06
+#define MYRI_BroadcastsPending 0xB8CC
+#define MYRI_NetSendMapWaiting 0xBA6A
+#define MYRI_NetReceiveBadTypes 0xB8CE
+#define MYRI_looking_for_hosts 0x9C9A
+#define MYRI_bBadHeader 0x9978
+#define MYRI_HostSendGatherIndex 0x9AA6
+#define MYRI_routeLookup 0x7790
+#define MYRI_NetReceiveMessages 0xB8DE
+#define MYRI_DmaInUse 0xB7AA
+#define MYRI_explores 0x9DB8
+#define MYRI_HostSend 0x9A98
+#define MYRI_DmaResetSpin 0xB7B2
+#define MYRIstart 0x0000
+#define MYRI_RouteTable 0xA164
+#define MYRI_Machines 0x96A2
+#define MYRI_try_the_host 0x9BC8
+#define MYRI_isr_record 0x976E
+#define MYRI_HostReceiveDrops 0x995A
+#define MYRI_HostReceiveLength 0x9964
+#define MYRI_timers 0xBB88
+#define MYRI_NetSendWaiting 0xBA24
+#define MYRI_NetSendDrops 0xBB2E
+#define MYRI_comparing_a_switch 0x9CE0
+#define MYRI_OldMapChecksum 0xA04A
+#define MYRI_HostSendQueue 0x9AA2
+#define MYRI_MAPPER_host 0xA12E
+#define MYRI_compare_switch 0xA03C
+#define MYRI_main 0x80A6
+#define MYRI_NetReceiveBadCrcs 0xB8D4
+#define MYRI_NetReceiveBytes 0xB8DC
+
+#endif /* SYMBOL_DEFINES_COMPILED */
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
new file mode 100644
index 000000000000..aad5494c83cf
--- /dev/null
+++ b/drivers/net/myri_sbus.c
@@ -0,0 +1,1174 @@
+/* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
+ *
+ * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
+ */
+
+static char version[] =
+ "myri_sbus.c:v1.9 12/Sep/99 David S. Miller (davem@redhat.com)\n";
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <net/dst.h>
+#include <net/arp.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/idprom.h>
+#include <asm/sbus.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/checksum.h>
+
+#include "myri_sbus.h"
+#include "myri_code.h"
+
+/* #define DEBUG_DETECT */
+/* #define DEBUG_IRQ */
+/* #define DEBUG_TRANSMIT */
+/* #define DEBUG_RECEIVE */
+/* #define DEBUG_HEADER */
+
+#ifdef DEBUG_DETECT
+#define DET(x) printk x
+#else
+#define DET(x)
+#endif
+
+#ifdef DEBUG_IRQ
+#define DIRQ(x) printk x
+#else
+#define DIRQ(x)
+#endif
+
+#ifdef DEBUG_TRANSMIT
+#define DTX(x) printk x
+#else
+#define DTX(x)
+#endif
+
+#ifdef DEBUG_RECEIVE
+#define DRX(x) printk x
+#else
+#define DRX(x)
+#endif
+
+#ifdef DEBUG_HEADER
+#define DHDR(x) printk x
+#else
+#define DHDR(x)
+#endif
+
+#ifdef MODULE
+static struct myri_eth *root_myri_dev;
+#endif
+
+static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
+{
+ /* Clear IRQ mask. */
+ sbus_writel(0, lp + LANAI_EIMASK);
+
+ /* Turn RESET function off. */
+ sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
+}
+
+static void myri_reset_on(void __iomem *cregs)
+{
+ /* Enable RESET function. */
+ sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);
+
+ /* Disable IRQ's. */
+ sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
+}
+
+static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
+{
+ sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
+ sbus_writel(0, lp + LANAI_EIMASK);
+ sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
+}
+
+static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
+{
+ sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
+ sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
+}
+
+static inline void bang_the_chip(struct myri_eth *mp)
+{
+ struct myri_shmem __iomem *shmem = mp->shmem;
+ void __iomem *cregs = mp->cregs;
+
+ sbus_writel(1, &shmem->send);
+ sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
+}
+
+static int myri_do_handshake(struct myri_eth *mp)
+{
+ struct myri_shmem __iomem *shmem = mp->shmem;
+ void __iomem *cregs = mp->cregs;
+ struct myri_channel __iomem *chan = &shmem->channel;
+ int tick = 0;
+
+ DET(("myri_do_handshake: "));
+ if (sbus_readl(&chan->state) == STATE_READY) {
+ DET(("Already STATE_READY, failed.\n"));
+ return -1; /* We're hosed... */
+ }
+
+ myri_disable_irq(mp->lregs, cregs);
+
+ while (tick++ <= 25) {
+ u32 softstate;
+
+ /* Wake it up. */
+ DET(("shakedown, CONTROL_WON, "));
+ sbus_writel(1, &shmem->shakedown);
+ sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
+
+ softstate = sbus_readl(&chan->state);
+ DET(("chanstate[%08x] ", softstate));
+ if (softstate == STATE_READY) {
+ DET(("wakeup successful, "));
+ break;
+ }
+
+ if (softstate != STATE_WFN) {
+ DET(("not WFN setting that, "));
+ sbus_writel(STATE_WFN, &chan->state);
+ }
+
+ udelay(20);
+ }
+
+ myri_enable_irq(mp->lregs, cregs);
+
+ if (tick > 25) {
+ DET(("25 ticks we lose, failure.\n"));
+ return -1;
+ }
+ DET(("success\n"));
+ return 0;
+}
+
+static int myri_load_lanai(struct myri_eth *mp)
+{
+ struct net_device *dev = mp->dev;
+ struct myri_shmem __iomem *shmem = mp->shmem;
+ void __iomem *rptr;
+ int i;
+
+ myri_disable_irq(mp->lregs, mp->cregs);
+ myri_reset_on(mp->cregs);
+
+ rptr = mp->lanai;
+ for (i = 0; i < mp->eeprom.ramsz; i++)
+ sbus_writeb(0, rptr + i);
+
+ if (mp->eeprom.cpuvers >= CPUVERS_3_0)
+ sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);
+
+ /* Load executable code. */
+ for (i = 0; i < sizeof(lanai4_code); i++)
+ sbus_writeb(lanai4_code[i], rptr + (lanai4_code_off * 2) + i);
+
+ /* Load data segment. */
+ for (i = 0; i < sizeof(lanai4_data); i++)
+ sbus_writeb(lanai4_data[i], rptr + (lanai4_data_off * 2) + i);
+
+ /* Set device address. */
+ sbus_writeb(0, &shmem->addr[0]);
+ sbus_writeb(0, &shmem->addr[1]);
+ for (i = 0; i < 6; i++)
+ sbus_writeb(dev->dev_addr[i],
+ &shmem->addr[i + 2]);
+
+ /* Set SBUS bursts and interrupt mask. */
+ sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
+ sbus_writel(SHMEM_IMASK_RX, &shmem->imask);
+
+ /* Release the LANAI. */
+ myri_disable_irq(mp->lregs, mp->cregs);
+ myri_reset_off(mp->lregs, mp->cregs);
+ myri_disable_irq(mp->lregs, mp->cregs);
+
+ /* Wait for the reset to complete. */
+ for (i = 0; i < 5000; i++) {
+ if (sbus_readl(&shmem->channel.state) != STATE_READY)
+ break;
+ else
+ udelay(10);
+ }
+
+ if (i == 5000)
+ printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");
+
+ i = myri_do_handshake(mp);
+ if (i)
+ printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");
+
+ if (mp->eeprom.cpuvers == CPUVERS_4_0)
+ sbus_writel(0, mp->lregs + LANAI_VERS);
+
+ return i;
+}
+
+static void myri_clean_rings(struct myri_eth *mp)
+{
+ struct sendq __iomem *sq = mp->sq;
+ struct recvq __iomem *rq = mp->rq;
+ int i;
+
+ sbus_writel(0, &rq->tail);
+ sbus_writel(0, &rq->head);
+ for (i = 0; i < (RX_RING_SIZE+1); i++) {
+ if (mp->rx_skbs[i] != NULL) {
+ struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
+ u32 dma_addr;
+
+ dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
+ sbus_unmap_single(mp->myri_sdev, dma_addr, RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+ dev_kfree_skb(mp->rx_skbs[i]);
+ mp->rx_skbs[i] = NULL;
+ }
+ }
+
+ mp->tx_old = 0;
+ sbus_writel(0, &sq->tail);
+ sbus_writel(0, &sq->head);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (mp->tx_skbs[i] != NULL) {
+ struct sk_buff *skb = mp->tx_skbs[i];
+ struct myri_txd __iomem *txd = &sq->myri_txd[i];
+ u32 dma_addr;
+
+ dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
+ sbus_unmap_single(mp->myri_sdev, dma_addr, (skb->len + 3) & ~3, SBUS_DMA_TODEVICE);
+ dev_kfree_skb(mp->tx_skbs[i]);
+ mp->tx_skbs[i] = NULL;
+ }
+ }
+}
+
+static void myri_init_rings(struct myri_eth *mp, int from_irq)
+{
+ struct recvq __iomem *rq = mp->rq;
+ struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
+ struct net_device *dev = mp->dev;
+ int gfp_flags = GFP_KERNEL;
+ int i;
+
+ if (from_irq || in_interrupt())
+ gfp_flags = GFP_ATOMIC;
+
+ myri_clean_rings(mp);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
+ u32 dma_addr;
+
+ if (!skb)
+ continue;
+ mp->rx_skbs[i] = skb;
+ skb->dev = dev;
+ skb_put(skb, RX_ALLOC_SIZE);
+
+ dma_addr = sbus_map_single(mp->myri_sdev, skb->data, RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+ sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
+ sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
+ sbus_writel(i, &rxd[i].ctx);
+ sbus_writel(1, &rxd[i].num_sg);
+ }
+ sbus_writel(0, &rq->head);
+ sbus_writel(RX_RING_SIZE, &rq->tail);
+}
+
+static int myri_init(struct myri_eth *mp, int from_irq)
+{
+ myri_init_rings(mp, from_irq);
+ return 0;
+}
+
+static void myri_is_not_so_happy(struct myri_eth *mp)
+{
+}
+
+#ifdef DEBUG_HEADER
+static void dump_ehdr(struct ethhdr *ehdr)
+{
+ printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)"
+ "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n",
+ ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2],
+ ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[5],
+ ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2],
+ ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[5],
+ ehdr->h_proto);
+}
+
+static void dump_ehdr_and_myripad(unsigned char *stuff)
+{
+ struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);
+
+ printk("pad[%02x:%02x]", stuff[0], stuff[1]);
+ printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)"
+ "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n",
+ ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2],
+ ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[5],
+ ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2],
+ ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[5],
+ ehdr->h_proto);
+}
+#endif
+
+static void myri_tx(struct myri_eth *mp, struct net_device *dev)
+{
+ struct sendq __iomem *sq = mp->sq;
+ int entry = mp->tx_old;
+ int limit = sbus_readl(&sq->head);
+
+ DTX(("entry[%d] limit[%d] ", entry, limit));
+ if (entry == limit)
+ return;
+ while (entry != limit) {
+ struct sk_buff *skb = mp->tx_skbs[entry];
+ u32 dma_addr;
+
+ DTX(("SKB[%d] ", entry));
+ dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
+ sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ mp->tx_skbs[entry] = NULL;
+ mp->enet_stats.tx_packets++;
+ entry = NEXT_TX(entry);
+ }
+ mp->tx_old = entry;
+}
+
+/* Determine the packet's protocol ID. The rule here is that we
+ * assume 802.3 if the type field is short enough to be a length.
+ * This is normal practice and works for any 'now in use' protocol.
+ */
+static unsigned short myri_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethhdr *eth;
+ unsigned char *rawp;
+
+ skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN);
+ skb_pull(skb, dev->hard_header_len);
+ eth = eth_hdr(skb);
+
+#ifdef DEBUG_HEADER
+ DHDR(("myri_type_trans: "));
+ dump_ehdr(eth);
+#endif
+ if (*eth->h_dest & 1) {
+ if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
+ if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+
+ if (ntohs(eth->h_proto) >= 1536)
+ return eth->h_proto;
+
+ rawp = skb->data;
+
+ /* This is a magic hack to spot IPX packets. Older Novell breaks
+ * the protocol design and runs IPX over 802.3 without an 802.2 LLC
+ * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+ * won't work for fault tolerant netware but does for the rest.
+ */
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
+
+ /* Real 802.2 LLC */
+ return htons(ETH_P_802_2);
+}
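+
+/* For example, an IP frame arrives with h_proto == htons(ETH_P_IP) (0x0800,
+ * i.e. >= 1536) and that value is returned as-is.  A frame whose type field
+ * is, say, 0x0064 is really an 802.3 length: if the first two payload bytes
+ * are 0xFFFF it is treated as raw IPX (ETH_P_802_3), otherwise as genuine
+ * 802.2 LLC (ETH_P_802_2).
+ */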
+
+static void myri_rx(struct myri_eth *mp, struct net_device *dev)
+{
+ struct recvq __iomem *rq = mp->rq;
+ struct recvq __iomem *rqa = mp->rqack;
+ int entry = sbus_readl(&rqa->head);
+ int limit = sbus_readl(&rqa->tail);
+ int drops;
+
+ DRX(("entry[%d] limit[%d] ", entry, limit));
+ if (entry == limit)
+ return;
+ drops = 0;
+ DRX(("\n"));
+ while (entry != limit) {
+ struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
+ u32 csum = sbus_readl(&rxdack->csum);
+ int len = sbus_readl(&rxdack->myri_scatters[0].len);
+ int index = sbus_readl(&rxdack->ctx);
+ struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
+ struct sk_buff *skb = mp->rx_skbs[index];
+
+ /* Ack it. */
+ sbus_writel(NEXT_RX(entry), &rqa->head);
+
+ /* Check for errors. */
+ DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
+ sbus_dma_sync_single_for_cpu(mp->myri_sdev,
+ sbus_readl(&rxd->myri_scatters[0].addr),
+ RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+ if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
+ DRX(("ERROR["));
+ mp->enet_stats.rx_errors++;
+ if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
+ DRX(("BAD_LENGTH] "));
+ mp->enet_stats.rx_length_errors++;
+ } else {
+ DRX(("NO_PADDING] "));
+ mp->enet_stats.rx_frame_errors++;
+ }
+
+ /* Return it to the LANAI. */
+ drop_it:
+ drops++;
+ DRX(("DROP "));
+ mp->enet_stats.rx_dropped++;
+ sbus_dma_sync_single_for_device(mp->myri_sdev,
+ sbus_readl(&rxd->myri_scatters[0].addr),
+ RX_ALLOC_SIZE,
+ SBUS_DMA_FROMDEVICE);
+ sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
+ sbus_writel(index, &rxd->ctx);
+ sbus_writel(1, &rxd->num_sg);
+ sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
+ goto next;
+ }
+
+ DRX(("len[%d] ", len));
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+ u32 dma_addr;
+
+ DRX(("BIGBUFF "));
+ new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ DRX(("skb_alloc(FAILED) "));
+ goto drop_it;
+ }
+ sbus_unmap_single(mp->myri_sdev,
+ sbus_readl(&rxd->myri_scatters[0].addr),
+ RX_ALLOC_SIZE,
+ SBUS_DMA_FROMDEVICE);
+ mp->rx_skbs[index] = new_skb;
+ new_skb->dev = dev;
+ skb_put(new_skb, RX_ALLOC_SIZE);
+ dma_addr = sbus_map_single(mp->myri_sdev,
+ new_skb->data,
+ RX_ALLOC_SIZE,
+ SBUS_DMA_FROMDEVICE);
+ sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
+ sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
+ sbus_writel(index, &rxd->ctx);
+ sbus_writel(1, &rxd->num_sg);
+ sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
+
+ /* Trim the original skb for the netif. */
+ DRX(("trim(%d) ", len));
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len);
+
+ DRX(("SMALLBUFF "));
+ if (copy_skb == NULL) {
+ DRX(("dev_alloc_skb(FAILED) "));
+ goto drop_it;
+ }
+ /* DMA sync already done above. */
+ copy_skb->dev = dev;
+ DRX(("resv_and_put "));
+ skb_put(copy_skb, len);
+ memcpy(copy_skb->data, skb->data, len);
+
+ /* Reuse original ring buffer. */
+ DRX(("reuse "));
+ sbus_dma_sync_single_for_device(mp->myri_sdev,
+ sbus_readl(&rxd->myri_scatters[0].addr),
+ RX_ALLOC_SIZE,
+ SBUS_DMA_FROMDEVICE);
+ sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
+ sbus_writel(index, &rxd->ctx);
+ sbus_writel(1, &rxd->num_sg);
+ sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
+
+ skb = copy_skb;
+ }
+
+ /* Just like the happy meal we get checksums from this card. */
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */
+
+ skb->protocol = myri_type_trans(skb, dev);
+ DRX(("prot[%04x] netif_rx ", skb->protocol));
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ mp->enet_stats.rx_packets++;
+ mp->enet_stats.rx_bytes += len;
+ next:
+ DRX(("NEXT\n"));
+ entry = NEXT_RX(entry);
+ }
+}
+
+static irqreturn_t myri_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct myri_eth *mp = (struct myri_eth *) dev->priv;
+ void __iomem *lregs = mp->lregs;
+ struct myri_channel __iomem *chan = &mp->shmem->channel;
+ unsigned long flags;
+ u32 status;
+ int handled = 0;
+
+ spin_lock_irqsave(&mp->irq_lock, flags);
+
+ status = sbus_readl(lregs + LANAI_ISTAT);
+ DIRQ(("myri_interrupt: status[%08x] ", status));
+ if (status & ISTAT_HOST) {
+ u32 softstate;
+
+ handled = 1;
+ DIRQ(("IRQ_DISAB "));
+ myri_disable_irq(lregs, mp->cregs);
+ softstate = sbus_readl(&chan->state);
+ DIRQ(("state[%08x] ", softstate));
+ if (softstate != STATE_READY) {
+ DIRQ(("myri_not_so_happy "));
+ myri_is_not_so_happy(mp);
+ }
+ DIRQ(("\nmyri_rx: "));
+ myri_rx(mp, dev);
+ DIRQ(("\nistat=ISTAT_HOST "));
+ sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
+ DIRQ(("IRQ_ENAB "));
+ myri_enable_irq(lregs, mp->cregs);
+ }
+ DIRQ(("\n"));
+
+ spin_unlock_irqrestore(&mp->irq_lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int myri_open(struct net_device *dev)
+{
+ struct myri_eth *mp = (struct myri_eth *) dev->priv;
+
+ return myri_init(mp, in_interrupt());
+}
+
+static int myri_close(struct net_device *dev)
+{
+ struct myri_eth *mp = (struct myri_eth *) dev->priv;
+
+ myri_clean_rings(mp);
+ return 0;
+}
+
+static void myri_tx_timeout(struct net_device *dev)
+{
+ struct myri_eth *mp = (struct myri_eth *) dev->priv;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+
+ mp->enet_stats.tx_errors++;
+ myri_init(mp, 0);
+ netif_wake_queue(dev);
+}
+
+static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct myri_eth *mp = (struct myri_eth *) dev->priv;
+ struct sendq __iomem *sq = mp->sq;
+ struct myri_txd __iomem *txd;
+ unsigned long flags;
+ unsigned int head, tail;
+ int len, entry;
+ u32 dma_addr;
+
+ DTX(("myri_start_xmit: "));
+
+ myri_tx(mp, dev);
+
+ netif_stop_queue(dev);
+
+ /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
+ head = sbus_readl(&sq->head);
+ tail = sbus_readl(&sq->tail);
+
+ if (!TX_BUFFS_AVAIL(head, tail)) {
+ DTX(("no buffs available, returning 1\n"));
+ return 1;
+ }
+
+ spin_lock_irqsave(&mp->irq_lock, flags);
+
+ DHDR(("xmit[skbdata(%p)]\n", skb->data));
+#ifdef DEBUG_HEADER
+ dump_ehdr_and_myripad(((unsigned char *) skb->data));
+#endif
+
+ /* XXX Maybe this can go as well. */
+ len = skb->len;
+ if (len & 3) {
+ DTX(("len&3 "));
+ len = (len + 4) & (~3);
+ }
+
+ entry = sbus_readl(&sq->tail);
+
+ txd = &sq->myri_txd[entry];
+ mp->tx_skbs[entry] = skb;
+
+ /* Must do this before we sbus map it. */
+ if (skb->data[MYRI_PAD_LEN] & 0x1) {
+ sbus_writew(0xffff, &txd->addr[0]);
+ sbus_writew(0xffff, &txd->addr[1]);
+ sbus_writew(0xffff, &txd->addr[2]);
+ sbus_writew(0xffff, &txd->addr[3]);
+ } else {
+ sbus_writew(0xffff, &txd->addr[0]);
+ sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
+ sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
+ sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
+ }
+
+ dma_addr = sbus_map_single(mp->myri_sdev, skb->data, len, SBUS_DMA_TODEVICE);
+ sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
+ sbus_writel(len, &txd->myri_gathers[0].len);
+ sbus_writel(1, &txd->num_sg);
+ sbus_writel(KERNEL_CHANNEL, &txd->chan);
+ sbus_writel(len, &txd->len);
+ sbus_writel((u32)-1, &txd->csum_off);
+ sbus_writel(0, &txd->csum_field);
+
+ sbus_writel(NEXT_TX(entry), &sq->tail);
+ DTX(("BangTheChip "));
+ bang_the_chip(mp);
+
+ DTX(("tbusy=0, returning 0\n"));
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&mp->irq_lock, flags);
+ return 0;
+}
+
+/* Create the MyriNet MAC header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ */
+static int myri_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+ void *daddr, void *saddr, unsigned len)
+{
+ struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+ unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);
+
+#ifdef DEBUG_HEADER
+ DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
+ dump_ehdr(eth);
+#endif
+
+ /* Set the MyriNET padding identifier. */
+ pad[0] = MYRI_PAD_LEN;
+ pad[1] = 0xab;
+
+ /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+ * in here instead. It is up to the 802.2 layer to carry protocol information.
+ */
+ if (type != ETH_P_802_3)
+ eth->h_proto = htons(type);
+ else
+ eth->h_proto = htons(len);
+
+ /* Set the source hardware address. */
+ if (saddr)
+ memcpy(eth->h_source, saddr, dev->addr_len);
+ else
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+
+ /* Anyway, the loopback-device should never use this function... */
+ if (dev->flags & IFF_LOOPBACK) {
+ int i;
+ for (i = 0; i < dev->addr_len; i++)
+ eth->h_dest[i] = 0;
+ return(dev->hard_header_len);
+ }
+
+ if (daddr) {
+ memcpy(eth->h_dest, daddr, dev->addr_len);
+ return dev->hard_header_len;
+ }
+ return -dev->hard_header_len;
+}
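+
+/* Note the return convention above: a positive hard_header_len means the
+ * header is complete (loopback, or daddr supplied), while a negative value
+ * tells the caller that the destination still has to be resolved (e.g. by
+ * ARP) before the frame can go out.
+ */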
+
+/* Rebuild the MyriNet MAC header. This is called after an ARP
+ * (or in future other address resolution) has completed on this
+ * sk_buff. We now let ARP fill in the other fields.
+ */
+static int myri_rebuild_header(struct sk_buff *skb)
+{
+ unsigned char *pad = (unsigned char *) skb->data;
+ struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
+ struct net_device *dev = skb->dev;
+
+#ifdef DEBUG_HEADER
+ DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
+ dump_ehdr(eth);
+#endif
+
+ /* Refill MyriNet padding identifiers, this is just being anal. */
+ pad[0] = MYRI_PAD_LEN;
+ pad[1] = 0xab;
+
+ switch (eth->h_proto)
+ {
+#ifdef CONFIG_INET
+ case __constant_htons(ETH_P_IP):
+ return arp_find(eth->h_dest, skb);
+#endif
+
+ default:
+ printk(KERN_DEBUG
+ "%s: unable to resolve type %X addresses.\n",
+ dev->name, (int)eth->h_proto);
+
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ return 0;
+ break;
+ }
+
+ return 0;
+}
+
+int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh)
+{
+ unsigned short type = hh->hh_type;
+ unsigned char *pad;
+ struct ethhdr *eth;
+ struct net_device *dev = neigh->dev;
+
+ pad = ((unsigned char *) hh->hh_data) +
+ HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
+ eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
+
+ if (type == __constant_htons(ETH_P_802_3))
+ return -1;
+
+ /* Refill MyriNet padding identifiers, this is just being anal. */
+ pad[0] = MYRI_PAD_LEN;
+ pad[1] = 0xab;
+
+ eth->h_proto = type;
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_dest, neigh->ha, dev->addr_len);
+ hh->hh_len = 16;
+ return 0;
+}
+
+
+/* Called by Address Resolution module to notify changes in address. */
+void myri_header_cache_update(struct hh_cache *hh, struct net_device *dev, unsigned char * haddr)
+{
+ memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
+ haddr, dev->addr_len);
+}
+
+static int myri_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static struct net_device_stats *myri_get_stats(struct net_device *dev)
+{ return &(((struct myri_eth *)dev->priv)->enet_stats); }
+
+static void myri_set_multicast(struct net_device *dev)
+{
+ /* Do nothing, all MyriCOM nodes transmit multicast frames
+ * as broadcast packets...
+ */
+}
+
+static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
+{
+ mp->eeprom.id[0] = 0;
+ mp->eeprom.id[1] = idprom->id_machtype;
+ mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
+ mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
+ mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
+ mp->eeprom.id[5] = num;
+}
+
+static inline void determine_reg_space_size(struct myri_eth *mp)
+{
+ switch(mp->eeprom.cpuvers) {
+ case CPUVERS_2_3:
+ case CPUVERS_3_0:
+ case CPUVERS_3_1:
+ case CPUVERS_3_2:
+ mp->reg_size = (3 * 128 * 1024) + 4096;
+ break;
+
+ case CPUVERS_4_0:
+ case CPUVERS_4_1:
+ mp->reg_size = ((4096<<1) + mp->eeprom.ramsz);
+ break;
+
+ case CPUVERS_4_2:
+ case CPUVERS_5_0:
+ default:
+ printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
+ mp->eeprom.cpuvers);
+ mp->reg_size = (3 * 128 * 1024) + 4096;
+ };
+ }
+
+#ifdef DEBUG_DETECT
+static void dump_eeprom(struct myri_eth *mp)
+{
+ printk("EEPROM: clockval[%08x] cpuvers[%04x] "
+ "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
+ mp->eeprom.cval, mp->eeprom.cpuvers,
+ mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
+ mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
+ printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
+ printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
+ mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
+ mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
+ mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
+ printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
+ mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
+ mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
+ mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
+ printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
+ mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
+ mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
+ mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
+ printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
+ mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
+ mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
+ mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
+ printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
+ mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
+ mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
+ mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
+ printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
+ mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
+ mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
+ mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
+ printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
+ mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
+ mp->eeprom.prod_code);
+ printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
+}
+#endif
+
+static int __init myri_ether_init(struct sbus_dev *sdev, int num)
+{
+ static unsigned version_printed;
+ struct net_device *dev;
+ struct myri_eth *mp;
+ unsigned char prop_buf[32];
+ int i;
+
+ DET(("myri_ether_init(%p,%d):\n", sdev, num));
+ dev = alloc_etherdev(sizeof(struct myri_eth));
+
+ if (!dev)
+ return -ENOMEM;
+
+ if (version_printed++ == 0)
+ printk(version);
+
+ mp = (struct myri_eth *) dev->priv;
+ spin_lock_init(&mp->irq_lock);
+ mp->myri_sdev = sdev;
+
+ /* Clean out skb arrays. */
+ for (i = 0; i < (RX_RING_SIZE + 1); i++)
+ mp->rx_skbs[i] = NULL;
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ mp->tx_skbs[i] = NULL;
+
+ /* First check for EEPROM information. */
+ i = prom_getproperty(sdev->prom_node, "myrinet-eeprom-info",
+ (char *)&mp->eeprom, sizeof(struct myri_eeprom));
+ DET(("prom_getprop(myrinet-eeprom-info) returns %d\n", i));
+ if (i == 0 || i == -1) {
+ /* No eeprom property, must cook up the values ourselves. */
+ DET(("No EEPROM: "));
+ mp->eeprom.bus_type = BUS_TYPE_SBUS;
+ mp->eeprom.cpuvers = prom_getintdefault(sdev->prom_node,"cpu_version",0);
+ mp->eeprom.cval = prom_getintdefault(sdev->prom_node,"clock_value",0);
+ mp->eeprom.ramsz = prom_getintdefault(sdev->prom_node,"sram_size",0);
+ DET(("cpuvers[%d] cval[%d] ramsz[%d]\n", mp->eeprom.cpuvers,
+ mp->eeprom.cval, mp->eeprom.ramsz));
+ if (mp->eeprom.cpuvers == 0) {
+ DET(("EEPROM: cpuvers was zero, setting to %04x\n",CPUVERS_2_3));
+ mp->eeprom.cpuvers = CPUVERS_2_3;
+ }
+ if (mp->eeprom.cpuvers < CPUVERS_3_0) {
+ DET(("EEPROM: cpuvers < CPUVERS_3_0, clockval set to zero.\n"));
+ mp->eeprom.cval = 0;
+ }
+ if (mp->eeprom.ramsz == 0) {
+ DET(("EEPROM: ramsz == 0, setting to 128k\n"));
+ mp->eeprom.ramsz = (128 * 1024);
+ }
+ i = prom_getproperty(sdev->prom_node, "myrinet-board-id",
+ &prop_buf[0], 10);
+ DET(("EEPROM: prom_getprop(myrinet-board-id) returns %d\n", i));
+ if ((i != 0) && (i != -1))
+ memcpy(&mp->eeprom.id[0], &prop_buf[0], 6);
+ else
+ set_boardid_from_idprom(mp, num);
+ i = prom_getproperty(sdev->prom_node, "fpga_version",
+ &mp->eeprom.fvers[0], 32);
+ DET(("EEPROM: prom_getprop(fpga_version) returns %d\n", i));
+ if (i == 0 || i == -1)
+ memset(&mp->eeprom.fvers[0], 0, 32);
+
+ if (mp->eeprom.cpuvers == CPUVERS_4_1) {
+ DET(("EEPROM: cpuvers CPUVERS_4_1, "));
+ if (mp->eeprom.ramsz == (128 * 1024)) {
+ DET(("ramsize 128k, setting to 256k, "));
+ mp->eeprom.ramsz = (256 * 1024);
+ }
+ if ((mp->eeprom.cval==0x40414041)||(mp->eeprom.cval==0x90449044)){
+ DET(("changing cval from %08x to %08x ",
+ mp->eeprom.cval, 0x50e450e4));
+ mp->eeprom.cval = 0x50e450e4;
+ }
+ DET(("\n"));
+ }
+ }
+#ifdef DEBUG_DETECT
+ dump_eeprom(mp);
+#endif
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = mp->eeprom.id[i];
+
+ determine_reg_space_size(mp);
+
+ /* Map in the MyriCOM register/localram set. */
+ if (mp->eeprom.cpuvers < CPUVERS_4_0) {
+ /* XXX Makes no sense, if control reg is non-existent this
+ * XXX driver cannot function at all... maybe pre-4.0 is
+ * XXX only a valid version for PCI cards? Ask feldy...
+ */
+ DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
+ mp->regs = sbus_ioremap(&sdev->resource[0], 0,
+ mp->reg_size, "MyriCOM Regs");
+ if (!mp->regs) {
+ printk("MyriCOM: Cannot map MyriCOM registers.\n");
+ goto err;
+ }
+ mp->lanai = mp->regs + (256 * 1024);
+ mp->lregs = mp->lanai + (0x10000 * 2);
+ } else {
+ DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
+ mp->cregs = sbus_ioremap(&sdev->resource[0], 0,
+ PAGE_SIZE, "MyriCOM Control Regs");
+ mp->lregs = sbus_ioremap(&sdev->resource[0], (256 * 1024),
+ PAGE_SIZE, "MyriCOM LANAI Regs");
+ mp->lanai =
+ sbus_ioremap(&sdev->resource[0], (512 * 1024),
+ mp->eeprom.ramsz, "MyriCOM SRAM");
+ }
+ DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
+ mp->cregs, mp->lregs, mp->lanai));
+
+ if (mp->eeprom.cpuvers >= CPUVERS_4_0)
+ mp->shmem_base = 0xf000;
+ else
+ mp->shmem_base = 0x8000;
+
+ DET(("Shared memory base is %04x, ", mp->shmem_base));
+
+ mp->shmem = (struct myri_shmem __iomem *)
+ (mp->lanai + (mp->shmem_base * 2));
+ DET(("shmem mapped at %p\n", mp->shmem));
+
+ mp->rqack = &mp->shmem->channel.recvqa;
+ mp->rq = &mp->shmem->channel.recvq;
+ mp->sq = &mp->shmem->channel.sendq;
+
+ /* Reset the board. */
+ DET(("Resetting LANAI\n"));
+ myri_reset_off(mp->lregs, mp->cregs);
+ myri_reset_on(mp->cregs);
+
+ /* Turn IRQ's off. */
+ myri_disable_irq(mp->lregs, mp->cregs);
+
+ /* Reset once more. */
+ myri_reset_on(mp->cregs);
+
+ /* Get the supported DVMA burst sizes from our SBUS. */
+ mp->myri_bursts = prom_getintdefault(mp->myri_sdev->bus->prom_node,
+ "burst-sizes", 0x00);
+
+ if (!sbus_can_burst64(sdev))
+ mp->myri_bursts &= ~(DMA_BURST64);
+
+ DET(("MYRI bursts %02x\n", mp->myri_bursts));
+
+ /* Encode SBUS interrupt level in second control register. */
+ i = prom_getint(sdev->prom_node, "interrupts");
+ if (i == 0)
+ i = 4;
+ DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
+ i, (1 << i)));
+
+ sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);
+
+ mp->dev = dev;
+ dev->open = &myri_open;
+ dev->stop = &myri_close;
+ dev->hard_start_xmit = &myri_start_xmit;
+ dev->tx_timeout = &myri_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->get_stats = &myri_get_stats;
+ dev->set_multicast_list = &myri_set_multicast;
+ dev->irq = sdev->irqs[0];
+
+ /* Register interrupt handler now. */
+ DET(("Requesting MYRIcom IRQ line.\n"));
+ if (request_irq(dev->irq, &myri_interrupt,
+ SA_SHIRQ, "MyriCOM Ethernet", (void *) dev)) {
+ printk("MyriCOM: Cannot register interrupt handler.\n");
+ goto err;
+ }
+
+ dev->mtu = MYRINET_MTU;
+ dev->change_mtu = myri_change_mtu;
+ dev->hard_header = myri_header;
+ dev->rebuild_header = myri_rebuild_header;
+ dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN);
+ dev->hard_header_cache = myri_header_cache;
+ dev->header_cache_update = myri_header_cache_update;
+
+ /* Load code onto the LANai. */
+ DET(("Loading LANAI firmware\n"));
+ myri_load_lanai(mp);
+
+ if (register_netdev(dev)) {
+ printk("MyriCOM: Cannot register device.\n");
+ goto err_free_irq;
+ }
+
+#ifdef MODULE
+ mp->next_module = root_myri_dev;
+ root_myri_dev = mp;
+#endif
+
+ printk("%s: MyriCOM MyriNET Ethernet ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? ' ' : ':');
+ printk("\n");
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err:
+ /* This will also free the co-allocated 'dev->priv' */
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static int __init myri_sbus_match(struct sbus_dev *sdev)
+{
+ char *name = sdev->prom_name;
+
+ if (!strcmp(name, "MYRICOM,mlanai") ||
+ !strcmp(name, "myri"))
+ return 1;
+
+ return 0;
+}
+
+static int __init myri_sbus_probe(void)
+{
+ struct sbus_bus *bus;
+ struct sbus_dev *sdev = NULL;
+ static int called;
+ int cards = 0, v;
+
+#ifdef MODULE
+ root_myri_dev = NULL;
+#endif
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ for_each_sbus(bus) {
+ for_each_sbusdev(sdev, bus) {
+ if (myri_sbus_match(sdev)) {
+ cards++;
+ DET(("Found myricom myrinet as %s\n", sdev->prom_name));
+ if ((v = myri_ether_init(sdev, (cards - 1))))
+ return v;
+ }
+ }
+ }
+ if (!cards)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit myri_sbus_cleanup(void)
+{
+#ifdef MODULE
+ while (root_myri_dev) {
+ struct myri_eth *next = root_myri_dev->next_module;
+
+ unregister_netdev(root_myri_dev->dev);
+ /* this will also free the co-allocated 'root_myri_dev' */
+ free_netdev(root_myri_dev->dev);
+ root_myri_dev = next;
+ }
+#endif /* MODULE */
+}
+
+module_init(myri_sbus_probe);
+module_exit(myri_sbus_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
new file mode 100644
index 000000000000..9391e55a5e92
--- /dev/null
+++ b/drivers/net/myri_sbus.h
@@ -0,0 +1,313 @@
+/* myri_sbus.h: Defines for MyriCOM MyriNET SBUS card driver.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _MYRI_SBUS_H
+#define _MYRI_SBUS_H
+
+/* LANAI Registers */
+#define LANAI_IPF0 0x00UL /* Context zero state registers.*/
+#define LANAI_CUR0 0x04UL
+#define LANAI_PREV0 0x08UL
+#define LANAI_DATA0 0x0cUL
+#define LANAI_DPF0 0x10UL
+#define LANAI_IPF1 0x14UL /* Context one state registers. */
+#define LANAI_CUR1 0x18UL
+#define LANAI_PREV1 0x1cUL
+#define LANAI_DATA1 0x20UL
+#define LANAI_DPF1 0x24UL
+#define LANAI_ISTAT 0x28UL /* Interrupt status. */
+#define LANAI_EIMASK 0x2cUL /* External IRQ mask. */
+#define LANAI_ITIMER 0x30UL /* IRQ timer. */
+#define LANAI_RTC 0x34UL /* Real Time Clock */
+#define LANAI_CSUM 0x38UL /* Checksum. */
+#define LANAI_DMAXADDR 0x3cUL /* SBUS DMA external address. */
+#define LANAI_DMALADDR 0x40UL /* SBUS DMA local address. */
+#define LANAI_DMACTR 0x44UL /* SBUS DMA counter. */
+#define LANAI_RXDMAPTR 0x48UL /* Receive DMA pointer. */
+#define LANAI_RXDMALIM 0x4cUL /* Receive DMA limit. */
+#define LANAI_TXDMAPTR 0x50UL /* Transmit DMA pointer. */
+#define LANAI_TXDMALIM 0x54UL /* Transmit DMA limit. */
+#define LANAI_TXDMALIMT 0x58UL /* Transmit DMA limit w/tail. */
+ /* 0x5cUL, reserved */
+#define LANAI_RBYTE 0x60UL /* Receive byte. */
+ /* 0x64-->0x6c, reserved */
+#define LANAI_RHALF 0x70UL /* Receive half-word. */
+ /* 0x72UL, reserved */
+#define LANAI_RWORD 0x74UL /* Receive word. */
+#define LANAI_SALIGN 0x78UL /* Send align. */
+#define LANAI_SBYTE 0x7cUL /* SingleSend send-byte. */
+#define LANAI_SHALF 0x80UL /* SingleSend send-halfword. */
+#define LANAI_SWORD 0x84UL /* SingleSend send-word. */
+#define LANAI_SSENDT 0x88UL /* SingleSend special. */
+#define LANAI_DMADIR 0x8cUL /* DMA direction. */
+#define LANAI_DMASTAT 0x90UL /* DMA status. */
+#define LANAI_TIMEO 0x94UL /* Timeout register. */
+#define LANAI_MYRINET 0x98UL /* XXX MAGIC myricom thing */
+#define LANAI_HWDEBUG 0x9cUL /* Hardware debugging reg. */
+#define LANAI_LEDS 0xa0UL /* LED control. */
+#define LANAI_VERS 0xa4UL /* Version register. */
+#define LANAI_LINKON 0xa8UL /* Link activation reg. */
+ /* 0xac-->0x104, reserved */
+#define LANAI_CVAL 0x108UL /* Clock value register. */
+#define LANAI_REG_SIZE 0x10cUL
+
+/* Interrupt status bits. */
+#define ISTAT_DEBUG 0x80000000
+#define ISTAT_HOST 0x40000000
+#define ISTAT_LAN7 0x00800000
+#define ISTAT_LAN6 0x00400000
+#define ISTAT_LAN5 0x00200000
+#define ISTAT_LAN4 0x00100000
+#define ISTAT_LAN3 0x00080000
+#define ISTAT_LAN2 0x00040000
+#define ISTAT_LAN1 0x00020000
+#define ISTAT_LAN0 0x00010000
+#define ISTAT_WRDY 0x00008000
+#define ISTAT_HRDY 0x00004000
+#define ISTAT_SRDY 0x00002000
+#define ISTAT_LINK 0x00001000
+#define ISTAT_FRES 0x00000800
+#define ISTAT_NRES 0x00000800
+#define ISTAT_WAKE 0x00000400
+#define ISTAT_OB2 0x00000200
+#define ISTAT_OB1 0x00000100
+#define ISTAT_TAIL 0x00000080
+#define ISTAT_WDOG 0x00000040
+#define ISTAT_TIME 0x00000020
+#define ISTAT_DMA 0x00000010
+#define ISTAT_SEND 0x00000008
+#define ISTAT_BUF 0x00000004
+#define ISTAT_RECV 0x00000002
+#define ISTAT_BRDY 0x00000001
+
+/* MYRI Registers */
+#define MYRI_RESETOFF 0x00UL
+#define MYRI_RESETON 0x04UL
+#define MYRI_IRQOFF 0x08UL
+#define MYRI_IRQON 0x0cUL
+#define MYRI_WAKEUPOFF 0x10UL
+#define MYRI_WAKEUPON 0x14UL
+#define MYRI_IRQREAD 0x18UL
+ /* 0x1c-->0x3ffc, reserved */
+#define MYRI_LOCALMEM 0x4000UL
+#define MYRI_REG_SIZE 0x25000UL
+
+/* Shared memory interrupt mask. */
+#define SHMEM_IMASK_RX 0x00000002
+#define SHMEM_IMASK_TX 0x00000001
+
+/* Just to make things readable. */
+#define KERNEL_CHANNEL 0
+
+/* The size of this must be >= 129 bytes. */
+struct myri_eeprom {
+ unsigned int cval;
+ unsigned short cpuvers;
+ unsigned char id[6];
+ unsigned int ramsz;
+ unsigned char fvers[32];
+ unsigned char mvers[16];
+ unsigned short dlval;
+ unsigned short brd_type;
+ unsigned short bus_type;
+ unsigned short prod_code;
+ unsigned int serial_num;
+ unsigned short _reserved[24];
+ unsigned int _unused[2];
+};
+
+/* EEPROM bus types, only SBUS is valid in this driver. */
+#define BUS_TYPE_SBUS 1
+
+/* EEPROM CPU revisions. */
+#define CPUVERS_2_3 0x0203
+#define CPUVERS_3_0 0x0300
+#define CPUVERS_3_1 0x0301
+#define CPUVERS_3_2 0x0302
+#define CPUVERS_4_0 0x0400
+#define CPUVERS_4_1 0x0401
+#define CPUVERS_4_2 0x0402
+#define CPUVERS_5_0 0x0500
+
+/* MYRI Control Registers */
+#define MYRICTRL_CTRL 0x00UL
+#define MYRICTRL_IRQLVL 0x02UL
+#define MYRICTRL_REG_SIZE 0x04UL
+
+/* Global control register defines. */
+#define CONTROL_ROFF 0x8000 /* Reset OFF. */
+#define CONTROL_RON 0x4000 /* Reset ON. */
+#define CONTROL_EIRQ 0x2000 /* Enable IRQ's. */
+#define CONTROL_DIRQ 0x1000 /* Disable IRQ's. */
+#define CONTROL_WON 0x0800 /* Wake-up ON. */
+
+#define MYRI_SCATTER_ENTRIES 8
+#define MYRI_GATHER_ENTRIES 16
+
+struct myri_sglist {
+ u32 addr;
+ u32 len;
+};
+
+struct myri_rxd {
+ struct myri_sglist myri_scatters[MYRI_SCATTER_ENTRIES]; /* DMA scatter list.*/
+ u32 csum; /* HW computed checksum. */
+ u32 ctx;
+ u32 num_sg; /* Total scatter entries. */
+};
+
+struct myri_txd {
+ struct myri_sglist myri_gathers[MYRI_GATHER_ENTRIES]; /* DMA scatter list. */
+ u32 num_sg; /* Total scatter entries. */
+ u16 addr[4]; /* XXX address */
+ u32 chan;
+ u32 len; /* Total length of packet. */
+ u32 csum_off; /* Where data to csum is. */
+ u32 csum_field; /* Where csum goes in pkt. */
+};
+
+#define MYRINET_MTU 8432
+#define RX_ALLOC_SIZE 8448
+#define MYRI_PAD_LEN 2
+#define RX_COPY_THRESHOLD 256
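+
+/* RX_ALLOC_SIZE (8448) equals MYRINET_MTU + ETH_HLEN + MYRI_PAD_LEN
+ * (8432 + 14 + 2), i.e. one receive buffer has room for a maximal frame
+ * plus the ethernet header and the two byte MyriNet pad.
+ */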
+
+/* These numbers are cast in stone, new firmware is needed if
+ * you want to change them.
+ */
+#define TX_RING_MAXSIZE 16
+#define RX_RING_MAXSIZE 16
+
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+
+/* GRRR... */
+static __inline__ int NEXT_RX(int num)
+{
+ /* XXX >=??? */
+ if(++num > RX_RING_SIZE)
+ num = 0;
+ return num;
+}
+
+static __inline__ int PREV_RX(int num)
+{
+ if(--num < 0)
+ num = RX_RING_SIZE;
+ return num;
+}
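+
+/* The RX ring apparently wraps at RX_RING_SIZE rather than RX_RING_SIZE - 1:
+ * struct recvq (below) carries RX_RING_MAXSIZE + 1 descriptors, so indices
+ * 0..RX_RING_SIZE are all valid and one slot can stay unused to tell a full
+ * ring from an empty one.  That also explains why NEXT_TX() below can use a
+ * simple power-of-two mask while the RX side needs these helper functions.
+ */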
+
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(head, tail) \
+ ((head) <= (tail) ? \
+ (head) + (TX_RING_SIZE - 1) - (tail) : \
+ (head) - (tail) - 1)
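+
+/* Worked example with TX_RING_SIZE == 16: head == 3, tail == 10 gives
+ * 3 + 15 - 10 = 8 free slots; head == 10, tail == 3 gives 10 - 3 - 1 = 6.
+ * At most 15 descriptors are ever reported available (head == tail yields
+ * 15), so one slot always stays free and tail can never wrap around and
+ * catch up with head.
+ */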
+
+struct sendq {
+ u32 tail;
+ u32 head;
+ u32 hdebug;
+ u32 mdebug;
+ struct myri_txd myri_txd[TX_RING_MAXSIZE];
+};
+
+struct recvq {
+ u32 head;
+ u32 tail;
+ u32 hdebug;
+ u32 mdebug;
+ struct myri_rxd myri_rxd[RX_RING_MAXSIZE + 1];
+};
+
+#define MYRI_MLIST_SIZE 8
+
+struct mclist {
+ u32 maxlen;
+ u32 len;
+ u32 cache;
+ struct pair {
+ u8 addr[8];
+ u32 val;
+ } mc_pairs[MYRI_MLIST_SIZE];
+ u8 bcast_addr[8];
+};
+
+struct myri_channel {
+ u32 state; /* State of the channel. */
+ u32 busy; /* Channel is busy. */
+ struct sendq sendq; /* Device tx queue. */
+ struct recvq recvq; /* Device rx queue. */
+ struct recvq recvqa; /* Device rx queue acked. */
+ u32 rbytes; /* Receive bytes. */
+ u32 sbytes; /* Send bytes. */
+ u32 rmsgs; /* Receive messages. */
+ u32 smsgs; /* Send messages. */
+ struct mclist mclist; /* Device multicast list. */
+};
+
+/* Values for per-channel state. */
+#define STATE_WFH 0 /* Waiting for HOST. */
+#define STATE_WFN 1 /* Waiting for NET. */
+#define STATE_READY 2 /* Ready. */
+
+struct myri_shmem {
+ u8 addr[8]; /* Board's address. */
+ u32 nchan; /* Number of channels. */
+ u32 burst; /* SBUS dma burst enable. */
+ u32 shakedown; /* DarkkkkStarrr Crashesss... */
+ u32 send; /* Send wanted. */
+ u32 imask; /* Interrupt enable mask. */
+ u32 mlevel; /* Map level. */
+ u32 debug[4]; /* Misc. debug areas. */
+ struct myri_channel channel; /* Only one channel on a host. */
+};
+
+struct myri_eth {
+ /* These are frequently accessed, keep together
+ * to obtain good cache hit rates.
+ */
+ spinlock_t irq_lock;
+ struct myri_shmem __iomem *shmem; /* Shared data structures. */
+ void __iomem *cregs; /* Control register space. */
+ struct recvq __iomem *rqack; /* Where we ack rx's. */
+ struct recvq __iomem *rq; /* Where we put buffers. */
+ struct sendq __iomem *sq; /* Where we stuff tx's. */
+ struct net_device *dev; /* Linux/NET dev struct. */
+ int tx_old; /* To speed up tx cleaning. */
+ void __iomem *lregs; /* Quick ptr to LANAI regs. */
+ struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */
+ struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */
+ struct net_device_stats enet_stats; /* Interface stats. */
+
+ /* These are less frequently accessed. */
+ void __iomem *regs; /* MyriCOM register space. */
+ void __iomem *lanai; /* View 2 of register space. */
+ unsigned int myri_bursts; /* SBUS bursts. */
+ struct myri_eeprom eeprom; /* Local copy of EEPROM. */
+ unsigned int reg_size; /* Size of register space. */
+ unsigned int shmem_base; /* Offset to shared ram. */
+ struct sbus_dev *myri_sdev; /* Our SBUS device struct. */
+ struct myri_eth *next_module; /* Next in adapter chain. */
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
+static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + 64, gfp_flags);
+ if(skb) {
+ int offset = ALIGNED_RX_SKB_ADDR(skb->data);
+
+ if(offset)
+ skb_reserve(skb, offset);
+ }
+ return skb;
+}
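+
+/* Example: if alloc_skb() returns data at an address ending in 0x28,
+ * ALIGNED_RX_SKB_ADDR() evaluates to 0x40 - 0x28 = 0x18 and skb_reserve()
+ * advances skb->data to the next 64 byte boundary; the extra 64 bytes
+ * requested above guarantee the reserve never exceeds the allocation.
+ */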
+
+#endif /* !(_MYRI_SBUS_H) */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
new file mode 100644
index 000000000000..223bdadd4c0d
--- /dev/null
+++ b/drivers/net/natsemi.c
@@ -0,0 +1,3273 @@
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
+/*
+ Written/copyright 1999-2001 by Donald Becker.
+ Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL. A license for use under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/netsemi.html
+
+
+ Linux kernel modifications:
+
+ Version 1.0.1:
+ - Spinlock fixes
+ - Bug fixes and better intr performance (Tjeerd)
+ Version 1.0.2:
+ - Now reads correct MAC address from eeprom
+ Version 1.0.3:
+ - Eliminate redundant priv->tx_full flag
+ - Call netif_start_queue from dev->tx_timeout
+ - wmb() in start_tx() to flush data
+ - Update Tx locking
+ - Clean up PCI enable (davej)
+ Version 1.0.4:
+ - Merge Donald Becker's natsemi.c version 1.07
+ Version 1.0.5:
+ - { fill me in }
+ Version 1.0.6:
+ * ethtool support (jgarzik)
+ * Proper initialization of the card (which sometimes
+ fails to occur and leaves the card in a non-functional
+ state). (uzi)
+
+ * Some documented register settings to optimize some
+ of the 100Mbit autodetection circuitry in rev C cards. (uzi)
+
+ * Polling of the PHY intr for stuff like link state
+ change and auto- negotiation to finally work properly. (uzi)
+
+ * One-liner removal of a duplicate declaration of
+ netdev_error(). (uzi)
+
+ Version 1.0.7: (Manfred Spraul)
+ * pci dma
+ * SMP locking update
+ * full reset added into tx_timeout
+ * correct multicast hash generation (both big and little endian)
+ [copied from a natsemi driver version
+ from Myrio Corporation, Greg Smith]
+ * suspend/resume
+
+ version 1.0.8 (Tim Hockin <thockin@sun.com>)
+ * ETHTOOL_* support
+ * Wake on lan support (Erik Gilling)
+ * MXDMA fixes for serverworks
+ * EEPROM reload
+
+ version 1.0.9 (Manfred Spraul)
+ * Main change: fix lack of synchronization of
+ * netif_close/netif_suspend against a last interrupt
+ * or packet.
+ * do not enable superfluous interrupts (e.g. the
+ * driver relies on TxDone - TxIntr not needed)
+ * wait until the hardware has really stopped in close
+ * and suspend.
+ * workaround for the (at least) gcc-2.95.1 compiler
+ problem. Also simplifies the code a bit.
+ * disable_irq() in tx_timeout - needed to protect
+ against rx interrupts.
+ * stop the nic before switching into silent rx mode
+ for wol (required according to docu).
+
+ version 1.0.10:
+ * use long for ee_addr (various)
+ * print pointers properly (DaveM)
+ * include asm/irq.h (?)
+
+ version 1.0.11:
+ * check and reset if PHY errors appear (Adrian Sun)
+ * WoL cleanup (Tim Hockin)
+ * Magic number cleanup (Tim Hockin)
+ * Don't reload EEPROM on every reset (Tim Hockin)
+ * Save and restore EEPROM state across reset (Tim Hockin)
+ * MDIO Cleanup (Tim Hockin)
+ * Reformat register offsets/bits (jgarzik)
+
+ version 1.0.12:
+ * ETHTOOL_* further support (Tim Hockin)
+
+ version 1.0.13:
+ * ETHTOOL_[G]EEPROM support (Tim Hockin)
+
+ version 1.0.13:
+ * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
+
+ version 1.0.14:
+ * Cleanup some messages and autoneg in ethtool (Tim Hockin)
+
+ version 1.0.15:
+ * Get rid of cable_magic flag
+ * use new (National provided) solution for cable magic issue
+
+ version 1.0.16:
+ * call netdev_rx() for RxErrors (Manfred Spraul)
+ * formatting and cleanups
+ * change options and full_duplex arrays to be zero
+ initialized
+ * enable only the WoL and PHY interrupts in wol mode
+
+ version 1.0.17:
+ * only do cable_magic on 83815 and early 83816 (Tim Hockin)
+ * create a function for rx refill (Manfred Spraul)
+ * combine drain_ring and init_ring (Manfred Spraul)
+ * oom handling (Manfred Spraul)
+ * hands_off instead of playing with netif_device_{de,a}ttach
+ (Manfred Spraul)
+ * be sure to write the MAC back to the chip (Manfred Spraul)
+ * lengthen EEPROM timeout, and always warn about timeouts
+ (Manfred Spraul)
+ * comments update (Manfred)
+ * do the right thing on a phy-reset (Manfred and Tim)
+
+ TODO:
+ * big endian support with CFG:BEM instead of cpu_to_le32
+ * support for an external PHY
+ * NAPI
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define DRV_NAME "natsemi"
+#define DRV_VERSION "1.07+LK1.0.17"
+#define DRV_RELDATE "Sep 27, 2002"
+
+#define RX_OFFSET 2
+
+/* Updated to recommendations in pci-skeleton v2.03. */
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_WOL | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+static int debug = -1;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+static int mtu;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 512 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define NATSEMI_HW_TIMEOUT 400
+#define NATSEMI_TIMER_FREQ 3*HZ
+#define NATSEMI_PG0_NREGS 64
+#define NATSEMI_RFDR_NREGS 8
+#define NATSEMI_PG1_NREGS 4
+#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
+ NATSEMI_PG1_NREGS)
+#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
+#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
+#define NATSEMI_EEPROM_SIZE 24 /* 12 16-bit values */
+
+/* Buffer sizes:
+ * The nic writes 32-bit values, even if the upper bytes of
+ * a 32-bit value are beyond the end of the buffer.
+ */
+#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
+#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
+#define NATSEMI_LONGPKT 1518 /* limit for normal packets */
+#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+ KERN_INFO DRV_NAME " dp8381x driver, version "
+ DRV_VERSION ", " DRV_RELDATE "\n"
+ KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
+ KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
+ KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work,
+ "DP8381x maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
+MODULE_PARM_DESC(debug, "DP8381x default debug level");
+MODULE_PARM_DESC(rx_copybreak,
+ "DP8381x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options,
+ "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. When a frame is copied, it is put into
+the skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+Most operations are synchronized on the np->lock irq spinlock, except the
+performance critical codepaths:
+
+The rx process only runs in the interrupt handler. Access from outside
+the interrupt handler is only permitted after disable_irq().
+
+The tx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+is set, then access is permitted under spin_lock_irq(&np->lock).
+
+Thus configuration functions that want to access everything must call
+ disable_irq(dev->irq);
+ spin_lock_bh(dev->xmit_lock);
+ spin_lock_irq(&np->lock);
+
+IV. Notes
+
+NatSemi PCI network controllers are very uncommon.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+Datasheet is available from:
+http://www.national.com/pf/DP/DP83815.html
+
+IVc. Errata
+
+None characterised.
+*/
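+
+/* Rough sketch of the copybreak policy described above; the real receive
+ * path lives further down in this file and the names used here are purely
+ * illustrative:
+ *
+ *	if (pkt_len < rx_copybreak) {
+ *		skb = dev_alloc_skb(pkt_len + RX_OFFSET);
+ *		skb_reserve(skb, RX_OFFSET);
+ *		memcpy(skb_put(skb, pkt_len), ring_buf, pkt_len);
+ *	} else {
+ *		skb = ring_skb;
+ *		skb_put(skb, pkt_len);
+ *	}
+ *
+ * The first branch 16-byte aligns the IP header and leaves the ring buffer
+ * in place; the second hands the ring buffer itself up the stack and the
+ * slot is refilled with a fresh skbuff later.
+ */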
+
+
+
+enum pcistuff {
+ PCI_USES_IO = 0x01,
+ PCI_USES_MEM = 0x02,
+ PCI_USES_MASTER = 0x04,
+ PCI_ADDR0 = 0x08,
+ PCI_ADDR1 = 0x10,
+};
+
+/* MMIO operations required */
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+
+
+/*
+ * Support for fibre connections on Am79C874:
+ * This phy needs a special setup when connected to a fibre cable.
+ * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
+ */
+#define PHYID_AM79C874 0x0022561b
+
+#define MII_MCTRL 0x15 /* mode control register */
+#define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */
+#define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */
+
+
+/* array of board data directly indexed by pci_tbl[x].driver_data */
+static struct {
+ const char *name;
+ unsigned long flags;
+} natsemi_pci_info[] __devinitdata = {
+ { "NatSemi DP8381[56]", PCI_IOTYPE },
+};
+
+static struct pci_device_id natsemi_pci_tbl[] = {
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device.
+*/
+enum register_offsets {
+ ChipCmd = 0x00,
+ ChipConfig = 0x04,
+ EECtrl = 0x08,
+ PCIBusCfg = 0x0C,
+ IntrStatus = 0x10,
+ IntrMask = 0x14,
+ IntrEnable = 0x18,
+ IntrHoldoff = 0x1C, /* DP83816 only */
+ TxRingPtr = 0x20,
+ TxConfig = 0x24,
+ RxRingPtr = 0x30,
+ RxConfig = 0x34,
+ ClkRun = 0x3C,
+ WOLCmd = 0x40,
+ PauseCmd = 0x44,
+ RxFilterAddr = 0x48,
+ RxFilterData = 0x4C,
+ BootRomAddr = 0x50,
+ BootRomData = 0x54,
+ SiliconRev = 0x58,
+ StatsCtrl = 0x5C,
+ StatsData = 0x60,
+ RxPktErrs = 0x60,
+ RxMissed = 0x68,
+ RxCRCErrs = 0x64,
+ BasicControl = 0x80,
+ BasicStatus = 0x84,
+ AnegAdv = 0x90,
+ AnegPeer = 0x94,
+ PhyStatus = 0xC0,
+ MIntrCtrl = 0xC4,
+ MIntrStatus = 0xC8,
+ PhyCtrl = 0xE4,
+
+ /* These are from the spec, around page 78... on a separate table.
+ * The meaning of these registers depends on the value of PGSEL. */
+ PGSEL = 0xCC,
+ PMDCSR = 0xE4,
+ TSTDAT = 0xFC,
+ DSPCFG = 0xF4,
+ SDCFG = 0xF8
+};
+/* the values for the 'magic' registers above (PGSEL=1) */
+#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
+#define TSTDAT_VAL 0x0
+#define DSPCFG_VAL 0x5040
+#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
+#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
+#define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
+#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
+
+/* misc PCI space registers */
+enum pci_register_offsets {
+ PCIPM = 0x44,
+};
+
+enum ChipCmd_bits {
+ ChipReset = 0x100,
+ RxReset = 0x20,
+ TxReset = 0x10,
+ RxOff = 0x08,
+ RxOn = 0x04,
+ TxOff = 0x02,
+ TxOn = 0x01,
+};
+
+enum ChipConfig_bits {
+ CfgPhyDis = 0x200,
+ CfgPhyRst = 0x400,
+ CfgExtPhy = 0x1000,
+ CfgAnegEnable = 0x2000,
+ CfgAneg100 = 0x4000,
+ CfgAnegFull = 0x8000,
+ CfgAnegDone = 0x8000000,
+ CfgFullDuplex = 0x20000000,
+ CfgSpeed100 = 0x40000000,
+ CfgLink = 0x80000000,
+};
+
+enum EECtrl_bits {
+ EE_ShiftClk = 0x04,
+ EE_DataIn = 0x01,
+ EE_ChipSelect = 0x08,
+ EE_DataOut = 0x02,
+ MII_Data = 0x10,
+ MII_Write = 0x20,
+ MII_ShiftClk = 0x40,
+};
+
+enum PCIBusCfg_bits {
+ EepromReload = 0x4,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntrStatus_bits {
+ IntrRxDone = 0x0001,
+ IntrRxIntr = 0x0002,
+ IntrRxErr = 0x0004,
+ IntrRxEarly = 0x0008,
+ IntrRxIdle = 0x0010,
+ IntrRxOverrun = 0x0020,
+ IntrTxDone = 0x0040,
+ IntrTxIntr = 0x0080,
+ IntrTxErr = 0x0100,
+ IntrTxIdle = 0x0200,
+ IntrTxUnderrun = 0x0400,
+ StatsMax = 0x0800,
+ SWInt = 0x1000,
+ WOLPkt = 0x2000,
+ LinkChange = 0x4000,
+ IntrHighBits = 0x8000,
+ RxStatusFIFOOver = 0x10000,
+ IntrPCIErr = 0xf00000,
+ RxResetDone = 0x1000000,
+ TxResetDone = 0x2000000,
+ IntrAbnormalSummary = 0xCD20,
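+	/* (for reference, 0xCD20 above expands to IntrRxOverrun | IntrTxErr |
+	 *  IntrTxUnderrun | StatsMax | LinkChange | IntrHighBits) */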
+};
+
+/*
+ * Default Interrupts:
+ * Rx OK, Rx Packet Error, Rx Overrun,
+ * Tx OK, Tx Packet Error, Tx Underrun,
+ * MIB Service, Phy Interrupt, High Bits,
+ * Rx Status FIFO overrun,
+ * Received Target Abort, Received Master Abort,
+ * Signalled System Error, Received Parity Error
+ */
+#define DEFAULT_INTR 0x00f1cd65
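+/* For reference, in terms of the IntrStatus_bits above DEFAULT_INTR expands to
+ * IntrRxDone | IntrRxErr | IntrRxOverrun | IntrTxDone | IntrTxErr |
+ * IntrTxUnderrun | StatsMax | LinkChange | IntrHighBits | RxStatusFIFOOver |
+ * IntrPCIErr.
+ */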
+
+enum TxConfig_bits {
+ TxDrthMask = 0x3f,
+ TxFlthMask = 0x3f00,
+ TxMxdmaMask = 0x700000,
+ TxMxdma_512 = 0x0,
+ TxMxdma_4 = 0x100000,
+ TxMxdma_8 = 0x200000,
+ TxMxdma_16 = 0x300000,
+ TxMxdma_32 = 0x400000,
+ TxMxdma_64 = 0x500000,
+ TxMxdma_128 = 0x600000,
+ TxMxdma_256 = 0x700000,
+ TxCollRetry = 0x800000,
+ TxAutoPad = 0x10000000,
+ TxMacLoop = 0x20000000,
+ TxHeartIgn = 0x40000000,
+ TxCarrierIgn = 0x80000000
+};
+
+/*
+ * Tx Configuration:
+ * - 256 byte DMA burst length
+ * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
+ * - 64 bytes initial drain threshold (i.e. begin actual transmission
+ *   when 64 bytes are in the fifo)
+ * - on tx underruns, increase drain threshold by 64.
+ * - at most use a drain threshold of 1472 bytes: The sum of the fill
+ * threshold and the drain threshold must be less than 2016 bytes.
+ *
+ */
+#define TX_FLTH_VAL ((512/32) << 8)
+#define TX_DRTH_VAL_START (64/32)
+#define TX_DRTH_VAL_INC 2
+#define TX_DRTH_VAL_LIMIT (1472/32)
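+/*
+ * Worked example, derived from the values above: the Tx thresholds are
+ * programmed in units of 32 bytes, so TX_FLTH_VAL puts 512/32 = 16 into the
+ * fill field ((16 << 8) == 0x1000 within TxFlthMask), the drain threshold
+ * starts at 64/32 = 2, grows by TX_DRTH_VAL_INC (64 bytes) on each underrun
+ * in netdev_error(), and is capped at 1472/32 = 46.
+ */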
+
+enum RxConfig_bits {
+ RxDrthMask = 0x3e,
+ RxMxdmaMask = 0x700000,
+ RxMxdma_512 = 0x0,
+ RxMxdma_4 = 0x100000,
+ RxMxdma_8 = 0x200000,
+ RxMxdma_16 = 0x300000,
+ RxMxdma_32 = 0x400000,
+ RxMxdma_64 = 0x500000,
+ RxMxdma_128 = 0x600000,
+ RxMxdma_256 = 0x700000,
+ RxAcceptLong = 0x8000000,
+ RxAcceptTx = 0x10000000,
+ RxAcceptRunt = 0x40000000,
+ RxAcceptErr = 0x80000000
+};
+#define RX_DRTH_VAL (128/8)
+
+enum ClkRun_bits {
+ PMEEnable = 0x100,
+ PMEStatus = 0x8000,
+};
+
+enum WolCmd_bits {
+ WakePhy = 0x1,
+ WakeUnicast = 0x2,
+ WakeMulticast = 0x4,
+ WakeBroadcast = 0x8,
+ WakeArp = 0x10,
+ WakePMatch0 = 0x20,
+ WakePMatch1 = 0x40,
+ WakePMatch2 = 0x80,
+ WakePMatch3 = 0x100,
+ WakeMagic = 0x200,
+ WakeMagicSecure = 0x400,
+ SecureHack = 0x100000,
+ WokePhy = 0x400000,
+ WokeUnicast = 0x800000,
+ WokeMulticast = 0x1000000,
+ WokeBroadcast = 0x2000000,
+ WokeArp = 0x4000000,
+ WokePMatch0 = 0x8000000,
+ WokePMatch1 = 0x10000000,
+ WokePMatch2 = 0x20000000,
+ WokePMatch3 = 0x40000000,
+ WokeMagic = 0x80000000,
+ WakeOptsSummary = 0x7ff
+};
+
+enum RxFilterAddr_bits {
+ RFCRAddressMask = 0x3ff,
+ AcceptMulticast = 0x00200000,
+ AcceptMyPhys = 0x08000000,
+ AcceptAllPhys = 0x10000000,
+ AcceptAllMulticast = 0x20000000,
+ AcceptBroadcast = 0x40000000,
+ RxFilterEnable = 0x80000000
+};
+
+enum StatsCtrl_bits {
+ StatsWarn = 0x1,
+ StatsFreeze = 0x2,
+ StatsClear = 0x4,
+ StatsStrobe = 0x8,
+};
+
+enum MIntrCtrl_bits {
+ MICRIntEn = 0x2,
+};
+
+enum PhyCtrl_bits {
+ PhyAddrMask = 0x1f,
+};
+
+#define PHY_ADDR_NONE 32
+#define PHY_ADDR_INTERNAL 1
+
+/* values we might find in the silicon revision register */
+#define SRR_DP83815_C 0x0302
+#define SRR_DP83815_D 0x0403
+#define SRR_DP83816_A4 0x0504
+#define SRR_DP83816_A5 0x0505
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 next_desc;
+ s32 cmd_status;
+ u32 addr;
+ u32 software_use;
+};
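+/*
+ * Illustrative note: alloc_ring() below carves both rings out of a single
+ * DMA allocation (RX_RING_SIZE rx descriptors followed by TX_RING_SIZE tx
+ * descriptors), and init_ring() links each descriptor's next_desc to the
+ * bus address of the following one, wrapping around to form a circular list.
+ */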
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+ DescNoCRC=0x10000000, DescPktOK=0x08000000,
+ DescSizeMask=0xfff,
+
+ DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
+ DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
+ DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
+ DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
+
+ DescRxAbort=0x04000000, DescRxOver=0x02000000,
+ DescRxDest=0x01800000, DescRxLong=0x00400000,
+ DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
+ DescRxCRC=0x00080000, DescRxAlign=0x00040000,
+ DescRxLoop=0x00020000, DesRxColl=0x00010000,
+};
+
+struct netdev_private {
+ /* Descriptor rings first for alignment */
+ dma_addr_t ring_dma;
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ /* The addresses of receive-in-place skbuffs */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_dma[RX_RING_SIZE];
+ /* address of a sent-in-place packet/buffer, for later free() */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_dma[TX_RING_SIZE];
+ struct net_device_stats stats;
+ /* Media monitoring timer */
+ struct timer_list timer;
+ /* Frequently used values: keep some adjacent for cache effect */
+ struct pci_dev *pci_dev;
+ struct netdev_desc *rx_head_desc;
+ /* Producer/consumer ring indices */
+ unsigned int cur_rx, dirty_rx;
+ unsigned int cur_tx, dirty_tx;
+ /* Based on MTU+slack. */
+ unsigned int rx_buf_sz;
+ int oom;
+ /* Do not touch the nic registers */
+ int hands_off;
+ /* external phy that is used: only valid if dev->if_port != PORT_TP */
+ int mii;
+ int phy_addr_external;
+ unsigned int full_duplex;
+ /* Rx filter */
+ u32 cur_rx_mode;
+ u32 rx_filter[16];
+ /* FIFO and PCI burst thresholds */
+ u32 tx_config, rx_config;
+ /* original contents of ClkRun register */
+ u32 SavedClkRun;
+ /* silicon revision */
+ u32 srr;
+ /* expected DSPCFG value */
+ u16 dspcfg;
+ /* parms saved in ethtool format */
+ u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
+ u8 duplex; /* Duplex, half or full */
+ u8 autoneg; /* Autonegotiation enabled */
+ /* MII transceiver section */
+ u16 advertising;
+ unsigned int iosize;
+ spinlock_t lock;
+ u32 msg_enable;
+};
+
+static void move_int_phy(struct net_device *dev, int addr);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int reg);
+static void mdio_write(struct net_device *dev, int reg, u16 data);
+static void init_phy_fixup(struct net_device *dev);
+static int miiport_read(struct net_device *dev, int phy_id, int reg);
+static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
+static int find_mii(struct net_device *dev);
+static void natsemi_reset(struct net_device *dev);
+static void natsemi_reload_eeprom(struct net_device *dev);
+static void natsemi_stop_rxtx(struct net_device *dev);
+static int netdev_open(struct net_device *dev);
+static void do_cable_magic(struct net_device *dev);
+static void undo_cable_magic(struct net_device *dev);
+static void check_link(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void dump_ring(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static int alloc_ring(struct net_device *dev);
+static void refill_rx(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static void drain_tx(struct net_device *dev);
+static void drain_ring(struct net_device *dev);
+static void free_ring(struct net_device *dev);
+static void reinit_ring(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void netdev_rx(struct net_device *dev);
+static void netdev_tx_done(struct net_device *dev);
+static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev);
+#endif
+static void __set_rx_mode(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __get_stats(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_set_wol(struct net_device *dev, u32 newval);
+static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
+static int netdev_set_sopass(struct net_device *dev, u8 *newval);
+static int netdev_get_sopass(struct net_device *dev, u8 *data);
+static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static void enable_wol_mode(struct net_device *dev, int enable_intr);
+static int netdev_close(struct net_device *dev);
+static int netdev_get_regs(struct net_device *dev, u8 *buf);
+static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
+static struct ethtool_ops ethtool_ops;
+
+static inline void __iomem *ns_ioaddr(struct net_device *dev)
+{
+ return (void __iomem *) dev->base_addr;
+}
+
+static void move_int_phy(struct net_device *dev, int addr)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int target = 31;
+
+ /*
+ * The internal phy is visible on the external mii bus. Therefore we must
+ * move it away before we can send commands to an external phy.
+ * There are two addresses we must avoid:
+ * - the address on the external phy that is used for transmission.
+ * - the address that we want to access. User space can access phys
+	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
+ * phy that is used for transmission.
+ */
+
+ if (target == addr)
+ target--;
+ if (target == np->phy_addr_external)
+ target--;
+ writew(target, ioaddr + PhyCtrl);
+ readw(ioaddr + PhyCtrl);
+ udelay(1);
+}
+
+static int __devinit natsemi_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ int i, option, irq, chip_idx = ent->driver_data;
+ static int find_cnt = -1;
+ unsigned long iostart, iosize;
+ void __iomem *ioaddr;
+ const int pcibar = 1; /* PCI base address register */
+ int prev_eedata;
+ u32 tmp;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+
+ /* natsemi has a non-standard PM control register
+ * in PCI config space. Some boards apparently need
+ * to be brought to D0 in this manner.
+ */
+ pci_read_config_dword(pdev, PCIPM, &tmp);
+ if (tmp & PCI_PM_CTRL_STATE_MASK) {
+ /* D0 state, disable PME assertion */
+ u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
+ pci_write_config_dword(pdev, PCIPM, newtmp);
+ }
+
+ find_cnt++;
+ iostart = pci_resource_start(pdev, pcibar);
+ iosize = pci_resource_len(pdev, pcibar);
+ irq = pdev->irq;
+
+ if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof (struct netdev_private));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ i = pci_request_regions(pdev, DRV_NAME);
+ if (i)
+ goto err_pci_request_regions;
+
+ ioaddr = ioremap(iostart, iosize);
+ if (!ioaddr) {
+ i = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ /* Work around the dropped serial bit. */
+ prev_eedata = eeprom_read(ioaddr, 6);
+ for (i = 0; i < 3; i++) {
+ int eedata = eeprom_read(ioaddr, i + 7);
+ dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ dev->dev_addr[i*2+1] = eedata >> 7;
+ prev_eedata = eedata;
+ }
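+	/*
+	 * Illustrative note, derived from the loop above: the MAC address is
+	 * stored in EEPROM words 6..9 shifted by one bit, so each even output
+	 * byte combines the low bits of word i+7 with bit 15 of the preceding
+	 * word.
+	 */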
+
+ dev->base_addr = (unsigned long __force) ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+
+ np->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ np->iosize = iosize;
+ spin_lock_init(&np->lock);
+ np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
+ np->hands_off = 0;
+
+ /* Initial port:
+ * - If the nic was configured to use an external phy and if find_mii
+ * finds a phy: use external port, first phy that replies.
+ * - Otherwise: internal port.
+ * Note that the phy address for the internal phy doesn't matter:
+ * The address would be used to access a phy over the mii bus, but
+ * the internal phy is accessed through mapped registers.
+ */
+ if (readl(ioaddr + ChipConfig) & CfgExtPhy)
+ dev->if_port = PORT_MII;
+ else
+ dev->if_port = PORT_TP;
+ /* Reset the chip to erase previous misconfiguration. */
+ natsemi_reload_eeprom(dev);
+ natsemi_reset(dev);
+
+ if (dev->if_port != PORT_TP) {
+ np->phy_addr_external = find_mii(dev);
+ if (np->phy_addr_external == PHY_ADDR_NONE) {
+ dev->if_port = PORT_TP;
+ np->phy_addr_external = PHY_ADDR_INTERNAL;
+ }
+ } else {
+ np->phy_addr_external = PHY_ADDR_INTERNAL;
+ }
+
+ option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option) {
+ if (option & 0x200)
+ np->full_duplex = 1;
+ if (option & 15)
+ printk(KERN_INFO
+				"natsemi %s: ignoring user supplied media type %d\n",
+ pci_name(np->pci_dev), option & 15);
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
+ np->full_duplex = 1;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->change_mtu = &natsemi_change_mtu;
+ dev->do_ioctl = &netdev_ioctl;
+ dev->tx_timeout = &tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &natsemi_poll_controller;
+#endif
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ netif_carrier_off(dev);
+
+ /* get the initial settings from hardware */
+ tmp = mdio_read(dev, MII_BMCR);
+ np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
+ np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
+ np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
+ np->advertising= mdio_read(dev, MII_ADVERTISE);
+
+ if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
+ && netif_msg_probe(np)) {
+ printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
+ "10%s %s duplex.\n",
+ pci_name(np->pci_dev),
+ (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
+ "enabled, advertise" : "disabled, force",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_100HALF))?
+ "0" : "",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_10FULL))?
+ "full" : "half");
+ }
+ if (netif_msg_probe(np))
+ printk(KERN_INFO
+ "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
+ pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
+ np->advertising);
+
+ /* save the silicon revision for later querying */
+ np->srr = readl(ioaddr + SiliconRev);
+ if (netif_msg_hw(np))
+ printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
+ pci_name(np->pci_dev), np->srr);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_register_netdev;
+
+ if (netif_msg_drv(np)) {
+ printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ",
+ dev->name, natsemi_pci_info[chip_idx].name, iostart,
+ pci_name(np->pci_dev));
+ for (i = 0; i < ETH_ALEN-1; i++)
+ printk("%02x:", dev->dev_addr[i]);
+ printk("%02x, IRQ %d", dev->dev_addr[i], irq);
+ if (dev->if_port == PORT_TP)
+ printk(", port TP.\n");
+ else
+ printk(", port MII, phy ad %d.\n", np->phy_addr_external);
+ }
+ return 0;
+
+ err_register_netdev:
+ iounmap(ioaddr);
+
+ err_ioremap:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ err_pci_request_regions:
+ free_netdev(dev);
+ return i;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
+ a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
+ made udelay() unreliable.
+ The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+   deprecated.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(void __iomem *addr, int location)
+{
+ int i;
+ int retval = 0;
+ void __iomem *ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+
+ writel(EE_Write0, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 0; i < 16; i++) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ * The 83815 series has an internal transceiver, and we present the
+ * internal management registers as if they were MII connected.
+ * External Phy registers are referenced through the MII interface.
+ */
+
+/* clock transitions >= 20ns (25MHz)
+ * One readl should be good to PCI @ 100MHz
+ */
+#define mii_delay(ioaddr) readl(ioaddr + EECtrl)
+
+static int mii_getbit (struct net_device *dev)
+{
+ int data;
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ writel(MII_ShiftClk, ioaddr + EECtrl);
+ data = readl(ioaddr + EECtrl);
+ writel(0, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ return (data & MII_Data)? 1 : 0;
+}
+
+static void mii_send_bits (struct net_device *dev, u32 data, int len)
+{
+ u32 i;
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ for (i = (1 << (len-1)); i; i >>= 1)
+ {
+ u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
+ writel(mdio_val, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ }
+ writel(0, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+}
+
+static int miiport_read(struct net_device *dev, int phy_id, int reg)
+{
+ u32 cmd;
+ int i;
+ u32 retval = 0;
+
+ /* Ensure sync */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP = 0110'b for read operation */
+ cmd = (0x06 << 10) | (phy_id << 5) | reg;
+ mii_send_bits (dev, cmd, 14);
+ /* Turnaround */
+ if (mii_getbit (dev))
+ return 0;
+ /* Read data */
+ for (i = 0; i < 16; i++) {
+ retval <<= 1;
+ retval |= mii_getbit (dev);
+ }
+ /* End cycle */
+ mii_getbit (dev);
+ return retval;
+}
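+/*
+ * Worked example for the frame layout above: reading BMSR (reg 1) of a phy
+ * at address 3 would shift out cmd = (0x06 << 10) | (3 << 5) | 1 = 0x1861,
+ * i.e. the 14 bits 01 10 00011 00001 (ST, OP, ADDR, REG), and then sample
+ * the turnaround bit followed by 16 data bits.
+ */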
+
+static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
+{
+ u32 cmd;
+
+ /* Ensure sync */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
+ cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
+ mii_send_bits (dev, cmd, 32);
+ /* End cycle */
+ mii_getbit (dev);
+}
+
+static int mdio_read(struct net_device *dev, int reg)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /* The 83815 series has two ports:
+ * - an internal transceiver
+ * - an external mii bus
+ */
+ if (dev->if_port == PORT_TP)
+ return readw(ioaddr+BasicControl+(reg<<2));
+ else
+ return miiport_read(dev, np->phy_addr_external, reg);
+}
+
+static void mdio_write(struct net_device *dev, int reg, u16 data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /* The 83815 series has an internal transceiver; handle separately */
+ if (dev->if_port == PORT_TP)
+ writew(data, ioaddr+BasicControl+(reg<<2));
+ else
+ miiport_write(dev, np->phy_addr_external, reg, data);
+}
+
+static void init_phy_fixup(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+ u32 cfg;
+ u16 tmp;
+
+ /* restore stuff lost when power was out */
+ tmp = mdio_read(dev, MII_BMCR);
+ if (np->autoneg == AUTONEG_ENABLE) {
+ /* renegotiate if something changed */
+ if ((tmp & BMCR_ANENABLE) == 0
+ || np->advertising != mdio_read(dev, MII_ADVERTISE))
+ {
+ /* turn on autonegotiation and force negotiation */
+ tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mdio_write(dev, MII_ADVERTISE, np->advertising);
+ }
+ } else {
+		/* turn off autonegotiation, set speed and duplex */
+ tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (np->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (np->duplex == DUPLEX_FULL)
+ tmp |= BMCR_FULLDPLX;
+ /*
+ * Note: there is no good way to inform the link partner
+ * that our capabilities changed. The user has to unplug
+ * and replug the network cable after some changes, e.g.
+ * after switching from 10HD, autoneg off to 100 HD,
+ * autoneg off.
+ */
+ }
+ mdio_write(dev, MII_BMCR, tmp);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* find out what phy this is */
+ np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
+ + mdio_read(dev, MII_PHYSID2);
+
+ /* handle external phys here */
+ switch (np->mii) {
+ case PHYID_AM79C874:
+ /* phy specific configuration for fibre/tp operation */
+ tmp = mdio_read(dev, MII_MCTRL);
+ tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
+ if (dev->if_port == PORT_FIBRE)
+ tmp |= MII_FX_SEL;
+ else
+ tmp |= MII_EN_SCRM;
+ mdio_write(dev, MII_MCTRL, tmp);
+ break;
+ default:
+ break;
+ }
+ cfg = readl(ioaddr + ChipConfig);
+ if (cfg & CfgExtPhy)
+ return;
+
+ /* On page 78 of the spec, they recommend some settings for "optimum
+ performance" to be done in sequence. These settings optimize some
+ of the 100Mbit autodetection circuitry. They say we only want to
+ do this for rev C of the chip, but engineers at NSC (Bradley
+	   Kennedy) recommend always setting them. If you don't, you get
+ errors on some autonegotiations that make the device unusable.
+
+ It seems that the DSP needs a few usec to reinitialize after
+ the start of the phy. Just retry writing these values until they
+ stick.
+ */
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+
+ int dspcfg;
+ writew(1, ioaddr + PGSEL);
+ writew(PMDCSR_VAL, ioaddr + PMDCSR);
+ writew(TSTDAT_VAL, ioaddr + TSTDAT);
+ np->dspcfg = (np->srr <= SRR_DP83815_C)?
+ DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ writew(SDCFG_VAL, ioaddr + SDCFG);
+ writew(0, ioaddr + PGSEL);
+ readl(ioaddr + ChipConfig);
+ udelay(10);
+
+ writew(1, ioaddr + PGSEL);
+ dspcfg = readw(ioaddr + DSPCFG);
+ writew(0, ioaddr + PGSEL);
+ if (np->dspcfg == dspcfg)
+ break;
+ }
+
+ if (netif_msg_link(np)) {
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_INFO
+ "%s: DSPCFG mismatch after retrying for %d usec.\n",
+ dev->name, i*10);
+ } else {
+ printk(KERN_INFO
+ "%s: DSPCFG accepted after %d usec.\n",
+ dev->name, i*10);
+ }
+ }
+ /*
+ * Enable PHY Specific event based interrupts. Link state change
+ * and Auto-Negotiation Completion are among the affected.
+ * Read the intr status to clear it (needed for wake events).
+ */
+ readw(ioaddr + MIntrStatus);
+ writew(MICRIntEn, ioaddr + MIntrCtrl);
+}
+
+static int switch_port_external(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ u32 cfg;
+
+ cfg = readl(ioaddr + ChipConfig);
+ if (cfg & CfgExtPhy)
+ return 0;
+
+ if (netif_msg_link(np)) {
+ printk(KERN_INFO "%s: switching to external transceiver.\n",
+ dev->name);
+ }
+
+ /* 1) switch back to external phy */
+ writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* 2) reset the external phy: */
+ /* resetting the external PHY has been known to cause a hub supplying
+ * power over Ethernet to kill the power. We don't want to kill
+ * power to this computer, so we avoid resetting the phy.
+ */
+
+ /* 3) reinit the phy fixup, it got lost during power down. */
+ move_int_phy(dev, np->phy_addr_external);
+ init_phy_fixup(dev);
+
+ return 1;
+}
+
+static int switch_port_internal(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+ u32 cfg;
+ u16 bmcr;
+
+ cfg = readl(ioaddr + ChipConfig);
+ if (!(cfg &CfgExtPhy))
+ return 0;
+
+ if (netif_msg_link(np)) {
+ printk(KERN_INFO "%s: switching to internal transceiver.\n",
+ dev->name);
+ }
+ /* 1) switch back to internal phy: */
+ cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
+ writel(cfg, ioaddr + ChipConfig);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* 2) reset the internal phy: */
+ bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
+ writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
+ readl(ioaddr + ChipConfig);
+ udelay(10);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
+ if (!(bmcr & BMCR_RESET))
+ break;
+ udelay(10);
+ }
+ if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
+ printk(KERN_INFO
+ "%s: phy reset did not complete in %d usec.\n",
+ dev->name, i*10);
+ }
+ /* 3) reinit the phy fixup, it got lost during power down. */
+ init_phy_fixup(dev);
+
+ return 1;
+}
+
+/* Scan for a PHY on the external mii bus.
+ * There are two tricky points:
+ * - Do not scan while the internal phy is enabled. The internal phy will
+ * crash: e.g. reads from the DSPCFG register will return odd values and
+ * the nasty random phy reset code will reset the nic every few seconds.
+ * - The internal phy must be moved around, an external phy could
+ * have the same address as the internal phy.
+ */
+static int find_mii(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int tmp;
+ int i;
+ int did_switch;
+
+ /* Switch to external phy */
+ did_switch = switch_port_external(dev);
+
+ /* Scan the possible phy addresses:
+ *
+ * PHY address 0 means that the phy is in isolate mode. Not yet
+ * supported due to lack of test hardware. User space should
+ * handle it through ethtool.
+ */
+ for (i = 1; i <= 31; i++) {
+ move_int_phy(dev, i);
+ tmp = miiport_read(dev, i, MII_BMSR);
+ if (tmp != 0xffff && tmp != 0x0000) {
+ /* found something! */
+ np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
+ + mdio_read(dev, MII_PHYSID2);
+ if (netif_msg_probe(np)) {
+ printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
+ pci_name(np->pci_dev), np->mii, i);
+ }
+ break;
+ }
+ }
+ /* And switch back to internal phy: */
+ if (did_switch)
+ switch_port_internal(dev);
+ return i;
+}
+
+/* CFG bits [13:16] [18:23] */
+#define CFG_RESET_SAVE 0xfde000
+/* WCSR bits [0:4] [9:10] */
+#define WCSR_RESET_SAVE 0x61f
+/* RFCR bits [20] [22] [27:31] */
+#define RFCR_RESET_SAVE 0xf8500000
+
+static void natsemi_reset(struct net_device *dev)
+{
+ int i;
+ u32 cfg;
+ u32 wcsr;
+ u32 rfcr;
+ u16 pmatch[3];
+ u16 sopass[3];
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /*
+ * Resetting the chip causes some registers to be lost.
+ * Natsemi suggests NOT reloading the EEPROM while live, so instead
+ * we save the state that would have been loaded from EEPROM
+ * on a normal power-up (see the spec EEPROM map). This assumes
+ * whoever calls this will follow up with init_registers() eventually.
+ */
+
+ /* CFG */
+ cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
+ /* WCSR */
+ wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
+ /* RFCR */
+ rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
+ /* PMATCH */
+ for (i = 0; i < 3; i++) {
+ writel(i*2, ioaddr + RxFilterAddr);
+ pmatch[i] = readw(ioaddr + RxFilterData);
+ }
+ /* SOPAS */
+ for (i = 0; i < 3; i++) {
+ writel(0xa+(i*2), ioaddr + RxFilterAddr);
+ sopass[i] = readw(ioaddr + RxFilterData);
+ }
+
+ /* now whack the chip */
+ writel(ChipReset, ioaddr + ChipCmd);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ if (!(readl(ioaddr + ChipCmd) & ChipReset))
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
+ dev->name, i*5);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+ dev->name, i*5);
+ }
+
+ /* restore CFG */
+ cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
+ /* turn on external phy if it was selected */
+ if (dev->if_port == PORT_TP)
+ cfg &= ~(CfgExtPhy | CfgPhyDis);
+ else
+ cfg |= (CfgExtPhy | CfgPhyDis);
+ writel(cfg, ioaddr + ChipConfig);
+ /* restore WCSR */
+ wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
+ writel(wcsr, ioaddr + WOLCmd);
+ /* read RFCR */
+ rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
+ /* restore PMATCH */
+ for (i = 0; i < 3; i++) {
+ writel(i*2, ioaddr + RxFilterAddr);
+ writew(pmatch[i], ioaddr + RxFilterData);
+ }
+ for (i = 0; i < 3; i++) {
+ writel(0xa+(i*2), ioaddr + RxFilterAddr);
+ writew(sopass[i], ioaddr + RxFilterData);
+ }
+ /* restore RFCR */
+ writel(rfcr, ioaddr + RxFilterAddr);
+}
+
+static void natsemi_reload_eeprom(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+
+ writel(EepromReload, ioaddr + PCIBusCfg);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ udelay(50);
+ if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
+ break;
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
+ pci_name(np->pci_dev), i*50);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
+ pci_name(np->pci_dev), i*50);
+ }
+}
+
+static void natsemi_stop_rxtx(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
+ if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
+ dev->name, i*5);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
+ dev->name, i*5);
+ }
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int i;
+
+ /* Reset the chip, just in case. */
+ natsemi_reset(dev);
+
+ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ if (i) return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ i = alloc_ring(dev);
+ if (i < 0) {
+ free_irq(dev->irq, dev);
+ return i;
+ }
+ init_ring(dev);
+ spin_lock_irq(&np->lock);
+ init_registers(dev);
+ /* now set the MAC address according to dev->dev_addr */
+ for (i = 0; i < 3; i++) {
+ u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
+
+ writel(i*2, ioaddr + RxFilterAddr);
+ writew(mac, ioaddr + RxFilterData);
+ }
+ writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
+ spin_unlock_irq(&np->lock);
+
+ netif_start_queue(dev);
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void do_cable_magic(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ if (dev->if_port != PORT_TP)
+ return;
+
+ if (np->srr >= SRR_DP83816_A5)
+ return;
+
+ /*
+ * 100 MBit links with short cables can trip an issue with the chip.
+ * The problem manifests as lots of CRC errors and/or flickering
+ * activity LED while idle. This process is based on instructions
+ * from engineers at National.
+ */
+ if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
+ u16 data;
+
+ writew(1, ioaddr + PGSEL);
+ /*
+ * coefficient visibility should already be enabled via
+ * DSPCFG | 0x1000
+ */
+ data = readw(ioaddr + TSTDAT) & 0xff;
+ /*
+ * the value must be negative, and within certain values
+ * (these values all come from National)
+ */
+ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* the bug has been triggered - fix the coefficient */
+ writew(TSTDAT_FIXED, ioaddr + TSTDAT);
+ /* lock the value */
+ data = readw(ioaddr + DSPCFG);
+ np->dspcfg = data | DSPCFG_LOCK;
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ }
+ writew(0, ioaddr + PGSEL);
+ }
+}
+
+static void undo_cable_magic(struct net_device *dev)
+{
+ u16 data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ if (dev->if_port != PORT_TP)
+ return;
+
+ if (np->srr >= SRR_DP83816_A5)
+ return;
+
+ writew(1, ioaddr + PGSEL);
+ /* make sure the lock bit is clear */
+ data = readw(ioaddr + DSPCFG);
+ np->dspcfg = data & ~DSPCFG_LOCK;
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ writew(0, ioaddr + PGSEL);
+}
+
+static void check_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int duplex;
+ u16 bmsr;
+
+ /* The link status field is latched: it remains low after a temporary
+ * link failure until it's read. We need the current link status,
+ * thus read twice.
+ */
+ mdio_read(dev, MII_BMSR);
+ bmsr = mdio_read(dev, MII_BMSR);
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ if (netif_carrier_ok(dev)) {
+ if (netif_msg_link(np))
+ printk(KERN_NOTICE "%s: link down.\n",
+ dev->name);
+ netif_carrier_off(dev);
+ undo_cable_magic(dev);
+ }
+ return;
+ }
+ if (!netif_carrier_ok(dev)) {
+ if (netif_msg_link(np))
+ printk(KERN_NOTICE "%s: link up.\n", dev->name);
+ netif_carrier_on(dev);
+ do_cable_magic(dev);
+ }
+
+ duplex = np->full_duplex;
+ if (!duplex) {
+ if (bmsr & BMSR_ANEGCOMPLETE) {
+ int tmp = mii_nway_result(
+ np->advertising & mdio_read(dev, MII_LPA));
+ if (tmp == LPA_100FULL || tmp == LPA_10FULL)
+ duplex = 1;
+ } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
+ duplex = 1;
+ }
+
+ /* if duplex is set then bit 28 must be set, too */
+ if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
+ if (netif_msg_link(np))
+ printk(KERN_INFO
+ "%s: Setting %s-duplex based on negotiated "
+ "link capability.\n", dev->name,
+ duplex ? "full" : "half");
+ if (duplex) {
+ np->rx_config |= RxAcceptTx;
+ np->tx_config |= TxCarrierIgn | TxHeartIgn;
+ } else {
+ np->rx_config &= ~RxAcceptTx;
+ np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ }
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ init_phy_fixup(dev);
+
+ /* clear any interrupts that are pending, such as wake events */
+ readl(ioaddr + IntrStatus);
+
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
+ ioaddr + TxRingPtr);
+
+ /* Initialize other registers.
+ * Configure the PCI bus bursts and FIFO thresholds.
+ * Configure for standard, in-spec Ethernet.
+ * Start with half-duplex. check_link will update
+ * to the correct settings.
+ */
+
+ /* DRTH: 2: start tx if 64 bytes are in the fifo
+ * FLTH: 0x10: refill with next packet if 512 bytes are free
+ * MXDMA: 0: up to 256 byte bursts.
+ * MXDMA must be <= FLTH
+ * ECRETRY=1
+ * ATP=1
+ */
+ np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
+ TX_FLTH_VAL | TX_DRTH_VAL_START;
+ writel(np->tx_config, ioaddr + TxConfig);
+
+ /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
+ * MXDMA 0: up to 256 byte bursts
+ */
+ np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
+ /* if receive ring now has bigger buffers than normal, enable jumbo */
+ if (np->rx_buf_sz > NATSEMI_LONGPKT)
+ np->rx_config |= RxAcceptLong;
+
+ writel(np->rx_config, ioaddr + RxConfig);
+
+ /* Disable PME:
+ * The PME bit is initialized from the EEPROM contents.
+ * PCI cards probably have PME disabled, but motherboard
+ * implementations may have PME set to enable WakeOnLan.
+ * With PME set the chip will scan incoming packets but
+ * nothing will be written to memory. */
+ np->SavedClkRun = readl(ioaddr + ClkRun);
+ writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
+ if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
+ printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
+ dev->name, readl(ioaddr + WOLCmd));
+ }
+
+ check_link(dev);
+ __set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(DEFAULT_INTR, ioaddr + IntrMask);
+ writel(1, ioaddr + IntrEnable);
+
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
+}
+
+/*
+ * netdev_timer:
+ * Purpose:
+ * 1) check for link changes. Usually they are handled by the MII interrupt
+ * but it doesn't hurt to check twice.
+ * 2) check for sudden death of the NIC:
+ * It seems that a reference set for this chip went out with incorrect info,
+ * and there exist boards that aren't quite right. An unexpected voltage
+ * drop can cause the PHY to get itself in a weird state (basically reset).
+ * NOTE: this only seems to affect revC chips.
+ *    3) check for death of the RX path due to OOM
+ */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int next_tick = 5*HZ;
+
+ if (netif_msg_timer(np)) {
+ /* DO NOT read the IntrStatus register,
+ * a read clears any pending interrupts.
+ */
+ printk(KERN_DEBUG "%s: Media selection timer tick.\n",
+ dev->name);
+ }
+
+ if (dev->if_port == PORT_TP) {
+ u16 dspcfg;
+
+ spin_lock_irq(&np->lock);
+ /* check for a nasty random phy-reset - use dspcfg as a flag */
+ writew(1, ioaddr+PGSEL);
+ dspcfg = readw(ioaddr+DSPCFG);
+ writew(0, ioaddr+PGSEL);
+ if (dspcfg != np->dspcfg) {
+ if (!netif_queue_stopped(dev)) {
+ spin_unlock_irq(&np->lock);
+ if (netif_msg_hw(np))
+ printk(KERN_NOTICE "%s: possible phy reset: "
+ "re-initializing\n", dev->name);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ natsemi_stop_rxtx(dev);
+ dump_ring(dev);
+ reinit_ring(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+ } else {
+ /* hurry back */
+ next_tick = HZ;
+ spin_unlock_irq(&np->lock);
+ }
+ } else {
+ /* init_registers() calls check_link() for the above case */
+ check_link(dev);
+ spin_unlock_irq(&np->lock);
+ }
+ } else {
+ spin_lock_irq(&np->lock);
+ check_link(dev);
+ spin_unlock_irq(&np->lock);
+ }
+ if (np->oom) {
+ disable_irq(dev->irq);
+ np->oom = 0;
+ refill_rx(dev);
+ enable_irq(dev->irq);
+ if (!np->oom) {
+ writel(RxOn, ioaddr + ChipCmd);
+ } else {
+ next_tick = 1;
+ }
+ }
+ mod_timer(&np->timer, jiffies + next_tick);
+}
+
+static void dump_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_pktdata(np)) {
+ int i;
+ printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+ i, np->tx_ring[i].next_desc,
+ np->tx_ring[i].cmd_status,
+ np->tx_ring[i].addr);
+ }
+ printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+ i, np->rx_ring[i].next_desc,
+ np->rx_ring[i].cmd_status,
+ np->rx_ring[i].addr);
+ }
+ }
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ if (!np->hands_off) {
+ if (netif_msg_tx_err(np))
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %#08x,"
+ " resetting...\n",
+ dev->name, readl(ioaddr + IntrStatus));
+ dump_ring(dev);
+
+ natsemi_reset(dev);
+ reinit_ring(dev);
+ init_registers(dev);
+ } else {
+ printk(KERN_WARNING
+ "%s: tx_timeout while in hands_off state?\n",
+ dev->name);
+ }
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int alloc_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->rx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ &np->ring_dma);
+ if (!np->rx_ring)
+ return -ENOMEM;
+ np->tx_ring = &np->rx_ring[RX_RING_SIZE];
+ return 0;
+}
+
+static void refill_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ int entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
+ skb = dev_alloc_skb(buflen);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->tail, buflen, PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
+ }
+ np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
+ }
+ if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
+ if (netif_msg_rx_err(np))
+ printk(KERN_WARNING "%s: going OOM.\n", dev->name);
+ np->oom = 1;
+ }
+}
+
+static void set_bufsize(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ if (dev->mtu <= ETH_DATA_LEN)
+ np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
+ else
+ np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* 1) TX ring */
+ np->dirty_tx = np->cur_tx = 0;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)
+ *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
+ np->tx_ring[i].cmd_status = 0;
+ }
+
+ /* 2) RX ring */
+ np->dirty_rx = 0;
+ np->cur_rx = RX_RING_SIZE;
+ np->oom = 0;
+ set_bufsize(dev);
+
+ np->rx_head_desc = &np->rx_ring[0];
+
+	/* Please be careful before changing this loop - at least gcc-2.95.1
+ * miscompiles it otherwise.
+ */
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)
+ *((i+1)%RX_RING_SIZE));
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+ np->rx_skbuff[i] = NULL;
+ }
+ refill_rx(dev);
+ dump_ring(dev);
+}
+
+static void drain_tx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_dma[i], np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ np->stats.tx_dropped++;
+ }
+ np->tx_skbuff[i] = NULL;
+ }
+}
+
+static void drain_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int buflen = np->rx_buf_sz;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->rx_dma[i], buflen,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = NULL;
+ }
+}
+
+static void drain_ring(struct net_device *dev)
+{
+ drain_rx(dev);
+ drain_tx(dev);
+}
+
+static void free_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ pci_free_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
+}
+
+static void reinit_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* RX Ring */
+ np->dirty_rx = 0;
+ np->cur_rx = RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[0];
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+
+ refill_rx(dev);
+}
+
+static void reinit_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* drain TX ring */
+ drain_tx(dev);
+ np->dirty_tx = np->cur_tx = 0;
+ for (i=0;i<TX_RING_SIZE;i++)
+ np->tx_ring[i].cmd_status = 0;
+
+ reinit_rx(dev);
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ unsigned entry;
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+ np->tx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->data,skb->len, PCI_DMA_TODEVICE);
+
+ np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
+
+ spin_lock_irq(&np->lock);
+
+ if (!np->hands_off) {
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
+ /* StrongARM: Explicitly cache flush np->tx_ring and
+ * skb->data,skb->len. */
+ wmb();
+ np->cur_tx++;
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ netdev_tx_done(dev);
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+ netif_stop_queue(dev);
+ }
+ /* Wake the potentially-idle transmit channel. */
+ writel(TxOn, ioaddr + ChipCmd);
+ } else {
+ dev_kfree_skb_irq(skb);
+ np->stats.tx_dropped++;
+ }
+ spin_unlock_irq(&np->lock);
+
+ dev->trans_start = jiffies;
+
+ if (netif_msg_tx_queued(np)) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+static void netdev_tx_done(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+ break;
+ if (netif_msg_tx_done(np))
+ printk(KERN_DEBUG
+ "%s: tx frame #%d finished, status %#08x.\n",
+ dev->name, np->dirty_tx,
+ le32_to_cpu(np->tx_ring[entry].cmd_status));
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ } else { /* Various Tx errors */
+ int tx_status =
+ le32_to_cpu(np->tx_ring[entry].cmd_status);
+ if (tx_status & (DescTxAbort|DescTxExcColl))
+ np->stats.tx_aborted_errors++;
+ if (tx_status & DescTxFIFO)
+ np->stats.tx_fifo_errors++;
+ if (tx_status & DescTxCarrier)
+ np->stats.tx_carrier_errors++;
+ if (tx_status & DescTxOOWCol)
+ np->stats.tx_window_errors++;
+ np->stats.tx_errors++;
+ }
+ pci_unmap_single(np->pci_dev,np->tx_dma[entry],
+ np->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ /* Free the original skb. */
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ }
+ if (netif_queue_stopped(dev)
+ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, wake queue. */
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int boguscnt = max_interrupt_work;
+ unsigned int handled = 0;
+
+ if (np->hands_off)
+ return IRQ_NONE;
+ do {
+ /* Reading automatically acknowledges all int sources. */
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG
+ "%s: Interrupt, status %#08x, mask %#08x.\n",
+ dev->name, intr_status,
+ readl(ioaddr + IntrMask));
+
+ if (intr_status == 0)
+ break;
+ handled = 1;
+
+ if (intr_status &
+ (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
+ IntrRxErr | IntrRxOverrun)) {
+ netdev_rx(dev);
+ }
+
+ if (intr_status &
+ (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
+ spin_lock(&np->lock);
+ netdev_tx_done(dev);
+ spin_unlock(&np->lock);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ if (netif_msg_intr(np))
+ printk(KERN_WARNING
+ "%s: Too much work at interrupt, "
+ "status=%#08x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);
+
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static void netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ unsigned int buflen = np->rx_buf_sz;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* If the driver owns the next entry it's a new packet. Send it up. */
+ while (desc_status < 0) { /* e.g. & DescOwn */
+ int pkt_len;
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG
+ " netdev_rx() entry %d status was %#08x.\n",
+ entry, desc_status);
+ if (--boguscnt < 0)
+ break;
+ pkt_len = (desc_status & DescSizeMask) - 4;
+ if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
+ if (desc_status & DescMore) {
+ if (netif_msg_rx_err(np))
+ printk(KERN_WARNING
+ "%s: Oversized(?) Ethernet "
+ "frame spanned multiple "
+ "buffers, entry %#08x "
+ "status %#08x.\n", dev->name,
+ np->cur_rx, desc_status);
+ np->stats.rx_length_errors++;
+ } else {
+ /* There was an error. */
+ np->stats.rx_errors++;
+ if (desc_status & (DescRxAbort|DescRxOver))
+ np->stats.rx_over_errors++;
+ if (desc_status & (DescRxLong|DescRxRunt))
+ np->stats.rx_length_errors++;
+ if (desc_status & (DescRxInvalid|DescRxAlign))
+ np->stats.rx_frame_errors++;
+ if (desc_status & DescRxCRC)
+ np->stats.rx_crc_errors++;
+ }
+ } else if (pkt_len > np->rx_buf_sz) {
+ /* if this is the tail of a double buffer
+ * packet, we've already counted the error
+ * on the first part. Ignore the second half.
+ */
+ } else {
+ struct sk_buff *skb;
+ /* Omit CRC size. */
+ /* Check if the packet is long enough to accept
+ * without copying to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+ skb->dev = dev;
+ /* 16 byte align the IP header */
+ skb_reserve(skb, RX_OFFSET);
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->rx_dma[entry],
+ buflen,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(skb,
+ np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->rx_dma[entry],
+ buflen,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ buflen, PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += pkt_len;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ }
+ refill_rx(dev);
+
+ /* Restart Rx engine if stopped. */
+ if (np->oom)
+ mod_timer(&np->timer, jiffies + 1);
+ else
+ writel(RxOn, ioaddr + ChipCmd);
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ spin_lock(&np->lock);
+ if (intr_status & LinkChange) {
+ u16 lpa = mdio_read(dev, MII_LPA);
+ if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
+ && netif_msg_link(np)) {
+ printk(KERN_INFO
+ "%s: Autonegotiation advertising"
+ " %#04x partner %#04x.\n", dev->name,
+ np->advertising, lpa);
+ }
+
+ /* read MII int status to clear the flag */
+ readw(ioaddr + MIntrStatus);
+ check_link(dev);
+ }
+ if (intr_status & StatsMax) {
+ __get_stats(dev);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
+ np->tx_config += TX_DRTH_VAL_INC;
+ if (netif_msg_tx_err(np))
+ printk(KERN_NOTICE
+ "%s: increased tx threshold, txcfg %#08x.\n",
+ dev->name, np->tx_config);
+ } else {
+ if (netif_msg_tx_err(np))
+ printk(KERN_NOTICE
+ "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
+ dev->name, np->tx_config);
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ }
+ if (intr_status & WOLPkt && netif_msg_wol(np)) {
+ int wol_status = readl(ioaddr + WOLCmd);
+ printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
+ dev->name, wol_status);
+ }
+ if (intr_status & RxStatusFIFOOver) {
+ if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
+ printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
+ dev->name);
+ }
+ np->stats.rx_fifo_errors++;
+ }
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
+ intr_status & IntrPCIErr);
+ np->stats.tx_fifo_errors++;
+ np->stats.rx_fifo_errors++;
+ }
+ spin_unlock(&np->lock);
+}
+
+static void __get_stats(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+	/* The chip only needs to report frames that were silently dropped. */
+ np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+	/* The chip only needs to report frames that were silently dropped. */
+ spin_lock_irq(&np->lock);
+ if (netif_running(dev) && !np->hands_off)
+ __get_stats(dev);
+ spin_unlock_irq(&np->lock);
+
+ return &np->stats;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ intr_handler(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+#define HASH_TABLE 0x200
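+/*
+ * Note on the layout used by __set_rx_mode() below: the 512-bit multicast
+ * hash table lives at RxFilterAddr offset HASH_TABLE (0x200) and is written
+ * as 32 16-bit words; a destination address selects bit
+ * (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff, i.e. the top 9 bits of the CRC.
+ */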
+static void __set_rx_mode(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+ u8 mc_filter[64]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptAllMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+			int bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
+			mc_filter[bit/8] |= (1 << (bit & 0x07));
+ }
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptMulticast | AcceptMyPhys;
+ for (i = 0; i < 64; i += 2) {
+ writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
+ writew((mc_filter[i+1]<<8) + mc_filter[i],
+ ioaddr + RxFilterData);
+ }
+ }
+ writel(rx_mode, ioaddr + RxFilterAddr);
+ np->cur_rx_mode = rx_mode;
+}
+
+static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ if (netif_running(dev)) {
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ disable_irq(dev->irq);
+ spin_lock(&np->lock);
+ /* stop engines */
+ natsemi_stop_rxtx(dev);
+ /* drain rx queue */
+ drain_rx(dev);
+ /* change buffers */
+ set_bufsize(dev);
+ reinit_rx(dev);
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ /* restart engines */
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ spin_unlock(&np->lock);
+ enable_irq(dev->irq);
+ }
+ return 0;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ if (!np->hands_off)
+ __set_rx_mode(dev);
+ spin_unlock_irq(&np->lock);
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return NATSEMI_REGS_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return NATSEMI_EEPROM_SIZE;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ netdev_get_ecmd(dev, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = netdev_set_ecmd(dev, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ netdev_get_wol(dev, &wol->supported, &wol->wolopts);
+ netdev_get_sopass(dev, wol->sopass);
+ spin_unlock_irq(&np->lock);
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ netdev_set_wol(dev, wol->wolopts);
+ res = netdev_set_sopass(dev, wol->sopass);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ regs->version = NATSEMI_REGS_VER;
+ spin_lock_irq(&np->lock);
+ netdev_get_regs(dev, buf);
+ spin_unlock_irq(&np->lock);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ int tmp;
+ int r = -EINVAL;
+ /* if autoneg is off, it's an error */
+ tmp = mdio_read(dev, MII_BMCR);
+ if (tmp & BMCR_ANENABLE) {
+ tmp |= (BMCR_ANRESTART);
+ mdio_write(dev, MII_BMCR, tmp);
+ r = 0;
+ }
+ return r;
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ /* LSTATUS is latched low until a read - so read twice */
+ mdio_read(dev, MII_BMSR);
+ return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u8 eebuf[NATSEMI_EEPROM_SIZE];
+ int res;
+
+ eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
+ spin_lock_irq(&np->lock);
+ res = netdev_get_eeprom(dev, eebuf);
+ spin_unlock_irq(&np->lock);
+ if (!res)
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ return res;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = get_drvinfo,
+ .get_regs_len = get_regs_len,
+ .get_eeprom_len = get_eeprom_len,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .get_regs = get_regs,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_eeprom = get_eeprom,
+};
+
+static int netdev_set_wol(struct net_device *dev, u32 newval)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
+
+ /* translate to bitmasks this chip understands */
+ if (newval & WAKE_PHY)
+ data |= WakePhy;
+ if (newval & WAKE_UCAST)
+ data |= WakeUnicast;
+ if (newval & WAKE_MCAST)
+ data |= WakeMulticast;
+ if (newval & WAKE_BCAST)
+ data |= WakeBroadcast;
+ if (newval & WAKE_ARP)
+ data |= WakeArp;
+ if (newval & WAKE_MAGIC)
+ data |= WakeMagic;
+ if (np->srr >= SRR_DP83815_D) {
+ if (newval & WAKE_MAGICSECURE) {
+ data |= WakeMagicSecure;
+ }
+ }
+
+ writel(data, ioaddr + WOLCmd);
+
+ return 0;
+}
+
+static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u32 regval = readl(ioaddr + WOLCmd);
+
+ *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
+ | WAKE_ARP | WAKE_MAGIC);
+
+ if (np->srr >= SRR_DP83815_D) {
+ /* SOPASS works on revD and higher */
+ *supported |= WAKE_MAGICSECURE;
+ }
+ *cur = 0;
+
+ /* translate from chip bitmasks */
+ if (regval & WakePhy)
+ *cur |= WAKE_PHY;
+ if (regval & WakeUnicast)
+ *cur |= WAKE_UCAST;
+ if (regval & WakeMulticast)
+ *cur |= WAKE_MCAST;
+ if (regval & WakeBroadcast)
+ *cur |= WAKE_BCAST;
+ if (regval & WakeArp)
+ *cur |= WAKE_ARP;
+ if (regval & WakeMagic)
+ *cur |= WAKE_MAGIC;
+ if (regval & WakeMagicSecure) {
+ /* this can be on in revC, but it's broken */
+ *cur |= WAKE_MAGICSECURE;
+ }
+
+ return 0;
+}
+
+static int netdev_set_sopass(struct net_device *dev, u8 *newval)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u16 *sval = (u16 *)newval;
+ u32 addr;
+
+ if (np->srr < SRR_DP83815_D) {
+ return 0;
+ }
+
+ /* enable writing to these registers by disabling the RX filter */
+ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
+ addr &= ~RxFilterEnable;
+ writel(addr, ioaddr + RxFilterAddr);
+
+ /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
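+	/* e.g. on a little-endian host a SecureOn password of
+	 * 01:02:03:04:05:06 ends up as the three 16 bit words
+	 * 0x0201, 0x0403 and 0x0605 */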
+ writel(addr | 0xa, ioaddr + RxFilterAddr);
+ writew(sval[0], ioaddr + RxFilterData);
+
+ writel(addr | 0xc, ioaddr + RxFilterAddr);
+ writew(sval[1], ioaddr + RxFilterData);
+
+ writel(addr | 0xe, ioaddr + RxFilterAddr);
+ writew(sval[2], ioaddr + RxFilterData);
+
+ /* re-enable the RX filter */
+ writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
+
+ return 0;
+}
+
+static int netdev_get_sopass(struct net_device *dev, u8 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u16 *sval = (u16 *)data;
+ u32 addr;
+
+ if (np->srr < SRR_DP83815_D) {
+ sval[0] = sval[1] = sval[2] = 0;
+ return 0;
+ }
+
+ /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
+ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
+
+ writel(addr | 0xa, ioaddr + RxFilterAddr);
+ sval[0] = readw(ioaddr + RxFilterData);
+
+ writel(addr | 0xc, ioaddr + RxFilterAddr);
+ sval[1] = readw(ioaddr + RxFilterData);
+
+ writel(addr | 0xe, ioaddr + RxFilterAddr);
+ sval[2] = readw(ioaddr + RxFilterData);
+
+ writel(addr, ioaddr + RxFilterAddr);
+
+ return 0;
+}
+
+static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 tmp;
+
+ ecmd->port = dev->if_port;
+ ecmd->speed = np->speed;
+ ecmd->duplex = np->duplex;
+ ecmd->autoneg = np->autoneg;
+ ecmd->advertising = 0;
+ if (np->advertising & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (np->advertising & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (np->advertising & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (np->advertising & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ ecmd->supported = (SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
+ ecmd->phy_address = np->phy_addr_external;
+ /*
+ * We intentionally report the phy address of the external
+ * phy, even if the internal phy is used. This is necessary
+ * to work around a deficiency of the ethtool interface:
+ * It's only possible to query the settings of the active
+ * port. Therefore
+ * # ethtool -s ethX port mii
+ * actually sends an ioctl to switch to port mii with the
+ * settings that are used for the current active port.
+ * If we would report a different phy address in this
+ * command, then
+ * # ethtool -s ethX port tp;ethtool -s ethX port mii
+ * would unintentionally change the phy address.
+ *
+ * Fortunately the phy address doesn't matter with the
+ * internal phy...
+ */
+
+ /* set information based on active port type */
+ switch (ecmd->port) {
+ default:
+ case PORT_TP:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ case PORT_MII:
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case PORT_FIBRE:
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+
+ /* if autonegotiation is on, try to return the active speed/duplex */
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ tmp = mii_nway_result(
+ np->advertising & mdio_read(dev, MII_LPA));
+ if (tmp == LPA_100FULL || tmp == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if (tmp == LPA_100FULL || tmp == LPA_10FULL)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
+ return -EINVAL;
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0) {
+ return -EINVAL;
+ }
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ /*
+ * maxtxpkt, maxrxpkt: ignored for now.
+ *
+ * transceiver:
+ * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
+ * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
+ * selects based on ecmd->port.
+ *
+ * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
+ * phys that are connected to the mii bus. It's used to apply fibre
+ * specific updates.
+ */
+
+	/* WHEW! now let's bang some bits */
+
+ /* save the parms */
+ dev->if_port = ecmd->port;
+ np->autoneg = ecmd->autoneg;
+ np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
+ if (np->autoneg == AUTONEG_ENABLE) {
+ /* advertise only what has been requested */
+ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ np->advertising |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ np->advertising |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ np->advertising |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ np->advertising |= ADVERTISE_100FULL;
+ } else {
+ np->speed = ecmd->speed;
+ np->duplex = ecmd->duplex;
+ /* user overriding the initial full duplex parm? */
+ if (np->duplex == DUPLEX_HALF)
+ np->full_duplex = 0;
+ }
+
+ /* get the right phy enabled */
+ if (ecmd->port == PORT_TP)
+ switch_port_internal(dev);
+ else
+ switch_port_external(dev);
+
+ /* set parms and see how this affected our link status */
+ init_phy_fixup(dev);
+ check_link(dev);
+ return 0;
+}
+
+static int netdev_get_regs(struct net_device *dev, u8 *buf)
+{
+ int i;
+ int j;
+ u32 rfcr;
+ u32 *rbuf = (u32 *)buf;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* read non-mii page 0 of registers */
+ for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
+ rbuf[i] = readl(ioaddr + i*4);
+ }
+
+ /* read current mii registers */
+ for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
+ rbuf[i] = mdio_read(dev, i & 0x1f);
+
+ /* read only the 'magic' registers from page 1 */
+ writew(1, ioaddr + PGSEL);
+ rbuf[i++] = readw(ioaddr + PMDCSR);
+ rbuf[i++] = readw(ioaddr + TSTDAT);
+ rbuf[i++] = readw(ioaddr + DSPCFG);
+ rbuf[i++] = readw(ioaddr + SDCFG);
+ writew(0, ioaddr + PGSEL);
+
+ /* read RFCR indexed registers */
+ rfcr = readl(ioaddr + RxFilterAddr);
+ for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
+ writel(j*2, ioaddr + RxFilterAddr);
+ rbuf[i++] = readw(ioaddr + RxFilterData);
+ }
+ writel(rfcr, ioaddr + RxFilterAddr);
+
+ /* the interrupt status is clear-on-read - see if we missed any */
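+	/* rbuf[4] is the interrupt status register (offset 0x10) and rbuf[5]
+	 * the interrupt mask (offset 0x14), so a nonzero AND means an enabled
+	 * interrupt was pending and has just been cleared by this dump */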
+ if (rbuf[4] & rbuf[5]) {
+ printk(KERN_WARNING
+ "%s: shoot, we dropped an interrupt (%#08x)\n",
+ dev->name, rbuf[4] & rbuf[5]);
+ }
+
+ return 0;
+}
+
+#define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
+ | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
+ | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
+ | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
+ | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
+ | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
+ | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
+ | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
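+/*
+ * SWAP_BITS() reverses the bit order of a 16 bit word (bit 0 <-> bit 15,
+ * bit 1 <-> bit 14, ...), e.g. SWAP_BITS(0x0001) == 0x8000 and
+ * SWAP_BITS(0x1234) == 0x2c48.
+ */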
+
+static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
+{
+ int i;
+ u16 *ebuf = (u16 *)buf;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* eeprom_read reads 16 bits, and indexes by 16 bits */
+ for (i = 0; i < NATSEMI_EEPROM_SIZE/2; i++) {
+ ebuf[i] = eeprom_read(ioaddr, i);
+ /* The EEPROM itself stores data bit-swapped, but eeprom_read
+ * reads it back "sanely". So we swap it back here in order to
+ * present it to userland as it is stored. */
+ ebuf[i] = SWAP_BITS(ebuf[i]);
+ }
+ return 0;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(rq);
+ struct netdev_private *np = netdev_priv(dev);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ data->phy_id = np->phy_addr_external;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
+ /* The phy_id is not enough to uniquely identify
+ * the intended target. Therefore the command is sent to
+ * the given mii on the current port.
+ */
+ if (dev->if_port == PORT_TP) {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external)
+ data->val_out = mdio_read(dev,
+ data->reg_num & 0x1f);
+ else
+ data->val_out = 0;
+ } else {
+ move_int_phy(dev, data->phy_id & 0x1f);
+ data->val_out = miiport_read(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (dev->if_port == PORT_TP) {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external) {
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ np->advertising = data->val_in;
+ mdio_write(dev, data->reg_num & 0x1f,
+ data->val_in);
+ }
+ } else {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external) {
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ np->advertising = data->val_in;
+ }
+ move_int_phy(dev, data->phy_id & 0x1f);
+ miiport_write(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f,
+ data->val_in);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enable_wol_mode(struct net_device *dev, int enable_intr)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_wol(np))
+ printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
+ dev->name);
+
+ /* For WOL we must restart the rx process in silent mode.
+ * Write NULL to the RxRingPtr. Only possible if
+ * rx process is stopped
+ */
+ writel(0, ioaddr + RxRingPtr);
+
+ /* read WoL status to clear */
+ readl(ioaddr + WOLCmd);
+
+ /* PME on, clear status */
+ writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);
+
+ /* and restart the rx process */
+ writel(RxOn, ioaddr + ChipCmd);
+
+ if (enable_intr) {
+ /* enable the WOL interrupt.
+ * Could be used to send a netlink message.
+ */
+ writel(WOLPkt | LinkChange, ioaddr + IntrMask);
+ writel(1, ioaddr + IntrEnable);
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_ifdown(np))
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %#04x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+ if (netif_msg_pktdata(np))
+ printk(KERN_DEBUG
+ "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
+
+ /*
+ * FIXME: what if someone tries to close a device
+ * that is suspended?
+ * Should we reenable the nic to switch to
+ * the final WOL settings?
+ */
+
+ del_timer_sync(&np->timer);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ /* Disable interrupts, and flush posted writes */
+ writel(0, ioaddr + IntrEnable);
+ readl(ioaddr + IntrEnable);
+ np->hands_off = 1;
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ free_irq(dev->irq, dev);
+
+ /* Interrupt disabled, interrupt handler released,
+ * queue stopped, timer deleted, rtnl_lock held
+ * All async codepaths that access the driver are disabled.
+ */
+ spin_lock_irq(&np->lock);
+ np->hands_off = 0;
+ readl(ioaddr + IntrMask);
+ readw(ioaddr + MIntrStatus);
+
+ /* Freeze Stats */
+ writel(StatsFreeze, ioaddr + StatsCtrl);
+
+ /* Stop the chip's Tx and Rx processes. */
+ natsemi_stop_rxtx(dev);
+
+ __get_stats(dev);
+ spin_unlock_irq(&np->lock);
+
+ /* clear the carrier last - an interrupt could reenable it otherwise */
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ dump_ring(dev);
+ drain_ring(dev);
+ free_ring(dev);
+
+ {
+ u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
+ if (wol) {
+ /* restart the NIC in WOL mode.
+ * The nic must be stopped for this.
+ */
+ enable_wol_mode(dev, 0);
+ } else {
+ /* Restore PME enable bit unmolested */
+ writel(np->SavedClkRun, ioaddr + ClkRun);
+ }
+ }
+ return 0;
+}
+
+
+static void __devexit natsemi_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ unregister_netdev (dev);
+ pci_release_regions (pdev);
+ iounmap(ioaddr);
+ free_netdev (dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * The ns83815 chip doesn't have explicit RxStop bits.
+ * Kicking the Rx or Tx process for a new packet reenables the Rx process
+ * of the nic, thus this function must be very careful:
+ *
+ * suspend/resume synchronization:
+ * entry points:
+ * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
+ * start_tx, tx_timeout
+ *
+ * No function accesses the hardware without checking np->hands_off.
+ * the check occurs under spin_lock_irq(&np->lock);
+ * exceptions:
+ * * netdev_ioctl: noncritical access.
+ * * netdev_open: cannot happen due to the device_detach
+ * * netdev_close: doesn't hurt.
+ * * netdev_timer: timer stopped by natsemi_suspend.
+ * * intr_handler: doesn't acquire the spinlock. suspend calls
+ * disable_irq() to enforce synchronization.
+ *
+ * Interrupts must be disabled, otherwise hands_off can cause irq storms.
+ */
+
+static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&np->timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+
+ writel(0, ioaddr + IntrEnable);
+ np->hands_off = 1;
+ natsemi_stop_rxtx(dev);
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ /* Update the error counts. */
+ __get_stats(dev);
+
+ /* pci_power_off(pdev, -1); */
+ drain_ring(dev);
+ {
+ u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
+ /* Restore PME enable bit */
+ if (wol) {
+ /* restart the NIC in WOL mode.
+ * The nic must be stopped for this.
+ * FIXME: use the WOL interrupt
+ */
+ enable_wol_mode(dev, 0);
+ } else {
+ /* Restore PME enable bit unmolested */
+ writel(np->SavedClkRun, ioaddr + ClkRun);
+ }
+ }
+ }
+ netif_device_detach(dev);
+ rtnl_unlock();
+ return 0;
+}
+
+
+static int natsemi_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out;
+ if (netif_running(dev)) {
+ BUG_ON(!np->hands_off);
+ pci_enable_device(pdev);
+ /* pci_power_on(pdev); */
+
+ natsemi_reset(dev);
+ init_ring(dev);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ np->hands_off = 0;
+ init_registers(dev);
+ netif_device_attach(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ mod_timer(&np->timer, jiffies + 1*HZ);
+ }
+ netif_device_attach(dev);
+out:
+ rtnl_unlock();
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver natsemi_driver = {
+ .name = DRV_NAME,
+ .id_table = natsemi_pci_tbl,
+ .probe = natsemi_probe1,
+ .remove = __devexit_p(natsemi_remove1),
+#ifdef CONFIG_PM
+ .suspend = natsemi_suspend,
+ .resume = natsemi_resume,
+#endif
+};
+
+static int __init natsemi_init_mod (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_module_init (&natsemi_driver);
+}
+
+static void __exit natsemi_exit_mod (void)
+{
+ pci_unregister_driver (&natsemi_driver);
+}
+
+module_init(natsemi_init_mod);
+module_exit(natsemi_exit_mod);
+
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
new file mode 100644
index 000000000000..84e291e24935
--- /dev/null
+++ b/drivers/net/ne-h8300.c
@@ -0,0 +1,670 @@
+/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
+/*
+ original ne.c
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ H8/300 modified
+ Yoshinori Sato <ysato@users.sourceforge.jp>
+*/
+
+static const char version1[] =
+"ne-h8300.c:v1.00 2004/04/11 ysato\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne-h8300"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+/* A zero-terminated list of I/O addresses to be probed at boot. */
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */
+#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT (ei_status.word16?0x40:0x20)
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+static int ne_probe1(struct net_device *dev, int ioaddr);
+
+static int ne_open(struct net_device *dev);
+static int ne_close(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+static u32 reg_offset[16];
+
+static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int i;
+ unsigned char bus_width;
+
+ bus_width = *(volatile unsigned char *)ABWCR;
+ bus_width &= 1 << ((base_addr >> 21) & 7);
+
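+	/* In a 16 bit bus area (bus_width == 0) the byte wide 8390 registers
+	 * are reached at base + i*2 + 1, i.e. on every other byte; in an
+	 * 8 bit area they sit at consecutive offsets. */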
+ for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++)
+ if (bus_width == 0)
+ reg_offset[i] = i * 2 + 1;
+ else
+ reg_offset[i] = i;
+
+ ei_local->reg_offset = reg_offset;
+ return 0;
+}
+
+static int __initdata h8300_ne_count = 0;
+#ifdef CONFIG_H8300H_H8MAX
+static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
+static int h8300_ne_irq[] = {EXT_IRQ4};
+#endif
+#ifdef CONFIG_H8300H_AKI3068NET
+static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
+static int h8300_ne_irq[] = {EXT_IRQ5};
+#endif
+
+static inline int init_dev(struct net_device *dev)
+{
+ if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) {
+ dev->base_addr = h8300_ne_base[h8300_ne_count];
+ dev->irq = h8300_ne_irq[h8300_ne_count];
+ h8300_ne_count++;
+ return 0;
+ } else
+ return -ENODEV;
+}
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+static int __init do_ne_probe(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+
+ SET_MODULE_OWNER(dev);
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init ne_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (init_dev(dev))
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = init_reg_offset(dev, dev->base_addr);
+ if (err)
+ goto out;
+
+ err = do_ne_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init ne_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[16];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int reg0, ret;
+ static unsigned version_printed;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char bus_width;
+
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ reg0 = inb_p(ioaddr);
+ if (reg0 == 0xFF) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+	/* Do a preliminary verification that we have an 8390. */
+ {
+ int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + EI_SHIFT(0x0d));
+ outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr + EI_SHIFT(0));
+ outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version1);
+
+ printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] =
+ {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ bus_width = *(volatile unsigned char *)ABWCR;
+ bus_width &= 1 << ((ioaddr >> 21) & 7);
+ ei_status.word16 = (bus_width == 0); /* temporary setting */
+ for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
+ SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
+ inb_p(ioaddr + NE_DATAPORT); /* dummy read */
+ }
+
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ if (bus_width)
+ wordlength = 1;
+ else
+ outb_p(0x49, ioaddr + EN0_DCFG);
+
+ /* Set up the rest of the parameters. */
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+ }
+
+ dev->base_addr = ioaddr;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ ei_status.priv = 0;
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+
+err_out:
+ release_region(ioaddr, NE_IO_EXTENT);
+ return ret;
+}
+
+static int ne_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int ne_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that an
+ 8390 reset command required, but that shouldn't be necessary. */
+
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
+ outb_p(0, NE_BASE + EN0_RCNTHI);
+ outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+
+ if (ei_status.word16) {
+ int len;
+ unsigned short *p = (unsigned short *)hdr;
+ for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
+ *p++ = inw(NE_BASE + NE_DATAPORT);
+ } else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+ outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+ outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
+ outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+ if (ei_status.word16)
+ {
+ int len;
+ unsigned short *p = (unsigned short *)buf;
+ for (len = count>>1; len > 0; len--)
+ *p++ = inw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01)
+ {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(NE_BASE + EN0_RSARHI);
+ int low = inb_p(NE_BASE + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_WARNING "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+
+ outb_p(0x42, NE_BASE + EN0_RCNTLO);
+ outb_p(0x00, NE_BASE + EN0_RCNTHI);
+ outb_p(0x42, NE_BASE + EN0_RSARLO);
+ outb_p(0x00, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ udelay(6);
+#endif
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+ outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
+ outb_p(0x00, NE_BASE + EN0_RSARLO);
+ outb_p(start_page, NE_BASE + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
+ if (ei_status.word16) {
+ int len;
+ unsigned short *p = (unsigned short *)buf;
+ for (len = count>>1; len > 0; len--)
+ outw(*p++, NE_BASE + NE_DATAPORT);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(NE_BASE + EN0_RSARHI);
+ int low = inb_p(NE_BASE + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+
+ if (tries <= 0)
+ {
+ printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 1 /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
+
+MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+
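+/*
+ * Typical module usage: "insmod ne-h8300 io=0xNNN irq=N" to probe a board
+ * at an explicit address, or with no parameters to fall back to the
+ * platform defaults in h8300_ne_base[]/h8300_ne_irq[].
+ */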
+int init_module(void)
+{
+ int this_dev, found = 0;
+ int err;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ if (io[this_dev]) {
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ dev->base_addr = io[this_dev];
+ } else {
+ dev->base_addr = h8300_ne_base[this_dev];
+ dev->irq = h8300_ne_irq[this_dev];
+ }
+ err = init_reg_offset(dev, dev->base_addr);
+ if (!err) {
+ if (do_ne_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_ne[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ }
+ free_netdev(dev);
+ if (found)
+ break;
+ if (io[this_dev] != 0)
+			printk(KERN_WARNING "ne-h8300.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
+ else
+			printk(KERN_NOTICE "ne-h8300.c: You must supply \"io=0xNNN\" value(s).\n");
+ return -ENXIO;
+ }
+ if (found)
+ return 0;
+ return -ENODEV;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = dev_ne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
new file mode 100644
index 000000000000..496433902ade
--- /dev/null
+++ b/drivers/net/ne.c
@@ -0,0 +1,862 @@
+/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ This driver should work with many programmed-I/O 8390-based ethernet
+ boards. Currently it supports the NE1000, NE2000, many clones,
+ and some Cabletron products.
+
+ Changelog:
+
+ Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
+ sanity checks and bad clone support optional.
+ Paul Gortmaker : new reset code, reset card after probe at boot.
+ Paul Gortmaker : multiple card support for module users.
+ Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
+ Paul Gortmaker : Allow users with bad cards to avoid full probe.
+ Paul Gortmaker : PCI probe changes, more PCI cards supported.
+ rjohnson@analogic.com : Changed init order so an interrupt will only
+ occur after memory is allocated for dev->priv. Deallocated memory
+	last in cleanup_module()
+ Richard Guenther : Added support for ISAPnP cards
+ Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead.
+ Hayato Fujiwara : Add m32r support.
+
+*/
+
+/* Routines for the NatSemi-based designs (NE[12]000). */
+
+static const char version1[] =
+"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n";
+static const char version2[] =
+"Last modified Nov 1, 2000 by Paul Gortmaker\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
+#define SUPPORT_NE_BAD_CLONES
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+/* A zero-terminated list of I/O addresses to be probed at boot. */
+#ifndef MODULE
+static unsigned int netcard_portlist[] __initdata = {
+ 0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0
+};
+#endif
+
+static struct isapnp_device_id isapnp_clone_list[] __initdata = {
+ { ISAPNP_CARD_ID('A','X','E',0x2011),
+ ISAPNP_VENDOR('A','X','E'), ISAPNP_FUNCTION(0x2011),
+ (long) "NetGear EA201" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216),
+ (long) "NN NE2000" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6),
+ (long) "Generic PNP" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list);
+
+#ifdef SUPPORT_NE_BAD_CLONES
+/* A list of bad clones that we none-the-less recognize. */
+static struct { const char *name8, *name16; unsigned char SAprefix[4];}
+bad_clone_list[] __initdata = {
+ {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
+ {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
+ {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
+ {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
+ {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
+ {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
+ {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
+ {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
+ {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
+ {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
+ {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
+ {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
+ {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
+ {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
+ {NULL,}
+};
+#endif
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#ifdef CONFIG_PLAT_MAPPI
+# define DCR_VAL 0x4b
+#elif defined(CONFIG_PLAT_OAKS32R)
+# define DCR_VAL 0x48
+#else
+# define DCR_VAL 0x49
+#endif
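+/*
+ * All three DCR values keep LS (normal operation) and FT1 (FIFO threshold)
+ * set; 0x49 additionally sets WTS for word wide transfers, 0x4b (Mappi)
+ * also sets BOS to byte swap those transfers, and 0x48 (OAKS32R) leaves
+ * the chip in byte wide mode.
+ */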
+
+static int ne_probe1(struct net_device *dev, int ioaddr);
+static int ne_probe_isapnp(struct net_device *dev);
+
+static int ne_open(struct net_device *dev);
+static int ne_close(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+static int __init do_ne_probe(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+#ifndef MODULE
+ int orig_irq = dev->irq;
+#endif
+
+ SET_MODULE_OWNER(dev);
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ /* Then look for any installed ISAPnP clones */
+ if (isapnp_present() && (ne_probe_isapnp(dev) == 0))
+ return 0;
+
+#ifndef MODULE
+ /* Last resort. The semi-risky ISA auto-probe. */
+ for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) {
+ int ioaddr = netcard_portlist[base_addr];
+ dev->irq = orig_irq;
+ if (ne_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_device_detach(idev);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init ne_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ne_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init ne_probe_isapnp(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; isapnp_clone_list[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+
+ while ((idev = pnp_find_dev(NULL,
+ isapnp_clone_list[i].vendor,
+ isapnp_clone_list[i].function,
+ idev))) {
+ /* Avoid already found cards from previous calls */
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* if no io and irq, search for next */
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* found it */
+ dev->base_addr = pnp_port_start(idev, 0);
+ dev->irq = pnp_irq(idev, 0);
+ printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
+ if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ pnp_device_detach(idev);
+ return -ENXIO;
+ }
+ ei_status.priv = (unsigned long)idev;
+ break;
+ }
+ if (!idev)
+ continue;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int __init ne_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int neX000, ctron, copam, bad_card;
+ int reg0, ret;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ reg0 = inb_p(ioaddr);
+ if (reg0 == 0xFF) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+	/* Do a preliminary verification that we have an 8390. */
+ {
+ int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + 0x0d);
+ outb_p(0xff, ioaddr + 0x0d);
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr);
+ outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+
+ printk(KERN_INFO "NE*000 ethercard probe at %#3x:", ioaddr);
+
+ /* A user with a poor card that fails to ack the reset, or that
+ does not have a valid 0x57,0x57 signature can still use this
+ without having to recompile. Specifying an i/o address along
+ with an otherwise unused dev->mem_end value of "0xBAD" will
+ cause the driver to skip these parts of the probe. */
+
+ bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+
+ {
+ unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ if (bad_card) {
+ printk(" (warning: no reset ack)");
+ break;
+ } else {
+ printk(" not found (no reset ack).\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] =
+ {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
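+	/* A word wide card read in byte wide mode returns every PROM byte
+	 * twice; if any pair differs the card must be byte wide, otherwise
+	 * the duplicates are squeezed out below. */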
+ if (wordlength == 2)
+ {
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+ /* We must set the 8390 for word mode. */
+ outb_p(DCR_VAL, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
+ neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57)
+ || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42));
+#else
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+#endif
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+ copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00);
+
+ /* Set up the rest of the parameters. */
+ if (neX000 || bad_card || copam) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ }
+ else if (ctron)
+ {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ }
+ else
+ {
+#ifdef SUPPORT_NE_BAD_CLONES
+ /* Ack! Well, there might be a *bad* NE*000 clone there.
+ Check for total bogus addresses. */
+ for (i = 0; bad_clone_list[i].name8; i++)
+ {
+ if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
+ SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
+ SA_prom[2] == bad_clone_list[i].SAprefix[2])
+ {
+ if (wordlength == 2)
+ {
+ name = bad_clone_list[i].name16;
+ } else {
+ name = bad_clone_list[i].name8;
+ }
+ break;
+ }
+ }
+ if (bad_clone_list[i].name8 == NULL)
+ {
+ printk(" not found (invalid signature %2.2x %2.2x).\n",
+ SA_prom[14], SA_prom[15]);
+ ret = -ENXIO;
+ goto err_out;
+ }
+#else
+ printk(" not found.\n");
+ ret = -ENXIO;
+ goto err_out;
+#endif
+ }
+
+ if (dev->irq < 2)
+ {
+ unsigned long cookie = probe_irq_on();
+ outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
+ outb_p(0x00, ioaddr + EN0_RCNTLO);
+ outb_p(0x00, ioaddr + EN0_RCNTHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
+ mdelay(10); /* wait 10ms for interrupt to propagate */
+ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
+ dev->irq = probe_irq_off(cookie);
+ if (ei_debug > 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ or don't know which one to set. */
+ dev->irq = 9;
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+ }
+
+ dev->base_addr = ioaddr;
+
+#ifdef CONFIG_PLAT_MAPPI
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+ ioaddr + E8390_CMD); /* 0x61 */
+ for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ dev->dev_addr[i] = SA_prom[i]
+ = inb_p(ioaddr + EN1_PHYS_SHIFT(i));
+ printk(" %2.2x", SA_prom[i]);
+ }
+#else
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+#endif
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+#ifdef CONFIG_PLAT_OAKS32R
+ ei_status.word16 = 0;
+#else
+ ei_status.word16 = (wordlength == 2);
+#endif
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ ei_status.priv = 0;
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+
+err_out:
+ release_region(ioaddr, NE_IO_EXTENT);
+ return ret;
+}
+
+static int ne_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int ne_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
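+	/* The 8390 stores the packet byte count little-endian; fix it up
+	   for big-endian hosts. */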
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16)
+ {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_WARNING "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ udelay(6);
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+
+ if (tries <= 0)
+ {
+ printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es), required");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
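+/* Illustrative load example (values are only an assumption for a typical
+   setup): "modprobe ne io=0x300 irq=9". Up to MAX_NE_CARDS comma-separated
+   values may be given, e.g. "io=0x280,0x300". */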
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_ne_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_ne[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ if (found)
+ break;
+ if (io[this_dev] != 0)
+ printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
+ else
+ printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
+ return -ENXIO;
+ }
+ if (found)
+ return 0;
+ return -ENODEV;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = dev_ne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
new file mode 100644
index 000000000000..6ebef27dbfae
--- /dev/null
+++ b/drivers/net/ne2.c
@@ -0,0 +1,829 @@
+/* ne2.c: A NE/2 Ethernet Driver for Linux. */
+/*
+ Based on the NE2000 driver written by Donald Becker (1992-94).
+ modified by Wim Dumon (Apr 1996)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as wimpie@linux.cc.kuleuven.ac.be
+
+ Currently supported: NE/2
+ This patch was never tested on other MCA-ethernet adapters, but it
+ might work. Just give it a try and let me know if you have problems.
+ Also mail me if it really works, please!
+
+ Changelog:
+ Mon Feb 3 16:26:02 MET 1997
+ - adapted the driver to work with the 2.1.25 kernel
+ - multiple ne2 support (untested)
+ - module support (untested)
+
+ Fri Aug 28 00:18:36 CET 1998 (David Weinehall)
+ - fixed a few minor typos
+ - made the MODULE_PARM conditional (it only works with the v2.1.x kernels)
+ - fixed the module support (Now it's working...)
+
+ Mon Sep 7 19:01:44 CET 1998 (David Weinehall)
+ - added support for Arco Electronics AE/2-card (experimental)
+
+ Mon Sep 14 09:53:42 CET 1998 (David Weinehall)
+ - added support for Compex ENET-16MC/P (experimental)
+
+ Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren)
+ - Miscellaneous bugfixes
+
+ Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson)
+ - Cleanup
+
+ Wed Sep 23 14:33:34 CET 1998 (David Weinehall)
+ - Restructuring and rewriting for v2.1.x compliance
+
+ Wed Oct 14 17:19:21 CET 1998 (David Weinehall)
+ - Added code that unregisters irq and proc-info
+ - Version# bump
+
+ Mon Nov 16 15:28:23 CET 1998 (Wim Dumon)
+	- pass 'dev' as last parameter of request_irq instead of 'NULL'
+
+ Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold)
+ - added support for the D-Link DE-320CT
+
+ * WARNING
+ -------
+ This is alpha-test software. It is not guaranteed to work. As a
+ matter of fact, I'm quite sure there are *LOTS* of bugs in here. I
+ would like to hear from you if you use this driver, even if it works.
+ If it doesn't work, be sure to send me a mail with the problems!
+*/
+
+static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mca-legacy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne2"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x30
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* From the .ADF file: */
+static unsigned int addresses[7] __initdata =
+ {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0};
+static int irqs[4] __initdata = {3, 4, 5, 9};
+
+/* From the D-Link ADF file: */
+static unsigned int dlink_addresses[4] __initdata =
+ {0x300, 0x320, 0x340, 0x360};
+static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15};
+
+struct ne2_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+static struct ne2_adapters_t ne2_adapters[] __initdata = {
+ { 0x6354, "Arco Ethernet Adapter AE/2" },
+ { 0x70DE, "Compex ENET-16 MC/P" },
+ { 0x7154, "Novell Ethernet Adapter NE/2" },
+ { 0x56ea, "D-Link DE-320CT" },
+ { 0x0000, NULL }
+};
+
+extern int netcard_probe(struct net_device *dev);
+
+static int ne2_probe1(struct net_device *dev, int slot);
+
+static int ne_open(struct net_device *dev);
+static int ne_close(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/*
+ * special code to read the DE-320's MAC address EEPROM. In contrast to a
+ * standard NE design, this is a serial EEPROM (93C46) that has to be read
+ * bit by bit. The EEPROM control port at base + 0x1e has the following
+ * layout:
+ *
+ * Bit 0 = Data out (read from EEPROM)
+ * Bit 1 = Data in (write to EEPROM)
+ * Bit 2 = Clock
+ * Bit 3 = Chip Select
+ * Bit 7 = ~50 kHz clock for defined delays
+ *
+ */
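+/*
+ * The helpers below bit-bang the 93C46 READ transaction: pulse chip select,
+ * shift out a start bit, the read opcode and a 6-bit address, then clock in
+ * the 16 data bits one at a time.
+ */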
+
+static void __init dlink_put_eeprom(unsigned char value, unsigned int addr)
+{
+ int z;
+ unsigned char v1, v2;
+
+ /* write the value to the NIC EEPROM register */
+
+ outb(value, addr + 0x1e);
+
+	/* now wait for the clock line to toggle twice. Effectively, we are
+	   waiting (at least) for one full clock cycle */
+
+ for (z = 0; z < 2; z++) {
+ do {
+ v1 = inb(addr + 0x1e);
+ v2 = inb(addr + 0x1e);
+ }
+ while (!((v1 ^ v2) & 0x80));
+ }
+}
+
+static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr)
+{
+ /* shift data bit into correct position */
+
+ bit = bit << 1;
+
+ /* write value, keep clock line high for two cycles */
+
+ dlink_put_eeprom(0x09 | bit, addr);
+ dlink_put_eeprom(0x0d | bit, addr);
+ dlink_put_eeprom(0x0d | bit, addr);
+ dlink_put_eeprom(0x09 | bit, addr);
+}
+
+static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr)
+{
+ int z;
+
+ /* adjust bits so that they are left-aligned in a 16-bit-word */
+
+ value = value << (16 - len);
+
+ /* shift bits out to the EEPROM */
+
+ for (z = 0; z < len; z++) {
+ dlink_send_eeprom_bit((value & 0x8000) >> 15, addr);
+ value = value << 1;
+ }
+}
+
+static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr)
+{
+ int z;
+ unsigned int value = 0;
+
+ /* pull the CS line low for a moment. This resets the EEPROM-
+ internal logic, and makes it ready for a new command. */
+
+ dlink_put_eeprom(0x01, addr);
+ dlink_put_eeprom(0x09, addr);
+
+ /* send one start bit, read command (1 - 0), plus the address to
+ the EEPROM */
+
+ dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr);
+
+ /* get the data word. We clock by sending 0s to the EEPROM, which
+ get ignored during the read process */
+
+ for (z = 0; z < 16; z++) {
+ dlink_send_eeprom_bit(0, addr);
+ value = (value << 1) | (inb(addr + 0x1e) & 0x01);
+ }
+
+ return value;
+}
+
+/*
+ * Note that at boot, this probe only picks up one card at a time.
+ */
+
+static int __init do_ne2_probe(struct net_device *dev)
+{
+ static int current_mca_slot = -1;
+ int i;
+ int adapter_found = 0;
+
+ SET_MODULE_OWNER(dev);
+
+ /* Do not check any supplied i/o locations.
+ POS registers usually don't fail :) */
+
+ /* MCA cards have POS registers.
+ Autodetecting MCA cards is extremely simple.
+ Just search for the card. */
+
+ for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) {
+ current_mca_slot =
+ mca_find_unused_adapter(ne2_adapters[i].id, 0);
+
+ if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) {
+ int res;
+ mca_set_adapter_name(current_mca_slot,
+ ne2_adapters[i].name);
+ mca_mark_as_used(current_mca_slot);
+
+ res = ne2_probe1(dev, current_mca_slot);
+ if (res)
+ mca_mark_as_unused(current_mca_slot);
+ return res;
+ }
+ }
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ mca_mark_as_unused(ei_status.priv);
+ mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init ne2_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ne2_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
+{
+ int len=0;
+
+ len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" );
+ len += sprintf(buf+len, "Driver written by Wim Dumon ");
+ len += sprintf(buf+len, "<wimpie@kotnet.org>\n");
+ len += sprintf(buf+len, "Modified by ");
+ len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n");
+ len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n");
+ len += sprintf(buf+len, "Based on the original NE2000 drivers\n" );
+ len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr);
+ len += sprintf(buf+len, "IRQ : %d\n", dev->irq);
+
+#define HW_ADDR(i) dev->dev_addr[i]
+ len += sprintf(buf+len, "HW addr : %x:%x:%x:%x:%x:%x\n",
+ HW_ADDR(0), HW_ADDR(1), HW_ADDR(2),
+ HW_ADDR(3), HW_ADDR(4), HW_ADDR(5) );
+#undef HW_ADDR
+
+ return len;
+}
+
+static int __init ne2_probe1(struct net_device *dev, int slot)
+{
+ int i, base_addr, irq, retval;
+ unsigned char POS;
+ unsigned char SA_prom[32];
+ const char *name = "NE/2";
+ int start_page, stop_page;
+ static unsigned version_printed;
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("NE/2 ethercard found in slot %d:", slot);
+
+ /* Read base IO and IRQ from the POS-registers */
+ POS = mca_read_stored_pos(slot, 2);
+ if(!(POS % 2)) {
+ printk(" disabled.\n");
+ return -ENODEV;
+ }
+
+ /* handle different POS register structure for D-Link card */
+
+ if (mca_read_stored_pos(slot, 0) == 0xea) {
+ base_addr = dlink_addresses[(POS >> 5) & 0x03];
+ irq = dlink_irqs[(POS >> 2) & 0x07];
+ }
+ else {
+ i = (POS & 0xE)>>1;
+		/* printk("Hallelujah sdog, if there's a 1 after the arrow then 1 - 1 == 0"
+		   " and it should work -> %d\n", i);
+		   The above line was for remote testing, thanx to sdog ... */
+ base_addr = addresses[i - 1];
+ irq = irqs[(POS & 0x60)>>5];
+ }
+
+ if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+#ifdef DEBUG
+	printk("POS info : pos 2 = %#x ; base = %#x ; irq = %d\n", POS,
+ base_addr, irq);
+#endif
+
+#ifndef CRYNWR_WAY
+ /* Reset the card the way they do it in the Crynwr packet driver */
+ for (i=0; i<8; i++)
+ outb(0x0, base_addr + NE_RESET);
+ inb(base_addr + NE_RESET);
+ outb(0x21, base_addr + NE_CMD);
+ if (inb(base_addr + NE_CMD) != 0x21) {
+ printk("NE/2 adapter not responding\n");
+ retval = -ENODEV;
+ goto out;
+ }
+
+	/* In the Crynwr sources they do a RAM test here. I skip it; I assume
+	   my RAM is okay. Suppose your memory is broken: then this test
+	   should fail and you won't be able to use your card. But if I do
+	   not test, you won't be able to use your card either. So the test
+	   wouldn't help you anyway. */
+
+#else /* _I_ never tested it this way .. Go ahead and try ...*/
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on
+ clones.. */
+ outb(inb(base_addr + NE_RESET), base_addr + NE_RESET);
+
+ while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(" not found (no reset ack).\n");
+ retval = -ENODEV;
+ goto out;
+ }
+
+ outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */
+ }
+#endif
+
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to
+ NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {
+ unsigned char value, offset;
+ } program_seq[] = {
+ /* Select page 0 */
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD},
+ {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, base_addr +
+ program_seq[i].offset);
+
+ }
+ for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) {
+ SA_prom[i] = inb(base_addr + NE_DATAPORT);
+ }
+
+	/* I don't know whether the previous sequence includes the general
+	   board reset procedure, so it is safer not to omit it; we simply
+	   overwrite the garbage read from a DE-320 with the correct data. */
+
+ if (mca_read_stored_pos(slot, 0) == 0xea) {
+ unsigned int v;
+
+ for (i = 0; i < 3; i++) {
+ v = dlink_get_eeprom(i, base_addr);
+ SA_prom[(i << 1) ] = v & 0xff;
+ SA_prom[(i << 1) + 1] = (v >> 8) & 0xff;
+ }
+ }
+
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->irq=irq;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n",
+ dev->irq, retval);
+ goto out;
+ }
+
+ dev->base_addr = base_addr;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, base_addr, dev->irq);
+
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+	ei_status.word16 = 1;	/* the NE/2 is always a 16-bit card */
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+
+ ei_status.priv = slot;
+
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+ return 0;
+out:
+ release_region(base_addr, NE_IO_EXTENT);
+ return retval;
+}
+
+static int ne_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int ne_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ printk("resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: ne_reset_8390() did not complete.\n",
+ dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for
+ hints. The NEx000 doesn't share the on-board packet memory -- you have
+ to put the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "(ignored)");
+MODULE_PARM_DESC(irq, "(ignored)");
+MODULE_PARM_DESC(bad, "(ignored)");
+
+/* Module code fixed by David Weinehall */
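+/* Unlike the ISA ne driver, no io=/irq= values are needed: the MCA POS
+   registers are probed directly, so a bare "modprobe ne2" is enough
+   (illustrative example; the io/irq/bad parameters above are ignored). */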
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_ne2_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_ne[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ printk(KERN_WARNING "ne2.c: No NE/2 card found\n");
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = dev_ne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
new file mode 100644
index 000000000000..a1a6c08e7dcf
--- /dev/null
+++ b/drivers/net/ne2k-pci.c
@@ -0,0 +1,712 @@
+/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
+/*
+ A Linux device driver for PCI NE2000 clones.
+
+ Authors and other copyright holders:
+ 1992-2000 by Donald Becker, NE2000 core and various modifications.
+ 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ Copyright 1993 assigned to the United States Government as represented
+ by the Director, National Security Agency.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Issues remaining:
+ People are making PCI ne2000 clones! Oh the horror, the horror...
+ Limited full-duplex support.
+*/
+
+#define DRV_NAME "ne2k-pci"
+#define DRV_VERSION "1.03"
+#define DRV_RELDATE "9/22/2003"
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+
+/* Force a non std. amount of memory. Units are 256 byte pages. */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "8390.h"
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n"
+KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
+
+#if defined(__powerpc__)
+#define inl_le(addr) le32_to_cpu(inl(addr))
+#define inw_le(addr) le16_to_cpu(inw(addr))
+#endif
+
+#define PFX DRV_NAME ": "
+
+MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
+MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(options, "Bit 5: full duplex");
+MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
+
+/* Some defines that people can play with if so inclined. */
+
+/* Use 32 bit data-movement operations instead of 16 bit. */
+#define USE_LONGIO
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Flags. We rename an existing ei_status field to store flags! */
+/* Thus only the low 8 bits are usable for non-init-time flags. */
+#define ne2k_flags reg0
+enum {
+ ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
+ FORCE_FDX=0x20, /* User override. */
+ REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
+ STOP_PG_0x60=0x100,
+};
+
+enum ne2k_pci_chipsets {
+ CH_RealTek_RTL_8029 = 0,
+ CH_Winbond_89C940,
+ CH_Compex_RL2000,
+ CH_KTI_ET32P2,
+ CH_NetVin_NV5000SC,
+ CH_Via_86C926,
+ CH_SureCom_NE34,
+ CH_Winbond_W89C940F,
+ CH_Holtek_HT80232,
+ CH_Holtek_HT80229,
+ CH_Winbond_89C940_8c4a,
+};
+
+
+static struct {
+ char *name;
+ int flags;
+} pci_clone_list[] __devinitdata = {
+ {"RealTek RTL-8029", REALTEK_FDX},
+ {"Winbond 89C940", 0},
+ {"Compex RL2000", 0},
+ {"KTI ET32P2", 0},
+ {"NetVin NV5000SC", 0},
+ {"Via 86C926", ONLY_16BIT_IO},
+ {"SureCom NE34", 0},
+ {"Winbond W89C940F", 0},
+ {"Holtek HT80232", ONLY_16BIT_IO | HOLTEK_FDX},
+ {"Holtek HT80229", ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60 },
+ {"Winbond W89C940(misprogrammed)", 0},
+ {NULL,}
+};
+
+
+static struct pci_device_id ne2k_pci_tbl[] = {
+ { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
+ { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
+ { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
+ { 0x8e2e, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_KTI_ET32P2 },
+ { 0x4a14, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_NetVin_NV5000SC },
+ { 0x1106, 0x0926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Via_86C926 },
+ { 0x10bd, 0x0e34, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_SureCom_NE34 },
+ { 0x1050, 0x5a5a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_W89C940F },
+ { 0x12c3, 0x0058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80232 },
+ { 0x12c3, 0x5598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80229 },
+ { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl);
+
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+
+static int ne2k_pci_open(struct net_device *dev);
+static int ne2k_pci_close(struct net_device *dev);
+
+static void ne2k_pci_reset_8390(struct net_device *dev);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne2k_pci_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static struct ethtool_ops ne2k_pci_ethtool_ops;
+
+
+
+/* There is no room in the standard 8390 structure for extra info we need,
+   so we build a meta/outer-wrapper structure. */
+struct ne2k_pci_card {
+ struct net_device *dev;
+ struct pci_dev *pci_dev;
+};
+
+
+
+/*
+ NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
+ buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
+ 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
+ detected by their SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+*/
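+/*
+ For illustration only (this driver always runs its cards word-wide): the
+ doubling compensation mentioned above is what the ISA ne.c probe does,
+ roughly:
+
+	int doubled = 1;
+	for (i = 0; i < 32; i += 2)
+		if (SA_prom[i] != SA_prom[i + 1])
+			doubled = 0;
+	if (doubled)
+		for (i = 0; i < 16; i++)
+			SA_prom[i] = SA_prom[i + i];
+*/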
+
+
+static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ int i;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ int irq, reg0, chip_idx = ent->driver_data;
+ static unsigned int fnd_cnt;
+ long ioaddr;
+ int flags = pci_clone_list[chip_idx].flags;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ fnd_cnt++;
+
+ i = pci_enable_device (pdev);
+ if (i)
+ return i;
+
+ ioaddr = pci_resource_start (pdev, 0);
+ irq = pdev->irq;
+
+ if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
+ printk (KERN_ERR PFX "no I/O resource at PCI BAR #0\n");
+ return -ENODEV;
+ }
+
+ if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
+ printk (KERN_ERR PFX "I/O resource 0x%x @ 0x%lx busy\n",
+ NE_IO_EXTENT, ioaddr);
+ return -EBUSY;
+ }
+
+ reg0 = inb(ioaddr);
+ if (reg0 == 0xFF)
+ goto err_out_free_res;
+
+ /* Do a preliminary verification that we have a 8390. */
+ {
+ int regd;
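+		/* A genuine 8390's tally counter clears when read, so the
+		   second read below must return zero. */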
+ outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb(ioaddr + 0x0d);
+ outb(0xff, ioaddr + 0x0d);
+ outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb(ioaddr + EN0_COUNTER0) != 0) {
+ outb(reg0, ioaddr);
+ outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ goto err_out_free_res;
+ }
+ }
+
+ /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */
+ dev = alloc_ei_netdev();
+ if (!dev) {
+ printk (KERN_ERR PFX "cannot allocate ethernet device\n");
+ goto err_out_free_res;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ /* This looks like a horrible timing loop, but it should never take
+ more than a few cycles.
+ */
+ while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ /* Limit wait: '2' avoids jiffy roll-over. */
+ if (jiffies - reset_start_time > 2) {
+ printk(KERN_ERR PFX "Card failure (no reset ack).\n");
+ goto err_out_free_netdev;
+ }
+
+ outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x49, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+
+ /* Note: all PCI cards have at least 16 bit access, so we don't have
+ to check for 8 bit cards. Most cards permit 32 bit access. */
+ if (flags & ONLY_32BIT_IO) {
+ for (i = 0; i < 4 ; i++)
+ ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
+ } else
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+
+ /* We always set the 8390 registers for word mode. */
+ outb(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+
+ stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG;
+
+ /* Set up the rest of the parameters. */
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+ pci_set_drvdata(pdev, dev);
+
+ ei_status.name = pci_clone_list[chip_idx].name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.ne2k_flags = flags;
+ if (fnd_cnt < MAX_UNITS) {
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
+ ei_status.ne2k_flags |= FORCE_FDX;
+ }
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne2k_pci_reset_8390;
+ ei_status.block_input = &ne2k_pci_block_input;
+ ei_status.block_output = &ne2k_pci_block_output;
+ ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
+ ei_status.priv = (unsigned long) pdev;
+ dev->open = &ne2k_pci_open;
+ dev->stop = &ne2k_pci_close;
+ dev->ethtool_ops = &ne2k_pci_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_free_netdev;
+
+ printk("%s: %s found at %#lx, IRQ %d, ",
+ dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq);
+ for(i = 0; i < 6; i++) {
+ printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ return 0;
+
+err_out_free_netdev:
+ free_netdev (dev);
+err_out_free_res:
+ release_region (ioaddr, NE_IO_EXTENT);
+ pci_set_drvdata (pdev, NULL);
+ return -ENODEV;
+
+}
+
+/*
+ * Magic incantation sequence for full duplex on the supported cards.
+ */
+static inline int set_realtek_fdx(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */
+ outb(0xC0, ioaddr + 0x01); /* Enable writes to CONFIG3 */
+ outb(0x40, ioaddr + 0x06); /* Enable full duplex */
+ outb(0x00, ioaddr + 0x01); /* Disable writes to CONFIG3 */
+ outb(E8390_PAGE0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 0 */
+ return 0;
+}
+
+static inline int set_holtek_fdx(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ return 0;
+}
+
+static int ne2k_pci_set_fdx(struct net_device *dev)
+{
+ if (ei_status.ne2k_flags & REALTEK_FDX)
+ return set_realtek_fdx(dev);
+ else if (ei_status.ne2k_flags & HOLTEK_FDX)
+ return set_holtek_fdx(dev);
+
+ return -EOPNOTSUPP;
+}
+
+static int ne2k_pci_open(struct net_device *dev)
+{
+ int ret = request_irq(dev->irq, ei_interrupt, SA_SHIRQ, dev->name, dev);
+ if (ret)
+ return ret;
+
+ if (ei_status.ne2k_flags & FORCE_FDX)
+ ne2k_pci_set_fdx(dev);
+
+ ei_open(dev);
+ return 0;
+}
+
+static int ne2k_pci_close(struct net_device *dev)
+{
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void ne2k_pci_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
+ dev->name, jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2) {
+ printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ long nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb(0, nic_base + EN0_RCNTHI);
+ outb(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ } else {
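+		/* The 4-byte header (status, next page, 16-bit count) fits in
+		   a single 32-bit read. */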
+ *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ le16_to_cpus(&hdr->count);
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ long nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ u16 *b = (u16 *)buf;
+
+ *b++ = le16_to_cpu(inw(NE_BASE + NE_DATAPORT));
+ buf = (char *)b;
+ }
+ if (count & 1)
+ *buf = inb(NE_BASE + NE_DATAPORT);
+ }
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne2k_pci_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ long nic_base = NE_BASE;
+ unsigned long dma_start;
+
+ /* On little-endian it's always safe to round the count up for
+ word writes. */
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ else
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb(0x42, nic_base + EN0_RCNTLO);
+ outb(0x00, nic_base + EN0_RCNTHI);
+ outb(0x42, nic_base + EN0_RSARLO);
+ outb(0x00, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+#endif
+ outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(0x00, nic_base + EN0_RSARLO);
+ outb(start_page, nic_base + EN0_RSARHI);
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
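+			/* count was rounded up to an even value above, so at
+			   most a 16-bit tail remains; there is no 1-byte case. */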
+ buf += count & ~3;
+ if (count & 2) {
+ u16 *b = (u16 *)buf;
+
+ outw(cpu_to_le16(*b++), NE_BASE + NE_DATAPORT);
+ buf = (char *)b;
+ }
+ }
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne2k_pci_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+static void ne2k_pci_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ei_device *ei = dev->priv;
+ struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(pci_dev));
+}
+
+static struct ethtool_ops ne2k_pci_ethtool_ops = {
+ .get_drvinfo = ne2k_pci_get_drvinfo,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+};
+
+static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ BUG();
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int ne2k_pci_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+ NS8390_init(dev, 1);
+ netif_device_attach(dev);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver ne2k_driver = {
+ .name = DRV_NAME,
+ .probe = ne2k_pci_init_one,
+ .remove = __devexit_p(ne2k_pci_remove_one),
+ .id_table = ne2k_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = ne2k_pci_suspend,
+ .resume = ne2k_pci_resume,
+#endif /* CONFIG_PM */
+
+};
+
+
+static int __init ne2k_pci_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init (&ne2k_driver);
+}
+
+
+static void __exit ne2k_pci_cleanup(void)
+{
+ pci_unregister_driver (&ne2k_driver);
+}
+
+module_init(ne2k_pci_init);
+module_exit(ne2k_pci_cleanup);
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
new file mode 100644
index 000000000000..6c92f0969015
--- /dev/null
+++ b/drivers/net/ne3210.c
@@ -0,0 +1,374 @@
+/*
+ ne3210.c
+
+ Linux driver for Novell NE3210 EISA Network Adapter
+
+ Copyright (C) 1998, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32)
+ 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
+ 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file
+
+ The NE3210 is an EISA shared memory NS8390 implementation. Shared
+	The NE3210 is an EISA shared memory NS8390 implementation.  A shared
+	memory address above 1MB should work with this driver.
+ Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched
+ around (or perhaps there are some defective/backwards cards ???)
+
+ This driver WILL NOT WORK FOR THE NE3200 - it is completely different
+ and does not use an 8390 at all.
+
+ Updated to EISA probing API 5/2003 by Marc Zyngier.
+*/
+
+static const char *version =
+ "ne3210.c: Driver revision v0.03, 30/09/98\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne3210"
+
+static int ne3210_open(struct net_device *dev);
+static int ne3210_close(struct net_device *dev);
+
+static void ne3210_reset_8390(struct net_device *dev);
+
+static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
+
+#define NE3210_START_PG 0x00 /* First page of TX buffer */
+#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define NE3210_IO_EXTENT 0x20
+#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */
+#define NE3210_RESET_PORT 0xc84
+#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
+
+#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */
+#define NE3210_ADDR1 0x00
+#define NE3210_ADDR2 0x1b
+
+#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
+#define NE3210_CFG2 0xc90
+#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1)
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to NE3210_DEBUG to get verbose driver info during operation.
+ * Currently only the probe one is implemented.
+ */
+
+#define NE3210_D_PROBE 0x01
+#define NE3210_D_RX_PKT 0x02
+#define NE3210_D_TX_PKT 0x04
+#define NE3210_D_IRQ 0x08
+
+#define NE3210_DEBUG 0x0
+
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
+ IF_PORT_10BASET,
+ IF_PORT_UNKNOWN,
+ IF_PORT_10BASE2,
+ IF_PORT_AUI,
+};
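+
+/*
+ * Editor's sketch, purely illustrative: if the byte read from
+ * NE3210_CFG2 were 0x2b, the probe below would decode it as media
+ * index 0x2b >> 6 = 0 ("UTP"), IRQ index (0x2b >> 3) & 0x07 = 5 ->
+ * irq_map[5] = 7, and shared-memory index 0x2b & 0x07 = 3 ->
+ * shmem_map[3] * 0x1000 = 0xd8000.
+ */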
+
+static int __init ne3210_eisa_probe (struct device *device)
+{
+ unsigned long ioaddr, phys_mem;
+ int i, retval, port_index;
+ struct eisa_device *edev = to_eisa_device (device);
+ struct net_device *dev;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (!(dev = alloc_ei_netdev ())) {
+ printk ("ne3210.c: unable to allocate memory for dev!\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, device);
+ device->driver_data = dev;
+ ioaddr = edev->base_addr;
+
+ if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ if (!request_region(ioaddr + NE3210_CFG1,
+ NE3210_CFG_EXTENT, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out1;
+ }
+
+#if NE3210_DEBUG & NE3210_D_PROBE
+	printk("ne3210-debug: probe at %#lx, ID %s\n", ioaddr, edev->id.sig);
+ printk("ne3210-debug: config regs: %#x %#x\n",
+ inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2));
+#endif
+
+
+ port_index = inb(ioaddr + NE3210_CFG2) >> 6;
+ printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr:",
+ edev->slot, ifmap[port_index]);
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i)));
+
+
+ /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
+ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
+ printk(".\nne3210.c: using IRQ %d, ", dev->irq);
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out2;
+ }
+
+ phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000;
+
+ /*
+ BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ the card mem within the region covered by `normal' RAM !!!
+ */
+ if (phys_mem > 1024*1024) { /* phys addr > 1MB */
+ if (phys_mem < virt_to_phys(high_memory)) {
+ printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
+ printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
+ printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
+ printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
+ retval = -EINVAL;
+ goto out3;
+ }
+ }
+
+ if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) {
+ printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n",
+ phys_mem);
+		retval = -EBUSY;
+		goto out3;
+ }
+
+ printk("%dkB memory at physical address %#lx\n",
+ NE3210_STOP_PG/4, phys_mem);
+
+ ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n");
+ printk(KERN_ERR "ne3210.c: Driver NOT installed.\n");
+ retval = -EAGAIN;
+ goto out4;
+ }
+ printk("ne3210.c: remapped %dkB card memory to virtual address %p\n",
+ NE3210_STOP_PG/4, ei_status.mem);
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256;
+
+ /* The 8390 offset is zero for the NE3210 */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "NE3210";
+ ei_status.tx_start_page = NE3210_START_PG;
+ ei_status.rx_start_page = NE3210_START_PG + TX_PAGES;
+ ei_status.stop_page = NE3210_STOP_PG;
+ ei_status.word16 = 1;
+ ei_status.priv = phys_mem;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &ne3210_reset_8390;
+ ei_status.block_input = &ne3210_block_input;
+ ei_status.block_output = &ne3210_block_output;
+ ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
+
+ dev->open = &ne3210_open;
+ dev->stop = &ne3210_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ dev->if_port = ifmap_val[port_index];
+
+ if ((retval = register_netdev (dev)))
+ goto out5;
+
+ NS8390_init(dev, 0);
+ return 0;
+
+ out5:
+ iounmap(ei_status.mem);
+ out4:
+ release_mem_region (phys_mem, NE3210_STOP_PG*0x100);
+ out3:
+ free_irq (dev->irq, dev);
+ out2:
+ release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
+ out1:
+ release_region (ioaddr, NE3210_IO_EXTENT);
+ out:
+ free_netdev (dev);
+
+ return retval;
+}
+
+static int __devexit ne3210_eisa_remove (struct device *device)
+{
+ struct net_device *dev = device->driver_data;
+ unsigned long ioaddr = to_eisa_device (device)->base_addr;
+
+ unregister_netdev (dev);
+ iounmap(ei_status.mem);
+ release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100);
+ free_irq (dev->irq, dev);
+ release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
+ release_region (ioaddr, NE3210_IO_EXTENT);
+ free_netdev (dev);
+
+ return 0;
+}
+
+/*
+ * Reset by toggling the "Board Enable" bits (bit 2 and 0).
+ */
+
+static void ne3210_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ outb(0x04, ioaddr + NE3210_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name);
+
+ mdelay(2);
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + NE3210_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/*
+ * Note: the following three functions implicitly assume that the
+ * associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as some multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+/*
+ * Block input and output are easy on shared memory ethercards; the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via ne3210_get_8390_hdr() above.
+ */
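+/*
+ * Editor's sketch with illustrative numbers: the RX ring ends at
+ * NE3210_STOP_PG*256 = 0x8000, so a 0x130-byte packet starting at
+ * ring_offset 0x7f80 is copied as 0x80 bytes up to the ring end plus
+ * the remaining 0xb0 bytes starting at offset TX_PAGES*256.
+ */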
+
+static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256;
+
+ if (ring_offset + count > NE3210_STOP_PG*256) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = NE3210_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + TX_PAGES*256, count);
+ } else {
+ /* Packet is in one chunk. */
+ memcpy_fromio(skb->data, start, count);
+ }
+}
+
+static void ne3210_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ne3210_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int ne3210_close(struct net_device *dev)
+{
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ ei_close(dev);
+ return 0;
+}
+
+static struct eisa_device_id ne3210_ids[] = {
+ { "EGL0101" },
+ { "NVL1801" },
+ { "" },
+};
+
+static struct eisa_driver ne3210_eisa_driver = {
+ .id_table = ne3210_ids,
+ .driver = {
+ .name = "ne3210",
+ .probe = ne3210_eisa_probe,
+ .remove = __devexit_p (ne3210_eisa_remove),
+ },
+};
+
+MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(eisa, ne3210_ids);
+
+int ne3210_init(void)
+{
+ return eisa_driver_register (&ne3210_eisa_driver);
+}
+
+void ne3210_cleanup(void)
+{
+ eisa_driver_unregister (&ne3210_eisa_driver);
+}
+
+module_init (ne3210_init);
+module_exit (ne3210_cleanup);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
new file mode 100644
index 000000000000..edd1b5306b16
--- /dev/null
+++ b/drivers/net/netconsole.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/net/netconsole.c
+ *
+ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the implementation of an IRQ-safe, crash-safe
+ * kernel console implementation that outputs kernel messages to the
+ * network.
+ *
+ * Modification history:
+ *
+ * 2001-09-17 started by Ingo Molnar.
+ * 2003-08-11 2.6 port by Matt Mackall
+ * simplified options
+ * generic card hooks
+ * works non-modular
+ * 2003-09-07 rewritten with netpoll api
+ */
+
+/****************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ ****************************************************************/
+
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty_driver.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/smp.h>
+#include <linux/netpoll.h>
+
+MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
+MODULE_DESCRIPTION("Console driver for network interfaces");
+MODULE_LICENSE("GPL");
+
+static char config[256];
+module_param_string(netconsole, config, 256, 0);
+MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n");
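+/*
+ * Editor's sketch, purely illustrative (hypothetical addresses): a typical
+ * setting, on the kernel command line or as a module parameter, might be
+ *
+ *   netconsole=6665@10.0.0.2/eth0,6666@10.0.0.1/00:11:22:33:44:55
+ *
+ * i.e. send from local port 6665 on 10.0.0.2 via eth0 to port 6666 on the
+ * host 10.0.0.1 with that MAC address; omitted fields fall back to the
+ * defaults in 'np' below.
+ */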
+
+static struct netpoll np = {
+ .name = "netconsole",
+ .dev_name = "eth0",
+ .local_port = 6665,
+ .remote_port = 6666,
+ .remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .drop = netpoll_queue,
+};
+static int configured = 0;
+
+#define MAX_PRINT_CHUNK 1000
+
+static void write_msg(struct console *con, const char *msg, unsigned int len)
+{
+ int frag, left;
+ unsigned long flags;
+
+ if (!np.dev)
+ return;
+
+ local_irq_save(flags);
+
+ for(left = len; left; ) {
+ frag = min(left, MAX_PRINT_CHUNK);
+ netpoll_send_udp(&np, msg, frag);
+ msg += frag;
+ left -= frag;
+ }
+
+ local_irq_restore(flags);
+}
+
+static struct console netconsole = {
+ .flags = CON_ENABLED | CON_PRINTBUFFER,
+ .write = write_msg
+};
+
+static int option_setup(char *opt)
+{
+ configured = !netpoll_parse_options(&np, opt);
+ return 0;
+}
+
+__setup("netconsole=", option_setup);
+
+static int init_netconsole(void)
+{
+ if(strlen(config))
+ option_setup(config);
+
+ if(!configured) {
+ printk("netconsole: not configured, aborting\n");
+ return -EINVAL;
+ }
+
+ if(netpoll_setup(&np))
+ return -EINVAL;
+
+ register_console(&netconsole);
+ printk(KERN_INFO "netconsole: network logging started\n");
+ return 0;
+}
+
+static void cleanup_netconsole(void)
+{
+ unregister_console(&netconsole);
+ netpoll_cleanup(&np);
+}
+
+module_init(init_netconsole);
+module_exit(cleanup_netconsole);
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
new file mode 100644
index 000000000000..2ab01a5d1d22
--- /dev/null
+++ b/drivers/net/ni5010.c
@@ -0,0 +1,812 @@
+/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
+ *
+ * Copyright 1996,1997 Jan-Pascal van Best and Andreas Mohr.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The authors may be reached as:
+ * jvbest@wi.leidenuniv.nl a.mohr@mailto.de
+ * or by snail mail as
+ * Jan-Pascal van Best Andreas Mohr
+ * Klikspaanweg 58-4 Stauferstr. 6
+ * 2324 LZ Leiden D-71272 Renningen
+ * The Netherlands Germany
+ *
+ * Sources:
+ * Donald Becker's "skeleton.c"
+ * Crynwr ni5010 packet driver
+ *
+ * Changes:
+ * v0.0: First test version
+ * v0.1: First working version
+ * v0.2:
+ * v0.3->v0.90: Now demand setting io and irq when loading as module
+ * 970430 v0.91: modified for Linux 2.1.14
+ * v0.92: Implemented Andreas' (better) NI5010 probe
+ * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
+ * 970623 v1.00: First kernel version (AM)
+ * 970814 v1.01: Added detection of onboard receive buffer size (AM)
+ * Bugs:
+ * - None known...
+ * - Note that you have to patch ifconfig for the new /proc/net/dev
+ * format. It gives incorrect stats otherwise.
+ *
+ * To do:
+ * Fix all bugs :-)
+ * Move some stuff to chipset_init()
+ * Handle xmt errors other than collisions
+ * Complete merge with Andreas' driver
+ * Implement ring buffers (Is this useful? You can't squeeze
+ *		too many packets in a 2k buffer!)
+ *	Implement DMA (Again, is this useful? Some docs say DMA is
+ * slower than programmed I/O)
+ *
+ * Compile with:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
+ * -DMODULE -c ni5010.c
+ *
+ * Insert with e.g.:
+ * insmod ni5010.o io=0x300 irq=5
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni5010.h"
+
+static const char *boardname = "NI5010";
+static char *version =
+ "ni5010.c: v1.00 06/23/97 Jan-Pascal van Best and Andreas Mohr\n";
+
+/* bufsize_rcv == 0 means autoprobing */
+static unsigned int bufsize_rcv;
+
+#define jumpered_interrupts /* IRQ line jumpered on board */
+#undef jumpered_dma /* No DMA used */
+#undef FULL_IODETECT /* Only detect in portlist */
+
+#ifndef FULL_IODETECT
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ports[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
+#endif
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef NI5010_DEBUG
+#define NI5010_DEBUG 0
+#endif
+
+/* Information that needs to be kept for each board. */
+struct ni5010_local {
+ struct net_device_stats stats;
+ int o_pkt_size;
+ spinlock_t lock;
+};
+
+/* Index to functions, as function prototypes. */
+
+static int ni5010_probe1(struct net_device *dev, int ioaddr);
+static int ni5010_open(struct net_device *dev);
+static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ni5010_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void ni5010_rx(struct net_device *dev);
+static void ni5010_timeout(struct net_device *dev);
+static int ni5010_close(struct net_device *dev);
+static struct net_device_stats *ni5010_get_stats(struct net_device *dev);
+static void ni5010_set_multicast_list(struct net_device *dev);
+static void reset_receiver(struct net_device *dev);
+
+static int process_xmt_interrupt(struct net_device *dev);
+#define tx_done(dev) 1
+static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
+static void chipset_init(struct net_device *dev, int startp);
+static void dump_packet(void *buf, int len);
+static void ni5010_show_registers(struct net_device *dev);
+
+static int io;
+static int irq;
+
+struct net_device * __init ni5010_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
+ int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = ni5010_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+#ifdef FULL_IODETECT
+ for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
+ ;
+ if (io == 0x400)
+ err = -ENODEV;
+
+#else
+ for (port = ports; *port && ni5010_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+#endif /* FULL_IODETECT */
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, NI5010_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static inline int rd_port(int ioaddr)
+{
+ inb(IE_RBUF);
+ return inb(IE_SAPROM);
+}
+
+static void __init trigger_irq(int ioaddr)
+{
+ outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */
+ outb(0x00, IE_RESET); /* Board reset */
+ outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */
+ outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */
+ outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */
+ outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */
+ /*
+ * Transmit packet mode: Ignore parity, Power xcvr,
+ * Enable loopback
+ */
+ outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
+ outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
+ outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */
+ udelay(50); /* FIXME: Necessary? */
+ outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
+}
+
+/*
+ * This is the real probe routine. Linux has a history of friendly device
+ * probes on the ISA bus. A good device probes avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ */
+
+static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned version_printed;
+ struct ni5010_local *lp;
+ int i;
+ unsigned int data = 0;
+ int boguscount = 40;
+ int err = -ENODEV;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
+ return -EBUSY;
+
+ /*
+	 * This is not an "official" probe method; rather, I tested which
+	 * probe works best with my seven NI5010 cards
+	 * (they have very different serial numbers).
+	 * Suggestions or failure reports are very, very welcome!
+	 * But I think it is a relatively good probe method,
+	 * since it doesn't use any "outb".
+	 * It should be nearly 100% reliable!
+	 * Well-known WARNING: this probe method (like many others)
+	 * will hang the system if an NE2000 card region is probed!
+ *
+ * - Andreas
+ */
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
+ dev->name, ioaddr));
+
+ if (inb(ioaddr+0) == 0xff)
+ goto out;
+
+ while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
+ rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
+ {
+ if (boguscount-- == 0)
+ goto out;
+ }
+
+ PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));
+
+ for (i=0; i<32; i++)
+ if ( (data = rd_port(ioaddr)) != 0xff) break;
+ if (data==0xff)
+ goto out;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));
+
+ if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
+ (rd_port(ioaddr) != SA_ADDR2))
+ goto out;
+
+ for (i=0; i<4; i++)
+ rd_port(ioaddr);
+
+ if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
+ (rd_port(ioaddr) != NI5010_MAGICVAL2) )
+ goto out;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));
+
+ if (NI5010_DEBUG && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ printk("NI5010 ethercard probe at 0x%x: ", ioaddr);
+
+ dev->base_addr = ioaddr;
+
+ for (i=0; i<6; i++) {
+ outw(i, IE_GP);
+ printk("%2.2x ", dev->dev_addr[i] = inb(IE_SAPROM));
+ }
+
+ PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
+
+#ifdef jumpered_interrupts
+ if (dev->irq == 0xff)
+ ;
+ else if (dev->irq < 2) {
+ unsigned long irq_mask;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));
+
+ irq_mask = probe_irq_on();
+ trigger_irq(ioaddr);
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+
+ PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));
+
+ if (dev->irq == 0) {
+ err = -EAGAIN;
+ printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
+ goto out;
+ }
+ PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
+ } else if (dev->irq == 2) {
+ dev->irq = 9;
+ }
+#endif	/* jumpered_interrupts */
+ PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
+
+ /* DMA is not supported (yet?), so no use detecting it */
+ lp = netdev_priv(dev);
+
+ spin_lock_init(&lp->lock);
+
+ PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));
+
+/* get the size of the onboard receive buffer
+ * higher addresses than bufsize are wrapped into real buffer
+ * i.e. data for offs. 0x801 is written to 0x1 with a 2K onboard buffer
+ */
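+/*
+ * Editor's sketch of what the loop below computes: byte i is written at
+ * offset i*256, then offset 0 is read back.  With a 2K buffer the write
+ * at offset 0x800 wraps to offset 0, so the first match occurs at i = 8
+ * and bufsize_rcv = 8 << 8 = 2048.
+ */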
+ if (!bufsize_rcv) {
+ outb(1, IE_MMODE); /* Put Rcv buffer on system bus */
+ outw(0, IE_GP); /* Point GP at start of packet */
+ outb(0, IE_RBUF); /* set buffer byte 0 to 0 */
+ for (i = 1; i < 0xff; i++) {
+ outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
+ outb(i, IE_RBUF);
+ outw(0x0, IE_GP); /* Point GP at start of packet */
+ data = inb(IE_RBUF);
+ if (data == i) break;
+ }
+ bufsize_rcv = i << 8;
+ outw(0, IE_GP); /* Point GP at start of packet */
+ outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
+ }
+ printk("// bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
+ memset(dev->priv, 0, sizeof(struct ni5010_local));
+
+ dev->open = ni5010_open;
+ dev->stop = ni5010_close;
+ dev->hard_start_xmit = ni5010_send_packet;
+ dev->get_stats = ni5010_get_stats;
+ dev->set_multicast_list = ni5010_set_multicast_list;
+ dev->tx_timeout = ni5010_timeout;
+ dev->watchdog_timeo = HZ/20;
+
+ dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
+
+ /* Shut up the ni5010 */
+ outb(0, EDLC_RMASK); /* Mask all receive interrupts */
+ outb(0, EDLC_XMASK); /* Mask all xmit interrupts */
+ outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */
+ outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
+
+ printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
+ if (dev->dma) printk(" & DMA %d", dev->dma);
+ printk(".\n");
+
+ printk(KERN_INFO "Join the NI5010 driver development team!\n");
+ printk(KERN_INFO "Mail to a.mohr@mailto.de or jvbest@wi.leidenuniv.nl\n");
+ return 0;
+out:
+ release_region(dev->base_addr, NI5010_IO_EXTENT);
+ return err;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int ni5010_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
+
+ if (request_irq(dev->irq, &ni5010_interrupt, 0, boardname, dev)) {
+ printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+#ifdef jumpered_dma
+	if (request_dma(dev->dma, boardname)) {
+ printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
+ free_irq(dev->irq, NULL);
+ return -EAGAIN;
+ }
+#endif /* jumpered_dma */
+
+ PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
+ /* Reset the hardware here. Don't forget to set the station address. */
+
+ outb(RS_RESET, EDLC_RESET); /* Hold up EDLC_RESET while configing board */
+ outb(0, IE_RESET); /* Hardware reset of ni5010 board */
+ outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */
+
+ PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));
+ /* Set the station address */
+ for(i = 0;i < 6; i++) {
+ outb(dev->dev_addr[i], EDLC_ADDR + i);
+ }
+
+ PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));
+ outb(0, EDLC_XMASK); /* No xmit interrupts for now */
+ outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
+ /* Normal packet xmit mode */
+ outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */
+ outb(RMD_BROADCAST, EDLC_RMODE);
+ /* Receive broadcast and normal packets */
+ reset_receiver(dev); /* Ready ni5010 for receiving packets */
+
+ outb(0, EDLC_RESET); /* Un-reset the ni5010 */
+
+ netif_start_queue(dev);
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+
+ PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
+ return 0;
+}
+
+static void reset_receiver(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
+ outw(0, IE_GP); /* Receive packet at start of buffer */
+ outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */
+ outb(0, IE_MMODE); /* Put EDLC to rcv buffer */
+ outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
+ outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */
+}
+
+static void ni5010_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ /* FIXME: Give it a real kick here */
+ chipset_init(dev, 1);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));
+
+ /*
+ * Block sending
+ */
+
+ netif_stop_queue(dev);
+ hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb);
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t ni5010_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct ni5010_local *lp;
+ int ioaddr, status;
+ int xmit_was_error = 0;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+ status = inb(IE_ISTAT);
+ PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));
+
+ if ((status & IS_R_INT) == 0) ni5010_rx(dev);
+
+ if ((status & IS_X_INT) == 0) {
+ xmit_was_error = process_xmt_interrupt(dev);
+ }
+
+ if ((status & IS_DMA_INT) == 0) {
+ PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
+ outb(0, IE_DMA_RST); /* Reset DMA int */
+ }
+
+ if (!xmit_was_error)
+ reset_receiver(dev);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+
+static void dump_packet(void *buf, int len)
+{
+ int i;
+
+ printk(KERN_DEBUG "Packet length = %#4x\n", len);
+ for (i = 0; i < len; i++){
+ if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i);
+ if (i % 2 == 0) printk(" ");
+ printk("%2.2x", ((unsigned char *)buf)[i]);
+ if (i % 16 == 15) printk("\n");
+ }
+ printk("\n");
+
+ return;
+}
+
+/* We have a good packet, get it out of the buffer. */
+static void ni5010_rx(struct net_device *dev)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned char rcv_stat;
+ struct sk_buff *skb;
+ int i_pkt_size;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));
+
+ rcv_stat = inb(EDLC_RSTAT);
+ PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));
+
+ if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
+ PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
+ lp->stats.rx_errors++;
+ if (rcv_stat & RS_RUNT) lp->stats.rx_length_errors++;
+ if (rcv_stat & RS_ALIGN) lp->stats.rx_frame_errors++;
+ if (rcv_stat & RS_CRC_ERR) lp->stats.rx_crc_errors++;
+ if (rcv_stat & RS_OFLW) lp->stats.rx_fifo_errors++;
+ outb(0xff, EDLC_RCLR); /* Clear the interrupt */
+ return;
+ }
+
+ outb(0xff, EDLC_RCLR); /* Clear the interrupt */
+
+ i_pkt_size = inw(IE_RCNT);
+ if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
+ PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
+ dev->name, i_pkt_size));
+ lp->stats.rx_errors++;
+ lp->stats.rx_length_errors++;
+ return;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(i_pkt_size + 3);
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+
+ /* Read packet into buffer */
+ outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
+ outw(0, IE_GP); /* Seek to beginning of packet */
+ insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);
+
+ if (NI5010_DEBUG >= 4)
+ dump_packet(skb->data, skb->len);
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += i_pkt_size;
+
+ PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
+ dev->name, i_pkt_size));
+
+}
+
+static int process_xmt_interrupt(struct net_device *dev)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int xmit_stat;
+
+ PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));
+
+ xmit_stat = inb(EDLC_XSTAT);
+ PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));
+
+ outb(0, EDLC_XMASK); /* Disable xmit IRQ's */
+ outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */
+
+ if (xmit_stat & XS_COLL){
+ PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
+ dev->name));
+ outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
+ /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
+ outb(MM_EN_XMT | MM_MUX, IE_MMODE);
+ outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
+ lp->stats.collisions++;
+ return 1;
+ }
+
+ /* FIXME: handle other xmt error conditions */
+
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->o_pkt_size;
+ netif_wake_queue(dev);
+
+ PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
+ dev->name, lp->o_pkt_size));
+
+ return 0;
+}
+
+/* The inverse routine to ni5010_open(). */
+static int ni5010_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
+#ifdef jumpered_interrupts
+ free_irq(dev->irq, NULL);
+#endif
+ /* Put card in held-RESET state */
+ outb(0, IE_MMODE);
+ outb(RS_RESET, EDLC_RESET);
+
+ netif_stop_queue(dev);
+
+ PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
+ return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *ni5010_get_stats(struct net_device *dev)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_get_stats\n", dev->name));
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+
+ /* cli(); */
+ /* Update the statistics from the device registers. */
+ /* We do this in the interrupt handler */
+ /* sti(); */
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+*/
+static void ni5010_set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
+
+ if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) {
+ dev->flags |= IFF_PROMISC;
+ outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
+ PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
+ } else if (dev->mc_list) {
+ /* Sorry, multicast not supported */
+ PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name));
+ outb(RMD_BROADCAST, EDLC_RMODE);
+ } else {
+ PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
+ outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
+ }
+}
+
+static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ unsigned int buf_offs;
+
+ PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));
+
+ if (length > ETH_FRAME_LEN) {
+ PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
+ dev->name));
+ return;
+ }
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+
+ if (inb(IE_ISTAT) & IS_EN_XMT) {
+ PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
+ dev->name));
+ return;
+ }
+
+ if (NI5010_DEBUG > 3) dump_packet(buf, length);
+
+ buf_offs = NI5010_BUFSIZE - length - pad;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->o_pkt_size = length + pad;
+
+ outb(0, EDLC_RMASK); /* Mask all receive interrupts */
+ outb(0, IE_MMODE); /* Put Xmit buffer on system bus */
+ outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */
+
+ outw(buf_offs, IE_GP); /* Point GP at start of packet */
+ outsb(IE_XBUF, buf, length); /* Put data in buffer */
+ while(pad--)
+ outb(0, IE_XBUF);
+
+ outw(buf_offs, IE_GP); /* Rewrite where packet starts */
+
+ /* should work without that outb() (Crynwr used it) */
+ /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
+ outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */
+ outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_wake_queue(dev);
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+}
+
+static void chipset_init(struct net_device *dev, int startp)
+{
+ /* FIXME: Move some stuff here */
+ PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
+}
+
+static void ni5010_show_registers(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
+ PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
+ PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
+ PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
+ PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
+ PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
+ PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni5010;
+
+MODULE_PARM(io, "i");
+MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "ni5010 I/O base address");
+MODULE_PARM_DESC(irq, "ni5010 IRQ number");
+
+int init_module(void)
+{
+ PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
+ /*
+ if(io <= 0 || irq == 0){
+ printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
+ printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
+ return -EINVAL;
+ }
+ */
+ if (io <= 0){
+ printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname);
+ }
+
+ PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
+ dev_ni5010 = ni5010_probe(-1);
+ if (IS_ERR(dev_ni5010))
+ return PTR_ERR(dev_ni5010);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
+ unregister_netdev(dev_ni5010);
+ release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
+ free_netdev(dev_ni5010);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c ni5010.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/ni5010.h b/drivers/net/ni5010.h
new file mode 100644
index 000000000000..e10e717fcd76
--- /dev/null
+++ b/drivers/net/ni5010.h
@@ -0,0 +1,144 @@
+/*
+ * Racal-Interlan ni5010 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ */
+
+#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */
+
+#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */
+#define NI5010_MAGICVAL1 0x55
+#define NI5010_MAGICVAL2 0xAA
+
+#define SA_ADDR0 0x02
+#define SA_ADDR1 0x07
+#define SA_ADDR2 0x01
+
+/* The number of low I/O ports used by the ni5010 ethercard. */
+#define NI5010_IO_EXTENT 32
+
+#define PRINTK(x) if (NI5010_DEBUG) printk x
+#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x
+#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x
+
+/* The various IE command registers */
+#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */
+#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */
+#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */
+#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */
+#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */
+#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */
+#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */
+#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */
+#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */
+#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */
+#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */
+ /* 0x0E doesn't exist for r/w */
+#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */
+#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */
+ /* 0x11 is 2nd byte of GP Pointer */
+#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */
+ /* 0x11 is 2nd byte of "Byte Count" */
+#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */
+#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */
+#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. read only */
+#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */
+#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */
+#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */
+#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */
+
+/* bits in EDLC_XSTAT, interrupt clear on write, status when read */
+#define XS_TPOK 0x80 /* transmit packet successful */
+#define XS_CS 0x40 /* carrier sense */
+#define XS_RCVD 0x20 /* transmitted packet received */
+#define XS_SHORT 0x10 /* transmission media is shorted */
+#define XS_UFLW 0x08 /* underflow. iff failed board */
+#define XS_COLL 0x04 /* collision occurred */
+#define XS_16COLL 0x02 /* 16th collision occurred */
+#define XS_PERR 0x01 /* parity error */
+
+#define XS_CLR_UFLW 0x08 /* clear underflow */
+#define XS_CLR_COLL 0x04 /* clear collision */
+#define XS_CLR_16COLL 0x02 /* clear 16th collision */
+#define XS_CLR_PERR 0x01 /* clear parity error */
+
+/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */
+#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */
+#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */
+#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */
+#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */
+#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */
+#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */
+ /* note: always clear this bit */
+#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16)
+
+/* bits in EDLC_RSTAT, interrupt clear on write, status when read */
+#define RS_PKT_OK 0x80 /* received good packet */
+#define RS_RST_PKT 0x10 /* RESET packet received */
+#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */
+#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */
+#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */
+#define RS_OFLW 0x01 /* overflow for rcv FIFO */
+#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW )
+ /* all valid RSTAT bits */
+
+#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */
+#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */
+#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */
+#define RS_CLR_ALIGN 0x04 /* clear Alignment error */
+#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */
+#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */
+
+/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */
+#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */
+#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */
+#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */
+#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */
+#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */
+#define RM_OFLW 0x01 /* =1 to enable overflow error ints */
+
+/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */
+#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */
+#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */
+#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */
+#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */
+
+#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */
+#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */
+#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */
+#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */
+
+/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */
+#define XMD_COLL_CNT 0xf0 /* coll's since success. read-only */
+#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */
+#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */
+#define XMD_LBC		0x02	/* =1 for loopback.  normally set */
+#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */
+
+/* bits in EDLC_RESET, write only */
+#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */
+
+/* bits in IE_MMODE, write only */
+#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */
+#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */
+#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */
+#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */
+#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */
+#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */
+ /* =0 means Xmt Buff on system bus */
+
+/* bits in IE_ISTAT, read only */
+#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */
+#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */
+#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */
+#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */
+#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */
+#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */
+#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */
+
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
new file mode 100644
index 000000000000..fa854c8fde75
--- /dev/null
+++ b/drivers/net/ni52.c
@@ -0,0 +1,1386 @@
+/*
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
+ * [feel free to mail ....]
+ *
+ * when using as module: (no autoprobing!)
+ * compile with:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni52.c
+ * run with e.g:
+ * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
+ *
+ * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!
+ *
+ * If you find a bug, please report to me:
+ *   the kernel panic output and any kmsg from the ni52 driver,
+ *   the ni5210 driver version and the Linux kernel version,
+ *   how much shared memory (memsize) is on the netcard,
+ * bootprom: yes/no, base_addr, mem_start
+ * maybe the ni5210-card revision and the i82586 version
+ *
+ * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
+ * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
+ * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
+ *
+ * sources:
+ * skeleton.c from Donald Becker
+ *
+ * I have also looked at the following sources (mail me if you need them):
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's (fourth) i82586-driver for BSD
+ * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped
+ * me a lot to understand this tricky chip.)
+ *
+ * Known Problems:
+ * The internal sysbus seems to be slow. So we often lose packets because of
+ * overruns while receiving from a fast remote host.
+ * This can slow down TCP connections. Maybe the newer ni5210 cards are better.
+ * My experience is that if a machine sends at more than about 500-600K/s,
+ * the fifo/sysbus overflows.
+ *
+ * IMPORTANT NOTE:
+ * On fast networks, it's a (very) good idea to have 16K shared memory. With
+ * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote
+ * machine 'overruns' our system.
+ *
+ * Known i82586/card problems (I'm sure, there are many more!):
+ * Running the NOP-mode, the i82586 sometimes seems to forget to report
+ * every xmit-interrupt until we restart the CU.
+ * Another MAJOR bug is that the RU sometimes seems to ignore the EL-Bit
+ * in the RBD-Struct which indicates the end of the RBD queue.
+ * Instead, the RU fetches another (randomly selected and
+ * usually used) RBD and begins to fill it. (Maybe this happens only if
+ * the last buffer from the previous RFD fits exactly into the queue and
+ * the next RFD can't fetch an initial RBD. Does anyone know more?)
+ *
+ * results from ftp performance tests with Linux 1.2.5
+ * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
+ * sending in NOP-mode: peak performance up to 530K/s (but it is better not to run this mode)
+ */
+
+/*
+ * 29.Sept.96: virt_to_bus changes for new memory scheme
+ * 19.Feb.96: more Mcast changes, module support (MH)
+ *
+ * 18.Nov.95: Mcast changes (AC).
+ *
+ * 23.April.95: fixed(?) receiving problems by configuring a RFD more
+ * than the number of RBD's. Can maybe cause other problems.
+ * 18.April.95: Added MODULE support (MH)
+ * 17.April.95: MC related changes in init586() and set_multicast_list().
+ * removed use of 'jiffies' in init586() (MH)
+ *
+ * 19.Sep.94: Added Multicast support (not tested yet) (MH)
+ *
+ * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
+ * Now, every RFD has exact one RBD. (MH)
+ *
+ * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
+ *
+ * 19.Aug.94: changed request_irq() parameter (MH)
+ *
+ * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
+ *
+ * 19.July.94: lotsa cleanups .. (MH)
+ *
+ * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
+ *
+ * 4.July.94: patches for Linux 1.1.24 (MH)
+ *
+ * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
+ *
+ * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH)
+ *
+ * < 30.Sep.93: first versions
+ */
+
+static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume; /* experimental .. better should be zero */
+static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+/* #define REALLY_SLOW_IO */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni52.h"
+
+#define DRV_NAME "ni52"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 1 /* 8 Bit */
+
+#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);}
+#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);}
+#define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);}
+#define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);}
+
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ( ((char *) (ptr32)) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop ))
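+
+/*
+ * Editor's sketch, purely illustrative: with a hypothetical 16K window
+ * at bus address 0xd0000, base = virt(0xd0000) + 0x4000 - 0x01000000
+ * and memtop = virt(0xd0000) + 0x4000.  For a pointer p at the start
+ * of the window, make24(p) = 0x01000000 - 0x4000 = 0xffc000 and
+ * make16(p) = (unsigned short)(p - memtop) = 0xc000; make32(0xc000)
+ * sign-extends the offset and returns memtop - 0x4000 = p again.
+ */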
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one xmit buffer (NUM_XMIT_BUFFS=1), the
+ * --------------- driver works in a different (more stable?) mode. Only in this
+ * mode is it possible to configure the driver with 'NO_NOPCOMMANDS'.
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better do not change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
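+
+/*
+ * Editor's sketch, a rough plausibility check using only the sizes quoted
+ * above (one possible accounting, not measured from the structs):
+ *   per receive buffer:  RECV_BUFF_SIZE + sizeof(rfd) + sizeof(rbd) = 1524+24+12 = 1560
+ *   per xmit buffer:     XMIT_BUFF_SIZE + sizeof(tbd) + sizeof(transmit_cmd)
+ *                          + 2*sizeof(nop_cmd) = 1524+8+16+16 = 1564
+ *   8K shmem:  36 + 4*1560 + 1*1564 = 7840  <= 0x2000 (8192)
+ *   16K shmem: 36 + 9*1560 + 1*1564 = 15640 <= 0x4000 (16384)
+ */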
+
+/**************************************************************************/
+
+/* different DELAYs */
+#define DELAY(x) mdelay(32 * x);
+#define DELAY_16(); { udelay(16); }
+#define DELAY_18(); { udelay(4); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() \
+{ int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if((addr)->cmd_status & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
+#define NI52_TOTAL_SIZE 16
+#define NI52_ADDR0 0x02
+#define NI52_ADDR1 0x07
+#define NI52_ADDR2 0x01
+
+static int ni52_probe1(struct net_device *dev,int ioaddr);
+static irqreturn_t ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr);
+static int ni52_open(struct net_device *dev);
+static int ni52_close(struct net_device *dev);
+static int ni52_send_packet(struct sk_buff *,struct net_device *);
+static struct net_device_stats *ni52_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void ni52_timeout(struct net_device *dev);
+#if 0
+static void ni52_dump(struct net_device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev,char *where,unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev,void *ptr);
+static void ni52_rcv_int(struct net_device *dev);
+static void ni52_xmt_int(struct net_device *dev);
+static void ni52_rnr_int(struct net_device *dev);
+
+struct priv
+{
+ struct net_device_stats stats;
+ unsigned long base;
+ char *memtop;
+ long int lock;
+ int reseted;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct transmit_cmd_struct *xmit_cmds[2];
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+/**********************************************
+ * close device
+ */
+static int ni52_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+
+ ni_reset586(); /* the hard way to stop the receiver */
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int ni52_open(struct net_device *dev)
+{
+ int ret;
+
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev);
+ if (ret)
+ {
+ ni_reset586();
+ return ret;
+ }
+
+ netif_start_queue(dev);
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = /* (struct priv *) dev->priv*/ &pb;
+ char *iscp_addrs[2];
+ int i;
+
+ p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
+ p->memtop = isa_bus_to_virt((unsigned long)where) + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
+ iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct);
+
+ for(i=0;i<2;i++)
+ {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ ni_reset586();
+ ni_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and ni52_open.
+ */
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ ni_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ ni_reset586();
+ ni_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+/* set: io,irq,memstart,memend or set it when calling insmod */
+static int irq=9;
+static int io=0x300;
+static long memstart; /* e.g 0xd0000 */
+static long memend; /* e.g 0xd4000 */
+
+/**********************************************
+ * probe the ni5210-card
+ */
+struct net_device * __init ni52_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+ int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ memstart = dev->mem_start;
+ memend = dev->mem_end;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = ni52_probe1(dev, io);
+ } else if (io > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
+ ;
+ if (*port)
+ goto got_it;
+#ifdef FULL_IO_PROBE
+ for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
+ ;
+ if (io < 0x400)
+ goto got_it;
+#endif
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+got_it:
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, NI52_TOTAL_SIZE);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init ni52_probe1(struct net_device *dev,int ioaddr)
+{
+ int i, size, retval;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mem_start = memstart;
+ dev->mem_end = memend;
+
+ if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ for(i=0;i<ETH_ALEN;i++)
+ dev->dev_addr[i] = inb(dev->base_addr+i);
+
+ if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
+ || dev->dev_addr[2] != NI52_ADDR2) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr);
+
+ /*
+ * check (or search) IO-Memory, 8K and 16K
+ */
+#ifdef MODULE
+ size = dev->mem_end - dev->mem_start;
+ if(size != 0x2000 && size != 0x4000) {
+ printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size);
+ retval = -ENODEV;
+ goto out;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ retval = -ENODEV;
+ goto out;
+ }
+#else
+ if(dev->mem_start != 0) /* no auto-mem-probe */
+ {
+ size = 0x4000; /* check for 16K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ size = 0x2000; /* check for 8K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start);
+ retval = -ENODEV;
+ goto out;
+ }
+ }
+ }
+ else
+ {
+ static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000,
+ 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 };
+ for(i=0;;i++)
+ {
+ if(!memaddrs[i]) {
+ printk("?memprobe, Can't find io-memory!\n");
+ retval = -ENODEV;
+ goto out;
+ }
+ dev->mem_start = memaddrs[i];
+ size = 0x2000; /* check for 8K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */
+ break;
+ size = 0x4000; /* check for 16K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */
+ break;
+ }
+ }
+	dev->mem_end = dev->mem_start + size; /* set mem_end as shown by 'ifconfig' */
+#endif
+
+ memset((char *) dev->priv,0,sizeof(struct priv));
+
+ ((struct priv *) (dev->priv))->memtop = isa_bus_to_virt(dev->mem_start) + size;
+ ((struct priv *) (dev->priv))->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size);
+
+ if(dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ ni_reset586();
+ ni_attn586();
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if(!dev->irq)
+ {
+ printk("?autoirq, Failed to detect IRQ line!\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else {
+ if(dev->irq == 2)
+ dev->irq = 9;
+ printk("IRQ %d (assigned and not checked!).\n",dev->irq);
+ }
+
+ dev->open = ni52_open;
+ dev->stop = ni52_close;
+ dev->get_stats = ni52_get_stats;
+ dev->tx_timeout = ni52_timeout;
+ dev->watchdog_timeo = HZ/20;
+ dev->hard_start_xmit = ni52_send_packet;
+ dev->set_multicast_list = set_multicast_list;
+
+ dev->if_port = 0;
+
+ return 0;
+out:
+ release_region(ioaddr, NI52_TOTAL_SIZE);
+ return retval;
+}
+
+/**********************************************
+ * init the chip (ni52-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int num_addrs=dev->mc_count;
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ dev->flags|=IFF_PROMISC;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ {
+ cfg_cmd->promisc=1;
+ dev->flags|=IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * TDR, wire check .. e.g. no resistor etc.
+ */
+
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(tdr_cmd->cmd_status & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+
+ for(i=0;i<num_addrs;i++,dmi=dmi->next)
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)p->iscp)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT;
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586();
+ DELAY_16();
+
+ ni_enaint();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff;
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr)
+{
+ struct net_device *dev = dev_id;
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("ni5210-interrupt: irq %d for unknown device.\n",irq);
+ return IRQ_NONE;
+ }
+ p = (struct priv *) dev->priv;
+
+ if(debuglevel > 1)
+ printk("I");
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat;
+ ni_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ ni52_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ ni52_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ ni52_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(netif_running(dev))
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ ni_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void ni52_rcv_int(struct net_device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb->dev = dev;
+ skb_reserve(skb,2);
+ skb_put(skb,totlen);
+ eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += totlen;
+ }
+ else
+ p->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=rbd->status) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ p->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ p->stats.rx_errors++;
+ }
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void ni52_rnr_int(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void ni52_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ p->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ p->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ ni_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+static void ni52_timeout(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ netif_wake_queue(dev);
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point);
+#endif
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies;
+		return;
+ }
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status);
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+#endif
+ ni52_close(dev);
+ ni52_open(dev);
+ }
+ dev->trans_start = jiffies;
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+	if(skb->len > XMIT_BUFF_SIZE)
+	{
+		printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+		dev_kfree_skb(skb);	/* drop the oversized frame, don't leak the skb */
+		return 0;
+	}
+
+ netif_stop_queue(dev);
+
+#if(NUM_XMIT_BUFFS > 1)
+ if(test_and_set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return 1;
+ }
+ else
+#endif
+ {
+ memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ len = ETH_ZLEN;
+ memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, len - skb->len);
+ }
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+ printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status);
+ }
+#endif
+
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ ni_attn586();
+ dev->trans_start = jiffies;
+ if(!i)
+ dev_kfree_skb(skb);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* linkpointer of xmit-command already points to next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+
+ {
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ if(p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ p->lock = 0;
+ restore_flags(flags);
+ }
+ dev_kfree_skb(skb);
+#endif
+ }
+ return 0;
+}
+
+/*******************************************
+ * Someone wanna have the statistics
+ */
+
+static struct net_device_stats *ni52_get_stats(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs = 0;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs = 0;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs = 0;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs = 0;
+
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni52;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(memstart, long, 0);
+module_param(memend, long, 0);
+MODULE_PARM_DESC(io, "NI5210 I/O base address, required");
+MODULE_PARM_DESC(irq, "NI5210 IRQ number, required");
+MODULE_PARM_DESC(memstart, "NI5210 memory base address, required");
+MODULE_PARM_DESC(memend, "NI5210 memory end address, required");
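+
+/*
+ * For illustration only: with the parameters above, a module load would
+ * typically look like (values taken from the defaults and the examples
+ * given earlier in this file)
+ *	insmod ni52.o io=0x300 irq=9 memstart=0xd0000 memend=0xd4000
+ */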
+
+int init_module(void)
+{
+ if(io <= 0x0 || !memend || !memstart || irq < 2) {
+ printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
+ return -ENODEV;
+ }
+ dev_ni52 = ni52_probe(-1);
+ if (IS_ERR(dev_ni52))
+ return PTR_ERR(dev_ni52);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(dev_ni52);
+ release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
+ free_netdev(dev_ni52);
+}
+#endif /* MODULE */
+
+#if 0
+/*
+ * DUMP .. we expect a not running CMD unit and enough space
+ */
+void ni52_dump(struct net_device *dev,void *ptr)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+MODULE_LICENSE("GPL");
+
+/*
+ * END: linux/drivers/net/ni52.c
+ */
diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h
new file mode 100644
index 000000000000..68f19175afba
--- /dev/null
+++ b/drivers/net/ni52.h
@@ -0,0 +1,310 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+
+#define NI52_RESET 0 /* writing to this address, resets the i82586 */
+#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
+#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
+#define NI52_TDIS 2 /* Xmit disable */
+#define NI52_INTENA 5 /* Interrupt enable */
+#define NI52_INTDIS 4 /* Interrupt disable */
+#define NI52_MAGIC1 6 /* dunno exact function */
+#define NI52_MAGIC2 7 /* dunno exact function */
+
+#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
+#define NI52_MAGICVAL2 0x55
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned char rus;
+ unsigned char cus;
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+	unsigned short aln_errs;	/* alignment error counter */
+	unsigned short rsc_errs;	/* resource error counter */
+	unsigned short ovrn_errs;	/* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS	0x0080	/* Frame too short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Statusvalues for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+	unsigned char  mc_list[0][6];	/* list of 6-byte multicast address entries */
+};
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short dump_offset; /* pointeroffset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointeroffset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+
+
+
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
new file mode 100644
index 000000000000..925d1dfcc4dc
--- /dev/null
+++ b/drivers/net/ni65.c
@@ -0,0 +1,1277 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (probably it also works with every full NE2100
+ * EtherBlaster. (It probably also works with any fully NE2100-compatible
+ * card.)
+ * To compile as module, type:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: hippm@informatik.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ * problems with bus master DMA. This results in lotsa overruns.
+ * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
+ * the XMT and RCV_VIA_SKB option .. this reduces driver performance.
+ * Or just play with your BIOS options to optimize ISA-DMA access.
+ *  Maybe you also wanna play with the LOW_PERFORMANCE and MID_PERFORMANCE
+ *  defines -> please report your experience to me then
+ * - Harald reported for ASUS SP3G mainboards, that you should use
+ * the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanx to Jason Sullivan for sending me a ni6510 card!
+ * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
+ * 96.Sept.29: virt_to_bus stuff added for new memory model
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "ni65.h"
+
+/*
+ * the current setting allows an acceptable performance
+ * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
+ * the header of this file
+ * 'invert' the defines for max. performance. This may cause DMA problems
+ * on some boards (e.g on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
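+
+/*
+ * Sketch of a maximum-performance setup, following the 'invert the defines'
+ * note above (this may cause ISA-DMA trouble on some boards, see the
+ * 'known problems' section in the file header). Replace the defines with:
+ *	#define XMT_VIA_SKB
+ *	#define RCV_VIA_SKB
+ *	#undef  RCV_PARANOIA_CHECK
+ *	#undef  MID_PERFORMANCE	(so the 'high performance' timings above apply)
+ */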
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
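+
+/*
+ * The ...NUMMASK values encode log2(ring size) in the top three bits of the
+ * init-block ring pointers (see the '<<29' comments above), e.g.
+ * RMDNUM=16 -> 4<<29 = 0x80000000 and TMDNUM=4 -> 2<<29 = 0x40000000.
+ */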
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+static struct card {
+ unsigned char id0,id1;
+ short id_offset;
+ short total_size;
+ short cmd_offset;
+ short addr_offset;
+ unsigned char *vendor_id;
+ char *cardname;
+ long config;
+} cards[] = {
+ {
+ .id0 = NI65_ID0,
+ .id1 = NI65_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x10,
+ .cmd_offset = 0x0,
+ .addr_offset = 0x8,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510",
+ .config = 0x1,
+ },
+ {
+ .id0 = NI65_EB_ID0,
+ .id1 = NI65_EB_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510 EtherBlaster",
+ .config = 0x2,
+ },
+ {
+ .id0 = NE2100_ID0,
+ .id1 = NE2100_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = NULL,
+ .cardname = "generic NE2100",
+ .config = 0x0,
+ },
+};
+#define NUM_CARDS 3
+
+struct priv
+{
+ struct rmd rmdhead[RMDNUM];
+ struct tmd tmdhead[TMDNUM];
+ struct init_block ib;
+ int rmdnum;
+ int tmdnum,tmdlast;
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM];
+#else
+ void *recvbounce[RMDNUM];
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM];
+#endif
+ void *tmdbounce[TMDNUM];
+ int tmdbouncenum;
+ int lock,xmit_queued;
+ struct net_device_stats stats;
+ void *self;
+ int cmdr_addr;
+ int cardno;
+ int features;
+ spinlock_t ring_lock;
+};
+
+static int ni65_probe1(struct net_device *dev,int);
+static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+static void ni65_recv_intr(struct net_device *dev,int);
+static void ni65_xmit_intr(struct net_device *dev,int);
+static int ni65_open(struct net_device *dev);
+static int ni65_lance_reinit(struct net_device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void ni65_timeout(struct net_device *dev);
+static int ni65_close(struct net_device *dev);
+static int ni65_alloc_buffer(struct net_device *dev);
+static void ni65_free_buffer(struct priv *p);
+static struct net_device_stats *ni65_get_stats(struct net_device *);
+static void set_multicast_list(struct net_device *dev);
+
+static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
+
+static int debuglevel = 1;
+
+/*
+ * set 'performance' registers .. we must STOP lance for that
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
+
+/*
+ * open interface (up)
+ */
+static int ni65_open(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ int irqval = request_irq(dev->irq, &ni65_interrupt,0,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+
+ if(ni65_lance_reinit(dev))
+ {
+ netif_start_queue(dev);
+ return 0;
+ }
+ else
+ {
+ free_irq(dev->irq,dev);
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+static int ni65_close(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ netif_stop_queue(dev);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ free_irq(dev->irq,dev);
+ return 0;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ release_region(dev->base_addr, cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+}
+
+/* set: io,irq,dma or set it when calling insmod */
+static int irq;
+static int io;
+static int dma;
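+
+/*
+ * Illustration: a module load could look like
+ *	insmod ni65.o io=0x360 irq=9 dma=5
+ * (io/irq/dma values as in the probe tables above and in the file header)
+ */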
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+struct net_device * __init ni65_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ static int ports[] = {0x360,0x300,0x320,0x340, 0};
+ int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ irq = dev->irq;
+ dma = dev->dma;
+ } else {
+ dev->base_addr = io;
+ }
+
+ if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
+ err = ni65_probe1(dev, dev->base_addr);
+ } else if (dev->base_addr > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = ports; *port && ni65_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * this is the real card probe ..
+ */
+static int __init ni65_probe1(struct net_device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+ unsigned long flags;
+
+ dev->irq = irq;
+ dev->dma = dma;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ release_region(ioaddr, cards[i].total_size);
+ continue;
+ }
+ }
+		if(cards[i].vendor_id) {
+			for(j=0;j<3;j++)
+				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
+					break;
+			if(j < 3) {	/* vendor id mismatch: try the next card type */
+				release_region(ioaddr, cards[i].total_size);
+				continue;	/* continues the card loop, not the byte loop */
+			}
+		}
+ break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0) {
+ release_region(ioaddr, cards[i].total_size);
+ return j;
+ }
+ p = (struct priv *) dev->priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+ spin_lock_init(&p->ring_lock);
+
+ printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ long dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ free_dma(dma);
+ release_dma_lock(flags);
+
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ irq_mask = probe_irq_on();
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+ msleep(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if(!dev->irq)
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ dev->base_addr = ioaddr;
+ SET_MODULE_OWNER(dev);
+ dev->open = ni65_open;
+ dev->stop = ni65_close;
+ dev->hard_start_xmit = ni65_send_packet;
+ dev->tx_timeout = ni65_timeout;
+ dev->watchdog_timeo = HZ/2;
+ dev->get_stats = ni65_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) isa_virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ for(i=0;i<32;i++)
+ {
+ mdelay(4);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
+static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ }
+ if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+static int ni65_alloc_buffer(struct net_device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+ * we need 8-aligned memory ..
+ */
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *) dev->priv,0,sizeof(struct priv));
+ p->self = ptr;
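+	/*
+	 * Note: the '+7 & ~7' above rounds ptr up to the next 8-byte boundary;
+	 * the original, unaligned pointer is kept in p->self so that
+	 * ni65_free_buffer() can kfree() it later.
+	 */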
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ if(p->tmdbounce[i])
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i])
+ dev_kfree_skb(p->tmd_skb[i]);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ if(p->recv_skb[i])
+ dev_kfree_skb(p->recv_skb[i]);
+#else
+ if(p->recvbounce[i])
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ if(p->self)
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g after an error
+ */
+static void ni65_stop_start(struct net_device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk(KERN_DEBUG "ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ if (p->tmdnum || !p->xmit_queued)
+ netif_wake_queue(dev);
+ dev->trans_start = jiffies;
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct net_device *dev)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned long flags;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
+#endif
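+ /* the LANCE takes buffer lengths as negative (two's complement) values */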
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+ * ni65_set_lance_mem() sets L_ADDRREG to CSR0
+ * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+ int csr0 = 0;
+ struct net_device *dev = dev_id;
+ struct priv *p;
+ int bcnt = 32;
+
+ p = (struct priv *) dev->priv;
+
+ spin_lock(&p->ring_lock);
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ p->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ p->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ int i,k,num1,num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ int k;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ spin_unlock(&p->ring_lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct net_device *dev,int csr0)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ p->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ p->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ p->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ p->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else {
+ p->stats.tx_bytes -= (short)(tmdp->blen);
+ p->stats.tx_packets++;
+ }
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct net_device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = (struct priv *) dev->priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ p->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ p->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ p->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ p->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ p->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ p->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = dev_alloc_skb(len+2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+ skb->dev = dev;
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ p->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ p->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+
+static void ni65_timeout(struct net_device *dev)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+ ni65_lance_reinit(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * Send a packet
+ */
+
+static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return 1;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ unsigned long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
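+ /* the frame is copied into a preallocated bounce buffer that is
+  * known to lie below the 16MB ISA DMA limit */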
+ memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
+ (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
+ if (len > skb->len)
+ memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave(&p->ring_lock, flags);
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ spin_lock_irqsave(&p->ring_lock, flags);
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ if(p->tmdnum != p->tmdlast)
+ netif_wake_queue(dev);
+
+ p->lock = 0;
+ dev->trans_start = jiffies;
+
+ spin_unlock_irqrestore(&p->ring_lock, flags);
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *ni65_get_stats(struct net_device *dev)
+{
+
+#if 0
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
+ printk("%02x ",rmdp->u.s.status);
+ }
+ printk("\n");
+#endif
+
+ return &((struct priv *) dev->priv)->stats;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni65;
+
+module_param(irq, int, 0);
+module_param(io, int, 0);
+module_param(dma, int, 0);
+MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
+MODULE_PARM_DESC(io, "ni6510 I/O base address");
+MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
+
+int init_module(void)
+{
+ dev_ni65 = ni65_probe(-1);
+ return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(dev_ni65);
+ cleanup_card(dev_ni65);
+ free_netdev(dev_ni65);
+}
+#endif /* MODULE */
+
+MODULE_LICENSE("GPL");
+
+/*
+ * END of ni65.c
+ */
diff --git a/drivers/net/ni65.h b/drivers/net/ni65.h
new file mode 100644
index 000000000000..b01cef1b62c1
--- /dev/null
+++ b/drivers/net/ni65.h
@@ -0,0 +1,121 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * the same GNU General Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR	0x0008	/* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY	0x10	/* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block {
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
+
+struct rmd { /* Receive Message Descriptor */
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd {
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
new file mode 100644
index 000000000000..2fcc181a8624
--- /dev/null
+++ b/drivers/net/ns83820.c
@@ -0,0 +1,2222 @@
+#define _VERSION "0.20"
+/* ns83820.c by Benjamin LaHaise with contributions.
+ *
+ * Questions/comments/discussion to linux-ns83820@kvack.org.
+ *
+ * $Revision: 1.34.2.23 $
+ *
+ * Copyright 2001 Benjamin LaHaise.
+ * Copyright 2001, 2002 Red Hat.
+ *
+ * Mmmm, chocolate vanilla mocha...
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * ChangeLog
+ * =========
+ * 20010414 0.1 - created
+ * 20010622 0.2 - basic rx and tx.
+ * 20010711 0.3 - added duplex and link state detection support.
+ * 20010713 0.4 - zero copy, no hangs.
+ * 0.5 - 64 bit dma support (davem will hate me for this)
+ * - disable jumbo frames to avoid tx hangs
+ * - work around tx deadlocks on my 1.02 card via
+ * fiddling with TXCFG
+ * 20010810 0.6 - use pci dma api for ringbuffers, work on ia64
+ * 20010816 0.7 - misc cleanups
+ * 20010826 0.8 - fix critical zero copy bugs
+ * 0.9 - internal experiment
+ * 20010827 0.10 - fix ia64 unaligned access.
+ * 20010906 0.11 - accept all packets with checksum errors as
+ * otherwise fragments get lost
+ * - fix >> 32 bugs
+ * 0.12 - add statistics counters
+ * - add allmulti/promisc support
+ * 20011009 0.13 - hotplug support, other smaller pci api cleanups
+ * 20011204 0.13a - optical transceiver support added
+ * by Michael Clark <michael@metaparadigm.com>
+ * 20011205 0.13b - call register_netdev earlier in initialization
+ * suppress duplicate link status messages
+ * 20011117 0.14 - ethtool GDRVINFO, GLINK support from jgarzik
+ *	20011204	0.15 -	get ppc (big endian) working
+ *	20011218	0.16 -	various cleanups
+ *	20020310	0.17 -	speedups
+ * 20020610 0.18 - actually use the pci dma api for highmem
+ * - remove pci latency register fiddling
+ * 0.19 - better bist support
+ * - add ihr and reset_phy parameters
+ * - gmii bus probing
+ * - fix missed txok introduced during performance
+ * tuning
+ * 0.20 - fix stupid RFEN thinko. i am such a smurf.
+ *
+ *	20040828	0.21 -	add hardware vlan acceleration
+ * by Neil Horman <nhorman@redhat.com>
+ * Driver Overview
+ * ===============
+ *
+ * This driver was originally written for the National Semiconductor
+ * 83820 chip, a 10/100/1000 Mbps 64 bit PCI ethernet NIC. Hopefully
+ * this code will turn out to be a) clean, b) correct, and c) fast.
+ * With that in mind, I'm aiming to split the code up as much as
+ * reasonably possible.  At present there are four major sections that
+ * break down into a) packet receive, b) packet transmit, c) link
+ * management, d) initialization and configuration. Where possible,
+ * these code paths are designed to run in parallel.
+ *
+ * This driver has been tested and found to work with the following
+ * cards (in no particular order):
+ *
+ * Cameo SOHO-GA2000T SOHO-GA2500T
+ * D-Link DGE-500T
+ * PureData PDP8023Z-TG
+ * SMC SMC9452TX SMC9462TX
+ * Netgear GA621
+ *
+ * Special thanks to SMC for providing hardware to test this driver on.
+ *
+ * Reports of success or failure would be greatly appreciated.
+ */
+//#define dprintk printk
+#define dprintk(x...) do { } while (0)
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/ip.h> /* for iph */
+#include <linux/in.h> /* for IPPROTO_... */
+#include <linux/eeprom.h>
+#include <linux/compiler.h>
+#include <linux/prefetch.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/if_vlan.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#define DRV_NAME "ns83820"
+
+/* Global parameters. See module_param near the bottom. */
+static int ihr = 2;
+static int reset_phy = 0;
+static int lnksts = 0; /* CFG_LNKSTS bit polarity */
+
+/* Dprintk is used for more interesting debug events */
+#undef Dprintk
+#define Dprintk dprintk
+
+#if defined(CONFIG_HIGHMEM64G) || defined(__ia64__)
+#define USE_64BIT_ADDR "+"
+#endif
+
+#if defined(USE_64BIT_ADDR)
+#define VERSION _VERSION USE_64BIT_ADDR
+#define TRY_DAC 1
+#else
+#define VERSION _VERSION
+#define TRY_DAC 0
+#endif
+
+/* tunables */
+#define RX_BUF_SIZE 1500 /* 8192 */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define NS83820_VLAN_ACCEL_SUPPORT
+#endif
+
+/* Must not exceed ~65000. */
+#define NR_RX_DESC 64
+#define NR_TX_DESC 128
+
+/* not tunable */
+#define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* rx/tx mac addr + type */
+
+#define MIN_TX_DESC_FREE 8
+
+/* register defines */
+#define CFGCS 0x04
+
+#define CR_TXE 0x00000001
+#define CR_TXD 0x00000002
+/* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE
+ * The Receive engine skips one descriptor and moves
+ * onto the next one!! */
+#define CR_RXE 0x00000004
+#define CR_RXD 0x00000008
+#define CR_TXR 0x00000010
+#define CR_RXR 0x00000020
+#define CR_SWI 0x00000080
+#define CR_RST 0x00000100
+
+#define PTSCR_EEBIST_FAIL 0x00000001
+#define PTSCR_EEBIST_EN 0x00000002
+#define PTSCR_EELOAD_EN 0x00000004
+#define PTSCR_RBIST_FAIL 0x000001b8
+#define PTSCR_RBIST_DONE 0x00000200
+#define PTSCR_RBIST_EN 0x00000400
+#define PTSCR_RBIST_RST 0x00002000
+
+#define MEAR_EEDI 0x00000001
+#define MEAR_EEDO 0x00000002
+#define MEAR_EECLK 0x00000004
+#define MEAR_EESEL 0x00000008
+#define MEAR_MDIO 0x00000010
+#define MEAR_MDDIR 0x00000020
+#define MEAR_MDC 0x00000040
+
+#define ISR_TXDESC3 0x40000000
+#define ISR_TXDESC2 0x20000000
+#define ISR_TXDESC1 0x10000000
+#define ISR_TXDESC0 0x08000000
+#define ISR_RXDESC3 0x04000000
+#define ISR_RXDESC2 0x02000000
+#define ISR_RXDESC1 0x01000000
+#define ISR_RXDESC0 0x00800000
+#define ISR_TXRCMP 0x00400000
+#define ISR_RXRCMP 0x00200000
+#define ISR_DPERR 0x00100000
+#define ISR_SSERR 0x00080000
+#define ISR_RMABT 0x00040000
+#define ISR_RTABT 0x00020000
+#define ISR_RXSOVR 0x00010000
+#define ISR_HIBINT 0x00008000
+#define ISR_PHY 0x00004000
+#define ISR_PME 0x00002000
+#define ISR_SWI 0x00001000
+#define ISR_MIB 0x00000800
+#define ISR_TXURN 0x00000400
+#define ISR_TXIDLE 0x00000200
+#define ISR_TXERR 0x00000100
+#define ISR_TXDESC 0x00000080
+#define ISR_TXOK 0x00000040
+#define ISR_RXORN 0x00000020
+#define ISR_RXIDLE 0x00000010
+#define ISR_RXEARLY 0x00000008
+#define ISR_RXERR 0x00000004
+#define ISR_RXDESC 0x00000002
+#define ISR_RXOK 0x00000001
+
+#define TXCFG_CSI 0x80000000
+#define TXCFG_HBI 0x40000000
+#define TXCFG_MLB 0x20000000
+#define TXCFG_ATP 0x10000000
+#define TXCFG_ECRETRY 0x00800000
+#define TXCFG_BRST_DIS 0x00080000
+#define TXCFG_MXDMA1024 0x00000000
+#define TXCFG_MXDMA512 0x00700000
+#define TXCFG_MXDMA256 0x00600000
+#define TXCFG_MXDMA128 0x00500000
+#define TXCFG_MXDMA64 0x00400000
+#define TXCFG_MXDMA32 0x00300000
+#define TXCFG_MXDMA16 0x00200000
+#define TXCFG_MXDMA8 0x00100000
+
+#define CFG_LNKSTS 0x80000000
+#define CFG_SPDSTS 0x60000000
+#define CFG_SPDSTS1 0x40000000
+#define CFG_SPDSTS0 0x20000000
+#define CFG_DUPSTS 0x10000000
+#define CFG_TBI_EN 0x01000000
+#define CFG_MODE_1000 0x00400000
+/* Ramit : Don't ever use AUTO_1000, it never works and is buggy.
+ * Read the Phy response and then configure the MAC accordingly */
+#define CFG_AUTO_1000 0x00200000
+#define CFG_PINT_CTL 0x001c0000
+#define CFG_PINT_DUPSTS 0x00100000
+#define CFG_PINT_LNKSTS 0x00080000
+#define CFG_PINT_SPDSTS 0x00040000
+#define CFG_TMRTEST 0x00020000
+#define CFG_MRM_DIS 0x00010000
+#define CFG_MWI_DIS 0x00008000
+#define CFG_T64ADDR 0x00004000
+#define CFG_PCI64_DET 0x00002000
+#define CFG_DATA64_EN 0x00001000
+#define CFG_M64ADDR 0x00000800
+#define CFG_PHY_RST 0x00000400
+#define CFG_PHY_DIS 0x00000200
+#define CFG_EXTSTS_EN 0x00000100
+#define CFG_REQALG 0x00000080
+#define CFG_SB 0x00000040
+#define CFG_POW 0x00000020
+#define CFG_EXD 0x00000010
+#define CFG_PESEL 0x00000008
+#define CFG_BROM_DIS 0x00000004
+#define CFG_EXT_125 0x00000002
+#define CFG_BEM 0x00000001
+
+#define EXTSTS_UDPPKT 0x00200000
+#define EXTSTS_TCPPKT 0x00080000
+#define EXTSTS_IPPKT 0x00020000
+#define EXTSTS_VPKT 0x00010000
+#define EXTSTS_VTG_MASK 0x0000ffff
+
+#define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? CFG_LNKSTS : 0))
+
+#define MIBC_MIBS 0x00000008
+#define MIBC_ACLR 0x00000004
+#define MIBC_FRZ 0x00000002
+#define MIBC_WRN 0x00000001
+
+#define PCR_PSEN (1 << 31)
+#define PCR_PS_MCAST (1 << 30)
+#define PCR_PS_DA (1 << 29)
+#define PCR_STHI_8 (3 << 23)
+#define PCR_STLO_4 (1 << 23)
+#define PCR_FFHI_8K (3 << 21)
+#define PCR_FFLO_4K (1 << 21)
+#define PCR_PAUSE_CNT 0xFFFE
+
+#define RXCFG_AEP 0x80000000
+#define RXCFG_ARP 0x40000000
+#define RXCFG_STRIPCRC 0x20000000
+#define RXCFG_RX_FD 0x10000000
+#define RXCFG_ALP 0x08000000
+#define RXCFG_AIRL 0x04000000
+#define RXCFG_MXDMA512 0x00700000
+#define RXCFG_DRTH 0x0000003e
+#define RXCFG_DRTH0 0x00000002
+
+#define RFCR_RFEN 0x80000000
+#define RFCR_AAB 0x40000000
+#define RFCR_AAM 0x20000000
+#define RFCR_AAU 0x10000000
+#define RFCR_APM 0x08000000
+#define RFCR_APAT 0x07800000
+#define RFCR_APAT3 0x04000000
+#define RFCR_APAT2 0x02000000
+#define RFCR_APAT1 0x01000000
+#define RFCR_APAT0 0x00800000
+#define RFCR_AARP 0x00400000
+#define RFCR_MHEN 0x00200000
+#define RFCR_UHEN 0x00100000
+#define RFCR_ULM 0x00080000
+
+#define VRCR_RUDPE 0x00000080
+#define VRCR_RTCPE 0x00000040
+#define VRCR_RIPE 0x00000020
+#define VRCR_IPEN 0x00000010
+#define VRCR_DUTF 0x00000008
+#define VRCR_DVTF 0x00000004
+#define VRCR_VTREN 0x00000002
+#define VRCR_VTDEN 0x00000001
+
+#define VTCR_PPCHK 0x00000008
+#define VTCR_GCHK 0x00000004
+#define VTCR_VPPTI 0x00000002
+#define VTCR_VGTI 0x00000001
+
+#define CR 0x00
+#define CFG 0x04
+#define MEAR 0x08
+#define PTSCR 0x0c
+#define ISR 0x10
+#define IMR 0x14
+#define IER 0x18
+#define IHR 0x1c
+#define TXDP 0x20
+#define TXDP_HI 0x24
+#define TXCFG 0x28
+#define GPIOR 0x2c
+#define RXDP 0x30
+#define RXDP_HI 0x34
+#define RXCFG 0x38
+#define PQCR 0x3c
+#define WCSR 0x40
+#define PCR 0x44
+#define RFCR 0x48
+#define RFDR 0x4c
+
+#define SRR 0x58
+
+#define VRCR 0xbc
+#define VTCR 0xc0
+#define VDR 0xc4
+#define CCSR 0xcc
+
+#define TBICR 0xe0
+#define TBISR 0xe4
+#define TANAR 0xe8
+#define TANLPAR 0xec
+#define TANER 0xf0
+#define TESR 0xf4
+
+#define TBICR_MR_AN_ENABLE 0x00001000
+#define TBICR_MR_RESTART_AN 0x00000200
+
+#define TBISR_MR_LINK_STATUS 0x00000020
+#define TBISR_MR_AN_COMPLETE 0x00000004
+
+#define TANAR_PS2 0x00000100
+#define TANAR_PS1 0x00000080
+#define TANAR_HALF_DUP 0x00000040
+#define TANAR_FULL_DUP 0x00000020
+
+#define GPIOR_GP5_OE 0x00000200
+#define GPIOR_GP4_OE 0x00000100
+#define GPIOR_GP3_OE 0x00000080
+#define GPIOR_GP2_OE 0x00000040
+#define GPIOR_GP1_OE 0x00000020
+#define GPIOR_GP3_OUT 0x00000004
+#define GPIOR_GP1_OUT 0x00000001
+
+#define LINK_AUTONEGOTIATE 0x01
+#define LINK_DOWN 0x02
+#define LINK_UP 0x04
+
+#ifdef USE_64BIT_ADDR
+#define HW_ADDR_LEN 8
+#define desc_addr_set(desc, addr) \
+ do { \
+ u64 __addr = (addr); \
+ (desc)[0] = cpu_to_le32(__addr); \
+ (desc)[1] = cpu_to_le32(__addr >> 32); \
+ } while(0)
+#define desc_addr_get(desc) \
+ (((u64)le32_to_cpu((desc)[1]) << 32) \
+ | le32_to_cpu((desc)[0]))
+#else
+#define HW_ADDR_LEN 4
+#define desc_addr_set(desc, addr) ((desc)[0] = cpu_to_le32(addr))
+#define desc_addr_get(desc) (le32_to_cpu((desc)[0]))
+#endif
+
+#define DESC_LINK 0
+#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4)
+#define DESC_CMDSTS (DESC_BUFPTR + HW_ADDR_LEN/4)
+#define DESC_EXTSTS (DESC_CMDSTS + 4/4)
+
+#define CMDSTS_OWN 0x80000000
+#define CMDSTS_MORE 0x40000000
+#define CMDSTS_INTR 0x20000000
+#define CMDSTS_ERR 0x10000000
+#define CMDSTS_OK 0x08000000
+#define CMDSTS_RUNT 0x00200000
+#define CMDSTS_LEN_MASK 0x0000ffff
+
+#define CMDSTS_DEST_MASK 0x01800000
+#define CMDSTS_DEST_SELF 0x00800000
+#define CMDSTS_DEST_MULTI 0x01000000
+
+#define DESC_SIZE 8 /* Should be cache line sized */
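+/* Each descriptor occupies DESC_SIZE 32-bit words.  With 32 bit
+ * addressing (HW_ADDR_LEN == 4) the DESC_* offsets above work out to:
+ * word 0 = link, word 1 = buffer pointer, word 2 = cmdsts, word 3 =
+ * extsts.  With 64 bit addressing the link and buffer pointer take two
+ * words each, pushing cmdsts/extsts to words 4 and 5; the remaining
+ * words are padding. */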
+
+struct rx_info {
+ spinlock_t lock;
+ int up;
+ long idle;
+
+ struct sk_buff *skbs[NR_RX_DESC];
+
+ u32 *next_rx_desc;
+ u16 next_rx, next_empty;
+
+ u32 *descs;
+ dma_addr_t phy_descs;
+};
+
+
+struct ns83820 {
+ struct net_device_stats stats;
+ u8 __iomem *base;
+
+ struct pci_dev *pci_dev;
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ struct vlan_group *vlgrp;
+#endif
+
+ struct rx_info rx_info;
+ struct tasklet_struct rx_tasklet;
+
+ unsigned ihr;
+ struct work_struct tq_refill;
+
+ /* protects everything below. irqsave when using. */
+ spinlock_t misc_lock;
+
+ u32 CFG_cache;
+
+ u32 MEAR_cache;
+ u32 IMR_cache;
+ struct eeprom ee;
+
+ unsigned linkstate;
+
+ spinlock_t tx_lock;
+
+ u16 tx_done_idx;
+ u16 tx_idx;
+ volatile u16 tx_free_idx; /* idx of free desc chain */
+ u16 tx_intr_idx;
+
+ atomic_t nr_tx_skbs;
+ struct sk_buff *tx_skbs[NR_TX_DESC];
+
+ char pad[16] __attribute__((aligned(16)));
+ u32 *tx_descs;
+ dma_addr_t tx_phy_descs;
+
+ struct timer_list tx_watchdog;
+};
+
+static inline struct ns83820 *PRIV(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+#define __kick_rx(dev) writel(CR_RXE, dev->base + CR)
+
+static inline void kick_rx(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ dprintk("kick_rx: maybe kicking\n");
+ if (test_and_clear_bit(0, &dev->rx_info.idle)) {
+ dprintk("actually kicking\n");
+ writel(dev->rx_info.phy_descs +
+ (4 * DESC_SIZE * dev->rx_info.next_rx),
+ dev->base + RXDP);
+ if (dev->rx_info.next_rx == dev->rx_info.next_empty)
+ printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n",
+ ndev->name);
+ __kick_rx(dev);
+ }
+}
+
+//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC
+#define start_tx_okay(dev) \
+ (((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
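+/* e.g. with NR_TX_DESC == 128, tx_done_idx == 10 and tx_free_idx == 120
+ * this works out to (126 + 10 - 120) % 128 == 16 free slots, comfortably
+ * above MIN_TX_DESC_FREE, so transmission may continue. */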
+
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ spin_lock_irq(&dev->misc_lock);
+ spin_lock(&dev->tx_lock);
+
+ dev->vlgrp = grp;
+
+ spin_unlock(&dev->tx_lock);
+ spin_unlock_irq(&dev->misc_lock);
+}
+
+static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ spin_lock_irq(&dev->misc_lock);
+ spin_lock(&dev->tx_lock);
+ if (dev->vlgrp)
+ dev->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock(&dev->tx_lock);
+ spin_unlock_irq(&dev->misc_lock);
+}
+#endif
+
+/* Packet Receiver
+ *
+ * The hardware supports linked lists of receive descriptors for
+ * which ownership is transferred back and forth by means of an
+ * ownership bit. While the hardware does support the use of a
+ * ring for receive descriptors, we only make use of a chain in
+ * an attempt to reduce bus traffic under heavy load scenarios.
+ * This will also make bugs a bit more obvious. The current code
+ * only makes use of a single rx chain; I hope to implement
+ * priority based rx for version 1.0. Goal: even under overload
+ * conditions, still route realtime traffic with as low jitter as
+ * possible.
+ */
+static inline void build_rx_desc(struct ns83820 *dev, u32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
+{
+ desc_addr_set(desc + DESC_LINK, link);
+ desc_addr_set(desc + DESC_BUFPTR, buf);
+ desc[DESC_EXTSTS] = cpu_to_le32(extsts);
+ mb();
+ desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
+}
+
+#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC)
+static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
+{
+ unsigned next_empty;
+ u32 cmdsts;
+ u32 *sg;
+ dma_addr_t buf;
+
+ next_empty = dev->rx_info.next_empty;
+
+ /* don't overrun last rx marker */
+ if (unlikely(nr_rx_empty(dev) <= 2)) {
+ kfree_skb(skb);
+ return 1;
+ }
+
+#if 0
+ dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n",
+ dev->rx_info.next_empty,
+ dev->rx_info.nr_used,
+ dev->rx_info.next_rx
+ );
+#endif
+
+ sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
+ if (unlikely(NULL != dev->rx_info.skbs[next_empty]))
+ BUG();
+ dev->rx_info.skbs[next_empty] = skb;
+
+ dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
+ cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
+ buf = pci_map_single(dev->pci_dev, skb->tail,
+ REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
+ /* update link of previous rx */
+ if (likely(next_empty != dev->rx_info.next_rx))
+ dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));
+
+ return 0;
+}
+
+static inline int rx_refill(struct net_device *ndev, int gfp)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ unsigned long flags = 0;
+
+ if (unlikely(nr_rx_empty(dev) <= 2))
+ return 0;
+
+ dprintk("rx_refill(%p)\n", ndev);
+ if (gfp == GFP_ATOMIC)
+ spin_lock_irqsave(&dev->rx_info.lock, flags);
+ for (i=0; i<NR_RX_DESC; i++) {
+ struct sk_buff *skb;
+ long res;
+ /* extra 16 bytes for alignment */
+ skb = __dev_alloc_skb(REAL_RX_BUF_SIZE+16, gfp);
+ if (unlikely(!skb))
+ break;
+
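+		/* align skb->tail (the start of the rx DMA buffer) to 16 bytes */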
+ res = (long)skb->tail & 0xf;
+ res = 0x10 - res;
+ res &= 0xf;
+ skb_reserve(skb, res);
+
+ skb->dev = ndev;
+ if (gfp != GFP_ATOMIC)
+ spin_lock_irqsave(&dev->rx_info.lock, flags);
+ res = ns83820_add_rx_skb(dev, skb);
+ if (gfp != GFP_ATOMIC)
+ spin_unlock_irqrestore(&dev->rx_info.lock, flags);
+ if (res) {
+ i = 1;
+ break;
+ }
+ }
+ if (gfp == GFP_ATOMIC)
+ spin_unlock_irqrestore(&dev->rx_info.lock, flags);
+
+ return i ? 0 : -ENOMEM;
+}
+
+static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
+static void fastcall rx_refill_atomic(struct net_device *ndev)
+{
+ rx_refill(ndev, GFP_ATOMIC);
+}
+
+/* REFILL */
+static inline void queue_refill(void *_dev)
+{
+ struct net_device *ndev = _dev;
+ struct ns83820 *dev = PRIV(ndev);
+
+ rx_refill(ndev, GFP_KERNEL);
+ if (dev->rx_info.up)
+ kick_rx(ndev);
+}
+
+static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
+{
+ build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
+}
+
+static void FASTCALL(phy_intr(struct net_device *ndev));
+static void fastcall phy_intr(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
+ u32 cfg, new_cfg;
+ u32 tbisr, tanar, tanlpar;
+ int speed, fullduplex, newlinkstate;
+
+ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have an optical transceiver */
+ tbisr = readl(dev->base + TBISR);
+ tanar = readl(dev->base + TANAR);
+ tanlpar = readl(dev->base + TANLPAR);
+ dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
+ tbisr, tanar, tanlpar);
+
+ if ( (fullduplex = (tanlpar & TANAR_FULL_DUP)
+ && (tanar & TANAR_FULL_DUP)) ) {
+
+ /* both of us are full duplex */
+ writel(readl(dev->base + TXCFG)
+ | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
+ dev->base + RXCFG);
+ /* Light up full duplex LED */
+ writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
+ dev->base + GPIOR);
+
+ } else if(((tanlpar & TANAR_HALF_DUP)
+ && (tanar & TANAR_HALF_DUP))
+ || ((tanlpar & TANAR_FULL_DUP)
+ && (tanar & TANAR_HALF_DUP))
+ || ((tanlpar & TANAR_HALF_DUP)
+ && (tanar & TANAR_FULL_DUP))) {
+
+ /* one or both of us are half duplex */
+ writel((readl(dev->base + TXCFG)
+ & ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
+ dev->base + RXCFG);
+ /* Turn off full duplex LED */
+ writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
+ dev->base + GPIOR);
+ }
+
+ speed = 4; /* 1000F */
+
+ } else {
+ /* we have a copper transceiver */
+ new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);
+
+ if (cfg & CFG_SPDSTS1)
+ new_cfg |= CFG_MODE_1000;
+ else
+ new_cfg &= ~CFG_MODE_1000;
+
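+		/* dividing by CFG_SPDSTS0 shifts the two speed status bits
+		 * down, giving an index into the speeds[] table above */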
+ speed = ((cfg / CFG_SPDSTS0) & 3);
+ fullduplex = (cfg & CFG_DUPSTS);
+
+ if (fullduplex)
+ new_cfg |= CFG_SB;
+
+ if ((cfg & CFG_LNKSTS) &&
+ ((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
+ writel(new_cfg, dev->base + CFG);
+ dev->CFG_cache = new_cfg;
+ }
+
+ dev->CFG_cache &= ~CFG_SPDSTS;
+ dev->CFG_cache |= cfg & CFG_SPDSTS;
+ }
+
+ newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;
+
+ if (newlinkstate & LINK_UP
+ && dev->linkstate != newlinkstate) {
+ netif_start_queue(ndev);
+ netif_wake_queue(ndev);
+ printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
+ ndev->name,
+ speeds[speed],
+ fullduplex ? "full" : "half");
+ } else if (newlinkstate & LINK_DOWN
+ && dev->linkstate != newlinkstate) {
+ netif_stop_queue(ndev);
+ printk(KERN_INFO "%s: link now down.\n", ndev->name);
+ }
+
+ dev->linkstate = newlinkstate;
+}
+
+static int ns83820_setup_rx(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ int ret;
+
+ dprintk("ns83820_setup_rx(%p)\n", ndev);
+
+ dev->rx_info.idle = 1;
+ dev->rx_info.next_rx = 0;
+ dev->rx_info.next_rx_desc = dev->rx_info.descs;
+ dev->rx_info.next_empty = 0;
+
+ for (i=0; i<NR_RX_DESC; i++)
+ clear_rx_desc(dev, i);
+
+ writel(0, dev->base + RXDP_HI);
+ writel(dev->rx_info.phy_descs, dev->base + RXDP);
+
+ ret = rx_refill(ndev, GFP_KERNEL);
+ if (!ret) {
+ dprintk("starting receiver\n");
+ /* prevent the interrupt handler from stomping on us */
+ spin_lock_irq(&dev->rx_info.lock);
+
+ writel(0x0001, dev->base + CCSR);
+ writel(0, dev->base + RFCR);
+ writel(0x7fc00000, dev->base + RFCR);
+ writel(0xffc00000, dev->base + RFCR);
+
+ dev->rx_info.up = 1;
+
+ phy_intr(ndev);
+
+ /* Okay, let it rip */
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache |= ISR_PHY;
+ dev->IMR_cache |= ISR_RXRCMP;
+ //dev->IMR_cache |= ISR_RXERR;
+ //dev->IMR_cache |= ISR_RXOK;
+ dev->IMR_cache |= ISR_RXORN;
+ dev->IMR_cache |= ISR_RXSOVR;
+ dev->IMR_cache |= ISR_RXDESC;
+ dev->IMR_cache |= ISR_RXIDLE;
+ dev->IMR_cache |= ISR_TXDESC;
+ dev->IMR_cache |= ISR_TXIDLE;
+
+ writel(dev->IMR_cache, dev->base + IMR);
+ writel(1, dev->base + IER);
+ spin_unlock_irq(&dev->misc_lock);
+
+ kick_rx(ndev);
+
+ spin_unlock_irq(&dev->rx_info.lock);
+ }
+ return ret;
+}
+
+static void ns83820_cleanup_rx(struct ns83820 *dev)
+{
+ unsigned i;
+ unsigned long flags;
+
+ dprintk("ns83820_cleanup_rx(%p)\n", dev);
+
+ /* disable receive interrupts */
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE);
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+
+ /* synchronize with the interrupt handler and kill it */
+ dev->rx_info.up = 0;
+ synchronize_irq(dev->pci_dev->irq);
+
+ /* touch the pci bus... */
+ readl(dev->base + IMR);
+
+ /* assumes the transmitter is already disabled and reset */
+ writel(0, dev->base + RXDP_HI);
+ writel(0, dev->base + RXDP);
+
+ for (i=0; i<NR_RX_DESC; i++) {
+ struct sk_buff *skb = dev->rx_info.skbs[i];
+ dev->rx_info.skbs[i] = NULL;
+ clear_rx_desc(dev, i);
+ if (skb)
+ kfree_skb(skb);
+ }
+}
+
+static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
+static void fastcall ns83820_rx_kick(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
+ if (dev->rx_info.up) {
+ rx_refill_atomic(ndev);
+ kick_rx(ndev);
+ }
+ }
+
+ if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4)
+ schedule_work(&dev->tq_refill);
+ else
+ kick_rx(ndev);
+ if (dev->rx_info.idle)
+ printk(KERN_DEBUG "%s: BAD\n", ndev->name);
+}
+
+/* rx_irq
+ *
+ */
+static void FASTCALL(rx_irq(struct net_device *ndev));
+static void fastcall rx_irq(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+ int rx_rc, len;
+ u32 cmdsts, *desc;
+ unsigned long flags;
+ int nr = 0;
+
+ dprintk("rx_irq(%p)\n", ndev);
+ dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
+ readl(dev->base + RXDP),
+ (long)(dev->rx_info.phy_descs),
+ (int)dev->rx_info.next_rx,
+ (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
+ (int)dev->rx_info.next_empty,
+ (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
+ );
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!info->up)
+ goto out;
+
+ dprintk("walking descs\n");
+ next_rx = info->next_rx;
+ desc = info->next_rx_desc;
+ while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) &&
+ (cmdsts != CMDSTS_OWN)) {
+ struct sk_buff *skb;
+ u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]);
+ dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR);
+
+ dprintk("cmdsts: %08x\n", cmdsts);
+ dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK]));
+ dprintk("extsts: %08x\n", extsts);
+
+ skb = info->skbs[next_rx];
+ info->skbs[next_rx] = NULL;
+ info->next_rx = (next_rx + 1) % NR_RX_DESC;
+
+ mb();
+ clear_rx_desc(dev, next_rx);
+
+ pci_unmap_single(dev->pci_dev, bufptr,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ len = cmdsts & CMDSTS_LEN_MASK;
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ /* NH: As was mentioned below, this chip is kinda
+ * brain dead about vlan tag stripping. Frames
+ * that are 64 bytes with a vlan header appended
+ * like arp frames, or pings, are flagged as Runts
+	 * when the tag is stripped in hardware.  This
+ * also means that the OK bit in the descriptor
+ * is cleared when the frame comes in so we have
+ * to do a specific length check here to make sure
+ * the frame would have been ok, had we not stripped
+ * the tag.
+ */
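+	/* e.g. a minimum-size 60 byte frame arrives as 56 bytes once its
+	 * 4 byte tag has been stripped, hence the length check below */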
+ if (likely((CMDSTS_OK & cmdsts) ||
+ ((cmdsts & CMDSTS_RUNT) && len >= 56))) {
+#else
+ if (likely(CMDSTS_OK & cmdsts)) {
+#endif
+ skb_put(skb, len);
+ if (unlikely(!skb))
+ goto netdev_mangle_me_harder_failed;
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ dev->stats.multicast ++;
+ dev->stats.rx_packets ++;
+ dev->stats.rx_bytes += len;
+ if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ skb->protocol = eth_type_trans(skb, ndev);
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ if(extsts & EXTSTS_VPKT) {
+ unsigned short tag;
+ tag = ntohs(extsts & EXTSTS_VTG_MASK);
+ rx_rc = vlan_hwaccel_rx(skb,dev->vlgrp,tag);
+ } else {
+ rx_rc = netif_rx(skb);
+ }
+#else
+ rx_rc = netif_rx(skb);
+#endif
+ if (NET_RX_DROP == rx_rc) {
+netdev_mangle_me_harder_failed:
+ dev->stats.rx_dropped ++;
+ }
+ } else {
+ kfree_skb(skb);
+ }
+
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+ }
+ info->next_rx = next_rx;
+ info->next_rx_desc = info->descs + (DESC_SIZE * next_rx);
+
+out:
+ if (0 && !nr) {
+ Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
+ }
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void rx_action(unsigned long _dev)
+{
+ struct net_device *ndev = (void *)_dev;
+ struct ns83820 *dev = PRIV(ndev);
+ rx_irq(ndev);
+ writel(ihr, dev->base + IHR);
+
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache |= ISR_RXDESC;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irq(&dev->misc_lock);
+
+ rx_irq(ndev);
+ ns83820_rx_kick(ndev);
+}
+
+/* Packet Transmit code
+ */
+static inline void kick_tx(struct ns83820 *dev)
+{
+ dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n",
+ dev, dev->tx_idx, dev->tx_free_idx);
+ writel(CR_TXE, dev->base + CR);
+}
+
+/* No spinlock needed on the transmit irq path as the interrupt handler is
+ * serialized.
+ */
+static void do_tx_done(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cmdsts, tx_done_idx, *desc;
+
+ spin_lock_irq(&dev->tx_lock);
+
+ dprintk("do_tx_done(%p)\n", ndev);
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+ while ((tx_done_idx != dev->tx_free_idx) &&
+ !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) {
+ struct sk_buff *skb;
+ unsigned len;
+ dma_addr_t addr;
+
+ if (cmdsts & CMDSTS_ERR)
+ dev->stats.tx_errors ++;
+ if (cmdsts & CMDSTS_OK)
+ dev->stats.tx_packets ++;
+ if (cmdsts & CMDSTS_OK)
+ dev->stats.tx_bytes += cmdsts & 0xffff;
+
+ dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ tx_done_idx, dev->tx_free_idx, cmdsts);
+ skb = dev->tx_skbs[tx_done_idx];
+ dev->tx_skbs[tx_done_idx] = NULL;
+ dprintk("done(%p)\n", skb);
+
+ len = cmdsts & CMDSTS_LEN_MASK;
+ addr = desc_addr_get(desc + DESC_BUFPTR);
+ if (skb) {
+ pci_unmap_single(dev->pci_dev,
+ addr,
+ len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&dev->nr_tx_skbs);
+ } else
+ pci_unmap_page(dev->pci_dev,
+ addr,
+ len,
+ PCI_DMA_TODEVICE);
+
+ tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
+ dev->tx_done_idx = tx_done_idx;
+ desc[DESC_CMDSTS] = cpu_to_le32(0);
+ mb();
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+ }
+
+ /* Allow network stack to resume queueing packets after we've
+ * finished transmitting at least 1/4 of the packets in the queue.
+ */
+ if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
+ dprintk("start_queue(%p)\n", ndev);
+ netif_start_queue(ndev);
+ netif_wake_queue(ndev);
+ }
+ spin_unlock_irq(&dev->tx_lock);
+}
+
+static void ns83820_cleanup_tx(struct ns83820 *dev)
+{
+ unsigned i;
+
+ for (i=0; i<NR_TX_DESC; i++) {
+ struct sk_buff *skb = dev->tx_skbs[i];
+ dev->tx_skbs[i] = NULL;
+ if (skb) {
+ u32 *desc = dev->tx_descs + (i * DESC_SIZE);
+ pci_unmap_single(dev->pci_dev,
+ desc_addr_get(desc + DESC_BUFPTR),
+ le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&dev->nr_tx_skbs);
+ }
+ }
+
+ memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
+}
+
+/* transmit routine. This code relies on the network layer serializing
+ * its calls in, but will run happily in parallel with the interrupt
+ * handler. This code currently has provisions for fragmenting tx buffers
+ * while trying to track down a bug in either the zero copy code or
+ * the tx fifo (hence the MAX_FRAG_LEN).
+ */
+static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 free_idx, cmdsts, extsts;
+ int nr_free, nr_frags;
+ unsigned tx_done_idx, last_idx;
+ dma_addr_t buf;
+ unsigned len;
+ skb_frag_t *frag;
+ int stopped = 0;
+ int do_intr = 0;
+ volatile u32 *first_desc;
+
+ dprintk("ns83820_hard_start_xmit\n");
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+again:
+ if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
+ netif_stop_queue(ndev);
+ if (unlikely(dev->CFG_cache & CFG_LNKSTS))
+ return 1;
+ netif_start_queue(ndev);
+ }
+
+ last_idx = free_idx = dev->tx_free_idx;
+ tx_done_idx = dev->tx_done_idx;
+ nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
+ nr_free -= 1;
+ if (nr_free <= nr_frags) {
+ dprintk("stop_queue - not enough(%p)\n", ndev);
+ netif_stop_queue(ndev);
+
+ /* Check again: we may have raced with a tx done irq */
+ if (dev->tx_done_idx != tx_done_idx) {
+ dprintk("restart queue(%p)\n", ndev);
+ netif_start_queue(ndev);
+ goto again;
+ }
+ return 1;
+ }
+
+ if (free_idx == dev->tx_intr_idx) {
+ do_intr = 1;
+ dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
+ }
+
+ nr_free -= nr_frags;
+ if (nr_free < MIN_TX_DESC_FREE) {
+ dprintk("stop_queue - last entry(%p)\n", ndev);
+ netif_stop_queue(ndev);
+ stopped = 1;
+ }
+
+ frag = skb_shinfo(skb)->frags;
+ if (!nr_frags)
+ frag = NULL;
+ extsts = 0;
+ if (skb->ip_summed == CHECKSUM_HW) {
+ extsts |= EXTSTS_IPPKT;
+ if (IPPROTO_TCP == skb->nh.iph->protocol)
+ extsts |= EXTSTS_TCPPKT;
+ else if (IPPROTO_UDP == skb->nh.iph->protocol)
+ extsts |= EXTSTS_UDPPKT;
+ }
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ if(vlan_tx_tag_present(skb)) {
+ /* fetch the vlan tag info out of the
+		 * ancillary data if the vlan code
+ * is using hw vlan acceleration
+ */
+ short tag = vlan_tx_tag_get(skb);
+ extsts |= (EXTSTS_VPKT | htons(tag));
+ }
+#endif
+
+ len = skb->len;
+ if (nr_frags)
+ len -= skb->data_len;
+ buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+ first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
+
+ for (;;) {
+ volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
+ u32 residue = 0;
+
+ dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
+ (unsigned long long)buf);
+ last_idx = free_idx;
+ free_idx = (free_idx + 1) % NR_TX_DESC;
+ desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
+ desc_addr_set(desc + DESC_BUFPTR, buf);
+ desc[DESC_EXTSTS] = cpu_to_le32(extsts);
+
+ cmdsts = ((nr_frags|residue) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
+ cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
+ cmdsts |= len;
+ desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
+
+ if (residue) {
+ buf += len;
+ len = residue;
+ continue;
+ }
+
+ if (!nr_frags)
+ break;
+
+ buf = pci_map_page(dev->pci_dev, frag->page,
+ frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
+ (long long)buf, (long) page_to_pfn(frag->page),
+ frag->page_offset);
+ len = frag->size;
+ frag++;
+ nr_frags--;
+ }
+ dprintk("done pkt\n");
+
+ spin_lock_irq(&dev->tx_lock);
+ dev->tx_skbs[last_idx] = skb;
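+	/* hand the whole chain to the chip in one go: OWN is set on the
+	 * first descriptor only after the rest of the chain is filled in */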
+ first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
+ dev->tx_free_idx = free_idx;
+ atomic_inc(&dev->nr_tx_skbs);
+ spin_unlock_irq(&dev->tx_lock);
+
+ kick_tx(dev);
+
+ /* Check again: we may have raced with a tx done irq */
+ if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
+ netif_start_queue(ndev);
+
+ /* set the transmit start time to catch transmit timeouts */
+ ndev->trans_start = jiffies;
+ return 0;
+}
+
+static void ns83820_update_stats(struct ns83820 *dev)
+{
+ u8 __iomem *base = dev->base;
+
+ /* the DP83820 will freeze counters, so we need to read all of them */
+ dev->stats.rx_errors += readl(base + 0x60) & 0xffff;
+ dev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff;
+ dev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff;
+ dev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff;
+ /*dev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
+ dev->stats.rx_length_errors += readl(base + 0x74) & 0xffff;
+ dev->stats.rx_length_errors += readl(base + 0x78) & 0xffff;
+ /*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
+ /*dev->stats.rx_pause_count += */ readl(base + 0x80);
+ /*dev->stats.tx_pause_count += */ readl(base + 0x84);
+ dev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff;
+}
+
+static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ /* somewhat overkill */
+ spin_lock_irq(&dev->misc_lock);
+ ns83820_update_stats(dev);
+ spin_unlock_irq(&dev->misc_lock);
+
+ return &dev->stats;
+}
+
+static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ strcpy(info->driver, "ns83820");
+ strcpy(info->version, VERSION);
+ strcpy(info->bus_info, pci_name(dev->pci_dev));
+}
+
+static u32 ns83820_get_link(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+ return cfg & CFG_LNKSTS ? 1 : 0;
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = ns83820_get_drvinfo,
+ .get_link = ns83820_get_link
+};
+
+static void ns83820_mib_isr(struct ns83820 *dev)
+{
+ spin_lock(&dev->misc_lock);
+ ns83820_update_stats(dev);
+ spin_unlock(&dev->misc_lock);
+}
+
+static void ns83820_do_isr(struct net_device *ndev, u32 isr);
+static irqreturn_t ns83820_irq(int foo, void *data, struct pt_regs *regs)
+{
+ struct net_device *ndev = data;
+ struct ns83820 *dev = PRIV(ndev);
+ u32 isr;
+ dprintk("ns83820_irq(%p)\n", ndev);
+
+ dev->ihr = 0;
+
+ isr = readl(dev->base + ISR);
+ dprintk("irq: %08x\n", isr);
+ ns83820_do_isr(ndev, isr);
+ return IRQ_HANDLED;
+}
+
+static void ns83820_do_isr(struct net_device *ndev, u32 isr)
+{
+ struct ns83820 *dev = PRIV(ndev);
+#ifdef DEBUG
+ if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC))
+ Dprintk("odd isr? 0x%08x\n", isr);
+#endif
+
+ if (ISR_RXIDLE & isr) {
+ dev->rx_info.idle = 1;
+ Dprintk("oh dear, we are idle\n");
+ ns83820_rx_kick(ndev);
+ }
+
+ if ((ISR_RXDESC | ISR_RXOK) & isr) {
+ prefetch(dev->rx_info.next_rx_desc);
+
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irq(&dev->misc_lock);
+
+ tasklet_schedule(&dev->rx_tasklet);
+ //rx_irq(ndev);
+ //writel(4, dev->base + IHR);
+ }
+
+ if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr)
+ ns83820_rx_kick(ndev);
+
+ if (unlikely(ISR_RXSOVR & isr)) {
+ //printk("overrun: rxsovr\n");
+ dev->stats.rx_fifo_errors ++;
+ }
+
+ if (unlikely(ISR_RXORN & isr)) {
+ //printk("overrun: rxorn\n");
+ dev->stats.rx_fifo_errors ++;
+ }
+
+ if ((ISR_RXRCMP & isr) && dev->rx_info.up)
+ writel(CR_RXE, dev->base + CR);
+
+ if (ISR_TXIDLE & isr) {
+ u32 txdp;
+ txdp = readl(dev->base + TXDP);
+ dprintk("txdp: %08x\n", txdp);
+ txdp -= dev->tx_phy_descs;
+ dev->tx_idx = txdp / (DESC_SIZE * 4);
+ if (dev->tx_idx >= NR_TX_DESC) {
+ printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name);
+ dev->tx_idx = 0;
+ }
+		/* There may have been a race between a pci originated read
+ * and the descriptor update from the cpu. Just in case,
+ * kick the transmitter if the hardware thinks it is on a
+ * different descriptor than we are.
+ */
+ if (dev->tx_idx != dev->tx_free_idx)
+ kick_tx(dev);
+ }
+
+ /* Defer tx ring processing until more than a minimum amount of
+ * work has accumulated
+ */
+ if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) {
+ do_tx_done(ndev);
+
+ /* Disable TxOk if there are no outstanding tx packets.
+ */
+ if ((dev->tx_done_idx == dev->tx_free_idx) &&
+ (dev->IMR_cache & ISR_TXOK)) {
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache &= ~ISR_TXOK;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irq(&dev->misc_lock);
+ }
+ }
+
+ /* The TxIdle interrupt can come in before the transmit has
+ * completed. Normally we reap packets off of the combination
+ * of TxDesc and TxIdle and leave TxOk disabled (since it
+ * occurs on every packet), but when no further irqs of this
+ * nature are expected, we must enable TxOk.
+ */
+ if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache |= ISR_TXOK;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irq(&dev->misc_lock);
+ }
+
+ /* MIB interrupt: one of the statistics counters is about to overflow */
+ if (unlikely(ISR_MIB & isr))
+ ns83820_mib_isr(dev);
+
+ /* PHY: Link up/down/negotiation state change */
+ if (unlikely(ISR_PHY & isr))
+ phy_intr(ndev);
+
+#if 0 /* Still working on the interrupt mitigation strategy */
+ if (dev->ihr)
+ writel(dev->ihr, dev->base + IHR);
+#endif
+}
+
+static void ns83820_do_reset(struct ns83820 *dev, u32 which)
+{
+ Dprintk("resetting chip...\n");
+ writel(which, dev->base + CR);
+ do {
+ schedule();
+ } while (readl(dev->base + CR) & which);
+ Dprintk("okay!\n");
+}
+
+static int ns83820_stop(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ /* FIXME: protect against interrupt handler? */
+ del_timer_sync(&dev->tx_watchdog);
+
+ /* disable interrupts */
+ writel(0, dev->base + IMR);
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+
+ dev->rx_info.up = 0;
+ synchronize_irq(dev->pci_dev->irq);
+
+ ns83820_do_reset(dev, CR_RST);
+
+ synchronize_irq(dev->pci_dev->irq);
+
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK);
+ spin_unlock_irq(&dev->misc_lock);
+
+ ns83820_cleanup_rx(dev);
+ ns83820_cleanup_tx(dev);
+
+ return 0;
+}
+
+static void ns83820_tx_timeout(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 tx_done_idx, *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ ndev->name,
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+
+#if defined(DEBUG)
+ {
+ u32 isr;
+ isr = readl(dev->base + ISR);
+ printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache);
+ ns83820_do_isr(ndev, isr);
+ }
+#endif
+
+ do_tx_done(ndev);
+
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ ndev->name,
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+
+ local_irq_restore(flags);
+}
+
+static void ns83820_tx_watch(unsigned long data)
+{
+ struct net_device *ndev = (void *)data;
+ struct ns83820 *dev = PRIV(ndev);
+
+#if defined(DEBUG)
+ printk("ns83820_tx_watch: %u %u %d\n",
+ dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs)
+ );
+#endif
+
+ if (time_after(jiffies, ndev->trans_start + 1*HZ) &&
+ dev->tx_done_idx != dev->tx_free_idx) {
+ printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
+ ndev->name,
+ dev->tx_done_idx, dev->tx_free_idx,
+ atomic_read(&dev->nr_tx_skbs));
+ ns83820_tx_timeout(ndev);
+ }
+
+ mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
+}
+
+static int ns83820_open(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ u32 desc;
+ int ret;
+
+ dprintk("ns83820_open\n");
+
+ writel(0, dev->base + PQCR);
+
+ ret = ns83820_setup_rx(ndev);
+ if (ret)
+ goto failed;
+
+ memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE);
+ for (i=0; i<NR_TX_DESC; i++) {
+ dev->tx_descs[(i * DESC_SIZE) + DESC_LINK]
+ = cpu_to_le32(
+ dev->tx_phy_descs
+ + ((i+1) % NR_TX_DESC) * DESC_SIZE * 4);
+ }
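+	/* The loop above links each descriptor's LINK field to the bus
+	 * address of the next descriptor, with the last entry wrapping back
+	 * to the first, so the hardware sees a circular ring.
+	 */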
+
+ dev->tx_idx = 0;
+ dev->tx_done_idx = 0;
+ desc = dev->tx_phy_descs;
+ writel(0, dev->base + TXDP_HI);
+ writel(desc, dev->base + TXDP);
+
+ init_timer(&dev->tx_watchdog);
+ dev->tx_watchdog.data = (unsigned long)ndev;
+ dev->tx_watchdog.function = ns83820_tx_watch;
+ mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
+
+ netif_start_queue(ndev); /* FIXME: wait for phy to come up */
+
+ return 0;
+
+failed:
+ ns83820_stop(ndev);
+ return ret;
+}
+
+static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+{
+ unsigned i;
+ for (i=0; i<3; i++) {
+ u32 data;
+#if 0 /* I've left this in as an example of how to use eeprom.h */
+ data = eeprom_readw(&dev->ee, 0xa + 2 - i);
+#else
+ /* Read from the perfect match memory: this is loaded by
+ * the chip from the EEPROM via the EELOAD self test.
+ */
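+			/* RFCR is loaded with 0, 2 and 4 to select successive
+			 * 16-bit words of the station address; RFDR returns
+			 * each word with the earlier octet in its low byte,
+			 * hence the store order below.
+			 */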
+ writel(i*2, dev->base + RFCR);
+ data = readl(dev->base + RFDR);
+#endif
+ *mac++ = data;
+ *mac++ = data >> 8;
+ }
+}
+
+static int ns83820_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (new_mtu > RX_BUF_SIZE)
+ return -EINVAL;
+ ndev->mtu = new_mtu;
+ return 0;
+}
+
+static void ns83820_set_multicast(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u8 __iomem *rfcr = dev->base + RFCR;
+ u32 and_mask = 0xffffffff;
+ u32 or_mask = 0;
+ u32 val;
+
+ if (ndev->flags & IFF_PROMISC)
+ or_mask |= RFCR_AAU | RFCR_AAM;
+ else
+ and_mask &= ~(RFCR_AAU | RFCR_AAM);
+
+ if (ndev->flags & IFF_ALLMULTI)
+ or_mask |= RFCR_AAM;
+ else
+ and_mask &= ~RFCR_AAM;
+
+ spin_lock_irq(&dev->misc_lock);
+ val = (readl(rfcr) & and_mask) | or_mask;
+	/* Ramit : RFCR Write Fix doc says RFEN must be 0 to modify other bits */
+ writel(val & ~RFCR_RFEN, rfcr);
+ writel(val, rfcr);
+ spin_unlock_irq(&dev->misc_lock);
+}
+
+static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ int timed_out = 0;
+ long start;
+ u32 status;
+ int loops = 0;
+
+ dprintk("%s: start %s\n", ndev->name, name);
+
+ start = jiffies;
+
+ writel(enable, dev->base + PTSCR);
+ for (;;) {
+ loops++;
+ status = readl(dev->base + PTSCR);
+ if (!(status & enable))
+ break;
+ if (status & done)
+ break;
+ if (status & fail)
+ break;
+ if ((jiffies - start) >= HZ) {
+ timed_out = 1;
+ break;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ if (status & fail)
+ printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n",
+ ndev->name, name, status, fail);
+ else if (timed_out)
+ printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n",
+ ndev->name, name, status);
+
+ dprintk("%s: done %s in %d loops\n", ndev->name, name, loops);
+}
+
+#ifdef PHY_CODE_IS_FINISHED
+static void ns83820_mii_write_bit(struct ns83820 *dev, int bit)
+{
+ /* drive MDC low */
+ dev->MEAR_cache &= ~MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* enable output, set bit */
+ dev->MEAR_cache |= MEAR_MDDIR;
+ if (bit)
+ dev->MEAR_cache |= MEAR_MDIO;
+ else
+ dev->MEAR_cache &= ~MEAR_MDIO;
+
+ /* set the output bit */
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */
+ udelay(1);
+
+ /* drive MDC high causing the data bit to be latched */
+ dev->MEAR_cache |= MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait again... */
+ udelay(1);
+}
+
+static int ns83820_mii_read_bit(struct ns83820 *dev)
+{
+ int bit;
+
+ /* drive MDC low, disable output */
+ dev->MEAR_cache &= ~MEAR_MDC;
+ dev->MEAR_cache &= ~MEAR_MDDIR;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */
+ udelay(1);
+
+ /* drive MDC high causing the data bit to be latched */
+ bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0;
+ dev->MEAR_cache |= MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+
+ /* Wait again... */
+ udelay(1);
+
+ return bit;
+}
+
+static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg)
+{
+ unsigned data = 0;
+ int i;
+
+ /* read some garbage so that we eventually sync up */
+ for (i=0; i<64; i++)
+ ns83820_mii_read_bit(dev);
+
+ ns83820_mii_write_bit(dev, 0); /* start */
+ ns83820_mii_write_bit(dev, 1);
+ ns83820_mii_write_bit(dev, 1); /* opcode read */
+ ns83820_mii_write_bit(dev, 0);
+
+ /* write out the phy address: 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, phy & (0x10 >> i));
+
+ /* write out the register address, 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, reg & (0x10 >> i));
+
+ ns83820_mii_read_bit(dev); /* turn around cycles */
+ ns83820_mii_read_bit(dev);
+
+ /* read in the register data, 16 bits msb first */
+ for (i=0; i<16; i++) {
+ data <<= 1;
+ data |= ns83820_mii_read_bit(dev);
+ }
+
+ return data;
+}
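+/* For reference, the frame bit-banged above follows the IEEE 802.3
+ * clause 22 MII management layout:
+ *
+ *	<preamble> 01 <op> <phy[4:0]> <reg[4:0]> <ta> <data[15:0]>
+ *
+ * with <op> = 10 for a read and 01 for a write (as in the function below).
+ */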
+
+static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data)
+{
+ int i;
+
+ /* read some garbage so that we eventually sync up */
+ for (i=0; i<64; i++)
+ ns83820_mii_read_bit(dev);
+
+ ns83820_mii_write_bit(dev, 0); /* start */
+ ns83820_mii_write_bit(dev, 1);
+	ns83820_mii_write_bit(dev, 0);	/* opcode write */
+ ns83820_mii_write_bit(dev, 1);
+
+ /* write out the phy address: 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, phy & (0x10 >> i));
+
+ /* write out the register address, 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, reg & (0x10 >> i));
+
+ ns83820_mii_read_bit(dev); /* turn around cycles */
+ ns83820_mii_read_bit(dev);
+
+	/* write out the register data, 16 bits msb first */
+ for (i=0; i<16; i++)
+ ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1);
+
+ return data;
+}
+
+static void ns83820_probe_phy(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ static int first;
+ int i;
+#define MII_PHYIDR1 0x02
+#define MII_PHYIDR2 0x03
+
+#if 0
+ if (!first) {
+ unsigned tmp;
+ ns83820_mii_read_reg(dev, 1, 0x09);
+ ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
+
+ tmp = ns83820_mii_read_reg(dev, 1, 0x00);
+ ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
+ udelay(1300);
+ ns83820_mii_read_reg(dev, 1, 0x09);
+ }
+#endif
+ first = 1;
+
+ for (i=1; i<2; i++) {
+ int j;
+ unsigned a, b;
+ a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
+ b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
+
+ //printk("%s: phy %d: 0x%04x 0x%04x\n",
+ // ndev->name, i, a, b);
+
+ for (j=0; j<0x16; j+=4) {
+ dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
+ ndev->name, j,
+ ns83820_mii_read_reg(dev, i, 0 + j),
+ ns83820_mii_read_reg(dev, i, 1 + j),
+ ns83820_mii_read_reg(dev, i, 2 + j),
+ ns83820_mii_read_reg(dev, i, 3 + j)
+ );
+ }
+ }
+ {
+ unsigned a, b;
+ /* read firmware version: memory addr is 0x8402 and 0x8403 */
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ a = ns83820_mii_read_reg(dev, 1, 0x1d);
+
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ b = ns83820_mii_read_reg(dev, 1, 0x1d);
+ dprintk("version: 0x%04x 0x%04x\n", a, b);
+ }
+}
+#endif
+
+static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct ns83820 *dev;
+ long addr;
+ int err;
+ int using_dac = 0;
+
+ /* See if we can set the dma mask early on; failure is fatal. */
+ if (TRY_DAC && !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
+ using_dac = 1;
+ } else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+ using_dac = 0;
+ } else {
+ printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ns83820));
+ dev = PRIV(ndev);
+ err = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ spin_lock_init(&dev->rx_info.lock);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->misc_lock);
+ dev->pci_dev = pci_dev;
+
+ dev->ee.cache = &dev->MEAR_cache;
+ dev->ee.lock = &dev->misc_lock;
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pci_dev->dev);
+
+ INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+ tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+		printk(KERN_INFO "ns83820: pci_enable_device failed: %d\n", err);
+ goto out_free;
+ }
+
+ pci_set_master(pci_dev);
+ addr = pci_resource_start(pci_dev, 1);
+ dev->base = ioremap_nocache(addr, PAGE_SIZE);
+ dev->tx_descs = pci_alloc_consistent(pci_dev,
+ 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
+ dev->rx_info.descs = pci_alloc_consistent(pci_dev,
+ 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
+ err = -ENOMEM;
+ if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
+ goto out_disable;
+
+ dprintk("%p: %08lx %p: %08lx\n",
+ dev->tx_descs, (long)dev->tx_phy_descs,
+ dev->rx_info.descs, (long)dev->rx_info.phy_descs);
+
+ /* disable interrupts */
+ writel(0, dev->base + IMR);
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+
+ dev->IMR_cache = 0;
+
+ setup_ee_mem_bitbanger(&dev->ee, dev->base + MEAR, 3, 2, 1, 0,
+ 0);
+
+ err = request_irq(pci_dev->irq, ns83820_irq, SA_SHIRQ,
+ DRV_NAME, ndev);
+ if (err) {
+ printk(KERN_INFO "ns83820: unable to register irq %d\n",
+ pci_dev->irq);
+ goto out_disable;
+ }
+
+ /*
+	 * FIXME: we are holding rtnl_lock() over an obscenely long area only
+ * because some of the setup code uses dev->name. It's Wrong(tm) -
+ * we should be using driver-specific names for all that stuff.
+ * For now that will do, but we really need to come back and kill
+ * most of the dev_alloc_name() users later.
+ */
+ rtnl_lock();
+ err = dev_alloc_name(ndev, ndev->name);
+ if (err < 0) {
+ printk(KERN_INFO "ns83820: unable to get netdev name: %d\n", err);
+ goto out_free_irq;
+ }
+
+ printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n",
+ ndev->name, le32_to_cpu(readl(dev->base + 0x22c)),
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device);
+
+ ndev->open = ns83820_open;
+ ndev->stop = ns83820_stop;
+ ndev->hard_start_xmit = ns83820_hard_start_xmit;
+ ndev->get_stats = ns83820_get_stats;
+ ndev->change_mtu = ns83820_change_mtu;
+ ndev->set_multicast_list = ns83820_set_multicast;
+ SET_ETHTOOL_OPS(ndev, &ops);
+ ndev->tx_timeout = ns83820_tx_timeout;
+ ndev->watchdog_timeo = 5 * HZ;
+ pci_set_drvdata(pci_dev, ndev);
+
+ ns83820_do_reset(dev, CR_RST);
+
+ /* Must reset the ram bist before running it */
+ writel(PTSCR_RBIST_RST, dev->base + PTSCR);
+ ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN,
+ PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL);
+ ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0,
+ PTSCR_EEBIST_FAIL);
+ ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0);
+
+ /* I love config registers */
+ dev->CFG_cache = readl(dev->base + CFG);
+
+ if ((dev->CFG_cache & CFG_PCI64_DET)) {
+ printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n",
+ ndev->name);
+ /*dev->CFG_cache |= CFG_DATA64_EN;*/
+ if (!(dev->CFG_cache & CFG_DATA64_EN))
+ printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n",
+ ndev->name);
+ } else
+ dev->CFG_cache &= ~(CFG_DATA64_EN);
+
+ dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS |
+ CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 |
+ CFG_M64ADDR);
+ dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS |
+ CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL;
+ dev->CFG_cache |= CFG_REQALG;
+ dev->CFG_cache |= CFG_POW;
+ dev->CFG_cache |= CFG_TMRTEST;
+
+ /* When compiled with 64 bit addressing, we must always enable
+ * the 64 bit descriptor format.
+ */
+#ifdef USE_64BIT_ADDR
+ dev->CFG_cache |= CFG_M64ADDR;
+#endif
+ if (using_dac)
+ dev->CFG_cache |= CFG_T64ADDR;
+
+ /* Big endian mode does not seem to do what the docs suggest */
+ dev->CFG_cache &= ~CFG_BEM;
+
+ /* setup optical transceiver if we have one */
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ printk(KERN_INFO "%s: enabling optical transceiver\n",
+ ndev->name);
+ writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR);
+
+ /* setup auto negotiation feature advertisement */
+ writel(readl(dev->base + TANAR)
+ | TANAR_HALF_DUP | TANAR_FULL_DUP,
+ dev->base + TANAR);
+
+ /* start auto negotiation */
+ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
+ dev->base + TBICR);
+ writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
+ dev->linkstate = LINK_AUTONEGOTIATE;
+
+ dev->CFG_cache |= CFG_MODE_1000;
+ }
+
+ writel(dev->CFG_cache, dev->base + CFG);
+ dprintk("CFG: %08x\n", dev->CFG_cache);
+
+ if (reset_phy) {
+ printk(KERN_INFO "%s: resetting phy\n", ndev->name);
+ writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG);
+ msleep(10);
+ writel(dev->CFG_cache, dev->base + CFG);
+ }
+
+#if 0 /* Huh? This sets the PCI latency register. Should be done via
+ * the PCI layer. FIXME.
+ */
+ if (readl(dev->base + SRR))
+ writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c);
+#endif
+
+ /* Note! The DMA burst size interacts with packet
+ * transmission, such that the largest packet that
+ * can be transmitted is 8192 - FLTH - burst size.
+ * If only the transmit fifo was larger...
+ */
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ * some DELL and COMPAQ SMP systems */
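+	/* For reference: with the 512 byte burst and the 1600 byte fill
+	 * threshold programmed below, that limit works out to roughly
+	 * 8192 - 1600 - 512 = 6080 bytes per frame.
+	 */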
+ writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512
+ | ((1600 / 32) * 0x100),
+ dev->base + TXCFG);
+
+ /* Flush the interrupt holdoff timer */
+ writel(0x000, dev->base + IHR);
+ writel(0x100, dev->base + IHR);
+ writel(0x000, dev->base + IHR);
+
+ /* Set Rx to full duplex, don't accept runt, errored, long or length
+ * range errored packets. Use 512 byte DMA.
+ */
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ * some DELL and COMPAQ SMP systems
+	 * Only turn on ALP if we are accepting jumbo packets */
+ writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
+ | RXCFG_STRIPCRC
+ //| RXCFG_ALP
+ | (RXCFG_MXDMA512) | 0, dev->base + RXCFG);
+
+ /* Disable priority queueing */
+ writel(0, dev->base + PQCR);
+
+	/* Enable IP checksum validation and detection of VLAN headers.
+ * Note: do not set the reject options as at least the 0x102
+ * revision of the chip does not properly accept IP fragments
+ * at least for UDP.
+ */
+	/* Ramit : Be sure to turn on RXCFG_ARP if VLANs are enabled, since
+	 * the MAC calculates the packet size AFTER stripping the VLAN
+	 * header.  If a 64 byte VLAN-tagged packet is received (like a ping
+	 * with a VLAN header), the card strips the 4 byte VLAN tag and then
+	 * checks the packet size, so if RXCFG_ARP is not enabled it
+	 * discards the packet.
+	 * Also turn on tag stripping if hardware acceleration is enabled.
+ */
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN)
+#else
+#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN)
+#endif
+ writel(VRCR_INIT_VALUE, dev->base + VRCR);
+
+ /* Enable per-packet TCP/UDP/IP checksumming
+ * and per packet vlan tag insertion if
+ * vlan hardware acceleration is enabled
+ */
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+#define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI)
+#else
+#define VTCR_INIT_VALUE VTCR_PPCHK
+#endif
+ writel(VTCR_INIT_VALUE, dev->base + VTCR);
+
+ /* Ramit : Enable async and sync pause frames */
+ /* writel(0, dev->base + PCR); */
+ writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K |
+ PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT),
+ dev->base + PCR);
+
+ /* Disable Wake On Lan */
+ writel(0, dev->base + WCSR);
+
+ ns83820_getmac(dev, ndev->dev_addr);
+
+ /* Yes, we support dumb IP checksum on transmit */
+ ndev->features |= NETIF_F_SG;
+ ndev->features |= NETIF_F_IP_CSUM;
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ /* We also support hardware vlan acceleration */
+ ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ ndev->vlan_rx_register = ns83820_vlan_rx_register;
+ ndev->vlan_rx_kill_vid = ns83820_vlan_rx_kill_vid;
+#endif
+
+ if (using_dac) {
+ printk(KERN_INFO "%s: using 64 bit addressing.\n",
+ ndev->name);
+ ndev->features |= NETIF_F_HIGHDMA;
+ }
+
+ printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %02x:%02x:%02x:%02x:%02x:%02x io=0x%08lx irq=%d f=%s\n",
+ ndev->name,
+ (unsigned)readl(dev->base + SRR) >> 8,
+ (unsigned)readl(dev->base + SRR) & 0xff,
+ ndev->dev_addr[0], ndev->dev_addr[1],
+ ndev->dev_addr[2], ndev->dev_addr[3],
+ ndev->dev_addr[4], ndev->dev_addr[5],
+ addr, pci_dev->irq,
+ (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg"
+ );
+
+#ifdef PHY_CODE_IS_FINISHED
+ ns83820_probe_phy(ndev);
+#endif
+
+ err = register_netdevice(ndev);
+ if (err) {
+ printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err);
+ goto out_cleanup;
+ }
+ rtnl_unlock();
+
+ return 0;
+
+out_cleanup:
+ writel(0, dev->base + IMR); /* paranoia */
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+out_free_irq:
+ rtnl_unlock();
+ free_irq(pci_dev->irq, ndev);
+out_disable:
+ if (dev->base)
+ iounmap(dev->base);
+ pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
+ pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
+ pci_disable_device(pci_dev);
+out_free:
+ free_netdev(ndev);
+ pci_set_drvdata(pci_dev, NULL);
+out:
+ return err;
+}
+
+static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
+{
+ struct net_device *ndev = pci_get_drvdata(pci_dev);
+ struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */
+
+ if (!ndev) /* paranoia */
+ return;
+
+ writel(0, dev->base + IMR); /* paranoia */
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+
+ unregister_netdev(ndev);
+ free_irq(dev->pci_dev->irq, ndev);
+ iounmap(dev->base);
+ pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
+ pci_disable_device(dev->pci_dev);
+ free_netdev(ndev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+static struct pci_device_id ns83820_pci_tbl[] = {
+ { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
+ { 0, },
+};
+
+static struct pci_driver driver = {
+ .name = "ns83820",
+ .id_table = ns83820_pci_tbl,
+ .probe = ns83820_init_one,
+ .remove = __devexit_p(ns83820_remove_one),
+#if 0 /* FIXME: implement */
+ .suspend = ,
+ .resume = ,
+#endif
+};
+
+
+static int __init ns83820_init(void)
+{
+ printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
+ return pci_module_init(&driver);
+}
+
+static void __exit ns83820_exit(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+MODULE_AUTHOR("Benjamin LaHaise <bcrl@kvack.org>");
+MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, ns83820_pci_tbl);
+
+module_param(lnksts, int, 0);
+MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit");
+
+module_param(ihr, int, 0);
+MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)");
+
+module_param(reset_phy, int, 0);
+MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup");
+
+module_init(ns83820_init);
+module_exit(ns83820_exit);
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c
new file mode 100644
index 000000000000..62167a29debe
--- /dev/null
+++ b/drivers/net/oaknet.c
@@ -0,0 +1,665 @@
+/*
+ *
+ * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
+ *
+ * Module name: oaknet.c
+ *
+ * Description:
+ * Driver for the National Semiconductor DP83902AV Ethernet controller
+ * on-board the IBM PowerPC "Oak" evaluation board. Adapted from the
+ * various other 8390 drivers written by Donald Becker and Paul Gortmaker.
+ *
+ * Additional inspiration from the "tcd8390.c" driver from TiVo, Inc.
+ * and "enetLib.c" from IBM.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+
+#include <asm/board.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+
+/* Preprocessor Defines */
+
+#if !defined(TRUE) || TRUE != 1
+#define TRUE 1
+#endif
+
+#if !defined(FALSE) || FALSE != 0
+#define FALSE 0
+#endif
+
+#define OAKNET_START_PG 0x20 /* First page of TX buffer */
+#define OAKNET_STOP_PG	 0x40	/* Last page +1 of RX ring	*/
+
+#define OAKNET_WAIT (2 * HZ / 100) /* 20 ms */
+
+/* Experimenting with some fixes for a broken driver... */
+
+#define OAKNET_DISINT
+#define OAKNET_HEADCHECK
+#define OAKNET_RWFIX
+
+
+/* Global Variables */
+
+static const char *name = "National DP83902AV";
+
+static struct net_device *oaknet_devs;
+
+
+/* Function Prototypes */
+
+static int oaknet_open(struct net_device *dev);
+static int oaknet_close(struct net_device *dev);
+
+static void oaknet_reset_8390(struct net_device *dev);
+static void oaknet_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void oaknet_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void oaknet_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+static void oaknet_dma_error(struct net_device *dev, const char *name);
+
+
+/*
+ * int oaknet_init()
+ *
+ * Description:
+ *	This routine performs all the necessary platform-specific
+ *	initialization and set-up for the IBM "Oak" evaluation board's National
+ * Semiconductor DP83902AV "ST-NIC" Ethernet controller.
+ *
+ * Input(s):
+ * N/A
+ *
+ * Output(s):
+ * N/A
+ *
+ * Returns:
+ * 0 if OK, otherwise system error number on error.
+ *
+ */
+static int __init oaknet_init(void)
+{
+ register int i;
+ int reg0, regd;
+ int ret = -ENOMEM;
+ struct net_device *dev;
+#if 0
+ unsigned long ioaddr = OAKNET_IO_BASE;
+#else
+	unsigned long ioaddr = (unsigned long) ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE);
+#endif
+ bd_t *bip = (bd_t *)__res;
+
+ if (!ioaddr)
+ return -ENOMEM;
+
+ dev = alloc_ei_netdev();
+ if (!dev)
+ goto out_unmap;
+
+ ret = -EBUSY;
+ if (!request_region(OAKNET_IO_BASE, OAKNET_IO_SIZE, name))
+ goto out_dev;
+
+ /* Quick register check to see if the device is really there. */
+
+ ret = -ENODEV;
+ if ((reg0 = ei_ibp(ioaddr)) == 0xFF)
+ goto out_region;
+
+ /*
+ * That worked. Now a more thorough check, using the multicast
+ * address registers, that the device is definitely out there
+ * and semi-functional.
+ */
+
+ ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
+ regd = ei_ibp(ioaddr + 0x0D);
+ ei_obp(0xFF, ioaddr + 0x0D);
+ ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
+ ei_ibp(ioaddr + EN0_COUNTER0);
+
+ /* It's no good. Fix things back up and leave. */
+
+ ret = -ENODEV;
+ if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) {
+ ei_obp(reg0, ioaddr);
+ ei_obp(regd, ioaddr + 0x0D);
+ goto out_region;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ /*
+ * This controller is on an embedded board, so the base address
+	 * and interrupt assignments are pre-assigned and unchangeable.
+ */
+
+ dev->base_addr = ioaddr;
+ dev->irq = OAKNET_INT;
+
+ /*
+ * Disable all chip interrupts for now and ACK all pending
+ * interrupts.
+ */
+
+ ei_obp(0x0, ioaddr + EN0_IMR);
+ ei_obp(0xFF, ioaddr + EN0_ISR);
+
+ /* Attempt to get the interrupt line */
+
+ ret = -EAGAIN;
+ if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
+ printk("%s: unable to request interrupt %d.\n",
+ name, dev->irq);
+ goto out_region;
+ }
+
+ /* Tell the world about what and where we've found. */
+
+ printk("%s: %s at", dev->name, name);
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ dev->dev_addr[i] = bip->bi_enetaddr[i];
+ printk("%c%.2x", (i ? ':' : ' '), dev->dev_addr[i]);
+ }
+ printk(", found at %#lx, using IRQ %d.\n", dev->base_addr, dev->irq);
+
+ /* Set up some required driver fields and then we're done. */
+
+ ei_status.name = name;
+ ei_status.word16 = FALSE;
+ ei_status.tx_start_page = OAKNET_START_PG;
+ ei_status.rx_start_page = OAKNET_START_PG + TX_PAGES;
+ ei_status.stop_page = OAKNET_STOP_PG;
+
+ ei_status.reset_8390 = &oaknet_reset_8390;
+ ei_status.block_input = &oaknet_block_input;
+ ei_status.block_output = &oaknet_block_output;
+ ei_status.get_8390_hdr = &oaknet_get_8390_hdr;
+
+ dev->open = oaknet_open;
+ dev->stop = oaknet_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ NS8390_init(dev, FALSE);
+ ret = register_netdev(dev);
+ if (ret)
+ goto out_irq;
+
+ oaknet_devs = dev;
+ return 0;
+
+out_irq:
+ free_irq(dev->irq, dev);
+out_region:
+ release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
+out_dev:
+ free_netdev(dev);
+out_unmap:
+	iounmap((void *)ioaddr);
+ return ret;
+}
+
+/*
+ * static int oaknet_open()
+ *
+ * Description:
+ * This routine is a modest wrapper around ei_open, the 8390-generic,
+ * driver open routine. This just increments the module usage count
+ * and passes along the status from ei_open.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ *
+ * Output(s):
+ * *dev - Pointer to the device structure for this driver, potentially
+ * modified by ei_open.
+ *
+ * Returns:
+ * 0 if OK, otherwise < 0 on error.
+ *
+ */
+static int
+oaknet_open(struct net_device *dev)
+{
+ int status = ei_open(dev);
+ return (status);
+}
+
+/*
+ * static int oaknet_close()
+ *
+ * Description:
+ * This routine is a modest wrapper around ei_close, the 8390-generic,
+ * driver close routine. This just decrements the module usage count
+ * and passes along the status from ei_close.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ *
+ * Output(s):
+ * *dev - Pointer to the device structure for this driver, potentially
+ * modified by ei_close.
+ *
+ * Returns:
+ * 0 if OK, otherwise < 0 on error.
+ *
+ */
+static int
+oaknet_close(struct net_device *dev)
+{
+ int status = ei_close(dev);
+ return (status);
+}
+
+/*
+ * static void oaknet_reset_8390()
+ *
+ * Description:
+ * This routine resets the DP83902 chip.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ *
+ * Output(s):
+ * N/A
+ *
+ * Returns:
+ * N/A
+ *
+ */
+static void
+oaknet_reset_8390(struct net_device *dev)
+{
+ int base = E8390_BASE;
+
+ /*
+	 * We have no provision for resetting the controller as is done
+ * in other drivers, such as "ne.c". However, the following
+ * seems to work well enough in the TiVo driver.
+ */
+
+ printk("Resetting %s...\n", dev->name);
+ ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD);
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+}
+
+/*
+ * static void oaknet_get_8390_hdr()
+ *
+ * Description:
+ * This routine grabs the 8390-specific header. It's similar to the
+ * block input routine, but we don't need to be concerned with ring wrap
+ * as the header will be at the start of a page, so we optimize accordingly.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ * *hdr - Pointer to storage for the 8390-specific packet header.
+ *	ring_page	- Ring buffer page at which the packet header resides.
+ *
+ * Output(s):
+ * *hdr - Pointer to the 8390-specific packet header for the just-
+ * received frame.
+ *
+ * Returns:
+ * N/A
+ *
+ */
+static void
+oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ int base = dev->base_addr;
+
+ /*
+ * This should NOT happen. If it does, it is the LAST thing you'll
+ * see.
+ */
+
+ if (ei_status.dmaing) {
+ oaknet_dma_error(dev, "oaknet_get_8390_hdr");
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), base + EN0_RCNTLO);
+ outb_p(0, base + EN0_RCNTHI);
+ outb_p(0, base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, base + EN0_RSARHI);
+ outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD);
+
+ if (ei_status.word16)
+ insw(base + OAKNET_DATA, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
+ else
+ insb(base + OAKNET_DATA, hdr,
+ sizeof(struct e8390_pkt_hdr));
+
+ /* Byte-swap the packet byte count */
+
+ hdr->count = le16_to_cpu(hdr->count);
+
+ outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*
+ * static void oaknet_block_input()
+ *
+ * Description:
+ *	This routine reads a block of received packet data from the
+ *	controller's buffer ring into the supplied socket buffer using
+ *	remote DMA.
+ */
+static void
+oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+	int base = OAKNET_BASE;
+	char *buf = skb->data;
+#ifdef OAKNET_DISINT
+	unsigned long flags;
+#endif
+#ifdef OAKNET_HEADCHECK
+	int bytes = count;	/* bytes actually transferred, for the address check below */
+#endif
+
+ /*
+ * This should NOT happen. If it does, it is the LAST thing you'll
+ * see.
+ */
+
+ if (ei_status.dmaing) {
+ oaknet_dma_error(dev, "oaknet_block_input");
+ return;
+ }
+
+#ifdef OAKNET_DISINT
+ save_flags(flags);
+ cli();
+#endif
+
+ ei_status.dmaing |= 0x01;
+ ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD);
+ ei_obp(count & 0xff, base + EN0_RCNTLO);
+ ei_obp(count >> 8, base + EN0_RCNTHI);
+ ei_obp(ring_offset & 0xff, base + EN0_RSARLO);
+ ei_obp(ring_offset >> 8, base + EN0_RSARHI);
+ ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
+ if (ei_status.word16) {
+ ei_isw(base + E8390_DATA, buf, count >> 1);
+ if (count & 0x01) {
+ buf[count - 1] = ei_ib(base + E8390_DATA);
+#ifdef OAKNET_HEADCHECK
+ bytes++;
+#endif
+ }
+ } else {
+ ei_isb(base + E8390_DATA, buf, count);
+ }
+#ifdef OAKNET_HEADCHECK
+ /*
+ * This was for the ALPHA version only, but enough people have
+ * been encountering problems so it is still here. If you see
+ * this message you either 1) have a slightly incompatible clone
+ * or 2) have noise/speed problems with your bus.
+ */
+
+ /* DMA termination address check... */
+ {
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = ei_ibp(base + EN0_RSARHI);
+ int low = ei_ibp(base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + bytes) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + bytes, addr);
+ }
+#endif
+ ei_obp(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
+ ei_status.dmaing &= ~0x01;
+
+#ifdef OAKNET_DISINT
+ restore_flags(flags);
+#endif
+}
+
+/*
+ * static void oaknet_block_output()
+ *
+ * Description:
+ *	This routine writes a block of outgoing packet data to the
+ *	controller's transmit buffer memory via remote DMA and waits for
+ *	the transfer to complete.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ * count - Number of bytes to be transferred.
+ *	*buf		- Pointer to the outgoing packet data.
+ *	start_page	- Controller buffer page at which to start the transfer.
+ *
+ * Output(s):
+ * N/A
+ *
+ * Returns:
+ * N/A
+ *
+ */
+static void
+oaknet_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int base = E8390_BASE;
+#if 0
+ int bug;
+#endif
+ unsigned long start;
+#ifdef OAKNET_DISINT
+ unsigned long flags;
+#endif
+#ifdef OAKNET_HEADCHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. */
+
+ if (ei_status.word16 && (count & 0x1))
+ count++;
+
+ /*
+ * This should NOT happen. If it does, it is the LAST thing you'll
+ * see.
+ */
+
+ if (ei_status.dmaing) {
+ oaknet_dma_error(dev, "oaknet_block_output");
+ return;
+ }
+
+#ifdef OAKNET_DISINT
+ save_flags(flags);
+ cli();
+#endif
+
+ ei_status.dmaing |= 0x01;
+
+ /* Make sure we are in page 0. */
+
+ ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD);
+
+#ifdef OAKNET_HEADCHECK
+retry:
+#endif
+
+#if 0
+ /*
+ * The 83902 documentation states that the processor needs to
+ * do a "dummy read" before doing the remote write to work
+ * around a chip bug they don't feel like fixing.
+ */
+
+ bug = 0;
+ while (1) {
+ unsigned int rdhi;
+ unsigned int rdlo;
+
+ /* Now the normal output. */
+ ei_obp(ENISR_RDC, base + EN0_ISR);
+ ei_obp(count & 0xff, base + EN0_RCNTLO);
+ ei_obp(count >> 8, base + EN0_RCNTHI);
+ ei_obp(0x00, base + EN0_RSARLO);
+ ei_obp(start_page, base + EN0_RSARHI);
+
+ if (bug++)
+ break;
+
+ /* Perform the dummy read */
+ rdhi = ei_ibp(base + EN0_CRDAHI);
+ rdlo = ei_ibp(base + EN0_CRDALO);
+ ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
+
+ while (1) {
+ unsigned int nrdhi;
+ unsigned int nrdlo;
+ nrdhi = ei_ibp(base + EN0_CRDAHI);
+ nrdlo = ei_ibp(base + EN0_CRDALO);
+ if ((rdhi != nrdhi) || (rdlo != nrdlo))
+ break;
+ }
+ }
+#else
+#ifdef OAKNET_RWFIX
+ /*
+ * Handle the read-before-write bug the same way as the
+ * Crynwr packet driver -- the Nat'l Semi. method doesn't work.
+ * Actually this doesn't always work either, but if you have
+ * problems with your 83902 this is better than nothing!
+ */
+
+ ei_obp(0x42, base + EN0_RCNTLO);
+ ei_obp(0x00, base + EN0_RCNTHI);
+ ei_obp(0x42, base + EN0_RSARLO);
+ ei_obp(0x00, base + EN0_RSARHI);
+ ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
+ /* Make certain that the dummy read has occurred. */
+ udelay(6);
+#endif
+
+ ei_obp(ENISR_RDC, base + EN0_ISR);
+
+ /* Now the normal output. */
+ ei_obp(count & 0xff, base + EN0_RCNTLO);
+ ei_obp(count >> 8, base + EN0_RCNTHI);
+ ei_obp(0x00, base + EN0_RSARLO);
+ ei_obp(start_page, base + EN0_RSARHI);
+#endif /* 0/1 */
+
+ ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD);
+ if (ei_status.word16) {
+ ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1);
+ } else {
+ ei_osb(E8390_BASE + E8390_DATA, buf, count);
+ }
+
+#ifdef OAKNET_DISINT
+ restore_flags(flags);
+#endif
+
+ start = jiffies;
+
+#ifdef OAKNET_HEADCHECK
+ /*
+ * This was for the ALPHA version only, but enough people have
+ * been encountering problems so it is still here.
+ */
+
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = ei_ibp(base + EN0_RSARHI);
+ int low = ei_ibp(base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
+ if (jiffies - start > OAKNET_WAIT) {
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ oaknet_reset_8390(dev);
+ NS8390_init(dev, TRUE);
+ break;
+ }
+ }
+
+ ei_obp(ENISR_RDC, base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*
+ * static void oaknet_dma_error()
+ *
+ * Description:
+ * This routine prints out a last-ditch informative message to the console
+ * indicating that a DMA error occurred. If you see this, it's the last
+ * thing you'll see.
+ *
+ * Input(s):
+ * *dev - Pointer to the device structure for this driver.
+ * *name - Informative text (e.g. function name) indicating where the
+ * DMA error occurred.
+ *
+ * Output(s):
+ * N/A
+ *
+ * Returns:
+ * N/A
+ *
+ */
+static void
+oaknet_dma_error(struct net_device *dev, const char *name)
+{
+ printk(KERN_EMERG "%s: DMAing conflict in %s."
+ "[DMAstat:%d][irqlock:%d][intr:%ld]\n",
+ dev->name, name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+}
+
+/*
+ * Oak Ethernet module unload interface.
+ */
+static void __exit oaknet_cleanup_module (void)
+{
+ /* Convert to loop once driver supports multiple devices. */
+	unregister_netdev(oaknet_devs);
+ free_irq(oaknet_devs->irq, oaknet_devs);
+	release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
+	iounmap((void *)oaknet_devs->base_addr);
+ free_netdev(oaknet_devs);
+}
+
+module_init(oaknet_init);
+module_exit(oaknet_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
new file mode 100644
index 000000000000..bb1c3d8981ee
--- /dev/null
+++ b/drivers/net/pci-skeleton.c
@@ -0,0 +1,1977 @@
+/*
+
+ drivers/net/pci-skeleton.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+
+ Original code came from 8139too.c, which in turns was based
+ originally on Donald Becker's rtl8139.c driver, versions 1.11
+ and older. This driver was originally based on rtl8139.c
+ version 1.07. Header of rtl8139.c version 1.11:
+
+ -----<snip>-----
+
+ Written 1997-2000 by Donald Becker.
+ This software may be used and distributed according to the
+ terms of the GNU General Public License (GPL), incorporated
+ herein by reference. Drivers based on or derived from this
+ code fall under the GPL and must retain the authorship,
+ copyright and license notice. This file is not a complete
+ program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139
+ PCI ethernet chips.
+
+ The author may be reached as becker@scyld.com, or C/O Scyld
+ Computing Corporation 410 Severn Ave., Suite 210 Annapolis
+ MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston
+ <shangh@realtek.com.tw>.
+
+ -----<snip>-----
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+
+-----------------------------------------------------------------------------
+
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the RealTek RTL8139 series, the RealTek
+Fast Ethernet controllers for PCI and CardBus. This chip is used on many
+low-end boards, sometimes with its markings changed.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+
+III. Driver operation
+
+IIIa. Rx Ring buffers
+
+The receive unit uses a single linear ring buffer rather than the more
+common (and more efficient) descriptor-based architecture. Incoming frames
+are sequentially stored into the Rx region, and the host copies them into
+skbuffs.
+
+Comment: While it is theoretically possible to process many frames in place,
+any delay in Rx processing would cause us to drop frames. More importantly,
+the Linux protocol stack is not designed to operate in this manner.
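+
+As a rough sketch only (rx_ring and cur_rx are fields of this skeleton's
+netdrv_private; skb is a freshly allocated socket buffer; the real Rx
+handler further below may differ in detail), pulling one frame out of the
+ring looks like:
+
+	u32 rx_status = le32_to_cpu(*(u32 *)(rx_ring + cur_rx));
+	unsigned int rx_size = rx_status >> 16;	   (frame length incl. CRC)
+	memcpy(skb_put(skb, rx_size - 4), rx_ring + cur_rx + 4, rx_size - 4);
+	cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;  (keep dword alignment)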
+
+IIIb. Tx operation
+
+The RTL8139 uses a fixed set of four Tx descriptors in register space.
+In a stunningly bad design choice, Tx frames must be 32 bit aligned.  Linux
+aligns the IP header on word boundaries, and the 14 byte ethernet header
+means that almost all frames will need to be copied to an alignment buffer.
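+
+Again as a sketch (using this skeleton's names; not necessarily line for
+line what the transmit routine below does), the copy amounts to:
+
+	memcpy(tp->tx_buf[entry], skb->data, skb->len);
+	NETDRV_W32(TxStatus0 + (entry * 4), tp->tx_flag | skb->len);
+
+i.e. the frame is copied into a 32-bit-aligned bounce buffer and its length
+is then written to the per-descriptor status register to start the send.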
+
+IVb. References
+
+http://www.realtek.com.tw/cn/cn.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+
+#define NETDRV_VERSION "1.0.0"
+#define MODNAME "netdrv"
+#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
+#define PFX MODNAME ": "
+
+static char version[] __devinitdata =
+KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
+KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
+
+/* define to 1 to enable PIO instead of MMIO */
+#undef USE_IO_OPS
+
+/* define to 1 to enable copious debugging info */
+#undef NETDRV_DEBUG
+
+/* define to 1 to disable lightweight runtime debugging checks */
+#undef NETDRV_NDEBUG
+
+
+#ifdef NETDRV_DEBUG
+/* note: prints function name for you */
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+#ifdef NETDRV_NDEBUG
+# define assert(expr) do {} while (0)
+#else
+# define assert(expr) \
+ if(!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
+#endif
+
+
+/* A few user-configurable values. */
+/* media options */
+static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
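+/* The usual form of that hash, shown here only as a sketch (ha_addr and
+ * mc_filter are illustrative names; the real filter setup lives in the
+ * set_rx_mode handler below):
+ *
+ *	int bit = ether_crc(ETH_ALEN, ha_addr) >> 26;
+ *	mc_filter[bit >> 5] |= 1 << (bit & 31);
+ */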
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD 16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
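+/* With RX_BUF_LEN_IDX == 2 above, that is a 32768 byte ring and a total
+ * allocation of 32768 + 16 + 2048 = 34832 bytes. */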
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE 1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+
+enum {
+ HAS_CHIP_XCVR = 0x020000,
+ HAS_LNK_CHNG = 0x040000,
+};
+
+#define NETDRV_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define NETDRV_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+
+typedef enum {
+ RTL8139 = 0,
+ NETDRV_CB,
+ SMC1211TX,
+ /*MPX5030,*/
+ DELTA8139,
+ ADDTRON8139,
+} board_t;
+
+
+/* indexed by board_t, above */
+static struct {
+ const char *name;
+} board_info[] __devinitdata = {
+ { "RealTek RTL8139 Fast Ethernet" },
+ { "RealTek RTL8139B PCI/CardBus" },
+ { "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
+/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
+ { "Delta Electronics 8139 10/100BaseTX" },
+	{ "Addtron Technology 8139 10/100BaseTX" },
+};
+
+
+static struct pci_device_id netdrv_pci_tbl[] = {
+ {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
+ {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
+/* {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
+ {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
+ {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, netdrv_pci_tbl);
+
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum NETDRV_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAR0 = 8, /* Multicast filter. */
+ TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf = 0x30,
+ RxEarlyCnt = 0x34,
+ RxEarlyStatus = 0x36,
+ ChipCmd = 0x37,
+ RxBufPtr = 0x38,
+ RxBufAddr = 0x3A,
+ IntrMask = 0x3C,
+ IntrStatus = 0x3E,
+ TxConfig = 0x40,
+ ChipVersion = 0x43,
+ RxConfig = 0x44,
+ Timer = 0x48, /* A general-purpose counter. */
+ RxMissed = 0x4C, /* 24 bits valid, write clears. */
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ FlashReg = 0x54,
+ MediaStatus = 0x58,
+ Config3 = 0x59,
+ Config4 = 0x5A, /* absent on RTL-8139A */
+ HltClk = 0x5B,
+ MultiIntr = 0x5C,
+ TxSummary = 0x60,
+ BasicModeCtrl = 0x62,
+ BasicModeStatus = 0x64,
+ NWayAdvert = 0x66,
+ NWayLPAR = 0x68,
+ NWayExpansion = 0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS = 0x70, /* FIFO Control and test. */
+ CSCR = 0x74, /* Chip Status and Configuration Register. */
+ PARA78 = 0x78,
+ PARA7c = 0x7c, /* Magic transceiver parameter register. */
+ Config5 = 0xD8, /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+ MultiIntrClear = 0xF000,
+ ChipCmdClear = 0xE2,
+ Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr = 0x8000,
+ PCSTimeout = 0x4000,
+ RxFIFOOver = 0x40,
+ RxUnderrun = 0x20,
+ RxOverflow = 0x10,
+ TxErr = 0x08,
+ TxOK = 0x04,
+ RxErr = 0x02,
+ RxOK = 0x01,
+};
+enum TxStatusBits {
+ TxHostOwns = 0x2000,
+ TxUnderrun = 0x4000,
+ TxStatOK = 0x8000,
+ TxOutOfWindow = 0x20000000,
+ TxAborted = 0x40000000,
+ TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast = 0x8000,
+ RxPhysical = 0x4000,
+ RxBroadcast = 0x2000,
+ RxBadSymbol = 0x0020,
+ RxRunt = 0x0010,
+ RxTooLong = 0x0008,
+ RxCRCErr = 0x0004,
+ RxBadAlign = 0x0002,
+ RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+ TxIFG1 = (1 << 25), /* Interframe Gap Time */
+ TxIFG0 = (1 << 24), /* Enabling these bits violates IEEE 802.3 */
+ TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+ TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
+ TxClearAbt = (1 << 0), /* Clear abort (WO) */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+ Cfg1_PM_Enable = 0x01,
+ Cfg1_VPD_Enable = 0x02,
+ Cfg1_PIO = 0x04,
+ Cfg1_MMIO = 0x08,
+ Cfg1_LWAKE = 0x10,
+ Cfg1_Driver_Load = 0x20,
+ Cfg1_LED0 = 0x40,
+ Cfg1_LED1 = 0x80,
+};
+
+enum RxConfigBits {
+ /* Early Rx threshold, none or X/16 */
+ RxCfgEarlyRxNone = 0,
+ RxCfgEarlyRxShift = 24,
+
+ /* rx fifo threshold */
+ RxCfgFIFOShift = 13,
+ RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+ /* Max DMA burst */
+ RxCfgDMAShift = 8,
+ RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+ /* rx ring buffer length */
+ RxCfgRcv8K = 0,
+ RxCfgRcv16K = (1 << 11),
+ RxCfgRcv32K = (1 << 12),
+ RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+ /* Disable packet wrap at end of Rx buffer */
+ RxNoWrap = (1 << 7),
+};
+
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+ CSCR_LinkOKBit = 0x0400,
+ CSCR_LinkChangeBit = 0x0800,
+ CSCR_LinkStatusBits = 0x0f000,
+ CSCR_LinkDownOffCmd = 0x003c0,
+ CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+
+enum Cfg9346Bits {
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xC0,
+};
+
+
+#define PARA78_default 0x78fa8388
+#define PARA7c_default 0xcb38de43 /* param[0][3] */
+#define PARA7c_xxx 0xcb38de43
+static const unsigned long param[4][4] = {
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+
+typedef enum {
+ CH_8139 = 0,
+ CH_8139_K,
+ CH_8139A,
+ CH_8139B,
+ CH_8130,
+ CH_8139C,
+} chip_t;
+
+
+/* directly indexed by chip_t, above */
+static const struct {
+ const char *name;
+ u8 version; /* from RTL8139C docs */
+ u32 RxConfigMask; /* should clear the bits supported by this chip */
+} rtl_chip_info[] = {
+ { "RTL-8139",
+ 0x40,
+ 0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ },
+
+ { "RTL-8139 rev K",
+ 0x60,
+ 0xf0fe0040,
+ },
+
+ { "RTL-8139A",
+ 0x70,
+ 0xf0fe0040,
+ },
+
+ { "RTL-8139B",
+ 0x78,
+ 0xf0fc0040
+ },
+
+ { "RTL-8130",
+ 0x7C,
+ 0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ },
+
+ { "RTL-8139C",
+ 0x74,
+ 0xf0fc0040, /* XXX copied from RTL8139B, verify */
+ },
+
+};
+
+
+struct netdrv_private {
+ board_t board;
+ void *mmio_addr;
+ int drv_flags;
+ struct pci_dev *pci_dev;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ unsigned char *rx_ring;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ unsigned int tx_flag;
+ atomic_t cur_tx;
+ atomic_t dirty_tx;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct ring_info tx_info[NUM_TX_DESC];
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_bufs_dma;
+ char phys[4]; /* MII device addresses. */
+ char twistie, twist_row, twist_col; /* Twister tune state. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ spinlock_t lock;
+ chip_t chipset;
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM (multicast_filter_limit, "i");
+MODULE_PARM (max_interrupt_work, "i");
+MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
+MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
+
+static int read_eeprom (void *ioaddr, int location, int addr_len);
+static int netdrv_open (struct net_device *dev);
+static int mdio_read (struct net_device *dev, int phy_id, int location);
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int val);
+static void netdrv_timer (unsigned long data);
+static void netdrv_tx_timeout (struct net_device *dev);
+static void netdrv_init_ring (struct net_device *dev);
+static int netdrv_start_xmit (struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t netdrv_interrupt (int irq, void *dev_instance,
+ struct pt_regs *regs);
+static int netdrv_close (struct net_device *dev);
+static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *netdrv_get_stats (struct net_device *dev);
+static void netdrv_set_rx_mode (struct net_device *dev);
+static void netdrv_hw_start (struct net_device *dev);
+
+
+#ifdef USE_IO_OPS
+
+#define NETDRV_R8(reg) inb (((unsigned long)ioaddr) + (reg))
+#define NETDRV_R16(reg) inw (((unsigned long)ioaddr) + (reg))
+#define NETDRV_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
+#define NETDRV_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W8_F NETDRV_W8
+#define NETDRV_W16_F NETDRV_W16
+#define NETDRV_W32_F NETDRV_W32
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+
+#else
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define NETDRV_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
+#define NETDRV_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
+#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
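+/* Example: NETDRV_W16_F(IntrMask, 0) both masks the chip's interrupts and
+ * forces the posted write out to the hardware before the caller continues. */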
+
+
+#if MMIO_FLUSH_AUDIT_COMPLETE
+
+/* write MMIO register */
+#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define NETDRV_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define NETDRV_W32(reg, val32) writel ((val32), ioaddr + (reg))
+
+#else
+
+/* write MMIO register, then flush */
+#define NETDRV_W8 NETDRV_W8_F
+#define NETDRV_W16 NETDRV_W16_F
+#define NETDRV_W32 NETDRV_W32_F
+
+#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
+
+/* read MMIO register */
+#define NETDRV_R8(reg) readb (ioaddr + (reg))
+#define NETDRV_R16(reg) readw (ioaddr + (reg))
+#define NETDRV_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+
+#endif /* USE_IO_OPS */
+
+
+static const u16 netdrv_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+ TxErr | TxOK | RxErr | RxOK;
+
+static const unsigned int netdrv_rx_config =
+ RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
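+/* With the defaults above this works out to 0xd680: a 32K ring, no wrap,
+ * a 1024 byte Rx FIFO threshold and a 1024 byte maximum DMA burst. */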
+
+
+static int __devinit netdrv_init_board (struct pci_dev *pdev,
+ struct net_device **dev_out,
+ void **ioaddr_out)
+{
+ void *ioaddr = NULL;
+ struct net_device *dev;
+ struct netdrv_private *tp;
+ int rc, i;
+ u32 pio_start, pio_end, pio_flags, pio_len;
+ unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ u32 tmp;
+
+ DPRINTK ("ENTER\n");
+
+ assert (pdev != NULL);
+ assert (ioaddr_out != NULL);
+
+ *ioaddr_out = NULL;
+ *dev_out = NULL;
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (dev == NULL) {
+ printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+ DPRINTK ("EXIT, returning -ENOMEM\n");
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ tp = dev->priv;
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device (pdev);
+ if (rc)
+ goto err_out;
+
+ pio_start = pci_resource_start (pdev, 0);
+ pio_end = pci_resource_end (pdev, 0);
+ pio_flags = pci_resource_flags (pdev, 0);
+ pio_len = pci_resource_len (pdev, 0);
+
+ mmio_start = pci_resource_start (pdev, 1);
+ mmio_end = pci_resource_end (pdev, 1);
+ mmio_flags = pci_resource_flags (pdev, 1);
+ mmio_len = pci_resource_len (pdev, 1);
+
+ /* set this immediately, we need to know before
+ * we talk to the chip directly */
+ DPRINTK("PIO region size == 0x%02X\n", pio_len);
+ DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+
+ /* make sure PCI base addr 0 is PIO */
+ if (!(pio_flags & IORESOURCE_IO)) {
+ printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(mmio_flags & IORESOURCE_MEM)) {
+ printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if ((pio_len < NETDRV_MIN_IO_SIZE) ||
+ (mmio_len < NETDRV_MIN_IO_SIZE)) {
+ printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ rc = pci_request_regions (pdev, "pci-skeleton");
+ if (rc)
+ goto err_out;
+
+ pci_set_master (pdev);
+
+#ifdef USE_IO_OPS
+ ioaddr = (void *) pio_start;
+#else
+ /* ioremap MMIO region */
+ ioaddr = ioremap (mmio_start, mmio_len);
+ if (ioaddr == NULL) {
+ printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res;
+ }
+#endif /* USE_IO_OPS */
+
+ /* Soft reset the chip. */
+ NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ break;
+ else
+ udelay (10);
+
+ /* Bring the chip out of low-power mode. */
+ /* <insert device-specific code here> */
+
+#ifndef USE_IO_OPS
+ /* sanity checks -- ensure PIO and MMIO registers agree */
+ assert (inb (pio_start+Config0) == readb (ioaddr+Config0));
+ assert (inb (pio_start+Config1) == readb (ioaddr+Config1));
+ assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig));
+ assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig));
+#endif /* !USE_IO_OPS */
+
+ /* identify chip attached to board */
+ tmp = NETDRV_R8 (ChipVersion);
+ for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--)
+ if (tmp == rtl_chip_info[i].version) {
+ tp->chipset = i;
+ goto match;
+ }
+
+ /* if unknown chip, assume array element #0, original RTL-8139 in this case */
+ printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8139\n",
+ pci_name(pdev));
+ printk (KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n", pci_name(pdev), NETDRV_R32 (TxConfig));
+ tp->chipset = 0;
+
+match:
+ DPRINTK ("chipset id (%d) == index %d, '%s'\n",
+ tmp,
+ tp->chipset,
+ rtl_chip_info[tp->chipset].name);
+
+ i = register_netdev (dev);
+ if (i)
+ goto err_out_unmap;
+
+ DPRINTK ("EXIT, returning 0\n");
+ *ioaddr_out = ioaddr;
+ *dev_out = dev;
+ return 0;
+
+err_out_unmap:
+#ifndef USE_IO_OPS
+ iounmap(ioaddr);
+err_out_free_res:
+#endif
+ pci_release_regions (pdev);
+err_out:
+ free_netdev (dev);
+ DPRINTK ("EXIT, returning %d\n", rc);
+ return rc;
+}
+
+
+static int __devinit netdrv_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct netdrv_private *tp;
+ int i, addr_len, option;
+ void *ioaddr = NULL;
+ static int board_idx = -1;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ DPRINTK ("ENTER\n");
+
+ assert (pdev != NULL);
+ assert (ent != NULL);
+
+ board_idx++;
+
+ i = netdrv_init_board (pdev, &dev, &ioaddr);
+ if (i < 0) {
+ DPRINTK ("EXIT, returning %d\n", i);
+ return i;
+ }
+
+ tp = dev->priv;
+
+ assert (ioaddr != NULL);
+ assert (dev != NULL);
+ assert (tp != NULL);
+
+ addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *) (dev->dev_addr))[i] =
+ le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+
+ /* The Rtl8139-specific entries in the device structure. */
+ dev->open = netdrv_open;
+ dev->hard_start_xmit = netdrv_start_xmit;
+ dev->stop = netdrv_close;
+ dev->get_stats = netdrv_get_stats;
+ dev->set_multicast_list = netdrv_set_rx_mode;
+ dev->do_ioctl = netdrv_ioctl;
+ dev->tx_timeout = netdrv_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) ioaddr;
+
+ /* dev->priv/tp zeroed and aligned in alloc_etherdev */
+ tp = dev->priv;
+
+ /* note: tp->chipset set in netdrv_init_board */
+ tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | NETDRV_CAPS;
+ tp->pci_dev = pdev;
+ tp->board = ent->driver_data;
+ tp->mmio_addr = ioaddr;
+ spin_lock_init(&tp->lock);
+
+ pci_set_drvdata(pdev, dev);
+
+ tp->phys[0] = 32;
+
+ printk (KERN_INFO "%s: %s at 0x%lx, "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+ "IRQ %d\n",
+ dev->name,
+ board_info[ent->driver_data].name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5],
+ dev->irq);
+
+ printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
+ dev->name, rtl_chip_info[tp->chipset].name);
+
+ /* Put the chip into low-power mode. */
+ NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
+
+ /* The lower four bits are the media type. */
+ option = (board_idx > 7) ? 0 : media[board_idx];
+ if (option > 0) {
+ tp->full_duplex = (option & 0x200) ? 1 : 0;
+ tp->default_port = option & 15;
+ if (tp->default_port)
+ tp->medialock = 1;
+ }
+
+ if (tp->full_duplex) {
+ printk (KERN_INFO
+ "%s: Media type forced to Full Duplex.\n",
+ dev->name);
+ mdio_write (dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ tp->duplex_lock = 1;
+ }
+
+ DPRINTK ("EXIT - returning 0\n");
+ return 0;
+}
+
+
+static void __devexit netdrv_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdrv_private *np;
+
+ DPRINTK ("ENTER\n");
+
+ assert (dev != NULL);
+
+ np = dev->priv;
+ assert (np != NULL);
+
+ unregister_netdev (dev);
+
+#ifndef USE_IO_OPS
+ iounmap (np->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+ pci_release_regions (pdev);
+
+ free_netdev (dev);
+
+ pci_set_drvdata (pdev, NULL);
+
+ pci_disable_device (pdev);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
+ */
+
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
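+
+/* read_eeprom() below bit-bangs this interface through the Cfg9346
+   register: the read opcode and word address are shifted out MSB first,
+   one bit per EE_SHIFT_CLK pulse, then 16 data bits are clocked back in.
+   addr_len is 6 for the small (93C46-style) parts and 8 for the larger
+   EEPROMs, matching the probe in netdrv_init_one(). */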
+
+static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ DPRINTK ("ENTER\n");
+
+ writeb (EE_ENB & ~EE_CS, ee_addr);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writeb (EE_ENB | dataval, ee_addr);
+ eeprom_delay ();
+ writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ }
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ for (i = 16; i > 0; i--) {
+ writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+		retval = (retval << 1) |
+			 ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+ }
+
+ /* Terminate the EEPROM access. */
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+
+ DPRINTK ("EXIT - returning %d\n", retval);
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay() readb(mdio_addr)
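+
+/* mdio_read()/mdio_write() below bit-bang the usual MII management frame
+   (start/opcode bits, PHY and register address, turnaround, 16 data bits)
+   on the Config4 lines; mdio_delay() is just a dummy read used to pace
+   the clock. */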
+
+
+static char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
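+
+/* For the internal transceiver (phy_id 32) the map above translates the
+   first eight MII register numbers into the equivalent 8139 registers;
+   a zero entry means there is no equivalent. */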
+
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync (void *mdio_addr)
+{
+ int i;
+
+ DPRINTK ("ENTER\n");
+
+ for (i = 32; i >= 0; i--) {
+ writeb (MDIO_WRITE1, mdio_addr);
+ mdio_delay ();
+ writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay ();
+ }
+
+ DPRINTK ("EXIT\n");
+}
+
+
+static int mdio_read (struct net_device *dev, int phy_id, int location)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int retval = 0;
+ int i;
+
+ DPRINTK ("ENTER\n");
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ return location < 8 && mii_2_8139_map[location] ?
+ readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+ }
+ mdio_sync (mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ writeb (MDIO_DIR | dataval, mdio_addr);
+ mdio_delay ();
+ writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay ();
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ writeb (0, mdio_addr);
+ mdio_delay ();
+		retval = (retval << 1) |
+			 ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ writeb (MDIO_CLK, mdio_addr);
+ mdio_delay ();
+ }
+
+ DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff);
+ return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd =
+ (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ int i;
+
+ DPRINTK ("ENTER\n");
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ if (location < 8 && mii_2_8139_map[location]) {
+ writew (value,
+ tp->mmio_addr + mii_2_8139_map[location]);
+ readw (tp->mmio_addr + mii_2_8139_map[location]);
+ }
+ DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ return;
+ }
+ mdio_sync (mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval =
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ writeb (dataval, mdio_addr);
+ mdio_delay ();
+ writeb (dataval | MDIO_CLK, mdio_addr);
+ mdio_delay ();
+ }
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ writeb (0, mdio_addr);
+ mdio_delay ();
+ writeb (MDIO_CLK, mdio_addr);
+ mdio_delay ();
+ }
+
+ DPRINTK ("EXIT\n");
+}
+
+
+static int netdrv_open (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ int retval;
+#ifdef NETDRV_DEBUG
+ void *ioaddr = tp->mmio_addr;
+#endif
+
+ DPRINTK ("ENTER\n");
+
+ retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval) {
+ DPRINTK ("EXIT, returning %d\n", retval);
+ return retval;
+ }
+
+ tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ &tp->tx_bufs_dma);
+ tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ &tp->rx_ring_dma);
+ if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+ free_irq(dev->irq, dev);
+
+ if (tp->tx_bufs)
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ if (tp->rx_ring)
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+
+ DPRINTK ("EXIT, returning -ENOMEM\n");
+ return -ENOMEM;
+
+ }
+
+ tp->full_duplex = tp->duplex_lock;
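+	/* tx_flag holds the early-Tx FIFO threshold bits that
+	   netdrv_start_xmit() ORs into each Tx descriptor's status word;
+	   netdrv_tx_interrupt() bumps it up after a Tx FIFO underrun. */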
+ tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+
+ netdrv_init_ring (dev);
+ netdrv_hw_start (dev);
+
+ DPRINTK ("%s: netdrv_open() ioaddr %#lx IRQ %d"
+ " GP Pins %2.2x %s-duplex.\n",
+ dev->name, pci_resource_start (tp->pci_dev, 1),
+ dev->irq, NETDRV_R8 (MediaStatus),
+ tp->full_duplex ? "full" : "half");
+
+	/* Set the timer to check for link beat and perhaps switch
+	   to an alternate media type. */
+ init_timer (&tp->timer);
+ tp->timer.expires = jiffies + 3 * HZ;
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = &netdrv_timer;
+ add_timer (&tp->timer);
+
+ DPRINTK ("EXIT, returning 0\n");
+ return 0;
+}
+
+
+/* Start the hardware at open or resume. */
+static void netdrv_hw_start (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ u32 i;
+
+ DPRINTK ("ENTER\n");
+
+ /* Soft reset the chip. */
+ NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+ udelay (100);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ break;
+
+ /* Restore our idea of the MAC address. */
+ NETDRV_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
+ NETDRV_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
+
+ i = netdrv_rx_config |
+ (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F (RxConfig, i);
+
+	/* Check this value: the documentation for IFG contradicts itself. */
+ NETDRV_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
+
+ /* unlock Config[01234] and BMCR register writes */
+ NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
+ udelay (10);
+
+ tp->cur_rx = 0;
+
+ /* Lock Config[01234] and BMCR register writes */
+ NETDRV_W8_F (Cfg9346, Cfg9346_Lock);
+ udelay (10);
+
+ /* init Rx ring buffer DMA address */
+ NETDRV_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* init Tx buffer DMA addresses */
+ for (i = 0; i < NUM_TX_DESC; i++)
+ NETDRV_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+ NETDRV_W32_F (RxMissed, 0);
+
+ netdrv_set_rx_mode (dev);
+
+ /* no early-rx interrupts */
+ NETDRV_W16 (MultiIntr, NETDRV_R16 (MultiIntr) & MultiIntrClear);
+
+ /* make sure RxTx has started */
+ NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ NETDRV_W16_F (IntrMask, netdrv_intr_mask);
+
+ netif_start_queue (dev);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void netdrv_init_ring (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ int i;
+
+ DPRINTK ("ENTER\n");
+
+ tp->cur_rx = 0;
+ atomic_set (&tp->cur_tx, 0);
+ atomic_set (&tp->dirty_tx, 0);
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_info[i].skb = NULL;
+ tp->tx_info[i].mapping = 0;
+ tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+ }
+
+ DPRINTK ("EXIT\n");
+}
+
+
+static void netdrv_timer (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ int next_tick = 60 * HZ;
+ int mii_lpa;
+
+ mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+
+ if (!tp->duplex_lock && mii_lpa != 0xffff) {
+ int duplex = (mii_lpa & LPA_100FULL)
+ || (mii_lpa & 0x01C0) == 0x0040;
+ if (tp->full_duplex != duplex) {
+ tp->full_duplex = duplex;
+ printk (KERN_INFO
+ "%s: Setting %s-duplex based on MII #%d link"
+ " partner ability of %4.4x.\n", dev->name,
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
+ NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ }
+
+ DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, NETDRV_R16 (NWayLPAR));
+ DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x"
+ " RxStatus %4.4x.\n", dev->name,
+ NETDRV_R16 (IntrMask),
+ NETDRV_R16 (IntrStatus),
+ NETDRV_R32 (RxEarlyStatus));
+ DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
+ dev->name, NETDRV_R8 (Config0),
+ NETDRV_R8 (Config1));
+
+ tp->timer.expires = jiffies + next_tick;
+ add_timer (&tp->timer);
+}
+
+
+static void netdrv_tx_clear (struct netdrv_private *tp)
+{
+ int i;
+
+ atomic_set (&tp->cur_tx, 0);
+ atomic_set (&tp->dirty_tx, 0);
+
+ /* Dump the unsent Tx packets. */
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct ring_info *rp = &tp->tx_info[i];
+ if (rp->mapping != 0) {
+ pci_unmap_single (tp->pci_dev, rp->mapping,
+ rp->skb->len, PCI_DMA_TODEVICE);
+ rp->mapping = 0;
+ }
+ if (rp->skb) {
+ dev_kfree_skb (rp->skb);
+ rp->skb = NULL;
+ tp->stats.tx_dropped++;
+ }
+ }
+}
+
+
+static void netdrv_tx_timeout (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ int i;
+ u8 tmp8;
+ unsigned long flags;
+
+ DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
+ "media %2.2x.\n", dev->name,
+ NETDRV_R8 (ChipCmd),
+ NETDRV_R16 (IntrStatus),
+ NETDRV_R8 (MediaStatus));
+
+ /* disable Tx ASAP, if not already */
+ tmp8 = NETDRV_R8 (ChipCmd);
+ if (tmp8 & CmdTxEnb)
+ NETDRV_W8 (ChipCmd, tmp8 & ~CmdTxEnb);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ NETDRV_W16 (IntrMask, 0x0000);
+
+ /* Emit info to figure out what went wrong. */
+ printk (KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d.\n",
+ dev->name, atomic_read (&tp->cur_tx),
+ atomic_read (&tp->dirty_tx));
+ for (i = 0; i < NUM_TX_DESC; i++)
+ printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
+ dev->name, i, NETDRV_R32 (TxStatus0 + (i * 4)),
+ i == atomic_read (&tp->dirty_tx) % NUM_TX_DESC ?
+ " (queue head)" : "");
+
+	/* Stop a shared interrupt from scavenging while we are doing this. */
+ spin_lock_irqsave (&tp->lock, flags);
+
+ netdrv_tx_clear (tp);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ /* ...and finally, reset everything */
+ netdrv_hw_start (dev);
+
+ netif_wake_queue (dev);
+}
+
+
+
+static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ int entry;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
+
+ assert (tp->tx_info[entry].skb == NULL);
+ assert (tp->tx_info[entry].mapping == 0);
+
+ tp->tx_info[entry].skb = skb;
+ /* tp->tx_info[entry].mapping = 0; */
+ memcpy (tp->tx_buf[entry], skb->data, skb->len);
+
+ /* Note: the chip doesn't have auto-pad! */
+ NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
+ tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
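+	/* That write hands the buffer to the chip; completion is reported
+	   via the TxOK/TxErr interrupt bits and reaped in
+	   netdrv_tx_interrupt(). */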
+
+ dev->trans_start = jiffies;
+ atomic_inc (&tp->cur_tx);
+ if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
+ netif_stop_queue (dev);
+
+ DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
+ dev->name, skb->data, skb->len, entry);
+
+ return 0;
+}
+
+
+static void netdrv_tx_interrupt (struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr)
+{
+ int cur_tx, dirty_tx, tx_left;
+
+ assert (dev != NULL);
+ assert (tp != NULL);
+ assert (ioaddr != NULL);
+
+ dirty_tx = atomic_read (&tp->dirty_tx);
+
+ cur_tx = atomic_read (&tp->cur_tx);
+ tx_left = cur_tx - dirty_tx;
+ while (tx_left > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus;
+
+ txstatus = NETDRV_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+ if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+		/* Note: TxCarrierLost is always asserted at 100 Mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+			/* There was a major error; log it. */
+ DPRINTK ("%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+ tp->stats.tx_errors++;
+ if (txstatus & TxAborted) {
+ tp->stats.tx_aborted_errors++;
+ NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
+ }
+ if (txstatus & TxCarrierLost)
+ tp->stats.tx_carrier_errors++;
+ if (txstatus & TxOutOfWindow)
+ tp->stats.tx_window_errors++;
+ } else {
+ if (txstatus & TxUnderrun) {
+ /* Add 64 to the Tx FIFO threshold. */
+ if (tp->tx_flag < 0x00300000)
+ tp->tx_flag += 0x00020000;
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+ tp->stats.tx_bytes += txstatus & 0x7ff;
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ if (tp->tx_info[entry].mapping != 0) {
+ pci_unmap_single(tp->pci_dev,
+ tp->tx_info[entry].mapping,
+ tp->tx_info[entry].skb->len,
+ PCI_DMA_TODEVICE);
+ tp->tx_info[entry].mapping = 0;
+ }
+ dev_kfree_skb_irq (tp->tx_info[entry].skb);
+ tp->tx_info[entry].skb = NULL;
+ dirty_tx++;
+ if (dirty_tx < 0) { /* handle signed int overflow */
+ atomic_sub (cur_tx, &tp->cur_tx); /* XXX racy? */
+ dirty_tx = cur_tx - tx_left + 1;
+ }
+ if (netif_queue_stopped (dev))
+ netif_wake_queue (dev);
+
+ cur_tx = atomic_read (&tp->cur_tx);
+ tx_left = cur_tx - dirty_tx;
+
+ }
+
+#ifndef NETDRV_NDEBUG
+ if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
+ printk (KERN_ERR
+ "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+ dev->name, dirty_tx, atomic_read (&tp->cur_tx));
+ dirty_tx += NUM_TX_DESC;
+ }
+#endif /* NETDRV_NDEBUG */
+
+ atomic_set (&tp->dirty_tx, dirty_tx);
+}
+
+
+/* TODO: clean this up! Rx reset need not be this intensive */
+static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
+{
+ u8 tmp8;
+ int tmp_work = 1000;
+
+ DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n",
+ dev->name, rx_status);
+ if (rx_status & RxTooLong) {
+ DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
+ dev->name, rx_status);
+ /* A.C.: The chip hangs here. */
+ }
+ tp->stats.rx_errors++;
+ if (rx_status & (RxBadSymbol | RxBadAlign))
+ tp->stats.rx_frame_errors++;
+ if (rx_status & (RxRunt | RxTooLong))
+ tp->stats.rx_length_errors++;
+ if (rx_status & RxCRCErr)
+ tp->stats.rx_crc_errors++;
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+ tp->cur_rx = 0;
+
+ /* disable receive */
+ tmp8 = NETDRV_R8 (ChipCmd) & ChipCmdClear;
+ NETDRV_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+
+ /* A.C.: Reset the multicast list. */
+ netdrv_set_rx_mode (dev);
+
+ /* XXX potentially temporary hack to
+ * restart hung receiver */
+ while (--tmp_work > 0) {
+ tmp8 = NETDRV_R8 (ChipCmd);
+ if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+ break;
+ NETDRV_W8_F (ChipCmd,
+ (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
+ }
+
+ /* G.S.: Re-enable receiver */
+ /* XXX temporary hack to work around receiver hang */
+ netdrv_set_rx_mode (dev);
+
+ if (tmp_work <= 0)
+ printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+}
+
+
+/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
+ field alignments and semantics. */
+static void netdrv_rx_interrupt (struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
+{
+ unsigned char *rx_ring;
+ u16 cur_rx;
+
+ assert (dev != NULL);
+ assert (tp != NULL);
+ assert (ioaddr != NULL);
+
+ rx_ring = tp->rx_ring;
+ cur_rx = tp->cur_rx;
+
+ DPRINTK ("%s: In netdrv_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+ NETDRV_R16 (RxBufAddr),
+ NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+
+ while ((NETDRV_R8 (ChipCmd) & RxBufEmpty) == 0) {
+ int ring_offset = cur_rx % RX_BUF_LEN;
+ u32 rx_status;
+ unsigned int rx_size;
+ unsigned int pkt_size;
+ struct sk_buff *skb;
+
+ /* read size+status of next frame from DMA ring buffer */
+ rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+ rx_size = rx_status >> 16;
+ pkt_size = rx_size - 4;
+
+ DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
+ " cur %4.4x.\n", dev->name, rx_status,
+ rx_size, cur_rx);
+#if NETDRV_DEBUG > 2
+ {
+ int i;
+ DPRINTK ("%s: Frame contents ", dev->name);
+ for (i = 0; i < 70; i++)
+ printk (" %2.2x",
+ rx_ring[ring_offset + i]);
+ printk (".\n");
+ }
+#endif
+
+ /* If Rx err or invalid rx_size/rx_status received
+ * (which happens if we get lost in the ring),
+ * Rx process gets reset, so we abort any further
+ * Rx processing.
+ */
+ if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+ (!(rx_status & RxStatusOK))) {
+ netdrv_rx_err (rx_status, dev, tp, ioaddr);
+ return;
+ }
+
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+
+ /* TODO: consider allocating skb's outside of
+ * interrupt context, both to speed interrupt processing,
+ * and also to reduce the chances of having to
+ * drop packets here under memory pressure.
+ */
+
+ skb = dev_alloc_skb (pkt_size + 2);
+ if (skb) {
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align the IP fields. */
+
+ eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_put (skb, pkt_size);
+
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->last_rx = jiffies;
+ tp->stats.rx_bytes += pkt_size;
+ tp->stats.rx_packets++;
+ } else {
+ printk (KERN_WARNING
+ "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ tp->stats.rx_dropped++;
+ }
+
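+		/* Advance past this packet: rx_size counts the data plus its
+		   4-byte CRC, the extra 4 covers the per-packet header word,
+		   and the result is rounded up to a dword boundary.  RxBufPtr
+		   is kept 16 bytes behind the read pointer, which the
+		   hardware appears to expect. */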
+ cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+ NETDRV_W16_F (RxBufPtr, cur_rx - 16);
+ }
+
+ DPRINTK ("%s: Done netdrv_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+ NETDRV_R16 (RxBufAddr),
+ NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+
+ tp->cur_rx = cur_rx;
+}
+
+
+static void netdrv_weird_interrupt (struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr,
+ int status, int link_changed)
+{
+ printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
+ dev->name, status);
+
+ assert (dev != NULL);
+ assert (tp != NULL);
+ assert (ioaddr != NULL);
+
+ /* Update the error count. */
+ tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
+ NETDRV_W32 (RxMissed, 0);
+
+ if ((status & RxUnderrun) && link_changed &&
+ (tp->drv_flags & HAS_LNK_CHNG)) {
+ /* Really link-change on new chips. */
+ int lpar = NETDRV_R16 (NWayLPAR);
+ int duplex = (lpar & 0x0100) || (lpar & 0x01C0) == 0x0040
+ || tp->duplex_lock;
+ if (tp->full_duplex != duplex) {
+ tp->full_duplex = duplex;
+ NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ status &= ~RxUnderrun;
+ }
+
+ /* XXX along with netdrv_rx_err, are we double-counting errors? */
+ if (status &
+ (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+ tp->stats.rx_errors++;
+
+ if (status & (PCSTimeout))
+ tp->stats.rx_length_errors++;
+ if (status & (RxUnderrun | RxFIFOOver))
+ tp->stats.rx_fifo_errors++;
+ if (status & RxOverflow) {
+ tp->stats.rx_over_errors++;
+ tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN;
+ NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16);
+ }
+ if (status & PCIErr) {
+ u16 pci_cmd_status;
+ pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+
+ printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
+ dev->name, pci_cmd_status);
+ }
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t netdrv_interrupt (int irq, void *dev_instance,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct netdrv_private *tp = dev->priv;
+ int boguscnt = max_interrupt_work;
+ void *ioaddr = tp->mmio_addr;
+ int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
+ int handled = 0;
+
+ spin_lock (&tp->lock);
+
+ do {
+ status = NETDRV_R16 (IntrStatus);
+
+ /* h/w no longer present (hotplug?) or major error, bail */
+ if (status == 0xFFFF)
+ break;
+
+ handled = 1;
+ /* Acknowledge all of the current interrupt sources ASAP */
+ NETDRV_W16_F (IntrStatus, status);
+
+ DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
+ dev->name, status,
+ NETDRV_R16 (IntrStatus));
+
+ if ((status &
+ (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
+ break;
+
+ /* Check uncommon events with one test. */
+ if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
+ RxFIFOOver | TxErr | RxErr))
+ netdrv_weird_interrupt (dev, tp, ioaddr,
+ status, link_changed);
+
+ if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
+ netdrv_rx_interrupt (dev, tp, ioaddr);
+
+ if (status & (TxOK | TxErr))
+ netdrv_tx_interrupt (dev, tp, ioaddr);
+
+ boguscnt--;
+ } while (boguscnt > 0);
+
+ if (boguscnt <= 0) {
+ printk (KERN_WARNING
+ "%s: Too much work at interrupt, "
+ "IntrStatus=0x%4.4x.\n", dev->name,
+ status);
+
+ /* Clear all interrupt sources. */
+ NETDRV_W16 (IntrStatus, 0xffff);
+ }
+
+ spin_unlock (&tp->lock);
+
+ DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
+ dev->name, NETDRV_R16 (IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+
+static int netdrv_close (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ DPRINTK ("ENTER\n");
+
+ netif_stop_queue (dev);
+
+ DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, NETDRV_R16 (IntrStatus));
+
+ del_timer_sync (&tp->timer);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Stop the chip's Tx and Rx DMA processes. */
+ NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ NETDRV_W16 (IntrMask, 0x0000);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
+ NETDRV_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ synchronize_irq ();
+ free_irq (dev->irq, dev);
+
+ netdrv_tx_clear (tp);
+
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ tp->rx_ring = NULL;
+ tp->tx_bufs = NULL;
+
+ /* Green! Put the chip in low-power mode. */
+ NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8 (Config1, 0x03);
+ NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+
+ DPRINTK ("EXIT\n");
+ return 0;
+}
+
+
+static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdrv_private *tp = dev->priv;
+ struct mii_ioctl_data *data = if_mii(rq);
+ unsigned long flags;
+ int rc = 0;
+
+ DPRINTK ("ENTER\n");
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = tp->phys[0] & 0x3f;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irqsave (&tp->lock, flags);
+ data->val_out = mdio_read (dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ spin_unlock_irqrestore (&tp->lock, flags);
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable (CAP_NET_ADMIN)) {
+ rc = -EPERM;
+ break;
+ }
+
+ spin_lock_irqsave (&tp->lock, flags);
+ mdio_write (dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irqrestore (&tp->lock, flags);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ DPRINTK ("EXIT, returning %d\n", rc);
+ return rc;
+}
+
+
+static struct net_device_stats *netdrv_get_stats (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+
+ DPRINTK ("ENTER\n");
+
+ assert (tp != NULL);
+
+ if (netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
+ NETDRV_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+ }
+
+ DPRINTK ("EXIT\n");
+ return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   When called from process context this routine takes tp->lock itself;
+   from the interrupt handler the lock is already held. */
+
+static void netdrv_set_rx_mode (struct net_device *dev)
+{
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+ u32 tmp;
+
+ DPRINTK ("ENTER\n");
+
+ DPRINTK ("%s: netdrv_set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
+ dev->name, dev->flags, NETDRV_R32 (RxConfig));
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ }
+
+ /* if called from irq handler, lock already acquired */
+ if (!in_irq ())
+ spin_lock_irq (&tp->lock);
+
+ /* We can safely update without stopping the chip. */
+ tmp = netdrv_rx_config | rx_mode |
+ (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F (RxConfig, tmp);
+ NETDRV_W32_F (MAR0 + 0, mc_filter[0]);
+ NETDRV_W32_F (MAR0 + 4, mc_filter[1]);
+
+ if (!in_irq ())
+ spin_unlock_irq (&tp->lock);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+#ifdef CONFIG_PM
+
+static int netdrv_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdrv_private *tp = dev->priv;
+ void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+ netif_device_detach (dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ NETDRV_W16 (IntrMask, 0x0000);
+ NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
+ NETDRV_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ pci_save_state (pdev);
+ pci_set_power_state (pdev, PCI_D3hot);
+
+ return 0;
+}
+
+
+static int netdrv_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdrv_private *tp = dev->priv;
+
+ if (!netif_running(dev))
+ return 0;
+ pci_set_power_state (pdev, PCI_D0);
+ pci_restore_state (pdev);
+ netif_device_attach (dev);
+ netdrv_hw_start (dev);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver netdrv_pci_driver = {
+ .name = MODNAME,
+ .id_table = netdrv_pci_tbl,
+ .probe = netdrv_init_one,
+ .remove = __devexit_p(netdrv_remove_one),
+#ifdef CONFIG_PM
+ .suspend = netdrv_suspend,
+ .resume = netdrv_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init netdrv_init_module (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init (&netdrv_pci_driver);
+}
+
+
+static void __exit netdrv_cleanup_module (void)
+{
+ pci_unregister_driver (&netdrv_pci_driver);
+}
+
+
+module_init(netdrv_init_module);
+module_exit(netdrv_cleanup_module);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
new file mode 100644
index 000000000000..41e517114807
--- /dev/null
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -0,0 +1,1307 @@
+/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner".
+
+ Written 1993-1998 by
+ Donald Becker, becker@scyld.com, (driver core) and
+ David Hinds, dahinds@users.sourceforge.net (from his PC card code).
+ Locking fixes (C) Copyright 2003 Red Hat Inc
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver derives from Donald Becker's 3c509 core, which has the
+ following copyright:
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+
+*/
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c574 PC card Fast Ethernet
+Adapter.
+
+II. Board-specific settings
+
+None -- PC cards are autoconfigured.
+
+III. Driver operation
+
+The 3c574 uses a Boomerang-style interface, without the bus-master capability.
+See the Boomerang driver and documentation for most details.
+
+IV. Notes and chip documentation.
+
+Two added registers are used to enhance PIO performance, RunnerRdCtrl and
+RunnerWrCtrl. These are 11-bit down-counters that are preloaded with the
+count of 16-bit word reads or writes the driver is about to do to the Rx
+or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card
+translation latency by buffering the I/O operations with an 8 word FIFO.
+Note: No other chip accesses are permitted when this buffer is used.
+
+A second enhancement is that both attribute and common memory space
+0x0800-0x0fff can be translated to the PIO FIFO. Thus memory operations (faster
+with *some* PCcard bridges) may be used instead of I/O operations.
+This is enabled by setting the 0x10 bit in the PCMCIA LAN COR.
+
+Some slow PC card bridges work better if they never see a WAIT signal.
+This is configured by setting the 0x20 bit in the PCMCIA LAN COR.
+Only do this after testing that it is reliable and improves performance.
+
+The upper five bits of RunnerRdCtrl are used to window into PCcard
+configuration space registers. Window 0 is the regular Boomerang/Odie
+register set, 1-5 are various PC card control registers, and 16-31 are
+the (reversed!) CIS table.
+
+A final note: writing the InternalConfig register in window 3 with an
+invalid ramWidth is Very Bad.
+
+V. References
+
+http://www.scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840.html
+
+Thanks to Terry Murphy of 3Com for providing development information for
+earlier 3Com products.
+
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/mem_op.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+INT_MODULE_PARM(max_interrupt_work, 32);
+
+/* Force full duplex modes? */
+INT_MODULE_PARM(full_duplex, 0);
+
+/* Autodetect link polarity reversal? */
+INT_MODULE_PARM(auto_polarity, 1);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"3c574_cs.c 1.65ac1 2003/04/07 Donald Becker/David Hinds, becker@scyld.com.\n";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((800*HZ)/1000)
+
+/* To minimize the size of the driver source and make the driver more
+   readable, not all constants are symbolically defined.
+ You'll need the manual if you want to understand driver details anyway. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum el3_cmds {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
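+
+/* A command is issued by writing one of the opcodes above, plus any
+   parameter in the low 11 bits, to EL3_CMD -- see the EL3WINDOW() macro,
+   which writes SelectWindow + window number. */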
+
+enum elxl_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+enum Window0 {
+ Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+/* These assume the larger EEPROM. */
+enum Win0_EEPROM_cmds {
+ EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+   On the "Odie" this window is always mapped at offsets 0x10-0x1f, except
+   for TxFree, which is overlapped by RunnerWrCtrl. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */
+ RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c,
+};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+struct el3_private {
+ dev_link_t link;
+ dev_node_t node;
+ struct net_device_stats stats;
+ u16 advertising, partner; /* NWay media advertisement */
+ unsigned char phys; /* MII device address */
+ unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
+ /* for transceiver monitoring */
+ struct timer_list media;
+ unsigned short media_status;
+ unsigned short fast_poll;
+ unsigned long last_irq;
+ spinlock_t window_lock; /* Guards the Window selection */
+};
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the
+   extra code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Index of functions. */
+
+static void tc574_config(dev_link_t *link);
+static void tc574_release(dev_link_t *link);
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void mdio_sync(kio_addr_t ioaddr, int bits);
+static int mdio_read(kio_addr_t ioaddr, int phy_id, int location);
+static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value);
+static unsigned short read_eeprom(kio_addr_t ioaddr, int index);
+static void tc574_wait_for_completion(struct net_device *dev, int cmd);
+
+static void tc574_reset(struct net_device *dev);
+static void media_check(unsigned long arg);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev, int worklimit);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static void set_rx_mode(struct net_device *dev);
+
+static dev_info_t dev_info = "3c574_cs";
+
+static dev_link_t *tc574_attach(void);
+static void tc574_detach(dev_link_t *);
+
+static dev_link_t *dev_list;
+
+/*
+ tc574_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+*/
+
+static dev_link_t *tc574_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int ret;
+
+ DEBUG(0, "3c574_attach()\n");
+
+ /* Create the PC card device object. */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return NULL;
+ lp = netdev_priv(dev);
+ link = &lp->link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->window_lock);
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &el3_interrupt;
+ link->irq.Instance = dev;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->get_stats = &el3_get_stats;
+ dev->do_ioctl = &el3_ioctl;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->set_multicast_list = &set_rx_mode;
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc574_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc574_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc574_attach */
+
+/*
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+*/
+
+static void tc574_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c574_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ tc574_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* tc574_detach */
+
+/*
+ tc574_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+*/
+
+#define CS_CHECK(fn, ret) \
+ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
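+
+/* CS_CHECK() wraps a Card Services call: on failure it records which call
+   failed and jumps to cs_failed, where cs_error() reports it. */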
+
+static char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+
+static void tc574_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ struct el3_private *lp = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ unsigned short buf[32];
+ int last_fn, last_ret, i, j;
+ kio_addr_t ioaddr;
+ u16 *phys_addr;
+ char *cardname;
+ union wn3_config config;
+
+ phys_addr = (u16 *)dev->dev_addr;
+
+ DEBUG(0, "3c574_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->io.IOAddrLines = 16;
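+	/* Probe for a free 32-port I/O window; "j ^ 0x300" tries bases in
+	   the 0x300 range first, then 0x200, 0x100 and 0x000. */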
+ for (i = j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ ioaddr = dev->base_addr;
+
+ /* The 3c574 normally uses an EEPROM for configuration info, including
+	   the hardware address. Future products may include a modem chip
+ and put the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
+ pcmcia_get_tuple_data(handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS &&
+ pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS &&
+ pcmcia_parse_tuple(handle, &tuple, &parse) == CS_SUCCESS) {
+ cardname = parse.version_1.str + parse.version_1.ofs[1];
+ } else
+ cardname = "3Com 3c574";
+
+ {
+ u_char mcr;
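+		/* Use the top bits of RunnerRdCtrl (see the Theory of
+		   Operation above) to window in the card configuration
+		   registers, read the ASIC revision, then restore window 0. */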
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ mcr = inb(ioaddr + 2);
+ outw(0<<11, ioaddr + RunnerRdCtrl);
+ printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ lp->default_media = config.u.xcvr;
+ lp->autoselect = config.u.autoselect;
+ }
+
+ init_timer(&lp->media);
+
+ {
+ int phy;
+
+ /* Roadrunner only: Turn on the MII transceiver */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ EL3WINDOW(4);
+ for (phy = 1; phy <= 32; phy++) {
+ int mii_status;
+ mdio_sync(ioaddr, 32);
+ mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
+ if (mii_status != 0xffff) {
+ lp->phys = phy & 0x1f;
+ DEBUG(0, " MII transceiver at index %d, status %x.\n",
+ phy, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required = 1;
+ break;
+ }
+ }
+ if (phy > 32) {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ goto failed;
+ }
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ lp->advertising = mdio_read(ioaddr, lp->phys, 4);
+ if (full_duplex) {
+ /* Only advertise the FD media types. */
+ lp->advertising &= ~0x02a0;
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ }
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ link->dev = &lp->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(lp->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: %s at io %#3lx, irq %d, hw_addr ",
+ dev->name, cardname, dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : ".\n"));
+ printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+ 8 << config.u.ram_size, ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect " : "");
+
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc574_release(link);
+ return;
+
+} /* tc574_config */
+
+/*
+ After a card is removed, tc574_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+*/
+
+static void tc574_release(dev_link_t *link)
+{
+ DEBUG(0, "3c574_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+*/
+
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(1, "3c574_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc574_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ tc574_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc574_event */
+
+static void dump_status(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc574_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 1500;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n", dev->name, cmd);
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static unsigned short read_eeprom(kio_addr_t ioaddr, int index)
+{
+ int timer;
+ outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 usec for the read to take place. */
+ for (timer = 1620; timer >= 0; timer--) {
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ return inw(ioaddr + Wn0EepromData);
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+   The maximum data clock rate is 2.5 MHz. The timing is easily met by the
+ slow PC card interface. */
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
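+
+/* The MDIO frame is clocked out through the Wn4_PhysicalMgmt register,
+   one bit per MDIO_SHIFT_CLK pulse, so window 4 must be selected first. */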
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(kio_addr_t ioaddr, int bits)
+{
+ kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ }
+}
+
+static int mdio_read(kio_addr_t ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+
+ return;
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int i;
+ kio_addr_t ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ tc574_wait_for_completion(dev, TotalReset|0x10);
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ /* Clear any transactions in progress. */
+ outw(0, ioaddr + RunnerWrCtrl);
+ outw(0, ioaddr + RunnerRdCtrl);
+
+ /* Set the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ /* Reset config options */
+ EL3WINDOW(3);
+ outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+ ioaddr + Wn3_Config);
+ /* Roadrunner only: Turn on the MII transceiver. */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ spin_lock_irqsave(&lp->window_lock, flags);
+ EL3WINDOW(3);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ inb(ioaddr + 13);
+
+ /* .. enable any extra statistics bits.. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+ /* .. re-sync MII and re-fill what NWay is advertising. */
+ mdio_sync(ioaddr, 32);
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ if (!auto_polarity) {
+ /* works for TDK 78Q2120 series MII's */
+ int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ }
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ /* Switch to register set 1 for normal use, just for TxFree. */
+ set_rx_mode(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure | RxEarly, ioaddr + EL3_CMD);
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ netif_start_queue(dev);
+
+ tc574_reset(dev);
+ lp->media.function = &media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(2, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TxStatus);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc574_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0, ioaddr + TX_FIFO);
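+ /* Push the frame as whole 32-bit words: a 60-byte frame, for example,
+ becomes (60 + 3) >> 2 = 15 longword writes, so up to three pad bytes
+ past the end of the frame data may be written to the FIFO. */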
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
+
+ dev->trans_start = jiffies;
+
+ /* TxFree appears only in Window 1, not offset 0x1c. */
+ if (inw(ioaddr + TxFree) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet.
+ The threshold is in units of dwords. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr;
+ unsigned status;
+ int work_budget = max_interrupt_work;
+ int handled = 0;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ spin_lock(&lp->window_lock);
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | RxEarly | StatsFull)) {
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ DEBUG(1, "%s: Interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ handled = 1;
+
+ if (status & RxComplete)
+ work_budget = el3_rx(dev, work_budget);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull)
+ update_stats(dev);
+ if (status & RxEarly) {
+ work_budget = el3_rx(dev, work_budget);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ EL3WINDOW(1);
+ printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc574_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--work_budget < 0) {
+ DEBUG(0, "%s: Too much work in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ spin_unlock(&lp->window_lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ This timer serves two purposes: to check for missed interrupts
+ (and as a last resort, poll the NIC for events), and to monitor
+ the MII, reporting changes in cable status.
+*/
+static void media_check(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ unsigned long flags;
+ unsigned short /* cable, */ media, partner;
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, dev, NULL);
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 2*HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ EL3WINDOW(4);
+ media = mdio_read(ioaddr, lp->phys, 1);
+ partner = mdio_read(ioaddr, lp->phys, 5);
+ EL3WINDOW(1);
+
+ if (media != lp->media_status) {
+ if ((media ^ lp->media_status) & 0x0004)
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0004) ? "lost" : "found");
+ if ((media ^ lp->media_status) & 0x0020) {
+ lp->partner = 0;
+ if (lp->media_status & 0x0020) {
+ printk(KERN_INFO "%s: autonegotiation restarted\n",
+ dev->name);
+ } else if (partner) {
+ partner &= lp->advertising;
+ lp->partner = partner;
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((partner & 0x0180) ? "100" : "10"),
+ ((partner & 0x0140) ? 'F' : 'H'));
+ } else {
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ }
+
+ EL3WINDOW(3);
+ outb((partner & 0x0140 ? 0x20 : 0) |
+ (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ EL3WINDOW(1);
+
+ }
+ if (media & 0x0010)
+ printk(KERN_INFO "%s: remote fault detected\n",
+ dev->name);
+ if (media & 0x0002)
+ printk(KERN_INFO "%s: jabber detected\n", dev->name);
+ lp->media_status = media;
+ }
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+
+ if (netif_device_present(dev)) {
+ unsigned long flags;
+ spin_lock_irqsave(&lp->window_lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ }
+ return &lp->stats;
+}
+
+/* Update statistics.
+ Surprisingly, this need not be run single-threaded, but it effectively is.
+ The counters clear when read, so the adds must merely be atomic.
+ */
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 rx, tx;
+ u8 up;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+
+ if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
+ return;
+
+ /* Unlike the 3c509, we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ up = inb(ioaddr + 9);
+ lp->stats.tx_packets += (up&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ rx = inw(ioaddr + 10);
+ tx = inw(ioaddr + 12);
+
+ EL3WINDOW(4);
+ /* BadSSD */ inb(ioaddr + 12);
+ up = inb(ioaddr + 13);
+
+ lp->stats.tx_bytes += tx + ((up & 0xf0) << 12);
+
+ EL3WINDOW(1);
+}
+
+static int el3_rx(struct net_device *dev, int worklimit)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
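+ /* The extra 5 bytes cover the skb_reserve(2) below (so the IP header
+ lands on a longword boundary after the 14-byte Ethernet header) plus
+ up to 3 bytes of overrun from the longword-rounded insl(). */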
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ ((pkt_len+3)>>2));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ tc574_wait_for_completion(dev, RxDiscard);
+ }
+
+ return worklimit;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "3c574_cs");
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_ifru;
+ int phy = lp->phys & 0x1f;
+
+ DEBUG(2, "%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data[0], data[1], data[2], data[3]);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get the address of the PHY in use. */
+ data[0] = phy;
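+ /* Fall through: also read the register selected in data[1]. */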
+ case SIOCGMIIREG: /* Read the specified MII register. */
+ {
+ int saved_window;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ EL3WINDOW(saved_window);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ return 0;
+ }
+ case SIOCSMIIREG: /* Write the specified MII register */
+ {
+ int saved_window;
+ unsigned long flags;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&lp->window_lock, flags);
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ EL3WINDOW(saved_window);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
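+/* Hypothetical user-space sketch (not part of the driver): these ioctls use
+ the old-style MII interface in which ifr_ifru is treated as an array of
+ u16 values (PHY address, register number, value in, value out), mirroring
+ the cast above. The interface name "eth0", the helper name, and the socket
+ descriptor "fd" are illustrative assumptions. */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/sockios.h>
+
+static int example_read_bmsr(int fd)
+{
+ struct ifreq ifr;
+ unsigned short *data = (unsigned short *)&ifr.ifr_ifru;
+
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) /* data[0] = PHY address */
+ return -1;
+ data[1] = 1; /* MII register 1 (BMSR) */
+ if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
+ return -1;
+ return data[3]; /* register value */
+}
+#endif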
+
+/* The Odie chip has a 64-bin multicast filter, but the bit layout is not
+ documented. Until it is, we revert to receiving all multicast frames when
+ any multicast reception is desired.
+ Note: My other drivers emit a log message whenever promiscuous mode is
+ entered to help detect password sniffers. This is less desirable on
+ typical PC card machines, so we omit the message.
+ */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC)
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ unsigned long flags;
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ /* Note: Switching to window 0 may disable the IRQ. */
+ EL3WINDOW(0);
+ spin_lock_irqsave(&lp->window_lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
+
+ return 0;
+}
+
+static struct pcmcia_driver tc574_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "3c574_cs",
+ },
+ .attach = tc574_attach,
+ .detach = tc574_detach,
+};
+
+static int __init init_tc574(void)
+{
+ return pcmcia_register_driver(&tc574_driver);
+}
+
+static void __exit exit_tc574(void)
+{
+ pcmcia_unregister_driver(&tc574_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_tc574);
+module_exit(exit_tc574);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
new file mode 100644
index 000000000000..89abdda1d343
--- /dev/null
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -0,0 +1,1081 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for the 3com 3c589 card.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ 3c589_cs.c 1.162 2001/10/13 00:08:50
+
+ The network driver code is based on Donald Becker's 3c589 code:
+
+ Written 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Updated for 2.5.x by Alan Cox <alan@redhat.com>
+
+======================================================================*/
+
+#define DRV_NAME "3c589_cs"
+#define DRV_VERSION "1.162-ac"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_TIMER 0x0a
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EEPROM_READ 0x0080
+#define EEPROM_BUSY 0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
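+/* Worked example of this encoding: SetTxThreshold with a 1536-byte argument
+ is a single 16-bit write of (18<<11) | 1536 = 0x9600, which is exactly what
+ el3_start_xmit() below issues as "outw(SetTxThreshold + 1536, ...)". */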
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transceiver/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED 0x0001 /* Enable link light on 3C589E cards. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+struct el3_private {
+ dev_link_t link;
+ dev_node_t node;
+ struct net_device_stats stats;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
+};
+
+static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* Special hook for setting if_port when module is loaded */
+INT_MODULE_PARM(if_port, 0);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void tc589_config(dev_link_t *link);
+static void tc589_release(dev_link_t *link);
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static u16 read_eeprom(kio_addr_t ioaddr, int index);
+static void tc589_reset(struct net_device *dev);
+static void media_check(unsigned long arg);
+static int el3_config(struct net_device *dev, struct ifmap *map);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+static dev_info_t dev_info = "3c589_cs";
+
+static dev_link_t *tc589_attach(void);
+static void tc589_detach(dev_link_t *);
+
+static dev_link_t *dev_list;
+
+/*======================================================================
+
+ tc589_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *tc589_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int ret;
+
+ DEBUG(0, "3c589_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return NULL;
+ lp = netdev_priv(dev);
+ link = &lp->link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->lock);
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &el3_interrupt;
+ link->irq.Instance = dev;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->set_config = &el3_config;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc589_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc589_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc589_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void tc589_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c589_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ tc589_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* tc589_detach */
+
+/*======================================================================
+
+ tc589_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static void tc589_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ struct el3_private *lp = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ u16 buf[32], *phys_addr;
+ int last_fn, last_ret, i, j, multi = 0, fifo;
+ kio_addr_t ioaddr;
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+
+ DEBUG(0, "3c589_config(0x%p)\n", link);
+
+ phys_addr = (u16 *)dev->dev_addr;
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Is this a 3c562? */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
+ (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) {
+ if (le16_to_cpu(buf[0]) != MANFID_3COM)
+ printk(KERN_INFO "3c589_cs: hmmm, is this really a "
+ "3Com card??\n");
+ multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ link->io.IOAddrLines = 16;
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80)) continue;
+ link->io.BasePort1 = j ^ 0x300;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ the hardware address. The 3c562 puts the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
+ pcmcia_get_tuple_data(handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_ERR "3c589_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
+
+ link->dev = &lp->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(lp->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, hw_addr ",
+ dev->name, (multi ? "562" : "589"), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc589_release(link);
+ return;
+
+} /* tc589_config */
+
+/*======================================================================
+
+ After a card is removed, tc589_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void tc589_release(dev_link_t *link)
+{
+ DEBUG(0, "3c589_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(1, "3c589_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc589_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ tc589_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc589_event */
+
+/*====================================================================*/
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc589_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
+ dev->name, cmd);
+}
+
+/*
+ Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+*/
+static u16 read_eeprom(kio_addr_t ioaddr, int index)
+{
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
+}
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void tc589_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0: case 1: outw(0, ioaddr + 6); break;
+ case 2: outw(3<<14, ioaddr + 6); break;
+ case 3: outw(1<<14, ioaddr + 6); break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+}
+
+static void dump_status(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
+ inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/* Reset and restore all of the 3c589 registers. */
+static void tc589_reset(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept b-cast and phys addr only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure, ioaddr + EL3_CMD);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+#ifdef PCMCIA_DEBUG
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return pc_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ pc_debug = level;
+}
+#endif /* PCMCIA_DEBUG */
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+#ifdef PCMCIA_DEBUG
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+#endif /* PCMCIA_DEBUG */
+};
+
+static int el3_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ netif_start_queue(dev);
+
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = &media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(1, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ priv->stats.tx_bytes += skb->len;
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
+
+ dev_kfree_skb(skb);
+ pop_tx_status(dev);
+
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+ if ((status & 0xe000) != 0x2000) {
+ DEBUG(1, "%s: interrupt from dead card\n", dev->name);
+ handled = 0;
+ break;
+ }
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_multicast_list(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (++i > 10) {
+ printk(KERN_ERR "%s: infinite loop in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
+}
+
+static void media_check(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ EL3WINDOW(1);
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + EL3_TIMER) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, dev, NULL);
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ when the lock is held */
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (jiffies - lp->last_irq < HZ) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
+ }
+
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
+ ((lp->media_status ^ media) & 0x0010))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ tc589_set_xcvr(dev, 2);
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ lp->media_status = media;
+ }
+
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ dev_link_t *link = &lp->link;
+
+ if (DEV_OK(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &lp->stats;
+}
+
+/*
+ Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+
+ Caller must hold the lock for this
+*/
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Rx octets */ inw(ioaddr + 10);
+ /* Tx octets */ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+}
+
+static int el3_rx(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len+3)>>2);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
+ }
+ if (worklimit == 0)
+ printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name);
+ return 0;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (!(DEV_OK(link))) return;
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
+
+ return 0;
+}
+
+static struct pcmcia_driver tc589_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "3c589_cs",
+ },
+ .attach = tc589_attach,
+ .detach = tc589_detach,
+};
+
+static int __init init_tc589(void)
+{
+ return pcmcia_register_driver(&tc589_driver);
+}
+
+static void __exit exit_tc589(void)
+{
+ pcmcia_unregister_driver(&tc589_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_tc589);
+module_exit(exit_tc589);
diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig
new file mode 100644
index 000000000000..74f862001247
--- /dev/null
+++ b/drivers/net/pcmcia/Kconfig
@@ -0,0 +1,132 @@
+#
+# PCMCIA Network device configuration
+#
+
+menu "PCMCIA network device support"
+ depends on NETDEVICES && PCMCIA!=n
+
+config NET_PCMCIA
+ bool "PCMCIA network device support"
+ ---help---
+ Say Y if you would like to include support for any PCMCIA or CardBus
+ network adapters, then say Y to the driver for your particular card
+ below. PCMCIA cards (PC Cards) are credit-card-sized devices often used
+ with laptop computers; CardBus is the newer and faster version of
+ PCMCIA.
+
+ To use your PC-cards, you will need supporting software from David
+ Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+ for location). You also want to check out the PCMCIA-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say N.
+
+config PCMCIA_3C589
+ tristate "3Com 3c589 PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ help
+ Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
+ (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c589_cs. If unsure, say N.
+
+config PCMCIA_3C574
+ tristate "3Com 3c574 PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ help
+ Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
+ (PC-card) Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c574_cs. If unsure, say N.
+
+config PCMCIA_FMVJ18X
+ tristate "Fujitsu FMV-J18x PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ select CRC32
+ help
+ Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called fmvj18x_cs. If unsure, say N.
+
+config PCMCIA_PCNET
+ tristate "NE2000 compatible PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ select CRC32
+ help
+ Say Y here if you intend to attach an NE2000 compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcnet_cs. If unsure, say N.
+
+config PCMCIA_NMCLAN
+ tristate "New Media PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ help
+ Say Y here if you intend to attach a New Media Ethernet or LiveWire
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called nmclan_cs. If unsure, say N.
+
+config PCMCIA_SMC91C92
+ tristate "SMC 91Cxx PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ select CRC32
+ select MII
+ help
+ Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called smc91c92_cs. If unsure, say N.
+
+config PCMCIA_XIRC2PS
+ tristate "Xircom 16-bit PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ help
+ Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
+ Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called xirc2ps_cs. If unsure, say N.
+
+config PCMCIA_AXNET
+ tristate "Asix AX88190 PCMCIA support"
+ depends on NET_PCMCIA && PCMCIA
+ ---help---
+ Say Y here if you intend to attach an Asix AX88190-based PCMCIA
+ (PC-card) Fast Ethernet card to your computer. These cards are
+ nearly NE2000 compatible but need a separate driver due to a few
+ misfeatures.
+
+ To compile this driver as a module, choose M here: the module will be
+ called axnet_cs. If unsure, say N.
+
+config ARCNET_COM20020_CS
+ tristate "COM20020 ARCnet PCMCIA support"
+ depends on NET_PCMCIA && ARCNET_COM20020 && PCMCIA
+ help
+ Say Y here if you intend to attach this type of ARCnet PCMCIA card
+ to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called com20020_cs. If unsure, say N.
+
+config PCMCIA_IBMTR
+ tristate "IBM PCMCIA tokenring adapter support"
+ depends on NET_PCMCIA && IBMTR!=y && TR && PCMCIA && !64BIT
+ help
+ Say Y here if you intend to attach this type of Token Ring PCMCIA
+ card to your computer. You then also need to say Y to "Token Ring
+ driver support".
+
+ To compile this driver as a module, choose M here: the module will be
+ called ibmtr_cs.
+
+endmenu
+
diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile
new file mode 100644
index 000000000000..87d2d99f4c14
--- /dev/null
+++ b/drivers/net/pcmcia/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the Linux PCMCIA network device drivers.
+#
+
+# 16-bit client drivers
+obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
+obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o
+obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
+obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
+obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o
+obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o
+obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o
+obj-$(CONFIG_ARCNET_COM20020_CS)+= com20020_cs.o
+obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o
+
+obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
new file mode 100644
index 000000000000..853b586e481a
--- /dev/null
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -0,0 +1,1864 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for Asix AX88190-based cards
+
+ The Asix AX88190 is an NS8390-derived chipset with a few nasty
+ idiosyncrasies that make it very inconvenient to support with a
+ standard 8390 driver. This driver is based on pcnet_cs, with the
+ tweaked 8390 code grafted on the end. Much of what I did was to
+ clean up and update a similar driver supplied by Asix, which was
+ adapted by William Lee, william@asix.com.tw.
+
+ Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net
+
+ axnet_cs.c 1.28 2002/06/29 06:27:37
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include "../8390.h"
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define AXNET_CMD 0x00
+#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define AXNET_MII_EEP 0x14 /* Offset of MII access port */
+#define AXNET_TEST 0x15 /* Offset of TEST Register port */
+#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */
+
+#define AXNET_START_PG 0x40 /* First page of TX buffer */
+#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */
+
+#define IS_AX88190 0x0001
+#define IS_AX88790 0x0002
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#ifdef PCMCIA_DEBUG
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"axnet_cs.c 1.28 2002/06/29 06:27:37 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void axnet_config(dev_link_t *link);
+static void axnet_release(dev_link_t *link);
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int axnet_open(struct net_device *dev);
+static int axnet_close(struct net_device *dev);
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void axnet_reset_8390(struct net_device *dev);
+
+static int mdio_read(kio_addr_t addr, int phy_id, int loc);
+static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value);
+
+static void get_8390_hdr(struct net_device *,
+ struct e8390_pkt_hdr *, int);
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page);
+
+static dev_link_t *axnet_attach(void);
+static void axnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "axnet_cs";
+static dev_link_t *dev_list;
+
+static void axdev_setup(struct net_device *dev);
+static void AX88190_init(struct net_device *dev, int startp);
+static int ax_open(struct net_device *dev);
+static int ax_close(struct net_device *dev);
+static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*====================================================================*/
+
+typedef struct axnet_dev_t {
+ dev_link_t link;
+ dev_node_t node;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+} axnet_dev_t;
+
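+/* alloc_netdev() in axnet_attach() sizes the private area as a struct
+ ei_device followed by an axnet_dev_t; PRIV() steps past the ei_device
+ part to reach the axnet-specific state. */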
+static inline axnet_dev_t *PRIV(struct net_device *dev)
+{
+ void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device);
+ return p;
+}
+
+/*======================================================================
+
+ axnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *axnet_attach(void)
+{
+ axnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int ret;
+
+ DEBUG(0, "axnet_attach()\n");
+
+ dev = alloc_netdev(sizeof(struct ei_device) + sizeof(axnet_dev_t),
+ "eth%d", axdev_setup);
+
+ if (!dev)
+ return NULL;
+
+ info = PRIV(dev);
+ link = &info->link;
+ link->priv = dev;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ dev->open = &axnet_open;
+ dev->stop = &axnet_close;
+ dev->do_ioctl = &axnet_ioctl;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &axnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ axnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* axnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void axnet_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "axnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ axnet_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* axnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+
+======================================================================*/
+
+static int get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* This is based on drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x01, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {0x10, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */
+ {0x04, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return 0;
+
+ axnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + AXNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ return 1;
+} /* get_prom */
+
+/*======================================================================
+
+ axnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
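+ /* Probe all 0x20-aligned ports below 0x400; XORing with 0x300 makes
+ the traditional 0x300-0x3ff range get tried first. */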
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = pcmcia_request_io(link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return pcmcia_request_io(link->handle, &link->io);
+ }
+}
+
+static void axnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, j, last_ret, last_fn;
+ u_short buf[64];
+ config_info_t conf;
+
+ DEBUG(0, "axnet_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ /* don't trust the CIS on this; Linksys got it wrong */
+ link->conf.Present = 0x63;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
+ link->conf.Vcc = conf.Vcc;
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
+ pcmcia_parse_tuple(handle, &tuple, &parse) != 0 ||
+ cfg->index == 0 || cfg->io.nwin == 0)
+ goto next_entry;
+
+ link->conf.ConfigIndex = 0x05;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ if (!get_prom(link)) {
+ printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
+ printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
+ goto failed;
+ }
+
+ ei_status.name = "AX88190";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = AXNET_START_PG;
+ ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
+ ei_status.stop_page = AXNET_STOP_PG;
+ ei_status.reset_8390 = &axnet_reset_8390;
+ ei_status.get_8390_hdr = &get_8390_hdr;
+ ei_status.block_input = &block_input;
+ ei_status.block_output = &block_output;
+
+ if (inb(dev->base_addr + AXNET_TEST) != 0)
+ info->flags |= IS_AX88790;
+ else
+ info->flags |= IS_AX88190;
+
+ if (info->flags & IS_AX88790)
+ outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
+ if (i == 32) {
+ conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
+ pcmcia_access_configuration_register(link->handle, &reg);
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ }
+
+ info->phy_id = (i < 32) ? i : -1;
+ link->dev = &info->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(info->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ",
+ dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ if (info->phy_id != -1) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ axnet_release(link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* axnet_config */
+
+/*======================================================================
+
+ After a card is removed, axnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void axnet_release(dev_link_t *link)
+{
+ DEBUG(0, "axnet_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(2, "axnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ axnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ axnet_reset_8390(dev);
+ AX88190_init(dev, 1);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* axnet_event */
+
+/*======================================================================
+
+ MII interface support
+
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DATA_WRITE0 0x00
+#define MDIO_DATA_WRITE1 0x08
+#define MDIO_DATA_READ 0x04
+#define MDIO_MASK 0x0f
+#define MDIO_ENB_IN 0x02
+
+static void mdio_sync(kio_addr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb_p(MDIO_DATA_WRITE1, addr);
+ outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+{
+ u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 14; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+}
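+
+/*
+ * Illustrative sketch only (not part of the original driver; the helper
+ * name is hypothetical): polling link state through the bit-banged MDIO
+ * routines above. MII register 1 is the BMSR; bit 0x0004 is the
+ * link-status bit. ei_watchdog() below performs the same check inline.
+ */
+static inline int axnet_link_ok(kio_addr_t mii_addr, int phy_id)
+{
+ int bmsr = mdio_read(mii_addr, phy_id, 1);
+ /* 0xffff means no PHY responded at this address */
+ return (bmsr != 0xffff) && (bmsr & 0x0004);
+}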
+
+/*====================================================================*/
+
+static int axnet_open(struct net_device *dev)
+{
+ axnet_dev_t *info = PRIV(dev);
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "axnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+
+ info->link_status = 0x00;
+ init_timer(&info->watchdog);
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)dev;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ax_open(dev);
+} /* axnet_open */
+
+/*====================================================================*/
+
+static int axnet_close(struct net_device *dev)
+{
+ axnet_dev_t *info = PRIV(dev);
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "axnet_close('%s')\n", dev->name);
+
+ ax_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&info->watchdog);
+
+ return 0;
+} /* axnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void axnet_reset_8390(struct net_device *dev)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
+ dev->name);
+
+} /* axnet_reset_8390 */
+
+/*====================================================================*/
+
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ PRIV(dev)->stale = 0;
+ return ax_interrupt(irq, dev_id, regs);
+}
+
+static void ei_watchdog(u_long arg)
+{
+ struct net_device *dev = (struct net_device *)(arg);
+ axnet_dev_t *info = PRIV(dev);
+ kio_addr_t nic_base = dev->base_addr;
+ kio_addr_t mii_addr = nic_base + AXNET_MII_EEP;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (info->phy_id < 0)
+ goto reschedule;
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->phy_id = -1;
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link) {
+ info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ AX88190_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "axnet_cs");
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/*====================================================================*/
+
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ axnet_dev_t *info = PRIV(dev);
+ u16 *data = (u16 *)&rq->ifr_ifru;
+ kio_addr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data[0] = info->phy_id;
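+ /* Fall through... */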
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+static void get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ kio_addr_t nic_base = dev->base_addr;
+
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+}
+
+/*====================================================================*/
+
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+
+}
+
+/*====================================================================*/
+
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ kio_addr_t nic_base = dev->base_addr;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
+ outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
+}
+
+static struct pcmcia_driver axnet_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "axnet_cs",
+ },
+ .attach = axnet_attach,
+ .detach = axnet_detach,
+};
+
+static int __init init_axnet_cs(void)
+{
+ return pcmcia_register_driver(&axnet_cs_driver);
+}
+
+static void __exit exit_axnet_cs(void)
+{
+ pcmcia_unregister_driver(&axnet_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_axnet_cs);
+module_exit(exit_axnet_cs);
+
+/*====================================================================*/
+
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code (not counting the
+ PCMCIA ones), it is easy to break some card with what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char *version_8390 =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
+
+#include <linux/bitops.h>
+#include <asm/irq.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/etherdevice.h>
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
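+
+/* In this driver the hooks described above are supplied by the PCMCIA
+ front end earlier in this file; axnet_config() wires them up:
+ ei_status.reset_8390 = &axnet_reset_8390;
+ ei_status.get_8390_hdr = &get_8390_hdr;
+ ei_status.block_input = &block_input;
+ ei_status.block_output = &block_output;
+*/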
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock to protect
+ * access to page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we can just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
+ * enter lock, take the queued irq. So we waddle instead of flying.
+ *
+ * Finally, by special arrangement (for the purpose of being generally
+ * annoying), the transmit function is called in a bh-atomic context. That
+ * places restrictions on the user-context callers, as disable_irq won't
+ * save them.
+ */
+
+/**
+ * ax_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int ax_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+#ifdef HAVE_TX_TIMEOUT
+ /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+ wrapper that does e.g. media check & then calls ei_tx_timeout. */
+ if (dev->tx_timeout == NULL)
+ dev->tx_timeout = ei_tx_timeout;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ AX88190_init(dev, 1);
+ /* Set the flag before we drop the lock. That way the IRQ arrives
+ after it's set and we get no silly warnings */
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock)
+
+/**
+ * ax_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
+ */
+int ax_close(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ AX88190_init(dev, 0);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+void ei_tx_timeout(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ unsigned long flags;
+
+ ei_local->stat.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = inb(e8390_base+EN0_TSR);
+ isr = inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ AX88190_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int length, send_length, output_page;
+ unsigned long flags;
+ u8 packet[ETH_ZLEN];
+
+ netif_stop_queue(dev);
+
+ length = skb->len;
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here otherwise the IRQ handler
+ on another CPU can flip window and race the IRQ mask set. We end
+ up trashing the mcast filter, not disabling irqs, if we don't lock */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync(dev->irq);
+
+ spin_lock(&ei_local->page_lock);
+
+ ei_local->irqlock = 1;
+
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ ei_local->stat.tx_errors++;
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ if (length == skb->len)
+ ei_block_output(dev, length, skb->data, output_page);
+ else {
+ memset(packet, 0, ETH_ZLEN);
+ memcpy(packet, skb->data, skb->len);
+ ei_block_output(dev, length, packet, output_page);
+ }
+
+ if (! ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+
+ dev_kfree_skb (skb);
+ ei_local->stat.tx_bytes += send_length;
+
+ return 0;
+}
+
+/**
+ * ax_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ * @regs: unused
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ long e8390_base;
+ int interrupts, nr_serviced = 0, i;
+ struct ei_device *ei_local;
+ int handled = 0;
+
+ if (dev == NULL)
+ {
+ printk(KERN_ERR "ax_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ if (ei_local->irqlock)
+ {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_NONE;
+ }
+
+ if (ei_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ outb_p(0x00, e8390_base + EN0_ISR);
+ ei_local->irqlock = 1;
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE)
+ {
+ if (!netif_running(dev) || (interrupts == 0xff)) {
+ if (ei_debug > 1)
+ printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ handled = 1;
+
+ /* AX88190 bug fix. */
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ for (i = 0; i < 10; i++) {
+ if (!(inb(e8390_base + EN0_ISR) & interrupts))
+ break;
+ outb_p(0, e8390_base + EN0_ISR);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ }
+ }
+
+ if (interrupts && ei_debug)
+ {
+ handled = 1;
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if(interrupts!=0xFF)
+ printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int status = inb(e8390_base + EN0_TSR);
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+// dev->name, ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ ei_local->stat.tx_aborted_errors++;
+ ei_local->stat.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ ei_local->stat.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ rxing_page = inb_p(e8390_base + EN1_CURPAG -1);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+ printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ ei_local->stat.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ei_local->stat.rx_packets++;
+ ei_local->stat.rx_bytes += pkt_len;
+ if (pkt_stat & ENRSR_PHY)
+ ei_local->stat.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ ei_local->stat.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ axnet_dev_t *info = PRIV(dev); /* private data follows struct ei_device */
+ long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &ei_local->stat;
+
+ spin_lock_irqsave(&ei_local->page_lock,flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &ei_local->stat;
+}
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
+ else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+}
+
+/*
+ * Called without lock held. This is invoked from user context and may
+ * be parallel to just about everything else. It's also fairly quick and
+ * not called too often. Must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+}
+
+/**
+ * axdev_setup - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static void axdev_setup(struct net_device *dev)
+{
+ struct ei_device *ei_local;
+ if (ei_debug > 1)
+ printk(version_8390);
+
+ SET_MODULE_OWNER(dev);
+
+ ei_local = (struct ei_device *)netdev_priv(dev);
+ spin_lock_init(&ei_local->page_lock);
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+}
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * AX88190_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void AX88190_init(struct net_device *dev, int startp)
+{
+ axnet_dev_t *info = PRIV(dev);
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+
+ if(sizeof(struct e8390_pkt_hdr)!=4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for(i = 0; i < 6; i++)
+ {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+ printk(KERN_ERR "Hw. address read/write mismatch %d\n", i);
+ }
+ /*
+ * Initialize the multicast list to accept-all. If we enable multicast
+ * the higher levels can do the filtering.
+ */
+ for (i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp)
+ {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ outb_p(E8390_TXCONFIG | info->duplex_flag,
+ e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+
+ if (inb_p(e8390_base) & E8390_TRANS)
+ {
+ printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
new file mode 100644
index 000000000000..4294e1e3f156
--- /dev/null
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -0,0 +1,509 @@
+/*
+ * Linux ARCnet driver - COM20020 PCMCIA support
+ *
+ * Written 1994-1999 by Avery Pennarun,
+ * based on an ISA version by David Woodhouse.
+ * Derived from ibmtr_cs.c by Steve Kipisz (pcmcia-cs 3.1.4)
+ * which was derived from pcnet_cs.c by David Hinds.
+ * Some additional portions derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ * Changes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
+ * - reorganize kmallocs in com20020_attach, checking all for failure
+ * and releasing the previous allocations if one fails
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/arcdevice.h>
+#include <linux/com20020.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
+
+#ifdef PCMCIA_DEBUG
+
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+
+static void regdump(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int count;
+
+ printk("com20020 register dump:\n");
+ for (count = ioaddr; count < ioaddr + 16; count++)
+ {
+ if (!(count % 16))
+ printk("\n%04X: ", count);
+ printk("%02X ", inb(count));
+ }
+ printk("\n");
+
+ printk("buffer0 dump:\n");
+ /* set up the address register */
+ count = 0;
+ outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
+ outb(count & 0xff, _ADDR_LO);
+
+ for (count = 0; count < 256+32; count++)
+ {
+ if (!(count % 16))
+ printk("\n%04X: ", count);
+
+ /* copy the data */
+ printk("%02X ", inb(_MEMDATA));
+ }
+ printk("\n");
+}
+
+#else
+
+#define DEBUG(n, args...) do { } while (0)
+static inline void regdump(struct net_device *dev) { }
+
+#endif
+
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+static int node;
+static int timeout = 3;
+static int backplane;
+static int clockp;
+static int clockm;
+
+module_param(node, int, 0);
+module_param(timeout, int, 0);
+module_param(backplane, int, 0);
+module_param(clockp, int, 0);
+module_param(clockm, int, 0);
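+
+/* Illustrative usage only (the values shown are arbitrary examples):
+ modprobe com20020_cs node=0x27 timeout=3 */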
+
+MODULE_LICENSE("GPL");
+
+/*====================================================================*/
+
+static void com20020_config(dev_link_t *link);
+static void com20020_release(dev_link_t *link);
+static int com20020_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static dev_info_t dev_info = "com20020_cs";
+
+static dev_link_t *com20020_attach(void);
+static void com20020_detach(dev_link_t *);
+
+static dev_link_t *dev_list;
+
+/*====================================================================*/
+
+typedef struct com20020_dev_t {
+ struct net_device *dev;
+ dev_node_t node;
+} com20020_dev_t;
+
+/*======================================================================
+
+ com20020_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *com20020_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ com20020_dev_t *info;
+ struct net_device *dev;
+ int ret;
+ struct arcnet_local *lp;
+
+ DEBUG(0, "com20020_attach()\n");
+
+ /* Create new network device */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+ if (!link)
+ return NULL;
+
+ info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
+ if (!info)
+ goto fail_alloc_info;
+
+ dev = alloc_arcdev("");
+ if (!dev)
+ goto fail_alloc_dev;
+
+ memset(info, 0, sizeof(struct com20020_dev_t));
+ memset(link, 0, sizeof(struct dev_link_t));
+ lp = dev->priv;
+ lp->timeout = timeout;
+ lp->backplane = backplane;
+ lp->clockp = clockp;
+ lp->clockm = clockm & 3;
+ lp->hw.owner = THIS_MODULE;
+
+ /* fill in our module parameters as defaults */
+ dev->dev_addr[0] = node;
+
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts1 = 16;
+ link->io.IOAddrLines = 16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.Present = PRESENT_OPTION;
+
+
+ link->irq.Instance = info->dev = dev;
+ link->priv = info;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &com20020_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ com20020_detach(link);
+ return NULL;
+ }
+
+ return link;
+
+fail_alloc_dev:
+ kfree(info);
+fail_alloc_info:
+ kfree(link);
+ return NULL;
+} /* com20020_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void com20020_detach(dev_link_t *link)
+{
+ struct com20020_dev_t *info = link->priv;
+ dev_link_t **linkp;
+ struct net_device *dev;
+
+ DEBUG(1,"detach...\n");
+
+ DEBUG(0, "com20020_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ dev = info->dev;
+
+ if (link->dev) {
+ DEBUG(1,"unregister...\n");
+
+ unregister_netdev(dev);
+
+ /*
+ * this is necessary because we register our IRQ separately
+ * from card services.
+ */
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ }
+
+ if (link->state & DEV_CONFIG)
+ com20020_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ DEBUG(1,"unlinking...\n");
+ *linkp = link->next;
+ if (link->priv)
+ {
+ dev = info->dev;
+ if (dev)
+ {
+ DEBUG(1,"kfree...\n");
+ free_netdev(dev);
+ }
+ DEBUG(1,"kfree2...\n");
+ kfree(info);
+ }
+ DEBUG(1,"kfree3...\n");
+ kfree(link);
+
+} /* com20020_detach */
+
+/*======================================================================
+
+ com20020_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
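+/* On failure this records which Card Services call failed (last_fn) and its
+ error code (last_ret), then jumps to the cs_failed label for cs_error(). */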
+
+static void com20020_config(dev_link_t *link)
+{
+ struct arcnet_local *lp;
+ client_handle_t handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ com20020_dev_t *info;
+ struct net_device *dev;
+ int i, last_ret, last_fn;
+ u_char buf[64];
+ int ioaddr;
+
+ handle = link->handle;
+ info = link->priv;
+ dev = info->dev;
+
+ DEBUG(1,"config...\n");
+
+ DEBUG(0, "com20020_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
+ i = !CS_SUCCESS;
+ if (!link->io.BasePort1)
+ {
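+ /* No base port was assigned: probe 16-byte aligned I/O windows from
+ 0x100 up to 0x3f0 until Card Services grants one. */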
+ for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
+ {
+ link->io.BasePort1 = ioaddr;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS)
+ break;
+ }
+ }
+ else
+ i = pcmcia_request_io(link->handle, &link->io);
+
+ if (i != CS_SUCCESS)
+ {
+ DEBUG(1,"arcnet: requestIO failed totally!\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr = link->io.BasePort1;
+ DEBUG(1,"arcnet: got ioaddr %Xh\n", ioaddr);
+
+ DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n",
+ link->irq.AssignedIRQ,
+ link->irq.IRQInfo1, link->irq.IRQInfo2);
+ i = pcmcia_request_irq(link->handle, &link->irq);
+ if (i != CS_SUCCESS)
+ {
+ DEBUG(1,"arcnet: requestIRQ failed totally!\n");
+ goto failed;
+ }
+
+ dev->irq = link->irq.AssignedIRQ;
+
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ if (com20020_check(dev))
+ {
+ regdump(dev);
+ goto failed;
+ }
+
+ lp = dev->priv;
+ lp->card_name = "PCMCIA COM20020";
+ lp->card_flags = ARC_CAN_10MBIT; /* pretend they can all do 10Mbit */
+
+ link->dev = &info->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ i = com20020_found(dev, 0); /* calls register_netdev */
+
+ if (i != 0) {
+ DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(info->node.dev_name, dev->name);
+
+ DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n",
+ dev->name, dev->base_addr, dev->irq);
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ DEBUG(1,"com20020_config failed...\n");
+ com20020_release(link);
+} /* com20020_config */
+
+/*======================================================================
+
+ After a card is removed, com20020_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void com20020_release(dev_link_t *link)
+{
+
+ DEBUG(1,"release...\n");
+
+ DEBUG(0, "com20020_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~(DEV_CONFIG | DEV_RELEASE_PENDING);
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int com20020_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ com20020_dev_t *info = link->priv;
+ struct net_device *dev = info->dev;
+
+ DEBUG(1, "com20020_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT;
+ com20020_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open) {
+ netif_device_detach(dev);
+ }
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ int ioaddr = dev->base_addr;
+ struct arcnet_local *lp = dev->priv;
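+ /* ioaddr and lp are referenced by the ARCRESET macro below. */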
+ ARCRESET;
+ }
+ }
+ break;
+ }
+ return 0;
+} /* com20020_event */
+
+
+
+static struct pcmcia_driver com20020_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "com20020_cs",
+ },
+ .attach = com20020_attach,
+ .detach = com20020_detach,
+};
+
+static int __init init_com20020_cs(void)
+{
+ return pcmcia_register_driver(&com20020_cs_driver);
+}
+
+static void __exit exit_com20020_cs(void)
+{
+ pcmcia_unregister_driver(&com20020_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_com20020_cs);
+module_exit(exit_com20020_cs);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
new file mode 100644
index 000000000000..0424865e8094
--- /dev/null
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -0,0 +1,1286 @@
+/*======================================================================
+ fmvj18x_cs.c 2.8 2002/03/23
+
+ A fmvj18x (and its compatibles) PCMCIA client driver
+
+ Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp
+
+ TDK LAK-CD021 and CONTEC C-NET(PC)C support added by
+ Nobuhiro Katayama, kata-n@po.iijnet.or.jp
+
+ The PCMCIA client code is based on code written by David Hinds.
+ Network code is based on the "FMV-18x driver" by Yutaka TAMIYA
+ but is actually largely Donald Becker's AT1700 driver, which
+ carries the following attribution:
+
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+======================================================================*/
+
+#define DRV_NAME "fmvj18x_cs"
+#define DRV_VERSION "2.8"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* SRAM configuration */
+/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */
+INT_MODULE_PARM(sram_config, 0);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+/*
+ PCMCIA event handlers
+ */
+static void fmvj18x_config(dev_link_t *link);
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id);
+static int fmvj18x_setup_mfc(dev_link_t *link);
+static void fmvj18x_release(dev_link_t *link);
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args);
+static dev_link_t *fmvj18x_attach(void);
+static void fmvj18x_detach(dev_link_t *);
+
+/*
+ LAN controller(MBH86960A) specific routines
+ */
+static int fjn_config(struct net_device *dev, struct ifmap *map);
+static int fjn_open(struct net_device *dev);
+static int fjn_close(struct net_device *dev);
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void fjn_rx(struct net_device *dev);
+static void fjn_reset(struct net_device *dev);
+static struct net_device_stats *fjn_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+static dev_info_t dev_info = "fmvj18x_cs";
+static dev_link_t *dev_list;
+
+/*
+ card type
+ */
+typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
+ XXX10304
+} cardtype_t;
+
+/*
+ driver specific data structure
+*/
+typedef struct local_info_t {
+ dev_link_t link;
+ dev_node_t node;
+ struct net_device_stats stats;
+ long open_time;
+ uint tx_started:1;
+ uint tx_queue;
+ u_short tx_queue_len;
+ cardtype_t cardtype;
+ u_short sent;
+ u_char mc_filter[8];
+} local_info_t;
+
+#define MC_FILTERBREAK 64
+
+/*====================================================================*/
+/*
+ ioport offset from the base address
+ */
+#define TX_STATUS 0 /* transmit status register */
+#define RX_STATUS 1 /* receive status register */
+#define TX_INTR 2 /* transmit interrupt mask register */
+#define RX_INTR 3 /* receive interrupt mask register */
+#define TX_MODE 4 /* transmit mode register */
+#define RX_MODE 5 /* receive mode register */
+#define CONFIG_0 6 /* configuration register 0 */
+#define CONFIG_1 7 /* configuration register 1 */
+
+#define NODE_ID 8 /* node ID register (bank 0) */
+#define MAR_ADR 8 /* multicast address registers (bank 1) */
+
+#define DATAPORT 8 /* buffer mem port registers (bank 2) */
+#define TX_START 10 /* transmit start register */
+#define COL_CTRL 11 /* 16 collision control register */
+#define BMPR12 12 /* reserved */
+#define BMPR13 13 /* reserved */
+#define RX_SKIP 14 /* skip received packet register */
+
+#define LAN_CTRL 16 /* LAN card control register */
+
+#define MAC_ID 0x1a /* hardware address */
+#define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */
+
+/*
+ control bits
+ */
+#define ENA_TMT_OK 0x80
+#define ENA_TMT_REC 0x20
+#define ENA_COL 0x04
+#define ENA_16_COL 0x02
+#define ENA_TBUS_ERR 0x01
+
+#define ENA_PKT_RDY 0x80
+#define ENA_BUS_ERR 0x40
+#define ENA_LEN_ERR 0x08
+#define ENA_ALG_ERR 0x04
+#define ENA_CRC_ERR 0x02
+#define ENA_OVR_FLO 0x01
+
+/* flags */
+#define F_TMT_RDY 0x80 /* can accept new packet */
+#define F_NET_BSY 0x40 /* carrier is detected */
+#define F_TMT_OK 0x20 /* send packet successfully */
+#define F_SRT_PKT 0x10 /* short packet error */
+#define F_COL_ERR 0x04 /* collision error */
+#define F_16_COL 0x02 /* 16 collision error */
+#define F_TBUS_ERR 0x01 /* bus read error */
+
+#define F_PKT_RDY 0x80 /* packet(s) in buffer */
+#define F_BUS_ERR 0x40 /* bus read error */
+#define F_LEN_ERR 0x08 /* short packet */
+#define F_ALG_ERR 0x04 /* frame error */
+#define F_CRC_ERR 0x02 /* CRC error */
+#define F_OVR_FLO 0x01 /* overflow error */
+
+#define F_BUF_EMP 0x40 /* receive buffer is empty */
+
+#define F_SKP_PKT 0x05 /* drop packet in buffer */
+
+/* default bitmaps */
+#define D_TX_INTR ( ENA_TMT_OK )
+#define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \
+ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO )
+#define TX_STAT_M ( F_TMT_RDY )
+#define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \
+ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO )
+
+/* commands */
+#define D_TX_MODE 0x06 /* no tests, detect carrier */
+#define ID_MATCHED 0x02 /* (RX_MODE) */
+#define RECV_ALL 0x03 /* (RX_MODE) */
+#define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */
+#define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */
+#define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */
+#define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */
+#define BANK_0 0xa0 /* bank 0 (CONFIG_1) */
+#define BANK_1 0xa4 /* bank 1 (CONFIG_1) */
+#define BANK_2 0xa8 /* bank 2 (CONFIG_1) */
+#define CHIP_OFF 0x80 /* control chip power off (CONFIG_1) */
+#define DO_TX 0x80 /* do transmit packet */
+#define SEND_PKT 0x81 /* send a packet */
+#define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */
+#define MANU_MODE 0x03 /* Stop and skip packet on 16 col */
+#define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */
+#define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */
+#define INTR_OFF 0x0d /* LAN controller ignores interrupts */
+#define INTR_ON 0x1d /* LAN controller will catch interrupts */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+#define BANK_0U 0x20 /* bank 0 (CONFIG_1) */
+#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
+#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
+
+static dev_link_t *fmvj18x_attach(void)
+{
+ local_info_t *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int ret;
+
+ DEBUG(0, "fmvj18x_attach()\n");
+
+ /* Make up a FMVJ18x specific data structure */
+ dev = alloc_etherdev(sizeof(local_info_t));
+ if (!dev)
+ return NULL;
+ lp = netdev_priv(dev);
+ link = &lp->link;
+ link->priv = dev;
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &fjn_interrupt;
+ link->irq.Instance = dev;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The FMVJ18x specific entries in the device structure. */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &fjn_start_xmit;
+ dev->set_config = &fjn_config;
+ dev->get_stats = &fjn_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->open = &fjn_open;
+ dev->stop = &fjn_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = fjn_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &fmvj18x_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ fmvj18x_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* fmvj18x_attach */
+
+/*====================================================================*/
+
+static void fmvj18x_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "fmvj18x_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ fmvj18x_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* fmvj18x_detach */
+
+/*====================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static int mfc_try_io_port(dev_link_t *link)
+{
+ int i, ret;
+ static kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
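+ /* Try the standard PC serial port bases for the modem function; the
+ final 0 entry gives up on the second (serial) I/O window. */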
+
+ for (i = 0; i < 5; i++) {
+ link->io.BasePort2 = serial_base[i];
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (link->io.BasePort2 == 0) {
+ link->io.NumPorts2 = 0;
+ printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+ }
+ ret = pcmcia_request_io(link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+}
+
+static int ungermann_try_io_port(dev_link_t *link)
+{
+ int ret;
+ kio_addr_t ioaddr;
+ /*
+ The Ungermann-Bass Access/CARD accepts only the I/O base addresses
+ 0x300, 0x320, 0x340, 0x360, 0x380 and 0x3c0.
+ */
+ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
+ link->io.BasePort1 = ioaddr;
+ ret = pcmcia_request_io(link->handle, &link->io);
+ if (ret == CS_SUCCESS) {
+ /* calculate ConfigIndex value */
+ link->conf.ConfigIndex =
+ ((link->io.BasePort1 & 0x0f0) >> 3) | 0x22;
+ return ret;
+ }
+ }
+ return ret; /* RequestIO failed */
+}
+
+static void fmvj18x_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ local_info_t *lp = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ int i, last_fn, last_ret, ret;
+ kio_addr_t ioaddr;
+ cardtype_t cardtype;
+ char *card_name = "unknown";
+ u_char *node_id;
+
+ DEBUG(0, "fmvj18x_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = (u_char *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
+ /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigIndex = parse.cftable_entry.index;
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ else
+ buf[0] = 0xffff;
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_TDK:
+ cardtype = TDK;
+ if (le16_to_cpu(buf[1]) == PRODID_TDK_CF010) {
+ cs_status_t status;
+ pcmcia_get_status(handle, &status);
+ if (status.CardState & CS_EVENT_3VCARD)
+ link->conf.Vcc = 33; /* inserted in 3.3V slot */
+ } else if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410) {
+ /* MultiFunction Card */
+ link->conf.ConfigBase = 0x800;
+ link->conf.ConfigIndex = 0x47;
+ link->io.NumPorts2 = 8;
+ }
+ break;
+ case MANFID_CONTEC:
+ cardtype = CONTEC;
+ break;
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302)
+ /* The RATOC REX-5588/9822/4886 report PRODID 0004 (=MBH10302),
+ but they are actually MBH10304-based cards. */
+ cardtype = MBH10304;
+ else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304)
+ cardtype = MBH10304;
+ else
+ cardtype = LA501;
+ break;
+ default:
+ cardtype = MBH10304;
+ }
+ } else {
+ /* old type card */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ else
+ buf[0] = 0xffff;
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) {
+ cardtype = XXX10304; /* MBH10304 with buggy CIS */
+ link->conf.ConfigIndex = 0x20;
+ } else {
+ cardtype = MBH10302; /* NextCom NC5310, etc. */
+ link->conf.ConfigIndex = 1;
+ }
+ break;
+ case MANFID_UNGERMANN:
+ cardtype = UNGERMANN;
+ break;
+ default:
+ cardtype = MBH10302;
+ link->conf.ConfigIndex = 1;
+ }
+ }
+
+ if (link->io.NumPorts2 != 0) {
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ ret = mfc_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else if (cardtype == UNGERMANN) {
+ ret = ungermann_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else {
+ CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io));
+ }
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ if (link->io.BasePort2 != 0)
+ fmvj18x_setup_mfc(link);
+
+ ioaddr = dev->base_addr;
+
+ /* Reset controller */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set hardware address */
+ switch (cardtype) {
+ case MBH10304:
+ case TDK:
+ case LA501:
+ case CONTEC:
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ if (cardtype == MBH10304) {
+ /* MBH10304's CIS_FUNCE is corrupted */
+ node_id = &(tuple.TupleData[5]);
+ card_name = "FMV-J182";
+ } else {
+ while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) {
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ }
+ node_id = &(tuple.TupleData[2]);
+ if( cardtype == TDK ) {
+ card_name = "TDK LAK-CD021";
+ } else if( cardtype == LA501 ) {
+ card_name = "LA501";
+ } else {
+ card_name = "C-NET(PC)C";
+ }
+ }
+ /* Read MACID from CIS */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id[i];
+ break;
+ case UNGERMANN:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ card_name = "Access/CARD";
+ break;
+ case XXX10304:
+ /* Read MACID from Buggy CIS */
+ if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) {
+ printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
+ goto failed;
+ }
+ for (i = 0 ; i < 6; i++) {
+ dev->dev_addr[i] = tuple.TupleData[i];
+ }
+ card_name = "FMV-J182";
+ break;
+ case MBH10302:
+ default:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ card_name = "FMV-J181";
+ break;
+ }
+
+ lp->cardtype = cardtype;
+ link->dev = &lp->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(lp->node.dev_name, dev->name);
+
+ /* print current configuration */
+ printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, hw_addr ",
+ dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ return;
+
+cs_failed:
+ /* All Card Services errors end up here */
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ fmvj18x_release(link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* fmvj18x_config */
+/*====================================================================*/
+
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char __iomem *base;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ i = pcmcia_request_window(&link->handle, &req, &link->win);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ pcmcia_map_mem_page(link->win, &mem);
+
+ /*
+ * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
+ * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff
+ * 'xx' is garbage.
+ * 'yy' is MAC address.
+ */
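+ /* The window maps 8-bit attribute memory, where valid bytes appear only
+ at even addresses; hence the '*2' stride in the scan below. */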
+ for (i = 0; i < 0x200; i++) {
+ if (readb(base+i*2) == 0x22) {
+ if (readb(base+(i-1)*2) == 0xff
+ && readb(base+(i+5)*2) == 0x04
+ && readb(base+(i+6)*2) == 0x06
+ && readb(base+(i+13)*2) == 0xff)
+ break;
+ }
+ }
+
+ if (i != 0x200) {
+ for (j = 0 ; j < 6; j++,i++) {
+ node_id[j] = readb(base+(i+7)*2);
+ }
+ }
+
+ iounmap(base);
+ j = pcmcia_release_window(link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i != 0x200) ? 0 : -1;
+
+} /* fmvj18x_get_hwinfo */
+/*====================================================================*/
+
+static int fmvj18x_setup_mfc(dev_link_t *link)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char __iomem *base;
+ int i, j;
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ i = pcmcia_request_window(&link->handle, &req, &link->win);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ pcmcia_map_mem_page(link->win, &mem);
+
+ ioaddr = dev->base_addr;
+ writeb(0x47, base+0x800); /* Config Option Register of LAN */
+ writeb(0x0, base+0x802); /* Config and Status Register */
+
+ writeb(ioaddr & 0xff, base+0x80a); /* I/O Base(Low) of LAN */
+ writeb((ioaddr >> 8) & 0xff, base+0x80c); /* I/O Base(High) of LAN */
+
+ writeb(0x45, base+0x820); /* Config Option Register of Modem */
+ writeb(0x8, base+0x822); /* Config and Status Register */
+
+ iounmap(base);
+ j = pcmcia_release_window(link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return 0;
+
+}
+/*====================================================================*/
+
+static void fmvj18x_release(dev_link_t *link)
+{
+
+ DEBUG(0, "fmvj18x_release(0x%p)\n", link);
+
+ /* Don't bother checking to see if these succeed or not */
+ pcmcia_release_window(link->win);
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*====================================================================*/
+
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(1, "fmvj18x_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ fmvj18x_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ fjn_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* fmvj18x_event */
+
+static struct pcmcia_driver fmvj18x_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "fmvj18x_cs",
+ },
+ .attach = fmvj18x_attach,
+ .detach = fmvj18x_detach,
+};
+
+static int __init init_fmvj18x_cs(void)
+{
+ return pcmcia_register_driver(&fmvj18x_cs_driver);
+}
+
+static void __exit exit_fmvj18x_cs(void)
+{
+ pcmcia_unregister_driver(&fmvj18x_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_fmvj18x_cs);
+module_exit(exit_fmvj18x_cs);
+
+/*====================================================================*/
+
+static irqreturn_t fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr;
+ unsigned short tx_stat, rx_stat;
+
+ if (lp == NULL) {
+ printk(KERN_NOTICE "fjn_interrupt(): irq %d for "
+ "unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ ioaddr = dev->base_addr;
+
+ /* avoid multiple interrupts */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ /* get status */
+ tx_stat = inb(ioaddr + TX_STATUS);
+ rx_stat = inb(ioaddr + RX_STATUS);
+
+ /* clear status */
+ outb(tx_stat, ioaddr + TX_STATUS);
+ outb(rx_stat, ioaddr + RX_STATUS);
+
+ DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
+ DEBUG(4, " tx_status %02x.\n", tx_stat);
+
+ if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ /* there is packet(s) in rx buffer */
+ fjn_rx(dev);
+ }
+ if (tx_stat & F_TMT_RDY) {
+ lp->stats.tx_packets += lp->sent ;
+ lp->sent = 0 ;
+ if (lp->tx_queue) {
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ } else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ DEBUG(4, "%s: exiting interrupt,\n", dev->name);
+ DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
+
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+ return IRQ_HANDLED;
+
+} /* fjn_interrupt */
+
+/*====================================================================*/
+
+static void fjn_tx_timeout(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
+ dev->name, htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ local_irq_disable();
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->sent = 0;
+ lp->open_time = jiffies;
+ local_irq_enable();
+ netif_wake_queue(dev);
+}
+
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ short length = skb->len;
+
+ if (length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+
+ {
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ printk(KERN_NOTICE "%s: Attempting to send a large packet"
+ " (%d bytes).\n", dev->name, length);
+ return 1;
+ }
+
+ DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
+ dev->name, (unsigned long)skb->len);
+ lp->stats.tx_bytes += skb->len;
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
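+ /* Each queued frame occupies a 2-byte length word plus the data padded
+ to an even byte count, i.e. (length + 3) & ~1 bytes. */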
+ lp->tx_queue_len += ((length+3) & ~1);
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ netif_start_queue(dev);
+ } else {
+ if( sram_config == 0 ) {
+ if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ } else {
+ if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) &&
+ lp->tx_queue < 127 )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ }
+ }
+
+ /* Re-enable interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+ }
+ dev_kfree_skb (skb);
+
+ return 0;
+} /* fjn_start_xmit */
+
+/*====================================================================*/
+
+static void fjn_reset(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(4, "fjn_reset(%s) called.\n",dev->name);
+
+ /* Reset controller */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set Tx modes */
+ outb(D_TX_MODE, ioaddr + TX_MODE);
+ /* set Rx modes */
+ outb(ID_MATCHED, ioaddr + RX_MODE);
+
+ /* Set hardware address */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + NODE_ID + i);
+
+ /* Switch to bank 1 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_1, ioaddr + CONFIG_1);
+ else
+ outb(BANK_1U, ioaddr + CONFIG_1);
+
+ /* set the multicast table to accept none. */
+ for (i = 0; i < 6; i++)
+ outb(0x00, ioaddr + MAR_ADR + i);
+
+ /* Switch to bank 2 (runtime mode) */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_2, ioaddr + CONFIG_1);
+ else
+ outb(BANK_2U, ioaddr + CONFIG_1);
+
+ /* set 16col ctrl bits */
+ if( lp->cardtype == TDK || lp->cardtype == CONTEC)
+ outb(TDK_AUTO_MODE, ioaddr + COL_CTRL);
+ else
+ outb(AUTO_MODE, ioaddr + COL_CTRL);
+
+ /* clear Reserved Regs */
+ outb(0x00, ioaddr + BMPR12);
+ outb(0x00, ioaddr + BMPR13);
+
+ /* reset Skip packet reg. */
+ outb(0x01, ioaddr + RX_SKIP);
+
+ /* Enable Tx and Rx */
+ if( sram_config == 0 )
+ outb(CONFIG0_DFL, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_DFL_1, ioaddr + CONFIG_0);
+
+ /* Init receive pointer ? */
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ /* Clear all status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ /* Turn on Rx interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+ /* Turn on interrupts from LAN card controller */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_ON, ioaddr + LAN_CTRL);
+} /* fjn_reset */
+
+/*====================================================================*/
+
+static void fjn_rx(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
+ dev->name, inb(ioaddr + RX_STATUS));
+
+ while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ u_short status = inw(ioaddr + DATAPORT);
+
+ DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ break;
+ }
+#endif
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & F_LEN_ERR) lp->stats.rx_length_errors++;
+ if (status & F_ALG_ERR) lp->stats.rx_frame_errors++;
+ if (status & F_CRC_ERR) lp->stats.rx_crc_errors++;
+ if (status & F_OVR_FLO) lp->stats.rx_over_errors++;
+ } else {
+ u_short pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
+ "large packet, size %d.\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping "
+ "packet (len %d).\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 5) {
+ int i;
+ printk(KERN_DEBUG "%s: Rxed packet of length %d: ",
+ dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+#endif
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a netif_wake_queue() for us and will work on them
+ when we get to the bottom-half routine. */
+/*
+ if (lp->cardtype != TDK) {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP)
+ break;
+ (void)inw(ioaddr + DATAPORT); /+ dummy status read +/
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ }
+
+ if (i > 0)
+ DEBUG(5, "%s: Exint Rx packet with mode %02x after "
+ "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
+ }
+*/
+
+ return;
+} /* fjn_rx */
+
+/*====================================================================*/
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+#ifdef PCMCIA_DEBUG
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return pc_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ pc_debug = level;
+}
+#endif /* PCMCIA_DEBUG */
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+#ifdef PCMCIA_DEBUG
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+#endif /* PCMCIA_DEBUG */
+};
+
+static int fjn_config(struct net_device *dev, struct ifmap *map){
+ return 0;
+}
+
+static int fjn_open(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ DEBUG(4, "fjn_open('%s').\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->open_time = jiffies;
+ netif_start_queue(dev);
+
+ return 0;
+} /* fjn_open */
+
+/*====================================================================*/
+
+static int fjn_close(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ DEBUG(4, "fjn_close('%s').\n", dev->name);
+
+ lp->open_time = 0;
+ netif_stop_queue(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST ,ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(CHIP_OFF ,ioaddr + CONFIG_1);
+
+ /* Tell the LAN controller to ignore interrupts */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ link->open--;
+
+ return 0;
+} /* fjn_close */
+
+/*====================================================================*/
+
+static struct net_device_stats *fjn_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ return &lp->stats;
+} /* fjn_get_stats */
+
+/*====================================================================*/
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct local_info_t *lp = netdev_priv(dev);
+ u_char mc_filter[8]; /* Multicast hash filter */
+ u_long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
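+ /* Hash each multicast address into the 64-bit filter: the low 6 bits
+ of the little-endian CRC select one bit in the 8-byte table. */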
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit =
+ ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ mc_filter[bit >> 3] |= (1 << (bit & 7));
+ }
+ }
+
+ local_irq_save(flags);
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inb(ioaddr + CONFIG_1);
+ /* Switch to bank 1 and set the multicast table. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + 8 + i);
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outb(saved_bank, ioaddr + CONFIG_1);
+ }
+ local_irq_restore(flags);
+}
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
new file mode 100644
index 000000000000..3107ccfe8f3d
--- /dev/null
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -0,0 +1,535 @@
+/*======================================================================
+
+ A PCMCIA token-ring driver for IBM-based cards
+
+ This driver supports the IBM PCMCIA Token-Ring Card.
+ Written by Steve Kipisz, kipisz@vnet.ibm.com or
+ bungy@ibm.net
+
+ Written 1995,1996.
+
+ This code is based on pcnet_cs.c from David Hinds.
+
+ V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com
+
+ Linux V2.2.x presented significant changes to the underlying
+ ibmtr.c code. Mainly the code became a lot more organized and
+ modular.
+
+ This caused the old PCMCIA Token Ring driver to give up and go
+ home early. Instead of just patching the old code to make it
+ work, the PCMCIA code has been streamlined, updated and possibly
+ improved.
+
+ This code now only contains code required for the Card Services.
+ All we do here is set the card up enough so that the real ibmtr.c
+ driver can find it and work with it properly.
+
+ i.e. We set up the io port, irq, mmio memory and shared ram
+ memory. This enables ibmtr_probe in ibmtr.c to find the card and
+ configure it as though it was a normal ISA and/or PnP card.
+
+ CHANGES
+
+ v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com)
+ Obscure bug fix; required changes to ibmtr.c, not ibmtr_cs.c
+
+ v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com)
+ Updated to version 2.2.7 to match the first version of the kernel
+ that the modifications to ibmtr.c were incorporated into.
+
+ v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com)
+ The address translation feature of the PCMCIA controller is usable, so
+ memory windows can be placed in high memory (meaning above
+ 0xFFFFF).
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+#include <linux/ibmtr.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#define PCMCIA
+#include "../tokenring/ibmtr.c"
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"ibmtr_cs.c 1.10 1996/01/06 05:19:00 (Steve Kipisz)\n"
+" 2.2.7 1999/05/03 12:00:00 (Mike Phillips)\n"
+" 2.4.2 2001/30/28 Midnight (Burt Silverman)\n";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+/* MMIO base address */
+static u_long mmiobase = 0xce000;
+
+/* SRAM base address */
+static u_long srambase = 0xd0000;
+
+/* SRAM size 8,16,32,64 */
+static u_long sramsize = 64;
+
+/* Ringspeed 4,16 */
+static int ringspeed = 16;
+
+module_param(mmiobase, ulong, 0);
+module_param(srambase, ulong, 0);
+module_param(sramsize, ulong, 0);
+module_param(ringspeed, int, 0);
+MODULE_LICENSE("GPL");
+
+/*====================================================================*/
+
+static void ibmtr_config(dev_link_t *link);
+static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
+static void ibmtr_release(dev_link_t *link);
+static int ibmtr_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static dev_info_t dev_info = "ibmtr_cs";
+
+static dev_link_t *ibmtr_attach(void);
+static void ibmtr_detach(dev_link_t *);
+
+static dev_link_t *dev_list;
+
+extern int ibmtr_probe_card(struct net_device *dev);
+extern irqreturn_t tok_interrupt (int irq, void *dev_id, struct pt_regs *regs);
+
+/*====================================================================*/
+
+typedef struct ibmtr_dev_t {
+ dev_link_t link;
+ struct net_device *dev;
+ dev_node_t node;
+ window_handle_t sram_win_handle;
+ struct tok_info *ti;
+} ibmtr_dev_t;
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "ibmtr_cs");
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/*======================================================================
+
+ ibmtr_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *ibmtr_attach(void)
+{
+ ibmtr_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int ret;
+
+ DEBUG(0, "ibmtr_attach()\n");
+
+ /* Create new token-ring device */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return NULL;
+ memset(info,0,sizeof(*info));
+ dev = alloc_trdev(sizeof(struct tok_info));
+ if (!dev) {
+ kfree(info);
+ return NULL;
+ }
+
+ link = &info->link;
+ link->priv = info;
+ info->ti = netdev_priv(dev);
+
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts1 = 4;
+ link->io.IOAddrLines = 16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &tok_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.Present = PRESENT_OPTION;
+
+ link->irq.Instance = info->dev = dev;
+
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &ibmtr_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ goto out_detach;
+ }
+
+out:
+ return link;
+
+out_detach:
+ ibmtr_detach(link);
+ link = NULL;
+ goto out;
+} /* ibmtr_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void ibmtr_detach(dev_link_t *link)
+{
+ struct ibmtr_dev_t *info = link->priv;
+ dev_link_t **linkp;
+ struct net_device *dev;
+
+ DEBUG(0, "ibmtr_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ dev = info->dev;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ {
+ struct tok_info *ti = netdev_priv(dev);
+ del_timer_sync(&(ti->tr_timer));
+ }
+ if (link->state & DEV_CONFIG)
+ ibmtr_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+ kfree(info);
+} /* ibmtr_detach */
+
+/*======================================================================
+
+ ibmtr_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ token-ring device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static void ibmtr_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ ibmtr_dev_t *info = link->priv;
+ struct net_device *dev = info->dev;
+ struct tok_info *ti = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ win_req_t req;
+ memreq_t mem;
+ int i, last_ret, last_fn;
+ u_char buf[64];
+
+ DEBUG(0, "ibmtr_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->conf.ConfigIndex = 0x61;
+
+ /* Determine if this is PRIMARY or ALTERNATE. */
+
+ /* Try PRIMARY card at 0xA20-0xA23 */
+ link->io.BasePort1 = 0xA20;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i != CS_SUCCESS) {
+ /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
+ link->io.BasePort1 = 0xA24;
+ CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io));
+ }
+ dev->base_addr = link->io.BasePort1;
+
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ dev->irq = link->irq.AssignedIRQ;
+ ti->irq = link->irq.AssignedIRQ;
+ ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
+
+ /* Allocate the MMIO memory window */
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Attributes |= WIN_USE_WAIT;
+ req.Base = 0;
+ req.Size = 0x2000;
+ req.AccessSpeed = 250;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+
+ mem.CardOffset = mmiobase;
+ mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
+ ti->mmio = ioremap(req.Base, req.Size);
+
+ /* Allocate the SRAM memory window */
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Attributes |= WIN_USE_WAIT;
+ req.Base = 0;
+ req.Size = sramsize * 1024;
+ req.AccessSpeed = 250;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &info->sram_win_handle));
+
+ mem.CardOffset = srambase;
+ mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(info->sram_win_handle, &mem));
+
+ ti->sram_base = mem.CardOffset >> 12;
+ ti->sram_virt = ioremap(req.Base, req.Size);
+ ti->sram_phys = req.Base;
+
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ /* Set up the Token-Ring Controller Configuration Register and
+ turn on the card. Check the "Local Area Network Credit Card
+ Adapters Technical Reference" SC30-3585 for this info. */
+ ibmtr_hw_setup(dev, mmiobase);
+
+ link->dev = &info->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ i = ibmtr_probe_card(dev);
+ if (i != 0) {
+ printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(info->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: port %#3lx, irq %d,",
+ dev->name, dev->base_addr, dev->irq);
+ printk (" mmio %#5lx,", (u_long)ti->mmio);
+ printk (" sram %#5lx,", (u_long)ti->sram_base << 12);
+ printk ("\n" KERN_INFO " hwaddr=");
+ for (i = 0; i < TR_ALEN; i++)
+ printk("%02X", dev->dev_addr[i]);
+ printk("\n");
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ ibmtr_release(link);
+} /* ibmtr_config */
+
+/*======================================================================
+
+ After a card is removed, ibmtr_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void ibmtr_release(dev_link_t *link)
+{
+ ibmtr_dev_t *info = link->priv;
+ struct net_device *dev = info->dev;
+
+ DEBUG(0, "ibmtr_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+ if (link->win) {
+ struct tok_info *ti = netdev_priv(dev);
+ iounmap(ti->mmio);
+ pcmcia_release_window(link->win);
+ pcmcia_release_window(info->sram_win_handle);
+ }
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int ibmtr_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ ibmtr_dev_t *info = link->priv;
+ struct net_device *dev = info->dev;
+
+ DEBUG(1, "ibmtr_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ /* set flag to bypass normal interrupt code */
+ struct tok_info *priv = netdev_priv(dev);
+ priv->sram_phys |= 1;
+ netif_device_detach(dev);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT;
+ ibmtr_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ ibmtr_probe(dev); /* really? */
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* ibmtr_event */
+
+/*====================================================================*/
+
+static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
+{
+ int i;
+
+ /* Bizarre IBM behavior: there are 16 bits of information we
+ need to set, but the card only lets us send 4 bits at a
+ time. For each byte sent to base_addr, bits 7-4 tell the
+ card which part of the 16 bits we are setting, and bits 3-0
+ contain the actual data. */
+
+ /* First nibble provides 4 bits of mmio */
+ i = (mmiobase >> 16) & 0x0F;
+ outb(i, dev->base_addr);
+
+ /* Second nibble provides 3 bits of mmio */
+ i = 0x10 | ((mmiobase >> 12) & 0x0E);
+ outb(i, dev->base_addr);
+
+ /* Third nibble, hard-coded values */
+ i = 0x26;
+ outb(i, dev->base_addr);
+
+ /* Fourth nibble sets shared ram page size */
+
+ /* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */
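+ /* sramsize>>4 gives 0, 1, 2 or 4 for 8, 16, 32 or 64 KB; the 4 is
+ remapped to 3 so that 64 KB encodes as binary 11 in bits 3-2. */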
+ i = (sramsize >> 4) & 0x07;
+ i = ((i == 4) ? 3 : i) << 2;
+ i |= 0x30;
+
+ if (ringspeed == 16)
+ i |= 2;
+ if (dev->base_addr == 0xA24)
+ i |= 1;
+ outb(i, dev->base_addr);
+
+ /* 0x40 will release the card for use */
+ outb(0x40, dev->base_addr);
+
+ return;
+}
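+
+#if 0
+/*
+ * Illustrative sketch only (never compiled): a hypothetical helper that
+ * makes the nibble handshake in ibmtr_hw_setup() explicit. Each outb()
+ * to the setup port carries a selector in bits 7-4 and four data bits in
+ * bits 3-0; ibmtr_hw_setup() above open-codes the four writes.
+ */
+static void ibmtr_write_nibble(struct net_device *dev, u_int sel, u_int data)
+{
+ outb(((sel & 0x0F) << 4) | (data & 0x0F), dev->base_addr);
+}
+#endif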
+
+static struct pcmcia_driver ibmtr_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "ibmtr_cs",
+ },
+ .attach = ibmtr_attach,
+ .detach = ibmtr_detach,
+};
+
+static int __init init_ibmtr_cs(void)
+{
+ return pcmcia_register_driver(&ibmtr_cs_driver);
+}
+
+static void __exit exit_ibmtr_cs(void)
+{
+ pcmcia_unregister_driver(&ibmtr_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_ibmtr_cs);
+module_exit(exit_ibmtr_cs);
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
new file mode 100644
index 000000000000..4603807fcafb
--- /dev/null
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -0,0 +1,1699 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+ Linux 2.5 cleanups Copyright Red Hat 2003
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+ This avoids a problem on fast systems that causes rx_framecnt
+ to return random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * 2.5.75-ac1 2003/07/11 Alan Cox <alan@redhat.com>
+ * Fixed hang on card eject as we probe it
+ * Cleaned up to use new style locking.
+ *
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+#define DRV_NAME "nmclan_cs"
+#define DRV_VERSION "0.16"
+
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define ETHER_ADDR_LEN ETH_ALEN
+ /* 6 bytes in an Ethernet Address */
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
+
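+/* 400 ms worth of jiffies */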
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+typedef struct _mace_private {
+ dev_link_t link;
+ dev_node_t node;
+ struct net_device_stats linux_stats; /* Linux statistics counters */
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+
+ spinlock_t bank_lock; /* Must be held if you step off bank 0 */
+} mace_private;
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+#ifdef PCMCIA_DEBUG
+static char rcsid[] =
+"nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao";
+static char *version =
+DRV_NAME " " DRV_VERSION " (Roger C. Pao)";
+#endif
+
+static dev_info_t dev_info="nmclan_cs";
+static dev_link_t *dev_list;
+
+static char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* 0=auto, 1=10baseT, 2=10base2; default=auto */
+INT_MODULE_PARM(if_port, 0);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static void nmclan_config(dev_link_t *link);
+static void nmclan_release(dev_link_t *link);
+static int nmclan_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev);
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static struct ethtool_ops netdev_ethtool_ops;
+
+
+static dev_link_t *nmclan_attach(void);
+static void nmclan_detach(dev_link_t *);
+
+/* ----------------------------------------------------------------------------
+nmclan_attach
+ Creates an "instance" of the driver, allocating local data
+ structures for one device. The device is registered with Card
+ Services.
+---------------------------------------------------------------------------- */
+
+static dev_link_t *nmclan_attach(void)
+{
+ mace_private *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int ret;
+
+ DEBUG(0, "nmclan_attach()\n");
+ DEBUG(1, "%s\n", rcsid);
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(mace_private));
+ if (!dev)
+ return NULL;
+ lp = netdev_priv(dev);
+ link = &lp->link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->bank_lock);
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &mace_interrupt;
+ link->irq.Instance = dev;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &mace_start_xmit;
+ dev->set_config = &mace_config;
+ dev->get_stats = &mace_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->open = &mace_open;
+ dev->stop = &mace_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = mace_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &nmclan_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ nmclan_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* nmclan_attach */
+
+/* ----------------------------------------------------------------------------
+nmclan_detach
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+---------------------------------------------------------------------------- */
+
+static void nmclan_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "nmclan_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ nmclan_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call is not interruptable. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static int mace_read(mace_private *lp, kio_addr_t ioaddr, int reg)
+{
+ int data = 0xFF;
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ data = inb(ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+ return (data & 0xFF);
+} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call is not interruptable. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data)
+{
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+} /* mace_write */
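+
+#if 0
+/*
+ * Illustrative sketch only (never compiled): what a bank-1 access through
+ * the helpers above amounts to. example_read_mpc() is a hypothetical
+ * function; update_stats() below does the real accounting. Reading the
+ * Missed Packet Count register (MACE_MPC, register 24) via mace_read()
+ * takes lp->bank_lock, switches to bank 1 with MACEBANK(1), does an
+ * inb() from ioaddr + AM2150_MACE_BASE + (MACE_MPC & 0x0F), and then
+ * switches back to bank 0.
+ */
+static int example_read_mpc(mace_private *lp, kio_addr_t ioaddr)
+{
+ return mace_read(lp, ioaddr, MACE_MPC);
+}
+#endif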
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
+static int mace_init(mace_private *lp, kio_addr_t ioaddr, char *enet_addr)
+{
+ int i;
+ int ct = 0;
+
+ /* MACE Software reset */
+ mace_write(lp, ioaddr, MACE_BIUCC, 1);
+ while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) {
+ /* Wait for the reset bit to clear by itself (<= 200 ns). */
+ if (++ct > 500) {
+ printk(KERN_ERR "mace: reset failed, card removed?\n");
+ return -1;
+ }
+ udelay(1);
+ }
+ mace_write(lp, ioaddr, MACE_BIUCC, 0);
+
+ /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
+ mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F);
+
+ mace_write(lp, ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
+ mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
+
+ /*
+ * Bit 2-1 PORTSEL[1-0] Port Select.
+ * 00 AUI/10Base-2
+ * 01 10Base-T
+ * 10 DAI Port (reserved in Am2150)
+ * 11 GPSI
+ * For this card, only the first two are valid.
+ * So, PLSCC should be set to
+ * 0x00 for 10Base-2
+ * 0x02 for 10Base-T
+ * Or just set ASEL in PHYCC below!
+ */
+ switch (if_port) {
+ case 1:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
+ break;
+ case 2:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
+ break;
+ default:
+ mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
+ /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
+ and the MACE device will automatically select the operating media
+ interface port. */
+ break;
+ }
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
+ /* Poll ADDRCHG bit */
+ ct = 0;
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) {
+ if (++ct > 500) {
+ printk(KERN_ERR "mace: ADDRCHG timeout, card removed?\n");
+ return -1;
+ }
+ }
+ /* Set PADR register */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
+
+ /* MAC Configuration Control Register should be written last */
+ /* Let set_multicast_list set this. */
+ /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
+ mace_write(lp, ioaddr, MACE_MACCC, 0x00);
+ return 0;
+} /* mace_init */
+
+/* ----------------------------------------------------------------------------
+nmclan_config
+ This routine is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+---------------------------------------------------------------------------- */
+
+#define CS_CHECK(fn, ret) \
+ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static void nmclan_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ mace_private *lp = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[64];
+ int i, last_ret, last_fn;
+ kio_addr_t ioaddr;
+
+ DEBUG(0, "nmclan_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ ioaddr = dev->base_addr;
+
+ /* Read the ethernet address from the CIS. */
+ tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
+
+ /* Verify configuration by reading the MACE ID. */
+ {
+ char sig[2];
+
+ sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
+ sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
+ if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
+ DEBUG(0, "nmclan_cs configured: mace id=%x %x\n",
+ sig[0], sig[1]);
+ } else {
+ printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
+ " be 0x40 0x?9\n", sig[0], sig[1]);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+ }
+ }
+
+ if(mace_init(lp, ioaddr, dev->dev_addr) == -1)
+ goto failed;
+
+ /* The if_port symbol can be set when the module is loaded */
+ if (if_port <= 2)
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
+
+ link->dev = &lp->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ i = register_netdev(dev);
+ if (i != 0) {
+ printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(lp->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port, hw_addr ",
+ dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ nmclan_release(link);
+ return;
+
+} /* nmclan_config */
+
+/* ----------------------------------------------------------------------------
+nmclan_release
+ After a card is removed, nmclan_release() will unregister the
+ net device, and release the PCMCIA configuration. If the device
+ is still open, this will be postponed until it is closed.
+---------------------------------------------------------------------------- */
+static void nmclan_release(dev_link_t *link)
+{
+
+ DEBUG(0, "nmclan_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/* ----------------------------------------------------------------------------
+nmclan_event
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+---------------------------------------------------------------------------- */
+static int nmclan_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(1, "nmclan_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ nmclan_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ case CS_EVENT_RESET_REQUEST:
+ return 1;
+ }
+ return 0;
+} /* nmclan_event */
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#if RESET_XILINX
+ dev_link_t *link = &lp->link;
+ conf_reg_t reg;
+ u_long OrigCorValue;
+
+ /* Save original COR value */
+ reg.Function = 0;
+ reg.Action = CS_READ;
+ reg.Offset = CISREG_COR;
+ reg.Value = 0;
+ pcmcia_access_configuration_register(link->handle, &reg);
+ OrigCorValue = reg.Value;
+
+ /* Reset Xilinx */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_COR;
+ DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
+ OrigCorValue);
+ reg.Value = COR_SOFT_RESET;
+ pcmcia_access_configuration_register(link->handle, &reg);
+ /* Need to wait for 20 ms for PCMCIA to finish reset. */
+
+ /* Restore original COR configuration index */
+ reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
+ pcmcia_access_configuration_register(link->handle, &reg);
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(lp, dev->base_addr, dev->dev_addr);
+ mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n", dev->name,
+ if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
+static int mace_open(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ MACEBANK(0);
+
+ netif_start_queue(dev);
+ nmclan_reset(dev);
+
+ return 0; /* Always succeed */
+} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
+static int mace_close(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
+
+ /* Mask off all interrupts from the MACE chip. */
+ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+
+ link->open--;
+ netif_stop_queue(dev);
+
+ return 0;
+} /* mace_close */
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+#ifdef PCMCIA_DEBUG
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return pc_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ pc_debug = level;
+}
+#endif /* PCMCIA_DEBUG */
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+#ifdef PCMCIA_DEBUG
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+#endif /* PCMCIA_DEBUG */
+};
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ According to /usr/src/linux/net/inet/dev.c, if _start_xmit
+ returns 0, the "packet is now solely the responsibility of the
+ driver." If _start_xmit returns non-zero, the "transmission
+ failed, put skb back into a list."
+---------------------------------------------------------------------------- */
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
+#if RESET_ON_TIMEOUT
+ printk("resetting card\n");
+ pcmcia_reset_card(link->handle, NULL);
+#else /* #if RESET_ON_TIMEOUT */
+ printk("NOT resetting card\n");
+#endif /* #if RESET_ON_TIMEOUT */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n",
+ dev->name, (long)skb->len);
+
+#if (!TX_INTERRUPTABLE)
+ /* Disable MACE TX interrupts. */
+ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
+ ioaddr + AM2150_MACE_BASE + MACE_IMR);
+ lp->tx_irq_disabled=1;
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ {
+ /* This block must not be interrupted by another transmit request!
+ mace_tx_timeout will take care of timer-based retransmissions from
+ the upper layers. The interrupt handler is guaranteed never to
+ service a transmit interrupt while we are in here.
+ */
+
+ lp->linux_stats.tx_bytes += skb->len;
+ lp->tx_free_frames--;
+
+ /* WARNING: Write the _exact_ number of bytes written in the header! */
+ /* Put out the word header [must be an outw()] . . . */
+ outw(skb->len, ioaddr + AM2150_XMT);
+ /* . . . and the packet [may be any combination of outw() and outb()] */
+ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
+ if (skb->len & 1) {
+ /* Odd byte transfer */
+ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
+ }
+
+ dev->trans_start = jiffies;
+
+#if MULTI_TX
+ if (lp->tx_free_frames > 0)
+ netif_start_queue(dev);
+#endif /* #if MULTI_TX */
+ }
+
+#if (!TX_INTERRUPTABLE)
+ /* Re-enable MACE TX interrupts. */
+ lp->tx_irq_disabled=0;
+ outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ dev_kfree_skb(skb);
+
+ return 0;
+} /* mace_start_xmit */
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
+static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ mace_private *lp;
+ kio_addr_t ioaddr;
+ int status;
+ int IntrCnt = MACE_MAX_IR_ITERATIONS;
+
+ if (dev == NULL) {
+ DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ if (lp->tx_irq_disabled) {
+ printk(KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
+ "[isr=%02X, imr=%02X]\n",
+ dev->name,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
+ /* WARNING: MACE_IR has been read! */
+ return IRQ_NONE;
+ }
+
+ if (!netif_device_present(dev)) {
+ DEBUG(2, "%s: interrupt from dead card\n", dev->name);
+ return IRQ_NONE;
+ }
+
+ do {
+ /* WARNING: MACE_IR is a READ/CLEAR port! */
+ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+
+ DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
+
+ if (status & MACE_IR_RCVINT) {
+ mace_rx(dev, MACE_MAX_RX_ITERATIONS);
+ }
+
+ if (status & MACE_IR_XMTINT) {
+ unsigned char fifofc;
+ unsigned char xmtrc;
+ unsigned char xmtfs;
+
+ fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
+ if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
+ lp->linux_stats.tx_errors++;
+ outb(0xFF, ioaddr + AM2150_XMT_SKIP);
+ }
+
+ /* Transmit Retry Count (XMTRC, reg 4) */
+ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
+ if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
+ lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
+
+ if (
+ (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
+ MACE_XMTFS_XMTSV /* Transmit Status Valid */
+ ) {
+ lp->mace_stats.xmtsv++;
+
+ if (xmtfs & ~MACE_XMTFS_XMTSV) {
+ if (xmtfs & MACE_XMTFS_UFLO) {
+ /* Underflow. Indicates that the Transmit FIFO emptied before
+ the end of frame was reached. */
+ lp->mace_stats.uflo++;
+ }
+ if (xmtfs & MACE_XMTFS_LCOL) {
+ /* Late Collision */
+ lp->mace_stats.lcol++;
+ }
+ if (xmtfs & MACE_XMTFS_MORE) {
+ /* MORE than one retry was needed */
+ lp->mace_stats.more++;
+ }
+ if (xmtfs & MACE_XMTFS_ONE) {
+ /* Exactly ONE retry occurred */
+ lp->mace_stats.one++;
+ }
+ if (xmtfs & MACE_XMTFS_DEFER) {
+ /* Transmission was deferred */
+ lp->mace_stats.defer++;
+ }
+ if (xmtfs & MACE_XMTFS_LCAR) {
+ /* Loss of carrier */
+ lp->mace_stats.lcar++;
+ }
+ if (xmtfs & MACE_XMTFS_RTRY) {
+ /* Retry error: transmit aborted after 16 attempts */
+ lp->mace_stats.rtry++;
+ }
+ } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
+
+ } /* if (xmtfs & MACE_XMTFS_XMTSV) */
+
+ lp->linux_stats.tx_packets++;
+ lp->tx_free_frames++;
+ netif_wake_queue(dev);
+ } /* if (status & MACE_IR_XMTINT) */
+
+ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
+ if (status & MACE_IR_JAB) {
+ /* Jabber Error. Excessive transmit duration (20-150ms). */
+ lp->mace_stats.jab++;
+ }
+ if (status & MACE_IR_BABL) {
+ /* Babble Error. >1518 bytes transmitted. */
+ lp->mace_stats.babl++;
+ }
+ if (status & MACE_IR_CERR) {
+ /* Collision Error. CERR indicates the absence of the
+ Signal Quality Error Test message after a packet
+ transmission. */
+ lp->mace_stats.cerr++;
+ }
+ if (status & MACE_IR_RCVCCO) {
+ /* Receive Collision Count Overflow */
+ lp->mace_stats.rcvcco++;
+ }
+ if (status & MACE_IR_RNTPCO) {
+ /* Runt Packet Count Overflow */
+ lp->mace_stats.rntpco++;
+ }
+ if (status & MACE_IR_MPCO) {
+ /* Missed Packet Count Overflow */
+ lp->mace_stats.mpco++;
+ }
+ } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
+
+ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
+
+ return IRQ_HANDLED;
+} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ lp->linux_stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ DEBUG(3, " receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = dev_alloc_skb(pkt_len+2);
+
+ if (skb != NULL) {
+ skb->dev = dev;
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb->tail-1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ dev->last_rx = jiffies;
+ lp->linux_stats.rx_packets++;
+ lp->linux_stats.rx_bytes += pkt_len; /* don't touch skb after netif_rx() */
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ lp->linux_stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
+static void pr_linux_stats(struct net_device_stats *pstats)
+{
+ DEBUG(2, "pr_linux_stats\n");
+ DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n",
+ (long)pstats->rx_packets, (long)pstats->tx_packets);
+ DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n",
+ (long)pstats->rx_errors, (long)pstats->tx_errors);
+ DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n",
+ (long)pstats->rx_dropped, (long)pstats->tx_dropped);
+ DEBUG(2, " multicast=%-7ld collisions=%ld\n",
+ (long)pstats->multicast, (long)pstats->collisions);
+
+ DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n",
+ (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
+ DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
+ (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
+ DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
+ (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
+
+ DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
+ (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
+ DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
+ (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
+ DEBUG(2, " tx_window_errors=%ld\n",
+ (long)pstats->tx_window_errors);
+} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
+static void pr_mace_stats(mace_statistics *pstats)
+{
+ DEBUG(2, "pr_mace_stats\n");
+
+ DEBUG(2, " xmtsv=%-7d uflo=%d\n",
+ pstats->xmtsv, pstats->uflo);
+ DEBUG(2, " lcol=%-7d more=%d\n",
+ pstats->lcol, pstats->more);
+ DEBUG(2, " one=%-7d defer=%d\n",
+ pstats->one, pstats->defer);
+ DEBUG(2, " lcar=%-7d rtry=%d\n",
+ pstats->lcar, pstats->rtry);
+
+ /* MACE_XMTRC */
+ DEBUG(2, " exdef=%-7d xmtrc=%d\n",
+ pstats->exdef, pstats->xmtrc);
+
+ /* RFS1--Receive Status (RCVSTS) */
+ DEBUG(2, " oflo=%-7d clsn=%d\n",
+ pstats->oflo, pstats->clsn);
+ DEBUG(2, " fram=%-7d fcs=%d\n",
+ pstats->fram, pstats->fcs);
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ /* RFS3--Receive Collision Count (RCVCC) */
+ DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n",
+ pstats->rfs_rntpc, pstats->rfs_rcvcc);
+
+ /* MACE_IR */
+ DEBUG(2, " jab=%-7d babl=%d\n",
+ pstats->jab, pstats->babl);
+ DEBUG(2, " cerr=%-7d rcvcco=%d\n",
+ pstats->cerr, pstats->rcvcco);
+ DEBUG(2, " rntpco=%-7d mpco=%d\n",
+ pstats->rntpco, pstats->mpco);
+
+ /* MACE_MPC */
+ DEBUG(2, " mpc=%d\n", pstats->mpc);
+
+ /* MACE_RNTPC */
+ DEBUG(2, " rntpc=%d\n", pstats->rntpc);
+
+ /* MACE_RCVCC */
+ DEBUG(2, " rcvcc=%d\n", pstats->rcvcc);
+
+} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register window 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that window 0 is always valid rather
+ than use a special window-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
+static void update_stats(kio_addr_t ioaddr, struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC);
+ lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
+ lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
+ /* At this point, mace_stats is fully updated for this call.
+ We may now update the linux_stats. */
+
+ /* The MACE has no equivalent for the linux_stats fields that are
+ commented out below. */
+
+ /* lp->linux_stats.multicast; */
+ lp->linux_stats.collisions =
+ lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
+ /* Collision: The MACE may retry sending a packet 15 times
+ before giving up. The retry count is in XMTRC.
+ Does each retry constitute a collision?
+ If so, why doesn't the RCVCC record these collisions? */
+
+ /* detailed rx_errors: */
+ lp->linux_stats.rx_length_errors =
+ lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
+ /* lp->linux_stats.rx_over_errors */
+ lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
+ lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
+ lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
+ lp->linux_stats.rx_missed_errors =
+ lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
+
+ /* detailed tx_errors */
+ lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
+ lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ /* LCAR usually results from bad cabling. */
+ lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
+ lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* lp->linux_stats.tx_window_errors; */
+
+ return;
+} /* update_stats */
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
+static struct net_device_stats *mace_get_stats(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ update_stats(dev->base_addr, dev);
+
+ DEBUG(1, "%s: updating the statistics.\n", dev->name);
+ pr_linux_stats(&lp->linux_stats);
+ pr_mace_stats(&lp->mace_stats);
+
+ return &lp->linux_stats;
+} /* net_device_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#ifdef BROKEN_MULTICAST
+
+static void updateCRC(int *CRC, int bit)
+{
+ int poly[]={
+ 1,1,1,0, 1,1,0,1,
+ 1,0,1,1, 1,0,0,0,
+ 1,0,0,0, 0,0,1,1,
+ 0,0,1,0, 0,0,0,0
+ }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
+ CRC generator polynomial. */
+
+ int j;
+
+ /* shift CRC and control bit (CRC[32]) */
+ for (j = 32; j > 0; j--)
+ CRC[j] = CRC[j-1];
+ CRC[0] = 0;
+
+ /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
+static void BuildLAF(int *ladrf, int *adr)
+{
+ int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
+
+ int i, byte; /* temporary array indices */
+ int hashcode; /* the output object */
+
+ CRC[32]=0;
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ updateCRC(CRC, (adr[byte] >> i) & 1);
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+
+ byte = hashcode >> 3;
+ ladrf[byte] |= (1 << (hashcode & 7));
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ printk(KERN_DEBUG " adr =");
+ for (i = 0; i < 6; i++)
+ printk(" %02X", adr[i]);
+ printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]"
+ " =", hashcode);
+ for (i = 0; i < 8; i++)
+ printk(" %02X", ladrf[i]);
+ printk("\n");
+ }
+#endif
+} /* BuildLAF */
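+
+#if 0
+/*
+ * Illustrative sketch only (never compiled): a compact restatement of
+ * updateCRC() and BuildLAF() above, with the 33-element bit array folded
+ * into a 32-bit word. example_mace_hash() is a hypothetical helper, not
+ * part of the driver; it yields the same 6-bit hash (0..63), which
+ * BuildLAF() then records as ladrf[hash >> 3] |= 1 << (hash & 7).
+ */
+static unsigned int example_mace_hash(const u_char *addr)
+{
+ unsigned int crc = 0x00000001; /* mirrors 'int CRC[33] = {1};' */
+ unsigned int hash = 0;
+ int byte, i;
+
+ for (byte = 0; byte < ETHER_ADDR_LEN; byte++) {
+ for (i = 0; i < 8; i++) { /* feed bits LSB first, as BuildLAF() does */
+ int bit = (addr[byte] >> i) & 1;
+ int msb = (crc >> 31) & 1; /* the bit shifted into CRC[32] */
+ crc <<= 1;
+ if (bit ^ msb)
+ crc ^= 0x04C11DB7; /* same polynomial as poly[] */
+ }
+ }
+ /* the hashcode is CRC[0..5], with CRC[0] as its most significant bit */
+ for (i = 0; i < 6; i++)
+ hash = (hash << 1) | ((crc >> i) & 1);
+ return hash;
+}
+#endif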
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ kio_addr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ DEBUG(1, "Attempt to restore multicast list detected.\n");
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int i;
+ struct dev_mc_list *dmi = dev->mc_list;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = dev->mc_count;
+
+ /* Set multicast_ladrf. */
+ if (lp->multicast_num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
+ for (i = 0; i < dev->mc_count; i++) {
+ memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ dmi = dmi->next;
+ BuildLAF(lp->multicast_ladrf, adr);
+ }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name,
+ lp->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = dev->mc_count;
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+static struct pcmcia_driver nmclan_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "nmclan_cs",
+ },
+ .attach = nmclan_attach,
+ .detach = nmclan_detach,
+};
+
+static int __init init_nmclan_cs(void)
+{
+ return pcmcia_register_driver(&nmclan_cs_driver);
+}
+
+static void __exit exit_nmclan_cs(void)
+{
+ pcmcia_unregister_driver(&nmclan_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_nmclan_cs);
+module_exit(exit_nmclan_cs);
diff --git a/drivers/net/pcmcia/ositech.h b/drivers/net/pcmcia/ositech.h
new file mode 100644
index 000000000000..4126efc355bd
--- /dev/null
+++ b/drivers/net/pcmcia/ositech.h
@@ -0,0 +1,358 @@
+/*
+ This file contains the firmware of Seven of Diamonds from OSITECH.
+ (Special thanks to Kevin MacPherson of OSITECH)
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+*/
+
+ static const u_char __Xilinx7OD[] = {
+ 0xFF, 0x04, 0xA0, 0x36, 0xF3, 0xEC, 0xFF, 0xFF, 0xFF, 0xDF, 0xFB, 0xFF,
+ 0xF3, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0x3F, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x7F, 0xFE, 0xFF,
+ 0xCE, 0xFE, 0xFE, 0xFE,
+ 0xFE, 0xDE, 0xBD, 0xDD, 0xFD, 0xFF, 0xFD, 0xCF, 0xF7, 0xBF, 0x7F, 0xFF,
+ 0x7F, 0x3F, 0xFE, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xFF, 0xFF, 0xBD, 0xB5, 0x7F, 0x7F, 0xBF, 0xBF,
+ 0x7F, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDE,
+ 0xFE, 0xFE, 0xFA, 0xDE,
+ 0xBD, 0xFD, 0xED, 0xFD, 0xFD, 0xCF, 0xEF, 0xEF, 0xEF, 0xEF, 0xC7, 0xDF,
+ 0xDF, 0xDF, 0xDF, 0xDF,
+ 0xFF, 0x7E, 0xFE, 0xFD, 0x7D, 0x6D, 0xEE, 0xFE, 0x7C, 0xFB, 0xF4, 0xFB,
+ 0xCF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xBB, 0x7F, 0xFF, 0x7F, 0xFF, 0xF7, 0xFF, 0x9E, 0xBF, 0x3B, 0xBF,
+ 0xBF, 0x7F, 0x7F, 0x7F,
+ 0x7E, 0x6F, 0xDF, 0xEF, 0xF5, 0xF6, 0xFD, 0xF6, 0xF5, 0xED, 0xEB, 0xFF,
+ 0xEF, 0xEF, 0xEF, 0x7E,
+ 0x7F, 0x7F, 0x6F, 0x7F, 0xFF, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0x1F, 0x1F, 0xEE, 0xFF, 0xBC,
+ 0xB7, 0xFF, 0xDF, 0xFF,
+ 0xDF, 0xEF, 0x3B, 0xE3, 0xD3, 0xFF, 0xFB, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xBA, 0xBF, 0x2D,
+ 0xDB, 0xBD, 0xFD, 0xDB, 0xDF, 0xFA, 0xFB, 0xFF, 0xEF, 0xFB, 0xDB, 0xF3,
+ 0xFF, 0xDF, 0xFD, 0x7F,
+ 0xEF, 0xFB, 0xFF, 0xFF, 0xBE, 0xBF, 0x27, 0xBA, 0xFE, 0xFB, 0xDF, 0xFF,
+ 0xF6, 0xFF, 0xFF, 0xEF,
+ 0xFB, 0xDB, 0xF3, 0xD9, 0x9A, 0x3F, 0xFF, 0xAF, 0xBF, 0xFF, 0xFF, 0xBE,
+ 0x3F, 0x37, 0xBD, 0x96,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE, 0xFB, 0xF3, 0xF3, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xFA, 0xBC, 0xAE, 0xFE, 0xBE, 0xFE, 0xBB, 0x7F, 0xFD, 0xFF,
+ 0x7F, 0xEF, 0xF7, 0xFB,
+ 0xBB, 0xD7, 0xF7, 0x7F, 0xFF, 0xF7, 0xFF, 0xFF, 0xF7, 0xBC, 0xED, 0xFD,
+ 0xBD, 0x9D, 0x7D, 0x7B,
+ 0xFB, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFE, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xAA, 0xB9, 0xBF, 0x8F, 0xBF, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0x7F, 0xCF,
+ 0xFB, 0xEB, 0xCB, 0xEB,
+ 0xEE, 0xFF, 0xFF, 0xD7, 0xFF, 0xFF, 0xFF, 0x3E, 0x33, 0x3F, 0x1C, 0x7C,
+ 0xFC, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xCF, 0xD3, 0xF3, 0xE3, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEB, 0xFE, 0x35,
+ 0x3F, 0x3D, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xEF, 0x6F, 0xE3,
+ 0xE3, 0xE3, 0xEF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF, 0xF7, 0xFE, 0x3E, 0x5E, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xAF, 0xCF, 0xF2, 0xCB, 0xCF, 0x8E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFC, 0x3E, 0x1F, 0x9E,
+ 0xAD, 0xFD, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xEF, 0xFF, 0xB3, 0xF7, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEE, 0xEB, 0xAB, 0xAF, 0x9F, 0xE3, 0x7F, 0xFF, 0xDE,
+ 0xFF, 0x7F, 0xEE, 0xFF,
+ 0xFF, 0xFB, 0x3A, 0xFA, 0xFF, 0xF2, 0x77, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF,
+ 0xFE, 0xBD, 0xAE, 0xDE,
+ 0x7D, 0x7D, 0xFD, 0xFF, 0xBF, 0xEE, 0xFF, 0xFD, 0xFF, 0xDB, 0xFB, 0xFF,
+ 0xF7, 0xEF, 0xFB, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0x2D, 0xAF, 0xB9, 0xFD, 0x79, 0xFB, 0xFA, 0xFF, 0xBF,
+ 0xEF, 0xFF, 0xFF, 0x91,
+ 0xFA, 0xFB, 0xDF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFC, 0xCF, 0x37, 0xBF,
+ 0xBF, 0xFF, 0x7F, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0xF3, 0xFB, 0xFB, 0xFF, 0xF5, 0xEF,
+ 0xFF, 0xFF, 0xF7, 0xFA,
+ 0xFF, 0xFF, 0xEE, 0xFA, 0xFE, 0xFB, 0x55, 0xDD, 0xFF, 0x7F, 0xAF, 0xFE,
+ 0xFF, 0xFB, 0xFB, 0xF5,
+ 0xFF, 0xF7, 0xEF, 0xFF, 0xFF, 0xFF, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B,
+ 0xFB, 0xAE, 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xDA, 0xB7, 0x61,
+ 0xFF, 0xB9, 0x59, 0xF3, 0x73, 0xF3, 0xDF, 0x7F, 0x6F, 0xDF, 0xEF, 0xF7,
+ 0xEB, 0xEB, 0xD7, 0xFF,
+ 0xD7, 0xFF, 0xFF, 0xF7, 0xFE, 0x7F, 0xFB, 0x3E, 0x38, 0x73, 0xF6, 0x7F,
+ 0xFC, 0xFF, 0xFF, 0xCF,
+ 0xFF, 0xB7, 0xFB, 0xB3, 0xB3, 0x67, 0xFF, 0xE7, 0xFD, 0xFF, 0xEF, 0xF6,
+ 0x7F, 0xB7, 0xBC, 0xF5,
+ 0x7B, 0xF6, 0xF7, 0xF5, 0xFF, 0xFF, 0xEF, 0xFF, 0xF7, 0xFF, 0xF7, 0xCE,
+ 0xE7, 0xFF, 0x9F, 0xFF,
+ 0xFF, 0xF5, 0xFE, 0x7D, 0xFF, 0x5F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFF, 0xF6,
+ 0xCB, 0xDB, 0xEE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFE, 0x7F, 0xBE,
+ 0x1E, 0x3E, 0xFE, 0xFF,
+ 0x7D, 0xFE, 0xFF, 0xFF, 0xEF, 0xBF, 0xE7, 0xFF, 0xE3, 0xE3, 0xFF, 0xDF,
+ 0xE7, 0xFF, 0xFF, 0xFF,
+ 0xB8, 0xEF, 0xB7, 0x2F, 0xEE, 0xFF, 0xDF, 0xFF, 0xBF, 0xFF, 0x7F, 0xEF,
+ 0xEB, 0xBF, 0xA3, 0xD3,
+ 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xFD, 0x3F, 0xCF, 0xFD,
+ 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFB, 0xBF, 0xBB, 0xBF, 0xDB, 0xFD, 0xFB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3E, 0xFE,
+ 0x3F, 0xBA, 0xBA, 0xFE, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xEF, 0xC3, 0x7F,
+ 0xB2, 0x9B, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0x3C, 0xFF, 0x3F, 0x3C, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xAF, 0xF3, 0xFE, 0xF3, 0xE3, 0xEB, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xF7,
+ 0x9A, 0xFE, 0xAF, 0x9E,
+ 0xBE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0x7B, 0xEF, 0xF7, 0xBF, 0xFB, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xBD, 0xFD, 0xBD, 0xDD, 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xF7, 0x9A, 0xFF,
+ 0x9F, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xCF, 0xF3, 0xFF, 0xEB, 0xFF, 0xEB, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xEF, 0xFE, 0xFF, 0x37, 0xFC, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xCF, 0xEF, 0xFD, 0xF3,
+ 0xFF, 0xEE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0xFD, 0x2F, 0xFD,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xCF, 0xFF, 0xF3, 0xBF, 0x69, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xFB, 0x9F, 0xFF, 0xBF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x87,
+ 0xFE, 0xDA, 0xEF, 0xCF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xBF, 0xEF, 0xEF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFD, 0xFF, 0x7B, 0xFF, 0xEB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEB, 0xF8, 0xFF, 0xEF,
+ 0xAF, 0xFF, 0xFF, 0xBD, 0xFF, 0xFF, 0xFF, 0x7F, 0xEE, 0x7F, 0xEF, 0xFF,
+ 0xBB, 0xFF, 0xBF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xF6, 0xFB, 0xBD, 0xFD, 0xDD, 0xF5, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xAF,
+ 0xFF, 0x5F, 0xF5, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF6,
+ 0xF3, 0xFF, 0xDE, 0xFE,
+ 0xEF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xDE, 0xDF, 0x5F, 0xDF,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xAF, 0xFF, 0xFF,
+ 0xEF, 0xED, 0xFF, 0xDF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xDA, 0xBD, 0xBE,
+ 0xAE, 0xFE, 0x7F, 0xFD,
+ 0xDF, 0xFF, 0xFF, 0x7F, 0xEF, 0xFF, 0xFB, 0xFB, 0xFB, 0x7F, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xBC, 0xFD, 0xBD, 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE,
+ 0xFF, 0xFF, 0xFD, 0xFF,
+ 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x9F, 0xBF, 0xBF, 0xCF,
+ 0x7F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFF, 0xEB, 0xEB, 0xEB, 0xFF, 0xD7, 0xFE, 0xFF, 0xFF,
+ 0xBF, 0xE7, 0xFE, 0xBF,
+ 0x7F, 0xFC, 0xFF, 0xFF, 0xED, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0xFF, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0xDD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBD, 0xDF, 0x9D, 0xFD, 0xDF, 0xB9,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFB, 0xEF, 0xEB, 0xFF, 0xDE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9F, 0xFF, 0xFC,
+ 0xFE, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xDF, 0xFA, 0xCD, 0xCF,
+ 0xBF, 0x9F, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE, 0xBF, 0xFF, 0xDF, 0xEF, 0x5F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0x6F, 0xFF,
+ 0xBB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7E, 0xFF,
+ 0x5F, 0xFF, 0xBF, 0xBF,
+ 0xF9, 0xFF, 0xFF, 0xFF, 0x7F, 0x6E, 0x7B, 0xFF, 0xEF, 0xFD, 0xEB, 0xDF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xB6, 0x3E, 0xFC, 0xFD, 0xBF, 0x7E, 0xFB, 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xEF, 0xF7, 0xF3, 0xF7,
+ 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0x35, 0x79, 0xFF,
+ 0xBF, 0xFC, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xFB, 0x53, 0xDF, 0xFF, 0xEB, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBC,
+ 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xF5,
+ 0xFF, 0xF7, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xAA, 0xEE, 0xFE, 0x3F, 0x7D,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xAF, 0x77, 0xFB, 0xFB, 0xFF, 0xFB, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xBE, 0xBD, 0xBD,
+ 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFC,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x9A, 0xD9, 0xB8, 0xFF, 0xFF, 0x79, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xCF,
+ 0xFB, 0xFF, 0xEB, 0xFF, 0xEB, 0xD7, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xDE,
+ 0xF8, 0xFB, 0xFE, 0x3F,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xAD, 0xBF, 0xFA, 0xFF, 0x73,
+ 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3A, 0xF5, 0xB7, 0xFC, 0x3F, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xEF, 0xF3, 0xFF,
+ 0xBF, 0xFE, 0xF3, 0x9F, 0xFE, 0xFF, 0xFF, 0xFF, 0xF7, 0x3E, 0xFF, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xD3, 0xFE, 0xDB, 0xFF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x3E, 0xFF, 0xBF, 0xFF, 0x7F, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F,
+ 0xF3, 0xFF, 0xED, 0xFF,
+ 0xF7, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF6, 0x3C, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x9F, 0xEF, 0xEF, 0xD1, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7E, 0xBF,
+ 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBB, 0xEF, 0xDF, 0xF1,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE, 0x3E, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBF,
+ 0xEF, 0xFD, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFC, 0x3E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2E, 0xEF, 0xF3, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xAF, 0xFB,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xF2, 0xD6, 0xED,
+ 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x92, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0xEB, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFE, 0x2E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x4F, 0xEF, 0xF3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0x3C, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xCE,
+ 0xC3, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x5D, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xCF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xEE, 0x3E, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xDF, 0xE2, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF6, 0xBE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7F, 0xEE,
+ 0x5F, 0xE6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E,
+ 0x7D, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xF7, 0x36, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xD3, 0xF6,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xEE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xEF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE,
+ 0xFB, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xD6, 0xFD, 0xBD, 0xBD, 0xBD,
+ 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF, 0x7E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBA, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xEB, 0x6B,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x4F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0x3E, 0x6E, 0xFC, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xC3, 0xC9, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x3E, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFB,
+ 0xD5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF6, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFE, 0xFF, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xFA, 0xEF, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFF, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xA7, 0xFF, 0xFC, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xFE, 0xAE, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B,
+ 0x7B, 0x7B, 0xFB, 0xAF,
+ 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCA,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x6F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xCF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xF2, 0xFC,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7E, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xEF, 0xDD, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFA, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9C, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB,
+ 0xAE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7A, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x6F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xEB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9E, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xCB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xBE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFB, 0xAF, 0x7F, 0xFF,
+ 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBC, 0xBD,
+ 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFE, 0xFF, 0x9F, 0x9F,
+ 0x9F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0xFF, 0xEF, 0xDF, 0xDF, 0xDF, 0xDF, 0xCF, 0xB7, 0xBF, 0xBF,
+ 0xBF, 0xBF, 0xFF, 0xBC,
+ 0xB9, 0x9D, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xEF, 0xD7,
+ 0xF5, 0xF3, 0xF1, 0xD1,
+ 0x65, 0xE3, 0xE3, 0xE3, 0xA3, 0xFF, 0xFE, 0x7F, 0xFE, 0xDE, 0xDE, 0xFF,
+ 0xBD, 0xBD, 0xBD, 0xBD,
+ 0xDF, 0xEF, 0xFB, 0xF7, 0xF3, 0xF3, 0xF3, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+ 0xFB, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+
+ };
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
new file mode 100644
index 000000000000..b0126304ca08
--- /dev/null
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -0,0 +1,1659 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for NS8390-based cards
+
+ This driver supports the D-Link DE-650 and Linksys EthernetCard
+ cards, the newer D-Link and Linksys combo cards, Accton EN2212
+ cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory
+ mode, and the IBM Credit Card Adapter, the NE4100, the Thomas
+ Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory
+ mode. It will also handle the Socket EA card in either mode.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ pcnet_cs.c 1.153 2003/11/09 18:53:09
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Based also on Keith Moore's changes to Don Becker's code, for IBM
+ CCAE support. Drivers merged back together, and shared-memory
+ Socket EA support added, by Ken Raeburn, September 1995.
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <../drivers/net/8390.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define PCNET_CMD 0x00
+#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */
+
+#define PCNET_START_PG 0x40 /* First page of TX buffer */
+#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Socket EA cards have a larger packet buffer */
+#define SOCKET_START_PG 0x01
+#define SOCKET_STOP_PG 0xff
+
+#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(if_port, 1); /* Transceiver type */
+INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */
+INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */
+INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */
+INT_MODULE_PARM(delay_time, 4); /* in usec */
+INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */
+INT_MODULE_PARM(full_duplex, 0); /* full duplex? */
+
+/* Ugh! Let the user hardwire the hardware address for odd cards */
+static int hw_addr[6] = { 0, /* ... */ };
+module_param_array(hw_addr, int, NULL, 0);
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev);
+static void pcnet_config(dev_link_t *link);
+static void pcnet_release(dev_link_t *link);
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int pcnet_open(struct net_device *dev);
+static int pcnet_close(struct net_device *dev);
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void pcnet_reset_8390(struct net_device *dev);
+static int set_config(struct net_device *dev, struct ifmap *map);
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset);
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg);
+
+static dev_link_t *pcnet_attach(void);
+static void pcnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "pcnet_cs";
+static dev_link_t *dev_list;
+
+/*====================================================================*/
+
+typedef struct hw_info_t {
+ u_int offset;
+ u_char a0, a1, a2;
+ u_int flags;
+} hw_info_t;
+
+#define DELAY_OUTPUT 0x01
+#define HAS_MISC_REG 0x02
+#define USE_BIG_BUF 0x04
+#define HAS_IBM_MISC 0x08
+#define IS_DL10019 0x10
+#define IS_DL10022 0x20
+#define HAS_MII 0x40
+#define USE_SHMEM 0x80 /* autodetected */
+
+#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */
+#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */
+#define MII_PHYID_REV_MASK 0xfffffff0
+#define MII_PHYID_REG1 0x02
+#define MII_PHYID_REG2 0x03
+
+static hw_info_t hw_info[] = {
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
+ { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
+ { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
+ DELAY_OUTPUT | HAS_IBM_MISC },
+ { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 },
+ { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 },
+ { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 },
+ { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 },
+ { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 },
+ { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 },
+ { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 },
+ { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 },
+ { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 },
+ { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 },
+ { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 },
+ { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 },
+ { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
+ { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
+ { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
+ { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 },
+ { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b,
+ DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF },
+ { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 },
+ { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 },
+ { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 },
+ { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 },
+ { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 }
+};
+
+#define NR_INFO (sizeof(hw_info)/sizeof(hw_info_t))
+
+static hw_info_t default_info = { 0, 0, 0, 0, 0 };
+static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
+static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
+
+typedef struct pcnet_dev_t {
+ dev_link_t link;
+ dev_node_t node;
+ u_int flags;
+ void __iomem *base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_char phy_id;
+ u_char eth_phy, pna_phy;
+ u_short link_status;
+ u_long mii_reset;
+} pcnet_dev_t;
+
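+/* The pcnet-specific state lives in the extra space __alloc_ei_netdev()
+   reserves past the 8390 ei_device area; PRIV() skips over that area. */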
+static inline pcnet_dev_t *PRIV(struct net_device *dev)
+{
+ char *p = netdev_priv(dev);
+ return (pcnet_dev_t *)(p + sizeof(struct ei_device));
+}
+
+/*======================================================================
+
+ pcnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *pcnet_attach(void)
+{
+ pcnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int ret;
+
+ DEBUG(0, "pcnet_attach()\n");
+
+ /* Create new ethernet device */
+ dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
+ if (!dev) return NULL;
+ info = PRIV(dev);
+ link = &info->link;
+ link->priv = dev;
+
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ SET_MODULE_OWNER(dev);
+ dev->open = &pcnet_open;
+ dev->stop = &pcnet_close;
+ dev->set_config = &set_config;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &pcnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ pcnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* pcnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void pcnet_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "pcnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ pcnet_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* pcnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address, for card types that
+ encode this information in their CIS.
+
+======================================================================*/
+
+static hw_info_t *get_hwinfo(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ win_req_t req;
+ memreq_t mem;
+ u_char __iomem *base, *virt;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ i = pcmcia_request_window(&link->handle, &req, &link->win);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return NULL;
+ }
+
+ virt = ioremap(req.Base, req.Size);
+ mem.Page = 0;
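+ /* Slide the window across each candidate offset and compare the first
+ three address bytes (stored at every other location) with the table. */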
+ for (i = 0; i < NR_INFO; i++) {
+ mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
+ pcmcia_map_mem_page(link->win, &mem);
+ base = &virt[hw_info[i].offset & (req.Size-1)];
+ if ((readb(base+0) == hw_info[i].a0) &&
+ (readb(base+2) == hw_info[i].a1) &&
+ (readb(base+4) == hw_info[i].a2))
+ break;
+ }
+ if (i < NR_INFO) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ }
+
+ iounmap(virt);
+ j = pcmcia_release_window(link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i < NR_INFO) ? hw_info+i : NULL;
+} /* get_hwinfo */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+ It checks the address against a list of known types, then falls
+ back to a simple NE2000 clone signature check.
+
+======================================================================*/
+
+static hw_info_t *get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr = dev->base_addr;
+ u_char prom[32];
+ int i, j;
+
+ /* This is lifted straight from drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0 */
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ pcnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 32; i++)
+ prom[i] = inb(ioaddr + PCNET_DATAPORT);
+ for (i = 0; i < NR_INFO; i++) {
+ if ((prom[0] == hw_info[i].a0) &&
+ (prom[2] == hw_info[i].a1) &&
+ (prom[4] == hw_info[i].a2))
+ break;
+ }
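+ /* 0x57 ('W') in the last words of the PROM is the classic NE2000
+ clone signature. */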
+ if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = prom[j<<1];
+ return (i < NR_INFO) ? hw_info+i : &default_info;
+ }
+ return NULL;
+} /* get_prom */
+
+/*======================================================================
+
+ For DL10019 based cards, like the Linksys EtherFast
+
+======================================================================*/
+
+static hw_info_t *get_dl10019(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+ u_char sum;
+
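+ /* The station address sits at 0x14-0x19; the eight bytes at 0x14-0x1b
+ must sum to 0xff for a DL10019/DL10022 to be present. */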
+ for (sum = 0, i = 0x14; i < 0x1c; i++)
+ sum += inb_p(dev->base_addr + i);
+ if (sum != 0xff)
+ return NULL;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ i = inb(dev->base_addr + 0x1f);
+ return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
+}
+
+/*======================================================================
+
+ For Asix AX88190 based cards
+
+======================================================================*/
+
+static hw_info_t *get_ax88190(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return NULL;
+
+ outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */
+ outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */
+ outb_p(0x04, ioaddr + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + PCNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
+ printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
+ return NULL;
+}
+
+/*======================================================================
+
+ This should be totally unnecessary... but when we can't figure
+ out the hardware address any other way, we'll let the user hard
+ wire it when the module is initialized.
+
+======================================================================*/
+
+static hw_info_t *get_hwired(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (hw_addr[i] != 0) break;
+ if (i == 6)
+ return NULL;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = hw_addr[i];
+
+ return &default_info;
+} /* get_hwired */
+
+/*======================================================================
+
+ pcnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
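+ /* No base requested: probe 0x20-aligned windows, covering the 0x300
+ range first (the XOR with 0x300 walks 0x300-0x3ff, then lower). */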
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = pcmcia_request_io(link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return pcmcia_request_io(link->handle, &link->io);
+ }
+}
+
+static void pcnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = PRIV(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
+ int manfid = 0, prodid = 0, has_shmem = 0;
+ u_short buf[64];
+ config_info_t conf;
+ hw_info_t *hw_info;
+
+ DEBUG(0, "pcnet_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
+ link->conf.Vcc = conf.Vcc;
+
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
+ (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) {
+ manfid = le16_to_cpu(buf[0]);
+ prodid = le16_to_cpu(buf[1]);
+ }
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
+ pcmcia_parse_tuple(handle, &tuple, &parse) != 0 ||
+ cfg->index == 0 || cfg->io.nwin == 0)
+ goto next_entry;
+
+ link->conf.ConfigIndex = cfg->index;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ has_shmem = ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000));
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((manfid == MANFID_IBM) &&
+ (prodid == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
+ } else {
+ dev->if_port = 0;
+ }
+
+ hw_info = get_hwinfo(link);
+ if (hw_info == NULL)
+ hw_info = get_prom(link);
+ if (hw_info == NULL)
+ hw_info = get_dl10019(link);
+ if (hw_info == NULL)
+ hw_info = get_ax88190(link);
+ if (hw_info == NULL)
+ hw_info = get_hwired(link);
+
+ if (hw_info == NULL) {
+ printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ goto failed;
+ }
+
+ info->flags = hw_info->flags;
+ /* Check for user overrides */
+ info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
+ if ((manfid == MANFID_SOCKET) &&
+ ((prodid == PRODID_SOCKET_LPE) ||
+ (prodid == PRODID_SOCKET_LPE_CF) ||
+ (prodid == PRODID_SOCKET_EIO)))
+ info->flags &= ~USE_BIG_BUF;
+ if (!use_big_buf)
+ info->flags &= ~USE_BIG_BUF;
+
+ if (info->flags & USE_BIG_BUF) {
+ start_pg = SOCKET_START_PG;
+ stop_pg = SOCKET_STOP_PG;
+ cm_offset = 0x10000;
+ } else {
+ start_pg = PCNET_START_PG;
+ stop_pg = PCNET_STOP_PG;
+ cm_offset = 0;
+ }
+
+ /* has_shmem is ignored if use_shmem != -1 */
+ if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) ||
+ (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0))
+ setup_dma_config(link, start_pg, stop_pg);
+
+ ei_status.name = "NE2000";
+ ei_status.word16 = 1;
+ ei_status.reset_8390 = &pcnet_reset_8390;
+
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ if (info->flags & (IS_DL10019|IS_DL10022)) {
+ u_char id = inb(dev->base_addr + 0x1a);
+ dev->do_ioctl = &ei_ioctl;
+ mii_phy_probe(dev);
+ if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
+ info->eth_phy = 0;
+ }
+
+ link->dev = &info->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto failed;
+ }
+
+ strcpy(info->node.dev_name, dev->name);
+
+ if (info->flags & (IS_DL10019|IS_DL10022)) {
+ u_char id = inb(dev->base_addr + 0x1a);
+ printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
+ dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+ if (info->pna_phy)
+ printk("PNA, ");
+ } else {
+ printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+ }
+ printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ if (info->flags & USE_SHMEM)
+ printk (" mem %#5lx,", dev->mem_start);
+ if (info->flags & HAS_MISC_REG)
+ printk(" %s xcvr,", if_names[dev->if_port]);
+ printk(" hw_addr ");
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ pcnet_release(link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* pcnet_config */
+
+/*======================================================================
+
+ After a card is removed, pcnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void pcnet_release(dev_link_t *link)
+{
+ pcnet_dev_t *info = PRIV(link->priv);
+
+ DEBUG(0, "pcnet_release(0x%p)\n", link);
+
+ if (info->flags & USE_SHMEM) {
+ iounmap(info->base);
+ pcmcia_release_window(link->win);
+ }
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(2, "pcnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ pcnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* pcnet_event */
+
+/*======================================================================
+
+ MII interface support for DL10019 and DL10022 based cards
+
+ On the DL10019, the MII IO direction bit is 0x10; on the DL10022
+ it is 0x20. Setting both bits seems to work on both card types.
+
+======================================================================*/
+
+#define DLINK_GPIO 0x1c
+#define DLINK_DIAG 0x1d
+#define DLINK_EEPROM 0x1e
+
+#define MDIO_SHIFT_CLK 0x80
+#define MDIO_DATA_OUT 0x40
+#define MDIO_DIR_WRITE 0x30
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x10
+#define MDIO_MASK 0x0f
+
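+/* Establish sync by clocking out 32 one bits (the MDIO preamble) before
+   each management frame. */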
+static void mdio_sync(kio_addr_t addr)
+{
+ int bits, mask = inb(addr) & MDIO_MASK;
+ for (bits = 0; bits < 32; bits++) {
+ outb(mask | MDIO_DATA_WRITE1, addr);
+ outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
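+/* Bit-bang an MII management read: shift out the start bits, read opcode,
+   PHY and register address, then clock the 16-bit value back in. */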
+static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+{
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(mask, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(mask, addr);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static void mdio_reset(kio_addr_t addr, int phy_id)
+{
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x00, addr);
+}
+
+/*======================================================================
+
+ EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP 0x40
+#define EE_ASIC 0x10
+#define EE_CS 0x08
+#define EE_CK 0x04
+#define EE_DO 0x02
+#define EE_DI 0x01
+#define EE_ADOT 0x01 /* DataOut for ASIC */
+#define EE_READ_CMD 0x06
+
+#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
+
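+/* Bit-bang a 16-bit word out of the serial EEPROM: shift the read command
+   and address out on EE_DO, clock the data back in on EE_DI. */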
+static int read_eeprom(kio_addr_t ioaddr, int location)
+{
+ int i, retval = 0;
+ kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ outb(0, ee_addr);
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+ outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+ }
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+ outb_p(EE_EEP|EE_CS, ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(0, ee_addr);
+ return retval;
+}
+
+/*
+ The internal ASIC registers can be changed by EEPROM READ access
+ with the EE_ASIC bit set.
+ In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+static void write_asic(kio_addr_t ioaddr, int location, short asic_data)
+{
+ int i;
+ kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ short dataval;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ asic_data |= read_eeprom(ioaddr, location);
+
+ outb(0, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+ read_cmd = read_cmd >> 1;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ }
+ /* sync */
+ outb(EE_ASIC|EE_CS, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+ outb(EE_ASIC|EE_CS, ee_addr);
+
+ for (i = 15; i >= 0; i--) {
+ dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ }
+
+ /* Terminate the ASIC access. */
+ outb(EE_ASIC|EE_DI, ee_addr);
+ outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
+ outb(EE_ASIC|EE_DI, ee_addr);
+
+ outb(0, ee_addr);
+}
+
+/*====================================================================*/
+
+static void set_misc_reg(struct net_device *dev)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = PRIV(dev);
+ u_char tmp;
+
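+ /* Misc register: bit 0 selects the 10base2 transceiver, bit 1 the
+ large packet buffer, bit 3 the IBM-specific enable. */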
+ if (info->flags & HAS_MISC_REG) {
+ tmp = inb_p(nic_base + PCNET_MISC) & ~3;
+ if (dev->if_port == 2)
+ tmp |= 1;
+ if (info->flags & USE_BIG_BUF)
+ tmp |= 2;
+ if (info->flags & HAS_IBM_MISC)
+ tmp |= 8;
+ outb_p(tmp, nic_base + PCNET_MISC);
+ }
+ if (info->flags & IS_DL10022) {
+ if (info->flags & HAS_MII) {
+ mdio_reset(nic_base + DLINK_GPIO, info->eth_phy);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ info->mii_reset = jiffies;
+ } else {
+ outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
+ }
+ }
+}
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ int i;
+ u_int tmp, phyid;
+
+ for (i = 31; i >= 0; i--) {
+ tmp = mdio_read(mii_addr, i, 1);
+ if ((tmp == 0) || (tmp == 0xffff))
+ continue;
+ tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
+ phyid = tmp << 16;
+ phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
+ phyid &= MII_PHYID_REV_MASK;
+ DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+ if (phyid == AM79C9XX_HOME_PHY) {
+ info->pna_phy = i;
+ } else if (phyid != AM79C9XX_ETH_PHY) {
+ info->eth_phy = i;
+ }
+ }
+}
+
+static int pcnet_open(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ set_misc_reg(dev);
+ request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+
+ info->phy_id = info->eth_phy;
+ info->link_status = 0x00;
+ init_timer(&info->watchdog);
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)dev;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ei_open(dev);
+} /* pcnet_open */
+
+/*====================================================================*/
+
+static int pcnet_close(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_close('%s')\n", dev->name);
+
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&info->watchdog);
+
+ return 0;
+} /* pcnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void pcnet_reset_8390(struct net_device *dev)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
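+ /* Reading PCNET_RESET triggers the reset; writing the value back clears it. */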
+ outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
+ dev->name);
+ set_misc_reg(dev);
+
+} /* pcnet_reset_8390 */
+
+/*====================================================================*/
+
+static int set_config(struct net_device *dev, struct ifmap *map)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (!(info->flags & HAS_MISC_REG))
+ return -EOPNOTSUPP;
+ else if ((map->port < 1) || (map->port > 2))
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ NS8390_init(dev, 1);
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ pcnet_dev_t *info = PRIV(dev);
+ irqreturn_t ret = ei_interrupt(irq, dev_id, regs);
+
+ if (ret == IRQ_HANDLED)
+ info->stale = 0;
+ return ret;
+}
+
+static void ei_watchdog(u_long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ pcnet_dev_t *info = PRIV(dev);
+ kio_addr_t nic_base = dev->base_addr;
+ kio_addr_t mii_addr = nic_base + DLINK_GPIO;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD);
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (!(info->flags & HAS_MII))
+ goto reschedule;
+
+ mdio_read(mii_addr, info->phy_id, 1);
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ if (info->eth_phy) {
+ info->phy_id = info->eth_phy = 0;
+ } else {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->flags &= ~HAS_MII;
+ }
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link && (info->flags & IS_DL10022)) {
+ /* Disable collision detection on full duplex links */
+ outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
+ } else if (link && (info->flags & IS_DL10019)) {
+ /* Disable collision detection on full duplex links */
+ write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
+ }
+ if (link) {
+ if (info->phy_id == info->eth_phy) {
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not "
+ "autonegotiate\n", dev->name);
+ }
+ NS8390_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+ if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) {
+ link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004;
+ if (((info->phy_id == info->pna_phy) && link) ||
+ ((info->phy_id != info->pna_phy) && !link)) {
+ /* isolate this MII and try flipping to the other one */
+ mdio_write(mii_addr, info->phy_id, 0, 0x0400);
+ info->phy_id ^= info->pna_phy ^ info->eth_phy;
+ printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ mdio_write(mii_addr, info->phy_id, 0,
+ (info->phy_id == info->eth_phy) ? 0x1000 : 0);
+ info->link_status = 0;
+ info->mii_reset = jiffies;
+ }
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "pcnet_cs");
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/*====================================================================*/
+
+
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ u16 *data = (u16 *)&rq->ifr_ifru;
+ kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data[0] = info->phy_id;
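+ /* fall through to also read the requested register */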
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
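+/* Pull in the 4-byte 8390 packet header for the given ring page with a
+   remote-DMA read through the data port. */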
+static void dma_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ kio_addr_t nic_base = dev->base_addr;
+
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static void dma_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+
+    /* This was for the ALPHA version only, but enough people have
+       encountered problems that it is still here. */
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_NOTICE "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+} /* dma_block_input */
+
+/*====================================================================*/
+
+static void dma_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ kio_addr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = PRIV(dev);
+#ifdef PCMCIA_DEBUG
+ int retries = 0;
+#endif
+ u_long dma_start;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);
+
+#ifdef PCMCIA_DEBUG
+ retry:
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
+ outsw(nic_base + PCNET_DATAPORT, buf, count>>1);
+
+ dma_start = jiffies;
+
+#ifdef PCMCIA_DEBUG
+  /* This was for the ALPHA version only, but enough people have
+     encountered problems that it is still here. */
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
+ printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
+ dev->name);
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ if (info->flags & DELAY_OUTPUT)
+ udelay((long)delay_time);
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg)
+{
+ struct net_device *dev = link->priv;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = stop_pg;
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &dma_get_8390_hdr;
+ ei_status.block_input = &dma_block_input;
+ ei_status.block_output = &dma_block_output;
+
+ return 0;
+}
+
+/*====================================================================*/
+
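+/* 16-bit copies between the shared-memory window and kernel buffers.
+   An odd trailing byte is handled by masking or read-modify-write, so
+   that only word accesses ever hit the card. */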
+static void copyin(void *dest, void __iomem *src, int c)
+{
+ u_short *d = dest;
+ u_short __iomem *s = src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { *d++ = __raw_readw(s++); } while (--c);
+ }
+ /* get last byte by fetching a word and masking */
+ if (odd)
+ *((u_char *)d) = readw(s) & 0xff;
+}
+
+static void copyout(void __iomem *dest, const void *src, int c)
+{
+ u_short __iomem *d = dest;
+ const u_short *s = src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { __raw_writew(*s++, d++); } while (--c);
+ }
+ /* copy last byte doing a read-modify-write */
+ if (odd)
+ writew((readw(d) & 0xff00) | *(u_char *)s, d);
+}
+
+/*====================================================================*/
+
+static void shmem_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ + (ring_page << 8)
+ - (ei_status.rx_start_page << 8);
+
+ copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ + ring_offset
+ - (ei_status.rx_start_page << 8);
+ char *buf = skb->data;
+
+ if (xfer_start + count > (void __iomem *)ei_status.rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = (void __iomem *)ei_status.rmem_end - xfer_start;
+ copyin(buf, xfer_start, semi_count);
+ buf += semi_count;
+ xfer_start = ei_status.mem + (TX_PAGES<<8);
+ count -= semi_count;
+ }
+ copyin(buf, xfer_start, count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ void __iomem *shmem = ei_status.mem + (start_page << 8);
+ shmem -= ei_status.tx_start_page << 8;
+ copyout(shmem, buf, count);
+}
+
+/*====================================================================*/
+
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset)
+{
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = PRIV(dev);
+ win_req_t req;
+ memreq_t mem;
+ int i, window_size, offset, last_ret, last_fn;
+
+ window_size = (stop_pg - start_pg) << 8;
+ if (window_size > 32 * 1024)
+ window_size = 32 * 1024;
+
+ /* Make sure it's a power of two. */
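+    /* Adding the lowest set bit each pass rounds the size up to the
+       next power of two (e.g. a 0x6000 byte request becomes 0x8000). */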
+ while ((window_size & (window_size - 1)) != 0)
+ window_size += window_size & ~(window_size - 1);
+
+ /* Allocate a memory window */
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Attributes |= WIN_USE_WAIT;
+ req.Base = 0; req.Size = window_size;
+ req.AccessSpeed = mem_speed;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+
+ mem.CardOffset = (start_pg << 8) + cm_offset;
+ offset = mem.CardOffset % window_size;
+ mem.CardOffset -= offset;
+ mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
+
+ /* Try scribbling on the buffer */
+ info->base = ioremap(req.Base, window_size);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ __raw_writew((i>>1), info->base+offset+i);
+ udelay(100);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ if (__raw_readw(info->base+offset+i) != (i>>1)) break;
+ pcnet_reset_8390(dev);
+ if (i != (TX_PAGES<<8)) {
+ iounmap(info->base);
+ pcmcia_release_window(link->win);
+ info->base = NULL; link->win = NULL;
+ goto failed;
+ }
+
+ ei_status.mem = info->base + offset;
+ dev->mem_start = (u_long)ei_status.mem;
+ dev->mem_end = ei_status.rmem_end = (u_long)info->base + req.Size;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &shmem_get_8390_hdr;
+ ei_status.block_input = &shmem_block_input;
+ ei_status.block_output = &shmem_block_output;
+
+ info->flags |= USE_SHMEM;
+ return 0;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ return 1;
+}
+
+/*====================================================================*/
+
+static struct pcmcia_driver pcnet_driver = {
+ .drv = {
+ .name = "pcnet_cs",
+ },
+ .attach = pcnet_attach,
+ .detach = pcnet_detach,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_pcnet_cs(void)
+{
+ return pcmcia_register_driver(&pcnet_driver);
+}
+
+static void __exit exit_pcnet_cs(void)
+{
+ DEBUG(0, "pcnet_cs: unloading\n");
+ pcmcia_unregister_driver(&pcnet_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_pcnet_cs);
+module_exit(exit_pcnet_cs);
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
new file mode 100644
index 000000000000..85a152173148
--- /dev/null
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -0,0 +1,2260 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for SMC91c92-based cards.
+
+ This driver supports Megahertz PCMCIA ethernet cards; and
+ Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem
+ multifunction cards.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ smc91c92_cs.c 1.122 2002/10/25 06:26:39
+
+ This driver contains code written by Donald Becker
+ (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au),
+ David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman
+ (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of
+ Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've
+ incorporated some parts of his driver here. I (Dave) wrote most
+ of the PCMCIA glue code, and the Ositech support code. Kelly
+ Stephens (kstephen@holli.com) added support for the Motorola
+ Mariner, with help from Allen Brost.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* Ositech Seven of Diamonds firmware */
+#include "ositech.h"
+
+/*====================================================================*/
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/*
+ Transceiver/media type.
+ 0 = auto
+ 1 = 10baseT (and autoselect if #define AUTOSELECT),
+ 2 = AUI/10base2,
+*/
+INT_MODULE_PARM(if_port, 0);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+static const char *version =
+"smc91c92_cs.c 0.09 1996/8/4 Donald Becker, becker@scyld.com.\n";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+#define DRV_NAME "smc91c92_cs"
+#define DRV_VERSION "1.122"
+
+/*====================================================================*/
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK 4
+
+/* Times to check the chip before concluding that it doesn't
+   currently have room for another Tx packet. */
+#define MEMORY_WAIT_TIME 8
+
+static dev_info_t dev_info = "smc91c92_cs";
+
+static dev_link_t *dev_list;
+
+struct smc_private {
+ dev_link_t link;
+ spinlock_t lock;
+ u_short manfid;
+ u_short cardid;
+ struct net_device_stats stats;
+ dev_node_t node;
+ struct sk_buff *saved_skb;
+ int packets_waiting;
+ void __iomem *base;
+ u_short cfg;
+ struct timer_list media;
+ int watchdog, tx_err;
+ u_short media_status;
+ u_short fast_poll;
+ u_short link_status;
+ struct mii_if_info mii_if;
+ int duplex;
+ int rx_ovrn;
+};
+
+/* Special definitions for Megahertz multifunction cards */
+#define MEGAHERTZ_ISR 0x0380
+
+/* Special function registers for Motorola Mariner */
+#define MOT_LAN 0x0000
+#define MOT_UART 0x0020
+#define MOT_EEPROM 0x20
+
+#define MOT_NORMAL \
+(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA)
+
+/* Special function registers for Ositech cards */
+#define OSITECH_AUI_CTL 0x0c
+#define OSITECH_PWRDOWN 0x0d
+#define OSITECH_RESET 0x0e
+#define OSITECH_ISR 0x0f
+#define OSITECH_AUI_PWR 0x0c
+#define OSITECH_RESET_ISR 0x0e
+
+#define OSI_AUI_PWR 0x40
+#define OSI_LAN_PWRDOWN 0x02
+#define OSI_MODEM_PWRDOWN 0x01
+#define OSI_LAN_RESET 0x02
+#define OSI_MODEM_RESET 0x01
+
+/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */
+#define BANK_SELECT 14 /* Window select register. */
+#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); }
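+/* All of the register offsets below are relative to a 16-byte I/O
+   window; which bank offsets 0-13 refer to is chosen by writing the
+   BANK_SELECT register. */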
+
+/* Bank 0 registers. */
+#define TCR 0 /* transmit control register */
+#define TCR_CLEAR 0 /* do NOTHING */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */
+#define TCR_MONCSN 0x0400 /* Monitor Carrier. */
+#define TCR_FDUPLX 0x0800 /* Full duplex mode. */
+#define TCR_NORMAL (TCR_ENABLE | TCR_PAD_EN)
+
+#define EPH 2 /* Ethernet Protocol Handler report. */
+#define EPH_TX_SUC 0x0001
+#define EPH_SNGLCOL 0x0002
+#define EPH_MULCOL 0x0004
+#define EPH_LTX_MULT 0x0008
+#define EPH_16COL 0x0010
+#define EPH_SQET 0x0020
+#define EPH_LTX_BRD 0x0040
+#define EPH_TX_DEFR 0x0080
+#define EPH_LAT_COL 0x0200
+#define EPH_LOST_CAR 0x0400
+#define EPH_EXC_DEF 0x0800
+#define EPH_CTR_ROL 0x1000
+#define EPH_RX_OVRN 0x2000
+#define EPH_LINK_OK 0x4000
+#define EPH_TX_UNRN 0x8000
+#define MEMINFO 8 /* Memory Information Register */
+#define MEMCFG 10 /* Memory Configuration Register */
+
+/* Bank 1 registers. */
+#define CONFIG 0
+#define CFG_MII_SELECT 0x8000 /* 91C100 only */
+#define CFG_NO_WAIT 0x1000
+#define CFG_FULL_STEP 0x0400
+#define CFG_SET_SQLCH 0x0200
+#define CFG_AUI_SELECT 0x0100
+#define CFG_16BIT 0x0080
+#define CFG_DIS_LINK 0x0040
+#define CFG_STATIC 0x0030
+#define CFG_IRQ_SEL_1 0x0004
+#define CFG_IRQ_SEL_0 0x0002
+#define BASE_ADDR 2
+#define ADDR0 4
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_STORE 0x0001
+#define CTL_RELOAD 0x0002
+#define CTL_EE_SELECT 0x0004
+#define CTL_TE_ENABLE 0x0020
+#define CTL_CR_ENABLE 0x0040
+#define CTL_LE_ENABLE 0x0080
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_POWERDOWN 0x2000
+
+/* Bank 2 registers. */
+#define MMU_CMD 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+#define FP_RXEMPTY 0x8000
+#define POINTER 6
+#define PTR_AUTO_INC 0x0040
+#define PTR_READ 0x2000
+#define PTR_AUTOINC 0x4000
+#define PTR_RCV 0x8000
+#define DATA_1 8
+#define INTERRUPT 12
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+
+#define RCR 4
+enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
+ RxEnable = 0x0100, RxStripCRC = 0x0200};
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+#define COUNTER 6
+
+/* BANK 3 -- not the same values as in smc9194! */
+#define MULTICAST0 0
+#define MULTICAST2 2
+#define MULTICAST4 4
+#define MULTICAST6 6
+#define MGMT 8
+#define REVISION 0x0a
+
+/* Transmit status bits. */
+#define TS_SUCCESS 0x0001
+#define TS_16COL 0x0010
+#define TS_LATCOL 0x0200
+#define TS_LOSTCAR 0x0400
+
+/* Receive status bits. */
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+#define set_bits(v, p) outw(inw(p)|(v), (p))
+#define mask_bits(v, p) outw(inw(p)&(v), (p))
+
+/*====================================================================*/
+
+static dev_link_t *smc91c92_attach(void);
+static void smc91c92_detach(dev_link_t *);
+static void smc91c92_config(dev_link_t *link);
+static void smc91c92_release(dev_link_t *link);
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static int smc_open(struct net_device *dev);
+static int smc_close(struct net_device *dev);
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void smc_tx_timeout(struct net_device *dev);
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void smc_rx(struct net_device *dev);
+static struct net_device_stats *smc_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int s9k_config(struct net_device *dev, struct ifmap *map);
+static void smc_set_xcvr(struct net_device *dev, int if_port);
+static void smc_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static void mdio_sync(kio_addr_t addr);
+static int mdio_read(struct net_device *dev, int phy_id, int loc);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
+static int smc_link_ok(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+
+/*======================================================================
+
+ smc91c92_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *smc91c92_attach(void)
+{
+ client_reg_t client_reg;
+ struct smc_private *smc;
+ dev_link_t *link;
+ struct net_device *dev;
+ int ret;
+
+ DEBUG(0, "smc91c92_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct smc_private));
+ if (!dev)
+ return NULL;
+ smc = netdev_priv(dev);
+ link = &smc->link;
+ link->priv = dev;
+
+ spin_lock_init(&smc->lock);
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 4;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &smc_interrupt;
+ link->irq.Instance = dev;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The SMC91c92-specific entries in the device structure. */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &smc_start_xmit;
+ dev->get_stats = &smc_get_stats;
+ dev->set_config = &s9k_config;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->open = &smc_open;
+ dev->stop = &smc_close;
+ dev->do_ioctl = &smc_ioctl;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = smc_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ smc->mii_if.dev = dev;
+ smc->mii_if.mdio_read = mdio_read;
+ smc->mii_if.mdio_write = mdio_write;
+ smc->mii_if.phy_id_mask = 0x1f;
+ smc->mii_if.reg_num_mask = 0x1f;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &smc91c92_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ smc91c92_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* smc91c92_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void smc91c92_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "smc91c92_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ if (link->state & DEV_CONFIG)
+ smc91c92_release(link);
+
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* smc91c92_detach */
+
+/*====================================================================*/
+
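+/* Convert a 12-character ASCII hex string (as found in the CIS) into a
+   6-byte station address.  The (c & 0x0f) + 9 trick maps 'A'-'F' and
+   'a'-'f' to 10-15 without an explicit case test.  Returns -1 if the
+   string is not exactly 12 characters long. */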
+static int cvt_ascii_address(struct net_device *dev, char *s)
+{
+ int i, j, da, c;
+
+ if (strlen(s) != 12)
+ return -1;
+ for (i = 0; i < 6; i++) {
+ da = 0;
+ for (j = 0; j < 2; j++) {
+ c = *s++;
+ da <<= 4;
+ da += ((c >= '0') && (c <= '9')) ?
+ (c - '0') : ((c & 0x0f) + 9);
+ }
+ dev->dev_addr[i] = da;
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+static int first_tuple(client_handle_t handle, tuple_t *tuple,
+ cisparse_t *parse)
+{
+ int i;
+
+ if ((i = pcmcia_get_first_tuple(handle, tuple)) != CS_SUCCESS ||
+ (i = pcmcia_get_tuple_data(handle, tuple)) != CS_SUCCESS)
+ return i;
+ return pcmcia_parse_tuple(handle, tuple, parse);
+}
+
+static int next_tuple(client_handle_t handle, tuple_t *tuple,
+ cisparse_t *parse)
+{
+ int i;
+
+ if ((i = pcmcia_get_next_tuple(handle, tuple)) != CS_SUCCESS ||
+ (i = pcmcia_get_tuple_data(handle, tuple)) != CS_SUCCESS)
+ return i;
+ return pcmcia_parse_tuple(handle, tuple, parse);
+}
+
+/*======================================================================
+
+ Configuration stuff for Megahertz cards
+
+ mhz_3288_power() is used to power up a 3288's ethernet chip.
+ mhz_mfc_config() handles socket setup for multifunction (1144
+ and 3288) cards. mhz_setup() gets a card's hardware ethernet
+ address.
+
+======================================================================*/
+
+static int mhz_3288_power(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ u_char tmp;
+
+ /* Read the ISR twice... */
+ readb(smc->base+MEGAHERTZ_ISR);
+ udelay(5);
+ readb(smc->base+MEGAHERTZ_ISR);
+
+ /* Pause 200ms... */
+ mdelay(200);
+
+ /* Now read and write the COR... */
+ tmp = readb(smc->base + link->conf.ConfigBase + CISREG_COR);
+ udelay(5);
+ writeb(tmp, smc->base + link->conf.ConfigBase + CISREG_COR);
+
+ return 0;
+}
+
+static int mhz_mfc_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ win_req_t req;
+ memreq_t mem;
+ int i, k;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.IOAddrLines = 16;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ i = first_tuple(link->handle, &tuple, &parse);
+ /* The Megahertz combo cards have modem-like CIS entries, so
+ we have to explicitly try a bunch of port combinations. */
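+    /* k ^ 0x300 walks candidate bases downward from 0x300 in 0x100
+       blocks; the (k & 0x80) test skips the upper half of each block. */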
+ while (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort2 = cf->io.win[0].base;
+ for (k = 0; k < 0x400; k += 0x10) {
+ if (k & 0x80) continue;
+ link->io.BasePort1 = k ^ 0x300;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i == CS_SUCCESS) break;
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i != CS_SUCCESS)
+ return i;
+ dev->base_addr = link->io.BasePort1;
+
+ /* Allocate a memory window, for accessing the ISR */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = 0;
+ i = pcmcia_request_window(&link->handle, &req, &link->win);
+ if (i != CS_SUCCESS)
+ return i;
+ smc->base = ioremap(req.Base, req.Size);
+ mem.CardOffset = mem.Page = 0;
+ if (smc->manfid == MANFID_MOTOROLA)
+ mem.CardOffset = link->conf.ConfigBase;
+ i = pcmcia_map_mem_page(link->win, &mem);
+
+ if ((i == CS_SUCCESS)
+ && (smc->manfid == MANFID_MEGAHERTZ)
+ && (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+
+ return i;
+}
+
+static int mhz_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255], *station_addr;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Read the station address from the CIS. It is stored as the last
+ (fourth) string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
+ if (next_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ first_tuple(handle, &tuple, &parse);
+ if (parse.version_1.ns > 3) {
+ station_addr = parse.version_1.str + parse.version_1.ofs[3];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+ }
+
+ /* Another possibility: for the EM3288, in a special tuple */
+ tuple.DesiredTuple = 0x81;
+ if (pcmcia_get_first_tuple(handle, &tuple) != CS_SUCCESS)
+ return -1;
+ if (pcmcia_get_tuple_data(handle, &tuple) != CS_SUCCESS)
+ return -1;
+ buf[12] = '\0';
+ if (cvt_ascii_address(dev, buf) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*======================================================================
+
+ Configuration stuff for the Motorola Mariner
+
+ mot_config() writes directly to the Mariner configuration
+ registers because the CIS is just bogus.
+
+======================================================================*/
+
+static void mot_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ kio_addr_t iouart = link->io.BasePort2;
+
+ /* Set UART base address and force map with COR bit 1 */
+ writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
+ writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR);
+
+ /* Set SMC base address and force map with COR bit 1 */
+ writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR);
+
+ /* Wait for things to settle down */
+ mdelay(100);
+}
+
+static int mot_setup(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr = dev->base_addr;
+ int i, wait, loop;
+ u_int addr;
+
+ /* Read Ethernet address from Serial EEPROM */
+
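+    /* Each 16-bit word is read by setting the EEPROM pointer, issuing
+       a reload, and polling (200 x 10us, roughly 2 ms) for the reload
+       and store bits to clear. */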
+ for (i = 0; i < 3; i++) {
+ SMC_SELECT_BANK(2);
+ outw(MOT_EEPROM + i, ioaddr + POINTER);
+ SMC_SELECT_BANK(1);
+ outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL);
+
+ for (loop = wait = 0; loop < 200; loop++) {
+ udelay(10);
+ wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL));
+ if (wait == 0) break;
+ }
+
+ if (wait)
+ return -1;
+
+ addr = inw(ioaddr + GENERAL);
+ dev->dev_addr[2*i] = addr & 0xff;
+ dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static int smc_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ link->io.NumPorts1 = 16;
+ i = first_tuple(link->handle, &tuple, &parse);
+ while (i != CS_NO_MORE_ITEMS) {
+ if (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort1 = cf->io.win[0].base;
+ link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS)
+ dev->base_addr = link->io.BasePort1;
+ return i;
+}
+
+static int smc_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ tuple_t tuple;
+ cisparse_t parse;
+ cistpl_lan_node_id_t *node_id;
+ u_char buf[255], *station_addr;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Check for a LAN function extension tuple */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ i = first_tuple(handle, &tuple, &parse);
+ while (i == CS_SUCCESS) {
+ if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID)
+ break;
+ i = next_tuple(handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS) {
+ node_id = (cistpl_lan_node_id_t *)parse.funce.data;
+ if (node_id->nb == 6) {
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id->id[i];
+ return 0;
+ }
+ }
+ /* Try the third string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ station_addr = parse.version_1.str + parse.version_1.ofs[2];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*====================================================================*/
+
+static int osi_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ static kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ int i, j;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.NumPorts1 = 64;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+ link->io.IOAddrLines = 16;
+
+ /* Enable Hard Decode, LAN, Modem */
+ link->conf.ConfigIndex = 0x23;
+
+ for (i = j = 0; j < 4; j++) {
+ link->io.BasePort2 = com[j];
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ /* Fallback: turn off hard decode */
+ link->conf.ConfigIndex = 0x03;
+ link->io.NumPorts2 = 0;
+ i = pcmcia_request_io(link->handle, &link->io);
+ }
+ dev->base_addr = link->io.BasePort1 + 0x10;
+ return i;
+}
+
+static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ tuple_t tuple;
+ u_char buf[255];
+ int i;
+
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+
+ /* Read the station address from tuple 0x90, subtuple 0x04 */
+ tuple.DesiredTuple = 0x90;
+ i = pcmcia_get_first_tuple(handle, &tuple);
+ while (i == CS_SUCCESS) {
+ i = pcmcia_get_tuple_data(handle, &tuple);
+ if ((i != CS_SUCCESS) || (buf[0] == 0x04))
+ break;
+ i = pcmcia_get_next_tuple(handle, &tuple);
+ }
+ if (i != CS_SUCCESS)
+ return -1;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = buf[i+2];
+
+ if (((manfid == MANFID_OSITECH) &&
+ (cardid == PRODID_OSITECH_SEVEN)) ||
+ ((manfid == MANFID_PSION) &&
+ (cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ } else if (manfid == MANFID_OSITECH) {
+ /* Make sure both functions are powered up */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
+ /* Now, turn on the interrupt for both card functions */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
+ DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
+ inw(link->io.BasePort1 + OSITECH_AUI_PWR),
+ inw(link->io.BasePort1 + OSITECH_RESET_ISR));
+ }
+
+ return 0;
+}
+
+/*======================================================================
+
+ This verifies that the chip is some SMC91cXX variant, and returns
+ the revision code if successful. Otherwise, it returns -ENODEV.
+
+======================================================================*/
+
+static int check_sig(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ kio_addr_t ioaddr = dev->base_addr;
+ int width;
+ u_short s;
+
+ SMC_SELECT_BANK(1);
+ if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) {
+ /* Try powering up the chip */
+ outw(0, ioaddr + CONTROL);
+ mdelay(55);
+ }
+
+ /* Try setting bus width */
+ width = (link->io.Attributes1 == IO_DATA_PATH_WIDTH_AUTO);
+ s = inb(ioaddr + CONFIG);
+ if (width)
+ s |= CFG_16BIT;
+ else
+ s &= ~CFG_16BIT;
+ outb(s, ioaddr + CONFIG);
+
+ /* Check Base Address Register to make sure bus width is OK */
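+    /* The two halves of BASE_ADDR normally differ, so reading back
+       identical bytes suggests 16-bit accesses are not working. */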
+ s = inw(ioaddr + BASE_ADDR);
+ if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) &&
+ ((s >> 8) != (s & 0xff))) {
+ SMC_SELECT_BANK(3);
+ s = inw(ioaddr + REVISION);
+ return (s & 0xff);
+ }
+
+ if (width) {
+ event_callback_args_t args;
+ printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+ args.client_data = link;
+ smc91c92_event(CS_EVENT_RESET_PHYSICAL, 0, &args);
+ pcmcia_release_io(link->handle, &link->io);
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ pcmcia_request_io(link->handle, &link->io);
+ smc91c92_event(CS_EVENT_CARD_RESET, 0, &args);
+ return check_sig(link);
+ }
+ return -ENODEV;
+}
+
+/*======================================================================
+
+ smc91c92_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_EXIT_TEST(ret, svc, label) \
+if (ret != CS_SUCCESS) { cs_error(link->handle, svc, ret); goto label; }
+
+static void smc91c92_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ char *name;
+ int i, j, rev;
+ kio_addr_t ioaddr;
+ u_long mir;
+
+ DEBUG(0, "smc91c92_config(0x%p)\n", link);
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ i = first_tuple(handle, &tuple, &parse);
+ CS_EXIT_TEST(i, ParseTuple, config_failed);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if (first_tuple(handle, &tuple, &parse) == CS_SUCCESS) {
+ smc->manfid = parse.manfid.manf;
+ smc->cardid = parse.manfid.card;
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ i = osi_config(link);
+ } else if ((smc->manfid == MANFID_MOTOROLA) ||
+ ((smc->manfid == MANFID_MEGAHERTZ) &&
+ ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) ||
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) {
+ i = mhz_mfc_config(link);
+ } else {
+ i = smc_config(link);
+ }
+ CS_EXIT_TEST(i, RequestIO, config_failed);
+
+ i = pcmcia_request_irq(link->handle, &link->irq);
+ CS_EXIT_TEST(i, RequestIRQ, config_failed);
+ i = pcmcia_request_configuration(link->handle, &link->conf);
+ CS_EXIT_TEST(i, RequestConfiguration, config_failed);
+
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+
+ dev->irq = link->irq.AssignedIRQ;
+
+ if ((if_port >= 0) && (if_port <= 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n");
+
+ switch (smc->manfid) {
+ case MANFID_OSITECH:
+ case MANFID_PSION:
+ i = osi_setup(link, smc->manfid, smc->cardid); break;
+ case MANFID_SMC:
+ case MANFID_NEW_MEDIA:
+ i = smc_setup(link); break;
+ case 0x128: /* For broken Megahertz cards */
+ case MANFID_MEGAHERTZ:
+ i = mhz_setup(link); break;
+ case MANFID_MOTOROLA:
+ default: /* get the hw address from EEPROM */
+ i = mot_setup(link); break;
+ }
+
+ if (i != 0) {
+ printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
+ goto config_undo;
+ }
+
+ smc->duplex = 0;
+ smc->rx_ovrn = 0;
+
+ rev = check_sig(link);
+ name = "???";
+ if (rev > 0)
+ switch (rev >> 4) {
+ case 3: name = "92"; break;
+ case 4: name = ((rev & 15) >= 6) ? "96" : "94"; break;
+ case 5: name = "95"; break;
+ case 7: name = "100"; break;
+ case 8: name = "100-FD"; break;
+ case 9: name = "110"; break;
+ }
+
+ ioaddr = dev->base_addr;
+ if (rev > 0) {
+ u_long mcr;
+ SMC_SELECT_BANK(0);
+ mir = inw(ioaddr + MEMINFO) & 0xff;
+ if (mir == 0xff) mir++;
+ /* Get scale factor for memory size */
+ mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200;
+ mir *= 128 * (1<<((mcr >> 9) & 7));
+ SMC_SELECT_BANK(1);
+ smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT;
+ smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC;
+ if (smc->manfid == MANFID_OSITECH)
+ smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0;
+ if ((rev >> 4) >= 7)
+ smc->cfg |= CFG_MII_SELECT;
+ } else
+ mir = 0;
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ smc->mii_if.phy_id = (i < 32) ? i : -1;
+
+ SMC_SELECT_BANK(0);
+ }
+
+ link->dev = &smc->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
+ link->dev = NULL;
+ goto config_undo;
+ }
+
+ strcpy(smc->node.dev_name, dev->name);
+
+ printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
+ "hw_addr ", dev->name, name, (rev & 0x0f), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ if (rev > 0) {
+ if (mir & 0x3ff)
+ printk(KERN_INFO " %lu byte", mir);
+ else
+ printk(KERN_INFO " %lu kb", mir>>10);
+ printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ?
+ "MII" : if_names[dev->if_port]);
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->mii_if.phy_id != -1) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n",
+ smc->mii_if.phy_id, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+ }
+
+ return;
+
+config_undo:
+ unregister_netdev(dev);
+config_failed: /* CS_EXIT_TEST() calls jump to here... */
+ smc91c92_release(link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* smc91c92_config */
+
+/*======================================================================
+
+ After a card is removed, smc91c92_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void smc91c92_release(dev_link_t *link)
+{
+
+ DEBUG(0, "smc91c92_release(0x%p)\n", link);
+
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+ if (link->win) {
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ iounmap(smc->base);
+ pcmcia_release_window(link->win);
+ }
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ int i;
+
+ DEBUG(1, "smc91c92_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ smc91c92_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ if ((smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Power up the card and enable interrupts */
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
+ }
+ if (((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid == PRODID_OSITECH_SEVEN)) ||
+ ((smc->manfid == MANFID_PSION) &&
+ (smc->cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ }
+ if (link->open) {
+ smc_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* smc91c92_event */
+
+/*======================================================================
+
+ MII interface support for SMC91cXX based cards
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x04
+#define MDIO_DATA_OUT 0x01
+#define MDIO_DIR_WRITE 0x08
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x02
+
+static void mdio_sync(kio_addr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb(MDIO_DATA_WRITE1, addr);
+ outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
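+/* Bit-banged MII management frames: 32 preamble bits from mdio_sync(),
+   then start (01), opcode (10 read / 01 write), the 5-bit PHY address
+   and 5-bit register number, followed by turnaround and 16 data bits. */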
+static int mdio_read(struct net_device *dev, int phy_id, int loc)
+{
+ kio_addr_t addr = dev->base_addr + MGMT;
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(0, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ kio_addr_t addr = dev->base_addr + MGMT;
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(0, addr);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*======================================================================
+
+ The driver core code, most of which should be common with a
+ non-PCMCIA implementation.
+
+======================================================================*/
+
+#ifdef PCMCIA_DEBUG
+static void smc_dump(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ u_short i, w, save;
+ save = inw(ioaddr + BANK_SELECT);
+ for (w = 0; w < 4; w++) {
+ SMC_SELECT_BANK(w);
+ printk(KERN_DEBUG "bank %d: ", w);
+ for (i = 0; i < 14; i += 2)
+ printk(" %04x", inw(ioaddr + i));
+ printk("\n");
+ }
+ outw(save, ioaddr + BANK_SELECT);
+}
+#endif
+
+static int smc_open(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ dev_link_t *link = &smc->link;
+
+#ifdef PCMCIA_DEBUG
+ DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
+ dev->name, dev, inw(dev->base_addr + BANK_SELECT));
+ if (pc_debug > 1) smc_dump(dev);
+#endif
+
+ /* Check that the PCMCIA card is still here. */
+ if (!DEV_OK(link))
+ return -ENODEV;
+ /* Physical device present signature. */
+ if (check_sig(link) < 0) {
+ printk("smc91c92_cs: Yikes! Bad chip signature!\n");
+ return -ENODEV;
+ }
+ link->open++;
+
+ netif_start_queue(dev);
+ smc->saved_skb = NULL;
+ smc->packets_waiting = 0;
+
+ smc_reset(dev);
+ init_timer(&smc->media);
+ smc->media.function = &media_check;
+ smc->media.data = (u_long) dev;
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+
+ return 0;
+} /* smc_open */
+
+/*====================================================================*/
+
+static int smc_close(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ dev_link_t *link = &smc->link;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "%s: smc_close(), status %4.4x.\n",
+ dev->name, inw(ioaddr + BANK_SELECT));
+
+ netif_stop_queue(dev);
+
+ /* Shut off all interrupts, and turn off the Tx and Rx sections.
+ Don't bother to check for chip present. */
+    SMC_SELECT_BANK(2);	/* Nominally paranoia, but do not assume... */
+ outw(0, ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ mask_bits(0xff00, ioaddr + RCR);
+ mask_bits(0xff00, ioaddr + TCR);
+
+ /* Put the chip into power-down mode. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_POWERDOWN, ioaddr + CONTROL );
+
+ link->open--;
+ del_timer_sync(&smc->media);
+
+ return 0;
+} /* smc_close */
+
+/*======================================================================
+
+ Transfer a packet to the hardware and trigger the packet send.
+    This may be called from either the Tx queue code
+ or the interrupt handler.
+
+======================================================================*/
+
+static void smc_hardware_send_packet(struct net_device * dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct sk_buff *skb = smc->saved_skb;
+ kio_addr_t ioaddr = dev->base_addr;
+ u_char packet_no;
+
+ if (!skb) {
+ printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name);
+ return;
+ }
+
+ /* There should be a packet slot waiting. */
+ packet_no = inw(ioaddr + PNR_ARR) >> 8;
+ if (packet_no & 0x80) {
+ /* If not, there is a hardware problem! Likely an ejected card. */
+ printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation"
+ " failed, status %#2.2x.\n", dev->name, packet_no);
+ dev_kfree_skb_irq(skb);
+ smc->saved_skb = NULL;
+ netif_start_queue(dev);
+ return;
+ }
+
+ smc->stats.tx_bytes += skb->len;
+ /* The card should use the just-allocated buffer. */
+ outw(packet_no, ioaddr + PNR_ARR);
+ /* point to the beginning of the packet */
+ outw(PTR_AUTOINC , ioaddr + POINTER);
+
+ /* Send the packet length (+6 for status, length and ctl byte)
+ and the status word (set to zeros). */
+ {
+ u_char *buf = skb->data;
+ u_int length = skb->len; /* The chip will pad to ethernet min. */
+
+ DEBUG(2, "%s: Trying to xmit packet of length %d.\n",
+ dev->name, length);
+
+ /* send the packet length: +6 for status word, length, and ctl */
+ outw(0, ioaddr + DATA_1);
+ outw(length + 6, ioaddr + DATA_1);
+ outsw(ioaddr + DATA_1, buf, length >> 1);
+
+ /* The odd last byte, if there is one, goes in the control word. */
+ outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1);
+ }
+
+ /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */
+ outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) |
+ (inw(ioaddr + INTERRUPT) & 0xff00),
+ ioaddr + INTERRUPT);
+
+ /* The chip does the rest of the work. */
+ outw(MC_ENQUEUE , ioaddr + MMU_CMD);
+
+ smc->saved_skb = NULL;
+ dev_kfree_skb_irq(skb);
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+ return;
+}
+
+/*====================================================================*/
+
+static void smc_tx_timeout(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
+ "Tx_status %2.2x status %4.4x.\n",
+ dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
+ smc->stats.tx_errors++;
+ smc_reset(dev);
+ dev->trans_start = jiffies;
+ smc->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u_short num_pages;
+ short time_out, ir;
+
+ netif_stop_queue(dev);
+
+ DEBUG(2, "%s: smc_start_xmit(length = %d) called,"
+ " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
+
+ if (smc->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ smc->stats.tx_aborted_errors++;
+ printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
+ dev->name);
+ return 1;
+ }
+ smc->saved_skb = skb;
+
+ num_pages = skb->len >> 8;
+
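+    /* Tx memory is allocated in 256-byte pages; packets of 2 KB or more
+       are rejected, since no valid ethernet frame is that large. */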
+ if (num_pages > 7) {
+ printk(KERN_ERR "%s: Far too big packet error.\n", dev->name);
+ dev_kfree_skb (skb);
+ smc->saved_skb = NULL;
+ smc->stats.tx_dropped++;
+ return 0; /* Do not re-queue this packet. */
+ }
+ /* A packet is now waiting. */
+ smc->packets_waiting++;
+
+ SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+
+ /* Allocate the memory; send the packet now if we win. */
+ outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD);
+ for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) {
+ ir = inw(ioaddr+INTERRUPT);
+ if (ir & IM_ALLOC_INT) {
+ /* Acknowledge the interrupt, send the packet. */
+ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT);
+ smc_hardware_send_packet(dev); /* Send the packet now.. */
+ return 0;
+ }
+ }
+
+ /* Otherwise defer until the Tx-space-allocated interrupt. */
+ DEBUG(2, "%s: memory allocation deferred.\n", dev->name);
+ outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
+
+ return 0;
+}
+
+/*======================================================================
+
+    Handle an anomalous Tx event.  Entered while in Window 2.
+
+======================================================================*/
+
+static void smc_tx_err(struct net_device * dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
+ int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
+ int tx_status;
+
+ /* select this as the packet to read from */
+ outw(packet_no, ioaddr + PNR_ARR);
+
+ /* read the first word from this packet */
+ outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER);
+
+ tx_status = inw(ioaddr + DATA_1);
+
+ smc->stats.tx_errors++;
+ if (tx_status & TS_LOSTCAR) smc->stats.tx_carrier_errors++;
+ if (tx_status & TS_LATCOL) smc->stats.tx_window_errors++;
+ if (tx_status & TS_16COL) {
+ smc->stats.tx_aborted_errors++;
+ smc->tx_err++;
+ }
+
+ if (tx_status & TS_SUCCESS) {
+ printk(KERN_NOTICE "%s: Successful packet caused error "
+ "interrupt?\n", dev->name);
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+ SMC_SELECT_BANK(2);
+
+ outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */
+
+ /* one less packet waiting for me */
+ smc->packets_waiting--;
+
+ outw(saved_packet, ioaddr + PNR_ARR);
+ return;
+}
+
+/*====================================================================*/
+
+static void smc_eph_irq(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u_short card_stats, ephs;
+
+ SMC_SELECT_BANK(0);
+ ephs = inw(ioaddr + EPH);
+ DEBUG(2, "%s: Ethernet protocol handler interrupt, status"
+ " %4.4x.\n", dev->name, ephs);
+ /* Could be a counter roll-over warning: update stats. */
+ card_stats = inw(ioaddr + COUNTER);
+ /* single collisions */
+ smc->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ smc->stats.collisions += card_stats & 0xF;
+#if 0 /* These are for when linux supports these statistics */
+ card_stats >>= 4; /* deferred */
+ card_stats >>= 4; /* excess deferred */
+#endif
+ /* If we had a transmit error we must re-enable the transmitter. */
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+
+ /* Clear a link error interrupt. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL);
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ SMC_SELECT_BANK(2);
+}
+
+/*====================================================================*/
+
+static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr;
+ u_short saved_bank, saved_pointer, mask, status;
+ unsigned int handled = 1;
+ char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
+ irq, ioaddr);
+
+ smc->watchdog = 0;
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ if ((saved_bank & 0xff00) != 0x3300) {
+ /* The device does not exist -- the card could be off-line, or
+ maybe it has been ejected. */
+ DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent"
+ "/ejected device.\n", dev->name, irq);
+ handled = 0;
+ goto irq_done;
+ }
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw(ioaddr + POINTER);
+ mask = inw(ioaddr + INTERRUPT) >> 8;
+ /* clear all interrupts */
+ outw(0, ioaddr + INTERRUPT);
+
+ do { /* read the status flag, and mask it */
+ status = inw(ioaddr + INTERRUPT) & 0xff;
+ DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
+ status, mask);
+ if ((status & mask) == 0) {
+ if (bogus_cnt == INTR_WORK)
+ handled = 0;
+ break;
+ }
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ smc_rx(dev);
+ }
+ if (status & IM_TX_INT) {
+ smc_tx_err(dev);
+ outw(IM_TX_INT, ioaddr + INTERRUPT);
+ }
+ status &= mask;
+ if (status & IM_TX_EMPTY_INT) {
+ outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT);
+ mask &= ~IM_TX_EMPTY_INT;
+ smc->stats.tx_packets += smc->packets_waiting;
+ smc->packets_waiting = 0;
+ }
+ if (status & IM_ALLOC_INT) {
+ /* Clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet(dev);
+
+ /* enable xmit interrupts based on this */
+ mask |= (IM_TX_EMPTY_INT | IM_TX_INT);
+
+ /* and let the card send more packets to me */
+ netif_wake_queue(dev);
+ }
+ if (status & IM_RX_OVRN_INT) {
+ smc->stats.rx_errors++;
+ smc->stats.rx_fifo_errors++;
+ if (smc->duplex)
+ smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */
+ outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT);
+ }
+ if (status & IM_EPH_INT)
+ smc_eph_irq(dev);
+ } while (--bogus_cnt);
+
+ DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x"
+ " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+
+ /* restore state register */
+ outw((mask<<8), ioaddr + INTERRUPT);
+ outw(saved_pointer, ioaddr + POINTER);
+ SMC_SELECT_BANK(saved_bank);
+
+ DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+
+irq_done:
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Retrigger interrupt if needed */
+ mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR);
+ set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR);
+ }
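+ /* Motorola multifunction cards: drop and restore COR_IREQ_ENA on both
+ the UART and LAN functions, which re-arms the interrupt line. */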
+ if (smc->manfid == MANFID_MOTOROLA) {
+ u_char cor;
+ cor = readb(smc->base + MOT_UART + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR);
+ writeb(cor, smc->base + MOT_UART + CISREG_COR);
+ cor = readb(smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor, smc->base + MOT_LAN + CISREG_COR);
+ }
+#ifdef DOES_NOT_WORK
+ if (smc->base != NULL) { /* Megahertz MFC's */
+ readb(smc->base+MEGAHERTZ_ISR);
+ readb(smc->base+MEGAHERTZ_ISR);
+ }
+#endif
+ return IRQ_RETVAL(handled);
+}
+
+/*====================================================================*/
+
+static void smc_rx(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int rx_status;
+ int packet_length; /* Caution: not the frame length, but the chip's
+ byte count to transfer (header bytes included). */
+
+ /* Assertion: we are in Window 2. */
+
+ if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
+ printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n",
+ dev->name);
+ return;
+ }
+
+ /* Reset the read pointer, and read the status and packet length. */
+ outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER);
+ rx_status = inw(ioaddr + DATA_1);
+ packet_length = inw(ioaddr + DATA_1) & 0x07ff;
+
+ DEBUG(2, "%s: Receive status %4.4x length %d.\n",
+ dev->name, rx_status, packet_length);
+
+ if (!(rx_status & RS_ERRORS)) {
+ /* do stuff to make a new packet */
+ struct sk_buff *skb;
+
+ /* Note: packet_length adds 5 or 6 extra bytes here! */
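+ /* (the chip's byte count includes the status word, the byte count
+ word and the control/pad byte) */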
+ skb = dev_alloc_skb(packet_length+2);
+
+ if (skb == NULL) {
+ DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name);
+ smc->stats.rx_dropped++;
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+ return;
+ }
+
+ packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6);
+ skb_reserve(skb, 2);
+ insw(ioaddr+DATA_1, skb_put(skb, packet_length),
+ (packet_length+1)>>1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->dev = dev;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ smc->stats.rx_packets++;
+ smc->stats.rx_bytes += packet_length;
+ if (rx_status & RS_MULTICAST)
+ smc->stats.multicast++;
+ } else {
+ /* error ... */
+ smc->stats.rx_errors++;
+
+ if (rx_status & RS_ALGNERR) smc->stats.rx_frame_errors++;
+ if (rx_status & (RS_TOOSHORT | RS_TOOLONG))
+ smc->stats.rx_length_errors++;
+ if (rx_status & RS_BADCRC) smc->stats.rx_crc_errors++;
+ }
+ /* Let the MMU free the memory of this packet. */
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+
+ return;
+}
+
+/*====================================================================*/
+
+static struct net_device_stats *smc_get_stats(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ /* Nothing to update - the 91c92 is a pretty primitive chip. */
+ return &smc->stats;
+}
+
+/*======================================================================
+
+ Calculate values for the hardware multicast filter hash table.
+
+======================================================================*/
+
+static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
+ u_char *multicast_table)
+{
+ struct dev_mc_list *mc_addr;
+
+ for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
+ u_int position = ether_crc(6, mc_addr->dmi_addr);
+#ifndef final_version /* Verify multicast address. */
+ if ((mc_addr->dmi_addr[0] & 1) == 0)
+ continue;
+#endif
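+ /* The top six CRC bits pick one of the 64 filter bits: bits 31-29
+ select the table byte, bits 28-26 the bit within it. */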
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
+}
+
+/*======================================================================
+
+ Set the receive mode.
+
+ This routine is used by both the protocol level to notify us of
+ promiscuous/multicast mode changes, and by the open/reset code to
+ initialize the Rx registers. We always set the multicast list and
+ leave the receiver running.
+
+======================================================================*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+ u_int multicast_table[ 2 ] = { 0, };
+ unsigned long flags;
+ u_short rx_cfg_setting;
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_NOTICE "%s: setting Rx mode to promiscuous.\n", dev->name);
+ rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
+ } else if (dev->flags & IFF_ALLMULTI)
+ rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
+ else {
+ if (dev->mc_count) {
+ fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ (u_char *)multicast_table);
+ }
+ rx_cfg_setting = RxStripCRC | RxEnable;
+ }
+
+ /* Load MC table and Rx setting into the chip without interrupts. */
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(3);
+ outl(multicast_table[0], ioaddr + MULTICAST0);
+ outl(multicast_table[1], ioaddr + MULTICAST4);
+ SMC_SELECT_BANK(0);
+ outw(rx_cfg_setting, ioaddr + RCR);
+ SMC_SELECT_BANK(2);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
+ return;
+}
+
+/*======================================================================
+
+ Senses when a card's config changes. Here, it's coax or TP.
+
+======================================================================*/
+
+static int s9k_config(struct net_device *dev, struct ifmap *map)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (smc->cfg & CFG_MII_SELECT)
+ return -EOPNOTSUPP;
+ else if (map->port > 2)
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ smc_reset(dev);
+ }
+ return 0;
+}
+
+/*======================================================================
+
+ Reset the chip, reloading every register that might be corrupted.
+
+======================================================================*/
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void smc_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u_short saved_bank;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(1);
+ if (if_port == 2) {
+ outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002);
+ } else {
+ outw(smc->cfg, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001);
+ }
+ SMC_SELECT_BANK(saved_bank);
+}
+
+static void smc_reset(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+ int i;
+
+ DEBUG(0, "%s: smc91c92 reset called.\n", dev->name);
+
+ /* The first interaction must be a write to bring the chip out
+ of sleep mode. */
+ SMC_SELECT_BANK(0);
+ /* Reset the chip. */
+ outw(RCR_SOFTRESET, ioaddr + RCR);
+ udelay(10);
+
+ /* Clear the transmit and receive configuration registers. */
+ outw(RCR_CLEAR, ioaddr + RCR);
+ outw(TCR_CLEAR, ioaddr + TCR);
+
+ /* Set the Window 1 control, configuration and station addr registers.
+ No point in writing the I/O base register ;-> */
+ SMC_SELECT_BANK(1);
+ /* Automatically release successfully transmitted packets,
+ Accept link errors, counter and Tx error interrupts. */
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ smc_set_xcvr(dev, dev->if_port);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) |
+ (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00),
+ ioaddr - 0x10 + OSITECH_AUI_PWR);
+
+ /* Fill in the physical address. The databook is wrong about the order! */
+ for (i = 0; i < 6; i += 2)
+ outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i],
+ ioaddr + ADDR0 + i);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(2);
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ outw(0, ioaddr + INTERRUPT);
+
+ /* Re-enable the chip. */
+ SMC_SELECT_BANK(0);
+ outw(((smc->cfg & CFG_MII_SELECT) ? 0 : TCR_MONCSN) |
+ TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR);
+ set_rx_mode(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ /* Reset MII */
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000);
+
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(dev, smc->mii_if.phy_id, 4, 0x01e1);
+
+ /* Restart MII autonegotiation */
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000);
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200);
+ }
+
+ /* Enable interrupts. */
+ SMC_SELECT_BANK(2);
+ outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8,
+ ioaddr + INTERRUPT);
+}
+
+/*======================================================================
+
+ Media selection timer routine
+
+======================================================================*/
+
+static void media_check(u_long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u_short i, media, saved_bank;
+ u_short link;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ SMC_SELECT_BANK(2);
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+ i = inw(ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ media = inw(ioaddr + EPH) & EPH_LINK_OK;
+ SMC_SELECT_BANK(1);
+ media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+
+ /* Check for pending interrupt with watchdog flag set: with
+ this, we can limp along even if the interrupt is blocked */
+ if (smc->watchdog++ && ((i>>8) & i)) {
+ if (!smc->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ smc_interrupt(dev->irq, dev, NULL);
+ smc->fast_poll = HZ;
+ }
+ if (smc->fast_poll) {
+ smc->fast_poll--;
+ smc->media.expires = jiffies + HZ/100;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+ return;
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->mii_if.phy_id < 0)
+ goto reschedule;
+
+ SMC_SELECT_BANK(3);
+ link = mdio_read(dev, smc->mii_if.phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ smc->mii_if.phy_id = -1;
+ goto reschedule;
+ }
+
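+ /* MII register 1 is the BMSR; bit 2 (0x0004) is the link status. */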
+ link &= 0x0004;
+ if (link != smc->link_status) {
+ u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
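+ /* Partner ability word: 0x0100 = 100baseT-FD; 0x0040 alone among
+ the 0x1c0 bits = 10baseT-FD. */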
+ smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
+ ? TCR_FDUPLX : 0);
+ if (link) {
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ (smc->duplex ? 'F' : 'H'));
+ }
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
+ smc->link_status = link;
+ }
+ goto reschedule;
+ }
+
+ /* Ignore collisions unless we've had no rx's recently */
+ if (jiffies - dev->last_rx > HZ) {
+ if (smc->tx_err || (smc->media_status & EPH_16COL))
+ media |= EPH_16COL;
+ }
+ smc->tx_err = 0;
+
+ if (media != smc->media_status) {
+ if ((media & smc->media_status & 1) &&
+ ((smc->media_status ^ media) & EPH_LINK_OK))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (smc->media_status & EPH_LINK_OK ? "lost" : "found"));
+ else if ((media & smc->media_status & 2) &&
+ ((smc->media_status ^ media) & EPH_16COL))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (media & EPH_16COL ? "problem" : "ok"));
+ if (dev->if_port == 0) {
+ if (media & 1) {
+ if (media & EPH_LINK_OK)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ smc_set_xcvr(dev, 2);
+ } else {
+ if (media & EPH_16COL)
+ smc_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ smc->media_status = media;
+ }
+
+reschedule:
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+}
+
+static int smc_link_ok(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ return mii_link_ok(&smc->mii_if);
+ } else {
+ SMC_SELECT_BANK(0);
+ return inw(ioaddr + EPH) & EPH_LINK_OK;
+ }
+}
+
+static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
+
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->speed = SPEED_10;
+ ecmd->phy_address = ioaddr + MGMT;
+
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+
+ if (ecmd->port == PORT_AUI)
+ smc_set_xcvr(dev, 1);
+ else
+ smc_set_xcvr(dev, 0);
+
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ if (ecmd->duplex == DUPLEX_FULL)
+ tmp |= TCR_FDUPLX;
+ else
+ tmp &= ~TCR_FDUPLX;
+ outw(tmp, ioaddr + TCR);
+
+ return 0;
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int ret;
+
+ SMC_SELECT_BANK(3);
+ spin_lock_irq(&smc->lock);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_gset(&smc->mii_if, ecmd);
+ else
+ ret = smc_netdev_get_ecmd(dev, ecmd);
+ spin_unlock_irq(&smc->lock);
+ SMC_SELECT_BANK(saved_bank);
+ return ret;
+}
+
+static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int ret;
+
+ SMC_SELECT_BANK(3);
+ spin_lock_irq(&smc->lock);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_sset(&smc->mii_if, ecmd);
+ else
+ ret = smc_netdev_set_ecmd(dev, ecmd);
+ spin_unlock_irq(&smc->lock);
+ SMC_SELECT_BANK(saved_bank);
+ return ret;
+}
+
+static u32 smc_get_link(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ u32 ret;
+
+ SMC_SELECT_BANK(3);
+ spin_lock_irq(&smc->lock);
+ ret = smc_link_ok(dev);
+ spin_unlock_irq(&smc->lock);
+ SMC_SELECT_BANK(saved_bank);
+ return ret;
+}
+
+#ifdef PCMCIA_DEBUG
+static u32 smc_get_msglevel(struct net_device *dev)
+{
+ return pc_debug;
+}
+
+static void smc_set_msglevel(struct net_device *dev, u32 val)
+{
+ pc_debug = val;
+}
+#endif
+
+static int smc_nway_reset(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ if (smc->cfg & CFG_MII_SELECT) {
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int res;
+
+ SMC_SELECT_BANK(3);
+ res = mii_nway_restart(&smc->mii_if);
+ SMC_SELECT_BANK(saved_bank);
+
+ return res;
+ } else
+ return -EOPNOTSUPP;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = smc_get_drvinfo,
+ .get_settings = smc_get_settings,
+ .set_settings = smc_set_settings,
+ .get_link = smc_get_link,
+#ifdef PCMCIA_DEBUG
+ .get_msglevel = smc_get_msglevel,
+ .set_msglevel = smc_set_msglevel,
+#endif
+ .nway_reset = smc_nway_reset,
+};
+
+static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct mii_ioctl_data *mii = if_mii(rq);
+ int rc = 0;
+ u16 saved_bank;
+ kio_addr_t ioaddr = dev->base_addr;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&smc->lock);
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(3);
+ rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irq(&smc->lock);
+ return rc;
+}
+
+static struct pcmcia_driver smc91c92_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "smc91c92_cs",
+ },
+ .attach = smc91c92_attach,
+ .detach = smc91c92_detach,
+};
+
+static int __init init_smc91c92_cs(void)
+{
+ return pcmcia_register_driver(&smc91c92_cs_driver);
+}
+
+static void __exit exit_smc91c92_cs(void)
+{
+ pcmcia_unregister_driver(&smc91c92_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_smc91c92_cs);
+module_exit(exit_smc91c92_cs);
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
new file mode 100644
index 000000000000..58177d67ea12
--- /dev/null
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -0,0 +1,2031 @@
+/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03)
+ * Xircom CreditCard Ethernet Adapter IIps driver
+ * Xircom Realport 10/100 (RE-100) driver
+ *
+ * This driver supports various Xircom CreditCard Ethernet adapters
+ * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56,
+ * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100.
+ *
+ * 2000-09-24 <psheer@icon.co.za> The Xircom CE3B-100 may not
+ * autodetect the media properly. In this case use the
+ * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options
+ * to force the media type.
+ *
+ * Written originally by Werner Koch based on David Hinds' skeleton of the
+ * PCMCIA driver.
+ *
+ * Copyright (c) 1997,1998 Werner Koch (dd9jn)
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ *
+ * ALTERNATIVELY, this driver may be distributed under the terms of
+ * the following license, in which case the provisions of this license
+ * are required INSTEAD OF the GNU General Public License. (This clause
+ * is necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifndef MANFID_COMPAQ
+ #define MANFID_COMPAQ 0x0138
+ #define MANFID_COMPAQ2 0x0183 /* is this correct? */
+#endif
+
+#include <pcmcia/ds.h>
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/****************
+ * Some constants used to access the hardware
+ */
+
+/* Register offsets and value constants */
+#define XIRCREG_CR 0 /* Command register (wr) */
+enum xirc_cr {
+ TransmitPacket = 0x01,
+ SoftReset = 0x02,
+ EnableIntr = 0x04,
+ ForceIntr = 0x08,
+ ClearTxFIFO = 0x10,
+ ClearRxOvrun = 0x20,
+ RestartTx = 0x40
+};
+#define XIRCREG_ESR 0 /* Ethernet status register (rd) */
+enum xirc_esr {
+ FullPktRcvd = 0x01, /* full packet in receive buffer */
+ PktRejected = 0x04, /* a packet has been rejected */
+ TxPktPend = 0x08, /* TX Packet Pending */
+ IncorPolarity = 0x10,
+ MediaSelect = 0x20 /* set if TP, clear if AUI */
+};
+#define XIRCREG_PR 1 /* Page Register select */
+#define XIRCREG_EDP 4 /* Ethernet Data Port Register */
+#define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */
+enum xirc_isr {
+ TxBufOvr = 0x01, /* TX Buffer Overflow */
+ PktTxed = 0x02, /* Packet Transmitted */
+ MACIntr = 0x04, /* MAC Interrupt occurred */
+ TxResGrant = 0x08, /* Tx Reservation Granted */
+ RxFullPkt = 0x20, /* Rx Full Packet */
+ RxPktRej = 0x40, /* Rx Packet Rejected */
+ ForcedIntr= 0x80 /* Forced Interrupt */
+};
+#define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/
+#define XIRCREG1_IMR1 13
+#define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/
+#define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/
+#define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */
+#define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */
+enum xirc_rsr {
+ PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */
+ BrdcstPkt = 0x02, /* set if it is a broadcast packet */
+ PktTooLong = 0x04, /* set if packet length > 1518 */
+ AlignErr = 0x10, /* incorrect CRC and last octet not complete */
+ CRCErr = 0x20, /* incorrect CRC and last octet is complete */
+ PktRxOk = 0x80 /* received ok */
+};
+#define XIRCREG0_PTR 13 /* packets transmitted register (rd) */
+#define XIRCREG0_RBC 14 /* receive byte count register (rd) */
+#define XIRCREG1_ECR 14 /* Ethernet configuration register */
+enum xirc_ecr {
+ FullDuplex = 0x04, /* enable full duplex mode */
+ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */
+ DisablePolCor = 0x10,/* disable auto polarity correction */
+ DisableLinkPulse = 0x20, /* disable link pulse generation */
+ DisableAutoTx = 0x40, /* disable auto-transmit */
+};
+#define XIRCREG2_RBS 8 /* receive buffer start register */
+#define XIRCREG2_LED 10 /* LED Configuration register */
+/* values for the leds: Bits 2-0 for led 1
+ * 0 disabled Bits 5-3 for led 2
+ * 1 collision
+ * 2 noncollision
+ * 3 link_detected
+ * 4 incor_polarity
+ * 5 jabber
+ * 6 auto_assertion
+ * 7 rx_tx_activity
+ */
+#define XIRCREG2_MSR 12 /* Mohawk specific register */
+
+#define XIRCREG4_GPR0 8 /* General Purpose Register 0 */
+#define XIRCREG4_GPR1 9 /* General Purpose Register 1 */
+#define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/
+#define XIRCREG4_BOV 10 /* Bonding Version Register */
+#define XIRCREG4_LMA 12 /* Local Memory Address Register */
+#define XIRCREG4_LMD 14 /* Local Memory Data Port */
+/* MAC registers can only be accessed with 8 bit operations */
+#define XIRCREG40_CMD0 8 /* Command Register (wr) */
+enum xirc_cmd { /* Commands */
+ Transmit = 0x01,
+ EnableRecv = 0x04,
+ DisableRecv = 0x08,
+ Abort = 0x10,
+ Online = 0x20,
+ IntrAck = 0x40,
+ Offline = 0x80
+};
+#define XIRCREG5_RHSA0 10 /* Rx Host Start Address */
+#define XIRCREG40_RXST0 9 /* Receive Status Register */
+#define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */
+#define XIRCREG40_TXST1 12 /* Transmit Status Register 1 */
+#define XIRCREG40_RMASK0 13 /* Receive Mask Register */
+#define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */
+#define XIRCREG40_TMASK1 15 /* Transmit Mask Register 1 */
+#define XIRCREG42_SWC0 8 /* Software Configuration 0 */
+#define XIRCREG42_SWC1 9 /* Software Configuration 1 */
+#define XIRCREG42_BOC 10 /* Back-Off Configuration */
+#define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */
+#define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */
+#define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */
+#define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */
+#define XIRCREG45_REV 15 /* Revision Register (rd) */
+#define XIRCREG50_IA 8 /* Individual Address (8-13) */
+
+static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
+
+/****************
+ * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ * you do not define PCMCIA_DEBUG at all, all the debug code will be
+ * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ * be present but disabled -- but it can then be enabled for specific
+ * modules at load time with a 'pc_debug=#' option to insmod.
+ */
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
+#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
+#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
+#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
+#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
+
+/* card types */
+#define XIR_UNKNOWN 0 /* unknown: not supported */
+#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
+#define XIR_CE2 2 /* (prodid 2) */
+#define XIR_CE3 3 /* (prodid 3) */
+#define XIR_CEM 4 /* (prodid 1) different hardware: not supported */
+#define XIR_CEM2 5 /* (prodid 2) */
+#define XIR_CEM3 6 /* (prodid 3) */
+#define XIR_CEM33 7 /* (prodid 4) */
+#define XIR_CEM56M 8 /* (prodid 5) */
+#define XIR_CEM56 9 /* (prodid 6) */
+#define XIR_CM28 10 /* (prodid 3) modem only: not supported here */
+#define XIR_CM33 11 /* (prodid 4) modem only: not supported here */
+#define XIR_CM56 12 /* (prodid 5) modem only: not supported here */
+#define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */
+#define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(if_port, 0);
+INT_MODULE_PARM(full_duplex, 0);
+INT_MODULE_PARM(do_sound, 1);
+INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
+
+/*====================================================================*/
+
+/* We do not process more than this number of bytes during one
+ * interrupt. (Of course we receive complete packets, so this is not
+ * an exact limit.)
+ * Something between 2000..22000; the lower value gives the best interrupt
+ * latency, the higher one allows use of the complete on-chip buffer. We
+ * use the high value as the initial value.
+ */
+static unsigned maxrx_bytes = 22000;
+
+/* MII management prototypes */
+static void mii_idle(kio_addr_t ioaddr);
+static void mii_putbit(kio_addr_t ioaddr, unsigned data);
+static int mii_getbit(kio_addr_t ioaddr);
+static void mii_wbits(kio_addr_t ioaddr, unsigned data, int len);
+static unsigned mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg,
+ unsigned data, int len);
+
+/*
+ * The event() function is this driver's Card Services event handler.
+ * It will be called by Card Services when an appropriate card status
+ * event is received. The config() and release() entry points are
+ * used to configure or release a socket, in response to card insertion
+ * and ejection events. They are invoked from the event handler.
+ */
+
+static int has_ce2_string(dev_link_t * link);
+static void xirc2ps_config(dev_link_t * link);
+static void xirc2ps_release(dev_link_t * link);
+static int xirc2ps_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+/****************
+ * The attach() and detach() entry points are used to create and destroy
+ * "instances" of the driver, where each instance represents everything
+ * needed to manage one actual PCMCIA card.
+ */
+
+static dev_link_t *xirc2ps_attach(void);
+static void xirc2ps_detach(dev_link_t *);
+
+/****************
+ * You'll also need to prototype all the functions that will actually
+ * be used to talk to your device. See 'pcmem_cs' for a good example
+ * of a fully self-sufficient driver; the other drivers rely more or
+ * less on other parts of the kernel.
+ */
+
+static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card configuration
+ * database.
+ */
+
+static dev_info_t dev_info = "xirc2ps_cs";
+
+/****************
+ * A linked list of "instances" of the device. Each actual
+ * PCMCIA card corresponds to one device instance, and is described
+ * by one dev_link_t structure (defined in ds.h).
+ *
+ * You may not want to use a linked list for this -- for example, the
+ * memory card driver uses an array of dev_link_t pointers, where minor
+ * device numbers are used to derive the corresponding array index.
+ */
+
+static dev_link_t *dev_list;
+
+/****************
+ * A dev_link_t structure has fields for most things that are needed
+ * to keep track of a socket, but there will usually be some device
+ * specific information that also needs to be kept track of. The
+ * 'priv' pointer in a dev_link_t structure can be used to point to
+ * a device-specific private data structure, like this.
+ *
+ * A driver needs to provide a dev_node_t structure for each device
+ * on a card. In some cases, there is only one device per card (for
+ * example, ethernet cards, modems). In other cases, there may be
+ * many actual or logical devices (SCSI adapters, memory cards with
+ * multiple partitions). The dev_node_t structures need to be kept
+ * in a linked list starting at the 'dev' field of a dev_link_t
+ * structure. We allocate them in the card's private data structure,
+ * because they generally can't be allocated dynamically.
+ */
+
+typedef struct local_info_t {
+ dev_link_t link;
+ dev_node_t node;
+ struct net_device_stats stats;
+ int card_type;
+ int probe_port;
+ int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */
+ int mohawk; /* a CE3 type card */
+ int dingo; /* a CEM56 type card */
+ int new_mii; /* has full 10baseT/100baseT MII */
+ int modem; /* is a multifunction card (i.e. with a modem) */
+ void __iomem *dingo_ccr; /* only used for CEM56 cards */
+ unsigned last_ptr_value; /* last packets transmitted value */
+ const char *manf_str;
+} local_info_t;
+
+/****************
+ * Some more prototypes
+ */
+static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void do_tx_timeout(struct net_device *dev);
+static struct net_device_stats *do_get_stats(struct net_device *dev);
+static void set_addresses(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int set_card_type(dev_link_t *link, const void *s);
+static int do_config(struct net_device *dev, struct ifmap *map);
+static int do_open(struct net_device *dev);
+static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static void hardreset(struct net_device *dev);
+static void do_reset(struct net_device *dev, int full);
+static int init_mii(struct net_device *dev);
+static void do_powerdown(struct net_device *dev);
+static int do_stop(struct net_device *dev);
+
+/*=============== Helper functions =========================*/
+static int
+first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+ int err;
+
+ if ((err = pcmcia_get_first_tuple(handle, tuple)) == 0 &&
+ (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
+ err = pcmcia_parse_tuple(handle, tuple, parse);
+ return err;
+}
+
+static int
+next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+ int err;
+
+ if ((err = pcmcia_get_next_tuple(handle, tuple)) == 0 &&
+ (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
+ err = pcmcia_parse_tuple(handle, tuple, parse);
+ return err;
+}
+
+#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
+#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
+#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
+#define PutByte(reg,value) outb((value), ioaddr+(reg))
+#define PutWord(reg,value) outw((value), ioaddr+(reg))
+
+/*====== Functions used for debugging =================================*/
+#if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */
+static void
+PrintRegisters(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+
+ if (pc_debug > 1) {
+ int i, page;
+
+ printk(KDBG_XIRC "Register common: ");
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ for (page = 0; page <= 8; page++) {
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ for (page=0x40 ; page <= 0x5f; page++) {
+ if (page == 0x43 || (page >= 0x46 && page <= 0x4f)
+ || (page >= 0x51 && page <=0x5e))
+ continue;
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ }
+}
+#endif /* PCMCIA_DEBUG */
+
+/*============== MII Management functions ===============*/
+
+/****************
+ * Turn around for read
+ */
+static void
+mii_idle(kio_addr_t ioaddr)
+{
+ PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */
+ udelay(1);
+}
+
+/****************
+ * Write a bit to MDI/O
+ */
+static void
+mii_putbit(kio_addr_t ioaddr, unsigned data)
+{
+ #if 1
+ if (data) {
+ PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */
+ udelay(1);
+ } else {
+ PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */
+ udelay(1);
+ }
+ #else
+ if (data) {
+ PutWord(XIRCREG2_GPR2-1, 0x0e0e);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0f0f);
+ udelay(1);
+ } else {
+ PutWord(XIRCREG2_GPR2-1, 0x0c0c);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0d0d);
+ udelay(1);
+ }
+ #endif
+}
+
+/****************
+ * Get a bit from MDI/O
+ */
+static int
+mii_getbit(kio_addr_t ioaddr)
+{
+ unsigned d;
+
+ PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */
+ udelay(1);
+ d = GetByte(XIRCREG2_GPR2); /* read MDIO */
+ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */
+ udelay(1);
+ return d & 0x20; /* read MDIO */
+}
+
+static void
+mii_wbits(kio_addr_t ioaddr, unsigned data, int len)
+{
+ unsigned m = 1 << (len-1);
+ for (; m; m >>= 1)
+ mii_putbit(ioaddr, data & m);
+}
+
+static unsigned
+mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg)
+{
+ int i;
+ unsigned data=0, m;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */
+ mii_idle(ioaddr); /* turn around */
+ mii_getbit(ioaddr);
+
+ for (m = 1<<15; m; m >>= 1)
+ if (mii_getbit(ioaddr))
+ data |= m;
+ mii_idle(ioaddr);
+ return data;
+}
+
+static void
+mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
+{
+ int i;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */
+ mii_putbit(ioaddr, 1); /* turn around */
+ mii_putbit(ioaddr, 0);
+ mii_wbits(ioaddr, data, len); /* And write the data */
+ mii_idle(ioaddr);
+}
+
+/*============= Main bulk of functions =========================*/
+
+/****************
+ * xirc2ps_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+
+static dev_link_t *
+xirc2ps_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ local_info_t *local;
+ int err;
+
+ DEBUG(0, "attach()\n");
+
+ /* Allocate the device structure */
+ dev = alloc_etherdev(sizeof(local_info_t));
+ if (!dev)
+ return NULL;
+ local = netdev_priv(dev);
+ link = &local->link;
+ link->priv = dev;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+ link->irq.Handler = xirc2ps_interrupt;
+ link->irq.Instance = dev;
+
+ /* Fill in card specific entries */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &do_start_xmit;
+ dev->set_config = &do_config;
+ dev->get_stats = &do_get_stats;
+ dev->do_ioctl = &do_ioctl;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->set_multicast_list = &set_multicast_list;
+ dev->open = &do_open;
+ dev->stop = &do_stop;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = do_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &xirc2ps_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ if ((err = pcmcia_register_client(&link->handle, &client_reg))) {
+ cs_error(link->handle, RegisterClient, err);
+ xirc2ps_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* xirc2ps_attach */
+
+/****************
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+
+static void
+xirc2ps_detach(dev_link_t * link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (!*linkp) {
+ DEBUG(0, "detach(0x%p): dev_link lost\n", link);
+ return;
+ }
+
+ if (link->dev)
+ unregister_netdev(dev);
+
+ /*
+ * If the device is currently configured and active, we won't
+ * actually delete it yet. Instead, it is marked so that when
+ * the release() function is called, that will trigger a proper
+ * detach().
+ */
+ if (link->state & DEV_CONFIG)
+ xirc2ps_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free it */
+ *linkp = link->next;
+ free_netdev(dev);
+} /* xirc2ps_detach */
+
+/****************
+ * Detect the type of the card. s is the buffer with the data of tuple 0x20
+ * Returns: 0 := not supported
+ * mediaid=11 and prodid=47
+ * Media-Id bits:
+ * Ethernet 0x01
+ * Tokenring 0x02
+ * Arcnet 0x04
+ * Wireless 0x08
+ * Modem 0x10
+ * GSM only 0x20
+ * Prod-Id bits:
+ * Pocket 0x10
+ * External 0x20
+ * Creditcard 0x40
+ * Cardbus 0x80
+ *
+ */
+static int
+set_card_type(dev_link_t *link, const void *s)
+{
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ #ifdef PCMCIA_DEBUG
+ unsigned cisrev = ((const unsigned char *)s)[2];
+ #endif
+ unsigned mediaid= ((const unsigned char *)s)[3];
+ unsigned prodid = ((const unsigned char *)s)[4];
+
+ DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n",
+ cisrev, mediaid, prodid);
+
+ local->mohawk = 0;
+ local->dingo = 0;
+ local->modem = 0;
+ local->card_type = XIR_UNKNOWN;
+ if (!(prodid & 0x40)) {
+ printk(KNOT_XIRC "Ooops: Not a creditcard\n");
+ return 0;
+ }
+ if (!(mediaid & 0x01)) {
+ printk(KNOT_XIRC "Not an Ethernet card\n");
+ return 0;
+ }
+ if (mediaid & 0x10) {
+ local->modem = 1;
+ switch(prodid & 15) {
+ case 1: local->card_type = XIR_CEM ; break;
+ case 2: local->card_type = XIR_CEM2 ; break;
+ case 3: local->card_type = XIR_CEM3 ; break;
+ case 4: local->card_type = XIR_CEM33 ; break;
+ case 5: local->card_type = XIR_CEM56M;
+ local->mohawk = 1;
+ break;
+ case 6:
+ case 7: /* 7 is the RealPort 10/56 */
+ local->card_type = XIR_CEM56 ;
+ local->mohawk = 1;
+ local->dingo = 1;
+ break;
+ }
+ } else {
+ switch(prodid & 15) {
+ case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ;
+ break;
+ case 2: local->card_type = XIR_CE2; break;
+ case 3: local->card_type = XIR_CE3;
+ local->mohawk = 1;
+ break;
+ }
+ }
+ if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
+ printk(KNOT_XIRC "Sorry, this is an old CE card\n");
+ return 0;
+ }
+ if (local->card_type == XIR_UNKNOWN)
+ printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n",
+ mediaid, prodid);
+
+ return 1;
+}
+
+/****************
+ * There are some CE2 cards out there which claim to be a CE card.
+ * This function looks for a "CE2" in the 3rd version field.
+ * Returns: true if this is a CE2
+ */
+static int
+has_ce2_string(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[256];
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 254;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (!first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 2) {
+ if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
+ return 1;
+ }
+ return 0;
+}
+
+/****************
+ * xirc2ps_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * ethernet device available to the system.
+ */
+static void
+xirc2ps_config(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ kio_addr_t ioaddr;
+ int err, i;
+ u_char buf[64];
+ cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+
+ local->dingo_ccr = NULL;
+
+ DEBUG(0, "config(0x%p)\n", link);
+
+ /*
+ * This reads the card's CONFIG tuple to find its configuration
+ * registers.
+ */
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+
+ /* Is this a valid card */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if ((err=first_tuple(handle, &tuple, &parse))) {
+ printk(KNOT_XIRC "manfid not found in CIS\n");
+ goto failure;
+ }
+
+ switch(parse.manfid.manf) {
+ case MANFID_XIRCOM:
+ local->manf_str = "Xircom";
+ break;
+ case MANFID_ACCTON:
+ local->manf_str = "Accton";
+ break;
+ case MANFID_COMPAQ:
+ case MANFID_COMPAQ2:
+ local->manf_str = "Compaq";
+ break;
+ case MANFID_INTEL:
+ local->manf_str = "Intel";
+ break;
+ case MANFID_TOSHIBA:
+ local->manf_str = "Toshiba";
+ break;
+ default:
+ printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
+ (unsigned)parse.manfid.manf);
+ goto failure;
+ }
+ DEBUG(0, "found %s card\n", local->manf_str);
+
+ if (!set_card_type(link, buf)) {
+ printk(KNOT_XIRC "this card is not supported\n");
+ goto failure;
+ }
+
+ /* get configuration stuff */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ if ((err=first_tuple(handle, &tuple, &parse)))
+ goto cis_error;
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* get the ethernet address from the CIS */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries:
+ * the first one with a length of zero, the second one correct -
+ * so I skip all entries with length 0 */
+ if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID
+ && ((cistpl_lan_node_id_t *)parse.funce.data)->nb)
+ break;
+ }
+ if (err) { /* not found: try to get the node-id from tuple 0x89 */
+ tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */
+ if ((err = pcmcia_get_first_tuple(handle, &tuple)) == 0 &&
+ (err = pcmcia_get_tuple_data(handle, &tuple)) == 0) {
+ if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
+ memcpy(&parse, buf, 8);
+ else
+ err = -1;
+ }
+ }
+ if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
+ && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
+ buf[1] = 4;
+ memcpy(&parse, buf+1, 8);
+ break;
+ }
+ }
+ }
+ if (err) {
+ printk(KNOT_XIRC "node-id not found in CIS\n");
+ goto failure;
+ }
+ node_id = (cistpl_lan_node_id_t *)parse.funce.data;
+ if (node_id->nb != 6) {
+ printk(KNOT_XIRC "malformed node-id in CIS\n");
+ goto failure;
+ }
+ for (i=0; i < 6; i++)
+ dev->dev_addr[i] = node_id->id[i];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->io.IOAddrLines =10;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ if (local->modem) {
+ int pass;
+
+ if (do_sound) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status |= CCSR_AUDIO_ENA;
+ }
+ link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ;
+ link->io.NumPorts2 = 8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (local->dingo) {
+ /* Take the Modem IO port from the CIS and scan for a free
+ * Ethernet port */
+ link->io.NumPorts1 = 16; /* no Mako stuff anymore */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ link->conf.ConfigIndex = cf->index ;
+ link->io.BasePort2 = cf->io.win[0].base;
+ link->io.BasePort1 = ioaddr;
+ if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ goto port_found;
+ }
+ }
+ }
+ } else {
+ link->io.NumPorts1 = 18;
+ /* We do 2 passes here: The first one uses the regular mapping and
+ * the second tries again, thereby considering that the 32 ports are
+ * mirrored every 32 bytes. Actually we use a mirrored port for
+ * the Mako if (on the first pass) the COR bit 5 is set.
+ */
+ for (pass=0; pass < 2; pass++) {
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)){
+ if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){
+ link->conf.ConfigIndex = cf->index ;
+ link->io.BasePort2 = cf->io.win[0].base;
+ link->io.BasePort1 = link->io.BasePort2
+ + (pass ? (cf->index & 0x20 ? -24:8)
+ : (cf->index & 0x20 ? 8:-24));
+ if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ goto port_found;
+ }
+ }
+ }
+ /* if special option:
+ * try to configure as Ethernet only.
+ * .... */
+ }
+ printk(KNOT_XIRC "no ports available\n");
+ } else {
+ link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE;
+ link->io.NumPorts1 = 16;
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ link->io.BasePort1 = ioaddr;
+ if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ goto port_found;
+ }
+ link->io.BasePort1 = 0; /* let CS decide */
+ if ((err=pcmcia_request_io(link->handle, &link->io))) {
+ cs_error(link->handle, RequestIO, err);
+ goto config_error;
+ }
+ }
+ port_found:
+ if (err)
+ goto config_error;
+
+ /****************
+ * Now allocate an interrupt line. Note that this does not
+ * actually assign a handler to the interrupt.
+ */
+ if ((err=pcmcia_request_irq(link->handle, &link->irq))) {
+ cs_error(link->handle, RequestIRQ, err);
+ goto config_error;
+ }
+
+ /****************
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping.
+ */
+ if ((err=pcmcia_request_configuration(link->handle, &link->conf))) {
+ cs_error(link->handle, RequestConfiguration, err);
+ goto config_error;
+ }
+
+ if (local->dingo) {
+ conf_reg_t reg;
+ win_req_t req;
+ memreq_t mem;
+
+ /* Reset the modem's BAR to the correct value
+ * This is necessary because in the RequestConfiguration call,
+ * the base address of the ethernet port (BasePort1) is written
+ * to the BAR registers of the modem.
+ */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_IOBASE_0;
+ reg.Value = link->io.BasePort2 & 0xff;
+ if ((err = pcmcia_access_configuration_register(link->handle, &reg))) {
+ cs_error(link->handle, AccessConfigurationRegister, err);
+ goto config_error;
+ }
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_IOBASE_1;
+ reg.Value = (link->io.BasePort2 >> 8) & 0xff;
+ if ((err = pcmcia_access_configuration_register(link->handle, &reg))) {
+ cs_error(link->handle, AccessConfigurationRegister, err);
+ goto config_error;
+ }
+
+ /* There is no config entry for the Ethernet part which
+ * is at 0x0800. So we allocate a window into the attribute
+ * memory and write directly to the CIS registers
+ */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = 0;
+ if ((err = pcmcia_request_window(&link->handle, &req, &link->win))) {
+ cs_error(link->handle, RequestWindow, err);
+ goto config_error;
+ }
+ local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
+ mem.CardOffset = 0x0;
+ mem.Page = 0;
+ if ((err = pcmcia_map_mem_page(link->win, &mem))) {
+ cs_error(link->handle, MapMemPage, err);
+ goto config_error;
+ }
+
+ /* Set up the CCRs; there is no information in the CIS about the Ethernet
+ * part.
+ */
+ writeb(0x47, local->dingo_ccr + CISREG_COR);
+ ioaddr = link->io.BasePort1;
+ writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);
+
+ #if 0
+ {
+ u_char tmp;
+ printk(KERN_INFO "ECOR:");
+ for (i=0; i < 7; i++) {
+ tmp = readb(local->dingo_ccr + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ printk(KERN_INFO "DCOR:");
+ for (i=0; i < 4; i++) {
+ tmp = readb(local->dingo_ccr + 0x20 + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ printk(KERN_INFO "SCOR:");
+ for (i=0; i < 10; i++) {
+ tmp = readb(local->dingo_ccr + 0x40 + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ }
+ #endif
+
+ writeb(0x01, local->dingo_ccr + 0x20);
+ writeb(0x0c, local->dingo_ccr + 0x22);
+ writeb(0x00, local->dingo_ccr + 0x24);
+ writeb(0x00, local->dingo_ccr + 0x26);
+ writeb(0x00, local->dingo_ccr + 0x28);
+ }
+
+ /* The if_port symbol can be set when the module is loaded */
+ local->probe_port=0;
+ if (!if_port) {
+ local->probe_port = dev->if_port = 1;
+ } else if ((if_port >= 1 && if_port <= 2) ||
+ (local->mohawk && if_port==4))
+ dev->if_port = if_port;
+ else
+ printk(KNOT_XIRC "invalid if_port requested\n");
+
+ /* we can now register the device with the net subsystem */
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ if (local->dingo)
+ do_reset(dev, 1); /* a kludge to make the cem56 work */
+
+ link->dev = &local->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if ((err=register_netdev(dev))) {
+ printk(KNOT_XIRC "register_netdev() failed\n");
+ link->dev = NULL;
+ goto config_error;
+ }
+
+ strcpy(local->node.dev_name, dev->name);
+
+ /* print some information about the hardware */
+ printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr",
+ dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%c%02X", i?':':' ', dev->dev_addr[i]);
+ printk("\n");
+
+ return;
+
+ config_error:
+ link->state &= ~DEV_CONFIG_PENDING;
+ xirc2ps_release(link);
+ return;
+
+ cis_error:
+ printk(KNOT_XIRC "unable to parse CIS\n");
+ failure:
+ link->state &= ~DEV_CONFIG_PENDING;
+} /* xirc2ps_config */
+
+/****************
+ * After a card is removed, xirc2ps_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+xirc2ps_release(dev_link_t *link)
+{
+
+ DEBUG(0, "release(0x%p)\n", link);
+
+ if (link->win) {
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ if (local->dingo)
+ iounmap(local->dingo_ccr - 0x0800);
+ pcmcia_release_window(link->win);
+ }
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+
+} /* xirc2ps_release */
+
+/*====================================================================*/
+
+/****************
+ * The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the net drivers from trying
+ * to talk to the card any more.
+ *
+ * When a CARD_REMOVAL event is received, we immediately set a flag
+ * to block future accesses to this device. All the functions that
+ * actually access the device should check this flag to make sure
+ * the card is still present.
+ */
+
+static int
+xirc2ps_event(event_t event, int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(0, "event(%d)\n", (int)event);
+
+ switch (event) {
+ case CS_EVENT_REGISTRATION_COMPLETE:
+ DEBUG(0, "registration complete\n");
+ break;
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG)
+ netif_device_detach(dev);
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ xirc2ps_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open) {
+ netif_device_detach(dev);
+ do_powerdown(dev);
+ }
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ do_reset(dev,1);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* xirc2ps_event */
+
+/*====================================================================*/
+
+/****************
+ * This is the interrupt service routine.
+ */
+static irqreturn_t
+xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr;
+ u_char saved_page;
+ unsigned bytes_rcvd;
+ unsigned int_status, eth_status, rx_status, tx_status;
+ unsigned rsr, pktlen;
+ ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days
+ * is this something to worry about?
+ * -- on a laptop?
+ */
+
+ if (!netif_device_present(dev))
+ return IRQ_HANDLED;
+
+ ioaddr = dev->base_addr;
+ if (lp->mohawk) { /* must disable the interrupt */
+ PutByte(XIRCREG_CR, 0);
+ }
+
+ DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);
+
+ saved_page = GetByte(XIRCREG_PR);
+ /* Read the ISR to see what caused the interrupt.
+ * This also clears the interrupt flags on CE2 cards
+ */
+ int_status = GetByte(XIRCREG_ISR);
+ bytes_rcvd = 0;
+ loop_entry:
+ if (int_status == 0xff) { /* card may be ejected */
+ DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq);
+ goto leave;
+ }
+ eth_status = GetByte(XIRCREG_ESR);
+
+ SelectPage(0x40);
+ rx_status = GetByte(XIRCREG40_RXST0);
+ PutByte(XIRCREG40_RXST0, (~rx_status & 0xff));
+ tx_status = GetByte(XIRCREG40_TXST0);
+ tx_status |= GetByte(XIRCREG40_TXST1) << 8;
+ PutByte(XIRCREG40_TXST0, 0);
+ PutByte(XIRCREG40_TXST1, 0);
+
+ DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
+ dev->name, int_status, eth_status, rx_status, tx_status);
+
+ /***** receive section ******/
+ SelectPage(0);
+ while (eth_status & FullPktRcvd) {
+ rsr = GetByte(XIRCREG0_RSR);
+ if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) {
+ /* too many bytes received during this int, drop the rest of the
+ * packets */
+ lp->stats.rx_dropped++;
+ DEBUG(2, "%s: RX drop, too much done\n", dev->name);
+ } else if (rsr & PktRxOk) {
+ struct sk_buff *skb;
+
+ pktlen = GetWord(XIRCREG0_RBC);
+ bytes_rcvd += pktlen;
+
+ DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen);
+
+ skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
+ if (!skb) {
+ printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n",
+ pktlen);
+ lp->stats.rx_dropped++;
+ } else { /* okay get the packet */
+ skb_reserve(skb, 2);
+ if (lp->silicon == 0 ) { /* work around a hardware bug */
+ unsigned rhsa; /* receive start address */
+
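+ /* Workaround detail: the 32K receive buffer wraps at offset 0x8000 on
+ * this silicon revision, so when a packet crosses that boundary it is
+ * copied byte by byte (discarding the byte read right at the wrap point)
+ * instead of using the fast insw() path below.
+ */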
+ SelectPage(5);
+ rhsa = GetWord(XIRCREG5_RHSA0);
+ SelectPage(0);
+ rhsa += 3; /* skip control infos */
+ if (rhsa >= 0x8000)
+ rhsa = 0;
+ if (rhsa + pktlen > 0x8000) {
+ unsigned i;
+ u_char *buf = skb_put(skb, pktlen);
+ for (i=0; i < pktlen ; i++, rhsa++) {
+ buf[i] = GetByte(XIRCREG_EDP);
+ if (rhsa == 0x8000) {
+ rhsa = 0;
+ i--;
+ }
+ }
+ } else {
+ insw(ioaddr+XIRCREG_EDP,
+ skb_put(skb, pktlen), (pktlen+1)>>1);
+ }
+ }
+ #if 0
+ else if (lp->mohawk) {
+ /* To use this 32 bit access we would need
+ * a manually optimized loop.
+ * Also the words are swapped; we could get more
+ * performance by using 32 bit access and swapping
+ * the words in a register. Will need this for cardbus.
+ *
+ * Note: don't forget to change the ALLOC_SKB to .. +3
+ */
+ unsigned i;
+ u_long *p = skb_put(skb, pktlen);
+ register u_long a;
+ kio_addr_t edpreg = ioaddr+XIRCREG_EDP-2;
+ for (i=0; i < pktlen ; i += 4, p++) {
+ a = inl(edpreg);
+ __asm__("rorl $16,%0\n\t"
+ :"=q" (a)
+ : "0" (a));
+ *p = a;
+ }
+ }
+ #endif
+ else {
+ insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen),
+ (pktlen+1)>>1);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ if (!(rsr & PhyPkt))
+ lp->stats.multicast++;
+ }
+ } else { /* bad packet */
+ DEBUG(5, "rsr=%#02x\n", rsr);
+ }
+ if (rsr & PktTooLong) {
+ lp->stats.rx_frame_errors++;
+ DEBUG(3, "%s: Packet too long\n", dev->name);
+ }
+ if (rsr & CRCErr) {
+ lp->stats.rx_crc_errors++;
+ DEBUG(3, "%s: CRC error\n", dev->name);
+ }
+ if (rsr & AlignErr) {
+ lp->stats.rx_fifo_errors++; /* okay ? */
+ DEBUG(3, "%s: Alignment error\n", dev->name);
+ }
+
+ /* clear the received/dropped/error packet */
+ PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */
+
+ /* get the new ethernet status */
+ eth_status = GetByte(XIRCREG_ESR);
+ }
+ if (rx_status & 0x10) { /* Receive overrun */
+ lp->stats.rx_over_errors++;
+ PutByte(XIRCREG_CR, ClearRxOvrun);
+ DEBUG(3, "receive overrun cleared\n");
+ }
+
+ /***** transmit section ******/
+ if (int_status & PktTxed) {
+ unsigned n, nn;
+
+ n = lp->last_ptr_value;
+ nn = GetByte(XIRCREG0_PTR);
+ lp->last_ptr_value = nn;
+ if (nn < n) /* rollover */
+ lp->stats.tx_packets += 256 - n;
+ else if (n == nn) { /* happens sometimes - don't know why */
+ DEBUG(0, "PTR not changed?\n");
+ } else
+ lp->stats.tx_packets += lp->last_ptr_value - n;
+ netif_wake_queue(dev);
+ }
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ DEBUG(0, "tx restarted due to excessive collisions\n");
+ PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
+ }
+ if (tx_status & 0x0040)
+ lp->stats.tx_aborted_errors++;
+
+ /* recalculate our work chunk so that we limit the duration of this
+ * ISR to about 1/10 of a second.
+ * Calculate only if we received a reasonable amount of bytes.
+ */
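+ /* Worked example: if 8000 bytes arrived and the handler ran for
+ * 2*(HZ/10) ticks, maxrx_bytes becomes (8000 * (HZ/10)) / (2*(HZ/10))
+ * = 4000, which lies inside the [2000, 22000] clamp below.
+ */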
+ if (bytes_rcvd > 1000) {
+ u_long duration = jiffies - start_ticks;
+
+ if (duration >= HZ/10) { /* if more than about 1/10 second */
+ maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration;
+ if (maxrx_bytes < 2000)
+ maxrx_bytes = 2000;
+ else if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n",
+ maxrx_bytes, bytes_rcvd, duration);
+ } else if (!duration && maxrx_bytes < 22000) {
+ /* now much faster */
+ maxrx_bytes += 2000;
+ if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ DEBUG(1, "set maxrx=%u\n", maxrx_bytes);
+ }
+ }
+
+ leave:
+ if (lockup_hack) {
+ if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0)
+ goto loop_entry;
+ }
+ SelectPage(saved_page);
+ PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */
+ /* Instead of dropping packets during a receive, we could
+ * force an interrupt with this command:
+ * PutByte(XIRCREG_CR, EnableIntr|ForceIntr);
+ */
+ return IRQ_HANDLED;
+} /* xirc2ps_interrupt */
+
+/*====================================================================*/
+
+static void
+do_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ lp->stats.tx_errors++;
+ /* reset the card */
+ do_reset(dev,1);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int
+do_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ int okay;
+ unsigned freespace;
+ unsigned pktlen = skb? skb->len : 0;
+
+ DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n",
+ skb, dev, pktlen);
+
+
+ /* adjust the packet length to the minimum required
+ * and hope that the buffer is large enough
+ * to provide some random data.
+ * fixme: for Mohawk we could instead send a larger packet length
+ * than we actually have; the chip will pad it in its buffer
+ * with random bytes
+ */
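+ /* ETH_ZLEN is the 60 byte minimum Ethernet frame length (without FCS). */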
+ if (pktlen < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ pktlen = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+ SelectPage(0);
+ PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
+ freespace = GetWord(XIRCREG0_TSO);
+ okay = freespace & 0x8000;
+ freespace &= 0x7fff;
+ /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
+ okay = pktlen +2 < freespace;
+ DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n",
+ dev->name, freespace, okay ? " (okay)":" (not enough)");
+ if (!okay) { /* not enough space */
+ return 1; /* upper layer may decide to requeue this packet */
+ }
+ /* send the packet */
+ PutWord(XIRCREG_EDP, (u_short)pktlen);
+ outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1);
+ if (pktlen & 1)
+ PutByte(XIRCREG_EDP, skb->data[pktlen-1]);
+
+ if (lp->mohawk)
+ PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
+
+ dev_kfree_skb (skb);
+ dev->trans_start = jiffies;
+ lp->stats.tx_bytes += pktlen;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static struct net_device_stats *
+do_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+
+ /* lp->stats.rx_missed_errors = GetByte(?) */
+ return &lp->stats;
+}
+
+/****************
+ * Set all addresses: This first one is the individual address,
+ * the next 9 addresses are taken from the multicast list and
+ * the rest is filled with the individual address.
+ */
+static void
+set_addresses(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addr;
+ int i,j,k,n;
+
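+ /* Loop bookkeeping: i is the byte within the current address (0..5),
+ * j is the register offset within the page (8..15), k is the register
+ * page (starting at 0x50) and n is the address slot being written
+ * (0 = individual address, 1..9 = multicast or individual again).
+ */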
+ SelectPage(k=0x50);
+ for (i=0,j=8,n=0; ; i++, j++) {
+ if (i > 5) {
+ if (++n > 9)
+ break;
+ i = 0;
+ }
+ if (j > 15) {
+ j = 8;
+ k++;
+ SelectPage(k);
+ }
+
+ if (n && n <= dev->mc_count && dmi) {
+ addr = dmi->dmi_addr;
+ dmi = dmi->next;
+ } else
+ addr = dev->dev_addr;
+
+ if (lp->mohawk)
+ PutByte(j, addr[5-i]);
+ else
+ PutByte(j, addr[i]);
+ }
+ SelectPage(0);
+}
+
+/****************
+ * Set or clear the multicast filter for this adaptor.
+ * We can filter up to 9 addresses, if more are requested we set
+ * multicast promiscuous mode.
+ */
+
+static void
+set_multicast_list(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+
+ SelectPage(0x42);
+ if (dev->flags & IFF_PROMISC) { /* snoop */
+ PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+ } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ PutByte(XIRCREG42_SWC1, 0x06); /* set MPE */
+ } else if (dev->mc_count) {
+ /* the chip can filter 9 addresses perfectly */
+ PutByte(XIRCREG42_SWC1, 0x00);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, Offline);
+ set_addresses(dev);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ } else { /* standard usage */
+ PutByte(XIRCREG42_SWC1, 0x00);
+ }
+ SelectPage(0);
+}
+
+static int
+do_config(struct net_device *dev, struct ifmap *map)
+{
+ local_info_t *local = netdev_priv(dev);
+
+ DEBUG(0, "do_config(%p)\n", dev);
+ if (map->port != 255 && map->port != dev->if_port) {
+ if (map->port > 4)
+ return -EINVAL;
+ if (!map->port) {
+ local->probe_port = 1;
+ dev->if_port = 1;
+ } else {
+ local->probe_port = 0;
+ dev->if_port = map->port;
+ }
+ printk(KERN_INFO "%s: switching to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ do_reset(dev,1); /* not the fine way :-) */
+ }
+ return 0;
+}
+
+/****************
+ * Open the driver
+ */
+static int
+do_open(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ DEBUG(0, "do_open(%p)\n", dev);
+
+ /* Check that the PCMCIA card is still here. */
+ /* Physical device present signature. */
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ /* okay */
+ link->open++;
+
+ netif_start_queue(dev);
+ do_reset(dev,1);
+
+ return 0;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "xirc2ps_cs");
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+static int
+do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ local_info_t *local = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_ifru;
+
+ DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data[0], data[1], data[2], data[3]);
+
+ if (!local->mohawk)
+ return -EOPNOTSUPP;
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get the address of the PHY in use. */
+ data[0] = 0; /* we have only this address */
+ /* fall through */
+ case SIOCGMIIREG: /* Read the specified MII register. */
+ data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ break;
+ case SIOCSMIIREG: /* Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mii_wr(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2], 16);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void
+hardreset(struct net_device *dev)
+{
+ local_info_t *local = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+
+ SelectPage(4);
+ udelay(1);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ msleep(40); /* wait 40 msec */
+ if (local->mohawk)
+ PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */
+ else
+ PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */
+ msleep(20); /* wait 20 msec */
+}
+
+static void
+do_reset(struct net_device *dev, int full)
+{
+ local_info_t *local = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ unsigned value;
+
+ DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
+
+ hardreset(dev);
+ PutByte(XIRCREG_CR, SoftReset); /* set */
+ msleep(20); /* wait 20 msec */
+ PutByte(XIRCREG_CR, 0); /* clear */
+ msleep(40); /* wait 40 msec */
+ if (local->mohawk) {
+ SelectPage(4);
+ /* set pin GP1 and GP2 to output (0x0c)
+ * set GP1 to low to power up the ML6692 (0x00)
+ * set GP2 to high to power up the 10MHz chip (0x02)
+ */
+ PutByte(XIRCREG4_GPR0, 0x0e);
+ }
+
+ /* give the circuits some time to power up */
+ msleep(500); /* about 500ms */
+
+ local->last_ptr_value = 0;
+ local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4
+ : (GetByte(XIRCREG4_BOV) & 0x30) >> 4;
+
+ if (local->probe_port) {
+ if (!local->mohawk) {
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR0, 4);
+ local->probe_port = 0;
+ }
+ } else if (dev->if_port == 2) { /* enable 10Base2 */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ } else { /* enable 10BaseT */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0x80);
+ }
+ msleep(40); /* wait 40 msec to let it complete */
+
+ #ifdef PCMCIA_DEBUG
+ if (pc_debug) {
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
+ }
+ #endif
+
+ /* setup the ECR */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */
+ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */
+ value = GetByte(XIRCREG1_ECR);
+ #if 0
+ if (local->mohawk)
+ value |= DisableLinkPulse;
+ PutByte(XIRCREG1_ECR, value);
+ #endif
+ DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value);
+
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
+
+ if (local->silicon != 1) {
+ /* set the local memory dividing line.
+ * The comments in the sample code say that this is only
+ * settable with the scipper version 2 which is revision 0.
+ * Always for CE3 cards
+ */
+ SelectPage(2);
+ PutWord(XIRCREG2_RBS, 0x2000);
+ }
+
+ if (full)
+ set_addresses(dev);
+
+ /* Hardware workaround:
+ * The receive byte pointer after reset is off by 1 so we need
+ * to move the offset pointer back to 0.
+ */
+ SelectPage(0);
+ PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */
+
+ /* setup MAC IMRs and clear status registers */
+ SelectPage(0x40); /* Bit 7 ... bit 0 */
+ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/
+ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */
+
+ if (full && local->mohawk && init_mii(dev)) {
+ if (dev->if_port == 4 || local->dingo || local->new_mii) {
+ printk(KERN_INFO "%s: MII selected\n", dev->name);
+ SelectPage(2);
+ PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
+ msleep(20);
+ } else {
+ printk(KERN_INFO "%s: MII detected; using 10mbs\n",
+ dev->name);
+ SelectPage(0x42);
+ if (dev->if_port == 2) /* enable 10Base2 */
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ else /* enable 10BaseT */
+ PutByte(XIRCREG42_SWC1, 0x80);
+ msleep(40); /* wait 40 msec to let it complete */
+ }
+ if (full_duplex)
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
+ } else { /* No MII */
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ dev->if_port = (value & MediaSelect) ? 1 : 2;
+ }
+
+ /* configure the LEDs */
+ SelectPage(2);
+ if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */
+ PutByte(XIRCREG2_LED, 0x3b);
+ else /* Coax: Not-Collision and Activity */
+ PutByte(XIRCREG2_LED, 0x3a);
+
+ if (local->dingo)
+ PutByte(0x0b, 0x04); /* 100 Mbit LED */
+
+ /* enable receiver and put the mac online */
+ if (full) {
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ }
+
+ /* setup Ethernet IMR and enable interrupts */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff);
+ udelay(1);
+ SelectPage(0);
+ PutByte(XIRCREG_CR, EnableIntr);
+ if (local->modem && !local->dingo) { /* do some magic */
+ if (!(GetByte(0x10) & 0x01))
+ PutByte(0x10, 0x11); /* unmask master-int bit */
+ }
+
+ if (full)
+ printk(KERN_INFO "%s: media %s, silicon revision %d\n",
+ dev->name, if_names[dev->if_port], local->silicon);
+ /* We should switch back to page 0 to avoid a bug in revision 0
+ * where regs with offset below 8 can't be read after an access
+ * to the MAC registers */
+ SelectPage(0);
+}
+
+/****************
+ * Initialize the Media-Independent-Interface
+ * Returns: True if we have a good MII
+ */
+static int
+init_mii(struct net_device *dev)
+{
+ local_info_t *local = netdev_priv(dev);
+ kio_addr_t ioaddr = dev->base_addr;
+ unsigned control, status, linkpartner;
+ int i;
+
+ if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */
+ dev->if_port = if_port;
+ local->probe_port = 0;
+ return 1;
+ }
+
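+ /* Standard MII (802.3 clause 22) register numbers used below:
+ * 0 = control (BMCR), 1 = status (BMSR), 2 = PHY identifier,
+ * 5 = autonegotiation link partner ability (ANLPAR).
+ */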
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0xff00) != 0x7800)
+ return 0; /* No MII */
+
+ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff);
+
+ if (local->probe_port)
+ control = 0x1000; /* auto neg */
+ else if (dev->if_port == 4)
+ control = 0x2000; /* no auto neg, 100mbs mode */
+ else
+ control = 0x0000; /* no auto neg, 10mbs mode */
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ control = mii_rd(ioaddr, 0, 0);
+
+ if (control & 0x0400) {
+ printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n",
+ dev->name);
+ local->probe_port = 0;
+ return 0;
+ }
+
+ if (local->probe_port) {
+ /* according to the DP83840A specs the auto negotiation process
+ * may take up to 3.5 sec, so we use this also for our ML6692
+ * Fixme: Better to use a timer here!
+ */
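+ /* BMSR bit 5 (0x0020) signals autonegotiation complete and bit 2
+ * (0x0004) signals link up, per the standard MII register layout.
+ */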
+ for (i=0; i < 35; i++) {
+ msleep(100); /* wait 100 msec */
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0x0020) && (status & 0x0004))
+ break;
+ }
+
+ if (!(status & 0x0020)) {
+ printk(KERN_INFO "%s: autonegotiation failed;"
+ " using 10mbs\n", dev->name);
+ if (!local->new_mii) {
+ control = 0x0000;
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ SelectPage(0);
+ dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2;
+ }
+ } else {
+ linkpartner = mii_rd(ioaddr, 0, 5);
+ printk(KERN_INFO "%s: MII link partner: %04x\n",
+ dev->name, linkpartner);
+ if (linkpartner & 0x0080) {
+ dev->if_port = 4;
+ } else
+ dev->if_port = 1;
+ }
+ }
+
+ return 1;
+}
+
+static void
+do_powerdown(struct net_device *dev)
+{
+
+ kio_addr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "do_powerdown(%p)\n", dev);
+
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+}
+
+static int
+do_stop(struct net_device *dev)
+{
+ kio_addr_t ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ dev_link_t *link = &lp->link;
+
+ DEBUG(0, "do_stop(%p)\n", dev);
+
+ if (!link)
+ return -ENODEV;
+
+ netif_stop_queue(dev);
+
+ SelectPage(0);
+ PutByte(XIRCREG_CR, 0); /* disable interrupts */
+ SelectPage(0x01);
+ PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+
+ link->open--;
+ return 0;
+}
+
+static struct pcmcia_driver xirc2ps_cs_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "xirc2ps_cs",
+ },
+ .attach = xirc2ps_attach,
+ .detach = xirc2ps_detach,
+};
+
+static int __init
+init_xirc2ps_cs(void)
+{
+ return pcmcia_register_driver(&xirc2ps_cs_driver);
+}
+
+static void __exit
+exit_xirc2ps_cs(void)
+{
+ pcmcia_unregister_driver(&xirc2ps_cs_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_xirc2ps_cs);
+module_exit(exit_xirc2ps_cs);
+
+#ifndef MODULE
+static int __init setup_xirc2ps_cs(char *str)
+{
+ /* if_port, full_duplex, do_sound, lockup_hack
+ */
+ int ints[10] = { -1 };
+
+ str = get_options(str, 9, ints);
+
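+ /* get_options() stores the number of integers it parsed in ints[0];
+ * MAYBE_SET only applies a value if enough integers were given and
+ * the value is not the -1 placeholder.
+ */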
+#define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; }
+ MAYBE_SET(if_port, 3);
+ MAYBE_SET(full_duplex, 4);
+ MAYBE_SET(do_sound, 5);
+ MAYBE_SET(lockup_hack, 6);
+#undef MAYBE_SET
+
+ return 0;
+}
+
+__setup("xirc2ps_cs=", setup_xirc2ps_cs);
+#endif
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
new file mode 100644
index 000000000000..17947e6c8793
--- /dev/null
+++ b/drivers/net/pcnet32.c
@@ -0,0 +1,2358 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.30i"
+#define DRV_RELDATE "06.28.2004"
+#define PFX DRV_NAME ": "
+
+static const char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static struct pci_device_id pcnet32_pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+
+
+static int pcnet32_debug = 0;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
+
+#define PCNET32_NUM_REGS 168
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver, so look for a verbose description in lance.c. The difference
+ * from the normal lance driver is the use of the 32bit mode of the PCnet32
+ * and PCnetPCI chips. Because these are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01: Initial version
+ * only tested on Alpha Noname Board
+ * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
+ * tested on a ASUS SP3G
+ * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
+ * looks like the 974 doesn't like stopping and restarting in a
+ * short period of time; now we do a reinit of the lance; the
+ * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ * and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
+ * made it standalone (no need for lance.c)
+ * v0.13: added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14: stripped down additional PCI probe (thanks to David C Niemi
+ * and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15: added 79C965 (VLB) probe
+ * added interrupt sharing for PCI chips
+ * v0.16: fixed set_multicast_list on Alpha machines
+ * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19: changed setting of autoselect bit
+ * v0.20: removed additional Compaq PCI probe; there is now a working one
+ * in arch/i386/bios32.c
+ * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22: added printing of status to ring dump
+ * v0.23: changed enet_statistics to net_device_stats
+ * v0.90: added multicast filter
+ * added module support
+ * changed irq probe to new style
+ * added PCnetFast chip id
+ * added fix for receive stalls with Intel saturn chipsets
+ * added in-place rx skbs like in the tulip driver
+ * minor cleanups
+ * v0.91: added PCnetFast+ chip id
+ * back port to 2.0.x
+ * v1.00: added some stuff from Donald Becker's 2.0.34 version
+ * added support for byte counters in net_dev_stats
+ * v1.01: do ring dumps, only when debugging the driver
+ * increased the transmit timeout
+ * v1.02: fixed memory leak in pcnet32_init_ring()
+ * v1.10: workaround for stopped transmitter
+ * added port selection for modules
+ * detect special T1/E1 WAN card and setup port selection
+ * v1.11: fixed wrong checking of Tx errors
+ * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
+ * added save original kmalloc addr for freeing (mcr@solidum.com)
+ * added support for PCnetHome chip (joe@MIT.EDU)
+ * rewritten PCI card detection
+ * added dwio mode to get driver working on some PPC machines
+ * v1.21: added mii selection and mii ioctl
+ * v1.22: changed pci scanning code to make PPC people happy
+ * fixed switching to 32bit mode in pcnet32_open() (thanks
+ * to Michael Richard <mcr@solidum.com> for noticing this one)
+ * added sub vendor/device id matching (thanks again to
+ * Michael Richard <mcr@solidum.com>)
+ * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
+ * v1.23 fixed small bug, when manual selecting MII speed/duplex
+ * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
+ * underflows. Added tx_start_pt module parameter. Increased
+ * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
+ * for FAST[+] chipsets. <kaf@fc.hp.com>
+ * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
+ * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
+ * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
+ * <jamey@crl.dec.com>
+ * - Fixed a few bugs, related to running the controller in 32bit mode.
+ * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
+ * v1.27 improved CSR/PROM address detection, lots of cleanups,
+ * new pcnet32vlb module option, HP-PARISC support,
+ * added module parameter descriptions,
+ * initial ethtool support - Helge Deller <deller@gmx.de>
+ * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
+ * use alloc_etherdev and register_netdev
+ * fix pci probe not increment cards_found
+ * FD auto negotiate error workaround for xSeries250
+ * clean up and using new mii module
+ * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
+ * Added timer for cable connection state changes.
+ * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com>
+ * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com>
+ * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl.
+ * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single
+ * length errors, and transmit hangs. Cleans up after errors in open.
+ * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test.
+ * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections.
+ * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical
+ * identification code (blink led's) and register dump.
+ * Don Fry added timer for 971/972 so skbufs don't remain on tx ring
+ * forever.
+ * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt
+ * (ltint) as they added complexity and didn't give good throughput.
+ * v1.30a 22 May 2004 Don Fry limit frames received during interrupt.
+ * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973,
+ * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>.
+ * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart.
+ * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets.
+ * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang.
+ * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI,
+ * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>,
+ * and Brian Murphy <brian@murphy.dk>.
+ * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option
+ * homepna for selecting HomePNA mode for PCNet/Home 79C978.
+ * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
+ * v1.30i 28 Jun 2004 Don Fry change to use module_param.
+ */
+
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * The defaults below (log2 values of 4 and 5) give 16 Tx buffers and
+ * 32 Rx buffers.
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
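+/* These macros encode the log2 ring sizes for the init block's tlen_rlen
+ * field: TX_RING_LEN_BITS | RX_RING_LEN_BITS = (4 << 12) | (5 << 4) = 0x4050
+ * with the defaults above.
+ */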
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ u32 base;
+ s16 buf_length;
+ s16 status;
+ u32 msg_length;
+ u32 reserved;
+};
+
+struct pcnet32_tx_head {
+ u32 base;
+ s16 length;
+ s16 status;
+ u32 misc;
+ u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ u16 mode;
+ u16 tlen_rlen;
+ u8 phys_addr[6];
+ u16 reserved;
+ u32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
+};
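+/* All init block fields are little-endian as seen by the chip, hence the
+ * le16/le32 conversions wherever the driver fills this structure in.
+ */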
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr)(unsigned long, int);
+ void (*write_csr)(unsigned long, int, u16);
+ u16 (*read_bcr)(unsigned long, int);
+ void (*write_bcr)(unsigned long, int, u16);
+ u16 (*read_rap)(unsigned long);
+ void (*write_rap)(unsigned long, u16);
+ void (*reset)(unsigned long);
+};
+
+/*
+ * The first three fields of pcnet32_private are read by the ethernet device
+ * so the structure should be allocated by pci_alloc_consistent().
+ */
+struct pcnet32_private {
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+ struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+ struct pcnet32_init_block init_block;
+ dma_addr_t dma_addr; /* DMA address of beginning of this
+ object, returned by
+ pci_alloc_consistent */
+ struct pci_dev *pci_dev; /* Pointer to the associated pci device
+ structure */
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t tx_dma_addr[TX_RING_SIZE];
+ dma_addr_t rx_dma_addr[RX_RING_SIZE];
+ struct pcnet32_access a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct net_device_stats stats;
+ char tx_full;
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ struct timer_list blink_timer;
+ u32 msg_enable; /* debug message level */
+};
+
+static void pcnet32_probe_vlbus(void);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
+static int pcnet32_rx(struct net_device *);
+static void pcnet32_tx_timeout (struct net_device *dev);
+static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_load_multicast(struct net_device *dev);
+static void pcnet32_set_multicast_list(struct net_device *);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static void pcnet32_watchdog(struct net_device *);
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *eth_test, u64 *data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
+static int pcnet32_phys_id(struct net_device *dev, u32 data);
+static void pcnet32_led_blink_callback(struct net_device *dev);
+static int pcnet32_get_regs_len(struct net_device *dev);
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr);
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+
+static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ return inw (addr+PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ outw (val, addr+PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr (unsigned long addr, int index)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ return inw (addr+PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val)
+{
+ outw (index, addr+PCNET32_WIO_RAP);
+ outw (val, addr+PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap (unsigned long addr)
+{
+ return inw (addr+PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap (unsigned long addr, u16 val)
+{
+ outw (val, addr+PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset (unsigned long addr)
+{
+ inw (addr+PCNET32_WIO_RESET);
+}
+
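+/* Probe helper: after a reset CSR0 reads 4 (STOP set) and RAP is freely
+ * readable and writable, so writing 88 and reading it back tells us whether
+ * the 16-bit word-I/O register layout is the one currently in effect.
+ */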
+static int pcnet32_wio_check (unsigned long addr)
+{
+ outw (88, addr+PCNET32_WIO_RAP);
+ return (inw (addr+PCNET32_WIO_RAP) == 88);
+}
+
+static struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr (unsigned long addr, int index)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ return (inl (addr+PCNET32_DWIO_RDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ outl (val, addr+PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ return (inl (addr+PCNET32_DWIO_BDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val)
+{
+ outl (index, addr+PCNET32_DWIO_RAP);
+ outl (val, addr+PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap (unsigned long addr)
+{
+ return (inl (addr+PCNET32_DWIO_RAP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_rap (unsigned long addr, u16 val)
+{
+ outl (val, addr+PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset (unsigned long addr)
+{
+ inl (addr+PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check (unsigned long addr)
+{
+ outl (88, addr+PCNET32_DWIO_RAP);
+ return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88);
+}
+
+static struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
+};
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pcnet32_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+
+static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
+}
+
+static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct pcnet32_private *lp = dev->priv;
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ if (lp->pci_dev)
+ strcpy (info->bus_info, pci_name(lp->pci_dev));
+ else
+ sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+}
+
+static u32 pcnet32_get_link(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
+}
+
+static u32 pcnet32_get_msglevel(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ return lp->msg_enable;
+}
+
+static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct pcnet32_private *lp = dev->priv;
+ lp->msg_enable = value;
+}
+
+static int pcnet32_nway_reset(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = dev->priv;
+
+ ering->tx_max_pending = TX_RING_SIZE - 1;
+ ering->tx_pending = lp->cur_tx - lp->dirty_tx;
+ ering->rx_max_pending = RX_RING_SIZE - 1;
+ ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK;
+}
+
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+}
+
+static int pcnet32_self_test_count(struct net_device *dev)
+{
+ return PCNET32_TEST_LEN;
+}
+
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name);
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name);
+ } else if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name);
+} /* end pcnet32_ethtool_test */
+
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
+{
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ u16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ *data1 = 1; /* status of test, default to fail */
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Reset the PCNET32 */
+ lp->a.reset (ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr (ioaddr, 20, 2);
+
+ lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->init_block.filter[0] = 0;
+ lp->init_block.filter[1] = 0;
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x=0; x<numbuffs; x++) {
+ if (!(skb = dev_alloc_skb(size))) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n",
+ dev->name, __LINE__);
+ goto clean_up;
+ } else {
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = le16_to_cpu(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i=0; i<6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i=0; i<6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i=0; i<data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = le16_to_cpu(status);
+ }
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ x = x | 0x0002;
+ a->write_bcr(ioaddr, 32, x);
+
+ lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
+
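+ /* 0x8000 is the OWN bit of a descriptor status word: set while the
+ * controller owns the descriptor, cleared once it has processed it.
+ */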
+ teststatus = le16_to_cpu(0x8000);
+ lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x=0; x<numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ if (netif_msg_hw(lp))
+ printk("%s: Desc %d failed to reset!\n",dev->name,x);
+ break;
+ }
+ }
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+
+ for (x=0; x<numbuffs; x++) {
+ printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ skb = lp->rx_skbuff[x];
+ for (i=0; i<size; i++) {
+ printk("%02x ", *(skb->data+i));
+ }
+ printk("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x<numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i=0; i<size; i++) {
+ if (*(skb->data+i) != packet[i]) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n",
+ dev->name, i, *(skb->data+i), packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+ if (!rc) {
+ *data1 = 0;
+ }
+
+clean_up:
+ x = a->read_csr(ioaddr, 15) & 0xFFFF;
+ a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
+
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ x = x & ~0x0002;
+ a->write_bcr(ioaddr, 32, x);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ } else {
+ lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */
+ }
+
+ return(rc);
+} /* end pcnet32_loopback_test */
+
+static void pcnet32_led_blink_callback(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i=4; i<8; i++) {
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
+}
+
+static int pcnet32_phys_id(struct net_device *dev, u32 data)
+{
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i, regs[4];
+
+ if (!lp->blink_timer.function) {
+ init_timer(&lp->blink_timer);
+ lp->blink_timer.function = (void *) pcnet32_led_blink_callback;
+ lp->blink_timer.data = (unsigned long) dev;
+ }
+
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i=4; i<8; i++) {
+ regs[i-4] = a->read_bcr(ioaddr, i);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, jiffies);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
+ data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+
+ schedule_timeout(data * HZ);
+ del_timer_sync(&lp->blink_timer);
+
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i=4; i<8; i++) {
+ a->write_bcr(ioaddr, i, regs[i-4]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static int pcnet32_get_regs_len(struct net_device *dev)
+{
+ return(PCNET32_NUM_REGS * sizeof(u16));
+}
+
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+ unsigned long flags;
+
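+ /* Dump layout: 8 words of address PROM, 90 CSRs, CSR112 and CSR114,
+ * 36 BCRs and 32 MII registers - 168 words, matching PCNET32_NUM_REGS.
+ */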
+ spin_lock_irqsave(&lp->lock, flags);
+
+ csr0 = a->read_csr(ioaddr, 0);
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0001);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ ticks++;
+ if (ticks > 200) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Error getting into suspend!\n",
+ dev->name);
+ break;
+ }
+ }
+ }
+
+ /* read address PROM */
+ for (i=0; i<16; i += 2)
+ *buff++ = inw(ioaddr + i);
+
+ /* read control and status registers */
+ for (i=0; i<90; i++) {
+ *buff++ = a->read_csr(ioaddr, i);
+ }
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
+
+ /* read bus configuration registers */
+ for (i=0; i<36; i++) {
+ *buff++ = a->read_bcr(ioaddr, i);
+ }
+
+ /* read mii phy registers */
+ if (lp->mii) {
+ for (i=0; i<32; i++) {
+ lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i);
+ *buff++ = lp->a.read_bcr(ioaddr, 34);
+ }
+ }
+
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0000);
+ }
+
+ i = buff - (u16 *)ptr;
+ for (; i < PCNET32_NUM_REGS; i++)
+ *buff++ = 0;
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static struct ethtool_ops pcnet32_ethtool_ops = {
+ .get_settings = pcnet32_get_settings,
+ .set_settings = pcnet32_set_settings,
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_tso = ethtool_op_get_tso,
+ .get_strings = pcnet32_get_strings,
+ .self_test_count = pcnet32_self_test_count,
+ .self_test = pcnet32_ethtool_test,
+ .phys_id = pcnet32_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+};
+
+/* only probes for non-PCI devices; the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void __devinit
+pcnet32_probe_vlbus(void)
+{
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
+}
+
+
+static int __devinit
+pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start (pdev, 0);
+ if (!ioaddr) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
+
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+ if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "io address range already allocated\n");
+ return -EBUSY;
+ }
+
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+ if (err < 0) {
+ pci_disable_device(pdev);
+ }
+ return err;
+}
+
+
+/* pcnet32_probe1
+ * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
+ * pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int __devinit
+pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+{
+ struct pcnet32_private *lp;
+ dma_addr_t lp_dma_addr;
+ int i, media;
+ int fdx, mii, fset, dxsuflo;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else
+ goto err_release_region;
+ }
+
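+ /* CSR88/89 together hold the 32-bit chip ID: bit 0 is 1, bits 1..11 are
+ * the manufacturer ID (0x001 for AMD, hence the 0x003 check below) and
+ * bits 12..27 are the part number that the switch statement decodes.
+ */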
+ chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX "Unsupported chip version.\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1; mii = 1; fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1; mii = 1; fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1; mii = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1; mii = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1; mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
+ chip_version);
+ goto err_release_region;
+ }
+
+ /*
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+ * one for latency - although on PCI this isn't a big loss. Older chips
+ * have FIFOs smaller than a packet, so you can't do this.
+ * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
+ */
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ dev = alloc_etherdev(0);
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. Either way, we use the CSR values, and
+ * double check that they are valid.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i+12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2*i] = val & 0x0ff;
+ dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
+ for (i = 0; i < 6; i++)
+ promaddr[i] = inb(ioaddr + i);
+
+ if (memcmp(promaddr, dev->dev_addr, 6)
+ || !is_valid_ether_addr(dev->dev_addr)) {
+#ifndef __powerpc__
+ if (is_valid_ether_addr(promaddr)) {
+#else
+ if (!is_valid_ether_addr(dev->dev_addr)
+ && is_valid_ether_addr(promaddr)) {
+#endif
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ printk(" warning: CSR address invalid,\n");
+ printk(KERN_INFO " using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, 6);
+ }
+ }
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->dev_addr))
+ memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+
+ /* Version 0x2623 and 0x2624 */
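+ /* ((chip_version + 1) & 0xfffe) == 0x2624 matches both 0x2623 (79C971)
+ * and 0x2624 (79C972). */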
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i);
+ switch(i>>10) {
+ case 0: printk(" 20 bytes,"); break;
+ case 1: printk(" 64 bytes,"); break;
+ case 2: printk(" 128 bytes,"); break;
+ case 3: printk("~220 bytes,"); break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ printk(" BCR18(%x):",i&0xffff);
+ if (i & (1<<5)) printk("BurstWrEn ");
+ if (i & (1<<6)) printk("BurstRdEn ");
+ if (i & (1<<7)) printk("DWordIO ");
+ if (i & (1<<11)) printk("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8);
+ i = a->read_bcr(ioaddr, 26);
+ printk(" SRAM_BND=0x%04x,",i<<8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1<<14)) printk("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+ if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ memset(lp, 0, sizeof(*lp));
+ lp->dma_addr = lp_dma_addr;
+ lp->pci_dev = pdev;
+
+ spin_lock_init(&lp->lock);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found>=MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ if (!a) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "No access methods\n");
+ ret = -ENODEV;
+ goto err_free_consistent;
+ }
+ lp->a = *a;
+
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
+ && dev->dev_addr[2] == 0x75)
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
+ offsetof(struct pcnet32_private, rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
+ offsetof(struct pcnet32_private, tx_ring));
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
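+ /* The init block's bus address is split across CSR1 (low 16 bits)
+ * and CSR2 (high 16 bits). */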
+ a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) & 0xffff);
+ a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) >> 16);
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(" assigned IRQ %d.\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr (ioaddr, 0, 0x41);
+ mdelay (1);
+
+ dev->irq = probe_irq_off (irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", failed to detect IRQ line.\n");
+ ret = -ENODEV;
+ goto err_free_consistent;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", probed IRQ %d.\n", dev->irq);
+ }
+
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii)
+ lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f;
+
+ init_timer (&lp->watchdog_timer);
+ lp->watchdog_timer.data = (unsigned long) dev;
+ lp->watchdog_timer.function = (void *) &pcnet32_watchdog;
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+ dev->do_ioctl = &pcnet32_ioctl;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->tx_timeout = pcnet32_tx_timeout;
+ dev->watchdog_timeo = (5*HZ);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = pcnet32_poll_controller;
+#endif
+
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_consistent;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+err_free_consistent:
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+err_free_netdev:
+ free_netdev(dev);
+err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
+
+
+static int
+pcnet32_open(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, &pcnet32_interrupt,
+ lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a.reset (ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr (ioaddr, 20, 2);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq,
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+ (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
+
+ /* set/reset autoselect bit */
+ val = lp->a.read_bcr (ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
+ val |= 2;
+ lp->a.write_bcr (ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a.read_bcr (ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+ /* workaround of xSeries250, turn on for 79C975 only */
+ i = ((lp->a.read_csr(ioaddr, 88) |
+ (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff;
+ if (i == 0x2627)
+ val |= 3;
+ }
+ lp->a.write_bcr (ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a.read_csr (ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a.write_csr (ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX loses the link, so skip that */
+ if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
+ lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
+ printk(KERN_DEBUG "%s: Skipping PHY selection.\n", dev->name);
+ } else {
+ /*
+ * 24 Jun 2004: according to AMD, in order to change the PHY,
+ * DANAS (or DISPM for 79C976) must be set; then select the speed,
+ * duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a.write_bcr(ioaddr, 32,
+ lp->a.read_bcr(ioaddr, 32) | 0x0080);
+ /* disable Auto Negotiation, set 10Mbps, HD */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a.write_bcr (ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a.write_bcr(ioaddr, 32,
+ lp->a.read_bcr(ioaddr, 32) | 0x0080);
+ /* enable auto-negotiation; clear DANAS and the forced speed/duplex bits */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a.write_bcr(ioaddr, 32, val);
+ }
+ }
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a.read_csr (ioaddr, 3);
+ val |= 0x40;
+ lp->a.write_csr (ioaddr, 3, val);
+ }
+#endif
+
+ lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a.write_csr (ioaddr, 1, (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)) & 0xffff);
+ lp->a.write_csr (ioaddr, 2, (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)) >> 16);
+
+ lp->a.write_csr (ioaddr, 4, 0x0915);
+ lp->a.write_csr (ioaddr, 0, 0x0001);
+
+ netif_start_queue(dev);
+
+ /* If we have mii, print the link status and start the watchdog */
+ if (lp->mii) {
+ mii_check_media (&lp->mii_if, netif_msg_link(lp), 1);
+ mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ }
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a.read_csr (ioaddr, 0) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a.write_csr (ioaddr, 0, 0x0042);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)),
+ lp->a.read_csr(ioaddr, 0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+err_free_ring:
+ /* free any allocated skbuffs */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].status = 0;
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr (ioaddr, 20, 4);
+
+err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void
+pcnet32_purge_tx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
+
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int
+pcnet32_init_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+ if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
+ /* there is not much we can do at this point */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
+ dev->name);
+ return -1;
+ }
+ skb_reserve (rx_skbuff, 2);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0)
+ lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+ PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+ lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = le16_to_cpu(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
+ offsetof(struct pcnet32_private, rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
+ offsetof(struct pcnet32_private, tx_ring));
+ wmb(); /* Make sure all changes are visible */
+ return 0;
+}
+
+/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
+ * then flush the pending transmit operations, re-initialize the ring,
+ * and tell the chip to initialize.
+ */
+static void
+pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* wait for stop */
+ for (i=0; i<100; i++)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0004)
+ break;
+
+ if (i >= 100 && netif_msg_drv(lp))
+ printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n",
+ dev->name);
+
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
+
+ /* ReInit Ring */
+ lp->a.write_csr (ioaddr, 0, 1);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a.read_csr (ioaddr, 0) & 0x0100)
+ break;
+
+ lp->a.write_csr (ioaddr, 0, csr0_bits);
+}
+
+
+static void
+pcnet32_tx_timeout (struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ lp->a.write_csr (ioaddr, 0, 0x0004);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
+ le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, 0x0042);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+static int
+pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if (netif_msg_tx_queued(lp)) {
+ printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ }
+
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
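+ /* 0x8300 = OWN | STP | ENP: the descriptor is handed to the chip and
+ * the whole frame lives in this single buffer. */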
+ status = 0x8300;
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
+
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = le16_to_cpu(status);
+
+ lp->cur_tx++;
+ lp->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ lp->a.write_csr (ioaddr, 0, 0x0048);
+
+ dev->trans_start = jiffies;
+
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+/* The PCNET32 interrupt handler. */
+static irqreturn_t
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0,rap;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_INTR)
+ printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
+ __FUNCTION__, irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
+
+ spin_lock(&lp->lock);
+
+ rap = lp->a.read_rap(ioaddr);
+ while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
+ if (csr0 == 0xffff) {
+ break; /* PCMCIA remove happened */
+ }
+ /* Acknowledge all of the current interrupt sources ASAP. */
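+ /* CSR0 status bits clear when written back as ones; masking with
+ * ~0x004f avoids setting INIT/STRT/STOP/TDMD or the interrupt enable
+ * bit here (interrupts are re-enabled on exit below). */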
+ lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
+
+ must_restart = 0;
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, lp->a.read_csr (ioaddr, 0));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ pcnet32_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was a major error, log it. */
+ int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n",
+ dev->name, status, err_status);
+ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
+ dev->name, csr0);
+ must_restart = 1;
+ }
+#else
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ if (! lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
+ dev->name, csr0);
+ must_restart = 1;
+ }
+ }
+#endif
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry],
+ lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+ delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE);
+ if (delta > TX_RING_SIZE) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ delta -= TX_RING_SIZE;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < TX_RING_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This shouldn't
+ * be a problem as we will see normal rx interrupts for the frames
+ * in the receive ring. But there are some PCI chipsets (I can
+ * reproduce this on SP3G with the Intel Saturn chipset) which
+ * sometimes have problems and will fill up the receive ring with
+ * error descriptors. In this situation we don't get an rx
+ * interrupt, but a missed frame interrupt sooner or later.
+ * So we try to clean up our receive ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* unlike for the lance, there is no restart needed */
+ }
+
+ if (must_restart) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a.reset(ioaddr);
+ lp->a.write_csr(ioaddr, 4, 0x0915);
+ pcnet32_restart(dev, 0x0002);
+ netif_wake_queue(dev);
+ }
+ }
+
+ /* Set interrupt enable. */
+ lp->a.write_csr (ioaddr, 0, 0x0040);
+ lp->a.write_rap (ioaddr,rap);
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+ dev->name, lp->a.read_csr (ioaddr, 0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int
+pcnet32_rx(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int boguscnt = RX_RING_SIZE / 2;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
+ struct sk_buff *skb;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR "%s: Impossible packet size %d!\n",
+ dev->name, pkt_len);
+ lp->stats.rx_errors++;
+ } else if (pkt_len < 60) {
+ if (netif_msg_rx_err(lp))
+ printk(KERN_ERR "%s: Runt packet!\n", dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ int rx_in_place = 0;
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
+ skb_reserve (newskb, 2);
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
+ PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+ skb_put (skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ newskb->dev = dev;
+ lp->rx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev, newskb->tail,
+ PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
+ lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
+ rx_in_place = 1;
+ } else
+ skb = NULL;
+ } else {
+ skb = dev_alloc_skb(pkt_len+2);
+ }
+
+ if (skb == NULL) {
+ int i;
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
+ & RX_RING_MOD_MASK].status) < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2) {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ wmb(); /* Make sure adapter sees owner change */
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ if (!rx_in_place) {
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ pci_dma_sync_single_for_cpu(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SZ-2,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(skb,
+ (unsigned char *)(lp->rx_skbuff[entry]->tail),
+ pkt_len,0);
+ pci_dma_sync_single_for_device(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SZ-2,
+ PCI_DMA_FROMDEVICE);
+ }
+ lp->stats.rx_bytes += skb->len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
+ }
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew Boyd
+ * of QNX reports that some revs of the 79C965 clear it.
+ */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ if (--boguscnt <= 0) break; /* don't stay in loop forever */
+ }
+
+ return 0;
+}
+
+static int
+pcnet32_close(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+ unsigned long flags;
+
+ del_timer_sync(&lp->watchdog_timer);
+
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+
+ if (netif_msg_ifdown(lp))
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, lp->a.read_csr (ioaddr, 0));
+
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a.write_csr (ioaddr, 0, 0x0004);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr (ioaddr, 20, 4);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].status = 0;
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *
+pcnet32_get_stats(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ saved_addr = lp->a.read_rap(ioaddr);
+ lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+ lp->a.write_rap(ioaddr, saved_addr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &lp->stats;
+}
+
+/* taken from the sunlance driver, which it took from the depca driver */
+static void pcnet32_load_multicast (struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ volatile struct pcnet32_init_block *ib = &lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct dev_mc_list *dmi=dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
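+ /* The top six bits of the little-endian CRC select one of the 64
+ * logical address filter bits: crc >> 4 picks the 16-bit word,
+ * crc & 0xf the bit within it. */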
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ mcast_table [crc >> 4] = le16_to_cpu(
+ le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf)));
+ }
+ return;
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void pcnet32_set_multicast_list(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = dev->priv;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ if (netif_msg_hw(lp))
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7);
+ } else {
+ lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast (dev);
+ }
+
+ lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
+ pcnet32_restart(dev, 0x0042); /* Resume normal operation */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* This routine assumes that the lp->lock is held */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
+
+ if (!lp->mii)
+ return 0;
+
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a.read_bcr(ioaddr, 34);
+
+ return val_out;
+}
+
+/* This routine assumes that the lp->lock is held */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!lp->mii)
+ return;
+
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a.write_bcr(ioaddr, 34, val);
+}
+
+static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static void pcnet32_watchdog(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+
+ /* Print the link status if it has changed */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_check_media (&lp->mii_if, netif_msg_link(lp), 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+}
+
+static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = dev->priv;
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_driver pcnet32_driver = {
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = __devexit_p(pcnet32_remove_one),
+ .id_table = pcnet32_pci_tbl,
+};
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+static int tx_start_pt = -1;
+static int pcnet32_have_pci;
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level");
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+module_param(tx_start_pt, int, 0);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
+module_param_array(homepna, int, NULL, 0);
+MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int __init pcnet32_init_module(void)
+{
+ printk(KERN_INFO "%s", version);
+
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
+
+ /* find the PCI devices */
+ if (!pci_module_init(&pcnet32_driver))
+ pcnet32_have_pci = 1;
+
+ /* should we find any remaining VLbus devices? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus();
+
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+ printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = pcnet32_dev->priv;
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
+
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
new file mode 100644
index 000000000000..f4b62405d2e5
--- /dev/null
+++ b/drivers/net/plip.c
@@ -0,0 +1,1427 @@
+/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
+/* PLIP: A parallel port "network" driver for Linux. */
+/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
+/*
+ * Authors: Donald Becker <becker@scyld.com>
+ * Tommy Thorn <thorn@daimi.aau.dk>
+ * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * Peter Bauer <100136.3530@compuserve.com>
+ * Niibe Yutaka <gniibe@mri.co.jp>
+ * Nimrod Zimerman <zimerman@mailandnews.com>
+ *
+ * Enhancements:
+ * Modularization and ifreq/ifmap support by Alan Cox.
+ * Rewritten by Niibe Yutaka.
+ * parport-sharing awareness code by Philip Blundell.
+ * SMP locking by Niibe Yutaka.
+ * Support for parallel ports with no IRQ (poll mode),
+ * Modifications to use the parallel port API
+ * by Nimrod Zimerman.
+ *
+ * Fixes:
+ * Niibe Yutaka
+ * - Module initialization.
+ * - MTU fix.
+ * - Make sure other end is OK, before sending a packet.
+ * - Fix immediate timer problem.
+ *
+ * Al Viro
+ * - Changed {enable,disable}_irq handling to make it work
+ * with new ("stack") semantics.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
+ * inspired by Russ Nelson's parallel port packet driver.
+ *
+ * NOTE:
+ * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
+ * Because of the need to communicate with DOS machines running the
+ * Crynwr packet driver, Peter Bauer changed the protocol again,
+ * back to the original protocol.
+ *
+ * This version follows the original PLIP protocol.
+ * So, this PLIP can't communicate with the PLIP of Linux v1.0.
+ */
+
+/*
+ * To use with a DOS box, turn on the ARP switch:
+ * # ifconfig plip[0-2] arp
+ */
+static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
+
+/*
+ Sources:
+ Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
+ "parallel.asm" parallel port packet driver.
+
+ The "Crynwr" parallel port standard specifies the following protocol:
+ Trigger by sending nibble '0x8' (this causes interrupt on other end)
+ count-low octet
+ count-high octet
+ ... data octets
+ checksum octet
+ Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
+ <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
+
+ The packet is encapsulated as if it were ethernet.
+
+ The cable used is a de facto standard parallel null cable -- sold as
+ a "LapLink" cable by various places. You'll need a 12-conductor cable to
+ make one yourself. The wiring is:
+ SLCTIN 17 - 17
+ GROUND 25 - 25
+ D0->ERROR 2 - 15 15 - 2
+ D1->SLCT 3 - 13 13 - 3
+ D2->PAPOUT 4 - 12 12 - 4
+ D3->ACK 5 - 10 10 - 5
+ D4->BUSY 6 - 11 11 - 6
+ Do not connect the other pins. They are:
+ D5,D6,D7 are 7,8,9
+ STROBE is 1, FEED is 14, INIT is 16
+ extra grounds are 18,19,20,21,22,23,24
+*/
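+
+/*
+ * For example, with the encoding above the octet 0xA5 goes out as 0x15
+ * (0x10 + low nibble 0x5) followed by 0x0A (high nibble 0xA), the sender
+ * waiting on the receiver's status nibble before each half.
+ */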
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/lp.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_plip.h>
+#include <linux/workqueue.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/parport.h>
+#include <linux/bitops.h>
+
+#include <net/neighbour.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/semaphore.h>
+
+/* Maximum number of devices to support. */
+#define PLIP_MAX 8
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+#define ENABLE(irq) if (irq != -1) enable_irq(irq)
+#define DISABLE(irq) if (irq != -1) disable_irq(irq)
+
+/* In microseconds */
+#define PLIP_DELAY_UNIT 1
+
+/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
+#define PLIP_TRIGGER_WAIT 500
+
+/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
+#define PLIP_NIBBLE_WAIT 3000
+
+/* Bottom halves */
+static void plip_kick_bh(struct net_device *dev);
+static void plip_bh(struct net_device *dev);
+static void plip_timer_bh(struct net_device *dev);
+
+/* Interrupt handler */
+static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/* Functions for DEV methods */
+static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
+static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+static int plip_hard_header_cache(struct neighbour *neigh,
+ struct hh_cache *hh);
+static int plip_open(struct net_device *dev);
+static int plip_close(struct net_device *dev);
+static struct net_device_stats *plip_get_stats(struct net_device *dev);
+static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int plip_preempt(void *handle);
+static void plip_wakeup(void *handle);
+
+enum plip_connection_state {
+ PLIP_CN_NONE=0,
+ PLIP_CN_RECEIVE,
+ PLIP_CN_SEND,
+ PLIP_CN_CLOSING,
+ PLIP_CN_ERROR
+};
+
+enum plip_packet_state {
+ PLIP_PK_DONE=0,
+ PLIP_PK_TRIGGER,
+ PLIP_PK_LENGTH_LSB,
+ PLIP_PK_LENGTH_MSB,
+ PLIP_PK_DATA,
+ PLIP_PK_CHECKSUM
+};
+
+enum plip_nibble_state {
+ PLIP_NB_BEGIN,
+ PLIP_NB_1,
+ PLIP_NB_2,
+};
+
+struct plip_local {
+ enum plip_packet_state state;
+ enum plip_nibble_state nibble;
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN)
+ unsigned char lsb;
+ unsigned char msb;
+#elif defined(__BIG_ENDIAN)
+ unsigned char msb;
+ unsigned char lsb;
+#else
+#error "Please fix the endianness defines in <asm/byteorder.h>"
+#endif
+ } b;
+ unsigned short h;
+ } length;
+ unsigned short byte;
+ unsigned char checksum;
+ unsigned char data;
+ struct sk_buff *skb;
+};
+
+struct net_local {
+ struct net_device_stats enet_stats;
+ struct work_struct immediate;
+ struct work_struct deferred;
+ struct work_struct timer;
+ struct plip_local snd_data;
+ struct plip_local rcv_data;
+ struct pardevice *pardev;
+ unsigned long trigger;
+ unsigned long nibble;
+ enum plip_connection_state connection;
+ unsigned short timeout_count;
+ int is_deferred;
+ int port_owner;
+ int should_relinquish;
+ int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+ int (*orig_hard_header_cache)(struct neighbour *neigh,
+ struct hh_cache *hh);
+ spinlock_t lock;
+ atomic_t kill_timer;
+ struct semaphore killed_timer_sem;
+};
+
+inline static void enable_parport_interrupts (struct net_device *dev)
+{
+ if (dev->irq != -1)
+ {
+ struct parport *port =
+ ((struct net_local *)dev->priv)->pardev->port;
+ port->ops->enable_irq (port);
+ }
+}
+
+inline static void disable_parport_interrupts (struct net_device *dev)
+{
+ if (dev->irq != -1)
+ {
+ struct parport *port =
+ ((struct net_local *)dev->priv)->pardev->port;
+ port->ops->disable_irq (port);
+ }
+}
+
+inline static void write_data (struct net_device *dev, unsigned char data)
+{
+ struct parport *port =
+ ((struct net_local *)dev->priv)->pardev->port;
+
+ port->ops->write_data (port, data);
+}
+
+inline static unsigned char read_status (struct net_device *dev)
+{
+ struct parport *port =
+ ((struct net_local *)dev->priv)->pardev->port;
+
+ return port->ops->read_status (port);
+}
+
+/* Entry point of PLIP driver.
+ Probe the hardware, and register/initialize the driver.
+
+ PLIP is rather weird, because of the way it interacts with the parport
+ system. It is _not_ initialised from Space.c. Instead, plip_init()
+ is called, and that function makes up a "struct net_device" for each port, and
+ then calls us here.
+
+ */
+static void
+plip_init_netdev(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+
+ /* Then, override parts of it */
+ dev->hard_start_xmit = plip_tx_packet;
+ dev->open = plip_open;
+ dev->stop = plip_close;
+ dev->get_stats = plip_get_stats;
+ dev->do_ioctl = plip_ioctl;
+ dev->header_cache_update = NULL;
+ dev->tx_queue_len = 10;
+ dev->flags = IFF_POINTOPOINT|IFF_NOARP;
+ memset(dev->dev_addr, 0xfc, ETH_ALEN);
+
+ /* Set the private structure */
+ nl->orig_hard_header = dev->hard_header;
+ dev->hard_header = plip_hard_header;
+
+ nl->orig_hard_header_cache = dev->hard_header_cache;
+ dev->hard_header_cache = plip_hard_header_cache;
+
+
+ nl->port_owner = 0;
+
+ /* Initialize constants */
+ nl->trigger = PLIP_TRIGGER_WAIT;
+ nl->nibble = PLIP_NIBBLE_WAIT;
+
+ /* Initialize task queue structures */
+ INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
+ INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+
+ if (dev->irq == -1)
+ INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+
+ spin_lock_init(&nl->lock);
+}
+
+/* Bottom half handler for the delayed request.
+ This routine is kicked by do_timer().
+ Request `plip_bh' to be invoked. */
+static void
+plip_kick_bh(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+
+ if (nl->is_deferred)
+ schedule_work(&nl->immediate);
+}
+
+/* Forward declarations of internal routines */
+static int plip_none(struct net_device *, struct net_local *,
+ struct plip_local *, struct plip_local *);
+static int plip_receive_packet(struct net_device *, struct net_local *,
+ struct plip_local *, struct plip_local *);
+static int plip_send_packet(struct net_device *, struct net_local *,
+ struct plip_local *, struct plip_local *);
+static int plip_connection_close(struct net_device *, struct net_local *,
+ struct plip_local *, struct plip_local *);
+static int plip_error(struct net_device *, struct net_local *,
+ struct plip_local *, struct plip_local *);
+static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd,
+ struct plip_local *rcv,
+ int error);
+
+#define OK 0
+#define TIMEOUT 1
+#define ERROR 2
+#define HS_TIMEOUT 3
+
+typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv);
+
+static plip_func connection_state_table[] =
+{
+ plip_none,
+ plip_receive_packet,
+ plip_send_packet,
+ plip_connection_close,
+ plip_error
+};
+
+/* Bottom half handler of PLIP. */
+static void
+plip_bh(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct plip_local *snd = &nl->snd_data;
+ struct plip_local *rcv = &nl->rcv_data;
+ plip_func f;
+ int r;
+
+ nl->is_deferred = 0;
+ f = connection_state_table[nl->connection];
+ if ((r = (*f)(dev, nl, snd, rcv)) != OK
+ && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+ nl->is_deferred = 1;
+ schedule_delayed_work(&nl->deferred, 1);
+ }
+}
+
+static void
+plip_timer_bh(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+
+ if (!(atomic_read (&nl->kill_timer))) {
+ plip_interrupt (-1, dev, NULL);
+
+ schedule_delayed_work(&nl->timer, 1);
+ }
+ else {
+ up (&nl->killed_timer_sem);
+ }
+}
+
+static int
+plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv,
+ int error)
+{
+ unsigned char c0;
+ /*
+ * This is tricky. If we got here from the beginning of send (either
+ * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
+ * already disabled. With the old variant of {enable,disable}_irq()
+ * extra disable_irq() was a no-op. Now it became mortal - it's
+ * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
+ * that is). So we have to treat HS_TIMEOUT and ERROR from send
+ * in a special way.
+ */
+
+ spin_lock_irq(&nl->lock);
+ if (nl->connection == PLIP_CN_SEND) {
+
+ if (error != ERROR) { /* Timeout */
+ nl->timeout_count++;
+ if ((error == HS_TIMEOUT
+ && nl->timeout_count <= 10)
+ || nl->timeout_count <= 3) {
+ spin_unlock_irq(&nl->lock);
+ /* Try again later */
+ return TIMEOUT;
+ }
+ c0 = read_status(dev);
+ printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
+ dev->name, snd->state, c0);
+ } else
+ error = HS_TIMEOUT;
+ nl->enet_stats.tx_errors++;
+ nl->enet_stats.tx_aborted_errors++;
+ } else if (nl->connection == PLIP_CN_RECEIVE) {
+ if (rcv->state == PLIP_PK_TRIGGER) {
+ /* Transmission was interrupted. */
+ spin_unlock_irq(&nl->lock);
+ return OK;
+ }
+ if (error != ERROR) { /* Timeout */
+ if (++nl->timeout_count <= 3) {
+ spin_unlock_irq(&nl->lock);
+ /* Try again later */
+ return TIMEOUT;
+ }
+ c0 = read_status(dev);
+ printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
+ dev->name, rcv->state, c0);
+ }
+ nl->enet_stats.rx_dropped++;
+ }
+ rcv->state = PLIP_PK_DONE;
+ if (rcv->skb) {
+ kfree_skb(rcv->skb);
+ rcv->skb = NULL;
+ }
+ snd->state = PLIP_PK_DONE;
+ if (snd->skb) {
+ dev_kfree_skb(snd->skb);
+ snd->skb = NULL;
+ }
+ spin_unlock_irq(&nl->lock);
+ if (error == HS_TIMEOUT) {
+ DISABLE(dev->irq);
+ synchronize_irq(dev->irq);
+ }
+ disable_parport_interrupts (dev);
+ netif_stop_queue (dev);
+ nl->connection = PLIP_CN_ERROR;
+ write_data (dev, 0x00);
+
+ return TIMEOUT;
+}
+
+static int
+plip_none(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv)
+{
+ return OK;
+}
+
+/* PLIP_RECEIVE --- receive a byte (two nibbles)
+ Returns OK on success, TIMEOUT on timeout */
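+/* The peer's nibble arrives on the five status lines: (c0 >> 3) & 0x0f
+   recovers the low nibble and (c0 << 1) & 0xf0 the high one, with BUSY
+   (status bit 0x80) toggling to flag each nibble. */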
+inline static int
+plip_receive(unsigned short nibble_timeout, struct net_device *dev,
+ enum plip_nibble_state *ns_p, unsigned char *data_p)
+{
+ unsigned char c0, c1;
+ unsigned int cx;
+
+ switch (*ns_p) {
+ case PLIP_NB_BEGIN:
+ cx = nibble_timeout;
+ while (1) {
+ c0 = read_status(dev);
+ udelay(PLIP_DELAY_UNIT);
+ if ((c0 & 0x80) == 0) {
+ c1 = read_status(dev);
+ if (c0 == c1)
+ break;
+ }
+ if (--cx == 0)
+ return TIMEOUT;
+ }
+ *data_p = (c0 >> 3) & 0x0f;
+ write_data (dev, 0x10); /* send ACK */
+ *ns_p = PLIP_NB_1;
+
+ case PLIP_NB_1:
+ cx = nibble_timeout;
+ while (1) {
+ c0 = read_status(dev);
+ udelay(PLIP_DELAY_UNIT);
+ if (c0 & 0x80) {
+ c1 = read_status(dev);
+ if (c0 == c1)
+ break;
+ }
+ if (--cx == 0)
+ return TIMEOUT;
+ }
+ *data_p |= (c0 << 1) & 0xf0;
+ write_data (dev, 0x00); /* send ACK */
+ *ns_p = PLIP_NB_BEGIN;
+ case PLIP_NB_2:
+ break;
+ }
+ return OK;
+}
+
+/*
+ * Determine the packet's protocol ID. The rule here is that we
+ * assume 802.3 if the type field is short enough to be a length.
+ * This is normal practice and works for any 'now in use' protocol.
+ *
+ * PLIP is ethernet-ish but the daddr might not be valid if unicast.
+ * PLIP fortunately has no bus architecture (it's point-to-point).
+ *
+ * We can't fix the daddr thing as that quirk (more bug) is embedded
+ * in far too many old systems not all even running Linux.
+ */
+
+static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethhdr *eth;
+ unsigned char *rawp;
+
+ skb->mac.raw=skb->data;
+ skb_pull(skb,dev->hard_header_len);
+ eth = eth_hdr(skb);
+
+ if(*eth->h_dest&1)
+ {
+ if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ skb->pkt_type=PACKET_BROADCAST;
+ else
+ skb->pkt_type=PACKET_MULTICAST;
+ }
+
+ /*
+ * This ALLMULTI check should be redundant by 1.4
+ * so don't forget to remove it.
+ */
+
+ if (ntohs(eth->h_proto) >= 1536)
+ return eth->h_proto;
+
+ rawp = skb->data;
+
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell breaks
+ * the protocol design and runs IPX over 802.3 without an 802.2 LLC
+ * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+ * won't work for fault tolerant netware but does for the rest.
+ */
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
+
+ /*
+ * Real 802.2 LLC
+ */
+ return htons(ETH_P_802_2);
+}
+
+
+/* PLIP_RECEIVE_PACKET --- receive a packet */
+static int
+plip_receive_packet(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv)
+{
+ unsigned short nibble_timeout = nl->nibble;
+ unsigned char *lbuf;
+
+ switch (rcv->state) {
+ case PLIP_PK_TRIGGER:
+ DISABLE(dev->irq);
+ /* Don't need to synchronize irq, as we can safely ignore it */
+ disable_parport_interrupts (dev);
+ write_data (dev, 0x01); /* send ACK */
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: receive start\n", dev->name);
+ rcv->state = PLIP_PK_LENGTH_LSB;
+ rcv->nibble = PLIP_NB_BEGIN;
+
+ case PLIP_PK_LENGTH_LSB:
+ if (snd->state != PLIP_PK_DONE) {
+ if (plip_receive(nl->trigger, dev,
+ &rcv->nibble, &rcv->length.b.lsb)) {
+ /* collision, here dev->tbusy == 1 */
+ rcv->state = PLIP_PK_DONE;
+ nl->is_deferred = 1;
+ nl->connection = PLIP_CN_SEND;
+ schedule_delayed_work(&nl->deferred, 1);
+ enable_parport_interrupts (dev);
+ ENABLE(dev->irq);
+ return OK;
+ }
+ } else {
+ if (plip_receive(nibble_timeout, dev,
+ &rcv->nibble, &rcv->length.b.lsb))
+ return TIMEOUT;
+ }
+ rcv->state = PLIP_PK_LENGTH_MSB;
+
+ case PLIP_PK_LENGTH_MSB:
+ if (plip_receive(nibble_timeout, dev,
+ &rcv->nibble, &rcv->length.b.msb))
+ return TIMEOUT;
+ if (rcv->length.h > dev->mtu + dev->hard_header_len
+ || rcv->length.h < 8) {
+ printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
+ return ERROR;
+ }
+ /* Malloc up new buffer. */
+ rcv->skb = dev_alloc_skb(rcv->length.h + 2);
+ if (rcv->skb == NULL) {
+ printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
+ return ERROR;
+ }
+ skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
+ skb_put(rcv->skb,rcv->length.h);
+ rcv->skb->dev = dev;
+ rcv->state = PLIP_PK_DATA;
+ rcv->byte = 0;
+ rcv->checksum = 0;
+
+ case PLIP_PK_DATA:
+ lbuf = rcv->skb->data;
+ do
+ if (plip_receive(nibble_timeout, dev,
+ &rcv->nibble, &lbuf[rcv->byte]))
+ return TIMEOUT;
+ while (++rcv->byte < rcv->length.h);
+ do
+ rcv->checksum += lbuf[--rcv->byte];
+ while (rcv->byte);
+ rcv->state = PLIP_PK_CHECKSUM;
+
+ case PLIP_PK_CHECKSUM:
+ if (plip_receive(nibble_timeout, dev,
+ &rcv->nibble, &rcv->data))
+ return TIMEOUT;
+ if (rcv->data != rcv->checksum) {
+ nl->enet_stats.rx_crc_errors++;
+ if (net_debug)
+ printk(KERN_DEBUG "%s: checksum error\n", dev->name);
+ return ERROR;
+ }
+ rcv->state = PLIP_PK_DONE;
+
+ case PLIP_PK_DONE:
+ /* Inform the upper layer for the arrival of a packet. */
+ rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
+ netif_rx(rcv->skb);
+ dev->last_rx = jiffies;
+ nl->enet_stats.rx_bytes += rcv->length.h;
+ nl->enet_stats.rx_packets++;
+ rcv->skb = NULL;
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: receive end\n", dev->name);
+
+ /* Close the connection. */
+ write_data (dev, 0x00);
+ spin_lock_irq(&nl->lock);
+ if (snd->state != PLIP_PK_DONE) {
+ nl->connection = PLIP_CN_SEND;
+ spin_unlock_irq(&nl->lock);
+ schedule_work(&nl->immediate);
+ enable_parport_interrupts (dev);
+ ENABLE(dev->irq);
+ return OK;
+ } else {
+ nl->connection = PLIP_CN_NONE;
+ spin_unlock_irq(&nl->lock);
+ enable_parport_interrupts (dev);
+ ENABLE(dev->irq);
+ return OK;
+ }
+ }
+ return OK;
+}
+
+/* PLIP_SEND --- send a byte (two nibbles)
+   Returns OK on success, TIMEOUT on timeout */
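+/* The nibble is presented on D0-D3 with data bit 4 (0x10) toggled to mark
+   each half; the sender waits for the peer to change BUSY (status bit 0x80)
+   before moving on. */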
+inline static int
+plip_send(unsigned short nibble_timeout, struct net_device *dev,
+ enum plip_nibble_state *ns_p, unsigned char data)
+{
+ unsigned char c0;
+ unsigned int cx;
+
+ switch (*ns_p) {
+ case PLIP_NB_BEGIN:
+ write_data (dev, data & 0x0f);
+ *ns_p = PLIP_NB_1;
+
+ case PLIP_NB_1:
+ write_data (dev, 0x10 | (data & 0x0f));
+ cx = nibble_timeout;
+ while (1) {
+ c0 = read_status(dev);
+ if ((c0 & 0x80) == 0)
+ break;
+ if (--cx == 0)
+ return TIMEOUT;
+ udelay(PLIP_DELAY_UNIT);
+ }
+ write_data (dev, 0x10 | (data >> 4));
+ *ns_p = PLIP_NB_2;
+
+ case PLIP_NB_2:
+ write_data (dev, (data >> 4));
+ cx = nibble_timeout;
+ while (1) {
+ c0 = read_status(dev);
+ if (c0 & 0x80)
+ break;
+ if (--cx == 0)
+ return TIMEOUT;
+ udelay(PLIP_DELAY_UNIT);
+ }
+ *ns_p = PLIP_NB_BEGIN;
+ return OK;
+ }
+ return OK;
+}
+
+/* PLIP_SEND_PACKET --- send a packet */
+static int
+plip_send_packet(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv)
+{
+ unsigned short nibble_timeout = nl->nibble;
+ unsigned char *lbuf;
+ unsigned char c0;
+ unsigned int cx;
+
+ if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
+ printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
+ snd->state = PLIP_PK_DONE;
+ snd->skb = NULL;
+ return ERROR;
+ }
+
+ switch (snd->state) {
+ case PLIP_PK_TRIGGER:
+ if ((read_status(dev) & 0xf8) != 0x80)
+ return HS_TIMEOUT;
+
+ /* Trigger remote rx interrupt. */
+ write_data (dev, 0x08);
+ cx = nl->trigger;
+ while (1) {
+ udelay(PLIP_DELAY_UNIT);
+ spin_lock_irq(&nl->lock);
+ if (nl->connection == PLIP_CN_RECEIVE) {
+ spin_unlock_irq(&nl->lock);
+ /* Interrupted. */
+ nl->enet_stats.collisions++;
+ return OK;
+ }
+ c0 = read_status(dev);
+ if (c0 & 0x08) {
+ spin_unlock_irq(&nl->lock);
+ DISABLE(dev->irq);
+ synchronize_irq(dev->irq);
+ if (nl->connection == PLIP_CN_RECEIVE) {
+ /* Interrupted.
+ We don't need to enable irq,
+ as it is soon disabled. */
+ /* Yes, we do. New variant of
+ {enable,disable}_irq *counts*
+ them. -- AV */
+ ENABLE(dev->irq);
+ nl->enet_stats.collisions++;
+ return OK;
+ }
+ disable_parport_interrupts (dev);
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: send start\n", dev->name);
+ snd->state = PLIP_PK_LENGTH_LSB;
+ snd->nibble = PLIP_NB_BEGIN;
+ nl->timeout_count = 0;
+ break;
+ }
+ spin_unlock_irq(&nl->lock);
+ if (--cx == 0) {
+ write_data (dev, 0x00);
+ return HS_TIMEOUT;
+ }
+ }
+
+ case PLIP_PK_LENGTH_LSB:
+ if (plip_send(nibble_timeout, dev,
+ &snd->nibble, snd->length.b.lsb))
+ return TIMEOUT;
+ snd->state = PLIP_PK_LENGTH_MSB;
+
+ case PLIP_PK_LENGTH_MSB:
+ if (plip_send(nibble_timeout, dev,
+ &snd->nibble, snd->length.b.msb))
+ return TIMEOUT;
+ snd->state = PLIP_PK_DATA;
+ snd->byte = 0;
+ snd->checksum = 0;
+
+ case PLIP_PK_DATA:
+ do
+ if (plip_send(nibble_timeout, dev,
+ &snd->nibble, lbuf[snd->byte]))
+ return TIMEOUT;
+ while (++snd->byte < snd->length.h);
+ do
+ snd->checksum += lbuf[--snd->byte];
+ while (snd->byte);
+ snd->state = PLIP_PK_CHECKSUM;
+
+ case PLIP_PK_CHECKSUM:
+ if (plip_send(nibble_timeout, dev,
+ &snd->nibble, snd->checksum))
+ return TIMEOUT;
+
+ nl->enet_stats.tx_bytes += snd->skb->len;
+ dev_kfree_skb(snd->skb);
+ nl->enet_stats.tx_packets++;
+ snd->state = PLIP_PK_DONE;
+
+ case PLIP_PK_DONE:
+ /* Close the connection */
+ write_data (dev, 0x00);
+ snd->skb = NULL;
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: send end\n", dev->name);
+ nl->connection = PLIP_CN_CLOSING;
+ nl->is_deferred = 1;
+ schedule_delayed_work(&nl->deferred, 1);
+ enable_parport_interrupts (dev);
+ ENABLE(dev->irq);
+ return OK;
+ }
+ return OK;
+}
+
+static int
+plip_connection_close(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv)
+{
+ spin_lock_irq(&nl->lock);
+ if (nl->connection == PLIP_CN_CLOSING) {
+ nl->connection = PLIP_CN_NONE;
+ netif_wake_queue (dev);
+ }
+ spin_unlock_irq(&nl->lock);
+ if (nl->should_relinquish) {
+ nl->should_relinquish = nl->port_owner = 0;
+ parport_release(nl->pardev);
+ }
+ return OK;
+}
+
+/* PLIP_ERROR --- wait till other end settled */
+static int
+plip_error(struct net_device *dev, struct net_local *nl,
+ struct plip_local *snd, struct plip_local *rcv)
+{
+ unsigned char status;
+
+ status = read_status(dev);
+ if ((status & 0xf8) == 0x80) {
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
+ nl->connection = PLIP_CN_NONE;
+ nl->should_relinquish = 0;
+ netif_start_queue (dev);
+ enable_parport_interrupts (dev);
+ ENABLE(dev->irq);
+ netif_wake_queue (dev);
+ } else {
+ nl->is_deferred = 1;
+ schedule_delayed_work(&nl->deferred, 1);
+ }
+
+ return OK;
+}
+
+/* Handle the parallel port interrupts. */
+static void
+plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *nl;
+ struct plip_local *rcv;
+ unsigned char c0;
+
+ if (dev == NULL) {
+ printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ nl = netdev_priv(dev);
+ rcv = &nl->rcv_data;
+
+ spin_lock_irq (&nl->lock);
+
+ c0 = read_status(dev);
+ if ((c0 & 0xf8) != 0xc0) {
+ if ((dev->irq != -1) && (net_debug > 1))
+ printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
+ spin_unlock_irq (&nl->lock);
+ return;
+ }
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
+
+ switch (nl->connection) {
+ case PLIP_CN_CLOSING:
+ netif_wake_queue (dev);
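+		/* fall through: treat the interrupt as the start of a new
+		   incoming transfer, just as in the NONE and SEND states */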
+ case PLIP_CN_NONE:
+ case PLIP_CN_SEND:
+ rcv->state = PLIP_PK_TRIGGER;
+ nl->connection = PLIP_CN_RECEIVE;
+ nl->timeout_count = 0;
+ schedule_work(&nl->immediate);
+ break;
+
+ case PLIP_CN_RECEIVE:
+		/* May occur because there is a race condition
+		   around the test and set of dev->interrupt.
+		   Ignore this interrupt. */
+ break;
+
+ case PLIP_CN_ERROR:
+ printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
+ break;
+ }
+
+ spin_unlock_irq(&nl->lock);
+}
+
+static int
+plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct plip_local *snd = &nl->snd_data;
+
+ if (netif_queue_stopped(dev))
+ return 1;
+
+ /* We may need to grab the bus */
+ if (!nl->port_owner) {
+ if (parport_claim(nl->pardev))
+ return 1;
+ nl->port_owner = 1;
+ }
+
+ netif_stop_queue (dev);
+
+ if (skb->len > dev->mtu + dev->hard_header_len) {
+ printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
+ netif_start_queue (dev);
+ return 1;
+ }
+
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: send request\n", dev->name);
+
+ spin_lock_irq(&nl->lock);
+ dev->trans_start = jiffies;
+ snd->skb = skb;
+ snd->length.h = skb->len;
+ snd->state = PLIP_PK_TRIGGER;
+ if (nl->connection == PLIP_CN_NONE) {
+ nl->connection = PLIP_CN_SEND;
+ nl->timeout_count = 0;
+ }
+ schedule_work(&nl->immediate);
+ spin_unlock_irq(&nl->lock);
+
+ return 0;
+}
+
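+/* PLIP has no real MAC addresses.  Rewrite the Ethernet header so that the
+   source is our device address and the destination is 0xfc 0xfc followed by
+   the four bytes of the interface's first IPv4 address (ifa_address). */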
+static void
+plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
+{
+ struct in_device *in_dev;
+
+ if ((in_dev=dev->ip_ptr) != NULL) {
+ /* Any address will do - we take the first */
+ struct in_ifaddr *ifa=in_dev->ifa_list;
+ if (ifa != NULL) {
+ memcpy(eth->h_source, dev->dev_addr, 6);
+ memset(eth->h_dest, 0xfc, 2);
+ memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
+ }
+ }
+}
+
+static int
+plip_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len)
+{
+ struct net_local *nl = netdev_priv(dev);
+ int ret;
+
+ if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
+ plip_rewrite_address (dev, (struct ethhdr *)skb->data);
+
+ return ret;
+}
+
+int plip_hard_header_cache(struct neighbour *neigh,
+ struct hh_cache *hh)
+{
+ struct net_local *nl = neigh->dev->priv;
+ int ret;
+
+ if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
+ {
+ struct ethhdr *eth;
+
+ eth = (struct ethhdr*)(((u8*)hh->hh_data) +
+ HH_DATA_OFF(sizeof(*eth)));
+ plip_rewrite_address (neigh->dev, eth);
+ }
+
+ return ret;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine gets exclusive access to the parallel port by allocating
+ its IRQ line.
+ */
+static int
+plip_open(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct in_device *in_dev;
+
+ /* Grab the port */
+ if (!nl->port_owner) {
+ if (parport_claim(nl->pardev)) return -EAGAIN;
+ nl->port_owner = 1;
+ }
+
+ nl->should_relinquish = 0;
+
+ /* Clear the data port. */
+ write_data (dev, 0x00);
+
+ /* Enable rx interrupt. */
+ enable_parport_interrupts (dev);
+ if (dev->irq == -1)
+ {
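+		/* No IRQ: run in polling mode.  The nl->timer delayed work
+		   stands in for the interrupt, and plip_close stops it by
+		   setting kill_timer and waiting on killed_timer_sem. */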
+ atomic_set (&nl->kill_timer, 0);
+ schedule_delayed_work(&nl->timer, 1);
+ }
+
+ /* Initialize the state machine. */
+ nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
+ nl->rcv_data.skb = nl->snd_data.skb = NULL;
+ nl->connection = PLIP_CN_NONE;
+ nl->is_deferred = 0;
+
+ /* Fill in the MAC-level header.
+ We used to abuse dev->broadcast to store the point-to-point
+ MAC address, but we no longer do it. Instead, we fetch the
+ interface address whenever it is needed, which is cheap enough
+ because we use the hh_cache. Actually, abusing dev->broadcast
+ didn't work, because when using plip_open the point-to-point
+ address isn't yet known.
+ PLIP doesn't have a real MAC address, but we need it to be
+ DOS compatible, and to properly support taps (otherwise,
+ when the device address isn't identical to the address of a
+ received frame, the kernel incorrectly drops it). */
+
+ if ((in_dev=dev->ip_ptr) != NULL) {
+ /* Any address will do - we take the first. We already
+ have the first two bytes filled with 0xfc, from
+ plip_init_dev(). */
+ struct in_ifaddr *ifa=in_dev->ifa_list;
+ if (ifa != NULL) {
+ memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
+ }
+ }
+
+ netif_start_queue (dev);
+
+ return 0;
+}
+
+/* The inverse routine to plip_open (). */
+static int
+plip_close(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct plip_local *snd = &nl->snd_data;
+ struct plip_local *rcv = &nl->rcv_data;
+
+ netif_stop_queue (dev);
+ DISABLE(dev->irq);
+ synchronize_irq(dev->irq);
+
+ if (dev->irq == -1)
+ {
+ init_MUTEX_LOCKED (&nl->killed_timer_sem);
+ atomic_set (&nl->kill_timer, 1);
+ down (&nl->killed_timer_sem);
+ }
+
+#ifdef NOTDEF
+ outb(0x00, PAR_DATA(dev));
+#endif
+ nl->is_deferred = 0;
+ nl->connection = PLIP_CN_NONE;
+ if (nl->port_owner) {
+ parport_release(nl->pardev);
+ nl->port_owner = 0;
+ }
+
+ snd->state = PLIP_PK_DONE;
+ if (snd->skb) {
+ dev_kfree_skb(snd->skb);
+ snd->skb = NULL;
+ }
+ rcv->state = PLIP_PK_DONE;
+ if (rcv->skb) {
+ kfree_skb(rcv->skb);
+ rcv->skb = NULL;
+ }
+
+#ifdef NOTDEF
+ /* Reset. */
+ outb(0x00, PAR_CONTROL(dev));
+#endif
+ return 0;
+}
+
+static int
+plip_preempt(void *handle)
+{
+ struct net_device *dev = (struct net_device *)handle;
+ struct net_local *nl = netdev_priv(dev);
+
+ /* Stand our ground if a datagram is on the wire */
+ if (nl->connection != PLIP_CN_NONE) {
+ nl->should_relinquish = 1;
+ return 1;
+ }
+
+ nl->port_owner = 0; /* Remember that we released the bus */
+ return 0;
+}
+
+static void
+plip_wakeup(void *handle)
+{
+ struct net_device *dev = (struct net_device *)handle;
+ struct net_local *nl = netdev_priv(dev);
+
+ if (nl->port_owner) {
+ /* Why are we being woken up? */
+ printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
+ if (!parport_claim(nl->pardev))
+ /* bus_owner is already set (but why?) */
+ printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
+ else
+ return;
+ }
+
+ if (!(dev->flags & IFF_UP))
+ /* Don't need the port when the interface is down */
+ return;
+
+ if (!parport_claim(nl->pardev)) {
+ nl->port_owner = 1;
+ /* Clear the data port. */
+ write_data (dev, 0x00);
+ }
+
+ return;
+}
+
+static struct net_device_stats *
+plip_get_stats(struct net_device *dev)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct net_device_stats *r = &nl->enet_stats;
+
+ return r;
+}
+
+static int
+plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct net_local *nl = netdev_priv(dev);
+ struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
+
+ if (cmd != SIOCDEVPLIP)
+ return -EOPNOTSUPP;
+
+ switch(pc->pcmd) {
+ case PLIP_GET_TIMEOUT:
+ pc->trigger = nl->trigger;
+ pc->nibble = nl->nibble;
+ break;
+ case PLIP_SET_TIMEOUT:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ nl->trigger = pc->trigger;
+ nl->nibble = pc->nibble;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
+static int timid;
+
+module_param_array(parport, int, NULL, 0);
+module_param(timid, int, 0);
+MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
+
+static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
+
+static inline int
+plip_searchfor(int list[], int a)
+{
+ int i;
+ for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
+ if (list[i] == a) return 1;
+ }
+ return 0;
+}
+
+/* plip_attach() is called (by the parport code) when a port is
+ * available to use. */
+static void plip_attach (struct parport *port)
+{
+ static int unit;
+ struct net_device *dev;
+ struct net_local *nl;
+ char name[IFNAMSIZ];
+
+ if ((parport[0] == -1 && (!timid || !port->devices)) ||
+ plip_searchfor(parport, port->number)) {
+ if (unit == PLIP_MAX) {
+ printk(KERN_ERR "plip: too many devices\n");
+ return;
+ }
+
+ sprintf(name, "plip%d", unit);
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev) {
+ printk(KERN_ERR "plip: memory squeeze\n");
+ return;
+ }
+
+ strcpy(dev->name, name);
+
+ SET_MODULE_OWNER(dev);
+ dev->irq = port->irq;
+ dev->base_addr = port->base;
+ if (port->irq == -1) {
+			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
+ "which is fairly inefficient!\n", port->name);
+ }
+
+ nl = netdev_priv(dev);
+ nl->pardev = parport_register_device(port, name, plip_preempt,
+ plip_wakeup, plip_interrupt,
+ 0, dev);
+
+		if (!nl->pardev) {
+			printk(KERN_ERR "%s: parport_register failed\n", name);
+			goto err_free_dev;
+		}
+
+ plip_init_netdev(dev);
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "%s: network register failed\n", name);
+ goto err_parport_unregister;
+ }
+
+ printk(KERN_INFO "%s", version);
+ if (dev->irq != -1)
+ printk(KERN_INFO "%s: Parallel port at %#3lx, "
+ "using IRQ %d.\n",
+ dev->name, dev->base_addr, dev->irq);
+ else
+ printk(KERN_INFO "%s: Parallel port at %#3lx, "
+ "not using IRQ.\n",
+ dev->name, dev->base_addr);
+ dev_plip[unit++] = dev;
+ }
+ return;
+
+err_parport_unregister:
+ parport_unregister_device(nl->pardev);
+err_free_dev:
+ free_netdev(dev);
+ return;
+}
+
+/* plip_detach() is called (by the parport code) when a port is
+ * no longer available to use. */
+static void plip_detach (struct parport *port)
+{
+ /* Nothing to do */
+}
+
+static struct parport_driver plip_driver = {
+ .name = "plip",
+ .attach = plip_attach,
+ .detach = plip_detach
+};
+
+static void __exit plip_cleanup_module (void)
+{
+ struct net_device *dev;
+ int i;
+
+ parport_unregister_driver (&plip_driver);
+
+ for (i=0; i < PLIP_MAX; i++) {
+ if ((dev = dev_plip[i])) {
+ struct net_local *nl = netdev_priv(dev);
+ unregister_netdev(dev);
+ if (nl->port_owner)
+ parport_release(nl->pardev);
+ parport_unregister_device(nl->pardev);
+ free_netdev(dev);
+ dev_plip[i] = NULL;
+ }
+ }
+}
+
+#ifndef MODULE
+
+static int parport_ptr;
+
+static int __init plip_setup(char *str)
+{
+ int ints[4];
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+
+ /* Ugh. */
+ if (!strncmp(str, "parport", 7)) {
+ int n = simple_strtoul(str+7, NULL, 10);
+ if (parport_ptr < PLIP_MAX)
+ parport[parport_ptr++] = n;
+ else
+ printk(KERN_INFO "plip: too many ports, %s ignored.\n",
+ str);
+ } else if (!strcmp(str, "timid")) {
+ timid = 1;
+ } else {
+ if (ints[0] == 0 || ints[1] == 0) {
+ /* disable driver on "plip=" or "plip=0" */
+ parport[0] = -2;
+ } else {
+ printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
+ ints[1]);
+ }
+ }
+ return 1;
+}
+
+__setup("plip=", plip_setup);
+
+#endif /* !MODULE */
+
+static int __init plip_init (void)
+{
+ if (parport[0] == -2)
+ return 0;
+
+ if (parport[0] != -1 && timid) {
+ printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
+ timid = 0;
+ }
+
+ if (parport_register_driver (&plip_driver)) {
+ printk (KERN_WARNING "plip: couldn't register driver\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+module_init(plip_init);
+module_exit(plip_cleanup_module);
+MODULE_LICENSE("GPL");
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
+ * End:
+ */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
new file mode 100644
index 000000000000..33b9d79b1aad
--- /dev/null
+++ b/drivers/net/ppp_async.c
@@ -0,0 +1,1033 @@
+/*
+ * PPP async serial channel driver for Linux.
+ *
+ * Copyright 1999 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This driver provides the encapsulation and framing for sending
+ * and receiving PPP frames over async serial lines. It relies on
+ * the generic PPP layer to give it frames to send and to process
+ * received frames. It implements the PPP line discipline.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/crc-ccitt.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/ppp_channel.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+#define PPP_VERSION "2.4.2"
+
+#define OBUFSIZE 256
+
+/* Structure for storing local state. */
+struct asyncppp {
+ struct tty_struct *tty;
+ unsigned int flags;
+ unsigned int state;
+ unsigned int rbits;
+ int mru;
+ spinlock_t xmit_lock;
+ spinlock_t recv_lock;
+ unsigned long xmit_flags;
+ u32 xaccm[8];
+ u32 raccm;
+ unsigned int bytes_sent;
+ unsigned int bytes_rcvd;
+
+ struct sk_buff *tpkt;
+ int tpkt_pos;
+ u16 tfcs;
+ unsigned char *optr;
+ unsigned char *olim;
+ unsigned long last_xmit;
+
+ struct sk_buff *rpkt;
+ int lcp_fcs;
+ struct sk_buff_head rqueue;
+
+ struct tasklet_struct tsk;
+
+ atomic_t refcnt;
+ struct semaphore dead_sem;
+ struct ppp_channel chan; /* interface to generic ppp layer */
+ unsigned char obuf[OBUFSIZE];
+};
+
+/* Bit numbers in xmit_flags */
+#define XMIT_WAKEUP 0
+#define XMIT_FULL 1
+#define XMIT_BUSY 2
+
+/* State bits */
+#define SC_TOSS 1
+#define SC_ESCAPE 2
+#define SC_PREV_ERROR 4
+
+/* Bits in rbits */
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+static int flag_time = HZ;
+module_param(flag_time, int, 0);
+MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_PPP);
+
+/*
+ * Prototypes.
+ */
+static int ppp_async_encode(struct asyncppp *ap);
+static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
+static int ppp_async_push(struct asyncppp *ap);
+static void ppp_async_flush_output(struct asyncppp *ap);
+static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
+ char *flags, int count);
+static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
+ unsigned long arg);
+static void ppp_async_process(unsigned long arg);
+
+static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
+ int len, int inbound);
+
+static struct ppp_channel_ops async_ops = {
+ ppp_async_send,
+ ppp_async_ioctl
+};
+
+/*
+ * Routines implementing the PPP line discipline.
+ */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_asynctty_receive while another
+ * calls ppp_asynctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_asynctty_receive is using. The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ *
+ * FIXME: this is no longer true. The _close path for the ldisc is
+ * now guaranteed to be sane.
+ */
+static DEFINE_RWLOCK(disc_data_lock);
+
+static struct asyncppp *ap_get(struct tty_struct *tty)
+{
+ struct asyncppp *ap;
+
+ read_lock(&disc_data_lock);
+ ap = tty->disc_data;
+ if (ap != NULL)
+ atomic_inc(&ap->refcnt);
+ read_unlock(&disc_data_lock);
+ return ap;
+}
+
+static void ap_put(struct asyncppp *ap)
+{
+ if (atomic_dec_and_test(&ap->refcnt))
+ up(&ap->dead_sem);
+}
+
+/*
+ * Called when a tty is put into PPP line discipline. Called in process
+ * context.
+ */
+static int
+ppp_asynctty_open(struct tty_struct *tty)
+{
+ struct asyncppp *ap;
+ int err;
+
+ err = -ENOMEM;
+ ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ if (ap == 0)
+ goto out;
+
+ /* initialize the asyncppp structure */
+ memset(ap, 0, sizeof(*ap));
+ ap->tty = tty;
+ ap->mru = PPP_MRU;
+ spin_lock_init(&ap->xmit_lock);
+ spin_lock_init(&ap->recv_lock);
+ ap->xaccm[0] = ~0U;
+ ap->xaccm[3] = 0x60000000U;
+ ap->raccm = ~0U;
+ ap->optr = ap->obuf;
+ ap->olim = ap->obuf;
+ ap->lcp_fcs = -1;
+
+ skb_queue_head_init(&ap->rqueue);
+ tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
+
+ atomic_set(&ap->refcnt, 1);
+ init_MUTEX_LOCKED(&ap->dead_sem);
+
+ ap->chan.private = ap;
+ ap->chan.ops = &async_ops;
+ ap->chan.mtu = PPP_MRU;
+ err = ppp_register_channel(&ap->chan);
+ if (err)
+ goto out_free;
+
+ tty->disc_data = ap;
+
+ return 0;
+
+ out_free:
+ kfree(ap);
+ out:
+ return err;
+}
+
+/*
+ * Called when the tty is put into another line discipline
+ * or it hangs up. We have to wait for any cpu currently
+ * executing in any of the other ppp_asynctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the asyncppp struct. This routine must be called from
+ * process context, not interrupt or softirq context.
+ */
+static void
+ppp_asynctty_close(struct tty_struct *tty)
+{
+ struct asyncppp *ap;
+
+ write_lock_irq(&disc_data_lock);
+ ap = tty->disc_data;
+ tty->disc_data = NULL;
+ write_unlock_irq(&disc_data_lock);
+ if (ap == 0)
+ return;
+
+ /*
+ * We have now ensured that nobody can start using ap from now
+ * on, but we have to wait for all existing users to finish.
+ * Note that ppp_unregister_channel ensures that no calls to
+ * our channel ops (i.e. ppp_async_send/ioctl) are in progress
+ * by the time it returns.
+ */
+ if (!atomic_dec_and_test(&ap->refcnt))
+ down(&ap->dead_sem);
+ tasklet_kill(&ap->tsk);
+
+ ppp_unregister_channel(&ap->chan);
+ if (ap->rpkt != 0)
+ kfree_skb(ap->rpkt);
+ skb_queue_purge(&ap->rqueue);
+ if (ap->tpkt != 0)
+ kfree_skb(ap->tpkt);
+ kfree(ap);
+}
+
+/*
+ * Called on tty hangup in process context.
+ *
+ * Wait for I/O to driver to complete and unregister PPP channel.
+ * This is already done by the close routine, so just call that.
+ */
+static int ppp_asynctty_hangup(struct tty_struct *tty)
+{
+ ppp_asynctty_close(tty);
+ return 0;
+}
+
+/*
+ * Read does nothing - no data is ever available this way.
+ * Pppd reads and writes packets via /dev/ppp instead.
+ */
+static ssize_t
+ppp_asynctty_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t count)
+{
+ return -EAGAIN;
+}
+
+/*
+ * Write on the tty does nothing, the packets all come in
+ * from the ppp generic stuff.
+ */
+static ssize_t
+ppp_asynctty_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t count)
+{
+ return -EAGAIN;
+}
+
+/*
+ * Called in process context only. May be re-entered by multiple
+ * ioctl calling threads.
+ */
+
+static int
+ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct asyncppp *ap = ap_get(tty);
+ int err, val;
+ int __user *p = (int __user *)arg;
+
+ if (ap == 0)
+ return -ENXIO;
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGCHAN:
+ err = -ENXIO;
+ if (ap == 0)
+ break;
+ err = -EFAULT;
+ if (put_user(ppp_channel_index(&ap->chan), p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGUNIT:
+ err = -ENXIO;
+ if (ap == 0)
+ break;
+ err = -EFAULT;
+ if (put_user(ppp_unit_number(&ap->chan), p))
+ break;
+ err = 0;
+ break;
+
+ case TCGETS:
+ case TCGETA:
+ err = n_tty_ioctl(tty, file, cmd, arg);
+ break;
+
+ case TCFLSH:
+ /* flush our buffers and the serial port's buffer */
+ if (arg == TCIOFLUSH || arg == TCOFLUSH)
+ ppp_async_flush_output(ap);
+ err = n_tty_ioctl(tty, file, cmd, arg);
+ break;
+
+ case FIONREAD:
+ val = 0;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOIOCTLCMD;
+ }
+
+ ap_put(ap);
+ return err;
+}
+
+/* No kernel lock - fine */
+static unsigned int
+ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
+{
+ return 0;
+}
+
+static int
+ppp_asynctty_room(struct tty_struct *tty)
+{
+ return 65535;
+}
+
+/*
+ * This can now be called from hard interrupt level as well
+ * as soft interrupt level or mainline.
+ */
+static void
+ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
+ char *cflags, int count)
+{
+ struct asyncppp *ap = ap_get(tty);
+ unsigned long flags;
+
+ if (ap == 0)
+ return;
+ spin_lock_irqsave(&ap->recv_lock, flags);
+ ppp_async_input(ap, buf, cflags, count);
+ spin_unlock_irqrestore(&ap->recv_lock, flags);
+ if (skb_queue_len(&ap->rqueue))
+ tasklet_schedule(&ap->tsk);
+ ap_put(ap);
+ if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+ && tty->driver->unthrottle)
+ tty->driver->unthrottle(tty);
+}
+
+static void
+ppp_asynctty_wakeup(struct tty_struct *tty)
+{
+ struct asyncppp *ap = ap_get(tty);
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (ap == 0)
+ return;
+ set_bit(XMIT_WAKEUP, &ap->xmit_flags);
+ tasklet_schedule(&ap->tsk);
+ ap_put(ap);
+}
+
+
+static struct tty_ldisc ppp_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "ppp",
+ .open = ppp_asynctty_open,
+ .close = ppp_asynctty_close,
+ .hangup = ppp_asynctty_hangup,
+ .read = ppp_asynctty_read,
+ .write = ppp_asynctty_write,
+ .ioctl = ppp_asynctty_ioctl,
+ .poll = ppp_asynctty_poll,
+ .receive_room = ppp_asynctty_room,
+ .receive_buf = ppp_asynctty_receive,
+ .write_wakeup = ppp_asynctty_wakeup,
+};
+
+static int __init
+ppp_async_init(void)
+{
+ int err;
+
+ err = tty_register_ldisc(N_PPP, &ppp_ldisc);
+ if (err != 0)
+ printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
+ err);
+ return err;
+}
+
+/*
+ * The following routines provide the PPP channel interface.
+ */
+static int
+ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+{
+ struct asyncppp *ap = chan->private;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int err, val;
+ u32 accm[8];
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = ap->flags | ap->rbits;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSFLAGS:
+ if (get_user(val, p))
+ break;
+ ap->flags = val & ~SC_RCV_BITS;
+ spin_lock_irq(&ap->recv_lock);
+ ap->rbits = val & SC_RCV_BITS;
+ spin_unlock_irq(&ap->recv_lock);
+ err = 0;
+ break;
+
+ case PPPIOCGASYNCMAP:
+ if (put_user(ap->xaccm[0], (u32 __user *)argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSASYNCMAP:
+ if (get_user(ap->xaccm[0], (u32 __user *)argp))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGRASYNCMAP:
+ if (put_user(ap->raccm, (u32 __user *)argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSRASYNCMAP:
+ if (get_user(ap->raccm, (u32 __user *)argp))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGXASYNCMAP:
+ if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSXASYNCMAP:
+ if (copy_from_user(accm, argp, sizeof(accm)))
+ break;
+ accm[2] &= ~0x40000000U; /* can't escape 0x5e */
+ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
+ memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ if (put_user(ap->mru, p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSMRU:
+ if (get_user(val, p))
+ break;
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+/*
+ * This is called at softirq level to deliver received packets
+ * to the ppp_generic code, and to tell the ppp_generic code
+ * if we can accept more output now.
+ */
+static void ppp_async_process(unsigned long arg)
+{
+ struct asyncppp *ap = (struct asyncppp *) arg;
+ struct sk_buff *skb;
+
+ /* process received packets */
+ while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
+ if (skb->cb[0])
+ ppp_input_error(&ap->chan, 0);
+ ppp_input(&ap->chan, skb);
+ }
+
+ /* try to push more stuff out */
+ if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
+ ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Procedures for encapsulation and framing.
+ */
+
+/*
+ * Procedure to encode the data for async serial transmission.
+ * Does octet stuffing (escaping), puts the address/control bytes
+ * on if A/C compression is disabled, and does protocol compression.
+ * Assumes ap->tpkt != 0 on entry.
+ * Returns 1 if we finished the current frame, 0 otherwise.
+ */
+
+#define PUT_BYTE(ap, buf, c, islcp) do { \
+ if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
+ *buf++ = PPP_ESCAPE; \
+ *buf++ = c ^ 0x20; \
+ } else \
+ *buf++ = c; \
+} while (0)
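+/* For example, with the default transmit ACCM set up in ppp_asynctty_open
+   (all control characters plus 0x7d and 0x7e escaped), PUT_BYTE emits 0x7e
+   as the two bytes 0x7d 0x5e, while an ordinary byte such as 0x41 is copied
+   through unchanged. */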
+
+static int
+ppp_async_encode(struct asyncppp *ap)
+{
+ int fcs, i, count, c, proto;
+ unsigned char *buf, *buflim;
+ unsigned char *data;
+ int islcp;
+
+ buf = ap->obuf;
+ ap->olim = buf;
+ ap->optr = buf;
+ i = ap->tpkt_pos;
+ data = ap->tpkt->data;
+ count = ap->tpkt->len;
+ fcs = ap->tfcs;
+ proto = (data[0] << 8) + data[1];
+
+ /*
+	 * LCP packets with code values between 1 (configure-request)
+ * and 7 (code-reject) must be sent as though no options
+ * had been negotiated.
+ */
+ islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+ if (i == 0) {
+ if (islcp)
+ async_lcp_peek(ap, data, count, 0);
+
+ /*
+ * Start of a new packet - insert the leading FLAG
+ * character if necessary.
+ */
+ if (islcp || flag_time == 0
+ || jiffies - ap->last_xmit >= flag_time)
+ *buf++ = PPP_FLAG;
+ ap->last_xmit = jiffies;
+ fcs = PPP_INITFCS;
+
+ /*
+ * Put in the address/control bytes if necessary
+ */
+ if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
+ PUT_BYTE(ap, buf, 0xff, islcp);
+ fcs = PPP_FCS(fcs, 0xff);
+ PUT_BYTE(ap, buf, 0x03, islcp);
+ fcs = PPP_FCS(fcs, 0x03);
+ }
+ }
+
+ /*
+ * Once we put in the last byte, we need to put in the FCS
+	 * and closing flag, so make sure there are at least 7 bytes
+ * of free space in the output buffer.
+ */
+ buflim = ap->obuf + OBUFSIZE - 6;
+ while (i < count && buf < buflim) {
+ c = data[i++];
+ if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
+ continue; /* compress protocol field */
+ fcs = PPP_FCS(fcs, c);
+ PUT_BYTE(ap, buf, c, islcp);
+ }
+
+ if (i < count) {
+ /*
+ * Remember where we are up to in this packet.
+ */
+ ap->olim = buf;
+ ap->tpkt_pos = i;
+ ap->tfcs = fcs;
+ return 0;
+ }
+
+ /*
+ * We have finished the packet. Add the FCS and flag.
+ */
+ fcs = ~fcs;
+ c = fcs & 0xff;
+ PUT_BYTE(ap, buf, c, islcp);
+ c = (fcs >> 8) & 0xff;
+ PUT_BYTE(ap, buf, c, islcp);
+ *buf++ = PPP_FLAG;
+ ap->olim = buf;
+
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ return 1;
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Send a packet to the peer over an async tty line.
+ * Returns 1 iff the packet was accepted.
+ * If the packet was not accepted, we will call ppp_output_wakeup
+ * at some later time.
+ */
+static int
+ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct asyncppp *ap = chan->private;
+
+ ppp_async_push(ap);
+
+ if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
+ return 0; /* already full */
+ ap->tpkt = skb;
+ ap->tpkt_pos = 0;
+
+ ppp_async_push(ap);
+ return 1;
+}
+
+/*
+ * Push as much data as possible out to the tty.
+ */
+static int
+ppp_async_push(struct asyncppp *ap)
+{
+ int avail, sent, done = 0;
+ struct tty_struct *tty = ap->tty;
+ int tty_stuffed = 0;
+
+ /*
+ * We can get called recursively here if the tty write
+ * function calls our wakeup function. This can happen
+ * for example on a pty with both the master and slave
+ * set to PPP line discipline.
+ * We use the XMIT_BUSY bit to detect this and get out,
+ * leaving the XMIT_WAKEUP bit set to tell the other
+	 * instance that it may now be able to write more.
+ */
+ if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
+ return 0;
+ spin_lock_bh(&ap->xmit_lock);
+ for (;;) {
+ if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
+ tty_stuffed = 0;
+ if (!tty_stuffed && ap->optr < ap->olim) {
+ avail = ap->olim - ap->optr;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ sent = tty->driver->write(tty, ap->optr, avail);
+ if (sent < 0)
+ goto flush; /* error, e.g. loss of CD */
+ ap->optr += sent;
+ if (sent < avail)
+ tty_stuffed = 1;
+ continue;
+ }
+ if (ap->optr >= ap->olim && ap->tpkt != 0) {
+ if (ppp_async_encode(ap)) {
+ /* finished processing ap->tpkt */
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ continue;
+ }
+ /*
+ * We haven't made any progress this time around.
+ * Clear XMIT_BUSY to let other callers in, but
+ * after doing so we have to check if anyone set
+ * XMIT_WAKEUP since we last checked it. If they
+ * did, we should try again to set XMIT_BUSY and go
+ * around again in case XMIT_BUSY was still set when
+ * the other caller tried.
+ */
+ clear_bit(XMIT_BUSY, &ap->xmit_flags);
+ /* any more work to do? if not, exit the loop */
+ if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
+ || (!tty_stuffed && ap->tpkt != 0)))
+ break;
+ /* more work to do, see if we can do it now */
+ if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
+ break;
+ }
+ spin_unlock_bh(&ap->xmit_lock);
+ return done;
+
+flush:
+ clear_bit(XMIT_BUSY, &ap->xmit_flags);
+ if (ap->tpkt != 0) {
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ ap->optr = ap->olim;
+ spin_unlock_bh(&ap->xmit_lock);
+ return done;
+}
+
+/*
+ * Flush output from our internal buffers.
+ * Called for the TCFLSH ioctl. Can be entered in parallel
+ * but this is covered by the xmit_lock.
+ */
+static void
+ppp_async_flush_output(struct asyncppp *ap)
+{
+ int done = 0;
+
+ spin_lock_bh(&ap->xmit_lock);
+ ap->optr = ap->olim;
+ if (ap->tpkt != NULL) {
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ spin_unlock_bh(&ap->xmit_lock);
+ if (done)
+ ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Receive-side routines.
+ */
+
+/* see how many ordinary chars there are at the start of buf */
+static inline int
+scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
+{
+ int i, c;
+
+ for (i = 0; i < count; ++i) {
+ c = buf[i];
+ if (c == PPP_ESCAPE || c == PPP_FLAG
+ || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
+ break;
+ }
+ return i;
+}
+
+/* called when a flag is seen - do end-of-packet processing */
+static void
+process_input_packet(struct asyncppp *ap)
+{
+ struct sk_buff *skb;
+ unsigned char *p;
+ unsigned int len, fcs, proto;
+
+ skb = ap->rpkt;
+ if (ap->state & (SC_TOSS | SC_ESCAPE))
+ goto err;
+
+ if (skb == NULL)
+ return; /* 0-length packet */
+
+ /* check the FCS */
+ p = skb->data;
+ len = skb->len;
+ if (len < 3)
+ goto err; /* too short */
+ fcs = PPP_INITFCS;
+ for (; len > 0; --len)
+ fcs = PPP_FCS(fcs, *p++);
+ if (fcs != PPP_GOODFCS)
+ goto err; /* bad FCS */
+ skb_trim(skb, skb->len - 2);
+
+ /* check for address/control and protocol compression */
+ p = skb->data;
+ if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto err;
+ p = skb_pull(skb, 2);
+ }
+ proto = p[0];
+ if (proto & 1) {
+ /* protocol is compressed */
+ skb_push(skb, 1)[0] = 0;
+ } else {
+ if (skb->len < 2)
+ goto err;
+ proto = (proto << 8) + p[1];
+ if (proto == PPP_LCP)
+ async_lcp_peek(ap, p, skb->len, 1);
+ }
+
+ /* queue the frame to be processed */
+ skb->cb[0] = ap->state;
+ skb_queue_tail(&ap->rqueue, skb);
+ ap->rpkt = NULL;
+ ap->state = 0;
+ return;
+
+ err:
+ /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
+ ap->state = SC_PREV_ERROR;
+ if (skb)
+ skb_trim(skb, 0);
+}
+
+/* Called when the tty driver has data for us. Runs in parallel with the
+   other ldisc functions but will not be re-entered */
+
+static void
+ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
+ char *flags, int count)
+{
+ struct sk_buff *skb;
+ int c, i, j, n, s, f;
+ unsigned char *sp;
+
+ /* update bits used for 8-bit cleanness detection */
+ if (~ap->rbits & SC_RCV_BITS) {
+ s = 0;
+ for (i = 0; i < count; ++i) {
+ c = buf[i];
+ if (flags != 0 && flags[i] != 0)
+ continue;
+ s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
+ c = ((c >> 4) ^ c) & 0xf;
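+			/* 0x6996 is a 16-entry parity table (bit n is set
+			   when n has odd parity); after folding the nibbles
+			   together it tells us the parity of the byte. */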
+ s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
+ }
+ ap->rbits |= s;
+ }
+
+ while (count > 0) {
+ /* scan through and see how many chars we can do in bulk */
+ if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
+ n = 1;
+ else
+ n = scan_ordinary(ap, buf, count);
+
+ f = 0;
+ if (flags != 0 && (ap->state & SC_TOSS) == 0) {
+ /* check the flags to see if any char had an error */
+ for (j = 0; j < n; ++j)
+ if ((f = flags[j]) != 0)
+ break;
+ }
+ if (f != 0) {
+ /* start tossing */
+ ap->state |= SC_TOSS;
+
+ } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
+ /* stuff the chars in the skb */
+ skb = ap->rpkt;
+ if (skb == 0) {
+ skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
+ if (skb == 0)
+ goto nomem;
+ /* Try to get the payload 4-byte aligned */
+ if (buf[0] != PPP_ALLSTATIONS)
+ skb_reserve(skb, 2 + (buf[0] & 1));
+ ap->rpkt = skb;
+ }
+ if (n > skb_tailroom(skb)) {
+ /* packet overflowed MRU */
+ ap->state |= SC_TOSS;
+ } else {
+ sp = skb_put(skb, n);
+ memcpy(sp, buf, n);
+ if (ap->state & SC_ESCAPE) {
+ sp[0] ^= 0x20;
+ ap->state &= ~SC_ESCAPE;
+ }
+ }
+ }
+
+ if (n >= count)
+ break;
+
+ c = buf[n];
+ if (flags != NULL && flags[n] != 0) {
+ ap->state |= SC_TOSS;
+ } else if (c == PPP_FLAG) {
+ process_input_packet(ap);
+ } else if (c == PPP_ESCAPE) {
+ ap->state |= SC_ESCAPE;
+ } else if (I_IXON(ap->tty)) {
+ if (c == START_CHAR(ap->tty))
+ start_tty(ap->tty);
+ else if (c == STOP_CHAR(ap->tty))
+ stop_tty(ap->tty);
+ }
+ /* otherwise it's a char in the recv ACCM */
+ ++n;
+
+ buf += n;
+ if (flags != 0)
+ flags += n;
+ count -= n;
+ }
+ return;
+
+ nomem:
+ printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
+ ap->state |= SC_TOSS;
+}
+
+/*
+ * We look at LCP frames going past so that we can notice
+ * and react to the LCP configure-ack from the peer.
+ * In the situation where the peer has been sent a configure-ack
+ * already, LCP is up once it has sent its configure-ack
+ * so the immediately following packet can be sent with the
+ * configured LCP options. This allows us to process the following
+ * packet correctly without pppd needing to respond quickly.
+ *
+ * We only respond to the received configure-ack if we have just
+ * sent a configure-request, and the configure-ack contains the
+ * same data (this is checked using a 16-bit crc of the data).
+ */
+#define CONFREQ 1 /* LCP code field values */
+#define CONFACK 2
+#define LCP_MRU 1 /* LCP option numbers */
+#define LCP_ASYNCMAP 2
+
+static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
+ int len, int inbound)
+{
+ int dlen, fcs, i, code;
+ u32 val;
+
+ data += 2; /* skip protocol bytes */
+ len -= 2;
+ if (len < 4) /* 4 = code, ID, length */
+ return;
+ code = data[0];
+ if (code != CONFACK && code != CONFREQ)
+ return;
+ dlen = (data[2] << 8) + data[3];
+ if (len < dlen)
+ return; /* packet got truncated or length is bogus */
+
+ if (code == (inbound? CONFACK: CONFREQ)) {
+ /*
+ * sent confreq or received confack:
+ * calculate the crc of the data from the ID field on.
+ */
+ fcs = PPP_INITFCS;
+ for (i = 1; i < dlen; ++i)
+ fcs = PPP_FCS(fcs, data[i]);
+
+ if (!inbound) {
+ /* outbound confreq - remember the crc for later */
+ ap->lcp_fcs = fcs;
+ return;
+ }
+
+ /* received confack, check the crc */
+ fcs ^= ap->lcp_fcs;
+ ap->lcp_fcs = -1;
+ if (fcs != 0)
+ return;
+ } else if (inbound)
+ return; /* not interested in received confreq */
+
+ /* process the options in the confack */
+ data += 4;
+ dlen -= 4;
+ /* data[0] is code, data[1] is length */
+ while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
+ switch (data[0]) {
+ case LCP_MRU:
+ val = (data[2] << 8) + data[3];
+ if (inbound)
+ ap->mru = val;
+ else
+ ap->chan.mtu = val;
+ break;
+ case LCP_ASYNCMAP:
+ val = (data[2] << 24) + (data[3] << 16)
+ + (data[4] << 8) + data[5];
+ if (inbound)
+ ap->raccm = val;
+ else
+ ap->xaccm[0] = val;
+ break;
+ }
+ dlen -= data[1];
+ data += data[1];
+ }
+}
+
+static void __exit ppp_async_cleanup(void)
+{
+ if (tty_register_ldisc(N_PPP, NULL) != 0)
+ printk(KERN_ERR "failed to unregister PPP line discipline\n");
+}
+
+module_init(ppp_async_init);
+module_exit(ppp_async_cleanup);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
new file mode 100644
index 000000000000..507d6328d4eb
--- /dev/null
+++ b/drivers/net/ppp_deflate.c
@@ -0,0 +1,659 @@
+/*
+ * ==FILEVERSION 980319==
+ *
+ * ppp_deflate.c - interface the zlib procedures for Deflate compression
+ * and decompression (as used by gzip) to the PPP code.
+ * This version is for use with Linux kernel 1.3.X.
+ *
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ *
+ * From: deflate.c,v 1.1 1996/01/18 03:17:48 paulus Exp
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <linux/ppp_defs.h>
+#include <linux/ppp-comp.h>
+
+#include <linux/zlib.h>
+
+/*
+ * State for a Deflate (de)compressor.
+ */
+struct ppp_deflate_state {
+ int seqno;
+ int w_size;
+ int unit;
+ int mru;
+ int debug;
+ z_stream strm;
+ struct compstat stats;
+};
+
+#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
+
+static void *z_comp_alloc(unsigned char *options, int opt_len);
+static void *z_decomp_alloc(unsigned char *options, int opt_len);
+static void z_comp_free(void *state);
+static void z_decomp_free(void *state);
+static int z_comp_init(void *state, unsigned char *options,
+ int opt_len,
+ int unit, int hdrlen, int debug);
+static int z_decomp_init(void *state, unsigned char *options,
+ int opt_len,
+ int unit, int hdrlen, int mru, int debug);
+static int z_compress(void *state, unsigned char *rptr,
+ unsigned char *obuf,
+ int isize, int osize);
+static void z_incomp(void *state, unsigned char *ibuf, int icnt);
+static int z_decompress(void *state, unsigned char *ibuf,
+ int isize, unsigned char *obuf, int osize);
+static void z_comp_reset(void *state);
+static void z_decomp_reset(void *state);
+static void z_comp_stats(void *state, struct compstat *stats);
+
+/**
+ * z_comp_free - free the memory used by a compressor
+ * @arg: pointer to the private state for the compressor.
+ */
+static void z_comp_free(void *arg)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ if (state) {
+ zlib_deflateEnd(&state->strm);
+ if (state->strm.workspace)
+ vfree(state->strm.workspace);
+ kfree(state);
+ }
+}
+
+/**
+ * z_comp_alloc - allocate space for a compressor.
+ * @options: pointer to CCP option data
+ * @opt_len: length of the CCP option at @options.
+ *
+ * The @options pointer points to a buffer containing the
+ * CCP option data for the compression being negotiated. It is
+ * formatted according to RFC1979, and describes the window
+ * size that the peer is requesting that we use in compressing
+ * data to be sent to it.
+ *
+ * Returns the pointer to the private state for the compressor,
+ * or NULL if we could not allocate enough memory.
+ */
+static void *z_comp_alloc(unsigned char *options, int opt_len)
+{
+ struct ppp_deflate_state *state;
+ int w_size;
+
+ if (opt_len != CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return NULL;
+ w_size = DEFLATE_SIZE(options[2]);
+ if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+ return NULL;
+
+ state = (struct ppp_deflate_state *) kmalloc(sizeof(*state),
+ GFP_KERNEL);
+ if (state == NULL)
+ return NULL;
+
+ memset (state, 0, sizeof (struct ppp_deflate_state));
+ state->strm.next_in = NULL;
+ state->w_size = w_size;
+ state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ if (state->strm.workspace == NULL)
+ goto out_free;
+
+ if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
+ DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
+ != Z_OK)
+ goto out_free;
+ return (void *) state;
+
+out_free:
+ z_comp_free(state);
+ return NULL;
+}
+
+/**
+ * z_comp_init - initialize a previously-allocated compressor.
+ * @arg: pointer to the private state for the compressor
+ * @options: pointer to the CCP option data describing the
+ * compression that was negotiated with the peer
+ * @opt_len: length of the CCP option data at @options
+ * @unit: PPP unit number for diagnostic messages
+ * @hdrlen: ignored (present for backwards compatibility)
+ * @debug: debug flag; if non-zero, debug messages are printed.
+ *
+ * The CCP options described by @options must match the options
+ * specified when the compressor was allocated. The compressor
+ * history is reset. Returns 0 for failure (CCP options don't
+ * match) or 1 for success.
+ */
+static int z_comp_init(void *arg, unsigned char *options, int opt_len,
+ int unit, int hdrlen, int debug)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ if (opt_len < CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || DEFLATE_SIZE(options[2]) != state->w_size
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return 0;
+
+ state->seqno = 0;
+ state->unit = unit;
+ state->debug = debug;
+
+ zlib_deflateReset(&state->strm);
+
+ return 1;
+}
+
+/**
+ * z_comp_reset - reset a previously-allocated compressor.
+ * @arg: pointer to private state for the compressor.
+ *
+ * This clears the history for the compressor and makes it
+ * ready to start emitting a new compressed stream.
+ */
+static void z_comp_reset(void *arg)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ state->seqno = 0;
+ zlib_deflateReset(&state->strm);
+}
+
+/**
+ * z_compress - compress a PPP packet with Deflate compression.
+ * @arg: pointer to private state for the compressor
+ * @rptr: uncompressed packet (input)
+ * @obuf: compressed packet (output)
+ * @isize: size of uncompressed packet
+ * @osize: space available at @obuf
+ *
+ * Returns the length of the compressed packet, or 0 if the
+ * packet is incompressible.
+ */
+int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
+ int isize, int osize)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+ int r, proto, off, olen, oavail;
+ unsigned char *wptr;
+
+ /*
+ * Check that the protocol is in the range we handle.
+ */
+ proto = PPP_PROTOCOL(rptr);
+ if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
+ return 0;
+
+ /* Don't generate compressed packets which are larger than
+ the uncompressed packet. */
+ if (osize > isize)
+ osize = isize;
+
+ wptr = obuf;
+
+ /*
+ * Copy over the PPP header and store the 2-byte sequence number.
+ */
+ wptr[0] = PPP_ADDRESS(rptr);
+ wptr[1] = PPP_CONTROL(rptr);
+ wptr[2] = PPP_COMP >> 8;
+ wptr[3] = PPP_COMP;
+ wptr += PPP_HDRLEN;
+ wptr[0] = state->seqno >> 8;
+ wptr[1] = state->seqno;
+ wptr += DEFLATE_OVHD;
+ olen = PPP_HDRLEN + DEFLATE_OVHD;
+ state->strm.next_out = wptr;
+ state->strm.avail_out = oavail = osize - olen;
+ ++state->seqno;
+
+ off = (proto > 0xff) ? 2 : 3; /* skip 1st proto byte if 0 */
+ rptr += off;
+ state->strm.next_in = rptr;
+ state->strm.avail_in = (isize - off);
+
+ for (;;) {
+ r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
+ if (r != Z_OK) {
+ if (state->debug)
+ printk(KERN_ERR
+ "z_compress: deflate returned %d\n", r);
+ break;
+ }
+ if (state->strm.avail_out == 0) {
+ olen += oavail;
+ state->strm.next_out = NULL;
+ state->strm.avail_out = oavail = 1000000;
+ } else {
+ break; /* all done */
+ }
+ }
+ olen += oavail - state->strm.avail_out;
+
+ /*
+ * See if we managed to reduce the size of the packet.
+ */
+ if (olen < isize) {
+ state->stats.comp_bytes += olen;
+ state->stats.comp_packets++;
+ } else {
+ state->stats.inc_bytes += isize;
+ state->stats.inc_packets++;
+ olen = 0;
+ }
+ state->stats.unc_bytes += isize;
+ state->stats.unc_packets++;
+
+ return olen;
+}
+
+/**
+ * z_comp_stats - return compression statistics for a compressor
+ * or decompressor.
+ * @arg: pointer to private space for the (de)compressor
+ * @stats: pointer to a struct compstat to receive the result.
+ */
+static void z_comp_stats(void *arg, struct compstat *stats)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ *stats = state->stats;
+}
+
+/**
+ * z_decomp_free - Free the memory used by a decompressor.
+ * @arg: pointer to private space for the decompressor.
+ */
+static void z_decomp_free(void *arg)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ if (state) {
+ zlib_inflateEnd(&state->strm);
+ if (state->strm.workspace)
+ kfree(state->strm.workspace);
+ kfree(state);
+ }
+}
+
+/**
+ * z_decomp_alloc - allocate space for a decompressor.
+ * @options: pointer to CCP option data
+ * @opt_len: length of the CCP option at @options.
+ *
+ * The @options pointer points to a buffer containing the
+ * CCP option data for the compression being negotiated. It is
+ * formatted according to RFC1979, and describes the window
+ * size that we are requesting the peer to use in compressing
+ * data to be sent to us.
+ *
+ * Returns the pointer to the private state for the decompressor,
+ * or NULL if we could not allocate enough memory.
+ */
+static void *z_decomp_alloc(unsigned char *options, int opt_len)
+{
+ struct ppp_deflate_state *state;
+ int w_size;
+
+ if (opt_len != CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return NULL;
+ w_size = DEFLATE_SIZE(options[2]);
+ if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
+ return NULL;
+
+ state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return NULL;
+
+ memset (state, 0, sizeof (struct ppp_deflate_state));
+ state->w_size = w_size;
+ state->strm.next_out = NULL;
+ state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL|__GFP_REPEAT);
+ if (state->strm.workspace == NULL)
+ goto out_free;
+
+ if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
+ goto out_free;
+ return (void *) state;
+
+out_free:
+ z_decomp_free(state);
+ return NULL;
+}
+
+/**
+ * z_decomp_init - initialize a previously-allocated decompressor.
+ * @arg: pointer to the private state for the decompressor
+ * @options: pointer to the CCP option data describing the
+ * compression that was negotiated with the peer
+ * @opt_len: length of the CCP option data at @options
+ * @unit: PPP unit number for diagnostic messages
+ * @hdrlen: ignored (present for backwards compatibility)
+ * @mru: maximum length of decompressed packets
+ * @debug: debug flag; if non-zero, debug messages are printed.
+ *
+ * The CCP options described by @options must match the options
+ * specified when the decompressor was allocated. The decompressor
+ * history is reset. Returns 0 for failure (CCP options don't
+ * match) or 1 for success.
+ */
+static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
+ int unit, int hdrlen, int mru, int debug)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ if (opt_len < CILEN_DEFLATE
+ || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT)
+ || options[1] != CILEN_DEFLATE
+ || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL
+ || DEFLATE_SIZE(options[2]) != state->w_size
+ || options[3] != DEFLATE_CHK_SEQUENCE)
+ return 0;
+
+ state->seqno = 0;
+ state->unit = unit;
+ state->debug = debug;
+ state->mru = mru;
+
+ zlib_inflateReset(&state->strm);
+
+ return 1;
+}
+
+/**
+ * z_decomp_reset - reset a previously-allocated decompressor.
+ * @arg: pointer to private state for the decompressor.
+ *
+ * This clears the history for the decompressor and makes it
+ * ready to receive a new compressed stream.
+ */
+static void z_decomp_reset(void *arg)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+
+ state->seqno = 0;
+ zlib_inflateReset(&state->strm);
+}
+
+/**
+ * z_decompress - decompress a Deflate-compressed packet.
+ * @arg: pointer to private state for the decompressor
+ * @ibuf: pointer to input (compressed) packet data
+ * @isize: length of input packet
+ * @obuf: pointer to space for output (decompressed) packet
+ * @osize: amount of space available at @obuf
+ *
+ * Because of patent problems, we return DECOMP_ERROR for errors
+ * found by inspecting the input data and for system problems, but
+ * DECOMP_FATALERROR for any errors which could possibly be said to
+ * be being detected "after" decompression. For DECOMP_ERROR,
+ * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
+ * infringing a patent of Motorola's if we do, so we take CCP down
+ * instead.
+ *
+ * Given that the frame has the correct sequence number and a good FCS,
+ * errors such as invalid codes in the input most likely indicate a
+ * bug, so we return DECOMP_FATALERROR for them in order to turn off
+ * compression, even though they are detected by inspecting the input.
+ */
+int z_decompress(void *arg, unsigned char *ibuf, int isize,
+ unsigned char *obuf, int osize)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+ int olen, seq, r;
+ int decode_proto, overflow;
+ unsigned char overflow_buf[1];
+
+ if (isize <= PPP_HDRLEN + DEFLATE_OVHD) {
+ if (state->debug)
+ printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n",
+ state->unit, isize);
+ return DECOMP_ERROR;
+ }
+
+ /* Check the sequence number. */
+ seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+ if (seq != (state->seqno & 0xffff)) {
+ if (state->debug)
+ printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
+ state->unit, seq, state->seqno & 0xffff);
+ return DECOMP_ERROR;
+ }
+ ++state->seqno;
+
+ /*
+ * Fill in the first part of the PPP header. The protocol field
+ * comes from the decompressed data.
+ */
+ obuf[0] = PPP_ADDRESS(ibuf);
+ obuf[1] = PPP_CONTROL(ibuf);
+ obuf[2] = 0;
+
+ /*
+ * Set up to call inflate. We set avail_out to 1 initially so we can
+ * look at the first byte of the output and decide whether we have
+ * a 1-byte or 2-byte protocol field.
+ */
+ state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD;
+ state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD);
+ state->strm.next_out = obuf + 3;
+ state->strm.avail_out = 1;
+ decode_proto = 1;
+ overflow = 0;
+
+ /*
+ * Call inflate, supplying more input or output as needed.
+ */
+ for (;;) {
+ r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
+ if (r != Z_OK) {
+ if (state->debug)
+ printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
+ state->unit, r, (state->strm.msg? state->strm.msg: ""));
+ return DECOMP_FATALERROR;
+ }
+ if (state->strm.avail_out != 0)
+ break; /* all done */
+ if (decode_proto) {
+ state->strm.avail_out = osize - PPP_HDRLEN;
+ if ((obuf[3] & 1) == 0) {
+ /* 2-byte protocol field */
+ obuf[2] = obuf[3];
+ --state->strm.next_out;
+ ++state->strm.avail_out;
+ }
+ decode_proto = 0;
+ } else if (!overflow) {
+ /*
+ * We've filled up the output buffer; the only way to
+ * find out whether inflate has any more characters
+ * left is to give it another byte of output space.
+ */
+ state->strm.next_out = overflow_buf;
+ state->strm.avail_out = 1;
+ overflow = 1;
+ } else {
+ if (state->debug)
+ printk(KERN_DEBUG "z_decompress%d: ran out of mru\n",
+ state->unit);
+ return DECOMP_FATALERROR;
+ }
+ }
+
+ if (decode_proto) {
+ if (state->debug)
+ printk(KERN_DEBUG "z_decompress%d: didn't get proto\n",
+ state->unit);
+ return DECOMP_ERROR;
+ }
+
+ olen = osize + overflow - state->strm.avail_out;
+ state->stats.unc_bytes += olen;
+ state->stats.unc_packets++;
+ state->stats.comp_bytes += isize;
+ state->stats.comp_packets++;
+
+ return olen;
+}
+
+/**
+ * z_incomp - add incompressible input data to the history.
+ * @arg: pointer to private state for the decompressor
+ * @ibuf: pointer to input packet data
+ * @icnt: length of input data.
+ */
+static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
+{
+ struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
+ int proto, r;
+
+ /*
+ * Check that the protocol is one we handle.
+ */
+ proto = PPP_PROTOCOL(ibuf);
+ if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
+ return;
+
+ ++state->seqno;
+
+ /*
+	 * We start at either the 1st or 2nd byte of the protocol field,
+ * depending on whether the protocol value is compressible.
+ */
+ state->strm.next_in = ibuf + 3;
+ state->strm.avail_in = icnt - 3;
+ if (proto > 0xff) {
+ --state->strm.next_in;
+ ++state->strm.avail_in;
+ }
+
+ r = zlib_inflateIncomp(&state->strm);
+ if (r != Z_OK) {
+ /* gak! */
+ if (state->debug) {
+ printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
+ state->unit, r, (state->strm.msg? state->strm.msg: ""));
+ }
+ return;
+ }
+
+ /*
+ * Update stats.
+ */
+ state->stats.inc_bytes += icnt;
+ state->stats.inc_packets++;
+ state->stats.unc_bytes += icnt;
+ state->stats.unc_packets++;
+}
+
+/*************************************************************
+ * Module interface table
+ *************************************************************/
+
+/* These are in ppp_generic.c */
+extern int ppp_register_compressor (struct compressor *cp);
+extern void ppp_unregister_compressor (struct compressor *cp);
+
+/*
+ * Procedures exported to if_ppp.c.
+ */
+static struct compressor ppp_deflate = {
+ .compress_proto = CI_DEFLATE,
+ .comp_alloc = z_comp_alloc,
+ .comp_free = z_comp_free,
+ .comp_init = z_comp_init,
+ .comp_reset = z_comp_reset,
+ .compress = z_compress,
+ .comp_stat = z_comp_stats,
+ .decomp_alloc = z_decomp_alloc,
+ .decomp_free = z_decomp_free,
+ .decomp_init = z_decomp_init,
+ .decomp_reset = z_decomp_reset,
+ .decompress = z_decompress,
+ .incomp = z_incomp,
+ .decomp_stat = z_comp_stats,
+ .owner = THIS_MODULE
+};
+
+static struct compressor ppp_deflate_draft = {
+ .compress_proto = CI_DEFLATE_DRAFT,
+ .comp_alloc = z_comp_alloc,
+ .comp_free = z_comp_free,
+ .comp_init = z_comp_init,
+ .comp_reset = z_comp_reset,
+ .compress = z_compress,
+ .comp_stat = z_comp_stats,
+ .decomp_alloc = z_decomp_alloc,
+ .decomp_free = z_decomp_free,
+ .decomp_init = z_decomp_init,
+ .decomp_reset = z_decomp_reset,
+ .decompress = z_decompress,
+ .incomp = z_incomp,
+ .decomp_stat = z_comp_stats,
+ .owner = THIS_MODULE
+};
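+
+/*
+ * The draft table above registers the same callbacks under the CCP
+ * option number used by earlier drafts of the Deflate spec, so peers
+ * that negotiate CI_DEFLATE_DRAFT can still use Deflate compression.
+ */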
+
+static int __init deflate_init(void)
+{
+ int answer = ppp_register_compressor(&ppp_deflate);
+ if (answer == 0)
+ printk(KERN_INFO
+ "PPP Deflate Compression module registered\n");
+ ppp_register_compressor(&ppp_deflate_draft);
+ return answer;
+}
+
+static void __exit deflate_cleanup(void)
+{
+ ppp_unregister_compressor(&ppp_deflate);
+ ppp_unregister_compressor(&ppp_deflate_draft);
+}
+
+module_init(deflate_init);
+module_exit(deflate_cleanup);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
+MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
new file mode 100644
index 000000000000..c456dc81b873
--- /dev/null
+++ b/drivers/net/ppp_generic.c
@@ -0,0 +1,2746 @@
+/*
+ * Generic PPP layer for Linux.
+ *
+ * Copyright 1999-2002 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The generic PPP layer handles the PPP network interfaces, the
+ * /dev/ppp device, packet and VJ compression, and multilink.
+ * It talks to PPP `channels' via the interface defined in
+ * include/linux/ppp_channel.h. Channels provide the basic means for
+ * sending and receiving PPP frames on some kind of communications
+ * channel.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ *
+ * ==FILEVERSION 20041108==
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/ppp_defs.h>
+#include <linux/filter.h>
+#include <linux/if_ppp.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp-comp.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/rwsem.h>
+#include <linux/stddef.h>
+#include <linux/device.h>
+#include <net/slhc_vj.h>
+#include <asm/atomic.h>
+
+#define PPP_VERSION "2.4.2"
+
+/*
+ * Network protocols we support.
+ */
+#define NP_IP 0 /* Internet Protocol V4 */
+#define NP_IPV6 1 /* Internet Protocol V6 */
+#define NP_IPX 2 /* IPX protocol */
+#define NP_AT 3 /* Appletalk protocol */
+#define NP_MPLS_UC 4 /* MPLS unicast */
+#define NP_MPLS_MC 5 /* MPLS multicast */
+#define NUM_NP 6 /* Number of NPs. */
+
+#define MPHDRLEN 6 /* multilink protocol header length */
+#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
+#define MIN_FRAG_SIZE 64
+
+/*
+ * An instance of /dev/ppp can be associated with either a ppp
+ * interface unit or a ppp channel. In both cases, file->private_data
+ * points to one of these.
+ */
+struct ppp_file {
+ enum {
+ INTERFACE=1, CHANNEL
+ } kind;
+ struct sk_buff_head xq; /* pppd transmit queue */
+ struct sk_buff_head rq; /* receive queue for pppd */
+ wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
+ atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
+ int hdrlen; /* space to leave for headers */
+ int index; /* interface unit / channel number */
+ int dead; /* unit/channel has been shut down */
+};
+
+#define PF_TO_X(pf, X) ((X *)((char *)(pf) - offsetof(X, file)))
+
+#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
+#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
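+
+/*
+ * PF_TO_X() recovers the enclosing structure from a pointer to its
+ * embedded ppp_file member, using the same offsetof() arithmetic as
+ * the kernel's container_of().
+ */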
+
+#define ROUNDUP(n, x) (((n) + (x) - 1) / (x))
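+/*
+ * Note that ROUNDUP(n, x) is really a ceiling division: it returns how
+ * many x-sized pieces are needed to cover n bytes.  ppp_mp_explode()
+ * relies on this when spreading a packet over several channels.
+ */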
+
+/*
+ * Data structure describing one ppp unit.
+ * A ppp unit corresponds to a ppp network interface device
+ * and represents a multilink bundle.
+ * It can have 0 or more ppp channels connected to it.
+ */
+struct ppp {
+ struct ppp_file file; /* stuff for read/write/poll 0 */
+ struct file *owner; /* file that owns this unit 48 */
+ struct list_head channels; /* list of attached channels 4c */
+ int n_channels; /* how many channels are attached 54 */
+ spinlock_t rlock; /* lock for receive side 58 */
+ spinlock_t wlock; /* lock for transmit side 5c */
+ int mru; /* max receive unit 60 */
+ unsigned int flags; /* control bits 64 */
+ unsigned int xstate; /* transmit state bits 68 */
+ unsigned int rstate; /* receive state bits 6c */
+ int debug; /* debug flags 70 */
+ struct slcompress *vj; /* state for VJ header compression */
+ enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
+ struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
+ struct compressor *xcomp; /* transmit packet compressor 8c */
+ void *xc_state; /* its internal state 90 */
+ struct compressor *rcomp; /* receive decompressor 94 */
+ void *rc_state; /* its internal state 98 */
+ unsigned long last_xmit; /* jiffies when last pkt sent 9c */
+ unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
+ struct net_device *dev; /* network interface device a4 */
+#ifdef CONFIG_PPP_MULTILINK
+ int nxchan; /* next channel to send something on */
+ u32 nxseq; /* next sequence number to send */
+ int mrru; /* MP: max reconst. receive unit */
+ u32 nextseq; /* MP: seq no of next packet */
+ u32 minseq; /* MP: min of most recent seqnos */
+ struct sk_buff_head mrq; /* MP: receive reconstruction queue */
+#endif /* CONFIG_PPP_MULTILINK */
+ struct net_device_stats stats; /* statistics */
+#ifdef CONFIG_PPP_FILTER
+ struct sock_filter *pass_filter; /* filter for packets to pass */
+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
+ unsigned pass_len, active_len;
+#endif /* CONFIG_PPP_FILTER */
+};
+
+/*
+ * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
+ * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP.
+ * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
+ * Bits in xstate: SC_COMP_RUN
+ */
+#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
+ |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
+ |SC_COMP_TCP|SC_REJ_COMP_TCP)
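+
+/*
+ * The PPPIOCSFLAGS ioctl masks the user-supplied value with
+ * SC_FLAG_BITS, so only the flag bits listed here can be set in
+ * ppp->flags from userspace.
+ */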
+
+/*
+ * Private data structure for each channel.
+ * This includes the data structure used for multilink.
+ */
+struct channel {
+ struct ppp_file file; /* stuff for read/write/poll */
+ struct list_head list; /* link in all/new_channels list */
+ struct ppp_channel *chan; /* public channel data structure */
+ struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
+ spinlock_t downl; /* protects `chan', file.xq dequeue */
+ struct ppp *ppp; /* ppp unit we're connected to */
+ struct list_head clist; /* link in list of channels per unit */
+ rwlock_t upl; /* protects `ppp' */
+#ifdef CONFIG_PPP_MULTILINK
+ u8 avail; /* flag used in multilink stuff */
+ u8 had_frag; /* >= 1 fragments have been sent */
+ u32 lastseq; /* MP: last sequence # received */
+#endif /* CONFIG_PPP_MULTILINK */
+};
+
+/*
+ * SMP locking issues:
+ * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
+ * list and the ppp.n_channels field; you need to take both locks
+ * before you modify them.
+ * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
+ * channel.downl.
+ */
+
+/*
+ * A cardmap represents a mapping from unsigned integers to pointers,
+ * and provides a fast "find lowest unused number" operation.
+ * It uses a broad (32-way) tree with a bitmap at each level.
+ * It is designed to be space-efficient for small numbers of entries
+ * and time-efficient for large numbers of entries.
+ */
+#define CARDMAP_ORDER 5
+#define CARDMAP_WIDTH (1U << CARDMAP_ORDER)
+#define CARDMAP_MASK (CARDMAP_WIDTH - 1)
+
+struct cardmap {
+ int shift;
+ unsigned long inuse;
+ struct cardmap *parent;
+ void *ptr[CARDMAP_WIDTH];
+};
+static void *cardmap_get(struct cardmap *map, unsigned int nr);
+static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static unsigned int cardmap_find_first_free(struct cardmap *map);
+static void cardmap_destroy(struct cardmap **map);
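+
+/*
+ * With CARDMAP_ORDER = 5 each cardmap node has 32 slots, so a tree of
+ * depth d covers 32^d unit numbers; the per-node 'inuse' bitmap is what
+ * lets the find-first-free search skip fully occupied subtrees.
+ */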
+
+/*
+ * all_ppp_sem protects the all_ppp_units mapping.
+ * It also ensures that finding a ppp unit in the all_ppp_units map
+ * and updating its file.refcnt field is atomic.
+ */
+static DECLARE_MUTEX(all_ppp_sem);
+static struct cardmap *all_ppp_units;
+static atomic_t ppp_unit_count = ATOMIC_INIT(0);
+
+/*
+ * all_channels_lock protects all_channels and last_channel_index,
+ * and the atomicity of finding a channel and updating its file.refcnt
+ * field.
+ */
+static DEFINE_SPINLOCK(all_channels_lock);
+static LIST_HEAD(all_channels);
+static LIST_HEAD(new_channels);
+static int last_channel_index;
+static atomic_t channel_count = ATOMIC_INIT(0);
+
+/* Get the PPP protocol number from a skb */
+#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
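+/*
+ * For example, an IPv4 frame starts with the protocol bytes 0x00 0x21,
+ * so PPP_PROTO() yields PPP_IP (0x0021).
+ */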
+
+/* We limit the length of ppp->file.rq to this (arbitrary) value */
+#define PPP_MAX_RQLEN 32
+
+/*
+ * Maximum number of multilink fragments queued up.
+ * This has to be large enough to cope with the maximum latency of
+ * the slowest channel relative to the others. Strictly it should
+ * depend on the number of channels and their characteristics.
+ */
+#define PPP_MP_MAX_QLEN 128
+
+/* Multilink header bits. */
+#define B 0x80 /* this fragment begins a packet */
+#define E 0x40 /* this fragment ends a packet */
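+/* A fragment with both B and E set carries a complete packet by itself. */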
+
+/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
+#define seq_before(a, b) ((s32)((a) - (b)) < 0)
+#define seq_after(a, b) ((s32)((a) - (b)) > 0)
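+/*
+ * The signed 32-bit difference keeps these comparisons correct across
+ * sequence-number wraparound, e.g. seq_before(0xfffffff0, 0x10) is true.
+ */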
+
+/* Prototypes. */
+static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
+static void ppp_push(struct ppp *ppp);
+static void ppp_channel_push(struct channel *pch);
+static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
+ struct channel *pch);
+static void ppp_receive_error(struct ppp *ppp);
+static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
+ struct sk_buff *skb);
+#ifdef CONFIG_PPP_MULTILINK
+static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
+ struct channel *pch);
+static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
+static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
+#endif /* CONFIG_PPP_MULTILINK */
+static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
+static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
+static void ppp_ccp_closed(struct ppp *ppp);
+static struct compressor *find_compressor(int type);
+static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
+static struct ppp *ppp_create_interface(int unit, int *retp);
+static void init_ppp_file(struct ppp_file *pf, int kind);
+static void ppp_shutdown_interface(struct ppp *ppp);
+static void ppp_destroy_interface(struct ppp *ppp);
+static struct ppp *ppp_find_unit(int unit);
+static struct channel *ppp_find_channel(int unit);
+static int ppp_connect_channel(struct channel *pch, int unit);
+static int ppp_disconnect_channel(struct channel *pch);
+static void ppp_destroy_channel(struct channel *pch);
+
+static struct class_simple *ppp_class;
+
+/* Translates a PPP protocol number to a NP index (NP == network protocol) */
+static inline int proto_to_npindex(int proto)
+{
+ switch (proto) {
+ case PPP_IP:
+ return NP_IP;
+ case PPP_IPV6:
+ return NP_IPV6;
+ case PPP_IPX:
+ return NP_IPX;
+ case PPP_AT:
+ return NP_AT;
+ case PPP_MPLS_UC:
+ return NP_MPLS_UC;
+ case PPP_MPLS_MC:
+ return NP_MPLS_MC;
+ }
+ return -EINVAL;
+}
+
+/* Translates an NP index into a PPP protocol number */
+static const int npindex_to_proto[NUM_NP] = {
+ PPP_IP,
+ PPP_IPV6,
+ PPP_IPX,
+ PPP_AT,
+ PPP_MPLS_UC,
+ PPP_MPLS_MC,
+};
+
+/* Translates an ethertype into an NP index */
+static inline int ethertype_to_npindex(int ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return NP_IP;
+ case ETH_P_IPV6:
+ return NP_IPV6;
+ case ETH_P_IPX:
+ return NP_IPX;
+ case ETH_P_PPPTALK:
+ case ETH_P_ATALK:
+ return NP_AT;
+ case ETH_P_MPLS_UC:
+ return NP_MPLS_UC;
+ case ETH_P_MPLS_MC:
+ return NP_MPLS_MC;
+ }
+ return -1;
+}
+
+/* Translates an NP index into an ethertype */
+static const int npindex_to_ethertype[NUM_NP] = {
+ ETH_P_IP,
+ ETH_P_IPV6,
+ ETH_P_IPX,
+ ETH_P_PPPTALK,
+ ETH_P_MPLS_UC,
+ ETH_P_MPLS_MC,
+};
+
+/*
+ * Locking shorthand.
+ */
+#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
+#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
+#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
+#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
+#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
+ ppp_recv_lock(ppp); } while (0)
+#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
+ ppp_xmit_unlock(ppp); } while (0)
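+
+/*
+ * Note that ppp_lock() takes the xmit lock before the recv lock and
+ * ppp_unlock() releases them in the reverse order, matching the lock
+ * ordering documented above (wlock before rlock).
+ */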
+
+/*
+ * /dev/ppp device routines.
+ * The /dev/ppp device is used by pppd to control the ppp unit.
+ * It supports the read, write, ioctl and poll functions.
+ * Open instances of /dev/ppp can be in one of three states:
+ * unattached, attached to a ppp unit, or attached to a ppp channel.
+ */
+static int ppp_open(struct inode *inode, struct file *file)
+{
+ /*
+ * This could (should?) be enforced by the permissions on /dev/ppp.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
+static int ppp_release(struct inode *inode, struct file *file)
+{
+ struct ppp_file *pf = file->private_data;
+ struct ppp *ppp;
+
+ if (pf != 0) {
+ file->private_data = NULL;
+ if (pf->kind == INTERFACE) {
+ ppp = PF_TO_PPP(pf);
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+ if (atomic_dec_and_test(&pf->refcnt)) {
+ switch (pf->kind) {
+ case INTERFACE:
+ ppp_destroy_interface(PF_TO_PPP(pf));
+ break;
+ case CHANNEL:
+ ppp_destroy_channel(PF_TO_CHANNEL(pf));
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static ssize_t ppp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ppp_file *pf = file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ ssize_t ret;
+ struct sk_buff *skb = NULL;
+
+ ret = count;
+
+ if (pf == 0)
+ return -ENXIO;
+ add_wait_queue(&pf->rwait, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ skb = skb_dequeue(&pf->rq);
+ if (skb)
+ break;
+ ret = 0;
+ if (pf->dead)
+ break;
+ if (pf->kind == INTERFACE) {
+ /*
+ * Return 0 (EOF) on an interface that has no
+ * channels connected, unless it is looping
+ * network traffic (demand mode).
+ */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0
+ && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ break;
+ }
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pf->rwait, &wait);
+
+ if (skb == 0)
+ goto out;
+
+ ret = -EOVERFLOW;
+ if (skb->len > count)
+ goto outf;
+ ret = -EFAULT;
+ if (copy_to_user(buf, skb->data, skb->len))
+ goto outf;
+ ret = skb->len;
+
+ outf:
+ kfree_skb(skb);
+ out:
+ return ret;
+}
+
+static ssize_t ppp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ppp_file *pf = file->private_data;
+ struct sk_buff *skb;
+ ssize_t ret;
+
+ if (pf == 0)
+ return -ENXIO;
+ ret = -ENOMEM;
+ skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
+ if (skb == 0)
+ goto out;
+ skb_reserve(skb, pf->hdrlen);
+ ret = -EFAULT;
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb_queue_tail(&pf->xq, skb);
+
+ switch (pf->kind) {
+ case INTERFACE:
+ ppp_xmit_process(PF_TO_PPP(pf));
+ break;
+ case CHANNEL:
+ ppp_channel_push(PF_TO_CHANNEL(pf));
+ break;
+ }
+
+ ret = count;
+
+ out:
+ return ret;
+}
+
+/* No kernel lock - fine */
+static unsigned int ppp_poll(struct file *file, poll_table *wait)
+{
+ struct ppp_file *pf = file->private_data;
+ unsigned int mask;
+
+ if (pf == 0)
+ return 0;
+ poll_wait(file, &pf->rwait, wait);
+ mask = POLLOUT | POLLWRNORM;
+ if (skb_peek(&pf->rq) != 0)
+ mask |= POLLIN | POLLRDNORM;
+ if (pf->dead)
+ mask |= POLLHUP;
+ else if (pf->kind == INTERFACE) {
+ /* see comment in ppp_read */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0
+ && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ return mask;
+}
+
+#ifdef CONFIG_PPP_FILTER
+static int get_filter(void __user *arg, struct sock_filter **p)
+{
+ struct sock_fprog uprog;
+ struct sock_filter *code = NULL;
+ int len, err;
+
+ if (copy_from_user(&uprog, arg, sizeof(uprog)))
+ return -EFAULT;
+
+ if (uprog.len > BPF_MAXINSNS)
+ return -EINVAL;
+
+ if (!uprog.len) {
+ *p = NULL;
+ return 0;
+ }
+
+ len = uprog.len * sizeof(struct sock_filter);
+ code = kmalloc(len, GFP_KERNEL);
+ if (code == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(code, uprog.filter, len)) {
+ kfree(code);
+ return -EFAULT;
+ }
+
+ err = sk_chk_filter(code, uprog.len);
+ if (err) {
+ kfree(code);
+ return err;
+ }
+
+ *p = code;
+ return uprog.len;
+}
+#endif /* CONFIG_PPP_FILTER */
+
+static int ppp_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ppp_file *pf = file->private_data;
+ struct ppp *ppp;
+ int err = -EFAULT, val, val2, i;
+ struct ppp_idle idle;
+ struct npioctl npi;
+ int unit, cflags;
+ struct slcompress *vj;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+
+ if (pf == 0)
+ return ppp_unattached_ioctl(pf, file, cmd, arg);
+
+ if (cmd == PPPIOCDETACH) {
+ /*
+ * We have to be careful here... if the file descriptor
+ * has been dup'd, we could have another process in the
+ * middle of a poll using the same file *, so we had
+ * better not free the interface data structures -
+ * instead we fail the ioctl. Even in this case, we
+ * shut down the interface if we are the owner of it.
+ * Actually, we should get rid of PPPIOCDETACH; userland
+ * (i.e. pppd) could achieve the same effect by closing
+ * this fd and reopening /dev/ppp.
+ */
+ err = -EINVAL;
+ if (pf->kind == INTERFACE) {
+ ppp = PF_TO_PPP(pf);
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+ if (atomic_read(&file->f_count) <= 2) {
+ ppp_release(inode, file);
+ err = 0;
+ } else
+ printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
+ atomic_read(&file->f_count));
+ return err;
+ }
+
+ if (pf->kind == CHANNEL) {
+ struct channel *pch = PF_TO_CHANNEL(pf);
+ struct ppp_channel *chan;
+
+ switch (cmd) {
+ case PPPIOCCONNECT:
+ if (get_user(unit, p))
+ break;
+ err = ppp_connect_channel(pch, unit);
+ break;
+
+ case PPPIOCDISCONN:
+ err = ppp_disconnect_channel(pch);
+ break;
+
+ default:
+ down_read(&pch->chan_sem);
+ chan = pch->chan;
+ err = -ENOTTY;
+ if (chan && chan->ops->ioctl)
+ err = chan->ops->ioctl(chan, cmd, arg);
+ up_read(&pch->chan_sem);
+ }
+ return err;
+ }
+
+ if (pf->kind != INTERFACE) {
+ /* can't happen */
+ printk(KERN_ERR "PPP: not interface or channel??\n");
+ return -EINVAL;
+ }
+
+ ppp = PF_TO_PPP(pf);
+ switch (cmd) {
+ case PPPIOCSMRU:
+ if (get_user(val, p))
+ break;
+ ppp->mru = val;
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ if (get_user(val, p))
+ break;
+ ppp_lock(ppp);
+ cflags = ppp->flags & ~val;
+ ppp->flags = val & SC_FLAG_BITS;
+ ppp_unlock(ppp);
+ if (cflags & SC_CCP_OPEN)
+ ppp_ccp_closed(ppp);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ val = ppp->flags | ppp->xstate | ppp->rstate;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSCOMPRESS:
+ err = ppp_set_compress(ppp, arg);
+ break;
+
+ case PPPIOCGUNIT:
+ if (put_user(ppp->file.index, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSDEBUG:
+ if (get_user(val, p))
+ break;
+ ppp->debug = val;
+ err = 0;
+ break;
+
+ case PPPIOCGDEBUG:
+ if (put_user(ppp->debug, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGIDLE:
+ idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle, sizeof(idle)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSMAXCID:
+ if (get_user(val, p))
+ break;
+ val2 = 15;
+ if ((val >> 16) != 0) {
+ val2 = val >> 16;
+ val &= 0xffff;
+ }
+ vj = slhc_init(val2+1, val+1);
+ if (vj == 0) {
+ printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+ err = -ENOMEM;
+ break;
+ }
+ ppp_lock(ppp);
+ if (ppp->vj != 0)
+ slhc_free(ppp->vj);
+ ppp->vj = vj;
+ ppp_unlock(ppp);
+ err = 0;
+ break;
+
+ case PPPIOCGNPMODE:
+ case PPPIOCSNPMODE:
+ if (copy_from_user(&npi, argp, sizeof(npi)))
+ break;
+ err = proto_to_npindex(npi.protocol);
+ if (err < 0)
+ break;
+ i = err;
+ if (cmd == PPPIOCGNPMODE) {
+ err = -EFAULT;
+ npi.mode = ppp->npmode[i];
+ if (copy_to_user(argp, &npi, sizeof(npi)))
+ break;
+ } else {
+ ppp->npmode[i] = npi.mode;
+ /* we may be able to transmit more packets now (??) */
+ netif_wake_queue(ppp->dev);
+ }
+ err = 0;
+ break;
+
+#ifdef CONFIG_PPP_FILTER
+ case PPPIOCSPASS:
+ {
+ struct sock_filter *code;
+ err = get_filter(argp, &code);
+ if (err >= 0) {
+ ppp_lock(ppp);
+ kfree(ppp->pass_filter);
+ ppp->pass_filter = code;
+ ppp->pass_len = err;
+ ppp_unlock(ppp);
+ err = 0;
+ }
+ break;
+ }
+ case PPPIOCSACTIVE:
+ {
+ struct sock_filter *code;
+ err = get_filter(argp, &code);
+ if (err >= 0) {
+ ppp_lock(ppp);
+ kfree(ppp->active_filter);
+ ppp->active_filter = code;
+ ppp->active_len = err;
+ ppp_unlock(ppp);
+ err = 0;
+ }
+ break;
+ }
+#endif /* CONFIG_PPP_FILTER */
+
+#ifdef CONFIG_PPP_MULTILINK
+ case PPPIOCSMRRU:
+ if (get_user(val, p))
+ break;
+ ppp_recv_lock(ppp);
+ ppp->mrru = val;
+ ppp_recv_unlock(ppp);
+ err = 0;
+ break;
+#endif /* CONFIG_PPP_MULTILINK */
+
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int unit, err = -EFAULT;
+ struct ppp *ppp;
+ struct channel *chan;
+ int __user *p = (int __user *)arg;
+
+ switch (cmd) {
+ case PPPIOCNEWUNIT:
+ /* Create a new ppp unit */
+ if (get_user(unit, p))
+ break;
+ ppp = ppp_create_interface(unit, &err);
+ if (ppp == 0)
+ break;
+ file->private_data = &ppp->file;
+ ppp->owner = file;
+ err = -EFAULT;
+ if (put_user(ppp->file.index, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCATTACH:
+ /* Attach to an existing ppp unit */
+ if (get_user(unit, p))
+ break;
+ down(&all_ppp_sem);
+ err = -ENXIO;
+ ppp = ppp_find_unit(unit);
+ if (ppp != 0) {
+ atomic_inc(&ppp->file.refcnt);
+ file->private_data = &ppp->file;
+ err = 0;
+ }
+ up(&all_ppp_sem);
+ break;
+
+ case PPPIOCATTCHAN:
+ if (get_user(unit, p))
+ break;
+ spin_lock_bh(&all_channels_lock);
+ err = -ENXIO;
+ chan = ppp_find_channel(unit);
+ if (chan != 0) {
+ atomic_inc(&chan->file.refcnt);
+ file->private_data = &chan->file;
+ err = 0;
+ }
+ spin_unlock_bh(&all_channels_lock);
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+ return err;
+}
+
+static struct file_operations ppp_device_fops = {
+ .owner = THIS_MODULE,
+ .read = ppp_read,
+ .write = ppp_write,
+ .poll = ppp_poll,
+ .ioctl = ppp_ioctl,
+ .open = ppp_open,
+ .release = ppp_release
+};
+
+#define PPP_MAJOR 108
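+/* 108 is the character-device major number registered for /dev/ppp. */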
+
+/* Called at boot time if ppp is compiled into the kernel,
+ or at module load time (from init_module) if compiled as a module. */
+static int __init ppp_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+ err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
+ if (!err) {
+ ppp_class = class_simple_create(THIS_MODULE, "ppp");
+ if (IS_ERR(ppp_class)) {
+ err = PTR_ERR(ppp_class);
+ goto out_chrdev;
+ }
+ class_simple_device_add(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+ err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
+ S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
+ if (err)
+ goto out_class;
+ }
+
+out:
+ if (err)
+ printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+ return err;
+
+out_class:
+ class_simple_device_remove(MKDEV(PPP_MAJOR,0));
+ class_simple_destroy(ppp_class);
+out_chrdev:
+ unregister_chrdev(PPP_MAJOR, "ppp");
+ goto out;
+}
+
+/*
+ * Network interface unit routines.
+ */
+static int
+ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ppp *ppp = (struct ppp *) dev->priv;
+ int npi, proto;
+ unsigned char *pp;
+
+ npi = ethertype_to_npindex(ntohs(skb->protocol));
+ if (npi < 0)
+ goto outf;
+
+ /* Drop, accept or reject the packet */
+ switch (ppp->npmode[npi]) {
+ case NPMODE_PASS:
+ break;
+ case NPMODE_QUEUE:
+ /* it would be nice to have a way to tell the network
+ system to queue this one up for later. */
+ goto outf;
+ case NPMODE_DROP:
+ case NPMODE_ERROR:
+ goto outf;
+ }
+
+ /* Put the 2-byte PPP protocol number on the front,
+ making sure there is room for the address and control fields. */
+ if (skb_headroom(skb) < PPP_HDRLEN) {
+ struct sk_buff *ns;
+
+ ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC);
+ if (ns == 0)
+ goto outf;
+ skb_reserve(ns, dev->hard_header_len);
+ skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
+ kfree_skb(skb);
+ skb = ns;
+ }
+ pp = skb_push(skb, 2);
+ proto = npindex_to_proto[npi];
+ pp[0] = proto >> 8;
+ pp[1] = proto;
+
+ netif_stop_queue(dev);
+ skb_queue_tail(&ppp->file.xq, skb);
+ ppp_xmit_process(ppp);
+ return 0;
+
+ outf:
+ kfree_skb(skb);
+ ++ppp->stats.tx_dropped;
+ return 0;
+}
+
+static struct net_device_stats *
+ppp_net_stats(struct net_device *dev)
+{
+ struct ppp *ppp = (struct ppp *) dev->priv;
+
+ return &ppp->stats;
+}
+
+static int
+ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ppp *ppp = dev->priv;
+ int err = -EFAULT;
+ void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+ struct ppp_stats stats;
+ struct ppp_comp_stats cstats;
+ char *vers;
+
+ switch (cmd) {
+ case SIOCGPPPSTATS:
+ ppp_get_stats(ppp, &stats);
+ if (copy_to_user(addr, &stats, sizeof(stats)))
+ break;
+ err = 0;
+ break;
+
+ case SIOCGPPPCSTATS:
+ memset(&cstats, 0, sizeof(cstats));
+ if (ppp->xc_state != 0)
+ ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
+ if (ppp->rc_state != 0)
+ ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
+ if (copy_to_user(addr, &cstats, sizeof(cstats)))
+ break;
+ err = 0;
+ break;
+
+ case SIOCGPPPVER:
+ vers = PPP_VERSION;
+ if (copy_to_user(addr, vers, strlen(vers) + 1))
+ break;
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static void ppp_setup(struct net_device *dev)
+{
+ dev->hard_header_len = PPP_HDRLEN;
+ dev->mtu = PPP_MTU;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 3;
+ dev->type = ARPHRD_PPP;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Called to do any work queued up on the transmit side
+ * that can now be done.
+ */
+static void
+ppp_xmit_process(struct ppp *ppp)
+{
+ struct sk_buff *skb;
+
+ ppp_xmit_lock(ppp);
+ if (ppp->dev != 0) {
+ ppp_push(ppp);
+ while (ppp->xmit_pending == 0
+ && (skb = skb_dequeue(&ppp->file.xq)) != 0)
+ ppp_send_frame(ppp, skb);
+ /* If there's no work left to do, tell the core net
+ code that we can accept some more. */
+ if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0)
+ netif_wake_queue(ppp->dev);
+ }
+ ppp_xmit_unlock(ppp);
+}
+
+/*
+ * Compress and send a frame.
+ * The caller should have locked the xmit path,
+ * and xmit_pending should be 0.
+ */
+static void
+ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ int proto = PPP_PROTO(skb);
+ struct sk_buff *new_skb;
+ int len;
+ unsigned char *cp;
+
+ if (proto < 0x8000) {
+#ifdef CONFIG_PPP_FILTER
+ /* check if we should pass this packet */
+ /* the filter instructions are constructed assuming
+ a four-byte PPP header on each packet */
+ *skb_push(skb, 2) = 1;
+ if (ppp->pass_filter
+ && sk_run_filter(skb, ppp->pass_filter,
+ ppp->pass_len) == 0) {
+ if (ppp->debug & 1)
+ printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+ kfree_skb(skb);
+ return;
+ }
+ /* if this packet passes the active filter, record the time */
+ if (!(ppp->active_filter
+ && sk_run_filter(skb, ppp->active_filter,
+ ppp->active_len) == 0))
+ ppp->last_xmit = jiffies;
+ skb_pull(skb, 2);
+#else
+ /* for data packets, record the time */
+ ppp->last_xmit = jiffies;
+#endif /* CONFIG_PPP_FILTER */
+ }
+
+ ++ppp->stats.tx_packets;
+ ppp->stats.tx_bytes += skb->len - 2;
+
+ switch (proto) {
+ case PPP_IP:
+ if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0)
+ break;
+ /* try to do VJ TCP header compression */
+ new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
+ GFP_ATOMIC);
+ if (new_skb == 0) {
+ printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+ goto drop;
+ }
+ skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
+ cp = skb->data + 2;
+ len = slhc_compress(ppp->vj, cp, skb->len - 2,
+ new_skb->data + 2, &cp,
+ !(ppp->flags & SC_NO_TCP_CCID));
+ if (cp == skb->data + 2) {
+ /* didn't compress */
+ kfree_skb(new_skb);
+ } else {
+ if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
+ proto = PPP_VJC_COMP;
+ cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
+ } else {
+ proto = PPP_VJC_UNCOMP;
+ cp[0] = skb->data[2];
+ }
+ kfree_skb(skb);
+ skb = new_skb;
+ cp = skb_put(skb, len + 2);
+ cp[0] = 0;
+ cp[1] = proto;
+ }
+ break;
+
+ case PPP_CCP:
+ /* peek at outbound CCP frames */
+ ppp_ccp_peek(ppp, skb, 0);
+ break;
+ }
+
+ /* try to do packet compression */
+ if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
+ && proto != PPP_LCP && proto != PPP_CCP) {
+ new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len,
+ GFP_ATOMIC);
+ if (new_skb == 0) {
+ printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ goto drop;
+ }
+ if (ppp->dev->hard_header_len > PPP_HDRLEN)
+ skb_reserve(new_skb,
+ ppp->dev->hard_header_len - PPP_HDRLEN);
+
+ /* compressor still expects A/C bytes in hdr */
+ len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
+ new_skb->data, skb->len + 2,
+ ppp->dev->mtu + PPP_HDRLEN);
+ if (len > 0 && (ppp->flags & SC_CCP_UP)) {
+ kfree_skb(skb);
+ skb = new_skb;
+ skb_put(skb, len);
+ skb_pull(skb, 2); /* pull off A/C bytes */
+ } else {
+ /* didn't compress, or CCP not up yet */
+ kfree_skb(new_skb);
+ }
+ }
+
+ /*
+ * If we are waiting for traffic (demand dialling),
+ * queue it up for pppd to receive.
+ */
+ if (ppp->flags & SC_LOOP_TRAFFIC) {
+ if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
+ goto drop;
+ skb_queue_tail(&ppp->file.rq, skb);
+ wake_up_interruptible(&ppp->file.rwait);
+ return;
+ }
+
+ ppp->xmit_pending = skb;
+ ppp_push(ppp);
+ return;
+
+ drop:
+ kfree_skb(skb);
+ ++ppp->stats.tx_errors;
+}
+
+/*
+ * Try to send the frame in xmit_pending.
+ * The caller should have the xmit path locked.
+ */
+static void
+ppp_push(struct ppp *ppp)
+{
+ struct list_head *list;
+ struct channel *pch;
+ struct sk_buff *skb = ppp->xmit_pending;
+
+ if (skb == 0)
+ return;
+
+ list = &ppp->channels;
+ if (list_empty(list)) {
+ /* nowhere to send the packet, just drop it */
+ ppp->xmit_pending = NULL;
+ kfree_skb(skb);
+ return;
+ }
+
+ if ((ppp->flags & SC_MULTILINK) == 0) {
+ /* not doing multilink: send it down the first channel */
+ list = list->next;
+ pch = list_entry(list, struct channel, clist);
+
+ spin_lock_bh(&pch->downl);
+ if (pch->chan) {
+ if (pch->chan->ops->start_xmit(pch->chan, skb))
+ ppp->xmit_pending = NULL;
+ } else {
+ /* channel got unregistered */
+ kfree_skb(skb);
+ ppp->xmit_pending = NULL;
+ }
+ spin_unlock_bh(&pch->downl);
+ return;
+ }
+
+#ifdef CONFIG_PPP_MULTILINK
+ /* Multilink: fragment the packet over as many links
+ as can take the packet at the moment. */
+ if (!ppp_mp_explode(ppp, skb))
+ return;
+#endif /* CONFIG_PPP_MULTILINK */
+
+ ppp->xmit_pending = NULL;
+ kfree_skb(skb);
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+/*
+ * Divide a packet to be transmitted into fragments and
+ * send them out the individual links.
+ */
+static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
+{
+ int nch, len, fragsize;
+ int i, bits, hdrlen, mtu;
+ int flen, fnb;
+ unsigned char *p, *q;
+ struct list_head *list;
+ struct channel *pch;
+ struct sk_buff *frag;
+ struct ppp_channel *chan;
+
+ nch = 0;
+ hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
+ list = &ppp->channels;
+ while ((list = list->next) != &ppp->channels) {
+ pch = list_entry(list, struct channel, clist);
+ nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
+ /*
+ * If a channel hasn't had a fragment yet, it has to get
+ * one before we send any fragments on later channels.
+ * If it can't take a fragment now, don't give any
+ * to subsequent channels.
+ */
+ if (!pch->had_frag && !pch->avail) {
+ while ((list = list->next) != &ppp->channels) {
+ pch = list_entry(list, struct channel, clist);
+ pch->avail = 0;
+ }
+ break;
+ }
+ }
+ if (nch == 0)
+ return 0; /* can't take now, leave it in xmit_pending */
+
+ /* Do protocol field compression (XXX this should be optional) */
+ p = skb->data;
+ len = skb->len;
+ if (*p == 0) {
+ ++p;
+ --len;
+ }
+
+ /* decide on fragment size */
+ fragsize = len;
+ if (nch > 1) {
+ int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
+ if (nch > maxch)
+ nch = maxch;
+ fragsize = ROUNDUP(fragsize, nch);
+ }
+
+ /* skip to the channel after the one we last used
+ and start at that one */
+ for (i = 0; i < ppp->nxchan; ++i) {
+ list = list->next;
+ if (list == &ppp->channels) {
+ i = 0;
+ break;
+ }
+ }
+
+ /* create a fragment for each channel */
+ bits = B;
+ do {
+ list = list->next;
+ if (list == &ppp->channels) {
+ i = 0;
+ continue;
+ }
+ pch = list_entry(list, struct channel, clist);
+ ++i;
+ if (!pch->avail)
+ continue;
+
+ /* check the channel's mtu and whether it is still attached. */
+ spin_lock_bh(&pch->downl);
+ if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
+ /* can't use this channel */
+ spin_unlock_bh(&pch->downl);
+ pch->avail = 0;
+ if (--nch == 0)
+ break;
+ continue;
+ }
+
+ /*
+ * We have to create multiple fragments for this channel
+ * if fragsize is greater than the channel's mtu.
+ */
+ if (fragsize > len)
+ fragsize = len;
+ for (flen = fragsize; flen > 0; flen -= fnb) {
+ fnb = flen;
+ if (fnb > mtu + 2 - hdrlen)
+ fnb = mtu + 2 - hdrlen;
+ if (fnb >= len)
+ bits |= E;
+ frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
+ if (frag == 0)
+ goto noskb;
+ q = skb_put(frag, fnb + hdrlen);
+ /* make the MP header */
+ q[0] = PPP_MP >> 8;
+ q[1] = PPP_MP;
+ if (ppp->flags & SC_MP_XSHORTSEQ) {
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[3] = ppp->nxseq;
+ } else {
+ q[2] = bits;
+ q[3] = ppp->nxseq >> 16;
+ q[4] = ppp->nxseq >> 8;
+ q[5] = ppp->nxseq;
+ }
+
+ /* copy the data in */
+ memcpy(q + hdrlen, p, fnb);
+
+ /* try to send it down the channel */
+ chan = pch->chan;
+ if (!chan->ops->start_xmit(chan, frag))
+ skb_queue_tail(&pch->file.xq, frag);
+ pch->had_frag = 1;
+ p += fnb;
+ len -= fnb;
+ ++ppp->nxseq;
+ bits = 0;
+ }
+ spin_unlock_bh(&pch->downl);
+ } while (len > 0);
+ ppp->nxchan = i;
+
+ return 1;
+
+ noskb:
+ spin_unlock_bh(&pch->downl);
+ if (ppp->debug & 1)
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
+ ++ppp->stats.tx_errors;
+ ++ppp->nxseq;
+ return 1; /* abandon the frame */
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/*
+ * Try to send data out on a channel.
+ */
+static void
+ppp_channel_push(struct channel *pch)
+{
+ struct sk_buff *skb;
+ struct ppp *ppp;
+
+ spin_lock_bh(&pch->downl);
+ if (pch->chan != 0) {
+ while (skb_queue_len(&pch->file.xq) > 0) {
+ skb = skb_dequeue(&pch->file.xq);
+ if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
+ /* put the packet back and try again later */
+ skb_queue_head(&pch->file.xq, skb);
+ break;
+ }
+ }
+ } else {
+ /* channel got deregistered */
+ skb_queue_purge(&pch->file.xq);
+ }
+ spin_unlock_bh(&pch->downl);
+ /* see if there is anything from the attached unit to be sent */
+ if (skb_queue_len(&pch->file.xq) == 0) {
+ read_lock_bh(&pch->upl);
+ ppp = pch->ppp;
+ if (ppp != 0)
+ ppp_xmit_process(ppp);
+ read_unlock_bh(&pch->upl);
+ }
+}
+
+/*
+ * Receive-side routines.
+ */
+
+/* misuse a few fields of the skb for MP reconstruction */
+#define sequence priority
+#define BEbits cb[0]
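+/*
+ * 'sequence' holds the 32-bit-extended multilink sequence number and
+ * 'BEbits' the begin/end flags, as filled in by ppp_receive_mp_frame().
+ */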
+
+static inline void
+ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ ppp_recv_lock(ppp);
+ /* ppp->dev == 0 means interface is closing down */
+ if (ppp->dev != 0)
+ ppp_receive_frame(ppp, skb, pch);
+ else
+ kfree_skb(skb);
+ ppp_recv_unlock(ppp);
+}
+
+void
+ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct channel *pch = chan->ppp;
+ int proto;
+
+ if (pch == 0 || skb->len == 0) {
+ kfree_skb(skb);
+ return;
+ }
+
+ proto = PPP_PROTO(skb);
+ read_lock_bh(&pch->upl);
+ if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+ /* put it on the channel queue */
+ skb_queue_tail(&pch->file.rq, skb);
+ /* drop old frames if queue too long */
+ while (pch->file.rq.qlen > PPP_MAX_RQLEN
+ && (skb = skb_dequeue(&pch->file.rq)) != 0)
+ kfree_skb(skb);
+ wake_up_interruptible(&pch->file.rwait);
+ } else {
+ ppp_do_recv(pch->ppp, skb, pch);
+ }
+ read_unlock_bh(&pch->upl);
+}
+
+/* Put a 0-length skb in the receive queue as an error indication */
+void
+ppp_input_error(struct ppp_channel *chan, int code)
+{
+ struct channel *pch = chan->ppp;
+ struct sk_buff *skb;
+
+ if (pch == 0)
+ return;
+
+ read_lock_bh(&pch->upl);
+ if (pch->ppp != 0) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (skb != 0) {
+ skb->len = 0; /* probably unnecessary */
+ skb->cb[0] = code;
+ ppp_do_recv(pch->ppp, skb, pch);
+ }
+ }
+ read_unlock_bh(&pch->upl);
+}
+
+/*
+ * We come in here to process a received frame.
+ * The receive side of the ppp unit is locked.
+ */
+static void
+ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ if (skb->len >= 2) {
+#ifdef CONFIG_PPP_MULTILINK
+ /* XXX do channel-level decompression here */
+ if (PPP_PROTO(skb) == PPP_MP)
+ ppp_receive_mp_frame(ppp, skb, pch);
+ else
+#endif /* CONFIG_PPP_MULTILINK */
+ ppp_receive_nonmp_frame(ppp, skb);
+ return;
+ }
+
+ if (skb->len > 0)
+ /* note: a 0-length skb is used as an error indication */
+ ++ppp->stats.rx_length_errors;
+
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+}
+
+static void
+ppp_receive_error(struct ppp *ppp)
+{
+ ++ppp->stats.rx_errors;
+ if (ppp->vj != 0)
+ slhc_toss(ppp->vj);
+}
+
+static void
+ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *ns;
+ int proto, len, npi;
+
+ /*
+ * Decompress the frame, if compressed.
+ * Note that some decompressors need to see uncompressed frames
+ * that come in as well as compressed frames.
+ */
+ if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN)
+ && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
+ skb = ppp_decompress_frame(ppp, skb);
+
+ proto = PPP_PROTO(skb);
+ switch (proto) {
+ case PPP_VJC_COMP:
+ /* decompress VJ compressed packets */
+ if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
+ goto err;
+
+ if (skb_tailroom(skb) < 124) {
+ /* copy to a new sk_buff with more tailroom */
+ ns = dev_alloc_skb(skb->len + 128);
+ if (ns == 0) {
+ printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+ goto err;
+ }
+ skb_reserve(ns, 2);
+ skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
+ kfree_skb(skb);
+ skb = ns;
+ }
+ else if (!pskb_may_pull(skb, skb->len))
+ goto err;
+
+ len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
+ if (len <= 0) {
+ printk(KERN_DEBUG "PPP: VJ decompression error\n");
+ goto err;
+ }
+ len += 2;
+ if (len > skb->len)
+ skb_put(skb, len - skb->len);
+ else if (len < skb->len)
+ skb_trim(skb, len);
+ proto = PPP_IP;
+ break;
+
+ case PPP_VJC_UNCOMP:
+ if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
+ goto err;
+
+ /* Until we fix the decompressor, we need to make sure the
+ * data portion is linear.
+ */
+ if (!pskb_may_pull(skb, skb->len))
+ goto err;
+
+ if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
+ printk(KERN_ERR "PPP: VJ uncompressed error\n");
+ goto err;
+ }
+ proto = PPP_IP;
+ break;
+
+ case PPP_CCP:
+ ppp_ccp_peek(ppp, skb, 1);
+ break;
+ }
+
+ ++ppp->stats.rx_packets;
+ ppp->stats.rx_bytes += skb->len - 2;
+
+ npi = proto_to_npindex(proto);
+ if (npi < 0) {
+ /* control or unknown frame - pass it to pppd */
+ skb_queue_tail(&ppp->file.rq, skb);
+ /* limit queue length by dropping old frames */
+ while (ppp->file.rq.qlen > PPP_MAX_RQLEN
+ && (skb = skb_dequeue(&ppp->file.rq)) != 0)
+ kfree_skb(skb);
+ /* wake up any process polling or blocking on read */
+ wake_up_interruptible(&ppp->file.rwait);
+
+ } else {
+ /* network protocol frame - give it to the kernel */
+
+#ifdef CONFIG_PPP_FILTER
+ /* check if the packet passes the pass and active filters */
+ /* the filter instructions are constructed assuming
+ a four-byte PPP header on each packet */
+ *skb_push(skb, 2) = 0;
+ if (ppp->pass_filter
+ && sk_run_filter(skb, ppp->pass_filter,
+ ppp->pass_len) == 0) {
+ if (ppp->debug & 1)
+ printk(KERN_DEBUG "PPP: inbound frame not passed\n");
+ kfree_skb(skb);
+ return;
+ }
+ if (!(ppp->active_filter
+ && sk_run_filter(skb, ppp->active_filter,
+ ppp->active_len) == 0))
+ ppp->last_recv = jiffies;
+ skb_pull(skb, 2);
+#else
+ ppp->last_recv = jiffies;
+#endif /* CONFIG_PPP_FILTER */
+
+ if ((ppp->dev->flags & IFF_UP) == 0
+ || ppp->npmode[npi] != NPMODE_PASS) {
+ kfree_skb(skb);
+ } else {
+ skb_pull(skb, 2); /* chop off protocol */
+ skb->dev = ppp->dev;
+ skb->protocol = htons(npindex_to_ethertype[npi]);
+ skb->mac.raw = skb->data;
+ skb->input_dev = ppp->dev;
+ netif_rx(skb);
+ ppp->dev->last_rx = jiffies;
+ }
+ }
+ return;
+
+ err:
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+}
+
+static struct sk_buff *
+ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
+{
+ int proto = PPP_PROTO(skb);
+ struct sk_buff *ns;
+ int len;
+
+ /* Until we fix all the decompressors, we need to make sure the
+ * data portion is linear.
+ */
+ if (!pskb_may_pull(skb, skb->len))
+ goto err;
+
+ if (proto == PPP_COMP) {
+ ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN);
+ if (ns == 0) {
+ printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+ goto err;
+ }
+ /* the decompressor still expects the A/C bytes in the hdr */
+ len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
+ skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN);
+ if (len < 0) {
+ /* Pass the compressed frame to pppd as an
+ error indication. */
+ if (len == DECOMP_FATALERROR)
+ ppp->rstate |= SC_DC_FERROR;
+ kfree_skb(ns);
+ goto err;
+ }
+
+ kfree_skb(skb);
+ skb = ns;
+ skb_put(skb, len);
+ skb_pull(skb, 2); /* pull off the A/C bytes */
+
+ } else {
+ /* Uncompressed frame - pass to decompressor so it
+ can update its dictionary if necessary. */
+ if (ppp->rcomp->incomp)
+ ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
+ skb->len + 2);
+ }
+
+ return skb;
+
+ err:
+ ppp->rstate |= SC_DC_ERROR;
+ ppp_receive_error(ppp);
+ return skb;
+}
+
+#ifdef CONFIG_PPP_MULTILINK
+/*
+ * Receive a multilink frame.
+ * We put it on the reconstruction queue and then pull off
+ * as many completed frames as we can.
+ */
+static void
+ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+{
+ u32 mask, seq;
+ struct list_head *l;
+ int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
+
+ if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+ goto err; /* no good, throw it away */
+
+ /* Decode sequence number and begin/end bits */
+ if (ppp->flags & SC_MP_SHORTSEQ) {
+ seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
+ mask = 0xfff;
+ } else {
+ seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
+ mask = 0xffffff;
+ }
+ skb->BEbits = skb->data[2];
+ skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
+
+ /*
+ * Do protocol ID decompression on the first fragment of each packet.
+ */
+ if ((skb->BEbits & B) && (skb->data[0] & 1))
+ *skb_push(skb, 1) = 0;
+
+ /*
+ * Expand sequence number to 32 bits, making it as close
+ * as possible to ppp->minseq.
+ */
+ seq |= ppp->minseq & ~mask;
+ if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
+ seq += mask + 1;
+ else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
+ seq -= mask + 1; /* should never happen */
+ skb->sequence = seq;
+ pch->lastseq = seq;
+
+ /*
+ * If this packet comes before the next one we were expecting,
+ * drop it.
+ */
+ if (seq_before(seq, ppp->nextseq)) {
+ kfree_skb(skb);
+ ++ppp->stats.rx_dropped;
+ ppp_receive_error(ppp);
+ return;
+ }
+
+ /*
+ * Reevaluate minseq, the minimum over all channels of the
+ * last sequence number received on each channel. Because of
+ * the increasing sequence number rule, we know that any fragment
+ * before `minseq' which hasn't arrived is never going to arrive.
+ * The list of channels can't change because we have the receive
+ * side of the ppp unit locked.
+ */
+ for (l = ppp->channels.next; l != &ppp->channels; l = l->next) {
+ struct channel *ch = list_entry(l, struct channel, clist);
+ if (seq_before(ch->lastseq, seq))
+ seq = ch->lastseq;
+ }
+ if (seq_before(ppp->minseq, seq))
+ ppp->minseq = seq;
+
+ /* Put the fragment on the reconstruction queue */
+ ppp_mp_insert(ppp, skb);
+
+ /* If the queue is getting long, don't wait any longer for packets
+ before the start of the queue. */
+ if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN
+ && seq_before(ppp->minseq, ppp->mrq.next->sequence))
+ ppp->minseq = ppp->mrq.next->sequence;
+
+ /* Pull completed packets off the queue and receive them. */
+ while ((skb = ppp_mp_reconstruct(ppp)) != 0)
+ ppp_receive_nonmp_frame(ppp, skb);
+
+ return;
+
+ err:
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
+}
+
+/*
+ * Insert a fragment on the MP reconstruction queue.
+ * The queue is ordered by increasing sequence number.
+ */
+static void
+ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+ struct sk_buff_head *list = &ppp->mrq;
+ u32 seq = skb->sequence;
+
+ /* N.B. we don't need to lock the list lock because we have the
+ ppp unit receive-side lock. */
+ for (p = list->next; p != (struct sk_buff *)list; p = p->next)
+ if (seq_before(seq, p->sequence))
+ break;
+ __skb_insert(skb, p->prev, p, list);
+}
+
+/*
+ * Reconstruct a packet from the MP fragment queue.
+ * We go through increasing sequence numbers until we find a
+ * complete packet, or we get to the sequence number for a fragment
+ * which hasn't arrived but might still do so.
+ */
+struct sk_buff *
+ppp_mp_reconstruct(struct ppp *ppp)
+{
+ u32 seq = ppp->nextseq;
+ u32 minseq = ppp->minseq;
+ struct sk_buff_head *list = &ppp->mrq;
+ struct sk_buff *p, *next;
+ struct sk_buff *head, *tail;
+ struct sk_buff *skb = NULL;
+ int lost = 0, len = 0;
+
+ if (ppp->mrru == 0) /* do nothing until mrru is set */
+ return NULL;
+ head = list->next;
+ tail = NULL;
+ for (p = head; p != (struct sk_buff *) list; p = next) {
+ next = p->next;
+ if (seq_before(p->sequence, seq)) {
+ /* this can't happen; ignore the skb anyway */
+ printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
+ p->sequence, seq);
+ head = next;
+ continue;
+ }
+ if (p->sequence != seq) {
+ /* Fragment `seq' is missing. If it is after
+ minseq, it might arrive later, so stop here. */
+ if (seq_after(seq, minseq))
+ break;
+ /* Fragment `seq' is lost, keep going. */
+ lost = 1;
+ seq = seq_before(minseq, p->sequence)?
+ minseq + 1: p->sequence;
+ next = p;
+ continue;
+ }
+
+ /*
+ * At this point we know that all the fragments from
+ * ppp->nextseq to seq are either present or lost.
+ * Also, there are no complete packets in the queue
+ * that have no missing fragments and end before this
+ * fragment.
+ */
+
+ /* B bit set indicates this fragment starts a packet */
+ if (p->BEbits & B) {
+ head = p;
+ lost = 0;
+ len = 0;
+ }
+
+ len += p->len;
+
+ /* Got a complete packet yet? */
+ if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
+ if (len > ppp->mrru + 2) {
+ ++ppp->stats.rx_length_errors;
+ printk(KERN_DEBUG "PPP: reconstructed packet"
+ " is too long (%d)\n", len);
+ } else if (p == head) {
+ /* fragment is complete packet - reuse skb */
+ tail = p;
+ skb = skb_get(p);
+ break;
+ } else if ((skb = dev_alloc_skb(len)) == NULL) {
+ ++ppp->stats.rx_missed_errors;
+ printk(KERN_DEBUG "PPP: no memory for "
+ "reconstructed packet");
+ } else {
+ tail = p;
+ break;
+ }
+ ppp->nextseq = seq + 1;
+ }
+
+ /*
+ * If this is the ending fragment of a packet,
+ * and we haven't found a complete valid packet yet,
+ * we can discard up to and including this fragment.
+ */
+ if (p->BEbits & E)
+ head = next;
+
+ ++seq;
+ }
+
+ /* If we have a complete packet, copy it all into one skb. */
+ if (tail != NULL) {
+ /* If we have discarded any fragments,
+ signal a receive error. */
+ if (head->sequence != ppp->nextseq) {
+ if (ppp->debug & 1)
+ printk(KERN_DEBUG " missed pkts %u..%u\n",
+ ppp->nextseq, head->sequence-1);
+ ++ppp->stats.rx_dropped;
+ ppp_receive_error(ppp);
+ }
+
+ if (head != tail)
+ /* copy to a single skb */
+ for (p = head; p != tail->next; p = p->next)
+ skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
+ ppp->nextseq = tail->sequence + 1;
+ head = tail->next;
+ }
+
+ /* Discard all the skbuffs that we have copied the data out of
+ or that we can't use. */
+ while ((p = list->next) != head) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+
+ return skb;
+}
+#endif /* CONFIG_PPP_MULTILINK */
+
+/*
+ * Channel interface.
+ */
+
+/*
+ * Create a new, unattached ppp channel.
+ */
+int
+ppp_register_channel(struct ppp_channel *chan)
+{
+ struct channel *pch;
+
+ pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ if (pch == 0)
+ return -ENOMEM;
+ memset(pch, 0, sizeof(struct channel));
+ pch->ppp = NULL;
+ pch->chan = chan;
+ chan->ppp = pch;
+ init_ppp_file(&pch->file, CHANNEL);
+ pch->file.hdrlen = chan->hdrlen;
+#ifdef CONFIG_PPP_MULTILINK
+ pch->lastseq = -1;
+#endif /* CONFIG_PPP_MULTILINK */
+ init_rwsem(&pch->chan_sem);
+ spin_lock_init(&pch->downl);
+ rwlock_init(&pch->upl);
+ spin_lock_bh(&all_channels_lock);
+ pch->file.index = ++last_channel_index;
+ list_add(&pch->list, &new_channels);
+ atomic_inc(&channel_count);
+ spin_unlock_bh(&all_channels_lock);
+ return 0;
+}
+
+/*
+ * Return the index of a channel.
+ */
+int ppp_channel_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+
+ if (pch != 0)
+ return pch->file.index;
+ return -1;
+}
+
+/*
+ * Return the PPP unit number to which a channel is connected.
+ */
+int ppp_unit_number(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int unit = -1;
+
+ if (pch != 0) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp != 0)
+ unit = pch->ppp->file.index;
+ read_unlock_bh(&pch->upl);
+ }
+ return unit;
+}
+
+/*
+ * Disconnect a channel from the generic layer.
+ * This must be called in process context.
+ */
+void
+ppp_unregister_channel(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+
+ if (pch == 0)
+ return; /* should never happen */
+ chan->ppp = NULL;
+
+ /*
+ * This ensures that we have returned from any calls into the
+ * channel's start_xmit or ioctl routine before we proceed.
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+ pch->chan = NULL;
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+ spin_lock_bh(&all_channels_lock);
+ list_del(&pch->list);
+ spin_unlock_bh(&all_channels_lock);
+ pch->file.dead = 1;
+ wake_up_interruptible(&pch->file.rwait);
+ if (atomic_dec_and_test(&pch->file.refcnt))
+ ppp_destroy_channel(pch);
+}
+
+/*
+ * Callback from a channel when it can accept more to transmit.
+ * This should be called at BH/softirq level, not interrupt level.
+ */
+void
+ppp_output_wakeup(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+
+ if (pch == 0)
+ return;
+ ppp_channel_push(pch);
+}
+
+/*
+ * Compression control.
+ */
+
+/* Process the PPPIOCSCOMPRESS ioctl. */
+static int
+ppp_set_compress(struct ppp *ppp, unsigned long arg)
+{
+ int err;
+ struct compressor *cp, *ocomp;
+ struct ppp_option_data data;
+ void *state, *ostate;
+ unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
+
+ err = -EFAULT;
+ if (copy_from_user(&data, (void __user *) arg, sizeof(data))
+ || (data.length <= CCP_MAX_OPTION_LENGTH
+ && copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
+ goto out;
+ err = -EINVAL;
+ if (data.length > CCP_MAX_OPTION_LENGTH
+ || ccp_option[1] < 2 || ccp_option[1] > data.length)
+ goto out;
+
+ cp = find_compressor(ccp_option[0]);
+#ifdef CONFIG_KMOD
+ if (cp == 0) {
+ request_module("ppp-compress-%d", ccp_option[0]);
+ cp = find_compressor(ccp_option[0]);
+ }
+#endif /* CONFIG_KMOD */
+ if (cp == 0)
+ goto out;
+
+ err = -ENOBUFS;
+ if (data.transmit) {
+ state = cp->comp_alloc(ccp_option, data.length);
+ if (state != 0) {
+ ppp_xmit_lock(ppp);
+ ppp->xstate &= ~SC_COMP_RUN;
+ ocomp = ppp->xcomp;
+ ostate = ppp->xc_state;
+ ppp->xcomp = cp;
+ ppp->xc_state = state;
+ ppp_xmit_unlock(ppp);
+ if (ostate != 0) {
+ ocomp->comp_free(ostate);
+ module_put(ocomp->owner);
+ }
+ err = 0;
+ } else
+ module_put(cp->owner);
+
+ } else {
+ state = cp->decomp_alloc(ccp_option, data.length);
+ if (state != 0) {
+ ppp_recv_lock(ppp);
+ ppp->rstate &= ~SC_DECOMP_RUN;
+ ocomp = ppp->rcomp;
+ ostate = ppp->rc_state;
+ ppp->rcomp = cp;
+ ppp->rc_state = state;
+ ppp_recv_unlock(ppp);
+ if (ostate != 0) {
+ ocomp->decomp_free(ostate);
+ module_put(ocomp->owner);
+ }
+ err = 0;
+ } else
+ module_put(cp->owner);
+ }
+
+ out:
+ return err;
+}
+
+/*
+ * Look at a CCP packet and update our state accordingly.
+ * We assume the caller has the xmit or recv path locked.
+ */
+static void
+ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
+{
+ unsigned char *dp;
+ int len;
+
+ if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
+ return; /* no header */
+ dp = skb->data + 2;
+
+ switch (CCP_CODE(dp)) {
+ case CCP_CONFREQ:
+
+ /* A ConfReq starts negotiation of compression
+ * in one direction of transmission,
+ * and hence brings it down...but which way?
+ *
+ * Remember:
+ * A ConfReq indicates what the sender would like to receive
+ */
+ if(inbound)
+ /* He is proposing what I should send */
+ ppp->xstate &= ~SC_COMP_RUN;
+ else
+ /* I am proposing what he should send */
+ ppp->rstate &= ~SC_DECOMP_RUN;
+
+ break;
+
+ case CCP_TERMREQ:
+ case CCP_TERMACK:
+ /*
+ * CCP is going down, both directions of transmission
+ */
+ ppp->rstate &= ~SC_DECOMP_RUN;
+ ppp->xstate &= ~SC_COMP_RUN;
+ break;
+
+ case CCP_CONFACK:
+ if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
+ break;
+ len = CCP_LENGTH(dp);
+ if (!pskb_may_pull(skb, len + 2))
+ return; /* too short */
+ dp += CCP_HDRLEN;
+ len -= CCP_HDRLEN;
+ if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
+ break;
+ if (inbound) {
+ /* we will start receiving compressed packets */
+ if (ppp->rc_state == 0)
+ break;
+ if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
+ ppp->file.index, 0, ppp->mru, ppp->debug)) {
+ ppp->rstate |= SC_DECOMP_RUN;
+ ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
+ }
+ } else {
+ /* we will soon start sending compressed packets */
+ if (ppp->xc_state == 0)
+ break;
+ if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
+ ppp->file.index, 0, ppp->debug))
+ ppp->xstate |= SC_COMP_RUN;
+ }
+ break;
+
+ case CCP_RESETACK:
+ /* reset the [de]compressor */
+ if ((ppp->flags & SC_CCP_UP) == 0)
+ break;
+ if (inbound) {
+ if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
+ ppp->rcomp->decomp_reset(ppp->rc_state);
+ ppp->rstate &= ~SC_DC_ERROR;
+ }
+ } else {
+ if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
+ ppp->xcomp->comp_reset(ppp->xc_state);
+ }
+ break;
+ }
+}
+
+/* Free up compression resources. */
+static void
+ppp_ccp_closed(struct ppp *ppp)
+{
+ void *xstate, *rstate;
+ struct compressor *xcomp, *rcomp;
+
+ ppp_lock(ppp);
+ ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
+ ppp->xstate = 0;
+ xcomp = ppp->xcomp;
+ xstate = ppp->xc_state;
+ ppp->xc_state = NULL;
+ ppp->rstate = 0;
+ rcomp = ppp->rcomp;
+ rstate = ppp->rc_state;
+ ppp->rc_state = NULL;
+ ppp_unlock(ppp);
+
+ if (xstate) {
+ xcomp->comp_free(xstate);
+ module_put(xcomp->owner);
+ }
+ if (rstate) {
+ rcomp->decomp_free(rstate);
+ module_put(rcomp->owner);
+ }
+}
+
+/* List of compressors. */
+static LIST_HEAD(compressor_list);
+static DEFINE_SPINLOCK(compressor_list_lock);
+
+struct compressor_entry {
+ struct list_head list;
+ struct compressor *comp;
+};
+
+static struct compressor_entry *
+find_comp_entry(int proto)
+{
+ struct compressor_entry *ce;
+ struct list_head *list = &compressor_list;
+
+ while ((list = list->next) != &compressor_list) {
+ ce = list_entry(list, struct compressor_entry, list);
+ if (ce->comp->compress_proto == proto)
+ return ce;
+ }
+ return NULL;
+}
+
+/* Register a compressor */
+int
+ppp_register_compressor(struct compressor *cp)
+{
+ struct compressor_entry *ce;
+ int ret;
+ spin_lock(&compressor_list_lock);
+ ret = -EEXIST;
+ if (find_comp_entry(cp->compress_proto) != 0)
+ goto out;
+ ret = -ENOMEM;
+ ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
+ if (ce == 0)
+ goto out;
+ ret = 0;
+ ce->comp = cp;
+ list_add(&ce->list, &compressor_list);
+ out:
+ spin_unlock(&compressor_list_lock);
+ return ret;
+}
+
+/* Unregister a compressor */
+void
+ppp_unregister_compressor(struct compressor *cp)
+{
+ struct compressor_entry *ce;
+
+ spin_lock(&compressor_list_lock);
+ ce = find_comp_entry(cp->compress_proto);
+ if (ce != 0 && ce->comp == cp) {
+ list_del(&ce->list);
+ kfree(ce);
+ }
+ spin_unlock(&compressor_list_lock);
+}
+
+/* Find a compressor. */
+static struct compressor *
+find_compressor(int type)
+{
+ struct compressor_entry *ce;
+ struct compressor *cp = NULL;
+
+ spin_lock(&compressor_list_lock);
+ ce = find_comp_entry(type);
+ if (ce != 0) {
+ cp = ce->comp;
+ if (!try_module_get(cp->owner))
+ cp = NULL;
+ }
+ spin_unlock(&compressor_list_lock);
+ return cp;
+}
+
+/*
+ * Miscellaneous stuff.
+ */
+
+static void
+ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
+{
+ struct slcompress *vj = ppp->vj;
+
+ memset(st, 0, sizeof(*st));
+ st->p.ppp_ipackets = ppp->stats.rx_packets;
+ st->p.ppp_ierrors = ppp->stats.rx_errors;
+ st->p.ppp_ibytes = ppp->stats.rx_bytes;
+ st->p.ppp_opackets = ppp->stats.tx_packets;
+ st->p.ppp_oerrors = ppp->stats.tx_errors;
+ st->p.ppp_obytes = ppp->stats.tx_bytes;
+ if (vj == 0)
+ return;
+ st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
+ st->vj.vjs_compressed = vj->sls_o_compressed;
+ st->vj.vjs_searches = vj->sls_o_searches;
+ st->vj.vjs_misses = vj->sls_o_misses;
+ st->vj.vjs_errorin = vj->sls_i_error;
+ st->vj.vjs_tossed = vj->sls_i_tossed;
+ st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
+ st->vj.vjs_compressedin = vj->sls_i_compressed;
+}
+
+/*
+ * Stuff for handling the lists of ppp units and channels
+ * and for initialization.
+ */
+
+/*
+ * Create a new ppp interface unit. Fails if it can't allocate memory
+ * or if there is already a unit with the requested number.
+ * unit == -1 means allocate a new number.
+ */
+static struct ppp *
+ppp_create_interface(int unit, int *retp)
+{
+ struct ppp *ppp;
+ struct net_device *dev = NULL;
+ int ret = -ENOMEM;
+ int i;
+
+ ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ if (!ppp)
+ goto out;
+ dev = alloc_netdev(0, "", ppp_setup);
+ if (!dev)
+ goto out1;
+ memset(ppp, 0, sizeof(struct ppp));
+
+ ppp->mru = PPP_MRU;
+ init_ppp_file(&ppp->file, INTERFACE);
+ ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ for (i = 0; i < NUM_NP; ++i)
+ ppp->npmode[i] = NPMODE_PASS;
+ INIT_LIST_HEAD(&ppp->channels);
+ spin_lock_init(&ppp->rlock);
+ spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+ ppp->minseq = -1;
+ skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+ ppp->dev = dev;
+ dev->priv = ppp;
+
+ dev->hard_start_xmit = ppp_start_xmit;
+ dev->get_stats = ppp_net_stats;
+ dev->do_ioctl = ppp_net_ioctl;
+
+ ret = -EEXIST;
+ down(&all_ppp_sem);
+ if (unit < 0)
+ unit = cardmap_find_first_free(all_ppp_units);
+ else if (cardmap_get(all_ppp_units, unit) != NULL)
+ goto out2; /* unit already exists */
+
+ /* Initialize the new ppp unit */
+ ppp->file.index = unit;
+ sprintf(dev->name, "ppp%d", unit);
+
+ ret = register_netdev(dev);
+ if (ret != 0) {
+ printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
+ goto out2;
+ }
+
+ atomic_inc(&ppp_unit_count);
+ cardmap_set(&all_ppp_units, unit, ppp);
+ up(&all_ppp_sem);
+ *retp = 0;
+ return ppp;
+
+out2:
+ up(&all_ppp_sem);
+ free_netdev(dev);
+out1:
+ kfree(ppp);
+out:
+ *retp = ret;
+ return NULL;
+}
+
+/*
+ * Initialize a ppp_file structure.
+ */
+static void
+init_ppp_file(struct ppp_file *pf, int kind)
+{
+ pf->kind = kind;
+ skb_queue_head_init(&pf->xq);
+ skb_queue_head_init(&pf->rq);
+ atomic_set(&pf->refcnt, 1);
+ init_waitqueue_head(&pf->rwait);
+}
+
+/*
+ * Take down a ppp interface unit - called when the owning file
+ * (the one that created the unit) is closed or detached.
+ */
+static void ppp_shutdown_interface(struct ppp *ppp)
+{
+ struct net_device *dev;
+
+ down(&all_ppp_sem);
+ ppp_lock(ppp);
+ dev = ppp->dev;
+ ppp->dev = NULL;
+ ppp_unlock(ppp);
+ /* This will call dev_close() for us. */
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ cardmap_set(&all_ppp_units, ppp->file.index, NULL);
+ ppp->file.dead = 1;
+ ppp->owner = NULL;
+ wake_up_interruptible(&ppp->file.rwait);
+ up(&all_ppp_sem);
+}
+
+/*
+ * Free the memory used by a ppp unit. This is only called once
+ * there are no channels connected to the unit and no file structs
+ * that reference the unit.
+ */
+static void ppp_destroy_interface(struct ppp *ppp)
+{
+ atomic_dec(&ppp_unit_count);
+
+ if (!ppp->file.dead || ppp->n_channels) {
+ /* "can't happen" */
+ printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
+ "n_channels=%d !\n", ppp, ppp->file.dead,
+ ppp->n_channels);
+ return;
+ }
+
+ ppp_ccp_closed(ppp);
+ if (ppp->vj) {
+ slhc_free(ppp->vj);
+ ppp->vj = NULL;
+ }
+ skb_queue_purge(&ppp->file.xq);
+ skb_queue_purge(&ppp->file.rq);
+#ifdef CONFIG_PPP_MULTILINK
+ skb_queue_purge(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ if (ppp->pass_filter) {
+ kfree(ppp->pass_filter);
+ ppp->pass_filter = NULL;
+ }
+ if (ppp->active_filter) {
+ kfree(ppp->active_filter);
+ ppp->active_filter = NULL;
+ }
+#endif /* CONFIG_PPP_FILTER */
+
+ kfree(ppp);
+}
+
+/*
+ * Locate an existing ppp unit.
+ * The caller should have locked the all_ppp_sem.
+ */
+static struct ppp *
+ppp_find_unit(int unit)
+{
+ return cardmap_get(all_ppp_units, unit);
+}
+
+/*
+ * Locate an existing ppp channel.
+ * The caller should have locked the all_channels_lock.
+ * First we look in the new_channels list, then in the
+ * all_channels list. If found in the new_channels list,
+ * we move it to the all_channels list. This is for speed
+ * when we have a lot of channels in use.
+ */
+static struct channel *
+ppp_find_channel(int unit)
+{
+ struct channel *pch;
+ struct list_head *list;
+
+ list = &new_channels;
+ while ((list = list->next) != &new_channels) {
+ pch = list_entry(list, struct channel, list);
+ if (pch->file.index == unit) {
+ list_del(&pch->list);
+ list_add(&pch->list, &all_channels);
+ return pch;
+ }
+ }
+ list = &all_channels;
+ while ((list = list->next) != &all_channels) {
+ pch = list_entry(list, struct channel, list);
+ if (pch->file.index == unit)
+ return pch;
+ }
+ return NULL;
+}
+
+/*
+ * Connect a PPP channel to a PPP interface unit.
+ */
+static int
+ppp_connect_channel(struct channel *pch, int unit)
+{
+ struct ppp *ppp;
+ int ret = -ENXIO;
+ int hdrlen;
+
+ down(&all_ppp_sem);
+ ppp = ppp_find_unit(unit);
+ if (ppp == 0)
+ goto out;
+ write_lock_bh(&pch->upl);
+ ret = -EINVAL;
+ if (pch->ppp != 0)
+ goto outl;
+
+ ppp_lock(ppp);
+ if (pch->file.hdrlen > ppp->file.hdrlen)
+ ppp->file.hdrlen = pch->file.hdrlen;
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (ppp->dev && hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+ list_add_tail(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ atomic_inc(&ppp->file.refcnt);
+ ppp_unlock(ppp);
+ ret = 0;
+
+ outl:
+ write_unlock_bh(&pch->upl);
+ out:
+ up(&all_ppp_sem);
+ return ret;
+}
+
+/*
+ * Disconnect a channel from its ppp unit.
+ */
+static int
+ppp_disconnect_channel(struct channel *pch)
+{
+ struct ppp *ppp;
+ int err = -EINVAL;
+
+ write_lock_bh(&pch->upl);
+ ppp = pch->ppp;
+ pch->ppp = NULL;
+ write_unlock_bh(&pch->upl);
+ if (ppp != 0) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+ list_del(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+ }
+ return err;
+}
+
+/*
+ * Free up the resources used by a ppp channel.
+ */
+static void ppp_destroy_channel(struct channel *pch)
+{
+ atomic_dec(&channel_count);
+
+ if (!pch->file.dead) {
+ /* "can't happen" */
+ printk(KERN_ERR "ppp: destroying undead channel %p !\n",
+ pch);
+ return;
+ }
+ skb_queue_purge(&pch->file.xq);
+ skb_queue_purge(&pch->file.rq);
+ kfree(pch);
+}
+
+static void __exit ppp_cleanup(void)
+{
+ /* should never happen */
+ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
+ printk(KERN_ERR "PPP: removing module but units remain!\n");
+ cardmap_destroy(&all_ppp_units);
+ if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
+ printk(KERN_ERR "PPP: failed to unregister PPP device\n");
+ devfs_remove("ppp");
+ class_simple_device_remove(MKDEV(PPP_MAJOR, 0));
+ class_simple_destroy(ppp_class);
+}
+
+/*
+ * Cardmap implementation.
+ */
+static void *cardmap_get(struct cardmap *map, unsigned int nr)
+{
+ struct cardmap *p;
+ int i;
+
+ for (p = map; p != NULL; ) {
+ if ((i = nr >> p->shift) >= CARDMAP_WIDTH)
+ return NULL;
+ if (p->shift == 0)
+ return p->ptr[i];
+ nr &= ~(CARDMAP_MASK << p->shift);
+ p = p->ptr[i];
+ }
+ return NULL;
+}
+
+static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+{
+ struct cardmap *p;
+ int i;
+
+ p = *pmap;
+ if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
+ do {
+ /* need a new top level */
+ struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
+ memset(np, 0, sizeof(*np));
+ np->ptr[0] = p;
+ if (p != NULL) {
+ np->shift = p->shift + CARDMAP_ORDER;
+ p->parent = np;
+ } else
+ np->shift = 0;
+ p = np;
+ } while ((nr >> p->shift) >= CARDMAP_WIDTH);
+ *pmap = p;
+ }
+ while (p->shift > 0) {
+ i = (nr >> p->shift) & CARDMAP_MASK;
+ if (p->ptr[i] == NULL) {
+ struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
+ memset(np, 0, sizeof(*np));
+ np->shift = p->shift - CARDMAP_ORDER;
+ np->parent = p;
+ p->ptr[i] = np;
+ }
+ if (ptr == NULL)
+ clear_bit(i, &p->inuse);
+ p = p->ptr[i];
+ }
+ i = nr & CARDMAP_MASK;
+ p->ptr[i] = ptr;
+ if (ptr != NULL)
+ set_bit(i, &p->inuse);
+ else
+ clear_bit(i, &p->inuse);
+}
+
+static unsigned int cardmap_find_first_free(struct cardmap *map)
+{
+ struct cardmap *p;
+ unsigned int nr = 0;
+ int i;
+
+ if ((p = map) == NULL)
+ return 0;
+ for (;;) {
+ i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH);
+ if (i >= CARDMAP_WIDTH) {
+ if (p->parent == NULL)
+ return CARDMAP_WIDTH << p->shift;
+ p = p->parent;
+ i = (nr >> p->shift) & CARDMAP_MASK;
+ set_bit(i, &p->inuse);
+ continue;
+ }
+ nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift);
+ if (p->shift == 0 || p->ptr[i] == NULL)
+ return nr;
+ p = p->ptr[i];
+ }
+}
+
+static void cardmap_destroy(struct cardmap **pmap)
+{
+ struct cardmap *p, *np;
+ int i;
+
+ for (p = *pmap; p != NULL; p = np) {
+ if (p->shift != 0) {
+ for (i = 0; i < CARDMAP_WIDTH; ++i)
+ if (p->ptr[i] != NULL)
+ break;
+ if (i < CARDMAP_WIDTH) {
+ np = p->ptr[i];
+ p->ptr[i] = NULL;
+ continue;
+ }
+ }
+ np = p->parent;
+ kfree(p);
+ }
+ *pmap = NULL;
+}
+
+/* Module/initialization stuff */
+
+module_init(ppp_init);
+module_exit(ppp_cleanup);
+
+EXPORT_SYMBOL(ppp_register_channel);
+EXPORT_SYMBOL(ppp_unregister_channel);
+EXPORT_SYMBOL(ppp_channel_index);
+EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_input);
+EXPORT_SYMBOL(ppp_input_error);
+EXPORT_SYMBOL(ppp_output_wakeup);
+EXPORT_SYMBOL(ppp_register_compressor);
+EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
+MODULE_ALIAS("/dev/ppp");
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
new file mode 100644
index 000000000000..7d0150b4c629
--- /dev/null
+++ b/drivers/net/ppp_synctty.c
@@ -0,0 +1,803 @@
+/*
+ * PPP synchronous tty channel driver for Linux.
+ *
+ * This is a ppp channel driver that can be used with tty device drivers
+ * that are frame oriented, such as synchronous HDLC devices.
+ *
+ * Complete PPP frames without encoding/decoding are exchanged between
+ * the channel driver and the device driver.
+ *
+ * The async map IOCTL codes are implemented to keep the user mode
+ * applications happy if they call them. Synchronous PPP does not use
+ * the async maps.
+ *
+ * Copyright 1999 Paul Mackerras.
+ *
+ * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This driver provides the encapsulation and framing for sending
+ * and receiving PPP frames over sync serial lines. It relies on
+ * the generic PPP layer to give it frames to send and to process
+ * received frames. It implements the PPP line discipline.
+ *
+ * Part of the code in this driver was inspired by the old async-only
+ * PPP driver, written by Michael Callahan and Al Longyear, and
+ * subsequently hacked by Paul Mackerras.
+ *
+ * ==FILEVERSION 20040616==
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/ppp_channel.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#define PPP_VERSION "2.4.2"
+
+/* Structure for storing local state. */
+struct syncppp {
+ struct tty_struct *tty;
+ unsigned int flags;
+ unsigned int rbits;
+ int mru;
+ spinlock_t xmit_lock;
+ spinlock_t recv_lock;
+ unsigned long xmit_flags;
+ u32 xaccm[8];
+ u32 raccm;
+ unsigned int bytes_sent;
+ unsigned int bytes_rcvd;
+
+ struct sk_buff *tpkt;
+ unsigned long last_xmit;
+
+ struct sk_buff_head rqueue;
+
+ struct tasklet_struct tsk;
+
+ atomic_t refcnt;
+ struct semaphore dead_sem;
+ struct ppp_channel chan; /* interface to generic ppp layer */
+};
+
+/* Bit numbers in xmit_flags */
+#define XMIT_WAKEUP 0
+#define XMIT_FULL 1
+
+/* Bits in rbits */
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define PPPSYNC_MAX_RQLEN 32 /* arbitrary */
+
+/*
+ * Prototypes.
+ */
+static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
+static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
+static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
+ unsigned long arg);
+static void ppp_sync_process(unsigned long arg);
+static int ppp_sync_push(struct syncppp *ap);
+static void ppp_sync_flush_output(struct syncppp *ap);
+static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+ char *flags, int count);
+
+static struct ppp_channel_ops sync_ops = {
+ ppp_sync_send,
+ ppp_sync_ioctl
+};
+
+/*
+ * Utility procedures to print a buffer in hex/ascii
+ */
+static void
+ppp_print_hex (register __u8 * out, const __u8 * in, int count)
+{
+ register __u8 next_ch;
+ static char hex[] = "0123456789ABCDEF";
+
+ while (count-- > 0) {
+ next_ch = *in++;
+ *out++ = hex[(next_ch >> 4) & 0x0F];
+ *out++ = hex[next_ch & 0x0F];
+ ++out;
+ }
+}
+
+static void
+ppp_print_char (register __u8 * out, const __u8 * in, int count)
+{
+ register __u8 next_ch;
+
+ while (count-- > 0) {
+ next_ch = *in++;
+
+ if (next_ch < 0x20 || next_ch > 0x7e)
+ *out++ = '.';
+ else {
+ *out++ = next_ch;
+ if (next_ch == '%') /* printk/syslogd has a bug !! */
+ *out++ = '%';
+ }
+ }
+ *out = '\0';
+}
+
+static void
+ppp_print_buffer (const char *name, const __u8 *buf, int count)
+{
+ __u8 line[44];
+
+ if (name != NULL)
+ printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
+
+ while (count > 8) {
+ memset (line, 32, 44);
+ ppp_print_hex (line, buf, 8);
+ ppp_print_char (&line[8 * 3], buf, 8);
+ printk(KERN_DEBUG "%s\n", line);
+ count -= 8;
+ buf += 8;
+ }
+
+ if (count > 0) {
+ memset (line, 32, 44);
+ ppp_print_hex (line, buf, count);
+ ppp_print_char (&line[8 * 3], buf, count);
+ printk(KERN_DEBUG "%s\n", line);
+ }
+}
+
+
+/*
+ * Routines implementing the synchronous PPP line discipline.
+ */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_synctty_receive while another
+ * calls ppp_synctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_synctty_receive is using. The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ *
+ * FIXME: Fixed in tty_io nowadays.
+ */
+static DEFINE_RWLOCK(disc_data_lock);
+
+static struct syncppp *sp_get(struct tty_struct *tty)
+{
+ struct syncppp *ap;
+
+ read_lock(&disc_data_lock);
+ ap = tty->disc_data;
+ if (ap != NULL)
+ atomic_inc(&ap->refcnt);
+ read_unlock(&disc_data_lock);
+ return ap;
+}
+
+static void sp_put(struct syncppp *ap)
+{
+ if (atomic_dec_and_test(&ap->refcnt))
+ up(&ap->dead_sem);
+}
+
+/*
+ * Called when a tty is put into sync-PPP line discipline.
+ */
+static int
+ppp_sync_open(struct tty_struct *tty)
+{
+ struct syncppp *ap;
+ int err;
+
+ ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ err = -ENOMEM;
+ if (ap == 0)
+ goto out;
+
+ /* initialize the syncppp structure */
+ memset(ap, 0, sizeof(*ap));
+ ap->tty = tty;
+ ap->mru = PPP_MRU;
+ spin_lock_init(&ap->xmit_lock);
+ spin_lock_init(&ap->recv_lock);
+ ap->xaccm[0] = ~0U;
+ ap->xaccm[3] = 0x60000000U;
+ ap->raccm = ~0U;
+
+ skb_queue_head_init(&ap->rqueue);
+ tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
+
+ atomic_set(&ap->refcnt, 1);
+ init_MUTEX_LOCKED(&ap->dead_sem);
+
+ ap->chan.private = ap;
+ ap->chan.ops = &sync_ops;
+ ap->chan.mtu = PPP_MRU;
+ ap->chan.hdrlen = 2; /* for A/C bytes */
+ err = ppp_register_channel(&ap->chan);
+ if (err)
+ goto out_free;
+
+ tty->disc_data = ap;
+
+ return 0;
+
+ out_free:
+ kfree(ap);
+ out:
+ return err;
+}
+
+/*
+ * Called when the tty is put into another line discipline
+ * or it hangs up. We have to wait for any cpu currently
+ * executing in any of the other ppp_synctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the syncppp struct. This routine must be called from
+ * process context, not interrupt or softirq context.
+ */
+static void
+ppp_sync_close(struct tty_struct *tty)
+{
+ struct syncppp *ap;
+
+ write_lock_irq(&disc_data_lock);
+ ap = tty->disc_data;
+ tty->disc_data = NULL;
+ write_unlock_irq(&disc_data_lock);
+ if (ap == 0)
+ return;
+
+ /*
+ * We have now ensured that nobody can start using ap from now
+ * on, but we have to wait for all existing users to finish.
+ * Note that ppp_unregister_channel ensures that no calls to
+ * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
+ * by the time it returns.
+ */
+ if (!atomic_dec_and_test(&ap->refcnt))
+ down(&ap->dead_sem);
+ tasklet_kill(&ap->tsk);
+
+ ppp_unregister_channel(&ap->chan);
+ skb_queue_purge(&ap->rqueue);
+ if (ap->tpkt != 0)
+ kfree_skb(ap->tpkt);
+ kfree(ap);
+}
+
+/*
+ * Called on tty hangup in process context.
+ *
+ * Wait for I/O to the driver to complete and unregister the PPP channel.
+ * This is already done by the close routine, so just call that.
+ */
+static int ppp_sync_hangup(struct tty_struct *tty)
+{
+ ppp_sync_close(tty);
+ return 0;
+}
+
+/*
+ * Read does nothing - no data is ever available this way.
+ * Pppd reads and writes packets via /dev/ppp instead.
+ */
+static ssize_t
+ppp_sync_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t count)
+{
+ return -EAGAIN;
+}
+
+/*
+ * Write on the tty does nothing, the packets all come in
+ * from the ppp generic stuff.
+ */
+static ssize_t
+ppp_sync_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t count)
+{
+ return -EAGAIN;
+}
+
+static int
+ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct syncppp *ap = sp_get(tty);
+ int __user *p = (int __user *)arg;
+ int err, val;
+
+ if (ap == 0)
+ return -ENXIO;
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGCHAN:
+ err = -ENXIO;
+ if (ap == 0)
+ break;
+ err = -EFAULT;
+ if (put_user(ppp_channel_index(&ap->chan), p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGUNIT:
+ err = -ENXIO;
+ if (ap == 0)
+ break;
+ err = -EFAULT;
+ if (put_user(ppp_unit_number(&ap->chan), p))
+ break;
+ err = 0;
+ break;
+
+ case TCGETS:
+ case TCGETA:
+ err = n_tty_ioctl(tty, file, cmd, arg);
+ break;
+
+ case TCFLSH:
+ /* flush our buffers and the serial port's buffer */
+ if (arg == TCIOFLUSH || arg == TCOFLUSH)
+ ppp_sync_flush_output(ap);
+ err = n_tty_ioctl(tty, file, cmd, arg);
+ break;
+
+ case FIONREAD:
+ val = 0;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOIOCTLCMD;
+ }
+
+ sp_put(ap);
+ return err;
+}
+
+/* No kernel lock - fine */
+static unsigned int
+ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
+{
+ return 0;
+}
+
+static int
+ppp_sync_room(struct tty_struct *tty)
+{
+ return 65535;
+}
+
+/*
+ * This can now be called from hard interrupt level as well
+ * as soft interrupt level or mainline.
+ */
+static void
+ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
+ char *cflags, int count)
+{
+ struct syncppp *ap = sp_get(tty);
+ unsigned long flags;
+
+ if (ap == 0)
+ return;
+ spin_lock_irqsave(&ap->recv_lock, flags);
+ ppp_sync_input(ap, buf, cflags, count);
+ spin_unlock_irqrestore(&ap->recv_lock, flags);
+ if (skb_queue_len(&ap->rqueue))
+ tasklet_schedule(&ap->tsk);
+ sp_put(ap);
+ if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+ && tty->driver->unthrottle)
+ tty->driver->unthrottle(tty);
+}
+
+static void
+ppp_sync_wakeup(struct tty_struct *tty)
+{
+ struct syncppp *ap = sp_get(tty);
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (ap == 0)
+ return;
+ set_bit(XMIT_WAKEUP, &ap->xmit_flags);
+ tasklet_schedule(&ap->tsk);
+ sp_put(ap);
+}
+
+
+static struct tty_ldisc ppp_sync_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "pppsync",
+ .open = ppp_sync_open,
+ .close = ppp_sync_close,
+ .hangup = ppp_sync_hangup,
+ .read = ppp_sync_read,
+ .write = ppp_sync_write,
+ .ioctl = ppp_synctty_ioctl,
+ .poll = ppp_sync_poll,
+ .receive_room = ppp_sync_room,
+ .receive_buf = ppp_sync_receive,
+ .write_wakeup = ppp_sync_wakeup,
+};
+
+static int __init
+ppp_sync_init(void)
+{
+ int err;
+
+ err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
+ if (err != 0)
+ printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
+ err);
+ return err;
+}
+
+/*
+ * The following routines provide the PPP channel interface.
+ */
+static int
+ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+{
+ struct syncppp *ap = chan->private;
+ int err, val;
+ u32 accm[8];
+ void __user *argp = (void __user *)arg;
+ u32 __user *p = argp;
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = ap->flags | ap->rbits;
+ if (put_user(val, (int __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSFLAGS:
+ if (get_user(val, (int __user *) argp))
+ break;
+ ap->flags = val & ~SC_RCV_BITS;
+ spin_lock_irq(&ap->recv_lock);
+ ap->rbits = val & SC_RCV_BITS;
+ spin_unlock_irq(&ap->recv_lock);
+ err = 0;
+ break;
+
+ case PPPIOCGASYNCMAP:
+ if (put_user(ap->xaccm[0], p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSASYNCMAP:
+ if (get_user(ap->xaccm[0], p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGRASYNCMAP:
+ if (put_user(ap->raccm, p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSRASYNCMAP:
+ if (get_user(ap->raccm, p))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGXASYNCMAP:
+ if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSXASYNCMAP:
+ if (copy_from_user(accm, argp, sizeof(accm)))
+ break;
+ accm[2] &= ~0x40000000U; /* can't escape 0x5e */
+ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
+ memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ if (put_user(ap->mru, (int __user *) argp))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSMRU:
+ if (get_user(val, (int __user *) argp))
+ break;
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+ return err;
+}
+
+/*
+ * This is called at softirq level to deliver received packets
+ * to the ppp_generic code, and to tell the ppp_generic code
+ * if we can accept more output now.
+ */
+static void ppp_sync_process(unsigned long arg)
+{
+ struct syncppp *ap = (struct syncppp *) arg;
+ struct sk_buff *skb;
+
+ /* process received packets */
+ while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
+ if (skb->len == 0) {
+ /* zero length buffers indicate error */
+ ppp_input_error(&ap->chan, 0);
+ kfree_skb(skb);
+ }
+ else
+ ppp_input(&ap->chan, skb);
+ }
+
+ /* try to push more stuff out */
+ if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
+ ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Procedures for encapsulation and framing.
+ */
+
+struct sk_buff*
+ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
+{
+ int proto;
+ unsigned char *data;
+ int islcp;
+
+ data = skb->data;
+ proto = (data[0] << 8) + data[1];
+
+ /* LCP packets with codes between 1 (configure-request)
+ * and 7 (code-reject) must be sent as though no options
+ * have been negotiated.
+ */
+ islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+ /* compress protocol field if option enabled */
+ if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
+ skb_pull(skb,1);
+
+ /* prepend address/control fields if necessary */
+ if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
+ if (skb_headroom(skb) < 2) {
+ struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
+ if (npkt == NULL) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ skb_reserve(npkt,2);
+ memcpy(skb_put(npkt,skb->len), skb->data, skb->len);
+ kfree_skb(skb);
+ skb = npkt;
+ }
+ skb_push(skb,2);
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
+ }
+
+ ap->last_xmit = jiffies;
+
+ if (skb && ap->flags & SC_LOG_OUTPKT)
+ ppp_print_buffer ("send buffer", skb->data, skb->len);
+
+ return skb;
+}
+
+/*
+ * Transmit-side routines.
+ */
+
+/*
+ * Send a packet to the peer over a sync tty line.
+ * Returns 1 iff the packet was accepted.
+ * If the packet was not accepted, we will call ppp_output_wakeup
+ * at some later time.
+ */
+static int
+ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct syncppp *ap = chan->private;
+
+ ppp_sync_push(ap);
+
+ if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
+ return 0; /* already full */
+ skb = ppp_sync_txmunge(ap, skb);
+ if (skb != NULL)
+ ap->tpkt = skb;
+ else
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+
+ ppp_sync_push(ap);
+ return 1;
+}
+
+/*
+ * Push as much data as possible out to the tty.
+ */
+static int
+ppp_sync_push(struct syncppp *ap)
+{
+ int sent, done = 0;
+ struct tty_struct *tty = ap->tty;
+ int tty_stuffed = 0;
+
+ if (!spin_trylock_bh(&ap->xmit_lock))
+ return 0;
+ for (;;) {
+ if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
+ tty_stuffed = 0;
+ if (!tty_stuffed && ap->tpkt != 0) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len);
+ if (sent < 0)
+ goto flush; /* error, e.g. loss of CD */
+ if (sent < ap->tpkt->len) {
+ tty_stuffed = 1;
+ } else {
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ continue;
+ }
+ /* haven't made any progress */
+ spin_unlock_bh(&ap->xmit_lock);
+ if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
+ || (!tty_stuffed && ap->tpkt != 0)))
+ break;
+ if (!spin_trylock_bh(&ap->xmit_lock))
+ break;
+ }
+ return done;
+
+flush:
+ if (ap->tpkt != 0) {
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ spin_unlock_bh(&ap->xmit_lock);
+ return done;
+}
+
+/*
+ * Flush output from our internal buffers.
+ * Called for the TCFLSH ioctl.
+ */
+static void
+ppp_sync_flush_output(struct syncppp *ap)
+{
+ int done = 0;
+
+ spin_lock_bh(&ap->xmit_lock);
+ if (ap->tpkt != NULL) {
+ kfree_skb(ap->tpkt);
+ ap->tpkt = NULL;
+ clear_bit(XMIT_FULL, &ap->xmit_flags);
+ done = 1;
+ }
+ spin_unlock_bh(&ap->xmit_lock);
+ if (done)
+ ppp_output_wakeup(&ap->chan);
+}
+
+/*
+ * Receive-side routines.
+ */
+
+/* called when the tty driver has data for us.
+ *
+ * Data is frame oriented: each call to ppp_sync_input is considered
+ * a whole frame. If the 1st flag byte is non-zero then the whole
+ * frame is considered to be in error and is tossed.
+ */
+static void
+ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+ char *flags, int count)
+{
+ struct sk_buff *skb;
+ unsigned char *p;
+
+ if (count == 0)
+ return;
+
+ if (ap->flags & SC_LOG_INPKT)
+ ppp_print_buffer ("receive buffer", buf, count);
+
+ /* stuff the chars in the skb */
+ if ((skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2)) == 0) {
+ printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
+ goto err;
+ }
+ /* Try to get the payload 4-byte aligned */
+ if (buf[0] != PPP_ALLSTATIONS)
+ skb_reserve(skb, 2 + (buf[0] & 1));
+
+ if (flags != 0 && *flags) {
+ /* error flag set, ignore frame */
+ goto err;
+ } else if (count > skb_tailroom(skb)) {
+ /* packet overflowed MRU */
+ goto err;
+ }
+
+ p = skb_put(skb, count);
+ memcpy(p, buf, count);
+
+ /* strip address/control field if present */
+ p = skb->data;
+ if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto err;
+ p = skb_pull(skb, 2);
+ }
+
+ /* decompress protocol field if compressed */
+ if (p[0] & 1) {
+ /* protocol is compressed */
+ skb_push(skb, 1)[0] = 0;
+ } else if (skb->len < 2)
+ goto err;
+
+ /* queue the frame to be processed */
+ skb_queue_tail(&ap->rqueue, skb);
+ return;
+
+err:
+ /* queue zero length packet as error indication */
+ if (skb || (skb = dev_alloc_skb(0))) {
+ skb_trim(skb, 0);
+ skb_queue_tail(&ap->rqueue, skb);
+ }
+}
+
+static void __exit
+ppp_sync_cleanup(void)
+{
+ if (tty_register_ldisc(N_SYNC_PPP, NULL) != 0)
+ printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
+}
+
+module_init(ppp_sync_init);
+module_exit(ppp_sync_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_SYNC_PPP);
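
To show how the line discipline above is reached from user space, here is a minimal sketch (not part of this commit; the helper name attach_sync_ppp is made up and error handling is minimal): pppd-style code switches an already-open tty to N_SYNC_PPP with TIOCSETD, which invokes ppp_sync_open(), and then reads the channel index back with PPPIOCGCHAN, which is served by ppp_synctty_ioctl().

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>

#ifndef N_SYNC_PPP
#define N_SYNC_PPP 14		/* same value the kernel registers above */
#endif

/* Illustrative user-space sketch, not part of this file. */
int attach_sync_ppp(int tty_fd)
{
	int disc = N_SYNC_PPP;
	int chindex;

	/* Switch the tty into the sync-PPP line discipline (ppp_sync_open). */
	if (ioctl(tty_fd, TIOCSETD, &disc) < 0) {
		perror("TIOCSETD");
		return -1;
	}
	/* Ask which PPP channel was registered (ppp_synctty_ioctl). */
	if (ioctl(tty_fd, PPPIOCGCHAN, &chindex) < 0) {
		perror("PPPIOCGCHAN");
		return -1;
	}
	return chindex;
}
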
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
new file mode 100644
index 000000000000..ce1a9bf7b9a7
--- /dev/null
+++ b/drivers/net/pppoe.c
@@ -0,0 +1,1153 @@
+/** -*- linux-c -*- ***********************************************************
+ * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoE --- PPP over Ethernet (RFC 2516)
+ *
+ *
+ * Version: 0.7.0
+ *
+ * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
+ * 030700 : Fixed connect logic to allow for disconnect.
+ * 270700 : Fixed potential SMP problems; we must protect against
+ * simultaneous invocation of ppp_input
+ * and ppp_unregister_channel.
+ * 040800 : Respect reference count mechanisms on net-devices.
+ * 200800 : fix kfree(skb) in pppoe_rcv (acme)
+ * Module reference count is decremented in the right spot now,
+ * guards against sock_put not actually freeing the sk
+ * in pppoe_release.
+ * 051000 : Initialization cleanup.
+ * 111100 : Fix recvmsg.
+ * 050101 : Fix PADT processing.
+ * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey)
+ * 170701 : Do not lock_sock with rwlock held. (DaveM)
+ * Ignore discovery frames if user has socket
+ * locked. (DaveM)
+ * Ignore return value of dev_queue_xmit in __pppoe_xmit
+ * or else we may kfree an SKB twice. (DaveM)
+ * 190701 : When doing copies of skb's in __pppoe_xmit, always delete
+ * the original skb that was passed in on success, never on
+ * failure. Delete the copy of the skb on failure to avoid
+ * a memory leak.
+ * 081001 : Misc. cleanup (licence string, non-blocking, prevent
+ * reference of device on close).
+ * 121301 : New ppp channels interface; cannot unregister a channel
+ * from interrupts. Thus, we mark the socket as a ZOMBIE
+ * and do the unregistration later.
+ * 081002 : seq_file support for proc stuff -acme
+ * 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6
+ * as version 0.7. Spacing cleanup.
+ * Author: Michal Ostrowski <mostrows@speakeasy.net>
+ * Contributors:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/if_ether.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+
+#define PPPOE_HASH_BITS 4
+#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
+
+static struct ppp_channel_ops pppoe_chan_ops;
+
+static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
+
+static struct proto_ops pppoe_ops;
+static DEFINE_RWLOCK(pppoe_hash_lock);
+
+static struct ppp_channel_ops pppoe_chan_ops;
+
+static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
+{
+ return (a->sid == b->sid &&
+ (memcmp(a->remote, b->remote, ETH_ALEN) == 0));
+}
+
+static inline int cmp_addr(struct pppoe_addr *a, unsigned long sid, char *addr)
+{
+ return (a->sid == sid &&
+ (memcmp(a->remote,addr,ETH_ALEN) == 0));
+}
+
+static int hash_item(unsigned long sid, unsigned char *addr)
+{
+ char hash = 0;
+ int i, j;
+
+ for (i = 0; i < ETH_ALEN ; ++i) {
+ for (j = 0; j < 8/PPPOE_HASH_BITS ; ++j) {
+ hash ^= addr[i] >> ( j * PPPOE_HASH_BITS );
+ }
+ }
+
+ for (i = 0; i < (sizeof(unsigned long)*8) / PPPOE_HASH_BITS ; ++i)
+ hash ^= sid >> (i*PPPOE_HASH_BITS);
+
+ return hash & ( PPPOE_HASH_SIZE - 1 );
+}
+
+/* zeroed because it's in .bss */
+static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
+
+/**********************************************************************
+ *
+ * Set/get/delete/rehash items (internal versions)
+ *
+ **********************************************************************/
+static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr)
+{
+ int hash = hash_item(sid, addr);
+ struct pppox_sock *ret;
+
+ ret = item_hash_table[hash];
+
+ while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr))
+ ret = ret->next;
+
+ return ret;
+}
+
+static int __set_item(struct pppox_sock *po)
+{
+ int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+ struct pppox_sock *ret;
+
+ ret = item_hash_table[hash];
+ while (ret) {
+ if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa))
+ return -EALREADY;
+
+ ret = ret->next;
+ }
+
+ if (!ret) {
+ po->next = item_hash_table[hash];
+ item_hash_table[hash] = po;
+ }
+
+ return 0;
+}
+
+static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
+{
+ int hash = hash_item(sid, addr);
+ struct pppox_sock *ret, **src;
+
+ ret = item_hash_table[hash];
+ src = &item_hash_table[hash];
+
+ while (ret) {
+ if (cmp_addr(&ret->pppoe_pa, sid, addr)) {
+ *src = ret->next;
+ break;
+ }
+
+ src = &ret->next;
+ ret = ret->next;
+ }
+
+ return ret;
+}
+
+/**********************************************************************
+ *
+ * Set/get/delete/rehash items
+ *
+ **********************************************************************/
+static inline struct pppox_sock *get_item(unsigned long sid,
+ unsigned char *addr)
+{
+ struct pppox_sock *po;
+
+ read_lock_bh(&pppoe_hash_lock);
+ po = __get_item(sid, addr);
+ if (po)
+ sock_hold(sk_pppox(po));
+ read_unlock_bh(&pppoe_hash_lock);
+
+ return po;
+}
+
+static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
+{
+ return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote);
+}
+
+static inline int set_item(struct pppox_sock *po)
+{
+ int i;
+
+ if (!po)
+ return -EINVAL;
+
+ write_lock_bh(&pppoe_hash_lock);
+ i = __set_item(po);
+ write_unlock_bh(&pppoe_hash_lock);
+
+ return i;
+}
+
+static inline struct pppox_sock *delete_item(unsigned long sid, char *addr)
+{
+ struct pppox_sock *ret;
+
+ write_lock_bh(&pppoe_hash_lock);
+ ret = __delete_item(sid, addr);
+ write_unlock_bh(&pppoe_hash_lock);
+
+ return ret;
+}
+
+
+
+/***************************************************************************
+ *
+ * Handler for device events.
+ * Certain device events require that sockets be unconnected.
+ *
+ **************************************************************************/
+
+static void pppoe_flush_dev(struct net_device *dev)
+{
+ int hash;
+
+ BUG_ON(dev == NULL);
+
+ read_lock_bh(&pppoe_hash_lock);
+ for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
+ struct pppox_sock *po = item_hash_table[hash];
+
+ while (po != NULL) {
+ if (po->pppoe_dev == dev) {
+ struct sock *sk = sk_pppox(po);
+
+ sock_hold(sk);
+ po->pppoe_dev = NULL;
+
+ /* We hold a reference to SK, now drop the
+ * hash table lock so that we may attempt
+ * to lock the socket (which can sleep).
+ */
+ read_unlock_bh(&pppoe_hash_lock);
+
+ lock_sock(sk);
+
+ if (sk->sk_state &
+ (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ dev_put(dev);
+ sk->sk_state = PPPOX_ZOMBIE;
+ sk->sk_state_change(sk);
+ }
+
+ release_sock(sk);
+
+ sock_put(sk);
+
+ read_lock_bh(&pppoe_hash_lock);
+
+ /* Now restart from the beginning of this
+ * hash chain. We always NULL out pppoe_dev
+ * so we are guaranteed to make forward
+ * progress.
+ */
+ po = item_hash_table[hash];
+ continue;
+ }
+ po = po->next;
+ }
+ }
+ read_unlock_bh(&pppoe_hash_lock);
+}
+
+static int pppoe_device_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+
+ /* Only look at sockets that are using this specific device. */
+ switch (event) {
+ case NETDEV_CHANGEMTU:
+ /* A change in mtu is a bad thing, requiring
+ * LCP re-negotiation.
+ */
+
+ case NETDEV_GOING_DOWN:
+ case NETDEV_DOWN:
+ /* Find every socket on this device and kill it. */
+ pppoe_flush_dev(dev);
+ break;
+
+ default:
+ break;
+ };
+
+ return NOTIFY_DONE;
+}
+
+
+static struct notifier_block pppoe_notifier = {
+ .notifier_call = pppoe_device_event,
+};
+
+
+/************************************************************************
+ *
+ * Do the real work of receiving a PPPoE Session frame.
+ *
+ ***********************************************************************/
+static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pppox_sock *relay_po = NULL;
+
+ if (sk->sk_state & PPPOX_BOUND) {
+ struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw;
+ int len = ntohs(ph->length);
+ skb_pull(skb, sizeof(struct pppoe_hdr));
+ skb_postpull_rcsum(skb, ph, sizeof(*ph));
+ if (pskb_trim_rcsum(skb, len))
+ goto abort_kfree;
+
+ ppp_input(&po->chan, skb);
+ } else if (sk->sk_state & PPPOX_RELAY) {
+ relay_po = get_item_by_addr(&po->pppoe_relay);
+
+ if (relay_po == NULL)
+ goto abort_kfree;
+
+ if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
+ goto abort_put;
+
+ skb_pull(skb, sizeof(struct pppoe_hdr));
+ if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ goto abort_put;
+ } else {
+ if (sock_queue_rcv_skb(sk, skb))
+ goto abort_kfree;
+ }
+
+ return NET_RX_SUCCESS;
+
+abort_put:
+ sock_put(sk_pppox(relay_po));
+
+abort_kfree:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/************************************************************************
+ *
+ * Receive wrapper called in BH context.
+ *
+ ***********************************************************************/
+static int pppoe_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+
+{
+ struct pppoe_hdr *ph;
+ struct pppox_sock *po;
+ struct sock *sk;
+ int ret;
+
+ if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+ goto drop;
+
+ if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
+ goto out;
+
+ ph = (struct pppoe_hdr *) skb->nh.raw;
+
+ po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+ if (!po)
+ goto drop;
+
+ sk = sk_pppox(po);
+ bh_lock_sock(sk);
+
+ /* Socket state is unknown, must put skb into backlog. */
+ if (sock_owned_by_user(sk) != 0) {
+ sk_add_backlog(sk, skb);
+ ret = NET_RX_SUCCESS;
+ } else {
+ ret = pppoe_rcv_core(sk, skb);
+ }
+
+ bh_unlock_sock(sk);
+ sock_put(sk);
+
+ return ret;
+drop:
+ kfree_skb(skb);
+out:
+ return NET_RX_DROP;
+}
+
+/************************************************************************
+ *
+ * Receive a PPPoE Discovery frame.
+ * This is solely for detection of PADT frames
+ *
+ ***********************************************************************/
+static int pppoe_disc_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+
+{
+ struct pppoe_hdr *ph;
+ struct pppox_sock *po;
+
+ if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+ goto abort;
+
+ if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
+ goto out;
+
+ ph = (struct pppoe_hdr *) skb->nh.raw;
+ if (ph->code != PADT_CODE)
+ goto abort;
+
+ po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+ if (po) {
+ struct sock *sk = sk_pppox(po);
+
+ bh_lock_sock(sk);
+
+ /* If the user has locked the socket, just ignore
+ * the packet. With the way two rcv protocols hook into
+ * one socket family type, we cannot (easily) distinguish
+ * what kind of SKB it is during backlog rcv.
+ */
+ if (sock_owned_by_user(sk) == 0) {
+ /* We're no longer connected at the PPPoE layer,
+ * and must wait for the ppp channel to disconnect us.
+ */
+ sk->sk_state = PPPOX_ZOMBIE;
+ }
+
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ }
+
+abort:
+ kfree_skb(skb);
+out:
+ return NET_RX_SUCCESS; /* Lies... :-) */
+}
+
+static struct packet_type pppoes_ptype = {
+ .type = __constant_htons(ETH_P_PPP_SES),
+ .func = pppoe_rcv,
+};
+
+static struct packet_type pppoed_ptype = {
+ .type = __constant_htons(ETH_P_PPP_DISC),
+ .func = pppoe_disc_rcv,
+};
+
+static struct proto pppoe_sk_proto = {
+ .name = "PPPOE",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+/***********************************************************************
+ *
+ * Initialize a new struct sock.
+ *
+ **********************************************************************/
+static int pppoe_create(struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppoe_ops;
+
+ sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_OE;
+
+ error = 0;
+out: return error;
+}
+
+static int pppoe_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po;
+ int error = 0;
+
+ if (!sk)
+ return 0;
+
+ if (sock_flag(sk, SOCK_DEAD))
+ return -EBADF;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+
+ po = pppox_sk(sk);
+ if (po->pppoe_pa.sid) {
+ delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+ }
+
+ if (po->pppoe_dev)
+ dev_put(po->pppoe_dev);
+
+ po->pppoe_dev = NULL;
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ sock_put(sk);
+
+ return error;
+}
+
+
+static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct net_device *dev = NULL;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ int error;
+
+ lock_sock(sk);
+
+ error = -EINVAL;
+ if (sp->sa_protocol != PX_PROTO_OE)
+ goto end;
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
+ goto end;
+
+ /* Check for already disconnected sockets, on attempts to disconnect */
+ error = -EALREADY;
+ if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
+ goto end;
+
+ error = 0;
+ if (po->pppoe_pa.sid) {
+ pppox_unbind_sock(sk);
+
+ /* Delete the old binding */
+ delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote);
+
+ if(po->pppoe_dev)
+ dev_put(po->pppoe_dev);
+
+ memset(sk_pppox(po) + 1, 0,
+ sizeof(struct pppox_sock) - sizeof(struct sock));
+
+ sk->sk_state = PPPOX_NONE;
+ }
+
+ /* Don't re-bind if sid==0 */
+ if (sp->sa_addr.pppoe.sid != 0) {
+ dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
+
+ error = -ENODEV;
+ if (!dev)
+ goto end;
+
+ po->pppoe_dev = dev;
+
+ if (!(dev->flags & IFF_UP))
+ goto err_put;
+
+ memcpy(&po->pppoe_pa,
+ &sp->sa_addr.pppoe,
+ sizeof(struct pppoe_addr));
+
+ error = set_item(po);
+ if (error < 0)
+ goto err_put;
+
+ po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+ dev->hard_header_len);
+
+ po->chan.private = sk;
+ po->chan.ops = &pppoe_chan_ops;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto err_put;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ }
+
+ po->num = sp->sa_addr.pppoe.sid;
+
+ end:
+ release_sock(sk);
+ return error;
+err_put:
+ if (po->pppoe_dev) {
+ dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
+ }
+ goto end;
+}
+
+
+static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = sizeof(struct sockaddr_pppox);
+ struct sockaddr_pppox sp;
+
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OE;
+ memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa,
+ sizeof(struct pppoe_addr));
+
+ memcpy(uaddr, &sp, len);
+
+ *usockaddr_len = len;
+
+ return 0;
+}
+
+
+static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ int val = 0;
+ int err = 0;
+
+ switch (cmd) {
+ case PPPIOCGMRU:
+ err = -ENXIO;
+
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (put_user(po->pppoe_dev->mtu -
+ sizeof(struct pppoe_hdr) -
+ PPP_HDRLEN,
+ (int __user *) arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (get_user(val,(int __user *) arg))
+ break;
+
+ if (val < (po->pppoe_dev->mtu
+ - sizeof(struct pppoe_hdr)
+ - PPP_HDRLEN))
+ err = 0;
+ else
+ err = -EINVAL;
+ break;
+
+ case PPPIOCSFLAGS:
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPOEIOCSFWD:
+ {
+ struct pppox_sock *relay_po;
+
+ err = -EBUSY;
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD))
+ break;
+
+ err = -ENOTCONN;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ /* PPPoE address from the user specifies an outbound
+ PPPoE address to which frames are forwarded */
+ err = -EFAULT;
+ if (copy_from_user(&po->pppoe_relay,
+ (void __user *)arg,
+ sizeof(struct sockaddr_pppox)))
+ break;
+
+ err = -EINVAL;
+ if (po->pppoe_relay.sa_family != AF_PPPOX ||
+ po->pppoe_relay.sa_protocol!= PX_PROTO_OE)
+ break;
+
+ /* Check that the socket referenced by the address
+ actually exists. */
+ relay_po = get_item_by_addr(&po->pppoe_relay);
+
+ if (!relay_po)
+ break;
+
+ sock_put(sk_pppox(relay_po));
+ sk->sk_state |= PPPOX_RELAY;
+ err = 0;
+ break;
+ }
+
+ case PPPOEIOCDFWD:
+ err = -EALREADY;
+ if (!(sk->sk_state & PPPOX_RELAY))
+ break;
+
+ sk->sk_state &= ~PPPOX_RELAY;
+ err = 0;
+ break;
+
+ default:;
+ };
+
+ return err;
+}
+
+
+static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct sk_buff *skb = NULL;
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ int error = 0;
+ struct pppoe_hdr hdr;
+ struct pppoe_hdr *ph;
+ struct net_device *dev;
+ char *start;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
+ error = -ENOTCONN;
+ goto end;
+ }
+
+ hdr.ver = 1;
+ hdr.type = 1;
+ hdr.code = 0;
+ hdr.sid = po->num;
+
+ lock_sock(sk);
+
+ dev = po->pppoe_dev;
+
+ error = -EMSGSIZE;
+ if (total_len > (dev->mtu + dev->hard_header_len))
+ goto end;
+
+
+ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+ 0, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+ goto end;
+ }
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, dev->hard_header_len);
+ skb->nh.raw = skb->data;
+
+ skb->dev = dev;
+
+ skb->priority = sk->sk_priority;
+ skb->protocol = __constant_htons(ETH_P_PPP_SES);
+
+ ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr));
+ start = (char *) &ph->tag[0];
+
+ error = memcpy_fromiovec(start, m->msg_iov, total_len);
+
+ if (error < 0) {
+ kfree_skb(skb);
+ goto end;
+ }
+
+ error = total_len;
+ dev->hard_header(skb, dev, ETH_P_PPP_SES,
+ po->pppoe_pa.remote, NULL, total_len);
+
+ memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
+
+ ph->length = htons(total_len);
+
+ dev_queue_xmit(skb);
+
+end:
+ release_sock(sk);
+ return error;
+}
+
+
+/************************************************************************
+ *
+ * xmit function for internal use.
+ *
+ ***********************************************************************/
+static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppox_sock *po = pppox_sk(sk);
+ struct net_device *dev = po->pppoe_dev;
+ struct pppoe_hdr hdr;
+ struct pppoe_hdr *ph;
+ int headroom = skb_headroom(skb);
+ int data_len = skb->len;
+ struct sk_buff *skb2;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ hdr.ver = 1;
+ hdr.type = 1;
+ hdr.code = 0;
+ hdr.sid = po->num;
+ hdr.length = htons(skb->len);
+
+ if (!dev)
+ goto abort;
+
+ /* Copy the skb if there is no space for the header. */
+ if (headroom < (sizeof(struct pppoe_hdr) + dev->hard_header_len)) {
+ skb2 = dev_alloc_skb(32+skb->len +
+ sizeof(struct pppoe_hdr) +
+ dev->hard_header_len);
+
+ if (skb2 == NULL)
+ goto abort;
+
+ skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr));
+ memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
+ } else {
+ /* Make a clone so as to not disturb the original skb,
+ * give dev_queue_xmit something it can free.
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ }
+
+ ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
+ memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
+ skb2->protocol = __constant_htons(ETH_P_PPP_SES);
+
+ skb2->nh.raw = skb2->data;
+
+ skb2->dev = dev;
+
+ dev->hard_header(skb2, dev, ETH_P_PPP_SES,
+ po->pppoe_pa.remote, NULL, data_len);
+
+ /* We're transmitting skb2, and assuming that dev_queue_xmit
+ * will free it. The generic ppp layer however, is expecting
+ * that we give back 'skb' (not 'skb2') in case of failure,
+ * but free it in case of success.
+ */
+
+ if (dev_queue_xmit(skb2) < 0)
+ goto abort;
+
+ kfree_skb(skb);
+ return 1;
+
+abort:
+ return 0;
+}
+
+
+/************************************************************************
+ *
+ * xmit function called by generic PPP driver
+ * sends PPP frame over PPPoE socket
+ *
+ ***********************************************************************/
+static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *) chan->private;
+ return __pppoe_xmit(sk, skb);
+}
+
+
+static struct ppp_channel_ops pppoe_chan_ops = {
+ .start_xmit = pppoe_xmit,
+};
+
+static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb = NULL;
+ int error = 0;
+ int len;
+ struct pppoe_hdr *ph = NULL;
+
+ if (sk->sk_state & PPPOX_BOUND) {
+ error = -EIO;
+ goto end;
+ }
+
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &error);
+
+ if (error < 0) {
+ goto end;
+ }
+
+ m->msg_namelen = 0;
+
+ if (skb) {
+ error = 0;
+ ph = (struct pppoe_hdr *) skb->nh.raw;
+ len = ntohs(ph->length);
+
+ error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len);
+ if (error < 0)
+ goto do_skb_free;
+ error = len;
+ }
+
+do_skb_free:
+ if (skb)
+ kfree_skb(skb);
+end:
+ return error;
+}
+
+#ifdef CONFIG_PROC_FS
+static int pppoe_seq_show(struct seq_file *seq, void *v)
+{
+ struct pppox_sock *po;
+ char *dev_name;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Id Address Device\n");
+ goto out;
+ }
+
+ po = v;
+ dev_name = po->pppoe_pa.dev;
+
+ seq_printf(seq, "%08X %02X:%02X:%02X:%02X:%02X:%02X %8s\n",
+ po->pppoe_pa.sid,
+ po->pppoe_pa.remote[0], po->pppoe_pa.remote[1],
+ po->pppoe_pa.remote[2], po->pppoe_pa.remote[3],
+ po->pppoe_pa.remote[4], po->pppoe_pa.remote[5], dev_name);
+out:
+ return 0;
+}
+
+static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos)
+{
+ struct pppox_sock *po = NULL;
+ int i = 0;
+
+ for (; i < PPPOE_HASH_SIZE; i++) {
+ po = item_hash_table[i];
+ while (po) {
+ if (!pos--)
+ goto out;
+ po = po->next;
+ }
+ }
+out:
+ return po;
+}
+
+static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ loff_t l = *pos;
+
+ read_lock_bh(&pppoe_hash_lock);
+ return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN;
+}
+
+static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct pppox_sock *po;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN) {
+ po = pppoe_get_idx(0);
+ goto out;
+ }
+ po = v;
+ if (po->next)
+ po = po->next;
+ else {
+ int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+
+ while (++hash < PPPOE_HASH_SIZE) {
+ po = item_hash_table[hash];
+ if (po)
+ break;
+ }
+ }
+out:
+ return po;
+}
+
+static void pppoe_seq_stop(struct seq_file *seq, void *v)
+{
+ read_unlock_bh(&pppoe_hash_lock);
+}
+
+static struct seq_operations pppoe_seq_ops = {
+ .start = pppoe_seq_start,
+ .next = pppoe_seq_next,
+ .stop = pppoe_seq_stop,
+ .show = pppoe_seq_show,
+};
+
+static int pppoe_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &pppoe_seq_ops);
+}
+
+static struct file_operations pppoe_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = pppoe_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init pppoe_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ p = create_proc_entry("pppoe", S_IRUGO, proc_net);
+ if (!p)
+ return -ENOMEM;
+
+ p->proc_fops = &pppoe_seq_fops;
+ return 0;
+}
+#else /* CONFIG_PROC_FS */
+static inline int pppoe_proc_init(void) { return 0; }
+#endif /* CONFIG_PROC_FS */
+
+/* ->ioctl are set at pppox_create */
+
+static struct proto_ops pppoe_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppoe_release,
+ .bind = sock_no_bind,
+ .connect = pppoe_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppoe_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = pppoe_sendmsg,
+ .recvmsg = pppoe_recvmsg,
+ .mmap = sock_no_mmap
+};
+
+static struct pppox_proto pppoe_proto = {
+ .create = pppoe_create,
+ .ioctl = pppoe_ioctl,
+ .owner = THIS_MODULE,
+};
+
+
+static int __init pppoe_init(void)
+{
+ int err = proto_register(&pppoe_sk_proto, 0);
+
+ if (err)
+ goto out;
+
+ err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
+ if (err)
+ goto out_unregister_pppoe_proto;
+
+ err = pppoe_proc_init();
+ if (err)
+ goto out_unregister_pppox_proto;
+
+ dev_add_pack(&pppoes_ptype);
+ dev_add_pack(&pppoed_ptype);
+ register_netdevice_notifier(&pppoe_notifier);
+out:
+ return err;
+out_unregister_pppox_proto:
+ unregister_pppox_proto(PX_PROTO_OE);
+out_unregister_pppoe_proto:
+ proto_unregister(&pppoe_sk_proto);
+ goto out;
+}
+
+static void __exit pppoe_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OE);
+ dev_remove_pack(&pppoes_ptype);
+ dev_remove_pack(&pppoed_ptype);
+ unregister_netdevice_notifier(&pppoe_notifier);
+ remove_proc_entry("pppoe", proc_net);
+ proto_unregister(&pppoe_sk_proto);
+}
+
+module_init(pppoe_init);
+module_exit(pppoe_exit);
+
+MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
+MODULE_DESCRIPTION("PPP over Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_PPPOX);
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
new file mode 100644
index 000000000000..0c1e114527fb
--- /dev/null
+++ b/drivers/net/pppox.c
@@ -0,0 +1,153 @@
+/** -*- linux-c -*- ***********************************************************
+ * Linux PPP over X/Ethernet (PPPoX/PPPoE) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoE --- PPP over Ethernet (RFC 2516)
+ *
+ *
+ * Version: 0.5.2
+ *
+ * Author: Michal Ostrowski <mostrows@speakeasy.net>
+ *
+ * 051000 : Initialization cleanup
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/init.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/ppp_channel.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+
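+/* One slot per PX_PROTO_* value; filled and cleared by (un)register_pppox_proto(). */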
+static struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
+
+int register_pppox_proto(int proto_num, struct pppox_proto *pp)
+{
+ if (proto_num < 0 || proto_num > PX_MAX_PROTO)
+ return -EINVAL;
+ if (pppox_protos[proto_num])
+ return -EALREADY;
+ pppox_protos[proto_num] = pp;
+ return 0;
+}
+
+void unregister_pppox_proto(int proto_num)
+{
+ if (proto_num >= 0 && proto_num <= PX_MAX_PROTO)
+ pppox_protos[proto_num] = NULL;
+}
+
+void pppox_unbind_sock(struct sock *sk)
+{
+ /* Clear connection to ppp device, if attached. */
+
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ ppp_unregister_channel(&pppox_sk(sk)->chan);
+ sk->sk_state = PPPOX_DEAD;
+ }
+}
+
+EXPORT_SYMBOL(register_pppox_proto);
+EXPORT_SYMBOL(unregister_pppox_proto);
+EXPORT_SYMBOL(pppox_unbind_sock);
+
+static int pppox_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ int rc = 0;
+
+ lock_sock(sk);
+
+ switch (cmd) {
+ case PPPIOCGCHAN: {
+ int index;
+ rc = -ENOTCONN;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ rc = -EINVAL;
+ index = ppp_channel_index(&po->chan);
+		if (put_user(index, (int __user *) arg))
+ break;
+
+ rc = 0;
+ sk->sk_state |= PPPOX_BOUND;
+ break;
+ }
+ default:
+ if (pppox_protos[sk->sk_protocol]->ioctl)
+ rc = pppox_protos[sk->sk_protocol]->ioctl(sock, cmd,
+ arg);
+
+ break;
+	}
+
+ release_sock(sk);
+ return rc;
+}
+
+
+static int pppox_create(struct socket *sock, int protocol)
+{
+ int rc = -EPROTOTYPE;
+
+ if (protocol < 0 || protocol > PX_MAX_PROTO)
+ goto out;
+
+ rc = -EPROTONOSUPPORT;
+ if (!pppox_protos[protocol] ||
+ !try_module_get(pppox_protos[protocol]->owner))
+ goto out;
+
+ rc = pppox_protos[protocol]->create(sock);
+ if (!rc) {
+ /* We get to set the ioctl handler. */
+ /* For everything else, pppox is just a shell. */
+ sock->ops->ioctl = pppox_ioctl;
+ }
+ module_put(pppox_protos[protocol]->owner);
+out:
+ return rc;
+}
+
+static struct net_proto_family pppox_proto_family = {
+ .family = PF_PPPOX,
+ .create = pppox_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppox_init(void)
+{
+ return sock_register(&pppox_proto_family);
+}
+
+static void __exit pppox_exit(void)
+{
+ sock_unregister(PF_PPPOX);
+}
+
+module_init(pppox_init);
+module_exit(pppox_exit);
+
+MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
+MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
new file mode 100644
index 000000000000..434e4ff967b2
--- /dev/null
+++ b/drivers/net/r8169.c
@@ -0,0 +1,2523 @@
+/*
+=========================================================================
+ r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
+ --------------------------------------------------------------------
+
+ History:
+ Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
+ May 20 2002 - Add link status force-mode and TBI mode support.
+ 2004 - Massive updates. See kernel SCM system for details.
+=========================================================================
+ 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
+ Command: 'insmod r8169 media = SET_MEDIA'
+	Ex: 'insmod r8169 media = 0x04' will force the PHY to operate in 100Mbps half-duplex.
+
+ SET_MEDIA can be:
+ _10_Half = 0x01
+ _10_Full = 0x02
+ _100_Half = 0x04
+ _100_Full = 0x08
+ _1000_Full = 0x10
+
+ 2. Support TBI mode.
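+
+	An equivalent forced setting via ethtool (illustrative only; the
+	interface name "eth0" is an assumption) would look like:
+	'ethtool -s eth0 speed 100 duplex full autoneg off'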
+=========================================================================
+VERSION 1.1 <2002/10/4>
+
+	Bits 4:0 of MII register 4 form the "selector field" and have to be
+	00001b to indicate support of IEEE std 802.3 during the NWay process of
+	exchanging Link Code Words (FLP).
+
+VERSION 1.2 <2002/11/30>
+
+ - Large style cleanup
+ - Use ether_crc in stock kernel (linux/crc32.h)
+ - Copy mc_filter setup code from 8139cp
+ (includes an optimization, and avoids set_bit use)
+
+VERSION 1.6LK <2004/04/14>
+
+ - Merge of Realtek's version 1.6
+ - Conversion to DMA API
+ - Suspend/resume
+ - Endianness
+ - Misc Rx/Tx bugs
+
+VERSION 2.2LK <2005/01/25>
+
+ - RX csum, TX csum/SG, TSO
+ - VLAN
+ - baby (< 7200) Jumbo frames support
+ - Merge of Realtek's version 2.2 (new phy)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define RTL8169_VERSION "2.2LK"
+#define MODULENAME "r8169"
+#define PFX MODULENAME ": "
+
+#ifdef RTL8169_DEBUG
+#define assert(expr) \
+ if(!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
+#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
+#else
+#define assert(expr) do {} while (0)
+#define dprintk(fmt, args...) do {} while (0)
+#endif /* RTL8169_DEBUG */
+
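+/*
+ * Number of free Tx descriptors; one slot is deliberately left unused so
+ * that a completely full ring can be told apart from an empty one.
+ */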
+#define TX_BUFFS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+
+#ifdef CONFIG_R8169_NAPI
+#define rtl8169_rx_skb netif_receive_skb
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
+#define rtl8169_rx_quota(count, quota) min(count, quota)
+#else
+#define rtl8169_rx_skb netif_rx
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
+#define rtl8169_rx_quota(count, quota) count
+#endif
+
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int num_media = 0;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* MAC address length */
+#define MAC_ADDR_LEN 6
+
+#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
+#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
+#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
+#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+
+#define R8169_REGS_SIZE 256
+#define R8169_NAPI_WEIGHT 64
+#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define RX_BUF_SIZE 1536 /* Rx Buffer size */
+#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
+#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+
+#define RTL8169_TX_TIMEOUT (6*HZ)
+#define RTL8169_PHY_TIMEOUT (10*HZ)
+
+/* write/read MMIO register */
+#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
+#define RTL_R8(reg) readb (ioaddr + (reg))
+#define RTL_R16(reg) readw (ioaddr + (reg))
+#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+
+enum mac_version {
+ RTL_GIGA_MAC_VER_B = 0x00,
+ /* RTL_GIGA_MAC_VER_C = 0x03, */
+ RTL_GIGA_MAC_VER_D = 0x01,
+ RTL_GIGA_MAC_VER_E = 0x02,
+ RTL_GIGA_MAC_VER_X = 0x04 /* Greater than RTL_GIGA_MAC_VER_E */
+};
+
+enum phy_version {
+ RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+ RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+ RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+ RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
+ RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
+ RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
+};
+
+
+#define _R(NAME,MAC,MASK) \
+ { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
+
+static const struct {
+ const char *name;
+ u8 mac_version;
+ u32 RxConfigMask; /* Clears the bits supported by this chip */
+} rtl_chip_info[] = {
+ _R("RTL8169", RTL_GIGA_MAC_VER_B, 0xff7e1880),
+ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_D, 0xff7e1880),
+ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_E, 0xff7e1880),
+ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_X, 0xff7e1880),
+};
+#undef _R
+
+static struct pci_device_id rtl8169_pci_tbl[] = {
+ {0x10ec, 0x8169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x1186, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
+
+static int rx_copybreak = 200;
+static int use_dac;
+
+enum RTL8169_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAR0 = 8, /* Multicast filter. */
+ TxDescStartAddrLow = 0x20,
+ TxDescStartAddrHigh = 0x24,
+ TxHDescStartAddrLow = 0x28,
+ TxHDescStartAddrHigh = 0x2c,
+ FLASH = 0x30,
+ ERSR = 0x36,
+ ChipCmd = 0x37,
+ TxPoll = 0x38,
+ IntrMask = 0x3C,
+ IntrStatus = 0x3E,
+ TxConfig = 0x40,
+ RxConfig = 0x44,
+ RxMissed = 0x4C,
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+ MultiIntr = 0x5C,
+ PHYAR = 0x60,
+ TBICSR = 0x64,
+ TBI_ANAR = 0x68,
+ TBI_LPAR = 0x6A,
+ PHYstatus = 0x6C,
+ RxMaxSize = 0xDA,
+ CPlusCmd = 0xE0,
+ IntrMitigate = 0xE2,
+ RxDescAddrLow = 0xE4,
+ RxDescAddrHigh = 0xE8,
+ EarlyTxThres = 0xEC,
+ FuncEvent = 0xF0,
+ FuncEventMask = 0xF4,
+ FuncPresetState = 0xF8,
+ FuncForceEvent = 0xFC,
+};
+
+enum RTL8169_register_content {
+ /* InterruptStatusBits */
+ SYSErr = 0x8000,
+ PCSTimeout = 0x4000,
+ SWInt = 0x0100,
+ TxDescUnavail = 0x80,
+ RxFIFOOver = 0x40,
+ LinkChg = 0x20,
+ RxOverflow = 0x10,
+ TxErr = 0x08,
+ TxOK = 0x04,
+ RxErr = 0x02,
+ RxOK = 0x01,
+
+ /* RxStatusDesc */
+ RxRES = 0x00200000,
+ RxCRC = 0x00080000,
+ RxRUNT = 0x00100000,
+ RxRWT = 0x00400000,
+
+ /* ChipCmdBits */
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+
+ /* Cfg9346Bits */
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xC0,
+
+ /* rx_mode_bits */
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+
+ /* RxConfigBits */
+ RxCfgFIFOShift = 13,
+ RxCfgDMAShift = 8,
+
+ /* TxConfigBits */
+ TxInterFrameGapShift = 24,
+	TxDMAShift = 8,	/* DMA burst value (0-7) is shifted by this many bits */
+
+ /* TBICSR p.28 */
+ TBIReset = 0x80000000,
+ TBILoopback = 0x40000000,
+ TBINwEnable = 0x20000000,
+ TBINwRestart = 0x10000000,
+ TBILinkOk = 0x02000000,
+ TBINwComplete = 0x01000000,
+
+ /* CPlusCmd p.31 */
+ RxVlan = (1 << 6),
+ RxChkSum = (1 << 5),
+ PCIDAC = (1 << 4),
+ PCIMulRW = (1 << 3),
+
+ /* rtl8169_PHYstatus */
+ TBI_Enable = 0x80,
+ TxFlowCtrl = 0x40,
+ RxFlowCtrl = 0x20,
+ _1000bpsF = 0x10,
+ _100bps = 0x08,
+ _10bps = 0x04,
+ LinkStatus = 0x02,
+ FullDup = 0x01,
+
+ /* GIGABIT_PHY_registers */
+ PHY_CTRL_REG = 0,
+ PHY_STAT_REG = 1,
+ PHY_AUTO_NEGO_REG = 4,
+ PHY_1000_CTRL_REG = 9,
+
+ /* GIGABIT_PHY_REG_BIT */
+ PHY_Restart_Auto_Nego = 0x0200,
+ PHY_Enable_Auto_Nego = 0x1000,
+
+ /* PHY_STAT_REG = 1 */
+ PHY_Auto_Neco_Comp = 0x0020,
+
+ /* PHY_AUTO_NEGO_REG = 4 */
+ PHY_Cap_10_Half = 0x0020,
+ PHY_Cap_10_Full = 0x0040,
+ PHY_Cap_100_Half = 0x0080,
+ PHY_Cap_100_Full = 0x0100,
+
+ /* PHY_1000_CTRL_REG = 9 */
+ PHY_Cap_1000_Full = 0x0200,
+
+ PHY_Cap_Null = 0x0,
+
+ /* _MediaType */
+ _10_Half = 0x01,
+ _10_Full = 0x02,
+ _100_Half = 0x04,
+ _100_Full = 0x08,
+ _1000_Full = 0x10,
+
+ /* _TBICSRBit */
+ TBILinkOK = 0x02000000,
+};
+
+enum _DescStatusBit {
+ DescOwn = (1 << 31), /* Descriptor is owned by NIC */
+ RingEnd = (1 << 30), /* End of descriptor ring */
+ FirstFrag = (1 << 29), /* First segment of a packet */
+ LastFrag = (1 << 28), /* Final segment of a packet */
+
+ /* Tx private */
+ LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
+ MSSShift = 16, /* MSS value position */
+ MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
+ IPCS = (1 << 18), /* Calculate IP checksum */
+ UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
+ TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+
+ /* Rx private */
+ PID1 = (1 << 18), /* Protocol ID bit 1/2 */
+ PID0 = (1 << 17), /* Protocol ID bit 2/2 */
+
+#define RxProtoUDP (PID1)
+#define RxProtoTCP (PID0)
+#define RxProtoIP (PID1 | PID0)
+#define RxProtoMask RxProtoIP
+
+ IPFail = (1 << 16), /* IP checksum failed */
+ UDPFail = (1 << 15), /* UDP/IP checksum failed */
+ TCPFail = (1 << 14), /* TCP/IP checksum failed */
+ RxVlanTag = (1 << 16), /* VLAN tag available */
+};
+
+#define RsvdMask 0x3fffc000
+
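+/*
+ * Hardware descriptors, little-endian on the wire (note the cpu_to_le*()
+ * conversions elsewhere): opts1 holds command/status and length bits,
+ * opts2 the VLAN data, addr the DMA address of the buffer.
+ */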
+struct TxDesc {
+ u32 opts1;
+ u32 opts2;
+ u64 addr;
+};
+
+struct RxDesc {
+ u32 opts1;
+ u32 opts2;
+ u64 addr;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ u32 len;
+ u8 __pad[sizeof(void *) - sizeof(u32)];
+};
+
+struct rtl8169_private {
+	void __iomem *mmio_addr;	/* ioremapped MMIO base address */
+	struct pci_dev *pci_dev;	/* PCI device this interface belongs to */
+	struct net_device_stats stats;	/* statistics of the net device */
+	spinlock_t lock;		/* serializes chip and driver state access */
+ int chipset;
+ int mac_version;
+ int phy_version;
+	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
+	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
+ u32 dirty_rx;
+ u32 dirty_tx;
+ struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
+ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
+ dma_addr_t TxPhyAddr;
+ dma_addr_t RxPhyAddr;
+ struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
+ unsigned rx_buf_sz;
+ struct timer_list timer;
+ u16 cp_cmd;
+ u16 intr_mask;
+ int phy_auto_nego_reg;
+ int phy_1000_ctrl_reg;
+#ifdef CONFIG_R8169_VLAN
+ struct vlan_group *vlgrp;
+#endif
+ int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+ void (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(void __iomem *);
+ unsigned int (*phy_reset_pending)(void __iomem *);
+ unsigned int (*link_ok)(void __iomem *);
+ struct work_struct task;
+};
+
+MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@oss.sgi.com>");
+MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
+module_param_array(media, int, &num_media, 0);
+module_param(rx_copybreak, int, 0);
+module_param(use_dac, int, 0);
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RTL8169_VERSION);
+
+static int rtl8169_open(struct net_device *dev);
+static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance,
+ struct pt_regs *regs);
+static int rtl8169_init_ring(struct net_device *dev);
+static void rtl8169_hw_start(struct net_device *dev);
+static int rtl8169_close(struct net_device *dev);
+static void rtl8169_set_rx_mode(struct net_device *dev);
+static void rtl8169_tx_timeout(struct net_device *dev);
+static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
+static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
+ void __iomem *);
+static int rtl8169_change_mtu(struct net_device *netdev, int new_mtu);
+static void rtl8169_down(struct net_device *dev);
+
+#ifdef CONFIG_R8169_NAPI
+static int rtl8169_poll(struct net_device *dev, int *budget);
+#endif
+
+static const u16 rtl8169_intr_mask =
+ SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
+static const u16 rtl8169_napi_event =
+ RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
+static const unsigned int rtl8169_rx_config =
+ (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
+
+#define PHY_Cap_10_Half_Or_Less PHY_Cap_10_Half
+#define PHY_Cap_10_Full_Or_Less (PHY_Cap_10_Full | PHY_Cap_10_Half_Or_Less)
+#define PHY_Cap_100_Half_Or_Less (PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less)
+#define PHY_Cap_100_Full_Or_Less (PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less)
+
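+/*
+ * MII/PHY access goes through the PHYAR register: bit 31 requests a write
+ * and doubles as the busy flag polled below, the register address sits
+ * above the 16-bit data field, and the low 16 bits carry the data.
+ */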
+static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
+{
+ int i;
+
+ RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
+ udelay(1000);
+
+ for (i = 2000; i > 0; i--) {
+ /* Check if the RTL8169 has completed writing to the specified MII register */
+ if (!(RTL_R32(PHYAR) & 0x80000000))
+ break;
+ udelay(100);
+ }
+}
+
+static int mdio_read(void __iomem *ioaddr, int RegAddr)
+{
+ int i, value = -1;
+
+ RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
+ udelay(1000);
+
+ for (i = 2000; i > 0; i--) {
+ /* Check if the RTL8169 has completed retrieving data from the specified MII register */
+ if (RTL_R32(PHYAR) & 0x80000000) {
+ value = (int) (RTL_R32(PHYAR) & 0xFFFF);
+ break;
+ }
+ udelay(100);
+ }
+ return value;
+}
+
+static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+{
+ RTL_W16(IntrMask, 0x0000);
+
+ RTL_W16(IntrStatus, 0xffff);
+}
+
+static void rtl8169_asic_down(void __iomem *ioaddr)
+{
+ RTL_W8(ChipCmd, 0x00);
+ rtl8169_irq_mask_and_ack(ioaddr);
+ RTL_R16(CPlusCmd);
+}
+
+static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBIReset;
+}
+
+static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
+{
+ return mdio_read(ioaddr, 0) & 0x8000;
+}
+
+static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBILinkOk;
+}
+
+static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R8(PHYstatus) & LinkStatus;
+}
+
+static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
+{
+ RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
+}
+
+static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
+{
+ unsigned int val;
+
+ val = (mdio_read(ioaddr, PHY_CTRL_REG) | 0x8000) & 0xffff;
+ mdio_write(ioaddr, PHY_CTRL_REG, val);
+}
+
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp, void __iomem *ioaddr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->link_ok(ioaddr)) {
+ netif_carrier_on(dev);
+ printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ } else
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
+{
+ struct {
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 media;
+ } link_settings[] = {
+ { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
+ { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
+ { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
+ { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
+ { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
+ /* Make TBI happy */
+ { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
+ }, *p;
+ unsigned char option;
+
+ option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
+
+ if ((option != 0xff) && !idx)
+ printk(KERN_WARNING PFX "media option is deprecated.\n");
+
+ for (p = link_settings; p->media != 0xff; p++) {
+ if (p->media == option)
+ break;
+ }
+ *autoneg = p->autoneg;
+ *speed = p->speed;
+ *duplex = p->duplex;
+}
+
+static void rtl8169_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ strcpy(info->driver, MODULENAME);
+ strcpy(info->version, RTL8169_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+}
+
+static int rtl8169_get_regs_len(struct net_device *dev)
+{
+ return R8169_REGS_SIZE;
+}
+
+static int rtl8169_set_speed_tbi(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int ret = 0;
+ u32 reg;
+
+ reg = RTL_R32(TBICSR);
+ if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
+ (duplex == DUPLEX_FULL)) {
+ RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
+ } else if (autoneg == AUTONEG_ENABLE)
+ RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
+ else {
+ printk(KERN_WARNING PFX
+ "%s: incorrect speed setting refused in TBI mode\n",
+ dev->name);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8169_set_speed_xmii(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int auto_nego, giga_ctrl;
+
+ auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
+ auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_10_Full |
+ PHY_Cap_100_Half | PHY_Cap_100_Full);
+ giga_ctrl = mdio_read(ioaddr, PHY_1000_CTRL_REG);
+ giga_ctrl &= ~(PHY_Cap_1000_Full | PHY_Cap_Null);
+
+ if (autoneg == AUTONEG_ENABLE) {
+ auto_nego |= (PHY_Cap_10_Half | PHY_Cap_10_Full |
+ PHY_Cap_100_Half | PHY_Cap_100_Full);
+ giga_ctrl |= PHY_Cap_1000_Full;
+ } else {
+ if (speed == SPEED_10)
+ auto_nego |= PHY_Cap_10_Half | PHY_Cap_10_Full;
+ else if (speed == SPEED_100)
+ auto_nego |= PHY_Cap_100_Half | PHY_Cap_100_Full;
+ else if (speed == SPEED_1000)
+ giga_ctrl |= PHY_Cap_1000_Full;
+
+ if (duplex == DUPLEX_HALF)
+ auto_nego &= ~(PHY_Cap_10_Full | PHY_Cap_100_Full);
+ }
+
+ tp->phy_auto_nego_reg = auto_nego;
+ tp->phy_1000_ctrl_reg = giga_ctrl;
+
+ mdio_write(ioaddr, PHY_AUTO_NEGO_REG, auto_nego);
+ mdio_write(ioaddr, PHY_1000_CTRL_REG, giga_ctrl);
+ mdio_write(ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego |
+ PHY_Restart_Auto_Nego);
+ return 0;
+}
+
+static int rtl8169_set_speed(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = tp->set_speed(dev, autoneg, speed, duplex);
+
+ if (netif_running(dev) && (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
+
+ return ret;
+}
+
+static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return ret;
+}
+
+static u32 rtl8169_get_rx_csum(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return tp->cp_cmd & RxChkSum;
+}
+
+static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (data)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_R8169_VLAN
+
+static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
+static void rtl8169_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ tp->vlgrp = grp;
+ if (tp->vlgrp)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->vlgrp)
+ tp->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
+ struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->opts2);
+ int ret;
+
+ if (tp->vlgrp && (opts2 & RxVlanTag)) {
+ rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
+ swab16(opts2 & 0xffff));
+ ret = 0;
+ } else
+ ret = -1;
+ desc->opts2 = 0;
+ return ret;
+}
+
+#else /* !CONFIG_R8169_VLAN */
+
+static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
+ struct sk_buff *skb)
+{
+ return -1;
+}
+
+#endif
+
+static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 status;
+
+ cmd->supported =
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ status = RTL_R32(TBICSR);
+ cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->autoneg = !!(status & TBINwEnable);
+
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL; /* Always set */
+}
+
+static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 status;
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP;
+
+ cmd->autoneg = 1;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+
+ if (tp->phy_auto_nego_reg & PHY_Cap_10_Half)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (tp->phy_auto_nego_reg & PHY_Cap_10_Full)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (tp->phy_auto_nego_reg & PHY_Cap_100_Half)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (tp->phy_auto_nego_reg & PHY_Cap_100_Full)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ status = RTL_R8(PHYstatus);
+
+ if (status & _1000bpsF)
+ cmd->speed = SPEED_1000;
+ else if (status & _100bps)
+ cmd->speed = SPEED_100;
+ else if (status & _10bps)
+ cmd->speed = SPEED_10;
+
+ cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
+static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ return 0;
+}
+
+static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len > R8169_REGS_SIZE)
+ regs->len = R8169_REGS_SIZE;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ memcpy_fromio(p, tp->mmio_addr, regs->len);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static struct ethtool_ops rtl8169_ethtool_ops = {
+ .get_drvinfo = rtl8169_get_drvinfo,
+ .get_regs_len = rtl8169_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_settings = rtl8169_get_settings,
+ .set_settings = rtl8169_set_settings,
+ .get_rx_csum = rtl8169_get_rx_csum,
+ .set_rx_csum = rtl8169_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+ .get_regs = rtl8169_get_regs,
+};
+
+static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
+ int bitval)
+{
+ int val;
+
+ val = mdio_read(ioaddr, reg);
+ val = (bitval == 1) ?
+ val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
+ mdio_write(ioaddr, reg, val & 0xffff);
+}
+
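+/*
+ * The MAC revision is derived from the high bits of TxConfig; the table
+ * below ends with a catch-all entry so the lookup always terminates.
+ */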
+static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+{
+ const struct {
+ u32 mask;
+ int mac_version;
+ } mac_info[] = {
+ { 0x1 << 28, RTL_GIGA_MAC_VER_X },
+ { 0x1 << 26, RTL_GIGA_MAC_VER_E },
+ { 0x1 << 23, RTL_GIGA_MAC_VER_D },
+ { 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
+ }, *p = mac_info;
+ u32 reg;
+
+ reg = RTL_R32(TxConfig) & 0x7c800000;
+ while ((reg & p->mask) != p->mask)
+ p++;
+ tp->mac_version = p->mac_version;
+}
+
+static void rtl8169_print_mac_version(struct rtl8169_private *tp)
+{
+ struct {
+ int version;
+ char *msg;
+ } mac_print[] = {
+ { RTL_GIGA_MAC_VER_E, "RTL_GIGA_MAC_VER_E" },
+ { RTL_GIGA_MAC_VER_D, "RTL_GIGA_MAC_VER_D" },
+ { RTL_GIGA_MAC_VER_B, "RTL_GIGA_MAC_VER_B" },
+ { 0, NULL }
+ }, *p;
+
+ for (p = mac_print; p->msg; p++) {
+ if (tp->mac_version == p->version) {
+ dprintk("mac_version == %s (%04d)\n", p->msg,
+ p->version);
+ return;
+ }
+ }
+ dprintk("mac_version == Unknown\n");
+}
+
+static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+{
+ const struct {
+ u16 mask;
+ u16 set;
+ int phy_version;
+ } phy_info[] = {
+ { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
+ { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
+ { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
+ { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
+ }, *p = phy_info;
+ u16 reg;
+
+ reg = mdio_read(ioaddr, 3) & 0xffff;
+ while ((reg & p->mask) != p->set)
+ p++;
+ tp->phy_version = p->phy_version;
+}
+
+static void rtl8169_print_phy_version(struct rtl8169_private *tp)
+{
+ struct {
+ int version;
+ char *msg;
+ u32 reg;
+ } phy_print[] = {
+ { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
+ { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
+ { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
+ { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
+ { 0, NULL, 0x0000 }
+ }, *p;
+
+ for (p = phy_print; p->msg; p++) {
+ if (tp->phy_version == p->version) {
+ dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
+ return;
+ }
+ }
+ dprintk("phy_version == Unknown\n");
+}
+
+static void rtl8169_hw_phy_config(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct {
+ u16 regs[5]; /* Beware of bit-sign propagation */
+ } phy_magic[5] = { {
+ { 0x0000, //w 4 15 12 0
+ 0x00a1, //w 3 15 0 00a1
+ 0x0008, //w 2 15 0 0008
+ 0x1020, //w 1 15 0 1020
+ 0x1000 } },{ //w 0 15 0 1000
+ { 0x7000, //w 4 15 12 7
+ 0xff41, //w 3 15 0 ff41
+ 0xde60, //w 2 15 0 de60
+ 0x0140, //w 1 15 0 0140
+ 0x0077 } },{ //w 0 15 0 0077
+ { 0xa000, //w 4 15 12 a
+ 0xdf01, //w 3 15 0 df01
+ 0xdf20, //w 2 15 0 df20
+ 0xff95, //w 1 15 0 ff95
+ 0xfa00 } },{ //w 0 15 0 fa00
+ { 0xb000, //w 4 15 12 b
+ 0xff41, //w 3 15 0 ff41
+ 0xde20, //w 2 15 0 de20
+ 0x0140, //w 1 15 0 0140
+ 0x00bb } },{ //w 0 15 0 00bb
+ { 0xf000, //w 4 15 12 f
+ 0xdf01, //w 3 15 0 df01
+ 0xdf20, //w 2 15 0 df20
+ 0xff95, //w 1 15 0 ff95
+ 0xbf00 } //w 0 15 0 bf00
+ }
+ }, *p = phy_magic;
+ int i;
+
+ rtl8169_print_mac_version(tp);
+ rtl8169_print_phy_version(tp);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_B)
+ return;
+ if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
+ return;
+
+ dprintk("MAC version != 0 && PHY version == 0 or 1\n");
+ dprintk("Do final_reg2.cfg\n");
+
+ /* Shazam ! */
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_X) {
+ mdio_write(ioaddr, 31, 0x0001);
+ mdio_write(ioaddr, 9, 0x273a);
+ mdio_write(ioaddr, 14, 0x7bfb);
+ mdio_write(ioaddr, 27, 0x841e);
+
+ mdio_write(ioaddr, 31, 0x0002);
+ mdio_write(ioaddr, 1, 0x90d0);
+ mdio_write(ioaddr, 31, 0x0000);
+ return;
+ }
+
+ /* phy config for RTL8169s mac_version C chip */
+ mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
+ mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
+ mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
+ rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
+
+ for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
+ int val, pos = 4;
+
+ val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
+ mdio_write(ioaddr, pos, val);
+ while (--pos >= 0)
+ mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
+ rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
+ rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
+ }
+ mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+ struct net_device *dev = (struct net_device *)__opaque;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long timeout = RTL8169_PHY_TIMEOUT;
+
+ assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
+ assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
+
+ if (!(tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ return;
+
+ spin_lock_irq(&tp->lock);
+
+ if (tp->phy_reset_pending(ioaddr)) {
+ /*
+		 * A busy loop could burn quite a few cycles on a modern CPU.
+ * Let's delay the execution of the timer for a few ticks.
+ */
+ timeout = HZ/10;
+ goto out_mod_timer;
+ }
+
+ if (tp->link_ok(ioaddr))
+ goto out_unlock;
+
+ printk(KERN_WARNING PFX "%s: PHY reset until link up\n", dev->name);
+
+ tp->phy_reset_enable(ioaddr);
+
+out_mod_timer:
+ mod_timer(timer, jiffies + timeout);
+out_unlock:
+ spin_unlock_irq(&tp->lock);
+}
+
+static inline void rtl8169_delete_timer(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+
+ if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+ (tp->phy_version >= RTL_GIGA_PHY_VER_H))
+ return;
+
+ del_timer_sync(timer);
+}
+
+static inline void rtl8169_request_timer(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+
+ if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+ (tp->phy_version >= RTL_GIGA_PHY_VER_H))
+ return;
+
+ init_timer(timer);
+ timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
+ timer->data = (unsigned long)(dev);
+ timer->function = rtl8169_phy_timer;
+ add_timer(timer);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void rtl8169_netpoll(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ disable_irq(pdev->irq);
+ rtl8169_interrupt(pdev->irq, dev, NULL);
+ enable_irq(pdev->irq);
+}
+#endif
+
+static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ iounmap(ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static int __devinit
+rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+ void __iomem **ioaddr_out)
+{
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct rtl8169_private *tp;
+ int rc = -ENOMEM, i, acpi_idle_state = 0, pm_cap;
+
+ assert(ioaddr_out != NULL);
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev(sizeof (*tp));
+ if (dev == NULL) {
+ printk(KERN_ERR PFX "unable to alloc new ethernet\n");
+ goto err_out;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ tp = netdev_priv(dev);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ printk(KERN_ERR PFX "%s: enable failure\n", pci_name(pdev));
+ goto err_out_free_dev;
+ }
+
+ rc = pci_set_mwi(pdev);
+ if (rc < 0)
+ goto err_out_disable;
+
+ /* save power state before pci_enable_device overwrites it */
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap) {
+ u16 pwr_command;
+
+ pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
+ acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+ } else {
+ printk(KERN_ERR PFX
+ "Cannot find PowerManagement capability, aborting.\n");
+ goto err_out_mwi;
+ }
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX
+ "region #1 not an MMIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi;
+ }
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
+ printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc) {
+ printk(KERN_ERR PFX "%s: could not request regions.\n",
+ pci_name(pdev));
+ goto err_out_mwi;
+ }
+
+ tp->cp_cmd = PCIMulRW | RxChkSum;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc < 0) {
+ printk(KERN_ERR PFX "DMA configuration failed.\n");
+ goto err_out_free_res;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
+ if (ioaddr == NULL) {
+ printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res;
+ }
+
+ /* Unneeded ? Don't mess with Mrs. Murphy. */
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+ udelay(10);
+ }
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, ioaddr);
+ rtl8169_get_phy_version(tp, ioaddr);
+
+ rtl8169_print_mac_version(tp);
+ rtl8169_print_phy_version(tp);
+
+ for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
+ if (tp->mac_version == rtl_chip_info[i].mac_version)
+ break;
+ }
+ if (i < 0) {
+ /* Unknown chip: assume array element #0, original RTL-8169 */
+ printk(KERN_DEBUG PFX
+ "PCI device %s: unknown chip version, assuming %s\n",
+ pci_name(pdev), rtl_chip_info[0].name);
+ i++;
+ }
+ tp->chipset = i;
+
+ *ioaddr_out = ioaddr;
+ *dev_out = dev;
+out:
+ return rc;
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_mwi:
+ pci_clear_mwi(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+
+err_out_free_dev:
+ free_netdev(dev);
+err_out:
+ *ioaddr_out = NULL;
+ *dev_out = NULL;
+ goto out;
+}
+
+static int __devinit
+rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtl8169_private *tp;
+ void __iomem *ioaddr = NULL;
+ static int board_idx = -1;
+ static int printed_version = 0;
+ u8 autoneg, duplex;
+ u16 speed;
+ int i, rc;
+
+ assert(pdev != NULL);
+ assert(ent != NULL);
+
+ board_idx++;
+
+ if (!printed_version) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ printed_version = 1;
+ }
+
+ rc = rtl8169_init_board(pdev, &dev, &ioaddr);
+ if (rc)
+ return rc;
+
+ tp = netdev_priv(dev);
+ assert(ioaddr != NULL);
+
+ if (RTL_R8(PHYstatus) & TBI_Enable) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+
+ tp->phy_1000_ctrl_reg = PHY_Cap_1000_Full; /* Implied by TBI */
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ }
+
+ /* Get MAC address. FIXME: read EEPROM */
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+
+ dev->open = rtl8169_open;
+ dev->hard_start_xmit = rtl8169_start_xmit;
+ dev->get_stats = rtl8169_get_stats;
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->stop = rtl8169_close;
+ dev->tx_timeout = rtl8169_tx_timeout;
+ dev->set_multicast_list = rtl8169_set_rx_mode;
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) ioaddr;
+ dev->change_mtu = rtl8169_change_mtu;
+
+#ifdef CONFIG_R8169_NAPI
+ dev->poll = rtl8169_poll;
+ dev->weight = R8169_NAPI_WEIGHT;
+ printk(KERN_INFO PFX "NAPI enabled\n");
+#endif
+
+#ifdef CONFIG_R8169_VLAN
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = rtl8169_vlan_rx_register;
+ dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = rtl8169_netpoll;
+#endif
+
+ tp->intr_mask = 0xffff;
+ tp->pci_dev = pdev;
+ tp->mmio_addr = ioaddr;
+
+ spin_lock_init(&tp->lock);
+
+ rc = register_netdev(dev);
+ if (rc) {
+ rtl8169_release_board(pdev, dev, ioaddr);
+ return rc;
+ }
+
+ printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n", dev->name,
+ rtl_chip_info[tp->chipset].name);
+
+ pci_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "%s: %s at 0x%lx, "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+ "IRQ %d\n",
+ dev->name,
+ rtl_chip_info[ent->driver_data].name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5], dev->irq);
+
+ rtl8169_hw_phy_config(dev);
+
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+
+ if (tp->mac_version < RTL_GIGA_MAC_VER_E) {
+ dprintk("Set PCI Latency=0x40\n");
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
+ }
+
+ rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
+
+ rtl8169_set_speed(dev, autoneg, speed, duplex);
+
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+
+ return 0;
+}
+
+static void __devexit
+rtl8169_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ assert(dev != NULL);
+ assert(tp != NULL);
+
+ unregister_netdev(dev);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&tp->lock, flags);
+
+ /* Disable interrupts, stop Rx and Tx */
+ RTL_W16(IntrMask, 0);
+ RTL_W8(ChipCmd, 0);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+ RTL_W32(RxMissed, 0);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
+static int rtl8169_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_attach(dev);
+ rtl8169_hw_start(dev);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
+ struct net_device *dev)
+{
+ unsigned int mtu = dev->mtu;
+
+ tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+}
+
+static int rtl8169_open(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ int retval;
+
+ rtl8169_set_rxbufsize(tp, dev);
+
+ retval =
+ request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval < 0)
+ goto out;
+
+ retval = -ENOMEM;
+
+ /*
+	 * Rx and Tx descriptors need 256 byte alignment.
+ * pci_alloc_consistent provides more.
+ */
+ tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr);
+ if (!tp->TxDescArray)
+ goto err_free_irq;
+
+ tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr);
+ if (!tp->RxDescArray)
+ goto err_free_tx;
+
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx;
+
+ INIT_WORK(&tp->task, NULL, dev);
+
+ rtl8169_hw_start(dev);
+
+ rtl8169_request_timer(dev);
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+out:
+ return retval;
+
+err_free_rx:
+ pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+err_free_tx:
+ pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+err_free_irq:
+ free_irq(dev->irq, dev);
+ goto out;
+}
+
+static void rtl8169_hw_reset(void __iomem *ioaddr)
+{
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ /* Reset the chipset */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+}
+
+static void
+rtl8169_hw_start(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 i;
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+ udelay(10);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(EarlyTxThres, EarlyTxThld);
+
+ /* For gigabit rtl8169, MTU + header + CRC + VLAN */
+ RTL_W16(RxMaxSize, tp->rx_buf_sz);
+
+ /* Set Rx Config register */
+ i = rtl8169_rx_config |
+ (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ RTL_W32(RxConfig, i);
+
+ /* Set DMA burst size and Interframe Gap Time */
+ RTL_W32(TxConfig,
+ (TX_DMA_BURST << TxDMAShift) | (InterFrameGap <<
+ TxInterFrameGapShift));
+ tp->cp_cmd |= RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_D) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_E)) {
+ dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
+ tp->cp_cmd |= (1 << 14) | PCIMulRW;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ }
+
+ /*
+ * Undocumented corner. Supposedly:
+ * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+ */
+ RTL_W16(IntrMitigate, 0x0000);
+
+ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
+ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
+ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
+ RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+ udelay(10);
+
+ RTL_W32(RxMissed, 0);
+
+ rtl8169_set_rx_mode(dev);
+
+ /* no early-rx interrupts */
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16(IntrMask, rtl8169_intr_mask);
+
+ netif_start_queue(dev);
+}
+
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int ret = 0;
+
+ if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ if (!netif_running(dev))
+ goto out;
+
+ rtl8169_down(dev);
+
+ rtl8169_set_rxbufsize(tp, dev);
+
+ ret = rtl8169_init_ring(dev);
+ if (ret < 0)
+ goto out;
+
+ netif_poll_enable(dev);
+
+ rtl8169_hw_start(dev);
+
+ rtl8169_request_timer(dev);
+
+out:
+ return ret;
+}
+
+static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
+{
+ desc->addr = 0x0badbadbadbadbadull;
+ desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
+}
+
+static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
+ struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+
+ pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(*sk_buff);
+ *sk_buff = NULL;
+ rtl8169_make_unusable_by_asic(desc);
+}
+
+static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+
+ desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
+}
+
+static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->addr = cpu_to_le64(mapping);
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+}
+
+static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+ struct RxDesc *desc, int rx_buf_sz)
+{
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret = 0;
+
+ skb = dev_alloc_skb(rx_buf_sz + NET_IP_ALIGN);
+ if (!skb)
+ goto err_out;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ *sk_buff = skb;
+
+ mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
+
+out:
+ return ret;
+
+err_out:
+ ret = -ENOMEM;
+ rtl8169_make_unusable_by_asic(desc);
+ goto out;
+}
+
+static void rtl8169_rx_clear(struct rtl8169_private *tp)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (tp->Rx_skbuff[i]) {
+ rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
+ tp->RxDescArray + i);
+ }
+ }
+}
+
+static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
+ u32 start, u32 end)
+{
+ u32 cur;
+
+ for (cur = start; end - cur > 0; cur++) {
+ int ret, i = cur % NUM_RX_DESC;
+
+ if (tp->Rx_skbuff[i])
+ continue;
+
+ ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+ tp->RxDescArray + i, tp->rx_buf_sz);
+ if (ret < 0)
+ break;
+ }
+ return cur - start;
+}
+
+static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+{
+ desc->opts1 |= cpu_to_le32(RingEnd);
+}
+
+static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+{
+ tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static int rtl8169_init_ring(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_ring_indexes(tp);
+
+ memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
+ memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+ goto err_out;
+
+ rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+
+ return 0;
+
+err_out:
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
+ struct TxDesc *desc)
+{
+ unsigned int len = tx_skb->len;
+
+ pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+ desc->opts1 = 0x00;
+ desc->opts2 = 0x00;
+ desc->addr = 0x00;
+ tx_skb->len = 0;
+}
+
+static void rtl8169_tx_clear(struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
+ unsigned int entry = i % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ unsigned int len = tx_skb->len;
+
+ if (len) {
+ struct sk_buff *skb = tx_skb->skb;
+
+ rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (skb) {
+ dev_kfree_skb(skb);
+ tx_skb->skb = NULL;
+ }
+ tp->stats.tx_dropped++;
+ }
+ }
+ tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ PREPARE_WORK(&tp->task, task, dev);
+ schedule_delayed_work(&tp->task, 4);
+}
+
+static void rtl8169_wait_for_quiescence(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ synchronize_irq(dev->irq);
+
+ /* Wait for any pending NAPI task to complete */
+ netif_poll_disable(dev);
+
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ netif_poll_enable(dev);
+}
+
+static void rtl8169_reinit_task(void *_data)
+{
+ struct net_device *dev = _data;
+ int ret;
+
+ if (netif_running(dev)) {
+ rtl8169_wait_for_quiescence(dev);
+ rtl8169_close(dev);
+ }
+
+ ret = rtl8169_open(dev);
+ if (unlikely(ret < 0)) {
+ if (net_ratelimit()) {
+			printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
+ " Rescheduling.\n", dev->name, ret);
+ }
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+}
+
+static void rtl8169_reset_task(void *_data)
+{
+ struct net_device *dev = _data;
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ rtl8169_wait_for_quiescence(dev);
+
+ rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
+ rtl8169_tx_clear(tp);
+
+ if (tp->dirty_rx == tp->cur_rx) {
+ rtl8169_init_ring_indexes(tp);
+ rtl8169_hw_start(dev);
+ netif_wake_queue(dev);
+ } else {
+ if (net_ratelimit()) {
+			printk(KERN_EMERG PFX "%s: Rx buffer shortage\n",
+ dev->name);
+ }
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ }
+}
+
+static void rtl8169_tx_timeout(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_hw_reset(tp->mmio_addr);
+
+ /* Let's wait a bit while any (async) irq lands on */
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
+static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
+ u32 opts1)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int cur_frag, entry;
+ struct TxDesc *txd;
+
+ entry = tp->cur_tx;
+ for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
+ skb_frag_t *frag = info->frags + cur_frag;
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % NUM_TX_DESC;
+
+ txd = tp->TxDescArray + entry;
+ len = frag->size;
+ addr = ((void *) page_address(frag->page)) + frag->page_offset;
+ mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+
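+		/* RingEnd is set only when this entry is the last one in the ring */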
+ /* anti gcc 2.95.3 bugware (sic) */
+ status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+
+ txd->opts1 = cpu_to_le32(status);
+ txd->addr = cpu_to_le64(mapping);
+
+ tp->tx_skb[entry].len = len;
+ }
+
+ if (cur_frag) {
+ tp->tx_skb[entry].skb = skb;
+ txd->opts1 |= cpu_to_le32(LastFrag);
+ }
+
+ return cur_frag;
+}
+
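+/*
+ * Choose the Tx offload bits for a skb: use TSO when an MSS is set and the
+ * device advertises it, otherwise request hardware checksum insertion for
+ * CHECKSUM_HW skbs based on the L4 protocol.
+ */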
+static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
+{
+ if (dev->features & NETIF_F_TSO) {
+ u32 mss = skb_shinfo(skb)->tso_size;
+
+ if (mss)
+ return LargeSend | ((mss & MSSMask) << MSSShift);
+ }
+ if (skb->ip_summed == CHECKSUM_HW) {
+ const struct iphdr *ip = skb->nh.iph;
+
+ if (ip->protocol == IPPROTO_TCP)
+ return IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ return IPCS | UDPCS;
+ WARN_ON(1); /* we need a WARN() */
+ }
+ return 0;
+}
+
+static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
+ struct TxDesc *txd = tp->TxDescArray + entry;
+ void __iomem *ioaddr = tp->mmio_addr;
+ dma_addr_t mapping;
+ u32 status, len;
+ u32 opts1;
+ int ret = 0;
+
+ if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ goto err_stop;
+ }
+
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop;
+
+ opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
+
+ frags = rtl8169_xmit_frags(tp, skb, opts1);
+ if (frags) {
+ len = skb_headlen(skb);
+ opts1 |= FirstFrag;
+ } else {
+ len = skb->len;
+
+ if (unlikely(len < ETH_ZLEN)) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (!skb)
+ goto err_update_stats;
+ len = ETH_ZLEN;
+ }
+
+ opts1 |= FirstFrag | LastFrag;
+ tp->tx_skb[entry].skb = skb;
+ }
+
+ mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+
+ wmb();
+
+ /* anti gcc 2.95.3 bugware (sic) */
+ status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ txd->opts1 = cpu_to_le32(status);
+
+ dev->trans_start = jiffies;
+
+ tp->cur_tx += frags + 1;
+
+ smp_wmb();
+
+ RTL_W8(TxPoll, 0x40); /* set polling bit */
+
+ if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ netif_stop_queue(dev);
+ smp_rmb();
+ if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ netif_wake_queue(dev);
+ }
+
+out:
+ return ret;
+
+err_stop:
+ netif_stop_queue(dev);
+ ret = 1;
+err_update_stats:
+ tp->stats.tx_dropped++;
+ goto out;
+}
+
+static void rtl8169_pcierr_interrupt(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+ printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
+ dev->name, pci_cmd, pci_status);
+
+ /*
+	 * The recovery sequence below admits a very elaborate explanation:
+ * - it seems to work;
+ * - I did not see what else could be done.
+ *
+ * Feel free to adjust to your needs.
+ */
+ pci_write_config_word(pdev, PCI_COMMAND,
+ pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+ pci_write_config_word(pdev, PCI_STATUS,
+ pci_status & (PCI_STATUS_DETECTED_PARITY |
+ PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+
+ /* The infamous DAC f*ckup only happens at boot time */
+ if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", dev->name);
+ tp->cp_cmd &= ~PCIDAC;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ dev->features &= ~NETIF_F_HIGHDMA;
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+
+ rtl8169_hw_reset(ioaddr);
+}
+
+static void
+rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ unsigned int dirty_tx, tx_left;
+
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
+
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+ tx_left = tp->cur_tx - dirty_tx;
+
+ while (tx_left > 0) {
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ u32 len = tx_skb->len;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ if (status & DescOwn)
+ break;
+
+ tp->stats.tx_bytes += len;
+ tp->stats.tx_packets++;
+
+ rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
+
+ if (status & LastFrag) {
+ dev_kfree_skb_irq(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+ tx_left--;
+ }
+
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ smp_wmb();
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
+{
+ u32 opts1 = le32_to_cpu(desc->opts1);
+ u32 status = opts1 & RxProtoMask;
+
+ if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
+ ((status == RxProtoIP) && !(opts1 & IPFail)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+ struct RxDesc *desc, int rx_buf_sz)
+{
+ int ret = -1;
+
+ if (pkt_size < rx_copybreak) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+ if (skb) {
+ skb_reserve(skb, NET_IP_ALIGN);
+ eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+ *sk_buff = skb;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int
+rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ unsigned int cur_rx, rx_left;
+ unsigned int delta, count;
+
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
+
+ cur_rx = tp->cur_rx;
+ rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
+
+ while (rx_left > 0) {
+ unsigned int entry = cur_rx % NUM_RX_DESC;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(tp->RxDescArray[entry].opts1);
+
+ if (status & DescOwn)
+ break;
+ if (status & RxRES) {
+ printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
+ tp->stats.rx_errors++;
+ if (status & (RxRWT | RxRUNT))
+ tp->stats.rx_length_errors++;
+ if (status & RxCRC)
+ tp->stats.rx_crc_errors++;
+ } else {
+ struct RxDesc *desc = tp->RxDescArray + entry;
+ struct sk_buff *skb = tp->Rx_skbuff[entry];
+ int pkt_size = (status & 0x00001FFF) - 4;
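+			/*
+			 * Default: the packet gets copied below, so the DMA
+			 * buffer is simply handed back to the device. When the
+			 * copy is skipped and the skb itself goes up the
+			 * stack, pci_action is switched to pci_unmap_single.
+			 */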
+ void (*pci_action)(struct pci_dev *, dma_addr_t,
+ size_t, int) = pci_dma_sync_single_for_device;
+
+ rtl8169_rx_csum(skb, desc);
+
+ pci_dma_sync_single_for_cpu(tp->pci_dev,
+ le64_to_cpu(desc->addr), tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
+ tp->rx_buf_sz)) {
+ pci_action = pci_unmap_single;
+ tp->Rx_skbuff[entry] = NULL;
+ }
+
+ pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
+ tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ skb->dev = dev;
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
+ rtl8169_rx_skb(skb);
+
+ dev->last_rx = jiffies;
+ tp->stats.rx_bytes += pkt_size;
+ tp->stats.rx_packets++;
+ }
+
+ cur_rx++;
+ rx_left--;
+ }
+
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+ if (!delta && count)
+ printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ tp->dirty_rx += delta;
+
+ /*
+	 * FIXME: until there is a periodic timer to try and refill the ring,
+	 * a temporary shortage may definitely kill the Rx process.
+	 * - disable the asic to try and avoid an overflow and kick it again
+	 *   after refill ?
+	 * - how do other drivers handle this condition (Uh oh...).
+ */
+ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+ printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+
+ return count;
+}
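+
+/*
+ * Illustrative sketch only -- not part of the driver -- of the periodic
+ * refill the FIXME above alludes to. The timer field tp->refill_timer and
+ * the function name are hypothetical, and a real implementation would also
+ * need to synchronize with the Rx path.
+ */
+static void rtl8169_refill_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	/* top the ring back up with whatever buffers can be allocated now */
+	if (tp->dirty_rx != tp->cur_rx)
+		tp->dirty_rx += rtl8169_rx_fill(tp, dev, tp->dirty_rx,
+						tp->cur_rx);
+
+	mod_timer(&tp->refill_timer, jiffies + HZ / 10);
+}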
+
+/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
+static irqreturn_t
+rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int boguscnt = max_interrupt_work;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int status;
+ int handled = 0;
+
+ do {
+ status = RTL_R16(IntrStatus);
+
+ /* hotplug/major error/no more work/shared irq */
+ if ((status == 0xFFFF) || !status)
+ break;
+
+ handled = 1;
+
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_asic_down(ioaddr);
+ goto out;
+ }
+
+ status &= tp->intr_mask;
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+
+ if (!(status & rtl8169_intr_mask))
+ break;
+
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
+
+ if (status & LinkChg)
+ rtl8169_check_link_status(dev, tp, ioaddr);
+
+#ifdef CONFIG_R8169_NAPI
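+		/* mask the NAPI-handled events; rtl8169_poll re-enables them
+		 * once the quota is no longer exhausted */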
+ RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
+ tp->intr_mask = ~rtl8169_napi_event;
+
+ if (likely(netif_rx_schedule_prep(dev)))
+ __netif_rx_schedule(dev);
+ else {
+ printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
+ dev->name, status);
+ }
+ break;
+#else
+ /* Rx interrupt */
+ if (status & (RxOK | RxOverflow | RxFIFOOver)) {
+ rtl8169_rx_interrupt(dev, tp, ioaddr);
+ }
+ /* Tx interrupt */
+ if (status & (TxOK | TxErr))
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+#endif
+
+ boguscnt--;
+ } while (boguscnt > 0);
+
+ if (boguscnt <= 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt!\n",
+ dev->name);
+ /* Clear all interrupt sources. */
+ RTL_W16(IntrStatus, 0xffff);
+ }
+out:
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_R8169_NAPI
+static int rtl8169_poll(struct net_device *dev, int *budget)
+{
+ unsigned int work_done, work_to_do = min(*budget, dev->quota);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < work_to_do) {
+ netif_rx_complete(dev);
+ tp->intr_mask = 0xffff;
+ /*
+ * 20040426: the barrier is not strictly required but the
+ * behavior of the irq handler could be less predictable
+ * without it. Btw, the lack of flush for the posted pci
+ * write is safe - FR
+ */
+ smp_wmb();
+ RTL_W16(IntrMask, rtl8169_intr_mask);
+ }
+
+ return (work_done >= work_to_do);
+}
+#endif
+
+static void rtl8169_down(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int poll_locked = 0;
+
+ rtl8169_delete_timer(dev);
+
+ netif_stop_queue(dev);
+
+ flush_scheduled_work();
+
+core_down:
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_asic_down(ioaddr);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+ RTL_W32(RxMissed, 0);
+
+ spin_unlock_irq(&tp->lock);
+
+ synchronize_irq(dev->irq);
+
+ if (!poll_locked) {
+ netif_poll_disable(dev);
+ poll_locked++;
+ }
+
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_kernel();
+
+ /*
+	 * And now for the $50k question: are IRQs disabled or not ?
+ *
+ * Two paths lead here:
+ * 1) dev->close
+ * -> netif_running() is available to sync the current code and the
+ * IRQ handler. See rtl8169_interrupt for details.
+ * 2) dev->change_mtu
+	 *   -> rtl8169_poll can not be scheduled again and re-enable the
+	 *      interrupts. Let's simply issue the IRQ down sequence again.
+ */
+ if (RTL_R16(IntrMask))
+ goto core_down;
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_rx_clear(tp);
+}
+
+static int rtl8169_close(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ rtl8169_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ netif_poll_enable(dev);
+
+ pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ tp->RxDescArray = NULL;
+
+ return 0;
+}
+
+static void
+rtl8169_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ tmp = rtl8169_rx_config | rx_mode |
+ (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+
+ RTL_W32(RxConfig, tmp);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+/**
+ * rtl8169_get_stats - Get rtl8169 read/write statistics
+ * @dev: The Ethernet Device to get statistics for
+ *
+ * Get TX/RX statistics for rtl8169
+ */
+static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&tp->lock, flags);
+ tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+ RTL_W32(RxMissed, 0);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &tp->stats;
+}
+
+static struct pci_driver rtl8169_pci_driver = {
+ .name = MODULENAME,
+ .id_table = rtl8169_pci_tbl,
+ .probe = rtl8169_init_one,
+ .remove = __devexit_p(rtl8169_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+#endif
+};
+
+static int __init
+rtl8169_init_module(void)
+{
+ return pci_module_init(&rtl8169_pci_driver);
+}
+
+static void __exit
+rtl8169_cleanup_module(void)
+{
+ pci_unregister_driver(&rtl8169_pci_driver);
+}
+
+module_init(rtl8169_init_module);
+module_exit(rtl8169_cleanup_module);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
new file mode 100644
index 000000000000..12a86f96d973
--- /dev/null
+++ b/drivers/net/rrunner.c
@@ -0,0 +1,1756 @@
+/*
+ * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
+ *
+ * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
+ *
+ * Thanks to Essential Communication for providing us with hardware
+ * and very comprehensive documentation without which I would not have
+ * been able to write this driver. A special thank you to John Gibbon
+ * for sorting out the legal issues, with the NDA, allowing the code to
+ * be released under the GPL.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
+ * stupid bugs in my code.
+ *
+ * Softnet support and various other patches from Val Henson of
+ * ODS/Essential.
+ *
+ * PCI DMA mapping code partly based on work by Francois Romieu.
+ */
+
+
+#define DEBUG 1
+#define RX_DMA_SKBUFF 1
+#define PKT_COPY_THRESHOLD 512
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/hippidevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <net/sock.h>
+
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define rr_if_busy(dev) netif_queue_stopped(dev)
+#define rr_if_running(dev) netif_running(dev)
+
+#include "rrunner.h"
+
+#define RUN_AT(x) (jiffies + (x))
+
+
+MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
+MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
+MODULE_LICENSE("GPL");
+
+static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
+
+/*
+ * Implementation notes:
+ *
+ * The DMA engine only allows for DMA within physical 64KB chunks of
+ * memory. The current approach of the driver (and stack) is to use
+ * linear blocks of memory for the skbuffs. However, as the data block
+ * is always the first part of the skb and skbs are 2^n aligned, we are
+ * guaranteed to get the whole block within a single 64KB-aligned chunk.
+ *
+ * In the long term, relying on being able to allocate 64KB linear
+ * chunks of memory is not feasible and the skb handling code and the
+ * stack will need to know about I/O vectors or something similar.
+ */
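+
+/*
+ * Purely illustrative sketch of the constraint described above; the helper
+ * below is hypothetical and not used anywhere in this driver. It returns
+ * true when a linear buffer (len > 0) stays inside a single 64KB-aligned
+ * window.
+ */
+static inline int rr_within_one_64k_chunk(void *data, unsigned int len)
+{
+	unsigned long start = (unsigned long)data;
+
+	/* same 64KB window for the first and the last byte of the buffer */
+	return (start & ~0xffffUL) == ((start + len - 1) & ~0xffffUL);
+}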
+
+/*
+ * These are checked at init time to see if they are at least 256KB
+ * and increased to 256KB if they are not. This is done to avoid ending
+ * up with socket buffers smaller than the MTU size,
+ */
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
+
+static int __devinit rr_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ static int version_disp;
+ u8 pci_latency;
+ struct rr_private *rrpriv;
+ void *tmpptr;
+ dma_addr_t ring_dma;
+ int ret = -ENOMEM;
+
+ dev = alloc_hippi_dev(sizeof(struct rr_private));
+ if (!dev)
+ goto out3;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ rrpriv = netdev_priv(dev);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, "rrunner")) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ rrpriv->pci_dev = pdev;
+
+ spin_lock_init(&rrpriv->lock);
+
+ dev->irq = pdev->irq;
+ dev->open = &rr_open;
+ dev->hard_start_xmit = &rr_start_xmit;
+ dev->stop = &rr_close;
+ dev->get_stats = &rr_get_stats;
+ dev->do_ioctl = &rr_ioctl;
+
+ dev->base_addr = pci_resource_start(pdev, 0);
+
+ /* display version info if adapter is found */
+ if (!version_disp) {
+ /* set display flag to TRUE so that */
+ /* we only display this string ONCE */
+ version_disp = 1;
+ printk(version);
+ }
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency <= 0x58){
+ pci_latency = 0x58;
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
+ }
+
+ pci_set_master(pdev);
+
+ printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
+ "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
+ dev->base_addr, dev->irq, pci_latency);
+
+ /*
+ * Remap the regs into kernel space.
+ */
+
+ rrpriv->regs = ioremap(dev->base_addr, 0x1000);
+
+ if (!rrpriv->regs){
+ printk(KERN_ERR "%s: Unable to map I/O register, "
+ "RoadRunner will be disabled.\n", dev->name);
+ ret = -EIO;
+ goto out;
+ }
+
+ tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ rrpriv->tx_ring = tmpptr;
+ rrpriv->tx_ring_dma = ring_dma;
+
+ if (!tmpptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ rrpriv->rx_ring = tmpptr;
+ rrpriv->rx_ring_dma = ring_dma;
+
+ if (!tmpptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
+ rrpriv->evt_ring = tmpptr;
+ rrpriv->evt_ring_dma = ring_dma;
+
+ if (!tmpptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Don't access any register before this point!
+ */
+#ifdef __BIG_ENDIAN
+ writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
+ &rrpriv->regs->HostCtrl);
+#endif
+ /*
+ * Need to add a case for little-endian 64-bit hosts here.
+ */
+
+ rr_init(dev);
+
+ dev->base_addr = 0;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out;
+ return 0;
+
+ out:
+ if (rrpriv->rx_ring)
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
+ rrpriv->rx_ring_dma);
+ if (rrpriv->tx_ring)
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
+ rrpriv->tx_ring_dma);
+ if (rrpriv->regs)
+ iounmap(rrpriv->regs);
+ if (pdev) {
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+ out2:
+ free_netdev(dev);
+ out3:
+ return ret;
+}
+
+static void __devexit rr_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct rr_private *rr = netdev_priv(dev);
+
+ if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
+ printk(KERN_ERR "%s: trying to unload running NIC\n",
+ dev->name);
+ writel(HALT_NIC, &rr->regs->HostCtrl);
+ }
+
+ pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
+ rr->evt_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
+ rr->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
+ rr->tx_ring_dma);
+ unregister_netdev(dev);
+ iounmap(rr->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+
+/*
+ * Commands are considered to be slow, thus there is no reason to
+ * inline this.
+ */
+static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
+{
+ struct rr_regs __iomem *regs;
+ u32 idx;
+
+ regs = rrpriv->regs;
+ /*
+ * This is temporary - it will go away in the final version.
+ * We probably also want to make this function inline.
+ */
+ if (readl(&regs->HostCtrl) & NIC_HALTED){
+ printk("issuing command for halted NIC, code 0x%x, "
+ "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
+ if (readl(&regs->Mode) & FATAL_ERR)
+ printk("error codes Fail1 %02x, Fail2 %02x\n",
+ readl(&regs->Fail1), readl(&regs->Fail2));
+ }
+
+ idx = rrpriv->info->cmd_ctrl.pi;
+
+ writel(*(u32*)(cmd), &regs->CmdRing[idx]);
+ wmb();
+
+ idx = (idx - 1) % CMD_RING_ENTRIES;
+ rrpriv->info->cmd_ctrl.pi = idx;
+ wmb();
+
+ if (readl(&regs->Mode) & FATAL_ERR)
+ printk("error code %02x\n", readl(&regs->Fail1));
+}
+
+
+/*
+ * Reset the board in a sensible manner. The NIC is already halted
+ * when we get here and a spin-lock is held.
+ */
+static int rr_reset(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ struct eeprom *hw = NULL;
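+	/* hw stays NULL: it is only used to compute EEPROM field offsets
+	 * (e.g. &hw->rncd_info.FwStart below), never dereferenced */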
+ u32 start_pc;
+ int i;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ rr_load_firmware(dev);
+
+ writel(0x01000000, &regs->TX_state);
+ writel(0xff800000, &regs->RX_state);
+ writel(0, &regs->AssistState);
+ writel(CLEAR_INTA, &regs->LocalCtrl);
+ writel(0x01, &regs->BrkPt);
+ writel(0, &regs->Timer);
+ writel(0, &regs->TimerRef);
+ writel(RESET_DMA, &regs->DmaReadState);
+ writel(RESET_DMA, &regs->DmaWriteState);
+ writel(0, &regs->DmaWriteHostHi);
+ writel(0, &regs->DmaWriteHostLo);
+ writel(0, &regs->DmaReadHostHi);
+ writel(0, &regs->DmaReadHostLo);
+ writel(0, &regs->DmaReadLen);
+ writel(0, &regs->DmaWriteLen);
+ writel(0, &regs->DmaWriteLcl);
+ writel(0, &regs->DmaWriteIPchecksum);
+ writel(0, &regs->DmaReadLcl);
+ writel(0, &regs->DmaReadIPchecksum);
+ writel(0, &regs->PciState);
+#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
+ writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
+#elif (BITS_PER_LONG == 64)
+ writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
+#else
+ writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
+#endif
+
+#if 0
+ /*
+ * Don't worry, this is just black magic.
+ */
+ writel(0xdf000, &regs->RxBase);
+ writel(0xdf000, &regs->RxPrd);
+ writel(0xdf000, &regs->RxCon);
+ writel(0xce000, &regs->TxBase);
+ writel(0xce000, &regs->TxPrd);
+ writel(0xce000, &regs->TxCon);
+ writel(0, &regs->RxIndPro);
+ writel(0, &regs->RxIndCon);
+ writel(0, &regs->RxIndRef);
+ writel(0, &regs->TxIndPro);
+ writel(0, &regs->TxIndCon);
+ writel(0, &regs->TxIndRef);
+ writel(0xcc000, &regs->pad10[0]);
+ writel(0, &regs->DrCmndPro);
+ writel(0, &regs->DrCmndCon);
+ writel(0, &regs->DwCmndPro);
+ writel(0, &regs->DwCmndCon);
+ writel(0, &regs->DwCmndRef);
+ writel(0, &regs->DrDataPro);
+ writel(0, &regs->DrDataCon);
+ writel(0, &regs->DrDataRef);
+ writel(0, &regs->DwDataPro);
+ writel(0, &regs->DwDataCon);
+ writel(0, &regs->DwDataRef);
+#endif
+
+ writel(0xffffffff, &regs->MbEvent);
+ writel(0, &regs->Event);
+
+ writel(0, &regs->TxPi);
+ writel(0, &regs->IpRxPi);
+
+ writel(0, &regs->EvtCon);
+ writel(0, &regs->EvtPrd);
+
+ rrpriv->info->evt_ctrl.pi = 0;
+
+ for (i = 0; i < CMD_RING_ENTRIES; i++)
+ writel(0, &regs->CmdRing[i]);
+
+/*
+ * Why 32? Is this not cache line size dependent?
+ */
+ writel(RBURST_64|WBURST_64, &regs->PciState);
+ wmb();
+
+ start_pc = rr_read_eeprom_word(rrpriv, &hw->rncd_info.FwStart);
+
+#if (DEBUG > 1)
+ printk("%s: Executing firmware at address 0x%06x\n",
+ dev->name, start_pc);
+#endif
+
+ writel(start_pc + 0x800, &regs->Pc);
+ wmb();
+ udelay(5);
+
+ writel(start_pc, &regs->Pc);
+ wmb();
+
+ return 0;
+}
+
+
+/*
+ * Read a string from the EEPROM.
+ */
+static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
+ unsigned long offset,
+ unsigned char *buf,
+ unsigned long length)
+{
+ struct rr_regs __iomem *regs = rrpriv->regs;
+ u32 misc, io, host, i;
+
+ io = readl(&regs->ExtIo);
+ writel(0, &regs->ExtIo);
+ misc = readl(&regs->LocalCtrl);
+ writel(0, &regs->LocalCtrl);
+ host = readl(&regs->HostCtrl);
+ writel(host | HALT_NIC, &regs->HostCtrl);
+ mb();
+
+ for (i = 0; i < length; i++){
+ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
+ mb();
+ buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
+ mb();
+ }
+
+ writel(host, &regs->HostCtrl);
+ writel(misc, &regs->LocalCtrl);
+ writel(io, &regs->ExtIo);
+ mb();
+ return i;
+}
+
+
+/*
+ * Shortcut to read one word (4 bytes) out of the EEPROM and convert
+ * it to our CPU byte-order.
+ */
+static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
+ void * offset)
+{
+ u32 word;
+
+ if ((rr_read_eeprom(rrpriv, (unsigned long)offset,
+ (char *)&word, 4) == 4))
+ return be32_to_cpu(word);
+ return 0;
+}
+
+
+/*
+ * Write a string to the EEPROM.
+ *
+ * This is only called when the firmware is not running.
+ */
+static unsigned int write_eeprom(struct rr_private *rrpriv,
+ unsigned long offset,
+ unsigned char *buf,
+ unsigned long length)
+{
+ struct rr_regs __iomem *regs = rrpriv->regs;
+ u32 misc, io, data, i, j, ready, error = 0;
+
+ io = readl(&regs->ExtIo);
+ writel(0, &regs->ExtIo);
+ misc = readl(&regs->LocalCtrl);
+ writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
+ mb();
+
+ for (i = 0; i < length; i++){
+ writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
+ mb();
+ data = buf[i] << 24;
+ /*
+ * Only try to write the data if it is not the same
+ * value already.
+ */
+ if ((readl(&regs->WinData) & 0xff000000) != data){
+ writel(data, &regs->WinData);
+ ready = 0;
+ j = 0;
+ mb();
+ while(!ready){
+ udelay(20);
+ if ((readl(&regs->WinData) & 0xff000000) ==
+ data)
+ ready = 1;
+ mb();
+ if (j++ > 5000){
+ printk("data mismatch: %08x, "
+ "WinData %08x\n", data,
+ readl(&regs->WinData));
+ ready = 1;
+ error = 1;
+ }
+ }
+ }
+ }
+
+ writel(misc, &regs->LocalCtrl);
+ writel(io, &regs->ExtIo);
+ mb();
+
+ return error;
+}
+
+
+static int __init rr_init(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ struct eeprom *hw = NULL;
+ u32 sram_size, rev;
+ int i;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ rev = readl(&regs->FwRev);
+ rrpriv->fw_rev = rev;
+ if (rev > 0x00020024)
+ printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
+ ((rev >> 8) & 0xff), (rev & 0xff));
+ else if (rev >= 0x00020000) {
+ printk(" Firmware revision: %i.%i.%i (2.0.37 or "
+ "later is recommended)\n", (rev >> 16),
+ ((rev >> 8) & 0xff), (rev & 0xff));
+ }else{
+ printk(" Firmware revision too old: %i.%i.%i, please "
+ "upgrade to 2.0.37 or later.\n",
+ (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
+ }
+
+#if (DEBUG > 2)
+ printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
+#endif
+
+ /*
+ * Read the hardware address from the eeprom. The HW address
+ * is not really necessary for HIPPI but awfully convenient.
+ * The pointer arithmetic to put it in dev_addr is ugly, but
+ * Donald Becker does it this way for the GigE version of this
+ * card and it's shorter and more portable than any
+ * other method I've seen. -VAL
+ */
+
+ *(u16 *)(dev->dev_addr) =
+ htons(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA));
+ *(u32 *)(dev->dev_addr+2) =
+ htonl(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA[4]));
+
+ printk(" MAC: ");
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x\n", dev->dev_addr[i]);
+
+ sram_size = rr_read_eeprom_word(rrpriv, (void *)8);
+ printk(" SRAM size 0x%06x\n", sram_size);
+
+ if (sysctl_rmem_max < 262144){
+ printk(" Receive socket buffer limit too low (%i), "
+ "setting to 262144\n", sysctl_rmem_max);
+ sysctl_rmem_max = 262144;
+ }
+
+ if (sysctl_wmem_max < 262144){
+ printk(" Transmit socket buffer limit too low (%i), "
+ "setting to 262144\n", sysctl_wmem_max);
+ sysctl_wmem_max = 262144;
+ }
+
+ return 0;
+}
+
+
+static int rr_init1(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ unsigned long myjif, flags;
+ struct cmd cmd;
+ u32 hostctrl;
+ int ecode = 0;
+ short i;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ spin_lock_irqsave(&rrpriv->lock, flags);
+
+ hostctrl = readl(&regs->HostCtrl);
+ writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
+ wmb();
+
+ if (hostctrl & PARITY_ERR){
+ printk("%s: Parity error halting NIC - this is serious!\n",
+ dev->name);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+ ecode = -EFAULT;
+ goto error;
+ }
+
+ set_rxaddr(regs, rrpriv->rx_ctrl_dma);
+ set_infoaddr(regs, rrpriv->info_dma);
+
+ rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
+ rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
+ rrpriv->info->evt_ctrl.mode = 0;
+ rrpriv->info->evt_ctrl.pi = 0;
+ set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
+
+ rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
+ rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
+ rrpriv->info->cmd_ctrl.mode = 0;
+ rrpriv->info->cmd_ctrl.pi = 15;
+
+ for (i = 0; i < CMD_RING_ENTRIES; i++) {
+ writel(0, &regs->CmdRing[i]);
+ }
+
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
+ rrpriv->tx_ring[i].size = 0;
+ set_rraddr(&rrpriv->tx_ring[i].addr, 0);
+ rrpriv->tx_skbuff[i] = NULL;
+ }
+ rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
+ rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
+ rrpriv->info->tx_ctrl.mode = 0;
+ rrpriv->info->tx_ctrl.pi = 0;
+ set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
+
+ /*
+ * Set dirty_tx before we start receiving interrupts, otherwise
+ * the interrupt handler might think it is supposed to process
+ * tx ints before we are up and running, which may cause a null
+ * pointer access in the int handler.
+ */
+ rrpriv->tx_full = 0;
+ rrpriv->cur_rx = 0;
+ rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
+
+ rr_reset(dev);
+
+ /* Tuning values */
+ writel(0x5000, &regs->ConRetry);
+ writel(0x100, &regs->ConRetryTmr);
+ writel(0x500000, &regs->ConTmout);
+ writel(0x60, &regs->IntrTmr);
+ writel(0x500000, &regs->TxDataMvTimeout);
+ writel(0x200000, &regs->RxDataMvTimeout);
+ writel(0x80, &regs->WriteDmaThresh);
+ writel(0x80, &regs->ReadDmaThresh);
+
+ rrpriv->fw_running = 0;
+ wmb();
+
+ hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
+ writel(hostctrl, &regs->HostCtrl);
+ wmb();
+
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ rrpriv->rx_ring[i].mode = 0;
+ skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Unable to allocate memory "
+ "for receive ring - halting NIC\n", dev->name);
+ ecode = -ENOMEM;
+ goto error;
+ }
+ rrpriv->rx_skbuff[i] = skb;
+ addr = pci_map_single(rrpriv->pci_dev, skb->data,
+ dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ /*
+ * Sanity test to see if we conflict with the DMA
+ * limitations of the Roadrunner.
+ */
+ if ((((unsigned long)skb->data) & 0xfff) > ~65320)
+ printk("skb alloc error\n");
+
+ set_rraddr(&rrpriv->rx_ring[i].addr, addr);
+ rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
+ }
+
+ rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
+ rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
+ rrpriv->rx_ctrl[4].mode = 8;
+ rrpriv->rx_ctrl[4].pi = 0;
+ wmb();
+ set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
+
+ udelay(1000);
+
+ /*
+ * Now start the FirmWare.
+ */
+ cmd.code = C_START_FW;
+ cmd.ring = 0;
+ cmd.index = 0;
+
+ rr_issue_cmd(rrpriv, &cmd);
+
+ /*
+ * Give the FirmWare time to chew on the `get running' command.
+ */
+ myjif = jiffies + 5 * HZ;
+ while (time_before(jiffies, myjif) && !rrpriv->fw_running)
+ cpu_relax();
+
+ netif_start_queue(dev);
+
+ return ecode;
+
+ error:
+ /*
+ * We might have gotten here because we are out of memory,
+ * make sure we release everything we allocated before failing
+ */
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ struct sk_buff *skb = rrpriv->rx_skbuff[i];
+
+ if (skb) {
+ pci_unmap_single(rrpriv->pci_dev,
+ rrpriv->rx_ring[i].addr.addrlo,
+ dev->mtu + HIPPI_HLEN,
+ PCI_DMA_FROMDEVICE);
+ rrpriv->rx_ring[i].size = 0;
+ set_rraddr(&rrpriv->rx_ring[i].addr, 0);
+ dev_kfree_skb(skb);
+ rrpriv->rx_skbuff[i] = NULL;
+ }
+ }
+ return ecode;
+}
+
+
+/*
+ * All events are considered to be slow (RX/TX ints do not generate
+ * events) and are handled here, outside the main interrupt handler,
+ * to reduce the size of the handler.
+ */
+static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ u32 tmp;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ while (prodidx != eidx){
+ switch (rrpriv->evt_ring[eidx].code){
+ case E_NIC_UP:
+ tmp = readl(&regs->FwRev);
+ printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
+ "up and running\n", dev->name,
+ (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
+ rrpriv->fw_running = 1;
+ writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
+ wmb();
+ break;
+ case E_LINK_ON:
+ printk(KERN_INFO "%s: Optical link ON\n", dev->name);
+ break;
+ case E_LINK_OFF:
+ printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
+ break;
+ case E_RX_IDLE:
+ printk(KERN_WARNING "%s: RX data not moving\n",
+ dev->name);
+ goto drop;
+ case E_WATCHDOG:
+ printk(KERN_INFO "%s: The watchdog is here to see "
+ "us\n", dev->name);
+ break;
+ case E_INTERN_ERR:
+ printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_HOST_ERR:
+ printk(KERN_ERR "%s: Host software error\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ /*
+ * TX events.
+ */
+ case E_CON_REJ:
+ printk(KERN_WARNING "%s: Connection rejected\n",
+ dev->name);
+ rrpriv->stats.tx_aborted_errors++;
+ break;
+ case E_CON_TMOUT:
+ printk(KERN_WARNING "%s: Connection timeout\n",
+ dev->name);
+ break;
+ case E_DISC_ERR:
+ printk(KERN_WARNING "%s: HIPPI disconnect error\n",
+ dev->name);
+ rrpriv->stats.tx_aborted_errors++;
+ break;
+ case E_INT_PRTY:
+ printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_TX_IDLE:
+ printk(KERN_WARNING "%s: Transmitter idle\n",
+ dev->name);
+ break;
+ case E_TX_LINK_DROP:
+ printk(KERN_WARNING "%s: Link lost during transmit\n",
+ dev->name);
+ rrpriv->stats.tx_aborted_errors++;
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_TX_INV_RNG:
+ printk(KERN_ERR "%s: Invalid send ring block\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_TX_INV_BUF:
+ printk(KERN_ERR "%s: Invalid send buffer address\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_TX_INV_DSC:
+ printk(KERN_ERR "%s: Invalid descriptor address\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ /*
+ * RX events.
+ */
+ case E_RX_RNG_OUT:
+ printk(KERN_INFO "%s: Receive ring full\n", dev->name);
+ break;
+
+ case E_RX_PAR_ERR:
+ printk(KERN_WARNING "%s: Receive parity error\n",
+ dev->name);
+ goto drop;
+ case E_RX_LLRC_ERR:
+ printk(KERN_WARNING "%s: Receive LLRC error\n",
+ dev->name);
+ goto drop;
+ case E_PKT_LN_ERR:
+ printk(KERN_WARNING "%s: Receive packet length "
+ "error\n", dev->name);
+ goto drop;
+ case E_DTA_CKSM_ERR:
+ printk(KERN_WARNING "%s: Data checksum error\n",
+ dev->name);
+ goto drop;
+ case E_SHT_BST:
+ printk(KERN_WARNING "%s: Unexpected short burst "
+ "error\n", dev->name);
+ goto drop;
+ case E_STATE_ERR:
+ printk(KERN_WARNING "%s: Recv. state transition"
+ " error\n", dev->name);
+ goto drop;
+ case E_UNEXP_DATA:
+ printk(KERN_WARNING "%s: Unexpected data error\n",
+ dev->name);
+ goto drop;
+ case E_LST_LNK_ERR:
+ printk(KERN_WARNING "%s: Link lost error\n",
+ dev->name);
+ goto drop;
+ case E_FRM_ERR:
+			printk(KERN_WARNING "%s: Framing Error\n",
+ dev->name);
+ goto drop;
+ case E_FLG_SYN_ERR:
+			printk(KERN_WARNING "%s: Flag sync. lost during "
+ "packet\n", dev->name);
+ goto drop;
+ case E_RX_INV_BUF:
+ printk(KERN_ERR "%s: Invalid receive buffer "
+ "address\n", dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_RX_INV_DSC:
+ printk(KERN_ERR "%s: Invalid receive descriptor "
+ "address\n", dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ case E_RNG_BLK:
+ printk(KERN_ERR "%s: Invalid ring block\n",
+ dev->name);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ wmb();
+ break;
+ drop:
+ /* Label packet to be dropped.
+ * Actual dropping occurs in rx
+ * handling.
+ *
+ * The index of packet we get to drop is
+ * the index of the packet following
+ * the bad packet. -kbf
+ */
+ {
+ u16 index = rrpriv->evt_ring[eidx].index;
+ index = (index + (RX_RING_ENTRIES - 1)) %
+ RX_RING_ENTRIES;
+ rrpriv->rx_ring[index].mode |=
+ (PACKET_BAD | PACKET_END);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
+ dev->name, rrpriv->evt_ring[eidx].code);
+ }
+ eidx = (eidx + 1) % EVT_RING_ENTRIES;
+ }
+
+ rrpriv->info->evt_ctrl.pi = eidx;
+ wmb();
+ return eidx;
+}
+
+
+static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
+{
+ struct rr_private *rrpriv = netdev_priv(dev);
+ struct rr_regs __iomem *regs = rrpriv->regs;
+
+ do {
+ struct rx_desc *desc;
+ u32 pkt_len;
+
+ desc = &(rrpriv->rx_ring[index]);
+ pkt_len = desc->size;
+#if (DEBUG > 2)
+ printk("index %i, rxlimit %i\n", index, rxlimit);
+ printk("len %x, mode %x\n", pkt_len, desc->mode);
+#endif
+ if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
+ rrpriv->stats.rx_dropped++;
+ goto defer;
+ }
+
+ if (pkt_len > 0){
+ struct sk_buff *skb, *rx_skb;
+
+ rx_skb = rrpriv->rx_skbuff[index];
+
+ if (pkt_len < PKT_COPY_THRESHOLD) {
+ skb = alloc_skb(pkt_len, GFP_ATOMIC);
+ if (skb == NULL){
+ printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
+ rrpriv->stats.rx_dropped++;
+ goto defer;
+ } else {
+ pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
+ desc->addr.addrlo,
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+
+ memcpy(skb_put(skb, pkt_len),
+ rx_skb->data, pkt_len);
+
+ pci_dma_sync_single_for_device(rrpriv->pci_dev,
+ desc->addr.addrlo,
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ }else{
+ struct sk_buff *newskb;
+
+ newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
+ GFP_ATOMIC);
+ if (newskb){
+ dma_addr_t addr;
+
+ pci_unmap_single(rrpriv->pci_dev,
+ desc->addr.addrlo, dev->mtu +
+ HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ skb = rx_skb;
+ skb_put(skb, pkt_len);
+ rrpriv->rx_skbuff[index] = newskb;
+ addr = pci_map_single(rrpriv->pci_dev,
+ newskb->data,
+ dev->mtu + HIPPI_HLEN,
+ PCI_DMA_FROMDEVICE);
+ set_rraddr(&desc->addr, addr);
+ } else {
+ printk("%s: Out of memory, deferring "
+ "packet\n", dev->name);
+ rrpriv->stats.rx_dropped++;
+ goto defer;
+ }
+ }
+ skb->dev = dev;
+ skb->protocol = hippi_type_trans(skb, dev);
+
+ netif_rx(skb); /* send it up */
+
+ dev->last_rx = jiffies;
+ rrpriv->stats.rx_packets++;
+ rrpriv->stats.rx_bytes += pkt_len;
+ }
+ defer:
+ desc->mode = 0;
+ desc->size = dev->mtu + HIPPI_HLEN;
+
+ if ((index & 7) == 7)
+ writel(index, &regs->IpRxPi);
+
+ index = (index + 1) % RX_RING_ENTRIES;
+ } while(index != rxlimit);
+
+ rrpriv->cur_rx = index;
+ wmb();
+}
+
+
+static irqreturn_t rr_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ struct net_device *dev = (struct net_device *)dev_id;
+ u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ if (!(readl(&regs->HostCtrl) & RR_INT))
+ return IRQ_NONE;
+
+ spin_lock(&rrpriv->lock);
+
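+	/* EvtPrd packs three 8-bit indexes: event producer (bits 7:0),
+	 * TX consumer (15:8) and RX limit (23:16) */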
+ prodidx = readl(&regs->EvtPrd);
+ txcsmr = (prodidx >> 8) & 0xff;
+ rxlimit = (prodidx >> 16) & 0xff;
+ prodidx &= 0xff;
+
+#if (DEBUG > 2)
+ printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
+ prodidx, rrpriv->info->evt_ctrl.pi);
+#endif
+ /*
+ * Order here is important. We must handle events
+ * before doing anything else in order to catch
+ * such things as LLRC errors, etc -kbf
+ */
+
+ eidx = rrpriv->info->evt_ctrl.pi;
+ if (prodidx != eidx)
+ eidx = rr_handle_event(dev, prodidx, eidx);
+
+ rxindex = rrpriv->cur_rx;
+ if (rxindex != rxlimit)
+ rx_int(dev, rxlimit, rxindex);
+
+ txcon = rrpriv->dirty_tx;
+ if (txcsmr != txcon) {
+ do {
+			/* Due to occasional firmware TX producer/consumer
+			 * out-of-sync errors we need to check the entry in
+			 * the ring. -kbf
+			 */
+ if(rrpriv->tx_skbuff[txcon]){
+ struct tx_desc *desc;
+ struct sk_buff *skb;
+
+ desc = &(rrpriv->tx_ring[txcon]);
+ skb = rrpriv->tx_skbuff[txcon];
+
+ rrpriv->stats.tx_packets++;
+ rrpriv->stats.tx_bytes += skb->len;
+
+ pci_unmap_single(rrpriv->pci_dev,
+ desc->addr.addrlo, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+
+ rrpriv->tx_skbuff[txcon] = NULL;
+ desc->size = 0;
+ set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
+ desc->mode = 0;
+ }
+ txcon = (txcon + 1) % TX_RING_ENTRIES;
+ } while (txcsmr != txcon);
+ wmb();
+
+ rrpriv->dirty_tx = txcon;
+ if (rrpriv->tx_full && rr_if_busy(dev) &&
+ (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
+ != rrpriv->dirty_tx)){
+ rrpriv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+
+ eidx |= ((txcsmr << 8) | (rxlimit << 16));
+ writel(eidx, &regs->EvtCon);
+ wmb();
+
+ spin_unlock(&rrpriv->lock);
+ return IRQ_HANDLED;
+}
+
+static inline void rr_raz_tx(struct rr_private *rrpriv,
+ struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
+ struct sk_buff *skb = rrpriv->tx_skbuff[i];
+
+ if (skb) {
+ struct tx_desc *desc = &(rrpriv->tx_ring[i]);
+
+ pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
+ skb->len, PCI_DMA_TODEVICE);
+ desc->size = 0;
+ set_rraddr(&desc->addr, 0);
+ dev_kfree_skb(skb);
+ rrpriv->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+static inline void rr_raz_rx(struct rr_private *rrpriv,
+ struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ struct sk_buff *skb = rrpriv->rx_skbuff[i];
+
+ if (skb) {
+ struct rx_desc *desc = &(rrpriv->rx_ring[i]);
+
+ pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
+ dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ desc->size = 0;
+ set_rraddr(&desc->addr, 0);
+ dev_kfree_skb(skb);
+ rrpriv->rx_skbuff[i] = NULL;
+ }
+ }
+}
+
+static void rr_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct rr_private *rrpriv = netdev_priv(dev);
+ struct rr_regs __iomem *regs = rrpriv->regs;
+ unsigned long flags;
+
+ if (readl(&regs->HostCtrl) & NIC_HALTED){
+ printk("%s: Restarting nic\n", dev->name);
+ memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
+ memset(rrpriv->info, 0, sizeof(struct rr_info));
+ wmb();
+
+ rr_raz_tx(rrpriv, dev);
+ rr_raz_rx(rrpriv, dev);
+
+ if (rr_init1(dev)) {
+ spin_lock_irqsave(&rrpriv->lock, flags);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
+ &regs->HostCtrl);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+ }
+ }
+ rrpriv->timer.expires = RUN_AT(5*HZ);
+ add_timer(&rrpriv->timer);
+}
+
+
+static int rr_open(struct net_device *dev)
+{
+ struct rr_private *rrpriv = netdev_priv(dev);
+ struct pci_dev *pdev = rrpriv->pci_dev;
+ struct rr_regs __iomem *regs;
+ int ecode = 0;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+
+ regs = rrpriv->regs;
+
+ if (rrpriv->fw_rev < 0x00020000) {
+ printk(KERN_WARNING "%s: trying to configure device with "
+ "obsolete firmware\n", dev->name);
+ ecode = -EBUSY;
+ goto error;
+ }
+
+ rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
+ 256 * sizeof(struct ring_ctrl),
+ &dma_addr);
+ if (!rrpriv->rx_ctrl) {
+ ecode = -ENOMEM;
+ goto error;
+ }
+ rrpriv->rx_ctrl_dma = dma_addr;
+ memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));
+
+ rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
+ &dma_addr);
+ if (!rrpriv->info) {
+ ecode = -ENOMEM;
+ goto error;
+ }
+ rrpriv->info_dma = dma_addr;
+ memset(rrpriv->info, 0, sizeof(struct rr_info));
+ wmb();
+
+ spin_lock_irqsave(&rrpriv->lock, flags);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
+ readl(&regs->HostCtrl);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ, dev->name, dev)) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+ dev->name, dev->irq);
+ ecode = -EAGAIN;
+ goto error;
+ }
+
+ if ((ecode = rr_init1(dev)))
+ goto error;
+
+	/* Set up the watchdog timer that periodically checks whether the
+	   NIC is still running and restarts it if it has halted. */
+ init_timer(&rrpriv->timer);
+ rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
+ rrpriv->timer.data = (unsigned long)dev;
+ rrpriv->timer.function = &rr_timer; /* timer handler */
+ add_timer(&rrpriv->timer);
+
+ netif_start_queue(dev);
+
+ return ecode;
+
+ error:
+ spin_lock_irqsave(&rrpriv->lock, flags);
+ writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ if (rrpriv->info) {
+ pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
+ rrpriv->info_dma);
+ rrpriv->info = NULL;
+ }
+ if (rrpriv->rx_ctrl) {
+		pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ rrpriv->rx_ctrl = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+ return ecode;
+}
+
+
+static void rr_dump(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ u32 index, cons;
+ short i;
+ int len;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ printk("%s: dumping NIC TX rings\n", dev->name);
+
+ printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
+ readl(&regs->RxPrd), readl(&regs->TxPrd),
+ readl(&regs->EvtPrd), readl(&regs->TxPi),
+ rrpriv->info->tx_ctrl.pi);
+
+ printk("Error code 0x%x\n", readl(&regs->Fail1));
+
+ index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
+ cons = rrpriv->dirty_tx;
+ printk("TX ring index %i, TX consumer %i\n",
+ index, cons);
+
+ if (rrpriv->tx_skbuff[index]){
+ len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
+ printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
+ for (i = 0; i < len; i++){
+ if (!(i & 7))
+ printk("\n");
+ printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
+ }
+ printk("\n");
+ }
+
+ if (rrpriv->tx_skbuff[cons]){
+ len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
+ printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
+ printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
+ rrpriv->tx_ring[cons].mode,
+ rrpriv->tx_ring[cons].size,
+ (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
+ (unsigned long)rrpriv->tx_skbuff[cons]->data,
+ (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
+ for (i = 0; i < len; i++){
+ if (!(i & 7))
+ printk("\n");
+			printk("%02x ", (unsigned char)rrpriv->tx_skbuff[cons]->data[i]);
+ }
+ printk("\n");
+ }
+
+ printk("dumping TX ring info:\n");
+ for (i = 0; i < TX_RING_ENTRIES; i++)
+ printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
+ rrpriv->tx_ring[i].mode,
+ rrpriv->tx_ring[i].size,
+ (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
+
+}
+
+
+static int rr_close(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ unsigned long flags;
+ u32 tmp;
+ short i;
+
+ netif_stop_queue(dev);
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ /*
+ * Lock to make sure we are not cleaning up while another CPU
+ * is handling interrupts.
+ */
+ spin_lock_irqsave(&rrpriv->lock, flags);
+
+ tmp = readl(&regs->HostCtrl);
+ if (tmp & NIC_HALTED){
+ printk("%s: NIC already halted\n", dev->name);
+ rr_dump(dev);
+ }else{
+ tmp |= HALT_NIC | RR_CLEAR_INT;
+ writel(tmp, &regs->HostCtrl);
+ readl(&regs->HostCtrl);
+ }
+
+ rrpriv->fw_running = 0;
+
+ del_timer_sync(&rrpriv->timer);
+
+ writel(0, &regs->TxPi);
+ writel(0, &regs->IpRxPi);
+
+ writel(0, &regs->EvtCon);
+ writel(0, &regs->EvtPrd);
+
+ for (i = 0; i < CMD_RING_ENTRIES; i++)
+ writel(0, &regs->CmdRing[i]);
+
+ rrpriv->info->tx_ctrl.entries = 0;
+ rrpriv->info->cmd_ctrl.pi = 0;
+ rrpriv->info->evt_ctrl.pi = 0;
+ rrpriv->rx_ctrl[4].entries = 0;
+
+ rr_raz_tx(rrpriv, dev);
+ rr_raz_rx(rrpriv, dev);
+
+ pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ rrpriv->rx_ctrl = NULL;
+
+ pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
+ rrpriv->info, rrpriv->info_dma);
+ rrpriv->info = NULL;
+
+ free_irq(dev->irq, dev);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ return 0;
+}
+
+
+static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rr_private *rrpriv = netdev_priv(dev);
+ struct rr_regs __iomem *regs = rrpriv->regs;
+ struct ring_ctrl *txctrl;
+ unsigned long flags;
+ u32 index, len = skb->len;
+ u32 *ifield;
+ struct sk_buff *new_skb;
+
+ if (readl(&regs->Mode) & FATAL_ERR)
+ printk("error codes Fail1 %02x, Fail2 %02x\n",
+ readl(&regs->Fail1), readl(&regs->Fail2));
+
+ /*
+ * We probably need to deal with tbusy here to prevent overruns.
+ */
+
+ if (skb_headroom(skb) < 8){
+ printk("incoming skb too small - reallocating\n");
+ if (!(new_skb = dev_alloc_skb(len + 8))) {
+ dev_kfree_skb(skb);
+ netif_wake_queue(dev);
+ return -EBUSY;
+ }
+ skb_reserve(new_skb, 8);
+ skb_put(new_skb, len);
+ memcpy(new_skb->data, skb->data, len);
+ dev_kfree_skb(skb);
+ skb = new_skb;
+ }
+
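+	/* prepend an 8 byte header carrying the HIPPI I-field in front of
+	 * the payload */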
+ ifield = (u32 *)skb_push(skb, 8);
+
+ ifield[0] = 0;
+ ifield[1] = skb->private.ifield;
+
+ /*
+ * We don't need the lock before we are actually going to start
+ * fiddling with the control blocks.
+ */
+ spin_lock_irqsave(&rrpriv->lock, flags);
+
+ txctrl = &rrpriv->info->tx_ctrl;
+
+ index = txctrl->pi;
+
+ rrpriv->tx_skbuff[index] = skb;
+ set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
+ rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
+ rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
+ rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
+ txctrl->pi = (index + 1) % TX_RING_ENTRIES;
+ wmb();
+ writel(txctrl->pi, &regs->TxPi);
+
+ if (txctrl->pi == rrpriv->dirty_tx){
+ rrpriv->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+
+static struct net_device_stats *rr_get_stats(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+
+ rrpriv = netdev_priv(dev);
+
+ return(&rrpriv->stats);
+}
+
+
+/*
+ * Read the firmware out of the EEPROM and put it into the SRAM
+ * (or from user space - later)
+ *
+ * This operation requires the NIC to be halted and is performed with
+ * interrupts disabled and with the spinlock held.
+ */
+static int rr_load_firmware(struct net_device *dev)
+{
+ struct rr_private *rrpriv;
+ struct rr_regs __iomem *regs;
+ unsigned long eptr, segptr;
+ int i, j;
+ u32 localctrl, sptr, len, tmp;
+ u32 p2len, p2size, nr_seg, revision, io, sram_size;
+ struct eeprom *hw = NULL;
+
+ rrpriv = netdev_priv(dev);
+ regs = rrpriv->regs;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
+ printk("%s: Trying to load firmware to a running NIC.\n",
+ dev->name);
+ return -EBUSY;
+ }
+
+ localctrl = readl(&regs->LocalCtrl);
+ writel(0, &regs->LocalCtrl);
+
+ writel(0, &regs->EvtPrd);
+ writel(0, &regs->RxPrd);
+ writel(0, &regs->TxPrd);
+
+ /*
+ * First wipe the entire SRAM, otherwise we might run into all
+ * kinds of trouble ... sigh, this took almost all afternoon
+ * to track down ;-(
+ */
+ io = readl(&regs->ExtIo);
+ writel(0, &regs->ExtIo);
+ sram_size = rr_read_eeprom_word(rrpriv, (void *)8);
+
+ for (i = 200; i < sram_size / 4; i++){
+ writel(i * 4, &regs->WinBase);
+ mb();
+ writel(0, &regs->WinData);
+ mb();
+ }
+ writel(io, &regs->ExtIo);
+ mb();
+
+ eptr = (unsigned long)rr_read_eeprom_word(rrpriv,
+ &hw->rncd_info.AddrRunCodeSegs);
+ eptr = ((eptr & 0x1fffff) >> 3);
+
+ p2len = rr_read_eeprom_word(rrpriv, (void *)(0x83*4));
+ p2len = (p2len << 2);
+ p2size = rr_read_eeprom_word(rrpriv, (void *)(0x84*4));
+ p2size = ((p2size & 0x1fffff) >> 3);
+
+ if ((eptr < p2size) || (eptr > (p2size + p2len))){
+ printk("%s: eptr is invalid\n", dev->name);
+ goto out;
+ }
+
+ revision = rr_read_eeprom_word(rrpriv, &hw->manf.HeaderFmt);
+
+ if (revision != 1){
+ printk("%s: invalid firmware format (%i)\n",
+ dev->name, revision);
+ goto out;
+ }
+
+ nr_seg = rr_read_eeprom_word(rrpriv, (void *)eptr);
+ eptr +=4;
+#if (DEBUG > 1)
+ printk("%s: nr_seg %i\n", dev->name, nr_seg);
+#endif
+
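+	/* each segment record is three words: SRAM load address, length in
+	 * words and the EEPROM offset of the segment data */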
+ for (i = 0; i < nr_seg; i++){
+ sptr = rr_read_eeprom_word(rrpriv, (void *)eptr);
+ eptr += 4;
+ len = rr_read_eeprom_word(rrpriv, (void *)eptr);
+ eptr += 4;
+ segptr = (unsigned long)rr_read_eeprom_word(rrpriv, (void *)eptr);
+ segptr = ((segptr & 0x1fffff) >> 3);
+ eptr += 4;
+#if (DEBUG > 1)
+ printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
+ dev->name, i, sptr, len, segptr);
+#endif
+ for (j = 0; j < len; j++){
+ tmp = rr_read_eeprom_word(rrpriv, (void *)segptr);
+ writel(sptr, &regs->WinBase);
+ mb();
+ writel(tmp, &regs->WinData);
+ mb();
+ segptr += 4;
+ sptr += 4;
+ }
+ }
+
+out:
+ writel(localctrl, &regs->LocalCtrl);
+ mb();
+ return 0;
+}
+
+
+static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rr_private *rrpriv;
+ unsigned char *image, *oldimage;
+ unsigned long flags;
+ unsigned int i;
+ int error = -EOPNOTSUPP;
+
+ rrpriv = netdev_priv(dev);
+
+ switch(cmd){
+ case SIOCRRGFW:
+ if (!capable(CAP_SYS_RAWIO)){
+ return -EPERM;
+ }
+
+ image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
+ if (!image){
+ printk(KERN_ERR "%s: Unable to allocate memory "
+ "for EEPROM image\n", dev->name);
+ return -ENOMEM;
+ }
+
+
+ if (rrpriv->fw_running){
+ printk("%s: Firmware already running\n", dev->name);
+ error = -EPERM;
+ goto gf_out;
+ }
+
+ spin_lock_irqsave(&rrpriv->lock, flags);
+ i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+ if (i != EEPROM_BYTES){
+ printk(KERN_ERR "%s: Error reading EEPROM\n",
+ dev->name);
+ error = -EFAULT;
+ goto gf_out;
+ }
+ error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
+ if (error)
+ error = -EFAULT;
+ gf_out:
+ kfree(image);
+ return error;
+
+ case SIOCRRPFW:
+ if (!capable(CAP_SYS_RAWIO)){
+ return -EPERM;
+ }
+
+ image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
+ oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
+ if (!image || !oldimage) {
+ printk(KERN_ERR "%s: Unable to allocate memory "
+ "for EEPROM image\n", dev->name);
+ error = -ENOMEM;
+ goto wf_out;
+ }
+
+ error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
+ if (error) {
+ error = -EFAULT;
+ goto wf_out;
+ }
+
+ if (rrpriv->fw_running){
+ printk("%s: Firmware already running\n", dev->name);
+ error = -EPERM;
+ goto wf_out;
+ }
+
+ printk("%s: Updating EEPROM firmware\n", dev->name);
+
+ spin_lock_irqsave(&rrpriv->lock, flags);
+ error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
+ if (error)
+ printk(KERN_ERR "%s: Error writing EEPROM\n",
+ dev->name);
+
+ i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+ if (i != EEPROM_BYTES)
+ printk(KERN_ERR "%s: Error reading back EEPROM "
+ "image\n", dev->name);
+
+ error = memcmp(image, oldimage, EEPROM_BYTES);
+ if (error){
+ printk(KERN_ERR "%s: Error verifying EEPROM image\n",
+ dev->name);
+ error = -EFAULT;
+ }
+ wf_out:
+ if (oldimage)
+ kfree(oldimage);
+ if (image)
+ kfree(image);
+ return error;
+
+ case SIOCRRID:
+ return put_user(0x52523032, (int __user *)rq->ifr_data);
+ default:
+ return error;
+ }
+}
+
+static struct pci_device_id rr_pci_tbl[] = {
+ { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
+
+static struct pci_driver rr_driver = {
+ .name = "rrunner",
+ .id_table = rr_pci_tbl,
+ .probe = rr_init_one,
+ .remove = __devexit_p(rr_remove_one),
+};
+
+static int __init rr_init_module(void)
+{
+ return pci_module_init(&rr_driver);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+ pci_unregister_driver(&rr_driver);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c rrunner.c"
+ * End:
+ */
diff --git a/drivers/net/rrunner.h b/drivers/net/rrunner.h
new file mode 100644
index 000000000000..10baae55953a
--- /dev/null
+++ b/drivers/net/rrunner.h
@@ -0,0 +1,848 @@
+#ifndef _RRUNNER_H_
+#define _RRUNNER_H_
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+
+#if ((BITS_PER_LONG != 32) && (BITS_PER_LONG != 64))
+#error "BITS_PER_LONG not defined or not valid"
+#endif
+
+
+struct rr_regs {
+
+ u32 pad0[16];
+
+ u32 HostCtrl;
+ u32 LocalCtrl;
+ u32 Pc;
+ u32 BrkPt;
+
+/* Timer increments every 0.97 micro-seconds (unsigned int) */
+ u32 Timer_Hi;
+ u32 Timer;
+ u32 TimerRef;
+ u32 PciState;
+
+ u32 Event;
+ u32 MbEvent;
+
+ u32 WinBase;
+ u32 WinData;
+ u32 RX_state;
+ u32 TX_state;
+
+ u32 Overhead;
+ u32 ExtIo;
+
+ u32 DmaWriteHostHi;
+ u32 DmaWriteHostLo;
+
+ u32 pad1[2];
+
+ u32 DmaReadHostHi;
+ u32 DmaReadHostLo;
+
+ u32 pad2;
+
+ u32 DmaReadLen;
+ u32 DmaWriteState;
+
+ u32 DmaWriteLcl;
+ u32 DmaWriteIPchecksum;
+ u32 DmaWriteLen;
+ u32 DmaReadState;
+ u32 DmaReadLcl;
+ u32 DmaReadIPchecksum;
+ u32 pad3;
+
+ u32 RxBase;
+ u32 RxPrd;
+ u32 RxCon;
+
+ u32 pad4;
+
+ u32 TxBase;
+ u32 TxPrd;
+ u32 TxCon;
+
+ u32 pad5;
+
+ u32 RxIndPro;
+ u32 RxIndCon;
+ u32 RxIndRef;
+
+ u32 pad6;
+
+ u32 TxIndPro;
+ u32 TxIndCon;
+ u32 TxIndRef;
+
+ u32 pad7[17];
+
+ u32 DrCmndPro;
+ u32 DrCmndCon;
+ u32 DrCmndRef;
+
+ u32 pad8;
+
+ u32 DwCmndPro;
+ u32 DwCmndCon;
+ u32 DwCmndRef;
+
+ u32 AssistState;
+
+ u32 DrDataPro;
+ u32 DrDataCon;
+ u32 DrDataRef;
+
+ u32 pad9;
+
+ u32 DwDataPro;
+ u32 DwDataCon;
+ u32 DwDataRef;
+
+ u32 pad10[33];
+
+ u32 EvtCon;
+
+ u32 pad11[5];
+
+ u32 TxPi;
+ u32 IpRxPi;
+
+ u32 pad11a[8];
+
+ u32 CmdRing[16];
+
+/* The ULA is kept in two registers; the high-order two bytes of the
+ * first word contain the RunCode feature bits.
+ * ula0 res res byte0 byte1
+ * ula1 byte2 byte3 byte4 byte5
+ * (An illustrative unpacking sketch follows this register block.)
+ */
+ u32 Ula0;
+ u32 Ula1;
+
+ u32 RxRingHi;
+ u32 RxRingLo;
+
+ u32 InfoPtrHi;
+ u32 InfoPtrLo;
+
+ u32 Mode;
+
+ u32 ConRetry;
+ u32 ConRetryTmr;
+
+ u32 ConTmout;
+ u32 CtatTmr;
+
+ u32 MaxRxRng;
+
+ u32 IntrTmr;
+ u32 TxDataMvTimeout;
+ u32 RxDataMvTimeout;
+
+ u32 EvtPrd;
+ u32 TraceIdx;
+
+ u32 Fail1;
+ u32 Fail2;
+
+ u32 DrvPrm;
+
+ u32 FilterLA;
+
+ u32 FwRev;
+ u32 FwRes1;
+ u32 FwRes2;
+ u32 FwRes3;
+
+ u32 WriteDmaThresh;
+ u32 ReadDmaThresh;
+
+ u32 pad12[325];
+ u32 Window[512];
+};
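+
+/*
+ * Illustrative sketch only; the driver as posted does not define or call
+ * this helper. Given the ULA layout documented above, the six hardware
+ * address bytes could be unpacked from the two registers like this,
+ * assuming readl() returns the register contents in host order.
+ */
+static inline void rr_unpack_ula_sketch(struct rr_regs __iomem *regs, u8 *addr)
+{
+ u32 ula0 = readl(&regs->Ula0);
+ u32 ula1 = readl(&regs->Ula1);
+
+ addr[0] = (ula0 >> 8) & 0xff; /* byte0 */
+ addr[1] = ula0 & 0xff; /* byte1 */
+ addr[2] = (ula1 >> 24) & 0xff; /* byte2 */
+ addr[3] = (ula1 >> 16) & 0xff; /* byte3 */
+ addr[4] = (ula1 >> 8) & 0xff; /* byte4 */
+ addr[5] = ula1 & 0xff; /* byte5 */
+}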
+
+/*
+ * Host control register bits.
+ */
+
+#define RR_INT 0x01
+#define RR_CLEAR_INT 0x02
+#define NO_SWAP 0x04000004
+#define NO_SWAP1 0x00000004
+#define PCI_RESET_NIC 0x08
+#define HALT_NIC 0x10
+#define SSTEP_NIC 0x20
+#define MEM_READ_MULTI 0x40
+#define NIC_HALTED 0x100
+#define HALT_INST 0x200
+#define PARITY_ERR 0x400
+#define INVALID_INST_B 0x800
+#define RR_REV_2 0x20000000
+#define RR_REV_MASK 0xf0000000
+
+/*
+ * Local control register bits.
+ */
+
+#define INTA_STATE 0x01
+#define CLEAR_INTA 0x02
+#define FAST_EEPROM_ACCESS 0x08
+#define ENABLE_EXTRA_SRAM 0x100
+#define ENABLE_EXTRA_DESC 0x200
+#define ENABLE_PARITY 0x400
+#define FORCE_DMA_PARITY_ERROR 0x800
+#define ENABLE_EEPROM_WRITE 0x1000
+#define ENABLE_DATA_CACHE 0x2000
+#define SRAM_LO_PARITY_ERR 0x4000
+#define SRAM_HI_PARITY_ERR 0x8000
+
+/*
+ * PCI state bits.
+ */
+
+#define FORCE_PCI_RESET 0x01
+#define PROVIDE_LENGTH 0x02
+#define MASK_DMA_READ_MAX 0x1C
+#define RBURST_DISABLE 0x00
+#define RBURST_4 0x04
+#define RBURST_16 0x08
+#define RBURST_32 0x0C
+#define RBURST_64 0x10
+#define RBURST_128 0x14
+#define RBURST_256 0x18
+#define RBURST_1024 0x1C
+#define MASK_DMA_WRITE_MAX 0xE0
+#define WBURST_DISABLE 0x00
+#define WBURST_4 0x20
+#define WBURST_16 0x40
+#define WBURST_32 0x60
+#define WBURST_64 0x80
+#define WBURST_128 0xa0
+#define WBURST_256 0xc0
+#define WBURST_1024 0xe0
+#define MASK_MIN_DMA 0xFF00
+#define FIFO_RETRY_ENABLE 0x10000
+
+/*
+ * Event register
+ */
+
+#define DMA_WRITE_DONE 0x10000
+#define DMA_READ_DONE 0x20000
+#define DMA_WRITE_ERR 0x40000
+#define DMA_READ_ERR 0x80000
+
+/*
+ * Receive state
+ *
+ * RoadRunner HIPPI Receive State Register controls and monitors the
+ * HIPPI receive interface in the NIC. Look at err bits when a HIPPI
+ * receive Error Event occurs.
+ */
+
+#define ENABLE_NEW_CON 0x01
+#define RESET_RECV 0x02
+#define RECV_ALL 0x00
+#define RECV_1K 0x20
+#define RECV_2K 0x40
+#define RECV_4K 0x60
+#define RECV_8K 0x80
+#define RECV_16K 0xa0
+#define RECV_32K 0xc0
+#define RECV_64K 0xe0
+
+/*
+ * Transmit status.
+ */
+
+#define ENA_XMIT 0x01
+#define PERM_CON 0x02
+
+/*
+ * DMA write state
+ */
+
+#define RESET_DMA 0x01
+#define NO_SWAP_DMA 0x02
+#define DMA_ACTIVE 0x04
+#define THRESH_MASK 0x1F
+#define DMA_ERROR_MASK 0xff000000
+
+/*
+ * Goodies stored in the ULA registers.
+ */
+
+#define TRACE_ON_WHAT_BIT 0x00020000 /* Traces on */
+#define ONEM_BUF_WHAT_BIT 0x00040000 /* 1Meg vs 256K */
+#define CHAR_API_WHAT_BIT 0x00080000 /* Char API vs network only */
+#define CMD_EVT_WHAT_BIT 0x00200000 /* Command event */
+#define LONG_TX_WHAT_BIT 0x00400000
+#define LONG_RX_WHAT_BIT 0x00800000
+#define WHAT_BIT_MASK 0xFFFD0000 /* Feature bit mask */
+
+/*
+ * Mode status
+ */
+
+#define EVENT_OVFL 0x80000000
+#define FATAL_ERR 0x40000000
+#define LOOP_BACK 0x01
+#define MODE_PH 0x02
+#define MODE_FP 0x00
+#define PTR64BIT 0x04
+#define PTR32BIT 0x00
+#define PTR_WD_SWAP 0x08
+#define PTR_WD_NOSWAP 0x00
+#define POST_WARN_EVENT 0x10
+#define ERR_TERM 0x20
+#define DIRECT_CONN 0x40
+#define NO_NIC_WATCHDOG 0x80
+#define SWAP_DATA 0x100
+#define SWAP_CONTROL 0x200
+#define NIC_HALT_ON_ERR 0x400
+#define NIC_NO_RESTART 0x800
+#define HALF_DUP_TX 0x1000
+#define HALF_DUP_RX 0x2000
+
+
+/*
+ * Error codes
+ */
+
+/* Host Error Codes - values of fail1 */
+#define ERR_UNKNOWN_MBOX 0x1001
+#define ERR_UNKNOWN_CMD 0x1002
+#define ERR_MAX_RING 0x1003
+#define ERR_RING_CLOSED 0x1004
+#define ERR_RING_OPEN 0x1005
+/* Firmware internal errors */
+#define ERR_EVENT_RING_FULL 0x01
+#define ERR_DW_PEND_CMND_FULL 0x02
+#define ERR_DR_PEND_CMND_FULL 0x03
+#define ERR_DW_PEND_DATA_FULL 0x04
+#define ERR_DR_PEND_DATA_FULL 0x05
+#define ERR_ILLEGAL_JUMP 0x06
+#define ERR_UNIMPLEMENTED 0x07
+#define ERR_TX_INFO_FULL 0x08
+#define ERR_RX_INFO_FULL 0x09
+#define ERR_ILLEGAL_MODE 0x0A
+#define ERR_MAIN_TIMEOUT 0x0B
+#define ERR_EVENT_BITS 0x0C
+#define ERR_UNPEND_FULL 0x0D
+#define ERR_TIMER_QUEUE_FULL 0x0E
+#define ERR_TIMER_QUEUE_EMPTY 0x0F
+#define ERR_TIMER_NO_FREE 0x10
+#define ERR_INTR_START 0x11
+#define ERR_BAD_STARTUP 0x12
+#define ERR_NO_PKT_END 0x13
+#define ERR_HALTED_ON_ERR 0x14
+/* Hardware NIC Errors */
+#define ERR_WRITE_DMA 0x0101
+#define ERR_READ_DMA 0x0102
+#define ERR_EXT_SERIAL 0x0103
+#define ERR_TX_INT_PARITY 0x0104
+
+
+/*
+ * Event definitions
+ */
+
+#define EVT_RING_ENTRIES 64
+#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
+
+struct event {
+#ifdef __LITTLE_ENDIAN
+ u16 index;
+ u8 ring;
+ u8 code;
+#else
+ u8 code;
+ u8 ring;
+ u16 index;
+#endif
+ u32 timestamp;
+};
+
+/*
+ * General Events
+ */
+
+#define E_NIC_UP 0x01
+#define E_WATCHDOG 0x02
+
+#define E_STAT_UPD 0x04
+#define E_INVAL_CMD 0x05
+#define E_SET_CMD_CONS 0x06
+#define E_LINK_ON 0x07
+#define E_LINK_OFF 0x08
+#define E_INTERN_ERR 0x09
+#define E_HOST_ERR 0x0A
+#define E_STATS_UPDATE 0x0B
+#define E_REJECTING 0x0C
+
+/*
+ * Send Events
+ */
+#define E_CON_REJ 0x13
+#define E_CON_TMOUT 0x14
+#define E_CON_NC_TMOUT 0x15 /* I , Connection No Campon Timeout */
+#define E_DISC_ERR 0x16
+#define E_INT_PRTY 0x17
+#define E_TX_IDLE 0x18
+#define E_TX_LINK_DROP 0x19
+#define E_TX_INV_RNG 0x1A
+#define E_TX_INV_BUF 0x1B
+#define E_TX_INV_DSC 0x1C
+
+/*
+ * Destination Events
+ */
+/*
+ * General Receive events
+ */
+#define E_VAL_RNG 0x20
+#define E_RX_RNG_ENER 0x21
+#define E_INV_RNG 0x22
+#define E_RX_RNG_SPC 0x23
+#define E_RX_RNG_OUT 0x24
+#define E_PKT_DISCARD 0x25
+#define E_INFO_EVT 0x27
+
+/*
+ * Data corrupted events
+ */
+#define E_RX_PAR_ERR 0x2B
+#define E_RX_LLRC_ERR 0x2C
+#define E_IP_CKSM_ERR 0x2D
+#define E_DTA_CKSM_ERR 0x2E
+#define E_SHT_BST 0x2F
+
+/*
+ * Data lost events
+ */
+#define E_LST_LNK_ERR 0x30
+#define E_FLG_SYN_ERR 0x31
+#define E_FRM_ERR 0x32
+#define E_RX_IDLE 0x33
+#define E_PKT_LN_ERR 0x34
+#define E_STATE_ERR 0x35
+#define E_UNEXP_DATA 0x3C
+
+/*
+ * Fatal events
+ */
+#define E_RX_INV_BUF 0x36
+#define E_RX_INV_DSC 0x37
+#define E_RNG_BLK 0x38
+
+/*
+ * Warning events
+ */
+#define E_RX_TO 0x39
+#define E_BFR_SPC 0x3A
+#define E_INV_ULP 0x3B
+
+#define E_NOT_IMPLEMENTED 0x40
+
+
+/*
+ * Commands
+ */
+
+#define CMD_RING_ENTRIES 16
+
+struct cmd {
+#ifdef __LITTLE_ENDIAN
+ u16 index;
+ u8 ring;
+ u8 code;
+#else
+ u8 code;
+ u8 ring;
+ u16 index;
+#endif
+};
+
+#define C_START_FW 0x01
+#define C_UPD_STAT 0x02
+#define C_WATCHDOG 0x05
+#define C_DEL_RNG 0x09
+#define C_NEW_RNG 0x0A
+#define C_CONN 0x0D
+
+
+/*
+ * Mode bits
+ */
+
+#define PACKET_BAD 0x01 /* Packet had link-layer error */
+#define INTERRUPT 0x02
+#define TX_IP_CKSUM 0x04
+#define PACKET_END 0x08
+#define PACKET_START 0x10
+#define SAME_IFIELD 0x80
+
+
+typedef struct {
+#if (BITS_PER_LONG == 64)
+ u64 addrlo;
+#else
+ u32 addrhi;
+ u32 addrlo;
+#endif
+} rraddr;
+
+
+static inline void set_rraddr(rraddr *ra, dma_addr_t addr)
+{
+ unsigned long baddr = addr;
+#if (BITS_PER_LONG == 64)
+ ra->addrlo = baddr;
+#else
+ /* addrhi is left untouched; it is assumed to already be zero */
+ ra->addrlo = baddr;
+#endif
+ mb();
+}
+
+
+static inline void set_rxaddr(struct rr_regs __iomem *regs, volatile dma_addr_t addr)
+{
+ unsigned long baddr = addr;
+#if (BITS_PER_LONG == 64) && defined(__LITTLE_ENDIAN)
+ writel(baddr & 0xffffffff, &regs->RxRingHi);
+ writel(baddr >> 32, &regs->RxRingLo);
+#elif (BITS_PER_LONG == 64)
+ writel(baddr >> 32, &regs->RxRingHi);
+ writel(baddr & 0xffffffff, &regs->RxRingLo);
+#else
+ writel(0, &regs->RxRingHi);
+ writel(baddr, &regs->RxRingLo);
+#endif
+ mb();
+}
+
+
+static inline void set_infoaddr(struct rr_regs __iomem *regs, volatile dma_addr_t addr)
+{
+ unsigned long baddr = addr;
+#if (BITS_PER_LONG == 64) && defined(__LITTLE_ENDIAN)
+ writel(baddr & 0xffffffff, &regs->InfoPtrHi);
+ writel(baddr >> 32, &regs->InfoPtrLo);
+#elif (BITS_PER_LONG == 64)
+ writel(baddr >> 32, &regs->InfoPtrHi);
+ writel(baddr & 0xffffffff, &regs->InfoPtrLo);
+#else
+ writel(0, &regs->InfoPtrHi);
+ writel(baddr, &regs->InfoPtrLo);
+#endif
+ mb();
+}
+
+
+/*
+ * TX ring
+ */
+
+#ifdef CONFIG_ROADRUNNER_LARGE_RINGS
+#define TX_RING_ENTRIES 32
+#else
+#define TX_RING_ENTRIES 16
+#endif
+#define TX_TOTAL_SIZE (TX_RING_ENTRIES * sizeof(struct tx_desc))
+
+struct tx_desc{
+ rraddr addr;
+ u32 res;
+#ifdef __LITTLE_ENDIAN
+ u16 size;
+ u8 pad;
+ u8 mode;
+#else
+ u8 mode;
+ u8 pad;
+ u16 size;
+#endif
+};
+
+
+#ifdef CONFIG_ROADRUNNER_LARGE_RINGS
+#define RX_RING_ENTRIES 32
+#else
+#define RX_RING_ENTRIES 16
+#endif
+#define RX_TOTAL_SIZE (RX_RING_ENTRIES * sizeof(struct rx_desc))
+
+struct rx_desc{
+ rraddr addr;
+ u32 res;
+#ifdef __LITTLE_ENDIAN
+ u16 size;
+ u8 pad;
+ u8 mode;
+#else
+ u8 mode;
+ u8 pad;
+ u16 size;
+#endif
+};
+
+
+/*
+ * ioctl's
+ */
+
+#define SIOCRRPFW SIOCDEVPRIVATE /* put firmware */
+#define SIOCRRGFW SIOCDEVPRIVATE+1 /* get firmware */
+#define SIOCRRID SIOCDEVPRIVATE+2 /* identify */
+
+
+struct seg_hdr {
+ u32 seg_start;
+ u32 seg_len;
+ u32 seg_eestart;
+};
+
+
+#define EEPROM_BASE 0x80000000
+#define EEPROM_WORDS 8192
+#define EEPROM_BYTES (EEPROM_WORDS * sizeof(u32))
+
+struct eeprom_boot {
+ u32 key1;
+ u32 key2;
+ u32 sram_size;
+ struct seg_hdr loader;
+ u32 init_chksum;
+ u32 reserved1;
+};
+
+struct eeprom_manf {
+ u32 HeaderFmt;
+ u32 Firmware;
+ u32 BoardRevision;
+ u32 RoadrunnerRev;
+ char OpticsPart[8];
+ u32 OpticsRev;
+ u32 pad1;
+ char SramPart[8];
+ u32 SramRev;
+ u32 pad2;
+ char EepromPart[8];
+ u32 EepromRev;
+ u32 EepromSize;
+ char PalPart[8];
+ u32 PalRev;
+ u32 pad3;
+ char PalCodeFile[12];
+ u32 PalCodeRev;
+ char BoardULA[8];
+ char SerialNo[8];
+ char MfgDate[8];
+ char MfgTime[8];
+ char ModifyDate[8];
+ u32 ModCount;
+ u32 pad4[13];
+};
+
+
+struct eeprom_phase_info {
+ char phase1File[12];
+ u32 phase1Rev;
+ char phase1Date[8];
+ char phase2File[12];
+ u32 phase2Rev;
+ char phase2Date[8];
+ u32 reserved7[4];
+};
+
+struct eeprom_rncd_info {
+ u32 FwStart;
+ u32 FwRev;
+ char FwDate[8];
+ u32 AddrRunCodeSegs;
+ u32 FileNames;
+ char File[13][8];
+};
+
+
+/* Phase 1 region (starts at word offset 0x80) */
+struct phase1_hdr{
+ u32 jump;
+ u32 noop;
+ struct seg_hdr phase2Seg;
+};
+
+struct eeprom {
+ struct eeprom_boot boot;
+ u32 pad1[8];
+ struct eeprom_manf manf;
+ struct eeprom_phase_info phase_info;
+ struct eeprom_rncd_info rncd_info;
+ u32 pad2[15];
+ u32 hdr_checksum;
+ struct phase1_hdr phase1;
+};
+
+
+struct rr_stats {
+ u32 NicTimeStamp;
+ u32 RngCreated;
+ u32 RngDeleted;
+ u32 IntrGen;
+ u32 NEvtOvfl;
+ u32 InvCmd;
+ u32 DmaReadErrs;
+ u32 DmaWriteErrs;
+ u32 StatUpdtT;
+ u32 StatUpdtC;
+ u32 WatchDog;
+ u32 Trace;
+
+ /* Serial HIPPI */
+ u32 LnkRdyEst;
+ u32 GLinkErr;
+ u32 AltFlgErr;
+ u32 OvhdBit8Sync;
+ u32 RmtSerPrtyErr;
+ u32 RmtParPrtyErr;
+ u32 RmtLoopBk;
+ u32 pad1;
+
+ /* HIPPI tx */
+ u32 ConEst;
+ u32 ConRejS;
+ u32 ConRetry;
+ u32 ConTmOut;
+ u32 SndConDiscon;
+ u32 SndParErr;
+ u32 PktSnt;
+ u32 pad2[2];
+ u32 ShFBstSnt;
+ u64 BytSent;
+ u32 TxTimeout;
+ u32 pad3[3];
+
+ /* HIPPI rx */
+ u32 ConAcc;
+ u32 ConRejdiPrty;
+ u32 ConRejd64b;
+ u32 ConRejdBuf;
+ u32 RxConDiscon;
+ u32 RxConNoData;
+ u32 PktRx;
+ u32 pad4[2];
+ u32 ShFBstRx;
+ u64 BytRx;
+ u32 RxParErr;
+ u32 RxLLRCerr;
+ u32 RxBstSZerr;
+ u32 RxStateErr;
+ u32 RxRdyErr;
+ u32 RxInvULP;
+ u32 RxSpcBuf;
+ u32 RxSpcDesc;
+ u32 RxRngSpc;
+ u32 RxRngFull;
+ u32 RxPktLenErr;
+ u32 RxCksmErr;
+ u32 RxPktDrp;
+ u32 RngLowSpc;
+ u32 RngDataClose;
+ u32 RxTimeout;
+ u32 RxIdle;
+};
+
+
+/*
+ * This struct is shared with the NIC firmware.
+ */
+struct ring_ctrl {
+ rraddr rngptr;
+#ifdef __LITTLE_ENDIAN
+ u16 entries;
+ u8 pad;
+ u8 entry_size;
+ u16 pi;
+ u16 mode;
+#else
+ u8 entry_size;
+ u8 pad;
+ u16 entries;
+ u16 mode;
+ u16 pi;
+#endif
+};
+
+struct rr_info {
+ union {
+ struct rr_stats stats;
+ u32 stati[128];
+ } s;
+ struct ring_ctrl evt_ctrl;
+ struct ring_ctrl cmd_ctrl;
+ struct ring_ctrl tx_ctrl;
+ u8 pad[464];
+ u8 trace[3072];
+};
+
+/*
+ * The linux structure for the RoadRunner.
+ *
+ * RX/TX descriptors are put first to make sure they are properly
+ * aligned and do not cross cache-line boundaries.
+ */
+
+struct rr_private
+{
+ struct rx_desc *rx_ring;
+ struct tx_desc *tx_ring;
+ struct event *evt_ring;
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t evt_ring_dma;
+ /* Alignment ok ? */
+ struct sk_buff *rx_skbuff[RX_RING_ENTRIES];
+ struct sk_buff *tx_skbuff[TX_RING_ENTRIES];
+ struct rr_regs __iomem *regs; /* Register base */
+ struct ring_ctrl *rx_ctrl; /* Receive ring control */
+ struct rr_info *info; /* Shared info page */
+ dma_addr_t rx_ctrl_dma;
+ dma_addr_t info_dma;
+ spinlock_t lock;
+ struct timer_list timer;
+ u32 cur_rx, cur_cmd, cur_evt;
+ u32 dirty_rx, dirty_tx;
+ u32 tx_full;
+ u32 fw_rev;
+ volatile short fw_running;
+ struct net_device_stats stats;
+ struct pci_dev *pci_dev;
+};
+
+
+/*
+ * Prototypes
+ */
+static int rr_init(struct net_device *dev);
+static int rr_init1(struct net_device *dev);
+static irqreturn_t rr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+static int rr_open(struct net_device *dev);
+static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int rr_close(struct net_device *dev);
+static struct net_device_stats *rr_get_stats(struct net_device *dev);
+static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
+ unsigned long offset,
+ unsigned char *buf,
+ unsigned long length);
+static u32 rr_read_eeprom_word(struct rr_private *rrpriv, void * offset);
+static int rr_load_firmware(struct net_device *dev);
+static inline void rr_raz_tx(struct rr_private *, struct net_device *);
+static inline void rr_raz_rx(struct rr_private *, struct net_device *);
+#endif /* _RRUNNER_H_ */
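As an aside on the SIOCRR* definitions above: they are plain SIOCDEVPRIVATE
ioctls handled by rr_ioctl() in rrunner.c, so they are driven from userspace
through an AF_INET socket and a struct ifreq. The sketch below is illustrative
only and not part of this patch; the interface name "hip0" is an arbitrary
example and error handling is trimmed.

/* Illustrative userspace sketch for the RoadRunner private ioctls. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

#define SIOCRRID (SIOCDEVPRIVATE + 2)	/* same value as in rrunner.h */

int main(void)
{
	struct ifreq ifr;
	int id = 0;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hip0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (char *) &id;	/* rr_ioctl() does put_user() here */

	if (ioctl(fd, SIOCRRID, &ifr) == 0)
		printf("RoadRunner id: 0x%08x\n", id);	/* expect 0x52523032 ("RR02") */
	return 0;
}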
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
new file mode 100644
index 000000000000..7092ca6b277e
--- /dev/null
+++ b/drivers/net/s2io-regs.h
@@ -0,0 +1,778 @@
+/************************************************************************
+ * regs.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
+ * Copyright(c) 2002-2005 Neterion Inc.
+
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ ************************************************************************/
+#ifndef _REGS_H
+#define _REGS_H
+
+#define TBD 0
+
+typedef struct _XENA_dev_config {
+/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
+
+/* General Control-Status Registers */
+ u64 general_int_status;
+#define GEN_INTR_TXPIC BIT(0)
+#define GEN_INTR_TXDMA BIT(1)
+#define GEN_INTR_TXMAC BIT(2)
+#define GEN_INTR_TXXGXS BIT(3)
+#define GEN_INTR_TXTRAFFIC BIT(8)
+#define GEN_INTR_RXPIC BIT(32)
+#define GEN_INTR_RXDMA BIT(33)
+#define GEN_INTR_RXMAC BIT(34)
+#define GEN_INTR_MC BIT(35)
+#define GEN_INTR_RXXGXS BIT(36)
+#define GEN_INTR_RXTRAFFIC BIT(40)
+#define GEN_ERROR_INTR GEN_INTR_TXPIC | GEN_INTR_RXPIC | \
+ GEN_INTR_TXDMA | GEN_INTR_RXDMA | \
+ GEN_INTR_TXMAC | GEN_INTR_RXMAC | \
+ GEN_INTR_TXXGXS| GEN_INTR_RXXGXS| \
+ GEN_INTR_MC
+
+ u64 general_int_mask;
+
+ u8 unused0[0x100 - 0x10];
+
+ u64 sw_reset;
+/* XGXS must be removed from reset only once. */
+#define SW_RESET_XENA vBIT(0xA5,0,8)
+#define SW_RESET_FLASH vBIT(0xA5,8,8)
+#define SW_RESET_EOI vBIT(0xA5,16,8)
+#define SW_RESET_ALL (SW_RESET_XENA | \
+ SW_RESET_FLASH | \
+ SW_RESET_EOI)
+/* The SW_RESET register must read this value after a successful reset. */
+#define SW_RESET_RAW_VAL 0xA5000000
+
+
+ u64 adapter_status;
+#define ADAPTER_STATUS_TDMA_READY BIT(0)
+#define ADAPTER_STATUS_RDMA_READY BIT(1)
+#define ADAPTER_STATUS_PFC_READY BIT(2)
+#define ADAPTER_STATUS_TMAC_BUF_EMPTY BIT(3)
+#define ADAPTER_STATUS_PIC_QUIESCENT BIT(5)
+#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6)
+#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7)
+#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
+#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
+#define ADAPTER_STATUS_MC_DRAM_READY BIT(24)
+#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25)
+#define ADAPTER_STATUS_M_PLL_LOCK BIT(30)
+#define ADAPTER_STATUS_P_PLL_LOCK BIT(31)
+
+ u64 adapter_control;
+#define ADAPTER_CNTL_EN BIT(7)
+#define ADAPTER_EOI_TX_ON BIT(15)
+#define ADAPTER_LED_ON BIT(23)
+#define ADAPTER_UDPI(val) vBIT(val,36,4)
+#define ADAPTER_WAIT_INT BIT(48)
+#define ADAPTER_ECC_EN BIT(55)
+
+ u64 serr_source;
+#define SERR_SOURCE_PIC BIT(0)
+#define SERR_SOURCE_TXDMA BIT(1)
+#define SERR_SOURCE_RXDMA BIT(2)
+#define SERR_SOURCE_MAC BIT(3)
+#define SERR_SOURCE_MC BIT(4)
+#define SERR_SOURCE_XGXS BIT(5)
+#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
+ SERR_SOURCE_TXDMA | \
+ SERR_SOURCE_RXDMA | \
+ SERR_SOURCE_MAC | \
+ SERR_SOURCE_MC | \
+ SERR_SOURCE_XGXS)
+
+
+ u8 unused_0[0x800 - 0x120];
+
+/* PCI-X Controller registers */
+ u64 pic_int_status;
+ u64 pic_int_mask;
+#define PIC_INT_TX BIT(0)
+#define PIC_INT_FLSH BIT(1)
+#define PIC_INT_MDIO BIT(2)
+#define PIC_INT_IIC BIT(3)
+#define PIC_INT_GPIO BIT(4)
+#define PIC_INT_RX BIT(32)
+
+ u64 txpic_int_reg;
+ u64 txpic_int_mask;
+#define PCIX_INT_REG_ECC_SG_ERR BIT(0)
+#define PCIX_INT_REG_ECC_DB_ERR BIT(1)
+#define PCIX_INT_REG_FLASHR_R_FSM_ERR BIT(8)
+#define PCIX_INT_REG_FLASHR_W_FSM_ERR BIT(9)
+#define PCIX_INT_REG_INI_TX_FSM_SERR BIT(10)
+#define PCIX_INT_REG_INI_TXO_FSM_ERR BIT(11)
+#define PCIX_INT_REG_TRT_FSM_SERR BIT(13)
+#define PCIX_INT_REG_SRT_FSM_SERR BIT(14)
+#define PCIX_INT_REG_PIFR_FSM_SERR BIT(15)
+#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR BIT(21)
+#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR BIT(23)
+#define PCIX_INT_REG_INI_RX_FSM_SERR BIT(48)
+#define PCIX_INT_REG_RA_RX_FSM_SERR BIT(50)
+/*
+#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR BIT(52)
+#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR BIT(54)
+#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR BIT(58)
+*/
+ u64 txpic_alarms;
+ u64 rxpic_int_reg;
+ u64 rxpic_int_mask;
+ u64 rxpic_alarms;
+
+ u64 flsh_int_reg;
+ u64 flsh_int_mask;
+#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR BIT(63)
+#define PIC_FLSH_INT_REG_ERR BIT(62)
+ u64 flash_alarms;
+
+ u64 mdio_int_reg;
+ u64 mdio_int_mask;
+#define MDIO_INT_REG_MDIO_BUS_ERR BIT(0)
+#define MDIO_INT_REG_DTX_BUS_ERR BIT(8)
+#define MDIO_INT_REG_LASI BIT(39)
+ u64 mdio_alarms;
+
+ u64 iic_int_reg;
+ u64 iic_int_mask;
+#define IIC_INT_REG_BUS_FSM_ERR BIT(4)
+#define IIC_INT_REG_BIT_FSM_ERR BIT(5)
+#define IIC_INT_REG_CYCLE_FSM_ERR BIT(6)
+#define IIC_INT_REG_REQ_FSM_ERR BIT(7)
+#define IIC_INT_REG_ACK_ERR BIT(8)
+ u64 iic_alarms;
+
+ u8 unused4[0x08];
+
+ u64 gpio_int_reg;
+ u64 gpio_int_mask;
+ u64 gpio_alarms;
+
+ u8 unused5[0x38];
+
+ u64 tx_traffic_int;
+#define TX_TRAFFIC_INT_n(n) BIT(n)
+ u64 tx_traffic_mask;
+
+ u64 rx_traffic_int;
+#define RX_TRAFFIC_INT_n(n) BIT(n)
+ u64 rx_traffic_mask;
+
+/* PIC Control registers */
+ u64 pic_control;
+#define PIC_CNTL_RX_ALARM_MAP_1 BIT(0)
+#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4)
+
+ u64 swapper_ctrl;
+#define SWAPPER_CTRL_PIF_R_FE BIT(0)
+#define SWAPPER_CTRL_PIF_R_SE BIT(1)
+#define SWAPPER_CTRL_PIF_W_FE BIT(8)
+#define SWAPPER_CTRL_PIF_W_SE BIT(9)
+#define SWAPPER_CTRL_TXP_FE BIT(16)
+#define SWAPPER_CTRL_TXP_SE BIT(17)
+#define SWAPPER_CTRL_TXD_R_FE BIT(18)
+#define SWAPPER_CTRL_TXD_R_SE BIT(19)
+#define SWAPPER_CTRL_TXD_W_FE BIT(20)
+#define SWAPPER_CTRL_TXD_W_SE BIT(21)
+#define SWAPPER_CTRL_TXF_R_FE BIT(22)
+#define SWAPPER_CTRL_TXF_R_SE BIT(23)
+#define SWAPPER_CTRL_RXD_R_FE BIT(32)
+#define SWAPPER_CTRL_RXD_R_SE BIT(33)
+#define SWAPPER_CTRL_RXD_W_FE BIT(34)
+#define SWAPPER_CTRL_RXD_W_SE BIT(35)
+#define SWAPPER_CTRL_RXF_W_FE BIT(36)
+#define SWAPPER_CTRL_RXF_W_SE BIT(37)
+#define SWAPPER_CTRL_XMSI_FE BIT(40)
+#define SWAPPER_CTRL_XMSI_SE BIT(41)
+#define SWAPPER_CTRL_STATS_FE BIT(48)
+#define SWAPPER_CTRL_STATS_SE BIT(49)
+
+ u64 pif_rd_swapper_fb;
+#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF
+
+ u64 scheduled_int_ctrl;
+#define SCHED_INT_CTRL_TIMER_EN BIT(0)
+#define SCHED_INT_CTRL_ONE_SHOT BIT(1)
+#define SCHED_INT_CTRL_INT2MSI TBD
+#define SCHED_INT_PERIOD TBD
+
+ u64 txreqtimeout;
+#define TXREQTO_VAL(val) vBIT(val,0,32)
+#define TXREQTO_EN BIT(63)
+
+ u64 statsreqtimeout;
+#define STATREQTO_VAL(n) TBD
+#define STATREQTO_EN BIT(63)
+
+ u64 read_retry_delay;
+ u64 read_retry_acceleration;
+ u64 write_retry_delay;
+ u64 write_retry_acceleration;
+
+ u64 xmsi_control;
+ u64 xmsi_access;
+ u64 xmsi_address;
+ u64 xmsi_data;
+
+ u64 rx_mat;
+
+ u8 unused6[0x8];
+
+ u64 tx_mat0_7;
+ u64 tx_mat8_15;
+ u64 tx_mat16_23;
+ u64 tx_mat24_31;
+ u64 tx_mat32_39;
+ u64 tx_mat40_47;
+ u64 tx_mat48_55;
+ u64 tx_mat56_63;
+
+ u8 unused_1[0x10];
+
+ /* Automated statistics collection */
+ u64 stat_cfg;
+#define STAT_CFG_STAT_EN BIT(0)
+#define STAT_CFG_ONE_SHOT_EN BIT(1)
+#define STAT_CFG_STAT_NS_EN BIT(8)
+#define STAT_CFG_STAT_RO BIT(9)
+#define STAT_TRSF_PER(n) TBD
+#define PER_SEC 0x208d5
+#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
+
+ u64 stat_addr;
+
+ /* General Configuration */
+ u64 mdio_control;
+
+ u64 dtx_control;
+
+ u64 i2c_control;
+#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
+#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
+#define I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
+#define I2C_CONTROL_READ BIT(24)
+#define I2C_CONTROL_NACK BIT(25)
+#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
+#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
+#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
+#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
+
+ u64 gpio_control;
+#define GPIO_CTRL_GPIO_0 BIT(8)
+
+ u8 unused7[0x600];
+
+/* TxDMA registers */
+ u64 txdma_int_status;
+ u64 txdma_int_mask;
+#define TXDMA_PFC_INT BIT(0)
+#define TXDMA_TDA_INT BIT(1)
+#define TXDMA_PCC_INT BIT(2)
+#define TXDMA_TTI_INT BIT(3)
+#define TXDMA_LSO_INT BIT(4)
+#define TXDMA_TPA_INT BIT(5)
+#define TXDMA_SM_INT BIT(6)
+ u64 pfc_err_reg;
+ u64 pfc_err_mask;
+ u64 pfc_err_alarm;
+
+ u64 tda_err_reg;
+ u64 tda_err_mask;
+ u64 tda_err_alarm;
+
+ u64 pcc_err_reg;
+#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8)
+
+ u64 pcc_err_mask;
+ u64 pcc_err_alarm;
+
+ u64 tti_err_reg;
+ u64 tti_err_mask;
+ u64 tti_err_alarm;
+
+ u64 lso_err_reg;
+ u64 lso_err_mask;
+ u64 lso_err_alarm;
+
+ u64 tpa_err_reg;
+ u64 tpa_err_mask;
+ u64 tpa_err_alarm;
+
+ u64 sm_err_reg;
+ u64 sm_err_mask;
+ u64 sm_err_alarm;
+
+ u8 unused8[0x100 - 0xB8];
+
+/* TxDMA arbiter */
+ u64 tx_dma_wrap_stat;
+
+/* Tx FIFO controller */
+#define X_MAX_FIFOS 8
+#define X_FIFO_MAX_LEN 0x1FFF /*8191 */
+ u64 tx_fifo_partition_0;
+#define TX_FIFO_PARTITION_EN BIT(0)
+#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13 )
+
+ u64 tx_fifo_partition_1;
+#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_2;
+#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_3;
+#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
+
+#define TX_FIFO_PARTITION_PRI_0 0 /* highest */
+#define TX_FIFO_PARTITION_PRI_1 1
+#define TX_FIFO_PARTITION_PRI_2 2
+#define TX_FIFO_PARTITION_PRI_3 3
+#define TX_FIFO_PARTITION_PRI_4 4
+#define TX_FIFO_PARTITION_PRI_5 5
+#define TX_FIFO_PARTITION_PRI_6 6
+#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */
+
+ u64 tx_w_round_robin_0;
+ u64 tx_w_round_robin_1;
+ u64 tx_w_round_robin_2;
+ u64 tx_w_round_robin_3;
+ u64 tx_w_round_robin_4;
+
+ u64 tti_command_mem;
+#define TTI_CMD_MEM_WE BIT(7)
+#define TTI_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define TTI_CMD_MEM_STROBE_BEING_EXECUTED BIT(15)
+#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 tti_data1_mem;
+#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
+#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
+#define TTI_DATA1_MEM_TX_TIMER_AC_EN BIT(38)
+#define TTI_DATA1_MEM_TX_TIMER_CI_EN BIT(39)
+#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
+#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
+#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
+
+ u64 tti_data2_mem;
+#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
+#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
+#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
+#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
+
+/* Tx Protocol assist */
+ u64 tx_pa_cfg;
+#define TX_PA_CFG_IGNORE_FRM_ERR BIT(1)
+#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
+#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
+#define TX_PA_CFG_IGNORE_L2_ERR BIT(6)
+
+/* Recent addition, used only for debug purposes. */
+ u64 pcc_enable;
+
+ u8 unused9[0x700 - 0x178];
+
+ u64 txdma_debug_ctrl;
+
+ u8 unused10[0x1800 - 0x1708];
+
+/* RxDMA Registers */
+ u64 rxdma_int_status;
+ u64 rxdma_int_mask;
+#define RXDMA_INT_RC_INT_M BIT(0)
+#define RXDMA_INT_RPA_INT_M BIT(1)
+#define RXDMA_INT_RDA_INT_M BIT(2)
+#define RXDMA_INT_RTI_INT_M BIT(3)
+
+ u64 rda_err_reg;
+ u64 rda_err_mask;
+ u64 rda_err_alarm;
+
+ u64 rc_err_reg;
+ u64 rc_err_mask;
+ u64 rc_err_alarm;
+
+ u64 prc_pcix_err_reg;
+ u64 prc_pcix_err_mask;
+ u64 prc_pcix_err_alarm;
+
+ u64 rpa_err_reg;
+ u64 rpa_err_mask;
+ u64 rpa_err_alarm;
+
+ u64 rti_err_reg;
+ u64 rti_err_mask;
+ u64 rti_err_alarm;
+
+ u8 unused11[0x100 - 0x88];
+
+/* DMA arbiter */
+ u64 rx_queue_priority;
+#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
+#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
+#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
+#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
+#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
+#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
+#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
+#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
+
+#define RX_QUEUE_PRI_0 0 /* highest */
+#define RX_QUEUE_PRI_1 1
+#define RX_QUEUE_PRI_2 2
+#define RX_QUEUE_PRI_3 3
+#define RX_QUEUE_PRI_4 4
+#define RX_QUEUE_PRI_5 5
+#define RX_QUEUE_PRI_6 6
+#define RX_QUEUE_PRI_7 7 /* lowest */
+
+ u64 rx_w_round_robin_0;
+ u64 rx_w_round_robin_1;
+ u64 rx_w_round_robin_2;
+ u64 rx_w_round_robin_3;
+ u64 rx_w_round_robin_4;
+
+ /* Per-ring controller regs */
+#define RX_MAX_RINGS 8
+#if 0
+#define RX_MAX_RINGS_SZ 0xFFFF /* 65536 */
+#define RX_MIN_RINGS_SZ 0x3F /* 63 */
+#endif
+ u64 prc_rxd0_n[RX_MAX_RINGS];
+ u64 prc_ctrl_n[RX_MAX_RINGS];
+#define PRC_CTRL_RC_ENABLED BIT(7)
+#define PRC_CTRL_RING_MODE (BIT(14)|BIT(15))
+#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
+#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
+#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
+#define PRC_CTRL_RING_MODE_x vBIT(3,14,2)
+#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23))
+#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
+#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
+#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
+
+ u64 prc_alarm_action;
+#define PRC_ALARM_ACTION_RR_R0_STOP BIT(3)
+#define PRC_ALARM_ACTION_RW_R0_STOP BIT(7)
+#define PRC_ALARM_ACTION_RR_R1_STOP BIT(11)
+#define PRC_ALARM_ACTION_RW_R1_STOP BIT(15)
+#define PRC_ALARM_ACTION_RR_R2_STOP BIT(19)
+#define PRC_ALARM_ACTION_RW_R2_STOP BIT(23)
+#define PRC_ALARM_ACTION_RR_R3_STOP BIT(27)
+#define PRC_ALARM_ACTION_RW_R3_STOP BIT(31)
+#define PRC_ALARM_ACTION_RR_R4_STOP BIT(35)
+#define PRC_ALARM_ACTION_RW_R4_STOP BIT(39)
+#define PRC_ALARM_ACTION_RR_R5_STOP BIT(43)
+#define PRC_ALARM_ACTION_RW_R5_STOP BIT(47)
+#define PRC_ALARM_ACTION_RR_R6_STOP BIT(51)
+#define PRC_ALARM_ACTION_RW_R6_STOP BIT(55)
+#define PRC_ALARM_ACTION_RR_R7_STOP BIT(59)
+#define PRC_ALARM_ACTION_RW_R7_STOP BIT(63)
+
+/* Receive traffic interrupts */
+ u64 rti_command_mem;
+#define RTI_CMD_MEM_WE BIT(7)
+#define RTI_CMD_MEM_STROBE BIT(15)
+#define RTI_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED BIT(15)
+#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
+
+ u64 rti_data1_mem;
+#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
+#define RTI_DATA1_MEM_RX_TIMER_AC_EN BIT(38)
+#define RTI_DATA1_MEM_RX_TIMER_CI_EN BIT(39)
+#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
+#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
+#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
+
+ u64 rti_data2_mem;
+#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
+#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
+#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
+#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
+
+ u64 rx_pa_cfg;
+#define RX_PA_CFG_IGNORE_FRM_ERR BIT(1)
+#define RX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
+#define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
+#define RX_PA_CFG_IGNORE_L2_ERR BIT(6)
+
+ u8 unused12[0x700 - 0x1D8];
+
+ u64 rxdma_debug_ctrl;
+
+ u8 unused13[0x2000 - 0x1f08];
+
+/* Media Access Controller Register */
+ u64 mac_int_status;
+ u64 mac_int_mask;
+#define MAC_INT_STATUS_TMAC_INT BIT(0)
+#define MAC_INT_STATUS_RMAC_INT BIT(1)
+
+ u64 mac_tmac_err_reg;
+#define TMAC_ERR_REG_TMAC_ECC_DB_ERR BIT(15)
+#define TMAC_ERR_REG_TMAC_TX_BUF_OVRN BIT(23)
+#define TMAC_ERR_REG_TMAC_TX_CRI_ERR BIT(31)
+ u64 mac_tmac_err_mask;
+ u64 mac_tmac_err_alarm;
+
+ u64 mac_rmac_err_reg;
+#define RMAC_ERR_REG_RX_BUFF_OVRN BIT(0)
+#define RMAC_ERR_REG_RTS_ECC_DB_ERR BIT(14)
+#define RMAC_ERR_REG_ECC_DB_ERR BIT(15)
+#define RMAC_LINK_STATE_CHANGE_INT BIT(31)
+ u64 mac_rmac_err_mask;
+ u64 mac_rmac_err_alarm;
+
+ u8 unused14[0x100 - 0x40];
+
+ u64 mac_cfg;
+#define MAC_CFG_TMAC_ENABLE BIT(0)
+#define MAC_CFG_RMAC_ENABLE BIT(1)
+#define MAC_CFG_LAN_NOT_WAN BIT(2)
+#define MAC_CFG_TMAC_LOOPBACK BIT(3)
+#define MAC_CFG_TMAC_APPEND_PAD BIT(4)
+#define MAC_CFG_RMAC_STRIP_FCS BIT(5)
+#define MAC_CFG_RMAC_STRIP_PAD BIT(6)
+#define MAC_CFG_RMAC_PROM_ENABLE BIT(7)
+#define MAC_RMAC_DISCARD_PFRM BIT(8)
+#define MAC_RMAC_BCAST_ENABLE BIT(9)
+#define MAC_RMAC_ALL_ADDR_ENABLE BIT(10)
+#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
+
+ u64 tmac_avg_ipg;
+#define TMAC_AVG_IPG(val) vBIT(val,0,8)
+
+ u64 rmac_max_pyld_len;
+#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
+#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14)
+#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14)
+
+ u64 rmac_err_cfg;
+#define RMAC_ERR_FCS BIT(0)
+#define RMAC_ERR_FCS_ACCEPT BIT(1)
+#define RMAC_ERR_TOO_LONG BIT(1)
+#define RMAC_ERR_TOO_LONG_ACCEPT BIT(1)
+#define RMAC_ERR_RUNT BIT(2)
+#define RMAC_ERR_RUNT_ACCEPT BIT(2)
+#define RMAC_ERR_LEN_MISMATCH BIT(3)
+#define RMAC_ERR_LEN_MISMATCH_ACCEPT BIT(3)
+
+ u64 rmac_cfg_key;
+#define RMAC_CFG_KEY(val) vBIT(val,0,16)
+
+#define MAX_MAC_ADDRESSES 16
+#define MAX_MC_ADDRESSES 32 /* Multicast addresses */
+#define MAC_MAC_ADDR_START_OFFSET 0
+#define MAC_MC_ADDR_START_OFFSET 16
+#define MAC_MC_ALL_MC_ADDR_OFFSET 63 /* enables all multicast pkts */
+ u64 rmac_addr_cmd_mem;
+#define RMAC_ADDR_CMD_MEM_WE BIT(7)
+#define RMAC_ADDR_CMD_MEM_RD 0
+#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD BIT(15)
+#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING BIT(15)
+#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 rmac_addr_data0_mem;
+#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
+#define RMAC_ADDR_DATA0_MEM_USER BIT(48)
+
+ u64 rmac_addr_data1_mem;
+#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
+
+ u8 unused15[0x8];
+
+/*
+ u64 rmac_addr_cfg;
+#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
+#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
+#define RMAC_ADDR_BCAST_EN vBIT(0)_48
+#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
+*/
+ u64 tmac_ipg_cfg;
+
+ u64 rmac_pause_cfg;
+#define RMAC_PAUSE_GEN BIT(0)
+#define RMAC_PAUSE_GEN_ENABLE BIT(0)
+#define RMAC_PAUSE_RX BIT(1)
+#define RMAC_PAUSE_RX_ENABLE BIT(1)
+#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
+#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
+
+ u64 rmac_red_cfg;
+
+ u64 rmac_red_rate_q0q3;
+ u64 rmac_red_rate_q4q7;
+
+ u64 mac_link_util;
+#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
+#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF, 8,4)
+#define MAC_TX_LINK_UTIL_VAL( n ) vBIT(n,8,4)
+#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
+#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
+#define MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4)
+
+#define MAC_LINK_UTIL_DISABLE MAC_TX_LINK_UTIL_DISABLE | \
+ MAC_RX_LINK_UTIL_DISABLE
+
+ u64 rmac_invalid_ipg;
+
+/* rx traffic steering */
+#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
+ u64 rts_frm_len_n[8];
+
+ u64 rts_qos_steering;
+
+#define MAX_DIX_MAP 4
+ u64 rts_dix_map_n[MAX_DIX_MAP];
+#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
+#define RTS_DIX_MAP_SCW(val) BIT(val,21)
+
+ u64 rts_q_alternates;
+ u64 rts_default_q;
+
+ u64 rts_ctrl;
+#define RTS_CTRL_IGNORE_SNAP_OUI BIT(2)
+#define RTS_CTRL_IGNORE_LLC_CTRL BIT(3)
+
+ u64 rts_pn_cam_ctrl;
+#define RTS_PN_CAM_CTRL_WE BIT(7)
+#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD BIT(15)
+#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED BIT(15)
+#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
+ u64 rts_pn_cam_data;
+#define RTS_PN_CAM_DATA_TCP_SELECT BIT(7)
+#define RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
+#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
+
+ u64 rts_ds_mem_ctrl;
+#define RTS_DS_MEM_CTRL_WE BIT(7)
+#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD BIT(15)
+#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED BIT(15)
+#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
+ u64 rts_ds_mem_data;
+#define RTS_DS_MEM_DATA(n) vBIT(n,0,8)
+
+ u8 unused16[0x700 - 0x220];
+
+ u64 mac_debug_ctrl;
+#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
+
+ u8 unused17[0x2800 - 0x2708];
+
+/* memory controller registers */
+ u64 mc_int_status;
+#define MC_INT_STATUS_MC_INT BIT(0)
+ u64 mc_int_mask;
+#define MC_INT_MASK_MC_INT BIT(0)
+
+ u64 mc_err_reg;
+#define MC_ERR_REG_ECC_DB_ERR_L BIT(14)
+#define MC_ERR_REG_ECC_DB_ERR_U BIT(15)
+#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
+#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
+#define MC_ERR_REG_SM_ERR BIT(31)
+ u64 mc_err_mask;
+ u64 mc_err_alarm;
+
+ u8 unused18[0x100 - 0x28];
+
+/* MC configuration */
+ u64 rx_queue_cfg;
+#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
+#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
+#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
+#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
+#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
+#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
+#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
+#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
+
+ u64 mc_rldram_mrs;
+#define MC_RLDRAM_QUEUE_SIZE_ENABLE BIT(39)
+#define MC_RLDRAM_MRS_ENABLE BIT(47)
+
+ u64 mc_rldram_interleave;
+
+ u64 mc_pause_thresh_q0q3;
+ u64 mc_pause_thresh_q4q7;
+
+ u64 mc_red_thresh_q[8];
+
+ u8 unused19[0x200 - 0x168];
+ u64 mc_rldram_ref_per;
+ u8 unused20[0x220 - 0x208];
+ u64 mc_rldram_test_ctrl;
+#define MC_RLDRAM_TEST_MODE BIT(47)
+#define MC_RLDRAM_TEST_WRITE BIT(7)
+#define MC_RLDRAM_TEST_GO BIT(15)
+#define MC_RLDRAM_TEST_DONE BIT(23)
+#define MC_RLDRAM_TEST_PASS BIT(31)
+
+ u8 unused21[0x240 - 0x228];
+ u64 mc_rldram_test_add;
+ u8 unused22[0x260 - 0x248];
+ u64 mc_rldram_test_d0;
+ u8 unused23[0x280 - 0x268];
+ u64 mc_rldram_test_d1;
+ u8 unused24[0x300 - 0x288];
+ u64 mc_rldram_test_d2;
+ u8 unused25[0x700 - 0x308];
+ u64 mc_debug_ctrl;
+
+ u8 unused26[0x3000 - 0x2f08];
+
+/* XGXS */
+ /* XGXS control registers */
+
+ u64 xgxs_int_status;
+#define XGXS_INT_STATUS_TXGXS BIT(0)
+#define XGXS_INT_STATUS_RXGXS BIT(1)
+ u64 xgxs_int_mask;
+#define XGXS_INT_MASK_TXGXS BIT(0)
+#define XGXS_INT_MASK_RXGXS BIT(1)
+
+ u64 xgxs_txgxs_err_reg;
+#define TXGXS_ECC_DB_ERR BIT(15)
+ u64 xgxs_txgxs_err_mask;
+ u64 xgxs_txgxs_err_alarm;
+
+ u64 xgxs_rxgxs_err_reg;
+ u64 xgxs_rxgxs_err_mask;
+ u64 xgxs_rxgxs_err_alarm;
+
+ u8 unused27[0x100 - 0x40];
+
+ u64 xgxs_cfg;
+ u64 xgxs_status;
+
+ u64 xgxs_cfg_key;
+ u64 xgxs_efifo_cfg; /* CHANGED */
+ u64 rxgxs_ber_0; /* CHANGED */
+ u64 rxgxs_ber_1; /* CHANGED */
+
+} XENA_dev_config_t;
+
+#define XENA_REG_SPACE sizeof(XENA_dev_config_t)
+#define XENA_EEPROM_SPACE (0x01 << 11)
+
+#endif /* _REGS_H */
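A note on notation before the next file: s2io-regs.h uses BIT() and vBIT()
without defining them; they are expected to come from s2io.h, which is not
part of this excerpt. Xena numbers register bits from the most significant
end of each 64-bit word, so helpers consistent with the values above would
look roughly like the sketch below (an assumption about s2io.h, not a quote
from it).

/* Sketch of the assumed bit helpers: bit 0 is the MSB of the 64-bit register. */
#define BIT(loc)		(0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

/*
 * Example: RMAC_CFG_KEY(0x4C0D) expands to vBIT(0x4C0D, 0, 16), i.e.
 * 0x4C0D000000000000ULL -- the 16-bit key occupies the two most
 * significant bytes, which is how init_nic() in s2io.c writes it.
 */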
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
new file mode 100644
index 000000000000..9c224eba057d
--- /dev/null
+++ b/drivers/net/s2io.c
@@ -0,0 +1,4950 @@
+/************************************************************************
+ * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
+ * Copyright(c) 2002-2005 Neterion Inc.
+
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * Credits:
+ * Jeff Garzik : For pointing out the improper error condition
+ * check in the s2io_xmit routine and also some
+ * issues in the Tx watch dog function. Also for
+ * patiently answering all those innumerable
+ * questions regarding the 2.6 porting issues.
+ * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
+ * macros available only in 2.6 Kernel.
+ * Francois Romieu : For pointing out all code part that were
+ * deprecated and also styling related comments.
+ * Grant Grundler : For helping me get rid of some Architecture
+ * dependent code.
+ * Christopher Hellwig : Some more 2.6 specific issues in the driver.
+ *
+ * The module loadable parameters that are supported by the driver and a brief
+ * explanation of each variable:
+ * rx_ring_num : This can be used to program the number of receive rings used
+ * in the driver.
+ * rx_ring_sz: This defines the number of descriptors each ring can have. This
+ * is also an array of size 8.
+ * tx_fifo_num: This defines the number of Tx FIFOs used in the driver.
+ * tx_fifo_len: This too is an array of 8. Each element defines the number of
+ * Tx descriptors that can be associated with each corresponding FIFO.
+ ************************************************************************/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/stddef.h>
+#include <linux/ioctl.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/ethtool.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* local include */
+#include "s2io.h"
+#include "s2io-regs.h"
+
+/* S2io Driver name & version. */
+static char s2io_driver_name[] = "s2io";
+static char s2io_driver_version[] = "Version 1.7.7.1";
+
+/*
+ * Cards with the following subsystem_ids have a link state indication
+ * problem: 600B, 600C, 600D, 640B, 640C and 640D.
+ * The macro below identifies these cards given the subsystem_id.
+ */
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
+ (((subid >= 0x600B) && (subid <= 0x600D)) || \
+ ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
+
+#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
+ ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
+#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
+#define PANIC 1
+#define LOW 2
+static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
+{
+ int level = 0;
+ if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
+ level = LOW;
+ if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
+ level = PANIC;
+ }
+ }
+
+ return level;
+}
+
+/* Ethtool related variables and Macros. */
+static char s2io_gstrings[][ETH_GSTRING_LEN] = {
+ "Register test\t(offline)",
+ "Eeprom test\t(offline)",
+ "Link test\t(online)",
+ "RLDRAM test\t(offline)",
+ "BIST Test\t(offline)"
+};
+
+static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+ {"tmac_frms"},
+ {"tmac_data_octets"},
+ {"tmac_drop_frms"},
+ {"tmac_mcst_frms"},
+ {"tmac_bcst_frms"},
+ {"tmac_pause_ctrl_frms"},
+ {"tmac_any_err_frms"},
+ {"tmac_vld_ip_octets"},
+ {"tmac_vld_ip"},
+ {"tmac_drop_ip"},
+ {"tmac_icmp"},
+ {"tmac_rst_tcp"},
+ {"tmac_tcp"},
+ {"tmac_udp"},
+ {"rmac_vld_frms"},
+ {"rmac_data_octets"},
+ {"rmac_fcs_err_frms"},
+ {"rmac_drop_frms"},
+ {"rmac_vld_mcst_frms"},
+ {"rmac_vld_bcst_frms"},
+ {"rmac_in_rng_len_err_frms"},
+ {"rmac_long_frms"},
+ {"rmac_pause_ctrl_frms"},
+ {"rmac_discarded_frms"},
+ {"rmac_usized_frms"},
+ {"rmac_osized_frms"},
+ {"rmac_frag_frms"},
+ {"rmac_jabber_frms"},
+ {"rmac_ip"},
+ {"rmac_ip_octets"},
+ {"rmac_hdr_err_ip"},
+ {"rmac_drop_ip"},
+ {"rmac_icmp"},
+ {"rmac_tcp"},
+ {"rmac_udp"},
+ {"rmac_err_drp_udp"},
+ {"rmac_pause_cnt"},
+ {"rmac_accepted_ip"},
+ {"rmac_err_tcp"},
+};
+
+#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
+#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
+
+#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
+#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
+
+
+/*
+ * Constants to be programmed into the Xena's registers, to configure
+ * the XAUI.
+ */
+
+#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
+#define END_SIGN 0x0
+
+static u64 default_mdio_cfg[] = {
+ /* Reset PMA PLL */
+ 0xC001010000000000ULL, 0xC0010100000000E0ULL,
+ 0xC0010100008000E4ULL,
+ /* Remove Reset from PMA PLL */
+ 0xC001010000000000ULL, 0xC0010100000000E0ULL,
+ 0xC0010100000000E4ULL,
+ END_SIGN
+};
+
+static u64 default_dtx_cfg[] = {
+ 0x8000051500000000ULL, 0x80000515000000E0ULL,
+ 0x80000515D93500E4ULL, 0x8001051500000000ULL,
+ 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515F21000E4ULL,
+ /* Set PADLOOPBACKN */
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515B20000E4ULL, 0x8003051500000000ULL,
+ 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
+ 0x8004051500000000ULL, 0x80040515000000E0ULL,
+ 0x80040515B20000E4ULL, 0x8005051500000000ULL,
+ 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
+ SWITCH_SIGN,
+ /* Remove PADLOOPBACKN */
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ 0x80020515F20000E4ULL, 0x8003051500000000ULL,
+ 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
+ 0x8004051500000000ULL, 0x80040515000000E0ULL,
+ 0x80040515F20000E4ULL, 0x8005051500000000ULL,
+ 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
+ END_SIGN
+};
+
+
+/*
+ * Constants for Fixing the MacAddress problem seen mostly on
+ * Alpha machines.
+ */
+static u64 fix_mac[] = {
+ 0x0060000000000000ULL, 0x0060600000000000ULL,
+ 0x0040600000000000ULL, 0x0000600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0000600000000000ULL,
+ 0x0040600000000000ULL, 0x0060600000000000ULL,
+ END_SIGN
+};
+
+/* Module Loadable parameters. */
+static unsigned int tx_fifo_num = 1;
+static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
+ {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
+static unsigned int rx_ring_num = 1;
+static unsigned int rx_ring_sz[MAX_RX_RINGS] =
+ {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int Stats_refresh_time = 4;
+static unsigned int rmac_pause_time = 65535;
+static unsigned int mc_pause_threshold_q0q3 = 187;
+static unsigned int mc_pause_threshold_q4q7 = 187;
+static unsigned int shared_splits;
+static unsigned int tmac_util_period = 5;
+static unsigned int rmac_util_period = 5;
+#ifndef CONFIG_S2IO_NAPI
+static unsigned int indicate_max_pkts;
+#endif
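+
+/*
+ * Illustrative note: the knobs above are meant to be exposed as module
+ * parameters (the module_param*() registrations are presumably further
+ * down in the file, outside this excerpt), so a two-FIFO, one-ring
+ * configuration would typically be loaded along the lines of
+ *
+ *	modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 rx_ring_num=1 rx_ring_sz=1024
+ *
+ * The parameter names follow the variables above; the values are examples only.
+ */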
+
+/*
+ * S2IO device table.
+ * This table lists all the devices that this driver supports.
+ */
+static struct pci_device_id s2io_tbl[] __devinitdata = {
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, s2io_tbl);
+
+static struct pci_driver s2io_driver = {
+ .name = "S2IO",
+ .id_table = s2io_tbl,
+ .probe = s2io_init_nic,
+ .remove = __devexit_p(s2io_rem_nic),
+};
+
+/* A helper macro used by both init_shared_mem() and free_shared_mem(). */
+#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
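+/*
+ * Worked example with illustrative numbers: for a FIFO of 100 descriptor
+ * lists with 32 lists fitting in one page,
+ * TXD_MEM_PAGE_CNT(100, 32) == (100 + 32 - 1) / 32 == 4 pages.
+ */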
+
+/**
+ * init_shared_mem - Allocation and Initialization of Memory
+ * @nic: Device private variable.
+ * Description: The function allocates all the memory areas shared
+ * between the NIC and the driver. This includes Tx descriptors,
+ * Rx descriptors and the statistics block.
+ */
+
+static int init_shared_mem(struct s2io_nic *nic)
+{
+ u32 size;
+ void *tmp_v_addr, *tmp_v_addr_next;
+ dma_addr_t tmp_p_addr, tmp_p_addr_next;
+ RxD_block_t *pre_rxd_blk = NULL;
+ int i, j, blk_cnt;
+ int lst_size, lst_per_page;
+ struct net_device *dev = nic->dev;
+#ifdef CONFIG_2BUFF_MODE
+ unsigned long tmp;
+ buffAdd_t *ba;
+#endif
+
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+
+ /* Allocation and initialization of TXDLs in FIFOs */
+ size = 0;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ size += config->tx_cfg[i].fifo_len;
+ }
+ if (size > MAX_AVAILABLE_TXDS) {
+ DBG_PRINT(ERR_DBG, "%s: Total number of Tx descriptors ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "requested exceeds the maximum ");
+ DBG_PRINT(ERR_DBG, "that can be used\n");
+ return FAILURE;
+ }
+
+ lst_size = (sizeof(TxD_t) * config->max_txds);
+ lst_per_page = PAGE_SIZE / lst_size;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ int fifo_len = config->tx_cfg[i].fifo_len;
+ int list_holder_size = fifo_len * sizeof(list_info_hold_t);
+ nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
+ if (!nic->list_info[i]) {
+ DBG_PRINT(ERR_DBG,
+ "Malloc failed for list_info\n");
+ return -ENOMEM;
+ }
+ memset(nic->list_info[i], 0, list_holder_size);
+ }
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
+ lst_per_page);
+ mac_control->tx_curr_put_info[i].offset = 0;
+ mac_control->tx_curr_put_info[i].fifo_len =
+ config->tx_cfg[i].fifo_len - 1;
+ mac_control->tx_curr_get_info[i].offset = 0;
+ mac_control->tx_curr_get_info[i].fifo_len =
+ config->tx_cfg[i].fifo_len - 1;
+ for (j = 0; j < page_num; j++) {
+ int k = 0;
+ dma_addr_t tmp_p;
+ void *tmp_v;
+ tmp_v = pci_alloc_consistent(nic->pdev,
+ PAGE_SIZE, &tmp_p);
+ if (!tmp_v) {
+ DBG_PRINT(ERR_DBG,
+ "pci_alloc_consistent ");
+ DBG_PRINT(ERR_DBG, "failed for TxDL\n");
+ return -ENOMEM;
+ }
+ while (k < lst_per_page) {
+ int l = (j * lst_per_page) + k;
+ if (l == config->tx_cfg[i].fifo_len)
+ goto end_txd_alloc;
+ nic->list_info[i][l].list_virt_addr =
+ tmp_v + (k * lst_size);
+ nic->list_info[i][l].list_phy_addr =
+ tmp_p + (k * lst_size);
+ k++;
+ }
+ }
+ }
+ end_txd_alloc:
+
+ /* Allocation and initialization of RXDs in Rings */
+ size = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
+ DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
+ DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
+ i);
+ DBG_PRINT(ERR_DBG, "RxDs per Block");
+ return FAILURE;
+ }
+ size += config->rx_cfg[i].num_rxd;
+ nic->block_count[i] =
+ config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+ nic->pkt_cnt[i] =
+ config->rx_cfg[i].num_rxd - nic->block_count[i];
+ }
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ mac_control->rx_curr_get_info[i].block_index = 0;
+ mac_control->rx_curr_get_info[i].offset = 0;
+ mac_control->rx_curr_get_info[i].ring_len =
+ config->rx_cfg[i].num_rxd - 1;
+ mac_control->rx_curr_put_info[i].block_index = 0;
+ mac_control->rx_curr_put_info[i].offset = 0;
+ mac_control->rx_curr_put_info[i].ring_len =
+ config->rx_cfg[i].num_rxd - 1;
+ blk_cnt =
+ config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+ /* Allocating all the Rx blocks */
+ for (j = 0; j < blk_cnt; j++) {
+#ifndef CONFIG_2BUFF_MODE
+ size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
+#else
+ size = SIZE_OF_BLOCK;
+#endif
+ tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
+ &tmp_p_addr);
+ if (tmp_v_addr == NULL) {
+ /*
+ * In case of failure, free_shared_mem()
+ * is called, which should free any
+ * memory that was alloced till the
+ * failure happened.
+ */
+ nic->rx_blocks[i][j].block_virt_addr =
+ tmp_v_addr;
+ return -ENOMEM;
+ }
+ memset(tmp_v_addr, 0, size);
+ nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
+ nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
+ }
+ /* Interlinking all Rx Blocks */
+ for (j = 0; j < blk_cnt; j++) {
+ tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
+ tmp_v_addr_next =
+ nic->rx_blocks[i][(j + 1) %
+ blk_cnt].block_virt_addr;
+ tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+ tmp_p_addr_next =
+ nic->rx_blocks[i][(j + 1) %
+ blk_cnt].block_dma_addr;
+
+ pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
+ pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
+ * marker.
+ */
+#ifndef CONFIG_2BUFF_MODE
+ pre_rxd_blk->reserved_2_pNext_RxD_block =
+ (unsigned long) tmp_v_addr_next;
+#endif
+ pre_rxd_blk->pNext_RxD_Blk_physical =
+ (u64) tmp_p_addr_next;
+ }
+ }
+
+#ifdef CONFIG_2BUFF_MODE
+ /*
+ * Allocation of Storages for buffer addresses in 2BUFF mode
+ * and the buffers as well.
+ */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt =
+ config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+ nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+ GFP_KERNEL);
+ if (!nic->ba[i])
+ return -ENOMEM;
+ for (j = 0; j < blk_cnt; j++) {
+ int k = 0;
+ nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
+ (MAX_RXDS_PER_BLOCK + 1)),
+ GFP_KERNEL);
+ if (!nic->ba[i][j])
+ return -ENOMEM;
+ while (k != MAX_RXDS_PER_BLOCK) {
+ ba = &nic->ba[i][j][k];
+
+ ba->ba_0_org = kmalloc
+ (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
+ if (!ba->ba_0_org)
+ return -ENOMEM;
+ tmp = (unsigned long) ba->ba_0_org;
+ tmp += ALIGN_SIZE;
+ tmp &= ~((unsigned long) ALIGN_SIZE);
+ ba->ba_0 = (void *) tmp;
+
+ ba->ba_1_org = kmalloc
+ (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
+ if (!ba->ba_1_org)
+ return -ENOMEM;
+ tmp = (unsigned long) ba->ba_1_org;
+ tmp += ALIGN_SIZE;
+ tmp &= ~((unsigned long) ALIGN_SIZE);
+ ba->ba_1 = (void *) tmp;
+ k++;
+ }
+ }
+ }
+#endif
+
+ /* Allocation and initialization of Statistics block */
+ size = sizeof(StatInfo_t);
+ mac_control->stats_mem = pci_alloc_consistent
+ (nic->pdev, size, &mac_control->stats_mem_phy);
+
+ if (!mac_control->stats_mem) {
+ /*
+ * In case of failure, free_shared_mem() is called, which
+ * should free any memory that was alloced till the
+ * failure happened.
+ */
+ return -ENOMEM;
+ }
+ mac_control->stats_mem_sz = size;
+
+ tmp_v_addr = mac_control->stats_mem;
+ mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
+ memset(tmp_v_addr, 0, size);
+
+ DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
+ (unsigned long long) tmp_p_addr);
+
+ return SUCCESS;
+}
+
+/**
+ * free_shared_mem - Free the allocated Memory
+ * @nic: Device private variable.
+ * Description: This function frees all the memory allocated by
+ * init_shared_mem() and returns it to the kernel.
+ */
+
+static void free_shared_mem(struct s2io_nic *nic)
+{
+ int i, j, blk_cnt, size;
+ void *tmp_v_addr;
+ dma_addr_t tmp_p_addr;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ int lst_size, lst_per_page;
+
+
+ if (!nic)
+ return;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ lst_size = (sizeof(TxD_t) * config->max_txds);
+ lst_per_page = PAGE_SIZE / lst_size;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
+ lst_per_page);
+ for (j = 0; j < page_num; j++) {
+ int mem_blks = (j * lst_per_page);
+ if (!nic->list_info[i][mem_blks].list_virt_addr)
+ break;
+ pci_free_consistent(nic->pdev, PAGE_SIZE,
+ nic->list_info[i][mem_blks].
+ list_virt_addr,
+ nic->list_info[i][mem_blks].
+ list_phy_addr);
+ }
+ kfree(nic->list_info[i]);
+ }
+
+#ifndef CONFIG_2BUFF_MODE
+ size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
+#else
+ size = SIZE_OF_BLOCK;
+#endif
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt = nic->block_count[i];
+ for (j = 0; j < blk_cnt; j++) {
+ tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
+ tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+ if (tmp_v_addr == NULL)
+ break;
+ pci_free_consistent(nic->pdev, size,
+ tmp_v_addr, tmp_p_addr);
+ }
+ }
+
+#ifdef CONFIG_2BUFF_MODE
+ /* Freeing buffer storage addresses in 2BUFF mode. */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt =
+ config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+ if (!nic->ba[i])
+ goto end_free;
+ for (j = 0; j < blk_cnt; j++) {
+ int k = 0;
+ if (!nic->ba[i][j]) {
+ kfree(nic->ba[i]);
+ goto end_free;
+ }
+ while (k != MAX_RXDS_PER_BLOCK) {
+ buffAdd_t *ba = &nic->ba[i][j][k];
+ if (!ba->ba_0_org || !ba->ba_1_org) {
+ /* Partially allocated entry: free whatever
+ * exists (kfree(NULL) is a no-op) before the
+ * arrays that contain it, then bail out.
+ */
+ kfree(ba->ba_0_org);
+ kfree(ba->ba_1_org);
+ kfree(nic->ba[i][j]);
+ kfree(nic->ba[i]);
+ goto end_free;
+ }
+ kfree(ba->ba_0_org);
+ kfree(ba->ba_1_org);
+ k++;
+ }
+ kfree(nic->ba[i][j]);
+ }
+ kfree(nic->ba[i]);
+ }
+end_free:
+#endif
+
+ if (mac_control->stats_mem) {
+ pci_free_consistent(nic->pdev,
+ mac_control->stats_mem_sz,
+ mac_control->stats_mem,
+ mac_control->stats_mem_phy);
+ }
+}
+
+/**
+ * init_nic - Initialization of hardware
+ * @nic: device private variable
+ * Description: The function sequentially configures every block
+ * of the H/W from their reset values.
+ * Return Value: SUCCESS on success and
+ * '-1' on failure (endian settings incorrect).
+ */
+
+static int init_nic(struct s2io_nic *nic)
+{
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ struct net_device *dev = nic->dev;
+ register u64 val64 = 0;
+ void __iomem *add;
+ u32 time;
+ int i, j;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ int mdio_cnt = 0, dtx_cnt = 0;
+ unsigned long long mem_share;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ /* Initialize swapper control register */
+ if (s2io_set_swapper(nic)) {
+ DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
+ return -1;
+ }
+
+ /* Remove XGXS from reset state */
+ val64 = 0;
+ writeq(val64, &bar0->sw_reset);
+ val64 = readq(&bar0->sw_reset);
+ msleep(500);
+
+ /* Enable Receiving broadcasts */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 |= MAC_RMAC_BCAST_ENABLE;
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+
+ /* Read registers in all blocks */
+ val64 = readq(&bar0->mac_int_mask);
+ val64 = readq(&bar0->mc_int_mask);
+ val64 = readq(&bar0->xgxs_int_mask);
+
+ /* Set MTU */
+ val64 = dev->mtu;
+ writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+
+ /*
+ * Configuring the XAUI Interface of Xena.
+ * ***************************************
+	 * To configure Xena's XAUI, one has to write a series
+	 * of 64 bit values into two registers in a particular
+	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
+	 * which is placed in the arrays of configuration values
+	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
+	 * to switch writing from one register to another. We continue
+	 * writing these values until we encounter the 'END_SIGN' macro.
+	 * For example, after making a series of 21 writes into the
+	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
+	 * start writing into mdio_control until we encounter END_SIGN.
+ */
+ while (1) {
+ dtx_cfg:
+ while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
+ if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
+ dtx_cnt++;
+ goto mdio_cfg;
+ }
+ SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ dtx_cnt++;
+ }
+ mdio_cfg:
+ while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
+ if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+ mdio_cnt++;
+ goto dtx_cfg;
+ }
+ SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
+ &bar0->mdio_control, UF);
+ val64 = readq(&bar0->mdio_control);
+ mdio_cnt++;
+ }
+ if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
+ (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
+ break;
+ } else {
+ goto dtx_cfg;
+ }
+ }
+
+ /* Tx DMA Initialization */
+ val64 = 0;
+ writeq(val64, &bar0->tx_fifo_partition_0);
+ writeq(val64, &bar0->tx_fifo_partition_1);
+ writeq(val64, &bar0->tx_fifo_partition_2);
+ writeq(val64, &bar0->tx_fifo_partition_3);
+
+
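+	/*
+	 * Each tx_fifo_partition register carries the length and priority
+	 * of two FIFOs, so the accumulated value is written out at every
+	 * odd index (1, 3, 5, 7) and then cleared.
+	 */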
+ for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
+ val64 |=
+ vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
+ 13) | vBIT(config->tx_cfg[i].fifo_priority,
+ ((i * 32) + 5), 3);
+
+ if (i == (config->tx_fifo_num - 1)) {
+ if (i % 2 == 0)
+ i++;
+ }
+
+ switch (i) {
+ case 1:
+ writeq(val64, &bar0->tx_fifo_partition_0);
+ val64 = 0;
+ break;
+ case 3:
+ writeq(val64, &bar0->tx_fifo_partition_1);
+ val64 = 0;
+ break;
+ case 5:
+ writeq(val64, &bar0->tx_fifo_partition_2);
+ val64 = 0;
+ break;
+ case 7:
+ writeq(val64, &bar0->tx_fifo_partition_3);
+ break;
+ }
+ }
+
+ /* Enable Tx FIFO partition 0. */
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ val64 |= BIT(0); /* To enable the FIFO partition. */
+ writeq(val64, &bar0->tx_fifo_partition_0);
+
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
+ &bar0->tx_fifo_partition_0, (unsigned long long) val64);
+
+ /*
+ * Initialization of Tx_PA_CONFIG register to ignore packet
+ * integrity checking.
+ */
+ val64 = readq(&bar0->tx_pa_cfg);
+ val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
+ TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
+ writeq(val64, &bar0->tx_pa_cfg);
+
+	/* Rx DMA initialization. */
+ val64 = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ val64 |=
+ vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
+ 3);
+ }
+ writeq(val64, &bar0->rx_queue_priority);
+
+ /*
+ * Allocating equal share of memory to all the
+ * configured Rings.
+ */
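+	/* Queue 0 additionally takes up the remainder of the 64 blocks. */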
+ val64 = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ switch (i) {
+ case 0:
+ mem_share = (64 / config->rx_ring_num +
+ 64 % config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
+ continue;
+ case 1:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
+ continue;
+ case 2:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
+ continue;
+ case 3:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
+ continue;
+ case 4:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
+ continue;
+ case 5:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
+ continue;
+ case 6:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
+ continue;
+ case 7:
+ mem_share = (64 / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
+ continue;
+ }
+ }
+ writeq(val64, &bar0->rx_queue_cfg);
+
+ /*
+ * Initializing the Tx round robin registers to 0.
+ * Filling Tx and Rx round robin registers as per the
+ * number of FIFOs and Rings is still TODO.
+ */
+ writeq(0, &bar0->tx_w_round_robin_0);
+ writeq(0, &bar0->tx_w_round_robin_1);
+ writeq(0, &bar0->tx_w_round_robin_2);
+ writeq(0, &bar0->tx_w_round_robin_3);
+ writeq(0, &bar0->tx_w_round_robin_4);
+
+ /*
+ * TODO
+	 * Disable Rx steering. Hard coding all packets to be steered to
+	 * Queue 0 for now.
+ */
+ val64 = 0x8080808080808080ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+
+ /* UDP Fix */
+ val64 = 0;
+ for (i = 1; i < 8; i++)
+ writeq(val64, &bar0->rts_frm_len_n[i]);
+
+ /* Set rts_frm_len register for fifo 0 */
+ writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
+ &bar0->rts_frm_len_n[0]);
+
+ /* Enable statistics */
+ writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
+ val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
+ STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
+ writeq(val64, &bar0->stat_cfg);
+
+ /*
+ * Initializing the sampling rate for the device to calculate the
+ * bandwidth utilization.
+ */
+ val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
+ MAC_RX_LINK_UTIL_VAL(rmac_util_period);
+ writeq(val64, &bar0->mac_link_util);
+
+
+ /*
+ * Initializing the Transmit and Receive Traffic Interrupt
+ * Scheme.
+ */
+ /* TTI Initialization. Default Tx timer gets us about
+ * 250 interrupts per sec. Continuous interrupts are enabled
+ * by default.
+ */
+ val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
+ TTI_DATA1_MEM_TX_URNG_A(0xA) |
+ TTI_DATA1_MEM_TX_URNG_B(0x10) |
+ TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
+ TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ writeq(val64, &bar0->tti_data1_mem);
+
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x20) |
+ TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
+ writeq(val64, &bar0->tti_data2_mem);
+
+ val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
+ writeq(val64, &bar0->tti_command_mem);
+
+ /*
+ * Once the operation completes, the Strobe bit of the command
+	 * register will be reset. We poll for this particular condition.
+	 * We wait for a maximum of 500ms for the operation to complete;
+	 * if it's not complete by then we return an error.
+ */
+ time = 0;
+ while (TRUE) {
+ val64 = readq(&bar0->tti_command_mem);
+ if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
+ break;
+ }
+ if (time > 10) {
+ DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
+ dev->name);
+ return -1;
+ }
+ msleep(50);
+ time++;
+ }
+
+ /* RTI Initialization */
+ val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
+ RTI_DATA1_MEM_RX_URNG_A(0xA) |
+ RTI_DATA1_MEM_RX_URNG_B(0x10) |
+ RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
+
+ writeq(val64, &bar0->rti_data1_mem);
+
+ val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
+ RTI_DATA2_MEM_RX_UFC_B(0x2) |
+ RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
+ writeq(val64, &bar0->rti_data2_mem);
+
+ val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
+ writeq(val64, &bar0->rti_command_mem);
+
+ /*
+ * Once the operation completes, the Strobe bit of the command
+	 * register will be reset. We poll for this particular condition.
+	 * We wait for a maximum of 500ms for the operation to complete;
+	 * if it's not complete by then we return an error.
+ */
+ time = 0;
+ while (TRUE) {
+ val64 = readq(&bar0->rti_command_mem);
+		if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
+ break;
+ }
+ if (time > 10) {
+ DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
+ dev->name);
+ return -1;
+ }
+ time++;
+ msleep(50);
+ }
+
+ /*
+ * Initializing proper values as Pause threshold into all
+ * the 8 Queues on Rx side.
+ */
+ writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
+ writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
+
+ /* Disable RMAC PAD STRIPPING */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64), add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+ val64 = readq(&bar0->mac_cfg);
+
+ /*
+ * Set the time value to be inserted in the pause frame
+ * generated by xena.
+ */
+ val64 = readq(&bar0->rmac_pause_cfg);
+ val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
+ val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
+ writeq(val64, &bar0->rmac_pause_cfg);
+
+ /*
+	 * Set the threshold limit for generating pause frames.
+	 * If the amount of data in any queue exceeds the ratio
+	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
+	 * a pause frame is generated.
+ */
+ val64 = 0;
+ for (i = 0; i < 4; i++) {
+ val64 |=
+ (((u64) 0xFF00 | nic->mac_control.
+ mc_pause_threshold_q0q3)
+ << (i * 2 * 8));
+ }
+ writeq(val64, &bar0->mc_pause_thresh_q0q3);
+
+ val64 = 0;
+ for (i = 0; i < 4; i++) {
+ val64 |=
+ (((u64) 0xFF00 | nic->mac_control.
+ mc_pause_threshold_q4q7)
+ << (i * 2 * 8));
+ }
+ writeq(val64, &bar0->mc_pause_thresh_q4q7);
+
+ /*
+ * TxDMA will stop Read request if the number of read split has
+ * exceeded the limit pointed by shared_splits
+ */
+ val64 = readq(&bar0->pic_control);
+ val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
+ writeq(val64, &bar0->pic_control);
+
+ return SUCCESS;
+}
+
+/**
+ * en_dis_able_nic_intrs - Enable or Disable the interrupts
+ * @nic: device private variable,
+ * @mask: A mask indicating which Intr block must be modified and,
+ * @flag: A flag indicating whether to enable or disable the Intrs.
+ * Description: This function will either disable or enable the interrupts
+ * depending on the flag argument. The mask argument can be used to
+ * enable/disable any Intr block.
+ * Return Value: NONE.
+ */
+
+static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
+{
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0, temp64 = 0;
+
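+	/*
+	 * In the general_int_mask register a set bit masks the
+	 * corresponding interrupt block, so enabling an interrupt source
+	 * clears its bit and disabling it sets the bit.
+	 */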
+ /* Top level interrupt classification */
+ /* PIC Interrupts */
+ if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
+ /* Enable PIC Intrs in the general intr mask register */
+ val64 = TXPIC_INT_M | PIC_RX_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * Disabled all PCIX, Flash, MDIO, IIC and GPIO
+ * interrupts for now.
+ * TODO
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+ /*
+ * No MSI Support is available presently, so TTI and
+ * RTI interrupts are also disabled.
+ */
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable PIC Intrs in the general
+ * intr mask register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* DMA Interrupts */
+ /* Enabling/Disabling Tx DMA interrupts */
+ if (mask & TX_DMA_INTR) {
+ /* Enable TxDMA Intrs in the general intr mask register */
+ val64 = TXDMA_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * Keep all interrupts other than PFC interrupt
+ * and PCC interrupt disabled in DMA level.
+ */
+ val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
+ TXDMA_PCC_INT_M);
+ writeq(val64, &bar0->txdma_int_mask);
+ /*
+ * Enable only the MISC error 1 interrupt in PFC block
+ */
+ val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
+ writeq(val64, &bar0->pfc_err_mask);
+ /*
+ * Enable only the FB_ECC error interrupt in PCC block
+ */
+ val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
+ writeq(val64, &bar0->pcc_err_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable TxDMA Intrs in the general intr mask
+ * register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
+ writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* Enabling/Disabling Rx DMA interrupts */
+ if (mask & RX_DMA_INTR) {
+ /* Enable RxDMA Intrs in the general intr mask register */
+ val64 = RXDMA_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * All RxDMA block interrupts are disabled for now
+ * TODO
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable RxDMA Intrs in the general intr mask
+ * register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* MAC Interrupts */
+ /* Enabling/Disabling MAC interrupts */
+ if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
+ val64 = TXMAC_INT_M | RXMAC_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * All MAC block error interrupts are disabled for now
+ * except the link status change interrupt.
+ * TODO
+ */
+ val64 = MAC_INT_STATUS_RMAC_INT;
+ temp64 = readq(&bar0->mac_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->mac_int_mask);
+
+ val64 = readq(&bar0->mac_rmac_err_mask);
+ val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
+ writeq(val64, &bar0->mac_rmac_err_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable MAC Intrs in the general intr mask register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
+ writeq(DISABLE_ALL_INTRS,
+ &bar0->mac_rmac_err_mask);
+
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* XGXS Interrupts */
+ if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
+ val64 = TXXGXS_INT_M | RXXGXS_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * All XGXS block error interrupts are disabled for now
+ * TODO
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable MC Intrs in the general intr mask register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* Memory Controller(MC) interrupts */
+ if (mask & MC_INTR) {
+ val64 = MC_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * All MC block error interrupts are disabled for now
+ * TODO
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable MC Intrs in the general intr mask register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+
+ /* Tx traffic interrupts */
+ if (mask & TX_TRAFFIC_INTR) {
+ val64 = TXTRAFFIC_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /*
+ * Enable all the Tx side interrupts
+ * writing 0 Enables all 64 TX interrupt levels
+ */
+ writeq(0x0, &bar0->tx_traffic_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable Tx Traffic Intrs in the general intr mask
+ * register.
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+
+ /* Rx traffic interrupts */
+ if (mask & RX_TRAFFIC_INTR) {
+ val64 = RXTRAFFIC_INT_M;
+ if (flag == ENABLE_INTRS) {
+ temp64 = readq(&bar0->general_int_mask);
+ temp64 &= ~((u64) val64);
+ writeq(temp64, &bar0->general_int_mask);
+ /* writing 0 Enables all 8 RX interrupt levels */
+ writeq(0x0, &bar0->rx_traffic_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable Rx Traffic Intrs in the general intr mask
+ * register.
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
+ temp64 = readq(&bar0->general_int_mask);
+ val64 |= temp64;
+ writeq(val64, &bar0->general_int_mask);
+ }
+ }
+}
+
+/**
+ * verify_xena_quiescence - Checks whether the H/W is ready
+ * @val64 : Value read from adapter status register.
+ * @flag : indicates if the adapter enable bit was ever written once
+ * before.
+ * Description: Returns whether the H/W is ready to go or not. The
+ * comparison differs depending on whether the adapter enable bit was
+ * ever written; the calling function passes the flag argument to
+ * indicate this.
+ * Return: 1 if Xena is quiescent
+ * 0 if Xena is not quiescent
+ */
+
+static int verify_xena_quiescence(u64 val64, int flag)
+{
+ int ret = 0;
+ u64 tmp64 = ~((u64) val64);
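+	/*
+	 * tmp64 has a bit set wherever the corresponding status bit in
+	 * val64 is clear, so the check below succeeds only when all the
+	 * READY/QUIESCENT bits are set.
+	 */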
+
+ if (!
+ (tmp64 &
+ (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
+ ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
+ ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
+ ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
+ ADAPTER_STATUS_P_PLL_LOCK))) {
+ if (flag == FALSE) {
+ if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+
+ ret = 1;
+
+ }
+ } else {
+ if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
+ ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+ (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
+
+ ret = 1;
+
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * fix_mac_address - Fix for Mac addr problem on Alpha platforms
+ * @sp: Pointer to device specific structure
+ * Description :
+ * New procedure to clear mac address reading problems on Alpha platforms
+ *
+ */
+
+static void fix_mac_address(nic_t * sp)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64;
+ int i = 0;
+
+ while (fix_mac[i] != END_SIGN) {
+ writeq(fix_mac[i++], &bar0->gpio_control);
+ val64 = readq(&bar0->gpio_control);
+ }
+}
+
+/**
+ * start_nic - Turns the device on
+ * @nic : device private variable.
+ * Description:
+ * This function actually turns the device on. Before this function is
+ * called, all registers are configured from their reset states
+ * and shared memory is allocated, but the NIC is still quiescent. On
+ * calling this function, the device interrupts are cleared and the NIC is
+ * literally switched on by writing into the adapter control register.
+ * Return Value:
+ * SUCCESS on success and -1 on failure.
+ */
+
+static int start_nic(struct s2io_nic *nic)
+{
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ struct net_device *dev = nic->dev;
+ register u64 val64 = 0;
+ u16 interruptible, i;
+ u16 subid;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ /* PRC Initialization and configuration */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
+ &bar0->prc_rxd0_n[i]);
+
+ val64 = readq(&bar0->prc_ctrl_n[i]);
+#ifndef CONFIG_2BUFF_MODE
+ val64 |= PRC_CTRL_RC_ENABLED;
+#else
+ val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
+#endif
+ writeq(val64, &bar0->prc_ctrl_n[i]);
+ }
+
+#ifdef CONFIG_2BUFF_MODE
+ /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 |= RX_PA_CFG_IGNORE_L2_ERR;
+ writeq(val64, &bar0->rx_pa_cfg);
+#endif
+
+ /*
+	 * Enabling MC-RLDRAM. After enabling the device, we wait
+ * for around 100ms, which is approximately the time required
+ * for the device to be ready for operation.
+ */
+ val64 = readq(&bar0->mc_rldram_mrs);
+ val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+ val64 = readq(&bar0->mc_rldram_mrs);
+
+ msleep(100); /* Delay by around 100 ms. */
+
+ /* Enabling ECC Protection. */
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~ADAPTER_ECC_EN;
+ writeq(val64, &bar0->adapter_control);
+
+ /*
+ * Clearing any possible Link state change interrupts that
+ * could have popped up just before Enabling the card.
+ */
+ val64 = readq(&bar0->mac_rmac_err_reg);
+ if (val64)
+ writeq(val64, &bar0->mac_rmac_err_reg);
+
+ /*
+ * Verify if the device is ready to be enabled, if so enable
+ * it.
+ */
+ val64 = readq(&bar0->adapter_status);
+ if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
+ DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
+ DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
+ (unsigned long long) val64);
+ return FAILURE;
+ }
+
+ /* Enable select interrupts */
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
+ RX_MAC_INTR;
+ en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
+
+ /*
+ * With some switches, link might be already up at this point.
+ * Because of this weird behavior, when we enable laser,
+ * we may not get link. We need to handle this. We cannot
+ * figure out which switch is misbehaving. So we are forced to
+ * make a global change.
+ */
+
+ /* Enabling Laser. */
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_EOI_TX_ON;
+ writeq(val64, &bar0->adapter_control);
+
+ /* SXE-002: Initialize link and activity LED */
+ subid = nic->pdev->subsystem_device;
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *) bar0 + 0x2700);
+ }
+
+ /*
+ * Don't see link state interrupts on certain switches, so
+ * directly scheduling a link state task from here.
+ */
+ schedule_work(&nic->set_link_task);
+
+ /*
+	 * Here we are performing a soft reset on the XGXS to
+	 * force the link down. Since the link is already up, we will get
+	 * a link state change interrupt after this reset.
+ */
+ SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ udelay(50);
+ SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ udelay(50);
+ SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ udelay(50);
+
+ return SUCCESS;
+}
+
+/**
+ * free_tx_buffers - Free all queued Tx buffers
+ * @nic : device private variable.
+ * Description:
+ * Free all queued Tx buffers.
+ * Return Value: void
+*/
+
+static void free_tx_buffers(struct s2io_nic *nic)
+{
+ struct net_device *dev = nic->dev;
+ struct sk_buff *skb;
+ TxD_t *txdp;
+ int i, j;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ int cnt = 0;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
+ txdp = (TxD_t *) nic->list_info[i][j].
+ list_virt_addr;
+ skb =
+ (struct sk_buff *) ((unsigned long) txdp->
+ Host_Control);
+ if (skb == NULL) {
+ memset(txdp, 0, sizeof(TxD_t));
+ continue;
+ }
+ dev_kfree_skb(skb);
+ memset(txdp, 0, sizeof(TxD_t));
+ cnt++;
+ }
+ DBG_PRINT(INTR_DBG,
+ "%s:forcibly freeing %d skbs on FIFO%d\n",
+ dev->name, cnt, i);
+ mac_control->tx_curr_get_info[i].offset = 0;
+ mac_control->tx_curr_put_info[i].offset = 0;
+ }
+}
+
+/**
+ * stop_nic - To stop the nic
+ * @nic : device private variable.
+ * Description:
+ * This function does exactly the opposite of what the start_nic()
+ * function does. This function is called to stop the device.
+ * Return Value:
+ * void.
+ */
+
+static void stop_nic(struct s2io_nic *nic)
+{
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+ u16 interruptible, i;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ /* Disable all interrupts */
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
+ RX_MAC_INTR;
+ en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
+
+ /* Disable PRCs */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ val64 = readq(&bar0->prc_ctrl_n[i]);
+ val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
+ writeq(val64, &bar0->prc_ctrl_n[i]);
+ }
+}
+
+/**
+ * fill_rx_buffers - Allocates the Rx side skbs
+ * @nic: device private variable
+ * @ring_no: ring number
+ * Description:
+ * The function allocates Rx side skbs and puts the physical
+ * address of these buffers into the RxD buffer pointers, so that the NIC
+ * can DMA the received frame into these locations.
+ * The NIC supports 3 receive modes, viz
+ * 1. single buffer,
+ * 2. three buffer and
+ * 3. five buffer modes.
+ * Each mode defines how many fragments the received frame will be split
+ * up into by the NIC. The frame is split into L3 header, L4 header and
+ * L4 payload in three buffer mode; in five buffer mode the L4 payload
+ * itself is split into 3 fragments. As of now only single buffer mode is
+ * supported.
+ * Return Value:
+ * SUCCESS on success or an appropriate -ve value on failure.
+ */
+
+static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
+{
+ struct net_device *dev = nic->dev;
+ struct sk_buff *skb;
+ RxD_t *rxdp;
+ int off, off1, size, block_no, block_no1;
+ int offset, offset1;
+ u32 alloc_tab = 0;
+ u32 alloc_cnt = nic->pkt_cnt[ring_no] -
+ atomic_read(&nic->rx_bufs_left[ring_no]);
+ mac_info_t *mac_control;
+ struct config_param *config;
+#ifdef CONFIG_2BUFF_MODE
+ RxD_t *rxdpnext;
+ int nextblk;
+ unsigned long tmp;
+ buffAdd_t *ba;
+ dma_addr_t rxdpphys;
+#endif
+#ifndef CONFIG_S2IO_NAPI
+ unsigned long flags;
+#endif
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
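+	/*
+	 * Worst case single buffer receive size: MTU plus the Ethernet,
+	 * 802.2 and SNAP header sizes.
+	 */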
+ size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+
+ while (alloc_tab < alloc_cnt) {
+ block_no = mac_control->rx_curr_put_info[ring_no].
+ block_index;
+ block_no1 = mac_control->rx_curr_get_info[ring_no].
+ block_index;
+ off = mac_control->rx_curr_put_info[ring_no].offset;
+ off1 = mac_control->rx_curr_get_info[ring_no].offset;
+#ifndef CONFIG_2BUFF_MODE
+ offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
+ offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
+#else
+ offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
+ offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
+#endif
+
+ rxdp = nic->rx_blocks[ring_no][block_no].
+ block_virt_addr + off;
+ if ((offset == offset1) && (rxdp->Host_Control)) {
+ DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
+ DBG_PRINT(INTR_DBG, " info equated\n");
+ goto end;
+ }
+#ifndef CONFIG_2BUFF_MODE
+ if (rxdp->Control_1 == END_OF_BLOCK) {
+ mac_control->rx_curr_put_info[ring_no].
+ block_index++;
+ mac_control->rx_curr_put_info[ring_no].
+ block_index %= nic->block_count[ring_no];
+ block_no = mac_control->rx_curr_put_info
+ [ring_no].block_index;
+ off++;
+ off %= (MAX_RXDS_PER_BLOCK + 1);
+ mac_control->rx_curr_put_info[ring_no].offset =
+ off;
+ rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
+ DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
+ dev->name, rxdp);
+ }
+#ifndef CONFIG_S2IO_NAPI
+ spin_lock_irqsave(&nic->put_lock, flags);
+ nic->put_pos[ring_no] =
+ (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
+ spin_unlock_irqrestore(&nic->put_lock, flags);
+#endif
+#else
+ if (rxdp->Host_Control == END_OF_BLOCK) {
+ mac_control->rx_curr_put_info[ring_no].
+ block_index++;
+ mac_control->rx_curr_put_info[ring_no].
+ block_index %= nic->block_count[ring_no];
+ block_no = mac_control->rx_curr_put_info
+ [ring_no].block_index;
+ off = 0;
+ DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
+ dev->name, block_no,
+ (unsigned long long) rxdp->Control_1);
+ mac_control->rx_curr_put_info[ring_no].offset =
+ off;
+ rxdp = nic->rx_blocks[ring_no][block_no].
+ block_virt_addr;
+ }
+#ifndef CONFIG_S2IO_NAPI
+ spin_lock_irqsave(&nic->put_lock, flags);
+ nic->put_pos[ring_no] = (block_no *
+ (MAX_RXDS_PER_BLOCK + 1)) + off;
+ spin_unlock_irqrestore(&nic->put_lock, flags);
+#endif
+#endif
+
+#ifndef CONFIG_2BUFF_MODE
+ if (rxdp->Control_1 & RXD_OWN_XENA)
+#else
+ if (rxdp->Control_2 & BIT(0))
+#endif
+ {
+ mac_control->rx_curr_put_info[ring_no].
+ offset = off;
+ goto end;
+ }
+#ifdef CONFIG_2BUFF_MODE
+ /*
+		 * RxDs spanning cache lines will be replenished only
+		 * if the succeeding RxD is also owned by the host. It
+		 * will always be the ((8*i)+3) and ((8*i)+6)
+		 * descriptors for the 48 byte descriptor. The offending
+		 * descriptor is of course the 3rd descriptor.
+ */
+ rxdpphys = nic->rx_blocks[ring_no][block_no].
+ block_dma_addr + (off * sizeof(RxD_t));
+ if (((u64) (rxdpphys)) % 128 > 80) {
+ rxdpnext = nic->rx_blocks[ring_no][block_no].
+ block_virt_addr + (off + 1);
+ if (rxdpnext->Host_Control == END_OF_BLOCK) {
+ nextblk = (block_no + 1) %
+ (nic->block_count[ring_no]);
+ rxdpnext = nic->rx_blocks[ring_no]
+ [nextblk].block_virt_addr;
+ }
+ if (rxdpnext->Control_2 & BIT(0))
+ goto end;
+ }
+#endif
+
+#ifndef CONFIG_2BUFF_MODE
+ skb = dev_alloc_skb(size + NET_IP_ALIGN);
+#else
+ skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
+#endif
+ if (!skb) {
+ DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+ DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+ return -ENOMEM;
+ }
+#ifndef CONFIG_2BUFF_MODE
+ skb_reserve(skb, NET_IP_ALIGN);
+ memset(rxdp, 0, sizeof(RxD_t));
+ rxdp->Buffer0_ptr = pci_map_single
+ (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
+ rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
+ rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
+ rxdp->Host_Control = (unsigned long) (skb);
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ off++;
+ off %= (MAX_RXDS_PER_BLOCK + 1);
+ mac_control->rx_curr_put_info[ring_no].offset = off;
+#else
+ ba = &nic->ba[ring_no][block_no][off];
+ skb_reserve(skb, BUF0_LEN);
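+		/*
+		 * Align skb->data using ALIGN_SIZE as a mask: add the
+		 * mask, then clear the low-order bits.
+		 */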
+ tmp = (unsigned long) skb->data;
+ tmp += ALIGN_SIZE;
+ tmp &= ~ALIGN_SIZE;
+ skb->data = (void *) tmp;
+ skb->tail = (void *) tmp;
+
+ memset(rxdp, 0, sizeof(RxD_t));
+ rxdp->Buffer2_ptr = pci_map_single
+ (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Buffer0_ptr =
+ pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Buffer1_ptr =
+ pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+
+ rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
+ rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
+ rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
+ rxdp->Host_Control = (u64) ((unsigned long) (skb));
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ off++;
+ mac_control->rx_curr_put_info[ring_no].offset = off;
+#endif
+ atomic_inc(&nic->rx_bufs_left[ring_no]);
+ alloc_tab++;
+ }
+
+ end:
+ return SUCCESS;
+}
+
+/**
+ * free_rx_buffers - Frees all Rx buffers
+ * @sp: device private variable.
+ * Description:
+ * This function will free all Rx buffers allocated by host.
+ * Return Value:
+ * NONE.
+ */
+
+static void free_rx_buffers(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+ int i, j, blk = 0, off, buf_cnt = 0;
+ RxD_t *rxdp;
+ struct sk_buff *skb;
+ mac_info_t *mac_control;
+ struct config_param *config;
+#ifdef CONFIG_2BUFF_MODE
+ buffAdd_t *ba;
+#endif
+
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
+ off = j % (MAX_RXDS_PER_BLOCK + 1);
+ rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
+
+#ifndef CONFIG_2BUFF_MODE
+ if (rxdp->Control_1 == END_OF_BLOCK) {
+ rxdp =
+ (RxD_t *) ((unsigned long) rxdp->
+ Control_2);
+ j++;
+ blk++;
+ }
+#else
+ if (rxdp->Host_Control == END_OF_BLOCK) {
+ blk++;
+ continue;
+ }
+#endif
+
+ if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
+ memset(rxdp, 0, sizeof(RxD_t));
+ continue;
+ }
+
+ skb =
+ (struct sk_buff *) ((unsigned long) rxdp->
+ Host_Control);
+ if (skb) {
+#ifndef CONFIG_2BUFF_MODE
+ pci_unmap_single(sp->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ dev->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE
+ + HEADER_802_2_SIZE +
+ HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+#else
+ ba = &sp->ba[i][blk][off];
+ pci_unmap_single(sp->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sp->pdev, (dma_addr_t)
+ rxdp->Buffer1_ptr,
+ BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sp->pdev, (dma_addr_t)
+ rxdp->Buffer2_ptr,
+ dev->mtu + BUF0_LEN + 4,
+ PCI_DMA_FROMDEVICE);
+#endif
+ dev_kfree_skb(skb);
+ atomic_dec(&sp->rx_bufs_left[i]);
+ buf_cnt++;
+ }
+ memset(rxdp, 0, sizeof(RxD_t));
+ }
+ mac_control->rx_curr_put_info[i].block_index = 0;
+ mac_control->rx_curr_get_info[i].block_index = 0;
+ mac_control->rx_curr_put_info[i].offset = 0;
+ mac_control->rx_curr_get_info[i].offset = 0;
+ atomic_set(&sp->rx_bufs_left[i], 0);
+ DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
+ dev->name, buf_cnt, i);
+ }
+}
+
+/**
+ * s2io_poll - Rx interrupt handler for NAPI support
+ * @dev : pointer to the device structure.
+ * @budget : The number of packets that were budgeted to be processed
+ * during one pass through the 'Poll' function.
+ * Description:
+ * Comes into the picture only if NAPI support has been incorporated. It does
+ * the same thing that rx_intr_handler does, but not in an interrupt context;
+ * also, it will process only a given number of packets.
+ * Return value:
+ * 0 on success and 1 if there are no Rx packets to be processed.
+ */
+
+#ifdef CONFIG_S2IO_NAPI
+static int s2io_poll(struct net_device *dev, int *budget)
+{
+ nic_t *nic = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ int pkts_to_process = *budget, pkt_cnt = 0;
+ register u64 val64 = 0;
+ rx_curr_get_info_t get_info, put_info;
+ int i, get_block, put_block, get_offset, put_offset, ring_bufs;
+#ifndef CONFIG_2BUFF_MODE
+ u16 val16, cksum;
+#endif
+ struct sk_buff *skb;
+ RxD_t *rxdp;
+ mac_info_t *mac_control;
+ struct config_param *config;
+#ifdef CONFIG_2BUFF_MODE
+ buffAdd_t *ba;
+#endif
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ if (pkts_to_process > dev->quota)
+ pkts_to_process = dev->quota;
+
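+	/*
+	 * rx_traffic_int is cleared by writing back the value read, the
+	 * same way rx_intr_handler acknowledges it.
+	 */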
+ val64 = readq(&bar0->rx_traffic_int);
+ writeq(val64, &bar0->rx_traffic_int);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ get_info = mac_control->rx_curr_get_info[i];
+ get_block = get_info.block_index;
+ put_info = mac_control->rx_curr_put_info[i];
+ put_block = put_info.block_index;
+ ring_bufs = config->rx_cfg[i].num_rxd;
+ rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+#ifndef CONFIG_2BUFF_MODE
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ put_info.offset;
+ while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ (((get_offset + 1) % ring_bufs) != put_offset)) {
+ if (--pkts_to_process < 0) {
+ goto no_rx;
+ }
+ if (rxdp->Control_1 == END_OF_BLOCK) {
+ rxdp =
+ (RxD_t *) ((unsigned long) rxdp->
+ Control_2);
+ get_info.offset++;
+ get_info.offset %=
+ (MAX_RXDS_PER_BLOCK + 1);
+ get_block++;
+ get_block %= nic->block_count[i];
+ mac_control->rx_curr_get_info[i].
+ offset = get_info.offset;
+ mac_control->rx_curr_get_info[i].
+ block_index = get_block;
+ continue;
+ }
+ get_offset =
+ (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ skb =
+ (struct sk_buff *) ((unsigned long) rxdp->
+ Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: The skb is ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+ goto no_rx;
+ }
+ val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
+ val16 = (u16) (val64 >> 48);
+ cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ dev->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE +
+ HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rx_osm_handler(nic, val16, rxdp, i);
+ pkt_cnt++;
+ get_info.offset++;
+ get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
+ rxdp =
+ nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+ mac_control->rx_curr_get_info[i].offset =
+ get_info.offset;
+ }
+#else
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ put_info.offset;
+ while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ !(rxdp->Control_2 & BIT(0))) &&
+ (((get_offset + 1) % ring_bufs) != put_offset)) {
+ if (--pkts_to_process < 0) {
+ goto no_rx;
+ }
+ skb = (struct sk_buff *) ((unsigned long)
+ rxdp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: The skb is ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+ goto no_rx;
+ }
+
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer1_ptr,
+ BUF1_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer2_ptr,
+ dev->mtu + BUF0_LEN + 4,
+ PCI_DMA_FROMDEVICE);
+ ba = &nic->ba[i][get_block][get_info.offset];
+
+ rx_osm_handler(nic, rxdp, i, ba);
+
+ get_info.offset++;
+ mac_control->rx_curr_get_info[i].offset =
+ get_info.offset;
+ rxdp =
+ nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+
+ if (get_info.offset &&
+ (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
+ get_info.offset = 0;
+ mac_control->rx_curr_get_info[i].
+ offset = get_info.offset;
+ get_block++;
+ get_block %= nic->block_count[i];
+ mac_control->rx_curr_get_info[i].
+ block_index = get_block;
+ rxdp =
+ nic->rx_blocks[i][get_block].
+ block_virt_addr;
+ }
+ get_offset =
+ (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ pkt_cnt++;
+ }
+#endif
+ }
+ if (!pkt_cnt)
+ pkt_cnt = 1;
+
+ dev->quota -= pkt_cnt;
+ *budget -= pkt_cnt;
+ netif_rx_complete(dev);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if (fill_rx_buffers(nic, i) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
+ DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
+ break;
+ }
+ }
+ /* Re enable the Rx interrupts. */
+ en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+ return 0;
+
+ no_rx:
+ dev->quota -= pkt_cnt;
+ *budget -= pkt_cnt;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if (fill_rx_buffers(nic, i) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
+ DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
+ break;
+ }
+ }
+ return 1;
+}
+#else
+/**
+ * rx_intr_handler - Rx interrupt handler
+ * @nic: device private variable.
+ * Description:
+ * If the interrupt is because of a received frame or if the
+ * receive ring contains fresh, as yet unprocessed frames, this function is
+ * called. It picks out the RxD at which the last Rx processing had
+ * stopped, sends the skb to the OSM's Rx handler and then increments
+ * the offset.
+ * Return Value:
+ * NONE.
+ */
+
+static void rx_intr_handler(struct s2io_nic *nic)
+{
+ struct net_device *dev = (struct net_device *) nic->dev;
+	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ rx_curr_get_info_t get_info, put_info;
+ RxD_t *rxdp;
+ struct sk_buff *skb;
+#ifndef CONFIG_2BUFF_MODE
+ u16 val16, cksum;
+#endif
+ register u64 val64 = 0;
+ int get_block, get_offset, put_block, put_offset, ring_bufs;
+ int i, pkt_cnt = 0;
+ mac_info_t *mac_control;
+ struct config_param *config;
+#ifdef CONFIG_2BUFF_MODE
+ buffAdd_t *ba;
+#endif
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ /*
+ * rx_traffic_int reg is an R1 register, hence we read and write back
+	 * the same value in the register to clear it.
+ */
+ val64 = readq(&bar0->rx_traffic_int);
+ writeq(val64, &bar0->rx_traffic_int);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ get_info = mac_control->rx_curr_get_info[i];
+ get_block = get_info.block_index;
+ put_info = mac_control->rx_curr_put_info[i];
+ put_block = put_info.block_index;
+ ring_bufs = config->rx_cfg[i].num_rxd;
+ rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+#ifndef CONFIG_2BUFF_MODE
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ spin_lock(&nic->put_lock);
+ put_offset = nic->put_pos[i];
+ spin_unlock(&nic->put_lock);
+ while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ (((get_offset + 1) % ring_bufs) != put_offset)) {
+ if (rxdp->Control_1 == END_OF_BLOCK) {
+ rxdp = (RxD_t *) ((unsigned long)
+ rxdp->Control_2);
+ get_info.offset++;
+ get_info.offset %=
+ (MAX_RXDS_PER_BLOCK + 1);
+ get_block++;
+ get_block %= nic->block_count[i];
+ mac_control->rx_curr_get_info[i].
+ offset = get_info.offset;
+ mac_control->rx_curr_get_info[i].
+ block_index = get_block;
+ continue;
+ }
+ get_offset =
+ (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ skb = (struct sk_buff *) ((unsigned long)
+ rxdp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: The skb is ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+ return;
+ }
+ val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
+ val16 = (u16) (val64 >> 48);
+ cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ dev->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE +
+ HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rx_osm_handler(nic, val16, rxdp, i);
+ get_info.offset++;
+ get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
+ rxdp =
+ nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+ mac_control->rx_curr_get_info[i].offset =
+ get_info.offset;
+ pkt_cnt++;
+ if ((indicate_max_pkts)
+ && (pkt_cnt > indicate_max_pkts))
+ break;
+ }
+#else
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ spin_lock(&nic->put_lock);
+ put_offset = nic->put_pos[i];
+ spin_unlock(&nic->put_lock);
+ while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ !(rxdp->Control_2 & BIT(0))) &&
+ (((get_offset + 1) % ring_bufs) != put_offset)) {
+ skb = (struct sk_buff *) ((unsigned long)
+ rxdp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: The skb is ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+ return;
+ }
+
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer1_ptr,
+ BUF1_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer2_ptr,
+ dev->mtu + BUF0_LEN + 4,
+ PCI_DMA_FROMDEVICE);
+ ba = &nic->ba[i][get_block][get_info.offset];
+
+ rx_osm_handler(nic, rxdp, i, ba);
+
+ get_info.offset++;
+ mac_control->rx_curr_get_info[i].offset =
+ get_info.offset;
+ rxdp =
+ nic->rx_blocks[i][get_block].block_virt_addr +
+ get_info.offset;
+
+ if (get_info.offset &&
+ (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
+ get_info.offset = 0;
+ mac_control->rx_curr_get_info[i].
+ offset = get_info.offset;
+ get_block++;
+ get_block %= nic->block_count[i];
+ mac_control->rx_curr_get_info[i].
+ block_index = get_block;
+ rxdp =
+ nic->rx_blocks[i][get_block].
+ block_virt_addr;
+ }
+ get_offset =
+ (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+ pkt_cnt++;
+ if ((indicate_max_pkts)
+ && (pkt_cnt > indicate_max_pkts))
+ break;
+ }
+#endif
+ if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
+ break;
+ }
+}
+#endif
+/**
+ * tx_intr_handler - Transmit interrupt handler
+ * @nic : device private variable
+ * Description:
+ * If an interrupt was raised to indicate DMA complete of the
+ * Tx packet, this function is called. It identifies the last TxD
+ * whose buffer was freed and frees all skbs whose data have already been
+ * DMA'ed into the NIC's internal memory.
+ * Return Value:
+ * NONE
+ */
+
+static void tx_intr_handler(struct s2io_nic *nic)
+{
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ struct net_device *dev = (struct net_device *) nic->dev;
+ tx_curr_get_info_t get_info, put_info;
+ struct sk_buff *skb;
+ TxD_t *txdlp;
+ register u64 val64 = 0;
+ int i;
+ u16 j, frg_cnt;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &nic->mac_control;
+ config = &nic->config;
+
+ /*
+ * tx_traffic_int reg is an R1 register, hence we read and write
+	 * back the same value in the register to clear it.
+ */
+ val64 = readq(&bar0->tx_traffic_int);
+ writeq(val64, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ get_info = mac_control->tx_curr_get_info[i];
+ put_info = mac_control->tx_curr_put_info[i];
+ txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
+ list_virt_addr;
+ while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
+ (get_info.offset != put_info.offset) &&
+ (txdlp->Host_Control)) {
+ /* Check for TxD errors */
+ if (txdlp->Control_1 & TXD_T_CODE) {
+ unsigned long long err;
+ err = txdlp->Control_1 & TXD_T_CODE;
+ DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
+ err);
+ }
+
+ skb = (struct sk_buff *) ((unsigned long)
+ txdlp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: Null skb ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
+ return;
+ }
+ nic->tx_pkt_count++;
+
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+
+ /* For unfragmented skb */
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ txdlp->Buffer_Pointer,
+ skb->len - skb->data_len,
+ PCI_DMA_TODEVICE);
+ if (frg_cnt) {
+ TxD_t *temp = txdlp;
+ txdlp++;
+ for (j = 0; j < frg_cnt; j++, txdlp++) {
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[j];
+ pci_unmap_page(nic->pdev,
+ (dma_addr_t)
+ txdlp->
+ Buffer_Pointer,
+ frag->size,
+ PCI_DMA_TODEVICE);
+ }
+ txdlp = temp;
+ }
+ memset(txdlp, 0,
+ (sizeof(TxD_t) * config->max_txds));
+
+ /* Updating the statistics block */
+ nic->stats.tx_packets++;
+ nic->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+
+ get_info.offset++;
+ get_info.offset %= get_info.fifo_len + 1;
+ txdlp = (TxD_t *) nic->list_info[i]
+ [get_info.offset].list_virt_addr;
+ mac_control->tx_curr_get_info[i].offset =
+ get_info.offset;
+ }
+ }
+
+ spin_lock(&nic->tx_lock);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ spin_unlock(&nic->tx_lock);
+}
+
+/**
+ * alarm_intr_handler - Alarm Interrupt handler
+ * @nic: device private variable
+ * Description: If the interrupt was neither because of an Rx packet nor a
+ * Tx complete, this function is called. If the interrupt was to indicate
+ * a loss of link, the OSM link status handler is invoked; for any other
+ * alarm interrupt the block that raised the interrupt is displayed
+ * and a H/W reset is issued.
+ * Return Value:
+ * NONE
+*/
+
+static void alarm_intr_handler(struct s2io_nic *nic)
+{
+ struct net_device *dev = (struct net_device *) nic->dev;
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0, err_reg = 0;
+
+ /* Handling link status change error Intr */
+ err_reg = readq(&bar0->mac_rmac_err_reg);
+ writeq(err_reg, &bar0->mac_rmac_err_reg);
+ if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
+ schedule_work(&nic->set_link_task);
+ }
+
+ /* In case of a serious error, the device will be Reset. */
+ val64 = readq(&bar0->serr_source);
+ if (val64 & SERR_SOURCE_ANY) {
+ DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
+ DBG_PRINT(ERR_DBG, "serious error!!\n");
+ netif_stop_queue(dev);
+ schedule_work(&nic->rst_timer_task);
+ }
+
+ /*
+ * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
+ * Error occurs, the adapter will be recycled by disabling the
+ * adapter enable bit and enabling it again after the device
+ * becomes Quiescent.
+ */
+ val64 = readq(&bar0->pcc_err_reg);
+ writeq(val64, &bar0->pcc_err_reg);
+ if (val64 & PCC_FB_ECC_DB_ERR) {
+ u64 ac = readq(&bar0->adapter_control);
+ ac &= ~(ADAPTER_CNTL_EN);
+ writeq(ac, &bar0->adapter_control);
+ ac = readq(&bar0->adapter_control);
+ schedule_work(&nic->set_link_task);
+ }
+
+ /* Other type of interrupts are not being handled now, TODO */
+}
+
+/**
+ * wait_for_cmd_complete - waits for a command to complete.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * Description: Function that waits for a command written into the RMAC
+ * ADDR DATA registers to be completed and returns either success or
+ * error depending on whether the command completed or not.
+ * Return value:
+ * SUCCESS on success and FAILURE on failure.
+ */
+
+static int wait_for_cmd_complete(nic_t * sp)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ int ret = FAILURE, cnt = 0;
+ u64 val64;
+
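+	/* Poll for up to ~500 ms (10 x 50 ms) for the strobe bit to clear. */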
+ while (TRUE) {
+ val64 = readq(&bar0->rmac_addr_cmd_mem);
+ if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ ret = SUCCESS;
+ break;
+ }
+ msleep(50);
+ if (cnt++ > 10)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * s2io_reset - Resets the card.
+ * @sp : private member of the device structure.
+ * Description: Function to Reset the card. This function then also
+ * restores the previously saved PCI configuration space registers as
+ * the card reset also resets the configuration space.
+ * Return value:
+ * void.
+ */
+
+static void s2io_reset(nic_t * sp)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64;
+ u16 subid;
+
+ val64 = SW_RESET_ALL;
+ writeq(val64, &bar0->sw_reset);
+
+ /*
+ * At this stage, if the PCI write is indeed completed, the
+ * card is reset and so is the PCI Config space of the device.
+ * So a read cannot be issued at this stage on any of the
+ * registers to ensure the write into "sw_reset" register
+ * has gone through.
+ * Question: Is there any system call that will explicitly force
+ * all the write commands still pending on the bus to be pushed
+ * through?
+	 * As of now I am just giving a 250ms delay and hoping that the
+	 * PCI write to the sw_reset register is done by this time.
+ */
+ msleep(250);
+
+	/* Restore the PCI state saved during initialization. */
+ pci_restore_state(sp->pdev);
+ s2io_init_pci(sp);
+
+ msleep(250);
+
+ /* SXE-002: Configure link and activity LED to turn it off */
+ subid = sp->pdev->subsystem_device;
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *) bar0 + 0x2700);
+ }
+
+ sp->device_enabled_once = FALSE;
+}
+
+/**
+ * s2io_set_swapper - to set the swapper control on the card
+ * @sp : private member of the device structure,
+ * pointer to the s2io_nic structure.
+ * Description: Function to set the swapper control on the card
+ * correctly depending on the 'endianness' of the system.
+ * Return value:
+ * SUCCESS on success and FAILURE on failure.
+ */
+
+static int s2io_set_swapper(nic_t * sp)
+{
+ struct net_device *dev = sp->dev;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64, valt, valr;
+
+ /*
+ * Set proper endian settings and verify the same by reading
+ * the PIF Feed-back register.
+ */
+
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x0123456789ABCDEFULL) {
+ int i = 0;
+ u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
+ 0x8100008181000081ULL, /* FE=1, SE=0 */
+ 0x4200004242000042ULL, /* FE=0, SE=1 */
+ 0}; /* FE=0, SE=0 */
+
+ while(i<4) {
+ writeq(value[i], &bar0->swapper_ctrl);
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 == 0x0123456789ABCDEFULL)
+ break;
+ i++;
+ }
+ if (i == 4) {
+ DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "feedback read %llx\n",
+ (unsigned long long) val64);
+ return FAILURE;
+ }
+ valr = value[i];
+ } else {
+ valr = readq(&bar0->swapper_ctrl);
+ }
+
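+	/*
+	 * Verify the write swapper as well by writing a known pattern to
+	 * the xmsi_address register and reading it back.
+	 */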
+ valt = 0x0123456789ABCDEFULL;
+ writeq(valt, &bar0->xmsi_address);
+ val64 = readq(&bar0->xmsi_address);
+
+ if(val64 != valt) {
+ int i = 0;
+ u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
+ 0x0081810000818100ULL, /* FE=1, SE=0 */
+ 0x0042420000424200ULL, /* FE=0, SE=1 */
+ 0}; /* FE=0, SE=0 */
+
+ while(i<4) {
+ writeq((value[i] | valr), &bar0->swapper_ctrl);
+ writeq(valt, &bar0->xmsi_address);
+ val64 = readq(&bar0->xmsi_address);
+ if(val64 == valt)
+ break;
+ i++;
+ }
+ if(i == 4) {
+ DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
+ DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
+ return FAILURE;
+ }
+ }
+ val64 = readq(&bar0->swapper_ctrl);
+ val64 &= 0xFFFF000000000000ULL;
+
+#ifdef __BIG_ENDIAN
+ /*
+	 * The device is by default set to a big endian format, so a
+ * big endian driver need not set anything.
+ */
+ val64 |= (SWAPPER_CTRL_TXP_FE |
+ SWAPPER_CTRL_TXP_SE |
+ SWAPPER_CTRL_TXD_R_FE |
+ SWAPPER_CTRL_TXD_W_FE |
+ SWAPPER_CTRL_TXF_R_FE |
+ SWAPPER_CTRL_RXD_R_FE |
+ SWAPPER_CTRL_RXD_W_FE |
+ SWAPPER_CTRL_RXF_W_FE |
+ SWAPPER_CTRL_XMSI_FE |
+ SWAPPER_CTRL_XMSI_SE |
+ SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+ writeq(val64, &bar0->swapper_ctrl);
+#else
+ /*
+ * Initially we enable all bits to make it accessible by the
+ * driver, then we selectively enable only those bits that
+ * we want to set.
+ */
+ val64 |= (SWAPPER_CTRL_TXP_FE |
+ SWAPPER_CTRL_TXP_SE |
+ SWAPPER_CTRL_TXD_R_FE |
+ SWAPPER_CTRL_TXD_R_SE |
+ SWAPPER_CTRL_TXD_W_FE |
+ SWAPPER_CTRL_TXD_W_SE |
+ SWAPPER_CTRL_TXF_R_FE |
+ SWAPPER_CTRL_RXD_R_FE |
+ SWAPPER_CTRL_RXD_R_SE |
+ SWAPPER_CTRL_RXD_W_FE |
+ SWAPPER_CTRL_RXD_W_SE |
+ SWAPPER_CTRL_RXF_W_FE |
+ SWAPPER_CTRL_XMSI_FE |
+ SWAPPER_CTRL_XMSI_SE |
+ SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+ writeq(val64, &bar0->swapper_ctrl);
+#endif
+ val64 = readq(&bar0->swapper_ctrl);
+
+ /*
+ * Verifying if endian settings are accurate by reading a
+ * feedback register.
+ */
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x0123456789ABCDEFULL) {
+		/* Endian settings are incorrect, this calls for another look. */
+ DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "feedback read %llx\n",
+ (unsigned long long) val64);
+ return FAILURE;
+ }
+
+ return SUCCESS;
+}
+
+/* ********************************************************* *
+ * Functions defined below concern the OS part of the driver *
+ * ********************************************************* */
+
+/**
+ * s2io_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver. It mainly calls a
+ * function to allocate Rx buffers and inserts them into the buffer
+ * descriptors and then enables the Rx part of the NIC.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+
+static int s2io_open(struct net_device *dev)
+{
+ nic_t *sp = dev->priv;
+ int err = 0;
+
+ /*
+ * Make sure you have link off by default every time
+ * Nic is initialized
+ */
+ netif_carrier_off(dev);
+ sp->last_link_state = LINK_DOWN;
+
+ /* Initialize H/W and enable interrupts */
+ if (s2io_card_up(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
+ dev->name);
+ return -ENODEV;
+ }
+
+ /* After proper initialization of H/W, register ISR */
+ err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
+ sp->name, dev);
+ if (err) {
+ s2io_reset(sp);
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ return err;
+ }
+
+ if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
+ DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
+ s2io_reset(sp);
+ return -ENODEV;
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * s2io_close -close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver. It needs to undo exactly
+ * whatever was done by the open entry point, thus it's usually referred to
+ * as the close function. Among other things this function mainly stops the
+ * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+
+static int s2io_close(struct net_device *dev)
+{
+ nic_t *sp = dev->priv;
+
+ flush_scheduled_work();
+ netif_stop_queue(dev);
+ /* Reset card, kill tasklet and free Tx and Rx buffers. */
+ s2io_card_down(sp);
+
+ free_irq(dev->irq, dev);
+ sp->device_close_flag = TRUE; /* Device is shut down. */
+ return 0;
+}
+
+/**
+ * s2io_xmit - Tx entry point of the driver
+ * @skb : the socket buffer containing the Tx data.
+ * @dev : device pointer.
+ * Description :
+ * This function is the Tx entry point of the driver. S2IO NIC supports
+ * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
+ * NOTE: when the device can't queue the pkt, just the trans_start variable
+ * will not be updated.
+ * Return value:
+ * 0 on success & 1 on failure.
+ */
+
+static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ nic_t *sp = dev->priv;
+ u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
+ register u64 val64;
+ TxD_t *txdp;
+ TxFIFO_element_t __iomem *tx_fifo;
+ unsigned long flags;
+#ifdef NETIF_F_TSO
+ int mss;
+#endif
+ mac_info_t *mac_control;
+ struct config_param *config;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
+ spin_lock_irqsave(&sp->tx_lock, flags);
+
+ if (atomic_read(&sp->card_state) == CARD_DOWN) {
+ DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
+ dev->name);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+ return 1;
+ }
+
+ queue = 0;
+ put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
+ get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
+ txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
+
+ queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
+ /* Avoid "put" pointer going beyond "get" pointer */
+ if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
+ DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
+ netif_stop_queue(dev);
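+		/*
+		 * No free TxDs are available, so the queue is stopped and
+		 * this packet is dropped; returning 0 tells the stack the
+		 * skb has been consumed.
+		 */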
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+ return 0;
+ }
+#ifdef NETIF_F_TSO
+ mss = skb_shinfo(skb)->tso_size;
+ if (mss) {
+ txdp->Control_1 |= TXD_TCP_LSO_EN;
+ txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+ }
+#endif
+
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ frg_len = skb->len - skb->data_len;
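+	/*
+	 * frg_len is the length of the linear (non-paged) portion of the
+	 * skb; it is mapped below as buffer 0, and each page fragment is
+	 * then described by its own TxD.
+	 */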
+
+ txdp->Host_Control = (unsigned long) skb;
+ txdp->Buffer_Pointer = pci_map_single
+ (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
+ if (skb->ip_summed == CHECKSUM_HW) {
+ txdp->Control_2 |=
+ (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
+ TXD_TX_CKO_UDP_EN);
+ }
+
+ txdp->Control_2 |= config->tx_intr_type;
+
+ txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
+ TXD_GATHER_CODE_FIRST);
+ txdp->Control_1 |= TXD_LIST_OWN_XENA;
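+	/*
+	 * The first TxD is marked as owned by the Xena adapter here; the
+	 * list is actually handed to the hardware only when its address is
+	 * written to tx_fifo->TxDL_Pointer further below.
+	 */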
+
+ /* For fragmented SKB. */
+ for (i = 0; i < frg_cnt; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ txdp++;
+ txdp->Buffer_Pointer = (u64) pci_map_page
+ (sp->pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
+ }
+ txdp->Control_1 |= TXD_GATHER_CODE_LAST;
+
+ tx_fifo = mac_control->tx_FIFO_start[queue];
+ val64 = sp->list_info[queue][put_off].list_phy_addr;
+ writeq(val64, &tx_fifo->TxDL_Pointer);
+
+ val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
+ TX_FIFO_LAST_LIST);
+#ifdef NETIF_F_TSO
+ if (mss)
+ val64 |= TX_FIFO_SPECIAL_FUNC;
+#endif
+ writeq(val64, &tx_fifo->List_Control);
+
+ /* Perform a PCI read to flush previous writes */
+ val64 = readq(&bar0->general_int_status);
+
+ put_off++;
+ put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
+ mac_control->tx_curr_put_info[queue].offset = put_off;
+
+ /* Avoid "put" pointer going beyond "get" pointer */
+ if (((put_off + 1) % queue_len) == get_off) {
+ DBG_PRINT(TX_DBG,
+ "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
+ put_off, get_off);
+ netif_stop_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * s2io_isr - ISR handler of the device.
+ * @irq: the irq of the device.
+ * @dev_id: a void pointer to the dev structure of the NIC.
+ * @pt_regs: pointer to the registers pushed on the stack.
+ * Description: This function is the ISR handler of the device. It
+ * identifies the reason for the interrupt and calls the relevant
+ * service routines. As a contingency measure, this ISR allocates the
+ * recv buffers, if their numbers are below the panic value which is
+ * presently set to 25% of the original number of rcv buffers allocated.
+ * Return value:
+ * IRQ_HANDLED: will be returned if IRQ was handled by this routine
+ * IRQ_NONE: will be returned if interrupt is not from our device
+ */
+static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+#ifndef CONFIG_S2IO_NAPI
+ int i, ret;
+#endif
+ u64 reason = 0;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ /*
+ * Identify the cause for interrupt and call the appropriate
+ * interrupt handler. Causes for the interrupt could be;
+ * 1. Rx of packet.
+ * 2. Tx complete.
+ * 3. Link down.
+ * 4. Error in any functional blocks of the NIC.
+ */
+ reason = readq(&bar0->general_int_status);
+
+ if (!reason) {
+ /* The interrupt was not raised by Xena. */
+ return IRQ_NONE;
+ }
+
+ /* If Intr is because of Tx Traffic */
+ if (reason & GEN_INTR_TXTRAFFIC) {
+ tx_intr_handler(sp);
+ }
+
+ /* If Intr is because of an error */
+ if (reason & (GEN_ERROR_INTR))
+ alarm_intr_handler(sp);
+
+#ifdef CONFIG_S2IO_NAPI
+ if (reason & GEN_INTR_RXTRAFFIC) {
+ if (netif_rx_schedule_prep(dev)) {
+ en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
+ DISABLE_INTRS);
+ __netif_rx_schedule(dev);
+ }
+ }
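+	/*
+	 * Under NAPI, Rx traffic interrupts are disabled above and the poll
+	 * routine (s2io_poll) is scheduled; the interrupt is presumably
+	 * re-enabled once polling has drained the ring.
+	 */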
+#else
+ /* If Intr is because of Rx Traffic */
+ if (reason & GEN_INTR_RXTRAFFIC) {
+ rx_intr_handler(sp);
+ }
+#endif
+
+ /*
+ * If the Rx buffer count is below the panic threshold then
+ * reallocate the buffers from the interrupt handler itself,
+ * else schedule a tasklet to reallocate the buffers.
+ */
+#ifndef CONFIG_S2IO_NAPI
+ for (i = 0; i < config->rx_ring_num; i++) {
+ int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
+ int level = rx_buffer_level(sp, rxb_size, i);
+
+ if ((level == PANIC) && (!TASKLET_IN_USE)) {
+ DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
+ DBG_PRINT(INTR_DBG, "PANIC levels\n");
+ if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory",
+ dev->name);
+ DBG_PRINT(ERR_DBG, " in ISR!!\n");
+ clear_bit(0, (&sp->tasklet_status));
+ return IRQ_HANDLED;
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ } else if (level == LOW) {
+ tasklet_schedule(&sp->task);
+ }
+ }
+#endif
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * s2io_get_stats - Updates the device statistics structure.
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function updates the device statistics structure in the s2io_nic
+ * structure and returns a pointer to the same.
+ * Return value:
+ * pointer to the updated net_device_stats structure.
+ */
+
+static struct net_device_stats *s2io_get_stats(struct net_device *dev)
+{
+ nic_t *sp = dev->priv;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
+ sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
+ sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
+ sp->stats.rx_length_errors =
+ mac_control->stats_info->rmac_long_frms;
+
+ return (&sp->stats);
+}
+
+/**
+ * s2io_set_multicast - entry point for multicast address enable/disable.
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled. This also gets
+ * called to set/reset promiscuous mode. Depending on the device flags, we
+ * determine whether multicast addresses must be enabled or promiscuous mode
+ * is to be disabled, etc.
+ * Return value:
+ * void.
+ */
+
+static void s2io_set_multicast(struct net_device *dev)
+{
+ int i, j, prev_cnt;
+ struct dev_mc_list *mclist;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
+ 0xfeffffffffffULL;
+ u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
+ void __iomem *add;
+
+ if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
+ /* Enable all Multicast addresses */
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ /* Wait till command completes */
+ wait_for_cmd_complete(sp);
+
+ sp->m_cast_flg = 1;
+ sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
+ } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
+ /* Disable all Multicast addresses */
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
+ &bar0->rmac_addr_data0_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ /* Wait till command completes */
+ wait_for_cmd_complete(sp);
+
+ sp->m_cast_flg = 0;
+ sp->all_multi_pos = 0;
+ }
+
+ if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
+ /* Put the NIC into promiscuous mode */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 |= MAC_CFG_RMAC_PROM_ENABLE;
+
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
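+		/*
+		 * mac_cfg appears to be writable only 32 bits at a time, with
+		 * each 32-bit write preceded by writing the unlock key to
+		 * rmac_cfg_key, hence the split writes above.
+		 */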
+
+ val64 = readq(&bar0->mac_cfg);
+ sp->promisc_flg = 1;
+ DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
+ dev->name);
+ } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
+ /* Remove the NIC from promiscuous mode */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
+
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+
+ val64 = readq(&bar0->mac_cfg);
+ sp->promisc_flg = 0;
+ DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
+ dev->name);
+ }
+
+ /* Update individual M_CAST address list */
+ if ((!sp->m_cast_flg) && dev->mc_count) {
+ if (dev->mc_count >
+ (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
+ DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "can be added, please enable ");
+ DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
+ return;
+ }
+
+ prev_cnt = sp->mc_addr_count;
+ sp->mc_addr_count = dev->mc_count;
+
+ /* Clear out the previous list of Mc in the H/W. */
+ for (i = 0; i < prev_cnt; i++) {
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET
+ (MAC_MC_ADDR_START_OFFSET + i);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+			/* Wait for command to complete */
+ if (wait_for_cmd_complete(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Adding ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Multicasts failed\n");
+ return;
+ }
+ }
+
+ /* Create the new Rx filter list and update the same in H/W. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
+ ETH_ALEN);
+			/* Start with a clean accumulator for each address. */
+			mac_addr = 0;
+			for (j = 0; j < ETH_ALEN; j++) {
+ mac_addr |= mclist->dmi_addr[j];
+ mac_addr <<= 8;
+ }
+ mac_addr >>= 8;
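+			/*
+			 * The loop above shifts one byte too far, so the final
+			 * right shift compensates, leaving the six address
+			 * bytes packed MSB-first in the low 48 bits of
+			 * mac_addr.
+			 */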
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
+ &bar0->rmac_addr_data1_mem);
+
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET
+ (i + MAC_MC_ADDR_START_OFFSET);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+			/* Wait for command to complete */
+ if (wait_for_cmd_complete(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Adding ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Multicasts failed\n");
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * s2io_set_mac_addr - Programs the Xframe mac address
+ * @dev : pointer to the device structure.
+ * @addr: a uchar pointer to the new mac address which is to be set.
+ * Description : This procedure will program the Xframe to receive
+ * frames with the new MAC address.
+ * Return value: SUCCESS on success and FAILURE if the command to program
+ * the new address does not complete.
+ */
+
+int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
+{
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ register u64 val64, mac_addr = 0;
+ int i;
+
+ /*
+ * Set the new MAC address as the new unicast filter and reflect this
+ * change on the device address registered with the OS. It will be
+ * at offset 0.
+ */
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr <<= 8;
+ mac_addr |= addr[i];
+ }
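+	/*
+	 * The address bytes are packed MSB-first into the low 48 bits of
+	 * mac_addr before being loaded into the RMAC address memory below.
+	 */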
+
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
+ &bar0->rmac_addr_data0_mem);
+
+ val64 =
+ RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(0);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ /* Wait till command completes */
+ if (wait_for_cmd_complete(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
+ return FAILURE;
+ }
+
+ return SUCCESS;
+}
+
+/**
+ * s2io_ethtool_sset - Sets different link parameters.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @info: pointer to the structure with parameters given by ethtool to set
+ * link information.
+ * Description:
+ * The function sets different link parameters provided by the user onto
+ * the NIC.
+ * Return value:
+ * 0 on success.
+*/
+
+static int s2io_ethtool_sset(struct net_device *dev,
+ struct ethtool_cmd *info)
+{
+ nic_t *sp = dev->priv;
+ if ((info->autoneg == AUTONEG_ENABLE) ||
+ (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ else {
+ s2io_close(sp->dev);
+ s2io_open(sp->dev);
+ }
+
+ return 0;
+}
+
+/**
+ * s2io_ethtool_gset - Return link specific information.
+ * @sp : private member of the device structure, pointer to the
+ * s2io_nic structure.
+ * @info : pointer to the structure with parameters given by ethtool
+ * to return link information.
+ * Description:
+ * Returns link specific information like speed, duplex, etc. to ethtool.
+ * Return value :
+ * return 0 on success.
+ */
+
+static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ nic_t *sp = dev->priv;
+ info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ info->port = PORT_FIBRE;
+ /* info->transceiver?? TODO */
+
+ if (netif_carrier_ok(sp->dev)) {
+ info->speed = 10000;
+ info->duplex = DUPLEX_FULL;
+ } else {
+ info->speed = -1;
+ info->duplex = -1;
+ }
+
+ info->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+/**
+ * s2io_ethtool_gdrvinfo - Returns driver specific information.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @info : pointer to the structure with parameters given by ethtool to
+ * return driver information.
+ * Description:
+ * Returns driver specific information like name, version, etc. to ethtool.
+ * Return value:
+ * void
+ */
+
+static void s2io_ethtool_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ nic_t *sp = dev->priv;
+
+ strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
+ strncpy(info->version, s2io_driver_version,
+ sizeof(s2io_driver_version));
+ strncpy(info->fw_version, "", 32);
+ strncpy(info->bus_info, pci_name(sp->pdev), 32);
+ info->regdump_len = XENA_REG_SPACE;
+ info->eedump_len = XENA_EEPROM_SPACE;
+ info->testinfo_len = S2IO_TEST_LEN;
+ info->n_stats = S2IO_STAT_LEN;
+}
+
+/**
+ * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
+ * @sp: private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @regs : pointer to the structure with parameters given by ethtool for
+ * dumping the registers.
+ * @reg_space: The input argument into which all the registers are dumped.
+ * Description:
+ * Dumps the entire register space of xFrame NIC into the user given
+ * buffer area.
+ * Return value :
+ * void .
+*/
+
+static void s2io_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u64 reg;
+ u8 *reg_space = (u8 *) space;
+ nic_t *sp = dev->priv;
+
+ regs->len = XENA_REG_SPACE;
+ regs->version = sp->pdev->subsystem_device;
+
+ for (i = 0; i < regs->len; i += 8) {
+ reg = readq(sp->bar0 + i);
+ memcpy((reg_space + i), &reg, 8);
+ }
+}
+
+/**
+ * s2io_phy_id - timer function that alternates adapter LED.
+ * @data : address of the private member of the device structure, which
+ * is a pointer to the s2io_nic structure, provided as an unsigned long.
+ * Description: This is actually the timer function that alternates the
+ * adapter LED bit of the adapter control bit to set/reset every time on
+ * invocation. The timer is set for 1/2 a second, hence the NIC blinks
+ * once every second.
+*/
+static void s2io_phy_id(unsigned long data)
+{
+ nic_t *sp = (nic_t *) data;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64 = 0;
+ u16 subid;
+
+ subid = sp->pdev->subsystem_device;
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = readq(&bar0->gpio_control);
+ val64 ^= GPIO_CTRL_GPIO_0;
+ writeq(val64, &bar0->gpio_control);
+ } else {
+ val64 = readq(&bar0->adapter_control);
+ val64 ^= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ }
+
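+	/*
+	 * Re-arm the timer for half a second so the LED keeps toggling
+	 * until del_timer_sync() in s2io_ethtool_idnic() stops it.
+	 */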
+ mod_timer(&sp->id_timer, jiffies + HZ / 2);
+}
+
+/**
+ * s2io_ethtool_idnic - To physically identify the nic on the system.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @id : pointer to the structure with identification parameters given by
+ * ethtool.
+ * Description: Used to physically identify the NIC on the system.
+ * The Link LED will blink for a time specified by the user for
+ * identification.
+ * NOTE: The Link has to be Up to be able to blink the LED. Hence
+ * identification is possible only if its link is up.
+ * Return value:
+ * int , returns 0 on success
+ */
+
+static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
+{
+ u64 val64 = 0, last_gpio_ctrl_val;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u16 subid;
+
+ subid = sp->pdev->subsystem_device;
+ last_gpio_ctrl_val = readq(&bar0->gpio_control);
+ if ((subid & 0xFF) < 0x07) {
+ val64 = readq(&bar0->adapter_control);
+ if (!(val64 & ADAPTER_CNTL_EN)) {
+ printk(KERN_ERR
+ "Adapter Link down, cannot blink LED\n");
+ return -EFAULT;
+ }
+ }
+ if (sp->id_timer.function == NULL) {
+ init_timer(&sp->id_timer);
+ sp->id_timer.function = s2io_phy_id;
+ sp->id_timer.data = (unsigned long) sp;
+ }
+ mod_timer(&sp->id_timer, jiffies);
+ if (data)
+ msleep(data * 1000);
+ else
+ msleep(0xFFFFFFFF);
+ del_timer_sync(&sp->id_timer);
+
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ writeq(last_gpio_ctrl_val, &bar0->gpio_control);
+ last_gpio_ctrl_val = readq(&bar0->gpio_control);
+ }
+
+ return 0;
+}
+
+/**
+ * s2io_ethtool_getpause_data - Pause frame generation and reception.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * Returns the Pause frame generation and reception capability of the NIC.
+ * Return value:
+ * void
+ */
+static void s2io_ethtool_getpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ u64 val64;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (val64 & RMAC_PAUSE_GEN_ENABLE)
+ ep->tx_pause = TRUE;
+ if (val64 & RMAC_PAUSE_RX_ENABLE)
+ ep->rx_pause = TRUE;
+ ep->autoneg = FALSE;
+}
+
+/**
+ * s2io_ethtool_setpause_data - set/reset pause frame generation.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ * Return value:
+ * int, returns 0 on Success
+ */
+
+static int s2io_ethtool_setpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ u64 val64;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (ep->tx_pause)
+ val64 |= RMAC_PAUSE_GEN_ENABLE;
+ else
+ val64 &= ~RMAC_PAUSE_GEN_ENABLE;
+ if (ep->rx_pause)
+ val64 |= RMAC_PAUSE_RX_ENABLE;
+ else
+ val64 &= ~RMAC_PAUSE_RX_ENABLE;
+ writeq(val64, &bar0->rmac_pause_cfg);
+ return 0;
+}
+
+/**
+ * read_eeprom - reads 4 bytes of data from user given offset.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @off : offset from which the data is to be read
+ * @data : Its an output parameter where the data read at the given
+ * offset is stored.
+ * Description:
+ * Will read 4 bytes of data from the user given offset and return the
+ * read data.
+ * NOTE: Will allow reading of only the part of the EEPROM visible through the
+ * I2C bus.
+ * Return value:
+ * -1 on failure and 0 on success.
+ */
+
+#define S2IO_DEV_ID 5
+static int read_eeprom(nic_t * sp, int off, u32 * data)
+{
+ int ret = -1;
+ u32 exit_cnt = 0;
+ u64 val64;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
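+	/*
+	 * The I2C read has been kicked off; the loop below polls the control
+	 * register for completion, waiting up to about 250ms (5 x 50ms).
+	 */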
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ *data = I2C_CONTROL_GET_DATA(val64);
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+
+ return ret;
+}
+
+/**
+ * write_eeprom - actually writes the relevant part of the data value.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @off : offset at which the data must be written
+ * @data : The data that is to be written
+ * @cnt : Number of bytes of the data that are actually to be written into
+ * the Eeprom. (max of 3)
+ * Description:
+ * Actually writes the relevant part of the data value into the Eeprom
+ * through the I2C bus.
+ * Return value:
+ * 0 on success, -1 on failure.
+ */
+
+static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
+{
+ int exit_cnt = 0, ret = -1;
+ u64 val64;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ if (!(val64 & I2C_CONTROL_NACK))
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+
+ return ret;
+}
+
+/**
+ * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @eeprom : pointer to the user level structure provided by ethtool,
+ * containing all relevant information.
+ * @data_buf : user defined value to be written into Eeprom.
+ * Description: Reads the values stored in the Eeprom at given offset
+ * for a given length. Stores these values in the input argument data
+ * buffer 'data_buf' and returns these to the caller (ethtool).
+ * Return value:
+ * int 0 on success
+ */
+
+static int s2io_ethtool_geeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 * data_buf)
+{
+ u32 data, i, valid;
+ nic_t *sp = dev->priv;
+
+ eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
+
+ if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
+ eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
+
+ for (i = 0; i < eeprom->len; i += 4) {
+ if (read_eeprom(sp, (eeprom->offset + i), &data)) {
+ DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
+ return -EFAULT;
+ }
+ valid = INV(data);
+ memcpy((data_buf + i), &valid, 4);
+ }
+ return 0;
+}
+
+/**
+ * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @eeprom : pointer to the user level structure provided by ethtool,
+ * containing all relevant information.
+ * @data_buf : user defined value to be written into Eeprom.
+ * Description:
+ * Tries to write the user provided value in the Eeprom, at the offset
+ * given by the user.
+ * Return value:
+ * 0 on success, -EFAULT on failure.
+ */
+
+static int s2io_ethtool_seeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 * data_buf)
+{
+ int len = eeprom->len, cnt = 0;
+ u32 valid = 0, data;
+ nic_t *sp = dev->priv;
+
+ if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
+ DBG_PRINT(ERR_DBG,
+ "ETHTOOL_WRITE_EEPROM Err: Magic value ");
+ DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
+ eeprom->magic);
+ return -EFAULT;
+ }
+
+ while (len) {
+ data = (u32) data_buf[cnt] & 0x000000FF;
+ if (data) {
+ valid = (u32) (data << 24);
+ } else
+ valid = data;
+
+ if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
+ DBG_PRINT(ERR_DBG,
+ "ETHTOOL_WRITE_EEPROM Err: Cannot ");
+ DBG_PRINT(ERR_DBG,
+ "write into the specified offset\n");
+ return -EFAULT;
+ }
+ cnt++;
+ len--;
+ }
+
+ return 0;
+}
+
+/**
+ * s2io_register_test - reads and writes into all clock domains.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data : variable that returns the result of each of the tests conducted
+ * by the driver.
+ * Description:
+ * Read and write into all clock domains. The NIC has 3 clock domains,
+ * see that registers in all the three regions are accessible.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_register_test(nic_t * sp, uint64_t * data)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64 = 0;
+ int fail = 0;
+
+ val64 = readq(&bar0->pcc_enable);
+ if (val64 != 0xff00000000000000ULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
+ }
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (val64 != 0xc000ffff00000000ULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
+ }
+
+ val64 = readq(&bar0->rx_queue_cfg);
+ if (val64 != 0x0808080808080808ULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
+ }
+
+ val64 = readq(&bar0->xgxs_efifo_cfg);
+ if (val64 != 0x000000001923141EULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
+ }
+
+ val64 = 0x5A5A5A5A5A5A5A5AULL;
+ writeq(val64, &bar0->xmsi_data);
+ val64 = readq(&bar0->xmsi_data);
+ if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
+ fail = 1;
+ DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
+ }
+
+ val64 = 0xA5A5A5A5A5A5A5A5ULL;
+ writeq(val64, &bar0->xmsi_data);
+ val64 = readq(&bar0->xmsi_data);
+ if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
+ fail = 1;
+ DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
+ }
+
+ *data = fail;
+ return 0;
+}
+
+/**
+ * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data:variable that returns the result of each of the test conducted by
+ * the driver.
+ * Description:
+ * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
+ * register.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
+{
+ int fail = 0;
+ u32 ret_data;
+
+ /* Test Write Error at offset 0 */
+ if (!write_eeprom(sp, 0, 0, 3))
+ fail = 1;
+
+ /* Test Write at offset 4f0 */
+ if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
+ fail = 1;
+ if (read_eeprom(sp, 0x4F0, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x01234567)
+ fail = 1;
+
+	/* Reset the EEPROM data to FFFF */
+ write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
+
+ /* Test Write Request Error at offset 0x7c */
+ if (!write_eeprom(sp, 0x07C, 0, 3))
+ fail = 1;
+
+ /* Test Write Request at offset 0x7fc */
+ if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
+ fail = 1;
+ if (read_eeprom(sp, 0x7FC, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x01234567)
+ fail = 1;
+
+	/* Reset the EEPROM data to FFFF */
+ write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
+
+ /* Test Write Error at offset 0x80 */
+ if (!write_eeprom(sp, 0x080, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0xfc */
+ if (!write_eeprom(sp, 0x0FC, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0x100 */
+ if (!write_eeprom(sp, 0x100, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 4ec */
+ if (!write_eeprom(sp, 0x4EC, 0, 3))
+ fail = 1;
+
+ *data = fail;
+ return 0;
+}
+
+/**
+ * s2io_bist_test - invokes the MemBist test of the card .
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data:variable that returns the result of each of the test conducted by
+ * the driver.
+ * Description:
+ * This invokes the MemBist test of the card. We give around
+ * 2 secs time for the Test to complete. If it's still not complete
+ * within this period, we consider that the test failed.
+ * Return value:
+ * 0 on success and -1 on failure.
+ */
+
+static int s2io_bist_test(nic_t * sp, uint64_t * data)
+{
+ u8 bist = 0;
+ int cnt = 0, ret = -1;
+
+ pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
+ bist |= PCI_BIST_START;
+ pci_write_config_word(sp->pdev, PCI_BIST, bist);
+
+ while (cnt < 20) {
+ pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
+ if (!(bist & PCI_BIST_START)) {
+ *data = (bist & PCI_BIST_CODE_MASK);
+ ret = 0;
+ break;
+ }
+ msleep(100);
+ cnt++;
+ }
+
+ return ret;
+}
+
+/**
+ * s2io_link_test - verifies the link state of the nic
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data: variable that returns the result of each of the test conducted by
+ * the driver.
+ * Description:
+ * The function verifies the link state of the NIC and updates the input
+ * argument 'data' appropriately.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_link_test(nic_t * sp, uint64_t * data)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64;
+
+ val64 = readq(&bar0->adapter_status);
+ if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
+ *data = 1;
+
+ return 0;
+}
+
+/**
+ * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data : variable that returns the result of each of the tests
+ * conducted by the driver.
+ * Description:
+ * This is one of the offline test that tests the read and write
+ * access to the RldRam chip on the NIC.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_rldram_test(nic_t * sp, uint64_t * data)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64;
+ int cnt, iteration = 0, test_pass = 0;
+
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~ADAPTER_ECC_EN;
+ writeq(val64, &bar0->adapter_control);
+
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ val64 |= MC_RLDRAM_TEST_MODE;
+ writeq(val64, &bar0->mc_rldram_test_ctrl);
+
+ val64 = readq(&bar0->mc_rldram_mrs);
+ val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+
+ val64 |= MC_RLDRAM_MRS_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+
+ while (iteration < 2) {
+ val64 = 0x55555555aaaa0000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ writeq(val64, &bar0->mc_rldram_test_d0);
+
+ val64 = 0xaaaa5a5555550000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ writeq(val64, &bar0->mc_rldram_test_d1);
+
+ val64 = 0x55aaaaaaaa5a0000ULL;
+ if (iteration == 1) {
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ }
+ writeq(val64, &bar0->mc_rldram_test_d2);
+
+ val64 = (u64) (0x0000003fffff0000ULL);
+ writeq(val64, &bar0->mc_rldram_test_add);
+
+
+ val64 = MC_RLDRAM_TEST_MODE;
+ writeq(val64, &bar0->mc_rldram_test_ctrl);
+
+ val64 |=
+ MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
+ MC_RLDRAM_TEST_GO;
+ writeq(val64, &bar0->mc_rldram_test_ctrl);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (val64 & MC_RLDRAM_TEST_DONE)
+ break;
+ msleep(200);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = MC_RLDRAM_TEST_MODE;
+ writeq(val64, &bar0->mc_rldram_test_ctrl);
+
+ val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
+ writeq(val64, &bar0->mc_rldram_test_ctrl);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (val64 & MC_RLDRAM_TEST_DONE)
+ break;
+ msleep(500);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (val64 & MC_RLDRAM_TEST_PASS)
+ test_pass = 1;
+
+ iteration++;
+ }
+
+ if (!test_pass)
+ *data = 1;
+ else
+ *data = 0;
+
+ return 0;
+}
+
+/**
+ * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ethtest : pointer to a ethtool command specific structure that will be
+ * returned to the user.
+ * @data : variable that returns the result of each of the test
+ * conducted by the driver.
+ * Description:
+ * This function conducts 5 tests (4 offline and 1 online) to determine
+ * the health of the card.
+ * Return value:
+ * void
+ */
+
+static void s2io_ethtool_test(struct net_device *dev,
+ struct ethtool_test *ethtest,
+ uint64_t * data)
+{
+ nic_t *sp = dev->priv;
+ int orig_state = netif_running(sp->dev);
+
+ if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline Tests. */
+ if (orig_state) {
+ s2io_close(sp->dev);
+ s2io_set_swapper(sp);
+ } else
+ s2io_set_swapper(sp);
+
+ if (s2io_register_test(sp, &data[0]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ s2io_reset(sp);
+ s2io_set_swapper(sp);
+
+ if (s2io_rldram_test(sp, &data[3]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ s2io_reset(sp);
+ s2io_set_swapper(sp);
+
+ if (s2io_eeprom_test(sp, &data[1]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ if (s2io_bist_test(sp, &data[4]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ if (orig_state)
+ s2io_open(sp->dev);
+
+ data[2] = 0;
+ } else {
+ /* Online Tests. */
+ if (!orig_state) {
+ DBG_PRINT(ERR_DBG,
+ "%s: is not up, cannot run test\n",
+ dev->name);
+ data[0] = -1;
+ data[1] = -1;
+ data[2] = -1;
+ data[3] = -1;
+ data[4] = -1;
+ }
+
+ if (s2io_link_test(sp, &data[2]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ data[0] = 0;
+ data[1] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ }
+}
+
+static void s2io_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats,
+ u64 * tmp_stats)
+{
+ int i = 0;
+ nic_t *sp = dev->priv;
+ StatInfo_t *stat_info = sp->mac_control.stats_info;
+
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+}
+
+static int s2io_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (XENA_REG_SPACE);
+}
+
+
+static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
+{
+ nic_t *sp = dev->priv;
+
+ return (sp->rx_csum);
+}
+
+static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
+{
+ nic_t *sp = dev->priv;
+
+ if (data)
+ sp->rx_csum = 1;
+ else
+ sp->rx_csum = 0;
+
+ return 0;
+}
+
+static int s2io_get_eeprom_len(struct net_device *dev)
+{
+ return (XENA_EEPROM_SPACE);
+}
+
+static int s2io_ethtool_self_test_count(struct net_device *dev)
+{
+ return (S2IO_TEST_LEN);
+}
+
+static void s2io_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 * data)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
+ break;
+ case ETH_SS_STATS:
+ memcpy(data, &ethtool_stats_keys,
+ sizeof(ethtool_stats_keys));
+ }
+}
+
+static int s2io_ethtool_get_stats_count(struct net_device *dev)
+{
+ return (S2IO_STAT_LEN);
+}
+
+static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = s2io_ethtool_gset,
+ .set_settings = s2io_ethtool_sset,
+ .get_drvinfo = s2io_ethtool_gdrvinfo,
+ .get_regs_len = s2io_ethtool_get_regs_len,
+ .get_regs = s2io_ethtool_gregs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = s2io_get_eeprom_len,
+ .get_eeprom = s2io_ethtool_geeprom,
+ .set_eeprom = s2io_ethtool_seeprom,
+ .get_pauseparam = s2io_ethtool_getpause_data,
+ .set_pauseparam = s2io_ethtool_setpause_data,
+ .get_rx_csum = s2io_ethtool_get_rx_csum,
+ .set_rx_csum = s2io_ethtool_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = s2io_ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif
+ .self_test_count = s2io_ethtool_self_test_count,
+ .self_test = s2io_ethtool_test,
+ .get_strings = s2io_ethtool_get_strings,
+ .phys_id = s2io_ethtool_idnic,
+ .get_stats_count = s2io_ethtool_get_stats_count,
+ .get_ethtool_stats = s2io_get_ethtool_stats
+};
+
+/**
+ * s2io_ioctl - Entry point for the Ioctl
+ * @dev : Device pointer.
+ * @ifr : An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd : This is used to distinguish between the different commands that
+ * can be passed to the IOCTL functions.
+ * Description:
+ * This function has support for ethtool, adding multiple MAC addresses on
+ * the NIC and some DBG commands for the util tool.
+ * Return value:
+ * Currently the IOCTL supports no operations, hence this function
+ * always returns -EOPNOTSUPP.
+ */
+
+static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * s2io_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: A driver entry point to change MTU size for the device.
+ * Before changing the MTU the device must be stopped.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+
+static int s2io_change_mtu(struct net_device *dev, int new_mtu)
+{
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ register u64 val64;
+
+ if (netif_running(dev)) {
+ DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
+ DBG_PRINT(ERR_DBG, "change its MTU \n");
+ return -EBUSY;
+ }
+
+ if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
+ DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
+ dev->name);
+ return -EPERM;
+ }
+
+ /* Set the new MTU into the PYLD register of the NIC */
+ val64 = new_mtu;
+ writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/**
+ * s2io_tasklet - Bottom half of the ISR.
+ * @dev_addr : address of the device structure, cast to an unsigned long.
+ * Description:
+ * This is the tasklet or the bottom half of the ISR. This is
+ * an extension of the ISR which is scheduled by the scheduler to be run
+ * when the load on the CPU is low. All low priority tasks of the ISR can
+ * be pushed into the tasklet. For now the tasklet is used only to
+ * replenish the Rx buffers in the Rx buffer descriptors.
+ * Return value:
+ * void.
+ */
+
+static void s2io_tasklet(unsigned long dev_addr)
+{
+ struct net_device *dev = (struct net_device *) dev_addr;
+ nic_t *sp = dev->priv;
+ int i, ret;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ if (!TASKLET_IN_USE) {
+ for (i = 0; i < config->rx_ring_num; i++) {
+ ret = fill_rx_buffers(sp, i);
+ if (ret == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s: Out of ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "memory in tasklet\n");
+ break;
+ } else if (ret == -EFILL) {
+ DBG_PRINT(ERR_DBG,
+ "%s: Rx Ring %d is full\n",
+ dev->name, i);
+ break;
+ }
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ }
+}
+
+/**
+ * s2io_set_link - Set the Link status
+ * @data: long pointer to device private structure
+ * Description: Sets the link status for the adapter
+ */
+
+static void s2io_set_link(unsigned long data)
+{
+ nic_t *nic = (nic_t *) data;
+ struct net_device *dev = nic->dev;
+ XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ register u64 val64;
+ u16 subid;
+
+ if (test_and_set_bit(0, &(nic->link_state))) {
+ /* The card is being reset, no point doing anything */
+ return;
+ }
+
+ subid = nic->pdev->subsystem_device;
+ /*
+	 * Allow a small delay for the NIC's self-initiated
+ * cleanup to complete.
+ */
+ msleep(100);
+
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
+ if (LINK_IS_UP(val64)) {
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_CNTL_EN;
+ writeq(val64, &bar0->adapter_control);
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= GPIO_CTRL_GPIO_0;
+ writeq(val64, &bar0->gpio_control);
+ val64 = readq(&bar0->gpio_control);
+ } else {
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ }
+ val64 = readq(&bar0->adapter_status);
+ if (!LINK_IS_UP(val64)) {
+ DBG_PRINT(ERR_DBG, "%s:", dev->name);
+ DBG_PRINT(ERR_DBG, " Link down");
+				DBG_PRINT(ERR_DBG, " after ");
+ DBG_PRINT(ERR_DBG, "enabling ");
+ DBG_PRINT(ERR_DBG, "device \n");
+ }
+ if (nic->device_enabled_once == FALSE) {
+ nic->device_enabled_once = TRUE;
+ }
+ s2io_link(nic, LINK_UP);
+ } else {
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 &= ~GPIO_CTRL_GPIO_0;
+ writeq(val64, &bar0->gpio_control);
+ val64 = readq(&bar0->gpio_control);
+ }
+ s2io_link(nic, LINK_DOWN);
+ }
+ } else { /* NIC is not Quiescent. */
+ DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
+ DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
+ netif_stop_queue(dev);
+ }
+ clear_bit(0, &(nic->link_state));
+}
+
+static void s2io_card_down(nic_t * sp)
+{
+ int cnt = 0;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ unsigned long flags;
+ register u64 val64 = 0;
+
+ /* If s2io_set_link task is executing, wait till it completes. */
+ while (test_and_set_bit(0, &(sp->link_state)))
+ msleep(50);
+ atomic_set(&sp->card_state, CARD_DOWN);
+
+ /* disable Tx and Rx traffic on the NIC */
+ stop_nic(sp);
+
+ /* Kill tasklet. */
+ tasklet_kill(&sp->task);
+
+ /* Check if the device is Quiescent and then Reset the NIC */
+ do {
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
+ break;
+ }
+
+ msleep(50);
+ cnt++;
+ if (cnt == 10) {
+ DBG_PRINT(ERR_DBG,
+ "s2io_close:Device not Quiescent ");
+			DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
+ (unsigned long long) val64);
+ break;
+ }
+ } while (1);
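+	/*
+	 * The loop above waits for up to ~500ms (10 x 50ms) for the adapter
+	 * to quiesce before the reset below is issued.
+	 */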
+ spin_lock_irqsave(&sp->tx_lock, flags);
+ s2io_reset(sp);
+
+ /* Free all unused Tx and Rx buffers */
+ free_tx_buffers(sp);
+ free_rx_buffers(sp);
+
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+ clear_bit(0, &(sp->link_state));
+}
+
+static int s2io_card_up(nic_t * sp)
+{
+ int i, ret;
+ mac_info_t *mac_control;
+ struct config_param *config;
+ struct net_device *dev = (struct net_device *) sp->dev;
+
+ /* Initialize the H/W I/O registers */
+ if (init_nic(sp) != 0) {
+ DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
+ dev->name);
+ return -ENODEV;
+ }
+
+ /*
+ * Initializing the Rx buffers. For now we are considering only 1
+ * Rx ring and initializing buffers into 30 Rx blocks
+ */
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ if ((ret = fill_rx_buffers(sp, i))) {
+ DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
+ dev->name);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENOMEM;
+ }
+ DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
+ atomic_read(&sp->rx_bufs_left[i]));
+ }
+
+ /* Setting its receive mode */
+ s2io_set_multicast(dev);
+
+ /* Enable tasklet for the device */
+ tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
+
+ /* Enable Rx Traffic and interrupts on the NIC */
+ if (start_nic(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
+ tasklet_kill(&sp->task);
+ s2io_reset(sp);
+ free_irq(dev->irq, dev);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ atomic_set(&sp->card_state, CARD_UP);
+ return 0;
+}
+
+/**
+ * s2io_restart_nic - Resets the NIC.
+ * @data : long pointer to the device private structure
+ * Description:
+ * This function is scheduled to be run by the s2io_tx_watchdog
+ * function after 0.5 secs to reset the NIC. The idea is to reduce
+ * the run time of the watch dog routine which is run holding a
+ * spin lock.
+ */
+
+static void s2io_restart_nic(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ nic_t *sp = dev->priv;
+
+ s2io_card_down(sp);
+ if (s2io_card_up(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
+ dev->name);
+ }
+ netif_wake_queue(dev);
+ DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
+ dev->name);
+}
+
+/**
+ * s2io_tx_watchdog - Watchdog for transmit side.
+ * @dev : Pointer to net device structure
+ * Description:
+ * This function is triggered if the Tx Queue is stopped
+ * for a pre-defined amount of time when the Interface is still up.
+ * If the Interface is jammed in such a situation, the hardware is
+ * reset (by s2io_card_down) and restarted again (by s2io_card_up) to
+ * overcome any problem that might have been caused in the hardware.
+ * Return value:
+ * void
+ */
+
+static void s2io_tx_watchdog(struct net_device *dev)
+{
+ nic_t *sp = dev->priv;
+
+ if (netif_carrier_ok(dev)) {
+ schedule_work(&sp->rst_timer_task);
+ }
+}
+
+/**
+ * rx_osm_handler - To perform some OS related operations on SKB.
+ * @sp: private member of the device structure, pointer to s2io_nic structure.
+ * @skb : the socket buffer pointer.
+ * @len : length of the packet
+ * @cksum : FCS checksum of the frame.
+ * @ring_no : the ring from which this RxD was extracted.
+ * Description:
+ * This function is called by the Rx interrupt service routine to perform
+ * some OS related operations on the SKB before passing it to the upper
+ * layers. It mainly checks if the checksum is OK, if so adds it to the
+ * SKBs cksum variable, increments the Rx packet count and passes the SKB
+ * to the upper layer. If the checksum is wrong, it increments the Rx
+ * packet error count, frees the SKB and returns error.
+ * Return value:
+ * SUCCESS on success and -1 on failure.
+ */
+#ifndef CONFIG_2BUFF_MODE
+static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
+#else
+static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
+ buffAdd_t * ba)
+#endif
+{
+ struct net_device *dev = (struct net_device *) sp->dev;
+ struct sk_buff *skb =
+ (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
+ u16 l3_csum, l4_csum;
+#ifdef CONFIG_2BUFF_MODE
+ int buf0_len, buf2_len;
+ unsigned char *buff;
+#endif
+
+ l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
+ if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
+ l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
+ if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
+ /*
+ * NIC verifies if the Checksum of the received
+ * frame is Ok or not and accordingly returns
+ * a flag in the RxD.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ /*
+ * Packet with erroneous checksum, let the
+ * upper layers deal with it.
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ if (rxdp->Control_1 & RXD_T_CODE) {
+ unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
+ DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+ dev->name, err);
+ }
+#ifdef CONFIG_2BUFF_MODE
+ buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
+ buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
+#endif
+
+ skb->dev = dev;
+#ifndef CONFIG_2BUFF_MODE
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ buff = skb_push(skb, buf0_len);
+ memcpy(buff, ba->ba_0, buf0_len);
+ skb_put(skb, buf2_len);
+ skb->protocol = eth_type_trans(skb, dev);
+#endif
+
+#ifdef CONFIG_S2IO_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif
+
+ dev->last_rx = jiffies;
+ sp->rx_pkt_count++;
+ sp->stats.rx_packets++;
+#ifndef CONFIG_2BUFF_MODE
+ sp->stats.rx_bytes += len;
+#else
+ sp->stats.rx_bytes += buf0_len + buf2_len;
+#endif
+
+ atomic_dec(&sp->rx_bufs_left[ring_no]);
+ rxdp->Host_Control = 0;
+ return SUCCESS;
+}
+
+/**
+ * s2io_link - stops/starts the Tx queue.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @link : indicates whether link is UP/DOWN.
+ * Description:
+ * This function stops/starts the Tx queue depending on whether the link
+ * status of the NIC is down or up. This is called by the Alarm
+ * interrupt handler whenever a link change interrupt comes up.
+ * Return value:
+ * void.
+ */
+
+static void s2io_link(nic_t * sp, int link)
+{
+ struct net_device *dev = (struct net_device *) sp->dev;
+
+ if (link != sp->last_link_state) {
+ if (link == LINK_DOWN) {
+ DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
+ netif_carrier_off(dev);
+ } else {
+ DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
+ netif_carrier_on(dev);
+ }
+ }
+ sp->last_link_state = link;
+}
+
+/**
+ * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * Description:
+ * This function initializes a few of the PCI and PCI-X configuration registers
+ * with recommended values.
+ * Return value:
+ * void
+ */
+
+static void s2io_init_pci(nic_t * sp)
+{
+ u16 pci_cmd = 0;
+
+ /* Enable Data Parity Error Recovery in PCI-X command register. */
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(sp->pcix_cmd));
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ (sp->pcix_cmd | 1));
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(sp->pcix_cmd));
+
+ /* Set the PErr Response bit in PCI command register. */
+ pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(sp->pdev, PCI_COMMAND,
+ (pci_cmd | PCI_COMMAND_PARITY));
+ pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
+
+ /* Set MMRB count to 1024 in PCI-X Command register. */
+ sp->pcix_cmd &= 0xFFF3;
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(sp->pcix_cmd));
+
+ /* Setting Maximum outstanding splits based on system type. */
+ sp->pcix_cmd &= 0xFF8F;
+
+ sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ sp->pcix_cmd);
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(sp->pcix_cmd));
+ /* Forcibly disabling relaxed ordering capability of the card. */
+ sp->pcix_cmd &= 0xfffd;
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ sp->pcix_cmd);
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(sp->pcix_cmd));
+}
+
+MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
+MODULE_LICENSE("GPL");
+module_param(tx_fifo_num, int, 0);
+module_param_array(tx_fifo_len, int, NULL, 0);
+module_param(rx_ring_num, int, 0);
+module_param_array(rx_ring_sz, int, NULL, 0);
+module_param(Stats_refresh_time, int, 0);
+module_param(rmac_pause_time, int, 0);
+module_param(mc_pause_threshold_q0q3, int, 0);
+module_param(mc_pause_threshold_q4q7, int, 0);
+module_param(shared_splits, int, 0);
+module_param(tmac_util_period, int, 0);
+module_param(rmac_util_period, int, 0);
+#ifndef CONFIG_S2IO_NAPI
+module_param(indicate_max_pkts, int, 0);
+#endif
+/**
+ * s2io_init_nic - Initialization of the adapter .
+ * @pdev : structure containing the PCI related information of the device.
+ * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
+ * Description:
+ * The function initializes an adapter identified by the pci_dev structure.
+ * All OS related initialization, including memory and device structure
+ * allocation and initialization of the device private variables, is done.
+ * Also the swapper
+ * control register is initialized to enable read and write into the I/O
+ * registers of the device.
+ * Return value:
+ * returns 0 on success and negative on failure.
+ */
+
+static int __devinit
+s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
+{
+ nic_t *sp;
+ struct net_device *dev;
+ char *dev_name = "S2IO 10GE NIC";
+ int i, j, ret;
+ int dma_flag = FALSE;
+ u32 mac_up, mac_down;
+ u64 val64 = 0, tmp64 = 0;
+ XENA_dev_config_t __iomem *bar0 = NULL;
+ u16 subid;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+
+ DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
+ s2io_driver_version);
+
+ if ((ret = pci_enable_device(pdev))) {
+ DBG_PRINT(ERR_DBG,
+ "s2io_init_nic: pci_enable_device failed\n");
+ return ret;
+ }
+
+ if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
+ dma_flag = TRUE;
+
+ if (pci_set_consistent_dma_mask
+ (pdev, 0xffffffffffffffffULL)) {
+			DBG_PRINT(ERR_DBG,
+				  "Unable to obtain 64bit DMA for "
+				  "consistent allocations\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+ DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
+ } else {
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ if (pci_request_regions(pdev, s2io_driver_name)) {
+		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
+ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(nic_t));
+ if (dev == NULL) {
+ DBG_PRINT(ERR_DBG, "Device allocation failed\n");
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ return -ENODEV;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Private member variable initialized to s2io NIC structure */
+ sp = dev->priv;
+ memset(sp, 0, sizeof(nic_t));
+ sp->dev = dev;
+ sp->pdev = pdev;
+ sp->vendor_id = pdev->vendor;
+ sp->device_id = pdev->device;
+ sp->high_dma_flag = dma_flag;
+ sp->irq = pdev->irq;
+ sp->device_enabled_once = FALSE;
+ strcpy(sp->name, dev_name);
+
+ /* Initialize some PCI/PCI-X fields of the NIC. */
+ s2io_init_pci(sp);
+
+ /*
+ * Setting the device configuration parameters.
+ * Most of these parameters can be specified by the user during
+ * module insertion as they are module loadable parameters. If
+	 * these parameters are not specified during load time, they
+ * are initialized with default values.
+ */
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+
+ /* Tx side parameters. */
+ tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
+ config->tx_fifo_num = tx_fifo_num;
+ for (i = 0; i < MAX_TX_FIFOS; i++) {
+ config->tx_cfg[i].fifo_len = tx_fifo_len[i];
+ config->tx_cfg[i].fifo_priority = i;
+ }
+
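+	/*
+	 * Default to utilization-based Tx completion interrupts; the loop
+	 * below falls back to per-list interrupts for the whole device if
+	 * any FIFO is shorter than 65 TxDLs.
+	 */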
+ config->tx_intr_type = TXD_INT_TYPE_UTILZ;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ config->tx_cfg[i].f_no_snoop =
+ (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
+ if (config->tx_cfg[i].fifo_len < 65) {
+ config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
+ break;
+ }
+ }
+ config->max_txds = MAX_SKB_FRAGS;
+
+ /* Rx side parameters. */
+ rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
+ config->rx_ring_num = rx_ring_num;
+ for (i = 0; i < MAX_RX_RINGS; i++) {
+ config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
+ (MAX_RXDS_PER_BLOCK + 1);
+ config->rx_cfg[i].ring_priority = i;
+ }
+
+ for (i = 0; i < rx_ring_num; i++) {
+ config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
+ config->rx_cfg[i].f_no_snoop =
+ (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
+ }
+
+ /* Setting Mac Control parameters */
+ mac_control->rmac_pause_time = rmac_pause_time;
+ mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
+ mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
+
+
+ /* Initialize Ring buffer parameters. */
+ for (i = 0; i < config->rx_ring_num; i++)
+ atomic_set(&sp->rx_bufs_left[i], 0);
+
+ /* initialize the shared memory used by the NIC and the host */
+ if (init_shared_mem(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto mem_alloc_failed;
+ }
+
+ sp->bar0 = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!sp->bar0) {
+ DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto bar0_remap_failed;
+ }
+
+ sp->bar1 = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!sp->bar1) {
+ DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto bar1_remap_failed;
+ }
+
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) sp->bar0;
+
+	/* Initialize each Tx FIFO's start pointer from the BAR1 address. */
+ for (j = 0; j < MAX_TX_FIFOS; j++) {
+ mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
+ (sp->bar1 + (j * 0x00020000));
+ }
+
+ /* Driver entry points */
+ dev->open = &s2io_open;
+ dev->stop = &s2io_close;
+ dev->hard_start_xmit = &s2io_xmit;
+ dev->get_stats = &s2io_get_stats;
+ dev->set_multicast_list = &s2io_set_multicast;
+ dev->do_ioctl = &s2io_ioctl;
+ dev->change_mtu = &s2io_change_mtu;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ /*
+ * will use eth_mac_addr() for dev->set_mac_address
+ * mac address will be set every time dev->open() is called
+ */
+#ifdef CONFIG_S2IO_NAPI
+ dev->poll = s2io_poll;
+ dev->weight = 90;
+#endif
+
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ if (sp->high_dma_flag == TRUE)
+ dev->features |= NETIF_F_HIGHDMA;
+#ifdef NETIF_F_TSO
+ dev->features |= NETIF_F_TSO;
+#endif
+
+ dev->tx_timeout = &s2io_tx_watchdog;
+ dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
+ INIT_WORK(&sp->rst_timer_task,
+ (void (*)(void *)) s2io_restart_nic, dev);
+ INIT_WORK(&sp->set_link_task,
+ (void (*)(void *)) s2io_set_link, sp);
+
+ pci_save_state(sp->pdev);
+
+ /* Setting swapper control on the NIC, for proper reset operation */
+ if (s2io_set_swapper(sp)) {
+ DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
+ dev->name);
+ ret = -EAGAIN;
+ goto set_swap_failed;
+ }
+
+ /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
+ fix_mac_address(sp);
+ s2io_reset(sp);
+
+ /*
+ * Setting swapper control on the NIC, so the MAC address can be read.
+ */
+ if (s2io_set_swapper(sp)) {
+ DBG_PRINT(ERR_DBG,
+ "%s: S2IO: swapper settings are wrong\n",
+ dev->name);
+ ret = -EAGAIN;
+ goto set_swap_failed;
+ }
+
+ /*
+ * MAC address initialization.
+ * For now only one mac address will be read and used.
+ */
+ bar0 = sp->bar0;
+ val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ wait_for_cmd_complete(sp);
+
+ tmp64 = readq(&bar0->rmac_addr_data0_mem);
+ mac_down = (u32) tmp64;
+ mac_up = (u32) (tmp64 >> 32);
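+	/*
+	 * The upper 32 bits of the register hold octets 0-3 of the station
+	 * address (most significant byte first); the top two bytes of the
+	 * lower 32 bits hold octets 4 and 5, hence the shifts below.
+	 */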
+
+	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
+
+ sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
+ sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
+ sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
+ sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
+ sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
+ sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
+
+ DBG_PRINT(INIT_DBG,
+ "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
+ sp->def_mac_addr[0].mac_addr[0],
+ sp->def_mac_addr[0].mac_addr[1],
+ sp->def_mac_addr[0].mac_addr[2],
+ sp->def_mac_addr[0].mac_addr[3],
+ sp->def_mac_addr[0].mac_addr[4],
+ sp->def_mac_addr[0].mac_addr[5]);
+
+ /* Set the factory defined MAC address initially */
+ dev->addr_len = ETH_ALEN;
+ memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+
+ /*
+ * Initialize the tasklet status and link state flags
+	 * and the card state parameter.
+ */
+ atomic_set(&(sp->card_state), 0);
+ sp->tasklet_status = 0;
+ sp->link_state = 0;
+
+
+ /* Initialize spinlocks */
+ spin_lock_init(&sp->tx_lock);
+#ifndef CONFIG_S2IO_NAPI
+ spin_lock_init(&sp->put_lock);
+#endif
+
+ /*
+ * SXE-002: Configure link and activity LED to init state
+ * on driver load.
+ */
+ subid = sp->pdev->subsystem_device;
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *) bar0 + 0x2700);
+ val64 = readq(&bar0->gpio_control);
+ }
+
+ sp->rx_csum = 1; /* Rx chksum verify enabled by default */
+
+ if (register_netdev(dev)) {
+ DBG_PRINT(ERR_DBG, "Device registration failed\n");
+ ret = -ENODEV;
+ goto register_failed;
+ }
+
+ /*
+	 * Mark the link state as down at this point; when the link change
+	 * interrupt arrives the state will automatically be changed to
+	 * the right one.
+ */
+ netif_carrier_off(dev);
+ sp->last_link_state = LINK_DOWN;
+
+ return 0;
+
+ register_failed:
+ set_swap_failed:
+ iounmap(sp->bar1);
+ bar1_remap_failed:
+ iounmap(sp->bar0);
+ bar0_remap_failed:
+ mem_alloc_failed:
+ free_shared_mem(sp);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ return ret;
+}
+
+/**
+ * s2io_rem_nic - Free the PCI device
+ * @pdev: structure containing the PCI related information of the device.
+ * Description: This function is called by the PCI subsystem to release a
+ * PCI device and free all resources held by the device. This could
+ * be in response to a hot-plug event or when the driver is to be removed
+ * from memory.
+ */
+
+static void __devexit s2io_rem_nic(struct pci_dev *pdev)
+{
+ struct net_device *dev =
+ (struct net_device *) pci_get_drvdata(pdev);
+ nic_t *sp;
+
+ if (dev == NULL) {
+ DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
+ return;
+ }
+
+ sp = dev->priv;
+ unregister_netdev(dev);
+
+ free_shared_mem(sp);
+ iounmap(sp->bar0);
+ iounmap(sp->bar1);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(dev);
+}
+
+/**
+ * s2io_starter - Entry point for the driver
+ * Description: This function is the entry point for the driver. It verifies
+ * the module loadable parameters and initializes PCI configuration space.
+ */
+
+int __init s2io_starter(void)
+{
+ return pci_module_init(&s2io_driver);
+}
+
+/**
+ * s2io_closer - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver. It
+ * unregisters the driver.
+ */
+
+static void s2io_closer(void)
+{
+ pci_unregister_driver(&s2io_driver);
+ DBG_PRINT(INIT_DBG, "cleanup done\n");
+}
+
+module_init(s2io_starter);
+module_exit(s2io_closer);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
new file mode 100644
index 000000000000..1711c8c3dc99
--- /dev/null
+++ b/drivers/net/s2io.h
@@ -0,0 +1,760 @@
+/************************************************************************
+ * s2io.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
+ * Copyright(c) 2002-2005 Neterion Inc.
+
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ ************************************************************************/
+#ifndef _S2IO_H
+#define _S2IO_H
+
+#define TBD 0
+#define BIT(loc) (0x8000000000000000ULL >> (loc))
+#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
+#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff)
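+/*
+ * BIT(loc) selects a bit counted from the most significant end of a 64-bit
+ * word, so BIT(0) is 0x8000000000000000ULL.  vBIT(val, loc, sz) places an
+ * sz-bit value with its MSB at position loc; e.g. vBIT(0x6, 0, 4) puts 0x6
+ * in the top nibble (cf. TXD_SET_MARKER below).
+ */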
+
+#ifndef BOOL
+#define BOOL int
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+#undef SUCCESS
+#define SUCCESS 0
+#define FAILURE -1
+
+/* Maximum outstanding splits to be configured into xena. */
+typedef enum xena_max_outstanding_splits {
+ XENA_ONE_SPLIT_TRANSACTION = 0,
+ XENA_TWO_SPLIT_TRANSACTION = 1,
+ XENA_THREE_SPLIT_TRANSACTION = 2,
+ XENA_FOUR_SPLIT_TRANSACTION = 3,
+ XENA_EIGHT_SPLIT_TRANSACTION = 4,
+ XENA_TWELVE_SPLIT_TRANSACTION = 5,
+ XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
+ XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
+} xena_max_outstanding_splits;
+#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
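+/*
+ * For example, XENA_MAX_OUTSTANDING_SPLITS(XENA_EIGHT_SPLIT_TRANSACTION)
+ * evaluates to 0x40, i.e. the selector shifted left by four bits before
+ * being written into its register field.
+ */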
+
+/* OS concerned variables and constants */
+#define WATCH_DOG_TIMEOUT 5*HZ
+#define EFILL 0x1234
+#define ALIGN_SIZE 127
+#define PCIX_COMMAND_REGISTER 0x62
+
+/*
+ * Debug related variables.
+ */
+/* different debug levels. */
+#define ERR_DBG 0
+#define INIT_DBG 1
+#define INFO_DBG 2
+#define TX_DBG 3
+#define INTR_DBG 4
+
+/* Global variable that defines the present debug level of the driver. */
+static int debug_level = ERR_DBG; /* Default level. */
+
+/* DEBUG message print. */
+#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
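+/*
+ * Usage example: DBG_PRINT(INIT_DBG, "%s: link up\n", dev->name) prints
+ * only when debug_level is at least INIT_DBG; with the default level of
+ * ERR_DBG only error messages are emitted.
+ */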
+
+/* Protocol assist features of the NIC */
+#define L3_CKSUM_OK 0xFFFF
+#define L4_CKSUM_OK 0xFFFF
+#define S2IO_JUMBO_SIZE 9600
+
+/* The statistics block of Xena */
+typedef struct stat_block {
+/* Tx MAC statistics counters. */
+ u32 tmac_data_octets;
+ u32 tmac_frms;
+ u64 tmac_drop_frms;
+ u32 tmac_bcst_frms;
+ u32 tmac_mcst_frms;
+ u64 tmac_pause_ctrl_frms;
+ u32 tmac_ucst_frms;
+ u32 tmac_ttl_octets;
+ u32 tmac_any_err_frms;
+ u32 tmac_nucst_frms;
+ u64 tmac_ttl_less_fb_octets;
+ u64 tmac_vld_ip_octets;
+ u32 tmac_drop_ip;
+ u32 tmac_vld_ip;
+ u32 tmac_rst_tcp;
+ u32 tmac_icmp;
+ u64 tmac_tcp;
+ u32 reserved_0;
+ u32 tmac_udp;
+
+/* Rx MAC Statistics counters. */
+ u32 rmac_data_octets;
+ u32 rmac_vld_frms;
+ u64 rmac_fcs_err_frms;
+ u64 rmac_drop_frms;
+ u32 rmac_vld_bcst_frms;
+ u32 rmac_vld_mcst_frms;
+ u32 rmac_out_rng_len_err_frms;
+ u32 rmac_in_rng_len_err_frms;
+ u64 rmac_long_frms;
+ u64 rmac_pause_ctrl_frms;
+ u64 rmac_unsup_ctrl_frms;
+ u32 rmac_accepted_ucst_frms;
+ u32 rmac_ttl_octets;
+ u32 rmac_discarded_frms;
+ u32 rmac_accepted_nucst_frms;
+ u32 reserved_1;
+ u32 rmac_drop_events;
+ u64 rmac_ttl_less_fb_octets;
+ u64 rmac_ttl_frms;
+ u64 reserved_2;
+ u32 rmac_usized_frms;
+ u32 reserved_3;
+ u32 rmac_frag_frms;
+ u32 rmac_osized_frms;
+ u32 reserved_4;
+ u32 rmac_jabber_frms;
+ u64 rmac_ttl_64_frms;
+ u64 rmac_ttl_65_127_frms;
+ u64 reserved_5;
+ u64 rmac_ttl_128_255_frms;
+ u64 rmac_ttl_256_511_frms;
+ u64 reserved_6;
+ u64 rmac_ttl_512_1023_frms;
+ u64 rmac_ttl_1024_1518_frms;
+ u32 rmac_ip;
+ u32 reserved_7;
+ u64 rmac_ip_octets;
+ u32 rmac_drop_ip;
+ u32 rmac_hdr_err_ip;
+ u32 reserved_8;
+ u32 rmac_icmp;
+ u64 rmac_tcp;
+ u32 rmac_err_drp_udp;
+ u32 rmac_udp;
+ u64 rmac_xgmii_err_sym;
+ u64 rmac_frms_q0;
+ u64 rmac_frms_q1;
+ u64 rmac_frms_q2;
+ u64 rmac_frms_q3;
+ u64 rmac_frms_q4;
+ u64 rmac_frms_q5;
+ u64 rmac_frms_q6;
+ u64 rmac_frms_q7;
+ u16 rmac_full_q3;
+ u16 rmac_full_q2;
+ u16 rmac_full_q1;
+ u16 rmac_full_q0;
+ u16 rmac_full_q7;
+ u16 rmac_full_q6;
+ u16 rmac_full_q5;
+ u16 rmac_full_q4;
+ u32 reserved_9;
+ u32 rmac_pause_cnt;
+ u64 rmac_xgmii_data_err_cnt;
+ u64 rmac_xgmii_ctrl_err_cnt;
+ u32 rmac_err_tcp;
+ u32 rmac_accepted_ip;
+
+/* PCI/PCI-X Read transaction statistics. */
+ u32 new_rd_req_cnt;
+ u32 rd_req_cnt;
+ u32 rd_rtry_cnt;
+ u32 new_rd_req_rtry_cnt;
+
+/* PCI/PCI-X Write/Read transaction statistics. */
+ u32 wr_req_cnt;
+ u32 wr_rtry_rd_ack_cnt;
+ u32 new_wr_req_rtry_cnt;
+ u32 new_wr_req_cnt;
+ u32 wr_disc_cnt;
+ u32 wr_rtry_cnt;
+
+/* PCI/PCI-X Write / DMA Transaction statistics. */
+ u32 txp_wr_cnt;
+ u32 rd_rtry_wr_ack_cnt;
+ u32 txd_wr_cnt;
+ u32 txd_rd_cnt;
+ u32 rxd_wr_cnt;
+ u32 rxd_rd_cnt;
+ u32 rxf_wr_cnt;
+ u32 txf_rd_cnt;
+} StatInfo_t;
+
+/* Structures representing different init time configuration
+ * parameters of the NIC.
+ */
+
+/* Maintains Per FIFO related information. */
+typedef struct tx_fifo_config {
+#define MAX_AVAILABLE_TXDS 8192
+	u32 fifo_len;		/* specifies length of FIFO up to 8192, i.e. number of TxDLs */
+/* Priority definition */
+#define TX_FIFO_PRI_0 0 /*Highest */
+#define TX_FIFO_PRI_1 1
+#define TX_FIFO_PRI_2 2
+#define TX_FIFO_PRI_3 3
+#define TX_FIFO_PRI_4 4
+#define TX_FIFO_PRI_5 5
+#define TX_FIFO_PRI_6 6
+#define TX_FIFO_PRI_7 7 /*lowest */
+	u8 fifo_priority;	/* specifies priority level for FIFO */
+	/* user should not set two FIFOs with the same priority */
+ u8 f_no_snoop;
+#define NO_SNOOP_TXD 0x01
+#define NO_SNOOP_TXD_BUFFER 0x02
+} tx_fifo_config_t;
+
+
+/* Maintains per Ring related information */
+typedef struct rx_ring_config {
+ u32 num_rxd; /*No of RxDs per Rx Ring */
+#define RX_RING_PRI_0 0 /* highest */
+#define RX_RING_PRI_1 1
+#define RX_RING_PRI_2 2
+#define RX_RING_PRI_3 3
+#define RX_RING_PRI_4 4
+#define RX_RING_PRI_5 5
+#define RX_RING_PRI_6 6
+#define RX_RING_PRI_7 7 /* lowest */
+
+ u8 ring_priority; /*Specifies service priority of ring */
+ /* OSM should not set any two rings with same priority */
+ u8 ring_org; /*Organization of ring */
+#define RING_ORG_BUFF1 0x01
+#define RX_RING_ORG_BUFF3 0x03
+#define RX_RING_ORG_BUFF5 0x05
+
+ u8 f_no_snoop;
+#define NO_SNOOP_RXD 0x01
+#define NO_SNOOP_RXD_BUFFER 0x02
+} rx_ring_config_t;
+
+/* This structure contains the values of the tunable parameters
+ * of the H/W
+ */
+struct config_param {
+/* Tx Side */
+ u32 tx_fifo_num; /*Number of Tx FIFOs */
+#define MAX_TX_FIFOS 8
+
+ tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
+ u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
+ u64 tx_intr_type;
+ /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
+
+/* Rx Side */
+ u32 rx_ring_num; /*Number of receive rings */
+#define MAX_RX_RINGS 8
+#define MAX_RX_BLOCKS_PER_RING 150
+
+ rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
+
+#define HEADER_ETHERNET_II_802_3_SIZE 14
+#define HEADER_802_2_SIZE 3
+#define HEADER_SNAP_SIZE 5
+#define HEADER_VLAN_SIZE 4
+
+#define MIN_MTU 46
+#define MAX_PYLD 1500
+#define MAX_MTU (MAX_PYLD+18)
+#define MAX_MTU_VLAN (MAX_PYLD+22)
+#define MAX_PYLD_JUMBO 9600
+#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
+#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
+};
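+/*
+ * With MAX_PYLD of 1500 the defines above work out to a MAX_MTU of 1518
+ * (payload plus the 14-byte Ethernet header and 4-byte FCS) and 1522 with
+ * a VLAN tag; the jumbo variants use the 9600-byte payload instead.
+ */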
+
+/* Structure representing MAC Addrs */
+typedef struct mac_addr {
+ u8 mac_addr[ETH_ALEN];
+} macaddr_t;
+
+/* Structure that represents every FIFO element in the BAR1
+ * address location.
+ */
+typedef struct _TxFIFO_element {
+ u64 TxDL_Pointer;
+
+ u64 List_Control;
+#define TX_FIFO_LAST_TXD_NUM( val) vBIT(val,0,8)
+#define TX_FIFO_FIRST_LIST BIT(14)
+#define TX_FIFO_LAST_LIST BIT(15)
+#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
+#define TX_FIFO_SPECIAL_FUNC BIT(23)
+#define TX_FIFO_DS_NO_SNOOP BIT(31)
+#define TX_FIFO_BUFF_NO_SNOOP BIT(30)
+} TxFIFO_element_t;
+
+/* Tx descriptor structure */
+typedef struct _TxD {
+ u64 Control_1;
+/* bit mask */
+#define TXD_LIST_OWN_XENA BIT(7)
+#define TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define TXD_T_CODE_OK(val)                  (!(val & TXD_T_CODE))
+#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12)
+#define TXD_GATHER_CODE (BIT(22) | BIT(23))
+#define TXD_GATHER_CODE_FIRST BIT(22)
+#define TXD_GATHER_CODE_LAST BIT(23)
+#define TXD_TCP_LSO_EN BIT(30)
+#define TXD_UDP_COF_EN BIT(31)
+#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
+#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
+
+ u64 Control_2;
+#define TXD_TX_CKO_CONTROL (BIT(5)|BIT(6)|BIT(7))
+#define TXD_TX_CKO_IPV4_EN BIT(5)
+#define TXD_TX_CKO_TCP_EN BIT(6)
+#define TXD_TX_CKO_UDP_EN BIT(7)
+#define TXD_VLAN_ENABLE BIT(15)
+#define TXD_VLAN_TAG(val) vBIT(val,16,16)
+#define TXD_INT_NUMBER(val) vBIT(val,34,6)
+#define TXD_INT_TYPE_PER_LIST BIT(47)
+#define TXD_INT_TYPE_UTILZ BIT(46)
+#define TXD_SET_MARKER vBIT(0x6,0,4)
+
+ u64 Buffer_Pointer;
+ u64 Host_Control; /* reserved for host */
+} TxD_t;
+
+/* Structure to hold the phy and virt addr of every TxDL. */
+typedef struct list_info_hold {
+ dma_addr_t list_phy_addr;
+ void *list_virt_addr;
+} list_info_hold_t;
+
+/* Rx descriptor structure */
+typedef struct _RxD_t {
+ u64 Host_Control; /* reserved for host */
+ u64 Control_1;
+#define RXD_OWN_XENA BIT(7)
+#define RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
+#define RXD_FRAME_PROTO_IPV4 BIT(27)
+#define RXD_FRAME_PROTO_IPV6 BIT(28)
+#define RXD_FRAME_PROTO_TCP BIT(30)
+#define RXD_FRAME_PROTO_UDP BIT(31)
+#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
+#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF)
+#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
+
+ u64 Control_2;
+#ifndef CONFIG_2BUFF_MODE
+#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
+#define SET_BUFFER0_SIZE(val) vBIT(val,0,16)
+#else
+#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16)
+#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
+#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
+#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
+#define SET_BUFFER1_SIZE(val) vBIT(val,16,16)
+#define SET_BUFFER2_SIZE(val) vBIT(val,32,16)
+#endif
+
+#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
+#define SET_VLAN_TAG(val) vBIT(val,48,16)
+#define SET_NUM_TAG(val) vBIT(val,16,32)
+
+#ifndef CONFIG_2BUFF_MODE
+#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16)))
+#else
+#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
+ >> 48)
+#define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \
+ >> 32)
+#define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \
+ >> 16)
+#define BUF0_LEN 40
+#define BUF1_LEN 1
+#endif
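+/*
+ * In 2-buffer mode Control_2 carries a size field per buffer, so the
+ * accessor macros above differ from the single-buffer layout; BUF0_LEN
+ * and BUF1_LEN define the fixed lengths used for buffers 0 and 1.
+ */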
+
+ u64 Buffer0_ptr;
+#ifdef CONFIG_2BUFF_MODE
+ u64 Buffer1_ptr;
+ u64 Buffer2_ptr;
+#endif
+} RxD_t;
+
+/* Structure that represents the Rx descriptor block which contains
+ * 128 Rx descriptors.
+ */
+#ifndef CONFIG_2BUFF_MODE
+typedef struct _RxD_block {
+#define MAX_RXDS_PER_BLOCK 127
+ RxD_t rxd[MAX_RXDS_PER_BLOCK];
+
+ u64 reserved_0;
+#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
+ u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
+ * Rxd in this blk */
+ u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
+	u64 pNext_RxD_Blk_physical;	/* Buff0_ptr. In a 32-bit arch
+ * the upper 32 bits should
+ * be 0 */
+} RxD_block_t;
+#else
+typedef struct _RxD_block {
+#define MAX_RXDS_PER_BLOCK 85
+ RxD_t rxd[MAX_RXDS_PER_BLOCK];
+
+#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
+ u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
+ * in this blk */
+	u64 pNext_RxD_Blk_physical;	/* Physical pointer to next block. */
+} RxD_block_t;
+#define SIZE_OF_BLOCK 4096
+
+/* Structure to hold virtual addresses of Buf0 and Buf1 in
+ * 2buf mode. */
+typedef struct bufAdd {
+ void *ba_0_org;
+ void *ba_1_org;
+ void *ba_0;
+ void *ba_1;
+} buffAdd_t;
+#endif
+
+/* Structure which stores all the MAC control parameters */
+
+/* This structure stores the offset of the RxD in the ring
+ * from which the Rx Interrupt processor can start picking
+ * up the RxDs for processing.
+ */
+typedef struct _rx_curr_get_info_t {
+ u32 block_index;
+ u32 offset;
+ u32 ring_len;
+} rx_curr_get_info_t;
+
+typedef rx_curr_get_info_t rx_curr_put_info_t;
+
+/* This structure stores the offset of the TxDl in the FIFO
+ * from which the Tx Interrupt processor can start picking
+ * up the TxDLs for send complete interrupt processing.
+ */
+typedef struct {
+ u32 offset;
+ u32 fifo_len;
+} tx_curr_get_info_t;
+
+typedef tx_curr_get_info_t tx_curr_put_info_t;
+
+/* Information related to the Tx and Rx FIFOs and Rings of Xena
+ * is maintained in this structure.
+ */
+typedef struct mac_info {
+/* rx side stuff */
+	/* Put pointer info which indicates which RxD has to be replenished
+ * with a new buffer.
+ */
+ rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS];
+
+	/* Get pointer info which indicates the last RxD that was
+ * processed by the driver.
+ */
+ rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS];
+
+ u16 rmac_pause_time;
+ u16 mc_pause_threshold_q0q3;
+ u16 mc_pause_threshold_q4q7;
+
+/* tx side stuff */
+ /* logical pointer of start of each Tx FIFO */
+ TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
+
+/* Current offset within tx_FIFO_start, where the driver writes the next Tx frame */
+ tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS];
+ tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS];
+
+	void *stats_mem;	/* original pointer to allocated mem */
+ dma_addr_t stats_mem_phy; /* Physical address of the stat block */
+ u32 stats_mem_sz;
+ StatInfo_t *stats_info; /* Logical address of the stat block */
+} mac_info_t;
+
+/* structure representing the user defined MAC addresses */
+typedef struct {
+ char addr[ETH_ALEN];
+ int usage_cnt;
+} usr_addr_t;
+
+/* Structure that holds the Phy and virt addresses of the Blocks */
+typedef struct rx_block_info {
+ RxD_t *block_virt_addr;
+ dma_addr_t block_dma_addr;
+} rx_block_info_t;
+
+/* Default Tunable parameters of the NIC. */
+#define DEFAULT_FIFO_LEN 4096
+#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
+#define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1)
+#define SMALL_BLK_CNT 30
+#define LARGE_BLK_CNT 100
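+/*
+ * In single-buffer mode each block carries MAX_RXDS_PER_BLOCK + 1 = 128
+ * descriptors, so the small and large defaults correspond to 3840 and
+ * 12800 RxDs per ring respectively.
+ */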
+
+/* Structure representing one instance of the NIC */
+typedef struct s2io_nic {
+#define MAX_MAC_SUPPORTED 16
+#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
+
+ macaddr_t def_mac_addr[MAX_MAC_SUPPORTED];
+ macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
+
+ struct net_device_stats stats;
+ void __iomem *bar0;
+ void __iomem *bar1;
+ struct config_param config;
+ mac_info_t mac_control;
+ int high_dma_flag;
+ int device_close_flag;
+ int device_enabled_once;
+
+ char name[32];
+ struct tasklet_struct task;
+ volatile unsigned long tasklet_status;
+ struct timer_list timer;
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ u16 vendor_id;
+ u16 device_id;
+ u16 ccmd;
+ u32 cbar0_1;
+ u32 cbar0_2;
+ u32 cbar1_1;
+ u32 cbar1_2;
+ u32 cirq;
+ u8 cache_line;
+ u32 rom_expansion;
+ u16 pcix_cmd;
+ u32 irq;
+ atomic_t rx_bufs_left[MAX_RX_RINGS];
+
+ spinlock_t tx_lock;
+#ifndef CONFIG_S2IO_NAPI
+ spinlock_t put_lock;
+#endif
+
+#define PROMISC 1
+#define ALL_MULTI 2
+
+#define MAX_ADDRS_SUPPORTED 64
+ u16 usr_addr_count;
+ u16 mc_addr_count;
+ usr_addr_t usr_addrs[MAX_ADDRS_SUPPORTED];
+
+ u16 m_cast_flg;
+ u16 all_multi_pos;
+ u16 promisc_flg;
+
+ u16 tx_pkt_count;
+ u16 rx_pkt_count;
+ u16 tx_err_count;
+ u16 rx_err_count;
+
+#ifndef CONFIG_S2IO_NAPI
+ /* Index to the absolute position of the put pointer of Rx ring. */
+ int put_pos[MAX_RX_RINGS];
+#endif
+
+ /*
+ * Place holders for the virtual and physical addresses of
+ * all the Rx Blocks
+ */
+ rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
+ int block_count[MAX_RX_RINGS];
+ int pkt_cnt[MAX_RX_RINGS];
+
+ /* Place holder of all the TX List's Phy and Virt addresses. */
+ list_info_hold_t *list_info[MAX_TX_FIFOS];
+
+	/* Id timer, used to blink the NIC so it can be physically identified. */
+ struct timer_list id_timer;
+
+	/* Restart timer, used to restart the NIC if the device gets stuck,
+	 * and a scheduled task that will set the correct link state once
+	 * the NIC's PHY has stabilized after a state change.
+	 */
+#ifdef INIT_TQUEUE
+ struct tq_struct rst_timer_task;
+ struct tq_struct set_link_task;
+#else
+ struct work_struct rst_timer_task;
+ struct work_struct set_link_task;
+#endif
+
+ /* Flag that can be used to turn on or turn off the Rx checksum
+ * offload feature.
+ */
+ int rx_csum;
+
+	/* After the blink, the adapter must be restored to its original
+	 * values.
+	 */
+ u64 adapt_ctrl_org;
+
+ /* Last known link state. */
+ u16 last_link_state;
+#define LINK_DOWN 1
+#define LINK_UP 2
+
+#ifdef CONFIG_2BUFF_MODE
+ /* Buffer Address store. */
+ buffAdd_t **ba[MAX_RX_RINGS];
+#endif
+ int task_flag;
+#define CARD_DOWN 1
+#define CARD_UP 2
+ atomic_t card_state;
+ volatile unsigned long link_state;
+} nic_t;
+
+#define RESET_ERROR 1
+#define CMD_ERROR   2
+
+/* OS related system calls */
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ u64 ret = readl(addr + 4);
+ ret <<= 32;
+ ret |= readl(addr);
+
+ return ret;
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32) (val), addr);
+ writel((u32) (val >> 32), (addr + 4));
+}
+
+/* In 32-bit mode, some registers have to be written in a
+ * particular order for correct hardware operation. The
+ * macro SPECIAL_REG_WRITE is used to perform such ordered
+ * writes. Defines UF (Upper First) and LF (Lower First) will
+ * be used to specify the required write order.
+ */
+#define UF 1
+#define LF 2
+static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
+{
+ if (order == LF) {
+ writel((u32) (val), addr);
+ writel((u32) (val >> 32), (addr + 4));
+ } else {
+ writel((u32) (val >> 32), (addr + 4));
+ writel((u32) (val), addr);
+ }
+}
+#else
+#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
+#endif
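+/*
+ * Illustrative use (the register name is hypothetical): a 64-bit register
+ * that must see its lower half first would be updated with
+ *	SPECIAL_REG_WRITE(val64, &bar0->some_reg, LF);
+ * while UF reverses the order of the two 32-bit writes.  When a native
+ * writeq() exists the order argument is ignored.
+ */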
+
+/* Interrupt related values of Xena */
+
+#define ENABLE_INTRS 1
+#define DISABLE_INTRS 2
+
+/* Highest level interrupt blocks */
+#define TX_PIC_INTR (0x0001<<0)
+#define TX_DMA_INTR (0x0001<<1)
+#define TX_MAC_INTR (0x0001<<2)
+#define TX_XGXS_INTR (0x0001<<3)
+#define TX_TRAFFIC_INTR (0x0001<<4)
+#define RX_PIC_INTR (0x0001<<5)
+#define RX_DMA_INTR (0x0001<<6)
+#define RX_MAC_INTR (0x0001<<7)
+#define RX_XGXS_INTR (0x0001<<8)
+#define RX_TRAFFIC_INTR (0x0001<<9)
+#define MC_INTR (0x0001<<10)
+#define ENA_ALL_INTRS ( TX_PIC_INTR | \
+ TX_DMA_INTR | \
+ TX_MAC_INTR | \
+ TX_XGXS_INTR | \
+ TX_TRAFFIC_INTR | \
+ RX_PIC_INTR | \
+ RX_DMA_INTR | \
+ RX_MAC_INTR | \
+ RX_XGXS_INTR | \
+ RX_TRAFFIC_INTR | \
+ MC_INTR )
+
+/* Interrupt masks for the general interrupt mask register */
+#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL
+
+#define TXPIC_INT_M BIT(0)
+#define TXDMA_INT_M BIT(1)
+#define TXMAC_INT_M BIT(2)
+#define TXXGXS_INT_M BIT(3)
+#define TXTRAFFIC_INT_M BIT(8)
+#define PIC_RX_INT_M BIT(32)
+#define RXDMA_INT_M BIT(33)
+#define RXMAC_INT_M BIT(34)
+#define MC_INT_M BIT(35)
+#define RXXGXS_INT_M BIT(36)
+#define RXTRAFFIC_INT_M BIT(40)
+
+/* PIC level Interrupts TODO */
+
+/* DMA level Interrupts */
+#define TXDMA_PFC_INT_M BIT(0)
+#define TXDMA_PCC_INT_M BIT(2)
+
+/* PFC block interrupts */
+#define PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */
+
+/* PCC block interrupts. */
+#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
+ PCC_FB_ECC Error. */
+
+/*
+ * Prototype declarations.
+ */
+static int __devinit s2io_init_nic(struct pci_dev *pdev,
+ const struct pci_device_id *pre);
+static void __devexit s2io_rem_nic(struct pci_dev *pdev);
+static int init_shared_mem(struct s2io_nic *sp);
+static void free_shared_mem(struct s2io_nic *sp);
+static int init_nic(struct s2io_nic *nic);
+#ifndef CONFIG_S2IO_NAPI
+static void rx_intr_handler(struct s2io_nic *sp);
+#endif
+static void tx_intr_handler(struct s2io_nic *sp);
+static void alarm_intr_handler(struct s2io_nic *sp);
+
+static int s2io_starter(void);
+static void s2io_closer(void);
+static void s2io_tx_watchdog(struct net_device *dev);
+static void s2io_tasklet(unsigned long dev_addr);
+static void s2io_set_multicast(struct net_device *dev);
+#ifndef CONFIG_2BUFF_MODE
+static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no);
+#else
+static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
+ buffAdd_t * ba);
+#endif
+static void s2io_link(nic_t * sp, int link);
+static void s2io_reset(nic_t * sp);
+#ifdef CONFIG_S2IO_NAPI
+static int s2io_poll(struct net_device *dev, int *budget);
+#endif
+static void s2io_init_pci(nic_t * sp);
+static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
+static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
+static int verify_xena_quiescence(u64 val64, int flag);
+static struct ethtool_ops netdev_ethtool_ops;
+static void s2io_set_link(unsigned long data);
+static int s2io_set_swapper(nic_t * sp);
+static void s2io_card_down(nic_t * nic);
+static int s2io_card_up(nic_t * nic);
+
+#endif /* _S2IO_H */
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
new file mode 100644
index 000000000000..fd0167077fbe
--- /dev/null
+++ b/drivers/net/saa9730.c
@@ -0,0 +1,1184 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * SAA9730 ethernet driver.
+ *
+ * Changes:
+ * Angelo Dell'Aera <buffer@antifork.org> : Conversion to the new PCI API (pci_driver).
+ * Conversion to spinlocks.
+ * Error handling fixes.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <asm/addrspace.h>
+#include <asm/mips-boards/prom.h>
+
+#include "saa9730.h"
+
+#ifdef LAN_SAA9730_DEBUG
+int lan_saa9730_debug = LAN_SAA9730_DEBUG;
+#else
+int lan_saa9730_debug;
+#endif
+
+#define DRV_MODULE_NAME "saa9730"
+
+static struct pci_device_id saa9730_pci_tbl[] = {
+ { PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA9370,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, saa9730_pci_tbl);
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned int pci_irq_line;
+
+#define INL(a) inl((unsigned long)a)
+#define OUTL(x,a) outl(x,(unsigned long)a)
+
+static void evm_saa9730_enable_lan_int(struct lan_saa9730_private *lp)
+{
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptBlock1);
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptStatus1) | EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptStatus1);
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptEnable1) | EVM_LAN_INT |
+ EVM_MASTER_EN, &lp->evm_saa9730_regs->InterruptEnable1);
+}
+static void evm_saa9730_disable_lan_int(struct lan_saa9730_private *lp)
+{
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptBlock1);
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptEnable1) & ~EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptEnable1);
+}
+
+static void evm_saa9730_clear_lan_int(struct lan_saa9730_private *lp)
+{
+ OUTL(EVM_LAN_INT, &lp->evm_saa9730_regs->InterruptStatus1);
+}
+
+static void evm_saa9730_block_lan_int(struct lan_saa9730_private *lp)
+{
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptBlock1);
+}
+
+static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp)
+{
+ OUTL(INL(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
+ &lp->evm_saa9730_regs->InterruptBlock1);
+}
+
+static void show_saa9730_regs(struct lan_saa9730_private *lp)
+{
+ int i, j;
+ printk("TxmBufferA = %x\n", lp->TxmBuffer[0][0]);
+ printk("TxmBufferB = %x\n", lp->TxmBuffer[1][0]);
+ printk("RcvBufferA = %x\n", lp->RcvBuffer[0][0]);
+ printk("RcvBufferB = %x\n", lp->RcvBuffer[1][0]);
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
+ printk("TxmBuffer[%d][%d] = %x\n", i, j,
+ le32_to_cpu(*(unsigned int *)
+ lp->TxmBuffer[i][j]));
+ }
+ }
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
+ printk("RcvBuffer[%d][%d] = %x\n", i, j,
+ le32_to_cpu(*(unsigned int *)
+ lp->RcvBuffer[i][j]));
+ }
+ }
+ printk("lp->evm_saa9730_regs->InterruptBlock1 = %x\n",
+ INL(&lp->evm_saa9730_regs->InterruptBlock1));
+ printk("lp->evm_saa9730_regs->InterruptStatus1 = %x\n",
+ INL(&lp->evm_saa9730_regs->InterruptStatus1));
+ printk("lp->evm_saa9730_regs->InterruptEnable1 = %x\n",
+ INL(&lp->evm_saa9730_regs->InterruptEnable1));
+ printk("lp->lan_saa9730_regs->Ok2Use = %x\n",
+ INL(&lp->lan_saa9730_regs->Ok2Use));
+ printk("lp->NextTxmBufferIndex = %x\n", lp->NextTxmBufferIndex);
+ printk("lp->NextTxmPacketIndex = %x\n", lp->NextTxmPacketIndex);
+ printk("lp->PendingTxmBufferIndex = %x\n",
+ lp->PendingTxmBufferIndex);
+ printk("lp->PendingTxmPacketIndex = %x\n",
+ lp->PendingTxmPacketIndex);
+ printk("lp->lan_saa9730_regs->LanDmaCtl = %x\n",
+ INL(&lp->lan_saa9730_regs->LanDmaCtl));
+ printk("lp->lan_saa9730_regs->DmaStatus = %x\n",
+ INL(&lp->lan_saa9730_regs->DmaStatus));
+ printk("lp->lan_saa9730_regs->CamCtl = %x\n",
+ INL(&lp->lan_saa9730_regs->CamCtl));
+ printk("lp->lan_saa9730_regs->TxCtl = %x\n",
+ INL(&lp->lan_saa9730_regs->TxCtl));
+ printk("lp->lan_saa9730_regs->TxStatus = %x\n",
+ INL(&lp->lan_saa9730_regs->TxStatus));
+ printk("lp->lan_saa9730_regs->RxCtl = %x\n",
+ INL(&lp->lan_saa9730_regs->RxCtl));
+ printk("lp->lan_saa9730_regs->RxStatus = %x\n",
+ INL(&lp->lan_saa9730_regs->RxStatus));
+ for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
+ OUTL(i, &lp->lan_saa9730_regs->CamAddress);
+ printk("lp->lan_saa9730_regs->CamData = %x\n",
+ INL(&lp->lan_saa9730_regs->CamData));
+ }
+ printk("lp->stats.tx_packets = %lx\n", lp->stats.tx_packets);
+ printk("lp->stats.tx_errors = %lx\n", lp->stats.tx_errors);
+ printk("lp->stats.tx_aborted_errors = %lx\n",
+ lp->stats.tx_aborted_errors);
+ printk("lp->stats.tx_window_errors = %lx\n",
+ lp->stats.tx_window_errors);
+ printk("lp->stats.tx_carrier_errors = %lx\n",
+ lp->stats.tx_carrier_errors);
+ printk("lp->stats.tx_fifo_errors = %lx\n",
+ lp->stats.tx_fifo_errors);
+ printk("lp->stats.tx_heartbeat_errors = %lx\n",
+ lp->stats.tx_heartbeat_errors);
+ printk("lp->stats.collisions = %lx\n", lp->stats.collisions);
+
+ printk("lp->stats.rx_packets = %lx\n", lp->stats.rx_packets);
+ printk("lp->stats.rx_errors = %lx\n", lp->stats.rx_errors);
+ printk("lp->stats.rx_dropped = %lx\n", lp->stats.rx_dropped);
+ printk("lp->stats.rx_crc_errors = %lx\n", lp->stats.rx_crc_errors);
+ printk("lp->stats.rx_frame_errors = %lx\n",
+ lp->stats.rx_frame_errors);
+ printk("lp->stats.rx_fifo_errors = %lx\n",
+ lp->stats.rx_fifo_errors);
+ printk("lp->stats.rx_length_errors = %lx\n",
+ lp->stats.rx_length_errors);
+
+ printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugPCIMasterAddr));
+ printk("lp->lan_saa9730_regs->DebugLanTxStateMachine = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugLanTxStateMachine));
+ printk("lp->lan_saa9730_regs->DebugLanRxStateMachine = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugLanRxStateMachine));
+ printk("lp->lan_saa9730_regs->DebugLanTxFifoPointers = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugLanTxFifoPointers));
+ printk("lp->lan_saa9730_regs->DebugLanRxFifoPointers = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugLanRxFifoPointers));
+ printk("lp->lan_saa9730_regs->DebugLanCtlStateMachine = %x\n",
+ INL(&lp->lan_saa9730_regs->DebugLanCtlStateMachine));
+}
+
+static void lan_saa9730_buffer_init(struct lan_saa9730_private *lp)
+{
+ int i, j;
+
+ /* Init RX buffers */
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
+ *(unsigned int *) lp->RcvBuffer[i][j] =
+ cpu_to_le32(RXSF_READY <<
+ RX_STAT_CTL_OWNER_SHF);
+ }
+ }
+
+ /* Init TX buffers */
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
+ *(unsigned int *) lp->TxmBuffer[i][j] =
+ cpu_to_le32(TXSF_EMPTY <<
+ TX_STAT_CTL_OWNER_SHF);
+ }
+ }
+}
+
+static int lan_saa9730_allocate_buffers(struct lan_saa9730_private *lp)
+{
+ unsigned int mem_size;
+ void *Pa;
+ unsigned int i, j, RcvBufferSize, TxmBufferSize;
+ unsigned int buffer_start;
+
+ /*
+ * Allocate all RX and TX packets in one chunk.
+ * The Rx and Tx packets must be PACKET_SIZE aligned.
+ */
+ mem_size = ((LAN_SAA9730_RCV_Q_SIZE + LAN_SAA9730_TXM_Q_SIZE) *
+ LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_BUFFERS) +
+ LAN_SAA9730_PACKET_SIZE;
+ buffer_start =
+ (unsigned int) kmalloc(mem_size, GFP_DMA | GFP_KERNEL);
+
+ if (!buffer_start)
+ return -ENOMEM;
+
+ /*
+ * Set DMA buffer to kseg1 (uncached).
+ * Make sure to flush before using it uncached.
+ */
+ Pa = (void *) KSEG1ADDR((buffer_start + LAN_SAA9730_PACKET_SIZE) &
+ ~(LAN_SAA9730_PACKET_SIZE - 1));
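+	/*
+	 * The mask arithmetic above rounds the start address up to a
+	 * LAN_SAA9730_PACKET_SIZE boundary (this relies on the packet size
+	 * being a power of two); the extra packet allocated above provides
+	 * the slack needed for that rounding.
+	 */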
+ dma_cache_wback_inv((unsigned long) Pa, mem_size);
+
+ /* Initialize buffer space */
+ RcvBufferSize = LAN_SAA9730_PACKET_SIZE;
+ TxmBufferSize = LAN_SAA9730_PACKET_SIZE;
+ lp->DmaRcvPackets = LAN_SAA9730_RCV_Q_SIZE;
+ lp->DmaTxmPackets = LAN_SAA9730_TXM_Q_SIZE;
+
+ /* Init RX buffers */
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_RCV_Q_SIZE; j++) {
+ *(unsigned int *) Pa =
+ cpu_to_le32(RXSF_READY <<
+ RX_STAT_CTL_OWNER_SHF);
+ lp->RcvBuffer[i][j] = (unsigned int) Pa;
+ Pa += RcvBufferSize;
+ }
+ }
+
+ /* Init TX buffers */
+ for (i = 0; i < LAN_SAA9730_BUFFERS; i++) {
+ for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) {
+ *(unsigned int *) Pa =
+ cpu_to_le32(TXSF_EMPTY <<
+ TX_STAT_CTL_OWNER_SHF);
+ lp->TxmBuffer[i][j] = (unsigned int) Pa;
+ Pa += TxmBufferSize;
+ }
+ }
+
+ /*
+ * Set rx buffer A and rx buffer B to point to the first two buffer
+ * spaces.
+ */
+ OUTL(PHYSADDR(lp->RcvBuffer[0][0]),
+ &lp->lan_saa9730_regs->RxBuffA);
+ OUTL(PHYSADDR(lp->RcvBuffer[1][0]),
+ &lp->lan_saa9730_regs->RxBuffB);
+
+ /* Initialize Buffer Index */
+ lp->NextRcvPacketIndex = 0;
+ lp->NextRcvToUseIsA = 1;
+
+	/* Set current buffer index & next available packet index */
+ lp->NextTxmPacketIndex = 0;
+ lp->NextTxmBufferIndex = 0;
+ lp->PendingTxmPacketIndex = 0;
+ lp->PendingTxmBufferIndex = 0;
+
+ /*
+ * Set txm_buf_a and txm_buf_b to point to the first two buffer
+	 * spaces.
+ */
+ OUTL(PHYSADDR(lp->TxmBuffer[0][0]),
+ &lp->lan_saa9730_regs->TxBuffA);
+ OUTL(PHYSADDR(lp->TxmBuffer[1][0]),
+ &lp->lan_saa9730_regs->TxBuffB);
+
+ /* Set packet number */
+ OUTL((lp->DmaRcvPackets << PK_COUNT_RX_A_SHF) |
+ (lp->DmaRcvPackets << PK_COUNT_RX_B_SHF) |
+ (lp->DmaTxmPackets << PK_COUNT_TX_A_SHF) |
+ (lp->DmaTxmPackets << PK_COUNT_TX_B_SHF),
+ &lp->lan_saa9730_regs->PacketCount);
+
+ return 0;
+}
+
+static int lan_saa9730_cam_load(struct lan_saa9730_private *lp)
+{
+ unsigned int i;
+ unsigned char *NetworkAddress;
+
+ NetworkAddress = (unsigned char *) &lp->PhysicalAddress[0][0];
+
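+	/*
+	 * Each CAM data word packs four consecutive address bytes, most
+	 * significant byte first, so the CAM entries are filled one 32-bit
+	 * word at a time.
+	 */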
+ for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
+ /* First set address to where data is written */
+ OUTL(i, &lp->lan_saa9730_regs->CamAddress);
+ OUTL((NetworkAddress[0] << 24) | (NetworkAddress[1] << 16)
+ | (NetworkAddress[2] << 8) | NetworkAddress[3],
+ &lp->lan_saa9730_regs->CamData);
+ NetworkAddress += 4;
+ }
+ return 0;
+}
+
+static int lan_saa9730_cam_init(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+ unsigned int i;
+
+ /* Copy MAC-address into all entries. */
+ for (i = 0; i < LAN_SAA9730_CAM_ENTRIES; i++) {
+ memcpy((unsigned char *) lp->PhysicalAddress[i],
+ (unsigned char *) dev->dev_addr, 6);
+ }
+
+ return 0;
+}
+
+static int lan_saa9730_mii_init(struct lan_saa9730_private *lp)
+{
+ int i, l;
+
+ /* Check link status, spin here till station is not busy. */
+ i = 0;
+ while (INL(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) {
+ i++;
+ if (i > 100) {
+ printk("Error: lan_saa9730_mii_init: timeout\n");
+ return -1;
+ }
+ mdelay(1); /* wait 1 ms. */
+ }
+
+ /* Now set the control and address register. */
+ OUTL(MD_CA_BUSY | PHY_STATUS | PHY_ADDRESS << MD_CA_PHY_SHF,
+ &lp->lan_saa9730_regs->StationMgmtCtl);
+
+ /* check link status, spin here till station is not busy */
+ i = 0;
+ while (INL(&lp->lan_saa9730_regs->StationMgmtCtl) & MD_CA_BUSY) {
+ i++;
+ if (i > 100) {
+ printk("Error: lan_saa9730_mii_init: timeout\n");
+ return -1;
+ }
+ mdelay(1); /* wait 1 ms. */
+ }
+
+ /* Wait for 1 ms. */
+ mdelay(1);
+
+ /* Check the link status. */
+ if (INL(&lp->lan_saa9730_regs->StationMgmtData) &
+ PHY_STATUS_LINK_UP) {
+ /* Link is up. */
+ return 0;
+ } else {
+ /* Link is down, reset the PHY first. */
+
+ /* set PHY address = 'CONTROL' */
+ OUTL(PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | PHY_CONTROL,
+ &lp->lan_saa9730_regs->StationMgmtCtl);
+
+ /* Wait for 1 ms. */
+ mdelay(1);
+
+ /* set 'CONTROL' = force reset and renegotiate */
+ OUTL(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG |
+ PHY_CONTROL_RESTART_AUTO_NEG,
+ &lp->lan_saa9730_regs->StationMgmtData);
+
+ /* Wait for 50 ms. */
+ mdelay(50);
+
+ /* set 'BUSY' to start operation */
+ OUTL(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR |
+ PHY_CONTROL, &lp->lan_saa9730_regs->StationMgmtCtl);
+
+ /* await completion */
+ i = 0;
+ while (INL(&lp->lan_saa9730_regs->StationMgmtCtl) &
+ MD_CA_BUSY) {
+ i++;
+ if (i > 100) {
+ printk
+ ("Error: lan_saa9730_mii_init: timeout\n");
+ return -1;
+ }
+ mdelay(1); /* wait 1 ms. */
+ }
+
+ /* Wait for 1 ms. */
+ mdelay(1);
+
+ for (l = 0; l < 2; l++) {
+ /* set PHY address = 'STATUS' */
+ OUTL(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF |
+ PHY_STATUS,
+ &lp->lan_saa9730_regs->StationMgmtCtl);
+
+ /* await completion */
+ i = 0;
+ while (INL(&lp->lan_saa9730_regs->StationMgmtCtl) &
+ MD_CA_BUSY) {
+ i++;
+ if (i > 100) {
+ printk
+ ("Error: lan_saa9730_mii_init: timeout\n");
+ return -1;
+ }
+ mdelay(1); /* wait 1 ms. */
+ }
+
+ /* wait for 3 sec. */
+ mdelay(3000);
+
+ /* check the link status */
+ if (INL(&lp->lan_saa9730_regs->StationMgmtData) &
+ PHY_STATUS_LINK_UP) {
+ /* link is up */
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int lan_saa9730_control_init(struct lan_saa9730_private *lp)
+{
+ /* Initialize DMA control register. */
+ OUTL((LANMB_ANY << DMA_CTL_MAX_XFER_SHF) |
+ (LANEND_LITTLE << DMA_CTL_ENDIAN_SHF) |
+ (LAN_SAA9730_RCV_Q_INT_THRESHOLD << DMA_CTL_RX_INT_COUNT_SHF)
+ | DMA_CTL_RX_INT_TO_EN | DMA_CTL_RX_INT_EN |
+ DMA_CTL_MAC_RX_INT_EN | DMA_CTL_MAC_TX_INT_EN,
+ &lp->lan_saa9730_regs->LanDmaCtl);
+
+	/* Initialize MAC control register. */
+ OUTL((MACCM_MII << MAC_CONTROL_CONN_SHF) | MAC_CONTROL_FULL_DUP,
+ &lp->lan_saa9730_regs->MacCtl);
+
+ /* Initialize CAM control register. */
+ OUTL(CAM_CONTROL_COMP_EN | CAM_CONTROL_BROAD_ACC,
+ &lp->lan_saa9730_regs->CamCtl);
+
+ /*
+ * Initialize CAM enable register, only turn on first entry, should
+ * contain own addr.
+ */
+ OUTL(0x0001, &lp->lan_saa9730_regs->CamEnable);
+
+ /* Initialize Tx control register */
+ OUTL(TX_CTL_EN_COMP, &lp->lan_saa9730_regs->TxCtl);
+
+ /* Initialize Rcv control register */
+ OUTL(RX_CTL_STRIP_CRC, &lp->lan_saa9730_regs->RxCtl);
+
+ /* Reset DMA engine */
+ OUTL(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
+
+ return 0;
+}
+
+static int lan_saa9730_stop(struct lan_saa9730_private *lp)
+{
+ int i;
+
+ /* Stop DMA first */
+ OUTL(INL(&lp->lan_saa9730_regs->LanDmaCtl) &
+ ~(DMA_CTL_EN_TX_DMA | DMA_CTL_EN_RX_DMA),
+ &lp->lan_saa9730_regs->LanDmaCtl);
+
+ /* Set the SW Reset bits in DMA and MAC control registers */
+ OUTL(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
+ OUTL(INL(&lp->lan_saa9730_regs->MacCtl) | MAC_CONTROL_RESET,
+ &lp->lan_saa9730_regs->MacCtl);
+
+ /*
+ * Wait for MAC reset to have finished. The reset bit is auto cleared
+ * when the reset is done.
+ */
+ i = 0;
+ while (INL(&lp->lan_saa9730_regs->MacCtl) & MAC_CONTROL_RESET) {
+ i++;
+ if (i > 100) {
+ printk
+			    ("Error: lan_saa9730_stop: MAC reset timeout\n");
+ return -1;
+ }
+ mdelay(1); /* wait 1 ms. */
+ }
+
+ return 0;
+}
+
+static int lan_saa9730_dma_init(struct lan_saa9730_private *lp)
+{
+ /* Stop lan controller. */
+ lan_saa9730_stop(lp);
+
+ OUTL(LAN_SAA9730_DEFAULT_TIME_OUT_CNT,
+ &lp->lan_saa9730_regs->Timeout);
+
+ return 0;
+}
+
+static int lan_saa9730_start(struct lan_saa9730_private *lp)
+{
+ lan_saa9730_buffer_init(lp);
+
+ /* Initialize Rx Buffer Index */
+ lp->NextRcvPacketIndex = 0;
+ lp->NextRcvToUseIsA = 1;
+
+	/* Set current buffer index & next available packet index */
+ lp->NextTxmPacketIndex = 0;
+ lp->NextTxmBufferIndex = 0;
+ lp->PendingTxmPacketIndex = 0;
+ lp->PendingTxmBufferIndex = 0;
+
+ OUTL(INL(&lp->lan_saa9730_regs->LanDmaCtl) | DMA_CTL_EN_TX_DMA |
+ DMA_CTL_EN_RX_DMA, &lp->lan_saa9730_regs->LanDmaCtl);
+
+ /* For Tx, turn on MAC then DMA */
+ OUTL(INL(&lp->lan_saa9730_regs->TxCtl) | TX_CTL_TX_EN,
+ &lp->lan_saa9730_regs->TxCtl);
+
+ /* For Rx, turn on DMA then MAC */
+ OUTL(INL(&lp->lan_saa9730_regs->RxCtl) | RX_CTL_RX_EN,
+ &lp->lan_saa9730_regs->RxCtl);
+
+	/* Set Ok2Use to let the hardware own the buffers */
+ OUTL(OK2USE_RX_A | OK2USE_RX_B | OK2USE_TX_A | OK2USE_TX_B,
+ &lp->lan_saa9730_regs->Ok2Use);
+
+ return 0;
+}
+
+static int lan_saa9730_restart(struct lan_saa9730_private *lp)
+{
+ lan_saa9730_stop(lp);
+ lan_saa9730_start(lp);
+
+ return 0;
+}
+
+static int lan_saa9730_tx(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+ unsigned int *pPacket;
+ unsigned int tx_status;
+
+ if (lan_saa9730_debug > 5)
+ printk("lan_saa9730_tx interrupt\n");
+
+ /* Clear interrupt. */
+ OUTL(DMA_STATUS_MAC_TX_INT, &lp->lan_saa9730_regs->DmaStatus);
+
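+	/*
+	 * Walk the pending Tx descriptors in order; stop at the first one
+	 * the hardware has not marked TXSF_HWDONE, and return each
+	 * completed descriptor to TXSF_EMPTY so it can be reused.
+	 */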
+ while (1) {
+ pPacket =
+ (unsigned int *) lp->TxmBuffer[lp->
+ PendingTxmBufferIndex]
+ [lp->PendingTxmPacketIndex];
+
+ /* Get status of first packet transmitted. */
+ tx_status = le32_to_cpu(*pPacket);
+
+ /* Check ownership. */
+ if ((tx_status & TX_STAT_CTL_OWNER_MSK) !=
+ (TXSF_HWDONE << TX_STAT_CTL_OWNER_SHF)) break;
+
+ /* Check for error. */
+ if (tx_status & TX_STAT_CTL_ERROR_MSK) {
+ if (lan_saa9730_debug > 1)
+ printk("lan_saa9730_tx: tx error = %x\n",
+ tx_status);
+
+ lp->stats.tx_errors++;
+ if (tx_status &
+ (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF))
+ lp->stats.tx_aborted_errors++;
+ if (tx_status &
+ (TX_STATUS_LATE_COLL <<
+ TX_STAT_CTL_STATUS_SHF)) lp->stats.
+ tx_window_errors++;
+ if (tx_status &
+ (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF))
+ lp->stats.tx_carrier_errors++;
+ if (tx_status &
+ (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF))
+ lp->stats.tx_fifo_errors++;
+ if (tx_status &
+ (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF))
+ lp->stats.tx_heartbeat_errors++;
+
+ lp->stats.collisions +=
+ tx_status & TX_STATUS_TX_COLL_MSK;
+ }
+
+ /* Free buffer. */
+ *pPacket =
+ cpu_to_le32(TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF);
+
+ /* Update pending index pointer. */
+ lp->PendingTxmPacketIndex++;
+ if (lp->PendingTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) {
+ lp->PendingTxmPacketIndex = 0;
+ lp->PendingTxmBufferIndex ^= 1;
+ }
+ }
+
+ /* Make sure A and B are available to hardware. */
+ OUTL(OK2USE_TX_A | OK2USE_TX_B, &lp->lan_saa9730_regs->Ok2Use);
+
+ if (netif_queue_stopped(dev)) {
+ /* The tx buffer is no longer full. */
+ netif_wake_queue(dev);
+ }
+
+ return 0;
+}
+
+static int lan_saa9730_rx(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+ int len = 0;
+ struct sk_buff *skb = 0;
+ unsigned int rx_status;
+ int BufferIndex;
+ int PacketIndex;
+ unsigned int *pPacket;
+ unsigned char *pData;
+
+ if (lan_saa9730_debug > 5)
+ printk("lan_saa9730_rx interrupt\n");
+
+ /* Clear receive interrupts. */
+ OUTL(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
+ DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus);
+
+ /* Address next packet */
+ if (lp->NextRcvToUseIsA)
+ BufferIndex = 0;
+ else
+ BufferIndex = 1;
+ PacketIndex = lp->NextRcvPacketIndex;
+ pPacket = (unsigned int *) lp->RcvBuffer[BufferIndex][PacketIndex];
+ rx_status = le32_to_cpu(*pPacket);
+
+ /* Process each packet. */
+ while ((rx_status & RX_STAT_CTL_OWNER_MSK) ==
+ (RXSF_HWDONE << RX_STAT_CTL_OWNER_SHF)) {
+ /* Check the rx status. */
+ if (rx_status & (RX_STATUS_GOOD << RX_STAT_CTL_STATUS_SHF)) {
+ /* Received packet is good. */
+ len = (rx_status & RX_STAT_CTL_LENGTH_MSK) >>
+ RX_STAT_CTL_LENGTH_SHF;
+
+ pData = (unsigned char *) pPacket;
+ pData += 4;
+ skb = dev_alloc_skb(len + 2);
+ if (skb == 0) {
+ printk
+ ("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ } else {
+ lp->stats.rx_bytes += len;
+ lp->stats.rx_packets++;
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *) pData,
+ len, 0);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ } else {
+ /* We got an error packet. */
+ if (lan_saa9730_debug > 2)
+ printk
+ ("lan_saa9730_rx: We got an error packet = %x\n",
+ rx_status);
+
+ lp->stats.rx_errors++;
+ if (rx_status &
+ (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF))
+ lp->stats.rx_crc_errors++;
+ if (rx_status &
+ (RX_STATUS_ALIGN_ERR <<
+ RX_STAT_CTL_STATUS_SHF)) lp->stats.
+ rx_frame_errors++;
+ if (rx_status &
+ (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF))
+ lp->stats.rx_fifo_errors++;
+ if (rx_status &
+ (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF))
+ lp->stats.rx_length_errors++;
+ }
+
+ /* Indicate we have processed the buffer. */
+ *pPacket =
+ cpu_to_le32(RXSF_READY << RX_STAT_CTL_OWNER_SHF);
+
+ /* Go to next packet in sequence. */
+ lp->NextRcvPacketIndex++;
+ if (lp->NextRcvPacketIndex >= LAN_SAA9730_RCV_Q_SIZE) {
+ lp->NextRcvPacketIndex = 0;
+ if (BufferIndex) {
+ lp->NextRcvToUseIsA = 1;
+ } else {
+ lp->NextRcvToUseIsA = 0;
+ }
+ }
+ OUTL(OK2USE_RX_A | OK2USE_RX_B,
+ &lp->lan_saa9730_regs->Ok2Use);
+
+ /* Address next packet */
+ if (lp->NextRcvToUseIsA)
+ BufferIndex = 0;
+ else
+ BufferIndex = 1;
+ PacketIndex = lp->NextRcvPacketIndex;
+ pPacket =
+ (unsigned int *) lp->
+ RcvBuffer[BufferIndex][PacketIndex];
+ rx_status = le32_to_cpu(*pPacket);
+ }
+
+ /* Make sure A and B are available to hardware. */
+ OUTL(OK2USE_RX_A | OK2USE_RX_B, &lp->lan_saa9730_regs->Ok2Use);
+
+ return 0;
+}
+
+static irqreturn_t lan_saa9730_interrupt(const int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ if (lan_saa9730_debug > 5)
+ printk("lan_saa9730_interrupt\n");
+
+ /* Disable the EVM LAN interrupt. */
+ evm_saa9730_block_lan_int(lp);
+
+ /* Clear the EVM LAN interrupt. */
+ evm_saa9730_clear_lan_int(lp);
+
+ /* Service pending transmit interrupts. */
+ if (INL(&lp->lan_saa9730_regs->DmaStatus) & DMA_STATUS_MAC_TX_INT)
+ lan_saa9730_tx(dev);
+
+ /* Service pending receive interrupts. */
+ if (INL(&lp->lan_saa9730_regs->DmaStatus) &
+ (DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
+ DMA_STATUS_RX_TO_INT)) lan_saa9730_rx(dev);
+
+ /* Enable the EVM LAN interrupt. */
+ evm_saa9730_unblock_lan_int(lp);
+
+ return IRQ_HANDLED;
+}
+
+static int lan_saa9730_open_fail(struct net_device *dev)
+{
+ return -ENODEV;
+}
+
+static int lan_saa9730_open(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ /* Associate IRQ with lan_saa9730_interrupt */
+ if (request_irq(dev->irq, &lan_saa9730_interrupt, 0, "SAA9730 Eth",
+ dev)) {
+ printk("lan_saa9730_open: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
+
+ /* Enable the Lan interrupt in the event manager. */
+ evm_saa9730_enable_lan_int(lp);
+
+ /* Start the LAN controller */
+ if (lan_saa9730_start(lp))
+ return -1;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int lan_saa9730_write(struct lan_saa9730_private *lp,
+ struct sk_buff *skb, int skblen)
+{
+ unsigned char *pbData = skb->data;
+ unsigned int len = skblen;
+ unsigned char *pbPacketData;
+ unsigned int tx_status;
+ int BufferIndex;
+ int PacketIndex;
+
+ if (lan_saa9730_debug > 5)
+ printk("lan_saa9730_write: skb=%08x\n",
+ (unsigned int) skb);
+
+ BufferIndex = lp->NextTxmBufferIndex;
+ PacketIndex = lp->NextTxmPacketIndex;
+
+ tx_status =
+ le32_to_cpu(*(unsigned int *) lp->
+ TxmBuffer[BufferIndex][PacketIndex]);
+ if ((tx_status & TX_STAT_CTL_OWNER_MSK) !=
+ (TXSF_EMPTY << TX_STAT_CTL_OWNER_SHF)) {
+ if (lan_saa9730_debug > 4)
+ printk
+ ("lan_saa9730_write: Tx buffer not available: tx_status = %x\n",
+ tx_status);
+ return -1;
+ }
+
+ lp->NextTxmPacketIndex++;
+ if (lp->NextTxmPacketIndex >= LAN_SAA9730_TXM_Q_SIZE) {
+ lp->NextTxmPacketIndex = 0;
+ lp->NextTxmBufferIndex ^= 1;
+ }
+
+ pbPacketData =
+ (unsigned char *) lp->TxmBuffer[BufferIndex][PacketIndex];
+ pbPacketData += 4;
+
+ /* copy the bits */
+ memcpy(pbPacketData, pbData, len);
+
+ /* Set transmit status for hardware */
+ *(unsigned int *) lp->TxmBuffer[BufferIndex][PacketIndex] =
+ cpu_to_le32((TXSF_READY << TX_STAT_CTL_OWNER_SHF) |
+ (TX_STAT_CTL_INT_AFTER_TX << TX_STAT_CTL_FRAME_SHF)
+ | (len << TX_STAT_CTL_LENGTH_SHF));
+
+ /* Set hardware tx buffer. */
+ OUTL(OK2USE_TX_A | OK2USE_TX_B, &lp->lan_saa9730_regs->Ok2Use);
+
+ return 0;
+}
+
+static void lan_saa9730_tx_timeout(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ /* Transmitter timeout, serious problems */
+ lp->stats.tx_errors++;
+ printk("%s: transmit timed out, reset\n", dev->name);
+ /*show_saa9730_regs(lp); */
+ lan_saa9730_restart(lp);
+
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+}
+
+static int lan_saa9730_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+ unsigned long flags;
+ int skblen;
+ int len;
+
+ if (lan_saa9730_debug > 4)
+ printk("Send packet: skb=%08x\n", (unsigned int) skb);
+
+ skblen = skb->len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+
+ if (lan_saa9730_write(lp, skb, skblen)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ printk("Error when writing packet to controller: skb=%08x\n",
+ (unsigned int) skb);
+ netif_stop_queue(dev);
+ return -1;
+ }
+
+ lp->stats.tx_bytes += len;
+ lp->stats.tx_packets++;
+
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+ dev_kfree_skb(skb);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static int lan_saa9730_close(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ if (lan_saa9730_debug > 1)
+ printk("lan_saa9730_close:\n");
+
+ netif_stop_queue(dev);
+
+ /* Disable the Lan interrupt in the event manager. */
+ evm_saa9730_disable_lan_int(lp);
+
+ /* Stop the controller */
+ if (lan_saa9730_stop(lp))
+ return -1;
+
+ free_irq(dev->irq, (void *) dev);
+
+ return 0;
+}
+
+static struct net_device_stats *lan_saa9730_get_stats(struct net_device
+ *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ return &lp->stats;
+}
+
+static void lan_saa9730_set_multicast(struct net_device *dev)
+{
+ struct lan_saa9730_private *lp =
+ (struct lan_saa9730_private *) dev->priv;
+
+ /* Stop the controller */
+ lan_saa9730_stop(lp);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* accept all packets */
+ OUTL(CAM_CONTROL_COMP_EN | CAM_CONTROL_STATION_ACC |
+ CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
+ &lp->lan_saa9730_regs->CamCtl);
+ } else {
+ if (dev->flags & IFF_ALLMULTI) {
+ /* accept all multicast packets */
+ OUTL(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+ CAM_CONTROL_BROAD_ACC,
+ &lp->lan_saa9730_regs->CamCtl);
+ } else {
+ /*
+ * Will handle the multicast stuff later. -carstenl
+ */
+ }
+ }
+
+ lan_saa9730_restart(lp);
+}
+
+
+static void __devexit saa9730_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ unregister_netdev(dev);
+
+ if (dev->priv)
+ kfree(dev->priv);
+
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+
+static int lan_saa9730_init(struct net_device *dev, int ioaddr, int irq)
+{
+ struct lan_saa9730_private *lp;
+ unsigned char ethernet_addr[6];
+ int ret = 0;
+
+ dev->open = lan_saa9730_open_fail;
+
+ if (get_ethernet_addr(ethernet_addr))
+ return -ENODEV;
+
+ memcpy(dev->dev_addr, ethernet_addr, 6);
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /*
+ * Make certain the data structures used by the controller are aligned
+ * and DMAble.
+ */
+ /*
+ * XXX: that is obviously broken - kfree() won't be happy with us.
+ */
+ lp = (struct lan_saa9730_private *) (((unsigned long)
+ kmalloc(sizeof(*lp) + 7,
+ GFP_DMA | GFP_KERNEL)
+ + 7) & ~7);
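+	/*
+	 * The arithmetic above rounds the pointer returned by kmalloc()
+	 * up to the next 8-byte boundary (kmalloc(size + 7), then
+	 * (addr + 7) & ~7).  The original, unrounded pointer is not kept
+	 * anywhere, so the later kfree(dev->priv) calls may be handed an
+	 * address up to 7 bytes past what kmalloc() returned; that is
+	 * what the XXX note above refers to.
+	 */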
+
+ if (!lp)
+ return -ENOMEM;
+
+ dev->priv = lp;
+ memset(lp, 0, sizeof(*lp));
+
+ /* Set SAA9730 LAN base address. */
+ lp->lan_saa9730_regs = (t_lan_saa9730_regmap *) (ioaddr +
+ SAA9730_LAN_REGS_ADDR);
+
+ /* Set SAA9730 EVM base address. */
+ lp->evm_saa9730_regs = (t_evm_saa9730_regmap *) (ioaddr +
+ SAA9730_EVM_REGS_ADDR);
+
+ /* Allocate LAN RX/TX frame buffer space. */
+ /* FIXME: a leak */
+ if ((ret = lan_saa9730_allocate_buffers(lp)))
+ goto out;
+
+ /* Stop LAN controller. */
+ if ((ret = lan_saa9730_stop(lp)))
+ goto out;
+
+ /* Initialize CAM registers. */
+ if ((ret = lan_saa9730_cam_init(dev)))
+ goto out;
+
+ /* Initialize MII registers. */
+ if ((ret = lan_saa9730_mii_init(lp)))
+ goto out;
+
+ /* Initialize control registers. */
+ if ((ret = lan_saa9730_control_init(lp)))
+ goto out;
+
+ /* Load CAM registers. */
+ if ((ret = lan_saa9730_cam_load(lp)))
+ goto out;
+
+ /* Initialize DMA context registers. */
+ if ((ret = lan_saa9730_dma_init(lp)))
+ goto out;
+
+ spin_lock_init(&lp->lock);
+
+ dev->open = lan_saa9730_open;
+ dev->hard_start_xmit = lan_saa9730_start_xmit;
+ dev->stop = lan_saa9730_close;
+ dev->get_stats = lan_saa9730_get_stats;
+ dev->set_multicast_list = lan_saa9730_set_multicast;
+ dev->tx_timeout = lan_saa9730_tx_timeout;
+ dev->watchdog_timeo = (HZ >> 1);
+ dev->dma = 0;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out;
+ return 0;
+
+ out:
+ if (dev->priv)
+ kfree(dev->priv);
+ return ret;
+}
+
+
+static int __devinit saa9730_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ unsigned int pci_ioaddr;
+ int err;
+
+ if (lan_saa9730_debug > 1)
+ printk("saa9730.c: PCI bios is present, checking for devices...\n");
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(0);
+ if (!dev)
+ goto out;
+
+ SET_MODULE_OWNER(dev);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "Cannot enable PCI device, aborting.\n");
+ goto out1;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+ goto out2;
+ }
+
+ pci_irq_line = pdev->irq;
+	/* The LAN base address is located at BAR 1. */
+	pci_ioaddr = pci_resource_start(pdev, 1);
+ pci_set_master(pdev);
+
+ printk("Found SAA9730 (PCI) at %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+
+ err = lan_saa9730_init(dev, pci_ioaddr, pci_irq_line);
+ if (err) {
+		printk(KERN_ERR "LAN init failed\n");
+ goto out2;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ return 0;
+
+out2:
+ pci_disable_device(pdev);
+out1:
+ free_netdev(dev);
+out:
+ return err;
+}
+
+
+static struct pci_driver saa9730_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = saa9730_pci_tbl,
+ .probe = saa9730_init_one,
+ .remove = __devexit_p(saa9730_remove_one),
+};
+
+
+static int __init saa9730_init(void)
+{
+ return pci_module_init(&saa9730_driver);
+}
+
+static void __exit saa9730_cleanup(void)
+{
+ pci_unregister_driver(&saa9730_driver);
+}
+
+module_init(saa9730_init);
+module_exit(saa9730_cleanup);
+
+
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h
new file mode 100644
index 000000000000..9e9da6b4080f
--- /dev/null
+++ b/drivers/net/saa9730.h
@@ -0,0 +1,371 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * SAA9730 ethernet driver description.
+ *
+ */
+#ifndef _SAA9730_H
+#define _SAA9730_H
+
+
+/* Number of 6-byte entries in the CAM. */
+#define LAN_SAA9730_CAM_ENTRIES 10
+#define LAN_SAA9730_CAM_DWORDS ((LAN_SAA9730_CAM_ENTRIES*6)/4)
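+/*
+ * With 10 six-byte entries the CAM holds 60 bytes, so
+ * LAN_SAA9730_CAM_DWORDS evaluates to 60 / 4 = 15 32-bit words.
+ */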
+
+/* TX and RX packet size: fixed to 2048 bytes, according to HW requirements. */
+#define LAN_SAA9730_PACKET_SIZE 2048
+
+/*
+ * Number of TX buffers = number of RX buffers = 2, which is fixed according
+ * to HW requirements.
+ */
+#define LAN_SAA9730_BUFFERS 2
+
+/* Number of RX packets per RX buffer. */
+#define LAN_SAA9730_RCV_Q_SIZE 15
+
+/* Number of TX packets per TX buffer. */
+#define LAN_SAA9730_TXM_Q_SIZE 15
+
+/*
+ * We get an interrupt for every LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD
+ * packets received.
+ * If, however, we receive fewer than LAN_SAA9730_DEFAULT_RCV_Q_INT_THRESHOLD
+ * packets, the hardware can time out after a certain period and still tell
+ * us that packets have arrived.
+ * The timeout value is in units of 32 PCI clocks (33 MHz).
+ * The value 200 approximates 0.0002 seconds.
+ */
+#define LAN_SAA9730_RCV_Q_INT_THRESHOLD 1
+#define LAN_SAA9730_DEFAULT_TIME_OUT_CNT 10
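+
+/*
+ * Illustrative helper (an editorial sketch, not part of the original
+ * header): converting a time-out count in the 32-PCI-clock units
+ * described above into microseconds, assuming a 33 MHz PCI clock.
+ * A count of 200 gives 200 * 32 / 33 ~= 194 us (the "0.0002 seconds"
+ * quoted above); the default count of 10 gives roughly 10 us.
+ */
+static inline unsigned int lan_saa9730_timeout_to_usec(unsigned int count)
+{
+	return (count * 32) / 33;	/* 33 PCI clocks per microsecond */
+}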
+
+#define RXSF_NDIS 0
+#define RXSF_READY 2
+#define RXSF_HWDONE 3
+
+#define TXSF_EMPTY 0
+#define TXSF_READY 2
+#define TXSF_HWDONE 3
+
+#define LANEND_LITTLE 0
+#define LANEND_BIG_2143 1
+#define LANEND_BIG_4321 2
+
+#define LANMB_ANY 0
+#define LANMB_8 1
+#define LANMB_32 2
+#define LANMB_64 3
+
+#define MACCM_AUTOMATIC 0
+#define MACCM_10MB 1
+#define MACCM_MII 2
+
+/*
+ * PHY definitions for Basic registers of QS6612 (used on MIPS ATLAS board)
+ */
+#define PHY_CONTROL 0x0
+#define PHY_STATUS 0x1
+#define PHY_STATUS_LINK_UP 0x4
+#define PHY_CONTROL_RESET 0x8000
+#define PHY_CONTROL_AUTO_NEG 0x1000
+#define PHY_CONTROL_RESTART_AUTO_NEG 0x0200
+#define PHY_ADDRESS 0x0
+
+/* PK_COUNT register. */
+#define PK_COUNT_TX_A_SHF 24
+#define PK_COUNT_TX_A_MSK (0xff << PK_COUNT_TX_A_SHF)
+#define PK_COUNT_TX_B_SHF 16
+#define PK_COUNT_TX_B_MSK (0xff << PK_COUNT_TX_B_SHF)
+#define PK_COUNT_RX_A_SHF 8
+#define PK_COUNT_RX_A_MSK (0xff << PK_COUNT_RX_A_SHF)
+#define PK_COUNT_RX_B_SHF 0
+#define PK_COUNT_RX_B_MSK (0xff << PK_COUNT_RX_B_SHF)
+
+/* OK2USE register. */
+#define OK2USE_TX_A 0x8
+#define OK2USE_TX_B 0x4
+#define OK2USE_RX_A 0x2
+#define OK2USE_RX_B 0x1
+
+/* LAN DMA CONTROL register. */
+#define DMA_CTL_BLK_INT 0x80000000
+#define DMA_CTL_MAX_XFER_SHF 18
+#define DMA_CTL_MAX_XFER_MSK     (0x3 << DMA_CTL_MAX_XFER_SHF)
+#define DMA_CTL_ENDIAN_SHF       16
+#define DMA_CTL_ENDIAN_MSK       (0x3 << DMA_CTL_ENDIAN_SHF)
+#define DMA_CTL_RX_INT_COUNT_SHF 8
+#define DMA_CTL_RX_INT_COUNT_MSK (0xff << DMA_CTL_RX_INT_COUNT_SHF)
+#define DMA_CTL_EN_TX_DMA 0x00000080
+#define DMA_CTL_EN_RX_DMA 0x00000040
+#define DMA_CTL_RX_INT_BUFFUL_EN 0x00000020
+#define DMA_CTL_RX_INT_TO_EN 0x00000010
+#define DMA_CTL_RX_INT_EN 0x00000008
+#define DMA_CTL_TX_INT_EN 0x00000004
+#define DMA_CTL_MAC_TX_INT_EN 0x00000002
+#define DMA_CTL_MAC_RX_INT_EN 0x00000001
+
+/* DMA STATUS register. */
+#define DMA_STATUS_BAD_ADDR_SHF 16
+#define DMA_STATUS_BAD_ADDR_MSK (0xf << DMA_STATUS_BAD_ADDR_SHF)
+#define DMA_STATUS_RX_PKTS_RECEIVED_SHF 8
+#define DMA_STATUS_RX_PKTS_RECEIVED_MSK (0xff << DMA_STATUS_RX_PKTS_RECEIVED_SHF)
+#define DMA_STATUS_TX_EN_SYNC 0x00000080
+#define DMA_STATUS_RX_BUF_A_FUL 0x00000040
+#define DMA_STATUS_RX_BUF_B_FUL 0x00000020
+#define DMA_STATUS_RX_TO_INT 0x00000010
+#define DMA_STATUS_RX_INT 0x00000008
+#define DMA_STATUS_TX_INT 0x00000004
+#define DMA_STATUS_MAC_TX_INT 0x00000002
+#define DMA_STATUS_MAC_RX_INT 0x00000001
+
+/* DMA TEST/PANIC SWITCHES register. */
+#define DMA_TEST_LOOPBACK 0x01000000
+#define DMA_TEST_SW_RESET 0x00000001
+
+/* MAC CONTROL register. */
+#define MAC_CONTROL_EN_MISS_ROLL 0x00002000
+#define MAC_CONTROL_MISS_ROLL 0x00000400
+#define MAC_CONTROL_LOOP10 0x00000080
+#define MAC_CONTROL_CONN_SHF 5
+#define MAC_CONTROL_CONN_MSK (0x3 << MAC_CONTROL_CONN_SHF)
+#define MAC_CONTROL_MAC_LOOP 0x00000010
+#define MAC_CONTROL_FULL_DUP 0x00000008
+#define MAC_CONTROL_RESET 0x00000004
+#define MAC_CONTROL_HALT_IMM 0x00000002
+#define MAC_CONTROL_HALT_REQ 0x00000001
+
+/* CAM CONTROL register. */
+#define CAM_CONTROL_COMP_EN 0x00000010
+#define CAM_CONTROL_NEG_CAM 0x00000008
+#define CAM_CONTROL_BROAD_ACC 0x00000004
+#define CAM_CONTROL_GROUP_ACC 0x00000002
+#define CAM_CONTROL_STATION_ACC 0x00000001
+
+/* TRANSMIT CONTROL register. */
+#define TX_CTL_EN_COMP 0x00004000
+#define TX_CTL_EN_TX_PAR 0x00002000
+#define TX_CTL_EN_LATE_COLL 0x00001000
+#define TX_CTL_EN_EX_COLL 0x00000800
+#define TX_CTL_EN_L_CARR 0x00000400
+#define TX_CTL_EN_EX_DEFER 0x00000200
+#define TX_CTL_EN_UNDER 0x00000100
+#define TX_CTL_MII10 0x00000080
+#define TX_CTL_SD_PAUSE 0x00000040
+#define TX_CTL_NO_EX_DEF0 0x00000020
+#define TX_CTL_F_BACK 0x00000010
+#define TX_CTL_NO_CRC 0x00000008
+#define TX_CTL_NO_PAD 0x00000004
+#define TX_CTL_TX_HALT 0x00000002
+#define TX_CTL_TX_EN 0x00000001
+
+/* TRANSMIT STATUS register. */
+#define TX_STATUS_SQ_ERR 0x00010000
+#define TX_STATUS_TX_HALTED 0x00008000
+#define TX_STATUS_COMP 0x00004000
+#define TX_STATUS_TX_PAR 0x00002000
+#define TX_STATUS_LATE_COLL 0x00001000
+#define TX_STATUS_TX10_STAT 0x00000800
+#define TX_STATUS_L_CARR 0x00000400
+#define TX_STATUS_EX_DEFER 0x00000200
+#define TX_STATUS_UNDER 0x00000100
+#define TX_STATUS_IN_TX 0x00000080
+#define TX_STATUS_PAUSED 0x00000040
+#define TX_STATUS_TX_DEFERRED 0x00000020
+#define TX_STATUS_EX_COLL 0x00000010
+#define TX_STATUS_TX_COLL_SHF 0
+#define TX_STATUS_TX_COLL_MSK (0xf << TX_STATUS_TX_COLL_SHF)
+
+/* RECEIVE CONTROL register. */
+#define RX_CTL_EN_GOOD 0x00004000
+#define RX_CTL_EN_RX_PAR 0x00002000
+#define RX_CTL_EN_LONG_ERR 0x00000800
+#define RX_CTL_EN_OVER 0x00000400
+#define RX_CTL_EN_CRC_ERR 0x00000200
+#define RX_CTL_EN_ALIGN 0x00000100
+#define RX_CTL_IGNORE_CRC 0x00000040
+#define RX_CTL_PASS_CTL 0x00000020
+#define RX_CTL_STRIP_CRC 0x00000010
+#define RX_CTL_SHORT_EN 0x00000008
+#define RX_CTL_LONG_EN 0x00000004
+#define RX_CTL_RX_HALT 0x00000002
+#define RX_CTL_RX_EN 0x00000001
+
+/* RECEIVE STATUS register. */
+#define RX_STATUS_RX_HALTED 0x00008000
+#define RX_STATUS_GOOD 0x00004000
+#define RX_STATUS_RX_PAR 0x00002000
+#define RX_STATUS_LONG_ERR 0x00000800
+#define RX_STATUS_OVERFLOW 0x00000400
+#define RX_STATUS_CRC_ERR 0x00000200
+#define RX_STATUS_ALIGN_ERR 0x00000100
+#define RX_STATUS_RX10_STAT 0x00000080
+#define RX_STATUS_INT_RX 0x00000040
+#define RX_STATUS_CTL_RECD 0x00000020
+
+/* MD_CA register. */
+#define MD_CA_PRE_SUP 0x00001000
+#define MD_CA_BUSY 0x00000800
+#define MD_CA_WR 0x00000400
+#define MD_CA_PHY_SHF 5
+#define MD_CA_PHY_MSK (0x1f << MD_CA_PHY_SHF)
+#define MD_CA_ADDR_SHF 0
+#define MD_CA_ADDR_MSK (0x1f << MD_CA_ADDR_SHF)
+
+/* Tx Status/Control. */
+#define TX_STAT_CTL_OWNER_SHF 30
+#define TX_STAT_CTL_OWNER_MSK (0x3 << TX_STAT_CTL_OWNER_SHF)
+#define TX_STAT_CTL_FRAME_SHF 27
+#define TX_STAT_CTL_FRAME_MSK (0x7 << TX_STAT_CTL_FRAME_SHF)
+#define TX_STAT_CTL_STATUS_SHF 11
+#define TX_STAT_CTL_STATUS_MSK (0x1ffff << TX_STAT_CTL_STATUS_SHF)
+#define TX_STAT_CTL_LENGTH_SHF 0
+#define TX_STAT_CTL_LENGTH_MSK (0x7ff << TX_STAT_CTL_LENGTH_SHF)
+
+#define TX_STAT_CTL_ERROR_MSK ((TX_STATUS_SQ_ERR | \
+ TX_STATUS_TX_HALTED | \
+ TX_STATUS_TX_PAR | \
+ TX_STATUS_LATE_COLL | \
+ TX_STATUS_L_CARR | \
+ TX_STATUS_EX_DEFER | \
+ TX_STATUS_UNDER | \
+ TX_STATUS_PAUSED | \
+ TX_STATUS_TX_DEFERRED | \
+ TX_STATUS_EX_COLL | \
+ TX_STATUS_TX_COLL_MSK) \
+ << TX_STAT_CTL_STATUS_SHF)
+#define TX_STAT_CTL_INT_AFTER_TX 0x4
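+
+/*
+ * Illustrative sketch (an editorial addition, not part of the original
+ * header): how a Tx status/control word is assembled from the fields
+ * above for a frame of 'len' bytes that is ready for the hardware and
+ * should raise an interrupt once sent.  This mirrors the value that
+ * lan_saa9730_write() stores in front of each packet buffer.
+ */
+static inline unsigned int lan_saa9730_build_tx_stat_ctl(unsigned int len)
+{
+	return (TXSF_READY << TX_STAT_CTL_OWNER_SHF) |
+	       (TX_STAT_CTL_INT_AFTER_TX << TX_STAT_CTL_FRAME_SHF) |
+	       ((len << TX_STAT_CTL_LENGTH_SHF) & TX_STAT_CTL_LENGTH_MSK);
+}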
+
+/* Rx Status/Control. */
+#define RX_STAT_CTL_OWNER_SHF 30
+#define RX_STAT_CTL_OWNER_MSK (0x3 << RX_STAT_CTL_OWNER_SHF)
+#define RX_STAT_CTL_STATUS_SHF 11
+#define RX_STAT_CTL_STATUS_MSK (0xffff << RX_STAT_CTL_STATUS_SHF)
+#define RX_STAT_CTL_LENGTH_SHF 0
+#define RX_STAT_CTL_LENGTH_MSK (0x7ff << RX_STAT_CTL_LENGTH_SHF)
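+
+/*
+ * Illustrative counterpart (also an editorial addition): pulling the
+ * frame length back out of an Rx status/control word with the mask and
+ * shift defined above.
+ */
+static inline unsigned int lan_saa9730_rx_stat_ctl_length(unsigned int stat_ctl)
+{
+	return (stat_ctl & RX_STAT_CTL_LENGTH_MSK) >> RX_STAT_CTL_LENGTH_SHF;
+}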
+
+
+
+/* The SAA9730 (LAN) controller register map, as seen via the PCI-bus. */
+#define SAA9730_LAN_REGS_ADDR 0x20400
+
+struct lan_saa9730_regmap {
+ volatile unsigned int TxBuffA; /* 0x20400 */
+ volatile unsigned int TxBuffB; /* 0x20404 */
+ volatile unsigned int RxBuffA; /* 0x20408 */
+ volatile unsigned int RxBuffB; /* 0x2040c */
+ volatile unsigned int PacketCount; /* 0x20410 */
+ volatile unsigned int Ok2Use; /* 0x20414 */
+ volatile unsigned int LanDmaCtl; /* 0x20418 */
+ volatile unsigned int Timeout; /* 0x2041c */
+ volatile unsigned int DmaStatus; /* 0x20420 */
+ volatile unsigned int DmaTest; /* 0x20424 */
+ volatile unsigned char filler20428[0x20430 - 0x20428];
+ volatile unsigned int PauseCount; /* 0x20430 */
+ volatile unsigned int RemotePauseCount; /* 0x20434 */
+ volatile unsigned char filler20438[0x20440 - 0x20438];
+ volatile unsigned int MacCtl; /* 0x20440 */
+ volatile unsigned int CamCtl; /* 0x20444 */
+ volatile unsigned int TxCtl; /* 0x20448 */
+ volatile unsigned int TxStatus; /* 0x2044c */
+ volatile unsigned int RxCtl; /* 0x20450 */
+ volatile unsigned int RxStatus; /* 0x20454 */
+ volatile unsigned int StationMgmtData; /* 0x20458 */
+ volatile unsigned int StationMgmtCtl; /* 0x2045c */
+ volatile unsigned int CamAddress; /* 0x20460 */
+ volatile unsigned int CamData; /* 0x20464 */
+ volatile unsigned int CamEnable; /* 0x20468 */
+ volatile unsigned char filler2046c[0x20500 - 0x2046c];
+ volatile unsigned int DebugPCIMasterAddr; /* 0x20500 */
+ volatile unsigned int DebugLanTxStateMachine; /* 0x20504 */
+ volatile unsigned int DebugLanRxStateMachine; /* 0x20508 */
+ volatile unsigned int DebugLanTxFifoPointers; /* 0x2050c */
+ volatile unsigned int DebugLanRxFifoPointers; /* 0x20510 */
+ volatile unsigned int DebugLanCtlStateMachine; /* 0x20514 */
+};
+typedef volatile struct lan_saa9730_regmap t_lan_saa9730_regmap;
+
+
+/* EVM interrupt control registers. */
+#define EVM_LAN_INT 0x00010000
+#define EVM_MASTER_EN 0x00000001
+
+/* The SAA9730 (EVM) controller register map, as seen via the PCI-bus. */
+#define SAA9730_EVM_REGS_ADDR 0x02000
+
+struct evm_saa9730_regmap {
+ volatile unsigned int InterruptStatus1; /* 0x2000 */
+ volatile unsigned int InterruptEnable1; /* 0x2004 */
+ volatile unsigned int InterruptMonitor1; /* 0x2008 */
+ volatile unsigned int Counter; /* 0x200c */
+ volatile unsigned int CounterThreshold; /* 0x2010 */
+ volatile unsigned int CounterControl; /* 0x2014 */
+ volatile unsigned int GpioControl1; /* 0x2018 */
+ volatile unsigned int InterruptStatus2; /* 0x201c */
+ volatile unsigned int InterruptEnable2; /* 0x2020 */
+ volatile unsigned int InterruptMonitor2; /* 0x2024 */
+ volatile unsigned int GpioControl2; /* 0x2028 */
+ volatile unsigned int InterruptBlock1; /* 0x202c */
+ volatile unsigned int InterruptBlock2; /* 0x2030 */
+};
+typedef volatile struct evm_saa9730_regmap t_evm_saa9730_regmap;
+
+
+struct lan_saa9730_private {
+ /* Pointer for the SAA9730 LAN controller register set. */
+ t_lan_saa9730_regmap *lan_saa9730_regs;
+
+ /* Pointer to the SAA9730 EVM register. */
+ t_evm_saa9730_regmap *evm_saa9730_regs;
+
+	/* TRUE if the next Rcv buffer to use is RxBuffA, FALSE if RxBuffB. */
+ unsigned char NextRcvToUseIsA;
+ /* Rcv buffer Index. */
+ unsigned char NextRcvPacketIndex;
+
+ /* Index of next packet to use in that buffer. */
+ unsigned char NextTxmPacketIndex;
+ /* Next buffer index. */
+ unsigned char NextTxmBufferIndex;
+
+ /* Index of first pending packet ready to send. */
+ unsigned char PendingTxmPacketIndex;
+ /* Pending buffer index. */
+ unsigned char PendingTxmBufferIndex;
+
+ unsigned char DmaRcvPackets;
+ unsigned char DmaTxmPackets;
+
+ unsigned char RcvAIndex; /* index into RcvBufferSpace[] for Blk A */
+ unsigned char RcvBIndex; /* index into RcvBufferSpace[] for Blk B */
+
+ unsigned int
+ TxmBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_TXM_Q_SIZE];
+ unsigned int
+ RcvBuffer[LAN_SAA9730_BUFFERS][LAN_SAA9730_RCV_Q_SIZE];
+ unsigned int TxBufferFree[LAN_SAA9730_BUFFERS];
+
+ unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6];
+
+ struct net_device_stats stats;
+ spinlock_t lock;
+};
+
+#endif /* _SAA9730_H */
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
new file mode 100644
index 000000000000..e15369c8d165
--- /dev/null
+++ b/drivers/net/sb1000.c
@@ -0,0 +1,1202 @@
+/* sb1000.c: A General Instruments SB1000 driver for linux. */
+/*
+ Written 1998 by Franco Venturi.
+
+ Copyright 1998 by Franco Venturi.
+ Copyright 1994,1995 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This driver is for the General Instruments SB1000 (internal SURFboard)
+
+ The author may be reached as fventuri@mediaone.net
+
+ This program is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General
+ Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at
+ your option) any later version.
+
+ Changes:
+
+ 981115 Steven Hirsch <shirsch@adelphia.net>
+
+ Linus changed the timer interface. Should work on all recent
+ development kernels.
+
+ 980608 Steven Hirsch <shirsch@adelphia.net>
+
+ Small changes to make it work with 2.1.x kernels. Hopefully,
+ nothing major will change before official release of Linux 2.2.
+
+ Merged with 2.2 - Alan Cox
+*/
+
+static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h> /* for udelay() */
+#include <linux/etherdevice.h>
+#include <linux/pnp.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+#ifdef SB1000_DEBUG
+static int sb1000_debug = SB1000_DEBUG;
+#else
+static int sb1000_debug = 1;
+#endif
+
+static const int SB1000_IO_EXTENT = 8;
+/* SB1000 Maximum Receive Unit */
+static const int SB1000_MRU = 1500; /* octets */
+
+#define NPIDS 4
+struct sb1000_private {
+ struct sk_buff *rx_skb[NPIDS];
+ short rx_dlen[NPIDS];
+ unsigned int rx_frames;
+ short rx_error_count;
+ short rx_error_dpc_count;
+ unsigned char rx_session_id[NPIDS];
+ unsigned char rx_frame_id[NPIDS];
+ unsigned char rx_pkt_type[NPIDS];
+ struct net_device_stats stats;
+};
+
+/* prototypes for Linux interface */
+extern int sb1000_probe(struct net_device *dev);
+static int sb1000_open(struct net_device *dev);
+static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
+static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t sb1000_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats *sb1000_stats(struct net_device *dev);
+static int sb1000_close(struct net_device *dev);
+
+
+/* SB1000 hardware routines to be used during open/configuration phases */
+static inline void nicedelay(unsigned long usecs);
+static inline int card_wait_for_busy_clear(const int ioaddr[],
+ const char* name);
+static inline int card_wait_for_ready(const int ioaddr[], const char* name,
+ unsigned char in[]);
+static inline int card_send_command(const int ioaddr[], const char* name,
+ const unsigned char out[], unsigned char in[]);
+
+/* SB1000 hardware routines to be used during frame rx interrupt */
+static inline int sb1000_wait_for_ready(const int ioaddr[], const char* name);
+static inline int sb1000_wait_for_ready_clear(const int ioaddr[],
+ const char* name);
+static inline void sb1000_send_command(const int ioaddr[], const char* name,
+ const unsigned char out[]);
+static inline void sb1000_read_status(const int ioaddr[], unsigned char in[]);
+static inline void sb1000_issue_read_command(const int ioaddr[],
+ const char* name);
+
+/* SB1000 commands for open/configuration */
+static inline int sb1000_reset(const int ioaddr[], const char* name);
+static inline int sb1000_check_CRC(const int ioaddr[], const char* name);
+static inline int sb1000_start_get_set_command(const int ioaddr[],
+ const char* name);
+static inline int sb1000_end_get_set_command(const int ioaddr[],
+ const char* name);
+static inline int sb1000_activate(const int ioaddr[], const char* name);
+static int sb1000_get_firmware_version(const int ioaddr[],
+ const char* name, unsigned char version[], int do_end);
+static int sb1000_get_frequency(const int ioaddr[], const char* name,
+ int* frequency);
+static int sb1000_set_frequency(const int ioaddr[], const char* name,
+ int frequency);
+static int sb1000_get_PIDs(const int ioaddr[], const char* name,
+ short PID[]);
+static int sb1000_set_PIDs(const int ioaddr[], const char* name,
+ const short PID[]);
+
+/* SB1000 commands for frame rx interrupt */
+static inline int sb1000_rx(struct net_device *dev);
+static inline void sb1000_error_dpc(struct net_device *dev);
+
+static const struct pnp_device_id sb1000_pnp_ids[] = {
+ { "GIC1000", 0 },
+ { "", 0 }
+};
+MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
+
+static int
+sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
+{
+ struct net_device *dev;
+ unsigned short ioaddr[2], irq;
+ unsigned int serial_number;
+ int error = -ENODEV;
+
+ if (pnp_device_attach(pdev) < 0)
+ return -ENODEV;
+ if (pnp_activate_dev(pdev) < 0)
+ goto out_detach;
+
+ if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1))
+ goto out_disable;
+ if (!pnp_irq_valid(pdev, 0))
+ goto out_disable;
+
+ serial_number = pdev->card->serial;
+
+ ioaddr[0] = pnp_port_start(pdev, 0);
+	ioaddr[1] = pnp_port_start(pdev, 1);
+
+ irq = pnp_irq(pdev, 0);
+
+ if (!request_region(ioaddr[0], 16, "sb1000"))
+ goto out_disable;
+ if (!request_region(ioaddr[1], 16, "sb1000"))
+ goto out_release_region0;
+
+ dev = alloc_etherdev(sizeof(struct sb1000_private));
+ if (!dev) {
+ error = -ENOMEM;
+ goto out_release_regions;
+ }
+
+
+ dev->base_addr = ioaddr[0];
+ /* mem_start holds the second I/O address */
+ dev->mem_start = ioaddr[1];
+ dev->irq = irq;
+
+ if (sb1000_debug > 0)
+ printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), "
+ "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr,
+ dev->mem_start, serial_number, dev->irq);
+
+ /*
+ * The SB1000 is an rx-only cable modem device. The uplink is a modem
+ * and we do not want to arp on it.
+ */
+ dev->flags = IFF_POINTOPOINT|IFF_NOARP;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (sb1000_debug > 0)
+ printk(KERN_NOTICE "%s", version);
+
+ /* The SB1000-specific entries in the device structure. */
+ dev->open = sb1000_open;
+ dev->do_ioctl = sb1000_dev_ioctl;
+ dev->hard_start_xmit = sb1000_start_xmit;
+ dev->stop = sb1000_close;
+ dev->get_stats = sb1000_stats;
+
+ /* hardware address is 0:0:serial_number */
+ dev->dev_addr[2] = serial_number >> 24 & 0xff;
+ dev->dev_addr[3] = serial_number >> 16 & 0xff;
+ dev->dev_addr[4] = serial_number >> 8 & 0xff;
+ dev->dev_addr[5] = serial_number >> 0 & 0xff;
+
+ pnp_set_drvdata(pdev, dev);
+
+ error = register_netdev(dev);
+ if (error)
+ goto out_free_netdev;
+ return 0;
+
+ out_free_netdev:
+ free_netdev(dev);
+ out_release_regions:
+ release_region(ioaddr[1], 16);
+ out_release_region0:
+ release_region(ioaddr[0], 16);
+ out_disable:
+ pnp_disable_dev(pdev);
+ out_detach:
+ pnp_device_detach(pdev);
+ return error;
+}
+
+static void
+sb1000_remove_one(struct pnp_dev *pdev)
+{
+ struct net_device *dev = pnp_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, 16);
+ release_region(dev->mem_start, 16);
+ free_netdev(dev);
+}
+
+static struct pnp_driver sb1000_driver = {
+ .name = "sb1000",
+ .id_table = sb1000_pnp_ids,
+ .probe = sb1000_probe_one,
+ .remove = sb1000_remove_one,
+};
+
+
+/*
+ * SB1000 hardware routines to be used during open/configuration phases
+ */
+
+static const int TimeOutJiffies = (875 * HZ) / 100;
+
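+/*
+ * Note that the 'usecs' argument below is effectively ignored: whatever
+ * value is passed in, nicedelay() sleeps for about one second (HZ
+ * jiffies) in TASK_INTERRUPTIBLE state.
+ */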
+static inline void nicedelay(unsigned long usecs)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ);
+ return;
+}
+
+/* Card Wait For Busy Clear (cannot be used during an interrupt) */
+static inline int
+card_wait_for_busy_clear(const int ioaddr[], const char* name)
+{
+ unsigned char a;
+ unsigned long timeout;
+
+ a = inb(ioaddr[0] + 7);
+ timeout = jiffies + TimeOutJiffies;
+ while (a & 0x80 || a & 0x40) {
+ /* a little sleep */
+ yield();
+
+ a = inb(ioaddr[0] + 7);
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+
+ return 0;
+}
+
+/* Card Wait For Ready (cannot be used during an interrupt) */
+static inline int
+card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
+{
+ unsigned char a;
+ unsigned long timeout;
+
+ a = inb(ioaddr[1] + 6);
+ timeout = jiffies + TimeOutJiffies;
+ while (a & 0x80 || !(a & 0x40)) {
+ /* a little sleep */
+ yield();
+
+ a = inb(ioaddr[1] + 6);
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: card_wait_for_ready timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+
+ in[1] = inb(ioaddr[0] + 1);
+ in[2] = inb(ioaddr[0] + 2);
+ in[3] = inb(ioaddr[0] + 3);
+ in[4] = inb(ioaddr[0] + 4);
+ in[0] = inb(ioaddr[0] + 5);
+ in[6] = inb(ioaddr[0] + 6);
+ in[5] = inb(ioaddr[1] + 6);
+ return 0;
+}
+
+/* Card Send Command (cannot be used during an interrupt) */
+static inline int
+card_send_command(const int ioaddr[], const char* name,
+ const unsigned char out[], unsigned char in[])
+{
+ int status, x;
+
+ if ((status = card_wait_for_busy_clear(ioaddr, name)))
+ return status;
+ outb(0xa0, ioaddr[0] + 6);
+ outb(out[2], ioaddr[0] + 1);
+ outb(out[3], ioaddr[0] + 2);
+ outb(out[4], ioaddr[0] + 3);
+ outb(out[5], ioaddr[0] + 4);
+ outb(out[1], ioaddr[0] + 5);
+ outb(0xa0, ioaddr[0] + 6);
+ outb(out[0], ioaddr[0] + 7);
+ if (out[0] != 0x20 && out[0] != 0x30) {
+ if ((status = card_wait_for_ready(ioaddr, name, in)))
+ return status;
+ inb(ioaddr[0] + 7);
+ if (sb1000_debug > 3)
+ printk(KERN_DEBUG "%s: card_send_command "
+ "out: %02x%02x%02x%02x%02x%02x "
+ "in: %02x%02x%02x%02x%02x%02x%02x\n", name,
+ out[0], out[1], out[2], out[3], out[4], out[5],
+ in[0], in[1], in[2], in[3], in[4], in[5], in[6]);
+ } else {
+ if (sb1000_debug > 3)
+ printk(KERN_DEBUG "%s: card_send_command "
+ "out: %02x%02x%02x%02x%02x%02x\n", name,
+ out[0], out[1], out[2], out[3], out[4], out[5]);
+ }
+
+ if (out[1] == 0x1b) {
+ x = (out[2] == 0x02);
+ } else {
+ if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
+ return -EIO;
+ }
+ return 0;
+}
+
+
+/*
+ * SB1000 hardware routines to be used during frame rx interrupt
+ */
+static const int Sb1000TimeOutJiffies = 7 * HZ;
+
+/* Card Wait For Ready (to be used during frame rx) */
+static inline int
+sb1000_wait_for_ready(const int ioaddr[], const char* name)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + Sb1000TimeOutJiffies;
+ while (inb(ioaddr[1] + 6) & 0x80) {
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+ timeout = jiffies + Sb1000TimeOutJiffies;
+ while (!(inb(ioaddr[1] + 6) & 0x40)) {
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+ inb(ioaddr[0] + 7);
+ return 0;
+}
+
+/* Card Wait For Ready Clear (to be used during frame rx) */
+static inline int
+sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + Sb1000TimeOutJiffies;
+ while (inb(ioaddr[1] + 6) & 0x80) {
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+ timeout = jiffies + Sb1000TimeOutJiffies;
+ while (inb(ioaddr[1] + 6) & 0x40) {
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
+ name);
+ return -ETIME;
+ }
+ }
+ return 0;
+}
+
+/* Card Send Command (to be used during frame rx) */
+static inline void
+sb1000_send_command(const int ioaddr[], const char* name,
+ const unsigned char out[])
+{
+ outb(out[2], ioaddr[0] + 1);
+ outb(out[3], ioaddr[0] + 2);
+ outb(out[4], ioaddr[0] + 3);
+ outb(out[5], ioaddr[0] + 4);
+ outb(out[1], ioaddr[0] + 5);
+ outb(out[0], ioaddr[0] + 7);
+ if (sb1000_debug > 3)
+ printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
+ "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
+ return;
+}
+
+/* Card Read Status (to be used during frame rx) */
+static inline void
+sb1000_read_status(const int ioaddr[], unsigned char in[])
+{
+ in[1] = inb(ioaddr[0] + 1);
+ in[2] = inb(ioaddr[0] + 2);
+ in[3] = inb(ioaddr[0] + 3);
+ in[4] = inb(ioaddr[0] + 4);
+ in[0] = inb(ioaddr[0] + 5);
+ return;
+}
+
+/* Issue Read Command (to be used during frame rx) */
+static inline void
+sb1000_issue_read_command(const int ioaddr[], const char* name)
+{
+ const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
+
+ sb1000_wait_for_ready_clear(ioaddr, name);
+ outb(0xa0, ioaddr[0] + 6);
+ sb1000_send_command(ioaddr, name, Command0);
+ return;
+}
+
+
+/*
+ * SB1000 commands for open/configuration
+ */
+/* reset SB1000 card */
+static inline int
+sb1000_reset(const int ioaddr[], const char* name)
+{
+ unsigned char st[7];
+ int port, status;
+ const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
+
+ port = ioaddr[1] + 6;
+ outb(0x4, port);
+ inb(port);
+ udelay(1000);
+ outb(0x0, port);
+ inb(port);
+ nicedelay(60000);
+ outb(0x4, port);
+ inb(port);
+ udelay(1000);
+ outb(0x0, port);
+ inb(port);
+ udelay(0);
+
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ if (st[3] != 0xf0)
+ return -EIO;
+ return 0;
+}
+
+/* check SB1000 firmware CRC */
+static inline int
+sb1000_check_CRC(const int ioaddr[], const char* name)
+{
+ unsigned char st[7];
+ int crc, status;
+ const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
+
+ /* check CRC */
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ if (st[1] != st[3] || st[2] != st[4])
+ return -EIO;
+ crc = st[1] << 8 | st[2];
+ return 0;
+}
+
+static inline int
+sb1000_start_get_set_command(const int ioaddr[], const char* name)
+{
+ unsigned char st[7];
+ const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
+
+ return card_send_command(ioaddr, name, Command0, st);
+}
+
+static inline int
+sb1000_end_get_set_command(const int ioaddr[], const char* name)
+{
+ unsigned char st[7];
+ int status;
+ const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
+ const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ return card_send_command(ioaddr, name, Command1, st);
+}
+
+static inline int
+sb1000_activate(const int ioaddr[], const char* name)
+{
+ unsigned char st[7];
+ int status;
+ const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
+
+ nicedelay(50000);
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ if ((status = card_send_command(ioaddr, name, Command1, st)))
+ return status;
+ if (st[3] != 0xf1) {
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+ return -EIO;
+ }
+ udelay(1000);
+ return sb1000_start_get_set_command(ioaddr, name);
+}
+
+/* get SB1000 firmware version */
+static int
+sb1000_get_firmware_version(const int ioaddr[], const char* name,
+ unsigned char version[], int do_end)
+{
+ unsigned char st[7];
+ int status;
+ const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
+
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ if (st[0] != 0xa3)
+ return -EIO;
+ version[0] = st[1];
+ version[1] = st[2];
+ if (do_end)
+ return sb1000_end_get_set_command(ioaddr, name);
+ else
+ return 0;
+}
+
+/* get SB1000 frequency */
+static int
+sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
+{
+ unsigned char st[7];
+ int status;
+ const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
+
+ udelay(1000);
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4];
+ return sb1000_end_get_set_command(ioaddr, name);
+}
+
+/* set SB1000 frequency */
+static int
+sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
+{
+ unsigned char st[7];
+ int status;
+ unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00};
+
+ const int FrequencyLowerLimit = 57000;
+ const int FrequencyUpperLimit = 804000;
+
+ if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) {
+ printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range "
+ "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit,
+ FrequencyUpperLimit);
+ return -EINVAL;
+ }
+ udelay(1000);
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+ Command0[5] = frequency & 0xff;
+ frequency >>= 8;
+ Command0[4] = frequency & 0xff;
+ frequency >>= 8;
+ Command0[3] = frequency & 0xff;
+ frequency >>= 8;
+ Command0[2] = frequency & 0xff;
+ return card_send_command(ioaddr, name, Command0, st);
+}
+
+/* get SB1000 PIDs */
+static int
+sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
+{
+ unsigned char st[7];
+ int status;
+ const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
+
+ udelay(1000);
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+ PID[0] = st[1] << 8 | st[2];
+
+ if ((status = card_send_command(ioaddr, name, Command1, st)))
+ return status;
+ PID[1] = st[1] << 8 | st[2];
+
+ if ((status = card_send_command(ioaddr, name, Command2, st)))
+ return status;
+ PID[2] = st[1] << 8 | st[2];
+
+ if ((status = card_send_command(ioaddr, name, Command3, st)))
+ return status;
+ PID[3] = st[1] << 8 | st[2];
+
+ return sb1000_end_get_set_command(ioaddr, name);
+}
+
+/* set SB1000 PIDs */
+static int
+sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
+{
+ unsigned char st[7];
+ short p;
+ int status;
+ unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00};
+ unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
+ unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
+ unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
+
+ udelay(1000);
+ if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ return status;
+
+ p = PID[0];
+ Command0[3] = p & 0xff;
+ p >>= 8;
+ Command0[2] = p & 0xff;
+ if ((status = card_send_command(ioaddr, name, Command0, st)))
+ return status;
+
+ p = PID[1];
+ Command1[3] = p & 0xff;
+ p >>= 8;
+ Command1[2] = p & 0xff;
+ if ((status = card_send_command(ioaddr, name, Command1, st)))
+ return status;
+
+ p = PID[2];
+ Command2[3] = p & 0xff;
+ p >>= 8;
+ Command2[2] = p & 0xff;
+ if ((status = card_send_command(ioaddr, name, Command2, st)))
+ return status;
+
+ p = PID[3];
+ Command3[3] = p & 0xff;
+ p >>= 8;
+ Command3[2] = p & 0xff;
+ if ((status = card_send_command(ioaddr, name, Command3, st)))
+ return status;
+
+ if ((status = card_send_command(ioaddr, name, Command4, st)))
+ return status;
+ return sb1000_end_get_set_command(ioaddr, name);
+}
+
+
+static inline void
+sb1000_print_status_buffer(const char* name, unsigned char st[],
+ unsigned char buffer[], int size)
+{
+ int i, j, k;
+
+ printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]);
+ if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) {
+ printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d "
+ "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29],
+ buffer[35], buffer[38], buffer[39], buffer[40], buffer[41],
+ buffer[46] << 8 | buffer[47],
+ buffer[42], buffer[43], buffer[44], buffer[45],
+ buffer[48] << 8 | buffer[49]);
+ } else {
+ for (i = 0, k = 0; i < (size + 7) / 8; i++) {
+ printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:");
+ for (j = 0; j < 8 && k < size; j++, k++)
+ printk(" %02x", buffer[k]);
+ printk("\n");
+ }
+ }
+ return;
+}
+
+/*
+ * SB1000 commands for frame rx interrupt
+ */
+/* receive a single frame and assemble datagram
+ * (this is the heart of the interrupt routine)
+ */
+static inline int
+sb1000_rx(struct net_device *dev)
+{
+
+#define FRAMESIZE 184
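+/*
+ * 184 bytes is the payload of a 188-byte MPEG-2 transport stream packet
+ * once the 4-byte TS header is stripped, which fits the PID-based
+ * filtering used elsewhere in this driver (editorial observation).
+ */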
+ unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
+ short dlen;
+ int ioaddr, ns;
+ unsigned int skbsize;
+ struct sk_buff *skb;
+ struct sb1000_private *lp = netdev_priv(dev);
+ struct net_device_stats *stats = &lp->stats;
+
+ /* SB1000 frame constants */
+ const int FrameSize = FRAMESIZE;
+ const int NewDatagramHeaderSkip = 8;
+ const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
+ const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
+ const int ContDatagramHeaderSkip = 7;
+ const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
+ const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
+ const int TrailerSize = 4;
+
+ ioaddr = dev->base_addr;
+
+ insw(ioaddr, (unsigned short*) st, 1);
+#ifdef XXXDEBUG
+printk("cm0: received: %02x %02x\n", st[0], st[1]);
+#endif /* XXXDEBUG */
+ lp->rx_frames++;
+
+ /* decide if it is a good or bad frame */
+ for (ns = 0; ns < NPIDS; ns++) {
+ session_id = lp->rx_session_id[ns];
+ frame_id = lp->rx_frame_id[ns];
+ if (st[0] == session_id) {
+ if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
+ goto good_frame;
+ } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
+ goto skipped_frame;
+ } else {
+ goto bad_frame;
+ }
+ } else if (st[0] == (session_id | 0x40)) {
+ if ((st[1] & 0xf0) == 0x30) {
+ goto skipped_frame;
+ } else {
+ goto bad_frame;
+ }
+ }
+ }
+ goto bad_frame;
+
+skipped_frame:
+ stats->rx_frame_errors++;
+ skb = lp->rx_skb[ns];
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
+ "expecting %02x %02x\n", dev->name, st[0], st[1],
+ skb ? session_id : session_id | 0x40, frame_id);
+ if (skb) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+
+good_frame:
+ lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
+ /* new datagram */
+ if (st[0] & 0x40) {
+ /* get data length */
+ insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
+#ifdef XXXDEBUG
+printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
+#endif /* XXXDEBUG */
+ if (buffer[0] != NewDatagramHeaderSkip) {
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: new datagram header skip error: "
+ "got %02x expecting %02x\n", dev->name, buffer[0],
+ NewDatagramHeaderSkip);
+ stats->rx_length_errors++;
+ insw(ioaddr, buffer, NewDatagramDataSize / 2);
+ goto bad_frame_next;
+ }
+ dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
+ buffer[NewDatagramHeaderSkip + 4]) - 17;
+ if (dlen > SB1000_MRU) {
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: datagram length (%d) greater "
+ "than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
+ stats->rx_length_errors++;
+ insw(ioaddr, buffer, NewDatagramDataSize / 2);
+ goto bad_frame_next;
+ }
+ lp->rx_dlen[ns] = dlen;
+ /* compute size to allocate for datagram */
+ skbsize = dlen + FrameSize;
+ if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: can't allocate %d bytes long "
+ "skbuff\n", dev->name, skbsize);
+ stats->rx_dropped++;
+ insw(ioaddr, buffer, NewDatagramDataSize / 2);
+ goto dropped_frame;
+ }
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
+ insw(ioaddr, skb_put(skb, NewDatagramDataSize),
+ NewDatagramDataSize / 2);
+ lp->rx_skb[ns] = skb;
+ } else {
+ /* continuation of previous datagram */
+ insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
+ if (buffer[0] != ContDatagramHeaderSkip) {
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: cont datagram header skip error: "
+ "got %02x expecting %02x\n", dev->name, buffer[0],
+ ContDatagramHeaderSkip);
+ stats->rx_length_errors++;
+ insw(ioaddr, buffer, ContDatagramDataSize / 2);
+ goto bad_frame_next;
+ }
+ skb = lp->rx_skb[ns];
+ insw(ioaddr, skb_put(skb, ContDatagramDataSize),
+ ContDatagramDataSize / 2);
+ dlen = lp->rx_dlen[ns];
+ }
+ if (skb->len < dlen + TrailerSize) {
+ lp->rx_session_id[ns] &= ~0x40;
+ return 0;
+ }
+
+ /* datagram completed: send to upper level */
+ skb_trim(skb, dlen);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ stats->rx_bytes+=dlen;
+ stats->rx_packets++;
+ lp->rx_skb[ns] = NULL;
+ lp->rx_session_id[ns] |= 0x40;
+ return 0;
+
+bad_frame:
+ insw(ioaddr, buffer, FrameSize / 2);
+ if (sb1000_debug > 1)
+ printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
+ dev->name, st[0], st[1]);
+ stats->rx_frame_errors++;
+bad_frame_next:
+ if (sb1000_debug > 2)
+ sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
+dropped_frame:
+ stats->rx_errors++;
+ if (ns < NPIDS) {
+ if ((skb = lp->rx_skb[ns])) {
+ dev_kfree_skb(skb);
+ lp->rx_skb[ns] = NULL;
+ }
+ lp->rx_session_id[ns] |= 0x40;
+ }
+ return -1;
+}
+
+static inline void
+sb1000_error_dpc(struct net_device *dev)
+{
+ char *name;
+ unsigned char st[5];
+ int ioaddr[2];
+ struct sb1000_private *lp = netdev_priv(dev);
+ const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
+ const int ErrorDpcCounterInitialize = 200;
+
+ ioaddr[0] = dev->base_addr;
+ /* mem_start holds the second I/O address */
+ ioaddr[1] = dev->mem_start;
+ name = dev->name;
+
+ sb1000_wait_for_ready_clear(ioaddr, name);
+ sb1000_send_command(ioaddr, name, Command0);
+ sb1000_wait_for_ready(ioaddr, name);
+ sb1000_read_status(ioaddr, st);
+ if (st[1] & 0x10)
+ lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
+ return;
+}
+
+
+/*
+ * Linux interface functions
+ */
+static int
+sb1000_open(struct net_device *dev)
+{
+ char *name;
+ int ioaddr[2], status;
+ struct sb1000_private *lp = netdev_priv(dev);
+ const unsigned short FirmwareVersion[] = {0x01, 0x01};
+
+ ioaddr[0] = dev->base_addr;
+ /* mem_start holds the second I/O address */
+ ioaddr[1] = dev->mem_start;
+ name = dev->name;
+
+ /* initialize sb1000 */
+ if ((status = sb1000_reset(ioaddr, name)))
+ return status;
+ nicedelay(200000);
+ if ((status = sb1000_check_CRC(ioaddr, name)))
+ return status;
+
+ /* initialize private data before board can catch interrupts */
+ lp->rx_skb[0] = NULL;
+ lp->rx_skb[1] = NULL;
+ lp->rx_skb[2] = NULL;
+ lp->rx_skb[3] = NULL;
+ lp->rx_dlen[0] = 0;
+ lp->rx_dlen[1] = 0;
+ lp->rx_dlen[2] = 0;
+ lp->rx_dlen[3] = 0;
+ lp->rx_frames = 0;
+ lp->rx_error_count = 0;
+ lp->rx_error_dpc_count = 0;
+	lp->rx_session_id[0] = 0x50;
+	lp->rx_session_id[1] = 0x48;
+	lp->rx_session_id[2] = 0x44;
+	lp->rx_session_id[3] = 0x42;
+ lp->rx_frame_id[0] = 0;
+ lp->rx_frame_id[1] = 0;
+ lp->rx_frame_id[2] = 0;
+ lp->rx_frame_id[3] = 0;
+ if (request_irq(dev->irq, &sb1000_interrupt, 0, "sb1000", dev)) {
+ return -EAGAIN;
+ }
+
+ if (sb1000_debug > 2)
+ printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);
+
+ /* Activate board and check firmware version */
+ udelay(1000);
+ if ((status = sb1000_activate(ioaddr, name)))
+ return status;
+ udelay(0);
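+	/*
+	 * No local 'version' buffer is declared in this function, so the
+	 * call below writes the two firmware version bytes into the
+	 * file-scope version[] banner string defined near the top of
+	 * this file.
+	 */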
+ if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
+ return status;
+ if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
+ printk(KERN_WARNING "%s: found firmware version %x.%02x "
+ "(should be %x.%02x)\n", name, version[0], version[1],
+ FirmwareVersion[0], FirmwareVersion[1]);
+
+
+ netif_start_queue(dev);
+ return 0; /* Always succeed */
+}
+
+static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ char* name;
+ unsigned char version[2];
+ short PID[4];
+ int ioaddr[2], status, frequency;
+ unsigned int stats[5];
+ struct sb1000_private *lp = netdev_priv(dev);
+
+ if (!(dev && dev->flags & IFF_UP))
+ return -ENODEV;
+
+ ioaddr[0] = dev->base_addr;
+ /* mem_start holds the second I/O address */
+ ioaddr[1] = dev->mem_start;
+ name = dev->name;
+
+ switch (cmd) {
+ case SIOCGCMSTATS: /* get statistics */
+ stats[0] = lp->stats.rx_bytes;
+ stats[1] = lp->rx_frames;
+ stats[2] = lp->stats.rx_packets;
+ stats[3] = lp->stats.rx_errors;
+ stats[4] = lp->stats.rx_dropped;
+ if(copy_to_user(ifr->ifr_data, stats, sizeof(stats)))
+ return -EFAULT;
+ status = 0;
+ break;
+
+ case SIOCGCMFIRMWARE: /* get firmware version */
+ if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
+ return status;
+ if(copy_to_user(ifr->ifr_data, version, sizeof(version)))
+ return -EFAULT;
+ break;
+
+ case SIOCGCMFREQUENCY: /* get frequency */
+ if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
+ return status;
+ if(put_user(frequency, (int __user *) ifr->ifr_data))
+ return -EFAULT;
+ break;
+
+ case SIOCSCMFREQUENCY: /* set frequency */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if(get_user(frequency, (int __user *) ifr->ifr_data))
+ return -EFAULT;
+ if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
+ return status;
+ break;
+
+ case SIOCGCMPIDS: /* get PIDs */
+ if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
+ return status;
+ if(copy_to_user(ifr->ifr_data, PID, sizeof(PID)))
+ return -EFAULT;
+ break;
+
+ case SIOCSCMPIDS: /* set PIDs */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if(copy_from_user(PID, ifr->ifr_data, sizeof(PID)))
+ return -EFAULT;
+ if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
+ return status;
+ /* set session_id, frame_id and pkt_type too */
+ lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f);
+ lp->rx_session_id[1] = 0x48;
+ lp->rx_session_id[2] = 0x44;
+ lp->rx_session_id[3] = 0x42;
+ lp->rx_frame_id[0] = 0;
+ lp->rx_frame_id[1] = 0;
+ lp->rx_frame_id[2] = 0;
+ lp->rx_frame_id[3] = 0;
+ break;
+
+ default:
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
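+
+/*
+ * Illustrative userspace sketch (an editorial addition, not part of this
+ * driver): querying the tuner frequency through the SIOCGCMFREQUENCY
+ * ioctl handled above.  It is wrapped in #if 0 so it is never built
+ * here; the interface name "cm0" is an assumption, and error handling
+ * is omitted for brevity.
+ */
+#if 0
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/if_cablemodem.h>
+
+int main(void)
+{
+	struct ifreq ifr;
+	int frequency = 0;
+	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+	memset(&ifr, 0, sizeof(ifr));
+	strncpy(ifr.ifr_name, "cm0", IFNAMSIZ - 1);
+	ifr.ifr_data = (char *) &frequency;
+	if (ioctl(fd, SIOCGCMFREQUENCY, &ifr) == 0)
+		printf("tuner frequency: %d kHz\n", frequency);
+	return 0;
+}
+#endif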
+
+/* transmit function: do nothing since SB1000 can't send anything out */
+static int
+sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
+ /* sb1000 can't xmit datagrams */
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/* SB1000 interrupt handler. */
+static irqreturn_t sb1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ char *name;
+ unsigned char st;
+ int ioaddr[2];
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct sb1000_private *lp = netdev_priv(dev);
+
+ const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
+ const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
+ const int MaxRxErrorCount = 6;
+
+ if (dev == NULL) {
+ printk(KERN_ERR "sb1000_interrupt(): irq %d for unknown device.\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr[0] = dev->base_addr;
+ /* mem_start holds the second I/O address */
+ ioaddr[1] = dev->mem_start;
+ name = dev->name;
+
+ /* is it a good interrupt? */
+ st = inb(ioaddr[1] + 6);
+ if (!(st & 0x08 && st & 0x20)) {
+ return IRQ_NONE;
+ }
+
+ if (sb1000_debug > 3)
+ printk(KERN_DEBUG "%s: entering interrupt\n", dev->name);
+
+ st = inb(ioaddr[0] + 7);
+ if (sb1000_rx(dev))
+ lp->rx_error_count++;
+#ifdef SB1000_DELAY
+ udelay(SB1000_DELAY);
+#endif /* SB1000_DELAY */
+ sb1000_issue_read_command(ioaddr, name);
+ if (st & 0x01) {
+ sb1000_error_dpc(dev);
+ sb1000_issue_read_command(ioaddr, name);
+ }
+ if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) {
+ sb1000_wait_for_ready_clear(ioaddr, name);
+ sb1000_send_command(ioaddr, name, Command0);
+ sb1000_wait_for_ready(ioaddr, name);
+ sb1000_issue_read_command(ioaddr, name);
+ }
+ if (lp->rx_error_count >= MaxRxErrorCount) {
+ sb1000_wait_for_ready_clear(ioaddr, name);
+ sb1000_send_command(ioaddr, name, Command1);
+ sb1000_wait_for_ready(ioaddr, name);
+ sb1000_issue_read_command(ioaddr, name);
+ lp->rx_error_count = 0;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *sb1000_stats(struct net_device *dev)
+{
+ struct sb1000_private *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+static int sb1000_close(struct net_device *dev)
+{
+ int i;
+ int ioaddr[2];
+ struct sb1000_private *lp = netdev_priv(dev);
+
+ if (sb1000_debug > 2)
+ printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name);
+
+ netif_stop_queue(dev);
+
+ ioaddr[0] = dev->base_addr;
+ /* mem_start holds the second I/O address */
+ ioaddr[1] = dev->mem_start;
+
+ free_irq(dev->irq, dev);
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(ioaddr[1], SB1000_IO_EXTENT);
+ release_region(ioaddr[0], SB1000_IO_EXTENT);
+
+ /* free rx_skb's if needed */
+ for (i=0; i<4; i++) {
+ if (lp->rx_skb[i]) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ }
+ }
+ return 0;
+}
+
+MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
+MODULE_DESCRIPTION("General Instruments SB1000 driver");
+MODULE_LICENSE("GPL");
+
+static int __init
+sb1000_init(void)
+{
+ return pnp_register_driver(&sb1000_driver);
+}
+
+static void __exit
+sb1000_exit(void)
+{
+ pnp_unregister_driver(&sb1000_driver);
+}
+
+module_init(sb1000_init);
+module_exit(sb1000_exit);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
new file mode 100644
index 000000000000..fd2e7c374906
--- /dev/null
+++ b/drivers/net/sb1250-mac.c
@@ -0,0 +1,2920 @@
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+ * This driver is designed for the Broadcom SiByte SOC built-in
+ * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/bitops.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/cache.h>
+
+/* This is only here until the firmware is ready. In that case,
+ the firmware leaves the ethernet address in the register for us. */
+#ifdef CONFIG_SIBYTE_STANDALONE
+#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
+#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
+#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
+#endif
+
+
+/* These identify the driver base version and may not be removed. */
+#if 0
+static char version1[] __devinitdata =
+"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
+#endif
+
+
+/* Operational parameters that usually are not changed. */
+
+#define CONFIG_SBMAC_COALESCE
+
+#define MAX_UNITS 3 /* More are supported, limit only on options */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+
+MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
+MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
+
+/* A few user-configurable values which may be modified when a driver
+ module is loaded. */
+
+/* 1 normal messages, 0 quiet .. 7 verbose. */
+static int debug = 1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug messages");
+
+/* mii status msgs */
+static int noisy_mii = 1;
+module_param(noisy_mii, int, S_IRUGO);
+MODULE_PARM_DESC(noisy_mii, "MII status messages");
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+*/
+#ifdef MODULE
+static int options[MAX_UNITS] = {-1, -1, -1};
+module_param_array(options, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));
+
+static int full_duplex[MAX_UNITS] = {-1, -1, -1};
+module_param_array(full_duplex, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
+#endif
+
+#ifdef CONFIG_SBMAC_COALESCE
+static int int_pktcnt = 0;
+module_param(int_pktcnt, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt, "Packet count");
+
+static int int_timeout = 0;
+module_param(int_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout, "Timeout value");
+#endif
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_defs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_mac.h>
+#include <asm/sibyte/sb1250_dma.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+
+/**********************************************************************
+ * Simple types
+ ********************************************************************* */
+
+
+typedef unsigned long sbmac_port_t;
+
+typedef enum { sbmac_speed_auto, sbmac_speed_10,
+ sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
+
+typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
+ sbmac_duplex_full } sbmac_duplex_t;
+
+typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
+ sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
+
+typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
+ sbmac_state_broken } sbmac_state_t;
+
+
+/**********************************************************************
+ * Macros
+ ********************************************************************* */
+
+
+#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
+ (d)->sbdma_dscrtable : (d)->f+1)
+
+
+#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
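+/* For example, with 32-byte cache lines NUMCACHEBLKS(1518) is 48. */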
+
+#define SBMAC_READCSR(t) __raw_readq((unsigned long)t)
+#define SBMAC_WRITECSR(t,v) __raw_writeq(v, (unsigned long)t)
+
+
+#define SBMAC_MAX_TXDESCR 32
+#define SBMAC_MAX_RXDESCR 32
+
+#define ETHER_ALIGN 2
+#define ETHER_ADDR_LEN 6
+#define ENET_PACKET_SIZE 1518
+/*#define ENET_PACKET_SIZE 9216 */
+
+/**********************************************************************
+ * DMA Descriptor structure
+ ********************************************************************* */
+
+typedef struct sbdmadscr_s {
+ uint64_t dscr_a;
+ uint64_t dscr_b;
+} sbdmadscr_t;
+
+typedef unsigned long paddr_t;
+
+/**********************************************************************
+ * DMA Controller structure
+ ********************************************************************* */
+
+typedef struct sbmacdma_s {
+
+ /*
+ * This stuff is used to identify the channel and the registers
+ * associated with it.
+ */
+
+ struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
+ int sbdma_channel; /* channel number */
+ int sbdma_txdir; /* direction (1=transmit) */
+ int sbdma_maxdescr; /* total # of descriptors in ring */
+#ifdef CONFIG_SBMAC_COALESCE
+ int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt*/
+ int sbdma_int_timeout; /* # usec rx/tx interrupt */
+#endif
+
+ sbmac_port_t sbdma_config0; /* DMA config register 0 */
+ sbmac_port_t sbdma_config1; /* DMA config register 1 */
+ sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
+ sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
+ sbmac_port_t sbdma_curdscr; /* current descriptor address */
+
+ /*
+ * This stuff is for maintenance of the ring
+ */
+
+ sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
+ sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */
+
+ struct sk_buff **sbdma_ctxtable; /* context table, one per descr */
+
+ paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
+ sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */
+ sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */
+} sbmacdma_t;
+
+
+/**********************************************************************
+ * Ethernet softc structure
+ ********************************************************************* */
+
+struct sbmac_softc {
+
+ /*
+ * Linux-specific things
+ */
+
+ struct net_device *sbm_dev; /* pointer to linux device */
+ spinlock_t sbm_lock; /* spin lock */
+ struct timer_list sbm_timer; /* for monitoring MII */
+ struct net_device_stats sbm_stats;
+ int sbm_devflags; /* current device flags */
+
+ int sbm_phy_oldbmsr;
+ int sbm_phy_oldanlpar;
+ int sbm_phy_oldk1stsr;
+ int sbm_phy_oldlinkstat;
+ int sbm_buffersize;
+
+ unsigned char sbm_phys[2];
+
+ /*
+ * Controller-specific things
+ */
+
+ unsigned long sbm_base; /* MAC's base address */
+ sbmac_state_t sbm_state; /* current state */
+
+ sbmac_port_t sbm_macenable; /* MAC Enable Register */
+ sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
+ sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
+ sbmac_port_t sbm_framecfg; /* Frame configuration register */
+ sbmac_port_t sbm_rxfilter; /* receive filter register */
+ sbmac_port_t sbm_isr; /* Interrupt status register */
+ sbmac_port_t sbm_imr; /* Interrupt mask register */
+ sbmac_port_t sbm_mdio; /* MDIO register */
+
+ sbmac_speed_t sbm_speed; /* current speed */
+ sbmac_duplex_t sbm_duplex; /* current duplex */
+ sbmac_fc_t sbm_fc; /* current flow control setting */
+
+ unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+
+ sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
+ sbmacdma_t sbm_rxdma;
+ int rx_hw_checksum;
+ int sbe_idx;
+};
+
+
+/**********************************************************************
+ * Externs
+ ********************************************************************* */
+
+/**********************************************************************
+ * Prototypes
+ ********************************************************************* */
+
+static void sbdma_initctx(sbmacdma_t *d,
+ struct sbmac_softc *s,
+ int chan,
+ int txrx,
+ int maxdescr);
+static void sbdma_channel_start(sbmacdma_t *d, int rxtx);
+static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
+static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
+static void sbdma_emptyring(sbmacdma_t *d);
+static void sbdma_fillring(sbmacdma_t *d);
+static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d);
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d);
+static int sbmac_initctx(struct sbmac_softc *s);
+static void sbmac_channel_start(struct sbmac_softc *s);
+static void sbmac_channel_stop(struct sbmac_softc *s);
+static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t);
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff);
+static uint64_t sbmac_addr2reg(unsigned char *ptr);
+static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs);
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static void sbmac_setmulti(struct sbmac_softc *sc);
+static int sbmac_init(struct net_device *dev, int idx);
+static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed);
+static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc);
+
+static int sbmac_open(struct net_device *dev);
+static void sbmac_timer(unsigned long data);
+static void sbmac_tx_timeout (struct net_device *dev);
+static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
+static void sbmac_set_rx_mode(struct net_device *dev);
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sbmac_close(struct net_device *dev);
+static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
+
+static void sbmac_mii_sync(struct sbmac_softc *s);
+static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
+static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx);
+static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
+ unsigned int regval);
+
+
+/**********************************************************************
+ * Globals
+ ********************************************************************* */
+
+static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
+
+
+/**********************************************************************
+ * MDIO constants
+ ********************************************************************* */
+
+#define MII_COMMAND_START 0x01
+#define MII_COMMAND_READ 0x02
+#define MII_COMMAND_WRITE 0x01
+#define MII_COMMAND_ACK 0x02
+
+#define BMCR_RESET 0x8000
+#define BMCR_LOOPBACK 0x4000
+#define BMCR_SPEED0 0x2000
+#define BMCR_ANENABLE 0x1000
+#define BMCR_POWERDOWN 0x0800
+#define BMCR_ISOLATE 0x0400
+#define BMCR_RESTARTAN 0x0200
+#define BMCR_DUPLEX 0x0100
+#define BMCR_COLTEST 0x0080
+#define BMCR_SPEED1 0x0040
+#define BMCR_SPEED1000 BMCR_SPEED1
+#define BMCR_SPEED100 BMCR_SPEED0
+#define BMCR_SPEED10 0
+
+#define BMSR_100BT4 0x8000
+#define BMSR_100BT_FDX 0x4000
+#define BMSR_100BT_HDX 0x2000
+#define BMSR_10BT_FDX 0x1000
+#define BMSR_10BT_HDX 0x0800
+#define BMSR_100BT2_FDX 0x0400
+#define BMSR_100BT2_HDX 0x0200
+#define BMSR_1000BT_XSR 0x0100
+#define BMSR_PRESUP 0x0040
+#define BMSR_ANCOMPLT 0x0020
+#define BMSR_REMFAULT 0x0010
+#define BMSR_AUTONEG 0x0008
+#define BMSR_LINKSTAT 0x0004
+#define BMSR_JABDETECT 0x0002
+#define BMSR_EXTCAPAB 0x0001
+
+#define PHYIDR1 0x2000
+#define PHYIDR2 0x5C60
+
+#define ANAR_NP 0x8000
+#define ANAR_RF 0x2000
+#define ANAR_ASYPAUSE 0x0800
+#define ANAR_PAUSE 0x0400
+#define ANAR_T4 0x0200
+#define ANAR_TXFD 0x0100
+#define ANAR_TXHD 0x0080
+#define ANAR_10FD 0x0040
+#define ANAR_10HD 0x0020
+#define ANAR_PSB 0x0001
+
+#define ANLPAR_NP 0x8000
+#define ANLPAR_ACK 0x4000
+#define ANLPAR_RF 0x2000
+#define ANLPAR_ASYPAUSE 0x0800
+#define ANLPAR_PAUSE 0x0400
+#define ANLPAR_T4 0x0200
+#define ANLPAR_TXFD 0x0100
+#define ANLPAR_TXHD 0x0080
+#define ANLPAR_10FD 0x0040
+#define ANLPAR_10HD 0x0020
+#define ANLPAR_PSB 0x0001 /* 802.3 */
+
+#define ANER_PDF 0x0010
+#define ANER_LPNPABLE 0x0008
+#define ANER_NPABLE 0x0004
+#define ANER_PAGERX 0x0002
+#define ANER_LPANABLE 0x0001
+
+#define ANNPTR_NP 0x8000
+#define ANNPTR_MP 0x2000
+#define ANNPTR_ACK2 0x1000
+#define ANNPTR_TOGTX 0x0800
+#define ANNPTR_CODE 0x0008
+
+#define ANNPRR_NP 0x8000
+#define ANNPRR_MP 0x2000
+#define ANNPRR_ACK3 0x1000
+#define ANNPRR_TOGTX 0x0800
+#define ANNPRR_CODE 0x0008
+
+#define K1TCR_TESTMODE 0x0000
+#define K1TCR_MSMCE 0x1000
+#define K1TCR_MSCV 0x0800
+#define K1TCR_RPTR 0x0400
+#define K1TCR_1000BT_FDX 0x200
+#define K1TCR_1000BT_HDX 0x100
+
+#define K1STSR_MSMCFLT 0x8000
+#define K1STSR_MSCFGRES 0x4000
+#define K1STSR_LRSTAT 0x2000
+#define K1STSR_RRSTAT 0x1000
+#define K1STSR_LP1KFD 0x0800
+#define K1STSR_LP1KHD 0x0400
+#define K1STSR_LPASMDIR 0x0200
+
+#define K1SCR_1KX_FDX 0x8000
+#define K1SCR_1KX_HDX 0x4000
+#define K1SCR_1KT_FDX 0x2000
+#define K1SCR_1KT_HDX 0x1000
+
+#define STRAP_PHY1 0x0800
+#define STRAP_NCMODE 0x0400
+#define STRAP_MANMSCFG 0x0200
+#define STRAP_ANENABLE 0x0100
+#define STRAP_MSVAL 0x0080
+#define STRAP_1KHDXADV 0x0010
+#define STRAP_1KFDXADV 0x0008
+#define STRAP_100ADV 0x0004
+#define STRAP_SPEEDSEL 0x0000
+#define STRAP_SPEED100 0x0001
+
+#define PHYSUP_SPEED1000 0x10
+#define PHYSUP_SPEED100 0x08
+#define PHYSUP_SPEED10 0x00
+#define PHYSUP_LINKUP 0x04
+#define PHYSUP_FDX 0x02
+
+#define MII_BMCR 0x00 /* Basic mode control register (rw) */
+#define MII_BMSR 0x01 /* Basic mode status register (ro) */
+#define MII_K1STSR 0x0A /* 1K Status Register (ro) */
+#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */
+
+
+#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */
+
+#define ENABLE 1
+#define DISABLE 0
+
+/**********************************************************************
+ * SBMAC_MII_SYNC(s)
+ *
+ * Synchronize with the MII - send a pattern of bits to the MII
+ * that will guarantee that it is ready to accept a command.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_mii_sync(struct sbmac_softc *s)
+{
+ int cnt;
+ uint64_t bits;
+ int mac_mdio_genc;
+
+ mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+
+ bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
+
+ SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+
+ for (cnt = 0; cnt < 32; cnt++) {
+ SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+ }
+}
+
+/**********************************************************************
+ * SBMAC_MII_SENDDATA(s,data,bitcnt)
+ *
+ * Send some bits to the MII. The bits to be sent are right-
+ * justified in the 'data' parameter.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * data - data to send
+ * bitcnt - number of bits to send
+ ********************************************************************* */
+
+static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt)
+{
+ int i;
+ uint64_t bits;
+ unsigned int curmask;
+ int mac_mdio_genc;
+
+ mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+
+ bits = M_MAC_MDIO_DIR_OUTPUT;
+ SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+
+ curmask = 1 << (bitcnt - 1);
+
+ for (i = 0; i < bitcnt; i++) {
+ if (data & curmask)
+ bits |= M_MAC_MDIO_OUT;
+ else bits &= ~M_MAC_MDIO_OUT;
+ SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+ curmask >>= 1;
+ }
+}
+
+
+
+/**********************************************************************
+ * SBMAC_MII_READ(s,phyaddr,regidx)
+ *
+ * Read a PHY register.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * phyaddr - PHY's address
+ * 	   regidx - index of register to read
+ *
+ * Return value:
+ * value read, or 0 if an error occurred.
+ ********************************************************************* */
+
+static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
+{
+ int idx;
+ int error;
+ int regval;
+ int mac_mdio_genc;
+
+ /*
+ * Synchronize ourselves so that the PHY knows the next
+ * thing coming down is a command
+ */
+
+ sbmac_mii_sync(s);
+
+ /*
+ * Send the data to the PHY. The sequence is
+ * a "start" command (2 bits)
+ * a "read" command (2 bits)
+ * the PHY addr (5 bits)
+ * the register index (5 bits)
+ */
+
+ sbmac_mii_senddata(s,MII_COMMAND_START, 2);
+ sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
+ sbmac_mii_senddata(s,phyaddr, 5);
+ sbmac_mii_senddata(s,regidx, 5);
+
+ mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+
+ /*
+ * Switch the port around without a clock transition.
+ */
+ SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+
+ /*
+ * Send out a clock pulse to signal we want the status
+ */
+
+ SBMAC_WRITECSR(s->sbm_mdio,
+ M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+
+ /*
+ * If an error occurred, the PHY will signal '1' back
+ */
+ error = SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN;
+
+ /*
+ * Issue an 'idle' clock pulse, but keep the direction
+ * the same.
+ */
+ SBMAC_WRITECSR(s->sbm_mdio,
+ M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+
+ regval = 0;
+
+ for (idx = 0; idx < 16; idx++) {
+ regval <<= 1;
+
+ if (error == 0) {
+ if (SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN)
+ regval |= 1;
+ }
+
+ SBMAC_WRITECSR(s->sbm_mdio,
+ M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc);
+ SBMAC_WRITECSR(s->sbm_mdio,
+ M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+ }
+
+ /* Switch back to output */
+ SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
+
+ if (error == 0)
+ return regval;
+ return 0;
+}
+
+
+/**********************************************************************
+ * SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
+ *
+ * Write a value to a PHY register.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * phyaddr - PHY to use
+ * regidx - register within the PHY
+ * regval - data to write to register
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
+ unsigned int regval)
+{
+ int mac_mdio_genc;
+
+ sbmac_mii_sync(s);
+
+ sbmac_mii_senddata(s,MII_COMMAND_START,2);
+ sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
+ sbmac_mii_senddata(s,phyaddr, 5);
+ sbmac_mii_senddata(s,regidx, 5);
+ sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
+ sbmac_mii_senddata(s,regval,16);
+
+ mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+
+ SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
+}
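+
+/*
+ * Illustrative sketch (not compiled): the two helpers above bit-bang a
+ * standard IEEE 802.3 clause 22 MDIO frame -- start (01), opcode
+ * (10 = read, 01 = write), 5-bit PHY address, 5-bit register index,
+ * then 16 data bits.  Reading the basic mode status register of the
+ * first PHY would look like this:
+ */
+#if 0
+static unsigned int example_read_bmsr(struct sbmac_softc *s)
+{
+	return sbmac_mii_read(s, s->sbm_phys[0], MII_BMSR);
+}
+#endif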
+
+
+
+/**********************************************************************
+ * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
+ *
+ * Initialize a DMA channel context. Since there are potentially
+ * eight DMA channels per MAC, it's nice to do this in a standard
+ * way.
+ *
+ * Input parameters:
+ * d - sbmacdma_t structure (DMA channel context)
+ * s - sbmac_softc structure (pointer to a MAC)
+ * chan - channel number (0..1 right now)
+ * txrx - Identifies DMA_TX or DMA_RX for channel direction
+ * maxdescr - number of descriptors
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_initctx(sbmacdma_t *d,
+ struct sbmac_softc *s,
+ int chan,
+ int txrx,
+ int maxdescr)
+{
+ /*
+ * Save away interesting stuff in the structure
+ */
+
+ d->sbdma_eth = s;
+ d->sbdma_channel = chan;
+ d->sbdma_txdir = txrx;
+
+#if 0
+ /* RMON clearing */
+ s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
+#endif
+
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)), 0);
+ SBMAC_WRITECSR(IOADDR(
+ A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)), 0);
+
+ /*
+ * initialize register pointers
+ */
+
+ d->sbdma_config0 =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
+ d->sbdma_config1 =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
+ d->sbdma_dscrbase =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
+ d->sbdma_dscrcnt =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
+ d->sbdma_curdscr =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+
+ /*
+ * Allocate memory for the ring
+ */
+
+ d->sbdma_maxdescr = maxdescr;
+
+ d->sbdma_dscrtable = (sbdmadscr_t *)
+ kmalloc(d->sbdma_maxdescr*sizeof(sbdmadscr_t), GFP_KERNEL);
+
+ memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
+
+ d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
+
+ d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
+
+ /*
+ * And context table
+ */
+
+ d->sbdma_ctxtable = (struct sk_buff **)
+ kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
+
+ memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
+
+#ifdef CONFIG_SBMAC_COALESCE
+ /*
+ * Setup Rx/Tx DMA coalescing defaults
+ */
+
+ if ( int_pktcnt ) {
+ d->sbdma_int_pktcnt = int_pktcnt;
+ } else {
+ d->sbdma_int_pktcnt = 1;
+ }
+
+ if ( int_timeout ) {
+ d->sbdma_int_timeout = int_timeout;
+ } else {
+ d->sbdma_int_timeout = 0;
+ }
+#endif
+
+}
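+
+/*
+ * Sizing note (illustrative): with SBMAC_MAX_TXDESCR/SBMAC_MAX_RXDESCR
+ * both set to 32 and sbdmadscr_t holding two uint64_t fields (16 bytes),
+ * each descriptor table allocated above is 32 * 16 = 512 bytes, plus a
+ * context table of 32 sk_buff pointers.
+ */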
+
+/**********************************************************************
+ * SBDMA_CHANNEL_START(d)
+ *
+ * Initialize the hardware registers for a DMA channel.
+ *
+ * Input parameters:
+ * 	   d - DMA channel to init (context must be previously init'd)
+ * rxtx - DMA_RX or DMA_TX depending on what type of channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
+{
+ /*
+ * Turn on the DMA channel
+ */
+
+#ifdef CONFIG_SBMAC_COALESCE
+ SBMAC_WRITECSR(d->sbdma_config1,
+ V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+ 0);
+ SBMAC_WRITECSR(d->sbdma_config0,
+ M_DMA_EOP_INT_EN |
+ V_DMA_RINGSZ(d->sbdma_maxdescr) |
+ V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
+ 0);
+#else
+ SBMAC_WRITECSR(d->sbdma_config1,0);
+ SBMAC_WRITECSR(d->sbdma_config0,
+ V_DMA_RINGSZ(d->sbdma_maxdescr) |
+ 0);
+#endif
+
+ SBMAC_WRITECSR(d->sbdma_dscrbase,d->sbdma_dscrtable_phys);
+
+ /*
+ * Initialize ring pointers
+ */
+
+ d->sbdma_addptr = d->sbdma_dscrtable;
+ d->sbdma_remptr = d->sbdma_dscrtable;
+}
+
+/**********************************************************************
+ * SBDMA_CHANNEL_STOP(d)
+ *
+ * Shut down the hardware registers for a DMA channel.
+ *
+ * Input parameters:
+ * 	   d - DMA channel to stop (context must be previously init'd)
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_channel_stop(sbmacdma_t *d)
+{
+ /*
+ * Turn off the DMA channel
+ */
+
+ SBMAC_WRITECSR(d->sbdma_config1,0);
+
+ SBMAC_WRITECSR(d->sbdma_dscrbase,0);
+
+ SBMAC_WRITECSR(d->sbdma_config0,0);
+
+ /*
+ * Zero ring pointers
+ */
+
+ d->sbdma_addptr = 0;
+ d->sbdma_remptr = 0;
+}
+
+static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
+{
+ unsigned long addr;
+ unsigned long newaddr;
+
+ addr = (unsigned long) skb->data;
+
+ newaddr = (addr + power2 - 1) & ~(power2 - 1);
+
+ skb_reserve(skb,newaddr-addr+offset);
+}
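+
+/*
+ * Worked example (illustrative only, assuming a 32-byte cache line):
+ * with power2 = SMP_CACHE_BYTES and offset = ETHER_ALIGN = 2, an skb
+ * whose data starts at 0x...1021 gets skb_reserve(skb, 0x1040 - 0x1021 + 2),
+ * so the data begins two bytes past a cache-line boundary; the 14-byte
+ * Ethernet header then leaves the IP header naturally aligned.
+ */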
+
+
+/**********************************************************************
+ * SBDMA_ADD_RCVBUFFER(d,sb)
+ *
+ * Add a buffer to the specified DMA channel. For receive channels,
+ * this queues a buffer for inbound packets.
+ *
+ * Input parameters:
+ * d - DMA channel descriptor
+ * sb - sk_buff to add, or NULL if we should allocate one
+ *
+ * Return value:
+ * 	   0 if buffer was added successfully
+ * 	   -ENOSPC if the ring is full
+ * 	   -ENOBUFS if a new sk_buff could not be allocated
+ ********************************************************************* */
+
+
+static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
+{
+ sbdmadscr_t *dsc;
+ sbdmadscr_t *nextdsc;
+ struct sk_buff *sb_new = NULL;
+ int pktsize = ENET_PACKET_SIZE;
+
+ /* get pointer to our current place in the ring */
+
+ dsc = d->sbdma_addptr;
+ nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+ /*
+ * figure out if the ring is full - if the next descriptor
+ * is the same as the one that we're going to remove from
+ * the ring, the ring is full
+ */
+
+ if (nextdsc == d->sbdma_remptr) {
+ return -ENOSPC;
+ }
+
+ /*
+ * Allocate a sk_buff if we don't already have one.
+ * If we do have an sk_buff, reset it so that it's empty.
+ *
+ * Note: sk_buffs don't seem to be guaranteed to have any sort
+ * of alignment when they are allocated. Therefore, allocate enough
+ * extra space to make sure that:
+ *
+ * 1. the data does not start in the middle of a cache line.
+ * 2. The data does not end in the middle of a cache line
+ * 3. The buffer can be aligned such that the IP addresses are
+ * naturally aligned.
+ *
+	 * Remember, the SOC's MAC writes whole cache lines at a time,
+ * without reading the old contents first. So, if the sk_buff's
+ * data portion starts in the middle of a cache line, the SOC
+ * DMA will trash the beginning (and ending) portions.
+ */
+
+ if (sb == NULL) {
+ sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+ if (sb_new == NULL) {
+ printk(KERN_INFO "%s: sk_buff allocation failed\n",
+ d->sbdma_eth->sbm_dev->name);
+ return -ENOBUFS;
+ }
+
+ sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+
+ /* mark skbuff owned by our device */
+ sb_new->dev = d->sbdma_eth->sbm_dev;
+ }
+ else {
+ sb_new = sb;
+ /*
+ * nothing special to reinit buffer, it's already aligned
+ * and sb->data already points to a good place.
+ */
+ }
+
+ /*
+ * fill in the descriptor
+ */
+
+#ifdef CONFIG_SBMAC_COALESCE
+ /*
+ * Do not interrupt per DMA transfer.
+ */
+ dsc->dscr_a = virt_to_phys(sb_new->tail) |
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+ 0;
+#else
+ dsc->dscr_a = virt_to_phys(sb_new->tail) |
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+ M_DMA_DSCRA_INTERRUPT;
+#endif
+
+ /* receiving: no options */
+ dsc->dscr_b = 0;
+
+ /*
+ * fill in the context
+ */
+
+ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
+
+ /*
+ * point at next packet
+ */
+
+ d->sbdma_addptr = nextdsc;
+
+ /*
+ * Give the buffer to the DMA engine.
+ */
+
+ SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
+
+ return 0; /* we did it */
+}
+
+/**********************************************************************
+ * SBDMA_ADD_TXBUFFER(d,sb)
+ *
+ * Add a transmit buffer to the specified DMA channel, causing a
+ * transmit to start.
+ *
+ * Input parameters:
+ * d - DMA channel descriptor
+ * sb - sk_buff to add
+ *
+ * Return value:
+ * 0 transmit queued successfully
+ * otherwise error code
+ ********************************************************************* */
+
+
+static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
+{
+ sbdmadscr_t *dsc;
+ sbdmadscr_t *nextdsc;
+ uint64_t phys;
+ uint64_t ncb;
+ int length;
+
+ /* get pointer to our current place in the ring */
+
+ dsc = d->sbdma_addptr;
+ nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+ /*
+ * figure out if the ring is full - if the next descriptor
+ * is the same as the one that we're going to remove from
+ * the ring, the ring is full
+ */
+
+ if (nextdsc == d->sbdma_remptr) {
+ return -ENOSPC;
+ }
+
+ /*
+ * Under Linux, it's not necessary to copy/coalesce buffers
+ * like it is on NetBSD. We think they're all contiguous,
+ * but that may not be true for GBE.
+ */
+
+ length = sb->len;
+
+ /*
+ * fill in the descriptor. Note that the number of cache
+ * blocks in the descriptor is the number of blocks
+ * *spanned*, so we need to add in the offset (if any)
+ * while doing the calculation.
+ */
+
+ phys = virt_to_phys(sb->data);
+ ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
+
+ dsc->dscr_a = phys |
+ V_DMA_DSCRA_A_SIZE(ncb) |
+#ifndef CONFIG_SBMAC_COALESCE
+ M_DMA_DSCRA_INTERRUPT |
+#endif
+ M_DMA_ETHTX_SOP;
+
+ /* transmitting: set outbound options and length */
+
+ dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
+ V_DMA_DSCRB_PKT_SIZE(length);
+
+ /*
+ * fill in the context
+ */
+
+ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
+
+ /*
+ * point at next packet
+ */
+
+ d->sbdma_addptr = nextdsc;
+
+ /*
+ * Give the buffer to the DMA engine.
+ */
+
+ SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
+
+ return 0; /* we did it */
+}
+
+
+
+
+/**********************************************************************
+ * SBDMA_EMPTYRING(d)
+ *
+ * Free all allocated sk_buffs on the specified DMA channel;
+ *
+ * Input parameters:
+ * d - DMA channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_emptyring(sbmacdma_t *d)
+{
+ int idx;
+ struct sk_buff *sb;
+
+ for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
+ sb = d->sbdma_ctxtable[idx];
+ if (sb) {
+ dev_kfree_skb(sb);
+ d->sbdma_ctxtable[idx] = NULL;
+ }
+ }
+}
+
+
+/**********************************************************************
+ * SBDMA_FILLRING(d)
+ *
+ * Fill the specified DMA channel (must be receive channel)
+ * with sk_buffs
+ *
+ * Input parameters:
+ * d - DMA channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_fillring(sbmacdma_t *d)
+{
+ int idx;
+
+ for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
+ if (sbdma_add_rcvbuffer(d,NULL) != 0)
+ break;
+ }
+}
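+
+/*
+ * Note: both sbdma_add_rcvbuffer() and sbdma_add_txbuffer() refuse to use
+ * the last free descriptor (nextdsc == sbdma_remptr), so one slot in each
+ * ring is always left empty -- which is also why the loop above stops at
+ * SBMAC_MAX_RXDESCR-1.  Keeping one slot free is what lets the driver tell
+ * a full ring apart from an empty one.
+ */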
+
+
+/**********************************************************************
+ * SBDMA_RX_PROCESS(sc,d)
+ *
+ * Process "completed" receive buffers on the specified DMA channel.
+ * Note that this isn't really ideal for priority channels, since
+ * it processes all of the packets on a given channel before
+ * returning.
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel context
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+{
+ int curidx;
+ int hwidx;
+ sbdmadscr_t *dsc;
+ struct sk_buff *sb;
+ int len;
+
+ for (;;) {
+ /*
+ * figure out where we are (as an index) and where
+ * the hardware is (also as an index)
+ *
+ * This could be done faster if (for example) the
+ * descriptor table was page-aligned and contiguous in
+ * both virtual and physical memory -- you could then
+ * just compare the low-order bits of the virtual address
+ * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+ */
+
+ curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+ hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
+
+ /*
+ * If they're the same, that means we've processed all
+ * of the descriptors up to (but not including) the one that
+ * the hardware is working on right now.
+ */
+
+ if (curidx == hwidx)
+ break;
+
+ /*
+ * Otherwise, get the packet's sk_buff ptr back
+ */
+
+ dsc = &(d->sbdma_dscrtable[curidx]);
+ sb = d->sbdma_ctxtable[curidx];
+ d->sbdma_ctxtable[curidx] = NULL;
+
+ len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
+
+ /*
+ * Check packet status. If good, process it.
+ * If not, silently drop it and put it back on the
+ * receive ring.
+ */
+
+ if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
+
+ /*
+ * Add a new buffer to replace the old one. If we fail
+ * to allocate a buffer, we're going to drop this
+ * packet and put it right back on the receive ring.
+ */
+
+ if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
+ sc->sbm_stats.rx_dropped++;
+ sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+ } else {
+ /*
+ * Set length into the packet
+ */
+ skb_put(sb,len);
+
+ /*
+ * Buffer has been replaced on the
+ * receive ring. Pass the buffer to
+ * the kernel
+ */
+ sc->sbm_stats.rx_bytes += len;
+ sc->sbm_stats.rx_packets++;
+ sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
+ /* Check hw IPv4/TCP checksum if supported */
+ if (sc->rx_hw_checksum == ENABLE) {
+ if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
+ !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
+ sb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* don't need to set sb->csum */
+ } else {
+ sb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+
+ netif_rx(sb);
+ }
+ } else {
+ /*
+ * Packet was mangled somehow. Just drop it and
+ * put it back on the receive ring.
+ */
+ sc->sbm_stats.rx_errors++;
+ sbdma_add_rcvbuffer(d,sb);
+ }
+
+
+ /*
+ * .. and advance to the next buffer.
+ */
+
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+
+ }
+}
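+
+/*
+ * Worked example (illustrative only, hypothetical addresses): if
+ * sbdma_dscrtable_phys were 0x10000 and the current-descriptor CSR read
+ * back 0x10030, then with 16-byte descriptors hwidx = (0x10030 - 0x10000)
+ * / 16 = 3, so descriptors 0..2 are complete and get processed above.
+ */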
+
+
+
+/**********************************************************************
+ * SBDMA_TX_PROCESS(sc,d)
+ *
+ * Process "completed" transmit buffers on the specified DMA channel.
+ * This is normally called within the interrupt service routine.
+ * Note that this isn't really ideal for priority channels, since
+ * it processes all of the packets on a given channel before
+ * returning.
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel context
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
+{
+ int curidx;
+ int hwidx;
+ sbdmadscr_t *dsc;
+ struct sk_buff *sb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(sc->sbm_lock), flags);
+
+ for (;;) {
+ /*
+ * figure out where we are (as an index) and where
+ * the hardware is (also as an index)
+ *
+ * This could be done faster if (for example) the
+ * descriptor table was page-aligned and contiguous in
+ * both virtual and physical memory -- you could then
+ * just compare the low-order bits of the virtual address
+ * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+ */
+
+ curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+ hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
+
+ /*
+ * If they're the same, that means we've processed all
+ * of the descriptors up to (but not including) the one that
+ * the hardware is working on right now.
+ */
+
+ if (curidx == hwidx)
+ break;
+
+ /*
+ * Otherwise, get the packet's sk_buff ptr back
+ */
+
+ dsc = &(d->sbdma_dscrtable[curidx]);
+ sb = d->sbdma_ctxtable[curidx];
+ d->sbdma_ctxtable[curidx] = NULL;
+
+ /*
+ * Stats
+ */
+
+ sc->sbm_stats.tx_bytes += sb->len;
+ sc->sbm_stats.tx_packets++;
+
+ /*
+ * for transmits, we just free buffers.
+ */
+
+ dev_kfree_skb_irq(sb);
+
+ /*
+ * .. and advance to the next buffer.
+ */
+
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+
+ }
+
+ /*
+ * Decide if we should wake up the protocol or not.
+ * Other drivers seem to do this when we reach a low
+ * watermark on the transmit queue.
+ */
+
+ netif_wake_queue(d->sbdma_eth->sbm_dev);
+
+ spin_unlock_irqrestore(&(sc->sbm_lock), flags);
+
+}
+
+
+
+/**********************************************************************
+ * SBMAC_INITCTX(s)
+ *
+ * Initialize an Ethernet context structure - this is called
+ * once per MAC on the 1250. Memory is allocated here, so don't
+ * call it again from inside the ioctl routines that bring the
+ * interface up/down
+ *
+ * Input parameters:
+ * s - sbmac context structure
+ *
+ * Return value:
+ * 0
+ ********************************************************************* */
+
+static int sbmac_initctx(struct sbmac_softc *s)
+{
+
+ /*
+ * figure out the addresses of some ports
+ */
+
+ s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
+ s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
+ s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
+ s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
+ s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
+ s->sbm_isr = s->sbm_base + R_MAC_STATUS;
+ s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
+ s->sbm_mdio = s->sbm_base + R_MAC_MDIO;
+
+ s->sbm_phys[0] = 1;
+ s->sbm_phys[1] = 0;
+
+ s->sbm_phy_oldbmsr = 0;
+ s->sbm_phy_oldanlpar = 0;
+ s->sbm_phy_oldk1stsr = 0;
+ s->sbm_phy_oldlinkstat = 0;
+
+ /*
+ * Initialize the DMA channels. Right now, only one per MAC is used
+ * Note: Only do this _once_, as it allocates memory from the kernel!
+ */
+
+ sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
+ sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
+
+ /*
+ * initial state is OFF
+ */
+
+ s->sbm_state = sbmac_state_off;
+
+ /*
+ * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
+ */
+
+ s->sbm_speed = sbmac_speed_10;
+ s->sbm_duplex = sbmac_duplex_half;
+ s->sbm_fc = sbmac_fc_disabled;
+
+ return 0;
+}
+
+
+static void sbdma_uninitctx(struct sbmacdma_s *d)
+{
+ if (d->sbdma_dscrtable) {
+ kfree(d->sbdma_dscrtable);
+ d->sbdma_dscrtable = NULL;
+ }
+
+ if (d->sbdma_ctxtable) {
+ kfree(d->sbdma_ctxtable);
+ d->sbdma_ctxtable = NULL;
+ }
+}
+
+
+static void sbmac_uninitctx(struct sbmac_softc *sc)
+{
+ sbdma_uninitctx(&(sc->sbm_txdma));
+ sbdma_uninitctx(&(sc->sbm_rxdma));
+}
+
+
+/**********************************************************************
+ * SBMAC_CHANNEL_START(s)
+ *
+ * Start packet processing on this MAC.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_channel_start(struct sbmac_softc *s)
+{
+ uint64_t reg;
+ sbmac_port_t port;
+ uint64_t cfg,fifo,framecfg;
+ int idx, th_value;
+
+ /*
+ * Don't do this if running
+ */
+
+ if (s->sbm_state == sbmac_state_on)
+ return;
+
+ /*
+ * Bring the controller out of reset, but leave it off.
+ */
+
+ SBMAC_WRITECSR(s->sbm_macenable,0);
+
+ /*
+ * Ignore all received packets
+ */
+
+ SBMAC_WRITECSR(s->sbm_rxfilter,0);
+
+ /*
+ * Calculate values for various control registers.
+ */
+
+ cfg = M_MAC_RETRY_EN |
+ M_MAC_TX_HOLD_SOP_EN |
+ V_MAC_TX_PAUSE_CNT_16K |
+ M_MAC_AP_STAT_EN |
+ M_MAC_FAST_SYNC |
+ M_MAC_SS_EN |
+ 0;
+
+ /*
+	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 parts
+ * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
+ * Use a larger RD_THRSH for gigabit
+ */
+ if (periph_rev >= 2)
+ th_value = 64;
+ else
+ th_value = 28;
+
+ fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
+ ((s->sbm_speed == sbmac_speed_1000)
+ ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
+ V_MAC_TX_RL_THRSH(4) |
+ V_MAC_RX_PL_THRSH(4) |
+ V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
+ V_MAC_RX_RL_THRSH(8) |
+ 0;
+
+ framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
+ V_MAC_MAX_FRAMESZ_DEFAULT |
+ V_MAC_BACKOFF_SEL(1);
+
+ /*
+ * Clear out the hash address map
+ */
+
+ port = s->sbm_base + R_MAC_HASH_BASE;
+ for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+ SBMAC_WRITECSR(port,0);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Clear out the exact-match table
+ */
+
+ port = s->sbm_base + R_MAC_ADDR_BASE;
+ for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
+ SBMAC_WRITECSR(port,0);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Clear out the DMA Channel mapping table registers
+ */
+
+ port = s->sbm_base + R_MAC_CHUP0_BASE;
+ for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+ SBMAC_WRITECSR(port,0);
+ port += sizeof(uint64_t);
+ }
+
+
+ port = s->sbm_base + R_MAC_CHLO0_BASE;
+ for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+ SBMAC_WRITECSR(port,0);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Program the hardware address. It goes into the hardware-address
+ * register as well as the first filter register.
+ */
+
+ reg = sbmac_addr2reg(s->sbm_hwaddr);
+
+ port = s->sbm_base + R_MAC_ADDR_BASE;
+ SBMAC_WRITECSR(port,reg);
+ port = s->sbm_base + R_MAC_ETHERNET_ADDR;
+
+#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
+ /*
+ * Pass1 SOCs do not receive packets addressed to the
+ * destination address in the R_MAC_ETHERNET_ADDR register.
+ * Set the value to zero.
+ */
+ SBMAC_WRITECSR(port,0);
+#else
+ SBMAC_WRITECSR(port,reg);
+#endif
+
+ /*
+ * Set the receive filter for no packets, and write values
+ * to the various config registers
+ */
+
+ SBMAC_WRITECSR(s->sbm_rxfilter,0);
+ SBMAC_WRITECSR(s->sbm_imr,0);
+ SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
+ SBMAC_WRITECSR(s->sbm_fifocfg,fifo);
+ SBMAC_WRITECSR(s->sbm_maccfg,cfg);
+
+ /*
+ * Initialize DMA channels (rings should be ok now)
+ */
+
+ sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
+ sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
+
+ /*
+ * Configure the speed, duplex, and flow control
+ */
+
+ sbmac_set_speed(s,s->sbm_speed);
+ sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
+
+ /*
+ * Fill the receive ring
+ */
+
+ sbdma_fillring(&(s->sbm_rxdma));
+
+ /*
+ * Turn on the rest of the bits in the enable register
+ */
+
+ SBMAC_WRITECSR(s->sbm_macenable,
+ M_MAC_RXDMA_EN0 |
+ M_MAC_TXDMA_EN0 |
+ M_MAC_RX_ENABLE |
+ M_MAC_TX_ENABLE);
+
+
+
+
+#ifdef CONFIG_SBMAC_COALESCE
+ /*
+ * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
+ */
+ SBMAC_WRITECSR(s->sbm_imr,
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0));
+#else
+ /*
+ * Accept any kind of interrupt on TX and RX DMA channel 0
+ */
+ SBMAC_WRITECSR(s->sbm_imr,
+ (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
+#endif
+
+ /*
+ * Enable receiving unicasts and broadcasts
+ */
+
+ SBMAC_WRITECSR(s->sbm_rxfilter,M_MAC_UCAST_EN | M_MAC_BCAST_EN);
+
+ /*
+ * we're running now.
+ */
+
+ s->sbm_state = sbmac_state_on;
+
+ /*
+ * Program multicast addresses
+ */
+
+ sbmac_setmulti(s);
+
+ /*
+ * If channel was in promiscuous mode before, turn that on
+ */
+
+ if (s->sbm_devflags & IFF_PROMISC) {
+ sbmac_promiscuous_mode(s,1);
+ }
+
+}
+
+
+/**********************************************************************
+ * SBMAC_CHANNEL_STOP(s)
+ *
+ * Stop packet processing on this MAC.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_channel_stop(struct sbmac_softc *s)
+{
+ /* don't do this if already stopped */
+
+ if (s->sbm_state == sbmac_state_off)
+ return;
+
+ /* don't accept any packets, disable all interrupts */
+
+ SBMAC_WRITECSR(s->sbm_rxfilter,0);
+ SBMAC_WRITECSR(s->sbm_imr,0);
+
+ /* Turn off ticker */
+
+ /* XXX */
+
+ /* turn off receiver and transmitter */
+
+ SBMAC_WRITECSR(s->sbm_macenable,0);
+
+ /* We're stopped now. */
+
+ s->sbm_state = sbmac_state_off;
+
+ /*
+ * Stop DMA channels (rings should be ok now)
+ */
+
+ sbdma_channel_stop(&(s->sbm_rxdma));
+ sbdma_channel_stop(&(s->sbm_txdma));
+
+ /* Empty the receive and transmit rings */
+
+ sbdma_emptyring(&(s->sbm_rxdma));
+ sbdma_emptyring(&(s->sbm_txdma));
+
+}
+
+/**********************************************************************
+ * SBMAC_SET_CHANNEL_STATE(state)
+ *
+ * Set the channel's state ON or OFF
+ *
+ * Input parameters:
+ * 	   sc - softc structure
+ * 	   state - new state
+ *
+ * Return value:
+ * old state
+ ********************************************************************* */
+static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
+ sbmac_state_t state)
+{
+ sbmac_state_t oldstate = sc->sbm_state;
+
+ /*
+ * If same as previous state, return
+ */
+
+ if (state == oldstate) {
+ return oldstate;
+ }
+
+ /*
+ * If new state is ON, turn channel on
+ */
+
+ if (state == sbmac_state_on) {
+ sbmac_channel_start(sc);
+ }
+ else {
+ sbmac_channel_stop(sc);
+ }
+
+ /*
+ * Return previous state
+ */
+
+ return oldstate;
+}
+
+
+/**********************************************************************
+ * SBMAC_PROMISCUOUS_MODE(sc,onoff)
+ *
+ * Turn on or off promiscuous mode
+ *
+ * Input parameters:
+ * sc - softc
+ * onoff - 1 to turn on, 0 to turn off
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
+{
+ uint64_t reg;
+
+ if (sc->sbm_state != sbmac_state_on)
+ return;
+
+ if (onoff) {
+ reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg |= M_MAC_ALLPKT_EN;
+ SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ }
+ else {
+ reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg &= ~M_MAC_ALLPKT_EN;
+ SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ }
+}
+
+/**********************************************************************
+ * SBMAC_SET_IPHDR_OFFSET(sc)
+ *
+ * Set the IP header offset to 15 bytes, assuming Ethernet encapsulation
+ *
+ * Input parameters:
+ * sc - softc
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
+{
+ uint64_t reg;
+
+	/* Hard code the offset to 15 for now */
+	reg = SBMAC_READCSR(sc->sbm_rxfilter);
+	reg &= ~M_MAC_IPHDR_OFFSET;
+	reg |= V_MAC_IPHDR_OFFSET(15);
+	SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+
+ /* read system identification to determine revision */
+ if (periph_rev >= 2) {
+ sc->rx_hw_checksum = ENABLE;
+ } else {
+ sc->rx_hw_checksum = DISABLE;
+ }
+}
+
+
+/**********************************************************************
+ * SBMAC_ADDR2REG(ptr)
+ *
+ * Convert six bytes into the 64-bit register value that
+ * we typically write into the SBMAC's address/mcast registers
+ *
+ * Input parameters:
+ * ptr - pointer to 6 bytes
+ *
+ * Return value:
+ * register value
+ ********************************************************************* */
+
+static uint64_t sbmac_addr2reg(unsigned char *ptr)
+{
+ uint64_t reg = 0;
+
+ ptr += 6;
+
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+
+ return reg;
+}
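+
+/*
+ * Worked example (illustrative only): the address 02:11:22:33:44:55 is
+ * packed with its first byte in the least-significant position, giving
+ * the register value 0x0000554433221102.
+ */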
+
+
+/**********************************************************************
+ * SBMAC_SET_SPEED(s,speed)
+ *
+ * Configure LAN speed for the specified MAC.
+ * Warning: must be called when MAC is off!
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * speed - speed to set MAC to (see sbmac_speed_t enum)
+ *
+ * Return value:
+ * 	   1 if successful
+ * 	   0 if the speed was invalid, or if the MAC is currently running
+ * 	     (the new value is saved for the next restart)
+ ********************************************************************* */
+
+static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
+{
+ uint64_t cfg;
+ uint64_t framecfg;
+
+ /*
+ * Save new current values
+ */
+
+ s->sbm_speed = speed;
+
+ if (s->sbm_state == sbmac_state_on)
+ return 0; /* save for next restart */
+
+ /*
+ * Read current register values
+ */
+
+ cfg = SBMAC_READCSR(s->sbm_maccfg);
+ framecfg = SBMAC_READCSR(s->sbm_framecfg);
+
+ /*
+ * Mask out the stuff we want to change
+ */
+
+ cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
+ framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
+ M_MAC_SLOT_SIZE);
+
+ /*
+ * Now add in the new bits
+ */
+
+ switch (speed) {
+ case sbmac_speed_10:
+ framecfg |= V_MAC_IFG_RX_10 |
+ V_MAC_IFG_TX_10 |
+ K_MAC_IFG_THRSH_10 |
+ V_MAC_SLOT_SIZE_10;
+ cfg |= V_MAC_SPEED_SEL_10MBPS;
+ break;
+
+ case sbmac_speed_100:
+ framecfg |= V_MAC_IFG_RX_100 |
+ V_MAC_IFG_TX_100 |
+ V_MAC_IFG_THRSH_100 |
+ V_MAC_SLOT_SIZE_100;
+ cfg |= V_MAC_SPEED_SEL_100MBPS ;
+ break;
+
+ case sbmac_speed_1000:
+ framecfg |= V_MAC_IFG_RX_1000 |
+ V_MAC_IFG_TX_1000 |
+ V_MAC_IFG_THRSH_1000 |
+ V_MAC_SLOT_SIZE_1000;
+ cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
+ break;
+
+ case sbmac_speed_auto: /* XXX not implemented */
+ /* fall through */
+ default:
+ return 0;
+ }
+
+ /*
+ * Send the bits back to the hardware
+ */
+
+ SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
+ SBMAC_WRITECSR(s->sbm_maccfg,cfg);
+
+ return 1;
+}
+
+/**********************************************************************
+ * SBMAC_SET_DUPLEX(s,duplex,fc)
+ *
+ * Set Ethernet duplex and flow control options for this MAC
+ * Warning: must be called when MAC is off!
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * duplex - duplex setting (see sbmac_duplex_t)
+ * fc - flow control setting (see sbmac_fc_t)
+ *
+ * Return value:
+ * 	   1 if ok
+ * 	   0 if an invalid parameter combination was specified, or if the
+ * 	     MAC is currently running (the new values are saved for the
+ * 	     next restart)
+ ********************************************************************* */
+
+static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
+{
+ uint64_t cfg;
+
+ /*
+ * Save new current values
+ */
+
+ s->sbm_duplex = duplex;
+ s->sbm_fc = fc;
+
+ if (s->sbm_state == sbmac_state_on)
+ return 0; /* save for next restart */
+
+ /*
+ * Read current register values
+ */
+
+ cfg = SBMAC_READCSR(s->sbm_maccfg);
+
+ /*
+ * Mask off the stuff we're about to change
+ */
+
+ cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
+
+
+ switch (duplex) {
+ case sbmac_duplex_half:
+ switch (fc) {
+ case sbmac_fc_disabled:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
+ break;
+
+ case sbmac_fc_collision:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
+ break;
+
+ case sbmac_fc_carrier:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
+ break;
+
+ case sbmac_fc_auto: /* XXX not implemented */
+ /* fall through */
+ case sbmac_fc_frame: /* not valid in half duplex */
+ default: /* invalid selection */
+ return 0;
+ }
+ break;
+
+ case sbmac_duplex_full:
+ switch (fc) {
+ case sbmac_fc_disabled:
+ cfg |= V_MAC_FC_CMD_DISABLED;
+ break;
+
+ case sbmac_fc_frame:
+ cfg |= V_MAC_FC_CMD_ENABLED;
+ break;
+
+ case sbmac_fc_collision: /* not valid in full duplex */
+ case sbmac_fc_carrier: /* not valid in full duplex */
+ case sbmac_fc_auto: /* XXX not implemented */
+ /* fall through */
+ default:
+ return 0;
+ }
+ break;
+ case sbmac_duplex_auto:
+ /* XXX not implemented */
+ break;
+ }
+
+ /*
+ * Send the bits back to the hardware
+ */
+
+ SBMAC_WRITECSR(s->sbm_maccfg,cfg);
+
+ return 1;
+}
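+
+/*
+ * Summary of the combinations accepted above:
+ *
+ *    half duplex: flow control disabled, collision-based, or carrier-based
+ *    full duplex: flow control disabled or frame-based (pause frames)
+ *
+ * sbmac_fc_auto is not implemented and makes the function return 0;
+ * sbmac_duplex_auto is likewise unimplemented and currently just leaves
+ * the flow control and half-duplex bits cleared.
+ */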
+
+
+
+
+/**********************************************************************
+ * SBMAC_INTR()
+ *
+ * Interrupt handler for MAC interrupts
+ *
+ * Input parameters:
+ * 	   irq - interrupt number
+ * 	   dev_instance - net_device that raised the interrupt
+ * 	   rgs - saved processor state (unused)
+ *
+ * Return value:
+ * 	   IRQ_RETVAL(1) if any work was done, IRQ_RETVAL(0) otherwise
+ ********************************************************************* */
+static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct sbmac_softc *sc = netdev_priv(dev);
+ uint64_t isr;
+ int handled = 0;
+
+ for (;;) {
+
+ /*
+ * Read the ISR (this clears the bits in the real
+ * register, except for counter addr)
+ */
+
+ isr = SBMAC_READCSR(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
+ if (isr == 0)
+ break;
+
+ handled = 1;
+
+ /*
+ * Transmits on channel 0
+ */
+
+ if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
+ sbdma_tx_process(sc,&(sc->sbm_txdma));
+ }
+
+ /*
+ * Receives on channel 0
+ */
+
+ /*
+ * It's important to test all the bits (or at least the
+ * EOP_SEEN bit) when deciding to do the RX process
+ * particularly when coalescing, to make sure we
+ * take care of the following:
+ *
+ * If you have some packets waiting (have been received
+ * but no interrupt) and get a TX interrupt before
+ * the RX timer or counter expires, reading the ISR
+ * above will clear the timer and counter, and you
+ * won't get another interrupt until a packet shows
+ * up to start the timer again. Testing
+ * EOP_SEEN here takes care of this case.
+ * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
+ */
+
+
+ if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+ sbdma_rx_process(sc,&(sc->sbm_rxdma));
+ }
+ }
+ return IRQ_RETVAL(handled);
+}
+
+
+/**********************************************************************
+ * SBMAC_START_TX(skb,dev)
+ *
+ * Start output on the specified interface. Basically, we
+ * queue as many buffers as we can until the ring fills up, or
+ * we run off the end of the queue, whichever comes first.
+ *
+ * Input parameters:
+ * 	   skb - sk_buff to transmit
+ * 	   dev - net_device to transmit it on
+ *
+ * Return value:
+ * 	   0 if the packet was queued
+ * 	   1 if the ring was full and the queue was stopped
+ ********************************************************************* */
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ /* lock eth irq */
+ spin_lock_irq (&sc->sbm_lock);
+
+ /*
+ * Put the buffer on the transmit ring. If we
+ * don't have room, stop the queue.
+ */
+
+ if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
+ /* XXX save skb that we could not send */
+ netif_stop_queue(dev);
+ spin_unlock_irq(&sc->sbm_lock);
+
+ return 1;
+ }
+
+ dev->trans_start = jiffies;
+
+ spin_unlock_irq (&sc->sbm_lock);
+
+ return 0;
+}
+
+/**********************************************************************
+ * SBMAC_SETMULTI(sc)
+ *
+ * Reprogram the multicast table into the hardware, given
+ * the list of multicasts associated with the interface
+ * structure.
+ *
+ * Input parameters:
+ * sc - softc
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_setmulti(struct sbmac_softc *sc)
+{
+ uint64_t reg;
+ sbmac_port_t port;
+ int idx;
+ struct dev_mc_list *mclist;
+ struct net_device *dev = sc->sbm_dev;
+
+ /*
+ * Clear out entire multicast table. We do this by nuking
+ * the entire hash table and all the direct matches except
+ * the first one, which is used for our station address
+ */
+
+ for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
+ port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
+ SBMAC_WRITECSR(port,0);
+ }
+
+ for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+ port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
+ SBMAC_WRITECSR(port,0);
+ }
+
+ /*
+ * Clear the filter to say we don't want any multicasts.
+ */
+
+ reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+ SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /*
+ * Enable ALL multicasts. Do this by inverting the
+ * multicast enable bit.
+ */
+ reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+ SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ return;
+ }
+
+
+ /*
+	 * Program new multicast entries. For now, only use the
+ * perfect filter. In the future we'll need to use the
+ * hash filter if the perfect filter overflows
+ */
+
+ /* XXX only using perfect filter for now, need to use hash
+ * XXX if the table overflows */
+
+ idx = 1; /* skip station address */
+ mclist = dev->mc_list;
+ while (mclist && (idx < MAC_ADDR_COUNT)) {
+ reg = sbmac_addr2reg(mclist->dmi_addr);
+ port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
+ SBMAC_WRITECSR(port,reg);
+ idx++;
+ mclist = mclist->next;
+ }
+
+ /*
+ * Enable the "accept multicast bits" if we programmed at least one
+ * multicast.
+ */
+
+ if (idx > 1) {
+ reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg |= M_MAC_MCAST_EN;
+ SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ }
+}
+
+
+
+#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
+/**********************************************************************
+ * SBMAC_PARSE_XDIGIT(str)
+ *
+ * Parse a hex digit, returning its value
+ *
+ * Input parameters:
+ * str - character
+ *
+ * Return value:
+ * hex value, or -1 if invalid
+ ********************************************************************* */
+
+static int sbmac_parse_xdigit(char str)
+{
+ int digit;
+
+ if ((str >= '0') && (str <= '9'))
+ digit = str - '0';
+ else if ((str >= 'a') && (str <= 'f'))
+ digit = str - 'a' + 10;
+ else if ((str >= 'A') && (str <= 'F'))
+ digit = str - 'A' + 10;
+ else
+ return -1;
+
+ return digit;
+}
+
+/**********************************************************************
+ * SBMAC_PARSE_HWADDR(str,hwaddr)
+ *
+ * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
+ * Ethernet address.
+ *
+ * Input parameters:
+ * str - string
+ * hwaddr - pointer to hardware address
+ *
+ * Return value:
+ * 0 if ok, else -1
+ ********************************************************************* */
+
+static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
+{
+ int digit1,digit2;
+ int idx = 6;
+
+ while (*str && (idx > 0)) {
+ digit1 = sbmac_parse_xdigit(*str);
+ if (digit1 < 0)
+ return -1;
+ str++;
+ if (!*str)
+ return -1;
+
+ if ((*str == ':') || (*str == '-')) {
+ digit2 = digit1;
+ digit1 = 0;
+ }
+ else {
+ digit2 = sbmac_parse_xdigit(*str);
+ if (digit2 < 0)
+ return -1;
+ str++;
+ }
+
+ *hwaddr++ = (digit1 << 4) | digit2;
+ idx--;
+
+ if (*str == '-')
+ str++;
+ if (*str == ':')
+ str++;
+ }
+ return 0;
+}
+#endif
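+
+/*
+ * Worked example (illustrative only): "00:11:22:33:44:55" parses to the
+ * bytes { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }.  A single hex digit in
+ * front of a separator is also accepted, so "4:16:..." yields 0x04 for
+ * the first byte.
+ */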
+
+static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
+{
+ if (new_mtu > ENET_PACKET_SIZE)
+ return -EINVAL;
+ _dev->mtu = new_mtu;
+ printk(KERN_INFO "changing the mtu to %d\n", new_mtu);
+ return 0;
+}
+
+/**********************************************************************
+ * SBMAC_INIT(dev)
+ *
+ * Attach routine - init hardware and hook ourselves into linux
+ *
+ * Input parameters:
+ * dev - net_device structure
+ *
+ * Return value:
+ * status
+ ********************************************************************* */
+
+static int sbmac_init(struct net_device *dev, int idx)
+{
+ struct sbmac_softc *sc;
+ unsigned char *eaddr;
+ uint64_t ea_reg;
+ int i;
+ int err;
+
+ sc = netdev_priv(dev);
+
+ /* Determine controller base address */
+
+ sc->sbm_base = IOADDR(dev->base_addr);
+ sc->sbm_dev = dev;
+ sc->sbe_idx = idx;
+
+ eaddr = sc->sbm_hwaddr;
+
+ /*
+	 * Read the Ethernet address. The firmware left this programmed
+ * for us in the ethernet address register for each mac.
+ */
+
+ ea_reg = SBMAC_READCSR(sc->sbm_base + R_MAC_ETHERNET_ADDR);
+ SBMAC_WRITECSR(sc->sbm_base + R_MAC_ETHERNET_ADDR, 0);
+ for (i = 0; i < 6; i++) {
+ eaddr[i] = (uint8_t) (ea_reg & 0xFF);
+ ea_reg >>= 8;
+ }
+
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = eaddr[i];
+ }
+
+
+ /*
+ * Init packet size
+ */
+
+ sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
+
+ /*
+ * Initialize context (get pointers to registers and stuff), then
+ * allocate the memory for the descriptor tables.
+ */
+
+ sbmac_initctx(sc);
+
+ /*
+ * Set up Linux device callins
+ */
+
+ spin_lock_init(&(sc->sbm_lock));
+
+ dev->open = sbmac_open;
+ dev->hard_start_xmit = sbmac_start_tx;
+ dev->stop = sbmac_close;
+ dev->get_stats = sbmac_get_stats;
+ dev->set_multicast_list = sbmac_set_rx_mode;
+ dev->do_ioctl = sbmac_mii_ioctl;
+ dev->tx_timeout = sbmac_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->change_mtu = sb1250_change_mtu;
+
+	/* This is needed on pass 2 parts for the Rx H/W checksum feature */
+ sbmac_set_iphdr_offset(sc);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_uninit;
+
+ if (periph_rev >= 2) {
+ printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
+ sc->sbm_dev->name);
+ }
+
+ /*
+ * Display Ethernet address (this is called during the config
+ * process so we need to finish off the config message that
+ * was being displayed)
+ */
+ printk(KERN_INFO
+ "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name, dev->base_addr,
+ eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
+
+
+ return 0;
+
+out_uninit:
+ sbmac_uninitctx(sc);
+
+ return err;
+}
+
+
+static int sbmac_open(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
+ }
+
+ /*
+ * map/route interrupt (clear status first, in case something
+ * weird is pending; we haven't initialized the mac registers
+ * yet)
+ */
+
+ SBMAC_READCSR(sc->sbm_isr);
+ if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
+ return -EBUSY;
+
+ /*
+ * Configure default speed
+ */
+
+ sbmac_mii_poll(sc,noisy_mii);
+
+ /*
+ * Turn on the channel
+ */
+
+ sbmac_set_channel_state(sc,sbmac_state_on);
+
+ /*
+ * XXX Station address is in dev->dev_addr
+ */
+
+ if (dev->if_port == 0)
+ dev->if_port = 0;
+
+ netif_start_queue(dev);
+
+ sbmac_set_rx_mode(dev);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&sc->sbm_timer);
+ sc->sbm_timer.expires = jiffies + 2 * HZ/100;
+ sc->sbm_timer.data = (unsigned long)dev;
+ sc->sbm_timer.function = &sbmac_timer;
+ add_timer(&sc->sbm_timer);
+
+ return 0;
+}
+
+
+
+static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
+{
+ int bmsr,bmcr,k1stsr,anlpar;
+ int chg;
+ char buffer[100];
+ char *p = buffer;
+
+ /* Read the mode status and mode control registers. */
+ bmsr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMSR);
+ bmcr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMCR);
+
+ /* get the link partner status */
+ anlpar = sbmac_mii_read(s,s->sbm_phys[0],MII_ANLPAR);
+
+ /* if supported, read the 1000baseT register */
+ if (bmsr & BMSR_1000BT_XSR) {
+ k1stsr = sbmac_mii_read(s,s->sbm_phys[0],MII_K1STSR);
+ }
+ else {
+ k1stsr = 0;
+ }
+
+ chg = 0;
+
+ if ((bmsr & BMSR_LINKSTAT) == 0) {
+ /*
+ * If link status is down, clear out old info so that when
+ * it comes back up it will force us to reconfigure speed
+ */
+ s->sbm_phy_oldbmsr = 0;
+ s->sbm_phy_oldanlpar = 0;
+ s->sbm_phy_oldk1stsr = 0;
+ return 0;
+ }
+
+ if ((s->sbm_phy_oldbmsr != bmsr) ||
+ (s->sbm_phy_oldanlpar != anlpar) ||
+ (s->sbm_phy_oldk1stsr != k1stsr)) {
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n",
+ s->sbm_dev->name,
+ s->sbm_phy_oldbmsr,bmsr,
+ s->sbm_phy_oldanlpar,anlpar,
+ s->sbm_phy_oldk1stsr,k1stsr);
+ }
+ s->sbm_phy_oldbmsr = bmsr;
+ s->sbm_phy_oldanlpar = anlpar;
+ s->sbm_phy_oldk1stsr = k1stsr;
+ chg = 1;
+ }
+
+ if (chg == 0)
+ return 0;
+
+ p += sprintf(p,"Link speed: ");
+
+ if (k1stsr & K1STSR_LP1KFD) {
+ s->sbm_speed = sbmac_speed_1000;
+ s->sbm_duplex = sbmac_duplex_full;
+ s->sbm_fc = sbmac_fc_frame;
+ p += sprintf(p,"1000BaseT FDX");
+ }
+ else if (k1stsr & K1STSR_LP1KHD) {
+ s->sbm_speed = sbmac_speed_1000;
+ s->sbm_duplex = sbmac_duplex_half;
+ s->sbm_fc = sbmac_fc_disabled;
+ p += sprintf(p,"1000BaseT HDX");
+ }
+ else if (anlpar & ANLPAR_TXFD) {
+ s->sbm_speed = sbmac_speed_100;
+ s->sbm_duplex = sbmac_duplex_full;
+ s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? sbmac_fc_frame : sbmac_fc_disabled;
+ p += sprintf(p,"100BaseT FDX");
+ }
+ else if (anlpar & ANLPAR_TXHD) {
+ s->sbm_speed = sbmac_speed_100;
+ s->sbm_duplex = sbmac_duplex_half;
+ s->sbm_fc = sbmac_fc_disabled;
+ p += sprintf(p,"100BaseT HDX");
+ }
+ else if (anlpar & ANLPAR_10FD) {
+ s->sbm_speed = sbmac_speed_10;
+ s->sbm_duplex = sbmac_duplex_full;
+ s->sbm_fc = sbmac_fc_frame;
+ p += sprintf(p,"10BaseT FDX");
+ }
+ else if (anlpar & ANLPAR_10HD) {
+ s->sbm_speed = sbmac_speed_10;
+ s->sbm_duplex = sbmac_duplex_half;
+ s->sbm_fc = sbmac_fc_collision;
+ p += sprintf(p,"10BaseT HDX");
+ }
+ else {
+ p += sprintf(p,"Unknown");
+ }
+
+ if (noisy) {
+ printk(KERN_INFO "%s: %s\n",s->sbm_dev->name,buffer);
+ }
+
+ return 1;
+}
+
+
+static void sbmac_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct sbmac_softc *sc = netdev_priv(dev);
+ int next_tick = HZ;
+ int mii_status;
+
+ spin_lock_irq (&sc->sbm_lock);
+
+ /* make IFF_RUNNING follow the MII status bit "Link established" */
+ mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
+
+ if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) {
+ sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
+ if (mii_status & BMSR_LINKSTAT) {
+ netif_carrier_on(dev);
+ }
+ else {
+ netif_carrier_off(dev);
+ }
+ }
+
+ /*
+ * Poll the PHY to see what speed we should be running at
+ */
+
+ if (sbmac_mii_poll(sc,noisy_mii)) {
+ if (sc->sbm_state != sbmac_state_off) {
+ /*
+ * something changed, restart the channel
+ */
+ if (debug > 1) {
+ printk("%s: restarting channel because speed changed\n",
+ sc->sbm_dev->name);
+ }
+ sbmac_channel_stop(sc);
+ sbmac_channel_start(sc);
+ }
+ }
+
+ spin_unlock_irq (&sc->sbm_lock);
+
+ sc->sbm_timer.expires = jiffies + next_tick;
+ add_timer(&sc->sbm_timer);
+}
+
+
+static void sbmac_tx_timeout (struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ spin_lock_irq (&sc->sbm_lock);
+
+
+ dev->trans_start = jiffies;
+ sc->sbm_stats.tx_errors++;
+
+ spin_unlock_irq (&sc->sbm_lock);
+
+ printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
+}
+
+
+
+
+static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+
+ /* XXX update other stats here */
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ return &sc->sbm_stats;
+}
+
+
+
+static void sbmac_set_rx_mode(struct net_device *dev)
+{
+ unsigned long flags;
+ int msg_flag = 0;
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+ if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
+ /*
+ * Promiscuous changed.
+ */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ msg_flag = 1;
+ sbmac_promiscuous_mode(sc,1);
+ }
+ else {
+ msg_flag = 2;
+ sbmac_promiscuous_mode(sc,0);
+ }
+ }
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ if (msg_flag) {
+ printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
+ dev->name,(msg_flag==1)?"en":"dis");
+ }
+
+ /*
+ * Program the multicasts. Do this every time.
+ */
+
+ sbmac_setmulti(sc);
+
+}
+
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ u16 *data = (u16 *)&rq->ifr_ifru;
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+ retval = 0;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = sc->sbm_phys[0] & 0x1f;
+ /* Fall Through */
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f);
+ break;
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN)) {
+ retval = -EPERM;
+ break;
+ }
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n",dev->name,
+ data[0],data[1],data[2]);
+ }
+ sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ break;
+ default:
+ retval = -EOPNOTSUPP;
+ }
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+ return retval;
+}
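+
+/*
+ * Illustrative user-space sketch of the private MII ioctls above (the
+ * interface name "eth0" and the socket fd are assumptions, not from the
+ * driver):
+ *
+ *	struct ifreq ifr;
+ *	u16 *data = (u16 *)&ifr.ifr_ifru;
+ *
+ *	strcpy(ifr.ifr_name, "eth0");
+ *	ioctl(fd, SIOCDEVPRIVATE, &ifr);	- data[0] = PHY address
+ *	data[1] = 1;				- MII register to read (BMSR)
+ *	ioctl(fd, SIOCDEVPRIVATE + 1, &ifr);	- value comes back in data[3]
+ */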
+
+static int sbmac_close(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ unsigned long flags;
+ int irq;
+
+ sbmac_set_channel_state(sc,sbmac_state_off);
+
+ del_timer_sync(&sc->sbm_timer);
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+
+ netif_stop_queue(dev);
+
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard\n",dev->name);
+ }
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ irq = dev->irq;
+ synchronize_irq(irq);
+ free_irq(irq, dev);
+
+ sbdma_emptyring(&(sc->sbm_txdma));
+ sbdma_emptyring(&(sc->sbm_rxdma));
+
+ return 0;
+}
+
+
+
+#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
+static void
+sbmac_setup_hwaddr(int chan,char *addr)
+{
+ uint8_t eaddr[6];
+ uint64_t val;
+ sbmac_port_t port;
+
+ port = A_MAC_CHANNEL_BASE(chan);
+ sbmac_parse_hwaddr(addr,eaddr);
+ val = sbmac_addr2reg(eaddr);
+ SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),val);
+ val = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+}
+#endif
+
+static struct net_device *dev_sbmac[MAX_UNITS];
+
+static int __init
+sbmac_init_module(void)
+{
+ int idx;
+ struct net_device *dev;
+ sbmac_port_t port;
+ int chip_max_units;
+
+ /*
+ * For bringup when not using the firmware, we can pre-fill
+ * the MAC addresses using the environment variables
+ * specified in this file (or maybe from the config file?)
+ */
+#ifdef SBMAC_ETH0_HWADDR
+ sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR);
+#endif
+#ifdef SBMAC_ETH1_HWADDR
+ sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR);
+#endif
+#ifdef SBMAC_ETH2_HWADDR
+ sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR);
+#endif
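+
+/*
+ * Illustrative sketch: a compile-time override is a string for
+ * sbmac_parse_hwaddr() to parse; the exact format is an assumption here,
+ * but something like
+ *
+ *	#define SBMAC_ETH0_HWADDR "40:16:7E:00:00:01"
+ *
+ * (a made-up address) shows the intent of the hooks above.
+ */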
+
+ /*
+ * Walk through the Ethernet controllers and find
+ * those who have their MAC addresses set.
+ */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ chip_max_units = 3;
+ break;
+ case K_SYS_SOC_TYPE_BCM1120:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ chip_max_units = 2;
+ break;
+ default:
+ chip_max_units = 0;
+ break;
+ }
+ if (chip_max_units > MAX_UNITS)
+ chip_max_units = MAX_UNITS;
+
+ for (idx = 0; idx < chip_max_units; idx++) {
+
+ /*
+ * This is the base address of the MAC.
+ */
+
+ port = A_MAC_CHANNEL_BASE(idx);
+
+ /*
+ * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
+ * value for us by the firmware if we're going to use this MAC.
+ * If we find a zero, skip this MAC.
+ */
+
+ sbmac_orig_hwaddr[idx] = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+ if (sbmac_orig_hwaddr[idx] == 0) {
+ printk(KERN_DEBUG "sbmac: not configuring MAC at "
+ "%lx\n", port);
+ continue;
+ }
+
+ /*
+ * Okay, cool. Initialize this MAC.
+ */
+
+ dev = alloc_etherdev(sizeof(struct sbmac_softc));
+ if (!dev)
+ return -ENOMEM; /* return ENOMEM */
+
+ printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
+
+ dev->irq = K_INT_MAC_0 + idx;
+ dev->base_addr = port;
+ dev->mem_end = 0;
+ if (sbmac_init(dev, idx)) {
+ port = A_MAC_CHANNEL_BASE(idx);
+ SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),
+ sbmac_orig_hwaddr[idx]);
+ free_netdev(dev);
+ continue;
+ }
+ dev_sbmac[idx] = dev;
+ }
+ return 0;
+}
+
+
+static void __exit
+sbmac_cleanup_module(void)
+{
+ struct net_device *dev;
+ int idx;
+
+ for (idx = 0; idx < MAX_UNITS; idx++) {
+ struct sbmac_softc *sc;
+ dev = dev_sbmac[idx];
+ if (!dev)
+ continue;
+
+ sc = netdev_priv(dev);
+ unregister_netdev(dev);
+ sbmac_uninitctx(sc);
+ free_netdev(dev);
+ }
+}
+
+module_init(sbmac_init_module);
+module_exit(sbmac_cleanup_module);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
new file mode 100644
index 000000000000..79dca398f3ac
--- /dev/null
+++ b/drivers/net/seeq8005.c
@@ -0,0 +1,769 @@
+/* seeq8005.c: A network driver for linux. */
+/*
+ Based on skeleton.c,
+ Written 1993-94 by Donald Becker.
+ See the skeleton.c file for further copyright information.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as hamish@zot.apana.org.au
+
+ This file is a network device driver for the SEEQ 8005 chipset and
+ the Linux operating system.
+
+*/
+
+static const char version[] =
+ "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
+
+/*
+ Sources:
+ SEEQ 8005 databook
+
+ Version history:
+ 1.00 Public release. cosmetic changes (no warnings now)
+	0.68	Turning per-packet, interrupt debug messages off - testing for release.
+ 0.67 timing problems/bad buffer reads seem to be fixed now
+ 0.63 *!@$ protocol=eth_type_trans -- now packets flow
+ 0.56 Send working
+ 0.48 Receive working
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "seeq8005.h"
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int seeq8005_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct net_device_stats stats;
+ unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
+ long open_time; /* Useless example local info. */
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0x80
+#define SA_ADDR2 0x4b
+
+/* Index to functions, as function prototypes. */
+
+static int seeq8005_probe1(struct net_device *dev, int ioaddr);
+static int seeq8005_open(struct net_device *dev);
+static void seeq8005_timeout(struct net_device *dev);
+static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t seeq8005_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void seeq8005_rx(struct net_device *dev);
+static int seeq8005_close(struct net_device *dev);
+static struct net_device_stats *seeq8005_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
+static void hardware_send_packet(struct net_device *dev, char *buf, int length);
+extern void seeq8005_init(struct net_device *dev, int startp);
+static inline void wait_for_buffer(struct net_device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ */
+
+static int io = 0x320;
+static int irq = 10;
+
+struct net_device * __init seeq8005_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = seeq8005_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = seeq8005_portlist; *port; port++) {
+ if (seeq8005_probe1(dev, *port) == 0)
+ break;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, SEEQ8005_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned version_printed;
+ int i,j;
+ unsigned char SA_prom[32];
+ int old_cfg1;
+ int old_cfg2;
+ int old_stat;
+ int old_dmaar;
+ int old_rear;
+ int retval;
+
+ if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
+ return -ENODEV;
+
+ if (net_debug>1)
+ printk("seeq8005: probing at 0x%x\n",ioaddr);
+
+ old_stat = inw(SEEQ_STATUS); /* read status register */
+ if (old_stat == 0xffff) {
+ retval = -ENODEV;
+ goto out; /* assume that 0xffff == no device */
+ }
+ if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
+ if (net_debug>1) {
+ printk("seeq8005: reserved stat bits != 0x1800\n");
+ printk(" == 0x%04x\n",old_stat);
+ }
+ retval = -ENODEV;
+ goto out;
+ }
+
+ old_rear = inw(SEEQ_REA);
+ if (old_rear == 0xffff) {
+ outw(0,SEEQ_REA);
+ if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
+ retval = -ENODEV;
+ goto out;
+ }
+ } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
+ if (net_debug>1) {
+ printk("seeq8005: unused rear bits != 0xff00\n");
+ printk(" == 0x%04x\n",old_rear);
+ }
+ retval = -ENODEV;
+ goto out;
+ }
+
+ old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
+ old_cfg1 = inw(SEEQ_CFG1);
+ old_dmaar = inw(SEEQ_DMAAR);
+
+ if (net_debug>4) {
+ printk("seeq8005: stat = 0x%04x\n",old_stat);
+ printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
+ printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
+ printk("seeq8005: raer = 0x%04x\n",old_rear);
+ printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
+ }
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
+ outw( 0, SEEQ_DMAAR); /* set starting PROM address */
+ outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
+
+
+ j=0;
+ for(i=0; i <32; i++) {
+ j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
+ }
+
+#if 0
+ /* untested because I only have the one card */
+ if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
+ if (net_debug>1) { /* check this before deciding that we have a card */
+ printk("seeq8005: prom sum error\n");
+ }
+ outw( old_stat, SEEQ_STATUS);
+ outw( old_dmaar, SEEQ_DMAAR);
+ outw( old_cfg1, SEEQ_CFG1);
+ retval = -ENODEV;
+ goto out;
+ }
+#endif
+
+ outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
+ udelay(5);
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ if (net_debug) {
+ printk("seeq8005: prom sum = 0x%08x\n",j);
+ for(j=0; j<32; j+=16) {
+ printk("seeq8005: prom %02x: ",j);
+ for(i=0;i<16;i++) {
+ printk("%02x ",SA_prom[j|i]);
+ }
+ printk(" ");
+ for(i=0;i<16;i++) {
+ if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
+ printk("%c", SA_prom[j|i]);
+ } else {
+ printk(" ");
+ }
+ }
+ printk("\n");
+ }
+ }
+
+#if 0
+ /*
+ * testing the packet buffer memory doesn't work yet
+ * but all other buffer accesses do
+ * - fixing is not a priority
+ */
+ if (net_debug>1) { /* test packet buffer memory */
+ printk("seeq8005: testing packet buffer ... ");
+ outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0 , SEEQ_DMAAR);
+ for(i=0;i<32768;i++) {
+ outw(0x5a5a, SEEQ_BUFFER);
+ }
+ j=jiffies+HZ;
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) )
+ mb();
+ outw( 0 , SEEQ_DMAAR);
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ))
+ mb();
+ if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ j=0;
+ for(i=0;i<32768;i++) {
+ if (inw(SEEQ_BUFFER) != 0x5a5a)
+ j++;
+ }
+ if (j) {
+ printk("%i\n",j);
+ } else {
+ printk("ok.\n");
+ }
+ }
+#endif
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]);
+
+ if (dev->irq == 0xff)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ unsigned long cookie = probe_irq_on();
+
+ outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
+
+ dev->irq = probe_irq_off(cookie);
+
+ if (net_debug >= 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+
+#if 0
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+#endif
+ dev->open = seeq8005_open;
+ dev->stop = seeq8005_close;
+ dev->hard_start_xmit = seeq8005_send_packet;
+ dev->tx_timeout = seeq8005_timeout;
+ dev->watchdog_timeo = HZ/20;
+ dev->get_stats = seeq8005_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->flags &= ~IFF_MULTICAST;
+
+ return 0;
+out:
+ release_region(ioaddr, SEEQ8005_IO_EXTENT);
+ return retval;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int seeq8005_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return -EAGAIN;
+ }
+ }
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ seeq8005_init(dev, 1);
+
+ lp->open_time = jiffies;
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static void seeq8005_timeout(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ seeq8005_init(dev, 1);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short length = skb->len;
+ unsigned char *buf;
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ buf = skb->data;
+
+ /* Block a timer-based transmit from overlapping */
+ netif_stop_queue(dev);
+
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ lp->stats.tx_bytes += length;
+ dev_kfree_skb (skb);
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it. This has to
+ * occur in the interrupt handler!
+ */
+inline void wait_for_buffer(struct net_device * dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned long tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
+ cpu_relax();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ status = inw(SEEQ_STATUS);
+ do {
+ if (net_debug >2) {
+ printk("%s: int, status=0x%04x\n",dev->name,status);
+ }
+
+ if (status & SEEQSTAT_WINDOW_INT) {
+ handled = 1;
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ if (net_debug) {
+ printk("%s: window int!\n",dev->name);
+ }
+ }
+ if (status & SEEQSTAT_TX_INT) {
+ handled = 1;
+ outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ lp->stats.tx_packets++;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ if (status & SEEQSTAT_RX_INT) {
+ handled = 1;
+ /* Got a packet(s). */
+ seeq8005_rx(dev);
+ }
+ status = inw(SEEQ_STATUS);
+ } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
+
+ if(net_debug>2) {
+ printk("%s: eoi\n",dev->name);
+ }
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void seeq8005_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int boguscount = 10;
+ int pkt_hdr;
+ int ioaddr = dev->base_addr;
+
+ do {
+ int next_packet;
+ int pkt_len;
+ int i;
+ int status;
+
+ status = inw(SEEQ_STATUS);
+ outw( lp->receive_ptr, SEEQ_DMAAR);
+ outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ wait_for_buffer(dev);
+ next_packet = ntohs(inw(SEEQ_BUFFER));
+ pkt_hdr = inw(SEEQ_BUFFER);
+
+ if (net_debug>2) {
+ printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
+ }
+
+ if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
+ return; /* Done for now */
+ }
+
+ if ((pkt_hdr & SEEQPKTS_DONE)==0)
+ break;
+
+ if (next_packet < lp->receive_ptr) {
+ pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
+ } else {
+ pkt_len = next_packet - lp->receive_ptr - 4;
+ }
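+
+		/*
+		 * Worked example: with DEFAULT_TEA = 0x3f the receive area
+		 * spans 0x4000-0xffff, so if receive_ptr = 0xff00 and the
+		 * chip wrapped to next_packet = 0x4010, the first branch
+		 * gives (0x4010 + 0x10000 - 0x4000) - 0xff00 - 4 = 0x10c
+		 * bytes of packet data.
+		 */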
+
+ if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
+ printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
+ seeq8005_init(dev,1);
+ return;
+ }
+
+ lp->receive_ptr = next_packet;
+
+ if (net_debug>2) {
+ printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
+ }
+
+ if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++;
+ if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++;
+ /* skip over this packet */
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ unsigned char *buf;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+			skb_reserve(skb, 2);	/* align the IP header on a 16-byte boundary */
+ buf = skb_put(skb,pkt_len);
+
+ insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
+
+ if (net_debug>2) {
+ char * p = buf;
+ printk("%s: recv ",dev->name);
+ for(i=0;i<14;i++) {
+ printk("%02x ",*(p++)&0xff);
+ }
+ printk("\n");
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
+
+ /* If any worth-while packets have been received, netif_rx()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int seeq8005_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx here. */
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *seeq8005_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+/*
+ * I _could_ do up to 6 addresses here, but won't (yet?)
+ */
+
+#if 0
+ int ioaddr = dev->base_addr;
+/*
+ * hmm, not even sure if my matching works _anyway_ - seem to be receiving
+ * _everything_ . . .
+ */
+
+ if (num_addrs) { /* Enable promiscuous mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
+ dev->flags|=IFF_PROMISC;
+ } else { /* Disable promiscuous mode, use normal mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
+ }
+#endif
+}
+
+void seeq8005_init(struct net_device *dev, int startp)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
+ udelay(5);
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
+/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) { /* set Station address */
+ outb(dev->dev_addr[i], SEEQ_BUFFER);
+ udelay(2);
+ }
+
+ outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
+ outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
+
+ lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
+ outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
+
+ outw( 0x00ff, SEEQ_REA); /* Receive Area End */
+
+ if (net_debug>4) {
+ printk("%s: SA0 = ",dev->name);
+
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR);
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) {
+ printk("%02x ",inb(SEEQ_BUFFER));
+ }
+ printk("\n");
+ }
+
+ outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
+ outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
+
+ if (net_debug>4) {
+ int old_cfg1;
+ old_cfg1 = inw(SEEQ_CFG1);
+ printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
+ printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
+ printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
+ printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
+ printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
+
+ }
+}
+
+
+static void hardware_send_packet(struct net_device * dev, char *buf, int length)
+{
+ int ioaddr = dev->base_addr;
+ int status = inw(SEEQ_STATUS);
+ int transmit_ptr = 0;
+ int tmp;
+
+ if (net_debug>4) {
+ printk("%s: send 0x%04x\n",dev->name,length);
+ }
+
+ /* Set FIFO to writemode and set packet-buffer address */
+ outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( transmit_ptr, SEEQ_DMAAR);
+
+ /* output SEEQ Packet header barfage */
+ outw( htons(length + 4), SEEQ_BUFFER);
+ outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
+
+ /* blat the buffer */
+ outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
+ /* paranoia !! */
+ outw( 0, SEEQ_BUFFER);
+ outw( 0, SEEQ_BUFFER);
+
+ /* set address of start of transmit chain */
+ outw( transmit_ptr, SEEQ_TPR);
+
+ /* drain FIFO */
+ tmp = jiffies;
+ while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies - tmp < HZ))
+ mb();
+
+ /* doit ! */
+ outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+
+}
+
+
+#ifdef MODULE
+
+static struct net_device *dev_seeq;
+MODULE_LICENSE("GPL");
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
+MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
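+
+/*
+ * Illustrative usage (the values are simply the driver defaults above):
+ *
+ *	modprobe seeq8005 io=0x320 irq=10
+ */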
+
+int init_module(void)
+{
+ dev_seeq = seeq8005_probe(-1);
+ if (IS_ERR(dev_seeq))
+ return PTR_ERR(dev_seeq);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(dev_seeq);
+ release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
+ free_netdev(dev_seeq);
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/seeq8005.h b/drivers/net/seeq8005.h
new file mode 100644
index 000000000000..809ba6dc8fb9
--- /dev/null
+++ b/drivers/net/seeq8005.h
@@ -0,0 +1,156 @@
+/*
+ * defines, etc for the seeq8005
+ */
+
+/*
+ * This file is distributed under GPL.
+ *
+ * This style and layout of this file is also copied
+ * from many of the other linux network device drivers.
+ */
+
+/* The number of low I/O ports used by the ethercard. */
+#define SEEQ8005_IO_EXTENT 16
+
+#define SEEQ_B (ioaddr)
+
+#define SEEQ_CMD (SEEQ_B) /* Write only */
+#define SEEQ_STATUS (SEEQ_B) /* Read only */
+#define SEEQ_CFG1 (SEEQ_B + 2)
+#define SEEQ_CFG2 (SEEQ_B + 4)
+#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
+#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
+#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
+#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
+#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
+
+#define DEFAULT_TEA (0x3f)
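+
+/*
+ * With DEFAULT_TEA = 0x3f the transmit end area is (0x3f + 1) << 8 = 0x4000,
+ * i.e. the first 16K of the 64K buffer memory is transmit space and the
+ * remaining 48K is receive space, matching the "16K of send buffer and 48K
+ * of recv buffer" comment in seeq8005.c.
+ */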
+
+#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
+#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
+#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
+#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
+#define SEEQCMD_INT_MASK (0x000f)
+
+#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
+#define SEEQCMD_RX_INT_ACK (0x0020)
+#define SEEQCMD_TX_INT_ACK (0x0040)
+#define SEEQCMD_WINDOW_INT_ACK (0x0080)
+#define SEEQCMD_ACK_ALL (0x00f0)
+
+#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
+#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
+#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
+#define SEEQCMD_SET_DMA_OFF (0x0800)
+#define SEEQCMD_SET_RX_OFF (0x1000)
+#define SEEQCMD_SET_TX_OFF (0x2000)
+#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
+
+#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
+#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
+
+#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
+#define SEEQSTAT_RX_INT_EN (0x0002)
+#define SEEQSTAT_TX_INT_EN (0x0004)
+#define SEEQSTAT_WINDOW_INT_EN (0x0008)
+
+#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
+#define SEEQSTAT_RX_INT (0x0020)
+#define SEEQSTAT_TX_INT (0x0040)
+#define SEEQSTAT_WINDOW_INT (0x0080)
+#define SEEQSTAT_ANY_INT (0x00f0)
+
+#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
+#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
+#define SEEQSTAT_TX_ON (0x0400) /* TX running */
+
+#define SEEQSTAT_FIFO_FULL (0x2000)
+#define SEEQSTAT_FIFO_EMPTY (0x4000)
+#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
+
+#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
+#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
+#define SEEQCFG1_BUFFER_MAC1 (0x0001)
+#define SEEQCFG1_BUFFER_MAC2 (0x0002)
+#define SEEQCFG1_BUFFER_MAC3 (0x0003)
+#define SEEQCFG1_BUFFER_MAC4 (0x0004)
+#define SEEQCFG1_BUFFER_MAC5 (0x0005)
+#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
+#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
+#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
+#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
+
+#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
+#define SEEQCFG1_DMA_CONT (0x0000)
+#define SEEQCFG1_DMA_800ns (0x0010)
+#define SEEQCFG1_DMA_1600ns (0x0020)
+#define SEEQCFG1_DMA_3200ns (0x0030)
+
+#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
+#define SEEQCFG1_DMA_LEN1 (0x0000)
+#define SEEQCFG1_DMA_LEN2 (0x0040)
+#define SEEQCFG1_DMA_LEN4 (0x0080)
+#define SEEQCFG1_DMA_LEN8 (0x00c0)
+
+#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
+#define SEEQCFG1_MAC0_EN (0x0100)
+#define SEEQCFG1_MAC1_EN (0x0200)
+#define SEEQCFG1_MAC2_EN (0x0400)
+#define SEEQCFG1_MAC3_EN (0x0800)
+#define SEEQCFG1_MAC4_EN (0x1000)
+#define SEEQCFG1_MAC5_EN (0x2000)
+
+#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
+#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
+#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
+#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
+#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
+
+#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
+
+#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
+#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
+
+#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
+#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
+#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
+
+#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
+#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
+#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
+#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
+#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
+#define SEEQCFG2_LOOPBACK (0x0800)
+#define SEEQCFG2_CTRLO (0x1000)
+#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
+
+struct seeq_pkt_hdr {
+ unsigned short next; /* address of next packet header */
+ unsigned char babble_int:1, /* enable int on >1514 byte packet */
+ coll_int:1, /* enable int on collision */
+ coll_16_int:1, /* enable int on >15 collision */
+ xmit_int:1, /* enable int on success (or xmit with <15 collision) */
+ unused:1,
+ data_follows:1, /* if not set, process this as a header and pointer only */
+ chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */
+ xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
+ unsigned char status;
+};
+
+#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
+#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
+#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
+#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
+#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
+#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
+#define SEEQPKTH_XMIT (0x80)
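+
+/*
+ * Example: a transmit chain header is built by OR-ing these bits together;
+ * hardware_send_packet() in seeq8005.c writes
+ * SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN (= 0xa8)
+ * right after the length word of each packet.
+ */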
+
+#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
+#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
+#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
+#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
+#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
+#define SEEQPKTS_DRIB (0x0400) /* recv only */
+#define SEEQPKTS_SHORT (0x0800) /* recv only */
+#define SEEQPKTS_DONE (0x8000)
+#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
new file mode 100644
index 000000000000..9bc3b1c0dd6a
--- /dev/null
+++ b/drivers/net/sgiseeq.c
@@ -0,0 +1,773 @@
+/*
+ * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/route.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgialib.h>
+
+#include "sgiseeq.h"
+
+static char *version = "sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";
+
+static char *sgiseeqstr = "SGI Seeq8003";
+
+/*
+ * If you want speed, you do something silly, it always has worked for me. So,
+ * with that in mind, I've decided to make this driver look completely like a
+ * stupid Lance from a driver architecture perspective. Only difference is that
+ * here our "ring buffer" looks and acts like a real Lance one does but is
+ * laid out the way the HPC DMA and the Seeq want it. You'd be surprised
+ * how a stupid idea like this can pay off in performance, not to mention
+ * making this driver 2,000 times easier to write. ;-)
+ */
+
+/* Tune these if we tend to run out often etc. */
+#define SEEQ_RX_BUFFERS 16
+#define SEEQ_TX_BUFFERS 16
+
+#define PKT_BUF_SZ 1584
+
+#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
+#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
+#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
+#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
+
+#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
+ sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
+ sp->tx_old - sp->tx_new - 1)
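+
+/*
+ * Worked example: with SEEQ_TX_BUFFERS = 16, tx_old = 3 and tx_new = 7 give
+ * 3 + 15 - 7 = 11 free slots; after a wrap, tx_old = 7 and tx_new = 3 give
+ * 7 - 3 - 1 = 3.  One slot is always kept unused so that a full ring can be
+ * told apart from an empty one.
+ */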
+
+#define DEBUG
+
+struct sgiseeq_rx_desc {
+ volatile struct hpc_dma_desc rdma;
+ volatile signed int buf_vaddr;
+};
+
+struct sgiseeq_tx_desc {
+ volatile struct hpc_dma_desc tdma;
+ volatile signed int buf_vaddr;
+};
+
+/*
+ * Warning: This structure is laid out in a certain way because HPC dma
+ * descriptors must be 8-byte aligned. So don't touch this without
+ * some care.
+ */
+struct sgiseeq_init_block { /* Note the name ;-) */
+ struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
+ struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
+};
+
+struct sgiseeq_private {
+ struct sgiseeq_init_block *srings;
+
+ /* Ptrs to the descriptors in uncached space. */
+ struct sgiseeq_rx_desc *rx_desc;
+ struct sgiseeq_tx_desc *tx_desc;
+
+ char *name;
+ struct hpc3_ethregs *hregs;
+ struct sgiseeq_regs *sregs;
+
+ /* Ring entry counters. */
+ unsigned int rx_new, tx_new;
+ unsigned int rx_old, tx_old;
+
+ int is_edlc;
+ unsigned char control;
+ unsigned char mode;
+
+ struct net_device_stats stats;
+
+ struct net_device *next_module;
+ spinlock_t tx_lock;
+};
+
+/* A list of all installed seeq devices, for removing the driver module. */
+static struct net_device *root_sgiseeq_dev;
+
+static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
+{
+ hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ;
+ udelay(20);
+ hregs->rx_reset = 0;
+}
+
+static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ hregs->rx_ctrl = hregs->tx_ctrl = 0;
+ hpc3_eth_reset(hregs);
+}
+
+#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
+ SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
+
+static inline void seeq_go(struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ sregs->rstat = sp->mode | RSTAT_GO_BITS;
+ hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
+}
+
+static inline void __sgiseeq_set_mac_address(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ int i;
+
+ sregs->tstat = SEEQ_TCMD_RB0;
+ for (i = 0; i < 6; i++)
+ sregs->rw.eth_addr[i] = dev->dev_addr[i];
+}
+
+static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ spin_lock_irq(&sp->tx_lock);
+ __sgiseeq_set_mac_address(dev);
+ spin_unlock_irq(&sp->tx_lock);
+
+ return 0;
+}
+
+#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
+#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
+#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
+
+static int seeq_init_ring(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ int i;
+
+ netif_stop_queue(dev);
+ sp->rx_new = sp->tx_new = 0;
+ sp->rx_old = sp->tx_old = 0;
+
+ __sgiseeq_set_mac_address(dev);
+
+ /* Setup tx ring. */
+ for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
+ if (!sp->tx_desc[i].tdma.pbuf) {
+ unsigned long buffer;
+
+ buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
+ sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
+ }
+ sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
+ }
+
+ /* And now the rx ring. */
+ for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+ if (!sp->rx_desc[i].rdma.pbuf) {
+ unsigned long buffer;
+
+ buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
+ sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
+ }
+ sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
+ }
+ sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
+ return 0;
+}
+
+#ifdef DEBUG
+static struct sgiseeq_private *gpriv;
+static struct net_device *gdev;
+
+void sgiseeq_dump_rings(void)
+{
+ static int once;
+ struct sgiseeq_rx_desc *r = gpriv->rx_desc;
+ struct sgiseeq_tx_desc *t = gpriv->tx_desc;
+ struct hpc3_ethregs *hregs = gpriv->hregs;
+ int i;
+
+ if (once)
+ return;
+ once++;
+ printk("RING DUMP:\n");
+ for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+ printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
+ i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
+ r[i].rdma.pnext);
+ i += 1;
+ printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
+ i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
+ r[i].rdma.pnext);
+ }
+ for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
+ printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
+ i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
+ t[i].tdma.pnext);
+ i += 1;
+ printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
+ i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
+ t[i].tdma.pnext);
+ }
+ printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
+ gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
+ printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
+ hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
+ printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
+ hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
+}
+#endif
+
+#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
+#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
+#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
+
+static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
+ struct sgiseeq_regs *sregs)
+{
+ struct hpc3_ethregs *hregs = sp->hregs;
+ int err;
+
+ reset_hpc3_and_seeq(hregs, sregs);
+ err = seeq_init_ring(dev);
+ if (err)
+ return err;
+
+ /* Setup to field the proper interrupt types. */
+ if (sp->is_edlc) {
+ sregs->tstat = TSTAT_INIT_EDLC;
+ sregs->rw.wregs.control = sp->control;
+ sregs->rw.wregs.frame_gap = 0;
+ } else {
+ sregs->tstat = TSTAT_INIT_SEEQ;
+ }
+
+ hregs->rx_dconfig |= RDMACFG_INIT;
+
+ hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
+ hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);
+
+ seeq_go(sp, hregs, sregs);
+ return 0;
+}
+
+static inline void record_rx_errors(struct sgiseeq_private *sp,
+ unsigned char status)
+{
+ if (status & SEEQ_RSTAT_OVERF ||
+ status & SEEQ_RSTAT_SFRAME)
+ sp->stats.rx_over_errors++;
+ if (status & SEEQ_RSTAT_CERROR)
+ sp->stats.rx_crc_errors++;
+ if (status & SEEQ_RSTAT_DERROR)
+ sp->stats.rx_frame_errors++;
+ if (status & SEEQ_RSTAT_REOF)
+ sp->stats.rx_errors++;
+}
+
+static inline void rx_maybe_restart(struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
+ hregs->rx_ndptr = CPHYSADDR(sp->rx_desc + sp->rx_new);
+ seeq_go(sp, hregs, sregs);
+ }
+}
+
+#define for_each_rx(rd, sp) for((rd) = &(sp)->rx_desc[(sp)->rx_new]; \
+ !((rd)->rdma.cntinfo & HPCDMA_OWN); \
+ (rd) = &(sp)->rx_desc[(sp)->rx_new])
+
+static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ struct sgiseeq_rx_desc *rd;
+ struct sk_buff *skb = 0;
+ unsigned char pkt_status;
+ unsigned char *pkt_pointer = 0;
+ int len = 0;
+ unsigned int orig_end = PREV_RX(sp->rx_new);
+
+ /* Service every received packet. */
+ for_each_rx(rd, sp) {
+ len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
+ pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
+ pkt_status = pkt_pointer[len + 2];
+
+ if (pkt_status & SEEQ_RSTAT_FIG) {
+ /* Packet is OK. */
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ skb_put(skb, len);
+
+ /* Copy out of kseg1 to avoid silly cache flush. */
+ eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* We don't want to receive our own packets */
+ if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ sp->stats.rx_packets++;
+ sp->stats.rx_bytes += len;
+ } else {
+ /* Silently drop my own packets */
+ dev_kfree_skb_irq(skb);
+ }
+ } else {
+ printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ sp->stats.rx_dropped++;
+ }
+ } else {
+ record_rx_errors(sp, pkt_status);
+ }
+
+ /* Return the entry to the ring pool. */
+ rd->rdma.cntinfo = RCNTINFO_INIT;
+ sp->rx_new = NEXT_RX(sp->rx_new);
+ }
+ sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
+ sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
+ rx_maybe_restart(sp, hregs, sregs);
+}
+
+static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
+ struct sgiseeq_regs *sregs)
+{
+ if (sp->is_edlc) {
+ sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
+ sregs->rw.wregs.control = sp->control;
+ }
+}
+
+static inline void kick_tx(struct sgiseeq_tx_desc *td,
+ struct hpc3_ethregs *hregs)
+{
+ /* If the HPC aint doin nothin, and there are more packets
+ * with ETXD cleared and XIU set we must make very certain
+ * that we restart the HPC else we risk locking up the
+ * adapter. The following code is only safe iff the HPCDMA
+ * is not active!
+ */
+ while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
+ (HPCDMA_XIU | HPCDMA_ETXD))
+ td = (struct sgiseeq_tx_desc *)(long) CKSEG1ADDR(td->tdma.pnext);
+ if (td->tdma.cntinfo & HPCDMA_XIU) {
+ hregs->tx_ndptr = CPHYSADDR(td);
+ hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
+ }
+}
+
+static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ struct sgiseeq_tx_desc *td;
+ unsigned long status = hregs->tx_ctrl;
+ int j;
+
+ tx_maybe_reset_collisions(sp, sregs);
+
+ if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
+ /* Oops, HPC detected some sort of error. */
+ if (status & SEEQ_TSTAT_R16)
+ sp->stats.tx_aborted_errors++;
+ if (status & SEEQ_TSTAT_UFLOW)
+ sp->stats.tx_fifo_errors++;
+ if (status & SEEQ_TSTAT_LCLS)
+ sp->stats.collisions++;
+ }
+
+ /* Ack 'em... */
+ for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
+ td = &sp->tx_desc[j];
+
+ if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
+ break;
+ if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
+ if (!(status & HPC3_ETXCTRL_ACTIVE)) {
+ hregs->tx_ndptr = CPHYSADDR(td);
+ hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
+ }
+ break;
+ }
+ sp->stats.tx_packets++;
+ sp->tx_old = NEXT_TX(sp->tx_old);
+ td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
+ td->tdma.cntinfo |= HPCDMA_EOX;
+ }
+}
+
+static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct hpc3_ethregs *hregs = sp->hregs;
+ struct sgiseeq_regs *sregs = sp->sregs;
+
+ spin_lock(&sp->tx_lock);
+
+ /* Ack the IRQ and set software state. */
+ hregs->rx_reset = HPC3_ERXRST_CLRIRQ;
+
+ /* Always check for received packets. */
+ sgiseeq_rx(dev, sp, hregs, sregs);
+
+ /* Only check for tx acks if we have something queued. */
+ if (sp->tx_old != sp->tx_new)
+ sgiseeq_tx(dev, sp, hregs, sregs);
+
+ if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock(&sp->tx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sgiseeq_open(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ unsigned int irq = dev->irq;
+ int err;
+
+ if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
+ printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
+		return -EAGAIN;
+ }
+
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ goto out_free_irq;
+
+ netif_start_queue(dev);
+
+ return 0;
+
+out_free_irq:
+ free_irq(irq, dev);
+
+ return err;
+}
+
+static int sgiseeq_close(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+
+ netif_stop_queue(dev);
+
+ /* Shutdown the Seeq. */
+ reset_hpc3_and_seeq(sp->hregs, sregs);
+
+ return 0;
+}
+
+static inline int sgiseeq_reset(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ int err;
+
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ return err;
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+
+ return 0;
+}
+
+void sgiseeq_my_reset(void)
+{
+ printk("RESET!\n");
+ sgiseeq_reset(gdev);
+}
+
+static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct hpc3_ethregs *hregs = sp->hregs;
+ unsigned long flags;
+ struct sgiseeq_tx_desc *td;
+ int skblen, len, entry;
+
+ spin_lock_irqsave(&sp->tx_lock, flags);
+
+ /* Setup... */
+ skblen = skb->len;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ sp->stats.tx_bytes += len;
+ entry = sp->tx_new;
+ td = &sp->tx_desc[entry];
+
+ /* Create entry. There are so many races with adding a new
+ * descriptor to the chain:
+ * 1) Assume that the HPC is off processing a DMA chain while
+ * we are changing all of the following.
+	 * 2) Do not allow the HPC to look at a new descriptor until
+	 *    we have completely set up its state. This means, do
+	 *    not clear HPCDMA_EOX in the current last descriptor
+	 *    until the one we are adding looks consistent and could
+	 *    be processed right now.
+ * 3) The tx interrupt code must notice when we've added a new
+ * entry and the HPC got to the end of the chain before we
+ * added this new entry and restarted it.
+ */
+ memcpy((char *)(long)td->buf_vaddr, skb->data, skblen);
+ if (len != skblen)
+ memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
+ td->tdma.cntinfo = (len & HPCDMA_BCNT) |
+ HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
+ if (sp->tx_old != sp->tx_new) {
+ struct sgiseeq_tx_desc *backend;
+
+ backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
+ backend->tdma.cntinfo &= ~HPCDMA_EOX;
+ }
+ sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
+
+ /* Maybe kick the HPC back into motion. */
+ if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
+ kick_tx(&sp->tx_desc[sp->tx_old], hregs);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+
+ if (!TX_BUFFS_AVAIL(sp))
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+ return 0;
+}
+
+static void timeout(struct net_device *dev)
+{
+ printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
+ sgiseeq_reset(dev);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ return &sp->stats;
+}
+
+static void sgiseeq_set_multicast(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
+ unsigned char oldmode = sp->mode;
+
+ if(dev->flags & IFF_PROMISC)
+ sp->mode = SEEQ_RCMD_RANY;
+ else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ sp->mode = SEEQ_RCMD_RBMCAST;
+ else
+ sp->mode = SEEQ_RCMD_RBCAST;
+
+ /* XXX I know this sucks, but is there a better way to reprogram
+ * XXX the receiver? At least, this shouldn't happen too often.
+ */
+
+ if (oldmode != sp->mode)
+ sgiseeq_reset(dev);
+}
+
+static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
+{
+ int i = 0;
+
+ while (i < (nbufs - 1)) {
+ buf[i].tdma.pnext = CPHYSADDR(buf + i + 1);
+ buf[i].tdma.pbuf = 0;
+ i++;
+ }
+ buf[i].tdma.pnext = CPHYSADDR(buf);
+}
+
+static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
+{
+ int i = 0;
+
+ while (i < (nbufs - 1)) {
+ buf[i].rdma.pnext = CPHYSADDR(buf + i + 1);
+ buf[i].rdma.pbuf = 0;
+ i++;
+ }
+ buf[i].rdma.pbuf = 0;
+ buf[i].rdma.pnext = CPHYSADDR(buf);
+}
+
+#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))
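+
+/*
+ * ALIGNED() rounds an address up to the next 16-byte boundary, e.g.
+ * ALIGNED(0xa8010ef5) == 0xa8010f00, which more than satisfies the 8-byte
+ * alignment the HPC descriptors require.
+ */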
+
+static int sgiseeq_init(struct hpc3_regs* regs, int irq)
+{
+ struct sgiseeq_init_block *sr;
+ struct sgiseeq_private *sp;
+ struct net_device *dev;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof (struct sgiseeq_private));
+ if (!dev) {
+ printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ sp = netdev_priv(dev);
+
+ /* Make private data page aligned */
+ sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL);
+ if (!sr) {
+ printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ sp->srings = sr;
+
+#define EADDR_NVOFS 250
+ for (i = 0; i < 3; i++) {
+ unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);
+
+ dev->dev_addr[2 * i] = tmp >> 8;
+ dev->dev_addr[2 * i + 1] = tmp & 0xff;
+ }
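+
+	/*
+	 * Illustration (made-up values): each 16-bit NVRAM word supplies two
+	 * address octets, high byte first, so words 0x0800, 0x6912, 0x3456
+	 * would yield the station address 08:00:69:12:34:56.
+	 */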
+
+#ifdef DEBUG
+ gpriv = sp;
+ gdev = dev;
+#endif
+ sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0];
+ sp->hregs = &hpc3c0->ethregs;
+ sp->name = sgiseeqstr;
+ sp->mode = SEEQ_RCMD_RBCAST;
+
+ sp->rx_desc = (struct sgiseeq_rx_desc *)
+ CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0]));
+ dma_cache_wback_inv((unsigned long)&sp->srings->rxvector,
+ sizeof(sp->srings->rxvector));
+ sp->tx_desc = (struct sgiseeq_tx_desc *)
+ CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0]));
+ dma_cache_wback_inv((unsigned long)&sp->srings->txvector,
+ sizeof(sp->srings->txvector));
+
+ /* A couple calculations now, saves many cycles later. */
+ setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
+ setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
+
+ /* Reset the chip. */
+ hpc3_eth_reset(sp->hregs);
+
+ sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
+ if (sp->is_edlc)
+ sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
+ SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
+ SEEQ_CTRL_ENCARR;
+
+ dev->open = sgiseeq_open;
+ dev->stop = sgiseeq_close;
+ dev->hard_start_xmit = sgiseeq_start_xmit;
+ dev->tx_timeout = timeout;
+ dev->watchdog_timeo = (200 * HZ) / 1000;
+ dev->get_stats = sgiseeq_get_stats;
+ dev->set_multicast_list = sgiseeq_set_multicast;
+ dev->set_mac_address = sgiseeq_set_mac_address;
+ dev->irq = irq;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "Sgiseeq: Cannot register net device, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_free_page;
+ }
+
+ printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ sp->next_module = root_sgiseeq_dev;
+ root_sgiseeq_dev = dev;
+
+ return 0;
+
+err_out_free_page:
+ free_page((unsigned long) sp->srings);
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+
+static int __init sgiseeq_probe(void)
+{
+ printk(version);
+
+ /* On board adapter on 1st HPC is always present */
+ return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
+}
+
+static void __exit sgiseeq_exit(void)
+{
+ struct net_device *next, *dev;
+ struct sgiseeq_private *sp;
+ int irq;
+
+ for (dev = root_sgiseeq_dev; dev; dev = next) {
+ sp = (struct sgiseeq_private *) netdev_priv(dev);
+ next = sp->next_module;
+ irq = dev->irq;
+ unregister_netdev(dev);
+ free_irq(irq, dev);
+ free_page((unsigned long) sp->srings);
+ free_netdev(dev);
+ }
+}
+
+module_init(sgiseeq_probe);
+module_exit(sgiseeq_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sgiseeq.h b/drivers/net/sgiseeq.h
new file mode 100644
index 000000000000..ebcca688dac4
--- /dev/null
+++ b/drivers/net/sgiseeq.h
@@ -0,0 +1,103 @@
+/*
+ * sgiseeq.h: Defines for the Seeq8003 ethernet controller.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ */
+#ifndef _SGISEEQ_H
+#define _SGISEEQ_H
+
+struct sgiseeq_wregs {
+ volatile unsigned int multicase_high[2];
+ volatile unsigned int frame_gap;
+ volatile unsigned int control;
+};
+
+struct sgiseeq_rregs {
+ volatile unsigned int collision_tx[2];
+ volatile unsigned int collision_all[2];
+ volatile unsigned int _unused0;
+ volatile unsigned int rflags;
+};
+
+struct sgiseeq_regs {
+ union {
+ volatile unsigned int eth_addr[6];
+ volatile unsigned int multicast_low[6];
+ struct sgiseeq_wregs wregs;
+ struct sgiseeq_rregs rregs;
+ } rw;
+ volatile unsigned int rstat;
+ volatile unsigned int tstat;
+};
+
+/* Seeq8003 receive status register */
+#define SEEQ_RSTAT_OVERF 0x001 /* Overflow */
+#define SEEQ_RSTAT_CERROR 0x002 /* CRC error */
+#define SEEQ_RSTAT_DERROR 0x004 /* Dribble error */
+#define SEEQ_RSTAT_SFRAME 0x008 /* Short frame */
+#define SEEQ_RSTAT_REOF 0x010 /* Received end of frame */
+#define SEEQ_RSTAT_FIG 0x020 /* Frame is good */
+#define SEEQ_RSTAT_TIMEO 0x040 /* Timeout, or late receive */
+#define SEEQ_RSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
+#define SEEQ_RSTAT_LITTLE 0x100 /* DMA is done in little endian format */
+#define SEEQ_RSTAT_SDMA 0x200 /* DMA has started */
+#define SEEQ_RSTAT_ADMA 0x400 /* DMA is active */
+#define SEEQ_RSTAT_ROVERF 0x800 /* Receive buffer overflow */
+
+/* Seeq8003 receive command register */
+#define SEEQ_RCMD_RDISAB 0x000 /* Disable receiver on the Seeq8003 */
+#define SEEQ_RCMD_IOVERF 0x001 /* IRQ on buffer overflows */
+#define SEEQ_RCMD_ICRC 0x002 /* IRQ on CRC errors */
+#define SEEQ_RCMD_IDRIB 0x004 /* IRQ on dribble errors */
+#define SEEQ_RCMD_ISHORT 0x008 /* IRQ on short frames */
+#define SEEQ_RCMD_IEOF 0x010 /* IRQ on end of frame */
+#define SEEQ_RCMD_IGOOD 0x020 /* IRQ on good frames */
+#define SEEQ_RCMD_RANY 0x040 /* Receive any frame */
+#define SEEQ_RCMD_RBCAST 0x080 /* Receive broadcasts */
+#define SEEQ_RCMD_RBMCAST 0x0c0 /* Receive broadcasts/multicasts */
+
+/* Seeq8003 transmit status register */
+#define SEEQ_TSTAT_UFLOW 0x001 /* Transmit buffer underflow */
+#define SEEQ_TSTAT_CLS 0x002 /* Collision detected */
+#define SEEQ_TSTAT_R16 0x004 /* Did 16 retries to tx a frame */
+#define SEEQ_TSTAT_PTRANS 0x008 /* Packet was transmitted ok */
+#define SEEQ_TSTAT_LCLS 0x010 /* Late collision occurred */
+#define SEEQ_TSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
+#define SEEQ_TSTAT_TLE 0x100 /* DMA is done in little endian format */
+#define SEEQ_TSTAT_SDMA 0x200 /* DMA has started */
+#define SEEQ_TSTAT_ADMA 0x400 /* DMA is active */
+
+/* Seeq8003 transmit command register */
+#define SEEQ_TCMD_RB0 0x00 /* Register bank zero w/station addr */
+#define SEEQ_TCMD_IUF 0x01 /* IRQ on tx underflow */
+#define SEEQ_TCMD_IC 0x02 /* IRQ on collisions */
+#define SEEQ_TCMD_I16 0x04 /* IRQ after 16 failed attempts to tx frame */
+#define SEEQ_TCMD_IPT 0x08 /* IRQ when packet successfully transmitted */
+#define SEEQ_TCMD_RB1 0x20 /* Register bank one w/multi-cast low byte */
+#define SEEQ_TCMD_RB2 0x40 /* Register bank two w/multi-cast high byte */
+
+/* Seeq8003 control register */
+#define SEEQ_CTRL_XCNT 0x01
+#define SEEQ_CTRL_ACCNT 0x02
+#define SEEQ_CTRL_SFLAG 0x04
+#define SEEQ_CTRL_EMULTI 0x08
+#define SEEQ_CTRL_ESHORT 0x10
+#define SEEQ_CTRL_ENCARR 0x20
+
+/* Seeq8003 control registers on the SGI Hollywood HPC. */
+#define SEEQ_HPIO_P1BITS 0x00000001 /* cycles to stay in P1 phase for PIO */
+#define SEEQ_HPIO_P2BITS 0x00000060 /* cycles to stay in P2 phase for PIO */
+#define SEEQ_HPIO_P3BITS 0x00000100 /* cycles to stay in P3 phase for PIO */
+#define SEEQ_HDMA_D1BITS 0x00000006 /* cycles to stay in D1 phase for DMA */
+#define SEEQ_HDMA_D2BITS 0x00000020 /* cycles to stay in D2 phase for DMA */
+#define SEEQ_HDMA_D3BITS 0x00000000 /* cycles to stay in D3 phase for DMA */
+#define SEEQ_HDMA_TIMEO 0x00030000 /* cycles for DMA timeout */
+#define SEEQ_HCTL_NORM 0x00000000 /* Normal operation mode */
+#define SEEQ_HCTL_RESET 0x00000001 /* Reset Seeq8003 and HPC interface */
+#define SEEQ_HCTL_IPEND 0x00000002 /* IRQ is pending for the chip */
+#define SEEQ_HCTL_IPG 0x00001000 /* Inter-packet gap */
+#define SEEQ_HCTL_RFIX 0x00002000 /* At rxdc, clear end-of-packet */
+#define SEEQ_HCTL_EFIX 0x00004000 /* fixes intr status bit settings */
+#define SEEQ_HCTL_IFIX 0x00008000 /* enable startup timeouts */
+
+#endif /* !(_SGISEEQ_H) */
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
new file mode 100644
index 000000000000..e68cf5fb4920
--- /dev/null
+++ b/drivers/net/shaper.c
@@ -0,0 +1,755 @@
+/*
+ * Simple traffic shaper for Linux NET3.
+ *
+ * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
+ * http://www.redhat.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ *
+ * Algorithm:
+ *
+ * Queue Frame:
+ * Compute time length of frame at regulated speed
+ * Add frame to queue at appropriate point
+ * Adjust time length computation for followup frames
+ * Any frame that falls outside of its boundaries is freed
+ *
+ * We work to the following constants
+ *
+ * SHAPER_QLEN Maximum queued frames
+ * SHAPER_LATENCY Bounding latency on a frame. Leaving this latency
+ * window drops the frame. This stops us queueing
+ * frames for a long time and confusing a remote
+ * host.
+ * SHAPER_MAXSLIP Maximum time a priority frame may jump forward.
+ * That bounds the penalty we will inflict on low
+ * priority traffic.
+ * SHAPER_BURST Time range we call "now" in order to reduce
+ * system load. The larger we make this, the burstier
+ * the behaviour: you get better local performance
+ * through packet clustering on routers, but the
+ * remote end finds it harder to judge RTTs.
+ *
+ * This is designed to handle lower speed links ( < 200K/second or so). We
+ * run off a 100-150Hz base clock typically. This gives us a resolution at
+ * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
+ * resolution may start to cause much more burstiness in the traffic. We
+ * could avoid a lot of that by calling kick_shaper() at the end of the
+ * tied device transmissions. If you run above about 100K/second you
+ * may need to tune the supposed speed rate for the right values.
+ *
+ * BUGS:
+ * Downing the interface under the shaper before the shaper
+ * itself will render your machine defunct. For now, therefore,
+ * don't shape over PPP or SLIP!
+ * This will be fixed in BETA4
+ *
+ * Update History :
+ *
+ * Fixed bh_atomic() SMP races and rewrote the locking code to
+ * be SMP safe and irq-mask friendly.
+ * NOTE: we can't use start_bh_atomic() in kick_shaper()
+ * because it's going to be called again from an irq handler,
+ * and synchronize_bh() is a no-no if called from irq context.
+ * 1999 Andrea Arcangeli
+ *
+ * Device statistics (tx_packets, tx_bytes,
+ * tx_drops: queue_over_time and collisions: max_queue_exceeded)
+ * 1999/06/18 Jordi Murgo <savage@apostols.org>
+ *
+ * Use skb->cb for private data.
+ * 2000/03 Andi Kleen
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/if_shaper.h>
+
+#include <net/dst.h>
+#include <net/arp.h>
+
+struct shaper_cb {
+ unsigned long shapeclock; /* Time it should go out */
+ unsigned long shapestamp; /* Stamp for shaper */
+ __u32 shapelatency; /* Latency on frame */
+ __u32 shapelen; /* Frame length in clocks */
+ __u16 shapepend; /* Pending */
+};
+#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))
+
+static int sh_debug; /* Debug flag */
+
+#define SHAPER_BANNER "CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
+
+/*
+ * Locking
+ */
+
+static int shaper_lock(struct shaper *sh)
+{
+ /*
+ * Lock in an interrupt must fail
+ */
+ while (test_and_set_bit(0, &sh->locked))
+ {
+ if (!in_interrupt())
+ sleep_on(&sh->wait_queue);
+ else
+ return 0;
+
+ }
+ return 1;
+}
+
+static void shaper_kick(struct shaper *sh);
+
+static void shaper_unlock(struct shaper *sh)
+{
+ clear_bit(0, &sh->locked);
+ wake_up(&sh->wait_queue);
+ shaper_kick(sh);
+}
+
+/*
+ * Compute clocks on a buffer
+ */
+
+static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
+{
+ int t=skb->len/shaper->bytespertick;
+ return t;
+}
+
+/*
+ * Set the speed of a shaper. We compute this in bytes per tick since
+ * that's how the machine wants to run. Quoted input is in bits per second
+ * as is traditional (note not BAUD). We assume 8 bit bytes.
+ */
+
+static void shaper_setspeed(struct shaper *shaper, int bitspersec)
+{
+ shaper->bitspersec=bitspersec;
+ shaper->bytespertick=(bitspersec/HZ)/8;
+ if(!shaper->bytespertick)
+ shaper->bytespertick++;
+}
+
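+/*
+ * Worked example (assuming HZ=100): shaper_setspeed(sh, 256000) stores
+ * bytespertick = (256000 / 100) / 8 = 320, so shaper_clocks() charges a
+ * 1500 byte frame 1500 / 320 = 4 ticks of transmit time.
+ */
+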
+/*
+ * Throw a frame at a shaper.
+ */
+
+static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ /*
+ * Get ready to work on this shaper. The lock may fail if we
+ * are in an interrupt and the lock is already held.
+ */
+
+ if(!shaper_lock(shaper))
+ return -1;
+ ptr=shaper->sendq.prev;
+
+ /*
+ * Set up our packet details
+ */
+
+ SHAPERCB(skb)->shapelatency=0;
+ SHAPERCB(skb)->shapeclock=shaper->recovery;
+ if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
+ SHAPERCB(skb)->shapeclock=jiffies;
+ skb->priority=0; /* short term bug fix */
+ SHAPERCB(skb)->shapestamp=jiffies;
+
+ /*
+ * Time slots for this packet.
+ */
+
+ SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
+
+#ifdef SHAPER_COMPLEX /* and broken.. */
+
+ while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
+ {
+ if(ptr->pri<skb->pri
+ && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
+ {
+ struct sk_buff *tmp=ptr->prev;
+
+ /*
+ * It goes before us therefore we slip the length
+ * of the new frame.
+ */
+
+ SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
+ SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
+
+ /*
+ * The packet may have slipped so far back it
+ * fell off.
+ */
+ if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
+ {
+ skb_unlink(ptr);
+ dev_kfree_skb(ptr);
+ }
+ ptr=tmp;
+ }
+ else
+ break;
+ }
+ if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
+ skb_queue_head(&shaper->sendq,skb);
+ else
+ {
+ struct sk_buff *tmp;
+ /*
+ * Set the packet clock out time according to the
+ * frames ahead. I'm sure a bit of thought could drop
+ * this loop.
+ */
+ for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
+ SHAPERCB(skb)->shapeclock+=tmp->shapelen;
+ skb_append(ptr,skb);
+ }
+#else
+ {
+ struct sk_buff *tmp;
+ /*
+ * Up our shape clock by the time pending on the queue
+ * (Should keep this in the shaper as a variable..)
+ */
+ for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
+ tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
+ SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
+ /*
+ * Queue over time. Spill packet.
+ */
+ if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) {
+ dev_kfree_skb(skb);
+ shaper->stats.tx_dropped++;
+ } else
+ skb_queue_tail(&shaper->sendq, skb);
+ }
+#endif
+ if(sh_debug)
+ printk("Frame queued.\n");
+ if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
+ {
+ ptr=skb_dequeue(&shaper->sendq);
+ dev_kfree_skb(ptr);
+ shaper->stats.collisions++;
+ }
+ shaper_unlock(shaper);
+ return 0;
+}
+
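+/*
+ * Illustrative example: if three frames worth 4 ticks each are already
+ * queued, a new frame's shapeclock is pushed 12 ticks past "now"; once
+ * that offset exceeds SHAPER_LATENCY the frame is dropped and counted
+ * in tx_dropped, and when the queue itself exceeds SHAPER_QLEN the
+ * oldest frame is discarded and counted as a collision.
+ */
+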
+/*
+ * Transmit from a shaper
+ */
+
+static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
+{
+ struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
+ if(sh_debug)
+ printk("Kick frame on %p\n",newskb);
+ if(newskb)
+ {
+ newskb->dev=shaper->dev;
+ newskb->priority=2;
+ if(sh_debug)
+ printk("Kick new frame to %s, %d\n",
+ shaper->dev->name,newskb->priority);
+ dev_queue_xmit(newskb);
+
+ shaper->stats.tx_bytes += skb->len;
+ shaper->stats.tx_packets++;
+
+ if(sh_debug)
+ printk("Kicked new frame out.\n");
+ dev_kfree_skb(skb);
+ }
+}
+
+/*
+ * Timer handler for shaping clock
+ */
+
+static void shaper_timer(unsigned long data)
+{
+ struct shaper *sh=(struct shaper *)data;
+ shaper_kick(sh);
+}
+
+/*
+ * Kick a shaper queue and try to do something sensible with the
+ * queue.
+ */
+
+static void shaper_kick(struct shaper *shaper)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Shaper unlock will kick
+ */
+
+ if (test_and_set_bit(0, &shaper->locked))
+ {
+ if(sh_debug)
+ printk("Shaper locked.\n");
+ mod_timer(&shaper->timer, jiffies);
+ return;
+ }
+
+
+ /*
+ * Walk the list (may be empty)
+ */
+
+ while((skb=skb_peek(&shaper->sendq))!=NULL)
+ {
+ /*
+ * Each packet due to go out by now (within an error
+ * of SHAPER_BURST) gets kicked onto the link
+ */
+
+ if(sh_debug)
+ printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
+ if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
+ {
+ /*
+ * Pull the frame and get interrupts back on.
+ */
+
+ skb_unlink(skb);
+ if (shaper->recovery <
+ SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
+ shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
+ /*
+ * Pass on to the physical target device via
+ * our low level packet thrower.
+ */
+
+ SHAPERCB(skb)->shapepend=0;
+ shaper_queue_xmit(shaper, skb); /* Fire */
+ }
+ else
+ break;
+ }
+
+ /*
+ * Next kick.
+ */
+
+ if(skb!=NULL)
+ mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
+
+ clear_bit(0, &shaper->locked);
+}
+
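+/*
+ * Illustrative note: shaper_kick() transmits every queued frame whose
+ * shapeclock falls within SHAPER_BURST jiffies of "now" and, if frames
+ * remain, re-arms the timer for the head frame's shapeclock so the
+ * queue keeps draining at the configured rate.
+ */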
+
+/*
+ * Flush the shaper queues on a closedown
+ */
+
+static void shaper_flush(struct shaper *shaper)
+{
+ struct sk_buff *skb;
+ if(!shaper_lock(shaper))
+ {
+ printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
+ return;
+ }
+ while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+ dev_kfree_skb(skb);
+ shaper_unlock(shaper);
+}
+
+/*
+ * Bring the interface up. We just disallow this until a
+ * bind.
+ */
+
+static int shaper_open(struct net_device *dev)
+{
+ struct shaper *shaper=dev->priv;
+
+ /*
+ * Can't open until attached.
+ * Also can't open until speed is set, or we'll get
+ * a division by zero.
+ */
+
+ if(shaper->dev==NULL)
+ return -ENODEV;
+ if(shaper->bitspersec==0)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Closing a shaper flushes the queues.
+ */
+
+static int shaper_close(struct net_device *dev)
+{
+ struct shaper *shaper=dev->priv;
+ shaper_flush(shaper);
+ del_timer_sync(&shaper->timer);
+ return 0;
+}
+
+/*
+ * Revectored calls. We alter the parameters and call the functions
+ * for our attached device. This lets us allocate bandwidth after
+ * ARP and other resolutions, not before.
+ */
+
+
+static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct shaper *sh=dev->priv;
+ return shaper_qframe(sh, skb);
+}
+
+static struct net_device_stats *shaper_get_stats(struct net_device *dev)
+{
+ struct shaper *sh=dev->priv;
+ return &sh->stats;
+}
+
+static int shaper_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+ struct shaper *sh=dev->priv;
+ int v;
+ if(sh_debug)
+ printk("Shaper header\n");
+ skb->dev=sh->dev;
+ v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
+ skb->dev=dev;
+ return v;
+}
+
+static int shaper_rebuild_header(struct sk_buff *skb)
+{
+ struct shaper *sh=skb->dev->priv;
+ struct net_device *dev=skb->dev;
+ int v;
+ if(sh_debug)
+ printk("Shaper rebuild header\n");
+ skb->dev=sh->dev;
+ v=sh->rebuild_header(skb);
+ skb->dev=dev;
+ return v;
+}
+
+#if 0
+static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
+{
+ struct shaper *sh=neigh->dev->priv;
+ struct net_device *tmp;
+ int ret;
+ if(sh_debug)
+ printk("Shaper header cache bind\n");
+ tmp=neigh->dev;
+ neigh->dev=sh->dev;
+ ret=sh->hard_header_cache(neigh,hh);
+ neigh->dev=tmp;
+ return ret;
+}
+
+static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
+ unsigned char *haddr)
+{
+ struct shaper *sh=dev->priv;
+ if(sh_debug)
+ printk("Shaper cache update\n");
+ sh->header_cache_update(hh, sh->dev, haddr);
+}
+#endif
+
+#ifdef CONFIG_INET
+
+static int shaper_neigh_setup(struct neighbour *n)
+{
+#ifdef CONFIG_INET
+ if (n->nud_state == NUD_NONE) {
+ n->ops = &arp_broken_ops;
+ n->output = n->ops->output;
+ }
+#endif
+ return 0;
+}
+
+static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+{
+#ifdef CONFIG_INET
+ if (p->tbl->family == AF_INET) {
+ p->neigh_setup = shaper_neigh_setup;
+ p->ucast_probes = 0;
+ p->mcast_probes = 0;
+ }
+#endif
+ return 0;
+}
+
+#else /* !(CONFIG_INET) */
+
+static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+{
+ return 0;
+}
+
+#endif
+
+static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
+{
+ sh->dev = dev;
+ sh->hard_start_xmit=dev->hard_start_xmit;
+ sh->get_stats=dev->get_stats;
+ if(dev->hard_header)
+ {
+ sh->hard_header=dev->hard_header;
+ shdev->hard_header = shaper_header;
+ }
+ else
+ shdev->hard_header = NULL;
+
+ if(dev->rebuild_header)
+ {
+ sh->rebuild_header = dev->rebuild_header;
+ shdev->rebuild_header = shaper_rebuild_header;
+ }
+ else
+ shdev->rebuild_header = NULL;
+
+#if 0
+ if(dev->hard_header_cache)
+ {
+ sh->hard_header_cache = dev->hard_header_cache;
+ shdev->hard_header_cache= shaper_cache;
+ }
+ else
+ {
+ shdev->hard_header_cache= NULL;
+ }
+
+ if(dev->header_cache_update)
+ {
+ sh->header_cache_update = dev->header_cache_update;
+ shdev->header_cache_update = shaper_cache_update;
+ }
+ else
+ shdev->header_cache_update= NULL;
+#else
+ shdev->header_cache_update = NULL;
+ shdev->hard_header_cache = NULL;
+#endif
+ shdev->neigh_setup = shaper_neigh_setup_dev;
+
+ shdev->hard_header_len=dev->hard_header_len;
+ shdev->type=dev->type;
+ shdev->addr_len=dev->addr_len;
+ shdev->mtu=dev->mtu;
+ sh->bitspersec=0;
+ return 0;
+}
+
+static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
+ struct shaper *sh=dev->priv;
+
+ if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
+ {
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ switch(ss->ss_cmd)
+ {
+ case SHAPER_SET_DEV:
+ {
+ struct net_device *them=__dev_get_by_name(ss->ss_name);
+ if(them==NULL)
+ return -ENODEV;
+ if(sh->dev)
+ return -EBUSY;
+ return shaper_attach(dev,dev->priv, them);
+ }
+ case SHAPER_GET_DEV:
+ if(sh->dev==NULL)
+ return -ENODEV;
+ strcpy(ss->ss_name, sh->dev->name);
+ return 0;
+ case SHAPER_SET_SPEED:
+ shaper_setspeed(sh,ss->ss_speed);
+ return 0;
+ case SHAPER_GET_SPEED:
+ ss->ss_speed=sh->bitspersec;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void shaper_init_priv(struct net_device *dev)
+{
+ struct shaper *sh = dev->priv;
+
+ skb_queue_head_init(&sh->sendq);
+ init_timer(&sh->timer);
+ sh->timer.function=shaper_timer;
+ sh->timer.data=(unsigned long)sh;
+ init_waitqueue_head(&sh->wait_queue);
+}
+
+/*
+ * Add a shaper device to the system
+ */
+
+static void __init shaper_setup(struct net_device *dev)
+{
+ /*
+ * Set up the shaper.
+ */
+
+ SET_MODULE_OWNER(dev);
+
+ shaper_init_priv(dev);
+
+ dev->open = shaper_open;
+ dev->stop = shaper_close;
+ dev->hard_start_xmit = shaper_start_xmit;
+ dev->get_stats = shaper_get_stats;
+ dev->set_multicast_list = NULL;
+
+ /*
+ * Initialise the packet queues
+ */
+
+ /*
+ * Handlers for when we attach to a device.
+ */
+
+ dev->hard_header = shaper_header;
+ dev->rebuild_header = shaper_rebuild_header;
+#if 0
+ dev->hard_header_cache = shaper_cache;
+ dev->header_cache_update= shaper_cache_update;
+#endif
+ dev->neigh_setup = shaper_neigh_setup_dev;
+ dev->do_ioctl = shaper_ioctl;
+ dev->hard_header_len = 0;
+ dev->type = ARPHRD_ETHER; /* initially */
+ dev->set_mac_address = NULL;
+ dev->mtu = 1500;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+ dev->flags = 0;
+}
+
+static int shapers = 1;
+#ifdef MODULE
+
+module_param(shapers, int, 0);
+MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");
+
+#else /* MODULE */
+
+static int __init set_num_shapers(char *str)
+{
+ shapers = simple_strtol(str, NULL, 0);
+ return 1;
+}
+
+__setup("shapers=", set_num_shapers);
+
+#endif /* MODULE */
+
+static struct net_device **devs;
+
+static unsigned int shapers_registered = 0;
+
+static int __init shaper_init(void)
+{
+ int i;
+ size_t alloc_size;
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+
+ if (shapers < 1)
+ return -ENODEV;
+
+ alloc_size = sizeof(*dev) * shapers;
+ devs = kmalloc(alloc_size, GFP_KERNEL);
+ if (!devs)
+ return -ENOMEM;
+ memset(devs, 0, alloc_size);
+
+ for (i = 0; i < shapers; i++) {
+
+ snprintf(name, IFNAMSIZ, "shaper%d", i);
+ dev = alloc_netdev(sizeof(struct shaper), name,
+ shaper_setup);
+ if (!dev)
+ break;
+
+ if (register_netdev(dev)) {
+ free_netdev(dev);
+ break;
+ }
+
+ devs[i] = dev;
+ shapers_registered++;
+ }
+
+ if (!shapers_registered) {
+ kfree(devs);
+ devs = NULL;
+ }
+
+ return (shapers_registered ? 0 : -ENODEV);
+}
+
+static void __exit shaper_exit (void)
+{
+ int i;
+
+ for (i = 0; i < shapers_registered; i++) {
+ if (devs[i]) {
+ unregister_netdev(devs[i]);
+ free_netdev(devs[i]);
+ }
+ }
+
+ kfree(devs);
+ devs = NULL;
+}
+
+module_init(shaper_init);
+module_exit(shaper_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
new file mode 100644
index 000000000000..3e9d9aab0588
--- /dev/null
+++ b/drivers/net/sis900.c
@@ -0,0 +1,2370 @@
+/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
+ Copyright 1999 Silicon Integrated System Corporation
+ Revision: 1.08.08 Jan. 22 2005
+
+ Modified from the driver which is originally written by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on this skeleton fall under the GPL and must retain
+ the authorship (implicit copyright) notice.
+
+ References:
+ SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ preliminary Rev. 1.0 Jan. 14, 1998
+ SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ preliminary Rev. 1.0 Nov. 10, 1998
+ SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ preliminary Rev. 1.0 Jan. 18, 1998
+
+ Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
+ Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
+ Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
+ Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
+ Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
+ Rev 1.08.03 Feb. 1 2002 Matt Domsch <Matt_Domsch@dell.com> update to use library crc32 function
+ Rev 1.08.02 Nov. 30 2001 Hui-Fen Hsu workaround for EDB & bug fix for dhcp problem
+ Rev 1.08.01 Aug. 25 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY
+ Rev 1.08.00 Jun. 11 2001 Hui-Fen Hsu workaround for RTL8201 PHY and some bug fix
+ Rev 1.07.11 Apr. 2 2001 Hui-Fen Hsu updates PCI drivers to use the new pci_set_dma_mask for kernel 2.4.3
+ Rev 1.07.10 Mar. 1 2001 Hui-Fen Hsu <hfhsu@sis.com.tw> some bug fix & 635M/B support
+ Rev 1.07.09 Feb. 9 2001 Dave Jones <davej@suse.de> PCI enable cleanup
+ Rev 1.07.08 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support
+ Rev 1.07.07 Nov. 29 2000 Lei-Chun Chang added kernel-doc extractable documentation and 630 workaround fix
+ Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
+ Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
+ Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
+ Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
+ Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
+ Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
+ Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
+ Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release
+ Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed
+ Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provide by Joseph Zbiciak (im14u2c@primenet.com)
+ Rev 1.06 Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release
+ Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx
+ Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support
+ Rev 1.05 Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h> /* User space memory access functions */
+
+#include "sis900.h"
+
+#define SIS900_MODULE_NAME "sis900"
+#define SIS900_DRV_VERSION "v1.08.08 Jan. 22 2005"
+
+static char version[] __devinitdata =
+KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
+
+static int max_interrupt_work = 40;
+static int multicast_filter_limit = 128;
+
+static int sis900_debug = -1; /* Use SIS900_DEF_MSG as value */
+
+#define SIS900_DEF_MSG \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+/* SiS 900 is capable of 32 bits BM DMA */
+#define SIS900_DMA_MASK 0xffffffff
+
+enum {
+ SIS_900 = 0,
+ SIS_7016
+};
+static char * card_names[] = {
+ "SiS 900 PCI Fast Ethernet",
+ "SiS 7016 PCI Fast Ethernet"
+};
+static struct pci_device_id sis900_pci_tbl [] = {
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016},
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);
+
+static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);
+
+static struct mii_chip_info {
+ const char * name;
+ u16 phy_id0;
+ u16 phy_id1;
+ u8 phy_types;
+#define HOME 0x0001
+#define LAN 0x0002
+#define MIX 0x0003
+#define UNKNOWN 0x0
+} mii_chip_table[] = {
+ { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
+ { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
+ { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
+ { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
+ { "ICS LAN PHY", 0x0015, 0xF440, LAN },
+ { "NS 83851 PHY", 0x2000, 0x5C20, MIX },
+ { "NS 83847 PHY", 0x2000, 0x5C30, MIX },
+ { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
+ { "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
+ {NULL,},
+};
+
+struct mii_phy {
+ struct mii_phy * next;
+ int phy_addr;
+ u16 phy_id0;
+ u16 phy_id1;
+ u16 status;
+ u8 phy_types;
+};
+
+typedef struct _BufferDesc {
+ u32 link;
+ u32 cmdsts;
+ u32 bufptr;
+} BufferDesc;
+
+struct sis900_private {
+ struct net_device_stats stats;
+ struct pci_dev * pci_dev;
+
+ spinlock_t lock;
+
+ struct mii_phy * mii;
+ struct mii_phy * first_mii; /* record the first mii structure */
+ unsigned int cur_phy;
+
+ struct timer_list timer; /* Link status detection timer. */
+ u8 autong_complete; /* 1: auto-negotiate complete */
+
+ u32 msg_enable;
+
+ unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */
+ unsigned int cur_tx, dirty_tx;
+
+ /* The saved address of a sent/receive-in-place packet buffer */
+ struct sk_buff *tx_skbuff[NUM_TX_DESC];
+ struct sk_buff *rx_skbuff[NUM_RX_DESC];
+ BufferDesc *tx_ring;
+ BufferDesc *rx_ring;
+
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+
+ unsigned int tx_full; /* The Tx queue is full. */
+ u8 host_bridge_rev;
+ u8 chipset_rev;
+};
+
+MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
+MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(multicast_filter_limit, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+module_param(sis900_debug, int, 0444);
+MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
+MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis900_poll(struct net_device *dev);
+#endif
+static int sis900_open(struct net_device *net_dev);
+static int sis900_mii_probe (struct net_device * net_dev);
+static void sis900_init_rxfilter (struct net_device * net_dev);
+static u16 read_eeprom(long ioaddr, int location);
+static u16 mdio_read(struct net_device *net_dev, int phy_id, int location);
+static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
+static void sis900_timer(unsigned long data);
+static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
+static void sis900_tx_timeout(struct net_device *net_dev);
+static void sis900_init_tx_ring(struct net_device *net_dev);
+static void sis900_init_rx_ring(struct net_device *net_dev);
+static int sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
+static int sis900_rx(struct net_device *net_dev);
+static void sis900_finish_xmit (struct net_device *net_dev);
+static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sis900_close(struct net_device *net_dev);
+static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *sis900_get_stats(struct net_device *net_dev);
+static u16 sis900_mcast_bitnr(u8 *addr, u8 revision);
+static void set_rx_mode(struct net_device *net_dev);
+static void sis900_reset(struct net_device *net_dev);
+static void sis630_set_eq(struct net_device *net_dev, u8 revision);
+static int sis900_set_config(struct net_device *dev, struct ifmap *map);
+static u16 sis900_default_phy(struct net_device * net_dev);
+static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy);
+static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
+static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
+static void sis900_set_mode (long ioaddr, int speed, int duplex);
+static struct ethtool_ops sis900_ethtool_ops;
+
+/**
+ * sis900_get_mac_addr - Get MAC address for stand alone SiS900 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * Older SiS900 chips and friends use the EEPROM to store the MAC address.
+ * The MAC address is read via read_eeprom() into @net_dev->dev_addr.
+ */
+
+static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
+{
+ long ioaddr = pci_resource_start(pci_dev, 0);
+ u16 signature;
+ int i;
+
+ /* check to see if we have sane EEPROM */
+ signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
+ if (signature == 0xffff || signature == 0x0000) {
+ printk (KERN_WARNING "%s: Error EEPROM read %x\n",
+ pci_name(pci_dev), signature);
+ return 0;
+ }
+
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+ return 1;
+}
+
+/**
+ * sis630e_get_mac_addr - Get MAC address for SiS630E model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * SiS630E model, use APC CMOS RAM to store MAC address.
+ * APC CMOS RAM is accessed through ISA bridge.
+ * MAC address is read into @net_dev->dev_addr.
+ */
+
+static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ struct pci_dev *isa_bridge = NULL;
+ u8 reg;
+ int i;
+
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
+ if (!isa_bridge)
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
+ if (!isa_bridge) {
+ printk(KERN_WARNING "%s: Can not find ISA bridge\n",
+ pci_name(pci_dev));
+ return 0;
+ }
+ pci_read_config_byte(isa_bridge, 0x48, &reg);
+ pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);
+
+ for (i = 0; i < 6; i++) {
+ outb(0x09 + i, 0x70);
+ ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ }
+ pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
+ pci_dev_put(isa_bridge);
+
+ return 1;
+}
+
+
+/**
+ * sis635_get_mac_addr - Get MAC address for SIS635 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * SiS635 model, set MAC Reload Bit to load Mac address from APC
+ * to rfdr. rfdr is accessed through rfcr. MAC address is read into
+ * @net_dev->dev_addr.
+ */
+
+static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ outl(rfcrSave | RELOAD, ioaddr + cr);
+ outl(0, ioaddr + cr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+
+ return 1;
+}
+
+/**
+ * sis96x_get_mac_addr - Get MAC address for SiS962 or SiS963 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * SiS962 or SiS963 models use an EEPROM to store the MAC address, and the
+ * EEPROM is shared by the LAN and 1394 controllers. To access the EEPROM,
+ * send the EEREQ signal to the hardware first and wait for EEGNT. If EEGNT
+ * is ON, the LAN is permitted to access the EEPROM, otherwise it is not.
+ * After the MAC address has been read from the EEPROM, send the EEDONE
+ * signal to relinquish EEPROM access by the LAN.
+ * The EEPROM map of the SiS962 or SiS963 differs from that of the SiS900.
+ * The signature field in the SiS962 or SiS963 spec is meaningless.
+ * The MAC address is read into @net_dev->dev_addr.
+ */
+
+static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ long ee_addr = ioaddr + mear;
+ u32 waittime = 0;
+ int i;
+
+ outl(EEREQ, ee_addr);
+ while(waittime < 2000) {
+ if(inl(ee_addr) & EEGNT) {
+
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+ outl(EEDONE, ee_addr);
+ return 1;
+ } else {
+ udelay(1);
+ waittime ++;
+ }
+ }
+ outl(EEDONE, ee_addr);
+ return 0;
+}
+
+/**
+ * sis900_probe - Probe for sis900 device
+ * @pci_dev: the sis900 pci device
+ * @pci_id: the pci device ID
+ *
+ * Check and probe sis900 net device for @pci_dev.
+ * Get mac address according to the chip revision,
+ * and assign SiS900-specific entries in the device structure.
+ * ie: sis900_open(), sis900_start_xmit(), sis900_close(), etc.
+ */
+
+static int __devinit sis900_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct sis900_private *sis_priv;
+ struct net_device *net_dev;
+ struct pci_dev *dev;
+ dma_addr_t ring_dma;
+ void *ring_space;
+ long ioaddr;
+ int i, ret;
+ char *card_name = card_names[pci_id->driver_data];
+ const char *dev_name = pci_name(pci_dev);
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ /* setup various bits in PCI command register */
+ ret = pci_enable_device(pci_dev);
+ if(ret) return ret;
+
+ i = pci_set_dma_mask(pci_dev, SIS900_DMA_MASK);
+ if(i){
+ printk(KERN_ERR "sis900.c: architecture does not support "
+ "32bit PCI busmaster DMA\n");
+ return i;
+ }
+
+ pci_set_master(pci_dev);
+
+ net_dev = alloc_etherdev(sizeof(struct sis900_private));
+ if (!net_dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(net_dev);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+
+ /* We do a request_region() to register /proc/ioports info. */
+ ioaddr = pci_resource_start(pci_dev, 0);
+ ret = pci_request_regions(pci_dev, "sis900");
+ if (ret)
+ goto err_out;
+
+ sis_priv = net_dev->priv;
+ net_dev->base_addr = ioaddr;
+ net_dev->irq = pci_dev->irq;
+ sis_priv->pci_dev = pci_dev;
+ spin_lock_init(&sis_priv->lock);
+
+ pci_set_drvdata(pci_dev, net_dev);
+
+ ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ ret = -ENOMEM;
+ goto err_out_cleardev;
+ }
+ sis_priv->tx_ring = (BufferDesc *)ring_space;
+ sis_priv->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ ret = -ENOMEM;
+ goto err_unmap_tx;
+ }
+ sis_priv->rx_ring = (BufferDesc *)ring_space;
+ sis_priv->rx_ring_dma = ring_dma;
+
+ /* The SiS900-specific entries in the device structure. */
+ net_dev->open = &sis900_open;
+ net_dev->hard_start_xmit = &sis900_start_xmit;
+ net_dev->stop = &sis900_close;
+ net_dev->get_stats = &sis900_get_stats;
+ net_dev->set_config = &sis900_set_config;
+ net_dev->set_multicast_list = &set_rx_mode;
+ net_dev->do_ioctl = &mii_ioctl;
+ net_dev->tx_timeout = sis900_tx_timeout;
+ net_dev->watchdog_timeo = TX_TIMEOUT;
+ net_dev->ethtool_ops = &sis900_ethtool_ops;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ net_dev->poll_controller = &sis900_poll;
+#endif
+
+ if (sis900_debug > 0)
+ sis_priv->msg_enable = sis900_debug;
+ else
+ sis_priv->msg_enable = SIS900_DEF_MSG;
+
+ /* Get Mac address according to the chip revision */
+ pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
+ if(netif_msg_probe(sis_priv))
+ printk(KERN_DEBUG "%s: detected revision %2.2x, "
+ "trying to get MAC address...\n",
+ dev_name, sis_priv->chipset_rev);
+
+ ret = 0;
+ if (sis_priv->chipset_rev == SIS630E_900_REV)
+ ret = sis630e_get_mac_addr(pci_dev, net_dev);
+ else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90) )
+ ret = sis635_get_mac_addr(pci_dev, net_dev);
+ else if (sis_priv->chipset_rev == SIS96x_900_REV)
+ ret = sis96x_get_mac_addr(pci_dev, net_dev);
+ else
+ ret = sis900_get_mac_addr(pci_dev, net_dev);
+
+ if (ret == 0) {
+ printk(KERN_WARNING "%s: Cannot read MAC address.\n", dev_name);
+ ret = -ENODEV;
+ goto err_unmap_rx;
+ }
+
+ /* 630ET : set the mii access mode as software-mode */
+ if (sis_priv->chipset_rev == SIS630ET_900_REV)
+ outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+
+ /* probe for mii transceiver */
+ if (sis900_mii_probe(net_dev) == 0) {
+ printk(KERN_WARNING "%s: Error probing MII device.\n",
+ dev_name);
+ ret = -ENODEV;
+ goto err_unmap_rx;
+ }
+
+ /* save our host bridge revision */
+ dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
+ if (dev) {
+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev);
+ pci_dev_put(dev);
+ }
+
+ ret = register_netdev(net_dev);
+ if (ret)
+ goto err_unmap_rx;
+
+ /* print some information about our NIC */
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", net_dev->name,
+ card_name, ioaddr, net_dev->irq);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", (u8)net_dev->dev_addr[i]);
+ printk("%2.2x.\n", net_dev->dev_addr[i]);
+
+ return 0;
+
+ err_unmap_rx:
+ pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
+ err_unmap_tx:
+ pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
+ err_out_cleardev:
+ pci_set_drvdata(pci_dev, NULL);
+ pci_release_regions(pci_dev);
+ err_out:
+ free_netdev(net_dev);
+ return ret;
+}
+
+/**
+ * sis900_mii_probe - Probe MII PHY for sis900
+ * @net_dev: the net device to probe for
+ *
+ * Search all 32 possible MII PHY addresses.
+ * Identify and set the current PHY if one is found,
+ * return an error if none is found.
+ */
+
+static int __init sis900_mii_probe(struct net_device * net_dev)
+{
+ struct sis900_private * sis_priv = net_dev->priv;
+ const char *dev_name = pci_name(sis_priv->pci_dev);
+ u16 poll_bit = MII_STAT_LINK, status = 0;
+ unsigned long timeout = jiffies + 5 * HZ;
+ int phy_addr;
+
+ sis_priv->mii = NULL;
+
+ /* search for total of 32 possible mii phy addresses */
+ for (phy_addr = 0; phy_addr < 32; phy_addr++) {
+ struct mii_phy * mii_phy = NULL;
+ u16 mii_status;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (mii_status == 0xffff || mii_status == 0x0000) {
+ if (netif_msg_probe(sis_priv))
+ printk(KERN_DEBUG "%s: MII at address %d"
+ " not accessible\n",
+ dev_name, phy_addr);
+ continue;
+ }
+
+ if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING "Cannot allocate mem for struct mii_phy\n");
+ mii_phy = sis_priv->first_mii;
+ while (mii_phy) {
+ struct mii_phy *phy;
+ phy = mii_phy;
+ mii_phy = mii_phy->next;
+ kfree(phy);
+ }
+ return 0;
+ }
+
+ mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
+ mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
+ mii_phy->phy_addr = phy_addr;
+ mii_phy->status = mii_status;
+ mii_phy->next = sis_priv->mii;
+ sis_priv->mii = mii_phy;
+ sis_priv->first_mii = mii_phy;
+
+ for (i = 0; mii_chip_table[i].phy_id1; i++)
+ if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
+ ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)){
+ mii_phy->phy_types = mii_chip_table[i].phy_types;
+ if (mii_chip_table[i].phy_types == MIX)
+ mii_phy->phy_types =
+ (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME;
+ printk(KERN_INFO "%s: %s transceiver found "
+ "at address %d.\n",
+ dev_name,
+ mii_chip_table[i].name,
+ phy_addr);
+ break;
+ }
+
+ if( !mii_chip_table[i].phy_id1 ) {
+ printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
+ dev_name, phy_addr);
+ mii_phy->phy_types = UNKNOWN;
+ }
+ }
+
+ if (sis_priv->mii == NULL) {
+ printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name);
+ return 0;
+ }
+
+ /* select default PHY for mac */
+ sis_priv->mii = NULL;
+ sis900_default_phy( net_dev );
+
+ /* Reset phy if default phy is internal sis900 */
+ if ((sis_priv->mii->phy_id0 == 0x001D) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0x8000))
+ status = sis900_reset_phy(net_dev, sis_priv->cur_phy);
+
+ /* workaround for ICS1893 PHY */
+ if ((sis_priv->mii->phy_id0 == 0x0015) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
+ mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
+
+ if(status & MII_STAT_LINK){
+ while (poll_bit) {
+ yield();
+
+ poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: reset phy and link down now\n",
+ dev_name);
+ return -ETIME;
+ }
+ }
+ }
+
+ if (sis_priv->chipset_rev == SIS630E_900_REV) {
+ /* SiS 630E has some bugs on default value of PHY registers */
+ mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
+ //mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000);
+ }
+
+ if (sis_priv->mii->status & MII_STAT_LINK)
+ netif_carrier_on(net_dev);
+ else
+ netif_carrier_off(net_dev);
+
+ return 1;
+}
+
+/**
+ * sis900_default_phy - Select default PHY for sis900 mac.
+ * @net_dev: the net device to probe for
+ *
+ * Select the first detected PHY with a link as the default.
+ * If none has a link, select a PHY whose type is HOME as the default.
+ * If no HOME PHY exists, select LAN.
+ */
+
+static u16 sis900_default_phy(struct net_device * net_dev)
+{
+ struct sis900_private * sis_priv = net_dev->priv;
+ struct mii_phy *phy = NULL, *phy_home = NULL,
+ *default_phy = NULL, *phy_lan = NULL;
+ u16 status;
+
+ for (phy=sis_priv->first_mii; phy; phy=phy->next) {
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ /* Link ON & Not select default PHY & not ghost PHY */
+ if ((status & MII_STAT_LINK) && !default_phy &&
+ (phy->phy_types != UNKNOWN))
+ default_phy = phy;
+ else {
+ status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
+ mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
+ status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
+ if (phy->phy_types == HOME)
+ phy_home = phy;
+ else if(phy->phy_types == LAN)
+ phy_lan = phy;
+ }
+ }
+
+ if (!default_phy && phy_home)
+ default_phy = phy_home;
+ else if (!default_phy && phy_lan)
+ default_phy = phy_lan;
+ else if (!default_phy)
+ default_phy = sis_priv->first_mii;
+
+ if (sis_priv->mii != default_phy) {
+ sis_priv->mii = default_phy;
+ sis_priv->cur_phy = default_phy->phy_addr;
+ printk(KERN_INFO "%s: Using transceiver found at address %d as default\n",
+ pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
+ }
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
+ status &= (~MII_CNTL_ISOLATE);
+
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ return status;
+}
+
+
+/**
+ * sis900_set_capability - set the media capability of network adapter.
+ * @net_dev : the net device to probe for
+ * @phy : default PHY
+ *
+ * Set the media capability of the network adapter according to the
+ * MII status register. This is necessary before auto-negotiation.
+ */
+
+static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
+{
+ u16 cap;
+ u16 status;
+
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ cap = MII_NWAY_CSMA_CD |
+ ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
+ ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
+ ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
+ ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
+
+ mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap);
+}
+
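+/*
+ * Illustrative example: a PHY whose status reports MII_STAT_CAN_TX_FDX
+ * and MII_STAT_CAN_TX ends up advertising MII_NWAY_CSMA_CD |
+ * MII_NWAY_TX_FDX | MII_NWAY_TX in MII_ANADV before auto-negotiation.
+ */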
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() inl(ee_addr)
+
+/**
+ * read_eeprom - Read Serial EEPROM
+ * @ioaddr: base i/o address
+ * @location: the EEPROM location to read
+ *
+ * Read Serial EEPROM through EEPROM Access Register.
+ * Note that the location is in units of 16-bit words
+ */
+
+static u16 __devinit read_eeprom(long ioaddr, int location)
+{
+ int i;
+ u16 retval = 0;
+ long ee_addr = ioaddr + mear;
+ u32 read_cmd = location | EEread;
+
+ outl(0, ee_addr);
+ eeprom_delay();
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* Shift the read command (9) bits out. */
+ for (i = 8; i >= 0; i--) {
+ u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
+ outl(dataval, ee_addr);
+ eeprom_delay();
+ outl(dataval | EECLK, ee_addr);
+ eeprom_delay();
+ }
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* read the 16-bits data in */
+ for (i = 16; i > 0; i--) {
+ outl(EECS, ee_addr);
+ eeprom_delay();
+ outl(EECS | EECLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(0, ee_addr);
+ eeprom_delay();
+
+ return (retval);
+}
+
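+/*
+ * Illustrative example: read_eeprom(ioaddr, 8) shifts out the 9 bit
+ * command (EEread | 8), MSB first, one bit per EECLK pulse, then clocks
+ * in the 16 data bits through EEDO to rebuild the word.
+ */
+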
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol. Note that the command bits and data bits are
+ sent out separately */
+#define mdio_delay() inl(mdio_addr)
+
+static void mdio_idle(long mdio_addr)
+{
+ outl(MDIO | MDDIR, mdio_addr);
+ mdio_delay();
+ outl(MDIO | MDDIR | MDC, mdio_addr);
+}
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(long mdio_addr)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--) {
+ outl(MDDIR | MDIO, mdio_addr);
+ mdio_delay();
+ outl(MDDIR | MDIO | MDC, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+/**
+ * mdio_read - read MII PHY register
+ * @net_dev: the net device to read
+ * @phy_id: the phy address to read
+ * @location: the phy register id to read
+ *
+ * Read MII registers through MDIO and MDC
+ * using MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see SiS7014 or ICS spec
+ */
+
+static u16 mdio_read(struct net_device *net_dev, int phy_id, int location)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ u16 retval = 0;
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+
+ /* Read the 16 data bits. */
+ for (i = 16; i > 0; i--) {
+ outl(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
+ outl(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return retval;
+}
+
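+/*
+ * Illustrative example: mdio_read(dev, 1, MII_STATUS) shifts out the 16
+ * bit frame MIIread | (1 << MIIpmdShift) | (MII_STATUS << MIIregShift),
+ * MSB first, then samples MDIO on 16 more MDC cycles to collect the
+ * register value.
+ */
+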
+/**
+ * mdio_write - write MII PHY register
+ * @net_dev: the net device to write
+ * @phy_id: the phy address to write
+ * @location: the phy register id to write
+ * @value: the register value to write with
+ *
+ * Write MII registers with @value through MDIO and MDC
+ * using MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see the SiS7014 or ICS spec.
+ */
+
+static void mdio_write(struct net_device *net_dev, int phy_id, int location,
+ int value)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outb(dataval, mdio_addr);
+ mdio_delay();
+ outb(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Shift the value bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay();
+ outb(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return;
+}
+
+
+/**
+ * sis900_reset_phy - reset sis900 mii phy.
+ * @net_dev: the net device to write
+ * @phy_addr: default phy address
+ *
+ * Some specific PHYs can't work properly without a reset.
+ * This function will be called during initialization and
+ * on a link status change from ON to DOWN.
+ */
+
+static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
+{
+ int i = 0;
+ u16 status;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
+
+ return status;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+*/
+static void sis900_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ sis900_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * sis900_open - open sis900 device
+ * @net_dev: the net device to open
+ *
+ * Do some initialization, start the net interface,
+ * enable interrupts and set the sis900 timer.
+ */
+
+static int
+sis900_open(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int ret;
+
+ /* Soft reset the chip. */
+ sis900_reset(net_dev);
+
+ /* Equalizer workaround Rule */
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+
+ ret = request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ,
+ net_dev->name, net_dev);
+ if (ret)
+ return ret;
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ netif_start_queue(net_dev);
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ /* Set the timer to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&sis_priv->timer);
+ sis_priv->timer.expires = jiffies + HZ;
+ sis_priv->timer.data = (unsigned long)net_dev;
+ sis_priv->timer.function = &sis900_timer;
+ add_timer(&sis_priv->timer);
+
+ return 0;
+}
+
+/**
+ * sis900_init_rxfilter - Initialize the Rx filter
+ * @net_dev: the net device to initialize for
+ *
+ * Set receive filter address to our MAC address
+ * and enable packet filtering.
+ */
+
+static void
+sis900_init_rxfilter (struct net_device * net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ u32 w;
+
+ w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ outl(w, ioaddr + rfdr);
+
+ if (netif_msg_hw(sis_priv)) {
+ printk(KERN_DEBUG "%s: Receive Filter Address[%d]=%x\n",
+ net_dev->name, i, inl(ioaddr + rfdr));
+ }
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+}
+
+/**
+ * sis900_init_tx_ring - Initialize the Tx descriptor ring
+ * @net_dev: the net device to initialize for
+ *
+ * Initialize the Tx descriptor ring.
+ */
+
+static void
+sis900_init_tx_ring(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->tx_full = 0;
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ sis_priv->tx_skbuff[i] = NULL;
+
+ sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
+ ((i+1)%NUM_TX_DESC)*sizeof(BufferDesc);
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ }
+
+ /* load Transmit Descriptor Register */
+ outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+ if (netif_msg_hw(sis_priv))
+ printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + txdp));
+}
+
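+/*
+ * Illustrative note: descriptor i links to tx_ring_dma +
+ * ((i + 1) % NUM_TX_DESC) * sizeof(BufferDesc), so the last descriptor
+ * points back at the first and the hardware sees a circular ring.
+ */
+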
+/**
+ * sis900_init_rx_ring - Initialize the Rx descriptor ring
+ * @net_dev: the net device to initialize for
+ *
+ * Initialize the Rx descriptor ring,
+ * and pre-allocate receive buffers (socket buffers)
+ */
+
+static void
+sis900_init_rx_ring(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->cur_rx = 0;
+ sis_priv->dirty_rx = 0;
+
+ /* init RX descriptor */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ sis_priv->rx_skbuff[i] = NULL;
+
+ sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma +
+ ((i+1)%NUM_RX_DESC)*sizeof(BufferDesc);
+ sis_priv->rx_ring[i].cmdsts = 0;
+ sis_priv->rx_ring[i].bufptr = 0;
+ }
+
+ /* allocate sock buffers */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a "hole"
+ on the buffer ring, it is not clear how the
+ hardware will react to this kind of degenerated
+ buffer */
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[i] = skb;
+ sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
+ skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ }
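+ /* With unsigned wrap-around, cur_rx - dirty_rx now equals the number of
+ * descriptors that are still missing a receive buffer. */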
+ sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
+
+ /* load Receive Descriptor Register */
+ outl(sis_priv->rx_ring_dma, ioaddr + rxdp);
+ if (netif_msg_hw(sis_priv))
+ printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + rxdp));
+}
+
+/**
+ * sis630_set_eq - set phy equalizer value for 630 LAN
+ * @net_dev: the net device to set equalizer value
+ * @revision: 630 LAN revision number
+ *
+ * 630E equalizer workaround rule(Cyrus Huang 08/15)
+ * PHY register 14h(Test)
+ * Bit 14: 0 -- Automatically detect (default)
+ * 1 -- Manually set Equalizer filter
+ * Bit 13: 0 -- (Default)
+ * 1 -- Speed up convergence of equalizer setting
+ * Bit 9 : 0 -- (Default)
+ * 1 -- Disable Baseline Wander
+ * Bit 3~7 -- Equalizer filter setting
+ * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0
+ * Then calculate equalizer value
+ * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
+ * Link Off: Set Bit 13 to 1, Bit 14 to 0
+ * Calculate Equalizer value:
+ * When Link is ON and Bit 14 is 0, SIS900PHY will auto-detect the proper equalizer value.
+ * When the equalizer is stable, this value is not fixed. It will be within
+ * a small range (e.g. 7~9). Then we get a minimum and a maximum value (e.g. min=7, max=9).
+ * 0 <= max <= 4 --> set equalizer to max
+ * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min
+ * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min
+ */
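+/* Illustrative example (not from the datasheet): if the ten equalizer reads
+ * below return values between 7 and 9, then max=9 and min=7; since
+ * 5 <= max < 15 and max != min, the equalizer is programmed to max+1 = 10. */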
+
+static void sis630_set_eq(struct net_device *net_dev, u8 revision)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ u16 reg14h, eq_value=0, max_value=0, min_value=0;
+ int i, maxcount=10;
+
+ if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630A_900_REV || revision == SIS630ET_900_REV) )
+ return;
+
+ if (netif_carrier_ok(net_dev)) {
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (0x2200 | reg14h) & 0xBFFF);
+ for (i=0; i < maxcount; i++) {
+ eq_value = (0x00F8 & mdio_read(net_dev,
+ sis_priv->cur_phy, MII_RESV)) >> 3;
+ if (i == 0)
+ max_value=min_value=eq_value;
+ max_value = (eq_value > max_value) ?
+ eq_value : max_value;
+ min_value = (eq_value < min_value) ?
+ eq_value : min_value;
+ }
+ /* 630E rule to determine the equalizer value */
+ if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630ET_900_REV) {
+ if (max_value < 5)
+ eq_value = max_value;
+ else if (max_value >= 5 && max_value < 15)
+ eq_value = (max_value == min_value) ?
+ max_value+2 : max_value+1;
+ else if (max_value >= 15)
+ eq_value=(max_value == min_value) ?
+ max_value+6 : max_value+5;
+ }
+ /* 630B0&B1 rule to determine the equalizer value */
+ if (revision == SIS630A_900_REV &&
+ (sis_priv->host_bridge_rev == SIS630B0 ||
+ sis_priv->host_bridge_rev == SIS630B1)) {
+ if (max_value == 0)
+ eq_value = 3;
+ else
+ eq_value = (max_value + min_value + 1)/2;
+ }
+ /* write equalizer value and setting */
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
+ reg14h = (reg14h | 0x6000) & 0xFDFF;
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
+ } else {
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ if (revision == SIS630A_900_REV &&
+ (sis_priv->host_bridge_rev == SIS630B0 ||
+ sis_priv->host_bridge_rev == SIS630B1))
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (reg14h | 0x2200) & 0xBFFF);
+ else
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (reg14h | 0x2000) & 0xBFFF);
+ }
+ return;
+}
+
+/**
+ * sis900_timer - sis900 timer routine
+ * @data: pointer to sis900 net device
+ *
+ * On each timer tick we check two things:
+ * link status (ON/OFF) and link mode (10/100/Full/Half)
+ */
+
+static void sis900_timer(unsigned long data)
+{
+ struct net_device *net_dev = (struct net_device *)data;
+ struct sis900_private *sis_priv = net_dev->priv;
+ struct mii_phy *mii_phy = sis_priv->mii;
+ static int next_tick = 5*HZ;
+ u16 status;
+
+ if (!sis_priv->autong_complete){
+ int speed, duplex = 0;
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if (duplex){
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+ netif_start_queue(net_dev);
+ }
+
+ sis_priv->timer.expires = jiffies + HZ;
+ add_timer(&sis_priv->timer);
+ return;
+ }
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ /* Link OFF -> ON */
+ if (!netif_carrier_ok(net_dev)) {
+ LookForLink:
+ /* Search for new PHY */
+ status = sis900_default_phy(net_dev);
+ mii_phy = sis_priv->mii;
+
+ if (status & MII_STAT_LINK){
+ sis900_check_mode(net_dev, mii_phy);
+ netif_carrier_on(net_dev);
+ }
+ } else {
+ /* Link ON -> OFF */
+ if (!(status & MII_STAT_LINK)){
+ netif_carrier_off(net_dev);
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+
+ /* Change mode issue */
+ if ((mii_phy->phy_id0 == 0x001D) &&
+ ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
+ sis900_reset_phy(net_dev, sis_priv->cur_phy);
+
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+
+ goto LookForLink;
+ }
+ }
+
+ sis_priv->timer.expires = jiffies + next_tick;
+ add_timer(&sis_priv->timer);
+}
+
+/**
+ * sis900_check_mode - check the media mode for sis900
+ * @net_dev: the net device to be checked
+ * @mii_phy: the mii phy
+ *
+ * The older driver got the media mode from the mii status output
+ * register. Now we set our media capability and auto-negotiate
+ * to get the upper bound of speed and duplex between the two ends.
+ * If the type of the mii phy is HOME, it doesn't need to auto-negotiate
+ * and autong_complete should be set to 1.
+ */
+
+static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int speed, duplex;
+
+ if (mii_phy->phy_types == LAN) {
+ outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg);
+ sis900_set_capability(net_dev , mii_phy);
+ sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
+ } else {
+ outl(EXD | inl(ioaddr + cfg), ioaddr + cfg);
+ speed = HW_SPEED_HOME;
+ duplex = FDX_CAPABLE_HALF_SELECTED;
+ sis900_set_mode(ioaddr, speed, duplex);
+ sis_priv->autong_complete = 1;
+ }
+}
+
+/**
+ * sis900_set_mode - Set the media mode of mac register.
+ * @ioaddr: the address of the device
+ * @speed : the transmit speed to be determined
+ * @duplex: the duplex mode to be determined
+ *
+ * Set the media mode of mac register txcfg/rxcfg according to
+ * speed and duplex of the phy. Bit EDB_MASTER_EN indicates that the EDB
+ * bus is used instead of the PCI bus. When this bit is set to 1, the
+ * Max DMA Burst Size for TX/RX DMA should be no larger than 16
+ * double words.
+ */
+
+static void sis900_set_mode (long ioaddr, int speed, int duplex)
+{
+ u32 tx_flags = 0, rx_flags = 0;
+
+ if (inl(ioaddr + cfg) & EDB_MASTER_EN) {
+ tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
+ (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_64 << RxMXDMA_shift;
+ } else {
+ tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
+ (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_512 << RxMXDMA_shift;
+ }
+
+ if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) {
+ rx_flags |= (RxDRNT_10 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_10 << TxDRNT_shift);
+ } else {
+ rx_flags |= (RxDRNT_100 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_100 << TxDRNT_shift);
+ }
+
+ if (duplex == FDX_CAPABLE_FULL_SELECTED) {
+ tx_flags |= (TxCSI | TxHBI);
+ rx_flags |= RxATX;
+ }
+
+ outl (tx_flags, ioaddr + txcfg);
+ outl (rx_flags, ioaddr + rxcfg);
+}
+
+/**
+ * sis900_auto_negotiate - Set the Auto-Negotiation Enable/Reset bit.
+ * @net_dev: the net device to read mode for
+ * @phy_addr: mii phy address
+ *
+ * If the adapter is link-on, set the auto-negotiate enable/reset bit.
+ * autong_complete should be set to 0 when starting auto-negotiation.
+ * autong_complete should be set to 1 if we didn't start auto-negotiation.
+ * sis900_timer will wait for link on again if autong_complete = 0.
+ */
+
+static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ int i = 0;
+ u32 status;
+
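+ /* Read the status register twice: the MII link-status bit is latched,
+ * so the second read reflects the current link state. */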
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)){
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+ sis_priv->autong_complete = 1;
+ netif_carrier_off(net_dev);
+ return;
+ }
+
+ /* (Re)start AutoNegotiate */
+ mdio_write(net_dev, phy_addr, MII_CONTROL,
+ MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+ sis_priv->autong_complete = 0;
+}
+
+
+/**
+ * sis900_read_mode - read media mode for sis900 internal phy
+ * @net_dev: the net device to read mode for
+ * @speed : the transmit speed to be determined
+ * @duplex : the duplex mode to be determined
+ *
+ * The capability of the remote end will be put in the mii register autorec
+ * after auto-negotiation. Use an AND operation to get the upper bound
+ * of speed and duplex between two ends.
+ */
+
+static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ struct mii_phy *phy = sis_priv->mii;
+ int phy_addr = sis_priv->cur_phy;
+ u32 status;
+ u16 autoadv, autorec;
+ int i = 0;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK))
+ return;
+
+ /* AutoNegotiate completed */
+ autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
+ autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
+ status = autoadv & autorec;
+
+ *speed = HW_SPEED_10_MBPS;
+ *duplex = FDX_CAPABLE_HALF_SELECTED;
+
+ if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
+ *speed = HW_SPEED_100_MBPS;
+ if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+
+ sis_priv->autong_complete = 1;
+
+ /* Workaround for Realtek RTL8201 PHY issue */
+ if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
+ if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+ if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
+ *speed = HW_SPEED_100_MBPS;
+ }
+
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ net_dev->name,
+ *speed == HW_SPEED_100_MBPS ?
+ "100mbps" : "10mbps",
+ *duplex == FDX_CAPABLE_FULL_SELECTED ?
+ "full" : "half");
+}
+
+/**
+ * sis900_tx_timeout - sis900 transmit timeout routine
+ * @net_dev: the net device to transmit
+ *
+ * print the transmit timeout status,
+ * disable interrupts and do some tasks
+ */
+
+static void sis900_tx_timeout(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ if(netif_msg_tx_err(sis_priv))
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+
+ /* use spinlock to prevent interrupt handler accessing buffer ring */
+ spin_lock_irqsave(&sis_priv->lock, flags);
+
+ /* discard unsent packets */
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct sk_buff *skb = sis_priv->tx_skbuff[i];
+
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[i].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ sis_priv->stats.tx_dropped++;
+ }
+ }
+ sis_priv->tx_full = 0;
+ netif_wake_queue(net_dev);
+
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+
+ net_dev->trans_start = jiffies;
+
+ /* load Transmit Descriptor Register */
+ outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ return;
+}
+
+/**
+ * sis900_start_xmit - sis900 start transmit routine
+ * @skb: socket buffer pointer to put the data being transmitted
+ * @net_dev: the net device to transmit with
+ *
+ * Set the transmit buffer descriptor,
+ * and write TxENA to enable transmit state machine.
+ * tell the upper layer if the buffer is full
+ */
+
+static int
+sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry;
+ unsigned long flags;
+ unsigned int index_cur_tx, index_dirty_tx;
+ unsigned int count_dirty_tx;
+
+ /* Don't transmit data before auto-negotiation completes */
+ if(!sis_priv->autong_complete){
+ netif_stop_queue(net_dev);
+ return 1;
+ }
+
+ spin_lock_irqsave(&sis_priv->lock, flags);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = sis_priv->cur_tx % NUM_TX_DESC;
+ sis_priv->tx_skbuff[entry] = skb;
+
+ /* set the transmit buffer descriptor and enable Transmit State Machine */
+ sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
+ skb->data, skb->len, PCI_DMA_TODEVICE);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ sis_priv->cur_tx ++;
+ index_cur_tx = sis_priv->cur_tx;
+ index_dirty_tx = sis_priv->dirty_tx;
+
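+ /* Count how many descriptors are outstanding, i.e. the distance from
+ * dirty_tx to cur_tx around the ring. */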
+ for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++)
+ count_dirty_tx ++;
+
+ if (index_cur_tx == index_dirty_tx) {
+ /* dirty_tx is met in the cycle of cur_tx, buffer full */
+ sis_priv->tx_full = 1;
+ netif_stop_queue(net_dev);
+ } else if (count_dirty_tx < NUM_TX_DESC) {
+ /* Typical path, tell upper layer that more transmission is possible */
+ netif_start_queue(net_dev);
+ } else {
+ /* buffer full, tell upper layer no more transmission */
+ sis_priv->tx_full = 1;
+ netif_stop_queue(net_dev);
+ }
+
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+
+ net_dev->trans_start = jiffies;
+
+ if (netif_msg_tx_queued(sis_priv))
+ printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
+ "to slot %d.\n",
+ net_dev->name, skb->data, (int)skb->len, entry);
+
+ return 0;
+}
+
+/**
+ * sis900_interrupt - sis900 interrupt handler
+ * @irq: the irq number
+ * @dev_instance: the client data object
+ * @regs: snapshot of processor context
+ *
+ * The interrupt handler does all of the Rx thread work,
+ * and cleans up after the Tx thread
+ */
+
+static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *net_dev = dev_instance;
+ struct sis900_private *sis_priv = net_dev->priv;
+ int boguscnt = max_interrupt_work;
+ long ioaddr = net_dev->base_addr;
+ u32 status;
+ unsigned int handled = 0;
+
+ spin_lock (&sis_priv->lock);
+
+ do {
+ status = inl(ioaddr + isr);
+
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ /* nothing interesting happened */
+ break;
+ handled = 1;
+
+ /* why don't we break after the Tx/Rx case ?? keyword: full-duplex */
+ if (status & (RxORN | RxERR | RxOK))
+ /* Rx interrupt */
+ sis900_rx(net_dev);
+
+ if (status & (TxURN | TxERR | TxIDLE))
+ /* Tx interrupt */
+ sis900_finish_xmit(net_dev);
+
+ /* something strange happened !!! */
+ if (status & HIBERR) {
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_INFO "%s: Abnormal interrupt,"
+ "status %#8.8x.\n", net_dev->name, status);
+ break;
+ }
+ if (--boguscnt < 0) {
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_INFO "%s: Too much work at interrupt, "
+ "interrupt status = %#8.8x.\n",
+ net_dev->name, status);
+ break;
+ }
+ } while (1);
+
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_DEBUG "%s: exiting interrupt, "
+ "interrupt status = 0x%#8.8x.\n",
+ net_dev->name, inl(ioaddr + isr));
+
+ spin_unlock (&sis_priv->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * sis900_rx - sis900 receive routine
+ * @net_dev: the net device which receives data
+ *
+ * Process receive interrupt events,
+ * put buffer to higher layer and refill buffer pool
+ * Note: This function is called by the interrupt handler,
+ * don't do "too much" work here
+ */
+
+static int sis900_rx(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
+ u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
+
+ if (netif_msg_rx_status(sis_priv))
+ printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
+ "status:0x%8.8x\n",
+ sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
+
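+ /* The chip sets OWN in cmdsts once it has filled a descriptor; process
+ * completed descriptors until we reach one that is still pending. */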
+ while (rx_status & OWN) {
+ unsigned int rx_size;
+
+ rx_size = (rx_status & DSIZE) - CRC_SIZE;
+
+ if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
+ /* corrupted packet received */
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_DEBUG "%s: Corrupted packet "
+ "received, buffer status = 0x%8.8x.\n",
+ net_dev->name, rx_status);
+ sis_priv->stats.rx_errors++;
+ if (rx_status & OVERRUN)
+ sis_priv->stats.rx_over_errors++;
+ if (rx_status & (TOOLONG|RUNT))
+ sis_priv->stats.rx_length_errors++;
+ if (rx_status & (RXISERR | FAERR))
+ sis_priv->stats.rx_frame_errors++;
+ if (rx_status & CRCERR)
+ sis_priv->stats.rx_crc_errors++;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ } else {
+ struct sk_buff * skb;
+
+ /* This situation should never happen, but due to
+ some unknown bugs, it is possible that
+ we are working on a NULL sk_buff :-( */
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_INFO "%s: NULL pointer "
+ "encountered in Rx ring, skipping\n",
+ net_dev->name);
+ break;
+ }
+
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ /* give the socket buffer to upper layers */
+ skb = sis_priv->rx_skbuff[entry];
+ skb_put(skb, rx_size);
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_rx(skb);
+
+ /* some network statistics */
+ if ((rx_status & BCAST) == MCAST)
+ sis_priv->stats.multicast++;
+ net_dev->last_rx = jiffies;
+ sis_priv->stats.rx_bytes += rx_size;
+ sis_priv->stats.rx_packets++;
+
+ /* refill the Rx buffer; what if there is not enough
+ * memory for a new socket buffer ?? */
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a
+ * "hole" on the buffer ring, it is not clear
+ * how the hardware will react to this kind
+ * of degenerated buffer */
+ if (netif_msg_rx_status(sis_priv))
+ printk(KERN_INFO "%s: Memory squeeze,"
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->rx_skbuff[entry] = NULL;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = 0;
+ sis_priv->rx_ring[entry].bufptr = 0;
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr =
+ pci_map_single(sis_priv->pci_dev, skb->tail,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ sis_priv->dirty_rx++;
+ }
+ sis_priv->cur_rx++;
+ entry = sis_priv->cur_rx % NUM_RX_DESC;
+ rx_status = sis_priv->rx_ring[entry].cmdsts;
+ } // while
+
+ /* refill the Rx buffer, what if the rate of refilling is slower
+ * than consuming ?? */
+ for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) {
+ struct sk_buff *skb;
+
+ entry = sis_priv->dirty_rx % NUM_RX_DESC;
+
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a
+ * "hole" on the buffer ring, it is not clear
+ * how the hardware will react to this kind
+ * of degenerated buffer */
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_INFO "%s: Memory squeeze,"
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr =
+ pci_map_single(sis_priv->pci_dev, skb->tail,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ }
+ }
+ /* re-enable the potentially idle receive state machine */
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+
+ return 0;
+}
+
+/**
+ * sis900_finish_xmit - finish up transmission of packets
+ * @net_dev: the net device to be transmitted on
+ *
+ * Check for error conditions, free socket buffers, etc.,
+ * and schedule more transmission as needed.
+ * Note: This function is called by the interrupt handler,
+ * don't do "too much" work here
+ */
+
+static void sis900_finish_xmit (struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+
+ for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
+ struct sk_buff *skb;
+ unsigned int entry;
+ u32 tx_status;
+
+ entry = sis_priv->dirty_tx % NUM_TX_DESC;
+ tx_status = sis_priv->tx_ring[entry].cmdsts;
+
+ if (tx_status & OWN) {
+ /* The packet is not transmitted yet (owned by hardware) !
+ * Note: the interrupt is generated only when Tx Machine
+ * is idle, so this is an almost impossible case */
+ break;
+ }
+
+ if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
+ /* packet unsuccessfully transmitted */
+ if (netif_msg_tx_err(sis_priv))
+ printk(KERN_DEBUG "%s: Transmit "
+ "error, Tx status %8.8x.\n",
+ net_dev->name, tx_status);
+ sis_priv->stats.tx_errors++;
+ if (tx_status & UNDERRUN)
+ sis_priv->stats.tx_fifo_errors++;
+ if (tx_status & ABORT)
+ sis_priv->stats.tx_aborted_errors++;
+ if (tx_status & NOCARRIER)
+ sis_priv->stats.tx_carrier_errors++;
+ if (tx_status & OWCOLL)
+ sis_priv->stats.tx_window_errors++;
+ } else {
+ /* packet successfully transmitted */
+ sis_priv->stats.collisions += (tx_status & COLCNT) >> 16;
+ sis_priv->stats.tx_bytes += tx_status & DSIZE;
+ sis_priv->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ skb = sis_priv->tx_skbuff[entry];
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[entry].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ sis_priv->tx_skbuff[entry] = NULL;
+ sis_priv->tx_ring[entry].bufptr = 0;
+ sis_priv->tx_ring[entry].cmdsts = 0;
+ }
+
+ if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
+ sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
+ /* The ring is no longer full, clear tx_full and schedule
+ * more transmission by netif_wake_queue(net_dev) */
+ sis_priv->tx_full = 0;
+ netif_wake_queue (net_dev);
+ }
+}
+
+/**
+ * sis900_close - close sis900 device
+ * @net_dev: the net device to be closed
+ *
+ * Disable interrupts, stop the Tx and Rx Status Machine
+ * and free Tx and Rx socket buffers
+ */
+
+static int sis900_close(struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = net_dev->priv;
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(net_dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+ outl(0x0000, ioaddr + ier);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ del_timer(&sis_priv->timer);
+
+ free_irq(net_dev->irq, net_dev);
+
+ /* Free Tx and RX skbuff */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ skb = sis_priv->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->rx_ring[i].bufptr,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ sis_priv->rx_skbuff[i] = NULL;
+ }
+ }
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ skb = sis_priv->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[i].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ sis_priv->tx_skbuff[i] = NULL;
+ }
+ }
+
+ /* Green! Put the chip in low-power mode. */
+
+ return 0;
+}
+
+/**
+ * sis900_get_drvinfo - Return information about driver
+ * @net_dev: the net device to probe
+ * @info: container for info returned
+ *
+ * Process ethtool commands such as "ethtool -i" to show information
+ */
+
+static void sis900_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+
+ strcpy (info->driver, SIS900_MODULE_NAME);
+ strcpy (info->version, SIS900_DRV_VERSION);
+ strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+}
+
+static u32 sis900_get_msglevel(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ return sis_priv->msg_enable;
+}
+
+static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ sis_priv->msg_enable = value;
+}
+
+static struct ethtool_ops sis900_ethtool_ops = {
+ .get_drvinfo = sis900_get_drvinfo,
+ .get_msglevel = sis900_get_msglevel,
+ .set_msglevel = sis900_set_msglevel,
+};
+
+/**
+ * mii_ioctl - process MII i/o control command
+ * @net_dev: the net device to command for
+ * @rq: parameter for command
+ * @cmd: the i/o command
+ *
+ * Process MII command like read/write MII register
+ */
+
+static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = sis_priv->mii->phy_addr;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * sis900_get_stats - Get sis900 read/write statistics
+ * @net_dev: the net device to get statistics for
+ *
+ * get tx/rx statistics for sis900
+ */
+
+static struct net_device_stats *
+sis900_get_stats(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = net_dev->priv;
+
+ return &sis_priv->stats;
+}
+
+/**
+ * sis900_set_config - Set media type by net_device.set_config
+ * @dev: the net device for media type change
+ * @map: ifmap passed by ifconfig
+ *
+ * Set media type to 10baseT, 100baseT or 0 (for auto) by ifconfig.
+ * We support only port changes. All other runtime configuration
+ * changes will be ignored.
+ */
+
+static int sis900_set_config(struct net_device *dev, struct ifmap *map)
+{
+ struct sis900_private *sis_priv = dev->priv;
+ struct mii_phy *mii_phy = sis_priv->mii;
+
+ u16 status;
+
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ /* we switch on the ifmap->port field. I couldn't find anything
+ * like a definition or standard for the values of that field.
+ * I think the meaning of those values is device specific. But
+ * since I would like to change the media type via the ifconfig
+ * command I use the definition from linux/netdevice.h
+ * (which seems to be different from the ifport(pcmcia) definition) */
+ switch(map->port){
+ case IF_PORT_UNKNOWN: /* use auto here */
+ dev->if_port = map->port;
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* read current state */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+
+ /* enable auto negotiation and reset the negotiation
+ * (I don't really know what the auto negotiation
+ * reset really means, but it sounds right to me to
+ * do one here) */
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+
+ break;
+
+ case IF_PORT_10BASET: /* 10BaseT */
+ dev->if_port = map->port;
+
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* set Speed to 10Mbps */
+ /* read current state */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+
+ /* disable auto negotiation and force 10MBit mode*/
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, status & ~(MII_CNTL_SPEED |
+ MII_CNTL_AUTO));
+ break;
+
+ case IF_PORT_100BASET: /* 100BaseT */
+ case IF_PORT_100BASETX: /* 100BaseTx */
+ dev->if_port = map->port;
+
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* set Speed to 100Mbps */
+ /* disable auto negotiation and enable 100MBit Mode */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, (status & ~MII_CNTL_SPEED) |
+ MII_CNTL_SPEED);
+
+ break;
+
+ case IF_PORT_10BASE2: /* 10Base2 */
+ case IF_PORT_AUI: /* AUI */
+ case IF_PORT_100BASEFX: /* 100BaseFx */
+ /* These Modes are not supported (are they?)*/
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * sis900_mcast_bitnr - compute hashtable index
+ * @addr: multicast address
+ * @revision: revision id of chip
+ *
+ * SiS 900 uses the most significant 7 bits to index a 128-bit multicast
+ * hash table, which makes this function a little bit different from other drivers.
+ * SiS 900 B0 & 635 M/B use the most significant 8 bits to index a 256-bit
+ * multicast hash table.
+ */
+
+static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
+{
+
+ u32 crc = ether_crc(6, addr);
+
+ /* keep the 8 or 7 most significant bits */
+ if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ return ((int)(crc >> 24));
+ else
+ return ((int)(crc >> 25));
+}
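+/* Illustrative usage (see set_rx_mode() below): bit_nr >> 4 selects one of
+ * the 16-bit hash table words and bit_nr & 0xf selects the bit within it. */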
+
+/**
+ * set_rx_mode - Set SiS900 receive mode
+ * @net_dev: the net device to be set
+ *
+ * Set SiS900 receive mode for promiscuous, multicast, or broadcast mode,
+ * and set the appropriate multicast filter.
+ * Multicast hash table changes from 128 to 256 bits for 635M/B & 900B0.
+ */
+
+static void set_rx_mode(struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private * sis_priv = net_dev->priv;
+ u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
+ int i, table_entries;
+ u32 rx_mode;
+
+ /* 635 hash table = 256 bits (16 16-bit entries) */
+ if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
+ (sis_priv->chipset_rev == SIS900B_900_REV))
+ table_entries = 16;
+ else
+ table_entries = 8;
+
+ if (net_dev->flags & IFF_PROMISC) {
+ /* Accept any kinds of packets */
+ rx_mode = RFPromiscuous;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ (net_dev->flags & IFF_ALLMULTI)) {
+ /* too many multicast addresses or accept all multicast packets */
+ rx_mode = RFAAB | RFAAM;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else {
+ /* Accept Broadcast packets and packets whose destination
+ * address matches our MAC address; use the Receive Filter
+ * to reject unwanted MCAST packets */
+ struct dev_mc_list *mclist;
+ rx_mode = RFAAB;
+ for (i = 0, mclist = net_dev->mc_list;
+ mclist && i < net_dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit_nr =
+ sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
+ mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
+ }
+ }
+
+ /* update Multicast Hash Table in Receive Filter */
+ for (i = 0; i < table_entries; i++) {
+ /* plus 0x04: the hash table apparently starts at receive filter index 4,
+ * just past the MAC address words loaded at indices 0-2 by sis900_init_rxfilter() */
+ outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
+ outl(mc_filter[i], ioaddr + rfdr);
+ }
+
+ outl(RFEN | rx_mode, ioaddr + rfcr);
+
+ /* sis900 is capable of looping back packets at MAC level for
+ * debugging purposes */
+ if (net_dev->flags & IFF_LOOPBACK) {
+ u32 cr_saved;
+ /* We must disable Tx/Rx before setting loopback mode */
+ cr_saved = inl(ioaddr + cr);
+ outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+ /* enable loopback */
+ outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
+ outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+ /* restore cr */
+ outl(cr_saved, ioaddr + cr);
+ }
+
+ return;
+}
+
+/**
+ * sis900_reset - Reset sis900 MAC
+ * @net_dev: the net device to reset
+ *
+ * Reset the sis900 MAC through the command register and wait
+ * until the reset has finished, then change the backoff
+ * algorithm for 900B0 & 635 M/B.
+ */
+
+static void sis900_reset(struct net_device *net_dev)
+{
+ struct sis900_private * sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i = 0;
+ u32 status = TxRCMP | RxRCMP;
+
+ outl(0, ioaddr + ier);
+ outl(0, ioaddr + imr);
+ outl(0, ioaddr + rfcr);
+
+ outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Check that the chip has finished the reset. */
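+ /* Each pass clears from the wait mask the completion bits (TxRCMP, RxRCMP)
+ * that the ISR reports, so the loop exits once both reset-complete bits
+ * have been seen or after 1000 iterations. */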
+ while (status && (i++ < 1000)) {
+ status ^= (inl(isr + ioaddr) & status);
+ }
+
+ if( (sis_priv->chipset_rev >= SIS635A_900_REV) ||
+ (sis_priv->chipset_rev == SIS900B_900_REV) )
+ outl(PESEL | RND_CNT, ioaddr + cfg);
+ else
+ outl(PESEL, ioaddr + cfg);
+}
+
+/**
+ * sis900_remove - Remove sis900 device
+ * @pci_dev: the pci device to be removed
+ *
+ * remove and release SiS900 net device
+ */
+
+static void __devexit sis900_remove(struct pci_dev *pci_dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct sis900_private * sis_priv = net_dev->priv;
+ struct mii_phy *phy = NULL;
+
+ while (sis_priv->first_mii) {
+ phy = sis_priv->first_mii;
+ sis_priv->first_mii = phy->next;
+ kfree(phy);
+ }
+
+ pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
+ pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+ pci_release_regions(pci_dev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ long ioaddr = net_dev->base_addr;
+
+ if(!netif_running(net_dev))
+ return 0;
+
+ netif_stop_queue(net_dev);
+ netif_device_detach(net_dev);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ pci_set_power_state(pci_dev, PCI_D3hot);
+ pci_save_state(pci_dev);
+
+ return 0;
+}
+
+static int sis900_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct sis900_private *sis_priv = net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+
+ if(!netif_running(net_dev))
+ return 0;
+ pci_restore_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D0);
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ netif_device_attach(net_dev);
+ netif_start_queue(net_dev);
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver sis900_pci_driver = {
+ .name = SIS900_MODULE_NAME,
+ .id_table = sis900_pci_tbl,
+ .probe = sis900_probe,
+ .remove = __devexit_p(sis900_remove),
+#ifdef CONFIG_PM
+ .suspend = sis900_suspend,
+ .resume = sis900_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init sis900_init_module(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_module_init(&sis900_pci_driver);
+}
+
+static void __exit sis900_cleanup_module(void)
+{
+ pci_unregister_driver(&sis900_pci_driver);
+}
+
+module_init(sis900_init_module);
+module_exit(sis900_cleanup_module);
+
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
new file mode 100644
index 000000000000..de3c06735d15
--- /dev/null
+++ b/drivers/net/sis900.h
@@ -0,0 +1,279 @@
+/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900
+ * Copyright 1999 Silicon Integrated System Corporation
+ * References:
+ * SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ * preliminary Rev. 1.0 Jan. 14, 1998
+ * SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ * preliminary Rev. 1.0 Nov. 10, 1998
+ * SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ * preliminary Rev. 1.0 Jan. 18, 1998
+ * http://www.sis.com.tw/support/databook.htm
+ */
+
+/*
+ * SiS 7016 and SiS 900 ethernet controller registers
+ */
+
+/* The I/O extent; the SiS 900 needs 256 bytes of I/O space */
+#define SIS900_TOTAL_SIZE 0x100
+
+/* Symbolic offsets to registers. */
+enum sis900_registers {
+ cr=0x0, //Command Register
+ cfg=0x4, //Configuration Register
+ mear=0x8, //EEPROM Access Register
+ ptscr=0xc, //PCI Test Control Register
+ isr=0x10, //Interrupt Status Register
+ imr=0x14, //Interrupt Mask Register
+ ier=0x18, //Interrupt Enable Register
+ epar=0x18, //Enhanced PHY Access Register
+ txdp=0x20, //Transmit Descriptor Pointer Register
+ txcfg=0x24, //Transmit Configuration Register
+ rxdp=0x30, //Receive Descriptor Pointer Register
+ rxcfg=0x34, //Receive Configuration Register
+ flctrl=0x38, //Flow Control Register
+ rxlen=0x3c, //Receive Packet Length Register
+ rfcr=0x48, //Receive Filter Control Register
+ rfdr=0x4C, //Receive Filter Data Register
+ pmctrl=0xB0, //Power Management Control Register
+ pmer=0xB4 //Power Management Wake-up Event Register
+};
+
+/* Symbolic names for bits in various registers */
+enum sis900_command_register_bits {
+ RELOAD = 0x00000400, ACCESSMODE = 0x00000200,/* ET */
+ RESET = 0x00000100, SWI = 0x00000080, RxRESET = 0x00000020,
+ TxRESET = 0x00000010, RxDIS = 0x00000008, RxENA = 0x00000004,
+ TxDIS = 0x00000002, TxENA = 0x00000001
+};
+
+enum sis900_configuration_register_bits {
+ DESCRFMT = 0x00000100 /* 7016 specific */, REQALG = 0x00000080,
+ SB = 0x00000040, POW = 0x00000020, EXD = 0x00000010,
+ PESEL = 0x00000008, LPM = 0x00000004, BEM = 0x00000001,
+ /* 635 & 900B Specific */
+ RND_CNT = 0x00000400, FAIR_BACKOFF = 0x00000200,
+ EDB_MASTER_EN = 0x00002000
+};
+
+enum sis900_eeprom_access_reigster_bits {
+ MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
+ EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
+ EEDI = 0x00000001
+};
+
+enum sis900_interrupt_register_bits {
+ WKEVT = 0x10000000, TxPAUSEEND = 0x08000000, TxPAUSE = 0x04000000,
+ TxRCMP = 0x02000000, RxRCMP = 0x01000000, DPERR = 0x00800000,
+ SSERR = 0x00400000, RMABT = 0x00200000, RTABT = 0x00100000,
+ RxSOVR = 0x00010000, HIBERR = 0x00008000, SWINT = 0x00001000,
+ MIBINT = 0x00000800, TxURN = 0x00000400, TxIDLE = 0x00000200,
+ TxERR = 0x00000100, TxDESC = 0x00000080, TxOK = 0x00000040,
+ RxORN = 0x00000020, RxIDLE = 0x00000010, RxEARLY = 0x00000008,
+ RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001
+};
+
+enum sis900_interrupt_enable_reigster_bits {
+ IE = 0x00000001
+};
+
+/* maximum dma burst for transmission and receive */
+#define MAX_DMA_RANGE 7 /* actually 0 means MAXIMUM !! */
+#define TxMXDMA_shift 20
+#define RxMXDMA_shift 20
+
+enum sis900_tx_rx_dma{
+ DMA_BURST_512 = 0, DMA_BURST_64 = 5
+};
+
+/* transmit FIFO thresholds */
+#define TX_FILL_THRESH 16 /* 1/4 FIFO size */
+#define TxFILLT_shift 8
+#define TxDRNT_shift 0
+#define TxDRNT_100 48 /* 3/4 FIFO size */
+#define TxDRNT_10 16 /* 1/2 FIFO size */
+
+enum sis900_transmit_config_register_bits {
+ TxCSI = 0x80000000, TxHBI = 0x40000000, TxMLB = 0x20000000,
+ TxATP = 0x10000000, TxIFG = 0x0C000000, TxFILLT = 0x00003F00,
+ TxDRNT = 0x0000003F
+};
+
+/* receive FIFO thresholds */
+#define RxDRNT_shift 1
+#define RxDRNT_100 16 /* 1/2 FIFO size */
+#define RxDRNT_10 24 /* 3/4 FIFO size */
+
+enum sis900_reveive_config_register_bits {
+ RxAEP = 0x80000000, RxARP = 0x40000000, RxATX = 0x10000000,
+ RxAJAB = 0x08000000, RxDRNT = 0x0000007F
+};
+
+#define RFAA_shift 28
+#define RFADDR_shift 16
+
+enum sis900_receive_filter_control_register_bits {
+ RFEN = 0x80000000, RFAAB = 0x40000000, RFAAM = 0x20000000,
+ RFAAP = 0x10000000, RFPromiscuous = (RFAAB|RFAAM|RFAAP)
+};
+
+enum sis900_reveive_filter_data_mask {
+ RFDAT = 0x0000FFFF
+};
+
+/* EEPROM Addresses */
+enum sis900_eeprom_address {
+ EEPROMSignature = 0x00, EEPROMVendorID = 0x02, EEPROMDeviceID = 0x03,
+ EEPROMMACAddr = 0x08, EEPROMChecksum = 0x0b
+};
+
+/* The EEPROM commands include the always-set leading bit. Refer to the NM93Cxx datasheet */
+enum sis900_eeprom_command {
+ EEread = 0x0180, EEwrite = 0x0140, EEerase = 0x01C0,
+ EEwriteEnable = 0x0130, EEwriteDisable = 0x0100,
+ EEeraseAll = 0x0120, EEwriteAll = 0x0110,
+ EEaddrMask = 0x013F, EEcmdShift = 16
+};
+
+/* For SiS962 or SiS963, request the eeprom software access */
+enum sis96x_eeprom_command {
+ EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
+};
+
+/* Management Data I/O (mdio) frame */
+#define MIIread 0x6000
+#define MIIwrite 0x5002
+#define MIIpmdShift 7
+#define MIIregShift 2
+#define MIIcmdLen 16
+#define MIIcmdShift 16
+
+/* Buffer Descriptor Status */
+enum sis900_buffer_status {
+ OWN = 0x80000000, MORE = 0x40000000, INTR = 0x20000000,
+ SUPCRC = 0x10000000, INCCRC = 0x10000000,
+ OK = 0x08000000, DSIZE = 0x00000FFF
+};
+/* Status for TX Buffers */
+enum sis900_tx_buffer_status {
+ ABORT = 0x04000000, UNDERRUN = 0x02000000, NOCARRIER = 0x01000000,
+ DEFERD = 0x00800000, EXCDEFER = 0x00400000, OWCOLL = 0x00200000,
+ EXCCOLL = 0x00100000, COLCNT = 0x000F0000
+};
+
+enum sis900_rx_bufer_status {
+ OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
+ MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
+ RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
+ FAERR = 0x00040000, LOOPBK = 0x00020000, RXCOL = 0x00010000
+};
+
+/* MII register offsets */
+enum mii_registers {
+ MII_CONTROL = 0x0000, MII_STATUS = 0x0001, MII_PHY_ID0 = 0x0002,
+ MII_PHY_ID1 = 0x0003, MII_ANADV = 0x0004, MII_ANLPAR = 0x0005,
+ MII_ANEXT = 0x0006
+};
+
+/* mii registers specific to SiS 900 */
+enum sis_mii_registers {
+ MII_CONFIG1 = 0x0010, MII_CONFIG2 = 0x0011, MII_STSOUT = 0x0012,
+ MII_MASK = 0x0013, MII_RESV = 0x0014
+};
+
+/* mii registers specific to ICS 1893 */
+enum ics_mii_registers {
+ MII_EXTCTRL = 0x0010, MII_QPDSTS = 0x0011, MII_10BTOP = 0x0012,
+ MII_EXTCTRL2 = 0x0013
+};
+
+/* mii registers specific to AMD 79C901 */
+enum amd_mii_registers {
+ MII_STATUS_SUMMARY = 0x0018
+};
+
+/* MII Control register bit definitions. */
+enum mii_control_register_bits {
+ MII_CNTL_FDX = 0x0100, MII_CNTL_RST_AUTO = 0x0200,
+ MII_CNTL_ISOLATE = 0x0400, MII_CNTL_PWRDWN = 0x0800,
+ MII_CNTL_AUTO = 0x1000, MII_CNTL_SPEED = 0x2000,
+ MII_CNTL_LPBK = 0x4000, MII_CNTL_RESET = 0x8000
+};
+
+/* MII Status register bit */
+enum mii_status_register_bits {
+ MII_STAT_EXT = 0x0001, MII_STAT_JAB = 0x0002,
+ MII_STAT_LINK = 0x0004, MII_STAT_CAN_AUTO = 0x0008,
+ MII_STAT_FAULT = 0x0010, MII_STAT_AUTO_DONE = 0x0020,
+ MII_STAT_CAN_T = 0x0800, MII_STAT_CAN_T_FDX = 0x1000,
+ MII_STAT_CAN_TX = 0x2000, MII_STAT_CAN_TX_FDX = 0x4000,
+ MII_STAT_CAN_T4 = 0x8000
+};
+
+#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
+#define MII_ID1_MODEL 0x03F0 /* model number */
+#define MII_ID1_REV 0x000F /* revision number */
+
+/* MII NWAY Register Bits ...
+ valid for the ANAR (Auto-Negotiation Advertisement) and
+ ANLPAR (Auto-Negotiation Link Partner) registers */
+enum mii_nway_register_bits {
+ MII_NWAY_NODE_SEL = 0x001f, MII_NWAY_CSMA_CD = 0x0001,
+ MII_NWAY_T = 0x0020, MII_NWAY_T_FDX = 0x0040,
+ MII_NWAY_TX = 0x0080, MII_NWAY_TX_FDX = 0x0100,
+ MII_NWAY_T4 = 0x0200, MII_NWAY_PAUSE = 0x0400,
+ MII_NWAY_RF = 0x2000, MII_NWAY_ACK = 0x4000,
+ MII_NWAY_NP = 0x8000
+};
+
+enum mii_stsout_register_bits {
+ MII_STSOUT_LINK_FAIL = 0x4000,
+ MII_STSOUT_SPD = 0x0080, MII_STSOUT_DPLX = 0x0040
+};
+
+enum mii_stsics_register_bits {
+ MII_STSICS_SPD = 0x8000, MII_STSICS_DPLX = 0x4000,
+ MII_STSICS_LINKSTS = 0x0001
+};
+
+enum mii_stssum_register_bits {
+ MII_STSSUM_LINK = 0x0008, MII_STSSUM_DPLX = 0x0004,
+ MII_STSSUM_AUTO = 0x0002, MII_STSSUM_SPD = 0x0001
+};
+
+enum sis900_revision_id {
+ SIS630A_900_REV = 0x80, SIS630E_900_REV = 0x81,
+ SIS630S_900_REV = 0x82, SIS630EA1_900_REV = 0x83,
+ SIS630ET_900_REV = 0x84, SIS635A_900_REV = 0x90,
+ SIS96x_900_REV = 0X91, SIS900B_900_REV = 0x03
+};
+
+enum sis630_revision_id {
+ SIS630A0 = 0x00, SIS630A1 = 0x01,
+ SIS630B0 = 0x10, SIS630B1 = 0x11
+};
+
+#define FDX_CAPABLE_DUPLEX_UNKNOWN 0
+#define FDX_CAPABLE_HALF_SELECTED 1
+#define FDX_CAPABLE_FULL_SELECTED 2
+
+#define HW_SPEED_UNCONFIG 0
+#define HW_SPEED_HOME 1
+#define HW_SPEED_10_MBPS 10
+#define HW_SPEED_100_MBPS 100
+#define HW_SPEED_DEFAULT (HW_SPEED_100_MBPS)
+
+#define CRC_SIZE 4
+#define MAC_HEADER_SIZE 14
+
+#define TX_BUF_SIZE 1536
+#define RX_BUF_SIZE 1536
+
+#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
+#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
+#define TX_TOTAL_SIZE NUM_TX_DESC*sizeof(BufferDesc)
+#define RX_TOTAL_SIZE NUM_RX_DESC*sizeof(BufferDesc)
+
+/* PCI stuff, should be moved to pci.h */
+#define SIS630_VENDOR_ID 0x1039
+#define SIS630_DEVICE_ID 0x0630
diff --git a/drivers/net/sk98lin/Makefile b/drivers/net/sk98lin/Makefile
new file mode 100644
index 000000000000..6783039ffb75
--- /dev/null
+++ b/drivers/net/sk98lin/Makefile
@@ -0,0 +1,89 @@
+#
+# Makefile for the SysKonnect SK-98xx device driver.
+#
+
+
+#
+# Standalone driver params
+# SKPARAM += -DSK_KERNEL_24
+# SKPARAM += -DSK_KERNEL_24_26
+# SKPARAM += -DSK_KERNEL_26
+# SKPARAM += -DSK_KERNEL_22_24
+
+obj-$(CONFIG_SK98LIN) += sk98lin.o
+sk98lin-objs := \
+ skge.o \
+ skethtool.o \
+ skdim.o \
+ skaddr.o \
+ skgehwt.o \
+ skgeinit.o \
+ skgepnmi.o \
+ skgesirq.o \
+ ski2c.o \
+ sklm80.o \
+ skqueue.o \
+ skrlmt.o \
+ sktimer.o \
+ skvpd.o \
+ skxmac2.o \
+ skproc.o \
+ skcsum.o
+
+# DBGDEF = \
+# -DDEBUG
+
+ifdef DEBUG
+DBGDEF += \
+-DSK_DEBUG_CHKMOD=0x00000000L \
+-DSK_DEBUG_CHKCAT=0x00000000L
+endif
+
+
+# **** possible debug modules for SK_DEBUG_CHKMOD *****************
+# SK_DBGMOD_MERR 0x00000001L /* general module error indication */
+# SK_DBGMOD_HWM 0x00000002L /* Hardware init module */
+# SK_DBGMOD_RLMT 0x00000004L /* RLMT module */
+# SK_DBGMOD_VPD 0x00000008L /* VPD module */
+# SK_DBGMOD_I2C 0x00000010L /* I2C module */
+# SK_DBGMOD_PNMI 0x00000020L /* PNMI module */
+# SK_DBGMOD_CSUM 0x00000040L /* CSUM module */
+# SK_DBGMOD_ADDR 0x00000080L /* ADDR module */
+# SK_DBGMOD_DRV 0x00010000L /* DRV module */
+
+# **** possible debug categories for SK_DEBUG_CHKCAT **************
+# *** common modules ***
+# SK_DBGCAT_INIT 0x00000001L module/driver initialization
+# SK_DBGCAT_CTRL 0x00000002L controlling: add/rmv MCA/MAC and other controls (IOCTL)
+# SK_DBGCAT_ERR 0x00000004L error handling paths
+# SK_DBGCAT_TX 0x00000008L transmit path
+# SK_DBGCAT_RX 0x00000010L receive path
+# SK_DBGCAT_IRQ 0x00000020L general IRQ handling
+# SK_DBGCAT_QUEUE 0x00000040L any queue management
+# SK_DBGCAT_DUMP 0x00000080L large data output e.g. hex dump
+# SK_DBGCAT_FATAL 0x00000100L large data output e.g. hex dump
+
+# *** driver (file skge.c) ***
+# SK_DBGCAT_DRV_ENTRY 0x00010000 entry points
+# SK_DBGCAT_DRV_??? 0x00020000 not used
+# SK_DBGCAT_DRV_MCA 0x00040000 multicast
+# SK_DBGCAT_DRV_TX_PROGRESS 0x00080000 tx path
+# SK_DBGCAT_DRV_RX_PROGRESS 0x00100000 rx path
+# SK_DBGCAT_DRV_PROGRESS 0x00200000 general runtime
+# SK_DBGCAT_DRV_??? 0x00400000 not used
+# SK_DBGCAT_DRV_PROM 0x00800000 promiscuous mode
+# SK_DBGCAT_DRV_TX_FRAME 0x01000000 display tx frames
+# SK_DBGCAT_DRV_ERROR 0x02000000 error conditions
+# SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupt sources
+# SK_DBGCAT_DRV_EVENT 0x08000000 driver events
+
+EXTRA_CFLAGS += -Idrivers/net/sk98lin -DSK_DIAG_SUPPORT -DSK_USE_CSUM -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM)
+
+clean:
+ rm -f core *.o *.a *.s
+
+
+
+
+
+
diff --git a/drivers/net/sk98lin/h/lm80.h b/drivers/net/sk98lin/h/lm80.h
new file mode 100644
index 000000000000..4e2dbbf78000
--- /dev/null
+++ b/drivers/net/sk98lin/h/lm80.h
@@ -0,0 +1,179 @@
+/******************************************************************************
+ *
+ * Name: lm80.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.6 $
+ * Date: $Date: 2003/05/13 17:26:52 $
+ * Purpose: Contains all defines for the LM80 Chip
+ * (National Semiconductor).
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_LM80_H
+#define __INC_LM80_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines ********************************************************************/
+
+/*
+ * LM80 register definition
+ *
+ * All registers are 8 bit wide
+ */
+#define LM80_CFG 0x00 /* Configuration Register */
+#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */
+#define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */
+#define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */
+#define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */
+#define LM80_FAN_CTRL 0x05 /* Fan Divisor/RST#/OS# Register */
+#define LM80_TEMP_CTRL 0x06 /* OS# Config, Temp Res. Reg */
+ /* 0x07 - 0x1f reserved */
+ /* current values */
+#define LM80_VT0_IN 0x20 /* current Voltage 0 value */
+#define LM80_VT1_IN 0x21 /* current Voltage 1 value */
+#define LM80_VT2_IN 0x22 /* current Voltage 2 value */
+#define LM80_VT3_IN 0x23 /* current Voltage 3 value */
+#define LM80_VT4_IN 0x24 /* current Voltage 4 value */
+#define LM80_VT5_IN 0x25 /* current Voltage 5 value */
+#define LM80_VT6_IN 0x26 /* current Voltage 6 value */
+#define LM80_TEMP_IN 0x27 /* current Temperature value */
+#define LM80_FAN1_IN 0x28 /* current Fan 1 count */
+#define LM80_FAN2_IN 0x29 /* current Fan 2 count */
+ /* limit values */
+#define LM80_VT0_HIGH_LIM 0x2a /* high limit val for Voltage 0 */
+#define LM80_VT0_LOW_LIM 0x2b /* low limit val for Voltage 0 */
+#define LM80_VT1_HIGH_LIM 0x2c /* high limit val for Voltage 1 */
+#define LM80_VT1_LOW_LIM 0x2d /* low limit val for Voltage 1 */
+#define LM80_VT2_HIGH_LIM 0x2e /* high limit val for Voltage 2 */
+#define LM80_VT2_LOW_LIM 0x2f /* low limit val for Voltage 2 */
+#define LM80_VT3_HIGH_LIM 0x30 /* high limit val for Voltage 3 */
+#define LM80_VT3_LOW_LIM 0x31 /* low limit val for Voltage 3 */
+#define LM80_VT4_HIGH_LIM 0x32 /* high limit val for Voltage 4 */
+#define LM80_VT4_LOW_LIM 0x33 /* low limit val for Voltage 4 */
+#define LM80_VT5_HIGH_LIM 0x34 /* high limit val for Voltage 5 */
+#define LM80_VT5_LOW_LIM 0x35 /* low limit val for Voltage 5 */
+#define LM80_VT6_HIGH_LIM 0x36 /* high limit val for Voltage 6 */
+#define LM80_VT6_LOW_LIM 0x37 /* low limit val for Voltage 6 */
+#define LM80_THOT_LIM_UP 0x38 /* hot temperature limit (high) */
+#define LM80_THOT_LIM_LO 0x39 /* hot temperature limit (low) */
+#define LM80_TOS_LIM_UP 0x3a /* OS temperature limit (high) */
+#define LM80_TOS_LIM_LO 0x3b /* OS temperature limit (low) */
+#define LM80_FAN1_COUNT_LIM 0x3c /* Fan 1 count limit (high) */
+#define LM80_FAN2_COUNT_LIM 0x3d /* Fan 2 count limit (low) */
+ /* 0x3e - 0x3f reserved */
+
+/*
+ * LM80 bit definitions
+ */
+
+/* LM80_CFG Configuration Register */
+#define LM80_CFG_START (1<<0) /* start monitoring operation */
+#define LM80_CFG_INT_ENA (1<<1) /* enables the INT# Interrupt output */
+#define LM80_CFG_INT_POL (1<<2) /* INT# pol: 0 act low, 1 act high */
+#define LM80_CFG_INT_CLR (1<<3) /* disables INT#/RST_OUT#/OS# outputs */
+#define LM80_CFG_RESET (1<<4) /* signals a reset */
+#define LM80_CFG_CHASS_CLR (1<<5) /* clears Chassis Intrusion (CI) pin */
+#define LM80_CFG_GPO (1<<6) /* drives the GPO# pin */
+#define LM80_CFG_INIT (1<<7) /* restore power on defaults */
+
+/* LM80_ISRC_1 Interrupt Status Register 1 */
+/* LM80_IMSK_1 Interrupt Mask Register 1 */
+#define LM80_IS_VT0 (1<<0) /* limit exceeded for Voltage 0 */
+#define LM80_IS_VT1 (1<<1) /* limit exceeded for Voltage 1 */
+#define LM80_IS_VT2 (1<<2) /* limit exceeded for Voltage 2 */
+#define LM80_IS_VT3 (1<<3) /* limit exceeded for Voltage 3 */
+#define LM80_IS_VT4 (1<<4) /* limit exceeded for Voltage 4 */
+#define LM80_IS_VT5 (1<<5) /* limit exceeded for Voltage 5 */
+#define LM80_IS_VT6 (1<<6) /* limit exceeded for Voltage 6 */
+#define LM80_IS_INT_IN (1<<7) /* state of INT_IN# */
+
+/* LM80_ISRC_2 Interrupt Status Register 2 */
+/* LM80_IMSK_2 Interrupt Mask Register 2 */
+#define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */
+#define LM80_IS_BTI (1<<1) /* state of BTI# pin */
+#define LM80_IS_FAN1 (1<<2) /* count limit exceeded for Fan 1 */
+#define LM80_IS_FAN2 (1<<3) /* count limit exceeded for Fan 2 */
+#define LM80_IS_CI (1<<4) /* Chassis Intrusion occurred */
+#define LM80_IS_OS (1<<5) /* OS temperature limit exceeded */
+ /* bit 6 and 7 are reserved in LM80_ISRC_2 */
+#define LM80_IS_HT_IRQ_MD (1<<6) /* Hot temperature interrupt mode */
+#define LM80_IS_OT_IRQ_MD (1<<7) /* OS temperature interrupt mode */
+
+/* LM80_FAN_CTRL Fan Divisor/RST#/OS# Register */
+#define LM80_FAN1_MD_SEL (1<<0) /* Fan 1 mode select */
+#define LM80_FAN2_MD_SEL (1<<1) /* Fan 2 mode select */
+#define LM80_FAN1_PRM_CTL (3<<2) /* Fan 1 speed control */
+#define LM80_FAN2_PRM_CTL (3<<4) /* Fan 2 speed control */
+#define LM80_FAN_OS_ENA (1<<6) /* enable OS mode on RST_OUT#/OS# pins*/
+#define LM80_FAN_RST_ENA (1<<7) /* sets RST_OUT#/OS# pins in RST mode */
+
+/* LM80_TEMP_CTRL OS# Config, Temp Res. Reg */
+#define LM80_TEMP_OS_STAT (1<<0) /* mirrors the state of RST_OUT#/OS# */
+#define LM80_TEMP_OS_POL (1<<1) /* select OS# polarity */
+#define LM80_TEMP_OS_MODE (1<<2) /* selects Interrupt mode */
+#define LM80_TEMP_RES (1<<3) /* selects 9 or 11 bit temp resolution */
+#define LM80_TEMP_LSB (0xf<<4)/* 4 LSBs of 11 bit temp data */
+#define LM80_TEMP_LSB_9 (1<<7) /* LSB of 9 bit temperature data */
+
+ /* 0x07 - 0x1f reserved */
+/* LM80_VT0_IN current Voltage 0 value */
+/* LM80_VT1_IN current Voltage 1 value */
+/* LM80_VT2_IN current Voltage 2 value */
+/* LM80_VT3_IN current Voltage 3 value */
+/* LM80_VT4_IN current Voltage 4 value */
+/* LM80_VT5_IN current Voltage 5 value */
+/* LM80_VT6_IN current Voltage 6 value */
+/* LM80_TEMP_IN current temperature value */
+/* LM80_FAN1_IN current Fan 1 count */
+/* LM80_FAN2_IN current Fan 2 count */
+/* LM80_VT0_HIGH_LIM high limit val for Voltage 0 */
+/* LM80_VT0_LOW_LIM low limit val for Voltage 0 */
+/* LM80_VT1_HIGH_LIM high limit val for Voltage 1 */
+/* LM80_VT1_LOW_LIM low limit val for Voltage 1 */
+/* LM80_VT2_HIGH_LIM high limit val for Voltage 2 */
+/* LM80_VT2_LOW_LIM low limit val for Voltage 2 */
+/* LM80_VT3_HIGH_LIM high limit val for Voltage 3 */
+/* LM80_VT3_LOW_LIM low limit val for Voltage 3 */
+/* LM80_VT4_HIGH_LIM high limit val for Voltage 4 */
+/* LM80_VT4_LOW_LIM low limit val for Voltage 4 */
+/* LM80_VT5_HIGH_LIM high limit val for Voltage 5 */
+/* LM80_VT5_LOW_LIM low limit val for Voltage 5 */
+/* LM80_VT6_HIGH_LIM high limit val for Voltage 6 */
+/* LM80_VT6_LOW_LIM low limit val for Voltage 6 */
+/* LM80_THOT_LIM_UP hot temperature limit (high) */
+/* LM80_THOT_LIM_LO hot temperature limit (low) */
+/* LM80_TOS_LIM_UP OS temperature limit (high) */
+/* LM80_TOS_LIM_LO OS temperature limit (low) */
+/* LM80_FAN1_COUNT_LIM Fan 1 count limit (high) */
+/* LM80_FAN2_COUNT_LIM Fan 2 count limit (low) */
+ /* 0x3e - 0x3f reserved */
+
+#define LM80_ADDR 0x28 /* LM80 default addr */
+
+/* typedefs *******************************************************************/
+
+
+/* function prototypes ********************************************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_LM80_H */
diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h
new file mode 100644
index 000000000000..3a2ea4a4b539
--- /dev/null
+++ b/drivers/net/sk98lin/h/skaddr.h
@@ -0,0 +1,333 @@
+/******************************************************************************
+ *
+ * Name: skaddr.h
+ * Project: Gigabit Ethernet Adapters, ADDR-Module
+ * Version: $Revision: 1.29 $
+ * Date: $Date: 2003/05/13 16:57:24 $
+ * Purpose: Header file for Address Management (MC, UC, Prom).
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This module is intended to manage multicast addresses and promiscuous mode
+ * on GEnesis adapters.
+ *
+ * Include File Hierarchy:
+ *
+ * "skdrv1st.h"
+ * ...
+ * "sktypes.h"
+ * "skqueue.h"
+ * "skaddr.h"
+ * ...
+ * "skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKADDR_H
+#define __INC_SKADDR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* cplusplus */
+
+/* defines ********************************************************************/
+
+#define SK_MAC_ADDR_LEN 6 /* Length of MAC address. */
+#define SK_MAX_ADDRS 14 /* #Addrs for exact match. */
+
+/* ----- Common return values ----- */
+
+#define SK_ADDR_SUCCESS 0 /* Function returned successfully. */
+#define SK_ADDR_ILLEGAL_PORT 100 /* Port number too high. */
+#define SK_ADDR_TOO_EARLY 101 /* Function called too early. */
+
+/* ----- Clear/Add flag bits ----- */
+
+#define SK_ADDR_PERMANENT 1 /* RLMT Address */
+
+/* ----- Additional Clear flag bits ----- */
+
+#define SK_MC_SW_ONLY 2 /* Do not update HW when clearing. */
+
+/* ----- Override flag bits ----- */
+
+#define SK_ADDR_LOGICAL_ADDRESS 0
+#define SK_ADDR_VIRTUAL_ADDRESS (SK_ADDR_LOGICAL_ADDRESS) /* old */
+#define SK_ADDR_PHYSICAL_ADDRESS 1
+#define SK_ADDR_CLEAR_LOGICAL 2
+#define SK_ADDR_SET_LOGICAL 4
+
+/* ----- Override return values ----- */
+
+#define SK_ADDR_OVERRIDE_SUCCESS (SK_ADDR_SUCCESS)
+#define SK_ADDR_DUPLICATE_ADDRESS 1
+#define SK_ADDR_MULTICAST_ADDRESS 2
+
+/* ----- Partitioning of exact match table ----- */
+
+#define SK_ADDR_EXACT_MATCHES 16 /* #Exact match entries. */
+
+#define SK_ADDR_FIRST_MATCH_RLMT 1
+#define SK_ADDR_LAST_MATCH_RLMT 2
+#define SK_ADDR_FIRST_MATCH_DRV 3
+#define SK_ADDR_LAST_MATCH_DRV (SK_ADDR_EXACT_MATCHES - 1)
+
+/* ----- SkAddrMcAdd/SkAddrMcUpdate return values ----- */
+
+#define SK_MC_FILTERING_EXACT 0 /* Exact filtering. */
+#define SK_MC_FILTERING_INEXACT 1 /* Inexact filtering. */
+
+/* ----- Additional SkAddrMcAdd return values ----- */
+
+#define SK_MC_ILLEGAL_ADDRESS 2 /* Illegal address. */
+#define SK_MC_ILLEGAL_PORT 3 /* Illegal port (not the active one). */
+#define SK_MC_RLMT_OVERFLOW 4 /* Too many RLMT mc addresses. */
+
+/* Promiscuous mode bits ----- */
+
+#define SK_PROM_MODE_NONE 0 /* Normal receive. */
+#define SK_PROM_MODE_LLC 1 /* Receive all LLC frames. */
+#define SK_PROM_MODE_ALL_MC 2 /* Receive all multicast frames. */
+/* #define SK_PROM_MODE_NON_LLC 4 */ /* Receive all non-LLC frames. */
+
+/* Macros */
+
+#ifdef OLD_STUFF
+#ifndef SK_ADDR_EQUAL
+/*
+ * "&" instead of "&&" allows better optimization on IA-64.
+ * The replacement is safe here, as all bytes exist.
+ */
+#ifndef SK_ADDR_DWORD_COMPARE
+#define SK_ADDR_EQUAL(A1,A2) ( \
+ (((SK_U8 *)(A1))[5] == ((SK_U8 *)(A2))[5]) & \
+ (((SK_U8 *)(A1))[4] == ((SK_U8 *)(A2))[4]) & \
+ (((SK_U8 *)(A1))[3] == ((SK_U8 *)(A2))[3]) & \
+ (((SK_U8 *)(A1))[2] == ((SK_U8 *)(A2))[2]) & \
+ (((SK_U8 *)(A1))[1] == ((SK_U8 *)(A2))[1]) & \
+ (((SK_U8 *)(A1))[0] == ((SK_U8 *)(A2))[0]))
+#else /* SK_ADDR_DWORD_COMPARE */
+#define SK_ADDR_EQUAL(A1,A2) ( \
+ (*(SK_U32 *)&(((SK_U8 *)(A1))[2]) == *(SK_U32 *)&(((SK_U8 *)(A2))[2])) & \
+ (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0])))
+#endif /* SK_ADDR_DWORD_COMPARE */
+#endif /* SK_ADDR_EQUAL */
+#endif /* OLD_STUFF */
+
+#ifndef SK_ADDR_EQUAL
+#ifndef SK_ADDR_DWORD_COMPARE
+#define SK_ADDR_EQUAL(A1,A2) ( \
+ (((SK_U8 SK_FAR *)(A1))[5] == ((SK_U8 SK_FAR *)(A2))[5]) & \
+ (((SK_U8 SK_FAR *)(A1))[4] == ((SK_U8 SK_FAR *)(A2))[4]) & \
+ (((SK_U8 SK_FAR *)(A1))[3] == ((SK_U8 SK_FAR *)(A2))[3]) & \
+ (((SK_U8 SK_FAR *)(A1))[2] == ((SK_U8 SK_FAR *)(A2))[2]) & \
+ (((SK_U8 SK_FAR *)(A1))[1] == ((SK_U8 SK_FAR *)(A2))[1]) & \
+ (((SK_U8 SK_FAR *)(A1))[0] == ((SK_U8 SK_FAR *)(A2))[0]))
+#else /* SK_ADDR_DWORD_COMPARE */
+#define SK_ADDR_EQUAL(A1,A2) ( \
+ (*(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[4]) == \
+ *(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[4])) && \
+ (*(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[0]) == \
+ *(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[0])))
+#endif /* SK_ADDR_DWORD_COMPARE */
+#endif /* SK_ADDR_EQUAL */
+
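A minimal usage sketch of the SK_ADDR_EQUAL macro, assuming SK_U8 comes from sktypes.h and that SK_FAR expands to nothing on Linux (see skdrv1st.h later in this patch); the helper and its arguments are illustrative, not part of the module.

	/* Sketch: non-zero when two 6-byte MAC addresses match byte for byte. */
	static int ExampleSameStation(SK_U8 *pAddr1, SK_U8 *pAddr2)
	{
		return SK_ADDR_EQUAL(pAddr1, pAddr2);
	}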
+/* typedefs *******************************************************************/
+
+typedef struct s_MacAddr {
+ SK_U8 a[SK_MAC_ADDR_LEN];
+} SK_MAC_ADDR;
+
+
+/* SK_FILTER64 is used to ensure alignment of the filter. */
+typedef union s_InexactFilter {
+ SK_U8 Bytes[8];
+ SK_U64 Val; /* Dummy entry for alignment only. */
+} SK_FILTER64;
+
+
+typedef struct s_AddrNet SK_ADDR_NET;
+
+
+typedef struct s_AddrPort {
+
+/* ----- Public part (read-only) ----- */
+
+ SK_MAC_ADDR CurrentMacAddress; /* Current physical MAC Address. */
+ SK_MAC_ADDR PermanentMacAddress; /* Permanent physical MAC Address. */
+ int PromMode; /* Promiscuous Mode. */
+
+/* ----- Private part ----- */
+
+ SK_MAC_ADDR PreviousMacAddress; /* Prev. phys. MAC Address. */
+ SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */
+ SK_U8 Align01;
+
+ SK_U32 FirstExactMatchRlmt;
+ SK_U32 NextExactMatchRlmt;
+ SK_U32 FirstExactMatchDrv;
+ SK_U32 NextExactMatchDrv;
+ SK_MAC_ADDR Exact[SK_ADDR_EXACT_MATCHES];
+ SK_FILTER64 InexactFilter; /* For 64-bit hash register. */
+ SK_FILTER64 InexactRlmtFilter; /* For 64-bit hash register. */
+ SK_FILTER64 InexactDrvFilter; /* For 64-bit hash register. */
+} SK_ADDR_PORT;
+
+
+struct s_AddrNet {
+/* ----- Public part (read-only) ----- */
+
+ SK_MAC_ADDR CurrentMacAddress; /* Logical MAC Address. */
+ SK_MAC_ADDR PermanentMacAddress; /* Logical MAC Address. */
+
+/* ----- Private part ----- */
+
+ SK_U32 ActivePort; /* View of module ADDR. */
+ SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */
+ SK_U8 Align01;
+ SK_U16 Align02;
+};
+
+
+typedef struct s_Addr {
+
+/* ----- Public part (read-only) ----- */
+
+ SK_ADDR_NET Net[SK_MAX_NETS];
+ SK_ADDR_PORT Port[SK_MAX_MACS];
+
+/* ----- Private part ----- */
+} SK_ADDR;
+
+/* function prototypes ********************************************************/
+
+#ifndef SK_KR_PROTO
+
+/* Functions provided by SkAddr */
+
+/* ANSI/C++ compliant function prototypes */
+
+extern int SkAddrInit(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Level);
+
+extern int SkAddrMcClear(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int Flags);
+
+extern int SkAddrXmacMcClear(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int Flags);
+
+extern int SkAddrGmacMcClear(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int Flags);
+
+extern int SkAddrMcAdd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ SK_MAC_ADDR *pMc,
+ int Flags);
+
+extern int SkAddrXmacMcAdd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ SK_MAC_ADDR *pMc,
+ int Flags);
+
+extern int SkAddrGmacMcAdd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ SK_MAC_ADDR *pMc,
+ int Flags);
+
+extern int SkAddrMcUpdate(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber);
+
+extern int SkAddrXmacMcUpdate(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber);
+
+extern int SkAddrGmacMcUpdate(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber);
+
+extern int SkAddrOverride(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ SK_MAC_ADDR SK_FAR *pNewAddr,
+ int Flags);
+
+extern int SkAddrPromiscuousChange(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int NewPromMode);
+
+extern int SkAddrXmacPromiscuousChange(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int NewPromMode);
+
+extern int SkAddrGmacPromiscuousChange(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 PortNumber,
+ int NewPromMode);
+
+#ifndef SK_SLIM
+extern int SkAddrSwap(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 FromPortNumber,
+ SK_U32 ToPortNumber);
+#endif
+
+#else /* defined(SK_KR_PROTO) */
+
+/* Non-ANSI/C++ compliant function prototypes */
+
+#error KR-style prototypes are not yet provided.
+
+#endif /* defined(SK_KR_PROTO) */
+
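The prototypes above suggest the usual sequence when the OS multicast list changes: clear the software list, re-add every address, then program the hardware via SkAddrMcUpdate. A hedged sketch under that assumption follows; the pMcList/McCount parameters and the helper itself are illustrative.

	/* Sketch: re-program the multicast filter of one port.  Returns the
	 * filtering mode reported by SkAddrMcUpdate (SK_MC_FILTERING_EXACT
	 * or SK_MC_FILTERING_INEXACT). */
	static int ExampleSetMcList(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
		SK_MAC_ADDR *pMcList, int McCount)
	{
		int i;

		/* Drop previously configured, non-permanent multicast addresses. */
		(void)SkAddrMcClear(pAC, IoC, PortNumber, 0);

		/* Re-add each address; RLMT addresses would pass SK_ADDR_PERMANENT. */
		for (i = 0; i < McCount; i++) {
			(void)SkAddrMcAdd(pAC, IoC, PortNumber, &pMcList[i], 0);
		}

		/* Write the exact-match entries and/or the 64-bit hash to the MAC. */
		return SkAddrMcUpdate(pAC, IoC, PortNumber);
	}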
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_SKADDR_H */
diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h
new file mode 100644
index 000000000000..2b94adb93331
--- /dev/null
+++ b/drivers/net/sk98lin/h/skcsum.h
@@ -0,0 +1,219 @@
+/******************************************************************************
+ *
+ * Name: skcsum.h
+ * Project: GEnesis - SysKonnect SK-NET Gigabit Ethernet (SK-98xx)
+ * Version: $Revision: 1.10 $
+ * Date: $Date: 2003/08/20 13:59:57 $
+ * Purpose: Store/verify Internet checksum in send/receive packets.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2001 SysKonnect GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * Public header file for the "GEnesis" common module "CSUM".
+ *
+ * "GEnesis" is an abbreviation of "Gigabit Ethernet Network System in Silicon"
+ * and is the code name of this SysKonnect project.
+ *
+ * Compilation Options:
+ *
+ * SK_USE_CSUM - Define if CSUM is to be used. Otherwise, CSUM will be an
+ * empty module.
+ *
+ * SKCS_OVERWRITE_PROTO - Define to overwrite the default protocol id
+ * definitions. In this case, all SKCS_PROTO_xxx definitions must be made
+ * external.
+ *
+ * SKCS_OVERWRITE_STATUS - Define to overwrite the default return status
+ * definitions. In this case, all SKCS_STATUS_xxx definitions must be made
+ * external.
+ *
+ * Include File Hierarchy:
+ *
+ * "h/skcsum.h"
+ * "h/sktypes.h"
+ * "h/skqueue.h"
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKCSUM_H
+#define __INC_SKCSUM_H
+
+#include "h/sktypes.h"
+#include "h/skqueue.h"
+
+/* defines ********************************************************************/
+
+/*
+ * Define the default bit flags for 'SKCS_PACKET_INFO.ProtocolFlags' if no user
+ * overwrite.
+ */
+#ifndef SKCS_OVERWRITE_PROTO /* User overwrite? */
+#define SKCS_PROTO_IP 0x1 /* IP (Internet Protocol version 4) */
+#define SKCS_PROTO_TCP 0x2 /* TCP (Transmission Control Protocol) */
+#define SKCS_PROTO_UDP 0x4 /* UDP (User Datagram Protocol) */
+
+/* Indices for protocol statistics. */
+#define SKCS_PROTO_STATS_IP 0
+#define SKCS_PROTO_STATS_UDP 1
+#define SKCS_PROTO_STATS_TCP 2
+#define SKCS_NUM_PROTOCOLS 3 /* Number of supported protocols. */
+#endif /* !SKCS_OVERWRITE_PROTO */
+
+/*
+ * Define the default SKCS_STATUS type and values if no user overwrite.
+ *
+ * SKCS_STATUS_UNKNOWN_IP_VERSION - Not an IP v4 frame.
+ * SKCS_STATUS_IP_CSUM_ERROR - IP checksum error.
+ * SKCS_STATUS_IP_CSUM_ERROR_TCP - IP checksum error in TCP frame.
+ *	SKCS_STATUS_IP_CSUM_ERROR_UDP - IP checksum error in UDP frame.
+ * SKCS_STATUS_IP_FRAGMENT - IP fragment (IP checksum ok).
+ * SKCS_STATUS_IP_CSUM_OK - IP checksum ok (not a TCP or UDP frame).
+ * SKCS_STATUS_TCP_CSUM_ERROR - TCP checksum error (IP checksum ok).
+ * SKCS_STATUS_UDP_CSUM_ERROR - UDP checksum error (IP checksum ok).
+ * SKCS_STATUS_TCP_CSUM_OK - IP and TCP checksum ok.
+ * SKCS_STATUS_UDP_CSUM_OK - IP and UDP checksum ok.
+ * SKCS_STATUS_IP_CSUM_OK_NO_UDP - IP checksum OK and no UDP checksum.
+ */
+#ifndef SKCS_OVERWRITE_STATUS /* User overwrite? */
+#define SKCS_STATUS int /* Define status type. */
+
+#define SKCS_STATUS_UNKNOWN_IP_VERSION 1
+#define SKCS_STATUS_IP_CSUM_ERROR 2
+#define SKCS_STATUS_IP_FRAGMENT 3
+#define SKCS_STATUS_IP_CSUM_OK 4
+#define SKCS_STATUS_TCP_CSUM_ERROR 5
+#define SKCS_STATUS_UDP_CSUM_ERROR 6
+#define SKCS_STATUS_TCP_CSUM_OK 7
+#define SKCS_STATUS_UDP_CSUM_OK 8
+/* needed for Microsoft */
+#define SKCS_STATUS_IP_CSUM_ERROR_UDP 9
+#define SKCS_STATUS_IP_CSUM_ERROR_TCP 10
+/* UDP checksum may be omitted */
+#define SKCS_STATUS_IP_CSUM_OK_NO_UDP 11
+#endif /* !SKCS_OVERWRITE_STATUS */
+
+/* Clear protocol statistics event. */
+#define SK_CSUM_EVENT_CLEAR_PROTO_STATS 1
+
+/*
+ * Add two values in one's complement.
+ *
+ * Note: One of the two input values may be "longer" than 16-bit, but then the
+ * resulting sum may be 17 bits long. In this case, add zero to the result using
+ * SKCS_OC_ADD() again.
+ *
+ * Result = Value1 + Value2
+ */
+#define SKCS_OC_ADD(Result, Value1, Value2) { \
+ unsigned long Sum; \
+ \
+ Sum = (unsigned long) (Value1) + (unsigned long) (Value2); \
+ /* Add-in any carry. */ \
+ (Result) = (Sum & 0xffff) + (Sum >> 16); \
+}
+
+/*
+ * Subtract two values in one's complement.
+ *
+ * Result = Value1 - Value2
+ */
+#define SKCS_OC_SUB(Result, Value1, Value2) \
+ SKCS_OC_ADD((Result), (Value1), ~(Value2) & 0xffff)
+
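As a quick illustration of the one's-complement helpers above, this hedged sketch sums a buffer of 16-bit words with SKCS_OC_ADD, folding the carry at every step; it assumes SK_U16 from sktypes.h and is not the driver's own checksum routine.

	/* Sketch: one's-complement sum of 'Words' 16-bit values (host byte order).
	 * A real Internet checksum would also handle byte order and an odd
	 * trailing byte.  The accumulator is deliberately not named 'Sum' to
	 * avoid clashing with the local variable inside SKCS_OC_ADD(). */
	static unsigned ExampleOcSum(const SK_U16 *pData, unsigned Words)
	{
		unsigned Acc = 0;
		unsigned i;

		for (i = 0; i < Words; i++) {
			SKCS_OC_ADD(Acc, Acc, pData[i]);
		}
		SKCS_OC_ADD(Acc, Acc, 0);	/* fold any remaining carry */
		return Acc;
	}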
+/* typedefs *******************************************************************/
+
+/*
+ * SKCS_PROTO_STATS - The CSUM protocol statistics structure.
+ *
+ * There is one instance of this structure for each protocol supported.
+ */
+typedef struct s_CsProtocolStatistics {
+ SK_U64 RxOkCts; /* Receive checksum ok. */
+ SK_U64 RxUnableCts; /* Unable to verify receive checksum. */
+ SK_U64 RxErrCts; /* Receive checksum error. */
+ SK_U64 TxOkCts; /* Transmit checksum ok. */
+ SK_U64 TxUnableCts; /* Unable to calculate checksum in hw. */
+} SKCS_PROTO_STATS;
+
+/*
+ * s_Csum - The CSUM module context structure.
+ */
+typedef struct s_Csum {
+ /* Enabled receive SK_PROTO_XXX bit flags. */
+ unsigned ReceiveFlags[SK_MAX_NETS];
+#ifdef TX_CSUM
+ unsigned TransmitFlags[SK_MAX_NETS];
+#endif /* TX_CSUM */
+
+ /* The protocol statistics structure; one per supported protocol. */
+ SKCS_PROTO_STATS ProtoStats[SK_MAX_NETS][SKCS_NUM_PROTOCOLS];
+} SK_CSUM;
+
+/*
+ * SKCS_PACKET_INFO - The packet information structure.
+ */
+typedef struct s_CsPacketInfo {
+	/* Bit field specifying the desired/found protocols. */
+ unsigned ProtocolFlags;
+
+ /* Length of complete IP header, including any option fields. */
+ unsigned IpHeaderLength;
+
+ /* IP header checksum. */
+ unsigned IpHeaderChecksum;
+
+ /* TCP/UDP pseudo header checksum. */
+ unsigned PseudoHeaderChecksum;
+} SKCS_PACKET_INFO;
+
+/* function prototypes ********************************************************/
+
+#ifndef SK_CS_CALCULATE_CHECKSUM
+extern unsigned SkCsCalculateChecksum(
+ void *pData,
+ unsigned Length);
+#endif /* SK_CS_CALCULATE_CHECKSUM */
+
+extern int SkCsEvent(
+ SK_AC *pAc,
+ SK_IOC Ioc,
+ SK_U32 Event,
+ SK_EVPARA Param);
+
+extern SKCS_STATUS SkCsGetReceiveInfo(
+ SK_AC *pAc,
+ void *pIpHeader,
+ unsigned Checksum1,
+ unsigned Checksum2,
+ int NetNumber);
+
+extern void SkCsGetSendInfo(
+ SK_AC *pAc,
+ void *pIpHeader,
+ SKCS_PACKET_INFO *pPacketInfo,
+ int NetNumber);
+
+extern void SkCsSetReceiveFlags(
+ SK_AC *pAc,
+ unsigned ReceiveFlags,
+ unsigned *pChecksum1Offset,
+ unsigned *pChecksum2Offset,
+ int NetNumber);
+
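A hedged sketch of how a receive path might consume SkCsGetReceiveInfo(), feeding it the two hardware checksums taken from the RX descriptor; the wrapper and its parameter names are illustrative.

	/* Sketch: classify one received frame.  Returns 1 if the stack may skip
	 * software checksum verification, 0 if it must verify it itself. */
	static int ExampleRxCsumOk(SK_AC *pAC, void *pIpHeader,
		unsigned Checksum1, unsigned Checksum2, int NetNumber)
	{
		switch (SkCsGetReceiveInfo(pAC, pIpHeader, Checksum1, Checksum2,
			NetNumber)) {
		case SKCS_STATUS_IP_CSUM_OK:
		case SKCS_STATUS_TCP_CSUM_OK:
		case SKCS_STATUS_UDP_CSUM_OK:
			return 1;	/* verified in hardware */
		default:
			return 0;	/* unknown, fragmented or erroneous frame */
		}
	}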
+#endif /* __INC_SKCSUM_H */
diff --git a/drivers/net/sk98lin/h/skdebug.h b/drivers/net/sk98lin/h/skdebug.h
new file mode 100644
index 000000000000..3cba171d74b2
--- /dev/null
+++ b/drivers/net/sk98lin/h/skdebug.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Name: skdebug.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.14 $
+ * Date: $Date: 2003/05/13 17:26:00 $
+ * Purpose: SK specific DEBUG support
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKDEBUG_H
+#define __INC_SKDEBUG_H
+
+#ifdef DEBUG
+#ifndef SK_DBG_MSG
+#define SK_DBG_MSG(pAC,comp,cat,arg) \
+ if ( ((comp) & SK_DBG_CHKMOD(pAC)) && \
+ ((cat) & SK_DBG_CHKCAT(pAC)) ) { \
+ SK_DBG_PRINTF arg ; \
+ }
+#endif
+#else
+#define SK_DBG_MSG(pAC,comp,lev,arg)
+#endif
+
+/* PLEASE NOTE:
+ * ============
+ * Due to restrictions of the kernel printf routines, do not use format
+ * identifiers other than: %x %d %c %s .
+ * Never use combined format identifiers such as %lx or %ld in your
+ * printf argument (arg), because some OS-specific kernel printf
+ * implementations may support only the basic identifiers.
+ */
+
+/* Debug modules */
+
+#define SK_DBGMOD_MERR 0x00000001L /* general module error indication */
+#define SK_DBGMOD_HWM 0x00000002L /* Hardware init module */
+#define SK_DBGMOD_RLMT 0x00000004L /* RLMT module */
+#define SK_DBGMOD_VPD 0x00000008L /* VPD module */
+#define SK_DBGMOD_I2C 0x00000010L /* I2C module */
+#define SK_DBGMOD_PNMI 0x00000020L /* PNMI module */
+#define SK_DBGMOD_CSUM 0x00000040L /* CSUM module */
+#define SK_DBGMOD_ADDR 0x00000080L /* ADDR module */
+#define SK_DBGMOD_PECP 0x00000100L /* PECP module */
+#define SK_DBGMOD_POWM 0x00000200L /* Power Management module */
+
+/* Debug events */
+
+#define SK_DBGCAT_INIT 0x00000001L /* module/driver initialization */
+#define SK_DBGCAT_CTRL 0x00000002L /* controlling devices */
+#define SK_DBGCAT_ERR 0x00000004L /* error handling paths */
+#define SK_DBGCAT_TX 0x00000008L /* transmit path */
+#define SK_DBGCAT_RX 0x00000010L /* receive path */
+#define SK_DBGCAT_IRQ 0x00000020L /* general IRQ handling */
+#define SK_DBGCAT_QUEUE 0x00000040L /* any queue management */
+#define SK_DBGCAT_DUMP 0x00000080L /* large data output e.g. hex dump */
+#define SK_DBGCAT_FATAL 0x00000100L /* fatal error */
+
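In DEBUG builds SK_DBG_MSG prints only when both the module bit and the category bit are enabled for the adapter context. A hedged usage sketch follows; SK_AC is the adapter context declared in skdrv1st.h, the helper and message text are illustrative, and the format string sticks to the basic identifiers noted above.

	/* Sketch: trace line from the ADDR module during initialization.
	 * Note the double parentheses: 'arg' is handed to SK_DBG_PRINTF as is. */
	static void ExampleAddrTrace(SK_AC *pAC, int PortNumber)
	{
		SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
			("SkAddrInit: port %d\n", PortNumber));
	}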
+#endif /* __INC_SKDEBUG_H */
diff --git a/drivers/net/sk98lin/h/skdrv1st.h b/drivers/net/sk98lin/h/skdrv1st.h
new file mode 100644
index 000000000000..308440bd0e12
--- /dev/null
+++ b/drivers/net/sk98lin/h/skdrv1st.h
@@ -0,0 +1,191 @@
+/******************************************************************************
+ *
+ * Name: skdrv1st.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.4 $
+ * Date: $Date: 2003/11/12 14:28:14 $
+ * Purpose: First header file for driver and all other modules
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This is the first include file of the driver, which includes all
+ * necessary system header files and some of the GEnesis header files.
+ * It also defines some basic items.
+ *
+ * Include File Hierarchy:
+ *
+ * see skge.c
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKDRV1ST_H
+#define __INC_SKDRV1ST_H
+
+/* Check kernel version */
+#include <linux/version.h>
+
+typedef struct s_AC SK_AC;
+
+/* Set card versions */
+#define SK_FAR
+
+/* override some default functions with optimized linux functions */
+
+#define SK_PNMI_STORE_U16(p,v) memcpy((char*)(p),(char*)&(v),2)
+#define SK_PNMI_STORE_U32(p,v) memcpy((char*)(p),(char*)&(v),4)
+#define SK_PNMI_STORE_U64(p,v) memcpy((char*)(p),(char*)&(v),8)
+#define SK_PNMI_READ_U16(p,v) memcpy((char*)&(v),(char*)(p),2)
+#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),4)
+#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),8)
+
+#define SK_ADDR_EQUAL(a1,a2) (!memcmp(a1,a2,6))
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <net/checksum.h>
+
+#define SK_CS_CALCULATE_CHECKSUM
+#ifndef CONFIG_X86_64
+#define SkCsCalculateChecksum(p,l) ((~ip_compute_csum(p, l)) & 0xffff)
+#else
+#define SkCsCalculateChecksum(p,l) ((~ip_fast_csum(p, l)) & 0xffff)
+#endif
+
+#include "h/sktypes.h"
+#include "h/skerror.h"
+#include "h/skdebug.h"
+#include "h/lm80.h"
+#include "h/xmac_ii.h"
+
+#ifdef __LITTLE_ENDIAN
+#define SK_LITTLE_ENDIAN
+#else
+#define SK_BIG_ENDIAN
+#endif
+
+#define SK_NET_DEVICE net_device
+
+
+/* driver time is kept in units of 1/SK_TICKS_PER_SEC seconds (see SkOsGetTimeCurrent) */
+#define SK_TICKS_PER_SEC 100
+
+#define SK_MEM_MAPPED_IO
+
+/* #define SK_RLMT_SLOW_LOOKAHEAD */
+
+#define SK_MAX_MACS 2
+#define SK_MAX_NETS 2
+
+#define SK_IOC char __iomem *
+
+typedef struct s_DrvRlmtMbuf SK_MBUF;
+
+#define SK_CONST64 INT64_C
+#define SK_CONSTU64 UINT64_C
+
+#define SK_MEMCPY(dest,src,size) memcpy(dest,src,size)
+#define SK_MEMCMP(s1,s2,size) memcmp(s1,s2,size)
+#define SK_MEMSET(dest,val,size) memset(dest,val,size)
+#define SK_STRLEN(pStr) strlen((char*)(pStr))
+#define SK_STRNCPY(pDest,pSrc,size) strncpy((char*)(pDest),(char*)(pSrc),size)
+#define SK_STRCMP(pStr1,pStr2) strcmp((char*)(pStr1),(char*)(pStr2))
+
+/* macros to access the adapter */
+#define SK_OUT8(b,a,v) writeb((v), ((b)+(a)))
+#define SK_OUT16(b,a,v) writew((v), ((b)+(a)))
+#define SK_OUT32(b,a,v) writel((v), ((b)+(a)))
+#define SK_IN8(b,a,pv) (*(pv) = readb((b)+(a)))
+#define SK_IN16(b,a,pv) (*(pv) = readw((b)+(a)))
+#define SK_IN32(b,a,pv) (*(pv) = readl((b)+(a)))
+
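The wrappers above take the mapped register base (an __iomem pointer, see the SK_IOC definition below) plus a register offset. A hedged read-modify-write sketch; the register offset and bit value are arbitrary placeholders, since the real offsets live in skgehw.h.

	/* Sketch: set one bit in a 32-bit adapter register. */
	static void ExampleSetRegBit(SK_IOC IoBase, int RegOffset, SK_U32 Bit)
	{
		SK_U32 Val;

		SK_IN32(IoBase, RegOffset, &Val);	/* read ...              */
		SK_OUT32(IoBase, RegOffset, Val | Bit);	/* ... modify/write back */
	}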
+#define int8_t char
+#define int16_t short
+#define int32_t long
+#define int64_t long long
+#define uint8_t u_char
+#define uint16_t u_short
+#define uint32_t u_long
+#define uint64_t unsigned long long
+#define t_scalar_t int
+#define t_uscalar_t unsigned int
+#define uintptr_t unsigned long
+
+#define __CONCAT__(A,B) A##B
+
+#define INT32_C(a) __CONCAT__(a,L)
+#define INT64_C(a) __CONCAT__(a,LL)
+#define UINT32_C(a) __CONCAT__(a,UL)
+#define UINT64_C(a) __CONCAT__(a,ULL)
+
+#ifdef DEBUG
+#define SK_DBG_PRINTF printk
+#ifndef SK_DEBUG_CHKMOD
+#define SK_DEBUG_CHKMOD 0
+#endif
+#ifndef SK_DEBUG_CHKCAT
+#define SK_DEBUG_CHKCAT 0
+#endif
+/* those come from the makefile */
+#define SK_DBG_CHKMOD(pAC) (SK_DEBUG_CHKMOD)
+#define SK_DBG_CHKCAT(pAC) (SK_DEBUG_CHKCAT)
+
+extern void SkDbgPrintf(const char *format,...);
+
+#define SK_DBGMOD_DRV 0x00010000
+
+/**** possible driver debug categories ********************************/
+#define SK_DBGCAT_DRV_ENTRY 0x00010000
+#define SK_DBGCAT_DRV_SAP 0x00020000
+#define SK_DBGCAT_DRV_MCA 0x00040000
+#define SK_DBGCAT_DRV_TX_PROGRESS 0x00080000
+#define SK_DBGCAT_DRV_RX_PROGRESS 0x00100000
+#define SK_DBGCAT_DRV_PROGRESS 0x00200000
+#define SK_DBGCAT_DRV_MSG 0x00400000
+#define SK_DBGCAT_DRV_PROM 0x00800000
+#define SK_DBGCAT_DRV_TX_FRAME 0x01000000
+#define SK_DBGCAT_DRV_ERROR 0x02000000
+#define SK_DBGCAT_DRV_INT_SRC 0x04000000
+#define SK_DBGCAT_DRV_EVENT 0x08000000
+
+#endif
+
+#define SK_ERR_LOG SkErrorLog
+
+extern void SkErrorLog(SK_AC*, int, int, char*);
+
+#endif
+
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
new file mode 100644
index 000000000000..542cec57f86a
--- /dev/null
+++ b/drivers/net/sk98lin/h/skdrv2nd.h
@@ -0,0 +1,456 @@
+/******************************************************************************
+ *
+ * Name: skdrv2nd.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.10 $
+ * Date: $Date: 2003/12/11 16:04:45 $
+ * Purpose: Second header file for driver and all other modules
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This is the second include file of the driver, which includes all other
+ * necessary files and defines all structures and constants used by the
+ * driver and the common modules.
+ *
+ * Include File Hierarchy:
+ *
+ * see skge.c
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKDRV2ND_H
+#define __INC_SKDRV2ND_H
+
+#include "h/skqueue.h"
+#include "h/skgehwt.h"
+#include "h/sktimer.h"
+#include "h/ski2c.h"
+#include "h/skgepnmi.h"
+#include "h/skvpd.h"
+#include "h/skgehw.h"
+#include "h/skgeinit.h"
+#include "h/skaddr.h"
+#include "h/skgesirq.h"
+#include "h/skcsum.h"
+#include "h/skrlmt.h"
+#include "h/skgedrv.h"
+
+
+extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned);
+extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*);
+extern SK_U64 SkOsGetTime(SK_AC*);
+extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*);
+extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*);
+extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*);
+extern int SkPciWriteCfgDWord(SK_AC*, int, SK_U32);
+extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16);
+extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8);
+extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA);
+
+#ifdef SK_DIAG_SUPPORT
+extern int SkDrvEnterDiagMode(SK_AC *pAc);
+extern int SkDrvLeaveDiagMode(SK_AC *pAc);
+#endif
+
+struct s_DrvRlmtMbuf {
+ SK_MBUF *pNext; /* Pointer to next RLMT Mbuf. */
+ SK_U8 *pData; /* Data buffer (virtually contig.). */
+ unsigned Size; /* Data buffer size. */
+ unsigned Length; /* Length of packet (<= Size). */
+ SK_U32 PortIdx; /* Receiving/transmitting port. */
+#ifdef SK_RLMT_MBUF_PRIVATE
+ SK_RLMT_MBUF Rlmt; /* Private part for RLMT. */
+#endif /* SK_RLMT_MBUF_PRIVATE */
+ struct sk_buff *pOs; /* Pointer to message block */
+};
+
+
+/*
+ * Time macros
+ */
+#if SK_TICKS_PER_SEC == 100
+#define SK_PNMI_HUNDREDS_SEC(t) (t)
+#else
+#define SK_PNMI_HUNDREDS_SEC(t) ((((unsigned long)t) * 100) / \
+ (SK_TICKS_PER_SEC))
+#endif
+
+/*
+ * New SkOsGetTime
+ */
+#define SkOsGetTimeCurrent(pAC, pUsec) {\
+ struct timeval t;\
+ do_gettimeofday(&t);\
+ *pUsec = ((((t.tv_sec) * 1000000L)+t.tv_usec)/10000);\
+}
+
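Despite the parameter name pUsec, the macro above stores time in ticks of 1/100 second (it divides microseconds by 10000, matching SK_TICKS_PER_SEC). A hedged sketch measuring an interval in those ticks; the helper is illustrative and assumes SK_U64 from sktypes.h.

	/* Sketch: elapsed time, in 10 ms ticks, around some operation. */
	static SK_U64 ExampleElapsedTicks(SK_AC *pAC)
	{
		SK_U64 Start, Now;

		SkOsGetTimeCurrent(pAC, &Start);
		/* ... the measured operation would run here ... */
		SkOsGetTimeCurrent(pAC, &Now);
		return Now - Start;		/* 1 tick == 1/100 s */
	}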
+
+/*
+ * ioctl definitions
+ */
+#define SK_IOCTL_BASE (SIOCDEVPRIVATE)
+#define SK_IOCTL_GETMIB (SK_IOCTL_BASE + 0)
+#define SK_IOCTL_SETMIB (SK_IOCTL_BASE + 1)
+#define SK_IOCTL_PRESETMIB (SK_IOCTL_BASE + 2)
+#define SK_IOCTL_GEN (SK_IOCTL_BASE + 3)
+#define SK_IOCTL_DIAG (SK_IOCTL_BASE + 4)
+
+typedef struct s_IOCTL SK_GE_IOCTL;
+
+struct s_IOCTL {
+ char __user * pData;
+ unsigned int Len;
+};
+
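The private ioctls above sit in the SIOCDEVPRIVATE range, so they are presumably invoked from user space with ifr_data pointing at an SK_GE_IOCTL descriptor. The sketch below rests entirely on that assumption; the interface name, buffer handling, and the mirror structure (needed because __user is a kernel-only annotation) are illustrative.

	/* User-space sketch (assumption: the driver copies this layout in from
	 * ifr_data).  Error handling is omitted for brevity. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>

	struct sk_ioctl_mirror {		/* mirrors SK_GE_IOCTL */
		char		*pData;
		unsigned int	Len;
	};

	static int example_get_mib(int Fd, char *pBuf, unsigned int BufLen)
	{
		struct ifreq Ifr;
		struct sk_ioctl_mirror Io;

		memset(&Ifr, 0, sizeof(Ifr));
		strncpy(Ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		Io.pData = pBuf;
		Io.Len = BufLen;
		Ifr.ifr_data = (char *)&Io;
		/* SIOCDEVPRIVATE + 0 corresponds to SK_IOCTL_GETMIB above */
		return ioctl(Fd, SIOCDEVPRIVATE + 0, &Ifr);
	}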
+
+/*
+ * define sizes of descriptor rings in bytes
+ */
+
+#define TX_RING_SIZE (8*1024)
+#define RX_RING_SIZE (24*1024)
+
+/*
+ * Buffer size for ethernet packets
+ */
+#define ETH_BUF_SIZE 1540
+#define ETH_MAX_MTU 1514
+#define ETH_MIN_MTU 60
+#define ETH_MULTICAST_BIT 0x01
+#define SK_JUMBO_MTU 9000
+
+/*
+ * transmit priority selects the queue: LOW=asynchronous, HIGH=synchronous
+ */
+#define TX_PRIO_LOW 0
+#define TX_PRIO_HIGH 1
+
+/*
+ * alignment of rx/tx descriptors
+ */
+#define DESCR_ALIGN 64
+
+/*
+ * definitions for pnmi. TODO
+ */
+#define SK_DRIVER_RESET(pAC, IoC) 0
+#define SK_DRIVER_SENDEVENT(pAC, IoC) 0
+#define SK_DRIVER_SELFTEST(pAC, IoC) 0
+/* To get the MTU you must add your own function */
+#define SK_DRIVER_GET_MTU(pAc,IoC,i) 0
+#define SK_DRIVER_SET_MTU(pAc,IoC,i,v) 0
+#define SK_DRIVER_PRESET_MTU(pAc,IoC,i,v) 0
+
+/*
+** Interim definition of SK_DRV_TIMER placed in this file until
+** common modules have been finalized
+*/
+#define SK_DRV_TIMER 11
+#define SK_DRV_MODERATION_TIMER 1
+#define SK_DRV_MODERATION_TIMER_LENGTH 1000000 /* 1 second */
+#define SK_DRV_RX_CLEANUP_TIMER 2
+#define SK_DRV_RX_CLEANUP_TIMER_LENGTH 1000000 /* 1 second */
+
+/*
+** Definitions regarding transmitting frames
+** and calculating checksums.
+*/
+#define C_LEN_ETHERMAC_HEADER_DEST_ADDR 6
+#define C_LEN_ETHERMAC_HEADER_SRC_ADDR 6
+#define C_LEN_ETHERMAC_HEADER_LENTYPE 2
+#define C_LEN_ETHERMAC_HEADER ( (C_LEN_ETHERMAC_HEADER_DEST_ADDR) + \
+ (C_LEN_ETHERMAC_HEADER_SRC_ADDR) + \
+ (C_LEN_ETHERMAC_HEADER_LENTYPE) )
+
+#define C_LEN_ETHERMTU_MINSIZE 46
+#define C_LEN_ETHERMTU_MAXSIZE_STD 1500
+#define C_LEN_ETHERMTU_MAXSIZE_JUMBO 9000
+
+#define C_LEN_ETHERNET_MINSIZE ( (C_LEN_ETHERMAC_HEADER) + \
+ (C_LEN_ETHERMTU_MINSIZE) )
+
+#define C_OFFSET_IPHEADER C_LEN_ETHERMAC_HEADER
+#define C_OFFSET_IPHEADER_IPPROTO 9
+#define C_OFFSET_TCPHEADER_TCPCS 16
+#define C_OFFSET_UDPHEADER_UDPCS 6
+
+#define C_OFFSET_IPPROTO ( (C_LEN_ETHERMAC_HEADER) + \
+ (C_OFFSET_IPHEADER_IPPROTO) )
+
+#define C_PROTO_ID_UDP 17 /* refer to RFC 790 or Stevens' */
+#define C_PROTO_ID_TCP 6 /* TCP/IP illustrated for details */
+
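The offsets above let the transmit path tell the hardware where to start summing and where to write the result. A hedged sketch computing those two positions for a TCP frame; the helper and its parameters are illustrative, and IpHeaderLength is assumed to include any IP options.

	/* Sketch: hardware checksum start/write offsets for a TCP frame,
	 * counted from the first byte of the Ethernet header. */
	static void ExampleTcpCsumOffsets(unsigned IpHeaderLength,
		unsigned *pStart, unsigned *pWrite)
	{
		/* Summing starts at the first byte of the IP payload ... */
		*pStart = C_LEN_ETHERMAC_HEADER + IpHeaderLength;
		/* ... and the result lands in the TCP checksum field. */
		*pWrite = *pStart + C_OFFSET_TCPHEADER_TCPCS;
	}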
+/* TX and RX descriptors *****************************************************/
+
+typedef struct s_RxD RXD; /* the receive descriptor */
+
+struct s_RxD {
+ volatile SK_U32 RBControl; /* Receive Buffer Control */
+ SK_U32 VNextRxd; /* Next receive descriptor,low dword */
+ SK_U32 VDataLow; /* Receive buffer Addr, low dword */
+ SK_U32 VDataHigh; /* Receive buffer Addr, high dword */
+ SK_U32 FrameStat; /* Receive Frame Status word */
+ SK_U32 TimeStamp; /* Time stamp from XMAC */
+ SK_U32 TcpSums; /* TCP Sum 2 / TCP Sum 1 */
+ SK_U32 TcpSumStarts; /* TCP Sum Start 2 / TCP Sum Start 1 */
+ RXD *pNextRxd; /* Pointer to next Rxd */
+ struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */
+};
+
+typedef struct s_TxD TXD; /* the transmit descriptor */
+
+struct s_TxD {
+ volatile SK_U32 TBControl; /* Transmit Buffer Control */
+ SK_U32 VNextTxd; /* Next transmit descriptor,low dword */
+ SK_U32 VDataLow; /* Transmit Buffer Addr, low dword */
+ SK_U32 VDataHigh; /* Transmit Buffer Addr, high dword */
+ SK_U32 FrameStat; /* Transmit Frame Status Word */
+ SK_U32 TcpSumOfs; /* Reserved / TCP Sum Offset */
+ SK_U16 TcpSumSt; /* TCP Sum Start */
+ SK_U16 TcpSumWr; /* TCP Sum Write */
+ SK_U32 TcpReserved; /* not used */
+ TXD *pNextTxd; /* Pointer to next Txd */
+ struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */
+};
+
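Each software ring is a circular list of these descriptors. A hedged sketch of chaining an already-allocated array into such a ring; the hardware link field VNextRxd is left to the real setup code, since it needs the descriptor's bus address from the DMA mapping.

	/* Sketch: chain 'Count' receive descriptors into a circular software list. */
	static void ExampleLinkRxRing(RXD *pRing, int Count)
	{
		int i;

		for (i = 0; i < Count; i++) {
			/* the last descriptor wraps back to the first one */
			pRing[i].pNextRxd = &pRing[(i + 1) % Count];
			pRing[i].pMBuf = NULL;		/* no buffer attached yet */
		}
	}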
+/* Used interrupt bits in the interrupts source register *********************/
+
+#define DRIVER_IRQS ((IS_IRQ_SW) | \
+ (IS_R1_F) |(IS_R2_F) | \
+ (IS_XS1_F) |(IS_XA1_F) | \
+ (IS_XS2_F) |(IS_XA2_F))
+
+#define SPECIAL_IRQS ((IS_HW_ERR) |(IS_I2C_READY) | \
+ (IS_EXT_REG) |(IS_TIMINT) | \
+ (IS_PA_TO_RX1) |(IS_PA_TO_RX2) | \
+ (IS_PA_TO_TX1) |(IS_PA_TO_TX2) | \
+ (IS_MAC1) |(IS_LNK_SYNC_M1)| \
+ (IS_MAC2) |(IS_LNK_SYNC_M2)| \
+ (IS_R1_C) |(IS_R2_C) | \
+ (IS_XS1_C) |(IS_XA1_C) | \
+ (IS_XS2_C) |(IS_XA2_C))
+
+#define IRQ_MASK ((IS_IRQ_SW) | \
+ (IS_R1_B) |(IS_R1_F) |(IS_R2_B) |(IS_R2_F) | \
+ (IS_XS1_B) |(IS_XS1_F) |(IS_XA1_B)|(IS_XA1_F)| \
+ (IS_XS2_B) |(IS_XS2_F) |(IS_XA2_B)|(IS_XA2_F)| \
+ (IS_HW_ERR) |(IS_I2C_READY)| \
+ (IS_EXT_REG) |(IS_TIMINT) | \
+ (IS_PA_TO_RX1) |(IS_PA_TO_RX2)| \
+ (IS_PA_TO_TX1) |(IS_PA_TO_TX2)| \
+ (IS_MAC1) |(IS_MAC2) | \
+ (IS_R1_C) |(IS_R2_C) | \
+ (IS_XS1_C) |(IS_XA1_C) | \
+ (IS_XS2_C) |(IS_XA2_C))
+
+#define IRQ_HWE_MASK (IS_ERR_MSK) /* enable all HW irqs */
+
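IRQ_MASK is the value a driver would load into the interrupt mask register to receive exactly the events it handles, with IRQ_HWE_MASK covering the hardware-error sources. A hedged sketch, assuming B0_IMSK and B0_HWE_IMSK from skgehw.h (added later in this patch) and the SK_OUT32 wrapper from skdrv1st.h.

	/* Sketch: enable the driver's normal and hardware-error interrupt sources. */
	static void ExampleEnableIrqs(SK_AC *pAC)
	{
		SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK);
		SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
	}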
+typedef struct s_DevNet DEV_NET;
+
+struct s_DevNet {
+ int PortNr;
+ int NetNr;
+ int Mtu;
+ int Up;
+ SK_AC *pAC;
+};
+
+typedef struct s_TxPort TX_PORT;
+
+struct s_TxPort {
+ /* the transmit descriptor rings */
+ caddr_t pTxDescrRing; /* descriptor area memory */
+ SK_U64 VTxDescrRing; /* descr. area bus virt. addr. */
+ TXD *pTxdRingHead; /* Head of Tx rings */
+ TXD *pTxdRingTail; /* Tail of Tx rings */
+ TXD *pTxdRingPrev; /* descriptor sent previously */
+	int		TxdRingFree;	/* # of free entries */
+ spinlock_t TxDesRingLock; /* serialize descriptor accesses */
+ SK_IOC HwAddr; /* bmu registers address */
+ int PortIndex; /* index number of port (0 or 1) */
+};
+
+typedef struct s_RxPort RX_PORT;
+
+struct s_RxPort {
+ /* the receive descriptor rings */
+ caddr_t pRxDescrRing; /* descriptor area memory */
+ SK_U64 VRxDescrRing; /* descr. area bus virt. addr. */
+ RXD *pRxdRingHead; /* Head of Rx rings */
+ RXD *pRxdRingTail; /* Tail of Rx rings */
+ RXD *pRxdRingPrev; /* descriptor given to BMU previously */
+	int		RxdRingFree;	/* # of free entries */
+ spinlock_t RxDesRingLock; /* serialize descriptor accesses */
+ int RxFillLimit; /* limit for buffers in ring */
+ SK_IOC HwAddr; /* bmu registers address */
+ int PortIndex; /* index number of port (0 or 1) */
+};
+
+/* Definitions needed for interrupt moderation *******************************/
+
+#define IRQ_EOF_AS_TX ((IS_XA1_F) | (IS_XA2_F))
+#define IRQ_EOF_SY_TX ((IS_XS1_F) | (IS_XS2_F))
+#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX)| (IRQ_EOF_SY_TX))
+#define IRQ_MASK_RX_ONLY ((IS_R1_F) | (IS_R2_F))
+#define IRQ_MASK_SP_ONLY (SPECIAL_IRQS)
+#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY)| (IRQ_MASK_RX_ONLY))
+#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY))
+#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY))
+#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX))
+
+#define C_INT_MOD_NONE 1
+#define C_INT_MOD_STATIC 2
+#define C_INT_MOD_DYNAMIC 4
+
+#define C_CLK_FREQ_GENESIS 53215000 /* shorter: 53.125 MHz */
+#define C_CLK_FREQ_YUKON 78215000 /* shorter: 78.125 MHz */
+
+#define C_INTS_PER_SEC_DEFAULT 2000
+#define C_INT_MOD_ENABLE_PERCENTAGE 50 /* if higher 50% enable */
+#define C_INT_MOD_DISABLE_PERCENTAGE 50 /* if lower 50% disable */
+#define C_INT_MOD_IPS_LOWER_RANGE 30
+#define C_INT_MOD_IPS_UPPER_RANGE 40000
+
+
+typedef struct s_DynIrqModInfo DIM_INFO;
+struct s_DynIrqModInfo {
+ unsigned long PrevTimeVal;
+ unsigned int PrevSysLoad;
+ unsigned int PrevUsedTime;
+ unsigned int PrevTotalTime;
+ int PrevUsedDescrRatio;
+ int NbrProcessedDescr;
+ SK_U64 PrevPort0RxIntrCts;
+ SK_U64 PrevPort1RxIntrCts;
+ SK_U64 PrevPort0TxIntrCts;
+ SK_U64 PrevPort1TxIntrCts;
+ SK_BOOL ModJustEnabled; /* Moderation just enabled yes/no */
+
+ int MaxModIntsPerSec; /* Moderation Threshold */
+ int MaxModIntsPerSecUpperLimit; /* Upper limit for DIM */
+ int MaxModIntsPerSecLowerLimit; /* Lower limit for DIM */
+
+ long MaskIrqModeration; /* ModIrqType (eg. 'TxRx') */
+ SK_BOOL DisplayStats; /* Stats yes/no */
+ SK_BOOL AutoSizing; /* Resize DIM-timer on/off */
+ int IntModTypeSelect; /* EnableIntMod (eg. 'dynamic') */
+
+ SK_TIMER ModTimer; /* just some timer */
+};
+
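One plausible reading of the enable/disable percentages defined above, stated here as an assumption rather than the driver's actual algorithm: moderation is switched on when the measured interrupt rate rises past C_INT_MOD_ENABLE_PERCENTAGE of MaxModIntsPerSec and off again when it drops below C_INT_MOD_DISABLE_PERCENTAGE. SK_BOOL, SK_TRUE and SK_FALSE are assumed to come from sktypes.h; the helper is illustrative.

	/* Sketch: decide whether static moderation should currently be active. */
	static SK_BOOL ExampleModerationWanted(int MaxModIntsPerSec,
		SK_U64 IntsPerSec, SK_BOOL CurrentlyOn)
	{
		if (IntsPerSec * 100 > (SK_U64)MaxModIntsPerSec * C_INT_MOD_ENABLE_PERCENTAGE)
			return SK_TRUE;		/* busy: coalesce interrupts */
		if (IntsPerSec * 100 < (SK_U64)MaxModIntsPerSec * C_INT_MOD_DISABLE_PERCENTAGE)
			return SK_FALSE;	/* quiet: take interrupts directly */
		return CurrentlyOn;		/* in between: keep the current state */
	}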
+typedef struct s_PerStrm PER_STRM;
+
+#define SK_ALLOC_IRQ 0x00000001
+
+#ifdef SK_DIAG_SUPPORT
+#define DIAG_ACTIVE 1
+#define DIAG_NOTACTIVE 0
+#endif
+
+/****************************************************************************
+ * Per board structure / Adapter Context structure:
+ * Allocated within attach(9e) and freed within detach(9e).
+ * Contains all 'per device' necessary handles, flags, locks etc.:
+ */
+struct s_AC {
+ SK_GEINIT GIni; /* GE init struct */
+ SK_PNMI Pnmi; /* PNMI data struct */
+ SK_VPD vpd; /* vpd data struct */
+ SK_QUEUE Event; /* Event queue */
+ SK_HWT Hwt; /* Hardware Timer control struct */
+ SK_TIMCTRL Tim; /* Software Timer control struct */
+ SK_I2C I2c; /* I2C relevant data structure */
+ SK_ADDR Addr; /* for Address module */
+ SK_CSUM Csum; /* for checksum module */
+ SK_RLMT Rlmt; /* for rlmt module */
+ spinlock_t SlowPathLock; /* Normal IRQ lock */
+ struct timer_list BlinkTimer; /* for LED blinking */
+ int LedsOn;
+ SK_PNMI_STRUCT_DATA PnmiStruct; /* structure to get all Pnmi-Data */
+ int RlmtMode; /* link check mode to set */
+ int RlmtNets; /* Number of nets */
+
+ SK_IOC IoBase; /* register set of adapter */
+ int BoardLevel; /* level of active hw init (0-2) */
+ char DeviceStr[80]; /* adapter string from vpd */
+ SK_U32 AllocFlag; /* flag allocation of resources */
+ struct pci_dev *PciDev; /* for access to pci config space */
+ SK_U32 PciDevId; /* pci device id */
+ struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */
+ char Name[30]; /* driver name */
+
+ int RxBufSize; /* length of receive buffers */
+ struct net_device_stats stats; /* linux 'netstat -i' statistics */
+ int Index; /* internal board index number */
+
+ /* adapter RAM sizes for queues of active port */
+ int RxQueueSize; /* memory used for receive queue */
+ int TxSQueueSize; /* memory used for sync. tx queue */
+ int TxAQueueSize; /* memory used for async. tx queue */
+
+ int PromiscCount; /* promiscuous mode counter */
+ int AllMultiCount; /* allmulticast mode counter */
+ int MulticCount; /* number of different MC */
+ /* addresses for this board */
+ /* (may be more than HW can)*/
+
+ int HWRevision; /* Hardware revision */
+ int ActivePort; /* the active XMAC port */
+ int MaxPorts; /* number of activated ports */
+ int TxDescrPerRing; /* # of descriptors per tx ring */
+ int RxDescrPerRing; /* # of descriptors per rx ring */
+
+ caddr_t pDescrMem; /* Pointer to the descriptor area */
+ dma_addr_t pDescrMemDMA; /* PCI DMA address of area */
+
+ /* the port structures with descriptor rings */
+ TX_PORT TxPort[SK_MAX_MACS][2];
+ RX_PORT RxPort[SK_MAX_MACS];
+
+ unsigned int CsOfs1; /* for checksum calculation */
+ unsigned int CsOfs2; /* for checksum calculation */
+ SK_U32 CsOfs; /* for checksum calculation */
+
+ SK_BOOL CheckQueue; /* check event queue soon */
+ SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */
+ DIM_INFO DynIrqModInfo; /* all data related to DIM */
+
+ /* Only for tests */
+ int PortUp;
+ int PortDown;
+ int ChipsetType; /* Chipset family type
+ * 0 == Genesis family support
+ * 1 == Yukon family support
+ */
+#ifdef SK_DIAG_SUPPORT
+ SK_U32 DiagModeActive; /* is diag active? */
+ SK_BOOL DiagFlowCtrl; /* for control purposes */
+ SK_PNMI_STRUCT_DATA PnmiBackup; /* backup structure for all Pnmi-Data */
+ SK_BOOL WasIfUp[SK_MAX_MACS]; /* for OpenClose while
+ * DIAG is busy with NIC
+ */
+#endif
+
+};
+
+
+#endif /* __INC_SKDRV2ND_H */
+
diff --git a/drivers/net/sk98lin/h/skerror.h b/drivers/net/sk98lin/h/skerror.h
new file mode 100644
index 000000000000..da062f766238
--- /dev/null
+++ b/drivers/net/sk98lin/h/skerror.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Name: skerror.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.7 $
+ * Date: $Date: 2003/05/13 17:25:13 $
+ * Purpose: SK specific Error log support
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _INC_SKERROR_H_
+#define _INC_SKERROR_H_
+
+/*
+ * Define Error Classes
+ */
+#define SK_ERRCL_OTHER (0) /* Other error */
+#define SK_ERRCL_CONFIG (1L<<0) /* Configuration error */
+#define SK_ERRCL_INIT (1L<<1) /* Initialization error */
+#define SK_ERRCL_NORES (1L<<2) /* Out of Resources error */
+#define SK_ERRCL_SW (1L<<3) /* Internal Software error */
+#define SK_ERRCL_HW (1L<<4) /* Hardware Failure */
+#define SK_ERRCL_COMM (1L<<5) /* Communication error */
+
+
+/*
+ * Define Error Code Bases
+ */
+#define SK_ERRBASE_RLMT 100 /* Base Error number for RLMT */
+#define SK_ERRBASE_HWINIT 200 /* Base Error number for HWInit */
+#define SK_ERRBASE_VPD 300 /* Base Error number for VPD */
+#define SK_ERRBASE_PNMI 400 /* Base Error number for PNMI */
+#define SK_ERRBASE_CSUM 500 /* Base Error number for Checksum */
+#define SK_ERRBASE_SIRQ 600 /* Base Error number for Special IRQ */
+#define SK_ERRBASE_I2C 700 /* Base Error number for I2C module */
+#define SK_ERRBASE_QUEUE 800 /* Base Error number for Scheduler */
+#define SK_ERRBASE_ADDR 900 /* Base Error number for Address module */
+#define SK_ERRBASE_PECP 1000 /* Base Error number for PECP */
+#define SK_ERRBASE_DRV 1100 /* Base Error number for Driver */
+
+#endif /* _INC_SKERROR_H_ */
diff --git a/drivers/net/sk98lin/h/skgedrv.h b/drivers/net/sk98lin/h/skgedrv.h
new file mode 100644
index 000000000000..44fd4c3de818
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgedrv.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ *
+ * Name: skgedrv.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.10 $
+ * Date: $Date: 2003/07/04 12:25:01 $
+ * Purpose: Interface with the driver
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKGEDRV_H_
+#define __INC_SKGEDRV_H_
+
+/* defines ********************************************************************/
+
+/*
+ * Define the driver events.
+ * Usually the events are defined by the destination module.
+ * In case of the driver we put the definition of the events here.
+ */
+#define SK_DRV_PORT_RESET 1 /* The port needs to be reset */
+#define SK_DRV_NET_UP 2 /* The net is operational */
+#define SK_DRV_NET_DOWN 3 /* The net is down */
+#define SK_DRV_SWITCH_SOFT 4 /* Ports switch with both links connected */
+#define SK_DRV_SWITCH_HARD 5 /* Port switch due to link failure */
+#define SK_DRV_RLMT_SEND 6 /* Send a RLMT packet */
+#define SK_DRV_ADAP_FAIL 7 /* The whole adapter fails */
+#define SK_DRV_PORT_FAIL 8 /* One port fails */
+#define SK_DRV_SWITCH_INTERN 9 /* Port switch by the driver itself */
+#define SK_DRV_POWER_DOWN 10 /* Power down mode */
+#define SK_DRV_TIMER 11 /* Timer for free use */
+#ifdef SK_NO_RLMT
+#define SK_DRV_LINK_UP 12 /* Link Up event for driver */
+#define SK_DRV_LINK_DOWN 13 /* Link Down event for driver */
+#endif
+#define SK_DRV_DOWNSHIFT_DET 14 /* Downshift 4-Pair / 2-Pair (YUKON only) */
+#endif /* __INC_SKGEDRV_H_ */
diff --git a/drivers/net/sk98lin/h/skgehw.h b/drivers/net/sk98lin/h/skgehw.h
new file mode 100644
index 000000000000..f6282b7956db
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgehw.h
@@ -0,0 +1,2126 @@
+/******************************************************************************
+ *
+ * Name: skgehw.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.56 $
+ * Date: $Date: 2003/09/23 09:01:00 $
+ * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKGEHW_H
+#define __INC_SKGEHW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines ********************************************************************/
+
+#define BIT_31 (1UL << 31)
+#define BIT_30 (1L << 30)
+#define BIT_29 (1L << 29)
+#define BIT_28 (1L << 28)
+#define BIT_27 (1L << 27)
+#define BIT_26 (1L << 26)
+#define BIT_25 (1L << 25)
+#define BIT_24 (1L << 24)
+#define BIT_23 (1L << 23)
+#define BIT_22 (1L << 22)
+#define BIT_21 (1L << 21)
+#define BIT_20 (1L << 20)
+#define BIT_19 (1L << 19)
+#define BIT_18 (1L << 18)
+#define BIT_17 (1L << 17)
+#define BIT_16 (1L << 16)
+#define BIT_15 (1L << 15)
+#define BIT_14 (1L << 14)
+#define BIT_13 (1L << 13)
+#define BIT_12 (1L << 12)
+#define BIT_11 (1L << 11)
+#define BIT_10 (1L << 10)
+#define BIT_9 (1L << 9)
+#define BIT_8 (1L << 8)
+#define BIT_7 (1L << 7)
+#define BIT_6 (1L << 6)
+#define BIT_5 (1L << 5)
+#define BIT_4 (1L << 4)
+#define BIT_3 (1L << 3)
+#define BIT_2 (1L << 2)
+#define BIT_1 (1L << 1)
+#define BIT_0 1L
+
+#define BIT_15S (1U << 15)
+#define BIT_14S (1 << 14)
+#define BIT_13S (1 << 13)
+#define BIT_12S (1 << 12)
+#define BIT_11S (1 << 11)
+#define BIT_10S (1 << 10)
+#define BIT_9S (1 << 9)
+#define BIT_8S (1 << 8)
+#define BIT_7S (1 << 7)
+#define BIT_6S (1 << 6)
+#define BIT_5S (1 << 5)
+#define BIT_4S (1 << 4)
+#define BIT_3S (1 << 3)
+#define BIT_2S (1 << 2)
+#define BIT_1S (1 << 1)
+#define BIT_0S 1
+
+#define SHIFT31(x) ((x) << 31)
+#define SHIFT30(x) ((x) << 30)
+#define SHIFT29(x) ((x) << 29)
+#define SHIFT28(x) ((x) << 28)
+#define SHIFT27(x) ((x) << 27)
+#define SHIFT26(x) ((x) << 26)
+#define SHIFT25(x) ((x) << 25)
+#define SHIFT24(x) ((x) << 24)
+#define SHIFT23(x) ((x) << 23)
+#define SHIFT22(x) ((x) << 22)
+#define SHIFT21(x) ((x) << 21)
+#define SHIFT20(x) ((x) << 20)
+#define SHIFT19(x) ((x) << 19)
+#define SHIFT18(x) ((x) << 18)
+#define SHIFT17(x) ((x) << 17)
+#define SHIFT16(x) ((x) << 16)
+#define SHIFT15(x) ((x) << 15)
+#define SHIFT14(x) ((x) << 14)
+#define SHIFT13(x) ((x) << 13)
+#define SHIFT12(x) ((x) << 12)
+#define SHIFT11(x) ((x) << 11)
+#define SHIFT10(x) ((x) << 10)
+#define SHIFT9(x) ((x) << 9)
+#define SHIFT8(x) ((x) << 8)
+#define SHIFT7(x) ((x) << 7)
+#define SHIFT6(x) ((x) << 6)
+#define SHIFT5(x) ((x) << 5)
+#define SHIFT4(x) ((x) << 4)
+#define SHIFT3(x) ((x) << 3)
+#define SHIFT2(x) ((x) << 2)
+#define SHIFT1(x) ((x) << 1)
+#define SHIFT0(x) ((x) << 0)
+
+/*
+ * Configuration Space header
+ * Since this module is used for different OSes, some of these
+ * definitions may duplicate system headers (e.g. on Linux).  But to
+ * keep the source common, we have to live with this...
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */
+#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */
+#define PCI_COMMAND 0x04 /* 16 bit Command */
+#define PCI_STATUS 0x06 /* 16 bit Status */
+#define PCI_REV_ID 0x08 /* 8 bit Revision ID */
+#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */
+#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */
+#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */
+#define PCI_HEADER_T 0x0e /* 8 bit Header Type */
+#define PCI_BIST 0x0f /* 8 bit Built-in selftest */
+#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */
+#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */
+ /* Byte 0x18..0x2b: reserved */
+#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */
+#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */
+#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */
+#define PCI_CAP_PTR 0x34 /* 8 bit Capabilities Ptr */
+ /* Byte 0x35..0x3b: reserved */
+#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */
+#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */
+#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */
+#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */
+ /* Device Dependent Region */
+#define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */
+#define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */
+ /* Power Management Region */
+#define PCI_PM_CAP_ID 0x48 /* 8 bit Power Management Cap. ID */
+#define PCI_PM_NITEM 0x49 /* 8 bit Next Item Ptr */
+#define PCI_PM_CAP_REG 0x4a /* 16 bit Power Management Capabilities */
+#define PCI_PM_CTL_STS 0x4c /* 16 bit Power Manag. Control/Status */
+ /* Byte 0x4e: reserved */
+#define PCI_PM_DAT_REG 0x4f /* 8 bit Power Manag. Data Register */
+ /* VPD Region */
+#define PCI_VPD_CAP_ID 0x50 /* 8 bit VPD Cap. ID */
+#define PCI_VPD_NITEM 0x51 /* 8 bit Next Item Ptr */
+#define PCI_VPD_ADR_REG 0x52 /* 16 bit VPD Address Register */
+#define PCI_VPD_DAT_REG 0x54 /* 32 bit VPD Data Register */
+ /* Byte 0x58..0x59: reserved */
+#define PCI_SER_LD_CTRL 0x5a /* 16 bit SEEPROM Loader Ctrl (YUKON only) */
+ /* Byte 0x5c..0xff: reserved */
+
+/*
+ * I2C Address (PCI Config)
+ *
+ * Note: The temperature and voltage sensors are relocated on a different
+ * I2C bus.
+ */
+#define I2C_ADDR_VPD 0xa0 /* I2C address for the VPD EEPROM */
+
+/*
+ * Define Bits and Values of the registers
+ */
+/* PCI_COMMAND 16 bit Command */
+ /* Bit 15..11: reserved */
+#define PCI_INT_DIS BIT_10S /* Interrupt INTx# disable (PCI 2.3) */
+#define PCI_FBTEN BIT_9S /* Fast Back-To-Back enable */
+#define PCI_SERREN BIT_8S /* SERR enable */
+#define PCI_ADSTEP BIT_7S /* Address Stepping */
+#define PCI_PERREN BIT_6S /* Parity Report Response enable */
+#define PCI_VGA_SNOOP BIT_5S /* VGA palette snoop */
+#define PCI_MWIEN BIT_4S /* Memory Write and Invalidate cycle enable */
+#define PCI_SCYCEN BIT_3S /* Special Cycle enable */
+#define PCI_BMEN BIT_2S /* Bus Master enable */
+#define PCI_MEMEN BIT_1S /* Memory Space Access enable */
+#define PCI_IOEN BIT_0S /* I/O Space Access enable */
+
+#define PCI_COMMAND_VAL (PCI_FBTEN | PCI_SERREN | PCI_PERREN | PCI_MWIEN |\
+ PCI_BMEN | PCI_MEMEN | PCI_IOEN)
+
+/* PCI_STATUS 16 bit Status */
+#define PCI_PERR BIT_15S /* Parity Error */
+#define PCI_SERR BIT_14S /* Signaled SERR */
+#define PCI_RMABORT BIT_13S /* Received Master Abort */
+#define PCI_RTABORT BIT_12S /* Received Target Abort */
+ /* Bit 11: reserved */
+#define PCI_DEVSEL (3<<9) /* Bit 10.. 9: DEVSEL Timing */
+#define PCI_DEV_FAST (0<<9) /* fast */
+#define PCI_DEV_MEDIUM (1<<9) /* medium */
+#define PCI_DEV_SLOW (2<<9) /* slow */
+#define PCI_DATAPERR BIT_8S /* DATA Parity error detected */
+#define PCI_FB2BCAP BIT_7S /* Fast Back-to-Back Capability */
+#define PCI_UDF BIT_6S /* User Defined Features */
+#define PCI_66MHZCAP BIT_5S /* 66 MHz PCI bus clock capable */
+#define PCI_NEWCAP BIT_4S /* New cap. list implemented */
+#define PCI_INT_STAT BIT_3S /* Interrupt INTx# Status (PCI 2.3) */
+ /* Bit 2.. 0: reserved */
+
+#define PCI_ERRBITS (PCI_PERR | PCI_SERR | PCI_RMABORT | PCI_RTABORT |\
+ PCI_DATAPERR)
+
+/* PCI_CLASS_CODE 24 bit Class Code */
+/* Byte 2: Base Class (02) */
+/* Byte 1: SubClass (00) */
+/* Byte 0: Programming Interface (00) */
+
+/* PCI_CACHE_LSZ 8 bit Cache Line Size */
+/* Possible values: 0,2,4,8,16,32,64,128 */
+
+/* PCI_HEADER_T 8 bit Header Type */
+#define PCI_HD_MF_DEV BIT_7S /* 0= single, 1= multi-func dev */
+#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */
+
+/* PCI_BIST 8 bit Built-in selftest */
+/* Built-in Self test not supported (optional) */
+
+/* PCI_BASE_1ST 32 bit 1st Base address */
+#define PCI_MEMSIZE 0x4000L /* use 16 kB Memory Base */
+#define PCI_MEMBASE_MSK 0xffffc000L /* Bit 31..14: Memory Base Address */
+#define PCI_MEMSIZE_MSK 0x00003ff0L /* Bit 13.. 4: Memory Size Req. */
+#define PCI_PREFEN BIT_3 /* Prefetchable */
+#define PCI_MEM_TYP (3L<<2) /* Bit 2.. 1: Memory Type */
+#define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */
+#define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */
+#define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */
+#define PCI_MEMSPACE BIT_0 /* Memory Space Indicator */
+
+/* PCI_BASE_2ND 32 bit 2nd Base address */
+#define PCI_IOBASE 0xffffff00L /* Bit 31.. 8: I/O Base address */
+#define PCI_IOSIZE 0x000000fcL /* Bit 7.. 2: I/O Size Requirements */
+ /* Bit 1: reserved */
+#define PCI_IOSPACE BIT_0 /* I/O Space Indicator */
+
+/* PCI_BASE_ROM 32 bit Expansion ROM Base Address */
+#define PCI_ROMBASE_MSK 0xfffe0000L /* Bit 31..17: ROM Base address */
+#define PCI_ROMBASE_SIZ (0x1cL<<14) /* Bit 16..14: Treat as Base or Size */
+#define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */
+ /* Bit 10.. 1: reserved */
+#define PCI_ROMEN BIT_0 /* Address Decode enable */
+
+/* Device Dependent Region */
+/* PCI_OUR_REG_1 32 bit Our Register 1 */
+ /* Bit 31..29: reserved */
+#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode (YUKON only) */
+#define PCI_TEST_CAL BIT_27 /* Test PCI buffer calib. (YUKON only) */
+#define PCI_EN_CAL BIT_26 /* Enable PCI buffer calib. (YUKON only) */
+#define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */
+#define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */
+#define PCI_EN_IO BIT_23 /* Mapping to I/O space */
+#define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */
+ /* 1 = Map Flash to memory */
+ /* 0 = Disable addr. dec */
+#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */
+#define PCI_PAGE_16 (0L<<20) /* 16 k pages */
+#define PCI_PAGE_32K (1L<<20) /* 32 k pages */
+#define PCI_PAGE_64K (2L<<20) /* 64 k pages */
+#define PCI_PAGE_128K (3L<<20) /* 128 k pages */
+ /* Bit 19: reserved */
+#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */
+#define PCI_NOTAR BIT_15 /* No turnaround cycle */
+#define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */
+#define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */
+#define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */
+#define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */
+#define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */
+#define PCI_BURST_DIS BIT_9 /* Burst Disable */
+#define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */
+#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7.. 4: Skew Ctrl, DAS Ext */
+#define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */
+
+
+/* PCI_OUR_REG_2 32 bit Our Register 2 */
+#define PCI_VPD_WR_THR (0xffL<<24) /* Bit 31..24: VPD Write Threshold */
+#define PCI_DEV_SEL (0x7fL<<17) /* Bit 23..17: EEPROM Device Select */
+#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 16..14: VPD ROM Size */
+ /* Bit 13..12: reserved */
+#define PCI_PATCH_DIR (0xfL<<8) /* Bit 11.. 8: Ext Patches dir 3..0 */
+#define PCI_PATCH_DIR_3 BIT_11
+#define PCI_PATCH_DIR_2 BIT_10
+#define PCI_PATCH_DIR_1 BIT_9
+#define PCI_PATCH_DIR_0 BIT_8
+#define PCI_EXT_PATCHS (0xfL<<4) /* Bit 7.. 4: Extended Patches 3..0 */
+#define PCI_EXT_PATCH_3 BIT_7
+#define PCI_EXT_PATCH_2 BIT_6
+#define PCI_EXT_PATCH_1 BIT_5
+#define PCI_EXT_PATCH_0 BIT_4
+#define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */
+#define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */
+ /* Bit 1: reserved */
+#define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */
+
+
+/* Power Management Region */
+/* PCI_PM_CAP_REG 16 bit Power Management Capabilities */
+#define PCI_PME_SUP_MSK (0x1f<<11) /* Bit 15..11: PM Event Support Mask */
+#define PCI_PME_D3C_SUP BIT_15S /* PME from D3cold Support (if Vaux) */
+#define PCI_PME_D3H_SUP BIT_14S /* PME from D3hot Support */
+#define PCI_PME_D2_SUP BIT_13S /* PME from D2 Support */
+#define PCI_PME_D1_SUP BIT_12S /* PME from D1 Support */
+#define PCI_PME_D0_SUP BIT_11S /* PME from D0 Support */
+#define PCI_PM_D2_SUP BIT_10S /* D2 Support in 33 MHz mode */
+#define PCI_PM_D1_SUP BIT_9S /* D1 Support */
+ /* Bit 8.. 6: reserved */
+#define PCI_PM_DSI BIT_5S /* Device Specific Initialization */
+#define PCI_PM_APS BIT_4S /* Auxiliary Power Source */
+#define PCI_PME_CLOCK BIT_3S /* PM Event Clock */
+#define PCI_PM_VER_MSK 7 /* Bit 2.. 0: PM PCI Spec. version */
+
+/* PCI_PM_CTL_STS 16 bit Power Management Control/Status */
+#define PCI_PME_STATUS BIT_15S /* PME Status (YUKON only) */
+#define PCI_PM_DAT_SCL (3<<13) /* Bit 14..13: Data Reg. scaling factor */
+#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 12.. 9: PM data selector field */
+#define PCI_PME_EN BIT_8S /* Enable PME# generation (YUKON only) */
+ /* Bit 7.. 2: reserved */
+#define PCI_PM_STATE_MSK 3 /* Bit 1.. 0: Power Management State */
+
+#define PCI_PM_STATE_D0 0 /* D0: Operational (default) */
+#define PCI_PM_STATE_D1 1 /* D1: (YUKON only) */
+#define PCI_PM_STATE_D2 2 /* D2: (YUKON only) */
+#define PCI_PM_STATE_D3 3 /* D3: HOT, Power Down and Reset */
+
+/* VPD Region */
+/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
+#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wr cycle */
+#define PCI_VPD_ADR_MSK 0x7fffL /* Bit 14.. 0: VPD address mask */
+
+/* Control Register File (Address Map) */
+
+/*
+ * Bank 0
+ */
+#define B0_RAP 0x0000 /* 8 bit Register Address Port */
+ /* 0x0001 - 0x0003: reserved */
+#define B0_CTST 0x0004 /* 16 bit Control/Status register */
+#define B0_LED 0x0006 /* 8 Bit LED register */
+#define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */
+#define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */
+#define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */
+#define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */
+#define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */
+#define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg */
+ /* 0x001c: reserved */
+
+/* B0 XMAC 1 registers (GENESIS only) */
+#define B0_XM1_IMSK 0x0020 /* 16 bit r/w XMAC 1 Interrupt Mask Register*/
+ /* 0x0022 - 0x0027: reserved */
+#define B0_XM1_ISRC 0x0028 /* 16 bit ro XMAC 1 Interrupt Status Reg */
+ /* 0x002a - 0x002f: reserved */
+#define B0_XM1_PHY_ADDR 0x0030 /* 16 bit r/w XMAC 1 PHY Address Register */
+ /* 0x0032 - 0x0033: reserved */
+#define B0_XM1_PHY_DATA 0x0034 /* 16 bit r/w XMAC 1 PHY Data Register */
+ /* 0x0036 - 0x003f: reserved */
+
+/* B0 XMAC 2 registers (GENESIS only) */
+#define B0_XM2_IMSK 0x0040 /* 16 bit r/w XMAC 2 Interrupt Mask Register*/
+ /* 0x0042 - 0x0047: reserved */
+#define B0_XM2_ISRC 0x0048 /* 16 bit ro XMAC 2 Interrupt Status Reg */
+ /* 0x004a - 0x004f: reserved */
+#define B0_XM2_PHY_ADDR 0x0050 /* 16 bit r/w XMAC 2 PHY Address Register */
+ /* 0x0052 - 0x0053: reserved */
+#define B0_XM2_PHY_DATA 0x0054 /* 16 bit r/w XMAC 2 PHY Data Register */
+ /* 0x0056 - 0x005f: reserved */
+
+/* BMU Control Status Registers */
+#define B0_R1_CSR 0x0060 /* 32 bit BMU Ctrl/Stat Rx Queue 1 */
+#define B0_R2_CSR 0x0064 /* 32 bit BMU Ctrl/Stat Rx Queue 2 */
+#define B0_XS1_CSR 0x0068 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+#define B0_XA1_CSR 0x006c /* 32 bit BMU Ctrl/Stat Async Tx Queue 1*/
+#define B0_XS2_CSR 0x0070 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+#define B0_XA2_CSR 0x0074 /* 32 bit BMU Ctrl/Stat Async Tx Queue 2*/
+ /* 0x0078 - 0x007f: reserved */
+
+/*
+ * Bank 1
+ * - completely empty (this is the RAP Block window)
+ * Note: if RAP = 1 this page is reserved
+ */
+
+/*
+ * Bank 2
+ */
+/* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */
+#define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */
+ /* 0x0106 - 0x0107: reserved */
+#define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */
+ /* 0x010e - 0x010f: reserved */
+#define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */
+ /* 0x0116 - 0x0117: reserved */
+#define B2_CONN_TYP 0x0118 /* 8 bit Connector type */
+#define B2_PMD_TYP 0x0119 /* 8 bit PMD type */
+#define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */
+#define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */
+ /* Eprom registers are currently of no use */
+#define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. SRAM size */
+#define B2_E_1 0x011d /* 8 bit EPROM Byte 1 (PHY type) */
+#define B2_E_2 0x011e /* 8 bit EPROM Byte 2 */
+#define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */
+#define B2_FAR 0x0120 /* 32 bit Flash-Prom Addr Reg/Cnt */
+#define B2_FDP 0x0124 /* 8 bit Flash-Prom Data Port */
+ /* 0x0125 - 0x0127: reserved */
+#define B2_LD_CTRL 0x0128 /* 8 bit EPROM loader control register */
+#define B2_LD_TEST 0x0129 /* 8 bit EPROM loader test register */
+ /* 0x012a - 0x012f: reserved */
+#define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */
+#define B2_TI_VAL 0x0134 /* 32 bit Timer Value */
+#define B2_TI_CTRL 0x0138 /* 8 bit Timer Control */
+#define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */
+ /* 0x013a - 0x013f: reserved */
+#define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/
+#define B2_IRQM_VAL 0x0144 /* 32 bit IRQ Moderation Timer Value */
+#define B2_IRQM_CTRL 0x0148 /* 8 bit IRQ Moderation Timer Control */
+#define B2_IRQM_TEST 0x0149 /* 8 bit IRQ Moderation Timer Test */
+#define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */
+#define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */
+ /* 0x0154 - 0x0157: reserved */
+#define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */
+#define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */
+ /* 0x015a - 0x015b: reserved */
+#define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */
+#define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */
+#define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */
+#define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */
+#define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */
+
+/* Blink Source Counter (GENESIS only) */
+#define B2_BSC_INI 0x0170 /* 32 bit Blink Source Counter Init Val */
+#define B2_BSC_VAL 0x0174 /* 32 bit Blink Source Counter Value */
+#define B2_BSC_CTRL 0x0178 /* 8 bit Blink Source Counter Control */
+#define B2_BSC_STAT 0x0179 /* 8 bit Blink Source Counter Status */
+#define B2_BSC_TST 0x017a /* 16 bit Blink Source Counter Test Reg */
+ /* 0x017c - 0x017f: reserved */
+
+/*
+ * Bank 3
+ */
+/* RAM Random Registers */
+#define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */
+#define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */
+#define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */
+ /* 0x018c - 0x018f: reserved */
+
+/* RAM Interface Registers */
+/*
+ * The HW spec calls these registers Timeout Value 0..11, but those names are
+ * not usable in SW. Note that these are NOT real timeouts; they are
+ * the number of qWords transferred continuously.
+ */
+#define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */
+#define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */
+#define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */
+#define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */
+#define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */
+#define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */
+#define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */
+#define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */
+#define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */
+#define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */
+#define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/
+#define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/
+#define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */
+ /* 0x019d - 0x019f: reserved */
+#define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */
+#define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */
+ /* 0x01a3 - 0x01af: reserved */
+
+/* MAC Arbiter Registers (GENESIS only) */
+/* these are the no. of qWord transferred continuously and NOT real timeouts */
+#define B3_MA_TOINI_RX1 0x01b0 /* 8 bit Timeout Init Val Rx Path MAC 1 */
+#define B3_MA_TOINI_RX2 0x01b1 /* 8 bit Timeout Init Val Rx Path MAC 2 */
+#define B3_MA_TOINI_TX1 0x01b2 /* 8 bit Timeout Init Val Tx Path MAC 1 */
+#define B3_MA_TOINI_TX2 0x01b3 /* 8 bit Timeout Init Val Tx Path MAC 2 */
+#define B3_MA_TOVAL_RX1 0x01b4 /* 8 bit Timeout Value Rx Path MAC 1 */
+#define B3_MA_TOVAL_RX2 0x01b5 /* 8 bit Timeout Value Rx Path MAC 2 */
+#define B3_MA_TOVAL_TX1 0x01b6 /* 8 bit Timeout Value Tx Path MAC 1 */
+#define B3_MA_TOVAL_TX2 0x01b7 /* 8 bit Timeout Value Tx Path MAC 2 */
+#define B3_MA_TO_CTRL 0x01b8 /* 16 bit MAC Arbiter Timeout Ctrl Reg */
+#define B3_MA_TO_TEST 0x01ba /* 16 bit MAC Arbiter Timeout Test Reg */
+ /* 0x01bc - 0x01bf: reserved */
+#define B3_MA_RCINI_RX1 0x01c0 /* 8 bit Recovery Init Val Rx Path MAC 1 */
+#define B3_MA_RCINI_RX2 0x01c1 /* 8 bit Recovery Init Val Rx Path MAC 2 */
+#define B3_MA_RCINI_TX1 0x01c2 /* 8 bit Recovery Init Val Tx Path MAC 1 */
+#define B3_MA_RCINI_TX2 0x01c3 /* 8 bit Recovery Init Val Tx Path MAC 2 */
+#define B3_MA_RCVAL_RX1 0x01c4 /* 8 bit Recovery Value Rx Path MAC 1 */
+#define B3_MA_RCVAL_RX2 0x01c5 /* 8 bit Recovery Value Rx Path MAC 2 */
+#define B3_MA_RCVAL_TX1 0x01c6 /* 8 bit Recovery Value Tx Path MAC 1 */
+#define B3_MA_RCVAL_TX2 0x01c7 /* 8 bit Recovery Value Tx Path MAC 2 */
+#define B3_MA_RC_CTRL 0x01c8 /* 16 bit MAC Arbiter Recovery Ctrl Reg */
+#define B3_MA_RC_TEST 0x01ca /* 16 bit MAC Arbiter Recovery Test Reg */
+ /* 0x01cc - 0x01cf: reserved */
+
+/* Packet Arbiter Registers (GENESIS only) */
+/* these are real timeouts */
+#define B3_PA_TOINI_RX1 0x01d0 /* 16 bit Timeout Init Val Rx Path MAC 1 */
+ /* 0x01d2 - 0x01d3: reserved */
+#define B3_PA_TOINI_RX2 0x01d4 /* 16 bit Timeout Init Val Rx Path MAC 2 */
+ /* 0x01d6 - 0x01d7: reserved */
+#define B3_PA_TOINI_TX1 0x01d8 /* 16 bit Timeout Init Val Tx Path MAC 1 */
+ /* 0x01da - 0x01db: reserved */
+#define B3_PA_TOINI_TX2 0x01dc /* 16 bit Timeout Init Val Tx Path MAC 2 */
+ /* 0x01de - 0x01df: reserved */
+#define B3_PA_TOVAL_RX1 0x01e0 /* 16 bit Timeout Val Rx Path MAC 1 */
+ /* 0x01e2 - 0x01e3: reserved */
+#define B3_PA_TOVAL_RX2 0x01e4 /* 16 bit Timeout Val Rx Path MAC 2 */
+ /* 0x01e6 - 0x01e7: reserved */
+#define B3_PA_TOVAL_TX1 0x01e8 /* 16 bit Timeout Val Tx Path MAC 1 */
+ /* 0x01ea - 0x01eb: reserved */
+#define B3_PA_TOVAL_TX2 0x01ec /* 16 bit Timeout Val Tx Path MAC 2 */
+ /* 0x01ee - 0x01ef: reserved */
+#define B3_PA_CTRL 0x01f0 /* 16 bit Packet Arbiter Ctrl Register */
+#define B3_PA_TEST 0x01f2 /* 16 bit Packet Arbiter Test Register */
+ /* 0x01f4 - 0x01ff: reserved */
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
+#define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/
+#define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer Value */
+#define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */
+#define TXA_LIM_VAL 0x020c /* 32 bit Tx Arb Limit Counter Value */
+#define TXA_CTRL 0x0210 /* 8 bit Tx Arbiter Control Register */
+#define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */
+#define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */
+ /* 0x0213 - 0x027f: reserved */
+ /* 0x0280 - 0x0292: MAC 2 */
+ /* 0x0293 - 0x02ff: reserved */
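+
+/*
+ * Note on MR_ADDR(): the macro itself is defined elsewhere in the driver
+ * (skgeinit.h). Since the MAC 2 copy of every per-MAC register block sits
+ * 0x80 bytes above the MAC 1 copy (e.g. Tx Arbiter: MAC 1 at 0x0200..0x0212,
+ * MAC 2 at 0x0280..0x0292), it is expected to look roughly like this
+ * (sketch only, Mac = 0 or 1):
+ *
+ *	#define MR_ADDR(Mac, Offs)	(((Mac) << 7) + (Offs))
+ *
+ * e.g. MR_ADDR(1, TXA_CTRL) addresses the Tx Arbiter Control Reg of MAC 2.
+ */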
+
+/*
+ * Bank 6
+ */
+/* External registers (GENESIS only) */
+#define B6_EXT_REG 0x0300
+
+/*
+ * Bank 7
+ */
+/* This is a copy of the Configuration register file (lower half) */
+#define B7_CFG_SPC 0x0380
+
+/*
+ * Bank 8 - 15
+ */
+/* Receive and Transmit Queue Registers, use Q_ADDR() to access */
+#define B8_Q_REGS 0x0400
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+#define Q_D 0x00 /* 8*32 bit Current Descriptor */
+#define Q_DA_L 0x20 /* 32 bit Current Descriptor Address Low dWord */
+#define Q_DA_H 0x24 /* 32 bit Current Descriptor Address High dWord */
+#define Q_AC_L 0x28 /* 32 bit Current Address Counter Low dWord */
+#define Q_AC_H 0x2c /* 32 bit Current Address Counter High dWord */
+#define Q_BC 0x30 /* 32 bit Current Byte Counter */
+#define Q_CSR 0x34 /* 32 bit BMU Control/Status Register */
+#define Q_F 0x38 /* 32 bit Flag Register */
+#define Q_T1 0x3c /* 32 bit Test Register 1 */
+#define Q_T1_TR 0x3c /* 8 bit Test Register 1 Transfer SM */
+#define Q_T1_WR 0x3d /* 8 bit Test Register 1 Write Descriptor SM */
+#define Q_T1_RD 0x3e /* 8 bit Test Register 1 Read Descriptor SM */
+#define Q_T1_SV 0x3f /* 8 bit Test Register 1 Supervisor SM */
+#define Q_T2 0x40 /* 32 bit Test Register 2 */
+#define Q_T3 0x44 /* 32 bit Test Register 3 */
+ /* 0x48 - 0x7f: reserved */
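+
+/*
+ * Note on Q_ADDR(): the macro itself is defined elsewhere in the driver
+ * (skgeinit.h). Each queue owns a 0x80 byte slice of banks 8..15, so it is
+ * expected to simply add the queue's base offset and the register offset to
+ * B8_Q_REGS (sketch only):
+ *
+ *	#define Q_ADDR(Queue, Offs)	(B8_Q_REGS + (Queue) + (Offs))
+ *
+ * e.g. Q_ADDR(Queue, Q_CSR) addresses that queue's BMU Control/Status Reg.
+ */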
+
+/*
+ * Bank 16 - 23
+ */
+/* RAM Buffer Registers */
+#define B16_RAM_REGS 0x0800
+
+/* RAM Buffer Register Offsets, use RB_ADDR() to access */
+#define RB_START 0x00 /* 32 bit RAM Buffer Start Address */
+#define RB_END 0x04 /* 32 bit RAM Buffer End Address */
+#define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */
+#define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */
+#define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Pack */
+#define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Pack */
+#define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */
+#define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+#define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */
+#define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */
+#define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */
+#define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */
+#define RB_TST2 0x2A /* 8 bit RAM Buffer Test Register 2 */
+ /* 0x2c - 0x7f: reserved */
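+
+/*
+ * Note on RB_ADDR(): the macro itself is defined elsewhere in the driver
+ * (skgeinit.h). Analogous to Q_ADDR(), it is expected to add the queue's
+ * RAM buffer base within banks 16..23 and the register offset to
+ * B16_RAM_REGS (sketch only):
+ *
+ *	#define RB_ADDR(Queue, Offs)	(B16_RAM_REGS + (Queue) + (Offs))
+ *
+ * e.g. RB_ADDR(Queue, RB_CTRL) addresses that queue's RAM Buffer Control Reg.
+ */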
+
+/*
+ * Bank 24
+ */
+/*
+ * Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only)
+ * use MR_ADDR() to access
+ */
+#define RX_MFF_EA 0x0c00 /* 32 bit Receive MAC FIFO End Address */
+#define RX_MFF_WP 0x0c04 /* 32 bit Receive MAC FIFO Write Pointer */
+ /* 0x0c08 - 0x0c0b: reserved */
+#define RX_MFF_RP 0x0c0c /* 32 bit Receive MAC FIFO Read Pointer */
+#define RX_MFF_PC 0x0c10 /* 32 bit Receive MAC FIFO Packet Cnt */
+#define RX_MFF_LEV 0x0c14 /* 32 bit Receive MAC FIFO Level */
+#define RX_MFF_CTRL1 0x0c18 /* 16 bit Receive MAC FIFO Control Reg 1*/
+#define RX_MFF_STAT_TO 0x0c1a /* 8 bit Receive MAC Status Timeout */
+#define RX_MFF_TIST_TO 0x0c1b /* 8 bit Receive MAC Time Stamp Timeout */
+#define RX_MFF_CTRL2 0x0c1c /* 8 bit Receive MAC FIFO Control Reg 2*/
+#define RX_MFF_TST1 0x0c1d /* 8 bit Receive MAC FIFO Test Reg 1 */
+#define RX_MFF_TST2 0x0c1e /* 8 bit Receive MAC FIFO Test Reg 2 */
+ /* 0x0c1f: reserved */
+#define RX_LED_INI 0x0c20 /* 32 bit Receive LED Cnt Init Value */
+#define RX_LED_VAL 0x0c24 /* 32 bit Receive LED Cnt Current Value */
+#define RX_LED_CTRL 0x0c28 /* 8 bit Receive LED Cnt Control Reg */
+#define RX_LED_TST 0x0c29 /* 8 bit Receive LED Cnt Test Register */
+ /* 0x0c2a - 0x0c2f: reserved */
+#define LNK_SYNC_INI 0x0c30 /* 32 bit Link Sync Cnt Init Value */
+#define LNK_SYNC_VAL 0x0c34 /* 32 bit Link Sync Cnt Current Value */
+#define LNK_SYNC_CTRL 0x0c38 /* 8 bit Link Sync Cnt Control Register */
+#define LNK_SYNC_TST 0x0c39 /* 8 bit Link Sync Cnt Test Register */
+ /* 0x0c3a - 0x0c3b: reserved */
+#define LNK_LED_REG 0x0c3c /* 8 bit Link LED Register */
+ /* 0x0c3d - 0x0c3f: reserved */
+
+/* Receive GMAC FIFO (YUKON only), use MR_ADDR() to access */
+#define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */
+#define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+#define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */
+#define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */
+#define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */
+ /* 0x0c54 - 0x0c5f: reserved */
+#define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */
+ /* 0x0c64 - 0x0c67: reserved */
+#define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */
+ /* 0x0c6c - 0x0c6f: reserved */
+#define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */
+ /* 0x0c74 - 0x0c77: reserved */
+#define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */
+ /* 0x0c7c - 0x0c7f: reserved */
+
+/*
+ * Bank 25
+ */
+ /* 0x0c80 - 0x0cbf: MAC 2 */
+ /* 0x0cc0 - 0x0cff: reserved */
+
+/*
+ * Bank 26
+ */
+/*
+ * Transmit MAC FIFO and Transmit LED Registers (GENESIS only),
+ * use MR_ADDR() to access
+ */
+#define TX_MFF_EA 0x0d00 /* 32 bit Transmit MAC FIFO End Address */
+#define TX_MFF_WP 0x0d04 /* 32 bit Transmit MAC FIFO WR Pointer */
+#define TX_MFF_WSP 0x0d08 /* 32 bit Transmit MAC FIFO WR Shadow Ptr */
+#define TX_MFF_RP 0x0d0c /* 32 bit Transmit MAC FIFO RD Pointer */
+#define TX_MFF_PC 0x0d10 /* 32 bit Transmit MAC FIFO Packet Cnt */
+#define TX_MFF_LEV 0x0d14 /* 32 bit Transmit MAC FIFO Level */
+#define TX_MFF_CTRL1 0x0d18 /* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
+#define TX_MFF_WAF 0x0d1a /* 8 bit Transmit MAC Wait after flush */
+ /* 0x0c1b: reserved */
+#define TX_MFF_CTRL2 0x0d1c /* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
+#define TX_MFF_TST1 0x0d1d /* 8 bit Transmit MAC FIFO Test Reg 1 */
+#define TX_MFF_TST2 0x0d1e /* 8 bit Transmit MAC FIFO Test Reg 2 */
+ /* 0x0d1f: reserved */
+#define TX_LED_INI 0x0d20 /* 32 bit Transmit LED Cnt Init Value */
+#define TX_LED_VAL 0x0d24 /* 32 bit Transmit LED Cnt Current Val */
+#define TX_LED_CTRL 0x0d28 /* 8 bit Transmit LED Cnt Control Reg */
+#define TX_LED_TST 0x0d29 /* 8 bit Transmit LED Cnt Test Reg */
+ /* 0x0d2a - 0x0d3f: reserved */
+
+/* Transmit GMAC FIFO (YUKON only), use MR_ADDR() to access */
+#define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */
+#define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+#define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */
+ /* 0x0d4c - 0x0d5f: reserved */
+#define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */
+#define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+#define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */
+ /* 0x0d6c - 0x0d6f: reserved */
+#define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */
+#define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */
+#define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */
+ /* 0x0d7c - 0x0d7f: reserved */
+
+/*
+ * Bank 27
+ */
+ /* 0x0d80 - 0x0dbf: MAC 2 */
+ /* 0x0dc0 - 0x0dff: reserved */
+
+/*
+ * Bank 28
+ */
+/* Descriptor Poll Timer Registers */
+#define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */
+#define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */
+#define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */
+ /* 0x0e09: reserved */
+#define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */
+ /* 0x0e0b: reserved */
+
+/* Time Stamp Timer Registers (YUKON only) */
+ /* 0x0e10: reserved */
+#define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */
+#define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */
+ /* 0x0e19: reserved */
+#define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */
+ /* 0x0e1b - 0x0e7f: reserved */
+
+/*
+ * Bank 29
+ */
+ /* 0x0e80 - 0x0efc: reserved */
+
+/*
+ * Bank 30
+ */
+/* GMAC and GPHY Control Registers (YUKON only) */
+#define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */
+#define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */
+#define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */
+ /* 0x0f09 - 0x0f0b: reserved */
+#define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */
+ /* 0x0f0d - 0x0f0f: reserved */
+#define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */
+ /* 0x0f14 - 0x0f1f: reserved */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+#define WOL_REG_OFFS 0x20 /* HW-Bug: Address is + 0x20 against spec. */
+
+#define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */
+#define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */
+#define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */
+#define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */
+#define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */
+#define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Ptr */
+
+/* use this macro to access above registers */
+#define WOL_REG(Reg) ((Reg) + (pAC->GIni.GIWolOffs))
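+
+/*
+ * Usage sketch (not taken verbatim from the original sources): the init code
+ * is expected to set pAC->GIni.GIWolOffs to either 0 or WOL_REG_OFFS,
+ * depending on whether the adapter shows the HW bug mentioned above.
+ * A WOL register access then looks like, for example,
+ *
+ *	SK_OUT16(IoC, WOL_REG(WOL_CTRL_STAT), WOL_CTL_DEFAULT);
+ *
+ * assuming the driver's SK_OUT16() I/O macro and IoC handle.
+ */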
+
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+#define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */
+#define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+#define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */
+#define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */
+ /* 0x0f40 - 0x0f7f: reserved */
+
+/*
+ * Bank 31
+ */
+/* 0x0f80 - 0x0fff: reserved */
+
+/*
+ * Bank 32 - 33
+ */
+#define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */
+
+/*
+ * Bank 0x22 - 0x3f
+ */
+/* 0x1100 - 0x1fff: reserved */
+
+/*
+ * Bank 0x40 - 0x4f
+ */
+#define BASE_XMAC_1 0x2000 /* XMAC 1 registers */
+
+/*
+ * Bank 0x50 - 0x5f
+ */
+
+#define BASE_GMAC_1 0x2800 /* GMAC 1 registers */
+
+/*
+ * Bank 0x60 - 0x6f
+ */
+#define BASE_XMAC_2 0x3000 /* XMAC 2 registers */
+
+/*
+ * Bank 0x70 - 0x7f
+ */
+#define BASE_GMAC_2 0x3800 /* GMAC 2 registers */
+
+/*
+ * Control Register Bit Definitions:
+ */
+/* B0_RAP 8 bit Register Address Port */
+ /* Bit 7: reserved */
+#define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0,..,6f = block 6f */
+
+/* B0_CTST 16 bit Control/Status register */
+ /* Bit 15..14: reserved */
+#define CS_CLK_RUN_HOT BIT_13S /* CLK_RUN hot m. (YUKON-Lite only) */
+#define CS_CLK_RUN_RST BIT_12S /* CLK_RUN reset (YUKON-Lite only) */
+#define CS_CLK_RUN_ENA BIT_11S /* CLK_RUN enable (YUKON-Lite only) */
+#define CS_VAUX_AVAIL BIT_10S /* VAUX available (YUKON only) */
+#define CS_BUS_CLOCK BIT_9S /* Bus Clock 0/1 = 33/66 MHz */
+#define CS_BUS_SLOT_SZ BIT_8S /* Slot Size 0/1 = 32/64 bit slot */
+#define CS_ST_SW_IRQ BIT_7S /* Set IRQ SW Request */
+#define CS_CL_SW_IRQ BIT_6S /* Clear IRQ SW Request */
+#define CS_STOP_DONE BIT_5S /* Stop Master is finished */
+#define CS_STOP_MAST BIT_4S /* Command Bit to stop the master */
+#define CS_MRST_CLR BIT_3S /* Clear Master reset */
+#define CS_MRST_SET BIT_2S /* Set Master reset */
+#define CS_RST_CLR BIT_1S /* Clear Software reset */
+#define CS_RST_SET BIT_0S /* Set Software reset */
+
+/* B0_LED 8 Bit LED register */
+ /* Bit 7.. 2: reserved */
+#define LED_STAT_ON BIT_1S /* Status LED on */
+#define LED_STAT_OFF BIT_0S /* Status LED off */
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+#define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */
+#define PC_VAUX_DIS BIT_6 /* Switch VAUX Disable */
+#define PC_VCC_ENA BIT_5 /* Switch VCC Enable */
+#define PC_VCC_DIS BIT_4 /* Switch VCC Disable */
+#define PC_VAUX_ON BIT_3 /* Switch VAUX On */
+#define PC_VAUX_OFF BIT_2 /* Switch VAUX Off */
+#define PC_VCC_ON BIT_1 /* Switch VCC On */
+#define PC_VCC_OFF BIT_0 /* Switch VCC Off */
+
+/* B0_ISRC 32 bit Interrupt Source Register */
+/* B0_IMSK 32 bit Interrupt Mask Register */
+/* B0_SP_ISRC 32 bit Special Interrupt Source Reg */
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+#define IS_ALL_MSK 0xbfffffffUL /* All Interrupt bits */
+#define IS_HW_ERR BIT_31 /* Interrupt HW Error */
+ /* Bit 30: reserved */
+#define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */
+#define IS_PA_TO_RX2 BIT_28 /* Packet Arb Timeout Rx2 */
+#define IS_PA_TO_TX1 BIT_27 /* Packet Arb Timeout Tx1 */
+#define IS_PA_TO_TX2 BIT_26 /* Packet Arb Timeout Tx2 */
+#define IS_I2C_READY BIT_25 /* IRQ on end of I2C Tx */
+#define IS_IRQ_SW BIT_24 /* SW forced IRQ */
+#define IS_EXT_REG BIT_23 /* IRQ from LM80 or PHY (GENESIS only) */
+ /* IRQ from PHY (YUKON only) */
+#define IS_TIMINT BIT_22 /* IRQ from Timer */
+#define IS_MAC1 BIT_21 /* IRQ from MAC 1 */
+#define IS_LNK_SYNC_M1 BIT_20 /* Link Sync Cnt wrap MAC 1 */
+#define IS_MAC2 BIT_19 /* IRQ from MAC 2 */
+#define IS_LNK_SYNC_M2 BIT_18 /* Link Sync Cnt wrap MAC 2 */
+/* Receive Queue 1 */
+#define IS_R1_B BIT_17 /* Q_R1 End of Buffer */
+#define IS_R1_F BIT_16 /* Q_R1 End of Frame */
+#define IS_R1_C BIT_15 /* Q_R1 Encoding Error */
+/* Receive Queue 2 */
+#define IS_R2_B BIT_14 /* Q_R2 End of Buffer */
+#define IS_R2_F BIT_13 /* Q_R2 End of Frame */
+#define IS_R2_C BIT_12 /* Q_R2 Encoding Error */
+/* Synchronous Transmit Queue 1 */
+#define IS_XS1_B BIT_11 /* Q_XS1 End of Buffer */
+#define IS_XS1_F BIT_10 /* Q_XS1 End of Frame */
+#define IS_XS1_C BIT_9 /* Q_XS1 Encoding Error */
+/* Asynchronous Transmit Queue 1 */
+#define IS_XA1_B BIT_8 /* Q_XA1 End of Buffer */
+#define IS_XA1_F BIT_7 /* Q_XA1 End of Frame */
+#define IS_XA1_C BIT_6 /* Q_XA1 Encoding Error */
+/* Synchronous Transmit Queue 2 */
+#define IS_XS2_B BIT_5 /* Q_XS2 End of Buffer */
+#define IS_XS2_F BIT_4 /* Q_XS2 End of Frame */
+#define IS_XS2_C BIT_3 /* Q_XS2 Encoding Error */
+/* Asynchronous Transmit Queue 2 */
+#define IS_XA2_B BIT_2 /* Q_XA2 End of Buffer */
+#define IS_XA2_F BIT_1 /* Q_XA2 End of Frame */
+#define IS_XA2_C BIT_0 /* Q_XA2 Encoding Error */
+
+
+/* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */
+/* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */
+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
+#define IS_ERR_MSK 0x00000fffL /* All Error bits */
+ /* Bit 31..14: reserved */
+#define IS_IRQ_TIST_OV BIT_13 /* Time Stamp Timer Overflow (YUKON only) */
+#define IS_IRQ_SENSOR BIT_12 /* IRQ from Sensor (YUKON only) */
+#define IS_IRQ_MST_ERR BIT_11 /* IRQ master error detected */
+#define IS_IRQ_STAT BIT_10 /* IRQ status exception */
+#define IS_NO_STAT_M1 BIT_9 /* No Rx Status from MAC 1 */
+#define IS_NO_STAT_M2 BIT_8 /* No Rx Status from MAC 2 */
+#define IS_NO_TIST_M1 BIT_7 /* No Time Stamp from MAC 1 */
+#define IS_NO_TIST_M2 BIT_6 /* No Time Stamp from MAC 2 */
+#define IS_RAM_RD_PAR BIT_5 /* RAM Read Parity Error */
+#define IS_RAM_WR_PAR BIT_4 /* RAM Write Parity Error */
+#define IS_M1_PAR_ERR BIT_3 /* MAC 1 Parity Error */
+#define IS_M2_PAR_ERR BIT_2 /* MAC 2 Parity Error */
+#define IS_R1_PAR_ERR BIT_1 /* Queue R1 Parity Error */
+#define IS_R2_PAR_ERR BIT_0 /* Queue R2 Parity Error */
+
+/* B2_CONN_TYP 8 bit Connector type */
+/* B2_PMD_TYP 8 bit PMD type */
+/* Values of connector and PMD type comply with the SysKonnect internal standard */
+
+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
+#define CFG_CHIP_R_MSK (0xf<<4) /* Bit 7.. 4: Chip Revision */
+ /* Bit 3.. 2: reserved */
+#define CFG_DIS_M2_CLK BIT_1S /* Disable Clock for 2nd MAC */
+#define CFG_SNG_MAC BIT_0S /* MAC Config: 0=2 MACs / 1=1 MAC*/
+
+/* B2_CHIP_ID 8 bit Chip Identification Number */
+#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */
+#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */
+#define CHIP_ID_YUKON_LITE 0xb1 /* Chip ID for YUKON-Lite (Rev. A1-A3) */
+#define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */
+
+#define CHIP_REV_YU_LITE_A1 3 /* Chip Rev. for YUKON-Lite A1,A2 */
+#define CHIP_REV_YU_LITE_A3 7 /* Chip Rev. for YUKON-Lite A3 */
+
+/* B2_FAR 32 bit Flash-Prom Addr Reg/Cnt */
+#define FAR_ADDR 0x1ffffL /* Bit 16.. 0: FPROM Address mask */
+
+/* B2_LD_CTRL 8 bit EPROM loader control register */
+/* Bits are currently reserved */
+
+/* B2_LD_TEST 8 bit EPROM loader test register */
+ /* Bit 7.. 4: reserved */
+#define LD_T_ON BIT_3S /* Loader Test mode on */
+#define LD_T_OFF BIT_2S /* Loader Test mode off */
+#define LD_T_STEP BIT_1S /* Decrement FPROM addr. Counter */
+#define LD_START BIT_0S /* Start loading FPROM */
+
+/*
+ * Timer Section
+ */
+/* B2_TI_CTRL 8 bit Timer control */
+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
+ /* Bit 7.. 3: reserved */
+#define TIM_START BIT_2S /* Start Timer */
+#define TIM_STOP BIT_1S /* Stop Timer */
+#define TIM_CLR_IRQ BIT_0S /* Clear Timer IRQ (!IRQM) */
+
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
+ /* Bit 7.. 3: reserved */
+#define TIM_T_ON BIT_2S /* Test mode on */
+#define TIM_T_OFF BIT_1S /* Test mode off */
+#define TIM_T_STEP BIT_0S /* Test step */
+
+/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */
+/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */
+ /* Bit 31..24: reserved */
+#define DPT_MSK 0x00ffffffL /* Bit 23.. 0: Desc Poll Timer Bits */
+
+/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
+ /* Bit 7.. 2: reserved */
+#define DPT_START BIT_1S /* Start Descriptor Poll Timer */
+#define DPT_STOP BIT_0S /* Stop Descriptor Poll Timer */
+
+/* B2_E_3 8 bit lower 4 bits used for HW self test result */
+#define B2_E3_RES_MASK 0x0f
+
+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
+#define TST_FRC_DPERR_MR BIT_7S /* force DATAPERR on MST RD */
+#define TST_FRC_DPERR_MW BIT_6S /* force DATAPERR on MST WR */
+#define TST_FRC_DPERR_TR BIT_5S /* force DATAPERR on TRG RD */
+#define TST_FRC_DPERR_TW BIT_4S /* force DATAPERR on TRG WR */
+#define TST_FRC_APERR_M BIT_3S /* force ADDRPERR on MST */
+#define TST_FRC_APERR_T BIT_2S /* force ADDRPERR on TRG */
+#define TST_CFG_WRITE_ON BIT_1S /* Enable Config Reg WR */
+#define TST_CFG_WRITE_OFF BIT_0S /* Disable Config Reg WR */
+
+/* B2_TST_CTRL2 8 bit Test Control Register 2 */
+ /* Bit 7.. 4: reserved */
+ /* force the following error on the next master read/write */
+#define TST_FRC_DPERR_MR64 BIT_3S /* DataPERR RD 64 */
+#define TST_FRC_DPERR_MW64 BIT_2S /* DataPERR WR 64 */
+#define TST_FRC_APERR_1M64 BIT_1S /* AddrPERR on 1. phase */
+#define TST_FRC_APERR_2M64 BIT_0S /* AddrPERR on 2. phase */
+
+/* B2_GP_IO 32 bit General Purpose I/O Register */
+ /* Bit 31..26: reserved */
+#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=In/1=Out */
+#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=In/1=Out */
+#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=In/1=Out */
+#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=In/1=Out */
+#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=In/1=Out */
+#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=In/1=Out */
+#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=In/1=Out */
+#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=In/1=Out */
+#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=In/1=Out */
+#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=In/1=Out */
+ /* Bit 15..10: reserved */
+#define GP_IO_9 BIT_9 /* IO_9 pin */
+#define GP_IO_8 BIT_8 /* IO_8 pin */
+#define GP_IO_7 BIT_7 /* IO_7 pin */
+#define GP_IO_6 BIT_6 /* IO_6 pin */
+#define GP_IO_5 BIT_5 /* IO_5 pin */
+#define GP_IO_4 BIT_4 /* IO_4 pin */
+#define GP_IO_3 BIT_3 /* IO_3 pin */
+#define GP_IO_2 BIT_2 /* IO_2 pin */
+#define GP_IO_1 BIT_1 /* IO_1 pin */
+#define GP_IO_0 BIT_0 /* IO_0 pin */
+
+/* B2_I2C_CTRL 32 bit I2C HW Control Register */
+#define I2C_FLAG BIT_31 /* Start read/write if WR */
+#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be RD/WR */
+#define I2C_DEV_SEL (0x7fL<<9) /* Bit 15.. 9: I2C Device Select */
+ /* Bit 8.. 5: reserved */
+#define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */
+#define I2C_DEV_SIZE (7<<1) /* Bit 3.. 1: I2C Device Size */
+#define I2C_025K_DEV (0<<1) /* 0: 256 Bytes or smaller */
+#define I2C_05K_DEV (1<<1) /* 1: 512 Bytes */
+#define I2C_1K_DEV (2<<1) /* 2: 1024 Bytes */
+#define I2C_2K_DEV (3<<1) /* 3: 2048 Bytes */
+#define I2C_4K_DEV (4<<1) /* 4: 4096 Bytes */
+#define I2C_8K_DEV (5<<1) /* 5: 8192 Bytes */
+#define I2C_16K_DEV (6<<1) /* 6: 16384 Bytes */
+#define I2C_32K_DEV (7<<1) /* 7: 32768 Bytes */
+#define I2C_STOP BIT_0 /* Interrupt I2C transfer */
+
+/* B2_I2C_IRQ 32 bit I2C HW IRQ Register */
+ /* Bit 31.. 1 reserved */
+#define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */
+
+/* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */
+ /* Bit 7.. 3: reserved */
+#define I2C_DATA_DIR BIT_2S /* direction of I2C_DATA */
+#define I2C_DATA BIT_1S /* I2C Data Port */
+#define I2C_CLK BIT_0S /* I2C Clock Port */
+
+/*
+ * I2C Address
+ */
+#define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address, (Volt and Temp)*/
+
+
+/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
+ /* Bit 7.. 2: reserved */
+#define BSC_START BIT_1S /* Start Blink Source Counter */
+#define BSC_STOP BIT_0S /* Stop Blink Source Counter */
+
+/* B2_BSC_STAT 8 bit Blink Source Counter Status */
+ /* Bit 7.. 1: reserved */
+#define BSC_SRC BIT_0S /* Blink Source, 0=Off / 1=On */
+
+/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
+#define BSC_T_ON BIT_2S /* Test mode on */
+#define BSC_T_OFF BIT_1S /* Test mode off */
+#define BSC_T_STEP BIT_0S /* Test step */
+
+
+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+
+/* RAM Interface Registers */
+/* B3_RI_CTRL 16 bit RAM Iface Control Register */
+ /* Bit 15..10: reserved */
+#define RI_CLR_RD_PERR BIT_9S /* Clear IRQ RAM Read Parity Err */
+#define RI_CLR_WR_PERR BIT_8S /* Clear IRQ RAM Write Parity Err*/
+ /* Bit 7.. 2: reserved */
+#define RI_RST_CLR BIT_1S /* Clear RAM Interface Reset */
+#define RI_RST_SET BIT_0S /* Set RAM Interface Reset */
+
+/* B3_RI_TEST 8 bit RAM Iface Test Register */
+ /* Bit 7.. 4: reserved */
+#define RI_T_EV BIT_3S /* Timeout Event occurred */
+#define RI_T_ON BIT_2S /* Timeout Timer Test On */
+#define RI_T_OFF BIT_1S /* Timeout Timer Test Off */
+#define RI_T_STEP BIT_0S /* Timeout Timer Step */
+
+/* MAC Arbiter Registers */
+/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
+ /* Bit 15.. 4: reserved */
+#define MA_FOE_ON BIT_3S /* XMAC Fast Output Enable ON */
+#define MA_FOE_OFF BIT_2S /* XMAC Fast Output Enable OFF */
+#define MA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */
+#define MA_RST_SET BIT_0S /* Set MAC Arbiter Reset */
+
+/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
+ /* Bit 15.. 8: reserved */
+#define MA_ENA_REC_TX2 BIT_7S /* Enable Recovery Timer TX2 */
+#define MA_DIS_REC_TX2 BIT_6S /* Disable Recovery Timer TX2 */
+#define MA_ENA_REC_TX1 BIT_5S /* Enable Recovery Timer TX1 */
+#define MA_DIS_REC_TX1 BIT_4S /* Disable Recovery Timer TX1 */
+#define MA_ENA_REC_RX2 BIT_3S /* Enable Recovery Timer RX2 */
+#define MA_DIS_REC_RX2 BIT_2S /* Disable Recovery Timer RX2 */
+#define MA_ENA_REC_RX1 BIT_1S /* Enable Recovery Timer RX1 */
+#define MA_DIS_REC_RX1 BIT_0S /* Disable Recovery Timer RX1 */
+
+/* Packet Arbiter Registers */
+/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
+ /* Bit 15..14: reserved */
+#define PA_CLR_TO_TX2 BIT_13S /* Clear IRQ Packet Timeout TX2 */
+#define PA_CLR_TO_TX1 BIT_12S /* Clear IRQ Packet Timeout TX1 */
+#define PA_CLR_TO_RX2 BIT_11S /* Clear IRQ Packet Timeout RX2 */
+#define PA_CLR_TO_RX1 BIT_10S /* Clear IRQ Packet Timeout RX1 */
+#define PA_ENA_TO_TX2 BIT_9S /* Enable Timeout Timer TX2 */
+#define PA_DIS_TO_TX2 BIT_8S /* Disable Timeout Timer TX2 */
+#define PA_ENA_TO_TX1 BIT_7S /* Enable Timeout Timer TX1 */
+#define PA_DIS_TO_TX1 BIT_6S /* Disable Timeout Timer TX1 */
+#define PA_ENA_TO_RX2 BIT_5S /* Enable Timeout Timer RX2 */
+#define PA_DIS_TO_RX2 BIT_4S /* Disable Timeout Timer RX2 */
+#define PA_ENA_TO_RX1 BIT_3S /* Enable Timeout Timer RX1 */
+#define PA_DIS_TO_RX1 BIT_2S /* Disable Timeout Timer RX1 */
+#define PA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */
+#define PA_RST_SET BIT_0S /* Set MAC Arbiter Reset */
+
+#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
+ PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
+
+/* Rx/Tx Path related Arbiter Test Registers */
+/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
+/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
+/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
+/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
+#define TX2_T_EV BIT_15S /* TX2 Timeout/Recv Event occurred */
+#define TX2_T_ON BIT_14S /* TX2 Timeout/Recv Timer Test On */
+#define TX2_T_OFF BIT_13S /* TX2 Timeout/Recv Timer Tst Off */
+#define TX2_T_STEP BIT_12S /* TX2 Timeout/Recv Timer Step */
+#define TX1_T_EV BIT_11S /* TX1 Timeout/Recv Event occurred */
+#define TX1_T_ON BIT_10S /* TX1 Timeout/Recv Timer Test On */
+#define TX1_T_OFF BIT_9S /* TX1 Timeout/Recv Timer Tst Off */
+#define TX1_T_STEP BIT_8S /* TX1 Timeout/Recv Timer Step */
+#define RX2_T_EV BIT_7S /* RX2 Timeout/Recv Event occurred */
+#define RX2_T_ON BIT_6S /* RX2 Timeout/Recv Timer Test On */
+#define RX2_T_OFF BIT_5S /* RX2 Timeout/Recv Timer Tst Off */
+#define RX2_T_STEP BIT_4S /* RX2 Timeout/Recv Timer Step */
+#define RX1_T_EV BIT_3S /* RX1 Timeout/Recv Event occurred */
+#define RX1_T_ON BIT_2S /* RX1 Timeout/Recv Timer Test On */
+#define RX1_T_OFF BIT_1S /* RX1 Timeout/Recv Timer Tst Off */
+#define RX1_T_STEP BIT_0S /* RX1 Timeout/Recv Timer Step */
+
+
+/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+ /* Bit 31..24: reserved */
+#define TXA_MAX_VAL 0x00ffffffUL/* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+#define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */
+#define TXA_DIS_FSYNC BIT_6S /* Disable force of sync Tx queue */
+#define TXA_ENA_ALLOC BIT_5S /* Enable alloc of free bandwidth */
+#define TXA_DIS_ALLOC BIT_4S /* Disable alloc of free bandwidth */
+#define TXA_START_RC BIT_3S /* Start sync Rate Control */
+#define TXA_STOP_RC BIT_2S /* Stop sync Rate Control */
+#define TXA_ENA_ARB BIT_1S /* Enable Tx Arbiter */
+#define TXA_DIS_ARB BIT_0S /* Disable Tx Arbiter */
+
+/* TXA_TEST 8 bit Tx Arbiter Test Register */
+ /* Bit 7.. 6: reserved */
+#define TXA_INT_T_ON BIT_5S /* Tx Arb Interval Timer Test On */
+#define TXA_INT_T_OFF BIT_4S /* Tx Arb Interval Timer Test Off */
+#define TXA_INT_T_STEP BIT_3S /* Tx Arb Interval Timer Step */
+#define TXA_LIM_T_ON BIT_2S /* Tx Arb Limit Timer Test On */
+#define TXA_LIM_T_OFF BIT_1S /* Tx Arb Limit Timer Test Off */
+#define TXA_LIM_T_STEP BIT_0S /* Tx Arb Limit Timer Step */
+
+/* TXA_STAT 8 bit Tx Arbiter Status Register */
+ /* Bit 7.. 1: reserved */
+#define TXA_PRIO_XS BIT_0S /* sync queue has prio to send */
+
+/* Q_BC 32 bit Current Byte Counter */
+ /* Bit 31..16: reserved */
+#define BC_MAX 0xffff /* Bit 15.. 0: Byte counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+ /* Bit 31..25: reserved */
+#define CSR_SV_IDLE BIT_24 /* BMU SM Idle */
+ /* Bit 23..22: reserved */
+#define CSR_DESC_CLR BIT_21 /* Clear Reset for Descr */
+#define CSR_DESC_SET BIT_20 /* Set Reset for Descr */
+#define CSR_FIFO_CLR BIT_19 /* Clear Reset for FIFO */
+#define CSR_FIFO_SET BIT_18 /* Set Reset for FIFO */
+#define CSR_HPI_RUN BIT_17 /* Release HPI SM */
+#define CSR_HPI_RST BIT_16 /* Reset HPI SM to Idle */
+#define CSR_SV_RUN BIT_15 /* Release Supervisor SM */
+#define CSR_SV_RST BIT_14 /* Reset Supervisor SM */
+#define CSR_DREAD_RUN BIT_13 /* Release Descr Read SM */
+#define CSR_DREAD_RST BIT_12 /* Reset Descr Read SM */
+#define CSR_DWRITE_RUN BIT_11 /* Release Descr Write SM */
+#define CSR_DWRITE_RST BIT_10 /* Reset Descr Write SM */
+#define CSR_TRANS_RUN BIT_9 /* Release Transfer SM */
+#define CSR_TRANS_RST BIT_8 /* Reset Transfer SM */
+#define CSR_ENA_POL BIT_7 /* Enable Descr Polling */
+#define CSR_DIS_POL BIT_6 /* Disable Descr Polling */
+#define CSR_STOP BIT_5 /* Stop Rx/Tx Queue */
+#define CSR_START BIT_4 /* Start Rx/Tx Queue */
+#define CSR_IRQ_CL_P BIT_3 /* (Rx) Clear Parity IRQ */
+#define CSR_IRQ_CL_B BIT_2 /* Clear EOB IRQ */
+#define CSR_IRQ_CL_F BIT_1 /* Clear EOF IRQ */
+#define CSR_IRQ_CL_C BIT_0 /* Clear ERR IRQ */
+
+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+ CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+ CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
+ CSR_TRANS_RUN)
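+
+/*
+ * Typical use (sketch, not taken verbatim from the original sources):
+ * a BMU queue is held in reset with CSR_SET_RESET and is released and
+ * started again with CSR_CLR_RESET followed by CSR_START, e.g.
+ *
+ *	SK_OUT32(IoC, Q_ADDR(Queue, Q_CSR), CSR_SET_RESET);
+ *	SK_OUT32(IoC, Q_ADDR(Queue, Q_CSR), CSR_CLR_RESET);
+ *	SK_OUT32(IoC, Q_ADDR(Queue, Q_CSR), CSR_START);
+ *
+ * assuming the driver's SK_OUT32() I/O macro, IoC handle, and Q_ADDR().
+ */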
+
+/* Q_F 32 bit Flag Register */
+ /* Bit 31..28: reserved */
+#define F_ALM_FULL BIT_27 /* Rx FIFO: almost full */
+#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */
+#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */
+#define F_WM_REACHED BIT_25 /* Watermark reached */
+ /* Bit 24: reserved */
+#define F_FIFO_LEVEL (0x1fL<<16) /* Bit 23..16: # of Qwords in FIFO */
+ /* Bit 15..11: reserved */
+#define F_WATER_MARK 0x0007ffL /* Bit 10.. 0: Watermark */
+
+/* Q_T1 32 bit Test Register 1 */
+/* Holds four State Machine control Bytes */
+#define SM_CTRL_SV_MSK (0xffL<<24) /* Bit 31..24: Control Supervisor SM */
+#define SM_CTRL_RD_MSK (0xffL<<16) /* Bit 23..16: Control Read Desc SM */
+#define SM_CTRL_WR_MSK (0xffL<<8) /* Bit 15.. 8: Control Write Desc SM */
+#define SM_CTRL_TR_MSK 0xffL /* Bit 7.. 0: Control Transfer SM */
+
+/* Q_T1_TR 8 bit Test Register 1 Transfer SM */
+/* Q_T1_WR 8 bit Test Register 1 Write Descriptor SM */
+/* Q_T1_RD 8 bit Test Register 1 Read Descriptor SM */
+/* Q_T1_SV 8 bit Test Register 1 Supervisor SM */
+
+/* The control status byte of each machine looks like ... */
+#define SM_STATE 0xf0 /* Bit 7.. 4: State which shall be loaded */
+#define SM_LOAD BIT_3S /* Load the SM with SM_STATE */
+#define SM_TEST_ON BIT_2S /* Switch on SM Test Mode */
+#define SM_TEST_OFF BIT_1S /* Switch off SM Test Mode */
+#define SM_STEP BIT_0S /* Step the State Machine */
+/* The encoding of the states is not supported by the Diagnostics Tool */
+
+/* Q_T2 32 bit Test Register 2 */
+ /* Bit 31.. 8: reserved */
+#define T2_AC_T_ON BIT_7 /* Address Counter Test Mode on */
+#define T2_AC_T_OFF BIT_6 /* Address Counter Test Mode off */
+#define T2_BC_T_ON BIT_5 /* Byte Counter Test Mode on */
+#define T2_BC_T_OFF BIT_4 /* Byte Counter Test Mode off */
+#define T2_STEP04 BIT_3 /* Inc AC/Dec BC by 4 */
+#define T2_STEP03 BIT_2 /* Inc AC/Dec BC by 3 */
+#define T2_STEP02 BIT_1 /* Inc AC/Dec BC by 2 */
+#define T2_STEP01 BIT_0 /* Inc AC/Dec BC by 1 */
+
+/* Q_T3 32 bit Test Register 3 */
+ /* Bit 31.. 7: reserved */
+#define T3_MUX_MSK (7<<4) /* Bit 6.. 4: Mux Position */
+ /* Bit 3: reserved */
+#define T3_VRAM_MSK 7 /* Bit 2.. 0: Virtual RAM Buffer Address */
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+ /* Bit 31..19: reserved */
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+ /* Bit 7.. 4: reserved */
+#define RB_PC_DEC BIT_3S /* Packet Counter Decrem */
+#define RB_PC_T_ON BIT_2S /* Packet Counter Test On */
+#define RB_PC_T_OFF BIT_1S /* Packet Counter Tst Off */
+#define RB_PC_INC BIT_0S /* Packet Counter Increm */
+
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+ /* Bit 7: reserved */
+#define RB_WP_T_ON BIT_6S /* Write Pointer Test On */
+#define RB_WP_T_OFF BIT_5S /* Write Pointer Test Off */
+#define RB_WP_INC BIT_4S /* Write Pointer Increm */
+ /* Bit 3: reserved */
+#define RB_RP_T_ON BIT_2S /* Read Pointer Test On */
+#define RB_RP_T_OFF BIT_1S /* Read Pointer Test Off */
+#define RB_RP_DEC BIT_0S /* Read Pointer Decrement */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+ /* Bit 7.. 6: reserved */
+#define RB_ENA_STFWD BIT_5S /* Enable Store & Forward */
+#define RB_DIS_STFWD BIT_4S /* Disable Store & Forward */
+#define RB_ENA_OP_MD BIT_3S /* Enable Operation Mode */
+#define RB_DIS_OP_MD BIT_2S /* Disable Operation Mode */
+#define RB_RST_CLR BIT_1S /* Clear RAM Buf STM Reset */
+#define RB_RST_SET BIT_0S /* Set RAM Buf STM Reset */
+
+
+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
+
+/* RX_MFF_EA 32 bit Receive MAC FIFO End Address */
+/* RX_MFF_WP 32 bit Receive MAC FIFO Write Pointer */
+/* RX_MFF_RP 32 bit Receive MAC FIFO Read Pointer */
+/* RX_MFF_PC 32 bit Receive MAC FIFO Packet Counter */
+/* RX_MFF_LEV 32 bit Receive MAC FIFO Level */
+/* TX_MFF_EA 32 bit Transmit MAC FIFO End Address */
+/* TX_MFF_WP 32 bit Transmit MAC FIFO Write Pointer */
+/* TX_MFF_WSP 32 bit Transmit MAC FIFO WR Shadow Pointer */
+/* TX_MFF_RP 32 bit Transmit MAC FIFO Read Pointer */
+/* TX_MFF_PC 32 bit Transmit MAC FIFO Packet Cnt */
+/* TX_MFF_LEV 32 bit Transmit MAC FIFO Level */
+ /* Bit 31.. 6: reserved */
+#define MFF_MSK 0x007fL /* Bit 5.. 0: MAC FIFO Address/Ptr Bits */
+
+/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
+ /* Bit 15..14: reserved */
+#define MFF_ENA_RDY_PAT BIT_13S /* Enable Ready Patch */
+#define MFF_DIS_RDY_PAT BIT_12S /* Disable Ready Patch */
+#define MFF_ENA_TIM_PAT BIT_11S /* Enable Timing Patch */
+#define MFF_DIS_TIM_PAT BIT_10S /* Disable Timing Patch */
+#define MFF_ENA_ALM_FUL BIT_9S /* Enable AlmostFull Sign */
+#define MFF_DIS_ALM_FUL BIT_8S /* Disable AlmostFull Sign */
+#define MFF_ENA_PAUSE BIT_7S /* Enable Pause Signaling */
+#define MFF_DIS_PAUSE BIT_6S /* Disable Pause Signaling */
+#define MFF_ENA_FLUSH BIT_5S /* Enable Frame Flushing */
+#define MFF_DIS_FLUSH BIT_4S /* Disable Frame Flushing */
+#define MFF_ENA_TIST BIT_3S /* Enable Time Stamp Gener */
+#define MFF_DIS_TIST BIT_2S /* Disable Time Stamp Gener */
+#define MFF_CLR_INTIST BIT_1S /* Clear IRQ No Time Stamp */
+#define MFF_CLR_INSTAT BIT_0S /* Clear IRQ No Status */
+
+#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
+
+/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
+#define MFF_CLR_PERR BIT_15S /* Clear Parity Error IRQ */
+ /* Bit 14: reserved */
+#define MFF_ENA_PKT_REC BIT_13S /* Enable Packet Recovery */
+#define MFF_DIS_PKT_REC BIT_12S /* Disable Packet Recovery */
+/* MFF_ENA_TIM_PAT (see RX_MFF_CTRL1) Bit 11: Enable Timing Patch */
+/* MFF_DIS_TIM_PAT (see RX_MFF_CTRL1) Bit 10: Disable Timing Patch */
+/* MFF_ENA_ALM_FUL (see RX_MFF_CTRL1) Bit 9: Enable Almost Full Sign */
+/* MFF_DIS_ALM_FUL (see RX_MFF_CTRL1) Bit 8: Disable Almost Full Sign */
+#define MFF_ENA_W4E BIT_7S /* Enable Wait for Empty */
+#define MFF_DIS_W4E BIT_6S /* Disable Wait for Empty */
+/* MFF_ENA_FLUSH (see RX_MFF_CTRL1) Bit 5: Enable Frame Flushing */
+/* MFF_DIS_FLUSH (see RX_MFF_CTRL1) Bit 4: Disable Frame Flushing */
+#define MFF_ENA_LOOPB BIT_3S /* Enable Loopback */
+#define MFF_DIS_LOOPB BIT_2S /* Disable Loopback */
+#define MFF_CLR_MAC_RST BIT_1S /* Clear XMAC Reset */
+#define MFF_SET_MAC_RST BIT_0S /* Set XMAC Reset */
+
+#define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
+
+/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
+/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
+ /* Bit 7: reserved */
+#define MFF_WSP_T_ON BIT_6S /* Tx: Write Shadow Ptr TestOn */
+#define MFF_WSP_T_OFF BIT_5S /* Tx: Write Shadow Ptr TstOff */
+#define MFF_WSP_INC BIT_4S /* Tx: Write Shadow Ptr Increment */
+#define MFF_PC_DEC BIT_3S /* Packet Counter Decrement */
+#define MFF_PC_T_ON BIT_2S /* Packet Counter Test On */
+#define MFF_PC_T_OFF BIT_1S /* Packet Counter Test Off */
+#define MFF_PC_INC BIT_0S /* Packet Counter Increment */
+
+/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
+/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
+ /* Bit 7: reserved */
+#define MFF_WP_T_ON BIT_6S /* Write Pointer Test On */
+#define MFF_WP_T_OFF BIT_5S /* Write Pointer Test Off */
+#define MFF_WP_INC BIT_4S /* Write Pointer Increm */
+ /* Bit 3: reserved */
+#define MFF_RP_T_ON BIT_2S /* Read Pointer Test On */
+#define MFF_RP_T_OFF BIT_1S /* Read Pointer Test Off */
+#define MFF_RP_DEC BIT_0S /* Read Pointer Decrement */
+
+/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
+/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
+ /* Bit 7..4: reserved */
+#define MFF_ENA_OP_MD BIT_3S /* Enable Operation Mode */
+#define MFF_DIS_OP_MD BIT_2S /* Disable Operation Mode */
+#define MFF_RST_CLR BIT_1S /* Clear MAC FIFO Reset */
+#define MFF_RST_SET BIT_0S /* Set MAC FIFO Reset */
+
+
+/* Link LED Counter Registers (GENESIS only) */
+
+/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
+/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
+/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
+ /* Bit 7.. 3: reserved */
+#define LED_START BIT_2S /* Start Timer */
+#define LED_STOP BIT_1S /* Stop Timer */
+#define LED_STATE BIT_0S /* Rx/Tx: LED State, 1=LED on */
+#define LED_CLR_IRQ BIT_0S /* Lnk: Clear Link IRQ */
+
+/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
+/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
+/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
+ /* Bit 7.. 3: reserved */
+#define LED_T_ON BIT_2S /* LED Counter Test mode On */
+#define LED_T_OFF BIT_1S /* LED Counter Test mode Off */
+#define LED_T_STEP BIT_0S /* LED Counter Step */
+
+/* LNK_LED_REG 8 bit Link LED Register */
+ /* Bit 7.. 6: reserved */
+#define LED_BLK_ON BIT_5S /* Link LED Blinking On */
+#define LED_BLK_OFF BIT_4S /* Link LED Blinking Off */
+#define LED_SYNC_ON BIT_3S /* Use Sync Wire to switch LED */
+#define LED_SYNC_OFF BIT_2S /* Disable Sync Wire Input */
+#define LED_ON BIT_1S /* switch LED on */
+#define LED_OFF BIT_0S /* switch LED off */
+
+/* Receive and Transmit GMAC FIFO Registers (YUKON only) */
+
+/* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */
+/* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. */
+/* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */
+/* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */
+/* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */
+/* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */
+/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
+/* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+/* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */
+/* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+/* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */
+/* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */
+/* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */
+/* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */
+
+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+ /* Bits 31..15: reserved */
+#define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */
+#define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */
+#define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */
+ /* Bit 11: reserved */
+#define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */
+#define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */
+#define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */
+#define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */
+#define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */
+#define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */
+#define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */
+#define GMF_OPER_ON BIT_3 /* Operational Mode On */
+#define GMF_OPER_OFF BIT_2 /* Operational Mode Off */
+#define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */
+#define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */
+
+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
+ /* Bits 31..19: reserved */
+#define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */
+#define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */
+#define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */
+ /* Bits 15..7: same as for RX_GMF_CTRL_T */
+#define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */
+#define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */
+#define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */
+ /* Bits 3..0: same as for RX_GMF_CTRL_T */
+
+#define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON)
+#define GMF_TX_CTRL_DEF GMF_OPER_ON
+
+#define RX_GMF_FL_THR_DEF 0x0a /* Rx GMAC FIFO Flush Threshold default */
+
+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
+ /* Bit 7.. 3: reserved */
+#define GMT_ST_START BIT_2S /* Start Time Stamp Timer */
+#define GMT_ST_STOP BIT_1S /* Stop Time Stamp Timer */
+#define GMT_ST_CLR_IRQ BIT_0S /* Clear Time Stamp Timer IRQ */
+
+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
+ /* Bits 31.. 8: reserved */
+#define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode On */
+#define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */
+#define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */
+#define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */
+#define GMC_PAUSE_ON BIT_3 /* Pause On */
+#define GMC_PAUSE_OFF BIT_2 /* Pause Off */
+#define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */
+#define GMC_RST_SET BIT_0 /* Set GMAC Reset */
+
+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
+ /* Bits 31..29: reserved */
+#define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. Transfer for MDC/MDIO */
+#define GPC_INT_POL_HI BIT_27 /* IRQ Polarity is Active HIGH */
+#define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */
+#define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */
+#define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */
+#define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */
+#define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */
+#define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */
+#define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */
+#define GPC_ANEG_0 BIT_19 /* ANEG[0] */
+#define GPC_ENA_XC BIT_18 /* Enable MDI crossover */
+#define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */
+#define GPC_ANEG_3 BIT_16 /* ANEG[3] */
+#define GPC_ANEG_2 BIT_15 /* ANEG[2] */
+#define GPC_ANEG_1 BIT_14 /* ANEG[1] */
+#define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */
+#define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */
+#define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */
+#define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */
+#define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */
+#define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */
+ /* Bits 7..2: reserved */
+#define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */
+#define GPC_RST_SET BIT_0 /* Set GPHY Reset */
+
+#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | \
+ GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+
+#define GPC_HWCFG_GMII_FIB ( GPC_HWCFG_M_2 | \
+ GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+
+#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | \
+ GPC_ANEG_1 | GPC_ANEG_0)
+
+/* forced speed and duplex mode (don't mix with other ANEG bits) */
+#define GPC_FRC10MBIT_HALF 0
+#define GPC_FRC10MBIT_FULL GPC_ANEG_0
+#define GPC_FRC100MBIT_HALF GPC_ANEG_1
+#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
+
+/* auto-negotiation with limited advertised speeds */
+/* mix only with master/slave settings (for copper) */
+#define GPC_ADV_1000_HALF GPC_ANEG_2
+#define GPC_ADV_1000_FULL GPC_ANEG_3
+#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
+
+/* master/slave settings */
+/* only for copper with 1000 Mbps */
+#define GPC_FORCE_MASTER 0
+#define GPC_FORCE_SLAVE GPC_ANEG_0
+#define GPC_PREF_MASTER GPC_ANEG_1
+#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
+
+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
+#define GM_IS_TX_CO_OV BIT_5 /* Transmit Counter Overflow IRQ */
+#define GM_IS_RX_CO_OV BIT_4 /* Receive Counter Overflow IRQ */
+#define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */
+#define GM_IS_TX_COMPL BIT_2 /* Frame Transmission Complete */
+#define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */
+#define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */
+
+#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | \
+ GM_IS_TX_FF_UR)
+
+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+ /* Bits 15.. 2: reserved */
+#define GMLC_RST_CLR BIT_1S /* Clear GMAC Link Reset */
+#define GMLC_RST_SET BIT_0S /* Set GMAC Link Reset */
+
+
+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
+#define WOL_CTL_LINK_CHG_OCC BIT_15S
+#define WOL_CTL_MAGIC_PKT_OCC BIT_14S
+#define WOL_CTL_PATTERN_OCC BIT_13S
+
+#define WOL_CTL_CLEAR_RESULT BIT_12S
+
+#define WOL_CTL_ENA_PME_ON_LINK_CHG BIT_11S
+#define WOL_CTL_DIS_PME_ON_LINK_CHG BIT_10S
+#define WOL_CTL_ENA_PME_ON_MAGIC_PKT BIT_9S
+#define WOL_CTL_DIS_PME_ON_MAGIC_PKT BIT_8S
+#define WOL_CTL_ENA_PME_ON_PATTERN BIT_7S
+#define WOL_CTL_DIS_PME_ON_PATTERN BIT_6S
+
+#define WOL_CTL_ENA_LINK_CHG_UNIT BIT_5S
+#define WOL_CTL_DIS_LINK_CHG_UNIT BIT_4S
+#define WOL_CTL_ENA_MAGIC_PKT_UNIT BIT_3S
+#define WOL_CTL_DIS_MAGIC_PKT_UNIT BIT_2S
+#define WOL_CTL_ENA_PATTERN_UNIT BIT_1S
+#define WOL_CTL_DIS_PATTERN_UNIT BIT_0S
+
+#define WOL_CTL_DEFAULT \
+ (WOL_CTL_DIS_PME_ON_LINK_CHG | \
+ WOL_CTL_DIS_PME_ON_PATTERN | \
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
+ WOL_CTL_DIS_LINK_CHG_UNIT | \
+ WOL_CTL_DIS_PATTERN_UNIT | \
+ WOL_CTL_DIS_MAGIC_PKT_UNIT)
+
+/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
+#define WOL_CTL_PATT_ENA(x) (BIT_0 << (x))
+
+#define SK_NUM_WOL_PATTERN 7
+#define SK_PATTERN_PER_WORD 4
+#define SK_BITMASK_PATTERN 7
+#define SK_POW_PATTERN_LENGTH 128
+
+#define WOL_LENGTH_MSK 0x7f
+#define WOL_LENGTH_SHIFT 8
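+
+/*
+ * Illustrative sketch (not part of the original header): enabling WOL
+ * pattern match units 0 and 2 via WOL_MATCH_CTL. Whether the register can
+ * be written at this plain offset or needs a per-port offset added is not
+ * shown here; the plain offset is assumed for brevity.
+ *
+ *	SK_OUT8(IoC, WOL_MATCH_CTL,
+ *		(SK_U8)(WOL_CTL_PATT_ENA(0) | WOL_CTL_PATT_ENA(2)));
+ */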
+
+
+/* Receive and Transmit Descriptors ******************************************/
+
+/* Transmit Descriptor struct */
+typedef struct s_HwTxd {
+ SK_U32 volatile TxCtrl; /* Transmit Buffer Control Field */
+ SK_U32 TxNext; /* Physical Address Pointer to the next TxD */
+ SK_U32 TxAdrLo; /* Physical Tx Buffer Address lower dword */
+ SK_U32 TxAdrHi; /* Physical Tx Buffer Address upper dword */
+ SK_U32 TxStat; /* Transmit Frame Status Word */
+#ifndef SK_USE_REV_DESC
+ SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */
+ SK_U16 TxRes1; /* 16 bit reserved field */
+ SK_U16 TxTcpWp; /* TCP Checksum Write Position */
+ SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */
+#else /* SK_USE_REV_DESC */
+ SK_U16 TxRes1; /* 16 bit reserved field */
+ SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */
+ SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */
+ SK_U16 TxTcpWp; /* TCP Checksum Write Position */
+#endif /* SK_USE_REV_DESC */
+ SK_U32 TxRes2; /* 32 bit reserved field */
+} SK_HWTXD;
+
+/* Receive Descriptor struct */
+typedef struct s_HwRxd {
+ SK_U32 volatile RxCtrl; /* Receive Buffer Control Field */
+ SK_U32 RxNext; /* Physical Address Pointer to the next RxD */
+ SK_U32 RxAdrLo; /* Physical Rx Buffer Address lower dword */
+ SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */
+ SK_U32 RxStat; /* Receive Frame Status Word */
+ SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */
+#ifndef SK_USE_REV_DESC
+ SK_U16 RxTcpSum1; /* TCP Checksum 1 */
+ SK_U16 RxTcpSum2; /* TCP Checksum 2 */
+ SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */
+ SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */
+#else /* SK_USE_REV_DESC */
+ SK_U16 RxTcpSum2; /* TCP Checksum 2 */
+ SK_U16 RxTcpSum1; /* TCP Checksum 1 */
+ SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */
+ SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */
+#endif /* SK_USE_REV_DESC */
+} SK_HWRXD;
+
+/*
+ * Drivers which use the reverse descriptor feature (PCI_OUR_REG_2)
+ * should set the define SK_USE_REV_DESC.
+ * Structures are normally not endianness dependent. But in
+ * this case the SK_U16 fields are bound to bit positions inside the
+ * descriptor. RxTcpSum1, for example, must start at bit 0 within the 6th DWord.
+ * The bit positions inside a DWord are of course endianness dependent and
+ * swap if the DWord is swapped by the hardware.
+ */
+
+
+/* Descriptor Bit Definition */
+/* TxCtrl Transmit Buffer Control Field */
+/* RxCtrl Receive Buffer Control Field */
+#define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */
+#define BMU_STF BIT_30 /* Start of Frame */
+#define BMU_EOF BIT_29 /* End of Frame */
+#define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */
+#define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */
+/* TxCtrl specific bits */
+#define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */
+#define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */
+#define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */
+/* RxCtrl specific bits */
+#define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */
+#define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */
+#define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */
+ /* Bit 23..16: BMU Check Opcodes */
+#define BMU_CHECK (0x55L<<16) /* Default BMU check */
+#define BMU_TCP_CHECK (0x56L<<16) /* Descr with TCP ext */
+#define BMU_UDP_CHECK (0x57L<<16) /* Descr with UDP ext (YUKON only) */
+#define BMU_BBC 0xffffL /* Bit 15.. 0: Buffer Byte Counter */
+
+/* TxStat Transmit Frame Status Word */
+/* RxStat Receive Frame Status Word */
+/*
+ * Note: TxStat is reserved for ASIC loopback mode only
+ *
+ * The Bits of the Status words are defined in xmac_ii.h
+ * (see XMR_FS bits)
+ */
+
+/* macros ********************************************************************/
+
+/* Receive and Transmit Queues */
+#define Q_R1 0x0000 /* Receive Queue 1 */
+#define Q_R2 0x0080 /* Receive Queue 2 */
+#define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */
+#define Q_XA1 0x0280 /* Asynchronous Transmit Queue 1 */
+#define Q_XS2 0x0300 /* Synchronous Transmit Queue 2 */
+#define Q_XA2 0x0380 /* Asynchronous Transmit Queue 2 */
+
+/*
+ * Macro Q_ADDR()
+ *
+ * Use this macro to access the Receive and Transmit Queue Registers.
+ *
+ * para:
+ * Queue Queue to access.
+ * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2
+ * Offs Queue register offset.
+ * Values: Q_D, Q_DA_L ... Q_T2, Q_T3
+ *
+ * usage SK_IN32(pAC, Q_ADDR(Q_R2, Q_BC), pVal)
+ */
+#define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs))
+
+/*
+ * Macro RB_ADDR()
+ *
+ * Use this macro to access the RAM Buffer Registers.
+ *
+ * para:
+ * Queue Queue to access.
+ * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2
+ * Offs Queue register offset.
+ * Values: RB_START, RB_END ... RB_LEV, RB_CTRL
+ *
+ * usage SK_IN32(pAC, RB_ADDR(Q_R2, RB_RP), pVal)
+ */
+#define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs))
+
+
+/* MAC Related Registers */
+#define MAC_1 0 /* belongs to the port near the slot */
+#define MAC_2 1 /* belongs to the port far away from the slot */
+
+/*
+ * Macro MR_ADDR()
+ *
+ * Use this macro to access a MAC Related Registers inside the ASIC.
+ *
+ * para:
+ * Mac MAC to access.
+ * Values: MAC_1, MAC_2
+ * Offs MAC register offset.
+ * Values: RX_MFF_EA, RX_MFF_WP ... LNK_LED_REG,
+ * TX_MFF_EA, TX_MFF_WP ... TX_LED_TST
+ *
+ * usage SK_IN32(pAC, MR_ADDR(MAC_1, TX_MFF_EA), pVal)
+ */
+#define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs))
+
+#ifdef SK_LITTLE_ENDIAN
+#define XM_WORD_LO 0
+#define XM_WORD_HI 1
+#else /* !SK_LITTLE_ENDIAN */
+#define XM_WORD_LO 1
+#define XM_WORD_HI 0
+#endif /* !SK_LITTLE_ENDIAN */
+
+
+/*
+ * macros to access the XMAC (GENESIS only)
+ *
+ * XM_IN16(), to read a 16 bit register (e.g. XM_MMU_CMD)
+ * XM_OUT16(), to write a 16 bit register (e.g. XM_MMU_CMD)
+ * XM_IN32(), to read a 32 bit register (e.g. XM_TX_EV_CNT)
+ * XM_OUT32(), to write a 32 bit register (e.g. XM_TX_EV_CNT)
+ * XM_INADDR(), to read a network address register (e.g. XM_SRC_CHK)
+ * XM_OUTADDR(), to write a network address register (e.g. XM_SRC_CHK)
+ * XM_INHASH(), to read the XM_HSM_CHK register
+ * XM_OUTHASH() to write the XM_HSM_CHK register
+ *
+ * para:
+ * Mac XMAC to access values: MAC_1 or MAC_2
+ * IoC I/O context needed for SK I/O macros
+ * Reg XMAC Register to read or write
+ * (p)Val Value or pointer to the value which should be read or written
+ *
+ * usage: XM_OUT16(IoC, MAC_1, XM_MMU_CMD, Value);
+ */
+
+#define XMA(Mac, Reg) \
+ ((BASE_XMAC_1 + (Mac) * (BASE_XMAC_2 - BASE_XMAC_1)) | ((Reg) << 1))
+
+#define XM_IN16(IoC, Mac, Reg, pVal) \
+ SK_IN16((IoC), XMA((Mac), (Reg)), (pVal))
+
+#define XM_OUT16(IoC, Mac, Reg, Val) \
+ SK_OUT16((IoC), XMA((Mac), (Reg)), (Val))
+
+#define XM_IN32(IoC, Mac, Reg, pVal) { \
+ SK_IN16((IoC), XMA((Mac), (Reg)), \
+ (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \
+ SK_IN16((IoC), XMA((Mac), (Reg+2)), \
+ (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \
+}
+
+#define XM_OUT32(IoC, Mac, Reg, Val) { \
+ SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16)(((Val) >> 16) & 0xffffL));\
+}
+
+/* Remember: we are always writing to / reading from LITTLE ENDIAN memory */
+
+#define XM_INADDR(IoC, Mac, Reg, pVal) { \
+ SK_U16 Word; \
+ SK_U8 *pByte; \
+ pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
+ SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \
+ pByte[0] = (SK_U8)(Word & 0x00ff); \
+ pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \
+ pByte[2] = (SK_U8)(Word & 0x00ff); \
+ pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \
+ pByte[4] = (SK_U8)(Word & 0x00ff); \
+ pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
+}
+
+#define XM_OUTADDR(IoC, Mac, Reg, pVal) { \
+ SK_U8 SK_FAR *pByte; \
+ pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
+ SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \
+ (((SK_U16)(pByte[0]) & 0x00ff) | \
+ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \
+ (((SK_U16)(pByte[2]) & 0x00ff) | \
+ (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \
+ (((SK_U16)(pByte[4]) & 0x00ff) | \
+ (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
+}
+
+#define XM_INHASH(IoC, Mac, Reg, pVal) { \
+ SK_U16 Word; \
+ SK_U8 SK_FAR *pByte; \
+ pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
+ SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \
+ pByte[0] = (SK_U8)(Word & 0x00ff); \
+ pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \
+ pByte[2] = (SK_U8)(Word & 0x00ff); \
+ pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \
+ pByte[4] = (SK_U8)(Word & 0x00ff); \
+ pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), XMA((Mac), (Reg+6)), &Word); \
+ pByte[6] = (SK_U8)(Word & 0x00ff); \
+ pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \
+}
+
+#define XM_OUTHASH(IoC, Mac, Reg, pVal) { \
+ SK_U8 SK_FAR *pByte; \
+ pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
+ SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \
+ (((SK_U16)(pByte[0]) & 0x00ff)| \
+ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \
+ (((SK_U16)(pByte[2]) & 0x00ff)| \
+ (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \
+ (((SK_U16)(pByte[4]) & 0x00ff)| \
+ (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), XMA((Mac), (Reg+6)), (SK_U16) \
+ (((SK_U16)(pByte[6]) & 0x00ff)| \
+ (((SK_U16)(pByte[7]) << 8) & 0xff00))); \
+}
+
+/*
+ * macros to access the GMAC (YUKON only)
+ *
+ * GM_IN16(), to read a 16 bit register (e.g. GM_GP_STAT)
+ * GM_OUT16(), to write a 16 bit register (e.g. GM_GP_CTRL)
+ * GM_IN32(), to read a 32 bit register (e.g. GM_)
+ * GM_OUT32(), to write a 32 bit register (e.g. GM_)
+ * GM_INADDR(), to read a network address register (e.g. GM_SRC_ADDR_1L)
+ * GM_OUTADDR(), to write a network address register (e.g. GM_SRC_ADDR_2L)
+ * GM_INHASH(), to read the GM_MC_ADDR_H1 register
+ * GM_OUTHASH() to write the GM_MC_ADDR_H1 register
+ *
+ * para:
+ * Mac GMAC to access values: MAC_1 or MAC_2
+ * IoC I/O context needed for SK I/O macros
+ * Reg GMAC Register to read or write
+ * (p)Val Value or pointer to the value which should be read or written
+ *
+ * usage: GM_OUT16(IoC, MAC_1, GM_GP_CTRL, Value);
+ */
+
+#define GMA(Mac, Reg) \
+ ((BASE_GMAC_1 + (Mac) * (BASE_GMAC_2 - BASE_GMAC_1)) | (Reg))
+
+#define GM_IN16(IoC, Mac, Reg, pVal) \
+ SK_IN16((IoC), GMA((Mac), (Reg)), (pVal))
+
+#define GM_OUT16(IoC, Mac, Reg, Val) \
+ SK_OUT16((IoC), GMA((Mac), (Reg)), (Val))
+
+#define GM_IN32(IoC, Mac, Reg, pVal) { \
+ SK_IN16((IoC), GMA((Mac), (Reg)), \
+ (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \
+ SK_IN16((IoC), GMA((Mac), (Reg+4)), \
+ (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \
+}
+
+#define GM_OUT32(IoC, Mac, Reg, Val) { \
+ SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16)(((Val) >> 16) & 0xffffL));\
+}
+
+#define GM_INADDR(IoC, Mac, Reg, pVal) { \
+ SK_U16 Word; \
+ SK_U8 *pByte; \
+ pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
+ SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \
+ pByte[0] = (SK_U8)(Word & 0x00ff); \
+ pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \
+ pByte[2] = (SK_U8)(Word & 0x00ff); \
+ pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \
+ pByte[4] = (SK_U8)(Word & 0x00ff); \
+ pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
+}
+
+#define GM_OUTADDR(IoC, Mac, Reg, pVal) { \
+ SK_U8 SK_FAR *pByte; \
+ pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
+ SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \
+ (((SK_U16)(pByte[0]) & 0x00ff) | \
+ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \
+ (((SK_U16)(pByte[2]) & 0x00ff) | \
+ (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \
+ (((SK_U16)(pByte[4]) & 0x00ff) | \
+ (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
+}
+
+#define GM_INHASH(IoC, Mac, Reg, pVal) { \
+ SK_U16 Word; \
+ SK_U8 *pByte; \
+ pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
+ SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \
+ pByte[0] = (SK_U8)(Word & 0x00ff); \
+ pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \
+ pByte[2] = (SK_U8)(Word & 0x00ff); \
+ pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \
+ pByte[4] = (SK_U8)(Word & 0x00ff); \
+ pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
+ SK_IN16((IoC), GMA((Mac), (Reg+12)), &Word); \
+ pByte[6] = (SK_U8)(Word & 0x00ff); \
+ pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \
+}
+
+#define GM_OUTHASH(IoC, Mac, Reg, pVal) { \
+ SK_U8 *pByte; \
+ pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
+ SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \
+ (((SK_U16)(pByte[0]) & 0x00ff)| \
+ (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \
+ (((SK_U16)(pByte[2]) & 0x00ff)| \
+ (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \
+ (((SK_U16)(pByte[4]) & 0x00ff)| \
+ (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
+ SK_OUT16((IoC), GMA((Mac), (Reg+12)), (SK_U16) \
+ (((SK_U16)(pByte[6]) & 0x00ff)| \
+ (((SK_U16)(pByte[7]) << 8) & 0xff00))); \
+}
+
+/*
+ * Different MAC Types
+ */
+#define SK_MAC_XMAC 0 /* Xaqti XMAC II */
+#define SK_MAC_GMAC 1 /* Marvell GMAC */
+
+/*
+ * Different PHY Types
+ */
+#define SK_PHY_XMAC 0 /* integrated in XMAC II */
+#define SK_PHY_BCOM 1 /* Broadcom BCM5400 */
+#define SK_PHY_LONE 2 /* Level One LXT1000 */
+#define SK_PHY_NAT 3 /* National DP83891 */
+#define SK_PHY_MARV_COPPER 4 /* Marvell 88E1011S */
+#define SK_PHY_MARV_FIBER 5 /* Marvell 88E1011S working on fiber */
+
+/*
+ * PHY addresses (bits 12..8 of PHY address reg)
+ */
+#define PHY_ADDR_XMAC (0<<8)
+#define PHY_ADDR_BCOM (1<<8)
+#define PHY_ADDR_LONE (3<<8)
+#define PHY_ADDR_NAT (0<<8)
+
+/* GPHY address (bits 15..11 of SMI control reg) */
+#define PHY_ADDR_MARV 0
+
+/*
+ * macros to access the PHY
+ *
+ * PHY_READ() read a 16 bit value from the PHY
+ * PHY_WRITE() write a 16 bit value to the PHY
+ *
+ * para:
+ * IoC I/O context needed for SK I/O macros
+ * pPort Pointer to port struct for PhyAddr
+ * Mac XMAC to access values: MAC_1 or MAC_2
+ * PhyReg PHY Register to read or write
+ * (p)Val Value or pointer to the value which should be read or
+ * written.
+ *
+ * usage: PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, Value);
+ * Warning: a PHY_READ on an uninitialized PHY (PHY still in reset) never
+ * comes back. This is checked in DEBUG mode.
+ */
+#ifndef DEBUG
+#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \
+ SK_U16 Mmu; \
+ \
+ XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
+ XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
+ if ((pPort)->PhyType != SK_PHY_XMAC) { \
+ do { \
+ XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
+ } while ((Mmu & XM_MMU_PHY_RDY) == 0); \
+ XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
+ } \
+}
+#else
+#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \
+ SK_U16 Mmu; \
+ int __i = 0; \
+ \
+ XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
+ XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
+ if ((pPort)->PhyType != SK_PHY_XMAC) { \
+ do { \
+ XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
+ __i++; \
+ if (__i > 100000) { \
+ SK_DBG_PRINTF("*****************************\n"); \
+ SK_DBG_PRINTF("PHY_READ on uninitialized PHY\n"); \
+ SK_DBG_PRINTF("*****************************\n"); \
+ break; \
+ } \
+ } while ((Mmu & XM_MMU_PHY_RDY) == 0); \
+ XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
+ } \
+}
+#endif /* DEBUG */
+
+#define PHY_WRITE(IoC, pPort, Mac, PhyReg, Val) { \
+ SK_U16 Mmu; \
+ \
+ if ((pPort)->PhyType != SK_PHY_XMAC) { \
+ do { \
+ XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
+ } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \
+ } \
+ XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
+ XM_OUT16((IoC), (Mac), XM_PHY_DATA, (Val)); \
+ if ((pPort)->PhyType != SK_PHY_XMAC) { \
+ do { \
+ XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
+ } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \
+ } \
+}
+
+/*
+ * Macro PCI_C()
+ *
+ * Use this macro to access PCI config register from the I/O space.
+ *
+ * para:
+ * Addr PCI configuration register to access.
+ * Values: PCI_VENDOR_ID ... PCI_VPD_ADR_REG,
+ *
+ * usage SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), pVal);
+ */
+#define PCI_C(Addr) (B7_CFG_SPC + (Addr)) /* PCI Config Space */
+
+/*
+ * Macro SK_HW_ADDR(Base, Addr)
+ *
+ * Calculates the effective HW address
+ *
+ * para:
+ * Base I/O or memory base address
+ * Addr Address offset
+ *
+ * usage: May be used in SK_INxx and SK_OUTxx macros
+ * #define SK_IN8(pAC, Addr, pVal) ...\
+ * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr)))
+ */
+#ifdef SK_MEM_MAPPED_IO
+#define SK_HW_ADDR(Base, Addr) ((Base) + (Addr))
+#else /* SK_MEM_MAPPED_IO */
+#define SK_HW_ADDR(Base, Addr) \
+ ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0)))
+#endif /* SK_MEM_MAPPED_IO */
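+
+/*
+ * Example (illustrative only): with I/O mapped registers an offset above
+ * 0x7f is folded into the 128-byte window at 0x80, e.g.
+ * SK_HW_ADDR(Iop, 0x0108) evaluates to Iop + 0x88, because
+ * 0x0108 & 0x7f = 0x08 and 0x0108 >> 7 is non-zero. Selecting the block
+ * that appears in this window is assumed to be done elsewhere.
+ */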
+
+#define SZ_LONG (sizeof(SK_U32))
+
+/*
+ * Macro SK_HWAC_LINK_LED()
+ *
+ * Use this macro to set the link LED mode.
+ * para:
+ * pAC Pointer to adapter context struct
+ * IoC I/O context needed for SK I/O macros
+ * Port Port number
+ * Mode Mode to set for this LED
+ */
+#define SK_HWAC_LINK_LED(pAC, IoC, Port, Mode) \
+ SK_OUT8(IoC, MR_ADDR(Port, LNK_LED_REG), Mode);
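+
+/*
+ * Example (illustrative only): switching the link LED of the first MAC
+ * to the active mode defined in skgeinit.h:
+ *
+ *	SK_HWAC_LINK_LED(pAC, IoC, MAC_1, SK_LED_ACTIVE);
+ */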
+
+
+/* typedefs *******************************************************************/
+
+
+/* function prototypes ********************************************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_SKGEHW_H */
diff --git a/drivers/net/sk98lin/h/skgehwt.h b/drivers/net/sk98lin/h/skgehwt.h
new file mode 100644
index 000000000000..e6b0016a695c
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgehwt.h
@@ -0,0 +1,48 @@
+/******************************************************************************
+ *
+ * Name: skhwt.h
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.7 $
+ * Date: $Date: 2003/09/16 12:55:08 $
+ * Purpose: Defines for the hardware timer functions
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SKGEHWT.H contains all defines and types for the timer functions
+ */
+
+#ifndef _SKGEHWT_H_
+#define _SKGEHWT_H_
+
+/*
+ * SK Hardware Timer
+ * - needed wherever the HWT module is used
+ * - used in the adapter context under the name pAC->Hwt
+ */
+typedef struct s_Hwt {
+ SK_U32 TStart; /* HWT start */
+ SK_U32 TStop; /* HWT stop */
+ int TActive; /* HWT: flag : active/inactive */
+} SK_HWT;
+
+extern void SkHwtInit(SK_AC *pAC, SK_IOC Ioc);
+extern void SkHwtStart(SK_AC *pAC, SK_IOC Ioc, SK_U32 Time);
+extern void SkHwtStop(SK_AC *pAC, SK_IOC Ioc);
+extern SK_U32 SkHwtRead(SK_AC *pAC, SK_IOC Ioc);
+extern void SkHwtIsr(SK_AC *pAC, SK_IOC Ioc);
+#endif /* _SKGEHWT_H_ */
diff --git a/drivers/net/sk98lin/h/skgei2c.h b/drivers/net/sk98lin/h/skgei2c.h
new file mode 100644
index 000000000000..d9b6f6d8dfe2
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgei2c.h
@@ -0,0 +1,210 @@
+/******************************************************************************
+ *
+ * Name: skgei2c.h
+ * Project: Gigabit Ethernet Adapters, TWSI-Module
+ * Version: $Revision: 1.25 $
+ * Date: $Date: 2003/10/20 09:06:05 $
+ * Purpose: Special defines for TWSI
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SKGEI2C.H contains all SK-98xx specific defines for the TWSI handling
+ */
+
+#ifndef _INC_SKGEI2C_H_
+#define _INC_SKGEI2C_H_
+
+/*
+ * Macros to access the B2_I2C_CTRL
+ */
+#define SK_I2C_CTL(IoC, flag, dev, dev_size, reg, burst) \
+ SK_OUT32(IoC, B2_I2C_CTRL,\
+ (flag ? 0x80000000UL : 0x0L) | \
+ (((SK_U32)reg << 16) & I2C_ADDR) | \
+ (((SK_U32)dev << 9) & I2C_DEV_SEL) | \
+ (dev_size & I2C_DEV_SIZE) | \
+ ((burst << 4) & I2C_BURST_LEN))
+
+#define SK_I2C_STOP(IoC) { \
+ SK_U32 I2cCtrl; \
+ SK_IN32(IoC, B2_I2C_CTRL, &I2cCtrl); \
+ SK_OUT32(IoC, B2_I2C_CTRL, I2cCtrl | I2C_STOP); \
+}
+
+#define SK_I2C_GET_CTL(IoC, pI2cCtrl) SK_IN32(IoC, B2_I2C_CTRL, pI2cCtrl)
+
+/*
+ * Macros to access the TWSI SW Registers
+ */
+#define SK_I2C_SET_BIT(IoC, SetBits) { \
+ SK_U8 OrgBits; \
+ SK_IN8(IoC, B2_I2C_SW, &OrgBits); \
+ SK_OUT8(IoC, B2_I2C_SW, OrgBits | (SK_U8)(SetBits)); \
+}
+
+#define SK_I2C_CLR_BIT(IoC, ClrBits) { \
+ SK_U8 OrgBits; \
+ SK_IN8(IoC, B2_I2C_SW, &OrgBits); \
+ SK_OUT8(IoC, B2_I2C_SW, OrgBits & ~((SK_U8)(ClrBits))); \
+}
+
+#define SK_I2C_GET_SW(IoC, pI2cSw) SK_IN8(IoC, B2_I2C_SW, pI2cSw)
+
+/*
+ * define the possible sensor states
+ */
+#define SK_SEN_IDLE 0 /* Idle: sensor not read */
+#define SK_SEN_VALUE 1 /* Value Read cycle */
+#define SK_SEN_VALEXT 2 /* Extended Value Read cycle */
+
+/*
+ * Conversion factor to convert a raw Voltage sensor reading to millivolts
+ * Conversion factor to convert a raw Temperature sensor reading to tenths of a degree Celsius
+ */
+#define SK_LM80_VT_LSB 22 /* 22mV LSB resolution */
+#define SK_LM80_TEMP_LSB 10 /* 1 degree LSB resolution */
+#define SK_LM80_TEMPEXT_LSB 5 /* 0.5 degree LSB resolution for ext. val. */
+
+/*
+ * formula: counter = (22500*60)/(rpm * divisor * pulses/2)
+ * assuming: 6500rpm, 4 pulses, divisor 1
+ */
+#define SK_LM80_FAN_FAKTOR ((22500L*60)/(1*2))
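+
+/*
+ * Worked example (illustrative only): a raw LM80 voltage reading of 150
+ * corresponds to 150 * SK_LM80_VT_LSB = 3300 mV; with divisor 1 and
+ * 4 pulses as assumed above, a fan counter reading of 104 corresponds to
+ * SK_LM80_FAN_FAKTOR / 104, i.e. roughly 6490 rpm.
+ */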
+
+/*
+ * Define sensor management data
+ * Maximum is reached on Genesis copper dual port and Yukon-64
+ * Board specific maximum is in pAC->I2c.MaxSens
+ */
+#define SK_MAX_SENSORS 8 /* maximal no. of installed sensors */
+#define SK_MIN_SENSORS 5 /* minimal no. of installed sensors */
+
+/*
+ * To watch the state machine (SM), the timer is used in two ways
+ * instead of just one as before
+ */
+#define SK_TIMER_WATCH_SM 0 /* Watch the SM to finish in a spec. time */
+#define SK_TIMER_NEW_GAUGING 1 /* Start a new gauging when timer expires */
+
+/*
+ * Defines for the individual thresholds
+ */
+
+/* Temperature sensor */
+#define SK_SEN_TEMP_HIGH_ERR 800 /* Temperature High Err Threshold */
+#define SK_SEN_TEMP_HIGH_WARN 700 /* Temperature High Warn Threshold */
+#define SK_SEN_TEMP_LOW_WARN 100 /* Temperature Low Warn Threshold */
+#define SK_SEN_TEMP_LOW_ERR 0 /* Temperature Low Err Threshold */
+
+/* VCC which should be 5 V */
+#define SK_SEN_PCI_5V_HIGH_ERR 5588 /* Voltage PCI High Err Threshold */
+#define SK_SEN_PCI_5V_HIGH_WARN 5346 /* Voltage PCI High Warn Threshold */
+#define SK_SEN_PCI_5V_LOW_WARN 4664 /* Voltage PCI Low Warn Threshold */
+#define SK_SEN_PCI_5V_LOW_ERR 4422 /* Voltage PCI Low Err Threshold */
+
+/*
+ * VIO may be 5 V or 3.3 V. Initialization takes two parts:
+ * 1. Initialize the lowest lower limit and the highest upper limit.
+ * 2. After the first value is read, correct the upper or the lower limit to
+ *    the appropriate C constant.
+ *
+ * Warning limits are +-5% of the expected voltage.
+ * Error limits are +-10% of the expected voltage.
+ */
+
+/* Bug fix AF: 16.Aug.2001: Correct the init base of LM80 sensor */
+
+#define SK_SEN_PCI_IO_5V_HIGH_ERR 5566 /* + 10% V PCI-IO High Err Threshold */
+#define SK_SEN_PCI_IO_5V_HIGH_WARN 5324 /* + 5% V PCI-IO High Warn Threshold */
+ /* 5000 mVolt */
+#define SK_SEN_PCI_IO_5V_LOW_WARN 4686 /* - 5% V PCI-IO Low Warn Threshold */
+#define SK_SEN_PCI_IO_5V_LOW_ERR 4444 /* - 10% V PCI-IO Low Err Threshold */
+
+#define SK_SEN_PCI_IO_RANGE_LIMITER 4000 /* 4000 mV range delimiter */
+
+/* correction values for the second pass */
+#define SK_SEN_PCI_IO_3V3_HIGH_ERR 3850 /* + 15% V PCI-IO High Err Threshold */
+#define SK_SEN_PCI_IO_3V3_HIGH_WARN 3674 /* + 10% V PCI-IO High Warn Threshold */
+ /* 3300 mVolt */
+#define SK_SEN_PCI_IO_3V3_LOW_WARN 2926 /* - 10% V PCI-IO Low Warn Threshold */
+#define SK_SEN_PCI_IO_3V3_LOW_ERR 2772 /* - 15% V PCI-IO Low Err Threshold */
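+
+/*
+ * Illustrative sketch (not part of the original header) of the second
+ * initialization part described above: the range delimiter decides which
+ * rail is present, and only the limits on that side are corrected.
+ * 'MilliVolt', 'LowWarn', 'LowErr', 'HighWarn' and 'HighErr' are
+ * hypothetical variables.
+ *
+ *	if (MilliVolt > SK_SEN_PCI_IO_RANGE_LIMITER) {
+ *		LowWarn = SK_SEN_PCI_IO_5V_LOW_WARN;	(5 V rail: raise the lower limits)
+ *		LowErr  = SK_SEN_PCI_IO_5V_LOW_ERR;
+ *	} else {
+ *		HighWarn = SK_SEN_PCI_IO_3V3_HIGH_WARN;	(3.3 V rail: lower the upper limits)
+ *		HighErr  = SK_SEN_PCI_IO_3V3_HIGH_ERR;
+ *	}
+ */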
+
+/*
+ * VDD voltage
+ */
+#define SK_SEN_VDD_HIGH_ERR 3630 /* Voltage ASIC High Err Threshold */
+#define SK_SEN_VDD_HIGH_WARN 3476 /* Voltage ASIC High Warn Threshold */
+#define SK_SEN_VDD_LOW_WARN 3146 /* Voltage ASIC Low Warn Threshold */
+#define SK_SEN_VDD_LOW_ERR 2970 /* Voltage ASIC Low Err Threshold */
+
+/*
+ * PHY PLL 3V3 voltage
+ */
+#define SK_SEN_PLL_3V3_HIGH_ERR 3630 /* Voltage PMA High Err Threshold */
+#define SK_SEN_PLL_3V3_HIGH_WARN 3476 /* Voltage PMA High Warn Threshold */
+#define SK_SEN_PLL_3V3_LOW_WARN 3146 /* Voltage PMA Low Warn Threshold */
+#define SK_SEN_PLL_3V3_LOW_ERR 2970 /* Voltage PMA Low Err Threshold */
+
+/*
+ * VAUX (YUKON only)
+ */
+#define SK_SEN_VAUX_3V3_HIGH_ERR 3630 /* Voltage VAUX High Err Threshold */
+#define SK_SEN_VAUX_3V3_HIGH_WARN 3476 /* Voltage VAUX High Warn Threshold */
+#define SK_SEN_VAUX_3V3_LOW_WARN 3146 /* Voltage VAUX Low Warn Threshold */
+#define SK_SEN_VAUX_3V3_LOW_ERR 2970 /* Voltage VAUX Low Err Threshold */
+#define SK_SEN_VAUX_0V_WARN_ERR 0 /* if VAUX not present */
+#define SK_SEN_VAUX_RANGE_LIMITER 1000 /* 1000 mV range delimiter */
+
+/*
+ * PHY 2V5 voltage
+ */
+#define SK_SEN_PHY_2V5_HIGH_ERR 2750 /* Voltage PHY High Err Threshold */
+#define SK_SEN_PHY_2V5_HIGH_WARN 2640 /* Voltage PHY High Warn Threshold */
+#define SK_SEN_PHY_2V5_LOW_WARN 2376 /* Voltage PHY Low Warn Threshold */
+#define SK_SEN_PHY_2V5_LOW_ERR 2222 /* Voltage PHY Low Err Threshold */
+
+/*
+ * ASIC Core 1V5 voltage (YUKON only)
+ */
+#define SK_SEN_CORE_1V5_HIGH_ERR 1650 /* Voltage ASIC Core High Err Threshold */
+#define SK_SEN_CORE_1V5_HIGH_WARN 1575 /* Voltage ASIC Core High Warn Threshold */
+#define SK_SEN_CORE_1V5_LOW_WARN 1425 /* Voltage ASIC Core Low Warn Threshold */
+#define SK_SEN_CORE_1V5_LOW_ERR 1350 /* Voltage ASIC Core Low Err Threshold */
+
+/*
+ * FAN 1 speed
+ */
+/* assuming: 6500rpm +-15%, 4 pulses,
+ * warning at: 80 %
+ * error at: 70 %
+ * no upper limit
+ */
+#define SK_SEN_FAN_HIGH_ERR 20000 /* FAN Speed High Err Threshold */
+#define SK_SEN_FAN_HIGH_WARN 20000 /* FAN Speed High Warn Threshold */
+#define SK_SEN_FAN_LOW_WARN 5200 /* FAN Speed Low Warn Threshold */
+#define SK_SEN_FAN_LOW_ERR 4550 /* FAN Speed Low Err Threshold */
+
+/*
+ * Some Voltages need dynamic thresholds
+ */
+#define SK_SEN_DYN_INIT_NONE 0 /* No dynamic init of thresholds */
+#define SK_SEN_DYN_INIT_PCI_IO 10 /* Init PCI-IO with new thresholds */
+#define SK_SEN_DYN_INIT_VAUX 11 /* Init VAUX with new thresholds */
+
+extern int SkLm80ReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
+#endif /* _INC_SKGEI2C_H_ */
diff --git a/drivers/net/sk98lin/h/skgeinit.h b/drivers/net/sk98lin/h/skgeinit.h
new file mode 100644
index 000000000000..184f47c5a60f
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgeinit.h
@@ -0,0 +1,853 @@
+/******************************************************************************
+ *
+ * Name: skgeinit.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.83 $
+ * Date: $Date: 2003/09/16 14:07:37 $
+ * Purpose: Structures and prototypes for the GE Init Module
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKGEINIT_H_
+#define __INC_SKGEINIT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines ********************************************************************/
+
+#define SK_TEST_VAL 0x11335577UL
+
+/* modifying Link LED behaviour (used with SkGeLinkLED()) */
+#define SK_LNK_OFF LED_OFF
+#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF)
+#define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON)
+#define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON)
+#define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF)
+
+/* parameter 'Mode' when calling SK_HWAC_LINK_LED() */
+#define SK_LED_OFF LED_OFF
+#define SK_LED_ACTIVE (LED_ON | LED_BLK_OFF | LED_SYNC_OFF)
+#define SK_LED_STANDBY (LED_ON | LED_BLK_ON | LED_SYNC_OFF)
+
+/* addressing LED Registers in SkGeXmitLED() */
+#define XMIT_LED_INI 0
+#define XMIT_LED_CNT (RX_LED_VAL - RX_LED_INI)
+#define XMIT_LED_CTRL (RX_LED_CTRL- RX_LED_INI)
+#define XMIT_LED_TST (RX_LED_TST - RX_LED_INI)
+
+/* parameter 'Mode' when calling SkGeXmitLED() */
+#define SK_LED_DIS 0
+#define SK_LED_ENA 1
+#define SK_LED_TST 2
+
+/* Counter and Timer constants, for a host clock of 62.5 MHz */
+#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
+#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
+
+#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
+
+#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
+ /* 215 ms at 78.12 MHz */
+
+#define SK_FACT_62 100 /* is given in percent */
+#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
+#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
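+
+/*
+ * Worked example (illustrative only): 50 ms at 62.5 MHz is
+ * 0.05 s * 62,500,000 ticks/s = 3,125,000 = 0x002faf08 ticks, i.e.
+ * SK_XMIT_DUR. For another host clock the tick count is typically
+ * scaled by SK_FACT_xx / 100, e.g. by 125 / 100 on a 78.12 MHz YUKON.
+ */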
+
+/* Timeout values */
+#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
+#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
+#define SK_PKT_TO_MAX 0xffff /* Maximum value */
+#define SK_RI_TO_53 36 /* RAM interface timeout */
+
+#define SK_PHY_ACC_TO 600000 /* PHY access timeout */
+
+/* RAM Buffer High Pause Threshold values */
+#define SK_RB_ULPP ( 8 * 1024) /* Upper Level in kB/8 */
+#define SK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */
+#define SK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */
+
+#ifndef SK_BMU_RX_WM
+#define SK_BMU_RX_WM 0x600 /* BMU Rx Watermark */
+#endif
+#ifndef SK_BMU_TX_WM
+#define SK_BMU_TX_WM 0x600 /* BMU Tx Watermark */
+#endif
+
+/* XMAC II Rx High Watermark */
+#define SK_XM_RX_HI_WM 0x05aa /* 1450 */
+
+/* XMAC II Tx Threshold */
+#define SK_XM_THR_REDL 0x01fb /* .. for redundant link usage */
+#define SK_XM_THR_SL 0x01fb /* .. for single link adapters */
+#define SK_XM_THR_MULL 0x01fb /* .. for multiple link usage */
+#define SK_XM_THR_JUMBO 0x03fc /* .. for jumbo frame usage */
+
+/* values for GIPortUsage */
+#define SK_RED_LINK 1 /* redundant link usage */
+#define SK_MUL_LINK 2 /* multiple link usage */
+#define SK_JUMBO_LINK 3 /* driver uses jumbo frames */
+
+/* Minimum RAM Buffer Rx Queue Size */
+#define SK_MIN_RXQ_SIZE 16 /* 16 kB */
+
+/* Minimum RAM Buffer Tx Queue Size */
+#define SK_MIN_TXQ_SIZE 16 /* 16 kB */
+
+/* Queue Size units */
+#define QZ_UNITS 0x7
+#define QZ_STEP 8
+
+/* Percentage of queue size from whole memory */
+/* 80 % for receive */
+#define RAM_QUOTA_RX 80L
+/* 0% for sync transfer */
+#define RAM_QUOTA_SYNC 0L
+/* the rest (20%) is taken for async transfer */
+
+/* Round a queue size given in bytes to an 8 kB step; result is in kB */
+#define ROUND_QUEUE_SIZE(SizeInBytes) \
+ ((((unsigned long) (SizeInBytes) + (QZ_STEP*1024L)-1) / 1024) & \
+ ~(QZ_STEP-1))
+
+/* Round a queue size given in kB to an 8 kB step; result is in kB */
+#define ROUND_QUEUE_SIZE_KB(Kilobytes) \
+ ROUND_QUEUE_SIZE((Kilobytes) * 1024L)
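+
+/*
+ * Example (illustrative only): ROUND_QUEUE_SIZE_KB(20) yields 24, i.e.
+ * 20 kB is rounded to the next 8 kB step:
+ * (20 * 1024 + 8191) / 1024 = 27, and 27 & ~7 = 24.
+ */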
+
+/* Types of RAM Buffer Queues */
+#define SK_RX_SRAM_Q 1 /* small receive queue */
+#define SK_RX_BRAM_Q 2 /* big receive queue */
+#define SK_TX_RAM_Q 3 /* small or big transmit queue */
+
+/* parameter 'Dir' when calling SkGeStopPort() */
+#define SK_STOP_TX 1 /* Stops the transmit path, resets the XMAC */
+#define SK_STOP_RX 2 /* Stops the receive path */
+#define SK_STOP_ALL 3 /* Stops Rx and Tx path, resets the XMAC */
+
+/* parameter 'RstMode' when calling SkGeStopPort() */
+#define SK_SOFT_RST 1 /* perform a software reset */
+#define SK_HARD_RST 2 /* perform a hardware reset */
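+
+/*
+ * Example (illustrative only): stopping both paths of a port with a
+ * hardware reset, using the prototype declared below:
+ *
+ *	SkGeStopPort(pAC, IoC, Port, SK_STOP_ALL, SK_HARD_RST);
+ */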
+
+/* Init Levels */
+#define SK_INIT_DATA 0 /* Init level 0: init data structures */
+#define SK_INIT_IO 1 /* Init level 1: init with IOs */
+#define SK_INIT_RUN 2 /* Init level 2: init for run time */
+
+/* Link Mode Parameter */
+#define SK_LMODE_HALF 1 /* Half Duplex Mode */
+#define SK_LMODE_FULL 2 /* Full Duplex Mode */
+#define SK_LMODE_AUTOHALF 3 /* AutoHalf Duplex Mode */
+#define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */
+#define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */
+#define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */
+#define SK_LMODE_INDETERMINATED 7 /* indeterminated */
+
+/* Auto-negotiation timeout in 100ms granularity */
+#define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */
+
+/* Auto-negotiation error codes */
+#define SK_AND_OK 0 /* no error */
+#define SK_AND_OTHER 1 /* other error than below */
+#define SK_AND_DUP_CAP 2 /* Duplex capabilities error */
+
+
+/* Link Speed Capabilities */
+#define SK_LSPEED_CAP_AUTO (1<<0) /* Automatic resolution */
+#define SK_LSPEED_CAP_10MBPS (1<<1) /* 10 Mbps */
+#define SK_LSPEED_CAP_100MBPS (1<<2) /* 100 Mbps */
+#define SK_LSPEED_CAP_1000MBPS (1<<3) /* 1000 Mbps */
+#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* indeterminated */
+
+/* Link Speed Parameter */
+#define SK_LSPEED_AUTO 1 /* Automatic resolution */
+#define SK_LSPEED_10MBPS 2 /* 10 Mbps */
+#define SK_LSPEED_100MBPS 3 /* 100 Mbps */
+#define SK_LSPEED_1000MBPS 4 /* 1000 Mbps */
+#define SK_LSPEED_INDETERMINATED 5 /* indeterminated */
+
+/* Link Speed Current State */
+#define SK_LSPEED_STAT_UNKNOWN 1
+#define SK_LSPEED_STAT_10MBPS 2
+#define SK_LSPEED_STAT_100MBPS 3
+#define SK_LSPEED_STAT_1000MBPS 4
+#define SK_LSPEED_STAT_INDETERMINATED 5
+
+
+/* Link Capability Parameter */
+#define SK_LMODE_CAP_HALF (1<<0) /* Half Duplex Mode */
+#define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */
+#define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */
+#define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */
+#define SK_LMODE_CAP_INDETERMINATED (1<<4) /* indeterminated */
+
+/* Link Mode Current State */
+#define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */
+#define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */
+#define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */
+#define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by Auto-Neg */
+#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by Auto-Neg */
+#define SK_LMODE_STAT_INDETERMINATED 6 /* indeterminated */
+
+/* Flow Control Mode Parameter (and capabilities) */
+#define SK_FLOW_MODE_NONE 1 /* No Flow-Control */
+#define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */
+#define SK_FLOW_MODE_SYMMETRIC 3 /* Both stations may send PAUSE */
+#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both stations may send PAUSE or
+ * just the remote station may send PAUSE
+ */
+#define SK_FLOW_MODE_INDETERMINATED 5 /* indeterminated */
+
+/* Flow Control Status Parameter */
+#define SK_FLOW_STAT_NONE 1 /* No Flow Control */
+#define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */
+#define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */
+#define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */
+#define SK_FLOW_STAT_INDETERMINATED 5 /* indeterminated */
+
+/* Master/Slave Mode Capabilities */
+#define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */
+#define SK_MS_CAP_MASTER (1<<1) /* This station is master */
+#define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */
+#define SK_MS_CAP_INDETERMINATED (1<<3) /* indeterminated */
+
+/* Set Master/Slave Mode Parameter (and capabilities) */
+#define SK_MS_MODE_AUTO 1 /* Automatic resolution */
+#define SK_MS_MODE_MASTER 2 /* This station is master */
+#define SK_MS_MODE_SLAVE 3 /* This station is slave */
+#define SK_MS_MODE_INDETERMINATED 4 /* indeterminated */
+
+/* Master/Slave Status Parameter */
+#define SK_MS_STAT_UNSET 1 /* The M/S status is not set */
+#define SK_MS_STAT_MASTER 2 /* This station is master */
+#define SK_MS_STAT_SLAVE 3 /* This station is slave */
+#define SK_MS_STAT_FAULT 4 /* M/S resolution failed */
+#define SK_MS_STAT_INDETERMINATED 5 /* indeterminated */
+
+/* parameter 'Mode' when calling SkXmSetRxCmd() */
+#define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of Rx frames */
+#define SK_STRIP_FCS_OFF (1<<1) /* Disable FCS stripping of Rx frames */
+#define SK_STRIP_PAD_ON (1<<2) /* Enable pad byte stripping of Rx fr */
+#define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of Rx fr */
+#define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */
+#define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */
+#define SK_BIG_PK_OK_ON (1<<6) /* Don't set Rx Error bit for big frames */
+#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */
+#define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */
+#define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */
+
+/* parameter 'Para' when calling SkMacSetRxTxEn() */
+#define SK_MAC_LOOPB_ON (1<<0) /* Enable MAC Loopback Mode */
+#define SK_MAC_LOOPB_OFF (1<<1) /* Disable MAC Loopback Mode */
+#define SK_PHY_LOOPB_ON (1<<2) /* Enable PHY Loopback Mode */
+#define SK_PHY_LOOPB_OFF (1<<3) /* Disable PHY Loopback Mode */
+#define SK_PHY_FULLD_ON (1<<4) /* Enable GMII Full Duplex */
+#define SK_PHY_FULLD_OFF (1<<5) /* Disable GMII Full Duplex */
+
+/* States of PState */
+#define SK_PRT_RESET 0 /* the port is reset */
+#define SK_PRT_STOP 1 /* the port is stopped (similar to SW reset) */
+#define SK_PRT_INIT 2 /* the port is initialized */
+#define SK_PRT_RUN 3 /* the port has an active link */
+
+/* PHY power down modes */
+#define PHY_PM_OPERATIONAL_MODE 0 /* PHY operational mode */
+#define PHY_PM_DEEP_SLEEP 1 /* coma mode --> minimal power */
+#define PHY_PM_IEEE_POWER_DOWN 2 /* IEEE 22.2.4.1.5 compl. power down */
+#define PHY_PM_ENERGY_DETECT 3 /* energy detect */
+#define PHY_PM_ENERGY_DETECT_PLUS 4 /* energy detect plus */
+
+/* Default receive frame limit for Workaround of XMAC Errata */
+#define SK_DEF_RX_WA_LIM SK_CONSTU64(100)
+
+/* values for GILedBlinkCtrl (LED Blink Control) */
+#define SK_ACT_LED_BLINK (1<<0) /* Active LED blinking */
+#define SK_DUP_LED_NORMAL (1<<1) /* Duplex LED normal */
+#define SK_LED_LINK100_ON (1<<2) /* Link 100M LED on */
+
+/* Link Partner Status */
+#define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */
+#define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */
+#define SK_LIPA_AUTO 2 /* Link partner is in auto-negotiation state */
+
+/* Maximum Restarts before restart is ignored (3Com WA) */
+#define SK_MAX_LRESTART 3 /* Max. 3 times the link is restarted */
+
+/* Max. Auto-neg. timeouts before link detection in sense mode is reset */
+#define SK_MAX_ANEG_TO 10 /* Max. 10 times the sense mode is reset */
+
+/* structures *****************************************************************/
+
+/*
+ * MAC specific functions
+ */
+typedef struct s_GeMacFunc {
+ int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port);
+ int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port,
+ SK_U16 StatAddr, SK_U32 SK_FAR *pVal);
+ int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port);
+ int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port,
+ SK_U16 IStatus, SK_U64 SK_FAR *pVal);
+} SK_GEMACFUNC;
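+
+/*
+ * Illustrative sketch (not part of the original header): these function
+ * pointers are called through the SK_GEINIT instance in the adapter
+ * context (assumed here to be named GIni), so XMAC/GMAC specific code
+ * stays behind one interface:
+ *
+ *	(void)pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, Port);
+ */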
+
+/*
+ * Port Structure
+ */
+typedef struct s_GePort {
+#ifndef SK_DIAG
+ SK_TIMER PWaTimer; /* Workaround Timer */
+ SK_TIMER HalfDupChkTimer;
+#endif /* SK_DIAG */
+ SK_U32 PPrevShorts; /* Previous Short Counter checking */
+ SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */
+ SK_U64 PPrevRx; /* Previous RxOk Counter checking */
+ SK_U64 PRxLim; /* Previous RxOk Counter checking */
+ SK_U64 LastOctets; /* For half duplex hang check */
+ int PLinkResCt; /* Link Restart Counter */
+ int PAutoNegTimeOut;/* Auto-negotiation timeout current value */
+ int PAutoNegTOCt; /* Auto-negotiation Timeout Counter */
+ int PRxQSize; /* Port Rx Queue Size in kB */
+ int PXSQSize; /* Port Synchronous Transmit Queue Size in kB */
+ int PXAQSize; /* Port Asynchronous Transmit Queue Size in kB */
+ SK_U32 PRxQRamStart; /* Receive Queue RAM Buffer Start Address */
+ SK_U32 PRxQRamEnd; /* Receive Queue RAM Buffer End Address */
+ SK_U32 PXsQRamStart; /* Sync Tx Queue RAM Buffer Start Address */
+ SK_U32 PXsQRamEnd; /* Sync Tx Queue RAM Buffer End Address */
+ SK_U32 PXaQRamStart; /* Async Tx Queue RAM Buffer Start Address */
+ SK_U32 PXaQRamEnd; /* Async Tx Queue RAM Buffer End Address */
+ SK_U32 PRxOverCnt; /* Receive Overflow Counter */
+ int PRxQOff; /* Rx Queue Address Offset */
+ int PXsQOff; /* Synchronous Tx Queue Address Offset */
+ int PXaQOff; /* Asynchronous Tx Queue Address Offset */
+ int PhyType; /* PHY used on this port */
+ int PState; /* Port status (reset, stop, init, run) */
+ SK_U16 PhyId1; /* PHY Id1 on this port */
+ SK_U16 PhyAddr; /* MDIO/MDC PHY address */
+ SK_U16 PIsave; /* Saved Interrupt status word */
+ SK_U16 PSsave; /* Saved PHY status word */
+	SK_U16	PGmANegAdv;	/* Saved GPhy AutoNegAdvertisement register */
+ SK_BOOL PHWLinkUp; /* The hardware Link is up (wiring) */
+ SK_BOOL PLinkBroken; /* Is Link broken ? */
+ SK_BOOL PCheckPar; /* Do we check for parity errors ? */
+ SK_BOOL HalfDupTimerActive;
+ SK_U8 PLinkCap; /* Link Capabilities */
+ SK_U8 PLinkModeConf; /* Link Mode configured */
+ SK_U8 PLinkMode; /* Link Mode currently used */
+ SK_U8 PLinkModeStatus;/* Link Mode Status */
+ SK_U8 PLinkSpeedCap; /* Link Speed Capabilities(10/100/1000 Mbps) */
+ SK_U8 PLinkSpeed; /* configured Link Speed (10/100/1000 Mbps) */
+ SK_U8 PLinkSpeedUsed; /* current Link Speed (10/100/1000 Mbps) */
+ SK_U8 PFlowCtrlCap; /* Flow Control Capabilities */
+ SK_U8 PFlowCtrlMode; /* Flow Control Mode */
+ SK_U8 PFlowCtrlStatus;/* Flow Control Status */
+ SK_U8 PMSCap; /* Master/Slave Capabilities */
+ SK_U8 PMSMode; /* Master/Slave Mode */
+ SK_U8 PMSStatus; /* Master/Slave Status */
+ SK_BOOL PAutoNegFail; /* Auto-negotiation fail flag */
+ SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */
+ SK_U8 PCableLen; /* Cable Length */
+ SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */
+ SK_U8 PMdiPairSts[4]; /* MDI[0..3] Pair Diagnostic Status */
+ SK_U8 PPhyPowerState; /* PHY current power state */
+ int PMacColThres; /* MAC Collision Threshold */
+ int PMacJamLen; /* MAC Jam length */
+ int PMacJamIpgVal; /* MAC Jam IPG */
+ int PMacJamIpgData; /* MAC IPG Jam to Data */
+ int PMacIpgData; /* MAC Data IPG */
+ SK_BOOL PMacLimit4; /* reset collision counter and backoff algorithm */
+} SK_GEPORT;
+
+/*
+ * Gigabit Ethernet Initialization Struct
+ * (has to be included in the adapter context)
+ */
+typedef struct s_GeInit {
+ int GIChipId; /* Chip Identification Number */
+ int GIChipRev; /* Chip Revision Number */
+ SK_U8 GIPciHwRev; /* PCI HW Revision Number */
+ SK_BOOL GIGenesis; /* Genesis adapter ? */
+ SK_BOOL GIYukon; /* YUKON-A1/Bx chip */
+ SK_BOOL GIYukonLite; /* YUKON-Lite chip */
+ SK_BOOL GICopperType; /* Copper Type adapter ? */
+ SK_BOOL GIPciSlot64; /* 64-bit PCI Slot */
+ SK_BOOL GIPciClock66; /* 66 MHz PCI Clock */
+ SK_BOOL GIVauxAvail; /* VAUX available (YUKON) */
+ SK_BOOL GIYukon32Bit; /* 32-Bit YUKON adapter */
+ SK_U16 GILedBlinkCtrl; /* LED Blink Control */
+ int GIMacsFound; /* Number of MACs found on this adapter */
+ int GIMacType; /* MAC Type used on this adapter */
+	int	GIHstClkFact;	/* Host Clock Factor (HstClk / 62.5 * 100) */
+ int GIPortUsage; /* Driver Port Usage */
+ int GILevel; /* Initialization Level completed */
+ int GIRamSize; /* The RAM size of the adapter in kB */
+ int GIWolOffs; /* WOL Register Offset (HW-Bug in Rev. A) */
+ SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */
+ SK_U32 GIPollTimerVal; /* Descr. Poll Timer Init Val (HstClk ticks) */
+ SK_U32 GIValIrqMask; /* Value for Interrupt Mask */
+ SK_U32 GITimeStampCnt; /* Time Stamp High Counter (YUKON only) */
+ SK_GEPORT GP[SK_MAX_MACS];/* Port Dependent Information */
+	SK_GEMACFUNC GIFunc;	/* MAC dependent functions */
+} SK_GEINIT;
+
+/*
+ * Error numbers and messages for skxmac2.c and skgeinit.c
+ */
+#define SKERR_HWI_E001 (SK_ERRBASE_HWINIT)
+#define SKERR_HWI_E001MSG "SkXmClrExactAddr() has got illegal parameters"
+#define SKERR_HWI_E002 (SKERR_HWI_E001+1)
+#define SKERR_HWI_E002MSG "SkGeInit(): Level 1 call missing"
+#define SKERR_HWI_E003 (SKERR_HWI_E002+1)
+#define SKERR_HWI_E003MSG "SkGeInit() called with illegal init Level"
+#define SKERR_HWI_E004 (SKERR_HWI_E003+1)
+#define SKERR_HWI_E004MSG "SkGeInitPort(): Queue Size illegal configured"
+#define SKERR_HWI_E005 (SKERR_HWI_E004+1)
+#define SKERR_HWI_E005MSG "SkGeInitPort(): cannot init running ports"
+#define SKERR_HWI_E006 (SKERR_HWI_E005+1)
+#define SKERR_HWI_E006MSG "SkGeMacInit(): PState does not match HW state"
+#define SKERR_HWI_E007 (SKERR_HWI_E006+1)
+#define SKERR_HWI_E007MSG "SkXmInitDupMd() called with invalid Dup Mode"
+#define SKERR_HWI_E008 (SKERR_HWI_E007+1)
+#define SKERR_HWI_E008MSG "SkXmSetRxCmd() called with invalid Mode"
+#define SKERR_HWI_E009 (SKERR_HWI_E008+1)
+#define SKERR_HWI_E009MSG "SkGeCfgSync() called although PXSQSize zero"
+#define SKERR_HWI_E010 (SKERR_HWI_E009+1)
+#define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters"
+#define SKERR_HWI_E011 (SKERR_HWI_E010+1)
+#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size too small"
+#define SKERR_HWI_E012 (SKERR_HWI_E011+1)
+#define SKERR_HWI_E012MSG "SkGeInitPort(): invalid Queue Size specified"
+#define SKERR_HWI_E013 (SKERR_HWI_E012+1)
+#define SKERR_HWI_E013MSG "SkGeInitPort(): cfg changed for running queue"
+#define SKERR_HWI_E014 (SKERR_HWI_E013+1)
+#define SKERR_HWI_E014MSG "SkGeInitPort(): unknown GIPortUsage specified"
+#define SKERR_HWI_E015 (SKERR_HWI_E014+1)
+#define SKERR_HWI_E015MSG "Illegal Link mode parameter"
+#define SKERR_HWI_E016 (SKERR_HWI_E015+1)
+#define SKERR_HWI_E016MSG "Illegal Flow control mode parameter"
+#define SKERR_HWI_E017 (SKERR_HWI_E016+1)
+#define SKERR_HWI_E017MSG "Illegal value specified for GIPollTimerVal"
+#define SKERR_HWI_E018 (SKERR_HWI_E017+1)
+#define SKERR_HWI_E018MSG "FATAL: SkGeStopPort() does not terminate (Tx)"
+#define SKERR_HWI_E019 (SKERR_HWI_E018+1)
+#define SKERR_HWI_E019MSG "Illegal Speed parameter"
+#define SKERR_HWI_E020 (SKERR_HWI_E019+1)
+#define SKERR_HWI_E020MSG "Illegal Master/Slave parameter"
+#define SKERR_HWI_E021 (SKERR_HWI_E020+1)
+#define SKERR_HWI_E021MSG "MacUpdateStats(): cannot update statistic counter"
+#define SKERR_HWI_E022 (SKERR_HWI_E021+1)
+#define SKERR_HWI_E022MSG "MacStatistic(): illegal statistic base address"
+#define SKERR_HWI_E023 (SKERR_HWI_E022+1)
+#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size too small"
+#define SKERR_HWI_E024 (SKERR_HWI_E023+1)
+#define SKERR_HWI_E024MSG "FATAL: SkGeStopPort() does not terminate (Rx)"
+#define SKERR_HWI_E025 (SKERR_HWI_E024+1)
+#define SKERR_HWI_E025MSG ""
+
+/* function prototypes ********************************************************/
+
+#ifndef SK_KR_PROTO
+
+/*
+ * public functions in skgeinit.c
+ */
+extern void SkGePollRxD(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL PollRxD);
+
+extern void SkGePollTxD(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL PollTxD);
+
+extern void SkGeYellowLED(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int State);
+
+extern int SkGeCfgSync(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_U32 IntTime,
+ SK_U32 LimCount,
+ int SyncMode);
+
+extern void SkGeLoadLnkSyncCnt(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_U32 CntVal);
+
+extern void SkGeStopPort(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Dir,
+ int RstMode);
+
+extern int SkGeInit(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Level);
+
+extern void SkGeDeInit(
+ SK_AC *pAC,
+ SK_IOC IoC);
+
+extern int SkGeInitPort(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkGeXmitLED(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Led,
+ int Mode);
+
+extern void SkGeInitRamIface(
+ SK_AC *pAC,
+ SK_IOC IoC);
+
+extern int SkGeInitAssignRamToQueues(
+ SK_AC *pAC,
+ int ActivePort,
+ SK_BOOL DualNet);
+
+/*
+ * public functions in skxmac2.c
+ */
+extern void SkMacRxTxDisable(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacSoftRst(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacHardRst(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacClearRst(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkXmInitMac(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkGmInitMac(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacInitPhy(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL DoLoop);
+
+extern void SkMacIrqDisable(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacFlushTxFifo(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacFlushRxFifo(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacIrq(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern int SkMacAutoNegDone(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacAutoNegLipaPhy(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_U16 IStatus);
+
+extern void SkMacSetRxTxEn(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Para);
+
+extern int SkMacRxTxEnable(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkMacPromiscMode(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL Enable);
+
+extern void SkMacHashing(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL Enable);
+
+extern void SkXmPhyRead(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 SK_FAR *pVal);
+
+extern void SkXmPhyWrite(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 Val);
+
+extern void SkGmPhyRead(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 SK_FAR *pVal);
+
+extern void SkGmPhyWrite(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 Val);
+
+extern void SkXmClrExactAddr(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int StartNum,
+ int StopNum);
+
+extern void SkXmInitDupMd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkXmInitPauseMd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+extern void SkXmAutoNegLipaXmac(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_U16 IStatus);
+
+extern int SkXmUpdateStats(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port);
+
+extern int SkGmUpdateStats(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port);
+
+extern int SkXmMacStatistic(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port,
+ SK_U16 StatAddr,
+ SK_U32 SK_FAR *pVal);
+
+extern int SkGmMacStatistic(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port,
+ SK_U16 StatAddr,
+ SK_U32 SK_FAR *pVal);
+
+extern int SkXmResetCounter(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port);
+
+extern int SkGmResetCounter(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port);
+
+extern int SkXmOverflowStatus(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port,
+ SK_U16 IStatus,
+ SK_U64 SK_FAR *pStatus);
+
+extern int SkGmOverflowStatus(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ unsigned int Port,
+ SK_U16 MacStatus,
+ SK_U64 SK_FAR *pStatus);
+
+extern int SkGmCableDiagStatus(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL StartTest);
+
+extern int SkGmEnterLowPowerMode(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_U8 Mode);
+
+extern int SkGmLeaveLowPowerMode(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port);
+
+#ifdef SK_DIAG
+extern void SkGePhyRead(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 *pVal);
+
+extern void SkGePhyWrite(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Addr,
+ SK_U16 Val);
+
+extern void SkMacSetRxCmd(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ int Mode);
+extern void SkMacCrcGener(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL Enable);
+extern void SkMacTimeStamp(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL Enable);
+extern void SkXmSendCont(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Port,
+ SK_BOOL Enable);
+#endif /* SK_DIAG */
+
+#else /* SK_KR_PROTO */
+
+/*
+ * public functions in skgeinit.c
+ */
+extern void SkGePollRxD();
+extern void SkGePollTxD();
+extern void SkGeYellowLED();
+extern int SkGeCfgSync();
+extern void SkGeLoadLnkSyncCnt();
+extern void SkGeStopPort();
+extern int SkGeInit();
+extern void SkGeDeInit();
+extern int SkGeInitPort();
+extern void SkGeXmitLED();
+extern void SkGeInitRamIface();
+extern int SkGeInitAssignRamToQueues();
+
+/*
+ * public functions in skxmac2.c
+ */
+extern void SkMacRxTxDisable();
+extern void SkMacSoftRst();
+extern void SkMacHardRst();
+extern void SkMacClearRst();
+extern void SkMacInitPhy();
+extern int SkMacRxTxEnable();
+extern void SkMacPromiscMode();
+extern void SkMacHashing();
+extern void SkMacIrqDisable();
+extern void SkMacFlushTxFifo();
+extern void SkMacFlushRxFifo();
+extern void SkMacIrq();
+extern int SkMacAutoNegDone();
+extern void SkMacAutoNegLipaPhy();
+extern void SkMacSetRxTxEn();
+extern void SkXmInitMac();
+extern void SkXmPhyRead();
+extern void SkXmPhyWrite();
+extern void SkGmInitMac();
+extern void SkGmPhyRead();
+extern void SkGmPhyWrite();
+extern void SkXmClrExactAddr();
+extern void SkXmInitDupMd();
+extern void SkXmInitPauseMd();
+extern void SkXmAutoNegLipaXmac();
+extern int SkXmUpdateStats();
+extern int SkGmUpdateStats();
+extern int SkXmMacStatistic();
+extern int SkGmMacStatistic();
+extern int SkXmResetCounter();
+extern int SkGmResetCounter();
+extern int SkXmOverflowStatus();
+extern int SkGmOverflowStatus();
+extern int SkGmCableDiagStatus();
+extern int SkGmEnterLowPowerMode();
+extern int SkGmLeaveLowPowerMode();
+
+#ifdef SK_DIAG
+extern void SkGePhyRead();
+extern void SkGePhyWrite();
+extern void SkMacSetRxCmd();
+extern void SkMacCrcGener();
+extern void SkMacTimeStamp();
+extern void SkXmSendCont();
+#endif /* SK_DIAG */
+
+#endif /* SK_KR_PROTO */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_SKGEINIT_H_ */
diff --git a/drivers/net/sk98lin/h/skgepnm2.h b/drivers/net/sk98lin/h/skgepnm2.h
new file mode 100644
index 000000000000..ddd304f1a48b
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgepnm2.h
@@ -0,0 +1,334 @@
+/*****************************************************************************
+ *
+ * Name: skgepnm2.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.36 $
+ * Date: $Date: 2003/05/23 12:45:13 $
+ * Purpose: Defines for Private Network Management Interface
+ *
+ ****************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SKGEPNM2_H_
+#define _SKGEPNM2_H_
+
+/*
+ * General definitions
+ */
+#define SK_PNMI_CHIPSET_XMAC 1 /* XMAC11800FP */
+#define SK_PNMI_CHIPSET_YUKON 2 /* YUKON */
+
+#define SK_PNMI_BUS_PCI 1 /* PCI bus*/
+
+/*
+ * Actions
+ */
+#define SK_PNMI_ACT_IDLE 1
+#define SK_PNMI_ACT_RESET 2
+#define SK_PNMI_ACT_SELFTEST 3
+#define SK_PNMI_ACT_RESETCNT 4
+
+/*
+ * VPD related defines
+ */
+
+#define SK_PNMI_VPD_RW 1
+#define SK_PNMI_VPD_RO 2
+
+#define SK_PNMI_VPD_OK 0
+#define SK_PNMI_VPD_NOTFOUND 1
+#define SK_PNMI_VPD_CUT 2
+#define SK_PNMI_VPD_TIMEOUT 3
+#define SK_PNMI_VPD_FULL 4
+#define SK_PNMI_VPD_NOWRITE 5
+#define SK_PNMI_VPD_FATAL 6
+
+#define SK_PNMI_VPD_IGNORE 0
+#define SK_PNMI_VPD_CREATE 1
+#define SK_PNMI_VPD_DELETE 2
+
+
+/*
+ * RLMT related defines
+ */
+#define SK_PNMI_DEF_RLMT_CHG_THRES 240 /* 4 changes per minute */
+
+
+/*
+ * VCT internal status values
+ */
+#define SK_PNMI_VCT_PENDING 32
+#define SK_PNMI_VCT_TEST_DONE 64
+#define SK_PNMI_VCT_LINK 128
+
+/*
+ * Internal table definitions
+ */
+#define SK_PNMI_GET 0
+#define SK_PNMI_PRESET 1
+#define SK_PNMI_SET 2
+
+#define SK_PNMI_RO 0
+#define SK_PNMI_RW 1
+#define SK_PNMI_WO 2
+
+typedef struct s_OidTabEntry {
+ SK_U32 Id;
+ SK_U32 InstanceNo;
+ unsigned int StructSize;
+ unsigned int Offset;
+ int Access;
+ int (* Func)(SK_AC *pAc, SK_IOC pIo, int action,
+ SK_U32 Id, char* pBuf, unsigned int* pLen,
+ SK_U32 Instance, unsigned int TableIndex,
+ SK_U32 NetNumber);
+ SK_U16 Param;
+} SK_PNMI_TAB_ENTRY;
+
+
+/*
+ * Trap lengths
+ */
+#define SK_PNMI_TRAP_SIMPLE_LEN 17
+#define SK_PNMI_TRAP_SENSOR_LEN_BASE 46
+#define SK_PNMI_TRAP_RLMT_CHANGE_LEN 23
+#define SK_PNMI_TRAP_RLMT_PORT_LEN 23
+
+/*
+ * Number of MAC types supported
+ */
+#define SK_PNMI_MAC_TYPES (SK_MAC_GMAC + 1)
+
+/*
+ * MAC statistic data list (overall set for MAC types used)
+ */
+enum SK_MACSTATS {
+ SK_PNMI_HTX = 0,
+ SK_PNMI_HTX_OCTET,
+ SK_PNMI_HTX_OCTETHIGH = SK_PNMI_HTX_OCTET,
+ SK_PNMI_HTX_OCTETLOW,
+ SK_PNMI_HTX_BROADCAST,
+ SK_PNMI_HTX_MULTICAST,
+ SK_PNMI_HTX_UNICAST,
+ SK_PNMI_HTX_BURST,
+ SK_PNMI_HTX_PMACC,
+ SK_PNMI_HTX_MACC,
+ SK_PNMI_HTX_COL,
+ SK_PNMI_HTX_SINGLE_COL,
+ SK_PNMI_HTX_MULTI_COL,
+ SK_PNMI_HTX_EXCESS_COL,
+ SK_PNMI_HTX_LATE_COL,
+ SK_PNMI_HTX_DEFFERAL,
+ SK_PNMI_HTX_EXCESS_DEF,
+ SK_PNMI_HTX_UNDERRUN,
+ SK_PNMI_HTX_CARRIER,
+ SK_PNMI_HTX_UTILUNDER,
+ SK_PNMI_HTX_UTILOVER,
+ SK_PNMI_HTX_64,
+ SK_PNMI_HTX_127,
+ SK_PNMI_HTX_255,
+ SK_PNMI_HTX_511,
+ SK_PNMI_HTX_1023,
+ SK_PNMI_HTX_MAX,
+ SK_PNMI_HTX_LONGFRAMES,
+ SK_PNMI_HTX_SYNC,
+ SK_PNMI_HTX_SYNC_OCTET,
+ SK_PNMI_HTX_RESERVED,
+
+ SK_PNMI_HRX,
+ SK_PNMI_HRX_OCTET,
+ SK_PNMI_HRX_OCTETHIGH = SK_PNMI_HRX_OCTET,
+ SK_PNMI_HRX_OCTETLOW,
+ SK_PNMI_HRX_BADOCTET,
+ SK_PNMI_HRX_BADOCTETHIGH = SK_PNMI_HRX_BADOCTET,
+ SK_PNMI_HRX_BADOCTETLOW,
+ SK_PNMI_HRX_BROADCAST,
+ SK_PNMI_HRX_MULTICAST,
+ SK_PNMI_HRX_UNICAST,
+ SK_PNMI_HRX_PMACC,
+ SK_PNMI_HRX_MACC,
+ SK_PNMI_HRX_PMACC_ERR,
+ SK_PNMI_HRX_MACC_UNKWN,
+ SK_PNMI_HRX_BURST,
+ SK_PNMI_HRX_MISSED,
+ SK_PNMI_HRX_FRAMING,
+ SK_PNMI_HRX_UNDERSIZE,
+ SK_PNMI_HRX_OVERFLOW,
+ SK_PNMI_HRX_JABBER,
+ SK_PNMI_HRX_CARRIER,
+ SK_PNMI_HRX_IRLENGTH,
+ SK_PNMI_HRX_SYMBOL,
+ SK_PNMI_HRX_SHORTS,
+ SK_PNMI_HRX_RUNT,
+ SK_PNMI_HRX_TOO_LONG,
+ SK_PNMI_HRX_FCS,
+ SK_PNMI_HRX_CEXT,
+ SK_PNMI_HRX_UTILUNDER,
+ SK_PNMI_HRX_UTILOVER,
+ SK_PNMI_HRX_64,
+ SK_PNMI_HRX_127,
+ SK_PNMI_HRX_255,
+ SK_PNMI_HRX_511,
+ SK_PNMI_HRX_1023,
+ SK_PNMI_HRX_MAX,
+ SK_PNMI_HRX_LONGFRAMES,
+
+ SK_PNMI_HRX_RESERVED,
+
+ SK_PNMI_MAX_IDX /* NOTE: Ensure SK_PNMI_CNT_NO is set to this value */
+};
+
+/*
+ * MAC specific data
+ */
+typedef struct s_PnmiStatAddr {
+ SK_U16 Reg; /* MAC register containing the value */
+ SK_BOOL GetOffset; /* TRUE: Offset managed by PNMI (call GetStatVal())*/
+} SK_PNMI_STATADDR;
+
+
+/*
+ * SK_PNMI_STRUCT_DATA copy offset evaluation macros
+ */
+#define SK_PNMI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))
+#define SK_PNMI_MAI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))
+#define SK_PNMI_VPD_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_VPD *)0)->e))
+#define SK_PNMI_SEN_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_SENSOR *)0)->e))
+#define SK_PNMI_CHK_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CHECKSUM *)0)->e))
+#define SK_PNMI_STA_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STAT *)0)->e))
+#define SK_PNMI_CNF_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CONF *)0)->e))
+#define SK_PNMI_RLM_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT *)0)->e))
+#define SK_PNMI_MON_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT_MONITOR *)0)->e))
+#define SK_PNMI_TRP_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_TRAP *)0)->e))
+
+#define SK_PNMI_SET_STAT(b,s,o) {SK_U32 Val32; char *pVal; \
+ Val32 = (s); \
+ pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \
+ &(((SK_PNMI_STRUCT_DATA *)0)-> \
+ ReturnStatus.ErrorStatus)); \
+ SK_PNMI_STORE_U32(pVal, Val32); \
+ Val32 = (o); \
+ pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \
+ &(((SK_PNMI_STRUCT_DATA *)0)-> \
+ ReturnStatus.ErrorOffset)); \
+ SK_PNMI_STORE_U32(pVal, Val32);}
+
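/*
 * Illustrative sketch (not part of the commit): the SK_PNMI_*_OFF macros and
 * SK_PNMI_SET_STAT above compute member offsets by taking the address of a
 * member through a NULL-based struct pointer, a hand-rolled offsetof().
 * The demo struct below is invented for illustration only; standard C gets
 * the same result from offsetof() in <stddef.h>.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_return_status {
	unsigned int error_status;
	unsigned int error_offset;
};

struct demo_struct_data {
	unsigned int mgmt_db_version;
	struct demo_return_status return_status;
	unsigned int vpd_free_bytes;
};

/* Classic pre-offsetof() idiom, same shape as SK_PNMI_OFF(); strictly
 * speaking undefined behaviour in ISO C, but what compilers of the era
 * (and this driver) relied on. */
#define DEMO_OFF(e)	((size_t)&(((struct demo_struct_data *)0)->e))

int main(void)
{
	/* Both lines print the byte offset of return_status.error_offset. */
	printf("hand-rolled: %zu\n", DEMO_OFF(return_status.error_offset));
	printf("offsetof():  %zu\n",
	       offsetof(struct demo_struct_data, return_status.error_offset));
	return 0;
}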
+/*
+ * Time macros
+ */
+#ifndef SK_PNMI_HUNDREDS_SEC
+#if SK_TICKS_PER_SEC == 100
+#define SK_PNMI_HUNDREDS_SEC(t) (t)
+#else
+#define SK_PNMI_HUNDREDS_SEC(t) (((t) * 100) / (SK_TICKS_PER_SEC))
+#endif /* !SK_TICKS_PER_SEC */
+#endif /* !SK_PNMI_HUNDREDS_SEC */
+
+/*
+ * Macros to work around alignment problems
+ */
+#ifndef SK_PNMI_STORE_U16
+#define SK_PNMI_STORE_U16(p,v) {*(char *)(p) = *((char *)&(v)); \
+ *((char *)(p) + 1) = \
+ *(((char *)&(v)) + 1);}
+#endif
+
+#ifndef SK_PNMI_STORE_U32
+#define SK_PNMI_STORE_U32(p,v) {*(char *)(p) = *((char *)&(v)); \
+ *((char *)(p) + 1) = \
+ *(((char *)&(v)) + 1); \
+ *((char *)(p) + 2) = \
+ *(((char *)&(v)) + 2); \
+ *((char *)(p) + 3) = \
+ *(((char *)&(v)) + 3);}
+#endif
+
+#ifndef SK_PNMI_STORE_U64
+#define SK_PNMI_STORE_U64(p,v) {*(char *)(p) = *((char *)&(v)); \
+ *((char *)(p) + 1) = \
+ *(((char *)&(v)) + 1); \
+ *((char *)(p) + 2) = \
+ *(((char *)&(v)) + 2); \
+ *((char *)(p) + 3) = \
+ *(((char *)&(v)) + 3); \
+ *((char *)(p) + 4) = \
+ *(((char *)&(v)) + 4); \
+ *((char *)(p) + 5) = \
+ *(((char *)&(v)) + 5); \
+ *((char *)(p) + 6) = \
+ *(((char *)&(v)) + 6); \
+ *((char *)(p) + 7) = \
+ *(((char *)&(v)) + 7);}
+#endif
+
+#ifndef SK_PNMI_READ_U16
+#define SK_PNMI_READ_U16(p,v) {*((char *)&(v)) = *(char *)(p); \
+ *(((char *)&(v)) + 1) = \
+ *((char *)(p) + 1);}
+#endif
+
+#ifndef SK_PNMI_READ_U32
+#define SK_PNMI_READ_U32(p,v) {*((char *)&(v)) = *(char *)(p); \
+ *(((char *)&(v)) + 1) = \
+ *((char *)(p) + 1); \
+ *(((char *)&(v)) + 2) = \
+ *((char *)(p) + 2); \
+ *(((char *)&(v)) + 3) = \
+ *((char *)(p) + 3);}
+#endif
+
+#ifndef SK_PNMI_READ_U64
+#define SK_PNMI_READ_U64(p,v) {*((char *)&(v)) = *(char *)(p); \
+ *(((char *)&(v)) + 1) = \
+ *((char *)(p) + 1); \
+ *(((char *)&(v)) + 2) = \
+ *((char *)(p) + 2); \
+ *(((char *)&(v)) + 3) = \
+ *((char *)(p) + 3); \
+ *(((char *)&(v)) + 4) = \
+ *((char *)(p) + 4); \
+ *(((char *)&(v)) + 5) = \
+ *((char *)(p) + 5); \
+ *(((char *)&(v)) + 6) = \
+ *((char *)(p) + 6); \
+ *(((char *)&(v)) + 7) = \
+ *((char *)(p) + 7);}
+#endif
+
+/*
+ * Macros for Debug
+ */
+#ifdef DEBUG
+
+#define SK_PNMI_CHECKFLAGS(vSt) {if (pAC->Pnmi.MacUpdatedFlag > 0 || \
+ pAC->Pnmi.RlmtUpdatedFlag > 0 || \
+ pAC->Pnmi.SirqUpdatedFlag > 0) { \
+ SK_DBG_MSG(pAC, \
+ SK_DBGMOD_PNMI, \
+ SK_DBGCAT_CTRL, \
+ ("PNMI: ERR: %s MacUFlag=%d, RlmtUFlag=%d, SirqUFlag=%d\n", \
+ vSt, \
+ pAC->Pnmi.MacUpdatedFlag, \
+ pAC->Pnmi.RlmtUpdatedFlag, \
+ pAC->Pnmi.SirqUpdatedFlag))}}
+
+#else /* !DEBUG */
+
+#define SK_PNMI_CHECKFLAGS(vSt) /* Nothing */
+
+#endif /* !DEBUG */
+
+#endif /* _SKGEPNM2_H_ */
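/*
 * Illustrative sketch (not part of the commit): the SK_PNMI_STORE_U16/U32/U64
 * and SK_PNMI_READ_* macros in skgepnm2.h above copy values one byte at a
 * time so that packed, possibly unaligned buffers never trigger unaligned
 * word accesses. Standalone demo of the same technique with standard
 * fixed-width types instead of SK_U32; memcpy() is the usual modern
 * equivalent.
 */
#include <stdint.h>
#include <stdio.h>

static void store_u32(void *p, uint32_t v)	/* cf. SK_PNMI_STORE_U32 */
{
	unsigned char *dst = p;
	const unsigned char *src = (const unsigned char *)&v;

	dst[0] = src[0];
	dst[1] = src[1];
	dst[2] = src[2];
	dst[3] = src[3];
}

static uint32_t read_u32(const void *p)		/* cf. SK_PNMI_READ_U32 */
{
	uint32_t v;
	unsigned char *dst = (unsigned char *)&v;
	const unsigned char *src = p;

	dst[0] = src[0];
	dst[1] = src[1];
	dst[2] = src[2];
	dst[3] = src[3];
	return v;
}

int main(void)
{
	unsigned char buf[8];

	store_u32(buf + 1, 0x12345678u);	/* buf + 1 is deliberately misaligned */
	printf("0x%08x\n", (unsigned)read_u32(buf + 1));	/* prints 0x12345678 */
	return 0;
}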
diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h
new file mode 100644
index 000000000000..3b2773e6f822
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgepnmi.h
@@ -0,0 +1,966 @@
+/*****************************************************************************
+ *
+ * Name: skgepnmi.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.62 $
+ * Date: $Date: 2003/08/15 12:31:52 $
+ * Purpose: Defines for Private Network Management Interface
+ *
+ ****************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SKGEPNMI_H_
+#define _SKGEPNMI_H_
+
+/*
+ * Include dependencies
+ */
+#include "h/sktypes.h"
+#include "h/skerror.h"
+#include "h/sktimer.h"
+#include "h/ski2c.h"
+#include "h/skaddr.h"
+#include "h/skrlmt.h"
+#include "h/skvpd.h"
+
+/*
+ * Management Database Version
+ */
+#define SK_PNMI_MDB_VERSION 0x00030001 /* 3.1 */
+
+
+/*
+ * Event definitions
+ */
+#define SK_PNMI_EVT_SIRQ_OVERFLOW 1 /* Counter overflow */
+#define SK_PNMI_EVT_SEN_WAR_LOW 2 /* Lower war thres exceeded */
+#define SK_PNMI_EVT_SEN_WAR_UPP 3 /* Upper war thres exceeded */
+#define SK_PNMI_EVT_SEN_ERR_LOW 4 /* Lower err thres exceeded */
+#define SK_PNMI_EVT_SEN_ERR_UPP 5 /* Upper err thres exceeded */
+#define SK_PNMI_EVT_CHG_EST_TIMER 6 /* Timer event for RLMT Chg */
+#define SK_PNMI_EVT_UTILIZATION_TIMER 7 /* Timer event for Utiliza. */
+#define SK_PNMI_EVT_CLEAR_COUNTER 8 /* Clear statistic counters */
+#define SK_PNMI_EVT_XMAC_RESET 9 /* XMAC will be reset */
+
+#define SK_PNMI_EVT_RLMT_PORT_UP 10 /* Port came logically up */
+#define SK_PNMI_EVT_RLMT_PORT_DOWN 11 /* Port went logically down */
+#define SK_PNMI_EVT_RLMT_SEGMENTATION 13 /* Two SP root bridges found */
+#define SK_PNMI_EVT_RLMT_ACTIVE_DOWN 14 /* Port went logically down */
+#define SK_PNMI_EVT_RLMT_ACTIVE_UP 15 /* Port came logically up */
+#define SK_PNMI_EVT_RLMT_SET_NETS 16 /* 1st parameter is the number of nets:
+ 1 = single net; 2 = dual net */
+#define SK_PNMI_EVT_VCT_RESET 17 /* VCT port reset timer event started with SET. */
+
+
+/*
+ * Return values
+ */
+#define SK_PNMI_ERR_OK 0
+#define SK_PNMI_ERR_GENERAL 1
+#define SK_PNMI_ERR_TOO_SHORT 2
+#define SK_PNMI_ERR_BAD_VALUE 3
+#define SK_PNMI_ERR_READ_ONLY 4
+#define SK_PNMI_ERR_UNKNOWN_OID 5
+#define SK_PNMI_ERR_UNKNOWN_INST 6
+#define SK_PNMI_ERR_UNKNOWN_NET 7
+#define SK_PNMI_ERR_NOT_SUPPORTED 10
+
+
+/*
+ * Return values of driver reset function SK_DRIVER_RESET() and
+ * driver event function SK_DRIVER_EVENT()
+ */
+#define SK_PNMI_ERR_OK 0
+#define SK_PNMI_ERR_FAIL 1
+
+
+/*
+ * Return values of driver test function SK_DRIVER_SELFTEST()
+ */
+#define SK_PNMI_TST_UNKNOWN (1 << 0)
+#define SK_PNMI_TST_TRANCEIVER (1 << 1)
+#define SK_PNMI_TST_ASIC (1 << 2)
+#define SK_PNMI_TST_SENSOR (1 << 3)
+#define SK_PNMI_TST_POWERMGMT (1 << 4)
+#define SK_PNMI_TST_PCI (1 << 5)
+#define SK_PNMI_TST_MAC (1 << 6)
+
+
+/*
+ * RLMT specific definitions
+ */
+#define SK_PNMI_RLMT_STATUS_STANDBY 1
+#define SK_PNMI_RLMT_STATUS_ACTIVE 2
+#define SK_PNMI_RLMT_STATUS_ERROR 3
+
+#define SK_PNMI_RLMT_LSTAT_PHY_DOWN 1
+#define SK_PNMI_RLMT_LSTAT_AUTONEG 2
+#define SK_PNMI_RLMT_LSTAT_LOG_DOWN 3
+#define SK_PNMI_RLMT_LSTAT_LOG_UP 4
+#define SK_PNMI_RLMT_LSTAT_INDETERMINATED 5
+
+#define SK_PNMI_RLMT_MODE_CHK_LINK (SK_RLMT_CHECK_LINK)
+#define SK_PNMI_RLMT_MODE_CHK_RX (SK_RLMT_CHECK_LOC_LINK)
+#define SK_PNMI_RLMT_MODE_CHK_SPT (SK_RLMT_CHECK_SEG)
+/* #define SK_PNMI_RLMT_MODE_CHK_EX */
+
+/*
+ * OID definition
+ */
+#ifndef _NDIS_ /* Check, whether NDIS already included OIDs */
+
+#define OID_GEN_XMIT_OK 0x00020101
+#define OID_GEN_RCV_OK 0x00020102
+#define OID_GEN_XMIT_ERROR 0x00020103
+#define OID_GEN_RCV_ERROR 0x00020104
+#define OID_GEN_RCV_NO_BUFFER 0x00020105
+
+/* #define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 */
+#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
+/* #define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 */
+#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
+/* #define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 */
+#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
+/* #define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 */
+#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
+/* #define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 */
+#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
+/* #define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B */
+#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
+#define OID_GEN_RCV_CRC_ERROR 0x0002020D
+#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
+
+#define OID_802_3_PERMANENT_ADDRESS 0x01010101
+#define OID_802_3_CURRENT_ADDRESS 0x01010102
+/* #define OID_802_3_MULTICAST_LIST 0x01010103 */
+/* #define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 */
+/* #define OID_802_3_MAC_OPTIONS 0x01010105 */
+
+#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
+#define OID_802_3_XMIT_DEFERRED 0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
+#define OID_802_3_RCV_OVERRUN 0x01020203
+#define OID_802_3_XMIT_UNDERRUN 0x01020204
+#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
+
+/*
+ * PnP and PM OIDs
+ */
+#ifdef SK_POWER_MGMT
+#define OID_PNP_CAPABILITIES 0xFD010100
+#define OID_PNP_SET_POWER 0xFD010101
+#define OID_PNP_QUERY_POWER 0xFD010102
+#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
+#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
+#define OID_PNP_ENABLE_WAKE_UP 0xFD010106
+#endif /* SK_POWER_MGMT */
+
+#endif /* _NDIS_ */
+
+#define OID_SKGE_MDB_VERSION 0xFF010100
+#define OID_SKGE_SUPPORTED_LIST 0xFF010101
+#define OID_SKGE_VPD_FREE_BYTES 0xFF010102
+#define OID_SKGE_VPD_ENTRIES_LIST 0xFF010103
+#define OID_SKGE_VPD_ENTRIES_NUMBER 0xFF010104
+#define OID_SKGE_VPD_KEY 0xFF010105
+#define OID_SKGE_VPD_VALUE 0xFF010106
+#define OID_SKGE_VPD_ACCESS 0xFF010107
+#define OID_SKGE_VPD_ACTION 0xFF010108
+
+#define OID_SKGE_PORT_NUMBER 0xFF010110
+#define OID_SKGE_DEVICE_TYPE 0xFF010111
+#define OID_SKGE_DRIVER_DESCR 0xFF010112
+#define OID_SKGE_DRIVER_VERSION 0xFF010113
+#define OID_SKGE_HW_DESCR 0xFF010114
+#define OID_SKGE_HW_VERSION 0xFF010115
+#define OID_SKGE_CHIPSET 0xFF010116
+#define OID_SKGE_ACTION 0xFF010117
+#define OID_SKGE_RESULT 0xFF010118
+#define OID_SKGE_BUS_TYPE 0xFF010119
+#define OID_SKGE_BUS_SPEED 0xFF01011A
+#define OID_SKGE_BUS_WIDTH 0xFF01011B
+/* 0xFF01011C unused */
+#define OID_SKGE_DIAG_ACTION 0xFF01011D
+#define OID_SKGE_DIAG_RESULT 0xFF01011E
+#define OID_SKGE_MTU 0xFF01011F
+#define OID_SKGE_PHYS_CUR_ADDR 0xFF010120
+#define OID_SKGE_PHYS_FAC_ADDR 0xFF010121
+#define OID_SKGE_PMD 0xFF010122
+#define OID_SKGE_CONNECTOR 0xFF010123
+#define OID_SKGE_LINK_CAP 0xFF010124
+#define OID_SKGE_LINK_MODE 0xFF010125
+#define OID_SKGE_LINK_MODE_STATUS 0xFF010126
+#define OID_SKGE_LINK_STATUS 0xFF010127
+#define OID_SKGE_FLOWCTRL_CAP 0xFF010128
+#define OID_SKGE_FLOWCTRL_MODE 0xFF010129
+#define OID_SKGE_FLOWCTRL_STATUS 0xFF01012A
+#define OID_SKGE_PHY_OPERATION_CAP 0xFF01012B
+#define OID_SKGE_PHY_OPERATION_MODE 0xFF01012C
+#define OID_SKGE_PHY_OPERATION_STATUS 0xFF01012D
+#define OID_SKGE_MULTICAST_LIST 0xFF01012E
+#define OID_SKGE_CURRENT_PACKET_FILTER 0xFF01012F
+
+#define OID_SKGE_TRAP 0xFF010130
+#define OID_SKGE_TRAP_NUMBER 0xFF010131
+
+#define OID_SKGE_RLMT_MODE 0xFF010140
+#define OID_SKGE_RLMT_PORT_NUMBER 0xFF010141
+#define OID_SKGE_RLMT_PORT_ACTIVE 0xFF010142
+#define OID_SKGE_RLMT_PORT_PREFERRED 0xFF010143
+#define OID_SKGE_INTERMEDIATE_SUPPORT 0xFF010160
+
+#define OID_SKGE_SPEED_CAP 0xFF010170
+#define OID_SKGE_SPEED_MODE 0xFF010171
+#define OID_SKGE_SPEED_STATUS 0xFF010172
+
+#define OID_SKGE_BOARDLEVEL 0xFF010180
+
+#define OID_SKGE_SENSOR_NUMBER 0xFF020100
+#define OID_SKGE_SENSOR_INDEX 0xFF020101
+#define OID_SKGE_SENSOR_DESCR 0xFF020102
+#define OID_SKGE_SENSOR_TYPE 0xFF020103
+#define OID_SKGE_SENSOR_VALUE 0xFF020104
+#define OID_SKGE_SENSOR_WAR_THRES_LOW 0xFF020105
+#define OID_SKGE_SENSOR_WAR_THRES_UPP 0xFF020106
+#define OID_SKGE_SENSOR_ERR_THRES_LOW 0xFF020107
+#define OID_SKGE_SENSOR_ERR_THRES_UPP 0xFF020108
+#define OID_SKGE_SENSOR_STATUS 0xFF020109
+#define OID_SKGE_SENSOR_WAR_CTS 0xFF02010A
+#define OID_SKGE_SENSOR_ERR_CTS 0xFF02010B
+#define OID_SKGE_SENSOR_WAR_TIME 0xFF02010C
+#define OID_SKGE_SENSOR_ERR_TIME 0xFF02010D
+
+#define OID_SKGE_CHKSM_NUMBER 0xFF020110
+#define OID_SKGE_CHKSM_RX_OK_CTS 0xFF020111
+#define OID_SKGE_CHKSM_RX_UNABLE_CTS 0xFF020112
+#define OID_SKGE_CHKSM_RX_ERR_CTS 0xFF020113
+#define OID_SKGE_CHKSM_TX_OK_CTS 0xFF020114
+#define OID_SKGE_CHKSM_TX_UNABLE_CTS 0xFF020115
+
+#define OID_SKGE_STAT_TX 0xFF020120
+#define OID_SKGE_STAT_TX_OCTETS 0xFF020121
+#define OID_SKGE_STAT_TX_BROADCAST 0xFF020122
+#define OID_SKGE_STAT_TX_MULTICAST 0xFF020123
+#define OID_SKGE_STAT_TX_UNICAST 0xFF020124
+#define OID_SKGE_STAT_TX_LONGFRAMES 0xFF020125
+#define OID_SKGE_STAT_TX_BURST 0xFF020126
+#define OID_SKGE_STAT_TX_PFLOWC 0xFF020127
+#define OID_SKGE_STAT_TX_FLOWC 0xFF020128
+#define OID_SKGE_STAT_TX_SINGLE_COL 0xFF020129
+#define OID_SKGE_STAT_TX_MULTI_COL 0xFF02012A
+#define OID_SKGE_STAT_TX_EXCESS_COL 0xFF02012B
+#define OID_SKGE_STAT_TX_LATE_COL 0xFF02012C
+#define OID_SKGE_STAT_TX_DEFFERAL 0xFF02012D
+#define OID_SKGE_STAT_TX_EXCESS_DEF 0xFF02012E
+#define OID_SKGE_STAT_TX_UNDERRUN 0xFF02012F
+#define OID_SKGE_STAT_TX_CARRIER 0xFF020130
+/* #define OID_SKGE_STAT_TX_UTIL 0xFF020131 */
+#define OID_SKGE_STAT_TX_64 0xFF020132
+#define OID_SKGE_STAT_TX_127 0xFF020133
+#define OID_SKGE_STAT_TX_255 0xFF020134
+#define OID_SKGE_STAT_TX_511 0xFF020135
+#define OID_SKGE_STAT_TX_1023 0xFF020136
+#define OID_SKGE_STAT_TX_MAX 0xFF020137
+#define OID_SKGE_STAT_TX_SYNC 0xFF020138
+#define OID_SKGE_STAT_TX_SYNC_OCTETS 0xFF020139
+#define OID_SKGE_STAT_RX 0xFF02013A
+#define OID_SKGE_STAT_RX_OCTETS 0xFF02013B
+#define OID_SKGE_STAT_RX_BROADCAST 0xFF02013C
+#define OID_SKGE_STAT_RX_MULTICAST 0xFF02013D
+#define OID_SKGE_STAT_RX_UNICAST 0xFF02013E
+#define OID_SKGE_STAT_RX_PFLOWC 0xFF02013F
+#define OID_SKGE_STAT_RX_FLOWC 0xFF020140
+#define OID_SKGE_STAT_RX_PFLOWC_ERR 0xFF020141
+#define OID_SKGE_STAT_RX_FLOWC_UNKWN 0xFF020142
+#define OID_SKGE_STAT_RX_BURST 0xFF020143
+#define OID_SKGE_STAT_RX_MISSED 0xFF020144
+#define OID_SKGE_STAT_RX_FRAMING 0xFF020145
+#define OID_SKGE_STAT_RX_OVERFLOW 0xFF020146
+#define OID_SKGE_STAT_RX_JABBER 0xFF020147
+#define OID_SKGE_STAT_RX_CARRIER 0xFF020148
+#define OID_SKGE_STAT_RX_IR_LENGTH 0xFF020149
+#define OID_SKGE_STAT_RX_SYMBOL 0xFF02014A
+#define OID_SKGE_STAT_RX_SHORTS 0xFF02014B
+#define OID_SKGE_STAT_RX_RUNT 0xFF02014C
+#define OID_SKGE_STAT_RX_CEXT 0xFF02014D
+#define OID_SKGE_STAT_RX_TOO_LONG 0xFF02014E
+#define OID_SKGE_STAT_RX_FCS 0xFF02014F
+/* #define OID_SKGE_STAT_RX_UTIL 0xFF020150 */
+#define OID_SKGE_STAT_RX_64 0xFF020151
+#define OID_SKGE_STAT_RX_127 0xFF020152
+#define OID_SKGE_STAT_RX_255 0xFF020153
+#define OID_SKGE_STAT_RX_511 0xFF020154
+#define OID_SKGE_STAT_RX_1023 0xFF020155
+#define OID_SKGE_STAT_RX_MAX 0xFF020156
+#define OID_SKGE_STAT_RX_LONGFRAMES 0xFF020157
+
+#define OID_SKGE_RLMT_CHANGE_CTS 0xFF020160
+#define OID_SKGE_RLMT_CHANGE_TIME 0xFF020161
+#define OID_SKGE_RLMT_CHANGE_ESTIM 0xFF020162
+#define OID_SKGE_RLMT_CHANGE_THRES 0xFF020163
+
+#define OID_SKGE_RLMT_PORT_INDEX 0xFF020164
+#define OID_SKGE_RLMT_STATUS 0xFF020165
+#define OID_SKGE_RLMT_TX_HELLO_CTS 0xFF020166
+#define OID_SKGE_RLMT_RX_HELLO_CTS 0xFF020167
+#define OID_SKGE_RLMT_TX_SP_REQ_CTS 0xFF020168
+#define OID_SKGE_RLMT_RX_SP_CTS 0xFF020169
+
+#define OID_SKGE_RLMT_MONITOR_NUMBER 0xFF010150
+#define OID_SKGE_RLMT_MONITOR_INDEX 0xFF010151
+#define OID_SKGE_RLMT_MONITOR_ADDR 0xFF010152
+#define OID_SKGE_RLMT_MONITOR_ERRS 0xFF010153
+#define OID_SKGE_RLMT_MONITOR_TIMESTAMP 0xFF010154
+#define OID_SKGE_RLMT_MONITOR_ADMIN 0xFF010155
+
+#define OID_SKGE_TX_SW_QUEUE_LEN 0xFF020170
+#define OID_SKGE_TX_SW_QUEUE_MAX 0xFF020171
+#define OID_SKGE_TX_RETRY 0xFF020172
+#define OID_SKGE_RX_INTR_CTS 0xFF020173
+#define OID_SKGE_TX_INTR_CTS 0xFF020174
+#define OID_SKGE_RX_NO_BUF_CTS 0xFF020175
+#define OID_SKGE_TX_NO_BUF_CTS 0xFF020176
+#define OID_SKGE_TX_USED_DESCR_NO 0xFF020177
+#define OID_SKGE_RX_DELIVERED_CTS 0xFF020178
+#define OID_SKGE_RX_OCTETS_DELIV_CTS 0xFF020179
+#define OID_SKGE_RX_HW_ERROR_CTS 0xFF02017A
+#define OID_SKGE_TX_HW_ERROR_CTS 0xFF02017B
+#define OID_SKGE_IN_ERRORS_CTS 0xFF02017C
+#define OID_SKGE_OUT_ERROR_CTS 0xFF02017D
+#define OID_SKGE_ERR_RECOVERY_CTS 0xFF02017E
+#define OID_SKGE_SYSUPTIME 0xFF02017F
+
+#define OID_SKGE_ALL_DATA 0xFF020190
+
+/* Defines for VCT. */
+#define OID_SKGE_VCT_GET 0xFF020200
+#define OID_SKGE_VCT_SET 0xFF020201
+#define OID_SKGE_VCT_STATUS 0xFF020202
+
+#ifdef SK_DIAG_SUPPORT
+/* Defines for driver DIAG mode. */
+#define OID_SKGE_DIAG_MODE 0xFF020204
+#endif /* SK_DIAG_SUPPORT */
+
+/* New OIDs */
+#define OID_SKGE_DRIVER_RELDATE 0xFF020210
+#define OID_SKGE_DRIVER_FILENAME 0xFF020211
+#define OID_SKGE_CHIPID 0xFF020212
+#define OID_SKGE_RAMSIZE 0xFF020213
+#define OID_SKGE_VAUXAVAIL 0xFF020214
+#define OID_SKGE_PHY_TYPE 0xFF020215
+#define OID_SKGE_PHY_LP_MODE 0xFF020216
+
+/* VCT struct to store a backup copy of VCT data after a port reset. */
+typedef struct s_PnmiVct {
+ SK_U8 VctStatus;
+ SK_U8 PCableLen;
+ SK_U32 PMdiPairLen[4];
+ SK_U8 PMdiPairSts[4];
+} SK_PNMI_VCT;
+
+
+/* VCT status values (to be given to CPA via OID_SKGE_VCT_STATUS). */
+#define SK_PNMI_VCT_NONE 0
+#define SK_PNMI_VCT_OLD_VCT_DATA 1
+#define SK_PNMI_VCT_NEW_VCT_DATA 2
+#define SK_PNMI_VCT_OLD_DSP_DATA 4
+#define SK_PNMI_VCT_NEW_DSP_DATA 8
+#define SK_PNMI_VCT_RUNNING 16
+
+
+/* VCT cable test status. */
+#define SK_PNMI_VCT_NORMAL_CABLE 0
+#define SK_PNMI_VCT_SHORT_CABLE 1
+#define SK_PNMI_VCT_OPEN_CABLE 2
+#define SK_PNMI_VCT_TEST_FAIL 3
+#define SK_PNMI_VCT_IMPEDANCE_MISMATCH 4
+
+#define OID_SKGE_TRAP_SEN_WAR_LOW 500
+#define OID_SKGE_TRAP_SEN_WAR_UPP 501
+#define OID_SKGE_TRAP_SEN_ERR_LOW 502
+#define OID_SKGE_TRAP_SEN_ERR_UPP 503
+#define OID_SKGE_TRAP_RLMT_CHANGE_THRES 520
+#define OID_SKGE_TRAP_RLMT_CHANGE_PORT 521
+#define OID_SKGE_TRAP_RLMT_PORT_DOWN 522
+#define OID_SKGE_TRAP_RLMT_PORT_UP 523
+#define OID_SKGE_TRAP_RLMT_SEGMENTATION 524
+
+#ifdef SK_DIAG_SUPPORT
+/* Defines for driver DIAG mode. */
+#define SK_DIAG_ATTACHED 2
+#define SK_DIAG_RUNNING 1
+#define SK_DIAG_IDLE 0
+#endif /* SK_DIAG_SUPPORT */
+
+/*
+ * Generic PNMI IOCTL subcommand definitions.
+ */
+#define SK_GET_SINGLE_VAR 1
+#define SK_SET_SINGLE_VAR 2
+#define SK_PRESET_SINGLE_VAR 3
+#define SK_GET_FULL_MIB 4
+#define SK_SET_FULL_MIB 5
+#define SK_PRESET_FULL_MIB 6
+
+
+/*
+ * Define error numbers and messages for syslog
+ */
+#define SK_PNMI_ERR001 (SK_ERRBASE_PNMI + 1)
+#define SK_PNMI_ERR001MSG "SkPnmiGetStruct: Unknown OID"
+#define SK_PNMI_ERR002 (SK_ERRBASE_PNMI + 2)
+#define SK_PNMI_ERR002MSG "SkPnmiGetStruct: Cannot read VPD keys"
+#define SK_PNMI_ERR003 (SK_ERRBASE_PNMI + 3)
+#define SK_PNMI_ERR003MSG "OidStruct: Called with wrong OID"
+#define SK_PNMI_ERR004 (SK_ERRBASE_PNMI + 4)
+#define SK_PNMI_ERR004MSG "OidStruct: Called with wrong action"
+#define SK_PNMI_ERR005 (SK_ERRBASE_PNMI + 5)
+#define SK_PNMI_ERR005MSG "Perform: Cannot reset driver"
+#define SK_PNMI_ERR006 (SK_ERRBASE_PNMI + 6)
+#define SK_PNMI_ERR006MSG "Perform: Unknown OID action command"
+#define SK_PNMI_ERR007 (SK_ERRBASE_PNMI + 7)
+#define SK_PNMI_ERR007MSG "General: Driver description not initialized"
+#define SK_PNMI_ERR008 (SK_ERRBASE_PNMI + 8)
+#define SK_PNMI_ERR008MSG "Addr: Tried to get unknown OID"
+#define SK_PNMI_ERR009 (SK_ERRBASE_PNMI + 9)
+#define SK_PNMI_ERR009MSG "Addr: Unknown OID"
+#define SK_PNMI_ERR010 (SK_ERRBASE_PNMI + 10)
+#define SK_PNMI_ERR010MSG "CsumStat: Unknown OID"
+#define SK_PNMI_ERR011 (SK_ERRBASE_PNMI + 11)
+#define SK_PNMI_ERR011MSG "SensorStat: Sensor descr string too long"
+#define SK_PNMI_ERR012 (SK_ERRBASE_PNMI + 12)
+#define SK_PNMI_ERR012MSG "SensorStat: Unknown OID"
+#define SK_PNMI_ERR013 (SK_ERRBASE_PNMI + 13)
+#define SK_PNMI_ERR013MSG ""
+#define SK_PNMI_ERR014 (SK_ERRBASE_PNMI + 14)
+#define SK_PNMI_ERR014MSG "Vpd: Cannot read VPD keys"
+#define SK_PNMI_ERR015 (SK_ERRBASE_PNMI + 15)
+#define SK_PNMI_ERR015MSG "Vpd: Internal array for VPD keys too small"
+#define SK_PNMI_ERR016 (SK_ERRBASE_PNMI + 16)
+#define SK_PNMI_ERR016MSG "Vpd: Key string too long"
+#define SK_PNMI_ERR017 (SK_ERRBASE_PNMI + 17)
+#define SK_PNMI_ERR017MSG "Vpd: Invalid VPD status pointer"
+#define SK_PNMI_ERR018 (SK_ERRBASE_PNMI + 18)
+#define SK_PNMI_ERR018MSG "Vpd: VPD data not valid"
+#define SK_PNMI_ERR019 (SK_ERRBASE_PNMI + 19)
+#define SK_PNMI_ERR019MSG "Vpd: VPD entries list string too long"
+#define SK_PNMI_ERR021 (SK_ERRBASE_PNMI + 21)
+#define SK_PNMI_ERR021MSG "Vpd: VPD data string too long"
+#define SK_PNMI_ERR022 (SK_ERRBASE_PNMI + 22)
+#define SK_PNMI_ERR022MSG "Vpd: VPD data string too long (should have been caught earlier)"
+#define SK_PNMI_ERR023 (SK_ERRBASE_PNMI + 23)
+#define SK_PNMI_ERR023MSG "Vpd: Unknown OID in get action"
+#define SK_PNMI_ERR024 (SK_ERRBASE_PNMI + 24)
+#define SK_PNMI_ERR024MSG "Vpd: Unknown OID in preset/set action"
+#define SK_PNMI_ERR025 (SK_ERRBASE_PNMI + 25)
+#define SK_PNMI_ERR025MSG "Vpd: Cannot write VPD after modify entry"
+#define SK_PNMI_ERR026 (SK_ERRBASE_PNMI + 26)
+#define SK_PNMI_ERR026MSG "Vpd: Cannot update VPD"
+#define SK_PNMI_ERR027 (SK_ERRBASE_PNMI + 27)
+#define SK_PNMI_ERR027MSG "Vpd: Cannot delete VPD entry"
+#define SK_PNMI_ERR028 (SK_ERRBASE_PNMI + 28)
+#define SK_PNMI_ERR028MSG "Vpd: Cannot update VPD after delete entry"
+#define SK_PNMI_ERR029 (SK_ERRBASE_PNMI + 29)
+#define SK_PNMI_ERR029MSG "General: Driver description string too long"
+#define SK_PNMI_ERR030 (SK_ERRBASE_PNMI + 30)
+#define SK_PNMI_ERR030MSG "General: Driver version not initialized"
+#define SK_PNMI_ERR031 (SK_ERRBASE_PNMI + 31)
+#define SK_PNMI_ERR031MSG "General: Driver version string too long"
+#define SK_PNMI_ERR032 (SK_ERRBASE_PNMI + 32)
+#define SK_PNMI_ERR032MSG "General: Cannot read VPD Name for HW descr"
+#define SK_PNMI_ERR033 (SK_ERRBASE_PNMI + 33)
+#define SK_PNMI_ERR033MSG "General: HW description string too long"
+#define SK_PNMI_ERR034 (SK_ERRBASE_PNMI + 34)
+#define SK_PNMI_ERR034MSG "General: Unknown OID"
+#define SK_PNMI_ERR035 (SK_ERRBASE_PNMI + 35)
+#define SK_PNMI_ERR035MSG "Rlmt: Unknown OID"
+#define SK_PNMI_ERR036 (SK_ERRBASE_PNMI + 36)
+#define SK_PNMI_ERR036MSG ""
+#define SK_PNMI_ERR037 (SK_ERRBASE_PNMI + 37)
+#define SK_PNMI_ERR037MSG "Rlmt: SK_RLMT_MODE_CHANGE event return not 0"
+#define SK_PNMI_ERR038 (SK_ERRBASE_PNMI + 38)
+#define SK_PNMI_ERR038MSG "Rlmt: SK_RLMT_PREFPORT_CHANGE event return not 0"
+#define SK_PNMI_ERR039 (SK_ERRBASE_PNMI + 39)
+#define SK_PNMI_ERR039MSG "RlmtStat: Unknown OID"
+#define SK_PNMI_ERR040 (SK_ERRBASE_PNMI + 40)
+#define SK_PNMI_ERR040MSG "PowerManagement: Unknown OID"
+#define SK_PNMI_ERR041 (SK_ERRBASE_PNMI + 41)
+#define SK_PNMI_ERR041MSG "MacPrivateConf: Unknown OID"
+#define SK_PNMI_ERR042 (SK_ERRBASE_PNMI + 42)
+#define SK_PNMI_ERR042MSG "MacPrivateConf: SK_HWEV_SET_ROLE returned not 0"
+#define SK_PNMI_ERR043 (SK_ERRBASE_PNMI + 43)
+#define SK_PNMI_ERR043MSG "MacPrivateConf: SK_HWEV_SET_LMODE returned not 0"
+#define SK_PNMI_ERR044 (SK_ERRBASE_PNMI + 44)
+#define SK_PNMI_ERR044MSG "MacPrivateConf: SK_HWEV_SET_FLOWMODE returned not 0"
+#define SK_PNMI_ERR045 (SK_ERRBASE_PNMI + 45)
+#define SK_PNMI_ERR045MSG "MacPrivateConf: SK_HWEV_SET_SPEED returned not 0"
+#define SK_PNMI_ERR046 (SK_ERRBASE_PNMI + 46)
+#define SK_PNMI_ERR046MSG "Monitor: Unknown OID"
+#define SK_PNMI_ERR047 (SK_ERRBASE_PNMI + 47)
+#define SK_PNMI_ERR047MSG "SirqUpdate: Event function returns not 0"
+#define SK_PNMI_ERR048 (SK_ERRBASE_PNMI + 48)
+#define SK_PNMI_ERR048MSG "RlmtUpdate: Event function returns not 0"
+#define SK_PNMI_ERR049 (SK_ERRBASE_PNMI + 49)
+#define SK_PNMI_ERR049MSG "SkPnmiInit: Invalid size of 'CounterOffset' struct!!"
+#define SK_PNMI_ERR050 (SK_ERRBASE_PNMI + 50)
+#define SK_PNMI_ERR050MSG "SkPnmiInit: Invalid size of 'StatAddr' table!!"
+#define SK_PNMI_ERR051 (SK_ERRBASE_PNMI + 51)
+#define SK_PNMI_ERR051MSG "SkPnmiEvent: Port switch suspicious"
+#define SK_PNMI_ERR052 (SK_ERRBASE_PNMI + 52)
+#define SK_PNMI_ERR052MSG ""
+#define SK_PNMI_ERR053 (SK_ERRBASE_PNMI + 53)
+#define SK_PNMI_ERR053MSG "General: Driver release date not initialized"
+#define SK_PNMI_ERR054 (SK_ERRBASE_PNMI + 54)
+#define SK_PNMI_ERR054MSG "General: Driver release date string too long"
+#define SK_PNMI_ERR055 (SK_ERRBASE_PNMI + 55)
+#define SK_PNMI_ERR055MSG "General: Driver file name not initialized"
+#define SK_PNMI_ERR056 (SK_ERRBASE_PNMI + 56)
+#define SK_PNMI_ERR056MSG "General: Driver file name string too long"
+
+/*
+ * Management counter macros called by the driver
+ */
+#define SK_PNMI_SET_DRIVER_DESCR(pAC,v) ((pAC)->Pnmi.pDriverDescription = \
+ (char *)(v))
+
+#define SK_PNMI_SET_DRIVER_VER(pAC,v) ((pAC)->Pnmi.pDriverVersion = \
+ (char *)(v))
+
+#define SK_PNMI_SET_DRIVER_RELDATE(pAC,v) ((pAC)->Pnmi.pDriverReleaseDate = \
+ (char *)(v))
+
+#define SK_PNMI_SET_DRIVER_FILENAME(pAC,v) ((pAC)->Pnmi.pDriverFileName = \
+ (char *)(v))
+
+#define SK_PNMI_CNT_TX_QUEUE_LEN(pAC,v,p) \
+ { \
+ (pAC)->Pnmi.Port[p].TxSwQueueLen = (SK_U64)(v); \
+ if ((pAC)->Pnmi.Port[p].TxSwQueueLen > (pAC)->Pnmi.Port[p].TxSwQueueMax) { \
+ (pAC)->Pnmi.Port[p].TxSwQueueMax = (pAC)->Pnmi.Port[p].TxSwQueueLen; \
+ } \
+ }
+#define SK_PNMI_CNT_TX_RETRY(pAC,p) (((pAC)->Pnmi.Port[p].TxRetryCts)++)
+#define SK_PNMI_CNT_RX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].RxIntrCts)++)
+#define SK_PNMI_CNT_TX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].TxIntrCts)++)
+#define SK_PNMI_CNT_NO_RX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].RxNoBufCts)++)
+#define SK_PNMI_CNT_NO_TX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].TxNoBufCts)++)
+#define SK_PNMI_CNT_USED_TX_DESCR(pAC,v,p) \
+ ((pAC)->Pnmi.Port[p].TxUsedDescrNo=(SK_U64)(v));
+#define SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,v,p) \
+ { \
+ ((pAC)->Pnmi.Port[p].RxDeliveredCts)++; \
+ (pAC)->Pnmi.Port[p].RxOctetsDeliveredCts += (SK_U64)(v); \
+ }
+#define SK_PNMI_CNT_ERR_RECOVERY(pAC,p) (((pAC)->Pnmi.Port[p].ErrRecoveryCts)++);
+
+#define SK_PNMI_CNT_SYNC_OCTETS(pAC,p,v) \
+ { \
+ if ((p) < SK_MAX_MACS) { \
+ ((pAC)->Pnmi.Port[p].StatSyncCts)++; \
+ (pAC)->Pnmi.Port[p].StatSyncOctetsCts += (SK_U64)(v); \
+ } \
+ }
+
+#define SK_PNMI_CNT_RX_LONGFRAMES(pAC,p) \
+ { \
+ if ((p) < SK_MAX_MACS) { \
+ ((pAC)->Pnmi.Port[p].StatRxLongFrameCts++); \
+ } \
+ }
+
+#define SK_PNMI_CNT_RX_FRAMETOOLONG(pAC,p) \
+ { \
+ if ((p) < SK_MAX_MACS) { \
+ ((pAC)->Pnmi.Port[p].StatRxFrameTooLongCts++); \
+ } \
+ }
+
+#define SK_PNMI_CNT_RX_PMACC_ERR(pAC,p) \
+ { \
+ if ((p) < SK_MAX_MACS) { \
+ ((pAC)->Pnmi.Port[p].StatRxPMaccErr++); \
+ } \
+ }
+
+/*
+ * Conversion Macros
+ */
+#define SK_PNMI_PORT_INST2LOG(i) ((unsigned int)(i) - 1)
+#define SK_PNMI_PORT_LOG2INST(l) ((unsigned int)(l) + 1)
+#define SK_PNMI_PORT_PHYS2LOG(p) ((unsigned int)(p) + 1)
+#define SK_PNMI_PORT_LOG2PHYS(pAC,l) ((unsigned int)(l) - 1)
+#define SK_PNMI_PORT_PHYS2INST(pAC,p) \
+ (pAC->Pnmi.DualNetActiveFlag ? 2 : ((unsigned int)(p) + 2))
+#define SK_PNMI_PORT_INST2PHYS(pAC,i) ((unsigned int)(i) - 2)
+
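/*
 * Illustrative sketch (not part of the commit): the conversion macros above
 * relate three port numbering schemes - physical MAC index (0-based),
 * logical port number and OID instance. The helpers below only mirror the
 * macro arithmetic for the single-net case (DualNetActiveFlag not set);
 * the remark about the virtual port is an assumption, not stated in this
 * header.
 */
#include <stdio.h>

static unsigned int phys_to_log(unsigned int p)  { return p + 1; }  /* SK_PNMI_PORT_PHYS2LOG */
static unsigned int log_to_inst(unsigned int l)  { return l + 1; }  /* SK_PNMI_PORT_LOG2INST */
static unsigned int phys_to_inst(unsigned int p) { return p + 2; }  /* single-net branch of PHYS2INST */

int main(void)
{
	unsigned int p;

	/* Instance 1 (logical 0) presumably addresses the virtual net port. */
	for (p = 0; p < 2; p++)
		printf("phys %u -> log %u -> inst %u (PHYS2INST: %u)\n",
		       p, phys_to_log(p), log_to_inst(phys_to_log(p)),
		       phys_to_inst(p));
	return 0;
}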
+/*
+ * Structure definition for SkPnmiGetStruct and SkPnmiSetStruct
+ */
+#define SK_PNMI_VPD_KEY_SIZE 5
+#define SK_PNMI_VPD_BUFSIZE (VPD_SIZE)
+#define SK_PNMI_VPD_ENTRIES (VPD_SIZE / 4)
+#define SK_PNMI_VPD_DATALEN 128 /* Number of data bytes */
+
+#define SK_PNMI_MULTICAST_LISTLEN 64
+#define SK_PNMI_SENSOR_ENTRIES (SK_MAX_SENSORS)
+#define SK_PNMI_CHECKSUM_ENTRIES 3
+#define SK_PNMI_MAC_ENTRIES (SK_MAX_MACS + 1)
+#define SK_PNMI_MONITOR_ENTRIES 20
+#define SK_PNMI_TRAP_ENTRIES 10
+#define SK_PNMI_TRAPLEN 128
+#define SK_PNMI_STRINGLEN1 80
+#define SK_PNMI_STRINGLEN2 25
+#define SK_PNMI_TRAP_QUEUE_LEN 512
+
+typedef struct s_PnmiVpd {
+ char VpdKey[SK_PNMI_VPD_KEY_SIZE];
+ char VpdValue[SK_PNMI_VPD_DATALEN];
+ SK_U8 VpdAccess;
+ SK_U8 VpdAction;
+} SK_PNMI_VPD;
+
+typedef struct s_PnmiSensor {
+ SK_U8 SensorIndex;
+ char SensorDescr[SK_PNMI_STRINGLEN2];
+ SK_U8 SensorType;
+ SK_U32 SensorValue;
+ SK_U32 SensorWarningThresholdLow;
+ SK_U32 SensorWarningThresholdHigh;
+ SK_U32 SensorErrorThresholdLow;
+ SK_U32 SensorErrorThresholdHigh;
+ SK_U8 SensorStatus;
+ SK_U64 SensorWarningCts;
+ SK_U64 SensorErrorCts;
+ SK_U64 SensorWarningTimestamp;
+ SK_U64 SensorErrorTimestamp;
+} SK_PNMI_SENSOR;
+
+typedef struct s_PnmiChecksum {
+ SK_U64 ChecksumRxOkCts;
+ SK_U64 ChecksumRxUnableCts;
+ SK_U64 ChecksumRxErrCts;
+ SK_U64 ChecksumTxOkCts;
+ SK_U64 ChecksumTxUnableCts;
+} SK_PNMI_CHECKSUM;
+
+typedef struct s_PnmiStat {
+ SK_U64 StatTxOkCts;
+ SK_U64 StatTxOctetsOkCts;
+ SK_U64 StatTxBroadcastOkCts;
+ SK_U64 StatTxMulticastOkCts;
+ SK_U64 StatTxUnicastOkCts;
+ SK_U64 StatTxLongFramesCts;
+ SK_U64 StatTxBurstCts;
+ SK_U64 StatTxPauseMacCtrlCts;
+ SK_U64 StatTxMacCtrlCts;
+ SK_U64 StatTxSingleCollisionCts;
+ SK_U64 StatTxMultipleCollisionCts;
+ SK_U64 StatTxExcessiveCollisionCts;
+ SK_U64 StatTxLateCollisionCts;
+ SK_U64 StatTxDeferralCts;
+ SK_U64 StatTxExcessiveDeferralCts;
+ SK_U64 StatTxFifoUnderrunCts;
+ SK_U64 StatTxCarrierCts;
+ SK_U64 Dummy1; /* StatTxUtilization */
+ SK_U64 StatTx64Cts;
+ SK_U64 StatTx127Cts;
+ SK_U64 StatTx255Cts;
+ SK_U64 StatTx511Cts;
+ SK_U64 StatTx1023Cts;
+ SK_U64 StatTxMaxCts;
+ SK_U64 StatTxSyncCts;
+ SK_U64 StatTxSyncOctetsCts;
+ SK_U64 StatRxOkCts;
+ SK_U64 StatRxOctetsOkCts;
+ SK_U64 StatRxBroadcastOkCts;
+ SK_U64 StatRxMulticastOkCts;
+ SK_U64 StatRxUnicastOkCts;
+ SK_U64 StatRxLongFramesCts;
+ SK_U64 StatRxPauseMacCtrlCts;
+ SK_U64 StatRxMacCtrlCts;
+ SK_U64 StatRxPauseMacCtrlErrorCts;
+ SK_U64 StatRxMacCtrlUnknownCts;
+ SK_U64 StatRxBurstCts;
+ SK_U64 StatRxMissedCts;
+ SK_U64 StatRxFramingCts;
+ SK_U64 StatRxFifoOverflowCts;
+ SK_U64 StatRxJabberCts;
+ SK_U64 StatRxCarrierCts;
+ SK_U64 StatRxIRLengthCts;
+ SK_U64 StatRxSymbolCts;
+ SK_U64 StatRxShortsCts;
+ SK_U64 StatRxRuntCts;
+ SK_U64 StatRxCextCts;
+ SK_U64 StatRxTooLongCts;
+ SK_U64 StatRxFcsCts;
+ SK_U64 Dummy2; /* StatRxUtilization */
+ SK_U64 StatRx64Cts;
+ SK_U64 StatRx127Cts;
+ SK_U64 StatRx255Cts;
+ SK_U64 StatRx511Cts;
+ SK_U64 StatRx1023Cts;
+ SK_U64 StatRxMaxCts;
+} SK_PNMI_STAT;
+
+typedef struct s_PnmiConf {
+ char ConfMacCurrentAddr[6];
+ char ConfMacFactoryAddr[6];
+ SK_U8 ConfPMD;
+ SK_U8 ConfConnector;
+ SK_U32 ConfPhyType;
+ SK_U32 ConfPhyMode;
+ SK_U8 ConfLinkCapability;
+ SK_U8 ConfLinkMode;
+ SK_U8 ConfLinkModeStatus;
+ SK_U8 ConfLinkStatus;
+ SK_U8 ConfFlowCtrlCapability;
+ SK_U8 ConfFlowCtrlMode;
+ SK_U8 ConfFlowCtrlStatus;
+ SK_U8 ConfPhyOperationCapability;
+ SK_U8 ConfPhyOperationMode;
+ SK_U8 ConfPhyOperationStatus;
+ SK_U8 ConfSpeedCapability;
+ SK_U8 ConfSpeedMode;
+ SK_U8 ConfSpeedStatus;
+} SK_PNMI_CONF;
+
+typedef struct s_PnmiRlmt {
+ SK_U32 RlmtIndex;
+ SK_U32 RlmtStatus;
+ SK_U64 RlmtTxHelloCts;
+ SK_U64 RlmtRxHelloCts;
+ SK_U64 RlmtTxSpHelloReqCts;
+ SK_U64 RlmtRxSpHelloCts;
+} SK_PNMI_RLMT;
+
+typedef struct s_PnmiRlmtMonitor {
+ SK_U32 RlmtMonitorIndex;
+ char RlmtMonitorAddr[6];
+ SK_U64 RlmtMonitorErrorCts;
+ SK_U64 RlmtMonitorTimestamp;
+ SK_U8 RlmtMonitorAdmin;
+} SK_PNMI_RLMT_MONITOR;
+
+typedef struct s_PnmiRequestStatus {
+ SK_U32 ErrorStatus;
+ SK_U32 ErrorOffset;
+} SK_PNMI_REQUEST_STATUS;
+
+typedef struct s_PnmiStrucData {
+ SK_U32 MgmtDBVersion;
+ SK_PNMI_REQUEST_STATUS ReturnStatus;
+ SK_U32 VpdFreeBytes;
+ char VpdEntriesList[SK_PNMI_VPD_ENTRIES * SK_PNMI_VPD_KEY_SIZE];
+ SK_U32 VpdEntriesNumber;
+ SK_PNMI_VPD Vpd[SK_PNMI_VPD_ENTRIES];
+ SK_U32 PortNumber;
+ SK_U32 DeviceType;
+ char DriverDescr[SK_PNMI_STRINGLEN1];
+ char DriverVersion[SK_PNMI_STRINGLEN2];
+ char DriverReleaseDate[SK_PNMI_STRINGLEN1];
+ char DriverFileName[SK_PNMI_STRINGLEN1];
+ char HwDescr[SK_PNMI_STRINGLEN1];
+ char HwVersion[SK_PNMI_STRINGLEN2];
+ SK_U16 Chipset;
+ SK_U32 ChipId;
+ SK_U8 VauxAvail;
+ SK_U32 RamSize;
+ SK_U32 MtuSize;
+ SK_U32 Action;
+ SK_U32 TestResult;
+ SK_U8 BusType;
+ SK_U8 BusSpeed;
+ SK_U8 BusWidth;
+ SK_U8 SensorNumber;
+ SK_PNMI_SENSOR Sensor[SK_PNMI_SENSOR_ENTRIES];
+ SK_U8 ChecksumNumber;
+ SK_PNMI_CHECKSUM Checksum[SK_PNMI_CHECKSUM_ENTRIES];
+ SK_PNMI_STAT Stat[SK_PNMI_MAC_ENTRIES];
+ SK_PNMI_CONF Conf[SK_PNMI_MAC_ENTRIES];
+ SK_U8 RlmtMode;
+ SK_U32 RlmtPortNumber;
+ SK_U8 RlmtPortActive;
+ SK_U8 RlmtPortPreferred;
+ SK_U64 RlmtChangeCts;
+ SK_U64 RlmtChangeTime;
+ SK_U64 RlmtChangeEstimate;
+ SK_U64 RlmtChangeThreshold;
+ SK_PNMI_RLMT Rlmt[SK_MAX_MACS];
+ SK_U32 RlmtMonitorNumber;
+ SK_PNMI_RLMT_MONITOR RlmtMonitor[SK_PNMI_MONITOR_ENTRIES];
+ SK_U32 TrapNumber;
+ SK_U8 Trap[SK_PNMI_TRAP_QUEUE_LEN];
+ SK_U64 TxSwQueueLen;
+ SK_U64 TxSwQueueMax;
+ SK_U64 TxRetryCts;
+ SK_U64 RxIntrCts;
+ SK_U64 TxIntrCts;
+ SK_U64 RxNoBufCts;
+ SK_U64 TxNoBufCts;
+ SK_U64 TxUsedDescrNo;
+ SK_U64 RxDeliveredCts;
+ SK_U64 RxOctetsDeliveredCts;
+ SK_U64 RxHwErrorsCts;
+ SK_U64 TxHwErrorsCts;
+ SK_U64 InErrorsCts;
+ SK_U64 OutErrorsCts;
+ SK_U64 ErrRecoveryCts;
+ SK_U64 SysUpTime;
+} SK_PNMI_STRUCT_DATA;
+
+#define SK_PNMI_STRUCT_SIZE (sizeof(SK_PNMI_STRUCT_DATA))
+#define SK_PNMI_MIN_STRUCT_SIZE ((unsigned int)(SK_UPTR)\
+ &(((SK_PNMI_STRUCT_DATA *)0)->VpdFreeBytes))
+ /*
+ * ReturnStatus field
+ * must be located
+ * before VpdFreeBytes
+ */
+
+/*
+ * Various definitions
+ */
+#define SK_PNMI_MAX_PROTOS 3
+
+#define SK_PNMI_CNT_NO 66 /* Must equal the value of the enum
+ * SK_PNMI_MAX_IDX. Define SK_PNMI_CHECK
+ * to verify this during init phase 1.
+ */
+
+/*
+ * Estimate data structure
+ */
+typedef struct s_PnmiEstimate {
+ unsigned int EstValueIndex;
+ SK_U64 EstValue[7];
+ SK_U64 Estimate;
+ SK_TIMER EstTimer;
+} SK_PNMI_ESTIMATE;
+
+
+/*
+ * VCT timer data structure
+ */
+typedef struct s_VctTimer {
+ SK_TIMER VctTimer;
+} SK_PNMI_VCT_TIMER;
+
+
+/*
+ * PNMI specific adapter context structure
+ */
+typedef struct s_PnmiPort {
+ SK_U64 StatSyncCts;
+ SK_U64 StatSyncOctetsCts;
+ SK_U64 StatRxLongFrameCts;
+ SK_U64 StatRxFrameTooLongCts;
+ SK_U64 StatRxPMaccErr;
+ SK_U64 TxSwQueueLen;
+ SK_U64 TxSwQueueMax;
+ SK_U64 TxRetryCts;
+ SK_U64 RxIntrCts;
+ SK_U64 TxIntrCts;
+ SK_U64 RxNoBufCts;
+ SK_U64 TxNoBufCts;
+ SK_U64 TxUsedDescrNo;
+ SK_U64 RxDeliveredCts;
+ SK_U64 RxOctetsDeliveredCts;
+ SK_U64 RxHwErrorsCts;
+ SK_U64 TxHwErrorsCts;
+ SK_U64 InErrorsCts;
+ SK_U64 OutErrorsCts;
+ SK_U64 ErrRecoveryCts;
+ SK_U64 RxShortZeroMark;
+ SK_U64 CounterOffset[SK_PNMI_CNT_NO];
+ SK_U32 CounterHigh[SK_PNMI_CNT_NO];
+ SK_BOOL ActiveFlag;
+ SK_U8 Align[3];
+} SK_PNMI_PORT;
+
+
+typedef struct s_PnmiData {
+ SK_PNMI_PORT Port [SK_MAX_MACS];
+ SK_PNMI_PORT BufPort [SK_MAX_MACS]; /* 2002-09-13 pweber */
+ SK_U64 VirtualCounterOffset[SK_PNMI_CNT_NO];
+ SK_U32 TestResult;
+ char HwVersion[10];
+ SK_U16 Align01;
+
+ char *pDriverDescription;
+ char *pDriverVersion;
+ char *pDriverReleaseDate;
+ char *pDriverFileName;
+
+ int MacUpdatedFlag;
+ int RlmtUpdatedFlag;
+ int SirqUpdatedFlag;
+
+ SK_U64 RlmtChangeCts;
+ SK_U64 RlmtChangeTime;
+ SK_PNMI_ESTIMATE RlmtChangeEstimate;
+ SK_U64 RlmtChangeThreshold;
+
+ SK_U64 StartUpTime;
+ SK_U32 DeviceType;
+ char PciBusSpeed;
+ char PciBusWidth;
+ char Chipset;
+ char PMD;
+ char Connector;
+ SK_BOOL DualNetActiveFlag;
+ SK_U16 Align02;
+
+ char TrapBuf[SK_PNMI_TRAP_QUEUE_LEN];
+ unsigned int TrapBufFree;
+ unsigned int TrapQueueBeg;
+ unsigned int TrapQueueEnd;
+ unsigned int TrapBufPad;
+ unsigned int TrapUnique;
+ SK_U8 VctStatus[SK_MAX_MACS];
+ SK_PNMI_VCT VctBackup[SK_MAX_MACS];
+ SK_PNMI_VCT_TIMER VctTimeout[SK_MAX_MACS];
+#ifdef SK_DIAG_SUPPORT
+ SK_U32 DiagAttached;
+#endif /* SK_DIAG_SUPPORT */
+} SK_PNMI;
+
+
+/*
+ * Function prototypes
+ */
+extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level);
+extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
+ unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex);
+extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id,
+ void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
+ unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+extern int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+extern int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+extern int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event,
+ SK_EVPARA Param);
+extern int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf,
+ unsigned int * pLen, SK_U32 NetIndex);
+
+#endif
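/*
 * Illustrative sketch (not part of the commit): the SK_PNMI_CNT_* macros
 * earlier in this header are the hooks a driver calls to feed PNMI
 * statistics; SK_PNMI_CNT_TX_QUEUE_LEN additionally keeps a high-water mark
 * in TxSwQueueMax. The same logic with plain C types; the struct below is
 * invented for the demo only.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_port_stats {
	uint64_t tx_sw_queue_len;	/* current length, cf. TxSwQueueLen */
	uint64_t tx_sw_queue_max;	/* high-water mark, cf. TxSwQueueMax */
};

static void count_tx_queue_len(struct demo_port_stats *s, uint64_t len)
{
	s->tx_sw_queue_len = len;
	if (s->tx_sw_queue_len > s->tx_sw_queue_max)
		s->tx_sw_queue_max = s->tx_sw_queue_len;
}

int main(void)
{
	struct demo_port_stats stats = { 0, 0 };

	count_tx_queue_len(&stats, 5);
	count_tx_queue_len(&stats, 17);
	count_tx_queue_len(&stats, 3);
	printf("len=%llu max=%llu\n",	/* prints len=3 max=17 */
	       (unsigned long long)stats.tx_sw_queue_len,
	       (unsigned long long)stats.tx_sw_queue_max);
	return 0;
}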
diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h
new file mode 100644
index 000000000000..b486bd9b6628
--- /dev/null
+++ b/drivers/net/sk98lin/h/skgesirq.h
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Name: skgesirq.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.30 $
+ * Date: $Date: 2003/07/04 12:34:13 $
+ * Purpose: SK specific Gigabit Ethernet special IRQ functions
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _INC_SKGESIRQ_H_
+#define _INC_SKGESIRQ_H_
+
+/* Define return codes of SkGePortCheckUp and CheckShort */
+#define SK_HW_PS_NONE 0 /* No action needed */
+#define SK_HW_PS_RESTART 1 /* Restart needed */
+#define SK_HW_PS_LINK 2 /* Link Up actions needed */
+
+/*
+ * Define the Event the special IRQ/INI module can handle
+ */
+#define SK_HWEV_WATIM 1 /* Timeout for WA Errata #2 XMAC */
+#define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */
+#define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */
+#define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */
+#define SK_HWEV_UPDATE_STAT 5 /* Update Statistics by PNMI */
+#define SK_HWEV_SET_LMODE 6 /* Set Link Mode by PNMI */
+#define SK_HWEV_SET_FLOWMODE 7 /* Set Flow Control Mode by PNMI */
+#define SK_HWEV_SET_ROLE 8 /* Set Master/Slave (Role) by PNMI */
+#define SK_HWEV_SET_SPEED 9 /* Set Link Speed by PNMI */
+#define SK_HWEV_HALFDUP_CHK 10 /* Half Duplex Hangup Workaround */
+
+#define SK_WA_ACT_TIME (5000000UL) /* 5 sec */
+#define SK_WA_INA_TIME (100000UL) /* 100 msec */
+
+#define SK_HALFDUP_CHK_TIME (10000UL) /* 10 msec */
+
+/*
+ * Define the error numbers and messages
+ */
+#define SKERR_SIRQ_E001 (SK_ERRBASE_SIRQ+0)
+#define SKERR_SIRQ_E001MSG "Unknown event"
+#define SKERR_SIRQ_E002 (SKERR_SIRQ_E001+1)
+#define SKERR_SIRQ_E002MSG "Packet timeout RX1"
+#define SKERR_SIRQ_E003 (SKERR_SIRQ_E002+1)
+#define SKERR_SIRQ_E003MSG "Packet timeout RX2"
+#define SKERR_SIRQ_E004 (SKERR_SIRQ_E003+1)
+#define SKERR_SIRQ_E004MSG "MAC 1 not correctly initialized"
+#define SKERR_SIRQ_E005 (SKERR_SIRQ_E004+1)
+#define SKERR_SIRQ_E005MSG "MAC 2 not correctly initialized"
+#define SKERR_SIRQ_E006 (SKERR_SIRQ_E005+1)
+#define SKERR_SIRQ_E006MSG "CHECK failure R1"
+#define SKERR_SIRQ_E007 (SKERR_SIRQ_E006+1)
+#define SKERR_SIRQ_E007MSG "CHECK failure R2"
+#define SKERR_SIRQ_E008 (SKERR_SIRQ_E007+1)
+#define SKERR_SIRQ_E008MSG "CHECK failure XS1"
+#define SKERR_SIRQ_E009 (SKERR_SIRQ_E008+1)
+#define SKERR_SIRQ_E009MSG "CHECK failure XA1"
+#define SKERR_SIRQ_E010 (SKERR_SIRQ_E009+1)
+#define SKERR_SIRQ_E010MSG "CHECK failure XS2"
+#define SKERR_SIRQ_E011 (SKERR_SIRQ_E010+1)
+#define SKERR_SIRQ_E011MSG "CHECK failure XA2"
+#define SKERR_SIRQ_E012 (SKERR_SIRQ_E011+1)
+#define SKERR_SIRQ_E012MSG "unexpected IRQ Master error"
+#define SKERR_SIRQ_E013 (SKERR_SIRQ_E012+1)
+#define SKERR_SIRQ_E013MSG "unexpected IRQ Status error"
+#define SKERR_SIRQ_E014 (SKERR_SIRQ_E013+1)
+#define SKERR_SIRQ_E014MSG "Parity error on RAM (read)"
+#define SKERR_SIRQ_E015 (SKERR_SIRQ_E014+1)
+#define SKERR_SIRQ_E015MSG "Parity error on RAM (write)"
+#define SKERR_SIRQ_E016 (SKERR_SIRQ_E015+1)
+#define SKERR_SIRQ_E016MSG "Parity error MAC 1"
+#define SKERR_SIRQ_E017 (SKERR_SIRQ_E016+1)
+#define SKERR_SIRQ_E017MSG "Parity error MAC 2"
+#define SKERR_SIRQ_E018 (SKERR_SIRQ_E017+1)
+#define SKERR_SIRQ_E018MSG "Parity error RX 1"
+#define SKERR_SIRQ_E019 (SKERR_SIRQ_E018+1)
+#define SKERR_SIRQ_E019MSG "Parity error RX 2"
+#define SKERR_SIRQ_E020 (SKERR_SIRQ_E019+1)
+#define SKERR_SIRQ_E020MSG "MAC transmit FIFO underrun"
+#define SKERR_SIRQ_E021 (SKERR_SIRQ_E020+1)
+#define SKERR_SIRQ_E021MSG "Spurious TWSI interrupt"
+#define SKERR_SIRQ_E022 (SKERR_SIRQ_E021+1)
+#define SKERR_SIRQ_E022MSG "Cable pair swap error"
+#define SKERR_SIRQ_E023 (SKERR_SIRQ_E022+1)
+#define SKERR_SIRQ_E023MSG "Auto-negotiation error"
+#define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1)
+#define SKERR_SIRQ_E024MSG "FIFO overflow error"
+#define SKERR_SIRQ_E025 (SKERR_SIRQ_E024+1)
+#define SKERR_SIRQ_E025MSG "2 Pair Downshift detected"
+
+extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus);
+extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
+extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port);
+extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port);
+
+#endif /* _INC_SKGESIRQ_H_ */
diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h
new file mode 100644
index 000000000000..598bb42ccc3d
--- /dev/null
+++ b/drivers/net/sk98lin/h/ski2c.h
@@ -0,0 +1,177 @@
+/******************************************************************************
+ *
+ * Name: ski2c.h
+ * Project: Gigabit Ethernet Adapters, TWSI-Module
+ * Version: $Revision: 1.35 $
+ * Date: $Date: 2003/10/20 09:06:30 $
+ * Purpose: Defines to access Voltage and Temperature Sensor
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SKI2C.H contains all I2C specific defines
+ */
+
+#ifndef _SKI2C_H_
+#define _SKI2C_H_
+
+typedef struct s_Sensor SK_SENSOR;
+
+#include "h/skgei2c.h"
+
+/*
+ * Define the I2C events.
+ */
+#define SK_I2CEV_IRQ 1 /* IRQ happened Event */
+#define SK_I2CEV_TIM 2 /* Timeout event */
+#define SK_I2CEV_CLEAR 3 /* Clear MIB Values */
+
+/*
+ * Define READ and WRITE Constants.
+ */
+#define I2C_READ 0
+#define I2C_WRITE 1
+#define I2C_BURST 1
+#define I2C_SINGLE 0
+
+#define SKERR_I2C_E001 (SK_ERRBASE_I2C+0)
+#define SKERR_I2C_E001MSG "Sensor index unknown"
+#define SKERR_I2C_E002 (SKERR_I2C_E001+1)
+#define SKERR_I2C_E002MSG "TWSI: transfer does not complete"
+#define SKERR_I2C_E003 (SKERR_I2C_E002+1)
+#define SKERR_I2C_E003MSG "LM80: NAK on device send"
+#define SKERR_I2C_E004 (SKERR_I2C_E003+1)
+#define SKERR_I2C_E004MSG "LM80: NAK on register send"
+#define SKERR_I2C_E005 (SKERR_I2C_E004+1)
+#define SKERR_I2C_E005MSG "LM80: NAK on device (2) send"
+#define SKERR_I2C_E006 (SKERR_I2C_E005+1)
+#define SKERR_I2C_E006MSG "Unknown event"
+#define SKERR_I2C_E007 (SKERR_I2C_E006+1)
+#define SKERR_I2C_E007MSG "LM80 read out of state"
+#define SKERR_I2C_E008 (SKERR_I2C_E007+1)
+#define SKERR_I2C_E008MSG "Unexpected sensor read completed"
+#define SKERR_I2C_E009 (SKERR_I2C_E008+1)
+#define SKERR_I2C_E009MSG "WARNING: temperature sensor out of range"
+#define SKERR_I2C_E010 (SKERR_I2C_E009+1)
+#define SKERR_I2C_E010MSG "WARNING: voltage sensor out of range"
+#define SKERR_I2C_E011 (SKERR_I2C_E010+1)
+#define SKERR_I2C_E011MSG "ERROR: temperature sensor out of range"
+#define SKERR_I2C_E012 (SKERR_I2C_E011+1)
+#define SKERR_I2C_E012MSG "ERROR: voltage sensor out of range"
+#define SKERR_I2C_E013 (SKERR_I2C_E012+1)
+#define SKERR_I2C_E013MSG "ERROR: couldn't init sensor"
+#define SKERR_I2C_E014 (SKERR_I2C_E013+1)
+#define SKERR_I2C_E014MSG "WARNING: fan sensor out of range"
+#define SKERR_I2C_E015 (SKERR_I2C_E014+1)
+#define SKERR_I2C_E015MSG "ERROR: fan sensor out of range"
+#define SKERR_I2C_E016 (SKERR_I2C_E015+1)
+#define SKERR_I2C_E016MSG "TWSI: active transfer does not complete"
+
+/*
+ * Define Timeout values
+ */
+#define SK_I2C_TIM_LONG 2000000L /* 2 seconds */
+#define SK_I2C_TIM_SHORT 100000L /* 100 milliseconds */
+#define SK_I2C_TIM_WATCH 1000000L /* 1 second */
+
+/*
+ * Define trap and error log hold times
+ */
+#ifndef SK_SEN_ERR_TR_HOLD
+#define SK_SEN_ERR_TR_HOLD (4*SK_TICKS_PER_SEC)
+#endif
+#ifndef SK_SEN_ERR_LOG_HOLD
+#define SK_SEN_ERR_LOG_HOLD (60*SK_TICKS_PER_SEC)
+#endif
+#ifndef SK_SEN_WARN_TR_HOLD
+#define SK_SEN_WARN_TR_HOLD (15*SK_TICKS_PER_SEC)
+#endif
+#ifndef SK_SEN_WARN_LOG_HOLD
+#define SK_SEN_WARN_LOG_HOLD (15*60*SK_TICKS_PER_SEC)
+#endif
+
+/*
+ * Defines for SenType
+ */
+#define SK_SEN_UNKNOWN 0
+#define SK_SEN_TEMP 1
+#define SK_SEN_VOLT 2
+#define SK_SEN_FAN 3
+
+/*
+ * Define for the SenErrorFlag
+ */
+#define SK_SEN_ERR_NOT_PRESENT 0 /* Error Flag: Sensor not present */
+#define SK_SEN_ERR_OK 1 /* Error Flag: O.K. */
+#define SK_SEN_ERR_WARN 2 /* Error Flag: Warning */
+#define SK_SEN_ERR_ERR 3 /* Error Flag: Error */
+#define SK_SEN_ERR_FAULTY 4 /* Error Flag: Faulty */
+
+/*
+ * Define the Sensor struct
+ */
+struct s_Sensor {
+ char *SenDesc; /* Description */
+ int SenType; /* Voltage or Temperature */
+ SK_I32 SenValue; /* Current value of the sensor */
+ SK_I32 SenThreErrHigh; /* Upper error threshold of this sensor */
+ SK_I32 SenThreWarnHigh; /* Upper warning threshold of this sensor */
+ SK_I32 SenThreErrLow; /* Lower error threshold of this sensor */
+ SK_I32 SenThreWarnLow; /* Lower warning threshold of this sensor */
+ int SenErrFlag; /* Sensor indicated an error */
+ SK_BOOL SenInit; /* Is sensor initialized ? */
+ SK_U64 SenErrCts; /* Error trap counter */
+ SK_U64 SenWarnCts; /* Warning trap counter */
+ SK_U64 SenBegErrTS; /* Begin error timestamp */
+ SK_U64 SenBegWarnTS; /* Begin warning timestamp */
+ SK_U64 SenLastErrTrapTS; /* Last error trap timestamp */
+ SK_U64 SenLastErrLogTS; /* Last error log timestamp */
+ SK_U64 SenLastWarnTrapTS; /* Last warning trap timestamp */
+ SK_U64 SenLastWarnLogTS; /* Last warning log timestamp */
+ int SenState; /* Sensor State (see HW specific include) */
+ int (*SenRead)(SK_AC *pAC, SK_IOC IoC, struct s_Sensor *pSen);
+ /* Sensor read function */
+ SK_U16 SenReg; /* Register Address for this sensor */
+ SK_U8 SenDev; /* Device Selection for this sensor */
+};
+
+typedef struct s_I2c {
+ SK_SENSOR SenTable[SK_MAX_SENSORS]; /* Sensor Table */
+ int CurrSens; /* Which sensor is currently queried */
+ int MaxSens; /* Max. number of sensors */
+ int TimerMode; /* Use the timer also to watch the state machine */
+ int InitLevel; /* Initialized Level */
+#ifndef SK_DIAG
+ int DummyReads; /* Number of non-checked dummy reads */
+ SK_TIMER SenTimer; /* Sensors timer */
+#endif /* !SK_DIAG */
+} SK_I2C;
+
+extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level);
+extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size,
+ int Reg, int Burst);
+extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
+#ifdef SK_DIAG
+extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg,
+ int Burst);
+#else /* !SK_DIAG */
+extern int SkI2cEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
+extern void SkI2cWaitIrq(SK_AC *pAC, SK_IOC IoC);
+extern void SkI2cIsr(SK_AC *pAC, SK_IOC IoC);
+#endif /* !SK_DIAG */
+#endif /* _SKI2C_H_ */
+
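/*
 * Illustrative sketch (not part of the commit): struct s_Sensor above keeps
 * warning and error thresholds on both sides of the measured value plus a
 * SenErrFlag. A plausible classification of a reading against those
 * thresholds is shown below with plain ints; this is an assumed
 * interpretation for illustration only, not the driver's actual algorithm,
 * which also applies hold times and trap/log counters.
 */
#include <stdio.h>

enum demo_sen_flag {		/* cf. SK_SEN_ERR_OK / _WARN / _ERR */
	DEMO_SEN_OK,
	DEMO_SEN_WARN,
	DEMO_SEN_ERR
};

static enum demo_sen_flag classify(long value,
				   long warn_low, long warn_high,
				   long err_low, long err_high)
{
	if (value < err_low || value > err_high)
		return DEMO_SEN_ERR;
	if (value < warn_low || value > warn_high)
		return DEMO_SEN_WARN;
	return DEMO_SEN_OK;
}

int main(void)
{
	/* Hypothetical temperature sensor: warn outside 10..70, error outside 0..85. */
	printf("%d\n", classify(25, 10, 70, 0, 85));	/* 0: OK      */
	printf("%d\n", classify(75, 10, 70, 0, 85));	/* 1: warning */
	printf("%d\n", classify(90, 10, 70, 0, 85));	/* 2: error   */
	return 0;
}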
diff --git a/drivers/net/sk98lin/h/skqueue.h b/drivers/net/sk98lin/h/skqueue.h
new file mode 100644
index 000000000000..2ec40d4fdf60
--- /dev/null
+++ b/drivers/net/sk98lin/h/skqueue.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * Name: skqueue.h
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.16 $
+ * Date: $Date: 2003/09/16 12:50:32 $
+ * Purpose: Defines for the Event queue
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SKQUEUE.H contains all defines and types for the event queue
+ */
+
+#ifndef _SKQUEUE_H_
+#define _SKQUEUE_H_
+
+
+/*
+ * define the event classes to be served
+ */
+#define SKGE_DRV 1 /* Driver Event Class */
+#define SKGE_RLMT 2 /* RLMT Event Class */
+#define SKGE_I2C 3 /* I2C Event Class */
+#define SKGE_PNMI 4 /* PNMI Event Class */
+#define SKGE_CSUM 5 /* Checksum Event Class */
+#define SKGE_HWAC 6 /* Hardware Access Event Class */
+
+#define SKGE_SWT 9 /* Software Timer Event Class */
+#define SKGE_LACP 10 /* LACP Aggregation Event Class */
+#define SKGE_RSF 11 /* RSF Aggregation Event Class */
+#define SKGE_MARKER 12 /* MARKER Aggregation Event Class */
+#define SKGE_FD 13 /* FD Distributor Event Class */
+
+/*
+ * define event queue as circular buffer
+ */
+#define SK_MAX_EVENT 64
+
+/*
+ * Parameter union for the Para stuff
+ */
+typedef union u_EvPara {
+ void *pParaPtr; /* Parameter Pointer */
+ SK_U64 Para64; /* Parameter 64bit version */
+ SK_U32 Para32[2]; /* Parameter Array of 32bit parameters */
+} SK_EVPARA;
+
+/*
+ * Event Queue
+ * skqueue.c
+ * events are class/value pairs
+ * class is addressee, e.g. RLMT, PNMI etc.
+ * value is command, e.g. line state change, ring op change etc.
+ */
+typedef struct s_EventElem {
+ SK_U32 Class; /* Event class */
+ SK_U32 Event; /* Event value */
+ SK_EVPARA Para; /* Event parameter */
+} SK_EVENTELEM;
+
+typedef struct s_Queue {
+ SK_EVENTELEM EvQueue[SK_MAX_EVENT];
+ SK_EVENTELEM *EvPut;
+ SK_EVENTELEM *EvGet;
+} SK_QUEUE;
+
+extern void SkEventInit(SK_AC *pAC, SK_IOC Ioc, int Level);
+extern void SkEventQueue(SK_AC *pAC, SK_U32 Class, SK_U32 Event,
+ SK_EVPARA Para);
+extern int SkEventDispatcher(SK_AC *pAC, SK_IOC Ioc);
+
+
+/* Define Error Numbers and messages */
+#define SKERR_Q_E001 (SK_ERRBASE_QUEUE+0)
+#define SKERR_Q_E001MSG "Event queue overflow"
+#define SKERR_Q_E002 (SKERR_Q_E001+1)
+#define SKERR_Q_E002MSG "Undefined event class"
+#endif /* _SKQUEUE_H_ */
+
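/*
 * Illustrative sketch (not part of the commit): skqueue.h above describes a
 * fixed-size circular queue of class/event/parameter triples that
 * SkEventQueue() fills and SkEventDispatcher() drains. A minimal standalone
 * ring buffer of the same shape, using indices instead of the EvPut/EvGet
 * pointers; rejecting a put on overflow mirrors the SKERR_Q_E001 "Event
 * queue overflow" define, though the real module's behaviour is not shown
 * in this header.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_EVENT	64		/* cf. SK_MAX_EVENT */

struct demo_event {			/* cf. SK_EVENTELEM */
	uint32_t cls;			/* addressee, e.g. RLMT, PNMI */
	uint32_t event;			/* command value */
	uint64_t para;			/* cf. SK_EVPARA.Para64 */
};

struct demo_queue {			/* cf. SK_QUEUE */
	struct demo_event ev[DEMO_MAX_EVENT];
	unsigned int put;
	unsigned int get;
	unsigned int count;
};

static int demo_queue_put(struct demo_queue *q, uint32_t cls,
			  uint32_t event, uint64_t para)
{
	if (q->count == DEMO_MAX_EVENT)
		return -1;				/* queue overflow */
	q->ev[q->put].cls = cls;
	q->ev[q->put].event = event;
	q->ev[q->put].para = para;
	q->put = (q->put + 1) % DEMO_MAX_EVENT;		/* wrap around */
	q->count++;
	return 0;
}

static void demo_dispatch(struct demo_queue *q)		/* cf. SkEventDispatcher */
{
	while (q->count > 0) {
		struct demo_event *e = &q->ev[q->get];

		printf("class %u event %u para %llu\n",
		       (unsigned)e->cls, (unsigned)e->event,
		       (unsigned long long)e->para);
		q->get = (q->get + 1) % DEMO_MAX_EVENT;
		q->count--;
	}
}

int main(void)
{
	struct demo_queue q = { 0 };	/* zero-initialize the whole queue */

	demo_queue_put(&q, 2 /* SKGE_RLMT */, 2001 /* SK_RLMT_START */, 0);
	demo_queue_put(&q, 4 /* SKGE_PNMI */, 8 /* SK_PNMI_EVT_CLEAR_COUNTER */, 0);
	demo_dispatch(&q);
	return 0;
}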
diff --git a/drivers/net/sk98lin/h/skrlmt.h b/drivers/net/sk98lin/h/skrlmt.h
new file mode 100644
index 000000000000..ca75dfdcf2d6
--- /dev/null
+++ b/drivers/net/sk98lin/h/skrlmt.h
@@ -0,0 +1,438 @@
+/******************************************************************************
+ *
+ * Name: skrlmt.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.37 $
+ * Date: $Date: 2003/04/15 09:43:43 $
+ * Purpose: Header file for Redundant Link ManagemenT.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This is the header file for Redundant Link ManagemenT.
+ *
+ * Include File Hierarchy:
+ *
+ * "skdrv1st.h"
+ * ...
+ * "sktypes.h"
+ * "skqueue.h"
+ * "skaddr.h"
+ * "skrlmt.h"
+ * ...
+ * "skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKRLMT_H
+#define __INC_SKRLMT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* cplusplus */
+
+/* defines ********************************************************************/
+
+#define SK_RLMT_NET_DOWN_TEMP 1 /* NET_DOWN due to last port down. */
+#define SK_RLMT_NET_DOWN_FINAL 2 /* NET_DOWN due to RLMT_STOP. */
+
+/* ----- Default queue sizes - must be multiples of 8 KB ----- */
+
+/* Less than 8 KB free in RX queue => pause frames. */
+#define SK_RLMT_STANDBY_QRXSIZE 128 /* Size of rx standby queue in KB. */
+#define SK_RLMT_STANDBY_QXASIZE 32 /* Size of async standby queue in KB. */
+#define SK_RLMT_STANDBY_QXSSIZE 0 /* Size of sync standby queue in KB. */
+
+#define SK_RLMT_MAX_TX_BUF_SIZE 60 /* Maximum RLMT transmit size. */
+
+/* ----- PORT states ----- */
+
+#define SK_RLMT_PS_INIT 0 /* Port state: Init. */
+#define SK_RLMT_PS_LINK_DOWN 1 /* Port state: Link down. */
+#define SK_RLMT_PS_DOWN 2 /* Port state: Port down. */
+#define SK_RLMT_PS_GOING_UP 3 /* Port state: Going up. */
+#define SK_RLMT_PS_UP 4 /* Port state: Up. */
+
+/* ----- RLMT states ----- */
+
+#define SK_RLMT_RS_INIT 0 /* RLMT state: Init. */
+#define SK_RLMT_RS_NET_DOWN 1 /* RLMT state: Net down. */
+#define SK_RLMT_RS_NET_UP 2 /* RLMT state: Net up. */
+
+/* ----- PORT events ----- */
+
+#define SK_RLMT_LINK_UP 1001 /* Link came up. */
+#define SK_RLMT_LINK_DOWN 1002 /* Link went down. */
+#define SK_RLMT_PORT_ADDR 1003 /* Port address changed. */
+
+/* ----- RLMT events ----- */
+
+#define SK_RLMT_START 2001 /* Start RLMT. */
+#define SK_RLMT_STOP 2002 /* Stop RLMT. */
+#define SK_RLMT_PACKET_RECEIVED 2003 /* Packet was received for RLMT. */
+#define SK_RLMT_STATS_CLEAR 2004 /* Clear statistics. */
+#define SK_RLMT_STATS_UPDATE 2005 /* Update statistics. */
+#define SK_RLMT_PREFPORT_CHANGE 2006 /* Change preferred port. */
+#define SK_RLMT_MODE_CHANGE 2007 /* New RlmtMode. */
+#define SK_RLMT_SET_NETS 2008 /* Number of Nets (1 or 2). */
+
+/* ----- RLMT mode bits ----- */
+
+/*
+ * CAUTION: These defines are private to RLMT.
+ * Please use the RLMT mode defines below.
+ */
+
+#define SK_RLMT_CHECK_LINK 1 /* Check Link. */
+#define SK_RLMT_CHECK_LOC_LINK 2 /* Check other link on same adapter. */
+#define SK_RLMT_CHECK_SEG 4 /* Check segmentation. */
+
+#ifndef RLMT_CHECK_REMOTE
+#define SK_RLMT_CHECK_OTHERS SK_RLMT_CHECK_LOC_LINK
+#else /* RLMT_CHECK_REMOTE */
+#define SK_RLMT_CHECK_REM_LINK 8 /* Check link(s) on other adapter(s). */
+#define SK_RLMT_MAX_REMOTE_PORTS_CHECKED 3
+#define SK_RLMT_CHECK_OTHERS \
+ (SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK)
+#endif /* RLMT_CHECK_REMOTE */
+
+#ifndef SK_RLMT_ENABLE_TRANSPARENT
+#define SK_RLMT_TRANSPARENT 0 /* RLMT transparent - inactive. */
+#else /* SK_RLMT_ENABLE_TRANSPARENT */
+#define SK_RLMT_TRANSPARENT 128 /* RLMT transparent. */
+#endif /* SK_RLMT_ENABLE_TRANSPARENT */
+
+/* ----- RLMT modes ----- */
+
+/* Check Link State. */
+#define SK_RLMT_MODE_CLS (SK_RLMT_CHECK_LINK)
+
+/* Check Local Ports: check other links on the same adapter. */
+#define SK_RLMT_MODE_CLP (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK)
+
+/* Check Local Ports and Segmentation Status. */
+#define SK_RLMT_MODE_CLPSS \
+ (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_SEG)
+
+#ifdef RLMT_CHECK_REMOTE
+/* Check Local and Remote Ports: check links (local or remote). */
+/* Name of define TBD! */
+#define SK_RLMT_MODE_CRP \
+ (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK)
+
+/* Check Local and Remote Ports and Segmentation Status. */
+/* Name of define TBD! */
+#define SK_RLMT_MODE_CRPSS \
+ (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | \
+ SK_RLMT_CHECK_REM_LINK | SK_RLMT_CHECK_SEG)
+#endif /* RLMT_CHECK_REMOTE */
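+
+/*
+ * Illustrative sketch (not compiled): the mode defines above are plain
+ * bit masks, so a single check can be tested with a bitwise AND.
+ * RlmtMode stands for the per-net mode word (see struct s_RlmtNet below).
+ */
+#if 0
+	if ((RlmtMode & SK_RLMT_CHECK_SEG) != 0) {
+		/* segmentation checking is part of the configured mode */
+	}
+#endif /* 0 */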
+
+/* ----- RLMT lookahead result bits ----- */
+
+#define SK_RLMT_RX_RLMT 1 /* Give packet to RLMT. */
+#define SK_RLMT_RX_PROTOCOL 2 /* Give packet to protocol. */
+
+/* Macros */
+
+#if 0
+SK_AC *pAC /* adapter context */
+SK_U32 PortNum /* receiving port */
+unsigned PktLen /* received packet's length */
+SK_BOOL IsBc /* Flag: packet is broadcast */
+unsigned *pOffset /* offs. of bytes to present to SK_RLMT_LOOKAHEAD */
+unsigned *pNumBytes /* #Bytes to present to SK_RLMT_LOOKAHEAD */
+#endif /* 0 */
+
+#define SK_RLMT_PRE_LOOKAHEAD(pAC,PortNum,PktLen,IsBc,pOffset,pNumBytes) { \
+ SK_AC *_pAC; \
+ SK_U32 _PortNum; \
+ _pAC = (pAC); \
+ _PortNum = (SK_U32)(PortNum); \
+ /* _pAC->Rlmt.Port[_PortNum].PacketsRx++; */ \
+ _pAC->Rlmt.Port[_PortNum].PacketsPerTimeSlot++; \
+ if (_pAC->Rlmt.RlmtOff) { \
+ *(pNumBytes) = 0; \
+ } \
+ else {\
+ if ((_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_TRANSPARENT) != 0) { \
+ *(pNumBytes) = 0; \
+ } \
+ else if (IsBc) { \
+ if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode != SK_RLMT_MODE_CLS) { \
+ *(pNumBytes) = 6; \
+ *(pOffset) = 6; \
+ } \
+ else { \
+ *(pNumBytes) = 0; \
+ } \
+ } \
+ else { \
+ if ((PktLen) > SK_RLMT_MAX_TX_BUF_SIZE) { \
+ /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
+ *(pNumBytes) = 0; \
+ } \
+ else { \
+ *(pNumBytes) = 6; \
+ *(pOffset) = 0; \
+ } \
+ } \
+ } \
+}
+
+#if 0
+SK_AC *pAC /* adapter context */
+SK_U32 PortNum /* receiving port */
+SK_U8 *pLaPacket, /* received packet's data (points to pOffset) */
+SK_BOOL IsBc /* Flag: packet is broadcast */
+SK_BOOL IsMc /* Flag: packet is multicast */
+unsigned *pForRlmt /* Result: bits SK_RLMT_RX_RLMT, SK_RLMT_RX_PROTOCOL */
+SK_RLMT_LOOKAHEAD() expects *pNumBytes bytes, starting at
+packet offset *pOffset (see above), at *pLaPacket.
+
+If you use SK_RLMT_LOOKAHEAD in a path where you already know whether the packet
+is BC, MC, or UC, you should use constants for IsBc and IsMc, so that your
+compiler can optimize away the unneeded branches of the if construct.
+#endif /* 0 */
+
+#define SK_RLMT_LOOKAHEAD(pAC,PortNum,pLaPacket,IsBc,IsMc,pForRlmt) { \
+ SK_AC *_pAC; \
+ SK_U32 _PortNum; \
+ SK_U8 *_pLaPacket; \
+ _pAC = (pAC); \
+ _PortNum = (SK_U32)(PortNum); \
+ _pLaPacket = (SK_U8 *)(pLaPacket); \
+ if (IsBc) {\
+ if (!SK_ADDR_EQUAL(_pLaPacket, _pAC->Addr.Net[_pAC->Rlmt.Port[ \
+ _PortNum].Net->NetNumber].CurrentMacAddress.a)) { \
+ _pAC->Rlmt.Port[_PortNum].BcTimeStamp = SkOsGetTime(_pAC); \
+ _pAC->Rlmt.CheckSwitch = SK_TRUE; \
+ } \
+ /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
+ *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
+ } \
+ else if (IsMc) { \
+ if (SK_ADDR_EQUAL(_pLaPacket, BridgeMcAddr.a)) { \
+ _pAC->Rlmt.Port[_PortNum].BpduPacketsPerTimeSlot++; \
+ if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_CHECK_SEG) { \
+ *(pForRlmt) = SK_RLMT_RX_RLMT | SK_RLMT_RX_PROTOCOL; \
+ } \
+ else { \
+ *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
+ } \
+ } \
+ else if (SK_ADDR_EQUAL(_pLaPacket, SkRlmtMcAddr.a)) { \
+ *(pForRlmt) = SK_RLMT_RX_RLMT; \
+ } \
+ else { \
+ /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
+ *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
+ } \
+ } \
+ else { \
+ if (SK_ADDR_EQUAL( \
+ _pLaPacket, \
+ _pAC->Addr.Port[_PortNum].CurrentMacAddress.a)) { \
+ *(pForRlmt) = SK_RLMT_RX_RLMT; \
+ } \
+ else { \
+ /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
+ *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
+ } \
+ } \
+}
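+
+/*
+ * Illustrative sketch (not compiled): typical use of the two lookahead
+ * macros in a receive path. pAC, PortNum, pMsg and FrameLength are
+ * placeholders for the driver's own receive context.
+ */
+#if 0
+	unsigned Offset;
+	unsigned NumBytes;
+	unsigned ForRlmt;
+	SK_BOOL IsBc;	/* set from the destination address */
+	SK_BOOL IsMc;	/* set from the destination address */
+
+	SK_RLMT_PRE_LOOKAHEAD(pAC, PortNum, FrameLength, IsBc, &Offset, &NumBytes);
+	if (NumBytes == 0) {
+		/* nothing to look at - hand the frame to the protocol stack */
+	}
+	else {
+		/* present NumBytes bytes starting at Offset to the lookahead */
+		SK_RLMT_LOOKAHEAD(pAC, PortNum, pMsg + Offset, IsBc, IsMc, &ForRlmt);
+		if (ForRlmt & SK_RLMT_RX_RLMT) {
+			/* e.g. queue an SK_RLMT_PACKET_RECEIVED event */
+		}
+		if (ForRlmt & SK_RLMT_RX_PROTOCOL) {
+			/* pass the frame up to the protocol stack */
+		}
+	}
+#endif /* 0 */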
+
+#ifdef SK_RLMT_FAST_LOOKAHEAD
+Error: SK_RLMT_FAST_LOOKAHEAD no longer used. Use new macros for lookahead.
+#endif /* SK_RLMT_FAST_LOOKAHEAD */
+#ifdef SK_RLMT_SLOW_LOOKAHEAD
+Error: SK_RLMT_SLOW_LOOKAHEAD no longer used. Use new macros for lookahead.
+#endif /* SK_RLMT_SLOW_LOOKAHEAD */
+
+/* typedefs *******************************************************************/
+
+#ifdef SK_RLMT_MBUF_PRIVATE
+typedef struct s_RlmtMbuf {
+ some content
+} SK_RLMT_MBUF;
+#endif /* SK_RLMT_MBUF_PRIVATE */
+
+
+#ifdef SK_LA_INFO
+typedef struct s_Rlmt_PacketInfo {
+ unsigned PacketLength; /* Length of packet. */
+ unsigned PacketType; /* Directed/Multicast/Broadcast. */
+} SK_RLMT_PINFO;
+#endif /* SK_LA_INFO */
+
+
+typedef struct s_RootId {
+ SK_U8 Id[8]; /* Root Bridge Id. */
+} SK_RLMT_ROOT_ID;
+
+
+typedef struct s_port {
+ SK_MAC_ADDR CheckAddr;
+ SK_BOOL SuspectTx;
+} SK_PORT_CHECK;
+
+
+typedef struct s_RlmtNet SK_RLMT_NET;
+
+
+typedef struct s_RlmtPort {
+
+/* ----- Public part (read-only) ----- */
+
+ SK_U8 PortState; /* Current state of this port. */
+
+ /* For PNMI */
+ SK_BOOL LinkDown;
+ SK_BOOL PortDown;
+ SK_U8 Align01;
+
+ SK_U32 PortNumber; /* Number of port on adapter. */
+ SK_RLMT_NET * Net; /* Net port belongs to. */
+
+ SK_U64 TxHelloCts;
+ SK_U64 RxHelloCts;
+ SK_U64 TxSpHelloReqCts;
+ SK_U64 RxSpHelloCts;
+
+/* ----- Private part ----- */
+
+/* SK_U64 PacketsRx; */ /* Total packets received. */
+ SK_U32 PacketsPerTimeSlot; /* Packets rxed between TOs. */
+/* SK_U32 DataPacketsPerTimeSlot; */ /* Data packets ... */
+ SK_U32 BpduPacketsPerTimeSlot; /* BPDU packets rxed in TS. */
+ SK_U64 BcTimeStamp; /* Time of last BC receive. */
+ SK_U64 GuTimeStamp; /* Time of entering GOING_UP. */
+
+ SK_TIMER UpTimer; /* Timer struct Link/Port up. */
+ SK_TIMER DownRxTimer; /* Timer struct down rx. */
+ SK_TIMER DownTxTimer; /* Timer struct down tx. */
+
+ SK_U32 CheckingState; /* Checking State. */
+
+ SK_ADDR_PORT * AddrPort;
+
+ SK_U8 Random[4]; /* Random value. */
+ unsigned PortsChecked; /* #ports checked. */
+ unsigned PortsSuspect; /* #ports checked that are suspect. */
+ SK_PORT_CHECK PortCheck[1];
+/* SK_PORT_CHECK PortCheck[SK_MAX_MACS - 1]; */
+
+ SK_BOOL PortStarted; /* Port is started. */
+ SK_BOOL PortNoRx; /* NoRx for >= 1 time slot. */
+ SK_BOOL RootIdSet;
+ SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */
+} SK_RLMT_PORT;
+
+
+struct s_RlmtNet {
+
+/* ----- Public part (read-only) ----- */
+
+ SK_U32 NetNumber; /* Number of net. */
+
+ SK_RLMT_PORT * Port[SK_MAX_MACS]; /* Ports that belong to this net. */
+ SK_U32 NumPorts; /* Number of ports. */
+ SK_U32 PrefPort; /* Preferred port. */
+
+ /* For PNMI */
+
+ SK_U32 ChgBcPrio; /* Change Priority of last broadcast received */
+ SK_U32 RlmtMode; /* Check ... */
+ SK_U32 ActivePort; /* Active port. */
+ SK_U32 Preference; /* 0xFFFFFFFF: Automatic. */
+
+ SK_U8 RlmtState; /* Current RLMT state. */
+
+/* ----- Private part ----- */
+ SK_BOOL RootIdSet;
+ SK_U16 Align01;
+
+ int LinksUp; /* #Links up. */
+ int PortsUp; /* #Ports up. */
+ SK_U32 TimeoutValue; /* RLMT timeout value. */
+
+ SK_U32 CheckingState; /* Checking State. */
+ SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */
+
+ SK_TIMER LocTimer; /* Timer struct. */
+ SK_TIMER SegTimer; /* Timer struct. */
+};
+
+
+typedef struct s_Rlmt {
+
+/* ----- Public part (read-only) ----- */
+
+ SK_U32 NumNets; /* Number of nets. */
+ SK_U32 NetsStarted; /* Number of nets started. */
+ SK_RLMT_NET Net[SK_MAX_NETS]; /* Array of available nets. */
+ SK_RLMT_PORT Port[SK_MAX_MACS]; /* Array of available ports. */
+
+/* ----- Private part ----- */
+ SK_BOOL CheckSwitch;
+ SK_BOOL RlmtOff; /* set to zero if the Mac addresses
+ are equal or the second one
+ is zero */
+ SK_U16 Align01;
+
+} SK_RLMT;
+
+
+extern SK_MAC_ADDR BridgeMcAddr;
+extern SK_MAC_ADDR SkRlmtMcAddr;
+
+/* function prototypes ********************************************************/
+
+
+#ifndef SK_KR_PROTO
+
+/* Functions provided by SkRlmt */
+
+/* ANSI/C++ compliant function prototypes */
+
+extern void SkRlmtInit(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int Level);
+
+extern int SkRlmtEvent(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 Event,
+ SK_EVPARA Para);
+
+#else /* defined(SK_KR_PROTO) */
+
+/* Non-ANSI/C++ compliant function prototypes */
+
+#error KR-style function prototypes are not yet provided.
+
+#endif /* defined(SK_KR_PROTO) */
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_SKRLMT_H */
diff --git a/drivers/net/sk98lin/h/sktimer.h b/drivers/net/sk98lin/h/sktimer.h
new file mode 100644
index 000000000000..04e6d7c1ec33
--- /dev/null
+++ b/drivers/net/sk98lin/h/sktimer.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Name: sktimer.h
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.11 $
+ * Date: $Date: 2003/09/16 12:58:18 $
+ * Purpose: Defines for the timer functions
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SKTIMER.H contains all defines and types for the timer functions
+ */
+
+#ifndef _SKTIMER_H_
+#define _SKTIMER_H_
+
+#include "h/skqueue.h"
+
+/*
+ * SK timer
+ * - needed wherever a timer is used. Put this in your data structure
+ * wherever you want.
+ */
+typedef struct s_Timer SK_TIMER;
+
+struct s_Timer {
+ SK_TIMER *TmNext; /* linked list */
+ SK_U32 TmClass; /* Timer Event class */
+ SK_U32 TmEvent; /* Timer Event value */
+ SK_EVPARA TmPara; /* Timer Event parameter */
+ SK_U32 TmDelta; /* delta time */
+ int TmActive; /* flag: active/inactive */
+};
+
+/*
+ * Timer control struct.
+ * - used in the adapter's context under the name pAC->Tim
+ */
+typedef struct s_TimCtrl {
+ SK_TIMER *StQueue; /* Head of Timer queue */
+} SK_TIMCTRL;
+
+extern void SkTimerInit(SK_AC *pAC, SK_IOC Ioc, int Level);
+extern void SkTimerStop(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer);
+extern void SkTimerStart(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer,
+ SK_U32 Time, SK_U32 Class, SK_U32 Event, SK_EVPARA Para);
+extern void SkTimerDone(SK_AC *pAC, SK_IOC Ioc);
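+
+/*
+ * Illustrative sketch (not compiled): an SK_TIMER is embedded in the
+ * caller's own data and armed with SkTimerStart(); Class, Event and
+ * Para describe the event that is presumably delivered via the event
+ * queue (skqueue.h) when the timer expires. SK_DRV_SOME_TIMEOUT and
+ * the variables pAC/Ioc are placeholders, not defined in this header.
+ */
+#if 0
+	SK_TIMER MyTimer;	/* usually a member of a driver structure */
+	SK_EVPARA Para;
+
+	Para.Para64 = 0;
+	SkTimerStart(pAC, Ioc, &MyTimer, 1000000 /* delta time */,
+		SKGE_DRV, SK_DRV_SOME_TIMEOUT, Para);
+
+	/* a running timer can be cancelled again: */
+	SkTimerStop(pAC, Ioc, &MyTimer);
+#endif /* 0 */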
+#endif /* _SKTIMER_H_ */
diff --git a/drivers/net/sk98lin/h/sktypes.h b/drivers/net/sk98lin/h/sktypes.h
new file mode 100644
index 000000000000..40edc96e1055
--- /dev/null
+++ b/drivers/net/sk98lin/h/sktypes.h
@@ -0,0 +1,69 @@
+/******************************************************************************
+ *
+ * Name: sktypes.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.2 $
+ * Date: $Date: 2003/10/07 08:16:51 $
+ * Purpose: Define data types for Linux
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * In this file, all data types that are needed by the common modules
+ * are mapped to Linux data types.
+ *
+ *
+ * Include File Hierarchy:
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __INC_SKTYPES_H
+#define __INC_SKTYPES_H
+
+
+/* defines *******************************************************************/
+
+/*
+ * Data types with a specific size. 'I' = signed, 'U' = unsigned.
+ */
+#define SK_I8 s8
+#define SK_U8 u8
+#define SK_I16 s16
+#define SK_U16 u16
+#define SK_I32 s32
+#define SK_U32 u32
+#define SK_I64 s64
+#define SK_U64 u64
+
+#define SK_UPTR ulong /* casting pointer <-> integral */
+
+/*
+ * Boolean type.
+ */
+#define SK_BOOL SK_U8
+#define SK_FALSE 0
+#define SK_TRUE (!SK_FALSE)
+
+/* typedefs *******************************************************************/
+
+/* function prototypes ********************************************************/
+
+#endif /* __INC_SKTYPES_H */
diff --git a/drivers/net/sk98lin/h/skversion.h b/drivers/net/sk98lin/h/skversion.h
new file mode 100644
index 000000000000..a1a7294828e5
--- /dev/null
+++ b/drivers/net/sk98lin/h/skversion.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ *
+ * Name: version.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.5 $
+ * Date: $Date: 2003/10/07 08:16:51 $
+ * Purpose: SK specific Error log support
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifdef lint
+static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH.";
+static const char SysKonnectBuildNumber[] =
+ "@(#)SK-BUILD: 6.23 PL: 01";
+#endif /* lint */
+
+#define BOOT_STRING "sk98lin: Network Device Driver v6.23\n" \
+ "(C)Copyright 1999-2004 Marvell(R)."
+
+#define VER_STRING "6.23"
+#define DRIVER_FILE_NAME "sk98lin"
+#define DRIVER_REL_DATE "Feb-13-2004"
+
+
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h
new file mode 100644
index 000000000000..bdc1a5eaaae9
--- /dev/null
+++ b/drivers/net/sk98lin/h/skvpd.h
@@ -0,0 +1,271 @@
+/******************************************************************************
+ *
+ * Name: skvpd.h
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.15 $
+ * Date: $Date: 2003/01/13 10:39:38 $
+ * Purpose: Defines and Macros for VPD handling
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2003 SysKonnect GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * skvpd.h contains Diagnostic specific defines for VPD handling
+ */
+
+#ifndef __INC_SKVPD_H_
+#define __INC_SKVPD_H_
+
+/*
+ * Define Resource Type Identifiers and VPD keywords
+ */
+#define RES_ID 0x82 /* Resource Type ID String (Product Name) */
+#define RES_VPD_R 0x90 /* start of VPD read only area */
+#define RES_VPD_W 0x91 /* start of VPD read/write area */
+#define RES_END 0x78 /* Resource Type End Tag */
+
+#ifndef VPD_NAME
+#define VPD_NAME "Name" /* Product Name, VPD name of RES_ID */
+#endif /* VPD_NAME */
+#define VPD_PN "PN" /* Adapter Part Number */
+#define VPD_EC "EC" /* Adapter Engineering Level */
+#define VPD_MN "MN" /* Manufacture ID */
+#define VPD_SN "SN" /* Serial Number */
+#define VPD_CP "CP" /* Extended Capability */
+#define VPD_RV "RV" /* Checksum and Reserved */
+#define VPD_YA "YA" /* Asset Tag Identifier */
+#define VPD_VL "VL" /* First Error Log Message (SK specific) */
+#define VPD_VF "VF" /* Second Error Log Message (SK specific) */
+#define VPD_RW "RW" /* Remaining Read / Write Area */
+
+/* 'type' values for vpd_setup_para() */
+#define VPD_RO_KEY 1 /* RO keys are "PN", "EC", "MN", "SN", "RV" */
+#define VPD_RW_KEY 2 /* RW keys are "Yx", "Vx", and "RW" */
+
+/* 'op' values for vpd_setup_para() */
+#define ADD_KEY 1 /* add the key at the pos "RV" or "RW" */
+#define OWR_KEY 2 /* overwrite key if already exists */
+
+/*
+ * Define READ and WRITE Constants.
+ */
+
+#define VPD_DEV_ID_GENESIS 0x4300
+
+#define VPD_SIZE_YUKON 256
+#define VPD_SIZE_GENESIS 512
+#define VPD_SIZE 512
+#define VPD_READ 0x0000
+#define VPD_WRITE 0x8000
+
+#define VPD_STOP(pAC,IoC) VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG,VPD_WRITE)
+
+#define VPD_GET_RES_LEN(p) ((unsigned int) \
+ (* (SK_U8 *)&(p)[1]) |\
+ ((* (SK_U8 *)&(p)[2]) << 8))
+#define VPD_GET_VPD_LEN(p) ((unsigned int)(* (SK_U8 *)&(p)[2]))
+#define VPD_GET_VAL(p) ((char *)&(p)[3])
+
+#define VPD_MAX_LEN 50
+
+/* VPD status */
+ /* bit 7..1 reserved */
+#define VPD_VALID (1<<0) /* VPD data buffer, vpd_free_ro, */
+ /* and vpd_free_rw valid */
+
+/*
+ * VPD structs
+ */
+typedef struct s_vpd_status {
+ unsigned short Align01; /* Alignment */
+ unsigned short vpd_status; /* VPD status, description see above */
+ int vpd_free_ro; /* unused bytes in read only area */
+ int vpd_free_rw; /* bytes available in read/write area */
+} SK_VPD_STATUS;
+
+typedef struct s_vpd {
+ SK_VPD_STATUS v; /* VPD status structure */
+ char vpd_buf[VPD_SIZE]; /* VPD buffer */
+ int rom_size; /* VPD ROM Size from PCI_OUR_REG_2 */
+ int vpd_size; /* saved VPD-size */
+} SK_VPD;
+
+typedef struct s_vpd_para {
+ unsigned int p_len; /* parameter length */
+ char *p_val; /* points to the value */
+} SK_VPD_PARA;
+
+/*
+ * structure of Large Resource Type Identifiers
+ */
+
+/* was removed because of alignment problems */
+
+/*
+ * structure of VPD keywords
+ */
+typedef struct s_vpd_key {
+ char p_key[2]; /* 2 bytes ID string */
+ unsigned char p_len; /* 1 byte length */
+ char p_val; /* start of the value string */
+} SK_VPD_KEY;
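+
+/*
+ * Illustrative sketch (not compiled): the layout assumed by the macros
+ * and the keyword struct above. Within a VPD area each keyword is
+ * stored as two ID bytes, one length byte and 'length' value bytes,
+ * so a buffer can be walked keyword by keyword. 'p' is a placeholder
+ * pointer to the first keyword of such an area.
+ */
+#if 0
+	char *p;
+
+	for (;;) {
+		/* p[0],p[1] hold the keyword (e.g. "SN"), */
+		/* VPD_GET_VPD_LEN(p) its length, */
+		/* VPD_GET_VAL(p) the (unterminated) value string */
+		if (p[0] == 'R' && (p[1] == 'V' || p[1] == 'W')) {
+			break;	/* "RV"/"RW" terminate the ro/rw area */
+		}
+		p = VPD_GET_VAL(p) + VPD_GET_VPD_LEN(p);	/* next keyword */
+	}
+#endif /* 0 */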
+
+
+/*
+ * System specific VPD macros
+ */
+#ifndef SKDIAG
+#ifndef VPD_DO_IO
+#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val)
+#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val)
+#define VPD_OUT32(pAC,IoC,Addr,Val) (void)SkPciWriteCfgDWord(pAC,Addr,Val)
+#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal)
+#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal)
+#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal)
+#else /* VPD_DO_IO */
+#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val)
+#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val)
+#define VPD_OUT32(pAC,IoC,Addr,Val) SK_OUT32(IoC,PCI_C(Addr),Val)
+#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal)
+#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal)
+#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal)
+#endif /* VPD_DO_IO */
+#else /* SKDIAG */
+#define VPD_OUT8(pAC,Ioc,Addr,Val) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciWriteCfgByte(pAC,Addr,Val); \
+ else \
+ SK_OUT8(pAC,PCI_C(Addr),Val); \
+ }
+#define VPD_OUT16(pAC,Ioc,Addr,Val) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciWriteCfgWord(pAC,Addr,Val); \
+ else \
+ SK_OUT16(pAC,PCI_C(Addr),Val); \
+ }
+#define VPD_OUT32(pAC,Ioc,Addr,Val) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciWriteCfgDWord(pAC,Addr,Val); \
+ else \
+ SK_OUT32(pAC,PCI_C(Addr),Val); \
+ }
+#define VPD_IN8(pAC,Ioc,Addr,pVal) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciReadCfgByte(pAC,Addr,pVal); \
+ else \
+ SK_IN8(pAC,PCI_C(Addr),pVal); \
+ }
+#define VPD_IN16(pAC,Ioc,Addr,pVal) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciReadCfgWord(pAC,Addr,pVal); \
+ else \
+ SK_IN16(pAC,PCI_C(Addr),pVal); \
+ }
+#define VPD_IN32(pAC,Ioc,Addr,pVal) { \
+ if ((pAC)->DgT.DgUseCfgCycle) \
+ SkPciReadCfgDWord(pAC,Addr,pVal); \
+ else \
+ SK_IN32(pAC,PCI_C(Addr),pVal); \
+ }
+#endif /* nSKDIAG */
+
+/* function prototypes ********************************************************/
+
+#ifndef SK_KR_PROTO
+#ifdef SKDIAG
+extern SK_U32 VpdReadDWord(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ int addr);
+#endif /* SKDIAG */
+
+extern int VpdSetupPara(
+ SK_AC *pAC,
+ const char *key,
+ const char *buf,
+ int len,
+ int type,
+ int op);
+
+extern SK_VPD_STATUS *VpdStat(
+ SK_AC *pAC,
+ SK_IOC IoC);
+
+extern int VpdKeys(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ char *buf,
+ int *len,
+ int *elements);
+
+extern int VpdRead(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ const char *key,
+ char *buf,
+ int *len);
+
+extern SK_BOOL VpdMayWrite(
+ char *key);
+
+extern int VpdWrite(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ const char *key,
+ const char *buf);
+
+extern int VpdDelete(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ char *key);
+
+extern int VpdUpdate(
+ SK_AC *pAC,
+ SK_IOC IoC);
+
+extern void VpdErrLog(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ char *msg);
+
+#ifdef SKDIAG
+extern int VpdReadBlock(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ char *buf,
+ int addr,
+ int len);
+
+extern int VpdWriteBlock(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ char *buf,
+ int addr,
+ int len);
+#endif /* SKDIAG */
+#else /* SK_KR_PROTO */
+extern SK_U32 VpdReadDWord();
+extern int VpdSetupPara();
+extern SK_VPD_STATUS *VpdStat();
+extern int VpdKeys();
+extern int VpdRead();
+extern SK_BOOL VpdMayWrite();
+extern int VpdWrite();
+extern int VpdDelete();
+extern int VpdUpdate();
+extern void VpdErrLog();
+#endif /* SK_KR_PROTO */
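+
+/*
+ * Illustrative sketch (not compiled): reading and writing single VPD
+ * keywords through the prototypes above. Return value conventions are
+ * not defined in this header; pAC and IoC are placeholders.
+ */
+#if 0
+	char Buf[VPD_MAX_LEN];
+	int Len = sizeof(Buf);
+
+	/* read the adapter's serial number ("SN") into Buf */
+	VpdRead(pAC, IoC, VPD_SN, Buf, &Len);
+
+	/* change a writable keyword, e.g. the asset tag ... */
+	if (VpdMayWrite(VPD_YA)) {
+		VpdWrite(pAC, IoC, VPD_YA, "ASSET-01");
+		/* ... and presumably commit the modified image */
+		VpdUpdate(pAC, IoC);
+	}
+#endif /* 0 */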
+
+#endif /* __INC_SKVPD_H_ */
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h
new file mode 100644
index 000000000000..2b19f8ad0318
--- /dev/null
+++ b/drivers/net/sk98lin/h/xmac_ii.h
@@ -0,0 +1,1579 @@
+/******************************************************************************
+ *
+ * Name: xmac_ii.h
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.52 $
+ * Date: $Date: 2003/10/02 16:35:50 $
+ * Purpose: Defines and Macros for Gigabit Ethernet Controller
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef __INC_XMAC_H
+#define __INC_XMAC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines ********************************************************************/
+
+/*
+ * XMAC II registers
+ *
+ * The XMAC registers are 16 or 32 bits wide.
+ * The XMAC's host processor interface is set to 16 bit mode,
+ * therefore ALL registers will be addressed with 16 bit accesses.
+ *
+ * The following macros are provided to access the XMAC registers
+ * XM_IN16(), XM_OUT16(), XM_IN32(), XM_OUT32(), XM_INADR(), XM_OUTADR(),
+ * XM_INHASH(), and XM_OUTHASH().
+ * The macros are defined in SkGeHw.h.
+ *
+ * Note: NA reg = Network Address, e.g. DA, SA, etc.
+ *
+ */
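+
+/*
+ * Illustrative sketch (not compiled): the bit defines further down are
+ * plain masks, e.g. enabling receiver and transmitter means OR-ing
+ * XM_MMU_ENA_RX and XM_MMU_ENA_TX into the 16 bit MMU Command Register.
+ * The access macro signature used here, (IoC, Mac, Reg, pVal/Val), is
+ * an assumption based on the XM_EXM usage note below.
+ */
+#if 0
+	SK_U16 Word;
+
+	XM_IN16(IoC, MAC_1, XM_MMU_CMD, &Word);	/* read-modify-write */
+	Word |= (XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+	XM_OUT16(IoC, MAC_1, XM_MMU_CMD, Word);
+#endif /* 0 */
+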
+#define XM_MMU_CMD 0x0000 /* 16 bit r/w MMU Command Register */
+ /* 0x0004: reserved */
+#define XM_POFF 0x0008 /* 32 bit r/w Packet Offset Register */
+#define XM_BURST 0x000c /* 32 bit r/w Burst Register for half duplex*/
+#define XM_1L_VLAN_TAG 0x0010 /* 16 bit r/w One Level VLAN Tag ID */
+#define XM_2L_VLAN_TAG 0x0014 /* 16 bit r/w Two Level VLAN Tag ID */
+ /* 0x0018 - 0x001e: reserved */
+#define XM_TX_CMD 0x0020 /* 16 bit r/w Transmit Command Register */
+#define XM_TX_RT_LIM 0x0024 /* 16 bit r/w Transmit Retry Limit Register */
+#define XM_TX_STIME 0x0028 /* 16 bit r/w Transmit Slottime Register */
+#define XM_TX_IPG 0x002c /* 16 bit r/w Transmit Inter Packet Gap */
+#define XM_RX_CMD 0x0030 /* 16 bit r/w Receive Command Register */
+#define XM_PHY_ADDR 0x0034 /* 16 bit r/w PHY Address Register */
+#define XM_PHY_DATA 0x0038 /* 16 bit r/w PHY Data Register */
+ /* 0x003c: reserved */
+#define XM_GP_PORT 0x0040 /* 32 bit r/w General Purpose Port Register */
+#define XM_IMSK 0x0044 /* 16 bit r/w Interrupt Mask Register */
+#define XM_ISRC 0x0048 /* 16 bit r/o Interrupt Status Register */
+#define XM_HW_CFG 0x004c /* 16 bit r/w Hardware Config Register */
+ /* 0x0050 - 0x005e: reserved */
+#define XM_TX_LO_WM 0x0060 /* 16 bit r/w Tx FIFO Low Water Mark */
+#define XM_TX_HI_WM 0x0062 /* 16 bit r/w Tx FIFO High Water Mark */
+#define XM_TX_THR 0x0064 /* 16 bit r/w Tx Request Threshold */
+#define XM_HT_THR 0x0066 /* 16 bit r/w Host Request Threshold */
+#define XM_PAUSE_DA 0x0068 /* NA reg r/w Pause Destination Address */
+ /* 0x006e: reserved */
+#define XM_CTL_PARA 0x0070 /* 32 bit r/w Control Parameter Register */
+#define XM_MAC_OPCODE 0x0074 /* 16 bit r/w Opcode for MAC control frames */
+#define XM_MAC_PTIME 0x0076 /* 16 bit r/w Pause time for MAC ctrl frames*/
+#define XM_TX_STAT 0x0078 /* 32 bit r/o Tx Status LIFO Register */
+
+ /* 0x0080 - 0x00fc: 16 NA reg r/w Exact Match Address Registers */
+ /* use the XM_EXM() macro to address */
+#define XM_EXM_START 0x0080 /* r/w Start Address of the EXM Regs */
+
+ /*
+ * XM_EXM(Reg)
+ *
+ * returns the XMAC address offset of specified Exact Match Addr Reg
+ *
+ * para: Reg EXM register to addr (0 .. 15)
+ *
+ * usage: XM_INADDR(IoC, MAC_1, XM_EXM(i), &val[i]);
+ */
+#define XM_EXM(Reg) (XM_EXM_START + ((Reg) << 3))
+
+#define XM_SRC_CHK 0x0100 /* NA reg r/w Source Check Address Register */
+#define XM_SA 0x0108 /* NA reg r/w Station Address Register */
+#define XM_HSM 0x0110 /* 64 bit r/w Hash Match Address Registers */
+#define XM_RX_LO_WM 0x0118 /* 16 bit r/w Receive Low Water Mark */
+#define XM_RX_HI_WM 0x011a /* 16 bit r/w Receive High Water Mark */
+#define XM_RX_THR 0x011c /* 32 bit r/w Receive Request Threshold */
+#define XM_DEV_ID 0x0120 /* 32 bit r/o Device ID Register */
+#define XM_MODE 0x0124 /* 32 bit r/w Mode Register */
+#define XM_LSA 0x0128 /* NA reg r/o Last Source Register */
+ /* 0x012e: reserved */
+#define XM_TS_READ 0x0130 /* 32 bit r/o Time Stamp Read Register */
+#define XM_TS_LOAD 0x0134 /* 32 bit r/o Time Stamp Load Value */
+ /* 0x0138 - 0x01fe: reserved */
+#define XM_STAT_CMD 0x0200 /* 16 bit r/w Statistics Command Register */
+#define XM_RX_CNT_EV 0x0204 /* 32 bit r/o Rx Counter Event Register */
+#define XM_TX_CNT_EV 0x0208 /* 32 bit r/o Tx Counter Event Register */
+#define XM_RX_EV_MSK 0x020c /* 32 bit r/w Rx Counter Event Mask */
+#define XM_TX_EV_MSK 0x0210 /* 32 bit r/w Tx Counter Event Mask */
+ /* 0x0214 - 0x027e: reserved */
+#define XM_TXF_OK 0x0280 /* 32 bit r/o Frames Transmitted OK Counter */
+#define XM_TXO_OK_HI 0x0284 /* 32 bit r/o Octets Transmitted OK High Cnt*/
+#define XM_TXO_OK_LO 0x0288 /* 32 bit r/o Octets Transmitted OK Low Cnt */
+#define XM_TXF_BC_OK 0x028c /* 32 bit r/o Broadcast Frames Xmitted OK */
+#define XM_TXF_MC_OK 0x0290 /* 32 bit r/o Multicast Frames Xmitted OK */
+#define XM_TXF_UC_OK 0x0294 /* 32 bit r/o Unicast Frames Xmitted OK */
+#define XM_TXF_LONG 0x0298 /* 32 bit r/o Tx Long Frame Counter */
+#define XM_TXE_BURST 0x029c /* 32 bit r/o Tx Burst Event Counter */
+#define XM_TXF_MPAUSE 0x02a0 /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
+#define XM_TXF_MCTRL 0x02a4 /* 32 bit r/o Tx MAC Ctrl Frame Counter */
+#define XM_TXF_SNG_COL 0x02a8 /* 32 bit r/o Tx Single Collision Counter */
+#define XM_TXF_MUL_COL 0x02ac /* 32 bit r/o Tx Multiple Collision Counter */
+#define XM_TXF_ABO_COL 0x02b0 /* 32 bit r/o Tx aborted due to Exces. Col. */
+#define XM_TXF_LAT_COL 0x02b4 /* 32 bit r/o Tx Late Collision Counter */
+#define XM_TXF_DEF 0x02b8 /* 32 bit r/o Tx Deferred Frame Counter */
+#define XM_TXF_EX_DEF 0x02bc /* 32 bit r/o Tx Excessive Deferral Counter */
+#define XM_TXE_FIFO_UR 0x02c0 /* 32 bit r/o Tx FIFO Underrun Event Cnt */
+#define XM_TXE_CS_ERR 0x02c4 /* 32 bit r/o Tx Carrier Sense Error Cnt */
+#define XM_TXP_UTIL 0x02c8 /* 32 bit r/o Tx Utilization in % */
+ /* 0x02cc - 0x02ce: reserved */
+#define XM_TXF_64B 0x02d0 /* 32 bit r/o 64 Byte Tx Frame Counter */
+#define XM_TXF_127B 0x02d4 /* 32 bit r/o 65-127 Byte Tx Frame Counter */
+#define XM_TXF_255B 0x02d8 /* 32 bit r/o 128-255 Byte Tx Frame Counter */
+#define XM_TXF_511B 0x02dc /* 32 bit r/o 256-511 Byte Tx Frame Counter */
+#define XM_TXF_1023B 0x02e0 /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
+#define XM_TXF_MAX_SZ 0x02e4 /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
+ /* 0x02e8 - 0x02fe: reserved */
+#define XM_RXF_OK 0x0300 /* 32 bit r/o Frames Received OK */
+#define XM_RXO_OK_HI 0x0304 /* 32 bit r/o Octets Received OK High Cnt */
+#define XM_RXO_OK_LO 0x0308 /* 32 bit r/o Octets Received OK Low Counter*/
+#define XM_RXF_BC_OK 0x030c /* 32 bit r/o Broadcast Frames Received OK */
+#define XM_RXF_MC_OK 0x0310 /* 32 bit r/o Multicast Frames Received OK */
+#define XM_RXF_UC_OK 0x0314 /* 32 bit r/o Unicast Frames Received OK */
+#define XM_RXF_MPAUSE 0x0318 /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
+#define XM_RXF_MCTRL 0x031c /* 32 bit r/o Rx MAC Ctrl Frame Counter */
+#define XM_RXF_INV_MP 0x0320 /* 32 bit r/o Rx invalid Pause Frame Cnt */
+#define XM_RXF_INV_MOC 0x0324 /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
+#define XM_RXE_BURST 0x0328 /* 32 bit r/o Rx Burst Event Counter */
+#define XM_RXE_FMISS 0x032c /* 32 bit r/o Rx Missed Frames Event Cnt */
+#define XM_RXF_FRA_ERR 0x0330 /* 32 bit r/o Rx Framing Error Counter */
+#define XM_RXE_FIFO_OV 0x0334 /* 32 bit r/o Rx FIFO overflow Event Cnt */
+#define XM_RXF_JAB_PKT 0x0338 /* 32 bit r/o Rx Jabber Packet Frame Cnt */
+#define XM_RXE_CAR_ERR 0x033c /* 32 bit r/o Rx Carrier Event Error Cnt */
+#define XM_RXF_LEN_ERR 0x0340 /* 32 bit r/o Rx in Range Length Error */
+#define XM_RXE_SYM_ERR 0x0344 /* 32 bit r/o Rx Symbol Error Counter */
+#define XM_RXE_SHT_ERR 0x0348 /* 32 bit r/o Rx Short Event Error Cnt */
+#define XM_RXE_RUNT 0x034c /* 32 bit r/o Rx Runt Event Counter */
+#define XM_RXF_LNG_ERR 0x0350 /* 32 bit r/o Rx Frame too Long Error Cnt */
+#define XM_RXF_FCS_ERR 0x0354 /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
+ /* 0x0358 - 0x035a: reserved */
+#define XM_RXF_CEX_ERR 0x035c /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
+#define XM_RXP_UTIL 0x0360 /* 32 bit r/o Rx Utilization in % */
+ /* 0x0364 - 0x0366: reserved */
+#define XM_RXF_64B 0x0368 /* 32 bit r/o 64 Byte Rx Frame Counter */
+#define XM_RXF_127B 0x036c /* 32 bit r/o 65-127 Byte Rx Frame Counter */
+#define XM_RXF_255B 0x0370 /* 32 bit r/o 128-255 Byte Rx Frame Counter */
+#define XM_RXF_511B 0x0374 /* 32 bit r/o 256-511 Byte Rx Frame Counter */
+#define XM_RXF_1023B 0x0378 /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
+#define XM_RXF_MAX_SZ 0x037c /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
+ /* 0x0380 - 0x03fe: reserved */
+
+
+/*----------------------------------------------------------------------------*/
+/*
+ * XMAC Bit Definitions
+ *
+ * If the bit access behaviour differs from the register access behaviour
+ * (r/w, r/o) this is documented after the bit number.
+ * The following bit access behaviours are used:
+ * (sc) self clearing
+ * (ro) read only
+ */
+
+/* XM_MMU_CMD 16 bit r/w MMU Command Register */
+ /* Bit 15..13: reserved */
+#define XM_MMU_PHY_RDY (1<<12) /* Bit 12: PHY Read Ready */
+#define XM_MMU_PHY_BUSY (1<<11) /* Bit 11: PHY Busy */
+#define XM_MMU_IGN_PF (1<<10) /* Bit 10: Ignore Pause Frame */
+#define XM_MMU_MAC_LB (1<<9) /* Bit 9: Enable MAC Loopback */
+ /* Bit 8: reserved */
+#define XM_MMU_FRC_COL (1<<7) /* Bit 7: Force Collision */
+#define XM_MMU_SIM_COL (1<<6) /* Bit 6: Simulate Collision */
+#define XM_MMU_NO_PRE (1<<5) /* Bit 5: No MDIO Preamble */
+#define XM_MMU_GMII_FD (1<<4) /* Bit 4: GMII uses Full Duplex */
+#define XM_MMU_RAT_CTRL (1<<3) /* Bit 3: Enable Rate Control */
+#define XM_MMU_GMII_LOOP (1<<2) /* Bit 2: PHY is in Loopback Mode */
+#define XM_MMU_ENA_RX (1<<1) /* Bit 1: Enable Receiver */
+#define XM_MMU_ENA_TX (1<<0) /* Bit 0: Enable Transmitter */
+
+
+/* XM_TX_CMD 16 bit r/w Transmit Command Register */
+ /* Bit 15..7: reserved */
+#define XM_TX_BK2BK (1<<6) /* Bit 6: Ignore Carrier Sense (Tx Bk2Bk)*/
+#define XM_TX_ENC_BYP (1<<5) /* Bit 5: Set Encoder in Bypass Mode */
+#define XM_TX_SAM_LINE (1<<4) /* Bit 4: (sc) Start utilization calculation */
+#define XM_TX_NO_GIG_MD (1<<3) /* Bit 3: Disable Carrier Extension */
+#define XM_TX_NO_PRE (1<<2) /* Bit 2: Disable Preamble Generation */
+#define XM_TX_NO_CRC (1<<1) /* Bit 1: Disable CRC Generation */
+#define XM_TX_AUTO_PAD (1<<0) /* Bit 0: Enable Automatic Padding */
+
+
+/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
+ /* Bit 15..5: reserved */
+#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
+
+
+/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
+ /* Bit 15..7: reserved */
+#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
+
+
+/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
+ /* Bit 15..8: reserved */
+#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
+
+
+/* XM_RX_CMD 16 bit r/w Receive Command Register */
+ /* Bit 15..9: reserved */
+#define XM_RX_LENERR_OK (1<<8) /* Bit 8 don't set Rx Err bit for */
+ /* in-range error packets */
+#define XM_RX_BIG_PK_OK (1<<7) /* Bit 7 don't set Rx Err bit for */
+ /* jumbo packets */
+#define XM_RX_IPG_CAP (1<<6) /* Bit 6 repl. type field with IPG */
+#define XM_RX_TP_MD (1<<5) /* Bit 5: Enable transparent Mode */
+#define XM_RX_STRIP_FCS (1<<4) /* Bit 4: Enable FCS Stripping */
+#define XM_RX_SELF_RX (1<<3) /* Bit 3: Enable Rx of own packets */
+#define XM_RX_SAM_LINE (1<<2) /* Bit 2: (sc) Start utilization calculation */
+#define XM_RX_STRIP_PAD (1<<1) /* Bit 1: Strip pad bytes of Rx frames */
+#define XM_RX_DIS_CEXT (1<<0) /* Bit 0: Disable carrier ext. check */
+
+
+/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
+ /* Bit 15..5: reserved */
+#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
+
+
+/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
+ /* Bit 31..7: reserved */
+#define XM_GP_ANIP (1L<<6) /* Bit 6: (ro) Auto-Neg. in progress */
+#define XM_GP_FRC_INT (1L<<5) /* Bit 5: (sc) Force Interrupt */
+ /* Bit 4: reserved */
+#define XM_GP_RES_MAC (1L<<3) /* Bit 3: (sc) Reset MAC and FIFOs */
+#define XM_GP_RES_STAT (1L<<2) /* Bit 2: (sc) Reset the statistics module */
+ /* Bit 1: reserved */
+#define XM_GP_INP_ASS (1L<<0) /* Bit 0: (ro) GP Input Pin asserted */
+
+
+/* XM_IMSK 16 bit r/w Interrupt Mask Register */
+/* XM_ISRC 16 bit r/o Interrupt Status Register */
+ /* Bit 15: reserved */
+#define XM_IS_LNK_AE (1<<14) /* Bit 14: Link Asynchronous Event */
+#define XM_IS_TX_ABORT (1<<13) /* Bit 13: Transmit Abort, late Col. etc */
+#define XM_IS_FRC_INT (1<<12) /* Bit 12: Force INT bit set in GP */
+#define XM_IS_INP_ASS (1<<11) /* Bit 11: Input Asserted, GP bit 0 set */
+#define XM_IS_LIPA_RC (1<<10) /* Bit 10: Link Partner requests config */
+#define XM_IS_RX_PAGE (1<<9) /* Bit 9: Page Received */
+#define XM_IS_TX_PAGE (1<<8) /* Bit 8: Next Page Loaded for Transmit */
+#define XM_IS_AND (1<<7) /* Bit 7: Auto-Negotiation Done */
+#define XM_IS_TSC_OV (1<<6) /* Bit 6: Time Stamp Counter Overflow */
+#define XM_IS_RXC_OV (1<<5) /* Bit 5: Rx Counter Event Overflow */
+#define XM_IS_TXC_OV (1<<4) /* Bit 4: Tx Counter Event Overflow */
+#define XM_IS_RXF_OV (1<<3) /* Bit 3: Receive FIFO Overflow */
+#define XM_IS_TXF_UR (1<<2) /* Bit 2: Transmit FIFO Underrun */
+#define XM_IS_TX_COMP (1<<1) /* Bit 1: Frame Tx Complete */
+#define XM_IS_RX_COMP (1<<0) /* Bit 0: Frame Rx Complete */
+
+#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE |\
+ XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_TXF_UR))
+
+
+/* XM_HW_CFG 16 bit r/w Hardware Config Register */
+ /* Bit 15.. 4: reserved */
+#define XM_HW_GEN_EOP (1<<3) /* Bit 3: generate End of Packet pulse */
+#define XM_HW_COM4SIG (1<<2) /* Bit 2: use Comma Detect for Sig. Det.*/
+ /* Bit 1: reserved */
+#define XM_HW_GMII_MD (1<<0) /* Bit 0: GMII Interface selected */
+
+
+/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
+/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
+ /* Bit 15..10 reserved */
+#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
+
+/* XM_TX_THR 16 bit r/w Tx Request Threshold */
+/* XM_HT_THR 16 bit r/w Host Request Threshold */
+/* XM_RX_THR 16 bit r/w Rx Request Threshold */
+ /* Bit 15..11 reserved */
+#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
+
+
+/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
+#define XM_ST_VALID (1UL<<31) /* Bit 31: Status Valid */
+#define XM_ST_BYTE_CNT (0x3fffL<<17) /* Bit 30..17: Tx frame Length */
+#define XM_ST_RETRY_CNT (0x1fL<<12) /* Bit 16..12: Retry Count */
+#define XM_ST_EX_COL (1L<<11) /* Bit 11: Excessive Collisions */
+#define XM_ST_EX_DEF (1L<<10) /* Bit 10: Excessive Deferral */
+#define XM_ST_BURST (1L<<9) /* Bit 9: packet xmitted in burst mode */
+#define XM_ST_DEFER (1L<<8) /* Bit 8: packet was deferred */
+#define XM_ST_BC (1L<<7) /* Bit 7: Broadcast packet */
+#define XM_ST_MC (1L<<6) /* Bit 6: Multicast packet */
+#define XM_ST_UC (1L<<5) /* Bit 5: Unicast packet */
+#define XM_ST_TX_UR (1L<<4) /* Bit 4: FIFO Underrun occurred */
+#define XM_ST_CS_ERR (1L<<3) /* Bit 3: Carrier Sense Error */
+#define XM_ST_LAT_COL (1L<<2) /* Bit 2: Late Collision Error */
+#define XM_ST_MUL_COL (1L<<1) /* Bit 1: Multiple Collisions */
+#define XM_ST_SGN_COL (1L<<0) /* Bit 0: Single Collision */
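+
+/*
+ * Illustrative sketch (not compiled): decoding a Tx Status LIFO word
+ * with the masks above. The shift counts (17 and 12) correspond to the
+ * documented bit positions of XM_ST_BYTE_CNT and XM_ST_RETRY_CNT.
+ */
+#if 0
+	SK_U32 TxStat;		/* value read from XM_TX_STAT */
+	unsigned Len, Retries;
+
+	if (TxStat & XM_ST_VALID) {
+		Len = (unsigned)((TxStat & XM_ST_BYTE_CNT) >> 17);
+		Retries = (unsigned)((TxStat & XM_ST_RETRY_CNT) >> 12);
+		if (TxStat & XM_ST_TX_UR) {
+			/* a FIFO underrun occurred for this frame */
+		}
+	}
+#endif /* 0 */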
+
+/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
+/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
+ /* Bit 15..11: reserved */
+#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
+
+
+/* XM_DEV_ID 32 bit r/o Device ID Register */
+#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
+#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
+
+
+/* XM_MODE 32 bit r/w Mode Register */
+ /* Bit 31..27: reserved */
+#define XM_MD_ENA_REJ (1L<<26) /* Bit 26: Enable Frame Reject */
+#define XM_MD_SPOE_E (1L<<25) /* Bit 25: Send Pause on Edge */
+ /* extern generated */
+#define XM_MD_TX_REP (1L<<24) /* Bit 24: Transmit Repeater Mode */
+#define XM_MD_SPOFF_I (1L<<23) /* Bit 23: Send Pause on FIFO full */
+ /* intern generated */
+#define XM_MD_LE_STW (1L<<22) /* Bit 22: Rx Stat Word in Little Endian */
+#define XM_MD_TX_CONT (1L<<21) /* Bit 21: Send Continuous */
+#define XM_MD_TX_PAUSE (1L<<20) /* Bit 20: (sc) Send Pause Frame */
+#define XM_MD_ATS (1L<<19) /* Bit 19: Append Time Stamp */
+#define XM_MD_SPOL_I (1L<<18) /* Bit 18: Send Pause on Low */
+ /* intern generated */
+#define XM_MD_SPOH_I (1L<<17) /* Bit 17: Send Pause on High */
+ /* intern generated */
+#define XM_MD_CAP (1L<<16) /* Bit 16: Check Address Pair */
+#define XM_MD_ENA_HASH (1L<<15) /* Bit 15: Enable Hashing */
+#define XM_MD_CSA (1L<<14) /* Bit 14: Check Station Address */
+#define XM_MD_CAA (1L<<13) /* Bit 13: Check Address Array */
+#define XM_MD_RX_MCTRL (1L<<12) /* Bit 12: Rx MAC Control Frame */
+#define XM_MD_RX_RUNT (1L<<11) /* Bit 11: Rx Runt Frames */
+#define XM_MD_RX_IRLE (1L<<10) /* Bit 10: Rx in Range Len Err Frame */
+#define XM_MD_RX_LONG (1L<<9) /* Bit 9: Rx Long Frame */
+#define XM_MD_RX_CRCE (1L<<8) /* Bit 8: Rx CRC Error Frame */
+#define XM_MD_RX_ERR (1L<<7) /* Bit 7: Rx Error Frame */
+#define XM_MD_DIS_UC (1L<<6) /* Bit 6: Disable Rx Unicast */
+#define XM_MD_DIS_MC (1L<<5) /* Bit 5: Disable Rx Multicast */
+#define XM_MD_DIS_BC (1L<<4) /* Bit 4: Disable Rx Broadcast */
+#define XM_MD_ENA_PROM (1L<<3) /* Bit 3: Enable Promiscuous */
+#define XM_MD_ENA_BE (1L<<2) /* Bit 2: Enable Big Endian */
+#define XM_MD_FTF (1L<<1) /* Bit 1: (sc) Flush Tx FIFO */
+#define XM_MD_FRF (1L<<0) /* Bit 0: (sc) Flush Rx FIFO */
+
+#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
+#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
+ XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA)
+
+/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
+ /* Bit 15..6: reserved */
+#define XM_SC_SNP_RXC (1<<5) /* Bit 5: (sc) Snap Rx Counters */
+#define XM_SC_SNP_TXC (1<<4) /* Bit 4: (sc) Snap Tx Counters */
+#define XM_SC_CP_RXC (1<<3) /* Bit 3: Copy Rx Counters Continuously */
+#define XM_SC_CP_TXC (1<<2) /* Bit 2: Copy Tx Counters Continuously */
+#define XM_SC_CLR_RXC (1<<1) /* Bit 1: (sc) Clear Rx Counters */
+#define XM_SC_CLR_TXC (1<<0) /* Bit 0: (sc) Clear Tx Counters */
+
+
+/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
+/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
+#define XMR_MAX_SZ_OV (1UL<<31) /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
+#define XMR_1023B_OV (1L<<30) /* Bit 30: 512-1023Byte Rx Cnt Ov*/
+#define XMR_511B_OV (1L<<29) /* Bit 29: 256-511 Byte Rx Cnt Ov*/
+#define XMR_255B_OV (1L<<28) /* Bit 28: 128-255 Byte Rx Cnt Ov*/
+#define XMR_127B_OV (1L<<27) /* Bit 27: 65-127 Byte Rx Cnt Ov */
+#define XMR_64B_OV (1L<<26) /* Bit 26: 64 Byte Rx Cnt Ov */
+#define XMR_UTIL_OV (1L<<25) /* Bit 25: Rx Util Cnt Overflow */
+#define XMR_UTIL_UR (1L<<24) /* Bit 24: Rx Util Cnt Underrun */
+#define XMR_CEX_ERR_OV (1L<<23) /* Bit 23: CEXT Err Cnt Ov */
+ /* Bit 22: reserved */
+#define XMR_FCS_ERR_OV (1L<<21) /* Bit 21: Rx FCS Error Cnt Ov */
+#define XMR_LNG_ERR_OV (1L<<20) /* Bit 20: Rx too Long Err Cnt Ov*/
+#define XMR_RUNT_OV (1L<<19) /* Bit 19: Runt Event Cnt Ov */
+#define XMR_SHT_ERR_OV (1L<<18) /* Bit 18: Rx Short Ev Err Cnt Ov*/
+#define XMR_SYM_ERR_OV (1L<<17) /* Bit 17: Rx Sym Err Cnt Ov */
+ /* Bit 16: reserved */
+#define XMR_CAR_ERR_OV (1L<<15) /* Bit 15: Rx Carr Ev Err Cnt Ov */
+#define XMR_JAB_PKT_OV (1L<<14) /* Bit 14: Rx Jabb Packet Cnt Ov */
+#define XMR_FIFO_OV (1L<<13) /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
+#define XMR_FRA_ERR_OV (1L<<12) /* Bit 12: Rx Framing Err Cnt Ov */
+#define XMR_FMISS_OV (1L<<11) /* Bit 11: Rx Missed Ev Cnt Ov */
+#define XMR_BURST (1L<<10) /* Bit 10: Rx Burst Event Cnt Ov */
+#define XMR_INV_MOC (1L<<9) /* Bit 9: Rx with inv. MAC OC Ov*/
+#define XMR_INV_MP (1L<<8) /* Bit 8: Rx inv Pause Frame Ov */
+#define XMR_MCTRL_OV (1L<<7) /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
+#define XMR_MPAUSE_OV (1L<<6) /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
+#define XMR_UC_OK_OV (1L<<5) /* Bit 5: Rx Unicast Frame CntOv*/
+#define XMR_MC_OK_OV (1L<<4) /* Bit 4: Rx Multicast Cnt Ov */
+#define XMR_BC_OK_OV (1L<<3) /* Bit 3: Rx Broadcast Cnt Ov */
+#define XMR_OK_LO_OV (1L<<2) /* Bit 2: Octets Rx OK Low CntOv*/
+#define XMR_OK_HI_OV (1L<<1) /* Bit 1: Octets Rx OK Hi Cnt Ov*/
+#define XMR_OK_OV (1L<<0) /* Bit 0: Frames Received Ok Ov */
+
+#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
+
+/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
+/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
+ /* Bit 31..26: reserved */
+#define XMT_MAX_SZ_OV (1L<<25) /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
+#define XMT_1023B_OV (1L<<24) /* Bit 24: 512-1023Byte Tx Cnt Ov*/
+#define XMT_511B_OV (1L<<23) /* Bit 23: 256-511 Byte Tx Cnt Ov*/
+#define XMT_255B_OV (1L<<22) /* Bit 22: 128-255 Byte Tx Cnt Ov*/
+#define XMT_127B_OV (1L<<21) /* Bit 21: 65-127 Byte Tx Cnt Ov */
+#define XMT_64B_OV (1L<<20) /* Bit 20: 64 Byte Tx Cnt Ov */
+#define XMT_UTIL_OV (1L<<19) /* Bit 19: Tx Util Cnt Overflow */
+#define XMT_UTIL_UR (1L<<18) /* Bit 18: Tx Util Cnt Underrun */
+#define XMT_CS_ERR_OV (1L<<17) /* Bit 17: Tx Carr Sen Err Cnt Ov*/
+#define XMT_FIFO_UR_OV (1L<<16) /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
+#define XMT_EX_DEF_OV (1L<<15) /* Bit 15: Tx Ex Deferral Cnt Ov */
+#define XMT_DEF (1L<<14) /* Bit 14: Tx Deferred Cnt Ov */
+#define XMT_LAT_COL_OV (1L<<13) /* Bit 13: Tx Late Col Cnt Ov */
+#define XMT_ABO_COL_OV (1L<<12) /* Bit 12: Tx abo due to Ex Col Ov*/
+#define XMT_MUL_COL_OV (1L<<11) /* Bit 11: Tx Mult Col Cnt Ov */
+#define XMT_SNG_COL (1L<<10) /* Bit 10: Tx Single Col Cnt Ov */
+#define XMT_MCTRL_OV (1L<<9) /* Bit 9: Tx MAC Ctrl Counter Ov*/
+#define XMT_MPAUSE (1L<<8) /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
+#define XMT_BURST (1L<<7) /* Bit 7: Tx Burst Event Cnt Ov */
+#define XMT_LONG (1L<<6) /* Bit 6: Tx Long Frame Cnt Ov */
+#define XMT_UC_OK_OV (1L<<5) /* Bit 5: Tx Unicast Cnt Ov */
+#define XMT_MC_OK_OV (1L<<4) /* Bit 4: Tx Multicast Cnt Ov */
+#define XMT_BC_OK_OV (1L<<3) /* Bit 3: Tx Broadcast Cnt Ov */
+#define XMT_OK_LO_OV (1L<<2) /* Bit 2: Octets Tx OK Low CntOv*/
+#define XMT_OK_HI_OV (1L<<1) /* Bit 1: Octets Tx OK Hi Cnt Ov*/
+#define XMT_OK_OV (1L<<0) /* Bit 0: Frames Tx Ok Ov */
+
+#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
+
+/*
+ * Receive Frame Status Encoding
+ */
+#define XMR_FS_LEN (0x3fffUL<<18) /* Bit 31..18: Rx Frame Length */
+#define XMR_FS_2L_VLAN (1L<<17) /* Bit 17: tagged with 2-Lev VLAN ID*/
+#define XMR_FS_1L_VLAN (1L<<16) /* Bit 16: tagged with 1-Lev VLAN ID*/
+#define XMR_FS_BC (1L<<15) /* Bit 15: Broadcast Frame */
+#define XMR_FS_MC (1L<<14) /* Bit 14: Multicast Frame */
+#define XMR_FS_UC (1L<<13) /* Bit 13: Unicast Frame */
+ /* Bit 12: reserved */
+#define XMR_FS_BURST (1L<<11) /* Bit 11: Burst Mode */
+#define XMR_FS_CEX_ERR (1L<<10) /* Bit 10: Carrier Ext. Error */
+#define XMR_FS_802_3 (1L<<9) /* Bit 9: 802.3 Frame */
+#define XMR_FS_COL_ERR (1L<<8) /* Bit 8: Collision Error */
+#define XMR_FS_CAR_ERR (1L<<7) /* Bit 7: Carrier Event Error */
+#define XMR_FS_LEN_ERR (1L<<6) /* Bit 6: In-Range Length Error */
+#define XMR_FS_FRA_ERR (1L<<5) /* Bit 5: Framing Error */
+#define XMR_FS_RUNT (1L<<4) /* Bit 4: Runt Frame */
+#define XMR_FS_LNG_ERR (1L<<3) /* Bit 3: Giant (Jumbo) Frame */
+#define XMR_FS_FCS_ERR (1L<<2) /* Bit 2: Frame Check Sequ Err */
+#define XMR_FS_ERR (1L<<1) /* Bit 1: Frame Error */
+#define XMR_FS_MCTRL (1L<<0) /* Bit 0: MAC Control Packet */
+
+/*
+ * XMR_FS_ERR will be set if
+ * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
+ * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
+ * Register is set.
+ */
+#define XMR_FS_ANY_ERR XMR_FS_ERR
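+
+/*
+ * Illustrative sketch (not compiled): decoding a receive frame status
+ * word with the masks above. The shift count (18) corresponds to the
+ * documented bit position of XMR_FS_LEN.
+ */
+#if 0
+	SK_U32 RxStat;		/* receive frame status word */
+	unsigned FrameLen;
+
+	FrameLen = (unsigned)((RxStat & XMR_FS_LEN) >> 18);
+	if (RxStat & XMR_FS_ANY_ERR) {
+		/* erroneous frame, see the error bits above */
+	}
+	else if (RxStat & XMR_FS_BC) {
+		/* broadcast frame received without error */
+	}
+#endif /* 0 */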
+
+/*----------------------------------------------------------------------------*/
+/*
+ * XMAC-PHY Registers, indirectly addressed over the XMAC
+ */
+#define PHY_XMAC_CTRL 0x00 /* 16 bit r/w PHY Control Register */
+#define PHY_XMAC_STAT 0x01 /* 16 bit r/w PHY Status Register */
+#define PHY_XMAC_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
+#define PHY_XMAC_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
+#define PHY_XMAC_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
+#define PHY_XMAC_AUNE_LP 0x05 /* 16 bit r/o Link Partner Abi Reg */
+#define PHY_XMAC_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
+#define PHY_XMAC_NEPG 0x07 /* 16 bit r/w Next Page Register */
+#define PHY_XMAC_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
+ /* 0x09 - 0x0e: reserved */
+#define PHY_XMAC_EXT_STAT 0x0f /* 16 bit r/o Ext Status Register */
+#define PHY_XMAC_RES_ABI 0x10 /* 16 bit r/o PHY Resolved Ability */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Broadcom-PHY Registers, indirectly addressed over the XMAC
+ */
+#define PHY_BCOM_CTRL 0x00 /* 16 bit r/w PHY Control Register */
+#define PHY_BCOM_STAT 0x01 /* 16 bit r/o PHY Status Register */
+#define PHY_BCOM_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
+#define PHY_BCOM_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
+#define PHY_BCOM_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
+#define PHY_BCOM_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
+#define PHY_BCOM_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
+#define PHY_BCOM_NEPG 0x07 /* 16 bit r/w Next Page Register */
+#define PHY_BCOM_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
+ /* Broadcom-specific registers */
+#define PHY_BCOM_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */
+#define PHY_BCOM_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
+ /* 0x0b - 0x0e: reserved */
+#define PHY_BCOM_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
+#define PHY_BCOM_P_EXT_CTRL 0x10 /* 16 bit r/w PHY Extended Ctrl Reg */
+#define PHY_BCOM_P_EXT_STAT 0x11 /* 16 bit r/o PHY Extended Stat Reg */
+#define PHY_BCOM_RE_CTR 0x12 /* 16 bit r/w Receive Error Counter */
+#define PHY_BCOM_FC_CTR 0x13 /* 16 bit r/w False Carrier Sense Cnt */
+#define PHY_BCOM_RNO_CTR 0x14 /* 16 bit r/w Receiver NOT_OK Cnt */
+ /* 0x15 - 0x17: reserved */
+#define PHY_BCOM_AUX_CTRL 0x18 /* 16 bit r/w Auxiliary Control Reg */
+#define PHY_BCOM_AUX_STAT 0x19 /* 16 bit r/o Auxiliary Stat Summary */
+#define PHY_BCOM_INT_STAT 0x1a /* 16 bit r/o Interrupt Status Reg */
+#define PHY_BCOM_INT_MASK 0x1b /* 16 bit r/w Interrupt Mask Reg */
+ /* 0x1c: reserved */
+ /* 0x1d - 0x1f: test registers */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Marvell-PHY Registers, indirectly addressed over the GMAC
+ */
+#define PHY_MARV_CTRL 0x00 /* 16 bit r/w PHY Control Register */
+#define PHY_MARV_STAT 0x01 /* 16 bit r/o PHY Status Register */
+#define PHY_MARV_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
+#define PHY_MARV_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
+#define PHY_MARV_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
+#define PHY_MARV_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
+#define PHY_MARV_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
+#define PHY_MARV_NEPG 0x07 /* 16 bit r/w Next Page Register */
+#define PHY_MARV_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
+ /* Marvell-specific registers */
+#define PHY_MARV_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */
+#define PHY_MARV_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
+ /* 0x0b - 0x0e: reserved */
+#define PHY_MARV_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
+#define PHY_MARV_PHY_CTRL 0x10 /* 16 bit r/w PHY Specific Ctrl Reg */
+#define PHY_MARV_PHY_STAT 0x11 /* 16 bit r/o PHY Specific Stat Reg */
+#define PHY_MARV_INT_MASK 0x12 /* 16 bit r/w Interrupt Mask Reg */
+#define PHY_MARV_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */
+#define PHY_MARV_EXT_CTRL 0x14 /* 16 bit r/w Ext. PHY Specific Ctrl */
+#define PHY_MARV_RXE_CNT 0x15 /* 16 bit r/w Receive Error Counter */
+#define PHY_MARV_EXT_ADR 0x16 /* 16 bit r/w Ext. Ad. for Cable Diag. */
+ /* 0x17: reserved */
+#define PHY_MARV_LED_CTRL 0x18 /* 16 bit r/w LED Control Reg */
+#define PHY_MARV_LED_OVER 0x19 /* 16 bit r/w Manual LED Override Reg */
+#define PHY_MARV_EXT_CTRL_2 0x1a /* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+#define PHY_MARV_EXT_P_STAT 0x1b /* 16 bit r/w Ext. PHY Spec. Stat Reg */
+#define PHY_MARV_CABLE_DIAG 0x1c /* 16 bit r/o Cable Diagnostic Reg */
+ /* 0x1d - 0x1f: reserved */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Level One-PHY Registers, indirectly addressed over the XMAC
+ */
+#define PHY_LONE_CTRL 0x00 /* 16 bit r/w PHY Control Register */
+#define PHY_LONE_STAT 0x01 /* 16 bit r/o PHY Status Register */
+#define PHY_LONE_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
+#define PHY_LONE_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
+#define PHY_LONE_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
+#define PHY_LONE_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
+#define PHY_LONE_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
+#define PHY_LONE_NEPG 0x07 /* 16 bit r/w Next Page Register */
+#define PHY_LONE_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
+ /* Level One-specific registers */
+#define PHY_LONE_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg*/
+#define PHY_LONE_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
+ /* 0x0b -0x0e: reserved */
+#define PHY_LONE_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
+#define PHY_LONE_PORT_CFG 0x10 /* 16 bit r/w Port Configuration Reg*/
+#define PHY_LONE_Q_STAT 0x11 /* 16 bit r/o Quick Status Reg */
+#define PHY_LONE_INT_ENAB 0x12 /* 16 bit r/w Interrupt Enable Reg */
+#define PHY_LONE_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */
+#define PHY_LONE_LED_CFG 0x14 /* 16 bit r/w LED Configuration Reg */
+#define PHY_LONE_PORT_CTRL 0x15 /* 16 bit r/w Port Control Reg */
+#define PHY_LONE_CIM 0x16 /* 16 bit r/o CIM Reg */
+ /* 0x17 -0x1c: reserved */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * National-PHY Registers, indirectly addressed over the XMAC
+ */
+#define PHY_NAT_CTRL 0x00 /* 16 bit r/w PHY Control Register */
+#define PHY_NAT_STAT 0x01 /* 16 bit r/o PHY Status Register */
+#define PHY_NAT_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
+#define PHY_NAT_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
+#define PHY_NAT_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
+#define PHY_NAT_AUNE_LP 0x05 /* 16 bit r/o Link Partner Ability Reg */
+#define PHY_NAT_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
+#define PHY_NAT_NEPG 0x07 /* 16 bit r/w Next Page Register */
+#define PHY_NAT_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner Reg */
+ /* National-specific registers */
+#define PHY_NAT_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */
+#define PHY_NAT_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
+ /* 0x0b -0x0e: reserved */
+#define PHY_NAT_EXT_STAT 0x0f /* 16 bit r/o Extended Status Register */
+#define PHY_NAT_EXT_CTRL1 0x10 /* 16 bit r/o Extended Control Reg1 */
+#define PHY_NAT_Q_STAT1 0x11 /* 16 bit r/o Quick Status Reg1 */
+#define PHY_NAT_10B_OP 0x12 /* 16 bit r/o 10Base-T Operations Reg */
+#define PHY_NAT_EXT_CTRL2 0x13 /* 16 bit r/o Extended Control Reg2 */
+#define PHY_NAT_Q_STAT2 0x14 /* 16 bit r/o Quick Status Reg2 */
+ /* 0x15 -0x18: reserved */
+#define PHY_NAT_PHY_ADDR 0x19 /* 16 bit r/o PHY Address Register */
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * PHY bit definitions
+ * Bits defined as PHY_X_..., PHY_B_..., PHY_L_..., PHY_N_... or PHY_M_...
+ * are XMAC/Broadcom/LevelOne/National/Marvell-specific.
+ * All others are general.
+ */
+
+/***** PHY_XMAC_CTRL 16 bit r/w PHY Control Register *****/
+/***** PHY_BCOM_CTRL 16 bit r/w PHY Control Register *****/
+/***** PHY_MARV_CTRL 16 bit r/w PHY Control Register *****/
+/***** PHY_LONE_CTRL 16 bit r/w PHY Control Register *****/
+#define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY related regs */
+#define PHY_CT_LOOP (1<<14) /* Bit 14: enable Loopback over PHY */
+#define PHY_CT_SPS_LSB (1<<13) /* Bit 13: (BC,L1) Speed select, lower bit */
+#define PHY_CT_ANE (1<<12) /* Bit 12: Auto-Negotiation Enabled */
+#define PHY_CT_PDOWN (1<<11) /* Bit 11: (BC,L1) Power Down Mode */
+#define PHY_CT_ISOL (1<<10) /* Bit 10: (BC,L1) Isolate Mode */
+#define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Auto-Negotiation */
+#define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */
+#define PHY_CT_COL_TST (1<<7) /* Bit 7: (BC,L1) Collision Test enabled */
+#define PHY_CT_SPS_MSB (1<<6) /* Bit 6: (BC,L1) Speed select, upper bit */
+ /* Bit 5..0: reserved */
+
+#define PHY_CT_SP1000 PHY_CT_SPS_MSB /* enable speed of 1000 Mbps */
+#define PHY_CT_SP100 PHY_CT_SPS_LSB /* enable speed of 100 Mbps */
+#define PHY_CT_SP10 (0) /* enable speed of 10 Mbps */
+
+
+/***** PHY_XMAC_STAT 16 bit r/o PHY Status Register *****/
+/***** PHY_BCOM_STAT 16 bit r/o PHY Status Register *****/
+/***** PHY_MARV_STAT 16 bit r/o PHY Status Register *****/
+/***** PHY_LONE_STAT 16 bit r/o PHY Status Register *****/
+ /* Bit 15..9: reserved */
+ /* (BC/L1) 100/10 Mbps cap bits ignored*/
+#define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */
+ /* Bit 7: reserved */
+#define PHY_ST_PRE_SUP (1<<6) /* Bit 6: (BC/L1) preamble suppression */
+#define PHY_ST_AN_OVER (1<<5) /* Bit 5: Auto-Negotiation Over */
+#define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remote Fault Condition Occurred */
+#define PHY_ST_AN_CAP (1<<3) /* Bit 3: Auto-Negotiation Capability */
+#define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */
+#define PHY_ST_JAB_DET (1<<1) /* Bit 1: (BC/L1) Jabber Detected */
+#define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */
+
+
+/***** PHY_XMAC_ID1 16 bit r/o PHY ID1 Register *****/
+/***** PHY_BCOM_ID1 16 bit r/o PHY ID1 Register *****/
+/***** PHY_MARV_ID1 16 bit r/o PHY ID1 Register *****/
+/***** PHY_LONE_ID1 16 bit r/o PHY ID1 Register *****/
+#define PHY_I1_OUI_MSK (0x3f<<10) /* Bit 15..10: Organization Unique ID */
+#define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */
+#define PHY_I1_REV_MSK 0x0f /* Bit 3.. 0: Revision Number */
+
+/* different Broadcom PHY Ids */
+#define PHY_BCOM_ID1_A1 0x6041
+#define PHY_BCOM_ID1_B2 0x6043
+#define PHY_BCOM_ID1_C0 0x6044
+#define PHY_BCOM_ID1_C5 0x6047
+
+
+/***** PHY_XMAC_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/***** PHY_XMAC_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+#define PHY_AN_NXT_PG (1<<15) /* Bit 15: Request Next Page */
+#define PHY_X_AN_ACK (1<<14) /* Bit 14: (ro) Acknowledge Received */
+#define PHY_X_AN_RFB (3<<12) /* Bit 13..12: Remote Fault Bits */
+ /* Bit 11.. 9: reserved */
+#define PHY_X_AN_PAUSE (3<<7) /* Bit 8.. 7: Pause Bits */
+#define PHY_X_AN_HD (1<<6) /* Bit 6: Half Duplex */
+#define PHY_X_AN_FD (1<<5) /* Bit 5: Full Duplex */
+ /* Bit 4.. 0: reserved */
+
+/***** PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/***** PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
+ /* Bit 14: reserved */
+#define PHY_B_AN_RF (1<<13) /* Bit 13: Remote Fault */
+ /* Bit 12: reserved */
+#define PHY_B_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */
+#define PHY_B_AN_PC (1<<10) /* Bit 10: Pause Capable */
+ /* Bit 9..5: 100/10 BT cap bits ignored */
+#define PHY_B_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
+
+/***** PHY_LONE_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/***** PHY_LONE_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
+ /* Bit 14: reserved */
+#define PHY_L_AN_RF (1<<13) /* Bit 13: Remote Fault */
+ /* Bit 12: reserved */
+#define PHY_L_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */
+#define PHY_L_AN_PC (1<<10) /* Bit 10: Pause Capable */
+ /* Bit 9..5: 100/10 BT cap bits ignored */
+#define PHY_L_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
+
+/***** PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/***** PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
+ /* Bit 14: reserved */
+#define PHY_N_AN_RF (1<<13) /* Bit 13: Remote Fault */
+ /* Bit 12: reserved */
+#define PHY_N_AN_100F (1<<11) /* Bit 11: 100Base-T2 FD Support */
+#define PHY_N_AN_100H (1<<10) /* Bit 10: 100Base-T2 HD Support */
+ /* Bit 9..5: 100/10 BT cap bits ignored */
+#define PHY_N_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
+
+/* field type definition for PHY_x_AN_SEL */
+#define PHY_SEL_TYPE 0x01 /* 00001 = Ethernet */
+
+/***** PHY_XMAC_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
+ /* Bit 15..4: reserved */
+#define PHY_ANE_LP_NP (1<<3) /* Bit 3: Link Partner can Next Page */
+#define PHY_ANE_LOC_NP (1<<2) /* Bit 2: Local PHY can Next Page */
+#define PHY_ANE_RX_PG (1<<1) /* Bit 1: Page Received */
+ /* Bit 0: reserved */
+
+/***** PHY_BCOM_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
+/***** PHY_LONE_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
+/***** PHY_MARV_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
+ /* Bit 15..5: reserved */
+#define PHY_ANE_PAR_DF (1<<4) /* Bit 4: Parallel Detection Fault */
+/* PHY_ANE_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */
+/* PHY_ANE_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */
+/* PHY_ANE_RX_PG (see XMAC) Bit 1: Page Received */
+#define PHY_ANE_LP_CAP (1<<0) /* Bit 0: Link Partner Auto-Neg. Cap. */
+
+/***** PHY_XMAC_NEPG 16 bit r/w Next Page Register *****/
+/***** PHY_BCOM_NEPG 16 bit r/w Next Page Register *****/
+/***** PHY_LONE_NEPG 16 bit r/w Next Page Register *****/
+/***** PHY_XMAC_NEPG_LP 16 bit r/o Next Page Link Partner *****/
+/***** PHY_BCOM_NEPG_LP 16 bit r/o Next Page Link Partner *****/
+/***** PHY_LONE_NEPG_LP 16 bit r/o Next Page Link Partner *****/
+#define PHY_NP_MORE (1<<15) /* Bit 15: More, Next Pages to follow */
+#define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack1, for receiving a message */
+#define PHY_NP_MSG_VAL (1<<13) /* Bit 13: Message Page valid */
+#define PHY_NP_ACK2 (1<<12) /* Bit 12: Ack2, comply with msg content */
+#define PHY_NP_TOG (1<<11) /* Bit 11: Toggle Bit, ensure sync */
+#define PHY_NP_MSG 0x07ff /* Bit 10..0: Message from/to Link Partner */
+
+/*
+ * XMAC-Specific
+ */
+/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/
+#define PHY_X_EX_FD (1<<15) /* Bit 15: Device Supports Full Duplex */
+#define PHY_X_EX_HD (1<<14) /* Bit 14: Device Supports Half Duplex */
+ /* Bit 13..0: reserved */
+
+/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/
+ /* Bit 15..9: reserved */
+#define PHY_X_RS_PAUSE (3<<7) /* Bit 8..7: selected Pause Mode */
+#define PHY_X_RS_HD (1<<6) /* Bit 6: Half Duplex Mode selected */
+#define PHY_X_RS_FD (1<<5) /* Bit 5: Full Duplex Mode selected */
+#define PHY_X_RS_ABLMIS (1<<4) /* Bit 4: duplex or pause cap mismatch */
+#define PHY_X_RS_PAUMIS (1<<3) /* Bit 3: pause capability mismatch */
+ /* Bit 2..0: reserved */
+/*
+ * Remote Fault Bits (PHY_X_AN_RFB) encoding
+ */
+#define X_RFB_OK (0<<12) /* Bit 13..12 No errors, Link OK */
+#define X_RFB_LF (1<<12) /* Bit 13..12 Link Failure */
+#define X_RFB_OFF (2<<12) /* Bit 13..12 Offline */
+#define X_RFB_AN_ERR (3<<12) /* Bit 13..12 Auto-Negotiation Error */
+
+/*
+ * Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding
+ */
+#define PHY_X_P_NO_PAUSE (0<<7) /* Bit 8..7: no Pause Mode */
+#define PHY_X_P_SYM_MD (1<<7) /* Bit 8..7: symmetric Pause Mode */
+#define PHY_X_P_ASYM_MD (2<<7) /* Bit 8..7: asymmetric Pause Mode */
+#define PHY_X_P_BOTH_MD (3<<7) /* Bit 8..7: both Pause Mode */
+
+
+/*
+ * Broadcom-Specific
+ */
+/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+#define PHY_B_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
+#define PHY_B_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
+#define PHY_B_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
+#define PHY_B_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
+#define PHY_B_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
+#define PHY_B_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
+ /* Bit 7..0: reserved */
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+#define PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
+#define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
+#define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
+#define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */
+#define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
+#define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
+
+/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
+#define PHY_B_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
+#define PHY_B_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
+#define PHY_B_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
+#define PHY_B_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
+ /* Bit 11..0: reserved */
+
+/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
+#define PHY_B_PEC_MAC_PHY (1<<15) /* Bit 15: 10BIT/GMI-Interface */
+#define PHY_B_PEC_DIS_CROSS (1<<14) /* Bit 14: Disable MDI Crossover */
+#define PHY_B_PEC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */
+#define PHY_B_PEC_INT_DIS (1<<12) /* Bit 12: Interrupts Disabled */
+#define PHY_B_PEC_F_INT (1<<11) /* Bit 11: Force Interrupt */
+#define PHY_B_PEC_BY_45 (1<<10) /* Bit 10: Bypass 4B5B-Decoder */
+#define PHY_B_PEC_BY_SCR (1<<9) /* Bit 9: Bypass Scrambler */
+#define PHY_B_PEC_BY_MLT3 (1<<8) /* Bit 8: Bypass MLT3 Encoder */
+#define PHY_B_PEC_BY_RXA (1<<7) /* Bit 7: Bypass Rx Alignm. */
+#define PHY_B_PEC_RES_SCR (1<<6) /* Bit 6: Reset Scrambler */
+#define PHY_B_PEC_EN_LTR (1<<5) /* Bit 5: Ena LED Traffic Mode */
+#define PHY_B_PEC_LED_ON (1<<4) /* Bit 4: Force LED's on */
+#define PHY_B_PEC_LED_OFF (1<<3) /* Bit 3: Force LED's off */
+#define PHY_B_PEC_EX_IPG (1<<2) /* Bit 2: Extend Tx IPG Mode */
+#define PHY_B_PEC_3_LED (1<<1) /* Bit 1: Three Link LED mode */
+#define PHY_B_PEC_HIGH_LA (1<<0) /* Bit 0: GMII FIFO Elasticity */
+
+/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
+ /* Bit 15..14: reserved */
+#define PHY_B_PES_CROSS_STAT (1<<13) /* Bit 13: MDI Crossover Status */
+#define PHY_B_PES_INT_STAT (1<<12) /* Bit 12: Interrupt Status */
+#define PHY_B_PES_RRS (1<<11) /* Bit 11: Remote Receiver Stat. */
+#define PHY_B_PES_LRS (1<<10) /* Bit 10: Local Receiver Stat. */
+#define PHY_B_PES_LOCKED (1<<9) /* Bit 9: Locked */
+#define PHY_B_PES_LS (1<<8) /* Bit 8: Link Status */
+#define PHY_B_PES_RF (1<<7) /* Bit 7: Remote Fault */
+#define PHY_B_PES_CE_ER (1<<6) /* Bit 6: Carrier Ext Error */
+#define PHY_B_PES_BAD_SSD (1<<5) /* Bit 5: Bad SSD */
+#define PHY_B_PES_BAD_ESD (1<<4) /* Bit 4: Bad ESD */
+#define PHY_B_PES_RX_ER (1<<3) /* Bit 3: Receive Error */
+#define PHY_B_PES_TX_ER (1<<2) /* Bit 2: Transmit Error */
+#define PHY_B_PES_LOCK_ER (1<<1) /* Bit 1: Lock Error */
+#define PHY_B_PES_MLT3_ER (1<<0) /* Bit 0: MLT3 code Error */
+
+/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
+ /* Bit 15..8: reserved */
+#define PHY_B_FC_CTR 0xff /* Bit 7..0: False Carrier Counter */
+
+/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
+#define PHY_B_RC_LOC_MSK 0xff00 /* Bit 15..8: Local Rx NOT_OK cnt */
+#define PHY_B_RC_REM_MSK 0x00ff /* Bit 7..0: Remote Rx NOT_OK cnt */
+
+/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
+#define PHY_B_AC_L_SQE (1<<15) /* Bit 15: Low Squelch */
+#define PHY_B_AC_LONG_PACK (1<<14) /* Bit 14: Rx Long Packets */
+#define PHY_B_AC_ER_CTRL (3<<12) /* Bit 13..12: Edgerate Control */
+ /* Bit 11: reserved */
+#define PHY_B_AC_TX_TST (1<<10) /* Bit 10: Tx test bit, always 1 */
+ /* Bit 9.. 8: reserved */
+#define PHY_B_AC_DIS_PRF (1<<7) /* Bit 7: dis part resp filter */
+ /* Bit 6: reserved */
+#define PHY_B_AC_DIS_PM (1<<5) /* Bit 5: dis power management */
+ /* Bit 4: reserved */
+#define PHY_B_AC_DIAG (1<<3) /* Bit 3: Diagnostic Mode */
+ /* Bit 2.. 0: reserved */
+
+/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
+#define PHY_B_AS_AN_C (1<<15) /* Bit 15: AutoNeg complete */
+#define PHY_B_AS_AN_CA (1<<14) /* Bit 14: AN Complete Ack */
+#define PHY_B_AS_ANACK_D (1<<13) /* Bit 13: AN Ack Detect */
+#define PHY_B_AS_ANAB_D (1<<12) /* Bit 12: AN Ability Detect */
+#define PHY_B_AS_NPW (1<<11) /* Bit 11: AN Next Page Wait */
+#define PHY_B_AS_AN_RES_MSK (7<<8) /* Bit 10..8: AN HDC */
+#define PHY_B_AS_PDF (1<<7) /* Bit 7: Parallel Detect. Fault */
+#define PHY_B_AS_RF (1<<6) /* Bit 6: Remote Fault */
+#define PHY_B_AS_ANP_R (1<<5) /* Bit 5: AN Page Received */
+#define PHY_B_AS_LP_ANAB (1<<4) /* Bit 4: LP AN Ability */
+#define PHY_B_AS_LP_NPAB (1<<3) /* Bit 3: LP Next Page Ability */
+#define PHY_B_AS_LS (1<<2) /* Bit 2: Link Status */
+#define PHY_B_AS_PRR (1<<1) /* Bit 1: Pause Resolution-Rx */
+#define PHY_B_AS_PRT (1<<0) /* Bit 0: Pause Resolution-Tx */
+
+#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
+
+/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
+/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
+ /* Bit 15: reserved */
+#define PHY_B_IS_PSE (1<<14) /* Bit 14: Pair Swap Error */
+#define PHY_B_IS_MDXI_SC (1<<13) /* Bit 13: MDIX Status Change */
+#define PHY_B_IS_HCT (1<<12) /* Bit 12: counter above 32k */
+#define PHY_B_IS_LCT (1<<11) /* Bit 11: counter above 128 */
+#define PHY_B_IS_AN_PR (1<<10) /* Bit 10: Page Received */
+#define PHY_B_IS_NO_HDCL (1<<9) /* Bit 9: No HCD Link */
+#define PHY_B_IS_NO_HDC (1<<8) /* Bit 8: No HCD */
+#define PHY_B_IS_NEG_USHDC (1<<7) /* Bit 7: Negotiated Unsup. HCD */
+#define PHY_B_IS_SCR_S_ER (1<<6) /* Bit 6: Scrambler Sync Error */
+#define PHY_B_IS_RRS_CHANGE (1<<5) /* Bit 5: Remote Rx Stat Change */
+#define PHY_B_IS_LRS_CHANGE (1<<4) /* Bit 4: Local Rx Stat Change */
+#define PHY_B_IS_DUP_CHANGE (1<<3) /* Bit 3: Duplex Mode Change */
+#define PHY_B_IS_LSP_CHANGE (1<<2) /* Bit 2: Link Speed Change */
+#define PHY_B_IS_LST_CHANGE (1<<1) /* Bit 1: Link Status Changed */
+#define PHY_B_IS_CRC_ER (1<<0) /* Bit 0: CRC Error */
+
+#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
+
+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
+#define PHY_B_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */
+#define PHY_B_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */
+#define PHY_B_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */
+#define PHY_B_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */
+
+/*
+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
+ */
+#define PHY_B_RES_1000FD (7<<8) /* Bit 10..8: 1000Base-T Full Dup. */
+#define PHY_B_RES_1000HD (6<<8) /* Bit 10..8: 1000Base-T Half Dup. */
+/* others: 100/10: invalid for us */
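+
+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * checking the resolved HCD field of the Auxiliary Status register against
+ * the values above. The helper name is made up; the register value is
+ * assumed to have been read from PHY_BCOM_AUX_STAT by the driver's PHY
+ * read routine.
+ */
+#if 0 /* documentation example only */
+static int SkExampleBcom1000FD(
+SK_U16 AuxStat) /* PHY_BCOM_AUX_STAT value */
+{
+ /* resolved to 1000Base-T full duplex? */
+ return ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000FD);
+}
+#endif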
+
+/*
+ * Level One-Specific
+ */
+/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+#define PHY_L_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
+#define PHY_L_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
+#define PHY_L_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
+#define PHY_L_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
+#define PHY_L_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
+#define PHY_L_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
+ /* Bit 7..0: reserved */
+
+/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+#define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
+#define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
+#define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
+#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */
+#define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
+#define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+#define PHY_L_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
+
+/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
+#define PHY_L_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
+#define PHY_L_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
+#define PHY_L_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
+#define PHY_L_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
+ /* Bit 11..0: reserved */
+
+/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
+#define PHY_L_PC_REP_MODE (1<<15) /* Bit 15: Repeater Mode */
+ /* Bit 14: reserved */
+#define PHY_L_PC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */
+#define PHY_L_PC_BY_SCR (1<<12) /* Bit 12: Bypass Scrambler */
+#define PHY_L_PC_BY_45 (1<<11) /* Bit 11: Bypass 4B5B-Decoder */
+#define PHY_L_PC_JAB_DIS (1<<10) /* Bit 10: Jabber Disabled */
+#define PHY_L_PC_SQE (1<<9) /* Bit 9: Enable Heartbeat */
+#define PHY_L_PC_TP_LOOP (1<<8) /* Bit 8: TP Loopback */
+#define PHY_L_PC_SSS (1<<7) /* Bit 7: Smart Speed Selection */
+#define PHY_L_PC_FIFO_SIZE (1<<6) /* Bit 6: FIFO Size */
+#define PHY_L_PC_PRE_EN (1<<5) /* Bit 5: Preamble Enable */
+#define PHY_L_PC_CIM (1<<4) /* Bit 4: Carrier Integrity Mon */
+#define PHY_L_PC_10_SER (1<<3) /* Bit 3: Use Serial Output */
+#define PHY_L_PC_ANISOL (1<<2) /* Bit 2: Unisolate Port */
+#define PHY_L_PC_TEN_BIT (1<<1) /* Bit 1: 10bit iface mode on */
+#define PHY_L_PC_ALTCLOCK (1<<0) /* Bit 0: (ro) ALTCLOCK Mode on */
+
+/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
+#define PHY_L_QS_D_RATE (3<<14) /* Bit 15..14: Data Rate */
+#define PHY_L_QS_TX_STAT (1<<13) /* Bit 13: Transmitting */
+#define PHY_L_QS_RX_STAT (1<<12) /* Bit 12: Receiving */
+#define PHY_L_QS_COL_STAT (1<<11) /* Bit 11: Collision */
+#define PHY_L_QS_L_STAT (1<<10) /* Bit 10: Link is up */
+#define PHY_L_QS_DUP_MOD (1<<9) /* Bit 9: Full/Half Duplex */
+#define PHY_L_QS_AN (1<<8) /* Bit 8: AutoNeg is On */
+#define PHY_L_QS_AN_C (1<<7) /* Bit 7: AN is Complete */
+#define PHY_L_QS_LLE (7<<4) /* Bit 6..4: Line Length Estim. */
+#define PHY_L_QS_PAUSE (1<<3) /* Bit 3: LP advertised Pause */
+#define PHY_L_QS_AS_PAUSE (1<<2) /* Bit 2: LP adv. asym. Pause */
+#define PHY_L_QS_ISOLATE (1<<1) /* Bit 1: CIM Isolated */
+#define PHY_L_QS_EVENT (1<<0) /* Bit 0: Event has occurred */
+
+/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
+/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
+ /* Bit 15..14: reserved */
+#define PHY_L_IS_AN_F (1<<13) /* Bit 13: Auto-Negotiation fault */
+ /* Bit 12: not described */
+#define PHY_L_IS_CROSS (1<<11) /* Bit 11: Crossover used */
+#define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. used */
+#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade */
+#define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */
+#define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */
+#define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */
+#define PHY_L_IS_DUP (1<<5) /* Bit 5: Duplex Changed */
+#define PHY_L_IS_LS (1<<4) /* Bit 4: Link Status Changed */
+#define PHY_L_IS_ISOL (1<<3) /* Bit 3: Isolate Occurred */
+#define PHY_L_IS_MDINT (1<<2) /* Bit 2: (ro) STAT: MII Int Pending */
+#define PHY_L_IS_INTEN (1<<1) /* Bit 1: ENAB: Enable IRQs */
+#define PHY_L_IS_FORCE (1<<0) /* Bit 0: ENAB: Force Interrupt */
+
+/* int. mask */
+#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
+
+/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
+#define PHY_L_LC_LEDC (3<<14) /* Bit 15..14: Col/Blink/On/Off */
+#define PHY_L_LC_LEDR (3<<12) /* Bit 13..12: Rx/Blink/On/Off */
+#define PHY_L_LC_LEDT (3<<10) /* Bit 11..10: Tx/Blink/On/Off */
+#define PHY_L_LC_LEDG (3<<8) /* Bit 9..8: Giga/Blink/On/Off */
+#define PHY_L_LC_LEDS (3<<6) /* Bit 7..6: 10-100/Blink/On/Off */
+#define PHY_L_LC_LEDL (3<<4) /* Bit 5..4: Link/Blink/On/Off */
+#define PHY_L_LC_LEDF (3<<2) /* Bit 3..2: Duplex/Blink/On/Off */
+#define PHY_L_LC_PSTRECH (1<<1) /* Bit 1: Stretch LED Pulses */
+#define PHY_L_LC_FREQ (1<<0) /* Bit 0: 30/100 ms */
+
+/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
+#define PHY_L_PC_TX_TCLK (1<<15) /* Bit 15: Enable TX_TCLK */
+ /* Bit 14: reserved */
+#define PHY_L_PC_ALT_NP (1<<13) /* Bit 13: Alternate Next Page */
+#define PHY_L_PC_GMII_ALT (1<<12) /* Bit 12: Alternate GMII driver */
+ /* Bit 11: reserved */
+#define PHY_L_PC_TEN_CRS (1<<10) /* Bit 10: Extend CRS*/
+ /* Bit 9..0: not described */
+
+/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
+#define PHY_L_CIM_ISOL (255<<8)/* Bit 15..8: Isolate Count */
+#define PHY_L_CIM_FALSE_CAR (255<<0)/* Bit 7..0: False Carrier Count */
+
+
+/*
+ * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
+ */
+#define PHY_L_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */
+#define PHY_L_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */
+#define PHY_L_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */
+#define PHY_L_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */
+
+
+/*
+ * National-Specific
+ */
+/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+#define PHY_N_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
+#define PHY_N_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
+#define PHY_N_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
+#define PHY_N_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
+#define PHY_N_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
+#define PHY_N_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
+#define PHY_N_1000C_APC (1<<7) /* Bit 7: Asymmetric Pause Cap. */
+ /* Bit 6..0: reserved */
+
+/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+#define PHY_N_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
+#define PHY_N_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
+#define PHY_N_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
+#define PHY_N_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/
+#define PHY_N_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
+#define PHY_N_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
+#define PHY_N_1000C_LP_APC (1<<9) /* Bit 9: LP Asym. Pause Cap. */
+ /* Bit 8: reserved */
+#define PHY_N_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
+
+/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
+#define PHY_N_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
+#define PHY_N_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
+#define PHY_N_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
+#define PHY_N_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
+ /* Bit 11..0: reserved */
+
+/* TODO: bit definitions for these registers are still missing */
+/***** PHY_NAT_EXT_CTRL1 16 bit r/o Extended Control Reg1 *****/
+/***** PHY_NAT_Q_STAT1 16 bit r/o Quick Status Reg1 *****/
+/***** PHY_NAT_10B_OP 16 bit r/o 10Base-T Operations Reg *****/
+/***** PHY_NAT_EXT_CTRL2 16 bit r/o Extended Control Reg2 *****/
+/***** PHY_NAT_Q_STAT2 16 bit r/o Quick Status Reg2 *****/
+/***** PHY_NAT_PHY_ADDR 16 bit r/o PHY Address Register *****/
+
+/*
+ * Marvell-Specific
+ */
+/***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/***** PHY_MARV_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+#define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */
+#define PHY_M_AN_ACK BIT_14 /* (ro) Acknowledge Received */
+#define PHY_M_AN_RF BIT_13 /* Remote Fault */
+ /* Bit 12: reserved */
+#define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */
+#define PHY_M_AN_PC BIT_10 /* MAC Pause implemented */
+#define PHY_M_AN_100_FD BIT_8 /* Advertise 100Base-TX Full Duplex */
+#define PHY_M_AN_100_HD BIT_7 /* Advertise 100Base-TX Half Duplex */
+#define PHY_M_AN_10_FD BIT_6 /* Advertise 10Base-T Full Duplex */
+#define PHY_M_AN_10_HD BIT_5 /* Advertise 10Base-T Half Duplex */
+
+/* special defines for FIBER (88E1011S only) */
+#define PHY_M_AN_ASP_X BIT_8 /* Asymmetric Pause */
+#define PHY_M_AN_PC_X BIT_7 /* MAC Pause implemented */
+#define PHY_M_AN_1000X_AHD BIT_6 /* Advertise 1000Base-X Half Duplex */
+#define PHY_M_AN_1000X_AFD BIT_5 /* Advertise 1000Base-X Full Duplex */
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+#define PHY_M_P_NO_PAUSE_X (0<<7) /* Bit 8.. 7: no Pause Mode */
+#define PHY_M_P_SYM_MD_X (1<<7) /* Bit 8.. 7: symmetric Pause Mode */
+#define PHY_M_P_ASYM_MD_X (2<<7) /* Bit 8.. 7: asymmetric Pause Mode */
+#define PHY_M_P_BOTH_MD_X (3<<7) /* Bit 8.. 7: both Pause Mode */
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+#define PHY_M_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
+#define PHY_M_1000C_MSE (1<<12) /* Bit 12: Manual Master/Slave Enable */
+#define PHY_M_1000C_MSC (1<<11) /* Bit 11: M/S Configuration (1=Master) */
+#define PHY_M_1000C_MPD (1<<10) /* Bit 10: Multi-Port Device */
+#define PHY_M_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
+#define PHY_M_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
+ /* Bit 7..0: reserved */
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+#define PHY_M_PC_TX_FFD_MSK (3<<14) /* Bit 15..14: Tx FIFO Depth Mask */
+#define PHY_M_PC_RX_FFD_MSK (3<<12) /* Bit 13..12: Rx FIFO Depth Mask */
+#define PHY_M_PC_ASS_CRS_TX (1<<11) /* Bit 11: Assert CRS on Transmit */
+#define PHY_M_PC_FL_GOOD (1<<10) /* Bit 10: Force Link Good */
+#define PHY_M_PC_EN_DET_MSK (3<<8) /* Bit 9.. 8: Energy Detect Mask */
+#define PHY_M_PC_ENA_EXT_D (1<<7) /* Bit 7: Enable Ext. Distance (10BT) */
+#define PHY_M_PC_MDIX_MSK (3<<5) /* Bit 6.. 5: MDI/MDIX Config. Mask */
+#define PHY_M_PC_DIS_125CLK (1<<4) /* Bit 4: Disable 125 CLK */
+#define PHY_M_PC_MAC_POW_UP (1<<3) /* Bit 3: MAC Power up */
+#define PHY_M_PC_SQE_T_ENA (1<<2) /* Bit 2: SQE Test Enabled */
+#define PHY_M_PC_POL_R_DIS (1<<1) /* Bit 1: Polarity Reversal Disabled */
+#define PHY_M_PC_DIS_JABBER (1<<0) /* Bit 0: Disable Jabber */
+
+#define PHY_M_PC_EN_DET SHIFT8(2) /* Energy Detect (Mode 1) */
+#define PHY_M_PC_EN_DET_PLUS SHIFT8(3) /* Energy Detect Plus (Mode 2) */
+
+#define PHY_M_PC_MDI_XMODE(x) SHIFT5(x)
+#define PHY_M_PC_MAN_MDI 0 /* 00 = Manual MDI configuration */
+#define PHY_M_PC_MAN_MDIX 1 /* 01 = Manual MDIX configuration */
+#define PHY_M_PC_ENA_AUTO 3 /* 11 = Enable Automatic Crossover */
+
+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
+#define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */
+#define PHY_M_PS_SPEED_1000 (1<<15) /* 10 = 1000 Mbps */
+#define PHY_M_PS_SPEED_100 (1<<14) /* 01 = 100 Mbps */
+#define PHY_M_PS_SPEED_10 0 /* 00 = 10 Mbps */
+#define PHY_M_PS_FULL_DUP (1<<13) /* Bit 13: Full Duplex */
+#define PHY_M_PS_PAGE_REC (1<<12) /* Bit 12: Page Received */
+#define PHY_M_PS_SPDUP_RES (1<<11) /* Bit 11: Speed & Duplex Resolved */
+#define PHY_M_PS_LINK_UP (1<<10) /* Bit 10: Link Up */
+#define PHY_M_PS_CABLE_MSK (3<<7) /* Bit 9.. 7: Cable Length Mask */
+#define PHY_M_PS_MDI_X_STAT (1<<6) /* Bit 6: MDI Crossover Stat (1=MDIX) */
+#define PHY_M_PS_DOWNS_STAT (1<<5) /* Bit 5: Downshift Status (1=downsh.) */
+#define PHY_M_PS_ENDET_STAT (1<<4) /* Bit 4: Energy Detect Status (1=act) */
+#define PHY_M_PS_TX_P_EN (1<<3) /* Bit 3: Tx Pause Enabled */
+#define PHY_M_PS_RX_P_EN (1<<2) /* Bit 2: Rx Pause Enabled */
+#define PHY_M_PS_POL_REV (1<<1) /* Bit 1: Polarity Reversed */
+#define PHY_M_PC_JABBER (1<<0) /* Bit 0: Jabber */
+
+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
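+
+/*
+ * Decode sketch (illustrative only, not part of the original sources):
+ * extracting the resolved link speed from a PHY_MARV_PHY_STAT value. The
+ * helper name is made up; the value is assumed to have been read via the
+ * driver's PHY read routine once PHY_M_PS_SPDUP_RES is set.
+ */
+#if 0 /* documentation example only */
+static int SkExampleMarvSpeed(
+SK_U16 PhyStat) /* PHY_MARV_PHY_STAT value */
+{
+ switch (PhyStat & PHY_M_PS_SPEED_MSK) {
+ case PHY_M_PS_SPEED_1000:
+ return (1000);
+ case PHY_M_PS_SPEED_100:
+ return (100);
+ default:
+ return (10); /* PHY_M_PS_SPEED_10 */
+ }
+}
+#endif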
+
+/***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
+/***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/
+#define PHY_M_IS_AN_ERROR (1<<15) /* Bit 15: Auto-Negotiation Error */
+#define PHY_M_IS_LSP_CHANGE (1<<14) /* Bit 14: Link Speed Changed */
+#define PHY_M_IS_DUP_CHANGE (1<<13) /* Bit 13: Duplex Mode Changed */
+#define PHY_M_IS_AN_PR (1<<12) /* Bit 12: Page Received */
+#define PHY_M_IS_AN_COMPL (1<<11) /* Bit 11: Auto-Negotiation Completed */
+#define PHY_M_IS_LST_CHANGE (1<<10) /* Bit 10: Link Status Changed */
+#define PHY_M_IS_SYMB_ERROR (1<<9) /* Bit 9: Symbol Error */
+#define PHY_M_IS_FALSE_CARR (1<<8) /* Bit 8: False Carrier */
+#define PHY_M_IS_FIFO_ERROR (1<<7) /* Bit 7: FIFO Overflow/Underrun Error */
+#define PHY_M_IS_MDI_CHANGE (1<<6) /* Bit 6: MDI Crossover Changed */
+#define PHY_M_IS_DOWNSH_DET (1<<5) /* Bit 5: Downshift Detected */
+#define PHY_M_IS_END_CHANGE (1<<4) /* Bit 4: Energy Detect Changed */
+ /* Bit 3..2: reserved */
+#define PHY_M_IS_POL_CHANGE (1<<1) /* Bit 1: Polarity Changed */
+#define PHY_M_IS_JABBER (1<<0) /* Bit 0: Jabber */
+
+#define PHY_M_DEF_MSK (PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \
+ PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR)
+
+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
+#define PHY_M_EC_M_DSC_MSK (3<<10) /* Bit 11..10: Master downshift counter */
+#define PHY_M_EC_S_DSC_MSK (3<<8) /* Bit 9.. 8: Slave downshift counter */
+#define PHY_M_EC_MAC_S_MSK (7<<4) /* Bit 6.. 4: Def. MAC interface speed */
+#define PHY_M_EC_FIB_AN_ENA (1<<3) /* Bit 3: Fiber Auto-Neg. Enable */
+
+#define PHY_M_EC_M_DSC(x) SHIFT10(x) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x) SHIFT8(x) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x) SHIFT4(x) /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+#define MAC_TX_CLK_0_MHZ 2
+#define MAC_TX_CLK_2_5_MHZ 6
+#define MAC_TX_CLK_25_MHZ 7
+
+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
+#define PHY_M_LEDC_DIS_LED (1<<15) /* Bit 15: Disable LED */
+#define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */
+#define PHY_M_LEDC_F_INT (1<<11) /* Bit 11: Force Interrupt */
+#define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */
+ /* Bit 7.. 5: reserved */
+#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 3: Link Control Mask */
+#define PHY_M_LEDC_DP_CTRL (1<<2) /* Bit 2: Duplex Control */
+#define PHY_M_LEDC_RX_CTRL (1<<1) /* Bit 1: Rx activity / Link */
+#define PHY_M_LEDC_TX_CTRL (1<<0) /* Bit 0: Tx activity / Link */
+
+#define PHY_M_LED_PULS_DUR(x) SHIFT12(x) /* Pulse Stretch Duration */
+
+#define PULS_NO_STR 0 /* no pulse stretching */
+#define PULS_21MS 1 /* 21 ms to 42 ms */
+#define PULS_42MS 2 /* 42 ms to 84 ms */
+#define PULS_84MS 3 /* 84 ms to 170 ms */
+#define PULS_170MS 4 /* 170 ms to 340 ms */
+#define PULS_340MS 5 /* 340 ms to 670 ms */
+#define PULS_670MS 6 /* 670 ms to 1.3 s */
+#define PULS_1300MS 7 /* 1.3 s to 2.7 s */
+
+#define PHY_M_LED_BLINK_RT(x) SHIFT8(x) /* Blink Rate */
+
+#define BLINK_42MS 0 /* 42 ms */
+#define BLINK_84MS 1 /* 84 ms */
+#define BLINK_170MS 2 /* 170 ms */
+#define BLINK_340MS 3 /* 340 ms */
+#define BLINK_670MS 4 /* 670 ms */
+ /* values 5 - 7: reserved */
+
+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
+#define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */
+#define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */
+#define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */
+#define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */
+#define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */
+#define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */
+
+#define MO_LED_NORM 0
+#define MO_LED_BLINK 1
+#define MO_LED_OFF 2
+#define MO_LED_ON 3
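+
+/*
+ * Composition sketch (illustrative only, not part of the original sources):
+ * building a manual LED override value from the field macros and MO_LED_*
+ * codes above. The helper name is made up; the result would be written to
+ * PHY_MARV_LED_OVER by the driver's PHY write routine.
+ */
+#if 0 /* documentation example only */
+static SK_U16 SkExampleLedOverride(void)
+{
+ /* force the link LEDs on, let the Rx/Tx LEDs blink */
+ return (SK_U16)(PHY_M_LED_MO_DUP(MO_LED_ON) |
+ PHY_M_LED_MO_10(MO_LED_ON) |
+ PHY_M_LED_MO_100(MO_LED_ON) |
+ PHY_M_LED_MO_1000(MO_LED_ON) |
+ PHY_M_LED_MO_RX(MO_LED_BLINK) |
+ PHY_M_LED_MO_TX(MO_LED_BLINK));
+}
+#endif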
+
+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
+ /* Bit 15.. 7: reserved */
+#define PHY_M_EC2_FI_IMPED (1<<6) /* Bit 6: Fiber Input Impedance */
+#define PHY_M_EC2_FO_IMPED (1<<5) /* Bit 5: Fiber Output Impedance */
+#define PHY_M_EC2_FO_M_CLK (1<<4) /* Bit 4: Fiber Mode Clock Enable */
+#define PHY_M_EC2_FO_BOOST (1<<3) /* Bit 3: Fiber Output Boost */
+#define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */
+
+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
+#define PHY_M_FC_AUTO_SEL (1<<15) /* Bit 15: Fiber/Copper Auto Sel. dis. */
+#define PHY_M_FC_AN_REG_ACC (1<<14) /* Bit 14: Fiber/Copper Autoneg. reg acc */
+#define PHY_M_FC_RESULUTION (1<<13) /* Bit 13: Fiber/Copper Resolution */
+#define PHY_M_SER_IF_AN_BP (1<<12) /* Bit 12: Ser IF autoneg. bypass enable */
+#define PHY_M_SER_IF_BP_ST (1<<11) /* Bit 11: Ser IF autoneg. bypass status */
+#define PHY_M_IRQ_POLARITY (1<<10) /* Bit 10: IRQ polarity */
+ /* Bit 9..4: reserved */
+#define PHY_M_UNDOC1 (1<< 7) /* undocumented bit !! */
+#define PHY_M_MODE_MASK (0xf<<0)/* Bit 3..0: copy of HWCFG MODE[3:0] */
+
+
+/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
+#define PHY_M_CABD_ENA_TEST (1<<15) /* Bit 15: Enable Test */
+#define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status */
+ /* Bit 12.. 8: reserved */
+#define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance */
+
+/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
+#define CABD_STAT_NORMAL 0
+#define CABD_STAT_SHORT 1
+#define CABD_STAT_OPEN 2
+#define CABD_STAT_FAIL 3
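+
+/*
+ * Decode sketch (illustrative only, not part of the original sources):
+ * splitting a PHY_MARV_CABLE_DIAG value into the status code and distance
+ * fields defined above. The helper name is made up; the raw value is
+ * assumed to have been read via the driver's PHY read routine after a test
+ * started with PHY_M_CABD_ENA_TEST has finished.
+ */
+#if 0 /* documentation example only */
+static void SkExampleCableDiag(
+SK_U16 CabDiag, /* raw PHY_MARV_CABLE_DIAG value */
+int *pStat, /* returns CABD_STAT_* code */
+int *pDist) /* returns distance field */
+{
+ *pStat = (CabDiag & PHY_M_CABD_STAT_MSK) >> 13;
+ *pDist = CabDiag & PHY_M_CABD_DIST_MSK;
+}
+#endif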
+
+
+/*
+ * GMAC registers
+ *
+ * The GMAC registers are 16 or 32 bits wide.
+ * The GMAC's host processor interface is 16 bits wide,
+ * therefore ALL registers are addressed with 16 bit accesses.
+ *
+ * The following macros are provided to access the GMAC registers:
+ * GM_IN16(), GM_OUT16(), GM_IN32(), GM_OUT32(), GM_INADR(), GM_OUTADR(),
+ * GM_INHASH(), and GM_OUTHASH().
+ * The macros are defined in SkGeHw.h.
+ * A short access sketch follows the GM_GP_STAT bit definitions below.
+ *
+ * Note: NA reg = Network Address, e.g. DA, SA, etc.
+ *
+ */
+
+/* Port Registers */
+#define GM_GP_STAT 0x0000 /* 16 bit r/o General Purpose Status */
+#define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */
+#define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */
+#define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. */
+#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow-Control */
+#define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */
+#define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */
+
+/* Source Address Registers */
+#define GM_SRC_ADDR_1L 0x001c /* 16 bit r/w Source Address 1 (low) */
+#define GM_SRC_ADDR_1M 0x0020 /* 16 bit r/w Source Address 1 (middle) */
+#define GM_SRC_ADDR_1H 0x0024 /* 16 bit r/w Source Address 1 (high) */
+#define GM_SRC_ADDR_2L 0x0028 /* 16 bit r/w Source Address 2 (low) */
+#define GM_SRC_ADDR_2M 0x002c /* 16 bit r/w Source Address 2 (middle) */
+#define GM_SRC_ADDR_2H 0x0030 /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+#define GM_MC_ADDR_H1 0x0034 /* 16 bit r/w Multicast Address Hash 1 */
+#define GM_MC_ADDR_H2 0x0038 /* 16 bit r/w Multicast Address Hash 2 */
+#define GM_MC_ADDR_H3 0x003c /* 16 bit r/w Multicast Address Hash 3 */
+#define GM_MC_ADDR_H4 0x0040 /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+#define GM_TX_IRQ_SRC 0x0044 /* 16 bit r/o Tx Overflow IRQ Source */
+#define GM_RX_IRQ_SRC 0x0048 /* 16 bit r/o Rx Overflow IRQ Source */
+#define GM_TR_IRQ_SRC 0x004c /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+#define GM_TX_IRQ_MSK 0x0050 /* 16 bit r/w Tx Overflow IRQ Mask */
+#define GM_RX_IRQ_MSK 0x0054 /* 16 bit r/w Rx Overflow IRQ Mask */
+#define GM_TR_IRQ_MSK 0x0058 /* 16 bit r/w Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+#define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */
+#define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */
+#define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 to access the high word (32 bit r/o).
+ * A short read sketch follows this list.
+ */
+#define GM_RXF_UC_OK \
+ (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */
+#define GM_RXF_BC_OK \
+ (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */
+#define GM_RXF_MPAUSE \
+ (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */
+#define GM_RXF_MC_OK \
+ (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */
+#define GM_RXF_FCS_ERR \
+ (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. Error */
+ /* GM_MIB_CNT_BASE + 40: reserved */
+#define GM_RXO_OK_LO \
+ (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */
+#define GM_RXO_OK_HI \
+ (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */
+#define GM_RXO_ERR_LO \
+ (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */
+#define GM_RXO_ERR_HI \
+ (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */
+#define GM_RXF_SHT \
+ (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */
+#define GM_RXE_FRAG \
+ (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Received with FCS Err */
+#define GM_RXF_64B \
+ (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */
+#define GM_RXF_127B \
+ (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */
+#define GM_RXF_255B \
+ (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */
+#define GM_RXF_511B \
+ (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */
+#define GM_RXF_1023B \
+ (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */
+#define GM_RXF_1518B \
+ (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */
+#define GM_RXF_MAX_SZ \
+ (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */
+#define GM_RXF_LNG_ERR \
+ (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */
+#define GM_RXF_JAB_PKT \
+ (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */
+ /* GM_MIB_CNT_BASE + 168: reserved */
+#define GM_RXE_FIFO_OV \
+ (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */
+ /* GM_MIB_CNT_BASE + 184: reserved */
+#define GM_TXF_UC_OK \
+ (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */
+#define GM_TXF_BC_OK \
+ (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */
+#define GM_TXF_MPAUSE \
+ (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */
+#define GM_TXF_MC_OK \
+ (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */
+#define GM_TXO_OK_LO \
+ (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */
+#define GM_TXO_OK_HI \
+ (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */
+#define GM_TXF_64B \
+ (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */
+#define GM_TXF_127B \
+ (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */
+#define GM_TXF_255B \
+ (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */
+#define GM_TXF_511B \
+ (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */
+#define GM_TXF_1023B \
+ (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */
+#define GM_TXF_1518B \
+ (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */
+#define GM_TXF_MAX_SZ \
+ (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */
+ /* GM_MIB_CNT_BASE + 296: reserved */
+#define GM_TXF_COL \
+ (GM_MIB_CNT_BASE + 304) /* Tx Collision */
+#define GM_TXF_LAT_COL \
+ (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */
+#define GM_TXF_ABO_COL \
+ (GM_MIB_CNT_BASE + 320) /* Tx aborted due to Exces. Col. */
+#define GM_TXF_MUL_COL \
+ (GM_MIB_CNT_BASE + 328) /* Tx Multiple Collision */
+#define GM_TXF_SNG_COL \
+ (GM_MIB_CNT_BASE + 336) /* Tx Single Collision */
+#define GM_TXE_FIFO_UR \
+ (GM_MIB_CNT_BASE + 344) /* Tx FIFO Underrun Event */
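+
+/*
+ * Read sketch (illustrative only, not part of the original sources): the
+ * offsets above address the low word of a counter; per the note above the
+ * list, the high word sits 4 bytes further up. GM_IN32() is assumed to take
+ * (IoC, Port, Reg, pVal) as defined in SkGeHw.h; the helper name is made up
+ * and GM_RXF_UC_OK serves only as an example counter.
+ */
+#if 0 /* documentation example only */
+static void SkExampleReadMib(
+SK_IOC IoC, /* I/O context */
+int Port, /* port index */
+SK_U32 *pLo, /* low word of the counter */
+SK_U32 *pHi) /* high word of the counter */
+{
+ GM_IN32(IoC, Port, GM_RXF_UC_OK, pLo);
+ GM_IN32(IoC, Port, GM_RXF_UC_OK + 4, pHi);
+}
+#endif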
+
+/*----------------------------------------------------------------------------*/
+/*
+ * GMAC Bit Definitions
+ *
+ * If the bit access behaviour differs from the register access behaviour
+ * (r/w, r/o) this is documented after the bit number.
+ * The following bit access behaviours are used:
+ * (sc) self clearing
+ * (r/o) read only
+ */
+
+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
+#define GM_GPSR_SPEED (1<<15) /* Bit 15: Port Speed (1 = 100 Mbps) */
+#define GM_GPSR_DUPLEX (1<<14) /* Bit 14: Duplex Mode (1 = Full) */
+#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow-Control Mode Disabled */
+#define GM_GPSR_LINK_UP (1<<12) /* Bit 12: Link Up Status */
+#define GM_GPSR_PAUSE (1<<11) /* Bit 11: Pause State */
+#define GM_GPSR_TX_ACTIVE (1<<10) /* Bit 10: Tx in Progress */
+#define GM_GPSR_EXC_COL (1<<9) /* Bit 9: Excessive Collisions Occurred */
+#define GM_GPSR_LAT_COL (1<<8) /* Bit 8: Late Collisions Occurred */
+ /* Bit 7..6: reserved */
+#define GM_GPSR_PHY_ST_CH (1<<5) /* Bit 5: PHY Status Change */
+#define GM_GPSR_GIG_SPEED (1<<4) /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
+#define GM_GPSR_PART_MODE (1<<3) /* Bit 3: Partition mode */
+#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow-Control Mode Disabled */
+#define GM_GPSR_PROM_EN (1<<1) /* Bit 1: Promiscuous Mode Enabled */
+ /* Bit 0: reserved */
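+
+/*
+ * Access sketch (illustrative only, not part of the original sources; see
+ * the note at the top of the GMAC section): reading the General Purpose
+ * Status register through the 16 bit host interface and testing the link
+ * bit. GM_IN16() is assumed to take (IoC, Port, Reg, pVal) as defined in
+ * SkGeHw.h; the helper name is made up.
+ */
+#if 0 /* documentation example only */
+static int SkExampleGmacLinkUp(
+SK_IOC IoC, /* I/O context */
+int Port) /* port index */
+{
+ SK_U16 GpStat;
+
+ GM_IN16(IoC, Port, GM_GP_STAT, &GpStat);
+
+ return ((GpStat & GM_GPSR_LINK_UP) != 0);
+}
+#endif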
+
+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
+ /* Bit 15: reserved */
+#define GM_GPCR_PROM_ENA (1<<14) /* Bit 14: Enable Promiscuous Mode */
+#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow-Control Mode */
+#define GM_GPCR_TX_ENA (1<<12) /* Bit 12: Enable Transmit */
+#define GM_GPCR_RX_ENA (1<<11) /* Bit 11: Enable Receive */
+#define GM_GPCR_BURST_ENA (1<<10) /* Bit 10: Enable Burst Mode */
+#define GM_GPCR_LOOP_ENA (1<<9) /* Bit 9: Enable MAC Loopback Mode */
+#define GM_GPCR_PART_ENA (1<<8) /* Bit 8: Enable Partition Mode */
+#define GM_GPCR_GIGS_ENA (1<<7) /* Bit 7: Gigabit Speed (1000 Mbps) */
+#define GM_GPCR_FL_PASS (1<<6) /* Bit 6: Force Link Pass */
+#define GM_GPCR_DUP_FULL (1<<5) /* Bit 5: Full Duplex Mode */
+#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow-Control Mode */
+#define GM_GPCR_SPEED_100 (1<<3) /* Bit 3: Port Speed 100 Mbps */
+#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update Duplex */
+#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update Flow-C. */
+#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update Speed */
+
+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\
+ GM_GPCR_AU_SPD_DIS)
+
+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
+#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */
+#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */
+#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */
+#define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */
+
+#define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK)
+
+#define TX_COL_DEF 0x04
+
+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
+#define GM_RXCR_UCF_ENA (1<<15) /* Bit 15: Enable Unicast filtering */
+#define GM_RXCR_MCF_ENA (1<<14) /* Bit 14: Enable Multicast filtering */
+#define GM_RXCR_CRC_DIS (1<<13) /* Bit 13: Remove 4-byte CRC */
+#define GM_RXCR_PASS_FC (1<<12) /* Bit 12: Pass FC packets to FIFO */
+
+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
+#define GM_TXPA_JAMLEN_MSK (0x03<<14) /* Bit 15..14: Jam Length */
+#define GM_TXPA_JAMIPG_MSK (0x1f<<9) /* Bit 13..9: Jam IPG */
+#define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8..4: IPG Jam to Data */
+ /* Bit 3..0: reserved */
+
+#define TX_JAM_LEN_VAL(x) (SHIFT14(x) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x) (SHIFT9(x) & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x) (SHIFT4(x) & GM_TXPA_JAMDAT_MSK)
+
+#define TX_JAM_LEN_DEF 0x03
+#define TX_JAM_IPG_DEF 0x0b
+#define TX_IPG_JAM_DEF 0x1c
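+
+/*
+ * Composition sketch (illustrative only, not part of the original sources):
+ * building the Transmit Parameter value from the default values above.
+ * GM_OUT16() is assumed to take (IoC, Port, Reg, Val) as defined in
+ * SkGeHw.h; the helper name is made up.
+ */
+#if 0 /* documentation example only */
+static void SkExampleSetTxParam(
+SK_IOC IoC, /* I/O context */
+int Port) /* port index */
+{
+ GM_OUT16(IoC, Port, GM_TX_PARAM, (SK_U16)
+ (TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)));
+}
+#endif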
+
+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
+#define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder (r/o) */
+#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive Tx trials */
+#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Len) */
+#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. Frame Len) */
+ /* Bit 7..5: reserved */
+#define GM_SMOD_IPG_MSK 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+
+#define DATA_BLIND_VAL(x) (SHIFT11(x) & GM_SMOD_DATABL_MSK)
+#define DATA_BLIND_DEF 0x04
+
+#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
+#define IPG_DATA_DEF 0x1e
+
+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
+#define GM_SMI_CT_PHY_A_MSK (0x1f<<11) /* Bit 15..11: PHY Device Address */
+#define GM_SMI_CT_REG_A_MSK (0x1f<<6) /* Bit 10.. 6: PHY Register Address */
+#define GM_SMI_CT_OP_RD (1<<5) /* Bit 5: OpCode Read (0=Write)*/
+#define GM_SMI_CT_RD_VAL (1<<4) /* Bit 4: Read Valid (Read completed) */
+#define GM_SMI_CT_BUSY (1<<3) /* Bit 3: Busy (Operation in progress) */
+ /* Bit 2..0: reserved */
+
+#define GM_SMI_CT_PHY_AD(x) (SHIFT11(x) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x) (SHIFT6(x) & GM_SMI_CT_REG_A_MSK)
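+
+/*
+ * Read sequence sketch (illustrative only, not part of the original
+ * sources): a PHY register read over the SMI, composed from the fields
+ * above. GM_OUT16()/GM_IN16() are assumed to take (IoC, Port, Reg, ...) as
+ * defined in SkGeHw.h; the helper name is made up and no time-out handling
+ * is shown.
+ */
+#if 0 /* documentation example only */
+static SK_U16 SkExampleGmPhyRead(
+SK_IOC IoC, /* I/O context */
+int Port, /* port index */
+int PhyAddr, /* PHY device address */
+int PhyReg) /* PHY register address, e.g. PHY_MARV_PHY_STAT */
+{
+ SK_U16 Ctrl;
+ SK_U16 Val;
+
+ /* start the read: device address, register address, OpCode read */
+ GM_OUT16(IoC, Port, GM_SMI_CTRL, (SK_U16)(GM_SMI_CT_PHY_AD(PhyAddr) |
+ GM_SMI_CT_REG_AD(PhyReg) | GM_SMI_CT_OP_RD));
+
+ /* wait until the read data are valid */
+ do {
+ GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
+ } while ((Ctrl & GM_SMI_CT_RD_VAL) == 0);
+
+ GM_IN16(IoC, Port, GM_SMI_DATA, &Val);
+
+ return (Val);
+}
+#endif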
+
+ /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
+ /* Bit 15..6: reserved */
+#define GM_PAR_MIB_CLR (1<<5) /* Bit 5: Set MIB Clear Counter Mode */
+#define GM_PAR_MIB_TST (1<<4) /* Bit 4: MIB Load Counter (Test Mode) */
+ /* Bit 3..0: reserved */
+
+/* Receive Frame Status Encoding */
+#define GMR_FS_LEN (0xffffUL<<16) /* Bit 31..16: Rx Frame Length */
+ /* Bit 15..14: reserved */
+#define GMR_FS_VLAN (1L<<13) /* Bit 13: VLAN Packet */
+#define GMR_FS_JABBER (1L<<12) /* Bit 12: Jabber Packet */
+#define GMR_FS_UN_SIZE (1L<<11) /* Bit 11: Undersize Packet */
+#define GMR_FS_MC (1L<<10) /* Bit 10: Multicast Packet */
+#define GMR_FS_BC (1L<<9) /* Bit 9: Broadcast Packet */
+#define GMR_FS_RX_OK (1L<<8) /* Bit 8: Receive OK (Good Packet) */
+#define GMR_FS_GOOD_FC (1L<<7) /* Bit 7: Good Flow-Control Packet */
+#define GMR_FS_BAD_FC (1L<<6) /* Bit 6: Bad Flow-Control Packet */
+#define GMR_FS_MII_ERR (1L<<5) /* Bit 5: MII Error */
+#define GMR_FS_LONG_ERR (1L<<4) /* Bit 4: Too Long Packet */
+#define GMR_FS_FRAGMENT (1L<<3) /* Bit 3: Fragment */
+ /* Bit 2: reserved */
+#define GMR_FS_CRC_ERR (1L<<1) /* Bit 1: CRC Error */
+#define GMR_FS_RX_FF_OV (1L<<0) /* Bit 0: Rx FIFO Overflow */
+
+/*
+ * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
+ */
+#define GMR_FS_ANY_ERR (GMR_FS_CRC_ERR | \
+ GMR_FS_LONG_ERR | \
+ GMR_FS_MII_ERR | \
+ GMR_FS_BAD_FC | \
+ GMR_FS_GOOD_FC | \
+ GMR_FS_JABBER)
+
+/* Rx GMAC FIFO Flush Mask (default) */
+#define RX_FF_FL_DEF_MSK (GMR_FS_CRC_ERR | \
+ GMR_FS_RX_FF_OV | \
+ GMR_FS_MII_ERR | \
+ GMR_FS_BAD_FC | \
+ GMR_FS_GOOD_FC | \
+ GMR_FS_UN_SIZE | \
+ GMR_FS_JABBER)
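+
+/*
+ * Check sketch (illustrative only, not part of the original sources):
+ * testing a receive frame status word encoded as above. The helper name is
+ * made up; the status word is assumed to come from the receive descriptor.
+ */
+#if 0 /* documentation example only */
+static int SkExampleRxFrameOk(
+SK_U32 FrameStat) /* receive frame status word */
+{
+ /* Bits 31..16 (GMR_FS_LEN) carry the received frame length. */
+ return ((FrameStat & GMR_FS_RX_OK) != 0 &&
+ (FrameStat & GMR_FS_ANY_ERR) == 0);
+}
+#endif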
+
+/* typedefs *******************************************************************/
+
+
+/* function prototypes ********************************************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INC_XMAC_H */
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c
new file mode 100644
index 000000000000..a7e25edc7fc4
--- /dev/null
+++ b/drivers/net/sk98lin/skaddr.c
@@ -0,0 +1,1773 @@
+/******************************************************************************
+ *
+ * Name: skaddr.c
+ * Project: Gigabit Ethernet Adapters, ADDR-Module
+ * Version: $Revision: 1.52 $
+ * Date: $Date: 2003/06/02 13:46:15 $
+ * Purpose: Manage Addresses (Multicast and Unicast) and Promiscuous Mode.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This module is intended to manage multicast addresses, address override,
+ * and promiscuous mode on GEnesis and Yukon adapters.
+ *
+ * Address Layout:
+ * port address: physical MAC address
+ * 1st exact match: logical MAC address (GEnesis only)
+ * 2nd exact match: RLMT multicast (GEnesis only)
+ * exact match 3-13: OS-specific multicasts (GEnesis only)
+ *
+ * Include File Hierarchy:
+ *
+ * "skdrv1st.h"
+ * "skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skaddr.c,v 1.52 2003/06/02 13:46:15 tschilli Exp $ (C) Marvell.";
+#endif /* DEBUG ||!LINT || !SK_SLIM */
+
+#define __SKADDR_C
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* cplusplus */
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+
+/* defines ********************************************************************/
+
+
+#define XMAC_POLY 0xEDB88320UL /* CRC32-Poly - XMAC: Little Endian */
+#define GMAC_POLY 0x04C11DB7L /* CRC32-Poly - GMAC: Big Endian */
+#define HASH_BITS 6 /* #bits in hash */
+#define SK_MC_BIT 0x01
+
+/* Error numbers and messages. */
+
+#define SKERR_ADDR_E001 (SK_ERRBASE_ADDR + 0)
+#define SKERR_ADDR_E001MSG "Bad Flags."
+#define SKERR_ADDR_E002 (SKERR_ADDR_E001 + 1)
+#define SKERR_ADDR_E002MSG "New Error."
+
+/* typedefs *******************************************************************/
+
+/* None. */
+
+/* global variables ***********************************************************/
+
+/* 64-bit hash values with all bits set. */
+
+static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF};
+
+/* local variables ************************************************************/
+
+#ifdef DEBUG
+static int Next0[SK_MAX_MACS] = {0};
+#endif /* DEBUG */
+
+/* functions ******************************************************************/
+
+/******************************************************************************
+ *
+ * SkAddrInit - initialize data, set state to init
+ *
+ * Description:
+ *
+ * SK_INIT_DATA
+ * ============
+ *
+ * This routine clears the multicast tables and resets promiscuous mode.
+ * Some entries are reserved for the "logical MAC address", the
+ * SK-RLMT multicast address, and the BPDU multicast address.
+ *
+ *
+ * SK_INIT_IO
+ * ==========
+ *
+ * All permanent MAC addresses are read from EPROM.
+ * If the current MAC addresses are not already set in software,
+ * they are set to the values of the permanent addresses.
+ * The current addresses are written to the corresponding MAC.
+ *
+ *
+ * SK_INIT_RUN
+ * ===========
+ *
+ * Nothing.
+ *
+ * Context:
+ * init, pageable
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ */
+int SkAddrInit(
+SK_AC *pAC, /* the adapter context */
+SK_IOC IoC, /* I/O context */
+int Level) /* initialization level */
+{
+ int j;
+ SK_U32 i;
+ SK_U8 *InAddr;
+ SK_U16 *OutAddr;
+ SK_ADDR_PORT *pAPort;
+
+ switch (Level) {
+ case SK_INIT_DATA:
+ SK_MEMSET((char *) &pAC->Addr, (SK_U8) 0,
+ (SK_U16) sizeof(SK_ADDR));
+
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ pAPort = &pAC->Addr.Port[i];
+ pAPort->PromMode = SK_PROM_MODE_NONE;
+
+ pAPort->FirstExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
+ pAPort->FirstExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
+ pAPort->NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
+ pAPort->NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
+ }
+#ifdef xDEBUG
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ if (pAC->Addr.Port[i].NextExactMatchRlmt <
+ SK_ADDR_FIRST_MATCH_RLMT) {
+ Next0[i] |= 4;
+ }
+ }
+#endif /* DEBUG */
+ /* pAC->Addr.InitDone = SK_INIT_DATA; */
+ break;
+
+ case SK_INIT_IO:
+#ifndef SK_NO_RLMT
+ for (i = 0; i < SK_MAX_NETS; i++) {
+ pAC->Addr.Net[i].ActivePort = pAC->Rlmt.Net[i].ActivePort;
+ }
+#endif /* !SK_NO_RLMT */
+#ifdef xDEBUG
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ if (pAC->Addr.Port[i].NextExactMatchRlmt <
+ SK_ADDR_FIRST_MATCH_RLMT) {
+ Next0[i] |= 8;
+ }
+ }
+#endif /* DEBUG */
+
+ /* Read permanent logical MAC address from Control Register File. */
+ for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
+ InAddr = (SK_U8 *) &pAC->Addr.Net[0].PermanentMacAddress.a[j];
+ SK_IN8(IoC, B2_MAC_1 + j, InAddr);
+ }
+
+ if (!pAC->Addr.Net[0].CurrentMacAddressSet) {
+ /* Set the current logical MAC address to the permanent one. */
+ pAC->Addr.Net[0].CurrentMacAddress =
+ pAC->Addr.Net[0].PermanentMacAddress;
+ pAC->Addr.Net[0].CurrentMacAddressSet = SK_TRUE;
+ }
+
+ /* Set the current logical MAC address. */
+ pAC->Addr.Port[pAC->Addr.Net[0].ActivePort].Exact[0] =
+ pAC->Addr.Net[0].CurrentMacAddress;
+#if SK_MAX_NETS > 1
+ /* Set logical MAC address for net 2 to (log | 3). */
+ if (!pAC->Addr.Net[1].CurrentMacAddressSet) {
+ pAC->Addr.Net[1].PermanentMacAddress =
+ pAC->Addr.Net[0].PermanentMacAddress;
+ pAC->Addr.Net[1].PermanentMacAddress.a[5] |= 3;
+ /* Set the current logical MAC address to the permanent one. */
+ pAC->Addr.Net[1].CurrentMacAddress =
+ pAC->Addr.Net[1].PermanentMacAddress;
+ pAC->Addr.Net[1].CurrentMacAddressSet = SK_TRUE;
+ }
+#endif /* SK_MAX_NETS > 1 */
+
+#ifdef DEBUG
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
+ ("Permanent MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n",
+ i,
+ pAC->Addr.Net[i].PermanentMacAddress.a[0],
+ pAC->Addr.Net[i].PermanentMacAddress.a[1],
+ pAC->Addr.Net[i].PermanentMacAddress.a[2],
+ pAC->Addr.Net[i].PermanentMacAddress.a[3],
+ pAC->Addr.Net[i].PermanentMacAddress.a[4],
+ pAC->Addr.Net[i].PermanentMacAddress.a[5]))
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
+ ("Logical MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n",
+ i,
+ pAC->Addr.Net[i].CurrentMacAddress.a[0],
+ pAC->Addr.Net[i].CurrentMacAddress.a[1],
+ pAC->Addr.Net[i].CurrentMacAddress.a[2],
+ pAC->Addr.Net[i].CurrentMacAddress.a[3],
+ pAC->Addr.Net[i].CurrentMacAddress.a[4],
+ pAC->Addr.Net[i].CurrentMacAddress.a[5]))
+ }
+#endif /* DEBUG */
+
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ pAPort = &pAC->Addr.Port[i];
+
+ /* Read permanent port addresses from Control Register File. */
+ for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
+ InAddr = (SK_U8 *) &pAPort->PermanentMacAddress.a[j];
+ SK_IN8(IoC, B2_MAC_2 + 8 * i + j, InAddr);
+ }
+
+ if (!pAPort->CurrentMacAddressSet) {
+ /*
+ * Set the current and previous physical MAC address
+ * of this port to its permanent MAC address.
+ */
+ pAPort->CurrentMacAddress = pAPort->PermanentMacAddress;
+ pAPort->PreviousMacAddress = pAPort->PermanentMacAddress;
+ pAPort->CurrentMacAddressSet = SK_TRUE;
+ }
+
+ /* Set port's current physical MAC address. */
+ OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ XM_OUTADDR(IoC, i, XM_SA, OutAddr);
+ }
+#endif /* GENESIS */
+#ifdef YUKON
+ if (!pAC->GIni.GIGenesis) {
+ GM_OUTADDR(IoC, i, GM_SRC_ADDR_1L, OutAddr);
+ }
+#endif /* YUKON */
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
+ ("SkAddrInit: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAPort->PermanentMacAddress.a[0],
+ pAPort->PermanentMacAddress.a[1],
+ pAPort->PermanentMacAddress.a[2],
+ pAPort->PermanentMacAddress.a[3],
+ pAPort->PermanentMacAddress.a[4],
+ pAPort->PermanentMacAddress.a[5]))
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
+ ("SkAddrInit: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAPort->CurrentMacAddress.a[0],
+ pAPort->CurrentMacAddress.a[1],
+ pAPort->CurrentMacAddress.a[2],
+ pAPort->CurrentMacAddress.a[3],
+ pAPort->CurrentMacAddress.a[4],
+ pAPort->CurrentMacAddress.a[5]))
+#endif /* DEBUG */
+ }
+ /* pAC->Addr.InitDone = SK_INIT_IO; */
+ break;
+
+ case SK_INIT_RUN:
+#ifdef xDEBUG
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ if (pAC->Addr.Port[i].NextExactMatchRlmt <
+ SK_ADDR_FIRST_MATCH_RLMT) {
+ Next0[i] |= 16;
+ }
+ }
+#endif /* xDEBUG */
+
+ /* pAC->Addr.InitDone = SK_INIT_RUN; */
+ break;
+
+ default: /* error */
+ break;
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrInit */
+
+#ifndef SK_SLIM
+
+/******************************************************************************
+ *
+ * SkAddrMcClear - clear the multicast table
+ *
+ * Description:
+ * This routine clears the multicast table.
+ *
+ * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
+ * immediately.
+ *
+ * It calls either SkAddrXmacMcClear or SkAddrGmacMcClear, according
+ * to the adapter in use. The real work is done there.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
+ * may be called after SK_INIT_IO without limitation
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrMcClear(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Index of affected port */
+int Flags) /* permanent/non-perm, sw-only */
+{
+ int ReturnCode;
+
+ if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+
+ if (pAC->GIni.GIGenesis) {
+ ReturnCode = SkAddrXmacMcClear(pAC, IoC, PortNumber, Flags);
+ }
+ else {
+ ReturnCode = SkAddrGmacMcClear(pAC, IoC, PortNumber, Flags);
+ }
+
+ return (ReturnCode);
+
+} /* SkAddrMcClear */
+
+#endif /* !SK_SLIM */
+
+#ifndef SK_SLIM
+
+/******************************************************************************
+ *
+ * SkAddrXmacMcClear - clear the multicast table
+ *
+ * Description:
+ * This routine clears the multicast table
+ * (either entry 2 or entries 3-16 and InexactFilter) of the given port.
+ * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
+ * immediately.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
+ * may be called after SK_INIT_IO without limitation
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrXmacMcClear(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Index of affected port */
+int Flags) /* permanent/non-perm, sw-only */
+{
+ int i;
+
+ if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
+
+ /* Clear RLMT multicast addresses. */
+ pAC->Addr.Port[PortNumber].NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
+ }
+ else { /* not permanent => DRV */
+
+ /* Clear InexactFilter */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
+ }
+
+ /* Clear DRV multicast addresses. */
+
+ pAC->Addr.Port[PortNumber].NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
+ }
+
+ if (!(Flags & SK_MC_SW_ONLY)) {
+ (void) SkAddrXmacMcUpdate(pAC, IoC, PortNumber);
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrXmacMcClear */
+
+#endif /* !SK_SLIM */
+
+#ifndef SK_SLIM
+
+/******************************************************************************
+ *
+ * SkAddrGmacMcClear - clear the multicast table
+ *
+ * Description:
+ * This routine clears the multicast hashing table (InexactFilter)
+ * (either the RLMT or the driver bits) of the given port.
+ *
+ * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
+ * immediately.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
+ * may be called after SK_INIT_IO without limitation
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrGmacMcClear(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Index of affected port */
+int Flags) /* permanent/non-perm, sw-only */
+{
+ int i;
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("GMAC InexactFilter (not cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7]))
+#endif /* DEBUG */
+
+ /* Clear InexactFilter */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
+ }
+
+ if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
+
+ /* Copy DRV bits to InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i];
+
+ /* Clear InexactRlmtFilter. */
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i] = 0;
+
+ }
+ }
+ else { /* not permanent => DRV */
+
+ /* Copy RLMT bits to InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i];
+
+ /* Clear InexactDrvFilter. */
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i] = 0;
+ }
+ }
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("GMAC InexactFilter (cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6],
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7]))
+#endif /* DEBUG */
+
+ if (!(Flags & SK_MC_SW_ONLY)) {
+ (void) SkAddrGmacMcUpdate(pAC, IoC, PortNumber);
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrGmacMcClear */
+
+#ifndef SK_ADDR_CHEAT
+
+/******************************************************************************
+ *
+ * SkXmacMcHash - hash multicast address
+ *
+ * Description:
+ * This routine computes the hash value for a multicast address.
+ * A CRC32 algorithm is used.
+ *
+ * Notes:
+ * The code was adapted from the XaQti data sheet.
+ *
+ * Context:
+ * runtime, pageable
+ *
+ * Returns:
+ * Hash value of multicast address.
+ */
+SK_U32 SkXmacMcHash(
+unsigned char *pMc) /* Multicast address */
+{
+ SK_U32 Idx;
+ SK_U32 Bit;
+ SK_U32 Data;
+ SK_U32 Crc;
+
+ Crc = 0xFFFFFFFFUL;
+ for (Idx = 0; Idx < SK_MAC_ADDR_LEN; Idx++) {
+ Data = *pMc++;
+ for (Bit = 0; Bit < 8; Bit++, Data >>= 1) {
+ Crc = (Crc >> 1) ^ (((Crc ^ Data) & 1) ? XMAC_POLY : 0);
+ }
+ }
+
+ return (Crc & ((1 << HASH_BITS) - 1));
+
+} /* SkXmacMcHash */
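+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * how a value returned by SkXmacMcHash() selects a bit of the 64-bit
+ * inexact filter. This mirrors the "63 - hash" indexing performed by
+ * SkAddrXmacMcAdd() below; the helper name is invented for this sketch.
+ */
+#if 0	/* example only, never compiled */
+static void ExampleSetXmacHashBit(
+SK_U8	*pFilterBytes,	/* 8-byte (64-bit) inexact filter */
+SK_U32	Hash)		/* hash value from SkXmacMcHash() */
+{
+	SK_U32	HashBit;
+
+	/* The XMAC counts filter bits from the top, hence the "63 -". */
+	HashBit = 63 - Hash;
+
+	pFilterBytes[HashBit / 8] |= (SK_U8) (1 << (HashBit % 8));
+}
+#endif	/* example only */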
+
+
+/******************************************************************************
+ *
+ * SkGmacMcHash - hash multicast address
+ *
+ * Description:
+ * This routine computes the hash value for a multicast address.
+ * A CRC16 algorithm is used.
+ *
+ * Notes:
+ *	The bit order of each address byte is reversed before the CRC is
+ *	computed (see the inner loop below).
+ *
+ * Context:
+ * runtime, pageable
+ *
+ * Returns:
+ * Hash value of multicast address.
+ */
+SK_U32 SkGmacMcHash(
+unsigned char *pMc) /* Multicast address */
+{
+ SK_U32 Data;
+ SK_U32 TmpData;
+ SK_U32 Crc;
+ int Byte;
+ int Bit;
+
+ Crc = 0xFFFFFFFFUL;
+ for (Byte = 0; Byte < 6; Byte++) {
+ /* Get next byte. */
+ Data = (SK_U32) pMc[Byte];
+
+ /* Change bit order in byte. */
+ TmpData = Data;
+ for (Bit = 0; Bit < 8; Bit++) {
+ if (TmpData & 1L) {
+ Data |= 1L << (7 - Bit);
+ }
+ else {
+ Data &= ~(1L << (7 - Bit));
+ }
+ TmpData >>= 1;
+ }
+
+ Crc ^= (Data << 24);
+ for (Bit = 0; Bit < 8; Bit++) {
+ if (Crc & 0x80000000) {
+ Crc = (Crc << 1) ^ GMAC_POLY;
+ }
+ else {
+ Crc <<= 1;
+ }
+ }
+ }
+
+ return (Crc & ((1 << HASH_BITS) - 1));
+
+} /* SkGmacMcHash */
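+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * the bit-reversal step of SkGmacMcHash() as a stand-alone helper with a
+ * worked value: reversing the bit order of 0xB1 (1011 0001) yields 0x8D
+ * (1000 1101). The loop is the same as the inner loop above.
+ */
+#if 0	/* example only, never compiled */
+static SK_U32 ExampleReverseByte(
+SK_U32	Data)	/* one address byte, 0x00 .. 0xFF */
+{
+	SK_U32	TmpData = Data;
+	int	Bit;
+
+	for (Bit = 0; Bit < 8; Bit++) {
+		if (TmpData & 1L) {
+			Data |= 1L << (7 - Bit);
+		}
+		else {
+			Data &= ~(1L << (7 - Bit));
+		}
+		TmpData >>= 1;
+	}
+
+	return (Data);	/* ExampleReverseByte(0xB1) == 0x8D */
+}
+#endif	/* example only */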
+
+#endif /* !SK_ADDR_CHEAT */
+
+/******************************************************************************
+ *
+ * SkAddrMcAdd - add a multicast address to a port
+ *
+ * Description:
+ * This routine enables reception for a given address on the given port.
+ *
+ * It calls either SkAddrXmacMcAdd or SkAddrGmacMcAdd, according to the
+ * adapter in use. The real work is done there.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_DATA
+ *
+ * Returns:
+ * SK_MC_FILTERING_EXACT
+ * SK_MC_FILTERING_INEXACT
+ * SK_MC_ILLEGAL_ADDRESS
+ * SK_MC_ILLEGAL_PORT
+ * SK_MC_RLMT_OVERFLOW
+ */
+int SkAddrMcAdd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Port Number */
+SK_MAC_ADDR *pMc, /* multicast address to be added */
+int Flags) /* permanent/non-permanent */
+{
+ int ReturnCode;
+
+ if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+
+ if (pAC->GIni.GIGenesis) {
+ ReturnCode = SkAddrXmacMcAdd(pAC, IoC, PortNumber, pMc, Flags);
+ }
+ else {
+ ReturnCode = SkAddrGmacMcAdd(pAC, IoC, PortNumber, pMc, Flags);
+ }
+
+ return (ReturnCode);
+
+} /* SkAddrMcAdd */
+
+
+/******************************************************************************
+ *
+ * SkAddrXmacMcAdd - add a multicast address to a port
+ *
+ * Description:
+ * This routine enables reception for a given address on the given port.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * The multicast bit is only checked if there are no free exact match
+ * entries.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_DATA
+ *
+ * Returns:
+ * SK_MC_FILTERING_EXACT
+ * SK_MC_FILTERING_INEXACT
+ * SK_MC_ILLEGAL_ADDRESS
+ * SK_MC_RLMT_OVERFLOW
+ */
+int SkAddrXmacMcAdd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Port Number */
+SK_MAC_ADDR *pMc, /* multicast address to be added */
+int Flags) /* permanent/non-permanent */
+{
+ int i;
+ SK_U8 Inexact;
+#ifndef SK_ADDR_CHEAT
+ SK_U32 HashBit;
+#endif /* !defined(SK_ADDR_CHEAT) */
+
+ if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
+#ifdef xDEBUG
+ if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt <
+ SK_ADDR_FIRST_MATCH_RLMT) {
+ Next0[PortNumber] |= 1;
+ return (SK_MC_RLMT_OVERFLOW);
+ }
+#endif /* xDEBUG */
+
+ if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt >
+ SK_ADDR_LAST_MATCH_RLMT) {
+ return (SK_MC_RLMT_OVERFLOW);
+ }
+
+ /* Set a RLMT multicast address. */
+
+ pAC->Addr.Port[PortNumber].Exact[
+ pAC->Addr.Port[PortNumber].NextExactMatchRlmt++] = *pMc;
+
+ return (SK_MC_FILTERING_EXACT);
+ }
+
+#ifdef xDEBUG
+ if (pAC->Addr.Port[PortNumber].NextExactMatchDrv <
+ SK_ADDR_FIRST_MATCH_DRV) {
+ Next0[PortNumber] |= 2;
+ return (SK_MC_RLMT_OVERFLOW);
+ }
+#endif /* xDEBUG */
+
+ if (pAC->Addr.Port[PortNumber].NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) {
+
+ /* Set exact match entry. */
+ pAC->Addr.Port[PortNumber].Exact[
+ pAC->Addr.Port[PortNumber].NextExactMatchDrv++] = *pMc;
+
+ /* Clear InexactFilter */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
+ }
+ }
+ else {
+ if (!(pMc->a[0] & SK_MC_BIT)) {
+ /* Hashing only possible with multicast addresses */
+ return (SK_MC_ILLEGAL_ADDRESS);
+ }
+#ifndef SK_ADDR_CHEAT
+ /* Compute hash value of address. */
+ HashBit = 63 - SkXmacMcHash(&pMc->a[0]);
+
+ /* Add bit to InexactFilter. */
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[HashBit / 8] |=
+ 1 << (HashBit % 8);
+#else /* SK_ADDR_CHEAT */
+ /* Set all bits in InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF;
+ }
+#endif /* SK_ADDR_CHEAT */
+ }
+
+ for (Inexact = 0, i = 0; i < 8; i++) {
+ Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
+ }
+
+ if (Inexact == 0 && pAC->Addr.Port[PortNumber].PromMode == 0) {
+ return (SK_MC_FILTERING_EXACT);
+ }
+ else {
+ return (SK_MC_FILTERING_INEXACT);
+ }
+
+} /* SkAddrXmacMcAdd */
+
+
+/******************************************************************************
+ *
+ * SkAddrGmacMcAdd - add a multicast address to a port
+ *
+ * Description:
+ * This routine enables reception for a given address on the given port.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_DATA
+ *
+ * Returns:
+ * SK_MC_FILTERING_INEXACT
+ * SK_MC_ILLEGAL_ADDRESS
+ */
+int SkAddrGmacMcAdd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Port Number */
+SK_MAC_ADDR *pMc, /* multicast address to be added */
+int Flags) /* permanent/non-permanent */
+{
+ int i;
+#ifndef SK_ADDR_CHEAT
+ SK_U32 HashBit;
+#endif /* !defined(SK_ADDR_CHEAT) */
+
+ if (!(pMc->a[0] & SK_MC_BIT)) {
+ /* Hashing only possible with multicast addresses */
+ return (SK_MC_ILLEGAL_ADDRESS);
+ }
+
+#ifndef SK_ADDR_CHEAT
+
+ /* Compute hash value of address. */
+ HashBit = SkGmacMcHash(&pMc->a[0]);
+
+ if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
+
+ /* Add bit to InexactRlmtFilter. */
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[HashBit / 8] |=
+ 1 << (HashBit % 8);
+
+ /* Copy bit to InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i];
+ }
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("GMAC InexactRlmtFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[0],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[1],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[2],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[3],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[4],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[5],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[6],
+ pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[7]))
+#endif /* DEBUG */
+ }
+ else { /* not permanent => DRV */
+
+ /* Add bit to InexactDrvFilter. */
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[HashBit / 8] |=
+ 1 << (HashBit % 8);
+
+ /* Copy bit to InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i];
+ }
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("GMAC InexactDrvFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[0],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[1],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[2],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[3],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[4],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[5],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[6],
+ pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[7]))
+#endif /* DEBUG */
+ }
+
+#else /* SK_ADDR_CHEAT */
+
+ /* Set all bits in InexactFilter. */
+ for (i = 0; i < 8; i++) {
+ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF;
+ }
+#endif /* SK_ADDR_CHEAT */
+
+ return (SK_MC_FILTERING_INEXACT);
+
+} /* SkAddrGmacMcAdd */
+
+#endif /* !SK_SLIM */
+
+/******************************************************************************
+ *
+ * SkAddrMcUpdate - update the HW MC address table and set the MAC address
+ *
+ * Description:
+ * This routine enables reception of the addresses contained in a local
+ * table for a given port.
+ * It also programs the port's current physical MAC address.
+ *
+ * It calls either SkAddrXmacMcUpdate or SkAddrGmacMcUpdate, according
+ * to the adapter in use. The real work is done there.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_MC_FILTERING_EXACT
+ * SK_MC_FILTERING_INEXACT
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrMcUpdate(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber) /* Port Number */
+{
+ int ReturnCode = 0;
+#if (!defined(SK_SLIM) || defined(DEBUG))
+ if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+#endif /* !SK_SLIM || DEBUG */
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ ReturnCode = SkAddrXmacMcUpdate(pAC, IoC, PortNumber);
+ }
+#endif /* GENESIS */
+#ifdef YUKON
+ if (!pAC->GIni.GIGenesis) {
+ ReturnCode = SkAddrGmacMcUpdate(pAC, IoC, PortNumber);
+ }
+#endif /* YUKON */
+ return (ReturnCode);
+
+} /* SkAddrMcUpdate */
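+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * the typical sequence a driver uses to (re)program its multicast list,
+ * based on the routines above. SK_MC_SW_ONLY defers the hardware write to
+ * the final SkAddrMcUpdate() call; a Flags value of 0 adds non-permanent
+ * (driver) entries. The helper name and parameters are invented here.
+ */
+#if 0	/* example only, never compiled */
+static int ExampleSetMcList(
+SK_AC		*pAC,		/* adapter context */
+SK_IOC		IoC,		/* I/O context */
+SK_U32		PortNumber,	/* port to program */
+SK_MAC_ADDR	*pMcList,	/* array of multicast addresses */
+int		McCount)	/* number of entries in pMcList */
+{
+	int	i;
+
+	/* Drop the old driver entries, software state only for now. */
+	(void) SkAddrMcClear(pAC, IoC, PortNumber, SK_MC_SW_ONLY);
+
+	/* Add the new entries as non-permanent (driver) addresses. */
+	for (i = 0; i < McCount; i++) {
+		(void) SkAddrMcAdd(pAC, IoC, PortNumber, &pMcList[i], 0);
+	}
+
+	/* Write the resulting exact-match entries and hash filter to the MAC. */
+	return (SkAddrMcUpdate(pAC, IoC, PortNumber));
+}
+#endif	/* example only */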
+
+
+#ifdef GENESIS
+
+/******************************************************************************
+ *
+ * SkAddrXmacMcUpdate - update the HW MC address table and set the MAC address
+ *
+ * Description:
+ * This routine enables reception of the addresses contained in a local
+ * table for a given port.
+ * It also programs the port's current physical MAC address.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_MC_FILTERING_EXACT
+ * SK_MC_FILTERING_INEXACT
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrXmacMcUpdate(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber) /* Port Number */
+{
+ SK_U32 i;
+ SK_U8 Inexact;
+ SK_U16 *OutAddr;
+ SK_ADDR_PORT *pAPort;
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrXmacMcUpdate on Port %u.\n", PortNumber))
+
+ pAPort = &pAC->Addr.Port[PortNumber];
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber]))
+#endif /* DEBUG */
+
+ /* Start with 0 to also program the logical MAC address. */
+ for (i = 0; i < pAPort->NextExactMatchRlmt; i++) {
+ /* Set exact match address i on XMAC */
+ OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0];
+ XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr);
+ }
+
+ /* Clear other permanent exact match addresses on XMAC */
+ if (pAPort->NextExactMatchRlmt <= SK_ADDR_LAST_MATCH_RLMT) {
+
+ SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchRlmt,
+ SK_ADDR_LAST_MATCH_RLMT);
+ }
+
+ for (i = pAPort->FirstExactMatchDrv; i < pAPort->NextExactMatchDrv; i++) {
+ OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0];
+ XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr);
+ }
+
+ /* Clear other non-permanent exact match addresses on XMAC */
+ if (pAPort->NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) {
+
+ SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchDrv,
+ SK_ADDR_LAST_MATCH_DRV);
+ }
+
+ for (Inexact = 0, i = 0; i < 8; i++) {
+ Inexact |= pAPort->InexactFilter.Bytes[i];
+ }
+
+ if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) {
+
+ /* Set all bits in 64-bit hash register. */
+ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else if (Inexact != 0) {
+
+ /* Set 64-bit hash register to InexactFilter. */
+ XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAPort->InexactFilter.Bytes[0]);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else {
+ /* Disable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE);
+ }
+
+ if (pAPort->PromMode != SK_PROM_MODE_NONE) {
+ (void) SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
+ }
+
+ /* Set port's current physical MAC address. */
+ OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
+
+ XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr);
+
+#ifdef xDEBUG
+ for (i = 0; i < pAPort->NextExactMatchRlmt; i++) {
+ SK_U8 InAddr8[6];
+ SK_U16 *InAddr;
+
+ /* Get exact match address i from port PortNumber. */
+ InAddr = (SK_U16 *) &InAddr8[0];
+
+ XM_INADDR(IoC, PortNumber, XM_EXM(i), InAddr);
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+			("SkAddrXmacMcUpdate: MC address %d on Port %u: "
+			"%02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x\n",
+ i,
+ PortNumber,
+ InAddr8[0],
+ InAddr8[1],
+ InAddr8[2],
+ InAddr8[3],
+ InAddr8[4],
+ InAddr8[5],
+ pAPort->Exact[i].a[0],
+ pAPort->Exact[i].a[1],
+ pAPort->Exact[i].a[2],
+ pAPort->Exact[i].a[3],
+ pAPort->Exact[i].a[4],
+ pAPort->Exact[i].a[5]))
+ }
+#endif /* xDEBUG */
+
+ /* Determine return value. */
+ if (Inexact == 0 && pAPort->PromMode == 0) {
+ return (SK_MC_FILTERING_EXACT);
+ }
+ else {
+ return (SK_MC_FILTERING_INEXACT);
+ }
+
+} /* SkAddrXmacMcUpdate */
+
+#endif /* GENESIS */
+
+#ifdef YUKON
+
+/******************************************************************************
+ *
+ * SkAddrGmacMcUpdate - update the HW MC address table and set the MAC address
+ *
+ * Description:
+ * This routine enables reception of the addresses contained in a local
+ * table for a given port.
+ * It also programs the port's current physical MAC address.
+ *
+ * Notes:
+ * The return code is only valid for SK_PROM_MODE_NONE.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_MC_FILTERING_EXACT
+ * SK_MC_FILTERING_INEXACT
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrGmacMcUpdate(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber) /* Port Number */
+{
+#ifndef SK_SLIM
+ SK_U32 i;
+ SK_U8 Inexact;
+#endif /* not SK_SLIM */
+ SK_U16 *OutAddr;
+ SK_ADDR_PORT *pAPort;
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrGmacMcUpdate on Port %u.\n", PortNumber))
+
+ pAPort = &pAC->Addr.Port[PortNumber];
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber]))
+#endif /* DEBUG */
+
+#ifndef SK_SLIM
+ for (Inexact = 0, i = 0; i < 8; i++) {
+ Inexact |= pAPort->InexactFilter.Bytes[i];
+ }
+
+ /* Set 64-bit hash register to InexactFilter. */
+ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1,
+ &pAPort->InexactFilter.Bytes[0]);
+
+ if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) {
+
+ /* Set all bits in 64-bit hash register. */
+ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else {
+ /* Enable Hashing. */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+
+ if (pAPort->PromMode != SK_PROM_MODE_NONE) {
+ (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
+ }
+#else /* SK_SLIM */
+
+ /* Set all bits in 64-bit hash register. */
+ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+
+ (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
+
+#endif /* SK_SLIM */
+
+ /* Set port's current physical MAC address. */
+ OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
+ GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr);
+
+ /* Set port's current logical MAC address. */
+ OutAddr = (SK_U16 *) &pAPort->Exact[0].a[0];
+ GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_2L, OutAddr);
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrGmacMcUpdate: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAPort->Exact[0].a[0],
+ pAPort->Exact[0].a[1],
+ pAPort->Exact[0].a[2],
+ pAPort->Exact[0].a[3],
+ pAPort->Exact[0].a[4],
+ pAPort->Exact[0].a[5]))
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrGmacMcUpdate: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAPort->CurrentMacAddress.a[0],
+ pAPort->CurrentMacAddress.a[1],
+ pAPort->CurrentMacAddress.a[2],
+ pAPort->CurrentMacAddress.a[3],
+ pAPort->CurrentMacAddress.a[4],
+ pAPort->CurrentMacAddress.a[5]))
+#endif /* DEBUG */
+
+#ifndef SK_SLIM
+ /* Determine return value. */
+ if (Inexact == 0 && pAPort->PromMode == 0) {
+ return (SK_MC_FILTERING_EXACT);
+ }
+ else {
+ return (SK_MC_FILTERING_INEXACT);
+ }
+#else /* SK_SLIM */
+ return (SK_MC_FILTERING_INEXACT);
+#endif /* SK_SLIM */
+
+} /* SkAddrGmacMcUpdate */
+
+#endif /* YUKON */
+
+#ifndef SK_NO_MAO
+
+/******************************************************************************
+ *
+ * SkAddrOverride - override a port's MAC address
+ *
+ * Description:
+ * This routine overrides the MAC address of one port.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS if successful.
+ * SK_ADDR_DUPLICATE_ADDRESS if duplicate MAC address.
+ * SK_ADDR_MULTICAST_ADDRESS if multicast or broadcast address.
+ * SK_ADDR_TOO_EARLY if SK_INIT_IO was not executed before.
+ */
+int SkAddrOverride(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* Port Number */
+SK_MAC_ADDR SK_FAR *pNewAddr, /* new MAC address */
+int Flags) /* logical/physical MAC address */
+{
+#ifndef SK_NO_RLMT
+ SK_EVPARA Para;
+#endif /* !SK_NO_RLMT */
+ SK_U32 NetNumber;
+ SK_U32 i;
+ SK_U16 SK_FAR *OutAddr;
+
+#ifndef SK_NO_RLMT
+ NetNumber = pAC->Rlmt.Port[PortNumber].Net->NetNumber;
+#else
+ NetNumber = 0;
+#endif /* SK_NO_RLMT */
+#if (!defined(SK_SLIM) || defined(DEBUG))
+ if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+#endif /* !SK_SLIM || DEBUG */
+ if (pNewAddr != NULL && (pNewAddr->a[0] & SK_MC_BIT) != 0) {
+ return (SK_ADDR_MULTICAST_ADDRESS);
+ }
+
+ if (!pAC->Addr.Net[NetNumber].CurrentMacAddressSet) {
+ return (SK_ADDR_TOO_EARLY);
+ }
+
+ if (Flags & SK_ADDR_SET_LOGICAL) { /* Activate logical MAC address. */
+ /* Parameter *pNewAddr is ignored. */
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
+ return (SK_ADDR_TOO_EARLY);
+ }
+ }
+#ifndef SK_NO_RLMT
+ /* Set PortNumber to number of net's active port. */
+ PortNumber = pAC->Rlmt.Net[NetNumber].
+ Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
+#endif /* !SK_NO_RLMT */
+ pAC->Addr.Port[PortNumber].Exact[0] =
+ pAC->Addr.Net[NetNumber].CurrentMacAddress;
+
+ /* Write address to first exact match entry of active port. */
+ (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
+ }
+ else if (Flags & SK_ADDR_CLEAR_LOGICAL) {
+ /* Deactivate logical MAC address. */
+ /* Parameter *pNewAddr is ignored. */
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
+ return (SK_ADDR_TOO_EARLY);
+ }
+ }
+#ifndef SK_NO_RLMT
+ /* Set PortNumber to number of net's active port. */
+ PortNumber = pAC->Rlmt.Net[NetNumber].
+ Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
+#endif /* !SK_NO_RLMT */
+ for (i = 0; i < SK_MAC_ADDR_LEN; i++ ) {
+ pAC->Addr.Port[PortNumber].Exact[0].a[i] = 0;
+ }
+
+ /* Write address to first exact match entry of active port. */
+ (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
+ }
+ else if (Flags & SK_ADDR_PHYSICAL_ADDRESS) { /* Physical MAC address. */
+ if (SK_ADDR_EQUAL(pNewAddr->a,
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) {
+ return (SK_ADDR_DUPLICATE_ADDRESS);
+ }
+
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
+ return (SK_ADDR_TOO_EARLY);
+ }
+
+ if (SK_ADDR_EQUAL(pNewAddr->a,
+ pAC->Addr.Port[i].CurrentMacAddress.a)) {
+ if (i == PortNumber) {
+ return (SK_ADDR_SUCCESS);
+ }
+ else {
+ return (SK_ADDR_DUPLICATE_ADDRESS);
+ }
+ }
+ }
+
+ pAC->Addr.Port[PortNumber].PreviousMacAddress =
+ pAC->Addr.Port[PortNumber].CurrentMacAddress;
+ pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr;
+
+ /* Change port's physical MAC address. */
+ OutAddr = (SK_U16 SK_FAR *) pNewAddr;
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr);
+ }
+#endif /* GENESIS */
+#ifdef YUKON
+ if (!pAC->GIni.GIGenesis) {
+ GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr);
+ }
+#endif /* YUKON */
+
+#ifndef SK_NO_RLMT
+ /* Report address change to RLMT. */
+ Para.Para32[0] = PortNumber;
+		Para.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para);
+#endif /* !SK_NO_RLMT */
+ }
+ else { /* Logical MAC address. */
+ if (SK_ADDR_EQUAL(pNewAddr->a,
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) {
+ return (SK_ADDR_SUCCESS);
+ }
+
+ for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
+ if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
+ return (SK_ADDR_TOO_EARLY);
+ }
+
+ if (SK_ADDR_EQUAL(pNewAddr->a,
+ pAC->Addr.Port[i].CurrentMacAddress.a)) {
+ return (SK_ADDR_DUPLICATE_ADDRESS);
+ }
+ }
+
+ /*
+ * In case that the physical and the logical MAC addresses are equal
+ * we must also change the physical MAC address here.
+ * In this case we have an adapter which initially was programmed with
+ * two identical MAC addresses.
+ */
+ if (SK_ADDR_EQUAL(pAC->Addr.Port[PortNumber].CurrentMacAddress.a,
+ pAC->Addr.Port[PortNumber].Exact[0].a)) {
+
+ pAC->Addr.Port[PortNumber].PreviousMacAddress =
+ pAC->Addr.Port[PortNumber].CurrentMacAddress;
+ pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr;
+
+#ifndef SK_NO_RLMT
+ /* Report address change to RLMT. */
+ Para.Para32[0] = PortNumber;
+			Para.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para);
+#endif /* !SK_NO_RLMT */
+ }
+
+#ifndef SK_NO_RLMT
+ /* Set PortNumber to number of net's active port. */
+ PortNumber = pAC->Rlmt.Net[NetNumber].
+ Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
+#endif /* !SK_NO_RLMT */
+ pAC->Addr.Net[NetNumber].CurrentMacAddress = *pNewAddr;
+ pAC->Addr.Port[PortNumber].Exact[0] = *pNewAddr;
+#ifdef DEBUG
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrOverride: Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[0],
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[1],
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[2],
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[3],
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[4],
+ pAC->Addr.Net[NetNumber].PermanentMacAddress.a[5]))
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
+ ("SkAddrOverride: New logical MAC Address: %02X %02X %02X %02X %02X %02X\n",
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[0],
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[1],
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[2],
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[3],
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[4],
+ pAC->Addr.Net[NetNumber].CurrentMacAddress.a[5]))
+#endif /* DEBUG */
+
+ /* Write address to first exact match entry of active port. */
+ (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrOverride */
+
+
+#endif /* SK_NO_MAO */
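+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * overriding the logical MAC address from a driver's "set MAC address"
+ * path. Passing 0 as Flags reaches the final (logical address) branch of
+ * SkAddrOverride() above; SK_ADDR_PHYSICAL_ADDRESS would change the port's
+ * physical address instead. The helper name is invented for this sketch.
+ */
+#if 0	/* example only, never compiled */
+static int ExampleSetLogicalMac(
+SK_AC		*pAC,		/* adapter context */
+SK_IOC		IoC,		/* I/O context */
+SK_U32		PortNumber,	/* port the request came in on */
+SK_MAC_ADDR	*pNewAddr)	/* new logical MAC address */
+{
+	int	Status;
+
+	Status = SkAddrOverride(pAC, IoC, PortNumber, pNewAddr, 0);
+
+	if (Status != SK_ADDR_SUCCESS) {
+		/*
+		 * E.g. SK_ADDR_MULTICAST_ADDRESS, SK_ADDR_DUPLICATE_ADDRESS,
+		 * or SK_ADDR_TOO_EARLY; the old address stays in effect.
+		 */
+		return (Status);
+	}
+
+	return (SK_ADDR_SUCCESS);
+}
+#endif	/* example only */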
+
+/******************************************************************************
+ *
+ * SkAddrPromiscuousChange - set promiscuous mode for given port
+ *
+ * Description:
+ * This routine manages promiscuous mode:
+ * - none
+ * - all LLC frames
+ * - all MC frames
+ *
+ * It calls either SkAddrXmacPromiscuousChange or
+ * SkAddrGmacPromiscuousChange, according to the adapter in use.
+ * The real work is done there.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrPromiscuousChange(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* port whose promiscuous mode changes */
+int NewPromMode) /* new promiscuous mode */
+{
+ int ReturnCode = 0;
+#if (!defined(SK_SLIM) || defined(DEBUG))
+ if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+#endif /* !SK_SLIM || DEBUG */
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ ReturnCode =
+ SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode);
+ }
+#endif /* GENESIS */
+#ifdef YUKON
+ if (!pAC->GIni.GIGenesis) {
+ ReturnCode =
+ SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode);
+ }
+#endif /* YUKON */
+
+ return (ReturnCode);
+
+} /* SkAddrPromiscuousChange */
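+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * mapping a typical OS-level receive mode to the SK_PROM_MODE_* flags used
+ * above. SK_PROM_MODE_LLC and SK_PROM_MODE_ALL_MC are bit flags and may be
+ * combined; the helper name and the two SK_BOOL parameters are invented.
+ */
+#if 0	/* example only, never compiled */
+static int ExampleSetRxMode(
+SK_AC	*pAC,		/* adapter context */
+SK_IOC	IoC,		/* I/O context */
+SK_U32	PortNumber,	/* port whose receive mode changes */
+SK_BOOL	Promiscuous,	/* receive all frames? */
+SK_BOOL	AllMulticast)	/* receive all multicast frames? */
+{
+	int	NewPromMode = SK_PROM_MODE_NONE;
+
+	if (Promiscuous) {
+		NewPromMode |= SK_PROM_MODE_LLC;
+	}
+	if (AllMulticast) {
+		NewPromMode |= SK_PROM_MODE_ALL_MC;
+	}
+
+	return (SkAddrPromiscuousChange(pAC, IoC, PortNumber, NewPromMode));
+}
+#endif	/* example only */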
+
+#ifdef GENESIS
+
+/******************************************************************************
+ *
+ * SkAddrXmacPromiscuousChange - set promiscuous mode for given port
+ *
+ * Description:
+ * This routine manages promiscuous mode:
+ * - none
+ * - all LLC frames
+ * - all MC frames
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrXmacPromiscuousChange(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* port whose promiscuous mode changes */
+int NewPromMode) /* new promiscuous mode */
+{
+ int i;
+ SK_BOOL InexactModeBit;
+ SK_U8 Inexact;
+ SK_U8 HwInexact;
+ SK_FILTER64 HwInexactFilter;
+ SK_U16 LoMode; /* Lower 16 bits of XMAC Mode Register. */
+ int CurPromMode = SK_PROM_MODE_NONE;
+
+ /* Read CurPromMode from Hardware. */
+ XM_IN16(IoC, PortNumber, XM_MODE, &LoMode);
+
+ if ((LoMode & XM_MD_ENA_PROM) != 0) {
+ /* Promiscuous mode! */
+ CurPromMode |= SK_PROM_MODE_LLC;
+ }
+
+ for (Inexact = 0xFF, i = 0; i < 8; i++) {
+ Inexact &= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
+ }
+ if (Inexact == 0xFF) {
+ CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC);
+ }
+ else {
+ /* Get InexactModeBit (bit XM_MD_ENA_HASH in mode register) */
+ XM_IN16(IoC, PortNumber, XM_MODE, &LoMode);
+
+ InexactModeBit = (LoMode & XM_MD_ENA_HASH) != 0;
+
+ /* Read 64-bit hash register from XMAC */
+ XM_INHASH(IoC, PortNumber, XM_HSM, &HwInexactFilter.Bytes[0]);
+
+ for (HwInexact = 0xFF, i = 0; i < 8; i++) {
+ HwInexact &= HwInexactFilter.Bytes[i];
+ }
+
+ if (InexactModeBit && (HwInexact == 0xFF)) {
+ CurPromMode |= SK_PROM_MODE_ALL_MC;
+ }
+ }
+
+ pAC->Addr.Port[PortNumber].PromMode = NewPromMode;
+
+ if (NewPromMode == CurPromMode) {
+ return (SK_ADDR_SUCCESS);
+ }
+
+ if ((NewPromMode & SK_PROM_MODE_ALL_MC) &&
+ !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC. */
+
+ /* Set all bits in 64-bit hash register. */
+ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else if ((CurPromMode & SK_PROM_MODE_ALL_MC) &&
+ !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm MC. */
+ for (Inexact = 0, i = 0; i < 8; i++) {
+ Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
+ }
+ if (Inexact == 0) {
+ /* Disable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE);
+ }
+ else {
+ /* Set 64-bit hash register to InexactFilter. */
+ XM_OUTHASH(IoC, PortNumber, XM_HSM,
+ &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ }
+
+ if ((NewPromMode & SK_PROM_MODE_LLC) &&
+ !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */
+ /* Set the MAC in Promiscuous Mode */
+ SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else if ((CurPromMode & SK_PROM_MODE_LLC) &&
+ !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC. */
+ /* Clear Promiscuous Mode */
+ SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE);
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrXmacPromiscuousChange */
+
+#endif /* GENESIS */
+
+#ifdef YUKON
+
+/******************************************************************************
+ *
+ * SkAddrGmacPromiscuousChange - set promiscuous mode for given port
+ *
+ * Description:
+ * This routine manages promiscuous mode:
+ * - none
+ * - all LLC frames
+ * - all MC frames
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrGmacPromiscuousChange(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 PortNumber, /* port whose promiscuous mode changes */
+int NewPromMode) /* new promiscuous mode */
+{
+ SK_U16 ReceiveControl; /* GMAC Receive Control Register */
+ int CurPromMode = SK_PROM_MODE_NONE;
+
+ /* Read CurPromMode from Hardware. */
+ GM_IN16(IoC, PortNumber, GM_RX_CTRL, &ReceiveControl);
+
+ if ((ReceiveControl & (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)) == 0) {
+ /* Promiscuous mode! */
+ CurPromMode |= SK_PROM_MODE_LLC;
+ }
+
+ if ((ReceiveControl & GM_RXCR_MCF_ENA) == 0) {
+ /* All Multicast mode! */
+ CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC);
+ }
+
+ pAC->Addr.Port[PortNumber].PromMode = NewPromMode;
+
+ if (NewPromMode == CurPromMode) {
+ return (SK_ADDR_SUCCESS);
+ }
+
+ if ((NewPromMode & SK_PROM_MODE_ALL_MC) &&
+ !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC */
+
+ /* Set all bits in 64-bit hash register. */
+ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
+
+ /* Enable Hashing */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+
+ if ((CurPromMode & SK_PROM_MODE_ALL_MC) &&
+ !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm. MC */
+
+ /* Set 64-bit hash register to InexactFilter. */
+ GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1,
+ &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]);
+
+ /* Enable Hashing. */
+ SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+
+ if ((NewPromMode & SK_PROM_MODE_LLC) &&
+ !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */
+
+ /* Set the MAC to Promiscuous Mode. */
+ SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE);
+ }
+ else if ((CurPromMode & SK_PROM_MODE_LLC) &&
+ !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC */
+
+ /* Clear Promiscuous Mode. */
+ SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE);
+ }
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrGmacPromiscuousChange */
+
+#endif /* YUKON */
+
+#ifndef SK_SLIM
+
+/******************************************************************************
+ *
+ * SkAddrSwap - swap address info
+ *
+ * Description:
+ * This routine swaps address info of two ports.
+ *
+ * Context:
+ * runtime, pageable
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * SK_ADDR_SUCCESS
+ * SK_ADDR_ILLEGAL_PORT
+ */
+int SkAddrSwap(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+SK_U32 FromPortNumber, /* Port1 Index */
+SK_U32 ToPortNumber) /* Port2 Index */
+{
+ int i;
+ SK_U8 Byte;
+ SK_MAC_ADDR MacAddr;
+ SK_U32 DWord;
+
+ if (FromPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+
+ if (ToPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+
+ if (pAC->Rlmt.Port[FromPortNumber].Net != pAC->Rlmt.Port[ToPortNumber].Net) {
+ return (SK_ADDR_ILLEGAL_PORT);
+ }
+
+ /*
+ * Swap:
+ * - Exact Match Entries (GEnesis and Yukon)
+ * Yukon uses first entry for the logical MAC
+ * address (stored in the second GMAC register).
+ * - FirstExactMatchRlmt (GEnesis only)
+ * - NextExactMatchRlmt (GEnesis only)
+ * - FirstExactMatchDrv (GEnesis only)
+ * - NextExactMatchDrv (GEnesis only)
+ * - 64-bit filter (InexactFilter)
+ * - Promiscuous Mode
+ * of ports.
+ */
+
+ for (i = 0; i < SK_ADDR_EXACT_MATCHES; i++) {
+ MacAddr = pAC->Addr.Port[FromPortNumber].Exact[i];
+ pAC->Addr.Port[FromPortNumber].Exact[i] =
+ pAC->Addr.Port[ToPortNumber].Exact[i];
+ pAC->Addr.Port[ToPortNumber].Exact[i] = MacAddr;
+ }
+
+ for (i = 0; i < 8; i++) {
+ Byte = pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i];
+ pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i] =
+ pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i];
+ pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i] = Byte;
+ }
+
+ i = pAC->Addr.Port[FromPortNumber].PromMode;
+ pAC->Addr.Port[FromPortNumber].PromMode = pAC->Addr.Port[ToPortNumber].PromMode;
+ pAC->Addr.Port[ToPortNumber].PromMode = i;
+
+ if (pAC->GIni.GIGenesis) {
+ DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt;
+ pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt =
+ pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt;
+ pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt = DWord;
+
+ DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt;
+ pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt =
+ pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt;
+ pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt = DWord;
+
+ DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv;
+ pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv =
+ pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv;
+ pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv = DWord;
+
+ DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchDrv;
+ pAC->Addr.Port[FromPortNumber].NextExactMatchDrv =
+ pAC->Addr.Port[ToPortNumber].NextExactMatchDrv;
+ pAC->Addr.Port[ToPortNumber].NextExactMatchDrv = DWord;
+ }
+
+	/* CAUTION: This works only if all ports in use belong to one adapter. */
+ for (i = 0; (SK_U32) i < pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].
+ Net->NetNumber].NumPorts; i++) {
+ if (pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber].
+ Port[i]->PortNumber == ToPortNumber) {
+ pAC->Addr.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber].
+ ActivePort = i;
+ /* 20001207 RA: Was "ToPortNumber;". */
+ }
+ }
+
+ (void) SkAddrMcUpdate(pAC, IoC, FromPortNumber);
+ (void) SkAddrMcUpdate(pAC, IoC, ToPortNumber);
+
+ return (SK_ADDR_SUCCESS);
+
+} /* SkAddrSwap */
+
+#endif /* !SK_SLIM */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
diff --git a/drivers/net/sk98lin/skcsum.c b/drivers/net/sk98lin/skcsum.c
new file mode 100644
index 000000000000..38a6e7a631f3
--- /dev/null
+++ b/drivers/net/sk98lin/skcsum.c
@@ -0,0 +1,871 @@
+/******************************************************************************
+ *
+ * Name: skcsum.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.12 $
+ * Date: $Date: 2003/08/20 13:55:53 $
+ * Purpose: Store/verify Internet checksum in send/receive packets.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2003 SysKonnect GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifdef SK_USE_CSUM /* Check if CSUM is to be used. */
+
+#ifndef lint
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skcsum.c,v 1.12 2003/08/20 13:55:53 mschmid Exp $ (C) SysKonnect.";
+#endif /* !lint */
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This is the "GEnesis" common module "CSUM".
+ *
+ * This module contains the code necessary to calculate, store, and verify the
+ * Internet Checksum of IP, TCP, and UDP frames.
+ *
+ * "GEnesis" is an abbreviation of "Gigabit Ethernet Network System in Silicon"
+ * and is the code name of this SysKonnect project.
+ *
+ * Compilation Options:
+ *
+ * SK_USE_CSUM - Define if CSUM is to be used. Otherwise, CSUM will be an
+ * empty module.
+ *
+ * SKCS_OVERWRITE_PROTO - Define to overwrite the default protocol id
+ * definitions. In this case, all SKCS_PROTO_xxx definitions must be made
+ * external.
+ *
+ * SKCS_OVERWRITE_STATUS - Define to overwrite the default return status
+ * definitions. In this case, all SKCS_STATUS_xxx definitions must be made
+ * external.
+ *
+ * Include File Hierarchy:
+ *
+ * "h/skdrv1st.h"
+ * "h/skcsum.h"
+ * "h/sktypes.h"
+ * "h/skqueue.h"
+ * "h/skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#include "h/skdrv1st.h"
+#include "h/skcsum.h"
+#include "h/skdrv2nd.h"
+
+/* defines ********************************************************************/
+
+/* The size of an Ethernet MAC header. */
+#define SKCS_ETHERNET_MAC_HEADER_SIZE (6+6+2)
+
+/* The size of the used topology's MAC header. */
+#define SKCS_MAC_HEADER_SIZE SKCS_ETHERNET_MAC_HEADER_SIZE
+
+/* The size of the IP header without any option fields. */
+#define SKCS_IP_HEADER_SIZE 20
+
+/*
+ * Field offsets within the IP header.
+ */
+
+/* "Internet Header Version" and "Length". */
+#define SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH 0
+
+/* "Total Length". */
+#define SKCS_OFS_IP_TOTAL_LENGTH 2
+
+/* "Flags" and "Fragment Offset". */
+#define SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET 6
+
+/* "Next Level Protocol" identifier. */
+#define SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL 9
+
+/* Source IP address. */
+#define SKCS_OFS_IP_SOURCE_ADDRESS 12
+
+/* Destination IP address. */
+#define SKCS_OFS_IP_DESTINATION_ADDRESS 16
+
+
+/*
+ * Field offsets within the UDP header.
+ */
+
+/* UDP checksum. */
+#define SKCS_OFS_UDP_CHECKSUM 6
+
+/* IP "Next Level Protocol" identifiers (see RFC 790). */
+#define SKCS_PROTO_ID_TCP	6	/* Transmission Control Protocol */
+#define SKCS_PROTO_ID_UDP 17 /* User Datagram Protocol */
+
+/* IP "Don't Fragment" bit. */
+#define SKCS_IP_DONT_FRAGMENT SKCS_HTON16(0x4000)
+
+/* Add a byte offset to a pointer. */
+#define SKCS_IDX(pPtr, Ofs) ((void *) ((char *) (pPtr) + (Ofs)))
+
+/*
+ * Macros that convert host to network representation and vice versa, i.e.
+ * little/big endian conversion on little endian machines only.
+ */
+#ifdef SK_LITTLE_ENDIAN
+#define SKCS_HTON16(Val16) (((unsigned) (Val16) >> 8) | (((Val16) & 0xff) << 8))
+#endif /* SK_LITTLE_ENDIAN */
+#ifdef SK_BIG_ENDIAN
+#define SKCS_HTON16(Val16) (Val16)
+#endif /* SK_BIG_ENDIAN */
+#define SKCS_NTOH16(Val16) SKCS_HTON16(Val16)
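+
+/*
+ * Example: on a little endian machine, SKCS_HTON16(0x1234) evaluates to
+ * 0x3412; on a big endian machine the macro is the identity, so the value
+ * stays 0x1234. SKCS_NTOH16() behaves the same way, since the 16-bit byte
+ * swap is its own inverse.
+ */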
+
+/* typedefs *******************************************************************/
+
+/* function prototypes ********************************************************/
+
+/******************************************************************************
+ *
+ * SkCsGetSendInfo - get checksum information for a send packet
+ *
+ * Description:
+ * Get all checksum information necessary to send a TCP or UDP packet. The
+ * function checks the IP header passed to it. If the high-level protocol
+ * is either TCP or UDP the pseudo header checksum is calculated and
+ * returned.
+ *
+ * The function returns the total length of the IP header (including any
+ * IP option fields), which is the same as the start offset of the IP data
+ * which in turn is the start offset of the TCP or UDP header.
+ *
+ * The function also returns the TCP or UDP pseudo header checksum, which
+ * should be used as the start value for the hardware checksum calculation.
+ * (Note that any actual pseudo header checksum can never calculate to
+ * zero.)
+ *
+ * Note:
+ * There is a bug in the GENESIS ASIC which may lead to wrong checksums.
+ *
+ * Arguments:
+ * pAc - A pointer to the adapter context struct.
+ *
+ * pIpHeader - Pointer to IP header. Must be at least the IP header *not*
+ * including any option fields, i.e. at least 20 bytes.
+ *
+ * Note: This pointer will be used to address 8-, 16-, and 32-bit
+ * variables with the respective alignment offsets relative to the pointer.
+ * Thus, the pointer should point to a 32-bit aligned address. If the
+ * target system cannot address 32-bit variables on non 32-bit aligned
+ * addresses, then the pointer *must* point to a 32-bit aligned address.
+ *
+ * pPacketInfo - A pointer to the packet information structure for this
+ * packet. Before calling this SkCsGetSendInfo(), the following field must
+ * be initialized:
+ *
+ * ProtocolFlags - Initialize with any combination of
+ * SKCS_PROTO_XXX bit flags. SkCsGetSendInfo() will only work on
+ * the protocols specified here. Any protocol(s) not specified
+ * here will be ignored.
+ *
+ * Note: Only one checksum can be calculated in hardware. Thus, if
+ * SKCS_PROTO_IP is specified in the 'ProtocolFlags',
+ * SkCsGetSendInfo() must calculate the IP header checksum in
+ * software. It might be a better idea to have the calling
+ * protocol stack calculate the IP header checksum.
+ *
+ * Returns: N/A
+ * On return, the following fields in 'pPacketInfo' may or may not have
+ * been filled with information, depending on the protocol(s) found in the
+ * packet:
+ *
+ * ProtocolFlags - Returns the SKCS_PROTO_XXX bit flags of the protocol(s)
+ * that were both requested by the caller and actually found in the packet.
+ * Protocol(s) not specified by the caller and/or not found in the packet
+ * will have their respective SKCS_PROTO_XXX bit flags reset.
+ *
+ * Note: For IP fragments, TCP and UDP packet information is ignored.
+ *
+ * IpHeaderLength - The total length in bytes of the complete IP header
+ * including any option fields is returned here. This is the start offset
+ * of the IP data, i.e. the TCP or UDP header if present.
+ *
+ * IpHeaderChecksum - If IP has been specified in the 'ProtocolFlags', the
+ * 16-bit Internet Checksum of the IP header is returned here. This value
+ * is to be stored into the packet's 'IP Header Checksum' field.
+ *
+ * PseudoHeaderChecksum - If this is a TCP or UDP packet and if TCP or UDP
+ * has been specified in the 'ProtocolFlags', the 16-bit Internet Checksum
+ * of the TCP or UDP pseudo header is returned here.
+ */
+void SkCsGetSendInfo(
+SK_AC *pAc, /* Adapter context struct. */
+void *pIpHeader, /* IP header. */
+SKCS_PACKET_INFO *pPacketInfo, /* Packet information struct. */
+int NetNumber) /* Net number */
+{
+ /* Internet Header Version found in IP header. */
+ unsigned InternetHeaderVersion;
+
+ /* Length of the IP header as found in IP header. */
+ unsigned IpHeaderLength;
+
+	/* Bit field specifying the desired/found protocols. */
+ unsigned ProtocolFlags;
+
+ /* Next level protocol identifier found in IP header. */
+ unsigned NextLevelProtocol;
+
+ /* Length of IP data portion. */
+ unsigned IpDataLength;
+
+ /* TCP/UDP pseudo header checksum. */
+ unsigned long PseudoHeaderChecksum;
+
+ /* Pointer to next level protocol statistics structure. */
+ SKCS_PROTO_STATS *NextLevelProtoStats;
+
+ /* Temporary variable. */
+ unsigned Tmp;
+
+ Tmp = *(SK_U8 *)
+ SKCS_IDX(pIpHeader, SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH);
+
+ /* Get the Internet Header Version (IHV). */
+ /* Note: The IHV is stored in the upper four bits. */
+
+ InternetHeaderVersion = Tmp >> 4;
+
+ /* Check the Internet Header Version. */
+ /* Note: We currently only support IP version 4. */
+
+ if (InternetHeaderVersion != 4) { /* IPv4? */
+ SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_TX,
+ ("Tx: Unknown Internet Header Version %u.\n",
+ InternetHeaderVersion));
+ pPacketInfo->ProtocolFlags = 0;
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxUnableCts++;
+ return;
+ }
+
+ /* Get the IP header length (IHL). */
+ /*
+ * Note: The IHL is stored in the lower four bits as the number of
+ * 4-byte words.
+ */
+
+ IpHeaderLength = (Tmp & 0xf) * 4;
+ pPacketInfo->IpHeaderLength = IpHeaderLength;
+
+ /* Check the IP header length. */
+
+ /* 04-Aug-1998 sw - Really check the IHL? Necessary? */
+
+ if (IpHeaderLength < 5*4) {
+ SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_TX,
+ ("Tx: Invalid IP Header Length %u.\n", IpHeaderLength));
+ pPacketInfo->ProtocolFlags = 0;
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxUnableCts++;
+ return;
+ }
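+
+	/*
+	 * Example: a first octet of 0x45 means version 4 with an IHL of 5,
+	 * i.e. a 20-byte header without options; 0x46 would mean 24 bytes
+	 * (one 4-byte option word).
+	 */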
+
+ /* This is an IPv4 frame with a header of valid length. */
+
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].TxOkCts++;
+
+ /* Check if we should calculate the IP header checksum. */
+
+ ProtocolFlags = pPacketInfo->ProtocolFlags;
+
+ if (ProtocolFlags & SKCS_PROTO_IP) {
+ pPacketInfo->IpHeaderChecksum =
+ SkCsCalculateChecksum(pIpHeader, IpHeaderLength);
+ }
+
+ /* Get the next level protocol identifier. */
+
+ NextLevelProtocol =
+ *(SK_U8 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL);
+
+ /*
+ * Check if this is a TCP or UDP frame and if we should calculate the
+ * TCP/UDP pseudo header checksum.
+ *
+ * Also clear all protocol bit flags of protocols not present in the
+ * frame.
+ */
+
+ if ((ProtocolFlags & SKCS_PROTO_TCP) != 0 &&
+ NextLevelProtocol == SKCS_PROTO_ID_TCP) {
+ /* TCP/IP frame. */
+ ProtocolFlags &= SKCS_PROTO_TCP | SKCS_PROTO_IP;
+ NextLevelProtoStats =
+ &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_TCP];
+ }
+ else if ((ProtocolFlags & SKCS_PROTO_UDP) != 0 &&
+ NextLevelProtocol == SKCS_PROTO_ID_UDP) {
+ /* UDP/IP frame. */
+ ProtocolFlags &= SKCS_PROTO_UDP | SKCS_PROTO_IP;
+ NextLevelProtoStats =
+ &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_UDP];
+ }
+ else {
+ /*
+ * Either not a TCP or UDP frame and/or TCP/UDP processing not
+ * specified.
+ */
+ pPacketInfo->ProtocolFlags = ProtocolFlags & SKCS_PROTO_IP;
+ return;
+ }
+
+ /* Check if this is an IP fragment. */
+
+ /*
+ * Note: An IP fragment has a non-zero "Fragment Offset" field and/or
+ * the "More Fragments" bit set. Thus, if both the "Fragment Offset"
+ * and the "More Fragments" are zero, it is *not* a fragment. We can
+ * easily check both at the same time since they are in the same 16-bit
+ * word.
+ */
+
+ if ((*(SK_U16 *)
+ SKCS_IDX(pIpHeader, SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET) &
+ ~SKCS_IP_DONT_FRAGMENT) != 0) {
+ /* IP fragment; ignore all other protocols. */
+ pPacketInfo->ProtocolFlags = ProtocolFlags & SKCS_PROTO_IP;
+ NextLevelProtoStats->TxUnableCts++;
+ return;
+ }
+
+ /*
+ * Calculate the TCP/UDP pseudo header checksum.
+ */
+
+ /* Get total length of IP header and data. */
+
+ IpDataLength =
+ *(SK_U16 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_TOTAL_LENGTH);
+
+ /* Get length of IP data portion. */
+
+ IpDataLength = SKCS_NTOH16(IpDataLength) - IpHeaderLength;
+
+ /* Calculate the sum of all pseudo header fields (16-bit). */
+
+ PseudoHeaderChecksum =
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_SOURCE_ADDRESS + 0) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_SOURCE_ADDRESS + 2) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_DESTINATION_ADDRESS + 0) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_DESTINATION_ADDRESS + 2) +
+ (unsigned long) SKCS_HTON16(NextLevelProtocol) +
+ (unsigned long) SKCS_HTON16(IpDataLength);
+
+ /* Add-in any carries. */
+
+ SKCS_OC_ADD(PseudoHeaderChecksum, PseudoHeaderChecksum, 0);
+
+ /* Add-in any new carry. */
+
+ SKCS_OC_ADD(pPacketInfo->PseudoHeaderChecksum, PseudoHeaderChecksum, 0);
+
+ pPacketInfo->ProtocolFlags = ProtocolFlags;
+ NextLevelProtoStats->TxOkCts++; /* Success. */
+} /* SkCsGetSendInfo */
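+
+/*
+ * Example (illustrative sketch, not part of the original SysKonnect code):
+ * the TCP/UDP pseudo header sum computed above, written out as a plain
+ * function. It adds the IP source and destination addresses, the protocol
+ * id, and the IP data length as 16-bit words in network order and then
+ * folds the carries, which is what the SKCS_OC_ADD() steps above achieve.
+ */
+#if 0	/* example only, never compiled */
+static unsigned ExamplePseudoHeaderChecksum(
+void		*pIpHeader,		/* start of the IP header */
+unsigned	NextLevelProtocol,	/* SKCS_PROTO_ID_TCP or _UDP */
+unsigned	IpDataLength)		/* length of the IP data portion */
+{
+	unsigned long	Sum;
+
+	Sum =
+		(unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+			SKCS_OFS_IP_SOURCE_ADDRESS + 0) +
+		(unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+			SKCS_OFS_IP_SOURCE_ADDRESS + 2) +
+		(unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+			SKCS_OFS_IP_DESTINATION_ADDRESS + 0) +
+		(unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+			SKCS_OFS_IP_DESTINATION_ADDRESS + 2) +
+		(unsigned long) SKCS_HTON16(NextLevelProtocol) +
+		(unsigned long) SKCS_HTON16(IpDataLength);
+
+	/* Fold the carries back into the low 16 bits (ones complement add). */
+	while (Sum > 0xffff) {
+		Sum = (Sum & 0xffff) + (Sum >> 16);
+	}
+
+	return ((unsigned) Sum);
+}
+#endif	/* example only */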
+
+
+/******************************************************************************
+ *
+ * SkCsGetReceiveInfo - verify checksum information for a received packet
+ *
+ * Description:
+ * Verify a received frame's checksum. The function returns a status code
+ * reflecting the result of the verification.
+ *
+ * Note:
+ * Before calling this function you have to verify that the frame is
+ * not padded and Checksum1 and Checksum2 are bigger than 1.
+ *
+ * Arguments:
+ * pAc - Pointer to adapter context struct.
+ *
+ * pIpHeader - Pointer to IP header. Must be at least the length in bytes
+ * of the received IP header including any option fields. For UDP packets,
+ * 8 additional bytes are needed to access the UDP checksum.
+ *
+ * Note: The actual length of the IP header is stored in the lower four
+ * bits of the first octet of the IP header as the number of 4-byte words,
+ * so it must be multiplied by four to get the length in bytes. Thus, the
+ * maximum IP header length is 15 * 4 = 60 bytes.
+ *
+ * Checksum1 - The first 16-bit Internet Checksum calculated by the
+ * hardware starting at the offset returned by SkCsSetReceiveFlags().
+ *
+ * Checksum2 - The second 16-bit Internet Checksum calculated by the
+ * hardware starting at the offset returned by SkCsSetReceiveFlags().
+ *
+ * Returns:
+ * SKCS_STATUS_UNKNOWN_IP_VERSION - Not an IP v4 frame.
+ * SKCS_STATUS_IP_CSUM_ERROR - IP checksum error.
+ * SKCS_STATUS_IP_CSUM_ERROR_TCP - IP checksum error in TCP frame.
+ * SKCS_STATUS_IP_CSUM_ERROR_UDP - IP checksum error in UDP frame
+ * SKCS_STATUS_IP_FRAGMENT - IP fragment (IP checksum ok).
+ * SKCS_STATUS_IP_CSUM_OK - IP checksum ok (not a TCP or UDP frame).
+ * SKCS_STATUS_TCP_CSUM_ERROR - TCP checksum error (IP checksum ok).
+ * SKCS_STATUS_UDP_CSUM_ERROR - UDP checksum error (IP checksum ok).
+ * SKCS_STATUS_TCP_CSUM_OK - IP and TCP checksum ok.
+ * SKCS_STATUS_UDP_CSUM_OK - IP and UDP checksum ok.
+ * SKCS_STATUS_IP_CSUM_OK_NO_UDP - IP checksum OK and no UDP checksum.
+ *
+ * Note: If SKCS_OVERWRITE_STATUS is defined, the SKCS_STATUS_XXX values
+ * returned here can be defined in some header file by the module using CSUM.
+ * In this way, the calling module can assign return values for its own needs,
+ * e.g. by assigning bit flags to the individual protocols.
+ */
+SKCS_STATUS SkCsGetReceiveInfo(
+SK_AC *pAc, /* Adapter context struct. */
+void *pIpHeader, /* IP header. */
+unsigned Checksum1, /* Hardware checksum 1. */
+unsigned Checksum2, /* Hardware checksum 2. */
+int NetNumber) /* Net number */
+{
+ /* Internet Header Version found in IP header. */
+ unsigned InternetHeaderVersion;
+
+ /* Length of the IP header as found in IP header. */
+ unsigned IpHeaderLength;
+
+ /* Length of IP data portion. */
+ unsigned IpDataLength;
+
+ /* IP header checksum. */
+ unsigned IpHeaderChecksum;
+
+ /* IP header options checksum, if any. */
+ unsigned IpOptionsChecksum;
+
+ /* IP data checksum, i.e. TCP/UDP checksum. */
+ unsigned IpDataChecksum;
+
+ /* Next level protocol identifier found in IP header. */
+ unsigned NextLevelProtocol;
+
+ /* The checksum of the "next level protocol", i.e. TCP or UDP. */
+ unsigned long NextLevelProtocolChecksum;
+
+ /* Pointer to next level protocol statistics structure. */
+ SKCS_PROTO_STATS *NextLevelProtoStats;
+
+ /* Temporary variable. */
+ unsigned Tmp;
+
+ Tmp = *(SK_U8 *)
+ SKCS_IDX(pIpHeader, SKCS_OFS_IP_HEADER_VERSION_AND_LENGTH);
+
+ /* Get the Internet Header Version (IHV). */
+ /* Note: The IHV is stored in the upper four bits. */
+
+ InternetHeaderVersion = Tmp >> 4;
+
+ /* Check the Internet Header Version. */
+ /* Note: We currently only support IP version 4. */
+
+ if (InternetHeaderVersion != 4) { /* IPv4? */
+ SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_RX,
+ ("Rx: Unknown Internet Header Version %u.\n",
+ InternetHeaderVersion));
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxUnableCts++;
+ return (SKCS_STATUS_UNKNOWN_IP_VERSION);
+ }
+
+ /* Get the IP header length (IHL). */
+ /*
+ * Note: The IHL is stored in the lower four bits as the number of
+ * 4-byte words.
+ */
+
+ IpHeaderLength = (Tmp & 0xf) * 4;
+
+ /* Check the IP header length. */
+
+ /* 04-Aug-1998 sw - Really check the IHL? Necessary? */
+
+ if (IpHeaderLength < 5*4) {
+ SK_DBG_MSG(pAc, SK_DBGMOD_CSUM, SK_DBGCAT_ERR | SK_DBGCAT_RX,
+ ("Rx: Invalid IP Header Length %u.\n", IpHeaderLength));
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxErrCts++;
+ return (SKCS_STATUS_IP_CSUM_ERROR);
+ }
+
+ /* This is an IPv4 frame with a header of valid length. */
+
+ /* Get the IP header and data checksum. */
+
+ IpDataChecksum = Checksum2;
+
+ /*
+ * The IP header checksum is calculated as follows:
+ *
+ * IpHeaderChecksum = Checksum1 - Checksum2
+ */
+
+ SKCS_OC_SUB(IpHeaderChecksum, Checksum1, Checksum2);
+
+ /* Check if any IP header options. */
+
+ if (IpHeaderLength > SKCS_IP_HEADER_SIZE) {
+
+ /* Get the IP options checksum. */
+
+ IpOptionsChecksum = SkCsCalculateChecksum(
+ SKCS_IDX(pIpHeader, SKCS_IP_HEADER_SIZE),
+ IpHeaderLength - SKCS_IP_HEADER_SIZE);
+
+ /* Adjust the IP header and IP data checksums. */
+
+ SKCS_OC_ADD(IpHeaderChecksum, IpHeaderChecksum, IpOptionsChecksum);
+
+ SKCS_OC_SUB(IpDataChecksum, IpDataChecksum, IpOptionsChecksum);
+ }
+
+ /*
+ * Check if the IP header checksum is ok.
+ *
+ * NOTE: We must check the IP header checksum even if the caller just wants
+ * us to check upper-layer checksums, because we cannot do any further
+ * processing of the packet without a valid IP checksum.
+ */
+
+ /* Get the next level protocol identifier. */
+
+ NextLevelProtocol = *(SK_U8 *)
+ SKCS_IDX(pIpHeader, SKCS_OFS_IP_NEXT_LEVEL_PROTOCOL);
+
+ if (IpHeaderChecksum != 0xffff) {
+ pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_IP].RxErrCts++;
+ /* the NDIS tester wants to know the upper level protocol too */
+ if (NextLevelProtocol == SKCS_PROTO_ID_TCP) {
+ return(SKCS_STATUS_IP_CSUM_ERROR_TCP);
+ }
+ else if (NextLevelProtocol == SKCS_PROTO_ID_UDP) {
+ return(SKCS_STATUS_IP_CSUM_ERROR_UDP);
+ }
+ return (SKCS_STATUS_IP_CSUM_ERROR);
+ }
+
+ /*
+ * Check if this is a TCP or UDP frame and if we should calculate the
+ * TCP/UDP pseudo header checksum.
+ *
+ * Also clear all protocol bit flags of protocols not present in the
+ * frame.
+ */
+
+ if ((pAc->Csum.ReceiveFlags[NetNumber] & SKCS_PROTO_TCP) != 0 &&
+ NextLevelProtocol == SKCS_PROTO_ID_TCP) {
+ /* TCP/IP frame. */
+ NextLevelProtoStats =
+ &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_TCP];
+ }
+ else if ((pAc->Csum.ReceiveFlags[NetNumber] & SKCS_PROTO_UDP) != 0 &&
+ NextLevelProtocol == SKCS_PROTO_ID_UDP) {
+ /* UDP/IP frame. */
+ NextLevelProtoStats =
+ &pAc->Csum.ProtoStats[NetNumber][SKCS_PROTO_STATS_UDP];
+ }
+ else {
+ /*
+ * Either not a TCP or UDP frame and/or TCP/UDP processing not
+ * specified.
+ */
+ return (SKCS_STATUS_IP_CSUM_OK);
+ }
+
+ /* Check if this is an IP fragment. */
+
+ /*
+ * Note: An IP fragment has a non-zero "Fragment Offset" field and/or
+ * the "More Fragments" bit set. Thus, if both the "Fragment Offset"
+ * and the "More Fragments" are zero, it is *not* a fragment. We can
+ * easily check both at the same time since they are in the same 16-bit
+ * word.
+ */
+
+ if ((*(SK_U16 *)
+ SKCS_IDX(pIpHeader, SKCS_OFS_IP_FLAGS_AND_FRAGMENT_OFFSET) &
+ ~SKCS_IP_DONT_FRAGMENT) != 0) {
+ /* IP fragment; ignore all other protocols. */
+ NextLevelProtoStats->RxUnableCts++;
+ return (SKCS_STATUS_IP_FRAGMENT);
+ }
+
+ /*
+ * 08-May-2000 ra
+ *
+ * From RFC 768 (UDP)
+ * If the computed checksum is zero, it is transmitted as all ones (the
+ * equivalent in one's complement arithmetic). An all zero transmitted
+ * checksum value means that the transmitter generated no checksum (for
+ * debugging or for higher level protocols that don't care).
+ */
+
+ if (NextLevelProtocol == SKCS_PROTO_ID_UDP &&
+ *(SK_U16*)SKCS_IDX(pIpHeader, IpHeaderLength + 6) == 0x0000) {
+
+ NextLevelProtoStats->RxOkCts++;
+
+ return (SKCS_STATUS_IP_CSUM_OK_NO_UDP);
+ }
+
+ /*
+ * Calculate the TCP/UDP checksum.
+ */
+
+ /* Get total length of IP header and data. */
+
+ IpDataLength =
+ *(SK_U16 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_TOTAL_LENGTH);
+
+ /* Get length of IP data portion. */
+
+ IpDataLength = SKCS_NTOH16(IpDataLength) - IpHeaderLength;
+
+ NextLevelProtocolChecksum =
+
+ /* Calculate the pseudo header checksum. */
+
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_SOURCE_ADDRESS + 0) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_SOURCE_ADDRESS + 2) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_DESTINATION_ADDRESS + 0) +
+ (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader,
+ SKCS_OFS_IP_DESTINATION_ADDRESS + 2) +
+ (unsigned long) SKCS_HTON16(NextLevelProtocol) +
+ (unsigned long) SKCS_HTON16(IpDataLength) +
+
+ /* Add the TCP/UDP header checksum. */
+
+ (unsigned long) IpDataChecksum;
+
+ /* Add-in any carries. */
+
+ SKCS_OC_ADD(NextLevelProtocolChecksum, NextLevelProtocolChecksum, 0);
+
+ /* Add-in any new carry. */
+
+ SKCS_OC_ADD(NextLevelProtocolChecksum, NextLevelProtocolChecksum, 0);
+
+ /* Check if the TCP/UDP checksum is ok. */
+
+ if ((unsigned) NextLevelProtocolChecksum == 0xffff) {
+
+ /* TCP/UDP checksum ok. */
+
+ NextLevelProtoStats->RxOkCts++;
+
+ return (NextLevelProtocol == SKCS_PROTO_ID_TCP ?
+ SKCS_STATUS_TCP_CSUM_OK : SKCS_STATUS_UDP_CSUM_OK);
+ }
+
+ /* TCP/UDP checksum error. */
+
+ NextLevelProtoStats->RxErrCts++;
+
+ return (NextLevelProtocol == SKCS_PROTO_ID_TCP ?
+ SKCS_STATUS_TCP_CSUM_ERROR : SKCS_STATUS_UDP_CSUM_ERROR);
+} /* SkCsGetReceiveInfo */
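+
+/*
+ * Illustrative sketch only (not used by the driver): the verification above
+ * reduces to adding the hardware checksum of the IP payload to the one's
+ * complement sum of the IPv4 pseudo header and testing for 0xffff. The
+ * helper below shows this on a plain IPv4 header in network byte order and
+ * assumes the payload checksum was summed over big-endian 16-bit words; its
+ * name and the use of plain C types are assumptions for the example only.
+ */
+static int SkCsExamplePseudoHeaderOk(
+const unsigned char *pIp,	/* IPv4 header, network byte order */
+unsigned IpDataLength,		/* length of the IP payload in bytes */
+unsigned Protocol,		/* next level protocol, e.g. 6 = TCP, 17 = UDP */
+unsigned HwPayloadChecksum)	/* 16-bit hardware checksum of the IP payload */
+{
+	unsigned long Sum = HwPayloadChecksum;
+	int i;
+
+	/* Source (offset 12) and destination (offset 16) address words. */
+	for (i = 12; i < 20; i += 2) {
+		Sum += ((unsigned long) pIp[i] << 8) | pIp[i + 1];
+	}
+
+	/* Protocol identifier and payload length from the pseudo header. */
+	Sum += Protocol + IpDataLength;
+
+	/* Fold any carries back in, twice, as SKCS_OC_ADD() does. */
+	Sum = (Sum & 0xffff) + (Sum >> 16);
+	Sum = (Sum & 0xffff) + (Sum >> 16);
+
+	/* A valid TCP/UDP checksum yields all ones. */
+	return (Sum == 0xffff);
+}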
+
+
+/******************************************************************************
+ *
+ * SkCsSetReceiveFlags - set checksum receive flags
+ *
+ * Description:
+ * Use this function to set the various receive flags. According to the
+ * protocol flags set by the caller, the start offsets within received
+ * packets of the two hardware checksums are returned. These offsets must
+ * be stored in all receive descriptors.
+ *
+ * Arguments:
+ * pAc - Pointer to adapter context struct.
+ *
+ * ReceiveFlags - Any combination of SK_PROTO_XXX flags of the protocols
+ * for which the caller wants checksum information on received frames.
+ *
+ * pChecksum1Offset - The start offset of the first receive descriptor
+ * hardware checksum to be calculated for received frames is returned
+ * here.
+ *
+ * pChecksum2Offset - The start offset of the second receive descriptor
+ * hardware checksum to be calculated for received frames is returned
+ * here.
+ *
+ * Returns: N/A
+ *	The two hardware checksum start offsets are returned through the
+ *	pChecksum1Offset and pChecksum2Offset pointer arguments.
+ */
+void SkCsSetReceiveFlags(
+SK_AC *pAc, /* Adapter context struct. */
+unsigned ReceiveFlags, /* New receive flags. */
+unsigned *pChecksum1Offset, /* Offset for hardware checksum 1. */
+unsigned *pChecksum2Offset, /* Offset for hardware checksum 2. */
+int NetNumber)
+{
+ /* Save the receive flags. */
+
+ pAc->Csum.ReceiveFlags[NetNumber] = ReceiveFlags;
+
+ /* First checksum start offset is the IP header. */
+ *pChecksum1Offset = SKCS_MAC_HEADER_SIZE;
+
+ /*
+ * Second checksum start offset is the IP data. Note that this may vary
+ * if there are any IP header options in the actual packet.
+ */
+ *pChecksum2Offset = SKCS_MAC_HEADER_SIZE + SKCS_IP_HEADER_SIZE;
+} /* SkCsSetReceiveFlags */
+
+#ifndef SK_CS_CALCULATE_CHECKSUM
+
+/******************************************************************************
+ *
+ * SkCsCalculateChecksum - calculate checksum for specified data
+ *
+ * Description:
+ * Calculate and return the 16-bit Internet Checksum for the specified
+ * data.
+ *
+ * Arguments:
+ * pData - Pointer to data for which the checksum shall be calculated.
+ * Note: The pointer should be aligned on a 16-bit boundary.
+ *
+ * Length - Length in bytes of data to checksum.
+ *
+ * Returns:
+ * The 16-bit Internet Checksum for the specified data.
+ *
+ * Note: The checksum is calculated in the machine's natural byte order,
+ * i.e. little vs. big endian. Thus, the resulting checksum is different
+ * for the same input data on little and big endian machines.
+ *
+ * However, when the checksum is written back into the network packet, it
+ * always ends up in correct network byte order.
+ */
+unsigned SkCsCalculateChecksum(
+void *pData, /* Data to checksum. */
+unsigned Length) /* Length of data. */
+{
+ SK_U16 *pU16; /* Pointer to the data as 16-bit words. */
+ unsigned long Checksum; /* Checksum; must be at least 32 bits. */
+
+ /* Sum up all 16-bit words. */
+
+ pU16 = (SK_U16 *) pData;
+ for (Checksum = 0; Length > 1; Length -= 2) {
+ Checksum += *pU16++;
+ }
+
+ /* If this is an odd number of bytes, add-in the last byte. */
+
+ if (Length > 0) {
+#ifdef SK_BIG_ENDIAN
+ /* Add the last byte as the high byte. */
+ Checksum += ((unsigned) *(SK_U8 *) pU16) << 8;
+#else /* !SK_BIG_ENDIAN */
+ /* Add the last byte as the low byte. */
+ Checksum += *(SK_U8 *) pU16;
+#endif /* !SK_BIG_ENDIAN */
+ }
+
+ /* Add-in any carries. */
+
+ SKCS_OC_ADD(Checksum, Checksum, 0);
+
+ /* Add-in any new carry. */
+
+ SKCS_OC_ADD(Checksum, Checksum, 0);
+
+ /* Note: All bits beyond the 16-bit limit are now zero. */
+
+ return ((unsigned) Checksum);
+} /* SkCsCalculateChecksum */
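+
+/*
+ * Illustrative sketch only (not used by the driver): SkCsCalculateChecksum()
+ * returns the folded one's complement sum, i.e. the result is *not*
+ * inverted. A received IPv4 header is therefore valid if the sum over the
+ * complete header is 0xffff; to generate a checksum, the sum over the
+ * header with a zeroed checksum field has to be inverted. The helper name
+ * is an assumption for the example only.
+ */
+static int SkCsExampleIpHeaderOk(
+void *pIpHeader,	/* IPv4 header including its checksum field */
+unsigned HeaderLength)	/* header length in bytes (20..60) */
+{
+	/* A correct header sums to all ones. */
+	return (SkCsCalculateChecksum(pIpHeader, HeaderLength) == 0xffff);
+}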
+
+#endif /* SK_CS_CALCULATE_CHECKSUM */
+
+/******************************************************************************
+ *
+ * SkCsEvent - the CSUM event dispatcher
+ *
+ * Description:
+ * This is the event handler for the CSUM module.
+ *
+ * Arguments:
+ * pAc - Pointer to adapter context.
+ *
+ * Ioc - I/O context.
+ *
+ * Event - Event id.
+ *
+ * Param - Event dependent parameter.
+ *
+ * Returns:
+ *	0 - Success. Unknown events are ignored and also return 0.
+ */
+int SkCsEvent(
+SK_AC *pAc, /* Pointer to adapter context. */
+SK_IOC Ioc, /* I/O context. */
+SK_U32 Event, /* Event id. */
+SK_EVPARA Param) /* Event dependent parameter. */
+{
+ int ProtoIndex;
+ int NetNumber;
+
+ switch (Event) {
+ /*
+ * Clear protocol statistics.
+ *
+ * Param - Protocol index, or -1 for all protocols.
+ * - Net number.
+ */
+ case SK_CSUM_EVENT_CLEAR_PROTO_STATS:
+
+ ProtoIndex = (int)Param.Para32[1];
+ NetNumber = (int)Param.Para32[0];
+ if (ProtoIndex < 0) { /* Clear for all protocols. */
+ if (NetNumber >= 0) {
+ SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][0], 0,
+ sizeof(pAc->Csum.ProtoStats[NetNumber]));
+ }
+ }
+ else { /* Clear for individual protocol. */
+ SK_MEMSET(&pAc->Csum.ProtoStats[NetNumber][ProtoIndex], 0,
+ sizeof(pAc->Csum.ProtoStats[NetNumber][ProtoIndex]));
+ }
+ break;
+ default:
+ break;
+ }
+ return (0); /* Success. */
+} /* SkCsEvent */
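+
+/*
+ * Illustrative sketch only (not used by the driver): clearing all protocol
+ * statistics of net 0 through the event dispatcher above. Para32[0] carries
+ * the net number and Para32[1] the protocol index (-1 means all protocols).
+ * The wrapper name is an assumption for the example only.
+ */
+static void SkCsExampleClearAllProtoStats(
+SK_AC *pAc,	/* Adapter context struct. */
+SK_IOC Ioc)	/* I/O context. */
+{
+	SK_EVPARA Param;
+
+	Param.Para32[0] = 0;			/* net number 0 */
+	Param.Para32[1] = (SK_U32) -1;		/* -1 = all protocols */
+	(void) SkCsEvent(pAc, Ioc, SK_CSUM_EVENT_CLEAR_PROTO_STATS, Param);
+}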
+
+#endif /* SK_USE_CSUM */
diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c
new file mode 100644
index 000000000000..0fddf61047b4
--- /dev/null
+++ b/drivers/net/sk98lin/skdim.c
@@ -0,0 +1,742 @@
+/******************************************************************************
+ *
+ * Name: skdim.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.5 $
+ * Date: $Date: 2003/11/28 12:55:40 $
+ * Purpose: All functions to maintain interrupt moderation
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This module is intended to manage the dynamic interrupt moderation on both
+ * GEnesis and Yukon adapters.
+ *
+ * Include File Hierarchy:
+ *
+ * "skdrv1st.h"
+ * "skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#ifndef lint
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skdim.c,v 1.5 2003/11/28 12:55:40 rroesler Exp $ (C) SysKonnect.";
+#endif
+
+#define __SKADDR_C
+
+#ifdef __cplusplus
+#error C++ is not yet supported.
+extern "C" {
+#endif
+
+/*******************************************************************************
+**
+** Includes
+**
+*******************************************************************************/
+
+#ifndef __INC_SKDRV1ST_H
+#include "h/skdrv1st.h"
+#endif
+
+#ifndef __INC_SKDRV2ND_H
+#include "h/skdrv2nd.h"
+#endif
+
+#include <linux/kernel_stat.h>
+
+/*******************************************************************************
+**
+** Defines
+**
+*******************************************************************************/
+
+/*******************************************************************************
+**
+** Typedefs
+**
+*******************************************************************************/
+
+/*******************************************************************************
+**
+** Local function prototypes
+**
+*******************************************************************************/
+
+static unsigned int GetCurrentSystemLoad(SK_AC *pAC);
+static SK_U64 GetIsrCalls(SK_AC *pAC);
+static SK_BOOL IsIntModEnabled(SK_AC *pAC);
+static void SetCurrIntCtr(SK_AC *pAC);
+static void EnableIntMod(SK_AC *pAC);
+static void DisableIntMod(SK_AC *pAC);
+static void ResizeDimTimerDuration(SK_AC *pAC);
+static void DisplaySelectedModerationType(SK_AC *pAC);
+static void DisplaySelectedModerationMask(SK_AC *pAC);
+static void DisplayDescrRatio(SK_AC *pAC);
+
+/*******************************************************************************
+**
+** Global variables
+**
+*******************************************************************************/
+
+/*******************************************************************************
+**
+** Local variables
+**
+*******************************************************************************/
+
+/*******************************************************************************
+**
+** Global functions
+**
+*******************************************************************************/
+
+/*******************************************************************************
+** Function : SkDimModerate
+** Description : Called in every ISR to check if moderation is to be applied
+** or not for the current number of interrupts
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : void (!)
+** Notes : -
+*******************************************************************************/
+
+void
+SkDimModerate(SK_AC *pAC) {
+ unsigned int CurrSysLoad = 0; /* expressed in percent */
+ unsigned int LoadIncrease = 0; /* expressed in percent */
+ SK_U64 ThresholdInts = 0;
+ SK_U64 IsrCallsPerSec = 0;
+
+#define M_DIMINFO pAC->DynIrqModInfo
+
+ if (!IsIntModEnabled(pAC)) {
+ if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
+ CurrSysLoad = GetCurrentSystemLoad(pAC);
+ if (CurrSysLoad > 75) {
+ /*
+ ** More than 75% total system load! Enable the moderation
+ ** to shield the system against too many interrupts.
+ */
+ EnableIntMod(pAC);
+ } else if (CurrSysLoad > M_DIMINFO.PrevSysLoad) {
+ LoadIncrease = (CurrSysLoad - M_DIMINFO.PrevSysLoad);
+ if (LoadIncrease > ((M_DIMINFO.PrevSysLoad *
+ C_INT_MOD_ENABLE_PERCENTAGE) / 100)) {
+ if (CurrSysLoad > 10) {
+ /*
+ ** More than 50% increase with respect to the
+ ** previous load of the system. Most likely this
+ ** is due to our ISR-proc...
+ */
+ EnableIntMod(pAC);
+ }
+ }
+ } else {
+ /*
+ ** Neither too much system load at all nor too much increase
+ ** with respect to the previous system load. Hence, we can leave
+				** the ISR-handling as it is without enabling moderation.
+ */
+ }
+ M_DIMINFO.PrevSysLoad = CurrSysLoad;
+ }
+ } else {
+ if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
+ ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec *
+ C_INT_MOD_DISABLE_PERCENTAGE) / 100);
+ IsrCallsPerSec = GetIsrCalls(pAC);
+ if (IsrCallsPerSec <= ThresholdInts) {
+ /*
+ ** The number of interrupts within the last second is
+				** lower than the disable percentage of the desired
+				** maximum rate. Therefore we can disable the moderation.
+ */
+ DisableIntMod(pAC);
+ M_DIMINFO.MaxModIntsPerSec =
+ (M_DIMINFO.MaxModIntsPerSecUpperLimit +
+ M_DIMINFO.MaxModIntsPerSecLowerLimit) / 2;
+ } else {
+ /*
+ ** The number of interrupts per sec is the same as expected.
+				** Evaluate the descriptor ratio. If it has changed, a resize
+				** of the moderation timer might be useful.
+ */
+ if (M_DIMINFO.AutoSizing) {
+ ResizeDimTimerDuration(pAC);
+ }
+ }
+ }
+ }
+
+ /*
+ ** Some information to the log...
+ */
+ if (M_DIMINFO.DisplayStats) {
+ DisplaySelectedModerationType(pAC);
+ DisplaySelectedModerationMask(pAC);
+ DisplayDescrRatio(pAC);
+ }
+
+ M_DIMINFO.NbrProcessedDescr = 0;
+ SetCurrIntCtr(pAC);
+}
+
+/*******************************************************************************
+** Function : SkDimStartModerationTimer
+** Description : Starts the audit-timer for the dynamic interrupt moderation
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : void (!)
+** Notes : -
+*******************************************************************************/
+
+void
+SkDimStartModerationTimer(SK_AC *pAC) {
+ SK_EVPARA EventParam; /* Event struct for timer event */
+
+ SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam));
+ EventParam.Para32[0] = SK_DRV_MODERATION_TIMER;
+ SkTimerStart(pAC, pAC->IoBase, &pAC->DynIrqModInfo.ModTimer,
+ SK_DRV_MODERATION_TIMER_LENGTH,
+ SKGE_DRV, SK_DRV_TIMER, EventParam);
+}
+
+/*******************************************************************************
+** Function : SkDimEnableModerationIfNeeded
+** Description : Either enables or disables moderation
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : void (!)
+** Notes         : This function is called when a particular adapter is opened.
+**                 There is no disable counterpart, because once all interrupts
+**                 are disabled, the moderation timer has no meaning at all.
+******************************************************************************/
+
+void
+SkDimEnableModerationIfNeeded(SK_AC *pAC) {
+
+ if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_STATIC) {
+ EnableIntMod(pAC); /* notification print in this function */
+ } else if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
+ SkDimStartModerationTimer(pAC);
+ if (M_DIMINFO.DisplayStats) {
+ printk("Dynamic moderation has been enabled\n");
+ }
+ } else {
+ if (M_DIMINFO.DisplayStats) {
+ printk("No moderation has been enabled\n");
+ }
+ }
+}
+
+/*******************************************************************************
+** Function : SkDimDisplayModerationSettings
+** Description   : Displays the current settings regarding interrupt moderation
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : void (!)
+** Notes : -
+*******************************************************************************/
+
+void
+SkDimDisplayModerationSettings(SK_AC *pAC) {
+ DisplaySelectedModerationType(pAC);
+ DisplaySelectedModerationMask(pAC);
+}
+
+/*******************************************************************************
+**
+** Local functions
+**
+*******************************************************************************/
+
+/*******************************************************************************
+** Function : GetCurrentSystemLoad
+** Description   : Retrieves the current load of the system. This load
+**                 is evaluated across all processors within the system.
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : unsigned int: load expressed in percentage
+** Notes         : The possible range being returned is from 0 up to 100,
+**                 where 0 means 'no load at all' and 100 'system fully loaded'.
+**                 It is impossible to determine what actually causes the system
+**                 to be at 100%, but it may well be due to too many interrupts.
+*******************************************************************************/
+
+static unsigned int
+GetCurrentSystemLoad(SK_AC *pAC) {
+ unsigned long jif = jiffies;
+ unsigned int UserTime = 0;
+ unsigned int SystemTime = 0;
+ unsigned int NiceTime = 0;
+ unsigned int IdleTime = 0;
+ unsigned int TotalTime = 0;
+ unsigned int UsedTime = 0;
+ unsigned int SystemLoad = 0;
+
+ /* unsigned int NbrCpu = 0; */
+
+ /*
+ ** The following lines have been commented out, because
+ ** from kernel 2.5.44 onwards, the kernel-owned structure
+ **
+ ** struct kernel_stat kstat
+ **
+ ** is not marked as an exported symbol in the file
+ **
+ ** kernel/ksyms.c
+ **
+ ** As a consequence, using this driver as KLM is not possible
+ ** and any access of the structure kernel_stat via the
+ ** dedicated macros kstat_cpu(i).cpustat.xxx is to be avoided.
+ **
+ ** The kstat-information might be added again in future
+ ** versions of the 2.5.xx kernel, but for the time being,
+ ** number of interrupts will serve as indication how much
+ ** load we currently have...
+ **
+ ** for (NbrCpu = 0; NbrCpu < num_online_cpus(); NbrCpu++) {
+ ** UserTime = UserTime + kstat_cpu(NbrCpu).cpustat.user;
+ ** NiceTime = NiceTime + kstat_cpu(NbrCpu).cpustat.nice;
+ ** SystemTime = SystemTime + kstat_cpu(NbrCpu).cpustat.system;
+ ** }
+ */
+ SK_U64 ThresholdInts = 0;
+ SK_U64 IsrCallsPerSec = 0;
+
+ ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec *
+				C_INT_MOD_ENABLE_PERCENTAGE) / 100);
+ IsrCallsPerSec = GetIsrCalls(pAC);
+ if (IsrCallsPerSec >= ThresholdInts) {
+ /*
+ ** We do not know how much the real CPU-load is!
+ ** Return 80% as a default in order to activate DIM
+ */
+ SystemLoad = 80;
+ return (SystemLoad);
+ }
+
+ UsedTime = UserTime + NiceTime + SystemTime;
+
+ IdleTime = jif * num_online_cpus() - UsedTime;
+ TotalTime = UsedTime + IdleTime;
+
+ SystemLoad = ( 100 * (UsedTime - M_DIMINFO.PrevUsedTime) ) /
+ (TotalTime - M_DIMINFO.PrevTotalTime);
+
+ if (M_DIMINFO.DisplayStats) {
+ printk("Current system load is: %u\n", SystemLoad);
+ }
+
+ M_DIMINFO.PrevTotalTime = TotalTime;
+ M_DIMINFO.PrevUsedTime = UsedTime;
+
+ return (SystemLoad);
+}
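+
+/*******************************************************************************
+** The following helper is only an illustrative sketch (it is not used by
+** the driver): it expresses the load formula applied above, i.e. the
+** percentage of non-idle time within the interval between two samples.
+** The function name is an assumption for the example only.
+*******************************************************************************/
+
+static unsigned int
+ExampleLoadPercentage(unsigned int PrevUsedTime, unsigned int PrevTotalTime,
+	unsigned int CurrUsedTime, unsigned int CurrTotalTime) {
+	if (CurrTotalTime <= PrevTotalTime) {
+		return (0);	/* no time elapsed - avoid a division by zero */
+	}
+	return ((100 * (CurrUsedTime - PrevUsedTime)) /
+		(CurrTotalTime - PrevTotalTime));
+}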
+
+/*******************************************************************************
+** Function : GetIsrCalls
+** Description : Depending on the selected moderation mask, this function will
+** return the number of interrupts handled in the previous time-
+** frame. This evaluated number is based on the current number
+**                 of interrupts stored in the PNMI context and the previously stored
+** interrupts.
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns       : SK_U64: the number of interrupts executed in the last
+**                 timeframe
+** Notes         : It only makes sense to call this function when dynamic
+**                 interrupt moderation is applied
+*******************************************************************************/
+
+static SK_U64
+GetIsrCalls(SK_AC *pAC) {
+ SK_U64 RxPort0IntDiff = 0;
+ SK_U64 RxPort1IntDiff = 0;
+ SK_U64 TxPort0IntDiff = 0;
+ SK_U64 TxPort1IntDiff = 0;
+
+ if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_TX_ONLY) {
+ if (pAC->GIni.GIMacsFound == 2) {
+ TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts -
+ pAC->DynIrqModInfo.PrevPort1TxIntrCts;
+ }
+ TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts -
+ pAC->DynIrqModInfo.PrevPort0TxIntrCts;
+ } else if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_RX_ONLY) {
+ if (pAC->GIni.GIMacsFound == 2) {
+ RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort1RxIntrCts;
+ }
+ RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort0RxIntrCts;
+ } else {
+ if (pAC->GIni.GIMacsFound == 2) {
+ RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort1RxIntrCts;
+ TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts -
+ pAC->DynIrqModInfo.PrevPort1TxIntrCts;
+ }
+ RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort0RxIntrCts;
+ TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts -
+ pAC->DynIrqModInfo.PrevPort0TxIntrCts;
+ }
+
+ return (RxPort0IntDiff + RxPort1IntDiff + TxPort0IntDiff + TxPort1IntDiff);
+}
+
+/*******************************************************************************
+** Function : GetRxCalls
+** Description : This function will return the number of times a receive inter-
+** rupt was processed. This is needed to evaluate any resizing
+** factor.
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : SK_U64: the number of RX-ints being processed
+** Notes         : It only makes sense to call this function when dynamic
+** interrupt moderation is applied
+*******************************************************************************/
+
+static SK_U64
+GetRxCalls(SK_AC *pAC) {
+ SK_U64 RxPort0IntDiff = 0;
+ SK_U64 RxPort1IntDiff = 0;
+
+ if (pAC->GIni.GIMacsFound == 2) {
+ RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort1RxIntrCts;
+ }
+ RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
+ pAC->DynIrqModInfo.PrevPort0RxIntrCts;
+
+ return (RxPort0IntDiff + RxPort1IntDiff);
+}
+
+/*******************************************************************************
+** Function : SetCurrIntCtr
+** Description   : Stores the current number of occurred interrupts in the
+**                 adapter context. This is needed to evaluate the number of
+**                 interrupts within the current timeframe.
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : void (!)
+** Notes : -
+*******************************************************************************/
+
+static void
+SetCurrIntCtr(SK_AC *pAC) {
+ if (pAC->GIni.GIMacsFound == 2) {
+ pAC->DynIrqModInfo.PrevPort1RxIntrCts = pAC->Pnmi.Port[1].RxIntrCts;
+ pAC->DynIrqModInfo.PrevPort1TxIntrCts = pAC->Pnmi.Port[1].TxIntrCts;
+ }
+ pAC->DynIrqModInfo.PrevPort0RxIntrCts = pAC->Pnmi.Port[0].RxIntrCts;
+ pAC->DynIrqModInfo.PrevPort0TxIntrCts = pAC->Pnmi.Port[0].TxIntrCts;
+}
+
+/*******************************************************************************
+** Function : IsIntModEnabled()
+** Description   : Retrieves the current value of the interrupt moderation
+** command register. Its content determines whether any
+** moderation is running or not.
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : SK_TRUE : if mod timer running
+** SK_FALSE : if no moderation is being performed
+** Notes : -
+*******************************************************************************/
+
+static SK_BOOL
+IsIntModEnabled(SK_AC *pAC) {
+ unsigned long CtrCmd;
+
+ SK_IN32(pAC->IoBase, B2_IRQM_CTRL, &CtrCmd);
+ if ((CtrCmd & TIM_START) == TIM_START) {
+ return SK_TRUE;
+ } else {
+ return SK_FALSE;
+ }
+}
+
+/*******************************************************************************
+** Function : EnableIntMod()
+** Description : Enables the interrupt moderation using the values stored in
+**                 the pAC->DynIrqModInfo data structure
+** Programmer : Ralph Roesler
+** Last Modified: 22-mar-03
+** Returns : -
+** Notes : -
+*******************************************************************************/
+
+static void
+EnableIntMod(SK_AC *pAC) {
+ unsigned long ModBase;
+
+ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
+ ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec;
+ } else {
+ ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec;
+ }
+
+ SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase);
+ SK_OUT32(pAC->IoBase, B2_IRQM_MSK, pAC->DynIrqModInfo.MaskIrqModeration);
+ SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START);
+ if (M_DIMINFO.DisplayStats) {
+ printk("Enabled interrupt moderation (%i ints/sec)\n",
+ M_DIMINFO.MaxModIntsPerSec);
+ }
+}
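+
+/*******************************************************************************
+** The following helper is only an illustrative sketch (it is not used by
+** the driver): the value written to B2_IRQM_INI above is the chip clock
+** frequency divided by the desired maximum interrupt rate, so that the
+** moderation timer expires at most MaxModIntsPerSec times per second.
+** The function name is an assumption for the example only.
+*******************************************************************************/
+
+static unsigned long
+ExampleIrqModTimerInit(unsigned long ClockFreqHz, int MaxIntsPerSec) {
+	if (MaxIntsPerSec <= 0) {
+		return (0);	/* guard against a division by zero */
+	}
+	/* one timer expiry every 1/MaxIntsPerSec second */
+	return (ClockFreqHz / (unsigned long) MaxIntsPerSec);
+}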
+
+/*******************************************************************************
+** Function : DisableIntMod()
+** Description   : Disables the interrupt moderation regardless of which
+**                 interrupts are currently active
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : -
+** Notes : -
+*******************************************************************************/
+
+static void
+DisableIntMod(SK_AC *pAC) {
+
+ SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_STOP);
+ if (M_DIMINFO.DisplayStats) {
+ printk("Disabled interrupt moderation\n");
+ }
+}
+
+/*******************************************************************************
+** Function : ResizeDimTimerDuration();
+** Description   : Checks the currently used descriptor ratio and resizes the
+**                 timer duration (longer/shorter) if possible.
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : -
+** Notes         : There are both maximum and minimum timer duration values.
+** This function assumes that interrupt moderation is already
+** enabled!
+*******************************************************************************/
+
+static void
+ResizeDimTimerDuration(SK_AC *pAC) {
+ SK_BOOL IncreaseTimerDuration;
+ int TotalMaxNbrDescr;
+ int UsedDescrRatio;
+ int RatioDiffAbs;
+ int RatioDiffRel;
+ int NewMaxModIntsPerSec;
+ int ModAdjValue;
+ long ModBase;
+
+ /*
+ ** Check first if we are allowed to perform any modification
+ */
+ if (IsIntModEnabled(pAC)) {
+ if (M_DIMINFO.IntModTypeSelect != C_INT_MOD_DYNAMIC) {
+ return;
+ } else {
+ if (M_DIMINFO.ModJustEnabled) {
+ M_DIMINFO.ModJustEnabled = SK_FALSE;
+ return;
+ }
+ }
+ }
+
+ /*
+	** If we got this far, we have to evaluate the amount of the
+ ** descriptor ratio change...
+ */
+ TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC);
+ UsedDescrRatio = (M_DIMINFO.NbrProcessedDescr * 100) / TotalMaxNbrDescr;
+
+ if (UsedDescrRatio > M_DIMINFO.PrevUsedDescrRatio) {
+ RatioDiffAbs = (UsedDescrRatio - M_DIMINFO.PrevUsedDescrRatio);
+ RatioDiffRel = (RatioDiffAbs * 100) / UsedDescrRatio;
+ M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
+ IncreaseTimerDuration = SK_FALSE; /* in other words: DECREASE */
+ } else if (UsedDescrRatio < M_DIMINFO.PrevUsedDescrRatio) {
+ RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio);
+ RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio;
+ M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
+ IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */
+ } else {
+ RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio);
+ RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio;
+ M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
+ IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */
+ }
+
+ /*
+ ** Now we can determine the change in percent
+ */
+ if ((RatioDiffRel >= 0) && (RatioDiffRel <= 5) ) {
+ ModAdjValue = 1; /* 1% change - maybe some other value in future */
+ } else if ((RatioDiffRel > 5) && (RatioDiffRel <= 10) ) {
+ ModAdjValue = 1; /* 1% change - maybe some other value in future */
+ } else if ((RatioDiffRel > 10) && (RatioDiffRel <= 15) ) {
+ ModAdjValue = 1; /* 1% change - maybe some other value in future */
+ } else {
+ ModAdjValue = 1; /* 1% change - maybe some other value in future */
+ }
+
+ if (IncreaseTimerDuration) {
+ NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec +
+ (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100;
+ } else {
+ NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec -
+ (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100;
+ }
+
+ /*
+ ** Check if we exceed boundaries...
+ */
+ if ( (NewMaxModIntsPerSec > M_DIMINFO.MaxModIntsPerSecUpperLimit) ||
+ (NewMaxModIntsPerSec < M_DIMINFO.MaxModIntsPerSecLowerLimit)) {
+ if (M_DIMINFO.DisplayStats) {
+ printk("Cannot change ModTim from %i to %i ints/sec\n",
+ M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec);
+ }
+ return;
+ } else {
+ if (M_DIMINFO.DisplayStats) {
+ printk("Resized ModTim from %i to %i ints/sec\n",
+ M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec);
+ }
+ }
+
+ M_DIMINFO.MaxModIntsPerSec = NewMaxModIntsPerSec;
+
+ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
+ ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec;
+ } else {
+ ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec;
+ }
+
+ /*
+ ** We do not need to touch any other registers
+ */
+ SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase);
+}
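+
+/*******************************************************************************
+** The following helper is only an illustrative sketch (it is not used by
+** the driver): the bounded adjustment performed above, i.e. the interrupt
+** rate is changed by one percent per audit and kept within the configured
+** limits. The function name is an assumption for the example only.
+*******************************************************************************/
+
+static int
+ExampleAdjustIntRate(int CurrRate, int LowerLimit, int UpperLimit,
+	SK_BOOL Increase) {
+	int NewRate;
+
+	if (Increase) {
+		NewRate = CurrRate + (CurrRate * 1) / 100;
+	} else {
+		NewRate = CurrRate - (CurrRate * 1) / 100;
+	}
+	if ((NewRate > UpperLimit) || (NewRate < LowerLimit)) {
+		return (CurrRate);	/* keep the old rate if out of bounds */
+	}
+	return (NewRate);
+}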
+
+/*******************************************************************************
+** Function : DisplaySelectedModerationType()
+** Description : Displays what type of moderation we have
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : void!
+** Notes : -
+*******************************************************************************/
+
+static void
+DisplaySelectedModerationType(SK_AC *pAC) {
+
+ if (pAC->DynIrqModInfo.DisplayStats) {
+ if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) {
+ printk("Static int moderation runs with %i INTS/sec\n",
+ pAC->DynIrqModInfo.MaxModIntsPerSec);
+ } else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
+ if (IsIntModEnabled(pAC)) {
+ printk("Dynamic int moderation runs with %i INTS/sec\n",
+ pAC->DynIrqModInfo.MaxModIntsPerSec);
+ } else {
+ printk("Dynamic int moderation currently not applied\n");
+ }
+ } else {
+ printk("No interrupt moderation selected!\n");
+ }
+ }
+}
+
+/*******************************************************************************
+** Function : DisplaySelectedModerationMask()
+** Description : Displays what interrupts are moderated
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : void!
+** Notes : -
+*******************************************************************************/
+
+static void
+DisplaySelectedModerationMask(SK_AC *pAC) {
+
+ if (pAC->DynIrqModInfo.DisplayStats) {
+ if (pAC->DynIrqModInfo.IntModTypeSelect != C_INT_MOD_NONE) {
+ switch (pAC->DynIrqModInfo.MaskIrqModeration) {
+ case IRQ_MASK_TX_ONLY:
+ printk("Only Tx-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_RX_ONLY:
+ printk("Only Rx-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_SP_ONLY:
+ printk("Only special-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_TX_RX:
+ printk("Tx- and Rx-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_SP_RX:
+ printk("Special- and Rx-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_SP_TX:
+ printk("Special- and Tx-interrupts are moderated\n");
+ break;
+ case IRQ_MASK_RX_TX_SP:
+ printk("All Rx-, Tx and special-interrupts are moderated\n");
+ break;
+ default:
+ printk("Don't know what is moderated\n");
+ break;
+ }
+ } else {
+ printk("No specific interrupts masked for moderation\n");
+ }
+ }
+}
+
+/*******************************************************************************
+** Function : DisplayDescrRatio
+** Description : Like the name states...
+** Programmer : Ralph Roesler
+** Last Modified: 23-mar-03
+** Returns : void!
+** Notes : -
+*******************************************************************************/
+
+static void
+DisplayDescrRatio(SK_AC *pAC) {
+ int TotalMaxNbrDescr = 0;
+
+ if (pAC->DynIrqModInfo.DisplayStats) {
+ TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC);
+ printk("Ratio descriptors: %i/%i\n",
+ M_DIMINFO.NbrProcessedDescr, TotalMaxNbrDescr);
+ }
+}
+
+/*******************************************************************************
+**
+** End of file
+**
+*******************************************************************************/
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c
new file mode 100644
index 000000000000..fb639959292b
--- /dev/null
+++ b/drivers/net/sk98lin/skethtool.c
@@ -0,0 +1,552 @@
+/******************************************************************************
+ *
+ * Name: skethtool.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.7 $
+ * Date: $Date: 2004/09/29 13:32:07 $
+ * Purpose: All functions regarding ethtool handling
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2004 Marvell.
+ *
+ * Driver for Marvell Yukon/2 chipset and SysKonnect Gigabit Ethernet
+ * Server Adapters.
+ *
+ * Author: Ralph Roesler (rroesler@syskonnect.de)
+ * Mirko Lindner (mlindner@syskonnect.de)
+ *
+ * Address all question to: linux@syskonnect.de
+ *
+ * The technical manual for the adapters is available from SysKonnect's
+ * web pages: www.syskonnect.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ *****************************************************************************/
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+#include "h/skversion.h"
+
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+
+/******************************************************************************
+ *
+ * Defines
+ *
+ *****************************************************************************/
+
+#define SUPP_COPPER_ALL (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half| SUPPORTED_1000baseT_Full| \
+ SUPPORTED_TP)
+
+#define ADV_COPPER_ALL (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_1000baseT_Half| ADVERTISED_1000baseT_Full| \
+ ADVERTISED_TP)
+
+#define SUPP_FIBRE_ALL (SUPPORTED_1000baseT_Full | \
+ SUPPORTED_FIBRE | \
+ SUPPORTED_Autoneg)
+
+#define ADV_FIBRE_ALL (ADVERTISED_1000baseT_Full | \
+ ADVERTISED_FIBRE | \
+ ADVERTISED_Autoneg)
+
+
+/******************************************************************************
+ *
+ * Local Functions
+ *
+ *****************************************************************************/
+
+/*****************************************************************************
+ *
+ * getSettings - retrieves the current settings of the selected adapter
+ *
+ * Description:
+ * The current configuration of the selected adapter is returned.
+ * This configuration involves a)speed, b)duplex and c)autoneg plus
+ * a number of other variables.
+ *
+ * Returns: always 0
+ *
+ */
+static int getSettings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ const DEV_NET *pNet = netdev_priv(dev);
+ int port = pNet->PortNr;
+ const SK_AC *pAC = pNet->pAC;
+ const SK_GEPORT *pPort = &pAC->GIni.GP[port];
+
+ static int DuplexAutoNegConfMap[9][3]= {
+ { -1 , -1 , -1 },
+ { 0 , -1 , -1 },
+ { SK_LMODE_HALF , DUPLEX_HALF, AUTONEG_DISABLE },
+ { SK_LMODE_FULL , DUPLEX_FULL, AUTONEG_DISABLE },
+ { SK_LMODE_AUTOHALF , DUPLEX_HALF, AUTONEG_ENABLE },
+ { SK_LMODE_AUTOFULL , DUPLEX_FULL, AUTONEG_ENABLE },
+ { SK_LMODE_AUTOBOTH , DUPLEX_FULL, AUTONEG_ENABLE },
+ { SK_LMODE_AUTOSENSE , -1 , -1 },
+ { SK_LMODE_INDETERMINATED, -1 , -1 }
+ };
+ static int SpeedConfMap[6][2] = {
+ { 0 , -1 },
+ { SK_LSPEED_AUTO , -1 },
+ { SK_LSPEED_10MBPS , SPEED_10 },
+ { SK_LSPEED_100MBPS , SPEED_100 },
+ { SK_LSPEED_1000MBPS , SPEED_1000 },
+ { SK_LSPEED_INDETERMINATED, -1 }
+ };
+ static int AdvSpeedMap[6][2] = {
+ { 0 , -1 },
+ { SK_LSPEED_AUTO , -1 },
+ { SK_LSPEED_10MBPS , ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full },
+ { SK_LSPEED_100MBPS , ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full },
+ { SK_LSPEED_1000MBPS , ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full},
+ { SK_LSPEED_INDETERMINATED, -1 }
+ };
+
+ ecmd->phy_address = port;
+ ecmd->speed = SpeedConfMap[pPort->PLinkSpeedUsed][1];
+ ecmd->duplex = DuplexAutoNegConfMap[pPort->PLinkModeStatus][1];
+ ecmd->autoneg = DuplexAutoNegConfMap[pPort->PLinkModeStatus][2];
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (pAC->GIni.GICopperType) {
+ ecmd->port = PORT_TP;
+ ecmd->supported = (SUPP_COPPER_ALL|SUPPORTED_Autoneg);
+ if (pAC->GIni.GIGenesis) {
+ ecmd->supported &= ~(SUPPORTED_10baseT_Half);
+ ecmd->supported &= ~(SUPPORTED_10baseT_Full);
+ ecmd->supported &= ~(SUPPORTED_100baseT_Half);
+ ecmd->supported &= ~(SUPPORTED_100baseT_Full);
+ } else {
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
+ ecmd->supported &= ~(SUPPORTED_1000baseT_Half);
+ }
+#ifdef CHIP_ID_YUKON_FE
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON_FE) {
+ ecmd->supported &= ~(SUPPORTED_1000baseT_Half);
+ ecmd->supported &= ~(SUPPORTED_1000baseT_Full);
+ }
+#endif
+ }
+ if (pAC->GIni.GP[0].PLinkSpeed != SK_LSPEED_AUTO) {
+ ecmd->advertising = AdvSpeedMap[pPort->PLinkSpeed][1];
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
+ ecmd->advertising &= ~(SUPPORTED_1000baseT_Half);
+ }
+ } else {
+ ecmd->advertising = ecmd->supported;
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ } else {
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported = SUPP_FIBRE_ALL;
+ ecmd->advertising = ADV_FIBRE_ALL;
+ }
+ return 0;
+}
+
+/*
+ * The PNMI (MIB) infrastructure uses instance values starting at 1,
+ * derived from the number of nets and the port number.
+ */
+static inline u32 pnmiInstance(const DEV_NET *pNet)
+{
+ return 1 + (pNet->pAC->RlmtNets == 2) + pNet->PortNr;
+}
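+
+/*
+ * Worked example for the helper above: on a dual-net adapter
+ * (RlmtNets == 2) port 0 maps to instance 2 and port 1 to instance 3;
+ * on a single-net adapter port 0 maps to instance 1.
+ */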
+
+/*****************************************************************************
+ *
+ * setSettings - configures the settings of a selected adapter
+ *
+ * Description:
+ * Possible settings that may be altered are a)speed, b)duplex or
+ * c)autonegotiation.
+ *
+ * Returns:
+ * 0: everything fine, no error
+ * <0: the return value is the error code of the failure
+ */
+static int setSettings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ u32 instance;
+ char buf[4];
+ int len = 1;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100
+ && ecmd->speed != SPEED_1000)
+ return -EINVAL;
+
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (ecmd->autoneg == AUTONEG_DISABLE)
+ *buf = (ecmd->duplex == DUPLEX_FULL)
+ ? SK_LMODE_FULL : SK_LMODE_HALF;
+ else
+ *buf = (ecmd->duplex == DUPLEX_FULL)
+ ? SK_LMODE_AUTOFULL : SK_LMODE_AUTOHALF;
+
+ instance = pnmiInstance(pNet);
+ if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_LINK_MODE,
+ &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK)
+ return -EINVAL;
+
+ switch(ecmd->speed) {
+ case SPEED_1000:
+ *buf = SK_LSPEED_1000MBPS;
+ break;
+ case SPEED_100:
+ *buf = SK_LSPEED_100MBPS;
+ break;
+ case SPEED_10:
+ *buf = SK_LSPEED_10MBPS;
+ }
+
+ if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE,
+ &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*****************************************************************************
+ *
+ * getDriverInfo - returns generic driver and adapter information
+ *
+ * Description:
+ * Generic driver information is returned via this function, such as
+ *	the name of the driver, its version and the firmware version.
+ * In addition to this, the location of the selected adapter is
+ * returned as a bus info string (e.g. '01:05.0').
+ *
+ * Returns: N/A
+ *
+ */
+static void getDriverInfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ const DEV_NET *pNet = netdev_priv(dev);
+ const SK_AC *pAC = pNet->pAC;
+ char vers[32];
+
+ snprintf(vers, sizeof(vers)-1, VER_STRING "(v%d.%d)",
+ (pAC->GIni.GIPciHwRev >> 4) & 0xf, pAC->GIni.GIPciHwRev & 0xf);
+
+ strlcpy(info->driver, DRIVER_FILE_NAME, sizeof(info->driver));
+ strcpy(info->version, vers);
+ strcpy(info->fw_version, "N/A");
+ strlcpy(info->bus_info, pci_name(pAC->PciDev), ETHTOOL_BUSINFO_LEN);
+}
+
+/*
+ * Ethtool statistics support.
+ */
+static const char StringsStats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets",
+ "rx_bytes", "tx_bytes",
+ "rx_errors", "tx_errors",
+ "rx_dropped", "tx_dropped",
+ "multicasts", "collisions",
+ "rx_length_errors", "rx_buffer_overflow_errors",
+ "rx_crc_errors", "rx_frame_errors",
+ "rx_too_short_errors", "rx_too_long_errors",
+ "rx_carrier_extension_errors", "rx_symbol_errors",
+ "rx_llc_mac_size_errors", "rx_carrier_errors",
+ "rx_jabber_errors", "rx_missed_errors",
+ "tx_abort_collision_errors", "tx_carrier_errors",
+ "tx_buffer_underrun_errors", "tx_heartbeat_errors",
+ "tx_window_errors",
+};
+
+static int getStatsCount(struct net_device *dev)
+{
+ return ARRAY_SIZE(StringsStats);
+}
+
+static void getStrings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch(stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *StringsStats, sizeof(StringsStats));
+ break;
+ }
+}
+
+static void getEthtoolStats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ const DEV_NET *pNet = netdev_priv(dev);
+ const SK_AC *pAC = pNet->pAC;
+ const SK_PNMI_STRUCT_DATA *pPnmiStruct = &pAC->PnmiStruct;
+
+ *data++ = pPnmiStruct->Stat[0].StatRxOkCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxOkCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxOctetsOkCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxOctetsOkCts;
+ *data++ = pPnmiStruct->InErrorsCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts;
+ *data++ = pPnmiStruct->RxNoBufCts;
+ *data++ = pPnmiStruct->TxNoBufCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxMulticastOkCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxRuntCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxFifoOverflowCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxFcsCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxFramingCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxShortsCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxTooLongCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxCextCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxSymbolCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxIRLengthCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxCarrierCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxJabberCts;
+ *data++ = pPnmiStruct->Stat[0].StatRxMissedCts;
+ *data++ = pAC->stats.tx_aborted_errors;
+ *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxFifoUnderrunCts;
+ *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts;
+ *data++ = pAC->stats.tx_window_errors;
+}
+
+
+/*****************************************************************************
+ *
+ * toggleLeds - Changes the LED state of an adapter
+ *
+ * Description:
+ * This function changes the current state of all LEDs of an adapter so
+ * that it can be located by a user.
+ *
+ * Returns: N/A
+ *
+ */
+static void toggleLeds(DEV_NET *pNet, int on)
+{
+ SK_AC *pAC = pNet->pAC;
+ int port = pNet->PortNr;
+ void __iomem *io = pAC->IoBase;
+
+ if (pAC->GIni.GIGenesis) {
+ SK_OUT8(io, MR_ADDR(port,LNK_LED_REG),
+ on ? SK_LNK_ON : SK_LNK_OFF);
+ SkGeYellowLED(pAC, io,
+ on ? (LED_ON >> 1) : (LED_OFF >> 1));
+ SkGeXmitLED(pAC, io, MR_ADDR(port,RX_LED_INI),
+ on ? SK_LED_TST : SK_LED_DIS);
+
+ if (pAC->GIni.GP[port].PhyType == SK_PHY_BCOM)
+ SkXmPhyWrite(pAC, io, port, PHY_BCOM_P_EXT_CTRL,
+ on ? PHY_B_PEC_LED_ON : PHY_B_PEC_LED_OFF);
+ else if (pAC->GIni.GP[port].PhyType == SK_PHY_LONE)
+ SkXmPhyWrite(pAC, io, port, PHY_LONE_LED_CFG,
+ on ? 0x0800 : PHY_L_LC_LEDT);
+ else
+ SkGeXmitLED(pAC, io, MR_ADDR(port,TX_LED_INI),
+ on ? SK_LED_TST : SK_LED_DIS);
+ } else {
+ const u16 YukLedOn = (PHY_M_LED_MO_DUP(MO_LED_ON) |
+ PHY_M_LED_MO_10(MO_LED_ON) |
+ PHY_M_LED_MO_100(MO_LED_ON) |
+ PHY_M_LED_MO_1000(MO_LED_ON) |
+ PHY_M_LED_MO_RX(MO_LED_ON));
+ const u16 YukLedOff = (PHY_M_LED_MO_DUP(MO_LED_OFF) |
+ PHY_M_LED_MO_10(MO_LED_OFF) |
+ PHY_M_LED_MO_100(MO_LED_OFF) |
+ PHY_M_LED_MO_1000(MO_LED_OFF) |
+ PHY_M_LED_MO_RX(MO_LED_OFF));
+
+
+ SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_CTRL,0);
+ SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_OVER,
+ on ? YukLedOn : YukLedOff);
+ }
+}
+
+/*****************************************************************************
+ *
+ * skGeBlinkTimer - Changes the LED state of an adapter
+ *
+ * Description:
+ *	Timer callback that toggles the current state of all LEDs of an
+ *	adapter so that it can be located by a user. The timer re-arms
+ *	itself every quarter second and is stopped again by locateDevice()
+ *	once the requested locate interval has elapsed.
+ *
+ * Returns: N/A
+ *
+ */
+void SkGeBlinkTimer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+
+ toggleLeds(pNet, pAC->LedsOn);
+
+ pAC->LedsOn = !pAC->LedsOn;
+ mod_timer(&pAC->BlinkTimer, jiffies + HZ/4);
+}
+
+/*****************************************************************************
+ *
+ * locateDevice - start the locate NIC feature of the selected adapter
+ *
+ * Description:
+ *	This function is used if the user wants to locate a particular NIC.
+ * All LEDs are regularly switched on and off, so the NIC can easily
+ * be identified.
+ *
+ * Returns:
+ *	always 0; the call blocks until the requested locate interval has
+ *	elapsed and the LEDs have been switched off again.
+ *
+ */
+static int locateDevice(struct net_device *dev, u32 data)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+
+ if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+
+ /* start blinking */
+ pAC->LedsOn = 0;
+ mod_timer(&pAC->BlinkTimer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&pAC->BlinkTimer);
+ toggleLeds(pNet, 0);
+
+ return 0;
+}
+
+/*****************************************************************************
+ *
+ * getPauseParams - retrieves the pause parameters
+ *
+ * Description:
+ * All current pause parameters of a selected adapter are placed
+ * in the passed ethtool_pauseparam structure and are returned.
+ *
+ * Returns: N/A
+ *
+ */
+static void getPauseParams(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr];
+
+ epause->rx_pause = (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC) ||
+ (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM);
+
+ epause->tx_pause = epause->rx_pause || (pPort->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND);
+ epause->autoneg = epause->rx_pause || epause->tx_pause;
+}
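+
+/*
+ * Worked example for the helper above: with PFlowCtrlMode set to
+ * SK_FLOW_MODE_SYM_OR_REM it reports rx_pause = 1, tx_pause = 1 and
+ * autoneg = 1; with SK_FLOW_MODE_LOC_SEND it reports rx_pause = 0,
+ * tx_pause = 1 and autoneg = 1; with SK_FLOW_MODE_NONE all three are 0.
+ */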
+
+/*****************************************************************************
+ *
+ * setPauseParams - configures the pause parameters of an adapter
+ *
+ * Description:
+ * This function sets the Rx or Tx pause parameters
+ *
+ * Returns:
+ * ==0: everything fine, no error
+ * !=0: the return value is the error code of the failure
+ */
+static int setPauseParams(struct net_device *dev , struct ethtool_pauseparam *epause)
+{
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr];
+ u32 instance = pnmiInstance(pNet);
+ struct ethtool_pauseparam old;
+ u8 oldspeed = pPort->PLinkSpeedUsed;
+ char buf[4];
+ int len = 1;
+ int ret;
+
+ /*
+ ** we have to determine the current settings to see if
+ ** the operator requested any modification of the flow
+ ** control parameters...
+ */
+ getPauseParams(dev, &old);
+
+ /*
+ ** perform modifications regarding the changes
+ ** requested by the operator
+ */
+ if (epause->autoneg != old.autoneg)
+ *buf = epause->autoneg ? SK_FLOW_MODE_NONE : SK_FLOW_MODE_SYMMETRIC;
+ else {
+ if (epause->rx_pause && epause->tx_pause)
+ *buf = SK_FLOW_MODE_SYMMETRIC;
+ else if (epause->rx_pause && !epause->tx_pause)
+ *buf = SK_FLOW_MODE_SYM_OR_REM;
+ else if (!epause->rx_pause && epause->tx_pause)
+ *buf = SK_FLOW_MODE_LOC_SEND;
+ else
+ *buf = SK_FLOW_MODE_NONE;
+ }
+
+ ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_FLOWCTRL_MODE,
+ &buf, &len, instance, pNet->NetNr);
+
+ if (ret != SK_PNMI_ERR_OK) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL,
+ ("ethtool (sk98lin): error changing rx/tx pause (%i)\n", ret));
+ goto err;
+ }
+
+ /*
+ ** It may be that autoneg has been disabled! Therefore
+ ** set the speed to the previously used value...
+ */
+ if (!epause->autoneg) {
+ len = 1;
+ ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE,
+ &oldspeed, &len, instance, pNet->NetNr);
+ if (ret != SK_PNMI_ERR_OK)
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL,
+ ("ethtool (sk98lin): error setting speed (%i)\n", ret));
+ }
+ err:
+ return ret ? -EIO : 0;
+}
+
+struct ethtool_ops SkGeEthtoolOps = {
+ .get_settings = getSettings,
+ .set_settings = setSettings,
+ .get_drvinfo = getDriverInfo,
+ .get_strings = getStrings,
+ .get_stats_count = getStatsCount,
+ .get_ethtool_stats = getEthtoolStats,
+ .phys_id = locateDevice,
+ .get_pauseparam = getPauseParams,
+ .set_pauseparam = setPauseParams,
+};
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
new file mode 100644
index 000000000000..05b827f79f54
--- /dev/null
+++ b/drivers/net/sk98lin/skge.c
@@ -0,0 +1,5186 @@
+/******************************************************************************
+ *
+ * Name: skge.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.45 $
+ * Date: $Date: 2004/02/12 14:41:02 $
+ * Purpose: The main driver source module
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * Driver for Marvell Yukon chipset and SysKonnect Gigabit Ethernet
+ * Server Adapters.
+ *
+ * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and
+ * SysKonnects GEnesis Solaris driver
+ * Author: Christoph Goos (cgoos@syskonnect.de)
+ * Mirko Lindner (mlindner@syskonnect.de)
+ *
+ * Address all question to: linux@syskonnect.de
+ *
+ * The technical manual for the adapters is available from SysKonnect's
+ * web pages: www.syskonnect.com
+ *	Go to "Support" and search the Knowledge Base for "manual".
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Possible compiler options (#define xxx / -Dxxx):
+ *
+ *	debugging can be enabled by changing SK_DEBUG_CHKMOD and
+ * SK_DEBUG_CHKCAT in makefile (described there).
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This is the main module of the Linux GE driver.
+ *
+ * All source files except skge.c, skdrv1st.h, skdrv2nd.h and sktypes.h
+ * are part of SysKonnect's COMMON MODULES for the SK-98xx adapters.
+ *	Those are used for drivers on multiple OS', so some things may seem
+ *	unnecessarily complicated on Linux. Please do not try to 'clean them
+ *	up' without VERY good reasons, because this will make it more
+ * difficult to keep the Linux driver in synchronisation with the
+ * other versions.
+ *
+ * Include file hierarchy:
+ *
+ * <linux/module.h>
+ *
+ * "h/skdrv1st.h"
+ * <linux/types.h>
+ * <linux/kernel.h>
+ * <linux/string.h>
+ * <linux/errno.h>
+ * <linux/ioport.h>
+ * <linux/slab.h>
+ * <linux/interrupt.h>
+ * <linux/pci.h>
+ * <linux/bitops.h>
+ * <asm/byteorder.h>
+ * <asm/io.h>
+ * <linux/netdevice.h>
+ * <linux/etherdevice.h>
+ * <linux/skbuff.h>
+ * those three depending on kernel version used:
+ * <linux/bios32.h>
+ * <linux/init.h>
+ * <asm/uaccess.h>
+ * <net/checksum.h>
+ *
+ * "h/skerror.h"
+ * "h/skdebug.h"
+ * "h/sktypes.h"
+ * "h/lm80.h"
+ * "h/xmac_ii.h"
+ *
+ * "h/skdrv2nd.h"
+ * "h/skqueue.h"
+ * "h/skgehwt.h"
+ * "h/sktimer.h"
+ * "h/ski2c.h"
+ * "h/skgepnmi.h"
+ * "h/skvpd.h"
+ * "h/skgehw.h"
+ * "h/skgeinit.h"
+ * "h/skaddr.h"
+ * "h/skgesirq.h"
+ * "h/skcsum.h"
+ * "h/skrlmt.h"
+ *
+ ******************************************************************************/
+
+#include "h/skversion.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+
+/*******************************************************************************
+ *
+ * Defines
+ *
+ ******************************************************************************/
+
+/* for debugging on x86 only */
+/* #define BREAKPOINT() asm(" int $3"); */
+
+/* use the transmit hw checksum driver functionality */
+#define USE_SK_TX_CHECKSUM
+
+/* use the receive hw checksum driver functionality */
+#define USE_SK_RX_CHECKSUM
+
+/* use the scatter-gather functionality with sendfile() */
+#define SK_ZEROCOPY
+
+/* use of a transmit complete interrupt */
+#define USE_TX_COMPLETE
+
+/*
+ * threshold for copying small receive frames
+ * set to 0 to avoid copying, set to 9001 to copy all frames
+ */
+#define SK_COPY_THRESHOLD 50
+
+/* number of adapters that can be configured via command line params */
+#define SK_MAX_CARD_PARAM 16
+
+
+
+/*
+ * use those defines for a compile-in version of the driver instead
+ * of command line parameters
+ */
+// #define LINK_SPEED_A {"Auto", }
+// #define LINK_SPEED_B {"Auto", }
+// #define AUTO_NEG_A {"Sense", }
+// #define AUTO_NEG_B {"Sense", }
+// #define DUP_CAP_A {"Both", }
+// #define DUP_CAP_B {"Both", }
+// #define FLOW_CTRL_A {"SymOrRem", }
+// #define FLOW_CTRL_B {"SymOrRem", }
+// #define ROLE_A {"Auto", }
+// #define ROLE_B {"Auto", }
+// #define PREF_PORT {"A", }
+// #define CON_TYPE {"Auto", }
+// #define RLMT_MODE {"CheckLinkState", }
+
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb)
+#define DEV_KFREE_SKB_IRQ(skb) dev_kfree_skb_irq(skb)
+#define DEV_KFREE_SKB_ANY(skb) dev_kfree_skb_any(skb)
+
+
+/* Set blink mode*/
+#define OEM_CONFIG_VALUE ( SK_ACT_LED_BLINK | \
+ SK_DUP_LED_NORMAL | \
+ SK_LED_LINK100_ON)
+
+
+/* Isr return value */
+#define SkIsrRetVar irqreturn_t
+#define SkIsrRetNone IRQ_NONE
+#define SkIsrRetHandled IRQ_HANDLED
+
+
+/*******************************************************************************
+ *
+ * Local Function Prototypes
+ *
+ ******************************************************************************/
+
+static void FreeResources(struct SK_NET_DEVICE *dev);
+static int SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC);
+static SK_BOOL BoardAllocMem(SK_AC *pAC);
+static void BoardFreeMem(SK_AC *pAC);
+static void BoardInitMem(SK_AC *pAC);
+static void SetupRing(SK_AC*, void*, uintptr_t, RXD**, RXD**, RXD**, int*, SK_BOOL);
+static SkIsrRetVar SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs);
+static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs);
+static int SkGeOpen(struct SK_NET_DEVICE *dev);
+static int SkGeClose(struct SK_NET_DEVICE *dev);
+static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev);
+static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p);
+static void SkGeSetRxMode(struct SK_NET_DEVICE *dev);
+static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev);
+static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd);
+static void GetConfiguration(SK_AC*);
+static void ProductStr(SK_AC*);
+static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*);
+static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*);
+static void FillRxRing(SK_AC*, RX_PORT*);
+static SK_BOOL FillRxDescriptor(SK_AC*, RX_PORT*);
+static void ReceiveIrq(SK_AC*, RX_PORT*, SK_BOOL);
+static void ClearAndStartRx(SK_AC*, int);
+static void ClearTxIrq(SK_AC*, int, int);
+static void ClearRxRing(SK_AC*, RX_PORT*);
+static void ClearTxRing(SK_AC*, TX_PORT*);
+static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int new_mtu);
+static void PortReInitBmu(SK_AC*, int);
+static int SkGeIocMib(DEV_NET*, unsigned int, int);
+static int SkGeInitPCI(SK_AC *pAC);
+static void StartDrvCleanupTimer(SK_AC *pAC);
+static void StopDrvCleanupTimer(SK_AC *pAC);
+static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*);
+
+#ifdef SK_DIAG_SUPPORT
+static SK_U32 ParseDeviceNbrFromSlotName(const char *SlotName);
+static int SkDrvInitAdapter(SK_AC *pAC, int devNbr);
+static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
+#endif
+
+/*******************************************************************************
+ *
+ * Extern Function Prototypes
+ *
+ ******************************************************************************/
+static const char SKRootName[] = "sk98lin";
+static struct proc_dir_entry *pSkRootDir;
+extern struct file_operations sk_proc_fops;
+
+static inline void SkGeProcCreate(struct net_device *dev)
+{
+ struct proc_dir_entry *pe;
+
+ if (pSkRootDir &&
+ (pe = create_proc_entry(dev->name, S_IRUGO, pSkRootDir))) {
+ pe->proc_fops = &sk_proc_fops;
+ pe->data = dev;
+ pe->owner = THIS_MODULE;
+ }
+}
+
+static inline void SkGeProcRemove(struct net_device *dev)
+{
+ if (pSkRootDir)
+ remove_proc_entry(dev->name, pSkRootDir);
+}
+
+extern void SkDimEnableModerationIfNeeded(SK_AC *pAC);
+extern void SkDimDisplayModerationSettings(SK_AC *pAC);
+extern void SkDimStartModerationTimer(SK_AC *pAC);
+extern void SkDimModerate(SK_AC *pAC);
+extern void SkGeBlinkTimer(unsigned long data);
+
+#ifdef DEBUG
+static void DumpMsg(struct sk_buff*, char*);
+static void DumpData(char*, int);
+static void DumpLong(char*, int);
+#endif
+
+/* global variables *********************************************************/
+static SK_BOOL DoPrintInterfaceChange = SK_TRUE;
+extern struct ethtool_ops SkGeEthtoolOps;
+
+/* local variables **********************************************************/
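+/* register block offsets (relative to IoBase) of the transmit and receive
+ * queues of each MAC; used to address the queue registers (Q_DA_L etc.)
+ */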
+static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}};
+static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480};
+
+/*****************************************************************************
+ *
+ * SkGeInitPCI - Init the PCI resources
+ *
+ * Description:
+ *	This function initializes the PCI resources and IO
+ *
+ * Returns: N/A
+ *
+ */
+int SkGeInitPCI(SK_AC *pAC)
+{
+ struct SK_NET_DEVICE *dev = pAC->dev[0];
+ struct pci_dev *pdev = pAC->PciDev;
+ int retval;
+
+ if (pci_enable_device(pdev) != 0) {
+ return 1;
+ }
+
+ dev->mem_start = pci_resource_start (pdev, 0);
+ pci_set_master(pdev);
+
+ if (pci_request_regions(pdev, pAC->Name) != 0) {
+ retval = 2;
+ goto out_disable;
+ }
+
+#ifdef SK_BIG_ENDIAN
+ /*
+	 * On big endian machines, we use the adapter's ability to
+	 * read the descriptors as big endian.
+ */
+ {
+ SK_U32 our2;
+ SkPciReadCfgDWord(pAC, PCI_OUR_REG_2, &our2);
+ our2 |= PCI_REV_DESC;
+ SkPciWriteCfgDWord(pAC, PCI_OUR_REG_2, our2);
+ }
+#endif
+
+ /*
+ * Remap the regs into kernel space.
+ */
+ pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000);
+
+ if (!pAC->IoBase){
+ retval = 3;
+ goto out_release;
+ }
+
+ return 0;
+
+ out_release:
+ pci_release_regions(pdev);
+ out_disable:
+ pci_disable_device(pdev);
+ return retval;
+}
+
+
+/*****************************************************************************
+ *
+ * FreeResources - release resources allocated for adapter
+ *
+ * Description:
+ * This function releases the IRQ, unmaps the IO and
+ *	frees the descriptor ring.
+ *
+ * Returns: N/A
+ *
+ */
+static void FreeResources(struct SK_NET_DEVICE *dev)
+{
+SK_U32 AllocFlag;
+DEV_NET *pNet;
+SK_AC *pAC;
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+ AllocFlag = pAC->AllocFlag;
+ if (pAC->PciDev) {
+ pci_release_regions(pAC->PciDev);
+ }
+ if (AllocFlag & SK_ALLOC_IRQ) {
+ free_irq(dev->irq, dev);
+ }
+ if (pAC->IoBase) {
+ iounmap(pAC->IoBase);
+ }
+ if (pAC->pDescrMem) {
+ BoardFreeMem(pAC);
+ }
+
+} /* FreeResources */
+
+MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
+MODULE_DESCRIPTION("SysKonnect SK-NET Gigabit Ethernet SK-98xx driver");
+MODULE_LICENSE("GPL");
+
+#ifdef LINK_SPEED_A
+static char *Speed_A[SK_MAX_CARD_PARAM] = LINK_SPEED_A;
+#else
+static char *Speed_A[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef LINK_SPEED_B
+static char *Speed_B[SK_MAX_CARD_PARAM] = LINK_SPEED_B;
+#else
+static char *Speed_B[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef AUTO_NEG_A
+static char *AutoNeg_A[SK_MAX_CARD_PARAM] = AUTO_NEG_A;
+#else
+static char *AutoNeg_A[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef DUP_CAP_A
+static char *DupCap_A[SK_MAX_CARD_PARAM] = DUP_CAP_A;
+#else
+static char *DupCap_A[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef FLOW_CTRL_A
+static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = FLOW_CTRL_A;
+#else
+static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef ROLE_A
+static char *Role_A[SK_MAX_CARD_PARAM] = ROLE_A;
+#else
+static char *Role_A[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef AUTO_NEG_B
+static char *AutoNeg_B[SK_MAX_CARD_PARAM] = AUTO_NEG_B;
+#else
+static char *AutoNeg_B[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef DUP_CAP_B
+static char *DupCap_B[SK_MAX_CARD_PARAM] = DUP_CAP_B;
+#else
+static char *DupCap_B[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef FLOW_CTRL_B
+static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = FLOW_CTRL_B;
+#else
+static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef ROLE_B
+static char *Role_B[SK_MAX_CARD_PARAM] = ROLE_B;
+#else
+static char *Role_B[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef CON_TYPE
+static char *ConType[SK_MAX_CARD_PARAM] = CON_TYPE;
+#else
+static char *ConType[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef PREF_PORT
+static char *PrefPort[SK_MAX_CARD_PARAM] = PREF_PORT;
+#else
+static char *PrefPort[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+#ifdef RLMT_MODE
+static char *RlmtMode[SK_MAX_CARD_PARAM] = RLMT_MODE;
+#else
+static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", };
+#endif
+
+static int IntsPerSec[SK_MAX_CARD_PARAM];
+static char *Moderation[SK_MAX_CARD_PARAM];
+static char *ModerationMask[SK_MAX_CARD_PARAM];
+static char *AutoSizing[SK_MAX_CARD_PARAM];
+static char *Stats[SK_MAX_CARD_PARAM];
+
+module_param_array(Speed_A, charp, NULL, 0);
+module_param_array(Speed_B, charp, NULL, 0);
+module_param_array(AutoNeg_A, charp, NULL, 0);
+module_param_array(AutoNeg_B, charp, NULL, 0);
+module_param_array(DupCap_A, charp, NULL, 0);
+module_param_array(DupCap_B, charp, NULL, 0);
+module_param_array(FlowCtrl_A, charp, NULL, 0);
+module_param_array(FlowCtrl_B, charp, NULL, 0);
+module_param_array(Role_A, charp, NULL, 0);
+module_param_array(Role_B, charp, NULL, 0);
+module_param_array(ConType, charp, NULL, 0);
+module_param_array(PrefPort, charp, NULL, 0);
+module_param_array(RlmtMode, charp, NULL, 0);
+/* used for interrupt moderation */
+module_param_array(IntsPerSec, int, NULL, 0);
+module_param_array(Moderation, charp, NULL, 0);
+module_param_array(Stats, charp, NULL, 0);
+module_param_array(ModerationMask, charp, NULL, 0);
+module_param_array(AutoSizing, charp, NULL, 0);
+
+/*****************************************************************************
+ *
+ * SkGeBoardInit - do level 0 and 1 initialization
+ *
+ * Description:
+ *	This function prepares the board hardware for running. The descriptor
+ * ring is set up, the IRQ is allocated and the configuration settings
+ * are examined.
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ */
+static int __init SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC)
+{
+short i;
+unsigned long Flags;
+char *DescrString = "sk98lin: Driver for Linux"; /* this is given to PNMI */
+char *VerStr = VER_STRING;
+int Ret; /* return code of request_irq */
+SK_BOOL DualNet;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("IoBase: %08lX\n", (unsigned long)pAC->IoBase));
+ for (i=0; i<SK_MAX_MACS; i++) {
+ pAC->TxPort[i][0].HwAddr = pAC->IoBase + TxQueueAddr[i][0];
+ pAC->TxPort[i][0].PortIndex = i;
+ pAC->RxPort[i].HwAddr = pAC->IoBase + RxQueueAddr[i];
+ pAC->RxPort[i].PortIndex = i;
+ }
+
+ /* Initialize the mutexes */
+ for (i=0; i<SK_MAX_MACS; i++) {
+ spin_lock_init(&pAC->TxPort[i][0].TxDesRingLock);
+ spin_lock_init(&pAC->RxPort[i].RxDesRingLock);
+ }
+ spin_lock_init(&pAC->SlowPathLock);
+
+ /* setup phy_id blink timer */
+ pAC->BlinkTimer.function = SkGeBlinkTimer;
+ pAC->BlinkTimer.data = (unsigned long) dev;
+ init_timer(&pAC->BlinkTimer);
+
+ /* level 0 init common modules here */
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ /* Does a RESET on board ...*/
+ if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) {
+ printk("HWInit (0) failed.\n");
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ return(-EAGAIN);
+ }
+ SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA);
+ SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA);
+ SkPnmiInit( pAC, pAC->IoBase, SK_INIT_DATA);
+ SkAddrInit( pAC, pAC->IoBase, SK_INIT_DATA);
+ SkRlmtInit( pAC, pAC->IoBase, SK_INIT_DATA);
+ SkTimerInit(pAC, pAC->IoBase, SK_INIT_DATA);
+
+ pAC->BoardLevel = SK_INIT_DATA;
+ pAC->RxBufSize = ETH_BUF_SIZE;
+
+ SK_PNMI_SET_DRIVER_DESCR(pAC, DescrString);
+ SK_PNMI_SET_DRIVER_VER(pAC, VerStr);
+
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ /* level 1 init common modules here (HW init) */
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
+ printk("sk98lin: HWInit (1) failed.\n");
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ return(-EAGAIN);
+ }
+ SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
+ SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO);
+
+ /* Set chipset type support */
+ pAC->ChipsetType = 0;
+ if ((pAC->GIni.GIChipId == CHIP_ID_YUKON) ||
+ (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE)) {
+ pAC->ChipsetType = 1;
+ }
+
+ GetConfiguration(pAC);
+ if (pAC->RlmtNets == 2) {
+ pAC->GIni.GIPortUsage = SK_MUL_LINK;
+ }
+
+ pAC->BoardLevel = SK_INIT_IO;
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ if (pAC->GIni.GIMacsFound == 2) {
+ Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev);
+ } else if (pAC->GIni.GIMacsFound == 1) {
+ Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ,
+ pAC->Name, dev);
+ } else {
+ printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
+ pAC->GIni.GIMacsFound);
+ return -EAGAIN;
+ }
+
+ if (Ret) {
+ printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n",
+ dev->irq);
+ return -EAGAIN;
+ }
+ pAC->AllocFlag |= SK_ALLOC_IRQ;
+
+ /* Alloc memory for this board (Mem for RxD/TxD) : */
+ if(!BoardAllocMem(pAC)) {
+ printk("No memory for descriptor rings.\n");
+ return(-EAGAIN);
+ }
+
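+	/* query the checksum start offsets for IP/TCP/UDP from the common
+	 * checksum module and pack them into one 32 bit value (offset 2 in
+	 * the upper half); this value is later written into the descriptors
+	 * (see SetupRing)
+	 */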
+ SkCsSetReceiveFlags(pAC,
+ SKCS_PROTO_IP | SKCS_PROTO_TCP | SKCS_PROTO_UDP,
+ &pAC->CsOfs1, &pAC->CsOfs2, 0);
+ pAC->CsOfs = (pAC->CsOfs2 << 16) | pAC->CsOfs1;
+
+ BoardInitMem(pAC);
+ /* tschilling: New common function with minimum size check. */
+ DualNet = SK_FALSE;
+ if (pAC->RlmtNets == 2) {
+ DualNet = SK_TRUE;
+ }
+
+ if (SkGeInitAssignRamToQueues(
+ pAC,
+ pAC->ActivePort,
+ DualNet)) {
+ BoardFreeMem(pAC);
+ printk("sk98lin: SkGeInitAssignRamToQueues failed.\n");
+ return(-EAGAIN);
+ }
+
+ return (0);
+} /* SkGeBoardInit */
+
+
+/*****************************************************************************
+ *
+ * BoardAllocMem - allocate the memory for the descriptor rings
+ *
+ * Description:
+ * This function allocates the memory for all descriptor rings.
+ *	Each ring is aligned to the descriptor alignment and no ring
+ *	crosses a 4 GByte boundary (because the upper 32 bits must
+ *	be constant for all descriptors in one ring).
+ *
+ * Returns:
+ * SK_TRUE, if all memory could be allocated
+ * SK_FALSE, if not
+ */
+static SK_BOOL BoardAllocMem(
+SK_AC *pAC)
+{
+caddr_t pDescrMem; /* pointer to descriptor memory area */
+size_t AllocLength; /* length of complete descriptor area */
+int i; /* loop counter */
+unsigned long BusAddr;
+
+
+ /* rings plus one for alignment (do not cross 4 GB boundary) */
+ /* RX_RING_SIZE is assumed bigger than TX_RING_SIZE */
+#if (BITS_PER_LONG == 32)
+ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8;
+#else
+ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound
+ + RX_RING_SIZE + 8;
+#endif
+
+ pDescrMem = pci_alloc_consistent(pAC->PciDev, AllocLength,
+ &pAC->pDescrMemDMA);
+
+ if (pDescrMem == NULL) {
+ return (SK_FALSE);
+ }
+ pAC->pDescrMem = pDescrMem;
+ BusAddr = (unsigned long) pAC->pDescrMemDMA;
+
+ /* Descriptors need 8 byte alignment, and this is ensured
+ * by pci_alloc_consistent.
+ */
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
+ ("TX%d/A: pDescrMem: %lX, PhysDescrMem: %lX\n",
+ i, (unsigned long) pDescrMem,
+ BusAddr));
+ pAC->TxPort[i][0].pTxDescrRing = pDescrMem;
+ pAC->TxPort[i][0].VTxDescrRing = BusAddr;
+ pDescrMem += TX_RING_SIZE;
+ BusAddr += TX_RING_SIZE;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
+ ("RX%d: pDescrMem: %lX, PhysDescrMem: %lX\n",
+ i, (unsigned long) pDescrMem,
+ (unsigned long)BusAddr));
+ pAC->RxPort[i].pRxDescrRing = pDescrMem;
+ pAC->RxPort[i].VRxDescrRing = BusAddr;
+ pDescrMem += RX_RING_SIZE;
+ BusAddr += RX_RING_SIZE;
+ } /* for */
+
+ return (SK_TRUE);
+} /* BoardAllocMem */
+
+
+/****************************************************************************
+ *
+ * BoardFreeMem - reverse of BoardAllocMem
+ *
+ * Description:
+ *	Free the descriptor ring memory allocated in BoardAllocMem.
+ *
+ * Returns: N/A
+ */
+static void BoardFreeMem(
+SK_AC *pAC)
+{
+size_t AllocLength; /* length of complete descriptor area */
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("BoardFreeMem\n"));
+#if (BITS_PER_LONG == 32)
+ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8;
+#else
+ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound
+ + RX_RING_SIZE + 8;
+#endif
+
+ pci_free_consistent(pAC->PciDev, AllocLength,
+ pAC->pDescrMem, pAC->pDescrMemDMA);
+ pAC->pDescrMem = NULL;
+} /* BoardFreeMem */
+
+
+/*****************************************************************************
+ *
+ * BoardInitMem - initialize the descriptor rings
+ *
+ * Description:
+ * This function sets the descriptor rings up in memory.
+ * The adapter is initialized with the descriptor start addresses.
+ *
+ * Returns: N/A
+ */
+static void BoardInitMem(
+SK_AC *pAC) /* pointer to adapter context */
+{
+int i; /* loop counter */
+int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/
+int TxDescrSize; /* the size of a tx descriptor rounded up to alignment*/
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("BoardInitMem\n"));
+
+ RxDescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN;
+ pAC->RxDescrPerRing = RX_RING_SIZE / RxDescrSize;
+ TxDescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN;
+	pAC->TxDescrPerRing = TX_RING_SIZE / TxDescrSize;
+
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ SetupRing(
+ pAC,
+ pAC->TxPort[i][0].pTxDescrRing,
+ pAC->TxPort[i][0].VTxDescrRing,
+ (RXD**)&pAC->TxPort[i][0].pTxdRingHead,
+ (RXD**)&pAC->TxPort[i][0].pTxdRingTail,
+ (RXD**)&pAC->TxPort[i][0].pTxdRingPrev,
+ &pAC->TxPort[i][0].TxdRingFree,
+ SK_TRUE);
+ SetupRing(
+ pAC,
+ pAC->RxPort[i].pRxDescrRing,
+ pAC->RxPort[i].VRxDescrRing,
+ &pAC->RxPort[i].pRxdRingHead,
+ &pAC->RxPort[i].pRxdRingTail,
+ &pAC->RxPort[i].pRxdRingPrev,
+ &pAC->RxPort[i].RxdRingFree,
+ SK_FALSE);
+ }
+} /* BoardInitMem */
+
+
+/*****************************************************************************
+ *
+ * SetupRing - create one descriptor ring
+ *
+ * Description:
+ * This function creates one descriptor ring in the given memory area.
+ * The head, tail and number of free descriptors in the ring are set.
+ *
+ * Returns:
+ * none
+ */
+static void SetupRing(
+SK_AC *pAC,
+void *pMemArea, /* a pointer to the memory area for the ring */
+uintptr_t VMemArea, /* the virtual bus address of the memory area */
+RXD **ppRingHead, /* address where the head should be written */
+RXD **ppRingTail, /* address where the tail should be written */
+RXD **ppRingPrev, /* address where the previous pointer should be written */
+int *pRingFree, /* address where the # of free descr. goes */
+SK_BOOL IsTx) /* flag: is this a tx ring */
+{
+int i; /* loop counter */
+int DescrSize; /* the size of a descriptor rounded up to alignment*/
+int DescrNum; /* number of descriptors per ring */
+RXD *pDescr; /* pointer to a descriptor (receive or transmit) */
+RXD *pNextDescr; /* pointer to the next descriptor */
+RXD *pPrevDescr; /* pointer to the previous descriptor */
+uintptr_t VNextDescr; /* the virtual bus address of the next descriptor */
+
+ if (IsTx == SK_TRUE) {
+ DescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) *
+ DESCR_ALIGN;
+ DescrNum = TX_RING_SIZE / DescrSize;
+ } else {
+ DescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) *
+ DESCR_ALIGN;
+ DescrNum = RX_RING_SIZE / DescrSize;
+ }
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
+ ("Descriptor size: %d Descriptor Number: %d\n",
+ DescrSize,DescrNum));
+
+ pDescr = (RXD*) pMemArea;
+ pPrevDescr = NULL;
+ pNextDescr = (RXD*) (((char*)pDescr) + DescrSize);
+ VNextDescr = VMemArea + DescrSize;
+ for(i=0; i<DescrNum; i++) {
+ /* set the pointers right */
+ pDescr->VNextRxd = VNextDescr & 0xffffffffULL;
+ pDescr->pNextRxd = pNextDescr;
+ pDescr->TcpSumStarts = pAC->CsOfs;
+
+ /* advance one step */
+ pPrevDescr = pDescr;
+ pDescr = pNextDescr;
+ pNextDescr = (RXD*) (((char*)pDescr) + DescrSize);
+ VNextDescr += DescrSize;
+ }
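+	/* close the ring: the last descriptor points back to the first one */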
+ pPrevDescr->pNextRxd = (RXD*) pMemArea;
+ pPrevDescr->VNextRxd = VMemArea;
+ pDescr = (RXD*) pMemArea;
+ *ppRingHead = (RXD*) pMemArea;
+ *ppRingTail = *ppRingHead;
+ *ppRingPrev = pPrevDescr;
+ *pRingFree = DescrNum;
+} /* SetupRing */
+
+
+/*****************************************************************************
+ *
+ * PortReInitBmu - re-initialize the descriptor rings for one port
+ *
+ * Description:
+ * This function reinitializes the descriptor rings of one port
+ * in memory. The port must be stopped before.
+ * The HW is initialized with the descriptor start addresses.
+ *
+ * Returns:
+ * none
+ */
+static void PortReInitBmu(
+SK_AC *pAC, /* pointer to adapter context */
+int PortIndex) /* index of the port for which to re-init */
+{
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("PortReInitBmu "));
+
+ /* set address of first descriptor of ring in BMU */
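+	/* the bus address of the ring head is computed as
+	 * (virtual head - virtual ring start) + bus address of ring start
+	 */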
+ SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_L,
+ (uint32_t)(((caddr_t)
+ (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) -
+ pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing +
+ pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) &
+ 0xFFFFFFFF));
+ SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_H,
+ (uint32_t)(((caddr_t)
+ (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) -
+ pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing +
+ pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) >> 32));
+ SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_L,
+ (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) -
+ pAC->RxPort[PortIndex].pRxDescrRing +
+ pAC->RxPort[PortIndex].VRxDescrRing) & 0xFFFFFFFF));
+ SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_H,
+ (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) -
+ pAC->RxPort[PortIndex].pRxDescrRing +
+ pAC->RxPort[PortIndex].VRxDescrRing) >> 32));
+} /* PortReInitBmu */
+
+
+/****************************************************************************
+ *
+ * SkGeIsr - handle adapter interrupts
+ *
+ * Description:
+ * The interrupt routine is called when the network adapter
+ * generates an interrupt. It may also be called if another device
+ * shares this interrupt vector with the driver.
+ *
+ * Returns: N/A
+ *
+ */
+static SkIsrRetVar SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id;
+DEV_NET *pNet;
+SK_AC *pAC;
+SK_U32 IntSrc; /* interrupts source register contents */
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ /*
+	 * Check and process if it's our interrupt
+ */
+ SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc);
+ if (IntSrc == 0) {
+ return SkIsrRetNone;
+ }
+
+ while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) {
+#if 0 /* software irq currently not used */
+ if (IntSrc & IS_IRQ_SW) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("Software IRQ\n"));
+ }
+#endif
+ if (IntSrc & IS_R1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF RX1 IRQ\n"));
+ ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
+ SK_PNMI_CNT_RX_INTR(pAC, 0);
+ }
+ if (IntSrc & IS_R2_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF RX2 IRQ\n"));
+ ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE);
+ SK_PNMI_CNT_RX_INTR(pAC, 1);
+ }
+#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
+ if (IntSrc & IS_XA1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF AS TX1 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 0);
+ spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
+ FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]);
+ spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
+ }
+ if (IntSrc & IS_XA2_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF AS TX2 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 1);
+ spin_lock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock);
+ FreeTxDescriptors(pAC, &pAC->TxPort[1][TX_PRIO_LOW]);
+ spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock);
+ }
+#if 0 /* only if sync. queues used */
+ if (IntSrc & IS_XS1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF SY TX1 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 1);
+ spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
+ FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH);
+ spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
+ ClearTxIrq(pAC, 0, TX_PRIO_HIGH);
+ }
+ if (IntSrc & IS_XS2_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF SY TX2 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 1);
+ spin_lock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock);
+ FreeTxDescriptors(pAC, 1, TX_PRIO_HIGH);
+ spin_unlock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock);
+ ClearTxIrq(pAC, 1, TX_PRIO_HIGH);
+ }
+#endif
+#endif
+
+ /* do all IO at once */
+ if (IntSrc & IS_R1_F)
+ ClearAndStartRx(pAC, 0);
+ if (IntSrc & IS_R2_F)
+ ClearAndStartRx(pAC, 1);
+#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
+ if (IntSrc & IS_XA1_F)
+ ClearTxIrq(pAC, 0, TX_PRIO_LOW);
+ if (IntSrc & IS_XA2_F)
+ ClearTxIrq(pAC, 1, TX_PRIO_LOW);
+#endif
+ SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc);
+ } /* while (IntSrc & IRQ_MASK != 0) */
+
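+	/* only interrupt sources that are currently enabled are of interest
+	 * for the special (slow path) handling below
+	 */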
+ IntSrc &= pAC->GIni.GIValIrqMask;
+ if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC,
+ ("SPECIAL IRQ DP-Cards => %x\n", IntSrc));
+ pAC->CheckQueue = SK_FALSE;
+ spin_lock(&pAC->SlowPathLock);
+ if (IntSrc & SPECIAL_IRQS)
+ SkGeSirqIsr(pAC, pAC->IoBase, IntSrc);
+
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock(&pAC->SlowPathLock);
+ }
+ /*
+	 * do it all again in case we cleared an interrupt that
+ * came in after handling the ring (OUTs may be delayed
+ * in hardware buffers, but are through after IN)
+ *
+ * rroesler: has been commented out and shifted to
+ * SkGeDrvEvent(), because it is timer
+ * guarded now
+ *
+ ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
+ ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE);
+ */
+
+ if (pAC->CheckQueue) {
+ pAC->CheckQueue = SK_FALSE;
+ spin_lock(&pAC->SlowPathLock);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock(&pAC->SlowPathLock);
+ }
+
+ /* IRQ is processed - Enable IRQs again*/
+ SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
+
+ return SkIsrRetHandled;
+} /* SkGeIsr */
+
+
+/****************************************************************************
+ *
+ * SkGeIsrOnePort - handle adapter interrupts for single port adapter
+ *
+ * Description:
+ * The interrupt routine is called when the network adapter
+ * generates an interrupt. It may also be called if another device
+ * shares this interrupt vector with the driver.
+ * This is the same as above, but handles only one port.
+ *
+ * Returns: N/A
+ *
+ */
+static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id;
+DEV_NET *pNet;
+SK_AC *pAC;
+SK_U32 IntSrc; /* interrupts source register contents */
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ /*
+	 * Check and process if it's our interrupt
+ */
+ SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc);
+ if (IntSrc == 0) {
+ return SkIsrRetNone;
+ }
+
+ while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) {
+#if 0 /* software irq currently not used */
+ if (IntSrc & IS_IRQ_SW) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("Software IRQ\n"));
+ }
+#endif
+ if (IntSrc & IS_R1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF RX1 IRQ\n"));
+ ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
+ SK_PNMI_CNT_RX_INTR(pAC, 0);
+ }
+#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
+ if (IntSrc & IS_XA1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF AS TX1 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 0);
+ spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
+ FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]);
+ spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
+ }
+#if 0 /* only if sync. queues used */
+ if (IntSrc & IS_XS1_F) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_INT_SRC,
+ ("EOF SY TX1 IRQ\n"));
+ SK_PNMI_CNT_TX_INTR(pAC, 0);
+ spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
+ FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH);
+ spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
+ ClearTxIrq(pAC, 0, TX_PRIO_HIGH);
+ }
+#endif
+#endif
+
+ /* do all IO at once */
+ if (IntSrc & IS_R1_F)
+ ClearAndStartRx(pAC, 0);
+#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
+ if (IntSrc & IS_XA1_F)
+ ClearTxIrq(pAC, 0, TX_PRIO_LOW);
+#endif
+ SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc);
+ } /* while (IntSrc & IRQ_MASK != 0) */
+
+ IntSrc &= pAC->GIni.GIValIrqMask;
+ if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC,
+ ("SPECIAL IRQ SP-Cards => %x\n", IntSrc));
+ pAC->CheckQueue = SK_FALSE;
+ spin_lock(&pAC->SlowPathLock);
+ if (IntSrc & SPECIAL_IRQS)
+ SkGeSirqIsr(pAC, pAC->IoBase, IntSrc);
+
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock(&pAC->SlowPathLock);
+ }
+ /*
+	 * do it all again in case we cleared an interrupt that
+ * came in after handling the ring (OUTs may be delayed
+ * in hardware buffers, but are through after IN)
+ *
+ * rroesler: has been commented out and shifted to
+ * SkGeDrvEvent(), because it is timer
+ * guarded now
+ *
+ ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
+ */
+
+ /* IRQ is processed - Enable IRQs again*/
+ SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
+
+ return SkIsrRetHandled;
+} /* SkGeIsrOnePort */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/****************************************************************************
+ *
+ * SkGePollController - polling receive, for netconsole
+ *
+ * Description:
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ *
+ * Returns: N/A
+ */
+static void SkGePollController(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ SkGeIsr(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/****************************************************************************
+ *
+ * SkGeOpen - handle start of initialized adapter
+ *
+ * Description:
+ * This function starts the initialized adapter.
+ * The board level variable is set and the adapter is
+ * brought to full functionality.
+ * The device flags are set for operation.
+ * Do all necessary level 2 initialization, enable interrupts and
+ * give start command to RLMT.
+ *
+ * Returns:
+ * 0 on success
+ * != 0 on error
+ */
+static int SkGeOpen(
+struct SK_NET_DEVICE *dev)
+{
+ DEV_NET *pNet;
+ SK_AC *pAC;
+ unsigned long Flags; /* for spin lock */
+ int i;
+ SK_EVPARA EvPara; /* an event parameter union */
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeOpen: pAC=0x%lX:\n", (unsigned long)pAC));
+
+#ifdef SK_DIAG_SUPPORT
+ if (pAC->DiagModeActive == DIAG_ACTIVE) {
+ if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
+ return (-1); /* still in use by diag; deny actions */
+ }
+ }
+#endif
+
+ /* Set blink mode */
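+	/* 0x1186 (D-Link) and 0x11ab (Marvell) boards use the OEM LED setup */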
+ if ((pAC->PciDev->vendor == 0x1186) || (pAC->PciDev->vendor == 0x11ab ))
+ pAC->GIni.GILedBlinkCtrl = OEM_CONFIG_VALUE;
+
+ if (pAC->BoardLevel == SK_INIT_DATA) {
+ /* level 1 init common modules here */
+ if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
+ printk("%s: HWInit (1) failed.\n", pAC->dev[pNet->PortNr]->name);
+ return (-1);
+ }
+ SkI2cInit (pAC, pAC->IoBase, SK_INIT_IO);
+ SkEventInit (pAC, pAC->IoBase, SK_INIT_IO);
+ SkPnmiInit (pAC, pAC->IoBase, SK_INIT_IO);
+ SkAddrInit (pAC, pAC->IoBase, SK_INIT_IO);
+ SkRlmtInit (pAC, pAC->IoBase, SK_INIT_IO);
+ SkTimerInit (pAC, pAC->IoBase, SK_INIT_IO);
+ pAC->BoardLevel = SK_INIT_IO;
+ }
+
+ if (pAC->BoardLevel != SK_INIT_RUN) {
+ /* tschilling: Level 2 init modules here, check return value. */
+ if (SkGeInit(pAC, pAC->IoBase, SK_INIT_RUN) != 0) {
+ printk("%s: HWInit (2) failed.\n", pAC->dev[pNet->PortNr]->name);
+ return (-1);
+ }
+ SkI2cInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ SkEventInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ SkPnmiInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ SkAddrInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ SkRlmtInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ SkTimerInit (pAC, pAC->IoBase, SK_INIT_RUN);
+ pAC->BoardLevel = SK_INIT_RUN;
+ }
+
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ /* Enable transmit descriptor polling. */
+ SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE);
+ FillRxRing(pAC, &pAC->RxPort[i]);
+ }
+ SkGeYellowLED(pAC, pAC->IoBase, 1);
+
+ StartDrvCleanupTimer(pAC);
+ SkDimEnableModerationIfNeeded(pAC);
+ SkDimDisplayModerationSettings(pAC);
+
+ pAC->GIni.GIValIrqMask &= IRQ_MASK;
+
+ /* enable Interrupts */
+ SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
+ SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+
+ if ((pAC->RlmtMode != 0) && (pAC->MaxPorts == 0)) {
+ EvPara.Para32[0] = pAC->RlmtNets;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS,
+ EvPara);
+ EvPara.Para32[0] = pAC->RlmtMode;
+ EvPara.Para32[1] = 0;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_MODE_CHANGE,
+ EvPara);
+ }
+
+ EvPara.Para32[0] = pNet->NetNr;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ pAC->MaxPorts++;
+ pNet->Up = 1;
+
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeOpen suceeded\n"));
+
+ return (0);
+} /* SkGeOpen */
+
+
+/****************************************************************************
+ *
+ * SkGeClose - Stop initialized adapter
+ *
+ * Description:
+ * Close initialized adapter.
+ *
+ * Returns:
+ * 0 - on success
+ * error code - on error
+ */
+static int SkGeClose(
+struct SK_NET_DEVICE *dev)
+{
+ DEV_NET *pNet;
+ DEV_NET *newPtrNet;
+ SK_AC *pAC;
+
+ unsigned long Flags; /* for spin lock */
+ int i;
+ int PortIdx;
+ SK_EVPARA EvPara;
+
+	pNet = netdev_priv(dev);
+	pAC = pNet->pAC;
+
+	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+		("SkGeClose: pAC=0x%lX ", (unsigned long)pAC));
+
+#ifdef SK_DIAG_SUPPORT
+ if (pAC->DiagModeActive == DIAG_ACTIVE) {
+ if (pAC->DiagFlowCtrl == SK_FALSE) {
+ /*
+ ** notify that the interface which has been closed
+ ** by operator interaction must not be started up
+ ** again when the DIAG has finished.
+ */
+ newPtrNet = netdev_priv(pAC->dev[0]);
+ if (newPtrNet == pNet) {
+ pAC->WasIfUp[0] = SK_FALSE;
+ } else {
+ pAC->WasIfUp[1] = SK_FALSE;
+ }
+ return 0; /* return to system everything is fine... */
+ } else {
+ pAC->DiagFlowCtrl = SK_FALSE;
+ }
+ }
+#endif
+
+ netif_stop_queue(dev);
+
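+	/* single net mode: address the currently active port;
+	 * dual net mode: address the port that belongs to this interface
+	 */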
+ if (pAC->RlmtNets == 1)
+ PortIdx = pAC->ActivePort;
+ else
+ PortIdx = pNet->NetNr;
+
+ StopDrvCleanupTimer(pAC);
+
+ /*
+ * Clear multicast table, promiscuous mode ....
+ */
+ SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0);
+ SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
+ SK_PROM_MODE_NONE);
+
+ if (pAC->MaxPorts == 1) {
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ /* disable interrupts */
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ EvPara.Para32[0] = pNet->NetNr;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ /* stop the hardware */
+ SkGeDeInit(pAC, pAC->IoBase);
+ pAC->BoardLevel = SK_INIT_DATA;
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ } else {
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ EvPara.Para32[0] = pNet->NetNr;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ SkPnmiEvent(pAC, pAC->IoBase, SK_PNMI_EVT_XMAC_RESET, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ /* Stop port */
+ spin_lock_irqsave(&pAC->TxPort[pNet->PortNr]
+ [TX_PRIO_LOW].TxDesRingLock, Flags);
+ SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr,
+ SK_STOP_ALL, SK_HARD_RST);
+ spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr]
+ [TX_PRIO_LOW].TxDesRingLock, Flags);
+ }
+
+ if (pAC->RlmtNets == 1) {
+ /* clear all descriptor rings */
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
+ ClearRxRing(pAC, &pAC->RxPort[i]);
+ ClearTxRing(pAC, &pAC->TxPort[i][TX_PRIO_LOW]);
+ }
+ } else {
+ /* clear port descriptor rings */
+ ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr], SK_TRUE);
+ ClearRxRing(pAC, &pAC->RxPort[pNet->PortNr]);
+ ClearTxRing(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW]);
+ }
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeClose: done "));
+
+ SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA));
+ SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct),
+ sizeof(SK_PNMI_STRUCT_DATA));
+
+ pAC->MaxPorts--;
+ pNet->Up = 0;
+
+ return (0);
+} /* SkGeClose */
+
+
+/*****************************************************************************
+ *
+ * SkGeXmit - Linux frame transmit function
+ *
+ * Description:
+ * The system calls this function to send frames onto the wire.
+ * It puts the frame in the tx descriptor ring. If the ring is
+ *	full, the 'tbusy' flag is set.
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ * WARNING: returning 1 in 'tbusy' case caused system crashes (double
+ * allocated skb's) !!!
+ */
+static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev)
+{
+DEV_NET *pNet;
+SK_AC *pAC;
+int Rc; /* return code of XmitFrame */
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ if ((!skb_shinfo(skb)->nr_frags) ||
+ (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) {
+ /* Don't activate scatter-gather and hardware checksum */
+
+ if (pAC->RlmtNets == 2)
+ Rc = XmitFrame(
+ pAC,
+ &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
+ skb);
+ else
+ Rc = XmitFrame(
+ pAC,
+ &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
+ skb);
+ } else {
+		/* scatter-gather and hardware TCP checksumming enabled */
+ if (pAC->RlmtNets == 2)
+ Rc = XmitFrameSG(
+ pAC,
+ &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
+ skb);
+ else
+ Rc = XmitFrameSG(
+ pAC,
+ &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
+ skb);
+ }
+
+ /* Transmitter out of resources? */
+ if (Rc <= 0) {
+ netif_stop_queue(dev);
+ }
+
+ /* If not taken, give buffer ownership back to the
+ * queueing layer.
+ */
+ if (Rc < 0)
+ return (1);
+
+ dev->trans_start = jiffies;
+ return (0);
+} /* SkGeXmit */
+
+
+/*****************************************************************************
+ *
+ * XmitFrame - fill one socket buffer into the transmit ring
+ *
+ * Description:
+ * This function puts a message into the transmit descriptor ring
+ *	if there is a descriptor left.
+ *	Linux skb's consist of only one continuous buffer.
+ *	The first step locks the ring. It is held locked
+ *	the whole time to avoid problems with SWITCH_../PORT_RESET.
+ *	Then the descriptor is allocated.
+ * The second part is linking the buffer to the descriptor.
+ * At the very last, the Control field of the descriptor
+ * is made valid for the BMU and a start TX command is given
+ * if necessary.
+ *
+ * Returns:
+ *	> 0 - on success: the number of bytes in the message
+ * = 0 - on resource shortage: this frame sent or dropped, now
+ * the ring is full ( -> set tbusy)
+ * < 0 - on failure: other problems ( -> return failure to upper layers)
+ */
+static int XmitFrame(
+SK_AC *pAC, /* pointer to adapter context */
+TX_PORT *pTxPort, /* pointer to struct of port to send to */
+struct sk_buff *pMessage) /* pointer to send-message */
+{
+ TXD *pTxd; /* the rxd to fill */
+ TXD *pOldTxd;
+ unsigned long Flags;
+ SK_U64 PhysAddr;
+ int Protocol;
+ int IpHeaderLength;
+ int BytesSend = pMessage->len;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));
+
+ spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
+#ifndef USE_TX_COMPLETE
+ FreeTxDescriptors(pAC, pTxPort);
+#endif
+ if (pTxPort->TxdRingFree == 0) {
+ /*
+		** Not enough free descriptors in the ring at the moment.
+		** Maybe freeing some old ones helps?
+ */
+ FreeTxDescriptors(pAC, pTxPort);
+ if (pTxPort->TxdRingFree == 0) {
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+ SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_TX_PROGRESS,
+ ("XmitFrame failed\n"));
+ /*
+			** The desired message can not be sent.
+			** Because tbusy seems to be set, the message
+ ** should not be freed here. It will be used
+ ** by the scheduler of the ethernet handler
+ */
+ return (-1);
+ }
+ }
+
+ /*
+	** If the passed socket buffer is smaller than the minimum frame
+	** size of 60 bytes, copy everything into a new buffer and fill all
+	** bytes between the original packet end and the new packet end of
+	** 60 with 0x00. This works around faulty padding by the HW with
+	** 0xaa bytes.
+ */
+ if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
+ if ((pMessage = skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) == NULL) {
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+ return 0;
+ }
+ pMessage->len = C_LEN_ETHERNET_MINSIZE;
+ }
+
+ /*
+ ** advance head counter behind descriptor needed for this frame,
+ ** so that needed descriptor is reserved from that on. The next
+ ** action will be to add the passed buffer to the TX-descriptor
+ */
+ pTxd = pTxPort->pTxdRingHead;
+ pTxPort->pTxdRingHead = pTxd->pNextTxd;
+ pTxPort->TxdRingFree--;
+
+#ifdef SK_DUMP_TX
+ DumpMsg(pMessage, "XmitFrame");
+#endif
+
+ /*
+ ** First step is to map the data to be sent via the adapter onto
+ ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4
+ ** and 2.6 need to use pci_map_page() for that mapping.
+ */
+ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
+ virt_to_page(pMessage->data),
+ ((unsigned long) pMessage->data & ~PAGE_MASK),
+ pMessage->len,
+ PCI_DMA_TODEVICE);
+ pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
+ pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
+ pTxd->pMBuf = pMessage;
+
+ if (pMessage->ip_summed == CHECKSUM_HW) {
+ Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff);
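+		/*
+		** As in XmitFrameSG below: use the tcp check opcode for udp
+		** frames on Yukon rev. 0, because the udp opcode is not
+		** working in that hardware revision.
+		*/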
+ if ((Protocol == C_PROTO_ID_UDP) &&
+ (pAC->GIni.GIChipRev == 0) &&
+ (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
+ pTxd->TBControl = BMU_TCP_CHECK;
+ } else {
+ pTxd->TBControl = BMU_UDP_CHECK;
+ }
+
+ IpHeaderLength = (SK_U8)pMessage->data[C_OFFSET_IPHEADER];
+ IpHeaderLength = (IpHeaderLength & 0xf) * 4;
+ pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */
+ pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength +
+ (Protocol == C_PROTO_ID_UDP ?
+ C_OFFSET_UDPHEADER_UDPCS :
+ C_OFFSET_TCPHEADER_TCPCS);
+ pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength;
+
+ pTxd->TBControl |= BMU_OWN | BMU_STF |
+ BMU_SW | BMU_EOF |
+#ifdef USE_TX_COMPLETE
+ BMU_IRQ_EOF |
+#endif
+ pMessage->len;
+ } else {
+ pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK |
+ BMU_SW | BMU_EOF |
+#ifdef USE_TX_COMPLETE
+ BMU_IRQ_EOF |
+#endif
+ pMessage->len;
+ }
+
+ /*
+ ** If previous descriptor already done, give TX start cmd
+ */
+ pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd);
+ if ((pOldTxd->TBControl & BMU_OWN) == 0) {
+ SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
+ }
+
+ /*
+ ** after releasing the lock, the skb may immediately be free'd
+ */
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+ if (pTxPort->TxdRingFree != 0) {
+ return (BytesSend);
+ } else {
+ return (0);
+ }
+
+} /* XmitFrame */
+
+/*****************************************************************************
+ *
+ * XmitFrameSG - fill one socket buffer into the transmit ring
+ * (use SG and TCP/UDP hardware checksumming)
+ *
+ * Description:
+ * This function puts a message into the transmit descriptor ring
+ *	if there is a descriptor left.
+ *
+ * Returns:
+ *	> 0 - on success: the number of bytes in the message
+ * = 0 - on resource shortage: this frame sent or dropped, now
+ * the ring is full ( -> set tbusy)
+ * < 0 - on failure: other problems ( -> return failure to upper layers)
+ */
+static int XmitFrameSG(
+SK_AC *pAC, /* pointer to adapter context */
+TX_PORT *pTxPort, /* pointer to struct of port to send to */
+struct sk_buff *pMessage) /* pointer to send-message */
+{
+
+ TXD *pTxd;
+ TXD *pTxdFst;
+ TXD *pTxdLst;
+ int CurrFrag;
+ int BytesSend;
+ int IpHeaderLength;
+ int Protocol;
+ skb_frag_t *sk_frag;
+ SK_U64 PhysAddr;
+ unsigned long Flags;
+
+ spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
+#ifndef USE_TX_COMPLETE
+ FreeTxDescriptors(pAC, pTxPort);
+#endif
+ if ((skb_shinfo(pMessage)->nr_frags +1) > pTxPort->TxdRingFree) {
+ FreeTxDescriptors(pAC, pTxPort);
+ if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+ SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_TX_PROGRESS,
+ ("XmitFrameSG failed - Ring full\n"));
+ /* this message can not be sent now */
+ return(-1);
+ }
+ }
+
+ pTxd = pTxPort->pTxdRingHead;
+ pTxdFst = pTxd;
+ pTxdLst = pTxd;
+ BytesSend = 0;
+ Protocol = 0;
+
+ /*
+ ** Map the first fragment (header) into the DMA-space
+ */
+ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
+ virt_to_page(pMessage->data),
+ ((unsigned long) pMessage->data & ~PAGE_MASK),
+ skb_headlen(pMessage),
+ PCI_DMA_TODEVICE);
+
+ pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
+ pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
+
+ /*
+ ** Does the HW need to evaluate checksum for TCP or UDP packets?
+ */
+ if (pMessage->ip_summed == CHECKSUM_HW) {
+ pTxd->TBControl = BMU_STF | BMU_STFWD | skb_headlen(pMessage);
+ /*
+ ** We have to use the opcode for tcp here, because the
+ ** opcode for udp is not working in the hardware yet
+ ** (Revision 2.0)
+ */
+ Protocol = ((SK_U8)pMessage->data[C_OFFSET_IPPROTO] & 0xff);
+ if ((Protocol == C_PROTO_ID_UDP) &&
+ (pAC->GIni.GIChipRev == 0) &&
+ (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
+ pTxd->TBControl |= BMU_TCP_CHECK;
+ } else {
+ pTxd->TBControl |= BMU_UDP_CHECK;
+ }
+
+ IpHeaderLength = ((SK_U8)pMessage->data[C_OFFSET_IPHEADER] & 0xf)*4;
+		pTxd->TcpSumOfs = 0; /* PH-Checksum already calculated */
+ pTxd->TcpSumSt = C_LEN_ETHERMAC_HEADER + IpHeaderLength +
+ (Protocol == C_PROTO_ID_UDP ?
+ C_OFFSET_UDPHEADER_UDPCS :
+ C_OFFSET_TCPHEADER_TCPCS);
+ pTxd->TcpSumWr = C_LEN_ETHERMAC_HEADER + IpHeaderLength;
+ } else {
+ pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_STF |
+ skb_headlen(pMessage);
+ }
+
+ pTxd = pTxd->pNextTxd;
+ pTxPort->TxdRingFree--;
+ BytesSend += skb_headlen(pMessage);
+
+ /*
+ ** Browse over all SG fragments and map each of them into the DMA space
+ */
+ for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) {
+ sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag];
+ /*
+ ** we already have the proper value in entry
+ */
+ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
+ sk_frag->page,
+ sk_frag->page_offset,
+ sk_frag->size,
+ PCI_DMA_TODEVICE);
+
+ pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
+ pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
+ pTxd->pMBuf = pMessage;
+
+ /*
+ ** Does the HW need to evaluate checksum for TCP or UDP packets?
+ */
+ if (pMessage->ip_summed == CHECKSUM_HW) {
+ pTxd->TBControl = BMU_OWN | BMU_SW | BMU_STFWD;
+ /*
+ ** We have to use the opcode for tcp here because the
+ ** opcode for udp is not working in the hardware yet
+ ** (revision 2.0)
+ */
+ if ((Protocol == C_PROTO_ID_UDP) &&
+ (pAC->GIni.GIChipRev == 0) &&
+ (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
+ pTxd->TBControl |= BMU_TCP_CHECK;
+ } else {
+ pTxd->TBControl |= BMU_UDP_CHECK;
+ }
+ } else {
+ pTxd->TBControl = BMU_CHECK | BMU_SW | BMU_OWN;
+ }
+
+ /*
+ ** Do we have the last fragment?
+ */
+ if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) {
+#ifdef USE_TX_COMPLETE
+ pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF | sk_frag->size;
+#else
+ pTxd->TBControl |= BMU_EOF | sk_frag->size;
+#endif
+ pTxdFst->TBControl |= BMU_OWN | BMU_SW;
+
+ } else {
+ pTxd->TBControl |= sk_frag->size;
+ }
+ pTxdLst = pTxd;
+ pTxd = pTxd->pNextTxd;
+ pTxPort->TxdRingFree--;
+ BytesSend += sk_frag->size;
+ }
+
+ /*
+ ** If previous descriptor already done, give TX start cmd
+ */
+ if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) {
+ SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
+ }
+
+ pTxPort->pTxdRingPrev = pTxdLst;
+ pTxPort->pTxdRingHead = pTxd;
+
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+
+ if (pTxPort->TxdRingFree > 0) {
+ return (BytesSend);
+ } else {
+ return (0);
+ }
+}
+
+/*****************************************************************************
+ *
+ * FreeTxDescriptors - release descriptors from the descriptor ring
+ *
+ * Description:
+ * This function releases descriptors from a transmit ring if they
+ * have been sent by the BMU.
+ *	If a descriptor is sent, it can be freed and the message can
+ *	be freed, too.
+ *	The SOFTWARE controllable bit is used to prevent running around a
+ *	completely free ring forever. If this bit is not set in the
+ * frame (by XmitFrame), this frame has never been sent or is
+ * already freed.
+ * The Tx descriptor ring lock must be held while calling this function !!!
+ *
+ * Returns:
+ * none
+ */
+static void FreeTxDescriptors(
+SK_AC *pAC, /* pointer to the adapter context */
+TX_PORT *pTxPort) /* pointer to destination port structure */
+{
+TXD *pTxd; /* pointer to the checked descriptor */
+TXD *pNewTail; /* pointer to 'end' of the ring */
+SK_U32 Control; /* TBControl field of descriptor */
+SK_U64 PhysAddr; /* address of DMA mapping */
+
+ pNewTail = pTxPort->pTxdRingTail;
+ pTxd = pNewTail;
+ /*
+ ** loop forever; exits if BMU_SW bit not set in start frame
+ ** or BMU_OWN bit set in any frame
+ */
+ while (1) {
+ Control = pTxd->TBControl;
+ if ((Control & BMU_SW) == 0) {
+ /*
+ ** software controllable bit is set in first
+ ** fragment when given to BMU. Not set means that
+ ** this fragment was never sent or is already
+ ** freed ( -> ring completely free now).
+ */
+ pTxPort->pTxdRingTail = pTxd;
+ netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
+ return;
+ }
+ if (Control & BMU_OWN) {
+ pTxPort->pTxdRingTail = pTxd;
+ if (pTxPort->TxdRingFree > 0) {
+ netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
+ }
+ return;
+ }
+
+ /*
+		** release the DMA mapping, because until it is unmapped
+		** this buffer is considered to be under control of the
+		** adapter card!
+ */
+ PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32;
+ PhysAddr |= (SK_U64) pTxd->VDataLow;
+ pci_unmap_page(pAC->PciDev, PhysAddr,
+ pTxd->pMBuf->len,
+ PCI_DMA_TODEVICE);
+
+ if (Control & BMU_EOF)
+ DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */
+
+ pTxPort->TxdRingFree++;
+ pTxd->TBControl &= ~BMU_SW;
+ pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */
+ } /* while(forever) */
+} /* FreeTxDescriptors */
+
+/*****************************************************************************
+ *
+ * FillRxRing - fill the receive ring with valid descriptors
+ *
+ * Description:
+ * This function fills the receive ring descriptors with data
+ * segments and makes them valid for the BMU.
+ * The active ring is filled completely, if possible.
+ *	The non-active ring is filled only partially to save memory.
+ *
+ * Description of rx ring structure:
+ * head - points to the descriptor which will be used next by the BMU
+ * tail - points to the next descriptor to give to the BMU
+ *
+ * Returns: N/A
+ */
+static void FillRxRing(
+SK_AC *pAC, /* pointer to the adapter context */
+RX_PORT *pRxPort) /* ptr to port struct for which the ring
+ should be filled */
+{
+unsigned long Flags;
+
+ spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags);
+ while (pRxPort->RxdRingFree > pRxPort->RxFillLimit) {
+ if(!FillRxDescriptor(pAC, pRxPort))
+ break;
+ }
+ spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags);
+} /* FillRxRing */
+
+
+/*****************************************************************************
+ *
+ * FillRxDescriptor - fill one buffer into the receive ring
+ *
+ * Description:
+ * The function allocates a new receive buffer and
+ * puts it into the next descriptor.
+ *
+ * Returns:
+ * SK_TRUE - a buffer was added to the ring
+ * SK_FALSE - a buffer could not be added
+ */
+static SK_BOOL FillRxDescriptor(
+SK_AC *pAC, /* pointer to the adapter context struct */
+RX_PORT *pRxPort) /* ptr to port struct of ring to fill */
+{
+struct sk_buff *pMsgBlock; /* pointer to a new message block */
+RXD *pRxd; /* the rxd to fill */
+SK_U16 Length; /* data fragment length */
+SK_U64 PhysAddr; /* physical address of a rx buffer */
+
+ pMsgBlock = alloc_skb(pAC->RxBufSize, GFP_ATOMIC);
+ if (pMsgBlock == NULL) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_ENTRY,
+ ("%s: Allocation of rx buffer failed !\n",
+ pAC->dev[pRxPort->PortIndex]->name));
+ SK_PNMI_CNT_NO_RX_BUF(pAC, pRxPort->PortIndex);
+ return(SK_FALSE);
+ }
+ skb_reserve(pMsgBlock, 2); /* to align IP frames */
+ /* skb allocated ok, so add buffer */
+ pRxd = pRxPort->pRxdRingTail;
+ pRxPort->pRxdRingTail = pRxd->pNextRxd;
+ pRxPort->RxdRingFree--;
+ Length = pAC->RxBufSize;
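+	/* map RxBufSize - 2 bytes: 2 bytes of the buffer were reserved
+	 * above to align the IP header
+	 */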
+ PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
+ virt_to_page(pMsgBlock->data),
+ ((unsigned long) pMsgBlock->data &
+ ~PAGE_MASK),
+ pAC->RxBufSize - 2,
+ PCI_DMA_FROMDEVICE);
+
+ pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
+ pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
+ pRxd->pMBuf = pMsgBlock;
+ pRxd->RBControl = BMU_OWN |
+ BMU_STF |
+ BMU_IRQ_EOF |
+ BMU_TCP_CHECK |
+ Length;
+ return (SK_TRUE);
+
+} /* FillRxDescriptor */
+
+
+/*****************************************************************************
+ *
+ * ReQueueRxBuffer - fill one buffer back into the receive ring
+ *
+ * Description:
+ * Fill a given buffer back into the rx ring. The buffer
+ * has been previously allocated and aligned, and its phys.
+ *	address calculated, so this is no longer necessary.
+ *
+ * Returns: N/A
+ */
+static void ReQueueRxBuffer(
+SK_AC *pAC, /* pointer to the adapter context struct */
+RX_PORT *pRxPort, /* ptr to port struct of ring to fill */
+struct sk_buff *pMsg, /* pointer to the buffer */
+SK_U32 PhysHigh, /* phys address high dword */
+SK_U32 PhysLow) /* phys address low dword */
+{
+RXD *pRxd; /* the rxd to fill */
+SK_U16 Length; /* data fragment length */
+
+ pRxd = pRxPort->pRxdRingTail;
+ pRxPort->pRxdRingTail = pRxd->pNextRxd;
+ pRxPort->RxdRingFree--;
+ Length = pAC->RxBufSize;
+
+ pRxd->VDataLow = PhysLow;
+ pRxd->VDataHigh = PhysHigh;
+ pRxd->pMBuf = pMsg;
+ pRxd->RBControl = BMU_OWN |
+ BMU_STF |
+ BMU_IRQ_EOF |
+ BMU_TCP_CHECK |
+ Length;
+ return;
+} /* ReQueueRxBuffer */
+
+/*****************************************************************************
+ *
+ * ReceiveIrq - handle a receive IRQ
+ *
+ * Description:
+ * This function is called when a receive IRQ is set.
+ * It walks the receive descriptor ring and sends up all
+ * frames that are complete.
+ *
+ * Returns: N/A
+ */
+static void ReceiveIrq(
+ SK_AC *pAC, /* pointer to adapter context */
+ RX_PORT *pRxPort, /* pointer to receive port struct */
+ SK_BOOL SlowPathLock) /* indicates if SlowPathLock is needed */
+{
+RXD *pRxd; /* pointer to receive descriptors */
+SK_U32 Control; /* control field of descriptor */
+struct sk_buff *pMsg; /* pointer to message holding frame */
+struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */
+int FrameLength; /* total length of received frame */
+int IpFrameLength;
+SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */
+SK_EVPARA EvPara; /* an event parameter union */
+unsigned long Flags; /* for spin lock */
+int PortIndex = pRxPort->PortIndex;
+unsigned int Offset;
+unsigned int NumBytes;
+unsigned int ForRlmt;
+SK_BOOL IsBc;
+SK_BOOL IsMc;
+SK_BOOL IsBadFrame; /* Bad frame */
+
+SK_U32 FrameStat;
+unsigned short Csum1;
+unsigned short Csum2;
+unsigned short Type;
+int Result;
+SK_U64 PhysAddr;
+
+rx_start:
+ /* do forever; exit if BMU_OWN found */
+ for ( pRxd = pRxPort->pRxdRingHead ;
+ pRxPort->RxdRingFree < pAC->RxDescrPerRing ;
+ pRxd = pRxd->pNextRxd,
+ pRxPort->pRxdRingHead = pRxd,
+ pRxPort->RxdRingFree ++) {
+
+ /*
+ * For a better understanding of this loop
+ * Go through every descriptor beginning at the head
+		 * Please note: the ring might be completely received, so the OWN
+		 * bit being set is not a good criterion for leaving the loop.
+ * Therefore the RingFree counter is used.
+ * On entry of this loop pRxd is a pointer to the Rxd that needs
+ * to be checked next.
+ */
+
+ Control = pRxd->RBControl;
+
+ /* check if this descriptor is ready */
+ if ((Control & BMU_OWN) != 0) {
+ /* this descriptor is not yet ready */
+ /* This is the usual end of the loop */
+ /* We don't need to start the ring again */
+ FillRxRing(pAC, pRxPort);
+ return;
+ }
+ pAC->DynIrqModInfo.NbrProcessedDescr++;
+
+ /* get length of frame and check it */
+ FrameLength = Control & BMU_BBC;
+ if (FrameLength > pAC->RxBufSize) {
+ goto rx_failed;
+ }
+
+ /* check for STF and EOF */
+ if ((Control & (BMU_STF | BMU_EOF)) != (BMU_STF | BMU_EOF)) {
+ goto rx_failed;
+ }
+
+ /* here we have a complete frame in the ring */
+ pMsg = pRxd->pMBuf;
+
+ FrameStat = pRxd->FrameStat;
+
+ /* check for frame length mismatch */
+#define XMR_FS_LEN_SHIFT 18
+#define GMR_FS_LEN_SHIFT 16
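+		/*
+		 * The MAC status word carries the frame length in its upper
+		 * bits: the XMAC (Genesis) stores it from bit 18 upwards, the
+		 * GMAC (Yukon) from bit 16. It must match the byte count
+		 * reported by the BMU (FrameLength).
+		 */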
+ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
+ if (FrameLength != (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("skge: Frame length mismatch (%u/%u).\n",
+ FrameLength,
+ (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)));
+ goto rx_failed;
+ }
+ }
+ else {
+ if (FrameLength != (SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("skge: Frame length mismatch (%u/%u).\n",
+ FrameLength,
+				(SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)));
+ goto rx_failed;
+ }
+ }
+
+ /* Set Rx Status */
+ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
+ IsBc = (FrameStat & XMR_FS_BC) != 0;
+ IsMc = (FrameStat & XMR_FS_MC) != 0;
+ IsBadFrame = (FrameStat &
+ (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0;
+ } else {
+ IsBc = (FrameStat & GMR_FS_BC) != 0;
+ IsMc = (FrameStat & GMR_FS_MC) != 0;
+ IsBadFrame = (((FrameStat & GMR_FS_ANY_ERR) != 0) ||
+ ((FrameStat & GMR_FS_RX_OK) == 0));
+ }
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0,
+ ("Received frame of length %d on port %d\n",
+ FrameLength, PortIndex));
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0,
+ ("Number of free rx descriptors: %d\n",
+ pRxPort->RxdRingFree));
+/* DumpMsg(pMsg, "Rx"); */
+
+ if ((Control & BMU_STAT_VAL) != BMU_STAT_VAL || (IsBadFrame)) {
+#if 0
+ (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) {
+#endif
+ /* there is a receive error in this frame */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("skge: Error in received frame, dropped!\n"
+ "Control: %x\nRxStat: %x\n",
+ Control, FrameStat));
+
+ ReQueueRxBuffer(pAC, pRxPort, pMsg,
+ pRxd->VDataHigh, pRxd->VDataLow);
+
+ continue;
+ }
+
+ /*
+ * if short frame then copy data to reduce memory waste
+ */
+ if ((FrameLength < SK_COPY_THRESHOLD) &&
+ ((pNewMsg = alloc_skb(FrameLength+2, GFP_ATOMIC)) != NULL)) {
+ /*
+			 * Short frame detected and allocation successful
+ */
+ /* use new skb and copy data */
+ skb_reserve(pNewMsg, 2);
+ skb_put(pNewMsg, FrameLength);
+ PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
+ PhysAddr |= (SK_U64) pRxd->VDataLow;
+
+ pci_dma_sync_single_for_cpu(pAC->PciDev,
+ (dma_addr_t) PhysAddr,
+ FrameLength,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(pNewMsg, pMsg->data,
+ FrameLength, 0);
+ pci_dma_sync_single_for_device(pAC->PciDev,
+ (dma_addr_t) PhysAddr,
+ FrameLength,
+ PCI_DMA_FROMDEVICE);
+ ReQueueRxBuffer(pAC, pRxPort, pMsg,
+ pRxd->VDataHigh, pRxd->VDataLow);
+
+ pMsg = pNewMsg;
+
+ }
+ else {
+ /*
+ * if large frame, or SKB allocation failed, pass
+ * the SKB directly to the networking
+ */
+
+ PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
+ PhysAddr |= (SK_U64) pRxd->VDataLow;
+
+ /* release the DMA mapping */
+ pci_unmap_single(pAC->PciDev,
+ PhysAddr,
+ pAC->RxBufSize - 2,
+ PCI_DMA_FROMDEVICE);
+
+ /* set length in message */
+ skb_put(pMsg, FrameLength);
+ /* hardware checksum */
+ Type = ntohs(*((short*)&pMsg->data[12]));
+
+#ifdef USE_SK_RX_CHECKSUM
+ if (Type == 0x800) {
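+				/*
+				 * IPv4 frame: the descriptor's TcpSums field holds two
+				 * 16 bit hardware checksums (low and high word) which
+				 * are evaluated by the common checksum module below.
+				 */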
+ Csum1=le16_to_cpu(pRxd->TcpSums & 0xffff);
+ Csum2=le16_to_cpu((pRxd->TcpSums >> 16) & 0xffff);
+ IpFrameLength = (int) ntohs((unsigned short)
+ ((unsigned short *) pMsg->data)[8]);
+
+				/*
+				 * If the frame is padded, the hardware checksum cannot
+				 * be verified. An unpadded frame exceeds the IP total
+				 * length by exactly the 14 (0xe) bytes of the Ethernet
+				 * header.
+				 */
+ if ((FrameLength - IpFrameLength) != 0xe) {
+ /* Frame padded => TCP offload not possible! */
+ pMsg->ip_summed = CHECKSUM_NONE;
+ } else {
+ /* Frame not padded => TCP offload! */
+ if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) &&
+ (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) ||
+ (pAC->ChipsetType)) {
+ Result = SkCsGetReceiveInfo(pAC,
+ &pMsg->data[14],
+ Csum1, Csum2, pRxPort->PortIndex);
+ if (Result ==
+ SKCS_STATUS_IP_FRAGMENT ||
+ Result ==
+ SKCS_STATUS_IP_CSUM_OK ||
+ Result ==
+ SKCS_STATUS_TCP_CSUM_OK ||
+ Result ==
+ SKCS_STATUS_UDP_CSUM_OK) {
+ pMsg->ip_summed =
+ CHECKSUM_UNNECESSARY;
+ }
+ else if (Result ==
+ SKCS_STATUS_TCP_CSUM_ERROR ||
+ Result ==
+ SKCS_STATUS_UDP_CSUM_ERROR ||
+ Result ==
+ SKCS_STATUS_IP_CSUM_ERROR_UDP ||
+ Result ==
+ SKCS_STATUS_IP_CSUM_ERROR_TCP ||
+ Result ==
+ SKCS_STATUS_IP_CSUM_ERROR ) {
+ /* HW Checksum error */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("skge: CRC error. Frame dropped!\n"));
+ goto rx_failed;
+ } else {
+ pMsg->ip_summed =
+ CHECKSUM_NONE;
+ }
+ }/* checksumControl calculation valid */
+ } /* Frame length check */
+ } /* IP frame */
+#else
+ pMsg->ip_summed = CHECKSUM_NONE;
+#endif
+ } /* frame > SK_COPY_TRESHOLD */
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V"));
+ ForRlmt = SK_RLMT_RX_PROTOCOL;
+#if 0
+ IsBc = (FrameStat & XMR_FS_BC)==XMR_FS_BC;
+#endif
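+		/*
+		 * Give the RLMT module a chance to inspect the frame header.
+		 * If RLMT does not claim the frame, it is handed to the network
+		 * stack; otherwise it is copied into an RLMT mbuf and queued as
+		 * an RLMT event.
+		 */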
+ SK_RLMT_PRE_LOOKAHEAD(pAC, PortIndex, FrameLength,
+ IsBc, &Offset, &NumBytes);
+ if (NumBytes != 0) {
+#if 0
+ IsMc = (FrameStat & XMR_FS_MC)==XMR_FS_MC;
+#endif
+ SK_RLMT_LOOKAHEAD(pAC, PortIndex,
+ &pMsg->data[Offset],
+ IsBc, IsMc, &ForRlmt);
+ }
+ if (ForRlmt == SK_RLMT_RX_PROTOCOL) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("W"));
+ /* send up only frames from active port */
+ if ((PortIndex == pAC->ActivePort) ||
+ (pAC->RlmtNets == 2)) {
+ /* frame for upper layer */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("U"));
+#ifdef xDEBUG
+ DumpMsg(pMsg, "Rx");
+#endif
+ SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,
+ FrameLength, pRxPort->PortIndex);
+
+ pMsg->dev = pAC->dev[pRxPort->PortIndex];
+ pMsg->protocol = eth_type_trans(pMsg,
+ pAC->dev[pRxPort->PortIndex]);
+ netif_rx(pMsg);
+ pAC->dev[pRxPort->PortIndex]->last_rx = jiffies;
+ }
+ else {
+ /* drop frame */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("D"));
+ DEV_KFREE_SKB(pMsg);
+ }
+
+ } /* if not for rlmt */
+ else {
+ /* packet for rlmt */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS, ("R"));
+ pRlmtMbuf = SkDrvAllocRlmtMbuf(pAC,
+ pAC->IoBase, FrameLength);
+ if (pRlmtMbuf != NULL) {
+ pRlmtMbuf->pNext = NULL;
+ pRlmtMbuf->Length = FrameLength;
+ pRlmtMbuf->PortIdx = PortIndex;
+ EvPara.pParaPtr = pRlmtMbuf;
+ memcpy((char*)(pRlmtMbuf->pData),
+ (char*)(pMsg->data),
+ FrameLength);
+
+ /* SlowPathLock needed? */
+ if (SlowPathLock == SK_TRUE) {
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ SkEventQueue(pAC, SKGE_RLMT,
+ SK_RLMT_PACKET_RECEIVED,
+ EvPara);
+ pAC->CheckQueue = SK_TRUE;
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ } else {
+ SkEventQueue(pAC, SKGE_RLMT,
+ SK_RLMT_PACKET_RECEIVED,
+ EvPara);
+ pAC->CheckQueue = SK_TRUE;
+ }
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+ SK_DBGCAT_DRV_RX_PROGRESS,
+ ("Q"));
+ }
+ if ((pAC->dev[pRxPort->PortIndex]->flags &
+ (IFF_PROMISC | IFF_ALLMULTI)) != 0 ||
+ (ForRlmt & SK_RLMT_RX_PROTOCOL) ==
+ SK_RLMT_RX_PROTOCOL) {
+ pMsg->dev = pAC->dev[pRxPort->PortIndex];
+ pMsg->protocol = eth_type_trans(pMsg,
+ pAC->dev[pRxPort->PortIndex]);
+ netif_rx(pMsg);
+ pAC->dev[pRxPort->PortIndex]->last_rx = jiffies;
+ }
+ else {
+ DEV_KFREE_SKB(pMsg);
+ }
+
+ } /* if packet for rlmt */
+ } /* for ... scanning the RXD ring */
+
+ /* RXD ring is empty -> fill and restart */
+ FillRxRing(pAC, pRxPort);
+ /* do not start if called from Close */
+ if (pAC->BoardLevel > SK_INIT_DATA) {
+ ClearAndStartRx(pAC, PortIndex);
+ }
+ return;
+
+rx_failed:
+ /* remove error frame */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ERROR,
+		("Broken descriptor, length: 0x%x\n", FrameLength));
+
+ /* release the DMA mapping */
+
+ PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
+ PhysAddr |= (SK_U64) pRxd->VDataLow;
+ pci_unmap_page(pAC->PciDev,
+ PhysAddr,
+ pAC->RxBufSize - 2,
+ PCI_DMA_FROMDEVICE);
+ DEV_KFREE_SKB_IRQ(pRxd->pMBuf);
+ pRxd->pMBuf = NULL;
+ pRxPort->RxdRingFree++;
+ pRxPort->pRxdRingHead = pRxd->pNextRxd;
+ goto rx_start;
+
+} /* ReceiveIrq */
+
+
+/*****************************************************************************
+ *
+ * ClearAndStartRx - give a start receive command to BMU, clear IRQ
+ *
+ * Description:
+ * This function sends a start command and a clear interrupt
+ * command for one receive queue to the BMU.
+ *
+ * Returns: N/A
+ */
+static void ClearAndStartRx(
+SK_AC *pAC, /* pointer to the adapter context */
+int PortIndex) /* index of the receive port (XMAC) */
+{
+ SK_OUT8(pAC->IoBase,
+ RxQueueAddr[PortIndex]+Q_CSR,
+ CSR_START | CSR_IRQ_CL_F);
+} /* ClearAndStartRx */
+
+
+/*****************************************************************************
+ *
+ * ClearTxIrq - give a clear transmit IRQ command to BMU
+ *
+ * Description:
+ * This function sends a clear tx IRQ command for one
+ * transmit queue to the BMU.
+ *
+ * Returns: N/A
+ */
+static void ClearTxIrq(
+SK_AC *pAC, /* pointer to the adapter context */
+int PortIndex, /* index of the transmit port (XMAC) */
+int Prio) /* priority or normal queue */
+{
+ SK_OUT8(pAC->IoBase,
+ TxQueueAddr[PortIndex][Prio]+Q_CSR,
+ CSR_IRQ_CL_F);
+} /* ClearTxIrq */
+
+
+/*****************************************************************************
+ *
+ * ClearRxRing - remove all buffers from the receive ring
+ *
+ * Description:
+ * This function removes all receive buffers from the ring.
+ * The receive BMU must be stopped before calling this function.
+ *
+ * Returns: N/A
+ */
+static void ClearRxRing(
+SK_AC *pAC, /* pointer to adapter context */
+RX_PORT *pRxPort) /* pointer to rx port struct */
+{
+RXD *pRxd; /* pointer to the current descriptor */
+unsigned long Flags;
+SK_U64 PhysAddr;
+
+ if (pRxPort->RxdRingFree == pAC->RxDescrPerRing) {
+ return;
+ }
+ spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags);
+ pRxd = pRxPort->pRxdRingHead;
+ do {
+ if (pRxd->pMBuf != NULL) {
+
+ PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
+ PhysAddr |= (SK_U64) pRxd->VDataLow;
+ pci_unmap_page(pAC->PciDev,
+ PhysAddr,
+ pAC->RxBufSize - 2,
+ PCI_DMA_FROMDEVICE);
+ DEV_KFREE_SKB(pRxd->pMBuf);
+ pRxd->pMBuf = NULL;
+ }
+ pRxd->RBControl &= BMU_OWN;
+ pRxd = pRxd->pNextRxd;
+ pRxPort->RxdRingFree++;
+ } while (pRxd != pRxPort->pRxdRingTail);
+ pRxPort->pRxdRingTail = pRxPort->pRxdRingHead;
+ spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags);
+} /* ClearRxRing */
+
+/*****************************************************************************
+ *
+ * ClearTxRing - remove all buffers from the transmit ring
+ *
+ * Description:
+ * This function removes all transmit buffers from the ring.
+ * The transmit BMU must be stopped before calling this function
+ * and transmitting at the upper level must be disabled.
+ * The BMU own bit of all descriptors is cleared, the rest is
+ * done by calling FreeTxDescriptors.
+ *
+ * Returns: N/A
+ */
+static void ClearTxRing(
+SK_AC *pAC, /* pointer to adapter context */
+TX_PORT *pTxPort) /* pointer to tx prt struct */
+{
+TXD *pTxd; /* pointer to the current descriptor */
+int i;
+unsigned long Flags;
+
+ spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
+ pTxd = pTxPort->pTxdRingHead;
+ for (i=0; i<pAC->TxDescrPerRing; i++) {
+ pTxd->TBControl &= ~BMU_OWN;
+ pTxd = pTxd->pNextTxd;
+ }
+ FreeTxDescriptors(pAC, pTxPort);
+ spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+} /* ClearTxRing */
+
+/*****************************************************************************
+ *
+ * SkGeSetMacAddr - Set the hardware MAC address
+ *
+ * Description:
+ * This function sets the MAC address used by the adapter.
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ */
+static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p)
+{
+
+DEV_NET *pNet = netdev_priv(dev);
+SK_AC *pAC = pNet->pAC;
+
+struct sockaddr *addr = p;
+unsigned long Flags;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeSetMacAddr starts now...\n"));
+ if(netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+
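+	/*
+	 * In DualNet mode (RlmtNets == 2) each net owns its own port, so the
+	 * address is overridden on this net's port; otherwise it is set on
+	 * the currently active port.
+	 */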
+ if (pAC->RlmtNets == 2)
+ SkAddrOverride(pAC, pAC->IoBase, pNet->NetNr,
+ (SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
+ else
+ SkAddrOverride(pAC, pAC->IoBase, pAC->ActivePort,
+ (SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
+
+
+
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ return 0;
+} /* SkGeSetMacAddr */
+
+
+/*****************************************************************************
+ *
+ * SkGeSetRxMode - set receive mode
+ *
+ * Description:
+ * This function sets the receive mode of an adapter. The adapter
+ * supports promiscuous mode, allmulticast mode and a number of
+ *	multicast addresses. If more multicast addresses than available
+ * are selected, a hash function in the hardware is used.
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ */
+static void SkGeSetRxMode(struct SK_NET_DEVICE *dev)
+{
+
+DEV_NET *pNet;
+SK_AC *pAC;
+
+struct dev_mc_list *pMcList;
+int i;
+int PortIdx;
+unsigned long Flags;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeSetRxMode starts now... "));
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+ if (pAC->RlmtNets == 1)
+ PortIdx = pAC->ActivePort;
+ else
+ PortIdx = pNet->NetNr;
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ if (dev->flags & IFF_PROMISC) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("PROMISCUOUS mode\n"));
+ SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
+ SK_PROM_MODE_LLC);
+ } else if (dev->flags & IFF_ALLMULTI) {
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("ALLMULTI mode\n"));
+ SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
+ SK_PROM_MODE_ALL_MC);
+ } else {
+ SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
+ SK_PROM_MODE_NONE);
+ SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0);
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("Number of MC entries: %d ", dev->mc_count));
+
+ pMcList = dev->mc_list;
+ for (i=0; i<dev->mc_count; i++, pMcList = pMcList->next) {
+ SkAddrMcAdd(pAC, pAC->IoBase, PortIdx,
+ (SK_MAC_ADDR*)pMcList->dmi_addr, 0);
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_MCA,
+ ("%02x:%02x:%02x:%02x:%02x:%02x\n",
+ pMcList->dmi_addr[0],
+ pMcList->dmi_addr[1],
+ pMcList->dmi_addr[2],
+ pMcList->dmi_addr[3],
+ pMcList->dmi_addr[4],
+ pMcList->dmi_addr[5]));
+ }
+ SkAddrMcUpdate(pAC, pAC->IoBase, PortIdx);
+ }
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ return;
+} /* SkGeSetRxMode */
+
+
+/*****************************************************************************
+ *
+ * SkGeChangeMtu - set the MTU to another value
+ *
+ * Description:
+ *	This function is called whenever the MTU size is changed
+ *	(e.g. ifconfig ethX mtu xxx). If the MTU is bigger than standard
+ * ethernet MTU size, long frame support is activated.
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ */
+static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu)
+{
+DEV_NET *pNet;
+DEV_NET *pOtherNet;
+SK_AC *pAC;
+unsigned long Flags;
+int i;
+SK_EVPARA EvPara;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeChangeMtu starts now...\n"));
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ if ((NewMtu < 68) || (NewMtu > SK_JUMBO_MTU)) {
+ return -EINVAL;
+ }
+
+ if(pAC->BoardLevel != SK_INIT_RUN) {
+ return -EINVAL;
+ }
+
+#ifdef SK_DIAG_SUPPORT
+ if (pAC->DiagModeActive == DIAG_ACTIVE) {
+ if (pAC->DiagFlowCtrl == SK_FALSE) {
+			return -1; /* diag still in use, deny any MTU change */
+ } else {
+ pAC->DiagFlowCtrl = SK_FALSE;
+ }
+ }
+#endif
+
+ pNet->Mtu = NewMtu;
+ pOtherNet = netdev_priv(pAC->dev[1 - pNet->NetNr]);
+ if ((pOtherNet->Mtu>1500) && (NewMtu<=1500) && (pOtherNet->Up==1)) {
+ return(0);
+ }
+
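+	/*
+	** Receive buffers must hold the new MTU plus some extra headroom
+	** (link layer header and alignment).
+	*/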
+ pAC->RxBufSize = NewMtu + 32;
+ dev->mtu = NewMtu;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("New MTU: %d\n", NewMtu));
+
+ /*
+ ** Prevent any reconfiguration while changing the MTU
+ ** by disabling any interrupts
+ */
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+
+ /*
+ ** Notify RLMT that any ports are to be stopped
+ */
+ EvPara.Para32[0] = 0;
+ EvPara.Para32[1] = -1;
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ EvPara.Para32[0] = 1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ } else {
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ }
+
+ /*
+ ** After calling the SkEventDispatcher(), RLMT is aware about
+ ** the stopped ports -> configuration can take place!
+ */
+ SkEventDispatcher(pAC, pAC->IoBase);
+
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ spin_lock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
+ netif_stop_queue(pAC->dev[i]);
+
+ }
+
+ /*
+ ** Depending on the desired MTU size change, a different number of
+ ** RX buffers need to be allocated
+ */
+ if (NewMtu > 1500) {
+ /*
+ ** Use less rx buffers
+ */
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
+ (pAC->RxDescrPerRing / 4);
+ } else {
+ if (i == pAC->ActivePort) {
+ pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
+ (pAC->RxDescrPerRing / 4);
+ } else {
+ pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
+ (pAC->RxDescrPerRing / 10);
+ }
+ }
+ }
+ } else {
+ /*
+ ** Use the normal amount of rx buffers
+ */
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ pAC->RxPort[i].RxFillLimit = 1;
+ } else {
+ if (i == pAC->ActivePort) {
+ pAC->RxPort[i].RxFillLimit = 1;
+ } else {
+ pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
+ (pAC->RxDescrPerRing / 4);
+ }
+ }
+ }
+ }
+
+ SkGeDeInit(pAC, pAC->IoBase);
+
+ /*
+ ** enable/disable hardware support for long frames
+ */
+ if (NewMtu > 1500) {
+// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */
+ pAC->GIni.GIPortUsage = SK_JUMBO_LINK;
+ } else {
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ pAC->GIni.GIPortUsage = SK_MUL_LINK;
+ } else {
+ pAC->GIni.GIPortUsage = SK_RED_LINK;
+ }
+ }
+
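+	/*
+	** The adapter was de-initialized above and is now brought up again
+	** in two steps (SK_INIT_IO followed by SK_INIT_RUN) so that all
+	** common modules pick up the new buffer size and port usage.
+	*/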
+ SkGeInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
+ SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO);
+ SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO);
+
+ /*
+ ** tschilling:
+ ** Speed and others are set back to default in level 1 init!
+ */
+ GetConfiguration(pAC);
+
+ SkGeInit( pAC, pAC->IoBase, SK_INIT_RUN);
+ SkI2cInit( pAC, pAC->IoBase, SK_INIT_RUN);
+ SkEventInit(pAC, pAC->IoBase, SK_INIT_RUN);
+ SkPnmiInit( pAC, pAC->IoBase, SK_INIT_RUN);
+ SkAddrInit( pAC, pAC->IoBase, SK_INIT_RUN);
+ SkRlmtInit( pAC, pAC->IoBase, SK_INIT_RUN);
+ SkTimerInit(pAC, pAC->IoBase, SK_INIT_RUN);
+
+ /*
+ ** clear and reinit the rx rings here
+ */
+ for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+ ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
+ ClearRxRing(pAC, &pAC->RxPort[i]);
+ FillRxRing(pAC, &pAC->RxPort[i]);
+
+ /*
+ ** Enable transmit descriptor polling
+ */
+ SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE);
+ FillRxRing(pAC, &pAC->RxPort[i]);
+	}
+
+ SkGeYellowLED(pAC, pAC->IoBase, 1);
+ SkDimEnableModerationIfNeeded(pAC);
+ SkDimDisplayModerationSettings(pAC);
+
+ netif_start_queue(pAC->dev[pNet->PortNr]);
+ for (i=pAC->GIni.GIMacsFound-1; i>=0; i--) {
+ spin_unlock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
+ }
+
+ /*
+ ** Enable Interrupts again
+ */
+ SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
+ SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
+
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+
+ /*
+ ** Notify RLMT about the changing and restarting one (or more) ports
+ */
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ EvPara.Para32[0] = pAC->RlmtNets;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, EvPara);
+ EvPara.Para32[0] = pNet->PortNr;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+
+ if (pOtherNet->Up) {
+ EvPara.Para32[0] = pOtherNet->PortNr;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+ }
+ } else {
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+ }
+
+ SkEventDispatcher(pAC, pAC->IoBase);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ /*
+	** While testing this driver with a recent 2.5 kernel (2.5.70), the
+	** upper layers seemed unable to handle a successful return value
+	** of '0': if zero is returned, the complete system hangs for
+	** several minutes, which is unacceptable.
+	**
+	** The exact reason for this problem is currently unclear. The
+	** workaround for 2.5 is to return the desired new MTU size once
+	** all changes required for the new MTU have been performed. In
+	** kernels 2.2 and 2.4, a zero value is returned, which indicates
+	** that the MTU change succeeded.
+ */
+ return NewMtu;
+
+} /* SkGeChangeMtu */
+
+
+/*****************************************************************************
+ *
+ * SkGeStats - return ethernet device statistics
+ *
+ * Description:
+ * This function return statistic data about the ethernet device
+ * to the operating system.
+ *
+ * Returns:
+ * pointer to the statistic structure.
+ */
+static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev)
+{
+DEV_NET *pNet = netdev_priv(dev);
+SK_AC *pAC = pNet->pAC;
+SK_PNMI_STRUCT_DATA *pPnmiStruct; /* structure for all Pnmi-Data */
+SK_PNMI_STAT *pPnmiStat; /* pointer to virtual XMAC stat. data */
+SK_PNMI_CONF *pPnmiConf; /* pointer to virtual link config. */
+unsigned int Size; /* size of pnmi struct */
+unsigned long Flags; /* for spin lock */
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeStats starts now...\n"));
+ pPnmiStruct = &pAC->PnmiStruct;
+
+#ifdef SK_DIAG_SUPPORT
+ if ((pAC->DiagModeActive == DIAG_NOTACTIVE) &&
+ (pAC->BoardLevel == SK_INIT_RUN)) {
+#endif
+ SK_MEMSET(pPnmiStruct, 0, sizeof(SK_PNMI_STRUCT_DATA));
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ Size = SK_PNMI_STRUCT_SIZE;
+ SkPnmiGetStruct(pAC, pAC->IoBase, pPnmiStruct, &Size, pNet->NetNr);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+#ifdef SK_DIAG_SUPPORT
+ }
+#endif
+
+ pPnmiStat = &pPnmiStruct->Stat[0];
+ pPnmiConf = &pPnmiStruct->Conf[0];
+
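+	/*
+	 * The PNMI counters are 64 bit wide; only their lower 32 bits are
+	 * copied into the net_device statistics below.
+	 */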
+ pAC->stats.rx_packets = (SK_U32) pPnmiStruct->RxDeliveredCts & 0xFFFFFFFF;
+ pAC->stats.tx_packets = (SK_U32) pPnmiStat->StatTxOkCts & 0xFFFFFFFF;
+ pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts;
+ pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts;
+
+ if (pNet->Mtu <= 1500) {
+ pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF;
+ } else {
+ pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts -
+ pPnmiStat->StatRxTooLongCts) & 0xFFFFFFFF);
+ }
+
+
+ if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && pAC->HWRevision < 12)
+ pAC->stats.rx_errors = pAC->stats.rx_errors - pPnmiStat->StatRxShortsCts;
+
+ pAC->stats.tx_errors = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF;
+ pAC->stats.rx_dropped = (SK_U32) pPnmiStruct->RxNoBufCts & 0xFFFFFFFF;
+ pAC->stats.tx_dropped = (SK_U32) pPnmiStruct->TxNoBufCts & 0xFFFFFFFF;
+ pAC->stats.multicast = (SK_U32) pPnmiStat->StatRxMulticastOkCts & 0xFFFFFFFF;
+ pAC->stats.collisions = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF;
+
+ /* detailed rx_errors: */
+ pAC->stats.rx_length_errors = (SK_U32) pPnmiStat->StatRxRuntCts & 0xFFFFFFFF;
+ pAC->stats.rx_over_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF;
+ pAC->stats.rx_crc_errors = (SK_U32) pPnmiStat->StatRxFcsCts & 0xFFFFFFFF;
+ pAC->stats.rx_frame_errors = (SK_U32) pPnmiStat->StatRxFramingCts & 0xFFFFFFFF;
+ pAC->stats.rx_fifo_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF;
+ pAC->stats.rx_missed_errors = (SK_U32) pPnmiStat->StatRxMissedCts & 0xFFFFFFFF;
+
+ /* detailed tx_errors */
+ pAC->stats.tx_aborted_errors = (SK_U32) 0;
+ pAC->stats.tx_carrier_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF;
+ pAC->stats.tx_fifo_errors = (SK_U32) pPnmiStat->StatTxFifoUnderrunCts & 0xFFFFFFFF;
+ pAC->stats.tx_heartbeat_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF;
+ pAC->stats.tx_window_errors = (SK_U32) 0;
+
+ return(&pAC->stats);
+} /* SkGeStats */
+
+
+/*****************************************************************************
+ *
+ * SkGeIoctl - IO-control function
+ *
+ * Description:
+ * This function is called if an ioctl is issued on the device.
+ *	There are three subfunctions for reading, writing and test-writing
+ *	the private MIB data structure (useful for SysKonnect-internal tools).
+ *
+ * Returns:
+ * 0, if everything is ok
+ * !=0, on error
+ */
+static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd)
+{
+DEV_NET *pNet;
+SK_AC *pAC;
+void *pMemBuf;
+struct pci_dev *pdev = NULL;
+SK_GE_IOCTL Ioctl;
+unsigned int Err = 0;
+int Size = 0;
+int Ret = 0;
+unsigned int Length = 0;
+int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32);
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeIoctl starts now...\n"));
+
+ pNet = netdev_priv(dev);
+ pAC = pNet->pAC;
+
+ if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) {
+ return -EFAULT;
+ }
+
+ switch(cmd) {
+ case SK_IOCTL_SETMIB:
+ case SK_IOCTL_PRESETMIB:
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ case SK_IOCTL_GETMIB:
+ if(copy_from_user(&pAC->PnmiStruct, Ioctl.pData,
+ Ioctl.Len<sizeof(pAC->PnmiStruct)?
+ Ioctl.Len : sizeof(pAC->PnmiStruct))) {
+ return -EFAULT;
+ }
+ Size = SkGeIocMib(pNet, Ioctl.Len, cmd);
+ if(copy_to_user(Ioctl.pData, &pAC->PnmiStruct,
+ Ioctl.Len<Size? Ioctl.Len : Size)) {
+ return -EFAULT;
+ }
+ Ioctl.Len = Size;
+ if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
+ return -EFAULT;
+ }
+ break;
+ case SK_IOCTL_GEN:
+ if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) {
+ Length = Ioctl.Len;
+ } else {
+ Length = sizeof(pAC->PnmiStruct) + HeaderLength;
+ }
+ if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) {
+ return -ENOMEM;
+ }
+ if(copy_from_user(pMemBuf, Ioctl.pData, Length)) {
+ Err = -EFAULT;
+ goto fault_gen;
+ }
+ if ((Ret = SkPnmiGenIoctl(pAC, pAC->IoBase, pMemBuf, &Length, 0)) < 0) {
+ Err = -EFAULT;
+ goto fault_gen;
+ }
+ if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) {
+ Err = -EFAULT;
+ goto fault_gen;
+ }
+ Ioctl.Len = Length;
+ if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
+ Err = -EFAULT;
+ goto fault_gen;
+ }
+fault_gen:
+ kfree(pMemBuf); /* cleanup everything */
+ break;
+#ifdef SK_DIAG_SUPPORT
+ case SK_IOCTL_DIAG:
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) {
+ Length = Ioctl.Len;
+ } else {
+ Length = sizeof(pAC->PnmiStruct) + HeaderLength;
+ }
+ if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) {
+ return -ENOMEM;
+ }
+ if(copy_from_user(pMemBuf, Ioctl.pData, Length)) {
+ Err = -EFAULT;
+ goto fault_diag;
+ }
+ pdev = pAC->PciDev;
+ Length = 3 * sizeof(SK_U32); /* Error, Bus and Device */
+ /*
+ ** While coding this new IOCTL interface, only a few lines of code
+	** are to be added. Therefore no dedicated function has been
+ ** added. If more functionality is added, a separate function
+ ** should be used...
+ */
+ * ((SK_U32 *)pMemBuf) = 0;
+ * ((SK_U32 *)pMemBuf + 1) = pdev->bus->number;
+ * ((SK_U32 *)pMemBuf + 2) = ParseDeviceNbrFromSlotName(pci_name(pdev));
+ if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) {
+ Err = -EFAULT;
+ goto fault_diag;
+ }
+ Ioctl.Len = Length;
+ if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
+ Err = -EFAULT;
+ goto fault_diag;
+ }
+fault_diag:
+ kfree(pMemBuf); /* cleanup everything */
+ break;
+#endif
+ default:
+ Err = -EOPNOTSUPP;
+ }
+
+ return(Err);
+
+} /* SkGeIoctl */
+
+
+/*****************************************************************************
+ *
+ * SkGeIocMib - handle a GetMib, SetMib- or PresetMib-ioctl message
+ *
+ * Description:
+ * This function reads/writes the MIB data using PNMI (Private Network
+ * Management Interface).
+ * The destination for the data must be provided with the
+ * ioctl call and is given to the driver in the form of
+ * a user space address.
+ * Copying from the user-provided data area into kernel messages
+ * and back is done by copy_from_user and copy_to_user calls in
+ * SkGeIoctl.
+ *
+ * Returns:
+ * returned size from PNMI call
+ */
+static int SkGeIocMib(
+DEV_NET *pNet, /* pointer to the adapter context */
+unsigned int Size, /* length of ioctl data */
+int mode) /* flag for set/preset */
+{
+unsigned long Flags; /* for spin lock */
+SK_AC *pAC;
+
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("SkGeIocMib starts now...\n"));
+ pAC = pNet->pAC;
+ /* access MIB */
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ switch(mode) {
+ case SK_IOCTL_GETMIB:
+ SkPnmiGetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
+ pNet->NetNr);
+ break;
+ case SK_IOCTL_PRESETMIB:
+ SkPnmiPreSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
+ pNet->NetNr);
+ break;
+ case SK_IOCTL_SETMIB:
+ SkPnmiSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
+ pNet->NetNr);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ ("MIB data access succeeded\n"));
+ return (Size);
+} /* SkGeIocMib */
+
+
+/*****************************************************************************
+ *
+ * GetConfiguration - read configuration information
+ *
+ * Description:
+ * This function reads per-adapter configuration information from
+ * the options provided on the command line.
+ *
+ * Returns:
+ * none
+ */
+static void GetConfiguration(
+SK_AC *pAC) /* pointer to the adapter context structure */
+{
+SK_I32 Port; /* preferred port */
+SK_BOOL AutoSet;
+SK_BOOL DupSet;
+int LinkSpeed = SK_LSPEED_AUTO; /* Link speed */
+int AutoNeg = 1; /* autoneg off (0) or on (1) */
+int DuplexCap = 0; /* 0=both,1=full,2=half */
+int FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; /* FlowControl */
+int MSMode = SK_MS_MODE_AUTO; /* master/slave mode */
+
+SK_BOOL IsConTypeDefined = SK_TRUE;
+SK_BOOL IsLinkSpeedDefined = SK_TRUE;
+SK_BOOL IsFlowCtrlDefined = SK_TRUE;
+SK_BOOL IsRoleDefined = SK_TRUE;
+SK_BOOL IsModeDefined = SK_TRUE;
+/*
+ * The two parameters AutoNeg. and DuplexCap. map to one configuration
+ * parameter. The mapping is described by this table:
+ * DuplexCap -> | both | full | half |
+ * AutoNeg | | | |
+ * -----------------------------------------------------------------
+ * Off | illegal | Full | Half |
+ * -----------------------------------------------------------------
+ * On | AutoBoth | AutoFull | AutoHalf |
+ * -----------------------------------------------------------------
+ * Sense | AutoSense | AutoSense | AutoSense |
+ */
+int Capabilities[3][3] =
+ { { -1, SK_LMODE_FULL , SK_LMODE_HALF },
+ {SK_LMODE_AUTOBOTH , SK_LMODE_AUTOFULL , SK_LMODE_AUTOHALF },
+ {SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE} };
+
+#define DC_BOTH 0
+#define DC_FULL 1
+#define DC_HALF 2
+#define AN_OFF 0
+#define AN_ON 1
+#define AN_SENS 2
+#define M_CurrPort pAC->GIni.GP[Port]
+
+
+ /*
+ ** Set the default values first for both ports!
+ */
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO;
+ }
+
+ /*
+ ** Check merged parameter ConType. If it has not been used,
+ ** verify any other parameter (e.g. AutoNeg) and use default values.
+ **
+ ** Stating both ConType and other lowlevel link parameters is also
+ ** possible. If this is the case, the passed ConType-parameter is
+ ** overwritten by the lowlevel link parameter.
+ **
+ ** The following settings are used for a merged ConType-parameter:
+ **
+ ** ConType DupCap AutoNeg FlowCtrl Role Speed
+ ** ------- ------ ------- -------- ---------- -----
+ ** Auto Both On SymOrRem Auto Auto
+ ** 100FD Full Off None <ignored> 100
+ ** 100HD Half Off None <ignored> 100
+ ** 10FD Full Off None <ignored> 10
+ ** 10HD Half Off None <ignored> 10
+ **
+ ** This ConType parameter is used for all ports of the adapter!
+ */
+ if ( (ConType != NULL) &&
+ (pAC->Index < SK_MAX_CARD_PARAM) &&
+ (ConType[pAC->Index] != NULL) ) {
+
+ /* Check chipset family */
+ if ((!pAC->ChipsetType) &&
+ (strcmp(ConType[pAC->Index],"Auto")!=0) &&
+ (strcmp(ConType[pAC->Index],"")!=0)) {
+ /* Set the speed parameter back */
+ printk("sk98lin: Illegal value \"%s\" "
+ "for ConType."
+ " Using Auto.\n",
+ ConType[pAC->Index]);
+
+ sprintf(ConType[pAC->Index], "Auto");
+ }
+
+ if (strcmp(ConType[pAC->Index],"")==0) {
+ IsConTypeDefined = SK_FALSE; /* No ConType defined */
+ } else if (strcmp(ConType[pAC->Index],"Auto")==0) {
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO;
+ }
+ } else if (strcmp(ConType[pAC->Index],"100FD")==0) {
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS;
+ }
+ } else if (strcmp(ConType[pAC->Index],"100HD")==0) {
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS;
+ }
+ } else if (strcmp(ConType[pAC->Index],"10FD")==0) {
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS;
+ }
+ } else if (strcmp(ConType[pAC->Index],"10HD")==0) {
+ for (Port = 0; Port < SK_MAX_MACS; Port++) {
+ M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF];
+ M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
+ M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
+ M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS;
+ }
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for ConType\n",
+ ConType[pAC->Index]);
+ IsConTypeDefined = SK_FALSE; /* Wrong ConType defined */
+ }
+ } else {
+ IsConTypeDefined = SK_FALSE; /* No ConType defined */
+ }
+
+ /*
+ ** Parse any parameter settings for port A:
+ ** a) any LinkSpeed stated?
+ */
+ if (Speed_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ Speed_A[pAC->Index] != NULL) {
+ if (strcmp(Speed_A[pAC->Index],"")==0) {
+ IsLinkSpeedDefined = SK_FALSE;
+ } else if (strcmp(Speed_A[pAC->Index],"Auto")==0) {
+ LinkSpeed = SK_LSPEED_AUTO;
+ } else if (strcmp(Speed_A[pAC->Index],"10")==0) {
+ LinkSpeed = SK_LSPEED_10MBPS;
+ } else if (strcmp(Speed_A[pAC->Index],"100")==0) {
+ LinkSpeed = SK_LSPEED_100MBPS;
+ } else if (strcmp(Speed_A[pAC->Index],"1000")==0) {
+ LinkSpeed = SK_LSPEED_1000MBPS;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for Speed_A\n",
+ Speed_A[pAC->Index]);
+ IsLinkSpeedDefined = SK_FALSE;
+ }
+ } else {
+ IsLinkSpeedDefined = SK_FALSE;
+ }
+
+ /*
+ ** Check speed parameter:
+ ** Only copper type adapter and GE V2 cards
+ */
+ if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) &&
+ ((LinkSpeed != SK_LSPEED_AUTO) &&
+ (LinkSpeed != SK_LSPEED_1000MBPS))) {
+ printk("sk98lin: Illegal value for Speed_A. "
+ "Not a copper card or GE V2 card\n Using "
+ "speed 1000\n");
+ LinkSpeed = SK_LSPEED_1000MBPS;
+ }
+
+ /*
+	** Decide whether to set a new config value if something valid has
+ ** been received.
+ */
+ if (IsLinkSpeedDefined) {
+ pAC->GIni.GP[0].PLinkSpeed = LinkSpeed;
+ }
+
+ /*
+ ** b) Any Autonegotiation and DuplexCapabilities set?
+ ** Please note that both belong together...
+ */
+ AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! */
+ AutoSet = SK_FALSE;
+ if (AutoNeg_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ AutoNeg_A[pAC->Index] != NULL) {
+ AutoSet = SK_TRUE;
+ if (strcmp(AutoNeg_A[pAC->Index],"")==0) {
+ AutoSet = SK_FALSE;
+ } else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) {
+ AutoNeg = AN_ON;
+ } else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) {
+ AutoNeg = AN_OFF;
+ } else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) {
+ AutoNeg = AN_SENS;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for AutoNeg_A\n",
+ AutoNeg_A[pAC->Index]);
+ }
+ }
+
+ DuplexCap = DC_BOTH;
+ DupSet = SK_FALSE;
+ if (DupCap_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ DupCap_A[pAC->Index] != NULL) {
+ DupSet = SK_TRUE;
+ if (strcmp(DupCap_A[pAC->Index],"")==0) {
+ DupSet = SK_FALSE;
+ } else if (strcmp(DupCap_A[pAC->Index],"Both")==0) {
+ DuplexCap = DC_BOTH;
+ } else if (strcmp(DupCap_A[pAC->Index],"Full")==0) {
+ DuplexCap = DC_FULL;
+ } else if (strcmp(DupCap_A[pAC->Index],"Half")==0) {
+ DuplexCap = DC_HALF;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for DupCap_A\n",
+ DupCap_A[pAC->Index]);
+ }
+ }
+
+ /*
+ ** Check for illegal combinations
+ */
+ if ((LinkSpeed == SK_LSPEED_1000MBPS) &&
+ ((DuplexCap == SK_LMODE_STAT_AUTOHALF) ||
+ (DuplexCap == SK_LMODE_STAT_HALF)) &&
+ (pAC->ChipsetType)) {
+ printk("sk98lin: Half Duplex not possible with Gigabit speed!\n"
+ " Using Full Duplex.\n");
+ DuplexCap = DC_FULL;
+ }
+
+ if ( AutoSet && AutoNeg==AN_SENS && DupSet) {
+ printk("sk98lin, Port A: DuplexCapabilities"
+ " ignored using Sense mode\n");
+ }
+
+ if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){
+ printk("sk98lin: Port A: Illegal combination"
+ " of values AutoNeg. and DuplexCap.\n Using "
+ "Full Duplex\n");
+ DuplexCap = DC_FULL;
+ }
+
+ if (AutoSet && AutoNeg==AN_OFF && !DupSet) {
+ DuplexCap = DC_FULL;
+ }
+
+ if (!AutoSet && DupSet) {
+ printk("sk98lin: Port A: Duplex setting not"
+ " possible in\n default AutoNegotiation mode"
+ " (Sense).\n Using AutoNegotiation On\n");
+ AutoNeg = AN_ON;
+ }
+
+ /*
+ ** set the desired mode
+ */
+ if (AutoSet || DupSet) {
+ pAC->GIni.GP[0].PLinkModeConf = Capabilities[AutoNeg][DuplexCap];
+ }
+
+ /*
+ ** c) Any Flowcontrol-parameter set?
+ */
+ if (FlowCtrl_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ FlowCtrl_A[pAC->Index] != NULL) {
+ if (strcmp(FlowCtrl_A[pAC->Index],"") == 0) {
+ IsFlowCtrlDefined = SK_FALSE;
+ } else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) {
+ FlowCtrl = SK_FLOW_MODE_SYM_OR_REM;
+ } else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) {
+ FlowCtrl = SK_FLOW_MODE_SYMMETRIC;
+ } else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) {
+ FlowCtrl = SK_FLOW_MODE_LOC_SEND;
+ } else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) {
+ FlowCtrl = SK_FLOW_MODE_NONE;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for FlowCtrl_A\n",
+ FlowCtrl_A[pAC->Index]);
+ IsFlowCtrlDefined = SK_FALSE;
+ }
+ } else {
+ IsFlowCtrlDefined = SK_FALSE;
+ }
+
+ if (IsFlowCtrlDefined) {
+ if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) {
+ printk("sk98lin: Port A: FlowControl"
+ " impossible without AutoNegotiation,"
+ " disabled\n");
+ FlowCtrl = SK_FLOW_MODE_NONE;
+ }
+ pAC->GIni.GP[0].PFlowCtrlMode = FlowCtrl;
+ }
+
+ /*
+ ** d) What is with the RoleParameter?
+ */
+ if (Role_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ Role_A[pAC->Index] != NULL) {
+ if (strcmp(Role_A[pAC->Index],"")==0) {
+ IsRoleDefined = SK_FALSE;
+ } else if (strcmp(Role_A[pAC->Index],"Auto")==0) {
+ MSMode = SK_MS_MODE_AUTO;
+ } else if (strcmp(Role_A[pAC->Index],"Master")==0) {
+ MSMode = SK_MS_MODE_MASTER;
+ } else if (strcmp(Role_A[pAC->Index],"Slave")==0) {
+ MSMode = SK_MS_MODE_SLAVE;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for Role_A\n",
+ Role_A[pAC->Index]);
+ IsRoleDefined = SK_FALSE;
+ }
+ } else {
+ IsRoleDefined = SK_FALSE;
+ }
+
+ if (IsRoleDefined == SK_TRUE) {
+ pAC->GIni.GP[0].PMSMode = MSMode;
+ }
+
+
+
+ /*
+ ** Parse any parameter settings for port B:
+ ** a) any LinkSpeed stated?
+ */
+ IsConTypeDefined = SK_TRUE;
+ IsLinkSpeedDefined = SK_TRUE;
+ IsFlowCtrlDefined = SK_TRUE;
+ IsModeDefined = SK_TRUE;
+
+ if (Speed_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ Speed_B[pAC->Index] != NULL) {
+ if (strcmp(Speed_B[pAC->Index],"")==0) {
+ IsLinkSpeedDefined = SK_FALSE;
+ } else if (strcmp(Speed_B[pAC->Index],"Auto")==0) {
+ LinkSpeed = SK_LSPEED_AUTO;
+ } else if (strcmp(Speed_B[pAC->Index],"10")==0) {
+ LinkSpeed = SK_LSPEED_10MBPS;
+ } else if (strcmp(Speed_B[pAC->Index],"100")==0) {
+ LinkSpeed = SK_LSPEED_100MBPS;
+ } else if (strcmp(Speed_B[pAC->Index],"1000")==0) {
+ LinkSpeed = SK_LSPEED_1000MBPS;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for Speed_B\n",
+ Speed_B[pAC->Index]);
+ IsLinkSpeedDefined = SK_FALSE;
+ }
+ } else {
+ IsLinkSpeedDefined = SK_FALSE;
+ }
+
+ /*
+ ** Check speed parameter:
+ ** Only copper type adapter and GE V2 cards
+ */
+ if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) &&
+ ((LinkSpeed != SK_LSPEED_AUTO) &&
+ (LinkSpeed != SK_LSPEED_1000MBPS))) {
+ printk("sk98lin: Illegal value for Speed_B. "
+ "Not a copper card or GE V2 card\n Using "
+ "speed 1000\n");
+ LinkSpeed = SK_LSPEED_1000MBPS;
+ }
+
+ /*
+	** Decide whether to set a new config value if something valid has
+ ** been received.
+ */
+ if (IsLinkSpeedDefined) {
+ pAC->GIni.GP[1].PLinkSpeed = LinkSpeed;
+ }
+
+ /*
+ ** b) Any Autonegotiation and DuplexCapabilities set?
+ ** Please note that both belong together...
+ */
+ AutoNeg = AN_SENS; /* default: do auto Sense */
+ AutoSet = SK_FALSE;
+ if (AutoNeg_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ AutoNeg_B[pAC->Index] != NULL) {
+ AutoSet = SK_TRUE;
+ if (strcmp(AutoNeg_B[pAC->Index],"")==0) {
+ AutoSet = SK_FALSE;
+ } else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) {
+ AutoNeg = AN_ON;
+ } else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) {
+ AutoNeg = AN_OFF;
+ } else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) {
+ AutoNeg = AN_SENS;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for AutoNeg_B\n",
+ AutoNeg_B[pAC->Index]);
+ }
+ }
+
+ DuplexCap = DC_BOTH;
+ DupSet = SK_FALSE;
+ if (DupCap_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ DupCap_B[pAC->Index] != NULL) {
+ DupSet = SK_TRUE;
+ if (strcmp(DupCap_B[pAC->Index],"")==0) {
+ DupSet = SK_FALSE;
+ } else if (strcmp(DupCap_B[pAC->Index],"Both")==0) {
+ DuplexCap = DC_BOTH;
+ } else if (strcmp(DupCap_B[pAC->Index],"Full")==0) {
+ DuplexCap = DC_FULL;
+ } else if (strcmp(DupCap_B[pAC->Index],"Half")==0) {
+ DuplexCap = DC_HALF;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for DupCap_B\n",
+ DupCap_B[pAC->Index]);
+ }
+ }
+
+
+ /*
+ ** Check for illegal combinations
+ */
+ if ((LinkSpeed == SK_LSPEED_1000MBPS) &&
+ ((DuplexCap == SK_LMODE_STAT_AUTOHALF) ||
+ (DuplexCap == SK_LMODE_STAT_HALF)) &&
+ (pAC->ChipsetType)) {
+ printk("sk98lin: Half Duplex not possible with Gigabit speed!\n"
+ " Using Full Duplex.\n");
+ DuplexCap = DC_FULL;
+ }
+
+ if (AutoSet && AutoNeg==AN_SENS && DupSet) {
+ printk("sk98lin, Port B: DuplexCapabilities"
+ " ignored using Sense mode\n");
+ }
+
+ if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){
+ printk("sk98lin: Port B: Illegal combination"
+ " of values AutoNeg. and DuplexCap.\n Using "
+ "Full Duplex\n");
+ DuplexCap = DC_FULL;
+ }
+
+ if (AutoSet && AutoNeg==AN_OFF && !DupSet) {
+ DuplexCap = DC_FULL;
+ }
+
+ if (!AutoSet && DupSet) {
+ printk("sk98lin: Port B: Duplex setting not"
+ " possible in\n default AutoNegotiation mode"
+ " (Sense).\n Using AutoNegotiation On\n");
+ AutoNeg = AN_ON;
+ }
+
+ /*
+ ** set the desired mode
+ */
+ if (AutoSet || DupSet) {
+ pAC->GIni.GP[1].PLinkModeConf = Capabilities[AutoNeg][DuplexCap];
+ }
+
+ /*
+ ** c) Any FlowCtrl parameter set?
+ */
+ if (FlowCtrl_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ FlowCtrl_B[pAC->Index] != NULL) {
+ if (strcmp(FlowCtrl_B[pAC->Index],"") == 0) {
+ IsFlowCtrlDefined = SK_FALSE;
+ } else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) {
+ FlowCtrl = SK_FLOW_MODE_SYM_OR_REM;
+ } else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) {
+ FlowCtrl = SK_FLOW_MODE_SYMMETRIC;
+ } else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) {
+ FlowCtrl = SK_FLOW_MODE_LOC_SEND;
+ } else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) {
+ FlowCtrl = SK_FLOW_MODE_NONE;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for FlowCtrl_B\n",
+ FlowCtrl_B[pAC->Index]);
+ IsFlowCtrlDefined = SK_FALSE;
+ }
+ } else {
+ IsFlowCtrlDefined = SK_FALSE;
+ }
+
+ if (IsFlowCtrlDefined) {
+ if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) {
+ printk("sk98lin: Port B: FlowControl"
+ " impossible without AutoNegotiation,"
+ " disabled\n");
+ FlowCtrl = SK_FLOW_MODE_NONE;
+ }
+ pAC->GIni.GP[1].PFlowCtrlMode = FlowCtrl;
+ }
+
+ /*
+ ** d) What is the RoleParameter?
+ */
+ if (Role_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ Role_B[pAC->Index] != NULL) {
+ if (strcmp(Role_B[pAC->Index],"")==0) {
+ IsRoleDefined = SK_FALSE;
+ } else if (strcmp(Role_B[pAC->Index],"Auto")==0) {
+ MSMode = SK_MS_MODE_AUTO;
+ } else if (strcmp(Role_B[pAC->Index],"Master")==0) {
+ MSMode = SK_MS_MODE_MASTER;
+ } else if (strcmp(Role_B[pAC->Index],"Slave")==0) {
+ MSMode = SK_MS_MODE_SLAVE;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for Role_B\n",
+ Role_B[pAC->Index]);
+ IsRoleDefined = SK_FALSE;
+ }
+ } else {
+ IsRoleDefined = SK_FALSE;
+ }
+
+ if (IsRoleDefined) {
+ pAC->GIni.GP[1].PMSMode = MSMode;
+ }
+
+ /*
+ ** Evaluate settings for both ports
+ */
+ pAC->ActivePort = 0;
+ if (PrefPort != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ PrefPort[pAC->Index] != NULL) {
+ if (strcmp(PrefPort[pAC->Index],"") == 0) { /* Auto */
+ pAC->ActivePort = 0;
+ pAC->Rlmt.Net[0].Preference = -1; /* auto */
+ pAC->Rlmt.Net[0].PrefPort = 0;
+ } else if (strcmp(PrefPort[pAC->Index],"A") == 0) {
+ /*
+ ** do not set ActivePort here, thus a port
+ ** switch is issued after net up.
+ */
+ Port = 0;
+ pAC->Rlmt.Net[0].Preference = Port;
+ pAC->Rlmt.Net[0].PrefPort = Port;
+ } else if (strcmp(PrefPort[pAC->Index],"B") == 0) {
+ /*
+ ** do not set ActivePort here, thus a port
+ ** switch is issued after net up.
+ */
+ if (pAC->GIni.GIMacsFound == 1) {
+ printk("sk98lin: Illegal value \"B\" for PrefPort.\n"
+ " Port B not available on single port adapters.\n");
+
+ pAC->ActivePort = 0;
+ pAC->Rlmt.Net[0].Preference = -1; /* auto */
+ pAC->Rlmt.Net[0].PrefPort = 0;
+ } else {
+ Port = 1;
+ pAC->Rlmt.Net[0].Preference = Port;
+ pAC->Rlmt.Net[0].PrefPort = Port;
+ }
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for PrefPort\n",
+ PrefPort[pAC->Index]);
+ }
+ }
+
+ pAC->RlmtNets = 1;
+
+ if (RlmtMode != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
+ RlmtMode[pAC->Index] != NULL) {
+ if (strcmp(RlmtMode[pAC->Index], "") == 0) {
+ pAC->RlmtMode = 0;
+ } else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) {
+ pAC->RlmtMode = SK_RLMT_CHECK_LINK;
+ } else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) {
+ pAC->RlmtMode = SK_RLMT_CHECK_LINK |
+ SK_RLMT_CHECK_LOC_LINK;
+ } else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) {
+ pAC->RlmtMode = SK_RLMT_CHECK_LINK |
+ SK_RLMT_CHECK_LOC_LINK |
+ SK_RLMT_CHECK_SEG;
+ } else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) &&
+ (pAC->GIni.GIMacsFound == 2)) {
+ pAC->RlmtMode = SK_RLMT_CHECK_LINK;
+ pAC->RlmtNets = 2;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for"
+ " RlmtMode, using default\n",
+ RlmtMode[pAC->Index]);
+ pAC->RlmtMode = 0;
+ }
+ } else {
+ pAC->RlmtMode = 0;
+ }
+
+ /*
+ ** Check the interrupt moderation parameters
+ */
+ if (Moderation[pAC->Index] != NULL) {
+ if (strcmp(Moderation[pAC->Index], "") == 0) {
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
+ } else if (strcmp(Moderation[pAC->Index], "Static") == 0) {
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_STATIC;
+ } else if (strcmp(Moderation[pAC->Index], "Dynamic") == 0) {
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_DYNAMIC;
+ } else if (strcmp(Moderation[pAC->Index], "None") == 0) {
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
+ } else {
+ printk("sk98lin: Illegal value \"%s\" for Moderation.\n"
+ " Disable interrupt moderation.\n",
+ Moderation[pAC->Index]);
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
+ }
+ } else {
+ pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
+ }
+
+ if (Stats[pAC->Index] != NULL) {
+ if (strcmp(Stats[pAC->Index], "Yes") == 0) {
+ pAC->DynIrqModInfo.DisplayStats = SK_TRUE;
+ } else {
+ pAC->DynIrqModInfo.DisplayStats = SK_FALSE;
+ }
+ } else {
+ pAC->DynIrqModInfo.DisplayStats = SK_FALSE;
+ }
+
+ if (ModerationMask[pAC->Index] != NULL) {
+ if (strcmp(ModerationMask[pAC->Index], "Rx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY;
+ } else if (strcmp(ModerationMask[pAC->Index], "Tx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_ONLY;
+ } else if (strcmp(ModerationMask[pAC->Index], "Sp") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_ONLY;
+ } else if (strcmp(ModerationMask[pAC->Index], "RxSp") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX;
+ } else if (strcmp(ModerationMask[pAC->Index], "SpRx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX;
+ } else if (strcmp(ModerationMask[pAC->Index], "RxTx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
+ } else if (strcmp(ModerationMask[pAC->Index], "TxRx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
+ } else if (strcmp(ModerationMask[pAC->Index], "TxSp") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX;
+ } else if (strcmp(ModerationMask[pAC->Index], "SpTx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX;
+ } else if (strcmp(ModerationMask[pAC->Index], "RxTxSp") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else if (strcmp(ModerationMask[pAC->Index], "RxSpTx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else if (strcmp(ModerationMask[pAC->Index], "TxRxSp") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else if (strcmp(ModerationMask[pAC->Index], "TxSpRx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else if (strcmp(ModerationMask[pAC->Index], "SpTxRx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else if (strcmp(ModerationMask[pAC->Index], "SpRxTx") == 0) {
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
+ } else { /* some rubbish */
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY;
+ }
+ } else { /* operator has stated nothing */
+ pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
+ }
+
+ if (AutoSizing[pAC->Index] != NULL) {
+ if (strcmp(AutoSizing[pAC->Index], "On") == 0) {
+ pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
+ } else {
+ pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
+ }
+ } else { /* operator has stated nothing */
+ pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
+ }
+
+ if (IntsPerSec[pAC->Index] != 0) {
+ if ((IntsPerSec[pAC->Index]< C_INT_MOD_IPS_LOWER_RANGE) ||
+ (IntsPerSec[pAC->Index] > C_INT_MOD_IPS_UPPER_RANGE)) {
+ printk("sk98lin: Illegal value \"%d\" for IntsPerSec. (Range: %d - %d)\n"
+ " Using default value of %i.\n",
+ IntsPerSec[pAC->Index],
+ C_INT_MOD_IPS_LOWER_RANGE,
+ C_INT_MOD_IPS_UPPER_RANGE,
+ C_INTS_PER_SEC_DEFAULT);
+ pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT;
+ } else {
+ pAC->DynIrqModInfo.MaxModIntsPerSec = IntsPerSec[pAC->Index];
+ }
+ } else {
+ pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT;
+ }
+
+ /*
+ ** Evaluate upper and lower moderation threshold
+ */
+ pAC->DynIrqModInfo.MaxModIntsPerSecUpperLimit =
+ pAC->DynIrqModInfo.MaxModIntsPerSec +
+ (pAC->DynIrqModInfo.MaxModIntsPerSec / 2);
+
+ pAC->DynIrqModInfo.MaxModIntsPerSecLowerLimit =
+ pAC->DynIrqModInfo.MaxModIntsPerSec -
+ (pAC->DynIrqModInfo.MaxModIntsPerSec / 2);
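+	/*
+	** The upper limit is therefore 1.5 * MaxModIntsPerSec and the
+	** lower limit is 0.5 * MaxModIntsPerSec.
+	*/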
+
+ pAC->DynIrqModInfo.PrevTimeVal = jiffies; /* initial value */
+
+
+} /* GetConfiguration */
+
+
+/*****************************************************************************
+ *
+ * ProductStr - return an adapter identification string from vpd
+ *
+ * Description:
+ * This function reads the product name string from the vpd area
+ *	and puts it into the field pAC->DeviceStr.
+ *
+ * Returns: N/A
+ */
+static void ProductStr(
+SK_AC *pAC /* pointer to adapter context */
+)
+{
+int StrLen = 80; /* length of the string, defined in SK_AC */
+char Keyword[] = VPD_NAME; /* vpd productname identifier */
+int ReturnCode; /* return code from vpd_read */
+unsigned long Flags;
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ ReturnCode = VpdRead(pAC, pAC->IoBase, Keyword, pAC->DeviceStr,
+ &StrLen);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ if (ReturnCode != 0) {
+ /* there was an error reading the vpd data */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ERROR,
+ ("Error reading VPD data: %d\n", ReturnCode));
+ pAC->DeviceStr[0] = '\0';
+ }
+} /* ProductStr */
+
+/*****************************************************************************
+ *
+ * StartDrvCleanupTimer - Start timer to check for descriptors which
+ *                        might be placed in the descriptor ring, but
+ *                        have not been handled up to now
+ *
+ * Description:
+ *	This function requests a HW-timer for the Yukon card. The actions to
+ *	perform when this timer expires are located in SkDrvEvent().
+ *
+ * Returns: N/A
+ */
+static void
+StartDrvCleanupTimer(SK_AC *pAC) {
+ SK_EVPARA EventParam; /* Event struct for timer event */
+
+ SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam));
+ EventParam.Para32[0] = SK_DRV_RX_CLEANUP_TIMER;
+ SkTimerStart(pAC, pAC->IoBase, &pAC->DrvCleanupTimer,
+ SK_DRV_RX_CLEANUP_TIMER_LENGTH,
+ SKGE_DRV, SK_DRV_TIMER, EventParam);
+}
+
+/*****************************************************************************
+ *
+ * StopDrvCleanupTimer - Stop timer to check for descriptors
+ *
+ * Description:
+ *	This function stops the HW-timer of the Yukon card that was started
+ *	by StartDrvCleanupTimer() and clears the timer structure.
+ *
+ * Returns: N/A
+ */
+static void
+StopDrvCleanupTimer(SK_AC *pAC) {
+ SkTimerStop(pAC, pAC->IoBase, &pAC->DrvCleanupTimer);
+ SK_MEMSET((char *) &pAC->DrvCleanupTimer, 0, sizeof(SK_TIMER));
+}
+
+/****************************************************************************/
+/* functions for common modules *********************************************/
+/****************************************************************************/
+
+
+/*****************************************************************************
+ *
+ * SkDrvAllocRlmtMbuf - allocate an RLMT mbuf
+ *
+ * Description:
+ * This routine returns an RLMT mbuf or NULL. The RLMT Mbuf structure
+ *	is embedded into a socket buffer's data area.
+ *
+ * Context:
+ * runtime
+ *
+ * Returns:
+ * NULL or pointer to Mbuf.
+ */
+SK_MBUF *SkDrvAllocRlmtMbuf(
+SK_AC *pAC, /* pointer to adapter context */
+SK_IOC IoC, /* the IO-context */
+unsigned BufferSize) /* size of the requested buffer */
+{
+SK_MBUF *pRlmtMbuf; /* pointer to a new rlmt-mbuf structure */
+struct sk_buff *pMsgBlock; /* pointer to a new message block */
+
+ pMsgBlock = alloc_skb(BufferSize + sizeof(SK_MBUF), GFP_ATOMIC);
+ if (pMsgBlock == NULL) {
+ return (NULL);
+ }
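+	/* the SK_MBUF header sits at the start of the skb data area;
+	** skb_reserve() moves skb->data past it to the packet buffer
+	*/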
+ pRlmtMbuf = (SK_MBUF*) pMsgBlock->data;
+ skb_reserve(pMsgBlock, sizeof(SK_MBUF));
+ pRlmtMbuf->pNext = NULL;
+ pRlmtMbuf->pOs = pMsgBlock;
+ pRlmtMbuf->pData = pMsgBlock->data; /* Data buffer. */
+ pRlmtMbuf->Size = BufferSize; /* Data buffer size. */
+ pRlmtMbuf->Length = 0; /* Length of packet (<= Size). */
+ return (pRlmtMbuf);
+
+} /* SkDrvAllocRlmtMbuf */
+
+
+/*****************************************************************************
+ *
+ * SkDrvFreeRlmtMbuf - free an RLMT mbuf
+ *
+ * Description:
+ * This routine frees one or more RLMT mbuf(s).
+ *
+ * Context:
+ * runtime
+ *
+ * Returns:
+ * Nothing
+ */
+void SkDrvFreeRlmtMbuf(
+SK_AC *pAC, /* pointer to adapter context */
+SK_IOC IoC, /* the IO-context */
+SK_MBUF		*pMbuf)		/* pointer to the mbuf(s) to be freed */
+{
+SK_MBUF *pFreeMbuf;
+SK_MBUF *pNextMbuf;
+
+ pFreeMbuf = pMbuf;
+ do {
+ pNextMbuf = pFreeMbuf->pNext;
+ DEV_KFREE_SKB_ANY(pFreeMbuf->pOs);
+ pFreeMbuf = pNextMbuf;
+ } while ( pFreeMbuf != NULL );
+} /* SkDrvFreeRlmtMbuf */
+
+
+/*****************************************************************************
+ *
+ * SkOsGetTime - provide a time value
+ *
+ * Description:
+ * This routine provides a time value. The unit is 1/HZ (defined by Linux).
+ * It is not used for absolute time, but only for time differences.
+ *
+ *
+ * Returns:
+ * Time value
+ */
+SK_U64 SkOsGetTime(SK_AC *pAC)
+{
+ SK_U64 PrivateJiffies;
+ SkOsGetTimeCurrent(pAC, &PrivateJiffies);
+ return PrivateJiffies;
+} /* SkOsGetTime */
+
+
+/*****************************************************************************
+ *
+ * SkPciReadCfgDWord - read a 32 bit value from pci config space
+ *
+ * Description:
+ * This routine reads a 32 bit value from the pci configuration
+ * space.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciReadCfgDWord(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U32 *pVal) /* pointer to store the read value */
+{
+ pci_read_config_dword(pAC->PciDev, PciAddr, pVal);
+ return(0);
+} /* SkPciReadCfgDWord */
+
+
+/*****************************************************************************
+ *
+ * SkPciReadCfgWord - read a 16 bit value from pci config space
+ *
+ * Description:
+ * This routine reads a 16 bit value from the pci configuration
+ * space.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciReadCfgWord(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U16 *pVal) /* pointer to store the read value */
+{
+ pci_read_config_word(pAC->PciDev, PciAddr, pVal);
+ return(0);
+} /* SkPciReadCfgWord */
+
+
+/*****************************************************************************
+ *
+ *	SkPciReadCfgByte - read an 8 bit value from pci config space
+ *
+ * Description:
+ *	This routine reads an 8 bit value from the pci configuration
+ * space.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciReadCfgByte(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U8 *pVal) /* pointer to store the read value */
+{
+ pci_read_config_byte(pAC->PciDev, PciAddr, pVal);
+ return(0);
+} /* SkPciReadCfgByte */
+
+
+/*****************************************************************************
+ *
+ * SkPciWriteCfgDWord - write a 32 bit value to pci config space
+ *
+ * Description:
+ * This routine writes a 32 bit value to the pci configuration
+ * space.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciWriteCfgDWord(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U32	Val)		/* value to write */
+{
+ pci_write_config_dword(pAC->PciDev, PciAddr, Val);
+ return(0);
+} /* SkPciWriteCfgDWord */
+
+
+/*****************************************************************************
+ *
+ * SkPciWriteCfgWord - write a 16 bit value to pci config space
+ *
+ * Description:
+ * This routine writes a 16 bit value to the pci configuration
+ * space. The flag PciConfigUp indicates whether the config space
+ *	is accessible or must be set up first.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciWriteCfgWord(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U16	Val)		/* value to write */
+{
+ pci_write_config_word(pAC->PciDev, PciAddr, Val);
+ return(0);
+} /* SkPciWriteCfgWord */
+
+
+/*****************************************************************************
+ *
+ *	SkPciWriteCfgByte - write an 8 bit value to pci config space
+ *
+ * Description:
+ *	This routine writes an 8 bit value to the pci configuration
+ *	space. The flag PciConfigUp indicates whether the config space
+ *	is accessible or must be set up first.
+ *
+ * Returns:
+ * 0 - indicate everything worked ok.
+ * != 0 - error indication
+ */
+int SkPciWriteCfgByte(
+SK_AC *pAC, /* Adapter Control structure pointer */
+int PciAddr, /* PCI register address */
+SK_U8	Val)		/* value to write */
+{
+ pci_write_config_byte(pAC->PciDev, PciAddr, Val);
+ return(0);
+} /* SkPciWriteCfgByte */
+
+
+/*****************************************************************************
+ *
+ * SkDrvEvent - handle driver events
+ *
+ * Description:
+ * This function handles events from all modules directed to the driver
+ *
+ * Context:
+ * Is called under protection of slow path lock.
+ *
+ * Returns:
+ * 0 if everything ok
+ * < 0 on error
+ *
+ */
+int SkDrvEvent(
+SK_AC *pAC, /* pointer to adapter context */
+SK_IOC IoC, /* io-context */
+SK_U32 Event, /* event-id */
+SK_EVPARA Param) /* event-parameter */
+{
+SK_MBUF *pRlmtMbuf; /* pointer to a rlmt-mbuf structure */
+struct sk_buff *pMsg; /* pointer to a message block */
+int FromPort; /* the port from which we switch away */
+int ToPort; /* the port we switch to */
+SK_EVPARA NewPara; /* parameter for further events */
+int Stat;
+unsigned long Flags;
+SK_BOOL DualNet;
+
+ switch (Event) {
+ case SK_DRV_ADAP_FAIL:
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("ADAPTER FAIL EVENT\n"));
+ printk("%s: Adapter failed.\n", pAC->dev[0]->name);
+ /* disable interrupts */
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ /* cgoos */
+ break;
+ case SK_DRV_PORT_FAIL:
+ FromPort = Param.Para32[0];
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("PORT FAIL EVENT, Port: %d\n", FromPort));
+ if (FromPort == 0) {
+ printk("%s: Port A failed.\n", pAC->dev[0]->name);
+ } else {
+ printk("%s: Port B failed.\n", pAC->dev[1]->name);
+ }
+ /* cgoos */
+ break;
+ case SK_DRV_PORT_RESET: /* SK_U32 PortIdx */
+ /* action list 4 */
+ FromPort = Param.Para32[0];
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("PORT RESET EVENT, Port: %d ", FromPort));
+ NewPara.Para64 = FromPort;
+ SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
+ spin_lock_irqsave(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+
+ SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST);
+ pAC->dev[Param.Para32[0]]->flags &= ~IFF_RUNNING;
+ spin_unlock_irqrestore(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+
+ /* clear rx ring from received frames */
+ ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE);
+
+ ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]);
+ spin_lock_irqsave(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+
+ /* tschilling: Handling of return value inserted. */
+ if (SkGeInitPort(pAC, IoC, FromPort)) {
+ if (FromPort == 0) {
+ printk("%s: SkGeInitPort A failed.\n", pAC->dev[0]->name);
+ } else {
+ printk("%s: SkGeInitPort B failed.\n", pAC->dev[1]->name);
+ }
+ }
+ SkAddrMcUpdate(pAC,IoC, FromPort);
+ PortReInitBmu(pAC, FromPort);
+ SkGePollTxD(pAC, IoC, FromPort, SK_TRUE);
+ ClearAndStartRx(pAC, FromPort);
+ spin_unlock_irqrestore(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+ break;
+ case SK_DRV_NET_UP: /* SK_U32 PortIdx */
+ /* action list 5 */
+ FromPort = Param.Para32[0];
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("NET UP EVENT, Port: %d ", Param.Para32[0]));
+ /* Mac update */
+ SkAddrMcUpdate(pAC,IoC, FromPort);
+
+ if (DoPrintInterfaceChange) {
+ printk("%s: network connection up using"
+ " port %c\n", pAC->dev[Param.Para32[0]]->name, 'A'+Param.Para32[0]);
+
+ /* tschilling: Values changed according to LinkSpeedUsed. */
+ Stat = pAC->GIni.GP[FromPort].PLinkSpeedUsed;
+ if (Stat == SK_LSPEED_STAT_10MBPS) {
+ printk(" speed: 10\n");
+ } else if (Stat == SK_LSPEED_STAT_100MBPS) {
+ printk(" speed: 100\n");
+ } else if (Stat == SK_LSPEED_STAT_1000MBPS) {
+ printk(" speed: 1000\n");
+ } else {
+ printk(" speed: unknown\n");
+ }
+
+
+ Stat = pAC->GIni.GP[FromPort].PLinkModeStatus;
+ if (Stat == SK_LMODE_STAT_AUTOHALF ||
+ Stat == SK_LMODE_STAT_AUTOFULL) {
+ printk(" autonegotiation: yes\n");
+ }
+ else {
+ printk(" autonegotiation: no\n");
+ }
+ if (Stat == SK_LMODE_STAT_AUTOHALF ||
+ Stat == SK_LMODE_STAT_HALF) {
+ printk(" duplex mode: half\n");
+ }
+ else {
+ printk(" duplex mode: full\n");
+ }
+ Stat = pAC->GIni.GP[FromPort].PFlowCtrlStatus;
+ if (Stat == SK_FLOW_STAT_REM_SEND ) {
+ printk(" flowctrl: remote send\n");
+ }
+ else if (Stat == SK_FLOW_STAT_LOC_SEND ){
+ printk(" flowctrl: local send\n");
+ }
+ else if (Stat == SK_FLOW_STAT_SYMMETRIC ){
+ printk(" flowctrl: symmetric\n");
+ }
+ else {
+ printk(" flowctrl: none\n");
+ }
+
+ /* tschilling: Check against CopperType now. */
+ if ((pAC->GIni.GICopperType == SK_TRUE) &&
+ (pAC->GIni.GP[FromPort].PLinkSpeedUsed ==
+ SK_LSPEED_STAT_1000MBPS)) {
+ Stat = pAC->GIni.GP[FromPort].PMSStatus;
+ if (Stat == SK_MS_STAT_MASTER ) {
+ printk(" role: master\n");
+ }
+ else if (Stat == SK_MS_STAT_SLAVE ) {
+ printk(" role: slave\n");
+ }
+ else {
+ printk(" role: ???\n");
+ }
+ }
+
+ /*
+ Display dim (dynamic interrupt moderation)
+				information
+ */
+ if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC)
+ printk(" irq moderation: static (%d ints/sec)\n",
+ pAC->DynIrqModInfo.MaxModIntsPerSec);
+ else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC)
+ printk(" irq moderation: dynamic (%d ints/sec)\n",
+ pAC->DynIrqModInfo.MaxModIntsPerSec);
+ else
+ printk(" irq moderation: disabled\n");
+
+
+#ifdef SK_ZEROCOPY
+ if (pAC->ChipsetType)
+#ifdef USE_SK_TX_CHECKSUM
+ printk(" scatter-gather: enabled\n");
+#else
+ printk(" tx-checksum: disabled\n");
+#endif
+ else
+ printk(" scatter-gather: disabled\n");
+#else
+ printk(" scatter-gather: disabled\n");
+#endif
+
+#ifndef USE_SK_RX_CHECKSUM
+ printk(" rx-checksum: disabled\n");
+#endif
+
+ } else {
+ DoPrintInterfaceChange = SK_TRUE;
+ }
+
+ if ((Param.Para32[0] != pAC->ActivePort) &&
+ (pAC->RlmtNets == 1)) {
+ NewPara.Para32[0] = pAC->ActivePort;
+ NewPara.Para32[1] = Param.Para32[0];
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_INTERN,
+ NewPara);
+ }
+
+ /* Inform the world that link protocol is up. */
+ pAC->dev[Param.Para32[0]]->flags |= IFF_RUNNING;
+
+ break;
+ case SK_DRV_NET_DOWN: /* SK_U32 Reason */
+ /* action list 7 */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("NET DOWN EVENT "));
+ if (DoPrintInterfaceChange) {
+ printk("%s: network connection down\n",
+ pAC->dev[Param.Para32[1]]->name);
+ } else {
+ DoPrintInterfaceChange = SK_TRUE;
+ }
+ pAC->dev[Param.Para32[1]]->flags &= ~IFF_RUNNING;
+ break;
+ case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("PORT SWITCH HARD "));
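+		/* fall through */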
+ case SK_DRV_SWITCH_SOFT: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
+ /* action list 6 */
+ printk("%s: switching to port %c\n", pAC->dev[0]->name,
+ 'A'+Param.Para32[1]);
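+		/* fall through */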
+ case SK_DRV_SWITCH_INTERN: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
+ FromPort = Param.Para32[0];
+ ToPort = Param.Para32[1];
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("PORT SWITCH EVENT, From: %d To: %d (Pref %d) ",
+ FromPort, ToPort, pAC->Rlmt.Net[0].PrefPort));
+ NewPara.Para64 = FromPort;
+ SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
+ NewPara.Para64 = ToPort;
+ SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
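+		/* lock FromPort first, then ToPort; released in reverse order below */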
+ spin_lock_irqsave(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+ spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
+ SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_SOFT_RST);
+ SkGeStopPort(pAC, IoC, ToPort, SK_STOP_ALL, SK_SOFT_RST);
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
+ spin_unlock_irqrestore(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+
+ ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); /* clears rx ring */
+ ReceiveIrq(pAC, &pAC->RxPort[ToPort], SK_FALSE); /* clears rx ring */
+
+ ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]);
+ ClearTxRing(pAC, &pAC->TxPort[ToPort][TX_PRIO_LOW]);
+ spin_lock_irqsave(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+ spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
+ pAC->ActivePort = ToPort;
+#if 0
+ SetQueueSizes(pAC);
+#else
+ /* tschilling: New common function with minimum size check. */
+ DualNet = SK_FALSE;
+ if (pAC->RlmtNets == 2) {
+ DualNet = SK_TRUE;
+ }
+
+ if (SkGeInitAssignRamToQueues(
+ pAC,
+ pAC->ActivePort,
+ DualNet)) {
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
+ spin_unlock_irqrestore(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+ printk("SkGeInitAssignRamToQueues failed.\n");
+ break;
+ }
+#endif
+ /* tschilling: Handling of return values inserted. */
+ if (SkGeInitPort(pAC, IoC, FromPort) ||
+ SkGeInitPort(pAC, IoC, ToPort)) {
+ printk("%s: SkGeInitPort failed.\n", pAC->dev[0]->name);
+ }
+ if (Event == SK_DRV_SWITCH_SOFT) {
+ SkMacRxTxEnable(pAC, IoC, FromPort);
+ }
+ SkMacRxTxEnable(pAC, IoC, ToPort);
+ SkAddrSwap(pAC, IoC, FromPort, ToPort);
+ SkAddrMcUpdate(pAC, IoC, FromPort);
+ SkAddrMcUpdate(pAC, IoC, ToPort);
+ PortReInitBmu(pAC, FromPort);
+ PortReInitBmu(pAC, ToPort);
+ SkGePollTxD(pAC, IoC, FromPort, SK_TRUE);
+ SkGePollTxD(pAC, IoC, ToPort, SK_TRUE);
+ ClearAndStartRx(pAC, FromPort);
+ ClearAndStartRx(pAC, ToPort);
+ spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
+ spin_unlock_irqrestore(
+ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
+ Flags);
+ break;
+ case SK_DRV_RLMT_SEND: /* SK_MBUF *pMb */
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("RLS "));
+ pRlmtMbuf = (SK_MBUF*) Param.pParaPtr;
+ pMsg = (struct sk_buff*) pRlmtMbuf->pOs;
+ skb_put(pMsg, pRlmtMbuf->Length);
+ if (XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW],
+ pMsg) < 0)
+
+ DEV_KFREE_SKB_ANY(pMsg);
+ break;
+ case SK_DRV_TIMER:
+ if (Param.Para32[0] == SK_DRV_MODERATION_TIMER) {
+ /*
+ ** expiration of the moderation timer implies that
+ ** dynamic moderation is to be applied
+ */
+ SkDimStartModerationTimer(pAC);
+ SkDimModerate(pAC);
+ if (pAC->DynIrqModInfo.DisplayStats) {
+ SkDimDisplayModerationSettings(pAC);
+ }
+ } else if (Param.Para32[0] == SK_DRV_RX_CLEANUP_TIMER) {
+ /*
+ ** check if we need to check for descriptors which
+ ** haven't been handled the last millisecs
+ */
+ StartDrvCleanupTimer(pAC);
+ if (pAC->GIni.GIMacsFound == 2) {
+ ReceiveIrq(pAC, &pAC->RxPort[1], SK_FALSE);
+ }
+ ReceiveIrq(pAC, &pAC->RxPort[0], SK_FALSE);
+ } else {
+ printk("Expiration of unknown timer\n");
+ }
+ break;
+ default:
+ break;
+ }
+ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
+ ("END EVENT "));
+
+ return (0);
+} /* SkDrvEvent */
+
+
+/*****************************************************************************
+ *
+ * SkErrorLog - log errors
+ *
+ * Description:
+ * This function logs errors to the system buffer and to the console
+ *
+ * Returns:
+ *	Nothing
+ *
+ */
+void SkErrorLog(
+SK_AC *pAC,
+int ErrClass,
+int ErrNum,
+char *pErrorMsg)
+{
+char ClassStr[80];
+
+ switch (ErrClass) {
+ case SK_ERRCL_OTHER:
+ strcpy(ClassStr, "Other error");
+ break;
+ case SK_ERRCL_CONFIG:
+ strcpy(ClassStr, "Configuration error");
+ break;
+ case SK_ERRCL_INIT:
+ strcpy(ClassStr, "Initialization error");
+ break;
+ case SK_ERRCL_NORES:
+ strcpy(ClassStr, "Out of resources error");
+ break;
+ case SK_ERRCL_SW:
+ strcpy(ClassStr, "internal Software error");
+ break;
+ case SK_ERRCL_HW:
+ strcpy(ClassStr, "Hardware failure");
+ break;
+ case SK_ERRCL_COMM:
+ strcpy(ClassStr, "Communication error");
+ break;
+ }
+ printk(KERN_INFO "%s: -- ERROR --\n Class: %s\n"
+ " Nr: 0x%x\n Msg: %s\n", pAC->dev[0]->name,
+ ClassStr, ErrNum, pErrorMsg);
+
+} /* SkErrorLog */
+
+#ifdef SK_DIAG_SUPPORT
+
+/*****************************************************************************
+ *
+ * SkDrvEnterDiagMode - handles DIAG attach request
+ *
+ * Description:
+ *	Notify the kernel NOT to access the card any longer due to DIAG
+ * Deinitialize the Card
+ *
+ * Returns:
+ * int
+ */
+int SkDrvEnterDiagMode(
+SK_AC *pAc) /* pointer to adapter context */
+{
+ DEV_NET *pNet = netdev_priv(pAc->dev[0]);
+ SK_AC *pAC = pNet->pAC;
+
+ SK_MEMCPY(&(pAc->PnmiBackup), &(pAc->PnmiStruct),
+ sizeof(SK_PNMI_STRUCT_DATA));
+
+ pAC->DiagModeActive = DIAG_ACTIVE;
+ if (pAC->BoardLevel > SK_INIT_DATA) {
+ if (pNet->Up) {
+ pAC->WasIfUp[0] = SK_TRUE;
+ pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
+ DoPrintInterfaceChange = SK_FALSE;
+ SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */
+ } else {
+ pAC->WasIfUp[0] = SK_FALSE;
+ }
+ if (pNet != netdev_priv(pAC->dev[1])) {
+ pNet = netdev_priv(pAC->dev[1]);
+ if (pNet->Up) {
+ pAC->WasIfUp[1] = SK_TRUE;
+ pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
+ DoPrintInterfaceChange = SK_FALSE;
+ SkDrvDeInitAdapter(pAC, 1); /* do SkGeClose */
+ } else {
+ pAC->WasIfUp[1] = SK_FALSE;
+ }
+ }
+ pAC->BoardLevel = SK_INIT_DATA;
+ }
+ return(0);
+}
+
+/*****************************************************************************
+ *
+ * SkDrvLeaveDiagMode - handles DIAG detach request
+ *
+ * Description:
+ *	Notify the kernel that it may access the card again after use by DIAG
+ * Initialize the Card
+ *
+ * Returns:
+ * int
+ */
+int SkDrvLeaveDiagMode(
+SK_AC *pAc) /* pointer to adapter control context */
+{
+ SK_MEMCPY(&(pAc->PnmiStruct), &(pAc->PnmiBackup),
+ sizeof(SK_PNMI_STRUCT_DATA));
+ pAc->DiagModeActive = DIAG_NOTACTIVE;
+ pAc->Pnmi.DiagAttached = SK_DIAG_IDLE;
+ if (pAc->WasIfUp[0] == SK_TRUE) {
+ pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
+ DoPrintInterfaceChange = SK_FALSE;
+ SkDrvInitAdapter(pAc, 0); /* first device */
+ }
+ if (pAc->WasIfUp[1] == SK_TRUE) {
+ pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
+ DoPrintInterfaceChange = SK_FALSE;
+ SkDrvInitAdapter(pAc, 1); /* second device */
+ }
+ return(0);
+}
+
+/*****************************************************************************
+ *
+ * ParseDeviceNbrFromSlotName - Evaluate PCI device number
+ *
+ * Description:
+ *	This function parses the PCI slot name information string and
+ *	retrieves the device number from it. The slot name maintained by
+ *	Linux is in the form '02:0a.0', where the first two characters
+ *	represent the bus number in hex (in the sample above this is
+ *	pci bus 0x02) and the next two characters the device number (0x0a).
+ *
+ * Returns:
+ * SK_U32: The device number from the PCI slot name
+ */
+
+static SK_U32 ParseDeviceNbrFromSlotName(
+const char *SlotName) /* pointer to pci slot name eg. '02:0a.0' */
+{
+ char *CurrCharPos = (char *) SlotName;
+ int FirstNibble = -1;
+ int SecondNibble = -1;
+ SK_U32 Result = 0;
+
+ while (*CurrCharPos != '\0') {
+ if (*CurrCharPos == ':') {
+ while (*CurrCharPos != '.') {
+ CurrCharPos++;
+ if ( (*CurrCharPos >= '0') &&
+ (*CurrCharPos <= '9')) {
+ if (FirstNibble == -1) {
+ /* dec. value for '0' */
+ FirstNibble = *CurrCharPos - 48;
+ } else {
+ SecondNibble = *CurrCharPos - 48;
+ }
+ } else if ( (*CurrCharPos >= 'a') &&
+ (*CurrCharPos <= 'f') ) {
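+					/* hex digit: 'a' (ASCII 97) - 87 = 10 */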
+ if (FirstNibble == -1) {
+ FirstNibble = *CurrCharPos - 87;
+ } else {
+ SecondNibble = *CurrCharPos - 87;
+ }
+ } else {
+ Result = 0;
+ }
+ }
+
+ Result = FirstNibble;
+ Result = Result << 4; /* first nibble is higher one */
+ Result = Result | SecondNibble;
+ }
+ CurrCharPos++; /* next character */
+ }
+ return (Result);
+}
+
+/****************************************************************************
+ *
+ * SkDrvDeInitAdapter - deinitialize adapter (this function is only
+ * called if Diag attaches to that card)
+ *
+ * Description:
+ * Close initialized adapter.
+ *
+ * Returns:
+ * 0 - on success
+ * error code - on error
+ */
+static int SkDrvDeInitAdapter(
+SK_AC *pAC, /* pointer to adapter context */
+int devNbr) /* what device is to be handled */
+{
+ struct SK_NET_DEVICE *dev;
+
+ dev = pAC->dev[devNbr];
+
+ /* On Linux 2.6 the network driver does NOT mess with reference
+ ** counts. The driver MUST be able to be unloaded at any time
+ ** due to the possibility of hotplug.
+ */
+ if (SkGeClose(dev) != 0) {
+ return (-1);
+ }
+ return (0);
+
+} /* SkDrvDeInitAdapter() */
+
+/****************************************************************************
+ *
+ * SkDrvInitAdapter - Initialize adapter (this function is only
+ *				called if Diag detaches from that card)
+ *
+ * Description:
+ *	Open and re-initialize the adapter after DIAG usage.
+ *
+ * Returns:
+ * 0 - on success
+ * error code - on error
+ */
+static int SkDrvInitAdapter(
+SK_AC *pAC, /* pointer to adapter context */
+int devNbr) /* what device is to be handled */
+{
+ struct SK_NET_DEVICE *dev;
+
+ dev = pAC->dev[devNbr];
+
+ if (SkGeOpen(dev) != 0) {
+ return (-1);
+ }
+
+ /*
+ ** Use correct MTU size and indicate to kernel TX queue can be started
+ */
+ if (SkGeChangeMtu(dev, dev->mtu) != 0) {
+ return (-1);
+ }
+ return (0);
+
+} /* SkDrvInitAdapter */
+
+#endif
+
+#ifdef DEBUG
+/****************************************************************************/
+/* "debug only" section *****************************************************/
+/****************************************************************************/
+
+
+/*****************************************************************************
+ *
+ * DumpMsg - print a frame
+ *
+ * Description:
+ * This function prints frames to the system logfile/to the console.
+ *
+ * Returns: N/A
+ *
+ */
+static void DumpMsg(struct sk_buff *skb, char *str)
+{
+ int msglen;
+
+ if (skb == NULL) {
+ printk("DumpMsg(): NULL-Message\n");
+ return;
+ }
+
+ if (skb->data == NULL) {
+ printk("DumpMsg(): Message empty\n");
+ return;
+ }
+
+ msglen = skb->len;
+ if (msglen > 64)
+ msglen = 64;
+
+ printk("--- Begin of message from %s , len %d (from %d) ----\n", str, msglen, skb->len);
+
+ DumpData((char *)skb->data, msglen);
+
+ printk("------- End of message ---------\n");
+} /* DumpMsg */
+
+
+
+/*****************************************************************************
+ *
+ * DumpData - print a data area
+ *
+ * Description:
+ * This function prints a area of data to the system logfile/to the
+ * console.
+ *
+ * Returns: N/A
+ *
+ */
+static void DumpData(char *p, int size)
+{
+register int i;
+int haddr, addr;
+char hex_buffer[180];
+char asc_buffer[180];
+char HEXCHAR[] = "0123456789ABCDEF";
+
+ addr = 0;
+ haddr = 0;
+ hex_buffer[0] = 0;
+ asc_buffer[0] = 0;
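+	/* build one line: 16 bytes as hex on the left, printable ASCII on the right */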
+ for (i=0; i < size; ) {
+ if (*p >= '0' && *p <='z')
+ asc_buffer[addr] = *p;
+ else
+ asc_buffer[addr] = '.';
+ addr++;
+ asc_buffer[addr] = 0;
+ hex_buffer[haddr] = HEXCHAR[(*p & 0xf0) >> 4];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[*p & 0x0f];
+ haddr++;
+ hex_buffer[haddr] = ' ';
+ haddr++;
+ hex_buffer[haddr] = 0;
+ p++;
+ i++;
+ if (i%16 == 0) {
+ printk("%s %s\n", hex_buffer, asc_buffer);
+ addr = 0;
+ haddr = 0;
+ }
+ }
+} /* DumpData */
+
+
+/*****************************************************************************
+ *
+ * DumpLong - print a data area as long values
+ *
+ * Description:
+ * This function prints a area of data to the system logfile/to the
+ * console.
+ *
+ * Returns: N/A
+ *
+ */
+static void DumpLong(char *pc, int size)
+{
+register int i;
+int haddr, addr;
+char hex_buffer[180];
+char asc_buffer[180];
+char HEXCHAR[] = "0123456789ABCDEF";
+long *p;
+int l;
+
+ addr = 0;
+ haddr = 0;
+ hex_buffer[0] = 0;
+ asc_buffer[0] = 0;
+ p = (long*) pc;
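+	/* print the buffer as long words, eight per output line */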
+ for (i=0; i < size; ) {
+ l = (long) *p;
+ hex_buffer[haddr] = HEXCHAR[(l >> 28) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 24) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 20) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 16) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 12) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 8) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[(l >> 4) & 0xf];
+ haddr++;
+ hex_buffer[haddr] = HEXCHAR[l & 0x0f];
+ haddr++;
+ hex_buffer[haddr] = ' ';
+ haddr++;
+ hex_buffer[haddr] = 0;
+ p++;
+ i++;
+ if (i%8 == 0) {
+ printk("%4x %s\n", (i-8)*4, hex_buffer);
+ haddr = 0;
+ }
+ }
+ printk("------------------------\n");
+} /* DumpLong */
+
+#endif
+
+static int __devinit skge_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ SK_AC *pAC;
+ DEV_NET *pNet = NULL;
+ struct net_device *dev = NULL;
+ static int boards_found = 0;
+ int error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ goto out;
+
+ /* Configure DMA attributes. */
+ if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL) &&
+ pci_set_dma_mask(pdev, (u64) 0xffffffff))
+ goto out_disable_device;
+
+
+ if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) {
+ printk(KERN_ERR "Unable to allocate etherdev "
+ "structure!\n");
+ goto out_disable_device;
+ }
+
+ pNet = netdev_priv(dev);
+ pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL);
+ if (!pNet->pAC) {
+ printk(KERN_ERR "Unable to allocate adapter "
+ "structure!\n");
+ goto out_free_netdev;
+ }
+
+ memset(pNet->pAC, 0, sizeof(SK_AC));
+ pAC = pNet->pAC;
+ pAC->PciDev = pdev;
+ pAC->PciDevId = pdev->device;
+ pAC->dev[0] = dev;
+ pAC->dev[1] = dev;
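+	/* dev[1] is replaced below if a second port is found and registered */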
+ sprintf(pAC->Name, "SysKonnect SK-98xx");
+ pAC->CheckQueue = SK_FALSE;
+
+ pNet->Mtu = 1500;
+ pNet->Up = 0;
+ dev->irq = pdev->irq;
+ error = SkGeInitPCI(pAC);
+ if (error) {
+ printk("SKGE: PCI setup failed: %i\n", error);
+ goto out_free_netdev;
+ }
+
+ SET_MODULE_OWNER(dev);
+ dev->open = &SkGeOpen;
+ dev->stop = &SkGeClose;
+ dev->hard_start_xmit = &SkGeXmit;
+ dev->get_stats = &SkGeStats;
+ dev->set_multicast_list = &SkGeSetRxMode;
+ dev->set_mac_address = &SkGeSetMacAddr;
+ dev->do_ioctl = &SkGeIoctl;
+ dev->change_mtu = &SkGeChangeMtu;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &SkGePollController;
+#endif
+ dev->flags &= ~IFF_RUNNING;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
+
+#ifdef SK_ZEROCOPY
+#ifdef USE_SK_TX_CHECKSUM
+ if (pAC->ChipsetType) {
+		/* Use only with Yukon hardware */
+		/* SG and ZEROCOPY - fly baby... */
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+#endif
+#endif
+
+ pAC->Index = boards_found++;
+
+ if (SkGeBoardInit(dev, pAC))
+ goto out_free_netdev;
+
+ /* Register net device */
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SKGE: Could not register device.\n");
+ goto out_free_resources;
+ }
+
+ /* Print adapter specific string from vpd */
+ ProductStr(pAC);
+ printk("%s: %s\n", dev->name, pAC->DeviceStr);
+
+ /* Print configuration settings */
+ printk(" PrefPort:%c RlmtMode:%s\n",
+ 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber,
+ (pAC->RlmtMode==0) ? "Check Link State" :
+ ((pAC->RlmtMode==1) ? "Check Link State" :
+ ((pAC->RlmtMode==3) ? "Check Local Port" :
+ ((pAC->RlmtMode==7) ? "Check Segmentation" :
+ ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error")))));
+
+ SkGeYellowLED(pAC, pAC->IoBase, 1);
+
+
+ memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6);
+
+ SkGeProcCreate(dev);
+
+ pNet->PortNr = 0;
+ pNet->NetNr = 0;
+
+ boards_found++;
+
+	/* More than one port found */
+ if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+ if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) {
+ printk(KERN_ERR "Unable to allocate etherdev "
+ "structure!\n");
+ goto out;
+ }
+
+ pAC->dev[1] = dev;
+ pNet = netdev_priv(dev);
+ pNet->PortNr = 1;
+ pNet->NetNr = 1;
+ pNet->pAC = pAC;
+ pNet->Mtu = 1500;
+ pNet->Up = 0;
+
+ dev->open = &SkGeOpen;
+ dev->stop = &SkGeClose;
+ dev->hard_start_xmit = &SkGeXmit;
+ dev->get_stats = &SkGeStats;
+ dev->set_multicast_list = &SkGeSetRxMode;
+ dev->set_mac_address = &SkGeSetMacAddr;
+ dev->do_ioctl = &SkGeIoctl;
+ dev->change_mtu = &SkGeChangeMtu;
+ dev->flags &= ~IFF_RUNNING;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
+
+#ifdef SK_ZEROCOPY
+#ifdef USE_SK_TX_CHECKSUM
+ if (pAC->ChipsetType) {
+ /* SG and ZEROCOPY - fly baby... */
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+#endif
+#endif
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SKGE: Could not register device.\n");
+ free_netdev(dev);
+ pAC->dev[1] = pAC->dev[0];
+ } else {
+ SkGeProcCreate(dev);
+ memcpy(&dev->dev_addr,
+ &pAC->Addr.Net[1].CurrentMacAddress, 6);
+
+ printk("%s: %s\n", dev->name, pAC->DeviceStr);
+ printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
+ }
+ }
+
+ /* Save the hardware revision */
+ pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
+ (pAC->GIni.GIPciHwRev & 0x0F);
+
+ /* Set driver globals */
+ pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME;
+ pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE;
+
+ memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA));
+ memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA));
+
+ pci_set_drvdata(pdev, dev);
+ return 0;
+
+ out_free_resources:
+ FreeResources(dev);
+ out_free_netdev:
+ free_netdev(dev);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return error;
+}
+
+static void __devexit skge_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ struct net_device *otherdev = pAC->dev[1];
+
+ SkGeProcRemove(dev);
+ unregister_netdev(dev);
+ if (otherdev != dev)
+ SkGeProcRemove(otherdev);
+
+ SkGeYellowLED(pAC, pAC->IoBase, 0);
+
+ if (pAC->BoardLevel == SK_INIT_RUN) {
+ SK_EVPARA EvPara;
+ unsigned long Flags;
+
+ /* board is still alive */
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
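+		/* stop RLMT for both nets before deinitializing the hardware */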
+ EvPara.Para32[0] = 0;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ EvPara.Para32[0] = 1;
+ EvPara.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
+ SkEventDispatcher(pAC, pAC->IoBase);
+ /* disable interrupts */
+ SK_OUT32(pAC->IoBase, B0_IMSK, 0);
+ SkGeDeInit(pAC, pAC->IoBase);
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+ pAC->BoardLevel = SK_INIT_DATA;
+ /* We do NOT check here, if IRQ was pending, of course*/
+ }
+
+ if (pAC->BoardLevel == SK_INIT_IO) {
+ /* board is still alive */
+ SkGeDeInit(pAC, pAC->IoBase);
+ pAC->BoardLevel = SK_INIT_DATA;
+ }
+
+ FreeResources(dev);
+ free_netdev(dev);
+ if (otherdev != dev)
+ free_netdev(otherdev);
+ kfree(pAC);
+}
+
+static struct pci_device_id skge_pci_tbl[] = {
+ { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
+ { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+#endif
+ { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
+
+static struct pci_driver skge_driver = {
+ .name = "sk98lin",
+ .id_table = skge_pci_tbl,
+ .probe = skge_probe_one,
+ .remove = __devexit_p(skge_remove_one),
+};
+
+static int __init skge_init(void)
+{
+ int error;
+
+ pSkRootDir = proc_mkdir(SKRootName, proc_net);
+ if (pSkRootDir)
+ pSkRootDir->owner = THIS_MODULE;
+
+ error = pci_register_driver(&skge_driver);
+ if (error)
+ proc_net_remove(SKRootName);
+ return error;
+}
+
+static void __exit skge_exit(void)
+{
+ pci_unregister_driver(&skge_driver);
+ proc_net_remove(SKRootName);
+
+}
+
+module_init(skge_init);
+module_exit(skge_exit);
diff --git a/drivers/net/sk98lin/skgehwt.c b/drivers/net/sk98lin/skgehwt.c
new file mode 100644
index 000000000000..db670993c2df
--- /dev/null
+++ b/drivers/net/sk98lin/skgehwt.c
@@ -0,0 +1,171 @@
+/******************************************************************************
+ *
+ * Name: skgehwt.c
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.15 $
+ * Date: $Date: 2003/09/16 13:41:23 $
+ * Purpose: Hardware Timer
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Event queue and dispatcher
+ */
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skgehwt.c,v 1.15 2003/09/16 13:41:23 rschmidt Exp $ (C) Marvell.";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
+
+#ifdef __C2MAN__
+/*
+ * Hardware Timer function queue management.
+ */
+intro()
+{}
+#endif
+
+/*
+ * Prototypes of local functions.
+ */
+#define SK_HWT_MAX (65000)
+
+/* correction factor */
+#define SK_HWT_FAC (1000 * (SK_U32)pAC->GIni.GIHstClkFact / 100)
+
+/*
+ * Initialize hardware timer.
+ *
+ * Must be called during init level 1.
+ */
+void SkHwtInit(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc) /* IoContext */
+{
+ pAC->Hwt.TStart = 0 ;
+ pAC->Hwt.TStop = 0 ;
+ pAC->Hwt.TActive = SK_FALSE;
+
+ SkHwtStop(pAC, Ioc);
+}
+
+/*
+ *
+ * Start hardware timer (clock ticks are 16us).
+ *
+ */
+void SkHwtStart(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc, /* IoContext */
+SK_U32 Time) /* Time in units of 16us to load the timer with. */
+{
+ SK_U32 Cnt;
+
+ if (Time > SK_HWT_MAX)
+ Time = SK_HWT_MAX;
+
+ pAC->Hwt.TStart = Time;
+ pAC->Hwt.TStop = 0L;
+
+ Cnt = Time;
+
+ /*
+ * if time < 16 us
+ * time = 16 us
+ */
+ if (!Cnt) {
+ Cnt++;
+ }
+
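+	/* convert the 16 us units into clock ticks using SK_HWT_FAC */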
+ SK_OUT32(Ioc, B2_TI_INI, Cnt * SK_HWT_FAC);
+
+ SK_OUT16(Ioc, B2_TI_CTRL, TIM_START); /* Start timer. */
+
+ pAC->Hwt.TActive = SK_TRUE;
+}
+
+/*
+ * Stop hardware timer.
+ * and clear the timer IRQ
+ */
+void SkHwtStop(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc) /* IoContext */
+{
+ SK_OUT16(Ioc, B2_TI_CTRL, TIM_STOP);
+
+ SK_OUT16(Ioc, B2_TI_CTRL, TIM_CLR_IRQ);
+
+ pAC->Hwt.TActive = SK_FALSE;
+}
+
+
+/*
+ * Stop hardware timer and read time elapsed since last start.
+ *
+ * returns
+ * The elapsed time since last start in units of 16us.
+ *
+ */
+SK_U32 SkHwtRead(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc) /* IoContext */
+{
+ SK_U32 TRead;
+ SK_U32 IStatus;
+
+ if (pAC->Hwt.TActive) {
+
+ SkHwtStop(pAC, Ioc);
+
+ SK_IN32(Ioc, B2_TI_VAL, &TRead);
+ TRead /= SK_HWT_FAC;
+
+ SK_IN32(Ioc, B0_ISRC, &IStatus);
+
+		/* Check if timer expired (or wrapped around) */
+ if ((TRead > pAC->Hwt.TStart) || (IStatus & IS_TIMINT)) {
+
+ SkHwtStop(pAC, Ioc);
+
+ pAC->Hwt.TStop = pAC->Hwt.TStart;
+ }
+ else {
+
+ pAC->Hwt.TStop = pAC->Hwt.TStart - TRead;
+ }
+ }
+ return(pAC->Hwt.TStop);
+}
+
+/*
+ * interrupt source= timer
+ */
+void SkHwtIsr(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc) /* IoContext */
+{
+ SkHwtStop(pAC, Ioc);
+
+ pAC->Hwt.TStop = pAC->Hwt.TStart;
+
+ SkTimerDone(pAC, Ioc);
+}
+
+/* End of file */
diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c
new file mode 100644
index 000000000000..df4483429a77
--- /dev/null
+++ b/drivers/net/sk98lin/skgeinit.c
@@ -0,0 +1,2151 @@
+/******************************************************************************
+ *
+ * Name: skgeinit.c
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.97 $
+ * Date: $Date: 2003/10/02 16:45:31 $
+ * Purpose: Contains functions to initialize the adapter
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+
+/* global variables ***********************************************************/
+
+/* local variables ************************************************************/
+
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skgeinit.c,v 1.97 2003/10/02 16:45:31 rschmidt Exp $ (C) Marvell.";
+#endif
+
+struct s_QOffTab {
+ int RxQOff; /* Receive Queue Address Offset */
+ int XsQOff; /* Sync Tx Queue Address Offset */
+ int XaQOff; /* Async Tx Queue Address Offset */
+};
+static struct s_QOffTab QOffTab[] = {
+ {Q_R1, Q_XS1, Q_XA1}, {Q_R2, Q_XS2, Q_XA2}
+};
+
+struct s_Config {
+ char ScanString[8];
+ SK_U32 Value;
+};
+
+static struct s_Config OemConfig = {
+ {'O','E','M','_','C','o','n','f'},
+#ifdef SK_OEM_CONFIG
+ OEM_CONFIG_VALUE,
+#else
+ 0,
+#endif
+};
+
+/******************************************************************************
+ *
+ * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring
+ *
+ * Description:
+ * Enable or disable the descriptor polling of the receive descriptor
+ * ring (RxD) for port 'Port'.
+ * The new configuration is *not* saved over any SkGeStopPort() and
+ * SkGeInitPort() calls.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGePollRxD(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL PollRxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ?
+ CSR_ENA_POL : CSR_DIS_POL);
+} /* SkGePollRxD */
+
+
+/******************************************************************************
+ *
+ * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings
+ *
+ * Description:
+ * Enable or disable the descriptor polling of the transmit descriptor
+ * ring(s) (TxD) for port 'Port'.
+ * The new configuration is *not* saved over any SkGeStopPort() and
+ * SkGeInitPort() calls.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGePollTxD(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL PollTxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
+{
+ SK_GEPORT *pPrt;
+ SK_U32 DWord;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ DWord = (SK_U32)(PollTxD ? CSR_ENA_POL : CSR_DIS_POL);
+
+ if (pPrt->PXSQSize != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), DWord);
+ }
+
+ if (pPrt->PXAQSize != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), DWord);
+ }
+} /* SkGePollTxD */
+
+
+/******************************************************************************
+ *
+ * SkGeYellowLED() - Switch the yellow LED on or off.
+ *
+ * Description:
+ * Switch the yellow LED on or off.
+ *
+ * Note:
+ * This function may be called any time after SkGeInit(Level 1).
+ *
+ * Returns:
+ * nothing
+ */
+void SkGeYellowLED(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int State) /* yellow LED state, 0 = OFF, 0 != ON */
+{
+ if (State == 0) {
+ /* Switch yellow LED OFF */
+ SK_OUT8(IoC, B0_LED, LED_STAT_OFF);
+ }
+ else {
+ /* Switch yellow LED ON */
+ SK_OUT8(IoC, B0_LED, LED_STAT_ON);
+ }
+} /* SkGeYellowLED */
+
+
+#if (!defined(SK_SLIM) || defined(GENESIS))
+/******************************************************************************
+ *
+ * SkGeXmitLED() - Modify the Operational Mode of a transmission LED.
+ *
+ * Description:
+ * The Rx or Tx LED which is specified by 'Led' will be
+ * enabled, disabled or switched on in test mode.
+ *
+ * Note:
+ * 'Led' must contain the address offset of the LEDs INI register.
+ *
+ * Usage:
+ * SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA);
+ *
+ * Returns:
+ * nothing
+ */
+void SkGeXmitLED(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Led, /* offset to the LED Init Value register */
+int Mode) /* Mode may be SK_LED_DIS, SK_LED_ENA, SK_LED_TST */
+{
+ SK_U32 LedIni;
+
+ switch (Mode) {
+ case SK_LED_ENA:
+ LedIni = SK_XMIT_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100;
+ SK_OUT32(IoC, Led + XMIT_LED_INI, LedIni);
+ SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START);
+ break;
+ case SK_LED_TST:
+ SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_ON);
+ SK_OUT32(IoC, Led + XMIT_LED_CNT, 100);
+ SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START);
+ break;
+ case SK_LED_DIS:
+ default:
+ /*
+		 * Do NOT stop the LED Timer here. The LED might be
+		 * in the on state, but it needs to go off.
+ */
+ SK_OUT32(IoC, Led + XMIT_LED_CNT, 0);
+ SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_OFF);
+ break;
+ }
+
+ /*
+ * 1000BT: The Transmit LED is driven by the PHY.
+ * But the default LED configuration is used for
+ * Level One and Broadcom PHYs.
+ * (Broadcom: It may be that PHY_B_PEC_EN_LTR has to be set.)
+ * (In this case it has to be added here. But we will see. XXX)
+ */
+} /* SkGeXmitLED */
+#endif /* !SK_SLIM || GENESIS */
+
+
+/******************************************************************************
+ *
+ * DoCalcAddr() - Calculates the start and the end address of a queue.
+ *
+ * Description:
+ * This function calculates the start and the end address of a queue.
+ * Afterwards the 'StartVal' is incremented to the next start position.
+ * If the port is already initialized the calculated values
+ * will be checked against the configured values and an
+ * error will be returned, if they are not equal.
+ * If the port is not initialized the values will be written to
+ * *StartAdr and *EndAddr.
+ *
+ * Returns:
+ * 0: success
+ * 1: configuration error
+ */
+static int DoCalcAddr(
+SK_AC *pAC, /* adapter context */
+SK_GEPORT SK_FAR *pPrt,		/* pointer to the port structure */
+int QuSize, /* size of the queue to configure in kB */
+SK_U32 SK_FAR *StartVal, /* start value for address calculation */
+SK_U32 SK_FAR *QuStartAddr,/* start addr to calculate */
+SK_U32 SK_FAR *QuEndAddr) /* end address to calculate */
+{
+ SK_U32 EndVal;
+ SK_U32 NextStart;
+ int Rtv;
+
+ Rtv = 0;
+ if (QuSize == 0) {
+ EndVal = *StartVal;
+ NextStart = EndVal;
+ }
+ else {
+ EndVal = *StartVal + ((SK_U32)QuSize * 1024) - 1;
+ NextStart = EndVal + 1;
+ }
+
+ if (pPrt->PState >= SK_PRT_INIT) {
+ if (*StartVal != *QuStartAddr || EndVal != *QuEndAddr) {
+ Rtv = 1;
+ }
+ }
+ else {
+ *QuStartAddr = *StartVal;
+ *QuEndAddr = EndVal;
+ }
+
+ *StartVal = NextStart;
+ return(Rtv);
+} /* DoCalcAddr */
+
+/******************************************************************************
+ *
+ * SkGeInitAssignRamToQueues() - allocate default queue sizes
+ *
+ * Description:
+ * This function assigns the memory to the different queues and ports.
+ * When DualNet is set to SK_TRUE all ports get the same amount of memory.
+ * Otherwise the first port gets most of the memory and all the
+ * other ports just the required minimum.
+ *	This function can only be called after pAC->GIni.GIRamSize and
+ *	pAC->GIni.GIMacsFound have been initialized; usually this happens
+ *	at init level 1.
+ *
+ * Returns:
+ * 0 - ok
+ * 1 - invalid input values
+ * 2 - not enough memory
+ */
+
+int SkGeInitAssignRamToQueues(
+SK_AC *pAC, /* Adapter context */
+int ActivePort, /* Active Port in RLMT mode */
+SK_BOOL	DualNet)	/* dual net mode flag */
+{
+ int i;
+ int UsedKilobytes; /* memory already assigned */
+ int ActivePortKilobytes; /* memory available for active port */
+ SK_GEPORT *pGePort;
+
+ UsedKilobytes = 0;
+
+ if (ActivePort >= pAC->GIni.GIMacsFound) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
+ ("SkGeInitAssignRamToQueues: ActivePort (%d) invalid\n",
+ ActivePort));
+ return(1);
+ }
+ if (((pAC->GIni.GIMacsFound * (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE)) +
+ ((RAM_QUOTA_SYNC == 0) ? 0 : SK_MIN_TXQ_SIZE)) > pAC->GIni.GIRamSize) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
+ ("SkGeInitAssignRamToQueues: Not enough memory (%d)\n",
+ pAC->GIni.GIRamSize));
+ return(2);
+ }
+
+ if (DualNet) {
+ /* every port gets the same amount of memory */
+ ActivePortKilobytes = pAC->GIni.GIRamSize / pAC->GIni.GIMacsFound;
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+
+ pGePort = &pAC->GIni.GP[i];
+
+ /* take away the minimum memory for active queues */
+ ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);
+
+ /* receive queue gets the minimum + 80% of the rest */
+ pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((
+ ActivePortKilobytes * (unsigned long) RAM_QUOTA_RX) / 100))
+ + SK_MIN_RXQ_SIZE;
+
+ ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE);
+
+ /* synchronous transmit queue */
+ pGePort->PXSQSize = 0;
+
+ /* asynchronous transmit queue */
+ pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes +
+ SK_MIN_TXQ_SIZE);
+ }
+ }
+ else {
+ /* Rlmt Mode or single link adapter */
+
+ /* Set standby queue size defaults for all standby ports */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+
+ if (i != ActivePort) {
+ pGePort = &pAC->GIni.GP[i];
+
+ pGePort->PRxQSize = SK_MIN_RXQ_SIZE;
+ pGePort->PXAQSize = SK_MIN_TXQ_SIZE;
+ pGePort->PXSQSize = 0;
+
+ /* Count used RAM */
+ UsedKilobytes += pGePort->PRxQSize + pGePort->PXAQSize;
+ }
+ }
+ /* what's left? */
+ ActivePortKilobytes = pAC->GIni.GIRamSize - UsedKilobytes;
+
+ /* assign it to the active port */
+ /* first take away the minimum memory */
+ ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);
+ pGePort = &pAC->GIni.GP[ActivePort];
+
+		/* receive queue gets the minimum + 80% of the rest */
+ pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((ActivePortKilobytes *
+ (unsigned long) RAM_QUOTA_RX) / 100)) + SK_MIN_RXQ_SIZE;
+
+ ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE);
+
+ /* synchronous transmit queue */
+ pGePort->PXSQSize = 0;
+
+ /* asynchronous transmit queue */
+ pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes) +
+ SK_MIN_TXQ_SIZE;
+ }
+#ifdef VCPU
+ VCPUprintf(0, "PRxQSize=%u, PXSQSize=%u, PXAQSize=%u\n",
+ pGePort->PRxQSize, pGePort->PXSQSize, pGePort->PXAQSize);
+#endif /* VCPU */
+
+ return(0);
+} /* SkGeInitAssignRamToQueues */
+
+/******************************************************************************
+ *
+ * SkGeCheckQSize() - Checks the Adapters Queue Size Configuration
+ *
+ * Description:
+ * This function verifies the Queue Size Configuration specified
+ * in the variables PRxQSize, PXSQSize, and PXAQSize of all
+ * used ports.
+ *	These requirements must be fulfilled to have a valid configuration:
+ * - The size of all queues must not exceed GIRamSize.
+ * - The queue sizes must be specified in units of 8 kB.
+ * - The size of Rx queues of available ports must not be
+ * smaller than 16 kB.
+ * - The size of at least one Tx queue (synch. or asynch.)
+ * of available ports must not be smaller than 16 kB
+ * when Jumbo Frames are used.
+ * - The RAM start and end addresses must not be changed
+ * for ports which are already initialized.
+ * Furthermore SkGeCheckQSize() defines the Start and End Addresses
+ * of all ports and stores them into the HWAC port structure.
+ *
+ * Returns:
+ * 0: Queue Size Configuration valid
+ * 1: Queue Size Configuration invalid
+ */
+static int SkGeCheckQSize(
+SK_AC *pAC, /* adapter context */
+int Port) /* port index */
+{
+ SK_GEPORT *pPrt;
+ int i;
+ int Rtv;
+ int Rtv2;
+ SK_U32 StartAddr;
+#ifndef SK_SLIM
+ int UsedMem; /* total memory used (max. found ports) */
+#endif
+
+ Rtv = 0;
+
+#ifndef SK_SLIM
+
+ UsedMem = 0;
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+ pPrt = &pAC->GIni.GP[i];
+
+ if ((pPrt->PRxQSize & QZ_UNITS) != 0 ||
+ (pPrt->PXSQSize & QZ_UNITS) != 0 ||
+ (pPrt->PXAQSize & QZ_UNITS) != 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG);
+ return(1);
+ }
+
+ if (i == Port && pPrt->PRxQSize < SK_MIN_RXQ_SIZE) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E011, SKERR_HWI_E011MSG);
+ return(1);
+ }
+
+ /*
+ * the size of at least one Tx queue (synch. or asynch.) has to be > 0.
+ * if Jumbo Frames are used, this size has to be >= 16 kB.
+ */
+ if ((i == Port && pPrt->PXSQSize == 0 && pPrt->PXAQSize == 0) ||
+ (pAC->GIni.GIPortUsage == SK_JUMBO_LINK &&
+ ((pPrt->PXSQSize > 0 && pPrt->PXSQSize < SK_MIN_TXQ_SIZE) ||
+ (pPrt->PXAQSize > 0 && pPrt->PXAQSize < SK_MIN_TXQ_SIZE)))) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E023, SKERR_HWI_E023MSG);
+ return(1);
+ }
+
+ UsedMem += pPrt->PRxQSize + pPrt->PXSQSize + pPrt->PXAQSize;
+ }
+
+ if (UsedMem > pAC->GIni.GIRamSize) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG);
+ return(1);
+ }
+#endif /* !SK_SLIM */
+
+ /* Now start address calculation */
+ StartAddr = pAC->GIni.GIRamOffs;
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+ pPrt = &pAC->GIni.GP[i];
+
+ /* Calculate/Check values for the receive queue */
+ Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PRxQSize, &StartAddr,
+ &pPrt->PRxQRamStart, &pPrt->PRxQRamEnd);
+ Rtv |= Rtv2;
+
+ /* Calculate/Check values for the synchronous Tx queue */
+ Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXSQSize, &StartAddr,
+ &pPrt->PXsQRamStart, &pPrt->PXsQRamEnd);
+ Rtv |= Rtv2;
+
+ /* Calculate/Check values for the asynchronous Tx queue */
+ Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXAQSize, &StartAddr,
+ &pPrt->PXaQRamStart, &pPrt->PXaQRamEnd);
+ Rtv |= Rtv2;
+
+ if (Rtv) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E013, SKERR_HWI_E013MSG);
+ return(1);
+ }
+ }
+
+ return(0);
+} /* SkGeCheckQSize */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkGeInitMacArb() - Initialize the MAC Arbiter
+ *
+ * Description:
+ * This function initializes the MAC Arbiter.
+ * It must not be called if there is still an
+ * initialized or active port.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInitMacArb(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ /* release local reset */
+ SK_OUT16(IoC, B3_MA_TO_CTRL, MA_RST_CLR);
+
+ /* configure timeout values */
+ SK_OUT8(IoC, B3_MA_TOINI_RX1, SK_MAC_TO_53);
+ SK_OUT8(IoC, B3_MA_TOINI_RX2, SK_MAC_TO_53);
+ SK_OUT8(IoC, B3_MA_TOINI_TX1, SK_MAC_TO_53);
+ SK_OUT8(IoC, B3_MA_TOINI_TX2, SK_MAC_TO_53);
+
+ SK_OUT8(IoC, B3_MA_RCINI_RX1, 0);
+ SK_OUT8(IoC, B3_MA_RCINI_RX2, 0);
+ SK_OUT8(IoC, B3_MA_RCINI_TX1, 0);
+ SK_OUT8(IoC, B3_MA_RCINI_TX2, 0);
+
+ /* recovery values are needed for XMAC II Rev. B2 only */
+ /* Fast Output Enable Mode was intended to use with Rev. B2, but now? */
+
+ /*
+ * There is no start or enable button to push, therefore
+ * the MAC arbiter is configured and enabled now.
+ */
+} /* SkGeInitMacArb */
+
+
+/******************************************************************************
+ *
+ * SkGeInitPktArb() - Initialize the Packet Arbiter
+ *
+ * Description:
+ * This function initializes the Packet Arbiter.
+ * It must not be called if there is still an
+ * initialized or active port.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInitPktArb(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ /* release local reset */
+ SK_OUT16(IoC, B3_PA_CTRL, PA_RST_CLR);
+
+ /* configure timeout values */
+ SK_OUT16(IoC, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
+ SK_OUT16(IoC, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
+ SK_OUT16(IoC, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
+ SK_OUT16(IoC, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
+
+ /*
+ * enable timeout timers if jumbo frames not used
+ * NOTE: the packet arbiter timeout interrupt is needed for
+ * half duplex hangup workaround
+ */
+ if (pAC->GIni.GIPortUsage != SK_JUMBO_LINK) {
+ if (pAC->GIni.GIMacsFound == 1) {
+ SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1);
+ }
+ else {
+ SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1 | PA_ENA_TO_TX2);
+ }
+ }
+} /* SkGeInitPktArb */
+#endif /* GENESIS */
+
+
+/******************************************************************************
+ *
+ * SkGeInitMacFifo() - Initialize the MAC FIFOs
+ *
+ * Description:
+ * Initialize all MAC FIFOs of the specified port
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInitMacFifo(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U16 Word;
+#ifdef VCPU
+ SK_U32 DWord;
+#endif /* VCPU */
+ /*
+ * For each FIFO:
+ * - release local reset
+ * - use default value for MAC FIFO size
+ * - setup defaults for the control register
+ * - enable the FIFO
+ */
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* Configure Rx MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR);
+ SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_RX_CTRL_DEF);
+ SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ /* Configure Tx MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_CLR);
+ SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
+ SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ /* Enable frame flushing if jumbo frames used */
+ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
+ SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* set Rx GMAC FIFO Flush Mask */
+ SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_MSK), (SK_U16)RX_FF_FL_DEF_MSK);
+
+ Word = (SK_U16)GMF_RX_CTRL_DEF;
+
+ /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
+ if (pAC->GIni.GIYukonLite && pAC->GIni.GIChipId == CHIP_ID_YUKON) {
+
+ Word &= ~GMF_RX_F_FL_ON;
+ }
+
+ /* Configure Rx MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR);
+ SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), Word);
+
+ /* set Rx GMAC FIFO Flush Threshold (default: 0x0a -> 56 bytes) */
+ SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+
+ /* Configure Tx MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR);
+ SK_OUT16(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U16)GMF_TX_CTRL_DEF);
+
+#ifdef VCPU
+ SK_IN32(IoC, MR_ADDR(Port, RX_GMF_AF_THR), &DWord);
+ SK_IN32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), &DWord);
+#endif /* VCPU */
+
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+/* SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), 0); */
+ }
+#endif /* YUKON */
+
+} /* SkGeInitMacFifo */
+
+#ifdef SK_LNK_SYNC_CNT
+/******************************************************************************
+ *
+ * SkGeLoadLnkSyncCnt() - Load the Link Sync Counter and starts counting
+ *
+ * Description:
+ * This function starts the Link Sync Counter of the specified
+ * port and enables the generation of an Link Sync IRQ.
+ * The Link Sync Counter may be used to detect an active link,
+ * if autonegotiation is not used.
+ *
+ * Note:
+ * o To ensure receiving the Link Sync Event the LinkSyncCounter
+ * should be initialized BEFORE clearing the XMAC's reset!
+ * o Enable IS_LNK_SYNC_M1 and IS_LNK_SYNC_M2 after calling this
+ * function.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGeLoadLnkSyncCnt(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_U32 CntVal) /* Counter value */
+{
+ SK_U32 OrgIMsk;
+ SK_U32 NewIMsk;
+ SK_U32 ISrc;
+ SK_BOOL IrqPend;
+
+ /* stop counter */
+ SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_STOP);
+
+ /*
+ * ASIC problem:
+ * Each time the Link Sync Counter is started, an IRQ is generated
+ * by the adapter. See the problem report entry from 21.07.98.
+ *
+ * Workaround: Disable the Link Sync IRQ and clear the unexpected IRQ
+ * if no IRQ is already pending.
+ */
+ IrqPend = SK_FALSE;
+ SK_IN32(IoC, B0_ISRC, &ISrc);
+ SK_IN32(IoC, B0_IMSK, &OrgIMsk);
+ if (Port == MAC_1) {
+ NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M1;
+ if ((ISrc & IS_LNK_SYNC_M1) != 0) {
+ IrqPend = SK_TRUE;
+ }
+ }
+ else {
+ NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M2;
+ if ((ISrc & IS_LNK_SYNC_M2) != 0) {
+ IrqPend = SK_TRUE;
+ }
+ }
+ if (!IrqPend) {
+ SK_OUT32(IoC, B0_IMSK, NewIMsk);
+ }
+
+ /* load counter */
+ SK_OUT32(IoC, MR_ADDR(Port, LNK_SYNC_INI), CntVal);
+
+ /* start counter */
+ SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_START);
+
+ if (!IrqPend) {
+ /* clear the unexpected IRQ, and restore the interrupt mask */
+ SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_CLR_IRQ);
+ SK_OUT32(IoC, B0_IMSK, OrgIMsk);
+ }
+} /* SkGeLoadLnkSyncCnt*/
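+/*
+ * Illustrative usage sketch (not part of the driver): following the notes
+ * above, the counter is loaded before the XMAC's reset is cleared and the
+ * Link Sync interrupt is unmasked only afterwards. The counter value and
+ * the mask handling are placeholders.
+ *
+ *	SK_U32	IMsk;
+ *
+ *	SkGeLoadLnkSyncCnt(pAC, IoC, MAC_1, CntVal);
+ *	(clear the XMAC's reset here)
+ *	SK_IN32(IoC, B0_IMSK, &IMsk);
+ *	SK_OUT32(IoC, B0_IMSK, IMsk | IS_LNK_SYNC_M1);
+ */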
+#endif /* SK_LNK_SYNC_CNT */
+
+#if defined(SK_DIAG) || defined(SK_CFG_SYNC)
+/******************************************************************************
+ *
+ * SkGeCfgSync() - Configure synchronous bandwidth for this port.
+ *
+ * Description:
+ * This function may be used to configure synchronous bandwidth
+ * for the specified port. This may be done any time after
+ * initializing the port. The configuration values are NOT saved
+ * in the HWAC port structure and will be overwritten whenever
+ * the port is stopped and started.
+ * Any values for the synchronous configuration will be ignored
+ * if the size of the synchronous queue is zero!
+ *
+ * The default configuration for the synchronous service is
+ * TXA_ENA_FSYNC. This means that if the size of
+ * the synchronous queue is not zero but no specific
+ * synchronous bandwidth is configured, the synchronous queue
+ * will always have the 'unlimited' transmit priority!
+ *
+ * This mode will be restored if the synchronous bandwidth is
+ * deallocated ('IntTime' = 0 and 'LimCount' = 0).
+ *
+ * Returns:
+ * 0: success
+ * 1: parameter configuration error
+ * 2: try to configure quality of service although no
+ * synchronous queue is configured
+ */
+int SkGeCfgSync(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_U32 IntTime, /* Interval Timer Value in units of 8ns */
+SK_U32 LimCount, /* Number of bytes to transfer during IntTime */
+int SyncMode) /* Sync Mode: TXA_ENA_ALLOC | TXA_DIS_ALLOC | 0 */
+{
+ int Rtv;
+
+ Rtv = 0;
+
+ /* check the parameters */
+ if (LimCount > IntTime ||
+ (LimCount == 0 && IntTime != 0) ||
+ (LimCount != 0 && IntTime == 0)) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG);
+ return(1);
+ }
+
+ if (pAC->GIni.GP[Port].PXSQSize == 0) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E009, SKERR_HWI_E009MSG);
+ return(2);
+ }
+
+ /* calculate register values */
+ IntTime = (IntTime / 2) * pAC->GIni.GIHstClkFact / 100;
+ LimCount = LimCount / 8;
+
+ if (IntTime > TXA_MAX_VAL || LimCount > TXA_MAX_VAL) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG);
+ return(1);
+ }
+
+ /*
+ * - Enable 'Force Sync' to ensure the synchronous queue
+ * has the priority while configuring the new values.
+ * - Also 'disable alloc' to ensure the settings comply
+ * with the SyncMode parameter.
+ * - Disable 'Rate Control' to configure the new values.
+ * - write IntTime and LimCount
+ * - start 'Rate Control' and disable 'Force Sync'
+ * if Interval Timer or Limit Counter not zero.
+ */
+ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
+ TXA_ENA_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), IntTime);
+ SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), LimCount);
+
+ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
+ (SK_U8)(SyncMode & (TXA_ENA_ALLOC | TXA_DIS_ALLOC)));
+
+ if (IntTime != 0 || LimCount != 0) {
+ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_DIS_FSYNC | TXA_START_RC);
+ }
+
+ return(0);
+} /* SkGeCfgSync */
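+/*
+ * Illustrative sketch (not part of the driver): reserving roughly
+ * 100 Mbit/s of synchronous bandwidth on a port with a configured
+ * synchronous queue. The values are placeholders: IntTime = 125000 is
+ * 1 ms in units of 8 ns and LimCount = 12500 bytes per interval;
+ * internally IntTime is halved and scaled by GIHstClkFact/100
+ * (SK_FACT_53 on GENESIS, SK_FACT_78 on YUKON) and LimCount is
+ * converted to 8-byte units.
+ *
+ *	if (SkGeCfgSync(pAC, IoC, Port, 125000, 12500, TXA_ENA_ALLOC) != 0) {
+ *		(parameter error or no synchronous queue configured)
+ *	}
+ */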
+#endif /* SK_DIAG || SK_CFG_SYNC*/
+
+
+/******************************************************************************
+ *
+ * DoInitRamQueue() - Initialize the RAM Buffer Address of a single Queue
+ *
+ * Description:
+ * If the queue is used, enable and initialize it.
+ * Make sure the queue is still reset, if it is not used.
+ *
+ * Returns:
+ * nothing
+ */
+static void DoInitRamQueue(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int QuIoOffs, /* Queue IO Address Offset */
+SK_U32 QuStartAddr, /* Queue Start Address */
+SK_U32 QuEndAddr, /* Queue End Address */
+int QuType) /* Queue Type (SK_RX_SRAM_Q|SK_RX_BRAM_Q|SK_TX_RAM_Q) */
+{
+ SK_U32 RxUpThresVal;
+ SK_U32 RxLoThresVal;
+
+ if (QuStartAddr != QuEndAddr) {
+ /* calculate thresholds, assume we have a big Rx queue */
+ RxUpThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_ULPP) / 8;
+ RxLoThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_LLPP_B)/8;
+
+ /* build HW address format */
+ QuStartAddr = QuStartAddr / 8;
+ QuEndAddr = QuEndAddr / 8;
+
+ /* release local reset */
+ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_CLR);
+
+ /* configure addresses */
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_START), QuStartAddr);
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_END), QuEndAddr);
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_WP), QuStartAddr);
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RP), QuStartAddr);
+
+ switch (QuType) {
+ case SK_RX_SRAM_Q:
+ /* configure threshold for small Rx Queue */
+ RxLoThresVal += (SK_RB_LLPP_B - SK_RB_LLPP_S) / 8;
+
+ /* continue with SK_RX_BRAM_Q */
+ case SK_RX_BRAM_Q:
+ /* write threshold for Rx Queue */
+
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_UTPP), RxUpThresVal);
+ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_LTPP), RxLoThresVal);
+
+ /* the high priority threshold is not used */
+ break;
+ case SK_TX_RAM_Q:
+ /*
+ * Do NOT use Store & Forward under normal operation for
+ * performance reasons (GENESIS only).
+ * But if Jumbo Frames are configured (the XMAC Tx FIFO is only 4 kB)
+ * or YUKON is used (the GMAC Tx FIFO is only 1 kB),
+ * we NEED Store & Forward of the RAM buffer.
+ */
+ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK ||
+ pAC->GIni.GIYukon) {
+ /* enable Store & Forward Mode for the Tx Side */
+ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_STFWD);
+ }
+ break;
+ }
+
+ /* set queue operational */
+ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_OP_MD);
+ }
+ else {
+ /* ensure the queue is still disabled */
+ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_SET);
+ }
+} /* DoInitRamQueue */
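+/*
+ * Worked example (illustrative only, the addresses are placeholders):
+ * for a 128 kB Rx buffer with QuStartAddr = 0x00000 and
+ * QuEndAddr = 0x1ffff, the HW address format written to the registers
+ * is RB_START = 0x0000 and RB_END = 0x3fff (byte addresses divided
+ * by 8), and the upper pause threshold RB_RX_UTPP becomes
+ * (0x20000 - SK_RB_ULPP) / 8 in 8-byte units; the lower threshold is
+ * built the same way from SK_RB_LLPP_B (or SK_RB_LLPP_S for the
+ * small Rx queue).
+ */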
+
+
+/******************************************************************************
+ *
+ * SkGeInitRamBufs() - Initialize the RAM Buffer Queues
+ *
+ * Description:
+ * Initialize all RAM Buffer Queues of the specified port
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInitRamBufs(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ int RxQType;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PRxQSize == SK_MIN_RXQ_SIZE) {
+ RxQType = SK_RX_SRAM_Q; /* small Rx Queue */
+ }
+ else {
+ RxQType = SK_RX_BRAM_Q; /* big Rx Queue */
+ }
+
+ DoInitRamQueue(pAC, IoC, pPrt->PRxQOff, pPrt->PRxQRamStart,
+ pPrt->PRxQRamEnd, RxQType);
+
+ DoInitRamQueue(pAC, IoC, pPrt->PXsQOff, pPrt->PXsQRamStart,
+ pPrt->PXsQRamEnd, SK_TX_RAM_Q);
+
+ DoInitRamQueue(pAC, IoC, pPrt->PXaQOff, pPrt->PXaQRamStart,
+ pPrt->PXaQRamEnd, SK_TX_RAM_Q);
+
+} /* SkGeInitRamBufs */
+
+
+/******************************************************************************
+ *
+ * SkGeInitRamIface() - Initialize the RAM Interface
+ *
+ * Description:
+ * This function initializes the adapter's RAM Interface.
+ *
+ * Note:
+ * This function is used in the diagnostics.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGeInitRamIface(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ /* release local reset */
+ SK_OUT16(IoC, B3_RI_CTRL, RI_RST_CLR);
+
+ /* configure timeout values */
+ SK_OUT8(IoC, B3_RI_WTO_R1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_WTO_XA1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_WTO_XS1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_R1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_XA1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_XS1, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_WTO_R2, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_WTO_XA2, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_WTO_XS2, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_R2, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_XA2, SK_RI_TO_53);
+ SK_OUT8(IoC, B3_RI_RTO_XS2, SK_RI_TO_53);
+
+} /* SkGeInitRamIface */
+
+
+/******************************************************************************
+ *
+ * SkGeInitBmu() - Initialize the BMU state machines
+ *
+ * Description:
+ * Initialize all BMU state machines of the specified port
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInitBmu(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U32 RxWm;
+ SK_U32 TxWm;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ RxWm = SK_BMU_RX_WM;
+ TxWm = SK_BMU_TX_WM;
+
+ if (!pAC->GIni.GIPciSlot64 && !pAC->GIni.GIPciClock66) {
+ /* for better performance */
+ RxWm /= 2;
+ TxWm /= 2;
+ }
+
+ /* Rx Queue: Release all local resets and set the watermark */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_CLR_RESET);
+ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_F), RxWm);
+
+ /*
+ * Tx Queue: Release all local resets if the queue is used
+ * and set the watermark.
+ */
+ if (pPrt->PXSQSize != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_CLR_RESET);
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_F), TxWm);
+ }
+
+ if (pPrt->PXAQSize != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_CLR_RESET);
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_F), TxWm);
+ }
+ /*
+ * Do NOT enable the descriptor poll timers here, because
+ * the descriptor addresses are not specified yet.
+ */
+} /* SkGeInitBmu */
+
+
+/******************************************************************************
+ *
+ * TestStopBit() - Test the stop bit of the queue
+ *
+ * Description:
+ * Stopping a queue is not as simple as it seems to be.
+ * If descriptor polling is enabled, it may happen
+ * that RX/TX stop is done and SV idle is NOT set.
+ * In this case we have to issue another stop command.
+ *
+ * Returns:
+ * The queues control status register
+ */
+static SK_U32 TestStopBit(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int QuIoOffs) /* Queue IO Address Offset */
+{
+ SK_U32 QuCsr; /* CSR contents */
+
+ SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr);
+
+ if ((QuCsr & (CSR_STOP | CSR_SV_IDLE)) == 0) {
+ /* Stop Descriptor overridden by start command */
+ SK_OUT32(IoC, Q_ADDR(QuIoOffs, Q_CSR), CSR_STOP);
+
+ SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr);
+ }
+
+ return(QuCsr);
+} /* TestStopBit */
+
+
+/******************************************************************************
+ *
+ * SkGeStopPort() - Stop the Rx/Tx activity of the port 'Port'.
+ *
+ * Description:
+ * After calling this function the descriptor rings and Rx and Tx
+ * queues of this port may be reconfigured.
+ *
+ * It is possible to stop the receive and transmit paths separately
+ * or both together.
+ *
+ * Dir = SK_STOP_TX Stops the transmit path only and resets the MAC.
+ * The receive queue is still active and
+ * the pending Rx frames may still be transferred
+ * into the RxD.
+ * SK_STOP_RX Stop the receive path. The transmit path
+ * has to be stopped once before.
+ * SK_STOP_ALL SK_STOP_TX + SK_STOP_RX
+ *
+ * RstMode = SK_SOFT_RST Resets the MAC. The PHY is still alive.
+ * SK_HARD_RST Resets the MAC and the PHY.
+ *
+ * Example:
+ * 1) A Link Down event was signaled for a port. Therefore the activity
+ * of this port should be stopped and a hardware reset should be issued
+ * to enable the workaround of XMAC Errata #2. But the received frames
+ * should not be discarded.
+ * ...
+ * SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST);
+ * (transfer all pending Rx frames)
+ * SkGeStopPort(pAC, IoC, Port, SK_STOP_RX, SK_HARD_RST);
+ * ...
+ *
+ * 2) An event was issued which requests the driver to switch
+ * the 'virtual active' link to another already active port
+ * as soon as possible. The frames in the receive queue of this
+ * port may be lost. But the PHY must not be reset during this
+ * event.
+ * ...
+ * SkGeStopPort(pAC, IoC, Port, SK_STOP_ALL, SK_SOFT_RST);
+ * ...
+ *
+ * Extended Description:
+ * If SK_STOP_TX is set,
+ * o disable the MAC's receiver and transmitter to prevent
+ * it from sending incomplete frames
+ * o stop the port's transmit queues before terminating the
+ * BMUs to prevent incomplete PCI cycles
+ * on the PCI bus
+ * - The network Rx and Tx activity and the PCI Tx transfer are
+ * disabled now.
+ * o reset the MAC depending on the RstMode
+ * o Stop Interval Timer and Limit Counter of Tx Arbiter,
+ * also disable Force Sync bit and Enable Alloc bit.
+ * o perform a local reset of the port's Tx path
+ * - reset the PCI FIFO of the async Tx queue
+ * - reset the PCI FIFO of the sync Tx queue
+ * - reset the RAM Buffer async Tx queue
+ * - reset the RAM Buffer sync Tx queue
+ * - reset the MAC Tx FIFO
+ * o switch Link and Tx LED off, stop the LED counters
+ *
+ * If SK_STOP_RX is set,
+ * o stop the port's receive queue
+ * - The path data transfer activity is fully stopped now.
+ * o perform a local reset of the port's Rx path
+ * - reset the PCI FIFO of the Rx queue
+ * - reset the RAM Buffer receive queue
+ * - reset the MAC Rx FIFO
+ * o switch Rx LED off, stop the LED counter
+ *
+ * If all ports are stopped,
+ * o reset the RAM Interface.
+ *
+ * Notes:
+ * o This function may be called during the driver states RESET_PORT and
+ * SWITCH_PORT.
+ */
+void SkGeStopPort(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* I/O context */
+int Port, /* port to stop (MAC_1 + n) */
+int Dir, /* Direction to Stop (SK_STOP_RX, SK_STOP_TX, SK_STOP_ALL) */
+int RstMode)/* Reset Mode (SK_SOFT_RST, SK_HARD_RST) */
+{
+#ifndef SK_DIAG
+ SK_EVPARA Para;
+#endif /* !SK_DIAG */
+ SK_GEPORT *pPrt;
+ SK_U32 DWord;
+ SK_U32 XsCsr;
+ SK_U32 XaCsr;
+ SK_U64 ToutStart;
+ int i;
+ int ToutCnt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if ((Dir & SK_STOP_TX) != 0) {
+ /* disable receiver and transmitter */
+ SkMacRxTxDisable(pAC, IoC, Port);
+
+ /* stop both transmit queues */
+ /*
+ * If the BMU is in the reset state CSR_STOP will terminate
+ * immediately.
+ */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_STOP);
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_STOP);
+
+ ToutStart = SkOsGetTime(pAC);
+ ToutCnt = 0;
+ do {
+ /*
+ * Clear packet arbiter timeout to make sure
+ * this loop will terminate.
+ */
+ SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ?
+ PA_CLR_TO_TX1 : PA_CLR_TO_TX2));
+
+ /*
+ * If the transfer gets stuck at the MAC, the STOP command will not
+ * terminate unless we flush the XMAC's transmit FIFO!
+ */
+ SkMacFlushTxFifo(pAC, IoC, Port);
+
+ XsCsr = TestStopBit(pAC, IoC, pPrt->PXsQOff);
+ XaCsr = TestStopBit(pAC, IoC, pPrt->PXaQOff);
+
+ if (SkOsGetTime(pAC) - ToutStart > (SK_TICKS_PER_SEC / 18)) {
+ /*
+ * Timeout of 1/18 second reached.
+ * This needs to be checked at 1/18 sec only.
+ */
+ ToutCnt++;
+ if (ToutCnt > 1) {
+ /* Might be a problem when the driver event handler
+ * calls StopPort again. XXX.
+ */
+
+ /* Fatal Error, Loop aborted */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E018,
+ SKERR_HWI_E018MSG);
+#ifndef SK_DIAG
+ Para.Para64 = Port;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+#endif /* !SK_DIAG */
+ return;
+ }
+ /*
+ * Cache incoherency workaround: Assume a start command
+ * has been lost while sending the frame.
+ */
+ ToutStart = SkOsGetTime(pAC);
+
+ if ((XsCsr & CSR_STOP) != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_START);
+ }
+ if ((XaCsr & CSR_STOP) != 0) {
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_START);
+ }
+ }
+
+ /*
+ * Because of the ASIC problem report entry from 21.08.1998 it is
+ * required to wait until CSR_STOP is reset and CSR_SV_IDLE is set.
+ */
+ } while ((XsCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE ||
+ (XaCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE);
+
+ /* Reset the MAC depending on the RstMode */
+ if (RstMode == SK_SOFT_RST) {
+ SkMacSoftRst(pAC, IoC, Port);
+ }
+ else {
+ SkMacHardRst(pAC, IoC, Port);
+ }
+
+ /* Disable Force Sync bit and Enable Alloc bit */
+ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
+ TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ /* Stop Interval Timer and Limit Counter of Tx Arbiter */
+ SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), 0L);
+ SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), 0L);
+
+ /* Perform a local reset of the port's Tx path */
+
+ /* Reset the PCI FIFO of the async Tx queue */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_SET_RESET);
+ /* Reset the PCI FIFO of the sync Tx queue */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_SET_RESET);
+ /* Reset the RAM Buffer async Tx queue */
+ SK_OUT8(IoC, RB_ADDR(pPrt->PXaQOff, RB_CTRL), RB_RST_SET);
+ /* Reset the RAM Buffer sync Tx queue */
+ SK_OUT8(IoC, RB_ADDR(pPrt->PXsQOff, RB_CTRL), RB_RST_SET);
+
+ /* Reset Tx MAC FIFO */
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* Note: MFF_RST_SET does NOT reset the XMAC ! */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET);
+
+ /* switch Link and Tx LED off, stop the LED counters */
+ /* Link LED is switched off by the RLMT and the Diag itself */
+ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* Reset TX MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_SET);
+ }
+#endif /* YUKON */
+ }
+
+ if ((Dir & SK_STOP_RX) != 0) {
+ /*
+ * The RX Stop Command will not terminate if no buffers
+ * are queued in the RxD ring. But it will always reach
+ * the Idle state. Therefore we can use this feature to
+ * stop the transfer of received packets.
+ */
+ /* stop the port's receive queue */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_STOP);
+
+ i = 100;
+ do {
+ /*
+ * Clear packet arbiter timeout to make sure
+ * this loop will terminate
+ */
+ SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ?
+ PA_CLR_TO_RX1 : PA_CLR_TO_RX2));
+
+ DWord = TestStopBit(pAC, IoC, pPrt->PRxQOff);
+
+ /* timeout if i==0 (bug fix for #10748) */
+ if (--i == 0) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E024,
+ SKERR_HWI_E024MSG);
+ break;
+ }
+ /*
+ * because of the ASIC problem report entry from 21.08.98
+ * it is required to wait until CSR_STOP is reset and
+ * CSR_SV_IDLE is set.
+ */
+ } while ((DWord & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE);
+
+ /* The path data transfer activity is fully stopped now */
+
+ /* Perform a local reset of the port's Rx path */
+
+ /* Reset the PCI FIFO of the Rx queue */
+ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_SET_RESET);
+ /* Reset the RAM Buffer receive queue */
+ SK_OUT8(IoC, RB_ADDR(pPrt->PRxQOff, RB_CTRL), RB_RST_SET);
+
+ /* Reset Rx MAC FIFO */
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET);
+
+ /* switch Rx LED off, stop the LED counter */
+ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* Reset Rx MAC FIFO */
+ SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_SET);
+ }
+#endif /* YUKON */
+ }
+} /* SkGeStopPort */
+
+
+/******************************************************************************
+ *
+ * SkGeInit0() - Level 0 Initialization
+ *
+ * Description:
+ * - Initialize the BMU address offsets
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInit0(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ int i;
+ SK_GEPORT *pPrt;
+
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ pPrt = &pAC->GIni.GP[i];
+
+ pPrt->PState = SK_PRT_RESET;
+ pPrt->PRxQOff = QOffTab[i].RxQOff;
+ pPrt->PXsQOff = QOffTab[i].XsQOff;
+ pPrt->PXaQOff = QOffTab[i].XaQOff;
+ pPrt->PCheckPar = SK_FALSE;
+ pPrt->PIsave = 0;
+ pPrt->PPrevShorts = 0;
+ pPrt->PLinkResCt = 0;
+ pPrt->PAutoNegTOCt = 0;
+ pPrt->PPrevRx = 0;
+ pPrt->PPrevFcs = 0;
+ pPrt->PRxLim = SK_DEF_RX_WA_LIM;
+ pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL;
+ pPrt->PLinkSpeedCap = (SK_U8)SK_LSPEED_CAP_1000MBPS;
+ pPrt->PLinkSpeed = (SK_U8)SK_LSPEED_1000MBPS;
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_UNKNOWN;
+ pPrt->PLinkModeConf = (SK_U8)SK_LMODE_AUTOSENSE;
+ pPrt->PFlowCtrlMode = (SK_U8)SK_FLOW_MODE_SYM_OR_REM;
+ pPrt->PLinkCap = (SK_U8)(SK_LMODE_CAP_HALF | SK_LMODE_CAP_FULL |
+ SK_LMODE_CAP_AUTOHALF | SK_LMODE_CAP_AUTOFULL);
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
+ pPrt->PFlowCtrlCap = (SK_U8)SK_FLOW_MODE_SYM_OR_REM;
+ pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
+ pPrt->PMSCap = 0;
+ pPrt->PMSMode = (SK_U8)SK_MS_MODE_AUTO;
+ pPrt->PMSStatus = (SK_U8)SK_MS_STAT_UNSET;
+ pPrt->PLipaAutoNeg = (SK_U8)SK_LIPA_UNKNOWN;
+ pPrt->PAutoNegFail = SK_FALSE;
+ pPrt->PHWLinkUp = SK_FALSE;
+ pPrt->PLinkBroken = SK_TRUE; /* See WA code */
+ pPrt->PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
+ pPrt->PMacColThres = TX_COL_DEF;
+ pPrt->PMacJamLen = TX_JAM_LEN_DEF;
+ pPrt->PMacJamIpgVal = TX_JAM_IPG_DEF;
+ pPrt->PMacJamIpgData = TX_IPG_JAM_DEF;
+ pPrt->PMacIpgData = IPG_DATA_DEF;
+ pPrt->PMacLimit4 = SK_FALSE;
+ }
+
+ pAC->GIni.GIPortUsage = SK_RED_LINK;
+ pAC->GIni.GILedBlinkCtrl = (SK_U16)OemConfig.Value;
+ pAC->GIni.GIValIrqMask = IS_ALL_MSK;
+
+} /* SkGeInit0*/
+
+#ifdef SK_PCI_RESET
+
+/******************************************************************************
+ *
+ * SkGePciReset() - Reset PCI interface
+ *
+ * Description:
+ * o Read PCI configuration.
+ * o Change power state to D3.
+ * o Change power state to D0.
+ * o Restore PCI configuration.
+ *
+ * Returns:
+ * 0: Success.
+ * 1: Power state could not be changed to D3.
+ */
+static int SkGePciReset(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ int i;
+ SK_U16 PmCtlSts;
+ SK_U32 Bp1;
+ SK_U32 Bp2;
+ SK_U16 PciCmd;
+ SK_U8 Cls;
+ SK_U8 Lat;
+ SK_U8 ConfigSpace[PCI_CFG_SIZE];
+
+ /*
+ * Note: Switching to D3 state is like a software reset.
+ * Switching from D3 to D0 is a hardware reset.
+ * We have to save and restore the configuration space.
+ */
+ for (i = 0; i < PCI_CFG_SIZE; i++) {
+ SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]);
+ }
+
+ /* We know the RAM Interface Arbiter is enabled. */
+ SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3);
+ SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
+
+ if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) {
+ return(1);
+ }
+
+ /* Return to D0 state. */
+ SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0);
+
+ /* Check for D0 state. */
+ SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
+
+ if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) {
+ return(1);
+ }
+
+ /* Check PCI Config Registers. */
+ SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd);
+ SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls);
+ SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1);
+ SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2);
+ SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat);
+
+ if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 ||
+ (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) {
+ return(1);
+ }
+
+ /* Restore PCI Config Space. */
+ for (i = 0; i < PCI_CFG_SIZE; i++) {
+ SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]);
+ }
+
+ return(0);
+} /* SkGePciReset */
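+/*
+ * Usage sketch (illustrative): when SK_PCI_RESET is defined, SkGeInit1()
+ * below calls this once before issuing the software reset, e.g.
+ *
+ *	(void)SkGePciReset(pAC, IoC);
+ *	SK_OUT8(IoC, B0_CTST, CS_RST_SET);
+ *
+ * A non-zero return only indicates that the D3/D0 power state cycle could
+ * not be performed; the caller ignores the result.
+ */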
+
+#endif /* SK_PCI_RESET */
+
+/******************************************************************************
+ *
+ * SkGeInit1() - Level 1 Initialization
+ *
+ * Description:
+ * o Do a software reset.
+ * o Clear all reset bits.
+ * o Verify that the detected hardware is present.
+ * Return an error if not.
+ * o Get the hardware configuration
+ * + Read the number of MACs/Ports.
+ * + Read the RAM size.
+ * + Read the PCI Revision Id.
+ * + Find out the adapter's host clock speed
+ * + Read and check the PHY type
+ *
+ * Returns:
+ * 0: success
+ * 5: Unexpected PHY type detected
+ * 6: HW self test failed
+ */
+static int SkGeInit1(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ SK_U8 Byte;
+ SK_U16 Word;
+ SK_U16 CtrlStat;
+ SK_U32 DWord;
+ int RetVal;
+ int i;
+
+ RetVal = 0;
+
+ /* save CLK_RUN bits (YUKON-Lite) */
+ SK_IN16(IoC, B0_CTST, &CtrlStat);
+
+#ifdef SK_PCI_RESET
+ (void)SkGePciReset(pAC, IoC);
+#endif /* SK_PCI_RESET */
+
+ /* do the SW-reset */
+ SK_OUT8(IoC, B0_CTST, CS_RST_SET);
+
+ /* release the SW-reset */
+ SK_OUT8(IoC, B0_CTST, CS_RST_CLR);
+
+ /* reset all error bits in the PCI STATUS register */
+ /*
+ * Note: PCI Cfg cycles cannot be used, because they are not
+ * available on some platforms after 'boot time'.
+ */
+ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* release Master Reset */
+ SK_OUT8(IoC, B0_CTST, CS_MRST_CLR);
+
+#ifdef CLK_RUN
+ CtrlStat |= CS_CLK_RUN_ENA;
+#endif /* CLK_RUN */
+
+ /* restore CLK_RUN bits */
+ SK_OUT16(IoC, B0_CTST, (SK_U16)(CtrlStat &
+ (CS_CLK_RUN_HOT | CS_CLK_RUN_RST | CS_CLK_RUN_ENA)));
+
+ /* read Chip Identification Number */
+ SK_IN8(IoC, B2_CHIP_ID, &Byte);
+ pAC->GIni.GIChipId = Byte;
+
+ /* read number of MACs */
+ SK_IN8(IoC, B2_MAC_CFG, &Byte);
+ pAC->GIni.GIMacsFound = (Byte & CFG_SNG_MAC) ? 1 : 2;
+
+ /* get Chip Revision Number */
+ pAC->GIni.GIChipRev = (SK_U8)((Byte & CFG_CHIP_R_MSK) >> 4);
+
+ /* get diff. PCI parameters */
+ SK_IN16(IoC, B0_CTST, &CtrlStat);
+
+ /* read the adapters RAM size */
+ SK_IN8(IoC, B2_E_0, &Byte);
+
+ pAC->GIni.GIGenesis = SK_FALSE;
+ pAC->GIni.GIYukon = SK_FALSE;
+ pAC->GIni.GIYukonLite = SK_FALSE;
+
+#ifdef GENESIS
+ if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
+
+ pAC->GIni.GIGenesis = SK_TRUE;
+
+ if (Byte == (SK_U8)3) {
+ /* special case: 4 x 64k x 36, offset = 0x80000 */
+ pAC->GIni.GIRamSize = 1024;
+ pAC->GIni.GIRamOffs = (SK_U32)512 * 1024;
+ }
+ else {
+ pAC->GIni.GIRamSize = (int)Byte * 512;
+ pAC->GIni.GIRamOffs = 0;
+ }
+ /* all GE adapters work with 53.125 MHz host clock */
+ pAC->GIni.GIHstClkFact = SK_FACT_53;
+
+ /* set Descr. Poll Timer Init Value to 250 ms */
+ pAC->GIni.GIPollTimerVal =
+ SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100;
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIChipId != CHIP_ID_GENESIS) {
+
+ pAC->GIni.GIYukon = SK_TRUE;
+
+ pAC->GIni.GIRamSize = (Byte == (SK_U8)0) ? 128 : (int)Byte * 4;
+
+ pAC->GIni.GIRamOffs = 0;
+
+ /* WA for chip Rev. A */
+ pAC->GIni.GIWolOffs = (pAC->GIni.GIChipId == CHIP_ID_YUKON &&
+ pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0;
+
+ /* get PM Capabilities of PCI config space */
+ SK_IN16(IoC, PCI_C(PCI_PM_CAP_REG), &Word);
+
+ /* check if VAUX is available */
+ if (((CtrlStat & CS_VAUX_AVAIL) != 0) &&
+ /* check also if PME from D3cold is set */
+ ((Word & PCI_PME_D3C_SUP) != 0)) {
+ /* set entry in GE init struct */
+ pAC->GIni.GIVauxAvail = SK_TRUE;
+ }
+
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE) {
+ /* this is Rev. A1 */
+ pAC->GIni.GIYukonLite = SK_TRUE;
+ }
+ else {
+ /* save Flash-Address Register */
+ SK_IN32(IoC, B2_FAR, &DWord);
+
+ /* test Flash-Address Register */
+ SK_OUT8(IoC, B2_FAR + 3, 0xff);
+ SK_IN8(IoC, B2_FAR + 3, &Byte);
+
+ if (Byte != 0) {
+ /* this is Rev. A0 */
+ pAC->GIni.GIYukonLite = SK_TRUE;
+
+ /* restore Flash-Address Register */
+ SK_OUT32(IoC, B2_FAR, DWord);
+ }
+ }
+
+ /* switch power to VCC (WA for VAUX problem) */
+ SK_OUT8(IoC, B0_POWER_CTRL, (SK_U8)(PC_VAUX_ENA | PC_VCC_ENA |
+ PC_VAUX_OFF | PC_VCC_ON));
+
+ /* read the Interrupt source */
+ SK_IN32(IoC, B0_ISRC, &DWord);
+
+ if ((DWord & IS_HW_ERR) != 0) {
+ /* read the HW Error Interrupt source */
+ SK_IN32(IoC, B0_HWE_ISRC, &DWord);
+
+ if ((DWord & IS_IRQ_SENSOR) != 0) {
+ /* disable HW Error IRQ */
+ pAC->GIni.GIValIrqMask &= ~IS_HW_ERR;
+ }
+ }
+
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+ /* set GMAC Link Control reset */
+ SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+
+ /* clear GMAC Link Control reset */
+ SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+ }
+ /* all YU chips work with 78.125 MHz host clock */
+ pAC->GIni.GIHstClkFact = SK_FACT_78;
+
+ pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; /* 215 ms */
+ }
+#endif /* YUKON */
+
+ /* check if 64-bit PCI Slot is present */
+ pAC->GIni.GIPciSlot64 = (SK_BOOL)((CtrlStat & CS_BUS_SLOT_SZ) != 0);
+
+ /* check if 66 MHz PCI Clock is active */
+ pAC->GIni.GIPciClock66 = (SK_BOOL)((CtrlStat & CS_BUS_CLOCK) != 0);
+
+ /* read PCI HW Revision Id. */
+ SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte);
+ pAC->GIni.GIPciHwRev = Byte;
+
+ /* read the PMD type */
+ SK_IN8(IoC, B2_PMD_TYP, &Byte);
+ pAC->GIni.GICopperType = (SK_U8)(Byte == 'T');
+
+ /* read the PHY type */
+ SK_IN8(IoC, B2_E_1, &Byte);
+
+ Byte &= 0x0f; /* the PHY type is stored in the lower nibble */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ switch (Byte) {
+ case SK_PHY_XMAC:
+ pAC->GIni.GP[i].PhyAddr = PHY_ADDR_XMAC;
+ break;
+ case SK_PHY_BCOM:
+ pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM;
+ pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO |
+ SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE;
+ break;
+ case SK_PHY_NAT:
+ pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT;
+ break;
+#endif /* OTHER_PHY */
+ default:
+ /* ERROR: unexpected PHY type detected */
+ RetVal = 5;
+ break;
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ if (Byte < (SK_U8)SK_PHY_MARV_COPPER) {
+ /* if this field is not initialized */
+ Byte = (SK_U8)SK_PHY_MARV_COPPER;
+
+ pAC->GIni.GICopperType = SK_TRUE;
+ }
+
+ pAC->GIni.GP[i].PhyAddr = PHY_ADDR_MARV;
+
+ if (pAC->GIni.GICopperType) {
+
+ pAC->GIni.GP[i].PLinkSpeedCap = (SK_U8)(SK_LSPEED_CAP_AUTO |
+ SK_LSPEED_CAP_10MBPS | SK_LSPEED_CAP_100MBPS |
+ SK_LSPEED_CAP_1000MBPS);
+
+ pAC->GIni.GP[i].PLinkSpeed = (SK_U8)SK_LSPEED_AUTO;
+
+ pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO |
+ SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE);
+ }
+ else {
+ Byte = (SK_U8)SK_PHY_MARV_FIBER;
+ }
+ }
+#endif /* YUKON */
+
+ pAC->GIni.GP[i].PhyType = (int)Byte;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
+ ("PHY type: %d PHY addr: %04x\n", Byte,
+ pAC->GIni.GP[i].PhyAddr));
+ }
+
+ /* get the MAC type and set the function pointers accordingly */
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ pAC->GIni.GIMacType = SK_MAC_XMAC;
+
+ pAC->GIni.GIFunc.pFnMacUpdateStats = SkXmUpdateStats;
+ pAC->GIni.GIFunc.pFnMacStatistic = SkXmMacStatistic;
+ pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter;
+ pAC->GIni.GIFunc.pFnMacOverflow = SkXmOverflowStatus;
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ pAC->GIni.GIMacType = SK_MAC_GMAC;
+
+ pAC->GIni.GIFunc.pFnMacUpdateStats = SkGmUpdateStats;
+ pAC->GIni.GIFunc.pFnMacStatistic = SkGmMacStatistic;
+ pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter;
+ pAC->GIni.GIFunc.pFnMacOverflow = SkGmOverflowStatus;
+
+#ifdef SPECIAL_HANDLING
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
+ /* check HW self test result */
+ SK_IN8(IoC, B2_E_3, &Byte);
+ if (Byte & B2_E3_RES_MASK) {
+ RetVal = 6;
+ }
+ }
+#endif
+ }
+#endif /* YUKON */
+
+ return(RetVal);
+} /* SkGeInit1 */
+
+
+/******************************************************************************
+ *
+ * SkGeInit2() - Level 2 Initialization
+ *
+ * Description:
+ * - start the Blink Source Counter
+ * - start the Descriptor Poll Timer
+ * - configure the MAC-Arbiter
+ * - configure the Packet-Arbiter
+ * - enable the Tx Arbiters
+ * - enable the RAM Interface Arbiter
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGeInit2(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+#ifdef GENESIS
+ SK_U32 DWord;
+#endif /* GENESIS */
+ int i;
+
+ /* start the Descriptor Poll Timer */
+ if (pAC->GIni.GIPollTimerVal != 0) {
+ if (pAC->GIni.GIPollTimerVal > SK_DPOLL_MAX) {
+ pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX;
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E017, SKERR_HWI_E017MSG);
+ }
+ SK_OUT32(IoC, B28_DPT_INI, pAC->GIni.GIPollTimerVal);
+ SK_OUT8(IoC, B28_DPT_CTRL, DPT_START);
+ }
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* start the Blink Source Counter */
+ DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100;
+
+ SK_OUT32(IoC, B2_BSC_INI, DWord);
+ SK_OUT8(IoC, B2_BSC_CTRL, BSC_START);
+
+ /*
+ * Configure the MAC Arbiter and the Packet Arbiter.
+ * They will be started once and never be stopped.
+ */
+ SkGeInitMacArb(pAC, IoC);
+
+ SkGeInitPktArb(pAC, IoC);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* start Time Stamp Timer */
+ SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_START);
+ }
+#endif /* YUKON */
+
+ /* enable the Tx Arbiters */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+ SK_OUT8(IoC, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);
+ }
+
+ /* enable the RAM Interface Arbiter */
+ SkGeInitRamIface(pAC, IoC);
+
+} /* SkGeInit2 */
+
+/******************************************************************************
+ *
+ * SkGeInit() - Initialize the GE Adapter with the specified level.
+ *
+ * Description:
+ * Level 0: Initialize the Module structures.
+ * Level 1: Generic Hardware Initialization. The IOP/MemBase pointer has
+ * to be set before calling this level.
+ *
+ * o Do a software reset.
+ * o Clear all reset bits.
+ * o Verify that the detected hardware is present.
+ * Return an error if not.
+ * o Get the hardware configuration
+ * + Set GIMacsFound with the number of MACs.
+ * + Store the RAM size in GIRamSize.
+ * + Save the PCI Revision ID in GIPciHwRev.
+ * o return an error
+ * if Number of MACs > SK_MAX_MACS
+ *
+ * After returning from Level 1 the adapter
+ * may be accessed with IO operations.
+ *
+ * Level 2: start the Blink Source Counter
+ *
+ * Returns:
+ * 0: success
+ * 1: Number of MACs exceeds SK_MAX_MACS (after level 1)
+ * 2: Adapter not present or not accessible
+ * 3: Illegal initialization level
+ * 4: Initialization Level 1 Call missing
+ * 5: Unexpected PHY type detected
+ * 6: HW self test failed
+ */
+int SkGeInit(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Level) /* initialization level */
+{
+ int RetVal; /* return value */
+ SK_U32 DWord;
+
+ RetVal = 0;
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
+ ("SkGeInit(Level %d)\n", Level));
+
+ switch (Level) {
+ case SK_INIT_DATA:
+ /* Initialization Level 0 */
+ SkGeInit0(pAC, IoC);
+ pAC->GIni.GILevel = SK_INIT_DATA;
+ break;
+
+ case SK_INIT_IO:
+ /* Initialization Level 1 */
+ RetVal = SkGeInit1(pAC, IoC);
+ if (RetVal != 0) {
+ break;
+ }
+
+ /* check if the adapter seems to be accessible */
+ SK_OUT32(IoC, B2_IRQM_INI, SK_TEST_VAL);
+ SK_IN32(IoC, B2_IRQM_INI, &DWord);
+ SK_OUT32(IoC, B2_IRQM_INI, 0L);
+
+ if (DWord != SK_TEST_VAL) {
+ RetVal = 2;
+ break;
+ }
+
+ /* check if the number of GIMacsFound matches SK_MAX_MACS */
+ if (pAC->GIni.GIMacsFound > SK_MAX_MACS) {
+ RetVal = 1;
+ break;
+ }
+
+ /* Level 1 successfully passed */
+ pAC->GIni.GILevel = SK_INIT_IO;
+ break;
+
+ case SK_INIT_RUN:
+ /* Initialization Level 2 */
+ if (pAC->GIni.GILevel != SK_INIT_IO) {
+#ifndef SK_DIAG
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E002, SKERR_HWI_E002MSG);
+#endif /* !SK_DIAG */
+ RetVal = 4;
+ break;
+ }
+ SkGeInit2(pAC, IoC);
+
+ /* Level 2 successfully passed */
+ pAC->GIni.GILevel = SK_INIT_RUN;
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E003, SKERR_HWI_E003MSG);
+ RetVal = 3;
+ break;
+ }
+
+ return(RetVal);
+} /* SkGeInit */
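+/*
+ * Illustrative call sequence (not part of the driver): the levels have
+ * to be passed in order, with the IO context / MemBase pointer set up
+ * before SK_INIT_IO.
+ *
+ *	SkGeInit(pAC, IoC, SK_INIT_DATA);
+ *	(map the adapter and set up the IO context)
+ *	if (SkGeInit(pAC, IoC, SK_INIT_IO) != 0) {
+ *		(adapter not accessible, unexpected PHY, too many MACs,
+ *		 or HW self test failed)
+ *	}
+ *	if (SkGeInit(pAC, IoC, SK_INIT_RUN) != 0) {
+ *		(SK_INIT_IO was skipped)
+ *	}
+ */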
+
+
+/******************************************************************************
+ *
+ * SkGeDeInit() - Deinitialize the adapter
+ *
+ * Description:
+ * All ports of the adapter will be stopped if not already done.
+ * Do a software reset and switch off all LEDs.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGeDeInit(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+ int i;
+ SK_U16 Word;
+
+#ifdef SK_PHY_LP_MODE
+ SK_U8 Byte;
+ SK_U16 PmCtlSts;
+#endif /* SK_PHY_LP_MODE */
+
+#if (!defined(SK_SLIM) && !defined(VCPU))
+ /* ensure I2C is ready */
+ SkI2cWaitIrq(pAC, IoC);
+#endif
+
+ /* stop all current transfer activity */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+ if (pAC->GIni.GP[i].PState != SK_PRT_STOP &&
+ pAC->GIni.GP[i].PState != SK_PRT_RESET) {
+
+ SkGeStopPort(pAC, IoC, i, SK_STOP_ALL, SK_HARD_RST);
+ }
+ }
+
+#ifdef SK_PHY_LP_MODE
+ /*
+ * for power saving purposes within mobile environments
+ * we set the PHY to coma mode and switch to D3 power state.
+ */
+ if (pAC->GIni.GIYukonLite &&
+ pAC->GIni.GIChipRev == CHIP_REV_YU_LITE_A3) {
+
+ /* for all ports switch PHY to coma mode */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+
+ SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP);
+ }
+
+ if (pAC->GIni.GIVauxAvail) {
+ /* switch power to VAUX */
+ Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF;
+
+ SK_OUT8(IoC, B0_POWER_CTRL, Byte);
+ }
+
+ /* switch to D3 state */
+ SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts);
+
+ PmCtlSts |= PCI_PM_STATE_D3;
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+ SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts);
+ }
+#endif /* SK_PHY_LP_MODE */
+
+ /* Reset all bits in the PCI STATUS register */
+ /*
+ * Note: PCI Cfg cycles cannot be used, because they are not
+ * available on some platforms after 'boot time'.
+ */
+ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* do the reset, all LEDs are switched off now */
+ SK_OUT8(IoC, B0_CTST, CS_RST_SET);
+
+ pAC->GIni.GILevel = SK_INIT_DATA;
+} /* SkGeDeInit */
+
+
+/******************************************************************************
+ *
+ * SkGeInitPort() - Initialize the specified port.
+ *
+ * Description:
+ * PRxQSize, PXSQSize, and PXAQSize have to be
+ * configured for the specified port before calling this function.
+ * The descriptor rings have to be initialized, too.
+ *
+ * o (Re)configure queues of the specified port.
+ * o configure the MAC of the specified port.
+ * o put ASIC and MAC(s) in operational mode.
+ * o initialize Rx/Tx and Sync LED
+ * o initialize RAM Buffers and MAC FIFOs
+ *
+ * The port is ready to connect when returning.
+ *
+ * Note:
+ * The MAC's Rx and Tx state machine is still disabled when returning.
+ *
+ * Returns:
+ * 0: success
+ * 1: Queue size initialization error. The configured values
+ * for PRxQSize, PXSQSize, or PXAQSize are invalid for one
+ * or more queues. The specified port was NOT initialized.
+ * An error log entry was generated.
+ * 2: The port has to be stopped before it can be initialized again.
+ */
+int SkGeInitPort(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port to configure */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (SkGeCheckQSize(pAC, Port) != 0) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E004, SKERR_HWI_E004MSG);
+ return(1);
+ }
+
+ if (pPrt->PState == SK_PRT_INIT || pPrt->PState == SK_PRT_RUN) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E005, SKERR_HWI_E005MSG);
+ return(2);
+ }
+
+ /* configuration ok, initialize the Port now */
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* initialize Rx, Tx and Link LED */
+ /*
+ * If the 1000BT PHY needs LED initialization, then swap the
+ * LED and XMAC initialization order.
+ */
+ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA);
+ SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_ENA);
+ /* The Link LED is initialized by RLMT or Diagnostics itself */
+
+ SkXmInitMac(pAC, IoC, Port);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmInitMac(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+ /* do NOT initialize the Link Sync Counter */
+
+ SkGeInitMacFifo(pAC, IoC, Port);
+
+ SkGeInitRamBufs(pAC, IoC, Port);
+
+ if (pPrt->PXSQSize != 0) {
+ /* enable Force Sync bit if synchronous queue available */
+ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_ENA_FSYNC);
+ }
+
+ SkGeInitBmu(pAC, IoC, Port);
+
+ /* mark port as initialized */
+ pPrt->PState = SK_PRT_INIT;
+
+ return(0);
+} /* SkGeInitPort */
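+/*
+ * Illustrative per-port bring-up sketch (not part of the driver): the
+ * queue sizes and descriptor rings are the caller's responsibility
+ * before this function is called; the sizes shown are placeholders.
+ *
+ *	pAC->GIni.GP[Port].PRxQSize = ...;
+ *	pAC->GIni.GP[Port].PXSQSize = ...;
+ *	pAC->GIni.GP[Port].PXAQSize = ...;
+ *	(initialize the descriptor rings)
+ *	if (SkGeInitPort(pAC, IoC, Port) != 0) {
+ *		(queue size error or port not stopped)
+ *	}
+ */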
diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c
new file mode 100644
index 000000000000..2991bc85cf2c
--- /dev/null
+++ b/drivers/net/sk98lin/skgemib.c
@@ -0,0 +1,1082 @@
+/*****************************************************************************
+ *
+ * Name: skgemib.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.11 $
+ * Date: $Date: 2003/09/15 13:38:12 $
+ * Purpose: Private Network Management Interface Management Database
+ *
+ ****************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * PRIVATE OID handler function prototypes
+ */
+PNMI_STATIC int Addr(SK_AC *pAC, SK_IOC IoC, int action,
+ SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int CsumStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int General(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Mac8023Stat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int MacPrivateConf(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int MacPrivateStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Monitor(SK_AC *pAC, SK_IOC IoC, int action,
+ SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int OidStruct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Perform(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int* pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Rlmt(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int RlmtStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int SensorStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Vpd(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+
+#ifdef SK_POWER_MGMT
+PNMI_STATIC int PowerManagement(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+#endif /* SK_POWER_MGMT */
+
+#ifdef SK_DIAG_SUPPORT
+PNMI_STATIC int DiagActions(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance,
+ unsigned int TableIndex, SK_U32 NetIndex);
+#endif /* SK_DIAG_SUPPORT */
+
+
+/* defines *******************************************************************/
+#define ID_TABLE_SIZE (sizeof(IdTable)/sizeof(IdTable[0]))
+
+
+/* global variables **********************************************************/
+
+/*
+ * Table correlating each OID with its handler function and, where
+ * applicable, the index of the hardware register stored in StatAddress.
+ */
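+/*
+ * Illustrative reading of one entry (assumed field order, see the
+ * SK_PNMI_TAB_ENTRY definition): the OID, the number of instances, the
+ * size of one instance, the offset of the value in the PNMI data
+ * structure, the access mode (RO/RW/WO), the handler function, and the
+ * StatAddress index passed to the handler, e.g. SK_PNMI_HTX for
+ * OID_GEN_XMIT_OK.
+ */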
+PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = {
+ {OID_GEN_XMIT_OK,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX},
+ {OID_GEN_RCV_OK,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX},
+ {OID_GEN_XMIT_ERROR,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+ {OID_GEN_RCV_ERROR,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+ {OID_GEN_RCV_NO_BUFFER,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+ {OID_GEN_DIRECTED_FRAMES_XMIT,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNICAST},
+ {OID_GEN_MULTICAST_FRAMES_XMIT,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTICAST},
+ {OID_GEN_BROADCAST_FRAMES_XMIT,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_BROADCAST},
+ {OID_GEN_DIRECTED_FRAMES_RCV,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_UNICAST},
+ {OID_GEN_MULTICAST_FRAMES_RCV,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_MULTICAST},
+ {OID_GEN_BROADCAST_FRAMES_RCV,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_BROADCAST},
+ {OID_GEN_RCV_CRC_ERROR,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FCS},
+ {OID_GEN_TRANSMIT_QUEUE_LENGTH,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+ {OID_802_3_PERMANENT_ADDRESS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, 0},
+ {OID_802_3_CURRENT_ADDRESS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, 0},
+ {OID_802_3_RCV_ERROR_ALIGNMENT,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FRAMING},
+ {OID_802_3_XMIT_ONE_COLLISION,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_SINGLE_COL},
+ {OID_802_3_XMIT_MORE_COLLISIONS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTI_COL},
+ {OID_802_3_XMIT_DEFERRED,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_DEFFERAL},
+ {OID_802_3_XMIT_MAX_COLLISIONS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_EXCESS_COL},
+ {OID_802_3_RCV_OVERRUN,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_OVERFLOW},
+ {OID_802_3_XMIT_UNDERRUN,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNDERRUN},
+ {OID_802_3_XMIT_TIMES_CRS_LOST,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_CARRIER},
+ {OID_802_3_XMIT_LATE_COLLISIONS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_LATE_COL},
+#ifdef SK_POWER_MGMT
+ {OID_PNP_CAPABILITIES,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, PowerManagement, 0},
+ {OID_PNP_SET_POWER,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_WO, PowerManagement, 0},
+ {OID_PNP_QUERY_POWER,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, PowerManagement, 0},
+ {OID_PNP_ADD_WAKE_UP_PATTERN,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_WO, PowerManagement, 0},
+ {OID_PNP_REMOVE_WAKE_UP_PATTERN,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_WO, PowerManagement, 0},
+ {OID_PNP_ENABLE_WAKE_UP,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RW, PowerManagement, 0},
+#endif /* SK_POWER_MGMT */
+#ifdef SK_DIAG_SUPPORT
+ {OID_SKGE_DIAG_MODE,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RW, DiagActions, 0},
+#endif /* SK_DIAG_SUPPORT */
+ {OID_SKGE_MDB_VERSION,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(MgmtDBVersion),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_SUPPORTED_LIST,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_ALL_DATA,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RW, OidStruct, 0},
+ {OID_SKGE_VPD_FREE_BYTES,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(VpdFreeBytes),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_ENTRIES_LIST,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(VpdEntriesList),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_ENTRIES_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(VpdEntriesNumber),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_KEY,
+ SK_PNMI_VPD_ENTRIES,
+ sizeof(SK_PNMI_VPD),
+ SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdKey),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_VALUE,
+ SK_PNMI_VPD_ENTRIES,
+ sizeof(SK_PNMI_VPD),
+ SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdValue),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_ACCESS,
+ SK_PNMI_VPD_ENTRIES,
+ sizeof(SK_PNMI_VPD),
+ SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAccess),
+ SK_PNMI_RO, Vpd, 0},
+ {OID_SKGE_VPD_ACTION,
+ SK_PNMI_VPD_ENTRIES,
+ sizeof(SK_PNMI_VPD),
+ SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAction),
+ SK_PNMI_RW, Vpd, 0},
+ {OID_SKGE_PORT_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(PortNumber),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_DEVICE_TYPE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(DeviceType),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_DRIVER_DESCR,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(DriverDescr),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_DRIVER_VERSION,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(DriverVersion),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_DRIVER_RELDATE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(DriverReleaseDate),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_DRIVER_FILENAME,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(DriverFileName),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_HW_DESCR,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(HwDescr),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_HW_VERSION,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(HwVersion),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_CHIPSET,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(Chipset),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_CHIPID,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(ChipId),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RAMSIZE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RamSize),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_VAUXAVAIL,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(VauxAvail),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_ACTION,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(Action),
+ SK_PNMI_RW, Perform, 0},
+ {OID_SKGE_RESULT,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TestResult),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_BUS_TYPE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(BusType),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_BUS_SPEED,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(BusSpeed),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_BUS_WIDTH,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(BusWidth),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_SW_QUEUE_LEN,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxSwQueueLen),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_SW_QUEUE_MAX,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxSwQueueMax),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_RETRY,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxRetryCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RX_INTR_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RxIntrCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_INTR_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxIntrCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RX_NO_BUF_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RxNoBufCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_NO_BUF_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxNoBufCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_USED_DESCR_NO,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxUsedDescrNo),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RX_DELIVERED_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RxDeliveredCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RX_OCTETS_DELIV_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RxOctetsDeliveredCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RX_HW_ERROR_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RxHwErrorsCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TX_HW_ERROR_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TxHwErrorsCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_IN_ERRORS_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(InErrorsCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_OUT_ERROR_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(OutErrorsCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_ERR_RECOVERY_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(ErrRecoveryCts),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_SYSUPTIME,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(SysUpTime),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_SENSOR_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(SensorNumber),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_SENSOR_INDEX,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorIndex),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_DESCR,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorDescr),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_TYPE,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorType),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_VALUE,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorValue),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_WAR_THRES_LOW,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdLow),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_WAR_THRES_UPP,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdHigh),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_ERR_THRES_LOW,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdLow),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_ERR_THRES_UPP,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdHigh),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_STATUS,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorStatus),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_WAR_CTS,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningCts),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_ERR_CTS,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorCts),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_WAR_TIME,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningTimestamp),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_SENSOR_ERR_TIME,
+ SK_PNMI_SENSOR_ENTRIES,
+ sizeof(SK_PNMI_SENSOR),
+ SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorTimestamp),
+ SK_PNMI_RO, SensorStat, 0},
+ {OID_SKGE_CHKSM_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(ChecksumNumber),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_CHKSM_RX_OK_CTS,
+ SKCS_NUM_PROTOCOLS,
+ sizeof(SK_PNMI_CHECKSUM),
+ SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxOkCts),
+ SK_PNMI_RO, CsumStat, 0},
+ {OID_SKGE_CHKSM_RX_UNABLE_CTS,
+ SKCS_NUM_PROTOCOLS,
+ sizeof(SK_PNMI_CHECKSUM),
+ SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxUnableCts),
+ SK_PNMI_RO, CsumStat, 0},
+ {OID_SKGE_CHKSM_RX_ERR_CTS,
+ SKCS_NUM_PROTOCOLS,
+ sizeof(SK_PNMI_CHECKSUM),
+ SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxErrCts),
+ SK_PNMI_RO, CsumStat, 0},
+ {OID_SKGE_CHKSM_TX_OK_CTS,
+ SKCS_NUM_PROTOCOLS,
+ sizeof(SK_PNMI_CHECKSUM),
+ SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxOkCts),
+ SK_PNMI_RO, CsumStat, 0},
+ {OID_SKGE_CHKSM_TX_UNABLE_CTS,
+ SKCS_NUM_PROTOCOLS,
+ sizeof(SK_PNMI_CHECKSUM),
+ SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxUnableCts),
+ SK_PNMI_RO, CsumStat, 0},
+ {OID_SKGE_STAT_TX,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX},
+ {OID_SKGE_STAT_TX_OCTETS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOctetsOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_OCTET},
+ {OID_SKGE_STAT_TX_BROADCAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBroadcastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BROADCAST},
+ {OID_SKGE_STAT_TX_MULTICAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMulticastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTICAST},
+ {OID_SKGE_STAT_TX_UNICAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUnicastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNICAST},
+ {OID_SKGE_STAT_TX_LONGFRAMES,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLongFramesCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LONGFRAMES},
+ {OID_SKGE_STAT_TX_BURST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBurstCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BURST},
+ {OID_SKGE_STAT_TX_PFLOWC,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxPauseMacCtrlCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_PMACC},
+ {OID_SKGE_STAT_TX_FLOWC,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMacCtrlCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MACC},
+ {OID_SKGE_STAT_TX_SINGLE_COL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSingleCollisionCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SINGLE_COL},
+ {OID_SKGE_STAT_TX_MULTI_COL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMultipleCollisionCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTI_COL},
+ {OID_SKGE_STAT_TX_EXCESS_COL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveCollisionCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_COL},
+ {OID_SKGE_STAT_TX_LATE_COL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLateCollisionCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LATE_COL},
+ {OID_SKGE_STAT_TX_DEFFERAL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxDeferralCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_DEFFERAL},
+ {OID_SKGE_STAT_TX_EXCESS_DEF,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveDeferralCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_DEF},
+ {OID_SKGE_STAT_TX_UNDERRUN,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxFifoUnderrunCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNDERRUN},
+ {OID_SKGE_STAT_TX_CARRIER,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxCarrierCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_CARRIER},
+/* {OID_SKGE_STAT_TX_UTIL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUtilization),
+ SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */
+ {OID_SKGE_STAT_TX_64,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx64Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_64},
+ {OID_SKGE_STAT_TX_127,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx127Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_127},
+ {OID_SKGE_STAT_TX_255,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx255Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_255},
+ {OID_SKGE_STAT_TX_511,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx511Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_511},
+ {OID_SKGE_STAT_TX_1023,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx1023Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_1023},
+ {OID_SKGE_STAT_TX_MAX,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMaxCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MAX},
+ {OID_SKGE_STAT_TX_SYNC,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC},
+ {OID_SKGE_STAT_TX_SYNC_OCTETS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncOctetsCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC_OCTET},
+ {OID_SKGE_STAT_RX,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX},
+ {OID_SKGE_STAT_RX_OCTETS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOctetsOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OCTET},
+ {OID_SKGE_STAT_RX_BROADCAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBroadcastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BROADCAST},
+ {OID_SKGE_STAT_RX_MULTICAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMulticastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MULTICAST},
+ {OID_SKGE_STAT_RX_UNICAST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUnicastOkCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_UNICAST},
+ {OID_SKGE_STAT_RX_LONGFRAMES,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxLongFramesCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_LONGFRAMES},
+ {OID_SKGE_STAT_RX_PFLOWC,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC},
+ {OID_SKGE_STAT_RX_FLOWC,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC},
+ {OID_SKGE_STAT_RX_PFLOWC_ERR,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlErrorCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC_ERR},
+ {OID_SKGE_STAT_RX_FLOWC_UNKWN,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlUnknownCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC_UNKWN},
+ {OID_SKGE_STAT_RX_BURST,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBurstCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BURST},
+ {OID_SKGE_STAT_RX_MISSED,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMissedCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MISSED},
+ {OID_SKGE_STAT_RX_FRAMING,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFramingCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FRAMING},
+ {OID_SKGE_STAT_RX_OVERFLOW,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFifoOverflowCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OVERFLOW},
+ {OID_SKGE_STAT_RX_JABBER,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxJabberCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_JABBER},
+ {OID_SKGE_STAT_RX_CARRIER,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCarrierCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CARRIER},
+ {OID_SKGE_STAT_RX_IR_LENGTH,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxIRLengthCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_IRLENGTH},
+ {OID_SKGE_STAT_RX_SYMBOL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxSymbolCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SYMBOL},
+ {OID_SKGE_STAT_RX_SHORTS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxShortsCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SHORTS},
+ {OID_SKGE_STAT_RX_RUNT,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxRuntCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_RUNT},
+ {OID_SKGE_STAT_RX_CEXT,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCextCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CEXT},
+ {OID_SKGE_STAT_RX_TOO_LONG,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxTooLongCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_TOO_LONG},
+ {OID_SKGE_STAT_RX_FCS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFcsCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FCS},
+/* {OID_SKGE_STAT_RX_UTIL,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUtilization),
+ SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */
+ {OID_SKGE_STAT_RX_64,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx64Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_64},
+ {OID_SKGE_STAT_RX_127,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx127Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_127},
+ {OID_SKGE_STAT_RX_255,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx255Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_255},
+ {OID_SKGE_STAT_RX_511,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx511Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_511},
+ {OID_SKGE_STAT_RX_1023,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx1023Cts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_1023},
+ {OID_SKGE_STAT_RX_MAX,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_STAT),
+ SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMaxCts),
+ SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MAX},
+ {OID_SKGE_PHYS_CUR_ADDR,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacCurrentAddr),
+ SK_PNMI_RW, Addr, 0},
+ {OID_SKGE_PHYS_FAC_ADDR,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacFactoryAddr),
+ SK_PNMI_RO, Addr, 0},
+ {OID_SKGE_PMD,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPMD),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_CONNECTOR,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfConnector),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_PHY_TYPE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType),
+ SK_PNMI_RO, MacPrivateConf, 0},
+#ifdef SK_PHY_LP_MODE
+ {OID_SKGE_PHY_LP_MODE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode),
+ SK_PNMI_RW, MacPrivateConf, 0},
+#endif
+ {OID_SKGE_LINK_CAP,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkCapability),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_LINK_MODE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkMode),
+ SK_PNMI_RW, MacPrivateConf, 0},
+ {OID_SKGE_LINK_MODE_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkModeStatus),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_LINK_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkStatus),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_FLOWCTRL_CAP,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlCapability),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_FLOWCTRL_MODE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlMode),
+ SK_PNMI_RW, MacPrivateConf, 0},
+ {OID_SKGE_FLOWCTRL_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlStatus),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_PHY_OPERATION_CAP,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationCapability),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_PHY_OPERATION_MODE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationMode),
+ SK_PNMI_RW, MacPrivateConf, 0},
+ {OID_SKGE_PHY_OPERATION_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationStatus),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_SPEED_CAP,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedCapability),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_SPEED_MODE,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedMode),
+ SK_PNMI_RW, MacPrivateConf, 0},
+ {OID_SKGE_SPEED_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_CONF),
+ SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedStatus),
+ SK_PNMI_RO, MacPrivateConf, 0},
+ {OID_SKGE_TRAP,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(Trap),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_TRAP_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(TrapNumber),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RLMT_MODE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtMode),
+ SK_PNMI_RW, Rlmt, 0},
+ {OID_SKGE_RLMT_PORT_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtPortNumber),
+ SK_PNMI_RO, Rlmt, 0},
+ {OID_SKGE_RLMT_PORT_ACTIVE,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtPortActive),
+ SK_PNMI_RO, Rlmt, 0},
+ {OID_SKGE_RLMT_PORT_PREFERRED,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtPortPreferred),
+ SK_PNMI_RW, Rlmt, 0},
+ {OID_SKGE_RLMT_CHANGE_CTS,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtChangeCts),
+ SK_PNMI_RO, Rlmt, 0},
+ {OID_SKGE_RLMT_CHANGE_TIME,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtChangeTime),
+ SK_PNMI_RO, Rlmt, 0},
+ {OID_SKGE_RLMT_CHANGE_ESTIM,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtChangeEstimate),
+ SK_PNMI_RO, Rlmt, 0},
+ {OID_SKGE_RLMT_CHANGE_THRES,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtChangeThreshold),
+ SK_PNMI_RW, Rlmt, 0},
+ {OID_SKGE_RLMT_PORT_INDEX,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtIndex),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_STATUS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtStatus),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_TX_HELLO_CTS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxHelloCts),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_RX_HELLO_CTS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxHelloCts),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_TX_SP_REQ_CTS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxSpHelloReqCts),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_RX_SP_CTS,
+ SK_PNMI_MAC_ENTRIES,
+ sizeof(SK_PNMI_RLMT),
+ SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxSpHelloCts),
+ SK_PNMI_RO, RlmtStat, 0},
+ {OID_SKGE_RLMT_MONITOR_NUMBER,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(RlmtMonitorNumber),
+ SK_PNMI_RO, General, 0},
+ {OID_SKGE_RLMT_MONITOR_INDEX,
+ SK_PNMI_MONITOR_ENTRIES,
+ sizeof(SK_PNMI_RLMT_MONITOR),
+ SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorIndex),
+ SK_PNMI_RO, Monitor, 0},
+ {OID_SKGE_RLMT_MONITOR_ADDR,
+ SK_PNMI_MONITOR_ENTRIES,
+ sizeof(SK_PNMI_RLMT_MONITOR),
+ SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAddr),
+ SK_PNMI_RO, Monitor, 0},
+ {OID_SKGE_RLMT_MONITOR_ERRS,
+ SK_PNMI_MONITOR_ENTRIES,
+ sizeof(SK_PNMI_RLMT_MONITOR),
+ SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorErrorCts),
+ SK_PNMI_RO, Monitor, 0},
+ {OID_SKGE_RLMT_MONITOR_TIMESTAMP,
+ SK_PNMI_MONITOR_ENTRIES,
+ sizeof(SK_PNMI_RLMT_MONITOR),
+ SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorTimestamp),
+ SK_PNMI_RO, Monitor, 0},
+ {OID_SKGE_RLMT_MONITOR_ADMIN,
+ SK_PNMI_MONITOR_ENTRIES,
+ sizeof(SK_PNMI_RLMT_MONITOR),
+ SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAdmin),
+ SK_PNMI_RW, Monitor, 0},
+ {OID_SKGE_MTU,
+ 1,
+ 0,
+ SK_PNMI_MAI_OFF(MtuSize),
+ SK_PNMI_RW, MacPrivateConf, 0},
+ {OID_SKGE_VCT_GET,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Vct, 0},
+ {OID_SKGE_VCT_SET,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_WO, Vct, 0},
+ {OID_SKGE_VCT_STATUS,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, Vct, 0},
+ {OID_SKGE_BOARDLEVEL,
+ 0,
+ 0,
+ 0,
+ SK_PNMI_RO, General, 0},
+};
+
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c
new file mode 100644
index 000000000000..58e1a5be913f
--- /dev/null
+++ b/drivers/net/sk98lin/skgepnmi.c
@@ -0,0 +1,8359 @@
+/*****************************************************************************
+ *
+ * Name: skgepnmi.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.111 $
+ * Date: $Date: 2003/09/15 13:35:35 $
+ * Purpose: Private Network Management Interface
+ *
+ ****************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+
+#ifndef _lint
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skgepnmi.c,v 1.111 2003/09/15 13:35:35 tschilli Exp $ (C) Marvell.";
+#endif /* !_lint */
+
+#include "h/skdrv1st.h"
+#include "h/sktypes.h"
+#include "h/xmac_ii.h"
+#include "h/skdebug.h"
+#include "h/skqueue.h"
+#include "h/skgepnmi.h"
+#include "h/skgesirq.h"
+#include "h/skcsum.h"
+#include "h/skvpd.h"
+#include "h/skgehw.h"
+#include "h/skgeinit.h"
+#include "h/skdrv2nd.h"
+#include "h/skgepnm2.h"
+#ifdef SK_POWER_MGMT
+#include "h/skgepmgt.h"
+#endif
+/* defines *******************************************************************/
+
+#ifndef DEBUG
+#define PNMI_STATIC static
+#else /* DEBUG */
+#define PNMI_STATIC
+#endif /* DEBUG */
+
+/*
+ * Public Function prototypes
+ */
+int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level);
+int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
+ unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
+ unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
+ unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Param);
+int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf,
+ unsigned int * pLen, SK_U32 NetIndex);
+
+
+/*
+ * Private Function prototypes
+ */
+
+PNMI_STATIC SK_U8 CalculateLinkModeStatus(SK_AC *pAC, SK_IOC IoC, unsigned int
+ PhysPortIndex);
+PNMI_STATIC SK_U8 CalculateLinkStatus(SK_AC *pAC, SK_IOC IoC, unsigned int
+ PhysPortIndex);
+PNMI_STATIC void CopyMac(char *pDst, SK_MAC_ADDR *pMac);
+PNMI_STATIC void CopyTrapQueue(SK_AC *pAC, char *pDstBuf);
+PNMI_STATIC SK_U64 GetPhysStatVal(SK_AC *pAC, SK_IOC IoC,
+ unsigned int PhysPortIndex, unsigned int StatIndex);
+PNMI_STATIC SK_U64 GetStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int LogPortIndex,
+ unsigned int StatIndex, SK_U32 NetIndex);
+PNMI_STATIC char* GetTrapEntry(SK_AC *pAC, SK_U32 TrapId, unsigned int Size);
+PNMI_STATIC void GetTrapQueueLen(SK_AC *pAC, unsigned int *pLen,
+ unsigned int *pEntries);
+PNMI_STATIC int GetVpdKeyArr(SK_AC *pAC, SK_IOC IoC, char *pKeyArr,
+ unsigned int KeyArrLen, unsigned int *pKeyNo);
+PNMI_STATIC int LookupId(SK_U32 Id);
+PNMI_STATIC int MacUpdate(SK_AC *pAC, SK_IOC IoC, unsigned int FirstMac,
+ unsigned int LastMac);
+PNMI_STATIC int PnmiStruct(SK_AC *pAC, SK_IOC IoC, int Action, char *pBuf,
+ unsigned int *pLen, SK_U32 NetIndex);
+PNMI_STATIC int PnmiVar(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id,
+ char *pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
+PNMI_STATIC void QueueRlmtNewMacTrap(SK_AC *pAC, unsigned int ActiveMac);
+PNMI_STATIC void QueueRlmtPortTrap(SK_AC *pAC, SK_U32 TrapId,
+ unsigned int PortIndex);
+PNMI_STATIC void QueueSensorTrap(SK_AC *pAC, SK_U32 TrapId,
+ unsigned int SensorIndex);
+PNMI_STATIC void QueueSimpleTrap(SK_AC *pAC, SK_U32 TrapId);
+PNMI_STATIC void ResetCounter(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex);
+PNMI_STATIC int RlmtUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex);
+PNMI_STATIC int SirqUpdate(SK_AC *pAC, SK_IOC IoC);
+PNMI_STATIC void VirtualConf(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, char *pBuf);
+PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, char *pBuf,
+ unsigned int *pLen, SK_U32 Instance, unsigned int TableIndex, SK_U32 NetIndex);
+PNMI_STATIC void CheckVctStatus(SK_AC *, SK_IOC, char *, SK_U32, SK_U32);
+
+/*
+ * Table to correlate OID with handler function and index to
+ * hardware register stored in StatAddress if applicable.
+ */
+#include "skgemib.c"
+
+/* global variables **********************************************************/
+
+/*
+ * Overflow status register bit table and corresponding counter
+ * dependent on MAC type - the number of entries corresponds to the width
+ * of the overflow mask returned by the pFnMacOverflow function
+ */
+PNMI_STATIC const SK_U16 StatOvrflwBit[][SK_PNMI_MAC_TYPES] = {
+/* Bit0 */ { SK_PNMI_HTX, SK_PNMI_HTX_UNICAST},
+/* Bit1 */ { SK_PNMI_HTX_OCTETHIGH, SK_PNMI_HTX_BROADCAST},
+/* Bit2 */ { SK_PNMI_HTX_OCTETLOW, SK_PNMI_HTX_PMACC},
+/* Bit3 */ { SK_PNMI_HTX_BROADCAST, SK_PNMI_HTX_MULTICAST},
+/* Bit4 */ { SK_PNMI_HTX_MULTICAST, SK_PNMI_HTX_OCTETLOW},
+/* Bit5 */ { SK_PNMI_HTX_UNICAST, SK_PNMI_HTX_OCTETHIGH},
+/* Bit6 */ { SK_PNMI_HTX_LONGFRAMES, SK_PNMI_HTX_64},
+/* Bit7 */ { SK_PNMI_HTX_BURST, SK_PNMI_HTX_127},
+/* Bit8 */ { SK_PNMI_HTX_PMACC, SK_PNMI_HTX_255},
+/* Bit9 */ { SK_PNMI_HTX_MACC, SK_PNMI_HTX_511},
+/* Bit10 */ { SK_PNMI_HTX_SINGLE_COL, SK_PNMI_HTX_1023},
+/* Bit11 */ { SK_PNMI_HTX_MULTI_COL, SK_PNMI_HTX_MAX},
+/* Bit12 */ { SK_PNMI_HTX_EXCESS_COL, SK_PNMI_HTX_LONGFRAMES},
+/* Bit13 */ { SK_PNMI_HTX_LATE_COL, SK_PNMI_HTX_RESERVED},
+/* Bit14 */ { SK_PNMI_HTX_DEFFERAL, SK_PNMI_HTX_COL},
+/* Bit15 */ { SK_PNMI_HTX_EXCESS_DEF, SK_PNMI_HTX_LATE_COL},
+/* Bit16 */ { SK_PNMI_HTX_UNDERRUN, SK_PNMI_HTX_EXCESS_COL},
+/* Bit17 */ { SK_PNMI_HTX_CARRIER, SK_PNMI_HTX_MULTI_COL},
+/* Bit18 */ { SK_PNMI_HTX_UTILUNDER, SK_PNMI_HTX_SINGLE_COL},
+/* Bit19 */ { SK_PNMI_HTX_UTILOVER, SK_PNMI_HTX_UNDERRUN},
+/* Bit20 */ { SK_PNMI_HTX_64, SK_PNMI_HTX_RESERVED},
+/* Bit21 */ { SK_PNMI_HTX_127, SK_PNMI_HTX_RESERVED},
+/* Bit22 */ { SK_PNMI_HTX_255, SK_PNMI_HTX_RESERVED},
+/* Bit23 */ { SK_PNMI_HTX_511, SK_PNMI_HTX_RESERVED},
+/* Bit24 */ { SK_PNMI_HTX_1023, SK_PNMI_HTX_RESERVED},
+/* Bit25 */ { SK_PNMI_HTX_MAX, SK_PNMI_HTX_RESERVED},
+/* Bit26 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit27 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit28 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit29 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit30 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit31 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
+/* Bit32 */ { SK_PNMI_HRX, SK_PNMI_HRX_UNICAST},
+/* Bit33 */ { SK_PNMI_HRX_OCTETHIGH, SK_PNMI_HRX_BROADCAST},
+/* Bit34 */ { SK_PNMI_HRX_OCTETLOW, SK_PNMI_HRX_PMACC},
+/* Bit35 */ { SK_PNMI_HRX_BROADCAST, SK_PNMI_HRX_MULTICAST},
+/* Bit36 */ { SK_PNMI_HRX_MULTICAST, SK_PNMI_HRX_FCS},
+/* Bit37 */ { SK_PNMI_HRX_UNICAST, SK_PNMI_HRX_RESERVED},
+/* Bit38 */ { SK_PNMI_HRX_PMACC, SK_PNMI_HRX_OCTETLOW},
+/* Bit39 */ { SK_PNMI_HRX_MACC, SK_PNMI_HRX_OCTETHIGH},
+/* Bit40 */ { SK_PNMI_HRX_PMACC_ERR, SK_PNMI_HRX_BADOCTETLOW},
+/* Bit41 */ { SK_PNMI_HRX_MACC_UNKWN, SK_PNMI_HRX_BADOCTETHIGH},
+/* Bit42 */ { SK_PNMI_HRX_BURST, SK_PNMI_HRX_UNDERSIZE},
+/* Bit43 */ { SK_PNMI_HRX_MISSED, SK_PNMI_HRX_RUNT},
+/* Bit44 */ { SK_PNMI_HRX_FRAMING, SK_PNMI_HRX_64},
+/* Bit45 */ { SK_PNMI_HRX_OVERFLOW, SK_PNMI_HRX_127},
+/* Bit46 */ { SK_PNMI_HRX_JABBER, SK_PNMI_HRX_255},
+/* Bit47 */ { SK_PNMI_HRX_CARRIER, SK_PNMI_HRX_511},
+/* Bit48 */ { SK_PNMI_HRX_IRLENGTH, SK_PNMI_HRX_1023},
+/* Bit49 */ { SK_PNMI_HRX_SYMBOL, SK_PNMI_HRX_MAX},
+/* Bit50 */ { SK_PNMI_HRX_SHORTS, SK_PNMI_HRX_LONGFRAMES},
+/* Bit51 */ { SK_PNMI_HRX_RUNT, SK_PNMI_HRX_TOO_LONG},
+/* Bit52 */ { SK_PNMI_HRX_TOO_LONG, SK_PNMI_HRX_JABBER},
+/* Bit53 */ { SK_PNMI_HRX_FCS, SK_PNMI_HRX_RESERVED},
+/* Bit54 */ { SK_PNMI_HRX_RESERVED, SK_PNMI_HRX_OVERFLOW},
+/* Bit55 */ { SK_PNMI_HRX_CEXT, SK_PNMI_HRX_RESERVED},
+/* Bit56 */ { SK_PNMI_HRX_UTILUNDER, SK_PNMI_HRX_RESERVED},
+/* Bit57 */ { SK_PNMI_HRX_UTILOVER, SK_PNMI_HRX_RESERVED},
+/* Bit58 */ { SK_PNMI_HRX_64, SK_PNMI_HRX_RESERVED},
+/* Bit59 */ { SK_PNMI_HRX_127, SK_PNMI_HRX_RESERVED},
+/* Bit60 */ { SK_PNMI_HRX_255, SK_PNMI_HRX_RESERVED},
+/* Bit61 */ { SK_PNMI_HRX_511, SK_PNMI_HRX_RESERVED},
+/* Bit62 */ { SK_PNMI_HRX_1023, SK_PNMI_HRX_RESERVED},
+/* Bit63 */ { SK_PNMI_HRX_MAX, SK_PNMI_HRX_RESERVED}
+};
+
+/*
+ * Table for hardware register saving on resets and port switches
+ */
+PNMI_STATIC const SK_PNMI_STATADDR StatAddr[SK_PNMI_MAX_IDX][SK_PNMI_MAC_TYPES] = {
+ /* SK_PNMI_HTX */
+ {{XM_TXF_OK, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_OCTETHIGH */
+ {{XM_TXO_OK_HI, SK_TRUE}, {GM_TXO_OK_HI, SK_TRUE}},
+ /* SK_PNMI_HTX_OCTETLOW */
+ {{XM_TXO_OK_LO, SK_FALSE}, {GM_TXO_OK_LO, SK_FALSE}},
+ /* SK_PNMI_HTX_BROADCAST */
+ {{XM_TXF_BC_OK, SK_TRUE}, {GM_TXF_BC_OK, SK_TRUE}},
+ /* SK_PNMI_HTX_MULTICAST */
+ {{XM_TXF_MC_OK, SK_TRUE}, {GM_TXF_MC_OK, SK_TRUE}},
+ /* SK_PNMI_HTX_UNICAST */
+ {{XM_TXF_UC_OK, SK_TRUE}, {GM_TXF_UC_OK, SK_TRUE}},
+ /* SK_PNMI_HTX_BURST */
+ {{XM_TXE_BURST, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_PMACC */
+ {{XM_TXF_MPAUSE, SK_TRUE}, {GM_TXF_MPAUSE, SK_TRUE}},
+ /* SK_PNMI_HTX_MACC */
+ {{XM_TXF_MCTRL, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_COL */
+ {{0, SK_FALSE}, {GM_TXF_COL, SK_TRUE}},
+ /* SK_PNMI_HTX_SINGLE_COL */
+ {{XM_TXF_SNG_COL, SK_TRUE}, {GM_TXF_SNG_COL, SK_TRUE}},
+ /* SK_PNMI_HTX_MULTI_COL */
+ {{XM_TXF_MUL_COL, SK_TRUE}, {GM_TXF_MUL_COL, SK_TRUE}},
+ /* SK_PNMI_HTX_EXCESS_COL */
+ {{XM_TXF_ABO_COL, SK_TRUE}, {GM_TXF_ABO_COL, SK_TRUE}},
+ /* SK_PNMI_HTX_LATE_COL */
+ {{XM_TXF_LAT_COL, SK_TRUE}, {GM_TXF_LAT_COL, SK_TRUE}},
+ /* SK_PNMI_HTX_DEFFERAL */
+ {{XM_TXF_DEF, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_EXCESS_DEF */
+ {{XM_TXF_EX_DEF, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_UNDERRUN */
+ {{XM_TXE_FIFO_UR, SK_TRUE}, {GM_TXE_FIFO_UR, SK_TRUE}},
+ /* SK_PNMI_HTX_CARRIER */
+ {{XM_TXE_CS_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_UTILUNDER */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_UTILOVER */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_64 */
+ {{XM_TXF_64B, SK_TRUE}, {GM_TXF_64B, SK_TRUE}},
+ /* SK_PNMI_HTX_127 */
+ {{XM_TXF_127B, SK_TRUE}, {GM_TXF_127B, SK_TRUE}},
+ /* SK_PNMI_HTX_255 */
+ {{XM_TXF_255B, SK_TRUE}, {GM_TXF_255B, SK_TRUE}},
+ /* SK_PNMI_HTX_511 */
+ {{XM_TXF_511B, SK_TRUE}, {GM_TXF_511B, SK_TRUE}},
+ /* SK_PNMI_HTX_1023 */
+ {{XM_TXF_1023B, SK_TRUE}, {GM_TXF_1023B, SK_TRUE}},
+ /* SK_PNMI_HTX_MAX */
+ {{XM_TXF_MAX_SZ, SK_TRUE}, {GM_TXF_1518B, SK_TRUE}},
+ /* SK_PNMI_HTX_LONGFRAMES */
+ {{XM_TXF_LONG, SK_TRUE}, {GM_TXF_MAX_SZ, SK_TRUE}},
+ /* SK_PNMI_HTX_SYNC */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_SYNC_OCTET */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HTX_RESERVED */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX */
+ {{XM_RXF_OK, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_OCTETHIGH */
+ {{XM_RXO_OK_HI, SK_TRUE}, {GM_RXO_OK_HI, SK_TRUE}},
+ /* SK_PNMI_HRX_OCTETLOW */
+ {{XM_RXO_OK_LO, SK_FALSE}, {GM_RXO_OK_LO, SK_FALSE}},
+ /* SK_PNMI_HRX_BADOCTETHIGH */
+ {{0, SK_FALSE}, {GM_RXO_ERR_HI, SK_TRUE}},
+ /* SK_PNMI_HRX_BADOCTETLOW */
+ {{0, SK_FALSE}, {GM_RXO_ERR_LO, SK_TRUE}},
+ /* SK_PNMI_HRX_BROADCAST */
+ {{XM_RXF_BC_OK, SK_TRUE}, {GM_RXF_BC_OK, SK_TRUE}},
+ /* SK_PNMI_HRX_MULTICAST */
+ {{XM_RXF_MC_OK, SK_TRUE}, {GM_RXF_MC_OK, SK_TRUE}},
+ /* SK_PNMI_HRX_UNICAST */
+ {{XM_RXF_UC_OK, SK_TRUE}, {GM_RXF_UC_OK, SK_TRUE}},
+ /* SK_PNMI_HRX_PMACC */
+ {{XM_RXF_MPAUSE, SK_TRUE}, {GM_RXF_MPAUSE, SK_TRUE}},
+ /* SK_PNMI_HRX_MACC */
+ {{XM_RXF_MCTRL, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_PMACC_ERR */
+ {{XM_RXF_INV_MP, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_MACC_UNKWN */
+ {{XM_RXF_INV_MOC, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_BURST */
+ {{XM_RXE_BURST, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_MISSED */
+ {{XM_RXE_FMISS, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_FRAMING */
+ {{XM_RXF_FRA_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_UNDERSIZE */
+ {{0, SK_FALSE}, {GM_RXF_SHT, SK_TRUE}},
+ /* SK_PNMI_HRX_OVERFLOW */
+ {{XM_RXE_FIFO_OV, SK_TRUE}, {GM_RXE_FIFO_OV, SK_TRUE}},
+ /* SK_PNMI_HRX_JABBER */
+ {{XM_RXF_JAB_PKT, SK_TRUE}, {GM_RXF_JAB_PKT, SK_TRUE}},
+ /* SK_PNMI_HRX_CARRIER */
+ {{XM_RXE_CAR_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_IRLENGTH */
+ {{XM_RXF_LEN_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_SYMBOL */
+ {{XM_RXE_SYM_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_SHORTS */
+ {{XM_RXE_SHT_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_RUNT */
+ {{XM_RXE_RUNT, SK_TRUE}, {GM_RXE_FRAG, SK_TRUE}},
+ /* SK_PNMI_HRX_TOO_LONG */
+ {{XM_RXF_LNG_ERR, SK_TRUE}, {GM_RXF_LNG_ERR, SK_TRUE}},
+ /* SK_PNMI_HRX_FCS */
+ {{XM_RXF_FCS_ERR, SK_TRUE}, {GM_RXF_FCS_ERR, SK_TRUE}},
+ /* SK_PNMI_HRX_CEXT */
+ {{XM_RXF_CEX_ERR, SK_TRUE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_UTILUNDER */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_UTILOVER */
+ {{0, SK_FALSE}, {0, SK_FALSE}},
+ /* SK_PNMI_HRX_64 */
+ {{XM_RXF_64B, SK_TRUE}, {GM_RXF_64B, SK_TRUE}},
+ /* SK_PNMI_HRX_127 */
+ {{XM_RXF_127B, SK_TRUE}, {GM_RXF_127B, SK_TRUE}},
+ /* SK_PNMI_HRX_255 */
+ {{XM_RXF_255B, SK_TRUE}, {GM_RXF_255B, SK_TRUE}},
+ /* SK_PNMI_HRX_511 */
+ {{XM_RXF_511B, SK_TRUE}, {GM_RXF_511B, SK_TRUE}},
+ /* SK_PNMI_HRX_1023 */
+ {{XM_RXF_1023B, SK_TRUE}, {GM_RXF_1023B, SK_TRUE}},
+ /* SK_PNMI_HRX_MAX */
+ {{XM_RXF_MAX_SZ, SK_TRUE}, {GM_RXF_1518B, SK_TRUE}},
+ /* SK_PNMI_HRX_LONGFRAMES */
+ {{0, SK_FALSE}, {GM_RXF_MAX_SZ, SK_TRUE}},
+ /* SK_PNMI_HRX_RESERVED */
+ {{0, SK_FALSE}, {0, SK_FALSE}}
+};
+
+
+/*****************************************************************************
+ *
+ * Public functions
+ *
+ */
+
+/*****************************************************************************
+ *
+ * SkPnmiInit - Init function of PNMI
+ *
+ * Description:
+ * SK_INIT_DATA: Initialises the data structures
+ * SK_INIT_IO: Resets the XMAC statistics, determines the device and
+ * connector type.
+ * SK_INIT_RUN: Starts a timer event for port switch per hour
+ * calculation.
+ *
+ * Returns:
+ * Always 0
+ */
+int SkPnmiInit(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Level) /* Initialization level */
+{
+ unsigned int PortMax; /* Number of ports */
+ unsigned int PortIndex; /* Current port index in loop */
+ SK_U16 Val16; /* Multiple purpose 16 bit variable */
+	SK_U8		Val8;		/* Multiple purpose 8 bit variable */
+ SK_EVPARA EventParam; /* Event struct for timer event */
+ SK_PNMI_VCT *pVctBackupData;
+
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiInit: Called, level=%d\n", Level));
+
+ switch (Level) {
+
+ case SK_INIT_DATA:
+ SK_MEMSET((char *)&pAC->Pnmi, 0, sizeof(pAC->Pnmi));
+ pAC->Pnmi.TrapBufFree = SK_PNMI_TRAP_QUEUE_LEN;
+ pAC->Pnmi.StartUpTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
+ pAC->Pnmi.RlmtChangeThreshold = SK_PNMI_DEF_RLMT_CHG_THRES;
+ for (PortIndex = 0; PortIndex < SK_MAX_MACS; PortIndex ++) {
+
+ pAC->Pnmi.Port[PortIndex].ActiveFlag = SK_FALSE;
+ pAC->Pnmi.DualNetActiveFlag = SK_FALSE;
+ }
+
+#ifdef SK_PNMI_CHECK
+ if (SK_PNMI_MAX_IDX != SK_PNMI_CNT_NO) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR049, SK_PNMI_ERR049MSG);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL,
+			("CounterOffset struct size (%d) differs from "
+ "SK_PNMI_MAX_IDX (%d)\n",
+ SK_PNMI_CNT_NO, SK_PNMI_MAX_IDX));
+ }
+
+ if (SK_PNMI_MAX_IDX !=
+ (sizeof(StatAddr) / (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES))) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR050, SK_PNMI_ERR050MSG);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL,
+ ("StatAddr table size (%d) differs from "
+ "SK_PNMI_MAX_IDX (%d)\n",
+ (sizeof(StatAddr) /
+ (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES)),
+ SK_PNMI_MAX_IDX));
+ }
+#endif /* SK_PNMI_CHECK */
+ break;
+
+ case SK_INIT_IO:
+ /*
+ * Reset MAC counters
+ */
+ PortMax = pAC->GIni.GIMacsFound;
+
+ for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) {
+
+ pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PortIndex);
+ }
+
+ /* Initialize DSP variables for Vct() to 0xff => Never written! */
+ for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) {
+ pAC->GIni.GP[PortIndex].PCableLen = 0xff;
+ pVctBackupData = &pAC->Pnmi.VctBackup[PortIndex];
+ pVctBackupData->PCableLen = 0xff;
+ }
+
+ /*
+ * Get pci bus speed
+ */
+ SK_IN16(IoC, B0_CTST, &Val16);
+ if ((Val16 & CS_BUS_CLOCK) == 0) {
+
+ pAC->Pnmi.PciBusSpeed = 33;
+ }
+ else {
+ pAC->Pnmi.PciBusSpeed = 66;
+ }
+
+ /*
+ * Get pci bus width
+ */
+ SK_IN16(IoC, B0_CTST, &Val16);
+ if ((Val16 & CS_BUS_SLOT_SZ) == 0) {
+
+ pAC->Pnmi.PciBusWidth = 32;
+ }
+ else {
+ pAC->Pnmi.PciBusWidth = 64;
+ }
+
+ /*
+ * Get chipset
+ */
+ switch (pAC->GIni.GIChipId) {
+ case CHIP_ID_GENESIS:
+ pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_XMAC;
+ break;
+
+ case CHIP_ID_YUKON:
+ pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_YUKON;
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Get PMD and DeviceType
+ */
+ SK_IN8(IoC, B2_PMD_TYP, &Val8);
+ switch (Val8) {
+ case 'S':
+ pAC->Pnmi.PMD = 3;
+ if (pAC->GIni.GIMacsFound > 1) {
+
+ pAC->Pnmi.DeviceType = 0x00020002;
+ }
+ else {
+ pAC->Pnmi.DeviceType = 0x00020001;
+ }
+ break;
+
+ case 'L':
+ pAC->Pnmi.PMD = 2;
+ if (pAC->GIni.GIMacsFound > 1) {
+
+ pAC->Pnmi.DeviceType = 0x00020004;
+ }
+ else {
+ pAC->Pnmi.DeviceType = 0x00020003;
+ }
+ break;
+
+ case 'C':
+ pAC->Pnmi.PMD = 4;
+ if (pAC->GIni.GIMacsFound > 1) {
+
+ pAC->Pnmi.DeviceType = 0x00020006;
+ }
+ else {
+ pAC->Pnmi.DeviceType = 0x00020005;
+ }
+ break;
+
+ case 'T':
+ pAC->Pnmi.PMD = 5;
+ if (pAC->GIni.GIMacsFound > 1) {
+
+ pAC->Pnmi.DeviceType = 0x00020008;
+ }
+ else {
+ pAC->Pnmi.DeviceType = 0x00020007;
+ }
+ break;
+
+ default :
+ pAC->Pnmi.PMD = 1;
+ pAC->Pnmi.DeviceType = 0;
+ break;
+ }
+
+ /*
+ * Get connector
+ */
+ SK_IN8(IoC, B2_CONN_TYP, &Val8);
+ switch (Val8) {
+ case 'C':
+ pAC->Pnmi.Connector = 2;
+ break;
+
+ case 'D':
+ pAC->Pnmi.Connector = 3;
+ break;
+
+ case 'F':
+ pAC->Pnmi.Connector = 4;
+ break;
+
+ case 'J':
+ pAC->Pnmi.Connector = 5;
+ break;
+
+ case 'V':
+ pAC->Pnmi.Connector = 6;
+ break;
+
+ default:
+ pAC->Pnmi.Connector = 1;
+ break;
+ }
+ break;
+
+ case SK_INIT_RUN:
+ /*
+ * Start timer for RLMT change counter
+ */
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer,
+ 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER,
+ EventParam);
+ break;
+
+ default:
+		break; /* Nothing to do */
+ }
+
+ return (0);
+}
+
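+/*
+ * The following is an illustrative sketch only and not part of the original
+ * driver sources: it shows the order in which the three init levels handled
+ * by SkPnmiInit() above would typically be passed by the OS specific init
+ * code. The function name and the SK_PNMI_EXAMPLES guard are made up for
+ * this example.
+ */
+#ifdef SK_PNMI_EXAMPLES
+static void ExamplePnmiBringUp(
+SK_AC *pAC,	/* Pointer to adapter context */
+SK_IOC IoC)	/* IO context handle */
+{
+	/* Level 1: zero the PNMI context, counters and trap buffer */
+	(void)SkPnmiInit(pAC, IoC, SK_INIT_DATA);
+
+	/* Level 2: reset MAC counters, probe PMD, connector and chipset */
+	(void)SkPnmiInit(pAC, IoC, SK_INIT_IO);
+
+	/* Level 3: start the RLMT change estimate timer */
+	(void)SkPnmiInit(pAC, IoC, SK_INIT_RUN);
+}
+#endif /* SK_PNMI_EXAMPLES */
+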
+/*****************************************************************************
+ *
+ * SkPnmiGetVar - Retrieves the value of a single OID
+ *
+ * Description:
+ * Calls a general sub-function for all this stuff. If the instance
+ * -1 is passed, the values of all instances are returned in an
+ * array of values.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
+ * the data.
+ * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+int SkPnmiGetVar(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 Id, /* Object ID that is to be processed */
+void *pBuf, /* Buffer to which the management data will be copied */
+unsigned int *pLen, /* On call: buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiGetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
+ Id, *pLen, Instance, NetIndex));
+
+ return (PnmiVar(pAC, IoC, SK_PNMI_GET, Id, (char *)pBuf, pLen,
+ Instance, NetIndex));
+}
+
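+/*
+ * Illustrative sketch only, not part of the original driver sources: a
+ * typical single-OID query through SkPnmiGetVar() above. The chosen OID
+ * (OID_SKGE_MTU, a 32 bit value), the function name and the
+ * SK_PNMI_EXAMPLES guard are assumptions made for this example.
+ */
+#ifdef SK_PNMI_EXAMPLES
+static int ExampleGetMtu(
+SK_AC *pAC,		/* Pointer to adapter context */
+SK_IOC IoC,		/* IO context handle */
+SK_U32 *pMtuSize)	/* Where the current MTU size is returned */
+{
+	unsigned int Len = sizeof(SK_U32);
+
+	/* Instance 1, NetIndex 0 (single net mode) */
+	return (SkPnmiGetVar(pAC, IoC, OID_SKGE_MTU, (void *)pMtuSize,
+		&Len, (SK_U32)1, (SK_U32)0));
+}
+#endif /* SK_PNMI_EXAMPLES */
+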
+/*****************************************************************************
+ *
+ * SkPnmiPreSetVar - Presets the value of a single OID
+ *
+ * Description:
+ * Calls a general sub-function for all this stuff. The preset does
+ * the same as a set, but returns just before finally setting the
+ *	new value. This is useful to check whether a set would be successful.
+ *	If the instance -1 is passed, an array of values is expected and
+ * all instances of the OID will be set.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+int SkPnmiPreSetVar(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 Id, /* Object ID that is to be processed */
+void *pBuf,		/* Buffer which contains the data to be preset */
+unsigned int *pLen, /* Total length of management data */
+SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiPreSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
+ Id, *pLen, Instance, NetIndex));
+
+
+ return (PnmiVar(pAC, IoC, SK_PNMI_PRESET, Id, (char *)pBuf, pLen,
+ Instance, NetIndex));
+}
+
+/*****************************************************************************
+ *
+ * SkPnmiSetVar - Sets the value of a single OID
+ *
+ * Description:
+ *	Calls a general sub-function for all this stuff. In contrast to a
+ *	preset, the set actually writes the new value.
+ *	If the instance -1 is passed, an array of values is expected and
+ * all instances of the OID will be set.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+int SkPnmiSetVar(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 Id, /* Object ID that is to be processed */
+void *pBuf,		/* Buffer which contains the data to be set */
+unsigned int *pLen, /* Total length of management data */
+SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
+ Id, *pLen, Instance, NetIndex));
+
+ return (PnmiVar(pAC, IoC, SK_PNMI_SET, Id, (char *)pBuf, pLen,
+ Instance, NetIndex));
+}
+
+/*****************************************************************************
+ *
+ * SkPnmiGetStruct - Retrieves the management database in SK_PNMI_STRUCT_DATA
+ *
+ * Description:
+ * Runs through the IdTable, queries the single OIDs and stores the
+ * returned data into the management database structure
+ * SK_PNMI_STRUCT_DATA. The offset of the OID in the structure
+ * is stored in the IdTable. The return value of the function will also
+ * be stored in SK_PNMI_STRUCT_DATA if the passed buffer has the
+ * minimum size of SK_PNMI_MIN_STRUCT_SIZE.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
+ * the data.
+ * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
+ */
+int SkPnmiGetStruct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+void *pBuf, /* Buffer to which the management data will be copied. */
+unsigned int *pLen, /* Length of buffer */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ unsigned int TableIndex;
+ unsigned int DstOffset;
+ unsigned int InstanceNo;
+ unsigned int InstanceCnt;
+ SK_U32 Instance;
+ unsigned int TmpLen;
+ char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE];
+
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiGetStruct: Called, BufLen=%d, NetIndex=%d\n",
+ *pLen, NetIndex));
+
+ if (*pLen < SK_PNMI_STRUCT_SIZE) {
+
+ if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) {
+
+ SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT,
+ (SK_U32)(-1));
+ }
+
+ *pLen = SK_PNMI_STRUCT_SIZE;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * Check NetIndex
+ */
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+ return (SK_PNMI_ERR_UNKNOWN_NET);
+ }
+
+ /* Update statistic */
+ SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On call");
+
+ if ((Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1)) !=
+ SK_PNMI_ERR_OK) {
+
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+
+ if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
+
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+
+ if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
+
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+
+ /*
+ * Increment semaphores to indicate that an update was
+ * already done
+ */
+ pAC->Pnmi.MacUpdatedFlag ++;
+ pAC->Pnmi.RlmtUpdatedFlag ++;
+ pAC->Pnmi.SirqUpdatedFlag ++;
+
+ /* Get vpd keys for instance calculation */
+ Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &TmpLen);
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /* Retrieve values */
+ SK_MEMSET((char *)pBuf, 0, SK_PNMI_STRUCT_SIZE);
+ for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) {
+
+ InstanceNo = IdTable[TableIndex].InstanceNo;
+ for (InstanceCnt = 1; InstanceCnt <= InstanceNo;
+ InstanceCnt ++) {
+
+ DstOffset = IdTable[TableIndex].Offset +
+ (InstanceCnt - 1) *
+ IdTable[TableIndex].StructSize;
+
+ /*
+ * For the VPD the instance is not an index number
+			 * but the key itself. Determine the VPD key to be
+			 * used from the instance counter.
+ */
+ if (IdTable[TableIndex].Id == OID_SKGE_VPD_KEY ||
+ IdTable[TableIndex].Id == OID_SKGE_VPD_VALUE ||
+ IdTable[TableIndex].Id == OID_SKGE_VPD_ACCESS ||
+ IdTable[TableIndex].Id == OID_SKGE_VPD_ACTION) {
+
+ SK_STRNCPY((char *)&Instance, KeyArr[InstanceCnt - 1], 4);
+ }
+ else {
+ Instance = (SK_U32)InstanceCnt;
+ }
+
+ TmpLen = *pLen - DstOffset;
+ Ret = IdTable[TableIndex].Func(pAC, IoC, SK_PNMI_GET,
+ IdTable[TableIndex].Id, (char *)pBuf +
+ DstOffset, &TmpLen, Instance, TableIndex, NetIndex);
+
+ /*
+ * An unknown instance error means that we reached
+ * the last instance of that variable. Proceed with
+ * the next OID in the table and ignore the return
+ * code.
+ */
+ if (Ret == SK_PNMI_ERR_UNKNOWN_INST) {
+
+ break;
+ }
+
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
+ SK_PNMI_SET_STAT(pBuf, Ret, DstOffset);
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+ }
+ }
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ *pLen = SK_PNMI_STRUCT_SIZE;
+ SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
+ SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1));
+ return (SK_PNMI_ERR_OK);
+}
+
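+/*
+ * Illustrative sketch only, not part of the original driver sources: how a
+ * caller might fetch the complete management database in one call through
+ * SkPnmiGetStruct() above. The function name and the SK_PNMI_EXAMPLES guard
+ * are assumptions made for this example; the buffer must be at least
+ * SK_PNMI_STRUCT_SIZE bytes, otherwise SK_PNMI_ERR_TOO_SHORT is returned.
+ */
+#ifdef SK_PNMI_EXAMPLES
+static int ExampleGetWholeMib(
+SK_AC *pAC,			/* Pointer to adapter context */
+SK_IOC IoC,			/* IO context handle */
+SK_PNMI_STRUCT_DATA *pData)	/* Buffer of at least SK_PNMI_STRUCT_SIZE bytes */
+{
+	unsigned int Len = SK_PNMI_STRUCT_SIZE;
+
+	/* NetIndex 0: in single net mode always zero */
+	return (SkPnmiGetStruct(pAC, IoC, (void *)pData, &Len, (SK_U32)0));
+}
+#endif /* SK_PNMI_EXAMPLES */
+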
+/*****************************************************************************
+ *
+ * SkPnmiPreSetStruct - Presets the management database in SK_PNMI_STRUCT_DATA
+ *
+ * Description:
+ * Calls a general sub-function for all this set stuff. The preset does
+ * the same as a set, but returns just before finally setting the
+ *	new value. This is useful to check whether a set would be successful.
+ *	The sub-function runs through the IdTable, checks which OIDs can be
+ *	set, and calls the handler function of the OID to perform the
+ * preset. The return value of the function will also be stored in
+ * SK_PNMI_STRUCT_DATA if the passed buffer has the minimum size of
+ * SK_PNMI_MIN_STRUCT_SIZE.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ */
+int SkPnmiPreSetStruct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+void *pBuf, /* Buffer which contains the data to be set */
+unsigned int *pLen, /* Length of buffer */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiPreSetStruct: Called, BufLen=%d, NetIndex=%d\n",
+ *pLen, NetIndex));
+
+ return (PnmiStruct(pAC, IoC, SK_PNMI_PRESET, (char *)pBuf,
+ pLen, NetIndex));
+}
+
+/*****************************************************************************
+ *
+ * SkPnmiSetStruct - Sets the management database in SK_PNMI_STRUCT_DATA
+ *
+ * Description:
+ *	Calls a general sub-function for all this set stuff. The sub-function
+ *	runs through the IdTable, checks which OIDs can be set, and calls the
+ *	handler function of the OID to perform the set. The return value of
+ *	the function will also be stored in SK_PNMI_STRUCT_DATA if the passed
+ *	buffer has the minimum size of SK_PNMI_MIN_STRUCT_SIZE.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ */
+int SkPnmiSetStruct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+void *pBuf, /* Buffer which contains the data to be set */
+unsigned int *pLen, /* Length of buffer */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiSetStruct: Called, BufLen=%d, NetIndex=%d\n",
+ *pLen, NetIndex));
+
+ return (PnmiStruct(pAC, IoC, SK_PNMI_SET, (char *)pBuf,
+ pLen, NetIndex));
+}
+
+/*****************************************************************************
+ *
+ * SkPnmiEvent - Event handler
+ *
+ * Description:
+ * Handles the following events:
+ * SK_PNMI_EVT_SIRQ_OVERFLOW When a hardware counter overflows an
+ * interrupt will be generated which is
+ * first handled by SIRQ which generates a
+ *	                              first handled by SIRQ, which generates
+ * upper 32 bit of the 64 bit counter.
+ * SK_PNMI_EVT_SEN_XXX The event is generated by the I2C module
+ * when a sensor reports a warning or
+ * error. The event will store a trap
+ * message in the trap buffer.
+ * SK_PNMI_EVT_CHG_EST_TIMER The timer event was initiated by this
+ * module and is used to calculate the
+ * port switches per hour.
+ * SK_PNMI_EVT_CLEAR_COUNTER The event clears all counters and
+ * timestamps.
+ * SK_PNMI_EVT_XMAC_RESET The event is generated by the driver
+ * before a hard reset of the XMAC is
+ * performed. All counters will be saved
+ * and added to the hardware counter
+ *	                              values after reset to guarantee
+ *	                              continuous counter values.
+ * SK_PNMI_EVT_RLMT_PORT_UP Generated by RLMT to notify that a port
+ * went logically up. A trap message will
+ * be stored to the trap buffer.
+ * SK_PNMI_EVT_RLMT_PORT_DOWN Generated by RLMT to notify that a port
+ * went logically down. A trap message will
+ * be stored to the trap buffer.
+ * SK_PNMI_EVT_RLMT_SEGMENTATION Generated by RLMT to notify that two
+ * spanning tree root bridges were
+ * detected. A trap message will be stored
+ * to the trap buffer.
+ * SK_PNMI_EVT_RLMT_ACTIVE_DOWN Notifies PNMI that an active port went
+ *	                              down. PNMI will no longer add the
+ * statistic values to the virtual port.
+ * SK_PNMI_EVT_RLMT_ACTIVE_UP Notifies PNMI that a port went up and
+ * is now an active port. PNMI will now
+ * add the statistic data of this port to
+ * the virtual port.
+ * SK_PNMI_EVT_RLMT_SET_NETS Notifies PNMI about the net mode. The first parameter
+ * contains the number of nets. 1 means single net, 2 means
+ *	                              dual net. The second parameter is -1.
+ *
+ * Returns:
+ * Always 0
+ */
+int SkPnmiEvent(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 Event, /* Event-Id */
+SK_EVPARA Param) /* Event dependent parameter */
+{
+ unsigned int PhysPortIndex;
+ unsigned int MaxNetNumber;
+ int CounterIndex;
+ int Ret;
+ SK_U16 MacStatus;
+ SK_U64 OverflowStatus;
+ SK_U64 Mask;
+ int MacType;
+ SK_U64 Value;
+ SK_U32 Val32;
+ SK_U16 Register;
+ SK_EVPARA EventParam;
+ SK_U64 NewestValue;
+ SK_U64 OldestValue;
+ SK_U64 Delta;
+ SK_PNMI_ESTIMATE *pEst;
+ SK_U32 NetIndex;
+ SK_GEPORT *pPrt;
+ SK_PNMI_VCT *pVctBackupData;
+ SK_U32 RetCode;
+ int i;
+ SK_U32 CableLength;
+
+
+#ifdef DEBUG
+ if (Event != SK_PNMI_EVT_XMAC_RESET) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: SkPnmiEvent: Called, Event=0x%x, Param=0x%x\n",
+ (unsigned int)Event, (unsigned int)Param.Para64));
+ }
+#endif /* DEBUG */
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On call");
+
+ MacType = pAC->GIni.GIMacType;
+
+ switch (Event) {
+
+ case SK_PNMI_EVT_SIRQ_OVERFLOW:
+ PhysPortIndex = (int)Param.Para32[0];
+ MacStatus = (SK_U16)Param.Para32[1];
+#ifdef DEBUG
+ if (PhysPortIndex >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SIRQ_OVERFLOW parameter"
+ " wrong, PhysPortIndex=0x%x\n",
+ PhysPortIndex));
+ return (0);
+ }
+#endif /* DEBUG */
+ OverflowStatus = 0;
+
+ /*
+ * Check which source caused an overflow interrupt.
+ */
+ if ((pAC->GIni.GIFunc.pFnMacOverflow(pAC, IoC, PhysPortIndex,
+ MacStatus, &OverflowStatus) != 0) ||
+ (OverflowStatus == 0)) {
+
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
+ return (0);
+ }
+
+ /*
+ * Check the overflow status register and increment
+		 * the upper dword of the corresponding counter.
+ */
+ for (CounterIndex = 0; CounterIndex < sizeof(Mask) * 8;
+ CounterIndex ++) {
+
+ Mask = (SK_U64)1 << CounterIndex;
+ if ((OverflowStatus & Mask) == 0) {
+
+ continue;
+ }
+
+ switch (StatOvrflwBit[CounterIndex][MacType]) {
+
+ case SK_PNMI_HTX_UTILUNDER:
+ case SK_PNMI_HTX_UTILOVER:
+ if (MacType == SK_MAC_XMAC) {
+ XM_IN16(IoC, PhysPortIndex, XM_TX_CMD, &Register);
+ Register |= XM_TX_SAM_LINE;
+ XM_OUT16(IoC, PhysPortIndex, XM_TX_CMD, Register);
+ }
+ break;
+
+ case SK_PNMI_HRX_UTILUNDER:
+ case SK_PNMI_HRX_UTILOVER:
+ if (MacType == SK_MAC_XMAC) {
+ XM_IN16(IoC, PhysPortIndex, XM_RX_CMD, &Register);
+ Register |= XM_RX_SAM_LINE;
+ XM_OUT16(IoC, PhysPortIndex, XM_RX_CMD, Register);
+ }
+ break;
+
+ case SK_PNMI_HTX_OCTETHIGH:
+ case SK_PNMI_HTX_OCTETLOW:
+ case SK_PNMI_HTX_RESERVED:
+ case SK_PNMI_HRX_OCTETHIGH:
+ case SK_PNMI_HRX_OCTETLOW:
+ case SK_PNMI_HRX_IRLENGTH:
+ case SK_PNMI_HRX_RESERVED:
+
+ /*
+			 * the following counters are not handled (id > 63)
+ */
+ case SK_PNMI_HTX_SYNC:
+ case SK_PNMI_HTX_SYNC_OCTET:
+ break;
+
+ case SK_PNMI_HRX_LONGFRAMES:
+ if (MacType == SK_MAC_GMAC) {
+ pAC->Pnmi.Port[PhysPortIndex].
+ CounterHigh[CounterIndex] ++;
+ }
+ break;
+
+ default:
+ pAC->Pnmi.Port[PhysPortIndex].
+ CounterHigh[CounterIndex] ++;
+ }
+ }
+ break;
+
+ case SK_PNMI_EVT_SEN_WAR_LOW:
+#ifdef DEBUG
+ if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_LOW parameter wrong, SensorIndex=%d\n",
+ (unsigned int)Param.Para64));
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate
+ * an event for user space applications with the
+ * SK_DRIVER_SENDEVENT macro.
+ */
+ QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_LOW,
+ (unsigned int)Param.Para64);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ break;
+
+ case SK_PNMI_EVT_SEN_WAR_UPP:
+#ifdef DEBUG
+ if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_UPP parameter wrong, SensorIndex=%d\n",
+ (unsigned int)Param.Para64));
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate
+ * an event for user space applications with the
+ * SK_DRIVER_SENDEVENT macro.
+ */
+ QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_UPP,
+ (unsigned int)Param.Para64);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ break;
+
+ case SK_PNMI_EVT_SEN_ERR_LOW:
+#ifdef DEBUG
+ if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_LOW parameter wrong, SensorIndex=%d\n",
+ (unsigned int)Param.Para64));
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate
+ * an event for user space applications with the
+ * SK_DRIVER_SENDEVENT macro.
+ */
+ QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_LOW,
+ (unsigned int)Param.Para64);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ break;
+
+ case SK_PNMI_EVT_SEN_ERR_UPP:
+#ifdef DEBUG
+ if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_UPP parameter wrong, SensorIndex=%d\n",
+ (unsigned int)Param.Para64));
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate
+ * an event for user space applications with the
+ * SK_DRIVER_SENDEVENT macro.
+ */
+ QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_UPP,
+ (unsigned int)Param.Para64);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ break;
+
+ case SK_PNMI_EVT_CHG_EST_TIMER:
+ /*
+ * Calculate port switch average on a per hour basis
+ * Time interval for check : 28125 ms
+ * Number of values for average : 8
+ *
+		 *	 Be careful when changing these values; on change check
+ * - typedef of SK_PNMI_ESTIMATE (Size of EstValue
+ * array one less than value number)
+ * - Timer initialization SkTimerStart() in SkPnmiInit
+		 *	   - Delta value below must be multiplied by a
+		 *	     power of 2
+ *
+ */
+ pEst = &pAC->Pnmi.RlmtChangeEstimate;
+ CounterIndex = pEst->EstValueIndex + 1;
+ if (CounterIndex == 7) {
+
+ CounterIndex = 0;
+ }
+ pEst->EstValueIndex = CounterIndex;
+
+ NewestValue = pAC->Pnmi.RlmtChangeCts;
+ OldestValue = pEst->EstValue[CounterIndex];
+ pEst->EstValue[CounterIndex] = NewestValue;
+
+ /*
+ * Calculate average. Delta stores the number of
+ * port switches per 28125 * 8 = 225000 ms
+ */
+ if (NewestValue >= OldestValue) {
+
+ Delta = NewestValue - OldestValue;
+ }
+ else {
+ /* Overflow situation */
+ Delta = (SK_U64)(0 - OldestValue) + NewestValue;
+ }
+
+ /*
+ * Extrapolate delta to port switches per hour.
+ * Estimate = Delta * (3600000 / 225000)
+ * = Delta * 16
+ * = Delta << 4
+ */
+ pAC->Pnmi.RlmtChangeEstimate.Estimate = Delta << 4;
+
+ /*
+		 * Check if the threshold is exceeded. If the threshold is
+		 * permanently exceeded, an event is generated every 28125 ms
+		 * to remind the user of this condition.
+ */
+ if ((pAC->Pnmi.RlmtChangeThreshold != 0) &&
+ (pAC->Pnmi.RlmtChangeEstimate.Estimate >=
+ pAC->Pnmi.RlmtChangeThreshold)) {
+
+ QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_CHANGE_THRES);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ }
+
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer,
+ 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER,
+ EventParam);
+ break;
+
+ case SK_PNMI_EVT_CLEAR_COUNTER:
+ /*
+ * Param.Para32[0] contains the NetIndex (0 ..1).
+ * Param.Para32[1] is reserved, contains -1.
+ */
+ NetIndex = (SK_U32)Param.Para32[0];
+
+#ifdef DEBUG
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_CLEAR_COUNTER parameter wrong, NetIndex=%d\n",
+ NetIndex));
+
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Set all counters and timestamps to zero.
+	 * The corresponding NetIndex is required as a
+ * parameter of the event.
+ */
+ ResetCounter(pAC, IoC, NetIndex);
+ break;
+
+ case SK_PNMI_EVT_XMAC_RESET:
+ /*
+		 * To guarantee continuous counter values, store the current
+		 * XMAC statistic values in the entries 1..n of the
+		 * CounterOffset array. XMAC Errata #2
+ */
+#ifdef DEBUG
+ if ((unsigned int)Param.Para64 >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_XMAC_RESET parameter wrong, PhysPortIndex=%d\n",
+ (unsigned int)Param.Para64));
+ return (0);
+ }
+#endif
+ PhysPortIndex = (unsigned int)Param.Para64;
+
+ /*
+ * Update XMAC statistic to get fresh values
+ */
+ Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
+ return (0);
+ }
+ /*
+ * Increment semaphore to indicate that an update was
+ * already done
+ */
+ pAC->Pnmi.MacUpdatedFlag ++;
+
+ for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
+ CounterIndex ++) {
+
+ if (!StatAddr[CounterIndex][MacType].GetOffset) {
+
+ continue;
+ }
+
+ pAC->Pnmi.Port[PhysPortIndex].CounterOffset[CounterIndex] =
+ GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
+
+ pAC->Pnmi.Port[PhysPortIndex].CounterHigh[CounterIndex] = 0;
+ }
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ break;
+
+ case SK_PNMI_EVT_RLMT_PORT_UP:
+ PhysPortIndex = (unsigned int)Param.Para32[0];
+#ifdef DEBUG
+ if (PhysPortIndex >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_UP parameter"
+ " wrong, PhysPortIndex=%d\n", PhysPortIndex));
+
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate an event for
+ * user space applications with the SK_DRIVER_SENDEVENT macro.
+ */
+ QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_UP, PhysPortIndex);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+
+ /* Bugfix for XMAC errata (#10620)*/
+ if (MacType == SK_MAC_XMAC) {
+ /* Add incremental difference to offset (#10620)*/
+ (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ XM_RXE_SHT_ERR, &Val32);
+
+ Value = (((SK_U64)pAC->Pnmi.Port[PhysPortIndex].
+ CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32);
+ pAC->Pnmi.Port[PhysPortIndex].CounterOffset[SK_PNMI_HRX_SHORTS] +=
+ Value - pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark;
+ }
+
+ /* Tell VctStatus() that a link was up meanwhile. */
+ pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_LINK;
+ break;
+
+ case SK_PNMI_EVT_RLMT_PORT_DOWN:
+ PhysPortIndex = (unsigned int)Param.Para32[0];
+
+#ifdef DEBUG
+ if (PhysPortIndex >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_DOWN parameter"
+ " wrong, PhysPortIndex=%d\n", PhysPortIndex));
+
+ return (0);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Store a trap message in the trap buffer and generate an event for
+ * user space applications with the SK_DRIVER_SENDEVENT macro.
+ */
+ QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, PhysPortIndex);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+
+ /* Bugfix #10620 - get zero level for incremental difference */
+ if (MacType == SK_MAC_XMAC) {
+
+ (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ XM_RXE_SHT_ERR, &Val32);
+
+ pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark =
+ (((SK_U64)pAC->Pnmi.Port[PhysPortIndex].
+ CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32);
+ }
+ break;
+
+ case SK_PNMI_EVT_RLMT_ACTIVE_DOWN:
+ PhysPortIndex = (unsigned int)Param.Para32[0];
+ NetIndex = (SK_U32)Param.Para32[1];
+
+#ifdef DEBUG
+ if (PhysPortIndex >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, PhysPort=%d\n",
+ PhysPortIndex));
+ }
+
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, NetIndex=%d\n",
+ NetIndex));
+ }
+#endif /* DEBUG */
+
+ /*
+ * For now, ignore event if NetIndex != 0.
+ */
+ if (Param.Para32[1] != 0) {
+
+ return (0);
+ }
+
+ /*
+ * Nothing to do if port is already inactive
+ */
+ if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ return (0);
+ }
+
+ /*
+ * Update statistic counters to calculate new offset for the virtual
+ * port and increment semaphore to indicate that an update was already
+ * done.
+ */
+ if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) !=
+ SK_PNMI_ERR_OK) {
+
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
+ return (0);
+ }
+ pAC->Pnmi.MacUpdatedFlag ++;
+
+ /*
+		 * Calculate the new counter offset for the virtual port to guarantee
+		 * continuous counting across port switches. The virtual port consists
+		 * of all currently active ports. The port down event indicates that a
+		 * port is removed from the virtual port. Therefore add the counter
+		 * value of the removed port to the CounterOffset of the virtual port
+		 * to preserve the same counter value.
+ */
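+		/*
+		 * Numeric illustration (value assumed): if the leaving port
+		 * currently reports 1000 for a counter, 1000 is added to the
+		 * virtual offset, so the virtual counter keeps its value
+		 * after the port drops out of the set of active ports.
+		 */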
+ for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
+ CounterIndex ++) {
+
+ if (!StatAddr[CounterIndex][MacType].GetOffset) {
+
+ continue;
+ }
+
+ Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
+
+ pAC->Pnmi.VirtualCounterOffset[CounterIndex] += Value;
+ }
+
+ /*
+ * Set port to inactive
+ */
+ pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_FALSE;
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ break;
+
+ case SK_PNMI_EVT_RLMT_ACTIVE_UP:
+ PhysPortIndex = (unsigned int)Param.Para32[0];
+ NetIndex = (SK_U32)Param.Para32[1];
+
+#ifdef DEBUG
+ if (PhysPortIndex >= SK_MAX_MACS) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, PhysPort=%d\n",
+ PhysPortIndex));
+ }
+
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
+ ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, NetIndex=%d\n",
+ NetIndex));
+ }
+#endif /* DEBUG */
+
+ /*
+ * For now, ignore event if NetIndex != 0.
+ */
+ if (Param.Para32[1] != 0) {
+
+ return (0);
+ }
+
+ /*
+ * Nothing to do if port is already active
+ */
+ if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ return (0);
+ }
+
+ /*
+ * Statistic maintenance
+ */
+ pAC->Pnmi.RlmtChangeCts ++;
+ pAC->Pnmi.RlmtChangeTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
+
+ /*
+ * Store a trap message in the trap buffer and generate an event for
+ * user space applications with the SK_DRIVER_SENDEVENT macro.
+ */
+ QueueRlmtNewMacTrap(pAC, PhysPortIndex);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+
+ /*
+ * Update statistic counters to calculate new offset for the virtual
+ * port and increment semaphore to indicate that an update was
+ * already done.
+ */
+ if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) !=
+ SK_PNMI_ERR_OK) {
+
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
+ return (0);
+ }
+ pAC->Pnmi.MacUpdatedFlag ++;
+
+ /*
+		 * Calculate the new counter offset for the virtual port to guarantee
+		 * continuous counting across port switches. A new port is added to the
+		 * virtual port. Therefore subtract the counter value of the new port
+		 * from the CounterOffset of the virtual port to preserve the same value.
+ */
+ for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
+ CounterIndex ++) {
+
+ if (!StatAddr[CounterIndex][MacType].GetOffset) {
+
+ continue;
+ }
+
+ Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
+
+ pAC->Pnmi.VirtualCounterOffset[CounterIndex] -= Value;
+ }
+
+ /* Set port to active */
+ pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_TRUE;
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ break;
+
+ case SK_PNMI_EVT_RLMT_SEGMENTATION:
+ /*
+		 * Param.Para32[0] contains the NetIndex.
+ */
+
+ /*
+ * Store a trap message in the trap buffer and generate an event for
+ * user space applications with the SK_DRIVER_SENDEVENT macro.
+ */
+ QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_SEGMENTATION);
+ (void)SK_DRIVER_SENDEVENT(pAC, IoC);
+ break;
+
+ case SK_PNMI_EVT_RLMT_SET_NETS:
+ /*
+ * Param.Para32[0] contains the number of Nets.
+ * Param.Para32[1] is reserved, contains -1.
+ */
+ /*
+ * Check number of nets
+ */
+ MaxNetNumber = pAC->GIni.GIMacsFound;
+ if (((unsigned int)Param.Para32[0] < 1)
+ || ((unsigned int)Param.Para32[0] > MaxNetNumber)) {
+ return (SK_PNMI_ERR_UNKNOWN_NET);
+ }
+
+ if ((unsigned int)Param.Para32[0] == 1) { /* single net mode */
+ pAC->Pnmi.DualNetActiveFlag = SK_FALSE;
+ }
+ else { /* dual net mode */
+ pAC->Pnmi.DualNetActiveFlag = SK_TRUE;
+ }
+ break;
+
+ case SK_PNMI_EVT_VCT_RESET:
+ PhysPortIndex = Param.Para32[0];
+ pPrt = &pAC->GIni.GP[PhysPortIndex];
+ pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex];
+
+ if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) {
+ RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE);
+ if (RetCode == 2) {
+ /*
+ * VCT test is still running.
+ * Start VCT timer counter again.
+ */
+ SK_MEMSET((char *) &Param, 0, sizeof(Param));
+ Param.Para32[0] = PhysPortIndex;
+ Param.Para32[1] = -1;
+ SkTimerStart(pAC, IoC,
+ &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer,
+ 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param);
+ break;
+ }
+ pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING;
+ pAC->Pnmi.VctStatus[PhysPortIndex] |=
+ (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE);
+
+ /* Copy results for later use to PNMI struct. */
+ for (i = 0; i < 4; i++) {
+ if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) {
+ if ((pPrt->PMdiPairLen[i] > 35) &&
+ (pPrt->PMdiPairLen[i] < 0xff)) {
+ pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH;
+ }
+ }
+ if ((pPrt->PMdiPairLen[i] > 35) &&
+ (pPrt->PMdiPairLen[i] != 0xff)) {
+ CableLength = 1000 *
+ (((175 * pPrt->PMdiPairLen[i]) / 210) - 28);
+ }
+ else {
+ CableLength = 0;
+ }
+ pVctBackupData->PMdiPairLen[i] = CableLength;
+ pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i];
+ }
+
+ Param.Para32[0] = PhysPortIndex;
+ Param.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param);
+ SkEventDispatcher(pAC, IoC);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
+ return (0);
+}
+
+
+/******************************************************************************
+ *
+ * Private functions
+ *
+ */
+
+/*****************************************************************************
+ *
+ * PnmiVar - Gets, presets, and sets single OIDs
+ *
+ * Description:
+ * Looks up the requested OID, calls the corresponding handler
+ * function, and passes the parameters with the get, preset, or
+ * set command. The function is called by SkGePnmiGetVar,
+ * SkGePnmiPreSetVar, or SkGePnmiSetVar.
+ *
+ * Returns:
+ * SK_PNMI_ERR_XXX. For details have a look at the description of the
+ * calling functions.
+ * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
+ */
+PNMI_STATIC int PnmiVar(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* Total length of pBuf management data */
+SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int TableIndex;
+ int Ret;
+
+
+ if ((TableIndex = LookupId(Id)) == (unsigned int)(-1)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_OID);
+ }
+
+ /* Check NetIndex */
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+ return (SK_PNMI_ERR_UNKNOWN_NET);
+ }
+
+ SK_PNMI_CHECKFLAGS("PnmiVar: On call");
+
+ Ret = IdTable[TableIndex].Func(pAC, IoC, Action, Id, pBuf, pLen,
+ Instance, TableIndex, NetIndex);
+
+ SK_PNMI_CHECKFLAGS("PnmiVar: On return");
+
+ return (Ret);
+}
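+
+/*
+ * Illustrative dispatch (hypothetical call, identifiers taken from this
+ * file): a GET of OID_SKGE_SENSOR_VALUE for sensor instance 2 on net 0
+ * could be issued as
+ *
+ *	unsigned int Len = sizeof(SK_U32);
+ *	int Ret = PnmiVar(pAC, IoC, SK_PNMI_GET, OID_SKGE_SENSOR_VALUE,
+ *		pBuf, &Len, 2, 0);
+ *
+ * LookupId() then resolves the OID to its IdTable entry and the
+ * registered handler (SensorStat() below) fills pBuf with the 32 bit
+ * sensor value.
+ */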
+
+/*****************************************************************************
+ *
+ * PnmiStruct - Presets and Sets data in structure SK_PNMI_STRUCT_DATA
+ *
+ * Description:
+ *	The sub-function runs through the IdTable, checks which OIDs
+ *	can be set, and calls the handler function of the OID to
+ *	perform the set. The return value of the function will also
+ *	be stored in SK_PNMI_STRUCT_DATA if the passed buffer has the
+ *	minimum size of SK_PNMI_MIN_STRUCT_SIZE. The function is called
+ *	by SkGePnmiPreSetStruct and SkGePnmiSetStruct.
+ *
+ * Returns:
+ * SK_PNMI_ERR_XXX. The codes are described in the calling functions.
+ * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
+ */
+PNMI_STATIC int PnmiStruct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* PRESET/SET action to be performed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* Length of pBuf management data buffer */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ unsigned int TableIndex;
+ unsigned int DstOffset;
+ unsigned int Len;
+ unsigned int InstanceNo;
+ unsigned int InstanceCnt;
+ SK_U32 Instance;
+ SK_U32 Id;
+
+
+ /* Check if the passed buffer has the right size */
+ if (*pLen < SK_PNMI_STRUCT_SIZE) {
+
+ /* Check if we can return the error within the buffer */
+ if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) {
+
+ SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT,
+ (SK_U32)(-1));
+ }
+
+ *pLen = SK_PNMI_STRUCT_SIZE;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /* Check NetIndex */
+ if (NetIndex >= pAC->Rlmt.NumNets) {
+ return (SK_PNMI_ERR_UNKNOWN_NET);
+ }
+
+ SK_PNMI_CHECKFLAGS("PnmiStruct: On call");
+
+ /*
+ * Update the values of RLMT and SIRQ and increment semaphores to
+ * indicate that an update was already done.
+ */
+ if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
+
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+
+ if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
+
+ SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (Ret);
+ }
+
+ pAC->Pnmi.RlmtUpdatedFlag ++;
+ pAC->Pnmi.SirqUpdatedFlag ++;
+
+ /* Preset/Set values */
+ for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) {
+
+ if ((IdTable[TableIndex].Access != SK_PNMI_RW) &&
+ (IdTable[TableIndex].Access != SK_PNMI_WO)) {
+
+ continue;
+ }
+
+ InstanceNo = IdTable[TableIndex].InstanceNo;
+ Id = IdTable[TableIndex].Id;
+
+ for (InstanceCnt = 1; InstanceCnt <= InstanceNo;
+ InstanceCnt ++) {
+
+ DstOffset = IdTable[TableIndex].Offset +
+ (InstanceCnt - 1) *
+ IdTable[TableIndex].StructSize;
+
+ /*
+			 * Because VPD multiple instance variables are
+			 * not settable, we do not need to evaluate VPD
+			 * instances. Have a look at the VPD instance
+			 * calculation in SkPnmiGetStruct().
+ */
+ Instance = (SK_U32)InstanceCnt;
+
+ /*
+ * Evaluate needed buffer length
+ */
+ Len = 0;
+ Ret = IdTable[TableIndex].Func(pAC, IoC,
+ SK_PNMI_GET, IdTable[TableIndex].Id,
+ NULL, &Len, Instance, TableIndex, NetIndex);
+
+ if (Ret == SK_PNMI_ERR_UNKNOWN_INST) {
+
+ break;
+ }
+ if (Ret != SK_PNMI_ERR_TOO_SHORT) {
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
+ SK_PNMI_SET_STAT(pBuf,
+ SK_PNMI_ERR_GENERAL, DstOffset);
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ if (Id == OID_SKGE_VPD_ACTION) {
+
+ switch (*(pBuf + DstOffset)) {
+
+ case SK_PNMI_VPD_CREATE:
+ Len = 3 + *(pBuf + DstOffset + 3);
+ break;
+
+ case SK_PNMI_VPD_DELETE:
+ Len = 3;
+ break;
+
+ default:
+ Len = 1;
+ break;
+ }
+ }
+
+ /* Call the OID handler function */
+ Ret = IdTable[TableIndex].Func(pAC, IoC, Action,
+ IdTable[TableIndex].Id, pBuf + DstOffset,
+ &Len, Instance, TableIndex, NetIndex);
+
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
+ SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_BAD_VALUE,
+ DstOffset);
+ *pLen = SK_PNMI_MIN_STRUCT_SIZE;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ }
+ }
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
+ SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1));
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * LookupId - Lookup an OID in the IdTable
+ *
+ * Description:
+ * Scans the IdTable to find the table entry of an OID.
+ *
+ * Returns:
+ * The table index or -1 if not found.
+ */
+PNMI_STATIC int LookupId(
+SK_U32 Id) /* Object identifier to be searched */
+{
+ int i;
+
+ for (i = 0; i < ID_TABLE_SIZE; i++) {
+
+ if (IdTable[i].Id == Id) {
+
+ return i;
+ }
+ }
+
+ return (-1);
+}
+
+/*****************************************************************************
+ *
+ * OidStruct - Handler of OID_SKGE_ALL_DATA
+ *
+ * Description:
+ * This OID performs a Get/Preset/SetStruct call and returns all data
+ * in a SK_PNMI_STRUCT_DATA structure.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int OidStruct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ if (Id != OID_SKGE_ALL_DATA) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR003,
+ SK_PNMI_ERR003MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Check instance. We only handle single instance variables
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ switch (Action) {
+
+ case SK_PNMI_GET:
+ return (SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex));
+
+ case SK_PNMI_PRESET:
+ return (SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex));
+
+ case SK_PNMI_SET:
+ return (SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex));
+ }
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR004, SK_PNMI_ERR004MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+}
+
+/*****************************************************************************
+ *
+ * Perform - OID handler of OID_SKGE_ACTION
+ *
+ * Description:
+ *	Executes the action requested via OID_SKGE_ACTION: idle, driver
+ *	reset, driver selftest, or reset of all counters.
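+ *
+ *	A minimal usage sketch (buffer and table index are assumed to be
+ *	provided by the caller):
+ *
+ *		SK_U32 Op = SK_PNMI_ACT_RESETCNT;
+ *		unsigned int Len = sizeof(SK_U32);
+ *		SK_PNMI_STORE_U32(pBuf, Op);
+ *		Perform(pAC, IoC, SK_PNMI_SET, OID_SKGE_ACTION, pBuf,
+ *			&Len, 1, TableIndex, 0);
+ *
+ *	resets all counters of net 0, whereas the same buffer with
+ *	SK_PNMI_PRESET only validates the action code.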
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int Perform(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ SK_U32 ActionOp;
+
+
+ /*
+ * Check instance. We only handle single instance variables
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /* Check if a get should be performed */
+ if (Action == SK_PNMI_GET) {
+
+ /* A get is easy. We always return the same value */
+ ActionOp = (SK_U32)SK_PNMI_ACT_IDLE;
+ SK_PNMI_STORE_U32(pBuf, ActionOp);
+ *pLen = sizeof(SK_U32);
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /* Continue with PRESET/SET action */
+ if (*pLen > sizeof(SK_U32)) {
+
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* Check if the command is a known one */
+ SK_PNMI_READ_U32(pBuf, ActionOp);
+ if (*pLen > sizeof(SK_U32) ||
+ (ActionOp != SK_PNMI_ACT_IDLE &&
+ ActionOp != SK_PNMI_ACT_RESET &&
+ ActionOp != SK_PNMI_ACT_SELFTEST &&
+ ActionOp != SK_PNMI_ACT_RESETCNT)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* A preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ switch (ActionOp) {
+
+ case SK_PNMI_ACT_IDLE:
+ /* Nothing to do */
+ break;
+
+ case SK_PNMI_ACT_RESET:
+ /*
+		 * Perform a driver reset or the closest available
+		 * equivalent.
+ */
+ Ret = SK_DRIVER_RESET(pAC, IoC);
+ if (Ret != 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR005,
+ SK_PNMI_ERR005MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ break;
+
+ case SK_PNMI_ACT_SELFTEST:
+ /*
+		 * Perform a driver selftest or something similar to it.
+		 * Currently this feature is not used and will probably
+		 * be implemented in another way.
+ */
+ Ret = SK_DRIVER_SELFTEST(pAC, IoC);
+ pAC->Pnmi.TestResult = Ret;
+ break;
+
+ case SK_PNMI_ACT_RESETCNT:
+ /* Set all counters and timestamps to zero */
+ ResetCounter(pAC, IoC, NetIndex);
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR006,
+ SK_PNMI_ERR006MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * Mac8023Stat - OID handler of OID_GEN_XXX and OID_802_3_XXX
+ *
+ * Description:
+ * Retrieves the statistic values of the virtual port (logical
+ *	index 0). Only the special NDIS OIDs, which consist of a 32 bit
+ *	instead of a 64 bit value, are handled here. The OIDs are public
+ *	because perhaps some other platform can use them too.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int Mac8023Stat(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ SK_U64 StatVal;
+ SK_U32 StatVal32;
+ SK_BOOL Is64BitReq = SK_FALSE;
+
+ /*
+ * Only the active Mac is returned
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ /*
+ * Check action type
+ */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /* Check length */
+ switch (Id) {
+
+ case OID_802_3_PERMANENT_ADDRESS:
+ case OID_802_3_CURRENT_ADDRESS:
+ if (*pLen < sizeof(SK_MAC_ADDR)) {
+
+ *pLen = sizeof(SK_MAC_ADDR);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+#ifndef SK_NDIS_64BIT_CTR
+ if (*pLen < sizeof(SK_U32)) {
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+#else /* SK_NDIS_64BIT_CTR */
+
+ /* for compatibility, at least 32bit are required for OID */
+ if (*pLen < sizeof(SK_U32)) {
+ /*
+ * but indicate handling for 64bit values,
+ * if insufficient space is provided
+ */
+ *pLen = sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE;
+#endif /* SK_NDIS_64BIT_CTR */
+ break;
+ }
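+
+	/*
+	 * Illustration of the length check above (buffer sizes assumed):
+	 * with SK_NDIS_64BIT_CTR defined, a caller providing a 4 byte
+	 * buffer still gets a 32 bit counter value, 8 or more bytes select
+	 * the full 64 bit value, and less than 4 bytes is rejected with
+	 * SK_PNMI_ERR_TOO_SHORT and *pLen set to sizeof(SK_U64).
+	 */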
+
+ /*
+	 * Update all statistics, because the virtual MAC is composed of
+	 * the statistics of multiple physical MACs, and increment the
+	 * semaphore to indicate that an update was already done.
+ */
+ Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
+	if (Ret != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.MacUpdatedFlag ++;
+
+ /*
+ * Get value (MAC Index 0 identifies the virtual MAC)
+ */
+ switch (Id) {
+
+ case OID_802_3_PERMANENT_ADDRESS:
+ CopyMac(pBuf, &pAC->Addr.Net[NetIndex].PermanentMacAddress);
+ *pLen = sizeof(SK_MAC_ADDR);
+ break;
+
+ case OID_802_3_CURRENT_ADDRESS:
+ CopyMac(pBuf, &pAC->Addr.Net[NetIndex].CurrentMacAddress);
+ *pLen = sizeof(SK_MAC_ADDR);
+ break;
+
+ default:
+ StatVal = GetStatVal(pAC, IoC, 0, IdTable[TableIndex].Param, NetIndex);
+
+ /* by default 32bit values are evaluated */
+ if (!Is64BitReq) {
+ StatVal32 = (SK_U32)StatVal;
+ SK_PNMI_STORE_U32(pBuf, StatVal32);
+ *pLen = sizeof(SK_U32);
+ }
+ else {
+ SK_PNMI_STORE_U64(pBuf, StatVal);
+ *pLen = sizeof(SK_U64);
+ }
+ break;
+ }
+
+ pAC->Pnmi.MacUpdatedFlag --;
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * MacPrivateStat - OID handler function of OID_SKGE_STAT_XXX
+ *
+ * Description:
+ * Retrieves the MAC statistic data.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int MacPrivateStat(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int LogPortMax;
+ unsigned int LogPortIndex;
+ unsigned int PhysPortMax;
+ unsigned int Limit;
+ unsigned int Offset;
+ int MacType;
+ int Ret;
+ SK_U64 StatVal;
+
+
+
+ /* Calculate instance if wished. MAC index 0 is the virtual MAC */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+ LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
+
+ MacType = pAC->GIni.GIMacType;
+
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
+ LogPortMax--;
+ }
+
+ if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
+ /* Check instance range */
+ if ((Instance < 1) || (Instance > LogPortMax)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
+ Limit = LogPortIndex + 1;
+ }
+
+ else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
+
+ LogPortIndex = 0;
+ Limit = LogPortMax;
+ }
+
+ /* Check action */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /* Check length */
+ if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U64)) {
+
+ *pLen = (Limit - LogPortIndex) * sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * Update MAC statistic and increment semaphore to indicate that
+ * an update was already done.
+ */
+ Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.MacUpdatedFlag ++;
+
+ /* Get value */
+ Offset = 0;
+ for (; LogPortIndex < Limit; LogPortIndex ++) {
+
+ switch (Id) {
+
+/* XXX not yet implemented due to XMAC problems
+ case OID_SKGE_STAT_TX_UTIL:
+ return (SK_PNMI_ERR_GENERAL);
+*/
+/* XXX not yet implemented due to XMAC problems
+ case OID_SKGE_STAT_RX_UTIL:
+ return (SK_PNMI_ERR_GENERAL);
+*/
+ case OID_SKGE_STAT_RX:
+ if (MacType == SK_MAC_GMAC) {
+ StatVal =
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HRX_BROADCAST, NetIndex) +
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HRX_MULTICAST, NetIndex) +
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HRX_UNICAST, NetIndex) +
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HRX_UNDERSIZE, NetIndex);
+ }
+ else {
+ StatVal = GetStatVal(pAC, IoC, LogPortIndex,
+ IdTable[TableIndex].Param, NetIndex);
+ }
+ break;
+
+ case OID_SKGE_STAT_TX:
+ if (MacType == SK_MAC_GMAC) {
+ StatVal =
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HTX_BROADCAST, NetIndex) +
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HTX_MULTICAST, NetIndex) +
+ GetStatVal(pAC, IoC, LogPortIndex,
+ SK_PNMI_HTX_UNICAST, NetIndex);
+ }
+ else {
+ StatVal = GetStatVal(pAC, IoC, LogPortIndex,
+ IdTable[TableIndex].Param, NetIndex);
+ }
+ break;
+
+ default:
+ StatVal = GetStatVal(pAC, IoC, LogPortIndex,
+ IdTable[TableIndex].Param, NetIndex);
+ }
+ SK_PNMI_STORE_U64(pBuf + Offset, StatVal);
+
+ Offset += sizeof(SK_U64);
+ }
+ *pLen = Offset;
+
+ pAC->Pnmi.MacUpdatedFlag --;
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * Addr - OID handler function of OID_SKGE_PHYS_CUR_ADDR and _FAC_ADDR
+ *
+ * Description:
+ *	Gets/Presets/Sets the current and factory MAC addresses. The MAC
+ *	address of the virtual port, which is reported to the OS, may
+ *	not be changed, but the addresses of the physical ports may. A
+ *	set to the virtual port will be ignored. No error should be
+ *	reported because otherwise a multiple instance set (-1) would
+ *	always fail.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int Addr(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ unsigned int LogPortMax;
+ unsigned int PhysPortMax;
+ unsigned int LogPortIndex;
+ unsigned int PhysPortIndex;
+ unsigned int Limit;
+ unsigned int Offset = 0;
+
+ /*
+ * Calculate instance if wished. MAC index 0 is the virtual
+ * MAC.
+ */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+ LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
+
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
+ LogPortMax--;
+ }
+
+ if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
+ /* Check instance range */
+ if ((Instance < 1) || (Instance > LogPortMax)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
+ Limit = LogPortIndex + 1;
+ }
+ else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
+
+ LogPortIndex = 0;
+ Limit = LogPortMax;
+ }
+
+ /*
+ * Perform Action
+ */
+ if (Action == SK_PNMI_GET) {
+
+ /* Check length */
+ if (*pLen < (Limit - LogPortIndex) * 6) {
+
+ *pLen = (Limit - LogPortIndex) * 6;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
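+
+		/*
+		 * Example (assuming SK_PNMI_PORT_PHYS2LOG() maps n physical
+		 * ports to n + 1 logical ports, index 0 being the virtual
+		 * port): a dual port adapter in single net mode queried with
+		 * Instance -1 returns 3 * 6 = 18 bytes of consecutive MAC
+		 * addresses, the virtual port first.
+		 */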
+
+ /*
+ * Get value
+ */
+ for (; LogPortIndex < Limit; LogPortIndex ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_PHYS_CUR_ADDR:
+ if (LogPortIndex == 0) {
+ CopyMac(pBuf + Offset, &pAC->Addr.Net[NetIndex].CurrentMacAddress);
+ }
+ else {
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
+
+ CopyMac(pBuf + Offset,
+ &pAC->Addr.Port[PhysPortIndex].CurrentMacAddress);
+ }
+ Offset += 6;
+ break;
+
+ case OID_SKGE_PHYS_FAC_ADDR:
+ if (LogPortIndex == 0) {
+ CopyMac(pBuf + Offset,
+ &pAC->Addr.Net[NetIndex].PermanentMacAddress);
+ }
+ else {
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ CopyMac(pBuf + Offset,
+ &pAC->Addr.Port[PhysPortIndex].PermanentMacAddress);
+ }
+ Offset += 6;
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR008,
+ SK_PNMI_ERR008MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+
+ *pLen = Offset;
+ }
+ else {
+ /*
+		 * The factory MAC addresses are read-only
+		 * and may not be changed
+ */
+ if (Id == OID_SKGE_PHYS_FAC_ADDR) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /*
+ * Only the current address may be changed
+ */
+ if (Id != OID_SKGE_PHYS_CUR_ADDR) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR009,
+ SK_PNMI_ERR009MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /* Check length */
+ if (*pLen < (Limit - LogPortIndex) * 6) {
+
+ *pLen = (Limit - LogPortIndex) * 6;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ if (*pLen > (Limit - LogPortIndex) * 6) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /*
+ * Check Action
+ */
+ if (Action == SK_PNMI_PRESET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /*
+		 * Set OID_SKGE_PHYS_CUR_ADDR
+ */
+ for (; LogPortIndex < Limit; LogPortIndex ++, Offset += 6) {
+
+ /*
+			 * A set to the virtual port and a set of the
+			 * broadcast address will be ignored
+ */
+ if (LogPortIndex == 0 || SK_MEMCMP(pBuf + Offset,
+ "\xff\xff\xff\xff\xff\xff", 6) == 0) {
+
+ continue;
+ }
+
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC,
+ LogPortIndex);
+
+ Ret = SkAddrOverride(pAC, IoC, PhysPortIndex,
+ (SK_MAC_ADDR *)(pBuf + Offset),
+ (LogPortIndex == 0 ? SK_ADDR_VIRTUAL_ADDRESS :
+ SK_ADDR_PHYSICAL_ADDRESS));
+ if (Ret != SK_ADDR_OVERRIDE_SUCCESS) {
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ *pLen = Offset;
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * CsumStat - OID handler function of OID_SKGE_CHKSM_XXX
+ *
+ * Description:
+ * Retrieves the statistic values of the CSUM module. The CSUM data
+ * structure must be available in the SK_AC even if the CSUM module
+ * is not included, because PNMI reads the statistic data from the
+ * CSUM part of SK_AC directly.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int CsumStat(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int Index;
+ unsigned int Limit;
+ unsigned int Offset = 0;
+ SK_U64 StatVal;
+
+
+ /*
+ * Calculate instance if wished
+ */
+ if (Instance != (SK_U32)(-1)) {
+
+ if ((Instance < 1) || (Instance > SKCS_NUM_PROTOCOLS)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ Index = (unsigned int)Instance - 1;
+ Limit = Index + 1;
+ }
+ else {
+ Index = 0;
+ Limit = SKCS_NUM_PROTOCOLS;
+ }
+
+ /*
+ * Check action
+ */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /* Check length */
+ if (*pLen < (Limit - Index) * sizeof(SK_U64)) {
+
+ *pLen = (Limit - Index) * sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * Get value
+ */
+ for (; Index < Limit; Index ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_CHKSM_RX_OK_CTS:
+ StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxOkCts;
+ break;
+
+ case OID_SKGE_CHKSM_RX_UNABLE_CTS:
+ StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxUnableCts;
+ break;
+
+ case OID_SKGE_CHKSM_RX_ERR_CTS:
+ StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxErrCts;
+ break;
+
+ case OID_SKGE_CHKSM_TX_OK_CTS:
+ StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxOkCts;
+ break;
+
+ case OID_SKGE_CHKSM_TX_UNABLE_CTS:
+ StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxUnableCts;
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR010,
+ SK_PNMI_ERR010MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ SK_PNMI_STORE_U64(pBuf + Offset, StatVal);
+ Offset += sizeof(SK_U64);
+ }
+
+ /*
+ * Store used buffer space
+ */
+ *pLen = Offset;
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * SensorStat - OID handler function of OID_SKGE_SENSOR_XXX
+ *
+ * Description:
+ * Retrieves the statistic values of the I2C module, which handles
+ * the temperature and voltage sensors.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int SensorStat(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int i;
+ unsigned int Index;
+ unsigned int Limit;
+ unsigned int Offset;
+ unsigned int Len;
+ SK_U32 Val32;
+ SK_U64 Val64;
+
+
+ /*
+ * Calculate instance if wished
+ */
+ if ((Instance != (SK_U32)(-1))) {
+
+ if ((Instance < 1) || (Instance > (SK_U32)pAC->I2c.MaxSens)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+		Index = (unsigned int)Instance - 1;
+ Limit = (unsigned int)Instance;
+ }
+ else {
+ Index = 0;
+ Limit = (unsigned int) pAC->I2c.MaxSens;
+ }
+
+ /*
+ * Check action
+ */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /* Check length */
+ switch (Id) {
+
+ case OID_SKGE_SENSOR_VALUE:
+ case OID_SKGE_SENSOR_WAR_THRES_LOW:
+ case OID_SKGE_SENSOR_WAR_THRES_UPP:
+ case OID_SKGE_SENSOR_ERR_THRES_LOW:
+ case OID_SKGE_SENSOR_ERR_THRES_UPP:
+ if (*pLen < (Limit - Index) * sizeof(SK_U32)) {
+
+ *pLen = (Limit - Index) * sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_SENSOR_DESCR:
+ for (Offset = 0, i = Index; i < Limit; i ++) {
+
+ Len = (unsigned int)
+ SK_STRLEN(pAC->I2c.SenTable[i].SenDesc) + 1;
+ if (Len >= SK_PNMI_STRINGLEN2) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR011,
+ SK_PNMI_ERR011MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ Offset += Len;
+ }
+ if (*pLen < Offset) {
+
+ *pLen = Offset;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_SENSOR_INDEX:
+ case OID_SKGE_SENSOR_TYPE:
+ case OID_SKGE_SENSOR_STATUS:
+ if (*pLen < Limit - Index) {
+
+ *pLen = Limit - Index;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_SENSOR_WAR_CTS:
+ case OID_SKGE_SENSOR_WAR_TIME:
+ case OID_SKGE_SENSOR_ERR_CTS:
+ case OID_SKGE_SENSOR_ERR_TIME:
+ if (*pLen < (Limit - Index) * sizeof(SK_U64)) {
+
+ *pLen = (Limit - Index) * sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR012,
+ SK_PNMI_ERR012MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+
+ }
+
+ /*
+ * Get value
+ */
+ for (Offset = 0; Index < Limit; Index ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_SENSOR_INDEX:
+ *(pBuf + Offset) = (char)Index;
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SENSOR_DESCR:
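+			/*
+			 * The description is returned length-prefixed: an
+			 * 11 character string such as "Temperature" (name
+			 * assumed for illustration) occupies one length byte
+			 * with the value 11 followed by the 11 characters,
+			 * 12 bytes in total.
+			 */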
+ Len = SK_STRLEN(pAC->I2c.SenTable[Index].SenDesc);
+ SK_MEMCPY(pBuf + Offset + 1,
+ pAC->I2c.SenTable[Index].SenDesc, Len);
+ *(pBuf + Offset) = (char)Len;
+ Offset += Len + 1;
+ break;
+
+ case OID_SKGE_SENSOR_TYPE:
+ *(pBuf + Offset) =
+ (char)pAC->I2c.SenTable[Index].SenType;
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SENSOR_VALUE:
+ Val32 = (SK_U32)pAC->I2c.SenTable[Index].SenValue;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_WAR_THRES_LOW:
+ Val32 = (SK_U32)pAC->I2c.SenTable[Index].
+ SenThreWarnLow;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_WAR_THRES_UPP:
+ Val32 = (SK_U32)pAC->I2c.SenTable[Index].
+ SenThreWarnHigh;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_ERR_THRES_LOW:
+ Val32 = (SK_U32)pAC->I2c.SenTable[Index].
+ SenThreErrLow;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_ERR_THRES_UPP:
+ Val32 = pAC->I2c.SenTable[Index].SenThreErrHigh;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_STATUS:
+ *(pBuf + Offset) =
+ (char)pAC->I2c.SenTable[Index].SenErrFlag;
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SENSOR_WAR_CTS:
+ Val64 = pAC->I2c.SenTable[Index].SenWarnCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_SENSOR_ERR_CTS:
+ Val64 = pAC->I2c.SenTable[Index].SenErrCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_SENSOR_WAR_TIME:
+ Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index].
+ SenBegWarnTS);
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_SENSOR_ERR_TIME:
+ Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index].
+ SenBegErrTS);
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
+ ("SensorStat: Unknown OID should be handled before"));
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+
+ /*
+ * Store used buffer space
+ */
+ *pLen = Offset;
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * Vpd - OID handler function of OID_SKGE_VPD_XXX
+ *
+ * Description:
+ *	Get/preset/set of VPD data. The name of a VPD key can be passed
+ *	as the instance. The Instance parameter is a SK_U32 and can be
+ *	used as a string buffer for the VPD key, because its maximum
+ *	length is 4 bytes.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int Vpd(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_VPD_STATUS *pVpdStatus;
+ unsigned int BufLen;
+ char Buf[256];
+ char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE];
+ char KeyStr[SK_PNMI_VPD_KEY_SIZE];
+ unsigned int KeyNo;
+ unsigned int Offset;
+ unsigned int Index;
+ unsigned int FirstIndex;
+ unsigned int LastIndex;
+ unsigned int Len;
+ int Ret;
+ SK_U32 Val32;
+
+ /*
+ * Get array of all currently stored VPD keys
+ */
+ Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &KeyNo);
+ if (Ret != SK_PNMI_ERR_OK) {
+ *pLen = 0;
+ return (Ret);
+ }
+
+ /*
+ * If instance is not -1, try to find the requested VPD key for
+	 * the multiple instance variables. The other OIDs, for example
+	 * OID_SKGE_VPD_ACTION, are single instance variables and must be
+	 * handled separately.
+ */
+ FirstIndex = 0;
+ LastIndex = KeyNo;
+
+ if ((Instance != (SK_U32)(-1))) {
+
+ if (Id == OID_SKGE_VPD_KEY || Id == OID_SKGE_VPD_VALUE ||
+ Id == OID_SKGE_VPD_ACCESS) {
+
+ SK_STRNCPY(KeyStr, (char *)&Instance, 4);
+ KeyStr[4] = 0;
+
+ for (Index = 0; Index < KeyNo; Index ++) {
+
+ if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) {
+ FirstIndex = Index;
+ LastIndex = Index+1;
+ break;
+ }
+ }
+ if (Index == KeyNo) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ }
+ else if (Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ }
+
+ /*
+ * Get value, if a query should be performed
+ */
+ if (Action == SK_PNMI_GET) {
+
+ switch (Id) {
+
+ case OID_SKGE_VPD_FREE_BYTES:
+ /* Check length of buffer */
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ /* Get number of free bytes */
+ pVpdStatus = VpdStat(pAC, IoC);
+ if (pVpdStatus == NULL) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR017,
+ SK_PNMI_ERR017MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ if ((pVpdStatus->vpd_status & VPD_VALID) == 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR018,
+ SK_PNMI_ERR018MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Val32 = (SK_U32)pVpdStatus->vpd_free_rw;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_VPD_ENTRIES_LIST:
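+		/*
+		 * The list is returned as one length byte followed by the
+		 * space separated keys. Example (keys assumed): with "PN",
+		 * "EC" and "SN" stored, the buffer contains the length byte
+		 * 8 followed by "PN EC SN".
+		 */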
+ /* Check length */
+ for (Len = 0, Index = 0; Index < KeyNo; Index ++) {
+
+ Len += SK_STRLEN(KeyArr[Index]) + 1;
+ }
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /* Get value */
+ *(pBuf) = (char)Len - 1;
+ for (Offset = 1, Index = 0; Index < KeyNo; Index ++) {
+
+ Len = SK_STRLEN(KeyArr[Index]);
+ SK_MEMCPY(pBuf + Offset, KeyArr[Index], Len);
+
+ Offset += Len;
+
+ if (Index < KeyNo - 1) {
+
+ *(pBuf + Offset) = ' ';
+ Offset ++;
+ }
+ }
+ *pLen = Offset;
+ break;
+
+ case OID_SKGE_VPD_ENTRIES_NUMBER:
+ /* Check length */
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ Val32 = (SK_U32)KeyNo;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_VPD_KEY:
+ /* Check buffer length, if it is large enough */
+ for (Len = 0, Index = FirstIndex;
+ Index < LastIndex; Index ++) {
+
+ Len += SK_STRLEN(KeyArr[Index]) + 1;
+ }
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * Get the key to an intermediate buffer, because
+ * we have to prepend a length byte.
+ */
+ for (Offset = 0, Index = FirstIndex;
+ Index < LastIndex; Index ++) {
+
+ Len = SK_STRLEN(KeyArr[Index]);
+
+ *(pBuf + Offset) = (char)Len;
+ SK_MEMCPY(pBuf + Offset + 1, KeyArr[Index],
+ Len);
+ Offset += Len + 1;
+ }
+ *pLen = Offset;
+ break;
+
+ case OID_SKGE_VPD_VALUE:
+ /* Check the buffer length if it is large enough */
+ for (Offset = 0, Index = FirstIndex;
+ Index < LastIndex; Index ++) {
+
+ BufLen = 256;
+ if (VpdRead(pAC, IoC, KeyArr[Index], Buf,
+ (int *)&BufLen) > 0 ||
+ BufLen >= SK_PNMI_VPD_DATALEN) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR021,
+ SK_PNMI_ERR021MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ Offset += BufLen + 1;
+ }
+ if (*pLen < Offset) {
+
+ *pLen = Offset;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * Get the value to an intermediate buffer, because
+ * we have to prepend a length byte.
+ */
+ for (Offset = 0, Index = FirstIndex;
+ Index < LastIndex; Index ++) {
+
+ BufLen = 256;
+ if (VpdRead(pAC, IoC, KeyArr[Index], Buf,
+ (int *)&BufLen) > 0 ||
+ BufLen >= SK_PNMI_VPD_DATALEN) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR022,
+ SK_PNMI_ERR022MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ *(pBuf + Offset) = (char)BufLen;
+ SK_MEMCPY(pBuf + Offset + 1, Buf, BufLen);
+ Offset += BufLen + 1;
+ }
+ *pLen = Offset;
+ break;
+
+ case OID_SKGE_VPD_ACCESS:
+ if (*pLen < LastIndex - FirstIndex) {
+
+ *pLen = LastIndex - FirstIndex;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ for (Offset = 0, Index = FirstIndex;
+ Index < LastIndex; Index ++) {
+
+ if (VpdMayWrite(KeyArr[Index])) {
+
+ *(pBuf + Offset) = SK_PNMI_VPD_RW;
+ }
+ else {
+ *(pBuf + Offset) = SK_PNMI_VPD_RO;
+ }
+ Offset ++;
+ }
+ *pLen = Offset;
+ break;
+
+ case OID_SKGE_VPD_ACTION:
+ Offset = LastIndex - FirstIndex;
+ if (*pLen < Offset) {
+
+ *pLen = Offset;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ SK_MEMSET(pBuf, 0, Offset);
+ *pLen = Offset;
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR023,
+ SK_PNMI_ERR023MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ else {
+ /* The only OID which can be set is VPD_ACTION */
+ if (Id != OID_SKGE_VPD_ACTION) {
+
+ if (Id == OID_SKGE_VPD_FREE_BYTES ||
+ Id == OID_SKGE_VPD_ENTRIES_LIST ||
+ Id == OID_SKGE_VPD_ENTRIES_NUMBER ||
+ Id == OID_SKGE_VPD_KEY ||
+ Id == OID_SKGE_VPD_VALUE ||
+ Id == OID_SKGE_VPD_ACCESS) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR024,
+ SK_PNMI_ERR024MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * From this point we handle VPD_ACTION. Check the buffer
+ * length. It should at least have the size of one byte.
+ */
+ if (*pLen < 1) {
+
+ *pLen = 1;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ /*
+ * The first byte contains the VPD action type we should
+ * perform.
+ */
+ switch (*pBuf) {
+
+ case SK_PNMI_VPD_IGNORE:
+ /* Nothing to do */
+ break;
+
+ case SK_PNMI_VPD_CREATE:
+ /*
+			 * We have to create a new VPD entry or modify
+			 * an existing one. First check the buffer length.
+ */
+ if (*pLen < 4) {
+
+ *pLen = 4;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ KeyStr[0] = pBuf[1];
+ KeyStr[1] = pBuf[2];
+ KeyStr[2] = 0;
+
+ /*
+ * Is the entry writable or does it belong to the
+ * read-only area?
+ */
+ if (!VpdMayWrite(KeyStr)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ Offset = (int)pBuf[3] & 0xFF;
+
+ SK_MEMCPY(Buf, pBuf + 4, Offset);
+ Buf[Offset] = 0;
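+			/*
+			 * Buffer layout parsed above (key and value assumed
+			 * for illustration): pBuf[0] = action code,
+			 * pBuf[1..2] = two character VPD key, pBuf[3] = value
+			 * length n, pBuf[4..4+n-1] = value. A request to store
+			 * "1234" under the key "YA" therefore carries the
+			 * bytes { SK_PNMI_VPD_CREATE, 'Y', 'A', 4, '1', '2',
+			 * '3', '4' }.
+			 */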
+
+ /* A preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /* Write the new entry or modify an existing one */
+ Ret = VpdWrite(pAC, IoC, KeyStr, Buf);
+			if (Ret == SK_PNMI_VPD_NOWRITE) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ else if (Ret != SK_PNMI_VPD_OK) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR025,
+ SK_PNMI_ERR025MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Perform an update of the VPD data. This is
+			 * not mandatory, but just to be sure.
+ */
+ Ret = VpdUpdate(pAC, IoC);
+ if (Ret != SK_PNMI_VPD_OK) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR026,
+ SK_PNMI_ERR026MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ break;
+
+ case SK_PNMI_VPD_DELETE:
+ /* Check if the buffer size is plausible */
+ if (*pLen < 3) {
+
+ *pLen = 3;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ if (*pLen > 3) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ KeyStr[0] = pBuf[1];
+ KeyStr[1] = pBuf[2];
+ KeyStr[2] = 0;
+
+ /* Find the passed key in the array */
+ for (Index = 0; Index < KeyNo; Index ++) {
+
+ if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) {
+
+ break;
+ }
+ }
+ /*
+ * If we cannot find the key it is wrong, so we
+ * return an appropriate error value.
+ */
+ if (Index == KeyNo) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /* Ok, you wanted it and you will get it */
+ Ret = VpdDelete(pAC, IoC, KeyStr);
+ if (Ret != SK_PNMI_VPD_OK) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR027,
+ SK_PNMI_ERR027MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Perform an update of the VPD data. This is
+			 * not mandatory, but just to be sure.
+ */
+ Ret = VpdUpdate(pAC, IoC);
+ if (Ret != SK_PNMI_VPD_OK) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR028,
+ SK_PNMI_ERR028MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * General - OID handler function of various single instance OIDs
+ *
+ * Description:
+ *	Handles various single instance OIDs, such as board, bus, and
+ *	driver information, accumulated error counters, and the system
+ *	uptime.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL	A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *				adapter).
+ */
+PNMI_STATIC int General(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ unsigned int Index;
+ unsigned int Len;
+ unsigned int Offset;
+ unsigned int Val;
+ SK_U8 Val8;
+ SK_U16 Val16;
+ SK_U32 Val32;
+ SK_U64 Val64;
+ SK_U64 Val64RxHwErrs = 0;
+ SK_U64 Val64TxHwErrs = 0;
+ SK_BOOL Is64BitReq = SK_FALSE;
+ char Buf[256];
+ int MacType;
+
+ /*
+ * Check instance. We only handle single instance variables.
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ /*
+ * Check action. We only allow get requests.
+ */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ MacType = pAC->GIni.GIMacType;
+
+ /*
+ * Check length for the various supported OIDs
+ */
+ switch (Id) {
+
+ case OID_GEN_XMIT_ERROR:
+ case OID_GEN_RCV_ERROR:
+ case OID_GEN_RCV_NO_BUFFER:
+#ifndef SK_NDIS_64BIT_CTR
+ if (*pLen < sizeof(SK_U32)) {
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+#else /* SK_NDIS_64BIT_CTR */
+
+ /*
+ * for compatibility, at least 32bit are required for oid
+ */
+ if (*pLen < sizeof(SK_U32)) {
+ /*
+ * but indicate handling for 64bit values,
+ * if insufficient space is provided
+ */
+ *pLen = sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+
+ Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE;
+#endif /* SK_NDIS_64BIT_CTR */
+ break;
+
+ case OID_SKGE_PORT_NUMBER:
+ case OID_SKGE_DEVICE_TYPE:
+ case OID_SKGE_RESULT:
+ case OID_SKGE_RLMT_MONITOR_NUMBER:
+ case OID_GEN_TRANSMIT_QUEUE_LENGTH:
+ case OID_SKGE_TRAP_NUMBER:
+ case OID_SKGE_MDB_VERSION:
+ case OID_SKGE_BOARDLEVEL:
+ case OID_SKGE_CHIPID:
+ case OID_SKGE_RAMSIZE:
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_CHIPSET:
+ if (*pLen < sizeof(SK_U16)) {
+
+ *pLen = sizeof(SK_U16);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_BUS_TYPE:
+ case OID_SKGE_BUS_SPEED:
+ case OID_SKGE_BUS_WIDTH:
+ case OID_SKGE_SENSOR_NUMBER:
+ case OID_SKGE_CHKSM_NUMBER:
+ case OID_SKGE_VAUXAVAIL:
+ if (*pLen < sizeof(SK_U8)) {
+
+ *pLen = sizeof(SK_U8);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_TX_SW_QUEUE_LEN:
+ case OID_SKGE_TX_SW_QUEUE_MAX:
+ case OID_SKGE_TX_RETRY:
+ case OID_SKGE_RX_INTR_CTS:
+ case OID_SKGE_TX_INTR_CTS:
+ case OID_SKGE_RX_NO_BUF_CTS:
+ case OID_SKGE_TX_NO_BUF_CTS:
+ case OID_SKGE_TX_USED_DESCR_NO:
+ case OID_SKGE_RX_DELIVERED_CTS:
+ case OID_SKGE_RX_OCTETS_DELIV_CTS:
+ case OID_SKGE_RX_HW_ERROR_CTS:
+ case OID_SKGE_TX_HW_ERROR_CTS:
+ case OID_SKGE_IN_ERRORS_CTS:
+ case OID_SKGE_OUT_ERROR_CTS:
+ case OID_SKGE_ERR_RECOVERY_CTS:
+ case OID_SKGE_SYSUPTIME:
+ if (*pLen < sizeof(SK_U64)) {
+
+ *pLen = sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ /* Checked later */
+ break;
+ }
+
+ /* Update statistic */
+ if (Id == OID_SKGE_RX_HW_ERROR_CTS ||
+ Id == OID_SKGE_TX_HW_ERROR_CTS ||
+ Id == OID_SKGE_IN_ERRORS_CTS ||
+ Id == OID_SKGE_OUT_ERROR_CTS ||
+ Id == OID_GEN_XMIT_ERROR ||
+ Id == OID_GEN_RCV_ERROR) {
+
+		/* Force the XMAC to update its statistic counters and
+		 * increment the semaphore to indicate that an update
+		 * was already done.
+ */
+ Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
+ if (Ret != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.MacUpdatedFlag ++;
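+		/*
+		 * The flag is decremented again at the end of this function,
+		 * after the requested value has been stored in the buffer.
+		 */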
+
+ /*
+		 * Some OIDs are made up of multiple hardware counters.
+		 * The individual counters they consist of are added up now.
+ */
+ switch (Id) {
+
+ case OID_SKGE_RX_HW_ERROR_CTS:
+ case OID_SKGE_IN_ERRORS_CTS:
+ case OID_GEN_RCV_ERROR:
+ Val64RxHwErrs =
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_MISSED, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FRAMING, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_OVERFLOW, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_JABBER, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CARRIER, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_IRLENGTH, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SYMBOL, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SHORTS, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_RUNT, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_TOO_LONG, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FCS, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CEXT, NetIndex);
+ break;
+
+ case OID_SKGE_TX_HW_ERROR_CTS:
+ case OID_SKGE_OUT_ERROR_CTS:
+ case OID_GEN_XMIT_ERROR:
+ Val64TxHwErrs =
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex) +
+ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex);
+ break;
+ }
+ }
+
+ /*
+ * Retrieve value
+ */
+ switch (Id) {
+
+ case OID_SKGE_SUPPORTED_LIST:
+ Len = ID_TABLE_SIZE * sizeof(SK_U32);
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ for (Offset = 0, Index = 0; Offset < Len;
+ Offset += sizeof(SK_U32), Index ++) {
+
+ Val32 = (SK_U32)IdTable[Index].Id;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ }
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_BOARDLEVEL:
+ Val32 = (SK_U32)pAC->GIni.GILevel;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_PORT_NUMBER:
+ Val32 = (SK_U32)pAC->GIni.GIMacsFound;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_DEVICE_TYPE:
+ Val32 = (SK_U32)pAC->Pnmi.DeviceType;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
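+	/*
+	 * The string OIDs below are returned as display strings: the first
+	 * byte contains the string length, followed by that many characters
+	 * without a terminating zero.
+	 */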
+ case OID_SKGE_DRIVER_DESCR:
+ if (pAC->Pnmi.pDriverDescription == NULL) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR007,
+ SK_PNMI_ERR007MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Len = SK_STRLEN(pAC->Pnmi.pDriverDescription) + 1;
+ if (Len > SK_PNMI_STRINGLEN1) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR029,
+ SK_PNMI_ERR029MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ *pBuf = (char)(Len - 1);
+ SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverDescription, Len - 1);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_DRIVER_VERSION:
+ if (pAC->Pnmi.pDriverVersion == NULL) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
+ SK_PNMI_ERR030MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Len = SK_STRLEN(pAC->Pnmi.pDriverVersion) + 1;
+ if (Len > SK_PNMI_STRINGLEN1) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
+ SK_PNMI_ERR031MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ *pBuf = (char)(Len - 1);
+ SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverVersion, Len - 1);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_DRIVER_RELDATE:
+ if (pAC->Pnmi.pDriverReleaseDate == NULL) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
+ SK_PNMI_ERR053MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Len = SK_STRLEN(pAC->Pnmi.pDriverReleaseDate) + 1;
+ if (Len > SK_PNMI_STRINGLEN1) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
+ SK_PNMI_ERR054MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ *pBuf = (char)(Len - 1);
+ SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverReleaseDate, Len - 1);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_DRIVER_FILENAME:
+ if (pAC->Pnmi.pDriverFileName == NULL) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
+ SK_PNMI_ERR055MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Len = SK_STRLEN(pAC->Pnmi.pDriverFileName) + 1;
+ if (Len > SK_PNMI_STRINGLEN1) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
+ SK_PNMI_ERR056MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ *pBuf = (char)(Len - 1);
+ SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverFileName, Len - 1);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_HW_DESCR:
+ /*
+ * The hardware description is located in the VPD. This
+ * query may move to the initialisation routine. But
+ * the VPD data is cached and therefore a call here
+ * will not make much difference.
+ */
+ Len = 256;
+ if (VpdRead(pAC, IoC, VPD_NAME, Buf, (int *)&Len) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR032,
+ SK_PNMI_ERR032MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ Len ++;
+ if (Len > SK_PNMI_STRINGLEN1) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR033,
+ SK_PNMI_ERR033MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ *pBuf = (char)(Len - 1);
+ SK_MEMCPY(pBuf + 1, Buf, Len - 1);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_HW_VERSION:
+ /* Oh, I love to do some string manipulation */
+ if (*pLen < 5) {
+
+ *pLen = 5;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
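+		/* Build the display string "vX.Y" from the two nibbles of the PCI HW revision */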
+ Val8 = (SK_U8)pAC->GIni.GIPciHwRev;
+ pBuf[0] = 4;
+ pBuf[1] = 'v';
+ pBuf[2] = (char)(0x30 | ((Val8 >> 4) & 0x0F));
+ pBuf[3] = '.';
+ pBuf[4] = (char)(0x30 | (Val8 & 0x0F));
+ *pLen = 5;
+ break;
+
+ case OID_SKGE_CHIPSET:
+ Val16 = pAC->Pnmi.Chipset;
+ SK_PNMI_STORE_U16(pBuf, Val16);
+ *pLen = sizeof(SK_U16);
+ break;
+
+ case OID_SKGE_CHIPID:
+ Val32 = pAC->GIni.GIChipId;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_RAMSIZE:
+ Val32 = pAC->GIni.GIRamSize;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_VAUXAVAIL:
+ *pBuf = (char) pAC->GIni.GIVauxAvail;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_BUS_TYPE:
+ *pBuf = (char) SK_PNMI_BUS_PCI;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_BUS_SPEED:
+ *pBuf = pAC->Pnmi.PciBusSpeed;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_BUS_WIDTH:
+ *pBuf = pAC->Pnmi.PciBusWidth;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_RESULT:
+ Val32 = pAC->Pnmi.TestResult;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_SENSOR_NUMBER:
+ *pBuf = (char)pAC->I2c.MaxSens;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_CHKSM_NUMBER:
+ *pBuf = SKCS_NUM_PROTOCOLS;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_TRAP_NUMBER:
+ GetTrapQueueLen(pAC, &Len, &Val);
+ Val32 = (SK_U32)Val;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_TRAP:
+ GetTrapQueueLen(pAC, &Len, &Val);
+ if (*pLen < Len) {
+
+ *pLen = Len;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ CopyTrapQueue(pAC, pBuf);
+ *pLen = Len;
+ break;
+
+ case OID_SKGE_RLMT_MONITOR_NUMBER:
+		/* XXX Not yet implemented by RLMT therefore we return zero elements */
+ Val32 = 0;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
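+	/*
+	 * For the software counter OIDs below: in dual net mode the counter
+	 * of the port belonging to NetIndex is returned; in single net mode
+	 * the counters of both physical ports are added up.
+	 */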
+ case OID_SKGE_TX_SW_QUEUE_LEN:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueLen;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxSwQueueLen +
+ pAC->Pnmi.BufPort[1].TxSwQueueLen;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueLen;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxSwQueueLen +
+ pAC->Pnmi.Port[1].TxSwQueueLen;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+
+ case OID_SKGE_TX_SW_QUEUE_MAX:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueMax;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxSwQueueMax +
+ pAC->Pnmi.BufPort[1].TxSwQueueMax;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueMax;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxSwQueueMax +
+ pAC->Pnmi.Port[1].TxSwQueueMax;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_TX_RETRY:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxRetryCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxRetryCts +
+ pAC->Pnmi.BufPort[1].TxRetryCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxRetryCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxRetryCts +
+ pAC->Pnmi.Port[1].TxRetryCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RX_INTR_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].RxIntrCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].RxIntrCts +
+ pAC->Pnmi.BufPort[1].RxIntrCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].RxIntrCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].RxIntrCts +
+ pAC->Pnmi.Port[1].RxIntrCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_TX_INTR_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxIntrCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxIntrCts +
+ pAC->Pnmi.BufPort[1].TxIntrCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxIntrCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxIntrCts +
+ pAC->Pnmi.Port[1].TxIntrCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RX_NO_BUF_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].RxNoBufCts +
+ pAC->Pnmi.BufPort[1].RxNoBufCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].RxNoBufCts +
+ pAC->Pnmi.Port[1].RxNoBufCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_TX_NO_BUF_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxNoBufCts +
+ pAC->Pnmi.BufPort[1].TxNoBufCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxNoBufCts +
+ pAC->Pnmi.Port[1].TxNoBufCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_TX_USED_DESCR_NO:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].TxUsedDescrNo;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].TxUsedDescrNo +
+ pAC->Pnmi.BufPort[1].TxUsedDescrNo;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].TxUsedDescrNo;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].TxUsedDescrNo +
+ pAC->Pnmi.Port[1].TxUsedDescrNo;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RX_DELIVERED_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].RxDeliveredCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].RxDeliveredCts +
+ pAC->Pnmi.BufPort[1].RxDeliveredCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].RxDeliveredCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].RxDeliveredCts +
+ pAC->Pnmi.Port[1].RxDeliveredCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RX_OCTETS_DELIV_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].RxOctetsDeliveredCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].RxOctetsDeliveredCts +
+ pAC->Pnmi.BufPort[1].RxOctetsDeliveredCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].RxOctetsDeliveredCts +
+ pAC->Pnmi.Port[1].RxOctetsDeliveredCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RX_HW_ERROR_CTS:
+ SK_PNMI_STORE_U64(pBuf, Val64RxHwErrs);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_TX_HW_ERROR_CTS:
+ SK_PNMI_STORE_U64(pBuf, Val64TxHwErrs);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_IN_ERRORS_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = Val64RxHwErrs +
+ pAC->Pnmi.BufPort[0].RxNoBufCts +
+ pAC->Pnmi.BufPort[1].RxNoBufCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = Val64RxHwErrs +
+ pAC->Pnmi.Port[0].RxNoBufCts +
+ pAC->Pnmi.Port[1].RxNoBufCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_OUT_ERROR_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = Val64TxHwErrs +
+ pAC->Pnmi.BufPort[0].TxNoBufCts +
+ pAC->Pnmi.BufPort[1].TxNoBufCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = Val64TxHwErrs +
+ pAC->Pnmi.Port[0].TxNoBufCts +
+ pAC->Pnmi.Port[1].TxNoBufCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_ERR_RECOVERY_CTS:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].ErrRecoveryCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.BufPort[0].ErrRecoveryCts +
+ pAC->Pnmi.BufPort[1].ErrRecoveryCts;
+ }
+ }
+ else {
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ Val64 = pAC->Pnmi.Port[NetIndex].ErrRecoveryCts;
+ }
+ /* Single net mode */
+ else {
+ Val64 = pAC->Pnmi.Port[0].ErrRecoveryCts +
+ pAC->Pnmi.Port[1].ErrRecoveryCts;
+ }
+ }
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_SYSUPTIME:
+ Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
+ Val64 -= pAC->Pnmi.StartUpTime;
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_MDB_VERSION:
+ Val32 = SK_PNMI_MDB_VERSION;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_GEN_RCV_ERROR:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
+ }
+ else {
+ Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts;
+ }
+
+ /*
+		 * By default a 32 bit value is returned
+ */
+ if (!Is64BitReq) {
+ Val32 = (SK_U32)Val64;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ }
+ else {
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ }
+ break;
+
+ case OID_GEN_XMIT_ERROR:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
+ }
+ else {
+ Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts;
+ }
+
+ /*
+		 * By default a 32 bit value is returned
+ */
+ if (!Is64BitReq) {
+ Val32 = (SK_U32)Val64;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ }
+ else {
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ }
+ break;
+
+ case OID_GEN_RCV_NO_BUFFER:
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
+ }
+ else {
+ Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts;
+ }
+
+ /*
+		 * By default a 32 bit value is returned
+ */
+ if (!Is64BitReq) {
+ Val32 = (SK_U32)Val64;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ }
+ else {
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ }
+ break;
+
+ case OID_GEN_TRANSMIT_QUEUE_LENGTH:
+ Val32 = (SK_U32)pAC->Pnmi.Port[NetIndex].TxSwQueueLen;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR034,
+ SK_PNMI_ERR034MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ if (Id == OID_SKGE_RX_HW_ERROR_CTS ||
+ Id == OID_SKGE_TX_HW_ERROR_CTS ||
+ Id == OID_SKGE_IN_ERRORS_CTS ||
+ Id == OID_SKGE_OUT_ERROR_CTS ||
+ Id == OID_GEN_XMIT_ERROR ||
+ Id == OID_GEN_RCV_ERROR) {
+
+ pAC->Pnmi.MacUpdatedFlag --;
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * Rlmt - OID handler function of OID_SKGE_RLMT_XXX single instance.
+ *
+ * Description:
+ *	Gets, presets, and sets the RLMT OIDs.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+PNMI_STATIC int Rlmt(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ int Ret;
+ unsigned int PhysPortIndex;
+ unsigned int PhysPortMax;
+ SK_EVPARA EventParam;
+ SK_U32 Val32;
+ SK_U64 Val64;
+
+
+ /*
+ * Check instance. Only single instance OIDs are allowed here.
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ /*
+ * Perform the requested action.
+ */
+ if (Action == SK_PNMI_GET) {
+
+ /*
+ * Check if the buffer length is large enough.
+ */
+
+ switch (Id) {
+
+ case OID_SKGE_RLMT_MODE:
+ case OID_SKGE_RLMT_PORT_ACTIVE:
+ case OID_SKGE_RLMT_PORT_PREFERRED:
+ if (*pLen < sizeof(SK_U8)) {
+
+ *pLen = sizeof(SK_U8);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_RLMT_PORT_NUMBER:
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_CTS:
+ case OID_SKGE_RLMT_CHANGE_TIME:
+ case OID_SKGE_RLMT_CHANGE_ESTIM:
+ case OID_SKGE_RLMT_CHANGE_THRES:
+ if (*pLen < sizeof(SK_U64)) {
+
+ *pLen = sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR035,
+ SK_PNMI_ERR035MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Update RLMT statistic and increment semaphores to indicate
+		 * that an update was already done. Maybe RLMT will keep its
+		 * statistics up to date at some point; then this type of
+		 * call can be removed.
+ */
+ if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.RlmtUpdatedFlag ++;
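+		/*
+		 * The flag is decremented again below, once the requested
+		 * value has been stored in the buffer.
+		 */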
+
+ /*
+ * Retrieve Value
+ */
+ switch (Id) {
+
+ case OID_SKGE_RLMT_MODE:
+ *pBuf = (char)pAC->Rlmt.Net[0].RlmtMode;
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_RLMT_PORT_NUMBER:
+ Val32 = (SK_U32)pAC->GIni.GIMacsFound;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ *pLen = sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_RLMT_PORT_ACTIVE:
+ *pBuf = 0;
+ /*
+ * If multiple ports may become active this OID
+ * doesn't make sense any more. A new variable in
+ * the port structure should be created. However,
+			 * for now the first active port is returned.
+ */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+
+ for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(PhysPortIndex);
+ break;
+ }
+ }
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_RLMT_PORT_PREFERRED:
+ *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(pAC->Rlmt.Net[NetIndex].Preference);
+ *pLen = sizeof(char);
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_CTS:
+ Val64 = pAC->Pnmi.RlmtChangeCts;
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_TIME:
+ Val64 = pAC->Pnmi.RlmtChangeTime;
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_ESTIM:
+ Val64 = pAC->Pnmi.RlmtChangeEstimate.Estimate;
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_THRES:
+ Val64 = pAC->Pnmi.RlmtChangeThreshold;
+ SK_PNMI_STORE_U64(pBuf, Val64);
+ *pLen = sizeof(SK_U64);
+ break;
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
+ ("Rlmt: Unknown OID should be handled before"));
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ }
+ else {
+ /* Perform a preset or set */
+ switch (Id) {
+
+ case OID_SKGE_RLMT_MODE:
+ /* Check if the buffer length is plausible */
+ if (*pLen < sizeof(char)) {
+
+ *pLen = sizeof(char);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+			/*
+			 * Check if the value range is correct: the
+			 * SK_PNMI_RLMT_MODE_CHK_LINK bit must be set and no
+			 * bits above the lower four may be used.
+			 */
+ if (*pLen != sizeof(char) ||
+ (*pBuf & SK_PNMI_RLMT_MODE_CHK_LINK) == 0 ||
+ *(SK_U8 *)pBuf > 15) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_OK);
+ }
+ /* Send an event to RLMT to change the mode */
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ EventParam.Para32[0] |= (SK_U32)(*pBuf);
+ EventParam.Para32[1] = 0;
+ if (SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR037,
+ SK_PNMI_ERR037MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ break;
+
+ case OID_SKGE_RLMT_PORT_PREFERRED:
+ /* Check if the buffer length is plausible */
+ if (*pLen < sizeof(char)) {
+
+ *pLen = sizeof(char);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ /* Check if the value range is correct */
+ if (*pLen != sizeof(char) || *(SK_U8 *)pBuf >
+ (SK_U8)pAC->GIni.GIMacsFound) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /*
+			 * Send an event to RLMT to change the preferred port.
+			 * A parameter of -1 means automatic mode; RLMT will
+			 * then decide which port is preferred.
+ */
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ EventParam.Para32[0] = (SK_U32)(*pBuf) - 1;
+ EventParam.Para32[1] = NetIndex;
+ if (SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR038,
+ SK_PNMI_ERR038MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ break;
+
+ case OID_SKGE_RLMT_CHANGE_THRES:
+ /* Check if the buffer length is plausible */
+ if (*pLen < sizeof(SK_U64)) {
+
+ *pLen = sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ /*
+			 * There are not many restrictions on the
+ * value range.
+ */
+ if (*pLen != sizeof(SK_U64)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ /* A preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_OK);
+ }
+ /*
+ * Store the new threshold, which will be taken
+ * on the next timer event.
+ */
+ SK_PNMI_READ_U64(pBuf, Val64);
+ pAC->Pnmi.RlmtChangeThreshold = Val64;
+ break;
+
+ default:
+			/* The other OIDs cannot be set */
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * RlmtStat - OID handler function of OID_SKGE_RLMT_XXX multiple instance.
+ *
+ * Description:
+ * Performs get requests on multiple instance variables.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+PNMI_STATIC int RlmtStat(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int PhysPortMax;
+ unsigned int PhysPortIndex;
+ unsigned int Limit;
+ unsigned int Offset;
+ int Ret;
+ SK_U32 Val32;
+ SK_U64 Val64;
+
+ /*
+ * Calculate the port indexes from the instance.
+ */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+
+ if ((Instance != (SK_U32)(-1))) {
+ /* Check instance range */
+ if ((Instance < 1) || (Instance > PhysPortMax)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ /* Single net mode */
+ PhysPortIndex = Instance - 1;
+
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ PhysPortIndex = NetIndex;
+ }
+
+ /* Both net modes */
+ Limit = PhysPortIndex + 1;
+ }
+ else {
+ /* Single net mode */
+ PhysPortIndex = 0;
+ Limit = PhysPortMax;
+
+ /* Dual net mode */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ PhysPortIndex = NetIndex;
+ Limit = PhysPortIndex + 1;
+ }
+ }
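+	/*
+	 * PhysPortIndex now addresses the first port to be reported and
+	 * Limit the first port that is no longer reported.
+	 */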
+
+ /*
+ * Currently only get requests are allowed.
+ */
+ if (Action != SK_PNMI_GET) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /*
+ * Check if the buffer length is large enough.
+ */
+ switch (Id) {
+
+ case OID_SKGE_RLMT_PORT_INDEX:
+ case OID_SKGE_RLMT_STATUS:
+ if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) {
+
+ *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_RLMT_TX_HELLO_CTS:
+ case OID_SKGE_RLMT_RX_HELLO_CTS:
+ case OID_SKGE_RLMT_TX_SP_REQ_CTS:
+ case OID_SKGE_RLMT_RX_SP_CTS:
+ if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U64)) {
+
+ *pLen = (Limit - PhysPortIndex) * sizeof(SK_U64);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR039,
+ SK_PNMI_ERR039MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+
+ }
+
+ /*
+ * Update statistic and increment semaphores to indicate that
+ * an update was already done.
+ */
+ if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.RlmtUpdatedFlag ++;
+
+ /*
+ * Get value
+ */
+ Offset = 0;
+ for (; PhysPortIndex < Limit; PhysPortIndex ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_RLMT_PORT_INDEX:
+ Val32 = PhysPortIndex;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_RLMT_STATUS:
+ if (pAC->Rlmt.Port[PhysPortIndex].PortState ==
+ SK_RLMT_PS_INIT ||
+ pAC->Rlmt.Port[PhysPortIndex].PortState ==
+ SK_RLMT_PS_DOWN) {
+
+ Val32 = SK_PNMI_RLMT_STATUS_ERROR;
+ }
+ else if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ Val32 = SK_PNMI_RLMT_STATUS_ACTIVE;
+ }
+ else {
+ Val32 = SK_PNMI_RLMT_STATUS_STANDBY;
+ }
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ case OID_SKGE_RLMT_TX_HELLO_CTS:
+ Val64 = pAC->Rlmt.Port[PhysPortIndex].TxHelloCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_RX_HELLO_CTS:
+ Val64 = pAC->Rlmt.Port[PhysPortIndex].RxHelloCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_TX_SP_REQ_CTS:
+ Val64 = pAC->Rlmt.Port[PhysPortIndex].TxSpHelloReqCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ case OID_SKGE_RLMT_RX_SP_CTS:
+ Val64 = pAC->Rlmt.Port[PhysPortIndex].RxSpHelloCts;
+ SK_PNMI_STORE_U64(pBuf + Offset, Val64);
+ Offset += sizeof(SK_U64);
+ break;
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
+				("RlmtStat: Unknown OID should be handled before"));
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ *pLen = Offset;
+
+ pAC->Pnmi.RlmtUpdatedFlag --;
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * MacPrivateConf - OID handler function of OIDs concerning the configuration
+ *
+ * Description:
+ *	Gets, presets, and sets the OIDs concerning the configuration.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+PNMI_STATIC int MacPrivateConf(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int PhysPortMax;
+ unsigned int PhysPortIndex;
+ unsigned int LogPortMax;
+ unsigned int LogPortIndex;
+ unsigned int Limit;
+ unsigned int Offset;
+ char Val8;
+ char *pBufPtr;
+ int Ret;
+ SK_EVPARA EventParam;
+ SK_U32 Val32;
+
+ /*
+ * Calculate instance if wished. MAC index 0 is the virtual MAC.
+ */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+ LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
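+	/*
+	 * Logical port 0 represents the virtual port, which combines all
+	 * active physical ports. In dual net mode there is no virtual port,
+	 * so the number of logical ports is reduced by one.
+	 */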
+
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
+ LogPortMax--;
+ }
+
+ if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
+ /* Check instance range */
+ if ((Instance < 1) || (Instance > LogPortMax)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+ LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
+ Limit = LogPortIndex + 1;
+ }
+
+ else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
+
+ LogPortIndex = 0;
+ Limit = LogPortMax;
+ }
+
+ /*
+ * Perform action
+ */
+ if (Action == SK_PNMI_GET) {
+
+ /* Check length */
+ switch (Id) {
+
+ case OID_SKGE_PMD:
+ case OID_SKGE_CONNECTOR:
+ case OID_SKGE_LINK_CAP:
+ case OID_SKGE_LINK_MODE:
+ case OID_SKGE_LINK_MODE_STATUS:
+ case OID_SKGE_LINK_STATUS:
+ case OID_SKGE_FLOWCTRL_CAP:
+ case OID_SKGE_FLOWCTRL_MODE:
+ case OID_SKGE_FLOWCTRL_STATUS:
+ case OID_SKGE_PHY_OPERATION_CAP:
+ case OID_SKGE_PHY_OPERATION_MODE:
+ case OID_SKGE_PHY_OPERATION_STATUS:
+ case OID_SKGE_SPEED_CAP:
+ case OID_SKGE_SPEED_MODE:
+ case OID_SKGE_SPEED_STATUS:
+#ifdef SK_PHY_LP_MODE
+ case OID_SKGE_PHY_LP_MODE:
+#endif
+ if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) {
+
+ *pLen = (Limit - LogPortIndex) * sizeof(SK_U8);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_MTU:
+ case OID_SKGE_PHY_TYPE:
+ if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U32)) {
+
+ *pLen = (Limit - LogPortIndex) * sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR041,
+ SK_PNMI_ERR041MSG);
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Update statistic and increment semaphore to indicate
+ * that an update was already done.
+ */
+ if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
+
+ *pLen = 0;
+ return (Ret);
+ }
+ pAC->Pnmi.SirqUpdatedFlag ++;
+
+ /*
+ * Get value
+ */
+ Offset = 0;
+ for (; LogPortIndex < Limit; LogPortIndex ++) {
+
+ pBufPtr = pBuf + Offset;
+
+ switch (Id) {
+
+ case OID_SKGE_PMD:
+ *pBufPtr = pAC->Pnmi.PMD;
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_CONNECTOR:
+ *pBufPtr = pAC->Pnmi.Connector;
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_PHY_TYPE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ continue;
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+ Val32 = pAC->GIni.GP[PhysPortIndex].PhyType;
+ SK_PNMI_STORE_U32(pBufPtr, Val32);
+ }
+ }
+ else { /* DualNetMode */
+
+ Val32 = pAC->GIni.GP[NetIndex].PhyType;
+ SK_PNMI_STORE_U32(pBufPtr, Val32);
+ }
+ Offset += sizeof(SK_U32);
+ break;
+
+#ifdef SK_PHY_LP_MODE
+ case OID_SKGE_PHY_LP_MODE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ continue;
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
+ Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
+ *pBufPtr = Val8;
+ }
+ }
+ else { /* DualNetMode */
+
+					/* In dual net mode the net index addresses the physical port */
+					Val8 = (SK_U8) pAC->GIni.GP[NetIndex].PPhyPowerState;
+ *pBufPtr = Val8;
+ }
+ Offset += sizeof(SK_U8);
+ break;
+#endif
+
+ case OID_SKGE_LINK_CAP:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkCap;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PLinkCap;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_LINK_MODE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkModeConf;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PLinkModeConf;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_LINK_MODE_STATUS:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr =
+ CalculateLinkModeStatus(pAC, IoC, PhysPortIndex);
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = CalculateLinkModeStatus(pAC, IoC, NetIndex);
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_LINK_STATUS:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = CalculateLinkStatus(pAC, IoC, PhysPortIndex);
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = CalculateLinkStatus(pAC, IoC, NetIndex);
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_FLOWCTRL_CAP:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlCap;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_FLOWCTRL_MODE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlMode;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_FLOWCTRL_STATUS:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlStatus;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_PHY_OPERATION_CAP:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSCap;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PMSCap;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_PHY_OPERATION_MODE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSMode;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PMSMode;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_PHY_OPERATION_STATUS:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSStatus;
+ }
+ }
+ else {
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PMSStatus;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SPEED_CAP:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedCap;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedCap;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SPEED_MODE:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeed;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeed;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SPEED_STATUS:
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ /* Get value for virtual port */
+ VirtualConf(pAC, IoC, Id, pBufPtr);
+ }
+ else {
+ /* Get value for physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+
+ *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed;
+ }
+ }
+ else { /* DualNetMode */
+
+ *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedUsed;
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_MTU:
+ Val32 = SK_DRIVER_GET_MTU(pAC, IoC, NetIndex);
+ SK_PNMI_STORE_U32(pBufPtr, Val32);
+ Offset += sizeof(SK_U32);
+ break;
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
+ ("MacPrivateConf: Unknown OID should be handled before"));
+
+ pAC->Pnmi.SirqUpdatedFlag --;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ *pLen = Offset;
+ pAC->Pnmi.SirqUpdatedFlag --;
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /*
+ * From here SET or PRESET action. Check if the passed
+ * buffer length is plausible.
+ */
+ switch (Id) {
+
+ case OID_SKGE_LINK_MODE:
+ case OID_SKGE_FLOWCTRL_MODE:
+ case OID_SKGE_PHY_OPERATION_MODE:
+ case OID_SKGE_SPEED_MODE:
+ if (*pLen < Limit - LogPortIndex) {
+
+ *pLen = Limit - LogPortIndex;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ if (*pLen != Limit - LogPortIndex) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ break;
+
+#ifdef SK_PHY_LP_MODE
+ case OID_SKGE_PHY_LP_MODE:
+ if (*pLen < Limit - LogPortIndex) {
+
+ *pLen = Limit - LogPortIndex;
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+#endif
+
+ case OID_SKGE_MTU:
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ if (*pLen != sizeof(SK_U32)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /*
+ * Perform preset or set
+ */
+ Offset = 0;
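+	/*
+	 * For the link, flow control, operation, speed, and MTU OIDs a value
+	 * of zero in the buffer means that the setting of the corresponding
+	 * port remains unchanged.
+	 */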
+ for (; LogPortIndex < Limit; LogPortIndex ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_LINK_MODE:
+ /* Check the value range */
+ Val8 = *(pBuf + Offset);
+ if (Val8 == 0) {
+
+ Offset += sizeof(char);
+ break;
+ }
+ if (Val8 < SK_LMODE_HALF ||
+ (LogPortIndex != 0 && Val8 > SK_LMODE_AUTOSENSE) ||
+ (LogPortIndex == 0 && Val8 > SK_LMODE_INDETERMINATED)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ if (LogPortIndex == 0) {
+
+ /*
+ * The virtual port consists of all currently
+ * active ports. Find them and send an event
+ * with the new link mode to SIRQ.
+ */
+ for (PhysPortIndex = 0;
+ PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (!pAC->Pnmi.Port[PhysPortIndex].
+ ActiveFlag) {
+
+ continue;
+ }
+
+ EventParam.Para32[0] = PhysPortIndex;
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_LMODE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR043,
+ SK_PNMI_ERR043MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ }
+ else {
+ /*
+ * Send an event with the new link mode to
+ * the SIRQ module.
+ */
+ EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC, SK_HWEV_SET_LMODE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR043,
+ SK_PNMI_ERR043MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_FLOWCTRL_MODE:
+ /* Check the value range */
+ Val8 = *(pBuf + Offset);
+ if (Val8 == 0) {
+
+ Offset += sizeof(char);
+ break;
+ }
+ if (Val8 < SK_FLOW_MODE_NONE ||
+ (LogPortIndex != 0 && Val8 > SK_FLOW_MODE_SYM_OR_REM) ||
+ (LogPortIndex == 0 && Val8 > SK_FLOW_MODE_INDETERMINATED)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ if (LogPortIndex == 0) {
+
+ /*
+ * The virtual port consists of all currently
+ * active ports. Find them and send an event
+ * with the new flow control mode to SIRQ.
+ */
+ for (PhysPortIndex = 0;
+ PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (!pAC->Pnmi.Port[PhysPortIndex].
+ ActiveFlag) {
+
+ continue;
+ }
+
+ EventParam.Para32[0] = PhysPortIndex;
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_FLOWMODE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR044,
+ SK_PNMI_ERR044MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ }
+ else {
+ /*
+ * Send an event with the new flow control
+ * mode to the SIRQ module.
+ */
+ EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_FLOWMODE, EventParam)
+ > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR044,
+ SK_PNMI_ERR044MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_PHY_OPERATION_MODE :
+ /* Check the value range */
+ Val8 = *(pBuf + Offset);
+ if (Val8 == 0) {
+ /* mode of this port remains unchanged */
+ Offset += sizeof(char);
+ break;
+ }
+ if (Val8 < SK_MS_MODE_AUTO ||
+ (LogPortIndex != 0 && Val8 > SK_MS_MODE_SLAVE) ||
+ (LogPortIndex == 0 && Val8 > SK_MS_MODE_INDETERMINATED)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ if (LogPortIndex == 0) {
+
+ /*
+ * The virtual port consists of all currently
+ * active ports. Find them and send an event
+ * with new master/slave (role) mode to SIRQ.
+ */
+ for (PhysPortIndex = 0;
+ PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (!pAC->Pnmi.Port[PhysPortIndex].
+ ActiveFlag) {
+
+ continue;
+ }
+
+ EventParam.Para32[0] = PhysPortIndex;
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_ROLE,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR042,
+ SK_PNMI_ERR042MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ }
+ else {
+ /*
+ * Send an event with the new master/slave
+ * (role) mode to the SIRQ module.
+ */
+ EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_ROLE, EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR042,
+ SK_PNMI_ERR042MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_SPEED_MODE:
+ /* Check the value range */
+ Val8 = *(pBuf + Offset);
+ if (Val8 == 0) {
+
+ Offset += sizeof(char);
+ break;
+ }
+ if (Val8 < (SK_LSPEED_AUTO) ||
+ (LogPortIndex != 0 && Val8 > (SK_LSPEED_1000MBPS)) ||
+ (LogPortIndex == 0 && Val8 > (SK_LSPEED_INDETERMINATED))) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ if (LogPortIndex == 0) {
+
+ /*
+ * The virtual port consists of all currently
+ * active ports. Find them and send an event
+				 * with the new link speed to SIRQ.
+ */
+ for (PhysPortIndex = 0;
+ PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ continue;
+ }
+
+ EventParam.Para32[0] = PhysPortIndex;
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_SPEED,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR045,
+ SK_PNMI_ERR045MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ }
+ else {
+ /*
+				 * Send an event with the new link speed
+				 * to the SIRQ module.
+ */
+ EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
+ pAC, LogPortIndex);
+ EventParam.Para32[1] = (SK_U32)Val8;
+ if (SkGeSirqEvent(pAC, IoC,
+ SK_HWEV_SET_SPEED,
+ EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW,
+ SK_PNMI_ERR045,
+ SK_PNMI_ERR045MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ Offset += sizeof(char);
+ break;
+
+ case OID_SKGE_MTU :
+ /* Check the value range */
+ Val32 = *(SK_U32*)(pBuf + Offset);
+ if (Val32 == 0) {
+ /* mtu of this port remains unchanged */
+ Offset += sizeof(SK_U32);
+ break;
+ }
+ if (SK_DRIVER_PRESET_MTU(pAC, IoC, NetIndex, Val32) != 0) {
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+ return (SK_PNMI_ERR_OK);
+ }
+
+ if (SK_DRIVER_SET_MTU(pAC, IoC, NetIndex, Val32) != 0) {
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ Offset += sizeof(SK_U32);
+ break;
+
+#ifdef SK_PHY_LP_MODE
+ case OID_SKGE_PHY_LP_MODE:
+ /* The preset ends here */
+ if (Action == SK_PNMI_PRESET) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
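+			/*
+			 * Power modes 1 and 2 require the driver to be
+			 * de-initialized before entering them and to be
+			 * re-initialized after leaving them; modes 3 and 4
+			 * do not.
+			 */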
+ if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
+ if (LogPortIndex == 0) {
+ Offset = 0;
+ continue;
+ }
+ else {
+ /* Set value for physical ports */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
+
+ switch (*(pBuf + Offset)) {
+ case 0:
+ /* If LowPowerMode is active, we can leave it. */
+ if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
+
+ Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
+
+ if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) {
+
+ SkDrvInitAdapter(pAC);
+ }
+ break;
+ }
+ else {
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ /* If no LowPowerMode is active, we can enter it. */
+ if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
+
+ if ((*(pBuf + Offset)) < 3) {
+
+ SkDrvDeInitAdapter(pAC);
+ }
+
+ Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
+ break;
+ }
+ else {
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ }
+ }
+ else { /* DualNetMode */
+
+				/* In dual net mode the net index addresses the physical port */
+				PhysPortIndex = NetIndex;
+
+ switch (*(pBuf + Offset)) {
+ case 0:
+ /* If we are in a LowPowerMode, we can leave it. */
+ if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
+
+ Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
+
+ if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3) {
+
+ SkDrvInitAdapter(pAC);
+ }
+ break;
+ }
+ else {
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ /* If we are not already in LowPowerMode, we can enter it. */
+ if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
+
+ if ((*(pBuf + Offset)) < 3) {
+
+ SkDrvDeInitAdapter(pAC);
+ }
+ else {
+
+ Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
+ }
+ break;
+ }
+ else {
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+ }
+ Offset += sizeof(SK_U8);
+ break;
+#endif
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
+ ("MacPrivateConf: Unknown OID should be handled before set"));
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * Monitor - OID handler function for RLMT_MONITOR_XXX
+ *
+ * Description:
+ * Because RLMT currently does not support the monitoring of
+ *	remote adapter cards, we always return an empty table.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
+ * value range.
+ * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *	                         adapter).
+ */
+PNMI_STATIC int Monitor(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int Index;
+ unsigned int Limit;
+ unsigned int Offset;
+ unsigned int Entries;
+
+
+ /*
+ * Calculate instance if wished.
+ */
+	/* XXX Not yet implemented. Always return an empty table. */
+ Entries = 0;
+
+ if ((Instance != (SK_U32)(-1))) {
+
+ if ((Instance < 1) || (Instance > Entries)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ Index = (unsigned int)Instance - 1;
+ Limit = (unsigned int)Instance;
+ }
+ else {
+ Index = 0;
+ Limit = Entries;
+ }
+
+ /*
+ * Get/Set value
+ */
+ if (Action == SK_PNMI_GET) {
+
+ for (Offset=0; Index < Limit; Index ++) {
+
+ switch (Id) {
+
+ case OID_SKGE_RLMT_MONITOR_INDEX:
+ case OID_SKGE_RLMT_MONITOR_ADDR:
+ case OID_SKGE_RLMT_MONITOR_ERRS:
+ case OID_SKGE_RLMT_MONITOR_TIMESTAMP:
+ case OID_SKGE_RLMT_MONITOR_ADMIN:
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR046,
+ SK_PNMI_ERR046MSG);
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+ *pLen = Offset;
+ }
+ else {
+ /* Only MONITOR_ADMIN can be set */
+ if (Id != OID_SKGE_RLMT_MONITOR_ADMIN) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_READ_ONLY);
+ }
+
+ /* Check if the length is plausible */
+ if (*pLen < (Limit - Index)) {
+
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ /* Okay, we have a wide value range */
+ if (*pLen != (Limit - Index)) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+/*
+ for (Offset=0; Index < Limit; Index ++) {
+ }
+*/
+/*
+ * XXX Not yet implemented. Return always BAD_VALUE, because the table
+ * is empty.
+ */
+ *pLen = 0;
+ return (SK_PNMI_ERR_BAD_VALUE);
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * VirtualConf - Calculates the values of configuration OIDs for virtual port
+ *
+ * Description:
+ *	Handles the GET of the configuration group OIDs, which is a bit
+ *	involved. The virtual port consists of all currently active
+ *	physical ports. If multiple ports are active but configured
+ *	differently, a single value cannot be returned directly. Therefore
+ *	we take the value of the first active port and compare it with that
+ *	of the other active ports. If they are not the same, we return a
+ *	value that indicates that the state is indeterminate.
+ *
+ * Returns:
+ * Nothing
+ */
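+/*
+ * For instance, if two ports are active and port 0 is configured for full
+ * duplex while port 1 is configured for half duplex, a GET of
+ * OID_SKGE_LINK_MODE on the virtual port returns SK_LMODE_INDETERMINATED.
+ * The capability OIDs are handled differently: their values are simply
+ * ORed together over all active ports.
+ */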
+PNMI_STATIC void VirtualConf(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf) /* Buffer used for the management data transfer */
+{
+ unsigned int PhysPortMax;
+ unsigned int PhysPortIndex;
+ SK_U8 Val8;
+ SK_U32 Val32;
+ SK_BOOL PortActiveFlag;
+ SK_GEPORT *pPrt;
+
+ *pBuf = 0;
+ PortActiveFlag = SK_FALSE;
+ PhysPortMax = pAC->GIni.GIMacsFound;
+
+ for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ pPrt = &pAC->GIni.GP[PhysPortIndex];
+
+ /* Check if the physical port is active */
+ if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ continue;
+ }
+
+ PortActiveFlag = SK_TRUE;
+
+ switch (Id) {
+
+ case OID_SKGE_PHY_TYPE:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+ Val32 = pPrt->PhyType;
+ SK_PNMI_STORE_U32(pBuf, Val32);
+ continue;
+			}
+			break;
+
+ case OID_SKGE_LINK_CAP:
+
+ /*
+			 * Different capabilities should not happen, but
+			 * just in case OR them all together. Seen this way,
+			 * the virtual port is capable of all capabilities
+			 * found on the active ports.
+ */
+ *pBuf |= pPrt->PLinkCap;
+ break;
+
+ case OID_SKGE_LINK_MODE:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PLinkModeConf;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different link
+ * mode than the first one we return a value that
+			 * indicates that the link mode is indeterminate.
+ */
+ if (*pBuf != pPrt->PLinkModeConf) {
+
+ *pBuf = SK_LMODE_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_LINK_MODE_STATUS:
+ /* Get the link mode of the physical port */
+ Val8 = CalculateLinkModeStatus(pAC, IoC, PhysPortIndex);
+
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = Val8;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different link
+ * mode status than the first one we return a value
+ * that indicates that the link mode status is
+			 * indeterminate.
+ */
+ if (*pBuf != Val8) {
+
+ *pBuf = SK_LMODE_STAT_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_LINK_STATUS:
+ /* Get the link status of the physical port */
+ Val8 = CalculateLinkStatus(pAC, IoC, PhysPortIndex);
+
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = Val8;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different link
+ * status than the first one, we return a value
+ * that indicates that the link status is
+			 * indeterminate.
+ */
+ if (*pBuf != Val8) {
+
+ *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_FLOWCTRL_CAP:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PFlowCtrlCap;
+ continue;
+ }
+
+ /*
+			 * Seen this way, the virtual port is capable of all
+			 * capabilities found on the active ports.
+ */
+ *pBuf |= pPrt->PFlowCtrlCap;
+ break;
+
+ case OID_SKGE_FLOWCTRL_MODE:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PFlowCtrlMode;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different flow
+ * control mode than the first one, we return a value
+			 * that indicates that the mode is indeterminate.
+ */
+ if (*pBuf != pPrt->PFlowCtrlMode) {
+
+ *pBuf = SK_FLOW_MODE_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_FLOWCTRL_STATUS:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PFlowCtrlStatus;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different flow
+ * control status than the first one, we return a
+ * value that indicates that the status is
+			 * indeterminate.
+ */
+ if (*pBuf != pPrt->PFlowCtrlStatus) {
+
+ *pBuf = SK_FLOW_STAT_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_PHY_OPERATION_CAP:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PMSCap;
+ continue;
+ }
+
+ /*
+			 * Seen this way, the virtual port is capable of all
+			 * capabilities found on the active ports.
+ */
+ *pBuf |= pPrt->PMSCap;
+ break;
+
+ case OID_SKGE_PHY_OPERATION_MODE:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PMSMode;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different master/
+ * slave mode than the first one, we return a value
+			 * that indicates that the mode is indeterminate.
+ */
+ if (*pBuf != pPrt->PMSMode) {
+
+ *pBuf = SK_MS_MODE_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_PHY_OPERATION_STATUS:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PMSStatus;
+ continue;
+ }
+
+ /*
+ * If we find an active port with a different master/
+ * slave status than the first one, we return a
+ * value that indicates that the status is
+			 * indeterminate.
+ */
+ if (*pBuf != pPrt->PMSStatus) {
+
+ *pBuf = SK_MS_STAT_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_SPEED_MODE:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PLinkSpeed;
+ continue;
+ }
+
+ /*
+			 * If we find an active port with a different link
+			 * speed than the first one, we return a value
+			 * that indicates that the speed is indeterminate.
+ */
+ if (*pBuf != pPrt->PLinkSpeed) {
+
+ *pBuf = SK_LSPEED_INDETERMINATED;
+ }
+ break;
+
+ case OID_SKGE_SPEED_STATUS:
+ /* Check if it is the first active port */
+ if (*pBuf == 0) {
+
+ *pBuf = pPrt->PLinkSpeedUsed;
+ continue;
+ }
+
+ /*
+			 * If we find an active port with a different link
+			 * speed status than the first one, we return a
+			 * value that indicates that the status is
+			 * indeterminate.
+ */
+ if (*pBuf != pPrt->PLinkSpeedUsed) {
+
+ *pBuf = SK_LSPEED_STAT_INDETERMINATED;
+ }
+ break;
+ }
+ }
+
+ /*
+ * If no port is active return an indeterminated answer
+ */
+ if (!PortActiveFlag) {
+
+ switch (Id) {
+
+ case OID_SKGE_LINK_CAP:
+ *pBuf = SK_LMODE_CAP_INDETERMINATED;
+ break;
+
+ case OID_SKGE_LINK_MODE:
+ *pBuf = SK_LMODE_INDETERMINATED;
+ break;
+
+ case OID_SKGE_LINK_MODE_STATUS:
+ *pBuf = SK_LMODE_STAT_INDETERMINATED;
+ break;
+
+ case OID_SKGE_LINK_STATUS:
+ *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED;
+ break;
+
+ case OID_SKGE_FLOWCTRL_CAP:
+ case OID_SKGE_FLOWCTRL_MODE:
+ *pBuf = SK_FLOW_MODE_INDETERMINATED;
+ break;
+
+ case OID_SKGE_FLOWCTRL_STATUS:
+ *pBuf = SK_FLOW_STAT_INDETERMINATED;
+ break;
+
+ case OID_SKGE_PHY_OPERATION_CAP:
+ *pBuf = SK_MS_CAP_INDETERMINATED;
+ break;
+
+ case OID_SKGE_PHY_OPERATION_MODE:
+ *pBuf = SK_MS_MODE_INDETERMINATED;
+ break;
+
+ case OID_SKGE_PHY_OPERATION_STATUS:
+ *pBuf = SK_MS_STAT_INDETERMINATED;
+ break;
+ case OID_SKGE_SPEED_CAP:
+ *pBuf = SK_LSPEED_CAP_INDETERMINATED;
+ break;
+
+ case OID_SKGE_SPEED_MODE:
+ *pBuf = SK_LSPEED_INDETERMINATED;
+ break;
+
+ case OID_SKGE_SPEED_STATUS:
+ *pBuf = SK_LSPEED_STAT_INDETERMINATED;
+ break;
+ }
+ }
+}
+
+/*****************************************************************************
+ *
+ * CalculateLinkStatus - Determines the link status of a physical port
+ *
+ * Description:
+ *	Determines the link status as follows:
+ * LSTAT_PHY_DOWN: Link is down
+ * LSTAT_AUTONEG: Auto-negotiation failed
+ * LSTAT_LOG_DOWN: Link is up but RLMT did not yet put the port
+ * logically up.
+ * LSTAT_LOG_UP: RLMT marked the port as up
+ *
+ * Returns:
+ * Link status of physical port
+ */
+PNMI_STATIC SK_U8 CalculateLinkStatus(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+unsigned int PhysPortIndex) /* Physical port index */
+{
+ SK_U8 Result;
+
+ if (!pAC->GIni.GP[PhysPortIndex].PHWLinkUp) {
+
+ Result = SK_PNMI_RLMT_LSTAT_PHY_DOWN;
+ }
+ else if (pAC->GIni.GP[PhysPortIndex].PAutoNegFail > 0) {
+
+ Result = SK_PNMI_RLMT_LSTAT_AUTONEG;
+ }
+ else if (!pAC->Rlmt.Port[PhysPortIndex].PortDown) {
+
+ Result = SK_PNMI_RLMT_LSTAT_LOG_UP;
+ }
+ else {
+ Result = SK_PNMI_RLMT_LSTAT_LOG_DOWN;
+ }
+
+ return (Result);
+}
+
+/*****************************************************************************
+ *
+ * CalculateLinkModeStatus - Determines the link mode status of a phys. port
+ *
+ * Description:
+ * The COMMON module only tells us if the mode is half or full duplex.
+ *	But in the age of auto-sensing it is useful for the user to
+ *	know whether the mode was negotiated or forced. Therefore we look
+ *	at the mode that was last used by the negotiation process.
+ *
+ * Returns:
+ * The link mode status
+ */
+PNMI_STATIC SK_U8 CalculateLinkModeStatus(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+unsigned int PhysPortIndex) /* Physical port index */
+{
+ SK_U8 Result;
+
+ /* Get the current mode, which can be full or half duplex */
+ Result = pAC->GIni.GP[PhysPortIndex].PLinkModeStatus;
+
+ /* Check if no valid mode could be found (link is down) */
+ if (Result < SK_LMODE_STAT_HALF) {
+
+ Result = SK_LMODE_STAT_UNKNOWN;
+ }
+ else if (pAC->GIni.GP[PhysPortIndex].PLinkMode >= SK_LMODE_AUTOHALF) {
+
+ /*
+ * Auto-negotiation was used to bring up the link. Change
+		 * the duplex status found above so that it indicates
+		 * that auto-negotiation was involved.
+ */
+ if (Result == SK_LMODE_STAT_HALF) {
+
+ Result = SK_LMODE_STAT_AUTOHALF;
+ }
+ else if (Result == SK_LMODE_STAT_FULL) {
+
+ Result = SK_LMODE_STAT_AUTOFULL;
+ }
+ }
+
+ return (Result);
+}
+
+/*****************************************************************************
+ *
+ * GetVpdKeyArr - Obtain an array of VPD keys
+ *
+ * Description:
+ * Read the VPD keys and build an array of VPD keys, which are
+ * easy to access.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK Task successfully performed.
+ * SK_PNMI_ERR_GENERAL Something went wrong.
+ */
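+/*
+ * For illustration: VpdKeys() delivers the key names as a sequence of
+ * zero terminated strings. Assuming the typical VPD keywords "PN", "EC"
+ * and "SN" were found, the buffer "PN\0EC\0SN\0" is rearranged below into
+ * an array of fixed size slots:
+ *
+ *	pKeyArr + 0 * SK_PNMI_VPD_KEY_SIZE -> "PN"
+ *	pKeyArr + 1 * SK_PNMI_VPD_KEY_SIZE -> "EC"
+ *	pKeyArr + 2 * SK_PNMI_VPD_KEY_SIZE -> "SN"
+ */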
+PNMI_STATIC int GetVpdKeyArr(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+char *pKeyArr, /* Ptr KeyArray */
+unsigned int KeyArrLen, /* Length of array in bytes */
+unsigned int *pKeyNo) /* Number of keys */
+{
+ unsigned int BufKeysLen = SK_PNMI_VPD_BUFSIZE;
+ char BufKeys[SK_PNMI_VPD_BUFSIZE];
+ unsigned int StartOffset;
+ unsigned int Offset;
+ int Index;
+ int Ret;
+
+
+ SK_MEMSET(pKeyArr, 0, KeyArrLen);
+
+ /*
+ * Get VPD key list
+ */
+ Ret = VpdKeys(pAC, IoC, (char *)&BufKeys, (int *)&BufKeysLen,
+ (int *)pKeyNo);
+ if (Ret > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR014,
+ SK_PNMI_ERR014MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ /* If no keys are available return now */
+ if (*pKeyNo == 0 || BufKeysLen == 0) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+ /*
+	 * If the key list is too long for us, truncate it and write an
+	 * error log entry. This case should not happen because the
+	 * maximum number of keys is limited due to RAM limitations.
+ */
+ if (*pKeyNo > SK_PNMI_VPD_ENTRIES) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR015,
+ SK_PNMI_ERR015MSG);
+
+ *pKeyNo = SK_PNMI_VPD_ENTRIES;
+ }
+
+ /*
+ * Now build an array of fixed string length size and copy
+ * the keys together.
+ */
+ for (Index = 0, StartOffset = 0, Offset = 0; Offset < BufKeysLen;
+ Offset ++) {
+
+ if (BufKeys[Offset] != 0) {
+
+ continue;
+ }
+
+ if (Offset - StartOffset > SK_PNMI_VPD_KEY_SIZE) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR016,
+ SK_PNMI_ERR016MSG);
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE,
+ &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE);
+
+ Index ++;
+ StartOffset = Offset + 1;
+ }
+
+ /* Last key not zero terminated? Get it anyway */
+ if (StartOffset < Offset) {
+
+ SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE,
+ &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE);
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * SirqUpdate - Let the SIRQ update its internal values
+ *
+ * Description:
+ * Just to be sure that the SIRQ module holds its internal data
+ * structures up to date, we send an update event before we make
+ * any access.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK Task successfully performed.
+ * SK_PNMI_ERR_GENERAL Something went wrong.
+ */
+PNMI_STATIC int SirqUpdate(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC) /* IO context handle */
+{
+ SK_EVPARA EventParam;
+
+
+ /* Was the module already updated during the current PNMI call? */
+ if (pAC->Pnmi.SirqUpdatedFlag > 0) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+	/* Send a synchronous update event to the module */
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ if (SkGeSirqEvent(pAC, IoC, SK_HWEV_UPDATE_STAT, EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR047,
+ SK_PNMI_ERR047MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * RlmtUpdate - Let the RLMT update its internal values
+ *
+ * Description:
+ * Just to be sure that the RLMT module holds its internal data
+ * structures up to date, we send an update event before we make
+ * any access.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK Task successfully performed.
+ * SK_PNMI_ERR_GENERAL Something went wrong.
+ */
+PNMI_STATIC int RlmtUpdate(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 NetIndex)	/* NetIndex (0..n), in single net mode always zero */
+{
+ SK_EVPARA EventParam;
+
+
+ /* Was the module already updated during the current PNMI call? */
+ if (pAC->Pnmi.RlmtUpdatedFlag > 0) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+	/* Send a synchronous update event to the module */
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+ EventParam.Para32[0] = NetIndex;
+ EventParam.Para32[1] = (SK_U32)-1;
+ if (SkRlmtEvent(pAC, IoC, SK_RLMT_STATS_UPDATE, EventParam) > 0) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR048,
+ SK_PNMI_ERR048MSG);
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * MacUpdate - Force the XMAC to output the current statistic
+ *
+ * Description:
+ * The XMAC holds its statistic internally. To obtain the current
+ * values we must send a command so that the statistic data will
+ * be written to a predefined memory area on the adapter.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK Task successfully performed.
+ * SK_PNMI_ERR_GENERAL Something went wrong.
+ */
+PNMI_STATIC int MacUpdate(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+unsigned int FirstMac, /* Index of the first Mac to be updated */
+unsigned int LastMac) /* Index of the last Mac to be updated */
+{
+ unsigned int MacIndex;
+
+ /*
+ * Were the statistics already updated during the
+ * current PNMI call?
+ */
+ if (pAC->Pnmi.MacUpdatedFlag > 0) {
+
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /* Send an update command to all MACs specified */
+ for (MacIndex = FirstMac; MacIndex <= LastMac; MacIndex ++) {
+
+ /*
+ * 2002-09-13 pweber: Freeze the current SW counters.
+ * (That should be done as close as
+ * possible to the update of the
+ * HW counters)
+ */
+ if (pAC->GIni.GIMacType == SK_MAC_XMAC) {
+ pAC->Pnmi.BufPort[MacIndex] = pAC->Pnmi.Port[MacIndex];
+ }
+
+ /* 2002-09-13 pweber: Update the HW counter */
+ if (pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, MacIndex) != 0) {
+
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ }
+
+ return (SK_PNMI_ERR_OK);
+}
+
+/*****************************************************************************
+ *
+ * GetStatVal - Retrieve an XMAC statistic counter
+ *
+ * Description:
+ * Retrieves the statistic counter of a virtual or physical port. The
+ * virtual port is identified by the index 0. It consists of all
+ * currently active ports. To obtain the counter value for this port
+ * we must add the statistic counter of all active ports. To grant
+ *	we must add the statistic counters of all active ports. To guarantee
+ * switches occur we must additionally add a delta value, which was
+ * calculated during a SK_PNMI_EVT_RLMT_ACTIVE_UP event.
+ *
+ * Returns:
+ * Requested statistic value
+ */
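+/*
+ * In other words, in single net mode the virtual port (LogPortIndex 0)
+ * reports
+ *
+ *	Val = Sum of GetPhysStatVal() over all active physical ports
+ *	      + VirtualCounterOffset[StatIndex]
+ *
+ * while every other logical port maps directly to one physical port.
+ */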
+PNMI_STATIC SK_U64 GetStatVal(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+unsigned int LogPortIndex, /* Index of the logical Port to be processed */
+unsigned int StatIndex, /* Index to statistic value */
+SK_U32 NetIndex)	/* NetIndex (0..n), in single net mode always zero */
+{
+ unsigned int PhysPortIndex;
+ unsigned int PhysPortMax;
+ SK_U64 Val = 0;
+
+
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
+
+ PhysPortIndex = NetIndex;
+
+ Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
+ }
+ else { /* Single Net mode */
+
+ if (LogPortIndex == 0) {
+
+ PhysPortMax = pAC->GIni.GIMacsFound;
+
+ /* Add counter of all active ports */
+ for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
+ PhysPortIndex ++) {
+
+ if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
+
+ Val += GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
+ }
+ }
+
+ /* Correct value because of port switches */
+ Val += pAC->Pnmi.VirtualCounterOffset[StatIndex];
+ }
+ else {
+ /* Get counter value of physical port */
+ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
+
+ Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
+ }
+ }
+ return (Val);
+}
+
+/*****************************************************************************
+ *
+ * GetPhysStatVal - Get counter value for physical port
+ *
+ * Description:
+ * Builds a 64bit counter value. Except for the octet counters
+ * the lower 32bit are counted in hardware and the upper 32bit
+ * in software by monitoring counter overflow interrupts in the
+ *	event handler. To guarantee continuous counter values during XMAC
+ * resets (caused by a workaround) we must add a delta value.
+ * The delta was calculated in the event handler when a
+ * SK_PNMI_EVT_XMAC_RESET was received.
+ *
+ * Returns:
+ * Counter value
+ */
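+/*
+ * For most counters the value assembled below is essentially
+ *
+ *	Val  = ((SK_U64)CounterHigh[StatIndex] << 32) | LowVal;
+ *	Val += CounterOffset[StatIndex];
+ *
+ * where LowVal is read from the MAC via pFnMacStatistic() and the offset
+ * compensates for XMAC resets. The (bad) octet counters are the exception:
+ * both 32 bit halves are read from hardware registers.
+ */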
+PNMI_STATIC SK_U64 GetPhysStatVal(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+unsigned int PhysPortIndex, /* Index of the physical port to be processed */
+unsigned int StatIndex) /* Index to statistic value */
+{
+ SK_U64 Val = 0;
+ SK_U32 LowVal = 0;
+ SK_U32 HighVal = 0;
+ SK_U16 Word;
+ int MacType;
+ unsigned int HelpIndex;
+ SK_GEPORT *pPrt;
+
+ SK_PNMI_PORT *pPnmiPrt;
+ SK_GEMACFUNC *pFnMac;
+
+ pPrt = &pAC->GIni.GP[PhysPortIndex];
+
+ MacType = pAC->GIni.GIMacType;
+
+ /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
+ if (MacType == SK_MAC_XMAC) {
+ pPnmiPrt = &pAC->Pnmi.BufPort[PhysPortIndex];
+ }
+ else {
+ pPnmiPrt = &pAC->Pnmi.Port[PhysPortIndex];
+ }
+
+ pFnMac = &pAC->GIni.GIFunc;
+
+ switch (StatIndex) {
+ case SK_PNMI_HTX:
+ if (MacType == SK_MAC_GMAC) {
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HTX_BROADCAST][MacType].Reg,
+ &LowVal);
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HTX_MULTICAST][MacType].Reg,
+ &HighVal);
+ LowVal += HighVal;
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HTX_UNICAST][MacType].Reg,
+ &HighVal);
+ LowVal += HighVal;
+ }
+ else {
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ }
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HRX:
+ if (MacType == SK_MAC_GMAC) {
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HRX_BROADCAST][MacType].Reg,
+ &LowVal);
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HRX_MULTICAST][MacType].Reg,
+ &HighVal);
+ LowVal += HighVal;
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[SK_PNMI_HRX_UNICAST][MacType].Reg,
+ &HighVal);
+ LowVal += HighVal;
+ }
+ else {
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ }
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HTX_OCTET:
+ case SK_PNMI_HRX_OCTET:
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &HighVal);
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex + 1][MacType].Reg,
+ &LowVal);
+ break;
+
+ case SK_PNMI_HTX_BURST:
+ case SK_PNMI_HTX_EXCESS_DEF:
+ case SK_PNMI_HTX_CARRIER:
+ /* Not supported by GMAC */
+ if (MacType == SK_MAC_GMAC) {
+ return (Val);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HTX_MACC:
+ /* GMAC only supports PAUSE MAC control frames */
+ if (MacType == SK_MAC_GMAC) {
+ HelpIndex = SK_PNMI_HTX_PMACC;
+ }
+ else {
+ HelpIndex = StatIndex;
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[HelpIndex][MacType].Reg,
+ &LowVal);
+
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HTX_COL:
+ case SK_PNMI_HRX_UNDERSIZE:
+ /* Not supported by XMAC */
+ if (MacType == SK_MAC_XMAC) {
+ return (Val);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HTX_DEFFERAL:
+ /* Not supported by GMAC */
+ if (MacType == SK_MAC_GMAC) {
+ return (Val);
+ }
+
+ /*
+ * XMAC counts frames with deferred transmission
+ * even in full-duplex mode.
+ *
+ * In full-duplex mode the counter remains constant!
+ */
+ if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) ||
+ (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL)) {
+
+ LowVal = 0;
+ HighVal = 0;
+ }
+ else {
+ /* Otherwise get contents of hardware register */
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ }
+ break;
+
+ case SK_PNMI_HRX_BADOCTET:
+ /* Not supported by XMAC */
+ if (MacType == SK_MAC_XMAC) {
+ return (Val);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &HighVal);
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex + 1][MacType].Reg,
+ &LowVal);
+ break;
+
+ case SK_PNMI_HTX_OCTETLOW:
+ case SK_PNMI_HRX_OCTETLOW:
+ case SK_PNMI_HRX_BADOCTETLOW:
+ return (Val);
+
+ case SK_PNMI_HRX_LONGFRAMES:
+ /* For XMAC the SW counter is managed by PNMI */
+ if (MacType == SK_MAC_XMAC) {
+ return (pPnmiPrt->StatRxLongFrameCts);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HRX_TOO_LONG:
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+
+ Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
+
+ if (MacType == SK_MAC_GMAC) {
+ /* For GMAC the SW counter is additionally managed by PNMI */
+ Val += pPnmiPrt->StatRxFrameTooLongCts;
+ }
+ else {
+ /*
+ * Frames longer than IEEE 802.3 frame max size are counted
+			 * by XMAC in the frame_too_long counter even if reception
+			 * of long frames was enabled and the frame was correct.
+ * So correct the value by subtracting RxLongFrame counter.
+ */
+ Val -= pPnmiPrt->StatRxLongFrameCts;
+ }
+
+ LowVal = (SK_U32)Val;
+ HighVal = (SK_U32)(Val >> 32);
+ break;
+
+ case SK_PNMI_HRX_SHORTS:
+ /* Not supported by GMAC */
+ if (MacType == SK_MAC_GMAC) {
+ /* GM_RXE_FRAG?? */
+ return (Val);
+ }
+
+ /*
+ * XMAC counts short frame errors even if link down (#10620)
+ *
+			 * If the link is down, the counter remains constant.
+ */
+ if (pPrt->PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) {
+
+ /* Otherwise get incremental difference */
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+
+ Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
+ Val -= pPnmiPrt->RxShortZeroMark;
+
+ LowVal = (SK_U32)Val;
+ HighVal = (SK_U32)(Val >> 32);
+ }
+ break;
+
+ case SK_PNMI_HRX_MACC:
+ case SK_PNMI_HRX_MACC_UNKWN:
+ case SK_PNMI_HRX_BURST:
+ case SK_PNMI_HRX_MISSED:
+ case SK_PNMI_HRX_FRAMING:
+ case SK_PNMI_HRX_CARRIER:
+ case SK_PNMI_HRX_IRLENGTH:
+ case SK_PNMI_HRX_SYMBOL:
+ case SK_PNMI_HRX_CEXT:
+ /* Not supported by GMAC */
+ if (MacType == SK_MAC_GMAC) {
+ return (Val);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ case SK_PNMI_HRX_PMACC_ERR:
+ /* For GMAC the SW counter is managed by PNMI */
+ if (MacType == SK_MAC_GMAC) {
+ return (pPnmiPrt->StatRxPMaccErr);
+ }
+
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+
+ /* SW counter managed by PNMI */
+ case SK_PNMI_HTX_SYNC:
+ LowVal = (SK_U32)pPnmiPrt->StatSyncCts;
+ HighVal = (SK_U32)(pPnmiPrt->StatSyncCts >> 32);
+ break;
+
+ /* SW counter managed by PNMI */
+ case SK_PNMI_HTX_SYNC_OCTET:
+ LowVal = (SK_U32)pPnmiPrt->StatSyncOctetsCts;
+ HighVal = (SK_U32)(pPnmiPrt->StatSyncOctetsCts >> 32);
+ break;
+
+ case SK_PNMI_HRX_FCS:
+ /*
+ * Broadcom filters FCS errors and counts it in
+ * Receive Error Counter register
+ */
+ if (pPrt->PhyType == SK_PHY_BCOM) {
+ /* do not read while not initialized (PHY_READ hangs!)*/
+ if (pPrt->PState != SK_PRT_RESET) {
+ SkXmPhyRead(pAC, IoC, PhysPortIndex, PHY_BCOM_RE_CTR, &Word);
+
+ LowVal = Word;
+ }
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ }
+ else {
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ }
+ break;
+
+ default:
+ (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
+ StatAddr[StatIndex][MacType].Reg,
+ &LowVal);
+ HighVal = pPnmiPrt->CounterHigh[StatIndex];
+ break;
+ }
+
+ Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
+
+ /* Correct value because of possible XMAC reset. XMAC Errata #2 */
+ Val += pPnmiPrt->CounterOffset[StatIndex];
+
+ return (Val);
+}
+
+/*****************************************************************************
+ *
+ * ResetCounter - Set all counters and timestamps to zero
+ *
+ * Description:
+ * Notifies other common modules which store statistic data to
+ * reset their counters and finally reset our own counters.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void ResetCounter(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+SK_U32 NetIndex)
+{
+ unsigned int PhysPortIndex;
+ SK_EVPARA EventParam;
+
+
+ SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
+
+ /* Notify sensor module */
+ SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_CLEAR, EventParam);
+
+ /* Notify RLMT module */
+ EventParam.Para32[0] = NetIndex;
+ EventParam.Para32[1] = (SK_U32)-1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STATS_CLEAR, EventParam);
+ EventParam.Para32[1] = 0;
+
+ /* Notify SIRQ module */
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_CLEAR_STAT, EventParam);
+
+ /* Notify CSUM module */
+#ifdef SK_USE_CSUM
+ EventParam.Para32[0] = NetIndex;
+ EventParam.Para32[1] = (SK_U32)-1;
+ SkEventQueue(pAC, SKGE_CSUM, SK_CSUM_EVENT_CLEAR_PROTO_STATS,
+ EventParam);
+#endif /* SK_USE_CSUM */
+
+ /* Clear XMAC statistic */
+ for (PhysPortIndex = 0; PhysPortIndex <
+ (unsigned int)pAC->GIni.GIMacsFound; PhysPortIndex ++) {
+
+ (void)pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PhysPortIndex);
+
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].CounterHigh,
+ 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].CounterHigh));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
+ CounterOffset, 0, sizeof(pAC->Pnmi.Port[
+ PhysPortIndex].CounterOffset));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].StatSyncCts,
+ 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].StatSyncCts));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
+ StatSyncOctetsCts, 0, sizeof(pAC->Pnmi.Port[
+ PhysPortIndex].StatSyncOctetsCts));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
+ StatRxLongFrameCts, 0, sizeof(pAC->Pnmi.Port[
+ PhysPortIndex].StatRxLongFrameCts));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
+ StatRxFrameTooLongCts, 0, sizeof(pAC->Pnmi.Port[
+ PhysPortIndex].StatRxFrameTooLongCts));
+ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
+ StatRxPMaccErr, 0, sizeof(pAC->Pnmi.Port[
+ PhysPortIndex].StatRxPMaccErr));
+ }
+
+ /*
+ * Clear local statistics
+ */
+ SK_MEMSET((char *)&pAC->Pnmi.VirtualCounterOffset, 0,
+ sizeof(pAC->Pnmi.VirtualCounterOffset));
+ pAC->Pnmi.RlmtChangeCts = 0;
+ pAC->Pnmi.RlmtChangeTime = 0;
+ SK_MEMSET((char *)&pAC->Pnmi.RlmtChangeEstimate.EstValue[0], 0,
+ sizeof(pAC->Pnmi.RlmtChangeEstimate.EstValue));
+ pAC->Pnmi.RlmtChangeEstimate.EstValueIndex = 0;
+ pAC->Pnmi.RlmtChangeEstimate.Estimate = 0;
+ pAC->Pnmi.Port[NetIndex].TxSwQueueMax = 0;
+ pAC->Pnmi.Port[NetIndex].TxRetryCts = 0;
+ pAC->Pnmi.Port[NetIndex].RxIntrCts = 0;
+ pAC->Pnmi.Port[NetIndex].TxIntrCts = 0;
+ pAC->Pnmi.Port[NetIndex].RxNoBufCts = 0;
+ pAC->Pnmi.Port[NetIndex].TxNoBufCts = 0;
+ pAC->Pnmi.Port[NetIndex].TxUsedDescrNo = 0;
+ pAC->Pnmi.Port[NetIndex].RxDeliveredCts = 0;
+ pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts = 0;
+ pAC->Pnmi.Port[NetIndex].ErrRecoveryCts = 0;
+}
+
+/*****************************************************************************
+ *
+ * GetTrapEntry - Get an entry in the trap buffer
+ *
+ * Description:
+ * The trap buffer stores various events. A user application somehow
+ *	gets notified that an event occurred and retrieves the trap buffer
+ *	contents (or simply polls the buffer). The buffer is organized as
+ * a ring which stores the newest traps at the beginning. The oldest
+ * traps are overwritten by the newest ones. Each trap entry has a
+ * unique number, so that applications may detect new trap entries.
+ *
+ * Returns:
+ * A pointer to the trap entry
+ */
+PNMI_STATIC char* GetTrapEntry(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_U32 TrapId, /* SNMP ID of the trap */
+unsigned int Size) /* Space needed for trap entry */
+{
+ unsigned int BufPad = pAC->Pnmi.TrapBufPad;
+ unsigned int BufFree = pAC->Pnmi.TrapBufFree;
+ unsigned int Beg = pAC->Pnmi.TrapQueueBeg;
+ unsigned int End = pAC->Pnmi.TrapQueueEnd;
+ char *pBuf = &pAC->Pnmi.TrapBuf[0];
+ int Wrap;
+ unsigned int NeededSpace;
+ unsigned int EntrySize;
+ SK_U32 Val32;
+ SK_U64 Val64;
+
+
+ /* Last byte of entry will get a copy of the entry length */
+ Size ++;
+
+	/* Calculate needed buffer space */
+ if (Beg >= Size) {
+
+ NeededSpace = Size;
+ Wrap = SK_FALSE;
+ }
+ else {
+ NeededSpace = Beg + Size;
+ Wrap = SK_TRUE;
+ }
+
+ /*
+ * Check if enough buffer space is provided. Otherwise
+ * free some entries. Leave one byte space between begin
+ * and end of buffer to make it possible to detect whether
+ * the buffer is full or empty
+ */
+ while (BufFree < NeededSpace + 1) {
+
+ if (End == 0) {
+
+ End = SK_PNMI_TRAP_QUEUE_LEN;
+ }
+
+ EntrySize = (unsigned int)*((unsigned char *)pBuf + End - 1);
+ BufFree += EntrySize;
+ End -= EntrySize;
+#ifdef DEBUG
+ SK_MEMSET(pBuf + End, (char)(-1), EntrySize);
+#endif /* DEBUG */
+ if (End == BufPad) {
+#ifdef DEBUG
+ SK_MEMSET(pBuf, (char)(-1), End);
+#endif /* DEBUG */
+ BufFree += End;
+ End = 0;
+ BufPad = 0;
+ }
+ }
+
+ /*
+ * Insert new entry as first entry. Newest entries are
+ * stored at the beginning of the queue.
+ */
+ if (Wrap) {
+
+ BufPad = Beg;
+ Beg = SK_PNMI_TRAP_QUEUE_LEN - Size;
+ }
+ else {
+ Beg = Beg - Size;
+ }
+ BufFree -= NeededSpace;
+
+ /* Save the current offsets */
+ pAC->Pnmi.TrapQueueBeg = Beg;
+ pAC->Pnmi.TrapQueueEnd = End;
+ pAC->Pnmi.TrapBufPad = BufPad;
+ pAC->Pnmi.TrapBufFree = BufFree;
+
+ /* Initialize the trap entry */
+ *(pBuf + Beg + Size - 1) = (char)Size;
+ *(pBuf + Beg) = (char)Size;
+ Val32 = (pAC->Pnmi.TrapUnique) ++;
+ SK_PNMI_STORE_U32(pBuf + Beg + 1, Val32);
+ SK_PNMI_STORE_U32(pBuf + Beg + 1 + sizeof(SK_U32), TrapId);
+ Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
+ SK_PNMI_STORE_U64(pBuf + Beg + 1 + 2 * sizeof(SK_U32), Val64);
+
+ return (pBuf + Beg);
+}
+
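+/*
+ * For reference, a trap entry as built above is laid out as follows
+ * (offsets relative to the entry start; sizes follow from the stores above):
+ *
+ *	Offset 0:		entry length (1 byte, including the trailing copy)
+ *	Offset 1:		unique trap number (SK_U32)
+ *	Offset 5:		trap ID (SK_U32)
+ *	Offset 9:		timestamp in hundredths of seconds (SK_U64)
+ *	Offset 17:		trap specific data, if any
+ *	Offset length - 1:	copy of the entry length (1 byte)
+ *
+ * The trailing length copy is what allows the while loop above to free the
+ * oldest entries by walking backwards from the end of the queue.
+ */
+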
+/*****************************************************************************
+ *
+ * CopyTrapQueue - Copies the trap buffer for the TRAP OID
+ *
+ * Description:
+ * On a query of the TRAP OID the trap buffer contents will be
+ * copied continuously to the request buffer, which must be large
+ * enough. No length check is performed.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void CopyTrapQueue(
+SK_AC *pAC, /* Pointer to adapter context */
+char *pDstBuf) /* Buffer to which the queued traps will be copied */
+{
+ unsigned int BufPad = pAC->Pnmi.TrapBufPad;
+ unsigned int Trap = pAC->Pnmi.TrapQueueBeg;
+ unsigned int End = pAC->Pnmi.TrapQueueEnd;
+ char *pBuf = &pAC->Pnmi.TrapBuf[0];
+ unsigned int Len;
+ unsigned int DstOff = 0;
+
+
+ while (Trap != End) {
+
+ Len = (unsigned int)*(pBuf + Trap);
+
+ /*
+ * Last byte containing a copy of the length will
+ * not be copied.
+ */
+ *(pDstBuf + DstOff) = (char)(Len - 1);
+ SK_MEMCPY(pDstBuf + DstOff + 1, pBuf + Trap + 1, Len - 2);
+ DstOff += Len - 1;
+
+ Trap += Len;
+ if (Trap == SK_PNMI_TRAP_QUEUE_LEN) {
+
+ Trap = BufPad;
+ }
+ }
+}
+
+/*****************************************************************************
+ *
+ * GetTrapQueueLen - Get the length of the trap buffer
+ *
+ * Description:
+ * Evaluates the number of currently stored traps and the needed
+ * buffer size to retrieve them.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void GetTrapQueueLen(
+SK_AC *pAC, /* Pointer to adapter context */
+unsigned int *pLen, /* Length in Bytes of all queued traps */
+unsigned int *pEntries) /* Returns number of traps stored in queue */
+{
+ unsigned int BufPad = pAC->Pnmi.TrapBufPad;
+ unsigned int Trap = pAC->Pnmi.TrapQueueBeg;
+ unsigned int End = pAC->Pnmi.TrapQueueEnd;
+ char *pBuf = &pAC->Pnmi.TrapBuf[0];
+ unsigned int Len;
+ unsigned int Entries = 0;
+ unsigned int TotalLen = 0;
+
+
+ while (Trap != End) {
+
+ Len = (unsigned int)*(pBuf + Trap);
+ TotalLen += Len - 1;
+ Entries ++;
+
+ Trap += Len;
+ if (Trap == SK_PNMI_TRAP_QUEUE_LEN) {
+
+ Trap = BufPad;
+ }
+ }
+
+ *pEntries = Entries;
+ *pLen = TotalLen;
+}
+
+/*****************************************************************************
+ *
+ * QueueSimpleTrap - Store a simple trap to the trap buffer
+ *
+ * Description:
+ *	A simple trap is a trap with no additional data. It consists
+ * simply of a trap code.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void QueueSimpleTrap(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_U32 TrapId) /* Type of sensor trap */
+{
+ GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_SIMPLE_LEN);
+}
+
+/*****************************************************************************
+ *
+ * QueueSensorTrap - Stores a sensor trap in the trap buffer
+ *
+ * Description:
+ * Gets an entry in the trap buffer and fills it with sensor related
+ * data.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void QueueSensorTrap(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_U32 TrapId, /* Type of sensor trap */
+unsigned int SensorIndex) /* Index of sensor which caused the trap */
+{
+ char *pBuf;
+ unsigned int Offset;
+ unsigned int DescrLen;
+ SK_U32 Val32;
+
+
+ /* Get trap buffer entry */
+ DescrLen = SK_STRLEN(pAC->I2c.SenTable[SensorIndex].SenDesc);
+ pBuf = GetTrapEntry(pAC, TrapId,
+ SK_PNMI_TRAP_SENSOR_LEN_BASE + DescrLen);
+ Offset = SK_PNMI_TRAP_SIMPLE_LEN;
+
+	/* Additionally store the sensor trap related data */
+ Val32 = OID_SKGE_SENSOR_INDEX;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ *(pBuf + Offset + 4) = 4;
+ Val32 = (SK_U32)SensorIndex;
+ SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32);
+ Offset += 9;
+
+ Val32 = (SK_U32)OID_SKGE_SENSOR_DESCR;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ *(pBuf + Offset + 4) = (char)DescrLen;
+ SK_MEMCPY(pBuf + Offset + 5, pAC->I2c.SenTable[SensorIndex].SenDesc,
+ DescrLen);
+ Offset += DescrLen + 5;
+
+ Val32 = OID_SKGE_SENSOR_TYPE;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ *(pBuf + Offset + 4) = 1;
+ *(pBuf + Offset + 5) = (char)pAC->I2c.SenTable[SensorIndex].SenType;
+ Offset += 6;
+
+ Val32 = OID_SKGE_SENSOR_VALUE;
+ SK_PNMI_STORE_U32(pBuf + Offset, Val32);
+ *(pBuf + Offset + 4) = 4;
+ Val32 = (SK_U32)pAC->I2c.SenTable[SensorIndex].SenValue;
+ SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32);
+}
+
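+/*
+ * The sensor specific part appended above is a sequence of
+ * (OID, length, value) triples:
+ *
+ *	OID_SKGE_SENSOR_INDEX (SK_U32), length 4, sensor index (SK_U32)
+ *	OID_SKGE_SENSOR_DESCR (SK_U32), length n, description string
+ *	OID_SKGE_SENSOR_TYPE  (SK_U32), length 1, sensor type
+ *	OID_SKGE_SENSOR_VALUE (SK_U32), length 4, sensor value (SK_U32)
+ */
+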
+/*****************************************************************************
+ *
+ * QueueRlmtNewMacTrap - Store a port switch trap in the trap buffer
+ *
+ * Description:
+ * Nothing further to explain.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void QueueRlmtNewMacTrap(
+SK_AC *pAC, /* Pointer to adapter context */
+unsigned int ActiveMac) /* Index (0..n) of the currently active port */
+{
+ char *pBuf;
+ SK_U32 Val32;
+
+
+ pBuf = GetTrapEntry(pAC, OID_SKGE_TRAP_RLMT_CHANGE_PORT,
+ SK_PNMI_TRAP_RLMT_CHANGE_LEN);
+
+ Val32 = OID_SKGE_RLMT_PORT_ACTIVE;
+ SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32);
+ *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1;
+ *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)ActiveMac;
+}
+
+/*****************************************************************************
+ *
+ * QueueRlmtPortTrap - Store port related RLMT trap to trap buffer
+ *
+ * Description:
+ * Nothing further to explain.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void QueueRlmtPortTrap(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_U32 TrapId, /* Type of RLMT port trap */
+unsigned int PortIndex) /* Index of the port, which changed its state */
+{
+ char *pBuf;
+ SK_U32 Val32;
+
+
+ pBuf = GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_RLMT_PORT_LEN);
+
+ Val32 = OID_SKGE_RLMT_PORT_INDEX;
+ SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32);
+ *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1;
+ *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)PortIndex;
+}
+
+/*****************************************************************************
+ *
+ * CopyMac - Copies a MAC address
+ *
+ * Description:
+ * Nothing further to explain.
+ *
+ * Returns:
+ * Nothing
+ */
+PNMI_STATIC void CopyMac(
+char *pDst, /* Pointer to destination buffer */
+SK_MAC_ADDR *pMac) /* Pointer of Source */
+{
+ int i;
+
+
+ for (i = 0; i < sizeof(SK_MAC_ADDR); i ++) {
+
+ *(pDst + i) = pMac->a[i];
+ }
+}
+
+#ifdef SK_POWER_MGMT
+/*****************************************************************************
+ *
+ * PowerManagement - OID handler function of PowerManagement OIDs
+ *
+ * Description:
+ * The code is simple. No description necessary.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *                           adapter).
+ */
+
+PNMI_STATIC int PowerManagement(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* Get/PreSet/Set action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer to which to mgmt data will be retrieved */
+unsigned int *pLen, /* On call: buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */
+{
+
+ SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
+
+ /*
+ * Check instance. We only handle single instance variables
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+
+ /* Check length */
+ switch (Id) {
+
+ case OID_PNP_CAPABILITIES:
+ if (*pLen < sizeof(SK_PNP_CAPABILITIES)) {
+
+ *pLen = sizeof(SK_PNP_CAPABILITIES);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_PNP_SET_POWER:
+ case OID_PNP_QUERY_POWER:
+ if (*pLen < sizeof(SK_DEVICE_POWER_STATE))
+ {
+ *pLen = sizeof(SK_DEVICE_POWER_STATE);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_PNP_ADD_WAKE_UP_PATTERN:
+ case OID_PNP_REMOVE_WAKE_UP_PATTERN:
+ if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) {
+
+ *pLen = sizeof(SK_PM_PACKET_PATTERN);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_PNP_ENABLE_WAKE_UP:
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+ }
+
+ /*
+ * Perform action
+ */
+ if (Action == SK_PNMI_GET) {
+
+ /*
+ * Get value
+ */
+ switch (Id) {
+
+ case OID_PNP_CAPABILITIES:
+ RetCode = SkPowerQueryPnPCapabilities(pAC, IoC, pBuf, pLen);
+ break;
+
+ case OID_PNP_QUERY_POWER:
+ /* The Windows DDK describes: An OID_PNP_QUERY_POWER requests
+ the miniport to indicate whether it can transition its NIC
+ to the low-power state.
+ A miniport driver must always return NDIS_STATUS_SUCCESS
+ to a query of OID_PNP_QUERY_POWER. */
+ *pLen = sizeof(SK_DEVICE_POWER_STATE);
+ RetCode = SK_PNMI_ERR_OK;
+ break;
+
+ /* NDIS handles these OIDs as write-only.
+		 * So for a GET action the buffer is returned with a
+		 * written length of 0.
+ */
+ case OID_PNP_SET_POWER:
+ case OID_PNP_ADD_WAKE_UP_PATTERN:
+ case OID_PNP_REMOVE_WAKE_UP_PATTERN:
+ *pLen = 0;
+ RetCode = SK_PNMI_ERR_NOT_SUPPORTED;
+ break;
+
+ case OID_PNP_ENABLE_WAKE_UP:
+ RetCode = SkPowerGetEnableWakeUp(pAC, IoC, pBuf, pLen);
+ break;
+
+ default:
+ RetCode = SK_PNMI_ERR_GENERAL;
+ break;
+ }
+
+ return (RetCode);
+ }
+
+
+ /*
+ * Perform preset or set
+ */
+
+ /* POWER module does not support PRESET action */
+ if (Action == SK_PNMI_PRESET) {
+ return (SK_PNMI_ERR_OK);
+ }
+
+ switch (Id) {
+ case OID_PNP_SET_POWER:
+ RetCode = SkPowerSetPower(pAC, IoC, pBuf, pLen);
+ break;
+
+ case OID_PNP_ADD_WAKE_UP_PATTERN:
+ RetCode = SkPowerAddWakeUpPattern(pAC, IoC, pBuf, pLen);
+ break;
+
+ case OID_PNP_REMOVE_WAKE_UP_PATTERN:
+ RetCode = SkPowerRemoveWakeUpPattern(pAC, IoC, pBuf, pLen);
+ break;
+
+ case OID_PNP_ENABLE_WAKE_UP:
+ RetCode = SkPowerSetEnableWakeUp(pAC, IoC, pBuf, pLen);
+ break;
+
+ default:
+ RetCode = SK_PNMI_ERR_READ_ONLY;
+ }
+
+ return (RetCode);
+}
+#endif /* SK_POWER_MGMT */
+
+#ifdef SK_DIAG_SUPPORT
+/*****************************************************************************
+ *
+ * DiagActions - OID handler function of Diagnostic driver
+ *
+ * Description:
+ * The code is simple. No description necessary.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ *                           adapter).
+ */
+
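+/*
+ * The DIAG mode set below implements a small state machine on
+ * pAC->Pnmi.DiagAttached; the transitions accepted by the SET handler are:
+ *
+ *	SK_DIAG_IDLE     -> SK_DIAG_ATTACHED                  (no driver action)
+ *	SK_DIAG_ATTACHED -> SK_DIAG_RUNNING                   (SkDrvEnterDiagMode)
+ *	SK_DIAG_RUNNING  -> SK_DIAG_ATTACHED or SK_DIAG_IDLE  (SkDrvLeaveDiagMode)
+ *	SK_DIAG_ATTACHED -> SK_DIAG_IDLE                      (no driver action)
+ *
+ * All other combinations are rejected with SK_PNMI_ERR_GENERAL or
+ * SK_PNMI_ERR_BAD_VALUE.
+ */
+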
+PNMI_STATIC int DiagActions(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+
+ SK_U32 DiagStatus;
+ SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
+
+ /*
+ * Check instance. We only handle single instance variables.
+ */
+ if (Instance != (SK_U32)(-1) && Instance != 1) {
+
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ /*
+ * Check length.
+ */
+ switch (Id) {
+
+ case OID_SKGE_DIAG_MODE:
+ if (*pLen < sizeof(SK_U32)) {
+
+ *pLen = sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, SK_PNMI_ERR040MSG);
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /* Perform action. */
+
+ /* GET value. */
+ if (Action == SK_PNMI_GET) {
+
+ switch (Id) {
+
+ case OID_SKGE_DIAG_MODE:
+ DiagStatus = pAC->Pnmi.DiagAttached;
+ SK_PNMI_STORE_U32(pBuf, DiagStatus);
+ *pLen = sizeof(SK_U32);
+ RetCode = SK_PNMI_ERR_OK;
+ break;
+
+ default:
+ *pLen = 0;
+ RetCode = SK_PNMI_ERR_GENERAL;
+ break;
+ }
+ return (RetCode);
+ }
+
+ /* From here SET or PRESET value. */
+
+ /* PRESET value is not supported. */
+ if (Action == SK_PNMI_PRESET) {
+ return (SK_PNMI_ERR_OK);
+ }
+
+ /* SET value. */
+ switch (Id) {
+ case OID_SKGE_DIAG_MODE:
+
+ /* Handle the SET. */
+ switch (*pBuf) {
+
+ /* Attach the DIAG to this adapter. */
+ case SK_DIAG_ATTACHED:
+ /* Check if we come from running */
+ if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
+
+ RetCode = SkDrvLeaveDiagMode(pAC);
+
+ }
+ else if (pAC->Pnmi.DiagAttached == SK_DIAG_IDLE) {
+
+ RetCode = SK_PNMI_ERR_OK;
+ }
+
+ else {
+
+ RetCode = SK_PNMI_ERR_GENERAL;
+
+ }
+
+ if (RetCode == SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.DiagAttached = SK_DIAG_ATTACHED;
+ }
+ break;
+
+ /* Enter the DIAG mode in the driver. */
+ case SK_DIAG_RUNNING:
+ RetCode = SK_PNMI_ERR_OK;
+
+ /*
+ * If DiagAttached is set, we can tell the driver
+ * to enter the DIAG mode.
+ */
+ if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) {
+ /* If DiagMode is not active, we can enter it. */
+ if (!pAC->DiagModeActive) {
+
+ RetCode = SkDrvEnterDiagMode(pAC);
+ }
+ else {
+
+ RetCode = SK_PNMI_ERR_GENERAL;
+ }
+ }
+ else {
+
+ RetCode = SK_PNMI_ERR_GENERAL;
+ }
+
+ if (RetCode == SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.DiagAttached = SK_DIAG_RUNNING;
+ }
+ break;
+
+ case SK_DIAG_IDLE:
+ /* Check if we come from running */
+ if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
+
+ RetCode = SkDrvLeaveDiagMode(pAC);
+
+ }
+ else if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) {
+
+ RetCode = SK_PNMI_ERR_OK;
+ }
+
+ else {
+
+ RetCode = SK_PNMI_ERR_GENERAL;
+
+ }
+
+ if (RetCode == SK_PNMI_ERR_OK) {
+
+ pAC->Pnmi.DiagAttached = SK_DIAG_IDLE;
+ }
+ break;
+
+ default:
+ RetCode = SK_PNMI_ERR_BAD_VALUE;
+ break;
+ }
+ break;
+
+ default:
+ RetCode = SK_PNMI_ERR_GENERAL;
+ }
+
+ if (RetCode == SK_PNMI_ERR_OK) {
+ *pLen = sizeof(SK_U32);
+ }
+ else {
+
+ *pLen = 0;
+ }
+ return (RetCode);
+}
+#endif /* SK_DIAG_SUPPORT */
+
+/*****************************************************************************
+ *
+ * Vct - OID handler function of the VCT OIDs
+ *
+ * Description:
+ * The code is simple. No description necessary.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was performed successfully.
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
+ * the correct data (e.g. a 32bit value is
+ * needed, but a 16 bit value was passed).
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ * adapter).
+ * SK_PNMI_ERR_READ_ONLY Only the Get action is allowed.
+ *
+ */
+
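+/*
+ * For OID_SKGE_VCT_GET one record per requested port is written to pBuf:
+ * a VCT status byte, the cable length byte (PCableLen), four SK_U32 MDI
+ * pair lengths and four SK_U8 MDI pair states. This should correspond to
+ * the SK_PNMI_VCT structure used in the length check below.
+ */
+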
+PNMI_STATIC int Vct(
+SK_AC *pAC, /* Pointer to adapter context */
+SK_IOC IoC, /* IO context handle */
+int Action, /* GET/PRESET/SET action */
+SK_U32 Id, /* Object ID that is to be processed */
+char *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
+SK_U32 Instance, /* Instance (-1,2..n) that is to be queried */
+unsigned int TableIndex, /* Index to the Id table */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+ SK_GEPORT *pPrt;
+ SK_PNMI_VCT *pVctBackupData;
+ SK_U32 LogPortMax;
+ SK_U32 PhysPortMax;
+ SK_U32 PhysPortIndex;
+ SK_U32 Limit;
+ SK_U32 Offset;
+ SK_BOOL Link;
+ SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
+ int i;
+ SK_EVPARA Para;
+ SK_U32 CableLength;
+
+ /*
+ * Calculate the port indexes from the instance.
+ */
+ PhysPortMax = pAC->GIni.GIMacsFound;
+ LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
+
+ /* Dual net mode? */
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ LogPortMax--;
+ }
+
+ if ((Instance != (SK_U32) (-1))) {
+ /* Check instance range. */
+ if ((Instance < 2) || (Instance > LogPortMax)) {
+ *pLen = 0;
+ return (SK_PNMI_ERR_UNKNOWN_INST);
+ }
+
+ if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
+ PhysPortIndex = NetIndex;
+ }
+ else {
+ PhysPortIndex = Instance - 2;
+ }
+ Limit = PhysPortIndex + 1;
+ }
+ else {
+ /*
+ * Instance == (SK_U32) (-1), get all Instances of that OID.
+ *
+ * Not implemented yet. May be used in future releases.
+ */
+ PhysPortIndex = 0;
+ Limit = PhysPortMax;
+ }
+
+ pPrt = &pAC->GIni.GP[PhysPortIndex];
+ if (pPrt->PHWLinkUp) {
+ Link = SK_TRUE;
+ }
+ else {
+ Link = SK_FALSE;
+ }
+
+	/* Check PHY type: VCT is only supported on the Marvell copper PHY */
+ if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /* Initialize backup data pointer. */
+ pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex];
+
+ /* Check action type */
+ if (Action == SK_PNMI_GET) {
+ /* Check length */
+ switch (Id) {
+
+ case OID_SKGE_VCT_GET:
+ if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT)) {
+ *pLen = (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ case OID_SKGE_VCT_STATUS:
+ if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U8)) {
+ *pLen = (Limit - PhysPortIndex) * sizeof(SK_U8);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /* Get value */
+ Offset = 0;
+ for (; PhysPortIndex < Limit; PhysPortIndex++) {
+ switch (Id) {
+
+ case OID_SKGE_VCT_GET:
+ if ((Link == SK_FALSE) &&
+ (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING)) {
+ RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE);
+ if (RetCode == 0) {
+ pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING;
+ pAC->Pnmi.VctStatus[PhysPortIndex] |=
+ (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE);
+
+ /* Copy results for later use to PNMI struct. */
+ for (i = 0; i < 4; i++) {
+ if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) {
+ if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] < 0xff)) {
+ pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH;
+ }
+ }
+ if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] != 0xff)) {
+ CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28);
+ }
+ else {
+ CableLength = 0;
+ }
+ pVctBackupData->PMdiPairLen[i] = CableLength;
+ pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i];
+ }
+
+ Para.Para32[0] = PhysPortIndex;
+ Para.Para32[1] = -1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
+ SkEventDispatcher(pAC, IoC);
+ }
+ else {
+ ; /* VCT test is running. */
+ }
+ }
+
+ /* Get all results. */
+ CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex);
+ Offset += sizeof(SK_U8);
+ *(pBuf + Offset) = pPrt->PCableLen;
+ Offset += sizeof(SK_U8);
+ for (i = 0; i < 4; i++) {
+ SK_PNMI_STORE_U32((pBuf + Offset), pVctBackupData->PMdiPairLen[i]);
+ Offset += sizeof(SK_U32);
+ }
+ for (i = 0; i < 4; i++) {
+ *(pBuf + Offset) = pVctBackupData->PMdiPairSts[i];
+ Offset += sizeof(SK_U8);
+ }
+
+ RetCode = SK_PNMI_ERR_OK;
+ break;
+
+ case OID_SKGE_VCT_STATUS:
+ CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex);
+ Offset += sizeof(SK_U8);
+ RetCode = SK_PNMI_ERR_OK;
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ } /* for */
+ *pLen = Offset;
+ return (RetCode);
+
+ } /* if SK_PNMI_GET */
+
+ /*
+ * From here SET or PRESET action. Check if the passed
+ * buffer length is plausible.
+ */
+
+ /* Check length */
+ switch (Id) {
+ case OID_SKGE_VCT_SET:
+ if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) {
+ *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32);
+ return (SK_PNMI_ERR_TOO_SHORT);
+ }
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+
+ /*
+ * Perform preset or set.
+ */
+
+ /* VCT does not support PRESET action. */
+ if (Action == SK_PNMI_PRESET) {
+ return (SK_PNMI_ERR_OK);
+ }
+
+ Offset = 0;
+ for (; PhysPortIndex < Limit; PhysPortIndex++) {
+ switch (Id) {
+ case OID_SKGE_VCT_SET: /* Start VCT test. */
+ if (Link == SK_FALSE) {
+ SkGeStopPort(pAC, IoC, PhysPortIndex, SK_STOP_ALL, SK_SOFT_RST);
+
+ RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_TRUE);
+ if (RetCode == 0) { /* RetCode: 0 => Start! */
+ pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_PENDING;
+ pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_NEW_VCT_DATA;
+ pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_LINK;
+
+ /*
+ * Start VCT timer counter.
+ */
+ SK_MEMSET((char *) &Para, 0, sizeof(Para));
+ Para.Para32[0] = PhysPortIndex;
+ Para.Para32[1] = -1;
+ SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer,
+ 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Para);
+ SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
+ RetCode = SK_PNMI_ERR_OK;
+ }
+ else { /* RetCode: 2 => Running! */
+ SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
+ RetCode = SK_PNMI_ERR_OK;
+ }
+ }
+ else { /* RetCode: 4 => Link! */
+ RetCode = 4;
+ SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
+ RetCode = SK_PNMI_ERR_OK;
+ }
+ Offset += sizeof(SK_U32);
+ break;
+
+ default:
+ *pLen = 0;
+ return (SK_PNMI_ERR_GENERAL);
+ }
+ } /* for */
+ *pLen = Offset;
+ return (RetCode);
+
+} /* Vct */
+
+
+PNMI_STATIC void CheckVctStatus(
+SK_AC *pAC,
+SK_IOC IoC,
+char *pBuf,
+SK_U32 Offset,
+SK_U32 PhysPortIndex)
+{
+ SK_GEPORT *pPrt;
+ SK_PNMI_VCT *pVctData;
+ SK_U32 RetCode;
+
+ pPrt = &pAC->GIni.GP[PhysPortIndex];
+
+ pVctData = (SK_PNMI_VCT *) (pBuf + Offset);
+ pVctData->VctStatus = SK_PNMI_VCT_NONE;
+
+ if (!pPrt->PHWLinkUp) {
+
+ /* Was a VCT test ever made before? */
+ if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) {
+ if ((pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_LINK)) {
+ pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA;
+ }
+ else {
+ pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA;
+ }
+ }
+
+ /* Check VCT test status. */
+ RetCode = SkGmCableDiagStatus(pAC,IoC, PhysPortIndex, SK_FALSE);
+ if (RetCode == 2) { /* VCT test is running. */
+ pVctData->VctStatus |= SK_PNMI_VCT_RUNNING;
+ }
+ else { /* VCT data was copied to pAC here. Check PENDING state. */
+ if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) {
+ pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA;
+ }
+ }
+
+ if (pPrt->PCableLen != 0xff) { /* Old DSP value. */
+ pVctData->VctStatus |= SK_PNMI_VCT_OLD_DSP_DATA;
+ }
+ }
+ else {
+
+ /* Was a VCT test ever made before? */
+ if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) {
+ pVctData->VctStatus &= ~SK_PNMI_VCT_NEW_VCT_DATA;
+ pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA;
+ }
+
+ /* DSP only valid in 100/1000 modes. */
+ if (pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed !=
+ SK_LSPEED_STAT_10MBPS) {
+ pVctData->VctStatus |= SK_PNMI_VCT_NEW_DSP_DATA;
+ }
+ }
+} /* CheckVctStatus */
+
+
+/*****************************************************************************
+ *
+ * SkPnmiGenIoctl - Handles new generic PNMI IOCTL, calls the needed
+ * PNMI function depending on the subcommand and
+ * returns all data belonging to the complete database
+ * or OID request.
+ *
+ * Description:
+ * Looks up the requested subcommand, calls the corresponding handler
+ * function and passes all required parameters to it.
+ * The function is called by the driver. It is needed to handle the new
+ * generic PNMI IOCTL. This IOCTL is given to the driver and contains both
+ * the OID and a subcommand to decide what kind of request has to be done.
+ *
+ * Returns:
+ * SK_PNMI_ERR_OK The request was successfully performed
+ *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred
+ * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
+ * the data.
+ * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown
+ * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
+ * exist (e.g. port instance 3 on a two port
+ * adapter).
+ */
+int SkPnmiGenIoctl(
+SK_AC *pAC, /* Pointer to adapter context struct */
+SK_IOC IoC, /* I/O context */
+void *pBuf, /* Buffer used for the management data transfer */
+unsigned int *pLen, /* Length of buffer */
+SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
+{
+SK_I32 Mode; /* Store value of subcommand. */
+SK_U32 Oid; /* Store value of OID. */
+int ReturnCode; /* Store return value to show status of PNMI action. */
+int HeaderLength; /* Length of desired action plus OID. */
+
+ ReturnCode = SK_PNMI_ERR_GENERAL;
+
+ SK_MEMCPY(&Mode, pBuf, sizeof(SK_I32));
+ SK_MEMCPY(&Oid, (char *) pBuf + sizeof(SK_I32), sizeof(SK_U32));
+ HeaderLength = sizeof(SK_I32) + sizeof(SK_U32);
+ *pLen = *pLen - HeaderLength;
+ SK_MEMCPY((char *) pBuf + sizeof(SK_I32), (char *) pBuf + HeaderLength, *pLen);
+
+ switch(Mode) {
+ case SK_GET_SINGLE_VAR:
+ ReturnCode = SkPnmiGetVar(pAC, IoC, Oid,
+ (char *) pBuf + sizeof(SK_I32), pLen,
+ ((SK_U32) (-1)), NetIndex);
+ SK_PNMI_STORE_U32(pBuf, ReturnCode);
+ *pLen = *pLen + sizeof(SK_I32);
+ break;
+ case SK_PRESET_SINGLE_VAR:
+ ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid,
+ (char *) pBuf + sizeof(SK_I32), pLen,
+ ((SK_U32) (-1)), NetIndex);
+ SK_PNMI_STORE_U32(pBuf, ReturnCode);
+ *pLen = *pLen + sizeof(SK_I32);
+ break;
+ case SK_SET_SINGLE_VAR:
+ ReturnCode = SkPnmiSetVar(pAC, IoC, Oid,
+ (char *) pBuf + sizeof(SK_I32), pLen,
+ ((SK_U32) (-1)), NetIndex);
+ SK_PNMI_STORE_U32(pBuf, ReturnCode);
+ *pLen = *pLen + sizeof(SK_I32);
+ break;
+ case SK_GET_FULL_MIB:
+ ReturnCode = SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex);
+ break;
+ case SK_PRESET_FULL_MIB:
+ ReturnCode = SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex);
+ break;
+ case SK_SET_FULL_MIB:
+ ReturnCode = SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex);
+ break;
+ default:
+ break;
+ }
+
+ return (ReturnCode);
+
+} /* SkPnmiGenIoctl */
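+
+/*
+ * Illustration only (guarded out, not part of the driver): a minimal
+ * sketch of how a driver-side caller is expected to pack the generic
+ * IOCTL buffer as <subcommand><OID><data> before it is unpacked by
+ * SkPnmiGenIoctl() above. The OID used here is just an example and
+ * the surrounding context (pAC, IoC) is assumed to exist.
+ */
+#if 0
+static int SkPnmiGenIoctlExample(
+SK_AC *pAC, /* Pointer to adapter context struct */
+SK_IOC IoC) /* I/O context */
+{
+	char Buf[sizeof(SK_I32) + sizeof(SK_U32) + 256];
+	unsigned int Len = sizeof(Buf);
+	SK_I32 Mode = SK_GET_SINGLE_VAR;	/* subcommand */
+	SK_U32 Oid = OID_SKGE_MTU;		/* example OID (assumed) */
+
+	/* Header: subcommand followed by the OID, data area behind it */
+	SK_MEMCPY(Buf, &Mode, sizeof(SK_I32));
+	SK_MEMCPY(Buf + sizeof(SK_I32), &Oid, sizeof(SK_U32));
+
+	/*
+	 * On return the first SK_I32 of Buf holds the PNMI return code,
+	 * the requested value follows, and Len is the total length used.
+	 */
+	return (SkPnmiGenIoctl(pAC, IoC, Buf, &Len, 0));
+}
+#endif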
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c
new file mode 100644
index 000000000000..87520f0057d7
--- /dev/null
+++ b/drivers/net/sk98lin/skgesirq.c
@@ -0,0 +1,2251 @@
+/******************************************************************************
+ *
+ * Name: skgesirq.c
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.92 $
+ * Date: $Date: 2003/09/16 14:37:07 $
+ * Purpose: Special IRQ module
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Special Interrupt handler
+ *
+ * The following abstract should show how this module is included
+ * in the driver path:
+ *
+ * In the ISR of the driver the bits for frame transmission complete and
+ * for receive complete are checked and handled by the driver itself.
+ * The bits of the slow path mask are checked after that and then the
+ * entry into the so-called "slow path" is prepared. It is an implementor's
+ * decision whether this is executed directly or just scheduled by
+ * disabling the mask. In the interrupt service routine some events may be
+ * generated, so it would be a good idea to call the EventDispatcher
+ * right after this ISR.
+ *
+ * The Interrupt source register of the adapter is NOT read by this module.
+ * So if the driver's implementor needs a while loop around the
+ * slow path interrupt bits, SkGeSirqIsr() has to be called for
+ * each loop iteration.
+ *
+ * However, the MAC Interrupt status registers are read in a while loop.
+ *
+ */
+
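+/*
+ * Illustration only (guarded out): a hedged sketch of the driver path
+ * described above. The driver reads the Interrupt Source register
+ * itself, serves the fast path, calls SkGeSirqIsr() for each pass over
+ * the slow path bits and finally runs the event dispatcher. Register
+ * and helper names (B0_ISRC, SkEventDispatcher) are assumed from the
+ * rest of the driver sources.
+ */
+#if 0
+static void SkDrvIsrSketch(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC) /* IO context */
+{
+	SK_U32 IntSrc; /* interrupt source bits */
+
+	SK_IN32(IoC, B0_ISRC, &IntSrc);
+
+	while (IntSrc != 0) {
+		/* Rx/Tx complete bits are handled by the driver itself ... */
+
+		/* ... the remaining (slow path) bits are passed to this module */
+		SkGeSirqIsr(pAC, IoC, IntSrc);
+
+		SK_IN32(IoC, B0_ISRC, &IntSrc);
+	}
+
+	/* events may have been queued by the special IRQ handling */
+	SkEventDispatcher(pAC, IoC);
+}
+#endif
+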
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skgesirq.c,v 1.92 2003/09/16 14:37:07 rschmidt Exp $ (C) Marvell.";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#ifndef SK_SLIM
+#include "h/skgepnmi.h" /* PNMI Definitions */
+#include "h/skrlmt.h" /* RLMT Definitions */
+#endif
+#include "h/skdrv2nd.h" /* Adapter Control and Driver specific Def. */
+
+/* local function prototypes */
+#ifdef GENESIS
+static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int, SK_BOOL);
+static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int, SK_BOOL);
+static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16);
+#endif /* GENESIS */
+#ifdef YUKON
+static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int, SK_BOOL);
+static void SkPhyIsrGmac(SK_AC*, SK_IOC, int, SK_U16);
+#endif /* YUKON */
+#ifdef OTHER_PHY
+static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int, SK_BOOL);
+static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int, SK_BOOL);
+static void SkPhyIsrLone(SK_AC*, SK_IOC, int, SK_U16);
+#endif /* OTHER_PHY */
+
+#ifdef GENESIS
+/*
+ * array of Rx counters from the XMAC which are checked
+ * in AutoSense mode to detect whether a link is not able to auto-negotiate.
+ */
+static const SK_U16 SkGeRxRegs[]= {
+ XM_RXF_64B,
+ XM_RXF_127B,
+ XM_RXF_255B,
+ XM_RXF_511B,
+ XM_RXF_1023B,
+ XM_RXF_MAX_SZ
+};
+#endif /* GENESIS */
+
+#ifdef __C2MAN__
+/*
+ * Special IRQ function
+ *
+ * General Description:
+ *
+ */
+intro()
+{}
+#endif
+
+/******************************************************************************
+ *
+ * SkHWInitDefSense() - Default Autosensing mode initialization
+ *
+ * Description: sets the PLinkMode for HWInit
+ *
+ * Returns: N/A
+ */
+static void SkHWInitDefSense(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ pPrt->PAutoNegTimeOut = 0;
+
+ if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) {
+ pPrt->PLinkMode = pPrt->PLinkModeConf;
+ return;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("AutoSensing: First mode %d on Port %d\n",
+ (int)SK_LMODE_AUTOFULL, Port));
+
+ pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL;
+
+ return;
+} /* SkHWInitDefSense */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkHWSenseGetNext() - Get Next Autosensing Mode
+ *
+ * Description: gets the appropriate next mode
+ *
+ * Note:
+ *
+ */
+static SK_U8 SkHWSenseGetNext(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ pPrt->PAutoNegTimeOut = 0;
+
+ if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) {
+ /* Leave all as configured */
+ return(pPrt->PLinkModeConf);
+ }
+
+ if (pPrt->PLinkMode == (SK_U8)SK_LMODE_AUTOFULL) {
+ /* Return next mode AUTOBOTH */
+ return ((SK_U8)SK_LMODE_AUTOBOTH);
+ }
+
+ /* Return default autofull */
+ return ((SK_U8)SK_LMODE_AUTOFULL);
+} /* SkHWSenseGetNext */
+
+
+/******************************************************************************
+ *
+ * SkHWSenseSetNext() - Autosensing Set next mode
+ *
+ * Description: sets the appropriate next mode
+ *
+ * Returns: N/A
+ */
+static void SkHWSenseSetNext(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_U8 NewMode) /* New Mode to be written in sense mode */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ pPrt->PAutoNegTimeOut = 0;
+
+ if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) {
+ return;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("AutoSensing: next mode %d on Port %d\n",
+ (int)NewMode, Port));
+
+ pPrt->PLinkMode = NewMode;
+
+ return;
+} /* SkHWSenseSetNext */
+#endif /* GENESIS */
+
+
+/******************************************************************************
+ *
+ * SkHWLinkDown() - Link Down handling
+ *
+ * Description: handles the hardware link down signal
+ *
+ * Returns: N/A
+ */
+void SkHWLinkDown(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Disable all MAC interrupts */
+ SkMacIrqDisable(pAC, IoC, Port);
+
+ /* Disable Receiver and Transmitter */
+ SkMacRxTxDisable(pAC, IoC, Port);
+
+ /* Init default sense mode */
+ SkHWInitDefSense(pAC, IoC, Port);
+
+ if (pPrt->PHWLinkUp == SK_FALSE) {
+ return;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link down Port %d\n", Port));
+
+ /* Set Link to DOWN */
+ pPrt->PHWLinkUp = SK_FALSE;
+
+ /* Reset Port status values */
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
+ pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_INDETERMINATED;
+
+ /* Re-init Phy especially when the AutoSense default is set now */
+ SkMacInitPhy(pAC, IoC, Port, SK_FALSE);
+
+ /* GP0: used for workaround of Rev. C Errata 2 */
+
+ /* Do NOT signal to RLMT */
+
+ /* Do NOT start the timer here */
+} /* SkHWLinkDown */
+
+
+/******************************************************************************
+ *
+ * SkHWLinkUp() - Link Up handling
+ *
+ * Description: handles the hardware link up signal
+ *
+ * Returns: N/A
+ */
+void SkHWLinkUp(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PHWLinkUp) {
+ /* We do NOT need to proceed on active link */
+ return;
+ }
+
+ pPrt->PHWLinkUp = SK_TRUE;
+ pPrt->PAutoNegFail = SK_FALSE;
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
+
+ if (pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOHALF &&
+ pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOFULL &&
+ pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOBOTH) {
+ /* Link is up and no Auto-negotiation should be done */
+
+ /* Link speed should be the configured one */
+ switch (pPrt->PLinkSpeed) {
+ case SK_LSPEED_AUTO:
+ /* default is 1000 Mbps */
+ case SK_LSPEED_1000MBPS:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
+ break;
+ case SK_LSPEED_100MBPS:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS;
+ break;
+ case SK_LSPEED_10MBPS:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS;
+ break;
+ }
+
+ /* Set Link Mode Status */
+ if (pPrt->PLinkMode == SK_LMODE_FULL) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_FULL;
+ }
+ else {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_HALF;
+ }
+
+ /* No flow control without auto-negotiation */
+ pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
+
+ /* enable Rx/Tx */
+ (void)SkMacRxTxEnable(pAC, IoC, Port);
+ }
+} /* SkHWLinkUp */
+
+
+/******************************************************************************
+ *
+ * SkMacParity() - MAC parity workaround
+ *
+ * Description: handles MAC parity errors correctly
+ *
+ * Returns: N/A
+ */
+static void SkMacParity(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index of the port failed */
+{
+ SK_EVPARA Para;
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_U32 TxMax; /* Tx Max Size Counter */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Clear IRQ Tx Parity Error */
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_PERR);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T),
+ (SK_U8)((pAC->GIni.GIChipId == CHIP_ID_YUKON &&
+ pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE));
+ }
+#endif /* YUKON */
+
+ if (pPrt->PCheckPar) {
+
+ if (Port == MAC_1) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E016, SKERR_SIRQ_E016MSG);
+ }
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E017, SKERR_SIRQ_E017MSG);
+ }
+ Para.Para64 = Port;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+
+ Para.Para32[0] = Port;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+
+ return;
+ }
+
+ /* Check whether frames with a size of 1k were sent */
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* Snap statistic counters */
+ (void)SkXmUpdateStats(pAC, IoC, Port);
+
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_MAX_SZ, &TxMax);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ (void)SkGmMacStatistic(pAC, IoC, Port, GM_TXF_1518B, &TxMax);
+ }
+#endif /* YUKON */
+
+ if (TxMax > 0) {
+ /* From now on check the parity */
+ pPrt->PCheckPar = SK_TRUE;
+ }
+} /* SkMacParity */
+
+
+/******************************************************************************
+ *
+ * SkGeHwErr() - Hardware Error service routine
+ *
+ * Description: handles all HW Error interrupts
+ *
+ * Returns: N/A
+ */
+static void SkGeHwErr(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+SK_U32 HwStatus) /* Interrupt status word */
+{
+ SK_EVPARA Para;
+ SK_U16 Word;
+
+ if ((HwStatus & (IS_IRQ_MST_ERR | IS_IRQ_STAT)) != 0) {
+ /* PCI Errors occurred */
+ if ((HwStatus & IS_IRQ_STAT) != 0) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E013, SKERR_SIRQ_E013MSG);
+ }
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E012, SKERR_SIRQ_E012MSG);
+ }
+
+ /* Reset all bits in the PCI STATUS register */
+ SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ Para.Para64 = 0;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
+ }
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ if ((HwStatus & IS_NO_STAT_M1) != 0) {
+ /* Ignore it */
+ /* This situation is also indicated in the descriptor */
+ SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INSTAT);
+ }
+
+ if ((HwStatus & IS_NO_STAT_M2) != 0) {
+ /* Ignore it */
+ /* This situation is also indicated in the descriptor */
+ SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INSTAT);
+ }
+
+ if ((HwStatus & IS_NO_TIST_M1) != 0) {
+ /* Ignore it */
+ /* This situation is also indicated in the descriptor */
+ SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INTIST);
+ }
+
+ if ((HwStatus & IS_NO_TIST_M2) != 0) {
+ /* Ignore it */
+ /* This situation is also indicated in the descriptor */
+ SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INTIST);
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* This is necessary only for Rx timing measurements */
+ if ((HwStatus & IS_IRQ_TIST_OV) != 0) {
+ /* increment Time Stamp Timer counter (high) */
+ pAC->GIni.GITimeStampCnt++;
+
+ /* Clear Time Stamp Timer IRQ */
+ SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_CLR_IRQ);
+ }
+
+ if ((HwStatus & IS_IRQ_SENSOR) != 0) {
+ /* no sensors on 32-bit Yukon */
+ if (pAC->GIni.GIYukon32Bit) {
+ /* disable HW Error IRQ */
+ pAC->GIni.GIValIrqMask &= ~IS_HW_ERR;
+ }
+ }
+ }
+#endif /* YUKON */
+
+ if ((HwStatus & IS_RAM_RD_PAR) != 0) {
+ SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_RD_PERR);
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E014, SKERR_SIRQ_E014MSG);
+ Para.Para64 = 0;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
+ }
+
+ if ((HwStatus & IS_RAM_WR_PAR) != 0) {
+ SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_WR_PERR);
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E015, SKERR_SIRQ_E015MSG);
+ Para.Para64 = 0;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
+ }
+
+ if ((HwStatus & IS_M1_PAR_ERR) != 0) {
+ SkMacParity(pAC, IoC, MAC_1);
+ }
+
+ if ((HwStatus & IS_M2_PAR_ERR) != 0) {
+ SkMacParity(pAC, IoC, MAC_2);
+ }
+
+ if ((HwStatus & IS_R1_PAR_ERR) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_P);
+
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG);
+ Para.Para64 = MAC_1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+
+ Para.Para32[0] = MAC_1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((HwStatus & IS_R2_PAR_ERR) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_P);
+
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG);
+ Para.Para64 = MAC_2;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+
+ Para.Para32[0] = MAC_2;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+} /* SkGeHwErr */
+
+
+/******************************************************************************
+ *
+ * SkGeSirqIsr() - Special Interrupt Service Routine
+ *
+ * Description: handles all non data transfer specific interrupts (slow path)
+ *
+ * Returns: N/A
+ */
+void SkGeSirqIsr(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+SK_U32 Istatus) /* Interrupt status word */
+{
+ SK_EVPARA Para;
+ SK_U32 RegVal32; /* Read register value */
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_U16 PhyInt;
+ int i;
+
+ if (((Istatus & IS_HW_ERR) & pAC->GIni.GIValIrqMask) != 0) {
+ /* read the HW Error Interrupt source */
+ SK_IN32(IoC, B0_HWE_ISRC, &RegVal32);
+
+ SkGeHwErr(pAC, IoC, RegVal32);
+ }
+
+ /*
+ * Packet Timeout interrupts
+ */
+ /* Check whether MACs are correctly initialized */
+ if (((Istatus & (IS_PA_TO_RX1 | IS_PA_TO_TX1)) != 0) &&
+ pAC->GIni.GP[MAC_1].PState == SK_PRT_RESET) {
+ /* MAC 1 was not initialized but Packet timeout occurred */
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E004,
+ SKERR_SIRQ_E004MSG);
+ }
+
+ if (((Istatus & (IS_PA_TO_RX2 | IS_PA_TO_TX2)) != 0) &&
+ pAC->GIni.GP[MAC_2].PState == SK_PRT_RESET) {
+ /* MAC 2 was not initialized but Packet timeout occurred */
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E005,
+ SKERR_SIRQ_E005MSG);
+ }
+
+ if ((Istatus & IS_PA_TO_RX1) != 0) {
+ /* Means network is filling us up */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E002,
+ SKERR_SIRQ_E002MSG);
+ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX1);
+ }
+
+ if ((Istatus & IS_PA_TO_RX2) != 0) {
+ /* Means network is filling us up */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E003,
+ SKERR_SIRQ_E003MSG);
+ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if ((Istatus & IS_PA_TO_TX1) != 0) {
+
+ pPrt = &pAC->GIni.GP[0];
+
+ /* May be a normal situation in a server with a slow network */
+ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX1);
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /*
+ * workaround: if in half duplex mode, check for Tx hangup.
+ * Read the number of TX'ed bytes, wait for 10 ms, then compare
+ * the number with the current value. If nothing changed, we assume
+ * that Tx is hanging and do a FIFO flush (see event routine).
+ */
+ if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
+ pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
+ !pPrt->HalfDupTimerActive) {
+ /*
+ * many more packet arbiter timeouts may come in between;
+ * we ignore those
+ */
+ pPrt->HalfDupTimerActive = SK_TRUE;
+#ifdef XXX
+ Len = sizeof(SK_U64);
+ SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
+ &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0),
+ pAC->Rlmt.Port[0].Net->NetNumber);
+
+ pPrt->LastOctets = Octets;
+#endif /* XXX */
+ /* Snap statistic counters */
+ (void)SkXmUpdateStats(pAC, IoC, 0);
+
+ (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_HI, &RegVal32);
+
+ pPrt->LastOctets = (SK_U64)RegVal32 << 32;
+
+ (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_LO, &RegVal32);
+
+ pPrt->LastOctets += RegVal32;
+
+ Para.Para32[0] = 0;
+ SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME,
+ SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para);
+ }
+ }
+#endif /* GENESIS */
+ }
+
+ if ((Istatus & IS_PA_TO_TX2) != 0) {
+
+ pPrt = &pAC->GIni.GP[1];
+
+ /* May be a normal situation in a server with a slow network */
+ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* workaround: see above */
+ if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
+ pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
+ !pPrt->HalfDupTimerActive) {
+ pPrt->HalfDupTimerActive = SK_TRUE;
+#ifdef XXX
+ Len = sizeof(SK_U64);
+ SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
+ &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1),
+ pAC->Rlmt.Port[1].Net->NetNumber);
+
+ pPrt->LastOctets = Octets;
+#endif /* XXX */
+ /* Snap statistic counters */
+ (void)SkXmUpdateStats(pAC, IoC, 1);
+
+ (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_HI, &RegVal32);
+
+ pPrt->LastOctets = (SK_U64)RegVal32 << 32;
+
+ (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_LO, &RegVal32);
+
+ pPrt->LastOctets += RegVal32;
+
+ Para.Para32[0] = 1;
+ SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME,
+ SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para);
+ }
+ }
+#endif /* GENESIS */
+ }
+
+ /* Check interrupts of the particular queues */
+ if ((Istatus & IS_R1_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E006,
+ SKERR_SIRQ_E006MSG);
+ Para.Para64 = MAC_1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((Istatus & IS_R2_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E007,
+ SKERR_SIRQ_E007MSG);
+ Para.Para64 = MAC_2;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_2;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((Istatus & IS_XS1_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_XS1_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E008,
+ SKERR_SIRQ_E008MSG);
+ Para.Para64 = MAC_1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((Istatus & IS_XA1_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_XA1_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E009,
+ SKERR_SIRQ_E009MSG);
+ Para.Para64 = MAC_1;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_1;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((Istatus & IS_XS2_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_XS2_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E010,
+ SKERR_SIRQ_E010MSG);
+ Para.Para64 = MAC_2;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_2;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((Istatus & IS_XA2_C) != 0) {
+ /* Clear IRQ */
+ SK_OUT32(IoC, B0_XA2_CSR, CSR_IRQ_CL_C);
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E011,
+ SKERR_SIRQ_E011MSG);
+ Para.Para64 = MAC_2;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
+ Para.Para32[0] = MAC_2;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ /* External reg interrupt */
+ if ((Istatus & IS_EXT_REG) != 0) {
+ /* Test IRQs from PHY */
+ for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
+
+ pPrt = &pAC->GIni.GP[i];
+
+ if (pPrt->PState == SK_PRT_RESET) {
+ continue;
+ }
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ switch (pPrt->PhyType) {
+
+ case SK_PHY_XMAC:
+ break;
+
+ case SK_PHY_BCOM:
+ SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt);
+
+ if ((PhyInt & ~PHY_B_DEF_MSK) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Port %d Bcom Int: 0x%04X\n",
+ i, PhyInt));
+ SkPhyIsrBcom(pAC, IoC, i, PhyInt);
+ }
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt);
+
+ if ((PhyInt & PHY_L_DEF_MSK) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Port %d Lone Int: %x\n",
+ i, PhyInt));
+ SkPhyIsrLone(pAC, IoC, i, PhyInt);
+ }
+ break;
+#endif /* OTHER_PHY */
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* Read PHY Interrupt Status */
+ SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_STAT, &PhyInt);
+
+ if ((PhyInt & PHY_M_DEF_MSK) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Port %d Marv Int: 0x%04X\n",
+ i, PhyInt));
+ SkPhyIsrGmac(pAC, IoC, i, PhyInt);
+ }
+ }
+#endif /* YUKON */
+ }
+ }
+
+ /* I2C Ready interrupt */
+ if ((Istatus & IS_I2C_READY) != 0) {
+#ifdef SK_SLIM
+ SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
+#else
+ SkI2cIsr(pAC, IoC);
+#endif
+ }
+
+ /* SW forced interrupt */
+ if ((Istatus & IS_IRQ_SW) != 0) {
+ /* clear the software IRQ */
+ SK_OUT8(IoC, B0_CTST, CS_CL_SW_IRQ);
+ }
+
+ if ((Istatus & IS_LNK_SYNC_M1) != 0) {
+ /*
+ * We do NOT need the Link Sync interrupt, because it shows
+ * us only a link going down.
+ */
+ /* clear interrupt */
+ SK_OUT8(IoC, MR_ADDR(MAC_1, LNK_SYNC_CTRL), LED_CLR_IRQ);
+ }
+
+ /* Check MAC after link sync counter */
+ if ((Istatus & IS_MAC1) != 0) {
+ /* IRQ from MAC 1 */
+ SkMacIrq(pAC, IoC, MAC_1);
+ }
+
+ if ((Istatus & IS_LNK_SYNC_M2) != 0) {
+ /*
+ * We do NOT need the Link Sync interrupt, because it shows
+ * us only a link going down.
+ */
+ /* clear interrupt */
+ SK_OUT8(IoC, MR_ADDR(MAC_2, LNK_SYNC_CTRL), LED_CLR_IRQ);
+ }
+
+ /* Check MAC after link sync counter */
+ if ((Istatus & IS_MAC2) != 0) {
+ /* IRQ from MAC 2 */
+ SkMacIrq(pAC, IoC, MAC_2);
+ }
+
+ /* Timer interrupt (served last) */
+ if ((Istatus & IS_TIMINT) != 0) {
+ /* check for HW Errors */
+ if (((Istatus & IS_HW_ERR) & ~pAC->GIni.GIValIrqMask) != 0) {
+ /* read the HW Error Interrupt source */
+ SK_IN32(IoC, B0_HWE_ISRC, &RegVal32);
+
+ SkGeHwErr(pAC, IoC, RegVal32);
+ }
+
+ SkHwtIsr(pAC, IoC);
+ }
+
+} /* SkGeSirqIsr */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkGePortCheckShorts() - Implementing XMAC Workaround Errata # 2
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ */
+static int SkGePortCheckShorts(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port) /* Which port should be checked */
+{
+ SK_U32 Shorts; /* Short Event Counter */
+ SK_U32 CheckShorts; /* Check value for Short Event Counter */
+ SK_U64 RxCts; /* Rx Counter (packets on network) */
+ SK_U32 RxTmp; /* Rx temp. Counter */
+ SK_U32 FcsErrCts; /* FCS Error Counter */
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ int Rtv; /* Return value */
+ int i;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Default: no action */
+ Rtv = SK_HW_PS_NONE;
+
+ (void)SkXmUpdateStats(pAC, IoC, Port);
+
+ /* Extra precaution: check for short Event counter */
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts);
+
+ /*
+ * Read Rx counters (packets seen on the network and not necessarily
+ * really received).
+ */
+ RxCts = 0;
+
+ for (i = 0; i < sizeof(SkGeRxRegs)/sizeof(SkGeRxRegs[0]); i++) {
+
+ (void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp);
+
+ RxCts += (SK_U64)RxTmp;
+ }
+
+ /* By default: check shorts against zero */
+ CheckShorts = 0;
+
+ /* Extra precaution on active links */
+ if (pPrt->PHWLinkUp) {
+ /* Reset Link Restart counter */
+ pPrt->PLinkResCt = 0;
+ pPrt->PAutoNegTOCt = 0;
+
+ /* If link is up check for 2 */
+ CheckShorts = 2;
+
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXF_FCS_ERR, &FcsErrCts);
+
+ if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
+ pPrt->PLipaAutoNeg == SK_LIPA_UNKNOWN &&
+ (pPrt->PLinkMode == SK_LMODE_HALF ||
+ pPrt->PLinkMode == SK_LMODE_FULL)) {
+ /*
+ * This is autosensing and we are in the fallback
+ * manual full/half duplex mode.
+ */
+ if (RxCts == pPrt->PPrevRx) {
+ /* Nothing received, restart link */
+ pPrt->PPrevFcs = FcsErrCts;
+ pPrt->PPrevShorts = Shorts;
+
+ return(SK_HW_PS_RESTART);
+ }
+ else {
+ pPrt->PLipaAutoNeg = SK_LIPA_MANUAL;
+ }
+ }
+
+ if (((RxCts - pPrt->PPrevRx) > pPrt->PRxLim) ||
+ (!(FcsErrCts - pPrt->PPrevFcs))) {
+ /*
+ * Note: The compare with zero above has to be done the way shown,
+ * otherwise the Linux driver will have a problem.
+ */
+ /*
+ * We received a bunch of frames or no CRC error occurred on the
+ * network -> ok.
+ */
+ pPrt->PPrevRx = RxCts;
+ pPrt->PPrevFcs = FcsErrCts;
+ pPrt->PPrevShorts = Shorts;
+
+ return(SK_HW_PS_NONE);
+ }
+
+ pPrt->PPrevFcs = FcsErrCts;
+ }
+
+
+ if ((Shorts - pPrt->PPrevShorts) > CheckShorts) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Short Event Count Restart Port %d \n", Port));
+ Rtv = SK_HW_PS_RESTART;
+ }
+
+ pPrt->PPrevShorts = Shorts;
+ pPrt->PPrevRx = RxCts;
+
+ return(Rtv);
+} /* SkGePortCheckShorts */
+#endif /* GENESIS */
+
+
+/******************************************************************************
+ *
+ * SkGePortCheckUp() - Check if the link is up
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUp(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port) /* Which port should be checked */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */
+ int Rtv; /* Return value */
+
+ Rtv = SK_HW_PS_NONE;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ AutoNeg = SK_FALSE;
+ }
+ else {
+ AutoNeg = SK_TRUE;
+ }
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ switch (pPrt->PhyType) {
+
+ case SK_PHY_XMAC:
+ Rtv = SkGePortCheckUpXmac(pAC, IoC, Port, AutoNeg);
+ break;
+ case SK_PHY_BCOM:
+ Rtv = SkGePortCheckUpBcom(pAC, IoC, Port, AutoNeg);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ Rtv = SkGePortCheckUpLone(pAC, IoC, Port, AutoNeg);
+ break;
+ case SK_PHY_NAT:
+ Rtv = SkGePortCheckUpNat(pAC, IoC, Port, AutoNeg);
+ break;
+#endif /* OTHER_PHY */
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ Rtv = SkGePortCheckUpGmac(pAC, IoC, Port, AutoNeg);
+ }
+#endif /* YUKON */
+
+ return(Rtv);
+} /* SkGePortCheckUp */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkGePortCheckUpXmac() - Implementing of the Workaround Errata # 2
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUpXmac(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port, /* Which port should be checked */
+SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
+{
+ SK_U32 Shorts; /* Short Event Counter */
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ int Done;
+ SK_U32 GpReg; /* General Purpose register value */
+ SK_U16 Isrc; /* Interrupt source register */
+ SK_U16 IsrcSum; /* Interrupt source register sum */
+ SK_U16 LpAb; /* Link Partner Ability */
+ SK_U16 ResAb; /* Resolved Ability */
+ SK_U16 ExtStat; /* Extended Status Register */
+ SK_U8 NextMode; /* Next AutoSensing Mode */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PHWLinkUp) {
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ return(SK_HW_PS_NONE);
+ }
+ else {
+ return(SkGePortCheckShorts(pAC, IoC, Port));
+ }
+ }
+
+ IsrcSum = pPrt->PIsave;
+ pPrt->PIsave = 0;
+
+ /* Now wait for each port's link */
+ if (pPrt->PLinkBroken) {
+ /* Link was broken */
+ XM_IN32(IoC, Port, XM_GP_PORT, &GpReg);
+
+ if ((GpReg & XM_GP_INP_ASS) == 0) {
+ /* The Link is in sync */
+ XM_IN16(IoC, Port, XM_ISRC, &Isrc);
+ IsrcSum |= Isrc;
+ SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum);
+
+ if ((Isrc & XM_IS_INP_ASS) == 0) {
+ /* It has been in sync since last time */
+ /* Restart the PORT */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link in sync Restart Port %d\n", Port));
+
+ (void)SkXmUpdateStats(pAC, IoC, Port);
+
+ /* We now need to reinitialize the PrevShorts counter */
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts);
+ pPrt->PPrevShorts = Shorts;
+
+ pPrt->PLinkBroken = SK_FALSE;
+
+ /*
+ * Link Restart Workaround:
+ * it may be possible that the other Link side
+ * restarts its link as well and we detect
+ * another LinkBroken. To prevent this from
+ * happening we check for a maximum number
+ * of consecutive restarts. If this happens,
+ * we do NOT restart the active link and
+ * check whether the link is now o.k.
+ */
+ pPrt->PLinkResCt++;
+
+ pPrt->PAutoNegTimeOut = 0;
+
+ if (pPrt->PLinkResCt < SK_MAX_LRESTART) {
+ return(SK_HW_PS_RESTART);
+ }
+
+ pPrt->PLinkResCt = 0;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Do NOT restart on Port %d %x %x\n", Port, Isrc, IsrcSum));
+ }
+ else {
+ pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Save Sync/nosync Port %d %x %x\n", Port, Isrc, IsrcSum));
+
+ /* Do nothing more if link is broken */
+ return(SK_HW_PS_NONE);
+ }
+ }
+ else {
+ /* Do nothing more if link is broken */
+ return(SK_HW_PS_NONE);
+ }
+
+ }
+ else {
+ /* Link was not broken, check if it is */
+ XM_IN16(IoC, Port, XM_ISRC, &Isrc);
+ IsrcSum |= Isrc;
+ if ((Isrc & XM_IS_INP_ASS) != 0) {
+ XM_IN16(IoC, Port, XM_ISRC, &Isrc);
+ IsrcSum |= Isrc;
+ if ((Isrc & XM_IS_INP_ASS) != 0) {
+ XM_IN16(IoC, Port, XM_ISRC, &Isrc);
+ IsrcSum |= Isrc;
+ if ((Isrc & XM_IS_INP_ASS) != 0) {
+ pPrt->PLinkBroken = SK_TRUE;
+ /* Re-Init Link partner Autoneg flag */
+ pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN;
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link broken Port %d\n", Port));
+
+ /* Cable removed-> reinit sense mode */
+ SkHWInitDefSense(pAC, IoC, Port);
+
+ return(SK_HW_PS_RESTART);
+ }
+ }
+ }
+ else {
+ SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc);
+
+ if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) {
+ return(SK_HW_PS_RESTART);
+ }
+ }
+ }
+
+ /*
+ * here we usually can check whether the link is in sync and
+ * auto-negotiation is done.
+ */
+ XM_IN32(IoC, Port, XM_GP_PORT, &GpReg);
+ XM_IN16(IoC, Port, XM_ISRC, &Isrc);
+ IsrcSum |= Isrc;
+
+ SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum);
+
+ if ((GpReg & XM_GP_INP_ASS) != 0 || (IsrcSum & XM_IS_INP_ASS) != 0) {
+ if ((GpReg & XM_GP_INP_ASS) == 0) {
+ /* Save Auto-negotiation Done interrupt only if link is in sync */
+ pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND);
+ }
+#ifdef DEBUG
+ if ((pPrt->PIsave & XM_IS_AND) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg done rescheduled Port %d\n", Port));
+ }
+#endif /* DEBUG */
+ return(SK_HW_PS_NONE);
+ }
+
+ if (AutoNeg) {
+ if ((IsrcSum & XM_IS_AND) != 0) {
+ SkHWLinkUp(pAC, IoC, Port);
+ Done = SkMacAutoNegDone(pAC, IoC, Port);
+ if (Done != SK_AND_OK) {
+ /* Get PHY parameters, for debugging only */
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LpAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg FAIL Port %d (LpAb %x, ResAb %x)\n",
+ Port, LpAb, ResAb));
+
+ /* Try next possible mode */
+ NextMode = SkHWSenseGetNext(pAC, IoC, Port);
+ SkHWLinkDown(pAC, IoC, Port);
+ if (Done == SK_AND_DUP_CAP) {
+ /* GoTo next mode */
+ SkHWSenseSetNext(pAC, IoC, Port, NextMode);
+ }
+
+ return(SK_HW_PS_RESTART);
+ }
+ /*
+ * Dummy Read extended status to prevent extra link down/ups
+ * (clear Page Received bit if set)
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_EXP, &ExtStat);
+
+ return(SK_HW_PS_LINK);
+ }
+
+ /* AutoNeg not done, but HW link is up. Check for timeouts */
+ pPrt->PAutoNegTimeOut++;
+ if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) {
+ /* Increase the Timeout counter */
+ pPrt->PAutoNegTOCt++;
+
+ /* Timeout occurred */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("AutoNeg timeout Port %d\n", Port));
+ if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
+ pPrt->PLipaAutoNeg != SK_LIPA_AUTO) {
+ /* Set Link manually up */
+ SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Set manual full duplex Port %d\n", Port));
+ }
+
+ if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
+ pPrt->PLipaAutoNeg == SK_LIPA_AUTO &&
+ pPrt->PAutoNegTOCt >= SK_MAX_ANEG_TO) {
+ /*
+ * This is rather complicated.
+ * We need to check here whether the LIPA_AUTO
+ * we saw before is a false alert. We saw at one
+ * switch (SR8800) that at boot time it sends
+ * just one auto-neg packet and does no further
+ * auto-negotiation.
+ * Solution: we restart the autosensing after
+ * a few timeouts.
+ */
+ pPrt->PAutoNegTOCt = 0;
+ pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN;
+ SkHWInitDefSense(pAC, IoC, Port);
+ }
+
+ /* Do the restart */
+ return(SK_HW_PS_RESTART);
+ }
+ }
+ else {
+ /* Link is up and we don't need more */
+#ifdef DEBUG
+ if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("ERROR: Lipa auto detected on port %d\n", Port));
+ }
+#endif /* DEBUG */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link sync(GP), Port %d\n", Port));
+ SkHWLinkUp(pAC, IoC, Port);
+
+ /*
+ * Link sync (GP), so assume a good connection. But if a bunch of
+ * frames is not received within a time slot (maybe broken Tx cable),
+ * the port is restarted.
+ */
+ return(SK_HW_PS_LINK);
+ }
+
+ return(SK_HW_PS_NONE);
+} /* SkGePortCheckUpXmac */
+
+
+/******************************************************************************
+ *
+ * SkGePortCheckUpBcom() - Check if the link is up on Bcom PHY
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUpBcom(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port, /* Which port should be checked */
+SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ int Done;
+ SK_U16 Isrc; /* Interrupt source register */
+ SK_U16 PhyStat; /* Phy Status Register */
+ SK_U16 ResAb; /* Master/Slave resolution */
+ SK_U16 Ctrl; /* Broadcom control flags */
+#ifdef DEBUG
+ SK_U16 LpAb;
+ SK_U16 ExtStat;
+#endif /* DEBUG */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Check for No HCD Link events (#10523) */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc);
+
+#ifdef xDEBUG
+ if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) ==
+ (PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) {
+
+ SK_U32 Stat1, Stat2, Stat3;
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1);
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "CheckUp1 - Stat: %x, Mask: %x",
+ (void *)Isrc,
+ (void *)Stat1);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &Stat2);
+ Stat1 = Stat1 << 16 | Stat2;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3);
+ Stat2 = Stat2 << 16 | Stat3;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "Ctrl/Stat: %x, AN Adv/LP: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2);
+ Stat1 = Stat1 << 16 | Stat2;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &Stat3);
+ Stat2 = Stat2 << 16 | Stat3;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2);
+ Stat1 = Stat1 << 16 | Stat2;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3);
+ Stat2 = Stat2 << 16 | Stat3;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+ }
+#endif /* DEBUG */
+
+ if ((Isrc & (PHY_B_IS_NO_HDCL /* | PHY_B_IS_NO_HDC */)) != 0) {
+ /*
+ * Workaround BCom Errata:
+ * enable and disable loopback mode if "NO HCD" occurs.
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Ctrl);
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL,
+ (SK_U16)(Ctrl | PHY_CT_LOOP));
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL,
+ (SK_U16)(Ctrl & ~PHY_CT_LOOP));
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("No HCD Link event, Port %d\n", Port));
+#ifdef xDEBUG
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "No HCD link event, port %d.",
+ (void *)Port,
+ (void *)NULL);
+#endif /* DEBUG */
+ }
+
+ /* Not obsolete: link status bit is latched to 0 and autoclearing! */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
+
+ if (pPrt->PHWLinkUp) {
+ return(SK_HW_PS_NONE);
+ }
+
+#ifdef xDEBUG
+ {
+ SK_U32 Stat1, Stat2, Stat3;
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1);
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "CheckUp1a - Stat: %x, Mask: %x",
+ (void *)Isrc,
+ (void *)Stat1);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
+ Stat1 = Stat1 << 16 | PhyStat;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3);
+ Stat2 = Stat2 << 16 | Stat3;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "Ctrl/Stat: %x, AN Adv/LP: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2);
+ Stat1 = Stat1 << 16 | Stat2;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
+ Stat2 = Stat2 << 16 | ResAb;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+
+ Stat1 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1);
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2);
+ Stat1 = Stat1 << 16 | Stat2;
+ Stat2 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2);
+ Stat3 = 0;
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3);
+ Stat2 = Stat2 << 16 | Stat3;
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x",
+ (void *)Stat1,
+ (void *)Stat2);
+ }
+#endif /* DEBUG */
+
+ /*
+ * Here we usually can check whether the link is in sync and
+ * auto-negotiation is done.
+ */
+
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
+
+ SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat));
+
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
+
+ if ((ResAb & PHY_B_1000S_MSF) != 0) {
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Master/Slave Fault port %d\n", Port));
+
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PMSStatus = SK_MS_STAT_FAULT;
+
+ return(SK_HW_PS_RESTART);
+ }
+
+ if ((PhyStat & PHY_ST_LSYNC) == 0) {
+ return(SK_HW_PS_NONE);
+ }
+
+ pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
+ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Port %d, ResAb: 0x%04X\n", Port, ResAb));
+
+ if (AutoNeg) {
+ if ((PhyStat & PHY_ST_AN_OVER) != 0) {
+
+ SkHWLinkUp(pAC, IoC, Port);
+
+ Done = SkMacAutoNegDone(pAC, IoC, Port);
+
+ if (Done != SK_AND_OK) {
+#ifdef DEBUG
+ /* Get PHY parameters, for debugging only */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LpAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ExtStat);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n",
+ Port, LpAb, ExtStat));
+#endif /* DEBUG */
+ return(SK_HW_PS_RESTART);
+ }
+ else {
+#ifdef xDEBUG
+ /* Dummy read ISR to prevent extra link downs/ups */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat);
+
+ if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) {
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "CheckUp2 - Stat: %x",
+ (void *)ExtStat,
+ (void *)NULL);
+ }
+#endif /* DEBUG */
+ return(SK_HW_PS_LINK);
+ }
+ }
+ }
+ else { /* !AutoNeg */
+ /* Link is up and we don't need more. */
+#ifdef DEBUG
+ if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("ERROR: Lipa auto detected on port %d\n", Port));
+ }
+#endif /* DEBUG */
+
+#ifdef xDEBUG
+ /* Dummy read ISR to prevent extra link downs/ups */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat);
+
+ if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) {
+ CMSMPrintString(
+ pAC->pConfigTable,
+ MSG_TYPE_RUNTIME_INFO,
+ "CheckUp3 - Stat: %x",
+ (void *)ExtStat,
+ (void *)NULL);
+ }
+#endif /* DEBUG */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link sync(GP), Port %d\n", Port));
+ SkHWLinkUp(pAC, IoC, Port);
+
+ return(SK_HW_PS_LINK);
+ }
+
+ return(SK_HW_PS_NONE);
+} /* SkGePortCheckUpBcom */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGePortCheckUpGmac() - Check if the link is up on Marvell PHY
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUpGmac(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port, /* Which port should be checked */
+SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ int Done;
+ SK_U16 PhyIsrc; /* PHY Interrupt source */
+ SK_U16 PhyStat; /* PHY Status */
+ SK_U16 PhySpecStat;/* PHY Specific Status */
+ SK_U16 ResAb; /* Master/Slave resolution */
+ SK_EVPARA Para;
+#ifdef DEBUG
+ SK_U16 Word; /* I/O helper */
+#endif /* DEBUG */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PHWLinkUp) {
+ return(SK_HW_PS_NONE);
+ }
+
+ /* Read PHY Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat));
+
+ /* Read PHY Interrupt Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &PhyIsrc);
+
+ if ((PhyIsrc & PHY_M_IS_AN_COMPL) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Auto-Negotiation Completed, PhyIsrc: 0x%04X\n", PhyIsrc));
+ }
+
+ if ((PhyIsrc & PHY_M_IS_LSP_CHANGE) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Link Speed Changed, PhyIsrc: 0x%04X\n", PhyIsrc));
+ }
+
+ SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
+
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb);
+
+ if ((ResAb & PHY_B_1000S_MSF) != 0) {
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Master/Slave Fault port %d\n", Port));
+
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PMSStatus = SK_MS_STAT_FAULT;
+
+ return(SK_HW_PS_RESTART);
+ }
+
+ /* Read PHY Specific Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Phy1000BT: 0x%04X, PhySpecStat: 0x%04X\n", ResAb, PhySpecStat));
+
+#ifdef DEBUG
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_EXP, &Word);
+
+ if ((PhyIsrc & PHY_M_IS_AN_PR) != 0 || (Word & PHY_ANE_RX_PG) != 0 ||
+ (PhySpecStat & PHY_M_PS_PAGE_REC) != 0) {
+ /* Read PHY Next Page Link Partner */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_NEPG_LP, &Word);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Page Received, NextPage: 0x%04X\n", Word));
+ }
+#endif /* DEBUG */
+
+ if ((PhySpecStat & PHY_M_PS_LINK_UP) == 0) {
+ return(SK_HW_PS_NONE);
+ }
+
+ if ((PhySpecStat & PHY_M_PS_DOWNS_STAT) != 0 ||
+ (PhyIsrc & PHY_M_IS_DOWNSH_DET) != 0) {
+ /* Downshift detected */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E025, SKERR_SIRQ_E025MSG);
+
+ Para.Para64 = Port;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_DOWNSHIFT_DET, Para);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Downshift detected, PhyIsrc: 0x%04X\n", PhyIsrc));
+ }
+
+ pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
+ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
+
+ pPrt->PCableLen = (SK_U8)((PhySpecStat & PHY_M_PS_CABLE_MSK) >> 7);
+
+ if (AutoNeg) {
+ /* Auto-Negotiation Over ? */
+ if ((PhyStat & PHY_ST_AN_OVER) != 0) {
+
+ SkHWLinkUp(pAC, IoC, Port);
+
+ Done = SkMacAutoNegDone(pAC, IoC, Port);
+
+ if (Done != SK_AND_OK) {
+ return(SK_HW_PS_RESTART);
+ }
+
+ return(SK_HW_PS_LINK);
+ }
+ }
+ else { /* !AutoNeg */
+ /* Link is up and we don't need more */
+#ifdef DEBUG
+ if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("ERROR: Lipa auto detected on port %d\n", Port));
+ }
+#endif /* DEBUG */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link sync, Port %d\n", Port));
+ SkHWLinkUp(pAC, IoC, Port);
+
+ return(SK_HW_PS_LINK);
+ }
+
+ return(SK_HW_PS_NONE);
+} /* SkGePortCheckUpGmac */
+#endif /* YUKON */
+
+
+#ifdef OTHER_PHY
+/******************************************************************************
+ *
+ * SkGePortCheckUpLone() - Check if the link is up on Level One PHY
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUpLone(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port, /* Which port should be checked */
+SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ int Done;
+ SK_U16 Isrc; /* Interrupt source register */
+ SK_U16 LpAb; /* Link Partner Ability */
+ SK_U16 ExtStat; /* Extended Status Register */
+ SK_U16 PhyStat; /* Phy Status Register */
+ SK_U16 StatSum;
+ SK_U8 NextMode; /* Next AutoSensing Mode */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PHWLinkUp) {
+ return(SK_HW_PS_NONE);
+ }
+
+ StatSum = pPrt->PIsave;
+ pPrt->PIsave = 0;
+
+ /*
+ * here we usually can check whether the link is in sync and
+ * auto-negotiation is done.
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_STAT, &PhyStat);
+ StatSum |= PhyStat;
+
+ SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
+
+ if ((PhyStat & PHY_ST_LSYNC) == 0) {
+ /* Save Auto-negotiation Done bit */
+ pPrt->PIsave = (SK_U16)(StatSum & PHY_ST_AN_OVER);
+#ifdef DEBUG
+ if ((pPrt->PIsave & PHY_ST_AN_OVER) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg done rescheduled Port %d\n", Port));
+ }
+#endif /* DEBUG */
+ return(SK_HW_PS_NONE);
+ }
+
+ if (AutoNeg) {
+ if ((StatSum & PHY_ST_AN_OVER) != 0) {
+ SkHWLinkUp(pAC, IoC, Port);
+ Done = SkMacAutoNegDone(pAC, IoC, Port);
+ if (Done != SK_AND_OK) {
+ /* Get PHY parameters, for debugging only */
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LpAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ExtStat);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n",
+ Port, LpAb, ExtStat));
+
+ /* Try next possible mode */
+ NextMode = SkHWSenseGetNext(pAC, IoC, Port);
+ SkHWLinkDown(pAC, IoC, Port);
+ if (Done == SK_AND_DUP_CAP) {
+ /* GoTo next mode */
+ SkHWSenseSetNext(pAC, IoC, Port, NextMode);
+ }
+
+ return(SK_HW_PS_RESTART);
+
+ }
+ else {
+ /*
+ * Dummy Read interrupt status to prevent
+ * extra link down/ups
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat);
+ return(SK_HW_PS_LINK);
+ }
+ }
+
+ /* AutoNeg not done, but HW link is up. Check for timeouts */
+ pPrt->PAutoNegTimeOut++;
+ if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) {
+ /* Timeout occurred */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("AutoNeg timeout Port %d\n", Port));
+ if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
+ pPrt->PLipaAutoNeg != SK_LIPA_AUTO) {
+ /* Set Link manually up */
+ SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Set manual full duplex Port %d\n", Port));
+ }
+
+ /* Do the restart */
+ return(SK_HW_PS_RESTART);
+ }
+ }
+ else {
+ /* Link is up and we don't need more */
+#ifdef DEBUG
+ if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("ERROR: Lipa auto detected on port %d\n", Port));
+ }
+#endif /* DEBUG */
+
+ /*
+ * Dummy Read interrupt status to prevent
+ * extra link down/ups
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("Link sync(GP), Port %d\n", Port));
+ SkHWLinkUp(pAC, IoC, Port);
+
+ return(SK_HW_PS_LINK);
+ }
+
+ return(SK_HW_PS_NONE);
+} /* SkGePortCheckUpLone */
+
+
+/******************************************************************************
+ *
+ * SkGePortCheckUpNat() - Check if the link is up on National PHY
+ *
+ * return:
+ * 0 o.k. nothing needed
+ * 1 Restart needed on this port
+ * 2 Link came up
+ */
+static int SkGePortCheckUpNat(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO Context */
+int Port, /* Which port should be checked */
+SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
+{
+ /* todo: National */
+ return(SK_HW_PS_NONE);
+} /* SkGePortCheckUpNat */
+#endif /* OTHER_PHY */
+
+
+/******************************************************************************
+ *
+ * SkGeSirqEvent() - Event Service Routine
+ *
+ * Description:
+ *
+ * Notes:
+ */
+int SkGeSirqEvent(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* Io Context */
+SK_U32 Event, /* Module specific Event */
+SK_EVPARA Para) /* Event specific Parameter */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_U32 Port;
+ SK_U32 Val32;
+ int PortStat;
+ SK_U8 Val8;
+#ifdef GENESIS
+ SK_U64 Octets;
+#endif /* GENESIS */
+
+ Port = Para.Para32[0];
+ pPrt = &pAC->GIni.GP[Port];
+
+ switch (Event) {
+ case SK_HWEV_WATIM:
+ if (pPrt->PState == SK_PRT_RESET) {
+
+ PortStat = SK_HW_PS_NONE;
+ }
+ else {
+ /* Check whether port came up */
+ PortStat = SkGePortCheckUp(pAC, IoC, (int)Port);
+ }
+
+ switch (PortStat) {
+ case SK_HW_PS_RESTART:
+ if (pPrt->PHWLinkUp) {
+ /* Set Link to down */
+ SkHWLinkDown(pAC, IoC, (int)Port);
+
+ /*
+ * Signal directly to RLMT to ensure correct
+ * sequence of SWITCH and RESET event.
+ */
+ SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ /* Restart needed */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
+ break;
+
+ case SK_HW_PS_LINK:
+ /* Signal to RLMT */
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_UP, Para);
+ break;
+ }
+
+ /* Start again the check Timer */
+ if (pPrt->PHWLinkUp) {
+ Val32 = SK_WA_ACT_TIME;
+ }
+ else {
+ Val32 = SK_WA_INA_TIME;
+ }
+
+ /* Todo: still needed for non-XMAC PHYs??? */
+ /* Start workaround Errata #2 timer */
+ SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Val32,
+ SKGE_HWAC, SK_HWEV_WATIM, Para);
+ break;
+
+ case SK_HWEV_PORT_START:
+ if (pPrt->PHWLinkUp) {
+ /*
+ * Signal directly to RLMT to ensure correct
+ * sequence of SWITCH and RESET event.
+ */
+ SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ SkHWLinkDown(pAC, IoC, (int)Port);
+
+ /* Schedule Port RESET */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
+
+ /* Start workaround Errata #2 timer */
+ SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
+ SKGE_HWAC, SK_HWEV_WATIM, Para);
+ break;
+
+ case SK_HWEV_PORT_STOP:
+ if (pPrt->PHWLinkUp) {
+ /*
+ * Signal directly to RLMT to ensure correct
+ * sequence of SWITCH and RESET event.
+ */
+ SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ /* Stop Workaround Timer */
+ SkTimerStop(pAC, IoC, &pPrt->PWaTimer);
+
+ SkHWLinkDown(pAC, IoC, (int)Port);
+ break;
+
+ case SK_HWEV_UPDATE_STAT:
+ /* We do NOT need to update any statistics */
+ break;
+
+ case SK_HWEV_CLEAR_STAT:
+ /* We do NOT need to clear any HW statistics here, just reset the
+ * software check values of all ports.
+ */
+ for (Port = 0; Port < (SK_U32)pAC->GIni.GIMacsFound; Port++) {
+ pAC->GIni.GP[Port].PPrevRx = 0;
+ pAC->GIni.GP[Port].PPrevFcs = 0;
+ pAC->GIni.GP[Port].PPrevShorts = 0;
+ }
+ break;
+
+ case SK_HWEV_SET_LMODE:
+ Val8 = (SK_U8)Para.Para32[1];
+ if (pPrt->PLinkModeConf != Val8) {
+ /* Set New link mode */
+ pPrt->PLinkModeConf = Val8;
+
+ /* Restart Port */
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+ }
+ break;
+
+ case SK_HWEV_SET_FLOWMODE:
+ Val8 = (SK_U8)Para.Para32[1];
+ if (pPrt->PFlowCtrlMode != Val8) {
+ /* Set New Flow Control mode */
+ pPrt->PFlowCtrlMode = Val8;
+
+ /* Restart Port */
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+ }
+ break;
+
+ case SK_HWEV_SET_ROLE:
+ /* not possible for fiber */
+ if (!pAC->GIni.GICopperType) {
+ break;
+ }
+ Val8 = (SK_U8)Para.Para32[1];
+ if (pPrt->PMSMode != Val8) {
+ /* Set New Role (Master/Slave) mode */
+ pPrt->PMSMode = Val8;
+
+ /* Restart Port */
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+ }
+ break;
+
+ case SK_HWEV_SET_SPEED:
+ if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
+ break;
+ }
+ Val8 = (SK_U8)Para.Para32[1];
+ if (pPrt->PLinkSpeed != Val8) {
+ /* Set New Speed parameter */
+ pPrt->PLinkSpeed = Val8;
+
+ /* Restart Port */
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+ }
+ break;
+
+#ifdef GENESIS
+ case SK_HWEV_HALFDUP_CHK:
+ if (pAC->GIni.GIGenesis) {
+ /*
+ * half duplex hangup workaround.
+ * See packet arbiter timeout interrupt for description
+ */
+ pPrt->HalfDupTimerActive = SK_FALSE;
+ if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
+ pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) {
+#ifdef XXX
+ Len = sizeof(SK_U64);
+ SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
+ &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port),
+ pAC->Rlmt.Port[Port].Net->NetNumber);
+#endif /* XXX */
+ /* Snap statistic counters */
+ (void)SkXmUpdateStats(pAC, IoC, Port);
+
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_HI, &Val32);
+
+ Octets = (SK_U64)Val32 << 32;
+
+ (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_LO, &Val32);
+
+ Octets += Val32;
+
+ if (pPrt->LastOctets == Octets) {
+ /* Tx hanging, a FIFO flush restarts it */
+ SkMacFlushTxFifo(pAC, IoC, Port);
+ }
+ }
+ }
+ break;
+#endif /* GENESIS */
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_SIRQ_E001, SKERR_SIRQ_E001MSG);
+ break;
+ }
+
+ return(0);
+} /* SkGeSirqEvent */
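+
+/*
+ * Editor's sketch (not part of the original sk98lin sources): the GENESIS
+ * half-duplex workaround above snapshots the 64-bit TX octet counter from
+ * two 32-bit MAC statistic reads and flushes the TX FIFO when the value has
+ * not moved since the last check. A minimal, stand-alone illustration of
+ * that pattern follows; read_counter_hi()/read_counter_lo() are hypothetical
+ * stand-ins for SkXmMacStatistic(..., XM_TXO_OK_HI/LO, ...).
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t read_counter_hi(void) { return 0x00000001UL; }
+static uint32_t read_counter_lo(void) { return 0x0000F00DUL; }
+
+int main(void)
+{
+	static uint64_t last_octets;	/* corresponds to pPrt->LastOctets */
+	uint64_t octets;
+
+	/* Combine the two 32-bit halves into one 64-bit value. */
+	octets = (uint64_t)read_counter_hi() << 32;
+	octets += read_counter_lo();
+
+	if (octets == last_octets) {
+		/* Counter did not move: the transmitter is considered hung. */
+		printf("TX stalled, a FIFO flush would restart it here\n");
+	}
+	last_octets = octets;
+	return 0;
+}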
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkPhyIsrBcom() - PHY interrupt service routine
+ *
+ * Description: handles all interrupts from BCom PHY
+ *
+ * Returns: N/A
+ */
+static void SkPhyIsrBcom(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* Io Context */
+int Port, /* Port Num = PHY Num */
+SK_U16 IStatus) /* Interrupt Status */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_EVPARA Para;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if ((IStatus & PHY_B_IS_PSE) != 0) {
+		/* Uncorrectable pair swap error */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E022,
+ SKERR_SIRQ_E022MSG);
+ }
+
+ if ((IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) != 0) {
+
+ SkHWLinkDown(pAC, IoC, Port);
+
+ Para.Para32[0] = (SK_U32)Port;
+ /* Signal to RLMT */
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+
+ /* Start workaround Errata #2 timer */
+ SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
+ SKGE_HWAC, SK_HWEV_WATIM, Para);
+ }
+
+} /* SkPhyIsrBcom */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkPhyIsrGmac() - PHY interrupt service routine
+ *
+ * Description: handles all interrupts from Marvell PHY
+ *
+ * Returns: N/A
+ */
+static void SkPhyIsrGmac(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* Io Context */
+int Port, /* Port Num = PHY Num */
+SK_U16 IStatus) /* Interrupt Status */
+{
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+ SK_EVPARA Para;
+ SK_U16 Word;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if ((IStatus & (PHY_M_IS_AN_PR | PHY_M_IS_LST_CHANGE)) != 0) {
+
+ SkHWLinkDown(pAC, IoC, Port);
+
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &Word);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg.Adv: 0x%04X\n", Word));
+
+ /* Set Auto-negotiation advertisement */
+ if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) {
+ /* restore Asymmetric Pause bit */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV,
+ (SK_U16)(Word | PHY_M_AN_ASP));
+ }
+
+ Para.Para32[0] = (SK_U32)Port;
+ /* Signal to RLMT */
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+ if ((IStatus & PHY_M_IS_AN_ERROR) != 0) {
+ /* Auto-Negotiation Error */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG);
+ }
+
+ if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) {
+ /* FIFO Overflow/Underrun Error */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG);
+ }
+
+} /* SkPhyIsrGmac */
+#endif /* YUKON */
+
+
+#ifdef OTHER_PHY
+/******************************************************************************
+ *
+ * SkPhyIsrLone() - PHY interrupt service routine
+ *
+ * Description: handles all interrupts from LONE PHY
+ *
+ * Returns: N/A
+ */
+static void SkPhyIsrLone(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* Io Context */
+int Port, /* Port Num = PHY Num */
+SK_U16 IStatus) /* Interrupt Status */
+{
+ SK_EVPARA Para;
+
+ if (IStatus & (PHY_L_IS_DUP | PHY_L_IS_ISOL)) {
+
+ SkHWLinkDown(pAC, IoC, Port);
+
+ Para.Para32[0] = (SK_U32)Port;
+ /* Signal to RLMT */
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+ }
+
+} /* SkPhyIsrLone */
+#endif /* OTHER_PHY */
+
+/* End of File */
diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c
new file mode 100644
index 000000000000..075a0464e56b
--- /dev/null
+++ b/drivers/net/sk98lin/ski2c.c
@@ -0,0 +1,1296 @@
+/******************************************************************************
+ *
+ * Name: ski2c.c
+ * Project: Gigabit Ethernet Adapters, TWSI-Module
+ * Version: $Revision: 1.59 $
+ * Date: $Date: 2003/10/20 09:07:25 $
+ * Purpose: Functions to access Voltage and Temperature Sensor
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * I2C Protocol
+ */
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: ski2c.c,v 1.59 2003/10/20 09:07:25 rschmidt Exp $ (C) Marvell. ";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#include "h/lm80.h"
+#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
+
+#ifdef __C2MAN__
+/*
+ I2C protocol implementation.
+
+ General Description:
+
+ The I2C protocol is used for the temperature sensors and for
+	the serial EEPROM which holds the configuration.
+
+	This file covers functions that allow reading, writing, and bulk
+	requests to a specified I2C address.
+
+	The Genesis has 2 I2C buses: one for the EEPROM which holds
+	the VPD Data and one for the temperature and voltage sensor.
+ The following picture shows the I2C buses, I2C devices and
+ their control registers.
+
+ Note: The VPD functions are in skvpd.c
+.
+. PCI Config I2C Bus for VPD Data:
+.
+. +------------+
+. | VPD EEPROM |
+. +------------+
+. |
+. | <-- I2C
+. |
+. +-----------+-----------+
+. | |
+. +-----------------+ +-----------------+
+. | PCI_VPD_ADR_REG | | PCI_VPD_DAT_REG |
+. +-----------------+ +-----------------+
+.
+.
+. I2C Bus for LM80 sensor:
+.
+. +-----------------+
+. | Temperature and |
+. | Voltage Sensor |
+. | LM80 |
+. +-----------------+
+. |
+. |
+. I2C --> |
+. |
+. +----+
+. +-------------->| OR |<--+
+. | +----+ |
+. +------+------+ |
+. | | |
+. +--------+ +--------+ +----------+
+. | B2_I2C | | B2_I2C | | B2_I2C |
+. | _CTRL | | _DATA | | _SW |
+. +--------+ +--------+ +----------+
+.
+ The I2C bus may be driven by the B2_I2C_SW or by the B2_I2C_CTRL
+ and B2_I2C_DATA registers.
+ For driver software it is recommended to use the I2C control and
+ data register, because I2C bus timing is done by the ASIC and
+ an interrupt may be received when the I2C request is completed.
+
+ Clock Rate Timing: MIN MAX generated by
+ VPD EEPROM: 50 kHz 100 kHz HW
+ LM80 over I2C Ctrl/Data reg. 50 kHz 100 kHz HW
+ LM80 over B2_I2C_SW register 0 400 kHz SW
+
+	Note: The clock generated by the hardware is dependent on the
+ PCI clock. If the PCI bus clock is 33 MHz, the I2C/VPD
+ clock is 50 kHz.
+ */
+intro()
+{}
+#endif
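+
+/*
+ * Editor's sketch (not from the driver sources): the table above gives the
+ * I2C timing in nanoseconds, and the NS2BCLK() macro further below converts
+ * such values into the tick unit expected by SkDgWaitTime() by scaling with
+ * 125/10000, i.e. one tick per 80 ns (the exact time base is an assumption,
+ * since the diag headers are not part of this patch). The arithmetic:
+ */
+#include <stdio.h>
+
+#define NS2BCLK(x)	(((x) * 125) / 10000)	/* same formula as below */
+
+int main(void)
+{
+	long t_clk_low  = 1300L;	/* fast-mode clock low time in ns */
+	long t_clk_high = 600L;		/* fast-mode clock high time in ns */
+
+	printf("T_CLK_LOW  -> %ld ticks\n", NS2BCLK(t_clk_low));	/* 16 */
+	printf("T_CLK_HIGH -> %ld ticks\n", NS2BCLK(t_clk_high));	/* 7 */
+	return 0;
+}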
+
+#ifdef SK_DIAG
+/*
+ * I2C Fast Mode timing values used by the LM80.
+ * If new devices are added to the I2C bus the timing values have to be checked.
+ */
+#ifndef I2C_SLOW_TIMING
+#define T_CLK_LOW 1300L /* clock low time in ns */
+#define T_CLK_HIGH 600L /* clock high time in ns */
+#define T_DATA_IN_SETUP 100L /* data in Set-up Time */
+#define T_START_HOLD 600L /* start condition hold time */
+#define T_START_SETUP 600L /* start condition Set-up time */
+#define T_STOP_SETUP 600L /* stop condition Set-up time */
+#define T_BUS_IDLE		1300L	/* time the bus must be free after Tx */
+#define T_CLK_2_DATA_OUT 900L /* max. clock low to data output valid */
+#else /* I2C_SLOW_TIMING */
+/* I2C Standard Mode Timing */
+#define T_CLK_LOW 4700L /* clock low time in ns */
+#define T_CLK_HIGH 4000L /* clock high time in ns */
+#define T_DATA_IN_SETUP 250L /* data in Set-up Time */
+#define T_START_HOLD 4000L /* start condition hold time */
+#define T_START_SETUP 4700L /* start condition Set-up time */
+#define T_STOP_SETUP 4000L /* stop condition Set-up time */
+#define T_BUS_IDLE		4700L	/* time the bus must be free after Tx */
+#endif /* !I2C_SLOW_TIMING */
+
+#define NS2BCLK(x) (((x)*125)/10000)
+
+/*
+ * I2C Wire Operations
+ *
+ * About I2C_CLK_LOW():
+ *
+ * The Data Direction bit (I2C_DATA_DIR) has to be set to input when setting
+ * clock to low, to prevent the ASIC and the I2C data client from driving the
+ * serial data line simultaneously (ASIC: last bit of a byte = '1', I2C client
+ * sends an 'ACK'). See also Concentrator Bugreport No. 10192.
+ */
+#define I2C_DATA_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA)
+#define I2C_DATA_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA)
+#define I2C_DATA_OUT(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA_DIR)
+#define I2C_DATA_IN(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA)
+#define I2C_CLK_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_CLK)
+#define I2C_CLK_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK | I2C_DATA_DIR)
+#define I2C_START_COND(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK)
+
+#define NS2CLKT(x) ((x*125L)/10000)
+
+/*--------------- I2C Interface Register Functions --------------- */
+
+/*
+ * sending one bit
+ */
+void SkI2cSndBit(
+SK_IOC IoC, /* I/O Context */
+SK_U8 Bit) /* Bit to send */
+{
+ I2C_DATA_OUT(IoC);
+ if (Bit) {
+ I2C_DATA_HIGH(IoC);
+ }
+ else {
+ I2C_DATA_LOW(IoC);
+ }
+ SkDgWaitTime(IoC, NS2BCLK(T_DATA_IN_SETUP));
+ I2C_CLK_HIGH(IoC);
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH));
+ I2C_CLK_LOW(IoC);
+} /* SkI2cSndBit*/
+
+
+/*
+ * Signal a start to the I2C Bus.
+ *
+ * A start is signaled when data goes low while the clock is high.
+ *
+ * Ends with Clock Low.
+ *
+ * Status: not tested
+ */
+void SkI2cStart(
+SK_IOC IoC) /* I/O Context */
+{
+ /* Init data and Clock to output lines */
+ /* Set Data high */
+ I2C_DATA_OUT(IoC);
+ I2C_DATA_HIGH(IoC);
+ /* Set Clock high */
+ I2C_CLK_HIGH(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_START_SETUP));
+
+ /* Set Data Low */
+ I2C_DATA_LOW(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_START_HOLD));
+
+ /* Clock low without Data to Input */
+ I2C_START_COND(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW));
+} /* SkI2cStart */
+
+
+void SkI2cStop(
+SK_IOC IoC) /* I/O Context */
+{
+ /* Init data and Clock to output lines */
+ /* Set Data low */
+ I2C_DATA_OUT(IoC);
+ I2C_DATA_LOW(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT));
+
+ /* Set Clock high */
+ I2C_CLK_HIGH(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_STOP_SETUP));
+
+ /*
+ * Set Data High: Do it by setting the Data Line to Input.
+	 *		 Because of a pull-up resistor the Data Line
+	 *		 floats high.
+ */
+ I2C_DATA_IN(IoC);
+
+ /*
+ * When I2C activity is stopped
+ * o DATA should be set to input and
+ * o CLOCK should be set to high!
+ */
+ SkDgWaitTime(IoC, NS2BCLK(T_BUS_IDLE));
+} /* SkI2cStop */
+
+
+/*
+ * Receive just one bit via the I2C bus.
+ *
+ * Note: Clock must be set to LOW before calling this function.
+ *
+ * Returns The received bit.
+ */
+int SkI2cRcvBit(
+SK_IOC IoC) /* I/O Context */
+{
+ int Bit;
+ SK_U8 I2cSwCtrl;
+
+ /* Init data as input line */
+ I2C_DATA_IN(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT));
+
+ I2C_CLK_HIGH(IoC);
+
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH));
+
+ SK_I2C_GET_SW(IoC, &I2cSwCtrl);
+
+ Bit = (I2cSwCtrl & I2C_DATA) ? 1 : 0;
+
+ I2C_CLK_LOW(IoC);
+ SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW-T_CLK_2_DATA_OUT));
+
+ return(Bit);
+} /* SkI2cRcvBit */
+
+
+/*
+ * Receive an ACK.
+ *
+ * returns	0 if acknowledged
+ * 1 in case of an error
+ */
+int SkI2cRcvAck(
+SK_IOC IoC) /* I/O Context */
+{
+ /*
+ * Received bit must be zero.
+ */
+ return(SkI2cRcvBit(IoC) != 0);
+} /* SkI2cRcvAck */
+
+
+/*
+ * Send an NACK.
+ */
+void SkI2cSndNAck(
+SK_IOC IoC) /* I/O Context */
+{
+ /*
+	 * Send a 1 bit to signal "not acknowledged".
+ */
+ SkI2cSndBit(IoC, 1);
+} /* SkI2cSndNAck */
+
+
+/*
+ * Send an ACK.
+ */
+void SkI2cSndAck(
+SK_IOC IoC) /* I/O Context */
+{
+ /*
+	 * Send a 0 bit to signal "acknowledged".
+ */
+ SkI2cSndBit(IoC, 0);
+} /* SkI2cSndAck */
+
+
+/*
+ * Send one byte to the I2C device and wait for ACK.
+ *
+ * Return the acknowledge status (0 if acknowledged).
+ */
+int SkI2cSndByte(
+SK_IOC IoC, /* I/O Context */
+int Byte) /* byte to send */
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (Byte & (1<<(7-i))) {
+ SkI2cSndBit(IoC, 1);
+ }
+ else {
+ SkI2cSndBit(IoC, 0);
+ }
+ }
+
+ return(SkI2cRcvAck(IoC));
+} /* SkI2cSndByte */
+
+
+/*
+ * Receive one byte and acknowledge it (send NACK instead for the last byte).
+ *
+ * Return byte.
+ */
+int SkI2cRcvByte(
+SK_IOC IoC, /* I/O Context */
+int Last) /* Last Byte Flag */
+{
+ int i;
+ int Byte = 0;
+
+ for (i = 0; i < 8; i++) {
+ Byte <<= 1;
+ Byte |= SkI2cRcvBit(IoC);
+ }
+
+ if (Last) {
+ SkI2cSndNAck(IoC);
+ }
+ else {
+ SkI2cSndAck(IoC);
+ }
+
+ return(Byte);
+} /* SkI2cRcvByte */
+
+
+/*
+ * Start dialog and send device address
+ *
+ * Return 0 if acknowledged, 1 in case of an error
+ */
+int SkI2cSndDev(
+SK_IOC IoC, /* I/O Context */
+int Addr, /* Device Address */
+int Rw) /* Read / Write Flag */
+{
+ SkI2cStart(IoC);
+ Rw = ~Rw;
+ Rw &= I2C_WRITE;
+ return(SkI2cSndByte(IoC, (Addr<<1) | Rw));
+} /* SkI2cSndDev */
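+
+/*
+ * Editor's sketch (not from the driver sources): SkI2cSndDev() above frames
+ * the first byte of a transaction as the 7-bit device address shifted left
+ * by one with the read/write flag in the LSB, and SkI2cSndByte() shifts the
+ * bits out MSB first. On the wire the LSB is 1 for read and 0 for write;
+ * the driver's own I2C_READ/I2C_WRITE constants (and the LM80 address used
+ * here, 0x28) are defined in headers outside this patch, so they are
+ * assumptions in this stand-alone illustration.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint8_t i2c_addr_byte(uint8_t addr7, int read)
+{
+	return (uint8_t)((addr7 << 1) | (read ? 1 : 0));
+}
+
+int main(void)
+{
+	uint8_t byte = i2c_addr_byte(0x28, 1);	/* read from address 0x28 */
+	int i;
+
+	/* Shift the bits out MSB first, as SkI2cSndByte() does. */
+	for (i = 0; i < 8; i++)
+		printf("%d", (byte >> (7 - i)) & 1);
+	printf("\n");
+	return 0;
+}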
+
+#endif /* SK_DIAG */
+
+/*----------------- I2C CTRL Register Functions ----------*/
+
+/*
+ * waits for the completion of an I2C transfer
+ *
+ * returns 0: success, transfer completes
+ * 1: error, transfer does not complete, I2C transfer
+ * killed, wait loop terminated.
+ */
+int SkI2cWait(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */
+{
+ SK_U64 StartTime;
+ SK_U64 CurrentTime;
+ SK_U32 I2cCtrl;
+
+ StartTime = SkOsGetTime(pAC);
+
+ do {
+ CurrentTime = SkOsGetTime(pAC);
+
+ if (CurrentTime - StartTime > SK_TICKS_PER_SEC / 8) {
+
+ SK_I2C_STOP(IoC);
+#ifndef SK_DIAG
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E002, SKERR_I2C_E002MSG);
+#endif /* !SK_DIAG */
+ return(1);
+ }
+
+ SK_I2C_GET_CTL(IoC, &I2cCtrl);
+
+#ifdef xYUKON_DBG
+ printf("StartTime=%lu, CurrentTime=%lu\n",
+ StartTime, CurrentTime);
+ if (kbhit()) {
+ return(1);
+ }
+#endif /* YUKON_DBG */
+
+ } while ((I2cCtrl & I2C_FLAG) == (SK_U32)Event << 31);
+
+ return(0);
+} /* SkI2cWait */
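+
+/*
+ * Editor's sketch (not from the driver sources): SkI2cWait() above polls the
+ * I2C control register until the completion flag changes and gives up after
+ * one eighth of a second. The same "poll until done or deadline" pattern in
+ * stand-alone form; transfer_done() is a hypothetical stand-in for testing
+ * I2C_FLAG, and clock() replaces SkOsGetTime()/SK_TICKS_PER_SEC.
+ */
+#include <stdio.h>
+#include <time.h>
+
+static int transfer_done(void)
+{
+	static int calls;
+	return ++calls > 1000;	/* pretend the hardware finishes eventually */
+}
+
+/* Returns 0 on completion, 1 if the 1/8 s deadline expired first. */
+static int wait_for_completion(void)
+{
+	clock_t start = clock();
+	clock_t limit = CLOCKS_PER_SEC / 8;
+
+	while (!transfer_done()) {
+		if (clock() - start > limit)
+			return 1;	/* the driver would SK_I2C_STOP() and log here */
+	}
+	return 0;
+}
+
+int main(void)
+{
+	printf("wait_for_completion() = %d\n", wait_for_completion());
+	return 0;
+}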
+
+
+/*
+ * waits for the I2C ready interrupt after an I2C transfer has been started
+ *
+ * Returns
+ * Nothing
+ */
+void SkI2cWaitIrq(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC) /* I/O Context */
+{
+ SK_SENSOR *pSen;
+ SK_U64 StartTime;
+ SK_U32 IrqSrc;
+
+ pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
+
+ if (pSen->SenState == SK_SEN_IDLE) {
+ return;
+ }
+
+ StartTime = SkOsGetTime(pAC);
+
+ do {
+ if (SkOsGetTime(pAC) - StartTime > SK_TICKS_PER_SEC / 8) {
+
+ SK_I2C_STOP(IoC);
+#ifndef SK_DIAG
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E016, SKERR_I2C_E016MSG);
+#endif /* !SK_DIAG */
+ return;
+ }
+
+ SK_IN32(IoC, B0_ISRC, &IrqSrc);
+
+ } while ((IrqSrc & IS_I2C_READY) == 0);
+
+ pSen->SenState = SK_SEN_IDLE;
+ return;
+} /* SkI2cWaitIrq */
+
+/*
+ * writes a single byte or 4 bytes into the I2C device
+ *
+ * returns 0: success
+ * 1: error
+ */
+int SkI2cWrite(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 I2cData, /* I2C Data to write */
+int I2cDev, /* I2C Device Address */
+int I2cDevSize, /* I2C Device Size (e.g. I2C_025K_DEV or I2C_2K_DEV) */
+int I2cReg, /* I2C Device Register Address */
+int I2cBurst) /* I2C Burst Flag */
+{
+ SK_OUT32(IoC, B2_I2C_DATA, I2cData);
+
+ SK_I2C_CTL(IoC, I2C_WRITE, I2cDev, I2cDevSize, I2cReg, I2cBurst);
+
+ return(SkI2cWait(pAC, IoC, I2C_WRITE));
+} /* SkI2cWrite*/
+
+
+#ifdef SK_DIAG
+/*
+ * reads a single byte or 4 bytes from the I2C device
+ *
+ * returns the word read
+ */
+SK_U32 SkI2cRead(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int I2cDev, /* I2C Device Address */
+int I2cDevSize, /* I2C Device Size (e.g. I2C_025K_DEV or I2C_2K_DEV) */
+int I2cReg, /* I2C Device Register Address */
+int I2cBurst) /* I2C Burst Flag */
+{
+ SK_U32 Data;
+
+ SK_OUT32(IoC, B2_I2C_DATA, 0);
+ SK_I2C_CTL(IoC, I2C_READ, I2cDev, I2cDevSize, I2cReg, I2cBurst);
+
+ if (SkI2cWait(pAC, IoC, I2C_READ) != 0) {
+ w_print("%s\n", SKERR_I2C_E002MSG);
+ }
+
+ SK_IN32(IoC, B2_I2C_DATA, &Data);
+
+ return(Data);
+} /* SkI2cRead */
+#endif /* SK_DIAG */
+
+
+/*
+ * read a sensor's value
+ *
+ * This function reads a sensor's value from the I2C sensor chip. The sensor
+ * is defined by its index into the sensor database of the structure pAC
+ * points to.
+ * Returns
+ * 1 if the read is completed
+ * 0 if the read must be continued (I2C Bus still allocated)
+ */
+int SkI2cReadSensor(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_SENSOR *pSen) /* Sensor to be read */
+{
+ if (pSen->SenRead != NULL) {
+ return((*pSen->SenRead)(pAC, IoC, pSen));
+ }
+ else {
+ return(0); /* no success */
+ }
+} /* SkI2cReadSensor */
+
+/*
+ * Do the Init state 0 initialization
+ */
+static int SkI2cInit0(
+SK_AC *pAC) /* Adapter Context */
+{
+ int i;
+
+ /* Begin with first sensor */
+ pAC->I2c.CurrSens = 0;
+
+ /* Begin with timeout control for state machine */
+ pAC->I2c.TimerMode = SK_TIMER_WATCH_SM;
+
+ /* Set sensor number to zero */
+ pAC->I2c.MaxSens = 0;
+
+#ifndef SK_DIAG
+ /* Initialize Number of Dummy Reads */
+ pAC->I2c.DummyReads = SK_MAX_SENSORS;
+#endif
+
+ for (i = 0; i < SK_MAX_SENSORS; i++) {
+ pAC->I2c.SenTable[i].SenDesc = "unknown";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_UNKNOWN;
+ pAC->I2c.SenTable[i].SenThreErrHigh = 0;
+ pAC->I2c.SenTable[i].SenThreErrLow = 0;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = 0;
+ pAC->I2c.SenTable[i].SenThreWarnLow = 0;
+ pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN;
+ pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_NONE;
+ pAC->I2c.SenTable[i].SenValue = 0;
+ pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_NOT_PRESENT;
+ pAC->I2c.SenTable[i].SenErrCts = 0;
+ pAC->I2c.SenTable[i].SenBegErrTS = 0;
+ pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE;
+ pAC->I2c.SenTable[i].SenRead = NULL;
+ pAC->I2c.SenTable[i].SenDev = 0;
+ }
+
+ /* Now we are "INIT data"ed */
+ pAC->I2c.InitLevel = SK_INIT_DATA;
+ return(0);
+} /* SkI2cInit0*/
+
+
+/*
+ * Do the init state 1 initialization
+ *
+ * initialize the following registers of the LM80:
+ * Configuration register:
+ * - START, noINT, activeLOW, noINT#Clear, noRESET, noCI, noGPO#, noINIT
+ *
+ * Interrupt Mask Register 1:
+ * - all interrupts are Disabled (0xff)
+ *
+ * Interrupt Mask Register 2:
+ *   - all interrupts are Disabled (0xff); the interrupt mode doesn't matter.
+ *
+ * Fan Divisor/RST_OUT register:
+ * - Divisors set to 1 (bits 00), all others 0s.
+ *
+ * OS# Configuration/Temperature resolution Register:
+ * - all 0s
+ *
+ */
+static int SkI2cInit1(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC) /* I/O Context */
+{
+ int i;
+ SK_U8 I2cSwCtrl;
+ SK_GEPORT *pPrt; /* GIni Port struct pointer */
+
+ if (pAC->I2c.InitLevel != SK_INIT_DATA) {
+ /* ReInit not needed in I2C module */
+ return(0);
+ }
+
+ /* Set the Direction of I2C-Data Pin to IN */
+ SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA);
+ /* Check for 32-Bit Yukon with Low at I2C-Data Pin */
+ SK_I2C_GET_SW(IoC, &I2cSwCtrl);
+
+ if ((I2cSwCtrl & I2C_DATA) == 0) {
+ /* this is a 32-Bit board */
+ pAC->GIni.GIYukon32Bit = SK_TRUE;
+ return(0);
+ }
+
+ /* Check for 64 Bit Yukon without sensors */
+ if (SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_CFG, 0) != 0) {
+ return(0);
+ }
+
+ (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_1, 0);
+
+ (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_2, 0);
+
+ (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_FAN_CTRL, 0);
+
+ (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_TEMP_CTRL, 0);
+
+ (void)SkI2cWrite(pAC, IoC, (SK_U32)LM80_CFG_START, LM80_ADDR, I2C_025K_DEV,
+ LM80_CFG, 0);
+
+ /*
+ * MaxSens has to be updated here, because PhyType is not
+ * set when performing Init Level 0
+ */
+ pAC->I2c.MaxSens = 5;
+
+ pPrt = &pAC->GIni.GP[0];
+
+ if (pAC->GIni.GIGenesis) {
+ if (pPrt->PhyType == SK_PHY_BCOM) {
+ if (pAC->GIni.GIMacsFound == 1) {
+ pAC->I2c.MaxSens += 1;
+ }
+ else {
+ pAC->I2c.MaxSens += 3;
+ }
+ }
+ }
+ else {
+ pAC->I2c.MaxSens += 3;
+ }
+
+ for (i = 0; i < pAC->I2c.MaxSens; i++) {
+ switch (i) {
+ case 0:
+ pAC->I2c.SenTable[i].SenDesc = "Temperature";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_TEMP;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_TEMP_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_TEMP_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_TEMP_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_TEMP_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_TEMP_IN;
+ break;
+ case 1:
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PCI";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_5V_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_5V_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_5V_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_5V_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT0_IN;
+ break;
+ case 2:
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PCI-IO";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_IO_5V_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_IO_5V_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_IO_3V3_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_IO_3V3_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT1_IN;
+ pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_PCI_IO;
+ break;
+ case 3:
+ pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VDD_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VDD_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VDD_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VDD_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN;
+ break;
+ case 4:
+ if (pAC->GIni.GIGenesis) {
+ if (pPrt->PhyType == SK_PHY_BCOM) {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL";
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
+ }
+ else {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PMA";
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
+ }
+ }
+ else {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage VAUX";
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VAUX_3V3_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VAUX_3V3_HIGH_WARN;
+ if (pAC->GIni.GIVauxAvail) {
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR;
+ }
+ else {
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_0V_WARN_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_0V_WARN_ERR;
+ }
+ }
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN;
+ break;
+ case 5:
+ if (pAC->GIni.GIGenesis) {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5";
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR;
+ }
+ else {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage Core 1V5";
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR;
+ }
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN;
+ break;
+ case 6:
+ if (pAC->GIni.GIGenesis) {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL";
+ }
+ else {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3";
+ }
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN;
+ break;
+ case 7:
+ if (pAC->GIni.GIGenesis) {
+ pAC->I2c.SenTable[i].SenDesc = "Speed Fan";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_FAN;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_FAN_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_FAN_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_FAN_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_FAN_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN;
+ }
+ else {
+ pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5";
+ pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
+ pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR;
+ pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN;
+ pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN;
+ pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR;
+ pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN;
+ }
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW,
+ SKERR_I2C_E001, SKERR_I2C_E001MSG);
+ break;
+ }
+
+ pAC->I2c.SenTable[i].SenValue = 0;
+ pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK;
+ pAC->I2c.SenTable[i].SenErrCts = 0;
+ pAC->I2c.SenTable[i].SenBegErrTS = 0;
+ pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE;
+ pAC->I2c.SenTable[i].SenRead = SkLm80ReadSensor;
+ pAC->I2c.SenTable[i].SenDev = LM80_ADDR;
+ }
+
+#ifndef SK_DIAG
+ pAC->I2c.DummyReads = pAC->I2c.MaxSens;
+#endif /* !SK_DIAG */
+
+ /* Clear I2C IRQ */
+ SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
+
+ /* Now we are I/O initialized */
+ pAC->I2c.InitLevel = SK_INIT_IO;
+ return(0);
+} /* SkI2cInit1 */
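+
+/*
+ * Editor's sketch (not from the driver sources): the comment block above
+ * SkI2cInit1() describes the LM80 power-up programming, which the function
+ * performs with SkI2cWrite() calls in the order shown below. lm80_write()
+ * is a hypothetical stand-in, the register indices are placeholders (the
+ * real ones live in h/lm80.h, which is not part of this excerpt), and the
+ * START bit value is an assumption.
+ */
+#include <stdio.h>
+
+enum { LM80_CFG, LM80_IMSK_1, LM80_IMSK_2, LM80_FAN_CTRL, LM80_TEMP_CTRL };
+#define LM80_CFG_START	0x01	/* assumed START bit of the config register */
+
+static void lm80_write(int reg, unsigned val)
+{
+	printf("write 0x%02X to LM80 register %d\n", val, reg);
+}
+
+int main(void)
+{
+	lm80_write(LM80_CFG, 0x00);		/* probe; the driver bails out if this fails */
+	lm80_write(LM80_IMSK_1, 0xFF);		/* mask all interrupts, bank 1 */
+	lm80_write(LM80_IMSK_2, 0xFF);		/* mask all interrupts, bank 2 */
+	lm80_write(LM80_FAN_CTRL, 0x00);	/* fan divisors = 1, RST_OUT off */
+	lm80_write(LM80_TEMP_CTRL, 0x00);	/* default temperature resolution */
+	lm80_write(LM80_CFG, LM80_CFG_START);	/* finally start monitoring */
+	return 0;
+}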
+
+
+/*
+ * Init level 2: Start first sensor read.
+ */
+static int SkI2cInit2(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC) /* I/O Context */
+{
+ int ReadComplete;
+ SK_SENSOR *pSen;
+
+ if (pAC->I2c.InitLevel != SK_INIT_IO) {
+ /* ReInit not needed in I2C module */
+ /* Init0 and Init2 not permitted */
+ return(0);
+ }
+
+ pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
+ ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
+
+ if (ReadComplete) {
+ SK_ERR_LOG(pAC, SK_ERRCL_INIT, SKERR_I2C_E008, SKERR_I2C_E008MSG);
+ }
+
+ /* Now we are correctly initialized */
+ pAC->I2c.InitLevel = SK_INIT_RUN;
+
+ return(0);
+} /* SkI2cInit2*/
+
+
+/*
+ * Initialize I2C devices
+ *
+ * Get the first voltage value and discard it.
+ * Go into temperature read mode. A default pointer is not set.
+ *
+ * The things to be done depend on the init level in the parameter list:
+ * Level 0:
+ * Initialize only the data structures. Do NOT access hardware.
+ * Level 1:
+ * Initialize hardware through SK_IN / SK_OUT commands. Do NOT use interrupts.
+ * Level 2:
+ * Everything is possible. Interrupts may be used from now on.
+ *
+ * return:
+ * 0 = success
+ * other = error.
+ */
+int SkI2cInit(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context needed in levels 1 and 2 */
+int Level) /* Init Level */
+{
+
+ switch (Level) {
+ case SK_INIT_DATA:
+ return(SkI2cInit0(pAC));
+ case SK_INIT_IO:
+ return(SkI2cInit1(pAC, IoC));
+ case SK_INIT_RUN:
+ return(SkI2cInit2(pAC, IoC));
+ default:
+ break;
+ }
+
+ return(0);
+} /* SkI2cInit */
+
+
+#ifndef SK_DIAG
+
+/*
+ * Interrupt service function for the I2C Interface
+ *
+ * Clears the Interrupt source
+ *
+ * Reads the register and checks whether a trap has to be sent.
+ *
+ * Starts the timer if necessary.
+ */
+void SkI2cIsr(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC) /* I/O Context */
+{
+ SK_EVPARA Para;
+
+ /* Clear I2C IRQ */
+ SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
+
+ Para.Para64 = 0;
+ SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_IRQ, Para);
+} /* SkI2cIsr */
+
+
+/*
+ * Check this sensor's value against the thresholds and send events.
+ */
+static void SkI2cCheckSensor(
+SK_AC *pAC, /* Adapter Context */
+SK_SENSOR *pSen)
+{
+ SK_EVPARA ParaLocal;
+ SK_BOOL TooHigh; /* Is sensor too high? */
+ SK_BOOL TooLow; /* Is sensor too low? */
+ SK_U64 CurrTime; /* Current Time */
+ SK_BOOL DoTrapSend; /* We need to send a trap */
+ SK_BOOL DoErrLog; /* We need to log the error */
+	SK_BOOL		IsError;	/* Error threshold exceeded */
+
+ /* Check Dummy Reads first */
+ if (pAC->I2c.DummyReads > 0) {
+ pAC->I2c.DummyReads--;
+ return;
+ }
+
+ /* Get the current time */
+ CurrTime = SkOsGetTime(pAC);
+
+ /* Set para to the most useful setting: The current sensor. */
+ ParaLocal.Para64 = (SK_U64)pAC->I2c.CurrSens;
+
+ /* Check the Value against the thresholds. First: Error Thresholds */
+ TooHigh = (pSen->SenValue > pSen->SenThreErrHigh);
+ TooLow = (pSen->SenValue < pSen->SenThreErrLow);
+
+ IsError = SK_FALSE;
+ if (TooHigh || TooLow) {
+ /* Error condition is satisfied */
+ DoTrapSend = SK_TRUE;
+ DoErrLog = SK_TRUE;
+
+ /* Now error condition is satisfied */
+ IsError = SK_TRUE;
+
+ if (pSen->SenErrFlag == SK_SEN_ERR_ERR) {
+			/* The sensor was already in this state */
+
+ /* So check first whether we have to send a trap */
+ if (pSen->SenLastErrTrapTS + SK_SEN_ERR_TR_HOLD >
+ CurrTime) {
+ /*
+ * Do NOT send the Trap. The hold back time
+ * has to run out first.
+ */
+ DoTrapSend = SK_FALSE;
+ }
+
+ /* Check now whether we have to log an Error */
+ if (pSen->SenLastErrLogTS + SK_SEN_ERR_LOG_HOLD >
+ CurrTime) {
+ /*
+ * Do NOT log the error. The hold back time
+ * has to run out first.
+ */
+ DoErrLog = SK_FALSE;
+ }
+ }
+ else {
+ /* We came from a different state -> Set Begin Time Stamp */
+ pSen->SenBegErrTS = CurrTime;
+ pSen->SenErrFlag = SK_SEN_ERR_ERR;
+ }
+
+ if (DoTrapSend) {
+ /* Set current Time */
+ pSen->SenLastErrTrapTS = CurrTime;
+ pSen->SenErrCts++;
+
+ /* Queue PNMI Event */
+ SkEventQueue(pAC, SKGE_PNMI, (TooHigh ?
+ SK_PNMI_EVT_SEN_ERR_UPP :
+ SK_PNMI_EVT_SEN_ERR_LOW),
+ ParaLocal);
+ }
+
+ if (DoErrLog) {
+ /* Set current Time */
+ pSen->SenLastErrLogTS = CurrTime;
+
+ if (pSen->SenType == SK_SEN_TEMP) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E011, SKERR_I2C_E011MSG);
+ }
+ else if (pSen->SenType == SK_SEN_VOLT) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E012, SKERR_I2C_E012MSG);
+ }
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E015, SKERR_I2C_E015MSG);
+ }
+ }
+ }
+
+ /* Check the Value against the thresholds */
+ /* 2nd: Warning thresholds */
+ TooHigh = (pSen->SenValue > pSen->SenThreWarnHigh);
+ TooLow = (pSen->SenValue < pSen->SenThreWarnLow);
+
+ if (!IsError && (TooHigh || TooLow)) {
+		/* Warning condition is satisfied */
+ DoTrapSend = SK_TRUE;
+ DoErrLog = SK_TRUE;
+
+ if (pSen->SenErrFlag == SK_SEN_ERR_WARN) {
+			/* The sensor was already in this state */
+
+ /* So check first whether we have to send a trap */
+ if (pSen->SenLastWarnTrapTS + SK_SEN_WARN_TR_HOLD > CurrTime) {
+ /*
+ * Do NOT send the Trap. The hold back time
+ * has to run out first.
+ */
+ DoTrapSend = SK_FALSE;
+ }
+
+ /* Check now whether we have to log an Error */
+ if (pSen->SenLastWarnLogTS + SK_SEN_WARN_LOG_HOLD > CurrTime) {
+ /*
+ * Do NOT log the error. The hold back time
+ * has to run out first.
+ */
+ DoErrLog = SK_FALSE;
+ }
+ }
+ else {
+ /* We came from a different state -> Set Begin Time Stamp */
+ pSen->SenBegWarnTS = CurrTime;
+ pSen->SenErrFlag = SK_SEN_ERR_WARN;
+ }
+
+ if (DoTrapSend) {
+ /* Set current Time */
+ pSen->SenLastWarnTrapTS = CurrTime;
+ pSen->SenWarnCts++;
+
+ /* Queue PNMI Event */
+ SkEventQueue(pAC, SKGE_PNMI, (TooHigh ?
+ SK_PNMI_EVT_SEN_WAR_UPP :
+ SK_PNMI_EVT_SEN_WAR_LOW),
+ ParaLocal);
+ }
+
+ if (DoErrLog) {
+ /* Set current Time */
+ pSen->SenLastWarnLogTS = CurrTime;
+
+ if (pSen->SenType == SK_SEN_TEMP) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E009, SKERR_I2C_E009MSG);
+ }
+ else if (pSen->SenType == SK_SEN_VOLT) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E010, SKERR_I2C_E010MSG);
+ }
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E014, SKERR_I2C_E014MSG);
+ }
+ }
+ }
+
+ /* Check for NO error at all */
+ if (!IsError && !TooHigh && !TooLow) {
+ /* Set o.k. Status if no error and no warning condition */
+ pSen->SenErrFlag = SK_SEN_ERR_OK;
+ }
+
+ /* End of check against the thresholds */
+
+ /* Bug fix AF: 16.Aug.2001: Correct the init base
+ * of LM80 sensor.
+ */
+ if (pSen->SenInit == SK_SEN_DYN_INIT_PCI_IO) {
+
+ pSen->SenInit = SK_SEN_DYN_INIT_NONE;
+
+ if (pSen->SenValue > SK_SEN_PCI_IO_RANGE_LIMITER) {
+ /* 5V PCI-IO Voltage */
+ pSen->SenThreWarnLow = SK_SEN_PCI_IO_5V_LOW_WARN;
+ pSen->SenThreErrLow = SK_SEN_PCI_IO_5V_LOW_ERR;
+ }
+ else {
+ /* 3.3V PCI-IO Voltage */
+ pSen->SenThreWarnHigh = SK_SEN_PCI_IO_3V3_HIGH_WARN;
+ pSen->SenThreErrHigh = SK_SEN_PCI_IO_3V3_HIGH_ERR;
+ }
+ }
+
+#ifdef TEST_ONLY
+ /* Dynamic thresholds also for VAUX of LM80 sensor */
+ if (pSen->SenInit == SK_SEN_DYN_INIT_VAUX) {
+
+ pSen->SenInit = SK_SEN_DYN_INIT_NONE;
+
+ /* 3.3V VAUX Voltage */
+ if (pSen->SenValue > SK_SEN_VAUX_RANGE_LIMITER) {
+ pSen->SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN;
+ pSen->SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR;
+ }
+ /* 0V VAUX Voltage */
+ else {
+ pSen->SenThreWarnHigh = SK_SEN_VAUX_0V_WARN_ERR;
+ pSen->SenThreErrHigh = SK_SEN_VAUX_0V_WARN_ERR;
+ }
+ }
+
+ /*
+ * Check initialization state:
+ * The VIO Thresholds need adaption
+ */
+ if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN &&
+ pSen->SenValue > SK_SEN_WARNLOW2C &&
+ pSen->SenValue < SK_SEN_WARNHIGH2) {
+ pSen->SenThreErrLow = SK_SEN_ERRLOW2C;
+ pSen->SenThreWarnLow = SK_SEN_WARNLOW2C;
+ pSen->SenInit = SK_TRUE;
+ }
+
+ if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN &&
+ pSen->SenValue > SK_SEN_WARNLOW2 &&
+ pSen->SenValue < SK_SEN_WARNHIGH2C) {
+ pSen->SenThreErrHigh = SK_SEN_ERRHIGH2C;
+ pSen->SenThreWarnHigh = SK_SEN_WARNHIGH2C;
+ pSen->SenInit = SK_TRUE;
+ }
+#endif
+
+ if (pSen->SenInit != SK_SEN_DYN_INIT_NONE) {
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E013, SKERR_I2C_E013MSG);
+ }
+} /* SkI2cCheckSensor */
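+
+/*
+ * Editor's sketch (not from the driver sources): SkI2cCheckSensor() above
+ * suppresses repeated traps and error-log entries while a hold-back period
+ * since the last emission has not yet expired. The same rate-limiting idea
+ * in stand-alone form; the tick source and HOLD_BACK_TICKS value are
+ * assumptions (the driver uses SkOsGetTime() and SK_SEN_ERR_TR_HOLD).
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define HOLD_BACK_TICKS	60
+
+struct limiter {
+	uint64_t last_ts;	/* time stamp of the last emitted event */
+	unsigned count;		/* how many events were actually emitted */
+};
+
+/* Emit an event unless the previous one is still inside the hold-back window. */
+static void maybe_emit(struct limiter *lim, uint64_t now, const char *msg)
+{
+	if (lim->count != 0 && lim->last_ts + HOLD_BACK_TICKS > now)
+		return;		/* hold-back time has not run out yet */
+	lim->last_ts = now;
+	lim->count++;
+	printf("tick %llu: %s\n", (unsigned long long)now, msg);
+}
+
+int main(void)
+{
+	struct limiter lim = { 0, 0 };
+	uint64_t t;
+
+	for (t = 0; t < 200; t += 10)
+		maybe_emit(&lim, t, "sensor above error threshold");
+	return 0;
+}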
+
+
+/*
+ * The only Event to be served is the timeout event
+ *
+ */
+int SkI2cEvent(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Event, /* Module specific Event */
+SK_EVPARA Para) /* Event specific Parameter */
+{
+ int ReadComplete;
+ SK_SENSOR *pSen;
+ SK_U32 Time;
+ SK_EVPARA ParaLocal;
+ int i;
+
+ /* New case: no sensors */
+ if (pAC->I2c.MaxSens == 0) {
+ return(0);
+ }
+
+ switch (Event) {
+ case SK_I2CEV_IRQ:
+ pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
+ ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
+
+ if (ReadComplete) {
+ /* Check sensor against defined thresholds */
+ SkI2cCheckSensor(pAC, pSen);
+
+ /* Increment Current sensor and set appropriate Timeout */
+ pAC->I2c.CurrSens++;
+ if (pAC->I2c.CurrSens >= pAC->I2c.MaxSens) {
+ pAC->I2c.CurrSens = 0;
+ Time = SK_I2C_TIM_LONG;
+ }
+ else {
+ Time = SK_I2C_TIM_SHORT;
+ }
+
+ /* Start Timer */
+ ParaLocal.Para64 = (SK_U64)0;
+
+ pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
+
+ SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
+ SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
+ }
+ else {
+ /* Start Timer */
+ ParaLocal.Para64 = (SK_U64)0;
+
+ pAC->I2c.TimerMode = SK_TIMER_WATCH_SM;
+
+ SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, SK_I2C_TIM_WATCH,
+ SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
+ }
+ break;
+ case SK_I2CEV_TIM:
+ if (pAC->I2c.TimerMode == SK_TIMER_NEW_GAUGING) {
+
+ ParaLocal.Para64 = (SK_U64)0;
+ SkTimerStop(pAC, IoC, &pAC->I2c.SenTimer);
+
+ pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
+ ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
+
+ if (ReadComplete) {
+ /* Check sensor against defined thresholds */
+ SkI2cCheckSensor(pAC, pSen);
+
+ /* Increment Current sensor and set appropriate Timeout */
+ pAC->I2c.CurrSens++;
+ if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) {
+ pAC->I2c.CurrSens = 0;
+ Time = SK_I2C_TIM_LONG;
+ }
+ else {
+ Time = SK_I2C_TIM_SHORT;
+ }
+
+ /* Start Timer */
+ ParaLocal.Para64 = (SK_U64)0;
+
+ pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
+
+ SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
+ SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
+ }
+ }
+ else {
+ pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
+ pSen->SenErrFlag = SK_SEN_ERR_FAULTY;
+ SK_I2C_STOP(IoC);
+
+ /* Increment Current sensor and set appropriate Timeout */
+ pAC->I2c.CurrSens++;
+ if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) {
+ pAC->I2c.CurrSens = 0;
+ Time = SK_I2C_TIM_LONG;
+ }
+ else {
+ Time = SK_I2C_TIM_SHORT;
+ }
+
+ /* Start Timer */
+ ParaLocal.Para64 = (SK_U64)0;
+
+ pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
+
+ SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
+ SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
+ }
+ break;
+ case SK_I2CEV_CLEAR:
+ for (i = 0; i < SK_MAX_SENSORS; i++) {
+ pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK;
+ pAC->I2c.SenTable[i].SenErrCts = 0;
+ pAC->I2c.SenTable[i].SenWarnCts = 0;
+ pAC->I2c.SenTable[i].SenBegErrTS = 0;
+ pAC->I2c.SenTable[i].SenBegWarnTS = 0;
+ pAC->I2c.SenTable[i].SenLastErrTrapTS = (SK_U64)0;
+ pAC->I2c.SenTable[i].SenLastErrLogTS = (SK_U64)0;
+ pAC->I2c.SenTable[i].SenLastWarnTrapTS = (SK_U64)0;
+ pAC->I2c.SenTable[i].SenLastWarnLogTS = (SK_U64)0;
+ }
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E006, SKERR_I2C_E006MSG);
+ }
+
+ return(0);
+} /* SkI2cEvent*/
+
+#endif /* !SK_DIAG */
diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c
new file mode 100644
index 000000000000..68292d18175b
--- /dev/null
+++ b/drivers/net/sk98lin/sklm80.c
@@ -0,0 +1,213 @@
+/******************************************************************************
+ *
+ * Name: sklm80.c
+ * Project: Gigabit Ethernet Adapters, TWSI-Module
+ * Version: $Revision: 1.22 $
+ * Date: $Date: 2003/10/20 09:08:21 $
+ * Purpose: Functions to access Voltage and Temperature Sensor (LM80)
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ LM80 functions
+*/
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: sklm80.c,v 1.22 2003/10/20 09:08:21 rschmidt Exp $ (C) Marvell. ";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#include "h/lm80.h"
+#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
+
+#ifdef SK_DIAG
+#define BREAK_OR_WAIT(pAC,IoC,Event) SkI2cWait(pAC,IoC,Event)
+#else /* nSK_DIAG */
+#define BREAK_OR_WAIT(pAC,IoC,Event) break
+#endif /* nSK_DIAG */
+
+#ifdef SK_DIAG
+/*
+ * read the register 'Reg' from the device 'Dev'
+ *
+ * return read error -1
+ * success the read value
+ */
+int SkLm80RcvReg(
+SK_IOC IoC, /* Adapter Context */
+int Dev, /* I2C device address */
+int Reg) /* register to read */
+{
+ int Val = 0;
+ int TempExt;
+
+ /* Signal device number */
+ if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) {
+ return(-1);
+ }
+
+ if (SkI2cSndByte(IoC, Reg)) {
+ return(-1);
+ }
+
+ /* repeat start */
+ if (SkI2cSndDev(IoC, Dev, I2C_READ)) {
+ return(-1);
+ }
+
+ switch (Reg) {
+ case LM80_TEMP_IN:
+ Val = (int)SkI2cRcvByte(IoC, 1);
+
+ /* First: correct the value: it might be negative */
+ if ((Val & 0x80) != 0) {
+ /* Value is negative */
+ Val = Val - 256;
+ }
+ Val = Val * SK_LM80_TEMP_LSB;
+ SkI2cStop(IoC);
+
+ TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL);
+
+ if (Val > 0) {
+ Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
+ }
+ else {
+ Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
+ }
+ return(Val);
+ break;
+ case LM80_VT0_IN:
+ case LM80_VT1_IN:
+ case LM80_VT2_IN:
+ case LM80_VT3_IN:
+ Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB;
+ break;
+
+ default:
+ Val = (int)SkI2cRcvByte(IoC, 1);
+ break;
+ }
+
+ SkI2cStop(IoC);
+ return(Val);
+}
+#endif /* SK_DIAG */
+
+/*
+ * read a sensor's value (LM80 specific)
+ *
+ * This function reads a sensor's value from the I2C sensor chip LM80.
+ * The sensor is defined by its index into the sensor database of the
+ * structure pAC points to.
+ *
+ * Returns 1 if the read is completed
+ * 0 if the read must be continued (I2C Bus still allocated)
+ */
+int SkLm80ReadSensor(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context needed in level 1 and 2 */
+SK_SENSOR *pSen) /* Sensor to be read */
+{
+ SK_I32 Value;
+
+ switch (pSen->SenState) {
+ case SK_SEN_IDLE:
+ /* Send address to ADDR register */
+ SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, pSen->SenReg, 0);
+
+ pSen->SenState = SK_SEN_VALUE ;
+ BREAK_OR_WAIT(pAC, IoC, I2C_READ);
+
+ case SK_SEN_VALUE:
+ /* Read value from data register */
+ SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value));
+
+ Value &= 0xff; /* only least significant byte is valid */
+
+ /* Do NOT check the Value against the thresholds */
+ /* Checking is done in the calling instance */
+
+ if (pSen->SenType == SK_SEN_VOLT) {
+ /* Voltage sensor */
+ pSen->SenValue = Value * SK_LM80_VT_LSB;
+ pSen->SenState = SK_SEN_IDLE ;
+ return(1);
+ }
+
+ if (pSen->SenType == SK_SEN_FAN) {
+ if (Value != 0 && Value != 0xff) {
+ /* Fan speed counter */
+ pSen->SenValue = SK_LM80_FAN_FAKTOR/Value;
+ }
+ else {
+ /* Indicate Fan error */
+ pSen->SenValue = 0;
+ }
+ pSen->SenState = SK_SEN_IDLE ;
+ return(1);
+ }
+
+ /* First: correct the value: it might be negative */
+ if ((Value & 0x80) != 0) {
+ /* Value is negative */
+ Value = Value - 256;
+ }
+
+ /* We have a temperature sensor and need to get the signed extension.
+ * For now we get the extension from the last reading, so in the normal
+ * case we won't see flickering temperatures.
+ */
+ pSen->SenValue = (Value * SK_LM80_TEMP_LSB) +
+ (pSen->SenValue % SK_LM80_TEMP_LSB);
+
+ /* Send address to ADDR register */
+ SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, LM80_TEMP_CTRL, 0);
+
+ pSen->SenState = SK_SEN_VALEXT ;
+ BREAK_OR_WAIT(pAC, IoC, I2C_READ);
+
+ case SK_SEN_VALEXT:
+ /* Read value from data register */
+ SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value));
+ Value &= LM80_TEMP_LSB_9; /* only bit 7 is valid */
+
+ /* cut the LSB bit */
+ pSen->SenValue = ((pSen->SenValue / SK_LM80_TEMP_LSB) *
+ SK_LM80_TEMP_LSB);
+
+ if (pSen->SenValue < 0) {
+ /* Value negative: The bit value must be subtracted */
+ pSen->SenValue -= ((Value >> 7) * SK_LM80_TEMPEXT_LSB);
+ }
+ else {
+ /* Value positive: The bit value must be added */
+ pSen->SenValue += ((Value >> 7) * SK_LM80_TEMPEXT_LSB);
+ }
+
+ pSen->SenState = SK_SEN_IDLE ;
+ return(1);
+
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E007, SKERR_I2C_E007MSG);
+ return(1);
+ }
+
+ /* Not completed */
+ return(0);
+}
+
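+
+/*
+ * Editor's sketch (not from the driver sources): SkLm80ReadSensor() above
+ * sign-corrects the 8-bit LM80 temperature register, scales it by
+ * SK_LM80_TEMP_LSB and folds in the half-degree extension bit from the
+ * resolution register. The /10 and %10 formatting in skproc.c further below
+ * suggests the result is kept in tenths of a degree Celsius, so this
+ * stand-alone version assumes SK_LM80_TEMP_LSB == 10 and
+ * SK_LM80_TEMPEXT_LSB == 5; neither constant is shown in this excerpt.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define TEMP_LSB	10	/* assumed: tenths of a degree per register LSB */
+#define TEMPEXT_LSB	5	/* assumed: 0.5 degree for the extension bit */
+
+/* raw: temperature register; ext: resolution register (only bit 7 is used). */
+static int lm80_temp_tenths(uint8_t raw, uint8_t ext)
+{
+	int val = raw;
+
+	if (val & 0x80)
+		val -= 256;	/* two's-complement sign correction */
+
+	val *= TEMP_LSB;
+
+	if (val < 0)
+		val -= (ext >> 7) * TEMPEXT_LSB;
+	else
+		val += (ext >> 7) * TEMPEXT_LSB;
+
+	return val;
+}
+
+int main(void)
+{
+	int t = lm80_temp_tenths(0x19, 0x80);	/* 25 C plus the half-degree bit */
+
+	printf("%d.%d C\n", t / 10, t % 10);	/* prints 25.5 C */
+	return 0;
+}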
diff --git a/drivers/net/sk98lin/skproc.c b/drivers/net/sk98lin/skproc.c
new file mode 100644
index 000000000000..5cece25c034e
--- /dev/null
+++ b/drivers/net/sk98lin/skproc.c
@@ -0,0 +1,265 @@
+/******************************************************************************
+ *
+ * Name: skproc.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.11 $
+ * Date: $Date: 2003/12/11 16:03:57 $
+ *	Purpose:	Functions to display statistic data
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Created 22-Nov-2000
+ * Author: Mirko Lindner (mlindner@syskonnect.de)
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+#include "h/skversion.h"
+
+static int sk_seq_show(struct seq_file *seq, void *v);
+static int sk_proc_open(struct inode *inode, struct file *file);
+
+struct file_operations sk_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sk_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+/*****************************************************************************
+ *
+ * sk_seq_show - show proc information of a particular adapter
+ *
+ * Description:
+ *	This function fills the proc entry with statistic data about
+ *	the Ethernet device and prints out all items one by one.
+ *
+ * Returns: 0
+ *
+ */
+static int sk_seq_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ DEV_NET *pNet = netdev_priv(dev);
+ SK_AC *pAC = pNet->pAC;
+ SK_PNMI_STRUCT_DATA *pPnmiStruct = &pAC->PnmiStruct;
+ unsigned long Flags;
+ unsigned int Size;
+ char sens_msg[50];
+ int t;
+ int i;
+
+ /* NetIndex in GetStruct is now required, zero is only dummy */
+ for (t=pAC->GIni.GIMacsFound; t > 0; t--) {
+ if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 1)
+ t--;
+
+ spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+ Size = SK_PNMI_STRUCT_SIZE;
+#ifdef SK_DIAG_SUPPORT
+ if (pAC->BoardLevel == SK_INIT_DATA) {
+ SK_MEMCPY(&(pAC->PnmiStruct), &(pAC->PnmiBackup), sizeof(SK_PNMI_STRUCT_DATA));
+ if (pAC->DiagModeActive == DIAG_NOTACTIVE) {
+ pAC->Pnmi.DiagAttached = SK_DIAG_IDLE;
+ }
+ } else {
+ SkPnmiGetStruct(pAC, pAC->IoBase, pPnmiStruct, &Size, t-1);
+ }
+#else
+ SkPnmiGetStruct(pAC, pAC->IoBase,
+ pPnmiStruct, &Size, t-1);
+#endif
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ if (pAC->dev[t-1] == dev) {
+ SK_PNMI_STAT *pPnmiStat = &pPnmiStruct->Stat[0];
+
+ seq_printf(seq, "\nDetailed statistic for device %s\n",
+ pAC->dev[t-1]->name);
+ seq_printf(seq, "=======================================\n");
+
+ /* Board statistics */
+ seq_printf(seq, "\nBoard statistics\n\n");
+ seq_printf(seq, "Active Port %c\n",
+ 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt.
+ Net[t-1].PrefPort]->PortNumber);
+ seq_printf(seq, "Preferred Port %c\n",
+ 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt.
+ Net[t-1].PrefPort]->PortNumber);
+
+ seq_printf(seq, "Bus speed (MHz) %d\n",
+ pPnmiStruct->BusSpeed);
+
+ seq_printf(seq, "Bus width (Bit) %d\n",
+ pPnmiStruct->BusWidth);
+ seq_printf(seq, "Driver version %s\n",
+ VER_STRING);
+ seq_printf(seq, "Hardware revision v%d.%d\n",
+ (pAC->GIni.GIPciHwRev >> 4) & 0x0F,
+ pAC->GIni.GIPciHwRev & 0x0F);
+
+			/* Print sensor information */
+ for (i=0; i < pAC->I2c.MaxSens; i ++) {
+ /* Check type */
+ switch (pAC->I2c.SenTable[i].SenType) {
+ case 1:
+ strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
+ strcat(sens_msg, " (C)");
+ seq_printf(seq, "%-25s %d.%02d\n",
+ sens_msg,
+ pAC->I2c.SenTable[i].SenValue / 10,
+ pAC->I2c.SenTable[i].SenValue % 10);
+
+ strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
+ strcat(sens_msg, " (F)");
+ seq_printf(seq, "%-25s %d.%02d\n",
+ sens_msg,
+ ((((pAC->I2c.SenTable[i].SenValue)
+ *10)*9)/5 + 3200)/100,
+ ((((pAC->I2c.SenTable[i].SenValue)
+ *10)*9)/5 + 3200) % 10);
+ break;
+ case 2:
+ strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
+ strcat(sens_msg, " (V)");
+ seq_printf(seq, "%-25s %d.%03d\n",
+ sens_msg,
+ pAC->I2c.SenTable[i].SenValue / 1000,
+ pAC->I2c.SenTable[i].SenValue % 1000);
+ break;
+ case 3:
+ strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc);
+ strcat(sens_msg, " (rpm)");
+ seq_printf(seq, "%-25s %d\n",
+ sens_msg,
+ pAC->I2c.SenTable[i].SenValue);
+ break;
+ default:
+ break;
+ }
+ }
+
+			/* Receive statistics */
+ seq_printf(seq, "\nReceive statistics\n\n");
+
+ seq_printf(seq, "Received bytes %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxOctetsOkCts);
+ seq_printf(seq, "Received packets %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxOkCts);
+#if 0
+ if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC &&
+ pAC->HWRevision < 12) {
+ pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts -
+ pPnmiStat->StatRxShortsCts;
+ pPnmiStat->StatRxShortsCts = 0;
+ }
+#endif
+ if (dev->mtu > 1500)
+ pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts -
+ pPnmiStat->StatRxTooLongCts;
+
+ seq_printf(seq, "Receive errors %Lu\n",
+ (unsigned long long) pPnmiStruct->InErrorsCts);
+ seq_printf(seq, "Receive dropped %Lu\n",
+ (unsigned long long) pPnmiStruct->RxNoBufCts);
+ seq_printf(seq, "Received multicast %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxMulticastOkCts);
+ seq_printf(seq, "Receive error types\n");
+ seq_printf(seq, " length %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxRuntCts);
+ seq_printf(seq, " buffer overflow %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxFifoOverflowCts);
+ seq_printf(seq, " bad crc %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxFcsCts);
+ seq_printf(seq, " framing %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxFramingCts);
+ seq_printf(seq, " missed frames %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxMissedCts);
+
+ if (dev->mtu > 1500)
+ pPnmiStat->StatRxTooLongCts = 0;
+
+ seq_printf(seq, " too long %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxTooLongCts);
+ seq_printf(seq, " carrier extension %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxCextCts);
+ seq_printf(seq, " too short %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxShortsCts);
+ seq_printf(seq, " symbol %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxSymbolCts);
+ seq_printf(seq, " LLC MAC size %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxIRLengthCts);
+ seq_printf(seq, " carrier event %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxCarrierCts);
+ seq_printf(seq, " jabber %Lu\n",
+ (unsigned long long) pPnmiStat->StatRxJabberCts);
+
+
+			/* Transmit statistics */
+ seq_printf(seq, "\nTransmit statistics\n\n");
+
+			seq_printf(seq, "Transmitted bytes          %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxOctetsOkCts);
+			seq_printf(seq, "Transmitted packets        %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxOkCts);
+ seq_printf(seq, "Transmit errors %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxSingleCollisionCts);
+ seq_printf(seq, "Transmit dropped %Lu\n",
+ (unsigned long long) pPnmiStruct->TxNoBufCts);
+ seq_printf(seq, "Transmit collisions %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxSingleCollisionCts);
+ seq_printf(seq, "Transmit error types\n");
+ seq_printf(seq, " excessive collision %ld\n",
+ pAC->stats.tx_aborted_errors);
+ seq_printf(seq, " carrier %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxCarrierCts);
+ seq_printf(seq, " fifo underrun %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxFifoUnderrunCts);
+ seq_printf(seq, " heartbeat %Lu\n",
+ (unsigned long long) pPnmiStat->StatTxCarrierCts);
+ seq_printf(seq, " window %ld\n",
+ pAC->stats.tx_window_errors);
+
+ }
+ }
+ return 0;
+}
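+
+/*
+ * Editor's sketch (not from the driver sources): sk_seq_show() above prints
+ * the temperature in both Celsius and Fahrenheit with pure integer
+ * arithmetic on a value held in tenths of a degree Celsius. The conversion
+ * in isolation (the sample reading is hypothetical); the sketch prints two
+ * fractional Fahrenheit digits for clarity.
+ */
+#include <stdio.h>
+
+/* tenths of a degree Celsius -> hundredths of a degree Fahrenheit */
+static long tenths_c_to_hundredths_f(long tenths_c)
+{
+	return (tenths_c * 10 * 9) / 5 + 3200;
+}
+
+int main(void)
+{
+	long c = 255;	/* hypothetical reading: 25.5 C */
+	long f = tenths_c_to_hundredths_f(c);
+
+	printf("%ld.%ld C = %ld.%02ld F\n", c / 10, c % 10, f / 100, f % 100);
+	return 0;
+}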
+
+/*****************************************************************************
+ *
+ *	sk_proc_open - register the show function when the proc file is opened
+ *
+ * Description:
+ * This function is called whenever a sk98lin proc file is queried.
+ *
+ * Returns: the return value of single_open()
+ *
+ */
+static int sk_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sk_seq_show, PDE(inode)->data);
+}
+
+/*******************************************************************************
+ *
+ * End of file
+ *
+ ******************************************************************************/
diff --git a/drivers/net/sk98lin/skqueue.c b/drivers/net/sk98lin/skqueue.c
new file mode 100644
index 000000000000..0275b4f71d9b
--- /dev/null
+++ b/drivers/net/sk98lin/skqueue.c
@@ -0,0 +1,179 @@
+/******************************************************************************
+ *
+ * Name: skqueue.c
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.20 $
+ * Date: $Date: 2003/09/16 13:44:00 $
+ * Purpose: Management of an event queue.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+
+/*
+ * Event queue and dispatcher
+ */
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skqueue.c,v 1.20 2003/09/16 13:44:00 rschmidt Exp $ (C) Marvell.";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#include "h/skqueue.h" /* Queue Definitions */
+#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
+
+#ifdef __C2MAN__
+/*
+ Event queue management.
+
+ General Description:
+
+ */
+intro()
+{}
+#endif
+
+#define PRINTF(a,b,c)
+
+/*
+ * init event queue management
+ *
+ * Must be called during init level 0.
+ */
+void SkEventInit(
+SK_AC *pAC, /* Adapter context */
+SK_IOC Ioc, /* IO context */
+int Level) /* Init level */
+{
+ switch (Level) {
+ case SK_INIT_DATA:
+ pAC->Event.EvPut = pAC->Event.EvGet = pAC->Event.EvQueue;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * add event to queue
+ */
+void SkEventQueue(
+SK_AC *pAC, /* Adapters context */
+SK_U32 Class, /* Event Class */
+SK_U32 Event, /* Event to be queued */
+SK_EVPARA Para) /* Event parameter */
+{
+ pAC->Event.EvPut->Class = Class;
+ pAC->Event.EvPut->Event = Event;
+ pAC->Event.EvPut->Para = Para;
+
+ if (++pAC->Event.EvPut == &pAC->Event.EvQueue[SK_MAX_EVENT])
+ pAC->Event.EvPut = pAC->Event.EvQueue;
+
+ if (pAC->Event.EvPut == pAC->Event.EvGet) {
+ SK_ERR_LOG(pAC, SK_ERRCL_NORES, SKERR_Q_E001, SKERR_Q_E001MSG);
+ }
+}
+
+/*
+ * event dispatcher
+ * while event queue is not empty
+ * get event from queue
+ * send command to state machine
+ * end
+ * return error reported by individual Event function
+ *	0 if no error occurred.
+ */
+int SkEventDispatcher(
+SK_AC *pAC, /* Adapters Context */
+SK_IOC Ioc) /* Io context */
+{
+ SK_EVENTELEM *pEv; /* pointer into queue */
+ SK_U32 Class;
+ int Rtv;
+
+ pEv = pAC->Event.EvGet;
+
+ PRINTF("dispatch get %x put %x\n", pEv, pAC->Event.ev_put);
+
+ while (pEv != pAC->Event.EvPut) {
+ PRINTF("dispatch Class %d Event %d\n", pEv->Class, pEv->Event);
+
+ switch (Class = pEv->Class) {
+#ifndef SK_USE_LAC_EV
+#ifndef SK_SLIM
+ case SKGE_RLMT: /* RLMT Event */
+ Rtv = SkRlmtEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+ case SKGE_I2C: /* I2C Event */
+ Rtv = SkI2cEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+ case SKGE_PNMI: /* PNMI Event */
+ Rtv = SkPnmiEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#endif /* not SK_SLIM */
+#endif /* not SK_USE_LAC_EV */
+ case SKGE_DRV: /* Driver Event */
+ Rtv = SkDrvEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#ifndef SK_USE_SW_TIMER
+ case SKGE_HWAC:
+ Rtv = SkGeSirqEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#else /* !SK_USE_SW_TIMER */
+ case SKGE_SWT :
+ Rtv = SkSwtEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#endif /* !SK_USE_SW_TIMER */
+#ifdef SK_USE_LAC_EV
+ case SKGE_LACP :
+ Rtv = SkLacpEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+ case SKGE_RSF :
+ Rtv = SkRsfEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+ case SKGE_MARKER :
+ Rtv = SkMarkerEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+ case SKGE_FD :
+ Rtv = SkFdEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#endif /* SK_USE_LAC_EV */
+#ifdef SK_USE_CSUM
+ case SKGE_CSUM :
+ Rtv = SkCsEvent(pAC, Ioc, pEv->Event, pEv->Para);
+ break;
+#endif /* SK_USE_CSUM */
+ default :
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_Q_E002, SKERR_Q_E002MSG);
+ Rtv = 0;
+ }
+
+ if (Rtv != 0) {
+ return(Rtv);
+ }
+
+ if (++pEv == &pAC->Event.EvQueue[SK_MAX_EVENT])
+ pEv = pAC->Event.EvQueue;
+
+ /* Renew get: it is used in queue_events to detect overruns */
+ pAC->Event.EvGet = pEv;
+ }
+
+ return(0);
+}
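+
+/*
+ * Usage sketch (illustrative only; the event code and parameter values are
+ * just examples, and pAC/IoC are assumed to come from the caller):
+ *
+ *	SK_EVPARA Para;
+ *
+ *	Para.Para32[0] = PortNumber;
+ *	Para.Para32[1] = (SK_U32)-1;
+ *	SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_TIM, Para);
+ *	...
+ *	if (SkEventDispatcher(pAC, IoC) != 0) {
+ *		... an individual event handler reported an error ...
+ *	}
+ */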
+
+/* End of file */
diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c
new file mode 100644
index 000000000000..9ea11ab2296a
--- /dev/null
+++ b/drivers/net/sk98lin/skrlmt.c
@@ -0,0 +1,3258 @@
+/******************************************************************************
+ *
+ * Name: skrlmt.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.69 $
+ * Date: $Date: 2003/04/15 09:39:22 $
+ * Purpose: Manage links on SK-NET Adapters, esp. redundant ones.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * Description:
+ *
+ * This module contains code for Link ManagemenT (LMT) of SK-NET Adapters.
+ * It is mainly intended for adapters with more than one link.
+ * For such adapters, this module realizes Redundant Link ManagemenT (RLMT).
+ *
+ * Include File Hierarchy:
+ *
+ * "skdrv1st.h"
+ * "skdrv2nd.h"
+ *
+ ******************************************************************************/
+
+#ifndef lint
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skrlmt.c,v 1.69 2003/04/15 09:39:22 tschilli Exp $ (C) Marvell.";
+#endif /* !defined(lint) */
+
+#define __SKRLMT_C
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* cplusplus */
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+
+/* defines ********************************************************************/
+
+#ifndef SK_HWAC_LINK_LED
+#define SK_HWAC_LINK_LED(a,b,c,d)
+#endif /* !defined(SK_HWAC_LINK_LED) */
+
+#ifndef DEBUG
+#define RLMT_STATIC static
+#else /* DEBUG */
+#define RLMT_STATIC
+
+#ifndef SK_LITTLE_ENDIAN
+/* First 32 bits */
+#define OFFS_LO32 1
+
+/* Second 32 bits */
+#define OFFS_HI32 0
+#else /* SK_LITTLE_ENDIAN */
+/* First 32 bits */
+#define OFFS_LO32 0
+
+/* Second 32 bits */
+#define OFFS_HI32 1
+#endif /* SK_LITTLE_ENDIAN */
+
+#endif /* DEBUG */
+
+/* ----- Private timeout values ----- */
+
+#define SK_RLMT_MIN_TO_VAL 125000 /* 1/8 sec. */
+#define SK_RLMT_DEF_TO_VAL 1000000 /* 1 sec. */
+#define SK_RLMT_PORTDOWN_TIM_VAL 900000 /* another 0.9 sec. */
+#define SK_RLMT_PORTSTART_TIM_VAL 100000 /* 0.1 sec. */
+#define SK_RLMT_PORTUP_TIM_VAL 2500000 /* 2.5 sec. */
+#define SK_RLMT_SEG_TO_VAL 900000000 /* 15 min. */
+
+/* Assume tick counter increment is 1 - may be set OS-dependent. */
+#ifndef SK_TICK_INCR
+#define SK_TICK_INCR SK_CONSTU64(1)
+#endif /* !defined(SK_TICK_INCR) */
+
+/*
+ * Amount that a time stamp must be later to be recognized as "substantially
+ * later". This is about 1/128 sec, but always more than one tick counter increment.
+ */
+#define SK_RLMT_BC_DELTA (1 + ((SK_TICKS_PER_SEC >> 7) > SK_TICK_INCR ? \
+ (SK_TICKS_PER_SEC >> 7) : SK_TICK_INCR))
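+
+/*
+ * Worked example (illustrative; the tick rate is OS-dependent): with a
+ * hypothetical SK_TICKS_PER_SEC of 100 and SK_TICK_INCR of 1, the delta is
+ * 1 + max(100 >> 7, 1) = 1 + 1 = 2 ticks; with 1000 ticks per second it is
+ * 1 + max(7, 1) = 8 ticks, i.e. roughly 1/128 sec.
+ */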
+
+/* ----- Private RLMT defaults ----- */
+
+#define SK_RLMT_DEF_PREF_PORT 0 /* "Lower" port. */
+#define SK_RLMT_DEF_MODE SK_RLMT_CHECK_LINK /* Default RLMT Mode. */
+
+/* ----- Private RLMT checking states ----- */
+
+#define SK_RLMT_RCS_SEG 1 /* RLMT Check State: check seg. */
+#define SK_RLMT_RCS_START_SEG 2 /* RLMT Check State: start check seg. */
+#define SK_RLMT_RCS_SEND_SEG 4 /* RLMT Check State: send BPDU packet */
+#define SK_RLMT_RCS_REPORT_SEG 8 /* RLMT Check State: report seg. */
+
+/* ----- Private PORT checking states ----- */
+
+#define SK_RLMT_PCS_TX 1 /* Port Check State: check tx. */
+#define SK_RLMT_PCS_RX 2 /* Port Check State: check rx. */
+
+/* ----- Private PORT events ----- */
+
+/* Note: Update simulation when changing these. */
+#define SK_RLMT_PORTSTART_TIM 1100 /* Port start timeout. */
+#define SK_RLMT_PORTUP_TIM 1101 /* Port can now go up. */
+#define SK_RLMT_PORTDOWN_RX_TIM 1102 /* Port did not receive once ... */
+#define SK_RLMT_PORTDOWN 1103 /* Port went down. */
+#define SK_RLMT_PORTDOWN_TX_TIM 1104 /* Partner did not receive ... */
+
+/* ----- Private RLMT events ----- */
+
+/* Note: Update simulation when changing these. */
+#define SK_RLMT_TIM 2100 /* RLMT timeout. */
+#define SK_RLMT_SEG_TIM 2101 /* RLMT segmentation check timeout. */
+
+#define TO_SHORTEN(tim) ((tim) / 2)
+
+/* Error numbers and messages. */
+#define SKERR_RLMT_E001 (SK_ERRBASE_RLMT + 0)
+#define SKERR_RLMT_E001_MSG "No Packet."
+#define SKERR_RLMT_E002 (SKERR_RLMT_E001 + 1)
+#define SKERR_RLMT_E002_MSG "Short Packet."
+#define SKERR_RLMT_E003 (SKERR_RLMT_E002 + 1)
+#define SKERR_RLMT_E003_MSG "Unknown RLMT event."
+#define SKERR_RLMT_E004 (SKERR_RLMT_E003 + 1)
+#define SKERR_RLMT_E004_MSG "PortsUp incorrect."
+#define SKERR_RLMT_E005 (SKERR_RLMT_E004 + 1)
+#define SKERR_RLMT_E005_MSG \
+ "Net seems to be segmented (different root bridges are reported on the ports)."
+#define SKERR_RLMT_E006 (SKERR_RLMT_E005 + 1)
+#define SKERR_RLMT_E006_MSG "Duplicate MAC Address detected."
+#define SKERR_RLMT_E007 (SKERR_RLMT_E006 + 1)
+#define SKERR_RLMT_E007_MSG "LinksUp incorrect."
+#define SKERR_RLMT_E008 (SKERR_RLMT_E007 + 1)
+#define SKERR_RLMT_E008_MSG "Port not started but link came up."
+#define SKERR_RLMT_E009 (SKERR_RLMT_E008 + 1)
+#define SKERR_RLMT_E009_MSG "Corrected illegal setting of Preferred Port."
+#define SKERR_RLMT_E010 (SKERR_RLMT_E009 + 1)
+#define SKERR_RLMT_E010_MSG "Ignored illegal Preferred Port."
+
+/* LLC field values. */
+#define LLC_COMMAND_RESPONSE_BIT 1
+#define LLC_TEST_COMMAND 0xE3
+#define LLC_UI 0x03
+
+/* RLMT Packet fields. */
+#define SK_RLMT_DSAP 0
+#define SK_RLMT_SSAP 0
+#define SK_RLMT_CTRL (LLC_TEST_COMMAND)
+#define SK_RLMT_INDICATOR0 0x53 /* S */
+#define SK_RLMT_INDICATOR1 0x4B /* K */
+#define SK_RLMT_INDICATOR2 0x2D /* - */
+#define SK_RLMT_INDICATOR3 0x52 /* R */
+#define SK_RLMT_INDICATOR4 0x4C /* L */
+#define SK_RLMT_INDICATOR5 0x4D /* M */
+#define SK_RLMT_INDICATOR6 0x54 /* T */
+#define SK_RLMT_PACKET_VERSION 0
+
+/* RLMT SPT Flag values. */
+#define SK_RLMT_SPT_FLAG_CHANGE 0x01
+#define SK_RLMT_SPT_FLAG_CHANGE_ACK 0x80
+
+/* RLMT SPT Packet fields. */
+#define SK_RLMT_SPT_DSAP 0x42
+#define SK_RLMT_SPT_SSAP 0x42
+#define SK_RLMT_SPT_CTRL (LLC_UI)
+#define SK_RLMT_SPT_PROTOCOL_ID0 0x00
+#define SK_RLMT_SPT_PROTOCOL_ID1 0x00
+#define SK_RLMT_SPT_PROTOCOL_VERSION_ID 0x00
+#define SK_RLMT_SPT_BPDU_TYPE 0x00
+#define SK_RLMT_SPT_FLAGS 0x00 /* ?? */
+#define SK_RLMT_SPT_ROOT_ID0 0xFF /* Lowest possible priority. */
+#define SK_RLMT_SPT_ROOT_ID1 0xFF /* Lowest possible priority. */
+
+/* Remaining 6 bytes will be the current port address. */
+#define SK_RLMT_SPT_ROOT_PATH_COST0 0x00
+#define SK_RLMT_SPT_ROOT_PATH_COST1 0x00
+#define SK_RLMT_SPT_ROOT_PATH_COST2 0x00
+#define SK_RLMT_SPT_ROOT_PATH_COST3 0x00
+#define SK_RLMT_SPT_BRIDGE_ID0 0xFF /* Lowest possible priority. */
+#define SK_RLMT_SPT_BRIDGE_ID1 0xFF /* Lowest possible priority. */
+
+/* Remaining 6 bytes will be the current port address. */
+#define SK_RLMT_SPT_PORT_ID0 0xFF /* Lowest possible priority. */
+#define SK_RLMT_SPT_PORT_ID1 0xFF /* Lowest possible priority. */
+#define SK_RLMT_SPT_MSG_AGE0 0x00
+#define SK_RLMT_SPT_MSG_AGE1 0x00
+#define SK_RLMT_SPT_MAX_AGE0 0x00
+#define SK_RLMT_SPT_MAX_AGE1 0xFF
+#define SK_RLMT_SPT_HELLO_TIME0 0x00
+#define SK_RLMT_SPT_HELLO_TIME1 0xFF
+#define SK_RLMT_SPT_FWD_DELAY0 0x00
+#define SK_RLMT_SPT_FWD_DELAY1 0x40
+
+/* Size defines. */
+#define SK_RLMT_MIN_PACKET_SIZE 34
+#define SK_RLMT_MAX_PACKET_SIZE (SK_RLMT_MAX_TX_BUF_SIZE)
+#define SK_PACKET_DATA_LEN (SK_RLMT_MAX_PACKET_SIZE - \
+ SK_RLMT_MIN_PACKET_SIZE)
+
+/* ----- RLMT packet types ----- */
+#define SK_PACKET_ANNOUNCE 1 /* Port announcement. */
+#define SK_PACKET_ALIVE 2 /* Alive packet to port. */
+#define SK_PACKET_ADDR_CHANGED 3 /* Port address changed. */
+#define SK_PACKET_CHECK_TX 4 /* Check your tx line. */
+
+#ifdef SK_LITTLE_ENDIAN
+#define SK_U16_TO_NETWORK_ORDER(Val,Addr) { \
+ SK_U8 *_Addr = (SK_U8*)(Addr); \
+ SK_U16 _Val = (SK_U16)(Val); \
+ *_Addr++ = (SK_U8)(_Val >> 8); \
+ *_Addr = (SK_U8)(_Val & 0xFF); \
+}
+#endif /* SK_LITTLE_ENDIAN */
+
+#ifdef SK_BIG_ENDIAN
+#define SK_U16_TO_NETWORK_ORDER(Val,Addr) (*(SK_U16*)(Addr) = (SK_U16)(Val))
+#endif /* SK_BIG_ENDIAN */
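+
+/*
+ * Example (illustrative): SK_U16_TO_NETWORK_ORDER(0x1234, Addr) stores the
+ * bytes 0x12, 0x34 at Addr[0], Addr[1] on little- and big-endian hosts alike,
+ * i.e. the value is laid out in network (big-endian) byte order either way.
+ */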
+
+#define AUTONEG_FAILED SK_FALSE
+#define AUTONEG_SUCCESS SK_TRUE
+
+
+/* typedefs *******************************************************************/
+
+/* RLMT packet. Length: SK_RLMT_MAX_PACKET_SIZE (60) bytes. */
+typedef struct s_RlmtPacket {
+ SK_U8 DstAddr[SK_MAC_ADDR_LEN];
+ SK_U8 SrcAddr[SK_MAC_ADDR_LEN];
+ SK_U8 TypeLen[2];
+ SK_U8 DSap;
+ SK_U8 SSap;
+ SK_U8 Ctrl;
+ SK_U8 Indicator[7];
+ SK_U8 RlmtPacketType[2];
+ SK_U8 Align1[2];
+ SK_U8 Random[4]; /* Random value of requesting(!) station. */
+ SK_U8 RlmtPacketVersion[2]; /* RLMT Packet version. */
+ SK_U8 Data[SK_PACKET_DATA_LEN];
+} SK_RLMT_PACKET;
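+
+/*
+ * Size check (illustrative): the fixed part of the structure above is
+ * 6 + 6 + 2 + 1 + 1 + 1 + 7 + 2 + 2 + 4 + 2 = 34 bytes, which matches
+ * SK_RLMT_MIN_PACKET_SIZE; Data[] pads the frame up to
+ * SK_RLMT_MAX_PACKET_SIZE.
+ */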
+
+typedef struct s_SpTreeRlmtPacket {
+ SK_U8 DstAddr[SK_MAC_ADDR_LEN];
+ SK_U8 SrcAddr[SK_MAC_ADDR_LEN];
+ SK_U8 TypeLen[2];
+ SK_U8 DSap;
+ SK_U8 SSap;
+ SK_U8 Ctrl;
+ SK_U8 ProtocolId[2];
+ SK_U8 ProtocolVersionId;
+ SK_U8 BpduType;
+ SK_U8 Flags;
+ SK_U8 RootId[8];
+ SK_U8 RootPathCost[4];
+ SK_U8 BridgeId[8];
+ SK_U8 PortId[2];
+ SK_U8 MessageAge[2];
+ SK_U8 MaxAge[2];
+ SK_U8 HelloTime[2];
+ SK_U8 ForwardDelay[2];
+} SK_SPTREE_PACKET;
+
+/* global variables ***********************************************************/
+
+SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}};
+SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}};
+SK_MAC_ADDR BcAddr = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
+
+/* local variables ************************************************************/
+
+/* None. */
+
+/* functions ******************************************************************/
+
+RLMT_STATIC void SkRlmtCheckSwitch(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 NetIdx);
+RLMT_STATIC void SkRlmtCheckSeg(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_U32 NetIdx);
+RLMT_STATIC void SkRlmtEvtSetNets(
+ SK_AC *pAC,
+ SK_IOC IoC,
+ SK_EVPARA Para);
+
+/******************************************************************************
+ *
+ * SkRlmtInit - initialize data, set state to init
+ *
+ * Description:
+ *
+ * SK_INIT_DATA
+ * ============
+ *
+ * This routine initializes all RLMT-related variables to a known state.
+ * The initial state is SK_RLMT_RS_INIT.
+ * All ports are initialized to SK_RLMT_PS_INIT.
+ *
+ *
+ * SK_INIT_IO
+ * ==========
+ *
+ * Nothing.
+ *
+ *
+ * SK_INIT_RUN
+ * ===========
+ *
+ * Determine the adapter's random value.
+ * Set the hw registers, the "logical MAC address", the
+ *	RLMT multicast address, and, if the segmentation check is enabled,
+ *	the BPDU multicast address.
+ *
+ * Context:
+ * init, pageable
+ *
+ * Returns:
+ * Nothing.
+ */
+void SkRlmtInit(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Level) /* Initialization Level */
+{
+ SK_U32 i, j;
+ SK_U64 Random;
+ SK_EVPARA Para;
+ SK_MAC_ADDR VirtualMacAddress;
+ SK_MAC_ADDR PhysicalAMacAddress;
+ SK_BOOL VirtualMacAddressSet;
+ SK_BOOL PhysicalAMacAddressSet;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT,
+ ("RLMT Init level %d.\n", Level))
+
+ switch (Level) {
+ case SK_INIT_DATA: /* Initialize data structures. */
+ SK_MEMSET((char *)&pAC->Rlmt, 0, sizeof(SK_RLMT));
+
+ for (i = 0; i < SK_MAX_MACS; i++) {
+ pAC->Rlmt.Port[i].PortState = SK_RLMT_PS_INIT;
+ pAC->Rlmt.Port[i].LinkDown = SK_TRUE;
+ pAC->Rlmt.Port[i].PortDown = SK_TRUE;
+ pAC->Rlmt.Port[i].PortStarted = SK_FALSE;
+ pAC->Rlmt.Port[i].PortNoRx = SK_FALSE;
+ pAC->Rlmt.Port[i].RootIdSet = SK_FALSE;
+ pAC->Rlmt.Port[i].PortNumber = i;
+ pAC->Rlmt.Port[i].Net = &pAC->Rlmt.Net[0];
+ pAC->Rlmt.Port[i].AddrPort = &pAC->Addr.Port[i];
+ }
+
+ pAC->Rlmt.NumNets = 1;
+ for (i = 0; i < SK_MAX_NETS; i++) {
+ pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
+ pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
+ pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
+ pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* Automatic. */
+ /* Just assuming. */
+ pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
+ pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
+ pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
+ pAC->Rlmt.Net[i].NetNumber = i;
+ }
+
+ pAC->Rlmt.Net[0].Port[0] = &pAC->Rlmt.Port[0];
+ pAC->Rlmt.Net[0].Port[1] = &pAC->Rlmt.Port[1];
+#if SK_MAX_NETS > 1
+ pAC->Rlmt.Net[1].Port[0] = &pAC->Rlmt.Port[1];
+#endif /* SK_MAX_NETS > 1 */
+ break;
+
+ case SK_INIT_IO: /* GIMacsFound first available here. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT,
+ ("RLMT: %d MACs were detected.\n", pAC->GIni.GIMacsFound))
+
+ pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound;
+
+ /* Initialize HW registers? */
+ if (pAC->GIni.GIMacsFound == 1) {
+ Para.Para32[0] = SK_RLMT_MODE_CLS;
+ Para.Para32[1] = 0;
+ (void)SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE, Para);
+ }
+ break;
+
+ case SK_INIT_RUN:
+ /* Ensure RLMT is set to one net. */
+ if (pAC->Rlmt.NumNets > 1) {
+ Para.Para32[0] = 1;
+ Para.Para32[1] = -1;
+ SkRlmtEvtSetNets(pAC, IoC, Para);
+ }
+
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ Random = SkOsGetTime(pAC);
+ *(SK_U32*)&pAC->Rlmt.Port[i].Random = *(SK_U32*)&Random;
+
+ for (j = 0; j < 4; j++) {
+ pAC->Rlmt.Port[i].Random[j] ^= pAC->Rlmt.Port[i].AddrPort->
+ CurrentMacAddress.a[SK_MAC_ADDR_LEN - 1 - j];
+ }
+
+ (void)SkAddrMcClear(pAC, IoC, i, SK_ADDR_PERMANENT | SK_MC_SW_ONLY);
+
+ /* Add RLMT MC address. */
+ (void)SkAddrMcAdd(pAC, IoC, i, &SkRlmtMcAddr, SK_ADDR_PERMANENT);
+
+ if (pAC->Rlmt.Net[0].RlmtMode & SK_RLMT_CHECK_SEG) {
+ /* Add BPDU MC address. */
+ (void)SkAddrMcAdd(pAC, IoC, i, &BridgeMcAddr, SK_ADDR_PERMANENT);
+ }
+
+ (void)SkAddrMcUpdate(pAC, IoC, i);
+ }
+
+ VirtualMacAddressSet = SK_FALSE;
+ /* Read virtual MAC address from Control Register File. */
+ for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
+
+ SK_IN8(IoC, B2_MAC_1 + j, &VirtualMacAddress.a[j]);
+ VirtualMacAddressSet |= VirtualMacAddress.a[j];
+ }
+
+ PhysicalAMacAddressSet = SK_FALSE;
+ /* Read physical MAC address for MAC A from Control Register File. */
+ for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
+
+ SK_IN8(IoC, B2_MAC_2 + j, &PhysicalAMacAddress.a[j]);
+ PhysicalAMacAddressSet |= PhysicalAMacAddress.a[j];
+ }
+
+		/* Check whether the two MAC addresses contain reasonable values. */
+ if (!VirtualMacAddressSet || !PhysicalAMacAddressSet) {
+
+ pAC->Rlmt.RlmtOff = SK_TRUE;
+ }
+
+		/* If the two MAC addresses are equal, switch off the RLMT_PRE_LOOKAHEAD
+		   and the RLMT_LOOKAHEAD macros. */
+ else if (SK_ADDR_EQUAL(PhysicalAMacAddress.a, VirtualMacAddress.a)) {
+
+ pAC->Rlmt.RlmtOff = SK_TRUE;
+ }
+ else {
+ pAC->Rlmt.RlmtOff = SK_FALSE;
+ }
+ break;
+
+ default: /* error */
+ break;
+ }
+ return;
+} /* SkRlmtInit */
+
+
+/******************************************************************************
+ *
+ * SkRlmtBuildCheckChain - build the check chain
+ *
+ * Description:
+ * This routine builds the local check chain:
+ * - Each port that is up checks the next port.
+ * - The last port that is up checks the first port that is up.
+ *
+ * Notes:
+ * - Currently only local ports are considered when building the chain.
+ * - Currently the SuspectState is just reset;
+ * it would be better to save it ...
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtBuildCheckChain(
+SK_AC *pAC, /* Adapter Context */
+SK_U32 NetIdx) /* Net Number */
+{
+ SK_U32 i;
+ SK_U32 NumMacsUp;
+ SK_RLMT_PORT * FirstMacUp;
+ SK_RLMT_PORT * PrevMacUp;
+
+ FirstMacUp = NULL;
+ PrevMacUp = NULL;
+
+ if (!(pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) {
+		for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
+ }
+ return; /* Done. */
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SkRlmtBuildCheckChain.\n"))
+
+ NumMacsUp = 0;
+
+ for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortsSuspect = 0;
+ pAC->Rlmt.Net[NetIdx].Port[i]->CheckingState &=
+ ~(SK_RLMT_PCS_RX | SK_RLMT_PCS_TX);
+
+ /*
+ * If more than two links are detected we should consider
+ * checking at least two other ports:
+ * 1. the next port that is not LinkDown and
+ * 2. the next port that is not PortDown.
+ */
+ if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) {
+ if (NumMacsUp == 0) {
+ FirstMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
+ }
+ else {
+ PrevMacUp->PortCheck[
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked].CheckAddr =
+ pAC->Rlmt.Net[NetIdx].Port[i]->AddrPort->CurrentMacAddress;
+ PrevMacUp->PortCheck[
+ PrevMacUp->PortsChecked].SuspectTx = SK_FALSE;
+ PrevMacUp->PortsChecked++;
+ }
+ PrevMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
+ NumMacsUp++;
+ }
+ }
+
+ if (NumMacsUp > 1) {
+ PrevMacUp->PortCheck[PrevMacUp->PortsChecked].CheckAddr =
+ FirstMacUp->AddrPort->CurrentMacAddress;
+ PrevMacUp->PortCheck[PrevMacUp->PortsChecked].SuspectTx =
+ SK_FALSE;
+ PrevMacUp->PortsChecked++;
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Port %d checks %d other ports: %2X.\n", i,
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked,
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortCheck[0].CheckAddr.a[5]))
+ }
+#endif /* DEBUG */
+
+ return;
+} /* SkRlmtBuildCheckChain */
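+
+/*
+ * Example (illustrative): with three ports on a net where ports 0 and 2 have
+ * their link up and port 1 is LinkDown, the chain built above is: port 0
+ * checks port 2, port 2 checks port 0, and port 1 checks nothing.
+ */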
+
+
+/******************************************************************************
+ *
+ * SkRlmtBuildPacket - build an RLMT packet
+ *
+ * Description:
+ * This routine sets up an RLMT packet.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * NULL or pointer to RLMT mbuf
+ */
+RLMT_STATIC SK_MBUF *SkRlmtBuildPacket(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber, /* Sending port */
+SK_U16 PacketType, /* RLMT packet type */
+SK_MAC_ADDR *SrcAddr, /* Source address */
+SK_MAC_ADDR *DestAddr) /* Destination address */
+{
+ int i;
+ SK_U16 Length;
+ SK_MBUF *pMb;
+ SK_RLMT_PACKET *pPacket;
+
+#ifdef DEBUG
+ SK_U8 CheckSrc = 0;
+ SK_U8 CheckDest = 0;
+
+ for (i = 0; i < SK_MAC_ADDR_LEN; ++i) {
+ CheckSrc |= SrcAddr->a[i];
+ CheckDest |= DestAddr->a[i];
+ }
+
+ if ((CheckSrc == 0) || (CheckDest == 0)) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_ERR,
+ ("SkRlmtBuildPacket: Invalid %s%saddr.\n",
+ (CheckSrc == 0 ? "Src" : ""), (CheckDest == 0 ? "Dest" : "")))
+ }
+#endif
+
+ if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) != NULL) {
+ pPacket = (SK_RLMT_PACKET*)pMb->pData;
+ for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
+ pPacket->DstAddr[i] = DestAddr->a[i];
+ pPacket->SrcAddr[i] = SrcAddr->a[i];
+ }
+ pPacket->DSap = SK_RLMT_DSAP;
+ pPacket->SSap = SK_RLMT_SSAP;
+ pPacket->Ctrl = SK_RLMT_CTRL;
+ pPacket->Indicator[0] = SK_RLMT_INDICATOR0;
+ pPacket->Indicator[1] = SK_RLMT_INDICATOR1;
+ pPacket->Indicator[2] = SK_RLMT_INDICATOR2;
+ pPacket->Indicator[3] = SK_RLMT_INDICATOR3;
+ pPacket->Indicator[4] = SK_RLMT_INDICATOR4;
+ pPacket->Indicator[5] = SK_RLMT_INDICATOR5;
+ pPacket->Indicator[6] = SK_RLMT_INDICATOR6;
+
+ SK_U16_TO_NETWORK_ORDER(PacketType, &pPacket->RlmtPacketType[0]);
+
+ for (i = 0; i < 4; i++) {
+ pPacket->Random[i] = pAC->Rlmt.Port[PortNumber].Random[i];
+ }
+
+ SK_U16_TO_NETWORK_ORDER(
+ SK_RLMT_PACKET_VERSION, &pPacket->RlmtPacketVersion[0]);
+
+ for (i = 0; i < SK_PACKET_DATA_LEN; i++) {
+ pPacket->Data[i] = 0x00;
+ }
+
+ Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */
+ pMb->Length = Length;
+ pMb->PortIdx = PortNumber;
+ Length -= 14;
+ SK_U16_TO_NETWORK_ORDER(Length, &pPacket->TypeLen[0]);
+
+ if (PacketType == SK_PACKET_ALIVE) {
+ pAC->Rlmt.Port[PortNumber].TxHelloCts++;
+ }
+ }
+
+ return (pMb);
+} /* SkRlmtBuildPacket */
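+
+/*
+ * Note (illustrative): TypeLen receives Length - 14, i.e. the frame length
+ * minus the 6 + 6 + 2 byte MAC header; with SK_RLMT_MAX_PACKET_SIZE == 60
+ * the 802.3 length field is therefore 46, the size of the LLC/RLMT payload
+ * that follows the header.
+ */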
+
+
+/******************************************************************************
+ *
+ * SkRlmtBuildSpanningTreePacket - build spanning tree check packet
+ *
+ * Description:
+ * This routine sets up a BPDU packet for spanning tree check.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * NULL or pointer to RLMT mbuf
+ */
+RLMT_STATIC SK_MBUF *SkRlmtBuildSpanningTreePacket(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber) /* Sending port */
+{
+ unsigned i;
+ SK_U16 Length;
+ SK_MBUF *pMb;
+ SK_SPTREE_PACKET *pSPacket;
+
+ if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) !=
+ NULL) {
+ pSPacket = (SK_SPTREE_PACKET*)pMb->pData;
+ for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
+ pSPacket->DstAddr[i] = BridgeMcAddr.a[i];
+ pSPacket->SrcAddr[i] =
+ pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i];
+ }
+ pSPacket->DSap = SK_RLMT_SPT_DSAP;
+ pSPacket->SSap = SK_RLMT_SPT_SSAP;
+ pSPacket->Ctrl = SK_RLMT_SPT_CTRL;
+
+ pSPacket->ProtocolId[0] = SK_RLMT_SPT_PROTOCOL_ID0;
+ pSPacket->ProtocolId[1] = SK_RLMT_SPT_PROTOCOL_ID1;
+ pSPacket->ProtocolVersionId = SK_RLMT_SPT_PROTOCOL_VERSION_ID;
+ pSPacket->BpduType = SK_RLMT_SPT_BPDU_TYPE;
+ pSPacket->Flags = SK_RLMT_SPT_FLAGS;
+ pSPacket->RootId[0] = SK_RLMT_SPT_ROOT_ID0;
+ pSPacket->RootId[1] = SK_RLMT_SPT_ROOT_ID1;
+ pSPacket->RootPathCost[0] = SK_RLMT_SPT_ROOT_PATH_COST0;
+ pSPacket->RootPathCost[1] = SK_RLMT_SPT_ROOT_PATH_COST1;
+ pSPacket->RootPathCost[2] = SK_RLMT_SPT_ROOT_PATH_COST2;
+ pSPacket->RootPathCost[3] = SK_RLMT_SPT_ROOT_PATH_COST3;
+ pSPacket->BridgeId[0] = SK_RLMT_SPT_BRIDGE_ID0;
+ pSPacket->BridgeId[1] = SK_RLMT_SPT_BRIDGE_ID1;
+
+ /*
+ * Use logical MAC address as bridge ID and filter these packets
+ * on receive.
+ */
+ for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
+ pSPacket->BridgeId[i + 2] = pSPacket->RootId[i + 2] =
+ pAC->Addr.Net[pAC->Rlmt.Port[PortNumber].Net->NetNumber].
+ CurrentMacAddress.a[i];
+ }
+ pSPacket->PortId[0] = SK_RLMT_SPT_PORT_ID0;
+ pSPacket->PortId[1] = SK_RLMT_SPT_PORT_ID1;
+ pSPacket->MessageAge[0] = SK_RLMT_SPT_MSG_AGE0;
+ pSPacket->MessageAge[1] = SK_RLMT_SPT_MSG_AGE1;
+ pSPacket->MaxAge[0] = SK_RLMT_SPT_MAX_AGE0;
+ pSPacket->MaxAge[1] = SK_RLMT_SPT_MAX_AGE1;
+ pSPacket->HelloTime[0] = SK_RLMT_SPT_HELLO_TIME0;
+ pSPacket->HelloTime[1] = SK_RLMT_SPT_HELLO_TIME1;
+ pSPacket->ForwardDelay[0] = SK_RLMT_SPT_FWD_DELAY0;
+ pSPacket->ForwardDelay[1] = SK_RLMT_SPT_FWD_DELAY1;
+
+ Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */
+ pMb->Length = Length;
+ pMb->PortIdx = PortNumber;
+ Length -= 14;
+ SK_U16_TO_NETWORK_ORDER(Length, &pSPacket->TypeLen[0]);
+
+ pAC->Rlmt.Port[PortNumber].TxSpHelloReqCts++;
+ }
+
+ return (pMb);
+} /* SkRlmtBuildSpanningTreePacket */
+
+
+/******************************************************************************
+ *
+ * SkRlmtSend - build and send check packets
+ *
+ * Description:
+ * Depending on the RLMT state and the checking state, several packets
+ * are sent through the indicated port.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing.
+ */
+RLMT_STATIC void SkRlmtSend(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber) /* Sending port */
+{
+ unsigned j;
+ SK_EVPARA Para;
+ SK_RLMT_PORT *pRPort;
+
+ pRPort = &pAC->Rlmt.Port[PortNumber];
+ if (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) {
+ if (pRPort->CheckingState & (SK_RLMT_PCS_TX | SK_RLMT_PCS_RX)) {
+ /* Port is suspicious. Send the RLMT packet to the RLMT mc addr. */
+ if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
+ SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
+ &SkRlmtMcAddr)) != NULL) {
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ }
+ else {
+ /*
+ * Send a directed RLMT packet to all ports that are
+ * checked by the indicated port.
+ */
+ for (j = 0; j < pRPort->PortsChecked; j++) {
+ if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
+ SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
+ &pRPort->PortCheck[j].CheckAddr)) != NULL) {
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ }
+ }
+ }
+
+ if ((pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) &&
+ (pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEND_SEG)) {
+ /*
+ * Send a BPDU packet to make a connected switch tell us
+ * the correct root bridge.
+ */
+ if ((Para.pParaPtr =
+ SkRlmtBuildSpanningTreePacket(pAC, IoC, PortNumber)) != NULL) {
+ pAC->Rlmt.Port[PortNumber].Net->CheckingState &= ~SK_RLMT_RCS_SEND_SEG;
+ pRPort->RootIdSet = SK_FALSE;
+
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_TX,
+ ("SkRlmtSend: BPDU Packet on Port %u.\n", PortNumber))
+ }
+ }
+ return;
+} /* SkRlmtSend */
+
+
+/******************************************************************************
+ *
+ * SkRlmtPortReceives - check if port is (going) down and bring it up
+ *
+ * Description:
+ *	This routine checks whether a port that received a non-BPDU packet
+ *	needs to go up or whether its going-down process needs to be stopped.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing.
+ */
+RLMT_STATIC void SkRlmtPortReceives(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber) /* Port to check */
+{
+ SK_RLMT_PORT *pRPort;
+ SK_EVPARA Para;
+
+ pRPort = &pAC->Rlmt.Port[PortNumber];
+ pRPort->PortNoRx = SK_FALSE;
+
+ if ((pRPort->PortState == SK_RLMT_PS_DOWN) &&
+ !(pRPort->CheckingState & SK_RLMT_PCS_TX)) {
+ /*
+ * Port is marked down (rx), but received a non-BPDU packet.
+ * Bring it up.
+ */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Received on PortDown.\n"))
+
+ pRPort->PortState = SK_RLMT_PS_GOING_UP;
+ pRPort->GuTimeStamp = SkOsGetTime(pAC);
+ Para.Para32[0] = PortNumber;
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL,
+ SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para);
+ pRPort->CheckingState &= ~SK_RLMT_PCS_RX;
+ /* pAC->Rlmt.CheckSwitch = SK_TRUE; */
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+ } /* PortDown && !SuspectTx */
+ else if (pRPort->CheckingState & SK_RLMT_PCS_RX) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Stop bringing port down.\n"))
+ SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
+ pRPort->CheckingState &= ~SK_RLMT_PCS_RX;
+ /* pAC->Rlmt.CheckSwitch = SK_TRUE; */
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+ } /* PortGoingDown */
+
+ return;
+} /* SkRlmtPortReceives */
+
+
+/******************************************************************************
+ *
+ * SkRlmtPacketReceive - receive a packet for closer examination
+ *
+ * Description:
+ * This routine examines a packet more closely than SK_RLMT_LOOKAHEAD.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing.
+ */
+RLMT_STATIC void SkRlmtPacketReceive(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_MBUF *pMb) /* Received packet */
+{
+#ifdef xDEBUG
+ extern void DumpData(char *p, int size);
+#endif /* DEBUG */
+ int i;
+ unsigned j;
+ SK_U16 PacketType;
+ SK_U32 PortNumber;
+ SK_ADDR_PORT *pAPort;
+ SK_RLMT_PORT *pRPort;
+ SK_RLMT_PACKET *pRPacket;
+ SK_SPTREE_PACKET *pSPacket;
+ SK_EVPARA Para;
+
+ PortNumber = pMb->PortIdx;
+ pAPort = &pAC->Addr.Port[PortNumber];
+ pRPort = &pAC->Rlmt.Port[PortNumber];
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: PortNumber == %d.\n", PortNumber))
+
+ pRPacket = (SK_RLMT_PACKET*)pMb->pData;
+ pSPacket = (SK_SPTREE_PACKET*)pRPacket;
+
+#ifdef xDEBUG
+ DumpData((char *)pRPacket, 32);
+#endif /* DEBUG */
+
+ if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) != 0) {
+ SkRlmtPortReceives(pAC, IoC, PortNumber);
+ }
+
+ /* Check destination address. */
+
+ if (!SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->DstAddr) &&
+ !SK_ADDR_EQUAL(SkRlmtMcAddr.a, pRPacket->DstAddr) &&
+ !SK_ADDR_EQUAL(BridgeMcAddr.a, pRPacket->DstAddr)) {
+
+ /* Not sent to current MAC or registered MC address => Trash it. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Not for me.\n"))
+
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ return;
+ }
+ else if (SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->SrcAddr)) {
+
+ /*
+ * Was sent by same port (may happen during port switching
+ * or in case of duplicate MAC addresses).
+ */
+
+ /*
+ * Check for duplicate address here:
+ * If Packet.Random != My.Random => DupAddr.
+ */
+ for (i = 3; i >= 0; i--) {
+ if (pRPort->Random[i] != pRPacket->Random[i]) {
+ break;
+ }
+ }
+
+ /*
+ * CAUTION: Do not check for duplicate MAC address in RLMT Alive Reply
+ * packets (they have the LLC_COMMAND_RESPONSE_BIT set in
+ * pRPacket->SSap).
+ */
+ if (i >= 0 && pRPacket->DSap == SK_RLMT_DSAP &&
+ pRPacket->Ctrl == SK_RLMT_CTRL &&
+ pRPacket->SSap == SK_RLMT_SSAP &&
+ pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 &&
+ pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 &&
+ pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 &&
+ pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 &&
+ pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 &&
+ pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 &&
+ pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Duplicate MAC Address.\n"))
+
+ /* Error Log entry. */
+ SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E006, SKERR_RLMT_E006_MSG);
+ }
+ else {
+ /* Simply trash it. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Sent by me.\n"))
+ }
+
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ return;
+ }
+
+ /* Check SuspectTx entries. */
+ if (pRPort->PortsSuspect > 0) {
+ for (j = 0; j < pRPort->PortsChecked; j++) {
+ if (pRPort->PortCheck[j].SuspectTx &&
+ SK_ADDR_EQUAL(
+ pRPacket->SrcAddr, pRPort->PortCheck[j].CheckAddr.a)) {
+ pRPort->PortCheck[j].SuspectTx = SK_FALSE;
+ pRPort->PortsSuspect--;
+ break;
+ }
+ }
+ }
+
+ /* Determine type of packet. */
+ if (pRPacket->DSap == SK_RLMT_DSAP &&
+ pRPacket->Ctrl == SK_RLMT_CTRL &&
+ (pRPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SSAP &&
+ pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 &&
+ pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 &&
+ pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 &&
+ pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 &&
+ pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 &&
+ pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 &&
+ pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) {
+
+ /* It's an RLMT packet. */
+ PacketType = (SK_U16)((pRPacket->RlmtPacketType[0] << 8) |
+ pRPacket->RlmtPacketType[1]);
+
+ switch (PacketType) {
+ case SK_PACKET_ANNOUNCE: /* Not yet used. */
+#if 0
+ /* Build the check chain. */
+ SkRlmtBuildCheckChain(pAC);
+#endif /* 0 */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Announce.\n"))
+
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ break;
+
+ case SK_PACKET_ALIVE:
+ if (pRPacket->SSap & LLC_COMMAND_RESPONSE_BIT) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Alive Reply.\n"))
+
+ if (!(pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_LLC) ||
+ SK_ADDR_EQUAL(
+ pRPacket->DstAddr, pAPort->CurrentMacAddress.a)) {
+ /* Obviously we could send something. */
+ if (pRPort->CheckingState & SK_RLMT_PCS_TX) {
+ pRPort->CheckingState &= ~SK_RLMT_PCS_TX;
+ SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
+ }
+
+ if ((pRPort->PortState == SK_RLMT_PS_DOWN) &&
+ !(pRPort->CheckingState & SK_RLMT_PCS_RX)) {
+ pRPort->PortState = SK_RLMT_PS_GOING_UP;
+ pRPort->GuTimeStamp = SkOsGetTime(pAC);
+
+ SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
+
+ Para.Para32[0] = PortNumber;
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->UpTimer,
+ SK_RLMT_PORTUP_TIM_VAL, SKGE_RLMT,
+ SK_RLMT_PORTUP_TIM, Para);
+ }
+ }
+
+ /* Mark sending port as alive? */
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ }
+ else { /* Alive Request Packet. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Alive Request.\n"))
+
+ pRPort->RxHelloCts++;
+
+ /* Answer. */
+ for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
+ pRPacket->DstAddr[i] = pRPacket->SrcAddr[i];
+ pRPacket->SrcAddr[i] =
+ pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i];
+ }
+ pRPacket->SSap |= LLC_COMMAND_RESPONSE_BIT;
+
+ Para.pParaPtr = pMb;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ break;
+
+ case SK_PACKET_CHECK_TX:
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Check your tx line.\n"))
+
+ /* A port checking us requests us to check our tx line. */
+ pRPort->CheckingState |= SK_RLMT_PCS_TX;
+
+ /* Start PortDownTx timer. */
+ Para.Para32[0] = PortNumber;
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->DownTxTimer,
+ SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT,
+ SK_RLMT_PORTDOWN_TX_TIM, Para);
+
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+
+ if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
+ SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
+ &SkRlmtMcAddr)) != NULL) {
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ break;
+
+ case SK_PACKET_ADDR_CHANGED:
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Address Change.\n"))
+
+ /* Build the check chain. */
+ SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ break;
+
+ default:
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Unknown RLMT packet.\n"))
+
+ /* RA;:;: ??? */
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ }
+ }
+ else if (pSPacket->DSap == SK_RLMT_SPT_DSAP &&
+ pSPacket->Ctrl == SK_RLMT_SPT_CTRL &&
+ (pSPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SPT_SSAP) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: BPDU Packet.\n"))
+
+ /* Spanning Tree packet. */
+ pRPort->RxSpHelloCts++;
+
+ if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pAC->Addr.Net[pAC->Rlmt.
+ Port[PortNumber].Net->NetNumber].CurrentMacAddress.a[0])) {
+ /*
+ * Check segmentation if a new root bridge is set and
+ * the segmentation check is not currently running.
+ */
+ if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pRPort->Root.Id[2]) &&
+ (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) &&
+ (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG)
+ != 0 && (pAC->Rlmt.Port[PortNumber].Net->CheckingState &
+ SK_RLMT_RCS_SEG) == 0) {
+ pAC->Rlmt.Port[PortNumber].Net->CheckingState |=
+ SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG;
+ }
+
+ /* Store tree view of this port. */
+ for (i = 0; i < 8; i++) {
+ pRPort->Root.Id[i] = pSPacket->RootId[i];
+ }
+ pRPort->RootIdSet = SK_TRUE;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP,
+ ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ PortNumber,
+ pRPort->Root.Id[0], pRPort->Root.Id[1],
+ pRPort->Root.Id[2], pRPort->Root.Id[3],
+ pRPort->Root.Id[4], pRPort->Root.Id[5],
+ pRPort->Root.Id[6], pRPort->Root.Id[7]))
+ }
+
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ if ((pAC->Rlmt.Port[PortNumber].Net->CheckingState &
+ SK_RLMT_RCS_REPORT_SEG) != 0) {
+ SkRlmtCheckSeg(pAC, IoC, pAC->Rlmt.Port[PortNumber].Net->NetNumber);
+ }
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
+ ("SkRlmtPacketReceive: Unknown Packet Type.\n"))
+
+ /* Unknown packet. */
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ }
+ return;
+} /* SkRlmtPacketReceive */
+
+
+/******************************************************************************
+ *
+ * SkRlmtCheckPort - check if a port works
+ *
+ * Description:
+ *	This routine checks whether a port whose link is up has received
+ *	anything and whether it seems to transmit successfully.
+ *
+ * # PortState: PsInit, PsLinkDown, PsDown, PsGoingUp, PsUp
+ * # PortCheckingState (Bitfield): ChkTx, ChkRx, ChkSeg
+ * # RlmtCheckingState (Bitfield): ChkSeg, StartChkSeg, ReportSeg
+ *
+ * if (Rx - RxBpdu == 0) { # No rx.
+ * if (state == PsUp) {
+ * PortCheckingState |= ChkRx
+ * }
+ * if (ModeCheckSeg && (Timeout ==
+ * TO_SHORTEN(RLMT_DEFAULT_TIMEOUT))) {
+ * RlmtCheckingState |= ChkSeg)
+ * PortCheckingState |= ChkSeg
+ * }
+ * NewTimeout = TO_SHORTEN(Timeout)
+ * if (NewTimeout < RLMT_MIN_TIMEOUT) {
+ * NewTimeout = RLMT_MIN_TIMEOUT
+ * PortState = PsDown
+ * ...
+ * }
+ * }
+ * else { # something was received
+ * # Set counter to 0 at LinkDown?
+ * # No - rx may be reported after LinkDown ???
+ * PortCheckingState &= ~ChkRx
+ * NewTimeout = RLMT_DEFAULT_TIMEOUT
+ * if (RxAck == 0) {
+ * possible reasons:
+ * is my tx line bad? --
+ * send RLMT multicast and report
+ * back internally? (only possible
+ * between ports on same adapter)
+ * }
+ * if (RxChk == 0) {
+ * possible reasons:
+ * - tx line of port set to check me
+ * maybe bad
+ * - no other port/adapter available or set
+ * to check me
+ * - adapter checking me has a longer
+ * timeout
+ * ??? anything that can be done here?
+ * }
+ * }
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * New timeout value.
+ */
+RLMT_STATIC SK_U32 SkRlmtCheckPort(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber) /* Port to check */
+{
+ unsigned i;
+ SK_U32 NewTimeout;
+ SK_RLMT_PORT *pRPort;
+ SK_EVPARA Para;
+
+ pRPort = &pAC->Rlmt.Port[PortNumber];
+
+ if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) == 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SkRlmtCheckPort %d: No (%d) receives in last time slot.\n",
+ PortNumber, pRPort->PacketsPerTimeSlot))
+
+ /*
+ * Check segmentation if there was no receive at least twice
+ * in a row (PortNoRx is already set) and the segmentation
+ * check is not currently running.
+ */
+
+ if (pRPort->PortNoRx && (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) &&
+ (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) &&
+ !(pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEG)) {
+ pAC->Rlmt.Port[PortNumber].Net->CheckingState |=
+ SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SkRlmtCheckPort: PortsSuspect %d, PcsRx %d.\n",
+ pRPort->PortsSuspect, pRPort->CheckingState & SK_RLMT_PCS_RX))
+
+ if (pRPort->PortState != SK_RLMT_PS_DOWN) {
+ NewTimeout = TO_SHORTEN(pAC->Rlmt.Port[PortNumber].Net->TimeoutValue);
+ if (NewTimeout < SK_RLMT_MIN_TO_VAL) {
+ NewTimeout = SK_RLMT_MIN_TO_VAL;
+ }
+
+ if (!(pRPort->CheckingState & SK_RLMT_PCS_RX)) {
+ Para.Para32[0] = PortNumber;
+ pRPort->CheckingState |= SK_RLMT_PCS_RX;
+
+ /*
+ * What shall we do if the port checked by this one receives
+				 * our request frames? Which is bad: our rx line or its tx line?
+ */
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->DownRxTimer,
+ SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT,
+ SK_RLMT_PORTDOWN_RX_TIM, Para);
+
+ for (i = 0; i < pRPort->PortsChecked; i++) {
+ if (pRPort->PortCheck[i].SuspectTx) {
+ continue;
+ }
+ pRPort->PortCheck[i].SuspectTx = SK_TRUE;
+ pRPort->PortsSuspect++;
+ if ((Para.pParaPtr =
+ SkRlmtBuildPacket(pAC, IoC, PortNumber, SK_PACKET_CHECK_TX,
+ &pAC->Addr.Port[PortNumber].CurrentMacAddress,
+ &pRPort->PortCheck[i].CheckAddr)) != NULL) {
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ }
+ }
+ }
+ else { /* PortDown -- or all partners suspect. */
+ NewTimeout = SK_RLMT_DEF_TO_VAL;
+ }
+ pRPort->PortNoRx = SK_TRUE;
+ }
+ else { /* A non-BPDU packet was received. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SkRlmtCheckPort %d: %d (%d) receives in last time slot.\n",
+ PortNumber,
+ pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot,
+ pRPort->PacketsPerTimeSlot))
+
+ SkRlmtPortReceives(pAC, IoC, PortNumber);
+ if (pAC->Rlmt.CheckSwitch) {
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+ }
+
+ NewTimeout = SK_RLMT_DEF_TO_VAL;
+ }
+
+ return (NewTimeout);
+} /* SkRlmtCheckPort */
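+
+/*
+ * Timeout example (illustrative): starting from SK_RLMT_DEF_TO_VAL
+ * (1,000,000 us), each time slot without receives halves the timeout via
+ * TO_SHORTEN(): 500,000, 250,000, then 125,000 us, where SK_RLMT_MIN_TO_VAL
+ * clamps it. Any non-BPDU receive resets it to the default value.
+ */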
+
+
+/******************************************************************************
+ *
+ * SkRlmtSelectBcRx - select new active port, criteria 1 (CLP)
+ *
+ * Description:
+ * This routine selects the port that received a broadcast frame
+ * substantially later than all other ports.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * SK_BOOL
+ */
+RLMT_STATIC SK_BOOL SkRlmtSelectBcRx(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Active, /* Active port */
+SK_U32 PrefPort, /* Preferred port */
+SK_U32 *pSelect) /* New active port */
+{
+ SK_U64 BcTimeStamp;
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+	BcTimeStamp = 0;	/* Not strictly necessary, but safer. */
+ PortFound = SK_FALSE;
+
+ /* Select port with the latest TimeStamp. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("TimeStamp Port %d (Down: %d, NoRx: %d): %08x %08x.\n",
+ i,
+ pAC->Rlmt.Port[i].PortDown, pAC->Rlmt.Port[i].PortNoRx,
+ *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_HI32),
+ *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_LO32)))
+
+ if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx) {
+ if (!PortFound || pAC->Rlmt.Port[i].BcTimeStamp > BcTimeStamp) {
+ BcTimeStamp = pAC->Rlmt.Port[i].BcTimeStamp;
+ *pSelect = i;
+ PortFound = SK_TRUE;
+ }
+ }
+ }
+
+ if (PortFound) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Port %d received the last broadcast.\n", *pSelect))
+
+		/* Check whether another port's time stamp is similar. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (i == *pSelect) {
+ continue;
+ }
+ if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx &&
+ (pAC->Rlmt.Port[i].BcTimeStamp >
+ BcTimeStamp - SK_RLMT_BC_DELTA ||
+ pAC->Rlmt.Port[i].BcTimeStamp +
+ SK_RLMT_BC_DELTA > BcTimeStamp)) {
+ PortFound = SK_FALSE;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Port %d received a broadcast at a similar time.\n", i))
+ break;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ if (PortFound) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SELECT_BCRX found Port %d receiving the substantially "
+ "latest broadcast (%u).\n",
+ *pSelect,
+ BcTimeStamp - pAC->Rlmt.Port[1 - *pSelect].BcTimeStamp))
+ }
+#endif /* DEBUG */
+
+ return (PortFound);
+} /* SkRlmtSelectBcRx */
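+
+/*
+ * Example (illustrative, assuming SK_RLMT_BC_DELTA == 8 ticks): if port 0 saw
+ * its last broadcast at tick 1000 and port 1 at tick 1010, port 1 is selected;
+ * if the stamps were 1000 and 1005, they are too close together and this
+ * criterion selects no port.
+ */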
+
+
+/******************************************************************************
+ *
+ * SkRlmtSelectNotSuspect - select new active port, criteria 2 (CLP)
+ *
+ * Description:
+ * This routine selects a good port (it is PortUp && !SuspectRx).
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * SK_BOOL
+ */
+RLMT_STATIC SK_BOOL SkRlmtSelectNotSuspect(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Active, /* Active port */
+SK_U32 PrefPort, /* Preferred port */
+SK_U32 *pSelect) /* New active port */
+{
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+ PortFound = SK_FALSE;
+
+ /* Select first port that is PortUp && !SuspectRx. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (!pAC->Rlmt.Port[i].PortDown &&
+ !(pAC->Rlmt.Port[i].CheckingState & SK_RLMT_PCS_RX)) {
+ *pSelect = i;
+ if (!pAC->Rlmt.Port[Active].PortDown &&
+ !(pAC->Rlmt.Port[Active].CheckingState & SK_RLMT_PCS_RX)) {
+ *pSelect = Active;
+ }
+ if (!pAC->Rlmt.Port[PrefPort].PortDown &&
+ !(pAC->Rlmt.Port[PrefPort].CheckingState & SK_RLMT_PCS_RX)) {
+ *pSelect = PrefPort;
+ }
+ PortFound = SK_TRUE;
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SELECT_NOTSUSPECT found Port %d up and not check RX.\n",
+ *pSelect))
+ break;
+ }
+ }
+ return (PortFound);
+} /* SkRlmtSelectNotSuspect */
+
+
+/******************************************************************************
+ *
+ * SkRlmtSelectUp - select new active port, criteria 3, 4 (CLP)
+ *
+ * Description:
+ * This routine selects a port that is up.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * SK_BOOL
+ */
+RLMT_STATIC SK_BOOL SkRlmtSelectUp(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Active, /* Active port */
+SK_U32 PrefPort, /* Preferred port */
+SK_U32 *pSelect, /* New active port */
+SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
+{
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+ PortFound = SK_FALSE;
+
+ /* Select first port that is PortUp. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_UP &&
+ pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
+ *pSelect = i;
+ if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_UP &&
+ pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) {
+ *pSelect = Active;
+ }
+ if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_UP &&
+ pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) {
+ *pSelect = PrefPort;
+ }
+ PortFound = SK_TRUE;
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SELECT_UP found Port %d up.\n", *pSelect))
+ break;
+ }
+ }
+ return (PortFound);
+} /* SkRlmtSelectUp */
+
+
+/******************************************************************************
+ *
+ * SkRlmtSelectGoingUp - select new active port, criteria 5, 6 (CLP)
+ *
+ * Description:
+ *	This routine selects the port that has been going up for the longest time.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * SK_BOOL
+ */
+RLMT_STATIC SK_BOOL SkRlmtSelectGoingUp(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Active, /* Active port */
+SK_U32 PrefPort, /* Preferred port */
+SK_U32 *pSelect, /* New active port */
+SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
+{
+ SK_U64 GuTimeStamp;
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+ GuTimeStamp = 0;
+ PortFound = SK_FALSE;
+
+ /* Select port that is PortGoingUp for the longest time. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP &&
+ pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
+ GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp;
+ *pSelect = i;
+ PortFound = SK_TRUE;
+ break;
+ }
+ }
+
+ if (!PortFound) {
+ return (SK_FALSE);
+ }
+
+ for (i = *pSelect + 1; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP &&
+ pAC->Rlmt.Port[i].GuTimeStamp < GuTimeStamp &&
+ pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
+ GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp;
+ *pSelect = i;
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SELECT_GOINGUP found Port %d going up.\n", *pSelect))
+ return (SK_TRUE);
+} /* SkRlmtSelectGoingUp */
+
+
+/******************************************************************************
+ *
+ * SkRlmtSelectDown - select new active port, criteria 7, 8 (CLP)
+ *
+ * Description:
+ * This routine selects a port that is down.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * SK_BOOL
+ */
+RLMT_STATIC SK_BOOL SkRlmtSelectDown(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Active, /* Active port */
+SK_U32 PrefPort, /* Preferred port */
+SK_U32 *pSelect, /* New active port */
+SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
+{
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+ PortFound = SK_FALSE;
+
+ /* Select first port that is PortDown. */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_DOWN &&
+ pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
+ *pSelect = i;
+ if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_DOWN &&
+ pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) {
+ *pSelect = Active;
+ }
+ if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_DOWN &&
+ pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) {
+ *pSelect = PrefPort;
+ }
+ PortFound = SK_TRUE;
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SELECT_DOWN found Port %d down.\n", *pSelect))
+ break;
+ }
+ }
+ return (PortFound);
+} /* SkRlmtSelectDown */
+
+
+/******************************************************************************
+ *
+ * SkRlmtCheckSwitch - select new active port and switch to it
+ *
+ * Description:
+ * This routine decides which port should be the active one and queues
+ * port switching if necessary.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing.
+ */
+RLMT_STATIC void SkRlmtCheckSwitch(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 NetIdx) /* Net index */
+{
+ SK_EVPARA Para;
+ SK_U32 Active;
+ SK_U32 PrefPort;
+ SK_U32 i;
+ SK_BOOL PortFound;
+
+ Active = pAC->Rlmt.Net[NetIdx].ActivePort; /* Index of active port. */
+ PrefPort = pAC->Rlmt.Net[NetIdx].PrefPort; /* Index of preferred port. */
+ PortFound = SK_FALSE;
+ pAC->Rlmt.CheckSwitch = SK_FALSE;
+
+#if 0	/* RW 2001/10/18 - active port always becomes the preferred one */
+ if (pAC->Rlmt.Net[NetIdx].Preference == 0xFFFFFFFF) { /* Automatic */
+ /* disable auto-fail back */
+ PrefPort = Active;
+ }
+#endif
+
+ if (pAC->Rlmt.Net[NetIdx].LinksUp == 0) {
+ /* Last link went down - shut down the net. */
+ pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_DOWN;
+ Para.Para32[0] = SK_RLMT_NET_DOWN_TEMP;
+ Para.Para32[1] = NetIdx;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para);
+
+ Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
+ Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber;
+ Para.Para32[1] = NetIdx;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para);
+ return;
+ } /* pAC->Rlmt.LinksUp == 0 */
+ else if (pAC->Rlmt.Net[NetIdx].LinksUp == 1 &&
+ pAC->Rlmt.Net[NetIdx].RlmtState == SK_RLMT_RS_NET_DOWN) {
+ /* First link came up - get the net up. */
+ pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_UP;
+
+ /*
+ * If pAC->Rlmt.ActivePort != Para.Para32[0],
+ * the DRV switches to the port that came up.
+ */
+ for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+ if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) {
+ if (!pAC->Rlmt.Net[NetIdx].Port[Active]->LinkDown) {
+ i = Active;
+ }
+ if (!pAC->Rlmt.Net[NetIdx].Port[PrefPort]->LinkDown) {
+ i = PrefPort;
+ }
+ PortFound = SK_TRUE;
+ break;
+ }
+ }
+
+ if (PortFound) {
+ Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber;
+ Para.Para32[1] = NetIdx;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para);
+
+ pAC->Rlmt.Net[NetIdx].ActivePort = i;
+ Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber;
+ Para.Para32[1] = NetIdx;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_UP, Para);
+
+ if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
+ (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC,
+ pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber,
+ SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx].
+ CurrentMacAddress, &SkRlmtMcAddr)) != NULL) {
+ /*
+ * Send announce packet to RLMT multicast address to force
+ * switches to learn the new location of the logical MAC address.
+ */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ }
+ }
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E007, SKERR_RLMT_E007_MSG);
+ }
+
+ return;
+ } /* LinksUp == 1 && RlmtState == SK_RLMT_RS_NET_DOWN */
+ else { /* Cannot be reached in dual-net mode. */
+ Para.Para32[0] = Active;
+
+ /*
+ * Preselection:
+ * If RLMT Mode != CheckLinkState
+ * select port that received a broadcast frame substantially later
+ * than all other ports
+ * else select first port that is not SuspectRx
+ * else select first port that is PortUp
+ * else select port that is PortGoingUp for the longest time
+ * else select first port that is PortDown
+ * else stop.
+ *
+ * For the preselected port:
+ * If ActivePort is equal in quality, select ActivePort.
+ *
+ * If PrefPort is equal in quality, select PrefPort.
+ *
+ * If ActivePort != SelectedPort,
+ * If old ActivePort is LinkDown,
+ * SwitchHard
+ * else
+ * SwitchSoft
+ */
+ /* check of ChgBcPrio flag added */
+ if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) &&
+ (!pAC->Rlmt.Net[0].ChgBcPrio)) {
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectBcRx(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1]);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectNotSuspect(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1]);
+ }
+ } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */
+
+ /* with changed priority for last broadcast received */
+ if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) &&
+ (pAC->Rlmt.Net[0].ChgBcPrio)) {
+ if (!PortFound) {
+ PortFound = SkRlmtSelectNotSuspect(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1]);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectBcRx(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1]);
+ }
+ } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectUp(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectUp(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectGoingUp(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectGoingUp(
+ pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
+ }
+
+ if (pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) {
+ if (!PortFound) {
+ PortFound = SkRlmtSelectDown(pAC, IoC,
+ Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
+ }
+
+ if (!PortFound) {
+ PortFound = SkRlmtSelectDown(pAC, IoC,
+ Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
+ }
+ } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */
+
+ if (PortFound) {
+
+ if (Para.Para32[1] != Active) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Active: %d, Para1: %d.\n", Active, Para.Para32[1]))
+ pAC->Rlmt.Net[NetIdx].ActivePort = Para.Para32[1];
+ Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
+ Port[Para.Para32[0]]->PortNumber;
+ Para.Para32[1] = pAC->Rlmt.Net[NetIdx].
+ Port[Para.Para32[1]]->PortNumber;
+ SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[1], SK_LED_ACTIVE);
+ if (pAC->Rlmt.Port[Active].LinkDown) {
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_HARD, Para);
+ }
+ else {
+ SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY);
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_SOFT, Para);
+ }
+ Para.Para32[1] = NetIdx;
+ Para.Para32[0] =
+ pAC->Rlmt.Net[NetIdx].Port[Para.Para32[0]]->PortNumber;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para);
+ Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
+ Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para);
+ if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
+ (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, Para.Para32[0],
+ SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx].CurrentMacAddress,
+ &SkRlmtMcAddr)) != NULL) {
+ /*
+ * Send announce packet to RLMT multicast address to force
+ * switches to learn the new location of the logical
+ * MAC address.
+ */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
+ } /* (Para.pParaPtr = SkRlmtBuildPacket(...)) != NULL */
+ } /* Para.Para32[1] != Active */
+ } /* PortFound */
+ else {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E004, SKERR_RLMT_E004_MSG);
+ }
+ } /* LinksUp > 1 || LinksUp == 1 && RlmtState != SK_RLMT_RS_NET_DOWN */
+ return;
+} /* SkRlmtCheckSwitch */
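+
+/*
+ * Example (illustrative): with two ports where port 0 is the active port but
+ * PortDown and port 1 is up and not suspect, the preselection cascade above
+ * settles on port 1; since that differs from the active port, RLMT queues
+ * SK_DRV_SWITCH_HARD if port 0's link is down (otherwise SK_DRV_SWITCH_SOFT)
+ * together with the PNMI active-down/active-up events.
+ */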
+
+
+/******************************************************************************
+ *
+ * SkRlmtCheckSeg - Report if segmentation is detected
+ *
+ * Description:
+ * This routine checks if the ports see different root bridges and reports
+ * segmentation in such a case.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing.
+ */
+RLMT_STATIC void SkRlmtCheckSeg(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 NetIdx) /* Net number */
+{
+ SK_EVPARA Para;
+ SK_RLMT_NET *pNet;
+ SK_U32 i, j;
+ SK_BOOL Equal;
+
+ pNet = &pAC->Rlmt.Net[NetIdx];
+ pNet->RootIdSet = SK_FALSE;
+ Equal = SK_TRUE;
+
+ for (i = 0; i < pNet->NumPorts; i++) {
+ if (pNet->Port[i]->LinkDown || !pNet->Port[i]->RootIdSet) {
+ continue;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP,
+ ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n", i,
+ pNet->Port[i]->Root.Id[0], pNet->Port[i]->Root.Id[1],
+ pNet->Port[i]->Root.Id[2], pNet->Port[i]->Root.Id[3],
+ pNet->Port[i]->Root.Id[4], pNet->Port[i]->Root.Id[5],
+ pNet->Port[i]->Root.Id[6], pNet->Port[i]->Root.Id[7]))
+
+ if (!pNet->RootIdSet) {
+ pNet->Root = pNet->Port[i]->Root;
+ pNet->RootIdSet = SK_TRUE;
+ continue;
+ }
+
+ for (j = 0; j < 8; j ++) {
+ Equal &= pNet->Port[i]->Root.Id[j] == pNet->Root.Id[j];
+ if (!Equal) {
+ break;
+ }
+ }
+
+ if (!Equal) {
+ SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E005, SKERR_RLMT_E005_MSG);
+ Para.Para32[0] = NetIdx;
+ Para.Para32[1] = (SK_U32)-1;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SEGMENTATION, Para);
+
+ pNet->CheckingState &= ~SK_RLMT_RCS_REPORT_SEG;
+
+ /* 2000-03-06 RA: New. */
+ Para.Para32[0] = NetIdx;
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pNet->SegTimer, SK_RLMT_SEG_TO_VAL,
+ SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
+ break;
+ }
+ } /* for (i = 0; i < pNet->NumPorts; i++) */
+
+ /* 2000-03-06 RA: Moved here. */
+ /* Segmentation check not running anymore. */
+ pNet->CheckingState &= ~SK_RLMT_RCS_SEG;
+
+} /* SkRlmtCheckSeg */
+
+
+/******************************************************************************
+ *
+ * SkRlmtPortStart - initialize port variables and start port
+ *
+ * Description:
+ * This routine initializes a port's variables and issues a PORT_START
+ * to the HWAC module. This handles retries if the start fails or the
+ * link eventually goes down.
+ *
+ * Context:
+ * runtime, pageable?
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtPortStart(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 PortNumber) /* Port number */
+{
+ SK_EVPARA Para;
+
+ pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_LINK_DOWN;
+ pAC->Rlmt.Port[PortNumber].PortStarted = SK_TRUE;
+ pAC->Rlmt.Port[PortNumber].LinkDown = SK_TRUE;
+ pAC->Rlmt.Port[PortNumber].PortDown = SK_TRUE;
+ pAC->Rlmt.Port[PortNumber].CheckingState = 0;
+ pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE;
+ Para.Para32[0] = PortNumber;
+ Para.Para32[1] = (SK_U32)-1;
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+} /* SkRlmtPortStart */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtPortStartTim - PORT_START_TIM
+ *
+ * Description:
+ * This routine handles PORT_START_TIM events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPortStartTim(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
+{
+ SK_U32 i;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTSTART_TIMEOUT Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTSTART_TIMEOUT Event EMPTY.\n"))
+ return;
+ }
+
+ /*
+ * Used to start non-preferred ports if the preferred one
+ * does not come up.
+	 * This timeout only needs to be set when starting the first
+ * (preferred) port.
+ */
+ if (pAC->Rlmt.Port[Para.Para32[0]].LinkDown) {
+ /* PORT_START failed. */
+ for (i = 0; i < pAC->Rlmt.Port[Para.Para32[0]].Net->NumPorts; i++) {
+ if (!pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortStarted) {
+ SkRlmtPortStart(pAC, IoC,
+ pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortNumber);
+ }
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTSTART_TIMEOUT Event END.\n"))
+} /* SkRlmtEvtPortStartTim */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtLinkUp - LINK_UP
+ *
+ * Description:
+ *	This routine handles LINK_UP events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtLinkUp(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 Undefined */
+{
+ SK_U32 i;
+ SK_RLMT_PORT *pRPort;
+ SK_EVPARA Para2;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_UP Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
+ if (!pRPort->PortStarted) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E008, SKERR_RLMT_E008_MSG);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_UP Event EMPTY.\n"))
+ return;
+ }
+
+ if (!pRPort->LinkDown) {
+ /* RA;:;: Any better solution? */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_UP Event EMPTY.\n"))
+ return;
+ }
+
+ SkTimerStop(pAC, IoC, &pRPort->UpTimer);
+ SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
+ SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
+
+ /* Do something if timer already fired? */
+
+ pRPort->LinkDown = SK_FALSE;
+ pRPort->PortState = SK_RLMT_PS_GOING_UP;
+ pRPort->GuTimeStamp = SkOsGetTime(pAC);
+ pRPort->BcTimeStamp = 0;
+ pRPort->Net->LinksUp++;
+ if (pRPort->Net->LinksUp == 1) {
+ SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_ACTIVE);
+ }
+ else {
+ SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY);
+ }
+
+ for (i = 0; i < pRPort->Net->NumPorts; i++) {
+ if (!pRPort->Net->Port[i]->PortStarted) {
+ SkRlmtPortStart(pAC, IoC, pRPort->Net->Port[i]->PortNumber);
+ }
+ }
+
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+
+ if (pRPort->Net->LinksUp >= 2) {
+ if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) {
+ /* Build the check chain. */
+ SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
+ }
+ }
+
+ /* If the first link comes up, start the periodical RLMT timeout. */
+ if (pRPort->Net->NumPorts > 1 && pRPort->Net->LinksUp == 1 &&
+ (pRPort->Net->RlmtMode & SK_RLMT_CHECK_OTHERS) != 0) {
+ Para2.Para32[0] = pRPort->Net->NetNumber;
+ Para2.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->Net->LocTimer,
+ pRPort->Net->TimeoutValue, SKGE_RLMT, SK_RLMT_TIM, Para2);
+ }
+
+ Para2 = Para;
+ Para2.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL,
+ SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para2);
+
+ /* Later: if (pAC->Rlmt.RlmtMode & SK_RLMT_CHECK_LOC_LINK) && */
+ if ((pRPort->Net->RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
+ (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LINK) != 0 &&
+ (Para2.pParaPtr =
+ SkRlmtBuildPacket(pAC, IoC, Para.Para32[0], SK_PACKET_ANNOUNCE,
+ &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress, &SkRlmtMcAddr)
+ ) != NULL) {
+ /* Send "new" packet to RLMT multicast address. */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
+ }
+
+ if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_SEG) {
+ if ((Para2.pParaPtr =
+ SkRlmtBuildSpanningTreePacket(pAC, IoC, Para.Para32[0])) != NULL) {
+ pAC->Rlmt.Port[Para.Para32[0]].RootIdSet = SK_FALSE;
+ pRPort->Net->CheckingState |=
+ SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG;
+
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
+
+ Para.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pRPort->Net->SegTimer,
+ SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_UP Event END.\n"))
+} /* SkRlmtEvtLinkUp */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtPortUpTim - PORT_UP_TIM
+ *
+ * Description:
+ * This routine handles PORT_UP_TIM events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPortUpTim(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
+{
+ SK_RLMT_PORT *pRPort;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTUP_TIM Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTUP_TIM Event EMPTY.\n"))
+ return;
+ }
+
+ pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
+ if (pRPort->LinkDown || (pRPort->PortState == SK_RLMT_PS_UP)) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTUP_TIM Port %d Event EMPTY.\n", Para.Para32[0]))
+ return;
+ }
+
+ pRPort->PortDown = SK_FALSE;
+ pRPort->PortState = SK_RLMT_PS_UP;
+ pRPort->Net->PortsUp++;
+ if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) {
+ if (pAC->Rlmt.NumNets <= 1) {
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+ }
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_UP, Para);
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTUP_TIM Event END.\n"))
+} /* SkRlmtEvtPortUpTim */
+
+
+/******************************************************************************
+ *
+ *	SkRlmtEvtPortDownX - PORT_DOWN_*
+ *
+ * Description:
+ * This routine handles PORT_DOWN_* events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPortDownX(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Event, /* Event code */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
+{
+ SK_RLMT_PORT *pRPort;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTDOWN* Port %d Event (%d) BEGIN.\n",
+ Para.Para32[0], Event))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTDOWN* Event EMPTY.\n"))
+ return;
+ }
+
+ pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
+ if (!pRPort->PortStarted || (Event == SK_RLMT_PORTDOWN_TX_TIM &&
+ !(pRPort->CheckingState & SK_RLMT_PCS_TX))) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTDOWN* Event (%d) EMPTY.\n", Event))
+ return;
+ }
+
+ /* Stop port's timers. */
+ SkTimerStop(pAC, IoC, &pRPort->UpTimer);
+ SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
+ SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
+
+ if (pRPort->PortState != SK_RLMT_PS_LINK_DOWN) {
+ pRPort->PortState = SK_RLMT_PS_DOWN;
+ }
+
+ if (!pRPort->PortDown) {
+ pRPort->Net->PortsUp--;
+ pRPort->PortDown = SK_TRUE;
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_DOWN, Para);
+ }
+
+ pRPort->PacketsPerTimeSlot = 0;
+ /* pRPort->DataPacketsPerTimeSlot = 0; */
+ pRPort->BpduPacketsPerTimeSlot = 0;
+ pRPort->BcTimeStamp = 0;
+
+ /*
+ * RA;:;: To be checked:
+ * - actions at RLMT_STOP: We should not switch anymore.
+ */
+ if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) {
+ if (Para.Para32[0] ==
+ pRPort->Net->Port[pRPort->Net->ActivePort]->PortNumber) {
+ /* Active Port went down. */
+ SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORTDOWN* Event (%d) END.\n", Event))
+} /* SkRlmtEvtPortDownX */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtLinkDown - LINK_DOWN
+ *
+ * Description:
+ * This routine handles LINK_DOWN events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtLinkDown(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 Undefined */
+{
+ SK_RLMT_PORT *pRPort;
+
+ pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_DOWN Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (!pAC->Rlmt.Port[Para.Para32[0]].LinkDown) {
+ pRPort->Net->LinksUp--;
+ pRPort->LinkDown = SK_TRUE;
+ pRPort->PortState = SK_RLMT_PS_LINK_DOWN;
+ SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_OFF);
+
+ if ((pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) != 0) {
+ /* Build the check chain. */
+ SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
+ }
+
+ /* Ensure that port is marked down. */
+		Para.Para32[1] = (SK_U32)-1;
+ (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PORTDOWN, Para);
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_LINK_DOWN Event END.\n"))
+} /* SkRlmtEvtLinkDown */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtPortAddr - PORT_ADDR
+ *
+ * Description:
+ * This routine handles PORT_ADDR events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPortAddr(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
+{
+ SK_U32 i, j;
+ SK_RLMT_PORT *pRPort;
+ SK_MAC_ADDR *pOldMacAddr;
+ SK_MAC_ADDR *pNewMacAddr;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORT_ADDR Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORT_ADDR Event EMPTY.\n"))
+ return;
+ }
+
+ /* Port's physical MAC address changed. */
+ pOldMacAddr = &pAC->Addr.Port[Para.Para32[0]].PreviousMacAddress;
+ pNewMacAddr = &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress;
+
+ /*
+ * NOTE: This is not scalable for solutions where ports are
+ * checked remotely. There, we need to send an RLMT
+ * address change packet - and how do we ensure delivery?
+ */
+ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
+ pRPort = &pAC->Rlmt.Port[i];
+ for (j = 0; j < pRPort->PortsChecked; j++) {
+ if (SK_ADDR_EQUAL(
+ pRPort->PortCheck[j].CheckAddr.a, pOldMacAddr->a)) {
+ pRPort->PortCheck[j].CheckAddr = *pNewMacAddr;
+ }
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PORT_ADDR Event END.\n"))
+} /* SkRlmtEvtPortAddr */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtStart - START
+ *
+ * Description:
+ * This routine handles START events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtStart(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+ SK_EVPARA Para2;
+ SK_U32 PortIdx;
+ SK_U32 PortNumber;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Net %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[0]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Event EMPTY.\n"))
+ return;
+ }
+
+ if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState != SK_RLMT_RS_INIT) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Event EMPTY.\n"))
+ return;
+ }
+
+ if (pAC->Rlmt.NetsStarted >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("All nets should have been started.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Event EMPTY.\n"))
+ return;
+ }
+
+ if (pAC->Rlmt.Net[Para.Para32[0]].PrefPort >=
+ pAC->Rlmt.Net[Para.Para32[0]].NumPorts) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E009, SKERR_RLMT_E009_MSG);
+
+ /* Change PrefPort to internal default. */
+ Para2.Para32[0] = 0xFFFFFFFF;
+ Para2.Para32[1] = Para.Para32[0];
+ (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE, Para2);
+ }
+
+ PortIdx = pAC->Rlmt.Net[Para.Para32[0]].PrefPort;
+ PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[PortIdx]->PortNumber;
+
+ pAC->Rlmt.Net[Para.Para32[0]].LinksUp = 0;
+ pAC->Rlmt.Net[Para.Para32[0]].PortsUp = 0;
+ pAC->Rlmt.Net[Para.Para32[0]].CheckingState = 0;
+ pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_NET_DOWN;
+
+ /* Start preferred port. */
+ SkRlmtPortStart(pAC, IoC, PortNumber);
+
+ /* Start Timer (for first port only). */
+ Para2.Para32[0] = PortNumber;
+ Para2.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer,
+ SK_RLMT_PORTSTART_TIM_VAL, SKGE_RLMT, SK_RLMT_PORTSTART_TIM, Para2);
+
+ pAC->Rlmt.NetsStarted++;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_START Event END.\n"))
+} /* SkRlmtEvtStart */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtStop - STOP
+ *
+ * Description:
+ * This routine handles STOP events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtStop(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+ SK_EVPARA Para2;
+ SK_U32 PortNumber;
+ SK_U32 i;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Net %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[0]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Event EMPTY.\n"))
+ return;
+ }
+
+ if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState == SK_RLMT_RS_INIT) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Event EMPTY.\n"))
+ return;
+ }
+
+ if (pAC->Rlmt.NetsStarted == 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("All nets are stopped.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Event EMPTY.\n"))
+ return;
+ }
+
+ /* Stop RLMT timers. */
+ SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer);
+ SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer);
+
+ /* Stop net. */
+ pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_INIT;
+ pAC->Rlmt.Net[Para.Para32[0]].RootIdSet = SK_FALSE;
+ Para2.Para32[0] = SK_RLMT_NET_DOWN_FINAL;
+ Para2.Para32[1] = Para.Para32[0]; /* Net# */
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para2);
+
+ /* Stop ports. */
+ for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
+ PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber;
+ if (pAC->Rlmt.Port[PortNumber].PortState != SK_RLMT_PS_INIT) {
+ SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer);
+ SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownRxTimer);
+ SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownTxTimer);
+
+ pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_INIT;
+ pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE;
+ pAC->Rlmt.Port[PortNumber].PortStarted = SK_FALSE;
+ Para2.Para32[0] = PortNumber;
+ Para2.Para32[1] = (SK_U32)-1;
+ SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para2);
+ }
+ }
+
+ pAC->Rlmt.NetsStarted--;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STOP Event END.\n"))
+} /* SkRlmtEvtStop */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtTim - TIM
+ *
+ * Description:
+ * This routine handles TIM events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtTim(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+ SK_RLMT_PORT *pRPort;
+ SK_U32 Timeout;
+ SK_U32 NewTimeout;
+ SK_U32 PortNumber;
+ SK_U32 i;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_TIM Event BEGIN.\n"))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_TIM Event EMPTY.\n"))
+ return;
+ }
+
+ if ((pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_OTHERS) == 0 ||
+ pAC->Rlmt.Net[Para.Para32[0]].LinksUp == 0) {
+ /* Mode changed or all links down: No more link checking. */
+ return;
+ }
+
+#if 0
+ pAC->Rlmt.SwitchCheckCounter--;
+ if (pAC->Rlmt.SwitchCheckCounter == 0) {
+ pAC->Rlmt.SwitchCheckCounter;
+ }
+#endif /* 0 */
+
+ NewTimeout = SK_RLMT_DEF_TO_VAL;
+ for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
+ PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber;
+ pRPort = &pAC->Rlmt.Port[PortNumber];
+ if (!pRPort->LinkDown) {
+ Timeout = SkRlmtCheckPort(pAC, IoC, PortNumber);
+ if (Timeout < NewTimeout) {
+ NewTimeout = Timeout;
+ }
+
+ /*
+ * These counters should be set to 0 for all ports before the
+ * first frame is sent in the next loop.
+ */
+ pRPort->PacketsPerTimeSlot = 0;
+ /* pRPort->DataPacketsPerTimeSlot = 0; */
+ pRPort->BpduPacketsPerTimeSlot = 0;
+ }
+ }
+ pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue = NewTimeout;
+
+ if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1) {
+ /*
+ * If checking remote ports, also send packets if
+ * (LinksUp == 1) &&
+ * this port checks at least one (remote) port.
+ */
+
+ /*
+ * Must be new loop, as SkRlmtCheckPort can request to
+ * check segmentation when e.g. checking the last port.
+ */
+ for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
+ if (!pAC->Rlmt.Net[Para.Para32[0]].Port[i]->LinkDown) {
+ SkRlmtSend(pAC, IoC,
+ pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber);
+ }
+ }
+ }
+
+ SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer,
+ pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue, SKGE_RLMT, SK_RLMT_TIM,
+ Para);
+
+ if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1 &&
+ (pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_SEG) &&
+ (pAC->Rlmt.Net[Para.Para32[0]].CheckingState & SK_RLMT_RCS_START_SEG)) {
+ SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer,
+ SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
+ pAC->Rlmt.Net[Para.Para32[0]].CheckingState &= ~SK_RLMT_RCS_START_SEG;
+ pAC->Rlmt.Net[Para.Para32[0]].CheckingState |=
+ SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_TIM Event END.\n"))
+} /* SkRlmtEvtTim */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtSegTim - SEG_TIM
+ *
+ * Description:
+ * This routine handles SEG_TIM events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtSegTim(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+#ifdef xDEBUG
+ int j;
+#endif	/* xDEBUG */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SEG_TIM Event BEGIN.\n"))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SEG_TIM Event EMPTY.\n"))
+ return;
+ }
+
+#ifdef xDEBUG
+ for (j = 0; j < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; j++) {
+ SK_ADDR_PORT *pAPort;
+ SK_U32 k;
+ SK_U16 *InAddr;
+ SK_U8 InAddr8[6];
+
+ InAddr = (SK_U16 *)&InAddr8[0];
+ pAPort = pAC->Rlmt.Net[Para.Para32[0]].Port[j]->AddrPort;
+ for (k = 0; k < pAPort->NextExactMatchRlmt; k++) {
+ /* Get exact match address k from port j. */
+ XM_INADDR(IoC, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber,
+ XM_EXM(k), InAddr);
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("MC address %d on Port %u: %02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x.\n",
+ k, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber,
+ InAddr8[0], InAddr8[1], InAddr8[2],
+ InAddr8[3], InAddr8[4], InAddr8[5],
+ pAPort->Exact[k].a[0], pAPort->Exact[k].a[1],
+ pAPort->Exact[k].a[2], pAPort->Exact[k].a[3],
+ pAPort->Exact[k].a[4], pAPort->Exact[k].a[5]))
+ }
+ }
+#endif /* xDEBUG */
+
+ SkRlmtCheckSeg(pAC, IoC, Para.Para32[0]);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SEG_TIM Event END.\n"))
+} /* SkRlmtEvtSegTim */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtPacketRx - PACKET_RECEIVED
+ *
+ * Description:
+ * This routine handles PACKET_RECEIVED events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPacketRx(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_MBUF *pMb */
+{
+ SK_MBUF *pMb;
+ SK_MBUF *pNextMb;
+ SK_U32 NetNumber;
+
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PACKET_RECEIVED Event BEGIN.\n"))
+
+ /* Should we ignore frames during port switching? */
+
+#ifdef DEBUG
+ pMb = Para.pParaPtr;
+ if (pMb == NULL) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("No mbuf.\n"))
+ }
+ else if (pMb->pNext != NULL) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("More than one mbuf or pMb->pNext not cleared.\n"))
+ }
+#endif /* DEBUG */
+
+ for (pMb = Para.pParaPtr; pMb != NULL; pMb = pNextMb) {
+ pNextMb = pMb->pNext;
+ pMb->pNext = NULL;
+
+ NetNumber = pAC->Rlmt.Port[pMb->PortIdx].Net->NetNumber;
+ if (pAC->Rlmt.Net[NetNumber].RlmtState == SK_RLMT_RS_INIT) {
+ SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
+ }
+ else {
+ SkRlmtPacketReceive(pAC, IoC, pMb);
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PACKET_RECEIVED Event END.\n"))
+} /* SkRlmtEvtPacketRx */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtStatsClear - STATS_CLEAR
+ *
+ * Description:
+ * This routine handles STATS_CLEAR events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtStatsClear(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+ SK_U32 i;
+ SK_RLMT_PORT *pRPort;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_CLEAR Event BEGIN.\n"))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_CLEAR Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[0]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_CLEAR Event EMPTY.\n"))
+ return;
+ }
+
+ /* Clear statistics for logical and physical ports. */
+ for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
+ pRPort =
+ &pAC->Rlmt.Port[pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber];
+ pRPort->TxHelloCts = 0;
+ pRPort->RxHelloCts = 0;
+ pRPort->TxSpHelloReqCts = 0;
+ pRPort->RxSpHelloCts = 0;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_CLEAR Event END.\n"))
+} /* SkRlmtEvtStatsClear */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtStatsUpdate - STATS_UPDATE
+ *
+ * Description:
+ * This routine handles STATS_UPDATE events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtStatsUpdate(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_UPDATE Event BEGIN.\n"))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_UPDATE Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[0]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_UPDATE Event EMPTY.\n"))
+ return;
+ }
+
+ /* Update statistics - currently always up-to-date. */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_STATS_UPDATE Event END.\n"))
+} /* SkRlmtEvtStatsUpdate */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtPrefportChange - PREFPORT_CHANGE
+ *
+ * Description:
+ * This routine handles PREFPORT_CHANGE events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPrefportChange(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 PortIndex; SK_U32 NetNumber */
+{
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PREFPORT_CHANGE to Port %d Event BEGIN.\n", Para.Para32[0]))
+
+ if (Para.Para32[1] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[1]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n"))
+ return;
+ }
+
+ /* 0xFFFFFFFF == auto-mode. */
+ if (Para.Para32[0] == 0xFFFFFFFF) {
+ pAC->Rlmt.Net[Para.Para32[1]].PrefPort = SK_RLMT_DEF_PREF_PORT;
+ }
+ else {
+ if (Para.Para32[0] >= pAC->Rlmt.Net[Para.Para32[1]].NumPorts) {
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E010, SKERR_RLMT_E010_MSG);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n"))
+ return;
+ }
+
+ pAC->Rlmt.Net[Para.Para32[1]].PrefPort = Para.Para32[0];
+ }
+
+ pAC->Rlmt.Net[Para.Para32[1]].Preference = Para.Para32[0];
+
+ if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) {
+ SkRlmtCheckSwitch(pAC, IoC, Para.Para32[1]);
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_PREFPORT_CHANGE Event END.\n"))
+} /* SkRlmtEvtPrefportChange */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtSetNets - SET_NETS
+ *
+ * Description:
+ * This routine handles SET_NETS events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtSetNets(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NumNets; SK_U32 -1 */
+{
+ int i;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event BEGIN.\n"))
+
+ if (Para.Para32[1] != (SK_U32)-1) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad Parameter.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] == 0 || Para.Para32[0] > SK_MAX_NETS ||
+ Para.Para32[0] > (SK_U32)pAC->GIni.GIMacsFound) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad number of nets: %d.\n", Para.Para32[0]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] == pAC->Rlmt.NumNets) { /* No change. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event EMPTY.\n"))
+ return;
+ }
+
+ /* Entering and leaving dual mode only allowed while nets are stopped. */
+ if (pAC->Rlmt.NetsStarted > 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Changing dual mode only allowed while all nets are stopped.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event EMPTY.\n"))
+ return;
+ }
+
+ if (Para.Para32[0] == 1) {
+ if (pAC->Rlmt.NumNets > 1) {
+ /* Clear logical MAC addr from second net's active port. */
+ (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr.
+ Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_CLEAR_LOGICAL);
+ pAC->Rlmt.Net[1].NumPorts = 0;
+ }
+
+ pAC->Rlmt.NumNets = Para.Para32[0];
+ for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) {
+ pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
+ pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
+ pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */
+ pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
+ /* Just assuming. */
+ pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
+ pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
+ pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
+ pAC->Rlmt.Net[i].NetNumber = i;
+ }
+
+		pAC->Rlmt.Port[1].Net = &pAC->Rlmt.Net[0];
+ pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound;
+
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("RLMT: Changed to one net with two ports.\n"))
+ }
+ else if (Para.Para32[0] == 2) {
+		pAC->Rlmt.Port[1].Net = &pAC->Rlmt.Net[1];
+ pAC->Rlmt.Net[1].NumPorts = pAC->GIni.GIMacsFound - 1;
+ pAC->Rlmt.Net[0].NumPorts =
+ pAC->GIni.GIMacsFound - pAC->Rlmt.Net[1].NumPorts;
+
+ pAC->Rlmt.NumNets = Para.Para32[0];
+ for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) {
+ pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
+ pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
+ pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */
+ pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
+ /* Just assuming. */
+ pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
+ pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
+ pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
+
+ pAC->Rlmt.Net[i].NetNumber = i;
+ }
+
+ /* Set logical MAC addr on second net's active port. */
+ (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr.
+ Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_SET_LOGICAL);
+
+ SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("RLMT: Changed to two nets with one port each.\n"))
+ }
+ else {
+ /* Not implemented for more than two nets. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SetNets not implemented for more than two nets.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event EMPTY.\n"))
+ return;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_SET_NETS Event END.\n"))
+} /* SkRlmtEvtSetNets */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvtModeChange - MODE_CHANGE
+ *
+ * Description:
+ * This routine handles MODE_CHANGE events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * Nothing
+ */
+RLMT_STATIC void SkRlmtEvtModeChange(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_EVPARA Para) /* SK_U32 NewMode; SK_U32 NetNumber */
+{
+ SK_EVPARA Para2;
+ SK_U32 i;
+ SK_U32 PrevRlmtMode;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_MODE_CHANGE Event BEGIN.\n"))
+
+ if (Para.Para32[1] >= pAC->Rlmt.NumNets) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Bad NetNumber %d.\n", Para.Para32[1]))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_MODE_CHANGE Event EMPTY.\n"))
+ return;
+ }
+
+ Para.Para32[0] |= SK_RLMT_CHECK_LINK;
+
+ if ((pAC->Rlmt.Net[Para.Para32[1]].NumPorts == 1) &&
+ Para.Para32[0] != SK_RLMT_MODE_CLS) {
+ pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = SK_RLMT_MODE_CLS;
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Forced RLMT mode to CLS on single port net.\n"))
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_MODE_CHANGE Event EMPTY.\n"))
+ return;
+ }
+
+ /* Update RLMT mode. */
+ PrevRlmtMode = pAC->Rlmt.Net[Para.Para32[1]].RlmtMode;
+ pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = Para.Para32[0];
+
+ if ((PrevRlmtMode & SK_RLMT_CHECK_LOC_LINK) !=
+ (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) {
+ /* SK_RLMT_CHECK_LOC_LINK bit changed. */
+ if ((PrevRlmtMode & SK_RLMT_CHECK_OTHERS) == 0 &&
+ pAC->Rlmt.Net[Para.Para32[1]].NumPorts > 1 &&
+ pAC->Rlmt.Net[Para.Para32[1]].PortsUp >= 1) {
+ /* 20001207 RA: Was "PortsUp == 1". */
+ Para2.Para32[0] = Para.Para32[1];
+ Para2.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].LocTimer,
+ pAC->Rlmt.Net[Para.Para32[1]].TimeoutValue,
+ SKGE_RLMT, SK_RLMT_TIM, Para2);
+ }
+ }
+
+ if ((PrevRlmtMode & SK_RLMT_CHECK_SEG) !=
+ (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG)) {
+ /* SK_RLMT_CHECK_SEG bit changed. */
+ for (i = 0; i < pAC->Rlmt.Net[Para.Para32[1]].NumPorts; i++) {
+ (void)SkAddrMcClear(pAC, IoC,
+ pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
+ SK_ADDR_PERMANENT | SK_MC_SW_ONLY);
+
+ /* Add RLMT MC address. */
+ (void)SkAddrMcAdd(pAC, IoC,
+ pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
+ &SkRlmtMcAddr, SK_ADDR_PERMANENT);
+
+ if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode &
+ SK_RLMT_CHECK_SEG) != 0) {
+ /* Add BPDU MC address. */
+ (void)SkAddrMcAdd(pAC, IoC,
+ pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
+ &BridgeMcAddr, SK_ADDR_PERMANENT);
+
+ if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) {
+ if (!pAC->Rlmt.Net[Para.Para32[1]].Port[i]->LinkDown &&
+ (Para2.pParaPtr = SkRlmtBuildSpanningTreePacket(
+ pAC, IoC, i)) != NULL) {
+ pAC->Rlmt.Net[Para.Para32[1]].Port[i]->RootIdSet =
+ SK_FALSE;
+ SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
+ }
+ }
+ }
+ (void)SkAddrMcUpdate(pAC, IoC,
+ pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber);
+ } /* for ... */
+
+ if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG) != 0) {
+ Para2.Para32[0] = Para.Para32[1];
+ Para2.Para32[1] = (SK_U32)-1;
+ SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].SegTimer,
+ SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para2);
+ }
+ } /* SK_RLMT_CHECK_SEG bit changed. */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("SK_RLMT_MODE_CHANGE Event END.\n"))
+} /* SkRlmtEvtModeChange */
+
+
+/******************************************************************************
+ *
+ * SkRlmtEvent - a PORT- or an RLMT-specific event happened
+ *
+ * Description:
+ * This routine calls subroutines to handle PORT- and RLMT-specific events.
+ *
+ * Context:
+ * runtime, pageable?
+ * may be called after SK_INIT_IO
+ *
+ * Returns:
+ * 0
+ */
+int SkRlmtEvent(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+SK_U32 Event, /* Event code */
+SK_EVPARA Para) /* Event-specific parameter */
+{
+ switch (Event) {
+
+ /* ----- PORT events ----- */
+
+ case SK_RLMT_PORTSTART_TIM: /* From RLMT via TIME. */
+ SkRlmtEvtPortStartTim(pAC, IoC, Para);
+ break;
+ case SK_RLMT_LINK_UP: /* From SIRQ. */
+ SkRlmtEvtLinkUp(pAC, IoC, Para);
+ break;
+ case SK_RLMT_PORTUP_TIM: /* From RLMT via TIME. */
+ SkRlmtEvtPortUpTim(pAC, IoC, Para);
+ break;
+ case SK_RLMT_PORTDOWN: /* From RLMT. */
+ case SK_RLMT_PORTDOWN_RX_TIM: /* From RLMT via TIME. */
+ case SK_RLMT_PORTDOWN_TX_TIM: /* From RLMT via TIME. */
+ SkRlmtEvtPortDownX(pAC, IoC, Event, Para);
+ break;
+ case SK_RLMT_LINK_DOWN: /* From SIRQ. */
+ SkRlmtEvtLinkDown(pAC, IoC, Para);
+ break;
+ case SK_RLMT_PORT_ADDR: /* From ADDR. */
+ SkRlmtEvtPortAddr(pAC, IoC, Para);
+ break;
+
+ /* ----- RLMT events ----- */
+
+ case SK_RLMT_START: /* From DRV. */
+ SkRlmtEvtStart(pAC, IoC, Para);
+ break;
+ case SK_RLMT_STOP: /* From DRV. */
+ SkRlmtEvtStop(pAC, IoC, Para);
+ break;
+ case SK_RLMT_TIM: /* From RLMT via TIME. */
+ SkRlmtEvtTim(pAC, IoC, Para);
+ break;
+ case SK_RLMT_SEG_TIM:
+ SkRlmtEvtSegTim(pAC, IoC, Para);
+ break;
+ case SK_RLMT_PACKET_RECEIVED: /* From DRV. */
+ SkRlmtEvtPacketRx(pAC, IoC, Para);
+ break;
+ case SK_RLMT_STATS_CLEAR: /* From PNMI. */
+ SkRlmtEvtStatsClear(pAC, IoC, Para);
+ break;
+ case SK_RLMT_STATS_UPDATE: /* From PNMI. */
+ SkRlmtEvtStatsUpdate(pAC, IoC, Para);
+ break;
+ case SK_RLMT_PREFPORT_CHANGE: /* From PNMI. */
+ SkRlmtEvtPrefportChange(pAC, IoC, Para);
+ break;
+ case SK_RLMT_MODE_CHANGE: /* From PNMI. */
+ SkRlmtEvtModeChange(pAC, IoC, Para);
+ break;
+ case SK_RLMT_SET_NETS: /* From DRV. */
+ SkRlmtEvtSetNets(pAC, IoC, Para);
+ break;
+
+ /* ----- Unknown events ----- */
+
+ default: /* Create error log entry. */
+ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+ ("Unknown RLMT Event %d.\n", Event))
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E003, SKERR_RLMT_E003_MSG);
+ break;
+ } /* switch() */
+
+ return (0);
+} /* SkRlmtEvent */
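+
+/******************************************************************************
+ *
+ * Usage sketch (editor's illustration, not from the original sources): how a
+ * caller hands an event to the dispatcher above. The function name is
+ * hypothetical; SK_AC, SK_IOC, SK_EVPARA and SK_RLMT_LINK_DOWN are the
+ * driver's own definitions, and the Para conventions follow the handler
+ * comments in this file.
+ */
+RLMT_STATIC void ExampleReportLinkDown(
+SK_AC	*pAC,	/* Adapter Context */
+SK_IOC	IoC,	/* I/O Context */
+SK_U32	PortNumber)	/* Port that lost its link */
+{
+	SK_EVPARA	Para;
+
+	Para.Para32[0] = PortNumber;	/* physical port number */
+	Para.Para32[1] = (SK_U32)-1;	/* second parameter unused here */
+	(void)SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
+}	/* ExampleReportLinkDown */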
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
diff --git a/drivers/net/sk98lin/sktimer.c b/drivers/net/sk98lin/sktimer.c
new file mode 100644
index 000000000000..4e462955ecd8
--- /dev/null
+++ b/drivers/net/sk98lin/sktimer.c
@@ -0,0 +1,250 @@
+/******************************************************************************
+ *
+ * Name: sktimer.c
+ * Project: Gigabit Ethernet Adapters, Event Scheduler Module
+ * Version: $Revision: 1.14 $
+ * Date: $Date: 2003/09/16 13:46:51 $
+ * Purpose: High level timer functions.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect GmbH.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+
+/*
+ * High level timer functions
+ */
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: sktimer.c,v 1.14 2003/09/16 13:46:51 rschmidt Exp $ (C) Marvell.";
+#endif
+
+#include "h/skdrv1st.h" /* Driver Specific Definitions */
+#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
+
+#ifdef __C2MAN__
+/*
+	Timer queue management.
+
+ General Description:
+
+ */
+intro()
+{}
+#endif
+
+
+/* Forward declaration */
+static void timer_done(SK_AC *pAC,SK_IOC Ioc,int Restart);
+
+
+/*
+ * Initializes the software timer queue
+ *
+ * Needs to be called at init levels SK_INIT_DATA and SK_INIT_IO.
+ */
+void SkTimerInit(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc, /* IoContext */
+int Level) /* Init Level */
+{
+ switch (Level) {
+ case SK_INIT_DATA:
+ pAC->Tim.StQueue = NULL;
+ break;
+ case SK_INIT_IO:
+ SkHwtInit(pAC, Ioc);
+ SkTimerDone(pAC, Ioc);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Stops a high level timer.
+ * If the timer is not in the queue the function simply returns.
+ */
+void SkTimerStop(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc, /* IoContext */
+SK_TIMER	*pTimer)	/* Timer to be stopped */
+{
+ SK_TIMER **ppTimPrev;
+ SK_TIMER *pTm;
+
+ /*
+ * remove timer from queue
+ */
+ pTimer->TmActive = SK_FALSE;
+
+ if (pAC->Tim.StQueue == pTimer && !pTimer->TmNext) {
+ SkHwtStop(pAC, Ioc);
+ }
+
+ for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev);
+ ppTimPrev = &pTm->TmNext ) {
+
+ if (pTm == pTimer) {
+ /*
+ * Timer found in queue
+ * - dequeue it and
+ * - correct delta of the next timer
+ */
+ *ppTimPrev = pTm->TmNext;
+
+ if (pTm->TmNext) {
+ /* correct delta of next timer in queue */
+ pTm->TmNext->TmDelta += pTm->TmDelta;
+ }
+ return;
+ }
+ }
+}
+
+/*
+ * Start a high level software timer
+ */
+void SkTimerStart(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc, /* IoContext */
+SK_TIMER *pTimer, /* Timer Pointer to be started */
+SK_U32 Time, /* Time value */
+SK_U32 Class, /* Event Class for this timer */
+SK_U32 Event, /* Event Value for this timer */
+SK_EVPARA Para) /* Event Parameter for this timer */
+{
+ SK_TIMER **ppTimPrev;
+ SK_TIMER *pTm;
+ SK_U32 Delta;
+
+ Time /= 16; /* input is uS, clock ticks are 16uS */
+
+ if (!Time)
+ Time = 1;
+
+ SkTimerStop(pAC, Ioc, pTimer);
+
+ pTimer->TmClass = Class;
+ pTimer->TmEvent = Event;
+ pTimer->TmPara = Para;
+ pTimer->TmActive = SK_TRUE;
+
+ if (!pAC->Tim.StQueue) {
+ /* First Timer to be started */
+ pAC->Tim.StQueue = pTimer;
+ pTimer->TmNext = NULL;
+ pTimer->TmDelta = Time;
+
+ SkHwtStart(pAC, Ioc, Time);
+
+ return;
+ }
+
+ /*
+ * timer correction
+ */
+ timer_done(pAC, Ioc, 0);
+
+ /*
+ * find position in queue
+ */
+ Delta = 0;
+ for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev);
+ ppTimPrev = &pTm->TmNext ) {
+
+ if (Delta + pTm->TmDelta > Time) {
+ /* Position found */
+ /* Here the timer needs to be inserted. */
+ break;
+ }
+ Delta += pTm->TmDelta;
+ }
+
+ /* insert in queue */
+ *ppTimPrev = pTimer;
+ pTimer->TmNext = pTm;
+ pTimer->TmDelta = Time - Delta;
+
+ if (pTm) {
+ /* There is a next timer
+ * -> correct its Delta value.
+ */
+ pTm->TmDelta -= pTimer->TmDelta;
+ }
+
+ /* restart with first */
+ SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta);
+}
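+
+/*
+ * Editor's sketch (hypothetical names, not part of the original sources):
+ * the queue above is a classic delta list - each timer stores only the time
+ * remaining after its predecessor expires, so the hardware timer only ever
+ * needs to be armed with the head's delta. The same insertion logic in
+ * isolation, assuming nothing beyond standard C:
+ */
+struct dq_node {
+	struct dq_node	*next;
+	unsigned int	delta;	/* ticks remaining after the predecessor fires */
+};
+
+static void dq_insert(struct dq_node **head, struct dq_node *node, unsigned int ticks)
+{
+	struct dq_node	**pp = head;
+	struct dq_node	*cur;
+	unsigned int	sum = 0;
+
+	/* advance while the cumulative expiry of 'cur' does not pass 'ticks' */
+	while ((cur = *pp) != NULL && sum + cur->delta <= ticks) {
+		sum += cur->delta;
+		pp = &cur->next;
+	}
+	node->delta = ticks - sum;	/* delta relative to the predecessor */
+	node->next = cur;
+	if (cur != NULL) {
+		cur->delta -= node->delta;	/* successor now counts from 'node' */
+	}
+	*pp = node;
+}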
+
+
+void SkTimerDone(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc) /* IoContext */
+{
+ timer_done(pAC, Ioc, 1);
+}
+
+
+static void timer_done(
+SK_AC *pAC, /* Adapters context */
+SK_IOC Ioc, /* IoContext */
+int Restart) /* Do we need to restart the Hardware timer ? */
+{
+ SK_U32 Delta;
+ SK_TIMER *pTm;
+	SK_TIMER	*pTComp;	/* Timer that completed just now */
+	SK_TIMER	**ppLast;	/* Next field of last timer to be dequeued */
+ int Done = 0;
+
+ Delta = SkHwtRead(pAC, Ioc);
+
+ ppLast = &pAC->Tim.StQueue;
+ pTm = pAC->Tim.StQueue;
+ while (pTm && !Done) {
+ if (Delta >= pTm->TmDelta) {
+ /* Timer ran out */
+ pTm->TmActive = SK_FALSE;
+ Delta -= pTm->TmDelta;
+ ppLast = &pTm->TmNext;
+ pTm = pTm->TmNext;
+ }
+ else {
+ /* We found the first timer that did not run out */
+ pTm->TmDelta -= Delta;
+ Delta = 0;
+ Done = 1;
+ }
+ }
+ *ppLast = NULL;
+ /*
+ * pTm points to the first Timer that did not run out.
+	 * StQueue points to the first Timer that ran out.
+ */
+
+ for ( pTComp = pAC->Tim.StQueue; pTComp; pTComp = pTComp->TmNext) {
+ SkEventQueue(pAC,pTComp->TmClass, pTComp->TmEvent, pTComp->TmPara);
+ }
+
+ /* Set head of timer queue to the first timer that did not run out */
+ pAC->Tim.StQueue = pTm;
+
+ if (Restart && pAC->Tim.StQueue) {
+ /* Restart HW timer */
+ SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta);
+ }
+}
+
+/* End of file */
diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c
new file mode 100644
index 000000000000..eb3c8988ced1
--- /dev/null
+++ b/drivers/net/sk98lin/skvpd.c
@@ -0,0 +1,1197 @@
+/******************************************************************************
+ *
+ * Name: skvpd.c
+ * Project: GEnesis, PCI Gigabit Ethernet Adapter
+ * Version: $Revision: 1.37 $
+ * Date: $Date: 2003/01/13 10:42:45 $
+ * Purpose: Shared software to read and write VPD data
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2003 SysKonnect GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+	Please refer to skvpd.txt for information on how to include this module
+ */
+static const char SysKonnectFileId[] =
+ "@(#)$Id: skvpd.c,v 1.37 2003/01/13 10:42:45 rschmidt Exp $ (C) SK";
+
+#include "h/skdrv1st.h"
+#include "h/sktypes.h"
+#include "h/skdebug.h"
+#include "h/skdrv2nd.h"
+
+/*
+ * Static functions
+ */
+#ifndef SK_KR_PROTO
+static SK_VPD_PARA *vpd_find_para(
+ SK_AC *pAC,
+ const char *key,
+ SK_VPD_PARA *p);
+#else /* SK_KR_PROTO */
+static SK_VPD_PARA *vpd_find_para();
+#endif /* SK_KR_PROTO */
+
+/*
+ * Waits for completion of a VPD transfer.
+ * The VPD transfer must complete within SK_TICKS_PER_SEC.
+ *
+ * returns	0:	success, transfer completed
+ *		1:	error, the transfer timed out (an error message is logged)
+ */
+static int VpdWait(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC, /* IO Context */
+int		event)	/* event to wait for (VPD_READ / VPD_WRITE) completion */
+{
+ SK_U64 start_time;
+ SK_U16 state;
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD wait for %s\n", event?"Write":"Read"));
+ start_time = SkOsGetTime(pAC);
+ do {
+ if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC) {
+
+ /* Bug fix AF: Thu Mar 28 2002
+ * Do not call: VPD_STOP(pAC, IoC);
+ * A pending VPD read cycle can not be aborted by writing
+ * VPD_WRITE to the PCI_VPD_ADR_REG (VPD address register).
+ * Although the write threshold in the OUR-register protects
+ * VPD read only space from being overwritten this does not
+ * protect a VPD read from being `converted` into a VPD write
+ * operation (on the fly). As a consequence the VPD_STOP would
+ * delete VPD read only data. In case of any problems with the
+ * I2C bus we exit the loop here. The I2C read operation can
+ * not be aborted except by a reset (->LR).
+ */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_FATAL | SK_DBGCAT_ERR,
+ ("ERROR:VPD wait timeout\n"));
+ return(1);
+ }
+
+ VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("state = %x, event %x\n",state,event));
+ } while((int)(state & PCI_VPD_FLAG) == event);
+
+ return(0);
+}
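+
+/*
+ * Editor's sketch (hypothetical helper, not part of the original sources):
+ * the read handshake served by VpdWait(). The caller writes the VPD address
+ * with the R/W bit cleared, waits until the adapter reports completion via
+ * PCI_VPD_FLAG, and then fetches the 32-bit window from the data register.
+ * VpdReadDWord() below is the real (diagnostics-only) variant of this.
+ */
+static int ExampleVpdRead32(
+SK_AC	*pAC,	/* Adapters context */
+SK_IOC	IoC,	/* IO Context */
+int	addr,	/* VPD address */
+SK_U32	*pVal)	/* buffer for the dword read */
+{
+	/* start the read cycle: R/W bit cleared means READ */
+	VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr & ~VPD_WRITE));
+
+	if (VpdWait(pAC, IoC, VPD_READ) != 0) {
+		return(1);	/* timed out */
+	}
+
+	VPD_IN32(pAC, IoC, PCI_VPD_DAT_REG, pVal);
+	return(0);
+}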
+
+#ifdef SKDIAG
+
+/*
+ * Read the dword at address 'addr' from the VPD EEPROM.
+ *
+ *	Needed Time:	MIN 1.3 ms	MAX 2.6 ms
+ *
+ * Note: The DWord is returned in the endianness of the machine the routine
+ *       is running on.
+ *
+ * Returns the data read.
+ */
+SK_U32 VpdReadDWord(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC, /* IO Context */
+int addr) /* VPD address */
+{
+ SK_U32 Rtv;
+
+ /* start VPD read */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD read dword at 0x%x\n",addr));
+ addr &= ~VPD_WRITE; /* ensure the R/W bit is set to read */
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)addr);
+
+ /* ignore return code here */
+ (void)VpdWait(pAC, IoC, VPD_READ);
+
+ /* Don't swap here, it's a data stream of bytes */
+ Rtv = 0;
+
+ VPD_IN32(pAC, IoC, PCI_VPD_DAT_REG, &Rtv);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD read dword data = 0x%x\n",Rtv));
+ return(Rtv);
+}
+
+#endif /* SKDIAG */
+
+#if 0
+
+/*
+ Write the dword 'data' at address 'addr' into the VPD EEPROM, and
+ verify that the data is written.
+
+ Needed Time:
+
+. MIN MAX
+. -------------------------------------------------------------------
+. write 1.8 ms 3.6 ms
+.	internal write cycles		0.7 ms		7.0 ms
+. -------------------------------------------------------------------
+. over all program time 2.5 ms 10.6 ms
+. read 1.3 ms 2.6 ms
+. -------------------------------------------------------------------
+. over all 3.8 ms 13.2 ms
+.
+
+
+ Returns 0: success
+ 1: error, I2C transfer does not terminate
+ 2: error, data verify error
+
+ */
+static int VpdWriteDWord(
+SK_AC *pAC, /* pAC pointer */
+SK_IOC IoC, /* IO Context */
+int addr, /* VPD address */
+SK_U32 data) /* VPD data to write */
+{
+ /* start VPD write */
+ /* Don't swap here, it's a data stream of bytes */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data));
+ VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data);
+ /* But do it here */
+ addr |= VPD_WRITE;
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE));
+
+	/* this may take up to 10.6 ms */
+ if (VpdWait(pAC, IoC, VPD_WRITE)) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("Write Timed Out\n"));
+ return(1);
+	}
+
+ /* verify data */
+ if (VpdReadDWord(pAC, IoC, addr) != data) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Data Verify Error\n"));
+ return(2);
+ }
+ return(0);
+} /* VpdWriteDWord */
+
+#endif /* 0 */
+
+/*
+ *	Write one stream of 'len' bytes of VPD data to the I2C EEPROM,
+ *	starting at 'addr', and verify each written dword by reading it back.
+ *
+ *	Returns number of bytes written.
+ */
+static int VpdWriteStream(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC, /* IO Context */
+char *buf, /* data buffer */
+int Addr, /* VPD start address */
+int Len) /* number of bytes to read / to write */
+{
+ int i;
+ int j;
+ SK_U16 AdrReg;
+ int Rtv;
+ SK_U8 * pComp; /* Compare pointer */
+ SK_U8 Data; /* Input Data for Compare */
+
+ /* Init Compare Pointer */
+ pComp = (SK_U8 *) buf;
+
+ for (i = 0; i < Len; i++, buf++) {
+ if ((i%sizeof(SK_U32)) == 0) {
+ /*
+			 * At the beginning of each cycle read the Data Reg
+			 * so that it is initialized even if only a few bytes
+			 * of it are written.
+ */
+ AdrReg = (SK_U16) Addr;
+ AdrReg &= ~VPD_WRITE; /* READ operation */
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
+
+ /* Wait for termination */
+ Rtv = VpdWait(pAC, IoC, VPD_READ);
+ if (Rtv != 0) {
+ return(i);
+ }
+ }
+
+ /* Write current Byte */
+ VPD_OUT8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)),
+ *(SK_U8*)buf);
+
+ if (((i%sizeof(SK_U32)) == 3) || (i == (Len - 1))) {
+ /* New Address needs to be written to VPD_ADDR reg */
+ AdrReg = (SK_U16) Addr;
+ Addr += sizeof(SK_U32);
+ AdrReg |= VPD_WRITE; /* WRITE operation */
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
+
+ /* Wait for termination */
+ Rtv = VpdWait(pAC, IoC, VPD_WRITE);
+ if (Rtv != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("Write Timed Out\n"));
+ return(i - (i%sizeof(SK_U32)));
+ }
+
+ /*
+ * Now re-read to verify
+ */
+ AdrReg &= ~VPD_WRITE; /* READ operation */
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
+
+ /* Wait for termination */
+ Rtv = VpdWait(pAC, IoC, VPD_READ);
+ if (Rtv != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("Verify Timed Out\n"));
+ return(i - (i%sizeof(SK_U32)));
+ }
+
+ for (j = 0; j <= (int)(i%sizeof(SK_U32)); j++, pComp++) {
+
+ VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + j, &Data);
+
+ if (Data != *pComp) {
+ /* Verify Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("WriteStream Verify Error\n"));
+ return(i - (i%sizeof(SK_U32)) + j);
+ }
+ }
+ }
+ }
+
+ return(Len);
+}
+
+
+/*
+ *	Read one stream of 'len' bytes of VPD data from the I2C EEPROM,
+ *	starting at 'addr'.
+ *
+ *	Returns number of bytes read.
+ */
+static int VpdReadStream(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC, /* IO Context */
+char *buf, /* data buffer */
+int Addr, /* VPD start address */
+int Len) /* number of bytes to read / to write */
+{
+ int i;
+ SK_U16 AdrReg;
+ int Rtv;
+
+ for (i = 0; i < Len; i++, buf++) {
+ if ((i%sizeof(SK_U32)) == 0) {
+ /* New Address needs to be written to VPD_ADDR reg */
+ AdrReg = (SK_U16) Addr;
+ Addr += sizeof(SK_U32);
+ AdrReg &= ~VPD_WRITE; /* READ operation */
+
+ VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
+
+ /* Wait for termination */
+ Rtv = VpdWait(pAC, IoC, VPD_READ);
+ if (Rtv != 0) {
+ return(i);
+ }
+ }
+ VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)),
+ (SK_U8 *)buf);
+ }
+
+ return(Len);
+}
+
+/*
+ *	Reads or writes 'len' bytes of VPD data, starting at 'addr', from
+ * or to the I2C EEPROM.
+ *
+ * Returns number of bytes read / written.
+ */
+static int VpdTransferBlock(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC, /* IO Context */
+char *buf, /* data buffer */
+int addr, /* VPD start address */
+int len, /* number of bytes to read / to write */
+int dir) /* transfer direction may be VPD_READ or VPD_WRITE */
+{
+ int Rtv; /* Return value */
+ int vpd_rom_size;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD %s block, addr = 0x%x, len = %d\n",
+ dir ? "write" : "read", addr, len));
+
+ if (len == 0)
+ return(0);
+
+ vpd_rom_size = pAC->vpd.rom_size;
+
+ if (addr > vpd_rom_size - 4) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Address error: 0x%x, exp. < 0x%x\n",
+ addr, vpd_rom_size - 4));
+ return(0);
+ }
+
+ if (addr + len > vpd_rom_size) {
+ len = vpd_rom_size - addr;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("Warning: len was cut to %d\n", len));
+ }
+
+ if (dir == VPD_READ) {
+ Rtv = VpdReadStream(pAC, IoC, buf, addr, len);
+ }
+ else {
+ Rtv = VpdWriteStream(pAC, IoC, buf, addr, len);
+ }
+
+ return(Rtv);
+}
+
+#ifdef SKDIAG
+
+/*
+ * Read 'len' bytes of VPD data, starting at 'addr'.
+ *
+ * Returns number of bytes read.
+ */
+int VpdReadBlock(
+SK_AC *pAC, /* pAC pointer */
+SK_IOC IoC, /* IO Context */
+char		*buf,	/* buffer where the data should be stored */
+int addr, /* start reading at the VPD address */
+int len) /* number of bytes to read */
+{
+ return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_READ));
+}
+
+/*
+ *	Write 'len' bytes of *buf to the VPD EEPROM, starting at 'addr'.
+ *
+ *	Returns number of bytes written.
+ */
+int VpdWriteBlock(
+SK_AC *pAC, /* pAC pointer */
+SK_IOC IoC, /* IO Context */
+char *buf, /* buffer, holds the data to write */
+int addr, /* start writing at the VPD address */
+int len) /* number of bytes to write */
+{
+ return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_WRITE));
+}
+#endif /* SKDIAG */
+
+/*
+ * (re)initialize the VPD buffer
+ *
+ *	Reads the VPD data from the EEPROM into the VPD buffer and
+ *	determines the remaining read-only and read/write space.
+ *
+ * return 0: success
+ * 1: fatal VPD error
+ */
+static int VpdInit(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC) /* IO Context */
+{
+ SK_VPD_PARA *r, rp; /* RW or RV */
+ int i;
+ unsigned char x;
+ int vpd_size;
+ SK_U16 dev_id;
+ SK_U32 our_reg2;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("VpdInit .. "));
+
+ VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &dev_id);
+
+ VPD_IN32(pAC, IoC, PCI_OUR_REG_2, &our_reg2);
+
+ pAC->vpd.rom_size = 256 << ((our_reg2 & PCI_VPD_ROM_SZ) >> 14);
+
+ /*
+	 * this function might get used before the hardware is initialized;
+	 * therefore we cannot always trust GIChipId
+ */
+ if (((pAC->vpd.v.vpd_status & VPD_VALID) == 0 &&
+ dev_id != VPD_DEV_ID_GENESIS) ||
+ ((pAC->vpd.v.vpd_status & VPD_VALID) != 0 &&
+ !pAC->GIni.GIGenesis)) {
+
+ /* for Yukon the VPD size is always 256 */
+ vpd_size = VPD_SIZE_YUKON;
+ }
+ else {
+ /* Genesis uses the maximum ROM size up to 512 for VPD */
+ if (pAC->vpd.rom_size > VPD_SIZE_GENESIS) {
+ vpd_size = VPD_SIZE_GENESIS;
+ }
+ else {
+ vpd_size = pAC->vpd.rom_size;
+ }
+ }
+
+ /* read the VPD data into the VPD buffer */
+ if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf, 0, vpd_size, VPD_READ)
+ != vpd_size) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("Block Read Error\n"));
+ return(1);
+ }
+
+ pAC->vpd.vpd_size = vpd_size;
+
+ /* Asus K8V Se Deluxe bugfix. Correct VPD content */
+ /* MBo April 2004 */
+ if (((unsigned char)pAC->vpd.vpd_buf[0x3f] == 0x38) &&
+ ((unsigned char)pAC->vpd.vpd_buf[0x40] == 0x3c) &&
+ ((unsigned char)pAC->vpd.vpd_buf[0x41] == 0x45)) {
+ printk("sk98lin: Asus mainboard with buggy VPD? "
+ "Correcting data.\n");
+ pAC->vpd.vpd_buf[0x40] = 0x38;
+ }
+
+
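+	/*
+	 * Note: the code below treats the VPD buffer as two halves; the
+	 * read-only area must fit into the lower half (up to vpd_size/2)
+	 * and the read/write area occupies the upper half.
+	 */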
+ /* find the end tag of the RO area */
+ if (!(r = vpd_find_para(pAC, VPD_RV, &rp))) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Encoding Error: RV Tag not found\n"));
+ return(1);
+ }
+
+ if (r->p_val + r->p_len > pAC->vpd.vpd_buf + vpd_size/2) {
+ SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Encoding Error: Invalid VPD struct size\n"));
+ return(1);
+ }
+ pAC->vpd.v.vpd_free_ro = r->p_len - 1;
+
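+	/*
+	 * The first data byte of the "RV" keyword is a checksum byte chosen
+	 * so that all bytes from offset 0 up to and including it sum to zero
+	 * (see vpd_mod_endtag() below); a non-zero sum therefore indicates a
+	 * corrupted read-only area.
+	 */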
+ /* test the checksum */
+ for (i = 0, x = 0; (unsigned)i <= (unsigned)vpd_size/2 - r->p_len; i++) {
+ x += pAC->vpd.vpd_buf[i];
+ }
+
+ if (x != 0) {
+ /* checksum error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("VPD Checksum Error\n"));
+ return(1);
+ }
+
+ /* find and check the end tag of the RW area */
+ if (!(r = vpd_find_para(pAC, VPD_RW, &rp))) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+			("Encoding Error: RW Tag not found\n"));
+ return(1);
+ }
+
+ if (r->p_val < pAC->vpd.vpd_buf + vpd_size/2) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Encoding Error: Invalid VPD struct size\n"));
+ return(1);
+ }
+ pAC->vpd.v.vpd_free_rw = r->p_len;
+
+ /* everything seems to be ok */
+ if (pAC->GIni.GIChipId != 0) {
+ pAC->vpd.v.vpd_status |= VPD_VALID;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT,
+ ("done. Free RO = %d, Free RW = %d\n",
+ pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw));
+
+ return(0);
+}
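+
+/*
+ * Note: the layout handled by VpdInit() follows the usual PCI VPD format:
+ * a resource ID string (product name), a read-only area terminated by the
+ * "RV" keyword (checksum plus padding), and a read/write area terminated
+ * by the "RW" keyword (padding).
+ */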
+
+/*
+ * Find the keyword 'key' in the VPD buffer and fill the
+ * parameter struct 'p' with its values.
+ *
+ * returns	pointer to 'p':	success
+ *		NULL:	parameter was not found or VPD encoding error
+ */
+static SK_VPD_PARA *vpd_find_para(
+SK_AC *pAC, /* common data base */
+const char *key, /* keyword to find (e.g. "MN") */
+SK_VPD_PARA *p) /* parameter description struct */
+{
+ char *v ; /* points to VPD buffer */
+ int max; /* Maximum Number of Iterations */
+
+ v = pAC->vpd.vpd_buf;
+ max = 128;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD find para %s .. ",key));
+
+ /* check mandatory resource type ID string (Product Name) */
+ if (*v != (char)RES_ID) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Error: 0x%x missing\n", RES_ID));
+ return NULL;
+ }
+
+ if (strcmp(key, VPD_NAME) == 0) {
+ p->p_len = VPD_GET_RES_LEN(v);
+ p->p_val = VPD_GET_VAL(v);
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("found, len = %d\n", p->p_len));
+ return(p);
+ }
+
+ v += 3 + VPD_GET_RES_LEN(v) + 3;
+ for (;; ) {
+ if (SK_MEMCMP(key,v,2) == 0) {
+ p->p_len = VPD_GET_VPD_LEN(v);
+ p->p_val = VPD_GET_VAL(v);
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("found, len = %d\n",p->p_len));
+ return(p);
+ }
+
+		/* exit when reaching the "RW" tag or the maximum number of iterations */
+ max--;
+ if (SK_MEMCMP(VPD_RW,v,2) == 0 || max == 0) {
+ break;
+ }
+
+ if (SK_MEMCMP(VPD_RV,v,2) == 0) {
+ v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */
+ }
+ else {
+ v += 3 + VPD_GET_VPD_LEN(v);
+ }
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("scanning '%c%c' len = %d\n",v[0],v[1],v[2]));
+ }
+
+#ifdef DEBUG
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("not found\n"));
+ if (max == 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Key/Len Encoding error\n"));
+ }
+#endif /* DEBUG */
+ return NULL;
+}
+
+/*
+ * Move 'n' bytes. Begin with the last byte if 'n' is > 0,
+ * begin with the first byte if 'n' is < 0.
+ *
+ * returns nothing
+ */
+static void vpd_move_para(
+char *start, /* start of memory block */
+char *end, /* end of memory block to move */
+int n) /* number of bytes the memory block has to be moved */
+{
+ char *p;
+ int i; /* number of byte copied */
+
+ if (n == 0)
+ return;
+
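+	/*
+	 * Source and destination may overlap; copy forwards when the block
+	 * moves down (n < 0) and backwards when it moves up (n > 0), in the
+	 * same spirit as memmove().
+	 */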
+ i = (int) (end - start + 1);
+ if (n < 0) {
+ p = start + n;
+ while (i != 0) {
+ *p++ = *start++;
+ i--;
+ }
+ }
+ else {
+ p = end + n;
+ while (i != 0) {
+ *p-- = *end--;
+ i--;
+ }
+ }
+}
+
+/*
+ * setup the VPD keyword 'key' at 'ip'.
+ *
+ * returns nothing
+ */
+static void vpd_insert_key(
+const char *key, /* keyword to insert */
+const char *buf, /* buffer with the keyword value */
+int len, /* length of the value string */
+char	*ip)	/* insertion point */
+{
+ SK_VPD_KEY *p;
+
+ p = (SK_VPD_KEY *) ip;
+ p->p_key[0] = key[0];
+ p->p_key[1] = key[1];
+ p->p_len = (unsigned char) len;
+ SK_MEMCPY(&p->p_val,buf,len);
+}
+
+/*
+ * Setup the VPD end tag "RV" / "RW".
+ * Also correct the remaining space variables vpd_free_ro / vpd_free_rw.
+ *
+ * returns 0: success
+ * 1: encoding error
+ */
+static int vpd_mod_endtag(
+SK_AC *pAC, /* common data base */
+char *etp) /* end pointer input position */
+{
+ SK_VPD_KEY *p;
+ unsigned char x;
+ int i;
+ int vpd_size;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD modify endtag at 0x%x = '%c%c'\n",etp,etp[0],etp[1]));
+
+ vpd_size = pAC->vpd.vpd_size;
+
+ p = (SK_VPD_KEY *) etp;
+
+ if (p->p_key[0] != 'R' || (p->p_key[1] != 'V' && p->p_key[1] != 'W')) {
+ /* something wrong here, encoding error */
+ SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
+ ("Encoding Error: invalid end tag\n"));
+ return(1);
+ }
+ if (etp > pAC->vpd.vpd_buf + vpd_size/2) {
+ /* create "RW" tag */
+ p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size-etp-3-1);
+ pAC->vpd.v.vpd_free_rw = (int) p->p_len;
+ i = pAC->vpd.v.vpd_free_rw;
+ etp += 3;
+ }
+ else {
+ /* create "RV" tag */
+ p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size/2-etp-3);
+ pAC->vpd.v.vpd_free_ro = (int) p->p_len - 1;
+
+ /* setup checksum */
+ for (i = 0, x = 0; i < vpd_size/2 - p->p_len; i++) {
+ x += pAC->vpd.vpd_buf[i];
+ }
+ p->p_val = (char) 0 - x;
+ i = pAC->vpd.v.vpd_free_ro;
+ etp += 4;
+ }
+ while (i) {
+ *etp++ = 0x00;
+ i--;
+ }
+
+ return(0);
+}
+
+/*
+ * Insert a VPD keyword into the VPD buffer.
+ *
+ * The keyword 'key' is inserted at the position 'ip' in the
+ * VPD buffer.
+ * The keywords behind the input position will
+ * be moved. The VPD end tag "RV" or "RW" is generated again.
+ *
+ * returns 0: success
+ * 2: value string was cut
+ * 4: VPD full, keyword was not written
+ * 6: fatal VPD error
+ *
+ */
+int VpdSetupPara(
+SK_AC *pAC, /* common data base */
+const char *key, /* keyword to insert */
+const char *buf, /* buffer with the keyword value */
+int len, /* length of the keyword value */
+int type, /* VPD_RO_KEY or VPD_RW_KEY */
+int op) /* operation to do: ADD_KEY or OWR_KEY */
+{
+ SK_VPD_PARA vp;
+ char *etp; /* end tag position */
+ int free; /* remaining space in selected area */
+ char *ip; /* input position inside the VPD buffer */
+ int rtv; /* return code */
+	int	head;	/* additional header bytes to move */
+	int	found;	/* additional bytes if the keyword was found */
+ int vpd_size;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("VPD setup para key = %s, val = %s\n",key,buf));
+
+ vpd_size = pAC->vpd.vpd_size;
+
+ rtv = 0;
+ ip = NULL;
+ if (type == VPD_RW_KEY) {
+ /* end tag is "RW" */
+ free = pAC->vpd.v.vpd_free_rw;
+ etp = pAC->vpd.vpd_buf + (vpd_size - free - 1 - 3);
+ }
+ else {
+ /* end tag is "RV" */
+ free = pAC->vpd.v.vpd_free_ro;
+ etp = pAC->vpd.vpd_buf + (vpd_size/2 - free - 4);
+ }
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("Free RO = %d, Free RW = %d\n",
+ pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw));
+
+ head = 0;
+ found = 0;
+ if (op == OWR_KEY) {
+ if (vpd_find_para(pAC, key, &vp)) {
+ found = 3;
+ ip = vp.p_val - 3;
+ free += vp.p_len + 3;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("Overwrite Key\n"));
+ }
+ else {
+ op = ADD_KEY;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
+ ("Add Key\n"));
+ }
+ }
+ if (op == ADD_KEY) {
+ ip = etp;
+ vp.p_len = 0;
+ head = 3;
+ }
+
+ if (len + 3 > free) {
+ if (free < 7) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD Buffer Overflow, keyword not written\n"));
+ return(4);
+ }
+ /* cut it again */
+ len = free - 3;
+ rtv = 2;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD Buffer Full, Keyword was cut\n"));
+ }
+
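+	/*
+	 * Shift everything between the end of the old value (or the end tag,
+	 * when a new key is added) and the end tag's length byte by the net
+	 * size change, then place the new key/value at the insert position
+	 * and rebuild the end tag.
+	 */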
+ vpd_move_para(ip + vp.p_len + found, etp+2, len-vp.p_len+head);
+ vpd_insert_key(key, buf, len, ip);
+ if (vpd_mod_endtag(pAC, etp + len - vp.p_len + head)) {
+ pAC->vpd.v.vpd_status &= ~VPD_VALID;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD Encoding Error\n"));
+ return(6);
+ }
+
+ return(rtv);
+}
+
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the
+ * VPD buffer if not already done.
+ *
+ * return: A pointer to the vpd_status structure. The structure contains
+ *		the VPD status and the remaining read-only / read-write space.
+ */
+SK_VPD_STATUS *VpdStat(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC) /* IO Context */
+{
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ (void)VpdInit(pAC, IoC);
+ }
+ return(&pAC->vpd.v);
+}
+
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the VPD
+ * buffer if not already done.
+ * Scan the VPD buffer for VPD keywords and create the VPD
+ * keyword list by copying the keywords to 'buf', all after
+ * each other and terminated with a '\0'.
+ *
+ * Exceptions: o The Resource Type ID String (product name) is called "Name"
+ * o The VPD end tags 'RV' and 'RW' are not listed
+ *
+ * The number of copied keywords is counted in 'elements'.
+ *
+ * returns 0: success
+ *	2:	buffer overflow, one or more keywords are missing
+ * 6: fatal VPD error
+ *
+ * example values after returning:
+ *
+ * buf = "Name\0PN\0EC\0MN\0SN\0CP\0VF\0VL\0YA\0"
+ * *len = 30
+ * *elements = 9
+ */
+int VpdKeys(
+SK_AC *pAC, /* common data base */
+SK_IOC IoC, /* IO Context */
+char *buf, /* buffer where to copy the keywords */
+int *len, /* buffer length */
+int *elements) /* number of keywords returned */
+{
+ char *v;
+ int n;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("list VPD keys .. "));
+ *elements = 0;
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ if (VpdInit(pAC, IoC) != 0) {
+ *len = 0;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD Init Error, terminated\n"));
+ return(6);
+ }
+ }
+
+ if ((signed)strlen(VPD_NAME) + 1 <= *len) {
+ v = pAC->vpd.vpd_buf;
+ strcpy(buf,VPD_NAME);
+ n = strlen(VPD_NAME) + 1;
+ buf += n;
+ *elements = 1;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
+ ("'%c%c' ",v[0],v[1]));
+ }
+ else {
+ *len = 0;
+ SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR,
+ ("buffer overflow\n"));
+ return(2);
+ }
+
+ v += 3 + VPD_GET_RES_LEN(v) + 3;
+ for (;; ) {
+ /* exit when reaching the "RW" Tag */
+ if (SK_MEMCMP(VPD_RW,v,2) == 0) {
+ break;
+ }
+
+ if (SK_MEMCMP(VPD_RV,v,2) == 0) {
+ v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */
+ continue;
+ }
+
+ if (n+3 <= *len) {
+ SK_MEMCPY(buf,v,2);
+ buf += 2;
+ *buf++ = '\0';
+ n += 3;
+ v += 3 + VPD_GET_VPD_LEN(v);
+ *elements += 1;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
+ ("'%c%c' ",v[0],v[1]));
+ }
+ else {
+ *len = n;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("buffer overflow\n"));
+ return(2);
+ }
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("\n"));
+ *len = n;
+ return(0);
+}
+
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the
+ * VPD buffer if not already done. Search for the VPD keyword
+ * 'key' and copy its value to 'buf'. Add a terminating '\0'.
+ * If the value does not fit into the buffer cut it after
+ * 'len' - 1 bytes.
+ *
+ * returns 0: success
+ * 1: keyword not found
+ * 2: value string was cut
+ * 3: VPD transfer timeout
+ * 6: fatal VPD error
+ */
+int VpdRead(
+SK_AC *pAC, /* common data base */
+SK_IOC IoC, /* IO Context */
+const char *key, /* keyword to read (e.g. "MN") */
+char *buf, /* buffer where to copy the keyword value */
+int *len) /* buffer length */
+{
+ SK_VPD_PARA *p, vp;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("VPD read %s .. ", key));
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ if (VpdInit(pAC, IoC) != 0) {
+ *len = 0;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD init error\n"));
+ return(6);
+ }
+ }
+
+ if ((p = vpd_find_para(pAC, key, &vp)) != NULL) {
+ if (p->p_len > (*(unsigned *)len)-1) {
+ p->p_len = *len - 1;
+ }
+ SK_MEMCPY(buf, p->p_val, p->p_len);
+ buf[p->p_len] = '\0';
+ *len = p->p_len;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
+ ("%c%c%c%c.., len = %d\n",
+ buf[0],buf[1],buf[2],buf[3],*len));
+ }
+ else {
+ *len = 0;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("not found\n"));
+ return(1);
+ }
+ return(0);
+}
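+
+/*
+ * Usage sketch (illustrative only; assumes a valid pAC / IoC pair):
+ *
+ *	char val[VPD_MAX_LEN + 1];
+ *	int  len = sizeof(val);
+ *
+ *	if (VpdRead(pAC, IoC, "SN", val, &len) == 0) {
+ *		... use the '\0' terminated value in val (len bytes) ...
+ *	}
+ */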
+
+
+/*
+ * Check whether a given key may be written
+ *
+ * returns
+ *	SK_TRUE		Yes, it may be written
+ *	SK_FALSE	No, it may not be written
+ */
+SK_BOOL VpdMayWrite(
+char *key) /* keyword to write (allowed values "Yx", "Vx") */
+{
+ if ((*key != 'Y' && *key != 'V') ||
+ key[1] < '0' || key[1] > 'Z' ||
+ (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) {
+
+ return(SK_FALSE);
+ }
+ return(SK_TRUE);
+}
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the VPD
+ * buffer if not already done. Insert/overwrite the keyword 'key'
+ * in the VPD buffer. Cut the keyword value if it does not fit
+ * into the VPD read / write area.
+ *
+ * returns 0: success
+ * 2: value string was cut
+ * 3: VPD transfer timeout
+ * 4: VPD full, keyword was not written
+ * 5: keyword cannot be written
+ * 6: fatal VPD error
+ */
+int VpdWrite(
+SK_AC *pAC, /* common data base */
+SK_IOC IoC, /* IO Context */
+const char *key, /* keyword to write (allowed values "Yx", "Vx") */
+const char *buf) /* buffer where the keyword value can be read from */
+{
+ int len; /* length of the keyword to write */
+ int rtv; /* return code */
+ int rtv2;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
+ ("VPD write %s = %s\n",key,buf));
+
+ if ((*key != 'Y' && *key != 'V') ||
+ key[1] < '0' || key[1] > 'Z' ||
+ (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("illegal key tag, keyword not written\n"));
+ return(5);
+ }
+
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ if (VpdInit(pAC, IoC) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD init error\n"));
+ return(6);
+ }
+ }
+
+ rtv = 0;
+ len = strlen(buf);
+ if (len > VPD_MAX_LEN) {
+ /* cut it */
+ len = VPD_MAX_LEN;
+ rtv = 2;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("keyword too long, cut after %d bytes\n",VPD_MAX_LEN));
+ }
+ if ((rtv2 = VpdSetupPara(pAC, key, buf, len, VPD_RW_KEY, OWR_KEY)) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD write error\n"));
+ return(rtv2);
+ }
+
+ return(rtv);
+}
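+
+/*
+ * Usage sketch (illustrative only): a caller could store a user keyword with
+ *
+ *	if (VpdWrite(pAC, IoC, "Y1", "some text") == 0) {
+ *		(void)VpdUpdate(pAC, IoC);
+ *	}
+ *
+ * VpdWrite() only modifies the VPD buffer; VpdUpdate() writes the
+ * read/write area back to the VPD EEPROM.
+ */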
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the
+ * VPD buffer if not already done. Remove the VPD keyword
+ * 'key' from the VPD buffer.
+ * Only the keywords in the read/write area can be deleted.
+ * Keywords in the read only area cannot be deleted.
+ *
+ * returns 0: success, keyword was removed
+ * 1: keyword not found
+ * 5: keyword cannot be deleted
+ * 6: fatal VPD error
+ */
+int VpdDelete(
+SK_AC *pAC, /* common data base */
+SK_IOC IoC, /* IO Context */
+char *key) /* keyword to read (e.g. "MN") */
+{
+ SK_VPD_PARA *p, vp;
+ char *etp;
+ int vpd_size;
+
+ vpd_size = pAC->vpd.vpd_size;
+
+ SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("VPD delete key %s\n",key));
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ if (VpdInit(pAC, IoC) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD init error\n"));
+ return(6);
+ }
+ }
+
+ if ((p = vpd_find_para(pAC, key, &vp)) != NULL) {
+ if (p->p_val < pAC->vpd.vpd_buf + vpd_size/2) {
+ /* try to delete read only keyword */
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("cannot delete RO keyword\n"));
+ return(5);
+ }
+
+ etp = pAC->vpd.vpd_buf + (vpd_size-pAC->vpd.v.vpd_free_rw-1-3);
+
+ vpd_move_para(vp.p_val+vp.p_len, etp+2,
+ - ((int)(vp.p_len + 3)));
+ if (vpd_mod_endtag(pAC, etp - vp.p_len - 3)) {
+ pAC->vpd.v.vpd_status &= ~VPD_VALID;
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD encoding error\n"));
+ return(6);
+ }
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("keyword not found\n"));
+ return(1);
+ }
+
+ return(0);
+}
+
+/*
+ * If the VPD buffer contains valid data write the VPD
+ * read/write area back to the VPD EEPROM.
+ *
+ * returns 0: success
+ * 3: VPD transfer timeout
+ */
+int VpdUpdate(
+SK_AC *pAC, /* Adapters context */
+SK_IOC IoC) /* IO Context */
+{
+ int vpd_size;
+
+ vpd_size = pAC->vpd.vpd_size;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("VPD update .. "));
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) != 0) {
+ if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf + vpd_size/2,
+ vpd_size/2, vpd_size/2, VPD_WRITE) != vpd_size/2) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("transfer timed out\n"));
+ return(3);
+ }
+ }
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("done\n"));
+ return(0);
+}
+
+
+
+/*
+ * Read the contents of the VPD EEPROM and copy it to the VPD buffer
+ * if not already done. If the keyword "VF" is not present it will be
+ * created and the error log message will be stored to this keyword.
+ * If "VF" is not present the error log message will be stored to the
+ * keyword "VL". "VL" will created or overwritten if "VF" is present.
+ * The VPD read/write area is saved to the VPD EEPROM.
+ *
+ * returns nothing, errors will be ignored.
+ */
+void VpdErrLog(
+SK_AC *pAC, /* common data base */
+SK_IOC IoC, /* IO Context */
+char *msg) /* error log message */
+{
+ SK_VPD_PARA *v, vf; /* VF */
+ int len;
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
+ ("VPD error log msg %s\n", msg));
+ if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
+ if (VpdInit(pAC, IoC) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
+ ("VPD init error\n"));
+ return;
+ }
+ }
+
+ len = strlen(msg);
+ if (len > VPD_MAX_LEN) {
+ /* cut it */
+ len = VPD_MAX_LEN;
+ }
+ if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n"));
+ (void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY);
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n"));
+ (void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY);
+ }
+
+ (void)VpdUpdate(pAC, IoC);
+}
+
diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c
new file mode 100644
index 000000000000..94a09deecb32
--- /dev/null
+++ b/drivers/net/sk98lin/skxmac2.c
@@ -0,0 +1,4607 @@
+/******************************************************************************
+ *
+ * Name: skxmac2.c
+ * Project: Gigabit Ethernet Adapters, Common Modules
+ * Version: $Revision: 1.102 $
+ * Date: $Date: 2003/10/02 16:53:58 $
+ * Purpose: Contains functions to initialize the MACs and PHYs
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ *
+ * (C)Copyright 1998-2002 SysKonnect.
+ * (C)Copyright 2002-2003 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#include "h/skdrv1st.h"
+#include "h/skdrv2nd.h"
+
+/* typedefs *******************************************************************/
+
+/* BCOM PHY magic pattern list */
+typedef struct s_PhyHack {
+ int PhyReg; /* Phy register */
+ SK_U16 PhyVal; /* Value to write */
+} BCOM_HACK;
+
+/* local variables ************************************************************/
+
+#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
+static const char SysKonnectFileId[] =
+ "@(#) $Id: skxmac2.c,v 1.102 2003/10/02 16:53:58 rschmidt Exp $ (C) Marvell.";
+#endif
+
+#ifdef GENESIS
+BCOM_HACK BcomRegA1Hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
+ { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
+ { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
+ { 0, 0 }
+};
+BCOM_HACK BcomRegC0Hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 },
+ { 0x15, 0x0A04 }, { 0x18, 0x0420 },
+ { 0, 0 }
+};
+#endif
+
+/* function prototypes ********************************************************/
+#ifdef GENESIS
+static void SkXmInitPhyXmac(SK_AC*, SK_IOC, int, SK_BOOL);
+static void SkXmInitPhyBcom(SK_AC*, SK_IOC, int, SK_BOOL);
+static int SkXmAutoNegDoneXmac(SK_AC*, SK_IOC, int);
+static int SkXmAutoNegDoneBcom(SK_AC*, SK_IOC, int);
+#endif /* GENESIS */
+#ifdef YUKON
+static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL);
+static int SkGmAutoNegDoneMarv(SK_AC*, SK_IOC, int);
+#endif /* YUKON */
+#ifdef OTHER_PHY
+static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL);
+static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL);
+static int SkXmAutoNegDoneLone(SK_AC*, SK_IOC, int);
+static int SkXmAutoNegDoneNat (SK_AC*, SK_IOC, int);
+#endif /* OTHER_PHY */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmPhyRead() - Read from XMAC PHY register
+ *
+ * Description: reads a 16-bit word from XMAC PHY or ext. PHY
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmPhyRead(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 SK_FAR *pVal) /* Pointer to Value */
+{
+ SK_U16 Mmu;
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* write the PHY register's address */
+ XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr);
+
+ /* get the PHY register's value */
+ XM_IN16(IoC, Port, XM_PHY_DATA, pVal);
+
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ do {
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
+ /* wait until 'Ready' is set */
+ } while ((Mmu & XM_MMU_PHY_RDY) == 0);
+
+ /* get the PHY register's value */
+ XM_IN16(IoC, Port, XM_PHY_DATA, pVal);
+ }
+} /* SkXmPhyRead */
+
+
+/******************************************************************************
+ *
+ * SkXmPhyWrite() - Write to XMAC PHY register
+ *
+ * Description: writes a 16-bit word to XMAC PHY or ext. PHY
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmPhyWrite(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 Val) /* Value */
+{
+ SK_U16 Mmu;
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ do {
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
+ /* wait until 'Busy' is cleared */
+ } while ((Mmu & XM_MMU_PHY_BUSY) != 0);
+ }
+
+ /* write the PHY register's address */
+ XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr);
+
+ /* write the PHY register's value */
+ XM_OUT16(IoC, Port, XM_PHY_DATA, Val);
+
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ do {
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
+ /* wait until 'Busy' is cleared */
+ } while ((Mmu & XM_MMU_PHY_BUSY) != 0);
+ }
+} /* SkXmPhyWrite */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmPhyRead() - Read from GPHY register
+ *
+ * Description: reads a 16-bit word from GPHY through MDIO
+ *
+ * Returns:
+ * nothing
+ */
+void SkGmPhyRead(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 SK_FAR *pVal) /* Pointer to Value */
+{
+ SK_U16 Ctrl;
+ SK_GEPORT *pPrt;
+#ifdef VCPU
+ u_long SimCyle;
+ u_long SimLowTime;
+
+ VCPUgetTime(&SimCyle, &SimLowTime);
+ VCPUprintf(0, "SkGmPhyRead(%u), SimCyle=%u, SimLowTime=%u\n",
+ PhyReg, SimCyle, SimLowTime);
+#endif /* VCPU */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* set PHY-Register offset and 'Read' OpCode (= 1) */
+ *pVal = (SK_U16)(GM_SMI_CT_PHY_AD(pPrt->PhyAddr) |
+ GM_SMI_CT_REG_AD(PhyReg) | GM_SMI_CT_OP_RD);
+
+ GM_OUT16(IoC, Port, GM_SMI_CTRL, *pVal);
+
+ GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
+
+ /* additional check for MDC/MDIO activity */
+ if ((Ctrl & GM_SMI_CT_BUSY) == 0) {
+ *pVal = 0;
+ return;
+ }
+
+ *pVal |= GM_SMI_CT_BUSY;
+
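+	/*
+	 * *pVal now holds the command pattern with the busy bit set; the
+	 * loop below polls the control register until it no longer matches
+	 * this pattern, i.e. until the SMI read cycle has completed.
+	 */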
+ do {
+#ifdef VCPU
+ VCPUwaitTime(1000);
+#endif /* VCPU */
+
+ GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
+
+ /* wait until 'ReadValid' is set */
+ } while (Ctrl == *pVal);
+
+ /* get the PHY register's value */
+ GM_IN16(IoC, Port, GM_SMI_DATA, pVal);
+
+#ifdef VCPU
+ VCPUgetTime(&SimCyle, &SimLowTime);
+ VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n",
+ SimCyle, SimLowTime);
+#endif /* VCPU */
+
+} /* SkGmPhyRead */
+
+
+/******************************************************************************
+ *
+ * SkGmPhyWrite() - Write to GPHY register
+ *
+ * Description: writes a 16-bit word to GPHY through MDIO
+ *
+ * Returns:
+ * nothing
+ */
+void SkGmPhyWrite(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 Val) /* Value */
+{
+ SK_U16 Ctrl;
+ SK_GEPORT *pPrt;
+#ifdef VCPU
+ SK_U32 DWord;
+ u_long SimCyle;
+ u_long SimLowTime;
+
+ VCPUgetTime(&SimCyle, &SimLowTime);
+ VCPUprintf(0, "SkGmPhyWrite(Reg=%u, Val=0x%04x), SimCyle=%u, SimLowTime=%u\n",
+ PhyReg, Val, SimCyle, SimLowTime);
+#endif /* VCPU */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* write the PHY register's value */
+ GM_OUT16(IoC, Port, GM_SMI_DATA, Val);
+
+ /* set PHY-Register offset and 'Write' OpCode (= 0) */
+ Val = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg);
+
+ GM_OUT16(IoC, Port, GM_SMI_CTRL, Val);
+
+ GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
+
+ /* additional check for MDC/MDIO activity */
+ if ((Ctrl & GM_SMI_CT_BUSY) == 0) {
+ return;
+ }
+
+ Val |= GM_SMI_CT_BUSY;
+
+ do {
+#ifdef VCPU
+ /* read Timer value */
+ SK_IN32(IoC, B2_TI_VAL, &DWord);
+
+ VCPUwaitTime(1000);
+#endif /* VCPU */
+
+ GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
+
+ /* wait until 'Busy' is cleared */
+ } while (Ctrl == Val);
+
+#ifdef VCPU
+ VCPUgetTime(&SimCyle, &SimLowTime);
+ VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n",
+ SimCyle, SimLowTime);
+#endif /* VCPU */
+
+} /* SkGmPhyWrite */
+#endif /* YUKON */
+
+
+#ifdef SK_DIAG
+/******************************************************************************
+ *
+ * SkGePhyRead() - Read from PHY register
+ *
+ * Description: calls a read PHY routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkGePhyRead(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 *pVal) /* Pointer to Value */
+{
+ void (*r_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 *pVal);
+
+ if (pAC->GIni.GIGenesis) {
+ r_func = SkXmPhyRead;
+ }
+ else {
+ r_func = SkGmPhyRead;
+ }
+
+ r_func(pAC, IoC, Port, PhyReg, pVal);
+} /* SkGePhyRead */
+
+
+/******************************************************************************
+ *
+ * SkGePhyWrite() - Write to PHY register
+ *
+ * Description: calls a write PHY routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkGePhyWrite(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* I/O Context */
+int Port, /* Port Index (MAC_1 + n) */
+int PhyReg, /* Register Address (Offset) */
+SK_U16 Val) /* Value */
+{
+ void (*w_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 Val);
+
+ if (pAC->GIni.GIGenesis) {
+ w_func = SkXmPhyWrite;
+ }
+ else {
+ w_func = SkGmPhyWrite;
+ }
+
+ w_func(pAC, IoC, Port, PhyReg, Val);
+} /* SkGePhyWrite */
+#endif /* SK_DIAG */
+
+
+/******************************************************************************
+ *
+ * SkMacPromiscMode() - Enable / Disable Promiscuous Mode
+ *
+ * Description:
+ * enables / disables promiscuous mode by setting Mode Register (XMAC) or
+ * Receive Control Register (GMAC) dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacPromiscMode(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL Enable) /* Enable / Disable */
+{
+#ifdef YUKON
+ SK_U16 RcReg;
+#endif
+#ifdef GENESIS
+ SK_U32 MdReg;
+#endif
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+ /* enable or disable promiscuous mode */
+ if (Enable) {
+ MdReg |= XM_MD_ENA_PROM;
+ }
+ else {
+ MdReg &= ~XM_MD_ENA_PROM;
+ }
+ /* setup Mode Register */
+ XM_OUT32(IoC, Port, XM_MODE, MdReg);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg);
+
+ /* enable or disable unicast and multicast filtering */
+ if (Enable) {
+ RcReg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ }
+ else {
+ RcReg |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ }
+ /* setup Receive Control Register */
+ GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg);
+ }
+#endif /* YUKON */
+
+} /* SkMacPromiscMode*/
+
+
+/******************************************************************************
+ *
+ * SkMacHashing() - Enable / Disable Hashing
+ *
+ * Description:
+ * enables / disables hashing by setting Mode Register (XMAC) or
+ * Receive Control Register (GMAC) dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacHashing(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL Enable) /* Enable / Disable */
+{
+#ifdef YUKON
+ SK_U16 RcReg;
+#endif
+#ifdef GENESIS
+ SK_U32 MdReg;
+#endif
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+ /* enable or disable hashing */
+ if (Enable) {
+ MdReg |= XM_MD_ENA_HASH;
+ }
+ else {
+ MdReg &= ~XM_MD_ENA_HASH;
+ }
+ /* setup Mode Register */
+ XM_OUT32(IoC, Port, XM_MODE, MdReg);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg);
+
+ /* enable or disable multicast filtering */
+ if (Enable) {
+ RcReg |= GM_RXCR_MCF_ENA;
+ }
+ else {
+ RcReg &= ~GM_RXCR_MCF_ENA;
+ }
+ /* setup Receive Control Register */
+ GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg);
+ }
+#endif /* YUKON */
+
+} /* SkMacHashing*/
+
+
+#ifdef SK_DIAG
+/******************************************************************************
+ *
+ * SkXmSetRxCmd() - Modify the value of the XMAC's Rx Command Register
+ *
+ * Description:
+ * The features
+ * - FCS stripping, SK_STRIP_FCS_ON/OFF
+ * - pad byte stripping, SK_STRIP_PAD_ON/OFF
+ * - don't set XMR_FS_ERR in status SK_LENERR_OK_ON/OFF
+ *	  for in-range length error frames
+ * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF
+ * for frames > 1514 bytes
+ * - enable Rx of own packets SK_SELF_RX_ON/OFF
+ *
+ * for incoming packets may be enabled/disabled by this function.
+ * Additional modes may be added later.
+ * Multiple modes can be enabled/disabled at the same time.
+ * The new configuration is written to the Rx Command register immediately.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmSetRxCmd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF,
+ SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */
+{
+ SK_U16 OldRxCmd;
+ SK_U16 RxCmd;
+
+ XM_IN16(IoC, Port, XM_RX_CMD, &OldRxCmd);
+
+ RxCmd = OldRxCmd;
+
+ switch (Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) {
+ case SK_STRIP_FCS_ON:
+ RxCmd |= XM_RX_STRIP_FCS;
+ break;
+ case SK_STRIP_FCS_OFF:
+ RxCmd &= ~XM_RX_STRIP_FCS;
+ break;
+ }
+
+ switch (Mode & (SK_STRIP_PAD_ON | SK_STRIP_PAD_OFF)) {
+ case SK_STRIP_PAD_ON:
+ RxCmd |= XM_RX_STRIP_PAD;
+ break;
+ case SK_STRIP_PAD_OFF:
+ RxCmd &= ~XM_RX_STRIP_PAD;
+ break;
+ }
+
+ switch (Mode & (SK_LENERR_OK_ON | SK_LENERR_OK_OFF)) {
+ case SK_LENERR_OK_ON:
+ RxCmd |= XM_RX_LENERR_OK;
+ break;
+ case SK_LENERR_OK_OFF:
+ RxCmd &= ~XM_RX_LENERR_OK;
+ break;
+ }
+
+ switch (Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) {
+ case SK_BIG_PK_OK_ON:
+ RxCmd |= XM_RX_BIG_PK_OK;
+ break;
+ case SK_BIG_PK_OK_OFF:
+ RxCmd &= ~XM_RX_BIG_PK_OK;
+ break;
+ }
+
+ switch (Mode & (SK_SELF_RX_ON | SK_SELF_RX_OFF)) {
+ case SK_SELF_RX_ON:
+ RxCmd |= XM_RX_SELF_RX;
+ break;
+ case SK_SELF_RX_OFF:
+ RxCmd &= ~XM_RX_SELF_RX;
+ break;
+ }
+
+ /* Write the new mode to the Rx command register if required */
+ if (OldRxCmd != RxCmd) {
+ XM_OUT16(IoC, Port, XM_RX_CMD, RxCmd);
+ }
+} /* SkXmSetRxCmd */
+
+
+/******************************************************************************
+ *
+ * SkGmSetRxCmd() - Modify the value of the GMAC's Rx Control Register
+ *
+ * Description:
+ * The features
+ * - FCS (CRC) stripping, SK_STRIP_FCS_ON/OFF
+ * - don't set GMR_FS_LONG_ERR SK_BIG_PK_OK_ON/OFF
+ * for frames > 1514 bytes
+ * - enable Rx of own packets SK_SELF_RX_ON/OFF
+ *
+ * for incoming packets may be enabled/disabled by this function.
+ * Additional modes may be added later.
+ * Multiple modes can be enabled/disabled at the same time.
+ * The new configuration is written to the Rx Command register immediately.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGmSetRxCmd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF,
+ SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */
+{
+ SK_U16 OldRxCmd;
+ SK_U16 RxCmd;
+
+ if ((Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) != 0) {
+
+ GM_IN16(IoC, Port, GM_RX_CTRL, &OldRxCmd);
+
+ RxCmd = OldRxCmd;
+
+ if ((Mode & SK_STRIP_FCS_ON) != 0) {
+ RxCmd |= GM_RXCR_CRC_DIS;
+ }
+ else {
+ RxCmd &= ~GM_RXCR_CRC_DIS;
+ }
+ /* Write the new mode to the Rx control register if required */
+ if (OldRxCmd != RxCmd) {
+ GM_OUT16(IoC, Port, GM_RX_CTRL, RxCmd);
+ }
+ }
+
+ if ((Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) != 0) {
+
+ GM_IN16(IoC, Port, GM_SERIAL_MODE, &OldRxCmd);
+
+ RxCmd = OldRxCmd;
+
+ if ((Mode & SK_BIG_PK_OK_ON) != 0) {
+ RxCmd |= GM_SMOD_JUMBO_ENA;
+ }
+ else {
+ RxCmd &= ~GM_SMOD_JUMBO_ENA;
+ }
+ /* Write the new mode to the Rx control register if required */
+ if (OldRxCmd != RxCmd) {
+ GM_OUT16(IoC, Port, GM_SERIAL_MODE, RxCmd);
+ }
+ }
+} /* SkGmSetRxCmd */
+
+
+/******************************************************************************
+ *
+ * SkMacSetRxCmd() - Modify the value of the MAC's Rx Control Register
+ *
+ * Description: modifies the MAC's Rx Control reg. dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacSetRxCmd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Mode) /* Rx Mode */
+{
+ if (pAC->GIni.GIGenesis) {
+
+ SkXmSetRxCmd(pAC, IoC, Port, Mode);
+ }
+ else {
+
+ SkGmSetRxCmd(pAC, IoC, Port, Mode);
+ }
+
+} /* SkMacSetRxCmd */
+
+
+/******************************************************************************
+ *
+ * SkMacCrcGener() - Enable / Disable CRC Generation
+ *
+ * Description: enables / disables CRC generation dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacCrcGener(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL Enable) /* Enable / Disable */
+{
+ SK_U16 Word;
+
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN16(IoC, Port, XM_TX_CMD, &Word);
+
+ if (Enable) {
+ Word &= ~XM_TX_NO_CRC;
+ }
+ else {
+ Word |= XM_TX_NO_CRC;
+ }
+ /* setup Tx Command Register */
+ XM_OUT16(IoC, Port, XM_TX_CMD, Word);
+ }
+ else {
+
+ GM_IN16(IoC, Port, GM_TX_CTRL, &Word);
+
+ if (Enable) {
+ Word &= ~GM_TXCR_CRC_DIS;
+ }
+ else {
+ Word |= GM_TXCR_CRC_DIS;
+ }
+ /* setup Tx Control Register */
+ GM_OUT16(IoC, Port, GM_TX_CTRL, Word);
+ }
+
+} /* SkMacCrcGener*/
+
+#endif /* SK_DIAG */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmClrExactAddr() - Clear Exact Match Address Registers
+ *
+ * Description:
+ * All Exact Match Address registers of the XMAC 'Port' will be
+ * cleared starting with 'StartNum' up to (and including) the
+ * Exact Match address number of 'StopNum'.
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmClrExactAddr(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int StartNum, /* Begin with this Address Register Index (0..15) */
+int StopNum) /* Stop after finished with this Register Idx (0..15) */
+{
+ int i;
+ SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000};
+
+ if ((unsigned)StartNum > 15 || (unsigned)StopNum > 15 ||
+ StartNum > StopNum) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E001, SKERR_HWI_E001MSG);
+ return;
+ }
+
+ for (i = StartNum; i <= StopNum; i++) {
+ XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]);
+ }
+} /* SkXmClrExactAddr */
+#endif /* GENESIS */
+
+
+/******************************************************************************
+ *
+ * SkMacFlushTxFifo() - Flush the MAC's transmit FIFO
+ *
+ * Description:
+ * Flush the transmit FIFO of the MAC specified by the index 'Port'
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacFlushTxFifo(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+#ifdef GENESIS
+ SK_U32 MdReg;
+
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+
+ XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FTF);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+		/* no way to flush the FIFO; we have to issue a reset */
+ /* TBD */
+ }
+#endif /* YUKON */
+
+} /* SkMacFlushTxFifo */
+
+
+/******************************************************************************
+ *
+ * SkMacFlushRxFifo() - Flush the MAC's receive FIFO
+ *
+ * Description:
+ * Flush the receive FIFO of the MAC specified by the index 'Port'
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacFlushRxFifo(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+#ifdef GENESIS
+ SK_U32 MdReg;
+
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+
+ XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FRF);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+		/* no way to flush the FIFO; we have to issue a reset */
+ /* TBD */
+ }
+#endif /* YUKON */
+
+} /* SkMacFlushRxFifo */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmSoftRst() - Do a XMAC software reset
+ *
+ * Description:
+ * The PHY registers should not be destroyed during this
+ * kind of software reset. Therefore the XMAC Software Reset
+ * (XM_GP_RES_MAC bit in XM_GP_PORT) must not be used!
+ *
+ * The software reset is done by
+ * - disabling the Rx and Tx state machine,
+ * - resetting the statistics module,
+ *	- clearing all other significant XMAC Mode,
+ * Command, and Control Registers
+ * - clearing the Hash Register and the
+ * Exact Match Address registers, and
+ * - flushing the XMAC's Rx and Tx FIFOs.
+ *
+ * Note:
+ * Another requirement when stopping the XMAC is to
+ * avoid sending corrupted frames on the network.
+ * Disabling the Tx state machine will NOT interrupt
+ * the currently transmitted frame. But we must take care
+ * that the Tx FIFO is cleared AFTER the current frame
+ *	that the Tx FIFO is cleared AFTER the current frame
+ *	is completely sent to the network.
+ *
+ * It takes about 12ns to send a frame with 1538 bytes.
+ * One PCI clock goes at least 15ns (66MHz). Therefore
+ * after reading XM_GP_PORT back, we are sure that the
+ * transmitter is disabled AND idle. And this means
+ * we may flush the transmit FIFO now.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmSoftRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U16 ZeroAddr[4] = {0x0000, 0x0000, 0x0000, 0x0000};
+
+ /* reset the statistics module */
+ XM_OUT32(IoC, Port, XM_GP_PORT, XM_GP_RES_STAT);
+
+ /* disable all XMAC IRQs */
+ XM_OUT16(IoC, Port, XM_IMSK, 0xffff);
+
+ XM_OUT32(IoC, Port, XM_MODE, 0); /* clear Mode Reg */
+
+ XM_OUT16(IoC, Port, XM_TX_CMD, 0); /* reset TX CMD Reg */
+ XM_OUT16(IoC, Port, XM_RX_CMD, 0); /* reset RX CMD Reg */
+
+ /* disable all PHY IRQs */
+ switch (pAC->GIni.GP[Port].PhyType) {
+ case SK_PHY_BCOM:
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0);
+ break;
+ case SK_PHY_NAT:
+ /* todo: National
+ SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */
+ break;
+#endif /* OTHER_PHY */
+ }
+
+ /* clear the Hash Register */
+ XM_OUTHASH(IoC, Port, XM_HSM, &ZeroAddr);
+
+ /* clear the Exact Match Address registers */
+ SkXmClrExactAddr(pAC, IoC, Port, 0, 15);
+
+ /* clear the Source Check Address registers */
+ XM_OUTHASH(IoC, Port, XM_SRC_CHK, &ZeroAddr);
+
+} /* SkXmSoftRst */
+
+
+/******************************************************************************
+ *
+ * SkXmHardRst() - Do a XMAC hardware reset
+ *
+ * Description:
+ * The XMAC of the specified 'Port' and all connected devices
+ * (PHY and SERDES) will receive a reset signal on its *Reset pins.
+ * External PHYs must be reset by clearing a bit in the GPIO register
+ * (Timing requirements: Broadcom: 400ns, Level One: none, National: 80ns).
+ *
+ * ATTENTION:
+ * It is absolutely necessary to reset the SW_RST Bit first
+ * before calling this function.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmHardRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U32 Reg;
+ int i;
+ int TOut;
+ SK_U16 Word;
+
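+	/*
+	 * Toggle the MAC reset: clear it, then keep setting it and reading
+	 * it back until the reset bit is seen; give up after 10000 attempts
+	 * (the adapter is then assumed to be held in reset). The whole
+	 * sequence is repeated four times.
+	 */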
+ for (i = 0; i < 4; i++) {
+ /* TX_MFF_CTRL1 has 32 bits, but only the lowest 16 bits are used */
+ SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+
+ TOut = 0;
+ do {
+ if (TOut++ > 10000) {
+ /*
+ * Adapter seems to be in RESET state.
+ * Registers cannot be written.
+ */
+ return;
+ }
+
+ SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
+
+ SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &Word);
+
+ } while ((Word & MFF_SET_MAC_RST) == 0);
+ }
+
+ /* For external PHYs there must be special handling */
+ if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) {
+
+ SK_IN32(IoC, B2_GP_IO, &Reg);
+
+ if (Port == 0) {
+ Reg |= GP_DIR_0; /* set to output */
+ Reg &= ~GP_IO_0; /* set PHY reset (active low) */
+ }
+ else {
+ Reg |= GP_DIR_2; /* set to output */
+ Reg &= ~GP_IO_2; /* set PHY reset (active low) */
+ }
+ /* reset external PHY */
+ SK_OUT32(IoC, B2_GP_IO, Reg);
+
+ /* short delay */
+ SK_IN32(IoC, B2_GP_IO, &Reg);
+ }
+} /* SkXmHardRst */
+
+
+/******************************************************************************
+ *
+ * SkXmClearRst() - Release the PHY & XMAC reset
+ *
+ * Description:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmClearRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U32 DWord;
+
+ /* clear HW reset */
+ SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+
+ if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) {
+
+ SK_IN32(IoC, B2_GP_IO, &DWord);
+
+ if (Port == 0) {
+ DWord |= (GP_DIR_0 | GP_IO_0); /* set to output */
+ }
+ else {
+ DWord |= (GP_DIR_2 | GP_IO_2); /* set to output */
+ }
+ /* Clear PHY reset */
+ SK_OUT32(IoC, B2_GP_IO, DWord);
+
+ /* Enable GMII interface */
+ XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_GMII_MD);
+ }
+} /* SkXmClearRst */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmSoftRst() - Do a GMAC software reset
+ *
+ * Description:
+ * The GPHY registers should not be destroyed during this
+ * kind of software reset.
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGmSoftRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U16 EmptyHash[4] = {0x0000, 0x0000, 0x0000, 0x0000};
+ SK_U16 RxCtrl;
+
+ /* reset the statistics module */
+
+ /* disable all GMAC IRQs */
+ SK_OUT8(IoC, GMAC_IRQ_MSK, 0);
+
+ /* disable all PHY IRQs */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0);
+
+ /* clear the Hash Register */
+ GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, EmptyHash);
+
+ /* Enable Unicast and Multicast filtering */
+ GM_IN16(IoC, Port, GM_RX_CTRL, &RxCtrl);
+
+ GM_OUT16(IoC, Port, GM_RX_CTRL,
+ (SK_U16)(RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA));
+
+} /* SkGmSoftRst */
+
+
+/******************************************************************************
+ *
+ * SkGmHardRst() - Do a GMAC hardware reset
+ *
+ * Description:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGmHardRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U32 DWord;
+
+ /* WA code for COMA mode */
+ if (pAC->GIni.GIYukonLite &&
+ pAC->GIni.GIChipRev == CHIP_REV_YU_LITE_A3) {
+
+ SK_IN32(IoC, B2_GP_IO, &DWord);
+
+ DWord |= (GP_DIR_9 | GP_IO_9);
+
+ /* set PHY reset */
+ SK_OUT32(IoC, B2_GP_IO, DWord);
+ }
+
+ /* set GPHY Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), GPC_RST_SET);
+
+ /* set GMAC Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET);
+
+} /* SkGmHardRst */
+
+
+/******************************************************************************
+ *
+ * SkGmClearRst() - Release the GPHY & GMAC reset
+ *
+ * Description:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGmClearRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U32 DWord;
+
+#ifdef XXX
+ /* clear GMAC Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_CLR);
+
+ /* set GMAC Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET);
+#endif /* XXX */
+
+ /* WA code for COMA mode */
+ if (pAC->GIni.GIYukonLite &&
+ pAC->GIni.GIChipRev == CHIP_REV_YU_LITE_A3) {
+
+ SK_IN32(IoC, B2_GP_IO, &DWord);
+
+ DWord |= GP_DIR_9; /* set to output */
+ DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
+
+ /* clear PHY reset */
+ SK_OUT32(IoC, B2_GP_IO, DWord);
+ }
+
+ /* set HWCFG_MODE */
+ DWord = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
+ GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE |
+ (pAC->GIni.GICopperType ? GPC_HWCFG_GMII_COP :
+ GPC_HWCFG_GMII_FIB);
+
+ /* set GPHY Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_SET);
+
+ /* release GPHY Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_CLR);
+
+#ifdef VCPU
+ VCpuWait(9000);
+#endif /* VCPU */
+
+ /* clear GMAC Control reset */
+ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
+#ifdef VCPU
+ VCpuWait(2000);
+
+ SK_IN32(IoC, MR_ADDR(Port, GPHY_CTRL), &DWord);
+
+ SK_IN32(IoC, B0_ISRC, &DWord);
+#endif /* VCPU */
+
+} /* SkGmClearRst */
+#endif /* YUKON */
+
+
+/******************************************************************************
+ *
+ * SkMacSoftRst() - Do a MAC software reset
+ *
+ * Description: calls a MAC software reset routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacSoftRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* disable receiver and transmitter */
+ SkMacRxTxDisable(pAC, IoC, Port);
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SkXmSoftRst(pAC, IoC, Port);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmSoftRst(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+ /* flush the MAC's Rx and Tx FIFOs */
+ SkMacFlushTxFifo(pAC, IoC, Port);
+
+ SkMacFlushRxFifo(pAC, IoC, Port);
+
+ pPrt->PState = SK_PRT_STOP;
+
+} /* SkMacSoftRst */
+
+
+/******************************************************************************
+ *
+ * SkMacHardRst() - Do a MAC hardware reset
+ *
+ * Description: calls a MAC hardware reset routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacHardRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SkXmHardRst(pAC, IoC, Port);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmHardRst(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+ pAC->GIni.GP[Port].PState = SK_PRT_RESET;
+
+} /* SkMacHardRst */
+
+
+/******************************************************************************
+ *
+ * SkMacClearRst() - Clear the MAC reset
+ *
+ * Description: calls a clear MAC reset routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacClearRst(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SkXmClearRst(pAC, IoC, Port);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmClearRst(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+} /* SkMacClearRst */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmInitMac() - Initialize the XMAC II
+ *
+ * Description:
+ * Initialize the XMAC of the specified port.
+ * The XMAC must be reset or stopped before calling this function.
+ *
+ * Note:
+ * The XMAC's Rx and Tx state machine is still disabled when returning.
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmInitMac(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ int i;
+ SK_U16 SWord;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PState == SK_PRT_STOP) {
+ /* Port State: SK_PRT_STOP */
+ /* Verify that the reset bit is cleared */
+ SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &SWord);
+
+ if ((SWord & MFF_SET_MAC_RST) != 0) {
+ /* PState does not match HW state */
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG);
+ /* Correct it */
+ pPrt->PState = SK_PRT_RESET;
+ }
+ }
+
+ if (pPrt->PState == SK_PRT_RESET) {
+
+ SkXmClearRst(pAC, IoC, Port);
+
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ /* read Id from external PHY (all have the same address) */
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_ID1, &pPrt->PhyId1);
+
+ /*
+ * Optimize MDIO transfer by suppressing preamble.
+ * Must be done AFTER first access to BCOM chip.
+ */
+ XM_IN16(IoC, Port, XM_MMU_CMD, &SWord);
+
+ XM_OUT16(IoC, Port, XM_MMU_CMD, SWord | XM_MMU_NO_PRE);
+
+ if (pPrt->PhyId1 == PHY_BCOM_ID1_C0) {
+ /*
+ * Workaround BCOM Errata for the C0 type.
+ * Write magic patterns to reserved registers.
+ */
+ i = 0;
+ while (BcomRegC0Hack[i].PhyReg != 0) {
+ SkXmPhyWrite(pAC, IoC, Port, BcomRegC0Hack[i].PhyReg,
+ BcomRegC0Hack[i].PhyVal);
+ i++;
+ }
+ }
+ else if (pPrt->PhyId1 == PHY_BCOM_ID1_A1) {
+ /*
+ * Workaround BCOM Errata for the A1 type.
+ * Write magic patterns to reserved registers.
+ */
+ i = 0;
+ while (BcomRegA1Hack[i].PhyReg != 0) {
+ SkXmPhyWrite(pAC, IoC, Port, BcomRegA1Hack[i].PhyReg,
+ BcomRegA1Hack[i].PhyVal);
+ i++;
+ }
+ }
+
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom PHYs.
+ * Disable Power Management after reset.
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord);
+
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
+ (SK_U16)(SWord | PHY_B_AC_DIS_PM));
+
+ /* PHY LED initialization is done in SkGeXmitLED() */
+ }
+
+ /* Dummy read the Interrupt source register */
+ XM_IN16(IoC, Port, XM_ISRC, &SWord);
+
+ /*
+ * The auto-negotiation process starts immediately after
+ * clearing the reset. The auto-negotiation process should be
+ * started by the SIRQ, therefore stop it here immediately.
+ */
+ SkMacInitPhy(pAC, IoC, Port, SK_FALSE);
+
+#ifdef TEST_ONLY
+ /* temp. code: enable signal detect */
+ /* WARNING: do not override GMII setting above */
+ XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_COM4SIG);
+#endif
+ }
+
+ /*
+ * configure the XMACs Station Address
+ * B2_MAC_2 = xx xx xx xx xx x1 is programmed to XMAC A
+ * B2_MAC_3 = xx xx xx xx xx x2 is programmed to XMAC B
+ */
+ for (i = 0; i < 3; i++) {
+ /*
+		 * Taken together, the following 2 statements are
+		 * endianness independent. Remember this when changing.
+ */
+ SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord);
+
+ XM_OUT16(IoC, Port, (XM_SA + i * 2), SWord);
+ }
+
+ /* Tx Inter Packet Gap (XM_TX_IPG): use default */
+ /* Tx High Water Mark (XM_TX_HI_WM): use default */
+ /* Tx Low Water Mark (XM_TX_LO_WM): use default */
+ /* Host Request Threshold (XM_HT_THR): use default */
+ /* Rx Request Threshold (XM_RX_THR): use default */
+ /* Rx Low Water Mark (XM_RX_LO_WM): use default */
+
+ /* configure Rx High Water Mark (XM_RX_HI_WM) */
+ XM_OUT16(IoC, Port, XM_RX_HI_WM, SK_XM_RX_HI_WM);
+
+ /* Configure Tx Request Threshold */
+ SWord = SK_XM_THR_SL; /* for single port */
+
+ if (pAC->GIni.GIMacsFound > 1) {
+ switch (pAC->GIni.GIPortUsage) {
+ case SK_RED_LINK:
+ SWord = SK_XM_THR_REDL; /* redundant link */
+ break;
+ case SK_MUL_LINK:
+ SWord = SK_XM_THR_MULL; /* load balancing */
+ break;
+ case SK_JUMBO_LINK:
+ SWord = SK_XM_THR_JUMBO; /* jumbo frames */
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E014, SKERR_HWI_E014MSG);
+ break;
+ }
+ }
+ XM_OUT16(IoC, Port, XM_TX_THR, SWord);
+
+ /* setup register defaults for the Tx Command Register */
+ XM_OUT16(IoC, Port, XM_TX_CMD, XM_TX_AUTO_PAD);
+
+ /* setup register defaults for the Rx Command Register */
+ SWord = XM_RX_STRIP_FCS | XM_RX_LENERR_OK;
+
+ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
+ SWord |= XM_RX_BIG_PK_OK;
+ }
+
+ if (pPrt->PLinkMode == SK_LMODE_HALF) {
+ /*
+ * If in manual half duplex mode the other side might be in
+ * full duplex mode, so ignore if a carrier extension is not seen
+ * on frames received
+ */
+ SWord |= XM_RX_DIS_CEXT;
+ }
+
+ XM_OUT16(IoC, Port, XM_RX_CMD, SWord);
+
+ /*
+ * setup register defaults for the Mode Register
+ * - Don't strip error frames to avoid Store & Forward
+ * on the Rx side.
+ * - Enable 'Check Station Address' bit
+ * - Enable 'Check Address Array' bit
+ */
+ XM_OUT32(IoC, Port, XM_MODE, XM_DEF_MODE);
+
+ /*
+ * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
+ * - Enable all bits excepting 'Octets Rx OK Low CntOv'
+ * and 'Octets Rx OK Hi Cnt Ov'.
+ */
+ XM_OUT32(IoC, Port, XM_RX_EV_MSK, XMR_DEF_MSK);
+
+ /*
+ * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
+ * - Enable all bits excepting 'Octets Tx OK Low CntOv'
+ * and 'Octets Tx OK Hi Cnt Ov'.
+ */
+ XM_OUT32(IoC, Port, XM_TX_EV_MSK, XMT_DEF_MSK);
+
+ /*
+ * Do NOT init XMAC interrupt mask here.
+ * All interrupts remain disable until link comes up!
+ */
+
+ /*
+ * Any additional configuration changes may be done now.
+ * The last action is to enable the Rx and Tx state machine.
+ * This should be done after the auto-negotiation process
+ * has been completed successfully.
+ */
+} /* SkXmInitMac */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmInitMac() - Initialize the GMAC
+ *
+ * Description:
+ * Initialize the GMAC of the specified port.
+ * The GMAC must be reset or stopped before calling this function.
+ *
+ * Note:
+ * The GMAC's Rx and Tx state machine is still disabled when returning.
+ *
+ * Returns:
+ * nothing
+ */
+void SkGmInitMac(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ int i;
+ SK_U16 SWord;
+ SK_U32 DWord;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PState == SK_PRT_STOP) {
+ /* Port State: SK_PRT_STOP */
+ /* Verify that the reset bit is cleared */
+ SK_IN32(IoC, MR_ADDR(Port, GMAC_CTRL), &DWord);
+
+ if ((DWord & GMC_RST_SET) != 0) {
+ /* PState does not match HW state */
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG);
+ /* Correct it */
+ pPrt->PState = SK_PRT_RESET;
+ }
+ }
+
+ if (pPrt->PState == SK_PRT_RESET) {
+
+ SkGmHardRst(pAC, IoC, Port);
+
+ SkGmClearRst(pAC, IoC, Port);
+
+ /* Auto-negotiation ? */
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ /* Auto-negotiation disabled */
+
+ /* get General Purpose Control */
+ GM_IN16(IoC, Port, GM_GP_CTRL, &SWord);
+
+ /* disable auto-update for speed, duplex and flow-control */
+ SWord |= GM_GPCR_AU_ALL_DIS;
+
+ /* setup General Purpose Control Register */
+ GM_OUT16(IoC, Port, GM_GP_CTRL, SWord);
+
+ SWord = GM_GPCR_AU_ALL_DIS;
+ }
+ else {
+ SWord = 0;
+ }
+
+ /* speed settings */
+ switch (pPrt->PLinkSpeed) {
+ case SK_LSPEED_AUTO:
+ case SK_LSPEED_1000MBPS:
+ SWord |= GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100;
+ break;
+ case SK_LSPEED_100MBPS:
+ SWord |= GM_GPCR_SPEED_100;
+ break;
+ case SK_LSPEED_10MBPS:
+ break;
+ }
+
+ /* duplex settings */
+ if (pPrt->PLinkMode != SK_LMODE_HALF) {
+ /* set full duplex */
+ SWord |= GM_GPCR_DUP_FULL;
+ }
+
+ /* flow-control settings */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ /* set Pause Off */
+ SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_OFF);
+ /* disable Tx & Rx flow-control */
+ SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ /* disable Rx flow-control */
+ SWord |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ case SK_FLOW_MODE_SYM_OR_REM:
+ /* enable Tx & Rx flow-control */
+ break;
+ }
+
+ /* setup General Purpose Control Register */
+ GM_OUT16(IoC, Port, GM_GP_CTRL, SWord);
+
+ /* dummy read the Interrupt Source Register */
+ SK_IN16(IoC, GMAC_IRQ_SRC, &SWord);
+
+#ifndef VCPU
+ /* read Id from PHY */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_ID1, &pPrt->PhyId1);
+
+ SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
+#endif /* VCPU */
+ }
+
+ (void)SkGmResetCounter(pAC, IoC, Port);
+
+ /* setup Transmit Control Register */
+ GM_OUT16(IoC, Port, GM_TX_CTRL, TX_COL_THR(pPrt->PMacColThres));
+
+ /* setup Receive Control Register */
+ GM_OUT16(IoC, Port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA |
+ GM_RXCR_CRC_DIS);
+
+ /* setup Transmit Flow Control Register */
+ GM_OUT16(IoC, Port, GM_TX_FLOW_CTRL, 0xffff);
+
+ /* setup Transmit Parameter Register */
+#ifdef VCPU
+ GM_IN16(IoC, Port, GM_TX_PARAM, &SWord);
+#endif /* VCPU */
+
+ SWord = TX_JAM_LEN_VAL(pPrt->PMacJamLen) |
+ TX_JAM_IPG_VAL(pPrt->PMacJamIpgVal) |
+ TX_IPG_JAM_DATA(pPrt->PMacJamIpgData);
+
+ GM_OUT16(IoC, Port, GM_TX_PARAM, SWord);
+
+ /* configure the Serial Mode Register */
+#ifdef VCPU
+ GM_IN16(IoC, Port, GM_SERIAL_MODE, &SWord);
+#endif /* VCPU */
+
+ SWord = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(pPrt->PMacIpgData);
+
+ if (pPrt->PMacLimit4) {
+ /* reset of collision counter after 4 consecutive collisions */
+ SWord |= GM_SMOD_LIMIT_4;
+ }
+
+ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
+ /* enable jumbo mode (Max. Frame Length = 9018) */
+ SWord |= GM_SMOD_JUMBO_ENA;
+ }
+
+ GM_OUT16(IoC, Port, GM_SERIAL_MODE, SWord);
+
+ /*
+ * configure the GMACs Station Addresses
+ * in PROM you can find our addresses at:
+ * B2_MAC_1 = xx xx xx xx xx x0 virtual address
+ * B2_MAC_2 = xx xx xx xx xx x1 is programmed to GMAC A
+ * B2_MAC_3 = xx xx xx xx xx x2 is reserved for DualPort
+ */
+
+ for (i = 0; i < 3; i++) {
+ /*
+		 * The following 2 statements are together endianness
+ * independent. Remember this when changing.
+ */
+ /* physical address: will be used for pause frames */
+ SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord);
+
+#ifdef WA_DEV_16
+ /* WA for deviation #16 */
+ if (pAC->GIni.GIChipId == CHIP_ID_YUKON && pAC->GIni.GIChipRev == 0) {
+ /* swap the address bytes */
+ SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8);
+
+ /* write to register in reversed order */
+ GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + (2 - i) * 4), SWord);
+ }
+ else {
+ GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord);
+ }
+#else
+ GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord);
+#endif /* WA_DEV_16 */
+
+ /* virtual address: will be used for data */
+ SK_IN16(IoC, (B2_MAC_1 + Port * 8 + i * 2), &SWord);
+
+ GM_OUT16(IoC, Port, (GM_SRC_ADDR_2L + i * 4), SWord);
+
+ /* reset Multicast filtering Hash registers 1-3 */
+ GM_OUT16(IoC, Port, GM_MC_ADDR_H1 + 4*i, 0);
+ }
+
+ /* reset Multicast filtering Hash register 4 */
+ GM_OUT16(IoC, Port, GM_MC_ADDR_H4, 0);
+
+	/* clear the interrupt masks for the counter overflows */
+ GM_OUT16(IoC, Port, GM_TX_IRQ_MSK, 0);
+ GM_OUT16(IoC, Port, GM_RX_IRQ_MSK, 0);
+ GM_OUT16(IoC, Port, GM_TR_IRQ_MSK, 0);
+
+#if defined(SK_DIAG) || defined(DEBUG)
+ /* read General Purpose Status */
+ GM_IN16(IoC, Port, GM_GP_STAT, &SWord);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("MAC Stat Reg.=0x%04X\n", SWord));
+#endif /* SK_DIAG || DEBUG */
+
+#ifdef SK_DIAG
+ c_print("MAC Stat Reg=0x%04X\n", SWord);
+#endif /* SK_DIAG */
+
+} /* SkGmInitMac */
+#endif /* YUKON */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmInitDupMd() - Initialize the XMACs Duplex Mode
+ *
+ * Description:
+ * This function initializes the XMACs Duplex Mode.
+ * It should be called after successfully finishing
+ * the Auto-negotiation Process
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmInitDupMd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ switch (pAC->GIni.GP[Port].PLinkModeStatus) {
+ case SK_LMODE_STAT_AUTOHALF:
+ case SK_LMODE_STAT_HALF:
+ /* Configuration Actions for Half Duplex Mode */
+ /*
+		 * XM_BURST = default value. We are probably not quick
+ * enough at the 'XMAC' bus to burst 8kB.
+ * The XMAC stops bursting if no transmit frames
+ * are available or the burst limit is exceeded.
+ */
+ /* XM_TX_RT_LIM = default value (15) */
+ /* XM_TX_STIME = default value (0xff = 4096 bit times) */
+ break;
+ case SK_LMODE_STAT_AUTOFULL:
+ case SK_LMODE_STAT_FULL:
+ /* Configuration Actions for Full Duplex Mode */
+ /*
+ * The duplex mode is configured by the PHY,
+		 * therefore there seems to be nothing
+		 * to do here.
+ */
+ break;
+ case SK_LMODE_STAT_UNKNOWN:
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E007, SKERR_HWI_E007MSG);
+ break;
+ }
+} /* SkXmInitDupMd */
+
+
+/******************************************************************************
+ *
+ * SkXmInitPauseMd() - initialize the Pause Mode to be used for this port
+ *
+ * Description:
+ * This function initializes the Pause Mode which should
+ * be used for this port.
+ * It should be called after successfully finishing
+ * the Auto-negotiation Process
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmInitPauseMd(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U32 DWord;
+ SK_U16 Word;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
+
+ if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_NONE ||
+ pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) {
+
+ /* Disable Pause Frame Reception */
+ Word |= XM_MMU_IGN_PF;
+ }
+ else {
+ /*
+ * enabling pause frame reception is required for 1000BT
+ * because the XMAC is not reset if the link is going down
+ */
+ /* Enable Pause Frame Reception */
+ Word &= ~XM_MMU_IGN_PF;
+ }
+
+ XM_OUT16(IoC, Port, XM_MMU_CMD, Word);
+
+ XM_IN32(IoC, Port, XM_MODE, &DWord);
+
+ if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_SYMMETRIC ||
+ pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) {
+
+ /*
+ * Configure Pause Frame Generation
+ * Use internal and external Pause Frame Generation.
+ * Sending pause frames is edge triggered.
+ * Send a Pause frame with the maximum pause time if
+		 * an internal or external FIFO full condition occurs.
+ * Send a zero pause time frame to re-start transmission.
+ */
+
+ /* XM_PAUSE_DA = '010000C28001' (default) */
+
+ /* XM_MAC_PTIME = 0xffff (maximum) */
+ /* remember this value is defined in big endian (!) */
+ XM_OUT16(IoC, Port, XM_MAC_PTIME, 0xffff);
+
+ /* Set Pause Mode in Mode Register */
+ DWord |= XM_PAUSE_MODE;
+
+ /* Set Pause Mode in MAC Rx FIFO */
+ SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
+ }
+ else {
+ /*
+ * disable pause frame generation is required for 1000BT
+ * because the XMAC is not reset if the link is going down
+ */
+ /* Disable Pause Mode in Mode Register */
+ DWord &= ~XM_PAUSE_MODE;
+
+ /* Disable Pause Mode in MAC Rx FIFO */
+ SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
+ }
+
+ XM_OUT32(IoC, Port, XM_MODE, DWord);
+} /* SkXmInitPauseMd*/
+
+
+/******************************************************************************
+ *
+ * SkXmInitPhyXmac() - Initialize the XMAC Phy registers
+ *
+ * Description: initializes all the XMACs Phy registers
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmInitPhyXmac(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 Ctrl;
+
+ pPrt = &pAC->GIni.GP[Port];
+ Ctrl = 0;
+
+ /* Auto-negotiation ? */
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyXmac: no auto-negotiation Port %d\n", Port));
+ /* Set DuplexMode in Config register */
+ if (pPrt->PLinkMode == SK_LMODE_FULL) {
+ Ctrl |= PHY_CT_DUP_MD;
+ }
+
+ /*
+ * Do NOT enable Auto-negotiation here. This would hold
+ * the link down because no IDLEs are transmitted
+ */
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyXmac: with auto-negotiation Port %d\n", Port));
+ /* Set Auto-negotiation advertisement */
+
+ /* Set Full/half duplex capabilities */
+ switch (pPrt->PLinkMode) {
+ case SK_LMODE_AUTOHALF:
+ Ctrl |= PHY_X_AN_HD;
+ break;
+ case SK_LMODE_AUTOFULL:
+ Ctrl |= PHY_X_AN_FD;
+ break;
+ case SK_LMODE_AUTOBOTH:
+ Ctrl |= PHY_X_AN_FD | PHY_X_AN_HD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
+ SKERR_HWI_E015MSG);
+ }
+
+ /* Set Flow-control capabilities */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ Ctrl |= PHY_X_P_NO_PAUSE;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ Ctrl |= PHY_X_P_ASYM_MD;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ Ctrl |= PHY_X_P_SYM_MD;
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ Ctrl |= PHY_X_P_BOTH_MD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+
+ /* Write AutoNeg Advertisement Register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_AUNE_ADV, Ctrl);
+
+ /* Restart Auto-negotiation */
+ Ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
+ }
+
+ if (DoLoop) {
+ /* Set the Phy Loopback bit, too */
+ Ctrl |= PHY_CT_LOOP;
+ }
+
+ /* Write to the Phy control register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_CTRL, Ctrl);
+} /* SkXmInitPhyXmac */
+
+
+/******************************************************************************
+ *
+ * SkXmInitPhyBcom() - Initialize the Broadcom Phy registers
+ *
+ * Description: initializes all the Broadcom Phy registers
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmInitPhyBcom(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 Ctrl1;
+ SK_U16 Ctrl2;
+ SK_U16 Ctrl3;
+ SK_U16 Ctrl4;
+ SK_U16 Ctrl5;
+
+ Ctrl1 = PHY_CT_SP1000;
+ Ctrl2 = 0;
+ Ctrl3 = PHY_SEL_TYPE;
+ Ctrl4 = PHY_B_PEC_EN_LTR;
+ Ctrl5 = PHY_B_AC_TX_TST;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* manually Master/Slave ? */
+ if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
+ Ctrl2 |= PHY_B_1000C_MSE;
+
+ if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
+ Ctrl2 |= PHY_B_1000C_MSC;
+ }
+ }
+ /* Auto-negotiation ? */
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyBcom: no auto-negotiation Port %d\n", Port));
+ /* Set DuplexMode in Config register */
+ if (pPrt->PLinkMode == SK_LMODE_FULL) {
+ Ctrl1 |= PHY_CT_DUP_MD;
+ }
+
+ /* Determine Master/Slave manually if not already done */
+ if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
+ Ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
+ }
+
+ /*
+ * Do NOT enable Auto-negotiation here. This would hold
+ * the link down because no IDLES are transmitted
+ */
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyBcom: with auto-negotiation Port %d\n", Port));
+ /* Set Auto-negotiation advertisement */
+
+ /*
+ * Workaround BCOM Errata #1 for the C5 type.
+ * 1000Base-T Link Acquisition Failure in Slave Mode
+ * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
+ */
+ Ctrl2 |= PHY_B_1000C_RD;
+
+ /* Set Full/half duplex capabilities */
+ switch (pPrt->PLinkMode) {
+ case SK_LMODE_AUTOHALF:
+ Ctrl2 |= PHY_B_1000C_AHD;
+ break;
+ case SK_LMODE_AUTOFULL:
+ Ctrl2 |= PHY_B_1000C_AFD;
+ break;
+ case SK_LMODE_AUTOBOTH:
+ Ctrl2 |= PHY_B_1000C_AFD | PHY_B_1000C_AHD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
+ SKERR_HWI_E015MSG);
+ }
+
+ /* Set Flow-control capabilities */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ Ctrl3 |= PHY_B_P_NO_PAUSE;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ Ctrl3 |= PHY_B_P_ASYM_MD;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ Ctrl3 |= PHY_B_P_SYM_MD;
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ Ctrl3 |= PHY_B_P_BOTH_MD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+
+ /* Restart Auto-negotiation */
+ Ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ }
+
+ /* Initialize LED register here? */
+ /* No. Please do it in SkDgXmitLed() (if required) and swap
+ init order of LEDs and XMAC. (MAl) */
+
+ /* Write 1000Base-T Control Register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, Ctrl2);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set 1000B-T Ctrl Reg=0x%04X\n", Ctrl2));
+
+ /* Write AutoNeg Advertisement Register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3));
+
+ if (DoLoop) {
+ /* Set the Phy Loopback bit, too */
+ Ctrl1 |= PHY_CT_LOOP;
+ }
+
+ if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
+ /* configure FIFO to high latency for transmission of ext. packets */
+ Ctrl4 |= PHY_B_PEC_HIGH_LA;
+
+ /* configure reception of extended packets */
+ Ctrl5 |= PHY_B_AC_LONG_PACK;
+
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, Ctrl5);
+ }
+
+ /* Configure LED Traffic Mode and Jumbo Frame usage if specified */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, Ctrl4);
+
+ /* Write to the Phy control register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, Ctrl1);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Control Reg=0x%04X\n", Ctrl1));
+} /* SkXmInitPhyBcom */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+#ifndef SK_SLIM
+/******************************************************************************
+ *
+ * SkGmEnterLowPowerMode()
+ *
+ * Description:
+ * This function sets the Marvell Alaska PHY to the low power mode
+ * given by parameter mode.
+ * The following low power modes are available:
+ *
+ * - Coma Mode (Deep Sleep):
+ * Power consumption: ~15 - 30 mW
+ * The PHY cannot wake up on its own.
+ *
+ * - IEEE 22.2.4.1.5 compatible power down mode
+ * Power consumption: ~240 mW
+ * The PHY cannot wake up on its own.
+ *
+ * - energy detect mode
+ * Power consumption: ~160 mW
+ * The PHY can wake up on its own by detecting activity
+ * on the CAT 5 cable.
+ *
+ * - energy detect plus mode
+ * Power consumption: ~150 mW
+ * The PHY can wake up on its own by detecting activity
+ * on the CAT 5 cable.
+ * Connected devices can be woken up by sending normal link
+ *	  pulses every second.
+ *
+ * Note:
+ *
+ * Returns:
+ * 0: ok
+ * 1: error
+ */
+int SkGmEnterLowPowerMode(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (e.g. MAC_1) */
+SK_U8 Mode) /* low power mode */
+{
+ SK_U16 Word;
+ SK_U32 DWord;
+ SK_U8 LastMode;
+ int Ret = 0;
+
+ if (pAC->GIni.GIYukonLite &&
+ pAC->GIni.GIChipRev == CHIP_REV_YU_LITE_A3) {
+
+ /* save current power mode */
+ LastMode = pAC->GIni.GP[Port].PPhyPowerState;
+ pAC->GIni.GP[Port].PPhyPowerState = Mode;
+
+ switch (Mode) {
+ /* coma mode (deep sleep) */
+ case PHY_PM_DEEP_SLEEP:
+ /* setup General Purpose Control Register */
+ GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS |
+ GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
+
+ /* apply COMA mode workaround */
+ SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f);
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3);
+
+ SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+ /* Set PHY to Coma Mode */
+ SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ break;
+
+ /* IEEE 22.2.4.1.5 compatible power down mode */
+ case PHY_PM_IEEE_POWER_DOWN:
+ /*
+ * - disable MAC 125 MHz clock
+ * - allow MAC power down
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+ Word |= PHY_M_PC_DIS_125CLK;
+ Word &= ~PHY_M_PC_MAC_POW_UP;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /*
+ * register changes must be followed by a software
+ * reset to take effect
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
+ Word |= PHY_CT_RESET;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
+
+ /* switch IEEE compatible power down mode on */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
+ Word |= PHY_CT_PDOWN;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
+ break;
+
+ /* energy detect and energy detect plus mode */
+ case PHY_PM_ENERGY_DETECT:
+ case PHY_PM_ENERGY_DETECT_PLUS:
+ /*
+ * - disable MAC 125 MHz clock
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+ Word |= PHY_M_PC_DIS_125CLK;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /* activate energy detect mode 1 */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+
+ /* energy detect mode */
+ if (Mode == PHY_PM_ENERGY_DETECT) {
+ Word |= PHY_M_PC_EN_DET;
+ }
+ /* energy detect plus mode */
+ else {
+ Word |= PHY_M_PC_EN_DET_PLUS;
+ }
+
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /*
+ * reinitialize the PHY to force a software reset
+ * which is necessary after the register settings
+ * for the energy detect modes.
+			 * Furthermore, reinitialization prevents the
+			 * PHY from drifting out of a stable state.
+ */
+ SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
+ break;
+
+ /* don't change current power mode */
+ default:
+ pAC->GIni.GP[Port].PPhyPowerState = LastMode;
+ Ret = 1;
+ break;
+ }
+ }
+ /* low power modes are not supported by this chip */
+ else {
+ Ret = 1;
+ }
+
+ return(Ret);
+
+} /* SkGmEnterLowPowerMode */
+
+/******************************************************************************
+ *
+ * SkGmLeaveLowPowerMode()
+ *
+ * Description:
+ * Leave the current low power mode and switch to normal mode
+ *
+ * Note:
+ *
+ * Returns:
+ * 0: ok
+ * 1: error
+ */
+int SkGmLeaveLowPowerMode(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (e.g. MAC_1) */
+{
+ SK_U32 DWord;
+ SK_U16 Word;
+ SK_U8 LastMode;
+ int Ret = 0;
+
+ if (pAC->GIni.GIYukonLite &&
+ pAC->GIni.GIChipRev == CHIP_REV_YU_LITE_A3) {
+
+ /* save current power mode */
+ LastMode = pAC->GIni.GP[Port].PPhyPowerState;
+ pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
+
+ switch (LastMode) {
+ /* coma mode (deep sleep) */
+ case PHY_PM_DEEP_SLEEP:
+ SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+ /* Release PHY from Coma Mode */
+ SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA);
+
+ SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ SK_IN32(IoC, B2_GP_IO, &DWord);
+
+ /* set to output */
+ DWord |= (GP_DIR_9 | GP_IO_9);
+
+ /* set PHY reset */
+ SK_OUT32(IoC, B2_GP_IO, DWord);
+
+ DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
+
+ /* clear PHY reset */
+ SK_OUT32(IoC, B2_GP_IO, DWord);
+ break;
+
+ /* IEEE 22.2.4.1.5 compatible power down mode */
+ case PHY_PM_IEEE_POWER_DOWN:
+ /*
+ * - enable MAC 125 MHz clock
+ * - set MAC power up
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+ Word &= ~PHY_M_PC_DIS_125CLK;
+ Word |= PHY_M_PC_MAC_POW_UP;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /*
+ * register changes must be followed by a software
+ * reset to take effect
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
+ Word |= PHY_CT_RESET;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
+
+ /* switch IEEE compatible power down mode off */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
+ Word &= ~PHY_CT_PDOWN;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
+ break;
+
+ /* energy detect and energy detect plus mode */
+ case PHY_PM_ENERGY_DETECT:
+ case PHY_PM_ENERGY_DETECT_PLUS:
+ /*
+ * - enable MAC 125 MHz clock
+ */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+ Word &= ~PHY_M_PC_DIS_125CLK;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /* disable energy detect mode */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
+ Word &= ~PHY_M_PC_EN_DET_MSK;
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
+
+ /*
+ * reinitialize the PHY to force a software reset
+ * which is necessary after the register settings
+ * for the energy detect modes.
+			 * Furthermore, reinitialization prevents the
+			 * PHY from drifting out of a stable state.
+ */
+ SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
+ break;
+
+ /* don't change current power mode */
+ default:
+ pAC->GIni.GP[Port].PPhyPowerState = LastMode;
+ Ret = 1;
+ break;
+ }
+ }
+ /* low power modes are not supported by this chip */
+ else {
+ Ret = 1;
+ }
+
+ return(Ret);
+
+} /* SkGmLeaveLowPowerMode */
+#endif /* !SK_SLIM */
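+
+
+/*
+ * Illustrative sketch only (not part of the driver, guarded by a
+ * hypothetical EXAMPLE_ONLY switch): one way an OS-specific power
+ * management path might use the two routines above. The helper names
+ * and the fallback policy are assumptions for illustration. Note that
+ * both routines return 1 if the chip does not support low power modes
+ * (only YUKON-Lite Rev. A3 does here), so a caller must be prepared
+ * to continue without one.
+ */
+#ifdef EXAMPLE_ONLY
+static int ExampleSuspendPort(
+SK_AC	*pAC,	/* adapter context */
+SK_IOC	IoC,	/* IO context */
+int		Port)	/* Port Index (e.g. MAC_1) */
+{
+	/* prefer a mode the PHY can wake up from on its own */
+	if (SkGmEnterLowPowerMode(pAC, IoC, Port, PHY_PM_ENERGY_DETECT) == 0) {
+		return(0);
+	}
+
+	/* otherwise try the deepest mode; waking up then requires
+	 * an explicit call to SkGmLeaveLowPowerMode() */
+	return(SkGmEnterLowPowerMode(pAC, IoC, Port, PHY_PM_DEEP_SLEEP));
+}
+
+static int ExampleResumePort(
+SK_AC	*pAC,	/* adapter context */
+SK_IOC	IoC,	/* IO context */
+int		Port)	/* Port Index (e.g. MAC_1) */
+{
+	/* restores the mode saved in PPhyPowerState and re-inits the PHY */
+	return(SkGmLeaveLowPowerMode(pAC, IoC, Port));
+}
+#endif /* EXAMPLE_ONLY */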
+
+
+/******************************************************************************
+ *
+ * SkGmInitPhyMarv() - Initialize the Marvell Phy registers
+ *
+ * Description: initializes all the Marvell Phy registers
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkGmInitPhyMarv(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 PhyCtrl;
+ SK_U16 C1000BaseT;
+ SK_U16 AutoNegAdv;
+ SK_U16 ExtPhyCtrl;
+ SK_U16 LedCtrl;
+ SK_BOOL AutoNeg;
+#if defined(SK_DIAG) || defined(DEBUG)
+ SK_U16 PhyStat;
+ SK_U16 PhyStat1;
+ SK_U16 PhySpecStat;
+#endif /* SK_DIAG || DEBUG */
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Auto-negotiation ? */
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ AutoNeg = SK_FALSE;
+ }
+ else {
+ AutoNeg = SK_TRUE;
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyMarv: Port %d, auto-negotiation %s\n",
+ Port, AutoNeg ? "ON" : "OFF"));
+
+#ifdef VCPU
+ VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n",
+ Port, DoLoop);
+#else /* VCPU */
+ if (DoLoop) {
+ /* Set 'MAC Power up'-bit, set Manual MDI configuration */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL,
+ PHY_M_PC_MAC_POW_UP);
+ }
+ else if (AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_AUTO) {
+ /* Read Ext. PHY Specific Control */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl);
+
+ ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+ PHY_M_EC_MAC_S_MSK);
+
+ ExtPhyCtrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) |
+ PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl));
+ }
+
+ /* Read PHY Control */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl);
+
+ if (!AutoNeg) {
+ /* Disable Auto-negotiation */
+ PhyCtrl &= ~PHY_CT_ANE;
+ }
+
+ PhyCtrl |= PHY_CT_RESET;
+ /* Assert software reset */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl);
+#endif /* VCPU */
+
+ PhyCtrl = 0 /* PHY_CT_COL_TST */;
+ C1000BaseT = 0;
+ AutoNegAdv = PHY_SEL_TYPE;
+
+ /* manually Master/Slave ? */
+ if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
+ /* enable Manual Master/Slave */
+ C1000BaseT |= PHY_M_1000C_MSE;
+
+ if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
+ C1000BaseT |= PHY_M_1000C_MSC; /* set it to Master */
+ }
+ }
+
+ /* Auto-negotiation ? */
+ if (!AutoNeg) {
+
+ if (pPrt->PLinkMode == SK_LMODE_FULL) {
+ /* Set Full Duplex Mode */
+ PhyCtrl |= PHY_CT_DUP_MD;
+ }
+
+ /* Set Master/Slave manually if not already done */
+ if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
+ C1000BaseT |= PHY_M_1000C_MSE; /* set it to Slave */
+ }
+
+ /* Set Speed */
+ switch (pPrt->PLinkSpeed) {
+ case SK_LSPEED_AUTO:
+ case SK_LSPEED_1000MBPS:
+ PhyCtrl |= PHY_CT_SP1000;
+ break;
+ case SK_LSPEED_100MBPS:
+ PhyCtrl |= PHY_CT_SP100;
+ break;
+ case SK_LSPEED_10MBPS:
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019,
+ SKERR_HWI_E019MSG);
+ }
+
+ if (!DoLoop) {
+ PhyCtrl |= PHY_CT_RESET;
+ }
+ }
+ else {
+ /* Set Auto-negotiation advertisement */
+
+ if (pAC->GIni.GICopperType) {
+ /* Set Speed capabilities */
+ switch (pPrt->PLinkSpeed) {
+ case SK_LSPEED_AUTO:
+ C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD;
+ AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD |
+ PHY_M_AN_10_FD | PHY_M_AN_10_HD;
+ break;
+ case SK_LSPEED_1000MBPS:
+ C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD;
+ break;
+ case SK_LSPEED_100MBPS:
+ AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD |
+ /* advertise 10Base-T also */
+ PHY_M_AN_10_FD | PHY_M_AN_10_HD;
+ break;
+ case SK_LSPEED_10MBPS:
+ AutoNegAdv |= PHY_M_AN_10_FD | PHY_M_AN_10_HD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019,
+ SKERR_HWI_E019MSG);
+ }
+
+ /* Set Full/half duplex capabilities */
+ switch (pPrt->PLinkMode) {
+ case SK_LMODE_AUTOHALF:
+ C1000BaseT &= ~PHY_M_1000C_AFD;
+ AutoNegAdv &= ~(PHY_M_AN_100_FD | PHY_M_AN_10_FD);
+ break;
+ case SK_LMODE_AUTOFULL:
+ C1000BaseT &= ~PHY_M_1000C_AHD;
+ AutoNegAdv &= ~(PHY_M_AN_100_HD | PHY_M_AN_10_HD);
+ break;
+ case SK_LMODE_AUTOBOTH:
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
+ SKERR_HWI_E015MSG);
+ }
+
+ /* Set Flow-control capabilities */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ AutoNegAdv |= PHY_B_P_NO_PAUSE;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ AutoNegAdv |= PHY_B_P_ASYM_MD;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ AutoNegAdv |= PHY_B_P_SYM_MD;
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ AutoNegAdv |= PHY_B_P_BOTH_MD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+ }
+ else { /* special defines for FIBER (88E1011S only) */
+
+ /* Set Full/half duplex capabilities */
+ switch (pPrt->PLinkMode) {
+ case SK_LMODE_AUTOHALF:
+ AutoNegAdv |= PHY_M_AN_1000X_AHD;
+ break;
+ case SK_LMODE_AUTOFULL:
+ AutoNegAdv |= PHY_M_AN_1000X_AFD;
+ break;
+ case SK_LMODE_AUTOBOTH:
+ AutoNegAdv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
+ SKERR_HWI_E015MSG);
+ }
+
+ /* Set Flow-control capabilities */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ AutoNegAdv |= PHY_M_P_NO_PAUSE_X;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ AutoNegAdv |= PHY_M_P_ASYM_MD_X;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ AutoNegAdv |= PHY_M_P_SYM_MD_X;
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ AutoNegAdv |= PHY_M_P_BOTH_MD_X;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+ }
+
+ if (!DoLoop) {
+ /* Restart Auto-negotiation */
+ PhyCtrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ }
+ }
+
+#ifdef VCPU
+ /*
+ * E-mail from Gu Lin (08-03-2002):
+ */
+
+ /* Program PHY register 30 as 16'h0708 for simulation speed up */
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0x0700 /* 0x0708 */);
+
+ VCpuWait(2000);
+
+#else /* VCPU */
+
+ /* Write 1000Base-T Control Register */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_1000T_CTRL, C1000BaseT);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set 1000B-T Ctrl =0x%04X\n", C1000BaseT));
+
+ /* Write AutoNeg Advertisement Register */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, AutoNegAdv);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set Auto-Neg.Adv.=0x%04X\n", AutoNegAdv));
+#endif /* VCPU */
+
+ if (DoLoop) {
+ /* Set the PHY Loopback bit */
+ PhyCtrl |= PHY_CT_LOOP;
+
+#ifdef XXX
+ /* Program PHY register 16 as 16'h0400 to force link good */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_FL_GOOD);
+#endif /* XXX */
+
+#ifndef VCPU
+ if (pPrt->PLinkSpeed != SK_LSPEED_AUTO) {
+ /* Write Ext. PHY Specific Control */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL,
+ (SK_U16)((pPrt->PLinkSpeed + 2) << 4));
+ }
+#endif /* VCPU */
+ }
+#ifdef TEST_ONLY
+ else if (pPrt->PLinkSpeed == SK_LSPEED_10MBPS) {
+ /* Write PHY Specific Control */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL,
+ PHY_M_PC_EN_DET_MSK);
+ }
+#endif
+
+ /* Write to the PHY Control register */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Set PHY Ctrl Reg.=0x%04X\n", PhyCtrl));
+
+#ifdef VCPU
+ VCpuWait(2000);
+#else
+
+ LedCtrl = PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS);
+
+ if ((pAC->GIni.GILedBlinkCtrl & SK_ACT_LED_BLINK) != 0) {
+ LedCtrl |= PHY_M_LEDC_RX_CTRL | PHY_M_LEDC_TX_CTRL;
+ }
+
+ if ((pAC->GIni.GILedBlinkCtrl & SK_DUP_LED_NORMAL) != 0) {
+ LedCtrl |= PHY_M_LEDC_DP_CTRL;
+ }
+
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_CTRL, LedCtrl);
+
+ if ((pAC->GIni.GILedBlinkCtrl & SK_LED_LINK100_ON) != 0) {
+ /* only in forced 100 Mbps mode */
+ if (!AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_100MBPS) {
+
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_100(MO_LED_ON));
+ }
+ }
+
+#ifdef SK_DIAG
+ c_print("Set PHY Ctrl=0x%04X\n", PhyCtrl);
+ c_print("Set 1000 B-T=0x%04X\n", C1000BaseT);
+ c_print("Set Auto-Neg=0x%04X\n", AutoNegAdv);
+ c_print("Set Ext Ctrl=0x%04X\n", ExtPhyCtrl);
+#endif /* SK_DIAG */
+
+#if defined(SK_DIAG) || defined(DEBUG)
+ /* Read PHY Control */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Ctrl Reg.=0x%04X\n", PhyCtrl));
+
+ /* Read 1000Base-T Control Register */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_CTRL, &C1000BaseT);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("1000B-T Ctrl =0x%04X\n", C1000BaseT));
+
+ /* Read AutoNeg Advertisement Register */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &AutoNegAdv);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Auto-Neg.Adv.=0x%04X\n", AutoNegAdv));
+
+ /* Read Ext. PHY Specific Control */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl));
+
+ /* Read PHY Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Stat Reg.=0x%04X\n", PhyStat));
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat1);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Stat Reg.=0x%04X\n", PhyStat1));
+
+ /* Read PHY Specific Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Spec Stat=0x%04X\n", PhySpecStat));
+#endif /* SK_DIAG || DEBUG */
+
+#ifdef SK_DIAG
+ c_print("PHY Ctrl Reg=0x%04X\n", PhyCtrl);
+ c_print("PHY 1000 Reg=0x%04X\n", C1000BaseT);
+ c_print("PHY AnAd Reg=0x%04X\n", AutoNegAdv);
+ c_print("Ext Ctrl Reg=0x%04X\n", ExtPhyCtrl);
+ c_print("PHY Stat Reg=0x%04X\n", PhyStat);
+ c_print("PHY Stat Reg=0x%04X\n", PhyStat1);
+ c_print("PHY Spec Reg=0x%04X\n", PhySpecStat);
+#endif /* SK_DIAG */
+
+#endif /* VCPU */
+
+} /* SkGmInitPhyMarv */
+#endif /* YUKON */
+
+
+#ifdef OTHER_PHY
+/******************************************************************************
+ *
+ * SkXmInitPhyLone() - Initialize the Level One Phy registers
+ *
+ * Description: initializes all the Level One Phy registers
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmInitPhyLone(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 Ctrl1;
+ SK_U16 Ctrl2;
+ SK_U16 Ctrl3;
+
+ Ctrl1 = PHY_CT_SP1000;
+ Ctrl2 = 0;
+ Ctrl3 = PHY_SEL_TYPE;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* manually Master/Slave ? */
+ if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
+ Ctrl2 |= PHY_L_1000C_MSE;
+
+ if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
+ Ctrl2 |= PHY_L_1000C_MSC;
+ }
+ }
+ /* Auto-negotiation ? */
+ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
+ /*
+		 * the Level One spec says: "1000 Mbps: manual mode not allowed",
+		 * but let's see what happens...
+ */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyLone: no auto-negotiation Port %d\n", Port));
+ /* Set DuplexMode in Config register */
+ if (pPrt->PLinkMode == SK_LMODE_FULL) {
+ Ctrl1 |= PHY_CT_DUP_MD;
+ }
+
+ /* Determine Master/Slave manually if not already done */
+ if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
+ Ctrl2 |= PHY_L_1000C_MSE; /* set it to Slave */
+ }
+
+ /*
+ * Do NOT enable Auto-negotiation here. This would hold
+ * the link down because no IDLES are transmitted
+ */
+ }
+ else {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("InitPhyLone: with auto-negotiation Port %d\n", Port));
+ /* Set Auto-negotiation advertisement */
+
+ /* Set Full/half duplex capabilities */
+ switch (pPrt->PLinkMode) {
+ case SK_LMODE_AUTOHALF:
+ Ctrl2 |= PHY_L_1000C_AHD;
+ break;
+ case SK_LMODE_AUTOFULL:
+ Ctrl2 |= PHY_L_1000C_AFD;
+ break;
+ case SK_LMODE_AUTOBOTH:
+ Ctrl2 |= PHY_L_1000C_AFD | PHY_L_1000C_AHD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
+ SKERR_HWI_E015MSG);
+ }
+
+ /* Set Flow-control capabilities */
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ Ctrl3 |= PHY_L_P_NO_PAUSE;
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ Ctrl3 |= PHY_L_P_ASYM_MD;
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ Ctrl3 |= PHY_L_P_SYM_MD;
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ Ctrl3 |= PHY_L_P_BOTH_MD;
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+
+ /* Restart Auto-negotiation */
+ Ctrl1 = PHY_CT_ANE | PHY_CT_RE_CFG;
+ }
+
+ /* Write 1000Base-T Control Register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_1000T_CTRL, Ctrl2);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("1000B-T Ctrl Reg=0x%04X\n", Ctrl2));
+
+ /* Write AutoNeg Advertisement Register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3));
+
+ if (DoLoop) {
+ /* Set the Phy Loopback bit, too */
+ Ctrl1 |= PHY_CT_LOOP;
+ }
+
+ /* Write to the Phy control register */
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_CTRL, Ctrl1);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Control Reg=0x%04X\n", Ctrl1));
+} /* SkXmInitPhyLone */
+
+
+/******************************************************************************
+ *
+ * SkXmInitPhyNat() - Initialize the National Phy registers
+ *
+ * Description: initializes all the National Phy registers
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+static void SkXmInitPhyNat(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+/* todo: National */
+} /* SkXmInitPhyNat */
+#endif /* OTHER_PHY */
+
+
+/******************************************************************************
+ *
+ * SkMacInitPhy() - Initialize the PHY registers
+ *
+ * Description: calls the Init PHY routines dep. on board type
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacInitPhy(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ switch (pPrt->PhyType) {
+ case SK_PHY_XMAC:
+ SkXmInitPhyXmac(pAC, IoC, Port, DoLoop);
+ break;
+ case SK_PHY_BCOM:
+ SkXmInitPhyBcom(pAC, IoC, Port, DoLoop);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ SkXmInitPhyLone(pAC, IoC, Port, DoLoop);
+ break;
+ case SK_PHY_NAT:
+ SkXmInitPhyNat(pAC, IoC, Port, DoLoop);
+ break;
+#endif /* OTHER_PHY */
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmInitPhyMarv(pAC, IoC, Port, DoLoop);
+ }
+#endif /* YUKON */
+
+} /* SkMacInitPhy */
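+
+
+/*
+ * Illustrative sketch only (not part of the driver, guarded by a
+ * hypothetical EXAMPLE_ONLY switch): the DoLoop parameter of
+ * SkMacInitPhy() selects whether the PHY loopback bit is set, which
+ * keeps traffic local to the board. Diagnostics code might therefore
+ * call it as shown below; normal initialization passes SK_FALSE.
+ */
+#ifdef EXAMPLE_ONLY
+static void ExamplePhyLoopbackSetup(
+SK_AC	*pAC,	/* adapter context */
+SK_IOC	IoC)	/* IO context */
+{
+	/* bring up the PHY of the first port in loopback for a self test */
+	SkMacInitPhy(pAC, IoC, MAC_1, SK_TRUE);
+}
+#endif /* EXAMPLE_ONLY */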
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmAutoNegDoneXmac() - Auto-negotiation handling
+ *
+ * Description:
+ * This function handles the auto-negotiation if the Done bit is set.
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+static int SkXmAutoNegDoneXmac(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 ResAb; /* Resolved Ability */
+ SK_U16 LPAb; /* Link Partner Ability */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegDoneXmac, Port %d\n", Port));
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Get PHY parameters */
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb);
+
+ if ((LPAb & PHY_X_AN_RFB) != 0) {
+ /* At least one of the remote fault bit is set */
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Remote fault bit set Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_OTHER);
+ }
+
+ /* Check Duplex mismatch */
+ if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_FD) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
+ }
+ else if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_HD) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
+ }
+ else {
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Duplex mode mismatch Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_DUP_CAP);
+ }
+
+ /* Check PAUSE mismatch */
+ /* We are NOT using chapter 4.23 of the Xaqti manual */
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ if ((pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC ||
+ pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) &&
+ (LPAb & PHY_X_P_SYM_MD) != 0) {
+ /* Symmetric PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
+ }
+ else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM &&
+ (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) {
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
+ }
+ else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND &&
+ (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) {
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
+ }
+ else {
+ /* PAUSE mismatch -> no PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
+ }
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
+
+ return(SK_AND_OK);
+} /* SkXmAutoNegDoneXmac */
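+
+
+/*
+ * Illustrative sketch only (not part of the driver, guarded by a
+ * hypothetical EXAMPLE_ONLY switch): SkXmAutoNegDoneXmac() above
+ * resolves the PAUSE outcome itself according to IEEE 802.3z/D5.0
+ * Table 37-4, while the Broadcom and Marvell handlers below read an
+ * already resolved result from their status registers. The helper
+ * below restates the Table 37-4 resolution independent of any
+ * register layout; its name and prototype are assumptions.
+ */
+#ifdef EXAMPLE_ONLY
+static int ExampleResolvePause(
+int		FlowCtrlMode,	/* local SK_FLOW_MODE_* configuration */
+SK_BOOL	LpSym,			/* link partner advertises symmetric PAUSE */
+SK_BOOL	LpAsym)			/* link partner advertises asymmetric PAUSE */
+{
+	if ((FlowCtrlMode == SK_FLOW_MODE_SYMMETRIC ||
+		 FlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) && LpSym) {
+		/* both ends send and honor PAUSE frames */
+		return(SK_FLOW_STAT_SYMMETRIC);
+	}
+
+	if (FlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM && !LpSym && LpAsym) {
+		/* we only receive PAUSE frames, the partner sends them */
+		return(SK_FLOW_STAT_REM_SEND);
+	}
+
+	if (FlowCtrlMode == SK_FLOW_MODE_LOC_SEND && LpSym && LpAsym) {
+		/* we only send PAUSE frames, the partner honors them */
+		return(SK_FLOW_STAT_LOC_SEND);
+	}
+
+	/* any other combination resolves to no PAUSE at all */
+	return(SK_FLOW_STAT_NONE);
+}
+#endif /* EXAMPLE_ONLY */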
+
+
+/******************************************************************************
+ *
+ * SkXmAutoNegDoneBcom() - Auto-negotiation handling
+ *
+ * Description:
+ * This function handles the auto-negotiation if the Done bit is set.
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+static int SkXmAutoNegDoneBcom(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 LPAb; /* Link Partner Ability */
+ SK_U16 AuxStat; /* Auxiliary Status */
+
+#ifdef TEST_ONLY
+/* 01-Sep-2000 RA */
+ SK_U16 ResAb; /* Resolved Ability */
+#endif /* 0 */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegDoneBcom, Port %d\n", Port));
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Get PHY parameters */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LPAb);
+#ifdef TEST_ONLY
+/* 01-Sep-2000 RA */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
+#endif /* 0 */
+
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &AuxStat);
+
+ if ((LPAb & PHY_B_AN_RF) != 0) {
+ /* Remote fault bit is set: Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Remote fault bit set Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_OTHER);
+ }
+
+ /* Check Duplex mismatch */
+ if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000FD) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
+ }
+ else if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000HD) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
+ }
+ else {
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Duplex mode mismatch Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_DUP_CAP);
+ }
+
+#ifdef TEST_ONLY
+/* 01-Sep-2000 RA */
+ /* Check Master/Slave resolution */
+ if ((ResAb & PHY_B_1000S_MSF) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Master/Slave Fault Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PMSStatus = SK_MS_STAT_FAULT;
+ return(SK_AND_OTHER);
+ }
+
+ pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
+ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
+#endif /* 0 */
+
+ /* Check PAUSE mismatch ??? */
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PAUSE_MSK) {
+ /* Symmetric PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
+ }
+ else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRR) {
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
+ }
+ else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRT) {
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
+ }
+ else {
+ /* PAUSE mismatch -> no PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
+ }
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
+
+ return(SK_AND_OK);
+} /* SkXmAutoNegDoneBcom */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmAutoNegDoneMarv() - Auto-negotiation handling
+ *
+ * Description:
+ * This function handles the auto-negotiation if the Done bit is set.
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+static int SkGmAutoNegDoneMarv(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 LPAb; /* Link Partner Ability */
+ SK_U16 ResAb; /* Resolved Ability */
+ SK_U16 AuxStat; /* Auxiliary Status */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegDoneMarv, Port %d\n", Port));
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Get PHY parameters */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_LP, &LPAb);
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Link P.Abil.=0x%04X\n", LPAb));
+
+ if ((LPAb & PHY_M_AN_RF) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Remote fault bit set Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_OTHER);
+ }
+
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb);
+
+ /* Check Master/Slave resolution */
+ if ((ResAb & PHY_B_1000S_MSF) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Master/Slave Fault Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PMSStatus = SK_MS_STAT_FAULT;
+ return(SK_AND_OTHER);
+ }
+
+ pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
+ (SK_U8)SK_MS_STAT_MASTER : (SK_U8)SK_MS_STAT_SLAVE;
+
+ /* Read PHY Specific Status */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &AuxStat);
+
+ /* Check Speed & Duplex resolved */
+ if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Speed & Duplex not resolved, Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
+ return(SK_AND_DUP_CAP);
+ }
+
+ if ((AuxStat & PHY_M_PS_FULL_DUP) != 0) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
+ }
+ else {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
+ }
+
+ /* Check PAUSE mismatch ??? */
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK) {
+ /* Symmetric PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
+ }
+ else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_RX_P_EN) {
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
+ }
+ else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_TX_P_EN) {
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
+ }
+ else {
+ /* PAUSE mismatch -> no PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
+ }
+
+ /* set used link speed */
+ switch ((unsigned)(AuxStat & PHY_M_PS_SPEED_MSK)) {
+ case (unsigned)PHY_M_PS_SPEED_1000:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
+ break;
+ case PHY_M_PS_SPEED_100:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS;
+ break;
+ default:
+ pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS;
+ }
+
+ return(SK_AND_OK);
+} /* SkGmAutoNegDoneMarv */
+#endif /* YUKON */
+
+
+#ifdef OTHER_PHY
+/******************************************************************************
+ *
+ * SkXmAutoNegDoneLone() - Auto-negotiation handling
+ *
+ * Description:
+ * This function handles the auto-negotiation if the Done bit is set.
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+static int SkXmAutoNegDoneLone(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 ResAb; /* Resolved Ability */
+ SK_U16 LPAb; /* Link Partner Ability */
+ SK_U16 QuickStat; /* Auxiliary Status */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegDoneLone, Port %d\n", Port));
+ pPrt = &pAC->GIni.GP[Port];
+
+ /* Get PHY parameters */
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LPAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ResAb);
+ SkXmPhyRead(pAC, IoC, Port, PHY_LONE_Q_STAT, &QuickStat);
+
+ if ((LPAb & PHY_L_AN_RF) != 0) {
+ /* Remote fault bit is set */
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegFail: Remote fault bit set Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ return(SK_AND_OTHER);
+ }
+
+ /* Check Duplex mismatch */
+ if ((QuickStat & PHY_L_QS_DUP_MOD) != 0) {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
+ }
+ else {
+ pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
+ }
+
+ /* Check Master/Slave resolution */
+ if ((ResAb & PHY_L_1000S_MSF) != 0) {
+ /* Error */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("Master/Slave Fault Port %d\n", Port));
+ pPrt->PAutoNegFail = SK_TRUE;
+ pPrt->PMSStatus = SK_MS_STAT_FAULT;
+ return(SK_AND_OTHER);
+ }
+ else if (ResAb & PHY_L_1000S_MSR) {
+ pPrt->PMSStatus = SK_MS_STAT_MASTER;
+ }
+ else {
+ pPrt->PMSStatus = SK_MS_STAT_SLAVE;
+ }
+
+ /* Check PAUSE mismatch */
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ /* we must manually resolve the abilities here */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
+
+ switch (pPrt->PFlowCtrlMode) {
+ case SK_FLOW_MODE_NONE:
+ /* default */
+ break;
+ case SK_FLOW_MODE_LOC_SEND:
+ if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) ==
+ (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) {
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
+ }
+ break;
+ case SK_FLOW_MODE_SYMMETRIC:
+ if ((QuickStat & PHY_L_QS_PAUSE) != 0) {
+ /* Symmetric PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
+ }
+ break;
+ case SK_FLOW_MODE_SYM_OR_REM:
+ if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) ==
+ PHY_L_QS_AS_PAUSE) {
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
+ }
+ else if ((QuickStat & PHY_L_QS_PAUSE) != 0) {
+ /* Symmetric PAUSE */
+ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
+ }
+ break;
+ default:
+ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
+ SKERR_HWI_E016MSG);
+ }
+
+ return(SK_AND_OK);
+} /* SkXmAutoNegDoneLone */
+
+
+/******************************************************************************
+ *
+ * SkXmAutoNegDoneNat() - Auto-negotiation handling
+ *
+ * Description:
+ * This function handles the auto-negotiation if the Done bit is set.
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+static int SkXmAutoNegDoneNat(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+/* todo: National */
+ return(SK_AND_OK);
+} /* SkXmAutoNegDoneNat */
+#endif /* OTHER_PHY */
+
+
+/******************************************************************************
+ *
+ * SkMacAutoNegDone() - Auto-negotiation handling
+ *
+ * Description: calls the auto-negotiation done routines dep. on board type
+ *
+ * Returns:
+ * SK_AND_OK o.k.
+ * SK_AND_DUP_CAP Duplex capability error happened
+ * SK_AND_OTHER Other error happened
+ */
+int SkMacAutoNegDone(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ int Rtv;
+
+ Rtv = SK_AND_OK;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ switch (pPrt->PhyType) {
+
+ case SK_PHY_XMAC:
+ Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port);
+ break;
+ case SK_PHY_BCOM:
+ Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port);
+ break;
+ case SK_PHY_NAT:
+ Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port);
+ break;
+#endif /* OTHER_PHY */
+ default:
+ return(SK_AND_OTHER);
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+ if (Rtv != SK_AND_OK) {
+ return(Rtv);
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNeg done Port %d\n", Port));
+
+ /* We checked everything and may now enable the link */
+ pPrt->PAutoNegFail = SK_FALSE;
+
+ SkMacRxTxEnable(pAC, IoC, Port);
+
+ return(SK_AND_OK);
+} /* SkMacAutoNegDone */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC
+ *
+ * Description:
+ * sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg.
+ * enables Rx/Tx
+ *
+ * Returns: N/A
+ */
+static void SkXmSetRxTxEn(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Para) /* Parameter to set: MAC or PHY LoopBack, Duplex Mode */
+{
+ SK_U16 Word;
+
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
+
+ switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
+ case SK_MAC_LOOPB_ON:
+ Word |= XM_MMU_MAC_LB;
+ break;
+ case SK_MAC_LOOPB_OFF:
+ Word &= ~XM_MMU_MAC_LB;
+ break;
+ }
+
+ switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) {
+ case SK_PHY_LOOPB_ON:
+ Word |= XM_MMU_GMII_LOOP;
+ break;
+ case SK_PHY_LOOPB_OFF:
+ Word &= ~XM_MMU_GMII_LOOP;
+ break;
+ }
+
+ switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
+ case SK_PHY_FULLD_ON:
+ Word |= XM_MMU_GMII_FD;
+ break;
+ case SK_PHY_FULLD_OFF:
+ Word &= ~XM_MMU_GMII_FD;
+ break;
+ }
+
+ XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+
+ /* dummy read to ensure writing */
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
+
+} /* SkXmSetRxTxEn */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC
+ *
+ * Description:
+ * sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg.
+ * enables Rx/Tx
+ *
+ * Returns: N/A
+ */
+static void SkGmSetRxTxEn(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Para) /* Parameter to set: MAC LoopBack, Duplex Mode */
+{
+ SK_U16 Ctrl;
+
+ GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
+
+ switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
+ case SK_MAC_LOOPB_ON:
+ Ctrl |= GM_GPCR_LOOP_ENA;
+ break;
+ case SK_MAC_LOOPB_OFF:
+ Ctrl &= ~GM_GPCR_LOOP_ENA;
+ break;
+ }
+
+ switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
+ case SK_PHY_FULLD_ON:
+ Ctrl |= GM_GPCR_DUP_FULL;
+ break;
+ case SK_PHY_FULLD_OFF:
+ Ctrl &= ~GM_GPCR_DUP_FULL;
+ break;
+ }
+
+ GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA |
+ GM_GPCR_TX_ENA));
+
+ /* dummy read to ensure writing */
+ GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
+
+} /* SkGmSetRxTxEn */
+#endif /* YUKON */
+
+
+#ifndef SK_SLIM
+/******************************************************************************
+ *
+ * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters
+ *
+ * Description: calls the Special Set Rx/Tx Enable routines dep. on board type
+ *
+ * Returns: N/A
+ */
+void SkMacSetRxTxEn(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+int Para)
+{
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ SkXmSetRxTxEn(pAC, IoC, Port, Para);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ SkGmSetRxTxEn(pAC, IoC, Port, Para);
+ }
+#endif /* YUKON */
+
+} /* SkMacSetRxTxEn */
+#endif /* !SK_SLIM */
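+
+
+/*
+ * Illustrative sketch only (not part of the driver, guarded by a
+ * hypothetical EXAMPLE_ONLY switch): the Para argument of
+ * SkMacSetRxTxEn() is an OR of the SK_MAC_LOOPB_ON/_OFF,
+ * SK_PHY_LOOPB_ON/_OFF and SK_PHY_FULLD_ON/_OFF flags; a setting whose
+ * flag is not passed is left untouched, and the PHY loopback flags are
+ * only evaluated on GENESIS (XMAC). A diagnostics call might look like:
+ */
+#ifdef EXAMPLE_ONLY
+static void ExampleMacLoopback(
+SK_AC	*pAC,	/* adapter context */
+SK_IOC	IoC,	/* IO context */
+int		Port)	/* Port Index (MAC_1 + n) */
+{
+	/* enable Rx/Tx with MAC loopback on and full duplex forced */
+	SkMacSetRxTxEn(pAC, IoC, Port, SK_MAC_LOOPB_ON | SK_PHY_FULLD_ON);
+}
+#endif /* EXAMPLE_ONLY */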
+
+
+/******************************************************************************
+ *
+ * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up
+ *
+ * Description: enables Rx/Tx dep. on board type
+ *
+ * Returns:
+ * 0 o.k.
+ * != 0 Error happened
+ */
+int SkMacRxTxEnable(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 Reg; /* 16-bit register value */
+ SK_U16 IntMask; /* MAC interrupt mask */
+#ifdef GENESIS
+ SK_U16 SWord;
+#endif
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (!pPrt->PHWLinkUp) {
+ /* The Hardware link is NOT up */
+ return(0);
+ }
+
+ if ((pPrt->PLinkMode == SK_LMODE_AUTOHALF ||
+ pPrt->PLinkMode == SK_LMODE_AUTOFULL ||
+ pPrt->PLinkMode == SK_LMODE_AUTOBOTH) &&
+ pPrt->PAutoNegFail) {
+ /* Auto-negotiation is not done or failed */
+ return(0);
+ }
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* set Duplex Mode and Pause Mode */
+ SkXmInitDupMd(pAC, IoC, Port);
+
+ SkXmInitPauseMd(pAC, IoC, Port);
+
+ /*
+ * Initialize the Interrupt Mask Register. Default IRQs are...
+ * - Link Asynchronous Event
+ * - Link Partner requests config
+ * - Auto Negotiation Done
+ * - Rx Counter Event Overflow
+ * - Tx Counter Event Overflow
+ * - Transmit FIFO Underrun
+ */
+ IntMask = XM_DEF_MSK;
+
+#ifdef DEBUG
+ /* add IRQ for Receive FIFO Overflow */
+ IntMask &= ~XM_IS_RXF_OV;
+#endif /* DEBUG */
+
+ if (pPrt->PhyType != SK_PHY_XMAC) {
+ /* disable GP0 interrupt bit */
+ IntMask |= XM_IS_INP_ASS;
+ }
+ XM_OUT16(IoC, Port, XM_IMSK, IntMask);
+
+ /* get MMU Command Reg. */
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Reg);
+
+ if (pPrt->PhyType != SK_PHY_XMAC &&
+ (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL ||
+ pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL)) {
+ /* set to Full Duplex */
+ Reg |= XM_MMU_GMII_FD;
+ }
+
+ switch (pPrt->PhyType) {
+ case SK_PHY_BCOM:
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom Phys
+ * Enable Power Management after link up
+ */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord);
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
+ (SK_U16)(SWord & ~PHY_B_AC_DIS_PM));
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK,
+ (SK_U16)PHY_B_DEF_MSK);
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, PHY_L_DEF_MSK);
+ break;
+ case SK_PHY_NAT:
+ /* todo National:
+ SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, PHY_N_DEF_MSK); */
+ /* no interrupts possible from National ??? */
+ break;
+#endif /* OTHER_PHY */
+ }
+
+ /* enable Rx/Tx */
+ XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /*
+ * Initialize the Interrupt Mask Register. Default IRQs are...
+ * - Rx Counter Event Overflow
+ * - Tx Counter Event Overflow
+ * - Transmit FIFO Underrun
+ */
+ IntMask = GMAC_DEF_MSK;
+
+#ifdef DEBUG
+ /* add IRQ for Receive FIFO Overrun */
+ IntMask |= GM_IS_RX_FF_OR;
+#endif /* DEBUG */
+
+ SK_OUT8(IoC, GMAC_IRQ_MSK, (SK_U8)IntMask);
+
+ /* get General Purpose Control */
+ GM_IN16(IoC, Port, GM_GP_CTRL, &Reg);
+
+ if (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL ||
+ pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) {
+ /* set to Full Duplex */
+ Reg |= GM_GPCR_DUP_FULL;
+ }
+
+ /* enable Rx/Tx */
+ GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Reg | GM_GPCR_RX_ENA |
+ GM_GPCR_TX_ENA));
+
+#ifndef VCPU
+ /* Enable all PHY interrupts */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK,
+ (SK_U16)PHY_M_DEF_MSK);
+#endif /* VCPU */
+ }
+#endif /* YUKON */
+
+ return(0);
+
+} /* SkMacRxTxEnable */
+
+
+/******************************************************************************
+ *
+ * SkMacRxTxDisable() - Disable Receiver and Transmitter
+ *
+ * Description: disables Rx/Tx dep. on board type
+ *
+ * Returns: N/A
+ */
+void SkMacRxTxDisable(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U16 Word;
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
+
+ XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
+
+ /* dummy read to ensure writing */
+ XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+
+ GM_IN16(IoC, Port, GM_GP_CTRL, &Word);
+
+ GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Word & ~(GM_GPCR_RX_ENA |
+ GM_GPCR_TX_ENA)));
+
+ /* dummy read to ensure writing */
+ GM_IN16(IoC, Port, GM_GP_CTRL, &Word);
+ }
+#endif /* YUKON */
+
+} /* SkMacRxTxDisable */
+
+
+/******************************************************************************
+ *
+ * SkMacIrqDisable() - Disable IRQ from MAC
+ *
+ * Description: sets the IRQ-mask to disable IRQ dep. on board type
+ *
+ * Returns: N/A
+ */
+void SkMacIrqDisable(
+SK_AC *pAC, /* Adapter Context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+#ifdef GENESIS
+ SK_U16 Word;
+#endif
+
+ pPrt = &pAC->GIni.GP[Port];
+
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+
+ /* disable all XMAC IRQs */
+ XM_OUT16(IoC, Port, XM_IMSK, 0xffff);
+
+ /* Disable all PHY interrupts */
+ switch (pPrt->PhyType) {
+ case SK_PHY_BCOM:
+ /* Make sure that PHY is initialized */
+ if (pPrt->PState != SK_PRT_RESET) {
+ /* NOT allowed if BCOM is in RESET state */
+ /* Workaround BCOM Errata (#10523) all BCom */
+ /* Disable Power Management if link is down */
+ SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Word);
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
+ (SK_U16)(Word | PHY_B_AC_DIS_PM));
+ SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff);
+ }
+ break;
+#ifdef OTHER_PHY
+ case SK_PHY_LONE:
+ SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0);
+ break;
+ case SK_PHY_NAT:
+ /* todo: National
+ SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */
+ break;
+#endif /* OTHER_PHY */
+ }
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* disable all GMAC IRQs */
+ SK_OUT8(IoC, GMAC_IRQ_MSK, 0);
+
+#ifndef VCPU
+ /* Disable all PHY interrupts */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0);
+#endif /* VCPU */
+ }
+#endif /* YUKON */
+
+} /* SkMacIrqDisable */
+
+
+#ifdef SK_DIAG
+/******************************************************************************
+ *
+ * SkXmSendCont() - Enable / Disable Send Continuous Mode
+ *
+ * Description: enable / disable Send Continuous Mode on XMAC
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmSendCont(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL Enable) /* Enable / Disable */
+{
+ SK_U32 MdReg;
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+
+ if (Enable) {
+ MdReg |= XM_MD_TX_CONT;
+ }
+ else {
+ MdReg &= ~XM_MD_TX_CONT;
+ }
+ /* setup Mode Register */
+ XM_OUT32(IoC, Port, XM_MODE, MdReg);
+
+} /* SkXmSendCont */
+
+
+/******************************************************************************
+ *
+ * SkMacTimeStamp() - Enable / Disable Time Stamp
+ *
+ * Description: enable / disable Time Stamp generation for Rx packets
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacTimeStamp(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL Enable) /* Enable / Disable */
+{
+ SK_U32 MdReg;
+ SK_U8 TimeCtrl;
+
+ if (pAC->GIni.GIGenesis) {
+
+ XM_IN32(IoC, Port, XM_MODE, &MdReg);
+
+ if (Enable) {
+ MdReg |= XM_MD_ATS;
+ }
+ else {
+ MdReg &= ~XM_MD_ATS;
+ }
+ /* setup Mode Register */
+ XM_OUT32(IoC, Port, XM_MODE, MdReg);
+ }
+ else {
+ if (Enable) {
+ TimeCtrl = GMT_ST_START | GMT_ST_CLR_IRQ;
+ }
+ else {
+ TimeCtrl = GMT_ST_STOP | GMT_ST_CLR_IRQ;
+ }
+ /* Start/Stop Time Stamp Timer */
+ SK_OUT8(IoC, GMAC_TI_ST_CTRL, TimeCtrl);
+ }
+
+} /* SkMacTimeStamp*/
+
+#else /* !SK_DIAG */
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg
+ *
+ * This function analyses the Interrupt status word. If any of the
+ * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable
+ * is set true.
+ */
+void SkXmAutoNegLipaXmac(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_U16 IStatus) /* Interrupt Status word to analyse */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO &&
+ (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04X\n",
+ Port, IStatus));
+ pPrt->PLipaAutoNeg = SK_LIPA_AUTO;
+ }
+} /* SkXmAutoNegLipaXmac */
+#endif /* GENESIS */
+
+
+/******************************************************************************
+ *
+ * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg
+ *
+ * This function analyses the PHY status word.
+ * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable
+ * is set true.
+ */
+void SkMacAutoNegLipaPhy(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_U16 PhyStat) /* PHY Status word to analyse */
+{
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO &&
+ (PhyStat & PHY_ST_AN_OVER) != 0) {
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04X\n",
+ Port, PhyStat));
+ pPrt->PLipaAutoNeg = SK_LIPA_AUTO;
+ }
+} /* SkMacAutoNegLipaPhy */
+
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmIrq() - Interrupt Service Routine
+ *
+ * Description: services an Interrupt Request of the XMAC
+ *
+ * Note:
+ * With an external PHY, some interrupt bits are not meaningful anymore:
+ * - LinkAsyncEvent (bit #14) XM_IS_LNK_AE
+ * - LinkPartnerReqConfig (bit #10) XM_IS_LIPA_RC
+ * - Page Received (bit #9) XM_IS_RX_PAGE
+ * - NextPageLoadedForXmt (bit #8) XM_IS_TX_PAGE
+ * - AutoNegDone (bit #7) XM_IS_AND
+ * Also probably not valid any more is the GP0 input bit:
+ * - GPRegisterBit0set XM_IS_INP_ASS
+ *
+ * Returns:
+ * nothing
+ */
+void SkXmIrq(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_EVPARA Para;
+ SK_U16 IStatus; /* Interrupt status read from the XMAC */
+ SK_U16 IStatus2;
+#ifdef SK_SLIM
+ SK_U64 OverflowStatus;
+#endif
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ XM_IN16(IoC, Port, XM_ISRC, &IStatus);
+
+ /* LinkPartner Auto-negable? */
+ if (pPrt->PhyType == SK_PHY_XMAC) {
+ SkXmAutoNegLipaXmac(pAC, IoC, Port, IStatus);
+ }
+ else {
+ /* mask bits that are not used with ext. PHY */
+ IStatus &= ~(XM_IS_LNK_AE | XM_IS_LIPA_RC |
+ XM_IS_RX_PAGE | XM_IS_TX_PAGE |
+ XM_IS_AND | XM_IS_INP_ASS);
+ }
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("XmacIrq Port %d Isr 0x%04X\n", Port, IStatus));
+
+ if (!pPrt->PHWLinkUp) {
+ /* Spurious XMAC interrupt */
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("SkXmIrq: spurious interrupt on Port %d\n", Port));
+ return;
+ }
+
+ if ((IStatus & XM_IS_INP_ASS) != 0) {
+ /* Reread ISR Register if link is not in sync */
+ XM_IN16(IoC, Port, XM_ISRC, &IStatus2);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("SkXmIrq: Link async. Double check Port %d 0x%04X 0x%04X\n",
+ Port, IStatus, IStatus2));
+ IStatus &= ~XM_IS_INP_ASS;
+ IStatus |= IStatus2;
+ }
+
+ if ((IStatus & XM_IS_LNK_AE) != 0) {
+ /* not used, GP0 is used instead */
+ }
+
+ if ((IStatus & XM_IS_TX_ABORT) != 0) {
+ /* not used */
+ }
+
+ if ((IStatus & XM_IS_FRC_INT) != 0) {
+ /* not used, use ASIC IRQ instead if needed */
+ }
+
+ if ((IStatus & (XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE)) != 0) {
+ SkHWLinkDown(pAC, IoC, Port);
+
+ /* Signal to RLMT */
+ Para.Para32[0] = (SK_U32)Port;
+ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
+
+ /* Start workaround Errata #2 timer */
+ SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
+ SKGE_HWAC, SK_HWEV_WATIM, Para);
+ }
+
+ if ((IStatus & XM_IS_RX_PAGE) != 0) {
+ /* not used */
+ }
+
+ if ((IStatus & XM_IS_TX_PAGE) != 0) {
+ /* not used */
+ }
+
+ if ((IStatus & XM_IS_AND) != 0) {
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("SkXmIrq: AND on link that is up Port %d\n", Port));
+ }
+
+ if ((IStatus & XM_IS_TSC_OV) != 0) {
+ /* not used */
+ }
+
+ /* Combined Tx & Rx Counter Overflow SIRQ Event */
+ if ((IStatus & (XM_IS_RXC_OV | XM_IS_TXC_OV)) != 0) {
+#ifdef SK_SLIM
+ SkXmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus);
+#else
+ Para.Para32[0] = (SK_U32)Port;
+ Para.Para32[1] = (SK_U32)IStatus;
+ SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para);
+#endif /* SK_SLIM */
+ }
+
+ if ((IStatus & XM_IS_RXF_OV) != 0) {
+ /* normal situation -> no effect */
+#ifdef DEBUG
+ pPrt->PRxOverCnt++;
+#endif /* DEBUG */
+ }
+
+ if ((IStatus & XM_IS_TXF_UR) != 0) {
+ /* may NOT happen -> error log */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG);
+ }
+
+ if ((IStatus & XM_IS_TX_COMP) != 0) {
+ /* not served here */
+ }
+
+ if ((IStatus & XM_IS_RX_COMP) != 0) {
+ /* not served here */
+ }
+} /* SkXmIrq */
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmIrq() - Interrupt Service Routine
+ *
+ * Description: services an Interrupt Request of the GMAC
+ *
+ * Note:
+ *
+ * Returns:
+ * nothing
+ */
+void SkGmIrq(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U8 IStatus; /* Interrupt status */
+#ifdef SK_SLIM
+ SK_U64 OverflowStatus;
+#else
+ SK_EVPARA Para;
+#endif
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ SK_IN8(IoC, GMAC_IRQ_SRC, &IStatus);
+
+#ifdef XXX
+ /* LinkPartner Auto-negable? */
+ SkMacAutoNegLipaPhy(pAC, IoC, Port, IStatus);
+#endif /* XXX */
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
+ ("GmacIrq Port %d Isr 0x%04X\n", Port, IStatus));
+
+ /* Combined Tx & Rx Counter Overflow SIRQ Event */
+ if (IStatus & (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV)) {
+ /* these IRQs will be cleared by reading GMACs register */
+#ifdef SK_SLIM
+ SkGmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus);
+#else
+ Para.Para32[0] = (SK_U32)Port;
+ Para.Para32[1] = (SK_U32)IStatus;
+ SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para);
+#endif
+ }
+
+ if (IStatus & GM_IS_RX_FF_OR) {
+ /* clear GMAC Rx FIFO Overrun IRQ */
+ SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_CLI_RX_FO);
+#ifdef DEBUG
+ pPrt->PRxOverCnt++;
+#endif /* DEBUG */
+ }
+
+ if (IStatus & GM_IS_TX_FF_UR) {
+ /* clear GMAC Tx FIFO Underrun IRQ */
+ SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_CLI_TX_FU);
+ /* may NOT happen -> error log */
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG);
+ }
+
+ if (IStatus & GM_IS_TX_COMPL) {
+ /* not served here */
+ }
+
+ if (IStatus & GM_IS_RX_COMPL) {
+ /* not served here */
+ }
+} /* SkGmIrq */
+#endif /* YUKON */
+
+
+/******************************************************************************
+ *
+ * SkMacIrq() - Interrupt Service Routine for MAC
+ *
+ * Description: calls the Interrupt Service Routine dep. on board type
+ *
+ * Returns:
+ * nothing
+ */
+void SkMacIrq(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port) /* Port Index (MAC_1 + n) */
+{
+#ifdef GENESIS
+ if (pAC->GIni.GIGenesis) {
+ /* IRQ from XMAC */
+ SkXmIrq(pAC, IoC, Port);
+ }
+#endif /* GENESIS */
+
+#ifdef YUKON
+ if (pAC->GIni.GIYukon) {
+ /* IRQ from GMAC */
+ SkGmIrq(pAC, IoC, Port);
+ }
+#endif /* YUKON */
+
+} /* SkMacIrq */
+
+#endif /* !SK_DIAG */
+
+#ifdef GENESIS
+/******************************************************************************
+ *
+ * SkXmUpdateStats() - Force the XMAC to output the current statistic
+ *
+ * Description:
+ *	The XMAC holds its statistics internally. To obtain the current
+ *	values a command must be sent so that the statistics are
+ *	written to a predefined memory area on the adapter.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkXmUpdateStats(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_GEPORT *pPrt;
+ SK_U16 StatReg;
+ int WaitIndex;
+
+ pPrt = &pAC->GIni.GP[Port];
+ WaitIndex = 0;
+
+ /* Send an update command to XMAC specified */
+ XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
+
+	/*
+	 * The command register is self-clearing. Once the command bits
+	 * read back as zero, the statistics have been transferred.
+	 * Normally the command completes immediately, but just to be
+	 * sure we poll in a loop.
+	 */
+ do {
+
+ XM_IN16(IoC, Port, XM_STAT_CMD, &StatReg);
+
+ if (++WaitIndex > 10) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E021, SKERR_HWI_E021MSG);
+
+ return(1);
+ }
+ } while ((StatReg & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) != 0);
+
+ return(0);
+} /* SkXmUpdateStats */
+
+
+/******************************************************************************
+ *
+ * SkXmMacStatistic() - Get XMAC counter value
+ *
+ * Description:
+ *	Gets the 32-bit counter value. Except for the octet counters,
+ *	only the lower 32 bits are counted in hardware; the upper 32 bits
+ *	must be maintained in software by monitoring counter overflow interrupts.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkXmMacStatistic(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port, /* Port Index (MAC_1 + n) */
+SK_U16 StatAddr, /* MIB counter base address */
+SK_U32 SK_FAR *pVal) /* ptr to return statistic value */
+{
+ if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG);
+
+ return(1);
+ }
+
+ XM_IN32(IoC, Port, StatAddr, pVal);
+
+ return(0);
+} /* SkXmMacStatistic */
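+
+/*
+ * Usage sketch (illustration only, assuming pAC, IoC and Port are in
+ * scope): a snapshot of, e.g., the "transmit frames OK" counter is
+ * taken by first forcing the XMAC to dump its statistics and then
+ * reading the mirrored value:
+ *
+ *	SK_U32 TxOk;
+ *
+ *	if (SkXmUpdateStats(pAC, IoC, Port) == 0) {
+ *		(void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_OK, &TxOk);
+ *	}
+ */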
+
+
+/******************************************************************************
+ *
+ * SkXmResetCounter() - Clear MAC statistic counter
+ *
+ * Description:
+ * Force the XMAC to clear its statistic counter.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkXmResetCounter(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port) /* Port Index (MAC_1 + n) */
+{
+ XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ /* Clear two times according to Errata #3 */
+ XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
+ return(0);
+} /* SkXmResetCounter */
+
+
+/******************************************************************************
+ *
+ * SkXmOverflowStatus() - Gets the status of counter overflow interrupt
+ *
+ * Description:
+ *	Checks the source causing a counter overflow interrupt. On success the
+ *	resulting counter overflow status is written to <pStatus>, where the
+ *	upper dword stores the XMAC ReceiveCounterEvent register and the lower
+ *	dword stores the XMAC TransmitCounterEvent register.
+ *
+ * Note:
+ *	For the XMAC the interrupt source is a self-clearing register, so the
+ *	source must be checked only once. The SIRQ module does another check to
+ *	be sure that no interrupt gets lost during processing.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkXmOverflowStatus(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port, /* Port Index (MAC_1 + n) */
+SK_U16	IStatus,	/* Interrupt Status from MAC */
+SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */
+{
+ SK_U64 Status; /* Overflow status */
+ SK_U32 RegVal;
+
+ Status = 0;
+
+ if ((IStatus & XM_IS_RXC_OV) != 0) {
+
+ XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal);
+ Status |= (SK_U64)RegVal << 32;
+ }
+
+ if ((IStatus & XM_IS_TXC_OV) != 0) {
+
+ XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal);
+ Status |= (SK_U64)RegVal;
+ }
+
+ *pStatus = Status;
+
+ return(0);
+} /* SkXmOverflowStatus */
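+
+/*
+ * Unpacking sketch (illustration only): the value packed above can be
+ * split again on the caller's side, assuming 'Status' is the SK_U64
+ * returned via pStatus:
+ *
+ *	SK_U32 RxCntEv = (SK_U32)(Status >> 32);
+ *	SK_U32 TxCntEv = (SK_U32)Status;
+ *
+ * RxCntEv then holds the XM_RX_CNT_EV snapshot and TxCntEv the
+ * XM_TX_CNT_EV snapshot.
+ */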
+#endif /* GENESIS */
+
+
+#ifdef YUKON
+/******************************************************************************
+ *
+ * SkGmUpdateStats() - Force the GMAC to output the current statistic
+ *
+ * Description:
+ *	Empty function for the GMAC. Statistic data is accessible directly.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkGmUpdateStats(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port) /* Port Index (MAC_1 + n) */
+{
+ return(0);
+}
+
+
+/******************************************************************************
+ *
+ * SkGmMacStatistic() - Get GMAC counter value
+ *
+ * Description:
+ *	Gets the 32-bit counter value. Except for the octet counters,
+ *	only the lower 32 bits are counted in hardware; the upper 32 bits
+ *	must be maintained in software by monitoring counter overflow interrupts.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkGmMacStatistic(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port, /* Port Index (MAC_1 + n) */
+SK_U16 StatAddr, /* MIB counter base address */
+SK_U32 SK_FAR *pVal) /* ptr to return statistic value */
+{
+
+ if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) {
+
+ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr));
+ return(1);
+ }
+
+ GM_IN32(IoC, Port, StatAddr, pVal);
+
+ return(0);
+} /* SkGmMacStatistic */
+
+
+/******************************************************************************
+ *
+ * SkGmResetCounter() - Clear MAC statistic counter
+ *
+ * Description:
+ * Force GMAC to clear its statistic counter.
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkGmResetCounter(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port) /* Port Index (MAC_1 + n) */
+{
+ SK_U16 Reg; /* Phy Address Register */
+ SK_U16 Word;
+ int i;
+
+ GM_IN16(IoC, Port, GM_PHY_ADDR, &Reg);
+
+ /* set MIB Clear Counter Mode */
+ GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg | GM_PAR_MIB_CLR);
+
+ /* read all MIB Counters with Clear Mode set */
+ for (i = 0; i < GM_MIB_CNT_SIZE; i++) {
+ /* the reset is performed only when the lower 16 bits are read */
+ GM_IN16(IoC, Port, GM_MIB_CNT_BASE + 8*i, &Word);
+ }
+
+ /* clear MIB Clear Counter Mode */
+ GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg);
+
+ return(0);
+} /* SkGmResetCounter */
+
+
+/******************************************************************************
+ *
+ * SkGmOverflowStatus() - Gets the status of counter overflow interrupt
+ *
+ * Description:
+ *	Checks the source causing a counter overflow interrupt. On success the
+ *	resulting counter overflow status is written to <pStatus>, where the
+ *	following bit coding is used:
+ *	63:56 - unused
+ *	55:48 - TxRx interrupt register bit7:0
+ *	47:32 - Rx interrupt register
+ *	31:24 - unused
+ *	23:16 - TxRx interrupt register bit15:8
+ *	15:0  - Tx interrupt register
+ *
+ * Returns:
+ * 0: success
+ * 1: something went wrong
+ */
+int SkGmOverflowStatus(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+unsigned int Port, /* Port Index (MAC_1 + n) */
+SK_U16	IStatus,	/* Interrupt Status from MAC */
+SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */
+{
+ SK_U64 Status; /* Overflow status */
+ SK_U16 RegVal;
+
+ Status = 0;
+
+ if ((IStatus & GM_IS_RX_CO_OV) != 0) {
+ /* this register is self-clearing after read */
+ GM_IN16(IoC, Port, GM_RX_IRQ_SRC, &RegVal);
+ Status |= (SK_U64)RegVal << 32;
+ }
+
+ if ((IStatus & GM_IS_TX_CO_OV) != 0) {
+ /* this register is self-clearing after read */
+ GM_IN16(IoC, Port, GM_TX_IRQ_SRC, &RegVal);
+ Status |= (SK_U64)RegVal;
+ }
+
+ /* this register is self-clearing after read */
+ GM_IN16(IoC, Port, GM_TR_IRQ_SRC, &RegVal);
+ /* Rx overflow interrupt register bits (LoByte)*/
+ Status |= (SK_U64)((SK_U8)RegVal) << 48;
+ /* Tx overflow interrupt register bits (HiByte)*/
+ Status |= (SK_U64)(RegVal >> 8) << 16;
+
+ *pStatus = Status;
+
+ return(0);
+} /* SkGmOverflowStatus */
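+
+/*
+ * Unpacking sketch (illustration only): splitting the value built above
+ * according to the bit coding in the function header, assuming 'Status'
+ * is the SK_U64 returned via pStatus:
+ *
+ *	SK_U16 TxIrq   = (SK_U16)Status;
+ *	SK_U8  TrIrqHi = (SK_U8)(Status >> 16);
+ *	SK_U16 RxIrq   = (SK_U16)(Status >> 32);
+ *	SK_U8  TrIrqLo = (SK_U8)(Status >> 48);
+ *
+ * TxIrq and RxIrq then hold the Tx and Rx interrupt registers, while
+ * TrIrqLo/TrIrqHi hold the low and high byte of the TxRx interrupt
+ * register.
+ */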
+
+
+#ifndef SK_SLIM
+/******************************************************************************
+ *
+ * SkGmCableDiagStatus() - Starts / Gets status of cable diagnostic test
+ *
+ * Description:
+ * starts the cable diagnostic test if 'StartTest' is true
+ *	gets the results if 'StartTest' is false
+ *
+ * NOTE: this test is meaningful only when link is down
+ *
+ * Returns:
+ * 0: success
+ * 1: no YUKON copper
+ * 2: test in progress
+ */
+int SkGmCableDiagStatus(
+SK_AC *pAC, /* adapter context */
+SK_IOC IoC, /* IO context */
+int Port, /* Port Index (MAC_1 + n) */
+SK_BOOL StartTest) /* flag for start / get result */
+{
+ int i;
+ SK_U16 RegVal;
+ SK_GEPORT *pPrt;
+
+ pPrt = &pAC->GIni.GP[Port];
+
+ if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
+
+ return(1);
+ }
+
+ if (StartTest) {
+ /* only start the cable test */
+ if ((pPrt->PhyId1 & PHY_I1_REV_MSK) < 4) {
+ /* apply TDR workaround from Marvell */
+ SkGmPhyWrite(pAC, IoC, Port, 29, 0x001e);
+
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xcc00);
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xc800);
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xc400);
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xc000);
+ SkGmPhyWrite(pAC, IoC, Port, 30, 0xc100);
+ }
+
+ /* set address to 0 for MDI[0] */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, 0);
+
+ /* Read Cable Diagnostic Reg */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
+
+ /* start Cable Diagnostic Test */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CABLE_DIAG,
+ (SK_U16)(RegVal | PHY_M_CABD_ENA_TEST));
+
+ return(0);
+ }
+
+ /* Read Cable Diagnostic Reg */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
+
+ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
+ ("PHY Cable Diag.=0x%04X\n", RegVal));
+
+ if ((RegVal & PHY_M_CABD_ENA_TEST) != 0) {
+ /* test is running */
+ return(2);
+ }
+
+ /* get the test results */
+ for (i = 0; i < 4; i++) {
+ /* set address to i for MDI[i] */
+ SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, (SK_U16)i);
+
+ /* get Cable Diagnostic values */
+ SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
+
+ pPrt->PMdiPairLen[i] = (SK_U8)(RegVal & PHY_M_CABD_DIST_MSK);
+
+ pPrt->PMdiPairSts[i] = (SK_U8)((RegVal & PHY_M_CABD_STAT_MSK) >> 13);
+ }
+
+ return(0);
+} /* SkGmCableDiagStatus */
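+
+/*
+ * Usage sketch (illustration only, assuming pAC, IoC and Port are in
+ * scope, the link is down, and the usual SK_TRUE/SK_FALSE boolean
+ * constants): start the test once, then poll until it is no longer
+ * reported as running; the results are then found in
+ * pPrt->PMdiPairLen[] and pPrt->PMdiPairSts[]:
+ *
+ *	(void)SkGmCableDiagStatus(pAC, IoC, Port, SK_TRUE);
+ *
+ *	while (SkGmCableDiagStatus(pAC, IoC, Port, SK_FALSE) == 2)
+ *		;
+ *
+ * with a suitable delay between the polls.
+ */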
+#endif /* !SK_SLIM */
+#endif /* YUKON */
+
+/* End of file */
diff --git a/drivers/net/sk_g16.c b/drivers/net/sk_g16.c
new file mode 100644
index 000000000000..134ae0e6495b
--- /dev/null
+++ b/drivers/net/sk_g16.c
@@ -0,0 +1,2066 @@
+/*-
+ * Copyright (C) 1994 by PJD Weichmann & SWS Bern, Switzerland
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.c
+ *
+ * Version : $Revision: 1.1 $
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/26
+ * Last Updated : $Date: 1994/06/30 16:25:15 $
+ *
+ * Description : Schneider & Koch G16 Ethernet Device Driver for
+ * Linux Kernel >= 1.1.22
+ * Update History :
+ * Paul Gortmaker, 03/97: Fix for v2.1.x to use read{b,w}
+ * write{b,w} and memcpy -> memcpy_{to,from}io
+ *
+ * Jeff Garzik, 06/2000, Modularize
+ *
+-*/
+
+static const char rcsid[] = "$Id: sk_g16.c,v 1.1 1994/06/30 16:25:15 root Exp $";
+
+/*
+ * The Schneider & Koch (SK) G16 Network device driver is based
+ * on the 'ni6510' driver from Michael Hipp which can be found at
+ * ftp://sunsite.unc.edu/pub/Linux/system/Network/drivers/nidrivers.tar.gz
+ *
+ * Sources: 1) ni6510.c by M. Hipp
+ * 2) depca.c by D.C. Davies
+ * 3) skeleton.c by D. Becker
+ * 4) Am7990 Local Area Network Controller for Ethernet (LANCE),
+ * AMD, Pub. #05698, June 1989
+ *
+ * Many Thanks for helping me to get things working to:
+ *
+ * A. Cox (A.Cox@swansea.ac.uk)
+ * M. Hipp (mhipp@student.uni-tuebingen.de)
+ * R. Bolz (Schneider & Koch, Germany)
+ *
+ * To Do:
+ * - Support of SK_G8 and other SK Network Cards.
+ * - Autoset memory mapped RAM. Check for free memory and then
+ * configure RAM correctly.
+ * - SK_close should really set card in to initial state.
+ * - Test if IRQ 3 is not switched off. Use autoirq() functionality.
+ * (as in /drivers/net/skeleton.c)
+ * - Implement Multicast addressing. At minimum something like
+ * in depca.c.
+ * - Redo the statistics part.
+ * - Try to find out if the board is in 8 Bit or 16 Bit slot.
+ * If in 8 Bit mode don't use IRQ 11.
+ * - (Try to make it slightly faster.)
+ * - Power management support
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "sk_g16.h"
+
+/*
+ * Schneider & Koch Card Definitions
+ * =================================
+ */
+
+#define SK_NAME "SK_G16"
+
+/*
+ * SK_G16 Configuration
+ * --------------------
+ */
+
+/*
+ * Abbreviations
+ * -------------
+ *
+ * RAM - used for the 16KB shared memory
+ * Boot_ROM, ROM - are used for referencing the BootEPROM
+ *
+ * SK_BOOT_ROM and SK_ADDR are symbolic constants used to configure
+ * the behaviour of the driver and the SK_G16.
+ *
+ * ! See sk_g16.install on how to install and configure the driver !
+ *
+ * SK_BOOT_ROM defines if the Boot_ROM should be switched off or not.
+ *
+ * SK_ADDR defines the address where the RAM will be mapped into the real
+ * host memory.
+ * valid addresses are from 0xa0000 to 0xfc000 in 16Kbyte steps.
+ */
+
+#define SK_BOOT_ROM 1 /* 1=BootROM on 0=off */
+
+#define SK_ADDR 0xcc000
+
+/*
+ * In POS3 are bits A14-A19 of the address bus. These bits can be set
+ * to choose the RAM address. That's why we only can choose the RAM address
+ * in 16KB steps.
+ */
+
+#define POS_ADDR (rom_addr>>14) /* Do not change this line */
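+
+/*
+ * Worked example (illustration only): with rom_addr equal to the
+ * default SK_ADDR of 0xcc000, POS_ADDR evaluates to 0xcc000 >> 14 =
+ * 0x33, i.e. address bits A19-A14, which is the value later written
+ * to POS3.
+ */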
+
+/*
+ * SK_G16 I/O PORT's + IRQ's + Boot_ROM locations
+ * ----------------------------------------------
+ */
+
+/*
+ * Like nearly every card, the SK_G16 has a specified I/O port region and
+ * only a few possible IRQs.
+ * The Installation Guide from Schneider & Koch lists IRQ2 as a possible
+ * interrupt. IRQ2 is always IRQ9 on boards with two cascaded interrupt
+ * controllers, so we use IRQ9 in SK_IRQS.
+ */
+
+/* Don't touch any of the following #defines. */
+
+#define SK_IO_PORTS { 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390, 0 }
+
+#define SK_IRQS { 3, 5, 9, 11, 0 }
+
+#define SK_BOOT_ROM_LOCATIONS { 0xc0000, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000, 0 }
+
+#define SK_BOOT_ROM_ID { 0x55, 0xaa, 0x10, 0x50, 0x06, 0x33 }
+
+/*
+ * SK_G16 POS REGISTERS
+ * --------------------
+ */
+
+/*
+ * SK_G16 has a Programmable Option Select (POS) Register.
+ * The POS is composed of 8 separate registers (POS0-7) which
+ * are I/O mapped on an address set by the W1 switch.
+ *
+ */
+
+#define SK_POS_SIZE 8 /* 8 I/O Ports are used by SK_G16 */
+
+#define SK_POS0 ioaddr /* Card-ID Low (R) */
+#define SK_POS1 ioaddr+1 /* Card-ID High (R) */
+#define SK_POS2 ioaddr+2 /* Card-Enable, Boot-ROM Disable (RW) */
+#define SK_POS3 ioaddr+3 /* Base address of RAM */
+#define SK_POS4 ioaddr+4 /* IRQ */
+
+/* POS5 - POS7 are unused */
+
+/*
+ * SK_G16 MAC PREFIX
+ * -----------------
+ */
+
+/*
+ * Schneider & Koch manufacturer code (00:00:5a).
+ * This must be checked so that we are sure it is an SK card.
+ */
+
+#define SK_MAC0 0x00
+#define SK_MAC1 0x00
+#define SK_MAC2 0x5a
+
+/*
+ * SK_G16 ID
+ * ---------
+ */
+
+/*
+ * If POS0,POS1 contain the following ID, then we know
+ * at which I/O Port Address we are.
+ */
+
+#define SK_IDLOW 0xfd
+#define SK_IDHIGH 0x6a
+
+
+/*
+ * LANCE POS Bit definitions
+ * -------------------------
+ */
+
+#define SK_ROM_RAM_ON (POS2_CARD)
+#define SK_ROM_RAM_OFF (POS2_EPROM)
+#define SK_ROM_ON (inb(SK_POS2) & POS2_CARD)
+#define SK_ROM_OFF (inb(SK_POS2) | POS2_EPROM)
+#define SK_RAM_ON (inb(SK_POS2) | POS2_CARD)
+#define SK_RAM_OFF (inb(SK_POS2) & POS2_EPROM)
+
+#define POS2_CARD 0x0001 /* 1 = SK_G16 on 0 = off */
+#define POS2_EPROM 0x0002 /* 1 = Boot EPROM off 0 = on */
+
+/*
+ * SK_G16 Memory mapped Registers
+ * ------------------------------
+ *
+ */
+
+#define SK_IOREG (&board->ioreg) /* LANCE data registers. */
+#define SK_PORT (&board->port) /* Control, Status register */
+#define SK_IOCOM (&board->iocom) /* I/O Command */
+
+/*
+ * SK_G16 Status/Control Register bits
+ * -----------------------------------
+ *
+ * (C) Controlreg (S) Statusreg
+ */
+
+/*
+ * Register transfer: 0 = no transfer
+ * 1 = transferring data between LANCE and I/O reg
+ */
+#define SK_IORUN 0x20
+
+/*
+ * LANCE interrupt: 0 = LANCE interrupt occurred
+ * 1 = no LANCE interrupt occurred
+ */
+#define SK_IRQ 0x10
+
+#define SK_RESET 0x08 /* Reset SK_CARD: 0 = RESET 1 = normal */
+#define SK_RW 0x02 /* 0 = write to 1 = read from */
+#define SK_ADR 0x01 /* 0 = REG DataPort 1 = RAP Reg addr port */
+
+
+#define SK_RREG SK_RW /* Transferdirection to read from lance */
+#define SK_WREG 0 /* Transferdirection to write to lance */
+#define SK_RAP SK_ADR /* Destination Register RAP */
+#define SK_RDATA 0 /* Destination Register REG DataPort */
+
+/*
+ * SK_G16 I/O Command
+ * ------------------
+ */
+
+/*
+ * Any bitcombination sets the internal I/O bit (transfer will start)
+ * when written to I/O Command
+ */
+
+#define SK_DOIO 0x80 /* Do Transfer */
+
+/*
+ * LANCE RAP (Register Address Port).
+ * ---------------------------------
+ */
+
+/*
+ * The LANCE internal registers are selected through the RAP.
+ * The Registers are:
+ *
+ * CSR0 - Status and Control flags
+ * CSR1 - Low order bits of initialize block (bits 15:00)
+ * CSR2 - High order bits of initialize block (bits 07:00, 15:08 are reserved)
+ * CSR3 - Allows redefinition of the Bus Master Interface.
+ * This register must be set to 0x0002, which means BSWAP = 0,
+ * ACON = 1, BCON = 0;
+ *
+ */
+
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
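+
+/*
+ * Access sketch (illustration only, mirroring the code in
+ * SK_lance_init() below): a CSR is read by first selecting it through
+ * the RAP and then transferring the data:
+ *
+ *	int status;
+ *
+ *	SK_set_RAP(CSR0);
+ *	status = SK_rread_reg();
+ */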
+
+/*
+ * General Definitions
+ * ===================
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * We have 16KB RAM which can be accessed by the LANCE. This memory
+ * holds not only the buffers but also the ring descriptors and
+ * the initialization block.
+ * Don't change anything unless you really know what you are doing.
+ */
+
+#define LC_LOG_TX_BUFFERS 1 /* (2 == 2^^1) 2 Transmit buffers */
+#define LC_LOG_RX_BUFFERS 3 /* (8 == 2^^3) 8 Receive buffers */
+
+/* Descriptor ring sizes */
+
+#define TMDNUM (1 << (LC_LOG_TX_BUFFERS)) /* 2 Transmit descriptor rings */
+#define RMDNUM (1 << (LC_LOG_RX_BUFFERS)) /* 8 Receive Buffers */
+
+/* Define Mask for setting RMD, TMD length in the LANCE init_block */
+
+#define TMDNUMMASK (LC_LOG_TX_BUFFERS << 29)
+#define RMDNUMMASK (LC_LOG_RX_BUFFERS << 29)
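+
+/*
+ * Worked example (illustration only): with LC_LOG_TX_BUFFERS = 1 and
+ * LC_LOG_RX_BUFFERS = 3 the masks evaluate to
+ *
+ *	TMDNUMMASK = 1 << 29 = 0x20000000
+ *	RMDNUMMASK = 3 << 29 = 0x60000000
+ *
+ * These values are OR-ed into the descriptor ring pointers (ib.tdrp
+ * and ib.rdrp) in SK_lance_init(), so the ring length ends up in the
+ * upper bits of the ring address as the LANCE init block expects.
+ */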
+
+/*
+ * Data Buffer size is set to maximum packet length.
+ */
+
+#define PKT_BUF_SZ 1518
+
+/*
+ * The number of low I/O ports used by the ethercard.
+ */
+
+#define ETHERCARD_TOTAL_SIZE SK_POS_SIZE
+
+/*
+ * SK_DEBUG
+ *
+ * Here you can choose what level of debugging is wanted.
+ *
+ * If SK_DEBUG and SK_DEBUG2 are undefined, then only the
+ * necessary messages will be printed.
+ *
+ * If SK_DEBUG is defined, there will be many debugging prints
+ * which can help to find some mistakes in configuration or even
+ * in the driver code.
+ *
+ * If SK_DEBUG2 is defined, many many messages will be printed
+ * which normally you don't need. I used this to check the interrupt
+ * routine.
+ *
+ * (If you define only SK_DEBUG2 then only the messages for
+ * checking interrupts will be printed!)
+ *
+ * The normal way of life is:
+ *
+ * To get the whole thing going, leave both symbolic constants
+ * undefined. If you face any problems and you know what's going
+ * on (you know something about the card and can interpret some
+ * hex LANCE register output), then define SK_DEBUG.
+ *
+ */
+
+#undef SK_DEBUG /* debugging */
+#undef SK_DEBUG2 /* debugging with more verbose report */
+
+#ifdef SK_DEBUG
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x) /**/
+#endif
+
+#ifdef SK_DEBUG2
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x) /**/
+#endif
+
+/*
+ * SK_G16 RAM
+ *
+ * The components are memory mapped and can be set in a region from
+ * 0x00000 through 0xfc000 in 16KB steps.
+ *
+ * The Network components are: dual ported RAM, Prom, I/O Reg, Status-,
+ * Controlregister and I/O Command.
+ *
+ * dual ported RAM: This is the only memory region which the LANCE chip
+ * has access to. From the Lance it is addressed from 0x0000 to
+ * 0x3fbf. The host accesses it normally.
+ *
+ * PROM: The PROM holds the Ethernet MAC address. It is realised as an
+ *       8-bit PROM; this means only the 16 even addresses of the
+ *       32 byte address region are used. Access to an odd address
+ *       results in invalid data.
+ *
+ * LANCE I/O Reg: The I/O Reg is built of 4 single registers: Low-Byte Write,
+ *       Hi-Byte Write, Low-Byte Read, Hi-Byte Read.
+ *       Transfers from or to the LANCE are always 16 bit wide, so the Low
+ *       and High registers are always relevant.
+ *
+ *       The data in the read registers is not the data in the write registers!!
+ *
+ * Port: Status- and Controlregister.
+ * Two different registers which share the same address, Status is
+ * read-only, Control is write-only.
+ *
+ * I/O Command:
+ * Any bitcombination written in here starts the transmission between
+ * Host and LANCE.
+ */
+
+typedef struct
+{
+ unsigned char ram[0x3fc0]; /* 16KB dual ported ram */
+ unsigned char rom[0x0020]; /* 32Byte PROM containing 6Byte MAC */
+ unsigned char res1[0x0010]; /* reserved */
+ unsigned volatile short ioreg;/* LANCE I/O Register */
+ unsigned volatile char port; /* Statusregister and Controlregister */
+ unsigned char iocom; /* I/O Command Register */
+} SK_RAM;
+
+/* struct */
+
+/*
+ * This is the structure for the dual ported RAM. We
+ * have exactly 16320 bytes (0x3fc0). In here there must be:
+ *
+ * - Initialize Block (starting at a word boundary)
+ * - Receive and Transmit Descriptor Rings (quadword boundary)
+ * - Data Buffers (arbitrary boundary)
+ *
+ * This is because on the SK_G16 the LANCE has access only to the dual
+ * ported RAM and nowhere else.
+ */
+
+struct SK_ram
+{
+ struct init_block ib;
+ struct tmd tmde[TMDNUM];
+ struct rmd rmde[RMDNUM];
+ char tmdbuf[TMDNUM][PKT_BUF_SZ];
+ char rmdbuf[RMDNUM][PKT_BUF_SZ];
+};
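+
+/*
+ * Size sketch (illustration only; the exact figure depends on the
+ * descriptor structures in sk_g16.h): the 2 + 8 packet buffers alone
+ * take 10 * 1518 = 15180 bytes, leaving roughly 1140 of the 16320
+ * bytes of dual ported RAM for the init block and the descriptor
+ * rings. A hypothetical compile-time check would be
+ *
+ *	BUILD_BUG_ON(sizeof(struct SK_ram) > 0x3fc0);
+ */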
+
+/*
+ * Structure where all necessary information is for ring buffer
+ * management and statistics.
+ */
+
+struct priv
+{
+ struct SK_ram *ram; /* dual ported ram structure */
+ struct rmd *rmdhead; /* start of receive ring descriptors */
+ struct tmd *tmdhead; /* start of transmit ring descriptors */
+ int rmdnum; /* actual used ring descriptor */
+ int tmdnum; /* actual transmit descriptor for transmitting data */
+ int tmdlast; /* last sent descriptor used for error handling, etc */
+ void *rmdbufs[RMDNUM]; /* pointer to the receive buffers */
+ void *tmdbufs[TMDNUM]; /* pointer to the transmit buffers */
+ struct net_device_stats stats; /* Device driver statistics */
+};
+
+/* global variable declaration */
+
+/* IRQ map used to reserve an IRQ (see SK_open()) */
+
+/* static variables */
+
+static SK_RAM *board; /* pointer to our memory mapped board components */
+static DEFINE_SPINLOCK(SK_lock);
+
+/* Macros */
+
+
+/* Function Prototypes */
+
+/*
+ * Device Driver functions
+ * -----------------------
+ * See for short explanation of each function its definitions header.
+ */
+
+static int SK_probe(struct net_device *dev, short ioaddr);
+
+static void SK_timeout(struct net_device *dev);
+static int SK_open(struct net_device *dev);
+static int SK_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t SK_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static void SK_rxintr(struct net_device *dev);
+static void SK_txintr(struct net_device *dev);
+static int SK_close(struct net_device *dev);
+
+static struct net_device_stats *SK_get_stats(struct net_device *dev);
+
+unsigned int SK_rom_addr(void);
+
+static void set_multicast_list(struct net_device *dev);
+
+/*
+ * LANCE Functions
+ * ---------------
+ */
+
+static int SK_lance_init(struct net_device *dev, unsigned short mode);
+void SK_reset_board(void);
+void SK_set_RAP(int reg_number);
+int SK_read_reg(int reg_number);
+int SK_rread_reg(void);
+void SK_write_reg(int reg_number, int value);
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+void SK_print_pos(struct net_device *dev, char *text);
+void SK_print_dev(struct net_device *dev, char *text);
+void SK_print_ram(struct net_device *dev);
+
+
+/*-
+ * Function : SK_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Check for a SK_G16 network adaptor and initialize it.
+ * This function gets called by dev_init which initializes
+ * all Network devices.
+ *
+ * Parameters : I : struct net_device *dev - structure preconfigured
+ * from Space.c
+ * Return Value : 0 = Driver Found and initialized
+ * Errors : ENODEV - no device found
+ * ENXIO - not probed
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int io; /* 0 == probe */
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ */
+
+struct net_device * __init SK_init(int unit)
+{
+ int *port, ports[] = SK_IO_PORTS; /* SK_G16 supported ports */
+ static unsigned version_printed;
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ int err = -ENODEV;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ }
+
+ if (version_printed++ == 0)
+ PRINTK(("%s: %s", SK_NAME, rcsid));
+
+ if (io > 0xff) { /* Check a single specified address */
+ err = -EBUSY;
+ /* Check if on specified address is a SK_G16 */
+ if (request_region(io, ETHERCARD_TOTAL_SIZE, "sk_g16")) {
+ err = SK_probe(dev, io);
+ if (!err)
+ goto got_it;
+ release_region(io, ETHERCARD_TOTAL_SIZE);
+ }
+ } else if (io > 0) { /* Don't probe at all */
+ err = -ENXIO;
+ } else {
+ /* Autoprobe base_addr */
+ for (port = &ports[0]; *port; port++) {
+ io = *port;
+
+ /* Check if I/O Port region is used by another board */
+ if (!request_region(io, ETHERCARD_TOTAL_SIZE, "sk_g16"))
+ continue; /* Try next Port address */
+
+ /* Check if at ioaddr is a SK_G16 */
+ if (SK_probe(dev, io) == 0)
+ goto got_it;
+
+ release_region(io, ETHERCARD_TOTAL_SIZE);
+ }
+ }
+err_out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+
+got_it:
+ err = register_netdev(dev);
+ if (err) {
+ release_region(dev->base_addr, ETHERCARD_TOTAL_SIZE);
+ goto err_out;
+ }
+ return dev;
+
+} /* End of SK_init */
+
+
+MODULE_AUTHOR("Patrick J.D. Weichmann");
+MODULE_DESCRIPTION("Schneider & Koch G16 Ethernet Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(io, "i");
+MODULE_PARM_DESC(io, "0 to probe common ports (unsafe), or the I/O base of the board");
+
+
+#ifdef MODULE
+
+static struct net_device *SK_dev;
+
+static int __init SK_init_module (void)
+{
+ SK_dev = SK_init(-1);
+ return IS_ERR(SK_dev) ? PTR_ERR(SK_dev) : 0;
+}
+
+static void __exit SK_cleanup_module (void)
+{
+ unregister_netdev(SK_dev);
+ release_region(SK_dev->base_addr, ETHERCARD_TOTAL_SIZE);
+ free_netdev(SK_dev);
+}
+
+module_init(SK_init_module);
+module_exit(SK_cleanup_module);
+#endif
+
+
+/*-
+ * Function : SK_probe
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called by SK_init and
+ * does the main part of initialization.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * I : short ioaddr - I/O Port address where POS is.
+ * Return Value : 0 = Initialization done
+ * Errors : ENODEV - No SK_G16 found
+ * -1 - Configuration problem
+ * Globals : board - pointer to SK_RAM
+ * Update History :
+ * YY/MM/DD uid Description
+ * 94/06/30 pwe SK_ADDR now checked and at the correct place
+-*/
+
+int __init SK_probe(struct net_device *dev, short ioaddr)
+{
+ int i,j; /* Counters */
+ int sk_addr_flag = 0; /* SK ADDR correct? 1 - no, 0 - yes */
+ unsigned int rom_addr; /* used to store RAM address used for POS_ADDR */
+
+ struct priv *p = netdev_priv(dev); /* SK_G16 private structure */
+
+ if (inb(SK_POS0) != SK_IDLOW || inb(SK_POS1) != SK_IDHIGH)
+ return -ENODEV;
+ dev->base_addr = ioaddr;
+
+ if (SK_ADDR & 0x3fff || SK_ADDR < 0xa0000)
+ {
+
+ sk_addr_flag = 1;
+
+ /*
+ * Now here we could use a routine which searches for a free
+ * place in the ram and set SK_ADDR if found. TODO.
+ */
+ }
+
+ if (SK_BOOT_ROM) /* Shall we keep Boot_ROM on ? */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is set.\n", SK_NAME));
+
+ rom_addr = SK_rom_addr();
+
+ if (rom_addr == 0) /* No Boot_ROM found */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR; /* assign predefined address */
+
+ PRINTK(("## %s: NO Bootrom found \n", SK_NAME));
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else if (rom_addr == SK_ADDR)
+ {
+ printk("%s: RAM + ROM are set to the same address %#08x\n"
+ " Check configuration. Now switching off Boot_ROM\n",
+ SK_NAME, rom_addr);
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off*/
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else
+ {
+ PRINTK(("## %s: Found ROM at %#08x\n", SK_NAME, rom_addr));
+ PRINTK(("## %s: Keeping Boot_ROM on\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_ROM_RAM_ON, SK_POS2); /* RAM on, BOOT_ROM on */
+ }
+ }
+ else /* Don't keep Boot_ROM */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is not set.\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_rom_addr(); /* Try to find a Boot_ROM */
+
+ /* IF we find a Boot_ROM disable it */
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+
+ /* We found a Boot_ROM and it's gone. Set RAM address on
+ * Boot_ROM address.
+ */
+
+ if (rom_addr)
+ {
+	    printk("%s: We found Boot_ROM at %#08x. Now setting RAM on "
+ "that address\n", SK_NAME, rom_addr);
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM on Boot_ROM address */
+ }
+ else /* We did not find a Boot_ROM, use predefined SK_ADDR for ram */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ }
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "POS registers after ROM, RAM config");
+#endif
+
+ board = (SK_RAM *) isa_bus_to_virt(rom_addr);
+
+ /* Read in station address */
+ for (i = 0, j = 0; i < ETH_ALEN; i++, j+=2)
+ {
+ dev->dev_addr[i] = readb(board->rom+j);
+ }
+
+ /* Check for manufacturer code */
+ if (!(dev->dev_addr[0] == SK_MAC0 &&
+ dev->dev_addr[1] == SK_MAC1 &&
+ dev->dev_addr[2] == SK_MAC2) )
+ {
+ PRINTK(("## %s: We did not find SK_G16 at RAM location.\n",
+ SK_NAME));
+ return -ENODEV; /* NO SK_G16 found */
+ }
+
+ printk("%s: %s found at %#3x, HW addr: %#04x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ "Schneider & Koch Netcard",
+ (unsigned int) dev->base_addr,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ memset((char *) dev->priv, 0, sizeof(struct priv)); /* clear memory */
+
+ /* Assign our Device Driver functions */
+
+ dev->open = SK_open;
+ dev->stop = SK_close;
+ dev->hard_start_xmit = SK_send_packet;
+ dev->get_stats = SK_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+ dev->tx_timeout = SK_timeout;
+ dev->watchdog_timeo = HZ/7;
+
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* Initialize private structure */
+
+ p->ram = (struct SK_ram *) rom_addr; /* Set dual ported RAM addr */
+ p->tmdhead = &(p->ram)->tmde[0]; /* Set TMD head */
+ p->rmdhead = &(p->ram)->rmde[0]; /* Set RMD head */
+
+ /* Initialize buffer pointers */
+
+ for (i = 0; i < TMDNUM; i++)
+ {
+ p->tmdbufs[i] = &(p->ram)->tmdbuf[i];
+ }
+
+ for (i = 0; i < RMDNUM; i++)
+ {
+ p->rmdbufs[i] = &(p->ram)->rmdbuf[i];
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "End of SK_probe");
+ SK_print_ram(dev);
+#endif
+ return 0; /* Initialization done */
+} /* End of SK_probe() */
+
+
+/*-
+ * Function : SK_open
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called some time after booting,
+ *		   when the ifconfig program is run.
+ *
+ *		   This function requests an IRQ and sets the correct
+ *		   IRQ in the card. It then calls SK_lance_init() to
+ *		   init and start the LANCE chip. If everything is
+ *		   OK it returns 0, which means the SK_G16 is now
+ *		   open and operational.
+ *
+ * (Called by dev_open() /net/inet/dev.c)
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * Return Value : 0 - Device opened
+ * Errors : -EAGAIN - Open failed
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_open(struct net_device *dev)
+{
+ int i = 0;
+ int irqval = 0;
+ int ioaddr = dev->base_addr;
+
+ int irqtab[] = SK_IRQS;
+
+ struct priv *p = netdev_priv(dev);
+
+ PRINTK(("## %s: At beginning of SK_open(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev->irq == 0) /* Autoirq */
+ {
+ i = 0;
+
+ /*
+ * Check if one IRQ out of SK_IRQS is free and install
+ * interrupt handler.
+ * Most done by request_irq().
+ * irqval: 0 - interrupt handler installed for IRQ irqtab[i]
+ * -EBUSY - interrupt busy
+ * -EINVAL - irq > 15 or handler = NULL
+ */
+
+ do
+ {
+ irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", dev);
+ i++;
+ } while (irqval && irqtab[i]);
+
+ if (irqval) /* We tried every possible IRQ but no success */
+ {
+ printk("%s: unable to get an IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ dev->irq = irqtab[--i];
+
+ outb(i<<2, SK_POS4); /* Set Card on probed IRQ */
+
+ }
+ else if (dev->irq == 2) /* IRQ2 is always IRQ9 */
+ {
+ if (request_irq(9, &SK_interrupt, 0, "sk_g16", dev))
+ {
+ printk("%s: unable to get IRQ 9\n", dev->name);
+ return -EAGAIN;
+ }
+ dev->irq = 9;
+
+ /*
+ * Now we set card on IRQ2.
+ * This can be confusing, but remember that IRQ2 on the network
+ * card is in reality IRQ9
+ */
+ outb(0x08, SK_POS4); /* set card to IRQ2 */
+
+ }
+ else /* Check IRQ as defined in Space.c */
+ {
+ int i = 0;
+
+ /* check if IRQ free and valid. Then install Interrupt handler */
+
+ if (request_irq(dev->irq, &SK_interrupt, 0, "sk_g16", dev))
+ {
+ printk("%s: unable to get selected IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ switch(dev->irq)
+ {
+ case 3: i = 0;
+ break;
+ case 5: i = 1;
+ break;
+ case 2: i = 2;
+ break;
+ case 11:i = 3;
+ break;
+ default:
+		printk("%s: Preselected IRQ %d is invalid for %s boards\n",
+ dev->name,
+ dev->irq,
+ SK_NAME);
+ return -EAGAIN;
+ }
+
+ outb(i<<2, SK_POS4); /* Set IRQ on card */
+ }
+
+ printk("%s: Schneider & Koch G16 at %#3x, IRQ %d, shared mem at %#08x\n",
+ dev->name, (unsigned int)dev->base_addr,
+ (int) dev->irq, (unsigned int) p->ram);
+
+ if (!(i = SK_lance_init(dev, 0))) /* LANCE init OK? */
+ {
+ netif_start_queue(dev);
+
+#ifdef SK_DEBUG
+
+ /*
+ * This debug block tries to stop LANCE,
+ * reinit LANCE with transmitter and receiver disabled,
+ * then stop again and reinit with NORMAL_MODE
+ */
+
+ printk("## %s: After lance init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_DTX | MODE_DRX);
+ printk("## %s: Reinit with DTX + DRX off. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_NORMAL);
+ printk("## %s: LANCE back to normal mode. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_print_pos(dev, "POS regs before returning OK");
+
+#endif /* SK_DEBUG */
+
+ return 0; /* SK_open() is successful */
+ }
+ else /* LANCE init failed */
+ {
+
+ PRINTK(("## %s: LANCE init failed: CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ return -EAGAIN;
+ }
+
+} /* End of SK_open() */
+
+
+/*-
+ * Function : SK_lance_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Reset LANCE chip, fill RMD, TMD structures with
+ * start values and Start LANCE.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * I : int mode - put LANCE into "mode" see data-sheet for
+ * more info.
+ * Return Value : 0 - Init done
+ * Errors : -1 - Init failed
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_lance_init(struct net_device *dev, unsigned short mode)
+{
+ int i;
+ unsigned long flags;
+ struct priv *p = netdev_priv(dev);
+ struct tmd *tmdp;
+ struct rmd *rmdp;
+
+ PRINTK(("## %s: At beginning of LANCE init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Reset LANCE */
+ SK_reset_board();
+
+ /* Initialize TMD's with start values */
+ p->tmdnum = 0; /* First descriptor for transmitting */
+ p->tmdlast = 0; /* First descriptor for reading stats */
+
+ for (i = 0; i < TMDNUM; i++) /* Init all TMD's */
+ {
+ tmdp = p->tmdhead + i;
+
+ writel((unsigned long) p->tmdbufs[i], tmdp->u.buffer); /* assign buffer */
+
+ /* Mark TMD as start and end of packet */
+ writeb(TX_STP | TX_ENP, &tmdp->u.s.status);
+ }
+
+
+ /* Initialize RMD's with start values */
+
+ p->rmdnum = 0; /* First RMD which will be used */
+
+ for (i = 0; i < RMDNUM; i++) /* Init all RMD's */
+ {
+ rmdp = p->rmdhead + i;
+
+
+ writel((unsigned long) p->rmdbufs[i], rmdp->u.buffer); /* assign buffer */
+
+ /*
+ * LANCE must be owner at beginning so that he can fill in
+ * receiving packets, set status and release RMD
+ */
+
+ writeb(RX_OWN, &rmdp->u.s.status);
+
+ writew(-PKT_BUF_SZ, &rmdp->blen); /* Buffer Size (two's complement) */
+
+ writeb(0, &rmdp->mlen); /* init message length */
+
+ }
+
+ /* Fill LANCE Initialize Block */
+
+ writew(mode, (&((p->ram)->ib.mode))); /* Set operation mode */
+
+ for (i = 0; i < ETH_ALEN; i++) /* Set physical address */
+ {
+ writeb(dev->dev_addr[i], (&((p->ram)->ib.paddr[i])));
+ }
+
+ for (i = 0; i < 8; i++) /* Set multicast, logical address */
+ {
+ writeb(0, (&((p->ram)->ib.laddr[i]))); /* We do not use logical addressing */
+ }
+
+ /* Set ring descriptor pointers and set number of descriptors */
+
+ writel((int)p->rmdhead | RMDNUMMASK, (&((p->ram)->ib.rdrp)));
+ writel((int)p->tmdhead | TMDNUMMASK, (&((p->ram)->ib.tdrp)));
+
+ /* Prepare LANCE Control and Status Registers */
+
+ spin_lock_irqsave(&SK_lock, flags);
+
+ SK_write_reg(CSR3, CSR3_ACON); /* Ale Control !!!THIS MUST BE SET!!!! */
+
+ /*
+ * LANCE addresses the RAM from 0x0000 to 0x3fbf and has no access to
+ * PC Memory locations.
+ *
+	 * The SK_ram structure defines the initialization block as the first
+	 * thing in the RAM, so its address as seen by the LANCE is always
+	 * 0x0000.
+ *
+ * CSR1 contains low order bits 15:0 of initialization block address
+ * CSR2 is built of:
+ * 7:0 High order bits 23:16 of initialization block address
+ * 15:8 reserved, must be 0
+ */
+
+ /* Set initialization block address (must be on word boundary) */
+ SK_write_reg(CSR1, 0); /* Set low order bits 15:0 */
+ SK_write_reg(CSR2, 0); /* Set high order bits 23:16 */
+
+
+ PRINTK(("## %s: After setting CSR1-3. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Initialize LANCE */
+
+ /*
+ * INIT = Initialize, when set, causes the LANCE to begin the
+ * initialization procedure and access the Init Block.
+ */
+
+ SK_write_reg(CSR0, CSR0_INIT);
+
+ spin_unlock_irqrestore(&SK_lock, flags);
+
+ /* Wait until LANCE finished initialization */
+
+ SK_set_RAP(CSR0); /* Register Address Pointer to CSR0 */
+
+ for (i = 0; (i < 100) && !(SK_rread_reg() & CSR0_IDON); i++)
+ ; /* Wait until init done or go ahead if problems (i>=100) */
+
+ if (i >= 100) /* Something is wrong ! */
+ {
+ printk("%s: can't init am7990, status: %04x "
+ "init_block: %#08x\n",
+ dev->name, (int) SK_read_reg(CSR0),
+ (unsigned int) &(p->ram)->ib);
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "LANCE INIT failed");
+ SK_print_dev(dev,"Device Structure:");
+#endif
+
+ return -1; /* LANCE init failed */
+ }
+
+ PRINTK(("## %s: init done after %d ticks\n", SK_NAME, i));
+
+ /* Clear Initialize done, enable Interrupts, start LANCE */
+
+ SK_write_reg(CSR0, CSR0_IDON | CSR0_INEA | CSR0_STRT);
+
+ PRINTK(("## %s: LANCE started. CSR0: %#06x\n", SK_NAME,
+ SK_read_reg(CSR0)));
+
+ return 0; /* LANCE is up and running */
+
+} /* End of SK_lance_init() */
+
+
+
+/*-
+ * Function : SK_send_packet
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : Writes a socket buffer into a transmit descriptor
+ * and starts transmission.
+ *
+ * Parameters : I : struct sk_buff *skb - packet to transfer
+ * I : struct net_device *dev - SK_G16 device structure
+ * Return Value : 0 - OK
+ * 1 - Could not transmit (dev_queue_xmit will queue it)
+ *		       and try to send it later
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: xmitter timed out, try to restart!\n", dev->name);
+ SK_lance_init(dev, MODE_NORMAL); /* Reinit LANCE */
+ netif_wake_queue(dev); /* Clear Transmitter flag */
+ dev->trans_start = jiffies; /* Mark Start of transmission */
+}
+
+static int SK_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ struct tmd *tmdp;
+ static char pad[64];
+
+ PRINTK2(("## %s: SK_send_packet() called, CSR0 %#04x.\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * This means check if we are already in.
+ */
+
+ netif_stop_queue (dev);
+
+ {
+
+ /* Evaluate Packet length */
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ tmdp = p->tmdhead + p->tmdnum; /* Which descriptor for transmitting */
+
+ /* Fill in Transmit Message Descriptor */
+
+ /* Copy data into dual ported ram */
+
+ memcpy_toio((tmdp->u.buffer & 0x00ffffff), skb->data, skb->len);
+ if (len != skb->len)
+ memcpy_toio((tmdp->u.buffer & 0x00ffffff) + skb->len, pad, len-skb->len);
+
+ writew(-len, &tmdp->blen); /* set length to transmit */
+
+ /*
+ * Packet start and end is always set because we use the maximum
+ * packet length as buffer length.
+ * Relinquish ownership to LANCE
+ */
+
+ writeb(TX_OWN | TX_STP | TX_ENP, &tmdp->u.s.status);
+
+ /* Start Demand Transmission */
+ SK_write_reg(CSR0, CSR0_TDMD | CSR0_INEA);
+
+ dev->trans_start = jiffies; /* Mark start of transmission */
+
+ /* Set pointer to next transmit buffer */
+ p->tmdnum++;
+ p->tmdnum &= TMDNUM-1;
+
+ /* Do we own the next transmit buffer ? */
+ if (! (readb(&((p->tmdhead + p->tmdnum)->u.s.status)) & TX_OWN) )
+ {
+ /*
+ * We own next buffer and are ready to transmit, so
+ * clear busy flag
+ */
+ netif_start_queue(dev);
+ }
+
+ p->stats.tx_bytes += skb->len;
+
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+} /* End of SK_send_packet */
+
+
+/*-
+ * Function : SK_interrupt
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : SK_G16 interrupt handler which checks for LANCE
+ * Errors, handles transmit and receive interrupts
+ *
+ * Parameters : I : int irq, void *dev_id, struct pt_regs * regs -
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static irqreturn_t SK_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ int csr0;
+ struct net_device *dev = dev_id;
+ struct priv *p = netdev_priv(dev);
+
+
+ PRINTK2(("## %s: SK_interrupt(). status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev == NULL)
+ {
+ printk("SK_interrupt(): IRQ %d for unknown device.\n", irq);
+ }
+
+ spin_lock (&SK_lock);
+
+ csr0 = SK_read_reg(CSR0); /* store register for checking */
+
+ /*
+ * Acknowledge all of the current interrupt sources, disable
+ * Interrupts (INEA = 0)
+ */
+
+ SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
+
+ if (csr0 & CSR0_ERR) /* LANCE Error */
+ {
+ printk("%s: error: %04x\n", dev->name, csr0);
+
+ if (csr0 & CSR0_MISS) /* No place to store packet ? */
+ {
+ p->stats.rx_dropped++;
+ }
+ }
+
+ if (csr0 & CSR0_RINT) /* Receive Interrupt (packet arrived) */
+ {
+ SK_rxintr(dev);
+ }
+
+ if (csr0 & CSR0_TINT) /* Transmit interrupt (packet sent) */
+ {
+ SK_txintr(dev);
+ }
+
+ SK_write_reg(CSR0, CSR0_INEA); /* Enable Interrupts */
+
+ spin_unlock (&SK_lock);
+ return IRQ_HANDLED;
+} /* End of SK_interrupt() */
+
+
+/*-
+ * Function : SK_txintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : After sending a packet we check status, update
+ * statistics and relinquish ownership of transmit
+ * descriptor ring.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_txintr(struct net_device *dev)
+{
+ int tmdstat;
+ struct tmd *tmdp;
+ struct priv *p = netdev_priv(dev);
+
+
+ PRINTK2(("## %s: SK_txintr() status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ tmdp = p->tmdhead + p->tmdlast; /* Which buffer we sent at last ? */
+
+ /* Set next buffer */
+ p->tmdlast++;
+ p->tmdlast &= TMDNUM-1;
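+ /* (the mask with TMDNUM-1 wraps the ring index; this relies on
+ * TMDNUM being a power of two) */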
+
+ tmdstat = readb(&tmdp->u.s.status);
+
+ /*
+ * We check status of transmitted packet.
+ * see LANCE data-sheet for error explanation
+ */
+ if (tmdstat & TX_ERR) /* Error occurred */
+ {
+ int stat2 = readw(&tmdp->status2);
+
+ printk("%s: TX error: %04x %04x\n", dev->name, tmdstat, stat2);
+
+ if (stat2 & TX_TDR) /* TDR problems? */
+ {
+ printk("%s: tdr-problems \n", dev->name);
+ }
+
+ if (stat2 & TX_RTRY) /* Failed in 16 attempts to transmit ? */
+ p->stats.tx_aborted_errors++;
+ if (stat2 & TX_LCOL) /* Late collision ? */
+ p->stats.tx_window_errors++;
+ if (stat2 & TX_LCAR) /* Loss of Carrier ? */
+ p->stats.tx_carrier_errors++;
+ if (stat2 & TX_UFLO) /* Underflow error ? */
+ {
+ p->stats.tx_fifo_errors++;
+
+ /*
+ * If a UFLO error occurs it turns the transmitter off,
+ * so we must reinitialize the LANCE.
+ */
+
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+
+ p->stats.tx_errors++;
+
+ writew(0, &tmdp->status2); /* Clear error flags */
+ }
+ else if (tmdstat & TX_MORE) /* Collisions occurred ? */
+ {
+ /*
+ * Here I have a problem.
+ * I only know that there were between 1 and 15 collisions.
+ * That is all TX_MORE tells us; after 16 failed attempts TX_RTRY
+ * would be set instead, meaning the packet could not be sent and
+ * the transfer was aborted.
+ *
+ * At first I did not have this in, but then I thought that at
+ * minimum we should see that something was not OK.
+ * If anyone knows a better way to handle this, please report it.
+ */
+
+ p->stats.collisions++;
+ }
+ else /* Packet sent without any problems */
+ {
+ p->stats.tx_packets++;
+ }
+
+ /*
+ * We mark transmitter not busy anymore, because now we have a free
+ * transmit descriptor which can be filled by SK_send_packet and
+ * afterwards sent by the LANCE
+ *
+ * The function which handles the slow IRQ parts is do_bottom_half(),
+ * which runs at normal kernel priority, i.e. with all interrupts
+ * enabled. (see kernel/irq.c)
+ *
+ * net_bh does something like this:
+ * - check if already in net_bh
+ * - try to transmit something from the send queue
+ * - if something is in the receive queue send it up to higher
+ * levels if it is a known protocol
+ * - try to transmit something from the send queue
+ */
+
+ netif_wake_queue(dev);
+
+} /* End of SK_txintr() */
+
+
+/*-
+ * Function : SK_rxintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : A packet has been received; check for errors and
+ * relinquish ownership of the receive message descriptor.
+ *
+ * Parameters : I : SK_G16 device structure
+ * Return Value : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_rxintr(struct net_device *dev)
+{
+
+ struct rmd *rmdp;
+ int rmdstat;
+ struct priv *p = netdev_priv(dev);
+
+ PRINTK2(("## %s: SK_rxintr(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ rmdp = p->rmdhead + p->rmdnum;
+
+ /* As long as we own the next entry, check status and send
+ * it up to higher layer
+ */
+
+ while (!( (rmdstat = readb(&rmdp->u.s.status)) & RX_OWN))
+ {
+ /*
+ * Start and end of packet must be set, because we use
+ * the ethernet maximum packet length (1518) as buffer size.
+ *
+ * Because our buffers are of maximum size, OFLO and BUFF errors
+ * are of no concern (see data sheet).
+ */
+
+ if ((rmdstat & (RX_STP | RX_ENP)) != (RX_STP | RX_ENP))
+ {
+ /* Start of a frame > 1518 Bytes ? */
+
+ if (rmdstat & RX_STP)
+ {
+ p->stats.rx_errors++; /* bad packet received */
+ p->stats.rx_length_errors++; /* packet too long */
+
+ printk("%s: packet too long\n", dev->name);
+ }
+
+ /*
+ * All other packets will be ignored until a new frame with
+ * start (RX_STP) set follows.
+ *
+ * What we do is just give descriptor free for new incoming
+ * packets.
+ */
+
+ writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
+
+ }
+ else if (rmdstat & RX_ERR) /* Receive Error ? */
+ {
+ printk("%s: RX error: %04x\n", dev->name, (int) rmdstat);
+
+ p->stats.rx_errors++;
+
+ if (rmdstat & RX_FRAM) p->stats.rx_frame_errors++;
+ if (rmdstat & RX_CRC) p->stats.rx_crc_errors++;
+
+ writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
+
+ }
+ else /* We have a packet which can be queued for the upper layers */
+ {
+
+ int len = readw(&rmdp->mlen) & 0x0fff; /* extract message length from receive buffer */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len+2); /* allocate socket buffer */
+
+ if (skb == NULL) /* Could not get mem ? */
+ {
+
+ /*
+ * Couldn't allocate an sk_buff, so we give the descriptor back
+ * to the LANCE, update statistics and go ahead.
+ */
+
+ writeb(RX_OWN, &rmdp->u.s.status); /* Relinquish ownership to LANCE */
+ printk("%s: Couldn't allocate sk_buff, deferring packet.\n",
+ dev->name);
+ p->stats.rx_dropped++;
+
+ break; /* Jump out */
+ }
+
+ /* Prepare sk_buff to queue for upper layers */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP header on 16 byte boundary */
+
+ /*
+ * Copy data out of our receive descriptor into sk_buff.
+ *
+ * (rmdp->u.buffer & 0x00ffffff) -> get address of buffer and
+ * ignore status fields)
+ */
+
+ memcpy_fromio(skb_put(skb,len), (rmdp->u.buffer & 0x00ffffff), len);
+
+
+ /*
+ * Notify the upper protocol layers that there is another packet
+ * to handle
+ *
+ * netif_rx() always succeeds. see /net/inet/dev.c for more.
+ */
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* queue packet and mark it for processing */
+
+ /*
+ * Packet is queued and marked for processing so we
+ * free our descriptor and update statistics
+ */
+
+ writeb(RX_OWN, &rmdp->u.s.status);
+ dev->last_rx = jiffies;
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += len;
+
+
+ p->rmdnum++;
+ p->rmdnum %= RMDNUM;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+ }
+} /* End of SK_rxintr() */
+
+
+/*-
+ * Function : SK_close
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : close gets called from dev_close() and should
+ * deinstall the card (free_irq, mem etc).
+ *
+ * Parameters : I : struct net_device *dev - our device structure
+ * Return Value : 0 - closed device driver
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/* I have tried to set BOOT_ROM on and RAM off, but then, after an 'ifconfig
+ * down', the system stops. So I don't put the card back into its init state.
+ */
+
+static int SK_close(struct net_device *dev)
+{
+
+ PRINTK(("## %s: SK_close(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ netif_stop_queue(dev); /* Transmitter busy */
+
+ printk("%s: Shutting %s down CSR0 %#06x\n", dev->name, SK_NAME,
+ (int) SK_read_reg(CSR0));
+
+ SK_write_reg(CSR0, CSR0_STOP); /* STOP the LANCE */
+
+ free_irq(dev->irq, dev); /* Free IRQ */
+
+ return 0; /* always succeed */
+
+} /* End of SK_close() */
+
+
+/*-
+ * Function : SK_get_stats
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Return current status structure to upper layers.
+ * It is called by sprintf_stats (dev.c).
+ *
+ * Parameters : I : struct net_device *dev - our device structure
+ * Return Value : struct net_device_stats * - our current statistics
+ * Errors : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static struct net_device_stats *SK_get_stats(struct net_device *dev)
+{
+
+ struct priv *p = netdev_priv(dev);
+
+ PRINTK(("## %s: SK_get_stats(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ return &p->stats; /* Return Device status */
+
+} /* End of SK_get_stats() */
+
+
+/*-
+ * Function : set_multicast_list
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function gets called when a program performs
+ * a SIOCSIFFLAGS call. Ifconfig does this if you call
+ * 'ifconfig [-]allmulti' which enables or disables the
+ * Promiscuous mode.
+ * Promiscuous mode is when the Network card accepts all
+ * packets, not only the packets which match our MAC
+ * Address. It is useful for writing a network monitor,
+ * but it is also a security problem. You have to remember
+ * that information on the net is not encrypted.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device Structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+ * 95/10/18 ACox New multicast calling scheme
+-*/
+
+
+/* Set or clear the multicast filter for SK_G16.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Reinitialize LANCE with MODE_PROM set */
+ SK_lance_init(dev, MODE_PROM);
+ }
+ else if (dev->mc_count==0 && !(dev->flags&IFF_ALLMULTI))
+ {
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+ else
+ {
+ /* Multicast with logical address filter on */
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+
+ /* Not implemented yet. */
+ }
+} /* End of set_multicast_list() */
+
+
+
+/*-
+ * Function : SK_rom_addr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/01
+ *
+ * Description : Try to find a Boot_ROM at all possible locations
+ *
+ * Parameters : None
+ * Return Value : Address where Boot_ROM is
+ * Errors : 0 - Did not find Boot_ROM
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+unsigned int __init SK_rom_addr(void)
+{
+ int i,j;
+ int rom_found = 0;
+ unsigned int rom_location[] = SK_BOOT_ROM_LOCATIONS;
+ unsigned char rom_id[] = SK_BOOT_ROM_ID;
+ unsigned char test_byte;
+
+ /* Autodetect Boot_ROM */
+ PRINTK(("## %s: Autodetection of Boot_ROM\n", SK_NAME));
+
+ for (i = 0; (rom_location[i] != 0) && (rom_found == 0); i++)
+ {
+
+ PRINTK(("## Trying ROM location %#08x", rom_location[i]));
+
+ rom_found = 1;
+ for (j = 0; j < 6; j++)
+ {
+ test_byte = readb(rom_location[i]+j);
+ PRINTK((" %02x ", *test_byte));
+
+ if(test_byte != rom_id[j])
+ {
+ rom_found = 0;
+ }
+ }
+ PRINTK(("\n"));
+ }
+
+ if (rom_found == 1)
+ {
+ PRINTK(("## %s: Boot_ROM found at %#08x\n",
+ SK_NAME, rom_location[(i-1)]));
+
+ return (rom_location[--i]);
+ }
+ else
+ {
+ PRINTK(("%s: No Boot_ROM found\n", SK_NAME));
+ return 0;
+ }
+} /* End of SK_rom_addr() */
+
+
+
+/* LANCE access functions
+ *
+ * ! CSR1-3 can only be accessed while the STOP bit in CSR0 is set !
+ */
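+
+/* All accesses below share the same handshake: select the transfer type
+ * via SK_PORT, place data (if any) in SK_IOREG, start the transfer by
+ * writing SK_DOIO to SK_IOCOM, then busy-wait until the SK_IORUN bit in
+ * SK_PORT clears; results are read back from SK_IOREG.
+ */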
+
+
+/*-
+ * Function : SK_reset_board
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : This function resets SK_G16 and all components, but
+ * POS registers are not changed
+ *
+ * Parameters : None
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ *
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_reset_board(void)
+{
+ writeb(0x00, SK_PORT); /* Reset active */
+ mdelay(5); /* Delay min 5ms */
+ writeb(SK_RESET, SK_PORT); /* Set back to normal operation */
+
+} /* End of SK_reset_board() */
+
+
+/*-
+ * Function : SK_set_RAP
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set LANCE Register Address Port to register
+ * for later data transfer.
+ *
+ * Parameters : I : reg_number - which CSR to read/write from/to
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_set_RAP(int reg_number)
+{
+ writew(reg_number, SK_IOREG);
+ writeb(SK_RESET | SK_RAP | SK_WREG, SK_PORT);
+ writeb(SK_DOIO, SK_IOCOM);
+
+ while (readb(SK_PORT) & SK_IORUN)
+ barrier();
+} /* End of SK_set_RAP() */
+
+
+/*-
+ * Function : SK_read_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set RAP and read data from a LANCE CSR register
+ *
+ * Parameters : I : reg_number - which CSR to read from
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_read_reg(int reg_number)
+{
+ SK_set_RAP(reg_number);
+
+ writeb(SK_RESET | SK_RDATA | SK_RREG, SK_PORT);
+ writeb(SK_DOIO, SK_IOCOM);
+
+ while (readb(SK_PORT) & SK_IORUN)
+ barrier();
+ return (readw(SK_IOREG));
+
+} /* End of SK_read_reg() */
+
+
+/*-
+ * Function : SK_rread_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/28
+ *
+ * Description : Read data from the previously selected register.
+ * This function requires that you know which
+ * register is actually selected. Be aware that CSR1-3
+ * can only be accessed while the STOP bit in CSR0 is set.
+ *
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_rread_reg(void)
+{
+ writeb(SK_RESET | SK_RDATA | SK_RREG, SK_PORT);
+
+ writeb(SK_DOIO, SK_IOCOM);
+
+ while (readb(SK_PORT) & SK_IORUN)
+ barrier();
+ return (readw(SK_IOREG));
+
+} /* End of SK_rread_reg() */
+
+
+/*-
+ * Function : SK_write_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function sets the RAP then fills in the
+ * LANCE I/O Reg and starts Transfer to LANCE.
+ * It waits until the transfer has ended (at most about 7 ms)
+ * and then returns.
+ *
+ * Parameters : I : reg_number - which CSR to write to
+ * I : value - what value to fill into register
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_write_reg(int reg_number, int value)
+{
+ SK_set_RAP(reg_number);
+
+ writew(value, SK_IOREG);
+ writeb(SK_RESET | SK_RDATA | SK_WREG, SK_PORT);
+ writeb(SK_DOIO, SK_IOCOM);
+
+ while (readb(SK_PORT) & SK_IORUN)
+ barrier();
+} /* End of SK_write_reg */
+
+
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+/*-
+ * Function : SK_print_pos
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function prints out the 4 POS (Programmable
+ * Option Select) Registers. Used mainly to debug operation.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * I : char * - Text which will be printed as title
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_pos(struct net_device *dev, char *text)
+{
+ int ioaddr = dev->base_addr;
+
+ unsigned char pos0 = inb(SK_POS0),
+ pos1 = inb(SK_POS1),
+ pos2 = inb(SK_POS2),
+ pos3 = inb(SK_POS3),
+ pos4 = inb(SK_POS4);
+
+
+ printk("## %s: %s.\n"
+ "## pos0=%#4x pos1=%#4x pos2=%#04x pos3=%#08x pos4=%#04x\n",
+ SK_NAME, text, pos0, pos1, pos2, (pos3<<14), pos4);
+
+} /* End of SK_print_pos() */
+
+
+
+/*-
+ * Function : SK_print_dev
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function simply prints out the important fields
+ * of the device structure.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * I : char *text - Title for printing
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_dev(struct net_device *dev, char *text)
+{
+ if (dev == NULL)
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## DEVICE == NULL\n");
+ }
+ else
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## Device Name: %s Base Address: %#06lx IRQ: %d\n",
+ dev->name, dev->base_addr, dev->irq);
+
+ printk("## next device: %#08x init function: %#08x\n",
+ (int) dev->next, (int) dev->init);
+ }
+
+} /* End of SK_print_dev() */
+
+
+
+/*-
+ * Function : SK_print_ram
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/02
+ *
+ * Description : This function is used to check how things are set up
+ * in the 16KB RAM, including the pointers to the receive and
+ * transmit descriptor rings and the rx and tx buffer locations.
+ * It contains a minor printing bug which affects only the
+ * newlines, not the values themselves.
+ *
+ * Parameters : I : struct net_device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void __init SK_print_ram(struct net_device *dev)
+{
+
+ int i;
+ struct priv *p = netdev_priv(dev);
+
+ printk("## %s: RAM Details.\n"
+ "## RAM at %#08x tmdhead: %#08x rmdhead: %#08x initblock: %#08x\n",
+ SK_NAME,
+ (unsigned int) p->ram,
+ (unsigned int) p->tmdhead,
+ (unsigned int) p->rmdhead,
+ (unsigned int) &(p->ram)->ib);
+
+ printk("## ");
+
+ for(i = 0; i < TMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("tmdbufs%d: %#08x ", (i+1), (int) p->tmdbufs[i]);
+ }
+ printk("## ");
+
+ for(i = 0; i < RMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("rmdbufs%d: %#08x ", (i+1), (int) p->rmdbufs[i]);
+ }
+ printk("\n");
+
+} /* End of SK_print_ram() */
+
diff --git a/drivers/net/sk_g16.h b/drivers/net/sk_g16.h
new file mode 100644
index 000000000000..0a5dc0908a04
--- /dev/null
+++ b/drivers/net/sk_g16.h
@@ -0,0 +1,165 @@
+/*-
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.h
+ * Version : $Revision$
+ *
+ * Author : M.Hipp (mhipp@student.uni-tuebingen.de)
+ * changes by : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : In here are all necessary definitions of
+ * the am7990 (LANCE) chip used for writing a
+ * network device driver which uses this chip
+ *
+ * $Log$
+-*/
+
+#ifndef SK_G16_H
+
+#define SK_G16_H
+
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ *
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+
+/*
+ * Control and Status Register 3 (CSR3) bit definitions
+ *
+ */
+
+#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
+#define CSR3_ACON 0x0002 /* ALE Control (RW) */
+#define CSR3_BCON 0x0001 /* Byte Control (RW) */
+
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define MODE_PROM 0x8000 /* Promiscuous Mode */
+#define MODE_INTL 0x0040 /* Internal Loopback */
+#define MODE_DRTY 0x0020 /* Disable Retry */
+#define MODE_COLL 0x0010 /* Force Collision */
+#define MODE_DTCR 0x0008 /* Disable Transmit CRC */
+#define MODE_LOOP 0x0004 /* Loopback */
+#define MODE_DTX 0x0002 /* Disable the Transmitter */
+#define MODE_DRX 0x0001 /* Disable the Receiver */
+
+#define MODE_NORMAL 0x0000 /* Normal operation mode */
+
+/*
+ * Receive message descriptor status bit definitions.
+ */
+
+#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define RX_ERR 0x40 /* Error Summary */
+#define RX_FRAM 0x20 /* Framing Error */
+#define RX_OFLO 0x10 /* Overflow Error */
+#define RX_CRC 0x08 /* CRC Error */
+#define RX_BUFF 0x04 /* Buffer Error */
+#define RX_STP 0x02 /* Start of Packet */
+#define RX_ENP 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor status bit definitions.
+ */
+
+#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define TX_ERR 0x40 /* Error Summary */
+#define TX_MORE 0x10 /* More than 1 retry needed to Xmit */
+#define TX_ONE 0x08 /* One retry needed to Xmit */
+#define TX_DEF 0x04 /* Deferred */
+#define TX_STP 0x02 /* Start of Packet */
+#define TX_ENP 0x01 /* End of Packet */
+
+/*
+ * Transmit status (2) (valid if TX_ERR == 1)
+ */
+
+#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
+#define TX_UFLO 0x4000 /* Underflow (late memory) */
+#define TX_LCOL 0x1000 /* Late collision */
+#define TX_LCAR 0x0400 /* Loss of Carrier */
+#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
+#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
+
+
+/*
+ * Structures used for Communication with the LANCE
+ */
+
+/* LANCE Initialize Block */
+
+struct init_block
+{
+ unsigned short mode; /* Mode Register */
+ unsigned char paddr[6]; /* Physical Address (MAC) */
+ unsigned char laddr[8]; /* Logical Filter Address (not used) */
+ unsigned int rdrp; /* Receive Descriptor Ring pointer */
+ unsigned int tdrp; /* Transmit Descriptor Ring pointer */
+};
+
+
+/* Receive Message Descriptor Entry */
+
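+/* In both descriptor types below the status byte overlays the most
+ * significant byte of the 32-bit buffer pointer, which is why the driver
+ * masks buffer addresses with 0x00ffffff before using them.
+ */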
+struct rmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ volatile short blen; /* Buffer Length (two's complement) */
+ unsigned short mlen; /* Message Byte Count */
+};
+
+
+/* Transmit Message Descriptor Entry */
+
+struct tmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ unsigned short blen; /* Buffer Length (two's complement) */
+ unsigned volatile short status2; /* Error Status Bits */
+};
+
+#endif /* End of SK_G16_H */
diff --git a/drivers/net/sk_mca.c b/drivers/net/sk_mca.c
new file mode 100644
index 000000000000..4c56b8d8221b
--- /dev/null
+++ b/drivers/net/sk_mca.c
@@ -0,0 +1,1217 @@
+/*
+net-3-driver for the SKNET MCA-based cards
+
+This is an extension to the Linux operating system, and is covered by the
+same GNU General Public License that covers that work.
+
+Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
+ alfred.arnold@lancom.de)
+
+This driver is based both on the 3C523 driver and the SK_G16 driver.
+
+paper sources:
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ Hans-Peter Messmer for the basic Microchannel stuff
+
+ 'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
+ for help on Ethernet driver programming
+
+ 'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
+ for documentation on the AM7990 LANCE
+
+ 'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
+ for documentation on the Junior board
+
+ 'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
+ documentation on the MC2 board
+
+ A big thank you to the S&K support for providing me so quickly with
+ documentation!
+
+ Also see http://www.syskonnect.com/
+
+ Missing things:
+
+ -> set debug level via ioctl instead of compile-time switches
+ -> I didn't follow the development of the 2.1.x kernels, so my
+ assumptions about which things changed with which kernel version
+ are probably nonsense
+
+History:
+ May 16th, 1999
+ startup
+ May 22nd, 1999
+ added private structure, methods
+ begun building data structures in RAM
+ May 23rd, 1999
+ can receive frames, send frames
+ May 24th, 1999
+ modularized initialization of LANCE
+ loadable as module
+ still Tx problem :-(
+ May 26th, 1999
+ MC2 works
+ support for multiple devices
+ display media type for MC2+
+ May 28th, 1999
+ fixed problem in GetLANCE leaving interrupts turned off
+ increase TX queue to 4 packets to improve send performance
+ May 29th, 1999
+ a few corrections in statistics, caught rcvr overruns
+ reinitialization of LANCE/board in critical situations
+ MCA info implemented
+ implemented LANCE multicast filter
+ Jun 6th, 1999
+ additions for Linux 2.2
+ Dec 25th, 1999
+ unfortunately there seem to be newer MC2+ boards that react
+ to IRQ 3/5/9/10 instead of 3/5/10/11, so we have to autoprobe
+ in questionable cases...
+ Dec 28th, 1999
+ integrated patches from David Weinehall & Bill Wendling for 2.3
+ kernels (isa_...functions). Things are defined in a way that
+ it still works with 2.0.x 8-)
+ Dec 30th, 1999
+ added handling of the remaining interrupt conditions. That
+ should cure the spurious hangs.
+ Jan 30th, 2000
+ newer kernels automatically probe more than one board, so the
+ 'startslot' as a variable is also needed here
+ June 1st, 2000
+ added changes for recent 2.3 kernels
+
+ *************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/mca-legacy.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+
+#define _SK_MCA_DRIVER_
+#include "sk_mca.h"
+
+/* ------------------------------------------------------------------------
+ * global static data - not more since we can handle multiple boards and
+ * have to pack all state info into the device struct!
+ * ------------------------------------------------------------------------ */
+
+static char *MediaNames[Media_Count] =
+ { "10Base2", "10BaseT", "10Base5", "Unknown" };
+
+static unsigned char poly[] =
+ { 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0
+};
+
+/* ------------------------------------------------------------------------
+ * private subfunctions
+ * ------------------------------------------------------------------------ */
+
+/* dump parts of shared memory - only needed during debugging */
+
+#ifdef DEBUG
+static void dumpmem(struct net_device *dev, u32 start, u32 len)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ int z;
+
+ for (z = 0; z < len; z++) {
+ if ((z & 15) == 0)
+ printk("%04x:", z);
+ printk(" %02x", readb(priv->base + start + z));
+ if ((z & 15) == 15)
+ printk("\n");
+ }
+}
+
+/* print exact time - ditto */
+
+static void PrTime(void)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ printk("%9d:%06d: ", tv.tv_sec, tv.tv_usec);
+}
+#endif
+
+/* deduce resources out of POS registers */
+
+static void __init getaddrs(int slot, int junior, int *base, int *irq,
+ skmca_medium * medium)
+{
+ u_char pos0, pos1, pos2;
+
+ if (junior) {
+ pos0 = mca_read_stored_pos(slot, 2);
+ *base = ((pos0 & 0x0e) << 13) + 0xc0000;
+ *irq = ((pos0 & 0x10) >> 4) + 10;
+ *medium = Media_Unknown;
+ } else {
+ /* reset POS 104 Bits 0+1 so the shared memory region goes to the
+ configured area between 640K and 1M. Afterwards, enable the MC2.
+ I really don't know what drove SK to do this... */
+
+ mca_write_pos(slot, 4,
+ mca_read_stored_pos(slot, 4) & 0xfc);
+ mca_write_pos(slot, 2,
+ mca_read_stored_pos(slot, 2) | 0x01);
+
+ pos1 = mca_read_stored_pos(slot, 3);
+ pos2 = mca_read_stored_pos(slot, 4);
+ *base = ((pos1 & 0x07) << 14) + 0xc0000;
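+ /* a negative IRQ value marks the POS encodings that are ambiguous
+ on newer MC2+ boards; skmca_probe() will autoprobe the real IRQ
+ in that case (see ProbeIRQ()) */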
+ switch (pos2 & 0x0c) {
+ case 0:
+ *irq = 3;
+ break;
+ case 4:
+ *irq = 5;
+ break;
+ case 8:
+ *irq = -10;
+ break;
+ case 12:
+ *irq = -11;
+ break;
+ }
+ *medium = (pos2 >> 6) & 3;
+ }
+}
+
+/* check for both cards:
+ When the MC2 is turned off because it was configured for more than 15MB RAM,
+ it is disabled and won't get detected using the standard probe. We
+ therefore have to scan the slots manually :-( */
+
+static int __init dofind(int *junior, int firstslot)
+{
+ int slot;
+ unsigned int id;
+
+ for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++) {
+ id = mca_read_stored_pos(slot, 0)
+ + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
+
+ *junior = 0;
+ if (id == SKNET_MCA_ID)
+ return slot;
+ *junior = 1;
+ if (id == SKNET_JUNIOR_MCA_ID)
+ return slot;
+ }
+ return MCA_NOTFOUND;
+}
+
+/* reset the whole board */
+
+static void ResetBoard(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+
+ writeb(CTRL_RESET_ON, priv->ctrladdr);
+ udelay(10);
+ writeb(CTRL_RESET_OFF, priv->ctrladdr);
+}
+
+/* wait for LANCE interface to become not busy */
+
+static int WaitLANCE(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ int t = 0;
+
+ while ((readb(priv->ctrladdr) & STAT_IO_BUSY) ==
+ STAT_IO_BUSY) {
+ udelay(1);
+ if (++t > 1000) {
+ printk("%s: LANCE access timeout", dev->name);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* set LANCE register - must be atomic */
+
+static void SetLANCE(struct net_device *dev, u16 addr, u16 value)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* disable interrupts */
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* wait until no transfer is pending */
+
+ WaitLANCE(dev);
+
+ /* transfer register address to RAP */
+
+ writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
+ writew(addr, priv->ioregaddr);
+ writeb(IOCMD_GO, priv->cmdaddr);
+ udelay(1);
+ WaitLANCE(dev);
+
+ /* transfer data to register */
+
+ writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
+ writew(value, priv->ioregaddr);
+ writeb(IOCMD_GO, priv->cmdaddr);
+ udelay(1);
+ WaitLANCE(dev);
+
+ /* reenable interrupts */
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* get LANCE register */
+
+static u16 GetLANCE(struct net_device *dev, u16 addr)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int res;
+
+ /* disable interrupts */
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* wait until no transfer is pending */
+
+ WaitLANCE(dev);
+
+ /* transfer register address to RAP */
+
+ writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
+ writew(addr, priv->ioregaddr);
+ writeb(IOCMD_GO, priv->cmdaddr);
+ udelay(1);
+ WaitLANCE(dev);
+
+ /* transfer data from register */
+
+ writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
+ writeb(IOCMD_GO, priv->cmdaddr);
+ udelay(1);
+ WaitLANCE(dev);
+ res = readw(priv->ioregaddr);
+
+ /* reenable interrupts */
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return res;
+}
+
+/* build up descriptors in shared RAM */
+
+static void InitDscrs(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ u32 bufaddr;
+
+ /* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
+ are always 0. */
+
+ bufaddr = RAM_DATABASE;
+ {
+ LANCE_TxDescr descr;
+ int z;
+
+ for (z = 0; z < TXCOUNT; z++) {
+ descr.LowAddr = bufaddr;
+ descr.Flags = 0;
+ descr.Len = 0xf000;
+ descr.Status = 0;
+ memcpy_toio(priv->base + RAM_TXBASE +
+ (z * sizeof(LANCE_TxDescr)), &descr,
+ sizeof(LANCE_TxDescr));
+ memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
+ bufaddr += RAM_BUFSIZE;
+ }
+ }
+
+ /* do the same for the Rx descriptors */
+
+ {
+ LANCE_RxDescr descr;
+ int z;
+
+ for (z = 0; z < RXCOUNT; z++) {
+ descr.LowAddr = bufaddr;
+ descr.Flags = RXDSCR_FLAGS_OWN;
+ descr.MaxLen = -RAM_BUFSIZE;
+ descr.Len = 0;
+ memcpy_toio(priv->base + RAM_RXBASE +
+ (z * sizeof(LANCE_RxDescr)), &descr,
+ sizeof(LANCE_RxDescr));
+ memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
+ bufaddr += RAM_BUFSIZE;
+ }
+ }
+}
+
+/* calculate the hash bit position for a given multicast address
+ taken more or less directly from the AMD datasheet... */
+
+static void UpdateCRC(unsigned char *CRC, int bit)
+{
+ int j;
+
+ /* shift CRC one bit */
+
+ memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
+ CRC[0] = 0;
+
+ /* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
+
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+}
+
+static unsigned int GetHash(char *address)
+{
+ unsigned char CRC[33];
+ int i, byte, hashcode;
+
+ /* a multicast address has bit 0 in the first byte set */
+
+ if ((address[0] & 1) == 0)
+ return -1;
+
+ /* initialize CRC */
+
+ memset(CRC, 1, sizeof(CRC));
+
+ /* loop through address bits */
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ UpdateCRC(CRC, (address[byte] >> i) & 1);
+
+ /* hashcode is the 6 least significant bits of the CRC */
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+ return hashcode;
+}
+
+/* feed ready-built initialization block into LANCE */
+
+static void InitLANCE(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+
+ /* build up descriptors. */
+
+ InitDscrs(dev);
+
+ /* next RX descriptor to be read is the first one. Since the LANCE
+ will start from the beginning after initialization, we have to
+ reset our pointers too. */
+
+ priv->nextrx = 0;
+
+ /* no TX descriptors active */
+
+ priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;
+
+ /* set up the LANCE bus control register - constant for SKnet boards */
+
+ SetLANCE(dev, LANCE_CSR3,
+ CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);
+
+ /* write address of initialization block into LANCE */
+
+ SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
+ SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);
+
+ /* we don't get ready until the LANCE has read the init block */
+
+ netif_stop_queue(dev);
+
+ /* let LANCE read the initialization block. LANCE is ready
+ when we receive the corresponding interrupt. */
+
+ SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
+}
+
+/* stop the LANCE so we can reinitialize it */
+
+static void StopLANCE(struct net_device *dev)
+{
+ /* can't take frames any more */
+
+ netif_stop_queue(dev);
+
+ /* disable interrupts, stop it */
+
+ SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
+}
+
+/* initialize card and LANCE for proper operation */
+
+static void InitBoard(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ LANCE_InitBlock block;
+
+ /* Lay out the shared RAM - first we create the init block for the LANCE.
+ We do not overwrite it later because we need it again when we switch
+ promiscuous mode on/off. */
+
+ block.Mode = 0;
+ if (dev->flags & IFF_PROMISC)
+ block.Mode |= LANCE_INIT_PROM;
+ memcpy(block.PAdr, dev->dev_addr, 6);
+ memset(block.LAdrF, 0, sizeof(block.LAdrF));
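+ /* RdrP/TdrP: bits 0..23 hold the ring base address in shared RAM,
+ bits 29..31 hold log2 of the ring length (LRXCOUNT/LTXCOUNT) */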
+ block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
+ block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);
+
+ memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
+
+ /* initialize LANCE. Implicitly sets up other structures in RAM. */
+
+ InitLANCE(dev);
+}
+
+/* deinitialize card and LANCE */
+
+static void DeinitBoard(struct net_device *dev)
+{
+ /* stop LANCE */
+
+ StopLANCE(dev);
+
+ /* reset board */
+
+ ResetBoard(dev);
+}
+
+/* probe for device's irq */
+
+static int __init ProbeIRQ(struct net_device *dev)
+{
+ unsigned long imaskval, njiffies, irq;
+ u16 csr0val;
+
+ /* enable all interrupts */
+
+ imaskval = probe_irq_on();
+
+ /* initialize the board. Wait for interrupt 'Initialization done'. */
+
+ ResetBoard(dev);
+ InitBoard(dev);
+
+ njiffies = jiffies + HZ;
+ do {
+ csr0val = GetLANCE(dev, LANCE_CSR0);
+ }
+ while (((csr0val & CSR0_IDON) == 0) && (jiffies != njiffies));
+
+ /* turn off interrupts again */
+
+ irq = probe_irq_off(imaskval);
+
+ /* if we found something, ack the interrupt */
+
+ if (irq)
+ SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_IDON);
+
+ /* back to idle state */
+
+ DeinitBoard(dev);
+
+ return irq;
+}
+
+/* ------------------------------------------------------------------------
+ * interrupt handler(s)
+ * ------------------------------------------------------------------------ */
+
+/* LANCE has read initialization block -> start it */
+
+static u16 irqstart_handler(struct net_device *dev, u16 oldcsr0)
+{
+ /* now we're ready to transmit */
+
+ netif_wake_queue(dev);
+
+ /* reset IDON bit, start LANCE */
+
+ SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
+ return GetLANCE(dev, LANCE_CSR0);
+}
+
+/* did we lose blocks due to a FIFO overrun ? */
+
+static u16 irqmiss_handler(struct net_device *dev, u16 oldcsr0)
+{
+ skmca_priv *priv = netdev_priv(dev);
+
+ /* update statistics */
+
+ priv->stat.rx_fifo_errors++;
+
+ /* reset MISS bit */
+
+ SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_MISS);
+ return GetLANCE(dev, LANCE_CSR0);
+}
+
+/* receive interrupt */
+
+static u16 irqrx_handler(struct net_device *dev, u16 oldcsr0)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ LANCE_RxDescr descr;
+ unsigned int descraddr;
+
+ /* run through queue until we reach a descriptor we do not own */
+
+ descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
+ while (1) {
+ /* read descriptor */
+ memcpy_fromio(&descr, priv->base + descraddr,
+ sizeof(LANCE_RxDescr));
+
+ /* if we reach a descriptor we do not own, we're done */
+ if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
+ break;
+
+#ifdef DEBUG
+ PrTime();
+ printk("Receive packet on descr %d len %d\n", priv->nextrx,
+ descr.Len);
+#endif
+
+ /* erroneous packet ? */
+ if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0) {
+ priv->stat.rx_errors++;
+ if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
+ priv->stat.rx_crc_errors++;
+ else if ((descr.Flags & RXDSCR_FLAGS_FRAM) != 0)
+ priv->stat.rx_frame_errors++;
+ else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
+ priv->stat.rx_fifo_errors++;
+ }
+
+ /* good packet ? */
+ else {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(descr.Len + 2);
+ if (skb == NULL)
+ priv->stat.rx_dropped++;
+ else {
+ memcpy_fromio(skb_put(skb, descr.Len),
+ priv->base +
+ descr.LowAddr, descr.Len);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stat.rx_packets++;
+ priv->stat.rx_bytes += descr.Len;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ }
+
+ /* give descriptor back to LANCE */
+ descr.Len = 0;
+ descr.Flags |= RXDSCR_FLAGS_OWN;
+
+ /* update descriptor in shared RAM */
+ memcpy_toio(priv->base + descraddr, &descr,
+ sizeof(LANCE_RxDescr));
+
+ /* go to next descriptor */
+ priv->nextrx++;
+ descraddr += sizeof(LANCE_RxDescr);
+ if (priv->nextrx >= RXCOUNT) {
+ priv->nextrx = 0;
+ descraddr = RAM_RXBASE;
+ }
+ }
+
+ /* reset RINT bit */
+
+ SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
+ return GetLANCE(dev, LANCE_CSR0);
+}
+
+/* transmit interrupt */
+
+static u16 irqtx_handler(struct net_device *dev, u16 oldcsr0)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ LANCE_TxDescr descr;
+ unsigned int descraddr;
+
+ /* check descriptors at most until no busy one is left */
+
+ descraddr =
+ RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
+ while (priv->txbusy > 0) {
+ /* read descriptor */
+ memcpy_fromio(&descr, priv->base + descraddr,
+ sizeof(LANCE_TxDescr));
+
+ /* if the LANCE still owns this one, we've worked out all sent packets */
+ if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
+ break;
+
+#ifdef DEBUG
+ PrTime();
+ printk("Send packet done on descr %d\n", priv->nexttxdone);
+#endif
+
+ /* update statistics */
+ if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0) {
+ priv->stat.tx_packets++;
+ priv->stat.tx_bytes++;
+ } else {
+ priv->stat.tx_errors++;
+ if ((descr.Status & TXDSCR_STATUS_UFLO) != 0) {
+ priv->stat.tx_fifo_errors++;
+ InitLANCE(dev);
+ }
+ else if ((descr.Status & TXDSCR_STATUS_LCOL) != 0)
+ priv->stat.tx_window_errors++;
+ else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
+ priv->stat.tx_carrier_errors++;
+ else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
+ priv->stat.tx_aborted_errors++;
+ }
+
+ /* go to next descriptor */
+ priv->nexttxdone++;
+ descraddr += sizeof(LANCE_TxDescr);
+ if (priv->nexttxdone >= TXCOUNT) {
+ priv->nexttxdone = 0;
+ descraddr = RAM_TXBASE;
+ }
+ priv->txbusy--;
+ }
+
+ /* reset TX interrupt bit */
+
+ SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
+ oldcsr0 = GetLANCE(dev, LANCE_CSR0);
+
+ /* at least one descriptor is freed. Therefore we can accept
+ a new one */
+ /* inform upper layers we're in business again */
+
+ netif_wake_queue(dev);
+
+ return oldcsr0;
+}
+
+/* general interrupt entry */
+
+static irqreturn_t irq_handler(int irq, void *device, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) device;
+ u16 csr0val;
+
+ /* read CSR0 to get interrupt cause */
+
+ csr0val = GetLANCE(dev, LANCE_CSR0);
+
+ /* in case we're not meant... */
+
+ if ((csr0val & CSR0_INTR) == 0)
+ return IRQ_NONE;
+
+#if 0
+ set_bit(LINK_STATE_RXSEM, &dev->state);
+#endif
+
+ /* loop through the interrupt bits until everything is clear */
+
+ do {
+ if ((csr0val & CSR0_IDON) != 0)
+ csr0val = irqstart_handler(dev, csr0val);
+ if ((csr0val & CSR0_RINT) != 0)
+ csr0val = irqrx_handler(dev, csr0val);
+ if ((csr0val & CSR0_MISS) != 0)
+ csr0val = irqmiss_handler(dev, csr0val);
+ if ((csr0val & CSR0_TINT) != 0)
+ csr0val = irqtx_handler(dev, csr0val);
+ if ((csr0val & CSR0_MERR) != 0) {
+ SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_MERR);
+ csr0val = GetLANCE(dev, LANCE_CSR0);
+ }
+ if ((csr0val & CSR0_BABL) != 0) {
+ SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_BABL);
+ csr0val = GetLANCE(dev, LANCE_CSR0);
+ }
+ }
+ while ((csr0val & CSR0_INTR) != 0);
+
+#if 0
+ clear_bit(LINK_STATE_RXSEM, &dev->state);
+#endif
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------------------
+ * driver methods
+ * ------------------------------------------------------------------------ */
+
+/* MCA info */
+
+static int skmca_getinfo(char *buf, int slot, void *d)
+{
+ int len = 0, i;
+ struct net_device *dev = (struct net_device *) d;
+ skmca_priv *priv;
+
+ /* can't say anything about an uninitialized device... */
+
+ if (dev == NULL)
+ return len;
+ priv = netdev_priv(dev);
+
+ /* print info */
+
+ len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
+ len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
+ dev->mem_end - 1);
+ len +=
+ sprintf(buf + len, "Transceiver: %s\n",
+ MediaNames[priv->medium]);
+ len += sprintf(buf + len, "Device: %s\n", dev->name);
+ len += sprintf(buf + len, "MAC address:");
+ for (i = 0; i < 6; i++)
+ len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
+ buf[len++] = '\n';
+ buf[len] = 0;
+
+ return len;
+}
+
+/* open driver. Means also initialization and start of LANCE */
+
+static int skmca_open(struct net_device *dev)
+{
+ int result;
+ skmca_priv *priv = netdev_priv(dev);
+
+ /* register resources - only necessary for IRQ */
+ result =
+ request_irq(priv->realirq, irq_handler,
+ SA_SHIRQ | SA_SAMPLE_RANDOM, "sk_mca", dev);
+ if (result != 0) {
+ printk("%s: failed to register irq %d\n", dev->name,
+ dev->irq);
+ return result;
+ }
+ dev->irq = priv->realirq;
+
+ /* set up the card and LANCE */
+
+ InitBoard(dev);
+
+ /* set up flags */
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* close driver. Shut down board and free allocated resources */
+
+static int skmca_close(struct net_device *dev)
+{
+ /* turn off board */
+ DeinitBoard(dev);
+
+ /* release resources */
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+ dev->irq = 0;
+
+ return 0;
+}
+
+/* transmit a block. */
+
+static int skmca_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ LANCE_TxDescr descr;
+ unsigned int address;
+ int tmplen, retval = 0;
+ unsigned long flags;
+
+ /* if we get called with a NULL descriptor, the Ethernet layer thinks
+ our card is stuck and we should reset it. We'll do this completely: */
+
+ if (skb == NULL) {
+ DeinitBoard(dev);
+ InitBoard(dev);
+ return 0; /* don't try to free the block here ;-) */
+ }
+
+ /* is there space in the Tx queue ? If no, the upper layer gave us a
+ packet in spite of us not being ready and is really in trouble.
+ We'll do the dropping for him: */
+ if (priv->txbusy >= TXCOUNT) {
+ priv->stat.tx_dropped++;
+ retval = -EIO;
+ goto tx_done;
+ }
+
+ /* get TX descriptor */
+ address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
+ memcpy_fromio(&descr, priv->base + address, sizeof(LANCE_TxDescr));
+
+ /* enter packet length as 2s complement - assure minimum length */
+ tmplen = skb->len;
+ if (tmplen < 60)
+ tmplen = 60;
+ descr.Len = 65536 - tmplen;
+
+ /* copy filler into RAM - in case we're filling up...
+ we're filling a bit more than necessary, but that doesn't harm
+ since the buffer is far larger... */
+ if (tmplen > skb->len) {
+ char *fill = "NetBSD is a nice OS too! ";
+ unsigned int destoffs = 0, l = strlen(fill);
+
+ while (destoffs < tmplen) {
+ memcpy_toio(priv->base + descr.LowAddr +
+ destoffs, fill, l);
+ destoffs += l;
+ }
+ }
+
+ /* do the real data copying */
+ memcpy_toio(priv->base + descr.LowAddr, skb->data, skb->len);
+
+ /* hand descriptor over to LANCE - this is the first and last chunk */
+ descr.Flags =
+ TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
+
+#ifdef DEBUG
+ PrTime();
+ printk("Send packet on descr %d len %d\n", priv->nexttxput,
+ skb->len);
+#endif
+
+ /* one more descriptor busy */
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->nexttxput++;
+ if (priv->nexttxput >= TXCOUNT)
+ priv->nexttxput = 0;
+ priv->txbusy++;
+
+ /* are we saturated ? */
+
+ if (priv->txbusy >= TXCOUNT)
+ netif_stop_queue(dev);
+
+ /* write descriptor back to RAM */
+ memcpy_toio(priv->base + address, &descr, sizeof(LANCE_TxDescr));
+
+ /* if no descriptors were active, give the LANCE a hint to read it
+ immediately */
+
+ if (priv->txbusy == 0)
+ SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ tx_done:
+
+ dev_kfree_skb(skb);
+
+ return retval;
+}
+
+/* return pointer to Ethernet statistics */
+
+static struct net_device_stats *skmca_stats(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+
+ return &(priv->stat);
+}
+
+/* switch receiver mode. We use the LANCE's multicast filter to prefilter
+ multicast addresses. */
+
+static void skmca_set_multicast_list(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ LANCE_InitBlock block;
+
+ /* first stop the LANCE... */
+ StopLANCE(dev);
+
+ /* ...then modify the initialization block... */
+ memcpy_fromio(&block, priv->base + RAM_INITBASE, sizeof(block));
+ if (dev->flags & IFF_PROMISC)
+ block.Mode |= LANCE_INIT_PROM;
+ else
+ block.Mode &= ~LANCE_INIT_PROM;
+
+ if (dev->flags & IFF_ALLMULTI) { /* get all multicasts */
+ memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
+ } else { /* get selected/no multicasts */
+
+ struct dev_mc_list *mptr;
+ int code;
+
+ memset(block.LAdrF, 0, sizeof(block.LAdrF));
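+ /* GetHash() yields a 6-bit value: bits 3..5 select the byte in
+ the 64-bit logical address filter, bits 0..2 the bit within
+ that byte */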
+ for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next) {
+ code = GetHash(mptr->dmi_addr);
+ block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
+ }
+ }
+
+ memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
+
+ /* ...then reinit LANCE with the correct flags */
+ InitLANCE(dev);
+}
+
+/* ------------------------------------------------------------------------
+ * hardware check
+ * ------------------------------------------------------------------------ */
+
+static int startslot; /* counts through slots when probing multiple devices */
+
+static void cleanup_card(struct net_device *dev)
+{
+ skmca_priv *priv = netdev_priv(dev);
+ DeinitBoard(dev);
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+ iounmap(priv->base);
+ mca_mark_as_unused(priv->slot);
+ mca_set_adapter_procfn(priv->slot, NULL, NULL);
+}
+
+struct net_device * __init skmca_probe(int unit)
+{
+ struct net_device *dev;
+ int force_detect = 0;
+ int junior, slot, i;
+ int base = 0, irq = 0;
+ skmca_priv *priv;
+ skmca_medium medium;
+ int err;
+
+ /* can't work without an MCA bus ;-) */
+
+ if (MCA_bus == 0)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(skmca_priv));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ /* start address of 1 --> forced detection */
+
+ if (dev->mem_start == 1)
+ force_detect = 1;
+
+ /* search through slots */
+
+ base = dev->mem_start;
+ irq = dev->base_addr;
+ for (slot = startslot; (slot = dofind(&junior, slot)) != -1; slot++) {
+ /* deduce card addresses */
+
+ getaddrs(slot, junior, &base, &irq, &medium);
+
+ /* slot already in use ? */
+
+ if (mca_is_adapter_used(slot))
+ continue;
+
+ /* were we looking for something different ? */
+
+ if (dev->irq && dev->irq != irq)
+ continue;
+ if (dev->mem_start && dev->mem_start != base)
+ continue;
+
+ /* found something that matches */
+
+ break;
+ }
+
+ /* nothing found ? */
+
+ if (slot == -1) {
+ free_netdev(dev);
+ return (base || irq) ? ERR_PTR(-ENXIO) : ERR_PTR(-ENODEV);
+ }
+
+ /* make procfs entries */
+
+ if (junior)
+ mca_set_adapter_name(slot,
+ "SKNET junior MC2 Ethernet Adapter");
+ else
+ mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
+
+ mca_mark_as_used(slot);
+
+ /* announce success */
+ printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
+ junior ? "Junior MC2" : "MC2+", slot + 1);
+
+ priv = netdev_priv(dev);
+ priv->base = ioremap(base, 0x4000);
+ if (!priv->base) {
+ mca_set_adapter_procfn(slot, NULL, NULL);
+ mca_mark_as_unused(slot);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ priv->slot = slot;
+ priv->macbase = priv->base + 0x3fc0;
+ priv->ioregaddr = priv->base + 0x3ff0;
+ priv->ctrladdr = priv->base + 0x3ff2;
+ priv->cmdaddr = priv->base + 0x3ff3;
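+/* bit-by-bit coefficients of the Ethernet CRC-32 polynomial (0xEDB88320
+ in reflected form), used by UpdateCRC() below to build the multicast
+ hash */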
+ priv->medium = medium;
+ memset(&priv->stat, 0, sizeof(struct net_device_stats));
+ spin_lock_init(&priv->lock);
+
+ /* set base + irq for this device (irq not allocated so far) */
+ dev->irq = 0;
+ dev->mem_start = base;
+ dev->mem_end = base + 0x4000;
+
+ /* autoprobe ? */
+ if (irq < 0) {
+ int nirq;
+
+ printk
+ ("%s: ambigous POS bit combination, must probe for IRQ...\n",
+ dev->name);
+ nirq = ProbeIRQ(dev);
+ if (nirq <= 0)
+ printk("%s: IRQ probe failed, assuming IRQ %d",
+ dev->name, priv->realirq = -irq);
+ else
+ priv->realirq = nirq;
+ } else
+ priv->realirq = irq;
+
+ /* set methods */
+ dev->open = skmca_open;
+ dev->stop = skmca_close;
+ dev->hard_start_xmit = skmca_tx;
+ dev->do_ioctl = NULL;
+ dev->get_stats = skmca_stats;
+ dev->set_multicast_list = skmca_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+
+ /* copy out MAC address */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(priv->macbase + (i << 1));
+
+ /* print config */
+ printk("%s: IRQ %d, memory %#lx-%#lx, "
+ "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
+ dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
+
+ /* reset board */
+
+ ResetBoard(dev);
+
+ startslot = slot + 1;
+
+ err = register_netdev(dev);
+ if (err) {
+ cleanup_card(dev);
+ free_netdev(dev);
+ dev = ERR_PTR(err);
+ }
+ return dev;
+}
+
+/* ------------------------------------------------------------------------
+ * modularization support
+ * ------------------------------------------------------------------------ */
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+
+#define DEVMAX 5
+
+static struct net_device *moddevs[DEVMAX];
+
+int init_module(void)
+{
+ int z;
+
+ startslot = 0;
+ for (z = 0; z < DEVMAX; z++) {
+ struct net_device *dev = skmca_probe(-1);
+ if (IS_ERR(dev))
+ break;
+ moddevs[z] = dev;
+ }
+ if (!z)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int z;
+
+ for (z = 0; z < DEVMAX; z++) {
+ struct net_device *dev = moddevs[z];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/sk_mca.h b/drivers/net/sk_mca.h
new file mode 100644
index 000000000000..7e7c99582746
--- /dev/null
+++ b/drivers/net/sk_mca.h
@@ -0,0 +1,172 @@
+#include <linux/version.h>
+
+#ifndef _SK_MCA_INCLUDE_
+#define _SK_MCA_INCLUDE_
+
+#ifdef _SK_MCA_DRIVER_
+
+/* Adapter ID's */
+#define SKNET_MCA_ID 0x6afd
+#define SKNET_JUNIOR_MCA_ID 0x6be9
+
+/* media enumeration - defined in a way that it fits onto the MC2+'s
+ POS registers... */
+
+typedef enum { Media_10Base2, Media_10BaseT,
+ Media_10Base5, Media_Unknown, Media_Count
+} skmca_medium;
+
+/* private structure */
+typedef struct {
+ unsigned int slot; /* MCA-Slot-# */
+ void __iomem *base;
+ void __iomem *macbase; /* base address of MAC address PROM */
+ void __iomem *ioregaddr;/* address of I/O-register (Lo) */
+ void __iomem *ctrladdr; /* address of control/stat register */
+ void __iomem *cmdaddr; /* address of I/O-command register */
+ int nextrx; /* index of next RX descriptor to
+ be read */
+ int nexttxput; /* index of next free TX descriptor */
+ int nexttxdone; /* index of next TX descriptor to
+ be finished */
+ int txbusy; /* # of busy TX descriptors */
+ struct net_device_stats stat; /* packet statistics */
+ int realirq; /* memorizes actual IRQ, even when
+ currently not allocated */
+ skmca_medium medium; /* physical connector */
+ spinlock_t lock;
+} skmca_priv;
+
+/* card registers: control/status register bits */
+
+#define CTRL_ADR_DATA 0 /* Bit 0 = 0 ->access data register */
+#define CTRL_ADR_RAP 1 /* Bit 0 = 1 ->access RAP register */
+#define CTRL_RW_WRITE 0 /* Bit 1 = 0 ->write register */
+#define CTRL_RW_READ 2 /* Bit 1 = 1 ->read register */
+#define CTRL_RESET_ON 0 /* Bit 3 = 0 ->reset board */
+#define CTRL_RESET_OFF 8 /* Bit 3 = 1 ->no reset of board */
+
+#define STAT_ADR_DATA 0 /* Bit 0 of ctrl register read back */
+#define STAT_ADR_RAP 1
+#define STAT_RW_WRITE 0 /* Bit 1 of ctrl register read back */
+#define STAT_RW_READ 2
+#define STAT_RESET_ON 0 /* Bit 3 of ctrl register read back */
+#define STAT_RESET_OFF 8
+#define STAT_IRQ_ACT 0 /* interrupt pending */
+#define STAT_IRQ_NOACT 16 /* no interrupt pending */
+#define STAT_IO_NOBUSY 0 /* no transfer busy */
+#define STAT_IO_BUSY 32 /* transfer busy */
+
+/* I/O command register bits */
+
+#define IOCMD_GO 128 /* Bit 7 = 1 -> start register xfer */
+
+/* LANCE registers */
+
+#define LANCE_CSR0 0 /* Status/Control */
+
+#define CSR0_ERR 0x8000 /* general error flag */
+#define CSR0_BABL 0x4000 /* transmitter timeout */
+#define CSR0_CERR 0x2000 /* collision error */
+#define CSR0_MISS 0x1000 /* lost Rx block */
+#define CSR0_MERR 0x0800 /* memory access error */
+#define CSR0_RINT 0x0400 /* receiver interrupt */
+#define CSR0_TINT 0x0200 /* transmitter interrupt */
+#define CSR0_IDON 0x0100 /* initialization done */
+#define CSR0_INTR 0x0080 /* general interrupt flag */
+#define CSR0_INEA 0x0040 /* interrupt enable */
+#define CSR0_RXON 0x0020 /* receiver enabled */
+#define CSR0_TXON 0x0010 /* transmitter enabled */
+#define CSR0_TDMD 0x0008 /* force transmission now */
+#define CSR0_STOP 0x0004 /* stop LANCE */
+#define CSR0_STRT 0x0002 /* start LANCE */
+#define CSR0_INIT 0x0001 /* read initialization block */
+
+#define LANCE_CSR1 1 /* addr bit 0..15 of initialization */
+#define LANCE_CSR2 2 /* 16..23 block */
+
+#define LANCE_CSR3 3 /* Bus control */
+#define CSR3_BCON_HOLD 0 /* Bit 0 = 0 -> BM1,BM0,HOLD */
+#define CSR3_BCON_BUSRQ 1 /* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ */
+#define CSR3_ALE_HIGH 0 /* Bit 1 = 0 -> ALE asserted high */
+#define CSR3_ALE_LOW 2 /* Bit 1 = 1 -> ALE asserted low */
+#define CSR3_BSWAP_OFF 0 /* Bit 2 = 0 -> no byte swap */
+#define CSR3_BSWAP_ON 4 /* Bit 2 = 1 -> byte swap */
+
+/* LANCE structures */
+
+typedef struct { /* LANCE initialization block */
+ u16 Mode; /* mode flags */
+ u8 PAdr[6]; /* MAC address */
+ u8 LAdrF[8]; /* Multicast filter */
+ u32 RdrP; /* Receive descriptor */
+ u32 TdrP; /* Transmit descriptor */
+} LANCE_InitBlock;
+
+/* Mode flags init block */
+
+#define LANCE_INIT_PROM	0x8000	/* enable promiscuous mode */
+#define LANCE_INIT_INTL 0x0040 /* internal loopback */
+#define LANCE_INIT_DRTY 0x0020 /* disable retry */
+#define LANCE_INIT_COLL 0x0010 /* force collision */
+#define LANCE_INIT_DTCR 0x0008 /* disable transmit CRC */
+#define LANCE_INIT_LOOP 0x0004 /* loopback */
+#define LANCE_INIT_DTX 0x0002 /* disable transmitter */
+#define LANCE_INIT_DRX 0x0001 /* disable receiver */
+
+typedef struct { /* LANCE Tx descriptor */
+ u16 LowAddr; /* bit 0..15 of address */
+ u16 Flags; /* bit 16..23 of address + Flags */
+ u16 Len; /* 2s complement of packet length */
+ u16 Status; /* Result of transmission */
+} LANCE_TxDescr;
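+/*
+ * Len holds the negated length, e.g. a 100 byte frame is written as
+ * (u16)(-100) == 0xff9c.
+ */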
+
+#define TXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
+#define TXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
+#define TXDSCR_FLAGS_MORE 0x1000 /* more than one retry needed? */
+#define TXDSCR_FLAGS_ONE 0x0800 /* one retry? */
+#define TXDSCR_FLAGS_DEF 0x0400 /* transmission deferred? */
+#define TXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
+#define TXDSCR_FLAGS_ENP 0x0100 /* last packet in chain? */
+
+#define TXDSCR_STATUS_BUFF 0x8000 /* buffer error? */
+#define TXDSCR_STATUS_UFLO 0x4000 /* silo underflow during transmit? */
+#define TXDSCR_STATUS_LCOL 0x1000 /* late collision? */
+#define TXDSCR_STATUS_LCAR 0x0800 /* loss of carrier? */
+#define TXDSCR_STATUS_RTRY 0x0400 /* retry error? */
+
+typedef struct { /* LANCE Rx descriptor */
+ u16 LowAddr; /* bit 0..15 of address */
+ u16 Flags; /* bit 16..23 of address + Flags */
+ u16 MaxLen; /* 2s complement of buffer length */
+ u16 Len; /* packet length */
+} LANCE_RxDescr;
+
+#define RXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
+#define RXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
+#define RXDSCR_FLAGS_FRAM 0x2000 /* framing error flag */
+#define RXDSCR_FLAGS_OFLO 0x1000 /* FIFO overflow? */
+#define RXDSCR_FLAGS_CRC 0x0800 /* CRC error? */
+#define RXDSCR_FLAGS_BUFF 0x0400 /* buffer error? */
+#define RXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
+#define RXDCSR_FLAGS_ENP 0x0100 /* last packet in chain? */
+
+/* RAM layout */
+
+#define TXCOUNT 4 /* length of TX descriptor queue */
+#define LTXCOUNT 2 /* log2 of it */
+#define RXCOUNT 4 /* length of RX descriptor queue */
+#define LRXCOUNT 2 /* log2 of it */
+
+#define RAM_INITBASE 0 /* LANCE init block */
+#define RAM_TXBASE 24 /* Start of TX descriptor queue */
+#define RAM_RXBASE \
+(RAM_TXBASE + (TXCOUNT * 8)) /* Start of RX descriptor queue */
+#define RAM_DATABASE \
+(RAM_RXBASE + (RXCOUNT * 8)) /* Start of data area for frames */
+#define RAM_BUFSIZE 1580 /* max. frame size - should never be
+ reached */
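+
+/*
+ * With the sizes above (init block = 24 bytes, each descriptor = 4 * u16 =
+ * 8 bytes) this gives RAM_TXBASE = 24, RAM_RXBASE = 24 + 4*8 = 56 and
+ * RAM_DATABASE = 56 + 4*8 = 88.
+ */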
+
+#endif /* _SK_MCA_DRIVER_ */
+
+#endif /* _SK_MCA_INCLUDE_ */
diff --git a/drivers/net/skfp/Makefile b/drivers/net/skfp/Makefile
new file mode 100644
index 000000000000..6cfccfb7889f
--- /dev/null
+++ b/drivers/net/skfp/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the SysKonnect FDDI PCI adapter driver
+#
+
+obj-$(CONFIG_SKFP) += skfp.o
+
+skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
+ ecm.o pcmplc.o pmf.o queue.o rmt.o \
+ smtdef.o smtinit.o smttimer.o srf.o lnkstat.o \
+ smtparse.o hwt.o drvfbi.o ess.o
+
+# NOTE:
+# Compiling this driver produces some warnings (and some more are
+# switched off below), but I did not fix this, because the Hardware
+# Module source (see skfddi.c for details) is used for different
+# drivers, and fixing it for Linux might cause problems in other
+# projects. To keep the source common for all those drivers (and
+# thus simplify fixes to it), please do not clean it up!
+
+EXTRA_CFLAGS += -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/skfp/can.c b/drivers/net/skfp/can.c
new file mode 100644
index 000000000000..8a49abce7961
--- /dev/null
+++ b/drivers/net/skfp/can.c
@@ -0,0 +1,83 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef lint
+static const char xID_sccs[] = "@(#)can.c 1.5 97/04/07 (C) SK " ;
+#endif
+
+/*
+ * canonical bit order
+ */
+const u_char canonical[256] = {
+ 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,
+ 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,
+ 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,
+ 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8,
+ 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,
+ 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4,
+ 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,
+ 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc,
+ 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,
+ 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2,
+ 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,
+ 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa,
+ 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,
+ 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6,
+ 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,
+ 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe,
+ 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,
+ 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1,
+ 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,
+ 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9,
+ 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,
+ 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5,
+ 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,
+ 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd,
+ 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,
+ 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3,
+ 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,
+ 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb,
+ 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,
+ 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7,
+ 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,
+ 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
+} ;
+
+#ifdef MAKE_TABLE
+int byte_reverse(x)
+int x ;
+{
+ int y = 0 ;
+
+ if (x & 0x01)
+ y |= 0x80 ;
+ if (x & 0x02)
+ y |= 0x40 ;
+ if (x & 0x04)
+ y |= 0x20 ;
+ if (x & 0x08)
+ y |= 0x10 ;
+ if (x & 0x10)
+ y |= 0x08 ;
+ if (x & 0x20)
+ y |= 0x04 ;
+ if (x & 0x40)
+ y |= 0x02 ;
+ if (x & 0x80)
+ y |= 0x01 ;
+ return(y) ;
+}
+#endif
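+
+#ifdef CANONICAL_EXAMPLE
+/*
+ * Illustrative sketch only; CANONICAL_EXAMPLE is a hypothetical guard in the
+ * spirit of MAKE_TABLE above and is not referenced elsewhere.  canonical[]
+ * maps a byte to its bit-reversed value (canonical[0x01] == 0x80,
+ * canonical[0x80] == 0x01), which is how read_address() in drvfbi.c converts
+ * MAC address bytes between MSB-first and LSB-first bit order.
+ */
+static void canonical_copy(u_char *dst, const u_char *src, int len)
+{
+	int i ;
+
+	for (i = 0 ; i < len ; i++)
+		dst[i] = canonical[src[i]] ;
+}
+#endif /* CANONICAL_EXAMPLE */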
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
new file mode 100644
index 000000000000..4c8aaa762333
--- /dev/null
+++ b/drivers/net/skfp/cfm.c
@@ -0,0 +1,627 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT CFM
+ Configuration Management
+ DAS with single MAC
+*/
+
+/*
+ * Hardware independent state machine implementation
+ * The following external SMT functions are referenced :
+ *
+ * queue_event()
+ *
+ * The following external HW dependent functions are referenced :
+ * config_mux()
+ *
+ * The following HW dependent events are required :
+ * NONE
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ;
+#endif
+
+/*
+ * FSM Macros
+ */
+#define AFLAG 0x10
+#define GO_STATE(x) (smc->mib.fddiSMTCF_State = (x)|AFLAG)
+#define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG)
+#define ACTIONS(x) (x|AFLAG)
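+
+/*
+ * GO_STATE(x) enters state x with AFLAG set, so the next pass through
+ * cfm_fsm() runs the "case ACTIONS(x)" entry actions; ACTIONS_DONE() then
+ * clears AFLAG and the plain "case x" handles subsequent events.
+ */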
+
+#ifdef DEBUG
+/*
+ * symbolic state names
+ */
+static const char * const cfm_states[] = {
+ "SC0_ISOLATED","CF1","CF2","CF3","CF4",
+	"SC1_WRAP_A","SC2_WRAP_B","SC5_THRU_B","SC7_WRAP_S",
+ "SC9_C_WRAP_A","SC10_C_WRAP_B","SC11_C_WRAP_S","SC4_THRU_A"
+} ;
+
+/*
+ * symbolic event names
+ */
+static const char * const cfm_events[] = {
+ "NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B"
+} ;
+#endif
+
+/*
+ * map from state to downstream port type
+ */
+static const u_char cf_to_ptype[] = {
+ TNONE,TNONE,TNONE,TNONE,TNONE,
+ TNONE,TB,TB,TS,
+ TA,TB,TS,TB
+} ;
+
+/*
+ * CEM port states
+ */
+#define CEM_PST_DOWN 0
+#define CEM_PST_UP 1
+#define CEM_PST_HOLD 2
+/* define portstate array only for A and B port */
+/* Do this within the smc structure (for use with multiple cards) */
+
+/*
+ * all Globals are defined in smc.h
+ * struct s_cfm
+ */
+
+/*
+ * function declarations
+ */
+static void cfm_fsm(struct s_smc *smc, int cmd);
+
+/*
+ init CFM state machine
+ clear all CFM vars and flags
+*/
+void cfm_init(struct s_smc *smc)
+{
+ smc->mib.fddiSMTCF_State = ACTIONS(SC0_ISOLATED) ;
+ smc->r.rm_join = 0 ;
+ smc->r.rm_loop = 0 ;
+ smc->y[PA].scrub = 0 ;
+ smc->y[PB].scrub = 0 ;
+ smc->y[PA].cem_pst = CEM_PST_DOWN ;
+ smc->y[PB].cem_pst = CEM_PST_DOWN ;
+}
+
+/* Some terms and conditions used by the selection criteria */
+#define THRU_ENABLED(smc) (smc->y[PA].pc_mode != PM_TREE && \
+ smc->y[PB].pc_mode != PM_TREE)
+/* Selection criteria for the ports */
+static void selection_criteria (struct s_smc *smc, struct s_phy *phy)
+{
+
+ switch (phy->mib->fddiPORTMy_Type) {
+ case TA:
+ if ( !THRU_ENABLED(smc) && smc->y[PB].cf_join ) {
+ phy->wc_flag = TRUE ;
+ } else {
+ phy->wc_flag = FALSE ;
+ }
+
+ break;
+ case TB:
+ /* take precedence over PA */
+ phy->wc_flag = FALSE ;
+ break;
+ case TS:
+ phy->wc_flag = FALSE ;
+ break;
+ case TM:
+ phy->wc_flag = FALSE ;
+ break;
+ }
+
+}
+
+void all_selection_criteria(struct s_smc *smc)
+{
+ struct s_phy *phy ;
+ int p ;
+
+ for ( p = 0,phy = smc->y ; p < NUMPHYS; p++, phy++ ) {
+ /* Do the selection criteria */
+ selection_criteria (smc,phy);
+ }
+}
+
+static void cem_priv_state(struct s_smc *smc, int event)
+/* State machine for private PORT states: used to optimize dual homing */
+{
+ int np; /* Number of the port */
+ int i;
+
+ /* Do this only in a DAS */
+ if (smc->s.sas != SMT_DAS )
+ return ;
+
+ np = event - CF_JOIN;
+
+ if (np != PA && np != PB) {
+ return ;
+ }
+ /* Change the port state according to the event (portnumber) */
+ if (smc->y[np].cf_join) {
+ smc->y[np].cem_pst = CEM_PST_UP ;
+ } else if (!smc->y[np].wc_flag) {
+ /* set the port to done only if it is not withheld */
+ smc->y[np].cem_pst = CEM_PST_DOWN ;
+ }
+
+	/* Don't set a port that is on hold to down */
+
+	/* Check all ports for restart conditions */
+ for (i = 0 ; i < 2 ; i ++ ) {
+		/* Check whether the port is on hold and no withhold is pending */
+ if ( smc->y[i].cem_pst == CEM_PST_HOLD && !smc->y[i].wc_flag ) {
+ smc->y[i].cem_pst = CEM_PST_DOWN;
+ queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
+ }
+ if ( smc->y[i].cem_pst == CEM_PST_UP && smc->y[i].wc_flag ) {
+ smc->y[i].cem_pst = CEM_PST_HOLD;
+ queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
+ }
+ if ( smc->y[i].cem_pst == CEM_PST_DOWN && smc->y[i].wc_flag ) {
+ /*
+ * The port must be restarted when the wc_flag
+ * will be reset. So set the port on hold.
+ */
+ smc->y[i].cem_pst = CEM_PST_HOLD;
+ }
+ }
+ return ;
+}
+
+/*
+ CFM state machine
+ called by dispatcher
+
+ do
+ display state change
+ process event
+ until SM is stable
+*/
+void cfm(struct s_smc *smc, int event)
+{
+ int state ; /* remember last state */
+ int cond ;
+ int oldstate ;
+
+ /* We will do the following: */
+ /* - compute the variable WC_Flag for every port (This is where */
+ /* we can extend the requested path checking !!) */
+ /* - do the old (SMT 6.2 like) state machine */
+ /* - do the resulting station states */
+
+ all_selection_criteria (smc);
+
+ /* We will check now whether a state transition is allowed or not */
+ /* - change the portstates */
+ cem_priv_state (smc, event);
+
+ oldstate = smc->mib.fddiSMTCF_State ;
+ do {
+ DB_CFM("CFM : state %s%s",
+ (smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "",
+ cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ;
+ DB_CFM(" event %s\n",cfm_events[event],0) ;
+ state = smc->mib.fddiSMTCF_State ;
+ cfm_fsm(smc,event) ;
+ event = 0 ;
+ } while (state != smc->mib.fddiSMTCF_State) ;
+
+#ifndef SLIM_SMT
+ /*
+ * check peer wrap condition
+ */
+ cond = FALSE ;
+ if ( (smc->mib.fddiSMTCF_State == SC9_C_WRAP_A &&
+ smc->y[PA].pc_mode == PM_PEER) ||
+ (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B &&
+ smc->y[PB].pc_mode == PM_PEER) ||
+ (smc->mib.fddiSMTCF_State == SC11_C_WRAP_S &&
+ smc->y[PS].pc_mode == PM_PEER &&
+ smc->y[PS].mib->fddiPORTNeighborType != TS ) ) {
+ cond = TRUE ;
+ }
+ if (cond != smc->mib.fddiSMTPeerWrapFlag)
+ smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
+
+#if 0
+ /*
+	 * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
+ * to the primary path.
+ */
+ /*
+ * path change
+ */
+ if (smc->mib.fddiSMTCF_State != oldstate) {
+ smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
+ }
+#endif
+#endif /* no SLIM_SMT */
+
+ /*
+ * set MAC port type
+ */
+ smc->mib.m[MAC0].fddiMACDownstreamPORTType =
+ cf_to_ptype[smc->mib.fddiSMTCF_State] ;
+ cfm_state_change(smc,(int)smc->mib.fddiSMTCF_State) ;
+}
+
+/*
+ process CFM event
+*/
+/*ARGSUSED1*/
+static void cfm_fsm(struct s_smc *smc, int cmd)
+{
+ switch(smc->mib.fddiSMTCF_State) {
+ case ACTIONS(SC0_ISOLATED) :
+ smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
+ smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
+ smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
+ smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ;
+ config_mux(smc,MUX_ISOLATE) ; /* configure PHY Mux */
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = FALSE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ /* Don't do the WC-Flag changing here */
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break;
+ case SC0_ISOLATED :
+ /*SC07*/
+ /*SAS port can be PA or PB ! */
+ if (smc->s.sas && (smc->y[PA].cf_join || smc->y[PA].cf_loop ||
+ smc->y[PB].cf_join || smc->y[PB].cf_loop)) {
+ GO_STATE(SC11_C_WRAP_S) ;
+ break ;
+ }
+ /*SC01*/
+ if ((smc->y[PA].cem_pst == CEM_PST_UP && smc->y[PA].cf_join &&
+ !smc->y[PA].wc_flag) || smc->y[PA].cf_loop) {
+ GO_STATE(SC9_C_WRAP_A) ;
+ break ;
+ }
+ /*SC02*/
+ if ((smc->y[PB].cem_pst == CEM_PST_UP && smc->y[PB].cf_join &&
+ !smc->y[PB].wc_flag) || smc->y[PB].cf_loop) {
+ GO_STATE(SC10_C_WRAP_B) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(SC9_C_WRAP_A) :
+ smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
+ smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
+ smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
+ smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
+ config_mux(smc,MUX_WRAPA) ; /* configure PHY mux */
+ if (smc->y[PA].cf_loop) {
+ smc->r.rm_join = FALSE ;
+ smc->r.rm_loop = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
+ }
+ if (smc->y[PA].cf_join) {
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ }
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break ;
+ case SC9_C_WRAP_A :
+ /*SC10*/
+ if ( (smc->y[PA].wc_flag || !smc->y[PA].cf_join) &&
+ !smc->y[PA].cf_loop ) {
+ GO_STATE(SC0_ISOLATED) ;
+ break ;
+ }
+ /*SC12*/
+ else if ( (smc->y[PB].cf_loop && smc->y[PA].cf_join &&
+ smc->y[PA].cem_pst == CEM_PST_UP) ||
+ ((smc->y[PB].cf_loop ||
+ (smc->y[PB].cf_join &&
+ smc->y[PB].cem_pst == CEM_PST_UP)) &&
+ (smc->y[PA].pc_mode == PM_TREE ||
+ smc->y[PB].pc_mode == PM_TREE))) {
+ smc->y[PA].scrub = TRUE ;
+ GO_STATE(SC10_C_WRAP_B) ;
+ break ;
+ }
+ /*SC14*/
+ else if (!smc->s.attach_s &&
+ smc->y[PA].cf_join &&
+ smc->y[PA].cem_pst == CEM_PST_UP &&
+ smc->y[PA].pc_mode == PM_PEER && smc->y[PB].cf_join &&
+ smc->y[PB].cem_pst == CEM_PST_UP &&
+ smc->y[PB].pc_mode == PM_PEER) {
+ smc->y[PA].scrub = TRUE ;
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC4_THRU_A) ;
+ break ;
+ }
+ /*SC15*/
+ else if ( smc->s.attach_s &&
+ smc->y[PA].cf_join &&
+ smc->y[PA].cem_pst == CEM_PST_UP &&
+ smc->y[PA].pc_mode == PM_PEER &&
+ smc->y[PB].cf_join &&
+ smc->y[PB].cem_pst == CEM_PST_UP &&
+ smc->y[PB].pc_mode == PM_PEER) {
+ smc->y[PA].scrub = TRUE ;
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC5_THRU_B) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(SC10_C_WRAP_B) :
+ smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
+ smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
+ smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
+ smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
+ config_mux(smc,MUX_WRAPB) ; /* configure PHY mux */
+ if (smc->y[PB].cf_loop) {
+ smc->r.rm_join = FALSE ;
+ smc->r.rm_loop = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
+ }
+ if (smc->y[PB].cf_join) {
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ }
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break ;
+ case SC10_C_WRAP_B :
+ /*SC20*/
+ if ( !smc->y[PB].cf_join && !smc->y[PB].cf_loop ) {
+ GO_STATE(SC0_ISOLATED) ;
+ break ;
+ }
+ /*SC21*/
+ else if ( smc->y[PA].cf_loop && smc->y[PA].pc_mode == PM_PEER &&
+ smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC9_C_WRAP_A) ;
+ break ;
+ }
+ /*SC24*/
+ else if (!smc->s.attach_s &&
+ smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
+ smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
+ smc->y[PA].scrub = TRUE ;
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC4_THRU_A) ;
+ break ;
+ }
+ /*SC25*/
+ else if ( smc->s.attach_s &&
+ smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
+ smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
+ smc->y[PA].scrub = TRUE ;
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC5_THRU_B) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(SC4_THRU_A) :
+ smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
+ smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
+ smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
+ smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
+ config_mux(smc,MUX_THRUA) ; /* configure PHY mux */
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break ;
+ case SC4_THRU_A :
+ /*SC41*/
+ if (smc->y[PB].wc_flag || !smc->y[PB].cf_join) {
+ smc->y[PA].scrub = TRUE ;
+ GO_STATE(SC9_C_WRAP_A) ;
+ break ;
+ }
+ /*SC42*/
+ else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC10_C_WRAP_B) ;
+ break ;
+ }
+ /*SC45*/
+ else if (smc->s.attach_s) {
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC5_THRU_B) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(SC5_THRU_B) :
+ smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
+ smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
+ smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
+ smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
+ config_mux(smc,MUX_THRUB) ; /* configure PHY mux */
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break ;
+ case SC5_THRU_B :
+ /*SC51*/
+ if (!smc->y[PB].cf_join || smc->y[PB].wc_flag) {
+ smc->y[PA].scrub = TRUE ;
+ GO_STATE(SC9_C_WRAP_A) ;
+ break ;
+ }
+ /*SC52*/
+ else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
+ smc->y[PB].scrub = TRUE ;
+ GO_STATE(SC10_C_WRAP_B) ;
+ break ;
+ }
+ /*SC54*/
+ else if (!smc->s.attach_s) {
+ smc->y[PA].scrub = TRUE ;
+ GO_STATE(SC4_THRU_A) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(SC11_C_WRAP_S) :
+ smc->mib.p[PS].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
+ smc->mib.p[PS].fddiPORTMACPlacement = INDEX_MAC ;
+ smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
+ config_mux(smc,MUX_WRAPS) ; /* configure PHY mux */
+ if (smc->y[PA].cf_loop || smc->y[PB].cf_loop) {
+ smc->r.rm_join = FALSE ;
+ smc->r.rm_loop = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
+ }
+ if (smc->y[PA].cf_join || smc->y[PB].cf_join) {
+ smc->r.rm_loop = FALSE ;
+ smc->r.rm_join = TRUE ;
+ queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
+ }
+ ACTIONS_DONE() ;
+ DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+ break ;
+ case SC11_C_WRAP_S :
+ /*SC70*/
+ if ( !smc->y[PA].cf_join && !smc->y[PA].cf_loop &&
+ !smc->y[PB].cf_join && !smc->y[PB].cf_loop) {
+ GO_STATE(SC0_ISOLATED) ;
+ break ;
+ }
+ break ;
+ default:
+ SMT_PANIC(smc,SMT_E0106, SMT_E0106_MSG) ;
+ break;
+ }
+}
+
+/*
+ * get MAC's input Port
+ * return :
+ * PA or PB
+ */
+int cfm_get_mac_input(struct s_smc *smc)
+{
+ return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
+ smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA) ;
+}
+
+/*
+ * get MAC's output Port
+ * return :
+ * PA or PB
+ */
+int cfm_get_mac_output(struct s_smc *smc)
+{
+ return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
+ smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA) ;
+}
+
+static char path_iso[] = {
+ 0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
+ 0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
+} ;
+
+static char path_wrap_a[] = {
+ 0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
+ 0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
+} ;
+
+static char path_wrap_b[] = {
+ 0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
+ 0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO
+} ;
+
+static char path_thru[] = {
+ 0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
+ 0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM
+} ;
+
+static char path_wrap_s[] = {
+ 0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_PRIM,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
+} ;
+
+static char path_iso_s[] = {
+ 0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_ISO,
+ 0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
+} ;
+
+int cem_build_path(struct s_smc *smc, char *to, int path_index)
+{
+ char *path ;
+ int len ;
+
+ switch (smc->mib.fddiSMTCF_State) {
+ default :
+ case SC0_ISOLATED :
+ path = smc->s.sas ? path_iso_s : path_iso ;
+ len = smc->s.sas ? sizeof(path_iso_s) : sizeof(path_iso) ;
+ break ;
+ case SC9_C_WRAP_A :
+ path = path_wrap_a ;
+ len = sizeof(path_wrap_a) ;
+ break ;
+ case SC10_C_WRAP_B :
+ path = path_wrap_b ;
+ len = sizeof(path_wrap_b) ;
+ break ;
+ case SC4_THRU_A :
+ path = path_thru ;
+ len = sizeof(path_thru) ;
+ break ;
+ case SC11_C_WRAP_S :
+ path = path_wrap_s ;
+ len = sizeof(path_wrap_s) ;
+ break ;
+ }
+ memcpy(to,path,len) ;
+
+ LINT_USE(path_index);
+
+ return(len) ;
+}
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
new file mode 100644
index 000000000000..052e841ba187
--- /dev/null
+++ b/drivers/net/skfp/drvfbi.c
@@ -0,0 +1,1529 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * FBI board dependent Driver for SMT and LLC
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/supern_2.h"
+#include "h/skfbiinc.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
+#endif
+
+/*
+ * PCM active state
+ */
+#define PC8_ACTIVE 8
+
+#define LED_Y_ON 0x11 /* Used for ring up/down indication */
+#define LED_Y_OFF 0x10
+
+
+#define MS2BCLK(x) ((x)*12500L)
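+/* e.g. MS2BCLK(10) == 125000 bus clocks, i.e. 10 ms at a 12.5 MHz bus clock */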
+
+/*
+ * valid configuration values are:
+ */
+#ifdef ISA
+const int opt_ints[] = {8, 3, 4, 5, 9, 10, 11, 12, 15} ;
+const int opt_iops[] = {8,
+ 0x100, 0x120, 0x180, 0x1a0, 0x220, 0x240, 0x320, 0x340};
+const int opt_dmas[] = {4, 3, 5, 6, 7} ;
+const int opt_eproms[] = {15, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce,
+ 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ;
+#endif
+#ifdef EISA
+const int opt_ints[] = {5, 9, 10, 11} ;
+const int opt_dmas[] = {0, 5, 6, 7} ;
+const int opt_eproms[] = {0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce,
+ 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ;
+#endif
+
+#ifdef MCA
+int opt_ints[] = {3, 11, 10, 9} ; /* FM1 */
+int opt_eproms[] = {0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4, 0xd8, 0xdc} ;
+#endif /* MCA */
+
+/*
+ * xPOS_ID:xxxx
+ * | \ /
+ * | \/
+ * | --------------------- the patched POS_ID of the Adapter
+ * | xxxx = (Vendor ID low byte,
+ * | Vendor ID high byte,
+ * | Device ID low byte,
+ * | Device ID high byte)
+ * +------------------------------ the patched oem_id must be
+ * 'S' for SK or 'I' for IBM
+ * this is a short id for the driver.
+ */
+#ifndef MULT_OEM
+#ifndef OEM_CONCEPT
+#ifndef MCA
+const u_char oem_id[] = "xPOS_ID:xxxx" ;
+#else
+const u_char oem_id[] = "xPOSID1:xxxx" ; /* FM1 card id. */
+#endif
+#else /* OEM_CONCEPT */
+#ifndef MCA
+const u_char oem_id[] = OEM_ID ;
+#else
+const u_char oem_id[] = OEM_ID1 ; /* FM1 card id. */
+#endif /* MCA */
+#endif /* OEM_CONCEPT */
+#define ID_BYTE0 8
+#define OEMID(smc,i) oem_id[ID_BYTE0 + i]
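+/*
+ * ID_BYTE0 (8) skips the 8-character "xPOS_ID:" / "xPOSID1:" prefix, so
+ * OEMID(smc,0..3) selects the four patched vendor/device ID bytes
+ * ("xxxx" above).
+ */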
+#else /* MULT_OEM */
+const struct s_oem_ids oem_ids[] = {
+#include "oemids.h"
+{0}
+};
+#define OEMID(smc,i) smc->hw.oem_id->oi_id[i]
+#endif /* MULT_OEM */
+
+/* Prototypes of external functions */
+#ifdef AIX
+extern int AIX_vpdReadByte() ;
+#endif
+
+
+/* Prototypes of local functions. */
+void smt_stop_watchdog(struct s_smc *smc);
+
+#ifdef MCA
+static int read_card_id() ;
+static void DisableSlotAccess() ;
+static void EnableSlotAccess() ;
+#ifdef AIX
+extern int attach_POS_addr() ;
+extern int detach_POS_addr() ;
+extern u_char read_POS() ;
+extern void write_POS() ;
+extern int AIX_vpdReadByte() ;
+#else
+#define read_POS(smc,a1,a2) ((u_char) inp(a1))
+#define write_POS(smc,a1,a2,a3) outp((a1),(a3))
+#endif
+#endif /* MCA */
+
+
+/*
+ * FDDI card reset
+ */
+static void card_start(struct s_smc *smc)
+{
+ int i ;
+#ifdef PCI
+ u_char rev_id ;
+ u_short word;
+#endif
+
+ smt_stop_watchdog(smc) ;
+
+#ifdef ISA
+ outpw(CSR_A,0) ; /* reset for all chips */
+ for (i = 10 ; i ; i--) /* delay for PLC's */
+ (void)inpw(ISR_A) ;
+ OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(2)) ;
+ /* counter 2, mode 2 */
+ OUT_82c54_TIMER(2,97) ; /* LSB */
+ OUT_82c54_TIMER(2,0) ; /* MSB ( 15.6 us ) */
+ outpw(CSR_A,CS_CRESET) ;
+#endif
+#ifdef EISA
+ outpw(CSR_A,0) ; /* reset for all chips */
+ for (i = 10 ; i ; i--) /* delay for PLC's */
+ (void)inpw(ISR_A) ;
+ outpw(CSR_A,CS_CRESET) ;
+ smc->hw.led = (2<<6) ;
+ outpw(CSR_A,CS_CRESET | smc->hw.led) ;
+#endif
+#ifdef MCA
+ outp(ADDR(CARD_DIS),0) ; /* reset for all chips */
+ for (i = 10 ; i ; i--) /* delay for PLC's */
+ (void)inpw(ISR_A) ;
+ outp(ADDR(CARD_EN),0) ;
+ /* first I/O after reset must not be a access to FORMAC or PLC */
+
+ /*
+ * bus timeout (MCA)
+ */
+ OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(3)) ;
+ /* counter 2, mode 3 */
+ OUT_82c54_TIMER(2,(2*24)) ; /* 3.9 us * 2 square wave */
+ OUT_82c54_TIMER(2,0) ; /* MSB */
+
+	/* POS 102 indicates active Check Line or Bus Error monitoring */
+ if (inpw(CSA_A) & (POS_EN_CHKINT | POS_EN_BUS_ERR)) {
+ outp(ADDR(IRQ_CHCK_EN),0) ;
+ }
+
+ if (!((i = inpw(CSR_A)) & CS_SAS)) {
+ if (!(i & CS_BYSTAT)) {
+ outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */
+ }
+ }
+ outpw(LEDR_A,LED_1) ; /* yellow */
+#endif /* MCA */
+#ifdef PCI
+ /*
+ * make sure no transfer activity is pending
+ */
+ outpw(FM_A(FM_MDREG1),FM_MINIT) ;
+ outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
+ hwt_wait_time(smc,hwt_quick_read(smc),MS2BCLK(10)) ;
+ /*
+ * now reset everything
+ */
+ outp(ADDR(B0_CTRL),CTRL_RST_SET) ; /* reset for all chips */
+ i = (int) inp(ADDR(B0_CTRL)) ; /* do dummy read */
+ SK_UNUSED(i) ; /* Make LINT happy. */
+ outp(ADDR(B0_CTRL), CTRL_RST_CLR) ;
+
+ /*
+ * Reset all bits in the PCI STATUS register
+ */
+ outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_ON) ; /* enable for writes */
+ word = inpw(PCI_C(PCI_STATUS)) ;
+ outpw(PCI_C(PCI_STATUS), word | PCI_ERRBITS) ;
+ outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_OFF) ; /* disable writes */
+
+ /*
+ * Release the reset of all the State machines
+ * Release Master_Reset
+ * Release HPI_SM_Reset
+ */
+ outp(ADDR(B0_CTRL), CTRL_MRST_CLR|CTRL_HPI_CLR) ;
+
+ /*
+ * determine the adapter type
+	 * Note: Do it here, because some drivers may call card_start() once
+	 *	 at the very beginning, before any other initialization
+	 *	 function is executed.
+ */
+ rev_id = inp(PCI_C(PCI_REV_ID)) ;
+ if ((rev_id & 0xf0) == SK_ML_ID_1 || (rev_id & 0xf0) == SK_ML_ID_2) {
+ smc->hw.hw_is_64bit = TRUE ;
+ } else {
+ smc->hw.hw_is_64bit = FALSE ;
+ }
+
+ /*
+ * Watermark initialization
+ */
+ if (!smc->hw.hw_is_64bit) {
+ outpd(ADDR(B4_R1_F), RX_WATERMARK) ;
+ outpd(ADDR(B5_XA_F), TX_WATERMARK) ;
+ outpd(ADDR(B5_XS_F), TX_WATERMARK) ;
+ }
+
+ outp(ADDR(B0_CTRL),CTRL_RST_CLR) ; /* clear the reset chips */
+	outp(ADDR(B0_LED),LED_GA_OFF|LED_MY_ON|LED_GB_OFF) ;	/* yellow LED on */
+
+	/* init the timer value for the watchdog: 2.5 minutes */
+ outpd(ADDR(B2_WDOG_INI),0x6FC23AC0) ;
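+	/*
+	 * 0x6FC23AC0 == 1 875 000 000; at the 12.5 MHz clock implied by
+	 * MS2BCLK() this is 150 s, i.e. the 2.5 minutes noted above
+	 * (assuming the watchdog runs on the same clock).
+	 */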
+
+ /* initialize the ISR mask */
+ smc->hw.is_imask = ISR_MASK ;
+ smc->hw.hw_state = STOPPED ;
+#endif
+ GET_PAGE(0) ; /* necessary for BOOT */
+}
+
+void card_stop(struct s_smc *smc)
+{
+ smt_stop_watchdog(smc) ;
+ smc->hw.mac_ring_is_up = 0 ; /* ring down */
+#ifdef ISA
+ outpw(CSR_A,0) ; /* reset for all chips */
+#endif
+#ifdef EISA
+ outpw(CSR_A,0) ; /* reset for all chips */
+#endif
+#ifdef MCA
+ outp(ADDR(CARD_DIS),0) ; /* reset for all chips */
+#endif
+#ifdef PCI
+ /*
+ * make sure no transfer activity is pending
+ */
+ outpw(FM_A(FM_MDREG1),FM_MINIT) ;
+ outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
+ hwt_wait_time(smc,hwt_quick_read(smc),MS2BCLK(10)) ;
+ /*
+ * now reset everything
+ */
+ outp(ADDR(B0_CTRL),CTRL_RST_SET) ; /* reset for all chips */
+ outp(ADDR(B0_CTRL),CTRL_RST_CLR) ; /* reset for all chips */
+ outp(ADDR(B0_LED),LED_GA_OFF|LED_MY_OFF|LED_GB_OFF) ; /* all LEDs off */
+ smc->hw.hw_state = STOPPED ;
+#endif
+}
+/*--------------------------- ISR handling ----------------------------------*/
+
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl)
+{
+ int restart_tx = 0 ;
+again:
+#ifndef PCI
+#ifndef ISA
+/*
+ * FORMAC+ bug: the queue pointer gets modified if many read/write accesses happen!?
+ */
+ if (stl & (FM_SPCEPDS | /* parit/coding err. syn.q.*/
+ FM_SPCEPDA0 | /* parit/coding err. a.q.0 */
+ FM_SPCEPDA1 | /* parit/coding err. a.q.1 */
+ FM_SPCEPDA2)) { /* parit/coding err. a.q.2 */
+ SMT_PANIC(smc,SMT_E0132, SMT_E0132_MSG) ;
+ }
+ if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/
+ FM_STBURA0 | /* tx buffer underrun a.q.0 */
+ FM_STBURA1 | /* tx buffer underrun a.q.1 */
+ FM_STBURA2)) { /* tx buffer underrun a.q.2 */
+ SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ;
+ }
+#endif
+ if ( (stu & (FM_SXMTABT | /* transmit abort */
+#ifdef SYNC
+ FM_STXABRS | /* syn. tx abort */
+#endif /* SYNC */
+ FM_STXABRA0)) || /* asyn. tx abort */
+ (stl & (FM_SQLCKS | /* lock for syn. q. */
+ FM_SQLCKA0)) ) { /* lock for asyn. q. */
+ formac_tx_restart(smc) ; /* init tx */
+ restart_tx = 1 ;
+ stu = inpw(FM_A(FM_ST1U)) ;
+ stl = inpw(FM_A(FM_ST1L)) ;
+ stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ;
+ if (stu || stl)
+ goto again ;
+ }
+
+#ifndef SYNC
+ if (stu & (FM_STECFRMA0 | /* end of chain asyn tx */
+ FM_STEFRMA0)) { /* end of frame asyn tx */
+ /* free tx_queue */
+ smc->hw.n_a_send = 0 ;
+ if (++smc->hw.fp.tx_free < smc->hw.fp.tx_max) {
+ start_next_send(smc);
+ }
+ restart_tx = 1 ;
+ }
+#else /* SYNC */
+ if (stu & (FM_STEFRMA0 | /* end of asyn tx */
+ FM_STEFRMS)) { /* end of sync tx */
+ restart_tx = 1 ;
+ }
+#endif /* SYNC */
+ if (restart_tx)
+ llc_restart_tx(smc) ;
+}
+#else /* PCI */
+
+ /*
+ * parity error: note encoding error is not possible in tag mode
+ */
+ if (stl & (FM_SPCEPDS | /* parity err. syn.q.*/
+ FM_SPCEPDA0 | /* parity err. a.q.0 */
+ FM_SPCEPDA1)) { /* parity err. a.q.1 */
+ SMT_PANIC(smc,SMT_E0134, SMT_E0134_MSG) ;
+ }
+ /*
+ * buffer underrun: can only occur if a tx threshold is specified
+ */
+ if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/
+ FM_STBURA0 | /* tx buffer underrun a.q.0 */
+		   FM_STBURA1)) {	/* tx buffer underrun a.q.1 */
+ SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ;
+ }
+
+ if ( (stu & (FM_SXMTABT | /* transmit abort */
+ FM_STXABRS | /* syn. tx abort */
+ FM_STXABRA0)) || /* asyn. tx abort */
+ (stl & (FM_SQLCKS | /* lock for syn. q. */
+ FM_SQLCKA0)) ) { /* lock for asyn. q. */
+ formac_tx_restart(smc) ; /* init tx */
+ restart_tx = 1 ;
+ stu = inpw(FM_A(FM_ST1U)) ;
+ stl = inpw(FM_A(FM_ST1L)) ;
+ stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ;
+ if (stu || stl)
+ goto again ;
+ }
+
+ if (stu & (FM_STEFRMA0 | /* end of asyn tx */
+ FM_STEFRMS)) { /* end of sync tx */
+ restart_tx = 1 ;
+ }
+
+ if (restart_tx)
+ llc_restart_tx(smc) ;
+}
+#endif /* PCI */
+/*
+ * interrupt source= plc1
+ * this function is called in nwfbisr.asm
+ */
+void plc1_irq(struct s_smc *smc)
+{
+ u_short st = inpw(PLC(PB,PL_INTR_EVENT)) ;
+
+#if (defined(ISA) || defined(EISA))
+ /* reset PLC Int. bits */
+ outpw(PLC1_I,inpw(PLC1_I)) ;
+#endif
+ plc_irq(smc,PB,st) ;
+}
+
+/*
+ * interrupt source= plc2
+ * this function is called in nwfbisr.asm
+ */
+void plc2_irq(struct s_smc *smc)
+{
+ u_short st = inpw(PLC(PA,PL_INTR_EVENT)) ;
+
+#if (defined(ISA) || defined(EISA))
+ /* reset PLC Int. bits */
+ outpw(PLC2_I,inpw(PLC2_I)) ;
+#endif
+ plc_irq(smc,PA,st) ;
+}
+
+
+/*
+ * interrupt source= timer
+ */
+void timer_irq(struct s_smc *smc)
+{
+ hwt_restart(smc);
+ smc->hw.t_stop = smc->hw.t_start;
+ smt_timer_done(smc) ;
+}
+
+/*
+ * return S-port (PA or PB)
+ */
+int pcm_get_s_port(struct s_smc *smc)
+{
+ SK_UNUSED(smc) ;
+ return(PS) ;
+}
+
+/*
+ * Station Label = "FDDI-XYZ" where
+ *
+ * X = connector type
+ * Y = PMD type
+ * Z = port type
+ */
+#define STATION_LABEL_CONNECTOR_OFFSET 5
+#define STATION_LABEL_PMD_OFFSET 6
+#define STATION_LABEL_PORT_OFFSET 7
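+/* "FDDI-" is 5 characters, so offsets 5, 6 and 7 address X, Y and Z above */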
+
+void read_address(struct s_smc *smc, u_char *mac_addr)
+{
+ char ConnectorType ;
+ char PmdType ;
+ int i ;
+
+ extern const u_char canonical[256] ;
+
+#if (defined(ISA) || defined(MCA))
+ for (i = 0; i < 4 ;i++) { /* read mac address from board */
+ smc->hw.fddi_phys_addr.a[i] =
+ canonical[(inpw(PR_A(i+SA_MAC))&0xff)] ;
+ }
+ for (i = 4; i < 6; i++) {
+ smc->hw.fddi_phys_addr.a[i] =
+ canonical[(inpw(PR_A(i+SA_MAC+PRA_OFF))&0xff)] ;
+ }
+#endif
+#ifdef EISA
+ /*
+	 * Note: We get trouble on an Alpha machine if we use inpw()
+	 *	 instead of inp()
+ */
+ for (i = 0; i < 4 ;i++) { /* read mac address from board */
+ smc->hw.fddi_phys_addr.a[i] =
+ canonical[inp(PR_A(i+SA_MAC))] ;
+ }
+ for (i = 4; i < 6; i++) {
+ smc->hw.fddi_phys_addr.a[i] =
+ canonical[inp(PR_A(i+SA_MAC+PRA_OFF))] ;
+ }
+#endif
+#ifdef PCI
+ for (i = 0; i < 6; i++) { /* read mac address from board */
+ smc->hw.fddi_phys_addr.a[i] =
+ canonical[inp(ADDR(B2_MAC_0+i))] ;
+ }
+#endif
+#ifndef PCI
+ ConnectorType = inpw(PR_A(SA_PMD_TYPE)) & 0xff ;
+ PmdType = inpw(PR_A(SA_PMD_TYPE+1)) & 0xff ;
+#else
+ ConnectorType = inp(ADDR(B2_CONN_TYP)) ;
+ PmdType = inp(ADDR(B2_PMD_TYP)) ;
+#endif
+
+ smc->y[PA].pmd_type[PMD_SK_CONN] =
+ smc->y[PB].pmd_type[PMD_SK_CONN] = ConnectorType ;
+ smc->y[PA].pmd_type[PMD_SK_PMD ] =
+ smc->y[PB].pmd_type[PMD_SK_PMD ] = PmdType ;
+
+ if (mac_addr) {
+ for (i = 0; i < 6 ;i++) {
+ smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
+ smc->hw.fddi_home_addr.a[i] = canonical[mac_addr[i]] ;
+ }
+ return ;
+ }
+ smc->hw.fddi_home_addr = smc->hw.fddi_phys_addr ;
+
+ for (i = 0; i < 6 ;i++) {
+ smc->hw.fddi_canon_addr.a[i] =
+ canonical[smc->hw.fddi_phys_addr.a[i]] ;
+ }
+}
+
+/*
+ * FDDI card soft reset
+ */
+void init_board(struct s_smc *smc, u_char *mac_addr)
+{
+ card_start(smc) ;
+ read_address(smc,mac_addr) ;
+
+#ifndef PCI
+ if (inpw(CSR_A) & CS_SAS)
+#else
+ if (!(inp(ADDR(B0_DAS)) & DAS_AVAIL))
+#endif
+ smc->s.sas = SMT_SAS ; /* Single att. station */
+ else
+ smc->s.sas = SMT_DAS ; /* Dual att. station */
+
+#ifndef PCI
+ if (inpw(CSR_A) & CS_BYSTAT)
+#else
+ if (!(inp(ADDR(B0_DAS)) & DAS_BYP_ST))
+#endif
+ smc->mib.fddiSMTBypassPresent = 0 ;
+ /* without opt. bypass */
+ else
+ smc->mib.fddiSMTBypassPresent = 1 ;
+ /* with opt. bypass */
+}
+
+/*
+ * insert or deinsert optical bypass (called by ECM)
+ */
+void sm_pm_bypass_req(struct s_smc *smc, int mode)
+{
+#if (defined(ISA) || defined(EISA))
+ int csra_v ;
+#endif
+
+ DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ?
+ "BP_INSERT" : "BP_DEINSERT",0) ;
+
+ if (smc->s.sas != SMT_DAS)
+ return ;
+
+#if (defined(ISA) || defined(EISA))
+
+ csra_v = inpw(CSR_A) & ~CS_BYPASS ;
+#ifdef EISA
+ csra_v |= smc->hw.led ;
+#endif
+
+ switch(mode) {
+ case BP_INSERT :
+ outpw(CSR_A,csra_v | CS_BYPASS) ;
+ break ;
+ case BP_DEINSERT :
+ outpw(CSR_A,csra_v) ;
+ break ;
+ }
+#endif /* ISA / EISA */
+#ifdef MCA
+ switch(mode) {
+ case BP_INSERT :
+ outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */
+ break ;
+ case BP_DEINSERT :
+ outp(ADDR(BYPASS(STAT_BYP)),0) ; /* bypass station */
+ break ;
+ }
+#endif
+#ifdef PCI
+ switch(mode) {
+ case BP_INSERT :
+ outp(ADDR(B0_DAS),DAS_BYP_INS) ; /* insert station */
+ break ;
+ case BP_DEINSERT :
+ outp(ADDR(B0_DAS),DAS_BYP_RMV) ; /* bypass station */
+ break ;
+ }
+#endif
+}
+
+/*
+ * check if bypass connected
+ */
+int sm_pm_bypass_present(struct s_smc *smc)
+{
+#ifndef PCI
+ return( (inpw(CSR_A) & CS_BYSTAT) ? FALSE : TRUE ) ;
+#else
+ return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ;
+#endif
+}
+
+void plc_clear_irq(struct s_smc *smc, int p)
+{
+ SK_UNUSED(p) ;
+
+#if (defined(ISA) || defined(EISA))
+ switch (p) {
+ case PA :
+ /* reset PLC Int. bits */
+ outpw(PLC2_I,inpw(PLC2_I)) ;
+ break ;
+ case PB :
+ /* reset PLC Int. bits */
+ outpw(PLC1_I,inpw(PLC1_I)) ;
+ break ;
+ }
+#else
+ SK_UNUSED(smc) ;
+#endif
+}
+
+
+/*
+ * led_indication called by rmt_indication() and
+ * pcm_state_change()
+ *
+ * Input:
+ * smc: SMT context
+ * led_event:
+ * 0 Only switch green LEDs according to their respective PCM state
+ * LED_Y_OFF just switch yellow LED off
+ *	LED_Y_ON	just switch yellow LED on
+ */
+void led_indication(struct s_smc *smc, int led_event)
+{
+ /* use smc->hw.mac_ring_is_up == TRUE
+ * as indication for Ring Operational
+ */
+ u_short led_state ;
+ struct s_phy *phy ;
+ struct fddi_mib_p *mib_a ;
+ struct fddi_mib_p *mib_b ;
+
+ phy = &smc->y[PA] ;
+ mib_a = phy->mib ;
+ phy = &smc->y[PB] ;
+ mib_b = phy->mib ;
+
+#ifdef EISA
+ /* Ring up = yellow led OFF*/
+ if (led_event == LED_Y_ON) {
+ smc->hw.led |= CS_LED_1 ;
+ }
+ else if (led_event == LED_Y_OFF) {
+ smc->hw.led &= ~CS_LED_1 ;
+ }
+ else {
+ /* Link at Port A or B = green led ON */
+ if (mib_a->fddiPORTPCMState == PC8_ACTIVE ||
+ mib_b->fddiPORTPCMState == PC8_ACTIVE) {
+ smc->hw.led |= CS_LED_0 ;
+ }
+ else {
+ smc->hw.led &= ~CS_LED_0 ;
+ }
+ }
+#endif
+#ifdef MCA
+ led_state = inpw(LEDR_A) ;
+
+ /* Ring up = yellow led OFF*/
+ if (led_event == LED_Y_ON) {
+ led_state |= LED_1 ;
+ }
+ else if (led_event == LED_Y_OFF) {
+ led_state &= ~LED_1 ;
+ }
+ else {
+ led_state &= ~(LED_2|LED_0) ;
+
+ /* Link at Port A = green led A ON */
+ if (mib_a->fddiPORTPCMState == PC8_ACTIVE) {
+ led_state |= LED_2 ;
+ }
+
+ /* Link at Port B/S = green led B ON */
+ if (mib_b->fddiPORTPCMState == PC8_ACTIVE) {
+ led_state |= LED_0 ;
+ }
+ }
+
+ outpw(LEDR_A, led_state) ;
+#endif /* MCA */
+#ifdef PCI
+ led_state = 0 ;
+
+ /* Ring up = yellow led OFF*/
+ if (led_event == LED_Y_ON) {
+ led_state |= LED_MY_ON ;
+ }
+ else if (led_event == LED_Y_OFF) {
+ led_state |= LED_MY_OFF ;
+ }
+ else { /* PCM state changed */
+ /* Link at Port A/S = green led A ON */
+ if (mib_a->fddiPORTPCMState == PC8_ACTIVE) {
+ led_state |= LED_GA_ON ;
+ }
+ else {
+ led_state |= LED_GA_OFF ;
+ }
+
+ /* Link at Port B = green led B ON */
+ if (mib_b->fddiPORTPCMState == PC8_ACTIVE) {
+ led_state |= LED_GB_ON ;
+ }
+ else {
+ led_state |= LED_GB_OFF ;
+ }
+ }
+
+ outp(ADDR(B0_LED), led_state) ;
+#endif /* PCI */
+
+}
+
+
+void pcm_state_change(struct s_smc *smc, int plc, int p_state)
+{
+ /*
+	 * the current implementation of pcm_state_change() in the driver
+	 * parts must be renamed to drv_pcm_state_change(), which is now
+	 * called after led_indication().
+ */
+ DRV_PCM_STATE_CHANGE(smc,plc,p_state) ;
+
+ led_indication(smc,0) ;
+}
+
+
+void rmt_indication(struct s_smc *smc, int i)
+{
+ /* Call a driver special function if defined */
+ DRV_RMT_INDICATION(smc,i) ;
+
+ led_indication(smc, i ? LED_Y_OFF : LED_Y_ON) ;
+}
+
+
+/*
+ * llc_recover_tx called by init_tx (fplus.c)
+ */
+void llc_recover_tx(struct s_smc *smc)
+{
+#ifdef LOAD_GEN
+ extern int load_gen_flag ;
+
+ load_gen_flag = 0 ;
+#endif
+#ifndef SYNC
+ smc->hw.n_a_send= 0 ;
+#else
+ SK_UNUSED(smc) ;
+#endif
+}
+
+/*--------------------------- DMA init ----------------------------*/
+#ifdef ISA
+
+/*
+ * init DMA
+ */
+void init_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+
+ /*
+ * set cascade mode,
+	 * clear mask bit (enable DMA channel)
+ */
+ if (dma > 3) {
+ outp(0xd6,(dma & 0x03) | 0xc0) ;
+ outp(0xd4, dma & 0x03) ;
+ }
+ else {
+ outp(0x0b,(dma & 0x03) | 0xc0) ;
+ outp(0x0a,dma & 0x03) ;
+ }
+}
+
+/*
+ * disable DMA
+ */
+void dis_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+
+ /*
+	 * set mask bit (disable DMA channel)
+ */
+ if (dma > 3) {
+ outp(0xd4,(dma & 0x03) | 0x04) ;
+ }
+ else {
+ outp(0x0a,(dma & 0x03) | 0x04) ;
+ }
+}
+
+#endif /* ISA */
+
+#ifdef EISA
+
+/*arrays with io addresses of dma controller length and address registers*/
+static const int cntr[8] = { 0x001,0x003,0x005,0x007,0,0x0c6,0x0ca,0x0ce } ;
+static const int base[8] = { 0x000,0x002,0x004,0x006,0,0x0c4,0x0c8,0x0cc } ;
+static const int page[8] = { 0x087,0x083,0x081,0x082,0,0x08b,0x089,0x08a } ;
+
+void init_dma(struct s_smc *smc, int dma)
+{
+ /*
+ * extended mode register
+ * 32 bit IO
+ * type c
+ * TC output
+ * disable stop
+ */
+
+ /* mode read (write) demand */
+ smc->hw.dma_rmode = (dma & 3) | 0x08 | 0x0 ;
+ smc->hw.dma_wmode = (dma & 3) | 0x04 | 0x0 ;
+
+ /* 32 bit IO's, burst DMA mode (type "C") */
+ smc->hw.dma_emode = (dma & 3) | 0x08 | 0x30 ;
+
+ outp((dma < 4) ? 0x40b : 0x4d6,smc->hw.dma_emode) ;
+
+ /* disable chaining */
+ outp((dma < 4) ? 0x40a : 0x4d4,(dma&3)) ;
+
+ /*load dma controller addresses for fast access during set dma*/
+ smc->hw.dma_base_word_count = cntr[smc->hw.dma];
+ smc->hw.dma_base_address = base[smc->hw.dma];
+ smc->hw.dma_base_address_page = page[smc->hw.dma];
+
+}
+
+void dis_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+
+ outp((dma < 4) ? 0x0a : 0xd4,(dma&3)|4) ;/* mask bit */
+}
+#endif /* EISA */
+
+#ifdef MCA
+void init_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(dma) ;
+}
+
+void dis_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(dma) ;
+}
+#endif
+
+#ifdef PCI
+void init_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(dma) ;
+}
+
+void dis_dma(struct s_smc *smc, int dma)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(dma) ;
+}
+#endif
+
+#ifdef MULT_OEM
+static int is_equal_num(char comp1[], char comp2[], int num)
+{
+ int i ;
+
+ for (i = 0 ; i < num ; i++) {
+ if (comp1[i] != comp2[i])
+ return (0) ;
+ }
+ return (1) ;
+} /* is_equal_num */
+
+
+/*
+ * set the OEM ID defaults, and test the contents of the OEM data base
+ * The default OEM is the first ACTIVE entry in the OEM data base
+ *
+ * returns: 0 success
+ * 1 error in data base
+ * 2 data base empty
+ * 3 no active entry
+ */
+int set_oi_id_def(struct s_smc *smc)
+{
+ int sel_id ;
+ int i ;
+ int act_entries ;
+
+ i = 0 ;
+ sel_id = -1 ;
+ act_entries = FALSE ;
+ smc->hw.oem_id = 0 ;
+ smc->hw.oem_min_status = OI_STAT_ACTIVE ;
+
+ /* check OEM data base */
+ while (oem_ids[i].oi_status) {
+ switch (oem_ids[i].oi_status) {
+ case OI_STAT_ACTIVE:
+ act_entries = TRUE ; /* we have active IDs */
+ if (sel_id == -1)
+ sel_id = i ; /* save the first active ID */
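+			/* fall through: an active entry is also handled as valid */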
+ case OI_STAT_VALID:
+ case OI_STAT_PRESENT:
+ i++ ;
+ break ; /* entry ok */
+ default:
+ return (1) ; /* invalid oi_status */
+ }
+ }
+
+ if (i == 0)
+ return (2) ;
+ if (!act_entries)
+ return (3) ;
+
+ /* ok, we have a valid OEM data base with an active entry */
+ smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ;
+ return (0) ;
+}
+#endif /* MULT_OEM */
+
+
+#ifdef MCA
+/************************
+ *
+ * BEGIN_MANUAL_ENTRY()
+ *
+ * exist_board
+ *
+ * Check if an MCA board is present in the specified slot.
+ *
+ * int exist_board(
+ * struct s_smc *smc,
+ * int slot) ;
+ * In
+ * smc - A pointer to the SMT Context struct.
+ *
+ * slot - The number of the slot to inspect.
+ * Out
+ * 0 = No adapter present.
+ * 1 = Found FM1 adapter.
+ *
+ * Pseudo
+ * Read MCA ID
+ * for all valid OEM_IDs
+ * compare with ID read
+ * if equal, return 1
+ *	return(0)
+ *
+ * Note
+ * The smc pointer must be valid now.
+ *
+ * END_MANUAL_ENTRY()
+ *
+ ************************/
+#define LONG_CARD_ID(lo, hi) ((((hi) & 0xff) << 8) | ((lo) & 0xff))
+int exist_board(struct s_smc *smc, int slot)
+{
+#ifdef MULT_OEM
+ SK_LOC_DECL(u_char,id[2]) ;
+ int idi ;
+#endif /* MULT_OEM */
+
+ /* No longer valid. */
+ if (smc == NULL)
+ return(0) ;
+
+#ifndef MULT_OEM
+ if (read_card_id(smc, slot)
+ == LONG_CARD_ID(OEMID(smc,0), OEMID(smc,1)))
+ return (1) ; /* Found FM adapter. */
+
+#else /* MULT_OEM */
+ idi = read_card_id(smc, slot) ;
+ id[0] = idi & 0xff ;
+ id[1] = idi >> 8 ;
+
+ smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ;
+ for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) {
+ if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status)
+ continue ;
+
+ if (is_equal_num(&id[0],&OEMID(smc,0),2))
+ return (1) ;
+ }
+#endif /* MULT_OEM */
+ return (0) ; /* No adapter found. */
+}
+
+/************************
+ *
+ * read_card_id
+ *
+ * Read the MCA card id from the specified slot.
+ * In
+ * smc - A pointer to the SMT Context struct.
+ * CAVEAT: This pointer may be NULL and *must not* be used within this
+ *	function. Its only purpose is for drivers that need some information
+ * for the inp() and outp() macros.
+ *
+ * slot - The number of the slot for which the card id is returned.
+ * Out
+ * Returns the card id read from the specified slot. If an illegal slot
+ * number is specified, the function returns zero.
+ *
+ ************************/
+static int read_card_id(struct s_smc *smc, int slot)
+/* struct s_smc *smc ; Do not use. */
+{
+ int card_id ;
+
+ SK_UNUSED(smc) ; /* Make LINT happy. */
+ if ((slot < 1) || (slot > 15)) /* max 16 slots, 0 = motherboard */
+ return (0) ; /* Illegal slot number specified. */
+
+ EnableSlotAccess(smc, slot) ;
+
+ card_id = ((read_POS(smc,POS_ID_HIGH,slot - 1) & 0xff) << 8) |
+ (read_POS(smc,POS_ID_LOW,slot - 1) & 0xff) ;
+
+ DisableSlotAccess(smc) ;
+
+ return (card_id) ;
+}
+
+/************************
+ *
+ * BEGIN_MANUAL_ENTRY()
+ *
+ * get_board_para
+ *
+ * Get adapter configuration information. Fill all board specific
+ * parameters within the 'smc' structure.
+ *
+ * int get_board_para(
+ * struct s_smc *smc,
+ * int slot) ;
+ * In
+ * smc - A pointer to the SMT Context struct, to which this function will
+ * write some adapter configuration data.
+ *
+ * slot - The number of the slot, in which the adapter is installed.
+ * Out
+ * 0 = No adapter present.
+ * 1 = Ok.
+ * 2 = Adapter present, but card enable bit not set.
+ *
+ * END_MANUAL_ENTRY()
+ *
+ ************************/
+int get_board_para(struct s_smc *smc, int slot)
+{
+ int val ;
+ int i ;
+
+ /* Check if adapter present & get type of adapter. */
+ switch (exist_board(smc, slot)) {
+ case 0: /* Adapter not present. */
+ return (0) ;
+ case 1: /* FM Rev. 1 */
+ smc->hw.rev = FM1_REV ;
+ smc->hw.VFullRead = 0x0a ;
+ smc->hw.VFullWrite = 0x05 ;
+ smc->hw.DmaWriteExtraBytes = 8 ; /* 2 extra words. */
+ break ;
+ }
+ smc->hw.slot = slot ;
+
+ EnableSlotAccess(smc, slot) ;
+
+ if (!(read_POS(smc,POS_102, slot - 1) & POS_CARD_EN)) {
+ DisableSlotAccess(smc) ;
+ return (2) ; /* Card enable bit not set. */
+ }
+
+ val = read_POS(smc,POS_104, slot - 1) ; /* I/O, IRQ */
+
+#ifndef MEM_MAPPED_IO /* is defined by the operating system */
+ i = val & POS_IOSEL ; /* I/O base addr. (0x0200 .. 0xfe00) */
+ smc->hw.iop = (i + 1) * 0x0400 - 0x200 ;
+#endif
+ i = ((val & POS_IRQSEL) >> 6) & 0x03 ; /* IRQ <0, 1> */
+ smc->hw.irq = opt_ints[i] ;
+
+ /* FPROM base addr. */
+ i = ((read_POS(smc,POS_103, slot - 1) & POS_MSEL) >> 4) & 0x07 ;
+ smc->hw.eprom = opt_eproms[i] ;
+
+ DisableSlotAccess(smc) ;
+
+ /* before this, the smc->hw.iop must be set !!! */
+ smc->hw.slot_32 = inpw(CSF_A) & SLOT_32 ;
+
+ return (1) ;
+}
+
+/* Enable access to specified MCA slot. */
+static void EnableSlotAccess(struct s_smc *smc, int slot)
+{
+ SK_UNUSED(slot) ;
+
+#ifndef AIX
+ SK_UNUSED(smc) ;
+
+ /* System mode. */
+ outp(POS_SYS_SETUP, POS_SYSTEM) ;
+
+ /* Select slot. */
+ outp(POS_CHANNEL_POS, POS_CHANNEL_BIT | (slot-1)) ;
+#else
+ attach_POS_addr (smc) ;
+#endif
+}
+
+/* Disable access to MCA slot formerly enabled via EnableSlotAccess(). */
+static void DisableSlotAccess(struct s_smc *smc)
+{
+#ifndef AIX
+ SK_UNUSED(smc) ;
+
+ outp(POS_CHANNEL_POS, 0) ;
+#else
+ detach_POS_addr (smc) ;
+#endif
+}
+#endif /* MCA */
+
+#ifdef EISA
+#ifndef MEM_MAPPED_IO
+#define SADDR(slot) (((slot)<<12)&0xf000)
+#else /* MEM_MAPPED_IO */
+#define SADDR(slot) (smc->hw.iop)
+#endif /* MEM_MAPPED_IO */
+
+/************************
+ *
+ * BEGIN_MANUAL_ENTRY()
+ *
+ * exist_board
+ *
+ * Check if an EISA board is present in the specified slot.
+ *
+ * int exist_board(
+ * struct s_smc *smc,
+ * int slot) ;
+ * In
+ * smc - A pointer to the SMT Context struct.
+ *
+ * slot - The number of the slot to inspect.
+ * Out
+ * 0 = No adapter present.
+ * 1 = Found adapter.
+ *
+ * Pseudo
+ * Read EISA ID
+ * for all valid OEM_IDs
+ * compare with ID read
+ * if equal, return 1
+ *	return(0)
+ *
+ * Note
+ * The smc pointer must be valid now.
+ *
+ ************************/
+int exist_board(struct s_smc *smc, int slot)
+{
+ int i ;
+#ifdef MULT_OEM
+ SK_LOC_DECL(u_char,id[4]) ;
+#endif /* MULT_OEM */
+
+ /* No longer valid. */
+ if (smc == NULL)
+ return(0);
+
+ SK_UNUSED(slot) ;
+
+#ifndef MULT_OEM
+ for (i = 0 ; i < 4 ; i++) {
+ if (inp(SADDR(slot)+PRA(i)) != OEMID(smc,i))
+ return(0) ;
+ }
+ return(1) ;
+#else /* MULT_OEM */
+ for (i = 0 ; i < 4 ; i++)
+ id[i] = inp(SADDR(slot)+PRA(i)) ;
+
+ smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ;
+
+ for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) {
+ if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status)
+ continue ;
+
+ if (is_equal_num(&id[0],&OEMID(smc,0),4))
+ return (1) ;
+ }
+ return (0) ; /* No adapter found. */
+#endif /* MULT_OEM */
+}
+
+
+int get_board_para(struct s_smc *smc, int slot)
+{
+ int i ;
+
+ if (!exist_board(smc,slot))
+ return(0) ;
+
+ smc->hw.slot = slot ;
+#ifndef MEM_MAPPED_IO /* if defined by the operating system */
+ smc->hw.iop = SADDR(slot) ;
+#endif
+
+ if (!(inp(C0_A(0))&CFG_CARD_EN)) {
+ return(2) ; /* CFG_CARD_EN bit not set! */
+ }
+
+ smc->hw.irq = opt_ints[(inp(C1_A(0)) & CFG_IRQ_SEL)] ;
+ smc->hw.dma = opt_dmas[((inp(C1_A(0)) & CFG_DRQ_SEL)>>3)] ;
+
+ if ((i = inp(C2_A(0)) & CFG_EPROM_SEL) != 0x0f)
+ smc->hw.eprom = opt_eproms[i] ;
+ else
+ smc->hw.eprom = 0 ;
+
+ smc->hw.DmaWriteExtraBytes = 8 ;
+
+ return(1) ;
+}
+#endif /* EISA */
+
+#ifdef ISA
+#ifndef MULT_OEM
+const u_char sklogo[6] = SKLOGO_STR ;
+#define SIZE_SKLOGO(smc) sizeof(sklogo)
+#define SKLOGO(smc,i) sklogo[i]
+#else /* MULT_OEM */
+#define SIZE_SKLOGO(smc) smc->hw.oem_id->oi_logo_len
+#define SKLOGO(smc,i) smc->hw.oem_id->oi_logo[i]
+#endif /* MULT_OEM */
+
+
+int exist_board(struct s_smc *smc, HW_PTR port)
+{
+ int i ;
+#ifdef MULT_OEM
+ int bytes_read ;
+ u_char board_logo[15] ;
+ SK_LOC_DECL(u_char,id[4]) ;
+#endif /* MULT_OEM */
+
+ /* No longer valid. */
+ if (smc == NULL)
+ return(0);
+
+ SK_UNUSED(smc) ;
+#ifndef MULT_OEM
+ for (i = SADDRL ; i < (signed) (SADDRL+SIZE_SKLOGO(smc)) ; i++) {
+ if ((u_char)inpw((PRA(i)+port)) != SKLOGO(smc,i-SADDRL)) {
+ return(0) ;
+ }
+ }
+
+ /* check MAC address (S&K or other) */
+ for (i = 0 ; i < 3 ; i++) {
+ if ((u_char)inpw((PRA(i)+port)) != OEMID(smc,i))
+ return(0) ;
+ }
+ return(1) ;
+#else /* MULT_OEM */
+ smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ;
+ board_logo[0] = (u_char)inpw((PRA(SADDRL)+port)) ;
+ bytes_read = 1 ;
+
+ for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) {
+ if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status)
+ continue ;
+
+ /* Test all read bytes with current OEM_entry */
+ /* for (i=0; (i<bytes_read) && (i < SIZE_SKLOGO(smc)); i++) { */
+ for (i = 0; i < bytes_read; i++) {
+ if (board_logo[i] != SKLOGO(smc,i))
+ break ;
+ }
+
+ /* If mismatch, switch to next OEM entry */
+ if ((board_logo[i] != SKLOGO(smc,i)) && (i < bytes_read))
+ continue ;
+
+ --i ;
+ while (bytes_read < SIZE_SKLOGO(smc)) {
+			/* inpw next byte of the SK logo */
+ i++ ;
+ board_logo[i] = (u_char)inpw((PRA(SADDRL+i)+port)) ;
+ bytes_read++ ;
+ if (board_logo[i] != SKLOGO(smc,i))
+ break ;
+ }
+
+ for (i = 0 ; i < 3 ; i++)
+ id[i] = (u_char)inpw((PRA(i)+port)) ;
+
+ if ((board_logo[i] == SKLOGO(smc,i))
+ && (bytes_read == SIZE_SKLOGO(smc))) {
+
+ if (is_equal_num(&id[0],&OEMID(smc,0),3))
+ return(1);
+ }
+ } /* for */
+ return(0) ;
+#endif /* MULT_OEM */
+}
+
+int get_board_para(struct s_smc *smc, int slot)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(slot) ;
+ return(0) ; /* for ISA not supported */
+}
+#endif /* ISA */
+
+#ifdef PCI
+#ifdef USE_BIOS_FUN
+int exist_board(struct s_smc *smc, int slot)
+{
+ u_short dev_id ;
+ u_short ven_id ;
+ int found ;
+ int i ;
+
+	found = FALSE ;			/* make sure we return with adapter not found */
+ /* if an empty oemids.h was included */
+
+#ifdef MULT_OEM
+ smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ;
+ for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) {
+ if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status)
+ continue ;
+#endif
+ ven_id = OEMID(smc,0) + (OEMID(smc,1) << 8) ;
+ dev_id = OEMID(smc,2) + (OEMID(smc,3) << 8) ;
+ for (i = 0; i < slot; i++) {
+ if (pci_find_device(i,&smc->hw.pci_handle,
+ dev_id,ven_id) != 0) {
+
+ found = FALSE ;
+ } else {
+ found = TRUE ;
+ }
+ }
+ if (found) {
+ return(1) ; /* adapter was found */
+ }
+#ifdef MULT_OEM
+ }
+#endif
+ return(0) ; /* adapter was not found */
+}
+#endif /* PCI */
+#endif /* USE_BIOS_FUNC */
+
+void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
+{
+ int i ;
+
+ extern const u_char canonical[256] ;
+
+ for (i = 0 ; i < 6 ; i++) {
+ bia_addr->a[i] = canonical[smc->hw.fddi_phys_addr.a[i]] ;
+ }
+}
+
+void smt_start_watchdog(struct s_smc *smc)
+{
+ SK_UNUSED(smc) ; /* Make LINT happy. */
+
+#ifndef DEBUG
+
+#ifdef PCI
+ if (smc->hw.wdog_used) {
+ outpw(ADDR(B2_WDOG_CRTL),TIM_START) ; /* Start timer. */
+ }
+#endif
+
+#endif /* DEBUG */
+}
+
+void smt_stop_watchdog(struct s_smc *smc)
+{
+ SK_UNUSED(smc) ; /* Make LINT happy. */
+#ifndef DEBUG
+
+#ifdef PCI
+ if (smc->hw.wdog_used) {
+ outpw(ADDR(B2_WDOG_CRTL),TIM_STOP) ; /* Stop timer. */
+ }
+#endif
+
+#endif /* DEBUG */
+}
+
+#ifdef PCI
+static char get_rom_byte(struct s_smc *smc, u_short addr)
+{
+ GET_PAGE(addr) ;
+ return (READ_PROM(ADDR(B2_FDP))) ;
+}
+
+/*
+ * ROM image defines
+ */
+#define ROM_SIG_1 0
+#define ROM_SIG_2 1
+#define PCI_DATA_1 0x18
+#define PCI_DATA_2 0x19
+
+/*
+ * PCI data structure defines
+ */
+#define VPD_DATA_1 0x08
+#define VPD_DATA_2 0x09
+#define IMAGE_LEN_1 0x10
+#define IMAGE_LEN_2 0x11
+#define CODE_TYPE 0x14
+#define INDICATOR 0x15
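+
+/*
+ * These offsets follow the PCI expansion ROM header convention: each ROM
+ * image starts with the 0x55/0xaa signature, the 16 bit pointer to the
+ * PCI data structure sits at offset 0x18/0x19 of the image, and the VPD
+ * pointer, image length, code type and indicator byte are then read
+ * relative to that PCI data structure.
+ */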
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_vpd_read)
+ * mac_drv_vpd_read(smc,buf,size,image)
+ *
+ * function DOWNCALL (FDDIWARE)
+ * reads the VPD data of the FPROM and writes it into the
+ * buffer
+ *
+ * para buf points to the buffer for the VPD data
+ * size size of the VPD data buffer
+ * image boot image; code type of the boot image
+ * image = 0 Intel x86, PC-AT compatible
+ * 1 OPENBOOT standard for PCI
+ * 2-FF reserved
+ *
+ * returns	len	number of VPD data bytes read from the FPROM
+ *		>=0	number of read bytes
+ *		<0	error: data invalid
+ *
+ * END_MANUAL_ENTRY
+ */
+int mac_drv_vpd_read(struct s_smc *smc, char *buf, int size, char image)
+{
+ u_short ibase ;
+ u_short pci_base ;
+ u_short vpd ;
+ int len ;
+
+ len = 0 ;
+ ibase = 0 ;
+ /*
+	 * loop as long as images are defined
+ */
+ while (get_rom_byte(smc,ibase+ROM_SIG_1) == 0x55 &&
+ (u_char) get_rom_byte(smc,ibase+ROM_SIG_2) == 0xaa) {
+ /*
+ * get the pointer to the PCI data structure
+ */
+ pci_base = ibase + get_rom_byte(smc,ibase+PCI_DATA_1) +
+ (get_rom_byte(smc,ibase+PCI_DATA_2) << 8) ;
+
+ if (image == get_rom_byte(smc,pci_base+CODE_TYPE)) {
+ /*
+ * we have the right image, read the VPD data
+ */
+ vpd = ibase + get_rom_byte(smc,pci_base+VPD_DATA_1) +
+ (get_rom_byte(smc,pci_base+VPD_DATA_2) << 8) ;
+ if (vpd == ibase) {
+ break ; /* no VPD data */
+ }
+ for (len = 0; len < size; len++,buf++,vpd++) {
+ *buf = get_rom_byte(smc,vpd) ;
+ }
+ break ;
+ }
+ else {
+ /*
+ * try the next image
+ */
+ if (get_rom_byte(smc,pci_base+INDICATOR) & 0x80) {
+ break ; /* this was the last image */
+ }
+ ibase = ibase + get_rom_byte(smc,ibase+IMAGE_LEN_1) +
+ (get_rom_byte(smc,ibase+IMAGE_LEN_2) << 8) ;
+ }
+ }
+
+ return(len) ;
+}
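+
+/*
+ * Illustrative usage sketch only (therefore #if 0, not compiled): the
+ * function name example_read_vpd and the buffer size are made up.  It
+ * shows how a caller could fetch the VPD block of the Intel x86 boot
+ * image (image == 0).
+ */
+#if 0
+static void example_read_vpd(struct s_smc *smc)
+{
+	char vpd_buf[256] ;	/* hypothetical destination buffer */
+	int len ;
+
+	len = mac_drv_vpd_read(smc,vpd_buf,sizeof(vpd_buf),0) ;
+	if (len > 0) {
+		/* len VPD bytes have been copied into vpd_buf */
+	}
+}
+#endif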
+
+void mac_drv_pci_fix(struct s_smc *smc, u_long fix_value)
+{
+ smc->hw.pci_fix_value = fix_value ;
+}
+
+void mac_do_pci_fix(struct s_smc *smc)
+{
+ SK_UNUSED(smc) ;
+}
+#endif /* PCI */
+
diff --git a/drivers/net/skfp/ecm.c b/drivers/net/skfp/ecm.c
new file mode 100644
index 000000000000..47d922cb3c08
--- /dev/null
+++ b/drivers/net/skfp/ecm.c
@@ -0,0 +1,536 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT ECM
+ Entity Coordination Management
+ Hardware independent state machine
+*/
+
+/*
+ * Hardware independent state machine implementation
+ * The following external SMT functions are referenced :
+ *
+ * queue_event()
+ * smt_timer_start()
+ * smt_timer_stop()
+ *
+ * The following external HW dependent functions are referenced :
+ * sm_pm_bypass_req()
+ * sm_pm_ls_latch()
+ * sm_pm_get_ls()
+ *
+ * The following HW dependent events are required :
+ * NONE
+ *
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ;
+#endif
+
+/*
+ * FSM Macros
+ */
+#define AFLAG 0x10
+#define GO_STATE(x) (smc->mib.fddiSMTECMState = (x)|AFLAG)
+#define ACTIONS_DONE() (smc->mib.fddiSMTECMState &= ~AFLAG)
+#define ACTIONS(x) (x|AFLAG)
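+
+/*
+ * Illustrative note on the macros above: every state S appears twice in
+ * the switch of ecm_fsm() below, roughly as
+ *
+ *	case ACTIONS(S) :	... entry actions ... ACTIONS_DONE() ; break ;
+ *	case S :		... event handling, GO_STATE(next) ... break ;
+ *
+ * GO_STATE(x) sets AFLAG so that the entry actions of x run exactly once
+ * before events are processed in the plain state x.
+ */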
+
+#define EC0_OUT 0 /* not inserted */
+#define EC1_IN 1 /* inserted */
+#define EC2_TRACE 2 /* tracing */
+#define EC3_LEAVE 3 /* leaving the ring */
+#define EC4_PATH_TEST 4 /* performing path test */
+#define EC5_INSERT 5 /* bypass being turned on */
+#define EC6_CHECK 6 /* checking bypass */
+#define EC7_DEINSERT	7	/* bypass being turned off */
+
+#ifdef DEBUG
+/*
+ * symbolic state names
+ */
+static const char * const ecm_states[] = {
+ "EC0_OUT","EC1_IN","EC2_TRACE","EC3_LEAVE","EC4_PATH_TEST",
+ "EC5_INSERT","EC6_CHECK","EC7_DEINSERT"
+} ;
+
+/*
+ * symbolic event names
+ */
+static const char * const ecm_events[] = {
+ "NONE","EC_CONNECT","EC_DISCONNECT","EC_TRACE_PROP","EC_PATH_TEST",
+ "EC_TIMEOUT_TD","EC_TIMEOUT_TMAX",
+ "EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE"
+} ;
+#endif
+
+/*
+ * all Globals are defined in smc.h
+ * struct s_ecm
+ */
+
+/*
+ * function declarations
+ */
+
+static void ecm_fsm(struct s_smc *smc, int cmd);
+static void start_ecm_timer(struct s_smc *smc, u_long value, int event);
+static void stop_ecm_timer(struct s_smc *smc);
+static void prop_actions(struct s_smc *smc);
+
+/*
+ init ECM state machine
+ clear all ECM vars and flags
+*/
+void ecm_init(struct s_smc *smc)
+{
+ smc->e.path_test = PT_PASSED ;
+ smc->e.trace_prop = 0 ;
+ smc->e.sb_flag = 0 ;
+ smc->mib.fddiSMTECMState = ACTIONS(EC0_OUT) ;
+ smc->e.ecm_line_state = FALSE ;
+}
+
+/*
+ ECM state machine
+ called by dispatcher
+
+ do
+ display state change
+ process event
+ until SM is stable
+*/
+void ecm(struct s_smc *smc, int event)
+{
+ int state ;
+
+ do {
+ DB_ECM("ECM : state %s%s",
+ (smc->mib.fddiSMTECMState & AFLAG) ? "ACTIONS " : "",
+ ecm_states[smc->mib.fddiSMTECMState & ~AFLAG]) ;
+ DB_ECM(" event %s\n",ecm_events[event],0) ;
+ state = smc->mib.fddiSMTECMState ;
+ ecm_fsm(smc,event) ;
+ event = 0 ;
+ } while (state != smc->mib.fddiSMTECMState) ;
+ ecm_state_change(smc,(int)smc->mib.fddiSMTECMState) ;
+}
+
+/*
+ process ECM event
+*/
+static void ecm_fsm(struct s_smc *smc, int cmd)
+{
+ int ls_a ; /* current line state PHY A */
+ int ls_b ; /* current line state PHY B */
+ int p ; /* ports */
+
+
+ smc->mib.fddiSMTBypassPresent = sm_pm_bypass_present(smc) ;
+ if (cmd == EC_CONNECT)
+ smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;
+
+ /* For AIX event notification: */
+ /* Is a disconnect command remotely issued ? */
+ if (cmd == EC_DISCONNECT &&
+ smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
+ AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
+ FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
+ smt_get_error_word(smc) );
+
+ /*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
+ if (cmd == EC_CONNECT) {
+ smc->e.DisconnectFlag = FALSE ;
+ }
+ else if (cmd == EC_DISCONNECT) {
+ smc->e.DisconnectFlag = TRUE ;
+ }
+
+ switch(smc->mib.fddiSMTECMState) {
+ case ACTIONS(EC0_OUT) :
+ /*
+ * We do not perform a path test
+ */
+ smc->e.path_test = PT_PASSED ;
+ smc->e.ecm_line_state = FALSE ;
+ stop_ecm_timer(smc) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC0_OUT:
+ /*EC01*/
+ if (cmd == EC_CONNECT && !smc->mib.fddiSMTBypassPresent
+ && smc->e.path_test==PT_PASSED) {
+ GO_STATE(EC1_IN) ;
+ break ;
+ }
+ /*EC05*/
+ else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
+ smc->mib.fddiSMTBypassPresent &&
+ (smc->s.sas == SMT_DAS)) {
+ GO_STATE(EC5_INSERT) ;
+ break ;
+ }
+ break;
+ case ACTIONS(EC1_IN) :
+ stop_ecm_timer(smc) ;
+ smc->e.trace_prop = 0 ;
+ sm_ma_control(smc,MA_TREQ) ;
+ for (p = 0 ; p < NUMPHYS ; p++)
+ if (smc->mib.p[p].fddiPORTHardwarePresent)
+ queue_event(smc,EVENT_PCMA+p,PC_START) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC1_IN:
+ /*EC12*/
+ if (cmd == EC_TRACE_PROP) {
+ prop_actions(smc) ;
+ GO_STATE(EC2_TRACE) ;
+ break ;
+ }
+ /*EC13*/
+ else if (cmd == EC_DISCONNECT) {
+ GO_STATE(EC3_LEAVE) ;
+ break ;
+ }
+ break;
+ case ACTIONS(EC2_TRACE) :
+ start_ecm_timer(smc,MIB2US(smc->mib.fddiSMTTrace_MaxExpiration),
+ EC_TIMEOUT_TMAX) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC2_TRACE :
+ /*EC22*/
+ if (cmd == EC_TRACE_PROP) {
+ prop_actions(smc) ;
+ GO_STATE(EC2_TRACE) ;
+ break ;
+ }
+ /*EC23a*/
+ else if (cmd == EC_DISCONNECT) {
+ smc->e.path_test = PT_EXITING ;
+ GO_STATE(EC3_LEAVE) ;
+ break ;
+ }
+ /*EC23b*/
+ else if (smc->e.path_test == PT_PENDING) {
+ GO_STATE(EC3_LEAVE) ;
+ break ;
+ }
+ /*EC23c*/
+ else if (cmd == EC_TIMEOUT_TMAX) {
+ /* Trace_Max is expired */
+ /* -> send AIX_EVENT */
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
+ (u_long) FDDI_SMT_ERROR, (u_long)
+ FDDI_TRACE_MAX, smt_get_error_word(smc));
+ smc->e.path_test = PT_PENDING ;
+ GO_STATE(EC3_LEAVE) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(EC3_LEAVE) :
+ start_ecm_timer(smc,smc->s.ecm_td_min,EC_TIMEOUT_TD) ;
+ for (p = 0 ; p < NUMPHYS ; p++)
+ queue_event(smc,EVENT_PCMA+p,PC_STOP) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC3_LEAVE:
+ /*EC30*/
+ if (cmd == EC_TIMEOUT_TD && !smc->mib.fddiSMTBypassPresent &&
+ (smc->e.path_test != PT_PENDING)) {
+ GO_STATE(EC0_OUT) ;
+ break ;
+ }
+ /*EC34*/
+ else if (cmd == EC_TIMEOUT_TD &&
+ (smc->e.path_test == PT_PENDING)) {
+ GO_STATE(EC4_PATH_TEST) ;
+ break ;
+ }
+ /*EC31*/
+ else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
+ GO_STATE(EC1_IN) ;
+ break ;
+ }
+ /*EC33*/
+ else if (cmd == EC_DISCONNECT &&
+ smc->e.path_test == PT_PENDING) {
+ smc->e.path_test = PT_EXITING ;
+ /*
+ * stay in state - state will be left via timeout
+ */
+ }
+ /*EC37*/
+ else if (cmd == EC_TIMEOUT_TD &&
+ smc->mib.fddiSMTBypassPresent &&
+ smc->e.path_test != PT_PENDING) {
+ GO_STATE(EC7_DEINSERT) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(EC4_PATH_TEST) :
+ stop_ecm_timer(smc) ;
+ smc->e.path_test = PT_TESTING ;
+ start_ecm_timer(smc,smc->s.ecm_test_done,EC_TEST_DONE) ;
+ /* now perform path test ... just a simulation */
+ ACTIONS_DONE() ;
+ break ;
+ case EC4_PATH_TEST :
+ /* path test done delay */
+ if (cmd == EC_TEST_DONE)
+ smc->e.path_test = PT_PASSED ;
+
+ if (smc->e.path_test == PT_FAILED)
+ RS_SET(smc,RS_PATHTEST) ;
+
+ /*EC40a*/
+ if (smc->e.path_test == PT_FAILED &&
+ !smc->mib.fddiSMTBypassPresent) {
+ GO_STATE(EC0_OUT) ;
+ break ;
+ }
+ /*EC40b*/
+ else if (cmd == EC_DISCONNECT &&
+ !smc->mib.fddiSMTBypassPresent) {
+ GO_STATE(EC0_OUT) ;
+ break ;
+ }
+ /*EC41*/
+ else if (smc->e.path_test == PT_PASSED) {
+ GO_STATE(EC1_IN) ;
+ break ;
+ }
+ /*EC47a*/
+ else if (smc->e.path_test == PT_FAILED &&
+ smc->mib.fddiSMTBypassPresent) {
+ GO_STATE(EC7_DEINSERT) ;
+ break ;
+ }
+ /*EC47b*/
+ else if (cmd == EC_DISCONNECT &&
+ smc->mib.fddiSMTBypassPresent) {
+ GO_STATE(EC7_DEINSERT) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(EC5_INSERT) :
+ sm_pm_bypass_req(smc,BP_INSERT);
+ start_ecm_timer(smc,smc->s.ecm_in_max,EC_TIMEOUT_INMAX) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC5_INSERT :
+ /*EC56*/
+ if (cmd == EC_TIMEOUT_INMAX) {
+ GO_STATE(EC6_CHECK) ;
+ break ;
+ }
+ /*EC57*/
+ else if (cmd == EC_DISCONNECT) {
+ GO_STATE(EC7_DEINSERT) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(EC6_CHECK) :
+ /*
+ * in EC6_CHECK, we *POLL* the line state !
+ * check whether both bypass switches have switched.
+ */
+ start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
+ smc->e.ecm_line_state = TRUE ; /* flag to pcm: report Q/HLS */
+ (void) sm_pm_ls_latch(smc,PA,1) ; /* enable line state latch */
+ (void) sm_pm_ls_latch(smc,PB,1) ; /* enable line state latch */
+ ACTIONS_DONE() ;
+ break ;
+ case EC6_CHECK :
+ ls_a = sm_pm_get_ls(smc,PA) ;
+ ls_b = sm_pm_get_ls(smc,PB) ;
+
+ /*EC61*/
+ if (((ls_a == PC_QLS) || (ls_a == PC_HLS)) &&
+ ((ls_b == PC_QLS) || (ls_b == PC_HLS)) ) {
+ smc->e.sb_flag = FALSE ;
+ smc->e.ecm_line_state = FALSE ;
+ GO_STATE(EC1_IN) ;
+ break ;
+ }
+ /*EC66*/
+ else if (!smc->e.sb_flag &&
+ (((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
+ ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
+ smc->e.sb_flag = TRUE ;
+ DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ;
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
+ smt_get_error_word(smc));
+ }
+ /*EC67*/
+ else if (cmd == EC_DISCONNECT) {
+ smc->e.ecm_line_state = FALSE ;
+ GO_STATE(EC7_DEINSERT) ;
+ break ;
+ }
+ else {
+ /*
+ * restart poll
+ */
+ start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
+ }
+ break ;
+ case ACTIONS(EC7_DEINSERT) :
+ sm_pm_bypass_req(smc,BP_DEINSERT);
+ start_ecm_timer(smc,smc->s.ecm_i_max,EC_TIMEOUT_IMAX) ;
+ ACTIONS_DONE() ;
+ break ;
+ case EC7_DEINSERT:
+ /*EC70*/
+ if (cmd == EC_TIMEOUT_IMAX) {
+ GO_STATE(EC0_OUT) ;
+ break ;
+ }
+ /*EC75*/
+ else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
+ GO_STATE(EC5_INSERT) ;
+ break ;
+ }
+ break;
+ default:
+ SMT_PANIC(smc,SMT_E0107, SMT_E0107_MSG) ;
+ break;
+ }
+}
+
+#ifndef CONCENTRATOR
+/*
+ * trace propagation actions for SAS & DAS
+ */
+static void prop_actions(struct s_smc *smc)
+{
+ int port_in = 0 ;
+ int port_out = 0 ;
+
+ RS_SET(smc,RS_EVENT) ;
+ switch (smc->s.sas) {
+ case SMT_SAS :
+ port_in = port_out = pcm_get_s_port(smc) ;
+ break ;
+ case SMT_DAS :
+ port_in = cfm_get_mac_input(smc) ; /* PA or PB */
+ port_out = cfm_get_mac_output(smc) ; /* PA or PB */
+ break ;
+ case SMT_NAC :
+ SMT_PANIC(smc,SMT_E0108, SMT_E0108_MSG) ;
+ return ;
+ }
+
+ DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ;
+ DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ;
+
+ if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
+		/* trace initiator */
+ DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ;
+ queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ;
+ }
+ else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) &&
+ port_out != PA) {
+ /* trace propagate upstream */
+ DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ;
+ queue_event(smc,EVENT_PCMB,PC_TRACE) ;
+ }
+ else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) &&
+ port_out != PB) {
+ /* trace propagate upstream */
+ DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ;
+ queue_event(smc,EVENT_PCMA,PC_TRACE) ;
+ }
+ else {
+ /* signal trace termination */
+ DB_ECM("ECM : TRACE terminated\n",0,0) ;
+ smc->e.path_test = PT_PENDING ;
+ }
+ smc->e.trace_prop = 0 ;
+}
+#else
+/*
+ * trace propagation actions for Concentrator
+ */
+static void prop_actions(struct s_smc *smc)
+{
+ int initiator ;
+ int upstream ;
+ int p ;
+
+ RS_SET(smc,RS_EVENT) ;
+ while (smc->e.trace_prop) {
+ DB_ECM("ECM : prop_actions - trace_prop %d\n",
+ smc->e.trace_prop,0) ;
+
+ if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
+ initiator = ENTITY_MAC ;
+ smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ;
+ DB_ECM("ECM: MAC initiates trace\n",0,0) ;
+ }
+ else {
+ for (p = NUMPHYS-1 ; p >= 0 ; p--) {
+ if (smc->e.trace_prop &
+ ENTITY_BIT(ENTITY_PHY(p)))
+ break ;
+ }
+ initiator = ENTITY_PHY(p) ;
+ smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_PHY(p)) ;
+ }
+ upstream = cem_get_upstream(smc,initiator) ;
+
+ if (upstream == ENTITY_MAC) {
+ /* signal trace termination */
+ DB_ECM("ECM : TRACE terminated\n",0,0) ;
+ smc->e.path_test = PT_PENDING ;
+ }
+ else {
+ /* trace propagate upstream */
+ DB_ECM("ECM : propagate TRACE on PHY %d\n",upstream,0) ;
+ queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ;
+ }
+ }
+}
+#endif
+
+
+/*
+ * SMT timer interface
+ * start ECM timer
+ */
+static void start_ecm_timer(struct s_smc *smc, u_long value, int event)
+{
+ smt_timer_start(smc,&smc->e.ecm_timer,value,EV_TOKEN(EVENT_ECM,event));
+}
+
+/*
+ * SMT timer interface
+ * stop ECM timer
+ */
+static void stop_ecm_timer(struct s_smc *smc)
+{
+ if (smc->e.ecm_timer.tm_active)
+ smt_timer_stop(smc,&smc->e.ecm_timer) ;
+}
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
new file mode 100644
index 000000000000..fd39b4b2ef7d
--- /dev/null
+++ b/drivers/net/skfp/ess.c
@@ -0,0 +1,720 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * *******************************************************************
+ * This SBA code implements the Synchronous Bandwidth Allocation
+ * functions described in the "FDDI Synchronous Forum Implementer's
+ *	Agreement" dated December 1st, 1993.
+ * *******************************************************************
+ *
+ * PURPOSE: The purpose of this function is to control
+ * synchronous allocations on a single FDDI segment.
+ * Allocations are limited to the primary FDDI ring.
+ * The SBM provides recovery mechanisms to recover
+ *		unused bandwidth and also resolves T_Neg and
+ * reconfiguration changes. Many of the SBM state
+ * machine inputs are sourced by the underlying
+ * FDDI sub-system supporting the SBA application.
+ *
+ * *******************************************************************
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smt_p.h"
+
+
+#ifndef SLIM_SMT
+
+#ifdef ESS
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)ess.c 1.10 96/02/23 (C) SK" ;
+#define LINT_USE(x)
+#else
+#define LINT_USE(x) (x)=(x)
+#endif
+#define MS2BCLK(x) ((x)*12500L)
+
+/*
+ -------------------------------------------------------------
+ LOCAL VARIABLES:
+ -------------------------------------------------------------
+*/
+
+static const u_short plist_raf_alc_res[] = { SMT_P0012, SMT_P320B, SMT_P320F,
+ SMT_P3210, SMT_P0019, SMT_P001A,
+ SMT_P001D, 0 } ;
+
+static const u_short plist_raf_chg_req[] = { SMT_P320B, SMT_P320F, SMT_P3210,
+ SMT_P001A, 0 } ;
+
+static const struct fddi_addr smt_sba_da = {{0x80,0x01,0x43,0x00,0x80,0x0C}} ;
+static const struct fddi_addr null_addr = {{0,0,0,0,0,0}} ;
+
+/*
+ -------------------------------------------------------------
+ GLOBAL VARIABLES:
+ -------------------------------------------------------------
+*/
+
+
+/*
+ -------------------------------------------------------------
+ LOCAL FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
+ int sba_cmd);
+static void ess_config_fifo(struct s_smc *smc);
+static void ess_send_alc_req(struct s_smc *smc);
+static void ess_send_frame(struct s_smc *smc, SMbuf *mb);
+
+/*
+ -------------------------------------------------------------
+ EXTERNAL FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+/*
+ -------------------------------------------------------------
+ PUBLIC FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+void ess_timer_poll(struct s_smc *smc);
+void ess_para_change(struct s_smc *smc);
+int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
+ int fs);
+int process_bw_alloc(struct s_smc *smc, long int payload, long int overhead);
+
+
+/*
+ * --------------------------------------------------------------------------
+ * End Station Support (ESS)
+ * --------------------------------------------------------------------------
+ */
+
+/*
+ * evaluate the RAF frame
+ */
+int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
+ int fs)
+{
+ void *p ; /* universal pointer */
+ struct smt_p_0016 *cmd ; /* para: command for the ESS */
+ SMbuf *db ;
+	u_long	msg_res_type ;		/* resource type */
+ u_long payload, overhead ;
+ int local ;
+ int i ;
+
+ /*
+ * Message Processing Code
+ */
+ local = ((fs & L_INDICATOR) != 0) ;
+
+ /*
+ * get the resource type
+ */
+ if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
+ DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
+ return(fs) ;
+ }
+ msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
+
+ /*
+ * get the pointer to the ESS command
+ */
+ if (!(cmd = (struct smt_p_0016 *) sm_to_para(smc,sm,SMT_P0016))) {
+ /*
+ * error in frame: para ESS command was not found
+ */
+ DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
+ return(fs) ;
+ }
+
+ DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ;
+ DB_ESSN(2,"ver %x tran %lx\n",sm->smt_version,sm->smt_tid) ;
+ DB_ESSN(2,"stn_id %s\n",addr_to_string(&sm->smt_source),0) ;
+
+ DB_ESSN(2,"infolen %x res %x\n",sm->smt_len, msg_res_type) ;
+ DB_ESSN(2,"sbacmd %x\n",cmd->sba_cmd,0) ;
+
+ /*
+ * evaluate the ESS command
+ */
+ switch (cmd->sba_cmd) {
+
+ /*
+ * Process an ESS Allocation Request
+ */
+ case REQUEST_ALLOCATION :
+ /*
+ * check for an RAF Request (Allocation Request)
+ */
+ if (sm->smt_type == SMT_REQUEST) {
+ /*
+ * process the Allocation request only if the frame is
+ * local and no static allocation is used
+ */
+ if (!local || smc->mib.fddiESSPayload)
+ return(fs) ;
+
+ p = (void *) sm_to_para(smc,sm,SMT_P0019) ;
+ for (i = 0; i < 5; i++) {
+ if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
+ return(fs) ;
+ }
+ }
+
+ /*
+ * Note: The Application should send a LAN_LOC_FRAME.
+			 *	 The ESS does not send the frame to the network!
+ */
+ smc->ess.alloc_trans_id = sm->smt_tid ;
+ DB_ESS("ESS: save Alloc Req Trans ID %lx\n",sm->smt_tid,0);
+ p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
+ ((struct smt_p_320f *)p)->mib_payload =
+ smc->mib.a[PATH0].fddiPATHSbaPayload ;
+ p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
+ ((struct smt_p_3210 *)p)->mib_overhead =
+ smc->mib.a[PATH0].fddiPATHSbaOverhead ;
+ sm->smt_dest = smt_sba_da ;
+
+ if (smc->ess.local_sba_active)
+ return(fs | I_INDICATOR) ;
+
+ if (!(db = smt_get_mbuf(smc)))
+ return(fs) ;
+
+ db->sm_len = mb->sm_len ;
+ db->sm_off = mb->sm_off ;
+ memcpy(((char *)(db->sm_data+db->sm_off)),(char *)sm,
+ (int)db->sm_len) ;
+ dump_smt(smc,
+ (struct smt_header *)(db->sm_data+db->sm_off),
+ "RAF") ;
+ smt_send_frame(smc,db,FC_SMT_INFO,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * The RAF frame is an Allocation Response !
+ * check the parameters
+ */
+ if (smt_check_para(smc,sm,plist_raf_alc_res)) {
+ DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * VERIFY THE FRAME IS WELL BUILT:
+ *
+ * 1. path index = primary ring only
+ * 2. resource type = sync bw only
+	 * 3. transaction id = alloc_trans_id
+ * 4. reason code = success
+ *
+ * If any are violated, discard the RAF frame
+ */
+ if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
+ != PRIMARY_RING) ||
+ (msg_res_type != SYNC_BW) ||
+ (((struct smt_p_reason *)sm_to_para(smc,sm,SMT_P0012))->rdf_reason
+ != SMT_RDF_SUCCESS) ||
+ (sm->smt_tid != smc->ess.alloc_trans_id)) {
+
+			DB_ESS("ESS: Allocation Response not accepted\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * Extract message parameters
+ */
+ p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
+ if (!p) {
+ printk(KERN_ERR "ESS: sm_to_para failed");
+ return fs;
+ }
+ payload = ((struct smt_p_320f *)p)->mib_payload ;
+ p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
+ if (!p) {
+ printk(KERN_ERR "ESS: sm_to_para failed");
+ return fs;
+ }
+ overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
+
+ DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+
+ /*
+ * process the bandwidth allocation
+ */
+ (void)process_bw_alloc(smc,(long)payload,(long)overhead) ;
+
+ return(fs) ;
+ /* end of Process Allocation Request */
+
+ /*
+ * Process an ESS Change Request
+ */
+ case CHANGE_ALLOCATION :
+ /*
+ * except only replies
+		 * accept only requests
+ if (sm->smt_type != SMT_REQUEST) {
+ DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * check the para for the Change Request
+ */
+ if (smt_check_para(smc,sm,plist_raf_chg_req)) {
+ DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * Verify the path index and resource
+ * type are correct. If any of
+ * these are false, don't process this
+ * change request frame.
+ */
+ if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
+ != PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
+ DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * Extract message queue parameters
+ */
+ p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
+ payload = ((struct smt_p_320f *)p)->mib_payload ;
+ p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
+ overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
+
+ DB_ESSN(2,"ESS: Change Request from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+
+ /*
+ * process the bandwidth allocation
+ */
+ if(!process_bw_alloc(smc,(long)payload,(long)overhead))
+ return(fs) ;
+
+ /*
+ * send an RAF Change Reply
+ */
+ ess_send_response(smc,sm,CHANGE_ALLOCATION) ;
+
+ return(fs) ;
+ /* end of Process Change Request */
+
+ /*
+ * Process Report Response
+ */
+ case REPORT_ALLOCATION :
+ /*
+		 * accept only requests
+ */
+ if (sm->smt_type != SMT_REQUEST) {
+ DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
+ return(fs) ;
+ }
+
+ DB_ESSN(2,"ESS: Report Request from %s\n",
+ addr_to_string(&(sm->smt_source)),0) ;
+
+ /*
+ * verify that the resource type is sync bw only
+ */
+ if (msg_res_type != SYNC_BW) {
+ DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
+ return(fs) ;
+ }
+
+ /*
+ * send an RAF Change Reply
+ */
+ ess_send_response(smc,sm,REPORT_ALLOCATION) ;
+
+ return(fs) ;
+ /* end of Process Report Request */
+
+ default:
+ /*
+ * error in frame
+ */
+ DB_ESS("ESS: ignoring RAF with bad sba_cmd\n",0,0) ;
+ break ;
+ }
+
+ return(fs) ;
+}
+
+/*
+ * determines the synchronous bandwidth, set the TSYNC register and the
+ * mib variables SBAPayload, SBAOverhead and fddiMACT-NEG.
+ */
+int process_bw_alloc(struct s_smc *smc, long int payload, long int overhead)
+{
+ /*
+ * determine the synchronous bandwidth (sync_bw) in bytes per T-NEG,
+ * if the payload is greater than zero.
+	 * For the SBAPayload and the SBAOverhead we have the following
+	 * unit equations:
+	 *
+	 *	SBAPayload	[unit: 8000 bytes/s]
+	 *	SBAOverhead	[unit: bytes/T-NEG]
+	 *
+	 * T-NEG is described by the equation:
+	 *
+	 *		 (-) fddiMACT-NEG
+	 *	T-NEG = -------------------
+	 *		   12500000 1/s
+	 *
+	 * The number of bytes we are able to send per T-NEG is the
+	 * overhead plus the payload:
+	 *
+	 *			      bytes	T-NEG * SBAPayload * 8000 bytes/s
+	 *	sync_bw = SBAOverhead ----- + ---------------------------------
+	 *			      T-NEG		    T-NEG
+	 *
+	 *				 1
+	 *	sync_bw = SBAOverhead + ---- * (-) fddiMACT-NEG * SBAPayload
+	 *				1562
+ *
+ */
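+	/*
+	 * Worked example with made-up numbers: for
+	 * (-) fddiMACT-NEG = 156200 (T-NEG = 156200 * 80 ns = 12.496 ms),
+	 * SBAPayload = 10 and SBAOverhead = 50 the computation below gives
+	 *
+	 *	sync_bw = 50 + 156200 * 10 / 1562 = 50 + 1000 = 1050
+	 *
+	 * bytes per T-NEG.
+	 */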
+
+ /*
+ * set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload
+ */
+/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
+ DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
+ return(FALSE) ;
+ }
+ if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
+ DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
+ return(FALSE) ;
+ } */
+
+	/* preliminary */
+ if (payload > MAX_PAYLOAD || overhead > 5000) {
+ DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
+ return(FALSE) ;
+ }
+
+ /*
+ * start the iterative allocation process if the payload or the overhead
+	 * differ from the parsed values
+ */
+ if (smc->mib.fddiESSPayload &&
+ ((u_long)payload != smc->mib.fddiESSPayload ||
+ (u_long)overhead != smc->mib.fddiESSOverhead)) {
+ smc->ess.raf_act_timer_poll = TRUE ;
+ smc->ess.timer_count = 0 ;
+ }
+
+ /*
+	 * evaluate the payload
+ */
+ if (payload) {
+ DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit on\n",0,0) ;
+ smc->ess.sync_bw_available = TRUE ;
+
+ smc->ess.sync_bw = overhead -
+ (long)smc->mib.m[MAC0].fddiMACT_Neg *
+ payload / 1562 ;
+ }
+ else {
+ DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit off\n",0,0) ;
+ smc->ess.sync_bw_available = FALSE ;
+ smc->ess.sync_bw = 0 ;
+ overhead = 0 ;
+ }
+
+ smc->mib.a[PATH0].fddiPATHSbaPayload = payload ;
+ smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ;
+
+
+ DB_ESSN(2,"tsync = %lx\n",smc->ess.sync_bw,0) ;
+
+ ess_config_fifo(smc) ;
+ set_formac_tsync(smc,smc->ess.sync_bw) ;
+ return(TRUE) ;
+}
+
+static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
+ int sba_cmd)
+{
+ struct smt_sba_chg *chg ;
+ SMbuf *mb ;
+ void *p ;
+
+ /*
+	 * get and initialize the response frame
+ */
+ if (sba_cmd == CHANGE_ALLOCATION) {
+ if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REPLY,
+ sizeof(struct smt_sba_chg))))
+ return ;
+ }
+ else {
+ if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REPLY,
+ sizeof(struct smt_sba_rep_res))))
+ return ;
+ }
+
+ chg = smtod(mb,struct smt_sba_chg *) ;
+ chg->smt.smt_tid = sm->smt_tid ;
+ chg->smt.smt_dest = sm->smt_source ;
+
+ /* set P15 */
+ chg->s_type.para.p_type = SMT_P0015 ;
+ chg->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ;
+ chg->s_type.res_type = SYNC_BW ;
+
+ /* set P16 */
+ chg->cmd.para.p_type = SMT_P0016 ;
+ chg->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ;
+ chg->cmd.sba_cmd = sba_cmd ;
+
+ /* set P320B */
+ chg->path.para.p_type = SMT_P320B ;
+ chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
+ chg->path.mib_index = SBAPATHINDEX ;
+ chg->path.path_pad = (u_short)NULL ;
+ chg->path.path_index = PRIMARY_RING ;
+
+ /* set P320F */
+ chg->payload.para.p_type = SMT_P320F ;
+ chg->payload.para.p_len = sizeof(struct smt_p_320f) - PARA_LEN ;
+ chg->payload.mib_index = SBAPATHINDEX ;
+ chg->payload.mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ;
+
+ /* set P3210 */
+ chg->overhead.para.p_type = SMT_P3210 ;
+ chg->overhead.para.p_len = sizeof(struct smt_p_3210) - PARA_LEN ;
+ chg->overhead.mib_index = SBAPATHINDEX ;
+ chg->overhead.mib_overhead = smc->mib.a[PATH0].fddiPATHSbaOverhead ;
+
+ if (sba_cmd == CHANGE_ALLOCATION) {
+ /* set P1A */
+ chg->cat.para.p_type = SMT_P001A ;
+ chg->cat.para.p_len = sizeof(struct smt_p_001a) - PARA_LEN ;
+ p = (void *) sm_to_para(smc,sm,SMT_P001A) ;
+ chg->cat.category = ((struct smt_p_001a *)p)->category ;
+ }
+ dump_smt(smc,(struct smt_header *)chg,"RAF") ;
+ ess_send_frame(smc,mb) ;
+}
+
+void ess_timer_poll(struct s_smc *smc)
+{
+ if (!smc->ess.raf_act_timer_poll)
+ return ;
+
+ DB_ESSN(2,"ESS: timer_poll\n",0,0) ;
+
+ smc->ess.timer_count++ ;
+ if (smc->ess.timer_count == 10) {
+ smc->ess.timer_count = 0 ;
+ ess_send_alc_req(smc) ;
+ }
+}
+
+static void ess_send_alc_req(struct s_smc *smc)
+{
+ struct smt_sba_alc_req *req ;
+ SMbuf *mb ;
+
+ /*
+	 * never send an allocation request when the requested payload and
+	 * overhead are zero; deallocate the bandwidth when no bandwidth is
+	 * parsed
+ */
+ if (!smc->mib.fddiESSPayload) {
+ smc->mib.fddiESSOverhead = 0 ;
+ }
+ else {
+ if (!smc->mib.fddiESSOverhead)
+ smc->mib.fddiESSOverhead = DEFAULT_OV ;
+ }
+
+ if (smc->mib.fddiESSOverhead ==
+ smc->mib.a[PATH0].fddiPATHSbaOverhead &&
+ smc->mib.fddiESSPayload ==
+ smc->mib.a[PATH0].fddiPATHSbaPayload){
+ smc->ess.raf_act_timer_poll = FALSE ;
+ smc->ess.timer_count = 7 ; /* next RAF alc req after 3 s */
+ return ;
+ }
+
+ /*
+	 * get and initialize the request frame
+ */
+ if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REQUEST,
+ sizeof(struct smt_sba_alc_req))))
+ return ;
+ req = smtod(mb,struct smt_sba_alc_req *) ;
+ req->smt.smt_tid = smc->ess.alloc_trans_id = smt_get_tid(smc) ;
+ req->smt.smt_dest = smt_sba_da ;
+
+ /* set P15 */
+ req->s_type.para.p_type = SMT_P0015 ;
+ req->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ;
+ req->s_type.res_type = SYNC_BW ;
+
+ /* set P16 */
+ req->cmd.para.p_type = SMT_P0016 ;
+ req->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ;
+ req->cmd.sba_cmd = REQUEST_ALLOCATION ;
+
+ /*
+	 * set the parameter type and parameter length of all used
+ * parameters
+ */
+
+ /* set P320B */
+ req->path.para.p_type = SMT_P320B ;
+ req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
+ req->path.mib_index = SBAPATHINDEX ;
+ req->path.path_pad = (u_short)NULL ;
+ req->path.path_index = PRIMARY_RING ;
+
+ /* set P0017 */
+ req->pl_req.para.p_type = SMT_P0017 ;
+ req->pl_req.para.p_len = sizeof(struct smt_p_0017) - PARA_LEN ;
+ req->pl_req.sba_pl_req = smc->mib.fddiESSPayload -
+ smc->mib.a[PATH0].fddiPATHSbaPayload ;
+
+ /* set P0018 */
+ req->ov_req.para.p_type = SMT_P0018 ;
+ req->ov_req.para.p_len = sizeof(struct smt_p_0018) - PARA_LEN ;
+ req->ov_req.sba_ov_req = smc->mib.fddiESSOverhead -
+ smc->mib.a[PATH0].fddiPATHSbaOverhead ;
+
+ /* set P320F */
+ req->payload.para.p_type = SMT_P320F ;
+ req->payload.para.p_len = sizeof(struct smt_p_320f) - PARA_LEN ;
+ req->payload.mib_index = SBAPATHINDEX ;
+ req->payload.mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ;
+
+ /* set P3210 */
+ req->overhead.para.p_type = SMT_P3210 ;
+ req->overhead.para.p_len = sizeof(struct smt_p_3210) - PARA_LEN ;
+ req->overhead.mib_index = SBAPATHINDEX ;
+ req->overhead.mib_overhead = smc->mib.a[PATH0].fddiPATHSbaOverhead ;
+
+ /* set P19 */
+ req->a_addr.para.p_type = SMT_P0019 ;
+ req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
+ req->a_addr.sba_pad = (u_short)NULL ;
+ req->a_addr.alloc_addr = null_addr ;
+
+ /* set P1A */
+ req->cat.para.p_type = SMT_P001A ;
+ req->cat.para.p_len = sizeof(struct smt_p_001a) - PARA_LEN ;
+ req->cat.category = smc->mib.fddiESSCategory ;
+
+ /* set P1B */
+ req->tneg.para.p_type = SMT_P001B ;
+ req->tneg.para.p_len = sizeof(struct smt_p_001b) - PARA_LEN ;
+ req->tneg.max_t_neg = smc->mib.fddiESSMaxTNeg ;
+
+ /* set P1C */
+ req->segm.para.p_type = SMT_P001C ;
+ req->segm.para.p_len = sizeof(struct smt_p_001c) - PARA_LEN ;
+ req->segm.min_seg_siz = smc->mib.fddiESSMinSegmentSize ;
+
+ dump_smt(smc,(struct smt_header *)req,"RAF") ;
+ ess_send_frame(smc,mb) ;
+}
+
+static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
+{
+ /*
+	 * check if the frame must be sent to the local ESS
+ */
+ if (smc->ess.local_sba_active) {
+ /*
+ * Send the Change Reply to the local SBA
+ */
+ DB_ESS("ESS:Send to the local SBA\n",0,0) ;
+ if (!smc->ess.sba_reply_pend)
+ smc->ess.sba_reply_pend = mb ;
+ else {
+ DB_ESS("Frame is lost - another frame was pending\n",0,0);
+ smt_free_mbuf(smc,mb) ;
+ }
+ }
+ else {
+ /*
+ * Send the SBA RAF Change Reply to the network
+ */
+ DB_ESS("ESS:Send to the network\n",0,0) ;
+ smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
+ }
+}
+
+void ess_para_change(struct s_smc *smc)
+{
+ (void)process_bw_alloc(smc,(long)smc->mib.a[PATH0].fddiPATHSbaPayload,
+ (long)smc->mib.a[PATH0].fddiPATHSbaOverhead) ;
+}
+
+static void ess_config_fifo(struct s_smc *smc)
+{
+ /*
+ * if nothing to do exit
+ */
+ if (smc->mib.a[PATH0].fddiPATHSbaPayload) {
+ if (smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON &&
+ (smc->hw.fp.fifo.fifo_config_mode&SEND_ASYNC_AS_SYNC) ==
+ smc->mib.fddiESSSynchTxMode) {
+ return ;
+ }
+ }
+ else {
+ if (!(smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON)) {
+ return ;
+ }
+ }
+
+ /*
+ * split up the FIFO and reinitialize the queues
+ */
+ formac_reinit_tx(smc) ;
+}
+
+#endif /* ESS */
+
+#endif /* no SLIM_SMT */
+
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
new file mode 100644
index 000000000000..76e78442fc24
--- /dev/null
+++ b/drivers/net/skfp/fplustm.c
@@ -0,0 +1,1561 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * FORMAC+ Driver for tag mode
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/supern_2.h"
+#include "can.c"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
+#endif
+
+#ifndef UNUSED
+#ifdef lint
+#define UNUSED(x) (x) = (x)
+#else
+#define UNUSED(x)
+#endif
+#endif
+
+#define FM_ADDRX (FM_ADDET|FM_EXGPA0|FM_EXGPA1)
+#define MS2BCLK(x) ((x)*12500L)
+#define US2BCLK(x) ((x)*1250L)
+
+/*
+ * prototypes for static function
+ */
+static void build_claim_beacon(struct s_smc *smc, u_long t_request);
+static int init_mac(struct s_smc *smc, int all);
+static void rtm_init(struct s_smc *smc);
+static void smt_split_up_fifo(struct s_smc *smc);
+
+#if (!defined(NO_SMT_PANIC) || defined(DEBUG))
+static char write_mdr_warning [] = "E350 write_mdr() FM_SNPPND is set\n";
+static char cam_warning [] = "E_SMT_004: CAM still busy\n";
+#endif
+
+#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
+
+#define CHECK_NPP() { unsigned k = 10000 ;\
+ while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
+ if (!k) { \
+ SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
+ } \
+ }
+
+#define CHECK_CAM() { unsigned k = 10 ;\
+ while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
+ if (!k) { \
+ SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
+ } \
+ }
+
+const struct fddi_addr fddi_broadcast = {{0xff,0xff,0xff,0xff,0xff,0xff}};
+static const struct fddi_addr null_addr = {{0,0,0,0,0,0}};
+static const struct fddi_addr dbeacon_multi = {{0x01,0x80,0xc2,0x00,0x01,0x00}};
+
+static const u_short my_said = 0xffff ; /* short address (n.u.) */
+static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */
+
+/*
+ * define my address
+ */
+#ifdef USE_CAN_ADDR
+#define MA smc->hw.fddi_canon_addr
+#else
+#define MA smc->hw.fddi_home_addr
+#endif
+
+
+/*
+ * useful interrupt bits
+ */
+static int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ;
+static int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0|
+ FM_STBURS | FM_STBURA0 ;
+
+ /* delete FM_SRBFL after tests */
+static int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL |
+ FM_SMYCLM ;
+static int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR |
+ FM_SERRCTR | FM_SLSTCTR |
+ FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ;
+
+static int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ;
+static int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ;
+
+static int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC |
+ FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ;
+
+
+static u_long mac_get_tneg(struct s_smc *smc)
+{
+ u_long tneg ;
+
+ tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
+ return((u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
+ 0xffe00000L)) ;
+}
+
+void mac_update_counter(struct s_smc *smc)
+{
+ smc->mib.m[MAC0].fddiMACFrame_Ct =
+ (smc->mib.m[MAC0].fddiMACFrame_Ct & 0xffff0000L)
+ + (u_short) inpw(FM_A(FM_FCNTR)) ;
+ smc->mib.m[MAC0].fddiMACLost_Ct =
+ (smc->mib.m[MAC0].fddiMACLost_Ct & 0xffff0000L)
+ + (u_short) inpw(FM_A(FM_LCNTR)) ;
+ smc->mib.m[MAC0].fddiMACError_Ct =
+ (smc->mib.m[MAC0].fddiMACError_Ct & 0xffff0000L)
+ + (u_short) inpw(FM_A(FM_ECNTR)) ;
+ smc->mib.m[MAC0].fddiMACT_Neg = mac_get_tneg(smc) ;
+#ifdef SMT_REAL_TOKEN_CT
+ /*
+ * If the token counter is emulated it is updated in smt_event.
+ */
+ TBD
+#else
+ smt_emulate_token_ct( smc, MAC0 );
+#endif
+}
+
+/*
+ * write long value into buffer memory over memory data register (MDR),
+ */
+static void write_mdr(struct s_smc *smc, u_long val)
+{
+ CHECK_NPP() ;
+ MDRW(val) ;
+}
+
+#if 0
+/*
+ * read long value from buffer memory over memory data register (MDR),
+ */
+static u_long read_mdr(struct s_smc *smc, unsigned int addr)
+{
+ long p ;
+ CHECK_NPP() ;
+ MARR(addr) ;
+ outpw(FM_A(FM_CMDREG1),FM_IRMEMWO) ;
+	CHECK_NPP() ;	/* needed for PCI to prevent timing violations */
+/* p = MDRR() ; */ /* bad read values if the workaround */
+ /* smc->hw.mc_dummy = *((short volatile far *)(addr)))*/
+ /* is used */
+ p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
+ p += (u_long)inpw(FM_A(FM_MDRL)) ;
+ return(p) ;
+}
+#endif
+
+/*
+ * clear buffer memory
+ */
+static void init_ram(struct s_smc *smc)
+{
+ u_short i ;
+
+ smc->hw.fp.fifo.rbc_ram_start = 0 ;
+ smc->hw.fp.fifo.rbc_ram_end =
+ smc->hw.fp.fifo.rbc_ram_start + RBC_MEM_SIZE ;
+ CHECK_NPP() ;
+ MARW(smc->hw.fp.fifo.rbc_ram_start) ;
+ for (i = smc->hw.fp.fifo.rbc_ram_start;
+ i < (u_short) (smc->hw.fp.fifo.rbc_ram_end-1); i++)
+ write_mdr(smc,0L) ;
+ /* Erase the last byte too */
+ write_mdr(smc,0L) ;
+}
+
+/*
+ * set receive FIFO pointer
+ */
+static void set_recvptr(struct s_smc *smc)
+{
+ /*
+ * initialize the pointer for receive queue 1
+ */
+ outpw(FM_A(FM_RPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* RPR1 */
+ outpw(FM_A(FM_SWPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* SWPR1 */
+ outpw(FM_A(FM_WPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* WPR1 */
+ outpw(FM_A(FM_EARV1),smc->hw.fp.fifo.tx_s_start-1) ; /* EARV1 */
+
+ /*
+ * initialize the pointer for receive queue 2
+ */
+ if (smc->hw.fp.fifo.rx2_fifo_size) {
+ outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
+ outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
+ outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
+ outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
+ }
+ else {
+ outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
+ outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
+ outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
+ outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
+ }
+}
+
+/*
+ * set transmit FIFO pointer
+ */
+static void set_txptr(struct s_smc *smc)
+{
+ outpw(FM_A(FM_CMDREG2),FM_IRSTQ) ; /* reset transmit queues */
+
+ /*
+ * initialize the pointer for asynchronous transmit queue
+ */
+ outpw(FM_A(FM_RPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* RPXA0 */
+ outpw(FM_A(FM_SWPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* SWPXA0 */
+ outpw(FM_A(FM_WPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* WPXA0 */
+ outpw(FM_A(FM_EAA0),smc->hw.fp.fifo.rx2_fifo_start-1) ; /* EAA0 */
+
+ /*
+ * initialize the pointer for synchronous transmit queue
+ */
+ if (smc->hw.fp.fifo.tx_s_size) {
+ outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_s_start) ;
+ outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_s_start) ;
+ outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_s_start) ;
+ outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
+ }
+ else {
+ outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
+ outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
+ outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
+ outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
+ }
+}
+
+/*
+ * init memory buffer management registers
+ */
+static void init_rbc(struct s_smc *smc)
+{
+ u_short rbc_ram_addr ;
+
+ /*
+ * set unused pointers or permanent pointers
+ */
+ rbc_ram_addr = smc->hw.fp.fifo.rx2_fifo_start - 1 ;
+
+ outpw(FM_A(FM_RPXA1),rbc_ram_addr) ; /* a1-send pointer */
+ outpw(FM_A(FM_WPXA1),rbc_ram_addr) ;
+ outpw(FM_A(FM_SWPXA1),rbc_ram_addr) ;
+ outpw(FM_A(FM_EAA1),rbc_ram_addr) ;
+
+ set_recvptr(smc) ;
+ set_txptr(smc) ;
+}
+
+/*
+ * init rx pointer
+ */
+static void init_rx(struct s_smc *smc)
+{
+ struct s_smt_rx_queue *queue ;
+
+ /*
+ * init all tx data structures for receive queue 1
+ */
+ smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ;
+ queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R1_CSR) ;
+ queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R1_DA) ;
+
+ /*
+ * init all tx data structures for receive queue 2
+ */
+ smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ;
+ queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R2_CSR) ;
+ queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R2_DA) ;
+}
+
+/*
+ * set the TSYNC register of the FORMAC to regulate synchronous transmission
+ */
+void set_formac_tsync(struct s_smc *smc, long sync_bw)
+{
+ outpw(FM_A(FM_TSYNC),(unsigned int) (((-sync_bw) >> 5) & 0xffff) ) ;
+}
+
+/*
+ * init all tx data structures
+ */
+static void init_tx(struct s_smc *smc)
+{
+ struct s_smt_tx_queue *queue ;
+
+ /*
+ * init all tx data structures for the synchronous queue
+ */
+ smc->hw.fp.tx[QUEUE_S] = queue = &smc->hw.fp.tx_q[QUEUE_S] ;
+ queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XS_CSR) ;
+ queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XS_DA) ;
+
+#ifdef ESS
+ set_formac_tsync(smc,smc->ess.sync_bw) ;
+#endif
+
+ /*
+ * init all tx data structures for the asynchronous queue 0
+ */
+ smc->hw.fp.tx[QUEUE_A0] = queue = &smc->hw.fp.tx_q[QUEUE_A0] ;
+ queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XA_CSR) ;
+ queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XA_DA) ;
+
+
+ llc_recover_tx(smc) ;
+}
+
+static void mac_counter_init(struct s_smc *smc)
+{
+ int i ;
+ u_long *ec ;
+
+ /*
+ * clear FORMAC+ frame-, lost- and error counter
+ */
+ outpw(FM_A(FM_FCNTR),0) ;
+ outpw(FM_A(FM_LCNTR),0) ;
+ outpw(FM_A(FM_ECNTR),0) ;
+ /*
+	 * clear internal error counter structure
+ */
+ ec = (u_long *)&smc->hw.fp.err_stats ;
+ for (i = (sizeof(struct err_st)/sizeof(long)) ; i ; i--)
+ *ec++ = 0L ;
+ smc->mib.m[MAC0].fddiMACRingOp_Ct = 0 ;
+}
+
+/*
+ * set FORMAC address, and t_request
+ */
+static void set_formac_addr(struct s_smc *smc)
+{
+ long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
+
+ outpw(FM_A(FM_SAID),my_said) ; /* set short address */
+ outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+ smc->hw.fddi_home_addr.a[5])) ;
+ outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+ smc->hw.fddi_home_addr.a[3])) ;
+ outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+ smc->hw.fddi_home_addr.a[1])) ;
+
+ outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
+
+ outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+ smc->hw.fp.group_addr.a[5])) ;
+ outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+ smc->hw.fp.group_addr.a[3])) ;
+ outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+ smc->hw.fp.group_addr.a[1])) ;
+
+	/* set t_request regs. (MSW & LSW of TRT) */
+ outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
+ outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+}
+
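+/*
+ * store a 32 bit value into a byte array, most significant byte first
+ */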
+static void set_int(char *p, int l)
+{
+ p[0] = (char)(l >> 24) ;
+ p[1] = (char)(l >> 16) ;
+ p[2] = (char)(l >> 8) ;
+ p[3] = (char)(l >> 0) ;
+}
+
+/*
+ * copy TX descriptor to buffer mem
+ * append FC field and MAC frame
+ * if more bit is set in descr
+ * append pointer to descriptor (endless loop)
+ * else
+ * append 'end of chain' pointer
+ */
+static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
+ unsigned off, int len)
+/* u_long td; transmit descriptor */
+/* struct fddi_mac *mac; mac frame pointer */
+/* unsigned off; start address within buffer memory */
+/* int len ;	length of the frame including the FC */
+{
+ int i ;
+ u_int *p ;
+
+ CHECK_NPP() ;
+ MARW(off) ; /* set memory address reg for writes */
+
+ p = (u_int *) mac ;
+ for (i = (len + 3)/4 ; i ; i--) {
+ if (i == 1) {
+ /* last word, set the tag bit */
+ outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
+ }
+ write_mdr(smc,MDR_REVERSE(*p)) ;
+ p++ ;
+ }
+
+ outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
+ write_mdr(smc,td) ; /* write over memory data reg to buffer */
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(module;tests;3)
+ How to test directed beacon frames
+ ----------------------------------------------------------------
+
+ o Insert a break point in the function build_claim_beacon()
+ before calling copy_tx_mac() for building the claim frame.
+ o Modify the RM3_DETECT case so that the RM6_DETECT state
+	  will always be entered from the RM3_DETECT state (function rmt_fsm(),
+ rmt.c)
+ o Compile the driver.
+ o Set the parameter TREQ in the protocol.ini or net.cfg to a
+ small value to make sure your station will win the claim
+ process.
+ o Start the driver.
+ o When you reach the break point, modify the SA and DA address
+ of the claim frame (e.g. SA = DA = 10005affffff).
+	o When you see RM3_DETECT and RM6_DETECT, observe the directed
+ beacon frames on the UPPSLANA.
+
+ END_MANUAL_ENTRY
+ */
+static void directed_beacon(struct s_smc *smc)
+{
+ SK_LOC_DECL(u_int,a[2]) ;
+
+ /*
+ * set UNA in frame
+ * enable FORMAC to send endless queue of directed beacon
+ * important: the UNA starts at byte 1 (not at byte 0)
+ */
+ * (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
+ a[1] = 0 ;
+ memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+
+ CHECK_NPP() ;
+ /* set memory address reg for writes */
+ MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
+ write_mdr(smc,MDR_REVERSE(a[0])) ;
+ outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
+ write_mdr(smc,MDR_REVERSE(a[1])) ;
+
+ outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
+}
+
+/*
+ setup claim & beacon pointer
+ NOTE :
+ special frame packets end with a pointer to their own
+ descriptor, and the MORE bit is set in the descriptor
+*/
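+
+/*
+ * Note on the descriptor word used below (illustrative arithmetic only):
+ * td = TX_DESCRIPTOR | (((len-1) & 3) << 27), i.e. the two low-order
+ * bits of (len-1) end up in bits 27/28.  For the 17 byte claim and
+ * beacon frames this term is 0, for the 23 byte directed beacon frame
+ * it is 2 << 27 == 0x10000000.
+ */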
+static void build_claim_beacon(struct s_smc *smc, u_long t_request)
+{
+ u_int td ;
+ int len ;
+ struct fddi_mac_sf *mac ;
+
+ /*
+ * build claim packet
+ */
+ len = 17 ;
+ td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
+ mac = &smc->hw.fp.mac_sfb ;
+ mac->mac_fc = FC_CLAIM ;
+ /* DA == SA in claim frame */
+ mac->mac_source = mac->mac_dest = MA ;
+ /* 2's complement */
+ set_int((char *)mac->mac_info,(int)t_request) ;
+
+ copy_tx_mac(smc,td,(struct fddi_mac *)mac,
+ smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF,len) ;
+ /* set CLAIM start pointer */
+ outpw(FM_A(FM_SACL),smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF) ;
+
+ /*
+ * build beacon packet
+ */
+ len = 17 ;
+ td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
+ mac->mac_fc = FC_BEACON ;
+ mac->mac_source = MA ;
+ mac->mac_dest = null_addr ; /* DA == 0 in beacon frame */
+ set_int((char *) mac->mac_info,((int)BEACON_INFO<<24) + 0 ) ;
+
+ copy_tx_mac(smc,td,(struct fddi_mac *)mac,
+ smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF,len) ;
+ /* set beacon start pointer */
+ outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF) ;
+
+ /*
+ * build directed beacon packet
+ * contains optional UNA
+ */
+ len = 23 ;
+ td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
+ mac->mac_fc = FC_BEACON ;
+ mac->mac_source = MA ;
+ mac->mac_dest = dbeacon_multi ; /* multicast */
+ set_int((char *) mac->mac_info,((int)DBEACON_INFO<<24) + 0 ) ;
+ set_int((char *) mac->mac_info+4,0) ;
+ set_int((char *) mac->mac_info+8,0) ;
+
+ copy_tx_mac(smc,td,(struct fddi_mac *)mac,
+ smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF,len) ;
+
+ /* end of claim/beacon queue */
+ outpw(FM_A(FM_EACB),smc->hw.fp.fifo.rx1_fifo_start-1) ;
+
+ outpw(FM_A(FM_WPXSF),0) ;
+ outpw(FM_A(FM_RPXSF),0) ;
+}
+
+static void formac_rcv_restart(struct s_smc *smc)
+{
+ /* enable receive function */
+ SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
+
+ outpw(FM_A(FM_CMDREG1),FM_ICLLR) ; /* clear receive lock */
+}
+
+void formac_tx_restart(struct s_smc *smc)
+{
+ outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
+ outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
+}
+
+static void enable_formac(struct s_smc *smc)
+{
+ /* set formac IMSK : 0 enables irq */
+ outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ;
+ outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ;
+ outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
+ outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ;
+ outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ;
+ outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ;
+}
+
+#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
+	/* The FORMACs tx complete IRQ should not be used any longer */
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;4)
+
+ void enable_tx_irq(smc, queue)
+ struct s_smc *smc ;
+ u_short queue ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ enable_tx_irq() enables the FORMACs transmit complete
+ interrupt of the queue.
+
+Para queue = QUEUE_S: synchronous queue
+ = QUEUE_A0: asynchronous queue
+
+Note After any ring operational change the transmit complete
+ interrupts are disabled.
+ The operating system dependent module must enable
+ the transmit complete interrupt of a queue,
+ - when it queues the first frame,
+		  because no transmit resources are
+		  available, and
+ - when it escapes from the function llc_restart_tx
+ while some frames are still queued.
+
+ END_MANUAL_ENTRY
+ */
+void enable_tx_irq(struct s_smc *smc, u_short queue)
+/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
+{
+ u_short imask ;
+
+ imask = ~(inpw(FM_A(FM_IMSK1U))) ;
+
+ if (queue == 0) {
+ outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMS)) ;
+ }
+ if (queue == 1) {
+ outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMA0)) ;
+ }
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;4)
+
+ void disable_tx_irq(smc, queue)
+ struct s_smc *smc ;
+ u_short queue ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ disable_tx_irq disables the FORMACs transmit complete
+ interrupt of the queue
+
+Para queue = QUEUE_S: synchronous queue
+ = QUEUE_A0: asynchronous queue
+
+Note The operating system dependent module should disable
+ the transmit complete interrupts if it escapes from the
+ function llc_restart_tx and no frames are queued.
+
+ END_MANUAL_ENTRY
+ */
+void disable_tx_irq(struct s_smc *smc, u_short queue)
+/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
+{
+ u_short imask ;
+
+ imask = ~(inpw(FM_A(FM_IMSK1U))) ;
+
+ if (queue == 0) {
+ outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMS)) ;
+ }
+ if (queue == 1) {
+ outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMA0)) ;
+ }
+}
+#endif
+
+static void disable_formac(struct s_smc *smc)
+{
+ /* clear formac IMSK : 1 disables irq */
+ outpw(FM_A(FM_IMSK1U),MW) ;
+ outpw(FM_A(FM_IMSK1L),MW) ;
+ outpw(FM_A(FM_IMSK2U),MW) ;
+ outpw(FM_A(FM_IMSK2L),MW) ;
+ outpw(FM_A(FM_IMSK3U),MW) ;
+ outpw(FM_A(FM_IMSK3L),MW) ;
+}
+
+
+static void mac_ring_up(struct s_smc *smc, int up)
+{
+ if (up) {
+ formac_rcv_restart(smc) ; /* enable receive function */
+ smc->hw.mac_ring_is_up = TRUE ;
+ llc_restart_tx(smc) ; /* TX queue */
+ }
+ else {
+ /* disable receive function */
+ SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
+
+ /* abort current transmit activity */
+ outpw(FM_A(FM_CMDREG2),FM_IACTR) ;
+
+ smc->hw.mac_ring_is_up = FALSE ;
+ }
+}
+
+/*--------------------------- ISR handling ----------------------------------*/
+/*
+ * mac1_irq is in drvfbi.c
+ */
+
+/*
+ * mac2_irq: status bits for the receive queue 1, and ring status
+ * ring status indication bits
+ */
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
+{
+ u_short change_s2l ;
+ u_short change_s2u ;
+
+ /* (jd) 22-Feb-1999
+ * Restart 2_DMax Timer after end of claiming or beaconing
+ */
+ if (code_s2u & (FM_SCLM|FM_SHICLM|FM_SBEC|FM_SOTRBEC)) {
+ queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
+ }
+ else if (code_s2l & (FM_STKISS)) {
+ queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
+ }
+
+ /*
+ * XOR current st bits with the last to avoid useless RMT event queuing
+ */
+ change_s2l = smc->hw.fp.s2l ^ code_s2l ;
+ change_s2u = smc->hw.fp.s2u ^ code_s2u ;
+
+ if ((change_s2l & FM_SRNGOP) ||
+ (!smc->hw.mac_ring_is_up && ((code_s2l & FM_SRNGOP)))) {
+ if (code_s2l & FM_SRNGOP) {
+ mac_ring_up(smc,1) ;
+ queue_event(smc,EVENT_RMT,RM_RING_OP) ;
+ smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
+ }
+ else {
+ mac_ring_up(smc,0) ;
+ queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
+ }
+ goto mac2_end ;
+ }
+ if (code_s2l & FM_SMISFRM) { /* missed frame */
+ smc->mib.m[MAC0].fddiMACNotCopied_Ct++ ;
+ }
+ if (code_s2u & (FM_SRCVOVR | /* recv. FIFO overflow */
+ FM_SRBFL)) { /* recv. buffer full */
+ smc->hw.mac_ct.mac_r_restart_counter++ ;
+/* formac_rcv_restart(smc) ; */
+ smt_stat_counter(smc,1) ;
+/* goto mac2_end ; */
+ }
+ if (code_s2u & FM_SOTRBEC)
+ queue_event(smc,EVENT_RMT,RM_OTHER_BEACON) ;
+ if (code_s2u & FM_SMYBEC)
+ queue_event(smc,EVENT_RMT,RM_MY_BEACON) ;
+ if (change_s2u & code_s2u & FM_SLOCLM) {
+ DB_RMTN(2,"RMT : lower claim received\n",0,0) ;
+ }
+ if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) {
+ /*
+ * This is my claim and that claim is not detected as a
+ * duplicate one.
+ */
+ queue_event(smc,EVENT_RMT,RM_MY_CLAIM) ;
+ }
+ if (code_s2l & FM_SDUPCLM) {
+ /*
+ * If a duplicate claim frame (same SA but T_Bid != T_Req)
+ * this flag will be set.
+ * In the RMT state machine we need a RM_VALID_CLAIM event
+ * to do the appropriate state change.
+ * RM(34c)
+ */
+ queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ;
+ }
+ if (change_s2u & code_s2u & FM_SHICLM) {
+ DB_RMTN(2,"RMT : higher claim received\n",0,0) ;
+ }
+ if ( (code_s2l & FM_STRTEXP) ||
+ (code_s2l & FM_STRTEXR) )
+ queue_event(smc,EVENT_RMT,RM_TRT_EXP) ;
+ if (code_s2l & FM_SMULTDA) {
+ /*
+		 * The MAC has found a second MAC with the same address.
+ * Signal dup_addr_test = failed to RMT state machine.
+ * RM(25)
+ */
+ smc->r.dup_addr_test = DA_FAILED ;
+ queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
+ }
+ if (code_s2u & FM_SBEC)
+ smc->hw.fp.err_stats.err_bec_stat++ ;
+ if (code_s2u & FM_SCLM)
+ smc->hw.fp.err_stats.err_clm_stat++ ;
+ if (code_s2l & FM_STVXEXP)
+ smc->mib.m[MAC0].fddiMACTvxExpired_Ct++ ;
+ if ((code_s2u & (FM_SBEC|FM_SCLM))) {
+ if (!(change_s2l & FM_SRNGOP) && (smc->hw.fp.s2l & FM_SRNGOP)) {
+ mac_ring_up(smc,0) ;
+ queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
+
+ mac_ring_up(smc,1) ;
+ queue_event(smc,EVENT_RMT,RM_RING_OP) ;
+ smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
+ }
+ }
+ if (code_s2l & FM_SPHINV)
+ smc->hw.fp.err_stats.err_phinv++ ;
+ if (code_s2l & FM_SSIFG)
+ smc->hw.fp.err_stats.err_sifg_det++ ;
+ if (code_s2l & FM_STKISS)
+ smc->hw.fp.err_stats.err_tkiss++ ;
+ if (code_s2l & FM_STKERR)
+ smc->hw.fp.err_stats.err_tkerr++ ;
+ if (code_s2l & FM_SFRMCTR)
+ smc->mib.m[MAC0].fddiMACFrame_Ct += 0x10000L ;
+ if (code_s2l & FM_SERRCTR)
+ smc->mib.m[MAC0].fddiMACError_Ct += 0x10000L ;
+ if (code_s2l & FM_SLSTCTR)
+ smc->mib.m[MAC0].fddiMACLost_Ct += 0x10000L ;
+ if (code_s2u & FM_SERRSF) {
+ SMT_PANIC(smc,SMT_E0114, SMT_E0114_MSG) ;
+ }
+mac2_end:
+ /* remember the old status */
+ smc->hw.fp.s2l = code_s2l ;
+ smc->hw.fp.s2u = code_s2u ;
+ outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
+}
+
+/*
+ * mac3_irq: receive queue 2 bits and address detection bits
+ */
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l)
+{
+ UNUSED(code_s3l) ;
+
+ if (code_s3u & (FM_SRCVOVR2 | /* recv. FIFO overflow */
+ FM_SRBFL2)) { /* recv. buffer full */
+ smc->hw.mac_ct.mac_r_restart_counter++ ;
+ smt_stat_counter(smc,1);
+ }
+
+
+ if (code_s3u & FM_SRPERRQ2) { /* parity error receive queue 2 */
+ SMT_PANIC(smc,SMT_E0115, SMT_E0115_MSG) ;
+ }
+ if (code_s3u & FM_SRPERRQ1) { /* parity error receive queue 1 */
+ SMT_PANIC(smc,SMT_E0116, SMT_E0116_MSG) ;
+ }
+}
+
+
+/*
+ * take formac offline
+ */
+static void formac_offline(struct s_smc *smc)
+{
+ outpw(FM_A(FM_CMDREG2),FM_IACTR) ;/* abort current transmit activity */
+
+ /* disable receive function */
+ SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
+
+ /* FORMAC+ 'Initialize Mode' */
+ SETMASK(FM_A(FM_MDREG1),FM_MINIT,FM_MMODE) ;
+
+ disable_formac(smc) ;
+ smc->hw.mac_ring_is_up = FALSE ;
+ smc->hw.hw_state = STOPPED ;
+}
+
+/*
+ * bring formac online
+ */
+static void formac_online(struct s_smc *smc)
+{
+ enable_formac(smc) ;
+ SETMASK(FM_A(FM_MDREG1),FM_MONLINE | FM_SELRA | MDR1INIT |
+ smc->hw.fp.rx_mode, FM_MMODE | FM_SELRA | FM_ADDRX) ;
+}
+
+/*
+ * FORMAC+ full init. (tx, rx, timer, counter, claim & beacon)
+ */
+int init_fplus(struct s_smc *smc)
+{
+ smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
+ smc->hw.fp.rx_mode = FM_MDAMA ;
+ smc->hw.fp.group_addr = fddi_broadcast ;
+ smc->hw.fp.func_addr = 0 ;
+ smc->hw.fp.frselreg_init = 0 ;
+
+ init_driver_fplus(smc) ;
+ if (smc->s.sas == SMT_DAS)
+ smc->hw.fp.mdr3init |= FM_MENDAS ;
+
+ smc->hw.mac_ct.mac_nobuf_counter = 0 ;
+ smc->hw.mac_ct.mac_r_restart_counter = 0 ;
+
+ smc->hw.fp.fm_st1u = (HW_PTR) ADDR(B0_ST1U) ;
+ smc->hw.fp.fm_st1l = (HW_PTR) ADDR(B0_ST1L) ;
+ smc->hw.fp.fm_st2u = (HW_PTR) ADDR(B0_ST2U) ;
+ smc->hw.fp.fm_st2l = (HW_PTR) ADDR(B0_ST2L) ;
+ smc->hw.fp.fm_st3u = (HW_PTR) ADDR(B0_ST3U) ;
+ smc->hw.fp.fm_st3l = (HW_PTR) ADDR(B0_ST3L) ;
+
+ smc->hw.fp.s2l = smc->hw.fp.s2u = 0 ;
+ smc->hw.mac_ring_is_up = 0 ;
+
+ mac_counter_init(smc) ;
+
+ /* convert BCLK units to symbol time */
+ smc->hw.mac_pa.t_neg = (u_long)0 ;
+ smc->hw.mac_pa.t_pri = (u_long)0 ;
+
+ /* make sure all PCI settings are correct */
+ mac_do_pci_fix(smc) ;
+
+ return(init_mac(smc,1)) ;
+ /* enable_formac(smc) ; */
+}
+
+static int init_mac(struct s_smc *smc, int all)
+{
+ u_short t_max,x ;
+ u_long time=0 ;
+
+ /*
+ * clear memory
+ */
+ outpw(FM_A(FM_MDREG1),FM_MINIT) ; /* FORMAC+ init mode */
+ set_formac_addr(smc) ;
+ outpw(FM_A(FM_MDREG1),FM_MMEMACT) ; /* FORMAC+ memory active mode */
+ /* Note: Mode register 2 is set here, in case parity is enabled. */
+ outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
+
+ if (all) {
+ init_ram(smc) ;
+ }
+ else {
+ /*
+ * reset the HPI, the Master and the BMUs
+ */
+ outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
+ time = hwt_quick_read(smc) ;
+ }
+
+ /*
+ * set all pointers, frames etc
+ */
+ smt_split_up_fifo(smc) ;
+
+ init_tx(smc) ;
+ init_rx(smc) ;
+ init_rbc(smc) ;
+
+ build_claim_beacon(smc,smc->mib.m[MAC0].fddiMACT_Req) ;
+
+ /* set RX threshold */
+ /* see Errata #SN2 Phantom receive overflow */
+ outpw(FM_A(FM_FRMTHR),14<<12) ; /* switch on */
+
+ /* set formac work mode */
+ outpw(FM_A(FM_MDREG1),MDR1INIT | FM_SELRA | smc->hw.fp.rx_mode) ;
+ outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
+ outpw(FM_A(FM_MDREG3),smc->hw.fp.mdr3init) ;
+ outpw(FM_A(FM_FRSELREG),smc->hw.fp.frselreg_init) ;
+
+ /* set timer */
+ /*
+ * errata #22 fplus:
+ * T_MAX must not be FFFE
+ * or one of FFDF, FFB8, FF91 (-0x27 etc..)
+ */
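+ /*
+ * (FFDF, FFB8 and FF91 are 0x27 apart and each leaves a remainder of
+ * 0x16 when divided by 0x27; the (t_max - x == 0x16) test below catches
+ * exactly these values, while FFFE is checked explicitly.)
+ */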
+ t_max = (u_short)(smc->mib.m[MAC0].fddiMACT_Max/32) ;
+ x = t_max/0x27 ;
+ x *= 0x27 ;
+ if ((t_max == 0xfffe) || (t_max - x == 0x16))
+ t_max-- ;
+ outpw(FM_A(FM_TMAX),(u_short)t_max) ;
+
+ /* BugFix for report #10204 */
+ if (smc->mib.m[MAC0].fddiMACTvxValue < (u_long) (- US2BCLK(52))) {
+ outpw(FM_A(FM_TVX), (u_short) (- US2BCLK(52))/255 & MB) ;
+ } else {
+ outpw(FM_A(FM_TVX),
+ (u_short)((smc->mib.m[MAC0].fddiMACTvxValue/255) & MB)) ;
+ }
+
+ outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
+ outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
+ outpw(FM_A(FM_CMDREG1),FM_ICLLR); /* clear receive lock */
+
+ /* Auto-unlock receive threshold for receive queues 1 and 2 */
+ outpw(FM_A(FM_UNLCKDLY),(0xff|(0xff<<8))) ;
+
+ rtm_init(smc) ; /* RT-Monitor */
+
+ if (!all) {
+ /*
+ * after 10ms, reset the BMUs and repair the rings
+ */
+ hwt_wait_time(smc,time,MS2BCLK(10)) ;
+ outpd(ADDR(B0_R1_CSR),CSR_SET_RESET) ;
+ outpd(ADDR(B0_XA_CSR),CSR_SET_RESET) ;
+ outpd(ADDR(B0_XS_CSR),CSR_SET_RESET) ;
+ outp(ADDR(B0_CTRL), CTRL_HPI_CLR) ;
+ outpd(ADDR(B0_R1_CSR),CSR_CLR_RESET) ;
+ outpd(ADDR(B0_XA_CSR),CSR_CLR_RESET) ;
+ outpd(ADDR(B0_XS_CSR),CSR_CLR_RESET) ;
+ if (!smc->hw.hw_is_64bit) {
+ outpd(ADDR(B4_R1_F), RX_WATERMARK) ;
+ outpd(ADDR(B5_XA_F), TX_WATERMARK) ;
+ outpd(ADDR(B5_XS_F), TX_WATERMARK) ;
+ }
+ smc->hw.hw_state = STOPPED ;
+ mac_drv_repair_descr(smc) ;
+ }
+ smc->hw.hw_state = STARTED ;
+
+ return(0) ;
+}
+
+
+/*
+ * called by CFM
+ */
+void config_mux(struct s_smc *smc, int mux)
+{
+ plc_config_mux(smc,mux) ;
+
+ SETMASK(FM_A(FM_MDREG1),FM_SELRA,FM_SELRA) ;
+}
+
+/*
+ * called by RMT
+ * enable CLAIM/BEACON interrupts
+ * (only called if these events are of interest; e.g. in DETECT state
+ * the interrupt must not be permanently enabled)
+ * RMT calls this function periodically (timer-driven polling)
+ */
+void sm_mac_check_beacon_claim(struct s_smc *smc)
+{
+ /* set formac IMSK : 0 enables irq */
+ outpw(FM_A(FM_IMSK2U),~(mac_imsk2u | mac_beacon_imsk2u)) ;
+ /* the driver must receive the directed beacons */
+ formac_rcv_restart(smc) ;
+ process_receive(smc) ;
+}
+
+/*-------------------------- interface functions ----------------------------*/
+/*
+ * control MAC layer (called by RMT)
+ */
+void sm_ma_control(struct s_smc *smc, int mode)
+{
+ switch(mode) {
+ case MA_OFFLINE :
+ /* Added to take the MAC offline in the RM0_ISOLATED state */
+ formac_offline(smc) ;
+ break ;
+ case MA_RESET :
+ (void)init_mac(smc,0) ;
+ break ;
+ case MA_BEACON :
+ formac_online(smc) ;
+ break ;
+ case MA_DIRECTED :
+ directed_beacon(smc) ;
+ break ;
+ case MA_TREQ :
+ /*
+ * no actions necessary, TREQ is already set
+ */
+ break ;
+ }
+}
+
+int sm_mac_get_tx_state(struct s_smc *smc)
+{
+ return((inpw(FM_A(FM_STMCHN))>>4)&7) ;
+}
+
+/*
+ * multicast functions
+ */
+
+static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
+ struct fddi_addr *user,
+ struct fddi_addr *own,
+ int del, int can)
+{
+ struct s_fpmc *tb ;
+ struct s_fpmc *slot ;
+ u_char *p ;
+ int i ;
+
+ /*
+ * set own = can(user)
+ */
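+ /*
+ * (can() presumably stands for the byte-wise bit-reversal applied via
+ * the canonical[] table below, i.e. the conversion between MSB-first
+ * FDDI bit order and canonical bit order.)
+ */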
+ *own = *user ;
+ if (can) {
+ p = own->a ;
+ for (i = 0 ; i < 6 ; i++, p++)
+ *p = canonical[*p] ;
+ }
+ slot = NULL;
+ for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
+ if (!tb->n) { /* not used */
+ if (!del && !slot) /* if !del save first free */
+ slot = tb ;
+ continue ;
+ }
+ if (memcmp((char *)&tb->a,(char *)own,6))
+ continue ;
+ return(tb) ;
+ }
+ return(slot) ; /* return first free or NULL */
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;2)
+
+ void mac_clear_multicast(smc)
+ struct s_smc *smc ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ Clear all multicast entries
+
+ END_MANUAL_ENTRY()
+ */
+void mac_clear_multicast(struct s_smc *smc)
+{
+ struct s_fpmc *tb ;
+ int i ;
+
+ smc->hw.fp.os_slots_used = 0 ; /* note the SMT addresses */
+ /* will not be deleted */
+ for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
+ if (!tb->perm) {
+ tb->n = 0 ;
+ }
+ }
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;2)
+
+ int mac_set_func_addr(smc,f_addr)
+ struct s_smc *smc ;
+ u_long f_addr ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ Set a Token-Ring functional address, the address will
+ be activated after calling mac_update_multicast()
+
+Para f_addr functional bits in non-canonical format
+
+Returns 0: always success
+
+ END_MANUAL_ENTRY()
+ */
+int mac_set_func_addr(struct s_smc *smc, u_long f_addr)
+{
+ smc->hw.fp.func_addr = f_addr ;
+ return(0) ;
+}
+
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;2)
+
+ int mac_add_multicast(smc,addr,can)
+ struct s_smc *smc ;
+ struct fddi_addr *addr ;
+ int can ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ Add an entry to the multicast table
+
+Para addr pointer to a multicast address
+ can = 0: the multicast address has the physical format
+ = 1: the multicast address has the canonical format
+ | 0x80 permanent
+
+Returns 0: success
+ 1: address table full
+
+Note After a 'driver reset' or a 'station set address' all
+ entries of the multicast table are cleared.
+ In this case the driver has to fill the multicast table again.
+ After the operating-system-dependent module has filled
+ the multicast table, it must call mac_update_multicast()
+ to activate the new multicast addresses!
+
+ END_MANUAL_ENTRY()
+ */
+int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
+{
+ SK_LOC_DECL(struct fddi_addr,own) ;
+ struct s_fpmc *tb ;
+
+ /*
+ * check if there are free table entries
+ */
+ if (can & 0x80) {
+ if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
+ return(1) ;
+ }
+ }
+ else {
+ if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
+ return(1) ;
+ }
+ }
+
+ /*
+ * find empty slot
+ */
+ if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
+ return(1) ;
+ tb->n++ ;
+ tb->a = own ;
+ tb->perm = (can & 0x80) ? 1 : 0 ;
+
+ if (can & 0x80)
+ smc->hw.fp.smt_slots_used++ ;
+ else
+ smc->hw.fp.os_slots_used++ ;
+
+ return(0) ;
+}
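+
+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * following the note above, an OS-dependent module would typically refill
+ * and then activate the table after a reset, e.g.
+ *
+ * mac_clear_multicast(smc) ;
+ * for each multicast address mc to be received:
+ * if (mac_add_multicast(smc,&mc,1) != 0)
+ * fall back to mac_set_rx_mode(smc,RX_ENABLE_ALLMULTI) ;
+ * mac_update_multicast(smc) ;
+ */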
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;2)
+
+ void mac_del_multicast(smc,addr,can)
+ struct s_smc *smc ;
+ struct fddi_addr *addr ;
+ int can ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ Delete an entry from the multicast table
+
+Para addr pointer to a multicast address
+ can = 0: the multicast address has the physical format
+ = 1: the multicast address has the canonical format
+ | 0x80 permanent
+
+ END_MANUAL_ENTRY()
+ */
+void mac_del_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
+{
+ SK_LOC_DECL(struct fddi_addr,own) ;
+ struct s_fpmc *tb ;
+
+ if (!(tb = mac_get_mc_table(smc,addr,&own,1,can & ~0x80)))
+ return ;
+ /*
+ * permanent addresses must be deleted with the perm bit set,
+ * and vice versa
+ */
+ if (( tb->perm && (can & 0x80)) ||
+ (!tb->perm && !(can & 0x80))) {
+ /*
+ * delete it
+ */
+ if (tb->n) {
+ tb->n-- ;
+ if (tb->perm) {
+ smc->hw.fp.smt_slots_used-- ;
+ }
+ else {
+ smc->hw.fp.os_slots_used-- ;
+ }
+ }
+ }
+}
+
+/*
+ * mode
+ */
+
+#define RX_MODE_PROM 0x1
+#define RX_MODE_ALL_MULTI 0x2
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;2)
+
+ void mac_update_multicast(smc)
+ struct s_smc *smc ;
+
+Function DOWNCALL (SMT, fplustm.c)
+ Update FORMAC multicast registers
+
+ END_MANUAL_ENTRY()
+ */
+void mac_update_multicast(struct s_smc *smc)
+{
+ struct s_fpmc *tb ;
+ u_char *fu ;
+ int i ;
+
+ /*
+ * invalidate the CAM
+ */
+ outpw(FM_A(FM_AFCMD),FM_IINV_CAM) ;
+
+ /*
+ * set the functional address
+ */
+ if (smc->hw.fp.func_addr) {
+ fu = (u_char *) &smc->hw.fp.func_addr ;
+ outpw(FM_A(FM_AFMASK2),0xffff) ;
+ outpw(FM_A(FM_AFMASK1),(u_short) ~((fu[0] << 8) + fu[1])) ;
+ outpw(FM_A(FM_AFMASK0),(u_short) ~((fu[2] << 8) + fu[3])) ;
+ outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
+ outpw(FM_A(FM_AFCOMP2), 0xc000) ;
+ outpw(FM_A(FM_AFCOMP1), 0x0000) ;
+ outpw(FM_A(FM_AFCOMP0), 0x0000) ;
+ outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
+ }
+
+ /*
+ * set the mask and the personality register(s)
+ */
+ outpw(FM_A(FM_AFMASK0),0xffff) ;
+ outpw(FM_A(FM_AFMASK1),0xffff) ;
+ outpw(FM_A(FM_AFMASK2),0xffff) ;
+ outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
+
+ for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) {
+ if (tb->n) {
+ CHECK_CAM() ;
+
+ /*
+ * write the multicast address into the CAM
+ */
+ outpw(FM_A(FM_AFCOMP2),
+ (u_short)((tb->a.a[0]<<8)+tb->a.a[1])) ;
+ outpw(FM_A(FM_AFCOMP1),
+ (u_short)((tb->a.a[2]<<8)+tb->a.a[3])) ;
+ outpw(FM_A(FM_AFCOMP0),
+ (u_short)((tb->a.a[4]<<8)+tb->a.a[5])) ;
+ outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
+ }
+ }
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;3)
+
+ void mac_set_rx_mode(smc,mode)
+ struct s_smc *smc ;
+ int mode ;
+
+Function DOWNCALL/INTERN (SMT, fplustm.c)
+ This function enables / disables the selected receive mode.
+ Don't call this function if the hardware module is
+ used -- use mac_drv_rx_mode() instead.
+
+Para mode = 1 RX_ENABLE_ALLMULTI enable all multicasts
+ 2 RX_DISABLE_ALLMULTI disable "enable all multicasts"
+ 3 RX_ENABLE_PROMISC enable promiscuous mode
+ 4 RX_DISABLE_PROMISC disable promiscuous mode
+ 5 RX_ENABLE_NSA enable reception of NSA frames
+ 6 RX_DISABLE_NSA disable reception of NSA frames
+
+Note The selected receive modes will be lost after 'driver reset'
+ or 'set station address'
+
+ END_MANUAL_ENTRY
+ */
+void mac_set_rx_mode(struct s_smc *smc, int mode)
+{
+ switch (mode) {
+ case RX_ENABLE_ALLMULTI :
+ smc->hw.fp.rx_prom |= RX_MODE_ALL_MULTI ;
+ break ;
+ case RX_DISABLE_ALLMULTI :
+ smc->hw.fp.rx_prom &= ~RX_MODE_ALL_MULTI ;
+ break ;
+ case RX_ENABLE_PROMISC :
+ smc->hw.fp.rx_prom |= RX_MODE_PROM ;
+ break ;
+ case RX_DISABLE_PROMISC :
+ smc->hw.fp.rx_prom &= ~RX_MODE_PROM ;
+ break ;
+ case RX_ENABLE_NSA :
+ smc->hw.fp.nsa_mode = FM_MDAMA ;
+ smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
+ smc->hw.fp.nsa_mode ;
+ break ;
+ case RX_DISABLE_NSA :
+ smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
+ smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
+ smc->hw.fp.nsa_mode ;
+ break ;
+ }
+ if (smc->hw.fp.rx_prom & RX_MODE_PROM) {
+ smc->hw.fp.rx_mode = FM_MLIMPROM ;
+ }
+ else if (smc->hw.fp.rx_prom & RX_MODE_ALL_MULTI) {
+ smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode | FM_EXGPA0 ;
+ }
+ else
+ smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode ;
+ SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
+ mac_update_multicast(smc) ;
+}
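+
+/*
+ * Illustrative example (not from the original sources): enabling
+ * promiscuous reception amounts to
+ *
+ * mac_set_rx_mode(smc,RX_ENABLE_PROMISC) ;
+ *
+ * As noted above, drivers built on the hardware module should use
+ * mac_drv_rx_mode() instead.
+ */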
+
+/*
+ BEGIN_MANUAL_ENTRY(module;tests;3)
+ How to test the Restricted Token Monitor
+ ----------------------------------------------------------------
+
+ o Insert a break point in the function rtm_irq()
+ o Remove all stations with a restricted token monitor from the
+ network.
+ o Connect a UPPS ISA or EISA station to the network.
+ o Give the FORMAC of the UPPS station the command to send
+ restricted tokens until the ring becomes unstable.
+ o Now connect your test client.
+ o The restricted token monitor should detect the restricted token,
+ and your break point will be reached.
+ o You can observe how the station cleans the ring.
+
+ END_MANUAL_ENTRY
+ */
+void rtm_irq(struct s_smc *smc)
+{
+ outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; /* clear IRQ */
+ if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
+ outpw(FM_A(FM_CMDREG1),FM_ICL) ; /* force claim */
+ DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ;
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
+ (u_long) FDDI_SMT_EVENT,
+ (u_long) FDDI_RTT, smt_get_event_word(smc));
+ }
+ outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable RTM monitoring */
+}
+
+static void rtm_init(struct s_smc *smc)
+{
+ outpd(ADDR(B2_RTM_INI),0) ; /* timer = 0 */
+ outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable IRQ */
+}
+
+void rtm_set_timer(struct s_smc *smc)
+{
+ /*
+ * MIB timer and hardware timer have the same resolution of 80 ns
+ */
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n",
+ (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
+ outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
+}
+
+static void smt_split_up_fifo(struct s_smc *smc)
+{
+
+/*
+ BEGIN_MANUAL_ENTRY(module;mem;1)
+ -------------------------------------------------------------
+ RECEIVE BUFFER MEMORY DIVISION
+ -------------------------------------------------------------
+
+ R1_RxD == SMT_R1_RXD_COUNT
+ R2_RxD == SMT_R2_RXD_COUNT
+
+ SMT_R1_RXD_COUNT must not be zero
+
+ | R1_RxD R2_RxD |R1_RxD R2_RxD | R1_RxD R2_RxD
+ | x 0 | x 1-3 | x > 3
+ ----------------------------------------------------------------------
+ | 63.75 kB | 54.75 kB | R1_RxD
+ rx queue 1 | RX_FIFO_SPACE | RX_LARGE_FIFO| ------------- * 63.75 kB
+ | | | R1_RxD+R2_RxD
+ ----------------------------------------------------------------------
+ | | 9 kB | R2_RxD
+ rx queue 2 | 0 kB | RX_SMALL_FIFO| ------------- * 63.75 kB
+ | (not used) | | R1_RxD+R2_RxD
+
+ END_MANUAL_ENTRY
+*/
+
+ if (SMT_R1_RXD_COUNT == 0) {
+ SMT_PANIC(smc,SMT_E0117, SMT_E0117_MSG) ;
+ }
+
+ switch(SMT_R2_RXD_COUNT) {
+ case 0:
+ smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE ;
+ smc->hw.fp.fifo.rx2_fifo_size = 0 ;
+ break ;
+ case 1:
+ case 2:
+ case 3:
+ smc->hw.fp.fifo.rx1_fifo_size = RX_LARGE_FIFO ;
+ smc->hw.fp.fifo.rx2_fifo_size = RX_SMALL_FIFO ;
+ break ;
+ default: /* this is not the real default */
+ smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE *
+ SMT_R1_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
+ smc->hw.fp.fifo.rx2_fifo_size = RX_FIFO_SPACE *
+ SMT_R2_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
+ break ;
+ }
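+
+ /*
+ * Example for the default (proportional) split above, purely as an
+ * illustration: with SMT_R1_RXD_COUNT == 8 and SMT_R2_RXD_COUNT == 4,
+ * receive queue 1 gets 8/12 and receive queue 2 gets 4/12 of
+ * RX_FIFO_SPACE.
+ */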
+
+/*
+ BEGIN_MANUAL_ENTRY(module;mem;1)
+ -------------------------------------------------------------
+ TRANSMIT BUFFER MEMORY DIVISION
+ -------------------------------------------------------------
+
+
+ | no sync bw | sync bw available and | sync bw available and
+ | available | SynchTxMode = SPLIT | SynchTxMode = ALL
+ -----------------------------------------------------------------------
+ sync tx | 0 kB | 32 kB | 55 kB
+ queue | | TX_MEDIUM_FIFO | TX_LARGE_FIFO
+ -----------------------------------------------------------------------
+ async tx | 64 kB | 32 kB | 9 kB
+ queue | TX_FIFO_SPACE| TX_MEDIUM_FIFO | TX_SMALL_FIFO
+
+ END_MANUAL_ENTRY
+*/
+
+ /*
+ * set the tx mode bits
+ */
+ if (smc->mib.a[PATH0].fddiPATHSbaPayload) {
+#ifdef ESS
+ smc->hw.fp.fifo.fifo_config_mode |=
+ smc->mib.fddiESSSynchTxMode | SYNC_TRAFFIC_ON ;
+#endif
+ }
+ else {
+ smc->hw.fp.fifo.fifo_config_mode &=
+ ~(SEND_ASYNC_AS_SYNC|SYNC_TRAFFIC_ON) ;
+ }
+
+ /*
+ * split up the FIFO
+ */
+ if (smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON) {
+ if (smc->hw.fp.fifo.fifo_config_mode & SEND_ASYNC_AS_SYNC) {
+ smc->hw.fp.fifo.tx_s_size = TX_LARGE_FIFO ;
+ smc->hw.fp.fifo.tx_a0_size = TX_SMALL_FIFO ;
+ }
+ else {
+ smc->hw.fp.fifo.tx_s_size = TX_MEDIUM_FIFO ;
+ smc->hw.fp.fifo.tx_a0_size = TX_MEDIUM_FIFO ;
+ }
+ }
+ else {
+ smc->hw.fp.fifo.tx_s_size = 0 ;
+ smc->hw.fp.fifo.tx_a0_size = TX_FIFO_SPACE ;
+ }
+
+ smc->hw.fp.fifo.rx1_fifo_start = smc->hw.fp.fifo.rbc_ram_start +
+ RX_FIFO_OFF ;
+ smc->hw.fp.fifo.tx_s_start = smc->hw.fp.fifo.rx1_fifo_start +
+ smc->hw.fp.fifo.rx1_fifo_size ;
+ smc->hw.fp.fifo.tx_a0_start = smc->hw.fp.fifo.tx_s_start +
+ smc->hw.fp.fifo.tx_s_size ;
+ smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
+ smc->hw.fp.fifo.tx_a0_size ;
+
+ DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ;
+ DB_SMT("rbc_ram_start = %x rbc_ram_end = %x\n",
+ smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ;
+ DB_SMT("rx1_fifo_start = %x tx_s_start = %x\n",
+ smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ;
+ DB_SMT("tx_a0_start = %x rx2_fifo_start = %x\n",
+ smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ;
+}
+
+void formac_reinit_tx(struct s_smc *smc)
+{
+ /*
+ * Split up the FIFO and reinitialize the MAC if synchronous
+ * bandwidth becomes available but no synchronous queue is
+ * configured.
+ */
+ if (!smc->hw.fp.fifo.tx_s_size && smc->mib.a[PATH0].fddiPATHSbaPayload){
+ (void)init_mac(smc,0) ;
+ }
+}
+
diff --git a/drivers/net/skfp/h/cmtdef.h b/drivers/net/skfp/h/cmtdef.h
new file mode 100644
index 000000000000..603982debc71
--- /dev/null
+++ b/drivers/net/skfp/h/cmtdef.h
@@ -0,0 +1,763 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _CMTDEF_
+#define _CMTDEF_
+
+/* **************************************************************** */
+
+/*
+ * implementation specific constants
+ * MODIFY THE FOLLOWING THREE DEFINES
+ */
+#define AMDPLC /* if Amd PLC chip used */
+#ifdef CONC
+#define NUMPHYS 12 /* 2 for SAS or DAS, more for Concentrator */
+#else
+#ifdef CONC_II
+#define NUMPHYS 24 /* 2 for SAS or DAS, more for Concentrator */
+#else
+#define NUMPHYS 2 /* 2 for SAS or DAS, more for Concentrator */
+#endif
+#endif
+#define NUMMACS 1 /* only 1 supported at the moment */
+#define NUMPATHS 2 /* primary and secondary path supported */
+
+/*
+ * DO NOT MODIFY BEYOND THIS POINT
+ */
+
+/* **************************************************************** */
+
+#if NUMPHYS > 2
+#define CONCENTRATOR
+#endif
+
+/*
+ * Definitions for convenient LINT usage
+ */
+#ifdef lint
+#define LINT_USE(x) (x)=(x)
+#else
+#define LINT_USE(x)
+#endif
+
+#ifdef DEBUG
+#define DB_PR(flag,a,b,c) { if (flag) printf(a,b,c) ; }
+#else
+#define DB_PR(flag,a,b,c)
+#endif
+
+#ifdef DEBUG_BRD
+#define DB_ECM(a,b,c) DB_PR((smc->debug.d_smt&1),a,b,c)
+#define DB_ECMN(n,a,b,c) DB_PR((smc->debug.d_ecm >=(n)),a,b,c)
+#define DB_RMT(a,b,c) DB_PR((smc->debug.d_smt&2),a,b,c)
+#define DB_RMTN(n,a,b,c) DB_PR((smc->debug.d_rmt >=(n)),a,b,c)
+#define DB_CFM(a,b,c) DB_PR((smc->debug.d_smt&4),a,b,c)
+#define DB_CFMN(n,a,b,c) DB_PR((smc->debug.d_cfm >=(n)),a,b,c)
+#define DB_PCM(a,b,c) DB_PR((smc->debug.d_smt&8),a,b,c)
+#define DB_PCMN(n,a,b,c) DB_PR((smc->debug.d_pcm >=(n)),a,b,c)
+#define DB_SMT(a,b,c) DB_PR((smc->debug.d_smtf),a,b,c)
+#define DB_SMTN(n,a,b,c) DB_PR((smc->debug.d_smtf >=(n)),a,b,c)
+#define DB_SBA(a,b,c) DB_PR((smc->debug.d_sba),a,b,c)
+#define DB_SBAN(n,a,b,c) DB_PR((smc->debug.d_sba >=(n)),a,b,c)
+#define DB_ESS(a,b,c) DB_PR((smc->debug.d_ess),a,b,c)
+#define DB_ESSN(n,a,b,c) DB_PR((smc->debug.d_ess >=(n)),a,b,c)
+#else
+#define DB_ECM(a,b,c) DB_PR((debug.d_smt&1),a,b,c)
+#define DB_ECMN(n,a,b,c) DB_PR((debug.d_ecm >=(n)),a,b,c)
+#define DB_RMT(a,b,c) DB_PR((debug.d_smt&2),a,b,c)
+#define DB_RMTN(n,a,b,c) DB_PR((debug.d_rmt >=(n)),a,b,c)
+#define DB_CFM(a,b,c) DB_PR((debug.d_smt&4),a,b,c)
+#define DB_CFMN(n,a,b,c) DB_PR((debug.d_cfm >=(n)),a,b,c)
+#define DB_PCM(a,b,c) DB_PR((debug.d_smt&8),a,b,c)
+#define DB_PCMN(n,a,b,c) DB_PR((debug.d_pcm >=(n)),a,b,c)
+#define DB_SMT(a,b,c) DB_PR((debug.d_smtf),a,b,c)
+#define DB_SMTN(n,a,b,c) DB_PR((debug.d_smtf >=(n)),a,b,c)
+#define DB_SBA(a,b,c) DB_PR((debug.d_sba),a,b,c)
+#define DB_SBAN(n,a,b,c) DB_PR((debug.d_sba >=(n)),a,b,c)
+#define DB_ESS(a,b,c) DB_PR((debug.d_ess),a,b,c)
+#define DB_ESSN(n,a,b,c) DB_PR((debug.d_ess >=(n)),a,b,c)
+#endif
+
+#ifndef SS_NOT_DS
+#define SK_LOC_DECL(type,var) type var
+#else
+#define SK_LOC_DECL(type,var) static type var
+#endif
+/*
+ * PHYs and PORTS
+ * Note: Don't touch the definition of PA and PB. Those might be used
+ * by some "for" loops.
+ */
+#define PA 0
+#define PB 1
+#if defined(SUPERNET_3) || defined(CONC_II)
+/*
+ * The port indices have to be different,
+ * because the MAC output goes through the second PLC.
+ * Conc II: It has to be the first port in the row.
+ */
+#define PS 0 /* Internal PLC which is the same as PA */
+#else
+#define PS 1
+#endif
+#define PM 2 /* PM .. PA+NUMPHYS-1 */
+
+/*
+ * PHY types - as in path descriptor 'fddiPHYType'
+ */
+#define TA 0 /* A port */
+#define TB 1 /* B port */
+#define TS 2 /* S port */
+#define TM 3 /* M port */
+#define TNONE 4
+
+
+/*
+ * indexes in MIB
+ */
+#define INDEX_MAC 1
+#define INDEX_PATH 1
+#define INDEX_PORT 1
+
+
+/*
+ * policies
+ */
+#define POLICY_AA (1<<0) /* reject AA */
+#define POLICY_AB (1<<1) /* reject AB */
+#define POLICY_AS (1<<2) /* reject AS */
+#define POLICY_AM (1<<3) /* reject AM */
+#define POLICY_BA (1<<4) /* reject BA */
+#define POLICY_BB (1<<5) /* reject BB */
+#define POLICY_BS (1<<6) /* reject BS */
+#define POLICY_BM (1<<7) /* reject BM */
+#define POLICY_SA (1<<8) /* reject SA */
+#define POLICY_SB (1<<9) /* reject SB */
+#define POLICY_SS (1<<10) /* reject SS */
+#define POLICY_SM (1<<11) /* reject SM */
+#define POLICY_MA (1<<12) /* reject MA */
+#define POLICY_MB (1<<13) /* reject MB */
+#define POLICY_MS (1<<14) /* reject MS */
+#define POLICY_MM (1<<15) /* reject MM */
+
+/*
+ * commands
+ */
+
+/*
+ * EVENTS
+ * event classes
+ */
+#define EVENT_ECM 1 /* event class ECM */
+#define EVENT_CFM 2 /* event class CFM */
+#define EVENT_RMT 3 /* event class RMT */
+#define EVENT_SMT 4 /* event class SMT */
+#define EVENT_PCM 5 /* event class PCM */
+#define EVENT_PCMA 5 /* event class PCMA */
+#define EVENT_PCMB 6 /* event class PCMB */
+
+/* WARNING :
+ * EVENT_PCM* must be last in the above list.
+ * If more than two ports are used, EVENT_PCMA .. EVENT_PCMA+NUMPHYS-1
+ * are used !
+ */
+
+#define EV_TOKEN(class,event) (((u_long)(class)<<16L)|((u_long)(event)))
+#define EV_T_CLASS(token) ((int)((token)>>16)&0xffff)
+#define EV_T_EVENT(token) ((int)(token)&0xffff)
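+
+/*
+ * Example: EV_TOKEN(EVENT_RMT,RM_RING_OP) packs class 3 and event 1 into
+ * 0x00030001; EV_T_CLASS() and EV_T_EVENT() recover 3 and 1 again.
+ */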
+
+/*
+ * ECM events
+ */
+#define EC_CONNECT 1 /* connect request */
+#define EC_DISCONNECT 2 /* disconnect request */
+#define EC_TRACE_PROP 3 /* trace propagation */
+#define EC_PATH_TEST 4 /* path test */
+#define EC_TIMEOUT_TD 5 /* timer TD_min */
+#define EC_TIMEOUT_TMAX 6 /* timer trace_max */
+#define EC_TIMEOUT_IMAX 7 /* timer I_max */
+#define EC_TIMEOUT_INMAX 8 /* timer IN_max */
+#define EC_TEST_DONE 9 /* path test done */
+
+/*
+ * CFM events
+ */
+#define CF_LOOP 1 /* cf_loop flag from PCM */
+#define CF_LOOP_A 1 /* cf_loop flag from PCM */
+#define CF_LOOP_B 2 /* cf_loop flag from PCM */
+#define CF_JOIN 3 /* cf_join flag from PCM */
+#define CF_JOIN_A 3 /* cf_join flag from PCM */
+#define CF_JOIN_B 4 /* cf_join flag from PCM */
+
+/*
+ * PCM events
+ */
+#define PC_START 1
+#define PC_STOP 2
+#define PC_LOOP 3
+#define PC_JOIN 4
+#define PC_SIGNAL 5
+#define PC_REJECT 6
+#define PC_MAINT 7
+#define PC_TRACE 8
+#define PC_PDR 9
+#define PC_ENABLE 10
+#define PC_DISABLE 11
+
+/*
+ * must be ordered as in LineStateType
+ */
+#define PC_QLS 12
+#define PC_ILS 13
+#define PC_MLS 14
+#define PC_HLS 15
+#define PC_LS_PDR 16
+#define PC_LS_NONE 17
+#define LS2MIB(x) ((x)-PC_QLS)
+#define MIB2LS(x) ((x)+PC_QLS)
+
+#define PC_TIMEOUT_TB_MAX 18 /* timer TB_max */
+#define PC_TIMEOUT_TB_MIN 19 /* timer TB_min */
+#define PC_TIMEOUT_C_MIN 20 /* timer C_Min */
+#define PC_TIMEOUT_T_OUT 21 /* timer T_Out */
+#define PC_TIMEOUT_TL_MIN 22 /* timer TL_Min */
+#define PC_TIMEOUT_T_NEXT 23 /* timer t_next[] */
+#define PC_TIMEOUT_LCT 24
+#define PC_NSE 25 /* NOISE hardware timer */
+#define PC_LEM 26 /* LEM done */
+
+/*
+ * RMT events (comment columns: meaning, source)
+ */
+#define RM_RING_OP 1 /* ring operational MAC */
+#define RM_RING_NON_OP 2 /* ring not operational MAC */
+#define RM_MY_BEACON 3 /* recvd my beacon MAC */
+#define RM_OTHER_BEACON 4 /* recvd other beacon MAC */
+#define RM_MY_CLAIM 5 /* recvd my claim MAC */
+#define RM_TRT_EXP 6 /* TRT exp MAC */
+#define RM_VALID_CLAIM 7 /* claim from dup addr MAC */
+#define RM_JOIN 8 /* signal rm_join CFM */
+#define RM_LOOP 9 /* signal rm_loop CFM */
+#define RM_DUP_ADDR 10 /* dup_addr_test change SMT-NIF */
+#define RM_ENABLE_FLAG 11 /* enable flag */
+
+#define RM_TIMEOUT_NON_OP 12 /* timeout T_Non_OP */
+#define RM_TIMEOUT_T_STUCK 13 /* timeout T_Stuck */
+#define RM_TIMEOUT_ANNOUNCE 14 /* timeout T_Announce */
+#define RM_TIMEOUT_T_DIRECT 15 /* timeout T_Direct */
+#define RM_TIMEOUT_D_MAX 16 /* timeout D_Max */
+#define RM_TIMEOUT_POLL 17 /* claim/beacon poller */
+#define RM_TX_STATE_CHANGE 18 /* To restart timer for D_Max */
+
+/*
+ * SMT events
+ */
+#define SM_TIMER 1 /* timer */
+#define SM_FAST 2 /* smt_force_irq */
+
+/* PC modes */
+#define PM_NONE 0
+#define PM_PEER 1
+#define PM_TREE 2
+
+/*
+ * PCM withhold codes
+ * MIB PC-WithholdType ENUM
+ */
+#define PC_WH_NONE 0 /* ok */
+#define PC_WH_M_M 1 /* M to M */
+#define PC_WH_OTHER 2 /* other incompatible phys */
+#define PC_WH_PATH 3 /* path not available */
+/*
+ * LCT duration
+ */
+#define LC_SHORT 1 /* short LCT */
+#define LC_MEDIUM 2 /* medium LCT */
+#define LC_LONG 3 /* long LCT */
+#define LC_EXTENDED 4 /* extended LCT */
+
+/*
+ * path_test values
+ */
+#define PT_NONE 0
+#define PT_TESTING 1 /* test is running */
+#define PT_PASSED 2 /* test passed */
+#define PT_FAILED 3 /* test failed */
+#define PT_PENDING 4 /* path test follows */
+#define PT_EXITING 5 /* disconnected while in trace/leave */
+
+/*
+ * duplicate address test
+ * MIB DupAddressTest ENUM
+ */
+#define DA_NONE 0 /* */
+#define DA_PASSED 1 /* test passed */
+#define DA_FAILED 2 /* test failed */
+
+
+/*
+ * optical bypass
+ */
+#define BP_DEINSERT 0 /* disable bypass */
+#define BP_INSERT 1 /* enable bypass */
+
+/*
+ * ODL enable/disable
+ */
+#define PM_TRANSMIT_DISABLE 0 /* disable xmit */
+#define PM_TRANSMIT_ENABLE 1 /* enable xmit */
+
+/*
+ * parameter for config_mux
+ * note : number is index in config_endec table !
+ */
+#define MUX_THRUA 0 /* through A */
+#define MUX_THRUB 1 /* through B */
+#define MUX_WRAPA 2 /* wrap A */
+#define MUX_WRAPB 3 /* wrap B */
+#define MUX_ISOLATE 4 /* isolated */
+#define MUX_WRAPS 5 /* SAS */
+
+/*
+ * MAC control
+ */
+#define MA_RESET 0
+#define MA_BEACON 1
+#define MA_CLAIM 2
+#define MA_DIRECTED 3 /* directed beacon */
+#define MA_TREQ 4 /* change T_Req */
+#define MA_OFFLINE 5 /* switch MAC to offline */
+
+
+/*
+ * trace prop
+ * bit map for trace propagation
+ */
+#define ENTITY_MAC (NUMPHYS)
+#define ENTITY_PHY(p) (p)
+#define ENTITY_BIT(m) (1<<(m))
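+
+/*
+ * Example: with NUMPHYS == 2, ENTITY_BIT(ENTITY_PHY(PA)) is bit 0,
+ * ENTITY_BIT(ENTITY_PHY(PB)) is bit 1 and ENTITY_BIT(ENTITY_MAC) is bit 2.
+ */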
+
+/*
+ * Resource Tag Types
+ */
+#define PATH_ISO 0 /* isolated */
+#define PATH_PRIM 3 /* primary path */
+#define PATH_THRU 5 /* through path */
+
+#define RES_MAC 2 /* resource type MAC */
+#define RES_PORT 4 /* resource type PORT */
+
+
+/*
+ * CFM state
+ * oops: MUST MATCH CF-StateType in SMT7.2 !
+ */
+#define SC0_ISOLATED 0 /* isolated */
+#define SC1_WRAP_A 5 /* wrap A (not used) */
+#define SC2_WRAP_B 6 /* wrap B (not used) */
+#define SC4_THRU_A 12 /* through A */
+#define SC5_THRU_B 7 /* through B (used in SMT 6.2) */
+#define SC7_WRAP_S 8 /* SAS (not used) */
+#define SC9_C_WRAP_A 9 /* c wrap A */
+#define SC10_C_WRAP_B 10 /* c wrap B */
+#define SC11_C_WRAP_S 11 /* c wrap S */
+
+/*
+ * convert MIB time in units of 80nS to uS
+ */
+#define MIB2US(t) ((t)/12)
+#define SEC2MIB(s) ((s)*12500000L)
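+/*
+ * Example: SEC2MIB(1) == 12500000, i.e. one second in 80 ns units;
+ * MIB2US() divides by 12, a slight over-estimate of the exact factor 12.5.
+ */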
+/*
+ * SMT timer
+ */
+struct smt_timer {
+ struct smt_timer *tm_next ; /* linked list */
+ struct s_smc *tm_smc ; /* pointer to context */
+ u_long tm_delta ; /* delta time */
+ u_long tm_token ; /* token value */
+ u_short tm_active ; /* flag : active/inactive */
+ u_short tm_pad ; /* pad field */
+} ;
+
+/*
+ * communication structures
+ */
+struct mac_parameter {
+ u_long t_neg ; /* T_Neg parameter */
+ u_long t_pri ; /* T_Pri register in MAC */
+} ;
+
+/*
+ * MAC counters
+ */
+struct mac_counter {
+ u_long mac_nobuf_counter ; /* MAC SW counter: no buffer */
+ u_long mac_r_restart_counter ; /* MAC SW counter: rx restarted */
+} ;
+
+/*
+ * para struct context for SMT parameters
+ */
+struct s_pcon {
+ int pc_len ;
+ int pc_err ;
+ int pc_badset ;
+ void *pc_p ;
+} ;
+
+/*
+ * link error monitor
+ */
+#define LEM_AVG 5
+struct lem_counter {
+#ifdef AM29K
+ int lem_on ;
+ u_long lem_errors ;
+ u_long lem_symbols ;
+ u_long lem_tsymbols ;
+ int lem_s_count ;
+ int lem_n_s ;
+ int lem_values ;
+ int lem_index ;
+ int lem_avg_ber[LEM_AVG] ;
+ int lem_sum ;
+#else
+ u_short lem_float_ber ; /* 10E-nn bit error rate */
+ u_long lem_errors ; /* accumulated error count */
+ u_short lem_on ;
+#endif
+} ;
+
+#define NUMBITS 10
+
+#ifdef AMDPLC
+
+/*
+ * PLC state table
+ */
+struct s_plc {
+ u_short p_state ; /* current state */
+ u_short p_bits ; /* number of bits to send */
+ u_short p_start ; /* first bit pos */
+ u_short p_pad ; /* padding for alignment */
+ u_long soft_err ; /* error counter */
+ u_long parity_err ; /* error counter */
+ u_long ebuf_err ; /* error counter */
+ u_long ebuf_cont ; /* continuous error counter */
+ u_long phyinv ; /* error counter */
+ u_long vsym_ctr ; /* error counter */
+ u_long mini_ctr ; /* error counter */
+ u_long tpc_exp ; /* error counter */
+ u_long np_err ; /* error counter */
+ u_long b_pcs ; /* error counter */
+ u_long b_tpc ; /* error counter */
+ u_long b_tne ; /* error counter */
+ u_long b_qls ; /* error counter */
+ u_long b_ils ; /* error counter */
+ u_long b_hls ; /* error counter */
+} ;
+#endif
+
+#ifdef PROTOTYP_INC
+#include "fddi/driver.pro"
+#else /* PROTOTYP_INC */
+/*
+ * function prototypes
+ */
+#include "h/mbuf.h" /* Type definitions for MBUFs */
+#include "h/smtstate.h" /* struct smt_state */
+
+void hwt_restart(struct s_smc *smc); /* hwt.c */
+SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
+ int length); /* smt.c */
+SMbuf *smt_get_mbuf(struct s_smc *smc); /* drvsr.c */
+void *sm_to_para(struct s_smc *smc, struct smt_header *sm,
+ int para); /* smt.c */
+
+#ifndef SK_UNUSED
+#define SK_UNUSED(var) (void)(var)
+#endif
+
+void queue_event(struct s_smc *smc, int class, int event);
+void ecm(struct s_smc *smc, int event);
+void ecm_init(struct s_smc *smc);
+void rmt(struct s_smc *smc, int event);
+void rmt_init(struct s_smc *smc);
+void pcm(struct s_smc *smc, const int np, int event);
+void pcm_init(struct s_smc *smc);
+void cfm(struct s_smc *smc, int event);
+void cfm_init(struct s_smc *smc);
+void smt_timer_start(struct s_smc *smc, struct smt_timer *timer, u_long time,
+ u_long token);
+void smt_timer_stop(struct s_smc *smc, struct smt_timer *timer);
+void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
+ int *remote, int *mac);
+void plc_config_mux(struct s_smc *smc, int mux);
+void sm_lem_evaluate(struct s_smc *smc);
+void smt_clear_una_dna(struct s_smc *smc);
+void mac_update_counter(struct s_smc *smc);
+void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off);
+void sm_ma_control(struct s_smc *smc, int mode);
+void sm_mac_check_beacon_claim(struct s_smc *smc);
+void config_mux(struct s_smc *smc, int mux);
+void smt_agent_init(struct s_smc *smc);
+void smt_timer_init(struct s_smc *smc);
+void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs);
+void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
+ int index, int local);
+void smt_swap_para(struct smt_header *sm, int len, int direction);
+void ev_init(struct s_smc *smc);
+void hwt_init(struct s_smc *smc);
+u_long hwt_read(struct s_smc *smc);
+void hwt_stop(struct s_smc *smc);
+void hwt_start(struct s_smc *smc, u_long time);
+void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc);
+void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
+void sm_pm_bypass_req(struct s_smc *smc, int mode);
+void rmt_indication(struct s_smc *smc, int i);
+void cfm_state_change(struct s_smc *smc, int c_state);
+
+#if defined(DEBUG) || !defined(NO_SMT_PANIC)
+void smt_panic(struct s_smc *smc, char *text);
+#else
+#define smt_panic(smc,text)
+#endif /* DEBUG || !NO_SMT_PANIC */
+
+void smt_stat_counter(struct s_smc *smc, int stat);
+void smt_timer_poll(struct s_smc *smc);
+u_long smt_get_time(void);
+u_long smt_get_tid(struct s_smc *smc);
+void smt_timer_done(struct s_smc *smc);
+void smt_set_defaults(struct s_smc *smc);
+void smt_fixup_mib(struct s_smc *smc);
+void smt_reset_defaults(struct s_smc *smc, int level);
+void smt_agent_task(struct s_smc *smc);
+void smt_please_reconnect(struct s_smc *smc, int reconn_time);
+int smt_check_para(struct s_smc *smc, struct smt_header *sm,
+ const u_short list[]);
+void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr);
+
+#ifdef SUPERNET_3
+void drv_reset_indication(struct s_smc *smc);
+#endif /* SUPERNET_3 */
+
+void smt_start_watchdog(struct s_smc *smc);
+void smt_event(struct s_smc *smc, int event);
+void timer_event(struct s_smc *smc, u_long token);
+void ev_dispatcher(struct s_smc *smc);
+void pcm_get_state(struct s_smc *smc, struct smt_state *state);
+void ecm_state_change(struct s_smc *smc, int e_state);
+int sm_pm_bypass_present(struct s_smc *smc);
+void pcm_state_change(struct s_smc *smc, int plc, int p_state);
+void rmt_state_change(struct s_smc *smc, int r_state);
+int sm_pm_get_ls(struct s_smc *smc, int phy);
+int pcm_get_s_port(struct s_smc *smc);
+int pcm_rooted_station(struct s_smc *smc);
+int cfm_get_mac_input(struct s_smc *smc);
+int cfm_get_mac_output(struct s_smc *smc);
+int port_to_mib(struct s_smc *smc, int p);
+int cem_build_path(struct s_smc *smc, char *to, int path_index);
+int sm_mac_get_tx_state(struct s_smc *smc);
+char *get_pcmstate(struct s_smc *smc, int np);
+int smt_action(struct s_smc *smc, int class, int code, int index);
+u_short smt_online(struct s_smc *smc, int on);
+void smt_force_irq(struct s_smc *smc);
+void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local);
+void smt_send_frame(struct s_smc *smc, SMbuf *mb, int fc, int local);
+void smt_set_timestamp(struct s_smc *smc, u_char *p);
+void mac_set_rx_mode(struct s_smc *smc, int mode);
+int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can);
+int mac_set_func_addr(struct s_smc *smc, u_long f_addr);
+void mac_del_multicast(struct s_smc *smc, struct fddi_addr *addr, int can);
+void mac_update_multicast(struct s_smc *smc);
+void mac_clear_multicast(struct s_smc *smc);
+void set_formac_tsync(struct s_smc *smc, long sync_bw);
+void formac_reinit_tx(struct s_smc *smc);
+void formac_tx_restart(struct s_smc *smc);
+void process_receive(struct s_smc *smc);
+void init_driver_fplus(struct s_smc *smc);
+void rtm_irq(struct s_smc *smc);
+void rtm_set_timer(struct s_smc *smc);
+void ring_status_indication(struct s_smc *smc, u_long status);
+void llc_recover_tx(struct s_smc *smc);
+void llc_restart_tx(struct s_smc *smc);
+void plc_clear_irq(struct s_smc *smc, int p);
+void plc_irq(struct s_smc *smc, int np, unsigned int cmd);
+int smt_set_mac_opvalues(struct s_smc *smc);
+
+#ifdef TAG_MODE
+void mac_drv_pci_fix(struct s_smc *smc, u_long fix_value);
+void mac_do_pci_fix(struct s_smc *smc);
+void mac_drv_clear_tx_queue(struct s_smc *smc);
+void mac_drv_repair_descr(struct s_smc *smc);
+u_long hwt_quick_read(struct s_smc *smc);
+void hwt_wait_time(struct s_smc *smc, u_long start, long duration);
+#endif
+
+#ifdef SMT_PNMI
+int pnmi_init(struct s_smc* smc);
+int pnmi_process_ndis_id(struct s_smc *smc, u_long ndis_oid, void *buf, int len,
+ int *BytesAccessed, int *BytesNeeded, u_char action);
+#endif
+
+#ifdef SBA
+#ifndef _H2INC
+void sba();
+#endif
+void sba_raf_received_pack();
+void sba_timer_poll();
+void smt_init_sba();
+#endif
+
+#ifdef ESS
+int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
+ int fs);
+void ess_timer_poll(struct s_smc *smc);
+void ess_para_change(struct s_smc *smc);
+#endif
+
+#ifndef BOOT
+void smt_init_evc(struct s_smc *smc);
+void smt_srf_event(struct s_smc *smc, int code, int index, int cond);
+#else
+#define smt_init_evc(smc)
+#define smt_srf_event(smc,code,index,cond)
+#endif
+
+#ifndef SMT_REAL_TOKEN_CT
+void smt_emulate_token_ct(struct s_smc *smc, int mac_index);
+#endif
+
+#if defined(DEBUG) && !defined(BOOT)
+void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text);
+#else
+#define dump_smt(smc,sm,text)
+#endif
+
+#ifdef DEBUG
+char* addr_to_string(struct fddi_addr *addr);
+void dump_hex(char *p, int len);
+#endif
+
+#endif /* PROTOTYP_INC */
+
+/* PNMI default defines */
+#ifndef PNMI_INIT
+#define PNMI_INIT(smc) /* Nothing */
+#endif
+#ifndef PNMI_GET_ID
+#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \
+ ( 1 ? (-1) : (-1) )
+#endif
+#ifndef PNMI_SET_ID
+#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \
+ set_type) ( 1 ? (-1) : (-1) )
+#endif
+
+/*
+ * SMT_PANIC defines
+ */
+#ifndef SMT_PANIC
+#define SMT_PANIC(smc,nr,msg) smt_panic (smc, msg)
+#endif
+
+#ifndef SMT_ERR_LOG
+#define SMT_ERR_LOG(smc,nr,msg) SMT_PANIC (smc, nr, msg)
+#endif
+
+#ifndef SMT_EBASE
+#define SMT_EBASE 100
+#endif
+
+#define SMT_E0100 SMT_EBASE + 0
+#define SMT_E0100_MSG "cfm FSM: invalid ce_type"
+#define SMT_E0101 SMT_EBASE + 1
+#define SMT_E0101_MSG "CEM: case ???"
+#define SMT_E0102 SMT_EBASE + 2
+#define SMT_E0102_MSG "CEM A: invalid state"
+#define SMT_E0103 SMT_EBASE + 3
+#define SMT_E0103_MSG "CEM B: invalid state"
+#define SMT_E0104 SMT_EBASE + 4
+#define SMT_E0104_MSG "CEM M: invalid state"
+#define SMT_E0105 SMT_EBASE + 5
+#define SMT_E0105_MSG "CEM S: invalid state"
+#define SMT_E0106 SMT_EBASE + 6
+#define SMT_E0106_MSG "CFM : invalid state"
+#define SMT_E0107 SMT_EBASE + 7
+#define SMT_E0107_MSG "ECM : invalid state"
+#define SMT_E0108 SMT_EBASE + 8
+#define SMT_E0108_MSG "prop_actions : NAC in DAS CFM"
+#define SMT_E0109 SMT_EBASE + 9
+#define SMT_E0109_MSG "ST2U.FM_SERRSF error in special frame"
+#define SMT_E0110 SMT_EBASE + 10
+#define SMT_E0110_MSG "ST2U.FM_SRFRCTOV recv. count. overflow"
+#define SMT_E0111 SMT_EBASE + 11
+#define SMT_E0111_MSG "ST2U.FM_SNFSLD NP & FORMAC simult. load"
+#define SMT_E0112 SMT_EBASE + 12
+#define SMT_E0112_MSG "ST2U.FM_SRCVFRM single-frame recv.-mode"
+#define SMT_E0113 SMT_EBASE + 13
+#define SMT_E0113_MSG "FPLUS: Buffer Memory Error"
+#define SMT_E0114 SMT_EBASE + 14
+#define SMT_E0114_MSG "ST2U.FM_SERRSF error in special frame"
+#define SMT_E0115 SMT_EBASE + 15
+#define SMT_E0115_MSG "ST3L: parity error in receive queue 2"
+#define SMT_E0116 SMT_EBASE + 16
+#define SMT_E0116_MSG "ST3L: parity error in receive queue 1"
+#define SMT_E0117 SMT_EBASE + 17
+#define SMT_E0117_MSG "E_SMT_001: RxD count for receive queue 1 = 0"
+#define SMT_E0118 SMT_EBASE + 18
+#define SMT_E0118_MSG "PCM : invalid state"
+#define SMT_E0119 SMT_EBASE + 19
+#define SMT_E0119_MSG "smt_add_para"
+#define SMT_E0120 SMT_EBASE + 20
+#define SMT_E0120_MSG "smt_set_para"
+#define SMT_E0121 SMT_EBASE + 21
+#define SMT_E0121_MSG "invalid event in dispatcher"
+#define SMT_E0122 SMT_EBASE + 22
+#define SMT_E0122_MSG "RMT : invalid state"
+#define SMT_E0123 SMT_EBASE + 23
+#define SMT_E0123_MSG "SBA: state machine has invalid state"
+#define SMT_E0124 SMT_EBASE + 24
+#define SMT_E0124_MSG "sba_free_session() called with NULL pointer"
+#define SMT_E0125 SMT_EBASE + 25
+#define SMT_E0125_MSG "SBA : invalid session pointer"
+#define SMT_E0126 SMT_EBASE + 26
+#define SMT_E0126_MSG "smt_free_mbuf() called with NULL pointer\n"
+#define SMT_E0127 SMT_EBASE + 27
+#define SMT_E0127_MSG "sizeof evcs"
+#define SMT_E0128 SMT_EBASE + 28
+#define SMT_E0128_MSG "evc->evc_cond_state = 0"
+#define SMT_E0129 SMT_EBASE + 29
+#define SMT_E0129_MSG "evc->evc_multiple = 0"
+#define SMT_E0130 SMT_EBASE + 30
+#define SMT_E0130_MSG write_mdr_warning
+#define SMT_E0131 SMT_EBASE + 31
+#define SMT_E0131_MSG cam_warning
+#define SMT_E0132 SMT_EBASE + 32
+#define SMT_E0132_MSG "ST1L.FM_SPCEPDx parity/coding error"
+#define SMT_E0133 SMT_EBASE + 33
+#define SMT_E0133_MSG "ST1L.FM_STBURx tx buffer underrun"
+#define SMT_E0134 SMT_EBASE + 34
+#define SMT_E0134_MSG "ST1L.FM_SPCEPDx parity error"
+#define SMT_E0135 SMT_EBASE + 35
+#define SMT_E0135_MSG "RMT: duplicate MAC address detected. Ring left!"
+#define SMT_E0136 SMT_EBASE + 36
+#define SMT_E0136_MSG "Elasticity Buffer hang-up"
+#define SMT_E0137 SMT_EBASE + 37
+#define SMT_E0137_MSG "SMT: queue overrun"
+#define SMT_E0138 SMT_EBASE + 38
+#define SMT_E0138_MSG "RMT: duplicate MAC address detected. Ring NOT left!"
+#endif /* _CMTDEF_ */
diff --git a/drivers/net/skfp/h/fddi.h b/drivers/net/skfp/h/fddi.h
new file mode 100644
index 000000000000..c9a28a8a383b
--- /dev/null
+++ b/drivers/net/skfp/h/fddi.h
@@ -0,0 +1,69 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _FDDI_
+#define _FDDI_
+
+struct fddi_addr {
+ u_char a[6] ;
+} ;
+
+#define GROUP_ADDR 0x80 /* MSB in a[0] */
+
+struct fddi_mac {
+ struct fddi_addr mac_dest ;
+ struct fddi_addr mac_source ;
+ u_char mac_info[4478] ;
+} ;
+
+#define FDDI_MAC_SIZE (12)
+#define FDDI_RAW_MTU (4500-5) /* excl. Pr, SD, ED/FS */
+#define FDDI_RAW (4500)
+
+/*
+ * FC values
+ */
+#define FC_VOID 0x40 /* void frame */
+#define FC_TOKEN 0x80 /* token */
+#define FC_RES_TOKEN 0xc0 /* restricted token */
+#define FC_SMT_INFO 0x41 /* SMT Info frame */
+/*
+ * FC_SMT_LAN_LOC && FC_SMT_LOC are SK specific !
+ */
+#define FC_SMT_LAN_LOC 0x42 /* local SMT Info frame */
+#define FC_SMT_LOC 0x43 /* local SMT Info frame */
+#define FC_SMT_NSA 0x4f /* SMT NSA frame */
+#define FC_MAC 0xc0 /* MAC frame */
+#define FC_BEACON 0xc2 /* MAC beacon frame */
+#define FC_CLAIM 0xc3 /* MAC claim frame */
+#define FC_SYNC_LLC 0xd0 /* sync. LLC frame */
+#define FC_ASYNC_LLC 0x50 /* async. LLC frame */
+#define FC_SYNC_BIT 0x80 /* sync. bit in FC */
+
+#define FC_LLC_PRIOR 0x07 /* priority bits */
+
+#define BEACON_INFO 0 /* beacon type */
+#define DBEACON_INFO 1 /* beacon type DIRECTED */
+
+
+/*
+ * indicator bits
+ */
+#define C_INDICATOR (1<<0)
+#define A_INDICATOR (1<<1)
+#define E_INDICATOR (1<<2)
+#define I_INDICATOR (1<<6) /* SK specific */
+#define L_INDICATOR (1<<7) /* SK specific */
+
+#endif /* _FDDI_ */
diff --git a/drivers/net/skfp/h/fddimib.h b/drivers/net/skfp/h/fddimib.h
new file mode 100644
index 000000000000..d1acdc773950
--- /dev/null
+++ b/drivers/net/skfp/h/fddimib.h
@@ -0,0 +1,349 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * FDDI MIB
+ */
+
+/*
+ * typedefs
+ */
+
+typedef u_long Counter ;
+typedef u_char TimeStamp[8] ;
+typedef struct fddi_addr LongAddr ;
+typedef u_long Timer_2 ;
+typedef u_long Timer ;
+typedef u_short ResId ;
+typedef u_short SMTEnum ;
+typedef u_char SMTFlag ;
+
+typedef struct {
+ Counter count ;
+ TimeStamp timestamp ;
+} SetCountType ;
+
+/*
+ * bits for bit string "available_path"
+ */
+#define MIB_PATH_P (1<<0)
+#define MIB_PATH_S (1<<1)
+#define MIB_PATH_L (1<<2)
+
+/*
+ * bits for bit string PermittedPaths & RequestedPaths (SIZE(8))
+ */
+#define MIB_P_PATH_LOCAL (1<<0)
+#define MIB_P_PATH_SEC_ALTER (1<<1)
+#define MIB_P_PATH_PRIM_ALTER (1<<2)
+#define MIB_P_PATH_CON_ALTER (1<<3)
+#define MIB_P_PATH_SEC_PREFER (1<<4)
+#define MIB_P_PATH_PRIM_PREFER (1<<5)
+#define MIB_P_PATH_CON_PREFER (1<<6)
+#define MIB_P_PATH_THRU (1<<7)
+
+/*
+ * enum current path
+ */
+#define MIB_PATH_ISOLATED 0
+#define MIB_PATH_LOCAL 1
+#define MIB_PATH_SECONDARY 2
+#define MIB_PATH_PRIMARY 3
+#define MIB_PATH_CONCATENATED 4
+#define MIB_PATH_THRU 5
+
+/*
+ * enum PMDClass
+ */
+#define MIB_PMDCLASS_MULTI 0
+#define MIB_PMDCLASS_SINGLE1 1
+#define MIB_PMDCLASS_SINGLE2 2
+#define MIB_PMDCLASS_SONET 3
+#define MIB_PMDCLASS_LCF 4
+#define MIB_PMDCLASS_TP 5
+#define MIB_PMDCLASS_UNKNOWN 6
+#define MIB_PMDCLASS_UNSPEC 7
+
+/*
+ * enum SMTStationStatus
+ */
+#define MIB_SMT_STASTA_CON 0
+#define MIB_SMT_STASTA_SEPA 1
+#define MIB_SMT_STASTA_THRU 2
+
+
+struct fddi_mib {
+ /*
+ * private
+ */
+ u_char fddiPRPMFPasswd[8] ;
+ struct smt_sid fddiPRPMFStation ;
+
+#ifdef ESS
+ /*
+ * private variables for static allocation of the
+ * End Station Support
+ */
+ u_long fddiESSPayload ; /* payload for static alloc */
+ u_long fddiESSOverhead ; /* frame ov for static alloc */
+ u_long fddiESSMaxTNeg ; /* maximum of T-NEG */
+ u_long fddiESSMinSegmentSize ; /* min size of the sync frames */
+ u_long fddiESSCategory ; /* category for the Alloc req */
+ short fddiESSSynchTxMode ; /* send all LLC frames as sync */
+#endif /* ESS */
+#ifdef SBA
+ /*
+ * private variables for the Synchronous Bandwidth Allocator
+ */
+ char fddiSBACommand ; /* holds the parsed SBA cmd */
+ u_char fddiSBAAvailable ; /* SBA allocatable value */
+#endif /* SBA */
+
+ /*
+ * SMT standard mib
+ */
+ struct smt_sid fddiSMTStationId ;
+ u_short fddiSMTOpVersionId ;
+ u_short fddiSMTHiVersionId ;
+ u_short fddiSMTLoVersionId ;
+ u_char fddiSMTManufacturerData[32] ;
+ u_char fddiSMTUserData[32] ;
+ u_short fddiSMTMIBVersionId ;
+
+ /*
+ * ConfigGrp
+ */
+ u_char fddiSMTMac_Ct ;
+ u_char fddiSMTNonMaster_Ct ;
+ u_char fddiSMTMaster_Ct ;
+ u_char fddiSMTAvailablePaths ;
+ u_short fddiSMTConfigCapabilities ;
+ u_short fddiSMTConfigPolicy ;
+ u_short fddiSMTConnectionPolicy ;
+ u_short fddiSMTTT_Notify ;
+ u_char fddiSMTStatRptPolicy ;
+ u_long fddiSMTTrace_MaxExpiration ;
+ u_short fddiSMTPORTIndexes[NUMPHYS] ;
+ u_short fddiSMTMACIndexes ;
+ u_char fddiSMTBypassPresent ;
+
+ /*
+ * StatusGrp
+ */
+ SMTEnum fddiSMTECMState ;
+ SMTEnum fddiSMTCF_State ;
+ SMTEnum fddiSMTStationStatus ;
+ u_char fddiSMTRemoteDisconnectFlag ;
+ u_char fddiSMTPeerWrapFlag ;
+
+ /*
+ * MIBOperationGrp
+ */
+ TimeStamp fddiSMTTimeStamp ;
+ TimeStamp fddiSMTTransitionTimeStamp ;
+ SetCountType fddiSMTSetCount ;
+ struct smt_sid fddiSMTLastSetStationId ;
+
+ struct fddi_mib_m {
+ u_short fddiMACFrameStatusFunctions ;
+ Timer_2 fddiMACT_MaxCapabilitiy ;
+ Timer_2 fddiMACTVXCapabilitiy ;
+
+ /* ConfigGrp */
+ u_char fddiMACMultiple_N ; /* private */
+ u_char fddiMACMultiple_P ; /* private */
+ u_char fddiMACDuplicateAddressCond ;/* private */
+ u_char fddiMACAvailablePaths ;
+ u_short fddiMACCurrentPath ;
+ LongAddr fddiMACUpstreamNbr ;
+ LongAddr fddiMACDownstreamNbr ;
+ LongAddr fddiMACOldUpstreamNbr ;
+ LongAddr fddiMACOldDownstreamNbr ;
+ SMTEnum fddiMACDupAddressTest ;
+ u_short fddiMACRequestedPaths ;
+ SMTEnum fddiMACDownstreamPORTType ;
+ ResId fddiMACIndex ;
+
+ /* AddressGrp */
+ LongAddr fddiMACSMTAddress ;
+
+ /* OperationGrp */
+ Timer_2 fddiMACT_Min ; /* private */
+ Timer_2 fddiMACT_ReqMIB ;
+ Timer_2 fddiMACT_Req ; /* private */
+ Timer_2 fddiMACT_Neg ;
+ Timer_2 fddiMACT_MaxMIB ;
+ Timer_2 fddiMACT_Max ; /* private */
+ Timer_2 fddiMACTvxValueMIB ;
+ Timer_2 fddiMACTvxValue ; /* private */
+ Timer_2 fddiMACT_Pri0 ;
+ Timer_2 fddiMACT_Pri1 ;
+ Timer_2 fddiMACT_Pri2 ;
+ Timer_2 fddiMACT_Pri3 ;
+ Timer_2 fddiMACT_Pri4 ;
+ Timer_2 fddiMACT_Pri5 ;
+ Timer_2 fddiMACT_Pri6 ;
+
+ /* CountersGrp */
+ Counter fddiMACFrame_Ct ;
+ Counter fddiMACCopied_Ct ;
+ Counter fddiMACTransmit_Ct ;
+ Counter fddiMACToken_Ct ;
+ Counter fddiMACError_Ct ;
+ Counter fddiMACLost_Ct ;
+ Counter fddiMACTvxExpired_Ct ;
+ Counter fddiMACNotCopied_Ct ;
+ Counter fddiMACRingOp_Ct ;
+
+ Counter fddiMACSMTCopied_Ct ; /* private */
+ Counter fddiMACSMTTransmit_Ct ; /* private */
+
+ /* private for delta ratio */
+ Counter fddiMACOld_Frame_Ct ;
+ Counter fddiMACOld_Copied_Ct ;
+ Counter fddiMACOld_Error_Ct ;
+ Counter fddiMACOld_Lost_Ct ;
+ Counter fddiMACOld_NotCopied_Ct ;
+
+ /* FrameErrorConditionGrp */
+ u_short fddiMACFrameErrorThreshold ;
+ u_short fddiMACFrameErrorRatio ;
+
+ /* NotCopiedConditionGrp */
+ u_short fddiMACNotCopiedThreshold ;
+ u_short fddiMACNotCopiedRatio ;
+
+ /* StatusGrp */
+ SMTEnum fddiMACRMTState ;
+ SMTFlag fddiMACDA_Flag ;
+ SMTFlag fddiMACUNDA_Flag ;
+ SMTFlag fddiMACFrameErrorFlag ;
+ SMTFlag fddiMACNotCopiedFlag ;
+ SMTFlag fddiMACMA_UnitdataAvailable ;
+ SMTFlag fddiMACHardwarePresent ;
+ SMTFlag fddiMACMA_UnitdataEnable ;
+
+ } m[NUMMACS] ;
+#define MAC0 0
+
+ struct fddi_mib_a {
+ ResId fddiPATHIndex ;
+ u_long fddiPATHSbaPayload ;
+ u_long fddiPATHSbaOverhead ;
+ /* fddiPATHConfiguration is built on demand */
+ /* u_long fddiPATHConfiguration ; */
+ Timer fddiPATHT_Rmode ;
+ u_long fddiPATHSbaAvailable ;
+ Timer_2 fddiPATHTVXLowerBound ;
+ Timer_2 fddiPATHT_MaxLowerBound ;
+ Timer_2 fddiPATHMaxT_Req ;
+ } a[NUMPATHS] ;
+#define PATH0 0
+
+ struct fddi_mib_p {
+ /* ConfigGrp */
+ SMTEnum fddiPORTMy_Type ;
+ SMTEnum fddiPORTNeighborType ;
+ u_char fddiPORTConnectionPolicies ;
+ struct {
+ u_char T_val ;
+ u_char R_val ;
+ } fddiPORTMacIndicated ;
+ SMTEnum fddiPORTCurrentPath ;
+ /* must be 4: is 32 bit in SMT format
+ * indices :
+ * 1 none
+ * 2 tree
+ * 3 peer
+ */
+ u_char fddiPORTRequestedPaths[4] ;
+ u_short fddiPORTMACPlacement ;
+ u_char fddiPORTAvailablePaths ;
+ u_char fddiPORTConnectionCapabilities ;
+ SMTEnum fddiPORTPMDClass ;
+ ResId fddiPORTIndex ;
+
+ /* OperationGrp */
+ SMTEnum fddiPORTMaint_LS ;
+ SMTEnum fddiPORTPC_LS ;
+ u_char fddiPORTBS_Flag ;
+
+ /* ErrorCtrsGrp */
+ Counter fddiPORTLCTFail_Ct ;
+ Counter fddiPORTEBError_Ct ;
+ Counter fddiPORTOldEBError_Ct ;
+
+ /* LerGrp */
+ Counter fddiPORTLem_Reject_Ct ;
+ Counter fddiPORTLem_Ct ;
+ u_char fddiPORTLer_Estimate ;
+ u_char fddiPORTLer_Cutoff ;
+ u_char fddiPORTLer_Alarm ;
+
+ /* StatusGrp */
+ SMTEnum fddiPORTConnectState ;
+ SMTEnum fddiPORTPCMState ; /* real value */
+ SMTEnum fddiPORTPCMStateX ; /* value for MIB */
+ SMTEnum fddiPORTPC_Withhold ;
+ SMTFlag fddiPORTHardwarePresent ;
+ u_char fddiPORTLerFlag ;
+
+ u_char fddiPORTMultiple_U ; /* private */
+ u_char fddiPORTMultiple_P ; /* private */
+ u_char fddiPORTEB_Condition ; /* private */
+ } p[NUMPHYS] ;
+ struct {
+ Counter fddiPRIVECF_Req_Rx ; /* ECF req received */
+ Counter fddiPRIVECF_Reply_Rx ; /* ECF repl received */
+ Counter fddiPRIVECF_Req_Tx ; /* ECF req transm */
+ Counter fddiPRIVECF_Reply_Tx ; /* ECF repl transm */
+ Counter fddiPRIVPMF_Get_Rx ; /* PMF Get rec */
+ Counter fddiPRIVPMF_Set_Rx ; /* PMF Set rec */
+ Counter fddiPRIVRDF_Rx ; /* RDF received */
+ Counter fddiPRIVRDF_Tx ; /* RDF transmitted */
+ } priv ;
+} ;
+
+/*
+ * OIDs for statistics
+ */
+#define SMT_OID_CF_STATE 1 /* fddiSMTCF_State */
+#define SMT_OID_PCM_STATE_A 2 /* fddiPORTPCMState port A */
+#define SMT_OID_PCM_STATE_B 17 /* fddiPORTPCMState port B */
+#define SMT_OID_RMT_STATE 3 /* fddiMACRMTState */
+#define SMT_OID_UNA 4 /* fddiMACUpstreamNbr */
+#define SMT_OID_DNA 5 /* fddiMACOldDownstreamNbr */
+#define SMT_OID_ERROR_CT 6 /* fddiMACError_Ct */
+#define SMT_OID_LOST_CT 7 /* fddiMACLost_Ct */
+#define SMT_OID_LEM_CT 8 /* fddiPORTLem_Ct */
+#define SMT_OID_LEM_CT_A 11 /* fddiPORTLem_Ct port A */
+#define SMT_OID_LEM_CT_B 12 /* fddiPORTLem_Ct port B */
+#define SMT_OID_LCT_FAIL_CT 9 /* fddiPORTLCTFail_Ct */
+#define SMT_OID_LCT_FAIL_CT_A 13 /* fddiPORTLCTFail_Ct port A */
+#define SMT_OID_LCT_FAIL_CT_B 14 /* fddiPORTLCTFail_Ct port B */
+#define SMT_OID_LEM_REJECT_CT 10 /* fddiPORTLem_Reject_Ct */
+#define SMT_OID_LEM_REJECT_CT_A 15 /* fddiPORTLem_Reject_Ct port A */
+#define SMT_OID_LEM_REJECT_CT_B 16 /* fddiPORTLem_Reject_Ct port B */
+
+/*
+ * SK MIB
+ */
+#define SMT_OID_ECF_REQ_RX 20 /* ECF requests received */
+#define SMT_OID_ECF_REPLY_RX 21 /* ECF replies received */
+#define SMT_OID_ECF_REQ_TX 22 /* ECF requests transmitted */
+#define SMT_OID_ECF_REPLY_TX 23 /* ECF replies transmitted */
+#define SMT_OID_PMF_GET_RX 24 /* PMF get requests received */
+#define SMT_OID_PMF_SET_RX 25 /* PMF set requests received */
+#define SMT_OID_RDF_RX 26 /* RDF received */
+#define SMT_OID_RDF_TX 27 /* RDF transmitted */
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/skfp/h/fplustm.h
new file mode 100644
index 000000000000..98bbf654d12f
--- /dev/null
+++ b/drivers/net/skfp/h/fplustm.h
@@ -0,0 +1,274 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * AMD Fplus in tag mode data structs
+ * defs for fplustm.c
+ */
+
+#ifndef _FPLUS_
+#define _FPLUS_
+
+#ifndef HW_PTR
+#define HW_PTR void __iomem *
+#endif
+
+/*
+ * fplus error statistic structure
+ */
+struct err_st {
+ u_long err_valid ; /* memory status valid */
+ u_long err_abort ; /* memory status receive abort */
+ u_long err_e_indicator ; /* error indicator */
+ u_long err_crc ; /* error detected (CRC or length) */
+ u_long err_llc_frame ; /* LLC frame */
+ u_long err_mac_frame ; /* MAC frame */
+ u_long err_smt_frame ; /* SMT frame */
+ u_long err_imp_frame ; /* implementer frame */
+ u_long err_no_buf ; /* no buffer available */
+ u_long err_too_long ; /* longer than max. buffer */
+ u_long err_bec_stat ; /* beacon state entered */
+ u_long err_clm_stat ; /* claim state entered */
+ u_long err_sifg_det ; /* short interframe gap detect */
+ u_long err_phinv ; /* PHY invalid */
+ u_long err_tkiss ; /* token issued */
+ u_long err_tkerr ; /* token error */
+} ;
+
+/*
+ * Transmit Descriptor struct
+ */
+struct s_smt_fp_txd {
+ u_int txd_tbctrl ; /* transmit buffer control */
+ u_int txd_txdscr ; /* transmit frame status word */
+ u_int txd_tbadr ; /* physical tx buffer address */
+ u_int txd_ntdadr ; /* physical pointer to the next TxD */
+#ifdef ENA_64BIT_SUP
+ u_int txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/
+#endif
+ char far *txd_virt ; /* virtual pointer to the data frag */
+ /* virt pointer to the next TxD */
+ struct s_smt_fp_txd volatile far *txd_next ;
+ struct s_txd_os txd_os ; /* OS - specific struct */
+} ;
+
+/*
+ * Receive Descriptor struct
+ */
+struct s_smt_fp_rxd {
+ u_int rxd_rbctrl ; /* receive buffer control */
+ u_int rxd_rfsw ; /* receive frame status word */
+ u_int rxd_rbadr ; /* physical rx buffer address */
+ u_int rxd_nrdadr ; /* physical pointer to the next RxD */
+#ifdef ENA_64BIT_SUP
+	u_int rxd_rbadr_hi ;	/* physical rx buffer addr (high dword)*/
+#endif
+ char far *rxd_virt ; /* virtual pointer to the data frag */
+ /* virt pointer to the next RxD */
+ struct s_smt_fp_rxd volatile far *rxd_next ;
+ struct s_rxd_os rxd_os ; /* OS - specific struct */
+} ;
+
+/*
+ * Descriptor Union Definition
+ */
+union s_fp_descr {
+ struct s_smt_fp_txd t ; /* pointer to the TxD */
+ struct s_smt_fp_rxd r ; /* pointer to the RxD */
+} ;
+
+/*
+ * TxD Ring Control struct
+ */
+struct s_smt_tx_queue {
+ struct s_smt_fp_txd volatile *tx_curr_put ; /* next free TxD */
+ struct s_smt_fp_txd volatile *tx_prev_put ; /* shadow put pointer */
+ struct s_smt_fp_txd volatile *tx_curr_get ; /* next TxD to release*/
+ u_short tx_free ; /* count of free TxD's */
+ u_short tx_used ; /* count of used TxD's */
+ HW_PTR tx_bmu_ctl ; /* BMU addr for tx start */
+ HW_PTR tx_bmu_dsc ; /* BMU addr for curr dsc. */
+} ;
+
+/*
+ * RxD Ring Control struct
+ */
+struct s_smt_rx_queue {
+ struct s_smt_fp_rxd volatile *rx_curr_put ; /* next RxD to queue into */
+ struct s_smt_fp_rxd volatile *rx_prev_put ; /* shadow put pointer */
+ struct s_smt_fp_rxd volatile *rx_curr_get ; /* next RxD to fill */
+ u_short rx_free ; /* count of free RxD's */
+ u_short rx_used ; /* count of used RxD's */
+ HW_PTR rx_bmu_ctl ; /* BMU addr for rx start */
+ HW_PTR rx_bmu_dsc ; /* BMU addr for curr dsc. */
+} ;
+
+#define VOID_FRAME_OFF 0x00
+#define CLAIM_FRAME_OFF 0x08
+#define BEACON_FRAME_OFF 0x10
+#define DBEACON_FRAME_OFF 0x18
+#define RX_FIFO_OFF 0x21 /* to get a prime number for */
+ /* the RX_FIFO_SPACE */
+
+#define RBC_MEM_SIZE 0x8000
+#define SEND_ASYNC_AS_SYNC 0x1
+#define SYNC_TRAFFIC_ON 0x2
+
+/* big FIFO memory */
+#define	RX_FIFO_SPACE	(0x4000 - RX_FIFO_OFF)
+#define	TX_FIFO_SPACE	0x4000
+
+#define	TX_SMALL_FIFO	0x0900
+#define	TX_MEDIUM_FIFO	(TX_FIFO_SPACE / 2)
+#define	TX_LARGE_FIFO	(TX_FIFO_SPACE - TX_SMALL_FIFO)
+
+#define	RX_SMALL_FIFO	0x0900
+#define	RX_LARGE_FIFO	(RX_FIFO_SPACE - RX_SMALL_FIFO)
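+/*
+ * Resulting sizes with the values above, worked out for reference:
+ *	RX_FIFO_SPACE	= 0x4000 - 0x21   = 0x3fdf
+ *	TX_MEDIUM_FIFO	= 0x4000 / 2      = 0x2000
+ *	TX_LARGE_FIFO	= 0x4000 - 0x0900 = 0x3700
+ *	RX_LARGE_FIFO	= 0x3fdf - 0x0900 = 0x36df
+ */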
+
+struct s_smt_fifo_conf {
+ u_short rbc_ram_start ; /* FIFO start address */
+ u_short rbc_ram_end ; /* FIFO size */
+ u_short rx1_fifo_start ; /* rx queue start address */
+ u_short rx1_fifo_size ; /* rx queue size */
+ u_short rx2_fifo_start ; /* rx queue start address */
+ u_short rx2_fifo_size ; /* rx queue size */
+ u_short tx_s_start ; /* sync queue start address */
+ u_short tx_s_size ; /* sync queue size */
+ u_short tx_a0_start ; /* async queue A0 start address */
+ u_short tx_a0_size ; /* async queue A0 size */
+ u_short fifo_config_mode ; /* FIFO configuration mode */
+} ;
+
+#define FM_ADDRX (FM_ADDET|FM_EXGPA0|FM_EXGPA1)
+
+struct s_smt_fp {
+ u_short mdr2init ; /* mode register 2 init value */
+ u_short mdr3init ; /* mode register 3 init value */
+ u_short frselreg_init ; /* frame selection register init val */
+ u_short rx_mode ; /* address mode broad/multi/promisc */
+ u_short nsa_mode ;
+ u_short rx_prom ;
+ u_short exgpa ;
+
+ struct err_st err_stats ; /* error statistics */
+
+ /*
+ * MAC buffers
+ */
+ struct fddi_mac_sf { /* special frame build buffer */
+ u_char mac_fc ;
+ struct fddi_addr mac_dest ;
+ struct fddi_addr mac_source ;
+ u_char mac_info[0x20] ;
+ } mac_sfb ;
+
+
+ /*
+ * queues
+ */
+#define QUEUE_S 0
+#define QUEUE_A0 1
+#define QUEUE_R1 0
+#define QUEUE_R2 1
+#define USED_QUEUES 2
+
+ /*
+ * queue pointers; points to the queue dependent variables
+ */
+ struct s_smt_tx_queue *tx[USED_QUEUES] ;
+ struct s_smt_rx_queue *rx[USED_QUEUES] ;
+
+ /*
+ * queue dependent variables
+ */
+ struct s_smt_tx_queue tx_q[USED_QUEUES] ;
+ struct s_smt_rx_queue rx_q[USED_QUEUES] ;
+
+ /*
+ * FIFO configuration struct
+ */
+ struct s_smt_fifo_conf fifo ;
+
+ /* last formac status */
+ u_short s2u ;
+ u_short s2l ;
+
+ /* calculated FORMAC+ reg.addr. */
+ HW_PTR fm_st1u ;
+ HW_PTR fm_st1l ;
+ HW_PTR fm_st2u ;
+ HW_PTR fm_st2l ;
+ HW_PTR fm_st3u ;
+ HW_PTR fm_st3l ;
+
+
+ /*
+ * multicast table
+ */
+#define FPMAX_MULTICAST 32
+#define SMT_MAX_MULTI 4
+ struct {
+ struct s_fpmc {
+ struct fddi_addr a ; /* mc address */
+ u_char n ; /* usage counter */
+ u_char perm ; /* flag: permanent */
+ } table[FPMAX_MULTICAST] ;
+ } mc ;
+ struct fddi_addr group_addr ;
+ u_long func_addr ; /* functional address */
+ int smt_slots_used ; /* count of table entries for the SMT */
+ int os_slots_used ; /* count of table entries */
+ /* used by the os-specific module */
+} ;
+
+/*
+ * modes for mac_set_rx_mode()
+ */
+#define RX_ENABLE_ALLMULTI 1 /* enable all multicasts */
+#define RX_DISABLE_ALLMULTI 2 /* disable "enable all multicasts" */
+#define RX_ENABLE_PROMISC	3	/* enable promiscuous */
+#define RX_DISABLE_PROMISC	4	/* disable promiscuous */
+#define RX_ENABLE_NSA 5 /* enable reception of NSA frames */
+#define RX_DISABLE_NSA 6 /* disable reception of NSA frames */
+
+
+/*
+ * support for byte reversal in AIX
+ * (descriptors and pointers must be byte reversed in memory
+ * CPU is big endian; M-Channel is little endian)
+ */
+#ifdef AIX
+#define MDR_REV
+#define AIX_REVERSE(x) ((((x)<<24L)&0xff000000L) + \
+ (((x)<< 8L)&0x00ff0000L) + \
+ (((x)>> 8L)&0x0000ff00L) + \
+ (((x)>>24L)&0x000000ffL))
+#else
+#ifndef AIX_REVERSE
+#define AIX_REVERSE(x) (x)
+#endif
+#endif
+
+#ifdef MDR_REV
+#define MDR_REVERSE(x) ((((x)<<24L)&0xff000000L) + \
+ (((x)<< 8L)&0x00ff0000L) + \
+ (((x)>> 8L)&0x0000ff00L) + \
+ (((x)>>24L)&0x000000ffL))
+#else
+#ifndef MDR_REVERSE
+#define MDR_REVERSE(x) (x)
+#endif
+#endif
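+/*
+ * Example (for reference): with byte reversal enabled, AIX_REVERSE() and
+ * MDR_REVERSE() both map 0x12345678 to 0x78563412; with the empty default
+ * definitions they return the argument unchanged.
+ */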
+
+#endif
diff --git a/drivers/net/skfp/h/hwmtm.h b/drivers/net/skfp/h/hwmtm.h
new file mode 100644
index 000000000000..4e360af07d77
--- /dev/null
+++ b/drivers/net/skfp/h/hwmtm.h
@@ -0,0 +1,424 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _HWM_
+#define _HWM_
+
+#include "h/mbuf.h"
+
+/*
+ * MACRO for DMA synchronization:
+ * The descriptor 'desc' is flushed for the device 'flag'.
+ * Devices are the CPU (DDI_DMA_SYNC_FORCPU) and the
+ * adapter (DDI_DMA_SYNC_FORDEV).
+ *
+ * 'desc' Pointer to a Rx or Tx descriptor.
+ * 'flag' Flag for direction (view for CPU or DEVICE) that
+ * should be synchronized.
+ *
+ * Empty macros and defines are specified here. The real macro
+ * is os-specific and should be defined in osdef1st.h.
+ */
+#ifndef DRV_BUF_FLUSH
+#define DRV_BUF_FLUSH(desc,flag)
+#define DDI_DMA_SYNC_FORCPU
+#define DDI_DMA_SYNC_FORDEV
+#endif
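+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * before handing a filled TxD to the adapter the hardware module would
+ * typically issue
+ *	DRV_BUF_FLUSH(txd,DDI_DMA_SYNC_FORDEV) ;
+ * and before the CPU reads a completed RxD status word
+ *	DRV_BUF_FLUSH(rxd,DDI_DMA_SYNC_FORCPU) ;
+ * An OS that needs real flushing maps the macro onto its own cache/DMA
+ * synchronization primitive in osdef1st.h.
+ */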
+
+ /*
+	 * hardware module dependent receive modes
+ */
+#define RX_ENABLE_PASS_SMT 21
+#define RX_DISABLE_PASS_SMT 22
+#define RX_ENABLE_PASS_NSA 23
+#define RX_DISABLE_PASS_NSA 24
+#define RX_ENABLE_PASS_DB 25
+#define RX_DISABLE_PASS_DB 26
+#define RX_DISABLE_PASS_ALL 27
+#define RX_DISABLE_LLC_PROMISC 28
+#define RX_ENABLE_LLC_PROMISC 29
+
+
+#ifndef DMA_RD
+#define DMA_RD 1 /* memory -> hw */
+#endif
+#ifndef DMA_WR
+#define DMA_WR 2 /* hw -> memory */
+#endif
+#define SMT_BUF 0x80
+
+ /*
+ * bits of the frame status byte
+ */
+#define EN_IRQ_EOF 0x02 /* get IRQ after end of frame transmission */
+#define LOC_TX 0x04 /* send frame to the local SMT */
+#define LAST_FRAG 0x08 /* last TxD of the frame */
+#define FIRST_FRAG 0x10 /* first TxD of the frame */
+#define LAN_TX 0x20 /* send frame to network if set */
+#define RING_DOWN 0x40 /* error: unable to send, ring down */
+#define OUT_OF_TXD 0x80 /* error: not enough TxDs available */
+
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifdef LITTLE_ENDIAN
+#define HWM_REVERSE(x) (x)
+#else
+#define HWM_REVERSE(x) ((((x)<<24L)&0xff000000L) + \
+ (((x)<< 8L)&0x00ff0000L) + \
+ (((x)>> 8L)&0x0000ff00L) + \
+ (((x)>>24L)&0x000000ffL))
+#endif
+
+#define C_INDIC (1L<<25)
+#define A_INDIC (1L<<26)
+#define RD_FS_LOCAL 0x80
+
+ /*
+ * DEBUG FLAGS
+ */
+#define DEBUG_SMTF 1
+#define DEBUG_SMT 2
+#define DEBUG_ECM 3
+#define DEBUG_RMT 4
+#define DEBUG_CFM 5
+#define DEBUG_PCM 6
+#define DEBUG_SBA 7
+#define DEBUG_ESS 8
+
+#define DB_HWM_RX 10
+#define DB_HWM_TX 11
+#define DB_HWM_GEN 12
+
+struct s_mbuf_pool {
+#ifndef MB_OUTSIDE_SMC
+ SMbuf mb[MAX_MBUF] ; /* mbuf pool */
+#endif
+ SMbuf *mb_start ; /* points to the first mb */
+ SMbuf *mb_free ; /* free queue */
+} ;
+
+struct hwm_r {
+ /*
+	 * hardware module specific receive variables
+ */
+ u_int len ; /* length of the whole frame */
+ char *mb_pos ; /* SMbuf receive position */
+} ;
+
+struct hw_modul {
+ /*
+	 * All hardware module specific variables
+ */
+ struct s_mbuf_pool mbuf_pool ;
+ struct hwm_r r ;
+
+	union s_fp_descr volatile *descr_p ; /* points to the descriptor area */
+
+ u_short pass_SMT ; /* pass SMT frames */
+ u_short pass_NSA ; /* pass all NSA frames */
+ u_short pass_DB ; /* pass Direct Beacon Frames */
+ u_short pass_llc_promisc ; /* pass all llc frames (default ON) */
+
+ SMbuf *llc_rx_pipe ; /* points to the first queued llc fr */
+ SMbuf *llc_rx_tail ; /* points to the last queued llc fr */
+ int queued_rx_frames ; /* number of queued frames */
+
+ SMbuf *txd_tx_pipe ; /* points to first mb in the txd ring */
+ SMbuf *txd_tx_tail ; /* points to last mb in the txd ring */
+ int queued_txd_mb ; /* number of SMT MBufs in txd ring */
+
+	int	rx_break ;	/* rx was interrupted because ind. off */
+	int	leave_isr ;	/* leave fddi_isr immediately if set */
+ int isr_flag ; /* set, when HWM is entered from isr */
+ /*
+	 * variables for the current transmit frame
+ */
+ struct s_smt_tx_queue *tx_p ; /* pointer to the transmit queue */
+ u_long tx_descr ; /* tx descriptor for FORMAC+ */
+ int tx_len ; /* tx frame length */
+ SMbuf *tx_mb ; /* SMT tx MBuf pointer */
+ char *tx_data ; /* data pointer to the SMT tx Mbuf */
+
+ int detec_count ; /* counter for out of RxD condition */
+ u_long rx_len_error ; /* rx len FORMAC != sum of fragments */
+} ;
+
+
+/*
+ * DEBUG structs and macros
+ */
+
+#ifdef DEBUG
+struct os_debug {
+ int hwm_rx ;
+ int hwm_tx ;
+ int hwm_gen ;
+} ;
+#endif
+
+#ifdef DEBUG
+#ifdef DEBUG_BRD
+#define DB_P smc->debug
+#else
+#define DB_P debug
+#endif
+
+#define DB_RX(a,b,c,lev) if (DB_P.d_os.hwm_rx >= (lev)) printf(a,b,c)
+#define DB_TX(a,b,c,lev) if (DB_P.d_os.hwm_tx >= (lev)) printf(a,b,c)
+#define DB_GEN(a,b,c,lev) if (DB_P.d_os.hwm_gen >= (lev)) printf(a,b,c)
+#else /* DEBUG */
+#define DB_RX(a,b,c,lev)
+#define DB_TX(a,b,c,lev)
+#define DB_GEN(a,b,c,lev)
+#endif /* DEBUG */
+
+#ifndef SK_BREAK
+#define SK_BREAK()
+#endif
+
+
+/*
+ * HWM Macros
+ */
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_TX_PHYS)
+ * u_long HWM_GET_TX_PHYS(txd)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to read
+ * the physical address of the specified TxD.
+ *
+ * para txd pointer to the TxD
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_TX_PHYS(txd) (u_long)AIX_REVERSE((txd)->txd_tbadr)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_TX_LEN)
+ * int HWM_GET_TX_LEN(txd)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to read
+ * the fragment length of the specified TxD
+ *
+ *	para	txd	pointer to the TxD
+ *
+ * return the length of the fragment in bytes
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_TX_LEN(txd) ((int)AIX_REVERSE((txd)->txd_tbctrl)& RD_LENGTH)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_TX_USED)
+ * txd *HWM_GET_TX_USED(smc,queue)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to get the
+ * number of used TxDs for the queue, specified by the index.
+ *
+ * para queue the number of the send queue: Can be specified by
+ * QUEUE_A0, QUEUE_S or (frame_status & QUEUE_A0)
+ *
+ * return number of used TxDs for this send queue
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_TX_USED(smc,queue) (int) (smc)->hw.fp.tx_q[queue].tx_used
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_CURR_TXD)
+ * txd *HWM_GET_CURR_TXD(smc,queue)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to get the
+ * pointer to the TxD which points to the current queue put
+ * position.
+ *
+ * para queue the number of the send queue: Can be specified by
+ * QUEUE_A0, QUEUE_S or (frame_status & QUEUE_A0)
+ *
+ * return pointer to the current TxD
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_CURR_TXD(smc,queue) (struct s_smt_fp_txd volatile *)\
+ (smc)->hw.fp.tx_q[queue].tx_curr_put
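+/*
+ * Usage sketch (illustrative only, with hypothetical local variables):
+ *	struct s_smt_fp_txd volatile *txd ;
+ *	u_long	phys ;
+ *	int	len, used ;
+ *
+ *	txd = HWM_GET_CURR_TXD(smc,QUEUE_A0) ;		current put position
+ *	phys = HWM_GET_TX_PHYS(txd) ;			physical buffer address
+ *	len = HWM_GET_TX_LEN(txd) ;			fragment length in bytes
+ *	used = HWM_GET_TX_USED(smc,QUEUE_A0) ;		TxDs in use on queue A0
+ */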
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_TX_CHECK)
+ * void HWM_TX_CHECK(smc,frame_status,low_water)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ *	This macro is invoked by the OS-specific module before it leaves
+ *	its driver_send function. This macro calls mac_drv_clear_txd
+ *	if the number of free TxDs in the current transmit queue is equal
+ *	to or lower than the given low water mark.
+ *
+ * para frame_status status of the frame, see design description
+ * low_water low water mark of free TxD's
+ *
+ * END_MANUAL_ENTRY
+ */
+#ifndef HWM_NO_FLOW_CTL
+#define HWM_TX_CHECK(smc,frame_status,low_water) {\
+ if ((low_water)>=(smc)->hw.fp.tx_q[(frame_status)&QUEUE_A0].tx_free) {\
+ mac_drv_clear_txd(smc) ;\
+ }\
+}
+#else
+#define HWM_TX_CHECK(smc,frame_status,low_water) mac_drv_clear_txd(smc)
+#endif
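+/*
+ * Usage sketch (illustrative only): an OS-specific driver_send routine
+ * would typically end with something like
+ *	HWM_TX_CHECK(smc,frame_status,2) ;
+ * so that mac_drv_clear_txd() is called once at most two free TxDs
+ * remain on the addressed transmit queue.
+ */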
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_RX_FRAG_LEN)
+ * int HWM_GET_RX_FRAG_LEN(rxd)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to read
+ * the fragment length of the specified RxD
+ *
+ * para rxd pointer to the RxD
+ *
+ * return the length of the fragment in bytes
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_RX_FRAG_LEN(rxd) ((int)AIX_REVERSE((rxd)->rxd_rbctrl)& \
+ RD_LENGTH)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_RX_PHYS)
+ * u_long HWM_GET_RX_PHYS(rxd)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to read
+ * the physical address of the specified RxD.
+ *
+ * para rxd pointer to the RxD
+ *
+ * return the RxD's physical pointer to the data fragment
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_RX_PHYS(rxd) (u_long)AIX_REVERSE((rxd)->rxd_rbadr)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_RX_USED)
+ * int HWM_GET_RX_USED(smc)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to get
+ * the count of used RXDs in receive queue 1.
+ *
+ * return the used RXD count of receive queue 1
+ *
+ * NOTE: Remember, because of an ASIC bug at least one RXD should be unused
+ * in the descriptor ring !
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_RX_USED(smc) ((int)(smc)->hw.fp.rx_q[QUEUE_R1].rx_used)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_RX_FREE)
+ * int HWM_GET_RX_FREE(smc)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to get
+ * the rxd_free count of receive queue 1.
+ *
+ * return the rxd_free count of receive queue 1
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_RX_FREE(smc) ((int)(smc)->hw.fp.rx_q[QUEUE_R1].rx_free-1)
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_GET_CURR_RXD)
+ * rxd *HWM_GET_CURR_RXD(smc)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This macro may be invoked by the OS-specific module to get the
+ * pointer to the RxD which points to the current queue put
+ * position.
+ *
+ * return pointer to the current RxD
+ *
+ * END_MANUAL_ENTRY
+ */
+#define HWM_GET_CURR_RXD(smc) (struct s_smt_fp_rxd volatile *)\
+ (smc)->hw.fp.rx_q[QUEUE_R1].rx_curr_put
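+/*
+ * Usage sketch (illustrative only, e.g. while refilling receive queue 1):
+ *	struct s_smt_fp_rxd volatile *rxd ;
+ *	u_long	phys ;
+ *	int	len, free ;
+ *
+ *	rxd = HWM_GET_CURR_RXD(smc) ;		current put position
+ *	phys = HWM_GET_RX_PHYS(rxd) ;		physical buffer address
+ *	len = HWM_GET_RX_FRAG_LEN(rxd) ;	fragment length in bytes
+ *	free = HWM_GET_RX_FREE(smc) ;		RxDs still available
+ */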
+
+/*
+ * BEGIN_MANUAL_ENTRY(HWM_RX_CHECK)
+ * void HWM_RX_CHECK(smc,low_water)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ *	This macro is invoked by the OS-specific module before it leaves
+ *	the function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
+ *	if the number of used RxDs is equal to or lower than the
+ *	given low water mark.
+ *
+ * para low_water low water mark of used RxD's
+ *
+ * END_MANUAL_ENTRY
+ */
+#ifndef HWM_NO_FLOW_CTL
+#define HWM_RX_CHECK(smc,low_water) {\
+ if ((low_water) >= (smc)->hw.fp.rx_q[QUEUE_R1].rx_used) {\
+ mac_drv_fill_rxd(smc) ;\
+ }\
+}
+#else
+#define HWM_RX_CHECK(smc,low_water) mac_drv_fill_rxd(smc)
+#endif
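+/*
+ * Usage sketch (illustrative only): an OS-specific mac_drv_rx_complete
+ * implementation would typically finish with
+ *	HWM_RX_CHECK(smc,4) ;
+ * so that mac_drv_fill_rxd() is called as soon as no more than four
+ * RxDs are in use on receive queue 1.
+ */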
+
+#ifndef HWM_EBASE
+#define HWM_EBASE 500
+#endif
+
+#define HWM_E0001 HWM_EBASE + 1
+#define HWM_E0001_MSG "HWM: Wrong size of s_rxd_os struct"
+#define HWM_E0002 HWM_EBASE + 2
+#define HWM_E0002_MSG "HWM: Wrong size of s_txd_os struct"
+#define HWM_E0003 HWM_EBASE + 3
+#define HWM_E0003_MSG "HWM: smt_free_mbuf() called with NULL pointer"
+#define HWM_E0004 HWM_EBASE + 4
+#define HWM_E0004_MSG "HWM: Parity error rx queue 1"
+#define HWM_E0005 HWM_EBASE + 5
+#define HWM_E0005_MSG "HWM: Encoding error rx queue 1"
+#define HWM_E0006 HWM_EBASE + 6
+#define HWM_E0006_MSG "HWM: Encoding error async tx queue"
+#define HWM_E0007 HWM_EBASE + 7
+#define HWM_E0007_MSG "HWM: Encoding error sync tx queue"
+#define HWM_E0008 HWM_EBASE + 8
+#define HWM_E0008_MSG ""
+#define HWM_E0009 HWM_EBASE + 9
+#define HWM_E0009_MSG "HWM: Out of RxD condition detected"
+#define HWM_E0010 HWM_EBASE + 10
+#define HWM_E0010_MSG "HWM: A protocol layer has tried to send a frame with an invalid frame control"
+#define HWM_E0011 HWM_EBASE + 11
+#define HWM_E0011_MSG "HWM: mac_drv_clear_tx_queue was called although the hardware wasn't stopped"
+#define HWM_E0012 HWM_EBASE + 12
+#define HWM_E0012_MSG "HWM: mac_drv_clear_rx_queue was called although the hardware wasn't stopped"
+#define HWM_E0013 HWM_EBASE + 13
+#define HWM_E0013_MSG "HWM: mac_drv_repair_descr was called although the hardware wasn't stopped"
+
+#endif
diff --git a/drivers/net/skfp/h/lnkstat.h b/drivers/net/skfp/h/lnkstat.h
new file mode 100644
index 000000000000..c73dcd96a40f
--- /dev/null
+++ b/drivers/net/skfp/h/lnkstat.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Definition of the Error Log Structure
+ * This structure will be copied into the Error Log buffer
+ * during the NDIS General Request ReadErrorLog by the MAC Driver
+ */
+
+struct s_error_log {
+
+ /*
+ * place holder for token ring adapter error log (zeros)
+ */
+ u_char reserved_0 ; /* byte 0 inside Error Log */
+ u_char reserved_1 ; /* byte 1 */
+ u_char reserved_2 ; /* byte 2 */
+ u_char reserved_3 ; /* byte 3 */
+ u_char reserved_4 ; /* byte 4 */
+ u_char reserved_5 ; /* byte 5 */
+ u_char reserved_6 ; /* byte 6 */
+ u_char reserved_7 ; /* byte 7 */
+ u_char reserved_8 ; /* byte 8 */
+ u_char reserved_9 ; /* byte 9 */
+ u_char reserved_10 ; /* byte 10 */
+ u_char reserved_11 ; /* byte 11 */
+ u_char reserved_12 ; /* byte 12 */
+ u_char reserved_13 ; /* byte 13 */
+
+ /*
+ * FDDI link statistics
+ */
+/*
+ * smt error low
+ */
+#define SMT_ERL_AEB (1<<15) /* A elast. buffer */
+#define SMT_ERL_BLC (1<<14) /* B link error condition */
+#define SMT_ERL_ALC (1<<13) /* A link error condition */
+#define SMT_ERL_NCC (1<<12) /* not copied condition */
+#define SMT_ERL_FEC (1<<11) /* frame error condition */
+
+/*
+ * smt event low
+ */
+#define SMT_EVL_NCE (1<<5)
+
+ u_short smt_error_low ; /* byte 14/15 */
+ u_short smt_error_high ; /* byte 16/17 */
+ u_short smt_event_low ; /* byte 18/19 */
+ u_short smt_event_high ; /* byte 20/21 */
+ u_short connection_policy_violation ; /* byte 22/23 */
+ u_short port_event ; /* byte 24/25 */
+ u_short set_count_low ; /* byte 26/27 */
+ u_short set_count_high ; /* byte 28/29 */
+ u_short aci_id_code ; /* byte 30/31 */
+ u_short purge_frame_counter ; /* byte 32/33 */
+
+ /*
+ * CMT and RMT state machines
+ */
+ u_short ecm_state ; /* byte 34/35 */
+ u_short pcm_a_state ; /* byte 36/37 */
+ u_short pcm_b_state ; /* byte 38/39 */
+ u_short cfm_state ; /* byte 40/41 */
+ u_short rmt_state ; /* byte 42/43 */
+
+ u_short not_used[30] ; /* byte 44-103 */
+
+ u_short ucode_version_level ; /* byte 104/105 */
+
+ u_short not_used_1 ; /* byte 106/107 */
+ u_short not_used_2 ; /* byte 108/109 */
+} ;
diff --git a/drivers/net/skfp/h/mbuf.h b/drivers/net/skfp/h/mbuf.h
new file mode 100644
index 000000000000..b339d1f2e0e5
--- /dev/null
+++ b/drivers/net/skfp/h/mbuf.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _MBUF_
+#define _MBUF_
+
+#ifndef PCI
+#define M_SIZE 4550
+#else
+#define M_SIZE 4504
+#endif
+
+#ifndef MAX_MBUF
+#define MAX_MBUF 4
+#endif
+
+#ifndef NO_STD_MBUF
+#define sm_next m_next
+#define sm_off m_off
+#define sm_len m_len
+#define sm_data m_data
+#define SMbuf Mbuf
+#define mtod smtod
+#define mtodoff smtodoff
+#endif
+
+struct s_mbuf {
+ struct s_mbuf *sm_next ; /* low level linked list */
+ short sm_off ; /* offset in m_data */
+ u_int sm_len ; /* len of data */
+#ifdef PCI
+ int sm_use_count ;
+#endif
+ char sm_data[M_SIZE] ;
+} ;
+
+typedef struct s_mbuf SMbuf ;
+
+/* mbuf head, to typed data */
+#define smtod(x,t) ((t)((x)->sm_data + (x)->sm_off))
+#define smtodoff(x,t,o) ((t)((x)->sm_data + (o)))
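+/*
+ * Usage sketch (illustrative only): given an SMbuf *mb that carries an
+ * SMT frame, the frame header can be reached with
+ *	struct smt_header *sm = smtod(mb,struct smt_header *) ;
+ * smtodoff() does the same with an explicit offset instead of sm_off.
+ */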
+
+#endif /* _MBUF_ */
diff --git a/drivers/net/skfp/h/osdef1st.h b/drivers/net/skfp/h/osdef1st.h
new file mode 100644
index 000000000000..5359eb53008d
--- /dev/null
+++ b/drivers/net/skfp/h/osdef1st.h
@@ -0,0 +1,123 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Operating system-dependent definitions that have to be defined
+ * before any other header files are included.
+ */
+
+// HWM (HardWare Module) Definitions
+// -----------------------
+
+#ifdef __LITTLE_ENDIAN
+#define LITTLE_ENDIAN
+#else
+#define BIG_ENDIAN
+#endif
+
+// this is set in the makefile
+// #define PCI /* only PCI adapters supported by this driver */
+// #define MEM_MAPPED_IO /* use memory mapped I/O */
+
+
+#define USE_CAN_ADDR /* DA and SA in MAC header are canonical. */
+
+#define MB_OUTSIDE_SMC /* SMT Mbufs outside of smc struct. */
+
+// -----------------------
+
+
+// SMT Definitions
+// -----------------------
+#define SYNC /* allow synchronous frames */
+
+// #define SBA /* Synchronous Bandwidth Allocator support */
+ /* not available as free source */
+
+#define ESS /* SBA End Station Support */
+
+#define SMT_PANIC(smc, nr, msg) printk(KERN_INFO "SMT PANIC: code: %d, msg: %s\n",nr,msg)
+
+
+#ifdef DEBUG
+#define printf(s,args...) printk(KERN_INFO s, ## args)
+#endif
+
+// #define HW_PTR u_long
+// -----------------------
+
+
+
+// HWM and OS-specific buffer definitions
+// -----------------------
+
+// default number of receive buffers.
+#define NUM_RECEIVE_BUFFERS 10
+
+// default number of transmit buffers.
+#define NUM_TRANSMIT_BUFFERS 10
+
+// Number of SMT buffers (Mbufs).
+#define NUM_SMT_BUF 4
+
+// Number of TXDs for asynchronous transmit queue.
+#define HWM_ASYNC_TXD_COUNT (NUM_TRANSMIT_BUFFERS + NUM_SMT_BUF)
+
+// Number of TXDs for synchronous transmit queue.
+#define HWM_SYNC_TXD_COUNT HWM_ASYNC_TXD_COUNT
+
+
+// Number of RXDs for receive queue #1.
+// Note: Workaround for ASIC Errata #7: One extra RXD is required.
+#if (NUM_RECEIVE_BUFFERS > 100)
+#define SMT_R1_RXD_COUNT (1 + 100)
+#else
+#define SMT_R1_RXD_COUNT (1 + NUM_RECEIVE_BUFFERS)
+#endif
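+// Example: with the default NUM_RECEIVE_BUFFERS of 10 this yields
+// SMT_R1_RXD_COUNT = 1 + 10 = 11 RXDs for receive queue #1.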
+
+// Number of RXDs for receive queue #2.
+#define SMT_R2_RXD_COUNT 0 // Not used.
+// -----------------------
+
+
+
+/*
+ * OS-specific part of the transmit/receive descriptor structure (TXD/RXD).
+ *
+ * Note: The size of these structures must follow this rule:
+ *
+ * sizeof(struct) + 2*sizeof(void*) == n * 16, n >= 1
+ *
+ * We use the dma_addr fields under Linux to keep track of the
+ * DMA address of the packet data, for later pci_unmap_single. -DaveM
+ */
+
+struct s_txd_os { // os-specific part of transmit descriptor
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+} ;
+
+struct s_rxd_os { // os-specific part of receive descriptor
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+} ;
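+// Size-rule check (illustrative, assuming 32-bit pointers and a 32-bit
+// dma_addr_t): sizeof(struct) + 2*sizeof(void*) = 8 + 8 = 16, which
+// satisfies the n*16 requirement above with n = 1.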
+
+
+/*
+ * So that we do not need to make too many modifications to the generic
+ * driver parts, we take advantage of the AIX byte-swapping macro interface.
+ */
+
+#define AIX_REVERSE(x) ((u32)le32_to_cpu((u32)(x)))
+#define MDR_REVERSE(x) ((u32)le32_to_cpu((u32)(x)))
diff --git a/drivers/net/skfp/h/sba.h b/drivers/net/skfp/h/sba.h
new file mode 100644
index 000000000000..df716cd5784a
--- /dev/null
+++ b/drivers/net/skfp/h/sba.h
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Synchronous Bandwidth Allocation (SBA) structs
+ */
+
+#ifndef _SBA_
+#define _SBA_
+
+#include "h/mbuf.h"
+#include "h/sba_def.h"
+
+#ifdef SBA
+
+/* Timer Cell Template */
+struct timer_cell {
+ struct timer_cell *next_ptr ;
+ struct timer_cell *prev_ptr ;
+ u_long start_time ;
+ struct s_sba_node_vars *node_var ;
+} ;
+
+/*
+ * Node variables
+ */
+struct s_sba_node_vars {
+ u_char change_resp_flag ;
+ u_char report_resp_flag ;
+ u_char change_req_flag ;
+ u_char report_req_flag ;
+ long change_amount ;
+ long node_overhead ;
+ long node_payload ;
+ u_long node_status ;
+ u_char deallocate_status ;
+ u_char timer_state ;
+ u_short report_cnt ;
+ long lastrep_req_tranid ;
+ struct fddi_addr mac_address ;
+ struct s_sba_sessions *node_sessions ;
+ struct timer_cell timer ;
+} ;
+
+/*
+ * Session variables
+ */
+struct s_sba_sessions {
+ u_long deallocate_status ;
+ long session_overhead ;
+ u_long min_segment_size ;
+ long session_payload ;
+ u_long session_status ;
+ u_long sba_category ;
+ long lastchg_req_tranid ;
+ u_short session_id ;
+ u_char class ;
+ u_char fddi2 ;
+ u_long max_t_neg ;
+ struct s_sba_sessions *next_session ;
+} ;
+
+struct s_sba {
+
+ struct s_sba_node_vars node[MAX_NODES] ;
+ struct s_sba_sessions session[MAX_SESSIONS] ;
+
+ struct s_sba_sessions *free_session ; /* points to the first */
+ /* free session */
+
+ struct timer_cell *tail_timer ; /* points to the last timer cell */
+
+ /*
+ * variables for allocation actions
+ */
+ long total_payload ; /* Total Payload */
+ long total_overhead ; /* Total Overhead */
+ long sba_allocatable ; /* allocatable sync bandwidth */
+
+ /*
+ * RAF message receive parameters
+ */
+ long msg_path_index ; /* Path Type */
+ long msg_sba_pl_req ; /* Payload Request */
+ long msg_sba_ov_req ; /* Overhead Request */
+ long msg_mib_pl ; /* Current Payload for this Path */
+ long msg_mib_ov ; /* Current Overhead for this Path*/
+ long msg_category ; /* Category of the Allocation */
+ u_long msg_max_t_neg ; /* longest T_Neg acceptable */
+	u_long	msg_min_seg_siz ;	/* minimum segment size */
+ struct smt_header *sm ; /* points to the rec message */
+ struct fddi_addr *msg_alloc_addr ; /* Allocation Address */
+
+ /*
+ * SBA variables
+ */
+ u_long sba_t_neg ; /* holds the last T_NEG */
+ long sba_max_alloc ; /* the parsed value of SBAAvailable */
+
+ /*
+ * SBA state machine variables
+ */
+ short sba_next_state ; /* the next state of the SBA */
+	char	sba_command ;		/* holds the executed SBA cmd */
+ u_char sba_available ; /* parsed value after possible check */
+} ;
+
+#endif /* SBA */
+
+ /*
+ * variables for the End Station Support
+ */
+struct s_ess {
+
+ /*
+ * flags and counters
+ */
+ u_char sync_bw_available ; /* is set if sync bw is allocated */
+ u_char local_sba_active ; /* set when a local sba is available */
+	char	raf_act_timer_poll ;	/* activate the timer to send alloc req */
+ char timer_count ; /* counts every timer function call */
+
+ SMbuf *sba_reply_pend ; /* local reply for the sba is pending */
+
+ /*
+ * variables for the ess bandwidth control
+ */
+	long	sync_bw ;		/* holds the allocated sync bw */
+ u_long alloc_trans_id ; /* trans id of the last alloc req */
+} ;
+#endif
diff --git a/drivers/net/skfp/h/sba_def.h b/drivers/net/skfp/h/sba_def.h
new file mode 100644
index 000000000000..0459a095d0cd
--- /dev/null
+++ b/drivers/net/skfp/h/sba_def.h
@@ -0,0 +1,76 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#define PHYS 0 /* physical addr */
+#define PERM_ADDR	0x80		/* permanent address */
+#define SB_STATIC 0x00000001
+#define MAX_PAYLOAD 1562
+#define PRIMARY_RING 0x00000001
+#ifndef NULL
+#define NULL 0x00
+#endif
+
+/*********************** SB_Input Variable Values ***********************/
+/* may be needed when ever the SBA state machine is called */
+
+#define UNKNOWN_SYNC_SOURCE 0x0001
+#define REQ_ALLOCATION 0x0002
+#define REPORT_RESP 0x0003
+#define CHANGE_RESP 0x0004
+#define TNEG 0x0005
+#define NIF 0x0006
+#define SB_STOP 0x0007
+#define SB_START 0x0008
+#define REPORT_TIMER 0x0009
+#define CHANGE_REQUIRED 0x000A
+
+#define DEFAULT_OV 50
+
+#ifdef SBA
+/**************************** SBA STATES *****************************/
+
+#define SBA_STANDBY 0x00000000
+#define SBA_ACTIVE 0x00000001
+#define SBA_RECOVERY 0x00000002
+#define SBA_REPORT 0x00000003
+#define SBA_CHANGE 0x00000004
+
+/**************************** OTHERS *********************************/
+
+#define FIFTY_PERCENT 50 /* bytes per second */
+#define MAX_SESSIONS 150
+#define TWO_MINUTES 13079 /* 9.175 ms/tick */
+#define FIFTY_BYTES 50
+#define SBA_DENIED 0x0000000D
+#define I_NEED_ONE 0x00000000
+#define MAX_NODES 50
+/*#define T_REPORT 0x59682F00L*/ /* 120s/80ns in Hex */
+#define TWO_MIN 120 /* seconds */
+#define SBA_ST_UNKNOWN 0x00000002
+#define SBA_ST_ACTIVE 0x00000001
+#define S_CLEAR 0x00000000L
+#define ZERO 0x00000000
+#define FULL 0x00000000 /* old: 0xFFFFFFFFF */
+#define S_SET 0x00000001L
+#define LOW_PRIO 0x02 /* ??????? */
+#define OK 0x01 /* ??????? */
+#define NOT_OK 0x00 /* ??????? */
+
+/****************************************/
+/* deallocate_status[ni][si] values */
+/****************************************/
+#define TX_CHANGE 0X00000001L
+#define PENDING 0x00000002L
+#define NONE 0X00000000L
+#endif
diff --git a/drivers/net/skfp/h/skfbi.h b/drivers/net/skfp/h/skfbi.h
new file mode 100644
index 000000000000..ba347d6910f1
--- /dev/null
+++ b/drivers/net/skfp/h/skfbi.h
@@ -0,0 +1,1919 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SKFBI_H_
+#define _SKFBI_H_
+
+#ifdef SYNC
+#define exist_board_far exist_board
+#define get_board_para_far get_board_para
+#endif
+
+/*
+ * physical address offset + IO-Port base address
+ */
+#ifndef PCI
+#define ADDR(a) ((a)+smc->hw.iop)
+#define ADDRS(smc,a) ((a)+(smc)->hw.iop)
+#endif
+
+/*
+ * FDDI-Fx (x := {I(SA), E(ISA), M(CA), P(CI)})
+ * address calculation & function defines
+ */
+
+#ifdef EISA
+
+/*
+ * Configuration PROM: !! all 8-Bit IO's !!
+ * |<- MAC-Address ->|
+ * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/
+ * val: |PROD_ID0..3| | free | |00|00|5A|40| |nn|mm|00|00|
+ * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/
+ * IO- ^ ^ ^ ^ ^
+ * port 0C80 0C83 0C88 0C90 0C98
+ * | \
+ * | \
+ * | \______________________________________________
+ * EISA Expansion Board Product ID: \
+ * BIT: |7 6 5 4 3 2 1 0| \
+ * | PROD_ID0 | PROD_ID1 | PROD_ID2 | PROD_ID3 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0| MAN_C0 | MAN_C1 | MAN_C2 | PROD1 | PROD0 | REV1 | REV0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^=reserved | product numb. | revision numb |
+ *	MAN_Cx = compressed manufacturer code (x:=0..2)
+ * ASCII : 'A'..'Z' : 0x41..0x5A -> compr.(c-0x40) : 0x01..0x1A (5Bits!)
+ */
+
+#ifndef MULT_OEM
+#ifndef OEM_CONCEPT
+#define MAN_C0 ('S'-0x40)
+#define MAN_C1 ('K'-0x40)
+#define MAN_C2 ('D'-0x40)
+#define PROD_ID0 (u_char)((MAN_C0<<2) | (MAN_C1>>3))
+#define PROD_ID1 (u_char)(((MAN_C1<<5) & 0xff) | MAN_C2)
+#define PROD_ID2 (u_char)(1) /* prod. nr. */
+#define PROD_ID3 (u_char)(0) /* rev. nr. */
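+/*
+ * Worked example (for reference): MAN_C0/1/2 = 'S','K','D' - 0x40
+ * = 0x13,0x0b,0x04, so PROD_ID0 = (0x13<<2)|(0x0b>>3) = 0x4d and
+ * PROD_ID1 = ((0x0b<<5)&0xff)|0x04 = 0x64.
+ */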
+
+#ifndef OEM_USER_DATA
+#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
+#endif
+#else /* OEM_CONCEPT */
+
+/* MAN_C(0|1|2) no longer present (ra). */
+#define PROD_ID0 (u_char)OEM_PROD_ID0
+#define PROD_ID1 (u_char)OEM_PROD_ID1
+#define PROD_ID2 (u_char)OEM_PROD_ID2
+#define PROD_ID3 (u_char)OEM_PROD_ID3
+#endif /* OEM_CONCEPT */
+
+#define SKLOGO PROD_ID0, PROD_ID1, PROD_ID2, PROD_ID3
+#endif /* MULT_OEM */
+
+#define SADDRL (0) /* start address SKLOGO */
+#define SA_MAC (0x10) /* start addr. MAC_AD within the PROM */
+#define PRA_OFF (4)
+#define SA_PMD_TYPE (8) /* start addr. PMD-Type */
+
+#define SKFDDI_PSZ 32 /* address PROM size */
+
+/*
+ * address transmission from logical to physical offset address on board
+ */
+#define FMA(a) (0x0400|((a)<<1)) /* FORMAC+ (r/w) */
+#define P1A(a) (0x0800|((a)<<1)) /* PLC1 (r/w) */
+#define P2A(a) (0x0840|((a)<<1)) /* PLC2 (r/w) */
+#define TIA(a) (0x0880|((a)<<1)) /* Timer (r/w) */
+#define PRA(a) (0x0c80| (a)) /* configuration PROM */
+#define C0A(a) (0x0c84| (a)) /* config. RAM */
+#define C1A(a) (0x0ca0| (a)) /* IRQ-, DMA-nr., EPROM type */
+#define C2A(a) (0x0ca4| (a)) /* EPROM and PAGE selector */
+
+#define CONF C0A(0) /* config RAM (card enable bit port) */
+#define PGRA C2A(0) /* Flash page register */
+#define CDID PRA(0) /* Card ID I/O port addr. offset */
+
+
+/*
+ * physical address offset + slot specific IO-Port base address
+ */
+#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */
+#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */
+#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */
+#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */
+#define PR_A(a) (PRA(a)+smc->hw.iop) /* config. PROM */
+#define C0_A(a) (C0A(a)+smc->hw.iop) /* config. RAM */
+#define C1_A(a) (C1A(a)+smc->hw.iop) /* config. RAM */
+#define C2_A(a) (C2A(a)+smc->hw.iop) /* config. RAM */
+
+
+#define CSRA 0x0008 /* control/status register address (r/w) */
+#define ISRA 0x0008 /* int. source register address (upper 8Bits) */
+#define PLC1I 0x001a /* clear PLC1 interrupt (write only) */
+#define PLC2I 0x0020 /* clear PLC2 interrupt (write only) */
+#define CSFA 0x001c /* control/status FIFO BUSY flags (read only) */
+#define RQAA 0x001c /* Request reg. (write only) */
+#define WCTA 0x001e /* word counter (r/w) */
+#define FFLAG 0x005e /* FLAG/V_FULL (FIFO almost full, write only)*/
+
+#define CSR_A (CSRA+smc->hw.iop) /* control/status register address (r/w) */
+#ifdef UNIX
+#define CSR_AS(smc) (CSRA+(smc)->hw.iop) /* control/status register address (r/w) */
+#endif
+#define ISR_A (ISRA+smc->hw.iop) /* int. source register address (upper 8Bits) */
+#define PLC1_I		(PLC1I+smc->hw.iop)	/* clear PLC1 interrupt (write only) */
+#define PLC2_I (PLC2I+smc->hw.iop) /* clear PLC2 interrupt (write only) */
+#define CSF_A (CSFA+smc->hw.iop) /* control/status FIFO BUSY flags (r/w) */
+#define RQA_A (RQAA+smc->hw.iop) /* Request reg. (write only) */
+#define WCT_A (WCTA+smc->hw.iop) /* word counter (r/w) */
+#define FFLAG_A (FFLAG+smc->hw.iop) /* FLAG/V_FULL (FIFO almost full, write only)*/
+
+/*
+ * control/status register CSRA bits
+ */
+/* write */
+#define CS_CRESET 0x01 /* Card reset (0=reset) */
+#define CS_RESET_FIFO 0x02 /* FIFO reset (0=reset) */
+#define CS_IMSK 0x04 /* enable IRQ (1=enable, 0=disable) */
+#define CS_EN_IRQ_TC 0x08 /* enable IRQ from transfer counter */
+#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/
+#define CS_LED_0 0x40 /* switch LED 0 */
+#define CS_LED_1 0x80 /* switch LED 1 */
+/* read */
+#define CS_BYSTAT	0x40		/* 0=bypass exists, 1=not present */
+#define CS_SAS		0x80		/* single attachment station (=1) */
+
+/*
+ * control/status register CSFA bits (FIFO)
+ */
+#define CSF_MUX0 0x01
+#define CSF_MUX1 0x02
+#define CSF_HSREQ0 0x04
+#define CSF_HSREQ1 0x08
+#define CSF_HSREQ2 0x10
+#define CSF_BUSY_DMA 0x40
+#define CSF_BUSY_FIFO 0x80
+
+/*
+ * Interrupt source register ISRA (upper 8 data bits) read only & low activ.
+ */
+#define IS_MINTR1 0x0100 /* FORMAC ST1U/L & ~IMSK1U/L*/
+#define IS_MINTR2 0x0200 /* FORMAC ST2U/L & ~IMSK2U/L*/
+#define IS_PLINT1 0x0400 /* PLC1 */
+#define IS_PLINT2 0x0800 /* PLC2 */
+#define IS_TIMINT 0x1000 /* Timer 82C54-2 */
+#define IS_TC 0x2000 /* transf. counter */
+
+#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT|IS_TC)
+
+/*
+ * CONFIG<0> RAM (C0_A())
+ */
+#define CFG_CARD_EN 0x01 /* card enable */
+
+/*
+ * CONFIG<1> RAM (C1_A())
+ */
+#define CFG_IRQ_SEL 0x03 /* IRQ select (4 nr.) */
+#define CFG_IRQ_TT 0x04 /* IRQ trigger type (LEVEL/EDGE) */
+#define CFG_DRQ_SEL 0x18 /* DMA requ. (4 nr.) */
+#define CFG_BOOT_EN 0x20 /* 0=BOOT-, 1=Application Software */
+#define CFG_PROG_EN 0x40 /* V_Prog for FLASH_PROM (1=on) */
+
+/*
+ * CONFIG<2> RAM (C2_A())
+ */
+#define CFG_EPROM_SEL 0x0f /* FPROM start address selection */
+#define CFG_PAGE 0xf0 /* FPROM page selection */
+
+
+#define READ_PROM(a) ((u_char)inp(a))
+#define GET_PAGE(i) outp(C2_A(0),((int)(i)<<4) | (inp(C2_A(0)) & ~CFG_PAGE))
+#define FPROM_SW() (inp(C1_A(0)) & CFG_BOOT_EN)
+
+#define MAX_PAGES 16 /* 16 pages */
+#define MAX_FADDR 0x2000 /* 8K per page */
+#define VPP_ON() outp(C1_A(0),inp(C1_A(0)) | CFG_PROG_EN)
+#define VPP_OFF() outp(C1_A(0),inp(C1_A(0)) & ~CFG_PROG_EN)
+
+#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA)
+#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO)
+#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO))
+#define BUS_CHECK()
+
+#ifdef UNISYS
+/* For UNISYS use another macro with drv_usecwait function */
+#define CHECK_DMA() {u_long k = 1000000; \
+ while (k && (DMA_BUSY())) { k--; drv_usecwait(20); } \
+ if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; }
+#else
+#define CHECK_DMA() {u_long k = 1000000 ;\
+ while (k && (DMA_BUSY())) k-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; }
+#endif
+
+#define CHECK_FIFO() {u_long k = 1000000 ;\
+ while (k && (FIFO_BUSY())) k-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; }
+
+#define CHECK_DMA_FIFO() {u_long k = 1000000 ;\
+ while (k && (DMA_FIFO_BUSY())) k-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; }
+
+#define GET_ISR() ~inpw(ISR_A)
+#define CHECK_ISR() ~inpw(ISR_A)
+
+#ifndef UNIX
+#ifndef WINNT
+#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&\
+ (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led)
+#else /* WINNT */
+#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A)&\
+ (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led)
+#endif /* WINNT */
+#else /* UNIX */
+#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\
+ (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|(smc)->hw.led)
+#endif
+
+#ifndef UNIX
+#define STI_FBI() outpw(CSR_A,(inpw(CSR_A)&\
+ (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|smc->hw.led)
+#else
+#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\
+ (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|(smc)->hw.led)
+#endif
+
+/* EISA DMA Controller */
+#define DMA_WRITE_SINGLE_MASK_BIT_M 0x0a /* Master DMA Controller */
+#define DMA_WRITE_SINGLE_MASK_BIT_S 0xd4 /* Slave DMA Controller */
+#define DMA_CLEAR_BYTE_POINTER_M 0x0c
+#define DMA_CLEAR_BYTE_POINTER_S 0xd8
+
+#endif /* EISA */
+
+#ifdef MCA
+
+/*
+ * POS Register: !! all I/O's are 8-Bit !!
+ */
+#define POS_SYS_SETUP 0x94 /* system setup register */
+#define POS_SYSTEM 0xff /* system mode */
+
+#define POS_CHANNEL_POS 0x96 /* register slot ID */
+#define POS_CHANNEL_BIT 0x08 /* mask for -"- */
+
+#define POS_BASE 0x100 /* POS base address */
+#define POS_ID_LOW POS_BASE /* card ID low */
+#define POS_ID_HIGH (POS_BASE+1) /* card ID high */
+#define POS_102 (POS_BASE+2) /* card en., arbitration level .. */
+#define POS_103 (POS_BASE+3) /* FPROM addr, page */
+#define POS_104 (POS_BASE+4) /* I/O, IRQ */
+#define POS_105 (POS_BASE+5) /* POS_CHCK */
+#define POS_106 (POS_BASE+6) /* to read VPD */
+#define POS_107 (POS_BASE+7) /* added without function */
+
+/* FM1 card IDs */
+#define FM1_CARD_ID0 0x83
+#define FM1_CARD_ID1 0
+
+#define FM1_IBM_ID0 0x9c
+#define FM1_IBM_ID1 0x8f
+
+
+/* FM2 card IDs */
+#define FM2_CARD_ID0 0xab
+#define FM2_CARD_ID1 0
+
+#define FM2_IBM_ID0 0x7e
+#define FM2_IBM_ID1 0x8f
+
+/* Board revision. */
+#define FM1_REV 0
+#define FM2_REV 1
+
+#define MAX_SLOT 8
+
+/*
+ * POS_102
+ */
+#define POS_CARD_EN 0x01 /* card enable =1 */
+#define POS_SDAT_EN 0x02 /* enable 32-bit streaming data mode */
+#define POS_EN_CHKINT 0x04 /* enable int. from check line asserted */
+#define POS_EN_BUS_ERR 0x08 /* enable int. on invalid busmaster transf. */
+#define POS_FAIRNESS	0x10		/* fairness on =1 */
+/* attention: arbitration level used with bit 0 POS 105 */
+#define POS_LARBIT 0xe0 /* arbitration level (0,0,0)->level = 0x8
+ (1,1,1)->level = 0xf */
+/*
+ * POS_103
+ */
+#define POS_PAGE 0x07 /* FPROM page selection */
+#define POS_BOOT_EN 0x08 /* boot PROM enable =1 */
+#define POS_MSEL 0x70 /* memory start address for FPROM mapping */
+#define PROG_EN 0x80 /* FM1: Vpp prog on/off */
+#define POS_SDR 0x80 /* FM2: Streaming data bit */
+
+/*
+ * POS_104
+ */
+#define POS_IOSEL 0x3f /* selected I/O base address */
+#define POS_IRQSEL 0xc0 /* selected interrupt */
+
+/*
+ * POS_105
+ */
+#define POS_CHCK 0x80
+#define POS_SYNC_ERR 0x20 /* FM2: synchronous error reporting */
+#define POS_PAR_DATA 0x10 /* FM2: data parity enable bit */
+#define POS_PAR_ADDR 0x08 /* FM2: address parity enable bit */
+#define POS_IRQHSEL 0x02 /* FM2: Highest bit for IRQ_selection */
+#define POS_HARBIT 0x01 /* Highest bit in Bus arbitration selection */
+
+#define SA_MAC (0) /* start addr. MAC_AD within the PROM */
+#define PRA_OFF (0)
+#define SA_PMD_TYPE (8) /* start addr. PMD-Type */
+
+/*
+ * address transmission from logical to physical offset address on board
+ */
+#define FMA(a) (0x0100|((a)<<1)) /* FORMAC+ (r/w) */
+#define P2(a) (0x00c0|((a)<<1)) /* PLC2 (r/w) (DAS) */
+#define P1(a) (0x0080|((a)<<1)) /* PLC1 (r/w) */
+#define TI(a) (0x0060|((a)<<1)) /* Timer (r/w) */
+#define PR(a) (0x0040|((a)<<1)) /* configuration PROM */
+#define CS(a) (0x0020| (a)) /* control/status */
+#define FF(a) (0x0010|((a)<<1)) /* FIFO ASIC */
+#define CT(a) (0x0000|((a)<<1)) /* counter */
+
+/*
+ * counter
+ */
+#define ACLA CT(0) /* address counter low */
+#define ACHA CT(1) /* address counter high */
+#define BCN CT(2) /* byte counter */
+#define MUX CT(3) /* MUX-register */
+#define WCN CT(0x08) /* word counter */
+#define FFLG CT(0x09) /* FIFO Flags */
+
+/*
+ * test/control register (FM2 only)
+ */
+#define CNT_TST 0x018 /* Counter test control register */
+#define CNT_STP 0x01a /* Counter test step reg. (8 Bit) */
+
+/*
+ * CS register (read only)
+ */
+#define CSRA CS(0) /* control/status register address */
+#define CSFA CS(2) /* control/status FIFO BUSY ... */
+#define ISRA CS(4) /* first int. source register address */
+#define ISR2 CS(6) /* second int. source register address */
+#define LEDR CS(0x0c) /* LED register r/w */
+#define CSIL CS(0x10) /* I/O mapped POS_ID_low (100) */
+#define CSIH CS(0x12) /* - " - POS_ID_HIGH (101) */
+#define CSA CS(0x14) /* - " - POS_102 */
+#define CSM CS(0x0e) /* - " - POS_103 */
+#define CSM_FM1 CS(0x16) /* - " - POS_103 (copy in FM1) */
+#define CSI CS(0x18) /* - " - POS_104 */
+#define CSS CS(0x1a) /* - " - POS_105 */
+#define CSP_06 CS(0x1c) /* - " - POS_106 */
+#define WDOG_ST 0x1c /* Watchdog status (FM2 only) */
+#define WDOG_EN 0x1c /* Watchdog enabling (FM2 only, 8Bit) */
+#define WDOG_DIS 0x1e /* Watchdog disabling (FM2 only, 8Bit) */
+
+#define PGRA CSM /* Flash page register */
+
+
+#define WCTA FF(0) /* word counter */
+#define FFLAG FF(1) /* FLAG/V_FULL (FIFO almost full, write only)*/
+
+/*
+ * Timer register (FM2 only)
+ */
+#define RTM_CNT 0x28 /* RTM Counter */
+#define TI_DIV 0x60 /* Timer Prescaler */
+#define TI_CH1 0x62 /* Timer channel 1 counter */
+#define TI_STOP 0x64 /* Stop timer on channel 1 */
+#define TI_STRT 0x66 /* Start timer on channel 1 */
+#define TI_INI2 0x68 /* Timer: Bus master preemption */
+#define TI_CNT2 0x6a /* Timer */
+#define TI_INI3 0x6c /* Timer: Streaming data */
+#define TI_CNT3 0x6e /* Timer */
+#define WDOG_LO 0x70 /* Watchdog counter low */
+#define WDOG_HI 0x72 /* Watchdog counter high */
+#define RTM_PRE 0x74 /* restr. token prescaler */
+#define RTM_TIM 0x76 /* restr. token timer */
+
+/*
+ * Recommended Timeout values (for FM2 timer only)
+ */
+#define TOUT_BM_PRE 188 /* 3.76 usec */
+#define TOUT_S_DAT 374 /* 7.48 usec */
+
+/*
+ * CS register (write only)
+ */
+#define HSR(p) CS(0x18|(p)) /* Host request register */
+
+#define RTM_PUT 0x36 /* restr. token counter write */
+#define RTM_GET 0x28 /* - " - clear */
+#define RTM_CLEAR 0x34 /* - " - read */
+
+/*
+ * BCN Bit definitions
+ */
+#define BCN_BUSY 0x8000 /* DMA Busy flag */
+#define BCN_AZERO 0x4000 /* Almost zero flag (BCN < 4) */
+#define BCN_STREAM 0x2000 /* Allow streaming data (BCN >= 8) */
+
+/*
+ * WCN Bit definitions
+ */
+#define WCN_ZERO 0x2000 /* Zero flag (counted to zero) */
+#define WCN_AZERO 0x1000 /* Almost zero flag (BCN < 4) */
+
+/*
+ * CNT_TST Bit definitions
+ */
+#define CNT_MODE 0x01 /* Go into test mode */
+#define CNT_D32 0x02 /* 16/32 BIT test mode */
+
+/*
+ * FIFO Flag FIFO Flags/Vfull register
+ */
+#define FF_VFULL 0x003f /* V_full value mask */
+#define FFLG_FULL 0x2000 /* FULL flag */
+#define FFLG_A_FULL 0x1000 /* Almost full flag */
+#define FFLG_VFULL 0x0800 /* V_full Flag */
+#define FFLG_A_EMP 0x0400 /* almost empty flag */
+#define FFLG_EMP 0x0200 /* empty flag */
+#define FFLG_T_EMP 0x0100 /* totally empty flag */
+
+/*
+ * WDOG Watchdog status register
+ */
+#define WDOG_ALM 0x01 /* Watchdog alarm Bit */
+#define WDOG_ACT 0x02 /* Watchdog active Bit */
+
+/*
+ * CS(0) CONTROLS
+ */
+#define CS_CRESET 0x0001
+#define FIFO_RST 0x0002
+#define CS_IMSK 0x0004
+#define EN_IRQ_CHCK 0x0008
+#define EN_IRQ_TOKEN 0x0010
+#define EN_IRQ_TC 0x0020
+#define TOKEN_STATUS 0x0040
+#define RTM_CHANGE 0x0080
+
+#define CS_SAS 0x0100
+#define CS_BYSTAT 0x0200 /* bypass connected (0=conn.) */
+#define CS_BYPASS 0x0400 /* bypass on/off indication */
+
+/*
+ * CS(2) FIFOSTAT
+ */
+#define HSREQ 0x0007
+#define BIGDIR 0x0008
+#define CSF_BUSY_FIFO 0x0010
+#define CSF_BUSY_DMA 0x0020
+#define SLOT_32 0x0040
+
+#define LED_0 0x0001
+#define LED_1 0x0002
+#define LED_2 0x0100
+
+#define MAX_PAGES 8 /* pages */
+#define MAX_FADDR 0x4000 /* 16K per page */
+
+/*
+ * IRQ = ISRA || ISR2 ;
+ *
+ * ISRA = IRQ_OTH_EN && (IS_LAN | IS_BUS) ;
+ * ISR2 = IRQ_TC_EN && IS_TC ;
+ *
+ * IS_LAN = (IS_MINTR1 | IS_MINTR2 | IS_PLINT1 | IS_PLINT2 | IS_TIMINT) ||
+ * (IRQ_EN_TOKEN && IS_TOKEN) ;
+ * IS_BUS = IRQ_CHCK_EN && (IS_BUSERR | IS_CHCK_L) ;
+ */
+/*
+ * ISRA !!! activ high !!!
+ */
+#define IS_MINTR1 0x0001 /* FORMAC ST1U/L & ~IMSK1U/L*/
+#define IS_MINTR2 0x0002 /* FORMAC ST2U/L & ~IMSK2U/L*/
+#define IS_PLINT1 0x0004 /* PLC1 */
+#define IS_PLINT2 0x0008 /* PLC2 */
+#define IS_TIMINT 0x0010 /* Timer 82C54-2 */
+#define IS_TOKEN	0x0020		/* restricted token monitoring */
+#define IS_CHCK_L 0x0040 /* check line asserted */
+#define IS_BUSERR 0x0080 /* bus error */
+/*
+ * ISR2
+ */
+#define IS_TC 0x0001 /* terminal count irq */
+#define IS_SFDBKRTN 0x0002 /* selected feedback return */
+#define IS_D16 0x0004 /* DS16 */
+#define IS_D32 0x0008 /* DS32 */
+#define IS_DPEI 0x0010 /* Data Parity Indication */
+
+#define ALL_IRSR 0x00ff
+
+#define FM_A(a) ADDR(FMA(a)) /* FORMAC Plus physical addr */
+#define P1_A(a) ADDR(P1(a)) /* PLC1 (r/w) */
+#define P2_A(a) ADDR(P2(a)) /* PLC2 (r/w) (DAS) */
+#define TI_A(a) ADDR(TI(a)) /* Timer (r/w) FM1 only! */
+#define PR_A(a) ADDR(PR(a)) /* config. PROM */
+#define CS_A(a) ADDR(CS(a)) /* control/status */
+
+#define ISR1_A ADDR(ISRA) /* first int. source register address */
+#define ISR2_A ADDR(ISR2) /* second -"- */
+#define CSR_A ADDR(CSRA) /* control/status register address */
+#define CSF_A ADDR(CSFA) /* control/status FIFO BUSY flags (r/w) */
+
+#define CSIL_A ADDR(CSIL) /* I/O mapped POS_ID_low (102) */
+#define CSIH_A ADDR(CSIH) /* - " - POS_ID_HIGH (101) */
+#define CSA_A ADDR(CSA) /* - " - POS_102 */
+#define CSI_A ADDR(CSI) /* - " - POS_104 */
+#define CSM_A ADDR(CSM) /* - " - POS_103 */
+#define CSM_FM1_A ADDR(CSM_FM1) /* - " - POS_103 (2nd copy, FM1) */
+#define CSP_06_A ADDR(CSP_06) /* - " - POS_106 */
+
+#define WCT_A ADDR(WCTA) /* word counter (r/w) */
+#define FFLAG_A ADDR(FFLAG) /* FLAG/V_FULL (FIFO almost full, write only)*/
+
+#define ACL_A ADDR(ACLA) /* address counter low */
+#define ACH_A ADDR(ACHA) /* address counter high */
+#define BCN_A ADDR(BCN) /* byte counter */
+#define MUX_A ADDR(MUX) /* MUX-register */
+
+#define ISR_A ADDR(ISRA) /* Interrupt Source Register */
+#define FIFO_RESET_A ADDR(FIFO_RESET) /* reset the FIFO */
+#define FIFO_EN_A ADDR(FIFO_EN) /* enable the FIFO */
+
+#define WDOG_EN_A ADDR(WDOG_EN) /* reset and start the WDOG */
+#define WDOG_DIS_A ADDR(WDOG_DIS) /* disable the WDOG */
+/*
+ * all control reg. (read!) are 8 bit (except PAGE_RG_A and LEDR_A)
+ */
+#define HSR_A(p) ADDR(HSR(p)) /* Host request register */
+
+#define STAT_BYP 0 /* bypass station */
+#define STAT_INS 2 /* insert station */
+#define BYPASS(o) CS(0x10|(o)) /* o=STAT_BYP || STAT_INS */
+
+#define IRQ_TC_EN CS(0x0b) /* enable/disable IRQ on TC */
+#define IRQ_TC_DIS CS(0x0a)
+#define IRQ_TOKEN_EN CS(9) /* enable/disable IRQ on restr. Token */
+#define IRQ_TOKEN_DIS CS(8)
+#define IRQ_CHCK_EN CS(7) /* -"- IRQ after CHCK line */
+#define IRQ_CHCK_DIS CS(6)
+#define IRQ_OTH_EN CS(5) /* -"- other IRQ's */
+#define IRQ_OTH_DIS CS(4)
+#define FIFO_EN CS(3) /* disable (reset), enable FIFO */
+#define FIFO_RESET CS(2)
+#define CARD_EN CS(1) /* disable (reset), enable card */
+#define CARD_DIS CS(0)
+
+#define LEDR_A ADDR(LEDR) /* D0=green, D1=yellow, D8=L2 */
+#define PAGE_RG_A ADDR(CSM) /* D<2..0> */
+#define IRQ_CHCK_EN_A ADDR(IRQ_CHCK_EN)
+#define IRQ_CHCK_DIS_A ADDR(IRQ_CHCK_DIS)
+
+#define GET_PAGE(bank) outpw(PAGE_RG_A,(inpw(PAGE_RG_A) &\
+ (~POS_PAGE)) |(int) (bank))
+#define VPP_ON() if (smc->hw.rev == FM1_REV) { \
+ outpw(PAGE_RG_A, \
+ (inpw(PAGE_RG_A) & POS_PAGE) | PROG_EN); \
+ }
+#define VPP_OFF() if (smc->hw.rev == FM1_REV) { \
+ outpw(PAGE_RG_A,(inpw(PAGE_RG_A) & POS_PAGE)); \
+ }
+
+#define SKFDDI_PSZ 16 /* address PROM size */
+
+#define READ_PROM(a) ((u_char)inp(a))
+
+#define GET_ISR() ~inpw(ISR1_A)
+#ifndef TCI
+#define CHECK_ISR() ~inpw(ISR1_A)
+#define CHECK_ISR_SMP(iop) ~inpw((iop)+ISRA)
+#else
+#define CHECK_ISR() (~inpw(ISR1_A) | ~inpw(ISR2_A))
+#define CHECK_ISR_SMP(iop) (~inpw((iop)+ISRA) | ~inpw((iop)+ISR2))
+#endif
+
+#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA)
+#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO)
+#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO))
+#define BUS_CHECK() { int i ; \
+ if ((i = GET_ISR()) & IS_BUSERR) \
+ SMT_PANIC(smc,HWM_E0020,HWM_E0020_MSG) ; \
+ if (i & IS_CHCK_L) \
+ SMT_PANIC(smc,HWM_E0014,HWM_E0014_MSG) ; \
+ }
+
+#define CHECK_DMA() { u_long k = 10000 ; \
+ while (k && (DMA_BUSY())) { \
+ k-- ; \
+ BUS_CHECK() ; \
+ } \
+ if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; }
+
+#define CHECK_FIFO() {u_long k = 1000000 ;\
+ while (k && (FIFO_BUSY())) k-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; }
+
+#define CHECK_DMA_FIFO() {u_long k = 1000000 ;\
+ while (k && (DMA_FIFO_BUSY())) { \
+ k-- ;\
+ BUS_CHECK() ; \
+ } \
+ if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; }
+
+#ifndef UNIX
+#define CLI_FBI() outp(ADDR(IRQ_OTH_DIS),0)
+#else
+#define CLI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_DIS),0)
+#endif
+
+#ifndef TCI
+#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0)
+#else
+#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0) ;\
+ outp((iop)+IRQ_TC_DIS,0)
+#endif
+
+#ifndef UNIX
+#define STI_FBI() outp(ADDR(IRQ_OTH_EN),0)
+#else
+#define STI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_EN),0)
+#endif
+
+/*
+ * Terminal count primitives
+ */
+#define CLI_TCI(smc) outp(ADDRS((smc),IRQ_TC_DIS),0)
+#define STI_TCI(smc) outp(ADDRS((smc),IRQ_TC_EN),0)
+#define CHECK_TC(smc,k) {(k) = 10000 ;\
+ while ((k) && (~inpw(ISR2_A) & IS_TC)) (k)-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0018,HWM_E0018_MSG) ; }
+
+#endif /* MCA */
+
+#ifdef ISA
+
+/*
+ * address translation from the logical NPADDR6-0 to the physical offset address on the board
+ */
+#define FMA(a) (0x8000|(((a)&0x07)<<1)|(((a)&0x78)<<7)) /* FORMAC+ (r/w) */
+#define PRA(a) (0x1000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PROM (read only)*/
+#define P1A(a) (0x4000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC1 (r/w) */
+#define P2A(a) (0x5000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC2 (r/w) */
+#define TIA(a) (0x6000|(((a)&0x03)<<1)) /* Timer (r/w) */
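+
+/*
+ * Worked example (illustration only): for the logical register number 0x0a
+ * the FORMAC+ mapping yields
+ *	FMA(0x0a) = 0x8000 | ((0x0a & 0x07) << 1) | ((0x0a & 0x78) << 7)
+ *		  = 0x8000 | 0x0004 | 0x0400 = 0x8404
+ * i.e. bits 2..0 of the register number become offset bits 3..1 and
+ * bits 6..3 become offset bits 13..10.
+ */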
+
+#define ISRA 0x0000 /* int. source register address (read only) */
+#define ACLA 0x0000 /* address counter low address (write only) */
+#define ACHA 0x0002 /* address counter high address (write only) */
+#define TRCA 0x0004 /* transfer counter address (write only) */
+#define PGRA 0x0006 /* page register address (write only) */
+#define RQAA 0x2000 /* Request reg. (write only) */
+#define CSRA 0x3000 /* control/status register address (r/w) */
+
+/*
+ * physical address offset + IO-Port base address
+ */
+#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */
+#define PR_A(a) (PRA(a)+smc->hw.iop) /* PROM (read only)*/
+#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */
+#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */
+#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */
+
+#define ISR_A (0x0000+smc->hw.iop) /* int. source register address (read only) */
+#define ACL_A (0x0000+smc->hw.iop) /* address counter low address (write only) */
+#define ACH_A (0x0002+smc->hw.iop) /* address counter high address (write only)*/
+#define TRC_A (0x0004+smc->hw.iop) /* transfer counter address (write only) */
+#define PGR_A (0x0006+smc->hw.iop) /* page register address (write only) */
+#define RQA_A (0x2000+smc->hw.iop) /* Request reg. (write only) */
+#define CSR_A (0x3000+smc->hw.iop) /* control/status register address (r/w) */
+#ifdef UNIX
+#define CSR_AS(smc) (0x3000+(smc)->hw.iop) /* control/status register address */
+#endif
+#define PLC1_I (0x3400+smc->hw.iop) /* clear PLC1 interrupt bit */
+#define PLC2_I (0x3800+smc->hw.iop) /* clear PLC2 interrupt bit */
+
+#ifndef MULT_OEM
+#ifndef OEM_CONCEPT
+#define SKLOGO_STR "SKFDDI"
+#else /* OEM_CONCEPT */
+#define SKLOGO_STR OEM_FDDI_LOGO
+#endif /* OEM_CONCEPT */
+#endif /* MULT_OEM */
+#define SADDRL (24) /* start address SKLOGO */
+#define SA_MAC (0) /* start addr. MAC_AD within the PROM */
+#define PRA_OFF (0)
+#define SA_PMD_TYPE (8) /* start addr. PMD-Type */
+
+#define CDID (PRA(SADDRL)) /* Card ID int/O port addr. offset */
+#define NEXT_CDID ((PRA(SADDRL+1)) - CDID)
+
+#define SKFDDI_PSZ 32 /* address PROM size */
+
+#define READ_PROM(a) ((u_char)inpw(a))
+#define GET_PAGE(i) outpw(PGR_A,(int)(i))
+
+#define MAX_PAGES 16 /* 16 pages */
+#define MAX_FADDR 0x2000 /* 8K per page */
+#define VPP_OFF() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS)))
+#define VPP_ON() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS)) | \
+ CS_VPPSW)
+
+/*
+ * control/status register CSRA bits (log. addr: 0x3000)
+ */
+/* write */
+#define CS_CRESET 0x01 /* Card reset (0=reset) */
+#define CS_IMSK 0x02 /* enable IRQ (1=enable, 0=disable) */
+#define CS_RESINT1 0x04 /* PLINT1 reset */
+#define CS_VPPSW 0x10 /* 12V power switch (0=off, 1=on) */
+#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/
+#define CS_RESINT2 0x40 /* PLINT2 reset */
+/* read */
+#define CS_BUSY 0x04 /* master transfer active (=1) */
+#define CS_SW_EPROM 0x08 /* 0=Application Soft. 1=BOOT-EPROM */
+#define CS_BYSTAT 0x40 /* 0=Bypass exists, 1=does not */
+#define CS_SAS 0x80 /* single attachment station (=1) */
+
+/*
+ * Interrupt source register ISRA (log. addr: 0x0000), read only & low active.
+ */
+#define IS_MINTR1 0x01 /* FORMAC ST1U/L && ~IMSK1U/L*/
+#define IS_MINTR2 0x02 /* FORMAC ST2U/L && ~IMSK2U/L*/
+#define IS_PLINT1 0x04 /* PLC1 */
+#define IS_PLINT2 0x08 /* PLC2 */
+#define IS_TIMINT 0x10 /* Timer 82C54-2 */
+
+#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT)
+
+#define FPROM_SW() (inpw(CSR_A)&CS_SW_EPROM)
+#define DMA_BUSY() (inpw(CSR_A)&CS_BUSY)
+#define CHECK_FIFO()
+#define BUS_CHECK()
+
+/*
+ * set Host Request register (wr.)
+ */
+#define SET_HRQ(qup) outpw(RQA_A+((qup)<<1),0)
+
+#ifndef UNIX
+#ifndef WINNT
+#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&(CS_CRESET|CS_BYPASS|CS_VPPSW)))
+#else
+#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A) & \
+ (CS_CRESET|CS_BYPASS|CS_VPPSW)))
+#endif
+#else
+#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))& \
+ (CS_CRESET|CS_BYPASS|CS_VPPSW)))
+#endif
+
+#ifndef UNIX
+#define STI_FBI() outpw(CSR_A,(inpw(CSR_A) & \
+ (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK)
+#else
+#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc)) & \
+ (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK)
+#endif
+
+#define CHECK_DMA() {unsigned k = 10000 ;\
+ while (k && (DMA_BUSY())) k-- ;\
+ if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; }
+
+#define GET_ISR() ~inpw(ISR_A)
+
+#endif /* ISA */
+
+/*--------------------------------------------------------------------------*/
+#ifdef PCI
+
+/*
+ * (DV) = only defined for Da Vinci
+ * (ML) = only defined for Monalisa
+ */
+
+/*
+ * Configuration Space header
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */
+#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */
+#define PCI_COMMAND 0x04 /* 16 bit Command */
+#define PCI_STATUS 0x06 /* 16 bit Status */
+#define PCI_REV_ID 0x08 /* 8 bit Revision ID */
+#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */
+#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */
+#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */
+#define PCI_HEADER_T 0x0e /* 8 bit Header Type */
+#define PCI_BIST 0x0f /* 8 bit Built-in selftest */
+#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */
+#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */
+/* Byte 18..2b: Reserved */
+#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */
+#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */
+#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */
+/* Byte 34..33: Reserved */
+#define PCI_CAP_PTR 0x34 /* 8 bit (ML) Capabilities Ptr */
+/* Byte 35..3b: Reserved */
+#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */
+#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */
+#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */
+#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */
+/* Device Dependent Region */
+#define PCI_OUR_REG 0x40 /* 32 bit (DV) Our Register */
+#define PCI_OUR_REG_1 0x40 /* 32 bit (ML) Our Register 1 */
+#define PCI_OUR_REG_2 0x44 /* 32 bit (ML) Our Register 2 */
+/* Power Management Region */
+#define PCI_PM_CAP_ID 0x48 /* 8 bit (ML) Power Management Cap. ID */
+#define PCI_PM_NITEM 0x49 /* 8 bit (ML) Next Item Ptr */
+#define PCI_PM_CAP_REG 0x4a /* 16 bit (ML) Power Management Capabilities */
+#define PCI_PM_CTL_STS 0x4c /* 16 bit (ML) Power Manag. Control/Status */
+/* Byte 0x4e: Reserved */
+#define PCI_PM_DAT_REG 0x4f /* 8 bit (ML) Power Manag. Data Register */
+/* VPD Region */
+#define PCI_VPD_CAP_ID 0x50 /* 8 bit (ML) VPD Cap. ID */
+#define PCI_VPD_NITEM 0x51 /* 8 bit (ML) Next Item Ptr */
+#define PCI_VPD_ADR_REG 0x52 /* 16 bit (ML) VPD Address Register */
+#define PCI_VPD_DAT_REG 0x54 /* 32 bit (ML) VPD Data Register */
+/* Byte 58..ff: Reserved */
+
+/*
+ * I2C Address (PCI Config)
+ *
+ * Note: The temperature and voltage sensors are relocated on a different
+ * I2C bus.
+ */
+#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */
+
+/*
+ * Define Bits and Values of the registers
+ */
+/* PCI_VENDOR_ID 16 bit Vendor ID */
+/* PCI_DEVICE_ID 16 bit Device ID */
+/* Values for Vendor ID and Device ID shall be patched into the code */
+/* PCI_COMMAND 16 bit Command */
+#define PCI_FBTEN 0x0200 /* Bit 9: Fast Back-To-Back enable */
+#define PCI_SERREN 0x0100 /* Bit 8: SERR enable */
+#define PCI_ADSTEP 0x0080 /* Bit 7: Address Stepping */
+#define PCI_PERREN 0x0040 /* Bit 6: Parity Report Response enable */
+#define PCI_VGA_SNOOP 0x0020 /* Bit 5: VGA palette snoop */
+#define PCI_MWIEN 0x0010 /* Bit 4: Memory Write & Invalidate cycle enable */
+#define PCI_SCYCEN 0x0008 /* Bit 3: Special Cycle enable */
+#define PCI_BMEN 0x0004 /* Bit 2: Bus Master enable */
+#define PCI_MEMEN 0x0002 /* Bit 1: Memory Space Access enable */
+#define PCI_IOEN 0x0001 /* Bit 0: IO Space Access enable */
+
+/* PCI_STATUS 16 bit Status */
+#define PCI_PERR 0x8000 /* Bit 15: Parity Error */
+#define PCI_SERR 0x4000 /* Bit 14: Signaled SERR */
+#define PCI_RMABORT 0x2000 /* Bit 13: Received Master Abort */
+#define PCI_RTABORT 0x1000 /* Bit 12: Received Target Abort */
+#define PCI_STABORT 0x0800 /* Bit 11: Sent Target Abort */
+#define PCI_DEVSEL 0x0600 /* Bit 10..9: DEVSEL Timing */
+#define PCI_DEV_FAST (0<<9) /* fast */
+#define PCI_DEV_MEDIUM (1<<9) /* medium */
+#define PCI_DEV_SLOW (2<<9) /* slow */
+#define PCI_DATAPERR 0x0100 /* Bit 8: DATA Parity error detected */
+#define PCI_FB2BCAP 0x0080 /* Bit 7: Fast Back-to-Back Capability */
+#define PCI_UDF 0x0040 /* Bit 6: User Defined Features */
+#define PCI_66MHZCAP 0x0020 /* Bit 5: 66 MHz PCI bus clock capable */
+#define PCI_NEWCAP 0x0010 /* Bit 4: New cap. list implemented */
+
+#define PCI_ERRBITS (PCI_PERR|PCI_SERR|PCI_RMABORT|PCI_STABORT|PCI_DATAPERR)
+
+/* PCI_REV_ID 8 bit Revision ID */
+/* PCI_CLASS_CODE 24 bit Class Code */
+/* Byte 2: Base Class (02) */
+/* Byte 1: SubClass (02) */
+/* Byte 0: Programming Interface (00) */
+
+/* PCI_CACHE_LSZ 8 bit Cache Line Size */
+/* Possible values: 0,2,4,8,16 */
+
+/* PCI_LAT_TIM 8 bit Latency Timer */
+
+/* PCI_HEADER_T 8 bit Header Type */
+#define PCI_HD_MF_DEV 0x80 /* Bit 7: 0= single, 1= multi-func dev */
+#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */
+
+/* PCI_BIST 8 bit Built-in selftest */
+#define PCI_BIST_CAP 0x80 /* Bit 7: BIST Capable */
+#define PCI_BIST_ST 0x40 /* Bit 6: Start BIST */
+#define PCI_BIST_RET 0x0f /* Bit 3..0: Completion Code */
+
+/* PCI_BASE_1ST 32 bit 1st Base address */
+#define PCI_MEMSIZE 0x800L /* use 2 kB Memory Base */
+#define PCI_MEMBASE_BITS 0xfffff800L /* Bit 31..11: Memory Base Address */
+#define PCI_MEMSIZE_BIIS 0x000007f0L /* Bit 10..4: Memory Size Req. */
+#define PCI_PREFEN 0x00000008L /* Bit 3: Prefetchable */
+#define PCI_MEM_TYP 0x00000006L /* Bit 2..1: Memory Type */
+#define PCI_MEM32BIT (0<<1) /* Base addr anywhere in 32 Bit range */
+#define PCI_MEM1M (1<<1) /* Base addr below 1 MegaByte */
+#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
+#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */
+
+/* PCI_BASE_2ND 32 bit 2nd Base address */
+#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */
+#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */
+#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */
+
+/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
+/* PCI_SUB_ID 16 bit Subsystem ID */
+
+/* PCI_BASE_ROM 32 bit Expansion ROM Base Address */
+#define PCI_ROMBASE 0xfffe0000L /* Bit 31..17: ROM BASE address (1st) */
+#define PCI_ROMBASZ 0x0001c000L /* Bit 16..14: Treat as BASE or SIZE */
+#define PCI_ROMSIZE 0x00003800L /* Bit 13..11: ROM Size Requirements */
+#define PCI_ROMEN 0x00000001L /* Bit 0: Address Decode enable */
+
+/* PCI_CAP_PTR 8 bit New Capabilities Pointers */
+/* PCI_IRQ_LINE 8 bit Interrupt Line */
+/* PCI_IRQ_PIN 8 bit Interrupt Pin */
+/* PCI_MIN_GNT 8 bit Min_Gnt */
+/* PCI_MAX_LAT 8 bit Max_Lat */
+/* Device Dependent Region */
+/* PCI_OUR_REG (DV) 32 bit Our Register */
+/* PCI_OUR_REG_1 (ML) 32 bit Our Register 1 */
+ /* Bit 31..29: reserved */
+#define PCI_PATCH_DIR (3L<<27) /*(DV) Bit 28..27: Ext Patches direction */
+#define PCI_PATCH_DIR_0 (1L<<27) /*(DV) Type of the pins EXT_PATCHS<1..0> */
+#define PCI_PATCH_DIR_1 (1L<<28) /* 0 = input */
+ /* 1 = output */
+#define PCI_EXT_PATCHS (3L<<25) /*(DV) Bit 26..25: Extended Patches */
+#define PCI_EXT_PATCH_0 (1L<<25) /*(DV) */
+#define PCI_EXT_PATCH_1 (1L<<26) /* CLK for MicroWire (ML) */
+#define PCI_VIO (1L<<25) /*(ML) */
+#define PCI_EN_BOOT (1L<<24) /* Bit 24: Enable BOOT via ROM */
+ /* 1 = Don't boot with ROM */
+ /* 0 = Boot with ROM */
+#define PCI_EN_IO (1L<<23) /* Bit 23: Mapping to IO space */
+#define PCI_EN_FPROM (1L<<22) /* Bit 22: FLASH mapped to mem? */
+ /* 1 = Map Flash to Memory */
+ /* 0 = Disable all addr. decoding */
+#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */
+#define PCI_PAGE_16 (0L<<20) /* 16 k pages */
+#define PCI_PAGE_32K (1L<<20) /* 32 k pages */
+#define PCI_PAGE_64K (2L<<20) /* 64 k pages */
+#define PCI_PAGE_128K (3L<<20) /* 128 k pages */
+ /* Bit 19: reserved (ML) and (DV) */
+#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */
+ /* Bit 15: reserved */
+#define PCI_FORCE_BE (1L<<14) /* Bit 14: Assert all BEs on MR */
+#define PCI_DIS_MRL (1L<<13) /* Bit 13: Disable Mem R Line */
+#define PCI_DIS_MRM (1L<<12) /* Bit 12: Disable Mem R multip */
+#define PCI_DIS_MWI (1L<<11) /* Bit 11: Disable Mem W & inv */
+#define PCI_DISC_CLS (1L<<10) /* Bit 10: Disc: cacheLsz bound */
+#define PCI_BURST_DIS (1L<<9) /* Bit 9: Burst Disable */
+#define PCI_BYTE_SWAP (1L<<8) /*(DV) Bit 8: Byte Swap in DATA */
+#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7..4: Skew Ctrl, DAS Ext */
+#define PCI_SKEW_BASE (0xfL<<0) /* Bit 3..0: Skew Ctrl, Base */
+
+/* PCI_OUR_REG_2 (ML) 32 bit Our Register 2 (Monalisa only) */
+#define PCI_VPD_WR_TH (0xffL<<24) /* Bit 24..31 VPD Write Threshold */
+#define PCI_DEV_SEL (0x7fL<<17) /* Bit 17..23 EEPROM Device Select */
+#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 14..16 VPD ROM Size */
+ /* Bit 12..13 reserved */
+#define PCI_PATCH_DIR2 (0xfL<<8) /* Bit 8..11 Ext Patches dir 2..5 */
+#define PCI_PATCH_DIR_2 (1L<<8) /* Bit 8 CS for MicroWire */
+#define PCI_PATCH_DIR_3 (1L<<9)
+#define PCI_PATCH_DIR_4 (1L<<10)
+#define PCI_PATCH_DIR_5 (1L<<11)
+#define PCI_EXT_PATCHS2 (0xfL<<4) /* Bit 4..7 Extended Patches */
+#define PCI_EXT_PATCH_2 (1L<<4) /* Bit 4 CS for MicroWire */
+#define PCI_EXT_PATCH_3 (1L<<5)
+#define PCI_EXT_PATCH_4 (1L<<6)
+#define PCI_EXT_PATCH_5 (1L<<7)
+#define PCI_EN_DUMMY_RD (1L<<3) /* Bit 3 Enable Dummy Read */
+#define PCI_REV_DESC (1L<<2) /* Bit 2 Reverse Desc. Bytes */
+#define PCI_USEADDR64 (1L<<1) /* Bit 1 Use 64 Bit Address */
+#define PCI_USEDATA64 (1L<<0) /* Bit 0 Use 64 Bit Data bus ext*/
+
+/* Power Management Region */
+/* PCI_PM_CAP_ID 8 bit (ML) Power Management Cap. ID */
+/* PCI_PM_NITEM 8 bit (ML) Next Item Ptr */
+/* PCI_PM_CAP_REG 16 bit (ML) Power Management Capabilities*/
+#define PCI_PME_SUP (0x1f<<11) /* Bit 11..15 PM Manag. Event Support*/
+#define PCI_PM_D2_SUB (1<<10) /* Bit 10 D2 Support Bit */
+#define PCI_PM_D1_SUB (1<<9) /* Bit 9 D1 Support Bit */
+ /* Bit 6..8 reserved */
+#define PCI_PM_DSI (1<<5) /* Bit 5 Device Specific Init.*/
+#define PCI_PM_APS (1<<4) /* Bit 4 Auxiliary Power Src */
+#define PCI_PME_CLOCK (1<<3) /* Bit 3 PM Event Clock */
+#define PCI_PM_VER (7<<0) /* Bit 0..2 PM PCI Spec. version */
+
+/* PCI_PM_CTL_STS 16 bit (ML) Power Manag. Control/Status */
+#define PCI_PME_STATUS (1<<15) /* Bit 15 PFA doesn't sup. PME#*/
+#define PCI_PM_DAT_SCL (3<<13) /* Bit 13..14 dat reg Scaling factor */
+#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 9..12 PM data selector field */
+ /* Bit 7.. 2 reserved */
+#define PCI_PM_STATE (3<<0) /* Bit 0.. 1 Power Management State */
+#define PCI_PM_STATE_D0 (0<<0) /* D0: Operational (default) */
+#define PCI_PM_STATE_D1 (1<<0) /* D1: not supported */
+#define PCI_PM_STATE_D2 (2<<0) /* D2: not supported */
+#define PCI_PM_STATE_D3 (3<<0) /* D3: HOT, Power Down and Reset */
+
+/* PCI_PM_DAT_REG 8 bit (ML) Power Manag. Data Register */
+/* VPD Region */
+/* PCI_VPD_CAP_ID 8 bit (ML) VPD Cap. ID */
+/* PCI_VPD_NITEM 8 bit (ML) Next Item Ptr */
+/* PCI_VPD_ADR_REG 16 bit (ML) VPD Address Register */
+#define PCI_VPD_FLAG (1<<15) /* Bit 15 starts VPD rd/wd cycle*/
+
+/* PCI_VPD_DAT_REG 32 bit (ML) VPD Data Register */
+
+/*
+ * Control Register File:
+ * Bank 0
+ */
+#define B0_RAP 0x0000 /* 8 bit register address port */
+ /* 0x0001 - 0x0003: reserved */
+#define B0_CTRL 0x0004 /* 8 bit control register */
+#define B0_DAS 0x0005 /* 8 Bit control register (DAS) */
+#define B0_LED 0x0006 /* 8 Bit LED register */
+#define B0_TST_CTRL 0x0007 /* 8 bit test control register */
+#define B0_ISRC 0x0008 /* 32 bit Interrupt source register */
+#define B0_IMSK 0x000c /* 32 bit Interrupt mask register */
+
+/* 0x0010 - 0x006b: formac+ (supernet_3) frequently used registers */
+#define B0_CMDREG1 0x0010 /* write command reg 1 instruction */
+#define B0_CMDREG2 0x0014 /* write command reg 2 instruction */
+#define B0_ST1U 0x0010 /* read upper 16-bit of status reg 1 */
+#define B0_ST1L 0x0014 /* read lower 16-bit of status reg 1 */
+#define B0_ST2U 0x0018 /* read upper 16-bit of status reg 2 */
+#define B0_ST2L 0x001c /* read lower 16-bit of status reg 2 */
+
+#define B0_MARR 0x0020 /* r/w the memory read addr register */
+#define B0_MARW 0x0024 /* r/w the memory write addr register*/
+#define B0_MDRU 0x0028 /* r/w upper 16-bit of mem. data reg */
+#define B0_MDRL 0x002c /* r/w lower 16-bit of mem. data reg */
+
+#define B0_MDREG3 0x0030 /* r/w Mode Register 3 */
+#define B0_ST3U 0x0034 /* read upper 16-bit of status reg 3 */
+#define B0_ST3L 0x0038 /* read lower 16-bit of status reg 3 */
+#define B0_IMSK3U 0x003c /* r/w upper 16-bit of IMSK reg 3 */
+#define B0_IMSK3L 0x0040 /* r/w lower 16-bit of IMSK reg 3 */
+#define B0_IVR 0x0044 /* read Interrupt Vector register */
+#define B0_IMR 0x0048 /* r/w Interrupt mask register */
+/* 0x4c Hidden */
+
+#define B0_CNTRL_A 0x0050 /* control register A (r/w) */
+#define B0_CNTRL_B 0x0054 /* control register B (r/w) */
+#define B0_INTR_MASK 0x0058 /* interrupt mask (r/w) */
+#define B0_XMIT_VECTOR 0x005c /* transmit vector register (r/w) */
+
+#define B0_STATUS_A 0x0060 /* status register A (read only) */
+#define B0_STATUS_B 0x0064 /* status register B (read only) */
+#define B0_CNTRL_C 0x0068 /* control register C (r/w) */
+#define B0_MDREG1 0x006c /* r/w Mode Register 1 */
+
+#define B0_R1_CSR 0x0070 /* 32 bit BMU control/status reg (rec q 1) */
+#define B0_R2_CSR 0x0074 /* 32 bit BMU control/status reg (rec q 2)(DV)*/
+#define B0_XA_CSR 0x0078 /* 32 bit BMU control/status reg (a xmit q) */
+#define B0_XS_CSR 0x007c /* 32 bit BMU control/status reg (s xmit q) */
+
+/*
+ * Bank 1
+ * - completely empty (this is the RAP Block window)
+ * Note: if RAP = 1 this page is reserved
+ */
+
+/*
+ * Bank 2
+ */
+#define B2_MAC_0 0x0100 /* 8 bit MAC address Byte 0 */
+#define B2_MAC_1 0x0101 /* 8 bit MAC address Byte 1 */
+#define B2_MAC_2 0x0102 /* 8 bit MAC address Byte 2 */
+#define B2_MAC_3 0x0103 /* 8 bit MAC address Byte 3 */
+#define B2_MAC_4 0x0104 /* 8 bit MAC address Byte 4 */
+#define B2_MAC_5 0x0105 /* 8 bit MAC address Byte 5 */
+#define B2_MAC_6 0x0106 /* 8 bit MAC address Byte 6 (== 0) (DV) */
+#define B2_MAC_7 0x0107 /* 8 bit MAC address Byte 7 (== 0) (DV) */
+
+#define B2_CONN_TYP 0x0108 /* 8 bit Connector type */
+#define B2_PMD_TYP 0x0109 /* 8 bit PMD type */
+ /* 0x010a - 0x010b: reserved */
+ /* Eprom registers are currently of no use */
+#define B2_E_0 0x010c /* 8 bit EPROM Byte 0 */
+#define B2_E_1 0x010d /* 8 bit EPROM Byte 1 */
+#define B2_E_2 0x010e /* 8 bit EPROM Byte 2 */
+#define B2_E_3 0x010f /* 8 bit EPROM Byte 3 */
+#define B2_FAR 0x0110 /* 32 bit Flash-Prom Address Register/Counter */
+#define B2_FDP 0x0114 /* 8 bit Flash-Prom Data Port */
+ /* 0x0115 - 0x0117: reserved */
+#define B2_LD_CRTL 0x0118 /* 8 bit loader control */
+#define B2_LD_TEST 0x0119 /* 8 bit loader test */
+ /* 0x011a - 0x011f: reserved */
+#define B2_TI_INI 0x0120 /* 32 bit Timer init value */
+#define B2_TI_VAL 0x0124 /* 32 bit Timer value */
+#define B2_TI_CRTL 0x0128 /* 8 bit Timer control */
+#define B2_TI_TEST 0x0129 /* 8 Bit Timer Test */
+ /* 0x012a - 0x012f: reserved */
+#define B2_WDOG_INI 0x0130 /* 32 bit Watchdog init value */
+#define B2_WDOG_VAL 0x0134 /* 32 bit Watchdog value */
+#define B2_WDOG_CRTL 0x0138 /* 8 bit Watchdog control */
+#define B2_WDOG_TEST 0x0139 /* 8 Bit Watchdog Test */
+ /* 0x013a - 0x013f: reserved */
+#define B2_RTM_INI 0x0140 /* 32 bit RTM init value */
+#define B2_RTM_VAL 0x0144 /* 32 bit RTM value */
+#define B2_RTM_CRTL 0x0148 /* 8 bit RTM control */
+#define B2_RTM_TEST 0x0149 /* 8 Bit RTM Test */
+
+#define B2_TOK_COUNT 0x014c /* (ML) 32 bit Token Counter */
+#define B2_DESC_ADDR_H 0x0150 /* (ML) 32 bit Descriptor Base Addr Reg High */
+#define B2_CTRL_2 0x0154 /* (ML) 8 bit Control Register 2 */
+#define B2_IFACE_REG 0x0155 /* (ML) 8 bit Interface Register */
+ /* 0x0156: reserved */
+#define B2_TST_CTRL_2 0x0157 /* (ML) 8 bit Test Control Register 2 */
+#define B2_I2C_CTRL 0x0158 /* (ML) 32 bit I2C Control Register */
+#define B2_I2C_DATA 0x015c /* (ML) 32 bit I2C Data Register */
+
+#define B2_IRQ_MOD_INI 0x0160 /* (ML) 32 bit IRQ Moderation Timer Init Reg. */
+#define B2_IRQ_MOD_VAL 0x0164 /* (ML) 32 bit IRQ Moderation Timer Value */
+#define B2_IRQ_MOD_CTRL 0x0168 /* (ML) 8 bit IRQ Moderation Timer Control */
+#define B2_IRQ_MOD_TEST 0x0169 /* (ML) 8 bit IRQ Moderation Timer Test */
+ /* 0x016a - 0x017f: reserved */
+
+/*
+ * Bank 3
+ */
+/*
+ * This is a copy of the Configuration register file (lower half)
+ */
+#define B3_CFG_SPC 0x180
+
+/*
+ * Bank 4
+ */
+#define B4_R1_D 0x0200 /* 4*32 bit current receive Descriptor */
+#define B4_R1_DA 0x0210 /* 32 bit current rec desc address */
+#define B4_R1_AC 0x0214 /* 32 bit current receive Address Count */
+#define B4_R1_BC 0x0218 /* 32 bit current receive Byte Counter */
+#define B4_R1_CSR 0x021c /* 32 bit BMU Control/Status Register */
+#define B4_R1_F 0x0220 /* 32 bit flag register */
+#define B4_R1_T1 0x0224 /* 32 bit Test Register 1 */
+#define B4_R1_T1_TR 0x0224 /* 8 bit Test Register 1 TR */
+#define B4_R1_T1_WR 0x0225 /* 8 bit Test Register 1 WR */
+#define B4_R1_T1_RD 0x0226 /* 8 bit Test Register 1 RD */
+#define B4_R1_T1_SV 0x0227 /* 8 bit Test Register 1 SV */
+#define B4_R1_T2 0x0228 /* 32 bit Test Register 2 */
+#define B4_R1_T3 0x022c /* 32 bit Test Register 3 */
+#define B4_R1_DA_H 0x0230 /* (ML) 32 bit Curr Rx Desc Address High */
+#define B4_R1_AC_H 0x0234 /* (ML) 32 bit Curr Addr Counter High dword */
+ /* 0x0238 - 0x023f: reserved */
+ /* Receive queue 2 is removed on Monalisa */
+#define B4_R2_D 0x0240 /* 4*32 bit current receive Descriptor (q2) */
+#define B4_R2_DA 0x0250 /* 32 bit current rec desc address (q2) */
+#define B4_R2_AC 0x0254 /* 32 bit current receive Address Count (q2) */
+#define B4_R2_BC 0x0258 /* 32 bit current receive Byte Counter (q2) */
+#define B4_R2_CSR 0x025c /* 32 bit BMU Control/Status Register (q2) */
+#define B4_R2_F 0x0260 /* 32 bit flag register (q2) */
+#define B4_R2_T1 0x0264 /* 32 bit Test Register 1 (q2) */
+#define B4_R2_T1_TR 0x0264 /* 8 bit Test Register 1 TR (q2) */
+#define B4_R2_T1_WR 0x0265 /* 8 bit Test Register 1 WR (q2) */
+#define B4_R2_T1_RD 0x0266 /* 8 bit Test Register 1 RD (q2) */
+#define B4_R2_T1_SV 0x0267 /* 8 bit Test Register 1 SV (q2) */
+#define B4_R2_T2 0x0268 /* 32 bit Test Register 2 (q2) */
+#define B4_R2_T3 0x026c /* 32 bit Test Register 3 (q2) */
+ /* 0x0270 - 0x027c: reserved */
+
+/*
+ * Bank 5
+ */
+#define B5_XA_D 0x0280 /* 4*32 bit current transmit Descriptor (xa) */
+#define B5_XA_DA 0x0290 /* 32 bit current tx desc address (xa) */
+#define B5_XA_AC 0x0294 /* 32 bit current tx Address Count (xa) */
+#define B5_XA_BC 0x0298 /* 32 bit current tx Byte Counter (xa) */
+#define B5_XA_CSR 0x029c /* 32 bit BMU Control/Status Register (xa) */
+#define B5_XA_F 0x02a0 /* 32 bit flag register (xa) */
+#define B5_XA_T1 0x02a4 /* 32 bit Test Register 1 (xa) */
+#define B5_XA_T1_TR 0x02a4 /* 8 bit Test Register 1 TR (xa) */
+#define B5_XA_T1_WR 0x02a5 /* 8 bit Test Register 1 WR (xa) */
+#define B5_XA_T1_RD 0x02a6 /* 8 bit Test Register 1 RD (xa) */
+#define B5_XA_T1_SV 0x02a7 /* 8 bit Test Register 1 SV (xa) */
+#define B5_XA_T2 0x02a8 /* 32 bit Test Register 2 (xa) */
+#define B5_XA_T3 0x02ac /* 32 bit Test Register 3 (xa) */
+#define B5_XA_DA_H 0x02b0 /* (ML) 32 bit Curr Tx Desc Address High */
+#define B5_XA_AC_H 0x02b4 /* (ML) 32 bit Curr Addr Counter High dword */
+ /* 0x02b8 - 0x02bc: reserved */
+#define B5_XS_D 0x02c0 /* 4*32 bit current transmit Descriptor (xs) */
+#define B5_XS_DA 0x02d0 /* 32 bit current tx desc address (xs) */
+#define B5_XS_AC 0x02d4 /* 32 bit current transmit Address Count(xs) */
+#define B5_XS_BC 0x02d8 /* 32 bit current transmit Byte Counter (xs) */
+#define B5_XS_CSR 0x02dc /* 32 bit BMU Control/Status Register (xs) */
+#define B5_XS_F 0x02e0 /* 32 bit flag register (xs) */
+#define B5_XS_T1 0x02e4 /* 32 bit Test Register 1 (xs) */
+#define B5_XS_T1_TR 0x02e4 /* 8 bit Test Register 1 TR (xs) */
+#define B5_XS_T1_WR 0x02e5 /* 8 bit Test Register 1 WR (xs) */
+#define B5_XS_T1_RD 0x02e6 /* 8 bit Test Register 1 RD (xs) */
+#define B5_XS_T1_SV 0x02e7 /* 8 bit Test Register 1 SV (xs) */
+#define B5_XS_T2 0x02e8 /* 32 bit Test Register 2 (xs) */
+#define B5_XS_T3 0x02ec /* 32 bit Test Register 3 (xs) */
+#define B5_XS_DA_H 0x02f0 /* (ML) 32 bit Curr Tx Desc Address High */
+#define B5_XS_AC_H 0x02f4 /* (ML) 32 bit Curr Addr Counter High dword */
+ /* 0x02f8 - 0x02fc: reserved */
+
+/*
+ * Bank 6
+ */
+/* External PLC-S registers (SN2 compatibility for DV) */
+/* External registers (ML) */
+#define B6_EXT_REG 0x300
+
+/*
+ * Bank 7
+ */
+/* DAS PLC-S Registers */
+
+/*
+ * Bank 8 - 15
+ */
+/* IFCP registers */
+
+/*---------------------------------------------------------------------------*/
+/* Definitions of the Bits in the registers */
+
+/* B0_RAP 16 bit register address port */
+#define RAP_RAP 0x0f /* Bit 3..0: 0 = block0, .., f = block15 */
+
+/* B0_CTRL 8 bit control register */
+#define CTRL_FDDI_CLR (1<<7) /* Bit 7: (ML) Clear FDDI Reset */
+#define CTRL_FDDI_SET (1<<6) /* Bit 6: (ML) Set FDDI Reset */
+#define CTRL_HPI_CLR (1<<5) /* Bit 5: Clear HPI SM reset */
+#define CTRL_HPI_SET (1<<4) /* Bit 4: Set HPI SM reset */
+#define CTRL_MRST_CLR (1<<3) /* Bit 3: Clear Master reset */
+#define CTRL_MRST_SET (1<<2) /* Bit 2: Set Master reset */
+#define CTRL_RST_CLR (1<<1) /* Bit 1: Clear Software reset */
+#define CTRL_RST_SET (1<<0) /* Bit 0: Set Software reset */
+
+/* B0_DAS 8 Bit control register (DAS) */
+#define BUS_CLOCK (1<<7) /* Bit 7: (ML) Bus Clock 0/1 = 33/66MHz */
+#define BUS_SLOT_SZ (1<<6) /* Bit 6: (ML) Slot Size 0/1 = 32/64 bit slot*/
+ /* Bit 5..4: reserved */
+#define DAS_AVAIL (1<<3) /* Bit 3: 1 = DAS, 0 = SAS */
+#define DAS_BYP_ST (1<<2) /* Bit 2: 1 = avail,SAS, 0 = not avail */
+#define DAS_BYP_INS (1<<1) /* Bit 1: 1 = insert Bypass */
+#define DAS_BYP_RMV (1<<0) /* Bit 0: 1 = remove Bypass */
+
+/* B0_LED 8 Bit LED register */
+ /* Bit 7..6: reserved */
+#define LED_2_ON (1<<5) /* Bit 5: 1 = switch LED_2 on (left,gn)*/
+#define LED_2_OFF (1<<4) /* Bit 4: 1 = switch LED_2 off */
+#define LED_1_ON (1<<3) /* Bit 3: 1 = switch LED_1 on (mid,yel)*/
+#define LED_1_OFF (1<<2) /* Bit 2: 1 = switch LED_1 off */
+#define LED_0_ON (1<<1) /* Bit 1: 1 = switch LED_0 on (rght,gn)*/
+#define LED_0_OFF (1<<0) /* Bit 0: 1 = switch LED_0 off */
+/* These hardware defines are very ugly, therefore we define some others below */
+
+#define LED_GA_ON LED_2_ON /* S port = A port */
+#define LED_GA_OFF LED_2_OFF /* S port = A port */
+#define LED_MY_ON LED_1_ON
+#define LED_MY_OFF LED_1_OFF
+#define LED_GB_ON LED_0_ON
+#define LED_GB_OFF LED_0_OFF
+
+/* B0_TST_CTRL 8 bit test control register */
+#define TST_FRC_DPERR_MR (1<<7) /* Bit 7: force DATAPERR on MST RE. */
+#define TST_FRC_DPERR_MW (1<<6) /* Bit 6: force DATAPERR on MST WR. */
+#define TST_FRC_DPERR_TR (1<<5) /* Bit 5: force DATAPERR on TRG RE. */
+#define TST_FRC_DPERR_TW (1<<4) /* Bit 4: force DATAPERR on TRG WR. */
+#define TST_FRC_APERR_M (1<<3) /* Bit 3: force ADDRPERR on MST */
+#define TST_FRC_APERR_T (1<<2) /* Bit 2: force ADDRPERR on TRG */
+#define TST_CFG_WRITE_ON (1<<1) /* Bit 1: ena configuration reg. WR */
+#define TST_CFG_WRITE_OFF (1<<0) /* Bit 0: dis configuration reg. WR */
+
+/* B0_ISRC 32 bit Interrupt source register */
+ /* Bit 31..28: reserved */
+#define IS_I2C_READY (1L<<27) /* Bit 27: (ML) IRQ on end of I2C tx */
+#define IS_IRQ_SW (1L<<26) /* Bit 26: (ML) SW forced IRQ */
+#define IS_EXT_REG (1L<<25) /* Bit 25: (ML) IRQ from external reg*/
+#define IS_IRQ_STAT (1L<<24) /* Bit 24: IRQ status exception */
+ /* PERR, RMABORT, RTABORT DATAPERR */
+#define IS_IRQ_MST_ERR (1L<<23) /* Bit 23: IRQ master error */
+ /* RMABORT, RTABORT, DATAPERR */
+#define IS_TIMINT (1L<<22) /* Bit 22: IRQ_TIMER */
+#define IS_TOKEN (1L<<21) /* Bit 21: IRQ_RTM */
+/*
+ * Note: The DAS is our First Port (!=PA)
+ */
+#define IS_PLINT1 (1L<<20) /* Bit 20: IRQ_PHY_DAS */
+#define IS_PLINT2 (1L<<19) /* Bit 19: IRQ_IFCP_4 */
+#define IS_MINTR3 (1L<<18) /* Bit 18: IRQ_IFCP_3/IRQ_PHY */
+#define IS_MINTR2 (1L<<17) /* Bit 17: IRQ_IFCP_2/IRQ_MAC_2 */
+#define IS_MINTR1 (1L<<16) /* Bit 16: IRQ_IFCP_1/IRQ_MAC_1 */
+/* Receive Queue 1 */
+#define IS_R1_P (1L<<15) /* Bit 15: Parity Error (q1) */
+#define IS_R1_B (1L<<14) /* Bit 14: End of Buffer (q1) */
+#define IS_R1_F (1L<<13) /* Bit 13: End of Frame (q1) */
+#define IS_R1_C (1L<<12) /* Bit 12: Encoding Error (q1) */
+/* Receive Queue 2 */
+#define IS_R2_P (1L<<11) /* Bit 11: (DV) Parity Error (q2) */
+#define IS_R2_B (1L<<10) /* Bit 10: (DV) End of Buffer (q2) */
+#define IS_R2_F (1L<<9) /* Bit 9: (DV) End of Frame (q2) */
+#define IS_R2_C (1L<<8) /* Bit 8: (DV) Encoding Error (q2) */
+/* Asynchronous Transmit queue */
+ /* Bit 7: reserved */
+#define IS_XA_B (1L<<6) /* Bit 6: End of Buffer (xa) */
+#define IS_XA_F (1L<<5) /* Bit 5: End of Frame (xa) */
+#define IS_XA_C (1L<<4) /* Bit 4: Encoding Error (xa) */
+/* Synchronous Transmit queue */
+ /* Bit 3: reserved */
+#define IS_XS_B (1L<<2) /* Bit 2: End of Buffer (xs) */
+#define IS_XS_F (1L<<1) /* Bit 1: End of Frame (xs) */
+#define IS_XS_C (1L<<0) /* Bit 0: Encoding Error (xs) */
+
+/*
+ * Define all valid interrupt source Bits from GET_ISR ()
+ */
+#define ALL_IRSR 0x01ffff77L /* (DV) */
+#define ALL_IRSR_ML 0x0ffff077L /* (ML) */
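+
+/*
+ * Usage sketch (illustration only, not the driver's actual interrupt
+ * handler): pending sources are read with GET_ISR() and usually masked
+ * with the currently enabled interrupts, e.g.
+ *
+ *	u_long is = GET_ISR() & smc->hw.is_imask ;
+ *	if (is & IS_R1_F) {
+ *		... handle "End of Frame" on receive queue 1 ...
+ *	}
+ */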
+
+
+/* B0_IMSK 32 bit Interrupt mask register */
+/*
+ * The bit definitions of this register are the same as those of the
+ * interrupt source register. These definitions are directly derived from
+ * the hardware spec.
+ */
+ /* Bit 31..28: reserved */
+#define IRQ_I2C_READY (1L<<27) /* Bit 27: (ML) IRQ on end of I2C tx */
+#define IRQ_SW (1L<<26) /* Bit 26: (ML) SW forced IRQ */
+#define IRQ_EXT_REG (1L<<25) /* Bit 25: (ML) IRQ from external reg*/
+#define IRQ_STAT (1L<<24) /* Bit 24: IRQ status exception */
+ /* PERR, RMABORT, RTABORT DATAPERR */
+#define IRQ_MST_ERR (1L<<23) /* Bit 23: IRQ master error */
+ /* RMABORT, RTABORT, DATAPERR */
+#define IRQ_TIMER (1L<<22) /* Bit 22: IRQ_TIMER */
+#define IRQ_RTM (1L<<21) /* Bit 21: IRQ_RTM */
+#define IRQ_DAS (1L<<20) /* Bit 20: IRQ_PHY_DAS */
+#define IRQ_IFCP_4 (1L<<19) /* Bit 19: IRQ_IFCP_4 */
+#define IRQ_IFCP_3 (1L<<18) /* Bit 18: IRQ_IFCP_3/IRQ_PHY */
+#define IRQ_IFCP_2 (1L<<17) /* Bit 17: IRQ_IFCP_2/IRQ_MAC_2 */
+#define IRQ_IFCP_1 (1L<<16) /* Bit 16: IRQ_IFCP_1/IRQ_MAC_1 */
+/* Receive Queue 1 */
+#define IRQ_R1_P (1L<<15) /* Bit 15: Parity Error (q1) */
+#define IRQ_R1_B (1L<<14) /* Bit 14: End of Buffer (q1) */
+#define IRQ_R1_F (1L<<13) /* Bit 13: End of Frame (q1) */
+#define IRQ_R1_C (1L<<12) /* Bit 12: Encoding Error (q1) */
+/* Receive Queue 2 */
+#define IRQ_R2_P (1L<<11) /* Bit 11: (DV) Parity Error (q2) */
+#define IRQ_R2_B (1L<<10) /* Bit 10: (DV) End of Buffer (q2) */
+#define IRQ_R2_F (1L<<9) /* Bit 9: (DV) End of Frame (q2) */
+#define IRQ_R2_C (1L<<8) /* Bit 8: (DV) Encoding Error (q2) */
+/* Asynchronous Transmit queue */
+ /* Bit 7: reserved */
+#define IRQ_XA_B (1L<<6) /* Bit 6: End of Buffer (xa) */
+#define IRQ_XA_F (1L<<5) /* Bit 5: End of Frame (xa) */
+#define IRQ_XA_C (1L<<4) /* Bit 4: Encoding Error (xa) */
+/* Synchronous Transmit queue */
+ /* Bit 3: reserved */
+#define IRQ_XS_B (1L<<2) /* Bit 2: End of Buffer (xs) */
+#define IRQ_XS_F (1L<<1) /* Bit 1: End of Frame (xs) */
+#define IRQ_XS_C (1L<<0) /* Bit 0: Encoding Error (xs) */
+
+/* 0x0010 - 0x006b: formac+ (supernet_3) frequently used registers */
+/* B0_R1_CSR 32 bit BMU control/status reg (rec q 1 ) */
+/* B0_R2_CSR 32 bit BMU control/status reg (rec q 2 ) */
+/* B0_XA_CSR 32 bit BMU control/status reg (a xmit q ) */
+/* B0_XS_CSR 32 bit BMU control/status reg (s xmit q ) */
+/* The registers are the same as B4_R1_CSR, B4_R2_CSR, B5_XA_CSR, B5_XS_CSR */
+
+/* B2_MAC_0 8 bit MAC address Byte 0 */
+/* B2_MAC_1 8 bit MAC address Byte 1 */
+/* B2_MAC_2 8 bit MAC address Byte 2 */
+/* B2_MAC_3 8 bit MAC address Byte 3 */
+/* B2_MAC_4 8 bit MAC address Byte 4 */
+/* B2_MAC_5 8 bit MAC address Byte 5 */
+/* B2_MAC_6 8 bit MAC address Byte 6 (== 0) (DV) */
+/* B2_MAC_7 8 bit MAC address Byte 7 (== 0) (DV) */
+
+/* B2_CONN_TYP 8 bit Connector type */
+/* B2_PMD_TYP 8 bit PMD type */
+/* Values of connector and PMD type comply with the SysKonnect internal standard */
+
+/* The EPROM registers are currently of no use */
+/* B2_E_0 8 bit EPROM Byte 0 */
+/* B2_E_1 8 bit EPROM Byte 1 */
+/* B2_E_2 8 bit EPROM Byte 2 */
+/* B2_E_3 8 bit EPROM Byte 3 */
+
+/* B2_FAR 32 bit Flash-Prom Address Register/Counter */
+#define FAR_ADDR 0x1ffffL /* Bit 16..0: FPROM Address mask */
+
+/* B2_FDP 8 bit Flash-Prom Data Port */
+
+/* B2_LD_CRTL 8 bit loader control */
+/* Bits are currently reserved */
+
+/* B2_LD_TEST 8 bit loader test */
+#define LD_T_ON (1<<3) /* Bit 3: Loader Testmode on */
+#define LD_T_OFF (1<<2) /* Bit 2: Loader Testmode off */
+#define LD_T_STEP (1<<1) /* Bit 1: Decrement FPROM addr. Counter */
+#define LD_START (1<<0) /* Bit 0: Start loading FPROM */
+
+/* B2_TI_INI 32 bit Timer init value */
+/* B2_TI_VAL 32 bit Timer value */
+/* B2_TI_CRTL 8 bit Timer control */
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_WDOG_INI 32 bit Watchdog init value */
+/* B2_WDOG_VAL 32 bit Watchdog value */
+/* B2_WDOG_CRTL 8 bit Watchdog control */
+/* B2_WDOG_TEST 8 Bit Watchdog Test */
+/* B2_RTM_INI 32 bit RTM init value */
+/* B2_RTM_VAL 32 bit RTM value */
+/* B2_RTM_CRTL 8 bit RTM control */
+/* B2_RTM_TEST 8 Bit RTM Test */
+/* B2_<TIM>_CRTL 8 bit <TIM> control */
+/* B2_IRQ_MOD_INI 32 bit IRQ Moderation Timer Init Reg. (ML) */
+/* B2_IRQ_MOD_VAL 32 bit IRQ Moderation Timer Value (ML) */
+/* B2_IRQ_MOD_CTRL 8 bit IRQ Moderation Timer Control (ML) */
+/* B2_IRQ_MOD_TEST 8 bit IRQ Moderation Timer Test (ML) */
+#define GET_TOK_CT (1<<4) /* Bit 4: Get the Token Counter (RTM) */
+#define TIM_RES_TOK (1<<3) /* Bit 3: RTM Status: 1 == restricted */
+#define TIM_ALARM (1<<3) /* Bit 3: Timer Alarm (WDOG) */
+#define TIM_START (1<<2) /* Bit 2: Start Timer (TI,WDOG,RTM,IRQ_MOD)*/
+#define TIM_STOP (1<<1) /* Bit 1: Stop Timer (TI,WDOG,RTM,IRQ_MOD) */
+#define TIM_CL_IRQ (1<<0) /* Bit 0: Clear Timer IRQ (TI,WDOG,RTM) */
+/* B2_<TIM>_TEST 8 Bit <TIM> Test */
+#define TIM_T_ON (1<<2) /* Bit 2: Test mode on (TI,WDOG,RTM,IRQ_MOD) */
+#define TIM_T_OFF (1<<1) /* Bit 1: Test mode off (TI,WDOG,RTM,IRQ_MOD) */
+#define TIM_T_STEP (1<<0) /* Bit 0: Test step (TI,WDOG,RTM,IRQ_MOD) */
+
+/* B2_TOK_COUNT 0x014c (ML) 32 bit Token Counter */
+/* B2_DESC_ADDR_H 0x0150 (ML) 32 bit Descriptor Base Addr Reg High */
+/* B2_CTRL_2 0x0154 (ML) 8 bit Control Register 2 */
+ /* Bit 7..5: reserved */
+#define CTRL_CL_I2C_IRQ (1<<4) /* Bit 4: Clear I2C IRQ */
+#define CTRL_ST_SW_IRQ (1<<3) /* Bit 3: Set IRQ SW Request */
+#define CTRL_CL_SW_IRQ (1<<2) /* Bit 2: Clear IRQ SW Request */
+#define CTRL_STOP_DONE (1<<1) /* Bit 1: Stop Master is finished */
+#define CTRL_STOP_MAST (1<<0) /* Bit 0: Command Bit to stop the master*/
+
+/* B2_IFACE_REG 0x0155 (ML) 8 bit Interface Register */
+ /* Bit 7..3: reserved */
+#define IF_I2C_DATA_DIR (1<<2) /* Bit 2: direction of IF_I2C_DATA*/
+#define IF_I2C_DATA (1<<1) /* Bit 1: I2C Data Port */
+#define IF_I2C_CLK (1<<0) /* Bit 0: I2C Clock Port */
+
+ /* 0x0156: reserved */
+/* B2_TST_CTRL_2 0x0157 (ML) 8 bit Test Control Register 2 */
+ /* Bit 7..4: reserved */
+ /* force the following error on */
+ /* the next master read/write */
+#define TST_FRC_DPERR_MR64 (1<<3) /* Bit 3: DataPERR RD 64 */
+#define TST_FRC_DPERR_MW64 (1<<2) /* Bit 2: DataPERR WR 64 */
+#define TST_FRC_APERR_1M64 (1<<1) /* Bit 1: AddrPERR on 1. phase */
+#define TST_FRC_APERR_2M64 (1<<0) /* Bit 0: AddrPERR on 2. phase */
+
+/* B2_I2C_CTRL 0x0158 (ML) 32 bit I2C Control Register */
+#define I2C_FLAG (1L<<31) /* Bit 31: Start read/write if WR */
+#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be read/written*/
+#define I2C_DEV_SEL (0x7fL<<9) /* Bit 9..15: I2C Device Select */
+ /* Bit 5.. 8: reserved */
+#define I2C_BURST_LEN (1L<<4) /* Bit 4 Burst Len, 1/4 bytes */
+#define I2C_DEV_SIZE (7L<<1) /* Bit 1.. 3: I2C Device Size */
+#define I2C_025K_DEV (0L<<1) /* 0: 256 Bytes or smaller*/
+#define I2C_05K_DEV (1L<<1) /* 1: 512 Bytes */
+#define I2C_1K_DEV (2L<<1) /* 2: 1024 Bytes */
+#define I2C_2K_DEV (3L<<1) /* 3: 2048 Bytes */
+#define I2C_4K_DEV (4L<<1) /* 4: 4096 Bytes */
+#define I2C_8K_DEV (5L<<1) /* 5: 8192 Bytes */
+#define I2C_16K_DEV (6L<<1) /* 6: 16384 Bytes */
+#define I2C_32K_DEV (7L<<1) /* 7: 32768 Bytes */
+#define I2C_STOP_BIT (1<<0) /* Bit 0: Interrupt I2C transfer */
+
+/*
+ * I2C Addresses
+ *
+ * The temperature sensor and the voltage sensor are on the same I2C bus.
+ * Note: The voltage sensor (Microwire) will be selected by PCI_EXT_PATCH_1
+ * in PCI_OUR_REG 1.
+ */
+#define I2C_ADDR_TEMP 0x90 /* I2C Address Temperature Sensor */
+
+/* B2_I2C_DATA 0x015c (ML) 32 bit I2C Data Register */
+
+/* B4_R1_D 4*32 bit current receive Descriptor (q1) */
+/* B4_R1_DA 32 bit current rec desc address (q1) */
+/* B4_R1_AC 32 bit current receive Address Count (q1) */
+/* B4_R1_BC 32 bit current receive Byte Counter (q1) */
+/* B4_R1_CSR 32 bit BMU Control/Status Register (q1) */
+/* B4_R1_F 32 bit flag register (q1) */
+/* B4_R1_T1 32 bit Test Register 1 (q1) */
+/* B4_R1_T2 32 bit Test Register 2 (q1) */
+/* B4_R1_T3 32 bit Test Register 3 (q1) */
+/* B4_R2_D 4*32 bit current receive Descriptor (q2) */
+/* B4_R2_DA 32 bit current rec desc address (q2) */
+/* B4_R2_AC 32 bit current receive Address Count (q2) */
+/* B4_R2_BC 32 bit current receive Byte Counter (q2) */
+/* B4_R2_CSR 32 bit BMU Control/Status Register (q2) */
+/* B4_R2_F 32 bit flag register (q2) */
+/* B4_R2_T1 32 bit Test Register 1 (q2) */
+/* B4_R2_T2 32 bit Test Register 2 (q2) */
+/* B4_R2_T3 32 bit Test Register 3 (q2) */
+/* B5_XA_D 4*32 bit current receive Descriptor (xa) */
+/* B5_XA_DA 32 bit current rec desc address (xa) */
+/* B5_XA_AC 32 bit current receive Address Count (xa) */
+/* B5_XA_BC 32 bit current receive Byte Counter (xa) */
+/* B5_XA_CSR 32 bit BMU Control/Status Register (xa) */
+/* B5_XA_F 32 bit flag register (xa) */
+/* B5_XA_T1 32 bit Test Register 1 (xa) */
+/* B5_XA_T2 32 bit Test Register 2 (xa) */
+/* B5_XA_T3 32 bit Test Register 3 (xa) */
+/* B5_XS_D 4*32 bit current receive Descriptor (xs) */
+/* B5_XS_DA 32 bit current rec desc address (xs) */
+/* B5_XS_AC 32 bit current receive Address Count (xs) */
+/* B5_XS_BC 32 bit current receive Byte Counter (xs) */
+/* B5_XS_CSR 32 bit BMU Control/Status Register (xs) */
+/* B5_XS_F 32 bit flag register (xs) */
+/* B5_XS_T1 32 bit Test Register 1 (xs) */
+/* B5_XS_T2 32 bit Test Register 2 (xs) */
+/* B5_XS_T3 32 bit Test Register 3 (xs) */
+/* B5_<xx>_CSR 32 bit BMU Control/Status Register (xx) */
+#define CSR_DESC_CLEAR (1L<<21) /* Bit 21: Clear Reset for Descr */
+#define CSR_DESC_SET (1L<<20) /* Bit 20: Set Reset for Descr */
+#define CSR_FIFO_CLEAR (1L<<19) /* Bit 19: Clear Reset for FIFO */
+#define CSR_FIFO_SET (1L<<18) /* Bit 18: Set Reset for FIFO */
+#define CSR_HPI_RUN (1L<<17) /* Bit 17: Release HPI SM */
+#define CSR_HPI_RST (1L<<16) /* Bit 16: Reset HPI SM to Idle */
+#define CSR_SV_RUN (1L<<15) /* Bit 15: Release Supervisor SM */
+#define CSR_SV_RST (1L<<14) /* Bit 14: Reset Supervisor SM */
+#define CSR_DREAD_RUN (1L<<13) /* Bit 13: Release Descr Read SM */
+#define CSR_DREAD_RST (1L<<12) /* Bit 12: Reset Descr Read SM */
+#define CSR_DWRITE_RUN (1L<<11) /* Bit 11: Rel. Descr Write SM */
+#define CSR_DWRITE_RST (1L<<10) /* Bit 10: Reset Descr Write SM */
+#define CSR_TRANS_RUN (1L<<9) /* Bit 9: Release Transfer SM */
+#define CSR_TRANS_RST (1L<<8) /* Bit 8: Reset Transfer SM */
+ /* Bit 7..5: reserved */
+#define CSR_START (1L<<4) /* Bit 4: Start Rec/Xmit Queue */
+#define CSR_IRQ_CL_P (1L<<3) /* Bit 3: Clear Parity IRQ, Rcv */
+#define CSR_IRQ_CL_B (1L<<2) /* Bit 2: Clear EOB IRQ */
+#define CSR_IRQ_CL_F (1L<<1) /* Bit 1: Clear EOF IRQ */
+#define CSR_IRQ_CL_C (1L<<0) /* Bit 0: Clear ERR IRQ */
+
+#define CSR_SET_RESET (CSR_DESC_SET|CSR_FIFO_SET|CSR_HPI_RST|CSR_SV_RST|\
+ CSR_DREAD_RST|CSR_DWRITE_RST|CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLEAR|CSR_FIFO_CLEAR|CSR_HPI_RUN|CSR_SV_RUN|\
+ CSR_DREAD_RUN|CSR_DWRITE_RUN|CSR_TRANS_RUN)
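+
+/*
+ * Usage sketch (illustration only; the real driver sequence may differ):
+ * the grouped reset bits above are meant to be written to a queue's CSR,
+ * e.g. to re-initialize and start receive queue 1:
+ *
+ *	outpd(ADDR(B4_R1_CSR),CSR_SET_RESET) ;
+ *	outpd(ADDR(B4_R1_CSR),CSR_CLR_RESET) ;
+ *	outpd(ADDR(B4_R1_CSR),CSR_START) ;
+ */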
+
+
+/* B5_<xx>_F 32 bit flag register (xx) */
+ /* Bit 28..31: reserved */
+#define F_ALM_FULL (1L<<27) /* Bit 27: (ML) FIFO almost full */
+#define F_FIFO_EOF (1L<<26) /* Bit 26: (ML) Flag bit in FIFO */
+#define F_WM_REACHED (1L<<25) /* Bit 25: (ML) Watermark reached */
+#define F_UP_DW_USED (1L<<24) /* Bit 24: (ML) Upper Dword used (bug)*/
+ /* Bit 23: reserved */
+#define F_FIFO_LEVEL (0x1fL<<16) /* Bit 16..22:(ML) # of Qwords in FIFO*/
+ /* Bit 8..15: reserved */
+#define F_ML_WATER_M 0x0000ffL /* Bit 0.. 7:(ML) Watermark */
+#define FLAG_WATER 0x00001fL /* Bit 4..0:(DV) Level of req data tr.*/
+
+/* B5_<xx>_T1 32 bit Test Register 1 (xx) */
+/* Holds four State Machine control Bytes */
+#define SM_CRTL_SV (0xffL<<24) /* Bit 31..24: Control Supervisor SM */
+#define SM_CRTL_RD (0xffL<<16) /* Bit 23..16: Control Read Desc SM */
+#define SM_CRTL_WR (0xffL<<8) /* Bit 15..8: Control Write Desc SM */
+#define SM_CRTL_TR (0xffL<<0) /* Bit 7..0: Control Transfer SM */
+
+/* B4_<xx>_T1_TR 8 bit Test Register 1 TR (xx) */
+/* B4_<xx>_T1_WR 8 bit Test Register 1 WR (xx) */
+/* B4_<xx>_T1_RD 8 bit Test Register 1 RD (xx) */
+/* B4_<xx>_T1_SV 8 bit Test Register 1 SV (xx) */
+/* The control status byte of each machine looks like ... */
+#define SM_STATE 0xf0 /* Bit 7..4: State which shall be loaded */
+#define SM_LOAD 0x08 /* Bit 3: Load the SM with SM_STATE */
+#define SM_TEST_ON 0x04 /* Bit 2: Switch on SM Test Mode */
+#define SM_TEST_OFF 0x02 /* Bit 1: Go off the Test Mode */
+#define SM_STEP 0x01 /* Bit 0: Step the State Machine */
+
+/* The coding of the states */
+#define SM_SV_IDLE 0x0 /* Supervisor Idle Tr/Re */
+#define SM_SV_RES_START 0x1 /* Supervisor Res_Start Tr/Re */
+#define SM_SV_GET_DESC 0x3 /* Supervisor Get_Desc Tr/Re */
+#define SM_SV_CHECK 0x2 /* Supervisor Check Tr/Re */
+#define SM_SV_MOV_DATA 0x6 /* Supervisor Move_Data Tr/Re */
+#define SM_SV_PUT_DESC 0x7 /* Supervisor Put_Desc Tr/Re */
+#define SM_SV_SET_IRQ 0x5 /* Supervisor Set_Irq Tr/Re */
+
+#define SM_RD_IDLE 0x0 /* Read Desc. Idle Tr/Re */
+#define SM_RD_LOAD 0x1 /* Read Desc. Load Tr/Re */
+#define SM_RD_WAIT_TC 0x3 /* Read Desc. Wait_TC Tr/Re */
+#define SM_RD_RST_EOF 0x6 /* Read Desc. Reset_EOF Re */
+#define SM_RD_WDONE_R 0x2 /* Read Desc. Wait_Done Re */
+#define SM_RD_WDONE_T 0x4 /* Read Desc. Wait_Done Tr */
+
+#define SM_TR_IDLE 0x0 /* Trans. Data Idle Tr/Re */
+#define SM_TR_LOAD 0x3 /* Trans. Data Load Tr/Re */
+#define SM_TR_LOAD_R_ML 0x1 /* Trans. Data Load /Re (ML) */
+#define SM_TR_WAIT_TC 0x2 /* Trans. Data Wait_TC Tr/Re */
+#define SM_TR_WDONE 0x4 /* Trans. Data Wait_Done Tr/Re */
+
+#define SM_WR_IDLE 0x0 /* Write Desc. Idle Tr/Re */
+#define SM_WR_ABLEN 0x1 /* Write Desc. Act_Buf_Length Tr/Re */
+#define SM_WR_LD_A4 0x2 /* Write Desc. Load_A4 Re */
+#define SM_WR_RES_OWN 0x2 /* Write Desc. Res_OWN Tr */
+#define SM_WR_WAIT_EOF 0x3 /* Write Desc. Wait_EOF Re */
+#define SM_WR_LD_N2C_R 0x4 /* Write Desc. Load_N2C Re */
+#define SM_WR_WAIT_TC_R 0x5 /* Write Desc. Wait_TC Re */
+#define SM_WR_WAIT_TC4 0x6 /* Write Desc. Wait_TC4 Re */
+#define SM_WR_LD_A_T 0x6 /* Write Desc. Load_A Tr */
+#define SM_WR_LD_A_R 0x7 /* Write Desc. Load_A Re */
+#define SM_WR_WAIT_TC_T 0x7 /* Write Desc. Wait_TC Tr */
+#define SM_WR_LD_N2C_T 0xc /* Write Desc. Load_N2C Tr */
+#define SM_WR_WDONE_T 0x9 /* Write Desc. Wait_Done Tr */
+#define SM_WR_WDONE_R 0xc /* Write Desc. Wait_Done Re */
+#define SM_WR_LD_D_AD 0xe /* Write Desc. Load_Dumr_A Re (ML) */
+#define SM_WR_WAIT_D_TC 0xf /* Write Desc. Wait_Dumr_TC Re (ML) */
+
+/* B5_<xx>_T2 32 bit Test Register 2 (xx) */
+/* Note: This register is only defined for the transmit queues */
+ /* Bit 31..8: reserved */
+#define AC_TEST_ON (1<<7) /* Bit 7: Address Counter Test Mode on */
+#define AC_TEST_OFF (1<<6) /* Bit 6: Address Counter Test Mode off*/
+#define BC_TEST_ON (1<<5) /* Bit 5: Byte Counter Test Mode on */
+#define BC_TEST_OFF (1<<4) /* Bit 4: Byte Counter Test Mode off */
+#define TEST_STEP04 (1<<3) /* Bit 3: Inc AC/Dec BC by 4 */
+#define TEST_STEP03 (1<<2) /* Bit 2: Inc AC/Dec BC by 3 */
+#define TEST_STEP02 (1<<1) /* Bit 1: Inc AC/Dec BC by 2 */
+#define TEST_STEP01 (1<<0) /* Bit 0: Inc AC/Dec BC by 1 */
+
+/* B5_<xx>_T3 32 bit Test Register 3 (xx) */
+/* Note: This register is only defined for the transmit queues */
+ /* Bit 31..8: reserved */
+#define T3_MUX_2 (1<<7) /* Bit 7: (ML) Mux position MSB */
+#define T3_VRAM_2 (1<<6) /* Bit 6: (ML) Virtual RAM buffer addr MSB */
+#define T3_LOOP (1<<5) /* Bit 5: Set Loopback (Xmit) */
+#define T3_UNLOOP (1<<4) /* Bit 4: Unset Loopback (Xmit) */
+#define T3_MUX (3<<2) /* Bit 3..2: Mux position */
+#define T3_VRAM (3<<0) /* Bit 1..0: Virtual RAM buffer Address */
+
+/* PCI card IDs */
+/*
+ * Note: The following 4 byte definitions shall not be used! Use OEM Concept!
+ */
+#define PCI_VEND_ID0 0x48 /* PCI vendor ID (SysKonnect) */
+#define PCI_VEND_ID1 0x11 /* PCI vendor ID (SysKonnect) */
+ /* (High byte) */
+#define PCI_DEV_ID0 0x00 /* PCI device ID */
+#define PCI_DEV_ID1 0x40 /* PCI device ID (High byte) */
+
+/*#define PCI_CLASS 0x02*/ /* PCI class code: network device */
+#define PCI_NW_CLASS 0x02 /* PCI class code: network device */
+#define PCI_SUB_CLASS 0x02 /* PCI subclass ID: FDDI device */
+#define PCI_PROG_INTFC 0x00 /* PCI programming Interface (=0) */
+
+/*
+ * address translation from logical to physical offset address on the board
+ */
+#define FMA(a) (0x0400|((a)<<2)) /* FORMAC+ (r/w) (SN3) */
+#define P1(a) (0x0380|((a)<<2)) /* PLC1 (r/w) (DAS) */
+#define P2(a) (0x0600|((a)<<2)) /* PLC2 (r/w) (covered by the SN3) */
+#define PRA(a) (B2_MAC_0 + (a)) /* configuration PROM (MAC address) */
+
+/*
+ * FlashProm specification
+ */
+#define MAX_PAGES 0x20000L /* Every byte has a single page */
+#define MAX_FADDR 1 /* 1 byte per page */
+
+/*
+ * Receive / Transmit Buffer Control word
+ */
+#define BMU_OWN (1UL<<31) /* OWN bit: 0 == host, 1 == adapter */
+#define BMU_STF (1L<<30) /* Start of Frame ? */
+#define BMU_EOF (1L<<29) /* End of Frame ? */
+#define BMU_EN_IRQ_EOB (1L<<28) /* Enable "End of Buffer" IRQ */
+#define BMU_EN_IRQ_EOF (1L<<27) /* Enable "End of Frame" IRQ */
+#define BMU_DEV_0 (1L<<26) /* RX: don't transfer to system mem */
+#define BMU_SMT_TX (1L<<25) /* TX: if set, buffer type SMT_MBuf */
+#define BMU_ST_BUF (1L<<25) /* RX: copy of start of frame */
+#define BMU_UNUSED (1L<<24) /* Set if the Descr is curr unused */
+#define BMU_SW (3L<<24) /* 2 Bits reserved for SW usage */
+#define BMU_CHECK 0x00550000L /* To identify the control word */
+#define BMU_BBC 0x0000FFFFL /* R/T Buffer Byte Count */
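+
+/*
+ * Example (illustration only): a descriptor that carries a complete frame
+ * of 'len' bytes (len is a hypothetical variable) would use a control word
+ * such as
+ *
+ *	BMU_OWN | BMU_STF | BMU_EOF | BMU_CHECK | (len & BMU_BBC)
+ *
+ * i.e. the adapter owns the buffer, it is both start and end of the frame,
+ * and the low word holds the byte count.
+ */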
+
+/*
+ * physical address offset + IO-Port base address
+ */
+#ifdef MEM_MAPPED_IO
+#define ADDR(a) (char far *) smc->hw.iop+(a)
+#define ADDRS(smc,a) (char far *) (smc)->hw.iop+(a)
+#else
+#define ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), \
+ (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
+ (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
+#define ADDRS(smc,a) (((a)>>7) ? (outp((smc)->hw.iop+B0_RAP,(a)>>7), \
+ ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
+ ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
+#endif
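+
+/*
+ * Worked example (illustration only) for the I/O mapped case: B2_MAC_0
+ * (0x0100) lies in bank 2, so ADDR(B2_MAC_0) first writes 2 (= 0x0100 >> 7)
+ * into B0_RAP and then evaluates to smc->hw.iop + 0x80. Banks other than
+ * bank 0 are reached through this 128 byte window at offset 0x80 after
+ * selecting them via B0_RAP.
+ */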
+
+/*
+ * Define a macro to access the configuration space
+ */
+#define PCI_C(a) ADDR(B3_CFG_SPC + (a)) /* PCI Config Space */
+
+#define EXT_R(a) ADDR(B6_EXT_REG + (a)) /* External Registers */
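+
+/*
+ * Example (illustration only): the PCI revision ID can be read through
+ * this copy of the configuration space, e.g.
+ *
+ *	u_char rev = (u_char)inp(PCI_C(PCI_REV_ID)) ;
+ */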
+
+/*
+ * Define some values needed for the MAC address (PROM)
+ */
+#define SA_MAC (0) /* start addr. MAC_AD within the PROM */
+#define PRA_OFF (0) /* offset correction when 4th byte reading */
+
+#define SKFDDI_PSZ 8 /* address PROM size */
+
+#define FM_A(a) ADDR(FMA(a)) /* FORMAC Plus physical addr */
+#define P1_A(a) ADDR(P1(a)) /* PLC1 (r/w) */
+#define P2_A(a) ADDR(P2(a)) /* PLC2 (r/w) (DAS) */
+#define PR_A(a) ADDR(PRA(a)) /* config. PROM (MAC address) */
+
+/*
+ * Macro to read the PROM
+ */
+#define READ_PROM(a) ((u_char)inp(a))
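+
+/*
+ * Usage sketch (illustration only): the address PROM can be read byte by
+ * byte, e.g.
+ *
+ *	for (i = 0 ; i < SKFDDI_PSZ ; i++)
+ *		prom[i] = READ_PROM(PR_A(SA_MAC + i)) ;
+ *
+ * where 'i' and 'prom[]' are hypothetical local variables.
+ */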
+
+#define GET_PAGE(bank) outpd(ADDR(B2_FAR),bank)
+#define VPP_ON()
+#define VPP_OFF()
+
+/*
+ * Note: Values of the Interrupt Source Register are defined above
+ */
+#define ISR_A ADDR(B0_ISRC)
+#define GET_ISR() inpd(ISR_A)
+#define GET_ISR_SMP(iop) inpd((iop)+B0_ISRC)
+#define CHECK_ISR() (inpd(ISR_A) & inpd(ADDR(B0_IMSK)))
+#define CHECK_ISR_SMP(iop) (inpd((iop)+B0_ISRC) & inpd((iop)+B0_IMSK))
+
+#define BUS_CHECK()
+
+/*
+ * CLI_FBI: Disable Board Interrupts
+ * STI_FBI: Enable Board Interrupts
+ */
+#ifndef UNIX
+#define CLI_FBI() outpd(ADDR(B0_IMSK),0)
+#else
+#define CLI_FBI(smc) outpd(ADDRS((smc),B0_IMSK),0)
+#endif
+
+#ifndef UNIX
+#define STI_FBI() outpd(ADDR(B0_IMSK),smc->hw.is_imask)
+#else
+#define STI_FBI(smc) outpd(ADDRS((smc),B0_IMSK),(smc)->hw.is_imask)
+#endif
+
+#define CLI_FBI_SMP(iop) outpd((iop)+B0_IMSK,0)
+#define STI_FBI_SMP(smc,iop) outpd((iop)+B0_IMSK,(smc)->hw.is_imask)
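+
+/*
+ * Usage sketch (illustration only): a section that must not be disturbed
+ * by board interrupts is typically bracketed as
+ *
+ *	CLI_FBI() ;
+ *	... touch shared adapter state ...
+ *	STI_FBI() ;
+ *
+ * (the UNIX variants of the macros take the smc pointer as argument).
+ */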
+
+#endif /* PCI */
+/*--------------------------------------------------------------------------*/
+
+/*
+ * 12 bit transfer (dword) counter:
+ * (ISA: 2*trc = number of bytes)
+ * (EISA: 4*trc = number of bytes)
+ * (MCA: 4*trc = number of bytes)
+ */
+#define MAX_TRANS (0x0fff)
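+
+/*
+ * Example: with MAX_TRANS = 0x0fff one transfer moves at most
+ * 2 * 0x0fff = 8190 bytes on ISA and 4 * 0x0fff = 16380 bytes on EISA/MCA.
+ */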
+
+/*
+ * PC PIC
+ */
+#define MST_8259 (0x20)
+#define SLV_8259 (0xA0)
+
+#define TPS (18) /* ticks per second */
+
+/*
+ * error timer defs
+ */
+#define TN (4) /* number of supported timers = TN+1 */
+#define SNPPND_TIME (5) /* buffer memory access over mem. data reg. */
+
+#define MAC_AD 0x405a0000
+
+#define MODR1 FM_A(FM_MDREG1) /* mode register 1 */
+#define MODR2 FM_A(FM_MDREG2) /* mode register 2 */
+
+#define CMDR1 FM_A(FM_CMDREG1) /* command register 1 */
+#define CMDR2 FM_A(FM_CMDREG2) /* command register 2 */
+
+
+/*
+ * function defines
+ */
+#define CLEAR(io,mask) outpw((io),inpw(io)&(~(mask)))
+#define SET(io,mask) outpw((io),inpw(io)|(mask))
+#define GET(io,mask) (inpw(io)&(mask))
+#define SETMASK(io,val,mask) outpw((io),(inpw(io) & ~(mask)) | (val))
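+
+/*
+ * Example (illustration only, using the ISA CSR bits defined above just as
+ * sample values): SETMASK() replaces only the masked field of a register by
+ * a read-modify-write, e.g.
+ *
+ *	SETMASK(io,CS_BYPASS,CS_BYPASS|CS_VPPSW)
+ *
+ * (with a hypothetical 16 bit port 'io') sets CS_BYPASS, clears CS_VPPSW
+ * and leaves all other bits unchanged.
+ */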
+
+/*
+ * PHY Port A (PA) = PLC 1
+ * With SuperNet 3, PHY A and PHY S are identical.
+ */
+#define PLC(np,reg) (((np) == PA) ? P2_A(reg) : P1_A(reg))
+
+/*
+ * set memory address register for write and read
+ */
+#define MARW(ma) outpw(FM_A(FM_MARW),(unsigned int)(ma))
+#define MARR(ma) outpw(FM_A(FM_MARR),(unsigned int)(ma))
+
+/*
+ * read/write from/to memory data register
+ */
+/* write double word */
+#define MDRW(dd) outpw(FM_A(FM_MDRU),(unsigned int)((dd)>>16)) ;\
+ outpw(FM_A(FM_MDRL),(unsigned int)(dd))
+
+#ifndef WINNT
+/* read double word */
+#define MDRR() (((long)inpw(FM_A(FM_MDRU))<<16) + inpw(FM_A(FM_MDRL)))
+
+/* read FORMAC+ 32-bit status register */
+#define GET_ST1() (((long)inpw(FM_A(FM_ST1U))<<16) + inpw(FM_A(FM_ST1L)))
+#define GET_ST2() (((long)inpw(FM_A(FM_ST2U))<<16) + inpw(FM_A(FM_ST2L)))
+#ifdef SUPERNET_3
+#define GET_ST3() (((long)inpw(FM_A(FM_ST3U))<<16) + inpw(FM_A(FM_ST3L)))
+#endif
+#else
+/* read double word */
+#define MDRR() inp2w((FM_A(FM_MDRU)),(FM_A(FM_MDRL)))
+
+/* read FORMAC+ 32-bit status register */
+#define GET_ST1() inp2w((FM_A(FM_ST1U)),(FM_A(FM_ST1L)))
+#define GET_ST2() inp2w((FM_A(FM_ST2U)),(FM_A(FM_ST2L)))
+#ifdef SUPERNET_3
+#define GET_ST3() inp2w((FM_A(FM_ST3U)),(FM_A(FM_ST3L)))
+#endif
+#endif
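+
+/*
+ * Usage sketch (illustration only): one long word of FORMAC+ buffer memory
+ * at address 'ma' (hypothetical) is read by loading the memory address
+ * register first and then reading the data register:
+ *
+ *	MARR(ma) ;
+ *	data = MDRR() ;
+ *
+ * writing works analogously with MARW(ma) followed by MDRW(data).
+ */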
+
+/* Special timer macro for 82c54 */
+ /* timer access over data bus bit 8..15 */
+#define OUT_82c54_TIMER(port,val) outpw(TI_A(port),(val)<<8)
+#define IN_82c54_TIMER(port) ((inpw(TI_A(port))>>8) & 0xff)
+
+
+#ifdef DEBUG
+#define DB_MAC(mac,st) {if (debug_mac & 0x1)\
+ printf("M") ;\
+ if (debug_mac & 0x2)\
+ printf("\tMAC %d status 0x%08lx\n",mac,st) ;\
+ if (debug_mac & 0x4)\
+ dp_mac(mac,st) ;\
+}
+
+#define DB_PLC(p,iev) { if (debug_plc & 0x1)\
+ printf("P") ;\
+ if (debug_plc & 0x2)\
+ printf("\tPLC %s Int 0x%04x\n", \
+ (p == PA) ? "A" : "B", iev) ;\
+ if (debug_plc & 0x4)\
+ dp_plc(p,iev) ;\
+}
+
+#define DB_TIMER() { if (debug_timer & 0x1)\
+ printf("T") ;\
+ if (debug_timer & 0x2)\
+ printf("\tTimer ISR\n") ;\
+}
+
+#else /* no DEBUG */
+
+#define DB_MAC(mac,st)
+#define DB_PLC(p,iev)
+#define DB_TIMER()
+
+#endif /* no DEBUG */
+
+#define INC_PTR(sp,cp,ep) if (++cp == ep) cp = sp
+/*
+ * timer defs
+ */
+#define COUNT(t) ((t)<<6) /* counter */
+#define RW_OP(o) ((o)<<4) /* read/write operation */
+#define TMODE(m) ((m)<<1) /* timer mode */
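+
+/*
+ * Worked example (illustration only): the classic 82c54 control word 0x36
+ * (counter 0, read/write LSB then MSB, mode 3) can be composed as
+ *
+ *	COUNT(0) | RW_OP(3) | TMODE(3)	= 0x00 | 0x30 | 0x06 = 0x36
+ */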
+
+#endif
diff --git a/drivers/net/skfp/h/skfbiinc.h b/drivers/net/skfp/h/skfbiinc.h
new file mode 100644
index 000000000000..79d55ad2cd2a
--- /dev/null
+++ b/drivers/net/skfp/h/skfbiinc.h
@@ -0,0 +1,123 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SKFBIINC_
+#define _SKFBIINC_
+
+#include "h/supern_2.h"
+
+/*
+ * special defines for use in .asm files
+ */
+#define ERR_FLAGS (FS_MSRABT | FS_SEAC2 | FS_SFRMERR | FS_SFRMTY1)
+
+#ifdef ISA
+#define DMA_BUSY_CHECK CSRA
+#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT)
+#define HRQR (RQAA+(RQ_RRQ<<1))
+#define HRQW (RQAA+(RQ_WA2<<1))
+#define HRQA0 (RQAA+(RQ_WA0<<1))
+#define HRQSQ (RQAA+(RQ_WSQ<<1))
+#endif
+
+#ifdef EISA
+#define DMA_BUSY_CHECK CSRA
+#define DMA_HIGH_WORD 0x0400
+#define DMA_MASK_M 0x0a
+#define DMA_MODE_M 0x0b
+#define DMA_BYTE_PTR_M 0x0c
+#define DMA_MASK_S 0x0d4
+#define DMA_MODE_S 0x0d6
+#define DMA_BYTE_PTR_S 0x0d8
+#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TC)
+#endif /* EISA */
+
+#ifdef MCA
+#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
+ IS_CHCK_L | IS_BUSERR)
+#endif
+
+#ifdef PCI
+#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
+ IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
+ IS_R1_C | IS_XA_C | IS_XS_C)
+#endif
+
+#ifdef PCI
+#define ISR_MASK (IS_MINTR1 | IS_R1_F | IS_XS_F| IS_XA_F | IMASK_FAST)
+#else
+#define ISR_MASK (IS_MINTR1 | IS_MINTR2 | IMASK_FAST)
+#endif
+
+#define FMA_FM_CMDREG1 FMA(FM_CMDREG1)
+#define FMA_FM_CMDREG2 FMA(FM_CMDREG2)
+#define FMA_FM_STMCHN FMA(FM_STMCHN)
+#define FMA_FM_RPR FMA(FM_RPR)
+#define FMA_FM_WPXA0 FMA(FM_WPXA0)
+#define FMA_FM_WPXA2 FMA(FM_WPXA2)
+#define FMA_FM_MARR FMA(FM_MARR)
+#define FMA_FM_MARW FMA(FM_MARW)
+#define FMA_FM_MDRU FMA(FM_MDRU)
+#define FMA_FM_MDRL FMA(FM_MDRL)
+#define FMA_ST1L FMA(FM_ST1L)
+#define FMA_ST1U FMA(FM_ST1U)
+#define FMA_ST2L FMA(FM_ST2L)
+#define FMA_ST2U FMA(FM_ST2U)
+#ifdef SUPERNET_3
+#define FMA_ST3L FMA(FM_ST3L)
+#define FMA_ST3U FMA(FM_ST3U)
+#endif
+
+#define TMODE_RRQ RQ_RRQ
+#define TMODE_WAQ2 RQ_WA2
+#define HSRA HSR(0)
+
+
+#define FMA_FM_ST1L FMA_ST1L
+#define FMA_FM_ST1U FMA_ST1U
+#define FMA_FM_ST2L FMA_ST2L
+#define FMA_FM_ST2U FMA_ST2U
+#ifdef SUPERNET_3
+#define FMA_FM_ST3L FMA_ST3L
+#define FMA_FM_ST3U FMA_ST3U
+#endif
+
+#define FMA_FM_SWPR FMA(FM_SWPR)
+
+#define FMA_FM_RPXA0 FMA(FM_RPXA0)
+
+#define FMA_FM_RPXS FMA(FM_RPXS)
+#define FMA_FM_WPXS FMA(FM_WPXS)
+
+#define FMA_FM_IMSK1U FMA(FM_IMSK1U)
+#define FMA_FM_IMSK1L FMA(FM_IMSK1L)
+
+#define FMA_FM_EAS FMA(FM_EAS)
+#define FMA_FM_EAA0 FMA(FM_EAA0)
+
+#define TMODE_WAQ0 RQ_WA0
+#define TMODE_WSQ RQ_WSQ
+
+/* Define default for DRV_PCM_STATE_CHANGE */
+#ifndef DRV_PCM_STATE_CHANGE
+#define DRV_PCM_STATE_CHANGE(smc,plc,p_state) /* nothing */
+#endif
+
+/* Define default for DRV_RMT_INDICATION */
+#ifndef DRV_RMT_INDICATION
+#define DRV_RMT_INDICATION(smc,i) /* nothing */
+#endif
+
+#endif /* n_SKFBIINC_ */
+
diff --git a/drivers/net/skfp/h/smc.h b/drivers/net/skfp/h/smc.h
new file mode 100644
index 000000000000..94325915e0d5
--- /dev/null
+++ b/drivers/net/skfp/h/smc.h
@@ -0,0 +1,471 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SCMECM_
+#define _SCMECM_
+
+#if defined(PCI) && !defined(OSDEF)
+/*
+ * In the case of the PCI bus the file osdef1st.h must be present
+ */
+#define OSDEF
+#endif
+
+#ifdef PCI
+#ifndef SUPERNET_3
+#define SUPERNET_3
+#endif
+#ifndef TAG_MODE
+#define TAG_MODE
+#endif
+#endif
+
+/*
+ * include all other files in required order
+ * the following files must have been included before:
+ * types.h
+ * fddi.h
+ */
+#ifdef OSDEF
+#include "h/osdef1st.h"
+#endif /* OSDEF */
+#ifdef OEM_CONCEPT
+#include "oemdef.h"
+#endif /* OEM_CONCEPT */
+#include "h/smt.h"
+#include "h/cmtdef.h"
+#include "h/fddimib.h"
+#include "h/targethw.h" /* all target hw dependencies */
+#include "h/targetos.h" /* all target os dependencies */
+#ifdef ESS
+#include "h/sba.h"
+#endif
+
+/*
+ * Event Queue
+ * queue.c
+ * events are class/value pairs
+ * class is addressee, e.g. RMT, PCM etc.
+ * value is command, e.g. line state change, ring op change etc.
+ */
+struct event_queue {
+ u_short class ; /* event class */
+ u_short event ; /* event value */
+} ;
+
+/*
+ * define event queue as circular buffer
+ */
+#ifdef CONCENTRATOR
+#define MAX_EVENT 128
+#else /* nCONCENTRATOR */
+#define MAX_EVENT 64
+#endif /* nCONCENTRATOR */
+
+struct s_queue {
+
+ struct event_queue ev_queue[MAX_EVENT];
+ struct event_queue *ev_put ;
+ struct event_queue *ev_get ;
+} ;
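
Editor's note: the comments above describe the event queue as a fixed-size circular buffer of class/value pairs, with ev_put and ev_get walking through ev_queue[]. A minimal sketch of that wrap-around logic follows; the real implementation lives in queue.c and may differ (overflow handling is omitted here):

        /* illustrative only -- enqueue one event and drain the queue */
        static void ev_put_sketch(struct s_queue *q, u_short class, u_short event)
        {
                q->ev_put->class = class ;      /* addressee, e.g. RMT or PCM */
                q->ev_put->event = event ;      /* command, e.g. line state change */
                if (++q->ev_put == &q->ev_queue[MAX_EVENT])
                        q->ev_put = q->ev_queue ;       /* wrap around */
                /* the real queue.c would also detect ev_put catching up to ev_get */
        }

        static void ev_drain_sketch(struct s_queue *q)
        {
                while (q->ev_get != q->ev_put) {        /* queue not empty */
                        /* dispatch q->ev_get->class / q->ev_get->event here */
                        if (++q->ev_get == &q->ev_queue[MAX_EVENT])
                                q->ev_get = q->ev_queue ;       /* wrap around */
                }
        }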
+
+/*
+ * ECM - Entity Coordination Management
+ * ecm.c
+ */
+struct s_ecm {
+ u_char path_test ; /* ECM path test variable */
+ u_char sb_flag ; /* ECM stuck bypass */
+ u_char DisconnectFlag ; /* jd 05-Aug-1999 Bug #10419
+ * ECM disconnected */
+ u_char ecm_line_state ; /* flag to dispatcher : line states */
+ u_long trace_prop ; /* ECM Trace_Prop flag >= 16 bits !! */
+ /* NUMPHYS note:
+ * this variable must have enough bits to hold all entities in
+ * the station. So NUMPHYS may not be greater than 31.
+ */
+ char ec_pad[2] ;
+ struct smt_timer ecm_timer ; /* timer */
+} ;
+
+
+/*
+ * RMT - Ring Management
+ * rmt.c
+ */
+struct s_rmt {
+ u_char dup_addr_test ; /* state of dupl. addr. test */
+ u_char da_flag ; /* flag : duplicate address det. */
+ u_char loop_avail ; /* flag : MAC available for loopback */
+ u_char sm_ma_avail ; /* flag : MAC available for SMT */
+ u_char no_flag ; /* flag : ring not operational */
+ u_char bn_flag ; /* flag : MAC reached beacon state */
+ u_char jm_flag ; /* flag : jamming in NON_OP_DUP */
+ u_char rm_join ; /* CFM flag RM_Join */
+ u_char rm_loop ; /* CFM flag RM_Loop */
+
+ long fast_rm_join ; /* bit mask of active ports */
+ /*
+ * timer and flags
+ */
+ struct smt_timer rmt_timer0 ; /* timer 0 */
+ struct smt_timer rmt_timer1 ; /* timer 1 */
+ struct smt_timer rmt_timer2 ; /* timer 2 */
+ u_char timer0_exp ; /* flag : timer 0 expired */
+ u_char timer1_exp ; /* flag : timer 1 expired */
+ u_char timer2_exp ; /* flag : timer 2 expired */
+
+ u_char rm_pad1[1] ;
+} ;
+
+/*
+ * CFM - Configuration Management
+ * cfm.c
+ * used for SAS and DAS
+ */
+struct s_cfm {
+ u_char cf_state; /* CFM state machine current state */
+ u_char cf_pad[3] ;
+} ;
+
+/*
+ * CEM - Configuration Element Management
+ * cem.c
+ * used for Concentrator
+ */
+#ifdef CONCENTRATOR
+struct s_cem {
+ int ce_state ; /* CEM state */
+ int ce_port ; /* PA PB PM PM+1 .. */
+ int ce_type ; /* TA TB TS TM */
+} ;
+
+/*
+ * linked list of CCEs in current token path
+ */
+struct s_c_ring {
+ struct s_c_ring *c_next ;
+ char c_entity ;
+} ;
+
+struct mib_path_config {
+ u_long fddimibPATHConfigSMTIndex;
+ u_long fddimibPATHConfigPATHIndex;
+ u_long fddimibPATHConfigTokenOrder;
+ u_long fddimibPATHConfigResourceType;
+#define SNMP_RES_TYPE_MAC 2 /* Resource is a MAC */
+#define SNMP_RES_TYPE_PORT 4 /* Resource is a PORT */
+ u_long fddimibPATHConfigResourceIndex;
+ u_long fddimibPATHConfigCurrentPath;
+#define SNMP_PATH_ISOLATED 1 /* Current path is isolated */
+#define SNMP_PATH_LOCAL 2 /* Current path is local */
+#define SNMP_PATH_SECONDARY 3 /* Current path is secondary */
+#define SNMP_PATH_PRIMARY 4 /* Current path is primary */
+#define SNMP_PATH_CONCATENATED 5 /* Current path is concatenated */
+#define SNMP_PATH_THRU 6 /* Current path is thru */
+};
+
+
+#endif
+
+/*
+ * PCM connect states
+ */
+#define PCM_DISABLED 0
+#define PCM_CONNECTING 1
+#define PCM_STANDBY 2
+#define PCM_ACTIVE 3
+
+struct s_pcm {
+ u_char pcm_pad[3] ;
+} ;
+
+/*
+ * PHY struct
+ * one per physical port
+ */
+struct s_phy {
+ /* Inter Module Globals */
+ struct fddi_mib_p *mib ;
+
+ u_char np ; /* index 0 .. NUMPHYS */
+ u_char cf_join ;
+ u_char cf_loop ;
+ u_char wc_flag ; /* withhold connection flag */
+ u_char pc_mode ; /* Holds the negotiated mode of the PCM */
+ u_char pc_lem_fail ; /* flag : LCT failed */
+ u_char lc_test ;
+ u_char scrub ; /* CFM flag Scrub -> PCM */
+ char phy_name ;
+ u_char pmd_type[2] ; /* SK connector/transceiver type codes */
+#define PMD_SK_CONN 0 /* pmd_type[PMD_SK_CONN] = Connector */
+#define PMD_SK_PMD 1 /* pmd_type[PMD_SK_PMD] = Xver */
+ u_char pmd_scramble ; /* scrambler on/off */
+
+ /* inner Module Globals */
+ u_char curr_ls ; /* current line state */
+ u_char ls_flag ;
+ u_char rc_flag ;
+ u_char tc_flag ;
+ u_char td_flag ;
+ u_char bitn ;
+ u_char tr_flag ; /* trace recvd while in active */
+ u_char twisted ; /* flag to indicate an A-A or B-B connection */
+ u_char t_val[NUMBITS] ; /* transmit bits for signaling */
+ u_char r_val[NUMBITS] ; /* receive bits for signaling */
+ u_long t_next[NUMBITS] ;
+ struct smt_timer pcm_timer0 ;
+ struct smt_timer pcm_timer1 ;
+ struct smt_timer pcm_timer2 ;
+ u_char timer0_exp ;
+ u_char timer1_exp ;
+ u_char timer2_exp ;
+ u_char pcm_pad1[1] ;
+ int cem_pst ; /* CEM private state; used for dual homing */
+ struct lem_counter lem ;
+#ifdef AMDPLC
+ struct s_plc plc ;
+#endif
+} ;
+
+/*
+ * timer package
+ * smttimer.c
+ */
+struct s_timer {
+ struct smt_timer *st_queue ;
+ struct smt_timer st_fast ;
+} ;
+
+/*
+ * SRF types and data
+ */
+#define SMT_EVENT_BASE 1
+#define SMT_EVENT_MAC_PATH_CHANGE (SMT_EVENT_BASE+0)
+#define SMT_EVENT_MAC_NEIGHBOR_CHANGE (SMT_EVENT_BASE+1)
+#define SMT_EVENT_PORT_PATH_CHANGE (SMT_EVENT_BASE+2)
+#define SMT_EVENT_PORT_CONNECTION (SMT_EVENT_BASE+3)
+
+#define SMT_IS_CONDITION(x) ((x)>=SMT_COND_BASE)
+
+#define SMT_COND_BASE (SMT_EVENT_PORT_CONNECTION+1)
+#define SMT_COND_SMT_PEER_WRAP (SMT_COND_BASE+0)
+#define SMT_COND_SMT_HOLD (SMT_COND_BASE+1)
+#define SMT_COND_MAC_FRAME_ERROR (SMT_COND_BASE+2)
+#define SMT_COND_MAC_DUP_ADDR (SMT_COND_BASE+3)
+#define SMT_COND_MAC_NOT_COPIED (SMT_COND_BASE+4)
+#define SMT_COND_PORT_EB_ERROR (SMT_COND_BASE+5)
+#define SMT_COND_PORT_LER (SMT_COND_BASE+6)
+
+#define SR0_WAIT 0
+#define SR1_HOLDOFF 1
+#define SR2_DISABLED 2
+
+struct s_srf {
+ u_long SRThreshold ; /* threshold value */
+ u_char RT_Flag ; /* report transmitted flag */
+ u_char sr_state ; /* state-machine */
+ u_char any_report ; /* any report required */
+ u_long TSR ; /* timer */
+ u_short ring_status ; /* IBM ring status */
+} ;
+
+/*
+ * IBM token ring status
+ */
+#define RS_RES15 (1<<15) /* reserved */
+#define RS_HARDERROR (1<<14) /* ring down */
+#define RS_SOFTERROR (1<<13) /* sent SRF */
+#define RS_BEACON (1<<12) /* transmitted beacon */
+#define RS_PATHTEST (1<<11) /* path test failed */
+#define RS_SELFTEST (1<<10) /* selftest required */
+#define RS_RES9 (1<< 9) /* reserved */
+#define RS_DISCONNECT (1<< 8) /* remote disconnect */
+#define RS_RES7 (1<< 7) /* reserved */
+#define RS_DUPADDR (1<< 6) /* duplicate address */
+#define RS_NORINGOP (1<< 5) /* no ring op */
+#define RS_VERSION (1<< 4) /* SMT version mismatch */
+#define RS_STUCKBYPASSS (1<< 3) /* stuck bypass */
+#define RS_EVENT (1<< 2) /* FDDI event occurred */
+#define RS_RINGOPCHANGE (1<< 1) /* ring op changed */
+#define RS_RES0 (1<< 0) /* reserved */
+
+#define RS_SET(smc,bit) \
+ ring_status_indication(smc,smc->srf.ring_status |= bit)
+#define RS_CLEAR(smc,bit) \
+ ring_status_indication(smc,smc->srf.ring_status &= ~bit)
+
+#define RS_CLEAR_EVENT (0xffff & ~(RS_NORINGOP))
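
Editor's note: RS_SET and RS_CLEAR update the ring_status word in the SRF struct and hand the new value to ring_status_indication() in a single expression (struct s_smc is defined at the end of this header). A hedged usage sketch; the call sites below are illustrative, not quoted from the driver:

        /* report "ring down" to the OS layer, and clear it again later */
        static void report_ring_down_sketch(struct s_smc *smc)
        {
                RS_SET(smc, RS_HARDERROR) ;     /* set bit 14, notify via indication */
        }

        static void report_ring_up_sketch(struct s_smc *smc)
        {
                RS_CLEAR(smc, RS_HARDERROR) ;   /* clear bit 14, notify via indication */
        }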
+
+/* Define the AIX-event-Notification as null function if it isn't defined */
+/* in the targetos.h file */
+#ifndef AIX_EVENT
+#define AIX_EVENT(smc,opt0,opt1,opt2,opt3) /* nothing */
+#endif
+
+struct s_srf_evc {
+ u_char evc_code ; /* event code type */
+ u_char evc_index ; /* index for mult. instances */
+ u_char evc_rep_required ; /* report required */
+ u_short evc_para ; /* SMT Para Number */
+ u_char *evc_cond_state ; /* condition state */
+ u_char *evc_multiple ; /* multiple occurrence */
+} ;
+
+/*
+ * Values used by frame based services
+ * smt.c
+ */
+#define SMT_MAX_TEST 5
+#define SMT_TID_NIF 0 /* pending NIF request */
+#define SMT_TID_NIF_TEST 1 /* pending NIF test */
+#define SMT_TID_ECF_UNA 2 /* pending ECF UNA test */
+#define SMT_TID_ECF_DNA 3 /* pending ECF DNA test */
+#define SMT_TID_ECF 4 /* pending ECF test */
+
+struct smt_values {
+ u_long smt_tvu ; /* timer valid una */
+ u_long smt_tvd ; /* timer valid dna */
+ u_long smt_tid ; /* transaction id */
+ u_long pend[SMT_MAX_TEST] ; /* TID of requests */
+ u_long uniq_time ; /* unique time stamp */
+ u_short uniq_ticks ; /* unique time stamp */
+ u_short please_reconnect ; /* flag : reconnect */
+ u_long smt_last_lem ;
+ u_long smt_last_notify ;
+ struct smt_timer smt_timer ; /* SMT NIF timer */
+ u_long last_tok_time[NUMMACS]; /* token cnt emulation */
+} ;
+
+/*
+ * SMT/CMT configurable parameters
+ */
+#define SMT_DAS 0 /* dual attach */
+#define SMT_SAS 1 /* single attach */
+#define SMT_NAC 2 /* null attach concentrator */
+
+struct smt_config {
+ u_char attach_s ; /* CFM attach to secondary path */
+ u_char sas ; /* SMT_DAS/SAS/NAC */
+ u_char build_ring_map ; /* build ringmap if TRUE */
+ u_char numphys ; /* number of active phys */
+ u_char sc_pad[1] ;
+
+ u_long pcm_tb_min ; /* PCM : TB_Min timer value */
+ u_long pcm_tb_max ; /* PCM : TB_Max timer value */
+ u_long pcm_c_min ; /* PCM : C_Min timer value */
+ u_long pcm_t_out ; /* PCM : T_Out timer value */
+ u_long pcm_tl_min ; /* PCM : TL_min timer value */
+ u_long pcm_lc_short ; /* PCM : LC_Short timer value */
+ u_long pcm_lc_medium ; /* PCM : LC_Medium timer value */
+ u_long pcm_lc_long ; /* PCM : LC_Long timer value */
+ u_long pcm_lc_extended ; /* PCM : LC_Extended timer value */
+ u_long pcm_t_next_9 ; /* PCM : T_Next[9] timer value */
+ u_long pcm_ns_max ; /* PCM : NS_Max timer value */
+
+ u_long ecm_i_max ; /* ECM : I_Max timer value */
+ u_long ecm_in_max ; /* ECM : IN_Max timer value */
+ u_long ecm_td_min ; /* ECM : TD_Min timer */
+ u_long ecm_test_done ; /* ECM : path test done timer */
+ u_long ecm_check_poll ; /* ECM : check bypass poller */
+
+ u_long rmt_t_non_op ; /* RMT : T_Non_OP timer value */
+ u_long rmt_t_stuck ; /* RMT : T_Stuck timer value */
+ u_long rmt_t_direct ; /* RMT : T_Direct timer value */
+ u_long rmt_t_jam ; /* RMT : T_Jam timer value */
+ u_long rmt_t_announce ; /* RMT : T_Announce timer value */
+ u_long rmt_t_poll ; /* RMT : claim/beacon poller */
+ u_long rmt_dup_mac_behavior ; /* Flag for the behavior of SMT if
+ * a duplicate MAC address was detected.
+ * FALSE: SMT will finally leave the ring
+ * TRUE: SMT will reinsert into the ring
+ */
+ u_long mac_d_max ; /* MAC : D_Max timer value */
+
+ u_long lct_short ; /* LCT : error threshold */
+ u_long lct_medium ; /* LCT : error threshold */
+ u_long lct_long ; /* LCT : error threshold */
+ u_long lct_extended ; /* LCT : error threshold */
+} ;
+
+#ifdef DEBUG
+/*
+ * Debugging struct sometimes used in smc
+ */
+struct smt_debug {
+ int d_smtf ;
+ int d_smt ;
+ int d_ecm ;
+ int d_rmt ;
+ int d_cfm ;
+ int d_pcm ;
+ int d_plc ;
+#ifdef ESS
+ int d_ess ;
+#endif
+#ifdef SBA
+ int d_sba ;
+#endif
+ struct os_debug d_os; /* Include specific OS DEBUG struct */
+} ;
+
+#ifndef DEBUG_BRD
+/* all boards shall be debugged with one debug struct */
+extern struct smt_debug debug; /* Declaration of debug struct */
+#endif /* DEBUG_BRD */
+
+#endif /* DEBUG */
+
+/*
+ * the SMT Context Struct SMC
+ * this struct contains ALL global variables of SMT
+ */
+struct s_smc {
+ struct s_smt_os os ; /* os specific */
+ struct s_smt_hw hw ; /* hardware */
+
+/*
+ * NOTE: os and hw MUST BE the first two structs
+ * anything beyond hw WILL BE SET TO ZERO in smt_set_defaults()
+ */
+ struct smt_config s ; /* smt constants */
+ struct smt_values sm ; /* smt variables */
+ struct s_ecm e ; /* ecm */
+ struct s_rmt r ; /* rmt */
+ struct s_cfm cf ; /* cfm/cem */
+#ifdef CONCENTRATOR
+ struct s_cem ce[NUMPHYS] ; /* cem */
+ struct s_c_ring cr[NUMPHYS+NUMMACS] ;
+#endif
+ struct s_pcm p ; /* pcm */
+ struct s_phy y[NUMPHYS] ; /* phy */
+ struct s_queue q ; /* queue */
+ struct s_timer t ; /* timer */
+ struct s_srf srf ; /* SRF */
+ struct s_srf_evc evcs[6+NUMPHYS*4] ;
+ struct fddi_mib mib ; /* __THE_MIB__ */
+#ifdef SBA
+ struct s_sba sba ; /* SBA variables */
+#endif
+#ifdef ESS
+ struct s_ess ess ; /* Ess variables */
+#endif
+#if defined(DEBUG) && defined(DEBUG_BRD)
+ /* If you want each single board to be debugged separately */
+ struct smt_debug debug; /* Declaration of debug struct */
+#endif /* DEBUG_BRD && DEBUG */
+} ;
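
Editor's note: the NOTE embedded in s_smc says that smt_set_defaults() zeroes everything beyond the hw member while leaving os and hw intact. A hedged sketch of how such a reset can be expressed with offsetof(); the shipped smt_set_defaults() lives elsewhere in the driver and is assumed, not quoted, here:

        #include <stddef.h>     /* offsetof */
        #include <string.h>     /* memset */

        static void smt_set_defaults_sketch(struct s_smc *smc)
        {
                /* keep os and hw, zero every member that follows hw */
                size_t keep = offsetof(struct s_smc, hw) + sizeof(smc->hw) ;

                memset((char *) smc + keep, 0, sizeof(*smc) - keep) ;
                /* ...then load the non-zero defaults into smc->s (struct smt_config) */
        }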
+
+#endif /* _SCMECM_ */
+
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
new file mode 100644
index 000000000000..1ff589988d10
--- /dev/null
+++ b/drivers/net/skfp/h/smt.h
@@ -0,0 +1,882 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * SMT 7.2 frame definitions
+ */
+
+#ifndef _SMT_
+#define _SMT_
+
+/* #define SMT5_10 */
+#define SMT6_10
+#define SMT7_20
+
+#define OPT_PMF /* if parameter management is supported */
+#define OPT_SRF /* if status report is supported */
+
+/*
+ * SMT frame version 5.1
+ */
+
+#define SMT_VID 0x0001 /* V 5.1 .. 6.1 */
+#define SMT_VID_2 0x0002 /* V 7.2 */
+
+struct smt_sid {
+ u_char sid_oem[2] ; /* implementation spec. */
+ struct fddi_addr sid_node ; /* node address */
+} ;
+
+typedef u_char t_station_id[8] ;
+
+/*
+ * note on alignment :
+ * sizeof(struct smt_header) = 32
+ * all parameters are long aligned
+ * if struct smt_header starts at offset 0, all longs are aligned correctly
+ * (FC starts at offset 3)
+ */
+_packed struct smt_header {
+ struct fddi_addr smt_dest ; /* destination address */
+ struct fddi_addr smt_source ; /* source address */
+ u_char smt_class ; /* NIF, SIF ... */
+ u_char smt_type ; /* req., response .. */
+ u_short smt_version ; /* version id */
+ u_int smt_tid ; /* transaction ID */
+ struct smt_sid smt_sid ; /* station ID */
+ u_short smt_pad ; /* pad with 0 */
+ u_short smt_len ; /* length of info field */
+} ;
+#define SWAP_SMTHEADER "662sl8ss"
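
Editor's note: the alignment note above relies on sizeof(struct smt_header) being exactly 32 bytes; the swap string "662sl8ss" appears to mirror the field layout (two 6-byte addresses, two single bytes, a short, a long, the 8-byte station ID and two shorts), with the format characters interpreted by the byte-swapping code elsewhere in the driver. A hedged compile-time size check that a port of this header could add (not part of the original source):

        /* fails to compile if the packed layout ever drifts from 32 bytes */
        typedef char smt_header_is_32_bytes[(sizeof(struct smt_header) == 32) ? 1 : -1] ;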
+
+#if 0
+/*
+ * MAC FC values
+ */
+#define FC_SMT_INFO 0x41 /* SMT info */
+#define FC_SMT_NSA 0x4f /* SMT Next Station Addressing */
+#endif
+
+
+/*
+ * type codes
+ */
+#define SMT_ANNOUNCE 0x01 /* announcement */
+#define SMT_REQUEST 0x02 /* request */
+#define SMT_REPLY 0x03 /* reply */
+
+/*
+ * class codes
+ */
+#define SMT_NIF 0x01 /* neighbor information frames */
+#define SMT_SIF_CONFIG 0x02 /* station information configuration */
+#define SMT_SIF_OPER 0x03 /* station information operation */
+#define SMT_ECF 0x04 /* echo frames */
+#define SMT_RAF 0x05 /* resource allocation */
+#define SMT_RDF 0x06 /* request denied */
+#define SMT_SRF 0x07 /* status report */
+#define SMT_PMF_GET 0x08 /* parameter management get */
+#define SMT_PMF_SET 0x09 /* parameter management set */
+#define SMT_ESF 0xff /* extended service */
+
+#define SMT_MAX_ECHO_LEN 4458 /* max length of SMT Echo */
+#if defined(CONC) || defined(CONC_II)
+#define SMT_TEST_ECHO_LEN 50 /* test length of SMT Echo */
+#else
+#define SMT_TEST_ECHO_LEN SMT_MAX_ECHO_LEN /* test length */
+#endif
+
+#define SMT_MAX_INFO_LEN (4352-20) /* max length for SMT info */
+
+
+/*
+ * parameter types
+ */
+
+struct smt_para {
+ u_short p_type ; /* type */
+ u_short p_len ; /* length of parameter */
+} ;
+
+#define PARA_LEN (sizeof(struct smt_para))
+
+#define SMTSETPARA(p,t) (p)->para.p_type = (t),\
+ (p)->para.p_len = sizeof(*(p)) - PARA_LEN
+
+/*
+ * P01 : Upstream Neighbor Address, UNA
+ */
+#define SMT_P_UNA 0x0001 /* upstream neighbor address */
+#define SWAP_SMT_P_UNA "s6"
+
+struct smt_p_una {
+ struct smt_para para ; /* generic parameter header */
+ u_short una_pad ;
+ struct fddi_addr una_node ; /* node address, zero if unknown */
+} ;
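
Editor's note: SMTSETPARA (defined above) stamps the generic parameter header with the type code and with the enclosing struct's size minus the header itself. A hypothetical sketch of building a P01 parameter for an unknown upstream neighbor; the helper is an illustration, not driver code:

        #include <string.h>     /* memset */

        static void build_una_sketch(struct smt_p_una *una)
        {
                SMTSETPARA(una, SMT_P_UNA) ;    /* p_type = 0x0001,
                                                 * p_len = sizeof(*una) - PARA_LEN */
                una->una_pad = 0 ;
                memset((char *) &una->una_node, 0, sizeof(una->una_node)) ;    /* unknown */
        }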
+
+/*
+ * P02 : Station Descriptor
+ */
+#define SMT_P_SDE 0x0002 /* station descriptor */
+#define SWAP_SMT_P_SDE "1111"
+
+#define SMT_SDE_STATION 0 /* end node */
+#define SMT_SDE_CONCENTRATOR 1 /* concentrator */
+
+struct smt_p_sde {
+ struct smt_para para ; /* generic parameter header */
+ u_char sde_type ; /* station type */
+ u_char sde_mac_count ; /* number of MACs */
+ u_char sde_non_master ; /* number of A,B or S ports */
+ u_char sde_master ; /* number of S ports on conc. */
+} ;
+
+/*
+ * P03 : Station State
+ */
+#define SMT_P_STATE 0x0003 /* station state */
+#define SWAP_SMT_P_STATE "scc"
+
+struct smt_p_state {
+ struct smt_para para ; /* generic parameter header */
+ u_short st_pad ;
+ u_char st_topology ; /* topology */
+ u_char st_dupl_addr ; /* duplicate address detected */
+} ;
+#define SMT_ST_WRAPPED (1<<0) /* station wrapped */
+#define SMT_ST_UNATTACHED (1<<1) /* unattached concentrator */
+#define SMT_ST_TWISTED_A (1<<2) /* A-A connection, twisted ring */
+#define SMT_ST_TWISTED_B (1<<3) /* B-B connection, twisted ring */
+#define SMT_ST_ROOTED_S (1<<4) /* rooted station */
+#define SMT_ST_SRF (1<<5) /* SRF protocol supported */
+#define SMT_ST_SYNC_SERVICE (1<<6) /* use synchronous bandwidth */
+
+#define SMT_ST_MY_DUPA (1<<0) /* my station detected dupl. */
+#define SMT_ST_UNA_DUPA (1<<1) /* my UNA detected duplicate */
+
+/*
+ * P04 : timestamp
+ */
+#define SMT_P_TIMESTAMP 0x0004 /* time stamp */
+#define SWAP_SMT_P_TIMESTAMP "8"
+struct smt_p_timestamp {
+ struct smt_para para ; /* generic parameter header */
+ u_char ts_time[8] ; /* time, resolution 80nS, unique */
+} ;
+
+/*
+ * P05 : station policies
+ */
+#define SMT_P_POLICY 0x0005 /* station policies */
+#define SWAP_SMT_P_POLICY "ss"
+
+struct smt_p_policy {
+ struct smt_para para ; /* generic parameter header */
+ u_short pl_config ;
+ u_short pl_connect ; /* bit string POLICY_AA ... */
+} ;
+#define SMT_PL_HOLD 1 /* hold policy supported (Dual MAC) */
+
+/*
+ * P06 : latency equivalent
+ */
+#define SMT_P_LATENCY 0x0006 /* latency */
+#define SWAP_SMT_P_LATENCY "ssss"
+
+/*
+ * note: latency has two phy entries by definition
+ * for a SAS, the 2nd one is null
+ */
+struct smt_p_latency {
+ struct smt_para para ; /* generic parameter header */
+ u_short lt_phyout_idx1 ; /* index */
+ u_short lt_latency1 ; /* latency , unit : byte clock */
+ u_short lt_phyout_idx2 ; /* 0 if SAS */
+ u_short lt_latency2 ; /* 0 if SAS */
+} ;
+
+/*
+ * P07 : MAC neighbors
+ */
+#define SMT_P_NEIGHBORS 0x0007 /* MAC neighbor description */
+#define SWAP_SMT_P_NEIGHBORS "ss66"
+
+struct smt_p_neighbor {
+ struct smt_para para ; /* generic parameter header */
+ u_short nb_mib_index ; /* MIB index */
+ u_short nb_mac_index ; /* n+1 .. n+m, m = #MACs, n = #PHYs */
+ struct fddi_addr nb_una ; /* UNA , 0 for unknown */
+ struct fddi_addr nb_dna ; /* DNA , 0 for unknown */
+} ;
+
+/*
+ * PHY record
+ */
+#define SMT_PHY_A 0 /* A port */
+#define SMT_PHY_B 1 /* B port */
+#define SMT_PHY_S 2 /* slave port */
+#define SMT_PHY_M 3 /* master port */
+
+#define SMT_CS_DISABLED 0 /* connect state : disabled */
+#define SMT_CS_CONNECTING 1 /* connect state : connecting */
+#define SMT_CS_STANDBY 2 /* connect state : stand by */
+#define SMT_CS_ACTIVE 3 /* connect state : active */
+
+#define SMT_RM_NONE 0
+#define SMT_RM_MAC 1
+
+struct smt_phy_rec {
+ u_short phy_mib_index ; /* MIB index */
+ u_char phy_type ; /* A/B/S/M */
+ u_char phy_connect_state ; /* disabled/connecting/active */
+ u_char phy_remote_type ; /* A/B/S/M */
+ u_char phy_remote_mac ; /* none/remote */
+ u_short phy_resource_idx ; /* 1 .. n */
+} ;
+
+/*
+ * MAC record
+ */
+struct smt_mac_rec {
+ struct fddi_addr mac_addr ; /* MAC address */
+ u_short mac_resource_idx ; /* n+1 .. n+m */
+} ;
+
+/*
+ * P08 : path descriptors
+ * should really be an array; however, our environment has a fixed number of
+ * PHYs and MACs
+ */
+#define SMT_P_PATH 0x0008 /* path descriptor */
+#define SWAP_SMT_P_PATH "[6s]"
+
+struct smt_p_path {
+ struct smt_para para ; /* generic parameter header */
+ struct smt_phy_rec pd_phy[2] ; /* PHY A */
+ struct smt_mac_rec pd_mac ; /* MAC record */
+} ;
+
+/*
+ * P09 : MAC status
+ */
+#define SMT_P_MAC_STATUS 0x0009 /* MAC status */
+#define SWAP_SMT_P_MAC_STATUS "sslllllllll"
+
+struct smt_p_mac_status {
+ struct smt_para para ; /* generic parameter header */
+ u_short st_mib_index ; /* MIB index */
+ u_short st_mac_index ; /* n+1 .. n+m */
+ u_int st_t_req ; /* T_Req */
+ u_int st_t_neg ; /* T_Neg */
+ u_int st_t_max ; /* T_Max */
+ u_int st_tvx_value ; /* TVX_Value */
+ u_int st_t_min ; /* T_Min */
+ u_int st_sba ; /* synchr. bandwidth alloc */
+ u_int st_frame_ct ; /* frame counter */
+ u_int st_error_ct ; /* error counter */
+ u_int st_lost_ct ; /* lost frames counter */
+} ;
+
+/*
+ * P0A : PHY link error rate monitoring
+ */
+#define SMT_P_LEM 0x000a /* link error monitor */
+#define SWAP_SMT_P_LEM "ssccccll"
+/*
+ * units of lem_cutoff,lem_alarm,lem_estimate : 10**-x
+ */
+struct smt_p_lem {
+ struct smt_para para ; /* generic parameter header */
+ u_short lem_mib_index ; /* MIB index */
+ u_short lem_phy_index ; /* 1 .. n */
+ u_char lem_pad2 ; /* be nice and make it even . */
+ u_char lem_cutoff ; /* 0x4 .. 0xf, default 0x7 */
+ u_char lem_alarm ; /* 0x4 .. 0xf, default 0x8 */
+ u_char lem_estimate ; /* 0x0 .. 0xff */
+ u_int lem_reject_ct ; /* 0x00000000 .. 0xffffffff */
+ u_int lem_ct ; /* 0x00000000 .. 0xffffffff */
+} ;
+
+/*
+ * P0B : MAC frame counters
+ */
+#define SMT_P_MAC_COUNTER 0x000b /* MAC frame counters */
+#define SWAP_SMT_P_MAC_COUNTER "ssll"
+
+struct smt_p_mac_counter {
+ struct smt_para para ; /* generic parameter header */
+ u_short mc_mib_index ; /* MIB index */
+ u_short mc_index ; /* mac index */
+ u_int mc_receive_ct ; /* receive counter */
+ u_int mc_transmit_ct ; /* transmit counter */
+} ;
+
+/*
+ * P0C : MAC frame not copied counter
+ */
+#define SMT_P_MAC_FNC 0x000c /* MAC frame not copied counter */
+#define SWAP_SMT_P_MAC_FNC "ssl"
+
+struct smt_p_mac_fnc {
+ struct smt_para para ; /* generic parameter header */
+ u_short nc_mib_index ; /* MIB index */
+ u_short nc_index ; /* mac index */
+ u_int nc_counter ; /* not copied counter */
+} ;
+
+
+/*
+ * P0D : MAC priority values
+ */
+#define SMT_P_PRIORITY 0x000d /* MAC priority values */
+#define SWAP_SMT_P_PRIORITY "ssl"
+
+struct smt_p_priority {
+ struct smt_para para ; /* generic parameter header */
+ u_short pr_mib_index ; /* MIB index */
+ u_short pr_index ; /* mac index */
+ u_int pr_priority[7] ; /* priority values */
+} ;
+
+/*
+ * P0E : PHY elasticity buffer status
+ */
+#define SMT_P_EB 0x000e /* PHY EB status */
+#define SWAP_SMT_P_EB "ssl"
+
+struct smt_p_eb {
+ struct smt_para para ; /* generic parameter header */
+ u_short eb_mib_index ; /* MIB index */
+ u_short eb_index ; /* phy index */
+ u_int eb_error_ct ; /* # of eb overflows */
+} ;
+
+/*
+ * P0F : manufacturer field
+ */
+#define SMT_P_MANUFACTURER 0x000f /* manufacturer field */
+#define SWAP_SMT_P_MANUFACTURER ""
+
+struct smp_p_manufacturer {
+ struct smt_para para ; /* generic parameter header */
+ u_char mf_data[32] ; /* OUI + arbitrary data */
+} ;
+
+/*
+ * P10 : user field
+ */
+#define SMT_P_USER 0x0010 /* manufacturer field */
+#define SWAP_SMT_P_USER ""
+
+struct smp_p_user {
+ struct smt_para para ; /* generic parameter header */
+ u_char us_data[32] ; /* arbitrary data */
+} ;
+
+
+
+/*
+ * P11 : echo data
+ */
+#define SMT_P_ECHODATA 0x0011 /* echo data */
+#define SWAP_SMT_P_ECHODATA ""
+
+struct smt_p_echo {
+ struct smt_para para ; /* generic parameter header */
+ u_char ec_data[SMT_MAX_ECHO_LEN-4] ; /* echo data */
+} ;
+
+/*
+ * P12 : reason code
+ */
+#define SMT_P_REASON 0x0012 /* reason code */
+#define SWAP_SMT_P_REASON "l"
+
+struct smt_p_reason {
+ struct smt_para para ; /* generic parameter header */
+ u_int rdf_reason ; /* CLASS/VERSION */
+} ;
+#define SMT_RDF_CLASS 0x00000001 /* class not supported */
+#define SMT_RDF_VERSION 0x00000002 /* version not supported */
+#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */
+#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */
+#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */
+#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */
+#define SMT_RDF_RANGE 0x8 /* out of range */
+#define SMT_RDF_AUTHOR 0x9 /* not authorized */
+#define SMT_RDF_LENGTH 0x0a /* length error */
+#define SMT_RDF_TOOLONG 0x0b /* length error */
+#define SMT_RDF_SBA 0x0d /* SBA denied */
+
+/*
+ * P13 : refused frame beginning
+ */
+#define SMT_P_REFUSED 0x0013 /* refused frame beginning */
+#define SWAP_SMT_P_REFUSED "l"
+
+struct smt_p_refused {
+ struct smt_para para ; /* generic parameter header */
+ u_int ref_fc ; /* 3 bytes 0 + FC */
+ struct smt_header ref_header ; /* refused header */
+} ;
+
+/*
+ * P14 : supported SMT versions
+ */
+#define SMT_P_VERSION 0x0014 /* SMT supported versions */
+#define SWAP_SMT_P_VERSION "sccss"
+
+struct smt_p_version {
+ struct smt_para para ; /* generic parameter header */
+ u_short v_pad ;
+ u_char v_n ; /* 1 .. 0xff, #versions */
+ u_char v_index ; /* 1 .. 0xff, index of op. v. */
+ u_short v_version[1] ; /* list of min. 1 version */
+ u_short v_pad2 ; /* pad if necessary */
+} ;
+
+/*
+ * P15 : Resource Type
+ */
+#define SWAP_SMT_P0015 "l"
+
+struct smt_p_0015 {
+ struct smt_para para ; /* generic parameter header */
+ u_int res_type ; /* resource type */
+} ;
+
+#define SYNC_BW 0x00000001L /* Synchronous Bandwidth */
+
+/*
+ * P16 : SBA Command
+ */
+#define SWAP_SMT_P0016 "l"
+
+struct smt_p_0016 {
+ struct smt_para para ; /* generic parameter header */
+ u_int sba_cmd ; /* command for the SBA */
+} ;
+
+#define REQUEST_ALLOCATION 0x1 /* req allocation of sync bandwidth */
+#define REPORT_ALLOCATION 0x2 /* rep of sync bandwidth allocation */
+#define CHANGE_ALLOCATION 0x3 /* forces a station using sync band-*/
+ /* width to change its current allo-*/
+ /* cation */
+
+/*
+ * P17 : SBA Payload Request
+ */
+#define SWAP_SMT_P0017 "l"
+
+struct smt_p_0017 {
+ struct smt_para para ; /* generic parameter header */
+ int sba_pl_req ; /* total sync bandwidth measured in */
+} ; /* bytes per 125 us */
+
+/*
+ * P18 : SBA Overhead Request
+ */
+#define SWAP_SMT_P0018 "l"
+
+struct smt_p_0018 {
+ struct smt_para para ; /* generic parameter header */
+ int sba_ov_req ; /* total sync bandwidth req for overhead*/
+} ; /* measured in bytes per T_Neg */
+
+/*
+ * P19 : SBA Allocation Address
+ */
+#define SWAP_SMT_P0019 "s6"
+
+struct smt_p_0019 {
+ struct smt_para para ; /* generic parameter header */
+ u_short sba_pad ;
+ struct fddi_addr alloc_addr ; /* Allocation Address */
+} ;
+
+/*
+ * P1A : SBA Category
+ */
+#define SWAP_SMT_P001A "l"
+
+struct smt_p_001a {
+ struct smt_para para ; /* generic parameter header */
+ u_int category ; /* Allocator defined classification */
+} ;
+
+/*
+ * P1B : Maximum T_Neg
+ */
+#define SWAP_SMT_P001B "l"
+
+struct smt_p_001b {
+ struct smt_para para ; /* generic parameter header */
+ u_int max_t_neg ; /* longest T_NEG for the sync service*/
+} ;
+
+/*
+ * P1C : Minimum SBA Segment Size
+ */
+#define SWAP_SMT_P001C "l"
+
+struct smt_p_001c {
+ struct smt_para para ; /* generic parameter header */
+ u_int min_seg_siz ; /* smallest number of bytes per frame*/
+} ;
+
+/*
+ * P1D : SBA Allocatable
+ */
+#define SWAP_SMT_P001D "l"
+
+struct smt_p_001d {
+ struct smt_para para ; /* generic parameter header */
+ u_int allocatable ; /* total sync bw available for alloc */
+} ;
+
+/*
+ * P20 0B : frame status capabilities
+ * NOTE: not in swap table, is used by smt.c AND PMF table
+ */
+#define SMT_P_FSC 0x200b
+/* #define SWAP_SMT_P_FSC "ssss" */
+
+struct smt_p_fsc {
+ struct smt_para para ; /* generic parameter header */
+ u_short fsc_pad0 ;
+ u_short fsc_mac_index ; /* mac index 1 .. ff */
+ u_short fsc_pad1 ;
+ u_short fsc_value ; /* FSC_TYPE[0-2] */
+} ;
+
+#define FSC_TYPE0 0 /* "normal" node (A/C handling) */
+#define FSC_TYPE1 1 /* Special A/C indicator forwarding */
+#define FSC_TYPE2 2 /* Special A/C indicator forwarding */
+
+/*
+ * P00 21 : user defined authorization (see pmf.c)
+ */
+#define SMT_P_AUTHOR 0x0021
+
+/*
+ * notification parameters
+ */
+#define SWAP_SMT_P1048 "ll"
+struct smt_p_1048 {
+ u_int p1048_flag ;
+ u_int p1048_cf_state ;
+} ;
+
+/*
+ * NOTE: all 2xxx 3xxx and 4xxx must include the INDEX in the swap string,
+ * even though the INDEX is NOT part of the struct.
+ * INDEX is already swapped in pmf.c, format in string is '4'
+ */
+#define SWAP_SMT_P208C "4lss66"
+struct smt_p_208c {
+ u_int p208c_flag ;
+ u_short p208c_pad ;
+ u_short p208c_dupcondition ;
+ struct fddi_addr p208c_fddilong ;
+ struct fddi_addr p208c_fddiunalong ;
+} ;
+
+#define SWAP_SMT_P208D "4lllll"
+struct smt_p_208d {
+ u_int p208d_flag ;
+ u_int p208d_frame_ct ;
+ u_int p208d_error_ct ;
+ u_int p208d_lost_ct ;
+ u_int p208d_ratio ;
+} ;
+
+#define SWAP_SMT_P208E "4llll"
+struct smt_p_208e {
+ u_int p208e_flag ;
+ u_int p208e_not_copied ;
+ u_int p208e_copied ;
+ u_int p208e_not_copied_ratio ;
+} ;
+
+#define SWAP_SMT_P208F "4ll6666s6"
+
+struct smt_p_208f {
+ u_int p208f_multiple ;
+ u_int p208f_nacondition ;
+ struct fddi_addr p208f_old_una ;
+ struct fddi_addr p208f_new_una ;
+ struct fddi_addr p208f_old_dna ;
+ struct fddi_addr p208f_new_dna ;
+ u_short p208f_curren_path ;
+ struct fddi_addr p208f_smt_address ;
+} ;
+
+#define SWAP_SMT_P2090 "4lssl"
+
+struct smt_p_2090 {
+ u_int p2090_multiple ;
+ u_short p2090_availablepaths ;
+ u_short p2090_currentpath ;
+ u_int p2090_requestedpaths ;
+} ;
+
+/*
+ * NOTE:
+ * special kludge for parameters 320b,320f,3210
+ * these parameters are part of RAF frames
+ * RAF frames are parsed in SBA.C and must be swapped
+ * PMF.C has special code to avoid double swapping
+ */
+#ifdef LITTLE_ENDIAN
+#define SBAPATHINDEX (0x01000000L)
+#else
+#define SBAPATHINDEX (0x01L)
+#endif
+
+#define SWAP_SMT_P320B "42s"
+
+struct smt_p_320b {
+ struct smt_para para ; /* generic parameter header */
+ u_int mib_index ;
+ u_short path_pad ;
+ u_short path_index ;
+} ;
+
+#define SWAP_SMT_P320F "4l"
+
+struct smt_p_320f {
+ struct smt_para para ; /* generic parameter header */
+ u_int mib_index ;
+ u_int mib_payload ;
+} ;
+
+#define SWAP_SMT_P3210 "4l"
+
+struct smt_p_3210 {
+ struct smt_para para ; /* generic parameter header */
+ u_int mib_index ;
+ u_int mib_overhead ;
+} ;
+
+#define SWAP_SMT_P4050 "4l1111ll"
+
+struct smt_p_4050 {
+ u_int p4050_flag ;
+ u_char p4050_pad ;
+ u_char p4050_cutoff ;
+ u_char p4050_alarm ;
+ u_char p4050_estimate ;
+ u_int p4050_reject_ct ;
+ u_int p4050_ct ;
+} ;
+
+#define SWAP_SMT_P4051 "4lssss"
+struct smt_p_4051 {
+ u_int p4051_multiple ;
+ u_short p4051_porttype ;
+ u_short p4051_connectstate ;
+ u_short p4051_pc_neighbor ;
+ u_short p4051_pc_withhold ;
+} ;
+
+#define SWAP_SMT_P4052 "4ll"
+struct smt_p_4052 {
+ u_int p4052_flag ;
+ u_int p4052_eberrorcount ;
+} ;
+
+#define SWAP_SMT_P4053 "4lsslss"
+
+struct smt_p_4053 {
+ u_int p4053_multiple ;
+ u_short p4053_availablepaths ;
+ u_short p4053_currentpath ;
+ u_int p4053_requestedpaths ;
+ u_short p4053_mytype ;
+ u_short p4053_neighbortype ;
+} ;
+
+
+#define SMT_P_SETCOUNT 0x1035
+#define SWAP_SMT_P_SETCOUNT "l8"
+
+struct smt_p_setcount {
+ struct smt_para para ; /* generic parameter header */
+ u_int count ;
+ u_char timestamp[8] ;
+} ;
+
+/*
+ * SMT FRAMES
+ */
+
+/*
+ * NIF : neighbor information frames
+ */
+struct smt_nif {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_una una ; /* UNA */
+ struct smt_p_sde sde ; /* station descriptor */
+ struct smt_p_state state ; /* station state */
+#ifdef SMT6_10
+ struct smt_p_fsc fsc ; /* frame status cap. */
+#endif
+} ;
+
+/*
+ * SIF : station information frames
+ */
+struct smt_sif_config {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_timestamp ts ; /* time stamp */
+ struct smt_p_sde sde ; /* station descriptor */
+ struct smt_p_version version ; /* supported versions */
+ struct smt_p_state state ; /* station state */
+ struct smt_p_policy policy ; /* station policy */
+ struct smt_p_latency latency ; /* path latency */
+ struct smt_p_neighbor neighbor ; /* neighbors, we have only one*/
+#ifdef OPT_PMF
+ struct smt_p_setcount setcount ; /* Set Count mandatory */
+#endif
+ /* WARNING : path MUST BE LAST FIELD !!! (see smt.c:smt_fill_path) */
+ struct smt_p_path path ; /* path descriptor */
+} ;
+#define SIZEOF_SMT_SIF_CONFIG (sizeof(struct smt_sif_config)- \
+ sizeof(struct smt_p_path))
+
+struct smt_sif_operation {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_timestamp ts ; /* time stamp */
+ struct smt_p_mac_status status ; /* mac status */
+ struct smt_p_mac_counter mc ; /* MAC counter */
+ struct smt_p_mac_fnc fnc ; /* MAC frame not copied */
+ struct smp_p_manufacturer man ; /* manufacturer field */
+ struct smp_p_user user ; /* user field */
+#ifdef OPT_PMF
+ struct smt_p_setcount setcount ; /* Set Count mandatory */
+#endif
+ /* must be last */
+ struct smt_p_lem lem[1] ; /* phy lem status */
+} ;
+#define SIZEOF_SMT_SIF_OPERATION (sizeof(struct smt_sif_operation)- \
+ sizeof(struct smt_p_lem))
+
+/*
+ * ECF : echo frame
+ */
+struct smt_ecf {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_echo ec_echo ; /* echo parameter */
+} ;
+#define SMT_ECF_LEN (sizeof(struct smt_header)+sizeof(struct smt_para))
+
+/*
+ * RDF : request denied frame
+ */
+struct smt_rdf {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_reason reason ; /* reason code */
+ struct smt_p_version version ; /* supported versions */
+ struct smt_p_refused refused ; /* refused frame fragment */
+} ;
+
+/*
+ * SBA Request Allocation Response Frame
+ */
+struct smt_sba_alc_res {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_0015 s_type ; /* resource type */
+ struct smt_p_0016 cmd ; /* SBA command */
+ struct smt_p_reason reason ; /* reason code */
+ struct smt_p_320b path ; /* path type */
+ struct smt_p_320f payload ; /* current SBA payload */
+ struct smt_p_3210 overhead ; /* current SBA overhead */
+ struct smt_p_0019 a_addr ; /* Allocation Address */
+ struct smt_p_001a cat ; /* Category - from the request */
+ struct smt_p_001d alloc ; /* SBA Allocatable */
+} ;
+
+/*
+ * SBA Request Allocation Request Frame
+ */
+struct smt_sba_alc_req {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_0015 s_type ; /* resource type */
+ struct smt_p_0016 cmd ; /* SBA command */
+ struct smt_p_320b path ; /* path type */
+ struct smt_p_0017 pl_req ; /* requested payload */
+ struct smt_p_0018 ov_req ; /* requested SBA overhead */
+ struct smt_p_320f payload ; /* current SBA payload */
+ struct smt_p_3210 overhead ; /* current SBA overhead */
+ struct smt_p_0019 a_addr ; /* Allocation Address */
+ struct smt_p_001a cat ; /* Category - from the request */
+ struct smt_p_001b tneg ; /* max T-NEG */
+ struct smt_p_001c segm ; /* minimum segment size */
+} ;
+
+/*
+ * SBA Change Allocation Request Frame
+ */
+struct smt_sba_chg {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_0015 s_type ; /* resource type */
+ struct smt_p_0016 cmd ; /* SBA command */
+ struct smt_p_320b path ; /* path type */
+ struct smt_p_320f payload ; /* current SBA payload */
+ struct smt_p_3210 overhead ; /* current SBA overhead */
+ struct smt_p_001a cat ; /* Category - from the request */
+} ;
+
+/*
+ * SBA Report Allocation Request Frame
+ */
+struct smt_sba_rep_req {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_0015 s_type ; /* resource type */
+ struct smt_p_0016 cmd ; /* SBA command */
+} ;
+
+/*
+ * SBA Report Allocation Response Frame
+ */
+struct smt_sba_rep_res {
+ struct smt_header smt ; /* generic header */
+ struct smt_p_0015 s_type ; /* resource type */
+ struct smt_p_0016 cmd ; /* SBA command */
+ struct smt_p_320b path ; /* path type */
+ struct smt_p_320f payload ; /* current SBA payload */
+ struct smt_p_3210 overhead ; /* current SBA overhead */
+} ;
+
+/*
+ * actions
+ */
+#define SMT_STATION_ACTION 1
+#define SMT_STATION_ACTION_CONNECT 0
+#define SMT_STATION_ACTION_DISCONNECT 1
+#define SMT_STATION_ACTION_PATHTEST 2
+#define SMT_STATION_ACTION_SELFTEST 3
+#define SMT_STATION_ACTION_DISABLE_A 4
+#define SMT_STATION_ACTION_DISABLE_B 5
+#define SMT_STATION_ACTION_DISABLE_M 6
+
+#define SMT_PORT_ACTION 2
+#define SMT_PORT_ACTION_MAINT 0
+#define SMT_PORT_ACTION_ENABLE 1
+#define SMT_PORT_ACTION_DISABLE 2
+#define SMT_PORT_ACTION_START 3
+#define SMT_PORT_ACTION_STOP 4
+
+#endif /* _SMT_ */
diff --git a/drivers/net/skfp/h/smt_p.h b/drivers/net/skfp/h/smt_p.h
new file mode 100644
index 000000000000..99f9be9552bb
--- /dev/null
+++ b/drivers/net/skfp/h/smt_p.h
@@ -0,0 +1,326 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * defines for all SMT attributes
+ */
+
+/*
+ * this boring file was produced by perl
+ * thanks Larry !
+ */
+#define SMT_P0012 0x0012
+
+#define SMT_P0015 0x0015
+#define SMT_P0016 0x0016
+#define SMT_P0017 0x0017
+#define SMT_P0018 0x0018
+#define SMT_P0019 0x0019
+
+#define SMT_P001A 0x001a
+#define SMT_P001B 0x001b
+#define SMT_P001C 0x001c
+#define SMT_P001D 0x001d
+
+#define SMT_P100A 0x100a
+#define SMT_P100B 0x100b
+#define SMT_P100C 0x100c
+#define SMT_P100D 0x100d
+#define SMT_P100E 0x100e
+#define SMT_P100F 0x100f
+#define SMT_P1010 0x1010
+#define SMT_P1011 0x1011
+#define SMT_P1012 0x1012
+#define SMT_P1013 0x1013
+#define SMT_P1014 0x1014
+#define SMT_P1015 0x1015
+#define SMT_P1016 0x1016
+#define SMT_P1017 0x1017
+#define SMT_P1018 0x1018
+#define SMT_P1019 0x1019
+#define SMT_P101A 0x101a
+#define SMT_P101B 0x101b
+#define SMT_P101C 0x101c
+#define SMT_P101D 0x101d
+#define SMT_P101E 0x101e
+#define SMT_P101F 0x101f
+#define SMT_P1020 0x1020
+#define SMT_P1021 0x1021
+#define SMT_P1022 0x1022
+#define SMT_P1023 0x1023
+#define SMT_P1024 0x1024
+#define SMT_P1025 0x1025
+#define SMT_P1026 0x1026
+#define SMT_P1027 0x1027
+#define SMT_P1028 0x1028
+#define SMT_P1029 0x1029
+#define SMT_P102A 0x102a
+#define SMT_P102B 0x102b
+#define SMT_P102C 0x102c
+#define SMT_P102D 0x102d
+#define SMT_P102E 0x102e
+#define SMT_P102F 0x102f
+#define SMT_P1030 0x1030
+#define SMT_P1031 0x1031
+#define SMT_P1032 0x1032
+#define SMT_P1033 0x1033
+#define SMT_P1034 0x1034
+#define SMT_P1035 0x1035
+#define SMT_P1036 0x1036
+#define SMT_P1037 0x1037
+#define SMT_P1038 0x1038
+#define SMT_P1039 0x1039
+#define SMT_P103A 0x103a
+#define SMT_P103B 0x103b
+#define SMT_P103C 0x103c
+#define SMT_P103D 0x103d
+#define SMT_P103E 0x103e
+#define SMT_P103F 0x103f
+#define SMT_P1040 0x1040
+#define SMT_P1041 0x1041
+#define SMT_P1042 0x1042
+#define SMT_P1043 0x1043
+#define SMT_P1044 0x1044
+#define SMT_P1045 0x1045
+#define SMT_P1046 0x1046
+#define SMT_P1047 0x1047
+#define SMT_P1048 0x1048
+#define SMT_P1049 0x1049
+#define SMT_P104A 0x104a
+#define SMT_P104B 0x104b
+#define SMT_P104C 0x104c
+#define SMT_P104D 0x104d
+#define SMT_P104E 0x104e
+#define SMT_P104F 0x104f
+#define SMT_P1050 0x1050
+#define SMT_P1051 0x1051
+#define SMT_P1052 0x1052
+#define SMT_P1053 0x1053
+#define SMT_P1054 0x1054
+
+#define SMT_P10F0 0x10f0
+#define SMT_P10F1 0x10f1
+#ifdef ESS
+#define SMT_P10F2 0x10f2
+#define SMT_P10F3 0x10f3
+#define SMT_P10F4 0x10f4
+#define SMT_P10F5 0x10f5
+#define SMT_P10F6 0x10f6
+#define SMT_P10F7 0x10f7
+#endif
+#ifdef SBA
+#define SMT_P10F8 0x10f8
+#define SMT_P10F9 0x10f9
+#endif
+
+#define SMT_P200A 0x200a
+#define SMT_P200B 0x200b
+#define SMT_P200C 0x200c
+#define SMT_P200D 0x200d
+#define SMT_P200E 0x200e
+#define SMT_P200F 0x200f
+#define SMT_P2010 0x2010
+#define SMT_P2011 0x2011
+#define SMT_P2012 0x2012
+#define SMT_P2013 0x2013
+#define SMT_P2014 0x2014
+#define SMT_P2015 0x2015
+#define SMT_P2016 0x2016
+#define SMT_P2017 0x2017
+#define SMT_P2018 0x2018
+#define SMT_P2019 0x2019
+#define SMT_P201A 0x201a
+#define SMT_P201B 0x201b
+#define SMT_P201C 0x201c
+#define SMT_P201D 0x201d
+#define SMT_P201E 0x201e
+#define SMT_P201F 0x201f
+#define SMT_P2020 0x2020
+#define SMT_P2021 0x2021
+#define SMT_P2022 0x2022
+#define SMT_P2023 0x2023
+#define SMT_P2024 0x2024
+#define SMT_P2025 0x2025
+#define SMT_P2026 0x2026
+#define SMT_P2027 0x2027
+#define SMT_P2028 0x2028
+#define SMT_P2029 0x2029
+#define SMT_P202A 0x202a
+#define SMT_P202B 0x202b
+#define SMT_P202C 0x202c
+#define SMT_P202D 0x202d
+#define SMT_P202E 0x202e
+#define SMT_P202F 0x202f
+#define SMT_P2030 0x2030
+#define SMT_P2031 0x2031
+#define SMT_P2032 0x2032
+#define SMT_P2033 0x2033
+#define SMT_P2034 0x2034
+#define SMT_P2035 0x2035
+#define SMT_P2036 0x2036
+#define SMT_P2037 0x2037
+#define SMT_P2038 0x2038
+#define SMT_P2039 0x2039
+#define SMT_P203A 0x203a
+#define SMT_P203B 0x203b
+#define SMT_P203C 0x203c
+#define SMT_P203D 0x203d
+#define SMT_P203E 0x203e
+#define SMT_P203F 0x203f
+#define SMT_P2040 0x2040
+#define SMT_P2041 0x2041
+#define SMT_P2042 0x2042
+#define SMT_P2043 0x2043
+#define SMT_P2044 0x2044
+#define SMT_P2045 0x2045
+#define SMT_P2046 0x2046
+#define SMT_P2047 0x2047
+#define SMT_P2048 0x2048
+#define SMT_P2049 0x2049
+#define SMT_P204A 0x204a
+#define SMT_P204B 0x204b
+#define SMT_P204C 0x204c
+#define SMT_P204D 0x204d
+#define SMT_P204E 0x204e
+#define SMT_P204F 0x204f
+#define SMT_P2050 0x2050
+#define SMT_P2051 0x2051
+#define SMT_P2052 0x2052
+#define SMT_P2053 0x2053
+#define SMT_P2054 0x2054
+#define SMT_P2055 0x2055
+#define SMT_P2056 0x2056
+#define SMT_P2057 0x2057
+#define SMT_P2058 0x2058
+#define SMT_P2059 0x2059
+#define SMT_P205A 0x205a
+#define SMT_P205B 0x205b
+#define SMT_P205C 0x205c
+#define SMT_P205D 0x205d
+#define SMT_P205E 0x205e
+#define SMT_P205F 0x205f
+#define SMT_P2060 0x2060
+#define SMT_P2061 0x2061
+#define SMT_P2062 0x2062
+#define SMT_P2063 0x2063
+#define SMT_P2064 0x2064
+#define SMT_P2065 0x2065
+#define SMT_P2066 0x2066
+#define SMT_P2067 0x2067
+#define SMT_P2068 0x2068
+#define SMT_P2069 0x2069
+#define SMT_P206A 0x206a
+#define SMT_P206B 0x206b
+#define SMT_P206C 0x206c
+#define SMT_P206D 0x206d
+#define SMT_P206E 0x206e
+#define SMT_P206F 0x206f
+#define SMT_P2070 0x2070
+#define SMT_P2071 0x2071
+#define SMT_P2072 0x2072
+#define SMT_P2073 0x2073
+#define SMT_P2074 0x2074
+#define SMT_P2075 0x2075
+#define SMT_P2076 0x2076
+
+#define SMT_P208C 0x208c
+#define SMT_P208D 0x208d
+#define SMT_P208E 0x208e
+#define SMT_P208F 0x208f
+#define SMT_P2090 0x2090
+
+#define SMT_P20F0 0x20F0
+#define SMT_P20F1 0x20F1
+
+#define SMT_P320A 0x320a
+#define SMT_P320B 0x320b
+#define SMT_P320C 0x320c
+#define SMT_P320D 0x320d
+#define SMT_P320E 0x320e
+#define SMT_P320F 0x320f
+#define SMT_P3210 0x3210
+#define SMT_P3211 0x3211
+#define SMT_P3212 0x3212
+#define SMT_P3213 0x3213
+#define SMT_P3214 0x3214
+#define SMT_P3215 0x3215
+#define SMT_P3216 0x3216
+#define SMT_P3217 0x3217
+
+#define SMT_P400A 0x400a
+#define SMT_P400B 0x400b
+#define SMT_P400C 0x400c
+#define SMT_P400D 0x400d
+#define SMT_P400E 0x400e
+#define SMT_P400F 0x400f
+#define SMT_P4010 0x4010
+#define SMT_P4011 0x4011
+#define SMT_P4012 0x4012
+#define SMT_P4013 0x4013
+#define SMT_P4014 0x4014
+#define SMT_P4015 0x4015
+#define SMT_P4016 0x4016
+#define SMT_P4017 0x4017
+#define SMT_P4018 0x4018
+#define SMT_P4019 0x4019
+#define SMT_P401A 0x401a
+#define SMT_P401B 0x401b
+#define SMT_P401C 0x401c
+#define SMT_P401D 0x401d
+#define SMT_P401E 0x401e
+#define SMT_P401F 0x401f
+#define SMT_P4020 0x4020
+#define SMT_P4021 0x4021
+#define SMT_P4022 0x4022
+#define SMT_P4023 0x4023
+#define SMT_P4024 0x4024
+#define SMT_P4025 0x4025
+#define SMT_P4026 0x4026
+#define SMT_P4027 0x4027
+#define SMT_P4028 0x4028
+#define SMT_P4029 0x4029
+#define SMT_P402A 0x402a
+#define SMT_P402B 0x402b
+#define SMT_P402C 0x402c
+#define SMT_P402D 0x402d
+#define SMT_P402E 0x402e
+#define SMT_P402F 0x402f
+#define SMT_P4030 0x4030
+#define SMT_P4031 0x4031
+#define SMT_P4032 0x4032
+#define SMT_P4033 0x4033
+#define SMT_P4034 0x4034
+#define SMT_P4035 0x4035
+#define SMT_P4036 0x4036
+#define SMT_P4037 0x4037
+#define SMT_P4038 0x4038
+#define SMT_P4039 0x4039
+#define SMT_P403A 0x403a
+#define SMT_P403B 0x403b
+#define SMT_P403C 0x403c
+#define SMT_P403D 0x403d
+#define SMT_P403E 0x403e
+#define SMT_P403F 0x403f
+#define SMT_P4040 0x4040
+#define SMT_P4041 0x4041
+#define SMT_P4042 0x4042
+#define SMT_P4043 0x4043
+#define SMT_P4044 0x4044
+#define SMT_P4045 0x4045
+#define SMT_P4046 0x4046
+
+#define SMT_P4050 0x4050
+#define SMT_P4051 0x4051
+#define SMT_P4052 0x4052
+#define SMT_P4053 0x4053
diff --git a/drivers/net/skfp/h/smtstate.h b/drivers/net/skfp/h/smtstate.h
new file mode 100644
index 000000000000..62fe695077a9
--- /dev/null
+++ b/drivers/net/skfp/h/smtstate.h
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _SKFP_H_SMTSTATE_H_
+#define _SKFP_H_SMTSTATE_H_
+
+/*
+ * SMT state definitions
+ */
+
+#ifndef KERNEL
+/*
+ * PCM states
+ */
+#define PC0_OFF 0
+#define PC1_BREAK 1
+#define PC2_TRACE 2
+#define PC3_CONNECT 3
+#define PC4_NEXT 4
+#define PC5_SIGNAL 5
+#define PC6_JOIN 6
+#define PC7_VERIFY 7
+#define PC8_ACTIVE 8
+#define PC9_MAINT 9
+
+/*
+ * PCM modes
+ */
+#define PM_NONE 0
+#define PM_PEER 1
+#define PM_TREE 2
+
+/*
+ * PCM type
+ */
+#define TA 0
+#define TB 1
+#define TS 2
+#define TM 3
+#define TNONE 4
+
+/*
+ * CFM states
+ */
+#define SC0_ISOLATED 0 /* isolated */
+#define SC1_WRAP_A 5 /* wrap A */
+#define SC2_WRAP_B 6 /* wrap B */
+#define SC4_THRU_A 12 /* through A */
+#define SC5_THRU_B 7 /* through B (SMT 6.2) */
+#define SC7_WRAP_S 8 /* SAS */
+
+/*
+ * ECM states
+ */
+#define EC0_OUT 0
+#define EC1_IN 1
+#define EC2_TRACE 2
+#define EC3_LEAVE 3
+#define EC4_PATH_TEST 4
+#define EC5_INSERT 5
+#define EC6_CHECK 6
+#define EC7_DEINSERT 7
+
+/*
+ * RMT states
+ */
+#define RM0_ISOLATED 0
+#define RM1_NON_OP 1 /* not operational */
+#define RM2_RING_OP 2 /* ring operational */
+#define RM3_DETECT 3 /* detect dupl addresses */
+#define RM4_NON_OP_DUP 4 /* dupl. addr detected */
+#define RM5_RING_OP_DUP 5 /* ring oper. with dupl. addr */
+#define RM6_DIRECTED 6 /* sending directed beacons */
+#define RM7_TRACE 7 /* trace initiated */
+#endif
+
+struct pcm_state {
+ unsigned char pcm_type ; /* TA TB TS TM */
+ unsigned char pcm_state ; /* state PC[0-9]_* */
+ unsigned char pcm_mode ; /* PM_{NONE,PEER,TREE} */
+ unsigned char pcm_neighbor ; /* TA TB TS TM */
+ unsigned char pcm_bsf ; /* flag bs : TRUE/FALSE */
+ unsigned char pcm_lsf ; /* flag ls : TRUE/FALSE */
+ unsigned char pcm_lct_fail ; /* counter lct_fail */
+ unsigned char pcm_ls_rx ; /* rx line state */
+ short pcm_r_val ; /* signaling bits */
+ short pcm_t_val ; /* signaling bits */
+} ;
+
+struct smt_state {
+ struct pcm_state pcm_state[NUMPHYS] ; /* port A & port B */
+} ;
+
+#endif
+
diff --git a/drivers/net/skfp/h/supern_2.h b/drivers/net/skfp/h/supern_2.h
new file mode 100644
index 000000000000..5ba0b8306753
--- /dev/null
+++ b/drivers/net/skfp/h/supern_2.h
@@ -0,0 +1,1059 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ defines for AMD Supernet II chip set
+ the chips are referred to as
+ FPLUS Formac Plus
+ PLC Physical Layer
+
+ added defines for AMD Supernet III chip set
+ added comments on differences between Supernet II and Supernet III
+ added defines for the Motorola ELM (MOT_ELM)
+*/
+
+#ifndef _SUPERNET_
+#define _SUPERNET_
+
+/*
+ * Define Supernet 3 when used
+ */
+#ifdef PCI
+#ifndef SUPERNET_3
+#define SUPERNET_3
+#endif
+#define TAG
+#endif
+
+#define MB 0xff
+#define MW 0xffff
+#define MD 0xffffffff
+
+/*
+ * FORMAC frame status (rx_msext)
+ */
+#define FS_EI (1<<2)
+#define FS_AI (1<<1)
+#define FS_CI (1<<0)
+
+#define FS_MSVALID (1<<15) /* end of queue */
+#define FS_MSRABT (1<<14) /* frame was aborted during reception*/
+#define FS_SSRCRTG (1<<12) /* if SA has set MSB (source-routing)*/
+#define FS_SEAC2 (FS_EI<<9) /* error indicator */
+#define FS_SEAC1 (FS_AI<<9) /* address indicator */
+#define FS_SEAC0 (FS_CI<<9) /* copy indicator */
+#define FS_SFRMERR (1<<8) /* error detected (CRC or length) */
+#define FS_SADRRG (1<<7) /* address recognized */
+#define FS_SFRMTY2 (1<<6) /* frame-class bit */
+#define FS_SFRMTY1 (1<<5) /* frame-type bit (implementor) */
+#define FS_SFRMTY0 (1<<4) /* frame-type bit (LLC) */
+#define FS_ERFBB1 (1<<1) /* byte offset (depends on LSB bit) */
+#define FS_ERFBB0 (1<<0) /* - " - */
+
+/*
+ * status frame type
+ */
+#define FRM_SMT (0) /* asynchr. frames */
+#define FRM_LLCA (1)
+#define FRM_IMPA (2)
+#define FRM_MAC (4) /* synchr. frames */
+#define FRM_LLCS (5)
+#define FRM_IMPS (6)
+
+/*
+ * bits in rx_descr.i (receive frame status word)
+ */
+#define RX_MSVALID ((long)1<<31) /* memory status valid */
+#define RX_MSRABT ((long)1<<30) /* memory status receive abort */
+#define RX_FS_E ((long)FS_SEAC2<<16) /* error indicator */
+#define RX_FS_A ((long)FS_SEAC1<<16) /* address indicator */
+#define RX_FS_C ((long)FS_SEAC0<<16) /* copy indicator */
+#define RX_FS_CRC ((long)FS_SFRMERR<<16)/* error detected */
+#define RX_FS_ADDRESS ((long)FS_SADRRG<<16) /* address recognized */
+#define RX_FS_MAC ((long)FS_SFRMTY2<<16)/* MAC frame */
+#define RX_FS_SMT ((long)0<<16) /* SMT frame */
+#define RX_FS_IMPL ((long)FS_SFRMTY1<<16)/* implementer frame */
+#define RX_FS_LLC ((long)FS_SFRMTY0<<16)/* LLC frame */
+
+/*
+ * receive frame descriptor
+ */
+union rx_descr {
+ struct {
+#ifdef LITTLE_ENDIAN
+ unsigned rx_length :16 ; /* frame length lower/upper byte */
+ unsigned rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned rx_reserv2:2 ; /* reserved */
+ unsigned rx_sfrmty :3 ; /* frame type bits */
+ unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned rx_reserv1:1 ; /* reserved */
+ unsigned rx_msrabt :1 ; /* memory status receive abort */
+ unsigned rx_msvalid:1 ; /* memory status valid */
+#else
+ unsigned rx_msvalid:1 ; /* memory status valid */
+ unsigned rx_msrabt :1 ; /* memory status receive abort */
+ unsigned rx_reserv1:1 ; /* reserved */
+ unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned rx_sfrmty :3 ; /* frame type bits */
+ unsigned rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned rx_reserv2:2 ; /* reserved */
+ unsigned rx_length :16 ; /* frame length lower/upper byte */
+#endif
+ } r ;
+ long i ;
+} ;
+
+/* defines for Receive Frame Descriptor access */
+#define RD_S_ERFBB 0x00030000L /* received frame byte boundary */
+#define RD_S_RES2 0x000c0000L /* reserved */
+#define RD_S_SFRMTY 0x00700000L /* frame type bits */
+#define RD_S_SADRRG 0x00800000L /* DA == MA or broad-/multicast */
+#define RD_S_SFRMERR 0x01000000L /* received frame not valid */
+#define RD_S_SEAC 0x0e000000L /* frame status indicators */
+#define RD_S_SEAC0 0x02000000L /* frame-copied case-indicator */
+#define RD_S_SEAC1 0x04000000L /* address-match A-indicator */
+#define RD_S_SEAC2 0x08000000L /* frame-error E-indicator */
+#define RD_S_SSRCRTG 0x10000000L /* == 1 SA has MSB set */
+#define RD_S_RES1 0x20000000L /* reserved */
+#define RD_S_MSRABT 0x40000000L /* memory status receive abort */
+#define RD_S_MSVALID 0x80000000L /* memory status valid */
+
+#define RD_STATUS 0xffff0000L
+#define RD_LENGTH 0x0000ffffL
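
Editor's note: the rx_descr union can be read either through the endian-dependent bit fields (the .r view) or through the 32-bit word .i combined with the RD_S_*/RD_LENGTH masks above. A hedged sketch of decoding a completed descriptor via the mask view (the helper and its accept/reject policy are illustrative only):

        /* return the frame length for a valid, error-free, address-matched frame,
         * or -1 if the descriptor should be skipped
         */
        static long rx_frame_length_sketch(union rx_descr *r)
        {
                if (!(r->i & RD_S_MSVALID))             /* no memory status yet */
                        return -1 ;
                if (r->i & (RD_S_MSRABT | RD_S_SFRMERR))        /* aborted or CRC/length error */
                        return -1 ;
                if (!(r->i & RD_S_SADRRG))              /* address not recognized */
                        return -1 ;
                return r->i & RD_LENGTH ;               /* lower 16 bits: frame length */
        }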
+
+/* defines for Receive Frames Status Word values */
+/*RD_S_SFRMTY*/
+#define RD_FRM_SMT (unsigned long)(0<<20) /* asynchr. frames */
+#define RD_FRM_LLCA (unsigned long)(1<<20)
+#define RD_FRM_IMPA (unsigned long)(2<<20)
+#define RD_FRM_MAC (unsigned long)(4<<20) /* synchr. frames */
+#define RD_FRM_LLCS (unsigned long)(5<<20)
+#define RD_FRM_IMPS (unsigned long)(6<<20)
+
+#define TX_DESCRIPTOR 0x40000000L
+#define TX_OFFSET_3 0x18000000L
+
+#define TXP1 2
+
+/*
+ * transmit frame descriptor
+ */
+union tx_descr {
+ struct {
+#ifdef LITTLE_ENDIAN
+ unsigned tx_length:16 ; /* frame length lower/upper byte */
+ unsigned tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned tx_xmtabt:1 ; /* transmit abort */
+ unsigned tx_nfcs :1 ; /* no frame check sequence */
+ unsigned tx_xdone :1 ; /* give up token */
+ unsigned tx_rpxm :2 ; /* byte offset */
+ unsigned tx_pat1 :2 ; /* must be TXP1 */
+ unsigned tx_more :1 ; /* more frame in chain */
+#else
+ unsigned tx_more :1 ; /* more frame in chain */
+ unsigned tx_pat1 :2 ; /* must be TXP1 */
+ unsigned tx_rpxm :2 ; /* byte offset */
+ unsigned tx_xdone :1 ; /* give up token */
+ unsigned tx_nfcs :1 ; /* no frame check sequence */
+ unsigned tx_xmtabt:1 ; /* transmit abort */
+ unsigned tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned tx_length:16 ; /* frame length lower/upper byte */
+#endif
+ } t ;
+ long i ;
+} ;
+
+/* defines for Transmit Descriptor access */
+#define TD_C_MORE 0x80000000L /* more frame in chain */
+#define TD_C_DESCR 0x60000000L /* must be TXP1 */
+#define TD_C_TXFBB 0x18000000L /* byte offset */
+#define TD_C_XDONE 0x04000000L /* give up token */
+#define TD_C_NFCS 0x02000000L /* no frame check sequence */
+#define TD_C_XMTABT 0x01000000L /* transmit abort */
+
+#define TD_C_LNCNU 0x0000ff00L
+#define TD_C_LNCNL 0x000000ffL
+#define TD_C_LNCN 0x0000ffffL /* frame length lower/upper byte */
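
Editor's note: symmetrically, a transmit descriptor word can be composed from the TD_C_* masks instead of the bit fields; TX_DESCRIPTOR already carries the mandatory TXP1 pattern in the tx_pat1 field. A hedged sketch for a minimal descriptor (values chosen for illustration only, not taken from the driver):

        /* minimal descriptor word: last frame in chain (TD_C_MORE clear),
         * hardware FCS appended (TD_C_NFCS clear), no byte offset
         */
        static long make_tx_descr_sketch(unsigned int frame_len)
        {
                return TX_DESCRIPTOR                    /* mandatory TXP1 pattern */
                        | (long) (frame_len & TD_C_LNCN) ;      /* frame length */
        }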
+
+/*
+ * transmit pointer
+ */
+union tx_pointer {
+ struct t {
+#ifdef LITTLE_ENDIAN
+ unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+#else
+ unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+#endif
+ } t ;
+ long i ;
+} ;
+
+/* defines for Nontag Mode Pointer access */
+#define TD_P_CNTRL 0xff000000L
+#define TD_P_RPXU 0x0000ff00L
+#define TD_P_RPXL 0x000000ffL
+#define TD_P_RPX 0x0000ffffL
+
+
+#define TX_PATTERN 0xa0
+#define TX_POINTER_END 0xa0000000L
+#define TX_INT_PATTERN 0xa0000000L
+
+struct tx_queue {
+ struct tx_queue *tq_next ;
+ u_short tq_pack_offset ; /* offset buffer memory */
+ u_char tq_pad[2] ;
+} ;
+
+/*
+ defines for FORMAC Plus (Am79C830)
+*/
+
+/*
+ * FORMAC+ read/write (r/w) registers
+ */
+#define FM_CMDREG1 0x00 /* write command reg 1 instruction */
+#define FM_CMDREG2 0x01 /* write command reg 2 instruction */
+#define FM_ST1U 0x00 /* read upper 16-bit of status reg 1 */
+#define FM_ST1L 0x01 /* read lower 16-bit of status reg 1 */
+#define FM_ST2U 0x02 /* read upper 16-bit of status reg 2 */
+#define FM_ST2L 0x03 /* read lower 16-bit of status reg 2 */
+#define FM_IMSK1U 0x04 /* r/w upper 16-bit of IMSK 1 */
+#define FM_IMSK1L 0x05 /* r/w lower 16-bit of IMSK 1 */
+#define FM_IMSK2U 0x06 /* r/w upper 16-bit of IMSK 2 */
+#define FM_IMSK2L 0x07 /* r/w lower 16-bit of IMSK 2 */
+#define FM_SAID 0x08 /* r/w short addr.-individual */
+#define FM_LAIM 0x09 /* r/w long addr.-ind. (MSW of LAID) */
+#define FM_LAIC 0x0a /* r/w long addr.-ind. (middle)*/
+#define FM_LAIL 0x0b /* r/w long addr.-ind. (LSW) */
+#define FM_SAGP 0x0c /* r/w short address-group */
+#define FM_LAGM 0x0d /* r/w long addr.-gr. (MSW of LAGP) */
+#define FM_LAGC 0x0e /* r/w long addr.-gr. (middle) */
+#define FM_LAGL 0x0f /* r/w long addr.-gr. (LSW) */
+#define FM_MDREG1 0x10 /* r/w 16-bit mode reg 1 */
+#define FM_STMCHN 0x11 /* read state-machine reg */
+#define FM_MIR1 0x12 /* read upper 16-bit of MAC Info Reg */
+#define FM_MIR0 0x13 /* read lower 16-bit of MAC Info Reg */
+#define FM_TMAX 0x14 /* r/w 16-bit TMAX reg */
+#define FM_TVX 0x15 /* write 8-bit TVX reg with NP7-0
+ read TVX on NP7-0, timer on NP15-8*/
+#define FM_TRT 0x16 /* r/w upper 16-bit of TRT timer */
+#define FM_THT 0x17 /* r/w upper 16-bit of THT timer */
+#define FM_TNEG 0x18 /* read upper 16-bit of TNEG (TTRT) */
+#define FM_TMRS 0x19 /* read lower 5-bit of TNEG,TRT,THT */
+ /* F E D C B A 9 8 7 6 5 4 3 2 1 0
+ x |-TNEG4-0| |-TRT4-0-| |-THT4-0-| (x-late count) */
+#define FM_TREQ0 0x1a /* r/w 16-bit TREQ0 reg (LSW of TRT) */
+#define FM_TREQ1 0x1b /* r/w 16-bit TREQ1 reg (MSW of TRT) */
+#define FM_PRI0 0x1c /* r/w priority r. for asyn.-queue 0 */
+#define FM_PRI1 0x1d /* r/w priority r. for asyn.-queue 1 */
+#define FM_PRI2 0x1e /* r/w priority r. for asyn.-queue 2 */
+#define FM_TSYNC 0x1f /* r/w 16-bit of the TSYNC register */
+#define FM_MDREG2 0x20 /* r/w 16-bit mode reg 2 */
+#define FM_FRMTHR 0x21 /* r/w the frame threshold register */
+#define FM_EACB 0x22 /* r/w end addr of claim/beacon area */
+#define FM_EARV 0x23 /* r/w end addr of receive queue */
+/* Supernet 3 */
+#define FM_EARV1 FM_EARV
+
+#define FM_EAS 0x24 /* r/w end addr of synchr. queue */
+#define FM_EAA0 0x25 /* r/w end addr of asyn. queue 0 */
+#define FM_EAA1 0x26 /* r/w end addr of asyn. queue 1 */
+#define FM_EAA2 0x27 /* r/w end addr of asyn. queue 2 */
+#define FM_SACL 0x28 /* r/w start addr of claim frame */
+#define FM_SABC 0x29 /* r/w start addr of beacon frame */
+#define FM_WPXSF 0x2a /* r/w the write ptr. for special fr.*/
+#define FM_RPXSF 0x2b /* r/w the read ptr. for special fr. */
+#define FM_RPR 0x2d /* r/w the read ptr. for receive qu. */
+#define FM_WPR 0x2e /* r/w the write ptr. for receive qu.*/
+#define FM_SWPR 0x2f /* r/w the shadow wr.-ptr. for rec.q.*/
+/* Supernet 3 */
+#define FM_RPR1 FM_RPR
+#define FM_WPR1 FM_WPR
+#define FM_SWPR1 FM_SWPR
+
+#define FM_WPXS 0x30 /* r/w the write ptr. for synchr. qu.*/
+#define FM_WPXA0 0x31 /* r/w the write ptr. for asyn. qu.0 */
+#define FM_WPXA1 0x32 /* r/w the write ptr. for asyn. qu.1 */
+#define FM_WPXA2 0x33 /* r/w the write ptr. for asyn. qu.2 */
+#define FM_SWPXS 0x34 /* r/w the shadow wr.-ptr. for syn.q.*/
+#define FM_SWPXA0 0x35 /* r/w the shad. wr.-ptr. for asyn.q0*/
+#define FM_SWPXA1 0x36 /* r/w the shad. wr.-ptr. for asyn.q1*/
+#define FM_SWPXA2 0x37 /* r/w the shad. wr.-ptr. for asyn.q2*/
+#define FM_RPXS 0x38 /* r/w the read ptr. for synchr. qu. */
+#define FM_RPXA0 0x39 /* r/w the read ptr. for asyn. qu. 0 */
+#define FM_RPXA1 0x3a /* r/w the read ptr. for asyn. qu. 1 */
+#define FM_RPXA2 0x3b /* r/w the read ptr. for asyn. qu. 2 */
+#define FM_MARR 0x3c /* r/w the memory read addr register */
+#define FM_MARW 0x3d /* r/w the memory write addr register*/
+#define FM_MDRU 0x3e /* r/w upper 16-bit of mem. data reg */
+#define FM_MDRL 0x3f /* r/w lower 16-bit of mem. data reg */
+
+/* following instructions relate to MAC counters and timer */
+#define FM_TMSYNC 0x40 /* r/w upper 16 bits of TMSYNC timer */
+#define FM_FCNTR 0x41 /* r/w the 16-bit frame counter */
+#define FM_LCNTR 0x42 /* r/w the 16-bit lost counter */
+#define FM_ECNTR 0x43 /* r/w the 16-bit error counter */
+
+/* Supernet 3: extensions to old register block */
+#define FM_FSCNTR 0x44 /* r/? Frame Strip Counter */
+#define FM_FRSELREG 0x45 /* r/w Frame Selection Register */
+
+/* Supernet 3: extensions for 2. receive queue etc. */
+#define FM_MDREG3 0x60 /* r/w Mode Register 3 */
+#define FM_ST3U 0x61 /* read upper 16-bit of status reg 3 */
+#define FM_ST3L 0x62 /* read lower 16-bit of status reg 3 */
+#define FM_IMSK3U 0x63 /* r/w upper 16-bit of IMSK reg 3 */
+#define FM_IMSK3L 0x64 /* r/w lower 16-bit of IMSK reg 3 */
+#define FM_IVR 0x65 /* read Interrupt Vector register */
+#define FM_IMR 0x66 /* r/w Interrupt mask register */
+/* 0x67 Hidden */
+#define FM_RPR2 0x68 /* r/w the read ptr. for rec. qu. 2 */
+#define FM_WPR2 0x69 /* r/w the write ptr. for rec. qu. 2 */
+#define FM_SWPR2 0x6a /* r/w the shadow wptr. for rec. q. 2 */
+#define FM_EARV2 0x6b /* r/w end addr of rec. qu. 2 */
+#define FM_UNLCKDLY 0x6c /* r/w Auto Unlock Delay register */
+ /* Bit 15-8: RECV2 unlock threshold */
+ /* Bit 7-0: RECV1 unlock threshold */
+/* 0x6f-0x73 Hidden */
+#define FM_LTDPA1 0x79 /* r/w Last Trans desc ptr for A1 qu. */
+/* 0x80-0x9a PLCS registers of built-in PLCS (Supernet 3 only) */
+
+/* Supernet 3: Address Filter Registers */
+#define FM_AFCMD 0xb0 /* r/w Address Filter Command Reg */
+#define FM_AFSTAT 0xb2 /* r/w Address Filter Status Reg */
+#define FM_AFBIST 0xb4 /* r/w Address Filter BIST signature */
+#define FM_AFCOMP2 0xb6 /* r/w Address Filter Comparand 2 */
+#define FM_AFCOMP1 0xb8 /* r/w Address Filter Comparand 1 */
+#define FM_AFCOMP0 0xba /* r/w Address Filter Comparand 0 */
+#define FM_AFMASK2 0xbc /* r/w Address Filter Mask 2 */
+#define FM_AFMASK1 0xbe /* r/w Address Filter Mask 1 */
+#define FM_AFMASK0 0xc0 /* r/w Address Filter Mask 0 */
+#define FM_AFPERS 0xc2 /* r/w Address Filter Personality Reg */
+
+/* Supernet 3: Orion (PDX?) Registers */
+#define FM_ORBIST 0xd0 /* r/w Orion BIST signature */
+#define FM_ORSTAT 0xd2 /* r/w Orion Status Register */
+
+
+/*
+ * Mode Register 1 (MDREG1)
+ */
+#define FM_RES0 0x0001 /* reserved */
+ /* SN3: other definition */
+#define FM_XMTINH_HOLD 0x0002 /* transmit-inhibit/hold bit */
+ /* SN3: other definition */
+#define FM_HOFLXI 0x0003 /* SN3: Hold / Flush / Inhibit */
+#define FM_FULL_HALF 0x0004 /* full-duplex/half-duplex bit */
+#define FM_LOCKTX 0x0008 /* lock-transmit-asynchr.-queues bit */
+#define FM_EXGPA0 0x0010 /* extended-group-addressing bit 0 */
+#define FM_EXGPA1 0x0020 /* extended-group-addressing bit 1 */
+#define FM_DISCRY 0x0040 /* disable-carry bit */
+ /* SN3: reserved */
+#define FM_SELRA 0x0080 /* select input from PHY (1=RA,0=RB) */
+
+#define FM_ADDET 0x0700 /* address detection */
+#define FM_MDAMA (0<<8) /* address detection : DA = MA */
+#define FM_MDASAMA (1<<8) /* address detection : DA=MA||SA=MA */
+#define FM_MRNNSAFNMA (2<<8) /* rec. non-NSA frames DA=MA&&SA!=MA */
+#define FM_MRNNSAF (3<<8) /* rec. non-NSA frames DA = MA */
+#define FM_MDISRCV (4<<8) /* disable receive function */
+#define FM_MRES0 (5<<8) /* reserved */
+#define FM_MLIMPROM (6<<8) /* limited-promiscuous mode */
+#define FM_MPROMISCOUS (7<<8) /* address detection : promiscuous */
+
+#define FM_SELSA 0x0800 /* select-short-address bit */
+
+#define FM_MMODE 0x7000 /* mode select */
+#define FM_MINIT (0<<12) /* initialize */
+#define FM_MMEMACT (1<<12) /* memory activate */
+#define FM_MONLINESP (2<<12) /* on-line special */
+#define FM_MONLINE (3<<12) /* on-line (FDDI operational mode) */
+#define FM_MILOOP (4<<12) /* internal loopback */
+#define FM_MRES1 (5<<12) /* reserved */
+#define FM_MRES2 (6<<12) /* reserved */
+#define FM_MELOOP (7<<12) /* external loopback */
+
+#define FM_SNGLFRM 0x8000 /* single-frame-receive mode */
+ /* SN3: reserved */
+
+#define MDR1INIT (FM_MINIT | FM_MDAMA)
+
+/*
+ * Mode Register 2 (MDREG2)
+ */
+#define FM_AFULL 0x000f /* 4-bit value (empty loc.in txqueue)*/
+#define FM_RCVERR 0x0010 /* rec.-errored-frames bit */
+#define FM_SYMCTL 0x0020 /* symbol-control bit */
+ /* SN3: reserved */
+#define FM_SYNPRQ 0x0040 /* synchron.-NP-DMA-request bit */
+#define FM_ENNPRQ 0x0080 /* enable-NP-DMA-request bit */
+#define FM_ENHSRQ 0x0100 /* enable-host-request bit */
+#define FM_RXFBB01 0x0600 /* rec. frame byte boundary bit0 & 1 */
+#define FM_LSB 0x0800 /* determ. ordering of bytes in buffer*/
+#define FM_PARITY 0x1000 /* 1 = even, 0 = odd */
+#define FM_CHKPAR 0x2000 /* 1 = parity of 32-bit buffer BD-bus*/
+#define FM_STRPFCS 0x4000 /* 1 = strips FCS field of rec.frame */
+#define FM_BMMODE 0x8000 /* Buffer-Memory-Mode (1 = tag mode) */
+ /* SN3: 1 = tag, 0 = modified tag */
+
+/*
+ * Status Register 1, Upper 16 Bits (ST1U)
+ */
+#define FM_STEFRMS 0x0001 /* transmit end of frame: synchr. qu.*/
+#define FM_STEFRMA0 0x0002 /* transmit end of frame: asyn. qu.0 */
+#define FM_STEFRMA1 0x0004 /* transmit end of frame: asyn. qu.1 */
+#define FM_STEFRMA2 0x0008 /* transmit end of frame: asyn. qu.2 */
+ /* SN3: reserved */
+#define FM_STECFRMS 0x0010 /* transmit end of chain of syn. qu. */
+ /* SN3: reserved */
+#define FM_STECFRMA0 0x0020 /* transmit end of chain of asyn. q0 */
+ /* SN3: reserved */
+#define FM_STECFRMA1 0x0040 /* transmit end of chain of asyn. q1 */
+ /* SN3: STECMDA1 */
+#define FM_STECMDA1 0x0040 /* SN3: 'no description' */
+#define FM_STECFRMA2 0x0080 /* transmit end of chain of asyn. q2 */
+ /* SN3: reserved */
+#define FM_STEXDONS 0x0100 /* transmit until XDONE in syn. qu. */
+#define FM_STBFLA 0x0200 /* asynchr.-queue trans. buffer full */
+#define FM_STBFLS 0x0400 /* synchr.-queue transm. buffer full */
+#define FM_STXABRS 0x0800 /* synchr. queue transmit-abort */
+#define FM_STXABRA0 0x1000 /* asynchr. queue 0 transmit-abort */
+#define FM_STXABRA1 0x2000 /* asynchr. queue 1 transmit-abort */
+#define FM_STXABRA2 0x4000 /* asynchr. queue 2 transmit-abort */
+ /* SN3: reserved */
+#define FM_SXMTABT 0x8000 /* transmit abort */
+
+/*
+ * Status Register 1, Lower 16 Bits (ST1L)
+ */
+#define FM_SQLCKS 0x0001 /* queue lock for synchr. queue */
+#define FM_SQLCKA0 0x0002 /* queue lock for asynchr. queue 0 */
+#define FM_SQLCKA1 0x0004 /* queue lock for asynchr. queue 1 */
+#define FM_SQLCKA2 0x0008 /* queue lock for asynchr. queue 2 */
+ /* SN3: reserved */
+#define FM_STXINFLS 0x0010 /* transmit instruction full: syn. */
+ /* SN3: reserved */
+#define FM_STXINFLA0 0x0020 /* transmit instruction full: asyn.0 */
+ /* SN3: reserved */
+#define FM_STXINFLA1 0x0040 /* transmit instruction full: asyn.1 */
+ /* SN3: reserved */
+#define FM_STXINFLA2 0x0080 /* transmit instruction full: asyn.2 */
+ /* SN3: reserved */
+#define FM_SPCEPDS 0x0100 /* parity/coding error: syn. queue */
+#define FM_SPCEPDA0 0x0200 /* parity/coding error: asyn. queue0 */
+#define FM_SPCEPDA1 0x0400 /* parity/coding error: asyn. queue1 */
+#define FM_SPCEPDA2 0x0800 /* parity/coding error: asyn. queue2 */
+ /* SN3: reserved */
+#define FM_STBURS 0x1000 /* transmit buffer underrun: syn. q. */
+#define FM_STBURA0 0x2000 /* transmit buffer underrun: asyn.0 */
+#define FM_STBURA1 0x4000 /* transmit buffer underrun: asyn.1 */
+#define FM_STBURA2 0x8000 /* transmit buffer underrun: asyn.2 */
+ /* SN3: reserved */
+
+/*
+ * Status Register 2, Upper 16 Bits (ST2U)
+ */
+#define FM_SOTRBEC 0x0001 /* other beacon received */
+#define FM_SMYBEC 0x0002 /* my beacon received */
+#define FM_SBEC 0x0004 /* beacon state entered */
+#define FM_SLOCLM 0x0008 /* low claim received */
+#define FM_SHICLM 0x0010 /* high claim received */
+#define FM_SMYCLM 0x0020 /* my claim received */
+#define FM_SCLM 0x0040 /* claim state entered */
+#define FM_SERRSF 0x0080 /* error in special frame */
+#define FM_SNFSLD 0x0100 /* NP and FORMAC+ simultaneous load */
+#define FM_SRFRCTOV 0x0200 /* receive frame counter overflow */
+ /* SN3: reserved */
+#define FM_SRCVFRM 0x0400 /* receive frame */
+ /* SN3: reserved */
+#define FM_SRCVOVR 0x0800 /* receive FIFO overflow */
+#define FM_SRBFL 0x1000 /* receive buffer full */
+#define FM_SRABT 0x2000 /* receive abort */
+#define FM_SRBMT 0x4000 /* receive buffer empty */
+#define FM_SRCOMP 0x8000 /* receive complete. Nontag mode */
+
+/*
+ * Status Register 2, Lower 16 Bits (ST2L)
+ * Attention: SN3 docu shows these bits the other way around
+ */
+#define FM_SRES0 0x0001 /* reserved */
+#define FM_SESTRIPTK 0x0001 /* SN3: 'no description' */
+#define FM_STRTEXR 0x0002 /* TRT expired in claim | beacon st. */
+#define FM_SDUPCLM 0x0004 /* duplicate claim received */
+#define FM_SSIFG 0x0008 /* short interframe gap */
+#define FM_SFRMCTR 0x0010 /* frame counter overflow */
+#define FM_SERRCTR 0x0020 /* error counter overflow */
+#define FM_SLSTCTR 0x0040 /* lost counter overflow */
+#define FM_SPHINV 0x0080 /* PHY invalid */
+#define FM_SADET 0x0100 /* address detect */
+#define FM_SMISFRM 0x0200 /* missed frame */
+#define FM_STRTEXP 0x0400 /* TRT expired and late count > 0 */
+#define FM_STVXEXP 0x0800 /* TVX expired */
+#define FM_STKISS 0x1000 /* token issued */
+#define FM_STKERR 0x2000 /* token error */
+#define FM_SMULTDA 0x4000 /* multiple destination address */
+#define FM_SRNGOP 0x8000 /* ring operational */
+
+/*
+ * Supernet 3:
+ * Status Register 3, Upper 16 Bits (ST3U)
+ */
+#define FM_SRQUNLCK1 0x0001 /* receive queue unlocked queue 1 */
+#define FM_SRQUNLCK2 0x0002 /* receive queue unlocked queue 2 */
+#define FM_SRPERRQ1 0x0004 /* receive parity error rx queue 1 */
+#define FM_SRPERRQ2 0x0008 /* receive parity error rx queue 2 */
+ /* Bit 4-10: reserved */
+#define FM_SRCVOVR2 0x0800 /* receive FIFO overflow rx queue 2 */
+#define FM_SRBFL2 0x1000 /* receive buffer full rx queue 2 */
+#define FM_SRABT2 0x2000 /* receive abort rx queue 2 */
+#define FM_SRBMT2 0x4000 /* receive buf empty rx queue 2 */
+#define FM_SRCOMP2 0x8000 /* receive comp rx queue 2 */
+
+/*
+ * Supernet 3:
+ * Status Register 3, Lower 16 Bits (ST3L)
+ */
+#define FM_AF_BIST_DONE 0x0001 /* Address Filter BIST is done */
+#define FM_PLC_BIST_DONE 0x0002 /* internal PLC Bist is done */
+#define FM_PDX_BIST_DONE 0x0004 /* PDX BIST is done */
+ /* Bit 3: reserved */
+#define FM_SICAMDAMAT 0x0010 /* Status internal CAM DA match */
+#define FM_SICAMDAXACT 0x0020 /* Status internal CAM DA exact match */
+#define FM_SICAMSAMAT 0x0040 /* Status internal CAM SA match */
+#define FM_SICAMSAXACT 0x0080 /* Status internal CAM SA exact match */
+
+/*
+ * MAC State-Machine Register FM_STMCHN
+ */
+#define FM_MDRTAG 0x0004 /* tag bit of long word read */
+#define FM_SNPPND 0x0008 /* r/w from buffer mem. is pending */
+#define FM_TXSTAT 0x0070 /* transmitter state machine state */
+#define FM_RCSTAT 0x0380 /* receiver state machine state */
+#define FM_TM01 0x0c00 /* indicate token mode */
+#define FM_SIM 0x1000 /* indicate send immediate-mode */
+#define FM_REV 0xe000 /* FORMAC Plus revision number */
+
+/*
+ * Supernet 3
+ * Mode Register 3
+ */
+#define FM_MENRS 0x0001 /* Ena enhanced rec status encoding */
+#define FM_MENXS 0x0002 /* Ena enhanced xmit status encoding */
+#define FM_MENXCT 0x0004 /* Ena EXACT/INEXACT matching */
+#define FM_MENAFULL 0x0008 /* Ena enh QCTRL encoding for AFULL */
+#define FM_MEIND 0x0030 /* Ena enh A,C indicator settings */
+#define FM_MENQCTRL 0x0040 /* Ena enh QCTRL encoding */
+#define FM_MENRQAUNLCK 0x0080 /* Ena rec q auto unlock */
+#define FM_MENDAS 0x0100 /* Ena DAS connections by cntr MUX */
+#define FM_MENPLCCST 0x0200 /* Ena Counter Segm test in PLC blck */
+#define FM_MENSGLINT 0x0400 /* Ena Vectored Interrupt reading */
+#define FM_MENDRCV 0x0800 /* Ena dual receive queue operation */
+#define FM_MENFCLOC 0x3000 /* Ena FC location within frm data */
+#define FM_MENTRCMD 0x4000 /* Ena ASYNC1 xmit only after command */
+#define FM_MENTDLPBK 0x8000 /* Ena TDAT to RDAT loopback */
+
+/*
+ * Supernet 3
+ * Frame Selection Register
+ */
+#define FM_RECV1 0x000f /* options for receive queue 1 */
+#define FM_RCV1_ALL (0<<0) /* receive all frames */
+#define FM_RCV1_LLC (1<<0) /* rec all LLC frames */
+#define FM_RCV1_SMT (2<<0) /* rec all SMT frames */
+#define FM_RCV1_NSMT (3<<0) /* rec non-SMT frames */
+#define FM_RCV1_IMP (4<<0) /* rec Implementor frames */
+#define FM_RCV1_MAC (5<<0) /* rec all MAC frames */
+#define FM_RCV1_SLLC (6<<0) /* rec all sync LLC frames */
+#define FM_RCV1_ALLC (7<<0) /* rec all async LLC frames */
+#define FM_RCV1_VOID (8<<0) /* rec all void frames */
+#define FM_RCV1_ALSMT (9<<0) /* rec all async LLC & SMT frames */
+#define FM_RECV2 0x00f0 /* options for receive queue 2 */
+#define FM_RCV2_ALL (0<<4) /* receive all other frames */
+#define FM_RCV2_LLC (1<<4) /* rec all LLC frames */
+#define FM_RCV2_SMT (2<<4) /* rec all SMT frames */
+#define FM_RCV2_NSMT (3<<4) /* rec non-SMT frames */
+#define FM_RCV2_IMP (4<<4) /* rec Implementor frames */
+#define FM_RCV2_MAC (5<<4) /* rec all MAC frames */
+#define FM_RCV2_SLLC (6<<4) /* rec all sync LLC frames */
+#define FM_RCV2_ALLC (7<<4) /* rec all async LLC frames */
+#define FM_RCV2_VOID (8<<4) /* rec all void frames */
+#define FM_RCV2_ALSMT (9<<4) /* rec all async LLC & SMT frames */
+#define FM_ENXMTADSWAP 0x4000 /* enh rec addr swap (phys -> can) */
+#define FM_ENRCVADSWAP 0x8000 /* enh tx addr swap (can -> phys) */
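+
+/*
+ * Illustrative sketch (not part of the original sources): a frame selection
+ * value that steers SMT frames to receive queue 2 and all non-SMT frames to
+ * receive queue 1 (Supernet 3 only). The macro name is hypothetical.
+ */
+#if 0
+#define FRSEL_SPLIT_SMT (FM_RCV1_NSMT | FM_RCV2_SMT)
+#endif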
+
+/*
+ * Supernet 3:
+ * Address Filter Command Register (AFCMD)
+ */
+#define FM_INST 0x0007 /* Address Filter Operation */
+#define FM_IINV_CAM (0<<0) /* Invalidate CAM */
+#define FM_IWRITE_CAM (1<<0) /* Write CAM */
+#define FM_IREAD_CAM (2<<0) /* Read CAM */
+#define FM_IRUN_BIST (3<<0) /* Run BIST */
+#define FM_IFIND (4<<0) /* Find */
+#define FM_IINV (5<<0) /* Invalidate */
+#define FM_ISKIP (6<<0) /* Skip */
+#define FM_ICL_SKIP (7<<0) /* Clear all SKIP bits */
+
+/*
+ * Supernet 3:
+ * Address Filter Status Register (AFSTAT)
+ */
+ /* Bit 0-4: reserved */
+#define FM_REV_NO 0x00e0 /* Revision Number of Address Filter */
+#define FM_BIST_DONE 0x0100 /* BIST complete */
+#define FM_EMPTY 0x0200 /* CAM empty */
+#define FM_ERROR 0x0400 /* Error (improper operation) */
+#define FM_MULT 0x0800 /* Multiple Match */
+#define FM_EXACT 0x1000 /* Exact Match */
+#define FM_FOUND 0x2000 /* Comparand found in CAM */
+#define FM_FULL 0x4000 /* CAM full */
+#define FM_DONE 0x8000 /* DONE indicator */
+
+/*
+ * Supernet 3:
+ * BIST Signature Register (AFBIST)
+ */
+#define AF_BIST_SIGNAT 0x0553 /* Address Filter BIST Signature */
+
+/*
+ * Supernet 3:
+ * Personality Register (AFPERS)
+ */
+#define FM_VALID 0x0001 /* CAM Entry Valid */
+#define FM_DA 0x0002 /* Destination Address */
+#define FM_DAX 0x0004 /* Destination Address Exact */
+#define FM_SA 0x0008 /* Source Address */
+#define FM_SAX 0x0010 /* Source Address Exact */
+#define FM_SKIP 0x0020 /* Skip this entry */
+
+/*
+ * instruction set for command register 1 (NPADDR6-0 = 0x00)
+ */
+#define FM_IRESET 0x01 /* software reset */
+#define FM_IRMEMWI 0x02 /* load Memory Data Reg., inc MARR */
+#define FM_IRMEMWO 0x03 /* load MDR from buffer memory, n.i. */
+#define FM_IIL 0x04 /* idle/listen */
+#define FM_ICL 0x05 /* claim/listen */
+#define FM_IBL 0x06 /* beacon/listen */
+#define FM_ILTVX 0x07 /* load TVX timer from TVX reg */
+#define FM_INRTM 0x08 /* nonrestricted token mode */
+#define FM_IENTM 0x09 /* enter nonrestricted token mode */
+#define FM_IERTM 0x0a /* enter restricted token mode */
+#define FM_IRTM 0x0b /* restricted token mode */
+#define FM_ISURT 0x0c /* send unrestricted token */
+#define FM_ISRT 0x0d /* send restricted token */
+#define FM_ISIM 0x0e /* enter send-immediate mode */
+#define FM_IESIM 0x0f /* exit send-immediate mode */
+#define FM_ICLLS 0x11 /* clear synchronous queue lock */
+#define FM_ICLLA0 0x12 /* clear asynchronous queue 0 lock */
+#define FM_ICLLA1 0x14 /* clear asynchronous queue 1 lock */
+#define FM_ICLLA2 0x18 /* clear asynchronous queue 2 lock */
+ /* SN3: reserved */
+#define FM_ICLLR 0x20 /* clear receive queue (SN3:1) lock */
+#define FM_ICLLR2 0x21 /* SN3: clear receive queue 2 lock */
+#define FM_ITRXBUS 0x22 /* SN3: Tristate X-Bus (SAS only) */
+#define FM_IDRXBUS 0x23 /* SN3: drive X-Bus */
+#define FM_ICLLAL 0x3f /* clear all queue locks */
+
+/*
+ * instruction set for command register 2 (NPADDR6-0 = 0x01)
+ */
+#define FM_ITRS 0x01 /* transmit synchronous queue */
+ /* SN3: reserved */
+#define FM_ITRA0 0x02 /* transmit asynchronous queue 0 */
+ /* SN3: reserved */
+#define FM_ITRA1 0x04 /* transmit asynchronous queue 1 */
+ /* SN3: reserved */
+#define FM_ITRA2 0x08 /* transmit asynchronous queue 2 */
+ /* SN3: reserved */
+#define FM_IACTR 0x10 /* abort current transmit activity */
+#define FM_IRSTQ 0x20 /* reset transmit queues */
+#define FM_ISTTB 0x30 /* set tag bit */
+#define FM_IERSF 0x40 /* enable receive single frame */
+ /* SN3: reserved */
+#define FM_ITR 0x50 /* SN3: Transmit Command */
+
+
+/*
+ * defines for PLC (Am79C864)
+ */
+
+/*
+ * PLC read/write (r/w) registers
+ */
+#define PL_CNTRL_A 0x00 /* control register A (r/w) */
+#define PL_CNTRL_B 0x01 /* control register B (r/w) */
+#define PL_INTR_MASK 0x02 /* interrupt mask (r/w) */
+#define PL_XMIT_VECTOR 0x03 /* transmit vector register (r/w) */
+#define PL_VECTOR_LEN 0x04 /* transmit vector length (r/w) */
+#define PL_LE_THRESHOLD 0x05 /* link error event threshold (r/w) */
+#define PL_C_MIN 0x06 /* minimum connect state time (r/w) */
+#define PL_TL_MIN 0x07 /* min. line state transmit t. (r/w) */
+#define PL_TB_MIN 0x08 /* minimum break time (r/w) */
+#define PL_T_OUT 0x09 /* signal timeout (r/w) */
+#define PL_CNTRL_C 0x0a /* control register C (r/w) */
+#define PL_LC_LENGTH 0x0b /* link confidence test time (r/w) */
+#define PL_T_SCRUB 0x0c /* scrub time = MAC TVX (r/w) */
+#define PL_NS_MAX 0x0d /* max. noise time before break (r/w)*/
+#define PL_TPC_LOAD_V 0x0e /* TPC timer load value (write only) */
+#define PL_TNE_LOAD_V 0x0f /* TNE timer load value (write only) */
+#define PL_STATUS_A 0x10 /* status register A (read only) */
+#define PL_STATUS_B 0x11 /* status register B (read only) */
+#define PL_TPC 0x12 /* timer for PCM (ro) [20.48 us] */
+#define PL_TNE 0x13 /* time of noise event [0.32 us] */
+#define PL_CLK_DIV 0x14 /* TNE clock divider (read only) */
+#define PL_BIST_SIGNAT 0x15 /* built in self test signature (ro)*/
+#define PL_RCV_VECTOR 0x16 /* receive vector reg. (read only) */
+#define PL_INTR_EVENT 0x17 /* interrupt event reg. (read only) */
+#define PL_VIOL_SYM_CTR 0x18 /* violation symbol count. (read o) */
+#define PL_MIN_IDLE_CTR 0x19 /* minimum idle counter (read only) */
+#define PL_LINK_ERR_CTR 0x1a /* link error event ctr.(read only) */
+#ifdef MOT_ELM
+#define PL_T_FOT_ASS 0x1e /* FOTOFF Assert Timer */
+#define PL_T_FOT_DEASS 0x1f /* FOTOFF Deassert Timer */
+#endif /* MOT_ELM */
+
+#ifdef MOT_ELM
+/*
+ * Special Quad-Elm Registers.
+ * A Quad-ELM consists of four ELMs and these additional registers.
+ */
+#define QELM_XBAR_W 0x80 /* Crossbar Control ELM W */
+#define QELM_XBAR_X 0x81 /* Crossbar Control ELM X */
+#define QELM_XBAR_Y 0x82 /* Crossbar Control ELM Y */
+#define QELM_XBAR_Z 0x83 /* Crossbar Control ELM Z */
+#define QELM_XBAR_P 0x84 /* Crossbar Control Bus P */
+#define QELM_XBAR_S 0x85 /* Crossbar Control Bus S */
+#define QELM_XBAR_R 0x86 /* Crossbar Control Bus R */
+#define QELM_WR_XBAR 0x87 /* Write the Crossbar now (write) */
+#define QELM_CTR_W 0x88 /* Counter W */
+#define QELM_CTR_X 0x89 /* Counter X */
+#define QELM_CTR_Y 0x8a /* Counter Y */
+#define QELM_CTR_Z 0x8b /* Counter Z */
+#define QELM_INT_MASK 0x8c /* Interrupt mask register */
+#define QELM_INT_DATA 0x8d /* Interrupt data (event) register */
+#define QELM_ELMB 0x00 /* Elm base */
+#define QELM_ELM_SIZE 0x20 /* ELM size */
+#endif /* MOT_ELM */
+/*
+ * PLC control register A (PL_CNTRL_A: log. addr. 0x00)
+ * It is used for timer configuration, specification of PCM MAINT state option,
+ * counter interrupt frequency, PLC data path config. and Built In Self Test.
+ */
+#define PL_RUN_BIST 0x0001 /* begin running its Built In Self T.*/
+#define PL_RF_DISABLE 0x0002 /* disable the Repeat Filter state m.*/
+#define PL_SC_REM_LOOP 0x0004 /* remote loopback path */
+#define PL_SC_BYPASS 0x0008 /* by providing a physical bypass */
+#define PL_LM_LOC_LOOP 0x0010 /* loop path just after elastic buff.*/
+#define PL_EB_LOC_LOOP 0x0020 /* loop path just prior to PDT/PDR IF*/
+#define PL_FOT_OFF 0x0040 /* assertion of /FOTOFF pin of PLC */
+#define PL_LOOPBACK 0x0080 /* asserts the /LPBCK pin (active low) */
+#define PL_MINI_CTR_INT 0x0100 /* partially contr. when bit is ass. */
+#define PL_VSYM_CTR_INT 0x0200 /* controls when int bit is asserted */
+#define PL_ENA_PAR_CHK 0x0400 /* enable parity check */
+#define PL_REQ_SCRUB 0x0800 /* limited access to scrub capability*/
+#define PL_TPC_16BIT 0x1000 /* run the TPC as a 16 bit timer */
+#define PL_TNE_16BIT 0x2000 /* run the TNE as a 16 bit timer */
+#define PL_NOISE_TIMER 0x4000 /* allows the noise timing function */
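+
+/*
+ * Illustrative sketch (not part of the original sources): a control register
+ * A value that enables parity checking and runs both TPC and TNE as 16 bit
+ * timers. The macro name is hypothetical.
+ */
+#if 0
+#define PLC_A_PAR_16BIT (PL_ENA_PAR_CHK | PL_TPC_16BIT | PL_TNE_16BIT)
+#endif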
+
+/*
+ * PLC control register B (PL_CNTRL_B: log. addr. 0x01)
+ * It contains signals and requests to direct the PCM process and it is also
+ * used to control the Line State Match interrupt.
+ */
+#define PL_PCM_CNTRL 0x0003 /* control PCM state machine */
+#define PL_PCM_NAF (0) /* state is not affected */
+#define PL_PCM_START (1) /* goes to the BREAK state */
+#define PL_PCM_TRACE (2) /* goes to the TRACE state */
+#define PL_PCM_STOP (3) /* goes to the OFF state */
+
+#define PL_MAINT 0x0004 /* if OFF state --> MAINT state */
+#define PL_LONG 0x0008 /* perf. a long Link Confid.Test(LCT)*/
+#define PL_PC_JOIN 0x0010 /* if NEXT state --> JOIN state */
+
+#define PL_PC_LOOP 0x0060 /* loopback used in the LCT */
+#define PL_NOLCT (0<<5) /* no LCT is performed */
+#define PL_TPDR (1<<5) /* PCM asserts transmit PDR */
+#define PL_TIDLE (2<<5) /* PCM asserts transmit idle */
+#define PL_RLBP (3<<5) /* trans. PDR & remote loopb. path */
+
+#define PL_CLASS_S 0x0080 /* signif. that single att. station */
+
+#define PL_MAINT_LS 0x0700 /* line state while in the MAINT st. */
+#define PL_M_QUI0 (0<<8) /* transmit QUIET line state */
+#define PL_M_IDLE (1<<8) /* transmit IDLE line state */
+#define PL_M_HALT (2<<8) /* transmit HALT line state */
+#define PL_M_MASTR (3<<8) /* transmit MASTER line state */
+#define PL_M_QUI1 (4<<8) /* transmit QUIET line state */
+#define PL_M_QUI2 (5<<8) /* transmit QUIET line state */
+#define PL_M_TPDR (6<<8) /* tr. PHY_DATA requ.-symbol is tr.ed*/
+#define PL_M_QUI3 (7<<8) /* transmit QUIET line state */
+
+#define PL_MATCH_LS 0x7800 /* line state to be comp. with curr.*/
+#define PL_I_ANY (0<<11) /* Int. on any change in *_LINE_ST */
+#define PL_I_IDLE (1<<11) /* Interrupt on IDLE line state */
+#define PL_I_HALT (2<<11) /* Interrupt on HALT line state */
+#define PL_I_MASTR (4<<11) /* Interrupt on MASTER line state */
+#define PL_I_QUIET (8<<11) /* Interrupt on QUIET line state */
+
+#define PL_CONFIG_CNTRL 0x8000 /* control over scrub, byp. & loopb.*/
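+
+/*
+ * Illustrative sketch (not part of the original sources): a control register
+ * B value that starts the PCM state machine and requests a Line State Match
+ * interrupt on the IDLE line state. The macro name is hypothetical.
+ */
+#if 0
+#define PLC_B_START_IDLE_INT (PL_PCM_START | PL_I_IDLE)
+#endif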
+
+/*
+ * PLC control register C (PL_CNTRL_C: log. addr. 0x0a)
+ * It contains the scrambling control registers (PLC-S only)
+ */
+#define PL_C_CIPHER_ENABLE (1<<0) /* enable scrambler */
+#define PL_C_CIPHER_LPBCK (1<<1) /* loopback scrambler */
+#define PL_C_SDOFF_ENABLE (1<<6) /* enable SDOFF timer */
+#define PL_C_SDON_ENABLE (1<<7) /* enable SDON timer */
+#ifdef MOT_ELM
+#define PL_C_FOTOFF_CTRL (3<<2) /* FOTOFF timer control */
+#define PL_C_FOTOFF_TIM (0<<2) /* FOTOFF use timer for (de)-assert */
+#define PL_C_FOTOFF_INA (2<<2) /* FOTOFF forced inactive */
+#define PL_C_FOTOFF_ACT (3<<2) /* FOTOFF forced active */
+#define PL_C_FOTOFF_SRCE (1<<4) /* FOTOFF source is PCM state != OFF */
+#define PL_C_RXDATA_EN (1<<5) /* Rec scr data forced to 0 */
+#define PL_C_SDNRZEN (1<<8) /* Monitor rec descr. data for act */
+#else /* nMOT_ELM */
+#define PL_C_FOTOFF_CTRL (3<<8) /* FOTOFF timer control */
+#define PL_C_FOTOFF_0 (0<<8) /* timer off */
+#define PL_C_FOTOFF_30 (1<<8) /* 30uS */
+#define PL_C_FOTOFF_50 (2<<8) /* 50uS */
+#define PL_C_FOTOFF_NEVER (3<<8) /* never */
+#define PL_C_SDON_TIMER (3<<10) /* SDON timer control */
+#define PL_C_SDON_084 (0<<10) /* 0.84 uS */
+#define PL_C_SDON_132 (1<<10) /* 1.32 uS */
+#define PL_C_SDON_252 (2<<10) /* 2.52 uS */
+#define PL_C_SDON_512 (3<<10) /* 5.12 uS */
+#define PL_C_SOFF_TIMER (3<<12) /* SDOFF timer control */
+#define PL_C_SOFF_076 (0<<12) /* 0.76 uS */
+#define PL_C_SOFF_132 (1<<12) /* 1.32 uS */
+#define PL_C_SOFF_252 (2<<12) /* 2.52 uS */
+#define PL_C_SOFF_512 (3<<12) /* 5.12 uS */
+#define PL_C_TSEL (3<<14) /* scrambler path select */
+#endif /* nMOT_ELM */
+
+/*
+ * PLC status register A (PL_STATUS_A: log. addr. 0x10)
+ * It is used to report status information to the Node Processor about the
+ * Line State Machine (LSM).
+ */
+#ifdef MOT_ELM
+#define PLC_INT_MASK 0xc000 /* ELM integration bits in status A */
+#define PLC_INT_C 0x0000 /* ELM Revision Band C */
+#define PLC_INT_CAMEL 0x4000 /* ELM integrated into CAMEL */
+#define PLC_INT_QE 0x8000 /* ELM integrated into Quad ELM */
+#define PLC_REV_MASK 0x3800 /* revision bits in status A */
+#define PLC_REVISION_B 0x0000 /* rev bits for ELM Rev B */
+#define PLC_REVISION_QA 0x0800 /* rev bits for ELM core in QELM-A */
+#else /* nMOT_ELM */
+#define PLC_REV_MASK 0xf800 /* revision bits in status A */
+#define PLC_REVISION_A 0x0000 /* revision bits for PLC */
+#define PLC_REVISION_S 0xf800 /* revision bits for PLC-S */
+#define PLC_REV_SN3 0x7800 /* revision bits for PLC-S in IFCP */
+#endif /* nMOT_ELM */
+#define PL_SYM_PR_CTR 0x0007 /* contains the LSM symbol pair Ctr. */
+#define PL_UNKN_LINE_ST 0x0008 /* unknown line state bit from LSM */
+#define PL_LSM_STATE 0x0010 /* state bit of LSM */
+
+#define PL_LINE_ST 0x00e0 /* contains recogn. line state of LSM*/
+#define PL_L_NLS (0<<5) /* noise line state */
+#define PL_L_ALS (1<<5) /* active line state */
+#define PL_L_UND (2<<5) /* undefined */
+#define PL_L_ILS4 (3<<5) /* idle l. s. (after 4 idle symbols) */
+#define PL_L_QLS (4<<5) /* quiet line state */
+#define PL_L_MLS (5<<5) /* master line state */
+#define PL_L_HLS (6<<5) /* halt line state */
+#define PL_L_ILS16 (7<<5) /* idle line state (after 16 idle s.)*/
+
+#define PL_PREV_LINE_ST 0x0300 /* value of previous line state */
+#define PL_P_QLS (0<<8) /* quiet line state */
+#define PL_P_MLS (1<<8) /* master line state */
+#define PL_P_HLS (2<<8) /* halt line state */
+#define PL_P_ILS16 (3<<8) /* idle line state (after 16 idle s.)*/
+
+#define PL_SIGNAL_DET 0x0400 /* 1 = signal detect is deasserted */
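+
+/*
+ * Illustrative sketch (not part of the original sources): testing the line
+ * state reported in status register A. The function name is hypothetical.
+ */
+#if 0
+static int plc_line_is_idle(u_short status_a)
+{
+	u_short ls = status_a & PL_LINE_ST ;
+
+	return(ls == PL_L_ILS4 || ls == PL_L_ILS16) ;
+}
+#endif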
+
+
+/*
+ * PLC status register B (PL_STATUS_B: log. addr. 0x11)
+ * It contains signals and status from the repeat filter and PCM state machine.
+ */
+#define PL_BREAK_REASON 0x0007 /* reason for PCM state mach.s to br.*/
+#define PL_B_NOT (0) /* PCM SM has not gone to BREAK state*/
+#define PL_B_PCS (1) /* PC_Start issued */
+#define PL_B_TPC (2) /* TPC timer expired after T_OUT */
+#define PL_B_TNE (3) /* TNE timer expired after NS_MAX */
+#define PL_B_QLS (4) /* quiet line state detected */
+#define PL_B_ILS (5) /* idle line state detected */
+#define PL_B_HLS (6) /* halt line state detected */
+
+#define PL_TCF 0x0008 /* transmit code flag (start exec.) */
+#define PL_RCF 0x0010 /* receive code flag (start exec.) */
+#define PL_LSF 0x0020 /* line state flag (l.s. has been r.)*/
+#define PL_PCM_SIGNAL 0x0040 /* indicates XMIT_VECTOR has been written */
+
+#define PL_PCM_STATE 0x0780 /* state bits of PCM state machine */
+#define PL_PC0 (0<<7) /* OFF - when /RST or PCM_CNTRL */
+#define PL_PC1 (1<<7) /* BREAK - entry point in start PCM*/
+#define PL_PC2 (2<<7) /* TRACE - to localize stuck Beacon*/
+#define PL_PC3 (3<<7) /* CONNECT - synchronize ends of conn*/
+#define PL_PC4 (4<<7) /* NEXT - to separate the signaling */
+#define PL_PC5 (5<<7) /* SIGNAL - PCM trans/rec. bit infos*/
+#define PL_PC6 (6<<7) /* JOIN - 1. state to activ conn. */
+#define PL_PC7 (7<<7) /* VERIFY - 2. - " - (3. ACTIVE) */
+#define PL_PC8 (8<<7) /* ACTIVE - PHY has been incorporated*/
+#define PL_PC9 (9<<7) /* MAINT - for test purposes or so
+ that PCM op. completely in softw. */
+
+#define PL_PCI_SCRUB 0x0800 /* scrubbing function is being exec. */
+
+#define PL_PCI_STATE 0x3000 /* Physical Connect. Insertion SM */
+#define PL_CI_REMV (0<<12) /* REMOVED */
+#define PL_CI_ISCR (1<<12) /* INSERT_SCRUB */
+#define PL_CI_RSCR (2<<12) /* REMOVE_SCRUB */
+#define PL_CI_INS (3<<12) /* INSERTED */
+
+#define PL_RF_STATE 0xc000 /* state bits of repeat filter SM */
+#define PL_RF_REPT (0<<14) /* REPEAT */
+#define PL_RF_IDLE (1<<14) /* IDLE */
+#define PL_RF_HALT1 (2<<14) /* HALT1 */
+#define PL_RF_HALT2 (3<<14) /* HALT2 */
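+
+/*
+ * Illustrative sketch (not part of the original sources): testing whether
+ * the PCM state machine reported in status register B has reached the
+ * ACTIVE state. The function name is hypothetical.
+ */
+#if 0
+static int plc_pcm_is_active(u_short status_b)
+{
+	return((status_b & PL_PCM_STATE) == PL_PC8) ;
+}
+#endif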
+
+
+/*
+ * PLC interrupt event register (PL_INTR_EVENT: log. addr. 0x17)
+ * It is read only and is cleared whenever it is read!
+ * It is used by the PLC to report events to the node processor.
+ */
+#define PL_PARITY_ERR 0x0001 /* p. error h.b.detected on TX9-0 inp*/
+#define PL_LS_MATCH 0x0002 /* l.s.== l.s. PLC_CNTRL_B's MATCH_LS*/
+#define PL_PCM_CODE 0x0004 /* transmit&receive | LCT complete */
+#define PL_TRACE_PROP 0x0008 /* master l.s. while PCM ACTIV|TRACE */
+#define PL_SELF_TEST 0x0010 /* QUIET|HALT while PCM in TRACE st. */
+#define PL_PCM_BREAK 0x0020 /* PCM has entered the BREAK state */
+#define PL_PCM_ENABLED 0x0040 /* asserted SC_JOIN, scrub. & ACTIV */
+#define PL_TPC_EXPIRED 0x0080 /* TPC timer reached zero */
+#define PL_TNE_EXPIRED 0x0100 /* TNE timer reached zero */
+#define PL_EBUF_ERR 0x0200 /* elastic buff. det. over-|underflow*/
+#define PL_PHYINV 0x0400 /* physical layer invalid signal */
+#define PL_VSYM_CTR 0x0800 /* violation symbol counter has incr.*/
+#define PL_MINI_CTR 0x1000 /* dep. on PLC_CNTRL_A's MINI_CTR_INT*/
+#define PL_LE_CTR 0x2000 /* link error event counter */
+#define PL_LSDO 0x4000 /* SDO input pin changed to a 1 */
+#define PL_NP_ERR 0x8000 /* NP has requested to r/w an inv. r.*/
+
+/*
+ * The PLC interrupt mask register (PL_INTR_MASK: log. addr. 0x02) has the
+ * same layout as the PL_INTR_EVENT register.
+ * For each bit set in the mask, the corresponding event bit generates an
+ * interrupt to the NP.
+ */
+
+#ifdef MOT_ELM
+/*
+ * Quad ELM Crossbar Control register values (QELM_XBAR_?)
+ */
+#define QELM_XOUT_IDLE 0x0000 /* Idles/Passthrough */
+#define QELM_XOUT_P 0x0001 /* Output to: Bus P */
+#define QELM_XOUT_S 0x0002 /* Output to: Bus S */
+#define QELM_XOUT_R 0x0003 /* Output to: Bus R */
+#define QELM_XOUT_W 0x0004 /* Output to: ELM W */
+#define QELM_XOUT_X 0x0005 /* Output to: ELM X */
+#define QELM_XOUT_Y 0x0006 /* Output to: ELM Y */
+#define QELM_XOUT_Z 0x0007 /* Output to: ELM Z */
+
+/*
+ * Quad ELM Interrupt data and event registers.
+ */
+#define QELM_NP_ERR (1<<15) /* Node Processor Error */
+#define QELM_COUNT_Z (1<<7) /* Counter Z Interrupt */
+#define QELM_COUNT_Y (1<<6) /* Counter Y Interrupt */
+#define QELM_COUNT_X (1<<5) /* Counter X Interrupt */
+#define QELM_COUNT_W (1<<4) /* Counter W Interrupt */
+#define QELM_ELM_Z (1<<3) /* ELM Z Interrupt */
+#define QELM_ELM_Y (1<<2) /* ELM Y Interrupt */
+#define QELM_ELM_X (1<<1) /* ELM X Interrupt */
+#define QELM_ELM_W (1<<0) /* ELM W Interrupt */
+#endif /* MOT_ELM */
+/*
+ * PLC Timing Parameters
+ */
+#define TP_C_MIN 0xff9c /* 2 ms */
+#define TP_TL_MIN 0xfff0 /* 0.3 ms */
+#define TP_TB_MIN 0xff10 /* 5 ms */
+#define TP_T_OUT 0xd9db /* 200 ms */
+#define TP_LC_LENGTH 0xf676 /* 50 ms */
+#define TP_LC_LONGLN 0xa0a2 /* 500 ms */
+#define TP_T_SCRUB 0xff6d /* 3.5 ms */
+#define TP_NS_MAX 0xf021 /* 1.3 ms */
+
+/*
+ * BIST values
+ */
+#define PLC_BIST 0x6ecd /* BIST signature for PLC */
+#define PLCS_BIST 0x5b6b /* BIST signature for PLC-S */
+#define PLC_ELM_B_BIST 0x6ecd /* BIST signature of ELM Rev. B */
+#define PLC_ELM_D_BIST 0x5b6b /* BIST signature of ELM Rev. D */
+#define PLC_CAM_A_BIST 0x9e75 /* BIST signature of CAMEL Rev. A */
+#define PLC_CAM_B_BIST 0x5b6b /* BIST signature of CAMEL Rev. B */
+#define PLC_IFD_A_BIST 0x9e75 /* BIST signature of IFDDI Rev. A */
+#define PLC_IFD_B_BIST 0x5b6b /* BIST signature of IFDDI Rev. B */
+#define PLC_QELM_A_BIST 0x5b6b /* BIST signature of QELM Rev. A */
+
+/*
+ FDDI board resources
+ */
+
+/*
+ * request register array (log. addr: RQA_A + a<<1 {a=0..7}) write only.
+ * It specifies to FORMAC+ the type of buffer memory access the host requires.
+ */
+#define RQ_NOT 0 /* no request */
+#define RQ_RES 1 /* reserved */
+#define RQ_SFW 2 /* special frame write */
+#define RQ_RRQ 3 /* read request: receive queue */
+#define RQ_WSQ 4 /* write request: synchronous queue */
+#define RQ_WA0 5 /* write requ.: asynchronous queue 0 */
+#define RQ_WA1 6 /* write requ.: asynchronous queue 1 */
+#define RQ_WA2 7 /* write requ.: asynchronous queue 2 */
+
+#define SZ_LONG (sizeof(long))
+
+/*
+ * FDDI defaults
+ * NOTE : In the ANSI docs, times are specified in units of "symbol time".
+ * AMD chips use BCLK as unit. 1 BCLK == 2 symbols
+ */
+#define COMPLREF ((u_long)32*256*256) /* two's complement 21 bit */
+#define MSTOBCLK(x) ((u_long)(x)*12500L)
+#define MSTOTVX(x) (((u_long)(x)*1000L)/80/255)
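+
+/*
+ * Worked example (editorial note, not part of the original sources):
+ * MSTOBCLK(5) == 62500, i.e. 5 ms expressed in bus clocks, which matches
+ * the 80 ns (12.5 MHz) BCLK implied by the factor 12500.
+ */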
+
+#endif /* _SUPERNET_ */
diff --git a/drivers/net/skfp/h/targethw.h b/drivers/net/skfp/h/targethw.h
new file mode 100644
index 000000000000..22c4923241d3
--- /dev/null
+++ b/drivers/net/skfp/h/targethw.h
@@ -0,0 +1,169 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef _TARGETHW_
+#define _TARGETHW_
+
+ /*
+ * PCI Watermark definition
+ */
+#ifdef PCI
+#define RX_WATERMARK 24
+#define TX_WATERMARK 24
+#define SK_ML_ID_1 0x20
+#define SK_ML_ID_2 0x30
+#endif
+
+#include "h/skfbi.h"
+#ifndef TAG_MODE
+#include "h/fplus.h"
+#else
+#include "h/fplustm.h"
+#endif
+
+#ifndef HW_PTR
+#define HW_PTR void __iomem *
+#endif
+
+#ifdef MULT_OEM
+#define OI_STAT_LAST 0 /* end of OEM data base */
+#define OI_STAT_PRESENT 1 /* entry present but not empty */
+#define OI_STAT_VALID 2 /* holds valid ID, but is not active */
+#define OI_STAT_ACTIVE 3 /* holds valid ID, entry is active */
+ /* active = adapter is supported */
+
+/* Memory representation of IDs must match representation in adapter. */
+struct s_oem_ids {
+ u_char oi_status ; /* Stat: last, present, valid, active */
+ u_char oi_mark[5] ; /* "PID00" .. "PID07" .. */
+ u_char oi_id[4] ; /* id bytes, representation as */
+ /* defined by hardware, */
+#ifdef PCI
+ u_char oi_sub_id[4] ; /* sub id bytes, representation as */
+ /* defined by hardware, */
+#endif
+#ifdef ISA
+ u_char oi_logo_len ; /* the length of the adapter logo */
+ u_char oi_logo[6] ; /* the adapter logo */
+ u_char oi_reserved1 ;
+#endif /* ISA */
+} ;
+#endif /* MULT_OEM */
+
+
+struct s_smt_hw {
+ /*
+ * global
+ */
+ HW_PTR iop ; /* IO base address */
+ short dma ; /* DMA channel */
+ short irq ; /* IRQ level */
+ short eprom ; /* FLASH prom */
+#ifndef PCI
+ short DmaWriteExtraBytes ; /* add bytes for DMA write */
+#endif
+
+#ifndef SYNC
+ u_short n_a_send ; /* pending send requests */
+#endif
+
+#if (defined(EISA) || defined(MCA) || defined(PCI))
+ short slot ; /* slot number */
+ short max_slots ; /* maximum number of slots */
+#endif
+
+#if (defined(PCI) || defined(MCA))
+ short wdog_used ; /* TRUE if the watch dog is used */
+#endif
+
+#ifdef MCA
+ short slot_32 ; /* 32bit slot (1) or 16bit slot (0) */
+ short rev ; /* Board revision (FMx_REV). */
+ short VFullRead ; /* V_full value for DMA read */
+ short VFullWrite ; /* V_full value for DMA write */
+#endif
+
+#ifdef EISA
+ short led ; /* LED for FE card */
+
+ short dma_rmode ; /* read mode */
+ short dma_wmode ; /* write mode */
+ short dma_emode ; /* extend mode */
+
+ /* DMA controller channel dependent io addresses */
+ u_short dma_base_word_count ;
+ u_short dma_base_address ;
+ u_short dma_base_address_page ;
+#endif
+
+#ifdef PCI
+ u_short pci_handle ; /* handle to access the BIOS func */
+ u_long is_imask ; /* int mask for the int source reg */
+ u_long phys_mem_addr ; /* physical memory address */
+ u_short mc_dummy ; /* work around for MC compiler bug */
+ /*
+ * state of the hardware
+ */
+ u_short hw_state ; /* started or stopped */
+
+#define STARTED 1
+#define STOPPED 0
+
+ int hw_is_64bit ; /* do we have a 64 bit adapter */
+#endif
+
+#ifdef TAG_MODE
+ u_long pci_fix_value ; /* value parsed by PCIFIX */
+#endif
+
+ /*
+ * hwt.c
+ */
+ u_long t_start ; /* HWT start */
+ u_long t_stop ; /* HWT stop */
+ u_short timer_activ ; /* HWT timer active */
+
+ /*
+ * PIC
+ */
+ u_char pic_a1 ;
+ u_char pic_21 ;
+
+ /*
+ * GENERIC ; do not modify beyond this line
+ */
+
+ /*
+ * physical and canonical address
+ */
+ struct fddi_addr fddi_home_addr ;
+ struct fddi_addr fddi_canon_addr ;
+ struct fddi_addr fddi_phys_addr ;
+
+ /*
+ * mac variables
+ */
+ struct mac_parameter mac_pa ; /* tmin, tmax, tvx, treq .. */
+ struct mac_counter mac_ct ; /* recv., lost, error */
+ u_short mac_ring_is_up ; /* ring is up flag */
+
+ struct s_smt_fp fp ; /* formac+ */
+
+#ifdef MULT_OEM
+ struct s_oem_ids *oem_id ; /* pointer to selected id */
+ int oem_min_status ; /* IDs to take care of */
+#endif /* MULT_OEM */
+
+} ;
+#endif
diff --git a/drivers/net/skfp/h/targetos.h b/drivers/net/skfp/h/targetos.h
new file mode 100644
index 000000000000..5d940e7b8ea0
--- /dev/null
+++ b/drivers/net/skfp/h/targetos.h
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Operating system specific definitions for driver and
+ * hardware module.
+ */
+
+#ifndef TARGETOS_H
+#define TARGETOS_H
+
+
+//-------- those should go into include/linux/pci.h
+#define PCI_VENDOR_ID_SK 0x1148
+#define PCI_DEVICE_ID_SK_FP 0x4000
+//--------
+
+
+
+//-------- those should go into include/linux/if_fddi.h
+#define FDDI_MAC_HDR_LEN 13
+
+#define FDDI_RII 0x01 /* routing information bit */
+#define FDDI_RCF_DIR_BIT 0x80
+#define FDDI_RCF_LEN_MASK 0x1f
+#define FDDI_RCF_BROADCAST 0x8000
+#define FDDI_RCF_LIMITED_BROADCAST 0xA000
+#define FDDI_RCF_FRAME2K 0x20
+#define FDDI_RCF_FRAME4K 0x30
+//--------
+
+
+#undef ADDR
+
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+// ADDR is redefined by Linux, but we need our own definition
+#undef ADDR
+#ifdef MEM_MAPPED_IO
+#define ADDR(a) (smc->hw.iop+(a))
+#else
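+/*
+ * Editorial note: in the I/O mapped case, register offsets above 0x7f are
+ * apparently reached by writing the upper address bits to B0_RAP and then
+ * accessing the banked window at iop+0x80 (inferred from the macro below,
+ * not stated in the original sources).
+ */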
+#define ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), (smc->hw.iop+( ((a)&0x7F) | ((a)>>7 ? 0x80:0)) )) : (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
+#endif
+
+#include "h/hwmtm.h"
+
+#define TRUE 1
+#define FALSE 0
+
+// HWM Definitions
+// -----------------------
+#define FDDI_TRACE(string, arg1, arg2, arg3) // Performance analysis.
+#ifdef PCI
+#define NDD_TRACE(string, arg1, arg2, arg3) // Performance analysis.
+#endif // PCI
+#define SMT_PAGESIZE PAGE_SIZE // Size of a memory page (power of 2).
+// -----------------------
+
+
+// SMT Definitions
+// -----------------------
+#define TICKS_PER_SECOND HZ
+#define SMC_VERSION 1
+// -----------------------
+
+
+// OS-Driver Definitions
+// -----------------------
+#define NO_ADDRESS 0xffe0 /* No Device (I/O) Address */
+#define SKFP_MAX_NUM_BOARDS 8 /* maximum number of PCI boards */
+
+#define SK_BUS_TYPE_PCI 0
+#define SK_BUS_TYPE_EISA 1
+
+#define FP_IO_LEN 256 /* length of IO area used */
+
+#define u8 unsigned char
+#define u16 unsigned short
+#define u32 unsigned int
+
+#define MAX_TX_QUEUE_LEN 20 // number of packets queued by driver
+#define MAX_FRAME_SIZE 4550
+
+#define RX_LOW_WATERMARK NUM_RECEIVE_BUFFERS / 2
+#define TX_LOW_WATERMARK NUM_TRANSMIT_BUFFERS - 2
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define SKFPIOCTL SIOCDEVPRIVATE
+
+struct s_skfp_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised ioctl commands for the driver
+*/
+#define SKFP_GET_STATS 0x05 /* Get the driver statistics */
+#define SKFP_CLR_STATS 0x06 /* Zero out the driver statistics */
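+
+/*
+ * Illustrative user-space sketch (not part of the original sources),
+ * assuming the driver expects a pointer to struct s_skfp_ioctl in
+ * ifr_data, as is common for SIOCDEVPRIVATE ioctls; the device name
+ * "fddi0" is hypothetical:
+ *
+ *	struct ifreq ifr;
+ *	struct s_skfp_ioctl io;
+ *	struct fddi_statistics stats;
+ *
+ *	io.cmd  = SKFP_GET_STATS;
+ *	io.len  = sizeof(stats);
+ *	io.data = (unsigned char *) &stats;
+ *	strcpy(ifr.ifr_name, "fddi0");
+ *	ifr.ifr_data = (char *) &io;
+ *	ioctl(sock, SKFPIOCTL, &ifr);
+ */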
+
+// The per-adapter driver structure
+struct s_smt_os {
+ struct net_device *dev;
+ struct net_device *next_module;
+ u32 bus_type; /* bus type (0 == PCI, 1 == EISA) */
+ struct pci_dev pdev; /* PCI device structure */
+
+ unsigned long base_addr;
+ unsigned char factory_mac_addr[8];
+ ulong SharedMemSize;
+ ulong SharedMemHeap;
+ void* SharedMemAddr;
+ dma_addr_t SharedMemDMA;
+
+ ulong QueueSkb;
+ struct sk_buff_head SendSkbQueue;
+
+ ulong MaxFrameSize;
+ u8 ResetRequested;
+
+ // MAC statistics structure
+ struct fddi_statistics MacStat;
+
+ // receive into this local buffer if no skb available
+ // data will not be valid, because multiple RxDs can
+ // point here at the same time, it must be at least
+ // MAX_FRAME_SIZE bytes in size
+ unsigned char *LocalRxBuffer;
+ dma_addr_t LocalRxBufferDMA;
+
+ // Version (required by SMT module).
+ u_long smc_version ;
+
+ // Required by Hardware Module (HWM).
+ struct hw_modul hwm ;
+
+ // For SMP-safety
+ spinlock_t DriverLock;
+
+};
+
+typedef struct s_smt_os skfddi_priv;
+
+#endif // TARGETOS_H
diff --git a/drivers/net/skfp/h/types.h b/drivers/net/skfp/h/types.h
new file mode 100644
index 000000000000..5a3bf8378f9e
--- /dev/null
+++ b/drivers/net/skfp/h/types.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#include <linux/types.h>
+/*
+ ----------------------
+ Basic SMT system types
+ ----------------------
+*/
+#ifndef _TYPES_
+#define _TYPES_
+
+#define _packed
+#ifndef far
+#define far
+#endif
+#ifndef _far
+#define _far
+#endif
+
+#define inp(p) ioread8(p)
+#define inpw(p) ioread16(p)
+#define inpd(p) ioread32(p)
+#define outp(p,c) iowrite8(c,p)
+#define outpw(p,s) iowrite16(s,p)
+#define outpd(p,l) iowrite32(l,p)
+
+#endif /* _TYPES_ */
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
new file mode 100644
index 000000000000..18d429021edb
--- /dev/null
+++ b/drivers/net/skfp/hwmtm.c
@@ -0,0 +1,2219 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#ifndef lint
+static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ;
+#endif
+
+#define HWMTM
+
+#ifndef FDDI
+#define FDDI
+#endif
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/supern_2.h"
+#include "h/skfbiinc.h"
+
+/*
+ -------------------------------------------------------------
+ DOCUMENTATION
+ -------------------------------------------------------------
+ BEGIN_MANUAL_ENTRY(DOCUMENTATION)
+
+ T B D
+
+ END_MANUAL_ENTRY
+*/
+/*
+ -------------------------------------------------------------
+ LOCAL VARIABLES:
+ -------------------------------------------------------------
+*/
+#ifdef COMMON_MB_POOL
+static SMbuf *mb_start = 0 ;
+static SMbuf *mb_free = 0 ;
+static int mb_init = FALSE ;
+static int call_count = 0 ;
+#endif
+
+/*
+ -------------------------------------------------------------
+ EXTERNAL VARIABLES:
+ -------------------------------------------------------------
+*/
+
+#ifdef DEBUG
+#ifndef DEBUG_BRD
+extern struct smt_debug debug ;
+#endif
+#endif
+
+#ifdef NDIS_OS2
+extern u_char offDepth ;
+extern u_char force_irq_pending ;
+#endif
+
+/*
+ -------------------------------------------------------------
+ LOCAL FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
+static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
+static void init_txd_ring(struct s_smc *smc);
+static void init_rxd_ring(struct s_smc *smc);
+static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
+static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start,
+ int count);
+static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
+static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
+static SMbuf* get_llc_rx(struct s_smc *smc);
+static SMbuf* get_txd_mb(struct s_smc *smc);
+
+/*
+ -------------------------------------------------------------
+ EXTERNAL FUNCTIONS:
+ -------------------------------------------------------------
+*/
+/* The external SMT functions are listed in cmtdef.h */
+
+extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
+extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
+extern void init_board(struct s_smc *smc, u_char *mac_addr);
+extern void mac_drv_fill_rxd(struct s_smc *smc);
+extern void plc1_irq(struct s_smc *smc);
+extern void mac_drv_tx_complete(struct s_smc *smc,
+ volatile struct s_smt_fp_txd *txd);
+extern void plc2_irq(struct s_smc *smc);
+extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+extern void timer_irq(struct s_smc *smc);
+extern void mac_drv_rx_complete(struct s_smc *smc,
+ volatile struct s_smt_fp_rxd *rxd,
+ int frag_count, int len);
+extern void mac_drv_requeue_rxd(struct s_smc *smc,
+ volatile struct s_smt_fp_rxd *rxd,
+ int frag_count);
+extern void init_plc(struct s_smc *smc);
+extern void mac_drv_clear_rxd(struct s_smc *smc,
+ volatile struct s_smt_fp_rxd *rxd, int frag_count);
+
+#ifdef USE_OS_CPY
+extern void hwm_cpy_rxd2mb(void);
+extern void hwm_cpy_txd2mb(void);
+#endif
+
+#ifdef ALL_RX_COMPLETE
+extern void mac_drv_all_receives_complete(void);
+#endif
+
+extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
+extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);
+
+#ifdef NDIS_OS2
+extern void post_proc(void);
+#else
+extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
+ int flag);
+#endif
+
+extern int init_fplus(struct s_smc *smc);
+extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
+ int la_len);
+
+/*
+ -------------------------------------------------------------
+ PUBLIC FUNCTIONS:
+ -------------------------------------------------------------
+*/
+void process_receive(struct s_smc *smc);
+void fddi_isr(struct s_smc *smc);
+void mac_drv_clear_txd(struct s_smc *smc);
+void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
+void init_driver_fplus(struct s_smc *smc);
+void mac_drv_rx_mode(struct s_smc *smc, int mode);
+void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
+void mac_drv_clear_tx_queue(struct s_smc *smc);
+void mac_drv_clear_rx_queue(struct s_smc *smc);
+void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
+ int frame_status);
+void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
+ int frame_status);
+
+int mac_drv_rx_frag(struct s_smc *smc, void far *virt, int len);
+int mac_drv_init(struct s_smc *smc);
+int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
+ int frame_status);
+
+u_int mac_drv_check_space(void);
+
+SMbuf* smt_get_mbuf(struct s_smc *smc);
+
+#ifdef DEBUG
+ void mac_drv_debug_lev(void);
+#endif
+
+/*
+ -------------------------------------------------------------
+ MACROS:
+ -------------------------------------------------------------
+*/
+#ifndef UNUSED
+#ifdef lint
+#define UNUSED(x) (x) = (x)
+#else
+#define UNUSED(x)
+#endif
+#endif
+
+#ifdef USE_CAN_ADDR
+#define MA smc->hw.fddi_canon_addr.a
+#define GROUP_ADDR_BIT 0x01
+#else
+#define MA smc->hw.fddi_home_addr.a
+#define GROUP_ADDR_BIT 0x80
+#endif
+
+#define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
+ SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)
+
+#ifdef MB_OUTSIDE_SMC
+#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
+ MAX_MBUF*sizeof(SMbuf))
+#define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
+#else
+#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
+#endif
+
+ /*
+ * define critical read for 16 Bit drivers
+ */
+#if defined(NDIS_OS2) || defined(ODI2)
+#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
+#else
+#define CR_READ(var) (u_long)(var)
+#endif
+
+#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
+ IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
+ IS_R1_C | IS_XA_C | IS_XS_C)
+
+/*
+ -------------------------------------------------------------
+ INIT- AND SMT FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_check_space)
+ * u_int mac_drv_check_space()
+ *
+ * function DOWNCALL (drvsr.c)
+ * This function calculates the non-virtual memory needed by
+ * the driver for MBufs, RxD and TxD descriptors etc.
+ *
+ * return u_int memory in bytes
+ *
+ * END_MANUAL_ENTRY
+ */
+u_int mac_drv_check_space(void)
+{
+#ifdef MB_OUTSIDE_SMC
+#ifdef COMMON_MB_POOL
+ call_count++ ;
+ if (call_count == 1) {
+ return(EXT_VIRT_MEM) ;
+ }
+ else {
+ return(EXT_VIRT_MEM_2) ;
+ }
+#else
+ return (EXT_VIRT_MEM) ;
+#endif
+#else
+ return (0) ;
+#endif
+}
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_init)
+ * void mac_drv_init(smc)
+ *
+ * function DOWNCALL (drvsr.c)
+ * In this function the hardware module allocates its
+ * memory.
+ * The operating system dependent module should call
+ * mac_drv_init once, after the adapter is detected.
+ * END_MANUAL_ENTRY
+ */
+int mac_drv_init(struct s_smc *smc)
+{
+ if (sizeof(struct s_smt_fp_rxd) % 16) {
+ SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
+ }
+ if (sizeof(struct s_smt_fp_txd) % 16) {
+ SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
+ }
+
+ /*
+ * get the required memory for the RxDs and TxDs
+ */
+ if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
+ mac_drv_get_desc_mem(smc,(u_int)
+ (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
+		return(1) ;	/* no space, the hwm module can't work */
+ }
+
+ /*
+ * get the memory for the SMT MBufs
+ */
+#ifndef MB_OUTSIDE_SMC
+ smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
+#else
+#ifndef COMMON_MB_POOL
+ if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
+ MAX_MBUF*sizeof(SMbuf)))) {
+		return(1) ;	/* no space, the hwm module can't work */
+ }
+#else
+ if (!mb_start) {
+ if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
+ MAX_MBUF*sizeof(SMbuf)))) {
+			return(1) ;	/* no space, the hwm module can't work */
+ }
+ }
+#endif
+#endif
+ return (0) ;
+}
+
+/*
+ * BEGIN_MANUAL_ENTRY(init_driver_fplus)
+ * init_driver_fplus(smc)
+ *
+ *	Sets hardware module specific values for the mode register 2
+ * (e.g. the byte alignment for the received frames, the position of the
+ * least significant byte etc.)
+ * END_MANUAL_ENTRY
+ */
+void init_driver_fplus(struct s_smc *smc)
+{
+ smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;
+
+#ifdef PCI
+ smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
+#endif
+ smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;
+
+#ifdef USE_CAN_ADDR
+ /* enable address bit swapping */
+ smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
+#endif
+}
+
+static u_long init_descr_ring(struct s_smc *smc,
+ union s_fp_descr volatile *start,
+ int count)
+{
+ int i ;
+ union s_fp_descr volatile *d1 ;
+ union s_fp_descr volatile *d2 ;
+ u_long phys ;
+
+ DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
+ for (i=count-1, d1=start; i ; i--) {
+ d2 = d1 ;
+ d1++ ; /* descr is owned by the host */
+ d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+ d2->r.rxd_next = &d1->r ;
+ phys = mac_drv_virt2phys(smc,(void *)d1) ;
+ d2->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+ }
+ DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
+ d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+ d1->r.rxd_next = &start->r ;
+ phys = mac_drv_virt2phys(smc,(void *)start) ;
+ d1->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+
+ for (i=count, d1=start; i ; i--) {
+ DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
+ d1++;
+ }
+ return(phys) ;
+}
+
+static void init_txd_ring(struct s_smc *smc)
+{
+ struct s_smt_fp_txd volatile *ds ;
+ struct s_smt_tx_queue *queue ;
+ u_long phys ;
+
+ /*
+ * initialize the transmit descriptors
+ */
+ ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
+ SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
+ queue = smc->hw.fp.tx[QUEUE_A0] ;
+ DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
+ (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
+ HWM_ASYNC_TXD_COUNT) ;
+ phys = AIX_REVERSE(ds->txd_ntdadr) ;
+ ds++ ;
+ queue->tx_curr_put = queue->tx_curr_get = ds ;
+ ds-- ;
+ queue->tx_free = HWM_ASYNC_TXD_COUNT ;
+ queue->tx_used = 0 ;
+ outpd(ADDR(B5_XA_DA),phys) ;
+
+ ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
+ HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
+ queue = smc->hw.fp.tx[QUEUE_S] ;
+ DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
+ (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
+ HWM_SYNC_TXD_COUNT) ;
+ phys = AIX_REVERSE(ds->txd_ntdadr) ;
+ ds++ ;
+ queue->tx_curr_put = queue->tx_curr_get = ds ;
+ queue->tx_free = HWM_SYNC_TXD_COUNT ;
+ queue->tx_used = 0 ;
+ outpd(ADDR(B5_XS_DA),phys) ;
+}
+
+static void init_rxd_ring(struct s_smc *smc)
+{
+ struct s_smt_fp_rxd volatile *ds ;
+ struct s_smt_rx_queue *queue ;
+ u_long phys ;
+
+ /*
+ * initialize the receive descriptors
+ */
+ ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
+ queue = smc->hw.fp.rx[QUEUE_R1] ;
+ DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
+ (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
+ SMT_R1_RXD_COUNT) ;
+ phys = AIX_REVERSE(ds->rxd_nrdadr) ;
+ ds++ ;
+ queue->rx_curr_put = queue->rx_curr_get = ds ;
+ queue->rx_free = SMT_R1_RXD_COUNT ;
+ queue->rx_used = 0 ;
+ outpd(ADDR(B4_R1_DA),phys) ;
+}
+
+/*
+ * BEGIN_MANUAL_ENTRY(init_fddi_driver)
+ * void init_fddi_driver(smc,mac_addr)
+ *
+ *	initializes the driver and its variables
+ *
+ * END_MANUAL_ENTRY
+ */
+void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
+{
+ SMbuf *mb ;
+ int i ;
+
+ init_board(smc,mac_addr) ;
+ (void)init_fplus(smc) ;
+
+ /*
+ * initialize the SMbufs for the SMT
+ */
+#ifndef COMMON_MB_POOL
+ mb = smc->os.hwm.mbuf_pool.mb_start ;
+ smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
+ for (i = 0; i < MAX_MBUF; i++) {
+ mb->sm_use_count = 1 ;
+ smt_free_mbuf(smc,mb) ;
+ mb++ ;
+ }
+#else
+ mb = mb_start ;
+ if (!mb_init) {
+ mb_free = 0 ;
+ for (i = 0; i < MAX_MBUF; i++) {
+ mb->sm_use_count = 1 ;
+ smt_free_mbuf(smc,mb) ;
+ mb++ ;
+ }
+ mb_init = TRUE ;
+ }
+#endif
+
+ /*
+ * initialize the other variables
+ */
+ smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
+ smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
+ smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
+ smc->os.hwm.pass_llc_promisc = TRUE ;
+ smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
+ smc->os.hwm.detec_count = 0 ;
+ smc->os.hwm.rx_break = 0 ;
+ smc->os.hwm.rx_len_error = 0 ;
+ smc->os.hwm.isr_flag = FALSE ;
+
+ /*
+ * make sure that the start pointer is 16 byte aligned
+ */
+ i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
+ if (i != 16) {
+ DB_GEN("i = %d",i,0,3) ;
+ smc->os.hwm.descr_p = (union s_fp_descr volatile *)
+ ((char *)smc->os.hwm.descr_p+i) ;
+ }
+ DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;
+
+ init_txd_ring(smc) ;
+ init_rxd_ring(smc) ;
+ mac_drv_fill_rxd(smc) ;
+
+ init_plc(smc) ;
+}
+
+
+SMbuf *smt_get_mbuf(struct s_smc *smc)
+{
+ register SMbuf *mb ;
+
+#ifndef COMMON_MB_POOL
+ mb = smc->os.hwm.mbuf_pool.mb_free ;
+#else
+ mb = mb_free ;
+#endif
+ if (mb) {
+#ifndef COMMON_MB_POOL
+ smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
+#else
+ mb_free = mb->sm_next ;
+#endif
+ mb->sm_off = 8 ;
+ mb->sm_use_count = 1 ;
+ }
+ DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
+ return (mb) ; /* May be NULL */
+}
+
+void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
+{
+
+ if (mb) {
+ mb->sm_use_count-- ;
+ DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;
+ /*
+ * If the use_count is != zero the MBuf is queued
+		 * more than once and must not be queued into the
+ * free MBuf queue
+ */
+ if (!mb->sm_use_count) {
+ DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
+#ifndef COMMON_MB_POOL
+ mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
+ smc->os.hwm.mbuf_pool.mb_free = mb ;
+#else
+ mb->sm_next = mb_free ;
+ mb_free = mb ;
+#endif
+ }
+ }
+ else
+ SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
+}
+
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
+ * void mac_drv_repair_descr(smc)
+ *
+ * function called from SMT (HWM / hwmtm.c)
+ * The BMU is idle when this function is called.
+ * Mac_drv_repair_descr sets up the physical address
+ * for all receive and transmit queues where the BMU
+ * should continue.
+ *	It may be that the BMU was reset during a fragmented
+ *	transfer. In this case there are some fragments which will
+ *	never be completed by the BMU. The OWN bit of these fragments
+ *	must be switched so that they are owned by the host.
+ *
+ * Give a start command to the receive BMU.
+ *	Start the transmit BMUs if transmit frames are pending.
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_repair_descr(struct s_smc *smc)
+{
+ u_long phys ;
+
+ if (smc->hw.hw_state != STOPPED) {
+ SK_BREAK() ;
+ SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
+ return ;
+ }
+
+ /*
+ * repair tx queues: don't start
+ */
+ phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
+ outpd(ADDR(B5_XA_DA),phys) ;
+ if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
+ outpd(ADDR(B0_XA_CSR),CSR_START) ;
+ }
+ phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
+ outpd(ADDR(B5_XS_DA),phys) ;
+ if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
+ outpd(ADDR(B0_XS_CSR),CSR_START) ;
+ }
+
+ /*
+ * repair rx queues
+ */
+ phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
+ outpd(ADDR(B4_R1_DA),phys) ;
+ outpd(ADDR(B0_R1_CSR),CSR_START) ;
+}
+
+static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
+{
+ int i ;
+ int tx_used ;
+ u_long phys ;
+ u_long tbctrl ;
+ struct s_smt_fp_txd volatile *t ;
+
+ SK_UNUSED(smc) ;
+
+ t = queue->tx_curr_get ;
+ tx_used = queue->tx_used ;
+ for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
+ t = t->txd_next ;
+ }
+ phys = AIX_REVERSE(t->txd_ntdadr) ;
+
+ t = queue->tx_curr_get ;
+ while (tx_used) {
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
+ tbctrl = AIX_REVERSE(t->txd_tbctrl) ;
+
+ if (tbctrl & BMU_OWN) {
+ if (tbctrl & BMU_STF) {
+ break ; /* exit the loop */
+ }
+ else {
+ /*
+ * repair the descriptor
+ */
+ t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ }
+ }
+ phys = AIX_REVERSE(t->txd_ntdadr) ;
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ t = t->txd_next ;
+ tx_used-- ;
+ }
+ return(phys) ;
+}
+
+/*
+ * Repairs the receive descriptor ring and returns the physical address
+ * where the BMU should continue working.
+ *
+ * o The physical address where the BMU was stopped has to be
+ * determined. This is the next RxD after rx_curr_get with an OWN
+ * bit set.
+ * o The BMU should start working at beginning of the next frame.
+ * RxDs with an OWN bit set but with a reset STF bit should be
+ * skipped and owned by the driver (OWN = 0).
+ */
+static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
+{
+ int i ;
+ int rx_used ;
+ u_long phys ;
+ u_long rbctrl ;
+ struct s_smt_fp_rxd volatile *r ;
+
+ SK_UNUSED(smc) ;
+
+ r = queue->rx_curr_get ;
+ rx_used = queue->rx_used ;
+ for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
+ r = r->rxd_next ;
+ }
+ phys = AIX_REVERSE(r->rxd_nrdadr) ;
+
+ r = queue->rx_curr_get ;
+ while (rx_used) {
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ rbctrl = AIX_REVERSE(r->rxd_rbctrl) ;
+
+ if (rbctrl & BMU_OWN) {
+ if (rbctrl & BMU_STF) {
+ break ; /* exit the loop */
+ }
+ else {
+ /*
+ * repair the descriptor
+ */
+ r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ }
+ }
+ phys = AIX_REVERSE(r->rxd_nrdadr) ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
+ r = r->rxd_next ;
+ rx_used-- ;
+ }
+ return(phys) ;
+}
+
+
+/*
+ -------------------------------------------------------------
+ INTERRUPT SERVICE ROUTINE:
+ -------------------------------------------------------------
+*/
+
+/*
+ * BEGIN_MANUAL_ENTRY(fddi_isr)
+ * void fddi_isr(smc)
+ *
+ * function DOWNCALL (drvsr.c)
+ * interrupt service routine, handles the interrupt requests
+ * generated by the FDDI adapter.
+ *
+ * NOTE: The operating system dependent module must guarantee that the
+ * interrupts of the adapter are disabled when it calls fddi_isr.
+ *
+ *	About the USE_BREAK_ISR mechanism:
+ *
+ *	The main requirement of this mechanism is to force a timer IRQ when
+ *	leaving process_receive() with leave_isr set. process_receive() may
+ *	be called at any time from anywhere!
+ *	To be sure we don't miss such an event we set 'force_irq' by default.
+ *	We have to force a Timer IRQ if 'smc->os.hwm.leave_isr' AND
+ * 'force_irq' are set. 'force_irq' may be reset if a receive complete
+ * IRQ is pending.
+ *
+ * END_MANUAL_ENTRY
+ */
+void fddi_isr(struct s_smc *smc)
+{
+ u_long is ; /* ISR source */
+ u_short stu, stl ;
+ SMbuf *mb ;
+
+#ifdef USE_BREAK_ISR
+ int force_irq ;
+#endif
+
+#ifdef ODI2
+ if (smc->os.hwm.rx_break) {
+ mac_drv_fill_rxd(smc) ;
+ if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
+ smc->os.hwm.rx_break = 0 ;
+ process_receive(smc) ;
+ }
+ else {
+ smc->os.hwm.detec_count = 0 ;
+ smt_force_irq(smc) ;
+ }
+ }
+#endif
+ smc->os.hwm.isr_flag = TRUE ;
+
+#ifdef USE_BREAK_ISR
+ force_irq = TRUE ;
+ if (smc->os.hwm.leave_isr) {
+ smc->os.hwm.leave_isr = FALSE ;
+ process_receive(smc) ;
+ }
+#endif
+
+ while ((is = GET_ISR() & ISR_MASK)) {
+ NDD_TRACE("CH0B",is,0,0) ;
+ DB_GEN("ISA = 0x%x",is,0,7) ;
+
+ if (is & IMASK_SLOW) {
+ NDD_TRACE("CH1b",is,0,0) ;
+ if (is & IS_PLINT1) { /* PLC1 */
+ plc1_irq(smc) ;
+ }
+ if (is & IS_PLINT2) { /* PLC2 */
+ plc2_irq(smc) ;
+ }
+ if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */
+ stu = inpw(FM_A(FM_ST1U)) ;
+ stl = inpw(FM_A(FM_ST1L)) ;
+ DB_GEN("Slow transmit complete",0,0,6) ;
+ mac1_irq(smc,stu,stl) ;
+ }
+ if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */
+ stu= inpw(FM_A(FM_ST2U)) ;
+ stl= inpw(FM_A(FM_ST2L)) ;
+ DB_GEN("Slow receive complete",0,0,6) ;
+ DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
+ mac2_irq(smc,stu,stl) ;
+ }
+ if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */
+ stu= inpw(FM_A(FM_ST3U)) ;
+ stl= inpw(FM_A(FM_ST3L)) ;
+ DB_GEN("FORMAC Mode Register 3",0,0,6) ;
+ mac3_irq(smc,stu,stl) ;
+ }
+ if (is & IS_TIMINT) { /* Timer 82C54-2 */
+ timer_irq(smc) ;
+#ifdef NDIS_OS2
+ force_irq_pending = 0 ;
+#endif
+ /*
+ * out of RxD detection
+ */
+ if (++smc->os.hwm.detec_count > 4) {
+ /*
+ * check out of RxD condition
+ */
+ process_receive(smc) ;
+ }
+ }
+ if (is & IS_TOKEN) { /* Restricted Token Monitor */
+ rtm_irq(smc) ;
+ }
+ if (is & IS_R1_P) { /* Parity error rx queue 1 */
+ /* clear IRQ */
+ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
+ SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
+ }
+ if (is & IS_R1_C) { /* Encoding error rx queue 1 */
+ /* clear IRQ */
+ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
+ SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
+ }
+ if (is & IS_XA_C) { /* Encoding error async tx q */
+ /* clear IRQ */
+ outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
+ SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
+ }
+ if (is & IS_XS_C) { /* Encoding error sync tx q */
+ /* clear IRQ */
+ outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
+ SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
+ }
+ }
+
+ /*
+ * Fast Tx complete Async/Sync Queue (BMU service)
+ */
+ if (is & (IS_XS_F|IS_XA_F)) {
+ DB_GEN("Fast tx complete queue",0,0,6) ;
+ /*
+ * clear IRQ, Note: no IRQ is lost, because
+ * we always service both queues
+ */
+ outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
+ outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
+ mac_drv_clear_txd(smc) ;
+ llc_restart_tx(smc) ;
+ }
+
+ /*
+ * Fast Rx Complete (BMU service)
+ */
+ if (is & IS_R1_F) {
+ DB_GEN("Fast receive complete",0,0,6) ;
+ /* clear IRQ */
+#ifndef USE_BREAK_ISR
+ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
+ process_receive(smc) ;
+#else
+ process_receive(smc) ;
+ if (smc->os.hwm.leave_isr) {
+ force_irq = FALSE ;
+ } else {
+ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
+ process_receive(smc) ;
+ }
+#endif
+ }
+
+#ifndef NDIS_OS2
+ while ((mb = get_llc_rx(smc))) {
+ smt_to_llc(smc,mb) ;
+ }
+#else
+ if (offDepth)
+ post_proc() ;
+
+ while (!offDepth && (mb = get_llc_rx(smc))) {
+ smt_to_llc(smc,mb) ;
+ }
+
+ if (!offDepth && smc->os.hwm.rx_break) {
+ process_receive(smc) ;
+ }
+#endif
+ if (smc->q.ev_get != smc->q.ev_put) {
+ NDD_TRACE("CH2a",0,0,0) ;
+ ev_dispatcher(smc) ;
+ }
+#ifdef NDIS_OS2
+ post_proc() ;
+ if (offDepth) { /* leave fddi_isr because */
+ break ; /* indications not allowed */
+ }
+#endif
+#ifdef USE_BREAK_ISR
+ if (smc->os.hwm.leave_isr) {
+ break ; /* leave fddi_isr */
+ }
+#endif
+
+ /* NOTE: when the isr is left, no rx is pending */
+ } /* end of interrupt source polling loop */
+
+#ifdef USE_BREAK_ISR
+ if (smc->os.hwm.leave_isr && force_irq) {
+ smt_force_irq(smc) ;
+ }
+#endif
+ smc->os.hwm.isr_flag = FALSE ;
+ NDD_TRACE("CH0E",0,0,0) ;
+}
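+
+/*
+ * Editor's note: the sketch below is an illustrative example and is NOT
+ * part of the original driver.  It shows how an operating system dependent
+ * module might wrap fddi_isr() so that the adapter interrupts stay disabled
+ * while fddi_isr() runs, as required by the NOTE in the manual entry above.
+ * The helper names disable_adapter_irqs()/enable_adapter_irqs() and the
+ * wrapper itself are hypothetical placeholders for the OS specific code.
+ */
+#if 0	/* example only, not compiled */
+static void my_fddi_irq_wrapper(struct s_smc *smc)
+{
+	disable_adapter_irqs(smc) ;	/* OS specific: mask the adapter IRQ */
+	fddi_isr(smc) ;			/* poll and service all IRQ sources */
+	enable_adapter_irqs(smc) ;	/* OS specific: unmask the IRQ again */
+}
+#endif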
+
+
+/*
+ -------------------------------------------------------------
+ RECEIVE FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+#ifndef NDIS_OS2
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
+ * void mac_drv_rx_mode(smc,mode)
+ *
+ * function DOWNCALL (fplus.c)
+ *	Depending on the parameter mode, the operating system
+ *	dependent module can activate several receive modes.
+ *
+ * para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts
+ * = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts"
+ * = 3: RX_ENABLE_PROMISC enable promiscuous
+ * = 4: RX_DISABLE_PROMISC disable promiscuous
+ * = 5: RX_ENABLE_NSA enable rec. of all NSA frames
+ * (disabled after 'driver reset' & 'set station address')
+ * = 6: RX_DISABLE_NSA disable rec. of all NSA frames
+ *
+ * = 21: RX_ENABLE_PASS_SMT ( see description )
+ * = 22: RX_DISABLE_PASS_SMT ( " " )
+ * = 23: RX_ENABLE_PASS_NSA ( " " )
+ * = 24: RX_DISABLE_PASS_NSA ( " " )
+ * = 25: RX_ENABLE_PASS_DB ( " " )
+ * = 26: RX_DISABLE_PASS_DB ( " " )
+ * = 27: RX_DISABLE_PASS_ALL ( " " )
+ * = 28: RX_DISABLE_LLC_PROMISC ( " " )
+ * = 29: RX_ENABLE_LLC_PROMISC ( " " )
+ *
+ *
+ * RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
+ *
+ * If the operating system dependent module activates the
+ * mode RX_ENABLE_PASS_SMT, the hardware module
+ * duplicates all SMT frames with the frame control
+ * FC_SMT_INFO and passes them to the LLC receive channel
+ * by calling mac_drv_rx_init.
+ * The SMT Frames which are sent by the local SMT and the NSA
+ * frames whose A- and C-Indicator is not set are also duplicated
+ * and passed.
+ * The receive mode RX_DISABLE_PASS_SMT disables the passing
+ * of SMT frames.
+ *
+ * RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
+ *
+ * If the operating system dependent module activates the
+ * mode RX_ENABLE_PASS_NSA, the hardware module
+ * duplicates all NSA frames with frame control FC_SMT_NSA
+ *	and a set A-Indicator and passes them to the LLC
+ * receive channel by calling mac_drv_rx_init.
+ * All NSA Frames which are sent by the local SMT
+ * are also duplicated and passed.
+ * The receive mode RX_DISABLE_PASS_NSA disables the passing
+ * of NSA frames with the A- or C-Indicator set.
+ *
+ * NOTE: For fear that the hardware module receives NSA frames with
+ * a reset A-Indicator, the operating system dependent module
+ * has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
+ *	before activating the RX_ENABLE_PASS_NSA mode and after every
+ * 'driver reset' and 'set station address'.
+ *
+ * RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
+ *
+ * If the operating system dependent module activates the
+ * mode RX_ENABLE_PASS_DB, direct BEACON frames
+ * (FC_BEACON frame control) are passed to the LLC receive
+ * channel by mac_drv_rx_init.
+ * The receive mode RX_DISABLE_PASS_DB disables the passing
+ * of direct BEACON frames.
+ *
+ * RX_DISABLE_PASS_ALL
+ *
+ *	Disables all special receive modes. It is equivalent to
+ *	calling mac_drv_set_rx_mode successively with the
+ * parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
+ * RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
+ *
+ * RX_ENABLE_LLC_PROMISC
+ *
+ * (default) all received LLC frames and all SMT/NSA/DBEACON
+ *	frames depending on the setting of the flags
+ * PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
+ * LLC layer
+ *
+ * RX_DISABLE_LLC_PROMISC
+ *
+ * all received SMT/NSA/DBEACON frames depending on the
+ *	setting of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
+ * will be delivered to the LLC layer.
+ * all received LLC frames with a directed address, Multicast
+ * or Broadcast address will be delivered to the LLC
+ * layer too.
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_rx_mode(struct s_smc *smc, int mode)
+{
+ switch(mode) {
+ case RX_ENABLE_PASS_SMT:
+ smc->os.hwm.pass_SMT = TRUE ;
+ break ;
+ case RX_DISABLE_PASS_SMT:
+ smc->os.hwm.pass_SMT = FALSE ;
+ break ;
+ case RX_ENABLE_PASS_NSA:
+ smc->os.hwm.pass_NSA = TRUE ;
+ break ;
+ case RX_DISABLE_PASS_NSA:
+ smc->os.hwm.pass_NSA = FALSE ;
+ break ;
+ case RX_ENABLE_PASS_DB:
+ smc->os.hwm.pass_DB = TRUE ;
+ break ;
+ case RX_DISABLE_PASS_DB:
+ smc->os.hwm.pass_DB = FALSE ;
+ break ;
+ case RX_DISABLE_PASS_ALL:
+ smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
+ smc->os.hwm.pass_DB = FALSE ;
+ smc->os.hwm.pass_llc_promisc = TRUE ;
+ mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
+ break ;
+ case RX_DISABLE_LLC_PROMISC:
+ smc->os.hwm.pass_llc_promisc = FALSE ;
+ break ;
+ case RX_ENABLE_LLC_PROMISC:
+ smc->os.hwm.pass_llc_promisc = TRUE ;
+ break ;
+ case RX_ENABLE_ALLMULTI:
+ case RX_DISABLE_ALLMULTI:
+ case RX_ENABLE_PROMISC:
+ case RX_DISABLE_PROMISC:
+ case RX_ENABLE_NSA:
+ case RX_DISABLE_NSA:
+ default:
+ mac_set_rx_mode(smc,mode) ;
+ break ;
+ }
+}
+#endif /* ifndef NDIS_OS2 */
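+
+/*
+ * Editor's note: illustrative example only, NOT part of the original driver.
+ * It shows the calling order an operating system dependent module could use
+ * to activate NSA passing, following the NOTE in the manual entry above:
+ * RX_ENABLE_NSA must be (re)enabled before RX_ENABLE_PASS_NSA and after
+ * every 'driver reset' and 'set station address'.  The helper name
+ * my_enable_nsa_passing() is a hypothetical placeholder.
+ */
+#if 0	/* example only, not compiled */
+static void my_enable_nsa_passing(struct s_smc *smc)
+{
+	/* first make sure NSA frames are received at all ... */
+	mac_drv_rx_mode(smc,RX_ENABLE_NSA) ;
+	/* ... then let the hardware module pass them to the LLC channel */
+	mac_drv_rx_mode(smc,RX_ENABLE_PASS_NSA) ;
+}
+#endif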
+
+/*
+ * process receive queue
+ */
+void process_receive(struct s_smc *smc)
+{
+ int i ;
+ int n ;
+ int frag_count ; /* number of RxDs of the curr rx buf */
+ int used_frags ; /* number of RxDs of the curr frame */
+ struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */
+ struct s_smt_fp_rxd volatile *r ; /* rxd pointer */
+ struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */
+ u_long rbctrl ; /* receive buffer control word */
+ u_long rfsw ; /* receive frame status word */
+ u_short rx_used ;
+ u_char far *virt ;
+ char far *data ;
+ SMbuf *mb ;
+ u_char fc ; /* Frame control */
+ int len ; /* Frame length */
+
+ smc->os.hwm.detec_count = 0 ;
+ queue = smc->hw.fp.rx[QUEUE_R1] ;
+ NDD_TRACE("RHxB",0,0,0) ;
+ for ( ; ; ) {
+ r = queue->rx_curr_get ;
+ rx_used = queue->rx_used ;
+ frag_count = 0 ;
+
+#ifdef USE_BREAK_ISR
+ if (smc->os.hwm.leave_isr) {
+ goto rx_end ;
+ }
+#endif
+#ifdef NDIS_OS2
+ if (offDepth) {
+ smc->os.hwm.rx_break = 1 ;
+ goto rx_end ;
+ }
+ smc->os.hwm.rx_break = 0 ;
+#endif
+#ifdef ODI2
+ if (smc->os.hwm.rx_break) {
+ goto rx_end ;
+ }
+#endif
+ n = 0 ;
+ do {
+ DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ rbctrl = CR_READ(r->rxd_rbctrl) ;
+ rbctrl = AIX_REVERSE(rbctrl) ;
+
+ if (rbctrl & BMU_OWN) {
+ NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
+ DB_RX("End of RxDs",0,0,4) ;
+ goto rx_end ;
+ }
+ /*
+ * out of RxD detection
+ */
+ if (!rx_used) {
+ SK_BREAK() ;
+ SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
+ /* Either we don't have an RxD or all
+				 * RxDs are filled. Therefore it is allowed
+				 * to set the STOPPED flag */
+ smc->hw.hw_state = STOPPED ;
+ mac_drv_clear_rx_queue(smc) ;
+ smc->hw.hw_state = STARTED ;
+ mac_drv_fill_rxd(smc) ;
+ smc->os.hwm.detec_count = 0 ;
+ goto rx_end ;
+ }
+ rfsw = AIX_REVERSE(r->rxd_rfsw) ;
+ if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
+ /*
+				 * The BMU_STF bit is missing: one frame is
+				 * placed into more than one rx buffer
+ *
+ * skip frame by setting the rx len to 0
+ *
+ * if fragment count == 0
+ * The missing STF bit belongs to the
+ * current frame, search for the
+ * EOF bit to complete the frame
+ * else
+ * the fragment belongs to the next frame,
+ * exit the loop and process the frame
+ */
+ SK_BREAK() ;
+ rfsw = 0 ;
+ if (frag_count) {
+ break ;
+ }
+ }
+ n += rbctrl & 0xffff ;
+ r = r->rxd_next ;
+ frag_count++ ;
+ rx_used-- ;
+ } while (!(rbctrl & BMU_EOF)) ;
+ used_frags = frag_count ;
+ DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;
+
+		/* maybe the next 2 DRV_BUF_FLUSH() can be skipped, because */
+ /* BMU_ST_BUF will not be changed by the ASIC */
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+ DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+ r = r->rxd_next ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ frag_count++ ;
+ rx_used-- ;
+ }
+ DB_RX("STF bit found",0,0,5) ;
+
+ /*
+ * The received frame is finished for the process receive
+ */
+ rxd = queue->rx_curr_get ;
+ queue->rx_curr_get = r ;
+ queue->rx_free += frag_count ;
+ queue->rx_used = rx_used ;
+
+ /*
+ * ASIC Errata no. 7 (STF - Bit Bug)
+ */
+ rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ;
+
+ for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
+ DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+ dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
+ }
+ smc->hw.fp.err_stats.err_valid++ ;
+ smc->mib.m[MAC0].fddiMACCopied_Ct++ ;
+
+ /* the length of the data including the FC */
+ len = (rfsw & RD_LENGTH) - 4 ;
+
+ DB_RX("frame length = %d",len,0,4) ;
+ /*
+		 * check the frame_length and all error flags
+ */
+ if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
+ if (rfsw & RD_S_MSRABT) {
+ DB_RX("Frame aborted by the FORMAC",0,0,2) ;
+ smc->hw.fp.err_stats.err_abort++ ;
+ }
+ /*
+ * check frame status
+ */
+ if (rfsw & RD_S_SEAC2) {
+ DB_RX("E-Indicator set",0,0,2) ;
+ smc->hw.fp.err_stats.err_e_indicator++ ;
+ }
+ if (rfsw & RD_S_SFRMERR) {
+ DB_RX("CRC error",0,0,2) ;
+ smc->hw.fp.err_stats.err_crc++ ;
+ }
+ if (rfsw & RX_FS_IMPL) {
+ DB_RX("Implementer frame",0,0,2) ;
+ smc->hw.fp.err_stats.err_imp_frame++ ;
+ }
+ goto abort_frame ;
+ }
+ if (len > FDDI_RAW_MTU-4) {
+ DB_RX("Frame too long error",0,0,2) ;
+ smc->hw.fp.err_stats.err_too_long++ ;
+ goto abort_frame ;
+ }
+ /*
+ * SUPERNET 3 Bug: FORMAC delivers status words
+		 * of aborted frames to the BMU
+ */
+ if (len <= 4) {
+ DB_RX("Frame length = 0",0,0,2) ;
+ goto abort_frame ;
+ }
+
+ if (len != (n-4)) {
+ DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
+ smc->os.hwm.rx_len_error++ ;
+ goto abort_frame ;
+ }
+
+ /*
+ * Check SA == MA
+ */
+ virt = (u_char far *) rxd->rxd_virt ;
+ DB_RX("FC = %x",*virt,0,2) ;
+ if (virt[12] == MA[5] &&
+ virt[11] == MA[4] &&
+ virt[10] == MA[3] &&
+ virt[9] == MA[2] &&
+ virt[8] == MA[1] &&
+ (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
+ goto abort_frame ;
+ }
+
+ /*
+ * test if LLC frame
+ */
+ if (rfsw & RX_FS_LLC) {
+ /*
+ * if pass_llc_promisc is disable
+ * if DA != Multicast or Broadcast or DA!=MA
+ * abort the frame
+ */
+ if (!smc->os.hwm.pass_llc_promisc) {
+ if(!(virt[1] & GROUP_ADDR_BIT)) {
+ if (virt[6] != MA[5] ||
+ virt[5] != MA[4] ||
+ virt[4] != MA[3] ||
+ virt[3] != MA[2] ||
+ virt[2] != MA[1] ||
+ virt[1] != MA[0]) {
+ DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
+ goto abort_frame ;
+ }
+ }
+ }
+
+ /*
+ * LLC frame received
+ */
+ DB_RX("LLC - receive",0,0,4) ;
+ mac_drv_rx_complete(smc,rxd,frag_count,len) ;
+ }
+ else {
+ if (!(mb = smt_get_mbuf(smc))) {
+ smc->hw.fp.err_stats.err_no_buf++ ;
+ DB_RX("No SMbuf; receive terminated",0,0,4) ;
+ goto abort_frame ;
+ }
+ data = smtod(mb,char *) - 1 ;
+
+ /*
+ * copy the frame into a SMT_MBuf
+ */
+#ifdef USE_OS_CPY
+ hwm_cpy_rxd2mb(rxd,data,len) ;
+#else
+ for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
+ n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ;
+ DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
+ memcpy(data,r->rxd_virt,n) ;
+ data += n ;
+ }
+ data = smtod(mb,char *) - 1 ;
+#endif
+ fc = *(char *)mb->sm_data = *data ;
+ mb->sm_len = len - 1 ; /* len - fc */
+ data++ ;
+
+ /*
+ * SMT frame received
+ */
+ switch(fc) {
+ case FC_SMT_INFO :
+ smc->hw.fp.err_stats.err_smt_frame++ ;
+ DB_RX("SMT frame received ",0,0,5) ;
+
+ if (smc->os.hwm.pass_SMT) {
+ DB_RX("pass SMT frame ",0,0,5) ;
+ mac_drv_rx_complete(smc, rxd,
+ frag_count,len) ;
+ }
+ else {
+ DB_RX("requeue RxD",0,0,5) ;
+ mac_drv_requeue_rxd(smc,rxd,frag_count);
+ }
+
+ smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
+ break ;
+ case FC_SMT_NSA :
+ smc->hw.fp.err_stats.err_smt_frame++ ;
+ DB_RX("SMT frame received ",0,0,5) ;
+
+ /* if pass_NSA set pass the NSA frame or */
+ /* pass_SMT set and the A-Indicator */
+ /* is not set, pass the NSA frame */
+ if (smc->os.hwm.pass_NSA ||
+ (smc->os.hwm.pass_SMT &&
+ !(rfsw & A_INDIC))) {
+ DB_RX("pass SMT frame ",0,0,5) ;
+ mac_drv_rx_complete(smc, rxd,
+ frag_count,len) ;
+ }
+ else {
+ DB_RX("requeue RxD",0,0,5) ;
+ mac_drv_requeue_rxd(smc,rxd,frag_count);
+ }
+
+ smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
+ break ;
+ case FC_BEACON :
+ if (smc->os.hwm.pass_DB) {
+ DB_RX("pass DB frame ",0,0,5) ;
+ mac_drv_rx_complete(smc, rxd,
+ frag_count,len) ;
+ }
+ else {
+ DB_RX("requeue RxD",0,0,5) ;
+ mac_drv_requeue_rxd(smc,rxd,frag_count);
+ }
+ smt_free_mbuf(smc,mb) ;
+ break ;
+ default :
+ /*
+				 * unknown FC: abort the frame
+ */
+ DB_RX("unknown FC error",0,0,2) ;
+ smt_free_mbuf(smc,mb) ;
+ DB_RX("requeue RxD",0,0,5) ;
+ mac_drv_requeue_rxd(smc,rxd,frag_count) ;
+ if ((fc & 0xf0) == FC_MAC)
+ smc->hw.fp.err_stats.err_mac_frame++ ;
+ else
+ smc->hw.fp.err_stats.err_imp_frame++ ;
+
+ break ;
+ }
+ }
+
+ DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+ NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
+
+ continue ;
+ /*--------------------------------------------------------------------*/
+abort_frame:
+ DB_RX("requeue RxD",0,0,5) ;
+ mac_drv_requeue_rxd(smc,rxd,frag_count) ;
+
+ DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+ NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
+ }
+rx_end:
+#ifdef ALL_RX_COMPLETE
+ mac_drv_all_receives_complete(smc) ;
+#endif
+	return ;	/* lint bug: needs return to detect end of function */
+}
+
+static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
+{
+ u_char fc ;
+
+ DB_RX("send a queued frame to the llc layer",0,0,4) ;
+ smc->os.hwm.r.len = mb->sm_len ;
+ smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
+ fc = *smc->os.hwm.r.mb_pos ;
+ (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
+ smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
+ smt_free_mbuf(smc,mb) ;
+}
+
+/*
+ * BEGIN_MANUAL_ENTRY(hwm_rx_frag)
+ * void hwm_rx_frag(smc,virt,phys,len,frame_status)
+ *
+ * function MACRO (hardware module, hwmtm.h)
+ * This function calls dma_master for preparing the
+ * system hardware for the DMA transfer and initializes
+ * the current RxD with the length and the physical and
+ * virtual address of the fragment. Furthermore, it sets the
+ * STF and EOF bits depending on the frame status byte,
+ * switches the OWN flag of the RxD, so that it is owned by the
+ * adapter and issues an rx_start.
+ *
+ * para virt virtual pointer to the fragment
+ * len the length of the fragment
+ * frame_status status of the frame, see design description
+ *
+ * NOTE: It is possible to call this function with a fragment length
+ * of zero.
+ *
+ * END_MANUAL_ENTRY
+ */
+void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
+ int frame_status)
+{
+ struct s_smt_fp_rxd volatile *r ;
+ u_int rbctrl ;
+
+ NDD_TRACE("RHfB",virt,len,frame_status) ;
+ DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
+ r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
+ r->rxd_virt = virt ;
+ r->rxd_rbadr = AIX_REVERSE(phys) ;
+ rbctrl = AIX_REVERSE( (((u_long)frame_status &
+ (FIRST_FRAG|LAST_FRAG))<<26) |
+ (((u_long) frame_status & FIRST_FRAG) << 21) |
+ BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
+ r->rxd_rbctrl = rbctrl ;
+
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
+ outpd(ADDR(B0_R1_CSR),CSR_START) ;
+ smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
+ smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
+ smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
+ NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ;
+}
+
+#ifndef NDIS_OS2
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_rx_frag)
+ * int mac_drv_rx_frag(smc,virt,len)
+ *
+ * function DOWNCALL (hwmtm.c)
+ * mac_drv_rx_frag fills the fragment with a part of the frame.
+ *
+ * para virt the virtual address of the fragment
+ * len the length in bytes of the fragment
+ *
+ * return 0: success code, no errors possible
+ *
+ * END_MANUAL_ENTRY
+ */
+int mac_drv_rx_frag(struct s_smc *smc, void far *virt, int len)
+{
+ NDD_TRACE("RHSB",virt,len,smc->os.hwm.r.mb_pos) ;
+
+ DB_RX("receive from queue: len/virt: = %d/%x",len,virt,4) ;
+ memcpy((char far *)virt,smc->os.hwm.r.mb_pos,len) ;
+ smc->os.hwm.r.mb_pos += len ;
+
+ NDD_TRACE("RHSE",smc->os.hwm.r.mb_pos,0,0) ;
+ return(0) ;
+}
+#endif
+
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
+ *
+ * void mac_drv_clear_rx_queue(smc)
+ * struct s_smc *smc ;
+ *
+ * function DOWNCALL (hardware module, hwmtm.c)
+ * mac_drv_clear_rx_queue is called by the OS-specific module
+ * after it has issued a card_stop.
+ * In this case, the frames in the receive queue are obsolete and
+ *	should be removed. To remove them, mac_drv_clear_rx_queue
+ *	calls dma_master for each RxD and mac_drv_clear_rxd for each
+ *	receive buffer.
+ *
+ * NOTE: calling sequence card_stop:
+ * CLI_FBI(), card_stop(),
+ * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
+ *
+ * NOTE: The caller is responsible that the BMUs are idle
+ * when this function is called.
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_clear_rx_queue(struct s_smc *smc)
+{
+ struct s_smt_fp_rxd volatile *r ;
+ struct s_smt_fp_rxd volatile *next_rxd ;
+ struct s_smt_rx_queue *queue ;
+ int frag_count ;
+ int i ;
+
+ if (smc->hw.hw_state != STOPPED) {
+ SK_BREAK() ;
+ SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
+ return ;
+ }
+
+ queue = smc->hw.fp.rx[QUEUE_R1] ;
+ DB_RX("clear_rx_queue",0,0,5) ;
+
+ /*
+ * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
+ */
+ r = queue->rx_curr_get ;
+ while (queue->rx_used) {
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
+ r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ frag_count = 1 ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
+ r = r->rxd_next ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ while (r != queue->rx_curr_put &&
+ !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+ DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+ r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
+ r = r->rxd_next ;
+ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
+ frag_count++ ;
+ }
+ DB_RX("STF bit found",0,0,5) ;
+ next_rxd = r ;
+
+ for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
+ DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+ dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
+ }
+
+ DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
+ (void *)queue->rx_curr_get,frag_count,5) ;
+ mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
+
+ queue->rx_curr_get = next_rxd ;
+ queue->rx_used -= frag_count ;
+ queue->rx_free += frag_count ;
+ }
+}
+
+
+/*
+ -------------------------------------------------------------
+ SEND FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+/*
+ * BEGIN_MANUAL_ENTRY(hwm_tx_init)
+ * int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
+ *
+ * function DOWN_CALL (hardware module, hwmtm.c)
+ * hwm_tx_init checks if the frame can be sent through the
+ * corresponding send queue.
+ *
+ * para fc the frame control. To determine through which
+ * send queue the frame should be transmitted.
+ * 0x50 - 0x57: asynchronous LLC frame
+ * 0xD0 - 0xD7: synchronous LLC frame
+ * 0x41, 0x4F: SMT frame to the network
+ * 0x42: SMT frame to the network and to the local SMT
+ * 0x43: SMT frame to the local SMT
+ * frag_count count of the fragments for this frame
+ * frame_len length of the frame
+ * frame_status status of the frame, the send queue bit is already
+ * specified
+ *
+ * return frame_status
+ *
+ * END_MANUAL_ENTRY
+ */
+int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
+ int frame_status)
+{
+ NDD_TRACE("THiB",fc,frag_count,frame_len) ;
+ smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
+ smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
+ smc->os.hwm.tx_len = frame_len ;
+ DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
+ if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
+ frame_status |= LAN_TX ;
+ }
+ else {
+ switch (fc) {
+ case FC_SMT_INFO :
+ case FC_SMT_NSA :
+ frame_status |= LAN_TX ;
+ break ;
+ case FC_SMT_LOC :
+ frame_status |= LOC_TX ;
+ break ;
+ case FC_SMT_LAN_LOC :
+ frame_status |= LAN_TX | LOC_TX ;
+ break ;
+ default :
+ SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
+ }
+ }
+ if (!smc->hw.mac_ring_is_up) {
+ frame_status &= ~LAN_TX ;
+ frame_status |= RING_DOWN ;
+ DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+ }
+ if (frag_count > smc->os.hwm.tx_p->tx_free) {
+#ifndef NDIS_OS2
+ mac_drv_clear_txd(smc) ;
+ if (frag_count > smc->os.hwm.tx_p->tx_free) {
+ DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+ frame_status &= ~LAN_TX ;
+ frame_status |= OUT_OF_TXD ;
+ }
+#else
+ DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+ frame_status &= ~LAN_TX ;
+ frame_status |= OUT_OF_TXD ;
+#endif
+ }
+ DB_TX("frame_status = %x",frame_status,0,3) ;
+ NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
+ return(frame_status) ;
+}
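+
+/*
+ * Editor's note: illustrative example only, NOT part of the original driver.
+ * It sketches how an operating system dependent module might combine
+ * hwm_tx_init() and hwm_tx_frag() for a frame that consists of a single
+ * fragment.  The wrapper name is hypothetical; QUEUE_A0 is passed as the
+ * initial frame status to select the asynchronous send queue, and the
+ * caller is assumed to have mapped the fragment to 'phys' already.
+ */
+#if 0	/* example only, not compiled */
+static void my_send_single_frag(struct s_smc *smc, u_char fc,
+	char far *virt, u_long phys, int len)
+{
+	int frame_status ;
+
+	/* ask the hardware module whether the frame can be sent */
+	frame_status = hwm_tx_init(smc,fc,1,len,QUEUE_A0) ;
+	if (frame_status & (LAN_TX | LOC_TX)) {
+		/* one fragment only: it is both the first and the last one */
+		hwm_tx_frag(smc,virt,phys,len,
+			frame_status | FIRST_FRAG | LAST_FRAG) ;
+	}
+}
+#endif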
+
+/*
+ * BEGIN_MANUAL_ENTRY(hwm_tx_frag)
+ * void hwm_tx_frag(smc,virt,phys,len,frame_status)
+ *
+ * function DOWNCALL (hardware module, hwmtm.c)
+ * If the frame should be sent to the LAN, this function calls
+ * dma_master, fills the current TxD with the virtual and the
+ * physical address, sets the STF and EOF bits dependent on
+ * the frame status, and requests the BMU to start the
+ * transmit.
+ * If the frame should be sent to the local SMT, an SMT_MBuf
+ * is allocated if the FIRST_FRAG bit is set in the frame_status.
+ * The fragment of the frame is copied into the SMT MBuf.
+ * The function smt_received_pack is called if the LAST_FRAG
+ * bit is set in the frame_status word.
+ *
+ * para virt virtual pointer to the fragment
+ * len the length of the fragment
+ * frame_status status of the frame, see design description
+ *
+ * return nothing returned, no parameter is modified
+ *
+ * NOTE: It is possible to invoke this function with a fragment length
+ * of zero.
+ *
+ * END_MANUAL_ENTRY
+ */
+void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
+ int frame_status)
+{
+ struct s_smt_fp_txd volatile *t ;
+ struct s_smt_tx_queue *queue ;
+ u_int tbctrl ;
+
+ queue = smc->os.hwm.tx_p ;
+
+ NDD_TRACE("THfB",virt,len,frame_status) ;
+ /* Bug fix: AF / May 31 1999 (#missing)
+ * snmpinfo problem reported by IBM is caused by invalid
+ * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
+ * Set: t = queue->tx_curr_put here !
+ */
+ t = queue->tx_curr_put ;
+
+ DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
+ if (frame_status & LAN_TX) {
+ /* '*t' is already defined */
+ DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
+ t->txd_virt = virt ;
+ t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ;
+ t->txd_tbadr = AIX_REVERSE(phys) ;
+ tbctrl = AIX_REVERSE((((u_long)frame_status &
+ (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
+ BMU_OWN|BMU_CHECK |len) ;
+ t->txd_tbctrl = tbctrl ;
+
+#ifndef AIX
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ outpd(queue->tx_bmu_ctl,CSR_START) ;
+#else /* ifndef AIX */
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ if (frame_status & QUEUE_A0) {
+ outpd(ADDR(B0_XA_CSR),CSR_START) ;
+ }
+ else {
+ outpd(ADDR(B0_XS_CSR),CSR_START) ;
+ }
+#endif
+ queue->tx_free-- ;
+ queue->tx_used++ ;
+ queue->tx_curr_put = t->txd_next ;
+ if (frame_status & LAST_FRAG) {
+ smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
+ }
+ }
+ if (frame_status & LOC_TX) {
+ DB_TX("LOC_TX: ",0,0,3) ;
+ if (frame_status & FIRST_FRAG) {
+ if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
+ smc->hw.fp.err_stats.err_no_buf++ ;
+ DB_TX("No SMbuf; transmit terminated",0,0,4) ;
+ }
+ else {
+ smc->os.hwm.tx_data =
+ smtod(smc->os.hwm.tx_mb,char *) - 1 ;
+#ifdef USE_OS_CPY
+#ifdef PASS_1ST_TXD_2_TX_COMP
+ hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
+ smc->os.hwm.tx_len) ;
+#endif
+#endif
+ }
+ }
+ if (smc->os.hwm.tx_mb) {
+#ifndef USE_OS_CPY
+ DB_TX("copy fragment into MBuf ",0,0,3) ;
+ memcpy(smc->os.hwm.tx_data,virt,len) ;
+ smc->os.hwm.tx_data += len ;
+#endif
+ if (frame_status & LAST_FRAG) {
+#ifdef USE_OS_CPY
+#ifndef PASS_1ST_TXD_2_TX_COMP
+ /*
+ * hwm_cpy_txd2mb(txd,data,len) copies 'len'
+				 * bytes from the virtual pointer in 'txd'
+ * to 'data'. The virtual pointer of the
+ * os-specific tx-buffer should be written
+ * in the LAST txd.
+ */
+ hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
+ smc->os.hwm.tx_len) ;
+#endif /* nPASS_1ST_TXD_2_TX_COMP */
+#endif /* USE_OS_CPY */
+ smc->os.hwm.tx_data =
+ smtod(smc->os.hwm.tx_mb,char *) - 1 ;
+ *(char *)smc->os.hwm.tx_mb->sm_data =
+ *smc->os.hwm.tx_data ;
+ smc->os.hwm.tx_data++ ;
+ smc->os.hwm.tx_mb->sm_len =
+ smc->os.hwm.tx_len - 1 ;
+ DB_TX("pass LLC frame to SMT ",0,0,3) ;
+ smt_received_pack(smc,smc->os.hwm.tx_mb,
+ RD_FS_LOCAL) ;
+ }
+ }
+ }
+ NDD_TRACE("THfE",t,queue->tx_free,0) ;
+}
+
+
+/*
+ * queues a receive for later send
+ */
+static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
+{
+ DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
+ smc->os.hwm.queued_rx_frames++ ;
+ mb->sm_next = (SMbuf *)NULL ;
+ if (smc->os.hwm.llc_rx_pipe == 0) {
+ smc->os.hwm.llc_rx_pipe = mb ;
+ }
+ else {
+ smc->os.hwm.llc_rx_tail->sm_next = mb ;
+ }
+ smc->os.hwm.llc_rx_tail = mb ;
+
+ /*
+	 * force a timer IRQ to receive the data
+ */
+ if (!smc->os.hwm.isr_flag) {
+ smt_force_irq(smc) ;
+ }
+}
+
+/*
+ * get a SMbuf from the llc_rx_queue
+ */
+static SMbuf *get_llc_rx(struct s_smc *smc)
+{
+ SMbuf *mb ;
+
+ if ((mb = smc->os.hwm.llc_rx_pipe)) {
+ smc->os.hwm.queued_rx_frames-- ;
+ smc->os.hwm.llc_rx_pipe = mb->sm_next ;
+ }
+ DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
+ return(mb) ;
+}
+
+/*
+ * queues a transmit SMT MBuf for the time while the MBuf is
+ * queued on the TxD ring
+ */
+static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
+{
+ DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
+ smc->os.hwm.queued_txd_mb++ ;
+ mb->sm_next = (SMbuf *)NULL ;
+ if (smc->os.hwm.txd_tx_pipe == 0) {
+ smc->os.hwm.txd_tx_pipe = mb ;
+ }
+ else {
+ smc->os.hwm.txd_tx_tail->sm_next = mb ;
+ }
+ smc->os.hwm.txd_tx_tail = mb ;
+}
+
+/*
+ * get a SMbuf from the txd_tx_queue
+ */
+static SMbuf *get_txd_mb(struct s_smc *smc)
+{
+ SMbuf *mb ;
+
+ if ((mb = smc->os.hwm.txd_tx_pipe)) {
+ smc->os.hwm.queued_txd_mb-- ;
+ smc->os.hwm.txd_tx_pipe = mb->sm_next ;
+ }
+ DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
+ return(mb) ;
+}
+
+/*
+ * SMT Send function
+ */
+void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
+{
+ char far *data ;
+ int len ;
+ int n ;
+ int i ;
+ int frag_count ;
+ int frame_status ;
+ SK_LOC_DECL(char far,*virt[3]) ;
+ int frag_len[3] ;
+ struct s_smt_tx_queue *queue ;
+ struct s_smt_fp_txd volatile *t ;
+ u_long phys ;
+ u_int tbctrl ;
+
+ NDD_TRACE("THSB",mb,fc,0) ;
+ DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
+
+ mb->sm_off-- ; /* set to fc */
+ mb->sm_len++ ; /* + fc */
+ data = smtod(mb,char *) ;
+ *data = fc ;
+ if (fc == FC_SMT_LOC)
+ *data = FC_SMT_INFO ;
+
+ /*
+ * determine the frag count and the virt addresses of the frags
+ */
+ frag_count = 0 ;
+ len = mb->sm_len ;
+ while (len) {
+ n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
+ if (n >= len) {
+ n = len ;
+ }
+ DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
+ virt[frag_count] = data ;
+ frag_len[frag_count] = n ;
+ frag_count++ ;
+ len -= n ;
+ data += n ;
+ }
+
+ /*
+ * determine the frame status
+ */
+ queue = smc->hw.fp.tx[QUEUE_A0] ;
+ if (fc == FC_BEACON || fc == FC_SMT_LOC) {
+ frame_status = LOC_TX ;
+ }
+ else {
+ frame_status = LAN_TX ;
+ if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
+ (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
+ frame_status |= LOC_TX ;
+ }
+
+ if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
+ frame_status &= ~LAN_TX;
+ if (frame_status) {
+ DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+ }
+ else {
+ DB_TX("Ring is down: terminate transmission",0,0,2) ;
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+ }
+ DB_TX("frame_status = 0x%x ",frame_status,0,5) ;
+
+ if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
+ mb->sm_use_count = 2 ;
+ }
+
+ if (frame_status & LAN_TX) {
+ t = queue->tx_curr_put ;
+ frame_status |= FIRST_FRAG ;
+ for (i = 0; i < frag_count; i++) {
+ DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
+ if (i == frag_count-1) {
+ frame_status |= LAST_FRAG ;
+ t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR |
+ (((u_long)(mb->sm_len-1)&3) << 27)) ;
+ }
+ t->txd_virt = virt[i] ;
+ phys = dma_master(smc, (void far *)virt[i],
+ frag_len[i], DMA_RD|SMT_BUF) ;
+ t->txd_tbadr = AIX_REVERSE(phys) ;
+ tbctrl = AIX_REVERSE((((u_long) frame_status &
+ (FIRST_FRAG|LAST_FRAG)) << 26) |
+ BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
+ t->txd_tbctrl = tbctrl ;
+#ifndef AIX
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ outpd(queue->tx_bmu_ctl,CSR_START) ;
+#else
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ outpd(ADDR(B0_XA_CSR),CSR_START) ;
+#endif
+ frame_status &= ~FIRST_FRAG ;
+ queue->tx_curr_put = t = t->txd_next ;
+ queue->tx_free-- ;
+ queue->tx_used++ ;
+ }
+ smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
+ queue_txd_mb(smc,mb) ;
+ }
+
+ if (frame_status & LOC_TX) {
+ DB_TX("pass Mbuf to LLC queue",0,0,5) ;
+ queue_llc_rx(smc,mb) ;
+ }
+
+ /*
+ * We need to unqueue the free SMT_MBUFs here, because it may
+	 * be that the SMT wants to send more than 1 frame for one down call
+ */
+ mac_drv_clear_txd(smc) ;
+ NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
+}
+
+/* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
+ * void mac_drv_clear_txd(smc)
+ *
+ * function DOWNCALL (hardware module, hwmtm.c)
+ * mac_drv_clear_txd searches in both send queues for TxD's
+ * which were finished by the adapter. It calls dma_complete
+ * for each TxD. If the last fragment of an LLC frame is
+ * reached, it calls mac_drv_tx_complete to release the
+ * send buffer.
+ *
+ * return nothing
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_clear_txd(struct s_smc *smc)
+{
+ struct s_smt_tx_queue *queue ;
+ struct s_smt_fp_txd volatile *t1 ;
+ struct s_smt_fp_txd volatile *t2 = NULL ;
+ SMbuf *mb ;
+ u_long tbctrl ;
+ int i ;
+ int frag_count ;
+ int n ;
+
+ NDD_TRACE("THcB",0,0,0) ;
+ for (i = QUEUE_S; i <= QUEUE_A0; i++) {
+ queue = smc->hw.fp.tx[i] ;
+ t1 = queue->tx_curr_get ;
+ DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+
+ for ( ; ; ) {
+ frag_count = 0 ;
+
+ do {
+ DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
+ DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
+ tbctrl = CR_READ(t1->txd_tbctrl) ;
+ tbctrl = AIX_REVERSE(tbctrl) ;
+
+ if (tbctrl & BMU_OWN || !queue->tx_used){
+ DB_TX("End of TxDs queue %d",i,0,4) ;
+ goto free_next_queue ; /* next queue */
+ }
+ t1 = t1->txd_next ;
+ frag_count++ ;
+ } while (!(tbctrl & BMU_EOF)) ;
+
+ t1 = queue->tx_curr_get ;
+ for (n = frag_count; n; n--) {
+ tbctrl = AIX_REVERSE(t1->txd_tbctrl) ;
+ dma_complete(smc,
+ (union s_fp_descr volatile *) t1,
+ (int) (DMA_RD |
+ ((tbctrl & BMU_SMT_TX) >> 18))) ;
+ t2 = t1 ;
+ t1 = t1->txd_next ;
+ }
+
+ if (tbctrl & BMU_SMT_TX) {
+ mb = get_txd_mb(smc) ;
+ smt_free_mbuf(smc,mb) ;
+ }
+ else {
+#ifndef PASS_1ST_TXD_2_TX_COMP
+ DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
+ mac_drv_tx_complete(smc,t2) ;
+#else
+ DB_TX("mac_drv_tx_comp for TxD 0x%x",
+ queue->tx_curr_get,0,4) ;
+ mac_drv_tx_complete(smc,queue->tx_curr_get) ;
+#endif
+ }
+ queue->tx_curr_get = t1 ;
+ queue->tx_free += frag_count ;
+ queue->tx_used -= frag_count ;
+ }
+free_next_queue: ;
+ }
+ NDD_TRACE("THcE",0,0,0) ;
+}
+
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
+ *
+ * void mac_drv_clear_tx_queue(smc)
+ * struct s_smc *smc ;
+ *
+ * function DOWNCALL (hardware module, hwmtm.c)
+ * mac_drv_clear_tx_queue is called from the SMT when
+ * the RMT state machine has entered the ISOLATE state.
+ * This function is also called by the os-specific module
+ * after it has called the function card_stop().
+ * In this case, the frames in the send queues are obsolete and
+ * should be removed.
+ *
+ * note calling sequence:
+ * CLI_FBI(), card_stop(),
+ * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
+ *
+ * NOTE: The caller is responsible that the BMUs are idle
+ * when this function is called.
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_clear_tx_queue(struct s_smc *smc)
+{
+ struct s_smt_fp_txd volatile *t ;
+ struct s_smt_tx_queue *queue ;
+ int tx_used ;
+ int i ;
+
+ if (smc->hw.hw_state != STOPPED) {
+ SK_BREAK() ;
+ SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
+ return ;
+ }
+
+ for (i = QUEUE_S; i <= QUEUE_A0; i++) {
+ queue = smc->hw.fp.tx[i] ;
+ DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+
+ /*
+ * switch the OWN bit of all pending frames to the host
+ */
+ t = queue->tx_curr_get ;
+ tx_used = queue->tx_used ;
+ while (tx_used) {
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
+ DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
+ t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
+ t = t->txd_next ;
+ tx_used-- ;
+ }
+ }
+
+ /*
+ * release all TxD's for both send queues
+ */
+ mac_drv_clear_txd(smc) ;
+
+ for (i = QUEUE_S; i <= QUEUE_A0; i++) {
+ queue = smc->hw.fp.tx[i] ;
+ t = queue->tx_curr_get ;
+
+ /*
+ * write the phys pointer of the NEXT descriptor into the
+ * BMU's current address descriptor pointer and set
+ * tx_curr_get and tx_curr_put to this position
+ */
+ if (i == QUEUE_S) {
+ outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+ }
+ else {
+ outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+ }
+
+ queue->tx_curr_put = queue->tx_curr_get->txd_next ;
+ queue->tx_curr_get = queue->tx_curr_put ;
+ }
+}
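+
+/*
+ * Editor's note: illustrative example only, NOT part of the original driver.
+ * It sketches the card_stop calling sequence named in the notes of
+ * mac_drv_clear_tx_queue() and mac_drv_clear_rx_queue() above.  The wrapper
+ * name is hypothetical and the exact card_stop() parameters are assumed;
+ * the BMUs must be idle before the two clear functions are called.
+ */
+#if 0	/* example only, not compiled */
+static void my_stop_and_clear_rings(struct s_smc *smc)
+{
+	CLI_FBI() ;			/* disable board interrupts */
+	card_stop(smc) ;		/* stop the adapter; BMUs become idle */
+	mac_drv_clear_tx_queue(smc) ;	/* give pending TxDs back to the host */
+	mac_drv_clear_rx_queue(smc) ;	/* give pending RxDs back to the host */
+}
+#endif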
+
+
+/*
+ -------------------------------------------------------------
+ TEST FUNCTIONS:
+ -------------------------------------------------------------
+*/
+
+#ifdef DEBUG
+/*
+ * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
+ * void mac_drv_debug_lev(smc,flag,lev)
+ *
+ * function DOWNCALL (drvsr.c)
+ *	To get special debug info, the user can assign a debug level
+ * to any debug flag.
+ *
+ * para flag debug flag, possible values are:
+ * = 0: reset all debug flags (the defined level is
+ * ignored)
+ * = 1: debug.d_smtf
+ * = 2: debug.d_smt
+ * = 3: debug.d_ecm
+ * = 4: debug.d_rmt
+ * = 5: debug.d_cfm
+ * = 6: debug.d_pcm
+ *
+ * = 10: debug.d_os.hwm_rx (hardware module receive path)
+ *		= 11:	debug.d_os.hwm_tx (hardware module transmit path)
+ *		= 12:	debug.d_os.hwm_gen (hardware module general flag)
+ *
+ * lev debug level
+ *
+ * END_MANUAL_ENTRY
+ */
+void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
+{
+ switch(flag) {
+ case (int)NULL:
+ DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
+ DB_P.d_cfm = 0 ;
+ DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
+#ifdef SBA
+ DB_P.d_sba = 0 ;
+#endif
+#ifdef ESS
+ DB_P.d_ess = 0 ;
+#endif
+ break ;
+ case DEBUG_SMTF:
+ DB_P.d_smtf = lev ;
+ break ;
+ case DEBUG_SMT:
+ DB_P.d_smt = lev ;
+ break ;
+ case DEBUG_ECM:
+ DB_P.d_ecm = lev ;
+ break ;
+ case DEBUG_RMT:
+ DB_P.d_rmt = lev ;
+ break ;
+ case DEBUG_CFM:
+ DB_P.d_cfm = lev ;
+ break ;
+ case DEBUG_PCM:
+ DB_P.d_pcm = lev ;
+ break ;
+ case DEBUG_SBA:
+#ifdef SBA
+ DB_P.d_sba = lev ;
+#endif
+ break ;
+ case DEBUG_ESS:
+#ifdef ESS
+ DB_P.d_ess = lev ;
+#endif
+ break ;
+ case DB_HWM_RX:
+ DB_P.d_os.hwm_rx = lev ;
+ break ;
+ case DB_HWM_TX:
+ DB_P.d_os.hwm_tx = lev ;
+ break ;
+ case DB_HWM_GEN:
+ DB_P.d_os.hwm_gen = lev ;
+ break ;
+ default:
+ break ;
+ }
+}
+#endif
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c
new file mode 100644
index 000000000000..e01f8a0f35c6
--- /dev/null
+++ b/drivers/net/skfp/hwt.c
@@ -0,0 +1,305 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ * Timer Driver for FBI board (timer chip 82C54)
+ */
+
+/*
+ * Modifications:
+ *
+ * 28-Jun-1994 sw Edit v1.6.
+ * MCA: Added support for the SK-NET FDDI-FM2 adapter. The
+ * following functions have been added(+) or modified(*):
+ * hwt_start(*), hwt_stop(*), hwt_restart(*), hwt_read(*)
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)hwt.c 1.13 97/04/23 (C) SK " ;
+#endif
+
+/*
+ * Prototypes of local functions.
+ */
+/* 28-Jun-1994 sw - Note: hwt_restart() is also used in module 'drvfbi.c'. */
+/*static void hwt_restart() ; */
+
+/************************
+ *
+ * hwt_start
+ *
+ * Start hardware timer (clock ticks are 16us).
+ *
+ * void hwt_start(
+ * struct s_smc *smc,
+ * u_long time) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ *
+ * time - The time in units of 16us to load the timer with.
+ * Out
+ * Nothing.
+ *
+ ************************/
+#define HWT_MAX (65000)
+
+void hwt_start(struct s_smc *smc, u_long time)
+{
+ u_short cnt ;
+
+ if (time > HWT_MAX)
+ time = HWT_MAX ;
+
+ smc->hw.t_start = time ;
+ smc->hw.t_stop = 0L ;
+
+ cnt = (u_short)time ;
+ /*
+ * if time < 16 us
+ * time = 16 us
+ */
+ if (!cnt)
+ cnt++ ;
+#ifndef PCI
+ /*
+ * 6.25MHz -> CLK0 : T0 (cnt0 = 16us) -> OUT0
+ * OUT0 -> CLK1 : T1 (cnt1) OUT1 -> ISRA(IS_TIMINT)
+ */
+ OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */
+ OUT_82c54_TIMER(1,cnt & 0xff) ; /* LSB */
+ OUT_82c54_TIMER(1,(cnt>>8) & 0xff) ; /* MSB */
+ /*
+ * start timer by switching counter 0 to mode 3
+ * T0 resolution 16 us (CLK0=0.16us)
+ */
+ OUT_82c54_TIMER(3,0<<6 | 3<<4 | 3<<1) ; /* counter 0, mode 3 */
+ OUT_82c54_TIMER(0,100) ; /* LSB */
+ OUT_82c54_TIMER(0,0) ; /* MSB */
+#else /* PCI */
+ outpd(ADDR(B2_TI_INI), (u_long) cnt * 200) ; /* Load timer value. */
+ outpw(ADDR(B2_TI_CRTL), TIM_START) ; /* Start timer. */
+#endif /* PCI */
+ smc->hw.timer_activ = TRUE ;
+}
+
+/************************
+ *
+ * hwt_stop
+ *
+ * Stop hardware timer.
+ *
+ * void hwt_stop(
+ * struct s_smc *smc) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void hwt_stop(struct s_smc *smc)
+{
+#ifndef PCI
+ /* stop counter 0 by switching to mode 0 */
+ OUT_82c54_TIMER(3,0<<6 | 3<<4 | 0<<1) ; /* counter 0, mode 0 */
+ OUT_82c54_TIMER(0,0) ; /* LSB */
+ OUT_82c54_TIMER(0,0) ; /* MSB */
+#else /* PCI */
+ outpw(ADDR(B2_TI_CRTL), TIM_STOP) ;
+ outpw(ADDR(B2_TI_CRTL), TIM_CL_IRQ) ;
+#endif /* PCI */
+
+ smc->hw.timer_activ = FALSE ;
+}
+
+/************************
+ *
+ * hwt_init
+ *
+ * Initialize hardware timer.
+ *
+ * void hwt_init(
+ * struct s_smc *smc) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void hwt_init(struct s_smc *smc)
+{
+ smc->hw.t_start = 0 ;
+ smc->hw.t_stop = 0 ;
+ smc->hw.timer_activ = FALSE ;
+
+ hwt_restart(smc) ;
+}
+
+/************************
+ *
+ * hwt_restart
+ *
+ * Clear timer interrupt.
+ *
+ * void hwt_restart(
+ * struct s_smc *smc) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void hwt_restart(struct s_smc *smc)
+{
+ hwt_stop(smc) ;
+#ifndef PCI
+ OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */
+ OUT_82c54_TIMER(1,1 ) ; /* LSB */
+ OUT_82c54_TIMER(1,0 ) ; /* MSB */
+#endif
+}
+
+/************************
+ *
+ * hwt_read
+ *
+ * Stop hardware timer and read time elapsed since last start.
+ *
+ * u_long hwt_read(smc) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ * Out
+ * The elapsed time since last start in units of 16us.
+ *
+ ************************/
+u_long hwt_read(struct s_smc *smc)
+{
+ u_short tr ;
+#ifndef PCI
+ u_short is ;
+#else
+ u_long is ;
+#endif
+
+ if (smc->hw.timer_activ) {
+ hwt_stop(smc) ;
+#ifndef PCI
+ OUT_82c54_TIMER(3,1<<6) ; /* latch command */
+ tr = IN_82c54_TIMER(1) & 0xff ;
+ tr += (IN_82c54_TIMER(1) & 0xff)<<8 ;
+#else /* PCI */
+ tr = (u_short)((inpd(ADDR(B2_TI_VAL))/200) & 0xffff) ;
+#endif /* PCI */
+ is = GET_ISR() ;
+ /* Check if timer expired (or wraparound). */
+ if ((tr > smc->hw.t_start) || (is & IS_TIMINT)) {
+ hwt_restart(smc) ;
+ smc->hw.t_stop = smc->hw.t_start ;
+ }
+ else
+ smc->hw.t_stop = smc->hw.t_start - tr ;
+ }
+ return (smc->hw.t_stop) ;
+}
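+
+/*
+ * Editor's note: illustrative example only, NOT part of the original driver.
+ * It shows the hwt_start()/hwt_read() pairing described above: the timer is
+ * loaded in units of 16us and hwt_read() returns the elapsed time in the
+ * same units.  The wrapper name and the interval value are hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static u_long my_measure_elapsed_time(struct s_smc *smc)
+{
+	hwt_start(smc,1000) ;		/* load the timer with 1000 * 16us */
+	/* ... work whose duration is to be measured ... */
+	return(hwt_read(smc)) ;		/* elapsed time in units of 16us */
+}
+#endif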
+
+#ifdef PCI
+/************************
+ *
+ * hwt_quick_read
+ *
+ *	Stop the hardware timer, read the timer value, and start the timer again.
+ *
+ *	u_long hwt_quick_read(smc) ;
+ * In
+ * smc - A pointer to the SMT Context structure.
+ * Out
+ * current timer value in units of 80ns.
+ *
+ ************************/
+u_long hwt_quick_read(struct s_smc *smc)
+{
+ u_long interval ;
+ u_long time ;
+
+ interval = inpd(ADDR(B2_TI_INI)) ;
+ outpw(ADDR(B2_TI_CRTL), TIM_STOP) ;
+ time = inpd(ADDR(B2_TI_VAL)) ;
+ outpd(ADDR(B2_TI_INI),time) ;
+ outpw(ADDR(B2_TI_CRTL), TIM_START) ;
+ outpd(ADDR(B2_TI_INI),interval) ;
+
+ return(time) ;
+}
+
+/************************
+ *
+ * hwt_wait_time(smc,start,duration)
+ *
+ *	This function returns after the given amount of time has elapsed
+ *	since the start time.
+ *
+ * para start start time
+ * duration time to wait
+ *
+ * NOTE: The function will return immediately if the timer is not
+ * started
+ ************************/
+void hwt_wait_time(struct s_smc *smc, u_long start, long int duration)
+{
+ long diff ;
+ long interval ;
+ int wrapped ;
+
+ /*
+ * check if timer is running
+ */
+ if (smc->hw.timer_activ == FALSE ||
+ hwt_quick_read(smc) == hwt_quick_read(smc)) {
+ return ;
+ }
+
+ interval = inpd(ADDR(B2_TI_INI)) ;
+ if (interval > duration) {
+ do {
+ diff = (long)(start - hwt_quick_read(smc)) ;
+ if (diff < 0) {
+ diff += interval ;
+ }
+ } while (diff <= duration) ;
+ }
+ else {
+ diff = interval ;
+ wrapped = 0 ;
+ do {
+ if (!wrapped) {
+ if (hwt_quick_read(smc) >= start) {
+ diff += interval ;
+ wrapped = 1 ;
+ }
+ }
+ else {
+ if (hwt_quick_read(smc) < start) {
+ wrapped = 0 ;
+ }
+ }
+ } while (diff <= duration) ;
+ }
+}
+#endif
+
diff --git a/drivers/net/skfp/lnkstat.c b/drivers/net/skfp/lnkstat.c
new file mode 100644
index 000000000000..00a248044f86
--- /dev/null
+++ b/drivers/net/skfp/lnkstat.c
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ IBM FDDI read error log function
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/lnkstat.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)lnkstat.c 1.8 97/04/11 (C) SK " ;
+#endif
+
+#ifdef sun
+#define _far
+#endif
+
+#define EL_IS_OK(x,l) ((((int)&(((struct s_error_log *)0)->x)) + \
+ sizeof(er->x)) <= l)
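+/*
+ * EL_IS_OK(x,l) is true if field x of struct s_error_log lies completely
+ * within the first l bytes of the buffer (offsetof via a null pointer cast
+ * plus the field size).
+ */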
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;11)
+
+ u_long smt_get_error_word(smc)
+ struct s_smc *smc ;
+
+Function DOWNCALL (SMT, lnkstat.c)
+		This function returns the SMT error word for AIX events.
+
+Return smt_error_word These bits are supported:
+
+ SMT_ERL_ALC == [PS/PA].fddiPORTLerFlag
+ SMT_ERL_BLC == [PB].fddiPORTLerFlag
+ SMT_ERL_NCC == fddiMACNotCopiedFlag
+ SMT_ERL_FEC == fddiMACFrameErrorFlag
+
+ END_MANUAL_ENTRY()
+ */
+u_long smt_get_error_word(struct s_smc *smc)
+{
+ u_long st;
+
+ /*
+ * smt error word low
+ */
+ st = 0 ;
+ if (smc->s.sas == SMT_SAS) {
+ if (smc->mib.p[PS].fddiPORTLerFlag)
+ st |= SMT_ERL_ALC ;
+ }
+ else {
+ if (smc->mib.p[PA].fddiPORTLerFlag)
+ st |= SMT_ERL_ALC ;
+ if (smc->mib.p[PB].fddiPORTLerFlag)
+ st |= SMT_ERL_BLC ;
+ }
+ if (smc->mib.m[MAC0].fddiMACNotCopiedFlag)
+ st |= SMT_ERL_NCC ; /* not copied condition */
+ if (smc->mib.m[MAC0].fddiMACFrameErrorFlag)
+ st |= SMT_ERL_FEC ; /* frame error condition */
+
+ return st;
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;11)
+
+ u_long smt_get_event_word(smc)
+ struct s_smc *smc ;
+
+Function DOWNCALL (SMT, lnkstat.c)
+		This function returns the SMT event word for AIX events.
+
+Return smt_event_word always 0
+
+ END_MANUAL_ENTRY()
+ */
+u_long smt_get_event_word(struct s_smc *smc)
+{
+ return (u_long) 0;
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;11)
+
+ u_long smt_get_port_event_word(smc)
+ struct s_smc *smc ;
+
+Function DOWNCALL (SMT, lnkstat.c)
+		This function returns the SMT port event word for AIX events.
+
+Return smt_port_event_word always 0
+
+ END_MANUAL_ENTRY()
+ */
+u_long smt_get_port_event_word(struct s_smc *smc)
+{
+ return (u_long) 0;
+}
+
+/*
+ BEGIN_MANUAL_ENTRY(if,func;others;11)
+
+ u_long smt_read_errorlog(smc,p,len)
+ struct s_smc *smc ;
+ char _far *p ;
+ int len ;
+
+Function DOWNCALL (SMT, lnkstat.c)
+		This function returns the SMT error log field for AIX events.
+
+Para p pointer to the error log field
+ len len of the error log field
+
+Return len used len of the error log field
+
+ END_MANUAL_ENTRY()
+ */
+int smt_read_errorlog(struct s_smc *smc, char _far *p, int len)
+{
+ int i ;
+ int st ;
+ struct s_error_log _far *er ;
+
+ er = (struct s_error_log _far *) p ;
+ if (len > sizeof(struct s_error_log))
+ len = sizeof(struct s_error_log) ;
+ for (i = 0 ; i < len ; i++)
+ *p++ = 0 ;
+ /*
+ * set count
+ */
+ if (EL_IS_OK(set_count_high,len)) {
+ er->set_count_low = (u_short)smc->mib.fddiSMTSetCount.count ;
+ er->set_count_high =
+ (u_short)(smc->mib.fddiSMTSetCount.count >> 16L) ;
+ }
+ /*
+ * aci
+ */
+ if (EL_IS_OK(aci_id_code,len)) {
+ er->aci_id_code = 0 ;
+ }
+ /*
+ * purge counter is missed frames; 16 bits only
+ */
+ if (EL_IS_OK(purge_frame_counter,len)) {
+ if (smc->mib.m[MAC0].fddiMACCopied_Ct > 0xffff)
+ er->purge_frame_counter = 0xffff ;
+ else
+ er->purge_frame_counter =
+ (u_short)smc->mib.m[MAC0].fddiMACCopied_Ct ;
+ }
+ /*
+ * CMT and RMT state machines
+ */
+ if (EL_IS_OK(ecm_state,len))
+ er->ecm_state = smc->mib.fddiSMTECMState ;
+
+ if (EL_IS_OK(pcm_b_state,len)) {
+ if (smc->s.sas == SMT_SAS) {
+ er->pcm_a_state = smc->y[PS].mib->fddiPORTPCMState ;
+ er->pcm_b_state = 0 ;
+ }
+ else {
+ er->pcm_a_state = smc->y[PA].mib->fddiPORTPCMState ;
+ er->pcm_b_state = smc->y[PB].mib->fddiPORTPCMState ;
+ }
+ }
+ if (EL_IS_OK(cfm_state,len))
+ er->cfm_state = smc->mib.fddiSMTCF_State ;
+ if (EL_IS_OK(rmt_state,len))
+ er->rmt_state = smc->mib.m[MAC0].fddiMACRMTState ;
+
+ /*
+ * smt error word low (we only need the low order 16 bits.)
+ */
+
+ st = smt_get_error_word(smc) & 0xffff ;
+
+ if (EL_IS_OK(smt_error_low,len))
+ er->smt_error_low = st ;
+
+ if (EL_IS_OK(ucode_version_level,len))
+ er->ucode_version_level = 0x0101 ;
+ return(len) ;
+}
+
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
new file mode 100644
index 000000000000..571f055c096b
--- /dev/null
+++ b/drivers/net/skfp/pcmplc.c
@@ -0,0 +1,2024 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ PCM
+ Physical Connection Management
+*/
+
+/*
+ * Hardware independent state machine implementation
+ * The following external SMT functions are referenced :
+ *
+ * queue_event()
+ * smt_timer_start()
+ * smt_timer_stop()
+ *
+ * The following external HW dependent functions are referenced :
+ * sm_pm_control()
+ * sm_ph_linestate()
+ * sm_pm_ls_latch()
+ *
+ * The following HW dependent events are required :
+ * PC_QLS
+ * PC_ILS
+ * PC_HLS
+ * PC_MLS
+ * PC_NSE
+ * PC_LEM
+ *
+ */
+
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/supern_2.h"
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
+#endif
+
+#ifdef FDDI_MIB
+extern int snmp_fddi_trap(
+#ifdef ANSIC
+struct s_smc * smc, int type, int index
+#endif
+);
+#endif
+#ifdef CONCENTRATOR
+extern int plc_is_installed(
+#ifdef ANSIC
+struct s_smc *smc ,
+int p
+#endif
+) ;
+#endif
+/*
+ * FSM Macros
+ */
+#define AFLAG (0x20)
+#define GO_STATE(x) (mib->fddiPORTPCMState = (x)|AFLAG)
+#define ACTIONS_DONE() (mib->fddiPORTPCMState &= ~AFLAG)
+#define ACTIONS(x) (x|AFLAG)
+
+/*
+ * PCM states
+ */
+#define PC0_OFF 0
+#define PC1_BREAK 1
+#define PC2_TRACE 2
+#define PC3_CONNECT 3
+#define PC4_NEXT 4
+#define PC5_SIGNAL 5
+#define PC6_JOIN 6
+#define PC7_VERIFY 7
+#define PC8_ACTIVE 8
+#define PC9_MAINT 9
+
+#ifdef DEBUG
+/*
+ * symbolic state names
+ */
+static const char * const pcm_states[] = {
+ "PC0_OFF","PC1_BREAK","PC2_TRACE","PC3_CONNECT","PC4_NEXT",
+ "PC5_SIGNAL","PC6_JOIN","PC7_VERIFY","PC8_ACTIVE","PC9_MAINT"
+} ;
+
+/*
+ * symbolic event names
+ */
+static const char * const pcm_events[] = {
+ "NONE","PC_START","PC_STOP","PC_LOOP","PC_JOIN","PC_SIGNAL",
+ "PC_REJECT","PC_MAINT","PC_TRACE","PC_PDR",
+ "PC_ENABLE","PC_DISABLE",
+ "PC_QLS","PC_ILS","PC_MLS","PC_HLS","PC_LS_PDR","PC_LS_NONE",
+ "PC_TIMEOUT_TB_MAX","PC_TIMEOUT_TB_MIN",
+ "PC_TIMEOUT_C_MIN","PC_TIMEOUT_T_OUT",
+ "PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
+ "PC_NSE","PC_LEM"
+} ;
+#endif
+
+#ifdef MOT_ELM
+/*
+ * PCL-S control register
+ * this register in the PLC-S controls the scrambling parameters
+ */
+#define PLCS_CONTROL_C_U 0
+#define PLCS_CONTROL_C_S (PL_C_SDOFF_ENABLE | PL_C_SDON_ENABLE | \
+ PL_C_CIPHER_ENABLE)
+#define PLCS_FASSERT_U 0
+#define PLCS_FASSERT_S 0xFd76 /* 52.0 us */
+#define PLCS_FDEASSERT_U 0
+#define PLCS_FDEASSERT_S 0
+#else /* nMOT_ELM */
+/*
+ * PCL-S control register
+ * this register in the PLC-S controls the scrambling parameters
+ * can be patched for ANSI compliance if standard changes
+ */
+static const u_char plcs_control_c_u[17] = "PLC_CNTRL_C_U=\0\0" ;
+static const u_char plcs_control_c_s[17] = "PLC_CNTRL_C_S=\01\02" ;
+
+#define PLCS_CONTROL_C_U (plcs_control_c_u[14] | (plcs_control_c_u[15]<<8))
+#define PLCS_CONTROL_C_S (plcs_control_c_s[14] | (plcs_control_c_s[15]<<8))
+#endif /* nMOT_ELM */
+
+/*
+ * external vars
+ */
+/* struct definition see 'cmtdef.h' (also used by CFM) */
+
+#define PS_OFF 0
+#define PS_BIT3 1
+#define PS_BIT4 2
+#define PS_BIT7 3
+#define PS_LCT 4
+#define PS_BIT8 5
+#define PS_JOIN 6
+#define PS_ACTIVE 7
+
+#define LCT_LEM_MAX 255
+
+/*
+ * PLC timing parameter
+ */
+
+#define PLC_MS(m) ((int)((0x10000L-(m*100000L/2048))))
+#define SLOW_TL_MIN PLC_MS(6)
+#define SLOW_C_MIN PLC_MS(10)
+
+static const struct plt {
+ int timer ; /* relative plc timer address */
+ int para ; /* default timing parameters */
+} pltm[] = {
+ { PL_C_MIN, SLOW_C_MIN }, /* min t. to remain Connect State */
+ { PL_TL_MIN, SLOW_TL_MIN }, /* min t. to transmit a Line State */
+ { PL_TB_MIN, TP_TB_MIN }, /* min break time */
+ { PL_T_OUT, TP_T_OUT }, /* Signaling timeout */
+ { PL_LC_LENGTH, TP_LC_LENGTH }, /* Link Confidence Test Time */
+ { PL_T_SCRUB, TP_T_SCRUB }, /* Scrub Time == MAC TVX time ! */
+ { PL_NS_MAX, TP_NS_MAX }, /* max t. that noise is tolerated */
+ { 0,0 }
+} ;
+
+/*
+ * interrupt mask
+ */
+#ifdef SUPERNET_3
+/*
+ * Do we need the EBUF error during signaling, too, to detect SUPERNET_3
+ * PLL bug?
+ */
+static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
+ PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
+#else /* SUPERNET_3 */
+/*
+ * We do NOT need the elasticity buffer error during signaling.
+ */
+static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
+ PL_PCM_ENABLED | PL_SELF_TEST ;
+#endif /* SUPERNET_3 */
+static int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
+ PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
+
+/* external functions */
+void all_selection_criteria(struct s_smc *smc);
+
+/* internal functions */
+static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd);
+static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy);
+static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy);
+static void reset_lem_struct(struct s_phy *phy);
+static void plc_init(struct s_smc *smc, int p);
+static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold);
+static void sm_ph_lem_stop(struct s_smc *smc, int np);
+static void sm_ph_linestate(struct s_smc *smc, int phy, int ls);
+static void real_init_plc(struct s_smc *smc);
+
+/*
+ * SMT timer interface
+ * start PCM timer 0
+ */
+static void start_pcm_timer0(struct s_smc *smc, u_long value, int event,
+ struct s_phy *phy)
+{
+ phy->timer0_exp = FALSE ; /* clear timer event flag */
+ smt_timer_start(smc,&phy->pcm_timer0,value,
+ EV_TOKEN(EVENT_PCM+phy->np,event)) ;
+}
+/*
+ * SMT timer interface
+ * stop PCM timer 0
+ */
+static void stop_pcm_timer0(struct s_smc *smc, struct s_phy *phy)
+{
+ if (phy->pcm_timer0.tm_active)
+ smt_timer_stop(smc,&phy->pcm_timer0) ;
+}
+
+/*
+ init PCM state machine (called by driver)
+ clear all PCM vars and flags
+*/
+void pcm_init(struct s_smc *smc)
+{
+ int i ;
+ int np ;
+ struct s_phy *phy ;
+ struct fddi_mib_p *mib ;
+
+ for (np = 0,phy = smc->y ; np < NUMPHYS ; np++,phy++) {
+ /* Indicates the type of PHY being used */
+ mib = phy->mib ;
+ mib->fddiPORTPCMState = ACTIONS(PC0_OFF) ;
+ phy->np = np ;
+ switch (smc->s.sas) {
+#ifdef CONCENTRATOR
+ case SMT_SAS :
+ mib->fddiPORTMy_Type = (np == PS) ? TS : TM ;
+ break ;
+ case SMT_DAS :
+ mib->fddiPORTMy_Type = (np == PA) ? TA :
+ (np == PB) ? TB : TM ;
+ break ;
+ case SMT_NAC :
+ mib->fddiPORTMy_Type = TM ;
+ break;
+#else
+ case SMT_SAS :
+ mib->fddiPORTMy_Type = (np == PS) ? TS : TNONE ;
+ mib->fddiPORTHardwarePresent = (np == PS) ? TRUE :
+ FALSE ;
+#ifndef SUPERNET_3
+ smc->y[PA].mib->fddiPORTPCMState = PC0_OFF ;
+#else
+ smc->y[PB].mib->fddiPORTPCMState = PC0_OFF ;
+#endif
+ break ;
+ case SMT_DAS :
+ mib->fddiPORTMy_Type = (np == PB) ? TB : TA ;
+ break ;
+#endif
+ }
+ /*
+ * set PMD-type
+ */
+ phy->pmd_scramble = 0 ;
+ switch (phy->pmd_type[PMD_SK_PMD]) {
+ case 'P' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
+ break ;
+ case 'L' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_LCF ;
+ break ;
+ case 'D' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
+ break ;
+ case 'S' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
+ phy->pmd_scramble = TRUE ;
+ break ;
+ case 'U' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
+ phy->pmd_scramble = TRUE ;
+ break ;
+ case '1' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
+ break ;
+ case '2' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
+ break ;
+ case '3' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
+ break ;
+ case '4' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
+ break ;
+ case 'H' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
+ break ;
+ case 'I' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
+ break ;
+ case 'G' :
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
+ break ;
+ default:
+ mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
+ break ;
+ }
+ /*
+ * A and B port can be on primary and secondary path
+ */
+ switch (mib->fddiPORTMy_Type) {
+ case TA :
+ mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
+ mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
+ mib->fddiPORTRequestedPaths[2] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_CON_ALTER |
+ MIB_P_PATH_SEC_PREFER ;
+ mib->fddiPORTRequestedPaths[3] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_CON_ALTER |
+ MIB_P_PATH_SEC_PREFER |
+ MIB_P_PATH_THRU ;
+ break ;
+ case TB :
+ mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
+ mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
+ mib->fddiPORTRequestedPaths[2] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_PRIM_PREFER ;
+ mib->fddiPORTRequestedPaths[3] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_PRIM_PREFER |
+ MIB_P_PATH_CON_PREFER |
+ MIB_P_PATH_THRU ;
+ break ;
+ case TS :
+ mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
+ mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
+ mib->fddiPORTRequestedPaths[2] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_CON_ALTER |
+ MIB_P_PATH_PRIM_PREFER ;
+ mib->fddiPORTRequestedPaths[3] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_CON_ALTER |
+ MIB_P_PATH_PRIM_PREFER ;
+ break ;
+ case TM :
+ mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
+ mib->fddiPORTRequestedPaths[2] =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_SEC_ALTER |
+ MIB_P_PATH_PRIM_ALTER ;
+ mib->fddiPORTRequestedPaths[3] = 0 ;
+ break ;
+ }
+
+ phy->pc_lem_fail = FALSE ;
+ mib->fddiPORTPCMStateX = mib->fddiPORTPCMState ;
+ mib->fddiPORTLCTFail_Ct = 0 ;
+ mib->fddiPORTBS_Flag = 0 ;
+ mib->fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
+ mib->fddiPORTNeighborType = TNONE ;
+ phy->ls_flag = 0 ;
+ phy->rc_flag = 0 ;
+ phy->tc_flag = 0 ;
+ phy->td_flag = 0 ;
+ if (np >= PM)
+ phy->phy_name = '0' + np - PM ;
+ else
+ phy->phy_name = 'A' + np ;
+ phy->wc_flag = FALSE ; /* set by SMT */
+ memset((char *)&phy->lem,0,sizeof(struct lem_counter)) ;
+ reset_lem_struct(phy) ;
+ memset((char *)&phy->plc,0,sizeof(struct s_plc)) ;
+ phy->plc.p_state = PS_OFF ;
+ for (i = 0 ; i < NUMBITS ; i++) {
+ phy->t_next[i] = 0 ;
+ }
+ }
+ real_init_plc(smc) ;
+}
+
+void init_plc(struct s_smc *smc)
+{
+ SK_UNUSED(smc) ;
+
+ /*
+ * dummy
+	 * This is an obsolete public entry point that has to remain
+	 * for compatibility; it is used by various drivers.
+	 * The work is now done in real_init_plc(),
+	 * which is called from pcm_init().
+ */
+}
+
+static void real_init_plc(struct s_smc *smc)
+{
+ int p ;
+
+ for (p = 0 ; p < NUMPHYS ; p++)
+ plc_init(smc,p) ;
+}
+
+static void plc_init(struct s_smc *smc, int p)
+{
+ int i ;
+#ifndef MOT_ELM
+ int rev ; /* Revision of PLC-x */
+#endif /* MOT_ELM */
+
+ /* transit PCM state machine to MAINT state */
+ outpw(PLC(p,PL_CNTRL_B),0) ;
+ outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ;
+ outpw(PLC(p,PL_CNTRL_A),0) ;
+
+ /*
+ * if PLC-S then set control register C
+ */
+#ifndef MOT_ELM
+ rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ;
+ if (rev != PLC_REVISION_A)
+#endif /* MOT_ELM */
+ {
+ if (smc->y[p].pmd_scramble) {
+ outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ;
+#ifdef MOT_ELM
+ outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ;
+ outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ;
+#endif /* MOT_ELM */
+ }
+ else {
+ outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ;
+#ifdef MOT_ELM
+ outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ;
+ outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ;
+#endif /* MOT_ELM */
+ }
+ }
+
+ /*
+ * set timer register
+ */
+ for ( i = 0 ; pltm[i].timer; i++) /* set timer parameter reg */
+ outpw(PLC(p,pltm[i].timer),pltm[i].para) ;
+
+ (void)inpw(PLC(p,PL_INTR_EVENT)) ; /* clear interrupt event reg */
+ plc_clear_irq(smc,p) ;
+ outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */
+
+ /*
+	 * If PCM is configured for class S, it will NOT go to the
+	 * REMOVE state when offline (page 3-36).
+	 * In the concentrator, all inactive PHYs must always be in
+	 * the REMOVE state;
+	 * there's no real need to use this feature at all ..
+ */
+#ifndef CONCENTRATOR
+ if ((smc->s.sas == SMT_SAS) && (p == PS)) {
+ outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ;
+ }
+#endif
+}
+
+/*
+ * control PCM state machine
+ */
+static void plc_go_state(struct s_smc *smc, int p, int state)
+{
+ HW_PTR port ;
+ int val ;
+
+ SK_UNUSED(smc) ;
+
+ port = (HW_PTR) (PLC(p,PL_CNTRL_B)) ;
+ val = inpw(port) & ~(PL_PCM_CNTRL | PL_MAINT) ;
+ outpw(port,val) ;
+ outpw(port,val | state) ;
+}
+
+/*
+ * read current line state (called by ECM & PCM)
+ */
+int sm_pm_get_ls(struct s_smc *smc, int phy)
+{
+ int state ;
+
+#ifdef CONCENTRATOR
+ if (!plc_is_installed(smc,phy))
+ return(PC_QLS) ;
+#endif
+
+ state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
+ switch(state) {
+ case PL_L_QLS:
+ state = PC_QLS ;
+ break ;
+ case PL_L_MLS:
+ state = PC_MLS ;
+ break ;
+ case PL_L_HLS:
+ state = PC_HLS ;
+ break ;
+ case PL_L_ILS4:
+ case PL_L_ILS16:
+ state = PC_ILS ;
+ break ;
+ case PL_L_ALS:
+ state = PC_LS_PDR ;
+ break ;
+ default :
+ state = PC_LS_NONE ;
+ }
+ return(state) ;
+}
+
+static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
+{
+ int np = phy->np ; /* PHY index */
+ int n ;
+ int i ;
+
+ SK_UNUSED(smc) ;
+
+ /* create bit vector */
+ for (i = len-1,n = 0 ; i >= 0 ; i--) {
+ n = (n<<1) | phy->t_val[phy->bitn+i] ;
+ }
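+	/* n now holds t_val[bitn] in bit 0 .. t_val[bitn+len-1] in bit len-1 */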
+ if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
+#if 0
+ printf("PL_PCM_SIGNAL is set\n") ;
+#endif
+ return(1) ;
+ }
+ /* write bit[n] & length = 1 to regs */
+ outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
+ outpw(PLC(np,PL_XMIT_VECTOR),n) ;
+#ifdef DEBUG
+#if 1
+#ifdef DEBUG_BRD
+ if (smc->debug.d_plc & 0x80)
+#else
+ if (debug.d_plc & 0x80)
+#endif
+ printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
+#endif
+#endif
+ return(0) ;
+}
+
+/*
+ * config plc muxes
+ */
+void plc_config_mux(struct s_smc *smc, int mux)
+{
+ if (smc->s.sas != SMT_DAS)
+ return ;
+ if (mux == MUX_WRAPB) {
+ SETMASK(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
+ SETMASK(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
+ }
+ else {
+ CLEAR(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
+ CLEAR(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP) ;
+ }
+ CLEAR(PLC(PB,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
+ CLEAR(PLC(PB,PL_CNTRL_A),PL_SC_REM_LOOP) ;
+}
+
+/*
+ PCM state machine
+ called by dispatcher & fddi_init() (driver)
+ do
+ display state change
+ process event
+ until SM is stable
+*/
+void pcm(struct s_smc *smc, const int np, int event)
+{
+ int state ;
+ int oldstate ;
+ struct s_phy *phy ;
+ struct fddi_mib_p *mib ;
+
+#ifndef CONCENTRATOR
+ /*
+ * ignore 2nd PHY if SAS
+ */
+ if ((np != PS) && (smc->s.sas == SMT_SAS))
+ return ;
+#endif
+ phy = &smc->y[np] ;
+ mib = phy->mib ;
+ oldstate = mib->fddiPORTPCMState ;
+ do {
+ DB_PCM("PCM %c: state %s",
+ phy->phy_name,
+ (mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ;
+ DB_PCM("%s, event %s\n",
+ pcm_states[mib->fddiPORTPCMState & ~AFLAG],
+ pcm_events[event]) ;
+ state = mib->fddiPORTPCMState ;
+ pcm_fsm(smc,phy,event) ;
+ event = 0 ;
+ } while (state != mib->fddiPORTPCMState) ;
+ /*
+ * because the PLC does the bit signaling for us,
+ * we're always in SIGNAL state
+	 * the MIB wants to see CONNECT
+ * we therefore fake an entry in the MIB
+ */
+ if (state == PC5_SIGNAL)
+ mib->fddiPORTPCMStateX = PC3_CONNECT ;
+ else
+ mib->fddiPORTPCMStateX = state ;
+
+#ifndef SLIM_SMT
+ /*
+ * path change
+ */
+ if ( mib->fddiPORTPCMState != oldstate &&
+ ((oldstate == PC8_ACTIVE) || (mib->fddiPORTPCMState == PC8_ACTIVE))) {
+ smt_srf_event(smc,SMT_EVENT_PORT_PATH_CHANGE,
+ (int) (INDEX_PORT+ phy->np),0) ;
+ }
+#endif
+
+#ifdef FDDI_MIB
+ /* check whether a snmp-trap has to be sent */
+
+ if ( mib->fddiPORTPCMState != oldstate ) {
+ /* a real state change took place */
+ DB_SNMP ("PCM from %d to %d\n", oldstate, mib->fddiPORTPCMState);
+ if ( mib->fddiPORTPCMState == PC0_OFF ) {
+ /* send first trap */
+ snmp_fddi_trap (smc, 1, (int) mib->fddiPORTIndex );
+ } else if ( oldstate == PC0_OFF ) {
+ /* send second trap */
+ snmp_fddi_trap (smc, 2, (int) mib->fddiPORTIndex );
+ } else if ( mib->fddiPORTPCMState != PC2_TRACE &&
+ oldstate == PC8_ACTIVE ) {
+ /* send third trap */
+ snmp_fddi_trap (smc, 3, (int) mib->fddiPORTIndex );
+ } else if ( mib->fddiPORTPCMState == PC8_ACTIVE ) {
+ /* send fourth trap */
+ snmp_fddi_trap (smc, 4, (int) mib->fddiPORTIndex );
+ }
+ }
+#endif
+
+ pcm_state_change(smc,np,state) ;
+}
+
+/*
+ * PCM state machine
+ */
+static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
+{
+ int i ;
+ int np = phy->np ; /* PHY index */
+ struct s_plc *plc ;
+ struct fddi_mib_p *mib ;
+#ifndef MOT_ELM
+ u_short plc_rev ; /* Revision of the plc */
+#endif /* nMOT_ELM */
+
+ plc = &phy->plc ;
+ mib = phy->mib ;
+
+ /*
+ * general transitions independent of state
+ */
+ switch (cmd) {
+ case PC_STOP :
+ /*PC00-PC80*/
+ if (mib->fddiPORTPCMState != PC9_MAINT) {
+ GO_STATE(PC0_OFF) ;
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP,
+ smt_get_port_event_word(smc));
+ }
+ return ;
+ case PC_START :
+ /*PC01-PC81*/
+ if (mib->fddiPORTPCMState != PC9_MAINT)
+ GO_STATE(PC1_BREAK) ;
+ return ;
+ case PC_DISABLE :
+ /* PC09-PC99 */
+ GO_STATE(PC9_MAINT) ;
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_PORT_EVENT, (u_long) FDDI_PORT_DISABLED,
+ smt_get_port_event_word(smc));
+ return ;
+ case PC_TIMEOUT_LCT :
+ /* if long or extended LCT */
+ stop_pcm_timer0(smc,phy) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
+		/* end of LCT is indicated by PCM_CODE (initiate PCM event) */
+ return ;
+ }
+
+ switch(mib->fddiPORTPCMState) {
+ case ACTIONS(PC0_OFF) :
+ stop_pcm_timer0(smc,phy) ;
+ outpw(PLC(np,PL_CNTRL_A),0) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
+ sm_ph_lem_stop(smc,np) ; /* disable LEM */
+ phy->cf_loop = FALSE ;
+ phy->cf_join = FALSE ;
+ queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
+ plc_go_state(smc,np,PL_PCM_STOP) ;
+ mib->fddiPORTConnectState = PCM_DISABLED ;
+ ACTIONS_DONE() ;
+ break ;
+ case PC0_OFF:
+ /*PC09*/
+ if (cmd == PC_MAINT) {
+ GO_STATE(PC9_MAINT) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(PC1_BREAK) :
+ /* Stop the LCT timer if we came from Signal state */
+ stop_pcm_timer0(smc,phy) ;
+ ACTIONS_DONE() ;
+ plc_go_state(smc,np,0) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
+ sm_ph_lem_stop(smc,np) ; /* disable LEM */
+ /*
+ * if vector is already loaded, go to OFF to clear PCM_SIGNAL
+ */
+#if 0
+ if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
+ plc_go_state(smc,np,PL_PCM_STOP) ;
+ /* TB_MIN ? */
+ }
+#endif
+ /*
+ * Go to OFF state in any case.
+ */
+ plc_go_state(smc,np,PL_PCM_STOP) ;
+
+ if (mib->fddiPORTPC_Withhold == PC_WH_NONE)
+ mib->fddiPORTConnectState = PCM_CONNECTING ;
+ phy->cf_loop = FALSE ;
+ phy->cf_join = FALSE ;
+ queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
+ phy->ls_flag = FALSE ;
+ phy->pc_mode = PM_NONE ; /* needed by CFM */
+ phy->bitn = 0 ; /* bit signaling start bit */
+ for (i = 0 ; i < 3 ; i++)
+ pc_tcode_actions(smc,i,phy) ;
+
+ /* Set the non-active interrupt mask register */
+ outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ;
+
+ /*
+		 * If the LCT was stopped, there might be a
+		 * PCM_CODE interrupt event present;
+		 * it must be cleared.
+ */
+ (void)inpw(PLC(np,PL_INTR_EVENT)) ;
+#ifndef MOT_ELM
+ /* Get the plc revision for revision dependent code */
+ plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ;
+
+ if (plc_rev != PLC_REV_SN3)
+#endif /* MOT_ELM */
+ {
+ /*
+			 * No Supernet III PLC, so set the XMIT vector and
+			 * length BEFORE starting the state machine.
+ */
+ if (plc_send_bits(smc,phy,3)) {
+ return ;
+ }
+ }
+
+ /*
+ * Now give the Start command.
+		 * - The start command shall be given before setting the bits
+		 *   to be signaled (per the PLC-S description and the PLCS
+		 *   in the SN3).
+		 * - The start command shall be issued AFTER setting the
+		 *   XMIT vector and the XMIT length register.
+		 *
+		 * We follow these specs exactly for the old PLC and for the
+		 * new PLCS inside the SN3.
+		 * For the usual PLCS we try it the way it is done for the
+		 * old PLC and set the XMIT registers again if the PLC is
+		 * not in SIGNAL state. This is done according to a PLCS
+		 * errata workaround.
+ */
+
+ plc_go_state(smc,np,PL_PCM_START) ;
+
+ /*
+ * workaround for PLC-S eng. sample errata
+ */
+#ifdef MOT_ELM
+ if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
+#else /* nMOT_ELM */
+ if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) !=
+ PLC_REVISION_A) &&
+ !(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
+#endif /* nMOT_ELM */
+ {
+ /*
+ * Set register again (PLCS errata) or the first time
+ * (new SN3 PLCS).
+ */
+ (void) plc_send_bits(smc,phy,3) ;
+ }
+ /*
+ * end of workaround
+ */
+
+ GO_STATE(PC5_SIGNAL) ;
+ plc->p_state = PS_BIT3 ;
+ plc->p_bits = 3 ;
+ plc->p_start = 0 ;
+
+ break ;
+ case PC1_BREAK :
+ break ;
+ case ACTIONS(PC2_TRACE) :
+ plc_go_state(smc,np,PL_PCM_TRACE) ;
+ ACTIONS_DONE() ;
+ break ;
+ case PC2_TRACE :
+ break ;
+
+ case PC3_CONNECT : /* these states are done by hardware */
+ case PC4_NEXT :
+ break ;
+
+ case ACTIONS(PC5_SIGNAL) :
+ ACTIONS_DONE() ;
+ case PC5_SIGNAL :
+ if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
+ break ;
+ switch (plc->p_state) {
+ case PS_BIT3 :
+ for (i = 0 ; i <= 2 ; i++)
+ pc_rcode_actions(smc,i,phy) ;
+ pc_tcode_actions(smc,3,phy) ;
+ plc->p_state = PS_BIT4 ;
+ plc->p_bits = 1 ;
+ plc->p_start = 3 ;
+ phy->bitn = 3 ;
+ if (plc_send_bits(smc,phy,1)) {
+ return ;
+ }
+ break ;
+ case PS_BIT4 :
+ pc_rcode_actions(smc,3,phy) ;
+ for (i = 4 ; i <= 6 ; i++)
+ pc_tcode_actions(smc,i,phy) ;
+ plc->p_state = PS_BIT7 ;
+ plc->p_bits = 3 ;
+ plc->p_start = 4 ;
+ phy->bitn = 4 ;
+ if (plc_send_bits(smc,phy,3)) {
+ return ;
+ }
+ break ;
+ case PS_BIT7 :
+ for (i = 3 ; i <= 6 ; i++)
+ pc_rcode_actions(smc,i,phy) ;
+ plc->p_state = PS_LCT ;
+ plc->p_bits = 0 ;
+ plc->p_start = 7 ;
+ phy->bitn = 7 ;
+ sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */
+ /* start LCT */
+ i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ;
+ outpw(PLC(np,PL_CNTRL_B),i) ; /* must be cleared */
+ outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ;
+ break ;
+ case PS_LCT :
+ /* check for local LCT failure */
+ pc_tcode_actions(smc,7,phy) ;
+ /*
+ * set tval[7]
+ */
+ plc->p_state = PS_BIT8 ;
+ plc->p_bits = 1 ;
+ plc->p_start = 7 ;
+ phy->bitn = 7 ;
+ if (plc_send_bits(smc,phy,1)) {
+ return ;
+ }
+ break ;
+ case PS_BIT8 :
+ /* check for remote LCT failure */
+ pc_rcode_actions(smc,7,phy) ;
+ if (phy->t_val[7] || phy->r_val[7]) {
+ plc_go_state(smc,np,PL_PCM_STOP) ;
+ GO_STATE(PC1_BREAK) ;
+ break ;
+ }
+ for (i = 8 ; i <= 9 ; i++)
+ pc_tcode_actions(smc,i,phy) ;
+ plc->p_state = PS_JOIN ;
+ plc->p_bits = 2 ;
+ plc->p_start = 8 ;
+ phy->bitn = 8 ;
+ if (plc_send_bits(smc,phy,2)) {
+ return ;
+ }
+ break ;
+ case PS_JOIN :
+ for (i = 8 ; i <= 9 ; i++)
+ pc_rcode_actions(smc,i,phy) ;
+ plc->p_state = PS_ACTIVE ;
+ GO_STATE(PC6_JOIN) ;
+ break ;
+ }
+ break ;
+
+ case ACTIONS(PC6_JOIN) :
+ /*
+ * prevent mux error when going from WRAP_A to WRAP_B
+ */
+ if (smc->s.sas == SMT_DAS && np == PB &&
+ (smc->y[PA].pc_mode == PM_TREE ||
+ smc->y[PB].pc_mode == PM_TREE)) {
+ SETMASK(PLC(np,PL_CNTRL_A),
+ PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
+ SETMASK(PLC(np,PL_CNTRL_B),
+ PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
+ }
+ SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
+ SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
+ ACTIONS_DONE() ;
+ cmd = 0 ;
+ /* fall thru */
+ case PC6_JOIN :
+ switch (plc->p_state) {
+ case PS_ACTIVE:
+ /*PC88b*/
+ if (!phy->cf_join) {
+ phy->cf_join = TRUE ;
+				queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
+ }
+ if (cmd == PC_JOIN)
+ GO_STATE(PC8_ACTIVE) ;
+ /*PC82*/
+ if (cmd == PC_TRACE) {
+ GO_STATE(PC2_TRACE) ;
+ break ;
+ }
+ break ;
+ }
+ break ;
+
+ case PC7_VERIFY :
+ break ;
+
+ case ACTIONS(PC8_ACTIVE) :
+ /*
+ * start LEM for SMT
+ */
+ sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ;
+
+ phy->tr_flag = FALSE ;
+ mib->fddiPORTConnectState = PCM_ACTIVE ;
+
+ /* Set the active interrupt mask register */
+ outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ;
+
+ ACTIONS_DONE() ;
+ break ;
+ case PC8_ACTIVE :
+ /*PC81 is done by PL_TNE_EXPIRED irq */
+ /*PC82*/
+ if (cmd == PC_TRACE) {
+ GO_STATE(PC2_TRACE) ;
+ break ;
+ }
+ /*PC88c: is done by TRACE_PROP irq */
+
+ break ;
+ case ACTIONS(PC9_MAINT) :
+ stop_pcm_timer0(smc,phy) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
+ CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
+ CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; /* disable LEM int. */
+ sm_ph_lem_stop(smc,np) ; /* disable LEM */
+ phy->cf_loop = FALSE ;
+ phy->cf_join = FALSE ;
+ queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
+ plc_go_state(smc,np,PL_PCM_STOP) ;
+ mib->fddiPORTConnectState = PCM_DISABLED ;
+ SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
+ sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
+ outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
+ ACTIONS_DONE() ;
+ break ;
+ case PC9_MAINT :
+ DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
+ /*PC90*/
+ if (cmd == PC_ENABLE) {
+ GO_STATE(PC0_OFF) ;
+ break ;
+ }
+ break ;
+
+ default:
+ SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
+ break ;
+ }
+}
+
+/*
+ * force line state on a PHY output (only in MAINT state)
+ */
+static void sm_ph_linestate(struct s_smc *smc, int phy, int ls)
+{
+ int cntrl ;
+
+ SK_UNUSED(smc) ;
+
+ cntrl = (inpw(PLC(phy,PL_CNTRL_B)) & ~PL_MAINT_LS) |
+ PL_PCM_STOP | PL_MAINT ;
+ switch(ls) {
+ case PC_QLS: /* Force Quiet */
+ cntrl |= PL_M_QUI0 ;
+ break ;
+ case PC_MLS: /* Force Master */
+ cntrl |= PL_M_MASTR ;
+ break ;
+ case PC_HLS: /* Force Halt */
+ cntrl |= PL_M_HALT ;
+ break ;
+ default :
+ case PC_ILS: /* Force Idle */
+ cntrl |= PL_M_IDLE ;
+ break ;
+ case PC_LS_PDR: /* Enable repeat filter */
+ cntrl |= PL_M_TPDR ;
+ break ;
+ }
+ outpw(PLC(phy,PL_CNTRL_B),cntrl) ;
+}
+
+static void reset_lem_struct(struct s_phy *phy)
+{
+ struct lem_counter *lem = &phy->lem ;
+
+ phy->mib->fddiPORTLer_Estimate = 15 ;
+ lem->lem_float_ber = 15 * 100 ;
+}
+
+/*
+ * link error monitor
+ */
+static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
+{
+ int ber ;
+ u_long errors ;
+ struct lem_counter *lem = &phy->lem ;
+ struct fddi_mib_p *mib ;
+ int cond ;
+
+ mib = phy->mib ;
+
+ if (!lem->lem_on)
+ return ;
+
+ errors = inpw(PLC(((int) phy->np),PL_LINK_ERR_CTR)) ;
+ lem->lem_errors += errors ;
+ mib->fddiPORTLem_Ct += errors ;
+
+ errors = lem->lem_errors ;
+ /*
+	 * The calculation is called at an interval of 8 seconds,
+	 *	-> this means that one error in 8 sec. is one in 8*125*10E6
+	 *	   bits, the same as BER = 10E-9.
+ * Please note:
+ * -> 9 errors in 8 seconds mean:
+ * BER = 9 * 10E-9 and this is
+ * < 10E-8, so the limit of 10E-8 is not reached!
+ */
+
+ if (!errors) ber = 15 ;
+ else if (errors <= 9) ber = 9 ;
+ else if (errors <= 99) ber = 8 ;
+ else if (errors <= 999) ber = 7 ;
+ else if (errors <= 9999) ber = 6 ;
+ else if (errors <= 99999) ber = 5 ;
+ else if (errors <= 999999) ber = 4 ;
+ else if (errors <= 9999999) ber = 3 ;
+ else if (errors <= 99999999) ber = 2 ;
+ else if (errors <= 999999999) ber = 1 ;
+ else ber = 0 ;
+
+ /*
+ * weighted average
+ */
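+	/*
+	 * lem_float_ber keeps the BER exponent scaled by 100 (two fractional
+	 * digits); the 7:3 weighting below forms an exponentially weighted
+	 * moving average of the per-interval estimate.
+	 */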
+ ber *= 100 ;
+ lem->lem_float_ber = lem->lem_float_ber * 7 + ber * 3 ;
+ lem->lem_float_ber /= 10 ;
+ mib->fddiPORTLer_Estimate = lem->lem_float_ber / 100 ;
+ if (mib->fddiPORTLer_Estimate < 4) {
+ mib->fddiPORTLer_Estimate = 4 ;
+ }
+
+ if (lem->lem_errors) {
+ DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ;
+ DB_PCMN(1,"errors : %ld\n",lem->lem_errors,0) ;
+ DB_PCMN(1,"sum_errors : %ld\n",mib->fddiPORTLem_Ct,0) ;
+ DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ;
+ DB_PCMN(1,"float BER : 10E-(%d/100)\n",lem->lem_float_ber,0) ;
+ DB_PCMN(1,"avg. BER : 10E-%d\n",
+ mib->fddiPORTLer_Estimate,0) ;
+ }
+
+ lem->lem_errors = 0L ;
+
+#ifndef SLIM_SMT
+ cond = (mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Alarm) ?
+ TRUE : FALSE ;
+#ifdef SMT_EXT_CUTOFF
+ smt_ler_alarm_check(smc,phy,cond) ;
+#endif /* nSMT_EXT_CUTOFF */
+ if (cond != mib->fddiPORTLerFlag) {
+ smt_srf_event(smc,SMT_COND_PORT_LER,
+ (int) (INDEX_PORT+ phy->np) ,cond) ;
+ }
+#endif
+
+ if ( mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Cutoff) {
+ phy->pc_lem_fail = TRUE ; /* flag */
+ mib->fddiPORTLem_Reject_Ct++ ;
+ /*
+ * "forgive 10e-2" if we cutoff so we can come
+ * up again ..
+ */
+ lem->lem_float_ber += 2*100 ;
+
+ /*PC81b*/
+#ifdef CONCENTRATOR
+ DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n",
+ phy->np, mib->fddiPORTLer_Cutoff) ;
+#endif
+#ifdef SMT_EXT_CUTOFF
+ smt_port_off_event(smc,phy->np);
+#else /* nSMT_EXT_CUTOFF */
+ queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
+#endif /* nSMT_EXT_CUTOFF */
+ }
+}
+
+/*
+ * called by SMT to calculate LEM bit error rate
+ */
+void sm_lem_evaluate(struct s_smc *smc)
+{
+ int np ;
+
+ for (np = 0 ; np < NUMPHYS ; np++)
+ lem_evaluate(smc,&smc->y[np]) ;
+}
+
+static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
+{
+ struct lem_counter *lem = &phy->lem ;
+ struct fddi_mib_p *mib ;
+ int errors ;
+
+ mib = phy->mib ;
+
+ phy->pc_lem_fail = FALSE ; /* flag */
+ errors = inpw(PLC(((int)phy->np),PL_LINK_ERR_CTR)) ;
+ lem->lem_errors += errors ;
+ mib->fddiPORTLem_Ct += errors ;
+ if (lem->lem_errors) {
+ switch(phy->lc_test) {
+ case LC_SHORT:
+ if (lem->lem_errors >= smc->s.lct_short)
+ phy->pc_lem_fail = TRUE ;
+ break ;
+ case LC_MEDIUM:
+ if (lem->lem_errors >= smc->s.lct_medium)
+ phy->pc_lem_fail = TRUE ;
+ break ;
+ case LC_LONG:
+ if (lem->lem_errors >= smc->s.lct_long)
+ phy->pc_lem_fail = TRUE ;
+ break ;
+ case LC_EXTENDED:
+ if (lem->lem_errors >= smc->s.lct_extended)
+ phy->pc_lem_fail = TRUE ;
+ break ;
+ }
+ DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ;
+ }
+ if (phy->pc_lem_fail) {
+ mib->fddiPORTLCTFail_Ct++ ;
+ mib->fddiPORTLem_Reject_Ct++ ;
+ }
+ else
+ mib->fddiPORTLCTFail_Ct = 0 ;
+}
+
+/*
+ * LEM functions
+ */
+static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold)
+{
+ struct lem_counter *lem = &smc->y[np].lem ;
+
+ lem->lem_on = 1 ;
+ lem->lem_errors = 0L ;
+
+ /* Do NOT reset mib->fddiPORTLer_Estimate here. It is called too
+ * often.
+ */
+
+ outpw(PLC(np,PL_LE_THRESHOLD),threshold) ;
+ (void)inpw(PLC(np,PL_LINK_ERR_CTR)) ; /* clear error counter */
+
+ /* enable LE INT */
+ SETMASK(PLC(np,PL_INTR_MASK),PL_LE_CTR,PL_LE_CTR) ;
+}
+
+static void sm_ph_lem_stop(struct s_smc *smc, int np)
+{
+ struct lem_counter *lem = &smc->y[np].lem ;
+
+ lem->lem_on = 0 ;
+ CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;
+}
+
+/* ARGSUSED */
+void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off)
+/* int on_off; en- or disable ident. ls */
+{
+ SK_UNUSED(smc) ;
+
+ phy = phy ; on_off = on_off ;
+}
+
+
+/*
+ * PCM pseudo code
+ * receive actions are called AFTER the bit n is received,
+ * i.e. if pc_rcode_actions(5) is called, bit 6 is the next bit to be received
+ */
+
+/*
+ * PCM pseudo code 5.1 .. 6.1
+ */
+static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
+{
+ struct fddi_mib_p *mib ;
+
+ mib = phy->mib ;
+
+ DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ;
+ bit++ ;
+
+ switch(bit) {
+ case 0:
+ case 1:
+ case 2:
+ break ;
+ case 3 :
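+		/* r_val[1..2] encode the neighbor's port type: 00=A, 01=B, 10=S, 11=M */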
+ if (phy->r_val[1] == 0 && phy->r_val[2] == 0)
+ mib->fddiPORTNeighborType = TA ;
+ else if (phy->r_val[1] == 0 && phy->r_val[2] == 1)
+ mib->fddiPORTNeighborType = TB ;
+ else if (phy->r_val[1] == 1 && phy->r_val[2] == 0)
+ mib->fddiPORTNeighborType = TS ;
+ else if (phy->r_val[1] == 1 && phy->r_val[2] == 1)
+ mib->fddiPORTNeighborType = TM ;
+ break ;
+ case 4:
+ if (mib->fddiPORTMy_Type == TM &&
+ mib->fddiPORTNeighborType == TM) {
+ DB_PCMN(1,"PCM %c : E100 withhold M-M\n",
+ phy->phy_name,0) ;
+ mib->fddiPORTPC_Withhold = PC_WH_M_M ;
+ RS_SET(smc,RS_EVENT) ;
+ }
+ else if (phy->t_val[3] || phy->r_val[3]) {
+ mib->fddiPORTPC_Withhold = PC_WH_NONE ;
+ if (mib->fddiPORTMy_Type == TM ||
+ mib->fddiPORTNeighborType == TM)
+ phy->pc_mode = PM_TREE ;
+ else
+ phy->pc_mode = PM_PEER ;
+
+ /* reevaluate the selection criteria (wc_flag) */
+ all_selection_criteria (smc);
+
+ if (phy->wc_flag) {
+ mib->fddiPORTPC_Withhold = PC_WH_PATH ;
+ }
+ }
+ else {
+ mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
+ RS_SET(smc,RS_EVENT) ;
+ DB_PCMN(1,"PCM %c : E101 withhold other\n",
+ phy->phy_name,0) ;
+ }
+ phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
+ (mib->fddiPORTMy_Type != TM) &&
+ (mib->fddiPORTNeighborType ==
+ mib->fddiPORTMy_Type)) ;
+ if (phy->twisted) {
+ DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n",
+ phy->phy_name,0) ;
+ }
+ break ;
+ case 5 :
+ break ;
+ case 6:
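+		/*
+		 * bits 4 and 5 (ours or the neighbor's) select the LCT
+		 * duration: neither set = short, bit 5 = medium,
+		 * bit 4 = long, bits 4 and 5 on one side = extended
+		 */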
+ if (phy->t_val[4] || phy->r_val[4]) {
+ if ((phy->t_val[4] && phy->t_val[5]) ||
+ (phy->r_val[4] && phy->r_val[5]) )
+ phy->lc_test = LC_EXTENDED ;
+ else
+ phy->lc_test = LC_LONG ;
+ }
+ else if (phy->t_val[5] || phy->r_val[5])
+ phy->lc_test = LC_MEDIUM ;
+ else
+ phy->lc_test = LC_SHORT ;
+ switch (phy->lc_test) {
+ case LC_SHORT : /* 50ms */
+ outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LENGTH ) ;
+ phy->t_next[7] = smc->s.pcm_lc_short ;
+ break ;
+ case LC_MEDIUM : /* 500ms */
+ outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LONGLN ) ;
+ phy->t_next[7] = smc->s.pcm_lc_medium ;
+ break ;
+ case LC_LONG :
+ SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
+ phy->t_next[7] = smc->s.pcm_lc_long ;
+ break ;
+ case LC_EXTENDED :
+ SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
+ phy->t_next[7] = smc->s.pcm_lc_extended ;
+ break ;
+ }
+ if (phy->t_next[7] > smc->s.pcm_lc_medium) {
+ start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
+ }
+ DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ;
+ phy->t_next[9] = smc->s.pcm_t_next_9 ;
+ break ;
+ case 7:
+ if (phy->t_val[6]) {
+ phy->cf_loop = TRUE ;
+ }
+ phy->td_flag = TRUE ;
+ break ;
+ case 8:
+ if (phy->t_val[7] || phy->r_val[7]) {
+ DB_PCMN(1,"PCM %c : E103 LCT fail %s\n",
+ phy->phy_name,phy->t_val[7]? "local":"remote") ;
+ queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
+ }
+ break ;
+ case 9:
+ if (phy->t_val[8] || phy->r_val[8]) {
+ if (phy->t_val[8])
+ phy->cf_loop = TRUE ;
+ phy->td_flag = TRUE ;
+ }
+ break ;
+ case 10:
+ if (phy->r_val[9]) {
+			/* neighbor intends to have MAC on output */
+ mib->fddiPORTMacIndicated.R_val = TRUE ;
+ }
+ else {
+			/* neighbor does not intend to have MAC on output */
+ mib->fddiPORTMacIndicated.R_val = FALSE ;
+ }
+ break ;
+ }
+}
+
+/*
+ * PCM pseudo code 5.1 .. 6.1
+ */
+static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy)
+{
+ int np = phy->np ;
+ struct fddi_mib_p *mib ;
+
+ mib = phy->mib ;
+
+ switch(bit) {
+ case 0:
+ phy->t_val[0] = 0 ; /* no escape used */
+ break ;
+ case 1:
+ if (mib->fddiPORTMy_Type == TS || mib->fddiPORTMy_Type == TM)
+ phy->t_val[1] = 1 ;
+ else
+ phy->t_val[1] = 0 ;
+ break ;
+ case 2 :
+ if (mib->fddiPORTMy_Type == TB || mib->fddiPORTMy_Type == TM)
+ phy->t_val[2] = 1 ;
+ else
+ phy->t_val[2] = 0 ;
+ break ;
+ case 3:
+ {
+ int type,ne ;
+ int policy ;
+
+ type = mib->fddiPORTMy_Type ;
+ ne = mib->fddiPORTNeighborType ;
+ policy = smc->mib.fddiSMTConnectionPolicy ;
+
+ phy->t_val[3] = 1 ; /* Accept connection */
+ switch (type) {
+ case TA :
+ if (
+ ((policy & POLICY_AA) && ne == TA) ||
+ ((policy & POLICY_AB) && ne == TB) ||
+ ((policy & POLICY_AS) && ne == TS) ||
+ ((policy & POLICY_AM) && ne == TM) )
+ phy->t_val[3] = 0 ; /* Reject */
+ break ;
+ case TB :
+ if (
+ ((policy & POLICY_BA) && ne == TA) ||
+ ((policy & POLICY_BB) && ne == TB) ||
+ ((policy & POLICY_BS) && ne == TS) ||
+ ((policy & POLICY_BM) && ne == TM) )
+ phy->t_val[3] = 0 ; /* Reject */
+ break ;
+ case TS :
+ if (
+ ((policy & POLICY_SA) && ne == TA) ||
+ ((policy & POLICY_SB) && ne == TB) ||
+ ((policy & POLICY_SS) && ne == TS) ||
+ ((policy & POLICY_SM) && ne == TM) )
+ phy->t_val[3] = 0 ; /* Reject */
+ break ;
+ case TM :
+ if ( ne == TM ||
+ ((policy & POLICY_MA) && ne == TA) ||
+ ((policy & POLICY_MB) && ne == TB) ||
+ ((policy & POLICY_MS) && ne == TS) ||
+ ((policy & POLICY_MM) && ne == TM) )
+ phy->t_val[3] = 0 ; /* Reject */
+ break ;
+ }
+#ifndef SLIM_SMT
+ /*
+ * detect undesirable connection attempt event
+ */
+ if ( (type == TA && ne == TA ) ||
+ (type == TA && ne == TS ) ||
+ (type == TB && ne == TB ) ||
+ (type == TB && ne == TS ) ||
+ (type == TS && ne == TA ) ||
+ (type == TS && ne == TB ) ) {
+ smt_srf_event(smc,SMT_EVENT_PORT_CONNECTION,
+ (int) (INDEX_PORT+ phy->np) ,0) ;
+ }
+#endif
+ }
+ break ;
+ case 4:
+ if (mib->fddiPORTPC_Withhold == PC_WH_NONE) {
+ if (phy->pc_lem_fail) {
+ phy->t_val[4] = 1 ; /* long */
+ phy->t_val[5] = 0 ;
+ }
+ else {
+ phy->t_val[4] = 0 ;
+ if (mib->fddiPORTLCTFail_Ct > 0)
+ phy->t_val[5] = 1 ; /* medium */
+ else
+ phy->t_val[5] = 0 ; /* short */
+
+ /*
+ * Implementers choice: use medium
+				 * Implementer's choice: use medium
+				 * instead of short when an undesired
+				 * connection attempt is made.
+ if (phy->wc_flag)
+ phy->t_val[5] = 1 ; /* medium */
+ }
+ mib->fddiPORTConnectState = PCM_CONNECTING ;
+ }
+ else {
+ mib->fddiPORTConnectState = PCM_STANDBY ;
+ phy->t_val[4] = 1 ; /* extended */
+ phy->t_val[5] = 1 ;
+ }
+ break ;
+ case 5:
+ break ;
+ case 6:
+ /* we do NOT have a MAC for LCT */
+ phy->t_val[6] = 0 ;
+ break ;
+ case 7:
+ phy->cf_loop = FALSE ;
+ lem_check_lct(smc,phy) ;
+ if (phy->pc_lem_fail) {
+ DB_PCMN(1,"PCM %c : E104 LCT failed\n",
+ phy->phy_name,0) ;
+ phy->t_val[7] = 1 ;
+ }
+ else
+ phy->t_val[7] = 0 ;
+ break ;
+ case 8:
+ phy->t_val[8] = 0 ; /* Don't request MAC loopback */
+ break ;
+ case 9:
+ phy->cf_loop = 0 ;
+ if ((mib->fddiPORTPC_Withhold != PC_WH_NONE) ||
+ ((smc->s.sas == SMT_DAS) && (phy->wc_flag))) {
+ queue_event(smc,EVENT_PCM+np,PC_START) ;
+ break ;
+ }
+ phy->t_val[9] = FALSE ;
+ switch (smc->s.sas) {
+ case SMT_DAS :
+ /*
+ * MAC intended on output
+ */
+ if (phy->pc_mode == PM_TREE) {
+ if ((np == PB) || ((np == PA) &&
+ (smc->y[PB].mib->fddiPORTConnectState !=
+ PCM_ACTIVE)))
+ phy->t_val[9] = TRUE ;
+ }
+ else {
+ if (np == PB)
+ phy->t_val[9] = TRUE ;
+ }
+ break ;
+ case SMT_SAS :
+ if (np == PS)
+ phy->t_val[9] = TRUE ;
+ break ;
+#ifdef CONCENTRATOR
+ case SMT_NAC :
+ /*
+ * MAC intended on output
+ */
+ if (np == PB)
+ phy->t_val[9] = TRUE ;
+ break ;
+#endif
+ }
+ mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
+ break ;
+ }
+ DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ;
+}
+
+/*
+ * return status twisted (called by SMT)
+ */
+int pcm_status_twisted(struct s_smc *smc)
+{
+ int twist = 0 ;
+ if (smc->s.sas != SMT_DAS)
+ return(0) ;
+ if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
+ twist |= 1 ;
+ if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
+ twist |= 2 ;
+ return(twist) ;
+}
+
+/*
+ * return status (called by SMT)
+ * type
+ * state
+ * remote phy type
+ * remote mac yes/no
+ */
+void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
+ int *remote, int *mac)
+{
+ struct s_phy *phy = &smc->y[np] ;
+ struct fddi_mib_p *mib ;
+
+ mib = phy->mib ;
+
+ /* remote PHY type and MAC - set only if active */
+ *mac = 0 ;
+ *type = mib->fddiPORTMy_Type ; /* our PHY type */
+ *state = mib->fddiPORTConnectState ;
+ *remote = mib->fddiPORTNeighborType ;
+
+ switch(mib->fddiPORTPCMState) {
+ case PC8_ACTIVE :
+ *mac = mib->fddiPORTMacIndicated.R_val ;
+ break ;
+ }
+}
+
+/*
+ * return rooted station status (called by SMT)
+ */
+int pcm_rooted_station(struct s_smc *smc)
+{
+ int n ;
+
+ for (n = 0 ; n < NUMPHYS ; n++) {
+ if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
+ smc->y[n].mib->fddiPORTNeighborType == TM)
+ return(0) ;
+ }
+ return(1) ;
+}
+
+/*
+ * Interrupt actions for PLC & PCM events
+ */
+void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
+/* int np; PHY index */
+{
+ struct s_phy *phy = &smc->y[np] ;
+ struct s_plc *plc = &phy->plc ;
+ int n ;
+#ifdef SUPERNET_3
+ int corr_mask ;
+#endif /* SUPERNET_3 */
+ int i ;
+
+ if (np >= smc->s.numphys) {
+ plc->soft_err++ ;
+ return ;
+ }
+ if (cmd & PL_EBUF_ERR) { /* elastic buff. det. over-|underflow*/
+ /*
+ * Check whether the SRF Condition occurred.
+ */
+ if (!plc->ebuf_cont && phy->mib->fddiPORTPCMState == PC8_ACTIVE){
+ /*
+ * This is the real Elasticity Error.
+ * More than one in a row are treated as a
+ * single one.
+ * Only count this in the active state.
+ */
+ phy->mib->fddiPORTEBError_Ct ++ ;
+
+ }
+
+ plc->ebuf_err++ ;
+ if (plc->ebuf_cont <= 1000) {
+ /*
+			 * Prevent the counter from wrapping if this
+			 * interrupt keeps firing for years.
+			 */
+			plc->ebuf_cont++ ;	/* Ebuf continuous error */
+ }
+
+#ifdef SUPERNET_3
+ if (plc->ebuf_cont == 1000 &&
+ ((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) ==
+ PLC_REV_SN3)) {
+ /*
+			 * This interrupt remained high for at least
+ * 1000 consecutive interrupt calls.
+ *
+ * This is caused by a hardware error of the
+ * ORION part of the Supernet III chipset.
+ *
+ * Disable this bit from the mask.
+ */
+ corr_mask = (plc_imsk_na & ~PL_EBUF_ERR) ;
+ outpw(PLC(np,PL_INTR_MASK),corr_mask);
+
+ /*
+ * Disconnect from the ring.
+ * Call the driver with the reset indication.
+ */
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+
+ /*
+ * Make an error log entry.
+ */
+ SMT_ERR_LOG(smc,SMT_E0136, SMT_E0136_MSG) ;
+
+ /*
+ * Indicate the Reset.
+ */
+ drv_reset_indication(smc) ;
+ }
+#endif /* SUPERNET_3 */
+ } else {
+		/* Reset the continuous error variable */
+		plc->ebuf_cont = 0 ;	/* reset Ebuf continuous error */
+ }
+ if (cmd & PL_PHYINV) { /* physical layer invalid signal */
+ plc->phyinv++ ;
+ }
+ if (cmd & PL_VSYM_CTR) { /* violation symbol counter has incr.*/
+ plc->vsym_ctr++ ;
+ }
+ if (cmd & PL_MINI_CTR) { /* dep. on PLC_CNTRL_A's MINI_CTR_INT*/
+ plc->mini_ctr++ ;
+ }
+ if (cmd & PL_LE_CTR) { /* link error event counter */
+ int j ;
+
+ /*
+ * note: PL_LINK_ERR_CTR MUST be read to clear it
+ */
+ j = inpw(PLC(np,PL_LE_THRESHOLD)) ;
+ i = inpw(PLC(np,PL_LINK_ERR_CTR)) ;
+
+ if (i < j) {
+ /* wrapped around */
+ i += 256 ;
+ }
+
+ if (phy->lem.lem_on) {
+ /* Note: Lem errors shall only be counted when
+ * link is ACTIVE or LCT is active.
+ */
+ phy->lem.lem_errors += i ;
+ phy->mib->fddiPORTLem_Ct += i ;
+ }
+ }
+ if (cmd & PL_TPC_EXPIRED) { /* TPC timer reached zero */
+ if (plc->p_state == PS_LCT) {
+ /*
+ * end of LCT
+ */
+ ;
+ }
+ plc->tpc_exp++ ;
+ }
+ if (cmd & PL_LS_MATCH) { /* LS == LS in PLC_CNTRL_B's MATCH_LS*/
+ switch (inpw(PLC(np,PL_CNTRL_B)) & PL_MATCH_LS) {
+ case PL_I_IDLE : phy->curr_ls = PC_ILS ; break ;
+ case PL_I_HALT : phy->curr_ls = PC_HLS ; break ;
+ case PL_I_MASTR : phy->curr_ls = PC_MLS ; break ;
+ case PL_I_QUIET : phy->curr_ls = PC_QLS ; break ;
+ }
+ }
+ if (cmd & PL_PCM_BREAK) { /* PCM has entered the BREAK state */
+ int reason;
+
+ reason = inpw(PLC(np,PL_STATUS_B)) & PL_BREAK_REASON ;
+
+ switch (reason) {
+ case PL_B_PCS : plc->b_pcs++ ; break ;
+ case PL_B_TPC : plc->b_tpc++ ; break ;
+ case PL_B_TNE : plc->b_tne++ ; break ;
+ case PL_B_QLS : plc->b_qls++ ; break ;
+ case PL_B_ILS : plc->b_ils++ ; break ;
+ case PL_B_HLS : plc->b_hls++ ; break ;
+ }
+
+ /*jd 05-Aug-1999 changed: Bug #10419 */
+ DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag);
+ if (smc->e.DisconnectFlag == FALSE) {
+ DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason);
+ queue_event(smc,EVENT_PCM+np,PC_START) ;
+ }
+ else {
+ DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason);
+ }
+ return ;
+ }
+ /*
+ * If both CODE & ENABLE are set ignore enable
+ */
+ if (cmd & PL_PCM_CODE) { /* receive last sign.-bit | LCT complete */
+ queue_event(smc,EVENT_PCM+np,PC_SIGNAL) ;
+ n = inpw(PLC(np,PL_RCV_VECTOR)) ;
+ for (i = 0 ; i < plc->p_bits ; i++) {
+ phy->r_val[plc->p_start+i] = n & 1 ;
+ n >>= 1 ;
+ }
+ }
+ else if (cmd & PL_PCM_ENABLED) { /* asserted SC_JOIN, scrub.completed*/
+ queue_event(smc,EVENT_PCM+np,PC_JOIN) ;
+ }
+ if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */
+ /*PC22b*/
+ if (!phy->tr_flag) {
+ DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n",
+ np,smc->mib.fddiSMTECMState) ;
+ phy->tr_flag = TRUE ;
+ smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
+ queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
+ }
+ }
+ /*
+ * filter PLC glitch ???
+ * QLS || HLS only while in PC2_TRACE state
+ */
+ if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
+ /*PC22a*/
+ if (smc->e.path_test == PT_PASSED) {
+ DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np),
+ phy->mib->fddiPORTPCMState) ;
+
+ smc->e.path_test = PT_PENDING ;
+ queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
+ }
+ }
+ if (cmd & PL_TNE_EXPIRED) { /* TNE: length of noise events */
+ /* break_required (TNE > NS_Max) */
+ if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
+ if (!phy->tr_flag) {
+ DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE");
+ queue_event(smc,EVENT_PCM+np,PC_START) ;
+ return ;
+ }
+ }
+ }
+#if 0
+ if (cmd & PL_NP_ERR) { /* NP has requested to r/w an inv reg*/
+ /*
+ * It's a bug by AMD
+ */
+ plc->np_err++ ;
+ }
+	/* pin inactive (GND) */
+	if (cmd & PL_PARITY_ERR) {	/* parity error detected on TX9-0 input */
+ plc->parity_err++ ;
+ }
+ if (cmd & PL_LSDO) { /* carrier detected */
+ ;
+ }
+#endif
+}
+
+void pcm_set_lct_short(struct s_smc *smc, int n)
+{
+ if (n <= 0 || n > 1000)
+ return ;
+ smc->s.lct_short = n ;
+}
+
+#ifdef DEBUG
+/*
+ * fill state struct
+ */
+void pcm_get_state(struct s_smc *smc, struct smt_state *state)
+{
+ struct s_phy *phy ;
+ struct pcm_state *pcs ;
+ int i ;
+ int ii ;
+ short rbits ;
+ short tbits ;
+ struct fddi_mib_p *mib ;
+
+ for (i = 0, phy = smc->y, pcs = state->pcm_state ; i < NUMPHYS ;
+ i++ , phy++, pcs++ ) {
+ mib = phy->mib ;
+ pcs->pcm_type = (u_char) mib->fddiPORTMy_Type ;
+ pcs->pcm_state = (u_char) mib->fddiPORTPCMState ;
+ pcs->pcm_mode = phy->pc_mode ;
+ pcs->pcm_neighbor = (u_char) mib->fddiPORTNeighborType ;
+ pcs->pcm_bsf = mib->fddiPORTBS_Flag ;
+ pcs->pcm_lsf = phy->ls_flag ;
+ pcs->pcm_lct_fail = (u_char) mib->fddiPORTLCTFail_Ct ;
+ pcs->pcm_ls_rx = LS2MIB(sm_pm_get_ls(smc,i)) ;
+ for (ii = 0, rbits = tbits = 0 ; ii < NUMBITS ; ii++) {
+ rbits <<= 1 ;
+ tbits <<= 1 ;
+ if (phy->r_val[NUMBITS-1-ii])
+ rbits |= 1 ;
+ if (phy->t_val[NUMBITS-1-ii])
+ tbits |= 1 ;
+ }
+ pcs->pcm_r_val = rbits ;
+ pcs->pcm_t_val = tbits ;
+ }
+}
+
+int get_pcm_state(struct s_smc *smc, int np)
+{
+ int pcs ;
+
+ SK_UNUSED(smc) ;
+
+ switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
+ case PL_PC0 : pcs = PC_STOP ; break ;
+ case PL_PC1 : pcs = PC_START ; break ;
+ case PL_PC2 : pcs = PC_TRACE ; break ;
+ case PL_PC3 : pcs = PC_SIGNAL ; break ;
+ case PL_PC4 : pcs = PC_SIGNAL ; break ;
+ case PL_PC5 : pcs = PC_SIGNAL ; break ;
+ case PL_PC6 : pcs = PC_JOIN ; break ;
+ case PL_PC7 : pcs = PC_JOIN ; break ;
+ case PL_PC8 : pcs = PC_ENABLE ; break ;
+ case PL_PC9 : pcs = PC_MAINT ; break ;
+ default : pcs = PC_DISABLE ; break ;
+ }
+ return(pcs) ;
+}
+
+char *get_linestate(struct s_smc *smc, int np)
+{
+ char *ls = "" ;
+
+ SK_UNUSED(smc) ;
+
+ switch (inpw(PLC(np,PL_STATUS_A)) & PL_LINE_ST) {
+ case PL_L_NLS : ls = "NOISE" ; break ;
+ case PL_L_ALS : ls = "ACTIV" ; break ;
+ case PL_L_UND : ls = "UNDEF" ; break ;
+ case PL_L_ILS4: ls = "ILS 4" ; break ;
+ case PL_L_QLS : ls = "QLS" ; break ;
+ case PL_L_MLS : ls = "MLS" ; break ;
+ case PL_L_HLS : ls = "HLS" ; break ;
+ case PL_L_ILS16:ls = "ILS16" ; break ;
+#ifdef lint
+ default: ls = "unknown" ; break ;
+#endif
+ }
+ return(ls) ;
+}
+
+char *get_pcmstate(struct s_smc *smc, int np)
+{
+ char *pcs ;
+
+ SK_UNUSED(smc) ;
+
+ switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
+ case PL_PC0 : pcs = "OFF" ; break ;
+ case PL_PC1 : pcs = "BREAK" ; break ;
+ case PL_PC2 : pcs = "TRACE" ; break ;
+ case PL_PC3 : pcs = "CONNECT"; break ;
+ case PL_PC4 : pcs = "NEXT" ; break ;
+ case PL_PC5 : pcs = "SIGNAL" ; break ;
+ case PL_PC6 : pcs = "JOIN" ; break ;
+ case PL_PC7 : pcs = "VERIFY" ; break ;
+ case PL_PC8 : pcs = "ACTIV" ; break ;
+ case PL_PC9 : pcs = "MAINT" ; break ;
+ default : pcs = "UNKNOWN" ; break ;
+ }
+ return(pcs) ;
+}
+
+void list_phy(struct s_smc *smc)
+{
+ struct s_plc *plc ;
+ int np ;
+
+ for (np = 0 ; np < NUMPHYS ; np++) {
+ plc = &smc->y[np].plc ;
+ printf("PHY %d:\tERRORS\t\t\tBREAK_REASONS\t\tSTATES:\n",np) ;
+ printf("\tsoft_error: %ld \t\tPC_Start : %ld\n",
+ plc->soft_err,plc->b_pcs);
+ printf("\tparity_err: %ld \t\tTPC exp. : %ld\t\tLine: %s\n",
+ plc->parity_err,plc->b_tpc,get_linestate(smc,np)) ;
+ printf("\tebuf_error: %ld \t\tTNE exp. : %ld\n",
+ plc->ebuf_err,plc->b_tne) ;
+ printf("\tphyinvalid: %ld \t\tQLS det. : %ld\t\tPCM : %s\n",
+ plc->phyinv,plc->b_qls,get_pcmstate(smc,np)) ;
+ printf("\tviosym_ctr: %ld \t\tILS det. : %ld\n",
+ plc->vsym_ctr,plc->b_ils) ;
+ printf("\tmingap_ctr: %ld \t\tHLS det. : %ld\n",
+ plc->mini_ctr,plc->b_hls) ;
+ printf("\tnodepr_err: %ld\n",plc->np_err) ;
+ printf("\tTPC_exp : %ld\n",plc->tpc_exp) ;
+ printf("\tLEM_err : %ld\n",smc->y[np].lem.lem_errors) ;
+ }
+}
+
+
+#ifdef CONCENTRATOR
+void pcm_lem_dump(struct s_smc *smc)
+{
+ int i ;
+ struct s_phy *phy ;
+ struct fddi_mib_p *mib ;
+
+ char *entostring() ;
+
+ printf("PHY errors BER\n") ;
+ printf("----------------------\n") ;
+ for (i = 0,phy = smc->y ; i < NUMPHYS ; i++,phy++) {
+ if (!plc_is_installed(smc,i))
+ continue ;
+ mib = phy->mib ;
+ printf("%s\t%ld\t10E-%d\n",
+ entostring(smc,ENTITY_PHY(i)),
+ mib->fddiPORTLem_Ct,
+ mib->fddiPORTLer_Estimate) ;
+ }
+}
+#endif
+#endif
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
new file mode 100644
index 000000000000..f2b446d8b0bf
--- /dev/null
+++ b/drivers/net/skfp/pmf.c
@@ -0,0 +1,1671 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ Parameter Management Frame processing for SMT 7.2
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smt_p.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef SLIM_SMT
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)pmf.c 1.37 97/08/04 (C) SK " ;
+#endif
+
+static int smt_authorize(struct s_smc *smc, struct smt_header *sm);
+static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm);
+static const struct s_p_tab* smt_get_ptab(u_short para);
+static int smt_mib_phys(struct s_smc *smc);
+int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index, int local,
+ int set);
+void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
+ int index, int local);
+static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
+ int set, int local);
+
+#define MOFFSS(e) ((int)&(((struct fddi_mib *)0)->e))
+#define MOFFSA(e) ((int) (((struct fddi_mib *)0)->e))
+
+#define MOFFMS(e) ((int)&(((struct fddi_mib_m *)0)->e))
+#define MOFFMA(e) ((int) (((struct fddi_mib_m *)0)->e))
+
+#define MOFFAS(e) ((int)&(((struct fddi_mib_a *)0)->e))
+#define MOFFAA(e) ((int) (((struct fddi_mib_a *)0)->e))
+
+#define MOFFPS(e) ((int)&(((struct fddi_mib_p *)0)->e))
+#define MOFFPA(e) ((int) (((struct fddi_mib_p *)0)->e))
+
+
+#define AC_G 0x01 /* Get */
+#define AC_GR 0x02 /* Get/Set */
+#define AC_S 0x04 /* Set */
+#define AC_NA 0x08 /* no access */
+#define AC_GROUP 0x10 /* Group */
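+/* convert milliseconds into byte clock units (1 ms = 12500 ticks of 80 ns) */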
+#define MS2BCLK(x) ((x)*12500L)
+/*
+ F Flag (byte)
+ B byte
+ S u_short 16 bit
+ C Counter 32 bit
+ L Long 32 bit
+ T Timer_2 32 bit
+ P TimeStamp ;
+ A LongAddress (6 byte)
+ E Enum 16 bit
+ R ResId 16 Bit
+*/
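+/*
+ * Example: the entry { SMT_P101A, AC_GR, MOFFSS(fddiSMTConfigPolicy), "wS" }
+ * below describes a readable and writable 16 bit value: on a Set the 'w'
+ * stages the value in word_val for the range checks, on a Get the 'S'
+ * emits it as a 16 bit word padded to 4 bytes.
+ */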
+static const struct s_p_tab {
+ u_short p_num ; /* parameter code */
+ u_char p_access ; /* access rights */
+ u_short p_offset ; /* offset in mib */
+ char p_swap[3] ; /* format string */
+} p_tab[] = {
+ /* StationIdGrp */
+ { SMT_P100A,AC_GROUP } ,
+ { SMT_P100B,AC_G, MOFFSS(fddiSMTStationId), "8" } ,
+ { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } ,
+ { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } ,
+ { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } ,
+ { SMT_P1010,AC_G, MOFFSA(fddiSMTManufacturerData), "D" } ,
+ { SMT_P1011,AC_GR, MOFFSA(fddiSMTUserData), "D" } ,
+ { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } ,
+
+ /* StationConfigGrp */
+ { SMT_P1014,AC_GROUP } ,
+ { SMT_P1015,AC_G, MOFFSS(fddiSMTMac_Ct), "B" } ,
+ { SMT_P1016,AC_G, MOFFSS(fddiSMTNonMaster_Ct), "B" } ,
+ { SMT_P1017,AC_G, MOFFSS(fddiSMTMaster_Ct), "B" } ,
+ { SMT_P1018,AC_G, MOFFSS(fddiSMTAvailablePaths), "B" } ,
+ { SMT_P1019,AC_G, MOFFSS(fddiSMTConfigCapabilities),"S" } ,
+ { SMT_P101A,AC_GR, MOFFSS(fddiSMTConfigPolicy), "wS" } ,
+ { SMT_P101B,AC_GR, MOFFSS(fddiSMTConnectionPolicy),"wS" } ,
+ { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } ,
+ { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } ,
+ { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } ,
+ { SMT_P1020,AC_G, MOFFSA(fddiSMTPORTIndexes), "II" } ,
+ { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } ,
+ { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } ,
+
+ /* StatusGrp */
+ { SMT_P1028,AC_GROUP } ,
+ { SMT_P1029,AC_G, MOFFSS(fddiSMTECMState), "E" } ,
+ { SMT_P102A,AC_G, MOFFSS(fddiSMTCF_State), "E" } ,
+ { SMT_P102C,AC_G, MOFFSS(fddiSMTRemoteDisconnectFlag),"F" } ,
+ { SMT_P102D,AC_G, MOFFSS(fddiSMTStationStatus), "E" } ,
+ { SMT_P102E,AC_G, MOFFSS(fddiSMTPeerWrapFlag), "F" } ,
+
+ /* MIBOperationGrp */
+ { SMT_P1032,AC_GROUP } ,
+ { SMT_P1033,AC_G, MOFFSA(fddiSMTTimeStamp),"P" } ,
+ { SMT_P1034,AC_G, MOFFSA(fddiSMTTransitionTimeStamp),"P" } ,
+ /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
+ { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } ,
+ { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } ,
+
+ { SMT_P103C,AC_S, 0, "wS" } ,
+
+ /*
+ * PRIVATE EXTENSIONS
+ * only accessible locally to get/set passwd
+ */
+ { SMT_P10F0,AC_GR, MOFFSA(fddiPRPMFPasswd), "8" } ,
+ { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } ,
+#ifdef ESS
+ { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } ,
+ { SMT_P10F3,AC_GR, MOFFSS(fddiESSOverhead), "lL" } ,
+ { SMT_P10F4,AC_GR, MOFFSS(fddiESSMaxTNeg), "lL" } ,
+ { SMT_P10F5,AC_GR, MOFFSS(fddiESSMinSegmentSize), "lL" } ,
+ { SMT_P10F6,AC_GR, MOFFSS(fddiESSCategory), "lL" } ,
+ { SMT_P10F7,AC_GR, MOFFSS(fddiESSSynchTxMode), "wS" } ,
+#endif
+#ifdef SBA
+ { SMT_P10F8,AC_GR, MOFFSS(fddiSBACommand), "bF" } ,
+ { SMT_P10F9,AC_GR, MOFFSS(fddiSBAAvailable), "bF" } ,
+#endif
+ /* MAC Attributes */
+ { SMT_P200A,AC_GROUP } ,
+ { SMT_P200B,AC_G, MOFFMS(fddiMACFrameStatusFunctions),"S" } ,
+ { SMT_P200D,AC_G, MOFFMS(fddiMACT_MaxCapabilitiy),"T" } ,
+ { SMT_P200E,AC_G, MOFFMS(fddiMACTVXCapabilitiy),"T" } ,
+
+ /* ConfigGrp */
+ { SMT_P2014,AC_GROUP } ,
+ { SMT_P2016,AC_G, MOFFMS(fddiMACAvailablePaths), "B" } ,
+ { SMT_P2017,AC_G, MOFFMS(fddiMACCurrentPath), "S" } ,
+ { SMT_P2018,AC_G, MOFFMS(fddiMACUpstreamNbr), "A" } ,
+ { SMT_P2019,AC_G, MOFFMS(fddiMACDownstreamNbr), "A" } ,
+ { SMT_P201A,AC_G, MOFFMS(fddiMACOldUpstreamNbr), "A" } ,
+ { SMT_P201B,AC_G, MOFFMS(fddiMACOldDownstreamNbr),"A" } ,
+ { SMT_P201D,AC_G, MOFFMS(fddiMACDupAddressTest), "E" } ,
+ { SMT_P2020,AC_GR, MOFFMS(fddiMACRequestedPaths), "wS" } ,
+ { SMT_P2021,AC_G, MOFFMS(fddiMACDownstreamPORTType),"E" } ,
+ { SMT_P2022,AC_G, MOFFMS(fddiMACIndex), "S" } ,
+
+ /* AddressGrp */
+ { SMT_P2028,AC_GROUP } ,
+ { SMT_P2029,AC_G, MOFFMS(fddiMACSMTAddress), "A" } ,
+
+ /* OperationGrp */
+ { SMT_P2032,AC_GROUP } ,
+ { SMT_P2033,AC_G, MOFFMS(fddiMACT_Req), "T" } ,
+ { SMT_P2034,AC_G, MOFFMS(fddiMACT_Neg), "T" } ,
+ { SMT_P2035,AC_G, MOFFMS(fddiMACT_Max), "T" } ,
+ { SMT_P2036,AC_G, MOFFMS(fddiMACTvxValue), "T" } ,
+ { SMT_P2038,AC_G, MOFFMS(fddiMACT_Pri0), "T" } ,
+ { SMT_P2039,AC_G, MOFFMS(fddiMACT_Pri1), "T" } ,
+ { SMT_P203A,AC_G, MOFFMS(fddiMACT_Pri2), "T" } ,
+ { SMT_P203B,AC_G, MOFFMS(fddiMACT_Pri3), "T" } ,
+ { SMT_P203C,AC_G, MOFFMS(fddiMACT_Pri4), "T" } ,
+ { SMT_P203D,AC_G, MOFFMS(fddiMACT_Pri5), "T" } ,
+ { SMT_P203E,AC_G, MOFFMS(fddiMACT_Pri6), "T" } ,
+
+
+ /* CountersGrp */
+ { SMT_P2046,AC_GROUP } ,
+ { SMT_P2047,AC_G, MOFFMS(fddiMACFrame_Ct), "C" } ,
+ { SMT_P2048,AC_G, MOFFMS(fddiMACCopied_Ct), "C" } ,
+ { SMT_P2049,AC_G, MOFFMS(fddiMACTransmit_Ct), "C" } ,
+ { SMT_P204A,AC_G, MOFFMS(fddiMACToken_Ct), "C" } ,
+ { SMT_P2051,AC_G, MOFFMS(fddiMACError_Ct), "C" } ,
+ { SMT_P2052,AC_G, MOFFMS(fddiMACLost_Ct), "C" } ,
+ { SMT_P2053,AC_G, MOFFMS(fddiMACTvxExpired_Ct), "C" } ,
+ { SMT_P2054,AC_G, MOFFMS(fddiMACNotCopied_Ct), "C" } ,
+ { SMT_P2056,AC_G, MOFFMS(fddiMACRingOp_Ct), "C" } ,
+
+ /* FrameErrorConditionGrp */
+ { SMT_P205A,AC_GROUP } ,
+ { SMT_P205F,AC_GR, MOFFMS(fddiMACFrameErrorThreshold),"wS" } ,
+ { SMT_P2060,AC_G, MOFFMS(fddiMACFrameErrorRatio), "S" } ,
+
+ /* NotCopiedConditionGrp */
+ { SMT_P2064,AC_GROUP } ,
+ { SMT_P2067,AC_GR, MOFFMS(fddiMACNotCopiedThreshold),"wS" } ,
+ { SMT_P2069,AC_G, MOFFMS(fddiMACNotCopiedRatio), "S" } ,
+
+ /* StatusGrp */
+ { SMT_P206E,AC_GROUP } ,
+ { SMT_P206F,AC_G, MOFFMS(fddiMACRMTState), "S" } ,
+ { SMT_P2070,AC_G, MOFFMS(fddiMACDA_Flag), "F" } ,
+ { SMT_P2071,AC_G, MOFFMS(fddiMACUNDA_Flag), "F" } ,
+ { SMT_P2072,AC_G, MOFFMS(fddiMACFrameErrorFlag), "F" } ,
+ { SMT_P2073,AC_G, MOFFMS(fddiMACNotCopiedFlag), "F" } ,
+ { SMT_P2074,AC_G, MOFFMS(fddiMACMA_UnitdataAvailable),"F" } ,
+ { SMT_P2075,AC_G, MOFFMS(fddiMACHardwarePresent), "F" } ,
+ { SMT_P2076,AC_GR, MOFFMS(fddiMACMA_UnitdataEnable),"bF" } ,
+
+ /*
+ * PRIVATE EXTENSIONS
+ * only accessible locally to get/set TMIN
+ */
+ { SMT_P20F0,AC_NA } ,
+ { SMT_P20F1,AC_GR, MOFFMS(fddiMACT_Min), "lT" } ,
+
+ /* Path Attributes */
+ /*
+ * DON'T swap 320B,320F,3210: they are already swapped in swap_para()
+ */
+ { SMT_P320A,AC_GROUP } ,
+ { SMT_P320B,AC_G, MOFFAS(fddiPATHIndex), "r" } ,
+ { SMT_P320F,AC_GR, MOFFAS(fddiPATHSbaPayload), "l4" } ,
+ { SMT_P3210,AC_GR, MOFFAS(fddiPATHSbaOverhead), "l4" } ,
+ /* fddiPATHConfiguration */
+ { SMT_P3212,AC_G, 0, "" } ,
+ { SMT_P3213,AC_GR, MOFFAS(fddiPATHT_Rmode), "lT" } ,
+ { SMT_P3214,AC_GR, MOFFAS(fddiPATHSbaAvailable), "lL" } ,
+ { SMT_P3215,AC_GR, MOFFAS(fddiPATHTVXLowerBound), "lT" } ,
+ { SMT_P3216,AC_GR, MOFFAS(fddiPATHT_MaxLowerBound),"lT" } ,
+ { SMT_P3217,AC_GR, MOFFAS(fddiPATHMaxT_Req), "lT" } ,
+
+ /* Port Attributes */
+ /* ConfigGrp */
+ { SMT_P400A,AC_GROUP } ,
+ { SMT_P400C,AC_G, MOFFPS(fddiPORTMy_Type), "E" } ,
+ { SMT_P400D,AC_G, MOFFPS(fddiPORTNeighborType), "E" } ,
+ { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } ,
+ { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } ,
+ { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } ,
+ { SMT_P4011,AC_GR, MOFFPA(fddiPORTRequestedPaths), "l4" } ,
+ { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } ,
+ { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } ,
+ { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
+ { SMT_P4017,AC_G, MOFFPS(fddiPORTConnectionCapabilities), "B"} ,
+ { SMT_P401D,AC_G, MOFFPS(fddiPORTIndex), "R" } ,
+
+ /* OperationGrp */
+ { SMT_P401E,AC_GROUP } ,
+ { SMT_P401F,AC_GR, MOFFPS(fddiPORTMaint_LS), "wE" } ,
+ { SMT_P4021,AC_G, MOFFPS(fddiPORTBS_Flag), "F" } ,
+ { SMT_P4022,AC_G, MOFFPS(fddiPORTPC_LS), "E" } ,
+
+ /* ErrorCtrsGrp */
+ { SMT_P4028,AC_GROUP } ,
+ { SMT_P4029,AC_G, MOFFPS(fddiPORTEBError_Ct), "C" } ,
+ { SMT_P402A,AC_G, MOFFPS(fddiPORTLCTFail_Ct), "C" } ,
+
+ /* LerGrp */
+ { SMT_P4032,AC_GROUP } ,
+ { SMT_P4033,AC_G, MOFFPS(fddiPORTLer_Estimate), "F" } ,
+ { SMT_P4034,AC_G, MOFFPS(fddiPORTLem_Reject_Ct), "C" } ,
+ { SMT_P4035,AC_G, MOFFPS(fddiPORTLem_Ct), "C" } ,
+ { SMT_P403A,AC_GR, MOFFPS(fddiPORTLer_Cutoff), "bB" } ,
+ { SMT_P403B,AC_GR, MOFFPS(fddiPORTLer_Alarm), "bB" } ,
+
+ /* StatusGrp */
+ { SMT_P403C,AC_GROUP } ,
+ { SMT_P403D,AC_G, MOFFPS(fddiPORTConnectState), "E" } ,
+ { SMT_P403E,AC_G, MOFFPS(fddiPORTPCMStateX), "E" } ,
+ { SMT_P403F,AC_G, MOFFPS(fddiPORTPC_Withhold), "E" } ,
+ { SMT_P4040,AC_G, MOFFPS(fddiPORTLerFlag), "F" } ,
+ { SMT_P4041,AC_G, MOFFPS(fddiPORTHardwarePresent),"F" } ,
+
+ { SMT_P4046,AC_S, 0, "wS" } ,
+
+ { 0, AC_GROUP } ,
+ { 0 }
+} ;
+
+void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
+{
+ struct smt_header *sm ;
+ SMbuf *reply ;
+
+ sm = smtod(mb,struct smt_header *) ;
+ DB_SMT("SMT: processing PMF frame at %x len %d\n",sm,mb->sm_len) ;
+#ifdef DEBUG
+ dump_smt(smc,sm,"PMF Received") ;
+#endif
+ /*
+ * Start the watchdog: the packet may be long and the
+ * watchdog could otherwise expire while it is being processed.
+ */
+ smt_start_watchdog(smc) ;
+
+ if (sm->smt_class == SMT_PMF_GET ||
+ sm->smt_class == SMT_PMF_SET) {
+ reply = smt_build_pmf_response(smc,sm,
+ sm->smt_class == SMT_PMF_SET,local) ;
+ if (reply) {
+ sm = smtod(reply,struct smt_header *) ;
+#ifdef DEBUG
+ dump_smt(smc,sm,"PMF Reply") ;
+#endif
+ smt_send_frame(smc,reply,FC_SMT_INFO,local) ;
+ }
+ }
+}
+
+static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
+ int set, int local)
+{
+ SMbuf *mb ;
+ struct smt_header *smt ;
+ struct smt_para *pa ;
+ struct smt_p_reason *res ;
+ const struct s_p_tab *pt ;
+ int len ;
+ int index ;
+ int idx_end ;
+ int error ;
+ int range ;
+ SK_LOC_DECL(struct s_pcon,pcon) ;
+ SK_LOC_DECL(struct s_pcon,set_pcon) ;
+
+ /*
+ * build SMT header
+ */
+ if (!(mb = smt_get_mbuf(smc)))
+ return(mb) ;
+
+ smt = smtod(mb, struct smt_header *) ;
+ smt->smt_dest = req->smt_source ; /* DA == source of request */
+ smt->smt_class = req->smt_class ; /* same class (GET/SET) */
+ smt->smt_type = SMT_REPLY ;
+ smt->smt_version = SMT_VID_2 ;
+ smt->smt_tid = req->smt_tid ; /* same TID */
+ smt->smt_pad = 0 ;
+ smt->smt_len = 0 ;
+
+ /*
+ * setup parameter status
+ */
+ pcon.pc_len = SMT_MAX_INFO_LEN ; /* max para length */
+ pcon.pc_err = 0 ; /* no error */
+ pcon.pc_badset = 0 ; /* no bad set count */
+ pcon.pc_p = (void *) (smt + 1) ; /* paras start here */
+
+ /*
+ * check authorization and set count
+ */
+ error = 0 ;
+ if (set) {
+ if (!local && smt_authorize(smc,req))
+ error = SMT_RDF_AUTHOR ;
+ else if (smt_check_set_count(smc,req))
+ pcon.pc_badset = SMT_RDF_BADSET ;
+ }
+ /*
+ * add reason code and all mandatory parameters
+ */
+ res = (struct smt_p_reason *) pcon.pc_p ;
+ smt_add_para(smc,&pcon,(u_short) SMT_P_REASON,0,0) ;
+ smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
+ /* update 1035 and 1036 later if set */
+ set_pcon = pcon ;
+ smt_add_para(smc,&pcon,(u_short) SMT_P1035,0,0) ;
+ smt_add_para(smc,&pcon,(u_short) SMT_P1036,0,0) ;
+
+ pcon.pc_err = error ;
+ len = req->smt_len ;
+ pa = (struct smt_para *) (req + 1) ;
+ /*
+ * process list of paras
+ */
+ while (!pcon.pc_err && len > 0 ) {
+ if (((u_short)len < pa->p_len + PARA_LEN) || (pa->p_len & 3)) {
+ pcon.pc_err = SMT_RDF_LENGTH ;
+ break ;
+ }
+
+ if (((range = (pa->p_type & 0xf000)) == 0x2000) ||
+ range == 0x3000 || range == 0x4000) {
+ /*
+ * get index for PORT, MAC and PATH group
+ */
+ index = *((u_char *)pa + PARA_LEN + 3) ;/* index */
+ idx_end = index ;
+ if (!set && (pa->p_len != 4)) {
+ pcon.pc_err = SMT_RDF_LENGTH ;
+ break ;
+ }
+ if (!index && !set) {
+ switch (range) {
+ case 0x2000 :
+ index = INDEX_MAC ;
+ idx_end = index - 1 + NUMMACS ;
+ break ;
+ case 0x3000 :
+ index = INDEX_PATH ;
+ idx_end = index - 1 + NUMPATHS ;
+ break ;
+ case 0x4000 :
+ index = INDEX_PORT ;
+ idx_end = index - 1 + NUMPHYS ;
+#ifndef CONCENTRATOR
+ if (smc->s.sas == SMT_SAS)
+ idx_end = INDEX_PORT ;
+#endif
+ break ;
+ }
+ }
+ }
+ else {
+ /*
+ * smt group has no index
+ */
+ if (!set && (pa->p_len != 0)) {
+ pcon.pc_err = SMT_RDF_LENGTH ;
+ break ;
+ }
+ index = 0 ;
+ idx_end = 0 ;
+ }
+ while (index <= idx_end) {
+ /*
+ * if group
+ * add all paras of group
+ */
+ pt = smt_get_ptab(pa->p_type) ;
+ if (pt && pt->p_access == AC_GROUP && !set) {
+ pt++ ;
+ while (pt->p_access == AC_G ||
+ pt->p_access == AC_GR) {
+ smt_add_para(smc,&pcon,pt->p_num,
+ index,local);
+ pt++ ;
+ }
+ }
+ /*
+ * ignore
+ * AUTHORIZATION in get/set
+ * SET COUNT in set
+ */
+ else if (pa->p_type != SMT_P_AUTHOR &&
+ (!set || (pa->p_type != SMT_P1035))) {
+ int st ;
+ if (pcon.pc_badset) {
+ smt_add_para(smc,&pcon,pa->p_type,
+ index,local) ;
+ }
+ else if (set) {
+ st = smt_set_para(smc,pa,index,local,1);
+ /*
+ * return para even if error
+ */
+ smt_add_para(smc,&pcon,pa->p_type,
+ index,local) ;
+ pcon.pc_err = st ;
+ }
+ else {
+ if (pt && pt->p_access == AC_S) {
+ pcon.pc_err =
+ SMT_RDF_ILLEGAL ;
+ }
+ smt_add_para(smc,&pcon,pa->p_type,
+ index,local) ;
+ }
+ }
+ if (pcon.pc_err)
+ break ;
+ index++ ;
+ }
+ len -= pa->p_len + PARA_LEN ;
+ pa = (struct smt_para *) ((char *)pa + pa->p_len + PARA_LEN) ;
+ }
+ smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
+ mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
+
+ /* update reason code */
+ res->rdf_reason = pcon.pc_badset ? pcon.pc_badset :
+ pcon.pc_err ? pcon.pc_err : SMT_RDF_SUCCESS ;
+ if (set && (res->rdf_reason == SMT_RDF_SUCCESS)) {
+ /*
+ * increment set count
+ * set time stamp
+ * store station id of last set
+ */
+ smc->mib.fddiSMTSetCount.count++ ;
+ smt_set_timestamp(smc,smc->mib.fddiSMTSetCount.timestamp) ;
+ smc->mib.fddiSMTLastSetStationId = req->smt_sid ;
+ smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ;
+ smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ;
+ }
+ return(mb) ;
+}
+
+static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
+{
+ struct smt_para *pa ;
+ int i ;
+ char *p ;
+
+ /*
+ * check source station id if not zero
+ */
+ p = (char *) &smc->mib.fddiPRPMFStation ;
+ for (i = 0 ; i < 8 && !p[i] ; i++)
+ ;
+ if (i != 8) {
+ if (memcmp((char *) &sm->smt_sid,
+ (char *) &smc->mib.fddiPRPMFStation,8))
+ return(1) ;
+ }
+ /*
+ * check authorization parameter if passwd not zero
+ */
+ p = (char *) smc->mib.fddiPRPMFPasswd ;
+ for (i = 0 ; i < 8 && !p[i] ; i++)
+ ;
+ if (i != 8) {
+ pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ;
+ if (!pa)
+ return(1) ;
+ if (pa->p_len != 8)
+ return(1) ;
+ if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8))
+ return(1) ;
+ }
+ return(0) ;
+}
+
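+/*
+ * check the Set Count parameter (SMT_P1035) of a PMF Set request:
+ * returns 1 if the set count or time stamp in the request does not
+ * match the local fddiSMTSetCount; the caller then reports the
+ * request as a bad set (SMT_RDF_BADSET).
+ */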
+static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
+{
+ struct smt_para *pa ;
+ struct smt_p_setcount *sc ;
+
+ pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P1035) ;
+ if (pa) {
+ sc = (struct smt_p_setcount *) pa ;
+ if ((smc->mib.fddiSMTSetCount.count != sc->count) ||
+ memcmp((char *) smc->mib.fddiSMTSetCount.timestamp,
+ (char *)sc->timestamp,8))
+ return(1) ;
+ }
+ return(0) ;
+}
+
+void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
+ int index, int local)
+{
+ struct smt_para *pa ;
+ const struct s_p_tab *pt ;
+ struct fddi_mib_m *mib_m = NULL;
+ struct fddi_mib_p *mib_p = NULL;
+ int len ;
+ int plen ;
+ char *from ;
+ char *to ;
+ const char *swap ;
+ char c ;
+ int range ;
+ char *mib_addr ;
+ int mac ;
+ int path ;
+ int port ;
+ int sp_len ;
+
+ /*
+ * skip if error
+ */
+ if (pcon->pc_err)
+ return ;
+
+ /*
+ * actions don't have a value
+ */
+ pt = smt_get_ptab(para) ;
+ if (pt && pt->p_access == AC_S)
+ return ;
+
+ to = (char *) (pcon->pc_p) ; /* destination pointer */
+ len = pcon->pc_len ; /* free space */
+ plen = len ; /* remember start length */
+ pa = (struct smt_para *) to ; /* type/length pointer */
+ to += PARA_LEN ; /* skip smt_para */
+ len -= PARA_LEN ;
+ /*
+ * set index if required
+ */
+ if (((range = (para & 0xf000)) == 0x2000) ||
+ range == 0x3000 || range == 0x4000) {
+ if (len < 4)
+ goto wrong_error ;
+ to[0] = 0 ;
+ to[1] = 0 ;
+ to[2] = 0 ;
+ to[3] = index ;
+ len -= 4 ;
+ to += 4 ;
+ }
+ mac = index - INDEX_MAC ;
+ path = index - INDEX_PATH ;
+ port = index - INDEX_PORT ;
+ /*
+ * get pointer to mib
+ */
+ switch (range) {
+ case 0x1000 :
+ default :
+ mib_addr = (char *) (&smc->mib) ;
+ break ;
+ case 0x2000 :
+ if (mac < 0 || mac >= NUMMACS) {
+ pcon->pc_err = SMT_RDF_NOPARAM ;
+ return ;
+ }
+ mib_addr = (char *) (&smc->mib.m[mac]) ;
+ mib_m = (struct fddi_mib_m *) mib_addr ;
+ break ;
+ case 0x3000 :
+ if (path < 0 || path >= NUMPATHS) {
+ pcon->pc_err = SMT_RDF_NOPARAM ;
+ return ;
+ }
+ mib_addr = (char *) (&smc->mib.a[path]) ;
+ break ;
+ case 0x4000 :
+ if (port < 0 || port >= smt_mib_phys(smc)) {
+ pcon->pc_err = SMT_RDF_NOPARAM ;
+ return ;
+ }
+ mib_addr = (char *) (&smc->mib.p[port_to_mib(smc,port)]) ;
+ mib_p = (struct fddi_mib_p *) mib_addr ;
+ break ;
+ }
+ /*
+ * check special paras
+ */
+ swap = NULL;
+ switch (para) {
+ case SMT_P10F0 :
+ case SMT_P10F1 :
+#ifdef ESS
+ case SMT_P10F2 :
+ case SMT_P10F3 :
+ case SMT_P10F4 :
+ case SMT_P10F5 :
+ case SMT_P10F6 :
+ case SMT_P10F7 :
+#endif
+#ifdef SBA
+ case SMT_P10F8 :
+ case SMT_P10F9 :
+#endif
+ case SMT_P20F1 :
+ if (!local) {
+ pcon->pc_err = SMT_RDF_NOPARAM ;
+ return ;
+ }
+ break ;
+ case SMT_P2034 :
+ case SMT_P2046 :
+ case SMT_P2047 :
+ case SMT_P204A :
+ case SMT_P2051 :
+ case SMT_P2052 :
+ mac_update_counter(smc) ;
+ break ;
+ case SMT_P4022:
+ mib_p->fddiPORTPC_LS = LS2MIB(
+ sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
+ break ;
+ case SMT_P_REASON :
+ * (u_long *) to = 0 ;
+ sp_len = 4 ;
+ goto sp_done ;
+ case SMT_P1033 : /* time stamp */
+ smt_set_timestamp(smc,smc->mib.fddiSMTTimeStamp) ;
+ break ;
+
+ case SMT_P1020: /* port indexes */
+#if NUMPHYS == 12
+ swap = "IIIIIIIIIIII" ;
+#else
+#if NUMPHYS == 2
+ if (smc->s.sas == SMT_SAS)
+ swap = "I" ;
+ else
+ swap = "II" ;
+#else
+#if NUMPHYS == 24
+ swap = "IIIIIIIIIIIIIIIIIIIIIIII" ;
+#else
+ ????
+#endif
+#endif
+#endif
+ break ;
+ case SMT_P3212 :
+ {
+ sp_len = cem_build_path(smc,to,path) ;
+ goto sp_done ;
+ }
+ case SMT_P1048 : /* peer wrap condition */
+ {
+ struct smt_p_1048 *sp ;
+ sp = (struct smt_p_1048 *) to ;
+ sp->p1048_flag = smc->mib.fddiSMTPeerWrapFlag ;
+ sp->p1048_cf_state = smc->mib.fddiSMTCF_State ;
+ sp_len = sizeof(struct smt_p_1048) ;
+ goto sp_done ;
+ }
+ case SMT_P208C :
+ {
+ struct smt_p_208c *sp ;
+ sp = (struct smt_p_208c *) to ;
+ sp->p208c_flag =
+ smc->mib.m[MAC0].fddiMACDuplicateAddressCond ;
+ sp->p208c_dupcondition =
+ (mib_m->fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0) |
+ (mib_m->fddiMACUNDA_Flag ? SMT_ST_UNA_DUPA : 0);
+ sp->p208c_fddilong =
+ mib_m->fddiMACSMTAddress ;
+ sp->p208c_fddiunalong =
+ mib_m->fddiMACUpstreamNbr ;
+ sp->p208c_pad = 0 ;
+ sp_len = sizeof(struct smt_p_208c) ;
+ goto sp_done ;
+ }
+ case SMT_P208D : /* frame error condition */
+ {
+ struct smt_p_208d *sp ;
+ sp = (struct smt_p_208d *) to ;
+ sp->p208d_flag =
+ mib_m->fddiMACFrameErrorFlag ;
+ sp->p208d_frame_ct =
+ mib_m->fddiMACFrame_Ct ;
+ sp->p208d_error_ct =
+ mib_m->fddiMACError_Ct ;
+ sp->p208d_lost_ct =
+ mib_m->fddiMACLost_Ct ;
+ sp->p208d_ratio =
+ mib_m->fddiMACFrameErrorRatio ;
+ sp_len = sizeof(struct smt_p_208d) ;
+ goto sp_done ;
+ }
+ case SMT_P208E : /* not copied condition */
+ {
+ struct smt_p_208e *sp ;
+ sp = (struct smt_p_208e *) to ;
+ sp->p208e_flag =
+ mib_m->fddiMACNotCopiedFlag ;
+ sp->p208e_not_copied =
+ mib_m->fddiMACNotCopied_Ct ;
+ sp->p208e_copied =
+ mib_m->fddiMACCopied_Ct ;
+ sp->p208e_not_copied_ratio =
+ mib_m->fddiMACNotCopiedRatio ;
+ sp_len = sizeof(struct smt_p_208e) ;
+ goto sp_done ;
+ }
+ case SMT_P208F : /* neighbor change event */
+ {
+ struct smt_p_208f *sp ;
+ sp = (struct smt_p_208f *) to ;
+ sp->p208f_multiple =
+ mib_m->fddiMACMultiple_N ;
+ sp->p208f_nacondition =
+ mib_m->fddiMACDuplicateAddressCond ;
+ sp->p208f_old_una =
+ mib_m->fddiMACOldUpstreamNbr ;
+ sp->p208f_new_una =
+ mib_m->fddiMACUpstreamNbr ;
+ sp->p208f_old_dna =
+ mib_m->fddiMACOldDownstreamNbr ;
+ sp->p208f_new_dna =
+ mib_m->fddiMACDownstreamNbr ;
+ sp->p208f_curren_path =
+ mib_m->fddiMACCurrentPath ;
+ sp->p208f_smt_address =
+ mib_m->fddiMACSMTAddress ;
+ sp_len = sizeof(struct smt_p_208f) ;
+ goto sp_done ;
+ }
+ case SMT_P2090 :
+ {
+ struct smt_p_2090 *sp ;
+ sp = (struct smt_p_2090 *) to ;
+ sp->p2090_multiple =
+ mib_m->fddiMACMultiple_P ;
+ sp->p2090_availablepaths =
+ mib_m->fddiMACAvailablePaths ;
+ sp->p2090_currentpath =
+ mib_m->fddiMACCurrentPath ;
+ sp->p2090_requestedpaths =
+ mib_m->fddiMACRequestedPaths ;
+ sp_len = sizeof(struct smt_p_2090) ;
+ goto sp_done ;
+ }
+ case SMT_P4050 :
+ {
+ struct smt_p_4050 *sp ;
+ sp = (struct smt_p_4050 *) to ;
+ sp->p4050_flag =
+ mib_p->fddiPORTLerFlag ;
+ sp->p4050_pad = 0 ;
+ sp->p4050_cutoff =
+ mib_p->fddiPORTLer_Cutoff ;
+ sp->p4050_alarm =
+ mib_p->fddiPORTLer_Alarm ;
+ sp->p4050_estimate =
+ mib_p->fddiPORTLer_Estimate ;
+ sp->p4050_reject_ct =
+ mib_p->fddiPORTLem_Reject_Ct ;
+ sp->p4050_ct =
+ mib_p->fddiPORTLem_Ct ;
+ sp_len = sizeof(struct smt_p_4050) ;
+ goto sp_done ;
+ }
+
+ case SMT_P4051 :
+ {
+ struct smt_p_4051 *sp ;
+ sp = (struct smt_p_4051 *) to ;
+ sp->p4051_multiple =
+ mib_p->fddiPORTMultiple_U ;
+ sp->p4051_porttype =
+ mib_p->fddiPORTMy_Type ;
+ sp->p4051_connectstate =
+ mib_p->fddiPORTConnectState ;
+ sp->p4051_pc_neighbor =
+ mib_p->fddiPORTNeighborType ;
+ sp->p4051_pc_withhold =
+ mib_p->fddiPORTPC_Withhold ;
+ sp_len = sizeof(struct smt_p_4051) ;
+ goto sp_done ;
+ }
+ case SMT_P4052 :
+ {
+ struct smt_p_4052 *sp ;
+ sp = (struct smt_p_4052 *) to ;
+ sp->p4052_flag =
+ mib_p->fddiPORTEB_Condition ;
+ sp->p4052_eberrorcount =
+ mib_p->fddiPORTEBError_Ct ;
+ sp_len = sizeof(struct smt_p_4052) ;
+ goto sp_done ;
+ }
+ case SMT_P4053 :
+ {
+ struct smt_p_4053 *sp ;
+ sp = (struct smt_p_4053 *) to ;
+ sp->p4053_multiple =
+ mib_p->fddiPORTMultiple_P ;
+ sp->p4053_availablepaths =
+ mib_p->fddiPORTAvailablePaths ;
+ sp->p4053_currentpath =
+ mib_p->fddiPORTCurrentPath ;
+ memcpy( (char *) &sp->p4053_requestedpaths,
+ (char *) mib_p->fddiPORTRequestedPaths,4) ;
+ sp->p4053_mytype =
+ mib_p->fddiPORTMy_Type ;
+ sp->p4053_neighbortype =
+ mib_p->fddiPORTNeighborType ;
+ sp_len = sizeof(struct smt_p_4053) ;
+ goto sp_done ;
+ }
+ default :
+ break ;
+ }
+ /*
+ * in table ?
+ */
+ if (!pt) {
+ pcon->pc_err = (para & 0xff00) ? SMT_RDF_NOPARAM :
+ SMT_RDF_ILLEGAL ;
+ return ;
+ }
+ /*
+ * check access rights
+ */
+ switch (pt->p_access) {
+ case AC_G :
+ case AC_GR :
+ break ;
+ default :
+ pcon->pc_err = SMT_RDF_ILLEGAL ;
+ return ;
+ }
+ from = mib_addr + pt->p_offset ;
+ if (!swap)
+ swap = pt->p_swap ; /* pointer to swap string */
+
+ /*
+ * copy values
+ */
+ while ((c = *swap++)) {
+ switch(c) {
+ case 'b' :
+ case 'w' :
+ case 'l' :
+ break ;
+ case 'S' :
+ case 'E' :
+ case 'R' :
+ case 'r' :
+ if (len < 4)
+ goto len_error ;
+ to[0] = 0 ;
+ to[1] = 0 ;
+#ifdef LITTLE_ENDIAN
+ if (c == 'r') {
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+ }
+ else {
+ to[3] = *from++ ;
+ to[2] = *from++ ;
+ }
+#else
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+#endif
+ to += 4 ;
+ len -= 4 ;
+ break ;
+ case 'I' : /* for SET of port indexes */
+ if (len < 2)
+ goto len_error ;
+#ifdef LITTLE_ENDIAN
+ to[1] = *from++ ;
+ to[0] = *from++ ;
+#else
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+#endif
+ to += 2 ;
+ len -= 2 ;
+ break ;
+ case 'F' :
+ case 'B' :
+ if (len < 4)
+ goto len_error ;
+ len -= 4 ;
+ to[0] = 0 ;
+ to[1] = 0 ;
+ to[2] = 0 ;
+ to[3] = *from++ ;
+ to += 4 ;
+ break ;
+ case 'C' :
+ case 'T' :
+ case 'L' :
+ if (len < 4)
+ goto len_error ;
+#ifdef LITTLE_ENDIAN
+ to[3] = *from++ ;
+ to[2] = *from++ ;
+ to[1] = *from++ ;
+ to[0] = *from++ ;
+#else
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+#endif
+ len -= 4 ;
+ to += 4 ;
+ break ;
+ case '2' : /* PortMacIndicated */
+ if (len < 4)
+ goto len_error ;
+ to[0] = 0 ;
+ to[1] = 0 ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+ len -= 4 ;
+ to += 4 ;
+ break ;
+ case '4' :
+ if (len < 4)
+ goto len_error ;
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+ len -= 4 ;
+ to += 4 ;
+ break ;
+ case 'A' :
+ if (len < 8)
+ goto len_error ;
+ to[0] = 0 ;
+ to[1] = 0 ;
+ memcpy((char *) to+2,(char *) from,6) ;
+ to += 8 ;
+ from += 8 ;
+ len -= 8 ;
+ break ;
+ case '8' :
+ if (len < 8)
+ goto len_error ;
+ memcpy((char *) to,(char *) from,8) ;
+ to += 8 ;
+ from += 8 ;
+ len -= 8 ;
+ break ;
+ case 'D' :
+ if (len < 32)
+ goto len_error ;
+ memcpy((char *) to,(char *) from,32) ;
+ to += 32 ;
+ from += 32 ;
+ len -= 32 ;
+ break ;
+ case 'P' : /* timestamp is NOT swapped */
+ if (len < 8)
+ goto len_error ;
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+ to[4] = *from++ ;
+ to[5] = *from++ ;
+ to[6] = *from++ ;
+ to[7] = *from++ ;
+ to += 8 ;
+ len -= 8 ;
+ break ;
+ default :
+ SMT_PANIC(smc,SMT_E0119, SMT_E0119_MSG) ;
+ break ;
+ }
+ }
+
+done:
+ /*
+ * make it even (in case of 'I' encoding)
+ * note: len is DECREMENTED
+ */
+ if (len & 3) {
+ to[0] = 0 ;
+ to[1] = 0 ;
+ to += 4 - (len & 3 ) ;
+ len = len & ~ 3 ;
+ }
+
+ /* set type and length */
+ pa->p_type = para ;
+ pa->p_len = plen - len - PARA_LEN ;
+ /* return values */
+ pcon->pc_p = (void *) to ;
+ pcon->pc_len = len ;
+ return ;
+
+sp_done:
+ len -= sp_len ;
+ to += sp_len ;
+ goto done ;
+
+len_error:
+ /* parameter does not fit in frame */
+ pcon->pc_err = SMT_RDF_TOOLONG ;
+ return ;
+
+wrong_error:
+ pcon->pc_err = SMT_RDF_LENGTH ;
+}
+
+/*
+ * set parameter
+ */
+int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index, int local,
+ int set)
+{
+#define IFSET(x) if (set) (x)
+
+ const struct s_p_tab *pt ;
+ int len ;
+ char *from ;
+ char *to ;
+ const char *swap ;
+ char c ;
+ char *mib_addr ;
+ struct fddi_mib *mib ;
+ struct fddi_mib_m *mib_m = NULL;
+ struct fddi_mib_a *mib_a = NULL;
+ struct fddi_mib_p *mib_p = NULL;
+ int mac ;
+ int path ;
+ int port ;
+ SK_LOC_DECL(u_char,byte_val) ;
+ SK_LOC_DECL(u_short,word_val) ;
+ SK_LOC_DECL(u_long,long_val) ;
+
+ mac = index - INDEX_MAC ;
+ path = index - INDEX_PATH ;
+ port = index - INDEX_PORT ;
+ len = pa->p_len ;
+ from = (char *) (pa + 1 ) ;
+
+ mib = &smc->mib ;
+ switch (pa->p_type & 0xf000) {
+ case 0x1000 :
+ default :
+ mib_addr = (char *) mib ;
+ break ;
+ case 0x2000 :
+ if (mac < 0 || mac >= NUMMACS) {
+ return(SMT_RDF_NOPARAM) ;
+ }
+ mib_m = &smc->mib.m[mac] ;
+ mib_addr = (char *) mib_m ;
+ from += 4 ; /* skip index */
+ len -= 4 ;
+ break ;
+ case 0x3000 :
+ if (path < 0 || path >= NUMPATHS) {
+ return(SMT_RDF_NOPARAM) ;
+ }
+ mib_a = &smc->mib.a[path] ;
+ mib_addr = (char *) mib_a ;
+ from += 4 ; /* skip index */
+ len -= 4 ;
+ break ;
+ case 0x4000 :
+ if (port < 0 || port >= smt_mib_phys(smc)) {
+ return(SMT_RDF_NOPARAM) ;
+ }
+ mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
+ mib_addr = (char *) mib_p ;
+ from += 4 ; /* skip index */
+ len -= 4 ;
+ break ;
+ }
+ switch (pa->p_type) {
+ case SMT_P10F0 :
+ case SMT_P10F1 :
+#ifdef ESS
+ case SMT_P10F2 :
+ case SMT_P10F3 :
+ case SMT_P10F4 :
+ case SMT_P10F5 :
+ case SMT_P10F6 :
+ case SMT_P10F7 :
+#endif
+#ifdef SBA
+ case SMT_P10F8 :
+ case SMT_P10F9 :
+#endif
+ case SMT_P20F1 :
+ if (!local) {
+ return(SMT_RDF_NOPARAM) ;
+ }
+ break ;
+ }
+ pt = smt_get_ptab(pa->p_type) ;
+ if (!pt) {
+ return( (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
+ SMT_RDF_ILLEGAL ) ;
+ }
+ switch (pt->p_access) {
+ case AC_GR :
+ case AC_S :
+ break ;
+ default :
+ return(SMT_RDF_ILLEGAL) ;
+ }
+ to = mib_addr + pt->p_offset ;
+ swap = pt->p_swap ; /* pointer to swap string */
+
+ while (swap && (c = *swap++)) {
+ switch(c) {
+ case 'b' :
+ to = (char *) &byte_val ;
+ break ;
+ case 'w' :
+ to = (char *) &word_val ;
+ break ;
+ case 'l' :
+ to = (char *) &long_val ;
+ break ;
+ case 'S' :
+ case 'E' :
+ case 'R' :
+ case 'r' :
+ if (len < 4) {
+ goto len_error ;
+ }
+ if (from[0] | from[1])
+ goto val_error ;
+#ifdef LITTLE_ENDIAN
+ if (c == 'r') {
+ to[0] = from[2] ;
+ to[1] = from[3] ;
+ }
+ else {
+ to[1] = from[2] ;
+ to[0] = from[3] ;
+ }
+#else
+ to[0] = from[2] ;
+ to[1] = from[3] ;
+#endif
+ from += 4 ;
+ to += 2 ;
+ len -= 4 ;
+ break ;
+ case 'F' :
+ case 'B' :
+ if (len < 4) {
+ goto len_error ;
+ }
+ if (from[0] | from[1] | from[2])
+ goto val_error ;
+ to[0] = from[3] ;
+ len -= 4 ;
+ from += 4 ;
+ to += 4 ;
+ break ;
+ case 'C' :
+ case 'T' :
+ case 'L' :
+ if (len < 4) {
+ goto len_error ;
+ }
+#ifdef LITTLE_ENDIAN
+ to[3] = *from++ ;
+ to[2] = *from++ ;
+ to[1] = *from++ ;
+ to[0] = *from++ ;
+#else
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+#endif
+ len -= 4 ;
+ to += 4 ;
+ break ;
+ case 'A' :
+ if (len < 8)
+ goto len_error ;
+ if (set)
+ memcpy((char *) to,(char *) from+2,6) ;
+ to += 8 ;
+ from += 8 ;
+ len -= 8 ;
+ break ;
+ case '4' :
+ if (len < 4)
+ goto len_error ;
+ if (set)
+ memcpy((char *) to,(char *) from,4) ;
+ to += 4 ;
+ from += 4 ;
+ len -= 4 ;
+ break ;
+ case '8' :
+ if (len < 8)
+ goto len_error ;
+ if (set)
+ memcpy((char *) to,(char *) from,8) ;
+ to += 8 ;
+ from += 8 ;
+ len -= 8 ;
+ break ;
+ case 'D' :
+ if (len < 32)
+ goto len_error ;
+ if (set)
+ memcpy((char *) to,(char *) from,32) ;
+ to += 32 ;
+ from += 32 ;
+ len -= 32 ;
+ break ;
+ case 'P' : /* timestamp is NOT swapped */
+ if (set) {
+ to[0] = *from++ ;
+ to[1] = *from++ ;
+ to[2] = *from++ ;
+ to[3] = *from++ ;
+ to[4] = *from++ ;
+ to[5] = *from++ ;
+ to[6] = *from++ ;
+ to[7] = *from++ ;
+ }
+ to += 8 ;
+ len -= 8 ;
+ break ;
+ default :
+ SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ;
+ return(SMT_RDF_ILLEGAL) ;
+ }
+ }
+ /*
+ * actions and internal updates
+ */
+ switch (pa->p_type) {
+ case SMT_P101A: /* fddiSMTConfigPolicy */
+ if (word_val & ~1)
+ goto val_error ;
+ IFSET(mib->fddiSMTConfigPolicy = word_val) ;
+ break ;
+ case SMT_P101B : /* fddiSMTConnectionPolicy */
+ if (!(word_val & POLICY_MM))
+ goto val_error ;
+ IFSET(mib->fddiSMTConnectionPolicy = word_val) ;
+ break ;
+ case SMT_P101D : /* fddiSMTTT_Notify */
+ if (word_val < 2 || word_val > 30)
+ goto val_error ;
+ IFSET(mib->fddiSMTTT_Notify = word_val) ;
+ break ;
+ case SMT_P101E : /* fddiSMTStatRptPolicy */
+ if (byte_val & ~1)
+ goto val_error ;
+ IFSET(mib->fddiSMTStatRptPolicy = byte_val) ;
+ break ;
+ case SMT_P101F : /* fddiSMTTrace_MaxExpiration */
+ /*
+ * note: lower limit trace_max = 6.001773... s
+ * NO upper limit
+ */
+ if (long_val < (long)0x478bf51L)
+ goto val_error ;
+ IFSET(mib->fddiSMTTrace_MaxExpiration = long_val) ;
+ break ;
+#ifdef ESS
+ case SMT_P10F2 : /* fddiESSPayload */
+ if (long_val > 1562)
+ goto val_error ;
+ if (set && smc->mib.fddiESSPayload != long_val) {
+ smc->ess.raf_act_timer_poll = TRUE ;
+ smc->mib.fddiESSPayload = long_val ;
+ }
+ break ;
+ case SMT_P10F3 : /* fddiESSOverhead */
+ if (long_val < 50 || long_val > 5000)
+ goto val_error ;
+ if (set && smc->mib.fddiESSPayload &&
+ smc->mib.fddiESSOverhead != long_val) {
+ smc->ess.raf_act_timer_poll = TRUE ;
+ smc->mib.fddiESSOverhead = long_val ;
+ }
+ break ;
+ case SMT_P10F4 : /* fddiESSMaxTNeg */
+ if (long_val > -MS2BCLK(5) || long_val < -MS2BCLK(165))
+ goto val_error ;
+ IFSET(mib->fddiESSMaxTNeg = long_val) ;
+ break ;
+ case SMT_P10F5 : /* fddiESSMinSegmentSize */
+ if (long_val < 1 || long_val > 4478)
+ goto val_error ;
+ IFSET(mib->fddiESSMinSegmentSize = long_val) ;
+ break ;
+ case SMT_P10F6 : /* fddiESSCategory */
+ if ((long_val & 0xffff) != 1)
+ goto val_error ;
+ IFSET(mib->fddiESSCategory = long_val) ;
+ break ;
+ case SMT_P10F7 : /* fddiESSSynchTxMode */
+ if (word_val > 1)
+ goto val_error ;
+ IFSET(mib->fddiESSSynchTxMode = word_val) ;
+ break ;
+#endif
+#ifdef SBA
+ case SMT_P10F8 : /* fddiSBACommand */
+ if (byte_val != SB_STOP && byte_val != SB_START)
+ goto val_error ;
+ IFSET(mib->fddiSBACommand = byte_val) ;
+ break ;
+ case SMT_P10F9 : /* fddiSBAAvailable */
+ if (byte_val > 100)
+ goto val_error ;
+ IFSET(mib->fddiSBAAvailable = byte_val) ;
+ break ;
+#endif
+ case SMT_P2020 : /* fddiMACRequestedPaths */
+ if ((word_val & (MIB_P_PATH_PRIM_PREFER |
+ MIB_P_PATH_PRIM_ALTER)) == 0 )
+ goto val_error ;
+ IFSET(mib_m->fddiMACRequestedPaths = word_val) ;
+ break ;
+ case SMT_P205F : /* fddiMACFrameErrorThreshold */
+ /* 0 .. ffff acceptable */
+ IFSET(mib_m->fddiMACFrameErrorThreshold = word_val) ;
+ break ;
+ case SMT_P2067 : /* fddiMACNotCopiedThreshold */
+ /* 0 .. ffff acceptable */
+ IFSET(mib_m->fddiMACNotCopiedThreshold = word_val) ;
+ break ;
+ case SMT_P2076: /* fddiMACMA_UnitdataEnable */
+ if (byte_val & ~1)
+ goto val_error ;
+ if (set) {
+ mib_m->fddiMACMA_UnitdataEnable = byte_val ;
+ queue_event(smc,EVENT_RMT,RM_ENABLE_FLAG) ;
+ }
+ break ;
+ case SMT_P20F1 : /* fddiMACT_Min */
+ IFSET(mib_m->fddiMACT_Min = long_val) ;
+ break ;
+ case SMT_P320F :
+ if (long_val > 1562)
+ goto val_error ;
+ IFSET(mib_a->fddiPATHSbaPayload = long_val) ;
+#ifdef ESS
+ if (set)
+ ess_para_change(smc) ;
+#endif
+ break ;
+ case SMT_P3210 :
+ if (long_val > 5000)
+ goto val_error ;
+
+ if (long_val != 0 && mib_a->fddiPATHSbaPayload == 0)
+ goto val_error ;
+
+ IFSET(mib_a->fddiPATHSbaOverhead = long_val) ;
+#ifdef ESS
+ if (set)
+ ess_para_change(smc) ;
+#endif
+ break ;
+ case SMT_P3213: /* fddiPATHT_Rmode */
+ /* no limit :
+ * 0 .. 343.597 s => 0 .. 2^32 * 80 ns
+ */
+ if (set) {
+ mib_a->fddiPATHT_Rmode = long_val ;
+ rtm_set_timer(smc) ;
+ }
+ break ;
+ case SMT_P3214 : /* fddiPATHSbaAvailable */
+ if (long_val > 0x00BEBC20L)
+ goto val_error ;
+#ifdef SBA
+ if (set && mib->fddiSBACommand == SB_STOP)
+ goto val_error ;
+#endif
+ IFSET(mib_a->fddiPATHSbaAvailable = long_val) ;
+ break ;
+ case SMT_P3215 : /* fddiPATHTVXLowerBound */
+ IFSET(mib_a->fddiPATHTVXLowerBound = long_val) ;
+ goto change_mac_para ;
+ case SMT_P3216 : /* fddiPATHT_MaxLowerBound */
+ IFSET(mib_a->fddiPATHT_MaxLowerBound = long_val) ;
+ goto change_mac_para ;
+ case SMT_P3217 : /* fddiPATHMaxT_Req */
+ IFSET(mib_a->fddiPATHMaxT_Req = long_val) ;
+
+change_mac_para:
+ if (set && smt_set_mac_opvalues(smc)) {
+ RS_SET(smc,RS_EVENT) ;
+ smc->sm.please_reconnect = 1 ;
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+ }
+ break ;
+ case SMT_P400E : /* fddiPORTConnectionPolicies */
+ if (byte_val > 1)
+ goto val_error ;
+ IFSET(mib_p->fddiPORTConnectionPolicies = byte_val) ;
+ break ;
+ case SMT_P4011 : /* fddiPORTRequestedPaths */
+ /* all 3*8 bits allowed */
+ IFSET(memcpy((char *)mib_p->fddiPORTRequestedPaths,
+ (char *)&long_val,4)) ;
+ break ;
+ case SMT_P401F: /* fddiPORTMaint_LS */
+ if (word_val > 4)
+ goto val_error ;
+ IFSET(mib_p->fddiPORTMaint_LS = word_val) ;
+ break ;
+ case SMT_P403A : /* fddiPORTLer_Cutoff */
+ if (byte_val < 4 || byte_val > 15)
+ goto val_error ;
+ IFSET(mib_p->fddiPORTLer_Cutoff = byte_val) ;
+ break ;
+ case SMT_P403B : /* fddiPORTLer_Alarm */
+ if (byte_val < 4 || byte_val > 15)
+ goto val_error ;
+ IFSET(mib_p->fddiPORTLer_Alarm = byte_val) ;
+ break ;
+
+ /*
+ * Actions
+ */
+ case SMT_P103C : /* fddiSMTStationAction */
+ if (smt_action(smc,SMT_STATION_ACTION, (int) word_val, 0))
+ goto val_error ;
+ break ;
+ case SMT_P4046: /* fddiPORTAction */
+ if (smt_action(smc,SMT_PORT_ACTION, (int) word_val,
+ port_to_mib(smc,port)))
+ goto val_error ;
+ break ;
+ default :
+ break ;
+ }
+ return(0) ;
+
+val_error:
+ /* parameter value in frame is out of range */
+ return(SMT_RDF_RANGE) ;
+
+len_error:
+ /* parameter value in frame is too short */
+ return(SMT_RDF_LENGTH) ;
+
+#if 0
+no_author_error:
+ /* parameter not settable, because the SBA is not active.
+ * Please note: we give the return code 'not authorized'
+ * because SBA denied is not a valid return code in the
+ * PMF protocol.
+ */
+ return(SMT_RDF_AUTHOR) ;
+#endif
+}
+
+static const struct s_p_tab *smt_get_ptab(u_short para)
+{
+ const struct s_p_tab *pt ;
+ for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
+ ;
+ return(pt->p_num ? pt : NULL) ;
+}
+
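+/*
+ * number of PHY entries visible in the MIB: a single attachment
+ * station (SAS) exposes only one PHY, a concentrator or DAS all
+ * NUMPHYS entries.
+ */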
+static int smt_mib_phys(struct s_smc *smc)
+{
+#ifdef CONCENTRATOR
+ SK_UNUSED(smc) ;
+
+ return(NUMPHYS) ;
+#else
+ if (smc->s.sas == SMT_SAS)
+ return(1) ;
+ return(NUMPHYS) ;
+#endif
+}
+
+int port_to_mib(struct s_smc *smc, int p)
+{
+#ifdef CONCENTRATOR
+ SK_UNUSED(smc) ;
+
+ return(p) ;
+#else
+ if (smc->s.sas == SMT_SAS)
+ return(PS) ;
+ return(p) ;
+#endif
+}
+
+
+#ifdef DEBUG
+#ifndef BOOT
+void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text)
+{
+ int len ;
+ struct smt_para *pa ;
+ char *c ;
+ int n ;
+ int nn ;
+#ifdef LITTLE_ENDIAN
+ int smtlen ;
+#endif
+
+ SK_UNUSED(smc) ;
+
+#ifdef DEBUG_BRD
+ if (smc->debug.d_smtf < 2)
+#else
+ if (debug.d_smtf < 2)
+#endif
+ return ;
+#ifdef LITTLE_ENDIAN
+ smtlen = sm->smt_len + sizeof(struct smt_header) ;
+#endif
+ printf("SMT Frame [%s]:\nDA ",text) ;
+ dump_hex((char *) &sm->smt_dest,6) ;
+ printf("\tSA ") ;
+ dump_hex((char *) &sm->smt_source,6) ;
+ printf(" Class %x Type %x Version %x\n",
+ sm->smt_class,sm->smt_type,sm->smt_version) ;
+ printf("TID %lx\t\tSID ",sm->smt_tid) ;
+ dump_hex((char *) &sm->smt_sid,8) ;
+ printf(" LEN %x\n",sm->smt_len) ;
+
+ len = sm->smt_len ;
+ pa = (struct smt_para *) (sm + 1) ;
+ while (len > 0 ) {
+ int plen ;
+#ifdef UNIX
+ printf("TYPE %x LEN %x VALUE\t",pa->p_type,pa->p_len) ;
+#else
+ printf("TYPE %04x LEN %2x VALUE\t",pa->p_type,pa->p_len) ;
+#endif
+ n = pa->p_len ;
+ if ( (n < 0 ) || (n > (int)(len - PARA_LEN))) {
+ n = len - PARA_LEN ;
+ printf(" BAD LENGTH\n") ;
+ break ;
+ }
+#ifdef LITTLE_ENDIAN
+ smt_swap_para(sm,smtlen,0) ;
+#endif
+ if (n < 24) {
+ dump_hex((char *)(pa+1),(int) n) ;
+ printf("\n") ;
+ }
+ else {
+ int first = 0 ;
+ c = (char *)(pa+1) ;
+ dump_hex(c,16) ;
+ printf("\n") ;
+ n -= 16 ;
+ c += 16 ;
+ while (n > 0) {
+ nn = (n > 16) ? 16 : n ;
+ if (n > 64) {
+ if (first == 0)
+ printf("\t\t\t...\n") ;
+ first = 1 ;
+ }
+ else {
+ printf("\t\t\t") ;
+ dump_hex(c,nn) ;
+ printf("\n") ;
+ }
+ n -= nn ;
+ c += 16 ;
+ }
+ }
+#ifdef LITTLE_ENDIAN
+ smt_swap_para(sm,smtlen,1) ;
+#endif
+ plen = (pa->p_len + PARA_LEN + 3) & ~3 ;
+ len -= plen ;
+ pa = (struct smt_para *)((char *)pa + plen) ;
+ }
+ printf("-------------------------------------------------\n\n") ;
+}
+
+void dump_hex(char *p, int len)
+{
+ int n = 0 ;
+ while (len--) {
+ n++ ;
+#ifdef UNIX
+ printf("%x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
+#else
+ printf("%02x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
+#endif
+ }
+}
+#endif /* no BOOT */
+#endif /* DEBUG */
+
+
+#endif /* no SLIM_SMT */
diff --git a/drivers/net/skfp/queue.c b/drivers/net/skfp/queue.c
new file mode 100644
index 000000000000..09adb3d68b7c
--- /dev/null
+++ b/drivers/net/skfp/queue.c
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT Event Queue Management
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
+#endif
+
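+/* PRINTF() is defined empty: debug output in this file is compiled away */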
+#define PRINTF(a,b,c)
+
+/*
+ * init event queue management
+ */
+void ev_init(struct s_smc *smc)
+{
+ smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ;
+}
+
+/*
+ * add event to queue
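+ * The event queue is a fixed size ring buffer (MAX_EVENT entries);
+ * if the put pointer catches up with the get pointer the overrun
+ * is logged via SMT_ERR_LOG (SMT_E0137).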
+ */
+void queue_event(struct s_smc *smc, int class, int event)
+{
+ PRINTF("queue class %d event %d\n",class,event) ;
+ smc->q.ev_put->class = class ;
+ smc->q.ev_put->event = event ;
+ if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT])
+ smc->q.ev_put = smc->q.ev_queue ;
+
+ if (smc->q.ev_put == smc->q.ev_get) {
+ SMT_ERR_LOG(smc,SMT_E0137, SMT_E0137_MSG) ;
+ }
+}
+
+/*
+ * timer_event is called from HW timer package.
+ */
+void timer_event(struct s_smc *smc, u_long token)
+{
+ PRINTF("timer event class %d token %d\n",
+ EV_T_CLASS(token),
+ EV_T_EVENT(token)) ;
+ queue_event(smc,EV_T_CLASS(token),EV_T_EVENT(token));
+}
+
+/*
+ * event dispatcher
+ * while event queue is not empty
+ * get event from queue
+ * send command to state machine
+ * end
+ */
+void ev_dispatcher(struct s_smc *smc)
+{
+ struct event_queue *ev ; /* pointer into queue */
+ int class ;
+
+ ev = smc->q.ev_get ;
+ PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ;
+ while (ev != smc->q.ev_put) {
+ PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ;
+ switch(class = ev->class) {
+ case EVENT_ECM : /* Entity Coordination Man. */
+ ecm(smc,(int)ev->event) ;
+ break ;
+ case EVENT_CFM : /* Configuration Man. */
+ cfm(smc,(int)ev->event) ;
+ break ;
+ case EVENT_RMT : /* Ring Man. */
+ rmt(smc,(int)ev->event) ;
+ break ;
+ case EVENT_SMT :
+ smt_event(smc,(int)ev->event) ;
+ break ;
+#ifdef CONCENTRATOR
+ case 99 :
+ timer_test_event(smc,(int)ev->event) ;
+ break ;
+#endif
+ case EVENT_PCMA : /* PHY A */
+ case EVENT_PCMB : /* PHY B */
+ default :
+ if (class >= EVENT_PCMA &&
+ class < EVENT_PCMA + NUMPHYS) {
+ pcm(smc,class - EVENT_PCMA,(int)ev->event) ;
+ break ;
+ }
+ SMT_PANIC(smc,SMT_E0121, SMT_E0121_MSG) ;
+ return ;
+ }
+
+ if (++ev == &smc->q.ev_queue[MAX_EVENT])
+ ev = smc->q.ev_queue ;
+
+ /* Renew get: it is used in queue_event() to detect overruns */
+ smc->q.ev_get = ev;
+ }
+}
+
+/*
+ * smt_online connects to or disconnects from the ring
+ * MUST be called to initiate connection establishment
+ *
+ * on 0 disconnect
+ * on 1 connect
+ */
+u_short smt_online(struct s_smc *smc, int on)
+{
+ queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
+ ev_dispatcher(smc) ;
+ return(smc->mib.fddiSMTCF_State) ;
+}
+
+/*
+ * set SMT flag to value
+ * flag flag name
+ * value flag value
+ * dump current flag setting
+ */
+#ifdef CONCENTRATOR
+void do_smt_flag(struct s_smc *smc, char *flag, int value)
+{
+#ifdef DEBUG
+ struct smt_debug *deb;
+
+ SK_UNUSED(smc) ;
+
+#ifdef DEBUG_BRD
+ deb = &smc->debug;
+#else
+ deb = &debug;
+#endif
+ if (!strcmp(flag,"smt"))
+ deb->d_smt = value ;
+ else if (!strcmp(flag,"smtf"))
+ deb->d_smtf = value ;
+ else if (!strcmp(flag,"pcm"))
+ deb->d_pcm = value ;
+ else if (!strcmp(flag,"rmt"))
+ deb->d_rmt = value ;
+ else if (!strcmp(flag,"cfm"))
+ deb->d_cfm = value ;
+ else if (!strcmp(flag,"ecm"))
+ deb->d_ecm = value ;
+ printf("smt %d\n",deb->d_smt) ;
+ printf("smtf %d\n",deb->d_smtf) ;
+ printf("pcm %d\n",deb->d_pcm) ;
+ printf("rmt %d\n",deb->d_rmt) ;
+ printf("cfm %d\n",deb->d_cfm) ;
+ printf("ecm %d\n",deb->d_ecm) ;
+#endif /* DEBUG */
+}
+#endif
diff --git a/drivers/net/skfp/rmt.c b/drivers/net/skfp/rmt.c
new file mode 100644
index 000000000000..ef8d5672d9e8
--- /dev/null
+++ b/drivers/net/skfp/rmt.c
@@ -0,0 +1,654 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT RMT
+ Ring Management
+*/
+
+/*
+ * Hardware independent state machine implementation
+ * The following external SMT functions are referenced :
+ *
+ * queue_event()
+ * smt_timer_start()
+ * smt_timer_stop()
+ *
+ * The following external HW dependent functions are referenced :
+ * sm_ma_control()
+ * sm_mac_check_beacon_claim()
+ *
+ * The following HW dependent events are required :
+ * RM_RING_OP
+ * RM_RING_NON_OP
+ * RM_MY_BEACON
+ * RM_OTHER_BEACON
+ * RM_MY_CLAIM
+ * RM_TRT_EXP
+ * RM_VALID_CLAIM
+ *
+ */
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ;
+#endif
+
+/*
+ * FSM Macros
+ */
+#define AFLAG 0x10
+#define GO_STATE(x) (smc->mib.m[MAC0].fddiMACRMTState = (x)|AFLAG)
+#define ACTIONS_DONE() (smc->mib.m[MAC0].fddiMACRMTState &= ~AFLAG)
+#define ACTIONS(x) (x|AFLAG)
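+/*
+ * The AFLAG bit marks a state whose entry actions have not been
+ * executed yet: GO_STATE() enters the new state with AFLAG set, the
+ * ACTIONS(x) case of the state machine runs the entry actions and
+ * ACTIONS_DONE() clears the flag.
+ */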
+
+#define RM0_ISOLATED 0
+#define RM1_NON_OP 1 /* not operational */
+#define RM2_RING_OP 2 /* ring operational */
+#define RM3_DETECT 3 /* detect dupl addresses */
+#define RM4_NON_OP_DUP 4 /* dupl. addr detected */
+#define RM5_RING_OP_DUP 5 /* ring oper. with dupl. addr */
+#define RM6_DIRECTED 6 /* sending directed beacons */
+#define RM7_TRACE 7 /* trace initiated */
+
+#ifdef DEBUG
+/*
+ * symbolic state names
+ */
+static const char * const rmt_states[] = {
+ "RM0_ISOLATED","RM1_NON_OP","RM2_RING_OP","RM3_DETECT",
+ "RM4_NON_OP_DUP","RM5_RING_OP_DUP","RM6_DIRECTED",
+ "RM7_TRACE"
+} ;
+
+/*
+ * symbolic event names
+ */
+static const char * const rmt_events[] = {
+ "NONE","RM_RING_OP","RM_RING_NON_OP","RM_MY_BEACON",
+ "RM_OTHER_BEACON","RM_MY_CLAIM","RM_TRT_EXP","RM_VALID_CLAIM",
+ "RM_JOIN","RM_LOOP","RM_DUP_ADDR","RM_ENABLE_FLAG",
+ "RM_TIMEOUT_NON_OP","RM_TIMEOUT_T_STUCK",
+ "RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT",
+ "RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE"
+} ;
+#endif
+
+/*
+ * Globals
+ * in struct s_rmt
+ */
+
+
+/*
+ * function declarations
+ */
+static void rmt_fsm(struct s_smc *smc, int cmd);
+static void start_rmt_timer0(struct s_smc *smc, u_long value, int event);
+static void start_rmt_timer1(struct s_smc *smc, u_long value, int event);
+static void start_rmt_timer2(struct s_smc *smc, u_long value, int event);
+static void stop_rmt_timer0(struct s_smc *smc);
+static void stop_rmt_timer1(struct s_smc *smc);
+static void stop_rmt_timer2(struct s_smc *smc);
+static void rmt_dup_actions(struct s_smc *smc);
+static void rmt_reinsert_actions(struct s_smc *smc);
+static void rmt_leave_actions(struct s_smc *smc);
+static void rmt_new_dup_actions(struct s_smc *smc);
+
+#ifndef SUPERNET_3
+extern void restart_trt_for_dbcn() ;
+#endif /*SUPERNET_3*/
+
+/*
+ init RMT state machine
+ clear all RMT vars and flags
+*/
+void rmt_init(struct s_smc *smc)
+{
+ smc->mib.m[MAC0].fddiMACRMTState = ACTIONS(RM0_ISOLATED) ;
+ smc->r.dup_addr_test = DA_NONE ;
+ smc->r.da_flag = 0 ;
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ smc->r.sm_ma_avail = FALSE ;
+ smc->r.loop_avail = 0 ;
+ smc->r.bn_flag = 0 ;
+ smc->r.jm_flag = 0 ;
+ smc->r.no_flag = TRUE ;
+}
+
+/*
+ RMT state machine
+ called by dispatcher
+
+ do
+ display state change
+ process event
+ until SM is stable
+*/
+void rmt(struct s_smc *smc, int event)
+{
+ int state ;
+
+ do {
+ DB_RMT("RMT : state %s%s",
+ (smc->mib.m[MAC0].fddiMACRMTState & AFLAG) ? "ACTIONS " : "",
+ rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG]) ;
+ DB_RMT(" event %s\n",rmt_events[event],0) ;
+ state = smc->mib.m[MAC0].fddiMACRMTState ;
+ rmt_fsm(smc,event) ;
+ event = 0 ;
+ } while (state != smc->mib.m[MAC0].fddiMACRMTState) ;
+ rmt_state_change(smc,(int)smc->mib.m[MAC0].fddiMACRMTState) ;
+}
+
+/*
+ process RMT event
+*/
+static void rmt_fsm(struct s_smc *smc, int cmd)
+{
+ /*
+ * RM00-RM70 : from all states
+ */
+ if (!smc->r.rm_join && !smc->r.rm_loop &&
+ smc->mib.m[MAC0].fddiMACRMTState != ACTIONS(RM0_ISOLATED) &&
+ smc->mib.m[MAC0].fddiMACRMTState != RM0_ISOLATED) {
+ RS_SET(smc,RS_NORINGOP) ;
+ rmt_indication(smc,0) ;
+ GO_STATE(RM0_ISOLATED) ;
+ return ;
+ }
+
+ switch(smc->mib.m[MAC0].fddiMACRMTState) {
+ case ACTIONS(RM0_ISOLATED) :
+ stop_rmt_timer0(smc) ;
+ stop_rmt_timer1(smc) ;
+ stop_rmt_timer2(smc) ;
+
+ /*
+ * Disable MAC.
+ */
+ sm_ma_control(smc,MA_OFFLINE) ;
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ smc->r.loop_avail = FALSE ;
+ smc->r.sm_ma_avail = FALSE ;
+ smc->r.no_flag = TRUE ;
+ DB_RMTN(1,"RMT : ISOLATED\n",0,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM0_ISOLATED :
+ /*RM01*/
+ if (smc->r.rm_join || smc->r.rm_loop) {
+ /*
+ * According to the standard the MAC must be reset
+ * here. The FORMAC will be initialized and Claim
+ * and Beacon Frames will be uploaded to the MAC.
+ * So any change of Treq will take effect NOW.
+ */
+ sm_ma_control(smc,MA_RESET) ;
+ GO_STATE(RM1_NON_OP) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM1_NON_OP) :
+ start_rmt_timer0(smc,smc->s.rmt_t_non_op,RM_TIMEOUT_NON_OP) ;
+ stop_rmt_timer1(smc) ;
+ stop_rmt_timer2(smc) ;
+ sm_ma_control(smc,MA_BEACON) ;
+ DB_RMTN(1,"RMT : RING DOWN\n",0,0) ;
+ RS_SET(smc,RS_NORINGOP) ;
+ smc->r.sm_ma_avail = FALSE ;
+ rmt_indication(smc,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM1_NON_OP :
+ /*RM12*/
+ if (cmd == RM_RING_OP) {
+ RS_SET(smc,RS_RINGOPCHANGE) ;
+ GO_STATE(RM2_RING_OP) ;
+ break ;
+ }
+ /*RM13*/
+ else if (cmd == RM_TIMEOUT_NON_OP) {
+ smc->r.bn_flag = FALSE ;
+ smc->r.no_flag = TRUE ;
+ GO_STATE(RM3_DETECT) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM2_RING_OP) :
+ stop_rmt_timer0(smc) ;
+ stop_rmt_timer1(smc) ;
+ stop_rmt_timer2(smc) ;
+ smc->r.no_flag = FALSE ;
+ if (smc->r.rm_loop)
+ smc->r.loop_avail = TRUE ;
+ if (smc->r.rm_join) {
+ smc->r.sm_ma_avail = TRUE ;
+ if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE ;
+ else
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ }
+ DB_RMTN(1,"RMT : RING UP\n",0,0) ;
+ RS_CLEAR(smc,RS_NORINGOP) ;
+ RS_SET(smc,RS_RINGOPCHANGE) ;
+ rmt_indication(smc,1) ;
+ smt_stat_counter(smc,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM2_RING_OP :
+ /*RM21*/
+ if (cmd == RM_RING_NON_OP) {
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ smc->r.loop_avail = FALSE ;
+ RS_SET(smc,RS_RINGOPCHANGE) ;
+ GO_STATE(RM1_NON_OP) ;
+ break ;
+ }
+ /*RM22a*/
+ else if (cmd == RM_ENABLE_FLAG) {
+ if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE ;
+ else
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ }
+ /*RM25*/
+ else if (smc->r.dup_addr_test == DA_FAILED) {
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ smc->r.loop_avail = FALSE ;
+ smc->r.da_flag = TRUE ;
+ GO_STATE(RM5_RING_OP_DUP) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM3_DETECT) :
+ start_rmt_timer0(smc,smc->s.mac_d_max*2,RM_TIMEOUT_D_MAX) ;
+ start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
+ sm_mac_check_beacon_claim(smc) ;
+ DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM3_DETECT :
+ if (cmd == RM_TIMEOUT_POLL) {
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
+ sm_mac_check_beacon_claim(smc) ;
+ break ;
+ }
+ if (cmd == RM_TIMEOUT_D_MAX) {
+ smc->r.timer0_exp = TRUE ;
+ }
+ /*
+ *jd(22-Feb-1999)
+ * We need a time ">= 2*mac_d_max" after we have finished the
+ * Claim or Beacon state, so we restart timer0 at
+ * every state change.
+ */
+ if (cmd == RM_TX_STATE_CHANGE) {
+ start_rmt_timer0(smc,
+ smc->s.mac_d_max*2,
+ RM_TIMEOUT_D_MAX) ;
+ }
+ /*RM32*/
+ if (cmd == RM_RING_OP) {
+ GO_STATE(RM2_RING_OP) ;
+ break ;
+ }
+ /*RM33a*/
+ else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON)
+ && smc->r.bn_flag) {
+ smc->r.bn_flag = FALSE ;
+ }
+ /*RM33b*/
+ else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
+ int tx ;
+ /*
+ * set bn_flag only if in state T4 or T5:
+ * only if we're the beaconer should we start the
+ * trace !
+ */
+ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
+ DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0);
+ smc->r.bn_flag = TRUE ;
+ /*
+ * If one of the upstream stations beaconed
+ * and the link to the upstream neighbor is
+ * lost we need to restart the stuck timer to
+ * check the "stuck beacon" condition.
+ */
+ start_rmt_timer1(smc,smc->s.rmt_t_stuck,
+ RM_TIMEOUT_T_STUCK) ;
+ }
+ /*
+ * We do NOT need to clear smc->r.bn_flag in case of
+ * not being in state T4 or T5, because the flag
+ * must be cleared in order to get in this condition.
+ */
+
+ DB_RMTN(2,
+ "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
+ tx,smc->r.bn_flag) ;
+ }
+ /*RM34a*/
+ else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) {
+ rmt_new_dup_actions(smc) ;
+ GO_STATE(RM4_NON_OP_DUP) ;
+ break ;
+ }
+ /*RM34b*/
+ else if (cmd == RM_MY_BEACON && smc->r.timer0_exp) {
+ rmt_new_dup_actions(smc) ;
+ GO_STATE(RM4_NON_OP_DUP) ;
+ break ;
+ }
+ /*RM34c*/
+ else if (cmd == RM_VALID_CLAIM) {
+ rmt_new_dup_actions(smc) ;
+ GO_STATE(RM4_NON_OP_DUP) ;
+ break ;
+ }
+ /*RM36*/
+ else if (cmd == RM_TIMEOUT_T_STUCK &&
+ smc->r.rm_join && smc->r.bn_flag) {
+ GO_STATE(RM6_DIRECTED) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM4_NON_OP_DUP) :
+ start_rmt_timer0(smc,smc->s.rmt_t_announce,RM_TIMEOUT_ANNOUNCE);
+ start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
+ sm_mac_check_beacon_claim(smc) ;
+ DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM4_NON_OP_DUP :
+ if (cmd == RM_TIMEOUT_POLL) {
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
+ sm_mac_check_beacon_claim(smc) ;
+ break ;
+ }
+ /*RM41*/
+ if (!smc->r.da_flag) {
+ GO_STATE(RM1_NON_OP) ;
+ break ;
+ }
+ /*RM44a*/
+ else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
+ smc->r.bn_flag) {
+ smc->r.bn_flag = FALSE ;
+ }
+ /*RM44b*/
+ else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
+ int tx ;
+ /*
+ * set bn_flag only if in state T4 or T5:
+ * only if we're the beaconer should we start the
+ * trace !
+ */
+ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
+ DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0);
+ smc->r.bn_flag = TRUE ;
+ /*
+ * If one of the upstream stations beaconed
+ * and the link to the upstream neighbor is
+ * lost we need to restart the stuck timer to
+ * check the "stuck beacon" condition.
+ */
+ start_rmt_timer1(smc,smc->s.rmt_t_stuck,
+ RM_TIMEOUT_T_STUCK) ;
+ }
+ /*
+ * We do NOT need to clear smc->r.bn_flag in case of
+ * not being in state T4 or T5, because the flag
+ * must be cleared in order to get in this condition.
+ */
+
+ DB_RMTN(2,
+ "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
+ tx,smc->r.bn_flag) ;
+ }
+ /*RM44c*/
+ else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) {
+ rmt_dup_actions(smc) ;
+ }
+ /*RM45*/
+ else if (cmd == RM_RING_OP) {
+ smc->r.no_flag = FALSE ;
+ GO_STATE(RM5_RING_OP_DUP) ;
+ break ;
+ }
+ /*RM46*/
+ else if (cmd == RM_TIMEOUT_T_STUCK &&
+ smc->r.rm_join && smc->r.bn_flag) {
+ GO_STATE(RM6_DIRECTED) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM5_RING_OP_DUP) :
+ stop_rmt_timer0(smc) ;
+ stop_rmt_timer1(smc) ;
+ stop_rmt_timer2(smc) ;
+ DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ;
+ ACTIONS_DONE() ;
+ break;
+ case RM5_RING_OP_DUP :
+ /*RM52*/
+ if (smc->r.dup_addr_test == DA_PASSED) {
+ smc->r.da_flag = FALSE ;
+ GO_STATE(RM2_RING_OP) ;
+ break ;
+ }
+ /*RM54*/
+ else if (cmd == RM_RING_NON_OP) {
+ smc->r.jm_flag = FALSE ;
+ smc->r.bn_flag = FALSE ;
+ GO_STATE(RM4_NON_OP_DUP) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM6_DIRECTED) :
+ start_rmt_timer0(smc,smc->s.rmt_t_direct,RM_TIMEOUT_T_DIRECT) ;
+ stop_rmt_timer1(smc) ;
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
+ sm_ma_control(smc,MA_DIRECTED) ;
+ RS_SET(smc,RS_BEACON) ;
+ DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM6_DIRECTED :
+ /*RM63*/
+ if (cmd == RM_TIMEOUT_POLL) {
+ start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
+ sm_mac_check_beacon_claim(smc) ;
+#ifndef SUPERNET_3
+ /* Because of problems with the Supernet II chip set,
+ * sending of Directed Beacons will stop after 165 ms;
+ * therefore restart_trt_for_dbcn(smc) is called
+ * to prevent this.
+ */
+ restart_trt_for_dbcn(smc) ;
+#endif /*SUPERNET_3*/
+ break ;
+ }
+ if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
+ !smc->r.da_flag) {
+ smc->r.bn_flag = FALSE ;
+ GO_STATE(RM3_DETECT) ;
+ break ;
+ }
+ /*RM64*/
+ else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
+ smc->r.da_flag) {
+ smc->r.bn_flag = FALSE ;
+ GO_STATE(RM4_NON_OP_DUP) ;
+ break ;
+ }
+ /*RM67*/
+ else if (cmd == RM_TIMEOUT_T_DIRECT) {
+ GO_STATE(RM7_TRACE) ;
+ break ;
+ }
+ break ;
+ case ACTIONS(RM7_TRACE) :
+ stop_rmt_timer0(smc) ;
+ stop_rmt_timer1(smc) ;
+ stop_rmt_timer2(smc) ;
+ smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
+ queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
+ DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ;
+ ACTIONS_DONE() ;
+ break ;
+ case RM7_TRACE :
+ break ;
+ default:
+ SMT_PANIC(smc,SMT_E0122, SMT_E0122_MSG) ;
+ break;
+ }
+}
+
+/*
+ * (jd) RMT duplicate address actions
+ * leave the ring or reinsert, just as configured
+ */
+static void rmt_dup_actions(struct s_smc *smc)
+{
+ if (smc->r.jm_flag) {
+ }
+ else {
+ if (smc->s.rmt_dup_mac_behavior) {
+ SMT_ERR_LOG(smc,SMT_E0138, SMT_E0138_MSG) ;
+ rmt_reinsert_actions(smc) ;
+ }
+ else {
+ SMT_ERR_LOG(smc,SMT_E0135, SMT_E0135_MSG) ;
+ rmt_leave_actions(smc) ;
+ }
+ }
+}
+
+/*
+ * Reconnect to the Ring
+ */
+static void rmt_reinsert_actions(struct s_smc *smc)
+{
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+ queue_event(smc,EVENT_ECM,EC_CONNECT) ;
+}
+
+/*
+ * duplicate address detected
+ */
+static void rmt_new_dup_actions(struct s_smc *smc)
+{
+ smc->r.da_flag = TRUE ;
+ smc->r.bn_flag = FALSE ;
+ smc->r.jm_flag = FALSE ;
+	/*
+	 * We have three options: change the address, jam, or leave.
+	 * We leave the ring by default.
+	 * Optionally it is possible to reinsert after leaving the ring,
+	 * but this does not conform to the SMT spec.
+	 */
+ if (smc->s.rmt_dup_mac_behavior) {
+ SMT_ERR_LOG(smc,SMT_E0138, SMT_E0138_MSG) ;
+ rmt_reinsert_actions(smc) ;
+ }
+ else {
+ SMT_ERR_LOG(smc,SMT_E0135, SMT_E0135_MSG) ;
+ rmt_leave_actions(smc) ;
+ }
+}
+
+
+/*
+ * leave the ring
+ */
+static void rmt_leave_actions(struct s_smc *smc)
+{
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+ /*
+	 * Note: Do NOT try to reconnect later.
+	 * The station must stay off the ring!
+ */
+}
+
+/*
+ * SMT timer interface
+ * start RMT timer 0
+ */
+static void start_rmt_timer0(struct s_smc *smc, u_long value, int event)
+{
+ smc->r.timer0_exp = FALSE ; /* clear timer event flag */
+ smt_timer_start(smc,&smc->r.rmt_timer0,value,EV_TOKEN(EVENT_RMT,event));
+}
+
+/*
+ * SMT timer interface
+ * start RMT timer 1
+ */
+static void start_rmt_timer1(struct s_smc *smc, u_long value, int event)
+{
+ smc->r.timer1_exp = FALSE ; /* clear timer event flag */
+ smt_timer_start(smc,&smc->r.rmt_timer1,value,EV_TOKEN(EVENT_RMT,event));
+}
+
+/*
+ * SMT timer interface
+ * start RMT timer 2
+ */
+static void start_rmt_timer2(struct s_smc *smc, u_long value, int event)
+{
+ smc->r.timer2_exp = FALSE ; /* clear timer event flag */
+ smt_timer_start(smc,&smc->r.rmt_timer2,value,EV_TOKEN(EVENT_RMT,event));
+}
+
+/*
+ * SMT timer interface
+ * stop RMT timer 0
+ */
+static void stop_rmt_timer0(struct s_smc *smc)
+{
+ if (smc->r.rmt_timer0.tm_active)
+ smt_timer_stop(smc,&smc->r.rmt_timer0) ;
+}
+
+/*
+ * SMT timer interface
+ * stop RMT timer 1
+ */
+static void stop_rmt_timer1(struct s_smc *smc)
+{
+ if (smc->r.rmt_timer1.tm_active)
+ smt_timer_stop(smc,&smc->r.rmt_timer1) ;
+}
+
+/*
+ * SMT timer interface
+ * stop RMT timer 2
+ */
+static void stop_rmt_timer2(struct s_smc *smc)
+{
+ if (smc->r.rmt_timer2.tm_active)
+ smt_timer_stop(smc,&smc->r.rmt_timer2) ;
+}
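+
+/*
+ * Usage note (illustrative, not part of the state machine itself): each
+ * start_rmt_timerX() wrapper clears the matching timerX_exp flag and arms
+ * the shared SMT timer with the RMT event encoded via EV_TOKEN(), e.g.
+ *
+ *	start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
+ *
+ * re-arms the poll timer so that the RMT state machine above receives
+ * RM_TIMEOUT_POLL when it expires.
+ */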
+
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
new file mode 100644
index 000000000000..c88aad6edd74
--- /dev/null
+++ b/drivers/net/skfp/skfddi.c
@@ -0,0 +1,2293 @@
+/*
+ * File Name:
+ * skfddi.c
+ *
+ * Copyright Information:
+ * Copyright SysKonnect 1998,1999.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ * Abstract:
+ * A Linux device driver supporting the SysKonnect FDDI PCI controller
+ * family.
+ *
+ * Maintainers:
+ * CG Christoph Goos (cgoos@syskonnect.de)
+ *
+ * Contributors:
+ * DM David S. Miller
+ *
+ * Address all questions to:
+ * linux@syskonnect.de
+ *
+ * The technical manual for the adapters is available from SysKonnect's
+ * web pages: www.syskonnect.com
+ * Go to "Support" and search the Knowledge Base for "manual".
+ *
+ * Driver Architecture:
+ * The driver architecture is based on the DEC FDDI driver by
+ * Lawrence V. Stefani and several ethernet drivers.
+ * I also used an existing Windows NT miniport driver.
+ * All hardware-dependent functions are handled by the SysKonnect
+ * Hardware Module.
+ * The only files that are directly related to this source are
+ * skfddi.c, h/types.h, h/osdef1st.h and h/targetos.h.
+ * The others belong to the SysKonnect FDDI Hardware Module and
+ * are better left unchanged.
+ *
+ * Modification History:
+ * Date Name Description
+ * 02-Mar-98 CG Created.
+ *
+ * 10-Mar-99 CG Support for 2.2.x added.
+ * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
+ * 26-Oct-99 CG Fixed compilation error on 2.2.13
+ * 12-Nov-99 CG Source code release
+ * 22-Nov-99 CG Included in kernel source.
+ * 07-May-00 DM 64 bit fixes, new dma interface
+ * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
+ * Daniele Bellucci <bellucda@tiscali.it>
+ * 03-Dec-03 SH Convert to PCI device model
+ *
+ * Compilation options (-Dxxx):
+ * DRIVERDEBUG print lots of messages to log file
+ * DUMPPACKETS print received/transmitted packets to logfile
+ *
+ * Tested cpu architectures:
+ * - i386
+ * - sparc64
+ */
+
+/* Version information string - should be updated prior to */
+/* each new release!!! */
+#define VERSION "2.07"
+
+static const char *boot_msg =
+ "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
+ " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
+
+/* Include files */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "h/types.h"
+#undef ADDR // undo Linux definition
+#include "h/skfbi.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smtstate.h"
+
+
+// Define module-wide (static) routines
+static int skfp_driver_init(struct net_device *dev);
+static int skfp_open(struct net_device *dev);
+static int skfp_close(struct net_device *dev);
+static irqreturn_t skfp_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
+static void skfp_ctl_set_multicast_list(struct net_device *dev);
+static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
+static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
+static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev);
+static void send_queued_packets(struct s_smc *smc);
+static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
+static void ResetAdapter(struct s_smc *smc);
+
+
+// Functions needed by the hardware module
+void *mac_drv_get_space(struct s_smc *smc, u_int size);
+void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
+unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
+unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
+void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
+ int flag);
+void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
+void llc_restart_tx(struct s_smc *smc);
+void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count, int len);
+void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count);
+void mac_drv_fill_rxd(struct s_smc *smc);
+void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count);
+int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
+ int la_len);
+void dump_data(unsigned char *Data, int length);
+
+// External functions from the hardware module
+extern u_int mac_drv_check_space(void);
+extern void read_address(struct s_smc *smc, u_char * mac_addr);
+extern void card_stop(struct s_smc *smc);
+extern int mac_drv_init(struct s_smc *smc);
+extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
+ int len, int frame_status);
+extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
+ int frame_len, int frame_status);
+extern int init_smt(struct s_smc *smc, u_char * mac_addr);
+extern void fddi_isr(struct s_smc *smc);
+extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
+ int len, int frame_status);
+extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
+extern void mac_drv_clear_rx_queue(struct s_smc *smc);
+extern void enable_tx_irq(struct s_smc *smc, u_short queue);
+extern void mac_drv_clear_txd(struct s_smc *smc);
+
+static struct pci_device_id skfddi_pci_tbl[] = {
+ { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
+
+// Define module-wide (static) variables
+
+static int num_boards; /* total number of adapters configured */
+
+#ifdef DRIVERDEBUG
+#define PRINTK(s, args...) printk(s, ## args)
+#else
+#define PRINTK(s, args...)
+#endif // DRIVERDEBUG
+
+/*
+ * =================
+ * = skfp_init_one =
+ * =================
+ *
+ * Overview:
+ * Probes for supported FDDI PCI controllers
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * pdev - pointer to PCI device information
+ *
+ * Functional Description:
+ * This is now called by PCI driver registration process
+ * for each board found.
+ *
+ * Return Codes:
+ * 0 - This device (fddi0, fddi1, etc) configured successfully
+ * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
+ * present for this device name
+ *
+ *
+ * Side Effects:
+ * Device structures for FDDI adapters (fddi0, fddi1, etc) are
+ * initialized and the board resources are read and stored in
+ * the device structure.
+ */
+static int skfp_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct s_smc *smc; /* board pointer */
+ void __iomem *mem;
+ int err;
+
+ PRINTK(KERN_INFO "entering skfp_init_one\n");
+
+ if (num_boards == 0)
+ printk("%s\n", boot_msg);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pci_request_regions(pdev, "skfddi");
+ if (err)
+ goto err_out1;
+
+ pci_set_master(pdev);
+
+#ifdef MEM_MAPPED_IO
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ printk(KERN_ERR "skfp: region is not an MMIO resource\n");
+ err = -EIO;
+ goto err_out2;
+ }
+
+ mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
+#else
+	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
+		printk(KERN_ERR "skfp: region is not a PIO resource\n");
+ err = -EIO;
+ goto err_out2;
+ }
+
+ mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
+#endif
+ if (!mem) {
+ printk(KERN_ERR "skfp: Unable to map register, "
+ "FDDI adapter will be disabled.\n");
+ err = -EIO;
+ goto err_out2;
+ }
+
+ dev = alloc_fddidev(sizeof(struct s_smc));
+ if (!dev) {
+ printk(KERN_ERR "skfp: Unable to allocate fddi device, "
+ "FDDI adapter will be disabled.\n");
+ err = -ENOMEM;
+ goto err_out3;
+ }
+
+ dev->irq = pdev->irq;
+ dev->get_stats = &skfp_ctl_get_stats;
+ dev->open = &skfp_open;
+ dev->stop = &skfp_close;
+ dev->hard_start_xmit = &skfp_send_pkt;
+ dev->set_multicast_list = &skfp_ctl_set_multicast_list;
+ dev->set_mac_address = &skfp_ctl_set_mac_address;
+ dev->do_ioctl = &skfp_ioctl;
+ dev->header_cache_update = NULL; /* not supported */
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Initialize board structure with bus-specific info */
+ smc = netdev_priv(dev);
+ smc->os.dev = dev;
+ smc->os.bus_type = SK_BUS_TYPE_PCI;
+ smc->os.pdev = *pdev;
+ smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
+ smc->os.MaxFrameSize = MAX_FRAME_SIZE;
+ smc->os.dev = dev;
+ smc->hw.slot = -1;
+ smc->hw.iop = mem;
+ smc->os.ResetRequested = FALSE;
+ skb_queue_head_init(&smc->os.SendSkbQueue);
+
+ dev->base_addr = (unsigned long)mem;
+
+ err = skfp_driver_init(dev);
+ if (err)
+ goto err_out4;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out5;
+
+ ++num_boards;
+ pci_set_drvdata(pdev, dev);
+
+ if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
+ (pdev->subsystem_device & 0xff00) == 0x5800)
+ printk("%s: SysKonnect FDDI PCI adapter"
+ " found (SK-%04X)\n", dev->name,
+ pdev->subsystem_device);
+ else
+ printk("%s: FDDI PCI adapter found\n", dev->name);
+
+ return 0;
+err_out5:
+ if (smc->os.SharedMemAddr)
+ pci_free_consistent(pdev, smc->os.SharedMemSize,
+ smc->os.SharedMemAddr,
+ smc->os.SharedMemDMA);
+ pci_free_consistent(pdev, MAX_FRAME_SIZE,
+ smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
+err_out4:
+ free_netdev(dev);
+err_out3:
+#ifdef MEM_MAPPED_IO
+ iounmap(mem);
+#else
+ ioport_unmap(mem);
+#endif
+err_out2:
+ pci_release_regions(pdev);
+err_out1:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * Called for each adapter board from pci_unregister_driver
+ */
+static void __devexit skfp_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *p = pci_get_drvdata(pdev);
+ struct s_smc *lp = netdev_priv(p);
+
+ unregister_netdev(p);
+
+ if (lp->os.SharedMemAddr) {
+ pci_free_consistent(&lp->os.pdev,
+ lp->os.SharedMemSize,
+ lp->os.SharedMemAddr,
+ lp->os.SharedMemDMA);
+ lp->os.SharedMemAddr = NULL;
+ }
+ if (lp->os.LocalRxBuffer) {
+ pci_free_consistent(&lp->os.pdev,
+ MAX_FRAME_SIZE,
+ lp->os.LocalRxBuffer,
+ lp->os.LocalRxBufferDMA);
+ lp->os.LocalRxBuffer = NULL;
+ }
+#ifdef MEM_MAPPED_IO
+ iounmap(lp->hw.iop);
+#else
+ ioport_unmap(lp->hw.iop);
+#endif
+ pci_release_regions(pdev);
+ free_netdev(p);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
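+
+/*
+ * Illustrative sketch (the driver's actual registration code is not shown
+ * here): skfp_init_one() and skfp_remove_one() are the probe/remove
+ * callbacks of a pci_driver, typically wired up roughly like
+ *
+ *	static struct pci_driver skfddi_pci_driver = {	// name is illustrative
+ *		.name		= "skfddi",
+ *		.id_table	= skfddi_pci_tbl,
+ *		.probe		= skfp_init_one,
+ *		.remove		= __devexit_p(skfp_remove_one),
+ *	};
+ *
+ * and registered with pci_register_driver() from the module init routine.
+ */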
+
+/*
+ * ====================
+ * = skfp_driver_init =
+ * ====================
+ *
+ * Overview:
+ * Initializes remaining adapter board structure information
+ * and makes sure adapter is in a safe state prior to skfp_open().
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This function allocates additional resources such as the host memory
+ * blocks needed by the adapter.
+ * The adapter is also reset. The OS must call skfp_open() to open
+ * the adapter and bring it on-line.
+ *
+ * Return Codes:
+ * 0 - initialization succeeded
+ * -1 - initialization failed
+ */
+static int skfp_driver_init(struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ skfddi_priv *bp = &smc->os;
+ int err = -EIO;
+
+ PRINTK(KERN_INFO "entering skfp_driver_init\n");
+
+ // set the io address in private structures
+ bp->base_addr = dev->base_addr;
+
+ // Get the interrupt level from the PCI Configuration Table
+ smc->hw.irq = dev->irq;
+
+ spin_lock_init(&bp->DriverLock);
+
+ // Allocate invalid frame
+ bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
+ if (!bp->LocalRxBuffer) {
+ printk("could not allocate mem for ");
+ printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
+ goto fail;
+ }
+
+ // Determine the required size of the 'shared' memory area.
+ bp->SharedMemSize = mac_drv_check_space();
+ PRINTK(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
+ if (bp->SharedMemSize > 0) {
+ bp->SharedMemSize += 16; // for descriptor alignment
+
+ bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
+ bp->SharedMemSize,
+ &bp->SharedMemDMA);
+		if (!bp->SharedMemAddr) {
+ printk("could not allocate mem for ");
+ printk("hardware module: %ld byte\n",
+ bp->SharedMemSize);
+ goto fail;
+ }
+ bp->SharedMemHeap = 0; // Nothing used yet.
+
+ } else {
+ bp->SharedMemAddr = NULL;
+ bp->SharedMemHeap = 0;
+ } // SharedMemSize > 0
+
+ memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
+
+ card_stop(smc); // Reset adapter.
+
+ PRINTK(KERN_INFO "mac_drv_init()..\n");
+ if (mac_drv_init(smc) != 0) {
+ PRINTK(KERN_INFO "mac_drv_init() failed.\n");
+ goto fail;
+ }
+ read_address(smc, NULL);
+ PRINTK(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
+ smc->hw.fddi_canon_addr.a[0],
+ smc->hw.fddi_canon_addr.a[1],
+ smc->hw.fddi_canon_addr.a[2],
+ smc->hw.fddi_canon_addr.a[3],
+ smc->hw.fddi_canon_addr.a[4],
+ smc->hw.fddi_canon_addr.a[5]);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+
+ smt_reset_defaults(smc, 0);
+
+ return (0);
+
+fail:
+ if (bp->SharedMemAddr) {
+ pci_free_consistent(&bp->pdev,
+ bp->SharedMemSize,
+ bp->SharedMemAddr,
+ bp->SharedMemDMA);
+ bp->SharedMemAddr = NULL;
+ }
+ if (bp->LocalRxBuffer) {
+ pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
+ bp->LocalRxBuffer, bp->LocalRxBufferDMA);
+ bp->LocalRxBuffer = NULL;
+ }
+ return err;
+} // skfp_driver_init
+
+
+/*
+ * =============
+ * = skfp_open =
+ * =============
+ *
+ * Overview:
+ * Opens the adapter
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This function brings the adapter to an operational state.
+ *
+ * Return Codes:
+ * 0 - Adapter was successfully opened
+ * -EAGAIN - Could not register IRQ
+ */
+static int skfp_open(struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ int err;
+
+ PRINTK(KERN_INFO "entering skfp_open\n");
+ /* Register IRQ - support shared interrupts by passing device ptr */
+ err = request_irq(dev->irq, (void *) skfp_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err)
+ return err;
+
+ /*
+ * Set current address to factory MAC address
+ *
+ * Note: We've already done this step in skfp_driver_init.
+ * However, it's possible that a user has set a node
+ * address override, then closed and reopened the
+ * adapter. Unless we reset the device address field
+ * now, we'll continue to use the existing modified
+ * address.
+ */
+ read_address(smc, NULL);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+
+ init_smt(smc, NULL);
+ smt_online(smc, 1);
+ STI_FBI();
+
+ /* Clear local multicast address tables */
+ mac_clear_multicast(smc);
+
+ /* Disable promiscuous filter settings */
+ mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
+
+ netif_start_queue(dev);
+ return (0);
+} // skfp_open
+
+
+/*
+ * ==============
+ * = skfp_close =
+ * ==============
+ *
+ * Overview:
+ * Closes the device/module.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This routine closes the adapter and brings it to a safe state.
+ * The interrupt service routine is deregistered with the OS.
+ * The adapter can be opened again with another call to skfp_open().
+ *
+ * Return Codes:
+ * Always return 0.
+ *
+ * Assumptions:
+ * No further requests for this adapter are made after this routine is
+ * called. skfp_open() can be called to reset and reinitialize the
+ * adapter.
+ */
+static int skfp_close(struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ skfddi_priv *bp = &smc->os;
+
+ CLI_FBI();
+ smt_reset_defaults(smc, 1);
+ card_stop(smc);
+ mac_drv_clear_tx_queue(smc);
+ mac_drv_clear_rx_queue(smc);
+
+ netif_stop_queue(dev);
+ /* Deregister (free) IRQ */
+ free_irq(dev->irq, dev);
+
+ skb_queue_purge(&bp->SendSkbQueue);
+ bp->QueueSkb = MAX_TX_QUEUE_LEN;
+
+ return (0);
+} // skfp_close
+
+
+/*
+ * ==================
+ * = skfp_interrupt =
+ * ==================
+ *
+ * Overview:
+ * Interrupt processing routine
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * irq - interrupt vector
+ * dev_id - pointer to device information
+ * regs - pointer to registers structure
+ *
+ * Functional Description:
+ * This routine calls the interrupt processing routine for this adapter. It
+ * disables and reenables adapter interrupts, as appropriate. We can support
+ * shared interrupts since the incoming dev_id pointer provides our device
+ * structure context. All the real work is done in the hardware module.
+ *
+ * Return Codes:
+ * None
+ *
+ * Assumptions:
+ * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
+ * on Intel-based systems) is done by the operating system outside this
+ * routine.
+ *
+ * System interrupts are enabled through this call.
+ *
+ * Side Effects:
+ * Interrupts are disabled, then reenabled at the adapter.
+ */
+
+irqreturn_t skfp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct s_smc *smc; /* private board structure pointer */
+ skfddi_priv *bp;
+
+ if (dev == NULL) {
+		printk("skfp: irq %d for unknown device\n", irq);
+ return IRQ_NONE;
+ }
+
+ smc = netdev_priv(dev);
+ bp = &smc->os;
+
+ // IRQs enabled or disabled ?
+ if (inpd(ADDR(B0_IMSK)) == 0) {
+ // IRQs are disabled: must be shared interrupt
+ return IRQ_NONE;
+ }
+ // Note: At this point, IRQs are enabled.
+ if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
+ // Adapter did not issue an IRQ: must be shared interrupt
+ return IRQ_NONE;
+ }
+ CLI_FBI(); // Disable IRQs from our adapter.
+ spin_lock(&bp->DriverLock);
+
+ // Call interrupt handler in hardware module (HWM).
+ fddi_isr(smc);
+
+ if (smc->os.ResetRequested) {
+ ResetAdapter(smc);
+ smc->os.ResetRequested = FALSE;
+ }
+ spin_unlock(&bp->DriverLock);
+ STI_FBI(); // Enable IRQs from our adapter.
+
+ return IRQ_HANDLED;
+} // skfp_interrupt
+
+
+/*
+ * ======================
+ * = skfp_ctl_get_stats =
+ * ======================
+ *
+ * Overview:
+ * Get statistics for FDDI adapter
+ *
+ * Returns:
+ * Pointer to FDDI statistics structure
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * Gets current MIB objects from adapter, then
+ * returns FDDI statistics structure as defined
+ * in if_fddi.h.
+ *
+ * Note: Since the FDDI statistics structure is
+ * still new and the device structure doesn't
+ * have an FDDI-specific get statistics handler,
+ * we'll return the FDDI statistics structure as
+ * a pointer to an Ethernet statistics structure.
+ * That way, at least the first part of the statistics
+ * structure can be decoded properly.
+ * We'll have to pay attention to this routine as the
+ * device structure becomes more mature and LAN media
+ * independent.
+ *
+ */
+struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
+{
+ struct s_smc *bp = netdev_priv(dev);
+
+ /* Fill the bp->stats structure with driver-maintained counters */
+
+ bp->os.MacStat.port_bs_flag[0] = 0x1234;
+ bp->os.MacStat.port_bs_flag[1] = 0x5678;
+// goos: need to fill out fddi statistic
+#if 0
+ /* Get FDDI SMT MIB objects */
+
+/* Fill the bp->stats structure with the SMT MIB object values */
+
+ memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
+ bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
+ bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
+ bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
+ memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
+ bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
+ bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
+ bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
+ bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
+ bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
+ bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
+ bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
+ bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
+ bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
+ bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
+ bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
+ bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
+ bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
+ bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
+ bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
+ bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
+ bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
+ bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
+ bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
+ bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
+ bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
+ bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
+ bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
+ bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
+ memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
+ memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
+ bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
+ bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
+ bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
+ memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
+ bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
+ bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
+ bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
+ bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
+ bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
+ bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
+ bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
+ bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
+ bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
+ bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
+ bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
+ bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
+ bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
+ bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
+ bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
+ bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
+ memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
+ bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
+ bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
+ bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
+ bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
+ bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
+ bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
+ bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
+ bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
+ bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
+ bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
+ memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
+ memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
+ bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
+ bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
+ bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
+ bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
+ bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
+ bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
+ bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
+ bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
+ bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
+ bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
+ bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
+ bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
+ bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
+ bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
+ bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
+ bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
+ bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
+ bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
+ bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
+ bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
+ bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
+ bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
+ bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
+ bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
+ bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
+ bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
+
+
+ /* Fill the bp->stats structure with the FDDI counter values */
+
+ bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
+ bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
+ bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
+ bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
+ bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
+ bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
+ bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
+ bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
+ bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
+ bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
+ bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
+
+#endif
+ return ((struct net_device_stats *) &bp->os.MacStat);
+} // ctl_get_stat
+
+
+/*
+ * ==============================
+ * = skfp_ctl_set_multicast_list =
+ * ==============================
+ *
+ * Overview:
+ * Enable/Disable LLC frame promiscuous mode reception
+ * on the adapter and/or update multicast address table.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * This function acquires the driver lock and then simply calls
+ * skfp_ctl_set_multicast_list_wo_lock.
+ * This routine follows a fairly simple algorithm for setting the
+ * adapter filters and CAM:
+ *
+ * if IFF_PROMISC flag is set
+ * enable promiscuous mode
+ * else
+ * disable promiscuous mode
+ * if number of multicast addresses <= max. multicast number
+ * add mc addresses to adapter table
+ * else
+ * enable promiscuous mode
+ * update adapter filters
+ *
+ * Assumptions:
+ * Multicast addresses are presented in canonical (LSB) format.
+ *
+ * Side Effects:
+ * On-board adapter filters are updated.
+ */
+static void skfp_ctl_set_multicast_list(struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ skfddi_priv *bp = &smc->os;
+ unsigned long Flags;
+
+ spin_lock_irqsave(&bp->DriverLock, Flags);
+ skfp_ctl_set_multicast_list_wo_lock(dev);
+ spin_unlock_irqrestore(&bp->DriverLock, Flags);
+ return;
+} // skfp_ctl_set_multicast_list
+
+
+
+static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+ int i;
+
+ /* Enable promiscuous mode, if necessary */
+ if (dev->flags & IFF_PROMISC) {
+ mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
+ PRINTK(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
+ }
+ /* Else, update multicast address table */
+ else {
+ mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
+ PRINTK(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
+
+ // Reset all MC addresses
+ mac_clear_multicast(smc);
+ mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
+ PRINTK(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ } else if (dev->mc_count > 0) {
+ if (dev->mc_count <= FPMAX_MULTICAST) {
+ /* use exact filtering */
+
+ // point to first multicast addr
+ dmi = dev->mc_list;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ mac_add_multicast(smc,
+ (struct fddi_addr *)dmi->dmi_addr,
+ 1);
+
+ PRINTK(KERN_INFO "ENABLE MC ADDRESS:");
+ PRINTK(" %02x %02x %02x ",
+ dmi->dmi_addr[0],
+ dmi->dmi_addr[1],
+ dmi->dmi_addr[2]);
+ PRINTK("%02x %02x %02x\n",
+ dmi->dmi_addr[3],
+ dmi->dmi_addr[4],
+ dmi->dmi_addr[5]);
+ dmi = dmi->next;
+ } // for
+
+ } else { // more MC addresses than HW supports
+
+ mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
+ PRINTK(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ }
+ } else { // no MC addresses
+
+ PRINTK(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
+ }
+
+ /* Update adapter filters */
+ mac_update_multicast(smc);
+ }
+ return;
+} // skfp_ctl_set_multicast_list_wo_lock
+
+
+/*
+ * ===========================
+ * = skfp_ctl_set_mac_address =
+ * ===========================
+ *
+ * Overview:
+ * set new mac address on adapter and update dev_addr field in device table.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * dev - pointer to device information
+ * addr - pointer to sockaddr structure containing unicast address to set
+ *
+ * Assumptions:
+ * The address pointed to by addr->sa_data is a valid unicast
+ * address and is presented in canonical (LSB) format.
+ */
+static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
+ skfddi_priv *bp = &smc->os;
+ unsigned long Flags;
+
+
+ memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
+ spin_lock_irqsave(&bp->DriverLock, Flags);
+ ResetAdapter(smc);
+ spin_unlock_irqrestore(&bp->DriverLock, Flags);
+
+ return (0); /* always return zero */
+} // skfp_ctl_set_mac_address
+
+
+/*
+ * ==============
+ * = skfp_ioctl =
+ * ==============
+ *
+ * Overview:
+ *
+ * Perform IOCTL call functions here. Some are privileged operations, and the
+ * caller's capabilities are checked in those cases.
+ *
+ * Returns:
+ * status value
+ * 0 - success
+ * other - failure
+ *
+ * Arguments:
+ * dev - pointer to device information
+ * rq - pointer to ioctl request structure
+ * cmd - ioctl command (unused; the SKFP sub-command is read from rq->ifr_data)
+ *
+ */
+
+
+static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ skfddi_priv *lp = &smc->os;
+ struct s_skfp_ioctl ioc;
+ int status = 0;
+
+ if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
+ return -EFAULT;
+
+ switch (ioc.cmd) {
+ case SKFP_GET_STATS: /* Get the driver statistics */
+ ioc.len = sizeof(lp->MacStat);
+ status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
+ ? -EFAULT : 0;
+ break;
+ case SKFP_CLR_STATS: /* Zero out the driver statistics */
+		if (capable(CAP_NET_ADMIN)) {
+ memset(&lp->MacStat, 0, sizeof(lp->MacStat));
+ } else {
+ status = -EPERM;
+ }
+ break;
+ default:
+		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
+ status = -EOPNOTSUPP;
+
+ } // switch
+
+ return status;
+} // skfp_ioctl
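+
+/*
+ * Illustrative userspace sketch (assumptions: the private ioctl reaches
+ * skfp_ioctl() via SIOCDEVPRIVATE and the interface is named "fddi0";
+ * neither is defined in this file):
+ *
+ *	struct s_skfp_ioctl ioc;
+ *	struct ifreq ifr;
+ *	char buf[8192];		// assumed large enough for the driver stats
+ *
+ *	ioc.cmd  = SKFP_GET_STATS;
+ *	ioc.len  = sizeof(buf);
+ *	ioc.data = buf;
+ *	strcpy(ifr.ifr_name, "fddi0");
+ *	ifr.ifr_data = (caddr_t) &ioc;
+ *	ioctl(sock_fd, SIOCDEVPRIVATE, &ifr);
+ */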
+
+
+/*
+ * =====================
+ * = skfp_send_pkt =
+ * =====================
+ *
+ * Overview:
+ * Queues a packet for transmission and tries to transmit it.
+ *
+ * Returns:
+ * Condition code
+ *
+ * Arguments:
+ * skb - pointer to sk_buff to queue for transmission
+ * dev - pointer to device information
+ *
+ * Functional Description:
+ * Here we assume that an incoming skb transmit request
+ * is contained in a single physically contiguous buffer
+ * in which the virtual address of the start of packet
+ * (skb->data) can be converted to a physical address
+ * by using pci_map_single().
+ *
+ * We have an internal queue for packets we can not send
+ * immediately. Packets in this queue can be given to the
+ * adapter if transmit buffers are freed.
+ *
+ * We can't free the skb until after it's been DMA'd
+ * out by the adapter, so we'll keep it in the driver and
+ * return it in mac_drv_tx_complete.
+ *
+ * Return Codes:
+ * 0 - driver has queued and/or sent packet
+ * 1 - caller should requeue the sk_buff for later transmission
+ *
+ * Assumptions:
+ * The entire packet is stored in one physically
+ * contiguous buffer which is not cached and whose
+ * 32-bit physical address can be determined.
+ *
+ * It's vital that this routine is NOT reentered for the
+ * same board and that the OS is not in another section of
+ * code (eg. skfp_interrupt) for the same board on a
+ * different thread.
+ *
+ * Side Effects:
+ * None
+ */
+static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s_smc *smc = netdev_priv(dev);
+ skfddi_priv *bp = &smc->os;
+
+ PRINTK(KERN_INFO "skfp_send_pkt\n");
+
+ /*
+ * Verify that incoming transmit request is OK
+ *
+ * Note: The packet size check is consistent with other
+ * Linux device drivers, although the correct packet
+ * size should be verified before calling the
+ * transmit routine.
+ */
+
+ if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
+ bp->MacStat.gen.tx_errors++; /* bump error counter */
+ // dequeue packets from xmt queue and send them
+ netif_start_queue(dev);
+ dev_kfree_skb(skb);
+ return (0); /* return "success" */
+ }
+ if (bp->QueueSkb == 0) { // return with tbusy set: queue full
+
+ netif_stop_queue(dev);
+ return 1;
+ }
+ bp->QueueSkb--;
+ skb_queue_tail(&bp->SendSkbQueue, skb);
+ send_queued_packets(netdev_priv(dev));
+ if (bp->QueueSkb == 0) {
+ netif_stop_queue(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+
+} // skfp_send_pkt
+
+
+/*
+ * =======================
+ * = send_queued_packets =
+ * =======================
+ *
+ * Overview:
+ * Send packets from the driver queue as long as there are some and
+ * transmit resources are available.
+ *
+ * Returns:
+ * None
+ *
+ * Arguments:
+ * smc - pointer to smc (adapter) structure
+ *
+ * Functional Description:
+ * Take a packet from queue if there is any. If not, then we are done.
+ * Check if there are resources to send the packet. If not, requeue it
+ * and exit.
+ * Set packet descriptor flags and give packet to adapter.
+ * Check if any send resources can be freed (we do not use the
+ * transmit complete interrupt).
+ */
+static void send_queued_packets(struct s_smc *smc)
+{
+ skfddi_priv *bp = &smc->os;
+ struct sk_buff *skb;
+ unsigned char fc;
+ int queue;
+ struct s_smt_fp_txd *txd; // Current TxD.
+ dma_addr_t dma_address;
+ unsigned long Flags;
+
+ int frame_status; // HWM tx frame status.
+
+ PRINTK(KERN_INFO "send queued packets\n");
+ for (;;) {
+ // send first buffer from queue
+ skb = skb_dequeue(&bp->SendSkbQueue);
+
+ if (!skb) {
+ PRINTK(KERN_INFO "queue empty\n");
+ return;
+ } // queue empty !
+
+ spin_lock_irqsave(&bp->DriverLock, Flags);
+ fc = skb->data[0];
+ queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
+#ifdef ESS
+ // Check if the frame may/must be sent as a synchronous frame.
+
+ if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
+ // It's an LLC frame.
+ if (!smc->ess.sync_bw_available)
+ fc &= ~FC_SYNC_BIT; // No bandwidth available.
+
+ else { // Bandwidth is available.
+
+ if (smc->mib.fddiESSSynchTxMode) {
+ // Send as sync. frame.
+ fc |= FC_SYNC_BIT;
+ }
+ }
+ }
+#endif // ESS
+ frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
+
+ if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
+ // Unable to send the frame.
+
+ if ((frame_status & RING_DOWN) != 0) {
+ // Ring is down.
+ PRINTK("Tx attempt while ring down.\n");
+ } else if ((frame_status & OUT_OF_TXD) != 0) {
+ PRINTK("%s: out of TXDs.\n", bp->dev->name);
+ } else {
+ PRINTK("%s: out of transmit resources",
+ bp->dev->name);
+ }
+
+ // Note: We will retry the operation as soon as
+ // transmit resources become available.
+ skb_queue_head(&bp->SendSkbQueue, skb);
+ spin_unlock_irqrestore(&bp->DriverLock, Flags);
+ return; // Packet has been queued.
+
+ } // if (unable to send frame)
+
+ bp->QueueSkb++; // one packet less in local queue
+
+ // source address in packet ?
+ CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
+
+ txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
+
+ dma_address = pci_map_single(&bp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (frame_status & LAN_TX) {
+ txd->txd_os.skb = skb; // save skb
+ txd->txd_os.dma_addr = dma_address; // save dma mapping
+ }
+ hwm_tx_frag(smc, skb->data, dma_address, skb->len,
+ frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
+
+ if (!(frame_status & LAN_TX)) { // local only frame
+ pci_unmap_single(&bp->pdev, dma_address,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock_irqrestore(&bp->DriverLock, Flags);
+ } // for
+
+ return; // never reached
+
+} // send_queued_packets
+
+
+/************************
+ *
+ * CheckSourceAddress
+ *
+ * Verify if the source address is set. Insert it if necessary.
+ *
+ ************************/
+void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
+{
+ unsigned char SRBit;
+
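+	/*
+	 * Per the FDDI frame layout documented in mac_drv_rx_complete()
+	 * below, frame[0] is the frame control (FC) byte, frame[1..6] the
+	 * destination address and frame[7..12] the source address;
+	 * frame[1 + 6] is therefore the first SA byte, whose least
+	 * significant bit is the source-routing indicator.
+	 */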
+ if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
+
+ return;
+ if ((unsigned short) frame[1 + 10] != 0)
+ return;
+ SRBit = frame[1 + 6] & 0x01;
+ memcpy(&frame[1 + 6], hw_addr, 6);
+ frame[8] |= SRBit;
+} // CheckSourceAddress
+
+
+/************************
+ *
+ * ResetAdapter
+ *
+ * Reset the adapter and bring it back to operational mode.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ * Out
+ * Nothing.
+ *
+ ************************/
+static void ResetAdapter(struct s_smc *smc)
+{
+
+ PRINTK(KERN_INFO "[fddi: ResetAdapter]\n");
+
+ // Stop the adapter.
+
+ card_stop(smc); // Stop all activity.
+
+ // Clear the transmit and receive descriptor queues.
+ mac_drv_clear_tx_queue(smc);
+ mac_drv_clear_rx_queue(smc);
+
+ // Restart the adapter.
+
+ smt_reset_defaults(smc, 1); // Initialize the SMT module.
+
+ init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
+
+ smt_online(smc, 1); // Insert into the ring again.
+ STI_FBI();
+
+ // Restore original receive mode (multicasts, promiscuous, etc.).
+ skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
+} // ResetAdapter
+
+
+//--------------- functions called by hardware module ----------------
+
+/************************
+ *
+ * llc_restart_tx
+ *
+ * The hardware driver calls this routine when the transmit complete
+ * interrupt bits (end of frame) for the synchronous or asynchronous
+ * queue is set.
+ *
+ * NOTE The hardware driver calls this function also if no packets are queued.
+ * The routine must be able to handle this case.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void llc_restart_tx(struct s_smc *smc)
+{
+ skfddi_priv *bp = &smc->os;
+
+ PRINTK(KERN_INFO "[llc_restart_tx]\n");
+
+ // Try to send queued packets
+ spin_unlock(&bp->DriverLock);
+ send_queued_packets(smc);
+ spin_lock(&bp->DriverLock);
+ netif_start_queue(bp->dev);// system may send again if it was blocked
+
+} // llc_restart_tx
+
+
+/************************
+ *
+ * mac_drv_get_space
+ *
+ * The hardware module calls this function to allocate the memory
+ * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * size - Size of memory in bytes to allocate.
+ * Out
+ * != 0 A pointer to the virtual address of the allocated memory.
+ * == 0 Allocation error.
+ *
+ ************************/
+void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
+{
+ void *virt;
+
+ PRINTK(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
+ virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
+
+ if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
+ printk("Unexpected SMT memory size requested: %d\n", size);
+ return (NULL);
+ }
+ smc->os.SharedMemHeap += size; // Move heap pointer.
+
+ PRINTK(KERN_INFO "mac_drv_get_space end\n");
+ PRINTK(KERN_INFO "virt addr: %lx\n", (ulong) virt);
+ PRINTK(KERN_INFO "bus addr: %lx\n", (ulong)
+ (smc->os.SharedMemDMA +
+ ((char *) virt - (char *)smc->os.SharedMemAddr)));
+ return (virt);
+} // mac_drv_get_space
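+
+/*
+ * Illustrative note: mac_drv_get_space() is a simple bump allocator over
+ * the consistent 'shared' memory area, so successive calls return adjacent
+ * blocks (sizes below are hypothetical):
+ *
+ *	void *a = mac_drv_get_space(smc, 64);	// SharedMemAddr + old heap
+ *	void *b = mac_drv_get_space(smc, 32);	// == (char *) a + 64
+ *
+ * Nothing is freed individually; the whole area is released again in
+ * skfp_remove_one().
+ */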
+
+
+/************************
+ *
+ * mac_drv_get_desc_mem
+ *
+ * This function is called by the hardware dependent module.
+ * It allocates the memory for the RxD and TxD descriptors.
+ *
+ * This memory must be non-cached, non-movable and non-swappable.
+ * This memory should start at a physical page boundary.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * size - Size of memory in bytes to allocate.
+ * Out
+ * != 0 A pointer to the virtual address of the allocated memory.
+ * == 0 Allocation error.
+ *
+ ************************/
+void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
+{
+
+ char *virt;
+
+ PRINTK(KERN_INFO "mac_drv_get_desc_mem\n");
+
+ // Descriptor memory must be aligned on 16-byte boundary.
+
+ virt = mac_drv_get_space(smc, size);
+
+ size = (u_int) (16 - (((unsigned long) virt) & 15UL));
+ size = size % 16;
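+	/*
+	 * Worked example (addresses are hypothetical): if virt ends in
+	 * ...0x9, the gap is (16 - 9) % 16 = 7 bytes; if virt is already
+	 * 16-byte aligned, the gap is (16 - 0) % 16 = 0 bytes, so no
+	 * extra space is consumed.
+	 */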
+
+ PRINTK("Allocate %u bytes alignment gap ", size);
+ PRINTK("for descriptor memory.\n");
+
+ if (!mac_drv_get_space(smc, size)) {
+ printk("fddi: Unable to align descriptor memory.\n");
+ return (NULL);
+ }
+ return (virt + size);
+} // mac_drv_get_desc_mem
+
+
+/************************
+ *
+ * mac_drv_virt2phys
+ *
+ * Get the physical address of a given virtual address.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * virt - A (virtual) pointer into our 'shared' memory area.
+ * Out
+ * Physical address of the given virtual address.
+ *
+ ************************/
+unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
+{
+ return (smc->os.SharedMemDMA +
+ ((char *) virt - (char *)smc->os.SharedMemAddr));
+} // mac_drv_virt2phys
+
+
+/************************
+ *
+ * dma_master
+ *
+ * The HWM calls this function when the driver performs a DMA transfer.
+ * If the OS-specific module must prepare the system hardware for the
+ * DMA transfer, it should do so in this function.
+ *
+ * The hardware module calls this dma_master if it wants to send an SMT
+ * frame. This means that the virt address passed in here is part of
+ * the 'shared' memory area.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * virt - The virtual address of the data.
+ *
+ * len - The length in bytes of the data.
+ *
+ * flag - Indicates the transmit direction and the buffer type:
+ * DMA_RD (0x01) system RAM ==> adapter buffer memory
+ * DMA_WR (0x02) adapter buffer memory ==> system RAM
+ * SMT_BUF (0x80) SMT buffer
+ *
+ * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
+ * Out
+ * Returns the physical address for the DMA transfer.
+ *
+ ************************/
+u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
+{
+ return (smc->os.SharedMemDMA +
+ ((char *) virt - (char *)smc->os.SharedMemAddr));
+} // dma_master
+
+
+/************************
+ *
+ * dma_complete
+ *
+ * The hardware module calls this routine when it has completed a DMA
+ * transfer. If the operating system dependent module has set up the DMA
+ * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
+ * the DMA channel.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * descr - A pointer to a TxD or RxD, respectively.
+ *
+ * flag - Indicates the DMA transfer direction / SMT buffer:
+ * DMA_RD (0x01) system RAM ==> adapter buffer memory
+ * DMA_WR (0x02) adapter buffer memory ==> system RAM
+ * SMT_BUF (0x80) SMT buffer (managed by HWM)
+ * Out
+ * Nothing.
+ *
+ ************************/
+void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
+{
+ /* For TX buffers, there are two cases. If it is an SMT transmit
+ * buffer, there is nothing to do since we use consistent memory
+ * for the 'shared' memory area. The other case is for normal
+ * transmit packets given to us by the networking stack, and in
+ * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
+ * below.
+ *
+ * For RX buffers, we have to unmap dynamic PCI DMA mappings here
+ * because the hardware module is about to potentially look at
+ * the contents of the buffer. If we did not call the PCI DMA
+ * unmap first, the hardware module could read inconsistent data.
+ */
+ if (flag & DMA_WR) {
+ skfddi_priv *bp = &smc->os;
+ volatile struct s_smt_fp_rxd *r = &descr->r;
+
+ /* If SKB is NULL, we used the local buffer. */
+ if (r->rxd_os.skb && r->rxd_os.dma_addr) {
+ int MaxFrameSize = bp->MaxFrameSize;
+
+ pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
+ MaxFrameSize, PCI_DMA_FROMDEVICE);
+ r->rxd_os.dma_addr = 0;
+ }
+ }
+} // dma_complete
+
+
+/************************
+ *
+ * mac_drv_tx_complete
+ *
+ * Transmission of a packet is complete. Release the tx staging buffer.
+ *
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * txd - A pointer to the last TxD which is used by the frame.
+ * Out
+ * Returns nothing.
+ *
+ ************************/
+void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
+{
+ struct sk_buff *skb;
+
+ PRINTK(KERN_INFO "entering mac_drv_tx_complete\n");
+ // Check if this TxD points to a skb
+
+ if (!(skb = txd->txd_os.skb)) {
+ PRINTK("TXD with no skb assigned.\n");
+ return;
+ }
+ txd->txd_os.skb = NULL;
+
+ // release the DMA mapping
+ pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
+ skb->len, PCI_DMA_TODEVICE);
+ txd->txd_os.dma_addr = 0;
+
+ smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
+ smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
+
+ // free the skb
+ dev_kfree_skb_irq(skb);
+
+ PRINTK(KERN_INFO "leaving mac_drv_tx_complete\n");
+} // mac_drv_tx_complete
+
+
+/************************
+ *
+ * dump packets to logfile
+ *
+ ************************/
+#ifdef DUMPPACKETS
+void dump_data(unsigned char *Data, int length)
+{
+ int i, j;
+ unsigned char s[255], sh[10];
+ if (length > 64) {
+ length = 64;
+ }
+ printk(KERN_INFO "---Packet start---\n");
+ for (i = 0, j = 0; i < length / 8; i++, j += 8)
+ printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
+ Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
+ strcpy(s, "");
+ for (i = 0; i < length % 8; i++) {
+ sprintf(sh, "%02x ", Data[j + i]);
+ strcat(s, sh);
+ }
+ printk(KERN_INFO "%s\n", s);
+ printk(KERN_INFO "------------------\n");
+} // dump_data
+#else
+#define dump_data(data,len)
+#endif // DUMPPACKETS
+
+/************************
+ *
+ * mac_drv_rx_complete
+ *
+ * The hardware module calls this function if an LLC frame is received
+ * in a receive buffer. Also the SMT, NSA, and directed beacon frames
+ * from the network will be passed to the LLC layer by this function
+ * if passing is enabled.
+ *
+ * mac_drv_rx_complete forwards the frame to the LLC layer if it should
+ * be received. It also fills the RxD ring with new receive buffers if
+ * some can be queued.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * rxd - A pointer to the first RxD which is used by the receive frame.
+ *
+ * frag_count - Count of RxDs used by the received frame.
+ *
+ * len - Frame length.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count, int len)
+{
+ skfddi_priv *bp = &smc->os;
+ struct sk_buff *skb;
+ unsigned char *virt, *cp;
+ unsigned short ri;
+ u_int RifLength;
+
+ PRINTK(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
+ if (frag_count != 1) { // This is not allowed to happen.
+
+ printk("fddi: Multi-fragment receive!\n");
+ goto RequeueRxd; // Re-use the given RXD(s).
+
+ }
+ skb = rxd->rxd_os.skb;
+ if (!skb) {
+ PRINTK(KERN_INFO "No skb in rxd\n");
+ smc->os.MacStat.gen.rx_errors++;
+ goto RequeueRxd;
+ }
+ virt = skb->data;
+
+ // The DMA mapping was released in dma_complete above.
+
+ dump_data(skb->data, len);
+
+ /*
+ * FDDI Frame format:
+ * +-------+-------+-------+------------+--------+------------+
+ * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
+ * +-------+-------+-------+------------+--------+------------+
+ *
+ * FC = Frame Control
+ * DA = Destination Address
+ * SA = Source Address
+ * RIF = Routing Information Field
+ * LLC = Logical Link Control
+ */
+
+ // Remove Routing Information Field (RIF), if present.
+
+ if ((virt[1 + 6] & FDDI_RII) == 0)
+ RifLength = 0;
+ else {
+ int n;
+// goos: RIF removal has still to be tested
+ PRINTK(KERN_INFO "RIF found\n");
+ // Get RIF length from Routing Control (RC) field.
+ cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
+
+ ri = ntohs(*((unsigned short *) cp));
+ RifLength = ri & FDDI_RCF_LEN_MASK;
+ if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
+ printk("fddi: Invalid RIF.\n");
+ goto RequeueRxd; // Discard the frame.
+
+ }
+ virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
+ // regions overlap
+
+ virt = cp + RifLength;
+ for (n = FDDI_MAC_HDR_LEN; n; n--)
+ *--virt = *--cp;
+		// adjust skb->data pointer
+ skb_pull(skb, RifLength);
+ len -= RifLength;
+ RifLength = 0;
+ }
+
+ // Count statistics.
+ smc->os.MacStat.gen.rx_packets++; // Count indicated receive
+ // packets.
+ smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
+
+ // virt points to header again
+ if (virt[1] & 0x01) { // Check group (multicast) bit.
+
+ smc->os.MacStat.gen.multicast++;
+ }
+
+ // deliver frame to system
+ rxd->rxd_os.skb = NULL;
+ skb_trim(skb, len);
+ skb->protocol = fddi_type_trans(skb, bp->dev);
+ skb->dev = bp->dev; /* pass up device pointer */
+
+ netif_rx(skb);
+ bp->dev->last_rx = jiffies;
+
+ HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
+ return;
+
+ RequeueRxd:
+ PRINTK(KERN_INFO "Rx: re-queue RXD.\n");
+ mac_drv_requeue_rxd(smc, rxd, frag_count);
+ smc->os.MacStat.gen.rx_errors++; // Count receive packets
+ // not indicated.
+
+} // mac_drv_rx_complete
+
+
+/************************
+ *
+ * mac_drv_requeue_rxd
+ *
+ * The hardware module calls this function to request the OS-specific
+ * module to queue the receive buffer(s) represented by the pointer
+ * to the RxD and the frag_count into the receive queue again. This
+ * buffer was filled with an invalid frame or an SMT frame.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * rxd - A pointer to the first RxD which is used by the receive frame.
+ *
+ * frag_count - Count of RxDs used by the received frame.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count)
+{
+ volatile struct s_smt_fp_rxd *next_rxd;
+ volatile struct s_smt_fp_rxd *src_rxd;
+ struct sk_buff *skb;
+ int MaxFrameSize;
+ unsigned char *v_addr;
+ dma_addr_t b_addr;
+
+ if (frag_count != 1) // This is not allowed to happen.
+
+ printk("fddi: Multi-fragment requeue!\n");
+
+ MaxFrameSize = smc->os.MaxFrameSize;
+ src_rxd = rxd;
+ for (; frag_count > 0; frag_count--) {
+ next_rxd = src_rxd->rxd_next;
+ rxd = HWM_GET_CURR_RXD(smc);
+
+ skb = src_rxd->rxd_os.skb;
+ if (skb == NULL) { // this should not happen
+
+ PRINTK("Requeue with no skb in rxd!\n");
+ skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
+ if (skb) {
+ // we got a skb
+ rxd->rxd_os.skb = skb;
+ skb_reserve(skb, 3);
+ skb_put(skb, MaxFrameSize);
+ v_addr = skb->data;
+ b_addr = pci_map_single(&smc->os.pdev,
+ v_addr,
+ MaxFrameSize,
+ PCI_DMA_FROMDEVICE);
+ rxd->rxd_os.dma_addr = b_addr;
+ } else {
+ // no skb available, use local buffer
+ PRINTK("Queueing invalid buffer!\n");
+ rxd->rxd_os.skb = NULL;
+ v_addr = smc->os.LocalRxBuffer;
+ b_addr = smc->os.LocalRxBufferDMA;
+ }
+ } else {
+ // we use skb from old rxd
+ rxd->rxd_os.skb = skb;
+ v_addr = skb->data;
+ b_addr = pci_map_single(&smc->os.pdev,
+ v_addr,
+ MaxFrameSize,
+ PCI_DMA_FROMDEVICE);
+ rxd->rxd_os.dma_addr = b_addr;
+ }
+ hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
+ FIRST_FRAG | LAST_FRAG);
+
+ src_rxd = next_rxd;
+ }
+} // mac_drv_requeue_rxd
+
+
+/************************
+ *
+ * mac_drv_fill_rxd
+ *
+ * The hardware module calls this function at initialization time
+ * to fill the RxD ring with receive buffers. It is also called by
+ * mac_drv_rx_complete if rx_free is large enough to queue some new
+ * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
+ * receive buffers as long as enough RxDs and receive buffers are
+ * available.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void mac_drv_fill_rxd(struct s_smc *smc)
+{
+ int MaxFrameSize;
+ unsigned char *v_addr;
+ unsigned long b_addr;
+ struct sk_buff *skb;
+ volatile struct s_smt_fp_rxd *rxd;
+
+ PRINTK(KERN_INFO "entering mac_drv_fill_rxd\n");
+
+ // Walk through the list of free receive buffers, passing receive
+ // buffers to the HWM as long as RXDs are available.
+
+ MaxFrameSize = smc->os.MaxFrameSize;
+ // Check if there is any RXD left.
+ while (HWM_GET_RX_FREE(smc) > 0) {
+ PRINTK(KERN_INFO ".\n");
+
+ rxd = HWM_GET_CURR_RXD(smc);
+ skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
+ if (skb) {
+ // we got a skb
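+			// Note: the 3 bytes reserved here are presumably meant
+			// to 4-byte align the data that follows the 13 byte
+			// FDDI MAC header (FC + DA + SA).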
+ skb_reserve(skb, 3);
+ skb_put(skb, MaxFrameSize);
+ v_addr = skb->data;
+ b_addr = pci_map_single(&smc->os.pdev,
+ v_addr,
+ MaxFrameSize,
+ PCI_DMA_FROMDEVICE);
+ rxd->rxd_os.dma_addr = b_addr;
+ } else {
+ // no skb available, use local buffer
+ // System has run out of buffer memory, but we want to
+ // keep the receiver running in hope of better times.
+ // Multiple descriptors may point to this local buffer,
+ // so data in it must be considered invalid.
+ PRINTK("Queueing invalid buffer!\n");
+ v_addr = smc->os.LocalRxBuffer;
+ b_addr = smc->os.LocalRxBufferDMA;
+ }
+
+ rxd->rxd_os.skb = skb;
+
+ // Pass receive buffer to HWM.
+ hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
+ FIRST_FRAG | LAST_FRAG);
+ }
+ PRINTK(KERN_INFO "leaving mac_drv_fill_rxd\n");
+} // mac_drv_fill_rxd
+
+
+/************************
+ *
+ * mac_drv_clear_rxd
+ *
+ * The hardware module calls this function to release unused
+ * receive buffers.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * rxd - A pointer to the first RxD which is used by the receive buffer.
+ *
+ * frag_count - Count of RxDs used by the receive buffer.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
+ int frag_count)
+{
+
+ struct sk_buff *skb;
+
+ PRINTK("entering mac_drv_clear_rxd\n");
+
+ if (frag_count != 1) // This is not allowed to happen.
+
+ printk("fddi: Multi-fragment clear!\n");
+
+ for (; frag_count > 0; frag_count--) {
+ skb = rxd->rxd_os.skb;
+ if (skb != NULL) {
+ skfddi_priv *bp = &smc->os;
+ int MaxFrameSize = bp->MaxFrameSize;
+
+ pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
+ MaxFrameSize, PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(skb);
+ rxd->rxd_os.skb = NULL;
+ }
+ rxd = rxd->rxd_next; // Next RXD.
+
+ }
+} // mac_drv_clear_rxd
+
+
+/************************
+ *
+ * mac_drv_rx_init
+ *
+ * The hardware module calls this routine when an SMT or NSA frame of the
+ * local SMT should be delivered to the LLC layer.
+ *
+ * It is necessary to have this function, because there is no other way to
+ * copy the contents of SMT MBufs into receive buffers.
+ *
+ * mac_drv_rx_init allocates the required target memory for this frame,
+ * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
+ *
+ * fc - The Frame Control field of the received frame.
+ *
+ * look_ahead - A pointer to the lookahead data buffer (may be NULL).
+ *
+ * la_len - The length of the lookahead data stored in the lookahead
+ * buffer (may be zero).
+ * Out
+ * Always returns zero (0).
+ *
+ ************************/
+int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
+ char *look_ahead, int la_len)
+{
+ struct sk_buff *skb;
+
+ PRINTK("entering mac_drv_rx_init(len=%d)\n", len);
+
+	// "Received" an SMT or NSA frame of the local SMT.
+
+ if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
+ PRINTK("fddi: Discard invalid local SMT frame\n");
+ PRINTK(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
+ len, la_len, (unsigned long) look_ahead);
+ return (0);
+ }
+ skb = alloc_skb(len + 3, GFP_ATOMIC);
+ if (!skb) {
+ PRINTK("fddi: Local SMT: skb memory exhausted.\n");
+ return (0);
+ }
+ skb_reserve(skb, 3);
+ skb_put(skb, len);
+ memcpy(skb->data, look_ahead, len);
+
+ // deliver frame to system
+ skb->protocol = fddi_type_trans(skb, smc->os.dev);
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+
+ return (0);
+} // mac_drv_rx_init
+
+
+/************************
+ *
+ * smt_timer_poll
+ *
+ * This routine is called periodically by the SMT module to clean up the
+ * driver.
+ *
+ * Return any queued frames back to the upper protocol layers if the ring
+ * is down.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void smt_timer_poll(struct s_smc *smc)
+{
+} // smt_timer_poll
+
+
+/************************
+ *
+ * ring_status_indication
+ *
+ * This function indicates a change of the ring state.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * status - The current ring status.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void ring_status_indication(struct s_smc *smc, u_long status)
+{
+ PRINTK("ring_status_indication( ");
+ if (status & RS_RES15)
+ PRINTK("RS_RES15 ");
+ if (status & RS_HARDERROR)
+ PRINTK("RS_HARDERROR ");
+ if (status & RS_SOFTERROR)
+ PRINTK("RS_SOFTERROR ");
+ if (status & RS_BEACON)
+ PRINTK("RS_BEACON ");
+ if (status & RS_PATHTEST)
+ PRINTK("RS_PATHTEST ");
+ if (status & RS_SELFTEST)
+ PRINTK("RS_SELFTEST ");
+ if (status & RS_RES9)
+ PRINTK("RS_RES9 ");
+ if (status & RS_DISCONNECT)
+ PRINTK("RS_DISCONNECT ");
+ if (status & RS_RES7)
+ PRINTK("RS_RES7 ");
+ if (status & RS_DUPADDR)
+ PRINTK("RS_DUPADDR ");
+ if (status & RS_NORINGOP)
+ PRINTK("RS_NORINGOP ");
+ if (status & RS_VERSION)
+ PRINTK("RS_VERSION ");
+ if (status & RS_STUCKBYPASSS)
+ PRINTK("RS_STUCKBYPASSS ");
+ if (status & RS_EVENT)
+ PRINTK("RS_EVENT ");
+ if (status & RS_RINGOPCHANGE)
+ PRINTK("RS_RINGOPCHANGE ");
+ if (status & RS_RES0)
+ PRINTK("RS_RES0 ");
+	PRINTK(")\n");
+} // ring_status_indication
+
+
+/************************
+ *
+ * smt_get_time
+ *
+ * Gets the current time from the system.
+ * Args
+ * None.
+ * Out
+ *	The current time, measured in timer ticks (see TICKS_PER_SECOND).
+ *
+ *	TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
+ *	defined in "targetos.h" and must be consistent with the time values
+ *	returned by smt_get_time().
+ *
+ ************************/
+unsigned long smt_get_time(void)
+{
+ return jiffies;
+} // smt_get_time
+
+
+/************************
+ *
+ * smt_stat_counter
+ *
+ * Status counter update (ring_op, fifo full).
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * stat - = 0: A ring operational change occurred.
+ * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void smt_stat_counter(struct s_smc *smc, int stat)
+{
+// BOOLEAN RingIsUp ;
+
+ PRINTK(KERN_INFO "smt_stat_counter\n");
+ switch (stat) {
+ case 0:
+ PRINTK(KERN_INFO "Ring operational change.\n");
+ break;
+ case 1:
+ PRINTK(KERN_INFO "Receive fifo overflow.\n");
+ smc->os.MacStat.gen.rx_errors++;
+ break;
+ default:
+ PRINTK(KERN_INFO "Unknown status (%d).\n", stat);
+ break;
+ }
+} // smt_stat_counter
+
+
+/************************
+ *
+ * cfm_state_change
+ *
+ * Sets CFM state in custom statistics.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * c_state - Possible values are:
+ *
+ *	SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
+ *	SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
+ * Out
+ * Nothing.
+ *
+ ************************/
+void cfm_state_change(struct s_smc *smc, int c_state)
+{
+#ifdef DRIVERDEBUG
+ char *s;
+
+ switch (c_state) {
+ case SC0_ISOLATED:
+ s = "SC0_ISOLATED";
+ break;
+ case SC1_WRAP_A:
+ s = "SC1_WRAP_A";
+ break;
+ case SC2_WRAP_B:
+ s = "SC2_WRAP_B";
+ break;
+ case SC4_THRU_A:
+ s = "SC4_THRU_A";
+ break;
+ case SC5_THRU_B:
+ s = "SC5_THRU_B";
+ break;
+ case SC7_WRAP_S:
+ s = "SC7_WRAP_S";
+ break;
+ case SC9_C_WRAP_A:
+ s = "SC9_C_WRAP_A";
+ break;
+ case SC10_C_WRAP_B:
+ s = "SC10_C_WRAP_B";
+ break;
+ case SC11_C_WRAP_S:
+ s = "SC11_C_WRAP_S";
+ break;
+ default:
+ PRINTK(KERN_INFO "cfm_state_change: unknown %d\n", c_state);
+ return;
+ }
+ PRINTK(KERN_INFO "cfm_state_change: %s\n", s);
+#endif // DRIVERDEBUG
+} // cfm_state_change
+
+
+/************************
+ *
+ * ecm_state_change
+ *
+ * Sets ECM state in custom statistics.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * e_state - Possible values are:
+ *
+ *	EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
+ *	EC5_INSERT, EC6_CHECK, EC7_DEINSERT
+ * Out
+ * Nothing.
+ *
+ ************************/
+void ecm_state_change(struct s_smc *smc, int e_state)
+{
+#ifdef DRIVERDEBUG
+ char *s;
+
+ switch (e_state) {
+ case EC0_OUT:
+ s = "EC0_OUT";
+ break;
+ case EC1_IN:
+ s = "EC1_IN";
+ break;
+ case EC2_TRACE:
+ s = "EC2_TRACE";
+ break;
+ case EC3_LEAVE:
+ s = "EC3_LEAVE";
+ break;
+ case EC4_PATH_TEST:
+ s = "EC4_PATH_TEST";
+ break;
+ case EC5_INSERT:
+ s = "EC5_INSERT";
+ break;
+ case EC6_CHECK:
+ s = "EC6_CHECK";
+ break;
+ case EC7_DEINSERT:
+ s = "EC7_DEINSERT";
+ break;
+ default:
+ s = "unknown";
+ break;
+ }
+ PRINTK(KERN_INFO "ecm_state_change: %s\n", s);
+#endif //DRIVERDEBUG
+} // ecm_state_change
+
+
+/************************
+ *
+ * rmt_state_change
+ *
+ * Sets RMT state in custom statistics.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ *
+ * r_state - Possible values are:
+ *
+ * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
+ * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
+ * Out
+ * Nothing.
+ *
+ ************************/
+void rmt_state_change(struct s_smc *smc, int r_state)
+{
+#ifdef DRIVERDEBUG
+ char *s;
+
+ switch (r_state) {
+ case RM0_ISOLATED:
+ s = "RM0_ISOLATED";
+ break;
+ case RM1_NON_OP:
+ s = "RM1_NON_OP - not operational";
+ break;
+ case RM2_RING_OP:
+ s = "RM2_RING_OP - ring operational";
+ break;
+ case RM3_DETECT:
+ s = "RM3_DETECT - detect dupl addresses";
+ break;
+ case RM4_NON_OP_DUP:
+ s = "RM4_NON_OP_DUP - dupl. addr detected";
+ break;
+ case RM5_RING_OP_DUP:
+ s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
+ break;
+ case RM6_DIRECTED:
+ s = "RM6_DIRECTED - sending directed beacons";
+ break;
+ case RM7_TRACE:
+ s = "RM7_TRACE - trace initiated";
+ break;
+ default:
+ s = "unknown";
+ break;
+ }
+ PRINTK(KERN_INFO "[rmt_state_change: %s]\n", s);
+#endif // DRIVERDEBUG
+} // rmt_state_change
+
+
+/************************
+ *
+ * drv_reset_indication
+ *
+ * This function is called by the SMT when it has detected a severe
+ * hardware problem. The driver should perform a reset on the adapter
+ * as soon as possible, but not from within this function.
+ * Args
+ * smc - A pointer to the SMT context struct.
+ * Out
+ * Nothing.
+ *
+ ************************/
+void drv_reset_indication(struct s_smc *smc)
+{
+ PRINTK(KERN_INFO "entering drv_reset_indication\n");
+
+ smc->os.ResetRequested = TRUE; // Set flag.
+
+} // drv_reset_indication
+
+static struct pci_driver skfddi_pci_driver = {
+ .name = "skfddi",
+ .id_table = skfddi_pci_tbl,
+ .probe = skfp_init_one,
+ .remove = __devexit_p(skfp_remove_one),
+};
+
+static int __init skfd_init(void)
+{
+ return pci_module_init(&skfddi_pci_driver);
+}
+
+static void __exit skfd_exit(void)
+{
+ pci_unregister_driver(&skfddi_pci_driver);
+}
+
+module_init(skfd_init);
+module_exit(skfd_exit);
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
new file mode 100644
index 000000000000..71935eaf9d4e
--- /dev/null
+++ b/drivers/net/skfp/smt.c
@@ -0,0 +1,2097 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smt_p.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
+#endif
+
+extern const u_char canonical[256] ;
+
+/*
+ * FC in SMbuf
+ */
+#define m_fc(mb) ((mb)->sm_data[0])
+
+#define SMT_TID_MAGIC 0x1f0a7b3c
+
+#ifdef DEBUG
+static const char *const smt_type_name[] = {
+ "SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??",
+ "SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??",
+ "SMT_08??", "SMT_09??", "SMT_0A??", "SMT_0B??",
+ "SMT_0C??", "SMT_0D??", "SMT_0E??", "SMT_NSA"
+} ;
+
+static const char *const smt_class_name[] = {
+ "UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF",
+ "SRF","PMF_GET","PMF_SET","ESF"
+} ;
+#endif
+#define LAST_CLASS (SMT_PMF_SET)
+
+static const struct fddi_addr SMT_Unknown = {
+ { 0,0,0x1f,0,0,0 }
+} ;
+
+/*
+ * external variables
+ */
+extern const struct fddi_addr fddi_broadcast ;
+
+/*
+ * external functions
+ */
+int pcm_status_twisted(struct s_smc *smc);
+
+/*
+ * function prototypes
+ */
+#ifdef LITTLE_ENDIAN
+static int smt_swap_short(u_short s);
+#endif
+static int mac_index(struct s_smc *smc, int mac);
+static int phy_index(struct s_smc *smc, int phy);
+static int mac_con_resource_index(struct s_smc *smc, int mac);
+static int phy_con_resource_index(struct s_smc *smc, int phy);
+static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
+ int local);
+static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest,
+ int fc, u_long tid, int type, int local);
+static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
+ u_long tid, int type, int len);
+static void smt_echo_test(struct s_smc *smc, int dna);
+static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
+ u_long tid, int local);
+static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
+ u_long tid, int local);
+#ifdef LITTLE_ENDIAN
+static void smt_string_swap(void);
+#endif
+static void smt_add_frame_len(SMbuf *mb, int len);
+static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una);
+static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde);
+static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state);
+static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts);
+static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy);
+static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency);
+static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor);
+static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path);
+static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st);
+static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy);
+static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers);
+static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc);
+static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc);
+static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc);
+static void smt_fill_manufacturer(struct s_smc *smc,
+ struct smp_p_manufacturer *man);
+static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user);
+static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount);
+static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed,
+ int len);
+
+void smt_clear_una_dna(struct s_smc *smc);
+static void smt_clear_old_una_dna(struct s_smc *smc);
+#ifdef CONCENTRATOR
+static int entity_to_index(void);
+#endif
+static void update_dac(struct s_smc *smc, int report);
+static int div_ratio(u_long upper, u_long lower);
+#ifdef USE_CAN_ADDR
+void hwm_conv_can(struct s_smc *smc, char *data, int len);
+#else
+#define hwm_conv_can(smc,data,len)
+#endif
+
+
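+/*
+ * Address helpers: FDDI addresses are 6 bytes long and are compared as three
+ * 16-bit halves for speed. This relies on struct fddi_addr being at least
+ * 2-byte aligned.
+ */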
+static inline int is_my_addr(const struct s_smc *smc,
+ const struct fddi_addr *addr)
+{
+ return(*(short *)(&addr->a[0]) ==
+ *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[0])
+ && *(short *)(&addr->a[2]) ==
+ *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[2])
+ && *(short *)(&addr->a[4]) ==
+ *(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[4])) ;
+}
+
+static inline int is_broadcast(const struct fddi_addr *addr)
+{
+ return(*(u_short *)(&addr->a[0]) == 0xffff &&
+ *(u_short *)(&addr->a[2]) == 0xffff &&
+ *(u_short *)(&addr->a[4]) == 0xffff ) ;
+}
+
+static inline int is_individual(const struct fddi_addr *addr)
+{
+ return(!(addr->a[0] & GROUP_ADDR)) ;
+}
+
+static inline int is_equal(const struct fddi_addr *addr1,
+ const struct fddi_addr *addr2)
+{
+ return(*(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
+ *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
+ *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]) ) ;
+}
+
+/*
+ * list of mandatory paras in frames
+ */
+static const u_short plist_nif[] = { SMT_P_UNA,SMT_P_SDE,SMT_P_STATE,0 } ;
+
+/*
+ * init SMT agent
+ */
+void smt_agent_init(struct s_smc *smc)
+{
+ int i ;
+
+ /*
+ * get MAC address
+ */
+ smc->mib.m[MAC0].fddiMACSMTAddress = smc->hw.fddi_home_addr ;
+
+ /*
+ * get OUI address from driver (bia == built-in-address)
+ */
+ smc->mib.fddiSMTStationId.sid_oem[0] = 0 ;
+ smc->mib.fddiSMTStationId.sid_oem[1] = 0 ;
+ driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
+ for (i = 0 ; i < 6 ; i ++) {
+ smc->mib.fddiSMTStationId.sid_node.a[i] =
+ canonical[smc->mib.fddiSMTStationId.sid_node.a[i]] ;
+ }
+ smc->mib.fddiSMTManufacturerData[0] =
+ smc->mib.fddiSMTStationId.sid_node.a[0] ;
+ smc->mib.fddiSMTManufacturerData[1] =
+ smc->mib.fddiSMTStationId.sid_node.a[1] ;
+ smc->mib.fddiSMTManufacturerData[2] =
+ smc->mib.fddiSMTStationId.sid_node.a[2] ;
+ smc->sm.smt_tid = 0 ;
+ smc->mib.m[MAC0].fddiMACDupAddressTest = DA_NONE ;
+ smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
+#ifndef SLIM_SMT
+ smt_clear_una_dna(smc) ;
+ smt_clear_old_una_dna(smc) ;
+#endif
+ for (i = 0 ; i < SMT_MAX_TEST ; i++)
+ smc->sm.pend[i] = 0 ;
+ smc->sm.please_reconnect = 0 ;
+ smc->sm.uniq_ticks = 0 ;
+}
+
+/*
+ * SMT task
+ * forever
+ * delay 30 seconds
+ * send NIF
+ * check tvu & tvd
+ * end
+ */
+void smt_agent_task(struct s_smc *smc)
+{
+ smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
+ EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
+ DB_SMT("SMT agent task\n",0,0) ;
+}
+
+void smt_please_reconnect(struct s_smc *smc, int reconn_time)
+/* struct s_smc *smc; Pointer to SMT context */
+/* int reconn_time; Wait for reconnect time in seconds */
+{
+ /*
+ * The please reconnect variable is used as a timer.
+ * It is decremented each time smt_event is called.
+ * This happens every second or when smt_force_irq is called.
+	 * Note: smt_force_irq() is called on some packet receives and
+	 *       when a multicast address is changed. Since nothing is
+	 *       received while disconnected and multicast address changes
+	 *       are infrequent, the timer runs out close to its given
+	 *       value (reconn_time).
+ */
+ smc->sm.please_reconnect = reconn_time ;
+}
+
+#ifndef SMT_REAL_TOKEN_CT
+void smt_emulate_token_ct(struct s_smc *smc, int mac_index)
+{
+ u_long count;
+ u_long time;
+
+
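+	/*
+	 * Estimate how many tokens have passed since the last update. The
+	 * emulation assumes roughly one token rotation every 10 ms, i.e.
+	 * about 100 tokens per second.
+	 */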
+ time = smt_get_time();
+ count = ((time - smc->sm.last_tok_time[mac_index]) *
+ 100)/TICKS_PER_SECOND;
+
+ /*
+	 * Only when the ring is up will we have a token count. The
+	 * flag is unfortunately a single-instance value. This
+	 * doesn't matter for now, because we currently have only
+	 * one MAC instance.
+ */
+ if (smc->hw.mac_ring_is_up){
+ smc->mib.m[mac_index].fddiMACToken_Ct += count;
+ }
+
+ /* Remember current time */
+ smc->sm.last_tok_time[mac_index] = time;
+
+}
+#endif
+
+/*ARGSUSED1*/
+void smt_event(struct s_smc *smc, int event)
+{
+ u_long time ;
+#ifndef SMT_REAL_TOKEN_CT
+ int i ;
+#endif
+
+
+ if (smc->sm.please_reconnect) {
+ smc->sm.please_reconnect -- ;
+ if (smc->sm.please_reconnect == 0) {
+ /* Counted down */
+ queue_event(smc,EVENT_ECM,EC_CONNECT) ;
+ }
+ }
+
+ if (event == SM_FAST)
+ return ;
+
+ /*
+ * timer for periodic cleanup in driver
+ * reset and start the watchdog (FM2)
+ * ESS timer
+ * SBA timer
+ */
+ smt_timer_poll(smc) ;
+ smt_start_watchdog(smc) ;
+#ifndef SLIM_SMT
+#ifndef BOOT
+#ifdef ESS
+ ess_timer_poll(smc) ;
+#endif
+#endif
+#ifdef SBA
+ sba_timer_poll(smc) ;
+#endif
+
+ smt_srf_event(smc,0,0,0) ;
+
+#endif /* no SLIM_SMT */
+
+ time = smt_get_time() ;
+
+ if (time - smc->sm.smt_last_lem >= TICKS_PER_SECOND*8) {
+ /*
+		 * Use an 8 second interval; it simplifies the
+		 * LER estimation.
+ */
+ struct fddi_mib_m *mib ;
+ u_long upper ;
+ u_long lower ;
+ int cond ;
+ int port;
+ struct s_phy *phy ;
+ /*
+ * calculate LEM bit error rate
+ */
+ sm_lem_evaluate(smc) ;
+ smc->sm.smt_last_lem = time ;
+
+ /*
+ * check conditions
+ */
+#ifndef SLIM_SMT
+ mac_update_counter(smc) ;
+ mib = smc->mib.m ;
+ upper =
+ (mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) +
+ (mib->fddiMACError_Ct - mib->fddiMACOld_Error_Ct) ;
+ lower =
+ (mib->fddiMACFrame_Ct - mib->fddiMACOld_Frame_Ct) +
+ (mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) ;
+ mib->fddiMACFrameErrorRatio = div_ratio(upper,lower) ;
+
+ cond =
+ ((!mib->fddiMACFrameErrorThreshold &&
+ mib->fddiMACError_Ct != mib->fddiMACOld_Error_Ct) ||
+ (mib->fddiMACFrameErrorRatio >
+ mib->fddiMACFrameErrorThreshold)) ;
+
+ if (cond != mib->fddiMACFrameErrorFlag)
+ smt_srf_event(smc,SMT_COND_MAC_FRAME_ERROR,
+ INDEX_MAC,cond) ;
+
+ upper =
+ (mib->fddiMACNotCopied_Ct - mib->fddiMACOld_NotCopied_Ct) ;
+ lower =
+ upper +
+ (mib->fddiMACCopied_Ct - mib->fddiMACOld_Copied_Ct) ;
+ mib->fddiMACNotCopiedRatio = div_ratio(upper,lower) ;
+
+ cond =
+ ((!mib->fddiMACNotCopiedThreshold &&
+ mib->fddiMACNotCopied_Ct !=
+ mib->fddiMACOld_NotCopied_Ct)||
+ (mib->fddiMACNotCopiedRatio >
+ mib->fddiMACNotCopiedThreshold)) ;
+
+ if (cond != mib->fddiMACNotCopiedFlag)
+ smt_srf_event(smc,SMT_COND_MAC_NOT_COPIED,
+ INDEX_MAC,cond) ;
+
+ /*
+ * set old values
+ */
+ mib->fddiMACOld_Frame_Ct = mib->fddiMACFrame_Ct ;
+ mib->fddiMACOld_Copied_Ct = mib->fddiMACCopied_Ct ;
+ mib->fddiMACOld_Error_Ct = mib->fddiMACError_Ct ;
+ mib->fddiMACOld_Lost_Ct = mib->fddiMACLost_Ct ;
+ mib->fddiMACOld_NotCopied_Ct = mib->fddiMACNotCopied_Ct ;
+
+ /*
+ * Check port EBError Condition
+ */
+ for (port = 0; port < NUMPHYS; port ++) {
+ phy = &smc->y[port] ;
+
+ if (!phy->mib->fddiPORTHardwarePresent) {
+ continue;
+ }
+
+ cond = (phy->mib->fddiPORTEBError_Ct -
+ phy->mib->fddiPORTOldEBError_Ct > 5) ;
+
+			/* Set the condition if more than 5 elasticity buffer
+			 * errors occurred within the 8 second interval.
+			 */
+ smt_srf_event(smc,SMT_COND_PORT_EB_ERROR,
+ (int) (INDEX_PORT+ phy->np) ,cond) ;
+
+ /*
+ * set old values
+ */
+ phy->mib->fddiPORTOldEBError_Ct =
+ phy->mib->fddiPORTEBError_Ct ;
+ }
+
+#endif /* no SLIM_SMT */
+ }
+
+#ifndef SLIM_SMT
+
+ if (time - smc->sm.smt_last_notify >= (u_long)
+ (smc->mib.fddiSMTTT_Notify * TICKS_PER_SECOND) ) {
+ /*
+ * we can either send an announcement or a request
+ * a request will trigger a reply so that we can update
+ * our dna
+ * note: same tid must be used until reply is received
+ */
+ if (!smc->sm.pend[SMT_TID_NIF])
+ smc->sm.pend[SMT_TID_NIF] = smt_get_tid(smc) ;
+ smt_send_nif(smc,&fddi_broadcast, FC_SMT_NSA,
+ smc->sm.pend[SMT_TID_NIF], SMT_REQUEST,0) ;
+ smc->sm.smt_last_notify = time ;
+ }
+
+ /*
+ * check timer
+ */
+ if (smc->sm.smt_tvu &&
+ time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) {
+ DB_SMT("SMT : UNA expired\n",0,0) ;
+ smc->sm.smt_tvu = 0 ;
+
+ if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr,
+ &SMT_Unknown)){
+ /* Do not update unknown address */
+ smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
+ smc->mib.m[MAC0].fddiMACUpstreamNbr ;
+ }
+ smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ;
+ smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
+ /*
+ * Make sure the fddiMACUNDA_Flag = FALSE is
+ * included in the SRF so we don't generate
+ * a separate SRF for the deassertion of this
+ * condition
+ */
+ update_dac(smc,0) ;
+ smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
+ INDEX_MAC,0) ;
+ }
+ if (smc->sm.smt_tvd &&
+ time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) {
+ DB_SMT("SMT : DNA expired\n",0,0) ;
+ smc->sm.smt_tvd = 0 ;
+ if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr,
+ &SMT_Unknown)){
+ /* Do not update unknown address */
+ smc->mib.m[MAC0].fddiMACOldDownstreamNbr=
+ smc->mib.m[MAC0].fddiMACDownstreamNbr ;
+ }
+ smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ;
+ smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
+ INDEX_MAC,0) ;
+ }
+
+#endif /* no SLIM_SMT */
+
+#ifndef SMT_REAL_TOKEN_CT
+ /*
+ * Token counter emulation section. If hardware supports the token
+ * count, the token counter will be updated in mac_update_counter.
+ */
+ for (i = MAC0; i < NUMMACS; i++ ){
+ if (time - smc->sm.last_tok_time[i] > 2*TICKS_PER_SECOND ){
+ smt_emulate_token_ct( smc, i );
+ }
+ }
+#endif
+
+ smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
+ EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
+}
+
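+/*
+ * Return upper/lower as a 16.16 fixed point ratio truncated to an int
+ * (0x10000 corresponds to a ratio of 1.0), as used for the SMT ratio MIB
+ * variables. If the left shift of the numerator would overflow it is clamped
+ * to 0xffff0000, and a zero denominator yields 0.
+ */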
+static int div_ratio(u_long upper, u_long lower)
+{
+ if ((upper<<16L) < upper)
+ upper = 0xffff0000L ;
+ else
+ upper <<= 16L ;
+ if (!lower)
+ return(0) ;
+ return((int)(upper/lower)) ;
+}
+
+#ifndef SLIM_SMT
+
+/*
+ * receive packet handler
+ */
+void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
+/* int fs; frame status */
+{
+ struct smt_header *sm ;
+ int local ;
+
+ int illegal = 0 ;
+
+ switch (m_fc(mb)) {
+ case FC_SMT_INFO :
+ case FC_SMT_LAN_LOC :
+ case FC_SMT_LOC :
+ case FC_SMT_NSA :
+ break ;
+ default :
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+
+ smc->mib.m[MAC0].fddiMACSMTCopied_Ct++ ;
+ sm = smtod(mb,struct smt_header *) ;
+ local = ((fs & L_INDICATOR) != 0) ;
+ hwm_conv_can(smc,(char *)sm,12) ;
+
+ /* check destination address */
+ if (is_individual(&sm->smt_dest) && !is_my_addr(smc,&sm->smt_dest)) {
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+#if 0 /* for DUP recognition, do NOT filter them */
+ /* ignore loop back packets */
+ if (is_my_addr(smc,&sm->smt_source) && !local) {
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+#endif
+
+ smt_swap_para(sm,(int) mb->sm_len,1) ;
+ DB_SMT("SMT : received packet [%s] at 0x%x\n",
+ smt_type_name[m_fc(mb) & 0xf],sm) ;
+ DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
+ smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
+
+#ifdef SBA
+ /*
+ * check if NSA frame
+ */
+ if (m_fc(mb) == FC_SMT_NSA && sm->smt_class == SMT_NIF &&
+ (sm->smt_type == SMT_ANNOUNCE || sm->smt_type == SMT_REQUEST)) {
+ smc->sba.sm = sm ;
+ sba(smc,NIF) ;
+ }
+#endif
+
+ /*
+ * ignore any packet with NSA and A-indicator set
+ */
+ if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
+ DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+
+ /*
+ * ignore frames with illegal length
+ */
+ if (((sm->smt_class == SMT_ECF) && (sm->smt_len > SMT_MAX_ECHO_LEN)) ||
+ ((sm->smt_class != SMT_ECF) && (sm->smt_len > SMT_MAX_INFO_LEN))) {
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+
+ /*
+ * check SMT version
+ */
+ switch (sm->smt_class) {
+ case SMT_NIF :
+ case SMT_SIF_CONFIG :
+ case SMT_SIF_OPER :
+ case SMT_ECF :
+ if (sm->smt_version != SMT_VID)
+ illegal = 1;
+ break ;
+ default :
+ if (sm->smt_version != SMT_VID_2)
+ illegal = 1;
+ break ;
+ }
+ if (illegal) {
+ DB_SMT("SMT : version = %d, dest = %s\n",
+ sm->smt_version,addr_to_string(&sm->smt_source)) ;
+ smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+ if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) ||
+ ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {
+ DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ;
+ smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ;
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+ switch (sm->smt_class) {
+ case SMT_NIF :
+ if (smt_check_para(smc,sm,plist_nif)) {
+ DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
+ break ;
+		}
+ switch (sm->smt_type) {
+ case SMT_ANNOUNCE :
+ case SMT_REQUEST :
+ if (!(fs & C_INDICATOR) && m_fc(mb) == FC_SMT_NSA
+ && is_broadcast(&sm->smt_dest)) {
+ struct smt_p_state *st ;
+
+ /* set my UNA */
+ if (!is_equal(
+ &smc->mib.m[MAC0].fddiMACUpstreamNbr,
+ &sm->smt_source)) {
+ DB_SMT("SMT : updated my UNA = %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ if (!is_equal(&smc->mib.m[MAC0].
+ fddiMACUpstreamNbr,&SMT_Unknown)){
+ /* Do not update unknown address */
+ smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
+ smc->mib.m[MAC0].fddiMACUpstreamNbr ;
+ }
+
+ smc->mib.m[MAC0].fddiMACUpstreamNbr =
+ sm->smt_source ;
+ smt_srf_event(smc,
+ SMT_EVENT_MAC_NEIGHBOR_CHANGE,
+ INDEX_MAC,0) ;
+ smt_echo_test(smc,0) ;
+ }
+ smc->sm.smt_tvu = smt_get_time() ;
+ st = (struct smt_p_state *)
+ sm_to_para(smc,sm,SMT_P_STATE) ;
+ if (st) {
+ smc->mib.m[MAC0].fddiMACUNDA_Flag =
+ (st->st_dupl_addr & SMT_ST_MY_DUPA) ?
+ TRUE : FALSE ;
+ update_dac(smc,1) ;
+ }
+ }
+ if ((sm->smt_type == SMT_REQUEST) &&
+ is_individual(&sm->smt_source) &&
+ ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
+ (m_fc(mb) != FC_SMT_NSA))) {
+ DB_SMT("SMT : replying to NIF request %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ smt_send_nif(smc,&sm->smt_source,
+ FC_SMT_INFO,
+ sm->smt_tid,
+ SMT_REPLY,local) ;
+ }
+ break ;
+ case SMT_REPLY :
+ DB_SMT("SMT : received NIF response from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ if (fs & A_INDICATOR) {
+ smc->sm.pend[SMT_TID_NIF] = 0 ;
+ DB_SMT("SMT : duplicate address\n",0,0) ;
+ smc->mib.m[MAC0].fddiMACDupAddressTest =
+ DA_FAILED ;
+ smc->r.dup_addr_test = DA_FAILED ;
+ queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
+ smc->mib.m[MAC0].fddiMACDA_Flag = TRUE ;
+ update_dac(smc,1) ;
+ break ;
+ }
+ if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) {
+ smc->sm.pend[SMT_TID_NIF] = 0 ;
+ /* set my DNA */
+ if (!is_equal(
+ &smc->mib.m[MAC0].fddiMACDownstreamNbr,
+ &sm->smt_source)) {
+ DB_SMT("SMT : updated my DNA\n",0,0) ;
+ if (!is_equal(&smc->mib.m[MAC0].
+ fddiMACDownstreamNbr, &SMT_Unknown)){
+ /* Do not update unknown address */
+ smc->mib.m[MAC0].fddiMACOldDownstreamNbr =
+ smc->mib.m[MAC0].fddiMACDownstreamNbr ;
+ }
+
+ smc->mib.m[MAC0].fddiMACDownstreamNbr =
+ sm->smt_source ;
+ smt_srf_event(smc,
+ SMT_EVENT_MAC_NEIGHBOR_CHANGE,
+ INDEX_MAC,0) ;
+ smt_echo_test(smc,1) ;
+ }
+ smc->mib.m[MAC0].fddiMACDA_Flag = FALSE ;
+ update_dac(smc,1) ;
+ smc->sm.smt_tvd = smt_get_time() ;
+ smc->mib.m[MAC0].fddiMACDupAddressTest =
+ DA_PASSED ;
+ if (smc->r.dup_addr_test != DA_PASSED) {
+ smc->r.dup_addr_test = DA_PASSED ;
+ queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
+ }
+ }
+ else if (sm->smt_tid ==
+ smc->sm.pend[SMT_TID_NIF_TEST]) {
+ DB_SMT("SMT : NIF test TID ok\n",0,0) ;
+ }
+ else {
+ DB_SMT("SMT : expected TID %lx, got %lx\n",
+ smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ;
+ }
+ break ;
+ default :
+ illegal = 2 ;
+ break ;
+ }
+ break ;
+ case SMT_SIF_CONFIG : /* station information */
+ if (sm->smt_type != SMT_REQUEST)
+ break ;
+ DB_SMT("SMT : replying to SIF Config request from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
+ break ;
+ case SMT_SIF_OPER : /* station information */
+ if (sm->smt_type != SMT_REQUEST)
+ break ;
+ DB_SMT("SMT : replying to SIF Operation request from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
+ break ;
+ case SMT_ECF : /* echo frame */
+ switch (sm->smt_type) {
+ case SMT_REPLY :
+ smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
+ DB_SMT("SMT: received ECF reply from %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+ if (sm_to_para(smc,sm,SMT_P_ECHODATA) == 0) {
+ DB_SMT("SMT: ECHODATA missing\n",0,0) ;
+ break ;
+ }
+ if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {
+ DB_SMT("SMT : ECF test TID ok\n",0,0) ;
+ }
+ else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {
+ DB_SMT("SMT : ECF test UNA ok\n",0,0) ;
+ }
+ else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {
+ DB_SMT("SMT : ECF test DNA ok\n",0,0) ;
+ }
+ else {
+ DB_SMT("SMT : expected TID %lx, got %lx\n",
+ smc->sm.pend[SMT_TID_ECF],
+ sm->smt_tid) ;
+ }
+ break ;
+ case SMT_REQUEST :
+ smc->mib.priv.fddiPRIVECF_Req_Rx++ ;
+ {
+ if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {
+ DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ;
+ smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,
+ local) ;
+ break ;
+ }
+ DB_SMT("SMT - sending ECF reply to %s\n",
+ addr_to_string(&sm->smt_source),0) ;
+
+ /* set destination addr. & reply */
+ sm->smt_dest = sm->smt_source ;
+ sm->smt_type = SMT_REPLY ;
+ dump_smt(smc,sm,"ECF REPLY") ;
+ smc->mib.priv.fddiPRIVECF_Reply_Tx++ ;
+ smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
+ return ; /* DON'T free mbuf */
+ }
+ default :
+ illegal = 1 ;
+ break ;
+ }
+ break ;
+#ifndef BOOT
+ case SMT_RAF : /* resource allocation */
+#ifdef ESS
+ DB_ESSN(2,"ESS: RAF frame received\n",0,0) ;
+ fs = ess_raf_received_pack(smc,mb,sm,fs) ;
+#endif
+
+#ifdef SBA
+ DB_SBAN(2,"SBA: RAF frame received\n",0,0) ;
+ sba_raf_received_pack(smc,sm,fs) ;
+#endif
+ break ;
+ case SMT_RDF : /* request denied */
+ smc->mib.priv.fddiPRIVRDF_Rx++ ;
+ break ;
+ case SMT_ESF : /* extended service - not supported */
+ if (sm->smt_type == SMT_REQUEST) {
+ DB_SMT("SMT - received ESF, sending RDF\n",0,0) ;
+ smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
+ }
+ break ;
+ case SMT_PMF_GET :
+ case SMT_PMF_SET :
+ if (sm->smt_type != SMT_REQUEST)
+ break ;
+ /* update statistics */
+ if (sm->smt_class == SMT_PMF_GET)
+ smc->mib.priv.fddiPRIVPMF_Get_Rx++ ;
+ else
+ smc->mib.priv.fddiPRIVPMF_Set_Rx++ ;
+ /*
+ * ignore PMF SET with I/G set
+ */
+ if ((sm->smt_class == SMT_PMF_SET) &&
+ !is_individual(&sm->smt_dest)) {
+ DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ;
+ break ;
+ }
+ smt_pmf_received_pack(smc,mb, local) ;
+ break ;
+ case SMT_SRF :
+ dump_smt(smc,sm,"SRF received") ;
+ break ;
+ default :
+ if (sm->smt_type != SMT_REQUEST)
+ break ;
+ /*
+ * For frames with unknown class:
+		 * we need to send an RDF frame according to 8.1.3.1.1,
+		 * but only if it is a REQUEST.
+ */
+ DB_SMT("SMT : class = %d, send RDF to %s\n",
+ sm->smt_class, addr_to_string(&sm->smt_source)) ;
+
+ smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
+ break ;
+#endif
+ }
+ if (illegal) {
+ DB_SMT("SMT: discarding invalid frame, reason = %d\n",
+ illegal,0) ;
+ }
+ smt_free_mbuf(smc,mb) ;
+}
+
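+/*
+ * Recompute the duplicate address condition from the UNDA and DA flags.
+ * If 'report' is set and the condition changed, an SRF event is raised;
+ * otherwise fddiMACDuplicateAddressCond is updated without reporting.
+ */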
+static void update_dac(struct s_smc *smc, int report)
+{
+ int cond ;
+
+ cond = ( smc->mib.m[MAC0].fddiMACUNDA_Flag |
+ smc->mib.m[MAC0].fddiMACDA_Flag) != 0 ;
+ if (report && (cond != smc->mib.m[MAC0].fddiMACDuplicateAddressCond))
+ smt_srf_event(smc, SMT_COND_MAC_DUP_ADDR,INDEX_MAC,cond) ;
+ else
+ smc->mib.m[MAC0].fddiMACDuplicateAddressCond = cond ;
+}
+
+/*
+ * send SMT frame
+ * set source address
+ * set station ID
+ * send frame
+ */
+void smt_send_frame(struct s_smc *smc, SMbuf *mb, int fc, int local)
+/* SMbuf *mb; buffer to send */
+/* int fc; FC value */
+{
+ struct smt_header *sm ;
+
+ if (!smc->r.sm_ma_avail && !local) {
+ smt_free_mbuf(smc,mb) ;
+ return ;
+ }
+ sm = smtod(mb,struct smt_header *) ;
+ sm->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ;
+ sm->smt_sid = smc->mib.fddiSMTStationId ;
+
+ smt_swap_para(sm,(int) mb->sm_len,0) ; /* swap para & header */
+ hwm_conv_can(smc,(char *)sm,12) ; /* convert SA and DA */
+ smc->mib.m[MAC0].fddiMACSMTTransmit_Ct++ ;
+ smt_send_mbuf(smc,mb,local ? FC_SMT_LOC : fc) ;
+}
+
+/*
+ * generate and send RDF
+ */
+static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
+ int local)
+/* SMbuf *rej; mbuf of offending frame */
+/* int fc; FC of denied frame */
+/* int reason; reason code */
+{
+ SMbuf *mb ;
+ struct smt_header *sm ; /* header of offending frame */
+ struct smt_rdf *rdf ;
+ int len ;
+ int frame_len ;
+
+ sm = smtod(rej,struct smt_header *) ;
+ if (sm->smt_type != SMT_REQUEST)
+ return ;
+
+ DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n",
+ addr_to_string(&sm->smt_source),reason) ;
+
+
+ /*
+ * note: get framelength from MAC length, NOT from SMT header
+ * smt header length is included in sm_len
+ */
+ frame_len = rej->sm_len ;
+
+ if (!(mb=smt_build_frame(smc,SMT_RDF,SMT_REPLY,sizeof(struct smt_rdf))))
+ return ;
+ rdf = smtod(mb,struct smt_rdf *) ;
+ rdf->smt.smt_tid = sm->smt_tid ; /* use TID from sm */
+ rdf->smt.smt_dest = sm->smt_source ; /* set dest = source */
+
+ /* set P12 */
+ rdf->reason.para.p_type = SMT_P_REASON ;
+ rdf->reason.para.p_len = sizeof(struct smt_p_reason) - PARA_LEN ;
+ rdf->reason.rdf_reason = reason ;
+
+ /* set P14 */
+ rdf->version.para.p_type = SMT_P_VERSION ;
+ rdf->version.para.p_len = sizeof(struct smt_p_version) - PARA_LEN ;
+ rdf->version.v_pad = 0 ;
+ rdf->version.v_n = 1 ;
+ rdf->version.v_index = 1 ;
+ rdf->version.v_version[0] = SMT_VID_2 ;
+ rdf->version.v_pad2 = 0 ;
+
+ /* set P13 */
+ if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ 2*sizeof(struct smt_header))
+ len = frame_len ;
+ else
+ len = SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ 2*sizeof(struct smt_header) ;
+ /* make length multiple of 4 */
+ len &= ~3 ;
+ rdf->refused.para.p_type = SMT_P_REFUSED ;
+ /* length of para is smt_frame + ref_fc */
+ rdf->refused.para.p_len = len + 4 ;
+ rdf->refused.ref_fc = fc ;
+
+ /* swap it back */
+ smt_swap_para(sm,frame_len,0) ;
+
+ memcpy((char *) &rdf->refused.ref_header,(char *) sm,len) ;
+
+ len -= sizeof(struct smt_header) ;
+ mb->sm_len += len ;
+ rdf->smt.smt_len += len ;
+
+ dump_smt(smc,(struct smt_header *)rdf,"RDF") ;
+ smc->mib.priv.fddiPRIVRDF_Tx++ ;
+ smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
+}
+
+/*
+ * generate and send NIF
+ */
+static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest,
+ int fc, u_long tid, int type, int local)
+/* struct fddi_addr *dest; dest address */
+/* int fc; frame control */
+/* u_long tid; transaction id */
+/* int type; frame type */
+{
+ struct smt_nif *nif ;
+ SMbuf *mb ;
+
+ if (!(mb = smt_build_frame(smc,SMT_NIF,type,sizeof(struct smt_nif))))
+ return ;
+ nif = smtod(mb, struct smt_nif *) ;
+ smt_fill_una(smc,&nif->una) ; /* set UNA */
+ smt_fill_sde(smc,&nif->sde) ; /* set station descriptor */
+ smt_fill_state(smc,&nif->state) ; /* set state information */
+#ifdef SMT6_10
+ smt_fill_fsc(smc,&nif->fsc) ; /* set frame status cap. */
+#endif
+ nif->smt.smt_dest = *dest ; /* destination address */
+ nif->smt.smt_tid = tid ; /* transaction ID */
+ dump_smt(smc,(struct smt_header *)nif,"NIF") ;
+ smt_send_frame(smc,mb,fc,local) ;
+}
+
+#ifdef DEBUG
+/*
+ * send NIF request (test purpose)
+ */
+static void smt_send_nif_request(struct s_smc *smc, struct fddi_addr *dest)
+{
+ smc->sm.pend[SMT_TID_NIF_TEST] = smt_get_tid(smc) ;
+ smt_send_nif(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_NIF_TEST],
+ SMT_REQUEST,0) ;
+}
+
+/*
+ * send ECF request (test purpose)
+ */
+static void smt_send_ecf_request(struct s_smc *smc, struct fddi_addr *dest,
+ int len)
+{
+ smc->sm.pend[SMT_TID_ECF] = smt_get_tid(smc) ;
+ smt_send_ecf(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_ECF],
+ SMT_REQUEST,len) ;
+}
+#endif
+
+/*
+ * echo test
+ */
+static void smt_echo_test(struct s_smc *smc, int dna)
+{
+ u_long tid ;
+
+ smc->sm.pend[dna ? SMT_TID_ECF_DNA : SMT_TID_ECF_UNA] =
+ tid = smt_get_tid(smc) ;
+ smt_send_ecf(smc, dna ?
+ &smc->mib.m[MAC0].fddiMACDownstreamNbr :
+ &smc->mib.m[MAC0].fddiMACUpstreamNbr,
+ FC_SMT_INFO,tid, SMT_REQUEST, (SMT_TEST_ECHO_LEN & ~3)-8) ;
+}
+
+/*
+ * generate and send ECF
+ */
+static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
+ u_long tid, int type, int len)
+/* struct fddi_addr *dest; dest address */
+/* int fc; frame control */
+/* u_long tid; transaction id */
+/* int type; frame type */
+/* int len; frame length */
+{
+ struct smt_ecf *ecf ;
+ SMbuf *mb ;
+
+ if (!(mb = smt_build_frame(smc,SMT_ECF,type,SMT_ECF_LEN + len)))
+ return ;
+ ecf = smtod(mb, struct smt_ecf *) ;
+
+ smt_fill_echo(smc,&ecf->ec_echo,tid,len) ; /* set ECHO */
+ ecf->smt.smt_dest = *dest ; /* destination address */
+ ecf->smt.smt_tid = tid ; /* transaction ID */
+ smc->mib.priv.fddiPRIVECF_Req_Tx++ ;
+ smt_send_frame(smc,mb,fc,0) ;
+}
+
+/*
+ * generate and send SIF config response
+ */
+
+static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
+ u_long tid, int local)
+/* struct fddi_addr *dest; dest address */
+/* u_long tid; transaction id */
+{
+ struct smt_sif_config *sif ;
+ SMbuf *mb ;
+ int len ;
+ if (!(mb = smt_build_frame(smc,SMT_SIF_CONFIG,SMT_REPLY,
+ SIZEOF_SMT_SIF_CONFIG)))
+ return ;
+
+ sif = smtod(mb, struct smt_sif_config *) ;
+ smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */
+ smt_fill_sde(smc,&sif->sde) ; /* set station descriptor */
+ smt_fill_version(smc,&sif->version) ; /* set version information */
+ smt_fill_state(smc,&sif->state) ; /* set state information */
+ smt_fill_policy(smc,&sif->policy) ; /* set station policy */
+ smt_fill_latency(smc,&sif->latency); /* set station latency */
+ smt_fill_neighbor(smc,&sif->neighbor); /* set station neighbor */
+ smt_fill_setcount(smc,&sif->setcount) ; /* set count */
+ len = smt_fill_path(smc,&sif->path); /* set station path descriptor*/
+ sif->smt.smt_dest = *dest ; /* destination address */
+ sif->smt.smt_tid = tid ; /* transaction ID */
+ smt_add_frame_len(mb,len) ; /* adjust length fields */
+ dump_smt(smc,(struct smt_header *)sif,"SIF Configuration Reply") ;
+ smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
+}
+
+/*
+ * generate and send SIF operation response
+ */
+
+static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
+ u_long tid, int local)
+/* struct fddi_addr *dest; dest address */
+/* u_long tid; transaction id */
+{
+ struct smt_sif_operation *sif ;
+ SMbuf *mb ;
+ int ports ;
+ int i ;
+
+ ports = NUMPHYS ;
+#ifndef CONCENTRATOR
+ if (smc->s.sas == SMT_SAS)
+ ports = 1 ;
+#endif
+
+ if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY,
+ SIZEOF_SMT_SIF_OPERATION+ports*sizeof(struct smt_p_lem))))
+ return ;
+ sif = smtod(mb, struct smt_sif_operation *) ;
+ smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */
+ smt_fill_mac_status(smc,&sif->status) ; /* set mac status */
+ smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */
+ smt_fill_mac_fnc(smc,&sif->fnc) ; /* set frame not copied counter */
+ smt_fill_manufacturer(smc,&sif->man) ; /* set manufacturer field */
+ smt_fill_user(smc,&sif->user) ; /* set user field */
+ smt_fill_setcount(smc,&sif->setcount) ; /* set count */
+ /*
+ * set link error mon information
+ */
+ if (ports == 1) {
+ smt_fill_lem(smc,sif->lem,PS) ;
+ }
+ else {
+ for (i = 0 ; i < ports ; i++) {
+ smt_fill_lem(smc,&sif->lem[i],i) ;
+ }
+ }
+
+ sif->smt.smt_dest = *dest ; /* destination address */
+ sif->smt.smt_tid = tid ; /* transaction ID */
+ dump_smt(smc,(struct smt_header *)sif,"SIF Operation Reply") ;
+ smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
+}
+
+/*
+ * get and initialize SMT frame
+ */
+SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
+ int length)
+{
+ SMbuf *mb ;
+ struct smt_header *smt ;
+
+#if 0
+ if (!smc->r.sm_ma_avail) {
+ return(0) ;
+ }
+#endif
+ if (!(mb = smt_get_mbuf(smc)))
+ return(mb) ;
+
+ mb->sm_len = length ;
+ smt = smtod(mb, struct smt_header *) ;
+ smt->smt_dest = fddi_broadcast ; /* set dest = broadcast */
+ smt->smt_class = class ;
+ smt->smt_type = type ;
+ switch (class) {
+ case SMT_NIF :
+ case SMT_SIF_CONFIG :
+ case SMT_SIF_OPER :
+ case SMT_ECF :
+ smt->smt_version = SMT_VID ;
+ break ;
+ default :
+ smt->smt_version = SMT_VID_2 ;
+ break ;
+ }
+ smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */
+ smt->smt_pad = 0 ;
+ smt->smt_len = length - sizeof(struct smt_header) ;
+ return(mb) ;
+}
+
+static void smt_add_frame_len(SMbuf *mb, int len)
+{
+ struct smt_header *smt ;
+
+ smt = smtod(mb, struct smt_header *) ;
+ smt->smt_len += len ;
+ mb->sm_len += len ;
+}
+
+
+
+/*
+ * fill values in UNA parameter
+ */
+static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una)
+{
+ SMTSETPARA(una,SMT_P_UNA) ;
+ una->una_pad = 0 ;
+ una->una_node = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
+}
+
+/*
+ * fill values in SDE parameter
+ */
+static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde)
+{
+ SMTSETPARA(sde,SMT_P_SDE) ;
+ sde->sde_non_master = smc->mib.fddiSMTNonMaster_Ct ;
+ sde->sde_master = smc->mib.fddiSMTMaster_Ct ;
+ sde->sde_mac_count = NUMMACS ; /* only 1 MAC */
+#ifdef CONCENTRATOR
+ sde->sde_type = SMT_SDE_CONCENTRATOR ;
+#else
+ sde->sde_type = SMT_SDE_STATION ;
+#endif
+}
+
+/*
+ * fill in values in station state parameter
+ */
+static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state)
+{
+ int top ;
+ int twist ;
+
+ SMTSETPARA(state,SMT_P_STATE) ;
+ state->st_pad = 0 ;
+
+ /* determine topology */
+ top = 0 ;
+ if (smc->mib.fddiSMTPeerWrapFlag) {
+ top |= SMT_ST_WRAPPED ; /* state wrapped */
+ }
+#ifdef CONCENTRATOR
+ if (cfm_status_unattached(smc)) {
+ top |= SMT_ST_UNATTACHED ; /* unattached concentrator */
+ }
+#endif
+ if ((twist = pcm_status_twisted(smc)) & 1) {
+ top |= SMT_ST_TWISTED_A ; /* twisted cable */
+ }
+ if (twist & 2) {
+ top |= SMT_ST_TWISTED_B ; /* twisted cable */
+ }
+#ifdef OPT_SRF
+ top |= SMT_ST_SRF ;
+#endif
+ if (pcm_rooted_station(smc))
+ top |= SMT_ST_ROOTED_S ;
+ if (smc->mib.a[0].fddiPATHSbaPayload != 0)
+ top |= SMT_ST_SYNC_SERVICE ;
+ state->st_topology = top ;
+ state->st_dupl_addr =
+ ((smc->mib.m[MAC0].fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0 ) |
+ (smc->mib.m[MAC0].fddiMACUNDA_Flag ? SMT_ST_UNA_DUPA : 0)) ;
+}
+
+/*
+ * fill values in timestamp parameter
+ */
+static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts)
+{
+
+ SMTSETPARA(ts,SMT_P_TIMESTAMP) ;
+ smt_set_timestamp(smc,ts->ts_time) ;
+}
+
+void smt_set_timestamp(struct s_smc *smc, u_char *p)
+{
+ u_long time ;
+ u_long utime ;
+
+ /*
+	 * The timestamp is 64 bits long with a resolution of 80 ns.
+	 * Our clock resolution is 10 ms.
+	 * 10 ms / 80 ns = 125000 ~ 2^17 = 131072
+ */
+ utime = smt_get_time() ;
+ time = utime * 100 ;
+ time /= TICKS_PER_SECOND ;
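+	/*
+	 * 'time' is now in units of 10 ms. Multiplying by ~2^17 converts it
+	 * to 80 ns units; the bytes written below store that product
+	 * big-endian in p[1..5], while p[6..7] carry a per-tick sequence
+	 * number so that timestamps taken within the same tick stay unique.
+	 */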
+ p[0] = 0 ;
+ p[1] = (u_char)((time>>(8+8+8+8-1)) & 1) ;
+ p[2] = (u_char)(time>>(8+8+8-1)) ;
+ p[3] = (u_char)(time>>(8+8-1)) ;
+ p[4] = (u_char)(time>>(8-1)) ;
+ p[5] = (u_char)(time<<1) ;
+ p[6] = (u_char)(smc->sm.uniq_ticks>>8) ;
+ p[7] = (u_char)smc->sm.uniq_ticks ;
+ /*
+ * make sure we don't wrap: restart whenever the upper digits change
+ */
+ if (utime != smc->sm.uniq_time) {
+ smc->sm.uniq_ticks = 0 ;
+ }
+ smc->sm.uniq_ticks++ ;
+ smc->sm.uniq_time = utime ;
+}
+
+/*
+ * fill values in station policy parameter
+ */
+static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
+{
+ int i ;
+ u_char *map ;
+ u_short in ;
+ u_short out ;
+
+ /*
+ * MIB para 101b (fddiSMTConnectionPolicy) coding
+ * is different from 0005 coding
+ */
+ static u_char ansi_weirdness[16] = {
+ 0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
+ } ;
+ SMTSETPARA(policy,SMT_P_POLICY) ;
+
+ out = 0 ;
+ in = smc->mib.fddiSMTConnectionPolicy ;
+ for (i = 0, map = ansi_weirdness ; i < 16 ; i++) {
+ if (in & 1)
+ out |= (1<<*map) ;
+ in >>= 1 ;
+ map++ ;
+ }
+ policy->pl_config = smc->mib.fddiSMTConfigPolicy ;
+ policy->pl_connect = out ;
+}
+
+/*
+ * fill values in latency equivalent parameter
+ */
+static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency)
+{
+ SMTSETPARA(latency,SMT_P_LATENCY) ;
+
+ latency->lt_phyout_idx1 = phy_index(smc,0) ;
+ latency->lt_latency1 = 10 ; /* in octets (byte clock) */
+ /*
+ * note: latency has two phy entries by definition
+ * for a SAS, the 2nd one is null
+ */
+ if (smc->s.sas == SMT_DAS) {
+ latency->lt_phyout_idx2 = phy_index(smc,1) ;
+ latency->lt_latency2 = 10 ; /* in octets (byte clock) */
+ }
+ else {
+ latency->lt_phyout_idx2 = 0 ;
+ latency->lt_latency2 = 0 ;
+ }
+}
+
+/*
+ * fill values in MAC neighbors parameter
+ */
+static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor)
+{
+ SMTSETPARA(neighbor,SMT_P_NEIGHBORS) ;
+
+ neighbor->nb_mib_index = INDEX_MAC ;
+ neighbor->nb_mac_index = mac_index(smc,1) ;
+ neighbor->nb_una = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
+ neighbor->nb_dna = smc->mib.m[MAC0].fddiMACDownstreamNbr ;
+}
+
+/*
+ * fill values in path descriptor
+ */
+#ifdef CONCENTRATOR
+#define ALLPHYS NUMPHYS
+#else
+#define ALLPHYS ((smc->s.sas == SMT_SAS) ? 1 : 2)
+#endif
+
+static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
+{
+ SK_LOC_DECL(int,type) ;
+ SK_LOC_DECL(int,state) ;
+ SK_LOC_DECL(int,remote) ;
+ SK_LOC_DECL(int,mac) ;
+ int len ;
+ int p ;
+ int physp ;
+ struct smt_phy_rec *phy ;
+ struct smt_mac_rec *pd_mac ;
+
+ len = PARA_LEN +
+ sizeof(struct smt_mac_rec) * NUMMACS +
+ sizeof(struct smt_phy_rec) * ALLPHYS ;
+ path->para.p_type = SMT_P_PATH ;
+ path->para.p_len = len - PARA_LEN ;
+
+ /* PHYs */
+ for (p = 0,phy = path->pd_phy ; p < ALLPHYS ; p++, phy++) {
+ physp = p ;
+#ifndef CONCENTRATOR
+ if (smc->s.sas == SMT_SAS)
+ physp = PS ;
+#endif
+ pcm_status_state(smc,physp,&type,&state,&remote,&mac) ;
+#ifdef LITTLE_ENDIAN
+ phy->phy_mib_index = smt_swap_short((u_short)p+INDEX_PORT) ;
+#else
+ phy->phy_mib_index = p+INDEX_PORT ;
+#endif
+ phy->phy_type = type ;
+ phy->phy_connect_state = state ;
+ phy->phy_remote_type = remote ;
+ phy->phy_remote_mac = mac ;
+ phy->phy_resource_idx = phy_con_resource_index(smc,p) ;
+ }
+
+ /* MAC */
+ pd_mac = (struct smt_mac_rec *) phy ;
+ pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
+ pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
+ return(len) ;
+}
+
+/*
+ * fill values in mac status
+ */
+static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st)
+{
+ SMTSETPARA(st,SMT_P_MAC_STATUS) ;
+
+ st->st_mib_index = INDEX_MAC ;
+ st->st_mac_index = mac_index(smc,1) ;
+
+ mac_update_counter(smc) ;
+ /*
+ * timer values are represented in SMT as 2's complement numbers
+ * units : internal : 2's complement BCLK
+ */
+ st->st_t_req = smc->mib.m[MAC0].fddiMACT_Req ;
+ st->st_t_neg = smc->mib.m[MAC0].fddiMACT_Neg ;
+ st->st_t_max = smc->mib.m[MAC0].fddiMACT_Max ;
+ st->st_tvx_value = smc->mib.m[MAC0].fddiMACTvxValue ;
+ st->st_t_min = smc->mib.m[MAC0].fddiMACT_Min ;
+
+ st->st_sba = smc->mib.a[PATH0].fddiPATHSbaPayload ;
+ st->st_frame_ct = smc->mib.m[MAC0].fddiMACFrame_Ct ;
+ st->st_error_ct = smc->mib.m[MAC0].fddiMACError_Ct ;
+ st->st_lost_ct = smc->mib.m[MAC0].fddiMACLost_Ct ;
+}
+
+/*
+ * fill values in LEM status
+ */
+static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy)
+{
+ struct fddi_mib_p *mib ;
+
+ mib = smc->y[phy].mib ;
+
+ SMTSETPARA(lem,SMT_P_LEM) ;
+ lem->lem_mib_index = phy+INDEX_PORT ;
+ lem->lem_phy_index = phy_index(smc,phy) ;
+ lem->lem_pad2 = 0 ;
+ lem->lem_cutoff = mib->fddiPORTLer_Cutoff ;
+ lem->lem_alarm = mib->fddiPORTLer_Alarm ;
+ /* long term bit error rate */
+ lem->lem_estimate = mib->fddiPORTLer_Estimate ;
+ /* # of rejected connections */
+ lem->lem_reject_ct = mib->fddiPORTLem_Reject_Ct ;
+ lem->lem_ct = mib->fddiPORTLem_Ct ; /* total number of errors */
+}
+
+/*
+ * fill version parameter
+ */
+static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers)
+{
+ SK_UNUSED(smc) ;
+ SMTSETPARA(vers,SMT_P_VERSION) ;
+ vers->v_pad = 0 ;
+ vers->v_n = 1 ; /* one version is enough .. */
+ vers->v_index = 1 ;
+ vers->v_version[0] = SMT_VID_2 ;
+ vers->v_pad2 = 0 ;
+}
+
+#ifdef SMT6_10
+/*
+ * fill frame status capabilities
+ */
+/*
+ * note: this para 200B is NOT in swap table, because it's also set in
+ * PMF add_para
+ */
+static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc)
+{
+ SK_UNUSED(smc) ;
+ SMTSETPARA(fsc,SMT_P_FSC) ;
+ fsc->fsc_pad0 = 0 ;
+	fsc->fsc_mac_index = INDEX_MAC ;	/* this is the MIB index; it is
+						 * NOT mac_index()!
+						 */
+ fsc->fsc_pad1 = 0 ;
+ fsc->fsc_value = FSC_TYPE0 ; /* "normal" node */
+#ifdef LITTLE_ENDIAN
+ fsc->fsc_mac_index = smt_swap_short(INDEX_MAC) ;
+ fsc->fsc_value = smt_swap_short(FSC_TYPE0) ;
+#endif
+}
+#endif
+
+/*
+ * fill mac counter field
+ */
+static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc)
+{
+ SMTSETPARA(mc,SMT_P_MAC_COUNTER) ;
+ mc->mc_mib_index = INDEX_MAC ;
+ mc->mc_index = mac_index(smc,1) ;
+ mc->mc_receive_ct = smc->mib.m[MAC0].fddiMACCopied_Ct ;
+ mc->mc_transmit_ct = smc->mib.m[MAC0].fddiMACTransmit_Ct ;
+}
+
+/*
+ * fill mac frame not copied counter
+ */
+static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc)
+{
+ SMTSETPARA(fnc,SMT_P_MAC_FNC) ;
+ fnc->nc_mib_index = INDEX_MAC ;
+ fnc->nc_index = mac_index(smc,1) ;
+ fnc->nc_counter = smc->mib.m[MAC0].fddiMACNotCopied_Ct ;
+}
+
+
+/*
+ * fill manufacturer field
+ */
+static void smt_fill_manufacturer(struct s_smc *smc,
+ struct smp_p_manufacturer *man)
+{
+ SMTSETPARA(man,SMT_P_MANUFACTURER) ;
+ memcpy((char *) man->mf_data,
+ (char *) smc->mib.fddiSMTManufacturerData,
+ sizeof(man->mf_data)) ;
+}
+
+/*
+ * fill user field
+ */
+static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user)
+{
+ SMTSETPARA(user,SMT_P_USER) ;
+ memcpy((char *) user->us_data,
+ (char *) smc->mib.fddiSMTUserData,
+ sizeof(user->us_data)) ;
+}
+
+/*
+ * fill set count
+ */
+static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount)
+{
+ SK_UNUSED(smc) ;
+ SMTSETPARA(setcount,SMT_P_SETCOUNT) ;
+ setcount->count = smc->mib.fddiSMTSetCount.count ;
+ memcpy((char *)setcount->timestamp,
+ (char *)smc->mib.fddiSMTSetCount.timestamp,8) ;
+}
+
+/*
+ * fill echo data
+ */
+static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed,
+ int len)
+{
+ u_char *p ;
+
+ SK_UNUSED(smc) ;
+ SMTSETPARA(echo,SMT_P_ECHODATA) ;
+ echo->para.p_len = len ;
+ for (p = echo->ec_data ; len ; len--) {
+ *p++ = (u_char) seed ;
+ seed += 13 ;
+ }
+}
+
+/*
+ * clear DNA and UNA
+ * called from CFM if configuration changes
+ */
+void smt_clear_una_dna(struct s_smc *smc)
+{
+ smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ;
+ smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ;
+}
+
+static void smt_clear_old_una_dna(struct s_smc *smc)
+{
+ smc->mib.m[MAC0].fddiMACOldUpstreamNbr = SMT_Unknown ;
+ smc->mib.m[MAC0].fddiMACOldDownstreamNbr = SMT_Unknown ;
+}
+
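+/*
+ * Generate a new SMT transaction ID: a running counter XORed with
+ * SMT_TID_MAGIC and masked to 30 bits. A result of zero is skipped (before
+ * masking), since a pending TID of zero is used elsewhere to mean "no
+ * transaction outstanding".
+ */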
+u_long smt_get_tid(struct s_smc *smc)
+{
+ u_long tid ;
+ while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0)
+ ;
+ return(tid & 0x3fffffffL) ;
+}
+
+
+/*
+ * table of parameter lengths
+ */
+static const struct smt_pdef {
+ int ptype ;
+ int plen ;
+ const char *pswap ;
+} smt_pdef[] = {
+ { SMT_P_UNA, sizeof(struct smt_p_una) ,
+ SWAP_SMT_P_UNA } ,
+ { SMT_P_SDE, sizeof(struct smt_p_sde) ,
+ SWAP_SMT_P_SDE } ,
+ { SMT_P_STATE, sizeof(struct smt_p_state) ,
+ SWAP_SMT_P_STATE } ,
+ { SMT_P_TIMESTAMP,sizeof(struct smt_p_timestamp) ,
+ SWAP_SMT_P_TIMESTAMP } ,
+ { SMT_P_POLICY, sizeof(struct smt_p_policy) ,
+ SWAP_SMT_P_POLICY } ,
+ { SMT_P_LATENCY, sizeof(struct smt_p_latency) ,
+ SWAP_SMT_P_LATENCY } ,
+ { SMT_P_NEIGHBORS,sizeof(struct smt_p_neighbor) ,
+ SWAP_SMT_P_NEIGHBORS } ,
+ { SMT_P_PATH, sizeof(struct smt_p_path) ,
+ SWAP_SMT_P_PATH } ,
+ { SMT_P_MAC_STATUS,sizeof(struct smt_p_mac_status) ,
+ SWAP_SMT_P_MAC_STATUS } ,
+ { SMT_P_LEM, sizeof(struct smt_p_lem) ,
+ SWAP_SMT_P_LEM } ,
+ { SMT_P_MAC_COUNTER,sizeof(struct smt_p_mac_counter) ,
+ SWAP_SMT_P_MAC_COUNTER } ,
+ { SMT_P_MAC_FNC,sizeof(struct smt_p_mac_fnc) ,
+ SWAP_SMT_P_MAC_FNC } ,
+ { SMT_P_PRIORITY,sizeof(struct smt_p_priority) ,
+ SWAP_SMT_P_PRIORITY } ,
+ { SMT_P_EB,sizeof(struct smt_p_eb) ,
+ SWAP_SMT_P_EB } ,
+ { SMT_P_MANUFACTURER,sizeof(struct smp_p_manufacturer) ,
+ SWAP_SMT_P_MANUFACTURER } ,
+ { SMT_P_REASON, sizeof(struct smt_p_reason) ,
+ SWAP_SMT_P_REASON } ,
+ { SMT_P_REFUSED, sizeof(struct smt_p_refused) ,
+ SWAP_SMT_P_REFUSED } ,
+ { SMT_P_VERSION, sizeof(struct smt_p_version) ,
+ SWAP_SMT_P_VERSION } ,
+#ifdef ESS
+ { SMT_P0015, sizeof(struct smt_p_0015) , SWAP_SMT_P0015 } ,
+ { SMT_P0016, sizeof(struct smt_p_0016) , SWAP_SMT_P0016 } ,
+ { SMT_P0017, sizeof(struct smt_p_0017) , SWAP_SMT_P0017 } ,
+ { SMT_P0018, sizeof(struct smt_p_0018) , SWAP_SMT_P0018 } ,
+ { SMT_P0019, sizeof(struct smt_p_0019) , SWAP_SMT_P0019 } ,
+ { SMT_P001A, sizeof(struct smt_p_001a) , SWAP_SMT_P001A } ,
+ { SMT_P001B, sizeof(struct smt_p_001b) , SWAP_SMT_P001B } ,
+ { SMT_P001C, sizeof(struct smt_p_001c) , SWAP_SMT_P001C } ,
+ { SMT_P001D, sizeof(struct smt_p_001d) , SWAP_SMT_P001D } ,
+#endif
+#if 0
+ { SMT_P_FSC, sizeof(struct smt_p_fsc) ,
+ SWAP_SMT_P_FSC } ,
+#endif
+
+ { SMT_P_SETCOUNT,0, SWAP_SMT_P_SETCOUNT } ,
+ { SMT_P1048, 0, SWAP_SMT_P1048 } ,
+ { SMT_P208C, 0, SWAP_SMT_P208C } ,
+ { SMT_P208D, 0, SWAP_SMT_P208D } ,
+ { SMT_P208E, 0, SWAP_SMT_P208E } ,
+ { SMT_P208F, 0, SWAP_SMT_P208F } ,
+ { SMT_P2090, 0, SWAP_SMT_P2090 } ,
+#ifdef ESS
+ { SMT_P320B, sizeof(struct smt_p_320b) , SWAP_SMT_P320B } ,
+ { SMT_P320F, sizeof(struct smt_p_320f) , SWAP_SMT_P320F } ,
+ { SMT_P3210, sizeof(struct smt_p_3210) , SWAP_SMT_P3210 } ,
+#endif
+ { SMT_P4050, 0, SWAP_SMT_P4050 } ,
+ { SMT_P4051, 0, SWAP_SMT_P4051 } ,
+ { SMT_P4052, 0, SWAP_SMT_P4052 } ,
+ { SMT_P4053, 0, SWAP_SMT_P4053 } ,
+} ;
+
+#define N_SMT_PLEN (sizeof(smt_pdef)/sizeof(smt_pdef[0]))
+
+int smt_check_para(struct s_smc *smc, struct smt_header *sm,
+ const u_short list[])
+{
+ const u_short *p = list ;
+ while (*p) {
+ if (!sm_to_para(smc,sm,(int) *p)) {
+ DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
+ return(-1) ;
+ }
+ p++ ;
+ }
+ return(0) ;
+}
+
+void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
+{
+ char *p ;
+ int len ;
+ int plen ;
+ void *found = NULL;
+
+ SK_UNUSED(smc) ;
+
+ len = sm->smt_len ;
+ p = (char *)(sm+1) ; /* pointer to info */
+ while (len > 0 ) {
+ if (((struct smt_para *)p)->p_type == para)
+ found = (void *) p ;
+ plen = ((struct smt_para *)p)->p_len + PARA_LEN ;
+ p += plen ;
+ len -= plen ;
+ if (len < 0) {
+ DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
+ return NULL;
+ }
+ if ((plen & 3) && (para != SMT_P_ECHODATA)) {
+ DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
+ return NULL;
+ }
+ if (found)
+ return(found) ;
+ }
+ return NULL;
+}
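[Editorial note, not part of the driver source: the SMT frame body that sm_to_para() walks is a flat list of TLV parameters, each a struct smt_para header (p_type, p_len) followed by p_len bytes of data, with PARA_LEN being the header size. A minimal caller, sketched here purely for illustration (the Set Count layout matches smt_fill_setcount() above), would look like this:]

	/* illustration only: fetch one parameter from a received SMT frame */
	struct smt_p_setcount *sc ;

	sc = (struct smt_p_setcount *) sm_to_para(smc, sm, SMT_P_SETCOUNT) ;
	if (sc != NULL) {
		/* parameter present: sc->count and sc->timestamp are valid */
	}

[sm_to_para() returns NULL both when the parameter is absent and when the frame's length fields are inconsistent, so a NULL check covers both cases.]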
+
+#if 0
+/*
+ * send ANTC data test frame
+ */
+void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
+{
+ SK_UNUSED(smc) ;
+ SK_UNUSED(dest) ;
+#if 0
+ SMbuf *mb ;
+ struct smt_header *smt ;
+ int i ;
+ char *p ;
+
+ mb = smt_get_mbuf() ;
+ mb->sm_len = 3000+12 ;
+ p = smtod(mb, char *) + 12 ;
+ for (i = 0 ; i < 3000 ; i++)
+ *p++ = 1 << (i&7) ;
+
+ smt = smtod(mb, struct smt_header *) ;
+ smt->smt_dest = *dest ;
+ smt->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ;
+ smt_send_mbuf(smc,mb,FC_ASYNC_LLC) ;
+#endif
+}
+#endif
+
+#ifdef DEBUG
+#define hextoasc(x) "0123456789abcdef"[x]
+
+char *addr_to_string(struct fddi_addr *addr)
+{
+ int i ;
+ static char string[6*3] = "****" ;
+
+ for (i = 0 ; i < 6 ; i++) {
+ string[i*3] = hextoasc((addr->a[i]>>4)&0xf) ;
+ string[i*3+1] = hextoasc((addr->a[i])&0xf) ;
+ string[i*3+2] = ':' ;
+ }
+ string[5*3+2] = 0 ;
+ return(string) ;
+}
+#endif
+
+#ifdef AM29K
+smt_ifconfig(int argc, char *argv[])
+{
+ if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
+ !strcmp(argv[1],"yes")) {
+ smc->mib.fddiSMTBypassPresent = 1 ;
+ return(0) ;
+ }
+ return(amdfddi_config(0,argc,argv)) ;
+}
+#endif
+
+/*
+ * return static mac index
+ */
+static int mac_index(struct s_smc *smc, int mac)
+{
+ SK_UNUSED(mac) ;
+#ifdef CONCENTRATOR
+ SK_UNUSED(smc) ;
+ return(NUMPHYS+1) ;
+#else
+ return((smc->s.sas == SMT_SAS) ? 2 : 3) ;
+#endif
+}
+
+/*
+ * return static phy index
+ */
+static int phy_index(struct s_smc *smc, int phy)
+{
+ SK_UNUSED(smc) ;
+ return(phy+1);
+}
+
+/*
+ * return dynamic mac connection resource index
+ */
+static int mac_con_resource_index(struct s_smc *smc, int mac)
+{
+#ifdef CONCENTRATOR
+ SK_UNUSED(smc) ;
+ SK_UNUSED(mac) ;
+ return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_MAC))) ;
+#else
+ SK_UNUSED(mac) ;
+ switch (smc->mib.fddiSMTCF_State) {
+ case SC9_C_WRAP_A :
+ case SC5_THRU_B :
+ case SC11_C_WRAP_S :
+ return(1) ;
+ case SC10_C_WRAP_B :
+ case SC4_THRU_A :
+ return(2) ;
+ }
+ return(smc->s.sas == SMT_SAS ? 2 : 3) ;
+#endif
+}
+
+/*
+ * return dynamic phy connection resource index
+ */
+static int phy_con_resource_index(struct s_smc *smc, int phy)
+{
+#ifdef CONCENTRATOR
+ return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_PHY(phy)))) ;
+#else
+ switch (smc->mib.fddiSMTCF_State) {
+ case SC9_C_WRAP_A :
+ return(phy == PA ? 3 : 2) ;
+ case SC10_C_WRAP_B :
+ return(phy == PA ? 1 : 3) ;
+ case SC4_THRU_A :
+ return(phy == PA ? 3 : 1) ;
+ case SC5_THRU_B :
+ return(phy == PA ? 2 : 3) ;
+ case SC11_C_WRAP_S :
+ return(2) ;
+ }
+ return(phy) ;
+#endif
+}
+
+#ifdef CONCENTRATOR
+static int entity_to_index(struct s_smc *smc, int e)
+{
+ if (e == ENTITY_MAC)
+ return(mac_index(smc,1)) ;
+ else
+ return(phy_index(smc,e - ENTITY_PHY(0))) ;
+}
+#endif
+
+#ifdef LITTLE_ENDIAN
+static int smt_swap_short(u_short s)
+{
+ return(((s>>8)&0xff)|((s&0xff)<<8)) ;
+}
+
+void smt_swap_para(struct smt_header *sm, int len, int direction)
+/* int direction; 0 encode 1 decode */
+{
+ struct smt_para *pa ;
+ const struct smt_pdef *pd ;
+ char *p ;
+ int plen ;
+ int type ;
+ int i ;
+
+/* printf("smt_swap_para sm %x len %d dir %d\n",
+ sm,len,direction) ;
+ */
+ smt_string_swap((char *)sm,SWAP_SMTHEADER,len) ;
+
+ /* swap args */
+ len -= sizeof(struct smt_header) ;
+
+ p = (char *) (sm + 1) ;
+ while (len > 0) {
+ pa = (struct smt_para *) p ;
+ plen = pa->p_len ;
+ type = pa->p_type ;
+ pa->p_type = smt_swap_short(pa->p_type) ;
+ pa->p_len = smt_swap_short(pa->p_len) ;
+ if (direction) {
+ plen = pa->p_len ;
+ type = pa->p_type ;
+ }
+ /*
+ * note: paras can have 0 length !
+ */
+ if (plen < 0)
+ break ;
+ plen += PARA_LEN ;
+ for (i = N_SMT_PLEN, pd = smt_pdef; i ; i--,pd++) {
+ if (pd->ptype == type)
+ break ;
+ }
+ if (i && pd->pswap) {
+ smt_string_swap(p+PARA_LEN,pd->pswap,len) ;
+ }
+ len -= plen ;
+ p += plen ;
+ }
+}
+
+static void smt_string_swap(char *data, const char *format, int len)
+{
+ const char *open_paren = 0 ;
+ int x ;
+
+ while (len > 0 && *format) {
+ switch (*format) {
+ case '[' :
+ open_paren = format ;
+ break ;
+ case ']' :
+ format = open_paren ;
+ break ;
+ case '1' :
+ case '2' :
+ case '3' :
+ case '4' :
+ case '5' :
+ case '6' :
+ case '7' :
+ case '8' :
+ case '9' :
+ data += *format - '0' ;
+ len -= *format - '0' ;
+ break ;
+ case 'c':
+ data++ ;
+ len-- ;
+ break ;
+ case 's' :
+ x = data[0] ;
+ data[0] = data[1] ;
+ data[1] = x ;
+ data += 2 ;
+ len -= 2 ;
+ break ;
+ case 'l' :
+ x = data[0] ;
+ data[0] = data[3] ;
+ data[3] = x ;
+ x = data[1] ;
+ data[1] = data[2] ;
+ data[2] = x ;
+ data += 4 ;
+ len -= 4 ;
+ break ;
+ }
+ format++ ;
+ }
+}
+#else
+void smt_swap_para(struct smt_header *sm, int len, int direction)
+/* int direction; 0 encode 1 decode */
+{
+ SK_UNUSED(sm) ;
+ SK_UNUSED(len) ;
+ SK_UNUSED(direction) ;
+}
+#endif
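[Editorial note on the pswap format strings used by smt_string_swap() above: they form a tiny byte-layout language, where 'c' skips one byte, a digit 1..9 skips that many bytes unchanged, 's' byte-swaps a 16-bit field, 'l' byte-reverses a 32-bit field, and a '[' ... ']' pair repeats the enclosed pattern until the length runs out. The following stand-alone sketch (user-space, not driver code) shows what the pattern "sl" does to six bytes:]

	#include <stdio.h>

	/* same effect as smt_string_swap(buf, "sl", 6): one short, one long */
	static void swap_sl(unsigned char *d)
	{
		unsigned char t ;

		t = d[0] ; d[0] = d[1] ; d[1] = t ;	/* 's': swap 16-bit field  */
		t = d[2] ; d[2] = d[5] ; d[5] = t ;	/* 'l': reverse 32-bit ... */
		t = d[3] ; d[3] = d[4] ; d[4] = t ;	/* ... field byte order    */
	}

	int main(void)
	{
		unsigned char buf[6] = { 0x12, 0x34, 0xde, 0xad, 0xbe, 0xef } ;

		swap_sl(buf) ;
		printf("%02x%02x %02x%02x%02x%02x\n",
		       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]) ;
		/* prints: 3412 efbeadde */
		return 0 ;
	}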
+
+/*
+ * PMF actions
+ */
+int smt_action(struct s_smc *smc, int class, int code, int index)
+{
+ int event ;
+ int port ;
+ DB_SMT("SMT: action %d code %d\n",class,code) ;
+ switch(class) {
+ case SMT_STATION_ACTION :
+ switch(code) {
+ case SMT_STATION_ACTION_CONNECT :
+ smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;
+ queue_event(smc,EVENT_ECM,EC_CONNECT) ;
+ break ;
+ case SMT_STATION_ACTION_DISCONNECT :
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+ smc->mib.fddiSMTRemoteDisconnectFlag = TRUE ;
+ RS_SET(smc,RS_DISCONNECT) ;
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_DISCONNECT,
+ smt_get_event_word(smc));
+ break ;
+ case SMT_STATION_ACTION_PATHTEST :
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_SMT_EVENT, (u_long) FDDI_PATH_TEST,
+ smt_get_event_word(smc));
+ break ;
+ case SMT_STATION_ACTION_SELFTEST :
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_SELF_TEST,
+ smt_get_event_word(smc));
+ break ;
+ case SMT_STATION_ACTION_DISABLE_A :
+ if (smc->y[PA].pc_mode == PM_PEER) {
+ RS_SET(smc,RS_EVENT) ;
+ queue_event(smc,EVENT_PCM+PA,PC_DISABLE) ;
+ }
+ break ;
+ case SMT_STATION_ACTION_DISABLE_B :
+ if (smc->y[PB].pc_mode == PM_PEER) {
+ RS_SET(smc,RS_EVENT) ;
+ queue_event(smc,EVENT_PCM+PB,PC_DISABLE) ;
+ }
+ break ;
+ case SMT_STATION_ACTION_DISABLE_M :
+ for (port = 0 ; port < NUMPHYS ; port++) {
+ if (smc->mib.p[port].fddiPORTMy_Type != TM)
+ continue ;
+ RS_SET(smc,RS_EVENT) ;
+ queue_event(smc,EVENT_PCM+port,PC_DISABLE) ;
+ }
+ break ;
+ default :
+ return(1) ;
+ }
+ break ;
+ case SMT_PORT_ACTION :
+ switch(code) {
+ case SMT_PORT_ACTION_ENABLE :
+ event = PC_ENABLE ;
+ break ;
+ case SMT_PORT_ACTION_DISABLE :
+ event = PC_DISABLE ;
+ break ;
+ case SMT_PORT_ACTION_MAINT :
+ event = PC_MAINT ;
+ break ;
+ case SMT_PORT_ACTION_START :
+ event = PC_START ;
+ break ;
+ case SMT_PORT_ACTION_STOP :
+ event = PC_STOP ;
+ break ;
+ default :
+ return(1) ;
+ }
+ queue_event(smc,EVENT_PCM+index,event) ;
+ break ;
+ default :
+ return(1) ;
+ }
+ return(0) ;
+}
+
+/*
+ * change tneg
+ * set T_Req in MIB (Path Attribute)
+ * calculate new values for MAC
+ * if change required
+ * disconnect
+ * set reconnect
+ * end
+ */
+void smt_change_t_neg(struct s_smc *smc, u_long tneg)
+{
+ smc->mib.a[PATH0].fddiPATHMaxT_Req = tneg ;
+
+ if (smt_set_mac_opvalues(smc)) {
+ RS_SET(smc,RS_EVENT) ;
+ smc->sm.please_reconnect = 1 ;
+ queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
+ }
+}
+
+/*
+ * canonical conversion of <len> bytes beginning from *data
+ */
+#ifdef USE_CAN_ADDR
+void hwm_conv_can(struct s_smc *smc, char *data, int len)
+{
+ int i ;
+
+ SK_UNUSED(smc) ;
+
+ for (i = len; i ; i--, data++) {
+ *data = canonical[*(u_char *)data] ;
+ }
+}
+#endif
+
+#endif /* no SLIM_SMT */
+
diff --git a/drivers/net/skfp/smtdef.c b/drivers/net/skfp/smtdef.c
new file mode 100644
index 000000000000..5a0c8db816d8
--- /dev/null
+++ b/drivers/net/skfp/smtdef.c
@@ -0,0 +1,360 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT/CMT defaults
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#ifndef OEM_USER_DATA
+#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
+#endif
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ;
+#endif
+
+/*
+ * defaults
+ */
+#define TTMS(x) ((u_long)(x)*1000L)
+#define TTS(x) ((u_long)(x)*1000000L)
+#define TTUS(x) ((u_long)(x))
+
+#define DEFAULT_TB_MIN TTMS(5)
+#define DEFAULT_TB_MAX TTMS(50)
+#define DEFAULT_C_MIN TTUS(1600)
+#define DEFAULT_T_OUT TTMS(100+5)
+#define DEFAULT_TL_MIN TTUS(30)
+#define DEFAULT_LC_SHORT TTMS(50+5)
+#define DEFAULT_LC_MEDIUM TTMS(500+20)
+#define DEFAULT_LC_LONG TTS(5)+TTMS(50)
+#define DEFAULT_LC_EXTENDED TTS(50)+TTMS(50)
+#define DEFAULT_T_NEXT_9 TTMS(200+10)
+#define DEFAULT_NS_MAX TTUS(1310)
+#define DEFAULT_I_MAX TTMS(25)
+#define DEFAULT_IN_MAX TTMS(40)
+#define DEFAULT_TD_MIN TTMS(5)
+#define DEFAULT_T_NON_OP TTS(1)
+#define DEFAULT_T_STUCK TTS(8)
+#define DEFAULT_T_DIRECT TTMS(370)
+#define DEFAULT_T_JAM TTMS(370)
+#define DEFAULT_T_ANNOUNCE TTMS(2500)
+#define DEFAULT_D_MAX TTUS(1617)
+#define DEFAULT_LEM_ALARM (8)
+#define DEFAULT_LEM_CUTOFF (7)
+#define DEFAULT_TEST_DONE TTS(1)
+#define DEFAULT_CHECK_POLL TTS(1)
+#define DEFAULT_POLL TTMS(50)
+
+/*
+ * LCT error thresholds
+ */
+#define DEFAULT_LCT_SHORT 1
+#define DEFAULT_LCT_MEDIUM 3
+#define DEFAULT_LCT_LONG 5
+#define DEFAULT_LCT_EXTEND 50
+
+/* Forward declarations */
+void smt_reset_defaults(struct s_smc *smc, int level);
+static void smt_init_mib(struct s_smc *smc, int level);
+static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper);
+
+void smt_set_defaults(struct s_smc *smc)
+{
+ smt_reset_defaults(smc,0) ;
+}
+
+#define MS2BCLK(x) ((x)*12500L)
+#define US2BCLK(x) ((x)*1250L)
+
+void smt_reset_defaults(struct s_smc *smc, int level)
+{
+ struct smt_config *smt ;
+ int i ;
+ u_long smt_boot_time;
+
+
+ smt_init_mib(smc,level) ;
+
+ smc->os.smc_version = SMC_VERSION ;
+ smt_boot_time = smt_get_time();
+ for( i = 0; i < NUMMACS; i++ )
+ smc->sm.last_tok_time[i] = smt_boot_time ;
+ smt = &smc->s ;
+ smt->attach_s = 0 ;
+ smt->build_ring_map = 1 ;
+ smt->sas = SMT_DAS ;
+ smt->numphys = NUMPHYS ;
+ smt->pcm_tb_min = DEFAULT_TB_MIN ;
+ smt->pcm_tb_max = DEFAULT_TB_MAX ;
+ smt->pcm_c_min = DEFAULT_C_MIN ;
+ smt->pcm_t_out = DEFAULT_T_OUT ;
+ smt->pcm_tl_min = DEFAULT_TL_MIN ;
+ smt->pcm_lc_short = DEFAULT_LC_SHORT ;
+ smt->pcm_lc_medium = DEFAULT_LC_MEDIUM ;
+ smt->pcm_lc_long = DEFAULT_LC_LONG ;
+ smt->pcm_lc_extended = DEFAULT_LC_EXTENDED ;
+ smt->pcm_t_next_9 = DEFAULT_T_NEXT_9 ;
+ smt->pcm_ns_max = DEFAULT_NS_MAX ;
+ smt->ecm_i_max = DEFAULT_I_MAX ;
+ smt->ecm_in_max = DEFAULT_IN_MAX ;
+ smt->ecm_td_min = DEFAULT_TD_MIN ;
+ smt->ecm_test_done = DEFAULT_TEST_DONE ;
+ smt->ecm_check_poll = DEFAULT_CHECK_POLL ;
+ smt->rmt_t_non_op = DEFAULT_T_NON_OP ;
+ smt->rmt_t_stuck = DEFAULT_T_STUCK ;
+ smt->rmt_t_direct = DEFAULT_T_DIRECT ;
+ smt->rmt_t_jam = DEFAULT_T_JAM ;
+ smt->rmt_t_announce = DEFAULT_T_ANNOUNCE ;
+ smt->rmt_t_poll = DEFAULT_POLL ;
+ smt->rmt_dup_mac_behavior = FALSE ; /* See Struct smt_config */
+ smt->mac_d_max = DEFAULT_D_MAX ;
+
+ smt->lct_short = DEFAULT_LCT_SHORT ;
+ smt->lct_medium = DEFAULT_LCT_MEDIUM ;
+ smt->lct_long = DEFAULT_LCT_LONG ;
+ smt->lct_extended = DEFAULT_LCT_EXTEND ;
+
+#ifndef SLIM_SMT
+#ifdef ESS
+ if (level == 0) {
+ smc->ess.sync_bw_available = FALSE ;
+ smc->mib.fddiESSPayload = 0 ;
+ smc->mib.fddiESSOverhead = 0 ;
+ smc->mib.fddiESSMaxTNeg = (u_long)(- MS2BCLK(25)) ;
+ smc->mib.fddiESSMinSegmentSize = 1 ;
+ smc->mib.fddiESSCategory = SB_STATIC ;
+ smc->mib.fddiESSSynchTxMode = FALSE ;
+ smc->ess.raf_act_timer_poll = FALSE ;
+ smc->ess.timer_count = 7 ; /* first RAF alc req after 3s */
+ }
+ smc->ess.local_sba_active = FALSE ;
+ smc->ess.sba_reply_pend = NULL ;
+#endif
+#ifdef SBA
+ smt_init_sba(smc,level) ;
+#endif
+#endif /* no SLIM_SMT */
+#ifdef TAG_MODE
+ if (level == 0) {
+ smc->hw.pci_fix_value = 0 ;
+ }
+#endif
+}
+
+/*
+ * manufacturer data
+ */
+static const char man_data[32] =
+/* 01234567890123456789012345678901 */
+ "xxxSK-NET FDDI SMT 7.3 - V2.8.8" ;
+
+static void smt_init_mib(struct s_smc *smc, int level)
+{
+ struct fddi_mib *mib ;
+ struct fddi_mib_p *pm ;
+ int port ;
+ int path ;
+
+ mib = &smc->mib ;
+ if (level == 0) {
+ /*
+ * set EVERYTHING to ZERO
+ * EXCEPT hw and os
+ */
+ memset(((char *)smc)+
+ sizeof(struct s_smt_os)+sizeof(struct s_smt_hw), 0,
+ sizeof(struct s_smc) -
+ sizeof(struct s_smt_os) - sizeof(struct s_smt_hw)) ;
+ }
+ else {
+ mib->fddiSMTRemoteDisconnectFlag = 0 ;
+ mib->fddiSMTPeerWrapFlag = 0 ;
+ }
+
+ mib->fddiSMTOpVersionId = 2 ;
+ mib->fddiSMTHiVersionId = 2 ;
+ mib->fddiSMTLoVersionId = 2 ;
+ memcpy((char *) mib->fddiSMTManufacturerData,man_data,32) ;
+ if (level == 0) {
+ strcpy(mib->fddiSMTUserData,OEM_USER_DATA) ;
+ }
+ mib->fddiSMTMIBVersionId = 1 ;
+ mib->fddiSMTMac_Ct = NUMMACS ;
+ mib->fddiSMTConnectionPolicy = POLICY_MM | POLICY_AA | POLICY_BB ;
+
+ /*
+ * fddiSMTNonMaster_Ct and fddiSMTMaster_Ct are set in smt_fixup_mib
+ * s.sas is not set yet (is set in init driver)
+ */
+ mib->fddiSMTAvailablePaths = MIB_PATH_P | MIB_PATH_S ;
+
+ mib->fddiSMTConfigCapabilities = 0 ; /* no hold,no wrap_ab*/
+ mib->fddiSMTTT_Notify = 10 ;
+ mib->fddiSMTStatRptPolicy = TRUE ;
+ mib->fddiSMTTrace_MaxExpiration = SEC2MIB(7) ;
+ mib->fddiSMTMACIndexes = INDEX_MAC ;
+ mib->fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ; /* separated */
+
+ mib->m[MAC0].fddiMACIndex = INDEX_MAC ;
+ mib->m[MAC0].fddiMACFrameStatusFunctions = FSC_TYPE0 ;
+ mib->m[MAC0].fddiMACRequestedPaths =
+ MIB_P_PATH_LOCAL |
+ MIB_P_PATH_SEC_ALTER |
+ MIB_P_PATH_PRIM_ALTER ;
+ mib->m[MAC0].fddiMACAvailablePaths = MIB_PATH_P ;
+ mib->m[MAC0].fddiMACCurrentPath = MIB_PATH_PRIMARY ;
+ mib->m[MAC0].fddiMACT_MaxCapabilitiy = (u_long)(- MS2BCLK(165)) ;
+ mib->m[MAC0].fddiMACTVXCapabilitiy = (u_long)(- US2BCLK(52)) ;
+ if (level == 0) {
+ mib->m[MAC0].fddiMACTvxValue = (u_long)(- US2BCLK(27)) ;
+ mib->m[MAC0].fddiMACTvxValueMIB = (u_long)(- US2BCLK(27)) ;
+ mib->m[MAC0].fddiMACT_Req = (u_long)(- MS2BCLK(165)) ;
+ mib->m[MAC0].fddiMACT_ReqMIB = (u_long)(- MS2BCLK(165)) ;
+ mib->m[MAC0].fddiMACT_Max = (u_long)(- MS2BCLK(165)) ;
+ mib->m[MAC0].fddiMACT_MaxMIB = (u_long)(- MS2BCLK(165)) ;
+ mib->m[MAC0].fddiMACT_Min = (u_long)(- MS2BCLK(4)) ;
+ }
+ mib->m[MAC0].fddiMACHardwarePresent = TRUE ;
+ mib->m[MAC0].fddiMACMA_UnitdataEnable = TRUE ;
+ mib->m[MAC0].fddiMACFrameErrorThreshold = 1 ;
+ mib->m[MAC0].fddiMACNotCopiedThreshold = 1 ;
+ /*
+ * Path attributes
+ */
+ for (path = 0 ; path < NUMPATHS ; path++) {
+ mib->a[path].fddiPATHIndex = INDEX_PATH + path ;
+ if (level == 0) {
+ mib->a[path].fddiPATHTVXLowerBound =
+ (u_long)(- US2BCLK(27)) ;
+ mib->a[path].fddiPATHT_MaxLowerBound =
+ (u_long)(- MS2BCLK(165)) ;
+ mib->a[path].fddiPATHMaxT_Req =
+ (u_long)(- MS2BCLK(165)) ;
+ }
+ }
+
+
+ /*
+ * Port attributes
+ */
+ pm = mib->p ;
+ for (port = 0 ; port < NUMPHYS ; port++) {
+ /*
+ * set MIB pointer in phy
+ */
+ /* Attention: don't initialize mib pointer here! */
+ /* It must be initialized during phase 2 */
+ smc->y[port].mib = NULL;
+ mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ;
+
+ pm->fddiPORTIndex = port+INDEX_PORT ;
+ pm->fddiPORTHardwarePresent = TRUE ;
+ if (level == 0) {
+ pm->fddiPORTLer_Alarm = DEFAULT_LEM_ALARM ;
+ pm->fddiPORTLer_Cutoff = DEFAULT_LEM_CUTOFF ;
+ }
+ /*
+ * fddiPORTRequestedPaths are set in pcmplc.c
+ * we don't know the port type yet !
+ */
+ pm->fddiPORTRequestedPaths[1] = 0 ;
+ pm->fddiPORTRequestedPaths[2] = 0 ;
+ pm->fddiPORTRequestedPaths[3] = 0 ;
+ pm->fddiPORTAvailablePaths = MIB_PATH_P ;
+ pm->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
+ pm++ ;
+ }
+
+ (void) smt_set_mac_opvalues(smc) ;
+}
+
+int smt_set_mac_opvalues(struct s_smc *smc)
+{
+ int st ;
+ int st2 ;
+
+ st = set_min_max(1,smc->mib.m[MAC0].fddiMACTvxValueMIB,
+ smc->mib.a[PATH0].fddiPATHTVXLowerBound,
+ &smc->mib.m[MAC0].fddiMACTvxValue) ;
+ st |= set_min_max(0,smc->mib.m[MAC0].fddiMACT_MaxMIB,
+ smc->mib.a[PATH0].fddiPATHT_MaxLowerBound,
+ &smc->mib.m[MAC0].fddiMACT_Max) ;
+ st |= (st2 = set_min_max(0,smc->mib.m[MAC0].fddiMACT_ReqMIB,
+ smc->mib.a[PATH0].fddiPATHMaxT_Req,
+ &smc->mib.m[MAC0].fddiMACT_Req)) ;
+ if (st2) {
+ /* Treq attribute changed remotely. So send an AIX_EVENT to the
+ * user
+ */
+ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
+ FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ,
+ smt_get_event_word(smc));
+ }
+ return(st) ;
+}
+
+void smt_fixup_mib(struct s_smc *smc)
+{
+#ifdef CONCENTRATOR
+ switch (smc->s.sas) {
+ case SMT_SAS :
+ smc->mib.fddiSMTNonMaster_Ct = 1 ;
+ break ;
+ case SMT_DAS :
+ smc->mib.fddiSMTNonMaster_Ct = 2 ;
+ break ;
+ case SMT_NAC :
+ smc->mib.fddiSMTNonMaster_Ct = 0 ;
+ break ;
+ }
+ smc->mib.fddiSMTMaster_Ct = NUMPHYS - smc->mib.fddiSMTNonMaster_Ct ;
+#else
+ switch (smc->s.sas) {
+ case SMT_SAS :
+ smc->mib.fddiSMTNonMaster_Ct = 1 ;
+ break ;
+ case SMT_DAS :
+ smc->mib.fddiSMTNonMaster_Ct = 2 ;
+ break ;
+ }
+ smc->mib.fddiSMTMaster_Ct = 0 ;
+#endif
+}
+
+/*
+ * determine new setting for operational value
+ * if limit is lower than mib
+ * use limit
+ * else
+ * use mib
+ * NOTE : numbers are negative, negate comparison !
+ */
+static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper)
+{
+ u_long old ;
+ old = *oper ;
+ if ((limit > mib) ^ maxflag)
+ *oper = limit ;
+ else
+ *oper = mib ;
+ return(old != *oper) ;
+}
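[Editorial note: the NOTE above refers to the driver-wide convention of storing timer values as negated BCLK counts, e.g. T_Max = 165 ms is kept as (u_long)(-MS2BCLK(165)). Because both operands are negative, the shorter time is the numerically larger stored value, which is why the comparison is effectively inverted and steered by maxflag. A quick stand-alone check of that ordering, illustration only and not driver code:]

	#include <stdio.h>

	#define MS2BCLK(x)	((x)*12500L)

	int main(void)
	{
		unsigned long t_long  = (unsigned long) -MS2BCLK(165) ;	/* 165 ms */
		unsigned long t_short = (unsigned long) -MS2BCLK(4) ;	/*   4 ms */

		/* the shorter time is the numerically larger stored value */
		printf("%d\n", t_short > t_long) ;	/* prints 1 */
		return 0 ;
	}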
+
diff --git a/drivers/net/skfp/smtinit.c b/drivers/net/skfp/smtinit.c
new file mode 100644
index 000000000000..3c8964ce1837
--- /dev/null
+++ b/drivers/net/skfp/smtinit.c
@@ -0,0 +1,125 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ Init SMT
+ call all module level initialization routines
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)smtinit.c 1.15 97/05/06 (C) SK " ;
+#endif
+
+void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
+
+/* define global debug variable */
+#if defined(DEBUG) && !defined(DEBUG_BRD)
+struct smt_debug debug;
+#endif
+
+#ifndef MULT_OEM
+#define OEMID(smc,i) oem_id[i]
+ extern u_char oem_id[] ;
+#else /* MULT_OEM */
+#define OEMID(smc,i) smc->hw.oem_id->oi_mark[i]
+ extern struct s_oem_ids oem_ids[] ;
+#endif /* MULT_OEM */
+
+/*
+ * Set OEM specific values
+ *
+ * Cannot be called from smt_reset_defaults, because at that point it is
+ * not certain that the OEM ID has already been defined.
+ */
+static void set_oem_spec_val(struct s_smc *smc)
+{
+ struct fddi_mib *mib ;
+
+ mib = &smc->mib ;
+
+ /*
+ * set IBM specific values
+ */
+ if (OEMID(smc,0) == 'I') {
+ mib->fddiSMTConnectionPolicy = POLICY_MM ;
+ }
+}
+
+/*
+ * Init SMT
+ */
+int init_smt(struct s_smc *smc, u_char *mac_addr)
+/* u_char *mac_addr; canonical address or NULL */
+{
+ int p ;
+
+#if defined(DEBUG) && !defined(DEBUG_BRD)
+ debug.d_smt = 0 ;
+ debug.d_smtf = 0 ;
+ debug.d_rmt = 0 ;
+ debug.d_ecm = 0 ;
+ debug.d_pcm = 0 ;
+ debug.d_cfm = 0 ;
+
+ debug.d_plc = 0 ;
+#ifdef ESS
+ debug.d_ess = 0 ;
+#endif
+#ifdef SBA
+ debug.d_sba = 0 ;
+#endif
+#endif /* DEBUG && !DEBUG_BRD */
+
+ /* First initialize the ports mib->pointers */
+ for ( p = 0; p < NUMPHYS; p ++ ) {
+ smc->y[p].mib = & smc->mib.p[p] ;
+ }
+
+ set_oem_spec_val(smc) ;
+ (void) smt_set_mac_opvalues(smc) ;
+ init_fddi_driver(smc,mac_addr) ; /* HW driver */
+ smt_fixup_mib(smc) ; /* update values that depend on s.sas */
+
+ ev_init(smc) ; /* event queue */
+#ifndef SLIM_SMT
+ smt_init_evc(smc) ; /* evcs in MIB */
+#endif /* no SLIM_SMT */
+ smt_timer_init(smc) ; /* timer package */
+ smt_agent_init(smc) ; /* SMT frame manager */
+
+ pcm_init(smc) ; /* PCM state machine */
+ ecm_init(smc) ; /* ECM state machine */
+ cfm_init(smc) ; /* CFM state machine */
+ rmt_init(smc) ; /* RMT state machine */
+
+ for (p = 0 ; p < NUMPHYS ; p++) {
+ pcm(smc,p,0) ; /* PCM A state machine */
+ }
+ ecm(smc,0) ; /* ECM state machine */
+ cfm(smc,0) ; /* CFM state machine */
+ rmt(smc,0) ; /* RMT state machine */
+
+ smt_agent_task(smc) ; /* NIF FSM etc */
+
+ PNMI_INIT(smc) ; /* PNMI initialization */
+
+ return(0) ;
+}
+
diff --git a/drivers/net/skfp/smtparse.c b/drivers/net/skfp/smtparse.c
new file mode 100644
index 000000000000..d5779e414dbe
--- /dev/null
+++ b/drivers/net/skfp/smtparse.c
@@ -0,0 +1,467 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+
+/*
+ parser for SMT parameters
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smt_p.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)smtparse.c 1.12 98/10/06 (C) SK " ;
+#endif
+
+#ifdef sun
+#define _far
+#endif
+
+/*
+ * convert to BCLK units
+ */
+#define MS2BCLK(x) ((x)*12500L)
+#define US2BCLK(x) ((x/10)*125L)
+
+/*
+ * parameter table
+ */
+static struct s_ptab {
+ char *pt_name ;
+ u_short pt_num ;
+ u_short pt_type ;
+ u_long pt_min ;
+ u_long pt_max ;
+} ptab[] = {
+ { "PMFPASSWD",0, 0 } ,
+ { "USERDATA",1, 0 } ,
+ { "LERCUTOFFA",2, 1, 4, 15 } ,
+ { "LERCUTOFFB",3, 1, 4, 15 } ,
+ { "LERALARMA",4, 1, 4, 15 } ,
+ { "LERALARMB",5, 1, 4, 15 } ,
+ { "TMAX",6, 1, 5, 165 } ,
+ { "TMIN",7, 1, 5, 165 } ,
+ { "TREQ",8, 1, 5, 165 } ,
+ { "TVX",9, 1, 2500, 10000 } ,
+#ifdef ESS
+ { "SBAPAYLOAD",10, 1, 0, 1562 } ,
+ { "SBAOVERHEAD",11, 1, 50, 5000 } ,
+ { "MAXTNEG",12, 1, 5, 165 } ,
+ { "MINSEGMENTSIZE",13, 1, 0, 4478 } ,
+ { "SBACATEGORY",14, 1, 0, 0xffff } ,
+ { "SYNCHTXMODE",15, 0 } ,
+#endif
+#ifdef SBA
+ { "SBACOMMAND",16, 0 } ,
+ { "SBAAVAILABLE",17, 1, 0, 100 } ,
+#endif
+ { NULL }
+} ;
+
+/* Define maximum string size for values and keybuffer */
+#define MAX_VAL 40
+
+/*
+ * local function declarations
+ */
+static u_long parse_num(int type, char _far *value, char *v, u_long mn,
+ u_long mx, int scale);
+static int parse_word(char *buf, char _far *text);
+
+#ifdef SIM
+#define DB_MAIN(a,b,c) printf(a,b,c)
+#else
+#define DB_MAIN(a,b,c)
+#endif
+
+/*
+ * BEGIN_MANUAL_ENTRY()
+ *
+ * int smt_parse_arg(struct s_smc *,char _far *keyword,int type,
+ char _far *value)
+ *
+ * parse SMT parameter
+ * *keyword
+ * pointer to keyword, must be \0, \n or \r terminated
+ * *value pointer to value, either char * or u_long *
+ * if char *
+ * pointer to value, must be \0, \n or \r terminated
+ * if u_long *
+ * contains binary value
+ *
+ * type 0: integer
+ * 1: string
+ * return
+ * 0 parameter parsed ok
+ * != 0 error
+ * NOTE:
+ * function can be called with DS != SS
+ *
+ *
+ * END_MANUAL_ENTRY()
+ */
+int smt_parse_arg(struct s_smc *smc, char _far *keyword, int type,
+ char _far *value)
+{
+ char keybuf[MAX_VAL+1];
+ char valbuf[MAX_VAL+1];
+ char c ;
+ char *p ;
+ char *v ;
+ char *d ;
+ u_long val = 0 ;
+ struct s_ptab *pt ;
+ int st ;
+ int i ;
+
+ /*
+ * parse keyword
+ */
+ if ((st = parse_word(keybuf,keyword)))
+ return(st) ;
+ /*
+ * parse value if given as string
+ */
+ if (type == 1) {
+ if ((st = parse_word(valbuf,value)))
+ return(st) ;
+ }
+ /*
+ * search in table
+ */
+ st = 0 ;
+ for (pt = ptab ; (v = pt->pt_name) ; pt++) {
+ for (p = keybuf ; (c = *p) ; p++,v++) {
+ if (c != *v)
+ break ;
+ }
+ if (!c && !*v)
+ break ;
+ }
+ if (!v)
+ return(-1) ;
+#if 0
+ printf("=>%s<==>%s<=\n",pt->pt_name,valbuf) ;
+#endif
+ /*
+ * set value in MIB
+ */
+ if (pt->pt_type)
+ val = parse_num(type,value,valbuf,pt->pt_min,pt->pt_max,1) ;
+ switch (pt->pt_num) {
+ case 0 :
+ v = valbuf ;
+ d = (char *) smc->mib.fddiPRPMFPasswd ;
+ for (i = 0 ; i < (signed)sizeof(smc->mib.fddiPRPMFPasswd) ; i++)
+ *d++ = *v++ ;
+ DB_MAIN("SET %s = %s\n",pt->pt_name,smc->mib.fddiPRPMFPasswd) ;
+ break ;
+ case 1 :
+ v = valbuf ;
+ d = (char *) smc->mib.fddiSMTUserData ;
+ for (i = 0 ; i < (signed)sizeof(smc->mib.fddiSMTUserData) ; i++)
+ *d++ = *v++ ;
+ DB_MAIN("SET %s = %s\n",pt->pt_name,smc->mib.fddiSMTUserData) ;
+ break ;
+ case 2 :
+ smc->mib.p[PA].fddiPORTLer_Cutoff = (u_char) val ;
+ DB_MAIN("SET %s = %d\n",
+ pt->pt_name,smc->mib.p[PA].fddiPORTLer_Cutoff) ;
+ break ;
+ case 3 :
+ smc->mib.p[PB].fddiPORTLer_Cutoff = (u_char) val ;
+ DB_MAIN("SET %s = %d\n",
+ pt->pt_name,smc->mib.p[PB].fddiPORTLer_Cutoff) ;
+ break ;
+ case 4 :
+ smc->mib.p[PA].fddiPORTLer_Alarm = (u_char) val ;
+ DB_MAIN("SET %s = %d\n",
+ pt->pt_name,smc->mib.p[PA].fddiPORTLer_Alarm) ;
+ break ;
+ case 5 :
+ smc->mib.p[PB].fddiPORTLer_Alarm = (u_char) val ;
+ DB_MAIN("SET %s = %d\n",
+ pt->pt_name,smc->mib.p[PB].fddiPORTLer_Alarm) ;
+ break ;
+ case 6 : /* TMAX */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.a[PATH0].fddiPATHT_MaxLowerBound =
+ (u_long) -MS2BCLK((long)val) ;
+ break ;
+ case 7 : /* TMIN */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.m[MAC0].fddiMACT_Min =
+ (u_long) -MS2BCLK((long)val) ;
+ break ;
+ case 8 : /* TREQ */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.a[PATH0].fddiPATHMaxT_Req =
+ (u_long) -MS2BCLK((long)val) ;
+ break ;
+ case 9 : /* TVX */
+ DB_MAIN("SET %s = %d \n",pt->pt_name,val) ;
+ smc->mib.a[PATH0].fddiPATHTVXLowerBound =
+ (u_long) -US2BCLK((long)val) ;
+ break ;
+#ifdef ESS
+ case 10 : /* SBAPAYLOAD */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ if (smc->mib.fddiESSPayload != val) {
+ smc->ess.raf_act_timer_poll = TRUE ;
+ smc->mib.fddiESSPayload = val ;
+ }
+ break ;
+ case 11 : /* SBAOVERHEAD */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.fddiESSOverhead = val ;
+ break ;
+ case 12 : /* MAXTNEG */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.fddiESSMaxTNeg = (u_long) -MS2BCLK((long)val) ;
+ break ;
+ case 13 : /* MINSEGMENTSIZE */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.fddiESSMinSegmentSize = val ;
+ break ;
+ case 14 : /* SBACATEGORY */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.fddiESSCategory =
+ (smc->mib.fddiESSCategory & 0xffff) |
+ ((u_long)(val << 16)) ;
+ break ;
+ case 15 : /* SYNCHTXMODE */
+ /* do not use memcmp(valbuf,"ALL",3) because DS != SS */
+ if (valbuf[0] == 'A' && valbuf[1] == 'L' && valbuf[2] == 'L') {
+ smc->mib.fddiESSSynchTxMode = TRUE ;
+ DB_MAIN("SET %s = %s\n",pt->pt_name,valbuf) ;
+ }
+ /* if (!memcmp(valbuf,"SPLIT",5)) { */
+ if (valbuf[0] == 'S' && valbuf[1] == 'P' && valbuf[2] == 'L' &&
+ valbuf[3] == 'I' && valbuf[4] == 'T') {
+ DB_MAIN("SET %s = %s\n",pt->pt_name,valbuf) ;
+ smc->mib.fddiESSSynchTxMode = FALSE ;
+ }
+ break ;
+#endif
+#ifdef SBA
+ case 16 : /* SBACOMMAND */
+ /* if (!memcmp(valbuf,"START",5)) { */
+ if (valbuf[0] == 'S' && valbuf[1] == 'T' && valbuf[2] == 'A' &&
+ valbuf[3] == 'R' && valbuf[4] == 'T') {
+ DB_MAIN("SET %s = %s\n",pt->pt_name,valbuf) ;
+ smc->mib.fddiSBACommand = SB_START ;
+ }
+ /* if (!memcmp(valbuf,"STOP",4)) { */
+ if (valbuf[0] == 'S' && valbuf[1] == 'T' && valbuf[2] == 'O' &&
+ valbuf[3] == 'P') {
+ DB_MAIN("SET %s = %s\n",pt->pt_name,valbuf) ;
+ smc->mib.fddiSBACommand = SB_STOP ;
+ }
+ break ;
+ case 17 : /* SBAAVAILABLE */
+ DB_MAIN("SET %s = %d\n",pt->pt_name,val) ;
+ smc->mib.fddiSBAAvailable = (u_char) val ;
+ break ;
+#endif
+ }
+ return(0) ;
+}
+
+static int parse_word(char *buf, char _far *text)
+{
+ char c ;
+ char *p ;
+ int p_len ;
+ int quote ;
+ int i ;
+ int ok ;
+
+ /*
+ * skip leading white space
+ */
+ p = buf ;
+ for (i = 0 ; i < MAX_VAL ; i++)
+ *p++ = 0 ;
+ p = buf ;
+ p_len = 0 ;
+ ok = 0 ;
+ while ( (c = *text++) && (c != '\n') && (c != '\r')) {
+ if ((c != ' ') && (c != '\t')) {
+ ok = 1 ;
+ break ;
+ }
+ }
+ if (!ok)
+ return(-1) ;
+ if (c == '"') {
+ quote = 1 ;
+ }
+ else {
+ quote = 0 ;
+ text-- ;
+ }
+ /*
+ * parse valbuf
+ */
+ ok = 0 ;
+ while (!ok && p_len < MAX_VAL-1 && (c = *text++) && (c != '\n')
+ && (c != '\r')) {
+ switch (quote) {
+ case 0 :
+ if ((c == ' ') || (c == '\t') || (c == '=')) {
+ ok = 1 ;
+ break ;
+ }
+ *p++ = c ;
+ p_len++ ;
+ break ;
+ case 2 :
+ *p++ = c ;
+ p_len++ ;
+ quote = 1 ;
+ break ;
+ case 1 :
+ switch (c) {
+ case '"' :
+ ok = 1 ;
+ break ;
+ case '\\' :
+ quote = 2 ;
+ break ;
+ default :
+ *p++ = c ;
+ p_len++ ;
+ }
+ }
+ }
+ *p++ = 0 ;
+ for (p = buf ; (c = *p) ; p++) {
+ if (c >= 'a' && c <= 'z')
+ *p = c + 'A' - 'a' ;
+ }
+ return(0) ;
+}
+
+static u_long parse_num(int type, char _far *value, char *v, u_long mn,
+ u_long mx, int scale)
+{
+ u_long x = 0 ;
+ char c ;
+
+ if (type == 0) { /* integer */
+ u_long _far *l ;
+ u_long u1 ;
+
+ l = (u_long _far *) value ;
+ u1 = *l ;
+ /*
+ * if the value is negative take the lower limit
+ */
+ if ((long)u1 < 0) {
+ if (- ((long)u1) > (long) mx) {
+ u1 = 0 ;
+ }
+ else {
+ u1 = (u_long) - ((long)u1) ;
+ }
+ }
+ x = u1 ;
+ }
+ else { /* string */
+ int sign = 0 ;
+
+ if (*v == '-') {
+ sign = 1 ;
+ }
+ while ((c = *v++) && (c >= '0') && (c <= '9')) {
+ x = x * 10 + c - '0' ;
+ }
+ if (scale == 10) {
+ x *= 10 ;
+ if (c == '.') {
+ if ((c = *v++) && (c >= '0') && (c <= '9')) {
+ x += c - '0' ;
+ }
+ }
+ }
+ if (sign)
+ x = (u_long) - ((long)x) ;
+ }
+ /*
+ * if the value is negative
+ * and the absolute value is outside the limits
+ * take the lower limit
+ * else
+ * take the absolute value
+ */
+ if ((long)x < 0) {
+ if (- ((long)x) > (long) mx) {
+ x = 0 ;
+ }
+ else {
+ x = (u_long) - ((long)x) ;
+ }
+ }
+ if (x < mn)
+ return(mn) ;
+ else if (x > mx)
+ return(mx) ;
+ return(x) ;
+}
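[A few worked values for parse_num() above, as an editorial illustration only; the limits mirror the ptab entry for TREQ (min 5, max 165), and the value pointer is ignored for string input, so NULL stands in here just for the sketch:]

	/* illustration only: out-of-range strings are clamped to the limits */
	u_long v1 = parse_num(1, NULL, "200", 5, 165, 1) ;	/* -> 165 (max) */
	u_long v2 = parse_num(1, NULL, "3",   5, 165, 1) ;	/* -> 5   (min) */
	u_long v3 = parse_num(1, NULL, "42",  5, 165, 1) ;	/* -> 42        */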
+
+#if 0
+struct s_smc SMC ;
+main()
+{
+ char *p ;
+ char *v ;
+ char buf[100] ;
+ int toggle = 0 ;
+
+ while (gets(buf)) {
+ p = buf ;
+ while (*p && ((*p == ' ') || (*p == '\t')))
+ p++ ;
+
+ while (*p && ((*p != ' ') && (*p != '\t')))
+ p++ ;
+
+ v = p ;
+ while (*v && ((*v == ' ') || (*v == '\t')))
+ v++ ;
+ if ((*v >= '0') && (*v <= '9')) {
+ toggle = !toggle ;
+ if (toggle) {
+ u_long l ;
+ l = atol(v) ;
+ smt_parse_arg(&SMC,buf,0,(char _far *)&l) ;
+ }
+ else
+ smt_parse_arg(&SMC,buf,1,(char _far *)p) ;
+ }
+ else {
+ smt_parse_arg(&SMC,buf,1,(char _far *)p) ;
+ }
+ }
+ exit(0) ;
+}
+#endif
+
diff --git a/drivers/net/skfp/smttimer.c b/drivers/net/skfp/smttimer.c
new file mode 100644
index 000000000000..531795e98c30
--- /dev/null
+++ b/drivers/net/skfp/smttimer.c
@@ -0,0 +1,156 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT timer
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)smttimer.c 2.4 97/08/04 (C) SK " ;
+#endif
+
+static void timer_done(struct s_smc *smc, int restart);
+
+void smt_timer_init(struct s_smc *smc)
+{
+ smc->t.st_queue = NULL;
+ smc->t.st_fast.tm_active = FALSE ;
+ smc->t.st_fast.tm_next = NULL;
+ hwt_init(smc) ;
+}
+
+void smt_timer_stop(struct s_smc *smc, struct smt_timer *timer)
+{
+ struct smt_timer **prev ;
+ struct smt_timer *tm ;
+
+ /*
+ * remove timer from queue
+ */
+ timer->tm_active = FALSE ;
+ if (smc->t.st_queue == timer && !timer->tm_next) {
+ hwt_stop(smc) ;
+ }
+ for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
+ if (tm == timer) {
+ *prev = tm->tm_next ;
+ if (tm->tm_next) {
+ tm->tm_next->tm_delta += tm->tm_delta ;
+ }
+ return ;
+ }
+ }
+}
+
+void smt_timer_start(struct s_smc *smc, struct smt_timer *timer, u_long time,
+ u_long token)
+{
+ struct smt_timer **prev ;
+ struct smt_timer *tm ;
+ u_long delta = 0 ;
+
+ time /= 16 ; /* input is uS, clock ticks are 16uS */
+ if (!time)
+ time = 1 ;
+ smt_timer_stop(smc,timer) ;
+ timer->tm_smc = smc ;
+ timer->tm_token = token ;
+ timer->tm_active = TRUE ;
+ if (!smc->t.st_queue) {
+ smc->t.st_queue = timer ;
+ timer->tm_next = NULL;
+ timer->tm_delta = time ;
+ hwt_start(smc,time) ;
+ return ;
+ }
+ /*
+ * timer correction
+ */
+ timer_done(smc,0) ;
+
+ /*
+ * find position in queue
+ */
+ delta = 0 ;
+ for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
+ if (delta + tm->tm_delta > time) {
+ break ;
+ }
+ delta += tm->tm_delta ;
+ }
+ /* insert in queue */
+ *prev = timer ;
+ timer->tm_next = tm ;
+ timer->tm_delta = time - delta ;
+ if (tm)
+ tm->tm_delta -= timer->tm_delta ;
+ /*
+ * start new with first
+ */
+ hwt_start(smc,smc->t.st_queue->tm_delta) ;
+}
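[Editorial worked example of the delta queue maintained by smt_timer_start() above, numbers invented for illustration: with two pending timers due in 100 and 250 ticks the queue stores the deltas 100 and 150; starting a third timer due in 180 ticks walks the queue (0+100 <= 180, but 100+150 > 180), inserts the new entry with tm_delta = 180-100 = 80 and shrinks the following entry's delta from 150 to 70. Only the head delta is ever programmed into the hardware timer via hwt_start(), and timer_done() later subtracts the elapsed ticks from the front of the queue.]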
+
+void smt_force_irq(struct s_smc *smc)
+{
+ smt_timer_start(smc,&smc->t.st_fast,32L, EV_TOKEN(EVENT_SMT,SM_FAST));
+}
+
+void smt_timer_done(struct s_smc *smc)
+{
+ timer_done(smc,1) ;
+}
+
+static void timer_done(struct s_smc *smc, int restart)
+{
+ u_long delta ;
+ struct smt_timer *tm ;
+ struct smt_timer *next ;
+ struct smt_timer **last ;
+ int done = 0 ;
+
+ delta = hwt_read(smc) ;
+ last = &smc->t.st_queue ;
+ tm = smc->t.st_queue ;
+ while (tm && !done) {
+ if (delta >= tm->tm_delta) {
+ tm->tm_active = FALSE ;
+ delta -= tm->tm_delta ;
+ last = &tm->tm_next ;
+ tm = tm->tm_next ;
+ }
+ else {
+ tm->tm_delta -= delta ;
+ delta = 0 ;
+ done = 1 ;
+ }
+ }
+ *last = NULL;
+ next = smc->t.st_queue ;
+ smc->t.st_queue = tm ;
+
+ for ( tm = next ; tm ; tm = next) {
+ next = tm->tm_next ;
+ timer_event(smc,tm->tm_token) ;
+ }
+
+ if (restart && smc->t.st_queue)
+ hwt_start(smc,smc->t.st_queue->tm_delta) ;
+}
+
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
new file mode 100644
index 000000000000..16573aca8b62
--- /dev/null
+++ b/drivers/net/skfp/srf.c
@@ -0,0 +1,429 @@
+/******************************************************************************
+ *
+ * (C)Copyright 1998,1999 SysKonnect,
+ * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
+ *
+ * See the file "skfddi.c" for further information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The information in this file is provided "AS IS" without warranty.
+ *
+ ******************************************************************************/
+
+/*
+ SMT 7.2 Status Response Frame Implementation
+ SRF state machine and frame generation
+*/
+
+#include "h/types.h"
+#include "h/fddi.h"
+#include "h/smc.h"
+#include "h/smt_p.h"
+
+#define KERNEL
+#include "h/smtstate.h"
+
+#ifndef SLIM_SMT
+#ifndef BOOT
+
+#ifndef lint
+static const char ID_sccs[] = "@(#)srf.c 1.18 97/08/04 (C) SK " ;
+#endif
+
+
+/*
+ * function declarations
+ */
+static void clear_all_rep(struct s_smc *smc);
+static void clear_reported(struct s_smc *smc);
+static void smt_send_srf(struct s_smc *smc);
+static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index);
+
+#define MAX_EVCS (sizeof(smc->evcs)/sizeof(smc->evcs[0]))
+
+struct evc_init {
+ u_char code ;
+ u_char index ;
+ u_char n ;
+ u_short para ;
+} ;
+
+static const struct evc_init evc_inits[] = {
+ { SMT_COND_SMT_PEER_WRAP, 0,1,SMT_P1048 } ,
+
+ { SMT_COND_MAC_DUP_ADDR, INDEX_MAC, NUMMACS,SMT_P208C } ,
+ { SMT_COND_MAC_FRAME_ERROR, INDEX_MAC, NUMMACS,SMT_P208D } ,
+ { SMT_COND_MAC_NOT_COPIED, INDEX_MAC, NUMMACS,SMT_P208E } ,
+ { SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC, NUMMACS,SMT_P208F } ,
+ { SMT_EVENT_MAC_PATH_CHANGE, INDEX_MAC, NUMMACS,SMT_P2090 } ,
+
+ { SMT_COND_PORT_LER, INDEX_PORT,NUMPHYS,SMT_P4050 } ,
+ { SMT_COND_PORT_EB_ERROR, INDEX_PORT,NUMPHYS,SMT_P4052 } ,
+ { SMT_EVENT_PORT_CONNECTION, INDEX_PORT,NUMPHYS,SMT_P4051 } ,
+ { SMT_EVENT_PORT_PATH_CHANGE, INDEX_PORT,NUMPHYS,SMT_P4053 } ,
+} ;
+
+#define MAX_INIT_EVC (sizeof(evc_inits)/sizeof(evc_inits[0]))
+
+void smt_init_evc(struct s_smc *smc)
+{
+ struct s_srf_evc *evc ;
+ const struct evc_init *init ;
+ int i ;
+ int index ;
+ int offset ;
+
+ static u_char fail_safe = FALSE ;
+
+ memset((char *)smc->evcs,0,sizeof(smc->evcs)) ;
+
+ evc = smc->evcs ;
+ init = evc_inits ;
+
+ for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+ for (index = 0 ; index < init->n ; index++) {
+ evc->evc_code = init->code ;
+ evc->evc_para = init->para ;
+ evc->evc_index = init->index + index ;
+#ifndef DEBUG
+ evc->evc_multiple = &fail_safe ;
+ evc->evc_cond_state = &fail_safe ;
+#endif
+ evc++ ;
+ }
+ init++ ;
+ }
+
+ if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+ SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
+ }
+
+ /*
+ * conditions
+ */
+ smc->evcs[0].evc_cond_state = &smc->mib.fddiSMTPeerWrapFlag ;
+ smc->evcs[1].evc_cond_state =
+ &smc->mib.m[MAC0].fddiMACDuplicateAddressCond ;
+ smc->evcs[2].evc_cond_state =
+ &smc->mib.m[MAC0].fddiMACFrameErrorFlag ;
+ smc->evcs[3].evc_cond_state =
+ &smc->mib.m[MAC0].fddiMACNotCopiedFlag ;
+
+ /*
+ * events
+ */
+ smc->evcs[4].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_N ;
+ smc->evcs[5].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_P ;
+
+ offset = 6 ;
+ for (i = 0 ; i < NUMPHYS ; i++) {
+ /*
+ * conditions
+ */
+ smc->evcs[offset + 0*NUMPHYS].evc_cond_state =
+ &smc->mib.p[i].fddiPORTLerFlag ;
+ smc->evcs[offset + 1*NUMPHYS].evc_cond_state =
+ &smc->mib.p[i].fddiPORTEB_Condition ;
+
+ /*
+ * events
+ */
+ smc->evcs[offset + 2*NUMPHYS].evc_multiple =
+ &smc->mib.p[i].fddiPORTMultiple_U ;
+ smc->evcs[offset + 3*NUMPHYS].evc_multiple =
+ &smc->mib.p[i].fddiPORTMultiple_P ;
+ offset++ ;
+ }
+#ifdef DEBUG
+ for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ if (SMT_IS_CONDITION(evc->evc_code)) {
+ if (!evc->evc_cond_state) {
+ SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
+ }
+ evc->evc_multiple = &fail_safe ;
+ }
+ else {
+ if (!evc->evc_multiple) {
+ SMT_PANIC(smc,SMT_E0129, SMT_E0129_MSG) ;
+ }
+ evc->evc_cond_state = &fail_safe ;
+ }
+ }
+#endif
+ smc->srf.TSR = smt_get_time() ;
+ smc->srf.sr_state = SR0_WAIT ;
+}
+
+static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
+{
+ int i ;
+ struct s_srf_evc *evc ;
+
+ for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ if (evc->evc_code == code && evc->evc_index == index)
+ return(evc) ;
+ }
+ return NULL;
+}
+
+#define THRESHOLD_2 (2*TICKS_PER_SECOND)
+#define THRESHOLD_32 (32*TICKS_PER_SECOND)
+
+#ifdef DEBUG
+static const char * const srf_names[] = {
+ "None","MACPathChangeEvent", "MACNeighborChangeEvent",
+ "PORTPathChangeEvent", "PORTUndesiredConnectionAttemptEvent",
+ "SMTPeerWrapCondition", "SMTHoldCondition",
+ "MACFrameErrorCondition", "MACDuplicateAddressCondition",
+ "MACNotCopiedCondition", "PORTEBErrorCondition",
+ "PORTLerCondition"
+} ;
+#endif
+
+void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
+{
+ struct s_srf_evc *evc ;
+ int cond_asserted = 0 ;
+ int cond_deasserted = 0 ;
+ int event_occurred = 0 ;
+ int tsr ;
+ int T_Limit = 2*TICKS_PER_SECOND ;
+
+ if (code == SMT_COND_MAC_DUP_ADDR && cond) {
+ RS_SET(smc,RS_DUPADDR) ;
+ }
+
+ if (code) {
+ DB_SMT("SRF: %s index %d\n",srf_names[code],index) ;
+
+ if (!(evc = smt_get_evc(smc,code,index))) {
+ DB_SMT("SRF : smt_get_evc() failed\n",0,0) ;
+ return ;
+ }
+ /*
+ * ignore condition if no change
+ */
+ if (SMT_IS_CONDITION(code)) {
+ if (*evc->evc_cond_state == cond)
+ return ;
+ }
+
+ /*
+ * set transition time stamp
+ */
+ smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ;
+ if (SMT_IS_CONDITION(code)) {
+ DB_SMT("SRF: condition is %s\n",cond ? "ON":"OFF",0) ;
+ if (cond) {
+ *evc->evc_cond_state = TRUE ;
+ evc->evc_rep_required = TRUE ;
+ smc->srf.any_report = TRUE ;
+ cond_asserted = TRUE ;
+ }
+ else {
+ *evc->evc_cond_state = FALSE ;
+ cond_deasserted = TRUE ;
+ }
+ }
+ else {
+ if (evc->evc_rep_required) {
+ *evc->evc_multiple = TRUE ;
+ }
+ else {
+ evc->evc_rep_required = TRUE ;
+ *evc->evc_multiple = FALSE ;
+ }
+ smc->srf.any_report = TRUE ;
+ event_occurred = TRUE ;
+ }
+#ifdef FDDI_MIB
+ snmp_srf_event(smc,evc) ;
+#endif /* FDDI_MIB */
+ }
+ tsr = smt_get_time() - smc->srf.TSR ;
+
+ switch (smc->srf.sr_state) {
+ case SR0_WAIT :
+ /* SR01a */
+ if (cond_asserted && tsr < T_Limit) {
+ smc->srf.SRThreshold = THRESHOLD_2 ;
+ smc->srf.sr_state = SR1_HOLDOFF ;
+ break ;
+ }
+ /* SR01b */
+ if (cond_deasserted && tsr < T_Limit) {
+ smc->srf.sr_state = SR1_HOLDOFF ;
+ break ;
+ }
+ /* SR01c */
+ if (event_occurred && tsr < T_Limit) {
+ smc->srf.sr_state = SR1_HOLDOFF ;
+ break ;
+ }
+ /* SR00b */
+ if (cond_asserted && tsr >= T_Limit) {
+ smc->srf.SRThreshold = THRESHOLD_2 ;
+ smc->srf.TSR = smt_get_time() ;
+ smt_send_srf(smc) ;
+ break ;
+ }
+ /* SR00c */
+ if (cond_deasserted && tsr >= T_Limit) {
+ smc->srf.TSR = smt_get_time() ;
+ smt_send_srf(smc) ;
+ break ;
+ }
+ /* SR00d */
+ if (event_occurred && tsr >= T_Limit) {
+ smc->srf.TSR = smt_get_time() ;
+ smt_send_srf(smc) ;
+ break ;
+ }
+ /* SR00e */
+ if (smc->srf.any_report && (u_long) tsr >=
+ smc->srf.SRThreshold) {
+ smc->srf.SRThreshold *= 2 ;
+ if (smc->srf.SRThreshold > THRESHOLD_32)
+ smc->srf.SRThreshold = THRESHOLD_32 ;
+ smc->srf.TSR = smt_get_time() ;
+ smt_send_srf(smc) ;
+ break ;
+ }
+ /* SR02 */
+ if (!smc->mib.fddiSMTStatRptPolicy) {
+ smc->srf.sr_state = SR2_DISABLED ;
+ break ;
+ }
+ break ;
+ case SR1_HOLDOFF :
+ /* SR10b */
+ if (tsr >= T_Limit) {
+ smc->srf.sr_state = SR0_WAIT ;
+ smc->srf.TSR = smt_get_time() ;
+ smt_send_srf(smc) ;
+ break ;
+ }
+ /* SR11a */
+ if (cond_asserted) {
+ smc->srf.SRThreshold = THRESHOLD_2 ;
+ }
+ /* SR11b */
+ /* SR11c */
+ /* handled above */
+ /* SR12 */
+ if (!smc->mib.fddiSMTStatRptPolicy) {
+ smc->srf.sr_state = SR2_DISABLED ;
+ break ;
+ }
+ break ;
+ case SR2_DISABLED :
+ if (smc->mib.fddiSMTStatRptPolicy) {
+ smc->srf.sr_state = SR0_WAIT ;
+ smc->srf.TSR = smt_get_time() ;
+ smc->srf.SRThreshold = THRESHOLD_2 ;
+ clear_all_rep(smc) ;
+ break ;
+ }
+ break ;
+ }
+}
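[Editorial note on the holdoff arithmetic above: SRThreshold starts at THRESHOLD_2 (2 s) and the SR00e path doubles it on every periodic resend, capping at THRESHOLD_32, so a standing, unacknowledged report is re-sent roughly 2, 4, 8, 16 and then every 32 seconds after the previous one; any newly asserted condition (SR01a/SR00b/SR11a) resets the backoff to 2 s.]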
+
+static void clear_all_rep(struct s_smc *smc)
+{
+ struct s_srf_evc *evc ;
+ int i ;
+
+ for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ evc->evc_rep_required = FALSE ;
+ if (SMT_IS_CONDITION(evc->evc_code))
+ *evc->evc_cond_state = FALSE ;
+ }
+ smc->srf.any_report = FALSE ;
+}
+
+static void clear_reported(struct s_smc *smc)
+{
+ struct s_srf_evc *evc ;
+ int i ;
+
+ smc->srf.any_report = FALSE ;
+ for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ if (SMT_IS_CONDITION(evc->evc_code)) {
+ if (*evc->evc_cond_state == FALSE)
+ evc->evc_rep_required = FALSE ;
+ else
+ smc->srf.any_report = TRUE ;
+ }
+ else {
+ evc->evc_rep_required = FALSE ;
+ *evc->evc_multiple = FALSE ;
+ }
+ }
+}
+
+/*
+ * build and send SMT SRF frame
+ */
+static void smt_send_srf(struct s_smc *smc)
+{
+
+ struct smt_header *smt ;
+ struct s_srf_evc *evc ;
+ SK_LOC_DECL(struct s_pcon,pcon) ;
+ SMbuf *mb ;
+ int i ;
+
+ static const struct fddi_addr SMT_SRF_DA = {
+ { 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
+ } ;
+
+ /*
+ * build SMT header
+ */
+ if (!smc->r.sm_ma_avail)
+ return ;
+ if (!(mb = smt_build_frame(smc,SMT_SRF,SMT_ANNOUNCE,0)))
+ return ;
+
+ RS_SET(smc,RS_SOFTERROR) ;
+
+ smt = smtod(mb, struct smt_header *) ;
+ smt->smt_dest = SMT_SRF_DA ; /* DA == SRF multicast */
+
+ /*
+ * setup parameter status
+ */
+ pcon.pc_len = SMT_MAX_INFO_LEN ; /* max para length */
+ pcon.pc_err = 0 ; /* no error */
+ pcon.pc_badset = 0 ; /* no bad set count */
+ pcon.pc_p = (void *) (smt + 1) ; /* paras start here */
+
+ smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
+ smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
+
+ for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ if (evc->evc_rep_required) {
+ smt_add_para(smc,&pcon,evc->evc_para,
+ (int)evc->evc_index,0) ;
+ }
+ }
+ smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
+ mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
+
+ DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ;
+ DB_SMT("SRF: state SR%d Threshold %d\n",
+ smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
+#ifdef DEBUG
+ dump_smt(smc,smt,"SRF Send") ;
+#endif
+ smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
+ clear_reported(smc) ;
+}
+
+#endif /* no BOOT */
+#endif /* no SLIM_SMT */
+
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
new file mode 100644
index 000000000000..c6fbb1ede0ed
--- /dev/null
+++ b/drivers/net/slhc.c
@@ -0,0 +1,768 @@
+/*
+ * Routines to compress and uncompress tcp packets (for transmission
+ * over low speed serial lines).
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ *
+ * modified for KA9Q Internet Software Package by
+ * Katie Stevens (dkstevens@ucdavis.edu)
+ * University of California, Davis
+ * Computing Services
+ * - 01-31-90 initial adaptation (from 1.19)
+ * PPP.05 02-15-90 [ks]
+ * PPP.08 05-02-90 [ks] use PPP protocol field to signal compression
+ * PPP.15 09-90 [ks] improve mbuf handling
+ * PPP.16 11-02 [karn] substantially rewritten to use NOS facilities
+ *
+ * - Feb 1991 Bill_Simpson@um.cc.umich.edu
+ * variable number of conversation slots
+ * allow zero or one slots
+ * separate routines
+ * status display
+ * - Jul 1994 Dmitry Gorodchanin
+ * Fixes for memory leaks.
+ * - Oct 1994 Dmitry Gorodchanin
+ * Modularization.
+ * - Jan 1995 Bjorn Ekwall
+ * Use ip_fast_csum from ip.h
+ * - July 1995 Christos A. Polyzols
+ * Spotted bug in tcp option checking
+ *
+ *
+ * This module is awkward to place: it is clearly inet code, but it is also
+ * clearly driver code that belongs close to PPP and SLIP.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <net/slhc_vj.h>
+
+#ifdef CONFIG_INET
+/* Entire module is for IP only */
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/termios.h>
+#include <linux/in.h>
+#include <linux/fcntl.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/icmp.h>
+#include <net/tcp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <net/checksum.h>
+#include <asm/unaligned.h>
+
+static unsigned char *encode(unsigned char *cp, unsigned short n);
+static long decode(unsigned char **cpp);
+static unsigned char * put16(unsigned char *cp, unsigned short x);
+static unsigned short pull16(unsigned char **cpp);
+
+/* Initialize compression data structure
+ * slots must be in range 0 to 255 (zero meaning no compression)
+ */
+struct slcompress *
+slhc_init(int rslots, int tslots)
+{
+ register short i;
+ register struct cstate *ts;
+ struct slcompress *comp;
+
+ comp = (struct slcompress *)kmalloc(sizeof(struct slcompress),
+ GFP_KERNEL);
+ if (! comp)
+ goto out_fail;
+ memset(comp, 0, sizeof(struct slcompress));
+
+ if ( rslots > 0 && rslots < 256 ) {
+ size_t rsize = rslots * sizeof(struct cstate);
+ comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL);
+ if (! comp->rstate)
+ goto out_free;
+ memset(comp->rstate, 0, rsize);
+ comp->rslot_limit = rslots - 1;
+ }
+
+ if ( tslots > 0 && tslots < 256 ) {
+ size_t tsize = tslots * sizeof(struct cstate);
+ comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL);
+ if (! comp->tstate)
+ goto out_free2;
+ memset(comp->tstate, 0, tsize);
+ comp->tslot_limit = tslots - 1;
+ }
+
+ comp->xmit_oldest = 0;
+ comp->xmit_current = 255;
+ comp->recv_current = 255;
+ /*
+ * don't accept any packets with implicit index until we get
+ * one with an explicit index. Otherwise the uncompress code
+ * will try to use connection 255, which is almost certainly
+ * out of range
+ */
+ comp->flags |= SLF_TOSS;
+
+ if ( tslots > 0 ) {
+ ts = comp->tstate;
+ for(i = comp->tslot_limit; i > 0; --i){
+ ts[i].cs_this = i;
+ ts[i].next = &(ts[i - 1]);
+ }
+ ts[0].next = &(ts[comp->tslot_limit]);
+ ts[0].cs_this = 0;
+ }
+ return comp;
+
+out_free2:
+ kfree((unsigned char *)comp->rstate);
+out_free:
+ kfree((unsigned char *)comp);
+out_fail:
+ return NULL;
+}
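[Editorial note: the loop above links the transmit slots into a circular LRU list; with four slots the .next chain is ts[3] -> ts[2] -> ts[1] -> ts[0] -> ts[3], and xmit_oldest marks the tail. slhc_compress() below searches this ring linearly starting at the slot after the oldest and relinks a hit to the front, so the most recently used connection is found first on the next packet.]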
+
+
+/* Free a compression data structure */
+void
+slhc_free(struct slcompress *comp)
+{
+ if ( comp == NULLSLCOMPR )
+ return;
+
+ if ( comp->tstate != NULLSLSTATE )
+ kfree( comp->tstate );
+
+ if ( comp->rstate != NULLSLSTATE )
+ kfree( comp->rstate );
+
+ kfree( comp );
+}
+
+
+/* Put a short in host order into a char array in network order */
+static inline unsigned char *
+put16(unsigned char *cp, unsigned short x)
+{
+ *cp++ = x >> 8;
+ *cp++ = x;
+
+ return cp;
+}
+
+
+/* Encode a number */
+unsigned char *
+encode(unsigned char *cp, unsigned short n)
+{
+ if(n >= 256 || n == 0){
+ *cp++ = 0;
+ cp = put16(cp,n);
+ } else {
+ *cp++ = n;
+ }
+ return cp;
+}
+
+/* Pull a 16-bit integer in host order from buffer in network byte order */
+static unsigned short
+pull16(unsigned char **cpp)
+{
+ short rval;
+
+ rval = *(*cpp)++;
+ rval <<= 8;
+ rval |= *(*cpp)++;
+ return rval;
+}
+
+/* Decode a number */
+long
+decode(unsigned char **cpp)
+{
+ register int x;
+
+ x = *(*cpp)++;
+ if(x == 0){
+ return pull16(cpp) & 0xffff; /* pull16 returns -1 on error */
+ } else {
+ return x & 0xff; /* -1 if PULLCHAR returned error */
+ }
+}
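[Editorial note: encode()/decode() above implement the variable-length delta encoding of VJ header compression (RFC 1144): a value of 1..255 travels as a single byte, while 0 and anything >= 256 travel as a zero byte followed by the 16-bit value in network order. A stand-alone sketch of the resulting byte counts, illustration only and not driver code:]

	#include <stdio.h>

	/* same rule as encode() above, written without the driver types */
	static unsigned char *enc(unsigned char *cp, unsigned short n)
	{
		if (n >= 256 || n == 0) {
			*cp++ = 0;
			*cp++ = n >> 8;		/* high byte first (network order) */
			*cp++ = n;
		} else {
			*cp++ = n;
		}
		return cp;
	}

	int main(void)
	{
		unsigned char buf[8], *p = buf;

		p = enc(p, 1);		/* 1 byte:  01        */
		p = enc(p, 255);	/* 1 byte:  ff        */
		p = enc(p, 256);	/* 3 bytes: 00 01 00  */
		printf("%d bytes\n", (int)(p - buf));	/* prints "5 bytes" */
		return 0;
	}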
+
+/*
+ * icp and isize are the original packet.
+ * ocp is a place to put a copy if necessary.
+ * cpp is initially a pointer to icp. If the copy is used,
+ * change it to ocp.
+ */
+
+int
+slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ unsigned char *ocp, unsigned char **cpp, int compress_cid)
+{
+ register struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
+ register struct cstate *lcs = ocs;
+ register struct cstate *cs = lcs->next;
+ register unsigned long deltaS, deltaA;
+ register short changes = 0;
+ int hlen;
+ unsigned char new_seq[16];
+ register unsigned char *cp = new_seq;
+ struct iphdr *ip;
+ struct tcphdr *th, *oth;
+
+
+ /*
+ * Don't play with runt packets.
+ */
+
+ if(isize<sizeof(struct iphdr))
+ return isize;
+
+ ip = (struct iphdr *) icp;
+
+ /* Bail if this packet isn't TCP, or is an IP fragment */
+ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
+ /* Send as regular IP */
+ if(ip->protocol != IPPROTO_TCP)
+ comp->sls_o_nontcp++;
+ else
+ comp->sls_o_tcp++;
+ return isize;
+ }
+ /* Extract TCP header */
+
+ th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
+ hlen = ip->ihl*4 + th->doff*4;
+
+ /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
+ * some other control bit is set). Also uncompressible if
+ * it's a runt.
+ */
+ if(hlen > isize || th->syn || th->fin || th->rst ||
+ ! (th->ack)){
+ /* TCP connection stuff; send as regular IP */
+ comp->sls_o_tcp++;
+ return isize;
+ }
+ /*
+ * Packet is compressible -- we're going to send either a
+ * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way,
+ * we need to locate (or create) the connection state.
+ *
+ * States are kept in a circularly linked list with
+ * xmit_oldest pointing to the end of the list. The
+ * list is kept in lru order by moving a state to the
+ * head of the list whenever it is referenced. Since
+ * the list is short and, empirically, the connection
+ * we want is almost always near the front, we locate
+ * states via linear search. If we don't find a state
+ * for the datagram, the oldest state is (re-)used.
+ */
+ for ( ; ; ) {
+ if( ip->saddr == cs->cs_ip.saddr
+ && ip->daddr == cs->cs_ip.daddr
+ && th->source == cs->cs_tcp.source
+ && th->dest == cs->cs_tcp.dest)
+ goto found;
+
+ /* if current equal oldest, at end of list */
+ if ( cs == ocs )
+ break;
+ lcs = cs;
+ cs = cs->next;
+ comp->sls_o_searches++;
+ };
+ /*
+ * Didn't find it -- re-use oldest cstate. Send an
+ * uncompressed packet that tells the other side what
+ * connection number we're using for this conversation.
+ *
+ * Note that since the state list is circular, the oldest
+ * state points to the newest and we only need to set
+ * xmit_oldest to update the lru linkage.
+ */
+ comp->sls_o_misses++;
+ comp->xmit_oldest = lcs->cs_this;
+ goto uncompressed;
+
+found:
+ /*
+	 * Found it -- move to the front of the connection list.
+ */
+ if(lcs == ocs) {
+ /* found at most recently used */
+ } else if (cs == ocs) {
+ /* found at least recently used */
+ comp->xmit_oldest = lcs->cs_this;
+ } else {
+ /* more than 2 elements */
+ lcs->next = cs->next;
+ cs->next = ocs->next;
+ ocs->next = cs;
+ }
+
+ /*
+ * Make sure that only what we expect to change changed.
+ * Check the following:
+ * IP protocol version, header length & type of service.
+ * The "Don't fragment" bit.
+ * The time-to-live field.
+ * The TCP header length.
+ * IP options, if any.
+ * TCP options, if any.
+ * If any of these things are different between the previous &
+ * current datagram, we send the current datagram `uncompressed'.
+ */
+ oth = &cs->cs_tcp;
+
+ if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl
+ || ip->tos != cs->cs_ip.tos
+ || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000))
+ || ip->ttl != cs->cs_ip.ttl
+ || th->doff != cs->cs_tcp.doff
+ || (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0)
+ || (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){
+ goto uncompressed;
+ }
+
+ /*
+ * Figure out which of the changing fields changed. The
+ * receiver expects changes in the order: urgent, window,
+ * ack, seq (the order minimizes the number of temporaries
+ * needed in this section of code).
+ */
+ if(th->urg){
+ deltaS = ntohs(th->urg_ptr);
+ cp = encode(cp,deltaS);
+ changes |= NEW_U;
+ } else if(th->urg_ptr != oth->urg_ptr){
+ /* argh! URG not set but urp changed -- a sensible
+ * implementation should never do this but RFC793
+ * doesn't prohibit the change so we have to deal
+ * with it. */
+ goto uncompressed;
+ }
+ if((deltaS = ntohs(th->window) - ntohs(oth->window)) != 0){
+ cp = encode(cp,deltaS);
+ changes |= NEW_W;
+ }
+ if((deltaA = ntohl(th->ack_seq) - ntohl(oth->ack_seq)) != 0L){
+ if(deltaA > 0x0000ffff)
+ goto uncompressed;
+ cp = encode(cp,deltaA);
+ changes |= NEW_A;
+ }
+ if((deltaS = ntohl(th->seq) - ntohl(oth->seq)) != 0L){
+ if(deltaS > 0x0000ffff)
+ goto uncompressed;
+ cp = encode(cp,deltaS);
+ changes |= NEW_S;
+ }
+
+ switch(changes){
+ case 0: /* Nothing changed. If this packet contains data and the
+ * last one didn't, this is probably a data packet following
+ * an ack (normal on an interactive connection) and we send
+ * it compressed. Otherwise it's probably a retransmit,
+ * retransmitted ack or window probe. Send it uncompressed
+ * in case the other side missed the compressed version.
+ */
+ if(ip->tot_len != cs->cs_ip.tot_len &&
+ ntohs(cs->cs_ip.tot_len) == hlen)
+ break;
+ goto uncompressed;
+ break;
+ case SPECIAL_I:
+ case SPECIAL_D:
+ /* actual changes match one of our special case encodings --
+ * send packet uncompressed.
+ */
+ goto uncompressed;
+ case NEW_S|NEW_A:
+ if(deltaS == deltaA &&
+ deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
+ /* special case for echoed terminal traffic */
+ changes = SPECIAL_I;
+ cp = new_seq;
+ }
+ break;
+ case NEW_S:
+ if(deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
+ /* special case for data xfer */
+ changes = SPECIAL_D;
+ cp = new_seq;
+ }
+ break;
+ }
+ deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id);
+ if(deltaS != 1){
+ cp = encode(cp,deltaS);
+ changes |= NEW_I;
+ }
+ if(th->psh)
+ changes |= TCP_PUSH_BIT;
+ /* Grab the cksum before we overwrite it below. Then update our
+ * state with this packet's header.
+ */
+ deltaA = ntohs(th->check);
+ memcpy(&cs->cs_ip,ip,20);
+ memcpy(&cs->cs_tcp,th,20);
+ /* We want to use the original packet as our compressed packet.
+ * (cp - new_seq) is the number of bytes we need for compressed
+ * sequence numbers. In addition we need one byte for the change
+ * mask, one for the connection id and two for the tcp checksum.
+ * So, (cp - new_seq) + 4 bytes of header are needed.
+ */
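+	/*
+	 * For illustration, the compressed header written below is laid
+	 * out on the wire as:
+	 *
+	 *	[change mask (| NEW_C)] [connection id, only if NEW_C]
+	 *	[TCP checksum, 2 bytes] [encoded deltas] [TCP data]
+	 *
+	 * i.e. 3 or 4 bytes of fixed header plus (cp - new_seq) delta
+	 * bytes, with SL_TYPE_COMPRESSED_TCP or'ed into the first byte
+	 * just before returning.
+	 */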
+ deltaS = cp - new_seq;
+ if(compress_cid == 0 || comp->xmit_current != cs->cs_this){
+ cp = ocp;
+ *cpp = ocp;
+ *cp++ = changes | NEW_C;
+ *cp++ = cs->cs_this;
+ comp->xmit_current = cs->cs_this;
+ } else {
+ cp = ocp;
+ *cpp = ocp;
+ *cp++ = changes;
+ }
+ cp = put16(cp,(short)deltaA); /* Write TCP checksum */
+/* deltaS is now the size of the change section of the compressed header */
+ memcpy(cp,new_seq,deltaS); /* Write list of deltas */
+ memcpy(cp+deltaS,icp+hlen,isize-hlen);
+ comp->sls_o_compressed++;
+ ocp[0] |= SL_TYPE_COMPRESSED_TCP;
+ return isize - hlen + deltaS + (cp - ocp);
+
+ /* Update connection state cs & send uncompressed packet (i.e.,
+ * a regular ip/tcp packet but with the 'conversation id' we hope
+ * to use on future compressed packets in the protocol field).
+ */
+uncompressed:
+ memcpy(&cs->cs_ip,ip,20);
+ memcpy(&cs->cs_tcp,th,20);
+ if (ip->ihl > 5)
+ memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4);
+ if (th->doff > 5)
+ memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4);
+ comp->xmit_current = cs->cs_this;
+ comp->sls_o_uncompressed++;
+ memcpy(ocp, icp, isize);
+ *cpp = ocp;
+ ocp[9] = cs->cs_this;
+ ocp[0] |= SL_TYPE_UNCOMPRESSED_TCP;
+ return isize;
+}
+
+
+int
+slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+{
+ register int changes;
+ long x;
+ register struct tcphdr *thp;
+ register struct iphdr *ip;
+ register struct cstate *cs;
+ int len, hdrlen;
+ unsigned char *cp = icp;
+
+ /* We've got a compressed packet; read the change byte */
+ comp->sls_i_compressed++;
+ if(isize < 3){
+ comp->sls_i_error++;
+ return 0;
+ }
+ changes = *cp++;
+ if(changes & NEW_C){
+ /* Make sure the state index is in range, then grab the state.
+ * If we have a good state index, clear the 'discard' flag.
+ */
+ x = *cp++; /* Read conn index */
+ if(x < 0 || x > comp->rslot_limit)
+ goto bad;
+
+ comp->flags &=~ SLF_TOSS;
+ comp->recv_current = x;
+ } else {
+ /* this packet has an implicit state index. If we've
+ * had a line error since the last time we got an
+ * explicit state index, we have to toss the packet. */
+ if(comp->flags & SLF_TOSS){
+ comp->sls_i_tossed++;
+ return 0;
+ }
+ }
+ cs = &comp->rstate[comp->recv_current];
+ thp = &cs->cs_tcp;
+ ip = &cs->cs_ip;
+
+ if((x = pull16(&cp)) == -1) { /* Read the TCP checksum */
+ goto bad;
+ }
+ thp->check = htons(x);
+
+ thp->psh = (changes & TCP_PUSH_BIT) ? 1 : 0;
+/*
+ * we can use the same number for the length of the saved header and
+ * the current one, because the packet wouldn't have been sent
+ * as compressed unless the options were the same as the previous one
+ */
+
+ hdrlen = ip->ihl * 4 + thp->doff * 4;
+
+ switch(changes & SPECIALS_MASK){
+ case SPECIAL_I: /* Echoed terminal traffic */
+ {
+ register short i;
+ i = ntohs(ip->tot_len) - hdrlen;
+ thp->ack_seq = htonl( ntohl(thp->ack_seq) + i);
+ thp->seq = htonl( ntohl(thp->seq) + i);
+ }
+ break;
+
+ case SPECIAL_D: /* Unidirectional data */
+ thp->seq = htonl( ntohl(thp->seq) +
+ ntohs(ip->tot_len) - hdrlen);
+ break;
+
+ default:
+ if(changes & NEW_U){
+ thp->urg = 1;
+ if((x = decode(&cp)) == -1) {
+ goto bad;
+ }
+ thp->urg_ptr = htons(x);
+ } else
+ thp->urg = 0;
+ if(changes & NEW_W){
+ if((x = decode(&cp)) == -1) {
+ goto bad;
+ }
+ thp->window = htons( ntohs(thp->window) + x);
+ }
+ if(changes & NEW_A){
+ if((x = decode(&cp)) == -1) {
+ goto bad;
+ }
+ thp->ack_seq = htonl( ntohl(thp->ack_seq) + x);
+ }
+ if(changes & NEW_S){
+ if((x = decode(&cp)) == -1) {
+ goto bad;
+ }
+ thp->seq = htonl( ntohl(thp->seq) + x);
+ }
+ break;
+ }
+ if(changes & NEW_I){
+ if((x = decode(&cp)) == -1) {
+ goto bad;
+ }
+ ip->id = htons (ntohs (ip->id) + x);
+ } else
+ ip->id = htons (ntohs (ip->id) + 1);
+
+ /*
+ * At this point, cp points to the first byte of data in the
+ * packet. Put the reconstructed TCP and IP headers back on the
+ * packet. Recalculate IP checksum (but not TCP checksum).
+ */
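+	/*
+	 * For illustration, the rebuilt packet laid out below is:
+	 *
+	 *	[IP header, 20 bytes] [saved IP options, if any]
+	 *	[TCP header, 20 bytes] [saved TCP options, if any] [data]
+	 *
+	 * with ip_fast_csum() recomputed over the IP header only.
+	 */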
+
+ len = isize - (cp - icp);
+ if (len < 0)
+ goto bad;
+ len += hdrlen;
+ ip->tot_len = htons(len);
+ ip->check = 0;
+
+ memmove(icp + hdrlen, cp, len - hdrlen);
+
+ cp = icp;
+ memcpy(cp, ip, 20);
+ cp += 20;
+
+ if (ip->ihl > 5) {
+ memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4);
+ cp += (ip->ihl - 5) * 4;
+ }
+
+ put_unaligned(ip_fast_csum(icp, ip->ihl),
+ &((struct iphdr *)icp)->check);
+
+ memcpy(cp, thp, 20);
+ cp += 20;
+
+ if (thp->doff > 5) {
+ memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4);
+ cp += ((thp->doff) - 5) * 4;
+ }
+
+ return len;
+bad:
+ comp->sls_i_error++;
+ return slhc_toss( comp );
+}
+
+
+int
+slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+{
+ register struct cstate *cs;
+ unsigned ihl;
+
+ unsigned char index;
+
+ if(isize < 20) {
+ /* The packet is shorter than a legal IP header */
+ comp->sls_i_runt++;
+ return slhc_toss( comp );
+ }
+ /* Peek at the IP header's IHL field to find its length */
+ ihl = icp[0] & 0xf;
+ if(ihl < 20 / 4){
+ /* The IP header length field is too small */
+ comp->sls_i_runt++;
+ return slhc_toss( comp );
+ }
+ index = icp[9];
+ icp[9] = IPPROTO_TCP;
+
+ if (ip_fast_csum(icp, ihl)) {
+ /* Bad IP header checksum; discard */
+ comp->sls_i_badcheck++;
+ return slhc_toss( comp );
+ }
+ if(index > comp->rslot_limit) {
+ comp->sls_i_error++;
+ return slhc_toss(comp);
+ }
+
+ /* Update local state */
+ cs = &comp->rstate[comp->recv_current = index];
+ comp->flags &=~ SLF_TOSS;
+ memcpy(&cs->cs_ip,icp,20);
+ memcpy(&cs->cs_tcp,icp + ihl*4,20);
+ if (ihl > 5)
+ memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
+ if (cs->cs_tcp.doff > 5)
+ memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
+ cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ /* Put headers back on packet
+ * Neither header checksum is recalculated
+ */
+ comp->sls_i_uncompressed++;
+ return isize;
+}
+
+int
+slhc_toss(struct slcompress *comp)
+{
+ if ( comp == NULLSLCOMPR )
+ return 0;
+
+ comp->flags |= SLF_TOSS;
+ return 0;
+}
+
+
+/* VJ header compression */
+EXPORT_SYMBOL(slhc_init);
+EXPORT_SYMBOL(slhc_free);
+EXPORT_SYMBOL(slhc_remember);
+EXPORT_SYMBOL(slhc_compress);
+EXPORT_SYMBOL(slhc_uncompress);
+EXPORT_SYMBOL(slhc_toss);
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n");
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ return;
+}
+
+#endif /* MODULE */
+#else /* CONFIG_INET */
+
+
+int
+slhc_toss(struct slcompress *comp)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_toss");
+ return -EINVAL;
+}
+int
+slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_uncompress");
+ return -EINVAL;
+}
+int
+slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ unsigned char *ocp, unsigned char **cpp, int compress_cid)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_compress");
+ return -EINVAL;
+}
+
+int
+slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_remember");
+ return -EINVAL;
+}
+
+void
+slhc_free(struct slcompress *comp)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
+ return;
+}
+struct slcompress *
+slhc_init(int rslots, int tslots)
+{
+ printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
+ return NULL;
+}
+EXPORT_SYMBOL(slhc_init);
+EXPORT_SYMBOL(slhc_free);
+EXPORT_SYMBOL(slhc_remember);
+EXPORT_SYMBOL(slhc_compress);
+EXPORT_SYMBOL(slhc_uncompress);
+EXPORT_SYMBOL(slhc_toss);
+
+#endif /* CONFIG_INET */
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
new file mode 100644
index 000000000000..4ce52f5f2419
--- /dev/null
+++ b/drivers/net/slip.c
@@ -0,0 +1,1522 @@
+/*
+ * slip.c This module implements the SLIP protocol for kernel-based
+ * devices like TTY. It interfaces between a raw TTY, and the
+ * kernel's INET protocol layers.
+ *
+ * Version: @(#)slip.c 0.8.3 12/24/94
+ *
+ * Authors: Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Fixes:
+ * Alan Cox : Sanity checks and avoid tx overruns.
+ * Has a new sl->mtu field.
+ * Alan Cox : Found cause of overrun. ifconfig sl0 mtu upwards.
+ * Driver now spots this and grows/shrinks its buffers(hack!).
+ * Memory leak if you run out of memory setting up a slip driver fixed.
+ * Matt Dillon : Printable slip (borrowed from NET2E)
+ * Pauline Middelink : Slip driver fixes.
+ * Alan Cox : Honours the old SL_COMPRESSED flag
+ * Alan Cox : KISS AX.25 and AXUI IP support
+ * Michael Riepe : Automatic CSLIP recognition added
+ * Charles Hedrick : CSLIP header length problem fix.
+ * Alan Cox : Corrected non-IP cases of the above.
+ * Alan Cox : Now uses hardware type as per FvK.
+ * Alan Cox : Default to 192.168.0.0 (RFC 1597)
+ * A.N.Kuznetsov : dev_tint() recursion fix.
+ * Dmitry Gorodchanin : SLIP memory leaks
+ * Dmitry Gorodchanin : Code cleanup. Reduce tty driver
+ * buffering from 4096 to 256 bytes.
+ * Improving SLIP response time.
+ * CONFIG_SLIP_MODE_SLIP6.
+ * ifconfig sl? up & down now works correctly.
+ * Modularization.
+ * Alan Cox : Oops - fix AX.25 buffer lengths
+ * Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP
+ * statistics. Include CSLIP code only
+ * if it really needed.
+ * Alan Cox : Free slhc buffers in the right place.
+ * Alan Cox : Allow for digipeated IP over AX.25
+ * Matti Aarnio : Dynamic SLIP devices, with ideas taken
+ * from Jim Freeman's <jfree@caldera.com>
+ * dynamic PPP devices. We do NOT kfree()
+ * device entries, just reg./unreg. them
+ * as they are needed. We kfree() them
+ * at module cleanup.
+ * With MODULE-loading ``insmod'', user can
+ * issue parameter: slip_maxdev=1024
+ * (Or how much he/she wants.. Default is 256)
+ * * Stanislav Voronyi : Slip line checking, with ideas taken
+ * from multislip BSDI driver which was written
+ * by Igor Chechik, RELCOM Corp. Only algorithms
+ * have been ported to Linux SLIP driver.
+ * Vitaly E. Lavrov : Sane behaviour on tty hangup.
+ * Alexey Kuznetsov : Cleanup interfaces to tty&netdevice modules.
+ */
+
+#define SL_CHECK_TRANSMIT
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/if_slip.h>
+#include <linux/init.h>
+#include "slip.h"
+#ifdef CONFIG_INET
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/slhc_vj.h>
+#endif
+
+#define SLIP_VERSION "0.8.4-NET3.019-NEWTTY"
+
+static struct net_device **slip_devs;
+
+static int slip_maxdev = SL_NRUNIT;
+module_param(slip_maxdev, int, 0);
+MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");
+
+static int slip_esc(unsigned char *p, unsigned char *d, int len);
+static void slip_unesc(struct slip *sl, unsigned char c);
+#ifdef CONFIG_SLIP_MODE_SLIP6
+static int slip_esc6(unsigned char *p, unsigned char *d, int len);
+static void slip_unesc6(struct slip *sl, unsigned char c);
+#endif
+#ifdef CONFIG_SLIP_SMART
+static void sl_keepalive(unsigned long sls);
+static void sl_outfill(unsigned long sls);
+static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd);
+#endif
+
+/********************************
+* Buffer administration routines:
+* sl_alloc_bufs()
+* sl_free_bufs()
+* sl_realloc_bufs()
+*
+* NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
+* sl_realloc_bufs provides strong atomicity and reallocation
+* on an actively running device.
+*********************************/
+
+/*
+ Allocate channel buffers.
+ */
+
+static int
+sl_alloc_bufs(struct slip *sl, int mtu)
+{
+ int err = -ENOBUFS;
+ unsigned long len;
+ char * rbuff = NULL;
+ char * xbuff = NULL;
+#ifdef SL_INCLUDE_CSLIP
+ char * cbuff = NULL;
+ struct slcompress *slcomp = NULL;
+#endif
+
+ /*
+ * Allocate the SLIP frame buffers:
+ *
+ * rbuff Receive buffer.
+ * xbuff Transmit buffer.
+ * cbuff Temporary compression buffer.
+ */
+ len = mtu * 2;
+
+ /*
+	 * Allow for the arrival of larger UDP packets, even if we say not to.
+	 * This also works around a bug in which SunOS sends 512-byte packets
+	 * even with an MSS of 128.
+ */
+ if (len < 576 * 2)
+ len = 576 * 2;
+ rbuff = kmalloc(len + 4, GFP_KERNEL);
+ if (rbuff == NULL)
+ goto err_exit;
+ xbuff = kmalloc(len + 4, GFP_KERNEL);
+ if (xbuff == NULL)
+ goto err_exit;
+#ifdef SL_INCLUDE_CSLIP
+ cbuff = kmalloc(len + 4, GFP_KERNEL);
+ if (cbuff == NULL)
+ goto err_exit;
+ slcomp = slhc_init(16, 16);
+ if (slcomp == NULL)
+ goto err_exit;
+#endif
+ spin_lock_bh(&sl->lock);
+ if (sl->tty == NULL) {
+ spin_unlock_bh(&sl->lock);
+ err = -ENODEV;
+ goto err_exit;
+ }
+ sl->mtu = mtu;
+ sl->buffsize = len;
+ sl->rcount = 0;
+ sl->xleft = 0;
+ rbuff = xchg(&sl->rbuff, rbuff);
+ xbuff = xchg(&sl->xbuff, xbuff);
+#ifdef SL_INCLUDE_CSLIP
+ cbuff = xchg(&sl->cbuff, cbuff);
+ slcomp = xchg(&sl->slcomp, slcomp);
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ sl->xdata = 0;
+ sl->xbits = 0;
+#endif
+#endif
+ spin_unlock_bh(&sl->lock);
+ err = 0;
+
+ /* Cleanup */
+err_exit:
+#ifdef SL_INCLUDE_CSLIP
+ if (cbuff)
+ kfree(cbuff);
+ if (slcomp)
+ slhc_free(slcomp);
+#endif
+ if (xbuff)
+ kfree(xbuff);
+ if (rbuff)
+ kfree(rbuff);
+ return err;
+}
+
+/* Free a SLIP channel's buffers. */
+static void
+sl_free_bufs(struct slip *sl)
+{
+ void * tmp;
+
+ /* Free all SLIP frame buffers. */
+ if ((tmp = xchg(&sl->rbuff, NULL)) != NULL)
+ kfree(tmp);
+ if ((tmp = xchg(&sl->xbuff, NULL)) != NULL)
+ kfree(tmp);
+#ifdef SL_INCLUDE_CSLIP
+ if ((tmp = xchg(&sl->cbuff, NULL)) != NULL)
+ kfree(tmp);
+ if ((tmp = xchg(&sl->slcomp, NULL)) != NULL)
+ slhc_free(tmp);
+#endif
+}
+
+/*
+ Reallocate slip channel buffers.
+ */
+
+static int sl_realloc_bufs(struct slip *sl, int mtu)
+{
+ int err = 0;
+ struct net_device *dev = sl->dev;
+ unsigned char *xbuff, *rbuff;
+#ifdef SL_INCLUDE_CSLIP
+ unsigned char *cbuff;
+#endif
+ int len = mtu * 2;
+
+/*
+ * Allow for the arrival of larger UDP packets, even if we say not to.
+ * This also works around a bug in which SunOS sends 512-byte packets
+ * even with an MSS of 128.
+ */
+ if (len < 576 * 2)
+ len = 576 * 2;
+
+ xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
+ rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
+#ifdef SL_INCLUDE_CSLIP
+ cbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
+#endif
+
+
+#ifdef SL_INCLUDE_CSLIP
+ if (xbuff == NULL || rbuff == NULL || cbuff == NULL) {
+#else
+ if (xbuff == NULL || rbuff == NULL) {
+#endif
+ if (mtu >= sl->mtu) {
+ printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
+ dev->name);
+ err = -ENOBUFS;
+ }
+ goto done;
+ }
+
+ spin_lock_bh(&sl->lock);
+
+ err = -ENODEV;
+ if (sl->tty == NULL)
+ goto done_on_bh;
+
+ xbuff = xchg(&sl->xbuff, xbuff);
+ rbuff = xchg(&sl->rbuff, rbuff);
+#ifdef SL_INCLUDE_CSLIP
+ cbuff = xchg(&sl->cbuff, cbuff);
+#endif
+ if (sl->xleft) {
+ if (sl->xleft <= len) {
+ memcpy(sl->xbuff, sl->xhead, sl->xleft);
+ } else {
+ sl->xleft = 0;
+ sl->tx_dropped++;
+ }
+ }
+ sl->xhead = sl->xbuff;
+
+ if (sl->rcount) {
+ if (sl->rcount <= len) {
+ memcpy(sl->rbuff, rbuff, sl->rcount);
+ } else {
+ sl->rcount = 0;
+ sl->rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+ sl->mtu = mtu;
+ dev->mtu = mtu;
+ sl->buffsize = len;
+ err = 0;
+
+done_on_bh:
+ spin_unlock_bh(&sl->lock);
+
+done:
+ if (xbuff)
+ kfree(xbuff);
+ if (rbuff)
+ kfree(rbuff);
+#ifdef SL_INCLUDE_CSLIP
+ if (cbuff)
+ kfree(cbuff);
+#endif
+ return err;
+}
+
+
+/* Set the "sending" flag.  This must be atomic; netif_stop_queue() does it for us. */
+static inline void
+sl_lock(struct slip *sl)
+{
+ netif_stop_queue(sl->dev);
+}
+
+
+/* Clear the "sending" flag.  This must be atomic; netif_wake_queue() does it for us. */
+static inline void
+sl_unlock(struct slip *sl)
+{
+ netif_wake_queue(sl->dev);
+}
+
+/* Send one completely decapsulated IP datagram to the IP layer. */
+static void
+sl_bump(struct slip *sl)
+{
+ struct sk_buff *skb;
+ int count;
+
+ count = sl->rcount;
+#ifdef SL_INCLUDE_CSLIP
+ if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
+ unsigned char c;
+ if ((c = sl->rbuff[0]) & SL_TYPE_COMPRESSED_TCP) {
+ /* ignore compressed packets when CSLIP is off */
+ if (!(sl->mode & SL_MODE_CSLIP)) {
+ printk(KERN_WARNING "%s: compressed packet ignored\n", sl->dev->name);
+ return;
+ }
+ /* make sure we've reserved enough space for uncompress to use */
+ if (count + 80 > sl->buffsize) {
+ sl->rx_over_errors++;
+ return;
+ }
+ count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
+ if (count <= 0) {
+ return;
+ }
+ } else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
+ if (!(sl->mode & SL_MODE_CSLIP)) {
+ /* turn on header compression */
+ sl->mode |= SL_MODE_CSLIP;
+ sl->mode &= ~SL_MODE_ADAPTIVE;
+ printk(KERN_INFO "%s: header compression turned on\n", sl->dev->name);
+ }
+ sl->rbuff[0] &= 0x4f;
+ if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0) {
+ return;
+ }
+ }
+ }
+#endif /* SL_INCLUDE_CSLIP */
+
+ sl->rx_bytes+=count;
+
+ skb = dev_alloc_skb(count);
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", sl->dev->name);
+ sl->rx_dropped++;
+ return;
+ }
+ skb->dev = sl->dev;
+ memcpy(skb_put(skb,count), sl->rbuff, count);
+ skb->mac.raw=skb->data;
+ skb->protocol=htons(ETH_P_IP);
+ netif_rx(skb);
+ sl->dev->last_rx = jiffies;
+ sl->rx_packets++;
+}
+
+/* Encapsulate one IP datagram and stuff into a TTY queue. */
+static void
+sl_encaps(struct slip *sl, unsigned char *icp, int len)
+{
+ unsigned char *p;
+ int actual, count;
+
+ if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
+ printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
+ sl->tx_dropped++;
+ sl_unlock(sl);
+ return;
+ }
+
+ p = icp;
+#ifdef SL_INCLUDE_CSLIP
+ if (sl->mode & SL_MODE_CSLIP) {
+ len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
+ }
+#endif
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ if(sl->mode & SL_MODE_SLIP6)
+ count = slip_esc6(p, (unsigned char *) sl->xbuff, len);
+ else
+#endif
+ count = slip_esc(p, (unsigned char *) sl->xbuff, len);
+
+	/* The order of the next two lines is *very* important.
+	 * When we are sending a small amount of data, the transfer may
+	 * complete inside the driver's write() routine, because it runs
+	 * with interrupts enabled.  In that case we never get a
+	 * WRITE_WAKEUP event unless we requested it before the write
+	 * operation.
+	 *       14 Oct 1994  Dmitry Gorodchanin.
+	 */
+ sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ actual = sl->tty->driver->write(sl->tty, sl->xbuff, count);
+#ifdef SL_CHECK_TRANSMIT
+ sl->dev->trans_start = jiffies;
+#endif
+ sl->xleft = count - actual;
+ sl->xhead = sl->xbuff + actual;
+#ifdef CONFIG_SLIP_SMART
+ /* VSV */
+ clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
+#endif
+}
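+
+/*
+ * In short, the transmit path above is: IP datagram -> optional VJ
+ * header compression (slhc_compress() into sl->cbuff) -> SLIP or SLIP6
+ * byte stuffing into sl->xbuff -> tty write.  Whatever the tty driver
+ * could not take immediately is left behind in xleft/xhead and flushed
+ * from slip_write_wakeup() below.
+ */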
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void slip_write_wakeup(struct tty_struct *tty)
+{
+ int actual;
+ struct slip *sl = (struct slip *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
+ return;
+ }
+ if (sl->xleft <= 0) {
+		/* The serial buffer is now almost free and we can start
+		 * transmitting another packet */
+ sl->tx_packets++;
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ sl_unlock(sl);
+ return;
+ }
+
+ actual = tty->driver->write(tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+}
+
+static void sl_tx_timeout(struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ spin_lock(&sl->lock);
+
+ if (netif_queue_stopped(dev)) {
+ if (!netif_running(dev))
+ goto out;
+
+		/* Maybe we should check the transmitter timeout here?
+		 *      14 Oct 1994 Dmitry Gorodchanin.
+		 */
+#ifdef SL_CHECK_TRANSMIT
+ if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ /* 20 sec timeout not reached */
+ goto out;
+ }
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+ "bad line quality" : "driver error");
+ sl->xleft = 0;
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ sl_unlock(sl);
+#endif
+ }
+
+out:
+ spin_unlock(&sl->lock);
+}
+
+
+/* Encapsulate an IP datagram and kick it into a TTY queue. */
+static int
+sl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ if (sl->tty == NULL) {
+ spin_unlock(&sl->lock);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ sl_lock(sl);
+ sl->tx_bytes+=skb->len;
+ sl_encaps(sl, skb->data, skb->len);
+ spin_unlock(&sl->lock);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+/* Netdevice UP -> DOWN routine */
+
+static int
+sl_close(struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ /* TTY discipline is running. */
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ }
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_unlock_bh(&sl->lock);
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+
+static int sl_open(struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ if (sl->tty==NULL)
+ return -ENODEV;
+
+ sl->flags &= (1 << SLF_INUSE);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Netdevice change MTU request */
+
+static int sl_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ if (new_mtu < 68 || new_mtu > 65534)
+ return -EINVAL;
+
+ if (new_mtu != dev->mtu)
+ return sl_realloc_bufs(sl, new_mtu);
+ return 0;
+}
+
+/* Netdevice get statistics request */
+
+static struct net_device_stats *
+sl_get_stats(struct net_device *dev)
+{
+ static struct net_device_stats stats;
+ struct slip *sl = netdev_priv(dev);
+#ifdef SL_INCLUDE_CSLIP
+ struct slcompress *comp;
+#endif
+
+ memset(&stats, 0, sizeof(struct net_device_stats));
+
+ stats.rx_packets = sl->rx_packets;
+ stats.tx_packets = sl->tx_packets;
+ stats.rx_bytes = sl->rx_bytes;
+ stats.tx_bytes = sl->tx_bytes;
+ stats.rx_dropped = sl->rx_dropped;
+ stats.tx_dropped = sl->tx_dropped;
+ stats.tx_errors = sl->tx_errors;
+ stats.rx_errors = sl->rx_errors;
+ stats.rx_over_errors = sl->rx_over_errors;
+#ifdef SL_INCLUDE_CSLIP
+ stats.rx_fifo_errors = sl->rx_compressed;
+ stats.tx_fifo_errors = sl->tx_compressed;
+ stats.collisions = sl->tx_misses;
+ comp = sl->slcomp;
+ if (comp) {
+ stats.rx_fifo_errors += comp->sls_i_compressed;
+ stats.rx_dropped += comp->sls_i_tossed;
+ stats.tx_fifo_errors += comp->sls_o_compressed;
+ stats.collisions += comp->sls_o_misses;
+ }
+#endif /* CONFIG_INET */
+ return (&stats);
+}
+
+/* Netdevice register callback */
+
+static int sl_init(struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ /*
+ * Finish setting up the DEVICE info.
+ */
+
+ dev->mtu = sl->mtu;
+ dev->type = ARPHRD_SLIP + sl->mode;
+#ifdef SL_CHECK_TRANSMIT
+ dev->tx_timeout = sl_tx_timeout;
+ dev->watchdog_timeo = 20*HZ;
+#endif
+ return 0;
+}
+
+
+static void sl_uninit(struct net_device *dev)
+{
+ struct slip *sl = netdev_priv(dev);
+
+ sl_free_bufs(sl);
+}
+
+static void sl_setup(struct net_device *dev)
+{
+ dev->init = sl_init;
+ dev->uninit = sl_uninit;
+ dev->open = sl_open;
+ dev->destructor = free_netdev;
+ dev->stop = sl_close;
+ dev->get_stats = sl_get_stats;
+ dev->change_mtu = sl_change_mtu;
+ dev->hard_start_xmit = sl_xmit;
+#ifdef CONFIG_SLIP_SMART
+ dev->do_ioctl = sl_ioctl;
+#endif
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ SET_MODULE_OWNER(dev);
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
+}
+
+/******************************************
+ Routines looking at TTY side.
+ ******************************************/
+
+
+static int slip_receive_room(struct tty_struct *tty)
+{
+ return 65536; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLIP data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+{
+ struct slip *sl = (struct slip *) tty->disc_data;
+
+ if (!sl || sl->magic != SLIP_MAGIC ||
+ !netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags)) {
+ sl->rx_errors++;
+ }
+ cp++;
+ continue;
+ }
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ if (sl->mode & SL_MODE_SLIP6)
+ slip_unesc6(sl, *cp++);
+ else
+#endif
+ slip_unesc(sl, *cp++);
+ }
+}
+
+/************************************
+ * slip_open helper routines.
+ ************************************/
+
+/* Collect hung-up channels */
+
+static void sl_sync(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slip *sl;
+
+ for (i = 0; i < slip_maxdev; i++) {
+ if ((dev = slip_devs[i]) == NULL)
+ break;
+
+ sl = netdev_priv(dev);
+ if (sl->tty || sl->leased)
+ continue;
+ if (dev->flags&IFF_UP)
+ dev_close(dev);
+ }
+}
+
+
+/* Find a free SLIP channel, and link in this `tty' line. */
+static struct slip *
+sl_alloc(dev_t line)
+{
+ int i;
+ int sel = -1;
+ int score = -1;
+ struct net_device *dev = NULL;
+ struct slip *sl;
+
+ if (slip_devs == NULL)
+ return NULL; /* Master array missing ! */
+
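+	/* Pick the best reusable channel: a slot previously used by this
+	 * process on this tty line (score 3) beats one merely used by
+	 * this process (score 2), which beats one previously bound to
+	 * this line (score 1), which beats any other free slot (score 0).
+	 * Leased channels are only handed back to their own line.
+	 */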
+ for (i = 0; i < slip_maxdev; i++) {
+ dev = slip_devs[i];
+ if (dev == NULL)
+ break;
+
+ sl = netdev_priv(dev);
+ if (sl->leased) {
+ if (sl->line != line)
+ continue;
+ if (sl->tty)
+ return NULL;
+
+ /* Clear ESCAPE & ERROR flags */
+ sl->flags &= (1 << SLF_INUSE);
+ return sl;
+ }
+
+ if (sl->tty)
+ continue;
+
+ if (current->pid == sl->pid) {
+ if (sl->line == line && score < 3) {
+ sel = i;
+ score = 3;
+ continue;
+ }
+ if (score < 2) {
+ sel = i;
+ score = 2;
+ }
+ continue;
+ }
+ if (sl->line == line && score < 1) {
+ sel = i;
+ score = 1;
+ continue;
+ }
+ if (score < 0) {
+ sel = i;
+ score = 0;
+ }
+ }
+
+ if (sel >= 0) {
+ i = sel;
+ dev = slip_devs[i];
+ if (score > 1) {
+ sl = netdev_priv(dev);
+ sl->flags &= (1 << SLF_INUSE);
+ return sl;
+ }
+ }
+
+ /* Sorry, too many, all slots in use */
+ if (i >= slip_maxdev)
+ return NULL;
+
+ if (dev) {
+ sl = netdev_priv(dev);
+ if (test_bit(SLF_INUSE, &sl->flags)) {
+ unregister_netdevice(dev);
+ dev = NULL;
+ slip_devs[i] = NULL;
+ }
+ }
+
+ if (!dev) {
+ char name[IFNAMSIZ];
+ sprintf(name, "sl%d", i);
+
+ dev = alloc_netdev(sizeof(*sl), name, sl_setup);
+ if (!dev)
+ return NULL;
+ dev->base_addr = i;
+ }
+
+ sl = netdev_priv(dev);
+
+ /* Initialize channel control data */
+ sl->magic = SLIP_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
+ sl->mode = SL_MODE_DEFAULT;
+#ifdef CONFIG_SLIP_SMART
+ init_timer(&sl->keepalive_timer); /* initialize timer_list struct */
+ sl->keepalive_timer.data=(unsigned long)sl;
+ sl->keepalive_timer.function=sl_keepalive;
+ init_timer(&sl->outfill_timer);
+ sl->outfill_timer.data=(unsigned long)sl;
+ sl->outfill_timer.function=sl_outfill;
+#endif
+ slip_devs[i] = dev;
+
+ return sl;
+}
+
+/*
+ * Open the high-level part of the SLIP channel.
+ * This function is called by the TTY module when the
+ * SLIP line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free SLIP channel...
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+
+static int slip_open(struct tty_struct *tty)
+{
+ struct slip *sl;
+ int err;
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+	/* The RTnetlink lock is misused here to serialize concurrent
+	   opens of slip channels.  There are better ways, but this is
+	   the simplest one.
+	 */
+ rtnl_lock();
+
+	/* Collect hung-up channels. */
+ sl_sync();
+
+ sl = (struct slip *) tty->disc_data;
+
+ err = -EEXIST;
+ /* First make sure we're not already connected. */
+ if (sl && sl->magic == SLIP_MAGIC)
+ goto err_exit;
+
+ /* OK. Find a free SLIP channel to use. */
+ err = -ENFILE;
+ if ((sl = sl_alloc(tty_devnum(tty))) == NULL)
+ goto err_exit;
+
+ sl->tty = tty;
+ tty->disc_data = sl;
+ sl->line = tty_devnum(tty);
+ sl->pid = current->pid;
+
+ /* FIXME: already done before we were called - seems this can go */
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ if (!test_bit(SLF_INUSE, &sl->flags)) {
+ /* Perform the low-level SLIP initialization. */
+ if ((err = sl_alloc_bufs(sl, SL_MTU)) != 0)
+ goto err_free_chan;
+
+ set_bit(SLF_INUSE, &sl->flags);
+
+ if ((err = register_netdevice(sl->dev)))
+ goto err_free_bufs;
+ }
+
+#ifdef CONFIG_SLIP_SMART
+ if (sl->keepalive) {
+ sl->keepalive_timer.expires=jiffies+sl->keepalive*HZ;
+ add_timer (&sl->keepalive_timer);
+ }
+ if (sl->outfill) {
+ sl->outfill_timer.expires=jiffies+sl->outfill*HZ;
+ add_timer (&sl->outfill_timer);
+ }
+#endif
+
+ /* Done. We have linked the TTY line to a channel. */
+ rtnl_unlock();
+ return sl->dev->base_addr;
+
+err_free_bufs:
+ sl_free_bufs(sl);
+
+err_free_chan:
+ sl->tty = NULL;
+ tty->disc_data = NULL;
+ clear_bit(SLF_INUSE, &sl->flags);
+
+err_exit:
+ rtnl_unlock();
+
+ /* Count references from TTY module */
+ return err;
+}
+
+/*
+
+  FIXME: 1 and 2 are fixed; 3 was never true anyway.
+
+   A few complaints:
+   1. The TTY module calls this function in soft interrupt context.
+   2. The TTY module calls this function WITH INTERRUPTS MASKED!
+   3. The TTY module does not notify us about line discipline
+      shutdown.
+
+   This now seems to be clean.  The solution is to treat the netdevice
+   and line discipline sides as two independent threads.
+
+   By-product (not desired): sl? does not notice hangups and remains
+   open.  It is assumed that a user level program (dip, diald,
+   slattach...) will catch SIGHUP and do the rest of the work.
+
+   I see no way to do more with the current tty code. --ANK
+ */
+
+/*
+ * Close down a SLIP channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ */
+static void
+slip_close(struct tty_struct *tty)
+{
+ struct slip *sl = (struct slip *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
+ return;
+
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ if (!sl->leased)
+ sl->line = 0;
+
+ /* VSV = very important to remove timers */
+#ifdef CONFIG_SLIP_SMART
+ del_timer_sync(&sl->keepalive_timer);
+ del_timer_sync(&sl->outfill_timer);
+#endif
+
+ /* Count references from TTY module */
+}
+
+ /************************************************************************
+ * STANDARD SLIP ENCAPSULATION *
+ ************************************************************************/
+
+int
+slip_esc(unsigned char *s, unsigned char *d, int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c;
+
+ /*
+ * Send an initial END character to flush out any
+ * data that may have accumulated in the receiver
+ * due to line noise.
+ */
+
+ *ptr++ = END;
+
+ /*
+ * For each byte in the packet, send the appropriate
+ * character sequence, according to the SLIP protocol.
+ */
+
+ while (len-- > 0) {
+ switch(c = *s++) {
+ case END:
+ *ptr++ = ESC;
+ *ptr++ = ESC_END;
+ break;
+ case ESC:
+ *ptr++ = ESC;
+ *ptr++ = ESC_ESC;
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
+ }
+ *ptr++ = END;
+ return (ptr - d);
+}
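+
+/*
+ * For illustration, with END = 0300 (0xc0), ESC = 0333 (0xdb),
+ * ESC_END = 0334 (0xdc) and ESC_ESC = 0335 (0xdd), the packet bytes
+ *
+ *	01 c0 db 02
+ *
+ * are framed by slip_esc() as
+ *
+ *	c0  01  db dc  db dd  02  c0
+ *
+ * and slip_unesc() below undoes the transformation byte by byte.
+ */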
+
+static void slip_unesc(struct slip *sl, unsigned char s)
+{
+
+ switch(s) {
+ case END:
+#ifdef CONFIG_SLIP_SMART
+ /* drop keeptest bit = VSV */
+ if (test_bit(SLF_KEEPTEST, &sl->flags))
+ clear_bit(SLF_KEEPTEST, &sl->flags);
+#endif
+
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) {
+ sl_bump(sl);
+ }
+ clear_bit(SLF_ESCAPE, &sl->flags);
+ sl->rcount = 0;
+ return;
+
+ case ESC:
+ set_bit(SLF_ESCAPE, &sl->flags);
+ return;
+ case ESC_ESC:
+ if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) {
+ s = ESC;
+ }
+ break;
+ case ESC_END:
+ if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) {
+ s = END;
+ }
+ break;
+ }
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < sl->buffsize) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+ sl->rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+}
+
+
+#ifdef CONFIG_SLIP_MODE_SLIP6
+/************************************************************************
+ * 6 BIT SLIP ENCAPSULATION *
+ ************************************************************************/
+
+int
+slip_esc6(unsigned char *s, unsigned char *d, int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c;
+ int i;
+ unsigned short v = 0;
+ short bits = 0;
+
+ /*
+ * Send an initial END character to flush out any
+ * data that may have accumulated in the receiver
+ * due to line noise.
+ */
+
+ *ptr++ = 0x70;
+
+ /*
+ * Encode the packet into printable ascii characters
+ */
+
+ for (i = 0; i < len; ++i) {
+ v = (v << 8) | s[i];
+ bits += 8;
+ while (bits >= 6) {
+ bits -= 6;
+ c = 0x30 + ((v >> bits) & 0x3F);
+ *ptr++ = c;
+ }
+ }
+ if (bits) {
+ c = 0x30 + ((v << (6 - bits)) & 0x3F);
+ *ptr++ = c;
+ }
+ *ptr++ = 0x70;
+ return ptr - d;
+}
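+
+/*
+ * For illustration: slip_esc6() packs the packet into 6-bit groups,
+ * maps each group to the printable character 0x30 + value ('0'..'o')
+ * and frames the result with 0x70 ('p').  The three bytes
+ *
+ *	01 02 03   (bits 000000 010000 001000 000011)
+ *
+ * become the characters 0x30 0x40 0x38 0x33 ("0@83"), sent as
+ *
+ *	70 30 40 38 33 70
+ *
+ * slip_unesc6() below reverses the transformation.
+ */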
+
+void
+slip_unesc6(struct slip *sl, unsigned char s)
+{
+ unsigned char c;
+
+ if (s == 0x70) {
+#ifdef CONFIG_SLIP_SMART
+ /* drop keeptest bit = VSV */
+ if (test_bit(SLF_KEEPTEST, &sl->flags))
+ clear_bit(SLF_KEEPTEST, &sl->flags);
+#endif
+
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) {
+ sl_bump(sl);
+ }
+ sl->rcount = 0;
+ sl->xbits = 0;
+ sl->xdata = 0;
+ } else if (s >= 0x30 && s < 0x70) {
+ sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
+ sl->xbits += 6;
+ if (sl->xbits >= 8) {
+ sl->xbits -= 8;
+ c = (unsigned char)(sl->xdata >> sl->xbits);
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < sl->buffsize) {
+ sl->rbuff[sl->rcount++] = c;
+ return;
+ }
+ sl->rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+ }
+}
+#endif /* CONFIG_SLIP_MODE_SLIP6 */
+
+/* Perform I/O control on an active SLIP channel. */
+static int slip_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct slip *sl = (struct slip *) tty->disc_data;
+ unsigned int tmp;
+ int __user *p = (int __user *)arg;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLIP_MAGIC) {
+ return -EINVAL;
+ }
+
+ switch(cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCGIFENCAP:
+ if (put_user(sl->mode, p))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFENCAP:
+ if (get_user(tmp, p))
+ return -EFAULT;
+#ifndef SL_INCLUDE_CSLIP
+ if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE)) {
+ return -EINVAL;
+ }
+#else
+ if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
+ (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
+ /* return -EINVAL; */
+ tmp &= ~SL_MODE_ADAPTIVE;
+ }
+#endif
+#ifndef CONFIG_SLIP_MODE_SLIP6
+ if (tmp & SL_MODE_SLIP6) {
+ return -EINVAL;
+ }
+#endif
+ sl->mode = tmp;
+ sl->dev->type = ARPHRD_SLIP+sl->mode;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+#ifdef CONFIG_SLIP_SMART
+ /* VSV changes start here */
+ case SIOCSKEEPALIVE:
+ if (get_user(tmp, p))
+ return -EFAULT;
+ if (tmp > 255) /* max for unchar */
+ return -EINVAL;
+
+ spin_lock_bh(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock_bh(&sl->lock);
+ return -ENODEV;
+ }
+ if ((sl->keepalive = (unchar) tmp) != 0) {
+ mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
+ set_bit(SLF_KEEPTEST, &sl->flags);
+ } else {
+ del_timer (&sl->keepalive_timer);
+ }
+ spin_unlock_bh(&sl->lock);
+ return 0;
+
+ case SIOCGKEEPALIVE:
+ if (put_user(sl->keepalive, p))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSOUTFILL:
+ if (get_user(tmp, p))
+ return -EFAULT;
+ if (tmp > 255) /* max for unchar */
+ return -EINVAL;
+ spin_lock_bh(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock_bh(&sl->lock);
+ return -ENODEV;
+ }
+ if ((sl->outfill = (unchar) tmp) != 0){
+ mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
+ set_bit(SLF_OUTWAIT, &sl->flags);
+ } else {
+ del_timer (&sl->outfill_timer);
+ }
+ spin_unlock_bh(&sl->lock);
+ return 0;
+
+ case SIOCGOUTFILL:
+ if (put_user(sl->outfill, p))
+ return -EFAULT;
+ return 0;
+ /* VSV changes end */
+#endif
+
+ /* Allow stty to read, but not set, the serial port */
+ case TCGETS:
+ case TCGETA:
+ return n_tty_ioctl(tty, file, cmd, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/* VSV changes start here */
+#ifdef CONFIG_SLIP_SMART
+/* The do_ioctl entry point, called from net/core/dev.c, lets ifconfig
+   get and set the outfill and keepalive parameters.
+ */
+
+static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd)
+{
+ struct slip *sl = netdev_priv(dev);
+ unsigned long *p = (unsigned long *)&rq->ifr_ifru;
+
+ if (sl == NULL) /* Allocation failed ?? */
+ return -ENODEV;
+
+ spin_lock_bh(&sl->lock);
+
+ if (!sl->tty) {
+ spin_unlock_bh(&sl->lock);
+ return -ENODEV;
+ }
+
+ switch(cmd){
+ case SIOCSKEEPALIVE:
+ /* max for unchar */
+ if ((unsigned)*p > 255) {
+ spin_unlock_bh(&sl->lock);
+ return -EINVAL;
+ }
+ sl->keepalive = (unchar) *p;
+ if (sl->keepalive != 0) {
+ sl->keepalive_timer.expires=jiffies+sl->keepalive*HZ;
+ mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
+ set_bit(SLF_KEEPTEST, &sl->flags);
+ } else {
+ del_timer(&sl->keepalive_timer);
+ }
+ break;
+
+ case SIOCGKEEPALIVE:
+ *p = sl->keepalive;
+ break;
+
+ case SIOCSOUTFILL:
+ if ((unsigned)*p > 255) { /* max for unchar */
+ spin_unlock_bh(&sl->lock);
+ return -EINVAL;
+ }
+ if ((sl->outfill = (unchar)*p) != 0){
+ mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
+ set_bit(SLF_OUTWAIT, &sl->flags);
+ } else {
+ del_timer (&sl->outfill_timer);
+ }
+ break;
+
+ case SIOCGOUTFILL:
+ *p = sl->outfill;
+ break;
+
+ case SIOCSLEASE:
+			/* Resolve a race condition: the device may have been
+			   hung up and reopened by another process while this
+			   ioctl was in flight.
+			 */
+ if (sl->tty != current->signal->tty && sl->pid != current->pid) {
+ spin_unlock_bh(&sl->lock);
+ return -EPERM;
+ }
+ sl->leased = 0;
+ if (*p)
+ sl->leased = 1;
+ break;
+
+ case SIOCGLEASE:
+ *p = sl->leased;
+ };
+ spin_unlock_bh(&sl->lock);
+ return 0;
+}
+#endif
+/* VSV changes end */
+
+static struct tty_ldisc sl_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "slip",
+ .open = slip_open,
+ .close = slip_close,
+ .ioctl = slip_ioctl,
+ .receive_buf = slip_receive_buf,
+ .receive_room = slip_receive_room,
+ .write_wakeup = slip_write_wakeup,
+};
+
+static int __init slip_init(void)
+{
+ int status;
+
+ if (slip_maxdev < 4)
+ slip_maxdev = 4; /* Sanity */
+
+ printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ " (6 bit encapsulation enabled)"
+#endif
+ ".\n",
+ SLIP_VERSION, slip_maxdev );
+#if defined(SL_INCLUDE_CSLIP)
+ printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
+#endif
+#ifdef CONFIG_SLIP_SMART
+ printk(KERN_INFO "SLIP linefill/keepalive option.\n");
+#endif
+
+ slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
+ if (!slip_devs) {
+ printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n");
+ return -ENOMEM;
+ }
+
+ /* Clear the pointer array, we allocate devices when we need them */
+ memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev);
+
+ /* Fill in our line protocol discipline, and register it */
+ if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) {
+ printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
+ kfree(slip_devs);
+ }
+ return status;
+}
+
+static void __exit slip_exit(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slip *sl;
+ unsigned long timeout = jiffies + HZ;
+ int busy = 0;
+
+ if (slip_devs == NULL)
+ return;
+
+	/* First of all: check for active disciplines and hang them up.
+ */
+ do {
+ if (busy) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ }
+
+ busy = 0;
+ for (i = 0; i < slip_maxdev; i++) {
+ dev = slip_devs[i];
+ if (!dev)
+ continue;
+ sl = netdev_priv(dev);
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ busy++;
+ tty_hangup(sl->tty);
+ }
+ spin_unlock_bh(&sl->lock);
+ }
+ } while (busy && time_before(jiffies, timeout));
+
+
+ for (i = 0; i < slip_maxdev; i++) {
+ dev = slip_devs[i];
+ if (!dev)
+ continue;
+ slip_devs[i] = NULL;
+
+ sl = netdev_priv(dev);
+ if (sl->tty) {
+ printk(KERN_ERR "%s: tty discipline still running\n",
+ dev->name);
+ /* Intentionally leak the control block. */
+ dev->destructor = NULL;
+ }
+
+ unregister_netdev(dev);
+ }
+
+ kfree(slip_devs);
+ slip_devs = NULL;
+
+ if ((i = tty_register_ldisc(N_SLIP, NULL)))
+ {
+ printk(KERN_ERR "SLIP: can't unregister line discipline (err = %d)\n", i);
+ }
+}
+
+module_init(slip_init);
+module_exit(slip_exit);
+
+#ifdef CONFIG_SLIP_SMART
+/*
+ * This is start of the code for multislip style line checking
+ * added by Stanislav Voronyi. All changes before marked VSV
+ */
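+
+/*
+ * In short: the outfill timer transmits a lone END (or 0x70 in SLIP6
+ * mode) every sl->outfill seconds if nothing else was transmitted in
+ * the meantime, and the keepalive timer hangs up the tty if no frame
+ * was received for sl->keepalive seconds.  SLF_OUTWAIT and SLF_KEEPTEST
+ * are the "nothing happened since the last tick" markers used by the
+ * two timers.
+ */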
+
+static void sl_outfill(unsigned long sls)
+{
+ struct slip *sl=(struct slip *)sls;
+
+ spin_lock(&sl->lock);
+
+ if (sl->tty == NULL)
+ goto out;
+
+ if(sl->outfill)
+ {
+ if( test_bit(SLF_OUTWAIT, &sl->flags) )
+ {
+ /* no packets were transmitted, do outfill */
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
+#else
+ unsigned char s = END;
+#endif
+			/* put an END into the tty queue.  Is this right? */
+			if (!netif_queue_stopped(sl->dev))
+			{
+				/* if the device is busy, skip the outfill */
+ sl->tty->driver->write(sl->tty, &s, 1);
+ }
+ }
+ else
+ set_bit(SLF_OUTWAIT, &sl->flags);
+
+ mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
+ }
+out:
+ spin_unlock(&sl->lock);
+}
+
+static void sl_keepalive(unsigned long sls)
+{
+ struct slip *sl=(struct slip *)sls;
+
+ spin_lock(&sl->lock);
+
+ if (sl->tty == NULL)
+ goto out;
+
+ if( sl->keepalive)
+ {
+ if(test_bit(SLF_KEEPTEST, &sl->flags))
+ {
+			/* keepalive flag is still set :( -- we must hang up */
+			if( sl->outfill ) /* the outfill timer must be deleted too */
+				(void)del_timer(&sl->outfill_timer);
+			printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
+			tty_hangup(sl->tty); /* this must hang up the tty & close slip */
+			/* nothing more should be needed here */
+ goto out;
+ }
+ else
+ set_bit(SLF_KEEPTEST, &sl->flags);
+
+ mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
+ }
+
+out:
+ spin_unlock(&sl->lock);
+}
+
+#endif
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_SLIP);
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
new file mode 100644
index 000000000000..ab3efe66a642
--- /dev/null
+++ b/drivers/net/slip.h
@@ -0,0 +1,121 @@
+/*
+ * slip.h Define the SLIP device driver interface and constants.
+ *
+ * NOTE: THIS FILE WILL BE MOVED TO THE LINUX INCLUDE DIRECTORY
+ * AS SOON AS POSSIBLE!
+ *
+ * Version: @(#)slip.h 1.2.0 03/28/93
+ *
+ * Fixes:
+ * Alan Cox : Added slip mtu field.
+ * Matt Dillon : Printable slip (borrowed from net2e)
+ * Alan Cox : Added SL_SLIP_LOTS
+ * Dmitry Gorodchanin : A lot of changes in the 'struct slip'
+ * Dmitry Gorodchanin : Added CSLIP statistics.
+ * Stanislav Voronyi : Make line checking as created by
+ * Igor Chechik, RELCOM Corp.
+ * Craig Schlenter : Fixed #define bug that caused
+ * CSLIP telnets to hang in 1.3.61-6
+ *
+ * Author: Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ */
+#ifndef _LINUX_SLIP_H
+#define _LINUX_SLIP_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_INET) && defined(CONFIG_SLIP_COMPRESSED)
+# define SL_INCLUDE_CSLIP
+#endif
+
+#ifdef SL_INCLUDE_CSLIP
+# define SL_MODE_DEFAULT SL_MODE_ADAPTIVE
+#else
+# define SL_MODE_DEFAULT SL_MODE_SLIP
+#endif
+
+/* SLIP configuration. */
+#define SL_NRUNIT 256 /* MAX number of SLIP channels;
+ This can be overridden with
+ insmod -oslip_maxdev=nnn */
+#define SL_MTU 296 /* 296; I am used to 600- FvK */
+
+/* SLIP protocol characters. */
+#define END 0300 /* indicates end of frame */
+#define ESC 0333 /* indicates byte stuffing */
+#define ESC_END 0334 /* ESC ESC_END means END 'data' */
+#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
+
+
+struct slip {
+ int magic;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+
+#ifdef SL_INCLUDE_CSLIP
+ struct slcompress *slcomp; /* for header compression */
+ unsigned char *cbuff; /* compression buffer */
+#endif
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char *rbuff; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char *xbuff; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next byte to XMIT */
+ int xleft; /* bytes left in XMIT queue */
+
+ /* SLIP interface statistics. */
+ unsigned long rx_packets; /* inbound frames counter */
+ unsigned long tx_packets; /* outbound frames counter */
+	unsigned long		rx_bytes;	/* inbound byte counter	*/
+ unsigned long tx_bytes; /* outbound byte counter */
+ unsigned long rx_errors; /* Parity, etc. errors */
+ unsigned long tx_errors; /* Planned stuff */
+ unsigned long rx_dropped; /* No memory for skb */
+ unsigned long tx_dropped; /* When MTU change */
+	unsigned long		rx_over_errors;	/* Frame bigger than SLIP buf. */
+#ifdef SL_INCLUDE_CSLIP
+ unsigned long tx_compressed;
+ unsigned long rx_compressed;
+ unsigned long tx_misses;
+#endif
+ /* Detailed SLIP statistics. */
+
+ int mtu; /* Our mtu (to spot changes!) */
+ int buffsize; /* Max buffers sizes */
+
+#ifdef CONFIG_SLIP_MODE_SLIP6
+ int xdata, xbits; /* 6 bit slip controls */
+#endif
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_INUSE 0 /* Channel in use */
+#define SLF_ESCAPE 1 /* ESC received */
+#define SLF_ERROR 2 /* Parity, etc. error */
+#define SLF_KEEPTEST 3 /* Keepalive test flag */
+#define SLF_OUTWAIT		4		/* outfill: no packet sent since last tick */
+
+ unsigned char mode; /* SLIP mode */
+ unsigned char leased;
+ dev_t line;
+ pid_t pid;
+#define SL_MODE_SLIP 0
+#define SL_MODE_CSLIP 1
+#define SL_MODE_SLIP6 2 /* Matt Dillon's printable slip */
+#define SL_MODE_CSLIP6 (SL_MODE_SLIP6|SL_MODE_CSLIP)
+#define SL_MODE_AX25 4
+#define SL_MODE_ADAPTIVE 8
+#ifdef CONFIG_SLIP_SMART
+ unsigned char outfill; /* # of sec between outfill packet */
+ unsigned char keepalive; /* keepalive seconds */
+ struct timer_list outfill_timer;
+ struct timer_list keepalive_timer;
+#endif
+};
+
+#define SLIP_MAGIC 0x5302
+
+#endif	/* _LINUX_SLIP_H */
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
new file mode 100644
index 000000000000..990201f42ba0
--- /dev/null
+++ b/drivers/net/smc-mca.c
@@ -0,0 +1,508 @@
+/* smc-mca.c: A SMC Ultra ethernet driver for linux. */
+/*
+    Most of this driver, except for ultramca_probe, is taken nearly
+    verbatim from smc-ultra.c by Donald Becker.  The rest was written
+    and is copyright 1996 by David Weis, weisd3458@uni.edu
+
+ This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
+
+ This driver uses the cards in the 8390-compatible, shared memory mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c.
+
+    This driver enables the shared memory only when doing the actual data
+    transfers, to avoid a bug in early versions of the card that corrupted
+    data transferred by an AHA1542.
+
+    This driver does not support the programmed-I/O data transfer mode of
+    the EtherEZ.  That support (if available) is in smc-ez.c.  Nor does it
+    use the non-8390-compatible "Altego" mode.  (No support is currently planned.)
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ David Weis : Micro Channel-ized it.
+ Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
+ Christopher Turcksin : Changed MCA-probe so that multiple adapters are
+ found correctly (Jul 16, 1997)
+ Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
+ Tom Sightler : Fixed minor detection bug caused by above merge
+ Tom Sightler : Added support for three more Western Digital
+ MCA-adapters
+ Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
+ Hartmut Schmidt : - Modified parameter detection to handle each
+ card differently depending on a switch-list
+ - 'card_ver' removed from the adapter list
+ - Some minor bug fixes
+*/
+
+#include <linux/mca.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+#include "smc-mca.h"
+
+#define DRV_NAME "smc-mca"
+
+static int ultramca_open(struct net_device *dev);
+static void ultramca_reset_8390(struct net_device *dev);
+static void ultramca_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultramca_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb,
+ int ring_offset);
+static void ultramca_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page);
+static int ultramca_close_card(struct net_device *dev);
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0
+#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1
+#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2
+#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3
+#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4
+#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5
+#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6
+#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7
+
+struct smc_mca_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */
+
+static int ultra_io[MAX_ULTRAMCA_CARDS];
+static int ultra_irq[MAX_ULTRAMCA_CARDS];
+MODULE_LICENSE("GPL");
+
+module_param_array(ultra_io, int, NULL, 0);
+module_param_array(ultra_irq, int, NULL, 0);
+MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
+MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
+
+static short smc_mca_adapter_ids[] __initdata = {
+ 0x61c8,
+ 0x61c9,
+ 0x6fc0,
+ 0x6fc1,
+ 0x6fc2,
+ 0xefd4,
+ 0xefd5,
+ 0xefe5,
+ 0x0000
+};
+
+static char *smc_mca_adapter_names[] __initdata = {
+ "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
+ "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
+ "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
+ "WD Starcard PLUS/A (WD8003ST/A)",
+ "WD Ethercard PLUS 10T/A (WD8003W/A)",
+ "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
+ "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
+ "IBM PS/2 Adapter/A for Ethernet",
+ NULL
+};
+
+static int ultra_found = 0;
+
+int __init ultramca_probe(struct device *gen_dev)
+{
+ unsigned short ioaddr;
+ struct net_device *dev;
+ unsigned char reg4, num_pages;
+ struct mca_device *mca_dev = to_mca_device(gen_dev);
+ char slot = mca_dev->slot;
+ unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
+ int i, rc;
+ int adapter = mca_dev->index;
+ int tbase = 0;
+ int tirq = 0;
+ int base_addr = ultra_io[ultra_found];
+ int irq = ultra_irq[ultra_found];
+
+ if (base_addr || irq) {
+ printk(KERN_INFO "Probing for SMC MCA adapter");
+ if (base_addr) {
+ printk(KERN_INFO " at I/O address 0x%04x%c",
+ base_addr, irq ? ' ' : '\n');
+ }
+ if (irq) {
+ printk(KERN_INFO "using irq %d\n", irq);
+ }
+ }
+
+ tirq = 0;
+ tbase = 0;
+
+	/* If we're trying to match a specified irq or io address,
+ * we'll reject the adapter found unless it's the one we're
+ * looking for */
+
+ pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
+ pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
+ pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
+ pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */
+
+ /* Test the following conditions:
+ * - If an irq parameter is supplied, compare it
+ * with the irq of the adapter we found
+ * - If a base_addr parameter is given, compare it
+ * with the base_addr of the adapter we found
+ * - Check that the irq and the base_addr of the
+ * adapter we found is not already in use by
+ * this driver
+ */
+
+ switch (mca_dev->index) {
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
+ tirq = irq_table[(pos5 & 0xc) >> 2].new_irq;
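+ /* The upper nibble of pos2 indexes addr_table (the I/O base)
+ * and bits 3:2 of pos5 index irq_table for these boards.
+ */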
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ tbase = ((pos2 & 0x0fe) * 0x10);
+ tirq = irq_table[(pos5 & 3)].old_irq;
+ break;
+ }
+ }
+
+ if(!tirq || !tbase
+ || (irq && irq != tirq)
+ || (base_addr && tbase != base_addr))
+ /* FIXME: we're trying to force the ordering of the
+ * devices here, there should be a way of getting this
+ * to happen */
+ return -ENXIO;
+
+ /* Adapter found. */
+ dev = alloc_ei_netdev();
+ if(!dev)
+ return -ENODEV;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, gen_dev);
+ mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
+ mca_device_set_claim(mca_dev, 1);
+
+ printk(KERN_INFO "smc_mca: %s found in slot %d\n",
+ smc_mca_adapter_names[adapter], slot + 1);
+
+ ultra_found++;
+
+ dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
+ dev->irq = mca_device_transform_irq(mca_dev, tirq);
+ dev->mem_start = 0;
+ num_pages = 40;
+
+ switch (adapter) { /* card-# in const array above [hs] */
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ {
+ for (i = 0; i < 16; i++) { /* taking 16 counts
+ * up to 15 [hs] */
+ if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
+ num_pages = mem_table[i].num_pages;
+ }
+ }
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
+ num_pages = 0x40;
+ break;
+ }
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
+ * the index of the 0x2000 step.
+ * beware different number of pages [hs]
+ */
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf))));
+ num_pages = 0x20 + (2 * (pos3 & 0x10));
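+ /* pos3 bit 4 selects the window size: 0x20 pages (8KB of
+ * shared RAM) when clear, 0x40 pages (16KB) when set.
+ */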
+ break;
+ }
+ }
+
+ /* sanity check, shouldn't happen */
+ if (dev->mem_start == 0) {
+ rc = -ENODEV;
+ goto err_unclaim;
+ }
+
+ if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) {
+ rc = -ENODEV;
+ goto err_unclaim;
+ }
+
+ reg4 = inb(ioaddr + 4) & 0x7f;
+ outb(reg4, ioaddr + 4);
+
+ printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x,", slot + 1, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set
+ * and read the useful registers there.
+ */
+
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot.
+ */
+
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Switch back to the station address register set so that
+ * the MS-DOS driver can find the card after a warm boot.
+ */
+
+ outb(reg4, ioaddr + 4);
+
+ gen_dev->driver_data = dev;
+
+ /* The 8390 isn't at the base address, so fake the offset
+ */
+
+ dev->base_addr = ioaddr + ULTRA_NIC_OFFSET;
+
+ ei_status.name = "SMC Ultra MCA";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256);
+ if (!ei_status.mem) {
+ rc = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256;
+
+ printk(", IRQ %d memory %#lx-%#lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end - 1);
+
+ ei_status.reset_8390 = &ultramca_reset_8390;
+ ei_status.block_input = &ultramca_block_input;
+ ei_status.block_output = &ultramca_block_output;
+ ei_status.get_8390_hdr = &ultramca_get_8390_hdr;
+
+ ei_status.priv = slot;
+
+ dev->open = &ultramca_open;
+ dev->stop = &ultramca_close_card;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ NS8390_init(dev, 0);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ iounmap(ei_status.mem);
+err_release_region:
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+err_unclaim:
+ mca_device_set_claim(mca_dev, 0);
+ free_netdev(dev);
+ return rc;
+}
+
+static int ultramca_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ int retval;
+
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
+ return retval;
+
+ outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
+ outb(0x80, ioaddr + 5); /* ??? */
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ outb(0x04, ioaddr + 5); /* ??? */
+
+ /* Set the early receive warning level in window 0 high enough not
+ * to receive ERW interrupts.
+ */
+
+ /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr);
+ * outb(0xff, dev->base_addr + EN0_ERWCNT);
+ */
+
+ ei_open(dev);
+ return 0;
+}
+
+static void ultramca_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+
+ outb(ULTRA_RESET, ioaddr);
+ if (ei_debug > 1)
+ printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x80, ioaddr + 5); /* ??? */
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1)
+ printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+
+static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps.
+ */
+
+static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256;
+
+ if (ring_offset + count > ei_status.stop_page * 256) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.stop_page * 256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
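+ /* The remainder continues at the start of the receive ring,
+ * TX_PAGES * 256 bytes into the shared memory.
+ */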
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+}
+
+static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ultramca_close_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+ /* We should someday disable shared memory and change to 8-bit mode
+ * "just in case"...
+ */
+
+ return 0;
+}
+
+static int ultramca_remove(struct device *gen_dev)
+{
+ struct mca_device *mca_dev = to_mca_device(gen_dev);
+ struct net_device *dev = (struct net_device *)gen_dev->driver_data;
+
+ if (dev) {
+ /* NB: ultra_close_card() does free_irq */
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
+
+ unregister_netdev(dev);
+ mca_device_set_claim(mca_dev, 0);
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ iounmap(ei_status.mem);
+ free_netdev(dev);
+ }
+ return 0;
+}
+
+
+static struct mca_driver ultra_driver = {
+ .id_table = smc_mca_adapter_ids,
+ .driver = {
+ .name = "smc-mca",
+ .bus = &mca_bus_type,
+ .probe = ultramca_probe,
+ .remove = ultramca_remove,
+ }
+};
+
+static int __init ultramca_init_module(void)
+{
+ if(!MCA_bus)
+ return -ENXIO;
+
+ mca_register_driver(&ultra_driver);
+
+ return ultra_found ? 0 : -ENXIO;
+}
+
+static void __exit ultramca_cleanup_module(void)
+{
+ mca_unregister_driver(&ultra_driver);
+}
+module_init(ultramca_init_module);
+module_exit(ultramca_cleanup_module);
+
diff --git a/drivers/net/smc-mca.h b/drivers/net/smc-mca.h
new file mode 100644
index 000000000000..ac50117a7e84
--- /dev/null
+++ b/drivers/net/smc-mca.h
@@ -0,0 +1,61 @@
+/*
+ * djweis weisd3458@uni.edu
+ * most of this file was taken from ps2esdi.h
+ */
+
+struct {
+ unsigned int base_addr;
+} addr_table[] = {
+ { 0x0800 },
+ { 0x1800 },
+ { 0x2800 },
+ { 0x3800 },
+ { 0x4800 },
+ { 0x5800 },
+ { 0x6800 },
+ { 0x7800 },
+ { 0x8800 },
+ { 0x9800 },
+ { 0xa800 },
+ { 0xb800 },
+ { 0xc800 },
+ { 0xd800 },
+ { 0xe800 },
+ { 0xf800 }
+};
+
+#define MEM_MASK 64
+
+struct {
+ unsigned char mem_index;
+ unsigned long mem_start;
+ unsigned char num_pages;
+} mem_table[] = {
+ { 16, 0x0c0000, 40 },
+ { 18, 0x0c4000, 40 },
+ { 20, 0x0c8000, 40 },
+ { 22, 0x0cc000, 40 },
+ { 24, 0x0d0000, 40 },
+ { 26, 0x0d4000, 40 },
+ { 28, 0x0d8000, 40 },
+ { 30, 0x0dc000, 40 },
+ {144, 0xfc0000, 40 },
+ {148, 0xfc8000, 40 },
+ {154, 0xfd0000, 40 },
+ {156, 0xfd8000, 40 },
+ { 0, 0x0c0000, 20 },
+ { 1, 0x0c2000, 20 },
+ { 2, 0x0c4000, 20 },
+ { 3, 0x0c6000, 20 }
+};
+
+#define IRQ_MASK 243
+struct {
+ unsigned char new_irq;
+ unsigned char old_irq;
+} irq_table[] = {
+ { 3, 3 },
+ { 4, 4 },
+ { 10, 10 },
+ { 14, 15 }
+};
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
new file mode 100644
index 000000000000..b564c677c6d2
--- /dev/null
+++ b/drivers/net/smc-ultra.c
@@ -0,0 +1,615 @@
+/* smc-ultra.c: An SMC Ultra ethernet driver for linux. */
+/*
+ This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards.
+
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver uses the cards in the 8390-compatible mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+ ultra_probe_isapnp()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+ ultra_pio_input()
+ ultra_pio_output()
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver now supports the programmed-I/O (PIO) data transfer mode of
+ the EtherEZ. It does not use the non-8390-compatible "Altego" mode.
+ That support (if available) is in smc-ez.c.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ Donald Becker : 4/17/96 PIO support, minor potential problems avoided.
+ Donald Becker : 6/6/96 correctly set auto-wrap bit.
+ Alexander Sotirov : 1/20/01 Added support for ISAPnP cards
+
+ Note about the ISA PnP support:
+
+ This driver cannot autoprobe for more than one SMC EtherEZ PnP card.
+ You have to configure the second card manually through the /proc/isapnp
+ interface and then load the module with an explicit io=0x___ option.
+*/
+
+static const char version[] =
+ "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/isapnp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] __initdata =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+static int ultra_probe1(struct net_device *dev, int ioaddr);
+
+#ifdef __ISAPNP__
+static int ultra_probe_isapnp(struct net_device *dev);
+#endif
+
+static int ultra_open(struct net_device *dev);
+static void ultra_reset_8390(struct net_device *dev);
+static void ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_pio_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_pio_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra_close_card(struct net_device *dev);
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id ultra_device_ids[] __initdata = {
+ { ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+ ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+ (long) "SMC EtherEZ (8416)" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
+#endif
+
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */
+#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ultra_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ ei_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+/* Probe for the Ultra. This looks like an 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+static int __init do_ultra_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ SET_MODULE_OWNER(dev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &ultra_poll;
+#endif
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ultra_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+#ifdef __ISAPNP__
+ /* Look for any installed ISAPnP cards */
+ if (isapnp_present() && (ultra_probe_isapnp(dev) == 0))
+ return 0;
+#endif
+
+ for (i = 0; ultra_portlist[i]; i++) {
+ dev->irq = irq;
+ if (ultra_probe1(dev, ultra_portlist[i]) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: ultra_close_card() does free_irq */
+#ifdef __ISAPNP__
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_device_detach(idev);
+#endif
+ release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init ultra_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ultra_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init ultra_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ int checksum = 0;
+ const char *model_name;
+ unsigned char eeprom_irq = 0;
+ static unsigned version_printed;
+ /* Values from various config regs. */
+ unsigned char num_pages, irqreg, addr, piomode;
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xF0) != 0x20 /* SMC Ultra */
+ && (idreg & 0xF0) != 0x40) { /* SMC EtherEZ */
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xFF) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
+
+ printk("%s: %s at %#3x,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+ piomode = inb(ioaddr + 0x8);
+ addr = inb(ioaddr + 0xb);
+ irqreg = inb(ioaddr + 0xd);
+
+ /* Switch back to the station address register set so that the MS-DOS driver
+ can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq;
+
+ /* The IRQ bits are split. */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
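+ /* Bit 6 of irqreg contributes 4 to the index, bits 3:2
+ contribute 0-3, giving an index of 0-7 into irqmap. */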
+
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ dev->irq = irq;
+ eeprom_irq = 1;
+ }
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
+
+ {
+ int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
+ short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+
+ dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
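+ /* addr bits 7:6 pick the base region, bits 3:0 give an 8KB
+ (0x2000) offset within it, and bits 5:4 select the RAM size
+ via num_pages_tbl. */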
+ }
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
+ if (!ei_status.mem) {
+ printk(", failed to ioremap.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
+
+ if (piomode) {
+ printk(",%s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ ei_status.block_input = &ultra_pio_input;
+ ei_status.block_output = &ultra_pio_output;
+ ei_status.get_8390_hdr = &ultra_pio_get_hdr;
+ } else {
+ printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
+ dev->irq, dev->mem_start, dev->mem_end-1);
+ ei_status.block_input = &ultra_block_input;
+ ei_status.block_output = &ultra_block_output;
+ ei_status.get_8390_hdr = &ultra_get_8390_hdr;
+ }
+ ei_status.reset_8390 = &ultra_reset_8390;
+ dev->open = &ultra_open;
+ dev->stop = &ultra_close_card;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+ return 0;
+out:
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ return retval;
+}
+
+#ifdef __ISAPNP__
+static int __init ultra_probe_isapnp(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; ultra_device_ids[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+
+ while ((idev = pnp_find_dev(NULL,
+ ultra_device_ids[i].vendor,
+ ultra_device_ids[i].function,
+ idev))) {
+ /* Avoid already found cards from previous calls */
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ __again:
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* if no io and irq, search for next */
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
+ goto __again;
+ /* found it */
+ dev->base_addr = pnp_port_start(idev, 0);
+ dev->irq = pnp_irq(idev, 0);
+ printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
+ if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ pnp_device_detach(idev);
+ return -ENXIO;
+ }
+ ei_status.priv = (unsigned long)idev;
+ break;
+ }
+ if (!idev)
+ continue;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+#endif
+
+static int
+ultra_open(struct net_device *dev)
+{
+ int retval;
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
+ if (retval)
+ return retval;
+
+ outb(0x00, ioaddr); /* Disable shared memory for safety. */
+ outb(0x80, ioaddr + 5);
+ /* Set the IRQ line. */
+ outb(inb(ioaddr + 4) | 0x80, ioaddr + 4);
+ outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13);
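+ /* Clear the three IRQ-select bits (0x4C) in register 13, then
+ set the pattern for dev->irq from irq2reg above. */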
+ outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4);
+
+ if (ei_status.block_input == &ultra_pio_input) {
+ outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */
+ outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */
+ } else
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ return 0;
+}
+
+static void
+ultra_reset_8390(struct net_device *dev)
+{
+ int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA_RESET, cmd_port);
+ if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x00, cmd_port); /* Disable shared memory for safety. */
+ outb(0x80, cmd_port + 5);
+ if (ei_status.block_input == &ultra_pio_input)
+ outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */
+ else
+ outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG)<<8);
+
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */
+#ifdef __BIG_ENDIAN
+ /* Officially this is what we are doing, but the readl() is faster */
+ /* unfortunately it isn't endian aware of the struct */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void
+ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - (START_PG<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ if (ring_offset + count > ei_status.stop_page*256) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.stop_page*256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static void
+ultra_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - START_PG)<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ memcpy_toio(shmem, buf, count);
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+/* The identical operations for programmed I/O cards.
+ The PIO model is trivial to use: the 16 bit start address is written
+ byte-sequentially to IOPA, with no intervening I/O operations, and the
+ data is read or written to the IOPD data port.
+ The only potential complication is that the address register is shared
+ and must always be rewritten between each read/write direction change.
+ This is no problem for us, as the 8390 code ensures that we are single
+ threaded. */
+static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_page, ioaddr + IOPA);
+ insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+static void ultra_pio_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ char *buf = skb->data;
+
+ /* For now set the address again, although it should already be correct. */
+ outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_offset >> 8, ioaddr + IOPA);
+ /* We know skbuffs are padded to at least word alignment. */
+ insw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static void ultra_pio_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(start_page, ioaddr + IOPA);
+ /* An extra odd byte is OK here as well. */
+ outsw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static int
+ultra_close_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+
+ /* We should someday disable shared memory and change to 8-bit mode
+ "just in case"... */
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */
+static struct net_device *dev_ultra[MAX_ULTRA_CARDS];
+static int io[MAX_ULTRA_CARDS];
+static int irq[MAX_ULTRA_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_ultra_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_ultra[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct net_device *dev = dev_ultra[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
new file mode 100644
index 000000000000..b3e397d7ca85
--- /dev/null
+++ b/drivers/net/smc-ultra32.c
@@ -0,0 +1,454 @@
+/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
+
+Sources:
+
+ This driver is based on (cloned from) the ISA SMC Ultra driver
+ written by Donald Becker. Modifications to support the EISA
+ version of the card by Paul Gortmaker and Leonard N. Zubkoff.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+Theory of Operation:
+
+ The SMC Ultra32C card uses the SMC 83c790 chip which is also
+ found on the ISA SMC Ultra cards. It has a shared memory mode of
+ operation that makes it similar to the ISA version of the card.
+ The main difference is that the EISA card has 32KB of RAM, but
+ only an 8KB window into that memory. The EISA card also can be
+ set for a bus-mastering mode of operation via the ECU, but that
+ is not (and probably will never be) supported by this driver.
+ The ECU should be run to enable shared memory and to disable the
+ bus-mastering feature for use with linux.
+
+ By programming the 8390 to use only 8KB RAM, the modifications
+ to the ISA driver can be limited to the probe and initialization
+ code. This allows easy integration of EISA support into the ISA
+ driver. However, the driver development kit from SMC provided the
+ register information for sliding the 8KB window, and hence the 8390
+ is programmed to use the full 32KB RAM.
+
+ Unfortunately this required code changes outside the probe/init
+ routines, and thus we decided to separate the EISA driver from
+ the ISA one. In this way, ISA users don't end up with a larger
+ driver due to the EISA code, and EISA users don't end up with a
+ larger driver due to the ISA EtherEZ PIO code. The driver is
+ similar to the 3c503/16 driver, in that the window must be set
+ back to the 1st 8KB of space for access to the two 8390 Tx slots.
+
+ In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
+ be a limiting factor, since the EISA bus could get packets off
+ the card fast enough, but having the use of lots of RAM as Rx
+ space is extra insurance if interrupt latencies become excessive.
+
+*/
+
+static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
+
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra32"
+
+static int ultra32_probe1(struct net_device *dev, int ioaddr);
+static int ultra32_open(struct net_device *dev);
+static void ultra32_reset_8390(struct net_device *dev);
+static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra32_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra32_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page);
+static int ultra32_close(struct net_device *dev);
+
+#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
+#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA32_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/*
+ * Defines that apply only to the Ultra32 EISA card. Note that
+ * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
+ * into an EISA ID of 0x1080A34D
+ */
+#define ULTRA32_BASE 0xca0
+#define ULTRA32_ID 0x1080a34d
+#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
+/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
+#define ULTRA32_CFG1 0x04 /* 0xca4 */
+#define ULTRA32_CFG2 0x05 /* 0xca5 */
+#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
+#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
+#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
+#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
+#define ULTRA32_CFG7 0x0d /* 0xcad */
+
+static void cleanup_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
+ /* NB: ultra32_close_card() does free_irq */
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+/* Probe for the Ultra32. This looks like an 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+struct net_device * __init ultra32_probe(int unit)
+{
+ struct net_device *dev;
+ int base;
+ int irq;
+ int err = -ENODEV;
+
+ if (!EISA_bus)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_ei_netdev();
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ irq = dev->irq;
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) {
+ if (ultra32_probe1(dev, base) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (base >= 0x9000)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, edge, media, retval;
+ int checksum = 0;
+ const char *model_name;
+ static unsigned version_printed;
+ /* Values from various config regs. */
+ unsigned char idreg;
+ unsigned char reg4;
+ const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
+
+ if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (inb(ioaddr + ULTRA32_IDPORT) == 0xff ||
+ inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
+ edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
+ printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
+ ioaddr >> 12, ifmap[media],
+ (edge ? "Edge Triggered" : "Level Sensitive"));
+
+ idreg = inb(ioaddr + 7);
+ reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xff) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ model_name = "SMC Ultra32";
+
+ printk("%s: %s at 0x%X,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Reset RAM addr. */
+ outb(0x00, ioaddr + 0x0b);
+
+ /* Switch back to the station address register set so that the
+ MS-DOS driver can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
+ printk("\nsmc-ultra32: Card RAM is disabled! "
+ "Run EISA config utility.\n");
+ retval = -ENODEV;
+ goto out;
+ }
+ if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
+ printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
+ "Run EISA config utility.\n");
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ dev->irq = irq;
+ }
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
+
+ /* Save RAM address in the unused reg0 to avoid excess inb's. */
+ ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
+
+ dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
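+ /* CFG3 bits 6:2 give the shared memory base in 0x2000 (8KB)
+ steps above 0xc0000. */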
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = 0;
+ ei_status.rx_start_page = TX_PAGES;
+ /* All Ultra32 cards have 32KB memory with an 8KB window. */
+ ei_status.stop_page = 128;
+
+ ei_status.mem = ioremap(dev->mem_start, 0x2000);
+ if (!ei_status.mem) {
+ printk(", failed to ioremap.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ dev->mem_end = dev->mem_start + 0x1fff;
+
+ printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end);
+ ei_status.block_input = &ultra32_block_input;
+ ei_status.block_output = &ultra32_block_output;
+ ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
+ ei_status.reset_8390 = &ultra32_reset_8390;
+ dev->open = &ultra32_open;
+ dev->stop = &ultra32_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+ return 0;
+out:
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ return retval;
+}
+
+static int ultra32_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+ int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : SA_SHIRQ;
+ int retval;
+
+ retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
+ if (retval)
+ return retval;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ return 0;
+}
+
+static int ultra32_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static void ultra32_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA32_RESET, ioaddr);
+ if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ultra32_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select correct 8KB Window. */
+ outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
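+ /* ring_page bits 6:5 pick one of the four 8KB windows (32
+ pages of 256 bytes); bits 4:0 locate the header page within
+ the mapped window. */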
+
+#ifdef __BIG_ENDIAN
+ /* Officially this is what we are doing, but the readl() is faster */
+ /* unfortunately it isn't endian aware of the struct */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps, or in this case, when a
+ packet spans an 8KB boundary. Note that the current 8KB segment is
+ already set by the get_8390_hdr routine. */
+
+static void ultra32_block_input(struct net_device *dev,
+ int count,
+ struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
+ int semi_count = 8192 - (ring_offset & 0x1FFF);
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ if (ring_offset < 96*256) {
+ /* Select next 8KB Window. */
+ ring_offset += semi_count;
+ outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
+ memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
+ } else {
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ }
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void ultra32_block_output(struct net_device *dev,
+ int count,
+ const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *xfer_start = ei_status.mem + (start_page<<8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+
+ memcpy_toio(xfer_start, buf, count);
+}
+
+#ifdef MODULE
+#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
+static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
+
+MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct net_device *dev = ultra32_probe(-1);
+ if (IS_ERR(dev))
+ break;
+ dev_ultra[found++] = dev;
+ }
+ if (found)
+ return 0;
+ printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
+ return -ENXIO;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct net_device *dev = dev_ultra[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
new file mode 100644
index 000000000000..f86697da04d6
--- /dev/null
+++ b/drivers/net/smc9194.c
@@ -0,0 +1,1631 @@
+/*------------------------------------------------------------------------
+ . smc9194.c
+ . This is a driver for SMC's 9000 series of Ethernet cards.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . This software may be used and distributed according to the terms
+ . of the GNU General Public License, incorporated herein by reference.
+ .
+ . "Features" of the SMC chip:
+ . 4608 byte packet memory. ( for the 91C92. Others have more )
+ . EEPROM for configuration
+ . AUI/TP selection ( mine has 10Base2/10BaseT select )
+ .
+ . Arguments:
+ . io = for the base address
+ . irq = for the IRQ
+ . ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
+ .
+ . author:
+ . Erik Stahlman ( erik@vt.edu )
+ . contributors:
+ . Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ . o SMC databook
+ . o skeleton.c by Donald Becker ( becker@scyld.com )
+ . o ( a LOT of advice from Becker as well )
+ .
+ . History:
+ . 12/07/95 Erik Stahlman written, got receive/xmit handled
+ . 01/03/96 Erik Stahlman worked out some bugs, actually usable!!! :-)
+ . 01/06/96 Erik Stahlman cleaned up some, better testing, etc
+ . 01/29/96 Erik Stahlman fixed autoirq, added multicast
+ . 02/01/96 Erik Stahlman 1. disabled all interrupts in smc_reset
+ . 2. got rid of post-decrementing bug -- UGH.
+ . 02/13/96 Erik Stahlman Tried to fix autoirq failure. Added more
+ . descriptive error messages.
+ . 02/15/96 Erik Stahlman Fixed typo that caused detection failure
+ . 02/23/96 Erik Stahlman Modified it to fit into kernel tree
+ . Added support to change hardware address
+ . Cleared stats on opens
+ . 02/26/96 Erik Stahlman Trial support for Kernel 1.2.13
+ . Kludge for automatic IRQ detection
+ . 03/04/96 Erik Stahlman Fixed kernel 1.3.70 +
+ . Fixed bug reported by Gardner Buchanan in
+ . smc_enable, with outw instead of outb
+ . 03/06/96 Erik Stahlman Added hardware multicast from Peter Cammaert
+ . 04/14/00 Heiko Pruessing (SMA Regelsysteme) Fixed bug in chip memory
+ . allocation
+ . 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet
+ . 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ"
+ . 11/08/01 Matt Domsch Use common crc32 function
+ ----------------------------------------------------------------------------*/
+
+static const char version[] =
+ "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+
+#include "smc9194.h"
+
+#define DRV_NAME "smc9194"
+
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . Do you want to use 32 bit xfers? This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#ifdef __sh__
+#undef USE_32_BIT
+#else
+#define USE_32_BIT 1
+#endif
+
+#if defined(__H8300H__) || defined(__H8300S__)
+#define NO_AUTOPROBE
+#undef insl
+#undef outsl
+#define insl(a,b,l) io_insl_noswap(a,b,l)
+#define outsl(a,b,l) io_outsl_noswap(a,b,l)
+#endif
+
+/*
+ .the SMC9194 can be at any of the following port addresses. To change,
+ .for a slightly different card, you can add it to the array. Keep in
+ .mind that the array must end in zero.
+*/
+
+struct devlist {
+ unsigned int port;
+ unsigned int irq;
+};
+
+#if defined(CONFIG_H8S_EDOSK2674)
+static struct devlist smc_devlist[] __initdata = {
+ {.port = 0xf80000, .irq = 16},
+ {.port = 0, .irq = 0 },
+};
+#else
+static struct devlist smc_devlist[] __initdata = {
+ {.port = 0x200, .irq = 0},
+ {.port = 0x220, .irq = 0},
+ {.port = 0x240, .irq = 0},
+ {.port = 0x260, .irq = 0},
+ {.port = 0x280, .irq = 0},
+ {.port = 0x2A0, .irq = 0},
+ {.port = 0x2C0, .irq = 0},
+ {.port = 0x2E0, .irq = 0},
+ {.port = 0x300, .irq = 0},
+ {.port = 0x320, .irq = 0},
+ {.port = 0x340, .irq = 0},
+ {.port = 0x360, .irq = 0},
+ {.port = 0x380, .irq = 0},
+ {.port = 0x3A0, .irq = 0},
+ {.port = 0x3C0, .irq = 0},
+ {.port = 0x3E0, .irq = 0},
+ {.port = 0, .irq = 0},
+};
+#endif
+/*
+ . Wait time for memory to be free. This probably shouldn't be
+ . tuned that much, as waiting for this means nothing else happens
+ . in the system
+*/
+#define MEMORY_WAIT_TIME 16
+
+/*
+ . DEBUGGING LEVELS
+ .
+ . 0 for normal operation
+ . 1 for slightly more details
+ . >2 for various levels of increasingly useless information
+ . 2 for interrupt tracking, status flags
+ . 3 for packet dumps, etc.
+*/
+#define SMC_DEBUG 0
+
+#if (SMC_DEBUG > 2 )
+#define PRINTK3(x) printk x
+#else
+#define PRINTK3(x)
+#endif
+
+#if SMC_DEBUG > 1
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x)
+#endif
+
+#ifdef SMC_DEBUG
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x)
+#endif
+
+
+/*------------------------------------------------------------------------
+ .
+ . The internal workings of the driver. If you are changing anything
+ . here with the SMC stuff, you should have the datasheet and known
+ . what you are doing.
+ .
+ -------------------------------------------------------------------------*/
+#define CARDNAME "SMC9194"
+
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ these are things that the kernel wants me to keep, so users
+ can find out semi-useless statistics of how well the card is
+ performing
+ */
+ struct net_device_stats stats;
+
+ /*
+ If I have to wait until memory is available to send
+ a packet, I will store the skbuff here, until I get the
+ desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff * saved_skb;
+
+ /*
+ . This keeps track of how many packets that I have
+ . sent out. When a TX_EMPTY interrupt comes, I know
+ . that all of these have been sent.
+ */
+ int packets_waiting;
+};
+
+
+/*-----------------------------------------------------------------
+ .
+ . The driver can be entered at any of the following entry points.
+ .
+ .------------------------------------------------------------------ */
+
+/*
+ . This is called by register_netdev(). It is responsible for
+ . checking the portlist for the SMC9000 series chipset. If it finds
+ . one, then it will initialize the device, find the hardware information,
+ . and set up the appropriate device parameters.
+ . NOTE: Interrupts are *OFF* when this procedure is called.
+ .
+ . NB: This shouldn't be static since it is referred to externally.
+*/
+struct net_device *smc_init(int unit);
+
+/*
+ . The kernel calls this function when someone wants to use the device,
+ . typically 'ifconfig ethX up'.
+*/
+static int smc_open(struct net_device *dev);
+
+/*
+ . Our watchdog timed out. Called by the networking layer
+*/
+static void smc_timeout(struct net_device *dev);
+
+/*
+ . This is called by the kernel in response to 'ifconfig ethX down'. It
+ . is responsible for cleaning up everything that the open routine
+ . does, and maybe putting the card into a powerdown state.
+*/
+static int smc_close(struct net_device *dev);
+
+/*
+ . This routine allows the proc file system to query the driver's
+ . statistics.
+*/
+static struct net_device_stats * smc_query_statistics( struct net_device *dev);
+
+/*
+ . Finally, a call to set promiscuous mode ( for TCPDUMP and related
+ . programs ) and multicast modes.
+*/
+static void smc_set_multicast_list(struct net_device *dev);
+
+
+/*---------------------------------------------------------------
+ .
+ . Interrupt level calls..
+ .
+ ----------------------------------------------------------------*/
+
+/*
+ . Handles the actual interrupt
+*/
+static irqreturn_t smc_interrupt(int irq, void *, struct pt_regs *regs);
+/*
+ . This is a separate procedure to handle the receipt of a packet, to
+ . leave the interrupt code looking slightly cleaner
+*/
+static inline void smc_rcv( struct net_device *dev );
+/*
+ . This handles a TX interrupt, which is only raised when a packet
+ . is sent with an error.
+*/
+static inline void smc_tx( struct net_device * dev );
+
+/*
+ ------------------------------------------------------------
+ .
+ . Internal routines
+ .
+ ------------------------------------------------------------
+*/
+
+/*
+ . Test if a given location contains a chip, trying to cause as
+ . little damage as possible if it's not an SMC chip.
+*/
+static int smc_probe(struct net_device *dev, int ioaddr);
+
+/*
+ . A rather simple routine to print out a packet for debugging purposes.
+*/
+#if SMC_DEBUG > 2
+static void print_packet( byte *, int );
+#endif
+
+#define tx_done(dev) 1
+
+/* this is called to actually send the packet to the chip */
+static void smc_hardware_send_packet( struct net_device * dev );
+
+/* Since I am not sure if I will have enough room in the chip's ram
+ . to store the packet, I call this routine, which either sends it
+ . now, or generates an interrupt when the card is ready for the
+ . packet */
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device *dev );
+
+/* this does a soft reset on the device */
+static void smc_reset( int ioaddr );
+
+/* Enable Interrupts, Receive, and Transmit */
+static void smc_enable( int ioaddr );
+
+/* this puts the device in an inactive state */
+static void smc_shutdown( int ioaddr );
+
+/* This routine will find the IRQ of the driver if one is not
+ . specified in the input to the device. */
+static int smc_findirq( int ioaddr );
+
+/*
+ . Function: smc_reset( int ioaddr )
+ . Purpose:
+ . This sets the SMC91xx chip to its normal state, hopefully from whatever
+ . mess that any other DOS driver has put it in.
+ .
+ . Maybe I should reset more registers to defaults in here? SOFTRESET should
+ . do that for me.
+ .
+ . Method:
+ . 1. send a SOFT RESET
+ . 2. wait for it to finish
+ . 3. enable autorelease mode
+ . 4. reset the memory management unit
+ . 5. clear all interrupts
+ .
+*/
+static void smc_reset( int ioaddr )
+{
+ /* This resets the registers mostly to defaults, but doesn't
+ affect EEPROM. That seems unnecessary */
+ SMC_SELECT_BANK( 0 );
+ outw( RCR_SOFTRESET, ioaddr + RCR );
+
+ /* this should pause enough for the chip to be happy */
+ SMC_DELAY( );
+
+ /* Set the transmit and receive configuration registers to
+ default values */
+ outw( RCR_CLEAR, ioaddr + RCR );
+ outw( TCR_CLEAR, ioaddr + TCR );
+
+ /* set the control register to automatically
+ release successfully transmitted packets, to make the best
+ use out of our limited memory */
+ SMC_SELECT_BANK( 1 );
+ outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL );
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_RESET, ioaddr + MMU_CMD );
+
+ /* Note: It doesn't seem that waiting for the MMU busy is needed here,
+ but this is a place where future chipsets _COULD_ break. Be wary
+ of issuing another MMU command right after this */
+
+ outb( 0, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ . 1. Enable the transmitter
+ . 2. Enable the receiver
+ . 3. Enable interrupts
+*/
+static void smc_enable( int ioaddr )
+{
+ SMC_SELECT_BANK( 0 );
+ /* see the header file for options in TCR/RCR NORMAL*/
+ outw( TCR_NORMAL, ioaddr + TCR );
+ outw( RCR_NORMAL, ioaddr + RCR );
+
+ /* now, enable interrupts */
+ SMC_SELECT_BANK( 2 );
+ outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose: closes down the SMC91xxx chip.
+ . Method:
+ . 1. zero the interrupt mask
+ . 2. clear the enable receive flag
+ . 3. clear the enable xmit flags
+ .
+ . TODO:
+ . (1) maybe utilize power down mode.
+ . Why not yet? Because while the chip will go into power down mode,
+ . the manual says that it will wake up in response to any I/O requests
+ . in the register space. Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+ /* no more interrupts for me */
+ SMC_SELECT_BANK( 2 );
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK( 0 );
+ outb( RCR_CLEAR, ioaddr + RCR );
+ outb( TCR_CLEAR, ioaddr + TCR );
+#if 0
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK( 1 );
+ outw( inw( ioaddr + CONTROL ) | CTL_POWERDOWN, ioaddr + CONTROL );
+#endif
+}
+
+
+/*
+ . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Purpose:
+ . This sets the internal hardware table to filter out unwanted multicast
+ . packets before they take up memory.
+ .
+ . The SMC chip uses a hash table where the high 6 bits of the CRC of
+ . address are the offset into the table. If that bit is 1, then the
+ . multicast packet is accepted. Otherwise, it's dropped silently.
+ .
+ . To use the 6 bits as an offset into the table, the high 3 bits are the
+ . number of the 8 bit register, while the low 3 bits are the bit within
+ . that register.
+ .
+ . This routine is based very heavily on the one provided by Peter Cammaert.
+*/
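+
+/*
+ . Worked illustration ( the CRC value here is hypothetical ): if the low
+ . six CRC bits of an address were 0x2b ( binary 101 011 ), the loop below
+ . computes invert3[ 0x2b & 7 ] = invert3[3] = 6 for the register index and
+ . 1 << invert3[ (0x2b >> 3) & 7 ] = 1 << invert3[5] = 0x20 for the bit,
+ . so multicast_table[6] |= 0x20.
+*/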
+
+
+static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs )
+{
+ int i;
+ unsigned char multicast_table[ 8 ];
+ struct dev_mc_list * cur_addr;
+ /* table for flipping the order of 3 bits */
+ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+ /* start with a table of all zeros: reject all */
+ memset( multicast_table, 0, sizeof( multicast_table ) );
+
+ cur_addr = addrs;
+ for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
+ int position;
+
+ /* do we have a pointer here? */
+ if ( !cur_addr )
+ break;
+ /* make sure this is a multicast address - shouldn't this
+ be a given if we have it here ? */
+ if ( !( *cur_addr->dmi_addr & 1 ) )
+ continue;
+
+ /* only use the low order bits */
+ position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+
+ }
+ /* now, the table can be loaded into the chipset */
+ SMC_SELECT_BANK( 3 );
+
+ for ( i = 0; i < 8 ; i++ ) {
+ outb( multicast_table[i], ioaddr + MULTICAST1 + i );
+ }
+}
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * )
+ . Purpose:
+ . Attempt to allocate memory for a packet, if chip-memory is not
+ . available, then tell the card to generate an interrupt when it
+ . is available.
+ .
+ . Algorithm:
+ .
+ . o if the saved_skb is not currently null, then drop this packet
+ . on the floor. This should never happen, because of TBUSY.
+ . o if the saved_skb is null, then replace it with the current packet,
+ .	o See if I can send it now.
+ . o (NO): Enable interrupts and let the interrupt handler deal with it.
+ .	o (YES): Send it now.
+*/
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * dev )
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ word length;
+ unsigned short numPages;
+ word time_out;
+
+ netif_stop_queue(dev);
+ /* Well, I want to send the packet.. but I don't know
+ if I can send it right now... */
+
+ if ( lp->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ lp->stats.tx_aborted_errors++;
+ printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
+ return 1;
+ }
+ lp->saved_skb = skb;
+
+ length = skb->len;
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL) {
+ netif_wake_queue(dev);
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
+
+ /*
+ ** The MMU wants the number of pages to be the number of 256 bytes
+ ** 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
+ **
+	** Pkt size for allocating is data length + 6 (for the additional status
+	** words, length and ctl).  If the size is odd, the last byte is included
+	** in this header.
+ */
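+	/*
+	** Worked example ( frame size chosen only for illustration ): a maximal
+	** 1514 byte frame gives ( (1514 & 0xfffe) + 6 ) / 256 = 1520 / 256 = 5,
+	** i.e. six 256 byte pages expressed as "pages - 1" for the MMU.
+	*/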
+ numPages = ((length & 0xfffe) + 6) / 256;
+
+ if (numPages > 7 ) {
+ printk(CARDNAME": Far too big packet error. \n");
+ /* freeing the packet is a good thing here... but should
+ . any packets of this size get down here? */
+ dev_kfree_skb (skb);
+ lp->saved_skb = NULL;
+		/* this IS an error, but I don't want the skb saved */
+ netif_wake_queue(dev);
+ return 0;
+ }
+ /* either way, a packet is waiting now */
+ lp->packets_waiting++;
+
+ /* now, try to allocate the memory */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_ALLOC | numPages, ioaddr + MMU_CMD );
+ /*
+ . Performance Hack
+ .
+ . wait a short amount of time.. if I can send a packet now, I send
+ . it now. Otherwise, I enable an interrupt and wait for one to be
+ . available.
+ .
+ . I could have handled this a slightly different way, by checking to
+ . see if any memory was available in the FREE MEMORY register. However,
+ . either way, I need to generate an allocation, and the allocation works
+ . no matter what, so I saw no point in checking free memory.
+ */
+ time_out = MEMORY_WAIT_TIME;
+ do {
+ word status;
+
+ status = inb( ioaddr + INTERRUPT );
+ if ( status & IM_ALLOC_INT ) {
+ /* acknowledge the interrupt */
+ outb( IM_ALLOC_INT, ioaddr + INTERRUPT );
+ break;
+ }
+ } while ( -- time_out );
+
+ if ( !time_out ) {
+ /* oh well, wait until the chip finds memory later */
+ SMC_ENABLE_INT( IM_ALLOC_INT );
+ PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ /* it's deferred, but I'll handle it later */
+ return 0;
+ }
+ /* or YES! I can send the packet now.. */
+ smc_hardware_send_packet(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+/*
+ . Function: smc_hardware_send_packet(struct net_device * )
+ . Purpose:
+ . This sends the actual packet to the SMC9xxx chip.
+ .
+ . Algorithm:
+ . First, see if a saved_skb is available.
+ .	( this should NOT be called if there is no 'saved_skb' )
+ . Now, find the packet number that the chip allocated
+ . Point the data pointers at it in memory
+ . Set the length word in the chip's memory
+ . Dump the packet to chip memory
+ . Check if a last byte is needed ( odd length packet )
+ . if so, set the control flag right
+ . Tell the card to send it
+ . Enable the transmit interrupt, so I know if it failed
+ . Free the kernel data if I actually sent it.
+*/
+static void smc_hardware_send_packet( struct net_device * dev )
+{
+ struct smc_local *lp = netdev_priv(dev);
+ byte packet_no;
+ struct sk_buff * skb = lp->saved_skb;
+ word length;
+ unsigned int ioaddr;
+ byte * buf;
+
+ ioaddr = dev->base_addr;
+
+ if ( !skb ) {
+ PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ return;
+ }
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ /* If I get here, I _know_ there is a packet slot waiting for me */
+ packet_no = inb( ioaddr + PNR_ARR + 1 );
+ if ( packet_no & 0x80 ) {
+ /* or isn't there? BAD CHIP! */
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ dev_kfree_skb_any(skb);
+ lp->saved_skb = NULL;
+ netif_wake_queue(dev);
+ return;
+ }
+
+ /* we have a packet address, so tell the card to use it */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* point to the beginning of the packet */
+ outw( PTR_AUTOINC , ioaddr + POINTER );
+
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+#if SMC_DEBUG > 2
+ print_packet( buf, length );
+#endif
+
+ /* send the packet length ( +6 for status, length and ctl byte )
+ and the status word ( set to zeros ) */
+#ifdef USE_32_BIT
+ outl( (length +6 ) << 16 , ioaddr + DATA_1 );
+#else
+ outw( 0, ioaddr + DATA_1 );
+	/* send the packet length ( +6 for status words, length, and ctl ) */
+ outb( (length+6) & 0xFF,ioaddr + DATA_1 );
+ outb( (length+6) >> 8 , ioaddr + DATA_1 );
+#endif
+
+ /* send the actual data
+ . I _think_ it's faster to send the longs first, and then
+ . mop up by sending the last word. It depends heavily
+ . on alignment, at least on the 486. Maybe it would be
+ . a good idea to check which is optimal? But that could take
+ . almost as much time as is saved?
+ */
+#ifdef USE_32_BIT
+ if ( length & 0x2 ) {
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#if !defined(__H8300H__) && !defined(__H8300S__)
+ outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+#else
+ ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+#endif
+ }
+ else
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#else
+ outsw(ioaddr + DATA_1 , buf, (length ) >> 1);
+#endif
+ /* Send the last byte, if there is one. */
+
+ if ( (length & 1) == 0 ) {
+ outw( 0, ioaddr + DATA_1 );
+ } else {
+ outb( buf[length -1 ], ioaddr + DATA_1 );
+ outb( 0x20, ioaddr + DATA_1);
+ }
+
+ /* enable the interrupts */
+ SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) );
+
+ /* and let the chipset deal with it */
+ outw( MC_ENQUEUE , ioaddr + MMU_CMD );
+
+ PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+
+ lp->saved_skb = NULL;
+ dev_kfree_skb_any (skb);
+
+ dev->trans_start = jiffies;
+
+ /* we can send another packet */
+ netif_wake_queue(dev);
+
+ return;
+}
+
+/*-------------------------------------------------------------------------
+ |
+ | smc_init(int unit)
+ | Input parameters:
+ | dev->base_addr == 0, try to find all possible locations
+ | dev->base_addr == 1, return failure code
+ | dev->base_addr == 2, always allocate space, and return success
+ | dev->base_addr == <anything else> this is the address to check
+ |
+ | Output:
+ | pointer to net_device or ERR_PTR(error)
+ |
+ ---------------------------------------------------------------------------
+*/
+static int io;
+static int irq;
+static int ifport;
+
+struct net_device * __init smc_init(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct smc_local));
+ static struct devlist *smcdev = smc_devlist;
+ int err = 0;
+
+#ifndef NO_AUTOPROBE
+ smcdev = smc_devlist;
+#endif
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = smc_probe(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (;smcdev->port; smcdev++) {
+ if (smc_probe(dev, smcdev->port) == 0)
+ break;
+ }
+ if (!smcdev->port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, SMC_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*----------------------------------------------------------------------
+ . smc_findirq
+ .
+ . This routine has a simple purpose -- make the SMC chip generate an
+ . interrupt, so an auto-detect routine can detect it, and find the IRQ.
+ ------------------------------------------------------------------------
+*/
+int __init smc_findirq( int ioaddr )
+{
+#ifndef NO_AUTOPROBE
+ int timeout = 20;
+ unsigned long cookie;
+
+
+ cookie = probe_irq_on();
+
+ /*
+ * What I try to do here is trigger an ALLOC_INT. This is done
+ * by allocating a small chunk of memory, which will give an interrupt
+ * when done.
+ */
+
+
+ SMC_SELECT_BANK(2);
+ /* enable ALLOCation interrupts ONLY */
+ outb( IM_ALLOC_INT, ioaddr + INT_MASK );
+
+ /*
+ . Allocate 512 bytes of memory. Note that the chip was just
+ . reset so all the memory is available
+ */
+ outw( MC_ALLOC | 1, ioaddr + MMU_CMD );
+
+ /*
+ . Wait until positive that the interrupt has been generated
+ */
+ while ( timeout ) {
+ byte int_status;
+
+ int_status = inb( ioaddr + INTERRUPT );
+
+ if ( int_status & IM_ALLOC_INT )
+ break; /* got the interrupt */
+ timeout--;
+ }
+ /* there is really nothing that I can do here if timeout fails,
+ as probe_irq_off will return a 0 anyway, which is what I
+ want in this case. Plus, the clean up is needed in both
+ cases. */
+
+ /* DELAY HERE!
+ On a fast machine, the status might change before the interrupt
+ is given to the processor. This means that the interrupt was
+ never detected, and probe_irq_off fails to report anything.
+ This should fix probe_irq_* problems.
+ */
+ SMC_DELAY();
+ SMC_DELAY();
+
+ /* and disable all interrupts again */
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+#else /* NO_AUTOPROBE */
+ struct devlist *smcdev;
+ for (smcdev = smc_devlist; smcdev->port; smcdev++) {
+ if (smcdev->port == ioaddr)
+ return smcdev->irq;
+ }
+ return 0;
+#endif
+}
+
+/*----------------------------------------------------------------------
+ . Function: smc_probe( int ioaddr )
+ .
+ . Purpose:
+ . Tests to see if a given ioaddr points to an SMC9xxx chip.
+ . Returns a 0 on success
+ .
+ . Algorithm:
+ . (1) see if the high byte of BANK_SELECT is 0x33
+ . (2) compare the ioaddr with the base register's address
+ . (3) see if I recognize the chip ID in the appropriate register
+ .
+ .---------------------------------------------------------------------
+ */
+
+/*---------------------------------------------------------------
+ . Here I do typical initialization tasks.
+ .
+ . o Initialize the structure if needed
+ . o print out my vanity message if not done so already
+ . o print out what type of hardware is detected
+ . o print out the ethernet address
+ . o find the IRQ
+ . o set up my private data
+ . o configure the dev structure with my subroutines
+ . o actually GRAB the irq.
+ . o GRAB the region
+ .-----------------------------------------------------------------
+*/
+static int __init smc_probe(struct net_device *dev, int ioaddr)
+{
+ int i, memory, retval;
+ static unsigned version_printed;
+ unsigned int bank;
+
+ const char *version_string;
+ const char *if_string;
+
+ /* registers */
+ word revision_register;
+ word base_address_register;
+ word configuration_register;
+ word memory_info_register;
+ word memory_cfg_register;
+
+ /* Grab the region so that no one else tries to probe our ioports. */
+ if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ dev->irq = irq;
+ dev->if_port = ifport;
+
+ /* First, see if the high byte is 0x33 */
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00) != 0x3300 ) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+	/* The above MIGHT indicate a device, but I need to write to the
+	   register to test this further.  */
+ outw( 0x0, ioaddr + BANK_SELECT );
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00 ) != 0x3300 ) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+#if !defined(CONFIG_H8S_EDOSK2674)
+ /* well, we've already written once, so hopefully another time won't
+ hurt. This time, I need to switch the bank register to bank 1,
+ so I can access the base address register */
+ SMC_SELECT_BANK(1);
+ base_address_register = inw( ioaddr + BASE );
+ if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) {
+		printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). "
+			"Probably not an SMC chip\n",
+			ioaddr, base_address_register >> 3 & 0x3E0 );
+		/* well, the base address register didn't match.  Must not have
+		   been an SMC chip after all. */
+ retval = -ENODEV;
+ goto err_out;
+ }
+#else
+ (void)base_address_register; /* Warning suppression */
+#endif
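+
+	/*
+	   For illustration ( the register value is hypothetical ): a BASE
+	   reading of 0x1800 decodes as ( 0x1800 >> 3 ) & 0x3E0 = 0x300, which
+	   is what the check above compares against ioaddr.
+	*/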
+
+
+ /* check if the revision register is something that I recognize.
+ These might need to be added to later, as future revisions
+ could be added. */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
+ /* I don't recognize this chip, so... */
+ printk(CARDNAME ": IO %x: Unrecognized revision register:"
+ " %x, Contact author. \n", ioaddr, revision_register );
+
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* at this point I'll assume that the chip is an SMC9xxx.
+ It might be prudent to check a listing of MAC addresses
+ against the hardware address, or do some other tests. */
+
+ if (version_printed++ == 0)
+ printk("%s", version);
+
+ /* fill in some of the fields */
+ dev->base_addr = ioaddr;
+
+ /*
+ . Get the MAC address ( bank 1, regs 4 - 9 )
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = inw( ioaddr + ADDR0 + i );
+ dev->dev_addr[ i + 1] = address >> 8;
+ dev->dev_addr[ i ] = address & 0xFF;
+ }
+
+ /* get the memory information */
+
+ SMC_SELECT_BANK( 0 );
+ memory_info_register = inw( ioaddr + MIR );
+ memory_cfg_register = inw( ioaddr + MCR );
+ memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */
+ memory *= 256 * ( memory_info_register & 0xFF );
+
+ /*
+ Now, I want to find out more about the chip. This is sort of
+ redundant, but it's cleaner to have it in both, rather than having
+ one VERY long probe procedure.
+ */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ];
+ if ( !version_string ) {
+ /* I shouldn't get here because this call was done before.... */
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* is it using AUI or 10BaseT ? */
+ if ( dev->if_port == 0 ) {
+ SMC_SELECT_BANK(1);
+ configuration_register = inw( ioaddr + CONFIG );
+ if ( configuration_register & CFG_AUI_SELECT )
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ }
+ if_string = interfaces[ dev->if_port - 1 ];
+
+ /* now, reset the chip, and put it into a known state */
+ smc_reset( ioaddr );
+
+ /*
+ . If dev->irq is 0, then the device has to be banged on to see
+ . what the IRQ is.
+ .
+ . This banging doesn't always detect the IRQ, for unknown reasons.
+ . A workaround is to reset the chip and try again.
+ .
+ . Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+ . be what is requested on the command line. I don't do that, mostly
+ . because the card that I have uses a non-standard method of accessing
+ . the IRQs, and because this _should_ work in most configurations.
+ .
+ . Specifying an IRQ is done with the assumption that the user knows
+ . what (s)he is doing. No checking is done!!!!
+ .
+ */
+ if ( dev->irq < 2 ) {
+ int trials;
+
+ trials = 3;
+ while ( trials-- ) {
+ dev->irq = smc_findirq( ioaddr );
+ if ( dev->irq )
+ break;
+ /* kick the card and try again */
+ smc_reset( ioaddr );
+ }
+ }
+ if (dev->irq == 0 ) {
+ printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n");
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* now, print out the card info, in a short format.. */
+
+ printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory );
+ /*
+ . Print the Ethernet address
+ */
+ printk("ADDR: ");
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i] );
+ printk("%2.2x \n", dev->dev_addr[5] );
+
+ /* set the private data to zero by default */
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, &smc_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
+ dev->irq, retval);
+ goto err_out;
+ }
+
+ dev->open = smc_open;
+ dev->stop = smc_close;
+ dev->hard_start_xmit = smc_wait_to_send_packet;
+ dev->tx_timeout = smc_timeout;
+ dev->watchdog_timeo = HZ/20;
+ dev->get_stats = smc_query_statistics;
+ dev->set_multicast_list = smc_set_multicast_list;
+
+ return 0;
+
+err_out:
+ release_region(ioaddr, SMC_IO_EXTENT);
+ return retval;
+}
+
+#if SMC_DEBUG > 2
+static void print_packet( byte * buf, int length )
+{
+#if 0
+ int i;
+ int remainder;
+ int lines;
+
+ printk("Packet of length %d \n", length );
+ lines = length / 16;
+ remainder = length % 16;
+
+ for ( i = 0; i < lines ; i ++ ) {
+ int cur;
+
+ for ( cur = 0; cur < 8; cur ++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+ }
+ for ( i = 0; i < remainder/2 ; i++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+#endif
+}
+#endif
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc ..
+ *
+ */
+static int smc_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ int i; /* used to set hw ethernet address */
+
+ /* clear out all the junk that was put here before... */
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ /* reset the hardware */
+
+ smc_reset( ioaddr );
+ smc_enable( ioaddr );
+
+ /* Select which interface to use */
+
+ SMC_SELECT_BANK( 1 );
+ if ( dev->if_port == 1 ) {
+ outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+ else if ( dev->if_port == 2 ) {
+ outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+
+ /*
+ According to Becker, I have to set the hardware address
+ at this point, because the (l)user can set it with an
+ ioctl. Easily done...
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = dev->dev_addr[ i + 1 ] << 8 ;
+ address |= dev->dev_addr[ i ];
+ outw( address, ioaddr + ADDR0 + i );
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*--------------------------------------------------------
+ . Called by the kernel when a transmit times out: reset the
+ . card and restart the queue.  This routine is largely based
+ . on skeleton.c, from Becker.
+ .--------------------------------------------------------
+*/
+
+static void smc_timeout(struct net_device *dev)
+{
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" :
+ "network cable problem");
+ /* "kick" the adaptor */
+ smc_reset( dev->base_addr );
+ smc_enable( dev->base_addr );
+ dev->trans_start = jiffies;
+ /* clear anything saved */
+ ((struct smc_local *)dev->priv)->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+/*-------------------------------------------------------------
+ .
+ . smc_rcv - receive a packet from the card
+ .
+ . There is ( at least ) a packet waiting to be read from
+ . chip-memory.
+ .
+ . o Read the status
+ . o If an error, record it
+ . o otherwise, read in the packet
+ --------------------------------------------------------------
+*/
+static void smc_rcv(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int packet_number;
+ word status;
+ word packet_length;
+
+ /* assume bank 2 */
+
+ packet_number = inw( ioaddr + FIFO_PORTS );
+
+ if ( packet_number & FP_RXEMPTY ) {
+		/* we got called, but nothing was on the FIFO */
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ /* don't need to restore anything */
+ return;
+ }
+
+ /* start reading from the start of the packet */
+ outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER );
+
+ /* First two words are status and packet_length */
+ status = inw( ioaddr + DATA_1 );
+ packet_length = inw( ioaddr + DATA_1 );
+
+ packet_length &= 0x07ff; /* mask off top bits */
+
+ PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length ));
+ /*
+	. the packet length contains 3 extra words:
+	. status, length, and an extra word with an odd byte.
+ */
+ packet_length -= 6;
+
+ if ( !(status & RS_ERRORS ) ){
+ /* do stuff to make a new packet */
+ struct sk_buff * skb;
+ byte * data;
+
+ /* read one extra byte */
+ if ( status & RS_ODDFRAME )
+ packet_length++;
+
+ /* set multicast stats */
+ if ( status & RS_MULTICAST )
+ lp->stats.multicast++;
+
+ skb = dev_alloc_skb( packet_length + 5);
+
+ if ( skb == NULL ) {
+ printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
+ lp->stats.rx_dropped++;
+ goto done;
+ }
+
+ /*
+		 ! This should work without alignment, but it could be
+		 ! slower in the worst case
+ */
+
+ skb_reserve( skb, 2 ); /* 16 bit alignment */
+
+ skb->dev = dev;
+ data = skb_put( skb, packet_length);
+
+#ifdef USE_32_BIT
+ /* QUESTION: Like in the TX routine, do I want
+ to send the DWORDs or the bytes first, or some
+ mixture. A mixture might improve already slow PIO
+ performance */
+ PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ packet_length >> 2, packet_length & 3 ));
+ insl(ioaddr + DATA_1 , data, packet_length >> 2 );
+ /* read the left over bytes */
+ insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
+ packet_length & 0x3 );
+#else
+ PRINTK3((" Reading %d words and %d byte(s) \n",
+ (packet_length >> 1 ), packet_length & 1 ));
+ insw(ioaddr + DATA_1 , data, packet_length >> 1);
+ if ( packet_length & 1 ) {
+ data += packet_length & ~1;
+ *(data++) = inb( ioaddr + DATA_1 );
+ }
+#endif
+#if SMC_DEBUG > 2
+ print_packet( data, packet_length );
+#endif
+
+ skb->protocol = eth_type_trans(skb, dev );
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += packet_length;
+ } else {
+ /* error ... */
+ lp->stats.rx_errors++;
+
+ if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++;
+ if ( status & (RS_TOOSHORT | RS_TOOLONG ) )
+ lp->stats.rx_length_errors++;
+ if ( status & RS_BADCRC) lp->stats.rx_crc_errors++;
+ }
+
+done:
+ /* error or good, tell the card to get rid of this packet */
+ outw( MC_RELEASE, ioaddr + MMU_CMD );
+}
+
+
+/*************************************************************************
+ . smc_tx
+ .
+ . Purpose: Handle a transmit error message. This will only be called
+ .	  when an error occurs, because of the AUTO_RELEASE mode.
+ .
+ . Algorithm:
+ . Save pointer and packet no
+ . Get the packet no from the top of the queue
+ . check if it's valid ( if not, is this an error??? )
+ . read the status word
+ . record the error
+ . ( resend? Not really, since we don't want old packets around )
+ . Restore saved values
+ ************************************************************************/
+static void smc_tx( struct net_device * dev )
+{
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+ byte saved_packet;
+ byte packet_no;
+ word tx_status;
+
+
+ /* assume bank 2 */
+
+ saved_packet = inb( ioaddr + PNR_ARR );
+ packet_no = inw( ioaddr + FIFO_PORTS );
+ packet_no &= 0x7F;
+
+ /* select this as the packet to read from */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* read the first word from this packet */
+ outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
+
+ tx_status = inw( ioaddr + DATA_1 );
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+
+ lp->stats.tx_errors++;
+ if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++;
+ if ( tx_status & TS_LATCOL ) {
+ printk(KERN_DEBUG CARDNAME
+ ": Late collision occurred on last xmit.\n");
+ lp->stats.tx_window_errors++;
+ }
+#if 0
+ if ( tx_status & TS_16COL ) { ... }
+#endif
+
+ if ( tx_status & TS_SUCCESS ) {
+ printk(CARDNAME": Successful packet caused interrupt \n");
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK( 0 );
+ outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR );
+
+ /* kill the packet */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_FREEPKT, ioaddr + MMU_CMD );
+
+ /* one less packet waiting for me */
+ lp->packets_waiting--;
+
+ outb( saved_packet, ioaddr + PNR_ARR );
+ return;
+}
+
+/*--------------------------------------------------------------------
+ .
+ . This is the main routine of the driver, to handle the device when
+ . it needs some attention.
+ .
+ . So:
+ . first, save state of the chipset
+ . branch off into routines to handle each case, and acknowledge
+ . each to the interrupt register
+ . and finally restore state.
+ .
+ ---------------------------------------------------------------------*/
+
+static irqreturn_t smc_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+
+ byte status;
+ word card_stats;
+ byte mask;
+ int timeout;
+ /* state registers */
+ word saved_bank;
+ word saved_pointer;
+ int handled = 0;
+
+
+ PRINTK3((CARDNAME": SMC interrupt started \n"));
+
+ saved_bank = inw( ioaddr + BANK_SELECT );
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw( ioaddr + POINTER );
+
+ mask = inb( ioaddr + INT_MASK );
+ /* clear all interrupts */
+ outb( 0, ioaddr + INT_MASK );
+
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 4;
+
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ do {
+ /* read the status flag, and mask it */
+ status = inb( ioaddr + INTERRUPT ) & mask;
+ if (!status )
+ break;
+
+ handled = 1;
+
+ PRINTK3((KERN_WARNING CARDNAME
+ ": Handling interrupt status %x \n", status ));
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ PRINTK2((KERN_WARNING CARDNAME
+ ": Receive Interrupt\n"));
+ smc_rcv(dev);
+ } else if (status & IM_TX_INT ) {
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX ERROR handled\n"));
+ smc_tx(dev);
+ outb(IM_TX_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_TX_EMPTY_INT ) {
+ /* update stats */
+ SMC_SELECT_BANK( 0 );
+ card_stats = inw( ioaddr + COUNTER );
+ /* single collisions */
+ lp->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ lp->stats.collisions += card_stats & 0xF;
+
+ /* these are for when linux supports these statistics */
+
+ SMC_SELECT_BANK( 2 );
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX_BUFFER_EMPTY handled\n"));
+ outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
+ mask &= ~IM_TX_EMPTY_INT;
+ lp->stats.tx_packets += lp->packets_waiting;
+ lp->packets_waiting = 0;
+
+ } else if (status & IM_ALLOC_INT ) {
+ PRINTK2((KERN_DEBUG CARDNAME
+ ": Allocation interrupt \n"));
+ /* clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet( dev );
+
+ /* enable xmit interrupts based on this */
+ mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
+
+			/* and let the upper layers hand me more packets to send */
+ netif_wake_queue(dev);
+
+ PRINTK2((CARDNAME": Handoff done successfully.\n"));
+ } else if (status & IM_RX_OVRN_INT ) {
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_EPH_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ } else if (status & IM_ERCV_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ outb( IM_ERCV_INT, ioaddr + INTERRUPT );
+ }
+ } while ( timeout -- );
+
+
+ /* restore state register */
+ SMC_SELECT_BANK( 2 );
+ outb( mask, ioaddr + INT_MASK );
+
+ PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ outw( saved_pointer, ioaddr + POINTER );
+
+ SMC_SELECT_BANK( saved_bank );
+
+ PRINTK3((CARDNAME ": Interrupt done\n"));
+ return IRQ_RETVAL(handled);
+}
+
+
+/*----------------------------------------------------
+ . smc_close
+ .
+ . this makes the board clean up everything that it can
+ . and not talk to the outside world. Caused by
+ . an 'ifconfig ethX down'
+ .
+ -----------------------------------------------------*/
+static int smc_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ /* clear everything */
+ smc_shutdown( dev->base_addr );
+
+ /* Update the statistics here. */
+ return 0;
+}
+
+/*------------------------------------------------------------
+ . Get the current statistics.
+ . This may be called with the card open or closed.
+ .-------------------------------------------------------------*/
+static struct net_device_stats *smc_query_statistics(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/*-----------------------------------------------------------
+ . smc_set_multicast_list
+ .
+ . This routine will, depending on the values passed to it,
+ . either make it accept multicast packets, go into
+ . promiscuous mode ( for TCPDUMP and cousins ) or accept
+ . a select set of multicast packets
+*/
+static void smc_set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ SMC_SELECT_BANK(0);
+ if ( dev->flags & IFF_PROMISC )
+ outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR );
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /* Here, I am setting this to accept all multicast packets.
+ I don't need to zero the multicast table, because the flag is
+	   checked before the table is used.
+ */
+ else if (dev->flags & IFF_ALLMULTI)
+ outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR );
+
+ /* We just get all multicast packets even if we only want them
+ . from one source. This will be changed at some future
+ . point. */
+ else if (dev->mc_count ) {
+ /* support hardware multicasting */
+
+ /* be sure I get rid of flags I might have set */
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+ /* NOTE: this has to set the bank, so make sure it is the
+ last thing called. The bank is set to zero at the top */
+ smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ }
+ else {
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+
+ /*
+ since I'm disabling all multicast entirely, I need to
+ clear the multicast list
+ */
+ SMC_SELECT_BANK( 3 );
+ outw( 0, ioaddr + MULTICAST1 );
+ outw( 0, ioaddr + MULTICAST2 );
+ outw( 0, ioaddr + MULTICAST3 );
+ outw( 0, ioaddr + MULTICAST4 );
+ }
+}
+
+#ifdef MODULE
+
+static struct net_device *devSMC9194;
+MODULE_LICENSE("GPL");
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(ifport, int, 0);
+MODULE_PARM_DESC(io, "SMC 9194 I/O base address");
+MODULE_PARM_DESC(irq, "SMC 9194 IRQ number");
+MODULE_PARM_DESC(ifport, "SMC 9194 interface port (0-default, 1-TP, 2-AUI)");
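+
+/*
+ . Typical module usage ( the values below are only an example ):
+ .	insmod smc9194 io=0x300 irq=10 ifport=1
+ . io must match the card's configured base address; ifport selects 1 for
+ . TP, 2 for AUI, and 0 leaves the choice to the card's configuration.
+*/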
+
+int init_module(void)
+{
+ if (io == 0)
+ printk(KERN_WARNING
+ CARDNAME": You shouldn't use auto-probing with insmod!\n" );
+
+ /* copy the parameters from insmod into the device structure */
+ devSMC9194 = smc_init(-1);
+ if (IS_ERR(devSMC9194))
+ return PTR_ERR(devSMC9194);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(devSMC9194);
+ free_irq(devSMC9194->irq, devSMC9194);
+ release_region(devSMC9194->base_addr, SMC_IO_EXTENT);
+ free_netdev(devSMC9194);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/smc9194.h b/drivers/net/smc9194.h
new file mode 100644
index 000000000000..393ab909bd86
--- /dev/null
+++ b/drivers/net/smc9194.h
@@ -0,0 +1,241 @@
+/*------------------------------------------------------------------------
+ . smc9194.h
+ . Copyright (C) 1996 by Erik Stahlman
+ .
+ . This software may be used and distributed according to the terms
+ . of the GNU General Public License, incorporated herein by reference.
+ .
+ . This file contains register information and access macros for
+ . the SMC91xxx chipset.
+ .
+ . Information contained in this file was obtained from the SMC91C94
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smc.com in the components division.
+ .  ( thanks to advice from Donald Becker ).
+ .
+ . Authors
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . History
+ . 01/06/96 Erik Stahlman moved definitions here from main .c file
+ . 01/19/96 Erik Stahlman polished this up some, and added better
+ . error handling
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC9194_H_
+#define _SMC9194_H_
+
+/* I want some simple types */
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long int dword;
+
+
+/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */
+
+#define SMC_IO_EXTENT 16
+
+
+/*---------------------------------------------------------------
+ .
+ . A description of the SMC registers is probably in order here,
+ . although for details, the SMC datasheet is invaluable.
+ .
+ . Basically, the chip has 4 banks of registers ( 0 to 3 ), which
+ . are accessed by writing a number into the BANK_SELECT register
+ . ( I also use a SMC_SELECT_BANK macro for this ).
+ .
+ . The banks are configured so that for most purposes, bank 2 is all
+ . that is needed for simple run time tasks.
+ -----------------------------------------------------------------------*/
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
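+
+/*
+ . For example, reading 0x3302 from this register ( a hypothetical value )
+ . would mean that bank 2 is currently selected, with the 0x33 high byte
+ . acting as the identification pattern described above.
+*/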
+#define BANK_SELECT 14
+
+/* BANK 0 */
+
+#define TCR 0 /* transmit control register */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_FDUPLX 0x0800 /* receive packets sent out */
+#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */
+#define TCR_MON_CNS 0x0400 /* monitors the carrier status */
+#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the normal settings for the TCR register : */
+/* QUESTION: do I want to enable padding of short packets ? */
+#define TCR_NORMAL TCR_ENABLE
+
+
+#define EPH_STATUS 2
+#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */
+
+#define RCR 4
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+
+#define COUNTER 6
+#define MIR 8
+#define MCR 10
+/* 12 is reserved */
+
+/* BANK 1 */
+#define CONFIG 0
+#define CFG_AUI_SELECT 0x100
+#define BASE 2
+#define ADDR0 4
+#define ADDR1 6
+#define ADDR2 8
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_POWERDOWN 0x2000
+#define CTL_LE_ENABLE 0x80
+#define CTL_CR_ENABLE 0x40
+#define CTL_TE_ENABLE 0x0020
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */
+
+/* BANK 2 */
+#define MMU_CMD 0
+#define MC_BUSY 1 /* only readable bit in the register */
+#define MC_NOP 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_REMOVE 0x60 /* remove the current rx packet */
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+
+#define FP_RXEMPTY 0x8000
+#define FP_TXEMPTY 0x80
+
+#define POINTER 6
+#define PTR_READ 0x2000
+#define PTR_RCV 0x8000
+#define PTR_AUTOINC 0x4000
+#define PTR_AUTO_INC 0x0040
+
+#define DATA_1 8
+#define DATA_2 10
+#define INTERRUPT 12
+
+#define INT_MASK 13
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+#define IM_ERCV_INT 0x40 /* not on SMC9192 */
+
+/* BANK 3 */
+#define MULTICAST1 0
+#define MULTICAST2 2
+#define MULTICAST3 4
+#define MULTICAST4 6
+#define MGMT 8
+#define REVISION 10 /* ( hi: chip id low: rev # ) */
+
+
+/* this is NOT on SMC9192 */
+#define ERCV 12
+
+#define CHIP_9190 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_91100 7
+
+static const char * chip_ids[ 15 ] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ NULL,
+ /* 7 */ "SMC91C100",
+ /* 8 */ "SMC91C100FD",
+ NULL, NULL, NULL,
+ NULL, NULL, NULL};
+
+/*
+ . Transmit status bits
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL 0x0200
+#define TS_16COL 0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+static const char * interfaces[ 2 ] = { "TP", "AUI" };
+
+/*-------------------------------------------------------------------------
+ . I define some macros to make it easier to do somewhat common
+ . or slightly complicated, repeated tasks.
+ --------------------------------------------------------------------------*/
+
+/* select a register bank, 0 to 3 */
+
+#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); }
+
+/* define a small delay for the reset */
+#define SMC_DELAY() { inw( ioaddr + RCR );\
+ inw( ioaddr + RCR );\
+ inw( ioaddr + RCR ); }
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask |= (x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
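+
+/*
+ . For example, SMC_ENABLE_INT( IM_ALLOC_INT ) is how the transmit path in
+ . smc9194.c asks to be told when chip memory becomes available.
+*/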
+
+/* this disables an interrupt from the interrupt mask register */
+
+#define SMC_DISABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask &= ~(x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/*----------------------------------------------------------------------
+ . Define the interrupts that I want to receive from the card
+ .
+ . I want:
+ . IM_EPH_INT, for nasty errors
+ . IM_RCV_INT, for happy received packets
+ . IM_RX_OVRN_INT, because I have to kick the receiver
+ --------------------------------------------------------------------------*/
+#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT)
+
+#endif  /* _SMC9194_H_ */
+
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
new file mode 100644
index 000000000000..6766bacbb199
--- /dev/null
+++ b/drivers/net/smc91x.c
@@ -0,0 +1,2343 @@
+/*
+ * smc91x.c
+ * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices.
+ *
+ * Copyright (C) 1996 by Erik Stahlman
+ * Copyright (C) 2001 Standard Microsystems Corporation
+ * Developed by Simple Network Magic Corporation
+ * Copyright (C) 2003 Monta Vista Software, Inc.
+ * Unified SMC91x driver by Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Arguments:
+ * io = for the base address
+ * irq = for the IRQ
+ * nowait = 0 for normal wait states, 1 eliminates additional wait states
+ *
+ * original author:
+ * Erik Stahlman <erik@vt.edu>
+ *
+ * hardware multicast code:
+ * Peter Cammaert <pc@denkart.be>
+ *
+ * contributors:
+ * Daris A Nevil <dnevil@snmc.com>
+ * Nicolas Pitre <nico@cam.org>
+ * Russell King <rmk@arm.linux.org.uk>
+ *
+ * History:
+ * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet
+ * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ"
+ * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111
+ * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111
+ * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111
+ * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support
+ * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races,
+ * more bus abstraction, big cleanup, etc.
+ * 29/09/03 Russell King - add driver model support
+ * - ethtool support
+ * - convert to use generic MII interface
+ * - add link up/down notification
+ * - don't try to handle full negotiation in
+ * smc_phy_configure
+ * - clean up (and fix stack overrun) in PHY
+ * MII read/write functions
+ * 22/09/04 Nicolas Pitre big update (see commit log for details)
+ */
+static const char version[] =
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@cam.org>\n";
+
+/* Debugging level */
+#ifndef SMC_DEBUG
+#define SMC_DEBUG 0
+#endif
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "smc91x.h"
+
+#ifdef CONFIG_ISA
+/*
+ * the LAN91C111 can be at any of the following port addresses. To change,
+ * for a slightly different card, you can add it to the array. Keep in
+ * mind that the array must end in zero.
+ */
+static unsigned int smc_portlist[] __initdata = {
+ 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
+ 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0
+};
+
+#ifndef SMC_IOADDR
+# define SMC_IOADDR -1
+#endif
+static unsigned long io = SMC_IOADDR;
+module_param(io, ulong, 0400);
+MODULE_PARM_DESC(io, "I/O base address");
+
+#ifndef SMC_IRQ
+# define SMC_IRQ -1
+#endif
+static int irq = SMC_IRQ;
+module_param(irq, int, 0400);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+#endif /* CONFIG_ISA */
+
+#ifndef SMC_NOWAIT
+# define SMC_NOWAIT 0
+#endif
+static int nowait = SMC_NOWAIT;
+module_param(nowait, int, 0400);
+MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The internal workings of the driver. If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc91x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN 1
+
+/*
+ * Wait time for memory to be free. This probably shouldn't be
+ * tuned that much, as waiting for this means nothing else happens
+ * in the system
+ */
+#define MEMORY_WAIT_TIME 16
+
+/*
+ * This selects whether TX packets are sent one by one to the SMC91x internal
+ * memory and throttled until transmission completes. This may prevent
+ * RX overruns a little by keeping much of the memory free for RX packets
+ * but at the expense of reduced TX throughput and increased IRQ overhead.
+ * Note this is not a cure for a too slow data bus or too high IRQ latency.
+ */
+#define THROTTLE_TX_PKTS 0
+
+/*
+ * The MII clock high/low times. 2x this number gives the MII clock period
+ * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!)
+ */
+#define MII_DELAY 1
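+
+/*
+ * With MII_DELAY at 1 the clock period is therefore about 2us, so the
+ * 64-bit management frame implied by the 6.4ms figure above now takes
+ * roughly 128us.
+ */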
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ * If I have to wait until memory is available to send a
+ * packet, I will store the skbuff here, until I get the
+ * desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+ struct tasklet_struct tx_task;
+
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ /* version/revision of the SMC91x chip */
+ int version;
+
+ /* Contains the current active transmission mode */
+ int tcr_cur_mode;
+
+ /* Contains the current active receive mode */
+ int rcr_cur_mode;
+
+ /* Contains the current active receive/phy mode */
+ int rpc_cur_mode;
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+ int work_pending;
+
+ spinlock_t lock;
+
+#ifdef SMC_CAN_USE_DATACS
+ u32 __iomem *datacs;
+#endif
+
+#ifdef SMC_USE_PXA_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+#endif
+ void __iomem *base;
+};
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+ do { \
+ if (SMC_DEBUG >= (n)) \
+ printk(args); \
+ } while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while(0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG > 3
+static void PRINT_PKT(u_char *buf, int length)
+{
+ int i;
+ int remainder;
+ int lines;
+
+ lines = length / 16;
+ remainder = length % 16;
+
+ for (i = 0; i < lines ; i ++) {
+ int cur;
+ for (cur = 0; cur < 8; cur++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+ }
+ for (i = 0; i < remainder/2 ; i++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+}
+#else
+#define PRINT_PKT(x...) do { } while(0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) do { \
+ unsigned char mask; \
+ spin_lock_irq(&lp->lock); \
+ mask = SMC_GET_INT_MASK(); \
+ mask |= (x); \
+ SMC_SET_INT_MASK(mask); \
+ spin_unlock_irq(&lp->lock); \
+} while (0)
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(x) do { \
+ unsigned char mask; \
+ spin_lock_irq(&lp->lock); \
+ mask = SMC_GET_INT_MASK(); \
+ mask &= ~(x); \
+ SMC_SET_INT_MASK(mask); \
+ spin_unlock_irq(&lp->lock); \
+} while (0)
+
+/*
+ * Wait while MMU is busy. This is usually in the order of a few nanosecs
+ * if at all, but let's avoid deadlocking the system if the hardware
+ * decides to go south.
+ */
+#define SMC_WAIT_MMU_BUSY() do { \
+ if (unlikely(SMC_GET_MMU_CMD() & MC_BUSY)) { \
+ unsigned long timeout = jiffies + 2; \
+ while (SMC_GET_MMU_CMD() & MC_BUSY) { \
+ if (time_after(jiffies, timeout)) { \
+ printk("%s: timeout %s line %d\n", \
+ dev->name, __FILE__, __LINE__); \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ } \
+} while (0)
+
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc_reset(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int ctl, cfg;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ /* Disable all interrupts */
+ spin_lock(&lp->lock);
+ SMC_SELECT_BANK(2);
+ SMC_SET_INT_MASK(0);
+ spin_unlock(&lp->lock);
+
+ /*
+ * This resets the registers mostly to defaults, but doesn't
+ * affect EEPROM. That seems unnecessary
+ */
+ SMC_SELECT_BANK(0);
+ SMC_SET_RCR(RCR_SOFTRST);
+
+ /*
+ * Setup the Configuration Register
+ * This is necessary because the CONFIG_REG is not affected
+ * by a soft reset
+ */
+ SMC_SELECT_BANK(1);
+
+ cfg = CONFIG_DEFAULT;
+
+ /*
+ * Setup for fast accesses if requested. If the card/system
+ * can't handle it then there will be no recovery except for
+ * a hard reset or power cycle
+ */
+ if (nowait)
+ cfg |= CONFIG_NO_WAIT;
+
+ /*
+ * Release from possible power-down state
+ * Configuration register is not affected by Soft Reset
+ */
+ cfg |= CONFIG_EPH_POWER_EN;
+
+ SMC_SET_CONFIG(cfg);
+
+ /* this should pause enough for the chip to be happy */
+ /*
+ * elaborate? What does the chip _need_? --jgarzik
+ *
+ * This seems to be undocumented, but something the original
+ * driver(s) have always done. Suspect undocumented timing
+ * info/determined empirically. --rmk
+ */
+ udelay(1);
+
+ /* Disable transmit and receive functionality */
+ SMC_SELECT_BANK(0);
+ SMC_SET_RCR(RCR_CLEAR);
+ SMC_SET_TCR(TCR_CLEAR);
+
+ SMC_SELECT_BANK(1);
+ ctl = SMC_GET_CTL() | CTL_LE_ENABLE;
+
+ /*
+ * Set the control register to automatically release successfully
+ * transmitted packets, to make the best use out of our limited
+ * memory
+ */
+ if(!THROTTLE_TX_PKTS)
+ ctl |= CTL_AUTO_RELEASE;
+ else
+ ctl &= ~CTL_AUTO_RELEASE;
+ SMC_SET_CTL(ctl);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(2);
+ SMC_SET_MMU_CMD(MC_RESET);
+ SMC_WAIT_MMU_BUSY();
+
+ /* clear anything saved */
+ if (lp->pending_tx_skb != NULL) {
+ dev_kfree_skb (lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ }
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc_enable(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int mask;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ /* see the header file for options in TCR/RCR DEFAULT */
+ SMC_SELECT_BANK(0);
+ SMC_SET_TCR(lp->tcr_cur_mode);
+ SMC_SET_RCR(lp->rcr_cur_mode);
+
+ SMC_SELECT_BANK(1);
+ SMC_SET_MAC_ADDR(dev->dev_addr);
+
+ /* now, enable interrupts */
+ mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
+ if (lp->version >= (CHIP_91100 << 4))
+ mask |= IM_MDINT;
+ SMC_SELECT_BANK(2);
+ SMC_SET_INT_MASK(mask);
+
+ /*
+ * From this point the register bank must _NOT_ be switched away
+ * to something else than bank 2 without proper locking against
+ * races with any tasklet or interrupt handlers until smc_shutdown()
+ * or smc_reset() is called.
+ */
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc_shutdown(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+
+ /* no more interrupts for me */
+ spin_lock(&lp->lock);
+ SMC_SELECT_BANK(2);
+ SMC_SET_INT_MASK(0);
+ spin_unlock(&lp->lock);
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK(0);
+ SMC_SET_RCR(RCR_CLEAR);
+ SMC_SET_TCR(TCR_CLEAR);
+
+#ifdef POWER_DOWN
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK(1);
+ SMC_SET_CONFIG(SMC_GET_CONFIG() & ~CONFIG_EPH_POWER_EN);
+#endif
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ */
+static inline void smc_rcv(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int packet_number, status, packet_len;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ packet_number = SMC_GET_RXFIFO();
+ if (unlikely(packet_number & RXFIFO_REMPTY)) {
+ PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+ return;
+ }
+
+ /* read from start of packet */
+ SMC_SET_PTR(PTR_READ | PTR_RCV | PTR_AUTOINC);
+
+ /* First two words are status and packet length */
+ SMC_GET_PKT_HDR(status, packet_len);
+ packet_len &= 0x07ff; /* mask off top bits */
+ DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+ dev->name, packet_number, status,
+ packet_len, packet_len);
+
+ back:
+ if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
+ if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
+ /* accept VLAN packets */
+ status &= ~RS_TOOLONG;
+ goto back;
+ }
+ if (packet_len < 6) {
+ /* bloody hardware */
+			printk(KERN_ERR "%s: fubar (rxlen %u status %x)\n",
+ dev->name, packet_len, status);
+ status |= RS_TOOSHORT;
+ }
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
+ lp->stats.rx_errors++;
+ if (status & RS_ALGNERR)
+ lp->stats.rx_frame_errors++;
+ if (status & (RS_TOOSHORT | RS_TOOLONG))
+ lp->stats.rx_length_errors++;
+ if (status & RS_BADCRC)
+ lp->stats.rx_crc_errors++;
+ } else {
+ struct sk_buff *skb;
+ unsigned char *data;
+ unsigned int data_len;
+
+ /* set multicast stats */
+ if (status & RS_MULTICAST)
+ lp->stats.multicast++;
+
+ /*
+ * Actual payload is packet_len - 6 (or 5 if odd byte).
+ * We want skb_reserve(2) and the final ctrl word
+ * (2 bytes, possibly containing the payload odd byte).
+ * Furthermore, we add 2 bytes to allow rounding up to
+ * multiple of 4 bytes on 32 bit buses.
+ * Hence packet_len - 6 + 2 + 2 + 2.
+ */
+ skb = dev_alloc_skb(packet_len);
+ if (unlikely(skb == NULL)) {
+ printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
+ dev->name);
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
+ lp->stats.rx_dropped++;
+ return;
+ }
+
+ /* Align IP header to 32 bits */
+ skb_reserve(skb, 2);
+
+ /* BUG: the LAN91C111 rev A never sets this bit. Force it. */
+ if (lp->version == 0x90)
+ status |= RS_ODDFRAME;
+
+ /*
+ * If odd length: packet_len - 5,
+ * otherwise packet_len - 6.
+ * With the trailing ctrl byte it's packet_len - 4.
+ */
+ data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
+ data = skb_put(skb, data_len);
+ SMC_PULL_DATA(data, packet_len - 4);
+
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
+
+ PRINT_PKT(data, packet_len - 4);
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += data_len;
+ }
+}
+
+#ifdef CONFIG_SMP
+/*
+ * On SMP we have the following problem:
+ *
+ * A = smc_hardware_send_pkt()
+ * B = smc_hard_start_xmit()
+ * C = smc_interrupt()
+ *
+ * A and B can never be executed simultaneously. However, at least on UP,
+ * it is possible (and even desirable) for C to interrupt execution of
+ * A or B in order to have better RX reliability and avoid overruns.
+ * C, just like A and B, must have exclusive access to the chip and
+ * each of them must lock against any other concurrent access.
+ * Unfortunately it is not possible to have C suspend execution of A or
+ * B taking place on another CPU. On UP this is not an issue since A and B
+ * are run from softirq context and C from hard IRQ context, and there is
+ * no other CPU where concurrent access can happen.
+ * If ever there is a way to force at least B and C to always be executed
+ * on the same CPU then we could use read/write locks to protect against
+ * any other concurrent access and C would always interrupt B. But life
+ * isn't that easy in an SMP world...
+ */
+#define smc_special_trylock(lock) \
+({ \
+ int __ret; \
+ local_irq_disable(); \
+ __ret = spin_trylock(lock); \
+ if (!__ret) \
+ local_irq_enable(); \
+ __ret; \
+})
+#define smc_special_lock(lock) spin_lock_irq(lock)
+#define smc_special_unlock(lock) spin_unlock_irq(lock)
+#else
+#define smc_special_trylock(lock) (1)
+#define smc_special_lock(lock) do { } while (0)
+#define smc_special_unlock(lock) do { } while (0)
+#endif
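+
+/*
+ * Usage sketch: the hard_start_xmit path takes the lock unconditionally
+ * with smc_special_lock(), while the TX tasklet uses smc_special_trylock()
+ * and simply reschedules itself if the lock is currently held, as
+ * smc_hardware_send_pkt() below does.
+ */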
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc_hardware_send_pkt(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ struct sk_buff *skb;
+ unsigned int packet_no, len;
+ unsigned char *buf;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ if (!smc_special_trylock(&lp->lock)) {
+ netif_stop_queue(dev);
+ tasklet_schedule(&lp->tx_task);
+ return;
+ }
+
+ skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+ packet_no = SMC_GET_AR();
+ if (unlikely(packet_no & AR_FAILED)) {
+ printk("%s: Memory allocation failed.\n", dev->name);
+ lp->stats.tx_errors++;
+ lp->stats.tx_fifo_errors++;
+ smc_special_unlock(&lp->lock);
+ goto done;
+ }
+
+ /* point to the beginning of the packet */
+ SMC_SET_PN(packet_no);
+ SMC_SET_PTR(PTR_AUTOINC);
+
+ buf = skb->data;
+ len = skb->len;
+ DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+ dev->name, packet_no, len, len, buf);
+ PRINT_PKT(buf, len);
+
+ /*
+	 * Send the packet length (+6 for status words, length, and ctl).
+	 * The card will pad to 64 bytes with zeroes if the packet is too small.
+ */
+ SMC_PUT_PKT_HDR(0, len + 6);
+
+ /* send the actual data */
+ SMC_PUSH_DATA(buf, len & ~1);
+
+ /* Send final ctl word with the last byte if there is one */
+ SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG);
+
+ /*
+ * If THROTTLE_TX_PKTS is set, we look at the TX_EMPTY flag
+ * before queueing this packet for TX, and if it's clear then
+ * we stop the queue here. This will have the effect of
+ * having at most 2 packets queued for TX in the chip's memory
+ * at all time. If THROTTLE_TX_PKTS is not set then the queue
+ * is stopped only when memory allocation (MC_ALLOC) does not
+ * succeed right away.
+ */
+ if (THROTTLE_TX_PKTS && !(SMC_GET_INT() & IM_TX_EMPTY_INT))
+ netif_stop_queue(dev);
+
+ /* queue the packet for TX */
+ SMC_SET_MMU_CMD(MC_ENQUEUE);
+ SMC_ACK_INT(IM_TX_EMPTY_INT);
+ smc_special_unlock(&lp->lock);
+
+ dev->trans_start = jiffies;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += len;
+
+ SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT);
+
+done: if (!THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+
+ dev_kfree_skb(skb);
+}
+
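A short aside on the final control word written by SMC_outw() above, sketched here under the assumption that the LAN91C111 TX packet format places the control byte in the high byte of the last word (0x2000 being its ODD bit) and the optional last data byte in the low byte; this helper is illustrative only and not part of the patch.

	/* Illustrative only: how the trailing TX control word above is built. */
	static u16 example_tx_ctl_word(const unsigned char *buf, unsigned int len)
	{
		if (len & 1)		/* odd length: ODD bit plus the last data byte */
			return 0x2000 | buf[len - 1];
		return 0;		/* even length: empty control word */
	}
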
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine which either sends it
+ * now, or sets the card to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int numPages, poll_count, status;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ BUG_ON(lp->pending_tx_skb != NULL);
+ lp->pending_tx_skb = skb;
+
+ /*
+	 * The MMU wants the number of pages to be the number of 256-byte
+	 * 'pages', minus 1 (since a packet can't ever have 0 pages :))
+ *
+ * The 91C111 ignores the size bits, but earlier models don't.
+ *
+ * Pkt size for allocating is data length +6 (for additional status
+ * words, length and ctl)
+ *
+ * If odd size then last byte is included in ctl word.
+ */
+ numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
+ if (unlikely(numPages > 7)) {
+ printk("%s: Far too big packet error.\n", dev->name);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ smc_special_lock(&lp->lock);
+
+ /* now, try to allocate the memory */
+ SMC_SET_MMU_CMD(MC_ALLOC | numPages);
+
+ /*
+ * Poll the chip for a short amount of time in case the
+ * allocation succeeds quickly.
+ */
+ poll_count = MEMORY_WAIT_TIME;
+ do {
+ status = SMC_GET_INT();
+ if (status & IM_ALLOC_INT) {
+ SMC_ACK_INT(IM_ALLOC_INT);
+ break;
+ }
+ } while (--poll_count);
+
+ smc_special_unlock(&lp->lock);
+
+ if (!poll_count) {
+ /* oh well, wait until the chip finds memory later */
+ netif_stop_queue(dev);
+ DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+ SMC_ENABLE_INT(IM_ALLOC_INT);
+ } else {
+ /*
+ * Allocation succeeded: push packet to the chip's own memory
+ * immediately.
+ */
+ smc_hardware_send_pkt((unsigned long)dev);
+ }
+
+ return 0;
+}
+
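As a worked example of the page computation in smc_hard_start_xmit() above (illustrative, not part of the driver): a 1514-byte frame gives ((1514 & ~1) + 5) >> 8 = 5, so the MMU is asked for six 256-byte pages (1536 bytes, enough for 1514 + 6 bytes of overhead), while a 60-byte frame gives (60 + 5) >> 8 = 0, i.e. a single page.

	/* Illustrative only: mirrors the MMU page request math above. */
	static unsigned int example_tx_pages(unsigned int skb_len)
	{
		return ((skb_len & ~1) + (6 - 1)) >> 8;	/* 256-byte pages, minus one */
	}
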
+/*
+ * This handles a TX interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - CTL_AUTO_RELEASE is not set and TX of a packet completed.
+ */
+static void smc_tx(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int saved_packet, packet_no, tx_status, pkt_len;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ /* If the TX FIFO is empty then nothing to do */
+ packet_no = SMC_GET_TXFIFO();
+ if (unlikely(packet_no & TXFIFO_TEMPTY)) {
+ PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+ return;
+ }
+
+ /* select packet to read from */
+ saved_packet = SMC_GET_PN();
+ SMC_SET_PN(packet_no);
+
+ /* read the first word (status word) from this packet */
+ SMC_SET_PTR(PTR_AUTOINC | PTR_READ);
+ SMC_GET_PKT_HDR(tx_status, pkt_len);
+ DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
+ dev->name, tx_status, packet_no);
+
+ if (!(tx_status & TS_SUCCESS))
+ lp->stats.tx_errors++;
+ if (tx_status & TS_LOSTCAR)
+ lp->stats.tx_carrier_errors++;
+
+ if (tx_status & TS_LATCOL) {
+ PRINTK("%s: late collision occurred on last xmit\n", dev->name);
+ lp->stats.tx_window_errors++;
+ if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) {
+ printk(KERN_INFO "%s: unexpectedly large numbers of "
+ "late collisions. Please check duplex "
+ "setting.\n", dev->name);
+ }
+ }
+
+ /* kill the packet */
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_FREEPKT);
+
+ /* Don't restore Packet Number Reg until busy bit is cleared */
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_PN(saved_packet);
+
+ /* re-enable transmit */
+ SMC_SELECT_BANK(0);
+ SMC_SET_TCR(lp->tcr_cur_mode);
+ SMC_SELECT_BANK(2);
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int mii_reg, mask;
+
+ mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO);
+ mii_reg |= MII_MDOE;
+
+ for (mask = 1 << (bits - 1); mask; mask >>= 1) {
+ if (val & mask)
+ mii_reg |= MII_MDO;
+ else
+ mii_reg &= ~MII_MDO;
+
+ SMC_SET_MII(mii_reg);
+ udelay(MII_DELAY);
+ SMC_SET_MII(mii_reg | MII_MCLK);
+ udelay(MII_DELAY);
+ }
+}
+
+static unsigned int smc_mii_in(struct net_device *dev, int bits)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int mii_reg, mask, val;
+
+ mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO);
+ SMC_SET_MII(mii_reg);
+
+ for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
+ if (SMC_GET_MII() & MII_MDI)
+ val |= mask;
+
+ SMC_SET_MII(mii_reg);
+ udelay(MII_DELAY);
+ SMC_SET_MII(mii_reg | MII_MCLK);
+ udelay(MII_DELAY);
+ }
+
+ return val;
+}
+
+/*
+ * Reads a register from the MII Management serial interface
+ */
+static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int phydata;
+
+ SMC_SELECT_BANK(3);
+
+ /* Idle - 32 ones */
+ smc_mii_out(dev, 0xffffffff, 32);
+
+ /* Start code (01) + read (10) + phyaddr + phyreg */
+ smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);
+
+ /* Turnaround (2bits) + phydata */
+ phydata = smc_mii_in(dev, 18);
+
+ /* Return to idle state */
+ SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO));
+
+ DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+
+ SMC_SELECT_BANK(2);
+ return phydata;
+}
+
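The bit-banged frame sent by smc_phy_read() above follows the standard MII management format; the sketch below (illustrative only, not part of the driver) shows how the 14-bit header passed to smc_mii_out() is packed: start code 01, read opcode 10, then the 5-bit PHY address and the 5-bit register number.

	/* Illustrative only: the 14-bit MII read header used above.
	 * bits 13..12 = start code (01), bits 11..10 = opcode (10 = read),
	 * bits 9..5 = PHY address, bits 4..0 = register number.
	 */
	static unsigned int example_mii_read_header(int phyaddr, int phyreg)
	{
		return 6 << 10 | (phyaddr & 0x1f) << 5 | (phyreg & 0x1f);
	}
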
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ SMC_SELECT_BANK(3);
+
+ /* Idle - 32 ones */
+ smc_mii_out(dev, 0xffffffff, 32);
+
+ /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */
+ smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);
+
+ /* Return to idle state */
+ SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO));
+
+ DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+
+ SMC_SELECT_BANK(2);
+}
+
+/*
+ * Finds and reports the PHY address
+ */
+static void smc_phy_detect(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int phyaddr;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ lp->phy_type = 0;
+
+ /*
+	 * Scan all 32 PHY addresses if necessary, starting with
+	 * PHY#1 through PHY#31, and then PHY#0 last.
+ */
+ for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
+ unsigned int id1, id2;
+
+ /* Read the PHY identifiers */
+ id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
+ id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
+
+ DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
+ dev->name, id1, id2);
+
+ /* Make sure it is a valid identifier */
+ if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
+ id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
+ /* Save the PHY's address */
+ lp->mii.phy_id = phyaddr & 31;
+ lp->phy_type = id1 << 16 | id2;
+ break;
+ }
+ }
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user
+ */
+static int smc_phy_fixed(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int phyaddr = lp->mii.phy_id;
+ int bmcr, cfg1;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ /* Enter Link Disable state */
+ cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
+ cfg1 |= PHY_CFG1_LNKDIS;
+ smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);
+
+ /*
+ * Set our fixed capabilities
+ * Disable auto-negotiation
+ */
+ bmcr = 0;
+
+ if (lp->ctl_rfduplx)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (lp->ctl_rspeed == 100)
+ bmcr |= BMCR_SPEED100;
+
+ /* Write our capabilities to the phy control register */
+ smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);
+
+ /* Re-Configure the Receive/Phy Control register */
+ SMC_SELECT_BANK(0);
+ SMC_SET_RPC(lp->rpc_cur_mode);
+ SMC_SELECT_BANK(2);
+
+ return 1;
+}
+
+/*
+ * smc_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete. We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ * Must be called with lp->lock locked.
+ */
+static int smc_phy_reset(struct net_device *dev, int phy)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int bmcr;
+ int timeout;
+
+ smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET);
+
+ for (timeout = 2; timeout; timeout--) {
+ spin_unlock_irq(&lp->lock);
+ msleep(50);
+ spin_lock_irq(&lp->lock);
+
+ bmcr = smc_phy_read(dev, phy, MII_BMCR);
+ if (!(bmcr & BMCR_RESET))
+ break;
+ }
+
+ return bmcr & BMCR_RESET;
+}
+
+/*
+ * smc_phy_powerdown - powerdown phy
+ * @dev: net device
+ *
+ * Power down the specified PHY
+ */
+static void smc_phy_powerdown(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int bmcr;
+ int phy = lp->mii.phy_id;
+
+ if (lp->phy_type == 0)
+ return;
+
+ /* We need to ensure that no calls to smc_phy_configure are
+ pending.
+
+ flush_scheduled_work() cannot be called because we are
+ running with the netlink semaphore held (from
+ devinet_ioctl()) and the pending work queue contains
+ linkwatch_event() (scheduled by netif_carrier_off()
+ above). linkwatch_event() also wants the netlink semaphore.
+ */
+ while(lp->work_pending)
+ schedule();
+
+ bmcr = smc_phy_read(dev, phy, MII_BMCR);
+ smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
+}
+
+/*
+ * smc_phy_check_media - check the media status and adjust TCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state. This
+ * also updates our carrier state.
+ */
+static void smc_phy_check_media(struct net_device *dev, int init)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+ /* duplex state has changed */
+ if (lp->mii.full_duplex) {
+ lp->tcr_cur_mode |= TCR_SWFDUP;
+ } else {
+ lp->tcr_cur_mode &= ~TCR_SWFDUP;
+ }
+
+ SMC_SELECT_BANK(0);
+ SMC_SET_TCR(lp->tcr_cur_mode);
+ }
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation). If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc_phy_configure(void *data)
+{
+ struct net_device *dev = data;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int phyaddr = lp->mii.phy_id;
+ int my_phy_caps; /* My PHY capabilities */
+ int my_ad_caps; /* My Advertised capabilities */
+ int status;
+
+	DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irq(&lp->lock);
+
+ /*
+ * We should not be called if phy_type is zero.
+ */
+ if (lp->phy_type == 0)
+ goto smc_phy_configure_exit;
+
+ if (smc_phy_reset(dev, phyaddr)) {
+ printk("%s: PHY reset timed out\n", dev->name);
+ goto smc_phy_configure_exit;
+ }
+
+ /*
+ * Enable PHY Interrupts (for register 18)
+ * Interrupts listed here are disabled
+ */
+ smc_phy_write(dev, phyaddr, PHY_MASK_REG,
+ PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
+ PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
+ PHY_INT_SPDDET | PHY_INT_DPLXDET);
+
+ /* Configure the Receive/Phy Control register */
+ SMC_SELECT_BANK(0);
+ SMC_SET_RPC(lp->rpc_cur_mode);
+
+	/* If the user disabled auto-negotiation, apply the requested fixed setup */
+ if (lp->mii.force_media) {
+ smc_phy_fixed(dev);
+ goto smc_phy_configure_exit;
+ }
+
+ /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+ my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
+
+ if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+ printk(KERN_INFO "Auto negotiation NOT supported\n");
+ smc_phy_fixed(dev);
+ goto smc_phy_configure_exit;
+ }
+
+ my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */
+
+ if (my_phy_caps & BMSR_100BASE4)
+ my_ad_caps |= ADVERTISE_100BASE4;
+ if (my_phy_caps & BMSR_100FULL)
+ my_ad_caps |= ADVERTISE_100FULL;
+ if (my_phy_caps & BMSR_100HALF)
+ my_ad_caps |= ADVERTISE_100HALF;
+ if (my_phy_caps & BMSR_10FULL)
+ my_ad_caps |= ADVERTISE_10FULL;
+ if (my_phy_caps & BMSR_10HALF)
+ my_ad_caps |= ADVERTISE_10HALF;
+
+ /* Disable capabilities not selected by our user */
+ if (lp->ctl_rspeed != 100)
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+ if (!lp->ctl_rfduplx)
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
+
+ /* Update our Auto-Neg Advertisement Register */
+ smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
+ lp->mii.advertising = my_ad_caps;
+
+ /*
+ * Read the register back. Without this, it appears that when
+ * auto-negotiation is restarted, sometimes it isn't ready and
+ * the link does not come up.
+ */
+ status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
+
+ DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
+ DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+
+ /* Restart auto-negotiation process in order to advertise my caps */
+ smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
+
+ smc_phy_check_media(dev, 1);
+
+smc_phy_configure_exit:
+ spin_unlock_irq(&lp->lock);
+ lp->work_pending = 0;
+}
+
+/*
+ * smc_phy_interrupt
+ *
+ * Purpose: Handle interrupts relating to PHY register 18. This is
+ * called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc_phy_interrupt(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int phyaddr = lp->mii.phy_id;
+ int phy18;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ if (lp->phy_type == 0)
+ return;
+
+ for(;;) {
+ smc_phy_check_media(dev, 0);
+
+ /* Read PHY Register 18, Status Output */
+ phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
+ if ((phy18 & PHY_INT_INT) == 0)
+ break;
+ }
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+static void smc_10bt_check_media(struct net_device *dev, int init)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int old_carrier, new_carrier;
+
+ old_carrier = netif_carrier_ok(dev) ? 1 : 0;
+
+ SMC_SELECT_BANK(0);
+ new_carrier = SMC_inw(ioaddr, EPH_STATUS_REG) & ES_LINK_OK ? 1 : 0;
+ SMC_SELECT_BANK(2);
+
+ if (init || (old_carrier != new_carrier)) {
+ if (!new_carrier) {
+ netif_carrier_off(dev);
+ } else {
+ netif_carrier_on(dev);
+ }
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link %s\n", dev->name,
+ new_carrier ? "up" : "down");
+ }
+}
+
+static void smc_eph_interrupt(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int ctl;
+
+ smc_10bt_check_media(dev, 0);
+
+ SMC_SELECT_BANK(1);
+ ctl = SMC_GET_CTL();
+ SMC_SET_CTL(ctl & ~CTL_LE_ENABLE);
+ SMC_SET_CTL(ctl);
+ SMC_SELECT_BANK(2);
+}
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int status, mask, timeout, card_stats;
+ int saved_pointer;
+
+ DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+
+ spin_lock(&lp->lock);
+
+ /* A preamble may be used when there is a potential race
+ * between the interruptible transmit functions and this
+ * ISR. */
+ SMC_INTERRUPT_PREAMBLE;
+
+ saved_pointer = SMC_GET_PTR();
+ mask = SMC_GET_INT_MASK();
+ SMC_SET_INT_MASK(0);
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 8;
+
+ do {
+ status = SMC_GET_INT();
+
+ DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ dev->name, status, mask,
+ ({ int meminfo; SMC_SELECT_BANK(0);
+ meminfo = SMC_GET_MIR();
+ SMC_SELECT_BANK(2); meminfo; }),
+ SMC_GET_FIFO());
+
+ status &= mask;
+ if (!status)
+ break;
+
+ if (status & IM_RCV_INT) {
+ DBG(3, "%s: RX irq\n", dev->name);
+ smc_rcv(dev);
+ } else if (status & IM_TX_INT) {
+ DBG(3, "%s: TX int\n", dev->name);
+ smc_tx(dev);
+ SMC_ACK_INT(IM_TX_INT);
+ if (THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+ } else if (status & IM_ALLOC_INT) {
+ DBG(3, "%s: Allocation irq\n", dev->name);
+ tasklet_hi_schedule(&lp->tx_task);
+ mask &= ~IM_ALLOC_INT;
+ } else if (status & IM_TX_EMPTY_INT) {
+ DBG(3, "%s: TX empty\n", dev->name);
+ mask &= ~IM_TX_EMPTY_INT;
+
+ /* update stats */
+ SMC_SELECT_BANK(0);
+ card_stats = SMC_GET_COUNTER();
+ SMC_SELECT_BANK(2);
+
+ /* single collisions */
+ lp->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+
+ /* multiple collisions */
+ lp->stats.collisions += card_stats & 0xF;
+ } else if (status & IM_RX_OVRN_INT) {
+ DBG(1, "%s: RX overrun\n", dev->name);
+ SMC_ACK_INT(IM_RX_OVRN_INT);
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ } else if (status & IM_EPH_INT) {
+ smc_eph_interrupt(dev);
+ } else if (status & IM_MDINT) {
+ SMC_ACK_INT(IM_MDINT);
+ smc_phy_interrupt(dev);
+ } else if (status & IM_ERCV_INT) {
+ SMC_ACK_INT(IM_ERCV_INT);
+			PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+ }
+ } while (--timeout);
+
+ /* restore register states */
+ SMC_SET_PTR(saved_pointer);
+ SMC_SET_INT_MASK(mask);
+
+ spin_unlock(&lp->lock);
+
+ DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
+
+ /*
+ * We return IRQ_HANDLED unconditionally here even if there was
+ * nothing to do. There is a possibility that a packet might
+ * get enqueued into the chip right after TX_EMPTY_INT is raised
+ * but just before the CPU acknowledges the IRQ.
+	 * Better to take the occasional unneeded IRQ than to complicate
+	 * the code for all cases.
+ */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc_timeout(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int status, mask, meminfo, fifo;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irq(&lp->lock);
+ status = SMC_GET_INT();
+ mask = SMC_GET_INT_MASK();
+ fifo = SMC_GET_FIFO();
+ SMC_SELECT_BANK(0);
+ meminfo = SMC_GET_MIR();
+ SMC_SELECT_BANK(2);
+ spin_unlock_irq(&lp->lock);
+	PRINTK("%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+		dev->name, status, mask, meminfo, fifo);
+
+ smc_reset(dev);
+ smc_enable(dev);
+
+ /*
+ * Reconfiguring the PHY doesn't seem like a bad idea here, but
+ * smc_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
+ */
+ if (lp->phy_type != 0) {
+ if (schedule_work(&lp->phy_configure)) {
+ lp->work_pending = 1;
+ }
+ }
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the flags set on the device,
+ * either make it accept all multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins), or accept only
+ * a selected set of multicast packets.
+ */
+static void smc_set_multicast_list(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned char multicast_table[8];
+ int update_multicast = 0;
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ if (dev->flags & IFF_PROMISC) {
+ DBG(2, "%s: RCR_PRMS\n", dev->name);
+ lp->rcr_cur_mode |= RCR_PRMS;
+ }
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+	 * checked before the table is used.
+ */
+ else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ lp->rcr_cur_mode |= RCR_ALMUL;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
+	 * the address are the offset into the table. If the bit at that
+	 * offset is 1, then the multicast packet is accepted. Otherwise,
+	 * it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 3 bits are
+ * the number of the 8 bit register, while the low 3 bits are the bit
+ * within that register.
+ */
+ else if (dev->mc_count) {
+ int i;
+ struct dev_mc_list *cur_addr;
+
+ /* table for flipping the order of 3 bits */
+ static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ cur_addr = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ int position;
+
+ /* do we have a pointer here? */
+ if (!cur_addr)
+ break;
+ /* make sure this is a multicast address -
+ shouldn't this be a given if we have it here ? */
+ if (!(*cur_addr->dmi_addr & 1))
+ continue;
+
+ /* only use the low order bits */
+ position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+ }
+
+ /* be sure I get rid of flags I might have set */
+ lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+ lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irq(&lp->lock);
+ SMC_SELECT_BANK(0);
+ SMC_SET_RCR(lp->rcr_cur_mode);
+ if (update_multicast) {
+ SMC_SELECT_BANK(3);
+ SMC_SET_MCAST(multicast_table);
+ }
+ SMC_SELECT_BANK(2);
+ spin_unlock_irq(&lp->lock);
+}
+
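For reference, the hash computation used in smc_set_multicast_list() above can be isolated as the following sketch (illustrative only, not part of the driver), which returns the filter table byte and bit that a given multicast address selects:

	/* Illustrative only: which filter byte/bit an address hashes to above. */
	static void example_mcast_hash(const unsigned char *addr,
				       unsigned int *byte, unsigned int *bit)
	{
		static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
		int position = crc32_le(~0, addr, 6) & 0x3f;	/* low 6 bits of CRC */

		*byte = invert3[position & 7];
		*bit  = invert3[(position >> 3) & 7];
	}
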
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc_open(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ /*
+	 * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Setup the default Register Modes */
+ lp->tcr_cur_mode = TCR_DEFAULT;
+ lp->rcr_cur_mode = RCR_DEFAULT;
+ lp->rpc_cur_mode = RPC_DEFAULT;
+
+ /*
+ * If we are not using a MII interface, we need to
+ * monitor our own carrier signal to detect faults.
+ */
+ if (lp->phy_type == 0)
+ lp->tcr_cur_mode |= TCR_MON_CSN;
+
+ /* reset the hardware */
+ smc_reset(dev);
+ smc_enable(dev);
+
+ /* Configure the PHY, initialize the link state */
+ if (lp->phy_type != 0)
+ smc_phy_configure(dev);
+ else {
+ spin_lock_irq(&lp->lock);
+ smc_10bt_check_media(dev, 1);
+ spin_unlock_irq(&lp->lock);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*
+ * smc_close
+ *
+ * This makes the board clean up everything that it can
+ * and stop talking to the outside world. Called when the
+ * interface is brought down with 'ifconfig ethX down'.
+ */
+static int smc_close(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc_shutdown(dev);
+
+ smc_phy_powerdown(dev);
+
+ if (lp->pending_tx_skb) {
+ dev_kfree_skb(lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *smc_query_statistics(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+ return &lp->stats;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret;
+
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+ spin_unlock_irq(&lp->lock);
+ } else {
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP | SUPPORTED_AUI;
+
+ if (lp->ctl_rspeed == 10)
+ cmd->speed = SPEED_10;
+ else if (lp->ctl_rspeed == 100)
+ cmd->speed = SPEED_100;
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->port = 0;
+ cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+ spin_unlock_irq(&lp->lock);
+ } else {
+ if (cmd->autoneg != AUTONEG_DISABLE ||
+ cmd->speed != SPEED_10 ||
+ (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+ (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ return -EINVAL;
+
+// lp->port = cmd->port;
+ lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+// if (netif_running(dev))
+// smc_set_port(dev);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, CARDNAME, sizeof(info->driver));
+ strncpy(info->version, version, sizeof(info->version));
+ strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info));
+}
+
+static int smc_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret = -EINVAL;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_nway_restart(&lp->mii);
+ spin_unlock_irq(&lp->lock);
+ }
+
+ return ret;
+}
+
+static u32 smc_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ lp->msg_enable = level;
+}
+
+static struct ethtool_ops smc_ethtool_ops = {
+ .get_settings = smc_ethtool_getsettings,
+ .set_settings = smc_ethtool_setsettings,
+ .get_drvinfo = smc_ethtool_getdrvinfo,
+
+ .get_msglevel = smc_ethtool_getmsglevel,
+ .set_msglevel = smc_ethtool_setmsglevel,
+ .nway_reset = smc_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+// .get_eeprom = smc_ethtool_geteeprom,
+// .set_eeprom = smc_ethtool_seteeprom,
+};
+
+/*
+ * smc_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it and find the IRQ.
+ */
+/*
+ * does this still work?
+ *
+ * I just deleted auto_irq.c, since it was never built...
+ * --jgarzik
+ */
+static int __init smc_findirq(void __iomem *ioaddr)
+{
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * What I try to do here is trigger an ALLOC_INT. This is done
+ * by allocating a small chunk of memory, which will give an interrupt
+ * when done.
+ */
+ /* enable ALLOCation interrupts ONLY */
+ SMC_SELECT_BANK(2);
+ SMC_SET_INT_MASK(IM_ALLOC_INT);
+
+ /*
+ * Allocate 512 bytes of memory. Note that the chip was just
+ * reset so all the memory is available
+ */
+ SMC_SET_MMU_CMD(MC_ALLOC | 1);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT();
+ if (int_status & IM_ALLOC_INT)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+ /*
+	 * There is really nothing that I can do here if the timeout
+	 * expires, as probe_irq_off() will return 0 anyway, which is
+	 * what I want in this case. Plus, the cleanup is needed in both
+ * cases.
+ */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_MASK(0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+/*
+ * Function: smc_probe(unsigned long ioaddr)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC91x chip.
+ * Returns a 0 on success
+ *
+ * Algorithm:
+ * (1) see if the high byte of BANK_SELECT is 0x33
+ * (2) compare the ioaddr with the base register's address
+ * (3) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o print out my vanity message if not done so already
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ static int version_printed = 0;
+ int i, retval;
+ unsigned int val, revision_register;
+ const char *version_string;
+
+ DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
+
+ /* First, see if the high byte is 0x33 */
+ val = SMC_CURRENT_BANK();
+ DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+ if ((val & 0xFF00) != 0x3300) {
+ if ((val & 0xFF) == 0x33) {
+ printk(KERN_WARNING
+ "%s: Detected possible byte-swapped interface"
+ " at IOADDR %p\n", CARDNAME, ioaddr);
+ }
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+	 * The above MIGHT indicate a device, but I need to perform a
+	 * write to test this further.
+ */
+ SMC_SELECT_BANK(0);
+ val = SMC_CURRENT_BANK();
+ if ((val & 0xFF00) != 0x3300) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+ * well, we've already written once, so hopefully another
+ * time won't hurt. This time, I need to switch the bank
+ * register to bank 1, so I can access the base address
+ * register
+ */
+ SMC_SELECT_BANK(1);
+ val = SMC_GET_BASE();
+ val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
+ if (((unsigned long)ioaddr & ((PAGE_SIZE-1)<<SMC_IO_SHIFT)) != val) { /*XXX: WTF? */
+ printk("%s: IOADDR %p doesn't match configuration (%x).\n",
+ CARDNAME, ioaddr, val);
+ }
+
+ /*
+	 * Check if the revision register is something that I
+	 * recognize. This list might need to be extended later,
+	 * as future revisions appear.
+ */
+ SMC_SELECT_BANK(3);
+ revision_register = SMC_GET_REV();
+ DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+ version_string = chip_ids[ (revision_register >> 4) & 0xF];
+ if (!version_string || (revision_register & 0xff00) != 0x3300) {
+ /* I don't recognize this chip, so... */
+ printk("%s: IO %p: Unrecognized revision register 0x%04x"
+ ", Contact author.\n", CARDNAME,
+ ioaddr, revision_register);
+
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* At this point I'll assume that the chip is an SMC91x. */
+ if (version_printed++ == 0)
+ printk("%s", version);
+
+ /* fill in some of the fields */
+ dev->base_addr = (unsigned long)ioaddr;
+ lp->base = ioaddr;
+ lp->version = revision_register & 0xff;
+ spin_lock_init(&lp->lock);
+
+ /* Get the MAC address */
+ SMC_SELECT_BANK(1);
+ SMC_GET_MAC_ADDR(dev->dev_addr);
+
+ /* now, reset the chip, and put it into a known state */
+ smc_reset(dev);
+
+ /*
+ * If dev->irq is 0, then the device has to be banged on to see
+ * what the IRQ is.
+ *
+ * This banging doesn't always detect the IRQ, for unknown reasons.
+	 * A workaround is to reset the chip and try again.
+ *
+ * Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+ * be what is requested on the command line. I don't do that, mostly
+ * because the card that I have uses a non-standard method of accessing
+ * the IRQs, and because this _should_ work in most configurations.
+ *
+ * Specifying an IRQ is done with the assumption that the user knows
+ * what (s)he is doing. No checking is done!!!!
+ */
+ if (dev->irq < 1) {
+ int trials;
+
+ trials = 3;
+ while (trials--) {
+ dev->irq = smc_findirq(ioaddr);
+ if (dev->irq)
+ break;
+ /* kick the card and try again */
+ smc_reset(dev);
+ }
+ }
+ if (dev->irq == 0) {
+ printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+ dev->name);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->irq = irq_canonicalize(dev->irq);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->open = smc_open;
+ dev->stop = smc_close;
+ dev->hard_start_xmit = smc_hard_start_xmit;
+ dev->tx_timeout = smc_timeout;
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ dev->get_stats = smc_query_statistics;
+ dev->set_multicast_list = smc_set_multicast_list;
+ dev->ethtool_ops = &smc_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = smc_poll_controller;
+#endif
+
+ tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+ lp->mii.force_media = 0;
+ lp->mii.full_duplex = 0;
+ lp->mii.dev = dev;
+ lp->mii.mdio_read = smc_phy_read;
+ lp->mii.mdio_write = smc_phy_write;
+
+ /*
+ * Locate the phy, if any.
+ */
+ if (lp->version >= (CHIP_91100 << 4))
+ smc_phy_detect(dev);
+
+ /* Set default parameters */
+ lp->msg_enable = NETIF_MSG_LINK;
+ lp->ctl_rfduplx = 0;
+ lp->ctl_rspeed = 10;
+
+ if (lp->version >= (CHIP_91100 << 4)) {
+ lp->ctl_rfduplx = 1;
+ lp->ctl_rspeed = 100;
+ }
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev);
+ if (retval)
+ goto err_out;
+
+ set_irq_type(dev->irq, IRQT_RISING);
+
+#ifdef SMC_USE_PXA_DMA
+ {
+ int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
+ smc_pxa_dma_irq, NULL);
+ if (dma >= 0)
+ dev->dma = dma;
+ }
+#endif
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk("%s: %s (rev %d) at %p IRQ %d",
+ dev->name, version_string, revision_register & 0x0f,
+ lp->base, dev->irq);
+
+ if (dev->dma != (unsigned char)-1)
+ printk(" DMA %d", dev->dma);
+
+ printk("%s%s\n", nowait ? " [nowait]" : "",
+ THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", dev->name);
+ } else {
+ /* Print the Ethernet address */
+ printk("%s: Ethernet addr: ", dev->name);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x\n", dev->dev_addr[5]);
+ }
+
+ if (lp->phy_type == 0) {
+ PRINTK("%s: No PHY found\n", dev->name);
+ } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
+ PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+ } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
+ PRINTK("%s: PHY LAN83C180\n", dev->name);
+ }
+ }
+
+err_out:
+#ifdef SMC_USE_PXA_DMA
+ if (retval && dev->dma != (unsigned char)-1)
+ pxa_free_dma(dev->dma);
+#endif
+ return retval;
+}
+
+static int smc_enable_device(struct platform_device *pdev)
+{
+ unsigned long flags;
+ unsigned char ecor, ecsr;
+ void __iomem *addr;
+ struct resource * res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ if (!res)
+ return 0;
+
+ /*
+ * Map the attribute space. This is overkill, but clean.
+ */
+ addr = ioremap(res->start, ATTRIB_SIZE);
+ if (!addr)
+ return -ENOMEM;
+
+ /*
+ * Reset the device. We must disable IRQs around this
+	 * since a reset causes the IRQ line to become active.
+ */
+ local_irq_save(flags);
+ ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET;
+ writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT));
+ readb(addr + (ECOR << SMC_IO_SHIFT));
+
+ /*
+ * Wait 100us for the chip to reset.
+ */
+ udelay(100);
+
+ /*
+ * The device will ignore all writes to the enable bit while
+ * reset is asserted, even if the reset bit is cleared in the
+ * same write. Must clear reset first, then enable the device.
+ */
+ writeb(ecor, addr + (ECOR << SMC_IO_SHIFT));
+ writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT));
+
+ /*
+ * Set the appropriate byte/word mode.
+ */
+ ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
+#ifndef SMC_CAN_USE_16BIT
+ ecsr |= ECSR_IOIS8;
+#endif
+ writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
+ local_irq_restore(flags);
+
+ iounmap(addr);
+
+ /*
+ * Wait for the chip to wake up. We could poll the control
+ * register in the main register space, but that isn't mapped
+ * yet. We know this is going to take 750us.
+ */
+ msleep(1);
+
+ return 0;
+}
+
+static int smc_request_attrib(struct platform_device *pdev)
+{
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+
+ if (!res)
+ return 0;
+
+ if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void smc_release_attrib(struct platform_device *pdev)
+{
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+
+ if (res)
+ release_mem_region(res->start, ATTRIB_SIZE);
+}
+
+#ifdef SMC_CAN_USE_DATACS
+static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ struct smc_local *lp = netdev_priv(ndev);
+
+ if (!res)
+ return;
+
+ if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
+ printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ return;
+ }
+
+ lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+}
+
+static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+
+ if (lp->datacs)
+ iounmap(lp->datacs);
+
+ lp->datacs = NULL;
+
+ if (res)
+ release_mem_region(res->start, SMC_DATA_EXTENT);
+}
+#else
+static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {}
+static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {}
+#endif
+
+/*
+ * smc_drv_probe(dev)
+ * Input parameters:
+ *	 dev, a platform device providing the "smc91x-regs" (or first)
+ *	 memory resource and the interrupt line to use
+ *
+ * Output:
+ * 0 --> there is a device
+ * anything else, error
+ */
+static int smc_drv_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev;
+ struct resource *res;
+ unsigned int __iomem *addr;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+
+ if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct smc_local));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto out_release_io;
+ }
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->dma = (unsigned char)-1;
+ ndev->irq = platform_get_irq(pdev, 0);
+
+ ret = smc_request_attrib(pdev);
+ if (ret)
+ goto out_free_netdev;
+#if defined(CONFIG_SA1100_ASSABET)
+ NCR_0 |= NCR_ENET_OSC_EN;
+#endif
+ ret = smc_enable_device(pdev);
+ if (ret)
+ goto out_release_attrib;
+
+ addr = ioremap(res->start, SMC_IO_EXTENT);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out_release_attrib;
+ }
+
+ dev_set_drvdata(dev, ndev);
+ ret = smc_probe(ndev, addr);
+ if (ret != 0)
+ goto out_iounmap;
+#ifdef SMC_USE_PXA_DMA
+ else {
+ struct smc_local *lp = netdev_priv(ndev);
+ lp->physaddr = res->start;
+ }
+#endif
+
+ smc_request_datacs(pdev, ndev);
+
+ return 0;
+
+ out_iounmap:
+ dev_set_drvdata(dev, NULL);
+ iounmap(addr);
+ out_release_attrib:
+ smc_release_attrib(pdev);
+ out_free_netdev:
+ free_netdev(ndev);
+ out_release_io:
+ release_mem_region(res->start, SMC_IO_EXTENT);
+ out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+
+ return ret;
+}
+
+static int smc_drv_remove(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource *res;
+
+ dev_set_drvdata(dev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+#ifdef SMC_USE_PXA_DMA
+ if (ndev->dma != (unsigned char)-1)
+ pxa_free_dma(ndev->dma);
+#endif
+ iounmap(lp->base);
+
+ smc_release_datacs(pdev,ndev);
+ smc_release_attrib(pdev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, SMC_IO_EXTENT);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int smc_drv_suspend(struct device *dev, u32 state, u32 level)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (ndev && level == SUSPEND_DISABLE) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ smc_shutdown(ndev);
+ smc_phy_powerdown(ndev);
+ }
+ }
+ return 0;
+}
+
+static int smc_drv_resume(struct device *dev, u32 level)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (ndev && level == RESUME_ENABLE) {
+ struct smc_local *lp = netdev_priv(ndev);
+ smc_enable_device(pdev);
+ if (netif_running(ndev)) {
+ smc_reset(ndev);
+ smc_enable(ndev);
+ if (lp->phy_type != 0)
+ smc_phy_configure(ndev);
+ netif_device_attach(ndev);
+ }
+ }
+ return 0;
+}
+
+static struct device_driver smc_driver = {
+ .name = CARDNAME,
+ .bus = &platform_bus_type,
+ .probe = smc_drv_probe,
+ .remove = smc_drv_remove,
+ .suspend = smc_drv_suspend,
+ .resume = smc_drv_resume,
+};
+
+static int __init smc_init(void)
+{
+#ifdef MODULE
+#ifdef CONFIG_ISA
+ if (io == -1)
+ printk(KERN_WARNING
+ "%s: You shouldn't use auto-probing with insmod!\n",
+ CARDNAME);
+#endif
+#endif
+
+ return driver_register(&smc_driver);
+}
+
+static void __exit smc_cleanup(void)
+{
+ driver_unregister(&smc_driver);
+}
+
+module_init(smc_init);
+module_exit(smc_cleanup);
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
new file mode 100644
index 000000000000..ddd2688e7d33
--- /dev/null
+++ b/drivers/net/smc91x.h
@@ -0,0 +1,1032 @@
+/*------------------------------------------------------------------------
+ . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . Copyright (C) 2001 Standard Microsystems Corporation
+ . Developed by Simple Network Magic Corporation
+ . Copyright (C) 2003 Monta Vista Software, Inc.
+ . Unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN91C111
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ . Erik Stahlman <erik@vt.edu>
+ . Daris A Nevil <dnevil@snmc.com>
+ . Nicolas Pitre <nico@cam.org>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC91X_H_
+#define _SMC91X_H_
+
+
+/*
+ * Define your architecture specific bus configuration parameters here.
+ */
+
+#if defined(CONFIG_ARCH_LUBBOCK)
+
+/* We can only do 16-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+/* The first two address lines aren't connected... */
+#define SMC_IO_SHIFT 2
+
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
+
+/* We can only do 16-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+#define SMC_IO_SHIFT 0
+
+#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r)))
+#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v)
+#define SMC_insw(a, r, p, l) \
+ do { \
+ unsigned long __port = (a) + (r); \
+ u16 *__p = (u16 *)(p); \
+ int __l = (l); \
+ insw(__port, __p, __l); \
+ while (__l > 0) { \
+ *__p = swab16(*__p); \
+ __p++; \
+ __l--; \
+ } \
+ } while (0)
+#define SMC_outsw(a, r, p, l) \
+ do { \
+ unsigned long __port = (a) + (r); \
+ u16 *__p = (u16 *)(p); \
+ int __l = (l); \
+ while (__l > 0) { \
+ /* Believe it or not, the swab isn't needed. */ \
+ outw( /* swab16 */ (*__p++), __port); \
+ __l--; \
+ } \
+ } while (0)
+#define set_irq_type(irq, type)
+
+#elif defined(CONFIG_SA1100_PLEB)
+/* We can only do 16-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) inb((a) + (r))
+#define SMC_insb(a, r, p, l) insb((a) + (r), p, (l))
+#define SMC_inw(a, r) inw((a) + (r))
+#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
+#define SMC_outb(v, a, r) outb(v, (a) + (r))
+#define SMC_outsb(a, r, p, l) outsb((a) + (r), p, (l))
+#define SMC_outw(v, a, r) outw(v, (a) + (r))
+#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
+
+#define set_irq_type(irq, type) do {} while (0)
+
+#elif defined(CONFIG_SA1100_ASSABET)
+
+#include <asm/arch/neponset.h>
+
+/* We can only do 8-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 0
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+/* The first two address lines aren't connected... */
+#define SMC_IO_SHIFT 2
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
+
+#elif defined(CONFIG_ARCH_INNOKOM) || \
+ defined(CONFIG_MACH_MAINSTONE) || \
+ defined(CONFIG_ARCH_PXA_IDP) || \
+ defined(CONFIG_ARCH_RAMSES)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+#define SMC_USE_PXA_DMA 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+/* We actually can't write halfwords properly if not word aligned */
+static inline void
+SMC_outw(u16 val, unsigned long ioaddr, int reg)
+{
+ if (reg & 2) {
+ unsigned int v = val << 16;
+ v |= readl(ioaddr + (reg & ~2)) & 0xffff;
+ writel(v, ioaddr + (reg & ~2));
+ } else {
+ writew(val, ioaddr + reg);
+ }
+}
+
+#elif defined(CONFIG_ARCH_OMAP)
+
+/* We can only do 16-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#elif defined(CONFIG_SH_SH4202_MICRODEV)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+
+#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000)
+#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
+#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000)
+#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
+#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000)
+#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000)
+#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l)
+#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l)
+#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
+#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
+
+#define set_irq_type(irq, type) do {} while(0)
+
+#elif defined(CONFIG_ISA)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+
+#define SMC_inb(a, r) inb((a) + (r))
+#define SMC_inw(a, r) inw((a) + (r))
+#define SMC_outb(v, a, r) outb(v, (a) + (r))
+#define SMC_outw(v, a, r) outw(v, (a) + (r))
+#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
+
+#elif defined(CONFIG_M32R)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+
+#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000)
+#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
+#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
+#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000)
+#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
+#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
+
+#define set_irq_type(irq, type) do {} while(0)
+
+#define RPC_LSA_DEFAULT RPC_LED_TX_RX
+#define RPC_LSB_DEFAULT RPC_LED_100_10
+
+#elif defined(CONFIG_MACH_LPD7A400) || defined(CONFIG_MACH_LPD7A404)
+
+/* The LPD7A40X_IOBARRIER is necessary to overcome a mismatch between
+ * the way that the CPU handles chip selects and the way that the SMC
+ * chip expects the chip select to operate. Refer to
+ * Documentation/arm/Sharp-LH/IOBarrier for details. The read from
+ * IOBARRIER is a byte as a least-common denominator of possible
+ * regions to use as the barrier. It would be wasteful to read 32
+ * bits from a byte oriented region.
+ *
+ * There is no explicit protection against interrupts intervening
+ * between the writew and the IOBARRIER. In the SMC ISR there is a
+ * preamble that performs an IOBARRIER in the extremely unlikely event
+ * that the driver interrupts itself between a writew to the chip and
+ * the IOBARRIER that follows *and* the cache is large enough that the
+ * first off-chip access while handling the interrupt is to the SMC
+ * chip. Other devices in the same address space as the SMC chip must
+ * be aware of the potential for trouble and perform a similar
+ * IOBARRIER on entry to their ISR.
+ */
+
+#include <asm/arch/constants.h> /* IOBARRIER_VIRT */
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 0
+#define LPD7A40X_IOBARRIER readb (IOBARRIER_VIRT)
+
+#define SMC_inw(a,r) readw ((void*) ((a) + (r)))
+#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l)
+#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; })
+
+static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
+{
+ unsigned short* ps = (unsigned short*) p;
+ while (l-- > 0) {
+ writew (*ps++, a + r);
+ LPD7A40X_IOBARRIER;
+ }
+}
+
+#define SMC_INTERRUPT_PREAMBLE LPD7A40X_IOBARRIER
+
+#define RPC_LSA_DEFAULT RPC_LED_TX_RX
+#define RPC_LSB_DEFAULT RPC_LED_100_10
+
+#else
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define RPC_LSA_DEFAULT RPC_LED_100_10
+#define RPC_LSB_DEFAULT RPC_LED_TX_RX
+
+#endif
+
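Each platform block above follows the same pattern: capability flags, an I/O shift, and a set of SMC_in*/SMC_out* accessors. A hypothetical entry for a new 16-bit memory-mapped board might look like the sketch below; CONFIG_MACH_EXAMPLE_BOARD is invented for illustration and is not part of this header (such a block would sit as another #elif branch above, before the generic #else fallback).

	/* Hypothetical board: 16-bit accesses only, no wait states,
	 * registers mapped contiguously (no address-line shift). */
	#define SMC_CAN_USE_8BIT	0
	#define SMC_CAN_USE_16BIT	1
	#define SMC_CAN_USE_32BIT	0
	#define SMC_IO_SHIFT		0
	#define SMC_NOWAIT		1

	#define SMC_inw(a, r)		readw((a) + (r))
	#define SMC_outw(v, a, r)	writew(v, (a) + (r))
	#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
	#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
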
+
+#ifdef SMC_USE_PXA_DMA
+/*
+ * Let's use the DMA engine on the XScale PXA2xx for RX packets. RX always
+ * happens in irq context, so there is no need to worry about races. TX is
+ * different and probably not worth it for that reason, and it is not as
+ * critical as RX, which can overrun memory and lose packets.
+ */
+#include <linux/dma-mapping.h>
+#include <asm/dma.h>
+#include <asm/arch/pxa-regs.h>
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(a, r, p, l) \
+ smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l)
+static inline void
+smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma,
+ u_char *buf, int len)
+{
+ dma_addr_t dmabuf;
+
+ /* fallback if no DMA available */
+ if (dma == (unsigned char)-1) {
+ readsl(ioaddr + reg, buf, len);
+ return;
+ }
+
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ dmabuf = dma_map_single(NULL, buf, len, DMA_FROM_DEVICE);
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | (DCMD_LENGTH & len));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+ while (!(DCSR(dma) & DCSR_STOPSTATE))
+ cpu_relax();
+ DCSR(dma) = 0;
+ dma_unmap_single(NULL, dmabuf, len, DMA_FROM_DEVICE);
+}
+#endif
+
+#ifdef SMC_insw
+#undef SMC_insw
+#define SMC_insw(a, r, p, l) \
+ smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l)
+static inline void
+smc_pxa_dma_insw(u_long ioaddr, u_long physaddr, int reg, int dma,
+ u_char *buf, int len)
+{
+ dma_addr_t dmabuf;
+
+ /* fallback if no DMA available */
+ if (dma == (unsigned char)-1) {
+ readsw(ioaddr + reg, buf, len);
+ return;
+ }
+
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ *((u16 *)buf) = SMC_inw(ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ dmabuf = dma_map_single(NULL, buf, len, DMA_FROM_DEVICE);
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | (DCMD_LENGTH & len));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+ while (!(DCSR(dma) & DCSR_STOPSTATE))
+ cpu_relax();
+ DCSR(dma) = 0;
+ dma_unmap_single(NULL, dmabuf, len, DMA_FROM_DEVICE);
+}
+#endif
+
+static void
+smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs)
+{
+ DCSR(dma) = 0;
+}
+#endif /* SMC_USE_PXA_DMA */
+
+
+/* Because of bank switching, the LAN91x uses only 16 I/O ports */
+#ifndef SMC_IO_SHIFT
+#define SMC_IO_SHIFT 0
+#endif
+#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
+#define SMC_DATA_EXTENT (4)
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
+#define BANK_SELECT (14 << SMC_IO_SHIFT)
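+
+/*
+ * Illustration (not part of the original header): since the upper byte of
+ * this register always reads back as 0x33, a probe can sanity check for
+ * the chip with something like
+ *	(SMC_CURRENT_BANK() & 0xff00) == 0x3300
+ * which is essentially the test that the SMC_REG() debug path further
+ * below performs.
+ */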
+
+
+// Transmit Control Register
+/* BANK 0 */
+#define TCR_REG SMC_REG(0x0000, 0)
+#define TCR_ENABLE 0x0001 // When 1 we can transmit
+#define TCR_LOOP 0x0002 // Controls output pin LBK
+#define TCR_FORCOL 0x0004 // When 1 will force a collision
+#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0
+#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames
+#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier
+#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation
+#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error
+#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback
+#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the default settings for the TCR register : */
+#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN)
+
+
+// EPH Status Register
+/* BANK 0 */
+#define EPH_STATUS_REG SMC_REG(0x0002, 0)
+#define ES_TX_SUC 0x0001 // Last TX was successful
+#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
+#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
+#define ES_LTX_MULT 0x0008 // Last tx was a multicast
+#define ES_16COL 0x0010 // 16 Collisions Reached
+#define ES_SQET 0x0020 // Signal Quality Error Test
+#define ES_LTXBRD 0x0040 // Last tx was a broadcast
+#define ES_TXDEFR 0x0080 // Transmit Deferred
+#define ES_LATCOL 0x0200 // Late collision detected on last tx
+#define ES_LOSTCARR 0x0400 // Lost Carrier Sense
+#define ES_EXC_DEF 0x0800 // Excessive Deferral
+#define ES_CTR_ROL 0x1000 // Counter Roll Over indication
+#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin
+#define ES_TXUNRN 0x8000 // Tx Underrun
+
+
+// Receive Control Register
+/* BANK 0 */
+#define RCR_REG SMC_REG(0x0004, 0)
+#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
+#define RCR_PRMS 0x0002 // Enable promiscuous mode
+#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
+#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets
+#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets
+#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision
+#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bits of carrier
+#define RCR_SOFTRST 0x8000 // resets the chip
+
+/* the normal settings for the RCR register : */
+#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN)
+#define RCR_CLEAR 0x0 // set it to a base state
+
+
+// Counter Register
+/* BANK 0 */
+#define COUNTER_REG SMC_REG(0x0006, 0)
+
+
+// Memory Information Register
+/* BANK 0 */
+#define MIR_REG SMC_REG(0x0008, 0)
+
+
+// Receive/Phy Control Register
+/* BANK 0 */
+#define RPC_REG SMC_REG(0x000A, 0)
+#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
+#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
+#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
+#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb
+#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb
+#define RPC_LED_100_10 (0x00) // LED = 100Mbps OR'd with 10Mbps link detect
+#define RPC_LED_RES (0x01) // LED = Reserved
+#define RPC_LED_10 (0x02) // LED = 10Mbps link detect
+#define RPC_LED_FD (0x03) // LED = Full Duplex Mode
+#define RPC_LED_TX_RX (0x04) // LED = TX or RX packet occurred
+#define RPC_LED_100 (0x05) // LED = 100Mbps link detect
+#define RPC_LED_TX (0x06) // LED = TX packet occurred
+#define RPC_LED_RX (0x07) // LED = RX packet occurred
+
+#ifndef RPC_LSA_DEFAULT
+#define RPC_LSA_DEFAULT RPC_LED_100
+#endif
+#ifndef RPC_LSB_DEFAULT
+#define RPC_LSB_DEFAULT RPC_LED_FD
+#endif
+
+#define RPC_DEFAULT (RPC_ANEG | (RPC_LSA_DEFAULT << RPC_LSXA_SHFT) | (RPC_LSB_DEFAULT << RPC_LSXB_SHFT) | RPC_SPEED | RPC_DPLX)
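+
+/*
+ * Worked example (not part of the original header): with the fallback LED
+ * defaults above (RPC_LED_100 = 0x05, RPC_LED_FD = 0x03), RPC_DEFAULT
+ * evaluates to RPC_ANEG | (0x05 << 5) | (0x03 << 2) | RPC_SPEED | RPC_DPLX,
+ * i.e. 0x0800 | 0x00a0 | 0x000c | 0x2000 | 0x1000 = 0x38ac.
+ */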
+
+
+/* Bank 0 0x0C is reserved */
+
+// Bank Select Register
+/* All Banks */
+#define BSR_REG 0x000E
+
+
+// Configuration Reg
+/* BANK 1 */
+#define CONFIG_REG SMC_REG(0x0000, 1)
+#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
+#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
+#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
+#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.
+
+// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
+#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN)
+
+
+// Base Address Register
+/* BANK 1 */
+#define BASE_REG SMC_REG(0x0002, 1)
+
+
+// Individual Address Registers
+/* BANK 1 */
+#define ADDR0_REG SMC_REG(0x0004, 1)
+#define ADDR1_REG SMC_REG(0x0006, 1)
+#define ADDR2_REG SMC_REG(0x0008, 1)
+
+
+// General Purpose Register
+/* BANK 1 */
+#define GP_REG SMC_REG(0x000A, 1)
+
+
+// Control Register
+/* BANK 1 */
+#define CTL_REG SMC_REG(0x000C, 1)
+#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
+#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
+#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
+#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt
+#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt
+#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
+#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers
+#define CTL_STORE 0x0001 // When set stores registers into EEPROM
+
+
+// MMU Command Register
+/* BANK 2 */
+#define MMU_CMD_REG SMC_REG(0x0000, 2)
+#define MC_BUSY 1 // When 1 the last release has not completed
+#define MC_NOP (0<<5) // No Op
+#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
+#define MC_RESET (2<<5) // Reset MMU to initial state
+#define MC_REMOVE (3<<5) // Remove the current rx packet
+#define MC_RELEASE (4<<5) // Remove and release the current rx packet
+#define MC_FREEPKT (5<<5) // Release packet in PNR register
+#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit
+#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs
+
+
+// Packet Number Register
+/* BANK 2 */
+#define PN_REG SMC_REG(0x0002, 2)
+
+
+// Allocation Result Register
+/* BANK 2 */
+#define AR_REG SMC_REG(0x0003, 2)
+#define AR_FAILED 0x80 // Allocation Failed
+
+
+// TX FIFO Ports Register
+/* BANK 2 */
+#define TXFIFO_REG SMC_REG(0x0004, 2)
+#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty
+
+// RX FIFO Ports Register
+/* BANK 2 */
+#define RXFIFO_REG SMC_REG(0x0005, 2)
+#define RXFIFO_REMPTY 0x80 // RX FIFO Empty
+
+#define FIFO_REG SMC_REG(0x0004, 2)
+
+// Pointer Register
+/* BANK 2 */
+#define PTR_REG SMC_REG(0x0006, 2)
+#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
+#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
+#define PTR_READ 0x2000 // When 1 the operation is a read
+
+
+// Data Register
+/* BANK 2 */
+#define DATA_REG SMC_REG(0x0008, 2)
+
+
+// Interrupt Status/Acknowledge Register
+/* BANK 2 */
+#define INT_REG SMC_REG(0x000C, 2)
+
+
+// Interrupt Mask Register
+/* BANK 2 */
+#define IM_REG SMC_REG(0x000D, 2)
+#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
+#define IM_ERCV_INT 0x40 // Early Receive Interrupt
+#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
+#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns
+#define IM_ALLOC_INT 0x08 // Set when allocation request is completed
+#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty
+#define IM_TX_INT 0x02 // Transmit Interrupt
+#define IM_RCV_INT 0x01 // Receive Interrupt
+
+
+// Multicast Table Registers
+/* BANK 3 */
+#define MCAST_REG1 SMC_REG(0x0000, 3)
+#define MCAST_REG2 SMC_REG(0x0002, 3)
+#define MCAST_REG3 SMC_REG(0x0004, 3)
+#define MCAST_REG4 SMC_REG(0x0006, 3)
+
+
+// Management Interface Register (MII)
+/* BANK 3 */
+#define MII_REG SMC_REG(0x0008, 3)
+#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup
+#define MII_MDOE 0x0008 // MII Output Enable
+#define MII_MCLK 0x0004 // MII Clock, pin MDCLK
+#define MII_MDI 0x0002 // MII Input, pin MDI
+#define MII_MDO 0x0001 // MII Output, pin MDO
+
+
+// Revision Register
+/* BANK 3 */
+/* ( hi: chip id low: rev # ) */
+#define REV_REG SMC_REG(0x000A, 3)
+
+
+// Early RCV Register
+/* BANK 3 */
+/* this is NOT on SMC9192 */
+#define ERCV_REG SMC_REG(0x000C, 3)
+#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received
+#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask
+
+
+// External Register
+/* BANK 7 */
+#define EXT_REG SMC_REG(0x0000, 7)
+
+
+#define CHIP_9192 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_9196 6
+#define CHIP_91100 7
+#define CHIP_91100FD 8
+#define CHIP_91111FD 9
+
+static const char * chip_ids[ 16 ] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ /* 6 */ "SMC91C96",
+ /* 7 */ "SMC91C100",
+ /* 8 */ "SMC91C100FD",
+ /* 9 */ "SMC91C11xFD",
+ NULL, NULL, NULL,
+ NULL, NULL, NULL};
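+
+/*
+ * Illustration (not part of the original header): per the REV_REG comment
+ * above, the chip id lives in the high nibble of the revision register, so
+ * the probe code is expected to index this table with something like
+ * chip_ids[(revision_register >> 4) & 15].
+ */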
+
+
+/*
+ . Transmit status bits
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL 0x0200
+#define TS_16COL 0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BRODCAST 0x4000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+
+/*
+ * PHY IDs
+ * LAN83C183 == LAN91C111 Internal PHY
+ */
+#define PHY_LAN83C183 0x0016f840
+#define PHY_LAN83C180 0x02821c50
+
+/*
+ * PHY Register Addresses (LAN91C111 Internal PHY)
+ *
+ * Generic PHY registers can be found in <linux/mii.h>
+ *
+ * These phy registers are specific to our on-board phy.
+ */
+
+// PHY Configuration Register 1
+#define PHY_CFG1_REG 0x10
+#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled
+#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled
+#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down
+#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler
+#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable
+#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled
+#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm)
+#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db
+#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust
+#define PHY_CFG1_TLVL_MASK 0x003C
+#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time
+
+
+// PHY Configuration Register 2
+#define PHY_CFG2_REG 0x11
+#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled
+#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled
+#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt)
+#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulses
+
+// PHY Status Output (and Interrupt status) Register
+#define PHY_INT_REG 0x12 // Status Output (Interrupt Status)
+#define PHY_INT_INT 0x8000 // 1=bits have changed since last read
+#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected
+#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync
+#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx
+#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx
+#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx
+#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected
+#define PHY_INT_JAB 0x0100 // 1=Jabber detected
+#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode
+#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex
+
+// PHY Interrupt/Status Mask Register
+#define PHY_MASK_REG 0x13 // Interrupt Mask
+// Uses the same bit definitions as PHY_INT_REG
+
+
+/*
+ * SMC91C96 ethernet config and status registers.
+ * These are in the "attribute" space.
+ */
+#define ECOR 0x8000
+#define ECOR_RESET 0x80
+#define ECOR_LEVEL_IRQ 0x40
+#define ECOR_WR_ATTRIB 0x04
+#define ECOR_ENABLE 0x01
+
+#define ECSR 0x8002
+#define ECSR_IOIS8 0x20
+#define ECSR_PWRDWN 0x04
+#define ECSR_INT 0x02
+
+#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT)
+
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ * Note: the following macros do *not* select the bank -- this must
+ * be done separately as needed in the main code. The SMC_REG() macro
+ * only uses the bank argument for debugging purposes (when enabled).
+ */
+
+#if SMC_DEBUG > 0
+#define SMC_REG(reg, bank) \
+ ({ \
+ int __b = SMC_CURRENT_BANK(); \
+ if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
+ printk( "%s: bank reg screwed (0x%04x)\n", \
+ CARDNAME, __b ); \
+ BUG(); \
+ } \
+ reg<<SMC_IO_SHIFT; \
+ })
+#else
+#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT)
+#endif
+
+#if SMC_CAN_USE_8BIT
+#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG )
+#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG )
+#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG )
+#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG )
+#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG )
+#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG )
+#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG )
+#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG )
+#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG )
+#else
+#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF)
+#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG )
+#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8)
+#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF)
+#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8)
+#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF)
+#define SMC_ACK_INT(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
+ SMC_outw( __mask | (x), ioaddr, INT_REG ); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8)
+#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG )
+#endif
+
+#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT )
+#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT )
+#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG )
+#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG )
+#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG )
+#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG )
+#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG )
+#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG )
+#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG )
+#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG )
+#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG )
+#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG )
+#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG )
+#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG )
+#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG )
+#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG )
+#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG )
+#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG )
+#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG )
+#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG )
+#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
+#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG )
+#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG )
+#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG )
+#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG )
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(addr) \
+ do { \
+ unsigned int __v; \
+ __v = SMC_inw( ioaddr, ADDR0_REG ); \
+ addr[0] = __v; addr[1] = __v >> 8; \
+ __v = SMC_inw( ioaddr, ADDR1_REG ); \
+ addr[2] = __v; addr[3] = __v >> 8; \
+ __v = SMC_inw( ioaddr, ADDR2_REG ); \
+ addr[4] = __v; addr[5] = __v >> 8; \
+ } while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(addr) \
+ do { \
+ SMC_outw( addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG ); \
+ SMC_outw( addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG ); \
+ SMC_outw( addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG ); \
+ } while (0)
+
+#define SMC_SET_MCAST(x) \
+ do { \
+ const unsigned char *mt = (x); \
+ SMC_outw( mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1 ); \
+ SMC_outw( mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2 ); \
+ SMC_outw( mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3 ); \
+ SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \
+ } while (0)
+
+#if SMC_CAN_USE_32BIT
+/*
+ * Some setups just can't write 8 or 16 bits reliably when not aligned
+ * to a 32 bit boundary; believe it or not, such setups exist.
+ * We redefine here the accesses that can easily be worked around, i.e.
+ * those whose low 16 bits can be written as 0 without adverse effects.
+ */
+#undef SMC_SELECT_BANK
+#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT )
+#undef SMC_SET_RPC
+#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) )
+#undef SMC_SET_PN
+#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) )
+#undef SMC_SET_PTR
+#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) )
+#endif
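+
+/*
+ * Worked example (not part of the original header): with SMC_IO_SHIFT == 0,
+ * SMC_SELECT_BANK(2) above becomes a 32 bit write of 0x00020000 at offset
+ * 12.  On a little-endian bus the zero low halfword lands harmlessly on the
+ * reserved register at offset 12, while the upper halfword 0x0002 reaches
+ * the bank select register at offset 14.
+ */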
+
+#if SMC_CAN_USE_32BIT
+#define SMC_PUT_PKT_HDR(status, length) \
+ SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG )
+#define SMC_GET_PKT_HDR(status, length) \
+ do { \
+ unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \
+ (status) = __val & 0xffff; \
+ (length) = __val >> 16; \
+ } while (0)
+#else
+#define SMC_PUT_PKT_HDR(status, length) \
+ do { \
+ SMC_outw( status, ioaddr, DATA_REG ); \
+ SMC_outw( length, ioaddr, DATA_REG ); \
+ } while (0)
+#define SMC_GET_PKT_HDR(status, length) \
+ do { \
+ (status) = SMC_inw( ioaddr, DATA_REG ); \
+ (length) = SMC_inw( ioaddr, DATA_REG ); \
+ } while (0)
+#endif
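+
+/*
+ * Illustration (not part of the original header): the first two words of a
+ * packet in chip memory are the status word followed by the byte count, so
+ * on a little-endian bus a single 32 bit read of DATA_REG returns the
+ * status in the low halfword and the length in the high halfword, exactly
+ * as the 32 bit variant above unpacks it.
+ */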
+
+#if SMC_CAN_USE_32BIT
+#define _SMC_PUSH_DATA(p, l) \
+ do { \
+ char *__ptr = (p); \
+ int __len = (l); \
+ if (__len >= 2 && (unsigned long)__ptr & 2) { \
+ __len -= 2; \
+ SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \
+ __ptr += 2; \
+ } \
+ SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \
+ if (__len & 2) { \
+ __ptr += (__len & ~3); \
+ SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
+ } \
+ } while (0)
+#define _SMC_PULL_DATA(p, l) \
+ do { \
+ char *__ptr = (p); \
+ int __len = (l); \
+ if ((unsigned long)__ptr & 2) { \
+ /* \
+ * We want 32bit alignment here. \
+ * Since some buses perform a full 32bit \
+ * fetch even for 16bit data we can't use \
+ * SMC_inw() here. Back up both source (on chip) \
+ * and destination pointers by 2 bytes. \
+ */ \
+ __ptr -= 2; \
+ __len += 2; \
+ SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
+ } \
+ __len += 2; \
+ SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \
+ } while (0)
+#elif SMC_CAN_USE_16BIT
+#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 )
+#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 )
+#elif SMC_CAN_USE_8BIT
+#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l )
+#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l )
+#endif
+
+#if ! SMC_CAN_USE_16BIT
+#define SMC_outw(x, ioaddr, reg) \
+ do { \
+ unsigned int __val16 = (x); \
+ SMC_outb( __val16, ioaddr, reg ); \
+ SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ } while (0)
+#define SMC_inw(ioaddr, reg) \
+ ({ \
+ unsigned int __val16; \
+ __val16 = SMC_inb( ioaddr, reg ); \
+ __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+#endif
+
+#if SMC_CAN_USE_DATACS
+#define SMC_PUSH_DATA(p, l) \
+ if ( lp->datacs ) { \
+ unsigned char *__ptr = (p); \
+ int __len = (l); \
+ if (__len >= 2 && (unsigned long)__ptr & 2) { \
+ __len -= 2; \
+ SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
+ __ptr += 2; \
+ } \
+ outsl(lp->datacs, __ptr, __len >> 2); \
+ if (__len & 2) { \
+ __ptr += (__len & ~3); \
+ SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
+ } \
+ } else { \
+ _SMC_PUSH_DATA(p, l); \
+ }
+
+#define SMC_PULL_DATA(p, l) \
+ if ( lp->datacs ) { \
+ unsigned char *__ptr = (p); \
+ int __len = (l); \
+ if ((unsigned long)__ptr & 2) { \
+ /* \
+ * We want 32bit alignment here. \
+ * Since some buses perform a full 32bit \
+ * fetch even for 16bit data we can't use \
+ * SMC_inw() here. Back up both source (on chip) \
+ * and destination pointers by 2 bytes. \
+ */ \
+ __ptr -= 2; \
+ __len += 2; \
+ SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
+ } \
+ __len += 2; \
+ insl( lp->datacs, __ptr, __len >> 2); \
+ } else { \
+ _SMC_PULL_DATA(p, l); \
+ }
+#else
+#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l)
+#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l)
+#endif
+
+#if !defined (SMC_INTERRUPT_PREAMBLE)
+# define SMC_INTERRUPT_PREAMBLE
+#endif
+
+#endif /* _SMC91X_H_ */
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
new file mode 100644
index 000000000000..cdc9cc873e06
--- /dev/null
+++ b/drivers/net/sonic.c
@@ -0,0 +1,616 @@
+/*
+ * sonic.c
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * Core code included by system sonic drivers
+ */
+
+/*
+ * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
+ * National Semiconductors data sheet for the DP83932B Sonic Ethernet
+ * controller, and the files "8390.c" and "skeleton.c" in this directory.
+ */
+
+
+
+/*
+ * Open/initialize the SONIC controller.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int sonic_open(struct net_device *dev)
+{
+ if (sonic_debug > 2)
+ printk("sonic_open: initializing sonic driver.\n");
+
+ /*
+ * We don't need to deal with auto-irq stuff since we
+ * hardwire the sonic interrupt.
+ */
+/*
+ * XXX Horrible workaround: we install sonic_interrupt as a fast interrupt.
+ * This means that interrupts are disabled during execution of the handler,
+ * covering another bug that would otherwise corrupt data. This doesn't
+ * mean this glue works OK in all situations.
+ */
+// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) {
+ if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT,
+ "sonic", dev)) {
+ printk("\n%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ /*
+ * Initialize the SONIC
+ */
+ sonic_init(dev);
+
+ netif_start_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_open: Initialization done.\n");
+
+ return 0;
+}
+
+
+/*
+ * Close the SONIC device
+ */
+static int sonic_close(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+
+ if (sonic_debug > 2)
+ printk("sonic_close\n");
+
+ netif_stop_queue(dev);
+
+ /*
+ * stop the SONIC, disable interrupts
+ */
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ sonic_free_irq(dev->irq, dev); /* release the IRQ */
+
+ return 0;
+}
+
+static void sonic_tx_timeout(struct net_device *dev)
+{
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ printk("%s: transmit timed out.\n", dev->name);
+
+ /* Try to restart the adaptor. */
+ sonic_init(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * transmit packet
+ */
+static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+ unsigned int laddr;
+ int entry, length;
+
+ netif_stop_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
+
+ /*
+ * Map the packet data into the logical DMA address space
+ */
+ if ((laddr = vdma_alloc(CPHYSADDR(skb->data), skb->len)) == ~0UL) {
+ printk("%s: no VDMA entry for transmit available.\n",
+ dev->name);
+ dev_kfree_skb(skb);
+ netif_start_queue(dev);
+ return 1;
+ }
+ entry = lp->cur_tx & SONIC_TDS_MASK;
+ lp->tx_laddr[entry] = laddr;
+ lp->tx_skb[entry] = skb;
+
+ length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ flush_cache_all();
+
+ /*
+ * Setup the transmit descriptor and issue the transmit command.
+ */
+ lp->tda[entry].tx_status = 0; /* clear status */
+ lp->tda[entry].tx_frag_count = 1; /* single fragment */
+ lp->tda[entry].tx_pktsize = length; /* length of packet */
+ lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff;
+ lp->tda[entry].tx_frag_ptr_h = laddr >> 16;
+ lp->tda[entry].tx_frag_size = length;
+ lp->cur_tx++;
+ lp->stats.tx_bytes += length;
+
+ if (sonic_debug > 2)
+ printk("sonic_send_packet: issuing Tx command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
+ dev->trans_start = jiffies;
+
+ if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
+ netif_start_queue(dev);
+ else
+ lp->tx_full = 1;
+
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp;
+ int status;
+
+ if (dev == NULL) {
+ printk("sonic_interrupt: irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ lp = (struct sonic_local *) dev->priv;
+
+ status = SONIC_READ(SONIC_ISR);
+ SONIC_WRITE(SONIC_ISR, 0x7fff); /* clear all bits */
+
+ if (sonic_debug > 2)
+ printk("sonic_interrupt: ISR=%x\n", status);
+
+ if (status & SONIC_INT_PKTRX) {
+ sonic_rx(dev); /* got packet(s) */
+ }
+
+ if (status & SONIC_INT_TXDN) {
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & SONIC_TDS_MASK;
+ int status = lp->tda[entry].tx_status;
+
+ if (sonic_debug > 3)
+ printk
+ ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n",
+ status, lp->cur_tx, lp->dirty_tx);
+
+ if (status == 0) {
+ /* It still hasn't been Txed, kick the sonic again */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ break;
+ }
+
+ /* put back EOL and free descriptor */
+ lp->tda[entry].tx_frag_count = 0;
+ lp->tda[entry].tx_status = 0;
+
+ if (status & 0x0001)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (status & 0x0642)
+ lp->stats.tx_aborted_errors++;
+ if (status & 0x0180)
+ lp->stats.tx_carrier_errors++;
+ if (status & 0x0020)
+ lp->stats.tx_window_errors++;
+ if (status & 0x0004)
+ lp->stats.tx_fifo_errors++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skb[entry]) {
+ dev_kfree_skb_irq(lp->tx_skb[entry]);
+ lp->tx_skb[entry] = 0;
+ }
+ /* and the VDMA address */
+ vdma_free(lp->tx_laddr[entry]);
+ dirty_tx++;
+ }
+
+ if (lp->tx_full
+ && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /*
+ * check error conditions
+ */
+ if (status & SONIC_INT_RFO) {
+ printk("%s: receive fifo overrun\n", dev->name);
+ lp->stats.rx_fifo_errors++;
+ }
+ if (status & SONIC_INT_RDE) {
+ printk("%s: receive descriptors exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ if (status & SONIC_INT_RBE) {
+ printk("%s: receive buffer exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ if (status & SONIC_INT_RBAE) {
+ printk("%s: receive buffer area exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+
+ /* counter overruns; all counters are 16bit wide */
+ if (status & SONIC_INT_FAE)
+ lp->stats.rx_frame_errors += 65536;
+ if (status & SONIC_INT_CRC)
+ lp->stats.rx_crc_errors += 65536;
+ if (status & SONIC_INT_MP)
+ lp->stats.rx_missed_errors += 65536;
+
+ /* transmit error */
+ if (status & SONIC_INT_TXER)
+ lp->stats.tx_errors++;
+
+ /*
+ * clear interrupt bits and return
+ */
+ SONIC_WRITE(SONIC_ISR, status);
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have received one or more good packets; get them out of the buffers.
+ */
+static void sonic_rx(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
+ int status;
+
+ while (rd->in_use == 0) {
+ struct sk_buff *skb;
+ int pkt_len;
+ unsigned char *pkt_ptr;
+
+ status = rd->rx_status;
+ if (sonic_debug > 3)
+ printk("status %x, cur_rx %d, cur_rra %x\n",
+ status, lp->cur_rx, lp->cur_rra);
+ if (status & SONIC_RCR_PRX) {
+ pkt_len = rd->rx_pktlen;
+ pkt_ptr =
+ (char *)
+ sonic_chiptomem((rd->rx_pktptr_h << 16) +
+ rd->rx_pktptr_l);
+
+ if (sonic_debug > 3)
+ printk
+ ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n",
+ pkt_ptr, lp->rba, rd->rx_pktptr_h,
+ rd->rx_pktptr_l,
+ SONIC_READ(SONIC_RBWC1),
+ SONIC_READ(SONIC_RBWC0));
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ printk
+ ("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ } else {
+ /* This should only happen if we enable accepting broken packets. */
+ lp->stats.rx_errors++;
+ if (status & SONIC_RCR_FAER)
+ lp->stats.rx_frame_errors++;
+ if (status & SONIC_RCR_CRCR)
+ lp->stats.rx_crc_errors++;
+ }
+
+ rd->in_use = 1;
+ rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
+ /* now give back the buffer to the receive buffer area */
+ if (status & SONIC_RCR_LPKT) {
+ /*
+ * this was the last packet out of the current receive buffer,
+ * give the buffer back to the SONIC
+ */
+ lp->cur_rra += sizeof(sonic_rr_t);
+ if (lp->cur_rra >
+ (lp->rra_laddr +
+ (SONIC_NUM_RRS -
+ 1) * sizeof(sonic_rr_t))) lp->cur_rra =
+ lp->rra_laddr;
+ SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff);
+ } else
+ printk
+ ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
+ dev->name);
+ }
+ /*
+ * If any worth-while packets have been received, dev_rint()
+ * has done a mark_bh(NET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
+}
+
+
+/*
+ * Get the current statistics.
+ * This may be called with the device open or closed.
+ */
+static struct net_device_stats *sonic_get_stats(struct net_device *dev)
+{
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+
+ /* read the tally counter from the SONIC and reset them */
+ lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
+ SONIC_WRITE(SONIC_CRCT, 0xffff);
+ lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
+ SONIC_WRITE(SONIC_FAET, 0xffff);
+ lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
+ SONIC_WRITE(SONIC_MPT, 0xffff);
+
+ return &lp->stats;
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void sonic_multicast_list(struct net_device *dev)
+{
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+ unsigned int rcr;
+ struct dev_mc_list *dmi = dev->mc_list;
+ unsigned char *addr;
+ int i;
+
+ rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
+ rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ rcr |= SONIC_RCR_PRO;
+ } else {
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ rcr |= SONIC_RCR_AMC;
+ } else {
+ if (sonic_debug > 2)
+ printk
+ ("sonic_multicast_list: mc_count %d\n",
+ dev->mc_count);
+ lp->cda.cam_enable = 1; /* always enable our own address */
+ for (i = 1; i <= dev->mc_count; i++) {
+ addr = dmi->dmi_addr;
+ dmi = dmi->next;
+ lp->cda.cam_desc[i].cam_cap0 =
+ addr[1] << 8 | addr[0];
+ lp->cda.cam_desc[i].cam_cap1 =
+ addr[3] << 8 | addr[2];
+ lp->cda.cam_desc[i].cam_cap2 =
+ addr[5] << 8 | addr[4];
+ lp->cda.cam_enable |= (1 << i);
+ }
+ SONIC_WRITE(SONIC_CDC, 16);
+ /* issue Load CAM command */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ }
+ }
+
+ if (sonic_debug > 2)
+ printk("sonic_multicast_list: setting RCR=%x\n", rcr);
+
+ SONIC_WRITE(SONIC_RCR, rcr);
+}
+
+
+/*
+ * Initialize the SONIC ethernet controller.
+ */
+static int sonic_init(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+ unsigned int cmd;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int rra_start;
+ unsigned int rra_end;
+ int i;
+
+ /*
+ * put the Sonic into software-reset mode and
+ * disable all interrupts
+ */
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ /*
+ * clear software reset flag, disable receiver, clear and
+ * enable interrupts, then completely initialize the SONIC
+ */
+ SONIC_WRITE(SONIC_CMD, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+
+ /*
+ * initialize the receive resource area
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive resource area\n");
+
+ rra_start = lp->rra_laddr & 0xffff;
+ rra_end =
+ (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ lp->rra[i].rx_bufadr_l =
+ (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff;
+ lp->rra[i].rx_bufadr_h =
+ (lp->rba_laddr + i * SONIC_RBSIZE) >> 16;
+ lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1;
+ lp->rra[i].rx_bufsize_h = 0;
+ }
+
+ /* initialize all RRA registers */
+ SONIC_WRITE(SONIC_RSA, rra_start);
+ SONIC_WRITE(SONIC_REA, rra_end);
+ SONIC_WRITE(SONIC_RRP, rra_start);
+ SONIC_WRITE(SONIC_RWP, rra_end);
+ SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE - 2) >> 1);
+
+ lp->cur_rra =
+ lp->rra_laddr + (SONIC_NUM_RRS - 1) * sizeof(sonic_rr_t);
+
+ /* load the resource pointers */
+ if (sonic_debug > 3)
+ printk("sonic_init: issuing RRRA command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
+ break;
+ }
+
+ if (sonic_debug > 2)
+ printk("sonic_init: status=%x\n", SONIC_READ(SONIC_CMD));
+
+ /*
+ * Initialize the receive descriptors so that they
+ * become a circular linked list, ie. let the last
+ * descriptor point to the first again.
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive descriptors\n");
+ for (i = 0; i < SONIC_NUM_RDS; i++) {
+ lp->rda[i].rx_status = 0;
+ lp->rda[i].rx_pktlen = 0;
+ lp->rda[i].rx_pktptr_l = 0;
+ lp->rda[i].rx_pktptr_h = 0;
+ lp->rda[i].rx_seqno = 0;
+ lp->rda[i].in_use = 1;
+ lp->rda[i].link =
+ lp->rda_laddr + (i + 1) * sizeof(sonic_rd_t);
+ }
+ /* fix last descriptor */
+ lp->rda[SONIC_NUM_RDS - 1].link = lp->rda_laddr;
+ lp->cur_rx = 0;
+ SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
+ SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
+
+ /*
+ * initialize transmit descriptors
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize transmit descriptors\n");
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ lp->tda[i].tx_status = 0;
+ lp->tda[i].tx_config = 0;
+ lp->tda[i].tx_pktsize = 0;
+ lp->tda[i].tx_frag_count = 0;
+ lp->tda[i].link =
+ (lp->tda_laddr +
+ (i + 1) * sizeof(sonic_td_t)) | SONIC_END_OF_LINKS;
+ }
+ lp->tda[SONIC_NUM_TDS - 1].link =
+ (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS;
+
+ SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
+ SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
+ lp->cur_tx = lp->dirty_tx = 0;
+
+ /*
+ * put our own address to CAM desc[0]
+ */
+ lp->cda.cam_desc[0].cam_cap0 =
+ dev->dev_addr[1] << 8 | dev->dev_addr[0];
+ lp->cda.cam_desc[0].cam_cap1 =
+ dev->dev_addr[3] << 8 | dev->dev_addr[2];
+ lp->cda.cam_desc[0].cam_cap2 =
+ dev->dev_addr[5] << 8 | dev->dev_addr[4];
+ lp->cda.cam_enable = 1;
+
+ for (i = 0; i < 16; i++)
+ lp->cda.cam_desc[i].cam_entry_pointer = i;
+
+ /*
+ * initialize CAM registers
+ */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CDC, 16);
+
+ /*
+ * load the CAM
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
+ break;
+ }
+ if (sonic_debug > 2) {
+ printk("sonic_init: CMD=%x, ISR=%x\n",
+ SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR));
+ }
+
+ /*
+ * enable receiver, disable loopback
+ * and enable all interrupts
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
+ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
+ SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
+
+ cmd = SONIC_READ(SONIC_CMD);
+ if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
+ printk("sonic_init: failed, status=%x\n", cmd);
+
+ if (sonic_debug > 2)
+ printk("sonic_init: new status=%x\n",
+ SONIC_READ(SONIC_CMD));
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sonic.h b/drivers/net/sonic.h
new file mode 100644
index 000000000000..c4a6d58e4afb
--- /dev/null
+++ b/drivers/net/sonic.h
@@ -0,0 +1,483 @@
+/*
+ * Header file for sonic.c
+ *
+ * (C) Waldorf Electronics, Germany
+ * Written by Andreas Busse
+ *
+ * NOTE: most of the structure definitions here are endian dependent.
+ * If you want to use this driver on big endian machines, the data
+ * and pad structure members must be exchanged. Also, the structures
+ * need to be changed according to the bus size.
+ *
+ * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian),
+ * see CONFIG_MACSONIC branch below.
+ *
+ */
+#ifndef SONIC_H
+#define SONIC_H
+
+#include <linux/config.h>
+
+/*
+ * SONIC register offsets
+ */
+
+#define SONIC_CMD 0x00
+#define SONIC_DCR 0x01
+#define SONIC_RCR 0x02
+#define SONIC_TCR 0x03
+#define SONIC_IMR 0x04
+#define SONIC_ISR 0x05
+
+#define SONIC_UTDA 0x06
+#define SONIC_CTDA 0x07
+
+#define SONIC_URDA 0x0d
+#define SONIC_CRDA 0x0e
+#define SONIC_EOBC 0x13
+#define SONIC_URRA 0x14
+#define SONIC_RSA 0x15
+#define SONIC_REA 0x16
+#define SONIC_RRP 0x17
+#define SONIC_RWP 0x18
+#define SONIC_RSC 0x2b
+
+#define SONIC_CEP 0x21
+#define SONIC_CAP2 0x22
+#define SONIC_CAP1 0x23
+#define SONIC_CAP0 0x24
+#define SONIC_CE 0x25
+#define SONIC_CDP 0x26
+#define SONIC_CDC 0x27
+
+#define SONIC_WT0 0x29
+#define SONIC_WT1 0x2a
+
+#define SONIC_SR 0x28
+
+
+/* test-only registers */
+
+#define SONIC_TPS 0x08
+#define SONIC_TFC 0x09
+#define SONIC_TSA0 0x0a
+#define SONIC_TSA1 0x0b
+#define SONIC_TFS 0x0c
+
+#define SONIC_CRBA0 0x0f
+#define SONIC_CRBA1 0x10
+#define SONIC_RBWC0 0x11
+#define SONIC_RBWC1 0x12
+#define SONIC_TTDA 0x20
+#define SONIC_MDT 0x2f
+
+#define SONIC_TRBA0 0x19
+#define SONIC_TRBA1 0x1a
+#define SONIC_TBWC0 0x1b
+#define SONIC_TBWC1 0x1c
+#define SONIC_LLFA 0x1f
+
+#define SONIC_ADDR0 0x1d
+#define SONIC_ADDR1 0x1e
+
+/*
+ * Error counters
+ */
+#define SONIC_CRCT 0x2c
+#define SONIC_FAET 0x2d
+#define SONIC_MPT 0x2e
+
+#define SONIC_DCR2 0x3f
+
+/*
+ * SONIC command bits
+ */
+
+#define SONIC_CR_LCAM 0x0200
+#define SONIC_CR_RRRA 0x0100
+#define SONIC_CR_RST 0x0080
+#define SONIC_CR_ST 0x0020
+#define SONIC_CR_STP 0x0010
+#define SONIC_CR_RXEN 0x0008
+#define SONIC_CR_RXDIS 0x0004
+#define SONIC_CR_TXP 0x0002
+#define SONIC_CR_HTX 0x0001
+
+/*
+ * SONIC data configuration bits
+ */
+
+#define SONIC_DCR_EXBUS 0x8000
+#define SONIC_DCR_LBR 0x2000
+#define SONIC_DCR_PO1 0x1000
+#define SONIC_DCR_PO0 0x0800
+#define SONIC_DCR_SBUS 0x0400
+#define SONIC_DCR_USR1 0x0200
+#define SONIC_DCR_USR0 0x0100
+#define SONIC_DCR_WC1 0x0080
+#define SONIC_DCR_WC0 0x0040
+#define SONIC_DCR_DW 0x0020
+#define SONIC_DCR_BMS 0x0010
+#define SONIC_DCR_RFT1 0x0008
+#define SONIC_DCR_RFT0 0x0004
+#define SONIC_DCR_TFT1 0x0002
+#define SONIC_DCR_TFT0 0x0001
+
+/*
+ * Constants for the SONIC receive control register.
+ */
+
+#define SONIC_RCR_ERR 0x8000
+#define SONIC_RCR_RNT 0x4000
+#define SONIC_RCR_BRD 0x2000
+#define SONIC_RCR_PRO 0x1000
+#define SONIC_RCR_AMC 0x0800
+#define SONIC_RCR_LB1 0x0400
+#define SONIC_RCR_LB0 0x0200
+
+#define SONIC_RCR_MC 0x0100
+#define SONIC_RCR_BC 0x0080
+#define SONIC_RCR_LPKT 0x0040
+#define SONIC_RCR_CRS 0x0020
+#define SONIC_RCR_COL 0x0010
+#define SONIC_RCR_CRCR 0x0008
+#define SONIC_RCR_FAER 0x0004
+#define SONIC_RCR_LBK 0x0002
+#define SONIC_RCR_PRX 0x0001
+
+#define SONIC_RCR_LB_OFF 0
+#define SONIC_RCR_LB_MAC SONIC_RCR_LB0
+#define SONIC_RCR_LB_ENDEC SONIC_RCR_LB1
+#define SONIC_RCR_LB_TRANS (SONIC_RCR_LB0 | SONIC_RCR_LB1)
+
+/* default RCR setup */
+
+#define SONIC_RCR_DEFAULT (SONIC_RCR_BRD)
+
+
+/*
+ * SONIC Transmit Control register bits
+ */
+
+#define SONIC_TCR_PINTR 0x8000
+#define SONIC_TCR_POWC 0x4000
+#define SONIC_TCR_CRCI 0x2000
+#define SONIC_TCR_EXDIS 0x1000
+#define SONIC_TCR_EXD 0x0400
+#define SONIC_TCR_DEF 0x0200
+#define SONIC_TCR_NCRS 0x0100
+#define SONIC_TCR_CRLS 0x0080
+#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_PMB 0x0008
+#define SONIC_TCR_FU 0x0004
+#define SONIC_TCR_BCM 0x0002
+#define SONIC_TCR_PTX 0x0001
+
+#define SONIC_TCR_DEFAULT 0x0000
+
+/*
+ * Constants for the SONIC_INTERRUPT_MASK and
+ * SONIC_INTERRUPT_STATUS registers.
+ */
+
+#define SONIC_INT_BR 0x4000
+#define SONIC_INT_HBL 0x2000
+#define SONIC_INT_LCD 0x1000
+#define SONIC_INT_PINT 0x0800
+#define SONIC_INT_PKTRX 0x0400
+#define SONIC_INT_TXDN 0x0200
+#define SONIC_INT_TXER 0x0100
+#define SONIC_INT_TC 0x0080
+#define SONIC_INT_RDE 0x0040
+#define SONIC_INT_RBE 0x0020
+#define SONIC_INT_RBAE 0x0010
+#define SONIC_INT_CRC 0x0008
+#define SONIC_INT_FAE 0x0004
+#define SONIC_INT_MP 0x0002
+#define SONIC_INT_RFO 0x0001
+
+
+/*
+ * The interrupts we allow.
+ */
+
+#define SONIC_IMR_DEFAULT (SONIC_INT_BR | \
+ SONIC_INT_LCD | \
+ SONIC_INT_PINT | \
+ SONIC_INT_PKTRX | \
+ SONIC_INT_TXDN | \
+ SONIC_INT_TXER | \
+ SONIC_INT_RDE | \
+ SONIC_INT_RBE | \
+ SONIC_INT_RBAE | \
+ SONIC_INT_CRC | \
+ SONIC_INT_FAE | \
+ SONIC_INT_MP)
+
+
+#define SONIC_END_OF_LINKS 0x0001
+
+
+#ifdef CONFIG_MACSONIC
+/*
+ * Big endian like structures on 680x0 Macs
+ */
+
+typedef struct {
+ u32 rx_bufadr_l; /* receive buffer ptr */
+ u32 rx_bufadr_h;
+
+ u32 rx_bufsize_l; /* no. of words in the receive buffer */
+ u32 rx_bufsize_h;
+} sonic_rr_t;
+
+/*
+ * Sonic receive descriptor. Receive descriptors are
+ * kept in a linked list of these structures.
+ */
+
+typedef struct {
+ SREGS_PAD(pad0);
+ u16 rx_status; /* status after reception of a packet */
+ SREGS_PAD(pad1);
+ u16 rx_pktlen; /* length of the packet incl. CRC */
+
+ /*
+ * Pointers to the location in the receive buffer area (RBA)
+ * where the packet resides. A packet is always received into
+ * a contiguous piece of memory.
+ */
+ SREGS_PAD(pad2);
+ u16 rx_pktptr_l;
+ SREGS_PAD(pad3);
+ u16 rx_pktptr_h;
+
+ SREGS_PAD(pad4);
+ u16 rx_seqno; /* sequence no. */
+
+ SREGS_PAD(pad5);
+ u16 link; /* link to next RDD (end if EOL bit set) */
+
+ /*
+ * Owner of this descriptor, 0= driver, 1=sonic
+ */
+
+ SREGS_PAD(pad6);
+ u16 in_use;
+
+ caddr_t rda_next; /* pointer to next RD */
+} sonic_rd_t;
+
+
+/*
+ * Describes a Transmit Descriptor
+ */
+typedef struct {
+ SREGS_PAD(pad0);
+ u16 tx_status; /* status after transmission of a packet */
+ SREGS_PAD(pad1);
+ u16 tx_config; /* transmit configuration for this packet */
+ SREGS_PAD(pad2);
+ u16 tx_pktsize; /* size of the packet to be transmitted */
+ SREGS_PAD(pad3);
+ u16 tx_frag_count; /* no. of fragments */
+
+ SREGS_PAD(pad4);
+ u16 tx_frag_ptr_l;
+ SREGS_PAD(pad5);
+ u16 tx_frag_ptr_h;
+ SREGS_PAD(pad6);
+ u16 tx_frag_size;
+
+ SREGS_PAD(pad7);
+ u16 link; /* ptr to next descriptor */
+} sonic_td_t;
+
+
+/*
+ * Describes an entry in the CAM Descriptor Area.
+ */
+
+typedef struct {
+ SREGS_PAD(pad0);
+ u16 cam_entry_pointer;
+ SREGS_PAD(pad1);
+ u16 cam_cap0;
+ SREGS_PAD(pad2);
+ u16 cam_cap1;
+ SREGS_PAD(pad3);
+ u16 cam_cap2;
+} sonic_cd_t;
+
+#define CAM_DESCRIPTORS 16
+
+
+typedef struct {
+ sonic_cd_t cam_desc[CAM_DESCRIPTORS];
+ SREGS_PAD(pad);
+ u16 cam_enable;
+} sonic_cda_t;
+
+#else /* original declarations, little endian 32 bit */
+
+/*
+ * structure definitions
+ */
+
+typedef struct {
+ u32 rx_bufadr_l; /* receive buffer ptr */
+ u32 rx_bufadr_h;
+
+ u32 rx_bufsize_l; /* no. of words in the receive buffer */
+ u32 rx_bufsize_h;
+} sonic_rr_t;
+
+/*
+ * Sonic receive descriptor. Receive descriptors are
+ * kept in a linked list of these structures.
+ */
+
+typedef struct {
+ u16 rx_status; /* status after reception of a packet */
+ SREGS_PAD(pad0);
+ u16 rx_pktlen; /* length of the packet incl. CRC */
+ SREGS_PAD(pad1);
+
+ /*
+ * Pointers to the location in the receive buffer area (RBA)
+ * where the packet resides. A packet is always received into
+ * a contiguous piece of memory.
+ */
+ u16 rx_pktptr_l;
+ SREGS_PAD(pad2);
+ u16 rx_pktptr_h;
+ SREGS_PAD(pad3);
+
+ u16 rx_seqno; /* sequence no. */
+ SREGS_PAD(pad4);
+
+ u16 link; /* link to next RDD (end if EOL bit set) */
+ SREGS_PAD(pad5);
+
+ /*
+ * Owner of this descriptor, 0= driver, 1=sonic
+ */
+
+ u16 in_use;
+ SREGS_PAD(pad6);
+
+ caddr_t rda_next; /* pointer to next RD */
+} sonic_rd_t;
+
+
+/*
+ * Describes a Transmit Descriptor
+ */
+typedef struct {
+ u16 tx_status; /* status after transmission of a packet */
+ SREGS_PAD(pad0);
+ u16 tx_config; /* transmit configuration for this packet */
+ SREGS_PAD(pad1);
+ u16 tx_pktsize; /* size of the packet to be transmitted */
+ SREGS_PAD(pad2);
+ u16 tx_frag_count; /* no. of fragments */
+ SREGS_PAD(pad3);
+
+ u16 tx_frag_ptr_l;
+ SREGS_PAD(pad4);
+ u16 tx_frag_ptr_h;
+ SREGS_PAD(pad5);
+ u16 tx_frag_size;
+ SREGS_PAD(pad6);
+
+ u16 link; /* ptr to next descriptor */
+ SREGS_PAD(pad7);
+} sonic_td_t;
+
+
+/*
+ * Describes an entry in the CAM Descriptor Area.
+ */
+
+typedef struct {
+ u16 cam_entry_pointer;
+ SREGS_PAD(pad0);
+ u16 cam_cap0;
+ SREGS_PAD(pad1);
+ u16 cam_cap1;
+ SREGS_PAD(pad2);
+ u16 cam_cap2;
+ SREGS_PAD(pad3);
+} sonic_cd_t;
+
+#define CAM_DESCRIPTORS 16
+
+
+typedef struct {
+ sonic_cd_t cam_desc[CAM_DESCRIPTORS];
+ u16 cam_enable;
+ SREGS_PAD(pad);
+} sonic_cda_t;
+#endif /* endianness */
+
+/*
+ * Some tunables for the buffer areas. A power of 2 is required;
+ * the current driver uses one receive buffer for each descriptor.
+ *
+ * MSch: use more buffer space for the slow m68k Macs!
+ */
+#ifdef CONFIG_MACSONIC
+#define SONIC_NUM_RRS 32 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
+#else
+#define SONIC_NUM_RRS 16 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
+#endif
+#define SONIC_RBSIZE 1520 /* size of one resource buffer */
+
+#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
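+
+/*
+ * Illustration (not part of the original header): with SONIC_NUM_RDS == 16
+ * the mask is 0x0f, so ring indexes such as lp->cur_rx are incremented
+ * without bound and (index & SONIC_RDS_MASK) wraps them into 0..15; this
+ * is why the counts above must be powers of two.
+ */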
+
+
+/* Information that needs to be kept for each board. */
+struct sonic_local {
+ sonic_cda_t cda; /* virtual CPU address of CDA */
+ sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */
+ sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */
+ sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */
+ struct sk_buff *tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */
+ unsigned int tx_laddr[SONIC_NUM_TDS]; /* logical DMA address for skbuffs */
+ unsigned char *rba; /* start of receive buffer areas */
+ unsigned int cda_laddr; /* logical DMA address of CDA */
+ unsigned int tda_laddr; /* logical DMA address of TDA */
+ unsigned int rra_laddr; /* logical DMA address of RRA */
+ unsigned int rda_laddr; /* logical DMA address of RDA */
+ unsigned int rba_laddr; /* logical DMA address of RBA */
+ unsigned int cur_rra; /* current indexes to resource areas */
+ unsigned int cur_rx;
+ unsigned int cur_tx;
+ unsigned int dirty_tx; /* last unacked transmit packet */
+ char tx_full;
+ struct net_device_stats stats;
+};
+
+#define TX_TIMEOUT 6
+
+/* Index to functions, as function prototypes. */
+
+static int sonic_open(struct net_device *dev);
+static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void sonic_rx(struct net_device *dev);
+static int sonic_close(struct net_device *dev);
+static struct net_device_stats *sonic_get_stats(struct net_device *dev);
+static void sonic_multicast_list(struct net_device *dev);
+static int sonic_init(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev);
+
+static const char *version =
+ "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
+
+#endif /* SONIC_H */
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
new file mode 100644
index 000000000000..236bdd3f6ba0
--- /dev/null
+++ b/drivers/net/starfire.c
@@ -0,0 +1,2218 @@
+/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
+/*
+ Written 1998-2000 by Donald Becker.
+
+ Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
+ send all bug reports to me, and not to Donald Becker, as this code
+ has been heavily modified from Donald's original version.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The information below comes from Donald Becker's original driver:
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/starfire.html
+
+ -----------------------------------------------------------
+
+ Linux kernel-specific changes:
+
+ LK1.1.1 (jgarzik):
+ - Use PCI driver interface
+ - Fix MOD_xxx races
+ - softnet fixups
+
+ LK1.1.2 (jgarzik):
+ - Merge Becker version 0.15
+
+ LK1.1.3 (Andrew Morton)
+ - Timer cleanups
+
+ LK1.1.4 (jgarzik):
+ - Merge Becker version 1.03
+
+ LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
+ - Support hardware Rx/Tx checksumming
+ - Use the GFP firmware taken from Adaptec's Netware driver
+
+ LK1.2.2 (Ion Badulescu)
+ - Backported to 2.2.x
+
+ LK1.2.3 (Ion Badulescu)
+ - Fix the flaky mdio interface
+ - More compat clean-ups
+
+ LK1.2.4 (Ion Badulescu)
+ - More 2.2.x initialization fixes
+
+ LK1.2.5 (Ion Badulescu)
+ - Several fixes from Manfred Spraul
+
+ LK1.2.6 (Ion Badulescu)
+ - Fixed ifup/ifdown/ifup problem in 2.4.x
+
+ LK1.2.7 (Ion Badulescu)
+ - Removed unused code
+ - Made more functions static and __init
+
+ LK1.2.8 (Ion Badulescu)
+ - Quell bogus error messages, inform about the Tx threshold
+ - Removed #ifdef CONFIG_PCI, this driver is PCI only
+
+ LK1.2.9 (Ion Badulescu)
+ - Merged Jeff Garzik's changes from 2.4.4-pre5
+ - Added 2.2.x compatibility stuff required by the above changes
+
+ LK1.2.9a (Ion Badulescu)
+ - More updates from Jeff Garzik
+
+ LK1.3.0 (Ion Badulescu)
+ - Merged zerocopy support
+
+ LK1.3.1 (Ion Badulescu)
+ - Added ethtool support
+ - Added GPIO (media change) interrupt support
+
+ LK1.3.2 (Ion Badulescu)
+ - Fixed 2.2.x compatibility issues introduced in 1.3.1
+ - Fixed ethtool ioctl returning uninitialized memory
+
+ LK1.3.3 (Ion Badulescu)
+ - Initialize the TxMode register properly
+ - Don't dereference dev->priv after freeing it
+
+ LK1.3.4 (Ion Badulescu)
+ - Fixed initialization timing problems
+ - Fixed interrupt mask definitions
+
+ LK1.3.5 (jgarzik)
+ - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
+
+ LK1.3.6:
+ - Sparc64 support and fixes (Ion Badulescu)
+ - Better stats and error handling (Ion Badulescu)
+ - Use new pci_set_mwi() PCI API function (jgarzik)
+
+ LK1.3.7 (Ion Badulescu)
+ - minimal implementation of tx_timeout()
+ - correctly shutdown the Rx/Tx engines in netdev_close()
+ - added calls to netif_carrier_on/off
+ (patch from Stefan Rompf <srompf@isg.de>)
+ - VLAN support
+
+ LK1.3.8 (Ion Badulescu)
+ - adjust DMA burst size on sparc64
+ - 64-bit support
+ - reworked zerocopy support for 64-bit buffers
+ - working and usable interrupt mitigation/latency
+ - reduced Tx interrupt frequency for lower interrupt overhead
+
+ LK1.3.9 (Ion Badulescu)
+ - bugfix for mcast filter
+ - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
+
+ LK1.4.0 (Ion Badulescu)
+ - NAPI support
+
+ LK1.4.1 (Ion Badulescu)
+ - flush PCI posting buffers after disabling Rx interrupts
+ - put the chip to a D3 slumber on driver unload
+ - added config option to enable/disable NAPI
+
+TODO: bugfixes (no bugs known as of right now)
+*/
+
+#define DRV_NAME "starfire"
+#define DRV_VERSION "1.03+LK1.4.1"
+#define DRV_RELDATE "February 10, 2002"
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/*
+ * Adaptec's license for their drivers (which is where I got the
+ * firmware files) does not allow one to redistribute them. Thus, we can't
+ * include the firmware with this driver.
+ *
+ * However, should a legal-to-distribute firmware become available,
+ * the driver developer would need only to obtain the firmware in the
+ * form of a C header file.
+ * Once that's done, the #undef below must be changed into a #define
+ * for this driver to really use the firmware. Note that Rx/Tx
+ * hardware TCP checksumming is not possible without the firmware.
+ *
+ * WANTED: legal firmware to include with this GPL'd driver.
+ */
+#undef HAS_FIRMWARE
+/*
+ * The current frame processor firmware fails to checksum a fragment
+ * of length 1. If and when this is fixed, the #define below can be removed.
+ */
+#define HAS_BROKEN_FIRMWARE
+/*
+ * Define this if using the driver with the zero-copy patch
+ */
+#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
+#define ZEROCOPY
+#endif
+
+#ifdef HAS_FIRMWARE
+#include "starfire_firmware.h"
+#endif /* HAS_FIRMWARE */
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define VLAN_SUPPORT
+#endif
+
+#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
+#undef HAVE_NETDEV_POLL
+#endif
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Used for tuning interrupt latency vs. overhead. */
+static int intr_latency;
+static int small_frames;
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+static int mtu;
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Starfire has a 512 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 512;
+/* Whether to do TCP/UDP checksums in hardware */
+#ifdef HAS_FIRMWARE
+static int enable_hw_cksum = 1;
+#else
+static int enable_hw_cksum = 0;
+#endif
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+/*
+ * Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ * Setting to > 1518 effectively disables this feature.
+ *
+ * NOTE:
+ * The ia64 doesn't allow unaligned loads, even for integers that are
+ * only misaligned on a 2 byte boundary. Thus always force copying of
+ * packets as the starfire doesn't allow for misaligned DMAs ;-(
+ * 23/10/2000 - Jes
+ *
+ * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
+ * at least, having unaligned frames leads to a rather serious performance
+ * penalty. -Ion
+ */
+#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
+static int rx_copybreak = PKT_BUF_SZ;
+#else
+static int rx_copybreak /* = 0 */;
+#endif
+
+/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
+#ifdef __sparc__
+#define DMA_BURST_SIZE 64
+#else
+#define DMA_BURST_SIZE 128
+#endif
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
+ The media type is usually passed in 'options[]'.
+ These variables are deprecated, use ethtool instead. -Ion
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {0, };
+static int full_duplex[MAX_UNITS] = {0, };
+
+/* Operational parameters that are set at compile time. */
+
+/* The "native" ring sizes are either 256 or 2048.
+ However in some modes a descriptor may be marked to wrap the ring earlier.
+*/
+#define RX_RING_SIZE 256
+#define TX_RING_SIZE 32
+/* The completion queues are fixed at 1024 entries, i.e. 4KB to 16KB depending on the completion descriptor type. */
+#define DONE_Q_SIZE 1024
+/* All queues must be aligned on a 256-byte boundary */
+#define QUEUE_ALIGN 256
+
+#if RX_RING_SIZE > 256
+#define RX_Q_ENTRIES Rx2048QEntries
+#else
+#define RX_Q_ENTRIES Rx256QEntries
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2 * HZ)
+
+/*
+ * This SUCKS.
+ * We need a much better method to determine if dma_addr_t is 64-bit.
+ */
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+/* 64-bit dma_addr_t */
+#define ADDR_64BITS /* This chip uses 64 bit addresses. */
+#define cpu_to_dma(x) cpu_to_le64(x)
+#define dma_to_cpu(x) le64_to_cpu(x)
+#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
+#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
+#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
+#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
+#define RX_DESC_ADDR_SIZE RxDescAddr64bit
+#else /* 32-bit dma_addr_t */
+#define cpu_to_dma(x) cpu_to_le32(x)
+#define dma_to_cpu(x) le32_to_cpu(x)
+#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
+#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
+#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
+#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
+#define RX_DESC_ADDR_SIZE RxDescAddr32bit
+#endif
+
+#ifdef MAX_SKB_FRAGS
+#define skb_first_frag_len(skb) skb_headlen(skb)
+#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
+#else /* not MAX_SKB_FRAGS */
+#define skb_first_frag_len(skb) (skb->len)
+#define skb_num_frags(skb) 1
+#endif /* not MAX_SKB_FRAGS */
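+/*
+ * Illustrative example (not part of the driver logic): with fragment support,
+ * an skb carrying 100 bytes of linear data plus 3 page fragments gives
+ * skb_first_frag_len(skb) == 100 and skb_num_frags(skb) == 4; without
+ * MAX_SKB_FRAGS the whole skb counts as a single fragment.
+ */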
+
+/* 2.2.x compatibility code */
+#if LINUX_VERSION_CODE < 0x20300
+
+#include "starfire-kcomp22.h"
+
+#else /* LINUX_VERSION_CODE > 0x20300 */
+
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#include <linux/if_vlan.h>
+
+#define init_tx_timer(dev, func, timeout) \
+ dev->tx_timeout = func; \
+ dev->watchdog_timeo = timeout;
+#define kick_tx_timer(dev, func, timeout)
+
+#define netif_start_if(dev)
+#define netif_stop_if(dev)
+
+#define PCI_SLOT_NAME(pci_dev) pci_name(pci_dev)
+
+#endif /* LINUX_VERSION_CODE > 0x20300 */
+
+#ifdef HAVE_NETDEV_POLL
+#define init_poll(dev) \
+ dev->poll = &netdev_poll; \
+ dev->weight = max_interrupt_work;
+#define netdev_rx(dev, ioaddr) \
+do { \
+ u32 intr_enable; \
+ if (netif_rx_schedule_prep(dev)) { \
+ __netif_rx_schedule(dev); \
+ intr_enable = readl(ioaddr + IntrEnable); \
+ intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
+ writel(intr_enable, ioaddr + IntrEnable); \
+ readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
+ } else { \
+ /* Paranoia check */ \
+ intr_enable = readl(ioaddr + IntrEnable); \
+ if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
+ printk("%s: interrupt while in polling mode!\n", dev->name); \
+ intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
+ writel(intr_enable, ioaddr + IntrEnable); \
+ } \
+ } \
+} while (0)
+#define netdev_receive_skb(skb) netif_receive_skb(skb)
+#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
+static int netdev_poll(struct net_device *dev, int *budget);
+#else /* not HAVE_NETDEV_POLL */
+#define init_poll(dev)
+#define netdev_receive_skb(skb) netif_rx(skb)
+#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
+#define netdev_rx(dev, ioaddr) \
+do { \
+ int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
+ __netdev_rx(dev, &quota);\
+} while (0)
+#endif /* not HAVE_NETDEV_POLL */
+/* end of compatibility code */
+
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
+KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(intr_latency, int, 0);
+module_param(small_frames, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(enable_hw_cksum, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "MTU (all boards)");
+MODULE_PARM_DESC(debug, "Debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
+MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
+MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
+MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
+ring sizes are fixed by the hardware, but the ring may optionally be wrapped
+earlier by the END bit in the descriptor.
+This driver uses that hardware queue size for the Rx ring, where a large
+number of entries has no ill effect beyond increasing the potential backlog.
+The Tx ring is wrapped with the END bit, since a large hardware Tx queue
+disables the queue layer priority ordering and we have no mechanism to
+utilize the hardware two-level priority queue. When modifying the
+RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
+levels.
+
+IIIb/c. Transmit/Receive Structure
+
+See the Adaptec manual for the many possible structures, and options for
+each structure. There are far too many to document all of them here.
+
+For transmit this driver uses type 1/2 transmit descriptors (depending
+on whether 64-bit DMA addressing is in use), and relies on automatic
+minimum-length padding. It does not use the completion queue
+consumer index, but instead checks for non-zero status entries.
+
+For receive this driver uses type 0/1/2/3 receive descriptors. The driver
+allocates full frame size skbuffs for the Rx ring buffers, so all frames
+should fit in a single descriptor. The driver does not use the completion
+queue consumer index, but instead checks for non-zero status entries.
+
+When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
+is allocated and the frame is copied to the new skbuff. When the incoming
+frame is larger, the skbuff is passed directly up the protocol stack.
+Buffers consumed this way are replaced by newly allocated skbuffs in a later
+phase of receive.
+
+A notable aspect of operation is that unaligned buffers are not permitted by
+the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
+isn't longword aligned, which may cause problems on some machines,
+e.g. Alpha and IA64. For these architectures, the driver is forced to copy
+the frame into a new skbuff unconditionally. Copied frames are put into the
+skbuff at an offset of "+2", thus 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and the netif_queue
+status. If the number of free Tx slots in the ring falls below a certain number
+(currently hardcoded to 4), it signals the upper layer to stop the queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
+number of free Tx slots is above the threshold, it signals the upper layer to
+restart the queue.
+
+IV. Notes
+
+IVb. References
+
+The Adaptec Starfire manuals, available only from Adaptec.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+- StopOnPerr is broken, don't enable
+- Hardware ethernet padding exposes random data, perform software padding
+ instead (unverified -- works correctly for all the hardware I have)
+
+*/
+
+
+
+enum chip_capability_flags {CanHaveMII=1, };
+
+enum chipset {
+ CH_6915 = 0,
+};
+
+static struct pci_device_id starfire_pci_tbl[] = {
+ { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
+
+/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
+static struct chip_info {
+ const char *name;
+ int drv_flags;
+} netdrv_tbl[] __devinitdata = {
+ { "Adaptec Starfire 6915", CanHaveMII },
+};
+
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum register_offsets {
+ PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
+ IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
+ MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
+ GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
+ TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
+ TxRingHiAddr=0x5009C, /* 64 bit address extension. */
+ TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
+ TxThreshold=0x500B0,
+ CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
+ RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
+ CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
+ RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
+ RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
+ TxMode=0x55000, VlanType=0x55064,
+ PerfFilterTable=0x56000, HashTable=0x56100,
+ TxGfpMem=0x58000, RxGfpMem=0x5a000,
+};
+
+/*
+ * Bits in the interrupt status/mask registers.
+ * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
+ * enables all the interrupt sources that are or'ed into those status bits.
+ */
+enum intr_status_bits {
+ IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
+ IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
+ IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
+ IntrTxComplQLow=0x200000, IntrPCI=0x100000,
+ IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
+ IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
+ IntrNormalSummary=0x8000, IntrTxDone=0x4000,
+ IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
+ IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
+ IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
+ IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
+ IntrNoTxCsum=0x20, IntrTxBadID=0x10,
+ IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
+ IntrTxGfp=0x02, IntrPCIPad=0x01,
+ /* not quite bits */
+ IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
+ IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
+ IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
+ AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
+ PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
+ WakeupOnGFP=0x0800,
+};
+
+/* Bits in the TxMode register */
+enum tx_mode_bits {
+ MiiSoftReset=0x8000, MIILoopback=0x4000,
+ TxFlowEnable=0x0800, RxFlowEnable=0x0400,
+ PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
+};
+
+/* Bits in the TxDescCtrl register. */
+enum tx_ctrl_bits {
+ TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
+ TxDescSpace128=0x30, TxDescSpace256=0x40,
+ TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
+ TxDescType3=0x03, TxDescType4=0x04,
+ TxNoDMACompletion=0x08,
+ TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
+ TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
+ TxDMABurstSizeShift=8,
+};
+
+/* Bits in the RxDescQCtrl register. */
+enum rx_ctrl_bits {
+ RxBufferLenShift=16, RxMinDescrThreshShift=0,
+ RxPrefetchMode=0x8000, RxVariableQ=0x2000,
+ Rx2048QEntries=0x4000, Rx256QEntries=0,
+ RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
+ RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
+ RxDescSpace4=0x000, RxDescSpace8=0x100,
+ RxDescSpace16=0x200, RxDescSpace32=0x300,
+ RxDescSpace64=0x400, RxDescSpace128=0x500,
+ RxConsumerWrEn=0x80,
+};
+
+/* Bits in the RxDMACtrl register. */
+enum rx_dmactrl_bits {
+ RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
+ RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
+ RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
+ RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
+ RxChecksumRejectTCPOnly=0x01000000,
+ RxCompletionQ2Enable=0x800000,
+ RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
+ RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
+ RxDMAQ2NonIP=0x400000,
+ RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
+ RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
+ RxBurstSizeShift=0,
+};
+
+/* Bits in the RxCompletionAddr register */
+enum rx_compl_bits {
+ RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
+ RxComplProducerWrEn=0x40,
+ RxComplType0=0x00, RxComplType1=0x10,
+ RxComplType2=0x20, RxComplType3=0x30,
+ RxComplThreshShift=0,
+};
+
+/* Bits in the TxCompletionAddr register */
+enum tx_compl_bits {
+ TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
+ TxComplProducerWrEn=0x40,
+ TxComplIntrStatus=0x20,
+ CommonQueueMode=0x10,
+ TxComplThreshShift=0,
+};
+
+/* Bits in the GenCtrl register */
+enum gen_ctrl_bits {
+ RxEnable=0x05, TxEnable=0x0a,
+ RxGFPEnable=0x10, TxGFPEnable=0x20,
+};
+
+/* Bits in the IntrTimerCtrl register */
+enum intr_ctrl_bits {
+ Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
+ SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
+ IntrLatencyMask=0x1f,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct starfire_rx_desc {
+ dma_addr_t rxaddr;
+};
+enum rx_desc_bits {
+ RxDescValid=1, RxDescEndRing=2,
+};
+
+/* Completion queue entry. */
+struct short_rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+};
+struct basic_rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+ u16 vlanid;
+ u16 status2;
+};
+struct csum_rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+ u16 csum; /* Partial checksum */
+ u16 status2;
+};
+struct full_rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+ u16 status3;
+ u16 status2;
+ u16 vlanid;
+ u16 csum; /* partial checksum */
+ u32 timestamp;
+};
+/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
+#ifdef HAS_FIRMWARE
+#ifdef VLAN_SUPPORT
+typedef struct full_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType3
+#else /* not VLAN_SUPPORT */
+typedef struct csum_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType2
+#endif /* not VLAN_SUPPORT */
+#else /* not HAS_FIRMWARE */
+#ifdef VLAN_SUPPORT
+typedef struct basic_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType1
+#else /* not VLAN_SUPPORT */
+typedef struct short_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType0
+#endif /* not VLAN_SUPPORT */
+#endif /* not HAS_FIRMWARE */
+
+enum rx_done_bits {
+ RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
+};
+
+/* Type 1 Tx descriptor. */
+struct starfire_tx_desc_1 {
+ u32 status; /* Upper bits are status, lower 16 length. */
+ u32 addr;
+};
+
+/* Type 2 Tx descriptor. */
+struct starfire_tx_desc_2 {
+ u32 status; /* Upper bits are status, lower 16 length. */
+ u32 reserved;
+ u64 addr;
+};
+
+#ifdef ADDR_64BITS
+typedef struct starfire_tx_desc_2 starfire_tx_desc;
+#define TX_DESC_TYPE TxDescType2
+#else /* not ADDR_64BITS */
+typedef struct starfire_tx_desc_1 starfire_tx_desc;
+#define TX_DESC_TYPE TxDescType1
+#endif /* not ADDR_64BITS */
+#define TX_DESC_SPACING TxDescSpaceUnlim
+
+enum tx_desc_bits {
+ TxDescID=0xB0000000,
+ TxCRCEn=0x01000000, TxDescIntr=0x08000000,
+ TxRingWrap=0x04000000, TxCalTCP=0x02000000,
+};
+struct tx_done_desc {
+ u32 status; /* timestamp, index. */
+#if 0
+ u32 intrstatus; /* interrupt status */
+#endif
+};
+
+struct rx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+struct tx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ unsigned int used_slots;
+};
+
+#define PHY_CNT 2
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct starfire_rx_desc *rx_ring;
+ starfire_tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The addresses of rx/tx-in-place skbuffs. */
+ struct rx_ring_info rx_info[RX_RING_SIZE];
+ struct tx_ring_info tx_info[TX_RING_SIZE];
+ /* Pointers to completion queues (full pages). */
+ rx_done_desc *rx_done_q;
+ dma_addr_t rx_done_q_dma;
+ unsigned int rx_done;
+ struct tx_done_desc *tx_done_q;
+ dma_addr_t tx_done_q_dma;
+ unsigned int tx_done;
+ struct net_device_stats stats;
+ struct pci_dev *pci_dev;
+#ifdef VLAN_SUPPORT
+ struct vlan_group *vlgrp;
+#endif
+ void *queue_mem;
+ dma_addr_t queue_mem_dma;
+ size_t queue_mem_size;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx, reap_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ /* These values keep track of the transceiver/media in use. */
+ int speed100; /* Set if speed == 100MBit. */
+ u32 tx_mode;
+ u32 intr_timer_ctrl;
+ u8 tx_threshold;
+ /* MII transceiver section. */
+ struct mii_if_info mii_if; /* MII lib hooks/info */
+ int phy_cnt; /* MII device addresses. */
+ unsigned char phys[PHY_CNT]; /* MII device addresses. */
+ void __iomem *base;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int __netdev_rx(struct net_device *dev, int *quota);
+static void refill_rx_ring(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static void netdev_media_change(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+
+
+#ifdef VLAN_SUPPORT
+static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock(&np->lock);
+ if (debug > 2)
+ printk("%s: Setting vlgrp to %p\n", dev->name, grp);
+ np->vlgrp = grp;
+ set_rx_mode(dev);
+ spin_unlock(&np->lock);
+}
+
+static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock(&np->lock);
+ if (debug > 1)
+ printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
+ set_rx_mode(dev);
+ spin_unlock(&np->lock);
+}
+
+static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock(&np->lock);
+ if (debug > 1)
+ printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
+ if (np->vlgrp)
+ np->vlgrp->vlan_devices[vid] = NULL;
+ set_rx_mode(dev);
+ spin_unlock(&np->lock);
+}
+#endif /* VLAN_SUPPORT */
+
+
+static int __devinit starfire_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, irq, option, chip_idx = ent->driver_data;
+ struct net_device *dev;
+ static int card_idx = -1;
+ long ioaddr;
+ void __iomem *base;
+ int drv_flags, io_size;
+ int boguscnt;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ card_idx++;
+
+ if (pci_enable_device (pdev))
+ return -EIO;
+
+ ioaddr = pci_resource_start(pdev, 0);
+ io_size = pci_resource_len(pdev, 0);
+ if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
+ printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev) {
+ printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ irq = pdev->irq;
+
+ if (pci_request_regions (pdev, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
+ goto err_out_free_netdev;
+ }
+
+	/* ioremap is broken in Linux-2.2.x/sparc64 */
+ base = ioremap(ioaddr, io_size);
+ if (!base) {
+ printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
+ card_idx, io_size, ioaddr);
+ goto err_out_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ /* enable MWI -- it vastly improves Rx performance on sparc64 */
+ pci_set_mwi(pdev);
+
+#ifdef MAX_SKB_FRAGS
+ dev->features |= NETIF_F_SG;
+#endif /* MAX_SKB_FRAGS */
+#ifdef ZEROCOPY
+ /* Starfire can do TCP/UDP checksumming */
+ if (enable_hw_cksum)
+ dev->features |= NETIF_F_IP_CSUM;
+#endif /* ZEROCOPY */
+#ifdef VLAN_SUPPORT
+ dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ dev->vlan_rx_register = netdev_vlan_rx_register;
+ dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
+#endif /* VLAN_SUPPORT */
+#ifdef ADDR_64BITS
+ dev->features |= NETIF_F_HIGHDMA;
+#endif /* ADDR_64BITS */
+
+ /* Serial EEPROM reads are hidden by the hardware. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (debug > 4)
+ for (i = 0; i < 0x20; i++)
+ printk("%2.2x%s",
+ (unsigned int)readb(base + EEPROMCtrl + i),
+ i % 16 != 15 ? " " : "\n");
+#endif
+
+ /* Issue soft reset */
+ writel(MiiSoftReset, base + TxMode);
+ udelay(1000);
+ writel(0, base + TxMode);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(1, base + PCIDeviceConfig);
+ boguscnt = 1000;
+ while (--boguscnt > 0) {
+ udelay(10);
+ if ((readl(base + PCIDeviceConfig) & 1) == 0)
+ break;
+ }
+ if (boguscnt == 0)
+ printk("%s: chipset reset never completed!\n", dev->name);
+ /* wait a little longer */
+ udelay(1000);
+
+ dev->base_addr = (unsigned long)base;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->base = base;
+ spin_lock_init(&np->lock);
+ pci_set_drvdata(pdev, dev);
+
+ np->pci_dev = pdev;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ drv_flags = netdrv_tbl[chip_idx].drv_flags;
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option & 0x200)
+ np->mii_if.full_duplex = 1;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii_if.full_duplex = 1;
+
+ if (np->mii_if.full_duplex)
+ np->mii_if.force_media = 1;
+ else
+ np->mii_if.force_media = 0;
+ np->speed100 = 1;
+
+ /* timer resolution is 128 * 0.8us */
+ np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
+ Timer10X | EnableIntrMasking;
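+	/*
+	 * Worked example (illustrative only): with the 128 * 0.8us = 102.4us
+	 * tick noted above, (intr_latency * 10) / 1024 converts microseconds
+	 * into timer ticks, so intr_latency=1000 gives 9 ticks, i.e. roughly
+	 * 922us of interrupt coalescing.
+	 */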
+
+ if (small_frames > 0) {
+ np->intr_timer_ctrl |= SmallFrameBypass;
+ switch (small_frames) {
+ case 1 ... 64:
+ np->intr_timer_ctrl |= SmallFrame64;
+ break;
+ case 65 ... 128:
+ np->intr_timer_ctrl |= SmallFrame128;
+ break;
+ case 129 ... 256:
+ np->intr_timer_ctrl |= SmallFrame256;
+ break;
+ default:
+ np->intr_timer_ctrl |= SmallFrame512;
+ if (small_frames > 512)
+ printk("Adjusting small_frames down to 512\n");
+ break;
+ }
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
+ init_poll(dev);
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ if (register_netdev(dev))
+ goto err_out_cleardev;
+
+ printk(KERN_INFO "%s: %s at %p, ",
+ dev->name, netdrv_tbl[chip_idx].name, base);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ if (drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ int mii_status;
+ for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
+ mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
+ mdelay(100);
+ boguscnt = 1000;
+ while (--boguscnt > 0)
+ if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ if (boguscnt == 0) {
+ printk("%s: PHY reset never completed!\n", dev->name);
+ continue;
+ }
+ mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "%#4.4x advertising %#4.4x.\n",
+ dev->name, phy, mii_status, np->mii_if.advertising);
+ /* there can be only one PHY on-board */
+ break;
+ }
+ }
+ np->phy_cnt = phy_idx;
+ if (np->phy_cnt > 0)
+ np->mii_if.phy_id = np->phys[0];
+ else
+ memset(&np->mii_if, 0, sizeof(np->mii_if));
+ }
+
+ printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
+ dev->name, enable_hw_cksum ? "enabled" : "disabled");
+ return 0;
+
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ iounmap(base);
+err_out_free_res:
+ pci_release_regions (pdev);
+err_out_free_netdev:
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+
+/* Read the MII Management Data I/O (MDIO) interfaces. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
+ int result, boguscnt=1000;
+ /* ??? Should we add a busy-wait here? */
+ do
+ result = readl(mdio_addr);
+ while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
+ if (boguscnt == 0)
+ return 0;
+ if ((result & 0xffff) == 0xffff)
+ return 0;
+ return result & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
+ writel(value, mdio_addr);
+ /* The busy-wait will occur before a read. */
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i, retval;
+ size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
+
+ /* Do we ever need to reset the chip??? */
+ retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ if (retval)
+ return retval;
+
+ /* Disable the Rx and Tx, and reset the chip. */
+ writel(0, ioaddr + GenCtrl);
+ writel(1, ioaddr + PCIDeviceConfig);
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ /* Allocate the various queues. */
+ if (np->queue_mem == 0) {
+ tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
+ np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
+ np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+ if (np->queue_mem == 0)
+ return -ENOMEM;
+
+ np->tx_done_q = np->queue_mem;
+ np->tx_done_q_dma = np->queue_mem_dma;
+ np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
+ np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
+ np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
+ np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
+ np->rx_ring = (void *) np->tx_ring + tx_ring_size;
+ np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
+ }
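+	/*
+	 * Note: the queues above share one consolidated DMA allocation laid out
+	 * as tx_done_q | rx_done_q | tx_ring | rx_ring, with the first three
+	 * regions padded up to QUEUE_ALIGN (256 bytes) so that each queue
+	 * starts on the boundary the hardware requires.
+	 */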
+
+ /* Start with no carrier, it gets adjusted later */
+ netif_carrier_off(dev);
+ init_ring(dev);
+ /* Set the size of the Rx buffers. */
+ writel((np->rx_buf_sz << RxBufferLenShift) |
+ (0 << RxMinDescrThreshShift) |
+ RxPrefetchMode | RxVariableQ |
+ RX_Q_ENTRIES |
+ RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
+ RxDescSpace4,
+ ioaddr + RxDescQCtrl);
+
+ /* Set up the Rx DMA controller. */
+ writel(RxChecksumIgnore |
+ (0 << RxEarlyIntThreshShift) |
+ (6 << RxHighPrioThreshShift) |
+ ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
+ ioaddr + RxDMACtrl);
+
+ /* Set Tx descriptor */
+ writel((2 << TxHiPriFIFOThreshShift) |
+ (0 << TxPadLenShift) |
+ ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
+ TX_DESC_Q_ADDR_SIZE |
+ TX_DESC_SPACING | TX_DESC_TYPE,
+ ioaddr + TxDescCtrl);
+
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
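+	/*
+	 * The double 16-bit shift extracts the upper 32 bits of the queue
+	 * address (or yields 0 when dma_addr_t is 32 bits wide) without doing
+	 * a shift by 32, which would be undefined on a 32-bit dma_addr_t.
+	 */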
+ writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
+ writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+
+ writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
+ writel(np->rx_done_q_dma |
+ RxComplType |
+ (0 << RxComplThreshShift),
+ ioaddr + RxCompletionAddr);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+
+ /* Fill both the Tx SA register and the Rx perfect filter. */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
+ /* The first entry is special because it bypasses the VLAN filter.
+ Don't use it. */
+ writew(0, ioaddr + PerfFilterTable);
+ writew(0, ioaddr + PerfFilterTable + 4);
+ writew(0, ioaddr + PerfFilterTable + 8);
+ for (i = 1; i < 16; i++) {
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
+ writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
+ }
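+	/*
+	 * The perfect filter apparently expects the station address as three
+	 * big-endian 16-bit words in reverse order, which matches the layout
+	 * used again in set_rx_mode() below.
+	 */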
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
+ writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
+ udelay(1000);
+ writel(np->tx_mode, ioaddr + TxMode);
+ np->tx_threshold = 4;
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+
+ writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
+
+ netif_start_if(dev);
+ netif_start_queue(dev);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+ set_rx_mode(dev);
+
+ np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ check_duplex(dev);
+
+ /* Enable GPIO interrupts on link change */
+ writel(0x0f00ff00, ioaddr + GPIOCtrl);
+
+ /* Set the interrupt mask */
+ writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
+ IntrTxDMADone | IntrStatsMax | IntrLinkChange |
+ IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
+ ioaddr + IntrEnable);
+ /* Enable PCI interrupts. */
+ writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
+ ioaddr + PCIDeviceConfig);
+
+#ifdef VLAN_SUPPORT
+ /* Set VLAN type to 802.1q */
+ writel(ETH_P_8021Q, ioaddr + VlanType);
+#endif /* VLAN_SUPPORT */
+
+#ifdef HAS_FIRMWARE
+ /* Load Rx/Tx firmware into the frame processors */
+ for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
+ writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
+ for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
+ writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
+#endif /* HAS_FIRMWARE */
+ if (enable_hw_cksum)
+ /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
+ writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
+ else
+ /* Enable the Rx and Tx units only. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n",
+ dev->name);
+
+ return 0;
+}
+
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 reg0;
+ int silly_count = 1000;
+
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
+ mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ udelay(500);
+ while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
+ /* do nothing */;
+ if (!silly_count) {
+ printk("%s: MII reset failed!\n", dev->name);
+ return;
+ }
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+
+ if (!np->mii_if.force_media) {
+ reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+ if (np->speed100)
+ reg0 |= BMCR_SPEED100;
+ if (np->mii_if.full_duplex)
+ reg0 |= BMCR_FULLDPLX;
+ printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->mii_if.full_duplex ? "full" : "half");
+ }
+ mdio_write(dev, np->phys[0], MII_BMCR, reg0);
+}
+
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int old_debug;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
+ "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
+
+ /* Perhaps we should reinitialize the hardware here. */
+
+ /*
+ * Stop and restart the interface.
+ * Cheat and increase the debug level temporarily.
+ */
+ old_debug = debug;
+ debug = 2;
+ netdev_close(dev);
+ netdev_open(dev);
+ debug = old_debug;
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = np->reap_tx = 0;
+ np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_info[i].skb = skb;
+ if (skb == NULL)
+ break;
+ np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb->dev = dev; /* Mark as being used by this device. */
+ /* Grrr, we cannot offset to correctly align the IP header. */
+ np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
+ }
+ writew(i - 1, np->base + RxDescQIdx);
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Clear the remainder of the Rx buffer ring. */
+ for ( ; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0;
+ np->rx_info[i].skb = NULL;
+ np->rx_info[i].mapping = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
+
+ /* Clear the completion rings. */
+ for (i = 0; i < DONE_Q_SIZE; i++) {
+ np->rx_done_q[i].status = 0;
+ np->tx_done_q[i].status = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
+
+ return;
+}
+
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int entry;
+ u32 status;
+ int i;
+
+ kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
+
+ /*
+ * be cautious here, wrapping the queue has weird semantics
+ * and we may not have enough slots even when it seems we do.
+ */
+ if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
+ netif_stop_queue(dev);
+ return 1;
+ }
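+	/*
+	 * The factor of 2 above presumably leaves headroom for the wrap case
+	 * below, where a packet queued near the end of the ring consumes all
+	 * remaining slots (used_slots = TX_RING_SIZE - entry) rather than just
+	 * skb_num_frags(skb) slots.
+	 */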
+
+#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
+ {
+ int has_bad_length = 0;
+
+ if (skb_first_frag_len(skb) == 1)
+ has_bad_length = 1;
+ else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (skb_shinfo(skb)->frags[i].size == 1) {
+ has_bad_length = 1;
+ break;
+ }
+ }
+
+ if (has_bad_length)
+ skb_checksum_help(skb);
+ }
+#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+
+ entry = np->cur_tx % TX_RING_SIZE;
+ for (i = 0; i < skb_num_frags(skb); i++) {
+ int wrap_ring = 0;
+ status = TxDescID;
+
+ if (i == 0) {
+ np->tx_info[entry].skb = skb;
+ status |= TxCRCEn;
+ if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
+ status |= TxRingWrap;
+ wrap_ring = 1;
+ }
+ if (np->reap_tx) {
+ status |= TxDescIntr;
+ np->reap_tx = 0;
+ }
+ if (skb->ip_summed == CHECKSUM_HW) {
+ status |= TxCalTCP;
+ np->stats.tx_compressed++;
+ }
+ status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
+
+ np->tx_info[entry].mapping =
+ pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ } else {
+#ifdef MAX_SKB_FRAGS
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
+ status |= this_frag->size;
+ np->tx_info[entry].mapping =
+ pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
+#endif /* MAX_SKB_FRAGS */
+ }
+
+ np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
+ np->tx_ring[entry].status = cpu_to_le32(status);
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ entry, status);
+ if (wrap_ring) {
+ np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
+ np->cur_tx += np->tx_info[entry].used_slots;
+ entry = 0;
+ } else {
+ np->tx_info[entry].used_slots = 1;
+ np->cur_tx += np->tx_info[entry].used_slots;
+ entry++;
+ }
+ /* scavenge the tx descriptors twice per TX_RING_SIZE */
+ if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
+ np->reap_tx = 1;
+ }
+
+ /* Non-x86: explicitly flush descriptor cache lines here. */
+ /* Ensure all descriptors are written back before the transmit is
+ initiated. - Jes */
+ wmb();
+
+ /* Update the producer index. */
+ writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
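+	/*
+	 * The producer index appears to be counted in 8-byte units: the factor
+	 * works out to 1 for the 8-byte type 1 descriptors and 2 for the
+	 * 16-byte type 2 descriptors defined above.
+	 */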
+
+ /* 4 is arbitrary, but should be ok */
+ if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int boguscnt = max_interrupt_work;
+ int consumer;
+ int tx_status;
+ int handled = 0;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrClear);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == (u32) -1)
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDone | IntrRxEmpty))
+ netdev_rx(dev, ioaddr);
+
+ /* Scavenge the skbuff list based on the Tx-done queue.
+ There are redundant checks here that may be cleaned up
+ after the driver has proven to be reliable. */
+ consumer = readl(ioaddr + TxConsumerIdx);
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+ dev->name, consumer);
+
+ while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
+ dev->name, np->dirty_tx, np->tx_done, tx_status);
+ if ((tx_status & 0xe0000000) == 0xa0000000) {
+ np->stats.tx_packets++;
+ } else if ((tx_status & 0xe0000000) == 0x80000000) {
+ u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
+ struct sk_buff *skb = np->tx_info[entry].skb;
+ np->tx_info[entry].skb = NULL;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ np->dirty_tx += np->tx_info[entry].used_slots;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+#ifdef MAX_SKB_FRAGS
+ {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ np->dirty_tx++;
+ entry++;
+ }
+ }
+#endif /* MAX_SKB_FRAGS */
+ dev_kfree_skb_irq(skb);
+ }
+ np->tx_done_q[np->tx_done].status = 0;
+ np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
+ }
+ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+
+ if (netif_queue_stopped(dev) &&
+ (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
+ /* The ring is no longer full, wake the queue. */
+ netif_wake_queue(dev);
+ }
+
+ /* Stats overflow */
+ if (intr_status & IntrStatsMax)
+ get_stats(dev);
+
+ /* Media change interrupt. */
+ if (intr_status & IntrLinkChange)
+ netdev_media_change(dev);
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ if (debug > 1)
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=%#8.8x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
+ dev->name, (int) readl(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+
+/* This routine is logically part of the interrupt/poll handler, but separated
+ for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
+static int __netdev_rx(struct net_device *dev, int *quota)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 desc_status;
+ int retcode = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
+ struct sk_buff *skb;
+ u16 pkt_len;
+ int entry;
+ rx_done_desc *desc = &np->rx_done_q[np->rx_done];
+
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
+ if (!(desc_status & RxOK)) {
+			/* There was an error. */
+ if (debug > 2)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & RxFIFOErr)
+ np->stats.rx_fifo_errors++;
+ goto next_rx;
+ }
+
+ if (*quota <= 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
+ (*quota)--;
+
+		pkt_len = desc_status;	/* Implicitly truncated to the low 16 bits (frame length). */
+ entry = (desc_status >> 16) & 0x7ff;
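+		/*
+		 * The upper half of the status word appears to carry the Rx ring
+		 * entry index; the 0x7ff mask allows for the largest 2048-entry
+		 * ring (Rx2048QEntries).
+		 */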
+
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb = np->rx_info[entry].skb;
+ skb_put(skb, pkt_len);
+ np->rx_info[entry].skb = NULL;
+ np->rx_info[entry].mapping = 0;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (debug > 5)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13]);
+#endif
+
+ skb->protocol = eth_type_trans(skb, dev);
+#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT)
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
+#endif
+#ifdef HAS_FIRMWARE
+ if (le16_to_cpu(desc->status2) & 0x0100) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ np->stats.rx_compressed++;
+ }
+ /*
+ * This feature doesn't seem to be working, at least
+ * with the two firmware versions I have. If the GFP sees
+ * an IP fragment, it either ignores it completely, or reports
+ * "bad checksum" on it.
+ *
+ * Maybe I missed something -- corrections are welcome.
+ * Until then, the printk stays. :-) -Ion
+ */
+ else if (le16_to_cpu(desc->status2) & 0x0040) {
+ skb->ip_summed = CHECKSUM_HW;
+ skb->csum = le16_to_cpu(desc->csum);
+ printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
+ }
+#endif /* HAS_FIRMWARE */
+#ifdef VLAN_SUPPORT
+ if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
+ /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
+ vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
+ } else
+#endif /* VLAN_SUPPORT */
+ netdev_receive_skb(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+
+ next_rx:
+ np->cur_rx++;
+ desc->status = 0;
+ np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
+ }
+ writew(np->rx_done, np->base + CompletionQConsumerIdx);
+
+ out:
+ refill_rx_ring(dev);
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
+ retcode, np->rx_done, desc_status);
+ return retcode;
+}
+
+
+#ifdef HAVE_NETDEV_POLL
+static int netdev_poll(struct net_device *dev, int *budget)
+{
+ u32 intr_status;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int retcode = 0, quota = dev->quota;
+
+ do {
+ writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
+
+ retcode = __netdev_rx(dev, &quota);
+ *budget -= (dev->quota - quota);
+ dev->quota = quota;
+ if (retcode)
+ goto out;
+
+ intr_status = readl(ioaddr + IntrStatus);
+ } while (intr_status & (IntrRxDone | IntrRxEmpty));
+
+ netif_rx_complete(dev);
+ intr_status = readl(ioaddr + IntrEnable);
+ intr_status |= IntrRxDone | IntrRxEmpty;
+ writel(intr_status, ioaddr + IntrEnable);
+
+ out:
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode);
+
+ /* Restart Rx engine if stopped. */
+ return retcode;
+}
+#endif /* HAVE_NETDEV_POLL */
+
+
+static void refill_rx_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int entry = -1;
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_info[entry].skb == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_info[entry].skb = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ np->rx_info[entry].mapping =
+ pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].rxaddr =
+ cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
+ }
+ if (entry == RX_RING_SIZE - 1)
+ np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
+ }
+ if (entry >= 0)
+ writew(entry, np->base + RxDescQIdx);
+}
+
+
+static void netdev_media_change(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 reg0, reg1, reg4, reg5;
+ u32 new_tx_mode;
+ u32 new_intr_timer_ctrl;
+
+ /* reset status first */
+ mdio_read(dev, np->phys[0], MII_BMCR);
+ mdio_read(dev, np->phys[0], MII_BMSR);
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+ reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (reg1 & BMSR_LSTATUS) {
+ /* link is up */
+ if (reg0 & BMCR_ANENABLE) {
+ /* autonegotiation is enabled */
+ reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ reg5 = mdio_read(dev, np->phys[0], MII_LPA);
+ if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
+ np->speed100 = 1;
+ np->mii_if.full_duplex = 1;
+ } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
+ np->speed100 = 1;
+ np->mii_if.full_duplex = 0;
+ } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
+ np->speed100 = 0;
+ np->mii_if.full_duplex = 1;
+ } else {
+ np->speed100 = 0;
+ np->mii_if.full_duplex = 0;
+ }
+ } else {
+ /* autonegotiation is disabled */
+ if (reg0 & BMCR_SPEED100)
+ np->speed100 = 1;
+ else
+ np->speed100 = 0;
+ if (reg0 & BMCR_FULLDPLX)
+ np->mii_if.full_duplex = 1;
+ else
+ np->mii_if.full_duplex = 0;
+ }
+ netif_carrier_on(dev);
+ printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->mii_if.full_duplex ? "full" : "half");
+
+ new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
+ if (np->mii_if.full_duplex)
+ new_tx_mode |= FullDuplex;
+ if (np->tx_mode != new_tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
+ udelay(1000);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+
+ new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
+ if (np->speed100)
+ new_intr_timer_ctrl |= Timer10X;
+ if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
+ np->intr_timer_ctrl = new_intr_timer_ctrl;
+ writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
+ }
+ } else {
+ netif_carrier_off(dev);
+ printk(KERN_DEBUG "%s: Link is down\n", dev->name);
+ }
+}
+
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* Came close to underrunning the Tx FIFO, increase threshold. */
+ if (intr_status & IntrTxDataLow) {
+ if (np->tx_threshold <= PKT_BUF_SZ / 16) {
+ writel(++np->tx_threshold, np->base + TxThreshold);
+ printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
+ dev->name, np->tx_threshold * 16);
+ } else
+ printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
+ }
+ if (intr_status & IntrRxGFPDead) {
+ np->stats.rx_fifo_errors++;
+ np->stats.rx_errors++;
+ }
+ if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
+ np->stats.tx_fifo_errors++;
+ np->stats.tx_errors++;
+ }
+ if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
+ printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
+ dev->name, intr_status);
+}
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+
+ /* This adapter architecture needs no SMP locks. */
+ np->stats.tx_bytes = readl(ioaddr + 0x57010);
+ np->stats.rx_bytes = readl(ioaddr + 0x57044);
+ np->stats.tx_packets = readl(ioaddr + 0x57000);
+ np->stats.tx_aborted_errors =
+ readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
+ np->stats.tx_window_errors = readl(ioaddr + 0x57018);
+ np->stats.collisions =
+ readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+
+	/* The chip only reports the number of frames it silently dropped. */
+ np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+ writew(0, ioaddr + RxDMAStatus);
+ np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+ np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
+ np->stats.rx_length_errors = readl(ioaddr + 0x57058);
+ np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
+
+ return &np->stats;
+}
+
+
+/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u32 rx_mode = MinVLANPrio;
+ struct dev_mc_list *mclist;
+ int i;
+#ifdef VLAN_SUPPORT
+
+ rx_mode |= VlanMode;
+ if (np->vlgrp) {
+ int vlan_count = 0;
+ void __iomem *filter_addr = ioaddr + HashTable + 8;
+ for (i = 0; i < VLAN_VID_MASK; i++) {
+ if (np->vlgrp->vlan_devices[i]) {
+ if (vlan_count >= 32)
+ break;
+ writew(cpu_to_be16(i), filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+ }
+ if (i == VLAN_VID_MASK) {
+ rx_mode |= PerfectFilterVlan;
+ while (vlan_count < 32) {
+ writew(0, filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+ }
+ }
+#endif /* VLAN_SUPPORT */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ rx_mode |= AcceptAll;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
+ } else if (dev->mc_count <= 14) {
+ /* Use the 16 element perfect filter, skip first two entries. */
+ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
+ u16 *eaddrs;
+ for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
+ }
+ eaddrs = (u16 *)dev->dev_addr;
+ while (i++ < 16) {
+ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
+ }
+ rx_mode |= AcceptBroadcast|PerfectFilter;
+ } else {
+ /* Must use a multicast hash table. */
+ void __iomem *filter_addr;
+ u16 *eaddrs;
+ u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
+
+ *fptr |= cpu_to_le32(1 << (bit_nr & 31));
+ }
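+		/*
+		 * Illustrative mapping: crc >> 23 keeps the top 9 CRC bits, i.e.
+		 * one of the 512 hash bins noted near multicast_filter_limit.
+		 * Bin 300, for instance, lands at bit 12 of the 32-bit word
+		 * overlaying mc_filter[18]/mc_filter[19] (300 = 18*16 + 12).
+		 */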
+ /* Clear the perfect filter list, skip first two entries. */
+ filter_addr = ioaddr + PerfFilterTable + 2 * 16;
+ eaddrs = (u16 *)dev->dev_addr;
+ for (i = 2; i < 16; i++) {
+ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
+ }
+ for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
+ writew(mc_filter[i], filter_addr);
+ rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
+ }
+ writel(rx_mode, ioaddr + RxFilterMode);
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, PCI_SLOT_NAME(np->pci_dev));
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ check_duplex(dev);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ debug = val;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
+ check_duplex(dev);
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i;
+
+ netif_stop_queue(dev);
+ netif_stop_if(dev);
+
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
+ dev->name, (int) readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(0, ioaddr + GenCtrl);
+ readl(ioaddr + GenCtrl);
+
+ if (debug > 5) {
+ printk(KERN_DEBUG" Tx ring at %#llx:\n",
+ (long long) np->tx_ring_dma);
+ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
+ printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
+ i, le32_to_cpu(np->tx_ring[i].status),
+ (long long) dma_to_cpu(np->tx_ring[i].addr),
+ le32_to_cpu(np->tx_done_q[i].status));
+ printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
+ (long long) np->rx_ring_dma, np->rx_done_q);
+ if (np->rx_done_q)
+ for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
+ printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
+ i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
+ }
+ }
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
+ if (np->rx_info[i].skb != NULL) {
+ pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_info[i].skb);
+ }
+ np->rx_info[i].skb = NULL;
+ np->rx_info[i].mapping = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_info[i].skb;
+ if (skb == NULL)
+ continue;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].mapping,
+ skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ np->tx_info[i].mapping = 0;
+ dev_kfree_skb(skb);
+ np->tx_info[i].skb = NULL;
+ }
+
+ return 0;
+}
+
+
+static void __devexit starfire_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (!dev)
+ BUG();
+
+ unregister_netdev(dev);
+
+ if (np->queue_mem)
+ pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+
+
+ /* XXX: add wakeup code -- requires firmware for MagicPacket */
+ pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
+ pci_disable_device(pdev);
+
+ iounmap(np->base);
+ pci_release_regions(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev); /* Will also free np!! */
+}
+
+
+static struct pci_driver starfire_driver = {
+ .name = DRV_NAME,
+ .probe = starfire_init_one,
+ .remove = __devexit_p(starfire_remove_one),
+ .id_table = starfire_pci_tbl,
+};
+
+
+static int __init starfire_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+#ifndef ADDR_64BITS
+ /* we can do this test only at run-time... sigh */
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+		printk(KERN_ERR "This driver has not been ported to this 64-bit architecture yet\n");
+ return -ENODEV;
+ }
+#endif /* not ADDR_64BITS */
+#ifndef HAS_FIRMWARE
+ /* unconditionally disable hw cksums if firmware is not present */
+ enable_hw_cksum = 0;
+#endif /* not HAS_FIRMWARE */
+ return pci_module_init (&starfire_driver);
+}
+
+
+static void __exit starfire_cleanup (void)
+{
+ pci_unregister_driver (&starfire_driver);
+}
+
+
+module_init(starfire_init);
+module_exit(starfire_cleanup);
+
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/starfire_firmware.pl b/drivers/net/starfire_firmware.pl
new file mode 100644
index 000000000000..0c82b80e1074
--- /dev/null
+++ b/drivers/net/starfire_firmware.pl
@@ -0,0 +1,31 @@
+#!/usr/bin/perl
+
+# This script can be used to generate a new starfire_firmware.h
+# from GFP_RX.DAT and GFP_TX.DAT, files included with the DDK
+# and also with the Novell drivers.
+
+open FW, "GFP_RX.DAT" || die;
+open FWH, ">starfire_firmware.h" || die;
+
+printf(FWH "static u32 firmware_rx[] = {\n");
+$counter = 0;
+while ($foo = <FW>) {
+	chomp($foo);
+ printf(FWH " 0x%s, 0x0000%s,\n", substr($foo, 4, 8), substr($foo, 0, 4));
+ $counter++;
+}
+
+close FW;
+open FW, "GFP_TX.DAT" || die;
+
+printf(FWH "};\t/* %d Rx instructions */\n#define FIRMWARE_RX_SIZE %d\n\nstatic u32 firmware_tx[] = {\n", $counter, $counter);
+$counter = 0;
+while ($foo = <FW>) {
+	chomp($foo);
+ printf(FWH " 0x%s, 0x0000%s,\n", substr($foo, 4, 8), substr($foo, 0, 4));
+ $counter++;
+}
+
+close FW;
+printf(FWH "};\t/* %d Tx instructions */\n#define FIRMWARE_TX_SIZE %d\n", $counter, $counter);
+close(FWH);
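For reference, each line of the .DAT files is split by the printf() calls above into two 32-bit words -- characters 4-11 become the first word and characters 0-3 the second, zero-extended -- so the generated starfire_firmware.h has roughly the following shape. The hex values here are invented purely for illustration:

static u32 firmware_rx[] = {
  0x00112233, 0x0000aabb,
  0x44556677, 0x0000ccdd,
};	/* 2 Rx instructions */
#define FIRMWARE_RX_SIZE 2

static u32 firmware_tx[] = {
  0x8899aabb, 0x00001122,
  0xccddeeff, 0x00003344,
};	/* 2 Tx instructions */
#define FIRMWARE_TX_SIZE 2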
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
new file mode 100644
index 000000000000..b6dfdf8f44da
--- /dev/null
+++ b/drivers/net/stnic.c
@@ -0,0 +1,320 @@
+/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 kaz Kojima
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/se/se.h>
+#include <asm/machvec.h>
+#ifdef CONFIG_SH_STANDARD_BIOS
+#include <asm/sh_bios.h>
+#endif
+
+#include "8390.h"
+
+#define DRV_NAME "stnic"
+
+#define byte unsigned char
+#define half unsigned short
+#define word unsigned int
+#define vbyte volatile unsigned char
+#define vhalf volatile unsigned short
+#define vword volatile unsigned int
+
+#define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */
+
+#define START_PG 0 /* First page of TX buffer */
+#define STOP_PG 128 /* Last page +1 of RX ring */
+
+/* Alias */
+#define STNIC_CR E8390_CMD
+#define PG0_RSAR0 EN0_RSARLO
+#define PG0_RSAR1 EN0_RSARHI
+#define PG0_RBCR0 EN0_RCNTLO
+#define PG0_RBCR1 EN0_RCNTHI
+
+#define CR_RRD E8390_RREAD
+#define CR_RWR E8390_RWRITE
+#define CR_PG0 E8390_PAGE0
+#define CR_STA E8390_START
+#define CR_RDMA E8390_NODMA
+
+/* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. */
+static byte stnic_eadr[6] =
+{0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07};
+
+static struct net_device *stnic_dev;
+
+static int stnic_open (struct net_device *dev);
+static int stnic_close (struct net_device *dev);
+static void stnic_reset (struct net_device *dev);
+static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void stnic_block_input (struct net_device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void stnic_block_output (struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+static void stnic_init (struct net_device *dev);
+
+/* SH7750 specific read/write io. */
+static inline void
+STNIC_DELAY (void)
+{
+ vword trash;
+ trash = *(vword *) 0xa0000000;
+ trash = *(vword *) 0xa0000000;
+ trash = *(vword *) 0xa0000000;
+}
+
+static inline byte
+STNIC_READ (int reg)
+{
+ byte val;
+
+ val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff;
+ STNIC_DELAY ();
+ return val;
+}
+
+static inline void
+STNIC_WRITE (int reg, byte val)
+{
+ *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8);
+ STNIC_DELAY ();
+}
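The two accessors above hide the SH7750 attachment: each 8390 register occupies its own 16-bit slot at PA_83902 + (reg << 1) with the data in the upper byte, and every access is padded with the dummy reads in STNIC_DELAY(). Register sequences are then plain byte writes; the helper below is only an illustration (hypothetical name) of how the remote-DMA setup used later in this file is built from them:

/* Illustration only: program a remote read of 'count' bytes starting at
 * 'addr', the same sequence stnic_get_hdr()/stnic_block_input() use below.
 */
static inline void stnic_start_remote_read(unsigned int addr, unsigned int count)
{
	STNIC_WRITE (PG0_RSAR0, addr & 0xff);		/* remote start address, low/high */
	STNIC_WRITE (PG0_RSAR1, (addr >> 8) & 0xff);
	STNIC_WRITE (PG0_RBCR0, count & 0xff);		/* remote byte count, low/high */
	STNIC_WRITE (PG0_RBCR1, (count >> 8) & 0xff);
	STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);	/* start the remote read */
}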
+
+static int __init stnic_probe(void)
+{
+ struct net_device *dev;
+ int i, err;
+
+ /* If we are not running on a SolutionEngine, give up now */
+ if (! MACH_SE)
+ return -ENODEV;
+
+ /* New style probing API */
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_get_node_addr (stnic_eadr);
+#endif
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = stnic_eadr[i];
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = 0x1000;
+ dev->irq = IRQ_STNIC;
+ dev->open = &stnic_open;
+ dev->stop = &stnic_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (err) {
+ printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
+
+ ei_status.name = dev->name;
+ ei_status.word16 = 1;
+#ifdef __LITTLE_ENDIAN__
+ ei_status.bigendian = 0;
+#else
+ ei_status.bigendian = 1;
+#endif
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = STOP_PG;
+
+ ei_status.reset_8390 = &stnic_reset;
+ ei_status.get_8390_hdr = &stnic_get_hdr;
+ ei_status.block_input = &stnic_block_input;
+ ei_status.block_output = &stnic_block_output;
+
+ stnic_init (dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ return err;
+ }
+ stnic_dev = dev;
+
+ printk (KERN_INFO "NS ST-NIC 83902A\n");
+
+ return 0;
+}
+
+static int
+stnic_open (struct net_device *dev)
+{
+#if 0
+ printk (KERN_DEBUG "stnic open\n");
+#endif
+ ei_open (dev);
+ return 0;
+}
+
+static int
+stnic_close (struct net_device *dev)
+{
+ ei_close (dev);
+ return 0;
+}
+
+static void
+stnic_reset (struct net_device *dev)
+{
+ *(vhalf *) PA_83902_RST = 0;
+ udelay (5);
+ if (ei_debug > 1)
+ printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ *(vhalf *) PA_83902_RST = ~0;
+ udelay (5);
+}
+
+static void
+stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ half buf[2];
+
+ STNIC_WRITE (PG0_RSAR0, 0);
+ STNIC_WRITE (PG0_RSAR1, ring_page);
+ STNIC_WRITE (PG0_RBCR0, 4);
+ STNIC_WRITE (PG0_RBCR1, 0);
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+
+ buf[0] = *(vhalf *) PA_83902_IF;
+ STNIC_DELAY ();
+ buf[1] = *(vhalf *) PA_83902_IF;
+ STNIC_DELAY ();
+ hdr->next = buf[0] >> 8;
+ hdr->status = buf[0] & 0xff;
+#ifdef __LITTLE_ENDIAN__
+ hdr->count = buf[1];
+#else
+ hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
+#endif
+
+ if (ei_debug > 1)
+ printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ ring_page, hdr->status, hdr->next, hdr->count);
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+   The ST-NIC doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb,
+ int offset)
+{
+ char *buf = skb->data;
+ half val;
+
+ STNIC_WRITE (PG0_RSAR0, offset & 0xff);
+ STNIC_WRITE (PG0_RSAR1, offset >> 8);
+ STNIC_WRITE (PG0_RBCR0, length & 0xff);
+ STNIC_WRITE (PG0_RBCR1, length >> 8);
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+
+ if (length & 1)
+ length++;
+
+ while (length > 0)
+ {
+ val = *(vhalf *) PA_83902_IF;
+#ifdef __LITTLE_ENDIAN__
+ *buf++ = val & 0xff;
+ *buf++ = val >> 8;
+#else
+ *buf++ = val >> 8;
+ *buf++ = val & 0xff;
+#endif
+ STNIC_DELAY ();
+ length -= sizeof (half);
+ }
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
+
+static void
+stnic_block_output (struct net_device *dev, int length,
+ const unsigned char *buf, int output_page)
+{
+ STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+ STNIC_DELAY ();
+
+ STNIC_WRITE (PG0_RBCR0, length & 0xff);
+ STNIC_WRITE (PG0_RBCR1, length >> 8);
+ STNIC_WRITE (PG0_RSAR0, 0);
+ STNIC_WRITE (PG0_RSAR1, output_page);
+ STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA);
+
+ if (length & 1)
+ length++;
+
+ while (length > 0)
+ {
+#ifdef __LITTLE_ENDIAN__
+ *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0];
+#else
+ *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1];
+#endif
+ STNIC_DELAY ();
+ buf += sizeof (half);
+ length -= sizeof (half);
+ }
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
+
+/* Reset the chip and initialize the 8390 core. */
+static void
+stnic_init (struct net_device *dev)
+{
+ stnic_reset (dev);
+ NS8390_init (dev, 0);
+ return;
+}
+
+static void __exit stnic_cleanup(void)
+{
+ unregister_netdev(stnic_dev);
+ free_irq(stnic_dev->irq, stnic_dev);
+ free_netdev(stnic_dev);
+}
+
+module_init(stnic_probe);
+module_exit(stnic_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
new file mode 100644
index 000000000000..d5a58fb30d3a
--- /dev/null
+++ b/drivers/net/sun3_82586.c
@@ -0,0 +1,1211 @@
+/*
+ * Sun3 i82586 Ethernet driver
+ *
+ * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net)
+ *
+ * Original copyright follows:
+ * --------------------------
+ *
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
+ * --------------------------
+ *
+ * Consult ni52.c for further notes from the original driver.
+ *
+ * This incarnation currently supports the OBIO version of the i82586 chip
+ * used in certain sun3 models. It should be fairly doable to expand this
+ * to support VME if I should ever acquire such a board.
+ *
+ */
+
+static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume = 0; /* experimental .. better should be zero */
+static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+/* #define REALLY_SLOW_IO */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/sun3mmu.h>
+#include <asm/dvma.h>
+#include <asm/byteorder.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sun3_82586.h"
+
+#define DRV_NAME "sun3_82586"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 0 /* 16 Bit */
+#define SUN3_82586_TOTAL_SIZE PAGE_SIZE
+
+#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;}
+#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;}
+#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;}
+#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;}
+#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);}
+
+#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) )
+#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base))
+#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )))
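These three macros translate between the big-endian m68k view and the i82586's little-endian one: make16() turns a CPU pointer into a byte-swapped 16-bit offset from p->memtop, make24() into a byte-swapped bus address relative to p->base, and make32() goes back from a 16-bit offset to a CPU pointer. make16() and make32() are inverses for anything inside the 64 KB window above p->memtop, which is what lets the driver follow descriptor links both ways. A small reading aid, as a hypothetical helper that assumes the struct priv defined below:

/* Sketch only: a link stored with make16() can be followed back with make32(). */
static inline int sun3_82586_offset_round_trip(struct priv *p, void *cpu_ptr)
{
	unsigned short chip_off = make16(cpu_ptr);	/* CPU pointer -> swapped 16-bit offset */
	void *back = make32(chip_off);			/* swapped offset -> CPU pointer */

	return back == cpu_ptr;		/* holds while cpu_ptr lies within 64 KB of p->memtop */
}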
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode is it
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * If you don't know the driver well, do not change these values: */
+
+#define RECV_BUFF_SIZE 1536 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1536 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+
+/**************************************************************************/
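Taken with the sizes listed above, the default 32K layout budgets out roughly as follows (ignoring alignment and the transient configure/IA-setup/TDR commands, which reuse the space behind the SCB): each receive slot costs one RFD + one RBD + one buffer = 24 + 12 + 1536 = 1572 bytes, so the NUM_RECV_BUFFS_32 = 16 slots take 25,152 bytes; the single transmit path (two NOP commands, one transmit command, one TBD and one 1536-byte buffer) plus the ISCP and SCB adds about 1,600 bytes more, for roughly 26,750 bytes of the 0x8000-byte area and around 6,000 bytes of headroom (each extra RFD from rfdadd costs another 24 bytes).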
+
+/* different DELAYs */
+#define DELAY(x) mdelay(32 * x);
+#define DELAY_16(); { udelay(16); }
+#define DELAY_18(); { udelay(4); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() \
+{ int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if(swab16((addr)->cmd_status) & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
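The WAIT_4_SCB_CMD() macro above is easier to follow written out as a function: poll the CU command field up to 16384 times with a short delay between checks, and reset the chip once if it never clears. A sketch of that equivalent form (the driver itself uses only the macro):

/* Sketch of WAIT_4_SCB_CMD() as a function, for readability. */
static void wait_for_scb_cmd(struct net_device *dev, struct priv *p)
{
	int i;

	for (i = 0; i < 16384; i++) {
		if (!p->scb->cmd_cuc)
			return;
		DELAY_18();
	}
	printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
	       dev->name, p->scb->cmd_cuc, p->scb->cus);
	if (!p->reseted) {
		p->reseted = 1;
		sun3_reset586();
	}
}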
+static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr);
+static int sun3_82586_open(struct net_device *dev);
+static int sun3_82586_close(struct net_device *dev);
+static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev);
+#if 0
+static void sun3_82586_dump(struct net_device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev,char *where,unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev,void *ptr);
+static void sun3_82586_rcv_int(struct net_device *dev);
+static void sun3_82586_xmt_int(struct net_device *dev);
+static void sun3_82586_rnr_int(struct net_device *dev);
+
+struct priv
+{
+ struct net_device_stats stats;
+ unsigned long base;
+ char *memtop;
+ long int lock;
+ int reseted;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+/**********************************************
+ * close device
+ */
+static int sun3_82586_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+
+ sun3_reset586(); /* the hard way to stop the receiver */
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int sun3_82586_open(struct net_device *dev)
+{
+ int ret;
+
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+
+ ret = request_irq(dev->irq, &sun3_82586_interrupt,0,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+ return ret;
+ }
+
+ netif_start_queue(dev);
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = /* (struct priv *) dev->priv*/ &pb;
+ char *iscp_addr;
+ int i;
+
+ p->base = (unsigned long) dvma_btov(0);
+ p->memtop = (char *)dvma_btov((unsigned long)where);
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addr = (char *)dvma_btov((unsigned long)where);
+
+ p->iscp = (struct iscp_struct *) iscp_addr;
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ sun3_reset586();
+ sun3_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by sun3_82586_probe1 and sun3_82586_open.
+ */
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ sun3_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start);
+ p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+ p->iscp->scb_base = make24(dvma_btov(dev->mem_start));
+
+ p->iscp->busy = 1;
+ sun3_reset586();
+ sun3_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+struct net_device * __init sun3_82586_probe(int unit)
+{
+ struct net_device *dev;
+ unsigned long ioaddr;
+ static int found = 0;
+ int err = -ENOMEM;
+
+ /* check that this machine has an onboard 82586 */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_160:
+ case SM_SUN3|SM_3_260:
+ /* these machines have 82586 */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE);
+ if (!ioaddr)
+ return ERR_PTR(-ENOMEM);
+ found = 1;
+
+ dev = alloc_etherdev(sizeof(struct priv));
+ if (!dev)
+ goto out;
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ dev->irq = IE_IRQ;
+ dev->base_addr = ioaddr;
+ err = sun3_82586_probe1(dev, ioaddr);
+ if (err)
+ goto out1;
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+
+out2:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+out1:
+ free_netdev(dev);
+out:
+ iounmap((void *)ioaddr);
+ return ERR_PTR(err);
+}
+
+static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
+{
+ int i, size, retval;
+
+ if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
+
+ /*
+ * check (or search) IO-Memory, 32K
+ */
+ size = 0x8000;
+
+ dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000);
+ dev->mem_end = dev->mem_start + size;
+
+ if(size != 0x2000 && size != 0x4000 && size != 0x8000) {
+ printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size);
+ retval = -ENODEV;
+ goto out;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ retval = -ENODEV;
+ goto out;
+ }
+
+ ((struct priv *) (dev->priv))->memtop = (char *)dvma_btov(dev->mem_start);
+ ((struct priv *) (dev->priv))->base = (unsigned long) dvma_btov(0);
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else if(size == 0x4000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
+ else
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_32;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
+
+ dev->open = sun3_82586_open;
+ dev->stop = sun3_82586_close;
+ dev->get_stats = sun3_82586_get_stats;
+ dev->tx_timeout = sun3_82586_timeout;
+ dev->watchdog_timeo = HZ/20;
+ dev->hard_start_xmit = sun3_82586_send_packet;
+ dev->set_multicast_list = set_multicast_list;
+
+ dev->if_port = 0;
+ return 0;
+out:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+ return retval;
+}
+
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int num_addrs=dev->mc_count;
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST);
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ dev->flags|=IFF_PROMISC;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ {
+ cfg_cmd->promisc=1;
+ dev->flags|=IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+	 * TDR, wire check .. e.g. no terminating resistor, etc.
+ */
+
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST);
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = swab16(tdr_cmd->status);
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST);
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = swab16(num_addrs * 6);
+
+ for(i=0;i<num_addrs;i++,dmi=dmi->next)
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)dev->mem_end)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
+ p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT);
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586();
+ DELAY_16();
+
+ sun3_enaint();
+ sun3_active();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for sun3_82586_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff;
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = swab16(RECV_BUFF_SIZE);
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr)
+{
+ struct net_device *dev = dev_id;
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq);
+ return IRQ_NONE;
+ }
+ p = (struct priv *) dev->priv;
+
+ if(debuglevel > 1)
+ printk("I");
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat;
+ sun3_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ sun3_82586_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ sun3_82586_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ sun3_82586_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(netif_running(dev))
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ sun3_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void sun3_82586_rcv_int(struct net_device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb->dev = dev;
+ skb_reserve(skb,2);
+ skb_put(skb,totlen);
+ eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ p->stats.rx_packets++;
+ }
+ else
+ p->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=swab16(rbd->status)) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ p->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ p->stats.rx_errors++;
+ }
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void sun3_82586_rnr_int(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void sun3_82586_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status);
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ p->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ p->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ sun3_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+static void sun3_82586_timeout(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ netif_wake_queue(dev);
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
+#endif
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies;
+		return;
+ }
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+#endif
+ sun3_82586_close(dev);
+ sun3_82586_open(dev);
+ }
+ dev->trans_start = jiffies;
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+	if(skb->len > XMIT_BUFF_SIZE)
+	{
+		printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+ netif_stop_queue(dev);
+
+#if(NUM_XMIT_BUFFS > 1)
+ if(test_and_set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return 1;
+ }
+ else
+#endif
+ {
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
+ len = ETH_ZLEN;
+ }
+ memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+ printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
+ }
+#endif
+
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ sun3_attn586();
+ dev->trans_start = jiffies;
+ if(!i)
+ dev_kfree_skb(skb);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* linkpointer of xmit-command already points to next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+ if(p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ p->lock = 0;
+ local_irq_restore(flags);
+ }
+ dev_kfree_skb(skb);
+#endif
+ }
+ return 0;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */
+ p->scb->crc_errs = 0;
+ aln = swab16(p->scb->aln_errs);
+ p->scb->aln_errs = 0;
+ rsc = swab16(p->scb->rsc_errs);
+ p->scb->rsc_errs = 0;
+ ovrn = swab16(p->scb->ovrn_errs);
+ p->scb->ovrn_errs = 0;
+
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+#error This code is not currently supported as a module
+static struct net_device *dev_sun3_82586;
+
+int init_module(void)
+{
+ dev_sun3_82586 = sun3_82586_probe(-1);
+ if (IS_ERR(dev_sun3_82586))
+ return PTR_ERR(dev_sun3_82586);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unsigned long ioaddr = dev_sun3_82586->base_addr;
+ unregister_netdev(dev_sun3_82586);
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+ iounmap((void *)ioaddr);
+ free_netdev(dev_sun3_82586);
+}
+#endif /* MODULE */
+
+#if 0
+/*
+ * DUMP .. we expect a not running CMD unit and enough space
+ */
+void sun3_82586_dump(struct net_device *dev,void *ptr)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sun3_82586.h b/drivers/net/sun3_82586.h
new file mode 100644
index 000000000000..81cfb098bcca
--- /dev/null
+++ b/drivers/net/sun3_82586.h
@@ -0,0 +1,318 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+/*
+ * Cloned from ni52.h, copyright as above.
+ *
+ * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net)
+ */
+
+
+/* defines for the obio chip (not vme) */
+#define IEOB_NORSET 0x80 /* don't reset the board */
+#define IEOB_ONAIR 0x40 /* put us on the air */
+#define IEOB_ATTEN 0x20 /* attention! */
+#define IEOB_IENAB 0x10 /* interrupt enable */
+#define IEOB_XXXXX 0x08 /* free bit */
+#define IEOB_XCVRL2 0x04 /* level 2 transceiver? */
+#define IEOB_BUSERR 0x02 /* bus error */
+#define IEOB_INT 0x01 /* interrupt */
+
+/* where the obio one lives */
+#define IE_OBIO 0xc0000
+#define IE_IRQ 3
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned char rus;
+ unsigned char cus;
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+	unsigned short aln_errs;	/* alignment error counter */
+	unsigned short rsc_errs;	/* resource error counter */
+	unsigned short ovrn_errs;	/* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN	0x10	/* Length error (if length-checking is enabled) */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS	0x0080	/* Frame too short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD	0x0002	/* status: destination address does not match the IA (only 82596) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Status values for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+};
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short dump_offset; /* pointeroffset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointer offset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+
+
+
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
new file mode 100644
index 000000000000..1f43bbfbc1c7
--- /dev/null
+++ b/drivers/net/sun3lance.c
@@ -0,0 +1,965 @@
+/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
+/*
+
+ Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
+ This driver is a part of the linux kernel, and is thus distributed
+ under the GNU General Public License.
+
+ The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically
+ true for the correct IRQ and address of the lance registers. They
+ have not been widely tested, however. What we probably need is a
+ "proper" way to search for a device in the sun3's prom, but, alas,
+ linux has no such thing.
+
+ This driver is largely based on atarilance.c, by Roman Hodek. Other
+ sources of inspiration were the NetBSD sun3 am7990 driver, and the
+ linux sparc lance driver (sunlance.c).
+
+ More assumptions are made throughout this driver; it almost certainly
+ still needs work, but it does work at least for RARP/BOOTP and mounting
+ the root NFS filesystem.
+
+*/
+
+static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+
+#ifdef CONFIG_SUN3
+#include <asm/sun3mmu.h>
+#else
+#include <asm/sun3xprom.h>
+#endif
+
+/* sun3/60 addr/irq for the lance chip. If your sun is different,
+ change this. */
+#define LANCE_OBIO 0x120000
+#define LANCE_IRQ IRQ3
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug info
+ * 3 = debug, print even more debug info (packet data)
+ */
+
+#define LANCE_DEBUG 0
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_LICENSE("GPL");
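+/* When built as a module, the debug level can be set at load time,
+ * e.g. "modprobe sun3lance lance_debug=2". */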
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+
+/* we're only using 32k of memory, so we use 4 TX
+ buffers and 16 RX buffers. These values are expressed as log2. */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
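+
+/* The log2 values are what the chip's init block wants: lance_init_ring()
+ * stores them in the top three bits of the rlen/tlen words (the "<< 13"). */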
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base)
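+/* The OR works because the shared memory block is allocated 64KB-aligned
+ * (dvma_malloc_align(..., 0x10000) in lance_probe()) and is smaller than
+ * 64KB, so the low 16 address bits from the descriptor select a buffer
+ * inside it. */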
+
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned int filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ unsigned short rdra;
+ unsigned short rlen;
+ unsigned short tdra;
+ unsigned short tlen;
+ unsigned short pad[4]; /* is this needed? */
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
+ char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ volatile unsigned short *iobase;
+ struct lance_memory *mem;
+ int new_rx, new_tx; /* The next free ring entry */
+ int old_tx, old_rx; /* ring entry to be processed */
+ struct net_device_stats stats;
+/* These two must be longs for set_bit() */
+ long tx_full;
+ long lock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG lp->iobase[0]
+#define AREG lp->iobase[1]
+#define REGA(a) ( AREG = (a), DREG )
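+
+/* The chip is programmed through two 16-bit ports: AREG (the register
+ * address port) selects a CSR number, DREG (the data port) then reads or
+ * writes that CSR; REGA(a) does the select-then-access in one expression. */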
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+/***************************** Prototypes *****************************/
+
+static int lance_probe( struct net_device *dev);
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id, struct pt_regs *fp );
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static struct net_device_stats *lance_get_stats( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+
+/************************* End of Prototypes **************************/
+
+struct net_device * __init sun3lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int found;
+ int err = -ENODEV;
+
+ /* check that this machine has an onboard lance */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_50:
+ case SM_SUN3|SM_3_60:
+ case SM_SUN3X|SM_3_80:
+ /* these machines have lance */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+ SET_MODULE_OWNER(dev);
+
+ if (!lance_probe(dev))
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ found = 1;
+ return dev;
+
+out1:
+#ifdef CONFIG_SUN3
+ iounmap((void *)dev->base_addr);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init lance_probe( struct net_device *dev)
+{
+ unsigned long ioaddr;
+
+ struct lance_private *lp;
+ int i;
+ static int did_version;
+ volatile unsigned short *ioaddr_probe;
+ unsigned short tmp1, tmp2;
+
+#ifdef CONFIG_SUN3
+ ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
+ if (!ioaddr)
+ return 0;
+#else
+ ioaddr = SUN3X_LANCE;
+#endif
+
+ /* test to see if there's really a lance here */
+ /* (CSR0_INIT shouldn't be readable) */
+
+ ioaddr_probe = (volatile unsigned short *)ioaddr;
+ tmp1 = ioaddr_probe[0];
+ tmp2 = ioaddr_probe[1];
+
+ ioaddr_probe[1] = CSR0;
+ ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;
+
+ if(ioaddr_probe[0] != CSR0_STOP) {
+ ioaddr_probe[0] = tmp1;
+ ioaddr_probe[1] = tmp2;
+
+#ifdef CONFIG_SUN3
+ iounmap((void *)ioaddr);
+#endif
+ return 0;
+ }
+
+ lp = netdev_priv(dev);
+
+ /* XXX - leak? */
+ MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
+
+ lp->iobase = (volatile unsigned short *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+
+ REGA(CSR0) = CSR0_STOP;
+
+ request_irq(LANCE_IRQ, lance_interrupt, SA_INTERRUPT, "SUN3 Lance", dev);
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+ printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
+ dev->name,
+ (unsigned long)ioaddr,
+ (unsigned long)MEM,
+ dev->irq);
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* tell the card its ether address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ for( i = 0; i < 6; ++i )
+ printk( "%02x%s", dev->dev_addr[i], (i < 5) ? ":" : "\n" );
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+ DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
+ dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
+ (dvma_vtob(MEM->tx_head))));
+
+ if (did_version++ == 0)
+ printk( version );
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = &lance_open;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->stop = &lance_close;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->set_mac_address = 0;
+// KLUDGE -- REMOVE ME
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+
+ memset( &lp->stats, 0, sizeof(lp->stats) );
+
+ return 1;
+}
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ REGA(CSR0) = CSR0_STOP;
+
+ lance_init_ring(dev);
+
+ /* From now on, AREG is kept to point to CSR0 */
+ REGA(CSR0) = CSR0_INIT;
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) { /* i stops at 0, never below, on timeout */
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return( -EIO );
+ }
+
+ DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
+
+ netif_start_queue(dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return( 0 );
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->lock = 0;
+ lp->tx_full = 0;
+ lp->new_rx = lp->new_tx = 0;
+ lp->old_rx = lp->old_tx = 0;
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
+ MEM->tx_head[i].flag = 0;
+ MEM->tx_head[i].base_hi =
+ (dvma_vtob(MEM->tx_data[i])) >>16;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
+ MEM->rx_head[i].flag = RMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi =
+ (dvma_vtob(MEM->rx_data[i])) >> 16;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000; /* 2's complement; top nibble must read as ones */
+ MEM->rx_head[i].msg_length = 0;
+ }
+
+ /* tell the card its ether address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+
+ /* tell the lance the address of its init block */
+ REGA(CSR1) = dvma_vtob(&(MEM->init));
+ REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;
+
+#ifdef CONFIG_SUN3X
+ REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
+#else
+ REGA(CSR3) = CSR3_BSWP;
+#endif
+
+}
+
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 1, ( "%s: transmit start.\n",
+ dev->name));
+
+ /* Transmitter timeout, serious problems. */
+ if (netif_queue_stopped(dev)) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return( 1 );
+
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA(CSR3) = CSR3_BSWP;
+ lp->stats.tx_errors++;
+
+ if(lance_debug >= 2) {
+ int i;
+ printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
+ lp->old_tx, lp->new_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->new_rx );
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length);
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ printk("tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc );
+ }
+
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+
+ netif_start_queue(dev);
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+
+ /* Block a timer-based transmit from overlapping with us by
+ stopping the queue for a bit... */
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
+ printk( "%s: tx queue lock!\n", dev->name);
+ /* leave the queue stopped. */
+ return 1;
+ }
+
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+#ifdef CONFIG_SUN3X
+ /* this weirdness doesn't appear on sun3... */
+ if(!(DREG & CSR0_INIT)) {
+ DPRINTK( 1, ("INIT not set, reinitializing...\n"));
+ REGA( CSR0 ) = CSR0_STOP;
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
+ }
+#endif
+
+ /* Fill in a Tx ring entry */
+#if 0
+ if (lance_debug >= 2) {
+ u_char *p;
+ int i;
+ printk( "%s: TX pkt %d type 0x%04x from ", dev->name,
+ lp->new_tx, ((u_short *)skb->data)[6]);
+ for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" to ");
+ for( p = (u_char *)skb->data, i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" data at 0x%08x len %d\n", (int)skb->data,
+ (int)skb->len );
+ }
+#endif
+ /* We're not prepared for the interrupt until the last flags are
+ * set/reset, and the interrupt may fire as soon as OWN_CHIP is set... */
+ local_irq_save(flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->new_tx;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+ /* the sun3's lance needs its buffer padded to the minimum
+ packet size */
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+// head->length = -len;
+ head->length = (-len) | 0xf000; /* 2's complement length; top nibble must be ones */
+ head->misc = 0;
+
+ memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ if (len != skb->len)
+ memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
+
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
+ lp->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
+ dev->name, DREG ));
+ dev->trans_start = jiffies;
+ dev_kfree_skb( skb );
+
+ lp->lock = 0;
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue(dev);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id, struct pt_regs *fp)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+ static int in_interrupt;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
+ return IRQ_NONE;
+ }
+
+ if (in_interrupt)
+ DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
+ in_interrupt = 1;
+
+ still_more:
+ flush_cache_all();
+
+ AREG = CSR0;
+ csr0 = DREG;
+
+ /* ack interrupts */
+ DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
+
+ /* clear errors */
+ if(csr0 & CSR0_ERR)
+ DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;
+
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int old_tx = lp->old_tx;
+
+// if(lance_debug >= 3) {
+// int i;
+//
+// printk("%s: tx int\n", dev->name);
+//
+// for(i = 0; i < TX_RING_SIZE; i++)
+// printk("ring %d flag=%04x\n", i,
+// MEM->tx_head[i].flag);
+// }
+
+ while( old_tx != lp->new_tx) {
+ struct lance_tx_head *head = &(MEM->tx_head[old_tx]);
+
+ DPRINTK(3, ("on tx_ring %d\n", old_tx));
+
+ if (head->flag & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ if (head->flag & TMD1_ERR) {
+ int status = head->misc;
+ lp->stats.tx_errors++;
+ if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) lp->stats.tx_window_errors++;
+ if (status & (TMD3_UFLO | TMD3_BUFF)) {
+ lp->stats.tx_fifo_errors++;
+ printk("%s: Tx FIFO error\n",
+ dev->name);
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ return IRQ_HANDLED;
+ }
+ } else if(head->flag & (TMD1_ENP | TMD1_STP)) {
+
+ head->flag &= ~(TMD1_ENP | TMD1_STP);
+ if(head->flag & (TMD1_ONE | TMD1_MORE))
+ lp->stats.collisions++;
+
+ lp->stats.tx_packets++;
+ DPRINTK(3, ("cleared tx ring %d\n", old_tx));
+ }
+ old_tx = (old_tx +1) & TX_RING_MOD_MASK;
+ }
+
+ lp->old_tx = old_tx;
+ }
+
+
+ if (netif_queue_stopped(dev)) {
+ /* The ring is no longer full, restart the transmit queue. */
+ netif_start_queue(dev);
+ netif_wake_queue(dev);
+ }
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ }
+
+
+ /* Clear any other interrupt, and set interrupt enable. */
+// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+// CSR0_IDON | CSR0_INEA;
+
+ REGA(CSR0) = CSR0_INEA;
+
+ if(DREG & (CSR0_RINT | CSR0_TINT)) {
+ DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
+ goto still_more;
+ }
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+ in_interrupt = 0;
+ return IRQ_HANDLED;
+}
+
+/* get packet, toss into skbuff */
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->new_rx;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with
+ full-sized buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) lp->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) lp->stats.rx_over_errors++;
+ if (status & RMD1_CRC) lp->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+// short pkt_len = head->msg_length;// & 0xfff;
+ short pkt_len = (head->msg_length & 0xfff) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ lp->stats.rx_errors++;
+ }
+ else {
+ skb = dev_alloc_skb( pkt_len+2 );
+ if (skb == NULL) {
+ DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
+ dev->name ));
+
+ lp->stats.rx_dropped++;
+ head->msg_length = 0;
+ head->flag |= RMD1_OWN_CHIP;
+ lp->new_rx = (lp->new_rx+1) &
+ RX_RING_MOD_MASK;
+ break; /* skb is NULL -- the code below must not touch it */
+ }
+
+#if 0
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head), *p;
+ printk( "%s: RX pkt %d type 0x%04x from ", dev->name, entry, ((u_short *)data)[6]);
+ for( p = &data[6], i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" to ");
+ for( p = data, i = 0; i < 6; i++ )
+ printk("%02x%s", *p++, i != 5 ? ":" : "" );
+ printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d at %08x\n",
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len, data);
+ }
+#endif
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+ printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len);
+ }
+
+
+ skb->dev = dev;
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+// memcpy( skb->data, PKTBUF_ADDR(head), pkt_len );
+ eth_copy_and_sum(skb,
+ PKTBUF_ADDR(head),
+ pkt_len, 0);
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ }
+
+// head->buf_length = -PKT_BUF_SZ | 0xf000;
+ head->msg_length = 0;
+ head->flag = RMD1_OWN_CHIP;
+
+ entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
+ }
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free.
+ If not, we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+ return 0;
+}
+
+
+static struct net_device_stats *lance_get_stats( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+/* completely untested on a sun3 */
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ if(netif_queue_stopped(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out: for multicast we just accept everything
+ and rely on upper-layer filtering. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = dev->mc_count;
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP;
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *sun3lance_dev;
+
+int init_module(void)
+{
+ sun3lance_dev = sun3lance_probe(-1);
+ if (IS_ERR(sun3lance_dev))
+ return PTR_ERR(sun3lance_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(sun3lance_dev);
+#ifdef CONFIG_SUN3
+ iounmap((void *)sun3lance_dev->base_addr);
+#endif
+ free_netdev(sun3lance_dev);
+}
+
+#endif /* MODULE */
+
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
new file mode 100644
index 000000000000..025dcd867eaa
--- /dev/null
+++ b/drivers/net/sunbmac.c
@@ -0,0 +1,1324 @@
+/* $Id: sunbmac.c,v 1.30 2002/01/15 06:48:55 davem Exp $
+ * sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/auxio.h>
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/idprom.h>
+#include <asm/io.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/pgtable.h>
+#include <asm/sbus.h>
+#include <asm/system.h>
+
+#include "sunbmac.h"
+
+static char version[] __initdata =
+ "sunbmac.c:v2.0 24/Nov/03 David S. Miller (davem@redhat.com)\n";
+
+#undef DEBUG_PROBE
+#undef DEBUG_TX
+#undef DEBUG_IRQ
+
+#ifdef DEBUG_PROBE
+#define DP(x) printk x
+#else
+#define DP(x)
+#endif
+
+#ifdef DEBUG_TX
+#define DTX(x) printk x
+#else
+#define DTX(x)
+#endif
+
+#ifdef DEBUG_IRQ
+#define DIRQ(x) printk x
+#else
+#define DIRQ(x)
+#endif
+
+static struct bigmac *root_bigmac_dev;
+
+#define DEFAULT_JAMSIZE 4 /* Toe jam */
+
+#define QEC_RESET_TRIES 200
+
+static int qec_global_reset(void __iomem *gregs)
+{
+ int tries = QEC_RESET_TRIES;
+
+ sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+ while (--tries) {
+ if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (tries)
+ return 0;
+ printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
+ return -1;
+}
+
+static void qec_init(struct bigmac *bp)
+{
+ void __iomem *gregs = bp->gregs;
+ struct sbus_dev *qec_sdev = bp->qec_sdev;
+ u8 bsizes = bp->bigmac_bursts;
+ u32 regval;
+
+ /* 64byte bursts do not work at the moment, do
+ * not even try to enable them. -DaveM
+ */
+ if (bsizes & DMA_BURST32)
+ regval = GLOB_CTRL_B32;
+ else
+ regval = GLOB_CTRL_B16;
+ sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
+ sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
+
+ /* All of memsize is given to bigmac. */
+ sbus_writel(qec_sdev->reg_addrs[1].reg_size,
+ gregs + GLOB_MSIZE);
+
+ /* Half to the transmitter, half to the receiver. */
+ sbus_writel(qec_sdev->reg_addrs[1].reg_size >> 1,
+ gregs + GLOB_TSIZE);
+ sbus_writel(qec_sdev->reg_addrs[1].reg_size >> 1,
+ gregs + GLOB_RSIZE);
+}
+
+#define TX_RESET_TRIES 32
+#define RX_RESET_TRIES 32
+
+static void bigmac_tx_reset(void __iomem *bregs)
+{
+ int tries = TX_RESET_TRIES;
+
+ sbus_writel(0, bregs + BMAC_TXCFG);
+
+ /* The fifo threshold bit is read-only and does
+ * not clear. -DaveM
+ */
+ while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
+ --tries != 0)
+ udelay(20);
+
+ if (!tries) {
+ printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
+ printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
+ sbus_readl(bregs + BMAC_TXCFG));
+ }
+}
+
+static void bigmac_rx_reset(void __iomem *bregs)
+{
+ int tries = RX_RESET_TRIES;
+
+ sbus_writel(0, bregs + BMAC_RXCFG);
+ while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
+ udelay(20);
+
+ if (!tries) {
+ printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
+ printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
+ sbus_readl(bregs + BMAC_RXCFG));
+ }
+}
+
+/* Reset the transmitter and receiver. */
+static void bigmac_stop(struct bigmac *bp)
+{
+ bigmac_tx_reset(bp->bregs);
+ bigmac_rx_reset(bp->bregs);
+}
+
+static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
+{
+ struct net_device_stats *stats = &bp->enet_stats;
+
+ stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
+ sbus_writel(0, bregs + BMAC_RCRCECTR);
+
+ stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
+ sbus_writel(0, bregs + BMAC_UNALECTR);
+
+ stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
+ sbus_writel(0, bregs + BMAC_GLECTR);
+
+ stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
+
+ stats->collisions +=
+ (sbus_readl(bregs + BMAC_EXCTR) +
+ sbus_readl(bregs + BMAC_LTCTR));
+ sbus_writel(0, bregs + BMAC_EXCTR);
+ sbus_writel(0, bregs + BMAC_LTCTR);
+}
+
+static void bigmac_clean_rings(struct bigmac *bp)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (bp->rx_skbs[i] != NULL) {
+ dev_kfree_skb_any(bp->rx_skbs[i]);
+ bp->rx_skbs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (bp->tx_skbs[i] != NULL) {
+ dev_kfree_skb_any(bp->tx_skbs[i]);
+ bp->tx_skbs[i] = NULL;
+ }
+ }
+}
+
+static void bigmac_init_rings(struct bigmac *bp, int from_irq)
+{
+ struct bmac_init_block *bb = bp->bmac_block;
+ struct net_device *dev = bp->dev;
+ int i, gfp_flags = GFP_KERNEL;
+
+ if (from_irq || in_interrupt())
+ gfp_flags = GFP_ATOMIC;
+
+ bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
+
+ /* Free any skippy bufs left around in the rings. */
+ bigmac_clean_rings(bp);
+
+ /* Now get new skbufs for the receive ring. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
+ if (!skb)
+ continue;
+
+ bp->rx_skbs[i] = skb;
+ skb->dev = dev;
+
+ /* Because we reserve afterwards. */
+ skb_put(skb, ETH_FRAME_LEN);
+ skb_reserve(skb, 34);
+
+ bb->be_rxd[i].rx_addr =
+ sbus_map_single(bp->bigmac_sdev, skb->data,
+ RX_BUF_ALLOC_SIZE - 34,
+ SBUS_DMA_FROMDEVICE);
+ bb->be_rxd[i].rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
+}
+
+#define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
+#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
+
+static void idle_transceiver(void __iomem *tregs)
+{
+ int i = 20;
+
+ while (i--) {
+ sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ }
+}
+
+static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
+{
+ if (bp->tcvr_type == internal) {
+ bit = (bit & 1) << 3;
+ sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else if (bp->tcvr_type == external) {
+ bit = (bit & 1) << 2;
+ sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else {
+ printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
+ }
+}
+
+static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
+{
+ int retval = 0;
+
+ if (bp->tcvr_type == internal) {
+ sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+ } else if (bp->tcvr_type == external) {
+ sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+ } else {
+ printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
+ }
+ return retval;
+}
+
+static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
+{
+ int retval = 0;
+
+ if (bp->tcvr_type == internal) {
+ sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+ sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else if (bp->tcvr_type == external) {
+ sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else {
+ printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
+ }
+ return retval;
+}
+
+static void put_tcvr_byte(struct bigmac *bp,
+ void __iomem *tregs,
+ unsigned int byte)
+{
+ int shift = 4;
+
+ do {
+ write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
+ shift -= 1;
+ } while (shift >= 0);
+}
+
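+/* MDIO is bit-banged through the management PAL.  The sequences below
+ * follow the standard MII management frame: start bits (01), an opcode
+ * (01 = write, 10 = read), a 5-bit PHY address, a 5-bit register address,
+ * a turnaround, then 16 data bits. */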
+static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
+ int reg, unsigned short val)
+{
+ int shift;
+
+ reg &= 0xff;
+ val &= 0xffff;
+ switch(bp->tcvr_type) {
+ case internal:
+ case external:
+ break;
+
+ default:
+ printk(KERN_ERR "bigmac_tcvr_write: Whoops, no known transceiver type.\n");
+ return;
+ };
+
+ idle_transceiver(tregs);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+
+ put_tcvr_byte(bp, tregs,
+ ((bp->tcvr_type == internal) ?
+ BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+ put_tcvr_byte(bp, tregs, reg);
+
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+
+ shift = 15;
+ do {
+ write_tcvr_bit(bp, tregs, (val >> shift) & 1);
+ shift -= 1;
+ } while (shift >= 0);
+}
+
+static unsigned short bigmac_tcvr_read(struct bigmac *bp,
+ void __iomem *tregs,
+ int reg)
+{
+ unsigned short retval = 0;
+
+ reg &= 0xff;
+ switch(bp->tcvr_type) {
+ case internal:
+ case external:
+ break;
+
+ default:
+ printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
+ return 0xffff;
+ };
+
+ idle_transceiver(tregs);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+
+ put_tcvr_byte(bp, tregs,
+ ((bp->tcvr_type == internal) ?
+ BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+ put_tcvr_byte(bp, tregs, reg);
+
+ if (bp->tcvr_type == external) {
+ int shift = 15;
+
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+
+ do {
+ int tmp;
+
+ tmp = read_tcvr_bit2(bp, tregs);
+ retval |= ((tmp & 1) << shift);
+ shift -= 1;
+ } while (shift >= 0);
+
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+ } else {
+ int shift = 15;
+
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+
+ do {
+ int tmp;
+
+ tmp = read_tcvr_bit(bp, tregs);
+ retval |= ((tmp & 1) << shift);
+ shift -= 1;
+ } while (shift >= 0);
+
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+ }
+ return retval;
+}
+
+static void bigmac_tcvr_init(struct bigmac *bp)
+{
+ void __iomem *tregs = bp->tregs;
+ u32 mpal;
+
+ idle_transceiver(tregs);
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+
+ /* Only the bit for the present transceiver (internal or
+ * external) will stick; set them both and see which one stays.
+ */
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ udelay(20);
+
+ mpal = sbus_readl(tregs + TCVR_MPAL);
+ if (mpal & MGMT_PAL_EXT_MDIO) {
+ bp->tcvr_type = external;
+ sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+ tregs + TCVR_TPAL);
+ sbus_readl(tregs + TCVR_TPAL);
+ } else if (mpal & MGMT_PAL_INT_MDIO) {
+ bp->tcvr_type = internal;
+ sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
+ TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+ tregs + TCVR_TPAL);
+ sbus_readl(tregs + TCVR_TPAL);
+ } else {
+ printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
+ "external MDIO available!\n");
+ printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
+ sbus_readl(tregs + TCVR_MPAL),
+ sbus_readl(tregs + TCVR_TPAL));
+ }
+}
+
+static int bigmac_init(struct bigmac *, int);
+
+static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
+{
+ if (bp->sw_bmcr & BMCR_SPEED100) {
+ int timeout;
+
+ /* Reset the PHY. */
+ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bp->sw_bmcr = (BMCR_RESET);
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+ timeout = 64;
+ while (--timeout) {
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ if ((bp->sw_bmcr & BMCR_RESET) == 0)
+ break;
+ udelay(20);
+ }
+ if (timeout == 0)
+ printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+ /* Now we try 10baseT. */
+ bp->sw_bmcr &= ~(BMCR_SPEED100);
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ return 0;
+ }
+
+ /* We've tried them all. */
+ return -1;
+}
+
+static void bigmac_timer(unsigned long data)
+{
+ struct bigmac *bp = (struct bigmac *) data;
+ void __iomem *tregs = bp->tregs;
+ int restart_timer = 0;
+
+ bp->timer_ticks++;
+ if (bp->timer_state == ltrywait) {
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ if (bp->sw_bmsr & BMSR_LSTATUS) {
+ printk(KERN_INFO "%s: Link is now up at %s.\n",
+ bp->dev->name,
+ (bp->sw_bmcr & BMCR_SPEED100) ?
+ "100baseT" : "10baseT");
+ bp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (bp->timer_ticks >= 4) {
+ int ret;
+
+ ret = try_next_permutation(bp, tregs);
+ if (ret == -1) {
+ printk(KERN_ERR "%s: Link down, cable problem?\n",
+ bp->dev->name);
+ ret = bigmac_init(bp, 0);
+ if (ret) {
+ printk(KERN_ERR "%s: Error, cannot re-init the "
+ "BigMAC.\n", bp->dev->name);
+ }
+ return;
+ }
+ bp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ } else {
+ /* Can't happen.... */
+ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
+ bp->dev->name);
+ restart_timer = 0;
+ bp->timer_ticks = 0;
+ bp->timer_state = asleep; /* foo on you */
+ }
+
+ if (restart_timer != 0) {
+ bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
+ add_timer(&bp->bigmac_timer);
+ }
+}
+
+/* Well, really we just force the chip into 100baseT then
+ * 10baseT, each time checking for a link status.
+ */
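+/* The timer below re-reads BMSR every 1.2 seconds; after about four
+ * unsuccessful ticks try_next_permutation() drops the speed from 100baseT
+ * to 10baseT, and the link is declared up as soon as BMSR_LSTATUS is seen. */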
+static void bigmac_begin_auto_negotiation(struct bigmac *bp)
+{
+ void __iomem *tregs = bp->tregs;
+ int timeout;
+
+ /* Grab new software copies of PHY registers. */
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+ /* Reset the PHY. */
+ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+ bp->sw_bmcr = (BMCR_RESET);
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+ timeout = 64;
+ while (--timeout) {
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+ if ((bp->sw_bmcr & BMCR_RESET) == 0)
+ break;
+ udelay(20);
+ }
+ if (timeout == 0)
+ printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+ /* First we try 100baseT. */
+ bp->sw_bmcr |= BMCR_SPEED100;
+ bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+ bp->timer_state = ltrywait;
+ bp->timer_ticks = 0;
+ bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
+ bp->bigmac_timer.data = (unsigned long) bp;
+ bp->bigmac_timer.function = &bigmac_timer;
+ add_timer(&bp->bigmac_timer);
+}
+
+static int bigmac_init(struct bigmac *bp, int from_irq)
+{
+ void __iomem *gregs = bp->gregs;
+ void __iomem *cregs = bp->creg;
+ void __iomem *bregs = bp->bregs;
+ unsigned char *e = &bp->dev->dev_addr[0];
+
+ /* Latch current counters into statistics. */
+ bigmac_get_counters(bp, bregs);
+
+ /* Reset QEC. */
+ qec_global_reset(gregs);
+
+ /* Init QEC. */
+ qec_init(bp);
+
+ /* Alloc and reset the tx/rx descriptor chains. */
+ bigmac_init_rings(bp, from_irq);
+
+ /* Initialize the PHY. */
+ bigmac_tcvr_init(bp);
+
+ /* Stop transmitter and receiver. */
+ bigmac_stop(bp);
+
+ /* Set hardware ethernet address. */
+ sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
+ sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
+ sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
+
+ /* Clear the hash table until mc upload occurs. */
+ sbus_writel(0, bregs + BMAC_HTABLE3);
+ sbus_writel(0, bregs + BMAC_HTABLE2);
+ sbus_writel(0, bregs + BMAC_HTABLE1);
+ sbus_writel(0, bregs + BMAC_HTABLE0);
+
+ /* Enable Big Mac hash table filter. */
+ sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
+ bregs + BMAC_RXCFG);
+ udelay(20);
+
+ /* Ok, configure the Big Mac transmitter. */
+ sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
+
+ /* The HME docs recommend using the 10 LSBs of our MAC here. */
+ sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
+ bregs + BMAC_RSEED);
+
+ /* Enable the output drivers no matter what. */
+ sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
+ bregs + BMAC_XIFCFG);
+
+ /* Tell the QEC where the ring descriptors are. */
+ sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+ cregs + CREG_RXDS);
+ sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+ cregs + CREG_TXDS);
+
+ /* Setup the FIFO pointers into QEC local memory. */
+ sbus_writel(0, cregs + CREG_RXRBUFPTR);
+ sbus_writel(0, cregs + CREG_RXWBUFPTR);
+ sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+ cregs + CREG_TXRBUFPTR);
+ sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+ cregs + CREG_TXWBUFPTR);
+
+ /* Tell bigmac what interrupts we don't want to hear about. */
+ sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
+ bregs + BMAC_IMASK);
+
+ /* Enable the various other irq's. */
+ sbus_writel(0, cregs + CREG_RIMASK);
+ sbus_writel(0, cregs + CREG_TIMASK);
+ sbus_writel(0, cregs + CREG_QMASK);
+ sbus_writel(0, cregs + CREG_BMASK);
+
+ /* Set jam size to a reasonable default. */
+ sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
+
+ /* Clear collision counter. */
+ sbus_writel(0, cregs + CREG_CCNT);
+
+ /* Enable transmitter and receiver. */
+ sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
+ bregs + BMAC_TXCFG);
+ sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
+ bregs + BMAC_RXCFG);
+
+ /* Ok, start detecting link speed/duplex. */
+ bigmac_begin_auto_negotiation(bp);
+
+ /* Success. */
+ return 0;
+}
+
+/* Error interrupts get sent here. */
+static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
+{
+ printk(KERN_ERR "bigmac_is_medium_rare: ");
+ if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
+ if (qec_status & GLOB_STAT_ER)
+ printk("QEC_ERROR, ");
+ if (qec_status & GLOB_STAT_BM)
+ printk("QEC_BMAC_ERROR, ");
+ }
+ if (bmac_status & CREG_STAT_ERRORS) {
+ if (bmac_status & CREG_STAT_BERROR)
+ printk("BMAC_ERROR, ");
+ if (bmac_status & CREG_STAT_TXDERROR)
+ printk("TXD_ERROR, ");
+ if (bmac_status & CREG_STAT_TXLERR)
+ printk("TX_LATE_ERROR, ");
+ if (bmac_status & CREG_STAT_TXPERR)
+ printk("TX_PARITY_ERROR, ");
+ if (bmac_status & CREG_STAT_TXSERR)
+ printk("TX_SBUS_ERROR, ");
+
+ if (bmac_status & CREG_STAT_RXDROP)
+ printk("RX_DROP_ERROR, ");
+
+ if (bmac_status & CREG_STAT_RXSMALL)
+ printk("RX_SMALL_ERROR, ");
+ if (bmac_status & CREG_STAT_RXLERR)
+ printk("RX_LATE_ERROR, ");
+ if (bmac_status & CREG_STAT_RXPERR)
+ printk("RX_PARITY_ERROR, ");
+ if (bmac_status & CREG_STAT_RXSERR)
+ printk("RX_SBUS_ERROR, ");
+ }
+
+ printk(" RESET\n");
+ bigmac_init(bp, 1);
+}
+
+/* BigMAC transmit complete service routines. */
+static void bigmac_tx(struct bigmac *bp)
+{
+ struct be_txd *txbase = &bp->bmac_block->be_txd[0];
+ struct net_device *dev = bp->dev;
+ int elem;
+
+ spin_lock(&bp->lock);
+
+ elem = bp->tx_old;
+ DTX(("bigmac_tx: tx_old[%d] ", elem));
+ while (elem != bp->tx_new) {
+ struct sk_buff *skb;
+ struct be_txd *this = &txbase[elem];
+
+ DTX(("this(%p) [flags(%08x)addr(%08x)]",
+ this, this->tx_flags, this->tx_addr));
+
+ if (this->tx_flags & TXD_OWN)
+ break;
+ skb = bp->tx_skbs[elem];
+ bp->enet_stats.tx_packets++;
+ bp->enet_stats.tx_bytes += skb->len;
+ sbus_unmap_single(bp->bigmac_sdev,
+ this->tx_addr, skb->len,
+ SBUS_DMA_TODEVICE);
+
+ DTX(("skb(%p) ", skb));
+ bp->tx_skbs[elem] = NULL;
+ dev_kfree_skb_irq(skb);
+
+ elem = NEXT_TX(elem);
+ }
+ DTX((" DONE, tx_old=%d\n", elem));
+ bp->tx_old = elem;
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(bp) > 0)
+ netif_wake_queue(bp->dev);
+
+ spin_unlock(&bp->lock);
+}
+
+/* BigMAC receive complete service routines. */
+static void bigmac_rx(struct bigmac *bp)
+{
+ struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
+ struct be_rxd *this;
+ int elem = bp->rx_new, drops = 0;
+ u32 flags;
+
+ this = &rxbase[elem];
+ while (!((flags = this->rx_flags) & RXD_OWN)) {
+ struct sk_buff *skb;
+ int len = (flags & RXD_LENGTH); /* FCS not included */
+
+ /* Check for errors. */
+ if (len < ETH_ZLEN) {
+ bp->enet_stats.rx_errors++;
+ bp->enet_stats.rx_length_errors++;
+
+ drop_it:
+ /* Return it to the BigMAC. */
+ bp->enet_stats.rx_dropped++;
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+ goto next;
+ }
+ skb = bp->rx_skbs[elem];
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ /* Now refill the entry, if we can. */
+ new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ sbus_unmap_single(bp->bigmac_sdev,
+ this->rx_addr,
+ RX_BUF_ALLOC_SIZE - 34,
+ SBUS_DMA_FROMDEVICE);
+ bp->rx_skbs[elem] = new_skb;
+ new_skb->dev = bp->dev;
+ skb_put(new_skb, ETH_FRAME_LEN);
+ skb_reserve(new_skb, 34);
+ this->rx_addr = sbus_map_single(bp->bigmac_sdev,
+ new_skb->data,
+ RX_BUF_ALLOC_SIZE - 34,
+ SBUS_DMA_FROMDEVICE);
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ copy_skb->dev = bp->dev;
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
+ this->rx_addr, len,
+ SBUS_DMA_FROMDEVICE);
+ eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
+ sbus_dma_sync_single_for_device(bp->bigmac_sdev,
+ this->rx_addr, len,
+ SBUS_DMA_FROMDEVICE);
+
+ /* Reuse original ring buffer. */
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+ skb = copy_skb;
+ }
+
+ /* No checksums done by the BigMAC ;-( */
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ netif_rx(skb);
+ bp->dev->last_rx = jiffies;
+ bp->enet_stats.rx_packets++;
+ bp->enet_stats.rx_bytes += len;
+ next:
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ bp->rx_new = elem;
+ if (drops)
+ printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
+}
+
+static irqreturn_t bigmac_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct bigmac *bp = (struct bigmac *) dev_id;
+ u32 qec_status, bmac_status;
+
+ DIRQ(("bigmac_interrupt: "));
+
+ /* Latch status registers now. */
+ bmac_status = sbus_readl(bp->creg + CREG_STAT);
+ qec_status = sbus_readl(bp->gregs + GLOB_STAT);
+
+ DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
+ if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
+ (bmac_status & CREG_STAT_ERRORS))
+ bigmac_is_medium_rare(bp, qec_status, bmac_status);
+
+ if (bmac_status & CREG_STAT_TXIRQ)
+ bigmac_tx(bp);
+
+ if (bmac_status & CREG_STAT_RXIRQ)
+ bigmac_rx(bp);
+
+ return IRQ_HANDLED;
+}
+
+static int bigmac_open(struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+ int ret;
+
+ ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ, dev->name, bp);
+ if (ret) {
+ printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
+ return ret;
+ }
+ init_timer(&bp->bigmac_timer);
+ ret = bigmac_init(bp, 0);
+ if (ret)
+ free_irq(dev->irq, bp);
+ return ret;
+}
+
+static int bigmac_close(struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+
+ del_timer(&bp->bigmac_timer);
+ bp->timer_state = asleep;
+ bp->timer_ticks = 0;
+
+ bigmac_stop(bp);
+ bigmac_clean_rings(bp);
+ free_irq(dev->irq, bp);
+ return 0;
+}
+
+static void bigmac_tx_timeout(struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+
+ bigmac_init(bp, 0);
+ netif_wake_queue(dev);
+}
+
+/* Put a packet on the wire. */
+static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+ int len, entry;
+ u32 mapping;
+
+ len = skb->len;
+ mapping = sbus_map_single(bp->bigmac_sdev, skb->data, len, SBUS_DMA_TODEVICE);
+
+ /* Avoid a race... */
+ spin_lock_irq(&bp->lock);
+ entry = bp->tx_new;
+ DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
+ bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
+ bp->tx_skbs[entry] = skb;
+ bp->bmac_block->be_txd[entry].tx_addr = mapping;
+ bp->bmac_block->be_txd[entry].tx_flags =
+ (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+ bp->tx_new = NEXT_TX(entry);
+ if (TX_BUFFS_AVAIL(bp) <= 0)
+ netif_stop_queue(dev);
+ spin_unlock_irq(&bp->lock);
+
+ /* Get it going. */
+ sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
+
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+
+ bigmac_get_counters(bp, bp->bregs);
+ return &bp->enet_stats;
+}
+
+static void bigmac_set_multicast(struct net_device *dev)
+{
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+ void __iomem *bregs = bp->bregs;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 tmp, crc;
+
+ /* Disable the receiver. The bit self-clears when
+ * the operation is complete.
+ */
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp &= ~(BIGMAC_RXCFG_ENABLE);
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+ while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
+ udelay(20);
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ sbus_writel(0xffff, bregs + BMAC_HTABLE0);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE1);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE2);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE3);
+ } else if (dev->flags & IFF_PROMISC) {
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp |= BIGMAC_RXCFG_PMISC;
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+ } else {
+ u16 hash_table[4];
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
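+ /* Build the 64-bit hash filter: the top six bits of the
+ * little-endian CRC of each multicast address select one
+ * bit in the four 16-bit HTABLE registers. */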
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
+ sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
+ sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
+ sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
+ }
+
+ /* Re-enable the receiver. */
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp |= BIGMAC_RXCFG_ENABLE;
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+}
+
+/* Ethtool support... */
+static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct bigmac *bp = dev->priv;
+
+ strcpy(info->driver, "sunbmac");
+ strcpy(info->version, "2.0");
+ sprintf(info->bus_info, "SBUS:%d",
+ bp->qec_sdev->slot);
+}
+
+static u32 bigmac_get_link(struct net_device *dev)
+{
+ struct bigmac *bp = dev->priv;
+
+ spin_lock_irq(&bp->lock);
+ bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
+ spin_unlock_irq(&bp->lock);
+
+ return (bp->sw_bmsr & BMSR_LSTATUS);
+}
+
+static struct ethtool_ops bigmac_ethtool_ops = {
+ .get_drvinfo = bigmac_get_drvinfo,
+ .get_link = bigmac_get_link,
+};
+
+static int __init bigmac_ether_init(struct sbus_dev *qec_sdev)
+{
+ struct net_device *dev;
+ static int version_printed;
+ struct bigmac *bp;
+ u8 bsizes, bsizes_more;
+ int i;
+
+ /* Get a new device struct for this interface. */
+ dev = alloc_etherdev(sizeof(struct bigmac));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = (long) qec_sdev;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
+ bp = dev->priv;
+ bp->qec_sdev = qec_sdev;
+ bp->bigmac_sdev = qec_sdev->child;
+
+ spin_lock_init(&bp->lock);
+
+ /* Verify that the registers we expect are actually there. */
+ if ((bp->bigmac_sdev->num_registers != 3) ||
+ (bp->qec_sdev->num_registers != 2)) {
+ printk(KERN_ERR "BIGMAC: Device does not have 2 and 3 regs, it has %d and %d.\n",
+ bp->qec_sdev->num_registers,
+ bp->bigmac_sdev->num_registers);
+ printk(KERN_ERR "BIGMAC: Would you like that for here or to go?\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Map in QEC global control registers. */
+ bp->gregs = sbus_ioremap(&bp->qec_sdev->resource[0], 0,
+ GLOB_REG_SIZE, "BigMAC QEC Global Regs");
+ if (!bp->gregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Make sure QEC is in BigMAC mode. */
+ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
+ printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Reset the QEC. */
+ if (qec_global_reset(bp->gregs))
+ goto fail_and_cleanup;
+
+ /* Get supported SBUS burst sizes. */
+ bsizes = prom_getintdefault(bp->qec_sdev->prom_node,
+ "burst-sizes",
+ 0xff);
+
+ bsizes_more = prom_getintdefault(bp->qec_sdev->bus->prom_node,
+ "burst-sizes",
+ 0xff);
+
+ bsizes &= 0xff;
+ if (bsizes_more != 0xff)
+ bsizes &= bsizes_more;
+ if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+ (bsizes & DMA_BURST32) == 0)
+ bsizes = (DMA_BURST32 - 1);
+ bp->bigmac_bursts = bsizes;
+
+ /* Perform QEC initialization. */
+ qec_init(bp);
+
+ /* Map in the BigMAC channel registers. */
+ bp->creg = sbus_ioremap(&bp->bigmac_sdev->resource[0], 0,
+ CREG_REG_SIZE, "BigMAC QEC Channel Regs");
+ if (!bp->creg) {
+ printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Map in the BigMAC control registers. */
+ bp->bregs = sbus_ioremap(&bp->bigmac_sdev->resource[1], 0,
+ BMAC_REG_SIZE, "BigMAC Primary Regs");
+ if (!bp->bregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Map in the BigMAC transceiver registers; this is how you poke at
+ * the BigMAC's PHY.
+ */
+ bp->tregs = sbus_ioremap(&bp->bigmac_sdev->resource[2], 0,
+ TCVR_REG_SIZE, "BigMAC Transceiver Regs");
+ if (!bp->tregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Stop the BigMAC. */
+ bigmac_stop(bp);
+
+ /* Allocate transmit/receive descriptor DVMA block. */
+ bp->bmac_block = sbus_alloc_consistent(bp->bigmac_sdev,
+ PAGE_SIZE,
+ &bp->bblock_dvma);
+ if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
+ printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Get the board revision of this BigMAC. */
+ bp->board_rev = prom_getintdefault(bp->bigmac_sdev->prom_node,
+ "board-version", 1);
+
+ /* Init auto-negotiation timer state. */
+ init_timer(&bp->bigmac_timer);
+ bp->timer_state = asleep;
+ bp->timer_ticks = 0;
+
+ /* Backlink to generic net device struct. */
+ bp->dev = dev;
+
+ /* Set links to our BigMAC open and close routines. */
+ dev->open = &bigmac_open;
+ dev->stop = &bigmac_close;
+ dev->hard_start_xmit = &bigmac_start_xmit;
+ dev->ethtool_ops = &bigmac_ethtool_ops;
+
+ /* Set links to BigMAC statistic and multi-cast loading code. */
+ dev->get_stats = &bigmac_get_stats;
+ dev->set_multicast_list = &bigmac_set_multicast;
+
+ dev->tx_timeout = &bigmac_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+
+ /* Finish net device registration. */
+ dev->irq = bp->bigmac_sdev->irqs[0];
+ dev->dma = 0;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "BIGMAC: Cannot register device.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Put us into the list of instances attached for later driver
+ * exit.
+ */
+ bp->next_module = root_bigmac_dev;
+ root_bigmac_dev = bp;
+
+ printk(KERN_INFO "%s: BigMAC 100baseT Ethernet ", dev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? ' ' : ':');
+ printk("\n");
+
+ return 0;
+
+fail_and_cleanup:
+ /* Something went wrong, undo whatever we did so far. */
+ /* Free register mappings if any. */
+ if (bp->gregs)
+ sbus_iounmap(bp->gregs, GLOB_REG_SIZE);
+ if (bp->creg)
+ sbus_iounmap(bp->creg, CREG_REG_SIZE);
+ if (bp->bregs)
+ sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
+ if (bp->tregs)
+ sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
+
+ if (bp->bmac_block)
+ sbus_free_consistent(bp->bigmac_sdev,
+ PAGE_SIZE,
+ bp->bmac_block,
+ bp->bblock_dvma);
+
+ /* This also frees the co-located 'dev->priv' */
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+/* QEC can be the parent of either QuadEthernet or
+ * a BigMAC. We want the latter.
+ */
+static int __init bigmac_match(struct sbus_dev *sdev)
+{
+ struct sbus_dev *child = sdev->child;
+
+ if (strcmp(sdev->prom_name, "qec") != 0)
+ return 0;
+
+ if (child == NULL)
+ return 0;
+
+ if (strcmp(child->prom_name, "be") != 0)
+ return 0;
+
+ return 1;
+}
+
+static int __init bigmac_probe(void)
+{
+ struct sbus_bus *sbus;
+ struct sbus_dev *sdev = NULL;
+ static int called;
+ int cards = 0, v;
+
+ root_bigmac_dev = NULL;
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ for_each_sbus(sbus) {
+ for_each_sbusdev(sdev, sbus) {
+ if (bigmac_match(sdev)) {
+ cards++;
+ if ((v = bigmac_ether_init(sdev)))
+ return v;
+ }
+ }
+ }
+ if (!cards)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit bigmac_cleanup(void)
+{
+ while (root_bigmac_dev) {
+ struct bigmac *bp = root_bigmac_dev;
+ struct bigmac *bp_nxt = root_bigmac_dev->next_module;
+
+ sbus_iounmap(bp->gregs, GLOB_REG_SIZE);
+ sbus_iounmap(bp->creg, CREG_REG_SIZE);
+ sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
+ sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
+ sbus_free_consistent(bp->bigmac_sdev,
+ PAGE_SIZE,
+ bp->bmac_block,
+ bp->bblock_dvma);
+
+ unregister_netdev(bp->dev);
+ free_netdev(bp->dev);
+ root_bigmac_dev = bp_nxt;
+ }
+}
+
+module_init(bigmac_probe);
+module_exit(bigmac_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
new file mode 100644
index 000000000000..5674003fc38a
--- /dev/null
+++ b/drivers/net/sunbmac.h
@@ -0,0 +1,356 @@
+/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
+ * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNBMAC_H
+#define _SUNBMAC_H
+
+/* QEC global registers. */
+#define GLOB_CTRL 0x00UL /* Control */
+#define GLOB_STAT 0x04UL /* Status */
+#define GLOB_PSIZE 0x08UL /* Packet Size */
+#define GLOB_MSIZE 0x0cUL /* Local-mem size (64K) */
+#define GLOB_RSIZE 0x10UL /* Receive partition size */
+#define GLOB_TSIZE 0x14UL /* Transmit partition size */
+#define GLOB_REG_SIZE 0x18UL
+
+#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
+#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
+#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
+#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
+#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
+#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
+#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
+
+#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
+#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
+#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
+#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
+
+#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
+#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
+#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
+#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
+
+/* QEC BigMAC channel registers. */
+#define CREG_CTRL 0x00UL /* Control */
+#define CREG_STAT 0x04UL /* Status */
+#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
+#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
+#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
+#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
+#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
+#define CREG_BMASK 0x1cUL /* BigMAC Error Interrupt Mask*/
+#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
+#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
+#define CREG_CCNT 0x30UL /* Collision Counter */
+#define CREG_REG_SIZE 0x34UL
+
+#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_BERROR 0x80000000 /* BigMAC error */
+#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
+#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
+#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
+#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
+#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
+#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
+#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
+#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
+#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
+#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
+#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
+
+#define CREG_STAT_ERRORS (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR| \
+ CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP| \
+ CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR| \
+ CREG_STAT_RXSERR)
+
+#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
+#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
+#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
+#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
+#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
+#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
+#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
+#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
+#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
+
+/* BIGMAC core registers */
+#define BMAC_XIFCFG 0x000UL /* XIF config register */
+ /* 0x004-->0x0fc, reserved */
+#define BMAC_STATUS 0x100UL /* Status register, clear on read */
+#define BMAC_IMASK 0x104UL /* Interrupt mask register */
+ /* 0x108-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
+#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
+#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
+#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
+#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
+#define BMAC_STIME 0x21cUL /* Transmit slot time */
+#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
+#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
+#define BMAC_TXDELIM 0x228UL /* Transmit delimiter */
+#define BMAC_JSIZE 0x22cUL /* Toe jam... */
+#define BMAC_TXPMAX 0x230UL /* Transmit max pkt size */
+#define BMAC_TXPMIN 0x234UL /* Transmit min pkt size */
+#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
+#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
+#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
+#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
+#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
+#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
+#define BMAC_RSEED 0x250UL /* Transmit random number seed */
+#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
+ /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
+#define BMAC_RXCFG 0x30cUL /* Receiver config register */
+#define BMAC_RXPMAX 0x310UL /* Receive max pkt size */
+#define BMAC_RXPMIN 0x314UL /* Receive min pkt size */
+#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
+#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
+#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
+#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
+#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
+#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
+#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
+#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
+ /* 0x33c, reserved */
+#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
+#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
+#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
+#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
+#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
+#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
+#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
+#define BMAC_AFMASK 0x35cUL /* Address filter mask */
+#define BMAC_REG_SIZE 0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
+#define BIGMAC_XCFG_RESV 0x00000002 /* Reserved, write always as 1 */
+#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
+#define BIGMAC_XCFG_SMODE 0x00000008 /* Enable serial mode */
+
+/* BigMAC status register. */
+#define BIGMAC_STAT_GOTFRAME 0x00000001 /* Received a frame */
+#define BIGMAC_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define BIGMAC_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define BIGMAC_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define BIGMAC_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define BIGMAC_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define BIGMAC_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define BIGMAC_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define BIGMAC_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define BIGMAC_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define BIGMAC_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define BIGMAC_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define BIGMAC_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define BIGMAC_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define BIGMAC_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
+
+/* BigMAC interrupt mask register. */
+#define BIGMAC_IMASK_GOTFRAME 0x00000001 /* Received a frame */
+#define BIGMAC_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define BIGMAC_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define BIGMAC_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define BIGMAC_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define BIGMAC_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define BIGMAC_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define BIGMAC_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define BIGMAC_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define BIGMAC_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define BIGMAC_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define BIGMAC_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
+#define BIGMAC_TXCFG_FIFO 0x00000010 /* Default tx fthresh... */
+#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
+#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
+#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
+#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
+#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
+#define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */
+#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
+#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
+#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
+#define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */
+#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
+#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
+#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
+
+/* The BigMAC PHY transceiver. Not nearly as sophisticated as the happy meal
+ * one. But it does have the "bit banger", oh baby.
+ */
+#define TCVR_TPAL 0x00UL
+#define TCVR_MPAL 0x04UL
+#define TCVR_REG_SIZE 0x08UL
+
+/* Frame commands. */
+#define FRAME_WRITE 0x50020000
+#define FRAME_READ 0x60020000
+
+/* Transceiver registers. */
+#define TCVR_PAL_SERIAL 0x00000001 /* Enable serial mode */
+#define TCVR_PAL_EXTLBACK 0x00000002 /* Enable external loopback */
+#define TCVR_PAL_MSENSE 0x00000004 /* Media sense */
+#define TCVR_PAL_LTENABLE 0x00000008 /* Link test enable */
+#define TCVR_PAL_LTSTATUS 0x00000010 /* Link test status (P1 only) */
+
+/* Management PAL. */
+#define MGMT_PAL_DCLOCK 0x00000001 /* Data clock */
+#define MGMT_PAL_OENAB 0x00000002 /* Output enabler */
+#define MGMT_PAL_MDIO 0x00000004 /* MDIO Data/attached */
+#define MGMT_PAL_TIMEO 0x00000008 /* Transmit enable timeout error */
+#define MGMT_PAL_EXT_MDIO MGMT_PAL_MDIO
+#define MGMT_PAL_INT_MDIO MGMT_PAL_TIMEO
+
+/* Here are some PHY addresses. */
+#define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */
+#define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */
+
+/* PHY registers */
+#define BIGMAC_BMCR 0x00 /* Basic mode control register */
+#define BIGMAC_BMSR 0x01 /* Basic mode status register */
+
+/* BMCR bits */
+#define BMCR_ISOLATE 0x0400 /* Disconnect DP83840 from MII */
+#define BMCR_PDOWN 0x0800 /* Powerdown the DP83840 */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset the DP83840 */
+
+/* BMSR bits */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_JCD 0x0002 /* Jabber detected */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+/* Ring descriptors and such, same as Quad Ethernet. */
+struct be_rxd {
+ u32 rx_flags;
+ u32 rx_addr;
+};
+
+#define RXD_OWN 0x80000000 /* Ownership. */
+#define RXD_UPDATE 0x10000000 /* Being Updated? */
+#define RXD_LENGTH 0x000007ff /* Packet Length. */
+
+struct be_txd {
+ u32 tx_flags;
+ u32 tx_addr;
+};
+
+#define TXD_OWN 0x80000000 /* Ownership. */
+#define TXD_SOP 0x40000000 /* Start Of Packet */
+#define TXD_EOP 0x20000000 /* End Of Packet */
+#define TXD_UPDATE 0x10000000 /* Being Updated? */
+#define TXD_LENGTH 0x000007ff /* Packet Length. */
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+#define TX_RING_SIZE 256
+#define RX_RING_SIZE 256
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(bp) \
+ (((bp)->tx_old <= (bp)->tx_new) ? \
+ (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
+ (bp)->tx_old - (bp)->tx_new - 1)
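+
+/* With TX_RING_SIZE == 256: tx_old == 10, tx_new == 250 leaves
+ * 10 + 255 - 250 == 15 free slots; tx_old == 250, tx_new == 10 leaves
+ * 250 - 10 - 1 == 239.  One slot is always held back so that
+ * tx_old == tx_new unambiguously means "ring empty".
+ */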
+
+
+#define RX_COPY_THRESHOLD 256
+#define RX_BUF_ALLOC_SIZE (ETH_FRAME_LEN + (64 * 3))
+
+struct bmac_init_block {
+ struct be_rxd be_rxd[RX_RING_MAXSIZE];
+ struct be_txd be_txd[TX_RING_MAXSIZE];
+};
+
+#define bib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem]))))
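+
+/* bib_offset() is offsetof() spelled by hand, e.g. bib_offset(be_txd, 3)
+ * == RX_RING_MAXSIZE * sizeof(struct be_rxd) + 3 * sizeof(struct be_txd)
+ * == 2048 + 24 == 2072.
+ */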
+
+/* Now software state stuff. */
+enum bigmac_transceiver {
+ external = 0,
+ internal = 1,
+ none = 2,
+};
+
+/* Timer state engine. */
+enum bigmac_timer_state {
+ ltrywait = 1, /* Forcing try of all modes, from fastest to slowest. */
+ asleep = 2, /* Timer inactive. */
+};
+
+struct bigmac {
+ void __iomem *gregs; /* QEC Global Registers */
+ void __iomem *creg; /* QEC BigMAC Channel Registers */
+ void __iomem *bregs; /* BigMAC Registers */
+ void __iomem *tregs; /* BigMAC Transceiver */
+ struct bmac_init_block *bmac_block; /* RX and TX descriptors */
+ __u32 bblock_dvma; /* RX and TX descriptors */
+
+ spinlock_t lock;
+
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+
+ int rx_new, tx_new, rx_old, tx_old;
+
+ int board_rev; /* BigMAC board revision. */
+
+ enum bigmac_transceiver tcvr_type;
+ unsigned int bigmac_bursts;
+ unsigned int paddr;
+ unsigned short sw_bmsr; /* SW copy of PHY BMSR */
+ unsigned short sw_bmcr; /* SW copy of PHY BMCR */
+ struct timer_list bigmac_timer;
+ enum bigmac_timer_state timer_state;
+ unsigned int timer_ticks;
+
+ struct net_device_stats enet_stats;
+ struct sbus_dev *qec_sdev;
+ struct sbus_dev *bigmac_sdev;
+ struct net_device *dev;
+ struct bigmac *next_module;
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
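+
+/* Example: if skb->data ends in 0x38 (56 mod 64), then
+ * ((56 + 63) & ~63) - 56 == 8, and skb_reserve(skb, 8) in
+ * big_mac_alloc_skb() below puts the buffer on a 64-byte boundary.
+ */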
+
+static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + 64, gfp_flags);
+ if(skb) {
+ int offset = ALIGNED_RX_SKB_ADDR(skb->data);
+
+ if(offset)
+ skb_reserve(skb, offset);
+ }
+ return skb;
+}
+
+#endif /* !(_SUNBMAC_H) */
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
new file mode 100644
index 000000000000..08cb7177a175
--- /dev/null
+++ b/drivers/net/sundance.c
@@ -0,0 +1,1785 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/sundance.html
+
+
+ Version LK1.01a (jgarzik):
+ - Replace some MII-related magic numbers with constants
+
+ Version LK1.02 (D-Link):
+ - Add new board to PCI ID list
+ - Fix multicast bug
+
+ Version LK1.03 (D-Link):
+ - New Rx scheme, reduce Rx congestion
+ - Option to disable flow control
+
+ Version LK1.04 (D-Link):
+ - Tx timeout recovery
+ - More support for ethtool.
+
+ Version LK1.04a:
+ - Remove unused/constant members from struct pci_id_info
+ (which then allows removal of 'drv_flags' from private struct)
+ (jgarzik)
+ - If no phy is found, fail to load that board (jgarzik)
+ - Always start phy id scan at id 1 to avoid problems (Donald Becker)
+ - Autodetect where mii_preamble_required is needed,
+ default to not needed. (Donald Becker)
+
+ Version LK1.04b:
+ - Remove mii_preamble_required module parameter (Donald Becker)
+ - Add per-interface mii_preamble_required (setting is autodetected)
+ (Donald Becker)
+ - Remove unnecessary cast from void pointer (jgarzik)
+ - Re-align comments in private struct (jgarzik)
+
+ Version LK1.04c (jgarzik):
+ - Support bitmapped message levels (NETIF_MSG_xxx), and the
+ two ethtool ioctls that get/set them
+ - Don't hand-code MII ethtool support, use standard API/lib
+
+ Version LK1.04d:
+ - Merge from Donald Becker's sundance.c: (Jason Lunz)
+ * proper support for variably-sized MTUs
+ * default to PIO, to fix chip bugs
+ - Add missing unregister_netdev (Jason Lunz)
+ - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
+ - Better rx buf size calculation (Donald Becker)
+
+ Version LK1.05 (D-Link):
+ - Fix DFE-580TX packet drop issue (for DL10050C)
+ - Fix reset_tx logic
+
+ Version LK1.06 (D-Link):
+ - Fix crash while unloading driver
+
+ Version LK1.06b (D-Link):
+ - New tx scheme, adaptive tx_coalesce
+
+ Version LK1.07 (D-Link):
+ - Fix tx bugs in big-endian machines
+ - Remove unused max_interrupt_work module parameter, the new
+ NAPI-like rx scheme doesn't need it.
+ - Remove redundant get_stats() in intr_handler(); those
+ I/O accesses could affect performance on ARM-based systems
+ - Add Linux software VLAN support
+
+ Version LK1.08 (D-Link):
+ - Fix custom MAC address bug
+ (StationAddr register only accepts word writes)
+
+ Version LK1.09 (D-Link):
+ - Fix the flowctrl bug.
+ - Set Pause bit in MII ANAR if flow control enabled.
+
+ Version LK1.09a (ICPlus):
+ - Add a delay when reading the contents of the EEPROM
+
+*/
+
+#define DRV_NAME "sundance"
+#define DRV_VERSION "1.01+LK1.09a"
+#define DRV_RELDATE "10-Jul-2003"
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can receive into offset buffers, so the Alpha does not
+ need a copy-align. */
+static int rx_copybreak;
+static int flowctrl=1;
+
+/* media[] specifies the media type the NIC operates at.
+ autosense Autosensing active media.
+ 10mbps_hd 10Mbps half duplex.
+ 10mbps_fd 10Mbps full duplex.
+ 100mbps_hd 100Mbps half duplex.
+ 100mbps_fd 100Mbps full duplex.
+ 0 Autosensing active media.
+ 1 10Mbps half duplex.
+ 2 10Mbps full duplex.
+ 3 100Mbps half duplex.
+ 4 100Mbps full duplex.
+*/
+#define MAX_UNITS 8
+static char *media[MAX_UNITS];
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, and more than 128 requires modifying the
+ Tx error recovery.
+ Large receive rings merely waste memory. */
+#define TX_RING_SIZE 32
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
+#define RX_RING_SIZE 64
+#define RX_BUDGET 32
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#ifndef _COMPAT_WITH_OLD_KERNEL
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#else
+#include "crc32.h"
+#include "ethtool.h"
+#include "mii.h"
+#include "compat.h"
+#endif
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
+KERN_INFO " http://www.scyld.com/network/sundance.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(media, charp, NULL, 0);
+module_param(flowctrl, int, 0);
+MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
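+
+/* Illustrative load-time use of these parameters (values are examples):
+ *   modprobe sundance debug=3 rx_copybreak=256 flowctrl=0 \
+ *            media=100mbps_fd,autosense
+ * media[] takes either the keywords or the numeric codes listed above.
+ */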
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+
+IVc. Errata
+
+*/
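
The receive scheme described in IIIb/c above boils down to one decision per
completed frame.  A schematic sketch of that decision (assembled from the
description above, not copied from this driver's rx_poll()), using the
2.6-era skb helpers seen elsewhere in this file; rx_copybreak and
refill_rx() are the names used in the surrounding text:

    static struct sk_buff *rx_pass_or_copy(struct sk_buff *ring_skb,
                                           int pkt_len, int rx_copybreak)
    {
            struct sk_buff *skb;

            if (pkt_len < rx_copybreak &&
                (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                    /* Small frame: copy into a right-sized skb and leave the
                     * full-sized ring buffer in place for the next packet. */
                    skb_reserve(skb, 2);    /* 16-byte align the IP header */
                    eth_copy_and_sum(skb, ring_skb->tail, pkt_len, 0);
                    skb_put(skb, pkt_len);
                    return skb;
            }
            /* Large frame (or no memory for a copy): hand the ring skb itself
             * up the stack; refill_rx() replaces it in a later pass. */
            skb_put(ring_skb, pkt_len);
            return ring_skb;
    }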
+
+/* Work-around for Kendin chip bugs. */
+#ifndef CONFIG_SUNDANCE_MMIO
+#define USE_IO_OPS 1
+#endif
+
+static struct pci_device_id sundance_pci_tbl[] = {
+ {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
+ {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
+ {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
+ {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
+ {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
+
+enum {
+ netdev_io_size = 128
+};
+
+struct pci_id_info {
+ const char *name;
+};
+static struct pci_id_info pci_id_tbl[] = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+ {"D-Link DFE-530TXS FAST Ethernet Adapter"},
+ {"D-Link DL10050-based FAST Ethernet Adapter"},
+ {"Sundance Technology Alta"},
+ {NULL,}, /* 0 terminated list. */
+};
+
+/* This driver was written to use PCI memory space; however, x86-oriented
+ hardware often uses I/O space accesses. */
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl = 0x00,
+ TxListPtr = 0x04,
+ TxDMABurstThresh = 0x08,
+ TxDMAUrgentThresh = 0x09,
+ TxDMAPollPeriod = 0x0a,
+ RxDMAStatus = 0x0c,
+ RxListPtr = 0x10,
+ DebugCtrl0 = 0x1a,
+ DebugCtrl1 = 0x1c,
+ RxDMABurstThresh = 0x14,
+ RxDMAUrgentThresh = 0x15,
+ RxDMAPollPeriod = 0x16,
+ LEDCtrl = 0x1a,
+ ASICCtrl = 0x30,
+ EEData = 0x34,
+ EECtrl = 0x36,
+ TxStartThresh = 0x3c,
+ RxEarlyThresh = 0x3e,
+ FlashAddr = 0x40,
+ FlashData = 0x44,
+ TxStatus = 0x46,
+ TxFrameId = 0x47,
+ DownCounter = 0x18,
+ IntrClear = 0x4a,
+ IntrEnable = 0x4c,
+ IntrStatus = 0x4e,
+ MACCtrl0 = 0x50,
+ MACCtrl1 = 0x52,
+ StationAddr = 0x54,
+ MaxFrameSize = 0x5A,
+ RxMode = 0x5c,
+ MIICtrl = 0x5e,
+ MulticastFilter0 = 0x60,
+ MulticastFilter1 = 0x64,
+ RxOctetsLow = 0x68,
+ RxOctetsHigh = 0x6a,
+ TxOctetsLow = 0x6c,
+ TxOctetsHigh = 0x6e,
+ TxFramesOK = 0x70,
+ RxFramesOK = 0x72,
+ StatsCarrierError = 0x74,
+ StatsLateColl = 0x75,
+ StatsMultiColl = 0x76,
+ StatsOneColl = 0x77,
+ StatsTxDefer = 0x78,
+ RxMissed = 0x79,
+ StatsTxXSDefer = 0x7a,
+ StatsTxAbort = 0x7b,
+ StatsBcastTx = 0x7c,
+ StatsBcastRx = 0x7d,
+ StatsMcastTx = 0x7e,
+ StatsMcastRx = 0x7f,
+ /* Aliased and bogus values! */
+ RxStatus = 0x0c,
+};
+enum ASICCtrl_HiWord_bit {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
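+
+/* These reset bits sit in the high 16 bits of the 32-bit ASICCtrl register;
+ * the code below reaches them with 16-bit writes to ASICCtrl + 2 (see the
+ * global reset in sundance_probe1() and reset_tx()).
+ */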
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 next_desc;
+ u32 status;
+ struct desc_frag { u32 addr, length; } frag[1];
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000,
+ DescEndPacket=0x4000,
+ DescEndRing=0x2000,
+ LastFrag=0x80000000,
+ DescIntrOnTx=0x8000,
+ DescIntrOnDMADone=0x80000000,
+ DisableAlign = 0x00000001,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+#define MII_CNT 4
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ spinlock_t rx_lock; /* Group with Tx control cache line. */
+ int msg_enable;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int flowctrl:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int an_enable:1;
+ unsigned int speed;
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ int budget;
+ int cur_task;
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ /* MII transceiver section. */
+ struct mii_if_info mii_if;
+ int mii_preamble_required;
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+ unsigned char pci_rev_id;
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
+ IntrDrvRqst | IntrTxDone | StatsMax | \
+ LinkChange)
+
+static int change_mtu(struct net_device *dev, int new_mtu);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static int reset_tx (struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void rx_poll(unsigned long data);
+static void tx_poll(unsigned long data);
+static void refill_rx (struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static int __set_mac_addr(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+
+static int __devinit sundance_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i;
+ void __iomem *ioaddr;
+ u16 mii_ctl;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, netdev_io_size);
+ if (!ioaddr)
+ goto err_out_res;
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] =
+ le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->base = ioaddr;
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->msg_enable = (1 << debug) - 1;
+ spin_lock_init(&np->lock);
+ tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
+ tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = (struct netdev_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct netdev_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->tx_timeout = &tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->change_mtu = &change_mtu;
+ pci_set_drvdata(pdev, dev);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %p, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ if (1) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Default setting */
+ np->mii_preamble_required++;
+ for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ if ((mii_status & 0x0040) == 0)
+ np->mii_preamble_required++;
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->mii_if.advertising);
+ }
+ }
+ np->mii_preamble_required--;
+
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
+ dev->name, ioread32(ioaddr + ASICCtrl));
+ goto err_out_unregister;
+ }
+
+ np->mii_if.phy_id = np->phys[0];
+ }
+
+ /* Parse override configuration */
+ np->an_enable = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0
+ || strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (flowctrl == 1)
+ np->flowctrl = 1;
+ }
+
+ /* Fibre PHY? */
+ if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
+ /* Default 100Mbps Full */
+ if (np->an_enable) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ np->an_enable = 0;
+ }
+ }
+ /* Reset PHY */
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ mdelay (300);
+ /* If flow control enabled, we need to advertise it.*/
+ if (np->flowctrl)
+ mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ /* Force media type */
+ if (!np->an_enable) {
+ mii_ctl = 0;
+ mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
+ mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
+ mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
+ printk (KERN_INFO "Override speed=%d, %s duplex\n",
+ np->speed, np->mii_if.full_duplex ? "Full" : "Half");
+
+ }
+
+ /* Perhaps move the reset here? */
+ /* Reset the chip to erase previous misconfiguration. */
+ if (netif_msg_hw(np))
+ printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
+ iowrite16(0x007f, ioaddr + ASICCtrl + 2);
+ if (netif_msg_hw(np))
+ printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
+
+ card_idx++;
+ return 0;
+
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+static int __devinit eeprom_read(void __iomem *ioaddr, int location)
+{
+ int boguscnt = 10000; /* Typical 1900 ticks. */
+ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
+ do {
+ eeprom_delay(ioaddr + EECtrl);
+ if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
+ return ioread16(ioaddr + EEData);
+ }
+ } while (--boguscnt > 0);
+ return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back 33 MHz PCI cycles. */
+#define mdio_delay() ioread8(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite8(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i;
+
+ /* Do we need to reset the chip??? */
+
+ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ if (i)
+ return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
+ /* The Tx list pointer is written as packets are queued. */
+
+ /* Initialize other registers. */
+ __set_mac_addr(dev);
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
+#else
+ iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
+#endif
+ if (dev->mtu > 2047)
+ iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ spin_lock_init(&np->mcastlock);
+
+ set_rx_mode(dev);
+ iowrite16(0, ioaddr + IntrEnable);
+ iowrite16(0, ioaddr + DownCounter);
+ /* Set the chip to poll every N*320nsec. */
+ iowrite8(100, ioaddr + RxDMAPollPeriod);
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+ /* Fix DFE-580TX packet drop issue */
+ if (np->pci_rev_id >= 0x14)
+ iowrite8(0x01, ioaddr + DebugCtrl1);
+ netif_start_queue(dev);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+ "MAC Control %x, %4.4x %4.4x.\n",
+ dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + MACCtrl0),
+ ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ int negotiated = mii_lpa & np->mii_if.advertising;
+ int duplex;
+
+ /* Force media */
+ if (!np->an_enable || mii_lpa == 0xffff) {
+ if (np->mii_if.full_duplex)
+ iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
+ ioaddr + MACCtrl0);
+ return;
+ }
+
+ /* Autonegotiation */
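+ /* 0x0100 is the partner's 100baseTx-FD ability and 0x01C0 masks the
+ * 100baseTx-FD/100baseTx-HD/10baseT-FD bits, so this resolves to full
+ * duplex when 100-FD was negotiated, or when 10baseT-FD is the only
+ * common ability among those (standard MII LPA bit layout).
+ */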
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->mii_if.full_duplex != duplex) {
+ np->mii_if.full_duplex = duplex;
+ if (netif_msg_link(np))
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int next_tick = 10*HZ;
+
+ if (netif_msg_timer(np)) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+ "Tx %x Rx %x.\n",
+ dev->name, ioread16(ioaddr + IntrEnable),
+ ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+ tasklet_disable(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
+ " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
+ ioread8(ioaddr + TxFrameId));
+
+ {
+ int i;
+ for (i=0; i<TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ le32_to_cpu(np->tx_ring[i].next_desc),
+ le32_to_cpu(np->tx_ring[i].status),
+ (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag[0].length));
+ }
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
+ netif_queue_stopped(dev));
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ np->cur_tx, np->cur_tx % TX_RING_SIZE,
+ np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+ printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+ }
+ spin_lock_irqsave(&np->lock, flag);
+
+ /* Stop and restart the chip's Tx processes. */
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flag);
+
+ dev->if_port = 0;
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ netif_wake_queue(dev);
+ }
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ tasklet_enable(&np->tx_tasklet);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].length = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static void tx_poll (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned head = np->cur_task % TX_RING_SIZE;
+ struct netdev_desc *txdesc =
+ &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
+
+ /* Chain the next pointer */
+ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
+ int entry = np->cur_task % TX_RING_SIZE;
+ txdesc = &np->tx_ring[entry];
+ if (np->last_tx) {
+ np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
+ entry*sizeof(struct netdev_desc));
+ }
+ np->last_tx = txdesc;
+ }
+ /* Indicate the latest descriptor of tx ring */
+ txdesc->status |= cpu_to_le32(DescIntrOnTx);
+
+ if (ioread32 (np->base + TxListPtr) == 0)
+ iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
+ np->base + TxListPtr);
+ return;
+}
+
+static int
+start_tx (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ unsigned entry;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+ txdesc->next_desc = 0;
+ txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
+ txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
+ skb->len,
+ PCI_DMA_TODEVICE));
+ txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+
+ /* Increment cur_tx before tasklet_schedule() */
+ np->cur_tx++;
+ mb();
+ /* Schedule a tx_poll() task */
+ tasklet_schedule(&np->tx_tasklet);
+
+ /* On some architectures: explicitly flush cache lines here. */
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
+ && !netif_queue_stopped(dev)) {
+ /* do nothing */
+ } else {
+ netif_stop_queue (dev);
+ }
+ dev->trans_start = jiffies;
+ if (netif_msg_tx_queued(np)) {
+ printk (KERN_DEBUG
+ "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* Reset hardware tx and free all of tx buffers */
+static int
+reset_tx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+ int irq = in_interrupt();
+
+ /* Reset tx logic, TxListPtr will be cleaned */
+ iowrite16 (TxDisable, ioaddr + MACCtrl1);
+ iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
+ for (i=50; i > 0; i--) {
+ if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
+ /* free all tx skbuff */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_ring[i].frag[0].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ if (irq)
+ dev_kfree_skb_irq (skb);
+ else
+ dev_kfree_skb (skb);
+ np->tx_skbuff[i] = NULL;
+ np->stats.tx_dropped++;
+ }
+ }
+ np->cur_tx = np->dirty_tx = 0;
+ np->cur_task = 0;
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+ return 0;
+}
+
+/* The interrupt handler cleans up after the Tx thread
+   and schedules the Rx tasklet. */
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int hw_frame_id;
+ int tx_cnt;
+ int tx_status;
+ int handled = 0;
+
+
+ do {
+ int intr_status = ioread16(ioaddr + IntrStatus);
+ iowrite16(intr_status, ioaddr + IntrStatus);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (!(intr_status & DEFAULT_INTR))
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDMADone)) {
+ iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
+ ioaddr + IntrEnable);
+ if (np->budget < 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+ }
+ if (intr_status & (IntrTxDone | IntrDrvRqst)) {
+ tx_status = ioread16 (ioaddr + TxStatus);
+ for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
+ if (netif_msg_tx_done(np))
+ printk
+ ("%s: Transmit status is %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ np->stats.tx_errors++;
+ if (tx_status & 0x10)
+ np->stats.tx_fifo_errors++;
+ if (tx_status & 0x08)
+ np->stats.collisions++;
+ if (tx_status & 0x02)
+ np->stats.tx_window_errors++;
+				/* This reset has not been verified. */
+ if (tx_status & 0x10) { /* Reset the Tx. */
+ np->stats.tx_fifo_errors++;
+ spin_lock(&np->lock);
+ reset_tx(dev);
+ spin_unlock(&np->lock);
+ }
+ if (tx_status & 0x1e) /* Restart the Tx. */
+ iowrite16 (TxEnable,
+ ioaddr + MACCtrl1);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ iowrite16 (0, ioaddr + TxStatus);
+ if (tx_cnt < 0) {
+ iowrite32(5000, ioaddr + DownCounter);
+ break;
+ }
+ tx_status = ioread16 (ioaddr + TxStatus);
+ }
+ hw_frame_id = (tx_status >> 8) & 0xff;
+ } else {
+ hw_frame_id = ioread8(ioaddr + TxFrameId);
+ }
+
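+		/* Reclaim completed Tx descriptors.  Chip revisions >= 0x14
+		 * latch the frame id of the last completed packet, which is
+		 * compared against the id stored in each descriptor's status;
+		 * older revisions rely on the completion bit alone. */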
+ if (np->pci_rev_id >= 0x14) {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ int sw_frame_id;
+ sw_frame_id = (le32_to_cpu(
+ np->tx_ring[entry].status) >> 2) & 0xff;
+ if (sw_frame_id == hw_frame_id &&
+ !(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ if (sw_frame_id == (hw_frame_id + 1) %
+ TX_RING_SIZE)
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev,
+ np->tx_ring[entry].frag[0].addr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag[0].addr = 0;
+ np->tx_ring[entry].frag[0].length = 0;
+ }
+ spin_unlock(&np->lock);
+ } else {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ if (!(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev,
+ np->tx_ring[entry].frag[0].addr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag[0].addr = 0;
+ np->tx_ring[entry].frag[0].length = 0;
+ }
+ spin_unlock(&np->lock);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear busy flag. */
+ netif_wake_queue (dev);
+ }
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+ } while (0);
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
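+/* Receive tasklet: hand up to np->budget frames to the stack, then refill
+ * the Rx ring.  If the budget runs out the tasklet reschedules itself;
+ * otherwise Rx interrupts are re-enabled. */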
+static void rx_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->budget;
+ void __iomem *ioaddr = np->base;
+ int received = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct netdev_desc *desc = &(np->rx_ring[entry]);
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len;
+
+ if (--boguscnt < 0) {
+ goto not_done;
+ }
+ if (!(frame_status & DescOwn))
+ break;
+ pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (frame_status & 0x001f4000) {
+			/* There was an error. */
+ if (netif_msg_rx_err(np))
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ np->stats.rx_errors++;
+ if (frame_status & 0x00100000) np->stats.rx_length_errors++;
+ if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+#ifndef final_version
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ desc->frag[0].addr,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ pci_dma_sync_single_for_device(np->pci_dev,
+ desc->frag[0].addr,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ pci_unmap_single(np->pci_dev,
+ desc->frag[0].addr,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ received++;
+ }
+ np->cur_rx = entry;
+ refill_rx (dev);
+ np->budget -= received;
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ return;
+
+not_done:
+ np->cur_rx = entry;
+ refill_rx (dev);
+ if (!received)
+ received = 1;
+ np->budget -= received;
+ if (np->budget <= 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+ return;
+}
+
+static void refill_rx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry;
+ int cnt = 0;
+
+ /* Refill the Rx ring buffers. */
+ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
+ np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag[0].length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ cnt++;
+ }
+ return;
+}
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mii_ctl, mii_advertise, mii_lpa;
+ int speed;
+
+ if (intr_status & LinkChange) {
+ if (np->an_enable) {
+ mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
+ mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
+ mii_advertise &= mii_lpa;
+ printk (KERN_INFO "%s: Link changed: ", dev->name);
+ if (mii_advertise & ADVERTISE_100FULL) {
+ np->speed = 100;
+ printk ("100Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_100HALF) {
+ np->speed = 100;
+ printk ("100Mbps, half duplex\n");
+ } else if (mii_advertise & ADVERTISE_10FULL) {
+ np->speed = 10;
+ printk ("10Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_10HALF) {
+ np->speed = 10;
+ printk ("10Mbps, half duplex\n");
+ } else
+ printk ("\n");
+
+ } else {
+ mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
+ speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
+ np->speed = speed;
+			printk (KERN_INFO "%s: Link changed: %dMbps, ",
+ dev->name, speed);
+ printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
+ "full" : "half");
+ }
+ check_duplex (dev);
+ if (np->flowctrl && np->mii_if.full_duplex) {
+ iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
+ ioaddr + MulticastFilter1+2);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
+ ioaddr + MACCtrl0);
+ }
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i;
+
+ /* We should lock this segment of code for SMP eventually, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+	/* The chip only needs to report frames that were silently dropped. */
+ np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
+ np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
+ np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
+ np->stats.collisions += ioread8(ioaddr + StatsLateColl);
+ np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
+ np->stats.collisions += ioread8(ioaddr + StatsOneColl);
+ np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
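+	/* The remaining counters are read but discarded, presumably only to
+	 * clear them in hardware; they are not folded into the statistics. */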
+ ioread8(ioaddr + StatsTxDefer);
+ for (i = StatsTxDefer; i <= StatsMcastRx; i++)
+ ioread8(ioaddr + i);
+ np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
+ np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
+ np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
+ np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+
+ return &np->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (dev->mc_count) {
+ struct dev_mc_list *mclist;
+ int bit;
+ int index;
+ int crc;
+ memset (mc_filter, 0, sizeof (mc_filter));
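+		/* Hash each address into the 64-bit multicast filter: the top
+		 * six bits of the little-endian CRC select the filter bit. */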
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
+ if (crc & 0x80000000) index |= 1 << bit;
+ mc_filter[index/16] |= (1 << (index % 16));
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ if (np->mii_if.full_duplex && np->flowctrl)
+ mc_filter[3] |= 0x0200;
+
+ for (i = 0; i < 4; i++)
+ iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ iowrite8(rx_mode, ioaddr + RxMode);
+}
+
+static int __set_mac_addr(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 addr16;
+
+ addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
+ iowrite16(addr16, np->base + StationAddr);
+ addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
+ iowrite16(addr16, np->base + StationAddr+2);
+ addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
+ iowrite16(addr16, np->base + StationAddr+4);
+ return 0;
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int rc;
+ int i;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ for (i=0; i<TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ le32_to_cpu(np->tx_ring[i].next_desc),
+ le32_to_cpu(np->tx_ring[i].status),
+ (le32_to_cpu(np->tx_ring[i].status) >> 2)
+ & 0xff,
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag[0].length));
+ }
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
+ netif_queue_stopped(dev));
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ np->cur_tx, np->cur_tx % TX_RING_SIZE,
+ np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+ printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+ printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
+ return 0;
+ }
+
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (netif_msg_ifdown(np)) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
+
+#ifdef __i386__
+ if (netif_msg_hw(np)) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)(np->tx_ring_dma));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
+ np->tx_ring[i].frag[0].length);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)(np->rx_ring_dma));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
+ np->rx_ring[i].frag[0].length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ del_timer_sync(&np->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pci_dev,
+ np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_ring[i].frag[0].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void __devexit sundance_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ pci_iounmap(pdev, np->base);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_driver sundance_driver = {
+ .name = DRV_NAME,
+ .id_table = sundance_pci_tbl,
+ .probe = sundance_probe1,
+ .remove = __devexit_p(sundance_remove1),
+};
+
+static int __init sundance_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init(&sundance_driver);
+}
+
+static void __exit sundance_exit(void)
+{
+ pci_unregister_driver(&sundance_driver);
+}
+
+module_init(sundance_init);
+module_exit(sundance_exit);
+
+
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
new file mode 100644
index 000000000000..5cd50fd53c12
--- /dev/null
+++ b/drivers/net/sungem.c
@@ -0,0 +1,3204 @@
+/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
+ * sungem.c: Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ *
+ * Support for Apple GMAC and assorted PHYs, WOL, Power Management
+ * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
+ * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
+ *
+ * NAPI and NETPOLL support
+ * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
+ *
+ * TODO:
+ * - Now that the driver was significantly simplified, I need to rework
+ * the locking. I'm sure we don't need _2_ spinlocks, and we probably
+ *   can avoid taking most of them for such long periods of time (and schedule
+ * instead). The main issues at this point are caused by the netdev layer
+ * though:
+ *
+ * gem_change_mtu() and gem_set_multicast() are called with a read_lock()
+ * held by net/core/dev.c, thus they can't schedule. That means they can't
+ * call netif_poll_disable() either, which forces gem_poll() to keep a spinlock
+ * where it could have been dropped. change_mtu in particular would also love
+ * to be able to msleep instead of horrid locked delays when resetting the HW,
+ * but that read_lock() makes it impossible, unless I defer its action to
+ * the reset task, which means it'll be asynchronous (won't take effect until
+ * the system schedules a bit).
+ *
+ * Also, it would probably be possible to remove most of the long-lived
+ * locking in the open/resume code path (gem_reinit_chip) by being more careful
+ * about when we can start taking interrupts or get xmit() called...
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#ifdef __sparc__
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/pbm.h>
+#endif
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#endif
+
+#include "sungem_phy.h"
+#include "sungem.h"
+
+/* Stripping FCS is causing problems, disabled for now */
+#undef STRIP_FCS
+
+#define DEFAULT_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+#define DRV_NAME "sungem"
+#define DRV_VERSION "0.98"
+#define DRV_RELDATE "8/24/03"
+#define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
+
+static char version[] __devinitdata =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define GEM_MODULE_NAME "gem"
+#define PFX GEM_MODULE_NAME ": "
+
+static struct pci_device_id gem_pci_tbl[] = {
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+
+ /* These models only differ from the original GEM in
+ * that their tx/rx fifos are of a different size and
+ * they only support 10/100 speeds. -DaveM
+ *
+ * Apple's GMAC does support gigabit on machines with
+ * the BCM54xx PHYs. -BenH
+ */
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
+
+static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
+{
+ u32 cmd;
+ int limit = 10000;
+
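+	/* Compose an IEEE 802.3 clause 22 MDIO read frame (ST=01, OP=10)
+	 * and poll the turnaround bit for completion. */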
+ cmd = (1 << 30);
+ cmd |= (2 << 28);
+ cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
+ cmd |= (reg << 18) & MIF_FRAME_REGAD;
+ cmd |= (MIF_FRAME_TAMSB);
+ writel(cmd, gp->regs + MIF_FRAME);
+
+ while (limit--) {
+ cmd = readl(gp->regs + MIF_FRAME);
+ if (cmd & MIF_FRAME_TALSB)
+ break;
+
+ udelay(10);
+ }
+
+ if (!limit)
+ cmd = 0xffff;
+
+ return cmd & MIF_FRAME_DATA;
+}
+
+static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
+{
+ struct gem *gp = dev->priv;
+ return __phy_read(gp, mii_id, reg);
+}
+
+static inline u16 phy_read(struct gem *gp, int reg)
+{
+ return __phy_read(gp, gp->mii_phy_addr, reg);
+}
+
+static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
+{
+ u32 cmd;
+ int limit = 10000;
+
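+	/* Same framing as __phy_read(), but with the write opcode (OP=01)
+	 * and the data in the low 16 bits of the frame. */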
+ cmd = (1 << 30);
+ cmd |= (1 << 28);
+ cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
+ cmd |= (reg << 18) & MIF_FRAME_REGAD;
+ cmd |= (MIF_FRAME_TAMSB);
+ cmd |= (val & MIF_FRAME_DATA);
+ writel(cmd, gp->regs + MIF_FRAME);
+
+ while (limit--) {
+ cmd = readl(gp->regs + MIF_FRAME);
+ if (cmd & MIF_FRAME_TALSB)
+ break;
+
+ udelay(10);
+ }
+}
+
+static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
+{
+ struct gem *gp = dev->priv;
+ __phy_write(gp, mii_id, reg, val & 0xffff);
+}
+
+static inline void phy_write(struct gem *gp, int reg, u16 val)
+{
+ __phy_write(gp, gp->mii_phy_addr, reg, val);
+}
+
+static inline void gem_enable_ints(struct gem *gp)
+{
+ /* Enable all interrupts but TXDONE */
+ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
+static inline void gem_disable_ints(struct gem *gp)
+{
+ /* Disable all interrupts, including TXDONE */
+ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
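+/* Turn on the chip's clock cell.  Calls are reference counted so nested
+ * users keep the cell enabled until the last gem_put_cell(). */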
+static void gem_get_cell(struct gem *gp)
+{
+ BUG_ON(gp->cell_enabled < 0);
+ gp->cell_enabled++;
+#ifdef CONFIG_PPC_PMAC
+ if (gp->cell_enabled == 1) {
+ mb();
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
+ udelay(10);
+ }
+#endif /* CONFIG_PPC_PMAC */
+}
+
+/* Turn off the chip's clock */
+static void gem_put_cell(struct gem *gp)
+{
+ BUG_ON(gp->cell_enabled <= 0);
+ gp->cell_enabled--;
+#ifdef CONFIG_PPC_PMAC
+ if (gp->cell_enabled == 0) {
+ mb();
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
+ udelay(10);
+ }
+#endif /* CONFIG_PPC_PMAC */
+}
+
+static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
+{
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
+}
+
+static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
+ u32 pcs_miistat;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
+ gp->dev->name, pcs_istat);
+
+ if (!(pcs_istat & PCS_ISTAT_LSC)) {
+ printk(KERN_ERR "%s: PCS irq but no link status change???\n",
+ dev->name);
+ return 0;
+ }
+
+ /* The link status bit latches on zero, so you must
+ * read it twice in such a case to see a transition
+ * to the link being up.
+ */
+ pcs_miistat = readl(gp->regs + PCS_MIISTAT);
+ if (!(pcs_miistat & PCS_MIISTAT_LS))
+ pcs_miistat |=
+ (readl(gp->regs + PCS_MIISTAT) &
+ PCS_MIISTAT_LS);
+
+ if (pcs_miistat & PCS_MIISTAT_ANC) {
+ /* The remote-fault indication is only valid
+ * when autoneg has completed.
+ */
+ if (pcs_miistat & PCS_MIISTAT_RF)
+ printk(KERN_INFO "%s: PCS AutoNEG complete, "
+ "RemoteFault\n", dev->name);
+ else
+ printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
+ dev->name);
+ }
+
+ if (pcs_miistat & PCS_MIISTAT_LS) {
+ printk(KERN_INFO "%s: PCS link is now up.\n",
+ dev->name);
+ netif_carrier_on(gp->dev);
+ } else {
+ printk(KERN_INFO "%s: PCS link is now down.\n",
+ dev->name);
+ netif_carrier_off(gp->dev);
+ /* If this happens and the link timer is not running,
+ * reset so we re-negotiate.
+ */
+ if (!timer_pending(&gp->link_timer))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
+ gp->dev->name, txmac_stat);
+
+	/* Defer timer expiration is quite normal;
+ * don't even log the event.
+ */
+ if ((txmac_stat & MAC_TXSTAT_DTE) &&
+ !(txmac_stat & ~MAC_TXSTAT_DTE))
+ return 0;
+
+ if (txmac_stat & MAC_TXSTAT_URUN) {
+ printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
+ dev->name);
+ gp->net_stats.tx_fifo_errors++;
+ }
+
+ if (txmac_stat & MAC_TXSTAT_MPE) {
+ printk(KERN_ERR "%s: TX MAC max packet size error.\n",
+ dev->name);
+ gp->net_stats.tx_errors++;
+ }
+
+ /* The rest are all cases of one of the 16-bit TX
+ * counters expiring.
+ */
+ if (txmac_stat & MAC_TXSTAT_NCE)
+ gp->net_stats.collisions += 0x10000;
+
+ if (txmac_stat & MAC_TXSTAT_ECE) {
+ gp->net_stats.tx_aborted_errors += 0x10000;
+ gp->net_stats.collisions += 0x10000;
+ }
+
+ if (txmac_stat & MAC_TXSTAT_LCE) {
+ gp->net_stats.tx_aborted_errors += 0x10000;
+ gp->net_stats.collisions += 0x10000;
+ }
+
+ /* We do not keep track of MAC_TXSTAT_FCE and
+ * MAC_TXSTAT_PCE events.
+ */
+ return 0;
+}
+
+/* When we get an RX fifo overflow, the RX unit in GEM is probably hung
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int gem_rxmac_reset(struct gem *gp)
+{
+ struct net_device *dev = gp->dev;
+ int limit, i;
+ u64 desc_dma;
+ u32 val;
+
+ /* First, reset & disable MAC RX. */
+ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
+ "chip.\n", dev->name);
+ return 1;
+ }
+
+ writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
+ gp->regs + MAC_RXCFG);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
+ "chip.\n", dev->name);
+ return 1;
+ }
+
+ /* Second, disable RX DMA. */
+ writel(0, gp->regs + RXDMA_CFG);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
+ "chip.\n", dev->name);
+ return 1;
+ }
+
+ udelay(5000);
+
+ /* Execute RX reset command. */
+ writel(gp->swrst_base | GREG_SWRST_RXRST,
+ gp->regs + GREG_SWRST);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ printk(KERN_ERR "%s: RX reset command will not execute, resetting "
+ "whole chip.\n", dev->name);
+ return 1;
+ }
+
+ /* Refresh the RX ring. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct gem_rxd *rxd = &gp->init_block->rxd[i];
+
+ if (gp->rx_skbs[i] == NULL) {
+ printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
+ "whole chip.\n", dev->name);
+ return 1;
+ }
+
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ }
+ gp->rx_new = gp->rx_old = 0;
+
+ /* Now we must reprogram the rest of RX unit. */
+ desc_dma = (u64) gp->gblock_dvma;
+ desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+ writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+ ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+ if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((8 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ else
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((4 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+ val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+ writel(val, gp->regs + RXDMA_PTHRESH);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ return 0;
+}
+
+static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
+ int ret = 0;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
+ gp->dev->name, rxmac_stat);
+
+ if (rxmac_stat & MAC_RXSTAT_OFLW) {
+ u32 smac = readl(gp->regs + MAC_SMACHINE);
+
+ printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
+ dev->name, smac);
+ gp->net_stats.rx_over_errors++;
+ gp->net_stats.rx_fifo_errors++;
+
+ ret = gem_rxmac_reset(gp);
+ }
+
+ if (rxmac_stat & MAC_RXSTAT_ACE)
+ gp->net_stats.rx_frame_errors += 0x10000;
+
+ if (rxmac_stat & MAC_RXSTAT_CCE)
+ gp->net_stats.rx_crc_errors += 0x10000;
+
+ if (rxmac_stat & MAC_RXSTAT_LCE)
+ gp->net_stats.rx_length_errors += 0x10000;
+
+ /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
+ * events.
+ */
+ return ret;
+}
+
+static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
+ gp->dev->name, mac_cstat);
+
+ /* This interrupt is just for pause frame and pause
+ * tracking. It is useful for diagnostics and debug
+ * but probably by default we will mask these events.
+ */
+ if (mac_cstat & MAC_CSTAT_PS)
+ gp->pause_entered++;
+
+ if (mac_cstat & MAC_CSTAT_PRCV)
+ gp->pause_last_time_recvd = (mac_cstat >> 16);
+
+ return 0;
+}
+
+static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 mif_status = readl(gp->regs + MIF_STATUS);
+ u32 reg_val, changed_bits;
+
+ reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
+ changed_bits = (mif_status & MIF_STATUS_STAT);
+
+ gem_handle_mif_event(gp, reg_val, changed_bits);
+
+ return 0;
+}
+
+static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ printk(KERN_ERR "%s: PCI error [%04x] ",
+ dev->name, pci_estat);
+
+ if (pci_estat & GREG_PCIESTAT_BADACK)
+ printk("<No ACK64# during ABS64 cycle> ");
+ if (pci_estat & GREG_PCIESTAT_DTRTO)
+ printk("<Delayed transaction timeout> ");
+ if (pci_estat & GREG_PCIESTAT_OTHER)
+ printk("<other>");
+ printk("\n");
+ } else {
+ pci_estat |= GREG_PCIESTAT_OTHER;
+ printk(KERN_ERR "%s: PCI error\n", dev->name);
+ }
+
+ if (pci_estat & GREG_PCIESTAT_OTHER) {
+ u16 pci_cfg_stat;
+
+ /* Interrogate PCI config space for the
+ * true cause.
+ */
+ pci_read_config_word(gp->pdev, PCI_STATUS,
+ &pci_cfg_stat);
+ printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
+ dev->name, pci_cfg_stat);
+ if (pci_cfg_stat & PCI_STATUS_PARITY)
+ printk(KERN_ERR "%s: PCI parity error detected.\n",
+ dev->name);
+ if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+ printk(KERN_ERR "%s: PCI target abort.\n",
+ dev->name);
+ if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+ printk(KERN_ERR "%s: PCI master acks target abort.\n",
+ dev->name);
+ if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+ printk(KERN_ERR "%s: PCI master abort.\n",
+ dev->name);
+ if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+ printk(KERN_ERR "%s: PCI system error SERR#.\n",
+ dev->name);
+ if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+ printk(KERN_ERR "%s: PCI parity error.\n",
+ dev->name);
+
+ /* Write the error bits back to clear them. */
+ pci_cfg_stat &= (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY);
+ pci_write_config_word(gp->pdev,
+ PCI_STATUS, pci_cfg_stat);
+ }
+
+ /* For all PCI errors, we should reset the chip. */
+ return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ if (gem_status & GREG_STAT_RXNOBUF) {
+ /* Frame arrived, no free RX buffers available. */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: no buffer for rx frame\n",
+ gp->dev->name);
+ gp->net_stats.rx_dropped++;
+ }
+
+ if (gem_status & GREG_STAT_RXTAGERR) {
+ /* corrupt RX tag framing */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
+ gp->dev->name);
+ gp->net_stats.rx_errors++;
+
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_PCS) {
+ if (gem_pcs_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_TXMAC) {
+ if (gem_txmac_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_RXMAC) {
+ if (gem_rxmac_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_MAC) {
+ if (gem_mac_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_MIF) {
+ if (gem_mif_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ if (gem_status & GREG_STAT_PCIERR) {
+ if (gem_pci_interrupt(dev, gp, gem_status))
+ goto do_reset;
+ }
+
+ return 0;
+
+do_reset:
+ gp->reset_task_pending = 1;
+ schedule_work(&gp->reset_task);
+
+ return 1;
+}
+
+static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ int entry, limit;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
+ gp->dev->name, gem_status);
+
+ entry = gp->tx_old;
+ limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
+ while (entry != limit) {
+ struct sk_buff *skb;
+ struct gem_txd *txd;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+ int frag;
+
+ if (netif_msg_tx_done(gp))
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ gp->dev->name, entry);
+ skb = gp->tx_skbs[entry];
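+		/* For a fragmented skb, make sure the chip has completed every
+		 * descriptor of the packet before freeing it. */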
+ if (skb_shinfo(skb)->nr_frags) {
+ int last = entry + skb_shinfo(skb)->nr_frags;
+ int walk = entry;
+ int incomplete = 0;
+
+ last &= (TX_RING_SIZE - 1);
+ for (;;) {
+ walk = NEXT_TX(walk);
+ if (walk == limit)
+ incomplete = 1;
+ if (walk == last)
+ break;
+ }
+ if (incomplete)
+ break;
+ }
+ gp->tx_skbs[entry] = NULL;
+ gp->net_stats.tx_bytes += skb->len;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &gp->init_block->txd[entry];
+
+ dma_addr = le64_to_cpu(txd->buffer);
+ dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
+
+ pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+ }
+
+ gp->net_stats.tx_packets++;
+ dev_kfree_skb_irq(skb);
+ }
+ gp->tx_old = entry;
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+}
+
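+/* Return Rx descriptors to the chip in groups of four, refreshing their
+ * status words, and advance the RXDMA kick register to the last complete
+ * group. */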
+static __inline__ void gem_post_rxds(struct gem *gp, int limit)
+{
+ int cluster_start, curr, count, kick;
+
+ cluster_start = curr = (gp->rx_new & ~(4 - 1));
+ count = 0;
+ kick = -1;
+ wmb();
+ while (curr != limit) {
+ curr = NEXT_RX(curr);
+ if (++count == 4) {
+ struct gem_rxd *rxd =
+ &gp->init_block->rxd[cluster_start];
+ for (;;) {
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ rxd++;
+ cluster_start = NEXT_RX(cluster_start);
+ if (cluster_start == curr)
+ break;
+ }
+ kick = curr;
+ count = 0;
+ }
+ }
+ if (kick >= 0) {
+ mb();
+ writel(kick, gp->regs + RXDMA_KICK);
+ }
+}
+
+static int gem_rx(struct gem *gp, int work_to_do)
+{
+ int entry, drops, work_done = 0;
+ u32 done;
+
+ if (netif_msg_rx_status(gp))
+ printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+ gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
+
+ entry = gp->rx_new;
+ drops = 0;
+ done = readl(gp->regs + RXDMA_DONE);
+ for (;;) {
+ struct gem_rxd *rxd = &gp->init_block->rxd[entry];
+ struct sk_buff *skb;
+ u64 status = cpu_to_le64(rxd->status_word);
+ dma_addr_t dma_addr;
+ int len;
+
+ if ((status & RXDCTRL_OWN) != 0)
+ break;
+
+ if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
+ break;
+
+ /* When writing back RX descriptor, GEM writes status
+	 * then buffer address, possibly in separate transactions.
+ * If we don't wait for the chip to write both, we could
+ * post a new buffer to this descriptor then have GEM spam
+ * on the buffer address. We sync on the RX completion
+ * register to prevent this from happening.
+ */
+ if (entry == done) {
+ done = readl(gp->regs + RXDMA_DONE);
+ if (entry == done)
+ break;
+ }
+
+ /* We can now account for the work we're about to do */
+ work_done++;
+
+ skb = gp->rx_skbs[entry];
+
+ len = (status & RXDCTRL_BUFSZ) >> 16;
+ if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
+ gp->net_stats.rx_errors++;
+ if (len < ETH_ZLEN)
+ gp->net_stats.rx_length_errors++;
+ if (len & RXDCTRL_BAD)
+ gp->net_stats.rx_crc_errors++;
+
+ /* We'll just return it to GEM. */
+ drop_it:
+ gp->net_stats.rx_dropped++;
+ goto next;
+ }
+
+ dma_addr = cpu_to_le64(rxd->buffer);
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ pci_unmap_page(gp->pdev, dma_addr,
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ gp->rx_skbs[entry] = new_skb;
+ new_skb->dev = gp->dev;
+ skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
+ rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+ virt_to_page(new_skb->data),
+ offset_in_page(new_skb->data),
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE));
+ skb_reserve(new_skb, RX_OFFSET);
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+
+ copy_skb->dev = gp->dev;
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+ memcpy(copy_skb->data, skb->data, len);
+ pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->ip_summed = CHECKSUM_HW;
+ skb->protocol = eth_type_trans(skb, gp->dev);
+
+ netif_receive_skb(skb);
+
+ gp->net_stats.rx_packets++;
+ gp->net_stats.rx_bytes += len;
+ gp->dev->last_rx = jiffies;
+
+ next:
+ entry = NEXT_RX(entry);
+ }
+
+ gem_post_rxds(gp, entry);
+
+ gp->rx_new = entry;
+
+ if (drops)
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ gp->dev->name);
+
+ return work_done;
+}
+
+static int gem_poll(struct net_device *dev, int *budget)
+{
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ /*
+ * NAPI locking nightmare: See comment at head of driver
+ */
+ spin_lock_irqsave(&gp->lock, flags);
+
+ do {
+ int work_to_do, work_done;
+
+ /* Handle anomalies */
+ if (gp->status & GREG_STAT_ABNORMAL) {
+ if (gem_abnormal_irq(dev, gp, gp->status))
+ break;
+ }
+
+ /* Run TX completion thread */
+ spin_lock(&gp->tx_lock);
+ gem_tx(dev, gp, gp->status);
+ spin_unlock(&gp->tx_lock);
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ /* Run RX thread. We don't use any locking here,
+ * code willing to do bad things - like cleaning the
+ * rx ring - must call netif_poll_disable(), which
+ * schedule_timeout()'s if polling is already disabled.
+ */
+ work_to_do = min(*budget, dev->quota);
+
+ work_done = gem_rx(gp, work_to_do);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done >= work_to_do)
+ return 1;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ gp->status = readl(gp->regs + GREG_STAT);
+ } while (gp->status & GREG_STAT_NAPI);
+
+ __netif_rx_complete(dev);
+ gem_enable_ints(gp);
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return 0;
+}
+
+static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ /* Swallow interrupts when shutting the chip down, though
+ * that shouldn't happen, we should have done free_irq() at
+ * this point...
+ */
+ if (!gp->running)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ if (netif_rx_schedule_prep(dev)) {
+ u32 gem_status = readl(gp->regs + GREG_STAT);
+
+ if (gem_status == 0) {
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return IRQ_NONE;
+ }
+ gp->status = gem_status;
+ gem_disable_ints(gp);
+ __netif_rx_schedule(dev);
+ }
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ /* If polling was disabled at the time we received that
+ * interrupt, we may return IRQ_HANDLED here while we
+ * should return IRQ_NONE. No big deal...
+ */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gem_poll_controller(struct net_device *dev)
+{
+	/* gem_interrupt() is safe against reentrance, so there is
+	 * no need to disable_irq() here.
+	 */
+ gem_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
+static void gem_tx_timeout(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ if (!gp->running) {
+ printk("%s: hrm.. hw not running !\n", dev->name);
+ return;
+ }
+ printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
+ dev->name,
+ readl(gp->regs + TXDMA_CFG),
+ readl(gp->regs + MAC_TXSTAT),
+ readl(gp->regs + MAC_TXCFG));
+ printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
+ dev->name,
+ readl(gp->regs + RXDMA_CFG),
+ readl(gp->regs + MAC_RXSTAT),
+ readl(gp->regs + MAC_RXCFG));
+
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+
+ gp->reset_task_pending = 1;
+ schedule_work(&gp->reset_task);
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+}
+
+static __inline__ int gem_intme(int entry)
+{
+ /* Algorithm: IRQ every 1/2 of descriptors. */
+ if (!(entry & ((TX_RING_SIZE>>1)-1)))
+ return 1;
+
+ return 0;
+}
+
+static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ int entry;
+ u64 ctrl;
+ unsigned long flags;
+
+ ctrl = 0;
+ if (skb->ip_summed == CHECKSUM_HW) {
+ u64 csum_start_off, csum_stuff_off;
+
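+		/* Hardware checksum offload: tell the chip where the checksum
+		 * starts and where to stuff the result, as offsets from the
+		 * start of the packet. */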
+ csum_start_off = (u64) (skb->h.raw - skb->data);
+ csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+
+ ctrl = (TXDCTRL_CENAB |
+ (csum_start_off << 15) |
+ (csum_stuff_off << 21));
+ }
+
+ local_irq_save(flags);
+ if (!spin_trylock(&gp->tx_lock)) {
+ /* Tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+ /* We raced with gem_do_stop() */
+ if (!gp->running) {
+ spin_unlock_irqrestore(&gp->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&gp->tx_lock, flags);
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = gp->tx_new;
+ gp->tx_skbs[entry] = skb;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ struct gem_txd *txd = &gp->init_block->txd[entry];
+ dma_addr_t mapping;
+ u32 len;
+
+ len = skb->len;
+ mapping = pci_map_page(gp->pdev,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ len, PCI_DMA_TODEVICE);
+ ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
+ if (gem_intme(entry))
+ ctrl |= TXDCTRL_INTME;
+ txd->buffer = cpu_to_le64(mapping);
+ wmb();
+ txd->control_word = cpu_to_le64(ctrl);
+ entry = NEXT_TX(entry);
+ } else {
+ struct gem_txd *txd;
+ u32 first_len;
+ u64 intme;
+ dma_addr_t first_mapping;
+ int frag, first_entry = entry;
+
+ intme = 0;
+ if (gem_intme(entry))
+ intme |= TXDCTRL_INTME;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_len = skb_headlen(skb);
+ first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ first_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len;
+ dma_addr_t mapping;
+ u64 this_ctrl;
+
+ len = this_frag->size;
+ mapping = pci_map_page(gp->pdev,
+ this_frag->page,
+ this_frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+ this_ctrl = ctrl;
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ this_ctrl |= TXDCTRL_EOF;
+
+ txd = &gp->init_block->txd[entry];
+ txd->buffer = cpu_to_le64(mapping);
+ wmb();
+ txd->control_word = cpu_to_le64(this_ctrl | len);
+
+ if (gem_intme(entry))
+ intme |= TXDCTRL_INTME;
+
+ entry = NEXT_TX(entry);
+ }
+ txd = &gp->init_block->txd[first_entry];
+ txd->buffer = cpu_to_le64(first_mapping);
+ wmb();
+ txd->control_word =
+ cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
+ }
+
+ gp->tx_new = entry;
+ if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ if (netif_msg_tx_queued(gp))
+ printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ dev->name, entry, skb->len);
+ mb();
+ writel(gp->tx_new, gp->regs + TXDMA_KICK);
+ spin_unlock_irqrestore(&gp->tx_lock, flags);
+
+ dev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+}
+
+#define STOP_TRIES 32
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_reset(struct gem *gp)
+{
+ int limit;
+ u32 val;
+
+ /* Make sure we won't get any more interrupts */
+ writel(0xffffffff, gp->regs + GREG_IMASK);
+
+ /* Reset the chip */
+ writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
+ gp->regs + GREG_SWRST);
+
+ limit = STOP_TRIES;
+
+ do {
+ udelay(20);
+ val = readl(gp->regs + GREG_SWRST);
+ if (limit-- <= 0)
+ break;
+ } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
+
+ if (limit <= 0)
+ printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_start_dma(struct gem *gp)
+{
+ u32 val;
+
+ /* We are ready to rock, turn everything on. */
+ val = readl(gp->regs + TXDMA_CFG);
+ writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ (void) readl(gp->regs + MAC_RXCFG);
+ udelay(100);
+
+ gem_enable_ints(gp);
+
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. DMA won't actually
+ * be stopped until about 4ms later, though...
+ */
+static void gem_stop_dma(struct gem *gp)
+{
+ u32 val;
+
+ /* We are done rocking, turn everything off. */
+ val = readl(gp->regs + TXDMA_CFG);
+ writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ (void) readl(gp->regs + MAC_RXCFG);
+
+ /* Need to wait a bit ... done by the caller */
+}
+
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+// XXX dbl check what that function should do when called on PCS PHY
+static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+{
+ u32 advertise, features;
+ int autoneg;
+ int speed;
+ int duplex;
+
+ if (gp->phy_type != phy_mii_mdio0 &&
+ gp->phy_type != phy_mii_mdio1)
+ goto non_mii;
+
+ /* Setup advertise */
+ if (found_mii_phy(gp))
+ features = gp->phy_mii.def->features;
+ else
+ features = 0;
+
+ advertise = features & ADVERTISE_MASK;
+ if (gp->phy_mii.advertising != 0)
+ advertise &= gp->phy_mii.advertising;
+
+ autoneg = gp->want_autoneg;
+ speed = gp->phy_mii.speed;
+ duplex = gp->phy_mii.duplex;
+
+ /* Setup link parameters */
+ if (!ep)
+ goto start_aneg;
+ if (ep->autoneg == AUTONEG_ENABLE) {
+ advertise = ep->advertising;
+ autoneg = 1;
+ } else {
+ autoneg = 0;
+ speed = ep->speed;
+ duplex = ep->duplex;
+ }
+
+start_aneg:
+ /* Sanitize settings based on PHY capabilities */
+ if ((features & SUPPORTED_Autoneg) == 0)
+ autoneg = 0;
+ if (speed == SPEED_1000 &&
+ !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
+ speed = SPEED_100;
+ if (speed == SPEED_100 &&
+ !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
+ speed = SPEED_10;
+ if (duplex == DUPLEX_FULL &&
+ !(features & (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full)))
+ duplex = DUPLEX_HALF;
+ if (speed == 0)
+ speed = SPEED_10;
+
+ /* If we are asleep, we don't try to actually setup the PHY, we
+ * just store the settings
+ */
+ if (gp->asleep) {
+ gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
+ gp->phy_mii.speed = speed;
+ gp->phy_mii.duplex = duplex;
+ return;
+ }
+
+ /* Configure PHY & start aneg */
+ gp->want_autoneg = autoneg;
+ if (autoneg) {
+ if (found_mii_phy(gp))
+ gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
+ gp->lstate = link_aneg;
+ } else {
+ if (found_mii_phy(gp))
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
+ gp->lstate = link_force_ok;
+ }
+
+non_mii:
+ gp->timer_ticks = 0;
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Must be invoked under gp->lock and gp->tx_lock.
+ */
+static int gem_set_link_modes(struct gem *gp)
+{
+ u32 val;
+ int full_duplex, speed, pause;
+
+ full_duplex = 0;
+ speed = SPEED_10;
+ pause = 0;
+
+ if (found_mii_phy(gp)) {
+ if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
+ return 1;
+ full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
+ speed = gp->phy_mii.speed;
+ pause = gp->phy_mii.pause;
+ } else if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+ if (pcs_lpa & PCS_MIIADV_FD)
+ full_duplex = 1;
+ speed = SPEED_1000;
+ }
+
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
+ gp->dev->name, speed, (full_duplex ? "full" : "half"));
+
+ if (!gp->running)
+ return 0;
+
+ val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
+ if (full_duplex) {
+ val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
+ } else {
+ /* MAC_TXCFG_NBO must be zero. */
+ }
+ writel(val, gp->regs + MAC_TXCFG);
+
+ val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
+ if (!full_duplex &&
+ (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1)) {
+ val |= MAC_XIFCFG_DISE;
+ } else if (full_duplex) {
+ val |= MAC_XIFCFG_FLED;
+ }
+
+ if (speed == SPEED_1000)
+ val |= (MAC_XIFCFG_GMII);
+
+ writel(val, gp->regs + MAC_XIFCFG);
+
+ /* If gigabit and half-duplex, enable carrier extension
+ * mode. Else, disable it.
+ */
+ if (speed == SPEED_1000 && !full_duplex) {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ } else {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ }
+
+ if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+ if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
+ pause = 1;
+ }
+
+ if (netif_msg_link(gp)) {
+ if (pause) {
+ printk(KERN_INFO "%s: Pause is enabled "
+ "(rxfifo: %d off: %d on: %d)\n",
+ gp->dev->name,
+ gp->rx_fifo_sz,
+ gp->rx_pause_off,
+ gp->rx_pause_on);
+ } else {
+ printk(KERN_INFO "%s: Pause is disabled\n",
+ gp->dev->name);
+ }
+ }
+
+ if (!full_duplex)
+ writel(512, gp->regs + MAC_STIME);
+ else
+ writel(64, gp->regs + MAC_STIME);
+ val = readl(gp->regs + MAC_MCCFG);
+ if (pause)
+ val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+ else
+ val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+ writel(val, gp->regs + MAC_MCCFG);
+
+ gem_start_dma(gp);
+
+ return 0;
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static int gem_mdio_link_not_up(struct gem *gp)
+{
+ switch (gp->lstate) {
+ case link_force_ret:
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Autoneg failed again, keeping"
+ " forced mode\n", gp->dev->name);
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
+ gp->last_forced_speed, DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ gp->lstate = link_force_ok;
+ return 0;
+ case link_aneg:
+		/* We try forced modes after a failed aneg only on PHYs that don't
+		 * have the "magic_aneg" bit set; PHYs that do have it handle the
+		 * forced-mode fallback internally, so on those we just restart aneg.
+		 */
+ if (gp->phy_mii.def->magic_aneg)
+ return 1;
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: switching to forced 100bt\n",
+ gp->dev->name);
+ /* Try forced modes. */
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
+ DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ gp->lstate = link_force_try;
+ return 0;
+ case link_force_try:
+ /* Downgrade from 100 to 10 Mbps if necessary.
+ * If already at 10Mbps, warn user about the
+ * situation every 10 ticks.
+ */
+ if (gp->phy_mii.speed == SPEED_100) {
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
+ DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: switching to forced 10bt\n",
+ gp->dev->name);
+ return 0;
+ } else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void gem_link_timer(unsigned long data)
+{
+ struct gem *gp = (struct gem *) data;
+ int restart_aneg = 0;
+
+ if (gp->asleep)
+ return;
+
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+ gem_get_cell(gp);
+
+ /* If the reset task is still pending, we just
+ * reschedule the link timer
+ */
+ if (gp->reset_task_pending)
+ goto restart;
+
+ if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 val = readl(gp->regs + PCS_MIISTAT);
+
+ if (!(val & PCS_MIISTAT_LS))
+ val = readl(gp->regs + PCS_MIISTAT);
+
+ if ((val & PCS_MIISTAT_LS) != 0) {
+ gp->lstate = link_up;
+ netif_carrier_on(gp->dev);
+ (void)gem_set_link_modes(gp);
+ }
+ goto restart;
+ }
+ if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
+		/* OK, here we got a link. If we had it due to a forced
+		 * fallback, and we were configured for autoneg, we retry
+		 * a short autoneg pass. If you know your hub is
+		 * broken, use ethtool ;)
+		 */
+ if (gp->lstate == link_force_try && gp->want_autoneg) {
+ gp->lstate = link_force_ret;
+ gp->last_forced_speed = gp->phy_mii.speed;
+ gp->timer_ticks = 5;
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Got link after fallback, retrying"
+ " autoneg once...\n", gp->dev->name);
+ gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
+ } else if (gp->lstate != link_up) {
+ gp->lstate = link_up;
+ netif_carrier_on(gp->dev);
+ if (gem_set_link_modes(gp))
+ restart_aneg = 1;
+ }
+ } else {
+ /* If the link was previously up, we restart the
+ * whole process
+ */
+ if (gp->lstate == link_up) {
+ gp->lstate = link_down;
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Link down\n",
+ gp->dev->name);
+ netif_carrier_off(gp->dev);
+ gp->reset_task_pending = 1;
+ schedule_work(&gp->reset_task);
+ restart_aneg = 1;
+ } else if (++gp->timer_ticks > 10) {
+ if (found_mii_phy(gp))
+ restart_aneg = gem_mdio_link_not_up(gp);
+ else
+ restart_aneg = 1;
+ }
+ }
+ if (restart_aneg) {
+ gem_begin_auto_negotiation(gp, NULL);
+ goto out_unlock;
+ }
+restart:
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+out_unlock:
+ gem_put_cell(gp);
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_clean_rings(struct gem *gp)
+{
+ struct gem_init_block *gb = gp->init_block;
+ struct sk_buff *skb;
+ int i;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct gem_rxd *rxd;
+
+ rxd = &gb->rxd[i];
+ if (gp->rx_skbs[i] != NULL) {
+ skb = gp->rx_skbs[i];
+ dma_addr = le64_to_cpu(rxd->buffer);
+ pci_unmap_page(gp->pdev, dma_addr,
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ gp->rx_skbs[i] = NULL;
+ }
+ rxd->status_word = 0;
+ wmb();
+ rxd->buffer = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (gp->tx_skbs[i] != NULL) {
+ struct gem_txd *txd;
+ int frag;
+
+ skb = gp->tx_skbs[i];
+ gp->tx_skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ int ent = i & (TX_RING_SIZE - 1);
+
+ txd = &gb->txd[ent];
+ dma_addr = le64_to_cpu(txd->buffer);
+ pci_unmap_page(gp->pdev, dma_addr,
+ le64_to_cpu(txd->control_word) &
+ TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags)
+ i++;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_init_rings(struct gem *gp)
+{
+ struct gem_init_block *gb = gp->init_block;
+ struct net_device *dev = gp->dev;
+ int i;
+ dma_addr_t dma_addr;
+
+ gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
+
+ gem_clean_rings(gp);
+
+ gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
+ (unsigned)VLAN_ETH_FRAME_LEN);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ struct gem_rxd *rxd = &gb->rxd[i];
+
+ skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+ if (!skb) {
+ rxd->buffer = 0;
+ rxd->status_word = 0;
+ continue;
+ }
+
+ gp->rx_skbs[i] = skb;
+ skb->dev = dev;
+ skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
+ dma_addr = pci_map_page(gp->pdev,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ rxd->buffer = cpu_to_le64(dma_addr);
+ wmb();
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ skb_reserve(skb, RX_OFFSET);
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct gem_txd *txd = &gb->txd[i];
+
+ txd->control_word = 0;
+ wmb();
+ txd->buffer = 0;
+ }
+ wmb();
+}
+
+/* Init PHY interface and start link poll state machine */
+static void gem_init_phy(struct gem *gp)
+{
+ u32 mifcfg;
+
+ /* Revert MIF CFG setting done on stop_phy */
+ mifcfg = readl(gp->regs + MIF_CFG);
+ mifcfg &= ~MIF_CFG_BBMODE;
+ writel(mifcfg, gp->regs + MIF_CFG);
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
+ int i;
+
+		/* These delays suck, but the HW seems to want them. I'll
+		 * seriously consider breaking some locks here to be able
+		 * to schedule instead.
+		 */
+ for (i = 0; i < 3; i++) {
+#ifdef CONFIG_PPC_PMAC
+ pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
+ msleep(20);
+#endif
+			/* Some PHYs used by Apple have problems getting back to
+			 * us, so we do an additional reset here.
+			 */
+ phy_write(gp, MII_BMCR, BMCR_RESET);
+ msleep(20);
+ if (phy_read(gp, MII_BMCR) != 0xffff)
+ break;
+ if (i == 2)
+ printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
+ gp->dev->name);
+ }
+ }
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ u32 val;
+
+ /* Init datapath mode register. */
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ val = PCS_DMODE_MGM;
+ } else if (gp->phy_type == phy_serialink) {
+ val = PCS_DMODE_SM | PCS_DMODE_GMOE;
+ } else {
+ val = PCS_DMODE_ESM;
+ }
+
+ writel(val, gp->regs + PCS_DMODE);
+ }
+
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ // XXX check for errors
+ mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
+
+ /* Init PHY */
+ if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
+ gp->phy_mii.def->ops->init(&gp->phy_mii);
+ } else {
+ u32 val;
+ int limit;
+
+ /* Reset PCS unit. */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= PCS_MIICTRL_RST;
+ writeb(val, gp->regs + PCS_MIICTRL);
+
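+		/* Wait up to 32 * 100us for the reset bit to self-clear. */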
+ limit = 32;
+ while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+ udelay(100);
+ if (limit-- <= 0)
+ break;
+ }
+ if (limit <= 0)
+ printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+ gp->dev->name);
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ val = readl(gp->regs + PCS_CFG);
+ val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+ writel(val, gp->regs + PCS_CFG);
+
+		/* Advertise all capabilities except asymmetric
+		 * pause.
+		 */
+ val = readl(gp->regs + PCS_MIIADV);
+ val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+ PCS_MIIADV_SP | PCS_MIIADV_AP);
+ writel(val, gp->regs + PCS_MIIADV);
+
+ /* Enable and restart auto-negotiation, disable wrapback/loopback,
+ * and re-enable PCS.
+ */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+ val &= ~PCS_MIICTRL_WB;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ val = readl(gp->regs + PCS_CFG);
+ val |= PCS_CFG_ENABLE;
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Make sure serialink loopback is off. The meaning
+ * of this bit is logically inverted based upon whether
+ * you are in Serialink or SERDES mode.
+ */
+ val = readl(gp->regs + PCS_SCTRL);
+ if (gp->phy_type == phy_serialink)
+ val &= ~PCS_SCTRL_LOOP;
+ else
+ val |= PCS_SCTRL_LOOP;
+ writel(val, gp->regs + PCS_SCTRL);
+ }
+
+ /* Default aneg parameters */
+ gp->timer_ticks = 0;
+ gp->lstate = link_down;
+ netif_carrier_off(gp->dev);
+
+ /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
+ spin_lock_irq(&gp->lock);
+ gem_begin_auto_negotiation(gp, NULL);
+ spin_unlock_irq(&gp->lock);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_init_dma(struct gem *gp)
+{
+ u64 desc_dma = (u64) gp->gblock_dvma;
+ u32 val;
+
+ val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
+ writel(val, gp->regs + TXDMA_CFG);
+
+ writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
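+	/* The RX descriptor ring immediately follows the TX ring
+	 * inside the init block.
+	 */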
+ desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+
+ writel(0, gp->regs + TXDMA_KICK);
+
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+ ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+
+ writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+
+ val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+ val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+ writel(val, gp->regs + RXDMA_PTHRESH);
+
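+	/* Interrupt blanking: assert RX_DONE after 5 packets or after
+	 * 8 (66MHz bus) / 4 (33MHz bus) units of 2048 PCI clocks, which
+	 * keeps the coalescing delay roughly constant in wall-clock time.
+	 */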
+ if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((8 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ else
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((4 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static u32 gem_setup_multicast(struct gem *gp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if ((gp->dev->flags & IFF_ALLMULTI) ||
+ (gp->dev->mc_count > 256)) {
+ for (i=0; i<16; i++)
+ writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ } else if (gp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RXCFG_PROM;
+ } else {
+ u16 hash_table[16];
+ u32 crc;
+ struct dev_mc_list *dmi = gp->dev->mc_list;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ hash_table[i] = 0;
+
+ for (i = 0; i < gp->dev->mc_count; i++) {
+ char *addrs = dmi->dmi_addr;
+
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+
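+			/* The chip uses the top 8 bits of the little-endian CRC as
+			 * a 256-bin hash: bin N maps to bit (15 - N % 16) of hash
+			 * register N / 16.
+			 */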
+ crc = ether_crc_le(6, addrs);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ for (i=0; i<16; i++)
+ writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ }
+
+ return rxcfg;
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_init_mac(struct gem *gp)
+{
+ unsigned char *e = &gp->dev->dev_addr[0];
+
+ writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
+
+ writel(0x00, gp->regs + MAC_IPG0);
+ writel(0x08, gp->regs + MAC_IPG1);
+ writel(0x04, gp->regs + MAC_IPG2);
+ writel(0x40, gp->regs + MAC_STIME);
+ writel(0x40, gp->regs + MAC_MINFSZ);
+
+ /* Ethernet payload + header + FCS + optional VLAN tag. */
+ writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
+
+ writel(0x07, gp->regs + MAC_PASIZE);
+ writel(0x04, gp->regs + MAC_JAMSIZE);
+ writel(0x10, gp->regs + MAC_ATTLIM);
+ writel(0x8808, gp->regs + MAC_MCTYPE);
+
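+	/* Seed the chip's random number generator from the low 10
+	 * bits of the station address.
+	 */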
+ writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
+
+ writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+ writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+ writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
+
+ writel(0, gp->regs + MAC_ADDR3);
+ writel(0, gp->regs + MAC_ADDR4);
+ writel(0, gp->regs + MAC_ADDR5);
+
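+	/* MAC address registers 6-8 get the 802.3x flow control
+	 * multicast address 01:80:c2:00:00:01.
+	 */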
+ writel(0x0001, gp->regs + MAC_ADDR6);
+ writel(0xc200, gp->regs + MAC_ADDR7);
+ writel(0x0180, gp->regs + MAC_ADDR8);
+
+ writel(0, gp->regs + MAC_AFILT0);
+ writel(0, gp->regs + MAC_AFILT1);
+ writel(0, gp->regs + MAC_AFILT2);
+ writel(0, gp->regs + MAC_AF21MSK);
+ writel(0, gp->regs + MAC_AF0MSK);
+
+ gp->mac_rx_cfg = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+ gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
+#endif
+ writel(0, gp->regs + MAC_NCOLL);
+ writel(0, gp->regs + MAC_FASUCC);
+ writel(0, gp->regs + MAC_ECOLL);
+ writel(0, gp->regs + MAC_LCOLL);
+ writel(0, gp->regs + MAC_DTIMER);
+ writel(0, gp->regs + MAC_PATMPS);
+ writel(0, gp->regs + MAC_RFCTR);
+ writel(0, gp->regs + MAC_LERR);
+ writel(0, gp->regs + MAC_AERR);
+ writel(0, gp->regs + MAC_FCSERR);
+ writel(0, gp->regs + MAC_RXCVERR);
+
+ /* Clear RX/TX/MAC/XIF config, we will set these up and enable
+ * them once a link is established.
+ */
+ writel(0, gp->regs + MAC_TXCFG);
+ writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
+ writel(0, gp->regs + MAC_MCCFG);
+ writel(0, gp->regs + MAC_XIFCFG);
+
+ /* Setup MAC interrupts. We want to get all of the interesting
+ * counter expiration events, but we do not want to hear about
+ * normal rx/tx as the DMA engine tells us that.
+ */
+ writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
+ writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+
+	/* Don't even enable the PAUSE interrupts for now; we
+	 * make no use of those events other than to record them.
+	 */
+ writel(0xffffffff, gp->regs + MAC_MCMASK);
+
+ /* Don't enable GEM's WOL in normal operations
+ */
+ if (gp->has_wol)
+ writel(0, gp->regs + WOL_WAKECSR);
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_init_pause_thresholds(struct gem *gp)
+{
+ u32 cfg;
+
+ /* Calculate pause thresholds. Setting the OFF threshold to the
+ * full RX fifo size effectively disables PAUSE generation which
+ * is what we do for 10/100 only GEMs which have FIFOs too small
+ * to make real gains from PAUSE.
+ */
+ if (gp->rx_fifo_sz <= (2 * 1024)) {
+ gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
+ } else {
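+		/* Send XOFF once less than two max-sized frames of space
+		 * remain in the FIFO, and XON again when it drains one more
+		 * frame below that. Values are in bytes here; they are
+		 * converted to the chip's 64-byte units in gem_init_dma().
+		 */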
+ int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
+ int off = (gp->rx_fifo_sz - (max_frame * 2));
+ int on = off - max_frame;
+
+ gp->rx_pause_off = off;
+ gp->rx_pause_on = on;
+ }
+
+
+ /* Configure the chip "burst" DMA mode & enable some
+ * HW bug fixes on Apple version
+ */
+ cfg = 0;
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+ cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+ cfg |= GREG_CFG_IBURST;
+#endif
+ cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
+
+ /* If Infinite Burst didn't stick, then use different
+ * thresholds (and Apple bug fixes don't exist)
+ */
+ if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
+ cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
+ }
+}
+
+static int gem_check_invariants(struct gem *gp)
+{
+ struct pci_dev *pdev = gp->pdev;
+ u32 mif_cfg;
+
+	/* On Apple's sungem, we can't rely on registers as the chip
+	 * has been powered down by the firmware. The PHY is looked
+	 * up later on.
+	 */
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
+ gp->phy_type = phy_mii_mdio0;
+ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+ gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+ gp->swrst_base = 0;
+
+ mif_cfg = readl(gp->regs + MIF_CFG);
+ mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+ mif_cfg |= MIF_CFG_MDI0;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+ writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
+
+		/* We hard-code the PHY address so we can properly bring it out
+		 * of reset later on; we can't really probe it at this point,
+		 * though that isn't an issue.
+		 */
+ if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
+ gp->mii_phy_addr = 1;
+ else
+ gp->mii_phy_addr = 0;
+
+ return 0;
+ }
+
+ mif_cfg = readl(gp->regs + MIF_CFG);
+
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
+ /* One of the MII PHYs _must_ be present
+ * as this chip has no gigabit PHY.
+ */
+ if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
+ printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+ mif_cfg);
+ return -1;
+ }
+ }
+
+ /* Determine initial PHY interface type guess. MDIO1 is the
+ * external PHY and thus takes precedence over MDIO0.
+ */
+
+ if (mif_cfg & MIF_CFG_MDI1) {
+ gp->phy_type = phy_mii_mdio1;
+ mif_cfg |= MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else if (mif_cfg & MIF_CFG_MDI0) {
+ gp->phy_type = phy_mii_mdio0;
+ mif_cfg &= ~MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else {
+ gp->phy_type = phy_serialink;
+ }
+ if (gp->phy_type == phy_mii_mdio1 ||
+ gp->phy_type == phy_mii_mdio0) {
+ int i;
+
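+		/* Scan all 32 MDIO addresses; a BMCR read of 0xffff means
+		 * nothing answered at that address.
+		 */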
+ for (i = 0; i < 32; i++) {
+ gp->mii_phy_addr = i;
+ if (phy_read(gp, MII_BMCR) != 0xffff)
+ break;
+ }
+ if (i == 32) {
+ if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
+ printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
+ return -1;
+ }
+ gp->phy_type = phy_serdes;
+ }
+ }
+
+ /* Fetch the FIFO configurations now too. */
+ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+ gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+
+ if (pdev->vendor == PCI_VENDOR_ID_SUN) {
+ if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ if (gp->tx_fifo_sz != (9 * 1024) ||
+ gp->rx_fifo_sz != (20 * 1024)) {
+ printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
+ gp->swrst_base = 0;
+ } else {
+ if (gp->tx_fifo_sz != (2 * 1024) ||
+ gp->rx_fifo_sz != (2 * 1024)) {
+ printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
+ gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
+ }
+ }
+
+ return 0;
+}
+
+/* Must be invoked under gp->lock and gp->tx_lock. */
+static void gem_reinit_chip(struct gem *gp)
+{
+ /* Reset the chip */
+ gem_reset(gp);
+
+ /* Make sure ints are disabled */
+ gem_disable_ints(gp);
+
+ /* Allocate & setup ring buffers */
+ gem_init_rings(gp);
+
+ /* Configure pause thresholds */
+ gem_init_pause_thresholds(gp);
+
+ /* Init DMA & MAC engines */
+ gem_init_dma(gp);
+ gem_init_mac(gp);
+}
+
+
+/* Must be invoked with no lock held. */
+static void gem_stop_phy(struct gem *gp, int wol)
+{
+ u32 mifcfg;
+ unsigned long flags;
+
+	/* Let the chip settle down a bit; that seems to help
+	 * sleep mode on some models.
+	 */
+ msleep(10);
+
+	/* Make sure we aren't polling for PHY status changes; we
+	 * don't currently use that feature anyway.
+	 */
+ mifcfg = readl(gp->regs + MIF_CFG);
+ mifcfg &= ~MIF_CFG_POLL;
+ writel(mifcfg, gp->regs + MIF_CFG);
+
+ if (wol && gp->has_wol) {
+ unsigned char *e = &gp->dev->dev_addr[0];
+ u32 csr;
+
+ /* Setup wake-on-lan for MAGIC packet */
+ writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
+ gp->regs + MAC_RXCFG);
+ writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
+ writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
+ writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
+
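+		/* Arm the wake-up state machine, flagging MII (non-GMII)
+		 * operation when the XIF isn't currently in GMII mode.
+		 */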
+ writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
+ csr = WOL_WAKECSR_ENABLE;
+ if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
+ csr |= WOL_WAKECSR_MII;
+ writel(csr, gp->regs + WOL_WAKECSR);
+ } else {
+ writel(0, gp->regs + MAC_RXCFG);
+ (void)readl(gp->regs + MAC_RXCFG);
+		/* Machine sleep will die in strange ways if we
+		 * don't wait a bit here; it looks like the chip
+		 * takes some time to really shut down.
+		 */
+ msleep(10);
+ }
+
+ writel(0, gp->regs + MAC_TXCFG);
+ writel(0, gp->regs + MAC_XIFCFG);
+ writel(0, gp->regs + TXDMA_CFG);
+ writel(0, gp->regs + RXDMA_CFG);
+
+ if (!wol) {
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+ gem_reset(gp);
+ writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
+ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ /* No need to take the lock here */
+
+ if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
+ gp->phy_mii.def->ops->suspend(&gp->phy_mii);
+
+		/* According to Apple, we must set the MDIO pins to this benign
+		 * state or we may 1) eat more current, 2) damage some PHYs.
+		 */
+ writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+ writel(0, gp->regs + MIF_BBCLK);
+ writel(0, gp->regs + MIF_BBDATA);
+ writel(0, gp->regs + MIF_BBOENAB);
+ writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
+ (void) readl(gp->regs + MAC_XIFCFG);
+ }
+}
+
+
+static int gem_do_start(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+
+ /* Enable the cell */
+ gem_get_cell(gp);
+
+ /* Init & setup chip hardware */
+ gem_reinit_chip(gp);
+
+ gp->running = 1;
+
+ if (gp->lstate == link_up) {
+ netif_carrier_on(gp->dev);
+ gem_set_link_modes(gp);
+ }
+
+ netif_wake_queue(gp->dev);
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ if (request_irq(gp->pdev->irq, gem_interrupt,
+ SA_SHIRQ, dev->name, (void *)dev)) {
+ printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+
+ gp->running = 0;
+ gem_reset(gp);
+ gem_clean_rings(gp);
+ gem_put_cell(gp);
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void gem_do_stop(struct net_device *dev, int wol)
+{
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+
+ gp->running = 0;
+
+ /* Stop netif queue */
+ netif_stop_queue(dev);
+
+ /* Make sure ints are disabled */
+ gem_disable_ints(gp);
+
+ /* We can drop the lock now */
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+	/* Stop DMA; skip the full chip reset if we are going to sleep with WOL */
+ gem_stop_dma(gp);
+ msleep(10);
+ if (!wol)
+ gem_reset(gp);
+ msleep(10);
+
+ /* Get rid of rings */
+ gem_clean_rings(gp);
+
+ /* No irq needed anymore */
+ free_irq(gp->pdev->irq, (void *) dev);
+
+	/* The cell isn't needed either if WOL is disabled */
+ if (!wol) {
+ spin_lock_irqsave(&gp->lock, flags);
+ gem_put_cell(gp);
+ spin_unlock_irqrestore(&gp->lock, flags);
+ }
+}
+
+static void gem_reset_task(void *data)
+{
+ struct gem *gp = (struct gem *) data;
+
+ down(&gp->pm_sem);
+
+ netif_poll_disable(gp->dev);
+
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+
+ if (gp->running == 0)
+ goto not_running;
+
+ if (gp->running) {
+ netif_stop_queue(gp->dev);
+
+ /* Reset the chip & rings */
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
+ netif_wake_queue(gp->dev);
+ }
+ not_running:
+ gp->reset_task_pending = 0;
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+
+ netif_poll_enable(gp->dev);
+
+ up(&gp->pm_sem);
+}
+
+
+static int gem_open(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ int rc = 0;
+
+ down(&gp->pm_sem);
+
+ /* We need the cell enabled */
+ if (!gp->asleep)
+ rc = gem_do_start(dev);
+ gp->opened = (rc == 0);
+
+ up(&gp->pm_sem);
+
+ return rc;
+}
+
+static int gem_close(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+
+ /* Note: we don't need to call netif_poll_disable() here because
+ * our caller (dev_close) already did it for us
+ */
+
+ down(&gp->pm_sem);
+
+ gp->opened = 0;
+ if (!gp->asleep)
+ gem_do_stop(dev, 0);
+
+ up(&gp->pm_sem);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ down(&gp->pm_sem);
+
+ netif_poll_disable(dev);
+
+ printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
+ dev->name,
+ (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
+
+ /* Keep the cell enabled during the entire operation */
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+ gem_get_cell(gp);
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ /* If the driver is opened, we stop the MAC */
+ if (gp->opened) {
+ /* Stop traffic, mark us closed */
+ netif_device_detach(dev);
+
+ /* Switch off MAC, remember WOL setting */
+ gp->asleep_wol = gp->wake_on_lan;
+ gem_do_stop(dev, gp->asleep_wol);
+ } else
+ gp->asleep_wol = 0;
+
+ /* Mark us asleep */
+ gp->asleep = 1;
+ wmb();
+
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
+
+	/* Now we release the semaphore so as not to block the reset task,
+	 * which may take it too. We are marked asleep, so there will be
+	 * no conflict here.
+	 */
+ up(&gp->pm_sem);
+
+ /* Wait for a pending reset task to complete */
+ while (gp->reset_task_pending)
+ yield();
+ flush_scheduled_work();
+
+ /* Shut the PHY down eventually and setup WOL */
+ gem_stop_phy(gp, gp->asleep_wol);
+
+ /* Make sure bus master is disabled */
+ pci_disable_device(gp->pdev);
+
+ /* Release the cell, no need to take a lock at this point since
+ * nothing else can happen now
+ */
+ gem_put_cell(gp);
+
+ return 0;
+}
+
+static int gem_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ printk(KERN_INFO "%s: resuming\n", dev->name);
+
+ down(&gp->pm_sem);
+
+	/* Keep the cell enabled during the entire operation; no need to
+	 * take a lock here though, since nothing else can happen while
+	 * we are marked asleep.
+	 */
+ gem_get_cell(gp);
+
+ /* Make sure PCI access and bus master are enabled */
+ if (pci_enable_device(gp->pdev)) {
+ printk(KERN_ERR "%s: Can't re-enable chip !\n",
+ dev->name);
+		/* Put the cell and forget it for now; the chip will be
+		 * considered still asleep, and a new sleep cycle may bring
+		 * it back.
+		 */
+ gem_put_cell(gp);
+ up(&gp->pm_sem);
+ return 0;
+ }
+ pci_set_master(gp->pdev);
+
+ /* Reset everything */
+ gem_reset(gp);
+
+ /* Mark us woken up */
+ gp->asleep = 0;
+ wmb();
+
+ /* Bring the PHY back. Again, lock is useless at this point as
+ * nothing can be happening until we restart the whole thing
+ */
+ gem_init_phy(gp);
+
+ /* If we were opened, bring everything back */
+ if (gp->opened) {
+ /* Restart MAC */
+ gem_do_start(dev);
+
+ /* Re-attach net device */
+ netif_device_attach(dev);
+
+ }
+
+ spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
+
+	/* If we had WOL enabled, the cell clock was never turned off during
+	 * sleep, so we end up being unbalanced. Fix that here.
+	 */
+ if (gp->asleep_wol)
+ gem_put_cell(gp);
+
+	/* This function doesn't need to hold the cell; if the driver is
+	 * open, gem_do_start() will have taken it already.
+	 */
+ gem_put_cell(gp);
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ netif_poll_enable(dev);
+
+ up(&gp->pm_sem);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct net_device_stats *gem_get_stats(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ struct net_device_stats *stats = &gp->net_stats;
+
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+
+	/* I have seen this being called while a PM transition was in
+	 * progress, so we shield against that.
+	 */
+ if (gp->running) {
+ stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ writel(0, gp->regs + MAC_FCSERR);
+
+ stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+ writel(0, gp->regs + MAC_AERR);
+
+ stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+ writel(0, gp->regs + MAC_LERR);
+
+ stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ stats->collisions +=
+ (readl(gp->regs + MAC_ECOLL) +
+ readl(gp->regs + MAC_LCOLL));
+ writel(0, gp->regs + MAC_ECOLL);
+ writel(0, gp->regs + MAC_LCOLL);
+ }
+
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+
+ return &gp->net_stats;
+}
+
+static void gem_set_multicast(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ u32 rxcfg, rxcfg_new;
+ int limit = 10000;
+
+
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+
+ if (!gp->running)
+ goto bail;
+
+ netif_stop_queue(dev);
+
+ rxcfg = readl(gp->regs + MAC_RXCFG);
+ rxcfg_new = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+ rxcfg_new |= MAC_RXCFG_SFCS;
+#endif
+ gp->mac_rx_cfg = rxcfg_new;
+
+ writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+ while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
+ rxcfg |= rxcfg_new;
+
+ writel(rxcfg, gp->regs + MAC_RXCFG);
+
+ netif_wake_queue(dev);
+
+ bail:
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+}
+
+/* Jumbo-grams don't seem to work :-( */
+#define GEM_MIN_MTU 68
+#if 1
+#define GEM_MAX_MTU 1500
+#else
+#define GEM_MAX_MTU 9000
+#endif
+
+static int gem_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct gem *gp = dev->priv;
+
+ if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
+ return -EINVAL;
+
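+			/* Walk the head descriptor plus one per fragment, advancing
+			 * i past every descriptor this skb used.
+			 */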
+ if (!netif_running(dev) || !netif_device_present(dev)) {
+		/* We'll just catch it later when the
+		 * device is brought up or resumed.
+		 */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ down(&gp->pm_sem);
+ spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
+ dev->mtu = new_mtu;
+ if (gp->running) {
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
+ }
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irq(&gp->lock);
+ up(&gp->pm_sem);
+
+ return 0;
+}
+
+static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct gem *gp = dev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(gp->pdev));
+}
+
+static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gem *gp = dev->priv;
+
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ if (gp->phy_mii.def)
+ cmd->supported = gp->phy_mii.def->features;
+ else
+ cmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full);
+
+ /* XXX hardcoded stuff for now */
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+ /* Return current PHY settings */
+ spin_lock_irq(&gp->lock);
+ cmd->autoneg = gp->want_autoneg;
+ cmd->speed = gp->phy_mii.speed;
+ cmd->duplex = gp->phy_mii.duplex;
+ cmd->advertising = gp->phy_mii.advertising;
+
+		/* If we started with a forced mode, we don't have a default
+		 * advertise set, so we need to return something sensible for
+		 * userland to be able to re-enable autoneg properly.
+		 */
+ if (cmd->advertising == 0)
+ cmd->advertising = cmd->supported;
+ spin_unlock_irq(&gp->lock);
+ } else { // XXX PCS ?
+ cmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg);
+ cmd->advertising = cmd->supported;
+ cmd->speed = 0;
+ cmd->duplex = cmd->port = cmd->phy_address =
+ cmd->transceiver = cmd->autoneg = 0;
+ }
+ cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gem *gp = dev->priv;
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE &&
+ cmd->advertising == 0)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((cmd->speed != SPEED_1000 &&
+ cmd->speed != SPEED_100 &&
+ cmd->speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Apply settings and restart link process. */
+ spin_lock_irq(&gp->lock);
+ gem_get_cell(gp);
+ gem_begin_auto_negotiation(gp, cmd);
+ gem_put_cell(gp);
+ spin_unlock_irq(&gp->lock);
+
+ return 0;
+}
+
+static int gem_nway_reset(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+
+ if (!gp->want_autoneg)
+ return -EINVAL;
+
+ /* Restart link process. */
+ spin_lock_irq(&gp->lock);
+ gem_get_cell(gp);
+ gem_begin_auto_negotiation(gp, NULL);
+ gem_put_cell(gp);
+ spin_unlock_irq(&gp->lock);
+
+ return 0;
+}
+
+static u32 gem_get_msglevel(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+ return gp->msg_enable;
+}
+
+static void gem_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct gem *gp = dev->priv;
+ gp->msg_enable = value;
+}
+
+
+/* Add more when I understand how to program the chip */
+/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
+
+#define WOL_SUPPORTED_MASK (WAKE_MAGIC)
+
+static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gem *gp = dev->priv;
+
+ /* Add more when I understand how to program the chip */
+ if (gp->has_wol) {
+ wol->supported = WOL_SUPPORTED_MASK;
+ wol->wolopts = gp->wake_on_lan;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+}
+
+static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gem *gp = dev->priv;
+
+ if (!gp->has_wol)
+ return -EOPNOTSUPP;
+ gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
+ return 0;
+}
+
+static struct ethtool_ops gem_ethtool_ops = {
+ .get_drvinfo = gem_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = gem_get_settings,
+ .set_settings = gem_set_settings,
+ .nway_reset = gem_nway_reset,
+ .get_msglevel = gem_get_msglevel,
+ .set_msglevel = gem_set_msglevel,
+ .get_wol = gem_get_wol,
+ .set_wol = gem_set_wol,
+};
+
+static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct gem *gp = dev->priv;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int rc = -EOPNOTSUPP;
+ unsigned long flags;
+
+ /* Hold the PM semaphore while doing ioctl's or we may collide
+ * with power management.
+ */
+ down(&gp->pm_sem);
+
+ spin_lock_irqsave(&gp->lock, flags);
+ gem_get_cell(gp);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = gp->mii_phy_addr;
+ /* Fallthrough... */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ if (!gp->running)
+ rc = -EAGAIN;
+ else {
+ data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ rc = 0;
+ }
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ rc = -EPERM;
+ else if (!gp->running)
+ rc = -EAGAIN;
+ else {
+ __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+ data->val_in);
+ rc = 0;
+ }
+ break;
+ };
+
+ spin_lock_irqsave(&gp->lock, flags);
+ gem_put_cell(gp);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+ up(&gp->pm_sem);
+
+ return rc;
+}
+
+#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC))
+/* Fetch MAC address from vital product data of PCI ROM. */
+static void find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
+{
+ int this_offset;
+
+ for (this_offset = 0x20; this_offset < len; this_offset++) {
+ void __iomem *p = rom_base + this_offset;
+ int i;
+
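+		/* Look for a VPD "NA" (network address) keyword with a
+		 * 6-byte payload; that payload is the MAC address.
+		 */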
+ if (readb(p + 0) != 0x90 ||
+ readb(p + 1) != 0x00 ||
+ readb(p + 2) != 0x09 ||
+ readb(p + 3) != 0x4e ||
+ readb(p + 4) != 0x41 ||
+ readb(p + 5) != 0x06)
+ continue;
+
+ this_offset += 6;
+ p += 6;
+
+ for (i = 0; i < 6; i++)
+ dev_addr[i] = readb(p + i);
+ break;
+ }
+}
+
+static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+ u32 rom_reg_orig;
+ void __iomem *p;
+
+ if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
+ if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
+ goto use_random;
+ }
+
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig);
+ pci_write_config_dword(pdev, pdev->rom_base_reg,
+ rom_reg_orig | PCI_ROM_ADDRESS_ENABLE);
+
+ p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024));
+ if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa)
+ find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
+
+ if (p != NULL)
+ iounmap(p);
+
+ pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig);
+ return;
+
+use_random:
+ /* Sun MAC prefix then 3 random bytes. */
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(dev_addr + 3, 3);
+ return;
+}
+#endif /* not Sparc and not PPC */
+
+static int __devinit gem_get_device_address(struct gem *gp)
+{
+#if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
+ struct net_device *dev = gp->dev;
+#endif
+
+#if defined(__sparc__)
+ struct pci_dev *pdev = gp->pdev;
+ struct pcidev_cookie *pcp = pdev->sysdata;
+ int node = -1;
+
+ if (pcp != NULL) {
+ node = pcp->prom_node;
+ if (prom_getproplen(node, "local-mac-address") == 6)
+ prom_getproperty(node, "local-mac-address",
+ dev->dev_addr, 6);
+ else
+ node = -1;
+ }
+ if (node == -1)
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+#elif defined(CONFIG_PPC_PMAC)
+ unsigned char *addr;
+
+ addr = get_property(gp->of_node, "local-mac-address", NULL);
+ if (addr == NULL) {
+ printk("\n");
+ printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
+ return -1;
+ }
+ memcpy(dev->dev_addr, addr, 6);
+#else
+ get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+#endif
+ return 0;
+}
+
+static void __devexit gem_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct gem *gp = dev->priv;
+
+ unregister_netdev(dev);
+
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
+
+ /* We shouldn't need any locking here */
+ gem_get_cell(gp);
+
+ /* Wait for a pending reset task to complete */
+ while (gp->reset_task_pending)
+ yield();
+ flush_scheduled_work();
+
+ /* Shut the PHY down */
+ gem_stop_phy(gp, 0);
+
+ gem_put_cell(gp);
+
+ /* Make sure bus master is disabled */
+ pci_disable_device(gp->pdev);
+
+ /* Free resources */
+ pci_free_consistent(pdev,
+ sizeof(struct gem_init_block),
+ gp->init_block,
+ gp->gblock_dvma);
+ iounmap(gp->regs);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static int __devinit gem_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int gem_version_printed = 0;
+ unsigned long gemreg_base, gemreg_len;
+ struct net_device *dev;
+ struct gem *gp;
+ int i, err, pci_using_dac;
+
+ if (gem_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+	/* Apple gmac note: during probe, the chip is powered up by
+	 * the arch code to allow the code below to work (and to let
+	 * the chip be probed via the config space). It won't stay
+	 * powered up until the interface is brought up, however, so
+	 * we can't rely on register configuration done at this point.
+	 */
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable MMIO operation, "
+ "aborting.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* Configure DMA attributes. */
+
+ /* All of the GEM documentation states that 64-bit DMA addressing
+ * is fully supported and should work just fine. However the
+ * front end for RIO based GEMs is different and only supports
+ * 32-bit addressing.
+ *
+ * For now we assume the various PPC GEMs are 32-bit only as well.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_GEM &&
+ !pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL)) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_disable_device;
+ }
+ pci_using_dac = 0;
+ }
+
+ gemreg_base = pci_resource_start(pdev, 0);
+ gemreg_len = pci_resource_len(pdev, 0);
+
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+ printk(KERN_ERR PFX "Cannot find proper PCI device "
+ "base address, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_device;
+ }
+
+ dev = alloc_etherdev(sizeof(*gp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ gp = dev->priv;
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_free_netdev;
+ }
+
+ gp->pdev = pdev;
+ dev->base_addr = (long) pdev;
+ gp->dev = dev;
+
+ gp->msg_enable = DEFAULT_MSG;
+
+ spin_lock_init(&gp->lock);
+ spin_lock_init(&gp->tx_lock);
+ init_MUTEX(&gp->pm_sem);
+
+ init_timer(&gp->link_timer);
+ gp->link_timer.function = gem_link_timer;
+ gp->link_timer.data = (unsigned long) gp;
+
+ INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+
+ gp->lstate = link_down;
+ gp->timer_ticks = 0;
+ netif_carrier_off(dev);
+
+ gp->regs = ioremap(gemreg_base, gemreg_len);
+ if (gp->regs == 0UL) {
+ printk(KERN_ERR PFX "Cannot map device registers, "
+ "aborting.\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+
+ /* On Apple, we want a reference to the Open Firmware device-tree
+ * node. We use it for clock control.
+ */
+#ifdef CONFIG_PPC_PMAC
+ gp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+ /* Only Apple version supports WOL afaik */
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE)
+ gp->has_wol = 1;
+
+ /* Make sure cell is enabled */
+ gem_get_cell(gp);
+
+ /* Make sure everything is stopped and in init state */
+ gem_reset(gp);
+
+ /* Fill up the mii_phy structure (even if we won't use it) */
+ gp->phy_mii.dev = dev;
+ gp->phy_mii.mdio_read = _phy_read;
+ gp->phy_mii.mdio_write = _phy_write;
+
+ /* By default, we start with autoneg */
+ gp->want_autoneg = 1;
+
+ /* Check fifo sizes, PHY type, etc... */
+ if (gem_check_invariants(gp)) {
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+ /* It is guaranteed that the returned buffer will be at least
+ * PAGE_SIZE aligned.
+ */
+ gp->init_block = (struct gem_init_block *)
+ pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
+ &gp->gblock_dvma);
+ if (!gp->init_block) {
+ printk(KERN_ERR PFX "Cannot allocate init block, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ if (gem_get_device_address(gp))
+ goto err_out_free_consistent;
+
+ dev->open = gem_open;
+ dev->stop = gem_close;
+ dev->hard_start_xmit = gem_start_xmit;
+ dev->get_stats = gem_get_stats;
+ dev->set_multicast_list = gem_set_multicast;
+ dev->do_ioctl = gem_ioctl;
+ dev->poll = gem_poll;
+ dev->weight = 64;
+ dev->ethtool_ops = &gem_ethtool_ops;
+ dev->tx_timeout = gem_tx_timeout;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->change_mtu = gem_change_mtu;
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = gem_poll_controller;
+#endif
+
+ /* Set that now, in case PM kicks in now */
+ pci_set_drvdata(pdev, dev);
+
+	/* Detect & init PHY and start autoneg. We release the cell now
+	 * too; it will be managed by whoever needs it.
+	 */
+ gem_init_phy(gp);
+
+ spin_lock_irq(&gp->lock);
+ gem_put_cell(gp);
+ spin_unlock_irq(&gp->lock);
+
+ /* Register with kernel */
+ if (register_netdev(dev)) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_consistent;
+ }
+
+ printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
+ dev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? ' ' : ':');
+ printk("\n");
+
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1)
+ printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
+ gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+
+ /* GEM can do it all... */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ return 0;
+
+err_out_free_consistent:
+ gem_remove_one(pdev);
+err_out_iounmap:
+ gem_put_cell(gp);
+ iounmap(gp->regs);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_free_netdev:
+ free_netdev(dev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+
+}
+
+
+static struct pci_driver gem_driver = {
+ .name = GEM_MODULE_NAME,
+ .id_table = gem_pci_tbl,
+ .probe = gem_init_one,
+ .remove = __devexit_p(gem_remove_one),
+#ifdef CONFIG_PM
+ .suspend = gem_suspend,
+ .resume = gem_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init gem_init(void)
+{
+ return pci_module_init(&gem_driver);
+}
+
+static void __exit gem_cleanup(void)
+{
+ pci_unregister_driver(&gem_driver);
+}
+
+module_init(gem_init);
+module_exit(gem_cleanup);
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
new file mode 100644
index 000000000000..7143fd7cf3f8
--- /dev/null
+++ b/drivers/net/sungem.h
@@ -0,0 +1,1051 @@
+/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $
+ * sungem.h: Definitions for Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNGEM_H
+#define _SUNGEM_H
+
+/* Global Registers */
+#define GREG_SEBSTATE 0x0000UL /* SEB State Register */
+#define GREG_CFG 0x0004UL /* Configuration Register */
+#define GREG_STAT 0x000CUL /* Status Register */
+#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
+#define GREG_IACK 0x0014UL /* Interrupt ACK Register */
+#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
+#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
+#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */
+#define GREG_BIFCFG 0x1008UL /* BIF Configuration Register */
+#define GREG_BIFDIAG 0x100CUL /* BIF Diagnostics Register */
+#define GREG_SWRST 0x1010UL /* Software Reset Register */
+
+/* Global SEB State Register */
+#define GREG_SEBSTATE_ARB 0x00000003 /* State of Arbiter */
+#define GREG_SEBSTATE_RXWON 0x00000004 /* RX won internal arbitration */
+
+/* Global Configuration Register */
+#define GREG_CFG_IBURST 0x00000001 /* Infinite Burst */
+#define GREG_CFG_TXDMALIM 0x0000003e /* TX DMA grant limit */
+#define GREG_CFG_RXDMALIM 0x000007c0 /* RX DMA grant limit */
+#define GREG_CFG_RONPAULBIT 0x00000800 /* Use mem read multiple for PCI read
+ * after infinite burst (Apple) */
+#define GREG_CFG_ENBUG2FIX 0x00001000 /* Fix Rx hang after overflow */
+
+/* Global Interrupt Status Register.
+ *
+ * Reading this register automatically clears bits 0 through 6.
+ * This auto-clearing does not occur when the alias at GREG_STAT2
+ * is read instead. The rest of the interrupt bits only clear when
+ * the secondary interrupt status register corresponding to that
+ * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by
+ * reading PCS_ISTAT).
+ */
+#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
+#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
+#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
+#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
+#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
+#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
+#define GREG_STAT_PCS 0x00002000 /* PCS signalled interrupt */
+#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
+#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
+#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
+#define GREG_STAT_MIF 0x00020000 /* MIF signalled interrupt */
+#define GREG_STAT_PCIERR 0x00040000 /* PCI Error interrupt */
+#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
+#define GREG_STAT_TXNR_SHIFT 19
+
+#define GREG_STAT_ABNORMAL (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \
+ GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \
+ GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR)
+
+#define GREG_STAT_NAPI (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
+ GREG_STAT_RXDONE | GREG_STAT_ABNORMAL)
+
+/* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT.
+ * Bits set in GREG_IMASK will prevent that interrupt type from being
+ * signalled to the cpu. GREG_IACK can be used to clear specific top-level
+ * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6.
+ * Setting a bit will clear that interrupt; cleared bits have no effect
+ * on GREG_STAT.
+ */
+
+/* Global PCI Error Status Register */
+#define GREG_PCIESTAT_BADACK 0x00000001 /* No ACK64# during ABS64 cycle */
+#define GREG_PCIESTAT_DTRTO 0x00000002 /* Delayed transaction timeout */
+#define GREG_PCIESTAT_OTHER 0x00000004 /* Other PCI error, check cfg space */
+
+/* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT.
+ * Bits set in GREG_PCIEMASK will prevent that interrupt type from being
+ * signalled to the cpu.
+ */
+
+/* Global BIF Configuration Register */
+#define GREG_BIFCFG_SLOWCLK 0x00000001 /* Set if PCI runs < 25Mhz */
+#define GREG_BIFCFG_B64DIS 0x00000002 /* Disable 64bit wide data cycle*/
+#define GREG_BIFCFG_M66EN 0x00000004 /* Set if on 66Mhz PCI segment */
+
+/* Global BIF Diagnostics Register */
+#define GREG_BIFDIAG_BURSTSM 0x007f0000 /* PCI Burst state machine */
+#define GREG_BIFDIAG_BIFSM 0xff000000 /* BIF state machine */
+
+/* Global Software Reset Register.
+ *
+ * This register is used to perform a global reset of the RX and TX portions
+ * of the GEM asic. Setting the RX or TX reset bit will start the reset.
+ * The driver _MUST_ poll these bits until they clear. One may not attempt
+ * to program any other part of GEM until the bits clear.
+ */
+#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
+#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
+#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */
+#define GREG_SWRST_CACHESIZE 0x00ff0000 /* RIO only: cache line size */
+#define GREG_SWRST_CACHE_SHIFT 16
+
+/* TX DMA Registers */
+#define TXDMA_KICK 0x2000UL /* TX Kick Register */
+#define TXDMA_CFG 0x2004UL /* TX Configuration Register */
+#define TXDMA_DBLOW 0x2008UL /* TX Desc. Base Low */
+#define TXDMA_DBHI 0x200CUL /* TX Desc. Base High */
+#define TXDMA_FWPTR 0x2014UL /* TX FIFO Write Pointer */
+#define TXDMA_FSWPTR 0x2018UL /* TX FIFO Shadow Write Pointer */
+#define TXDMA_FRPTR 0x201CUL /* TX FIFO Read Pointer */
+#define TXDMA_FSRPTR 0x2020UL /* TX FIFO Shadow Read Pointer */
+#define TXDMA_PCNT 0x2024UL /* TX FIFO Packet Counter */
+#define TXDMA_SMACHINE 0x2028UL /* TX State Machine Register */
+#define TXDMA_DPLOW 0x2030UL /* TX Data Pointer Low */
+#define TXDMA_DPHI 0x2034UL /* TX Data Pointer High */
+#define TXDMA_TXDONE 0x2100UL /* TX Completion Register */
+#define TXDMA_FADDR 0x2104UL /* TX FIFO Address */
+#define TXDMA_FTAG 0x2108UL /* TX FIFO Tag */
+#define TXDMA_DLOW 0x210CUL /* TX FIFO Data Low */
+#define TXDMA_DHIT1 0x2110UL /* TX FIFO Data HighT1 */
+#define TXDMA_DHIT0 0x2114UL /* TX FIFO Data HighT0 */
+#define TXDMA_FSZ 0x2118UL /* TX FIFO Size */
+
+/* TX Kick Register.
+ *
+ * This 13-bit register is programmed by the driver to hold the descriptor
+ * entry index which follows the last valid transmit descriptor.
+ */
+
+/* TX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to hold the descriptor entry index
+ * which follows the last descriptor already processed by GEM. Note that
+ * this value is mirrored in GREG_STAT which eliminates the need to even
+ * access this register in the driver during interrupt processing.
+ */
+
+/* TX Configuration Register.
+ *
+ * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature
+ * that was meant to be used with jumbo packets. It should be set to the
+ * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors.
+ */
+#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
+#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */
+#define TXDMA_CFG_RINGSZ_32 0x00000000 /* 32 TX descriptors */
+#define TXDMA_CFG_RINGSZ_64 0x00000002 /* 64 TX descriptors */
+#define TXDMA_CFG_RINGSZ_128 0x00000004 /* 128 TX descriptors */
+#define TXDMA_CFG_RINGSZ_256 0x00000006 /* 256 TX descriptors */
+#define TXDMA_CFG_RINGSZ_512 0x00000008 /* 512 TX descriptors */
+#define TXDMA_CFG_RINGSZ_1K 0x0000000a /* 1024 TX descriptors */
+#define TXDMA_CFG_RINGSZ_2K 0x0000000c /* 2048 TX descriptors */
+#define TXDMA_CFG_RINGSZ_4K 0x0000000e /* 4096 TX descriptors */
+#define TXDMA_CFG_RINGSZ_8K 0x00000010 /* 8192 TX descriptors */
+#define TXDMA_CFG_PIOSEL 0x00000020 /* Enable TX FIFO PIO from cpu */
+#define TXDMA_CFG_FTHRESH 0x001ffc00 /* TX FIFO Threshold, obsolete */
+#define TXDMA_CFG_PMODE 0x00200000 /* TXALL irq means TX FIFO empty*/
+
+/* TX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the TX descriptor table. The 11 least significant bits are always
+ * zero. As a result, the TX descriptor table must be 2K aligned.
+ */
+
+/* The rest of the TXDMA_* registers are for diagnostics and debug, I will document
+ * them later. -DaveM
+ */
+
+/* WakeOnLan Registers */
+#define WOL_MATCH0 0x3000UL
+#define WOL_MATCH1 0x3004UL
+#define WOL_MATCH2 0x3008UL
+#define WOL_MCOUNT 0x300CUL
+#define WOL_WAKECSR 0x3010UL
+
+/* WOL Match count register
+ */
+#define WOL_MCOUNT_N 0x00000010
+#define WOL_MCOUNT_M 0x00000000 /* 0 << 8 */
+
+#define WOL_WAKECSR_ENABLE 0x00000001
+#define WOL_WAKECSR_MII 0x00000002
+#define WOL_WAKECSR_SEEN 0x00000004
+#define WOL_WAKECSR_FILT_UCAST 0x00000008
+#define WOL_WAKECSR_FILT_MCAST 0x00000010
+#define WOL_WAKECSR_FILT_BCAST 0x00000020
+#define WOL_WAKECSR_FILT_SEEN 0x00000040
+
+
+/* Receive DMA Registers */
+#define RXDMA_CFG 0x4000UL /* RX Configuration Register */
+#define RXDMA_DBLOW 0x4004UL /* RX Descriptor Base Low */
+#define RXDMA_DBHI 0x4008UL /* RX Descriptor Base High */
+#define RXDMA_FWPTR 0x400CUL /* RX FIFO Write Pointer */
+#define RXDMA_FSWPTR 0x4010UL /* RX FIFO Shadow Write Pointer */
+#define RXDMA_FRPTR 0x4014UL /* RX FIFO Read Pointer */
+#define RXDMA_PCNT 0x4018UL /* RX FIFO Packet Counter */
+#define RXDMA_SMACHINE 0x401CUL /* RX State Machine Register */
+#define RXDMA_PTHRESH 0x4020UL /* Pause Thresholds */
+#define RXDMA_DPLOW 0x4024UL /* RX Data Pointer Low */
+#define RXDMA_DPHI 0x4028UL /* RX Data Pointer High */
+#define RXDMA_KICK 0x4100UL /* RX Kick Register */
+#define RXDMA_DONE 0x4104UL /* RX Completion Register */
+#define RXDMA_BLANK 0x4108UL /* RX Blanking Register */
+#define RXDMA_FADDR 0x410CUL /* RX FIFO Address */
+#define RXDMA_FTAG 0x4110UL /* RX FIFO Tag */
+#define RXDMA_DLOW 0x4114UL /* RX FIFO Data Low */
+#define RXDMA_DHIT1 0x4118UL /* RX FIFO Data HighT1 */
+#define RXDMA_DHIT0 0x411CUL /* RX FIFO Data HighT0 */
+#define RXDMA_FSZ 0x4120UL /* RX FIFO Size */
+
+/* RX Configuration Register. */
+#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
+#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
+#define RXDMA_CFG_RINGSZ_32 0x00000000 /* - 32 entries */
+#define RXDMA_CFG_RINGSZ_64 0x00000002 /* - 64 entries */
+#define RXDMA_CFG_RINGSZ_128 0x00000004 /* - 128 entries */
+#define RXDMA_CFG_RINGSZ_256 0x00000006 /* - 256 entries */
+#define RXDMA_CFG_RINGSZ_512 0x00000008 /* - 512 entries */
+#define RXDMA_CFG_RINGSZ_1K 0x0000000a /* - 1024 entries */
+#define RXDMA_CFG_RINGSZ_2K 0x0000000c /* - 2048 entries */
+#define RXDMA_CFG_RINGSZ_4K 0x0000000e /* - 4096 entries */
+#define RXDMA_CFG_RINGSZ_8K 0x00000010 /* - 8192 entries */
+#define RXDMA_CFG_RINGSZ_BDISAB 0x00000020 /* Disable RX desc batching */
+#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
+#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */
+#define RXDMA_CFG_FTHRESH 0x07000000 /* RX FIFO dma start threshold */
+#define RXDMA_CFG_FTHRESH_64 0x00000000 /* - 64 bytes */
+#define RXDMA_CFG_FTHRESH_128 0x01000000 /* - 128 bytes */
+#define RXDMA_CFG_FTHRESH_256 0x02000000 /* - 256 bytes */
+#define RXDMA_CFG_FTHRESH_512 0x03000000 /* - 512 bytes */
+#define RXDMA_CFG_FTHRESH_1K 0x04000000 /* - 1024 bytes */
+#define RXDMA_CFG_FTHRESH_2K 0x05000000 /* - 2048 bytes */
+
+/* RX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the RX descriptor table. The 11 least significant bits are always
+ * zero. As a result, the RX descriptor table must be 2K aligned.
+ */
+
+/* RX PAUSE Thresholds.
+ *
+ * These values determine when XOFF and XON PAUSE frames are emitted by
+ * GEM. The thresholds measure RX FIFO occupancy in units of 64 bytes.
+ */
+#define RXDMA_PTHRESH_OFF 0x000001ff /* XOFF emitted w/FIFO > this */
+#define RXDMA_PTHRESH_ON 0x001ff000 /* XON emitted w/FIFO < this */
+
+/* RX Kick Register.
+ *
+ * This 13-bit register is written by the host CPU and holds the last
+ * valid RX descriptor number plus one. That is, if 'N' is written to
+ * this register, it means that all RX descriptors up to but excluding
+ * 'N' are valid.
+ *
+ * The hardware requires that RX descriptors are posted in increments
+ * of 4. This means 'N' must be a multiple of four. For the best
+ * performance, the first new descriptor being posted should be (PCI)
+ * cache line aligned.
+ */
+
+/* RX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to indicate which RX descriptors
+ * have already been used for receive frames. All descriptors up to but
+ * excluding the value in this register are ready to be processed. GEM
+ * updates this register value after the RX FIFO empties completely into
+ * the RX descriptor's buffer, but before the RX_DONE bit is set in the
+ * interrupt status register.
+ */
+
+/* RX Blanking Register. */
+#define RXDMA_BLANK_IPKTS 0x000001ff /* RX_DONE asserted after this
+ * many packets received since
+ * previous RX_DONE.
+ */
+#define RXDMA_BLANK_ITIME 0x000ff000 /* RX_DONE asserted after this
+ * many clocks (measured in 2048
+ * PCI clocks) were counted since
+ * the previous RX_DONE.
+ */
+
+/* RX FIFO Size.
+ *
+ * This 11-bit read-only register indicates how large, in units of 64-bytes,
+ * the RX FIFO is. The driver uses this to properly configure the RX PAUSE
+ * thresholds.
+ */
+
+/* The rest of the RXDMA_* registers are for diagnostics and debug, I will document
+ * them later. -DaveM
+ */
+
+/* MAC Registers */
+#define MAC_TXRST 0x6000UL /* TX MAC Software Reset Command*/
+#define MAC_RXRST 0x6004UL /* RX MAC Software Reset Command*/
+#define MAC_SNDPAUSE 0x6008UL /* Send Pause Command Register */
+#define MAC_TXSTAT 0x6010UL /* TX MAC Status Register */
+#define MAC_RXSTAT 0x6014UL /* RX MAC Status Register */
+#define MAC_CSTAT 0x6018UL /* MAC Control Status Register */
+#define MAC_TXMASK 0x6020UL /* TX MAC Mask Register */
+#define MAC_RXMASK 0x6024UL /* RX MAC Mask Register */
+#define MAC_MCMASK 0x6028UL /* MAC Control Mask Register */
+#define MAC_TXCFG 0x6030UL /* TX MAC Configuration Register*/
+#define MAC_RXCFG 0x6034UL /* RX MAC Configuration Register*/
+#define MAC_MCCFG 0x6038UL /* MAC Control Config Register */
+#define MAC_XIFCFG 0x603CUL /* XIF Configuration Register */
+#define MAC_IPG0 0x6040UL /* InterPacketGap0 Register */
+#define MAC_IPG1 0x6044UL /* InterPacketGap1 Register */
+#define MAC_IPG2 0x6048UL /* InterPacketGap2 Register */
+#define MAC_STIME 0x604CUL /* SlotTime Register */
+#define MAC_MINFSZ 0x6050UL /* MinFrameSize Register */
+#define MAC_MAXFSZ 0x6054UL /* MaxFrameSize Register */
+#define MAC_PASIZE 0x6058UL /* PA Size Register */
+#define MAC_JAMSIZE 0x605CUL /* JamSize Register */
+#define MAC_ATTLIM 0x6060UL /* Attempt Limit Register */
+#define MAC_MCTYPE 0x6064UL /* MAC Control Type Register */
+#define MAC_ADDR0 0x6080UL /* MAC Address 0 Register */
+#define MAC_ADDR1 0x6084UL /* MAC Address 1 Register */
+#define MAC_ADDR2 0x6088UL /* MAC Address 2 Register */
+#define MAC_ADDR3 0x608CUL /* MAC Address 3 Register */
+#define MAC_ADDR4 0x6090UL /* MAC Address 4 Register */
+#define MAC_ADDR5 0x6094UL /* MAC Address 5 Register */
+#define MAC_ADDR6 0x6098UL /* MAC Address 6 Register */
+#define MAC_ADDR7 0x609CUL /* MAC Address 7 Register */
+#define MAC_ADDR8 0x60A0UL /* MAC Address 8 Register */
+#define MAC_AFILT0 0x60A4UL /* Address Filter 0 Register */
+#define MAC_AFILT1 0x60A8UL /* Address Filter 1 Register */
+#define MAC_AFILT2 0x60ACUL /* Address Filter 2 Register */
+#define MAC_AF21MSK 0x60B0UL /* Address Filter 2&1 Mask Reg */
+#define MAC_AF0MSK 0x60B4UL /* Address Filter 0 Mask Reg */
+#define MAC_HASH0 0x60C0UL /* Hash Table 0 Register */
+#define MAC_HASH1 0x60C4UL /* Hash Table 1 Register */
+#define MAC_HASH2 0x60C8UL /* Hash Table 2 Register */
+#define MAC_HASH3 0x60CCUL /* Hash Table 3 Register */
+#define MAC_HASH4 0x60D0UL /* Hash Table 4 Register */
+#define MAC_HASH5 0x60D4UL /* Hash Table 5 Register */
+#define MAC_HASH6 0x60D8UL /* Hash Table 6 Register */
+#define MAC_HASH7 0x60DCUL /* Hash Table 7 Register */
+#define MAC_HASH8 0x60E0UL /* Hash Table 8 Register */
+#define MAC_HASH9 0x60E4UL /* Hash Table 9 Register */
+#define MAC_HASH10 0x60E8UL /* Hash Table 10 Register */
+#define MAC_HASH11 0x60ECUL /* Hash Table 11 Register */
+#define MAC_HASH12 0x60F0UL /* Hash Table 12 Register */
+#define MAC_HASH13 0x60F4UL /* Hash Table 13 Register */
+#define MAC_HASH14 0x60F8UL /* Hash Table 14 Register */
+#define MAC_HASH15 0x60FCUL /* Hash Table 15 Register */
+#define MAC_NCOLL 0x6100UL /* Normal Collision Counter */
+#define MAC_FASUCC 0x6104UL /* First Attmpt. Succ Coll Ctr. */
+#define MAC_ECOLL 0x6108UL /* Excessive Collision Counter */
+#define MAC_LCOLL 0x610CUL /* Late Collision Counter */
+#define MAC_DTIMER 0x6110UL /* Defer Timer */
+#define MAC_PATMPS 0x6114UL /* Peak Attempts Register */
+#define MAC_RFCTR 0x6118UL /* Receive Frame Counter */
+#define MAC_LERR 0x611CUL /* Length Error Counter */
+#define MAC_AERR 0x6120UL /* Alignment Error Counter */
+#define MAC_FCSERR 0x6124UL /* FCS Error Counter */
+#define MAC_RXCVERR 0x6128UL /* RX code Violation Error Ctr */
+#define MAC_RANDSEED 0x6130UL /* Random Number Seed Register */
+#define MAC_SMACHINE 0x6134UL /* State Machine Register */
+
+/* TX MAC Software Reset Command. */
+#define MAC_TXRST_CMD 0x00000001 /* Start sw reset, self-clears */
+
+/* RX MAC Software Reset Command. */
+#define MAC_RXRST_CMD 0x00000001 /* Start sw reset, self-clears */
+
+/* Send Pause Command. */
+#define MAC_SNDPAUSE_TS 0x0000ffff /* The pause_time operand used in
+ * Send_Pause and flow-control
+ * handshakes.
+ */
+#define MAC_SNDPAUSE_SP 0x00010000 /* Setting this bit instructs the MAC
+ * to send a Pause Flow Control
+ * frame onto the network.
+ */
+
+/* TX MAC Status Register. */
+#define MAC_TXSTAT_XMIT 0x00000001 /* Frame Transmitted */
+#define MAC_TXSTAT_URUN 0x00000002 /* TX Underrun */
+#define MAC_TXSTAT_MPE 0x00000004 /* Max Packet Size Error */
+#define MAC_TXSTAT_NCE 0x00000008 /* Normal Collision Cntr Expire */
+#define MAC_TXSTAT_ECE 0x00000010 /* Excess Collision Cntr Expire */
+#define MAC_TXSTAT_LCE 0x00000020 /* Late Collision Cntr Expire */
+#define MAC_TXSTAT_FCE 0x00000040 /* First Collision Cntr Expire */
+#define MAC_TXSTAT_DTE 0x00000080 /* Defer Timer Expire */
+#define MAC_TXSTAT_PCE 0x00000100 /* Peak Attempts Cntr Expire */
+
+/* RX MAC Status Register. */
+#define MAC_RXSTAT_RCV 0x00000001 /* Frame Received */
+#define MAC_RXSTAT_OFLW 0x00000002 /* Receive Overflow */
+#define MAC_RXSTAT_FCE 0x00000004 /* Frame Cntr Expire */
+#define MAC_RXSTAT_ACE 0x00000008 /* Align Error Cntr Expire */
+#define MAC_RXSTAT_CCE 0x00000010 /* CRC Error Cntr Expire */
+#define MAC_RXSTAT_LCE 0x00000020 /* Length Error Cntr Expire */
+#define MAC_RXSTAT_VCE 0x00000040 /* Code Violation Cntr Expire */
+
+/* MAC Control Status Register. */
+#define MAC_CSTAT_PRCV 0x00000001 /* Pause Received */
+#define MAC_CSTAT_PS 0x00000002 /* Paused State */
+#define MAC_CSTAT_NPS 0x00000004 /* Not Paused State */
+#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */
+
+/* The layout of the MAC_{TX,RX,C}MASK registers is identical to that
+ * of MAC_{TX,RX,C}STAT. Bits set in MAC_{TX,RX,C}MASK will prevent
+ * that interrupt type from being signalled to the front end of GEM. For
+ * the interrupt to actually get sent to the cpu, it is necessary to
+ * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well.
+ */
+
+/* TX MAC Configuration Register.
+ *
+ * NOTE: The TX MAC Enable bit must be cleared and polled until
+ * zero before any other bits in this register are changed.
+ *
+ * Also, enabling the Carrier Extension feature of GEM is
+ * a 3-step process: 1) set TX Carrier Extension, 2) set
+ * RX Carrier Extension, 3) set Slot Time to 0x200. This
+ * mode must be enabled when in half-duplex at 1Gbps, else
+ * it must be disabled.
+ */
+#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */
+#define MAC_TXCFG_ICS 0x00000002 /* Ignore Carrier Sense */
+#define MAC_TXCFG_ICOLL 0x00000004 /* Ignore Collisions */
+#define MAC_TXCFG_EIPG0 0x00000008 /* Enable IPG0 */
+#define MAC_TXCFG_NGU 0x00000010 /* Never Give Up */
+#define MAC_TXCFG_NGUL 0x00000020 /* Never Give Up Limit */
+#define MAC_TXCFG_NBO 0x00000040 /* No Backoff */
+#define MAC_TXCFG_SD 0x00000080 /* Slow Down */
+#define MAC_TXCFG_NFCS 0x00000100 /* No FCS */
+#define MAC_TXCFG_TCE 0x00000200 /* TX Carrier Extension */
+
+/* RX MAC Configuration Register.
+ *
+ * NOTE: The RX MAC Enable bit must be cleared and polled until
+ * zero before any other bits in this register are changed.
+ *
+ * Similar rules apply to the Hash Filter Enable bit when
+ * programming the hash table registers, and the Address Filter
+ * Enable bit when programming the address filter registers.
+ */
+#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
+#define MAC_RXCFG_SPAD 0x00000002 /* Strip Pad */
+#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
+#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
+#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
+#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */
+#define MAC_RXCFG_AFE 0x00000040 /* Address Filter Enable */
+#define MAC_RXCFG_DDE 0x00000080 /* Disable Discard on Error */
+#define MAC_RXCFG_RCE 0x00000100 /* RX Carrier Extension */
+
+/* MAC Control Config Register. */
+#define MAC_MCCFG_SPE 0x00000001 /* Send Pause Enable */
+#define MAC_MCCFG_RPE 0x00000002 /* Receive Pause Enable */
+#define MAC_MCCFG_PMC 0x00000004 /* Pass MAC Control */
+
+/* XIF Configuration Register.
+ *
+ * NOTE: When leaving or entering loopback mode, a global hardware
+ * init of GEM should be performed.
+ */
+#define MAC_XIFCFG_OE 0x00000001 /* MII TX Output Driver Enable */
+#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */
+#define MAC_XIFCFG_DISE 0x00000004 /* Disable RX path during TX */
+#define MAC_XIFCFG_GMII 0x00000008 /* Use GMII clocks + datapath */
+#define MAC_XIFCFG_MBOE 0x00000010 /* Controls MII_BUF_EN pin */
+#define MAC_XIFCFG_LLED 0x00000020 /* Force LINKLED# active (low) */
+#define MAC_XIFCFG_FLED 0x00000040 /* Force FDPLXLED# active (low) */
+
+/* InterPacketGap0 Register. This 8-bit value is used as an extension
+ * to the InterPacketGap1 Register. Specifically it contributes to the
+ * timing of the RX-to-TX IPG. This value is ignored and presumed to
+ * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit
+ * is cleared in the TX MAC Configuration Register.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x00
+ */
+
+/* InterPacketGap1 Register. This 8-bit value defines the first 2/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x08
+ */
+
+/* InterPacketGap2 Register. This 8-bit value defines the second 1/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Slot Time Register. This 10-bit value specifies the slot time
+ * parameter in units of media byte time. It determines the physical
+ * span of the network.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Minimum Frame Size Register. This 10-bit register specifies the
+ * smallest sized frame the TXMAC will send onto the medium, and the
+ * RXMAC will receive from the medium.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Maximum Frame and Burst Size Register.
+ *
+ * This register specifies two things. First it specifies the maximum
+ * sized frame the TXMAC will send and the RXMAC will recognize as
+ * valid. Second, it specifies the maximum run length of a burst of
+ * packets sent in half-duplex gigabit modes.
+ *
+ * Recommended value: 0x200005ee
+ */
+#define MAC_MAXFSZ_MFS 0x00007fff /* Max Frame Size */
+#define MAC_MAXFSZ_MBS 0x7fff0000 /* Max Burst Size */
+
+/* PA Size Register. This 10-bit register specifies the number of preamble
+ * bytes which will be transmitted at the beginning of each frame. A
+ * value of two or greater should be programmed here.
+ *
+ * Recommended value: 0x07
+ */
+
+/* Jam Size Register. This 4-bit register specifies the duration of
+ * the jam in units of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Attempts Limit Register. This 8-bit register specifies the number
+ * of attempts that the TXMAC will make to transmit a frame, before it
+ * resets its Attempts Counter. After reaching the Attempts Limit the
+ * TXMAC may or may not drop the frame, as determined by the NGU
+ * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC
+ * Configuration Register.
+ *
+ * Recommended value: 0x10
+ */
+
+/* MAC Control Type Register. This 16-bit register specifies the
+ * "type" field of a MAC Control frame. The TXMAC uses this field to
+ * encapsulate the MAC Control frame for transmission, and the RXMAC
+ * uses it for decoding valid MAC Control frames received from the
+ * network.
+ *
+ * Recommended value: 0x8808
+ */
+
+/* MAC Address Registers. Each of these registers specify the
+ * ethernet MAC of the interface, 16-bits at a time. Register
+ * 0 specifies bits [47:32], register 1 bits [31:16], and register
+ * 2 bits [15:0].
+ *
+ * Registers 3 through and including 5 specify an alternate
+ * MAC address for the interface.
+ *
+ * Registers 6 through and including 8 specify the MAC Control
+ * Address, which must be the reserved multicast address for MAC
+ * Control frames.
+ *
+ * Example: To program primary station address a:b:c:d:e:f into
+ * the chip.
+ * MAC_Address_2 = (a << 8) | b
+ * MAC_Address_1 = (c << 8) | d
+ * MAC_Address_0 = (e << 8) | f
+ */
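+
+/* Illustrative sketch, not part of the original driver: the example above
+ * expressed in C.  'e' points to the six address bytes a..f in transmission
+ * order; writel() is assumed as the MMIO accessor.
+ */
+static inline void example_set_station_addr(void __iomem *regs, const u8 *e)
+{
+        writel((e[4] << 8) | e[5], regs + MAC_ADDR0);        /* "(e << 8) | f" */
+        writel((e[2] << 8) | e[3], regs + MAC_ADDR1);        /* "(c << 8) | d" */
+        writel((e[0] << 8) | e[1], regs + MAC_ADDR2);        /* "(a << 8) | b" */
+}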
+
+/* Address Filter Registers. Registers 0 through 2 specify bit
+ * fields [47:32] through [15:0], respectively, of the address
+ * filter. The Address Filter 2&1 Mask Register denotes the 8-bit
+ * nibble mask for Address Filter Registers 2 and 1. The Address
+ * Filter 0 Mask Register denotes the 16-bit mask for the Address
+ * Filter Register 0.
+ */
+
+/* Hash Table Registers. Registers 0 through 15 specify bit fields
+ * [255:240] through [15:0], respectively, of the hash table.
+ */
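+
+/* Illustrative sketch, not part of the original driver: loading a precomputed
+ * 256-bit hash table into the 16 registers above.  How the bit index for a
+ * given multicast address is derived (typically from a CRC of the address)
+ * is not specified here and is left to the driver.
+ */
+static inline void example_load_hash_table(void __iomem *regs, const u16 *hash)
+{
+        int i;
+
+        for (i = 0; i < 16; i++)
+                writel(hash[i], regs + MAC_HASH0 + (i << 2));
+}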
+
+/* Statistics Registers. All of these registers are 16-bits and
+ * track occurrences of a specific event. GEM can be configured
+ * to interrupt the host cpu when any of these counters overflow.
+ * They should all be explicitly initialized to zero when the interface
+ * is brought up.
+ */
+
+/* Random Number Seed Register. This 10-bit value is used as the
+ * RNG seed inside GEM for the CSMA/CD backoff algorithm. It is
+ * recommended to program this register to the 10 LSBs of the
+ * interface's MAC address.
+ */
+
+/* Pause Timer, read-only. This 16-bit timer is used to time the pause
+ * interval as indicated by a received pause flow control frame.
+ * A non-zero value in this timer indicates that the MAC is currently in
+ * the paused state.
+ */
+
+/* MIF Registers */
+#define MIF_BBCLK 0x6200UL /* MIF Bit-Bang Clock */
+#define MIF_BBDATA 0x6204UL /* MIF Bit-Bang Data */
+#define MIF_BBOENAB 0x6208UL /* MIF Bit-Bang Output Enable */
+#define MIF_FRAME 0x620CUL /* MIF Frame/Output Register */
+#define MIF_CFG 0x6210UL /* MIF Configuration Register */
+#define MIF_MASK 0x6214UL /* MIF Mask Register */
+#define MIF_STATUS 0x6218UL /* MIF Status Register */
+#define MIF_SMACHINE 0x621CUL /* MIF State Machine Register */
+
+/* MIF Bit-Bang Clock. This 1-bit register is used to generate the
+ * MDC clock waveform on the MII Management Interface when the MIF is
+ * programmed in the "Bit-Bang" mode. Writing a '1' after a '0' into
+ * this register will create a rising edge on the MDC, while writing
+ * a '0' after a '1' will create a falling edge. For every bit that
+ * is transferred on the management interface, both edges have to be
+ * generated.
+ */
+
+/* MIF Bit-Bang Data. This 1-bit register is used to generate the
+ * outgoing data (MDO) on the MII Management Interface when the MIF
+ * is programmed in the "Bit-Bang" mode. The data will be steered to the
+ * appropriate MDIO based on the state of the PHY_Select bit in the MIF
+ * Configuration Register.
+ */
+
+/* MIF Bit-Bang Output Enable. This 1-bit register is used to enable
+ * ('1') or disable ('0') the bi-directional driver on the MII when the
+ * MIF is programmed in the "Bit-Bang" mode. The MDIO should be enabled
+ * when data bits are transferred from the MIF to the transceiver, and it
+ * should be disabled when the interface is idle or when data bits are
+ * transferred from the transceiver to the MIF (data portion of a read
+ * instruction). Only one MDIO will be enabled at a given time, depending
+ * on the state of the PHY_Select bit in the MIF Configuration Register.
+ */
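+
+/* Illustrative sketch, not part of the original driver: clocking one bit out
+ * of the MIF in bit-bang mode, per the three register descriptions above.
+ * writel() and udelay() are assumed; real code would also manage MIF_BBOENAB
+ * around the turnaround and read phases.
+ */
+static inline void example_mif_bb_send_bit(void __iomem *regs, int bit)
+{
+        writel(bit ? 1 : 0, regs + MIF_BBDATA);        /* drive MDO */
+        writel(0, regs + MIF_BBCLK);                   /* falling edge on MDC */
+        udelay(1);
+        writel(1, regs + MIF_BBCLK);                   /* rising edge on MDC */
+        udelay(1);
+}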
+
+/* MIF Configuration Register. This 15-bit register controls the operation
+ * of the MIF.
+ */
+#define MIF_CFG_PSELECT 0x00000001 /* Xcvr slct: 0=mdio0 1=mdio1 */
+#define MIF_CFG_POLL 0x00000002 /* Enable polling mechanism */
+#define MIF_CFG_BBMODE 0x00000004 /* 1=bit-bang 0=frame mode */
+#define MIF_CFG_PRADDR 0x000000f8 /* Xcvr poll register address */
+#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
+#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */
+#define MIF_CFG_PPADDR 0x00007c00 /* Xcvr poll PHY address */
+
+/* MIF Frame/Output Register. This 32-bit register allows the host to
+ * communicate with a transceiver in frame mode (as opposed to bit-bang
+ * mode). Writes by the host specify an instruction. After issuing one,
+ * the host must poll this register for completion. Also, after
+ * completion this register holds the data returned by the transceiver
+ * if applicable.
+ */
+#define MIF_FRAME_ST 0xc0000000 /* STart of frame */
+#define MIF_FRAME_OP 0x30000000 /* OPcode */
+#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
+#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
+#define MIF_FRAME_TAMSB 0x00020000 /* Turn Around MSB */
+#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
+#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */
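+
+/* Illustrative sketch, not part of the original driver: a frame-mode PHY
+ * register read built from the fields above (shifts inferred from the
+ * masks).  readl(), writel() and udelay() are assumed; the start/opcode
+ * values (01b/10b) are the standard IEEE 802.3 clause 22 read encoding.
+ */
+static inline u16 example_mif_frame_read(void __iomem *regs, int phy_addr, int reg)
+{
+        int limit = 10000;
+        u32 frame;
+
+        frame  = (0x1 << 30);                                /* ST */
+        frame |= (0x2 << 28);                                /* OP: read */
+        frame |= (((u32)phy_addr << 23) & MIF_FRAME_PHYAD);
+        frame |= (((u32)reg << 18) & MIF_FRAME_REGAD);
+        frame |= MIF_FRAME_TAMSB;                            /* turnaround */
+        writel(frame, regs + MIF_FRAME);
+
+        /* Poll for completion: the transceiver sets TALSB when done. */
+        while (--limit) {
+                frame = readl(regs + MIF_FRAME);
+                if (frame & MIF_FRAME_TALSB)
+                        break;
+                udelay(10);
+        }
+        return frame & MIF_FRAME_DATA;
+}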
+
+/* MIF Status Register. This register reports status when the MIF is
+ * operating in the poll mode. The poll status field is auto-clearing
+ * on read.
+ */
+#define MIF_STATUS_DATA 0xffff0000 /* Live image of XCVR reg */
+#define MIF_STATUS_STAT 0x0000ffff /* Which bits have changed */
+
+/* MIF Mask Register. This 16-bit register is used when in poll mode
+ * to say which bits of the polled register will cause an interrupt
+ * when changed.
+ */
+
+/* PCS/Serialink Registers */
+#define PCS_MIICTRL 0x9000UL /* PCS MII Control Register */
+#define PCS_MIISTAT 0x9004UL /* PCS MII Status Register */
+#define PCS_MIIADV 0x9008UL /* PCS MII Advertisement Reg */
+#define PCS_MIILP 0x900CUL /* PCS MII Link Partner Ability */
+#define PCS_CFG 0x9010UL /* PCS Configuration Register */
+#define PCS_SMACHINE 0x9014UL /* PCS State Machine Register */
+#define PCS_ISTAT 0x9018UL /* PCS Interrupt Status Reg */
+#define PCS_DMODE 0x9050UL /* Datapath Mode Register */
+#define PCS_SCTRL 0x9054UL /* Serialink Control Register */
+#define PCS_SOS 0x9058UL /* Shared Output Select Reg */
+#define PCS_SSTATE 0x905CUL /* Serialink State Register */
+
+/* PCS MII Control Register. */
+#define PCS_MIICTRL_SPD 0x00000040 /* Read as one, writes ignored */
+#define PCS_MIICTRL_CT 0x00000080 /* Force COL signal active */
+#define PCS_MIICTRL_DM 0x00000100 /* Duplex mode, forced low */
+#define PCS_MIICTRL_RAN 0x00000200 /* Restart auto-neg, self clear */
+#define PCS_MIICTRL_ISO 0x00000400 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_PD 0x00000800 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_ANE 0x00001000 /* Auto-neg enable */
+#define PCS_MIICTRL_SS 0x00002000 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_WB 0x00004000 /* Wrapback, loopback at 10-bit
+ * input side of Serialink
+ */
+#define PCS_MIICTRL_RST 0x00008000 /* Resets PCS, self clearing */
+
+/* PCS MII Status Register. */
+#define PCS_MIISTAT_EC 0x00000001 /* Ext Capability: Read as zero */
+#define PCS_MIISTAT_JD 0x00000002 /* Jabber Detect: Read as zero */
+#define PCS_MIISTAT_LS 0x00000004 /* Link Status: 1=up 0=down */
+#define PCS_MIISTAT_ANA 0x00000008 /* Auto-neg Ability, always 1 */
+#define PCS_MIISTAT_RF 0x00000010 /* Remote Fault */
+#define PCS_MIISTAT_ANC 0x00000020 /* Auto-neg complete */
+#define PCS_MIISTAT_ES 0x00000100 /* Extended Status, always 1 */
+
+/* PCS MII Advertisement Register. */
+#define PCS_MIIADV_FD 0x00000020 /* Advertise Full Duplex */
+#define PCS_MIIADV_HD 0x00000040 /* Advertise Half Duplex */
+#define PCS_MIIADV_SP 0x00000080 /* Advertise Symmetric Pause */
+#define PCS_MIIADV_AP 0x00000100 /* Advertise Asymmetric Pause */
+#define PCS_MIIADV_RF 0x00003000 /* Remote Fault */
+#define PCS_MIIADV_ACK 0x00004000 /* Read-only */
+#define PCS_MIIADV_NP 0x00008000 /* Next-page, forced low */
+
+/* PCS MII Link Partner Ability Register. This register is equivalent
+ * to the Link Partner Ability Register of the standard MII register set.
+ * Its layout corresponds to the PCS MII Advertisement Register.
+ */
+
+/* PCS Configuration Register. */
+#define PCS_CFG_ENABLE 0x00000001 /* Must be zero while changing
+ * PCS MII advertisement reg.
+ */
+#define PCS_CFG_SDO 0x00000002 /* Signal detect override */
+#define PCS_CFG_SDL 0x00000004 /* Signal detect active low */
+#define PCS_CFG_JS 0x00000018 /* Jitter-study:
+ * 0 = normal operation
+ * 1 = high-frequency test pattern
+ * 2 = low-frequency test pattern
+ * 3 = reserved
+ */
+#define PCS_CFG_TO 0x00000020 /* 10ms auto-neg timer override */
+
+/* PCS Interrupt Status Register. This register is self-clearing
+ * when read.
+ */
+#define PCS_ISTAT_LSC 0x00000004 /* Link Status Change */
+
+/* Datapath Mode Register. */
+#define PCS_DMODE_SM 0x00000001 /* 1 = use internal Serialink */
+#define PCS_DMODE_ESM 0x00000002 /* External SERDES mode */
+#define PCS_DMODE_MGM 0x00000004 /* MII/GMII mode */
+#define PCS_DMODE_GMOE 0x00000008 /* GMII Output Enable */
+
+/* Serialink Control Register.
+ *
+ * NOTE: When in SERDES mode, the loopback bit has inverse logic.
+ */
+#define PCS_SCTRL_LOOP 0x00000001 /* Loopback enable */
+#define PCS_SCTRL_ESCD 0x00000002 /* Enable sync char detection */
+#define PCS_SCTRL_LOCK 0x00000004 /* Lock to reference clock */
+#define PCS_SCTRL_EMP 0x00000018 /* Output driver emphasis */
+#define PCS_SCTRL_STEST 0x000001c0 /* Self test patterns */
+#define PCS_SCTRL_PDWN 0x00000200 /* Software power-down */
+#define PCS_SCTRL_RXZ 0x00000c00 /* PLL input to Serialink */
+#define PCS_SCTRL_RXP 0x00003000 /* PLL input to Serialink */
+#define PCS_SCTRL_TXZ 0x0000c000 /* PLL input to Serialink */
+#define PCS_SCTRL_TXP 0x00030000 /* PLL input to Serialink */
+
+/* Shared Output Select Register. For test and debug, allows multiplexing
+ * test outputs into the PROM address pins. Set to zero for normal
+ * operation.
+ */
+#define PCS_SOS_PADDR 0x00000003 /* PROM Address */
+
+/* PROM Image Space */
+#define PROM_START 0x100000UL /* Expansion ROM run time access*/
+#define PROM_SIZE 0x0fffffUL /* Size of ROM */
+#define PROM_END 0x200000UL /* End of ROM */
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2 0x0040 /* Gigabit enable? (bcm5411) */
+#define LPA_PAUSE 0x0400
+
+/* More PHY registers (specific to Broadcom models) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT 0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000
+
+#define MII_BCM5201_AUXMODE2 0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008
+
+#define MII_BCM5201_MULTIPHY 0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL 0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS 0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8
+
+/* When it can, GEM internally caches 4 aligned TX descriptors
+ * at a time, so that it can use full cacheline DMA reads.
+ *
+ * Note that unlike HME, there is no ownership bit in the descriptor
+ * control word. The same functionality is obtained via the TX-Kick
+ * and TX-Complete registers. As a result, GEM need not write back
+ * updated values to the TX descriptor ring, it only performs reads.
+ *
+ * Since TX descriptors are never modified by GEM, the driver can
+ * use the buffer DMA address as a place to keep track of allocated
+ * DMA mappings for a transmitted packet.
+ */
+struct gem_txd {
+ u64 control_word;
+ u64 buffer;
+};
+
+#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
+#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
+#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
+#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
+#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
+#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
+#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */
+#define TXDCTRL_NOCRC 0x0000000200000000ULL /* No CRC Present */
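+
+/* Illustrative sketch, not part of the original driver: building the control
+ * word for a frame carried in a single descriptor, using the field masks
+ * above (the checksum field shifts, 15 and 21, are inferred from the masks).
+ * Whether to also set TXDCTRL_INTME is a driver policy decision.
+ */
+static inline u64 example_txd_control_word(u32 len, int csum,
+                                            u32 csum_start, u32 csum_stuff)
+{
+        u64 ctrl = ((u64)len & TXDCTRL_BUFSZ) | TXDCTRL_SOF | TXDCTRL_EOF;
+
+        if (csum)
+                ctrl |= TXDCTRL_CENAB |
+                        (((u64)csum_start << 15) & TXDCTRL_CSTART) |
+                        (((u64)csum_stuff << 21) & TXDCTRL_COFF);
+        return ctrl;
+}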
+
+/* GEM requires that RX descriptors are provided four at a time,
+ * aligned. Also, the RX ring may not wrap around. This means that
+ * there will be at least 4 unused descriptor entries in the middle
+ * of the RX ring at all times.
+ *
+ * Similar to HME, GEM assumes that it can write garbage bytes before
+ * the beginning of the buffer and right after the end in order to DMA
+ * whole cachelines.
+ *
+ * Unlike for TX, GEM does update the status word in the RX descriptors
+ * when packets arrive. Therefore an ownership bit does exist in the
+ * RX descriptors. It is advisory; GEM clears it but does not check
+ * it in any way. So when the driver posts buffers to the RX ring (via
+ * the RX Kick register), it must make sure the buffers are
+ * truly ready and that the ownership bits are set properly.
+ *
+ * Even though GEM modifies the RX descriptors, it guarantees that the
+ * buffer DMA address field will stay the same when it performs these
+ * updates. Therefore it can be used to keep track of DMA mappings
+ * by the host driver just as in the TX descriptor case above.
+ */
+struct gem_rxd {
+ u64 status_word;
+ u64 buffer;
+};
+
+#define RXDCTRL_TCPCSUM 0x000000000000ffffULL /* TCP Pseudo-CSUM */
+#define RXDCTRL_BUFSZ 0x000000007fff0000ULL /* Buffer Size */
+#define RXDCTRL_OWN 0x0000000080000000ULL /* GEM owns this entry */
+#define RXDCTRL_HASHVAL 0x0ffff00000000000ULL /* Hash Value */
+#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
+#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */
+#define RXDCTRL_BAD 0x4000000000000000ULL /* Frame has bad CRC */
+
+#define RXDCTRL_FRESH(gp) \
+ ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \
+ RXDCTRL_OWN)
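+
+/* Illustrative sketch, not part of the original driver: re-arming one RX
+ * descriptor in line with the ordering rules described above -- write the
+ * buffer address first, then the status word carrying the ownership bit, so
+ * the entry is truly ready before it is announced via the RX Kick register.
+ * The little-endian conversion and the wmb() barrier are assumptions about
+ * the surrounding driver.
+ */
+static inline void example_refresh_rxd(struct gem_rxd *rxd, dma_addr_t mapping,
+                                        u64 fresh_status)
+{
+        rxd->buffer = cpu_to_le64(mapping);
+        wmb();
+        rxd->status_word = cpu_to_le64(fresh_status);  /* e.g. RXDCTRL_FRESH(gp) */
+}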
+
+#define TX_RING_SIZE 128
+#define RX_RING_SIZE 128
+
+#if TX_RING_SIZE == 32
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32
+#elif TX_RING_SIZE == 64
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64
+#elif TX_RING_SIZE == 128
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128
+#elif TX_RING_SIZE == 256
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256
+#elif TX_RING_SIZE == 512
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512
+#elif TX_RING_SIZE == 1024
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K
+#elif TX_RING_SIZE == 2048
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K
+#elif TX_RING_SIZE == 4096
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K
+#elif TX_RING_SIZE == 8192
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K
+#else
+#error TX_RING_SIZE value is illegal...
+#endif
+
+#if RX_RING_SIZE == 32
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32
+#elif RX_RING_SIZE == 64
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64
+#elif RX_RING_SIZE == 128
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128
+#elif RX_RING_SIZE == 256
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256
+#elif RX_RING_SIZE == 512
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512
+#elif RX_RING_SIZE == 1024
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K
+#elif RX_RING_SIZE == 2048
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K
+#elif RX_RING_SIZE == 4096
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K
+#elif RX_RING_SIZE == 8192
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K
+#else
+#error RX_RING_SIZE is illegal...
+#endif
+
+#define NEXT_TX(N) (((N) + 1) & (TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (RX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(GP) \
+ (((GP)->tx_old <= (GP)->tx_new) ? \
+ (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \
+ (GP)->tx_old - (GP)->tx_new - 1)
+
+#define RX_OFFSET 2
+#define RX_BUF_ALLOC_SIZE(gp) ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD 256
+
+#if TX_RING_SIZE < 128
+#define INIT_BLOCK_TX_RING_SIZE 128
+#else
+#define INIT_BLOCK_TX_RING_SIZE TX_RING_SIZE
+#endif
+
+#if RX_RING_SIZE < 128
+#define INIT_BLOCK_RX_RING_SIZE 128
+#else
+#define INIT_BLOCK_RX_RING_SIZE RX_RING_SIZE
+#endif
+
+struct gem_init_block {
+ struct gem_txd txd[INIT_BLOCK_TX_RING_SIZE];
+ struct gem_rxd rxd[INIT_BLOCK_RX_RING_SIZE];
+};
+
+enum gem_phy_type {
+ phy_mii_mdio0,
+ phy_mii_mdio1,
+ phy_serialink,
+ phy_serdes,
+};
+
+enum link_state {
+ link_down = 0, /* No link, will retry */
+ link_aneg, /* Autoneg in progress */
+ link_force_try, /* Try Forced link speed */
+ link_force_ret, /* Forced mode worked, retrying autoneg */
+ link_force_ok, /* Stay in forced mode */
+ link_up /* Link is up */
+};
+
+struct gem {
+ spinlock_t lock;
+ spinlock_t tx_lock;
+ void __iomem *regs;
+ int rx_new, rx_old;
+ int tx_new, tx_old;
+
+ unsigned int has_wol : 1; /* chip supports wake-on-lan */
+ unsigned int asleep : 1; /* chip asleep, protected by pm_sem */
+ unsigned int asleep_wol : 1; /* was asleep with WOL enabled */
+ unsigned int opened : 1; /* driver opened, protected by pm_sem */
+ unsigned int running : 1; /* chip running, protected by lock */
+
+ /* cell enable count, protected by lock */
+ int cell_enabled;
+
+ struct semaphore pm_sem;
+
+ u32 msg_enable;
+ u32 status;
+
+ struct net_device_stats net_stats;
+
+ int tx_fifo_sz;
+ int rx_fifo_sz;
+ int rx_pause_off;
+ int rx_pause_on;
+ int rx_buf_sz;
+ u64 pause_entered;
+ u16 pause_last_time_recvd;
+ u32 mac_rx_cfg;
+ u32 swrst_base;
+
+ int want_autoneg;
+ int last_forced_speed;
+ enum link_state lstate;
+ struct timer_list link_timer;
+ int timer_ticks;
+ int wake_on_lan;
+ struct work_struct reset_task;
+ volatile int reset_task_pending;
+
+ enum gem_phy_type phy_type;
+ struct mii_phy phy_mii;
+ int mii_phy_addr;
+
+ struct gem_init_block *init_block;
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+ dma_addr_t gblock_dvma;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+#ifdef CONFIG_PPC_PMAC
+ struct device_node *of_node;
+#endif
+};
+
+#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) \
+ && gp->phy_mii.def && gp->phy_mii.def->ops)
+
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags)
+{
+ struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
+
+ if (skb) {
+ int offset = (int) ALIGNED_RX_SKB_ADDR(skb->data);
+ if (offset)
+ skb_reserve(skb, offset);
+ }
+
+ return skb;
+}
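+
+/* Illustrative sketch, not part of the original driver: how gem_alloc_skb()
+ * might be used to populate one RX ring slot, shown only to illustrate the
+ * RX_BUF_ALLOC_SIZE()/RX_OFFSET conventions.  The caller is assumed to
+ * DMA-map skb->data afterwards and plug the mapping into the descriptor.
+ */
+static __inline__ struct sk_buff *example_new_rx_skb(struct gem *gp, int entry)
+{
+        struct sk_buff *skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+
+        if (skb == NULL)
+                return NULL;
+
+        /* Make the full receive area available to the hardware. */
+        skb_put(skb, gp->rx_buf_sz + RX_OFFSET);
+        gp->rx_skbs[entry] = skb;
+        return skb;
+}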
+
+#endif /* _SUNGEM_H */
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
new file mode 100644
index 000000000000..0fca414d3657
--- /dev/null
+++ b/drivers/net/sungem_phy.c
@@ -0,0 +1,872 @@
+/*
+ * PHY drivers for the sungem ethernet driver.
+ *
+ * This file could be shared with other drivers.
+ *
+ * (c) 2002, Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * TODO:
+ * - Implement WOL
+ * - Add support for PHYs that provide an IRQ line
+ * - Eventually move the entire polling state machine into
+ *   this file (out of the eth driver), so that it can easily be
+ *   skipped on PHYs that implement it in hardware.
+ * - On LXT971 & BCM5201, Apple uses some chip-specific regs
+ *   to read the link status. Figure out why and whether it makes
+ *   sense to do the same (magic aneg ?)
+ * - Apple has some additional power management code for some
+ *   Broadcom PHYs that they "hide" from the OpenSource version
+ *   of Darwin; we still need to reverse engineer that.
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#include "sungem_phy.h"
+
+/* Link modes of the BCM5400 PHY */
+static int phy_BCM5400_link_table[8][3] = {
+ { 0, 0, 0 }, /* No link */
+ { 0, 0, 0 }, /* 10BT Half Duplex */
+ { 1, 0, 0 }, /* 10BT Full Duplex */
+ { 0, 1, 0 }, /* 100BT Half Duplex */
+ { 0, 1, 0 }, /* 100BT Half Duplex */
+ { 1, 1, 0 }, /* 100BT Full Duplex*/
+ { 1, 0, 1 }, /* 1000BT */
+ { 1, 0, 1 }, /* 1000BT */
+};
+
+static inline int __phy_read(struct mii_phy* phy, int id, int reg)
+{
+ return phy->mdio_read(phy->dev, id, reg);
+}
+
+static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val)
+{
+ phy->mdio_write(phy->dev, id, reg, val);
+}
+
+static inline int phy_read(struct mii_phy* phy, int reg)
+{
+ return phy->mdio_read(phy->dev, phy->mii_id, reg);
+}
+
+static inline void phy_write(struct mii_phy* phy, int reg, int val)
+{
+ phy->mdio_write(phy->dev, phy->mii_id, reg, val);
+}
+
+static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
+{
+ u16 val;
+ int limit = 10000;
+
+ val = __phy_read(phy, phy_id, MII_BMCR);
+ val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
+ val |= BMCR_RESET;
+ __phy_write(phy, phy_id, MII_BMCR, val);
+
+ udelay(100);
+
+ while (limit--) {
+ val = __phy_read(phy, phy_id, MII_BMCR);
+ if ((val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ if ((val & BMCR_ISOLATE) && limit > 0)
+ __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
+
+ return (limit <= 0);
+}
+
+static int bcm5201_init(struct mii_phy* phy)
+{
+ u16 data;
+
+ data = phy_read(phy, MII_BCM5201_MULTIPHY);
+ data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
+ phy_write(phy, MII_BCM5201_MULTIPHY, data);
+
+ phy_write(phy, MII_BCM5201_INTERRUPT, 0);
+
+ return 0;
+}
+
+static int bcm5201_suspend(struct mii_phy* phy)
+{
+ phy_write(phy, MII_BCM5201_INTERRUPT, 0);
+ phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
+
+ return 0;
+}
+
+static int bcm5221_init(struct mii_phy* phy)
+{
+ u16 data;
+
+ data = phy_read(phy, MII_BCM5221_TEST);
+ phy_write(phy, MII_BCM5221_TEST,
+ data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+ data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
+ phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
+ data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
+
+ data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+ phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+ data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);
+
+ data = phy_read(phy, MII_BCM5221_TEST);
+ phy_write(phy, MII_BCM5221_TEST,
+ data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+ return 0;
+}
+
+static int bcm5221_suspend(struct mii_phy* phy)
+{
+ u16 data;
+
+ data = phy_read(phy, MII_BCM5221_TEST);
+ phy_write(phy, MII_BCM5221_TEST,
+ data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+ data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+ phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+ data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);
+
+ return 0;
+}
+
+static int bcm5400_init(struct mii_phy* phy)
+{
+ u16 data;
+
+ /* Configure for gigabit full duplex */
+ data = phy_read(phy, MII_BCM5400_AUXCONTROL);
+ data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
+ phy_write(phy, MII_BCM5400_AUXCONTROL, data);
+
+ data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+ data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+ phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+ udelay(100);
+
+ /* Reset and configure cascaded 10/100 PHY */
+ (void)reset_one_mii_phy(phy, 0x1f);
+
+ data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
+ data |= MII_BCM5201_MULTIPHY_SERIALMODE;
+ __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
+
+ data = phy_read(phy, MII_BCM5400_AUXCONTROL);
+ data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
+ phy_write(phy, MII_BCM5400_AUXCONTROL, data);
+
+ return 0;
+}
+
+static int bcm5400_suspend(struct mii_phy* phy)
+{
+#if 0 /* Commented out in Darwin... someone has those damn docs ? */
+ phy_write(phy, MII_BMCR, BMCR_PDOWN);
+#endif
+ return 0;
+}
+
+static int bcm5401_init(struct mii_phy* phy)
+{
+ u16 data;
+ int rev;
+
+ rev = phy_read(phy, MII_PHYSID2) & 0x000f;
+ if (rev == 0 || rev == 3) {
+ /* Some revisions of 5401 appear to need this
+ * initialisation sequence to disable what OF
+ * calls "tap power management".
+ *
+ * WARNING ! OF and Darwin don't agree on the
+ * register addresses. OF seems to interpret the
+ * register numbers below as decimal.
+ *
+ * Note: This should (and does) match tg3_init_5401phy_dsp
+ * in the tg3.c driver. -DaveM
+ */
+ phy_write(phy, 0x18, 0x0c20);
+ phy_write(phy, 0x17, 0x0012);
+ phy_write(phy, 0x15, 0x1804);
+ phy_write(phy, 0x17, 0x0013);
+ phy_write(phy, 0x15, 0x1204);
+ phy_write(phy, 0x17, 0x8006);
+ phy_write(phy, 0x15, 0x0132);
+ phy_write(phy, 0x17, 0x8006);
+ phy_write(phy, 0x15, 0x0232);
+ phy_write(phy, 0x17, 0x201f);
+ phy_write(phy, 0x15, 0x0a20);
+ }
+
+ /* Configure for gigabit full duplex */
+ data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+ data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+ phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+ udelay(10);
+
+ /* Reset and configure cascaded 10/100 PHY */
+ (void)reset_one_mii_phy(phy, 0x1f);
+
+ data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
+ data |= MII_BCM5201_MULTIPHY_SERIALMODE;
+ __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
+
+ return 0;
+}
+
+static int bcm5401_suspend(struct mii_phy* phy)
+{
+#if 0 /* Commented out in Darwin... someone has those damn docs ? */
+ phy_write(phy, MII_BMCR, BMCR_PDOWN);
+#endif
+ return 0;
+}
+
+static int bcm5411_init(struct mii_phy* phy)
+{
+ u16 data;
+
+ /* Here's some more Apple black magic to set up
+ * some voltage settings.
+ */
+ phy_write(phy, 0x1c, 0x8c23);
+ phy_write(phy, 0x1c, 0x8ca3);
+ phy_write(phy, 0x1c, 0x8c23);
+
+ /* Here, Apple seems to want to reset it, so
+ * do it as well.
+ */
+ phy_write(phy, MII_BMCR, BMCR_RESET);
+ phy_write(phy, MII_BMCR, 0x1340);
+
+ data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+ data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+ phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+ udelay(10);
+
+ /* Reset and configure cascaded 10/100 PHY */
+ (void)reset_one_mii_phy(phy, 0x1f);
+
+ return 0;
+}
+
+static int bcm5411_suspend(struct mii_phy* phy)
+{
+ phy_write(phy, MII_BMCR, BMCR_PDOWN);
+
+ return 0;
+}
+
+static int bcm5421_init(struct mii_phy* phy)
+{
+ u16 data;
+ int rev;
+
+ rev = phy_read(phy, MII_PHYSID2) & 0x000f;
+ if (rev == 0) {
+ /* This is borrowed from MacOS
+ */
+ phy_write(phy, 0x18, 0x1007);
+ data = phy_read(phy, 0x18);
+ phy_write(phy, 0x18, data | 0x0400);
+ phy_write(phy, 0x18, 0x0007);
+ data = phy_read(phy, 0x18);
+ phy_write(phy, 0x18, data | 0x0800);
+ phy_write(phy, 0x17, 0x000a);
+ data = phy_read(phy, 0x15);
+ phy_write(phy, 0x15, data | 0x0200);
+ }
+#if 0
+ /* This has to be verified before I enable it */
+ /* Enable automatic low-power */
+ phy_write(phy, 0x1c, 0x9002);
+ phy_write(phy, 0x1c, 0xa821);
+ phy_write(phy, 0x1c, 0x941d);
+#endif
+ return 0;
+}
+
+static int bcm5421k2_init(struct mii_phy* phy)
+{
+ /* Init code borrowed from OF */
+ phy_write(phy, 4, 0x01e1);
+ phy_write(phy, 9, 0x0300);
+
+ return 0;
+}
+
+static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ u16 ctl, adv;
+
+ phy->autoneg = 1;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+ phy->advertising = advertise;
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ /* Setup 1000BT advertise */
+ adv = phy_read(phy, MII_1000BASETCONTROL);
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write(phy, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ u16 ctl;
+
+ phy->autoneg = 0;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch(speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ ctl |= BMCR_SPD2;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+
+ // XXX Should we set the sungem to GII now on 1000BT ?
+
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int bcm54xx_read_link(struct mii_phy *phy)
+{
+ int link_mode;
+ u16 val;
+
+ if (phy->autoneg) {
+ val = phy_read(phy, MII_BCM5400_AUXSTATUS);
+ link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
+ MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
+ phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF;
+ phy->speed = phy_BCM5400_link_table[link_mode][2] ?
+ SPEED_1000 :
+ (phy_BCM5400_link_table[link_mode][1] ? SPEED_100 : SPEED_10);
+ val = phy_read(phy, MII_LPA);
+ phy->pause = ((val & LPA_PAUSE) != 0);
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ u16 ctl, adv;
+
+ phy->autoneg = 1;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+ phy->advertising = advertise;
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ /* Setup 1000BT advertise & enable crossover detect
+ * XXX How do we advertise 1000BT ? Darwin source is
+ * confusing here, they read from specific control and
+ * write to control... Someone has specs for those
+ * beasts ?
+ */
+ adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
+ adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write(phy, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ u16 ctl, ctl2;
+
+ phy->autoneg = 0;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
+ ctl |= BMCR_RESET;
+
+ /* Select speed & duplex */
+ switch(speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ /* I'm not sure about the one below, again, Darwin source is
+ * quite confusing and I lack chip specs
+ */
+ case SPEED_1000:
+ ctl |= BMCR_SPD2;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+
+ /* Disable crossover. Again, the way Apple does it is strange,
+ * though I don't assume they are wrong ;)
+ */
+ ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
+ ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
+ MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
+ MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (speed == SPEED_1000)
+ ctl2 |= (fd == DUPLEX_FULL) ?
+ MII_1000BASETCONTROL_FULLDUPLEXCAP :
+ MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ phy_write(phy, MII_1000BASETCONTROL, ctl2);
+
+ // XXX Should we set the sungem to GII now on 1000BT ?
+
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int marvell_read_link(struct mii_phy *phy)
+{
+ u16 status;
+
+ if (phy->autoneg) {
+ status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
+ if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
+ return -EAGAIN;
+ if (status & MII_M1011_PHY_SPEC_STATUS_1000)
+ phy->speed = SPEED_1000;
+ else if (status & MII_M1011_PHY_SPEC_STATUS_100)
+ phy->speed = SPEED_100;
+ else
+ phy->speed = SPEED_10;
+ if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+ phy->duplex = DUPLEX_FULL;
+ else
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0; /* XXX Check against spec ! */
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ u16 ctl, adv;
+
+ phy->autoneg = 1;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = 0;
+ phy->advertising = advertise;
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ u16 ctl;
+
+ phy->autoneg = 0;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch(speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ return -EINVAL;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+ u16 status;
+
+ (void)phy_read(phy, MII_BMSR);
+ status = phy_read(phy, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ return 0;
+ if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return 0;
+ return 1;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+ u16 lpa;
+
+ if (phy->autoneg) {
+ lpa = phy_read(phy, MII_LPA);
+
+ if (lpa & (LPA_10FULL | LPA_100FULL))
+ phy->duplex = DUPLEX_FULL;
+ else
+ phy->duplex = DUPLEX_HALF;
+ if (lpa & (LPA_100FULL | LPA_100HALF))
+ phy->speed = SPEED_100;
+ else
+ phy->speed = SPEED_10;
+ phy->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+/* Broadcom BCM 5201 */
+static struct mii_phy_ops bcm5201_phy_ops = {
+ .init = bcm5201_init,
+ .suspend = bcm5201_suspend,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def bcm5201_phy_def = {
+ .phy_id = 0x00406210,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5201",
+ .features = MII_BASIC_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5201_phy_ops
+};
+
+/* Broadcom BCM 5221 */
+static struct mii_phy_ops bcm5221_phy_ops = {
+ .suspend = bcm5221_suspend,
+ .init = bcm5221_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def bcm5221_phy_def = {
+ .phy_id = 0x004061e0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5221",
+ .features = MII_BASIC_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5221_phy_ops
+};
+
+/* Broadcom BCM 5400 */
+static struct mii_phy_ops bcm5400_phy_ops = {
+ .init = bcm5400_init,
+ .suspend = bcm5400_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5400_phy_def = {
+ .phy_id = 0x00206040,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5400",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5400_phy_ops
+};
+
+/* Broadcom BCM 5401 */
+static struct mii_phy_ops bcm5401_phy_ops = {
+ .init = bcm5401_init,
+ .suspend = bcm5401_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5401_phy_def = {
+ .phy_id = 0x00206050,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5401",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5401_phy_ops
+};
+
+/* Broadcom BCM 5411 */
+static struct mii_phy_ops bcm5411_phy_ops = {
+ .init = bcm5411_init,
+ .suspend = bcm5411_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5411_phy_def = {
+ .phy_id = 0x00206070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5411",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5411_phy_ops
+};
+
+/* Broadcom BCM 5421 */
+static struct mii_phy_ops bcm5421_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = bcm5411_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5421_phy_def = {
+ .phy_id = 0x002060e0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5421",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5421_phy_ops
+};
+
+/* Broadcom BCM 5421 built-in K2 */
+static struct mii_phy_ops bcm5421k2_phy_ops = {
+ .init = bcm5421k2_init,
+ .suspend = bcm5411_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5421k2_phy_def = {
+ .phy_id = 0x002062e0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5421-K2",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5421k2_phy_ops
+};
+
+/* Marvell 88E1101 (Apple seems to deal with 2 different revs;
+ * I masked out the last 8 bits to get both, but some specs
+ * would be useful here) --BenH.
+ */
+static struct mii_phy_ops marvell_phy_ops = {
+ .setup_aneg = marvell_setup_aneg,
+ .setup_forced = marvell_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = marvell_read_link
+};
+
+static struct mii_phy_def marvell_phy_def = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E1101",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &marvell_phy_ops
+};
+
+/* Generic implementation for most 10/100 PHYs */
+static struct mii_phy_ops generic_phy_ops = {
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def genmii_phy_def = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .features = MII_BASIC_FEATURES,
+ .magic_aneg = 0,
+ .ops = &generic_phy_ops
+};
+
+static struct mii_phy_def* mii_phy_table[] = {
+ &bcm5201_phy_def,
+ &bcm5221_phy_def,
+ &bcm5400_phy_def,
+ &bcm5401_phy_def,
+ &bcm5411_phy_def,
+ &bcm5421_phy_def,
+ &bcm5421k2_phy_def,
+ &marvell_phy_def,
+ &genmii_phy_def,
+ NULL
+};
+
+int mii_phy_probe(struct mii_phy *phy, int mii_id)
+{
+ int rc;
+ u32 id;
+ struct mii_phy_def* def;
+ int i;
+
+ /* We do not reset the mii_phy structure as the driver
+ * may re-probe the PHY regularly.
+ */
+ phy->mii_id = mii_id;
+
+ /* Take PHY out of isolate mode and reset it. */
+ rc = reset_one_mii_phy(phy, mii_id);
+ if (rc)
+ goto fail;
+
+ /* Read ID and find matching entry */
+ id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
+ printk(KERN_DEBUG "PHY ID: %x, addr: %x\n", id, mii_id);
+ for (i=0; (def = mii_phy_table[i]) != NULL; i++)
+ if ((id & def->phy_id_mask) == def->phy_id)
+ break;
+ /* Should never be NULL (we have a generic entry), but... */
+ if (def == NULL)
+ goto fail;
+
+ phy->def = def;
+
+ return 0;
+fail:
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->pause = 0;
+ phy->advertising = 0;
+ return -ENODEV;
+}
+
+EXPORT_SYMBOL(mii_phy_probe);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
new file mode 100644
index 000000000000..822cb58174ea
--- /dev/null
+++ b/drivers/net/sungem_phy.h
@@ -0,0 +1,117 @@
+#ifndef __SUNGEM_PHY_H__
+#define __SUNGEM_PHY_H__
+
+struct mii_phy;
+
+/* Operations supported by any kind of PHY */
+struct mii_phy_ops
+{
+ int (*init)(struct mii_phy *phy);
+ int (*suspend)(struct mii_phy *phy);
+ int (*setup_aneg)(struct mii_phy *phy, u32 advertise);
+ int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
+ int (*poll_link)(struct mii_phy *phy);
+ int (*read_link)(struct mii_phy *phy);
+};
+
+/* Structure used to statically define an mii/gii based PHY */
+struct mii_phy_def
+{
+ u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
+ u32 phy_id_mask; /* Significant bits */
+ u32 features; /* Ethtool SUPPORTED_* defines */
+ int magic_aneg; /* Autoneg does all speed test for us */
+ const char* name;
+ const struct mii_phy_ops* ops;
+};
+
+/* An instance of a PHY, partially borrowed from mii_if_info */
+struct mii_phy
+{
+ struct mii_phy_def* def;
+ int advertising;
+ int mii_id;
+
+ /* 1: autoneg enabled, 0: disabled */
+ int autoneg;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* Provided by host chip */
+ struct net_device* dev;
+ int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
+};
+
+/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
+ * filled in; the remaining fields will be filled in on return.
+ */
+extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
+
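+
+/* Illustrative sketch, not part of the original driver: how a host driver
+ * might wire up and probe a PHY through this interface.  The mdio accessor
+ * callbacks are supplied by the caller; advertising everything the PHY
+ * supports is just one possible policy.
+ */
+static inline int example_attach_phy(struct mii_phy *phy,
+                                     struct net_device *netdev, int phy_addr,
+                                     int (*mdio_read)(struct net_device *, int, int),
+                                     void (*mdio_write)(struct net_device *, int, int, int))
+{
+        int err;
+
+        phy->dev = netdev;
+        phy->mdio_read = mdio_read;
+        phy->mdio_write = mdio_write;
+
+        err = mii_phy_probe(phy, phy_addr);
+        if (err)
+                return err;
+
+        /* Start autonegotiation advertising all supported modes. */
+        if (phy->def->ops->setup_aneg)
+                err = phy->def->ops->setup_aneg(phy, phy->def->features);
+        return err;
+}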
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */
+#define LPA_PAUSE 0x0400
+
+/* More PHY registers (model specific) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT 0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000
+
+#define MII_BCM5201_AUXMODE2 0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008
+
+#define MII_BCM5201_MULTIPHY 0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008
+
+/* MII BCM5221 Additional registers */
+#define MII_BCM5221_TEST 0x1f
+#define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080
+#define MII_BCM5221_SHDOW_AUX_STAT2 0x1b
+#define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020
+#define MII_BCM5221_SHDOW_AUX_MODE4 0x1a
+#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001
+#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL 0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS 0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Marvell 88E1011 PHY control */
+#define MII_M1011_PHY_SPEC_CONTROL 0x10
+#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20
+#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40
+
+/* Marvell 88E1011 PHY status */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+
+#endif /* __SUNGEM_PHY_H__ */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
new file mode 100644
index 000000000000..d837b3c35723
--- /dev/null
+++ b/drivers/net/sunhme.c
@@ -0,0 +1,3426 @@
+/* $Id: sunhme.c,v 1.124 2002/01/15 06:25:51 davem Exp $
+ * sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
+ * auto carrier detecting ethernet driver. Also known as the
+ * "Happy Meal Ethernet" found on SunSwift SBUS cards.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2002, 2003 David S. Miller (davem@redhat.com)
+ *
+ * Changes :
+ * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
+ * - port to non-sparc architectures. Tested only on x86 and
+ * only currently works with QFE PCI cards.
+ * - ability to specify the MAC address at module load time by passing this
+ * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
+ */
+
+static char version[] =
+ "sunhme.c:v2.02 24/Aug/2003 David S. Miller (davem@redhat.com)\n";
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#ifdef __sparc__
+#include <asm/idprom.h>
+#include <asm/sbus.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+#ifndef __sparc_v9__
+#include <asm/io-unit.h>
+#endif
+#endif
+#include <asm/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#ifdef __sparc__
+#include <asm/pbm.h>
+#endif
+#endif
+
+#include "sunhme.h"
+
+
+#define DRV_NAME "sunhme"
+
+static int macaddr[6];
+
+/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
+MODULE_LICENSE("GPL");
+
+static struct happy_meal *root_happy_dev;
+
+#ifdef CONFIG_SBUS
+static struct quattro *qfe_sbus_list;
+#endif
+
+#ifdef CONFIG_PCI
+static struct quattro *qfe_pci_list;
+#endif
+
+#undef HMEDEBUG
+#undef SXDEBUG
+#undef RXDEBUG
+#undef TXDEBUG
+#undef TXLOGGING
+
+#ifdef TXLOGGING
+struct hme_tx_logent {
+ unsigned int tstamp;
+ int tx_new, tx_old;
+ unsigned int action;
+#define TXLOG_ACTION_IRQ 0x01
+#define TXLOG_ACTION_TXMIT 0x02
+#define TXLOG_ACTION_TBUSY 0x04
+#define TXLOG_ACTION_NBUFS 0x08
+ unsigned int status;
+};
+#define TX_LOG_LEN 128
+static struct hme_tx_logent tx_log[TX_LOG_LEN];
+static int txlog_cur_entry;
+static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
+{
+ struct hme_tx_logent *tlp;
+ unsigned long flags;
+
+ save_and_cli(flags);
+ tlp = &tx_log[txlog_cur_entry];
+ tlp->tstamp = (unsigned int)jiffies;
+ tlp->tx_new = hp->tx_new;
+ tlp->tx_old = hp->tx_old;
+ tlp->action = a;
+ tlp->status = s;
+ txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
+ restore_flags(flags);
+}
+static __inline__ void tx_dump_log(void)
+{
+ int i, this;
+
+ this = txlog_cur_entry;
+ for (i = 0; i < TX_LOG_LEN; i++) {
+ printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+ tx_log[this].tstamp,
+ tx_log[this].tx_new, tx_log[this].tx_old,
+ tx_log[this].action, tx_log[this].status);
+ this = (this + 1) & (TX_LOG_LEN - 1);
+ }
+}
+static __inline__ void tx_dump_ring(struct happy_meal *hp)
+{
+ struct hmeal_init_block *hb = hp->happy_block;
+ struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i+=4) {
+ printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
+ i, i + 4,
+ le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
+ le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
+ le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
+ le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
+ }
+}
+#else
+#define tx_add_log(hp, a, s) do { } while(0)
+#define tx_dump_log() do { } while(0)
+#define tx_dump_ring(hp) do { } while(0)
+#endif
+
+#ifdef HMEDEBUG
+#define HMD(x) printk x
+#else
+#define HMD(x)
+#endif
+
+/* #define AUTO_SWITCH_DEBUG */
+
+#ifdef AUTO_SWITCH_DEBUG
+#define ASD(x) printk x
+#else
+#define ASD(x)
+#endif
+
+#define DEFAULT_IPG0 16 /* For lance-mode only */
+#define DEFAULT_IPG1 8 /* For all modes */
+#define DEFAULT_IPG2 4 /* For all modes */
+#define DEFAULT_JAMSIZE 4 /* Toe jam */
+
+#if defined(CONFIG_PCI) && defined(MODULE)
+/* This happymeal_pci_ids table is only used as an advisory to depmod.
+ If this is ported to the new PCI interface, where it could be
+ referenced at any time due to hot plugging, any __initdata marking
+ would have to be removed. */
+
+static struct pci_device_id happymeal_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_SUN,
+ .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
+
+#endif
+
+/* NOTE: In the descriptor writes one _must_ write the address
+ * member _first_. The card must not be allowed to see
+ * the updated descriptor flags until the address is
+ * correct. I've added a write memory barrier between
+ * the two stores so that I can sleep well at night... -DaveM
+ */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+static void sbus_hme_write32(void __iomem *reg, u32 val)
+{
+ sbus_writel(val, reg);
+}
+
+static u32 sbus_hme_read32(void __iomem *reg)
+{
+ return sbus_readl(reg);
+}
+
+static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
+{
+ rxd->rx_addr = addr;
+ wmb();
+ rxd->rx_flags = flags;
+}
+
+static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
+{
+ txd->tx_addr = addr;
+ wmb();
+ txd->tx_flags = flags;
+}
+
+static u32 sbus_hme_read_desc32(u32 *p)
+{
+ return *p;
+}
+
+static void pci_hme_write32(void __iomem *reg, u32 val)
+{
+ writel(val, reg);
+}
+
+static u32 pci_hme_read32(void __iomem *reg)
+{
+ return readl(reg);
+}
+
+static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
+{
+ rxd->rx_addr = cpu_to_le32(addr);
+ wmb();
+ rxd->rx_flags = cpu_to_le32(flags);
+}
+
+static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
+{
+ txd->tx_addr = cpu_to_le32(addr);
+ wmb();
+ txd->tx_flags = cpu_to_le32(flags);
+}
+
+static u32 pci_hme_read_desc32(u32 *p)
+{
+ return cpu_to_le32p(p);
+}
+
+#define hme_write32(__hp, __reg, __val) \
+ ((__hp)->write32((__reg), (__val)))
+#define hme_read32(__hp, __reg) \
+ ((__hp)->read32(__reg))
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+ ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+ ((__hp)->write_txd((__txd), (__flags), (__addr)))
+#define hme_read_desc32(__hp, __p) \
+ ((__hp)->read_desc32(__p))
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ ((__hp)->dma_map((__hp)->happy_dev, (__ptr), (__size), (__dir)))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_unmap((__hp)->happy_dev, (__addr), (__size), (__dir)))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_sync_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir)))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_sync_for_device((__hp)->happy_dev, (__addr), (__size), (__dir)))
+#else
+#ifdef CONFIG_SBUS
+/* SBUS only compilation */
+#define hme_write32(__hp, __reg, __val) \
+ sbus_writel((__val), (__reg))
+#define hme_read32(__hp, __reg) \
+ sbus_readl(__reg)
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+do { (__rxd)->rx_addr = (__addr); \
+ wmb(); \
+ (__rxd)->rx_flags = (__flags); \
+} while(0)
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+do { (__txd)->tx_addr = (__addr); \
+ wmb(); \
+ (__txd)->tx_flags = (__flags); \
+} while(0)
+#define hme_read_desc32(__hp, __p) (*(__p))
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ sbus_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ sbus_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ sbus_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ sbus_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
+#else
+/* PCI only compilation */
+#define hme_write32(__hp, __reg, __val) \
+ writel((__val), (__reg))
+#define hme_read32(__hp, __reg) \
+ readl(__reg)
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+do { (__rxd)->rx_addr = cpu_to_le32(__addr); \
+ wmb(); \
+ (__rxd)->rx_flags = cpu_to_le32(__flags); \
+} while(0)
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+do { (__txd)->tx_addr = cpu_to_le32(__addr); \
+ wmb(); \
+ (__txd)->tx_flags = cpu_to_le32(__flags); \
+} while(0)
+#define hme_read_desc32(__hp, __p) cpu_to_le32p(__p)
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ pci_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ pci_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ pci_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ pci_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
+#endif
+#endif
+
+
+#ifdef SBUS_DMA_BIDIRECTIONAL
+# define DMA_BIDIRECTIONAL SBUS_DMA_BIDIRECTIONAL
+#else
+# define DMA_BIDIRECTIONAL 0
+#endif
+
+#ifdef SBUS_DMA_FROMDEVICE
+# define DMA_FROMDEVICE SBUS_DMA_FROMDEVICE
+#else
+# define DMA_FROMDEVICE 2
+#endif
+
+#ifdef SBUS_DMA_TODEVICE
+# define DMA_TODEVICE SBUS_DMA_TODEVICE
+#else
+# define DMA_TODEVICE 1
+#endif
+
+
+/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
+static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
+{
+ hme_write32(hp, tregs + TCVR_BBDATA, bit);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+}
+
+#if 0
+static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
+{
+ u32 ret;
+
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+ ret = hme_read32(hp, tregs + TCVR_CFG);
+ if (internal)
+ ret &= TCV_CFG_MDIO0;
+ else
+ ret &= TCV_CFG_MDIO1;
+
+ return ret;
+}
+#endif
+
+static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
+{
+ u32 retval;
+
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ udelay(1);
+ retval = hme_read32(hp, tregs + TCVR_CFG);
+ if (internal)
+ retval &= TCV_CFG_MDIO0;
+ else
+ retval &= TCV_CFG_MDIO1;
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+
+ return retval;
+}
+
+#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
+
+static int happy_meal_bb_read(struct happy_meal *hp,
+ void __iomem *tregs, int reg)
+{
+ u32 tmp;
+ int retval = 0;
+ int i;
+
+ ASD(("happy_meal_bb_read: reg=%d ", reg));
+
+ /* Enable the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 1);
+
+ /* Force BitBang into the idle state. */
+ for (i = 0; i < 32; i++)
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it the read sequence. */
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+
+ /* Give it the PHY address. */
+ tmp = hp->paddr & 0xff;
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it what register we want to read. */
+ tmp = (reg & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Close down the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 0);
+
+ /* Now read in the value. */
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ for (i = 15; i >= 0; i--)
+ retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ ASD(("value=%x\n", retval));
+ return retval;
+}
+
+static void happy_meal_bb_write(struct happy_meal *hp,
+ void __iomem *tregs, int reg,
+ unsigned short value)
+{
+ u32 tmp;
+ int i;
+
+ ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
+
+ /* Enable the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 1);
+
+ /* Force BitBang into the idle state. */
+ for (i = 0; i < 32; i++)
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it write sequence. */
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it the PHY address. */
+ tmp = (hp->paddr & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it what register we will be writing. */
+ tmp = (reg & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it to become ready for the bits. */
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+
+ for (i = 15; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
+
+ /* Close down the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 0);
+}
+
+#define TCVR_READ_TRIES 16
+
+static int happy_meal_tcvr_read(struct happy_meal *hp,
+ void __iomem *tregs, int reg)
+{
+ int tries = TCVR_READ_TRIES;
+ int retval;
+
+ ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
+ if (hp->tcvr_type == none) {
+ ASD(("no transceiver, value=TCVR_FAILURE\n"));
+ return TCVR_FAILURE;
+ }
+
+ if (!(hp->happy_flags & HFLAG_FENABLE)) {
+ ASD(("doing bit bang\n"));
+ return happy_meal_bb_read(hp, tregs, reg);
+ }
+
+ hme_write32(hp, tregs + TCVR_FRAME,
+ (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
+ while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
+ udelay(20);
+ if (!tries) {
+ printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
+ return TCVR_FAILURE;
+ }
+ retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
+ ASD(("value=%04x\n", retval));
+ return retval;
+}
+
+#define TCVR_WRITE_TRIES 16
+
+static void happy_meal_tcvr_write(struct happy_meal *hp,
+ void __iomem *tregs, int reg,
+ unsigned short value)
+{
+ int tries = TCVR_WRITE_TRIES;
+
+ ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
+
+ /* Welcome to Sun Microsystems, can I take your order please? */
+ if (!(hp->happy_flags & HFLAG_FENABLE)) {
+ happy_meal_bb_write(hp, tregs, reg, value);
+ return;
+ }
+
+ /* Would you like fries with that? */
+ hme_write32(hp, tregs + TCVR_FRAME,
+ (FRAME_WRITE | (hp->paddr << 23) |
+ ((reg & 0xff) << 18) | (value & 0xffff)));
+ while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
+ udelay(20);
+
+ /* Anything else? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
+
+ /* Fifty-two cents is your change, have a nice day. */
+}
+
+/* Auto negotiation. The scheme is very simple. We have a timer routine
+ * that keeps watching the auto negotiation process as it progresses.
+ * The DP83840 is first told to start doing its thing, we set up the timer
+ * and place the timer state machine in its initial state.
+ *
+ * Here the timer peeks at the DP83840 status registers at each tick to see
+ * if the auto negotiation has completed; we assume here that the DP83840 PHY
+ * will time out at some point and just tell us what (didn't) happen. For
+ * complete coverage we only allow so many ticks at this level to run;
+ * when this has expired we print a warning message and try another strategy.
+ * This "other" strategy is to force the interface into various speed/duplex
+ * configurations and we stop when we see a link-up condition before the
+ * maximum number of "peek" ticks have occurred.
+ *
+ * Once a valid link status has been detected we configure the BigMAC and
+ * the rest of the Happy Meal to speak the most efficient protocol we could
+ * get a clean link for. The priority for link configurations, highest first
+ * is:
+ * 100 Base-T Full Duplex
+ * 100 Base-T Half Duplex
+ * 10 Base-T Full Duplex
+ * 10 Base-T Half Duplex
+ *
+ * We start a new timer now, after a successful auto negotiation status has
+ * been detected. This timer just waits for the link-up bit to get set in
+ * the BMSR of the DP83840. When this occurs we print a kernel log message
+ * describing the link type in use and the fact that it is up.
+ *
+ * If a fatal error of some sort is signalled and detected in the interrupt
+ * service routine, and the chip is reset, or the link is ifconfig'd down
+ * and then back up, this entire process repeats itself all over again.
+ */
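+
+/* In terms of the timer states implemented below, this works out to
+ * roughly (a summary of the code that follows):
+ *
+ *	arbwait -> lupwait -> asleep	autoneg completed and link came up
+ *	arbwait -> ltrywait -> asleep	autoneg failed, a forced mode worked
+ *	ltrywait -> full chip re-init	every forced permutation failed
+ */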
+static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
+{
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+
+ /* Downgrade from full to half duplex. Only possible
+ * via ethtool.
+ */
+ if (hp->sw_bmcr & BMCR_FULLDPLX) {
+ hp->sw_bmcr &= ~(BMCR_FULLDPLX);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+ return 0;
+ }
+
+ /* Downgrade from 100 to 10. */
+ if (hp->sw_bmcr & BMCR_SPEED100) {
+ hp->sw_bmcr &= ~(BMCR_SPEED100);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+ return 0;
+ }
+
+ /* We've tried everything. */
+ return -1;
+}
+
+static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+ printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
+ if (hp->tcvr_type == external)
+ printk("external ");
+ else
+ printk("internal ");
+ printk("transceiver at ");
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+ if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
+ if (hp->sw_lpa & LPA_100FULL)
+ printk("100Mb/s, Full Duplex.\n");
+ else
+ printk("100Mb/s, Half Duplex.\n");
+ } else {
+ if (hp->sw_lpa & LPA_10FULL)
+ printk("10Mb/s, Full Duplex.\n");
+ else
+ printk("10Mb/s, Half Duplex.\n");
+ }
+}
+
+static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+ printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
+ if (hp->tcvr_type == external)
+ printk("external ");
+ else
+ printk("internal ");
+ printk("transceiver at ");
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (hp->sw_bmcr & BMCR_SPEED100)
+ printk("100Mb/s, ");
+ else
+ printk("10Mb/s, ");
+ if (hp->sw_bmcr & BMCR_FULLDPLX)
+ printk("Full Duplex.\n");
+ else
+ printk("Half Duplex.\n");
+}
+
+static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
+{
+ int full;
+
+ /* All we care about is making sure the bigmac tx_cfg has a
+ * proper duplex setting.
+ */
+ if (hp->timer_state == arbwait) {
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+ if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
+ goto no_response;
+ if (hp->sw_lpa & LPA_100FULL)
+ full = 1;
+ else if (hp->sw_lpa & LPA_100HALF)
+ full = 0;
+ else if (hp->sw_lpa & LPA_10FULL)
+ full = 1;
+ else
+ full = 0;
+ } else {
+ /* Forcing a link mode. */
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (hp->sw_bmcr & BMCR_FULLDPLX)
+ full = 1;
+ else
+ full = 0;
+ }
+
+ /* Before changing other bits in the tx_cfg register, and in
+ * general any of the other TX config registers too, you
+ * must:
+ * 1) Clear Enable
+ * 2) Poll with reads until that bit reads back as zero
+ * 3) Make TX configuration changes
+ * 4) Set Enable once more
+ */
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
+ ~(BIGMAC_TXCFG_ENABLE));
+ while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
+ barrier();
+ if (full) {
+ hp->happy_flags |= HFLAG_FULL;
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
+ BIGMAC_TXCFG_FULLDPLX);
+ } else {
+ hp->happy_flags &= ~(HFLAG_FULL);
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
+ ~(BIGMAC_TXCFG_FULLDPLX));
+ }
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
+ BIGMAC_TXCFG_ENABLE);
+ return 0;
+no_response:
+ return 1;
+}
+
+static int happy_meal_init(struct happy_meal *hp);
+
+static int is_lucent_phy(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+ unsigned short mr2, mr3;
+ int ret = 0;
+
+ mr2 = happy_meal_tcvr_read(hp, tregs, 2);
+ mr3 = happy_meal_tcvr_read(hp, tregs, 3);
+ if ((mr2 & 0xffff) == 0x0180 &&
+ ((mr3 & 0xffff) >> 10) == 0x1d)
+ ret = 1;
+
+ return ret;
+}
+
+static void happy_meal_timer(unsigned long data)
+{
+ struct happy_meal *hp = (struct happy_meal *) data;
+ void __iomem *tregs = hp->tcvregs;
+ int restart_timer = 0;
+
+ spin_lock_irq(&hp->happy_lock);
+
+ hp->timer_ticks++;
+ switch(hp->timer_state) {
+ case arbwait:
+ /* Only allow for 10 ticks, that's about 12 seconds and much too
+ * long to wait for arbitration to complete.
+ */
+ if (hp->timer_ticks >= 10) {
+ /* Enter force mode. */
+ do_force_mode:
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
+ hp->dev->name);
+ hp->sw_bmcr = BMCR_SPEED100;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ if (!is_lucent_phy(hp)) {
+ /* OK, seems we need to disable the transceiver for the first
+ * tick to make sure we get an accurate link state at the
+ * second tick.
+ */
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ hp->timer_state = ltrywait;
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ /* Anything interesting happen? */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
+ int ret;
+
+ /* Just what we've been waiting for... */
+ ret = set_happy_link_modes(hp, tregs);
+ if (ret) {
+ /* Ooops, something bad happened, go to force
+ * mode.
+ *
+ * XXX Broken hubs which don't support 802.3u
+ * XXX auto-negotiation make this happen as well.
+ */
+ goto do_force_mode;
+ }
+
+ /* Success, at least so far, advance our state engine. */
+ hp->timer_state = lupwait;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case lupwait:
+ /* Auto negotiation was successful and we are awaiting a
+ * link up status. I have decided to let this timer run
+ * forever until some sort of error is signalled, reporting
+ * a message to the user at 10 second intervals.
+ */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ if (hp->sw_bmsr & BMSR_LSTATUS) {
+ /* Wheee, it's up, display the link mode in use and put
+ * the timer to sleep.
+ */
+ display_link_mode(hp, tregs);
+ hp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (hp->timer_ticks >= 10) {
+ printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
+ "not completely up.\n", hp->dev->name);
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case ltrywait:
+ /* Making the timeout here too long can make it take
+ * annoyingly long to attempt all of the link mode
+ * permutations, but then again this is essentially
+ * error recovery code for the most part.
+ */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
+ if (hp->timer_ticks == 1) {
+ if (!is_lucent_phy(hp)) {
+ /* Disable the transceiver this tick; we'll re-enable it next
+ * tick, then check link state on the following tick.
+ */
+ hp->sw_csconfig |= CSCONFIG_TCVDISAB;
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ restart_timer = 1;
+ break;
+ }
+ if (hp->timer_ticks == 2) {
+ if (!is_lucent_phy(hp)) {
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ restart_timer = 1;
+ break;
+ }
+ if (hp->sw_bmsr & BMSR_LSTATUS) {
+ /* Force mode selection success. */
+ display_forced_link_mode(hp, tregs);
+ set_happy_link_modes(hp, tregs); /* XXX error? then what? */
+ hp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
+ int ret;
+
+ ret = try_next_permutation(hp, tregs);
+ if (ret == -1) {
+ /* Aieee, tried them all, reset the
+ * chip and try all over again.
+ */
+
+ /* Let the user know... */
+ printk(KERN_NOTICE "%s: Link down, cable problem?\n",
+ hp->dev->name);
+
+ ret = happy_meal_init(hp);
+ if (ret) {
+ /* ho hum... */
+ printk(KERN_ERR "%s: Error, cannot re-init the "
+ "Happy Meal.\n", hp->dev->name);
+ }
+ goto out;
+ }
+ if (!is_lucent_phy(hp)) {
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ hp->sw_csconfig |= CSCONFIG_TCVDISAB;
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case asleep:
+ default:
+ /* Can't happen.... */
+ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
+ hp->dev->name);
+ restart_timer = 0;
+ hp->timer_ticks = 0;
+ hp->timer_state = asleep; /* foo on you */
+ break;
+ };
+
+ if (restart_timer) {
+ hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
+ add_timer(&hp->happy_timer);
+ }
+
+out:
+ spin_unlock_irq(&hp->happy_lock);
+}
+
+#define TX_RESET_TRIES 32
+#define RX_RESET_TRIES 32
+
+/* hp->happy_lock must be held */
+static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+ int tries = TX_RESET_TRIES;
+
+ HMD(("happy_meal_tx_reset: reset, "));
+
+ /* Would you like to try our SMCC Delux? */
+ hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
+ while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
+ udelay(20);
+
+ /* Lettuce, tomato, buggy hardware (no extra charge)? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
+
+ /* Take care. */
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+ int tries = RX_RESET_TRIES;
+
+ HMD(("happy_meal_rx_reset: reset, "));
+
+ /* We have a special on GNU/Viking hardware bugs today. */
+ hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
+ while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
+ udelay(20);
+
+ /* Will that be all? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
+
+ /* Don't forget your vik_1137125_wa. Have a nice day. */
+ HMD(("done\n"));
+}
+
+#define STOP_TRIES 16
+
+/* hp->happy_lock must be held */
+static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
+{
+ int tries = STOP_TRIES;
+
+ HMD(("happy_meal_stop: reset, "));
+
+ /* We're consolidating our STB products, it's your lucky day. */
+ hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
+ while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
+ udelay(20);
+
+ /* Come back next week when we are "Sun Microelectronics". */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Fry guys.");
+
+ /* Remember: "Different name, same old buggy as shit hardware." */
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
+{
+ struct net_device_stats *stats = &hp->net_stats;
+
+ stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
+ hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
+
+ stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
+ hme_write32(hp, bregs + BMAC_UNALECTR, 0);
+
+ stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
+ hme_write32(hp, bregs + BMAC_GLECTR, 0);
+
+ stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
+
+ stats->collisions +=
+ (hme_read32(hp, bregs + BMAC_EXCTR) +
+ hme_read32(hp, bregs + BMAC_LTCTR));
+ hme_write32(hp, bregs + BMAC_EXCTR, 0);
+ hme_write32(hp, bregs + BMAC_LTCTR, 0);
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
+{
+ ASD(("happy_meal_poll_stop: "));
+
+ /* If polling disabled or not polling already, nothing to do. */
+ if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
+ (HFLAG_POLLENABLE | HFLAG_POLL)) {
+ HMD(("not polling, return\n"));
+ return;
+ }
+
+ /* Shut up the MIF. */
+ ASD(("were polling, mif ints off, "));
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+ /* Turn off polling. */
+ ASD(("polling off, "));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
+
+ /* We are no longer polling. */
+ hp->happy_flags &= ~(HFLAG_POLL);
+
+ /* Let the bits set. */
+ udelay(200);
+ ASD(("done\n"));
+}
+
+/* Only Sun can take such nice parts and fuck up the programming interface
+ * like this. Good job guys...
+ */
+#define TCVR_RESET_TRIES 16 /* It should reset quickly */
+#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
+
+/* hp->happy_lock must be held */
+static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
+{
+ u32 tconfig;
+ int result, tries = TCVR_RESET_TRIES;
+
+ tconfig = hme_read32(hp, tregs + TCVR_CFG);
+ ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
+ if (hp->tcvr_type == external) {
+ ASD(("external<"));
+ hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
+ hp->tcvr_type = internal;
+ hp->paddr = TCV_PADDR_ITX;
+ ASD(("ISOLATE,"));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+ (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE) {
+ ASD(("phyread_fail>\n"));
+ return -1;
+ }
+ ASD(("phyread_ok,PSELECT>"));
+ hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+ hp->tcvr_type = external;
+ hp->paddr = TCV_PADDR_ETX;
+ } else {
+ if (tconfig & TCV_CFG_MDIO1) {
+ ASD(("internal<PSELECT,"));
+ hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
+ ASD(("ISOLATE,"));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+ (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE) {
+ ASD(("phyread_fail>\n"));
+ return -1;
+ }
+ ASD(("phyread_ok,~PSELECT>"));
+ hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
+ hp->tcvr_type = internal;
+ hp->paddr = TCV_PADDR_ITX;
+ }
+ }
+
+ ASD(("BMCR_RESET "));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
+
+ while (--tries) {
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE)
+ return -1;
+ hp->sw_bmcr = result;
+ if (!(result & BMCR_RESET))
+ break;
+ udelay(20);
+ }
+ if (!tries) {
+ ASD(("BMCR RESET FAILED!\n"));
+ return -1;
+ }
+ ASD(("RESET_OK\n"));
+
+ /* Get fresh copies of the PHY registers. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+ hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+ ASD(("UNISOLATE"));
+ hp->sw_bmcr &= ~(BMCR_ISOLATE);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ tries = TCVR_UNISOLATE_TRIES;
+ while (--tries) {
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE)
+ return -1;
+ if (!(result & BMCR_ISOLATE))
+ break;
+ udelay(20);
+ }
+ if (!tries) {
+ ASD((" FAILED!\n"));
+ return -1;
+ }
+ ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+ if (!is_lucent_phy(hp)) {
+ result = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
+ }
+ return 0;
+}
+
+/* Figure out whether we have an internal or external transceiver.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
+{
+ unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
+
+ ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+ if (hp->happy_flags & HFLAG_POLL) {
+ /* If we are polling, we must stop to get the transceiver type. */
+ ASD(("<polling> "));
+ if (hp->tcvr_type == internal) {
+ if (tconfig & TCV_CFG_MDIO1) {
+ ASD(("<internal> <poll stop> "));
+ happy_meal_poll_stop(hp, tregs);
+ hp->paddr = TCV_PADDR_ETX;
+ hp->tcvr_type = external;
+ ASD(("<external>\n"));
+ tconfig &= ~(TCV_CFG_PENABLE);
+ tconfig |= TCV_CFG_PSELECT;
+ hme_write32(hp, tregs + TCVR_CFG, tconfig);
+ }
+ } else {
+ if (hp->tcvr_type == external) {
+ ASD(("<external> "));
+ if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
+ ASD(("<poll stop> "));
+ happy_meal_poll_stop(hp, tregs);
+ hp->paddr = TCV_PADDR_ITX;
+ hp->tcvr_type = internal;
+ ASD(("<internal>\n"));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) &
+ ~(TCV_CFG_PSELECT));
+ }
+ ASD(("\n"));
+ } else {
+ ASD(("<none>\n"));
+ }
+ }
+ } else {
+ u32 reread = hme_read32(hp, tregs + TCVR_CFG);
+
+ /* Else we can just work off of the MDIO bits. */
+ ASD(("<not polling> "));
+ if (reread & TCV_CFG_MDIO1) {
+ hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+ hp->paddr = TCV_PADDR_ETX;
+ hp->tcvr_type = external;
+ ASD(("<external>\n"));
+ } else {
+ if (reread & TCV_CFG_MDIO0) {
+ hme_write32(hp, tregs + TCVR_CFG,
+ tconfig & ~(TCV_CFG_PSELECT));
+ hp->paddr = TCV_PADDR_ITX;
+ hp->tcvr_type = internal;
+ ASD(("<internal>\n"));
+ } else {
+ printk(KERN_ERR "happy meal: Transceiver and a coke please.");
+ hp->tcvr_type = none; /* Grrr... */
+ ASD(("<none>\n"));
+ }
+ }
+ }
+}
+
+/* The receive ring buffers are a bit tricky to get right. Here goes...
+ *
+ * The buffers we dma into must be 64 byte aligned. So we use a special
+ * alloc_skb() routine for the happy meal to allocate 64 bytes more than
+ * we really need.
+ *
+ * We use skb_reserve() to align the data block we get in the skb. We
+ * also program the etxregs->cfg register to use an offset of 2. This
+ * empirical constant plus the ethernet header size will always leave
+ * us with a nicely aligned ip header once we pass things up to the
+ * protocol layers.
+ *
+ * The numbers work out to:
+ *
+ * Max ethernet frame size 1518
+ * Ethernet header size 14
+ * Happy Meal base offset 2
+ *
+ * Say a skb data area is at 0xf001b010, and its size alloced is
+ * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
+ *
+ * First our alloc_skb() routine aligns the data base to a 64 byte
+ * boundary. We now have 0xf001b040 as our skb data address. We
+ * plug this into the receive descriptor address.
+ *
+ * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
+ * So now the data we will end up looking at starts at 0xf001b042. When
+ * the packet arrives, we will check out the size received and subtract
+ * this from the skb->length. Then we just pass the packet up to the
+ * protocols as is, and allocate a new skb to replace this slot we have
+ * just received from.
+ *
+ * The ethernet layer will strip the ether header from the front of the
+ * skb we just sent to it, this leaves us with the ip header sitting
+ * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
+ * Happy Meal has even checksummed the tcp/udp data for us. The 16
+ * bit checksum is obtained from the low bits of the receive descriptor
+ * flags, thus:
+ *
+ * skb->csum = rxd->rx_flags & 0xffff;
+ * skb->ip_summed = CHECKSUM_HW;
+ *
+ * before sending off the skb to the protocols, and we are good as gold.
+ */
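+
+/* A minimal sketch of the 64-byte-aligning allocation described above;
+ * the real helper used below, happy_meal_alloc_skb(), is defined
+ * elsewhere in the driver, so this is only an illustration of the idea:
+ *
+ *	struct sk_buff *skb = alloc_skb(length + 64, gfp_flags);
+ *	if (skb) {
+ *		int offset = (unsigned long) skb->data & 63;
+ *		if (offset)
+ *			skb_reserve(skb, 64 - offset);
+ *	}
+ *	return skb;
+ */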
+static void happy_meal_clean_rings(struct happy_meal *hp)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (hp->rx_skbs[i] != NULL) {
+ struct sk_buff *skb = hp->rx_skbs[i];
+ struct happy_meal_rxd *rxd;
+ u32 dma_addr;
+
+ rxd = &hp->happy_block->happy_meal_rxd[i];
+ dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
+ hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ hp->rx_skbs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (hp->tx_skbs[i] != NULL) {
+ struct sk_buff *skb = hp->tx_skbs[i];
+ struct happy_meal_txd *txd;
+ u32 dma_addr;
+ int frag;
+
+ hp->tx_skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &hp->happy_block->happy_meal_txd[i];
+ dma_addr = hme_read_desc32(hp, &txd->tx_addr);
+ hme_dma_unmap(hp, dma_addr,
+ (hme_read_desc32(hp, &txd->tx_flags)
+ & TXFLAG_SIZE),
+ DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags)
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_init_rings(struct happy_meal *hp)
+{
+ struct hmeal_init_block *hb = hp->happy_block;
+ struct net_device *dev = hp->dev;
+ int i;
+
+ HMD(("happy_meal_init_rings: counters to zero, "));
+ hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
+
+ /* Free any skippy bufs left around in the rings. */
+ HMD(("clean, "));
+ happy_meal_clean_rings(hp);
+
+ /* Now get new skippy bufs for the receive ring. */
+ HMD(("init rxring, "));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
+ hp->rx_skbs[i] = skb;
+ skb->dev = dev;
+
+ /* Because we reserve afterwards. */
+ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i],
+ (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
+ hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+ skb_reserve(skb, RX_OFFSET);
+ }
+
+ HMD(("init txring, "));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
+
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+ void __iomem *tregs,
+ struct ethtool_cmd *ep)
+{
+ int timeout;
+
+ /* Read all of the registers we are interested in now. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+ hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+
+ /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
+
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+ if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ /* Advertise everything we can support. */
+ if (hp->sw_bmsr & BMSR_10HALF)
+ hp->sw_advertise |= (ADVERTISE_10HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+ if (hp->sw_bmsr & BMSR_10FULL)
+ hp->sw_advertise |= (ADVERTISE_10FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10FULL);
+ if (hp->sw_bmsr & BMSR_100HALF)
+ hp->sw_advertise |= (ADVERTISE_100HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100HALF);
+ if (hp->sw_bmsr & BMSR_100FULL)
+ hp->sw_advertise |= (ADVERTISE_100FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100FULL);
+ happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+
+ /* XXX Currently no Happy Meal cards I know of support 100BaseT4,
+ * XXX and this is because the DP83840 does not support it, changes
+ * XXX would need to be made to the tx/rx logic in the driver as well
+ * XXX so I completely skip checking for it in the BMSR for now.
+ */
+
+#ifdef AUTO_SWITCH_DEBUG
+ ASD(("%s: Advertising [ ", hp->dev->name));
+ if (hp->sw_advertise & ADVERTISE_10HALF)
+ ASD(("10H "));
+ if (hp->sw_advertise & ADVERTISE_10FULL)
+ ASD(("10F "));
+ if (hp->sw_advertise & ADVERTISE_100HALF)
+ ASD(("100H "));
+ if (hp->sw_advertise & ADVERTISE_100FULL)
+ ASD(("100F "));
+#endif
+
+ /* Enable Auto-Negotiation, this is usually on already... */
+ hp->sw_bmcr |= BMCR_ANENABLE;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* Restart it to make sure it is going. */
+ hp->sw_bmcr |= BMCR_ANRESTART;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* BMCR_ANRESTART self clears when the process has begun. */
+
+ timeout = 64; /* More than enough. */
+ while (--timeout) {
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (!(hp->sw_bmcr & BMCR_ANRESTART))
+ break; /* got it. */
+ udelay(10);
+ }
+ if (!timeout) {
+ printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
+ "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
+ printk(KERN_NOTICE "%s: Performing force link detection.\n",
+ hp->dev->name);
+ goto force_link;
+ } else {
+ hp->timer_state = arbwait;
+ }
+ } else {
+force_link:
+ /* Force the link up, trying first a particular mode.
+ * Either we are here at the request of ethtool or
+ * because the Happy Meal would not start to autoneg.
+ */
+
+ /* Disable auto-negotiation in BMCR, enable the duplex and
+ * speed setting, init the timer state machine, and fire it off.
+ */
+ if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ hp->sw_bmcr = BMCR_SPEED100;
+ } else {
+ if (ep->speed == SPEED_100)
+ hp->sw_bmcr = BMCR_SPEED100;
+ else
+ hp->sw_bmcr = 0;
+ if (ep->duplex == DUPLEX_FULL)
+ hp->sw_bmcr |= BMCR_FULLDPLX;
+ }
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ if (!is_lucent_phy(hp)) {
+ /* OK, seems we need to disable the transceiver for the first
+ * tick to make sure we get an accurate link state at the
+ * second tick.
+ */
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
+ hp->sw_csconfig);
+ }
+ hp->timer_state = ltrywait;
+ }
+
+ hp->timer_ticks = 0;
+ hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ hp->happy_timer.data = (unsigned long) hp;
+ hp->happy_timer.function = &happy_meal_timer;
+ add_timer(&hp->happy_timer);
+}
+
+/* hp->happy_lock must be held */
+static int happy_meal_init(struct happy_meal *hp)
+{
+ void __iomem *gregs = hp->gregs;
+ void __iomem *etxregs = hp->etxregs;
+ void __iomem *erxregs = hp->erxregs;
+ void __iomem *bregs = hp->bigmacregs;
+ void __iomem *tregs = hp->tcvregs;
+ u32 regtmp, rxcfg;
+ unsigned char *e = &hp->dev->dev_addr[0];
+
+ /* If auto-negotiation timer is running, kill it. */
+ del_timer(&hp->happy_timer);
+
+ HMD(("happy_meal_init: happy_flags[%08x] ",
+ hp->happy_flags));
+ if (!(hp->happy_flags & HFLAG_INIT)) {
+ HMD(("set HFLAG_INIT, "));
+ hp->happy_flags |= HFLAG_INIT;
+ happy_meal_get_counters(hp, bregs);
+ }
+
+ /* Stop polling. */
+ HMD(("to happy_meal_poll_stop\n"));
+ happy_meal_poll_stop(hp, tregs);
+
+ /* Stop transmitter and receiver. */
+ HMD(("happy_meal_init: to happy_meal_stop\n"));
+ happy_meal_stop(hp, gregs);
+
+ /* Alloc and reset the tx/rx descriptor chains. */
+ HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+ happy_meal_init_rings(hp);
+
+ /* Shut up the MIF. */
+ HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
+ hme_read32(hp, tregs + TCVR_IMASK)));
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+ /* See if we can enable the MIF frame on this card to speak to the DP83840. */
+ if (hp->happy_flags & HFLAG_FENABLE) {
+ HMD(("use frame old[%08x], ",
+ hme_read32(hp, tregs + TCVR_CFG)));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+ } else {
+ HMD(("use bitbang old[%08x], ",
+ hme_read32(hp, tregs + TCVR_CFG)));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+ }
+
+ /* Check the state of the transceiver. */
+ HMD(("to happy_meal_transceiver_check\n"));
+ happy_meal_transceiver_check(hp, tregs);
+
+ /* Put the Big Mac into a sane state. */
+ HMD(("happy_meal_init: "));
+ switch(hp->tcvr_type) {
+ case none:
+ /* Cannot operate if we don't know the transceiver type! */
+ HMD(("AAIEEE no transceiver type, EAGAIN"));
+ return -EAGAIN;
+
+ case internal:
+ /* Using the MII buffers. */
+ HMD(("internal, using MII, "));
+ hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+ break;
+
+ case external:
+ /* Not using the MII, disable it. */
+ HMD(("external, disable MII, "));
+ hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+ break;
+ };
+
+ if (happy_meal_tcvr_reset(hp, tregs))
+ return -EAGAIN;
+
+ /* Reset the Happy Meal Big Mac transceiver and the receiver. */
+ HMD(("tx/rx reset, "));
+ happy_meal_tx_reset(hp, bregs);
+ happy_meal_rx_reset(hp, bregs);
+
+ /* Set jam size and inter-packet gaps to reasonable defaults. */
+ HMD(("jsize/ipg1/ipg2, "));
+ hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
+ hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
+ hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
+
+ /* Load up the MAC address and random seed. */
+ HMD(("rseed/macaddr, "));
+
+ /* The docs recommend using the 10 LSBs of our MAC address here. */
+ hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
+
+ hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
+ hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
+ hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
+
+ HMD(("htable, "));
+ if ((hp->dev->flags & IFF_ALLMULTI) ||
+ (hp->dev->mc_count > 64)) {
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+ } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
+ u16 hash_table[4];
+ struct dev_mc_list *dmi = hp->dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
+ for (i = 0; i < hp->dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+ hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+ hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+ hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+ } else {
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0);
+ }
+
+ /* Set the RX and TX ring ptrs. */
+ HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+ hme_write32(hp, erxregs + ERX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
+ hme_write32(hp, etxregs + ETX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
+
+ /* Parity issues in the ERX unit of some HME revisions can cause some
+ * registers to not be written unless their parity is even. Detect such
+ * lost writes and simply rewrite with a low bit set (which will be ignored
+ * since the rxring needs to be 2K aligned).
+ */
+ if (hme_read32(hp, erxregs + ERX_RING) !=
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
+ hme_write32(hp, erxregs + ERX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
+ | 0x4);
+
+ /* Set the supported burst sizes. */
+ HMD(("happy_meal_init: old[%08x] bursts<",
+ hme_read32(hp, gregs + GREG_CFG)));
+
+#ifndef __sparc__
+ /* It is always PCI and can handle 64byte bursts. */
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
+#else
+ if ((hp->happy_bursts & DMA_BURST64) &&
+ ((hp->happy_flags & HFLAG_PCI) != 0
+#ifdef CONFIG_SBUS
+ || sbus_can_burst64(hp->happy_dev)
+#endif
+ || 0)) {
+ u32 gcfg = GREG_CFG_BURST64;
+
+ /* I have no idea if I should set the extended
+ * transfer mode bit for Cheerio, so for now I
+ * do not. -DaveM
+ */
+#ifdef CONFIG_SBUS
+ if ((hp->happy_flags & HFLAG_PCI) == 0 &&
+ sbus_can_dma_64bit(hp->happy_dev)) {
+ sbus_set_sbus64(hp->happy_dev,
+ hp->happy_bursts);
+ gcfg |= GREG_CFG_64BIT;
+ }
+#endif
+
+ HMD(("64>"));
+ hme_write32(hp, gregs + GREG_CFG, gcfg);
+ } else if (hp->happy_bursts & DMA_BURST32) {
+ HMD(("32>"));
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
+ } else if (hp->happy_bursts & DMA_BURST16) {
+ HMD(("16>"));
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
+ } else {
+ HMD(("XXX>"));
+ hme_write32(hp, gregs + GREG_CFG, 0);
+ }
+#endif /* __sparc__ */
+
+ /* Turn off interrupts we do not want to hear. */
+ HMD((", enable global interrupts, "));
+ hme_write32(hp, gregs + GREG_IMASK,
+ (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
+ GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
+
+ /* Set the transmit ring buffer size. */
+ HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
+ hme_read32(hp, etxregs + ETX_RSIZE)));
+ hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
+
+ /* Enable transmitter DVMA. */
+ HMD(("tx dma enable old[%08x], ",
+ hme_read32(hp, etxregs + ETX_CFG)));
+ hme_write32(hp, etxregs + ETX_CFG,
+ hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
+
+ /* This chip really rots: for the receiver, sometimes when you
+ * write to its control registers not all the bits get there
+ * properly. I cannot think of a sane way to provide complete
+ * coverage for this hardware bug yet.
+ */
+ HMD(("erx regs bug old[%08x]\n",
+ hme_read32(hp, erxregs + ERX_CFG)));
+ hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+ regtmp = hme_read32(hp, erxregs + ERX_CFG);
+ hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+ if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
+ printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
+ printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
+ ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+ /* XXX Should return failure here... */
+ }
+
+ /* Enable Big Mac hash table filter. */
+ HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
+ hme_read32(hp, bregs + BMAC_RXCFG)));
+ rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
+ if (hp->dev->flags & IFF_PROMISC)
+ rxcfg |= BIGMAC_RXCFG_PMISC;
+ hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
+
+ /* Let the bits settle in the chip. */
+ udelay(10);
+
+ /* Ok, configure the Big Mac transmitter. */
+ HMD(("BIGMAC init, "));
+ regtmp = 0;
+ if (hp->happy_flags & HFLAG_FULL)
+ regtmp |= BIGMAC_TXCFG_FULLDPLX;
+
+ /* Don't turn on the "don't give up" bit for now. It could cause hme
+ * to deadlock with the PHY if a Jabber occurs.
+ */
+ hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
+
+ /* Give up after 16 TX attempts. */
+ hme_write32(hp, bregs + BMAC_ALIMIT, 16);
+
+ /* Enable the output drivers no matter what. */
+ regtmp = BIGMAC_XCFG_ODENABLE;
+
+ /* If card can do lance mode, enable it. */
+ if (hp->happy_flags & HFLAG_LANCE)
+ regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
+
+ /* Disable the MII buffers if using external transceiver. */
+ if (hp->tcvr_type == external)
+ regtmp |= BIGMAC_XCFG_MIIDISAB;
+
+ HMD(("XIF config old[%08x], ",
+ hme_read32(hp, bregs + BMAC_XIFCFG)));
+ hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
+
+ /* Start things up. */
+ HMD(("tx old[%08x] and rx [%08x] ON!\n",
+ hme_read32(hp, bregs + BMAC_TXCFG),
+ hme_read32(hp, bregs + BMAC_RXCFG)));
+ hme_write32(hp, bregs + BMAC_TXCFG,
+ hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
+ hme_write32(hp, bregs + BMAC_RXCFG,
+ hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
+
+ /* Get the autonegotiation started, and the watch timer ticking. */
+ happy_meal_begin_auto_negotiation(hp, tregs, NULL);
+
+ /* Success. */
+ return 0;
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+ void __iomem *bregs = hp->bigmacregs;
+ void __iomem *gregs = hp->gregs;
+
+ happy_meal_stop(hp, gregs);
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+ if (hp->happy_flags & HFLAG_FENABLE)
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+ else
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+ happy_meal_transceiver_check(hp, tregs);
+ switch(hp->tcvr_type) {
+ case none:
+ return;
+ case internal:
+ hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+ break;
+ case external:
+ hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+ break;
+ };
+ if (happy_meal_tcvr_reset(hp, tregs))
+ return;
+
+ /* Latch PHY registers as of now. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+ /* Advertise everything we can support. */
+ if (hp->sw_bmsr & BMSR_10HALF)
+ hp->sw_advertise |= (ADVERTISE_10HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+ if (hp->sw_bmsr & BMSR_10FULL)
+ hp->sw_advertise |= (ADVERTISE_10FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10FULL);
+ if (hp->sw_bmsr & BMSR_100HALF)
+ hp->sw_advertise |= (ADVERTISE_100HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100HALF);
+ if (hp->sw_bmsr & BMSR_100FULL)
+ hp->sw_advertise |= (ADVERTISE_100FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100FULL);
+
+ /* Update the PHY advertisement register. */
+ happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+}
+
+/* Once status is latched (by happy_meal_interrupt) it is cleared by
+ * the hardware, so we cannot re-read it and get a correct value.
+ *
+ * hp->happy_lock must be held
+ */
+static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
+{
+ int reset = 0;
+
+ /* Only print messages for non-counter related interrupts. */
+ if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
+ GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
+ GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
+ GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
+ GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
+ GREG_STAT_SLVPERR))
+ printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
+ hp->dev->name, status);
+
+ if (status & GREG_STAT_RFIFOVF) {
+ /* Receive FIFO overflow is harmless and the hardware will take
+ care of it, just some packets are lost. Who cares. */
+ printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+ }
+
+ if (status & GREG_STAT_STSTERR) {
+ /* BigMAC SQE link test failed. */
+ printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_TFIFO_UND) {
+ /* Transmit FIFO underrun, again DMA error likely. */
+ printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
+ hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_MAXPKTERR) {
+ /* Driver error, tried to transmit something larger
+ * than ethernet max mtu.
+ */
+ printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_NORXD) {
+ /* This is harmless, it just means the system is
+ * quite loaded and the incoming packet rate was
+ * faster than the interrupt handler could keep up
+ * with.
+ */
+ printk(KERN_INFO "%s: Happy Meal out of receive "
+ "descriptors, packet dropped.\n",
+ hp->dev->name);
+ }
+
+ if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
+ /* All sorts of DMA receive errors. */
+ printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
+ if (status & GREG_STAT_RXERR)
+ printk("GenericError ");
+ if (status & GREG_STAT_RXPERR)
+ printk("ParityError ");
+ if (status & GREG_STAT_RXTERR)
+ printk("RxTagBotch ");
+ printk("]\n");
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_EOPERR) {
+ /* Driver bug, didn't set EOP bit in tx descriptor given
+ * to the happy meal.
+ */
+ printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
+ hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_MIFIRQ) {
+ /* MIF signalled an interrupt, were we polling it? */
+ printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+ }
+
+ if (status &
+ (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
+ /* All sorts of transmit DMA errors. */
+ printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
+ if (status & GREG_STAT_TXEACK)
+ printk("GenericError ");
+ if (status & GREG_STAT_TXLERR)
+ printk("LateError ");
+ if (status & GREG_STAT_TXPERR)
+ printk("ParityErro ");
+ if (status & GREG_STAT_TXTERR)
+ printk("TagBotch ");
+ printk("]\n");
+ reset = 1;
+ }
+
+ if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
+ /* Bus or parity error when cpu accessed happy meal registers
+ * or its internal FIFOs. Should never see this.
+ */
+ printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
+ hp->dev->name,
+ (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+ reset = 1;
+ }
+
+ if (reset) {
+ printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+ happy_meal_init(hp);
+ return 1;
+ }
+ return 0;
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_mif_interrupt(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+
+ printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+
+ /* Use the fastest transmission protocol possible. */
+ if (hp->sw_lpa & LPA_100FULL) {
+ printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
+ hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
+ } else if (hp->sw_lpa & LPA_100HALF) {
+ printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
+ hp->sw_bmcr |= BMCR_SPEED100;
+ } else if (hp->sw_lpa & LPA_10FULL) {
+ printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
+ hp->sw_bmcr |= BMCR_FULLDPLX;
+ } else {
+ printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
+ }
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* Finally stop polling and shut up the MIF. */
+ happy_meal_poll_stop(hp, tregs);
+}
+
+#ifdef TXDEBUG
+#define TXD(x) printk x
+#else
+#define TXD(x)
+#endif
+
+/* hp->happy_lock must be held */
+static void happy_meal_tx(struct happy_meal *hp)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+ struct happy_meal_txd *this;
+ struct net_device *dev = hp->dev;
+ int elem;
+
+ elem = hp->tx_old;
+ TXD(("TX<"));
+ while (elem != hp->tx_new) {
+ struct sk_buff *skb;
+ u32 flags, dma_addr, dma_len;
+ int frag;
+
+ TXD(("[%d]", elem));
+ this = &txbase[elem];
+ flags = hme_read_desc32(hp, &this->tx_flags);
+ if (flags & TXFLAG_OWN)
+ break;
+ skb = hp->tx_skbs[elem];
+ if (skb_shinfo(skb)->nr_frags) {
+ int last;
+
+ last = elem + skb_shinfo(skb)->nr_frags;
+ last &= (TX_RING_SIZE - 1);
+ flags = hme_read_desc32(hp, &txbase[last].tx_flags);
+ if (flags & TXFLAG_OWN)
+ break;
+ }
+ hp->tx_skbs[elem] = NULL;
+ hp->net_stats.tx_bytes += skb->len;
+
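+ /* Unmap the head descriptor plus one descriptor per page fragment;
+ * each descriptor keeps its buffer length in the low TXFLAG_SIZE
+ * bits of tx_flags.
+ */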
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ dma_addr = hme_read_desc32(hp, &this->tx_addr);
+ dma_len = hme_read_desc32(hp, &this->tx_flags);
+
+ dma_len &= TXFLAG_SIZE;
+ hme_dma_unmap(hp, dma_addr, dma_len, DMA_TODEVICE);
+
+ elem = NEXT_TX(elem);
+ this = &txbase[elem];
+ }
+
+ dev_kfree_skb_irq(skb);
+ hp->net_stats.tx_packets++;
+ }
+ hp->tx_old = elem;
+ TXD((">"));
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+}
+
+#ifdef RXDEBUG
+#define RXD(x) printk x
+#else
+#define RXD(x)
+#endif
+
+/* Originally I handled an allocation failure by giving back just that
+ * one ring buffer to the happy meal. Problem is that usually when that
+ * condition is triggered, the happy meal expects you to do something
+ * reasonable with all of the packets it has DMA'd in. So now I just
+ * drop the entire ring when we cannot get a new skb and give them all
+ * back to the happy meal, maybe things will be "happier" now.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
+{
+ struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
+ struct happy_meal_rxd *this;
+ int elem = hp->rx_new, drops = 0;
+ u32 flags;
+
+ RXD(("RX<"));
+ this = &rxbase[elem];
+ while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
+ struct sk_buff *skb;
+ int len = flags >> 16;
+ u16 csum = flags & RXFLAG_CSUM;
+ u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
+
+ RXD(("[%d ", elem));
+
+ /* Check for errors. */
+ if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
+ RXD(("ERR(%08x)]", flags));
+ hp->net_stats.rx_errors++;
+ if (len < ETH_ZLEN)
+ hp->net_stats.rx_length_errors++;
+ if (len & (RXFLAG_OVERFLOW >> 16)) {
+ hp->net_stats.rx_over_errors++;
+ hp->net_stats.rx_fifo_errors++;
+ }
+
+ /* Return it to the Happy meal. */
+ drop_it:
+ hp->net_stats.rx_dropped++;
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ dma_addr);
+ goto next;
+ }
+ skb = hp->rx_skbs[elem];
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ /* Now refill the entry, if we can. */
+ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+ hp->rx_skbs[elem] = new_skb;
+ new_skb->dev = dev;
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+ skb_reserve(new_skb, RX_OFFSET);
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+
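+ /* Reserve 2 bytes so the 14-byte ethernet header leaves the IP
+ * header longword aligned in the copied skb.
+ */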
+ copy_skb->dev = dev;
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
+ memcpy(copy_skb->data, skb->data, len);
+ hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
+
+ /* Reuse original ring buffer. */
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ dma_addr);
+
+ skb = copy_skb;
+ }
+
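+ /* The low 16 descriptor bits carry a hardware-computed checksum of
+ * the frame (summed from the configured 14-byte offset on); hand
+ * its complement to the stack as CHECKSUM_HW.
+ */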
+ /* This card is _fucking_ hot... */
+ skb->csum = ntohs(csum ^ 0xffff);
+ skb->ip_summed = CHECKSUM_HW;
+
+ RXD(("len=%d csum=%4x]", len, csum));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ hp->net_stats.rx_packets++;
+ hp->net_stats.rx_bytes += len;
+ next:
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ hp->rx_new = elem;
+ if (drops)
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
+ RXD((">"));
+}
+
+static irqreturn_t happy_meal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct happy_meal *hp = dev->priv;
+ u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
+
+ HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+
+ spin_lock(&hp->happy_lock);
+
+ if (happy_status & GREG_STAT_ERRORS) {
+ HMD(("ERRORS "));
+ if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
+ goto out;
+ }
+
+ if (happy_status & GREG_STAT_MIFIRQ) {
+ HMD(("MIFIRQ "));
+ happy_meal_mif_interrupt(hp);
+ }
+
+ if (happy_status & GREG_STAT_TXALL) {
+ HMD(("TXALL "));
+ happy_meal_tx(hp);
+ }
+
+ if (happy_status & GREG_STAT_RXTOHOST) {
+ HMD(("RXTOHOST "));
+ happy_meal_rx(hp, dev);
+ }
+
+ HMD(("done\n"));
+out:
+ spin_unlock(&hp->happy_lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SBUS
+static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie, struct pt_regs *ptregs)
+{
+ struct quattro *qp = (struct quattro *) cookie;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct net_device *dev = qp->happy_meals[i];
+ struct happy_meal *hp = dev->priv;
+ u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
+
+ HMD(("quattro_interrupt: status=%08x ", happy_status));
+
+ if (!(happy_status & (GREG_STAT_ERRORS |
+ GREG_STAT_MIFIRQ |
+ GREG_STAT_TXALL |
+ GREG_STAT_RXTOHOST)))
+ continue;
+
+ spin_lock(&hp->happy_lock);
+
+ if (happy_status & GREG_STAT_ERRORS) {
+ HMD(("ERRORS "));
+ if (happy_meal_is_not_so_happy(hp, happy_status))
+ goto next;
+ }
+
+ if (happy_status & GREG_STAT_MIFIRQ) {
+ HMD(("MIFIRQ "));
+ happy_meal_mif_interrupt(hp);
+ }
+
+ if (happy_status & GREG_STAT_TXALL) {
+ HMD(("TXALL "));
+ happy_meal_tx(hp);
+ }
+
+ if (happy_status & GREG_STAT_RXTOHOST) {
+ HMD(("RXTOHOST "));
+ happy_meal_rx(hp, dev);
+ }
+
+ next:
+ spin_unlock(&hp->happy_lock);
+ }
+ HMD(("done\n"));
+
+ return IRQ_HANDLED;
+}
+#endif
+
+static int happy_meal_open(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+ int res;
+
+ HMD(("happy_meal_open: "));
+
+ /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
+ * into a single source whose handler we register at probe time.
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
+ if (request_irq(dev->irq, &happy_meal_interrupt,
+ SA_SHIRQ, dev->name, (void *)dev)) {
+ HMD(("EAGAIN\n"));
+#ifdef __sparc__
+ printk(KERN_ERR "happy_meal(SBUS): Can't order irq %s to go.\n",
+ __irq_itoa(dev->irq));
+#else
+ printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
+ dev->irq);
+#endif
+
+ return -EAGAIN;
+ }
+ }
+
+ HMD(("to happy_meal_init\n"));
+
+ spin_lock_irq(&hp->happy_lock);
+ res = happy_meal_init(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
+ free_irq(dev->irq, dev);
+ return res;
+}
+
+static int happy_meal_close(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_stop(hp, hp->gregs);
+ happy_meal_clean_rings(hp);
+
+ /* If auto-negotiation timer is running, kill it. */
+ del_timer(&hp->happy_timer);
+
+ spin_unlock_irq(&hp->happy_lock);
+
+ /* On Quattro QFE cards, all hme interrupts are concentrated
+ * into a single source whose handler we register at probe
+ * time and never unregister.
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+#ifdef SXDEBUG
+#define SXD(x) printk x
+#else
+#define SXD(x)
+#endif
+
+static void happy_meal_tx_timeout(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+
+ printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ tx_dump_log();
+ printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
+ hme_read32(hp, hp->gregs + GREG_STAT),
+ hme_read32(hp, hp->etxregs + ETX_CFG),
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_init(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ netif_wake_queue(dev);
+}
+
+static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+ int entry;
+ u32 tx_flags;
+
+ tx_flags = TXFLAG_OWN;
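+ /* For hardware checksumming we tell the chip where summing starts
+ * (csum_start_off, packed into TXFLAG_CSBUFBEGIN) and where the
+ * finished checksum must be written back (csum_stuff_off, packed
+ * into TXFLAG_CSLOCATION), both as byte offsets into the frame.
+ */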
+ if (skb->ip_summed == CHECKSUM_HW) {
+ u32 csum_start_off, csum_stuff_off;
+
+ csum_start_off = (u32) (skb->h.raw - skb->data);
+ csum_stuff_off = (u32) ((skb->h.raw + skb->csum) - skb->data);
+
+ tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
+ ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
+ ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
+ }
+
+ spin_lock_irq(&hp->happy_lock);
+
+ if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&hp->happy_lock);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return 1;
+ }
+
+ entry = hp->tx_new;
+ SXD(("SX<l[%d]e[%d]>", len, entry));
+ hp->tx_skbs[entry] = skb;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ u32 mapping, len;
+
+ len = skb->len;
+ mapping = hme_dma_map(hp, skb->data, len, DMA_TODEVICE);
+ tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+ (tx_flags | (len & TXFLAG_SIZE)),
+ mapping);
+ entry = NEXT_TX(entry);
+ } else {
+ u32 first_len, first_mapping;
+ int frag, first_entry = entry;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_len = skb_headlen(skb);
+ first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len, mapping, this_txflags;
+
+ len = this_frag->size;
+ mapping = hme_dma_map(hp,
+ ((void *) page_address(this_frag->page) +
+ this_frag->page_offset),
+ len, DMA_TODEVICE);
+ this_txflags = tx_flags;
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ this_txflags |= TXFLAG_EOP;
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+ (this_txflags | (len & TXFLAG_SIZE)),
+ mapping);
+ entry = NEXT_TX(entry);
+ }
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
+ (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
+ first_mapping);
+ }
+
+ hp->tx_new = entry;
+
+ if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ /* Get it going. */
+ hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
+
+ spin_unlock_irq(&hp->happy_lock);
+
+ dev->trans_start = jiffies;
+
+ tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
+ return 0;
+}
+
+static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_get_counters(hp, hp->bigmacregs);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return &hp->net_stats;
+}
+
+static void happy_meal_set_multicast(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+ void __iomem *bregs = hp->bigmacregs;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ spin_lock_irq(&hp->happy_lock);
+
+ netif_stop_queue(dev);
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+ } else if (dev->flags & IFF_PROMISC) {
+ hme_write32(hp, bregs + BMAC_RXCFG,
+ hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
+ } else {
+ u16 hash_table[4];
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+
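+ /* The top six bits of the little-endian CRC select one of the
+ * 64 hash-filter bits, spread over the four 16-bit BMAC_HTABLE
+ * registers.
+ */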
+ crc = ether_crc_le(6, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+ hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+ hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+ hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+ }
+
+ netif_wake_queue(dev);
+
+ spin_unlock_irq(&hp->happy_lock);
+}
+
+/* Ethtool support... */
+static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct happy_meal *hp = dev->priv;
+
+ cmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* XXX hardcoded stuff for now */
+ cmd->port = PORT_TP; /* XXX no MII support */
+ cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
+ cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+ /* Record PHY settings. */
+ spin_lock_irq(&hp->happy_lock);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
+ hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (hp->sw_bmcr & BMCR_ANENABLE) {
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->speed =
+ (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
+ SPEED_100 : SPEED_10;
+ if (cmd->speed == SPEED_100)
+ cmd->duplex =
+ (hp->sw_lpa & (LPA_100FULL)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ else
+ cmd->duplex =
+ (hp->sw_lpa & (LPA_10FULL)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->speed =
+ (hp->sw_bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex =
+ (hp->sw_bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ return 0;
+}
+
+static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct happy_meal *hp = dev->priv;
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((cmd->speed != SPEED_100 &&
+ cmd->speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Ok, do it to it. */
+ spin_lock_irq(&hp->happy_lock);
+ del_timer(&hp->happy_timer);
+ happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return 0;
+}
+
+static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct happy_meal *hp = dev->priv;
+
+ strcpy(info->driver, "sunhme");
+ strcpy(info->version, "2.02");
+ if (hp->happy_flags & HFLAG_PCI) {
+ struct pci_dev *pdev = hp->happy_dev;
+ strcpy(info->bus_info, pci_name(pdev));
+ }
+#ifdef CONFIG_SBUS
+ else {
+ struct sbus_dev *sdev = hp->happy_dev;
+ sprintf(info->bus_info, "SBUS:%d",
+ sdev->slot);
+ }
+#endif
+}
+
+static u32 hme_get_link(struct net_device *dev)
+{
+ struct happy_meal *hp = dev->priv;
+
+ spin_lock_irq(&hp->happy_lock);
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return (hp->sw_bmsr & BMSR_LSTATUS);
+}
+
+static struct ethtool_ops hme_ethtool_ops = {
+ .get_settings = hme_get_settings,
+ .set_settings = hme_set_settings,
+ .get_drvinfo = hme_get_drvinfo,
+ .get_link = hme_get_link,
+};
+
+static int hme_version_printed;
+
+#ifdef CONFIG_SBUS
+void __init quattro_get_ranges(struct quattro *qp)
+{
+ struct sbus_dev *sdev = qp->quattro_dev;
+ int err;
+
+ err = prom_getproperty(sdev->prom_node,
+ "ranges",
+ (char *)&qp->ranges[0],
+ sizeof(qp->ranges));
+ if (err == 0 || err == -1) {
+ qp->nranges = 0;
+ return;
+ }
+ qp->nranges = (err / sizeof(struct linux_prom_ranges));
+}
+
+static void __init quattro_apply_ranges(struct quattro *qp, struct happy_meal *hp)
+{
+ struct sbus_dev *sdev = hp->happy_dev;
+ int rng;
+
+ for (rng = 0; rng < qp->nranges; rng++) {
+ struct linux_prom_ranges *rngp = &qp->ranges[rng];
+ int reg;
+
+ for (reg = 0; reg < 5; reg++) {
+ if (sdev->reg_addrs[reg].which_io ==
+ rngp->ot_child_space)
+ break;
+ }
+ if (reg == 5)
+ continue;
+
+ sdev->reg_addrs[reg].which_io = rngp->ot_parent_space;
+ sdev->reg_addrs[reg].phys_addr += rngp->ot_parent_base;
+ }
+}
+
+/* Given a happy meal sbus device, find its quattro parent.
+ * If none exist, allocate and return a new one.
+ *
+ * Return NULL on failure.
+ */
+static struct quattro * __init quattro_sbus_find(struct sbus_dev *goal_sdev)
+{
+ struct sbus_bus *sbus;
+ struct sbus_dev *sdev;
+ struct quattro *qp;
+ int i;
+
+ if (qfe_sbus_list == NULL)
+ goto found;
+
+ for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
+ for (i = 0, sdev = qp->quattro_dev;
+ (sdev != NULL) && (i < 4);
+ sdev = sdev->next, i++) {
+ if (sdev == goal_sdev)
+ return qp;
+ }
+ }
+ for_each_sbus(sbus) {
+ for_each_sbusdev(sdev, sbus) {
+ if (sdev == goal_sdev)
+ goto found;
+ }
+ }
+
+ /* Cannot find quattro parent, fail. */
+ return NULL;
+
+found:
+ qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+ if (qp != NULL) {
+ int i;
+
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
+
+ qp->quattro_dev = goal_sdev;
+ qp->next = qfe_sbus_list;
+ qfe_sbus_list = qp;
+ quattro_get_ranges(qp);
+ }
+ return qp;
+}
+
+/* After all quattro cards have been probed, we call these functions
+ * to register the IRQ handlers.
+ */
+static void __init quattro_sbus_register_irqs(void)
+{
+ struct quattro *qp;
+
+ for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
+ struct sbus_dev *sdev = qp->quattro_dev;
+ int err;
+
+ err = request_irq(sdev->irqs[0],
+ quattro_sbus_interrupt,
+ SA_SHIRQ, "Quattro",
+ qp);
+ if (err != 0) {
+ printk(KERN_ERR "Quattro: Fatal IRQ registery error %d.\n", err);
+ panic("QFE request irq");
+ }
+ }
+}
+#endif /* CONFIG_SBUS */
+
+#ifdef CONFIG_PCI
+static struct quattro * __init quattro_pci_find(struct pci_dev *pdev)
+{
+ struct pci_dev *bdev = pdev->bus->self;
+ struct quattro *qp;
+
+ if (!bdev) return NULL;
+ for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
+ struct pci_dev *qpdev = qp->quattro_dev;
+
+ if (qpdev == bdev)
+ return qp;
+ }
+ qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+ if (qp != NULL) {
+ int i;
+
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
+
+ qp->quattro_dev = bdev;
+ qp->next = qfe_pci_list;
+ qfe_pci_list = qp;
+
+ /* No range tricks necessary on PCI. */
+ qp->nranges = 0;
+ }
+ return qp;
+}
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_SBUS
+static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
+{
+ struct quattro *qp = NULL;
+ struct happy_meal *hp;
+ struct net_device *dev;
+ int i, qfe_slot = -1;
+ int err = -ENODEV;
+
+ if (is_qfe) {
+ qp = quattro_sbus_find(sdev);
+ if (qp == NULL)
+ goto err_out;
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+ if (qp->happy_meals[qfe_slot] == NULL)
+ break;
+ if (qfe_slot == 4)
+ goto err_out;
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(struct happy_meal));
+ if (!dev)
+ goto err_out;
+ SET_MODULE_OWNER(dev);
+
+ if (hme_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ /* If the user did not specify a MAC address, use the
+ * Quattro local-mac-address property...
+ */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i < 6) { /* a mac address was given */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+ macaddr[5]++;
+ } else if (qfe_slot != -1 &&
+ prom_getproplen(sdev->prom_node,
+ "local-mac-address") == 6) {
+ prom_getproperty(sdev->prom_node, "local-mac-address",
+ dev->dev_addr, 6);
+ } else {
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ }
+
+ hp = dev->priv;
+
+ hp->happy_dev = sdev;
+
+ spin_lock_init(&hp->happy_lock);
+
+ err = -ENODEV;
+ if (sdev->num_registers != 5) {
+ printk(KERN_ERR "happymeal: Device does not have 5 regs, it has %d.\n",
+ sdev->num_registers);
+ printk(KERN_ERR "happymeal: Would you like that for here or to go?\n");
+ goto err_out_free_netdev;
+ }
+
+ if (qp != NULL) {
+ hp->qfe_parent = qp;
+ hp->qfe_ent = qfe_slot;
+ qp->happy_meals[qfe_slot] = dev;
+ quattro_apply_ranges(qp, hp);
+ }
+
+ hp->gregs = sbus_ioremap(&sdev->resource[0], 0,
+ GREG_REG_SIZE, "HME Global Regs");
+ if (!hp->gregs) {
+ printk(KERN_ERR "happymeal: Cannot map Happy Meal global registers.\n");
+ goto err_out_free_netdev;
+ }
+
+ hp->etxregs = sbus_ioremap(&sdev->resource[1], 0,
+ ETX_REG_SIZE, "HME TX Regs");
+ if (!hp->etxregs) {
+ printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Transmit registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->erxregs = sbus_ioremap(&sdev->resource[2], 0,
+ ERX_REG_SIZE, "HME RX Regs");
+ if (!hp->erxregs) {
+ printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Receive registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->bigmacregs = sbus_ioremap(&sdev->resource[3], 0,
+ BMAC_REG_SIZE, "HME BIGMAC Regs");
+ if (!hp->bigmacregs) {
+ printk(KERN_ERR "happymeal: Cannot map Happy Meal BIGMAC registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->tcvregs = sbus_ioremap(&sdev->resource[4], 0,
+ TCVR_REG_SIZE, "HME Tranceiver Regs");
+ if (!hp->tcvregs) {
+ printk(KERN_ERR "happymeal: Cannot map Happy Meal Tranceiver registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->hm_revision = prom_getintdefault(sdev->prom_node, "hm-rev", 0xff);
+ if (hp->hm_revision == 0xff)
+ hp->hm_revision = 0xa0;
+
+ /* Now enable the feature flags we can. */
+ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+ hp->happy_flags = HFLAG_20_21;
+ else if (hp->hm_revision != 0xa0)
+ hp->happy_flags = HFLAG_NOT_A0;
+
+ if (qp != NULL)
+ hp->happy_flags |= HFLAG_QUATTRO;
+
+ /* Get the supported DVMA burst sizes from our Happy SBUS. */
+ hp->happy_bursts = prom_getintdefault(sdev->bus->prom_node,
+ "burst-sizes", 0x00);
+
+ hp->happy_block = sbus_alloc_consistent(hp->happy_dev,
+ PAGE_SIZE,
+ &hp->hblock_dvma);
+ err = -ENOMEM;
+ if (!hp->happy_block) {
+ printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+ goto err_out_iounmap;
+ }
+
+ /* Force check of the link first time we are brought up. */
+ hp->linkcheck = 0;
+
+ /* Force timer state to 'asleep' with count of zero. */
+ hp->timer_state = asleep;
+ hp->timer_ticks = 0;
+
+ init_timer(&hp->happy_timer);
+
+ hp->dev = dev;
+ dev->open = &happy_meal_open;
+ dev->stop = &happy_meal_close;
+ dev->hard_start_xmit = &happy_meal_start_xmit;
+ dev->get_stats = &happy_meal_get_stats;
+ dev->set_multicast_list = &happy_meal_set_multicast;
+ dev->tx_timeout = &happy_meal_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &hme_ethtool_ops;
+
+ /* Happy Meal can do it all... except VLAN. */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_VLAN_CHALLENGED;
+
+ dev->irq = sdev->irqs[0];
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ /* Hook up SBUS register/dma accessors. */
+ hp->read_desc32 = sbus_hme_read_desc32;
+ hp->write_txd = sbus_hme_write_txd;
+ hp->write_rxd = sbus_hme_write_rxd;
+ hp->dma_map = (u32 (*)(void *, void *, long, int))sbus_map_single;
+ hp->dma_unmap = (void (*)(void *, u32, long, int))sbus_unmap_single;
+ hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
+ sbus_dma_sync_single_for_cpu;
+ hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
+ sbus_dma_sync_single_for_device;
+ hp->read32 = sbus_hme_read32;
+ hp->write32 = sbus_hme_write32;
+#endif
+
+ /* Grrr, Happy Meal comes up by default not advertising
+ * full duplex 100baseT capabilities, fix this.
+ */
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_set_initial_advertisement(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (register_netdev(hp->dev)) {
+ printk(KERN_ERR "happymeal: Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_consistent;
+ }
+
+ if (qfe_slot != -1)
+ printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
+ dev->name, qfe_slot);
+ else
+ printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
+ dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c",
+ dev->dev_addr[i], i == 5 ? ' ' : ':');
+ printk("\n");
+
+ /* We are home free at this point, link us in to the happy
+ * device list.
+ */
+ hp->next_module = root_happy_dev;
+ root_happy_dev = hp;
+
+ return 0;
+
+err_out_free_consistent:
+ sbus_free_consistent(hp->happy_dev,
+ PAGE_SIZE,
+ hp->happy_block,
+ hp->hblock_dvma);
+
+err_out_iounmap:
+ if (hp->gregs)
+ sbus_iounmap(hp->gregs, GREG_REG_SIZE);
+ if (hp->etxregs)
+ sbus_iounmap(hp->etxregs, ETX_REG_SIZE);
+ if (hp->erxregs)
+ sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
+ if (hp->bigmacregs)
+ sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
+ if (hp->tcvregs)
+ sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
+
+err_out_free_netdev:
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+#endif
+
+#ifdef CONFIG_PCI
+#ifndef __sparc__
+static int is_quattro_p(struct pci_dev *pdev)
+{
+ struct pci_dev *busdev = pdev->bus->self;
+ struct list_head *tmp;
+ int n_hmes;
+
+ if (busdev == NULL ||
+ busdev->vendor != PCI_VENDOR_ID_DEC ||
+ busdev->device != PCI_DEVICE_ID_DEC_21153)
+ return 0;
+
+ n_hmes = 0;
+ tmp = pdev->bus->devices.next;
+ while (tmp != &pdev->bus->devices) {
+ struct pci_dev *this_pdev = pci_dev_b(tmp);
+
+ if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
+ this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
+ n_hmes++;
+
+ tmp = tmp->next;
+ }
+
+ if (n_hmes != 4)
+ return 0;
+
+ return 1;
+}
+
+/* Fetch MAC address from vital product data of PCI ROM. */
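+/* We look for a VPD "NA" (network address) keyword of length 6; the six
+ * bytes following it hold the station MAC address.
+ */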
+static void find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
+{
+ int this_offset;
+
+ for (this_offset = 0x20; this_offset < len; this_offset++) {
+ void __iomem *p = rom_base + this_offset;
+
+ if (readb(p + 0) != 0x90 ||
+ readb(p + 1) != 0x00 ||
+ readb(p + 2) != 0x09 ||
+ readb(p + 3) != 0x4e ||
+ readb(p + 4) != 0x41 ||
+ readb(p + 5) != 0x06)
+ continue;
+
+ this_offset += 6;
+ p += 6;
+
+ if (index == 0) {
+ int i;
+
+ for (i = 0; i < 6; i++)
+ dev_addr[i] = readb(p + i);
+ break;
+ }
+ index--;
+ }
+}
+
+static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+ u32 rom_reg_orig;
+ void __iomem *p;
+ int index;
+
+ index = 0;
+ if (is_quattro_p(pdev))
+ index = PCI_SLOT(pdev->devfn);
+
+ if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
+ if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
+ goto use_random;
+ }
+
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig);
+ pci_write_config_dword(pdev, pdev->rom_base_reg,
+ rom_reg_orig | PCI_ROM_ADDRESS_ENABLE);
+
+ p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024));
+ if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa)
+ find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
+
+ if (p != NULL)
+ iounmap(p);
+
+ pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig);
+ return;
+
+use_random:
+ /* Sun MAC prefix then 3 random bytes. */
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(&dev_addr[3], 3);
+ return;
+}
+#endif /* !(__sparc__) */
+
+static int __init happy_meal_pci_init(struct pci_dev *pdev)
+{
+ struct quattro *qp = NULL;
+#ifdef __sparc__
+ struct pcidev_cookie *pcp;
+ int node;
+#endif
+ struct happy_meal *hp;
+ struct net_device *dev;
+ void __iomem *hpreg_base;
+ unsigned long hpreg_res;
+ int i, qfe_slot = -1;
+ char prom_name[64];
+ int err;
+
+ /* Now make sure pci_dev cookie is there. */
+#ifdef __sparc__
+ pcp = pdev->sysdata;
+ if (pcp == NULL || pcp->prom_node == -1) {
+ printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n");
+ return -ENODEV;
+ }
+ node = pcp->prom_node;
+
+ prom_getstring(node, "name", prom_name, sizeof(prom_name));
+#else
+ if (is_quattro_p(pdev))
+ strcpy(prom_name, "SUNW,qfe");
+ else
+ strcpy(prom_name, "SUNW,hme");
+#endif
+
+ err = -ENODEV;
+ if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
+ qp = quattro_pci_find(pdev);
+ if (qp == NULL)
+ goto err_out;
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+ if (qp->happy_meals[qfe_slot] == NULL)
+ break;
+ if (qfe_slot == 4)
+ goto err_out;
+ }
+
+ dev = alloc_etherdev(sizeof(struct happy_meal));
+ err = -ENOMEM;
+ if (!dev)
+ goto err_out;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (hme_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = (long) pdev;
+
+ hp = (struct happy_meal *)dev->priv;
+ memset(hp, 0, sizeof(*hp));
+
+ hp->happy_dev = pdev;
+
+ spin_lock_init(&hp->happy_lock);
+
+ if (qp != NULL) {
+ hp->qfe_parent = qp;
+ hp->qfe_ent = qfe_slot;
+ qp->happy_meals[qfe_slot] = dev;
+ }
+
+ hpreg_res = pci_resource_start(pdev, 0);
+ err = -ENODEV;
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+ printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+ goto err_out_clear_quattro;
+ }
+ if (pci_request_regions(pdev, DRV_NAME)) {
+ printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_clear_quattro;
+ }
+
+ if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == 0) {
+ printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
+ goto err_out_free_res;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i < 6) { /* a mac address was given */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+ macaddr[5]++;
+ } else {
+#ifdef __sparc__
+ if (qfe_slot != -1 &&
+ prom_getproplen(node, "local-mac-address") == 6) {
+ prom_getproperty(node, "local-mac-address",
+ dev->dev_addr, 6);
+ } else {
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ }
+#else
+ get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+#endif
+ }
+
+ /* Layout registers. */
+ hp->gregs = (hpreg_base + 0x0000UL);
+ hp->etxregs = (hpreg_base + 0x2000UL);
+ hp->erxregs = (hpreg_base + 0x4000UL);
+ hp->bigmacregs = (hpreg_base + 0x6000UL);
+ hp->tcvregs = (hpreg_base + 0x7000UL);
+
+#ifdef __sparc__
+ hp->hm_revision = prom_getintdefault(node, "hm-rev", 0xff);
+ if (hp->hm_revision == 0xff) {
+ unsigned char prev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &prev);
+ hp->hm_revision = 0xc0 | (prev & 0x0f);
+ }
+#else
+ /* works with this on non-sparc hosts */
+ hp->hm_revision = 0x20;
+#endif
+
+ /* Now enable the feature flags we can. */
+ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+ hp->happy_flags = HFLAG_20_21;
+ else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
+ hp->happy_flags = HFLAG_NOT_A0;
+
+ if (qp != NULL)
+ hp->happy_flags |= HFLAG_QUATTRO;
+
+ /* And of course, indicate this is PCI. */
+ hp->happy_flags |= HFLAG_PCI;
+
+#ifdef __sparc__
+ /* Assume PCI happy meals can handle all burst sizes. */
+ hp->happy_bursts = DMA_BURSTBITS;
+#endif
+
+ hp->happy_block = (struct hmeal_init_block *)
+ pci_alloc_consistent(pdev, PAGE_SIZE, &hp->hblock_dvma);
+
+ err = -ENOMEM;
+ if (!hp->happy_block) {
+ printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->linkcheck = 0;
+ hp->timer_state = asleep;
+ hp->timer_ticks = 0;
+
+ init_timer(&hp->happy_timer);
+
+ hp->dev = dev;
+ dev->open = &happy_meal_open;
+ dev->stop = &happy_meal_close;
+ dev->hard_start_xmit = &happy_meal_start_xmit;
+ dev->get_stats = &happy_meal_get_stats;
+ dev->set_multicast_list = &happy_meal_set_multicast;
+ dev->tx_timeout = &happy_meal_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &hme_ethtool_ops;
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Happy Meal can do it all... */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ /* Hook up PCI register/dma accessors. */
+ hp->read_desc32 = pci_hme_read_desc32;
+ hp->write_txd = pci_hme_write_txd;
+ hp->write_rxd = pci_hme_write_rxd;
+ hp->dma_map = (u32 (*)(void *, void *, long, int))pci_map_single;
+ hp->dma_unmap = (void (*)(void *, u32, long, int))pci_unmap_single;
+ hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
+ pci_dma_sync_single_for_cpu;
+ hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
+ pci_dma_sync_single_for_device;
+ hp->read32 = pci_hme_read32;
+ hp->write32 = pci_hme_write32;
+#endif
+
+ /* Grrr, Happy Meal comes up by default not advertising
+ * full duplex 100baseT capabilities, fix this.
+ */
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_set_initial_advertisement(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (register_netdev(hp->dev)) {
+ printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ if (!qfe_slot) {
+ struct pci_dev *qpdev = qp->quattro_dev;
+
+ prom_name[0] = 0;
+ if (!strncmp(dev->name, "eth", 3)) {
+ int i = simple_strtoul(dev->name + 3, NULL, 10);
+ sprintf(prom_name, "-%d", i + 3);
+ }
+ printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
+ if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
+ qpdev->device == PCI_DEVICE_ID_DEC_21153)
+ printk("DEC 21153 PCI Bridge\n");
+ else
+ printk("unknown bridge %04x.%04x\n",
+ qpdev->vendor, qpdev->device);
+ }
+
+ if (qfe_slot != -1)
+ printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
+ dev->name, qfe_slot);
+ else
+ printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
+ dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ' ' : ':');
+
+ printk("\n");
+
+ /* We are home free at this point, link us in to the happy
+ * device list.
+ */
+ hp->next_module = root_happy_dev;
+ root_happy_dev = hp;
+
+ return 0;
+
+err_out_iounmap:
+ iounmap(hp->gregs);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_clear_quattro:
+ if (qp != NULL)
+ qp->happy_meals[qfe_slot] = NULL;
+
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+#endif
+
+#ifdef CONFIG_SBUS
+static int __init happy_meal_sbus_probe(void)
+{
+ struct sbus_bus *sbus;
+ struct sbus_dev *sdev;
+ int cards = 0;
+ char model[128];
+
+ for_each_sbus(sbus) {
+ for_each_sbusdev(sdev, sbus) {
+ char *name = sdev->prom_name;
+
+ if (!strcmp(name, "SUNW,hme")) {
+ cards++;
+ prom_getstring(sdev->prom_node, "model",
+ model, sizeof(model));
+ if (!strcmp(model, "SUNW,sbus-qfe"))
+ happy_meal_sbus_init(sdev, 1);
+ else
+ happy_meal_sbus_init(sdev, 0);
+ } else if (!strcmp(name, "qfe") ||
+ !strcmp(name, "SUNW,qfe")) {
+ cards++;
+ happy_meal_sbus_init(sdev, 1);
+ }
+ }
+ }
+ if (cards != 0)
+ quattro_sbus_register_irqs();
+ return cards;
+}
+#endif
+
+#ifdef CONFIG_PCI
+static int __init happy_meal_pci_probe(void)
+{
+ struct pci_dev *pdev = NULL;
+ int cards = 0;
+
+ while ((pdev = pci_find_device(PCI_VENDOR_ID_SUN,
+ PCI_DEVICE_ID_SUN_HAPPYMEAL, pdev)) != NULL) {
+ if (pci_enable_device(pdev))
+ continue;
+ pci_set_master(pdev);
+ cards++;
+ happy_meal_pci_init(pdev);
+ }
+ return cards;
+}
+#endif
+
+static int __init happy_meal_probe(void)
+{
+ static int called = 0;
+ int cards;
+
+ root_happy_dev = NULL;
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ cards = 0;
+#ifdef CONFIG_SBUS
+ cards += happy_meal_sbus_probe();
+#endif
+#ifdef CONFIG_PCI
+ cards += happy_meal_pci_probe();
+#endif
+ if (!cards)
+ return -ENODEV;
+ return 0;
+}
+
+
+static void __exit happy_meal_cleanup_module(void)
+{
+#ifdef CONFIG_SBUS
+ struct quattro *last_seen_qfe = NULL;
+#endif
+
+ while (root_happy_dev) {
+ struct happy_meal *hp = root_happy_dev;
+ struct happy_meal *next = root_happy_dev->next_module;
+ struct net_device *dev = hp->dev;
+
+ /* Unregister netdev before unmapping registers as this
+ * call can end up trying to access those registers.
+ */
+ unregister_netdev(dev);
+
+#ifdef CONFIG_SBUS
+ if (!(hp->happy_flags & HFLAG_PCI)) {
+ if (hp->happy_flags & HFLAG_QUATTRO) {
+ if (hp->qfe_parent != last_seen_qfe) {
+ free_irq(dev->irq, hp->qfe_parent);
+ last_seen_qfe = hp->qfe_parent;
+ }
+ }
+
+ sbus_iounmap(hp->gregs, GREG_REG_SIZE);
+ sbus_iounmap(hp->etxregs, ETX_REG_SIZE);
+ sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
+ sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
+ sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
+ sbus_free_consistent(hp->happy_dev,
+ PAGE_SIZE,
+ hp->happy_block,
+ hp->hblock_dvma);
+ }
+#endif
+#ifdef CONFIG_PCI
+ if ((hp->happy_flags & HFLAG_PCI)) {
+ pci_free_consistent(hp->happy_dev,
+ PAGE_SIZE,
+ hp->happy_block,
+ hp->hblock_dvma);
+ iounmap(hp->gregs);
+ pci_release_regions(hp->happy_dev);
+ }
+#endif
+ free_netdev(dev);
+
+ root_happy_dev = next;
+ }
+
+ /* Now cleanup the quattro lists. */
+#ifdef CONFIG_SBUS
+ while (qfe_sbus_list) {
+ struct quattro *qfe = qfe_sbus_list;
+ struct quattro *next = qfe->next;
+
+ kfree(qfe);
+
+ qfe_sbus_list = next;
+ }
+#endif
+#ifdef CONFIG_PCI
+ while (qfe_pci_list) {
+ struct quattro *qfe = qfe_pci_list;
+ struct quattro *next = qfe->next;
+
+ kfree(qfe);
+
+ qfe_pci_list = next;
+ }
+#endif
+}
+
+module_init(happy_meal_probe);
+module_exit(happy_meal_cleanup_module);
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
new file mode 100644
index 000000000000..34e9f953cea4
--- /dev/null
+++ b/drivers/net/sunhme.h
@@ -0,0 +1,515 @@
+/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
+ * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
+ * Also known as the "Happy Meal".
+ *
+ * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNHME_H
+#define _SUNHME_H
+
+#include <linux/config.h>
+#include <linux/pci.h>
+
+/* Happy Meal global registers. */
+#define GREG_SWRESET 0x000UL /* Software Reset */
+#define GREG_CFG 0x004UL /* Config Register */
+#define GREG_STAT 0x108UL /* Status */
+#define GREG_IMASK 0x10cUL /* Interrupt Mask */
+#define GREG_REG_SIZE 0x110UL
+
+/* Global reset register. */
+#define GREG_RESET_ETX 0x01
+#define GREG_RESET_ERX 0x02
+#define GREG_RESET_ALL 0x03
+
+/* Global config register. */
+#define GREG_CFG_BURSTMSK 0x03
+#define GREG_CFG_BURST16 0x00
+#define GREG_CFG_BURST32 0x01
+#define GREG_CFG_BURST64 0x02
+#define GREG_CFG_64BIT 0x04
+#define GREG_CFG_PARITY 0x08
+#define GREG_CFG_RESV 0x10
+
+/* Global status register. */
+#define GREG_STAT_GOTFRAME 0x00000001 /* Received a frame */
+#define GREG_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define GREG_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define GREG_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define GREG_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define GREG_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define GREG_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define GREG_STAT_STSTERR 0x00000080 /* Test error in XIF for SQE */
+#define GREG_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define GREG_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define GREG_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define GREG_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define GREG_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define GREG_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define GREG_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define GREG_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
+#define GREG_STAT_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
+#define GREG_STAT_NORXD 0x00020000 /* No more receive descriptors */
+#define GREG_STAT_RXERR 0x00040000 /* Error during receive dma */
+#define GREG_STAT_RXLATERR 0x00080000 /* Late error during receive dma */
+#define GREG_STAT_RXPERR 0x00100000 /* Parity error during receive dma */
+#define GREG_STAT_RXTERR 0x00200000 /* Tag error during receive dma */
+#define GREG_STAT_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_STAT_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
+#define GREG_STAT_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
+#define GREG_STAT_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
+#define GREG_STAT_TXEACK 0x04000000 /* Error during transmit dma */
+#define GREG_STAT_TXLERR 0x08000000 /* Late error during transmit dma */
+#define GREG_STAT_TXPERR 0x10000000 /* Parity error during transmit dma */
+#define GREG_STAT_TXTERR 0x20000000 /* Tag error during transmit dma */
+#define GREG_STAT_SLVERR 0x40000000 /* PIO access got an error */
+#define GREG_STAT_SLVPERR 0x80000000 /* PIO access got a parity error */
+
+/* All interesting error conditions. */
+#define GREG_STAT_ERRORS 0xfc7efefc
+
+/* Global interrupt mask register. */
+#define GREG_IMASK_GOTFRAME 0x00000001 /* Received a frame */
+#define GREG_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define GREG_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define GREG_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define GREG_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define GREG_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define GREG_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define GREG_IMASK_STSTERR 0x00000080 /* Test error in XIF for SQE */
+#define GREG_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define GREG_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define GREG_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define GREG_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define GREG_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define GREG_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define GREG_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define GREG_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
+#define GREG_IMASK_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
+#define GREG_IMASK_NORXD 0x00020000 /* No more receive descriptors */
+#define GREG_IMASK_RXERR 0x00040000 /* Error during receive dma */
+#define GREG_IMASK_RXLATERR 0x00080000 /* Late error during receive dma */
+#define GREG_IMASK_RXPERR 0x00100000 /* Parity error during receive dma */
+#define GREG_IMASK_RXTERR 0x00200000 /* Tag error during receive dma */
+#define GREG_IMASK_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_IMASK_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
+#define GREG_IMASK_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
+#define GREG_IMASK_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
+#define GREG_IMASK_TXEACK 0x04000000 /* Error during transmit dma */
+#define GREG_IMASK_TXLERR 0x08000000 /* Late error during transmit dma */
+#define GREG_IMASK_TXPERR 0x10000000 /* Parity error during transmit dma */
+#define GREG_IMASK_TXTERR 0x20000000 /* Tag error during transmit dma */
+#define GREG_IMASK_SLVERR 0x40000000 /* PIO access got an error */
+#define GREG_IMASK_SLVPERR 0x80000000 /* PIO access got a parity error */
+
+/* Happy Meal external transmitter registers. */
+#define ETX_PENDING 0x00UL /* Transmit pending/wakeup register */
+#define ETX_CFG 0x04UL /* Transmit config register */
+#define ETX_RING 0x08UL /* Transmit ring pointer */
+#define ETX_BBASE 0x0cUL /* Transmit buffer base */
+#define ETX_BDISP 0x10UL /* Transmit buffer displacement */
+#define ETX_FIFOWPTR 0x14UL /* FIFO write ptr */
+#define ETX_FIFOSWPTR 0x18UL /* FIFO write ptr (shadow register) */
+#define ETX_FIFORPTR 0x1cUL /* FIFO read ptr */
+#define ETX_FIFOSRPTR 0x20UL /* FIFO read ptr (shadow register) */
+#define ETX_FIFOPCNT 0x24UL /* FIFO packet counter */
+#define ETX_SMACHINE 0x28UL /* Transmitter state machine */
+#define ETX_RSIZE 0x2cUL /* Ring descriptor size */
+#define ETX_BPTR 0x30UL /* Transmit data buffer ptr */
+#define ETX_REG_SIZE 0x34UL
+
+/* ETX transmit pending register. */
+#define ETX_TP_DMAWAKEUP 0x00000001 /* Restart transmit dma */
+
+/* ETX config register. */
+#define ETX_CFG_DMAENABLE 0x00000001 /* Enable transmit dma */
+#define ETX_CFG_FIFOTHRESH 0x000003fe /* Transmit FIFO threshold */
+#define ETX_CFG_IRQDAFTER 0x00000400 /* Interrupt after TX-FIFO drained */
+#define ETX_CFG_IRQDBEFORE 0x00000000 /* Interrupt before TX-FIFO drained */
+
+#define ETX_RSIZE_SHIFT 4
+
+/* Happy Meal external receiver registers. */
+#define ERX_CFG 0x00UL /* Receiver config register */
+#define ERX_RING 0x04UL /* Receiver ring ptr */
+#define ERX_BPTR 0x08UL /* Receiver buffer ptr */
+#define ERX_FIFOWPTR 0x0cUL /* FIFO write ptr */
+#define ERX_FIFOSWPTR 0x10UL /* FIFO write ptr (shadow register) */
+#define ERX_FIFORPTR 0x14UL /* FIFO read ptr */
+#define ERX_FIFOSRPTR 0x18UL /* FIFO read ptr (shadow register) */
+#define ERX_SMACHINE 0x1cUL /* Receiver state machine */
+#define ERX_REG_SIZE 0x20UL
+
+/* ERX config register. */
+#define ERX_CFG_DMAENABLE 0x00000001 /* Enable receive DMA */
+#define ERX_CFG_RESV1 0x00000006 /* Unused... */
+#define ERX_CFG_BYTEOFFSET 0x00000038 /* Receive first byte offset */
+#define ERX_CFG_RESV2 0x000001c0 /* Unused... */
+#define ERX_CFG_SIZE32 0x00000000 /* Receive ring size == 32 */
+#define ERX_CFG_SIZE64 0x00000200 /* Receive ring size == 64 */
+#define ERX_CFG_SIZE128 0x00000400 /* Receive ring size == 128 */
+#define ERX_CFG_SIZE256 0x00000600 /* Receive ring size == 256 */
+#define ERX_CFG_RESV3 0x0000f800 /* Unused... */
+#define ERX_CFG_CSUMSTART 0x007f0000 /* Offset of checksum start,
+ * in halfwords. */
+
+/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */
+#define BMAC_XIFCFG 0x0000UL /* XIF config register */
+ /* 0x4-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
+#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
+#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
+#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
+#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
+#define BMAC_STIME 0x21cUL /* Transmit slot time */
+#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
+#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
+#define BMAC_TXSDELIM 0x228UL /* Transmit delimiter */
+#define BMAC_JSIZE 0x22cUL /* Jam size */
+#define BMAC_TXMAX 0x230UL /* Transmit max pkt size */
+#define BMAC_TXMIN 0x234UL /* Transmit min pkt size */
+#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
+#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
+#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
+#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
+#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
+#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
+#define BMAC_RSEED 0x250UL /* Transmit random number seed */
+#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
+ /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
+#define BMAC_RXCFG 0x30cUL /* Receiver config register */
+#define BMAC_RXMAX 0x310UL /* Receive max pkt size */
+#define BMAC_RXMIN 0x314UL /* Receive min pkt size */
+#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
+#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
+#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
+#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
+#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
+#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
+#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
+#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
+ /* 0x33c, reserved */
+#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
+#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
+#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
+#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
+#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
+#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
+#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
+#define BMAC_AFMASK 0x35cUL /* Address filter mask */
+#define BMAC_REG_SIZE 0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
+#define BIGMAC_XCFG_XLBACK 0x00000002 /* Loopback-mode XIF enable */
+#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
+#define BIGMAC_XCFG_MIIDISAB 0x00000008 /* MII receive buffer disable */
+#define BIGMAC_XCFG_SQENABLE 0x00000010 /* SQE test enable */
+#define BIGMAC_XCFG_SQETWIN 0x000003e0 /* SQE time window */
+#define BIGMAC_XCFG_LANCE 0x00000010 /* Lance mode enable */
+#define BIGMAC_XCFG_LIPG0 0x000003e0 /* Lance mode IPG0 */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
+#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
+#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
+#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
+#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
+#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
+#define BIGMAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
+#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
+#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
+#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
+#define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */
+#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
+#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
+#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
+
+/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */
+#define TCVR_BBCLOCK 0x00UL /* Bit bang clock register */
+#define TCVR_BBDATA 0x04UL /* Bit bang data register */
+#define TCVR_BBOENAB 0x08UL /* Bit bang output enable */
+#define TCVR_FRAME 0x0cUL /* Frame control/data register */
+#define TCVR_CFG 0x10UL /* MIF config register */
+#define TCVR_IMASK 0x14UL /* MIF interrupt mask */
+#define TCVR_STATUS 0x18UL /* MIF status */
+#define TCVR_SMACHINE 0x1cUL /* MIF state machine */
+#define TCVR_REG_SIZE 0x20UL
+
+/* Frame commands. */
+#define FRAME_WRITE 0x50020000
+#define FRAME_READ 0x60020000
+
+/* Transceiver config register */
+#define TCV_CFG_PSELECT 0x00000001 /* Select PHY */
+#define TCV_CFG_PENABLE 0x00000002 /* Enable MIF polling */
+#define TCV_CFG_BENABLE 0x00000004 /* Enable the "bit banger" oh baby */
+#define TCV_CFG_PREGADDR 0x000000f8 /* Address of poll register */
+#define TCV_CFG_MDIO0 0x00000100 /* MDIO zero, data/attached */
+#define TCV_CFG_MDIO1 0x00000200 /* MDIO one, data/attached */
+#define TCV_CFG_PDADDR 0x00007c00 /* Device PHY address polling */
+
+/* Here are some PHY addresses. */
+#define TCV_PADDR_ETX 0 /* Internal transceiver */
+#define TCV_PADDR_ITX 1 /* External transceiver */
+
+/* Transceiver status register */
+#define TCV_STAT_BASIC 0xffff0000 /* The "basic" part */
+#define TCV_STAT_NORMAL 0x0000ffff /* The "non-basic" part */
+
+/* Inside the Happy Meal transceiver is the physical layer; it uses an
+ * implementation from National Semiconductor, part number DP83840VCE.
+ * You can retrieve the data sheets and programming docs for this beast
+ * from http://www.national.com/
+ *
+ * The DP83840 is capable of both 10 and 100Mbps ethernet, in both
+ * half and full duplex mode. It also supports auto negotiation.
+ *
+ * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM!
+ * Debugging eeprom burnt code is more fun than programming this chip!
+ */
+
+/* Generic MII registers defined in linux/mii.h, these below
+ * are DP83840 specific.
+ */
+#define DP83840_CSCONFIG 0x17 /* CS configuration */
+
+/* The Carrier Sense config register. */
+#define CSCONFIG_RESV1 0x0001 /* Unused... */
+#define CSCONFIG_LED4 0x0002 /* Pin for full-dplx LED4 */
+#define CSCONFIG_LED1 0x0004 /* Pin for conn-status LED1 */
+#define CSCONFIG_RESV2 0x0008 /* Unused... */
+#define CSCONFIG_TCVDISAB 0x0010 /* Turns off the transceiver */
+#define CSCONFIG_DFBYPASS 0x0020 /* Bypass disconnect function */
+#define CSCONFIG_GLFORCE 0x0040 /* Good link force for 100mbps */
+#define CSCONFIG_CLKTRISTATE 0x0080 /* Tristate 25m clock */
+#define CSCONFIG_RESV3 0x0700 /* Unused... */
+#define CSCONFIG_ENCODE 0x0800 /* 1=MLT-3, 0=binary */
+#define CSCONFIG_RENABLE 0x1000 /* Repeater mode enable */
+#define CSCONFIG_TCDISABLE 0x2000 /* Disable timeout counter */
+#define CSCONFIG_RESV4 0x4000 /* Unused... */
+#define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */
+
+/* Happy Meal descriptor rings and such.
+ * All descriptor rings must be aligned on a 2K boundary.
+ * All receive buffers must be 64 byte aligned.
+ * Always write the address first before setting the ownership
+ * bits to avoid races with the hardware scanning the ring.
+ */
+struct happy_meal_rxd {
+ u32 rx_flags;
+ u32 rx_addr;
+};
+
+#define RXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
+#define RXFLAG_OVERFLOW 0x40000000 /* 1 = buffer overflow */
+#define RXFLAG_SIZE 0x3fff0000 /* Size of the buffer */
+#define RXFLAG_CSUM 0x0000ffff /* HW computed checksum */
+
+struct happy_meal_txd {
+ u32 tx_flags;
+ u32 tx_addr;
+};
+
+#define TXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
+#define TXFLAG_SOP 0x40000000 /* 1 = start of packet */
+#define TXFLAG_EOP 0x20000000 /* 1 = end of packet */
+#define TXFLAG_CSENABLE 0x10000000 /* 1 = enable hw-checksums */
+#define TXFLAG_CSLOCATION 0x0ff00000 /* Where to stick the csum */
+#define TXFLAG_CSBUFBEGIN 0x000fc000 /* Where to begin checksum */
+#define TXFLAG_SIZE 0x00003fff /* Size of the packet */
+
+#define TX_RING_SIZE 32 /* Must be >=16 and <=256, a multiple of 16 */
+#define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */
+
+#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
+#error TX_RING_SIZE holds illegal value
+#endif
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+/* We use a 14 byte offset for checksum computation. */
+#if (RX_RING_SIZE == 32)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 64)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 128)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 256)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16))
+#else
+#error RX_RING_SIZE holds illegal value
+#endif
+#endif
+#endif
+#endif
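+/* In ERX_CFG_DEFAULT, (off)<<3 lands in ERX_CFG_BYTEOFFSET and (14/2)<<16
+ * programs ERX_CFG_CSUMSTART (counted in halfwords) so hardware
+ * checksumming begins right after the 14-byte ethernet header.
+ */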
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
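+/* The NEXT_*()/PREV_*() masks only wrap correctly when the ring sizes
+ * are powers of two.
+ */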
+
+#define TX_BUFFS_AVAIL(hp) \
+ (((hp)->tx_old <= (hp)->tx_new) ? \
+ (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
+ (hp)->tx_old - (hp)->tx_new - 1)
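+/* One descriptor is always left unused so a full ring can be told apart
+ * from an empty one: with tx_old == tx_new, TX_BUFFS_AVAIL() reports
+ * TX_RING_SIZE - 1 free slots.
+ */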
+
+#define RX_OFFSET 2
+#define RX_BUF_ALLOC_SIZE (1546 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD 256
+
+struct hmeal_init_block {
+ struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE];
+ struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE];
+};
+
+#define hblock_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem]))))
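+/* hblock_offset() is an open-coded offsetof(): the byte offset of ring
+ * element 'elem' within struct hmeal_init_block.
+ */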
+
+/* Now software state stuff. */
+enum happy_transceiver {
+ external = 0,
+ internal = 1,
+ none = 2,
+};
+
+/* Timer state engine. */
+enum happy_timer_state {
+ arbwait = 0, /* Waiting for auto negotiation to complete. */
+ lupwait = 1, /* Auto-neg complete, awaiting link-up status. */
+ ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */
+ asleep = 3, /* Timer inactive. */
+};
+
+struct quattro;
+
+/* Happy happy, joy joy! */
+struct happy_meal {
+ void __iomem *gregs; /* Happy meal global registers */
+ struct hmeal_init_block *happy_block; /* RX and TX descriptors (CPU addr) */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ u32 (*read_desc32)(u32 *);
+ void (*write_txd)(struct happy_meal_txd *, u32, u32);
+ void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
+ u32 (*dma_map)(void *, void *, long, int);
+ void (*dma_unmap)(void *, u32, long, int);
+ void (*dma_sync_for_cpu)(void *, u32, long, int);
+ void (*dma_sync_for_device)(void *, u32, long, int);
+#endif
+
+ /* This is either a sbus_dev or a pci_dev. */
+ void *happy_dev;
+
+ spinlock_t happy_lock;
+
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+
+ int rx_new, tx_new, rx_old, tx_old;
+
+ struct net_device_stats net_stats; /* Statistical counters */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ u32 (*read32)(void __iomem *);
+ void (*write32)(void __iomem *, u32);
+#endif
+
+ void __iomem *etxregs; /* External transmitter regs */
+ void __iomem *erxregs; /* External receiver regs */
+ void __iomem *bigmacregs; /* BIGMAC core regs */
+ void __iomem *tcvregs; /* MIF transceiver regs */
+
+ dma_addr_t hblock_dvma; /* DVMA visible address happy block */
+ unsigned int happy_flags; /* Driver state flags */
+ enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
+ unsigned int happy_bursts; /* Get your mind out of the gutter */
+ unsigned int paddr; /* PHY address for transceiver */
+ unsigned short hm_revision; /* Happy meal revision */
+ unsigned short sw_bmcr; /* SW copy of BMCR */
+ unsigned short sw_bmsr; /* SW copy of BMSR */
+ unsigned short sw_physid1; /* SW copy of PHYSID1 */
+ unsigned short sw_physid2; /* SW copy of PHYSID2 */
+ unsigned short sw_advertise; /* SW copy of ADVERTISE */
+ unsigned short sw_lpa; /* SW copy of LPA */
+ unsigned short sw_expansion; /* SW copy of EXPANSION */
+ unsigned short sw_csconfig; /* SW copy of CSCONFIG */
+ unsigned int auto_speed; /* Auto-nego link speed */
+ unsigned int forced_speed; /* Force mode link speed */
+ unsigned int poll_data; /* MIF poll data */
+ unsigned int poll_flag; /* MIF poll flag */
+ unsigned int linkcheck; /* Have we checked the link yet? */
+ unsigned int lnkup; /* Is the link up as far as we know? */
+ unsigned int lnkdown; /* Trying to force the link down? */
+ unsigned int lnkcnt; /* Counter for link-up attempts. */
+ struct timer_list happy_timer; /* To watch the link when coming up. */
+ enum happy_timer_state timer_state; /* State of the auto-neg timer. */
+ unsigned int timer_ticks; /* Number of clicks at each state. */
+
+ struct net_device *dev; /* Backpointer */
+ struct quattro *qfe_parent; /* For Quattro cards */
+ int qfe_ent; /* Which instance on quattro */
+ struct happy_meal *next_module;
+};
+
+/* Here are the happy flags. */
+#define HFLAG_POLL 0x00000001 /* We are doing MIF polling */
+#define HFLAG_FENABLE 0x00000002 /* The MII frame is enabled */
+#define HFLAG_LANCE 0x00000004 /* We are using lance-mode */
+#define HFLAG_RXENABLE 0x00000008 /* Receiver is enabled */
+#define HFLAG_AUTO 0x00000010 /* Using auto-negotiation, 0 = force */
+#define HFLAG_FULL 0x00000020 /* Full duplex enable */
+#define HFLAG_MACFULL 0x00000040 /* Using full duplex in the MAC */
+#define HFLAG_POLLENABLE 0x00000080 /* Actually try MIF polling */
+#define HFLAG_RXCV 0x00000100 /* XXX RXCV ENABLE */
+#define HFLAG_INIT 0x00000200 /* Init called at least once */
+#define HFLAG_LINKUP 0x00000400 /* 1 = Link is up */
+#define HFLAG_PCI 0x00000800 /* PCI based Happy Meal */
+#define HFLAG_QUATTRO 0x00001000 /* On QFE/Quattro card */
+
+#define HFLAG_20_21 (HFLAG_POLLENABLE | HFLAG_FENABLE)
+#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV)
+
+/* Support for QFE/Quattro cards. */
+struct quattro {
+ struct net_device *happy_meals[4];
+
+ /* This is either a sbus_dev or a pci_dev. */
+ void *quattro_dev;
+
+ struct quattro *next;
+
+ /* PROM ranges, if any. */
+#ifdef CONFIG_SBUS
+ struct linux_prom_ranges ranges[8];
+#endif
+ int nranges;
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+#define happy_meal_alloc_skb(__length, __gfp_flags) \
+({ struct sk_buff *__skb; \
+ __skb = alloc_skb((__length) + 64, (__gfp_flags)); \
+ if(__skb) { \
+ int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
+ if(__offset) \
+ skb_reserve(__skb, __offset); \
+ } \
+ __skb; \
+})
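+
+/* ALIGNED_RX_SKB_ADDR is the number of bytes needed to round the skb
+ * data pointer up to the next 64-byte boundary (e.g. data ending in
+ * 0x28 gives an offset of 0x18); the extra 64 bytes requested from
+ * alloc_skb() above leave room for that skb_reserve().
+ */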
+
+#endif /* !(_SUNHME_H) */
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
new file mode 100644
index 000000000000..62d464c7ef51
--- /dev/null
+++ b/drivers/net/sunlance.c
@@ -0,0 +1,1614 @@
+/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
+ * lance.c: Linux/Sparc/Lance driver
+ *
+ * Written 1995, 1996 by Miguel de Icaza
+ * Sources:
+ * The Linux depca driver
+ * The Linux lance driver.
+ * The Linux skeleton driver.
+ * The NetBSD Sparc/Lance driver.
+ * Theo de Raadt (deraadt@openbsd.org)
+ * NCR92C990 Lan Controller manual
+ *
+ * 1.4:
+ * Added support to run with a ledma on the Sun4m
+ *
+ * 1.5:
+ * Added multiple card detection.
+ *
+ * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
+ * (davem@caip.rutgers.edu)
+ *
+ * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
+ * this disables auto carrier detection on sun4m. Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 1.7:
+ * 6/26/96: Bug fix for multiple ledmas, miguel.
+ *
+ * 1.8:
+ * Stole multicast code from depca.c, fixed lance_tx.
+ *
+ * 1.9:
+ * 8/21/96: Fixed the multicast code (Pedro Roque)
+ *
+ * 8/28/96: Send fake packet in lance_open() if auto_select is true,
+ * so we can detect the carrier loss condition in time.
+ * Eddie C. Dost (ecd@skynet.be)
+ *
+ * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
+ * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
+ *
+ * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
+ *
+ * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
+ * This was the sun4c killer. Shit, stupid bug.
+ * (ecd@skynet.be)
+ *
+ * 1.10:
+ * 1/26/97: Modularize driver. (ecd@skynet.be)
+ *
+ * 1.11:
+ * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
+ *
+ * 1.12:
+ * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
+ * Anton Blanchard (anton@progsoc.uts.edu.au)
+ * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
+ * David S. Miller (davem@redhat.com)
+ * 2.01:
+ * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
+ *
+ */
+
+#undef DEBUG_DRIVER
+
+static char version[] =
+ "sunlance.c:v2.02 24/Aug/03 Miguel de Icaza (miguel@nuclecu.unam.mx)\n";
+
+static char lancestr[] = "LANCE";
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/socket.h> /* Used for the temporal inet entries and routing */
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/byteorder.h> /* Used by the checksum routines */
+#include <asm/idprom.h>
+#include <asm/sbus.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h> /* For tpe-link-test? setting */
+#include <asm/irq.h>
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x80 /* Who owns the entry */
+#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x20 /* FRA: Frame error */
+#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x08 /* CRC error */
+#define LE_R1_BUF 0x04 /* BUF: Buffer error */
+#define LE_R1_SOP 0x02 /* Start of packet */
+#define LE_R1_EOP 0x01 /* End of packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T1_OWN 0x80 /* Lance owns the packet */
+#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x08 /* Error: one retry needed */
+#define LE_T1_EDEF 0x04 /* Error: deferred */
+#define LE_T1_SOP 0x02 /* Start of packet */
+#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
+
+#define PKT_BUF_SZ 1544
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+struct lance_rx_desc {
+ u16 rmd0; /* low address of packet */
+ u8 rmd1_bits; /* descriptor bits */
+ u8 rmd1_hadr; /* high address of packet */
+ s16 length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ u16 mblength; /* This is the actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ u16 tmd0; /* low address of packet */
+ u8 tmd1_bits; /* descriptor bits */
+ u8 tmd1_hadr; /* high address of packet */
+ s16 length; /* Length is 2s complement (negative)! */
+ u16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+/* On the Sparc, this block should be on a DMA region */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ u16 rx_ptr; /* receive descriptor addr */
+ u16 rx_len; /* receive len and high addr */
+ u16 tx_ptr; /* transmit descriptor addr */
+ u16 tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ u8 pad[2]; /* align rx_buf for copy_and_sum(). */
+ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+};
+
+#define libdesc_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+
+#define libbuff_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
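+
+/* Both macros are open-coded offsetof(): the byte offset of ring entry
+ * 'elem' (or its buffer) within struct lance_init_block, usable either
+ * as a DVMA offset from init_block_dvma or as a PIO offset from the
+ * lebuffer mapping.
+ */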
+
+struct lance_private {
+ void __iomem *lregs; /* Lance RAP/RDP regs. */
+ void __iomem *dregs; /* DMA controller regs. */
+ struct lance_init_block __iomem *init_block_iomem;
+ struct lance_init_block *init_block_mem;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ struct net_device_stats stats;
+ struct sbus_dma *ledma; /* If set this points to ledma */
+ char tpe; /* cable-selection is TPE */
+ char auto_select; /* cable-selection by carrier */
+ char burst_sizes; /* ledma SBus burst sizes */
+ char pio_buffer; /* init block in PIO space? */
+
+ unsigned short busmaster_regval;
+
+ void (*init_ring)(struct net_device *);
+ void (*rx)(struct net_device *);
+ void (*tx)(struct net_device *);
+
+ char *name;
+ dma_addr_t init_block_dvma;
+ struct net_device *dev; /* Backpointer */
+ struct lance_private *next_module;
+ struct sbus_dev *sdev;
+ struct timer_list multicast_timer;
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
+
+/* Lance registers. */
+#define RDP 0x00UL /* register data port */
+#define RAP 0x02UL /* register address port */
+#define LANCE_REG_SIZE 0x04UL
+
+#define STOP_LANCE(__lp) \
+do { void __iomem *__base = (__lp)->lregs; \
+ sbus_writew(LE_CSR0, __base + RAP); \
+ sbus_writew(LE_C0_STOP, __base + RDP); \
+} while (0)
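+
+/* Every CSR access is this two-step sequence: select the CSR number
+ * through RAP, then read or write its contents through RDP, exactly as
+ * load_csrs() below does for CSR1-CSR3.
+ */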
+
+int sparc_lance_debug = 2;
+
+/* The Lance uses 24 bit addresses */
+/* On the Sun4c the DVMA will provide the remaining bytes for us */
+/* On the Sun4m we have to instruct the ledma to provide them */
+/* Even worse, on scsi/ether SBUS cards, the init block and the
+ * transmit/receive buffers are addressed as offsets from absolute
+ * zero on the lebuffer PIO area. -DaveM
+ */
+
+#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
+
+static struct lance_private *root_lance_dev;
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ u32 leptr;
+
+ if (lp->pio_buffer)
+ leptr = 0;
+ else
+ leptr = LANCE_ADDR(lp->init_block_dvma);
+
+ sbus_writew(LE_CSR1, lp->lregs + RAP);
+ sbus_writew(leptr & 0xffff, lp->lregs + RDP);
+ sbus_writew(LE_CSR2, lp->lregs + RAP);
+ sbus_writew(leptr >> 16, lp->lregs + RDP);
+ sbus_writew(LE_CSR3, lp->lregs + RAP);
+ sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
+
+ /* Point back to csr0 */
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ dma_addr_t aib = lp->init_block_dvma;
+ __u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you have to byte-swap each 16-bit word of the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+}
+
+static void lance_init_ring_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you have to byte-swap each 16-bit word of the ethernet address.
+ */
+ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
+ sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
+ sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
+ sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
+ sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
+ sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = libbuff_offset(tx_buf, i);
+ sbus_writew(leptr, &ib->btx_ring [i].tmd0);
+ sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
+ sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
+
+ /* The ones required by tmd2 */
+ sbus_writew(0xf000, &ib->btx_ring [i].length);
+ sbus_writew(0, &ib->btx_ring [i].misc);
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = libbuff_offset(rx_buf, i);
+
+ sbus_writew(leptr, &ib->brx_ring [i].rmd0);
+ sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
+ sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
+ sbus_writew(-RX_BUFF_SIZE|0xf000,
+ &ib->brx_ring [i].length);
+ sbus_writew(0, &ib->brx_ring [i].mblength);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = libdesc_offset(brx_ring, 0);
+ sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
+ &ib->rx_len);
+ sbus_writew(leptr, &ib->rx_ptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = libdesc_offset(btx_ring, 0);
+ sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
+ &ib->tx_len);
+ sbus_writew(leptr, &ib->tx_ptr);
+}
+
+static void init_restart_ledma(struct lance_private *lp)
+{
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ if (!(csr & DMA_HNDL_ERROR)) {
+ /* E-Cache draining */
+ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
+ barrier();
+ }
+
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ csr &= ~DMA_E_BURSTS;
+ if (lp->burst_sizes & DMA_BURST32)
+ csr |= DMA_E_BURST32;
+ else
+ csr |= DMA_E_BURST16;
+
+ csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
+
+ if (lp->tpe)
+ csr |= DMA_EN_ENETAUI;
+ else
+ csr &= ~DMA_EN_ENETAUI;
+ udelay(20);
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ udelay(200);
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ u16 regval = 0;
+ int i;
+
+ if (lp->dregs)
+ init_restart_ledma(lp);
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ sbus_writew(LE_C0_INIT, lp->lregs + RDP);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; i < 100; i++) {
+ regval = sbus_readw(lp->lregs + RDP);
+
+ if (regval & (LE_C0_ERR | LE_C0_IDON))
+ break;
+ barrier();
+ }
+ if (i == 100 || (regval & LE_C0_ERR)) {
+ printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, regval);
+ if (lp->dregs)
+ printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ sbus_writew(LE_C0_IDON, lp->lregs + RDP);
+ sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ csr |= DMA_INT_ENAB;
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ }
+
+ return 0;
+}
+
+static void lance_rx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ struct lance_rx_desc *rd;
+ u8 bits;
+ int len, entry = lp->rx_new;
+ struct sk_buff *skb;
+
+ for (rd = &ib->brx_ring [entry];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == NULL) {
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ lp->stats.rx_bytes += len;
+
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)&(ib->rx_buf [entry][0]),
+ len, 0);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc *td = &ib->btx_ring [i];
+ u8 bits = td->tmd1_bits;
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = td->misc;
+
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ lp->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits = bits & ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ lp->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
+{
+ u16 *p16 = (u16 *) skb->data;
+ u32 *p32;
+ u8 *p8;
+ void __iomem *pbuf = piobuf;
+
+ /* We know here that both src and dest are on a 16bit boundary. */
+ *p16++ = sbus_readw(pbuf);
+ p32 = (u32 *) p16;
+ pbuf += 2;
+ len -= 2;
+
+ while (len >= 4) {
+ *p32++ = sbus_readl(pbuf);
+ pbuf += 4;
+ len -= 4;
+ }
+ p8 = (u8 *) p32;
+ if (len >= 2) {
+ p16 = (u16 *) p32;
+ *p16++ = sbus_readw(pbuf);
+ pbuf += 2;
+ len -= 2;
+ p8 = (u8 *) p16;
+ }
+ if (len >= 1)
+ *p8 = sbus_readb(pbuf);
+}
+
+static void lance_rx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ struct lance_rx_desc __iomem *rd;
+ unsigned char bits;
+ int len, entry;
+ struct sk_buff *skb;
+
+ entry = lp->rx_new;
+ for (rd = &ib->brx_ring [entry];
+ !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+ } else {
+ len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == NULL) {
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ lp->stats.rx_bytes += len;
+
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
+ u8 bits = sbus_readb(&td->tmd1_bits);
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = sbus_readw(&td->misc);
+
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ lp->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ lp->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ csr0 = sbus_readw(lp->lregs + RDP);
+
+ /* Acknowledge all the interrupt sources ASAP */
+ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
+ lp->lregs + RDP);
+
+ if ((csr0 & LE_C0_ERR) != 0) {
+ /* Clear the error condition */
+ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR),
+ lp->lregs + RDP);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lp->rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lp->tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ lp->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ lp->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ if (lp->dregs) {
+ u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
+
+ printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
+ dev->name, csr0, addr & 0xffffff);
+ } else {
+ printk(KERN_ERR "%s: Memory error, status %04x\n",
+ dev->name, csr0);
+ }
+
+ sbus_writew(LE_C0_STOP, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ dma_csr |= DMA_FIFO_INV;
+ sbus_writel(dma_csr, lp->dregs + DMA_CSR);
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ sbus_writew(LE_C0_INEA, lp->lregs + RDP);
+
+ return IRQ_HANDLED;
+}
+
+/* Build a fake network packet and send it to ourselves. */
+static void build_fake_packet(struct lance_private *lp)
+{
+ struct net_device *dev = lp->dev;
+ int i, entry;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
+ struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
+ for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
+ sbus_writew(0, &packet[i]);
+ for (i = 0; i < 6; i++) {
+ sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
+ sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
+ }
+ sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
+ struct ethhdr *eth = (struct ethhdr *) packet;
+ memset(packet, 0, ETH_ZLEN);
+ for (i = 0; i < 6; i++) {
+ eth->h_dest[i] = dev->dev_addr[i];
+ eth->h_source[i] = dev->dev_addr[i];
+ }
+ ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ }
+ lp->tx_new = TX_NEXT(entry);
+}
+
+struct net_device *last_dev;
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status = 0;
+
+ last_dev = dev;
+
+ STOP_LANCE(lp);
+
+ if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %s\n", __irq_itoa(dev->irq));
+ return -EAGAIN;
+ }
+
+ /* On the 4m, setup the ledma to provide the upper bits for buffers */
+ if (lp->dregs) {
+ u32 regval = lp->init_block_dvma & 0xff000000;
+
+ sbus_writel(regval, lp->dregs + DMA_TEST);
+ }
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring(), called on any error, will not
+ * forget the multicast filters.
+ *
+ * BTW, this is a common bug in all lance drivers! --ANK
+ */
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew(0, &ib->mode);
+ sbus_writel(0, &ib->filter[0]);
+ sbus_writel(0, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->mode = 0;
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ if (!status && lp->auto_select) {
+ build_fake_packet(lp);
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+ }
+
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ STOP_LANCE(lp);
+
+ free_irq(dev->irq, (void *) dev);
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ STOP_LANCE(lp);
+
+ /* On the 4m, reset the dma too */
+ if (lp->dregs) {
+ u32 csr, addr;
+
+ printk(KERN_ERR "resetting ledma\n");
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+
+ addr = lp->init_block_dvma & 0xff000000;
+ sbus_writel(addr, lp->dregs + DMA_TEST);
+ }
+ lp->init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies;
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
+{
+ void __iomem *piobuf = dest;
+ u32 *p32;
+ u16 *p16;
+ u8 *p8;
+
+ switch ((unsigned long)src & 0x3) {
+ case 0:
+ p32 = (u32 *) src;
+ while (len >= 4) {
+ sbus_writel(*p32, piobuf);
+ p32++;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p32;
+ break;
+ case 1:
+ case 3:
+ p8 = (u8 *) src;
+ while (len >= 4) {
+ u32 val;
+
+ val = p8[0] << 24;
+ val |= p8[1] << 16;
+ val |= p8[2] << 8;
+ val |= p8[3];
+ sbus_writel(val, piobuf);
+ p8 += 4;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p8;
+ break;
+ case 2:
+ p16 = (u16 *) src;
+ while (len >= 4) {
+ u32 val = p16[0]<<16 | p16[1];
+ sbus_writel(val, piobuf);
+ p16 += 2;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p16;
+ break;
+ }
+ if (len >= 2) {
+ u16 val = src[0] << 8 | src[1];
+ sbus_writew(val, piobuf);
+ src += 2;
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(src[0], piobuf);
+}
+
+static void lance_piozero(void __iomem *dest, int len)
+{
+ void __iomem *piobuf = dest;
+
+ if ((unsigned long)piobuf & 1) {
+ sbus_writeb(0, piobuf);
+ piobuf += 1;
+ len -= 1;
+ if (len == 0)
+ return;
+ }
+ if (len == 1) {
+ sbus_writeb(0, piobuf);
+ return;
+ }
+ if ((unsigned long)piobuf & 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ if (len == 0)
+ return;
+ }
+ while (len >= 4) {
+ sbus_writel(0, piobuf);
+ piobuf += 4;
+ len -= 4;
+ }
+ if (len >= 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(0, piobuf);
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, sbus_readw(lp->lregs + RDP));
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, skblen, len;
+
+ skblen = skb->len;
+
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+
+ spin_lock_irq(&lp->lock);
+
+ lp->stats.tx_bytes += len;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
+ if (len != skblen)
+ lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
+ sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+ memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+ if (len != skblen)
+ memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ }
+
+ lp->tx_new = TX_NEXT(entry);
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+
+ /* Read back CSR to invalidate the E-Cache.
+ * This is needed, because DMA_DSBL_WR_INV is set.
+ */
+ if (lp->dregs)
+ sbus_readw(lp->lregs + RDP);
+
+ spin_unlock_irq(&lp->lock);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+ u32 val;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI)
+ val = ~0;
+ else
+ val = 0;
+
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writel(val, &ib->filter[0]);
+ sbus_writel(val, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->filter [0] = val;
+ ib->filter [1] = val;
+ }
+
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
+ u16 tmp = sbus_readw(&mcast_table[crc>>4]);
+ tmp |= 1 << (crc & 0xf);
+ sbus_writew(tmp, &mcast_table[crc>>4]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *mcast_table = (u16 *) &ib->filter;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib_mem = lp->init_block_mem;
+ struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
+ u16 mode;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+
+ if (lp->pio_buffer)
+ mode = sbus_readw(&ib_iomem->mode);
+ else
+ mode = ib_mem->mode;
+ if (dev->flags & IFF_PROMISC) {
+ mode |= LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ } else {
+ mode &= ~LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static void lance_free_hwresources(struct lance_private *lp)
+{
+ if (lp->lregs)
+ sbus_iounmap(lp->lregs, LANCE_REG_SIZE);
+ if (lp->init_block_iomem) {
+ sbus_iounmap(lp->init_block_iomem,
+ sizeof(struct lance_init_block));
+ } else if (lp->init_block_mem) {
+ sbus_free_consistent(lp->sdev,
+ sizeof(struct lance_init_block),
+ lp->init_block_mem,
+ lp->init_block_dvma);
+ }
+}
+
+/* Ethtool support... */
+static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ strcpy(info->driver, "sunlance");
+ strcpy(info->version, "2.02");
+ sprintf(info->bus_info, "SBUS:%d",
+ lp->sdev->slot);
+}
+
+static u32 sparc_lance_get_link(struct net_device *dev)
+{
+ /* We really do not keep track of this, but this
+ * is better than not reporting anything at all.
+ */
+ return 1;
+}
+
+static struct ethtool_ops sparc_lance_ethtool_ops = {
+ .get_drvinfo = sparc_lance_get_drvinfo,
+ .get_link = sparc_lance_get_link,
+};
+
+static int __init sparc_lance_init(struct sbus_dev *sdev,
+ struct sbus_dma *ledma,
+ struct sbus_dev *lebuffer)
+{
+ static unsigned version_printed;
+ struct net_device *dev;
+ struct lance_private *lp;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct lance_private) + 8);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(*lp));
+
+ if (sparc_lance_debug && version_printed++ == 0)
+ printk (KERN_INFO "%s", version);
+
+ spin_lock_init(&lp->lock);
+
+ /* Copy the IDPROM ethernet address to the device structure, later we
+ * will copy the address in the device structure to the lance
+ * initialization block.
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Get the IO region */
+ lp->lregs = sbus_ioremap(&sdev->resource[0], 0,
+ LANCE_REG_SIZE, lancestr);
+ if (!lp->lregs) {
+ printk(KERN_ERR "SunLance: Cannot map registers.\n");
+ goto fail;
+ }
+
+ lp->sdev = sdev;
+ if (lebuffer) {
+ /* sanity check */
+ if (lebuffer->resource[0].start & 7) {
+ printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
+ goto fail;
+ }
+ lp->init_block_iomem =
+ sbus_ioremap(&lebuffer->resource[0], 0,
+ sizeof(struct lance_init_block), "lebuffer");
+ if (!lp->init_block_iomem) {
+ printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
+ goto fail;
+ }
+ lp->init_block_dvma = 0;
+ lp->pio_buffer = 1;
+ lp->init_ring = lance_init_ring_pio;
+ lp->rx = lance_rx_pio;
+ lp->tx = lance_tx_pio;
+ } else {
+ lp->init_block_mem =
+ sbus_alloc_consistent(sdev, sizeof(struct lance_init_block),
+ &lp->init_block_dvma);
+ if (!lp->init_block_mem || lp->init_block_dvma == 0) {
+ printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ goto fail;
+ }
+ lp->pio_buffer = 0;
+ lp->init_ring = lance_init_ring_dvma;
+ lp->rx = lance_rx_dvma;
+ lp->tx = lance_tx_dvma;
+ }
+ lp->busmaster_regval = prom_getintdefault(sdev->prom_node,
+ "busmaster-regval",
+ (LE_C3_BSWP | LE_C3_ACON |
+ LE_C3_BCON));
+
+ lp->name = lancestr;
+ lp->ledma = ledma;
+
+ lp->burst_sizes = 0;
+ if (lp->ledma) {
+ char prop[6];
+ unsigned int sbmask;
+ u32 csr;
+
+ /* Find burst-size property for ledma */
+ lp->burst_sizes = prom_getintdefault(ledma->sdev->prom_node,
+ "burst-sizes", 0);
+
+ /* ledma may be capable of fast bursts, but sbus may not. */
+ sbmask = prom_getintdefault(ledma->sdev->bus->prom_node,
+ "burst-sizes", DMA_BURSTBITS);
+ lp->burst_sizes &= sbmask;
+
+ /* Get the cable-selection property */
+ memset(prop, 0, sizeof(prop));
+ prom_getstring(ledma->sdev->prom_node, "cable-selection",
+ prop, sizeof(prop));
+ if (prop[0] == 0) {
+ int topnd, nd;
+
+ printk(KERN_INFO "SunLance: using auto-carrier-detection.\n");
+
+ /* Is this found at /options .attributes in all
+ * Prom versions? XXX
+ */
+ topnd = prom_getchild(prom_root_node);
+
+ nd = prom_searchsiblings(topnd, "options");
+ if (!nd)
+ goto no_link_test;
+
+ if (!prom_node_has_property(nd, "tpe-link-test?"))
+ goto no_link_test;
+
+ memset(prop, 0, sizeof(prop));
+ prom_getstring(nd, "tpe-link-test?", prop,
+ sizeof(prop));
+
+ if (strcmp(prop, "true")) {
+ printk(KERN_NOTICE "SunLance: warning: overriding option "
+ "'tpe-link-test?'\n");
+ printk(KERN_NOTICE "SunLance: warning: mail any problems "
+ "to ecd@skynet.be\n");
+ auxio_set_lte(AUXIO_LTE_ON);
+ }
+no_link_test:
+ lp->auto_select = 1;
+ lp->tpe = 0;
+ } else if (!strcmp(prop, "aui")) {
+ lp->auto_select = 0;
+ lp->tpe = 0;
+ } else {
+ lp->auto_select = 0;
+ lp->tpe = 1;
+ }
+
+ lp->dregs = ledma->regs;
+
+ /* Reset ledma */
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+ } else
+ lp->dregs = NULL;
+
+ lp->dev = dev;
+ SET_MODULE_OWNER(dev);
+ dev->open = &lance_open;
+ dev->stop = &lance_close;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->tx_timeout = &lance_tx_timeout;
+ dev->watchdog_timeo = 5*HZ;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+ dev->ethtool_ops = &sparc_lance_ethtool_ops;
+
+ dev->irq = sdev->irqs[0];
+
+ dev->dma = 0;
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = &lance_set_multicast_retry;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SunLance: Cannot register device.\n");
+ goto fail;
+ }
+
+ lp->next_module = root_lance_dev;
+ root_lance_dev = lp;
+
+ printk(KERN_INFO "%s: LANCE ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? ' ': ':');
+ printk("\n");
+
+ return 0;
+
+fail:
+ lance_free_hwresources(lp);
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+/* On 4m, find the associated dma for the lance chip */
+static inline struct sbus_dma *find_ledma(struct sbus_dev *sdev)
+{
+ struct sbus_dma *p;
+
+ for_each_dvma(p) {
+ if (p->sdev == sdev)
+ return p;
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_SUN4
+
+#include <asm/sun4paddr.h>
+#include <asm/machines.h>
+
+/* Find all the lance cards on the system and initialize them */
+static int __init sparc_lance_probe(void)
+{
+ static struct sbus_dev sdev;
+ static int called;
+
+ root_lance_dev = NULL;
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
+ (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
+ memset(&sdev, 0, sizeof(sdev));
+ sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
+ sdev.irqs[0] = 6;
+ return sparc_lance_init(&sdev, NULL, NULL);
+ }
+ return -ENODEV;
+}
+
+#else /* !CONFIG_SUN4 */
+
+/* Find all the lance cards on the system and initialize them */
+static int __init sparc_lance_probe(void)
+{
+ struct sbus_bus *bus;
+ struct sbus_dev *sdev = NULL;
+ struct sbus_dma *ledma = NULL;
+ static int called;
+ int cards = 0, v;
+
+ root_lance_dev = NULL;
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ for_each_sbus (bus) {
+ for_each_sbusdev (sdev, bus) {
+ if (strcmp(sdev->prom_name, "le") == 0) {
+ cards++;
+ if ((v = sparc_lance_init(sdev, NULL, NULL)))
+ return v;
+ continue;
+ }
+ if (strcmp(sdev->prom_name, "ledma") == 0) {
+ cards++;
+ ledma = find_ledma(sdev);
+ if ((v = sparc_lance_init(sdev->child,
+ ledma, NULL)))
+ return v;
+ continue;
+ }
+ if (strcmp(sdev->prom_name, "lebuffer") == 0){
+ cards++;
+ if ((v = sparc_lance_init(sdev->child,
+ NULL, sdev)))
+ return v;
+ continue;
+ }
+ } /* for each sbusdev */
+ } /* for each sbus */
+ if (!cards)
+ return -ENODEV;
+ return 0;
+}
+#endif /* !CONFIG_SUN4 */
+
+static void __exit sparc_lance_cleanup(void)
+{
+ struct lance_private *lp;
+
+ while (root_lance_dev) {
+ lp = root_lance_dev->next_module;
+
+ unregister_netdev(root_lance_dev->dev);
+ lance_free_hwresources(root_lance_dev);
+ free_netdev(root_lance_dev->dev);
+ root_lance_dev = lp;
+ }
+}
+
+module_init(sparc_lance_probe);
+module_exit(sparc_lance_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
new file mode 100644
index 000000000000..37ef1b82a6cb
--- /dev/null
+++ b/drivers/net/sunqe.c
@@ -0,0 +1,1043 @@
+/* $Id: sunqe.c,v 1.55 2002/01/15 06:48:55 davem Exp $
+ * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
+ * Once again I am out to prove that every ethernet
+ * controller out there can be most efficiently programmed
+ * if you make it look like a LANCE.
+ *
+ * Copyright (C) 1996, 1999, 2003 David S. Miller (davem@redhat.com)
+ */
+
+static char version[] =
+ "sunqe.c:v3.0 8/24/03 David S. Miller (davem@redhat.com)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/idprom.h>
+#include <asm/sbus.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#include "sunqe.h"
+
+static struct sunqec *root_qec_dev;
+
+static void qe_set_multicast(struct net_device *dev);
+
+#define QEC_RESET_TRIES 200
+
+static inline int qec_global_reset(void __iomem *gregs)
+{
+ int tries = QEC_RESET_TRIES;
+
+ sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+ while (--tries) {
+ u32 tmp = sbus_readl(gregs + GLOB_CTRL);
+ if (tmp & GLOB_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (tries)
+ return 0;
+ printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
+ return -1;
+}
+
+#define MACE_RESET_RETRIES 200
+#define QE_RESET_RETRIES 200
+
+static inline int qe_stop(struct sunqe *qep)
+{
+ void __iomem *cregs = qep->qcregs;
+ void __iomem *mregs = qep->mregs;
+ int tries;
+
+ /* Reset the MACE, then the QEC channel. */
+ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
+ tries = MACE_RESET_RETRIES;
+ while (--tries) {
+ u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
+ if (tmp & MREGS_BCONFIG_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (!tries) {
+ printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
+ return -1;
+ }
+
+ sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
+ tries = QE_RESET_RETRIES;
+ while (--tries) {
+ u32 tmp = sbus_readl(cregs + CREG_CTRL);
+ if (tmp & CREG_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (!tries) {
+ printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void qe_init_rings(struct sunqe *qep)
+{
+ struct qe_init_block *qb = qep->qe_block;
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 qbufs_dvma = qep->buffers_dvma;
+ int i;
+
+ qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
+ memset(qb, 0, sizeof(struct qe_init_block));
+ memset(qbufs, 0, sizeof(struct sunqe_buffers));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
+ qb->qe_rxd[i].rx_flags =
+ (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+ }
+}
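+
+/* Each receive descriptor is handed to the chip with RXD_OWN set and
+ * the full RXD_PKT_SZ buffer length; qe_rx() below re-arms descriptors
+ * the same way once their packets have been copied out.
+ */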
+
+static int qe_init(struct sunqe *qep, int from_irq)
+{
+ struct sunqec *qecp = qep->parent;
+ void __iomem *cregs = qep->qcregs;
+ void __iomem *mregs = qep->mregs;
+ void __iomem *gregs = qecp->gregs;
+ unsigned char *e = &qep->dev->dev_addr[0];
+ u32 tmp;
+ int i;
+
+ /* Shut it up. */
+ if (qe_stop(qep))
+ return -EAGAIN;
+
+ /* Setup initial rx/tx init block pointers. */
+ sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+ sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+
+ /* Enable/mask the various irq's. */
+ sbus_writel(0, cregs + CREG_RIMASK);
+ sbus_writel(1, cregs + CREG_TIMASK);
+
+ sbus_writel(0, cregs + CREG_QMASK);
+ sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
+
+ /* Setup the FIFO pointers into QEC local memory. */
+ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
+ sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
+ sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
+
+ tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
+ sbus_readl(gregs + GLOB_RSIZE);
+ sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
+ sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
+
+ /* Clear the channel collision counter. */
+ sbus_writel(0, cregs + CREG_CCNT);
+
+ /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
+ sbus_writel(0, cregs + CREG_PIPG);
+
+ /* Now dork with the AMD MACE. */
+ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
+ sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
+ sbus_writeb(0, mregs + MREGS_RXFCNTL);
+
+ /* The QEC dma's the rx'd packets from local memory out to main memory,
+ * and therefore it interrupts when the packet reception is "complete".
+ * So don't listen for the MACE talking about it.
+ */
+ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
+ sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
+ sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
+ MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
+ mregs + MREGS_FCONFIG);
+
+ /* Only usable interface on QuadEther is twisted pair. */
+ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
+
+ /* Tell MACE we are changing the ether address. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
+ mregs + MREGS_IACONFIG);
+ while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ sbus_writeb(e[0], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[1], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[2], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[3], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[4], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[5], mregs + MREGS_ETHADDR);
+
+ /* Clear out the address filter. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ mregs + MREGS_IACONFIG);
+ while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++)
+ sbus_writeb(0, mregs + MREGS_FILTER);
+
+ /* Address changes are now complete. */
+ sbus_writeb(0, mregs + MREGS_IACONFIG);
+
+ qe_init_rings(qep);
+
+ /* Wait a little bit for the link to come up... */
+ mdelay(5);
+ if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
+ int tries = 50;
+
+ while (tries--) {
+ u8 tmp;
+
+ mdelay(5);
+ barrier();
+ tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
+ if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
+ break;
+ }
+ if (tries == 0)
+ printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
+ }
+
+ /* Missed packet counter is cleared on a read. */
+ sbus_readb(mregs + MREGS_MPCNT);
+
+ /* Reload multicast information, this will enable the receiver
+ * and transmitter.
+ */
+ qe_set_multicast(qep->dev);
+
+ /* QEC should now start to show interrupts. */
+ return 0;
+}
+
+/* Grrr, certain error conditions completely lock up the AMD MACE,
+ * so when we get these we _must_ reset the chip.
+ */
+static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
+{
+ struct net_device *dev = qep->dev;
+ int mace_hwbug_workaround = 0;
+
+ if (qe_status & CREG_STAT_EDEFER) {
+ printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ }
+
+ if (qe_status & CREG_STAT_CLOSS) {
+ printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
+ qep->net_stats.tx_errors++;
+ qep->net_stats.tx_carrier_errors++;
+ }
+
+ if (qe_status & CREG_STAT_ERETRIES) {
+ printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
+ qep->net_stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_LCOLL) {
+ printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ qep->net_stats.collisions++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_FUFLOW) {
+ printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_JERROR) {
+ printk(KERN_ERR "%s: Jabber error.\n", dev->name);
+ }
+
+ if (qe_status & CREG_STAT_BERROR) {
+ printk(KERN_ERR "%s: Babble error.\n", dev->name);
+ }
+
+ if (qe_status & CREG_STAT_CCOFLOW) {
+ qep->net_stats.tx_errors += 256;
+ qep->net_stats.collisions += 256;
+ }
+
+ if (qe_status & CREG_STAT_TXDERROR) {
+ printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ qep->net_stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXLERR) {
+ printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXPERR) {
+ printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ qep->net_stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXSERR) {
+ printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
+ qep->net_stats.tx_errors++;
+ qep->net_stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RCCOFLOW) {
+ qep->net_stats.rx_errors += 256;
+ qep->net_stats.collisions += 256;
+ }
+
+ if (qe_status & CREG_STAT_RUOFLOW) {
+ qep->net_stats.rx_errors += 256;
+ qep->net_stats.rx_over_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_MCOFLOW) {
+ qep->net_stats.rx_errors += 256;
+ qep->net_stats.rx_missed_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_RXFOFLOW) {
+ printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_over_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RLCOLL) {
+ printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.collisions++;
+ }
+
+ if (qe_status & CREG_STAT_FCOFLOW) {
+ qep->net_stats.rx_errors += 256;
+ qep->net_stats.rx_frame_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_CECOFLOW) {
+ qep->net_stats.rx_errors += 256;
+ qep->net_stats.rx_crc_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_RXDROP) {
+ printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_dropped++;
+ qep->net_stats.rx_missed_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RXSMALL) {
+ printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_length_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RXLERR) {
+ printk(KERN_ERR "%s: Receive late error.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RXPERR) {
+ printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_missed_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RXSERR) {
+ printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_missed_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (mace_hwbug_workaround)
+ qe_init(qep, 1);
+ return mace_hwbug_workaround;
+}
+
+/* Per-QE receive interrupt service routine. Just like on the Happy Meal
+ * we receive directly into skbs with a small packet-copy watermark.
+ */
+static void qe_rx(struct sunqe *qep)
+{
+ struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+ struct qe_rxd *this;
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 qbufs_dvma = qep->buffers_dvma;
+ int elem = qep->rx_new, drops = 0;
+ u32 flags;
+
+ this = &rxbase[elem];
+ while (!((flags = this->rx_flags) & RXD_OWN)) {
+ struct sk_buff *skb;
+ unsigned char *this_qbuf =
+ &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
+ __u32 this_qbuf_dvma = qbufs_dvma +
+ qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
+ struct qe_rxd *end_rxd =
+ &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
+ int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */
+
+ /* Check for errors. */
+ if (len < ETH_ZLEN) {
+ qep->net_stats.rx_errors++;
+ qep->net_stats.rx_length_errors++;
+ qep->net_stats.rx_dropped++;
+ } else {
+ skb = dev_alloc_skb(len + 2);
+ if (skb == NULL) {
+ drops++;
+ qep->net_stats.rx_dropped++;
+ } else {
+ skb->dev = qep->dev;
+ skb_reserve(skb, 2);
+ skb_put(skb, len);
+ eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
+ len, 0);
+ skb->protocol = eth_type_trans(skb, qep->dev);
+ netif_rx(skb);
+ qep->dev->last_rx = jiffies;
+ qep->net_stats.rx_packets++;
+ qep->net_stats.rx_bytes += len;
+ }
+ }
+ end_rxd->rx_addr = this_qbuf_dvma;
+ end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ qep->rx_new = elem;
+ if (drops)
+ printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
+}
+
+static void qe_tx_reclaim(struct sunqe *qep);
+
+/* Interrupts for all QE's get filtered out via the QEC master controller,
+ * so we just run through each qe and check to see who is signaling
+ * and thus needs to be serviced.
+ */
+static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sunqec *qecp = (struct sunqec *) dev_id;
+ u32 qec_status;
+ int channel = 0;
+
+ /* Latch the status now. */
+ qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
+ while (channel < 4) {
+ if (qec_status & 0xf) {
+ struct sunqe *qep = qecp->qes[channel];
+ u32 qe_status;
+
+ qe_status = sbus_readl(qep->qcregs + CREG_STAT);
+ if (qe_status & CREG_STAT_ERRORS) {
+ if (qe_is_bolixed(qep, qe_status))
+ goto next;
+ }
+ if (qe_status & CREG_STAT_RXIRQ)
+ qe_rx(qep);
+ if (netif_queue_stopped(qep->dev) &&
+ (qe_status & CREG_STAT_TXIRQ)) {
+ spin_lock(&qep->lock);
+ qe_tx_reclaim(qep);
+ if (TX_BUFFS_AVAIL(qep) > 0) {
+ /* Wake net queue and return to
+ * lazy tx reclaim.
+ */
+ netif_wake_queue(qep->dev);
+ sbus_writel(1, qep->qcregs + CREG_TIMASK);
+ }
+ spin_unlock(&qep->lock);
+ }
+ next:
+ ;
+ }
+ qec_status >>= 4;
+ channel++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qe_open(struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+
+ qep->mconfig = (MREGS_MCONFIG_TXENAB |
+ MREGS_MCONFIG_RXENAB |
+ MREGS_MCONFIG_MBAENAB);
+ return qe_init(qep, 0);
+}
+
+static int qe_close(struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+
+ qe_stop(qep);
+ return 0;
+}
+
+/* Reclaim TX'd frames from the ring. This must always run under
+ * the IRQ protected qep->lock.
+ */
+static void qe_tx_reclaim(struct sunqe *qep)
+{
+ struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
+ int elem = qep->tx_old;
+
+ while (elem != qep->tx_new) {
+ u32 flags = txbase[elem].tx_flags;
+
+ if (flags & TXD_OWN)
+ break;
+ elem = NEXT_TX(elem);
+ }
+ qep->tx_old = elem;
+}
+
+static void qe_tx_timeout(struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+ int tx_full;
+
+ spin_lock_irq(&qep->lock);
+
+ /* Try to reclaim, if that frees up some tx
+ * entries, we're fine.
+ */
+ qe_tx_reclaim(qep);
+ tx_full = TX_BUFFS_AVAIL(qep) <= 0;
+
+ spin_unlock_irq(&qep->lock);
+
+ if (! tx_full)
+ goto out;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ qe_init(qep, 1);
+
+out:
+ netif_wake_queue(dev);
+}
+
+/* Get a packet queued to go onto the wire. */
+static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+ unsigned char *txbuf;
+ int len, entry;
+
+ spin_lock_irq(&qep->lock);
+
+ qe_tx_reclaim(qep);
+
+ len = skb->len;
+ entry = qep->tx_new;
+
+ txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
+ txbuf_dvma = qbufs_dvma +
+ qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
+
+ /* Avoid a race... */
+ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
+
+ memcpy(txbuf, skb->data, len);
+
+ qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
+ qep->qe_block->qe_txd[entry].tx_flags =
+ (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+ qep->tx_new = NEXT_TX(entry);
+
+ /* Get it going. */
+ dev->trans_start = jiffies;
+ sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
+
+ qep->net_stats.tx_packets++;
+ qep->net_stats.tx_bytes += len;
+
+ if (TX_BUFFS_AVAIL(qep) <= 0) {
+ /* Halt the net queue and enable tx interrupts.
+ * When the tx queue empties the tx irq handler
+ * will wake up the queue and return us back to
+ * the lazy tx reclaim scheme.
+ */
+ netif_stop_queue(dev);
+ sbus_writel(0, qep->qcregs + CREG_TIMASK);
+ }
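+ /* Illustrative cross-reference (not original code): the counterpart is
+ * in qec_interrupt(), which, once qe_tx_reclaim() frees entries and
+ * TX_BUFFS_AVAIL() goes positive again, wakes the queue and writes 1
+ * back to CREG_TIMASK so TX interrupts are masked and lazy reclaim resumes.
+ */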
+ spin_unlock_irq(&qep->lock);
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *qe_get_stats(struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+
+ return &qep->net_stats;
+}
+
+static void qe_set_multicast(struct net_device *dev)
+{
+ struct sunqe *qep = (struct sunqe *) dev->priv;
+ struct dev_mc_list *dmi = dev->mc_list;
+ u8 new_mconfig = qep->mconfig;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* Lock out others. */
+ netif_stop_queue(dev);
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ qep->mregs + MREGS_IACONFIG);
+ while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++)
+ sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
+ sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+ } else if (dev->flags & IFF_PROMISC) {
+ new_mconfig |= MREGS_MCONFIG_PROMISC;
+ } else {
+ u16 hash_table[4];
+ u8 *hbytes = (unsigned char *) &hash_table[0];
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+ crc = ether_crc_le(6, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
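+ /* For illustration only: the top six CRC bits index one of 64 filter
+ * bits; bits 5:4 select one of the four 16-bit words and bits 3:0 the
+ * bit within it. E.g. a value of 0x2b (binary 10 1011) would set bit
+ * 0xb of hash_table[2].
+ */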
+ /* Program the qe with the new filter value. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ qep->mregs + MREGS_IACONFIG);
+ while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++) {
+ u8 tmp = *hbytes++;
+ sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
+ }
+ sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+ }
+
+ /* Any change of the logical address filter, the physical address,
+ * or enabling/disabling promiscuous mode causes the MACE to disable
+ * the receiver. So we must re-enable it here or else the MACE
+ * refuses to listen to anything on the network. Sheesh, took
+ * me a day or two to find this bug.
+ */
+ qep->mconfig = new_mconfig;
+ sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
+
+ /* Let us get going again. */
+ netif_wake_queue(dev);
+}
+
+/* Ethtool support... */
+static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct sunqe *qep = dev->priv;
+
+ strcpy(info->driver, "sunqe");
+ strcpy(info->version, "3.0");
+ sprintf(info->bus_info, "SBUS:%d",
+ qep->qe_sdev->slot);
+}
+
+static u32 qe_get_link(struct net_device *dev)
+{
+ struct sunqe *qep = dev->priv;
+ void __iomem *mregs = qep->mregs;
+ u8 phyconfig;
+
+ spin_lock_irq(&qep->lock);
+ phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
+ spin_unlock_irq(&qep->lock);
+
+ return (phyconfig & MREGS_PHYCONFIG_LSTAT);
+}
+
+static struct ethtool_ops qe_ethtool_ops = {
+ .get_drvinfo = qe_get_drvinfo,
+ .get_link = qe_get_link,
+};
+
+/* This is only called once at boot time for each card probed. */
+static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
+{
+ u8 bsizes = qecp->qec_bursts;
+
+ if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
+ sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
+ } else if (bsizes & DMA_BURST32) {
+ sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
+ } else {
+ sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
+ }
+
+ /* Packet size is only used in 100baseT BigMAC configurations,
+ * set it to zero just to be on the safe side.
+ */
+ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
+
+ /* Set the local memsize register, divided up to one piece per QE channel. */
+ sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
+ qecp->gregs + GLOB_MSIZE);
+
+ /* Divide up the local QEC memory amongst the 4 QE receiver and
+ * transmitter FIFOs. Basically it is (total / 2 / num_channels).
+ */
+ sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
+ qecp->gregs + GLOB_TSIZE);
+ sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
+ qecp->gregs + GLOB_RSIZE);
+}
+
+/* Four QE's per QEC card. */
+static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
+{
+ static unsigned version_printed;
+ struct net_device *qe_devs[4];
+ struct sunqe *qeps[4];
+ struct sbus_dev *qesdevs[4];
+ struct sbus_dev *child;
+ struct sunqec *qecp = NULL;
+ u8 bsizes, bsizes_more;
+ int i, j, res = -ENOMEM;
+
+ for (i = 0; i < 4; i++) {
+ qe_devs[i] = alloc_etherdev(sizeof(struct sunqe));
+ if (!qe_devs[i])
+ goto out;
+ }
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ for (i = 0; i < 4; i++) {
+ qeps[i] = (struct sunqe *) qe_devs[i]->priv;
+ for (j = 0; j < 6; j++)
+ qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
+ qeps[i]->channel = i;
+ spin_lock_init(&qeps[i]->lock);
+ }
+
+ qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
+ if (qecp == NULL)
+ goto out1;
+ qecp->qec_sdev = sdev;
+
+ for (i = 0; i < 4; i++) {
+ qecp->qes[i] = qeps[i];
+ qeps[i]->dev = qe_devs[i];
+ qeps[i]->parent = qecp;
+ }
+
+ res = -ENODEV;
+
+ for (i = 0, child = sdev->child; i < 4; i++, child = child->next) {
+ /* Link in channel */
+ j = prom_getintdefault(child->prom_node, "channel#", -1);
+ if (j == -1)
+ goto out2;
+ qesdevs[j] = child;
+ }
+
+ for (i = 0; i < 4; i++)
+ qeps[i]->qe_sdev = qesdevs[i];
+
+ /* Now map in the registers, QEC globals first. */
+ qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
+ GLOB_REG_SIZE, "QEC Global Registers");
+ if (!qecp->gregs) {
+ printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
+ goto out2;
+ }
+
+ /* Make sure the QEC is in MACE mode. */
+ if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
+ printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
+ goto out3;
+ }
+
+ /* Reset the QEC. */
+ if (qec_global_reset(qecp->gregs))
+ goto out3;
+
+ /* Find and set the burst sizes for the QEC, since it does
+ * the actual dma for all 4 channels.
+ */
+ bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
+ bsizes &= 0xff;
+ bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);
+
+ if (bsizes_more != 0xff)
+ bsizes &= bsizes_more;
+ if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+ (bsizes & DMA_BURST32)==0)
+ bsizes = (DMA_BURST32 - 1);
+
+ qecp->qec_bursts = bsizes;
+
+ /* Perform one time QEC initialization, we never touch the QEC
+ * globals again after this.
+ */
+ qec_init_once(qecp, sdev);
+
+ for (i = 0; i < 4; i++) {
+ struct sunqe *qe = qeps[i];
+ /* Map in QEC per-channel control registers. */
+ qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
+ CREG_REG_SIZE, "QEC Channel Registers");
+ if (!qe->qcregs) {
+ printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
+ goto out4;
+ }
+
+ /* Map in per-channel AMD MACE registers. */
+ qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
+ MREGS_REG_SIZE, "QE MACE Registers");
+ if (!qe->mregs) {
+ printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
+ goto out4;
+ }
+
+ qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
+ PAGE_SIZE,
+ &qe->qblock_dvma);
+ qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
+ sizeof(struct sunqe_buffers),
+ &qe->buffers_dvma);
+ if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
+ qe->buffers == NULL || qe->buffers_dvma == 0) {
+ goto out4;
+ }
+
+ /* Stop this QE. */
+ qe_stop(qe);
+ }
+
+ for (i = 0; i < 4; i++) {
+ SET_MODULE_OWNER(qe_devs[i]);
+ qe_devs[i]->open = qe_open;
+ qe_devs[i]->stop = qe_close;
+ qe_devs[i]->hard_start_xmit = qe_start_xmit;
+ qe_devs[i]->get_stats = qe_get_stats;
+ qe_devs[i]->set_multicast_list = qe_set_multicast;
+ qe_devs[i]->tx_timeout = qe_tx_timeout;
+ qe_devs[i]->watchdog_timeo = 5*HZ;
+ qe_devs[i]->irq = sdev->irqs[0];
+ qe_devs[i]->dma = 0;
+ qe_devs[i]->ethtool_ops = &qe_ethtool_ops;
+ }
+
+ /* QEC receives interrupts from each QE, then it sends the actual
+ * IRQ to the cpu itself. Since QEC is the single point of
+ * interrupt for all QE channels we register the IRQ handler
+ * for it now.
+ */
+ if (request_irq(sdev->irqs[0], &qec_interrupt,
+ SA_SHIRQ, "QuadEther", (void *) qecp)) {
+ printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
+ res = -EAGAIN;
+ goto out4;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (register_netdev(qe_devs[i]) != 0)
+ goto out5;
+ }
+
+ /* Report the QE channels. */
+ for (i = 0; i < 4; i++) {
+ printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
+ for (j = 0; j < 6; j++)
+ printk ("%2.2x%c",
+ qe_devs[i]->dev_addr[j],
+ j == 5 ? ' ': ':');
+ printk("\n");
+ }
+
+ /* We are home free at this point, link the qe's into
+ * the master list for later driver exit.
+ */
+ qecp->next_module = root_qec_dev;
+ root_qec_dev = qecp;
+
+ return 0;
+
+out5:
+ while (i--)
+ unregister_netdev(qe_devs[i]);
+ free_irq(sdev->irqs[0], (void *)qecp);
+out4:
+ for (i = 0; i < 4; i++) {
+ struct sunqe *qe = (struct sunqe *)qe_devs[i]->priv;
+
+ if (qe->qcregs)
+ sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
+ if (qe->mregs)
+ sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
+ if (qe->qe_block)
+ sbus_free_consistent(qe->qe_sdev,
+ PAGE_SIZE,
+ qe->qe_block,
+ qe->qblock_dvma);
+ if (qe->buffers)
+ sbus_free_consistent(qe->qe_sdev,
+ sizeof(struct sunqe_buffers),
+ qe->buffers,
+ qe->buffers_dvma);
+ }
+out3:
+ sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
+out2:
+ kfree(qecp);
+out1:
+ i = 4;
+out:
+ while (i--)
+ free_netdev(qe_devs[i]);
+ return res;
+}
+
+static int __init qec_match(struct sbus_dev *sdev)
+{
+ struct sbus_dev *sibling;
+ int i;
+
+ if (strcmp(sdev->prom_name, "qec") != 0)
+ return 0;
+
+ /* QEC can be parent of either QuadEthernet or BigMAC
+ * children. Do not confuse this with qfe/SUNW,qfe
+ * which is a quad-happymeal card and handled by
+ * a different driver.
+ */
+ sibling = sdev->child;
+ for (i = 0; i < 4; i++) {
+ if (sibling == NULL)
+ return 0;
+ if (strcmp(sibling->prom_name, "qe") != 0)
+ return 0;
+ sibling = sibling->next;
+ }
+ return 1;
+}
+
+static int __init qec_probe(void)
+{
+ struct net_device *dev = NULL;
+ struct sbus_bus *bus;
+ struct sbus_dev *sdev = NULL;
+ static int called;
+ int cards = 0, v;
+
+ root_qec_dev = NULL;
+
+ if (called)
+ return -ENODEV;
+ called++;
+
+ for_each_sbus(bus) {
+ for_each_sbusdev(sdev, bus) {
+ if (cards)
+ dev = NULL;
+
+ if (qec_match(sdev)) {
+ cards++;
+ if ((v = qec_ether_init(dev, sdev)))
+ return v;
+ }
+ }
+ }
+ if (!cards)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit qec_cleanup(void)
+{
+ struct sunqec *next_qec;
+ int i;
+
+ while (root_qec_dev) {
+ next_qec = root_qec_dev->next_module;
+
+ /* Release all four QE channels, then the QEC itself. */
+ for (i = 0; i < 4; i++) {
+ unregister_netdev(root_qec_dev->qes[i]->dev);
+ sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
+ sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
+ sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
+ PAGE_SIZE,
+ root_qec_dev->qes[i]->qe_block,
+ root_qec_dev->qes[i]->qblock_dvma);
+ sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
+ sizeof(struct sunqe_buffers),
+ root_qec_dev->qes[i]->buffers,
+ root_qec_dev->qes[i]->buffers_dvma);
+ free_netdev(root_qec_dev->qes[i]->dev);
+ }
+ free_irq(root_qec_dev->qec_sdev->irqs[0], (void *)root_qec_dev);
+ sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
+ kfree(root_qec_dev);
+ root_qec_dev = next_qec;
+ }
+}
+
+module_init(qec_probe);
+module_exit(qec_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h
new file mode 100644
index 000000000000..af34f36111ed
--- /dev/null
+++ b/drivers/net/sunqe.h
@@ -0,0 +1,351 @@
+/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
+ * sunqe.h: Definitions for the Sun QuadEthernet driver.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNQE_H
+#define _SUNQE_H
+
+/* QEC global registers. */
+#define GLOB_CTRL 0x00UL /* Control */
+#define GLOB_STAT 0x04UL /* Status */
+#define GLOB_PSIZE 0x08UL /* Packet Size */
+#define GLOB_MSIZE 0x0cUL /* Local-memory Size */
+#define GLOB_RSIZE 0x10UL /* Receive partition size */
+#define GLOB_TSIZE 0x14UL /* Transmit partition size */
+#define GLOB_REG_SIZE 0x18UL
+
+#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
+#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
+#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
+#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
+#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
+#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
+#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
+
+#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
+#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
+#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
+#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
+
+#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
+#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
+#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
+#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
+
+/* In MACE mode, there are four qe channels. Each channel has its own
+ * status bits in the QEC status register. This macro picks out the
+ * ones you want.
+ */
+#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
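+
+/* A minimal usage sketch, illustrative only: given a latched copy of
+ * GLOB_STAT in qec_status, a per-channel handler (service_channel() is a
+ * hypothetical name) could be driven as
+ *
+ *	u32 chan_bits = GLOB_STAT_PER_QE(qec_status, channel);
+ *	if (chan_bits)
+ *		service_channel(channel);
+ *
+ * The loop in qec_interrupt() open-codes the same extraction by testing
+ * the low nibble and shifting the status right four bits per channel.
+ */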
+
+/* The following registers are for per-qe channel information/status. */
+#define CREG_CTRL 0x00UL /* Control */
+#define CREG_STAT 0x04UL /* Status */
+#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
+#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
+#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
+#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
+#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
+#define CREG_MMASK 0x1cUL /* MACE Error Interrupt Mask */
+#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
+#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
+#define CREG_CCNT 0x30UL /* Collision Counter */
+#define CREG_PIPG 0x34UL /* Inter-Frame Gap */
+#define CREG_REG_SIZE 0x38UL
+
+#define CREG_CTRL_RXOFF 0x00000004 /* Disable this qe's receiver*/
+#define CREG_CTRL_RESET 0x00000002 /* Reset this qe channel */
+#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_EDEFER 0x10000000 /* Excessive Defers */
+#define CREG_STAT_CLOSS 0x08000000 /* Carrier Loss */
+#define CREG_STAT_ERETRIES 0x04000000 /* More than 16 retries */
+#define CREG_STAT_LCOLL 0x02000000 /* Late TX Collision */
+#define CREG_STAT_FUFLOW 0x01000000 /* FIFO Underflow */
+#define CREG_STAT_JERROR 0x00800000 /* Jabber Error */
+#define CREG_STAT_BERROR 0x00400000 /* Babble Error */
+#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
+#define CREG_STAT_CCOFLOW 0x00100000 /* TX Coll-counter Overflow */
+#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
+#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
+#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
+#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
+#define CREG_STAT_RCCOFLOW 0x00001000 /* RX Coll-counter Overflow */
+#define CREG_STAT_RUOFLOW 0x00000800 /* Runt Counter Overflow */
+#define CREG_STAT_MCOFLOW 0x00000400 /* Missed Counter Overflow */
+#define CREG_STAT_RXFOFLOW 0x00000200 /* RX FIFO Overflow */
+#define CREG_STAT_RLCOLL 0x00000100 /* RX Late Collision */
+#define CREG_STAT_FCOFLOW 0x00000080 /* Frame Counter Overflow */
+#define CREG_STAT_CECOFLOW 0x00000040 /* CRC Error-counter Overflow*/
+#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
+#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
+#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
+#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
+#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
+#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
+
+#define CREG_STAT_ERRORS (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES| \
+ CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR| \
+ CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR| \
+ CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR| \
+ CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
+ CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW| \
+ CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL| \
+ CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)
+
+#define CREG_QMASK_COFLOW 0x00100000 /* CollCntr overflow */
+#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
+#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
+#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
+#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
+#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
+#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
+#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
+#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
+#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
+
+#define CREG_MMASK_EDEFER 0x10000000 /* Excess defer */
+#define CREG_MMASK_CLOSS 0x08000000 /* Carrier loss */
+#define CREG_MMASK_ERETRY 0x04000000 /* Excess retry */
+#define CREG_MMASK_LCOLL 0x02000000 /* Late collision error */
+#define CREG_MMASK_UFLOW 0x01000000 /* Underflow */
+#define CREG_MMASK_JABBER 0x00800000 /* Jabber error */
+#define CREG_MMASK_BABBLE 0x00400000 /* Babble error */
+#define CREG_MMASK_OFLOW 0x00000800 /* Overflow */
+#define CREG_MMASK_RXCOLL 0x00000400 /* RX Coll-Cntr overflow */
+#define CREG_MMASK_RPKT 0x00000200 /* Runt pkt overflow */
+#define CREG_MMASK_MPKT 0x00000100 /* Missed pkt overflow */
+
+#define CREG_PIPG_TENAB 0x00000020 /* Enable Throttle */
+#define CREG_PIPG_MMODE 0x00000010 /* Manual Mode */
+#define CREG_PIPG_WMASK 0x0000000f /* SBUS Wait Mask */
+
+/* Per-channel AMD 79C940 MACE registers. */
+#define MREGS_RXFIFO 0x00UL /* Receive FIFO */
+#define MREGS_TXFIFO 0x01UL /* Transmit FIFO */
+#define MREGS_TXFCNTL 0x02UL /* Transmit Frame Control */
+#define MREGS_TXFSTAT 0x03UL /* Transmit Frame Status */
+#define MREGS_TXRCNT 0x04UL /* Transmit Retry Count */
+#define MREGS_RXFCNTL 0x05UL /* Receive Frame Control */
+#define MREGS_RXFSTAT 0x06UL /* Receive Frame Status */
+#define MREGS_FFCNT 0x07UL /* FIFO Frame Count */
+#define MREGS_IREG 0x08UL /* Interrupt Register */
+#define MREGS_IMASK 0x09UL /* Interrupt Mask */
+#define MREGS_POLL 0x0aUL /* POLL Register */
+#define MREGS_BCONFIG 0x0bUL /* BIU Config */
+#define MREGS_FCONFIG 0x0cUL /* FIFO Config */
+#define MREGS_MCONFIG 0x0dUL /* MAC Config */
+#define MREGS_PLSCONFIG 0x0eUL /* PLS Config */
+#define MREGS_PHYCONFIG 0x0fUL /* PHY Config */
+#define MREGS_CHIPID1 0x10UL /* Chip-ID, low bits */
+#define MREGS_CHIPID2 0x11UL /* Chip-ID, high bits */
+#define MREGS_IACONFIG 0x12UL /* Internal Address Config */
+ /* 0x13UL, reserved */
+#define MREGS_FILTER 0x14UL /* Logical Address Filter */
+#define MREGS_ETHADDR 0x15UL /* Our Ethernet Address */
+ /* 0x16UL, reserved */
+ /* 0x17UL, reserved */
+#define MREGS_MPCNT 0x18UL /* Missed Packet Count */
+ /* 0x19UL, reserved */
+#define MREGS_RPCNT 0x1aUL /* Runt Packet Count */
+#define MREGS_RCCNT 0x1bUL /* RX Collision Count */
+ /* 0x1cUL, reserved */
+#define MREGS_UTEST 0x1dUL /* User Test */
+#define MREGS_RTEST1 0x1eUL /* Reserved Test 1 */
+#define MREGS_RTEST2 0x1fUL /* Reserved Test 2 */
+#define MREGS_REG_SIZE 0x20UL
+
+#define MREGS_TXFCNTL_DRETRY 0x80 /* Retry disable */
+#define MREGS_TXFCNTL_DFCS 0x08 /* Disable TX FCS */
+#define MREGS_TXFCNTL_AUTOPAD 0x01 /* TX auto pad */
+
+#define MREGS_TXFSTAT_VALID 0x80 /* TX valid */
+#define MREGS_TXFSTAT_UNDERFLOW 0x40 /* TX underflow */
+#define MREGS_TXFSTAT_LCOLL 0x20 /* TX late collision */
+#define MREGS_TXFSTAT_MRETRY 0x10 /* TX > 1 retries */
+#define MREGS_TXFSTAT_ORETRY 0x08 /* TX 1 retry */
+#define MREGS_TXFSTAT_PDEFER 0x04 /* TX pkt deferred */
+#define MREGS_TXFSTAT_CLOSS 0x02 /* TX carrier lost */
+#define MREGS_TXFSTAT_RERROR 0x01 /* TX retry error */
+
+#define MREGS_TXRCNT_EDEFER 0x80 /* TX Excess defers */
+#define MREGS_TXRCNT_CMASK 0x0f /* TX retry count */
+
+#define MREGS_RXFCNTL_LOWLAT 0x08 /* RX low latency */
+#define MREGS_RXFCNTL_AREJECT 0x04 /* RX addr match rej */
+#define MREGS_RXFCNTL_AUTOSTRIP 0x01 /* RX auto strip */
+
+#define MREGS_RXFSTAT_OVERFLOW 0x80 /* RX overflow */
+#define MREGS_RXFSTAT_LCOLL 0x40 /* RX late collision */
+#define MREGS_RXFSTAT_FERROR 0x20 /* RX framing error */
+#define MREGS_RXFSTAT_FCSERROR 0x10 /* RX FCS error */
+#define MREGS_RXFSTAT_RBCNT 0x0f /* RX msg byte count */
+
+#define MREGS_FFCNT_RX 0xf0 /* RX FIFO frame cnt */
+#define MREGS_FFCNT_TX 0x0f /* TX FIFO frame cnt */
+
+#define MREGS_IREG_JABBER 0x80 /* IRQ Jabber error */
+#define MREGS_IREG_BABBLE 0x40 /* IRQ Babble error */
+#define MREGS_IREG_COLL 0x20 /* IRQ Collision error */
+#define MREGS_IREG_RCCO 0x10 /* IRQ Collision cnt overflow */
+#define MREGS_IREG_RPKTCO 0x08 /* IRQ Runt packet count overflow */
+#define MREGS_IREG_MPKTCO 0x04 /* IRQ missed packet cnt overflow */
+#define MREGS_IREG_RXIRQ 0x02 /* IRQ RX'd a packet */
+#define MREGS_IREG_TXIRQ 0x01 /* IRQ TX'd a packet */
+
+#define MREGS_IMASK_BABBLE 0x40 /* IMASK Babble errors */
+#define MREGS_IMASK_COLL 0x20 /* IMASK Collision errors */
+#define MREGS_IMASK_MPKTCO 0x04 /* IMASK Missed pkt cnt overflow */
+#define MREGS_IMASK_RXIRQ 0x02 /* IMASK RX interrupts */
+#define MREGS_IMASK_TXIRQ 0x01 /* IMASK TX interrupts */
+
+#define MREGS_POLL_TXVALID 0x80 /* TX is valid */
+#define MREGS_POLL_TDTR 0x40 /* TX data transfer request */
+#define MREGS_POLL_RDTR 0x20 /* RX data transfer request */
+
+#define MREGS_BCONFIG_BSWAP 0x40 /* Byte Swap */
+#define MREGS_BCONFIG_4TS 0x00 /* 4byte transmit start point */
+#define MREGS_BCONFIG_16TS 0x10 /* 16byte transmit start point */
+#define MREGS_BCONFIG_64TS 0x20 /* 64byte transmit start point */
+#define MREGS_BCONFIG_112TS 0x30 /* 112byte transmit start point */
+#define MREGS_BCONFIG_RESET 0x01 /* SW-Reset the MACE */
+
+#define MREGS_FCONFIG_TXF8 0x00 /* TX fifo 8 write cycles */
+#define MREGS_FCONFIG_TXF32 0x80 /* TX fifo 32 write cycles */
+#define MREGS_FCONFIG_TXF16 0x40 /* TX fifo 16 write cycles */
+#define MREGS_FCONFIG_RXF64 0x20 /* RX fifo 64 write cycles */
+#define MREGS_FCONFIG_RXF32 0x10 /* RX fifo 32 write cycles */
+#define MREGS_FCONFIG_RXF16 0x00 /* RX fifo 16 write cycles */
+#define MREGS_FCONFIG_TFWU 0x08 /* TX fifo watermark update */
+#define MREGS_FCONFIG_RFWU 0x04 /* RX fifo watermark update */
+#define MREGS_FCONFIG_TBENAB 0x02 /* TX burst enable */
+#define MREGS_FCONFIG_RBENAB 0x01 /* RX burst enable */
+
+#define MREGS_MCONFIG_PROMISC 0x80 /* Promiscuous mode enable */
+#define MREGS_MCONFIG_TPDDISAB 0x40 /* TX 2part deferral enable */
+#define MREGS_MCONFIG_MBAENAB 0x20 /* Modified backoff enable */
+#define MREGS_MCONFIG_RPADISAB 0x08 /* RX physical addr disable */
+#define MREGS_MCONFIG_RBDISAB 0x04 /* RX broadcast disable */
+#define MREGS_MCONFIG_TXENAB 0x02 /* Enable transmitter */
+#define MREGS_MCONFIG_RXENAB 0x01 /* Enable receiver */
+
+#define MREGS_PLSCONFIG_TXMS 0x08 /* TX mode select */
+#define MREGS_PLSCONFIG_GPSI 0x06 /* Use GPSI connector */
+#define MREGS_PLSCONFIG_DAI 0x04 /* Use DAI connector */
+#define MREGS_PLSCONFIG_TP 0x02 /* Use TwistedPair connector */
+#define MREGS_PLSCONFIG_AUI 0x00 /* Use AUI connector */
+#define MREGS_PLSCONFIG_IOENAB 0x01 /* PLS I/O enable */
+
+#define MREGS_PHYCONFIG_LSTAT 0x80 /* Link status */
+#define MREGS_PHYCONFIG_LTESTDIS 0x40 /* Disable link test logic */
+#define MREGS_PHYCONFIG_RXPOLARITY 0x20 /* RX polarity */
+#define MREGS_PHYCONFIG_APCDISAB 0x10 /* AutoPolarityCorrect disab */
+#define MREGS_PHYCONFIG_LTENAB 0x08 /* Select low threshold */
+#define MREGS_PHYCONFIG_AUTO 0x04 /* Connector port auto-sel */
+#define MREGS_PHYCONFIG_RWU 0x02 /* Remote WakeUp */
+#define MREGS_PHYCONFIG_AW 0x01 /* Auto Wakeup */
+
+#define MREGS_IACONFIG_ACHNGE 0x80 /* Do address change */
+#define MREGS_IACONFIG_PARESET 0x04 /* Physical address reset */
+#define MREGS_IACONFIG_LARESET 0x02 /* Logical address reset */
+
+#define MREGS_UTEST_RTRENAB 0x80 /* Enable resv test register */
+#define MREGS_UTEST_RTRDISAB 0x40 /* Disab resv test register */
+#define MREGS_UTEST_RPACCEPT 0x20 /* Accept runt packets */
+#define MREGS_UTEST_FCOLL 0x10 /* Force collision status */
+#define MREGS_UTEST_FCSENAB 0x08 /* Enable FCS on RX */
+#define MREGS_UTEST_INTLOOPM 0x06 /* Intern lpback w/MENDEC */
+#define MREGS_UTEST_INTLOOP 0x04 /* Intern lpback */
+#define MREGS_UTEST_EXTLOOP 0x02 /* Extern lpback */
+#define MREGS_UTEST_NOLOOP 0x00 /* No loopback */
+
+struct qe_rxd {
+ u32 rx_flags;
+ u32 rx_addr;
+};
+
+#define RXD_OWN 0x80000000 /* Ownership. */
+#define RXD_UPDATE 0x10000000 /* Being Updated? */
+#define RXD_LENGTH 0x000007ff /* Packet Length. */
+
+struct qe_txd {
+ u32 tx_flags;
+ u32 tx_addr;
+};
+
+#define TXD_OWN 0x80000000 /* Ownership. */
+#define TXD_SOP 0x40000000 /* Start Of Packet */
+#define TXD_EOP 0x20000000 /* End Of Packet */
+#define TXD_UPDATE 0x10000000 /* Being Updated? */
+#define TXD_LENGTH 0x000007ff /* Packet Length. */
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_MAXSIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_MAXSIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_MAXSIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_MAXSIZE - 1))
+
+#define TX_BUFFS_AVAIL(qp) \
+ (((qp)->tx_old <= (qp)->tx_new) ? \
+ (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
+ (qp)->tx_old - (qp)->tx_new - 1)
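+
+/* Worked example (illustrative only): with TX_RING_SIZE 16, tx_old == 3
+ * and tx_new == 7 the first branch yields 3 + 15 - 7 = 11 free slots;
+ * with tx_old == 7 and tx_new == 3 the second branch yields 7 - 3 - 1 = 3.
+ * One descriptor is always left unused so tx_new never catches tx_old.
+ */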
+
+struct qe_init_block {
+ struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
+ struct qe_txd qe_txd[TX_RING_MAXSIZE];
+};
+
+#define qib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))
+
+struct sunqe;
+
+struct sunqec {
+ void __iomem *gregs; /* QEC Global Registers */
+ struct sunqe *qes[4]; /* Each child MACE */
+ unsigned int qec_bursts; /* Support burst sizes */
+ struct sbus_dev *qec_sdev; /* QEC's SBUS device */
+ struct sunqec *next_module; /* List of all QECs in system */
+};
+
+#define PKT_BUF_SZ 1664
+#define RXD_PKT_SZ 1664
+
+struct sunqe_buffers {
+ u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
+ u8 __pad[2];
+ u8 rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
+};
+
+#define qebuf_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
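+
+/* qib_offset() above and qebuf_offset() here are hand-rolled offsetof()
+ * variants: taking a member address off a NULL struct pointer purely for
+ * its address arithmetic yields the member's byte offset. Illustrative
+ * use, mirroring qe_start_xmit():
+ *
+ *	txbuf_dvma = qbufs_dvma +
+ *		qebuf_offset(tx_buf, entry & (TX_RING_SIZE - 1));
+ *
+ * i.e. the DVMA address of TX slot N is the base DVMA address of the
+ * buffer block plus that slot's offset inside struct sunqe_buffers.
+ */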
+
+struct sunqe {
+ void __iomem *qcregs; /* QEC per-channel Registers */
+ void __iomem *mregs; /* Per-channel MACE Registers */
+ struct qe_init_block *qe_block; /* RX and TX descriptors */
+ __u32 qblock_dvma; /* RX and TX descriptors */
+ spinlock_t lock; /* Protects txfull state */
+ int rx_new, rx_old; /* RX ring extents */
+ int tx_new, tx_old; /* TX ring extents */
+ struct sunqe_buffers *buffers; /* CPU visible address. */
+ __u32 buffers_dvma; /* DVMA visible address. */
+ struct sunqec *parent;
+ u8 mconfig; /* Base MACE mconfig value */
+ struct net_device_stats net_stats; /* Statistical counters */
+ struct sbus_dev *qe_sdev; /* QE's SBUS device struct */
+ struct net_device *dev; /* QE's netdevice struct */
+ int channel; /* Who am I? */
+};
+
+#endif /* !(_SUNQE_H) */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
new file mode 100644
index 000000000000..c2ec9fd8c31d
--- /dev/null
+++ b/drivers/net/tc35815.c
@@ -0,0 +1,1745 @@
+/* tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * Based on skelton.c by Donald Becker.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+static const char *version =
+ "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+/*
+ * The name of the card. It is used for messages and in the requests for
+ * I/O regions, IRQs and DMA channels.
+ */
+static const char* cardname = "TC35815CF";
+#define TC35815_PROC_ENTRY "net/tc35815"
+
+#define TC35815_MODULE_NAME "TC35815CF"
+#define TX_TIMEOUT (4*HZ)
+
+/* First, a few definitions that the brave might change. */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef TC35815_DEBUG
+#define TC35815_DEBUG 1
+#endif
+static unsigned int tc35815_debug = TC35815_DEBUG;
+
+#define GATHER_TXINT /* On-Demand Tx Interrupt */
+
+#define vtonocache(p) KSEG1ADDR(virt_to_phys(p))
+
+/*
+ * Registers
+ */
+struct tc35815_regs {
+ volatile __u32 DMA_Ctl; /* 0x00 */
+ volatile __u32 TxFrmPtr;
+ volatile __u32 TxThrsh;
+ volatile __u32 TxPollCtr;
+ volatile __u32 BLFrmPtr;
+ volatile __u32 RxFragSize;
+ volatile __u32 Int_En;
+ volatile __u32 FDA_Bas;
+ volatile __u32 FDA_Lim; /* 0x20 */
+ volatile __u32 Int_Src;
+ volatile __u32 unused0[2];
+ volatile __u32 PauseCnt;
+ volatile __u32 RemPauCnt;
+ volatile __u32 TxCtlFrmStat;
+ volatile __u32 unused1;
+ volatile __u32 MAC_Ctl; /* 0x40 */
+ volatile __u32 CAM_Ctl;
+ volatile __u32 Tx_Ctl;
+ volatile __u32 Tx_Stat;
+ volatile __u32 Rx_Ctl;
+ volatile __u32 Rx_Stat;
+ volatile __u32 MD_Data;
+ volatile __u32 MD_CA;
+ volatile __u32 CAM_Adr; /* 0x60 */
+ volatile __u32 CAM_Data;
+ volatile __u32 CAM_Ena;
+ volatile __u32 PROM_Ctl;
+ volatile __u32 PROM_Data;
+ volatile __u32 Algn_Cnt;
+ volatile __u32 CRC_Cnt;
+ volatile __u32 Miss_Cnt;
+};
+
+/*
+ * Bit assignments
+ */
+/* DMA_Ctl bit assign ------------------------------------------------------ */
+#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
+#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
+#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
+#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
+#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
+#define DMA_TestMode 0x00002000 /* 1:Test Mode */
+#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
+#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
+
+/* RxFragSize bit assign --------------------------------------------------- */
+#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
+#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
+
+/* MAC_Ctl bit assign ------------------------------------------------------ */
+#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
+#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
+#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
+#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
+#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
+#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
+#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */
+#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
+#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
+#define MAC_Reset 0x00000004 /* 1:Software Reset */
+#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
+#define MAC_HaltReq 0x00000001 /* 1:Halt request */
+
+/* PROM_Ctl bit assign ----------------------------------------------------- */
+#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
+#define PROM_Read 0x00004000 /*10:Read operation */
+#define PROM_Write 0x00002000 /*01:Write operation */
+#define PROM_Erase 0x00006000 /*11:Erase operation */
+ /*00:Enable or Disable Writing, */
+ /* as specified in PROM_Addr. */
+#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
+ /*00xxxx: disable */
+
+/* CAM_Ctl bit assign ------------------------------------------------------ */
+#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
+#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
+ /* accept other */
+#define CAM_BroadAcc 0x00000004 /* 1:Broadcast accept */
+#define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */
+#define CAM_StationAcc 0x00000001 /* 1:unicast accept */
+
+/* CAM_Ena bit assign ------------------------------------------------------ */
+#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
+#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
+#define CAM_Ena_Bit(index) (1<<(index))
+#define CAM_ENTRY_DESTINATION 0
+#define CAM_ENTRY_SOURCE 1
+#define CAM_ENTRY_MACCTL 20
+
+/* Tx_Ctl bit assign ------------------------------------------------------- */
+#define Tx_En 0x00000001 /* 1:Transmit enable */
+#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
+#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
+#define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */
+#define Tx_FBack 0x00000010 /* 1:Fast Back-off */
+#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
+#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
+#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
+#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
+#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
+#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
+#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
+
+/* Tx_Stat bit assign ------------------------------------------------------ */
+#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
+#define Tx_ExColl 0x00000010 /* Excessive Collision */
+#define Tx_TXDefer 0x00000020 /* Transmit Deferred */
+#define Tx_Paused 0x00000040 /* Transmit Paused */
+#define Tx_IntTx 0x00000080 /* Interrupt on Tx */
+#define Tx_Under 0x00000100 /* Underrun */
+#define Tx_Defer 0x00000200 /* Deferral */
+#define Tx_NCarr 0x00000400 /* No Carrier */
+#define Tx_10Stat 0x00000800 /* 10Mbps Status */
+#define Tx_LateColl 0x00001000 /* Late Collision */
+#define Tx_TxPar 0x00002000 /* Tx Parity Error */
+#define Tx_Comp 0x00004000 /* Completion */
+#define Tx_Halted 0x00008000 /* Tx Halted */
+#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
+
+/* Rx_Ctl bit assign ------------------------------------------------------- */
+#define Rx_EnGood 0x00004000 /* 1:Enable Good */
+#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
+#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
+#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
+#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
+#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
+#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
+#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
+#define Rx_ShortEn 0x00000008 /* 1:Short Enable */
+#define Rx_LongEn 0x00000004 /* 1:Long Enable */
+#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
+#define Rx_RxEn 0x00000001 /* 1:Receive Interrupt Enable */
+
+/* Rx_Stat bit assign ------------------------------------------------------ */
+#define Rx_Halted 0x00008000 /* Rx Halted */
+#define Rx_Good 0x00004000 /* Rx Good */
+#define Rx_RxPar 0x00002000 /* Rx Parity Error */
+ /* 0x00001000 not used */
+#define Rx_LongErr 0x00000800 /* Rx Long Error */
+#define Rx_Over 0x00000400 /* Rx Overflow */
+#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
+#define Rx_Align 0x00000100 /* Rx Alignment Error */
+#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
+#define Rx_IntRx 0x00000040 /* Rx Interrupt */
+#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
+
+#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */
+
+/* Int_En bit assign ------------------------------------------------------- */
+#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
+#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Control Complete Enable */
+#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
+#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
+#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
+#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
+#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
+#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
+#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
+#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
+#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
+#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
+ /* Exhausted Enable */
+
+/* Int_Src bit assign ------------------------------------------------------ */
+#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
+#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
+#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
+#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
+#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
+#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
+#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
+#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
+#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
+#define Int_SWInt 0x00000020 /* 1:Software request & Clear */
+#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
+#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
+#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
+#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
+#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
+
+/* MD_CA bit assign -------------------------------------------------------- */
+#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
+#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
+#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
+
+
+/* MII register offsets */
+#define MII_CONTROL 0x0000
+#define MII_STATUS 0x0001
+#define MII_PHY_ID0 0x0002
+#define MII_PHY_ID1 0x0003
+#define MII_ANAR 0x0004
+#define MII_ANLPAR 0x0005
+#define MII_ANER 0x0006
+/* MII Control register bit definitions. */
+#define MIICNTL_FDX 0x0100
+#define MIICNTL_RST_AUTO 0x0200
+#define MIICNTL_ISOLATE 0x0400
+#define MIICNTL_PWRDWN 0x0800
+#define MIICNTL_AUTO 0x1000
+#define MIICNTL_SPEED 0x2000
+#define MIICNTL_LPBK 0x4000
+#define MIICNTL_RESET 0x8000
+/* MII Status register bit significance. */
+#define MIISTAT_EXT 0x0001
+#define MIISTAT_JAB 0x0002
+#define MIISTAT_LINK 0x0004
+#define MIISTAT_CAN_AUTO 0x0008
+#define MIISTAT_FAULT 0x0010
+#define MIISTAT_AUTO_DONE 0x0020
+#define MIISTAT_CAN_T 0x0800
+#define MIISTAT_CAN_T_FDX 0x1000
+#define MIISTAT_CAN_TX 0x2000
+#define MIISTAT_CAN_TX_FDX 0x4000
+#define MIISTAT_CAN_T4 0x8000
+/* MII Auto-Negotiation Expansion/RemoteEnd Register Bits */
+#define MII_AN_TX_FDX 0x0100
+#define MII_AN_TX_HDX 0x0080
+#define MII_AN_10_FDX 0x0040
+#define MII_AN_10_HDX 0x0020
+
+
+/*
+ * Descriptors
+ */
+
+/* Frame descriptor */
+struct FDesc {
+ volatile __u32 FDNext;
+ volatile __u32 FDSystem;
+ volatile __u32 FDStat;
+ volatile __u32 FDCtl;
+};
+
+/* Buffer descriptor */
+struct BDesc {
+ volatile __u32 BuffData;
+ volatile __u32 BDCtl;
+};
+
+#define FD_ALIGN 16
+
+/* Frame Descriptor bit assign --------------------------------------------- */
+#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
+#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
+#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
+#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
+#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
+#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
+#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
+#define FD_FrmOpt_Packing 0x04000000 /* Rx only */
+#define FD_CownsFD 0x80000000 /* FD Controller owner bit */
+#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
+#define FD_BDCnt_SHIFT 16
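+
+/* Illustrative decode, mirroring dump_rxfd() later in this file: the
+ * number of buffer descriptors attached to a frame descriptor is
+ *
+ *	bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
+ *
+ * and FD_CownsFD in FDCtl tells whether the controller still owns the FD.
+ */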
+
+/* Buffer Descriptor bit assign -------------------------------------------- */
+#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
+#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
+#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
+#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
+#define BD_RxBDID_SHIFT 16
+#define BD_RxBDSeqN_SHIFT 24
+
+
+/* Some useful constants. */
+#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */
+
+#ifdef NO_CHECK_CARRIER
+#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
+ Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
+ Tx_En) /* maybe 0x7d01 */
+#else
+#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
+ Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
+ Tx_En) /* maybe 0x7f01 */
+#endif
+#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
+ | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
+
+#define INT_EN_CMD (Int_NRAbtEn | \
+ Int_DParDEn | Int_DParErrEn | \
+ Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
+ Int_STargAbtEn | \
+ Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
+
+/* Tuning parameters */
+#define DMA_BURST_SIZE 32
+#define TX_THRESHOLD 1024
+
+#define FD_PAGE_NUM 2
+#define FD_PAGE_ORDER 1
+/* 16 + RX_BUF_PAGES * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*2 */
+#define RX_BUF_PAGES 8 /* >= 2 */
+#define RX_FD_NUM 250 /* >= 32 */
+#define TX_FD_NUM 128
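+
+/* Sanity arithmetic for the budget comment above, assuming 4 KiB pages
+ * and these defaults: 16 + 8*8 + 250*16 + 128*32 = 16 + 64 + 4000 + 4096
+ * = 8176 bytes, which fits within PAGE_SIZE * FD_PAGE_NUM = 8192.
+ */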
+
+struct TxFD {
+ struct FDesc fd;
+ struct BDesc bd;
+ struct BDesc unused;
+};
+
+struct RxFD {
+ struct FDesc fd;
+ struct BDesc bd[0]; /* variable length */
+};
+
+struct FrFD {
+ struct FDesc fd;
+ struct BDesc bd[RX_BUF_PAGES];
+};
+
+
+extern unsigned long tc_readl(volatile __u32 *addr);
+extern void tc_writel(unsigned long data, volatile __u32 *addr);
+
+dma_addr_t priv_dma_handle;
+
+/* Information that needs to be kept for each board. */
+struct tc35815_local {
+ struct net_device *next_module;
+
+ /* statistics */
+ struct net_device_stats stats;
+ struct {
+ int max_tx_qlen;
+ int tx_ints;
+ int rx_ints;
+ } lstats;
+
+ int tbusy;
+ int option;
+#define TC35815_OPT_AUTO 0x00
+#define TC35815_OPT_10M 0x01
+#define TC35815_OPT_100M 0x02
+#define TC35815_OPT_FULLDUP 0x04
+ int linkspeed; /* 10 or 100 */
+ int fullduplex;
+
+ /*
+ * Transmitting: Batch Mode.
+ * 1 BD in 1 TxFD.
+ * Receiving: Packing Mode.
+ * 1 circular FD for Free Buffer List.
+ * RX_BUF_PAGES BD in Free Buffer FD.
+ * One Free Buffer BD has PAGE_SIZE data buffer.
+ */
+ struct pci_dev *pdev;
+ dma_addr_t fd_buf_dma_handle;
+ void * fd_buf; /* for TxFD, RxFD, FrFD */
+ struct TxFD *tfd_base;
+ int tfd_start;
+ int tfd_end;
+ struct RxFD *rfd_base;
+ struct RxFD *rfd_limit;
+ struct RxFD *rfd_cur;
+ struct FrFD *fbl_ptr;
+ unsigned char fbl_curid;
+ dma_addr_t data_buf_dma_handle[RX_BUF_PAGES];
+ void * data_buf[RX_BUF_PAGES]; /* packing */
+ spinlock_t lock;
+};
+
+/* Index to functions, as function prototypes. */
+
+static int __devinit tc35815_probe1(struct pci_dev *pdev, unsigned int base_addr, unsigned int irq);
+
+static int tc35815_open(struct net_device *dev);
+static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void tc35815_tx_timeout(struct net_device *dev);
+static irqreturn_t tc35815_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void tc35815_rx(struct net_device *dev);
+static void tc35815_txdone(struct net_device *dev);
+static int tc35815_close(struct net_device *dev);
+static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
+static void tc35815_set_multicast_list(struct net_device *dev);
+
+static void tc35815_chip_reset(struct net_device *dev);
+static void tc35815_chip_init(struct net_device *dev);
+static void tc35815_phy_chip_init(struct net_device *dev);
+
+/* A list of all installed tc35815 devices. */
+static struct net_device *root_tc35815_dev = NULL;
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static struct pci_device_id tc35815_pci_tbl[] = {
+ { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl);
+
+int
+tc35815_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err = 0;
+ int ret;
+ unsigned long pci_memaddr;
+ unsigned int pci_irq_line;
+
+ printk(KERN_INFO "tc35815_probe: found device %#08x.%#08x\n", ent->vendor, ent->device);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_memaddr = pci_resource_start (pdev, 1);
+
+ printk(KERN_INFO " pci_memaddr=%#08lx resource_flags=%#08lx\n", pci_memaddr, pci_resource_flags (pdev, 0));
+
+ if (!pci_memaddr) {
+ printk(KERN_WARNING "no PCI MEM resources, aborting\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+ pci_irq_line = pdev->irq;
+ /* irq disabled. */
+ if (pci_irq_line == 0) {
+ printk(KERN_WARNING "no PCI irq, aborting\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ ret = tc35815_probe1(pdev, pci_memaddr, pci_irq_line);
+ if (ret)
+ goto err_out;
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_out:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static int __devinit tc35815_probe1(struct pci_dev *pdev, unsigned int base_addr, unsigned int irq)
+{
+ static unsigned version_printed = 0;
+ int i, ret;
+ struct tc35815_local *lp;
+ struct tc35815_regs *tr;
+ struct net_device *dev;
+
+ /* Allocate a new 'dev' if needed. */
+ dev = alloc_etherdev(sizeof(struct tc35815_local));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ /*
+ * alloc_etherdev allocs and zeros dev->priv
+ */
+ lp = dev->priv;
+
+ if (tc35815_debug && version_printed++ == 0)
+ printk(KERN_DEBUG "%s", version);
+
+ /* Fill in the 'dev' fields. */
+ dev->irq = irq;
+ dev->base_addr = (unsigned long)ioremap(base_addr,
+ sizeof(struct tc35815_regs));
+ if (!dev->base_addr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ tr = (struct tc35815_regs*)dev->base_addr;
+
+ tc35815_chip_reset(dev);
+
+ /* Retrieve and print the ethernet address. */
+ while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
+ ;
+ for (i = 0; i < 6; i += 2) {
+ unsigned short data;
+ tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
+ while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
+ ;
+ data = tc_readl(&tr->PROM_Data);
+ dev->dev_addr[i] = data & 0xff;
+ dev->dev_addr[i+1] = data >> 8;
+ }
+
+ /* Initialize the device structure. */
+ lp->pdev = pdev;
+ lp->next_module = root_tc35815_dev;
+ root_tc35815_dev = dev;
+
+ spin_lock_init(&lp->lock);
+
+ if (dev->mem_start > 0) {
+ lp->option = dev->mem_start;
+ if ((lp->option & TC35815_OPT_10M) &&
+ (lp->option & TC35815_OPT_100M)) {
+ /* if both speeds are specified, auto select. */
+ lp->option &= ~(TC35815_OPT_10M | TC35815_OPT_100M);
+ }
+ }
+ //XXX fixme
+ lp->option |= TC35815_OPT_10M;
+
+ /* do auto negotiation */
+ tc35815_phy_chip_init(dev);
+
+ dev->open = tc35815_open;
+ dev->stop = tc35815_close;
+ dev->tx_timeout = tc35815_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->hard_start_xmit = tc35815_send_packet;
+ dev->get_stats = tc35815_get_stats;
+ dev->set_multicast_list = tc35815_set_multicast_list;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto err_out_iounmap;
+
+ printk(KERN_INFO "%s: %s found at %#x, irq %d, MAC",
+ dev->name, cardname, base_addr, irq);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+ printk("\n");
+ printk(KERN_INFO "%s: linkspeed %dMbps, %s Duplex\n",
+ dev->name, lp->linkspeed, lp->fullduplex ? "Full" : "Half");
+
+ return 0;
+
+err_out_iounmap:
+ iounmap((void *) dev->base_addr);
+err_out:
+ free_netdev(dev);
+ return ret;
+}
+
+
+static int
+tc35815_init_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ int i;
+ unsigned long fd_addr;
+
+ if (!lp->fd_buf) {
+ if (sizeof(struct FDesc) +
+ sizeof(struct BDesc) * RX_BUF_PAGES +
+ sizeof(struct FDesc) * RX_FD_NUM +
+ sizeof(struct TxFD) * TX_FD_NUM > PAGE_SIZE * FD_PAGE_NUM) {
+ printk(KERN_WARNING "%s: Invalid Queue Size.\n", dev->name);
+ return -ENOMEM;
+ }
+
+ if ((lp->fd_buf = (void *)__get_free_pages(GFP_KERNEL, FD_PAGE_ORDER)) == 0)
+ return -ENOMEM;
+ for (i = 0; i < RX_BUF_PAGES; i++) {
+ if ((lp->data_buf[i] = (void *)get_zeroed_page(GFP_KERNEL)) == 0) {
+ while (--i >= 0) {
+ free_page((unsigned long)lp->data_buf[i]);
+ lp->data_buf[i] = 0;
+ }
+ free_page((unsigned long)lp->fd_buf);
+ lp->fd_buf = 0;
+ return -ENOMEM;
+ }
+#ifdef __mips__
+ dma_cache_wback_inv((unsigned long)lp->data_buf[i], PAGE_SIZE * FD_PAGE_NUM);
+#endif
+ }
+#ifdef __mips__
+ dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
+#endif
+ } else {
+ clear_page(lp->fd_buf);
+#ifdef __mips__
+ dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
+#endif
+ }
+#ifdef __mips__
+ fd_addr = (unsigned long)vtonocache(lp->fd_buf);
+#else
+ fd_addr = (unsigned long)lp->fd_buf;
+#endif
+
+ /* Free Descriptors (for Receive) */
+ lp->rfd_base = (struct RxFD *)fd_addr;
+ fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
+ for (i = 0; i < RX_FD_NUM; i++) {
+ lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
+ }
+ lp->rfd_cur = lp->rfd_base;
+ lp->rfd_limit = (struct RxFD *)(fd_addr -
+ sizeof(struct FDesc) -
+ sizeof(struct BDesc) * 30);
+
+ /* Transmit Descriptors */
+ lp->tfd_base = (struct TxFD *)fd_addr;
+ fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
+ for (i = 0; i < TX_FD_NUM; i++) {
+ lp->tfd_base[i].fd.FDNext = cpu_to_le32(virt_to_bus(&lp->tfd_base[i+1]));
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0);
+ lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
+ }
+ lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(virt_to_bus(&lp->tfd_base[0]));
+ lp->tfd_start = 0;
+ lp->tfd_end = 0;
+
+ /* Buffer List (for Receive) */
+ lp->fbl_ptr = (struct FrFD *)fd_addr;
+ lp->fbl_ptr->fd.FDNext = cpu_to_le32(virt_to_bus(lp->fbl_ptr));
+ lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_PAGES | FD_CownsFD);
+ for (i = 0; i < RX_BUF_PAGES; i++) {
+ lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(virt_to_bus(lp->data_buf[i]));
+ /* BDID is index of FrFD.bd[] */
+ lp->fbl_ptr->bd[i].BDCtl =
+ cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | PAGE_SIZE);
+ }
+ lp->fbl_curid = 0;
+
+ return 0;
+}
+
+static void
+tc35815_clear_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ int i;
+
+ for (i = 0; i < TX_FD_NUM; i++) {
+ struct sk_buff *skb = (struct sk_buff *)
+ le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0);
+ }
+
+ tc35815_init_queues(dev);
+}
+
+static void
+tc35815_free_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ int i;
+
+ if (lp->tfd_base) {
+ for (i = 0; i < TX_FD_NUM; i++) {
+ struct sk_buff *skb = (struct sk_buff *)
+ le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0);
+ }
+ }
+
+ lp->rfd_base = NULL;
+ lp->rfd_limit = NULL;
+ lp->rfd_cur = NULL;
+ lp->fbl_ptr = NULL;
+
+ for (i = 0; i < RX_BUF_PAGES; i++) {
+ if (lp->data_buf[i])
+ free_page((unsigned long)lp->data_buf[i]);
+ lp->data_buf[i] = 0;
+ }
+ if (lp->fd_buf)
+ free_pages((unsigned long)lp->fd_buf, FD_PAGE_ORDER); /* allocated with __get_free_pages() */
+ lp->fd_buf = NULL;
+}
+
+static void
+dump_txfd(struct TxFD *fd)
+{
+ printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ printk("BD: ");
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd.BuffData),
+ le32_to_cpu(fd->bd.BDCtl));
+ printk("\n");
+}
+
+static int
+dump_rxfd(struct RxFD *fd)
+{
+ int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
+ if (bd_count > 8)
+ bd_count = 8;
+ printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
+ return 0;
+ printk("BD: ");
+ for (i = 0; i < bd_count; i++)
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd[i].BuffData),
+ le32_to_cpu(fd->bd[i].BDCtl));
+ printk("\n");
+ return bd_count;
+}
+
+static void
+dump_frfd(struct FrFD *fd)
+{
+ int i;
+ printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ printk("BD: ");
+ for (i = 0; i < RX_BUF_PAGES; i++)
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd[i].BuffData),
+ le32_to_cpu(fd->bd[i].BDCtl));
+ printk("\n");
+}
+
+static void
+panic_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ int i;
+
+ printk("TxFD base %p, start %d, end %d\n",
+ lp->tfd_base, lp->tfd_start, lp->tfd_end);
+ printk("RxFD base %p limit %p cur %p\n",
+ lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
+ printk("FrFD %p\n", lp->fbl_ptr);
+ for (i = 0; i < TX_FD_NUM; i++)
+ dump_txfd(&lp->tfd_base[i]);
+ for (i = 0; i < RX_FD_NUM; i++) {
+ int bd_count = dump_rxfd(&lp->rfd_base[i]);
+ i += (bd_count + 1) / 2; /* skip BDs */
+ }
+ dump_frfd(lp->fbl_ptr);
+ panic("%s: Illegal queue state.", dev->name);
+}
+
+#if 0
+static void print_buf(char *add, int length)
+{
+ int i;
+ int len = length;
+
+ printk("print_buf(%08x)(%x)\n", (unsigned int) add,length);
+
+ if (len > 100)
+ len = 100;
+ for (i = 0; i < len; i++) {
+ printk(" %2.2X", (unsigned char) add[i]);
+ if (!(i % 16))
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+static void print_eth(char *add)
+{
+ int i;
+
+ printk("print_eth(%08x)\n", (unsigned int) add);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i + 6]);
+ printk(" =>");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i]);
+ printk(" : %2.2X%2.2X\n", (unsigned char) add[12], (unsigned char) add[13]);
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+tc35815_open(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ /*
+ * This is used if the interrupt line can be turned off (shared).
+ * See 3c503.c for an example of selecting the IRQ at config-time.
+ */
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &tc35815_interrupt, SA_SHIRQ, cardname, dev)) {
+ return -EAGAIN;
+ }
+
+ tc35815_chip_reset(dev);
+
+ if (tc35815_init_queues(dev) != 0) {
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ tc35815_chip_init(dev);
+
+ lp->tbusy = 0;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void tc35815_tx_timeout(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs *)dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ printk(KERN_WARNING "%s: transmit timed out, status %#lx\n",
+ dev->name, tc_readl(&tr->Tx_Stat));
+ /* Try to restart the adaptor. */
+ tc35815_chip_reset(dev);
+ tc35815_clear_queues(dev);
+ tc35815_chip_init(dev);
+ lp->tbusy=0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs *)dev->base_addr;
+
+ if (netif_queue_stopped(dev)) {
+ /*
+ * If we get here, some higher level has decided we are broken.
+ * There should really be a "kick me" function call instead.
+ */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk(KERN_WARNING "%s: transmit timed out, status %#lx\n",
+ dev->name, tc_readl(&tr->Tx_Stat));
+ /* Try to restart the adaptor. */
+ tc35815_chip_reset(dev);
+ tc35815_clear_queues(dev);
+ tc35815_chip_init(dev);
+ lp->tbusy=0;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ }
+
+ /*
+ * Block a timer-based transmit from overlapping. This could be done
+ * more cleanly with atomic_swap(1, lp->tbusy), but set_bit() works as well.
+ */
+ if (test_and_set_bit(0, (void*)&lp->tbusy) != 0) {
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ dev_kfree_skb_any(skb);
+ } else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ struct TxFD *txfd = &lp->tfd_base[lp->tfd_start];
+ unsigned long flags;
+ lp->stats.tx_bytes += skb->len;
+
+
+#ifdef __mips__
+ dma_cache_wback_inv((unsigned long)buf, length);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* failsafe... */
+ if (lp->tfd_start != lp->tfd_end)
+ tc35815_txdone(dev);
+
+
+ txfd->bd.BuffData = cpu_to_le32(virt_to_bus(buf));
+
+ txfd->bd.BDCtl = cpu_to_le32(length);
+ txfd->fd.FDSystem = cpu_to_le32((__u32)skb);
+ txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));
+
+ if (lp->tfd_start == lp->tfd_end) {
+ /* Start DMA Transmitter. */
+ txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
+#ifdef GATHER_TXINT
+ txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
+#endif
+ if (tc35815_debug > 2) {
+ printk("%s: starting TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ if (tc35815_debug > 3)
+ print_eth(buf);
+ }
+ tc_writel(virt_to_bus(txfd), &tr->TxFrmPtr);
+ } else {
+ txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
+ if (tc35815_debug > 2) {
+ printk("%s: queueing TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ if (tc35815_debug > 3)
+ print_eth(buf);
+ }
+ }
+ lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
+
+ dev->trans_start = jiffies;
+
+ if ((lp->tfd_start + 1) % TX_FD_NUM != lp->tfd_end) {
+ /* we can send another packet */
+ lp->tbusy = 0;
+ netif_start_queue(dev);
+ } else {
+ netif_stop_queue(dev);
+ if (tc35815_debug > 1)
+ printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return 0;
+}
+
+#define FATAL_ERROR_INT \
+ (Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
+static void tc35815_fatal_error_interrupt(struct net_device *dev, int status)
+{
+ static int count;
+ printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
+ dev->name, status);
+
+ if (status & Int_IntPCI)
+ printk(" IntPCI");
+ if (status & Int_DmParErr)
+ printk(" DmParErr");
+ if (status & Int_IntNRAbt)
+ printk(" IntNRAbt");
+ printk("\n");
+ if (count++ > 100)
+ panic("%s: Too many fatal errors.", dev->name);
+ printk(KERN_WARNING "%s: Resetting %s...\n", dev->name, cardname);
+ /* Try to restart the adaptor. */
+ tc35815_chip_reset(dev);
+ tc35815_clear_queues(dev);
+ tc35815_chip_init(dev);
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t tc35815_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct tc35815_regs *tr;
+ struct tc35815_local *lp;
+ int status, boguscount = 0;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
+ return IRQ_NONE;
+ }
+
+ tr = (struct tc35815_regs*)dev->base_addr;
+ lp = dev->priv;
+
+ do {
+ status = tc_readl(&tr->Int_Src);
+ if (status == 0)
+ break;
+ handled = 1;
+ tc_writel(status, &tr->Int_Src); /* write to clear */
+
+ /* Fatal errors... */
+ if (status & FATAL_ERROR_INT) {
+ tc35815_fatal_error_interrupt(dev, status);
+ break;
+ }
+ /* recoverable errors */
+ if (status & Int_IntFDAEx) {
+ /* disable FDAEx int. (until we make room...) */
+ tc_writel(tc_readl(&tr->Int_En) & ~Int_FDAExEn, &tr->Int_En);
+ printk(KERN_WARNING
+ "%s: Free Descriptor Area Exhausted (%#x).\n",
+ dev->name, status);
+ lp->stats.rx_dropped++;
+ }
+ if (status & Int_IntBLEx) {
+ /* disable BLEx int. (until we make room...) */
+ tc_writel(tc_readl(&tr->Int_En) & ~Int_BLExEn, &tr->Int_En);
+ printk(KERN_WARNING
+ "%s: Buffer List Exhausted (%#x).\n",
+ dev->name, status);
+ lp->stats.rx_dropped++;
+ }
+ if (status & Int_IntExBD) {
+ printk(KERN_WARNING
+ "%s: Excessive Buffer Descriptors (%#x).\n",
+ dev->name, status);
+ lp->stats.rx_length_errors++;
+ }
+ /* normal notification */
+ if (status & Int_IntMacRx) {
+ /* Got a packet(s). */
+ lp->lstats.rx_ints++;
+ tc35815_rx(dev);
+ }
+ if (status & Int_IntMacTx) {
+ lp->lstats.tx_ints++;
+ tc35815_txdone(dev);
+ }
+ } while (++boguscount < 20) ;
+
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+tc35815_rx(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+ unsigned int fdctl;
+ int i;
+ int buf_free_count = 0;
+ int fd_free_count = 0;
+
+ while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
+ int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
+ int pkt_len = fdctl & FD_FDLength_MASK;
+ struct RxFD *next_rfd;
+ int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
+
+ if (tc35815_debug > 2)
+ dump_rxfd(lp->rfd_cur);
+ if (status & Rx_Good) {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ unsigned char *data;
+ int cur_bd, offset;
+
+ lp->stats.rx_bytes += pkt_len;
+
+ skb = dev_alloc_skb(pkt_len + 2); /* +2: for reserve */
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, 2); /* 16 bit alignment */
+ skb->dev = dev;
+
+ data = skb_put(skb, pkt_len);
+
+ /* copy from receive buffer */
+ cur_bd = 0;
+ offset = 0;
+ while (offset < pkt_len && cur_bd < bd_count) {
+ int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
+ BD_BuffLength_MASK;
+ void *rxbuf =
+ bus_to_virt(le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData));
+#ifdef __mips__
+ dma_cache_inv((unsigned long)rxbuf, len);
+#endif
+ memcpy(data + offset, rxbuf, len);
+ offset += len;
+ cur_bd++;
+ }
+#if 0
+ print_buf(data,pkt_len);
+#endif
+ if (tc35815_debug > 3)
+ print_eth(data);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ } else {
+ lp->stats.rx_errors++;
+ /* WORKAROUND: LongErr and CRCErr mean Overflow. */
+ if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
+ status &= ~(Rx_LongErr|Rx_CRCErr);
+ status |= Rx_Over;
+ }
+ if (status & Rx_LongErr) lp->stats.rx_length_errors++;
+ if (status & Rx_Over) lp->stats.rx_fifo_errors++;
+ if (status & Rx_CRCErr) lp->stats.rx_crc_errors++;
+ if (status & Rx_Align) lp->stats.rx_frame_errors++;
+ }
+
+ if (bd_count > 0) {
+ /* put Free Buffer back to controller */
+ int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
+ unsigned char id =
+ (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
+ if (id >= RX_BUF_PAGES) {
+ printk("%s: invalid BDID.\n", dev->name);
+ panic_queues(dev);
+ }
+ /* free old buffers */
+ while (lp->fbl_curid != id) {
+ bdctl = le32_to_cpu(lp->fbl_ptr->bd[lp->fbl_curid].BDCtl);
+ if (bdctl & BD_CownsBD) {
+ printk("%s: Freeing invalid BD.\n",
+ dev->name);
+ panic_queues(dev);
+ }
+ /* pass BD to controller */
+ /* Note: BDLength was modified by chip. */
+ lp->fbl_ptr->bd[lp->fbl_curid].BDCtl =
+ cpu_to_le32(BD_CownsBD |
+ (lp->fbl_curid << BD_RxBDID_SHIFT) |
+ PAGE_SIZE);
+ lp->fbl_curid =
+ (lp->fbl_curid + 1) % RX_BUF_PAGES;
+ if (tc35815_debug > 2) {
+ printk("%s: Entering new FBD %d\n",
+ dev->name, lp->fbl_curid);
+ dump_frfd(lp->fbl_ptr);
+ }
+ buf_free_count++;
+ }
+ }
+
+ /* put RxFD back to controller */
+ next_rfd = bus_to_virt(le32_to_cpu(lp->rfd_cur->fd.FDNext));
+#ifdef __mips__
+ next_rfd = (struct RxFD *)vtonocache(next_rfd);
+#endif
+ if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
+ printk("%s: RxFD FDNext invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+ for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
+ /* pass FD to controller */
+ lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); /* for debug */
+ lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
+ lp->rfd_cur++;
+ fd_free_count++;
+ }
+
+ lp->rfd_cur = next_rfd;
+ }
+
+ /* re-enable BL/FDA Exhaust interrupts. */
+ if (fd_free_count) {
+ tc_writel(tc_readl(&tr->Int_En) | Int_FDAExEn, &tr->Int_En);
+ if (buf_free_count)
+ tc_writel(tc_readl(&tr->Int_En) | Int_BLExEn, &tr->Int_En);
+ }
+}
+
+#ifdef NO_CHECK_CARRIER
+#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
+#else
+#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
+#endif
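+/*
+ * When NO_CHECK_CARRIER is defined, a lost carrier (Tx_NCarr) is not
+ * counted as a transmit error; tc35815_check_tx_stat() below also
+ * ignores it in full-duplex operation.
+ */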
+
+static void
+tc35815_check_tx_stat(struct net_device *dev, int status)
+{
+ struct tc35815_local *lp = dev->priv;
+ const char *msg = NULL;
+
+ /* count collisions */
+ if (status & Tx_ExColl)
+ lp->stats.collisions += 16;
+ if (status & Tx_TxColl_MASK)
+ lp->stats.collisions += status & Tx_TxColl_MASK;
+
+ /* WORKAROUND: ignore LostCrS in full duplex operation */
+ if (lp->fullduplex)
+ status &= ~Tx_NCarr;
+
+ if (!(status & TX_STA_ERR)) {
+ /* no error. */
+ lp->stats.tx_packets++;
+ return;
+ }
+
+ lp->stats.tx_errors++;
+ if (status & Tx_ExColl) {
+ lp->stats.tx_aborted_errors++;
+ msg = "Excessive Collision.";
+ }
+ if (status & Tx_Under) {
+ lp->stats.tx_fifo_errors++;
+ msg = "Tx FIFO Underrun.";
+ }
+ if (status & Tx_Defer) {
+ lp->stats.tx_fifo_errors++;
+ msg = "Excessive Deferral.";
+ }
+#ifndef NO_CHECK_CARRIER
+ if (status & Tx_NCarr) {
+ lp->stats.tx_carrier_errors++;
+ msg = "Lost Carrier Sense.";
+ }
+#endif
+ if (status & Tx_LateColl) {
+ lp->stats.tx_aborted_errors++;
+ msg = "Late Collision.";
+ }
+ if (status & Tx_TxPar) {
+ lp->stats.tx_fifo_errors++;
+ msg = "Transmit Parity Error.";
+ }
+ if (status & Tx_SQErr) {
+ lp->stats.tx_heartbeat_errors++;
+ msg = "Signal Quality Error.";
+ }
+ if (msg)
+ printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
+}
+
+static void
+tc35815_txdone(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+ struct TxFD *txfd;
+ unsigned int fdctl;
+ int num_done = 0;
+
+ txfd = &lp->tfd_base[lp->tfd_end];
+ while (lp->tfd_start != lp->tfd_end &&
+ !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
+ int status = le32_to_cpu(txfd->fd.FDStat);
+ struct sk_buff *skb;
+ unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
+
+ if (tc35815_debug > 2) {
+ printk("%s: complete TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ }
+ tc35815_check_tx_stat(dev, status);
+
+ skb = (struct sk_buff *)le32_to_cpu(txfd->fd.FDSystem);
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ }
+ txfd->fd.FDSystem = cpu_to_le32(0);
+
+ num_done++;
+ lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
+ txfd = &lp->tfd_base[lp->tfd_end];
+ if ((fdnext & ~FD_Next_EOL) != virt_to_bus(txfd)) {
+ printk("%s: TxFD FDNext invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+ if (fdnext & FD_Next_EOL) {
+ /* The DMA transmitter has stopped at the end of the FD chain. */
+ if (lp->tfd_end != lp->tfd_start) {
+ int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
+ struct TxFD* txhead = &lp->tfd_base[head];
+ int qlen = (lp->tfd_start + TX_FD_NUM
+ - lp->tfd_end) % TX_FD_NUM;
+
+ if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
+ printk("%s: TxFD FDCtl invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+ /* log max queue length */
+ if (lp->lstats.max_tx_qlen < qlen)
+ lp->lstats.max_tx_qlen = qlen;
+
+
+ /* start DMA Transmitter again */
+ txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
+#ifdef GATHER_TXINT
+ txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
+#endif
+ if (tc35815_debug > 2) {
+ printk("%s: start TxFD on queue.\n",
+ dev->name);
+ dump_txfd(txfd);
+ }
+ tc_writel(virt_to_bus(txfd), &tr->TxFrmPtr);
+ }
+ break;
+ }
+ }
+
+ if (num_done > 0 && lp->tbusy) {
+ lp->tbusy = 0;
+ netif_start_queue(dev);
+ }
+}
+
+/* The inverse routine to tc35815_open(). */
+static int
+tc35815_close(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+
+ lp->tbusy = 1;
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx here. */
+
+ tc35815_chip_reset(dev);
+ free_irq(dev->irq, dev);
+
+ tc35815_free_queues(dev);
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Update the statistics from the device registers. */
+ lp->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return &lp->stats;
+}
+
+static void tc35815_set_cam_entry(struct tc35815_regs *tr, int index, unsigned char *addr)
+{
+ int cam_index = index * 6;
+ unsigned long cam_data;
+ unsigned long saved_addr;
+ saved_addr = tc_readl(&tr->CAM_Adr);
+
+ if (tc35815_debug > 1) {
+ int i;
+ printk(KERN_DEBUG "%s: CAM %d:", cardname, index);
+ for (i = 0; i < 6; i++)
+ printk(" %02x", addr[i]);
+ printk("\n");
+ }
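+ /*
+ * CAM entries are 6 bytes each and packed back to back, while the CAM
+ * is accessed as 32-bit words; odd-numbered entries start in the
+ * middle of a word, so the words they share with neighbouring entries
+ * are updated with read-modify-write.
+ */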
+ if (index & 1) {
+ /* read modify write */
+ tc_writel(cam_index - 2, &tr->CAM_Adr);
+ cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
+ cam_data |= addr[0] << 8 | addr[1];
+ tc_writel(cam_data, &tr->CAM_Data);
+ /* write whole word */
+ tc_writel(cam_index + 2, &tr->CAM_Adr);
+ cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ tc_writel(cam_data, &tr->CAM_Data);
+ } else {
+ /* write whole word */
+ tc_writel(cam_index, &tr->CAM_Adr);
+ cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ tc_writel(cam_data, &tr->CAM_Data);
+ /* read modify write */
+ tc_writel(cam_index + 4, &tr->CAM_Adr);
+ cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
+ cam_data |= addr[4] << 24 | (addr[5] << 16);
+ tc_writel(cam_data, &tr->CAM_Data);
+ }
+
+ if (tc35815_debug > 2) {
+ int i;
+ for (i = cam_index / 4; i < cam_index / 4 + 2; i++) {
+ tc_writel(i * 4, &tr->CAM_Adr);
+ printk("CAM 0x%x: %08lx\n",
+ i * 4, tc_readl(&tr->CAM_Data));
+ }
+ }
+ tc_writel(saved_addr, &tr->CAM_Adr);
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
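+/*
+ * Note: this driver selects the mode from dev->flags (IFF_PROMISC,
+ * IFF_ALLMULTI) and dev->mc_count rather than from a num_addrs argument.
+ */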
+static void
+tc35815_set_multicast_list(struct net_device *dev)
+{
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Enable promiscuous mode */
+ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
+ }
+ else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3)
+ {
+ /* CAM 0, 1, 20 are reserved. */
+ /* Disable promiscuous mode, use normal mode. */
+ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
+ }
+ else if(dev->mc_count)
+ {
+ struct dev_mc_list* cur_addr = dev->mc_list;
+ int i;
+ int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
+
+ tc_writel(0, &tr->CAM_Ctl);
+ /* Walk the address list, and load the filter */
+ for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ if (!cur_addr)
+ break;
+ /* entries 0 and 1 are reserved. */
+ tc35815_set_cam_entry(tr, i + 2, cur_addr->dmi_addr);
+ ena_bits |= CAM_Ena_Bit(i + 2);
+ }
+ tc_writel(ena_bits, &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+ }
+ else {
+ tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+ }
+}
+
+static unsigned long tc_phy_read(struct net_device *dev, struct tc35815_regs *tr, int phy, int phy_reg)
+{
+ struct tc35815_local *lp = dev->priv;
+ unsigned long data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tc_writel(MD_CA_Busy | (phy << 5) | phy_reg, &tr->MD_CA);
+ while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
+ ;
+ data = tc_readl(&tr->MD_Data);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return data;
+}
+
+static void tc_phy_write(struct net_device *dev, unsigned long d, struct tc35815_regs *tr, int phy, int phy_reg)
+{
+ struct tc35815_local *lp = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tc_writel(d, &tr->MD_Data);
+ tc_writel(MD_CA_Busy | MD_CA_Wr | (phy << 5) | phy_reg, &tr->MD_CA);
+ while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
+ ;
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static void tc35815_phy_chip_init(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+ static int first = 1;
+ unsigned short ctl;
+
+ if (first) {
+ unsigned short id0, id1;
+ int count;
+ first = 0;
+
+ /* first data written to the PHY will be an ID number */
+ tc_phy_write(dev, 0, tr, 0, MII_CONTROL); /* ID:0 */
+#if 0
+ tc_phy_write(dev, MIICNTL_RESET, tr, 0, MII_CONTROL);
+ printk(KERN_INFO "%s: Resetting PHY...", dev->name);
+ while (tc_phy_read(dev, tr, 0, MII_CONTROL) & MIICNTL_RESET)
+ ;
+ printk("\n");
+ tc_phy_write(dev, MIICNTL_AUTO|MIICNTL_SPEED|MIICNTL_FDX, tr, 0,
+ MII_CONTROL);
+#endif
+ id0 = tc_phy_read(dev, tr, 0, MII_PHY_ID0);
+ id1 = tc_phy_read(dev, tr, 0, MII_PHY_ID1);
+ printk(KERN_DEBUG "%s: PHY ID %04x %04x\n", dev->name,
+ id0, id1);
+ if (lp->option & TC35815_OPT_10M) {
+ lp->linkspeed = 10;
+ lp->fullduplex = (lp->option & TC35815_OPT_FULLDUP) != 0;
+ } else if (lp->option & TC35815_OPT_100M) {
+ lp->linkspeed = 100;
+ lp->fullduplex = (lp->option & TC35815_OPT_FULLDUP) != 0;
+ } else {
+ /* auto negotiation */
+ unsigned long neg_result;
+ tc_phy_write(dev, MIICNTL_AUTO | MIICNTL_RST_AUTO, tr, 0, MII_CONTROL);
+ printk(KERN_INFO "%s: Auto Negotiation...", dev->name);
+ count = 0;
+ while (!(tc_phy_read(dev, tr, 0, MII_STATUS) & MIISTAT_AUTO_DONE)) {
+ if (count++ > 5000) {
+ printk(" failed. Assume 10Mbps\n");
+ lp->linkspeed = 10;
+ lp->fullduplex = 0;
+ goto done;
+ }
+ if (count % 512 == 0)
+ printk(".");
+ mdelay(1);
+ }
+ printk(" done.\n");
+ neg_result = tc_phy_read(dev, tr, 0, MII_ANLPAR);
+ if (neg_result & (MII_AN_TX_FDX | MII_AN_TX_HDX))
+ lp->linkspeed = 100;
+ else
+ lp->linkspeed = 10;
+ if (neg_result & (MII_AN_TX_FDX | MII_AN_10_FDX))
+ lp->fullduplex = 1;
+ else
+ lp->fullduplex = 0;
+ done:
+ ;
+ }
+ }
+
+ ctl = 0;
+ if (lp->linkspeed == 100)
+ ctl |= MIICNTL_SPEED;
+ if (lp->fullduplex)
+ ctl |= MIICNTL_FDX;
+ tc_phy_write(dev, ctl, tr, 0, MII_CONTROL);
+
+ if (lp->fullduplex) {
+ tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_FullDup, &tr->MAC_Ctl);
+ }
+}
+
+static void tc35815_chip_reset(struct net_device *dev)
+{
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+
+ /* reset the controller */
+ tc_writel(MAC_Reset, &tr->MAC_Ctl);
+ while (tc_readl(&tr->MAC_Ctl) & MAC_Reset)
+ ;
+
+ tc_writel(0, &tr->MAC_Ctl);
+
+ /* initialize registers to default value */
+ tc_writel(0, &tr->DMA_Ctl);
+ tc_writel(0, &tr->TxThrsh);
+ tc_writel(0, &tr->TxPollCtr);
+ tc_writel(0, &tr->RxFragSize);
+ tc_writel(0, &tr->Int_En);
+ tc_writel(0, &tr->FDA_Bas);
+ tc_writel(0, &tr->FDA_Lim);
+ tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */
+ tc_writel(0, &tr->CAM_Ctl);
+ tc_writel(0, &tr->Tx_Ctl);
+ tc_writel(0, &tr->Rx_Ctl);
+ tc_writel(0, &tr->CAM_Ena);
+ (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */
+
+}
+
+static void tc35815_chip_init(struct net_device *dev)
+{
+ struct tc35815_local *lp = dev->priv;
+ struct tc35815_regs *tr = (struct tc35815_regs*)dev->base_addr;
+ unsigned long flags;
+ unsigned long txctl = TX_CTL_CMD;
+
+ tc35815_phy_chip_init(dev);
+
+ /* load station address to CAM */
+ tc35815_set_cam_entry(tr, CAM_ENTRY_SOURCE, dev->dev_addr);
+
+ /* Enable CAM (broadcast and unicast) */
+ tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
+
+ tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize); /* Packing */
+ tc_writel(0, &tr->TxPollCtr); /* Batch mode */
+ tc_writel(TX_THRESHOLD, &tr->TxThrsh);
+ tc_writel(INT_EN_CMD, &tr->Int_En);
+
+ /* set queues */
+ tc_writel(virt_to_bus(lp->rfd_base), &tr->FDA_Bas);
+ tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
+ &tr->FDA_Lim);
+ /*
+ * Activation method:
+ * First, enable the MAC Transmitter and the DMA Receive circuits.
+ * Then enable the DMA Transmitter and the MAC Receive circuits.
+ */
+ tc_writel(virt_to_bus(lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */
+ tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
+ /* start MAC transmitter */
+ /* WORKAROUND: ignore LostCrS in full duplex operation */
+ if (lp->fullduplex)
+ txctl = TX_CTL_CMD & ~Tx_EnLCarr;
+#ifdef GATHER_TXINT
+ txctl &= ~Tx_EnComp; /* disable global tx completion int. */
+#endif
+ tc_writel(txctl, &tr->Tx_Ctl);
+#if 0 /* No need for polling */
+ tc_writel(virt_to_bus(lp->tfd_base), &tr->TxFrmPtr); /* start DMA transmitter */
+#endif
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* XXX */
+void
+tc35815_killall(void)
+{
+ struct net_device *dev;
+
+ for (dev = root_tc35815_dev; dev; dev = ((struct tc35815_local *)dev->priv)->next_module) {
+ if (dev->flags&IFF_UP){
+ dev->stop(dev);
+ }
+ }
+}
+
+static struct pci_driver tc35815_driver = {
+ .name = TC35815_MODULE_NAME,
+ .probe = tc35815_probe,
+ .remove = NULL,
+ .id_table = tc35815_pci_tbl,
+};
+
+static int __init tc35815_init_module(void)
+{
+ return pci_module_init(&tc35815_driver);
+}
+
+static void __exit tc35815_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (root_tc35815_dev) {
+ struct net_device *dev = root_tc35815_dev;
+ next_dev = ((struct tc35815_local *)dev->priv)->next_module;
+ iounmap((void *)(dev->base_addr));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ root_tc35815_dev = next_dev;
+ }
+}
+module_init(tc35815_init_module);
+module_exit(tc35815_cleanup_module);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
new file mode 100644
index 000000000000..12de80884b1a
--- /dev/null
+++ b/drivers/net/tg3.c
@@ -0,0 +1,9083 @@
+/*
+ * tg3.c: Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005 Broadcom Corporation.
+ *
+ * Firmware is:
+ * Copyright (C) 2000-2003 Broadcom Corporation.
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
+
+#include <net/checksum.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_SPARC64
+#include <asm/idprom.h>
+#include <asm/oplib.h>
+#include <asm/pbm.h>
+#endif
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define TG3_VLAN_TAG_USED 1
+#else
+#define TG3_VLAN_TAG_USED 0
+#endif
+
+#ifdef NETIF_F_TSO
+#define TG3_TSO_SUPPORT 1
+#else
+#define TG3_TSO_SUPPORT 0
+#endif
+
+#include "tg3.h"
+
+#define DRV_MODULE_NAME "tg3"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "3.25"
+#define DRV_MODULE_RELDATE "March 24, 2005"
+
+#define TG3_DEF_MAC_MODE 0
+#define TG3_DEF_RX_MODE 0
+#define TG3_DEF_TX_MODE 0
+#define TG3_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define TG3_TX_TIMEOUT (5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define TG3_MIN_MTU 60
+#define TG3_MAX_MTU(tp) \
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
+
+/* These numbers seem to be hard coded in the NIC firmware somehow.
+ * You can't change the ring sizes, but you can change where you place
+ * them in the NIC onboard memory.
+ */
+#define TG3_RX_RING_SIZE 512
+#define TG3_DEF_RX_RING_PENDING 200
+#define TG3_RX_JUMBO_RING_SIZE 256
+#define TG3_DEF_RX_JUMBO_RING_PENDING 100
+
+/* Do not place this n-ring entries value into the tp struct itself,
+ * we really want to expose these constants to GCC so that modulo et
+ * al. operations are done with shifts and masks instead of with
+ * hw multiply/modulo instructions. Another solution would be to
+ * replace things like '% foo' with '& (foo - 1)'.
+ */
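+/* For example, TG3_TX_RING_SIZE is a power of two, so the NEXT_TX()
+ * macro below can advance a ring index with '& (TG3_TX_RING_SIZE - 1)'
+ * instead of a hardware modulo.
+ */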
+#define TG3_RX_RCB_RING_SIZE(tp) \
+ ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
+
+#define TG3_TX_RING_SIZE 512
+#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
+
+#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
+ TG3_RX_RING_SIZE)
+#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
+ TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
+ TG3_RX_RCB_RING_SIZE(tp))
+#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
+ TG3_TX_RING_SIZE)
+#define TX_RING_GAP(TP) \
+ (TG3_TX_RING_SIZE - (TP)->tx_pending)
+#define TX_BUFFS_AVAIL(TP) \
+ (((TP)->tx_cons <= (TP)->tx_prod) ? \
+ (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
+ (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
+#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+
+#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
+#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
+
+/* number of ETHTOOL_GSTATS u64's */
+#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+
+static struct pci_device_id tg3_pci_tbl[] = {
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+
+static struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[TG3_NUM_STATS] = {
+ { "rx_octets" },
+ { "rx_fragments" },
+ { "rx_ucast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_bcast_packets" },
+ { "rx_fcs_errors" },
+ { "rx_align_errors" },
+ { "rx_xon_pause_rcvd" },
+ { "rx_xoff_pause_rcvd" },
+ { "rx_mac_ctrl_rcvd" },
+ { "rx_xoff_entered" },
+ { "rx_frame_too_long_errors" },
+ { "rx_jabbers" },
+ { "rx_undersize_packets" },
+ { "rx_in_length_errors" },
+ { "rx_out_length_errors" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+ { "rx_1523_to_2047_octet_packets" },
+ { "rx_2048_to_4095_octet_packets" },
+ { "rx_4096_to_8191_octet_packets" },
+ { "rx_8192_to_9022_octet_packets" },
+
+ { "tx_octets" },
+ { "tx_collisions" },
+
+ { "tx_xon_sent" },
+ { "tx_xoff_sent" },
+ { "tx_flow_control" },
+ { "tx_mac_errors" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+ { "tx_deferred" },
+ { "tx_excessive_collisions" },
+ { "tx_late_collisions" },
+ { "tx_collide_2times" },
+ { "tx_collide_3times" },
+ { "tx_collide_4times" },
+ { "tx_collide_5times" },
+ { "tx_collide_6times" },
+ { "tx_collide_7times" },
+ { "tx_collide_8times" },
+ { "tx_collide_9times" },
+ { "tx_collide_10times" },
+ { "tx_collide_11times" },
+ { "tx_collide_12times" },
+ { "tx_collide_13times" },
+ { "tx_collide_14times" },
+ { "tx_collide_15times" },
+ { "tx_ucast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_bcast_packets" },
+ { "tx_carrier_sense_errors" },
+ { "tx_discards" },
+ { "tx_errors" },
+
+ { "dma_writeq_full" },
+ { "dma_write_prioq_full" },
+ { "rxbds_empty" },
+ { "rx_discards" },
+ { "rx_errors" },
+ { "rx_threshold_hit" },
+
+ { "dma_readq_full" },
+ { "dma_read_prioq_full" },
+ { "tx_comp_queue_full" },
+
+ { "ring_set_send_prod_index" },
+ { "ring_status_update" },
+ { "nic_irqs" },
+ { "nic_avoided_irqs" },
+ { "nic_tx_threshold_hit" }
+};
+
+static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+ if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ } else {
+ writel(val, tp->regs + off);
+ if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
+ readl(tp->regs + off);
+ }
+}
+
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
+{
+ if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ } else {
+ void __iomem *dest = tp->regs + off;
+ writel(val, dest);
+ readl(dest); /* always flush PCI write */
+ }
+}
+
+static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ void __iomem *mbox = tp->regs + off;
+ writel(val, mbox);
+ if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+ readl(mbox);
+}
+
+static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ void __iomem *mbox = tp->regs + off;
+ writel(val, mbox);
+ if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
+ writel(val, mbox);
+ if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+ readl(mbox);
+}
+
+#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
+#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
+
+#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
+#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
+#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
+#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
+#define tr32(reg) readl(tp->regs + (reg))
+#define tr16(reg) readw(tp->regs + (reg))
+#define tr8(reg) readb(tp->regs + (reg))
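+/*
+ * tw32() goes through tg3_write_indirect_reg32() above, honouring the
+ * PCIX target and 5701 register-write workarounds; tw32_f() additionally
+ * reads the register back to flush the posted PCI write.
+ */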
+
+static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_disable_ints(struct tg3 *tp)
+{
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+}
+
+static inline void tg3_cond_int(struct tg3 *tp)
+{
+ if (tp->hw_status->status & SD_STATUS_UPDATED)
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+}
+
+static void tg3_enable_ints(struct tg3 *tp)
+{
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+ tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+
+ tg3_cond_int(tp);
+}
+
+/* tg3_restart_ints
+ * similar to tg3_enable_ints, but it can return without flushing the
+ * PIO write which reenables interrupts
+ */
+static void tg3_restart_ints(struct tg3 *tp)
+{
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+ mmiowb();
+
+ tg3_cond_int(tp);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+ netif_poll_disable(tp->dev);
+ netif_tx_disable(tp->dev);
+}
+
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+ netif_wake_queue(tp->dev);
+ /* NOTE: unconditional netif_wake_queue is only appropriate
+ * so long as all callers are assured to have free tx slots
+ * (such as after tg3_init_hw)
+ */
+ netif_poll_enable(tp->dev);
+ tg3_cond_int(tp);
+}
+
+static void tg3_switch_clocks(struct tg3 *tp)
+{
+ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+ u32 orig_clock_ctrl;
+
+ orig_clock_ctrl = clock_ctrl;
+ clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
+ CLOCK_CTRL_CLKRUN_OENABLE |
+ 0x1f);
+ tp->pci_clock_ctrl = clock_ctrl;
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+ tw32_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | CLOCK_CTRL_625_CORE);
+ udelay(40);
+ }
+ } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+ tw32_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl |
+ (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
+ udelay(40);
+ tw32_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | (CLOCK_CTRL_ALTCLK));
+ udelay(40);
+ }
+ tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
+ udelay(40);
+}
+
+#define PHY_BUSY_LOOPS 5000
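+/*
+ * With the udelay(10) per polling iteration in tg3_readphy() and
+ * tg3_writephy() below, this bounds each MDIO transaction to roughly
+ * 50 ms before giving up with -EBUSY.
+ */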
+
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ *val = 0x0;
+
+ frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0) {
+ *val = frame_val & MI_COM_DATA_MASK;
+ ret = 0;
+ }
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (val & MI_COM_DATA_MASK);
+ frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0)
+ ret = 0;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
+static void tg3_phy_set_wirespeed(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
+ return;
+
+ if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
+ tg3_writephy(tp, MII_TG3_AUX_CTRL,
+ (val | (1 << 15) | (1 << 4)));
+}
+
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+ u32 phy_control;
+ int limit, err;
+
+ /* OK, reset it, and poll the BMCR_RESET bit until it
+ * clears or we time out.
+ */
+ phy_control = BMCR_RESET;
+ err = tg3_writephy(tp, MII_BMCR, phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ limit = 5000;
+ while (limit--) {
+ err = tg3_readphy(tp, MII_BMCR, &phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ if ((phy_control & BMCR_RESET) == 0) {
+ udelay(40);
+ break;
+ }
+ udelay(10);
+ }
+ if (limit <= 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int tg3_wait_macro_done(struct tg3 *tp)
+{
+ int limit = 100;
+
+ while (limit--) {
+ u32 tmp32;
+
+ if (!tg3_readphy(tp, 0x16, &tmp32)) {
+ if ((tmp32 & 0x1000) == 0)
+ break;
+ }
+ }
+ if (limit <= 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
+{
+ static const u32 test_pat[4][6] = {
+ { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
+ { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
+ { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
+ { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
+ };
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, 0x16, 0x0002);
+
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
+ test_pat[chan][i]);
+
+ tg3_writephy(tp, 0x16, 0x0202);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, 0x16, 0x0082);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, 0x16, 0x0802);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 6; i += 2) {
+ u32 low, high;
+
+ if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+ tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+ low &= 0x7fff;
+ high &= 0x000f;
+ if (low != test_pat[chan][i] ||
+ high != test_pat[chan][i+1]) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
+
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tg3_phy_reset_chanpat(struct tg3 *tp)
+{
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, 0x16, 0x0002);
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
+ tg3_writephy(tp, 0x16, 0x0202);
+ if (tg3_wait_macro_done(tp))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+{
+ u32 reg32, phy9_orig;
+ int retries, do_phy_reset, err;
+
+ retries = 10;
+ do_phy_reset = 1;
+ do {
+ if (do_phy_reset) {
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+ do_phy_reset = 0;
+ }
+
+ /* Disable transmitter and interrupt. */
+ if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+ continue;
+
+ reg32 |= 0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ /* Set full-duplex, 1000 mbps. */
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
+
+ /* Set to master mode. */
+ if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
+ continue;
+
+ tg3_writephy(tp, MII_TG3_CTRL,
+ (MII_TG3_CTRL_AS_MASTER |
+ MII_TG3_CTRL_ENABLE_AS_MASTER));
+
+ /* Enable SM_DSP_CLOCK and 6dB. */
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+
+ /* Block the PHY control access. */
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
+
+ err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
+ if (!err)
+ break;
+ } while (--retries);
+
+ err = tg3_phy_reset_chanpat(tp);
+ if (err)
+ return err;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, 0x16, 0x0000);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ /* Set Extended packet length bit for jumbo frames */
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
+ }
+ else {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ }
+
+ tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
+
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+ } else if (!err)
+ err = -EBUSY;
+
+ return err;
+}
+
+/* This will reset the tigon3 PHY if there is no valid
+ * link unless the FORCE argument is non-zero.
+ */
+static int tg3_phy_reset(struct tg3 *tp)
+{
+ u32 phy_status;
+ int err;
+
+ err = tg3_readphy(tp, MII_BMSR, &phy_status);
+ err |= tg3_readphy(tp, MII_BMSR, &phy_status);
+ if (err != 0)
+ return -EBUSY;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ err = tg3_phy_reset_5703_4_5(tp);
+ if (err)
+ return err;
+ goto out;
+ }
+
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+
+out:
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ }
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
+ tg3_writephy(tp, 0x1c, 0x8d68);
+ tg3_writephy(tp, 0x1c, 0x8d68);
+ }
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ }
+ /* Set Extended packet length bit (bit 14) on all chips that
+ * support jumbo frames.
+ */
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+ u32 phy_reg;
+
+ /* Set bit 14 with read-modify-write to preserve other bits */
+ if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+ }
+
+ /* Set phy register 0x10 bit 0 to high fifo elasticity to support
+ * jumbo frame transmission.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
+ u32 phy_reg;
+
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ }
+
+ tg3_phy_set_wirespeed(tp);
+ return 0;
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp)
+{
+ struct tg3 *tp_peer = tp;
+
+ if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ tp_peer = pci_get_drvdata(tp->pdev_peer);
+ if (!tp_peer)
+ BUG();
+ }
+
+
+ if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
+ (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1));
+ udelay(100);
+ } else {
+ u32 no_gpio2;
+ u32 grc_local_ctrl;
+
+ if (tp_peer != tp &&
+ (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
+ return;
+
+ /* On 5753 and variants, GPIO2 cannot be used. */
+ no_gpio2 = tp->nic_sram_data_cfg &
+ NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+ grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ GRC_LCLCTRL_GPIO_OUTPUT2;
+ if (no_gpio2) {
+ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT2);
+ }
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl);
+ udelay(100);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl);
+ udelay(100);
+
+ if (!no_gpio2) {
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl);
+ udelay(100);
+ }
+ }
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ if (tp_peer != tp &&
+ (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
+ return;
+
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1));
+ udelay(100);
+
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE1));
+ udelay(100);
+
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1));
+ udelay(100);
+ }
+ }
+}
+
+static int tg3_setup_phy(struct tg3 *, int);
+
+#define RESET_KIND_SHUTDOWN 0
+#define RESET_KIND_INIT 1
+#define RESET_KIND_SUSPEND 2
+
+static void tg3_write_sig_post_reset(struct tg3 *, int);
+static int tg3_halt_cpu(struct tg3 *, u32);
+
+static int tg3_set_power_state(struct tg3 *tp, int state)
+{
+ u32 misc_host_ctrl;
+ u16 power_control, power_caps;
+ int pm = tp->pm_cap;
+
+ /* Make sure register accesses (indirect or otherwise)
+ * will function correctly.
+ */
+ pci_write_config_dword(tp->pdev,
+ TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ pci_read_config_word(tp->pdev,
+ pm + PCI_PM_CTRL,
+ &power_control);
+ power_control |= PCI_PM_CTRL_PME_STATUS;
+ power_control &= ~(PCI_PM_CTRL_STATE_MASK);
+ switch (state) {
+ case 0:
+ power_control |= 0;
+ pci_write_config_word(tp->pdev,
+ pm + PCI_PM_CTRL,
+ power_control);
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(100);
+
+ return 0;
+
+ case 1:
+ power_control |= 1;
+ break;
+
+ case 2:
+ power_control |= 2;
+ break;
+
+ case 3:
+ power_control |= 3;
+ break;
+
+ default:
+ printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
+ "requested.\n",
+ tp->dev->name, state);
+ return -EINVAL;
+ }
+
+ power_control |= PCI_PM_CTRL_PME_ENABLE;
+
+ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
+
+ if (tp->link_config.phy_is_low_power == 0) {
+ tp->link_config.phy_is_low_power = 1;
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+ }
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+ tp->link_config.speed = SPEED_10;
+ tp->link_config.duplex = DUPLEX_HALF;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tg3_setup_phy(tp, 0);
+ }
+
+ pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
+
+ if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
+ u32 mac_mode;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
+ udelay(40);
+
+ mac_mode = MAC_MODE_PORT_MODE_MII;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
+ !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ } else {
+ mac_mode = MAC_MODE_PORT_MODE_TBI;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
+ mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+
+ tw32_f(MAC_MODE, mac_mode);
+ udelay(100);
+
+ tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
+ udelay(10);
+ }
+
+ if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ u32 base_val;
+
+ base_val = tp->pci_clock_ctrl;
+ base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE);
+
+ tw32_f(TG3PCI_CLOCK_CTRL, base_val |
+ CLOCK_CTRL_ALTCLK |
+ CLOCK_CTRL_PWRDOWN_PLL133);
+ udelay(40);
+ } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
+ u32 newbits1, newbits2;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_ALTCLK);
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ newbits1 = CLOCK_CTRL_625_CORE;
+ newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
+ } else {
+ newbits1 = CLOCK_CTRL_ALTCLK;
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
+ udelay(40);
+
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
+ udelay(40);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ u32 newbits3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_44MHZ_CORE);
+ } else {
+ newbits3 = CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_f(TG3PCI_CLOCK_CTRL,
+ tp->pci_clock_ctrl | newbits3);
+ udelay(40);
+ }
+ }
+
+ tg3_frob_aux_power(tp);
+
+ /* Workaround for unstable PLL clock */
+ if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
+ (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ u32 val = tr32(0x7d00);
+
+ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+ tw32(0x7d00, val);
+ if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ }
+
+ /* Finally, set the new power state. */
+ pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+ return 0;
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+ if (!netif_carrier_ok(tp->dev)) {
+ printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
+ } else {
+ printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
+ tp->dev->name,
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
+ "%s for RX.\n",
+ tp->dev->name,
+ (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
+ (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
+ }
+}
+
+static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
+{
+ u32 new_tg3_flags = 0;
+ u32 old_rx_mode = tp->rx_mode;
+ u32 old_tx_mode = tp->tx_mode;
+
+ if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
+ if (local_adv & ADVERTISE_PAUSE_CAP) {
+ if (local_adv & ADVERTISE_PAUSE_ASYM) {
+ if (remote_adv & LPA_PAUSE_CAP)
+ new_tg3_flags |=
+ (TG3_FLAG_RX_PAUSE |
+ TG3_FLAG_TX_PAUSE);
+ else if (remote_adv & LPA_PAUSE_ASYM)
+ new_tg3_flags |=
+ (TG3_FLAG_RX_PAUSE);
+ } else {
+ if (remote_adv & LPA_PAUSE_CAP)
+ new_tg3_flags |=
+ (TG3_FLAG_RX_PAUSE |
+ TG3_FLAG_TX_PAUSE);
+ }
+ } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+ if ((remote_adv & LPA_PAUSE_CAP) &&
+ (remote_adv & LPA_PAUSE_ASYM))
+ new_tg3_flags |= TG3_FLAG_TX_PAUSE;
+ }
+
+ tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
+ tp->tg3_flags |= new_tg3_flags;
+ } else {
+ new_tg3_flags = tp->tg3_flags;
+ }
+
+ if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
+ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_rx_mode != tp->rx_mode) {
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ }
+
+ if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
+ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_tx_mode != tp->tx_mode) {
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+ }
+}
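+
+/* Summary of the pause resolution above when TG3_FLAG_PAUSE_AUTONEG is
+ * set (derived from the code; "x" means don't care):
+ *
+ *   local PAUSE  local ASYM  remote PAUSE  remote ASYM   result
+ *        1            x           1             x        RX and TX pause
+ *        1            1           0             1        RX pause only
+ *        0            1           1             1        TX pause only
+ *   any other combination                                no pause
+ */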
+
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+{
+ switch (val & MII_TG3_AUX_STAT_SPDMASK) {
+ case MII_TG3_AUX_STAT_10HALF:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_10FULL:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_100HALF:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_100FULL:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_1000HALF:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_1000FULL:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ default:
+ *speed = SPEED_INVALID;
+ *duplex = DUPLEX_INVALID;
+ break;
+ }
+}
+
+static void tg3_phy_copper_begin(struct tg3 *tp)
+{
+ u32 new_adv;
+ int i;
+
+ if (tp->link_config.phy_is_low_power) {
+ /* Entering low power mode. Disable gigabit and
+ * 100baseT advertisements.
+ */
+ tg3_writephy(tp, MII_TG3_CTRL, 0);
+
+ new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
+ new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ } else if (tp->link_config.speed == SPEED_INVALID) {
+ tp->link_config.advertising =
+ (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_MII);
+
+ if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+ tp->link_config.advertising &=
+ ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
+ new_adv |= ADVERTISE_10HALF;
+ if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
+ new_adv |= ADVERTISE_10FULL;
+ if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
+ new_adv |= ADVERTISE_100HALF;
+ if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
+ new_adv |= ADVERTISE_100FULL;
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+
+ if (tp->link_config.advertising &
+ (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
+ new_adv = 0;
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
+ new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
+ new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
+ new_adv |= (MII_TG3_CTRL_AS_MASTER |
+ MII_TG3_CTRL_ENABLE_AS_MASTER);
+ tg3_writephy(tp, MII_TG3_CTRL, new_adv);
+ } else {
+ tg3_writephy(tp, MII_TG3_CTRL, 0);
+ }
+ } else {
+ /* Asking for a specific link mode. */
+ if (tp->link_config.speed == SPEED_1000) {
+ new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = MII_TG3_CTRL_ADV_1000_FULL;
+ else
+ new_adv = MII_TG3_CTRL_ADV_1000_HALF;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= (MII_TG3_CTRL_AS_MASTER |
+ MII_TG3_CTRL_ENABLE_AS_MASTER);
+ tg3_writephy(tp, MII_TG3_CTRL, new_adv);
+ } else {
+ tg3_writephy(tp, MII_TG3_CTRL, 0);
+
+ new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
+ if (tp->link_config.speed == SPEED_100) {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv |= ADVERTISE_100FULL;
+ else
+ new_adv |= ADVERTISE_100HALF;
+ } else {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv |= ADVERTISE_10FULL;
+ else
+ new_adv |= ADVERTISE_10HALF;
+ }
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ }
+ }
+
+ if (tp->link_config.autoneg == AUTONEG_DISABLE &&
+ tp->link_config.speed != SPEED_INVALID) {
+ u32 bmcr, orig_bmcr;
+
+ tp->link_config.active_speed = tp->link_config.speed;
+ tp->link_config.active_duplex = tp->link_config.duplex;
+
+ bmcr = 0;
+ switch (tp->link_config.speed) {
+ default:
+ case SPEED_10:
+ break;
+
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case SPEED_1000:
+ bmcr |= TG3_BMCR_SPEED1000;
+ break;
+ }
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+ (bmcr != orig_bmcr)) {
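+ /* Put the PHY into loopback and wait for the (latched)
+ * link status bit in BMSR to clear before writing the new
+ * forced BMCR value below.
+ */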
+ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
+ for (i = 0; i < 1500; i++) {
+ u32 tmp;
+
+ udelay(10);
+ if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+ tg3_readphy(tp, MII_BMSR, &tmp))
+ continue;
+ if (!(tmp & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+ tg3_writephy(tp, MII_BMCR, bmcr);
+ udelay(40);
+ }
+ } else {
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+}
+
+static int tg3_init_5401phy_dsp(struct tg3 *tp)
+{
+ int err;
+
+ /* Turn off tap power management and set the
+ * extended packet length bit.
+ */
+ err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
+
+ err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
+ err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
+
+ err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
+ err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
+
+ err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
+ err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
+
+ err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
+ err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
+
+ err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
+ err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
+
+ udelay(40);
+
+ return err;
+}
+
+static int tg3_copper_is_advertising_all(struct tg3 *tp)
+{
+ u32 adv_reg, all_mask;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+ return 0;
+
+ all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ if ((adv_reg & all_mask) != all_mask)
+ return 0;
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+ u32 tg3_ctrl;
+
+ if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
+ return 0;
+
+ all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
+ MII_TG3_CTRL_ADV_1000_FULL);
+ if ((tg3_ctrl & all_mask) != all_mask)
+ return 0;
+ }
+ return 1;
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+{
+ int current_link_up;
+ u32 bmsr, dummy;
+ u16 current_speed;
+ u8 current_duplex;
+ int i, err;
+
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+
+ tp->mi_mode = MAC_MI_MODE_BASE;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
+
+ /* Some third-party PHYs need to be reset on link going
+ * down.
+ */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ netif_carrier_ok(tp->dev)) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ !(bmsr & BMSR_LSTATUS))
+ force_reset = 1;
+ }
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+ !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
+ bmsr = 0;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ for (i = 0; i < 1000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+
+ if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
+ !(bmsr & BMSR_LSTATUS) &&
+ tp->link_config.active_speed == SPEED_1000) {
+ err = tg3_phy_reset(tp);
+ if (!err)
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+ }
+ }
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ /* 5701 {A0,B0} CRC bug workaround */
+ tg3_writephy(tp, 0x15, 0x0a75);
+ tg3_writephy(tp, 0x1c, 0x8c68);
+ tg3_writephy(tp, 0x1c, 0x8d68);
+ tg3_writephy(tp, 0x1c, 0x8c68);
+ }
+
+ /* Clear pending interrupts... */
+ tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
+ tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
+
+ if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
+ tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+ else
+ tg3_writephy(tp, MII_TG3_IMASK, ~0);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ else
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
+ }
+
+ current_link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
+ u32 val;
+
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
+ tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
+ if (!(val & (1 << 10))) {
+ val |= (1 << 10);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ goto relink;
+ }
+ }
+
+ bmsr = 0;
+ for (i = 0; i < 100; i++) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ break;
+ udelay(40);
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ u32 aux_stat, bmcr;
+
+ tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+ for (i = 0; i < 2000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+ aux_stat)
+ break;
+ }
+
+ tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+ &current_speed,
+ &current_duplex);
+
+ bmcr = 0;
+ for (i = 0; i < 200; i++) {
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (tg3_readphy(tp, MII_BMCR, &bmcr))
+ continue;
+ if (bmcr && bmcr != 0x7fff)
+ break;
+ udelay(10);
+ }
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ if (bmcr & BMCR_ANENABLE) {
+ current_link_up = 1;
+
+ /* Force autoneg restart if we are exiting
+ * low power mode.
+ */
+ if (!tg3_copper_is_advertising_all(tp))
+ current_link_up = 0;
+ } else {
+ current_link_up = 0;
+ }
+ } else {
+ if (!(bmcr & BMCR_ANENABLE) &&
+ tp->link_config.speed == current_speed &&
+ tp->link_config.duplex == current_duplex) {
+ current_link_up = 1;
+ } else {
+ current_link_up = 0;
+ }
+ }
+
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+ }
+
+ if (current_link_up == 1 &&
+ (tp->link_config.active_duplex == DUPLEX_FULL) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+ u32 local_adv, remote_adv;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
+ local_adv = 0;
+ local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ if (tg3_readphy(tp, MII_LPA, &remote_adv))
+ remote_adv = 0;
+
+ remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
+
+ /* If we are not advertising full pause capability,
+ * something is wrong. Bring the link down and reconfigure.
+ */
+ if (local_adv != ADVERTISE_PAUSE_CAP) {
+ current_link_up = 0;
+ } else {
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+ }
+ }
+relink:
+ if (current_link_up == 0) {
+ u32 tmp;
+
+ tg3_phy_copper_begin(tp);
+
+ tg3_readphy(tp, MII_BMSR, &tmp);
+ if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
+ (tmp & BMSR_LSTATUS))
+ current_link_up = 1;
+ }
+
+ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+ if (current_link_up == 1) {
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+ if (tp->link_config.active_duplex == DUPLEX_HALF)
+ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
+ (current_link_up == 1 &&
+ tp->link_config.active_speed == SPEED_10))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ } else {
+ if (current_link_up == 1)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ }
+
+ /* ??? Without this setting Netgear GA302T PHY does not
+ * ??? send/receive packets...
+ */
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
+ /* Polled via timer. */
+ tw32_f(MAC_EVENT, 0);
+ } else {
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ }
+ udelay(40);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ current_link_up == 1 &&
+ tp->link_config.active_speed == SPEED_1000 &&
+ ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
+ (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
+ udelay(120);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ tg3_write_mem(tp,
+ NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
+struct tg3_fiber_aneginfo {
+ int state;
+#define ANEG_STATE_UNKNOWN 0
+#define ANEG_STATE_AN_ENABLE 1
+#define ANEG_STATE_RESTART_INIT 2
+#define ANEG_STATE_RESTART 3
+#define ANEG_STATE_DISABLE_LINK_OK 4
+#define ANEG_STATE_ABILITY_DETECT_INIT 5
+#define ANEG_STATE_ABILITY_DETECT 6
+#define ANEG_STATE_ACK_DETECT_INIT 7
+#define ANEG_STATE_ACK_DETECT 8
+#define ANEG_STATE_COMPLETE_ACK_INIT 9
+#define ANEG_STATE_COMPLETE_ACK 10
+#define ANEG_STATE_IDLE_DETECT_INIT 11
+#define ANEG_STATE_IDLE_DETECT 12
+#define ANEG_STATE_LINK_OK 13
+#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
+#define ANEG_STATE_NEXT_PAGE_WAIT 15
+
+ u32 flags;
+#define MR_AN_ENABLE 0x00000001
+#define MR_RESTART_AN 0x00000002
+#define MR_AN_COMPLETE 0x00000004
+#define MR_PAGE_RX 0x00000008
+#define MR_NP_LOADED 0x00000010
+#define MR_TOGGLE_TX 0x00000020
+#define MR_LP_ADV_FULL_DUPLEX 0x00000040
+#define MR_LP_ADV_HALF_DUPLEX 0x00000080
+#define MR_LP_ADV_SYM_PAUSE 0x00000100
+#define MR_LP_ADV_ASYM_PAUSE 0x00000200
+#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
+#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
+#define MR_LP_ADV_NEXT_PAGE 0x00001000
+#define MR_TOGGLE_RX 0x00002000
+#define MR_NP_RX 0x00004000
+
+#define MR_LINK_OK 0x80000000
+
+ unsigned long link_time, cur_time;
+
+ u32 ability_match_cfg;
+ int ability_match_count;
+
+ char ability_match, idle_match, ack_match;
+
+ u32 txconfig, rxconfig;
+#define ANEG_CFG_NP 0x00000080
+#define ANEG_CFG_ACK 0x00000040
+#define ANEG_CFG_RF2 0x00000020
+#define ANEG_CFG_RF1 0x00000010
+#define ANEG_CFG_PS2 0x00000001
+#define ANEG_CFG_PS1 0x00008000
+#define ANEG_CFG_HD 0x00004000
+#define ANEG_CFG_FD 0x00002000
+#define ANEG_CFG_INVAL 0x00001f06
+
+};
+#define ANEG_OK 0
+#define ANEG_DONE 1
+#define ANEG_TIMER_ENAB 2
+#define ANEG_FAILED -1
+
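+/* The settle time is measured in state machine ticks; fiber_autoneg()
+ * below advances the machine roughly once per microsecond, so 10000
+ * corresponds to about 10ms.
+ */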
+#define ANEG_STATE_SETTLE_TIME 10000
+
+static int tg3_fiber_aneg_smachine(struct tg3 *tp,
+ struct tg3_fiber_aneginfo *ap)
+{
+ unsigned long delta;
+ u32 rx_cfg_reg;
+ int ret;
+
+ if (ap->state == ANEG_STATE_UNKNOWN) {
+ ap->rxconfig = 0;
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+ }
+ ap->cur_time++;
+
+ if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
+ rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
+
+ if (rx_cfg_reg != ap->ability_match_cfg) {
+ ap->ability_match_cfg = rx_cfg_reg;
+ ap->ability_match = 0;
+ ap->ability_match_count = 0;
+ } else {
+ if (++ap->ability_match_count > 1) {
+ ap->ability_match = 1;
+ ap->ability_match_cfg = rx_cfg_reg;
+ }
+ }
+ if (rx_cfg_reg & ANEG_CFG_ACK)
+ ap->ack_match = 1;
+ else
+ ap->ack_match = 0;
+
+ ap->idle_match = 0;
+ } else {
+ ap->idle_match = 1;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->ack_match = 0;
+
+ rx_cfg_reg = 0;
+ }
+
+ ap->rxconfig = rx_cfg_reg;
+ ret = ANEG_OK;
+
+ switch(ap->state) {
+ case ANEG_STATE_UNKNOWN:
+ if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
+ ap->state = ANEG_STATE_AN_ENABLE;
+
+ /* fallthru */
+ case ANEG_STATE_AN_ENABLE:
+ ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
+ if (ap->flags & MR_AN_ENABLE) {
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+
+ ap->state = ANEG_STATE_RESTART_INIT;
+ } else {
+ ap->state = ANEG_STATE_DISABLE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_RESTART_INIT:
+ ap->link_time = ap->cur_time;
+ ap->flags &= ~(MR_NP_LOADED);
+ ap->txconfig = 0;
+ tw32(MAC_TX_AUTO_NEG, 0);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ret = ANEG_TIMER_ENAB;
+ ap->state = ANEG_STATE_RESTART;
+
+ /* fallthru */
+ case ANEG_STATE_RESTART:
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
+ } else {
+ ret = ANEG_TIMER_ENAB;
+ }
+ break;
+
+ case ANEG_STATE_DISABLE_LINK_OK:
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT_INIT:
+ ap->flags &= ~(MR_TOGGLE_TX);
+ ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ABILITY_DETECT;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT:
+ if (ap->ability_match != 0 && ap->rxconfig != 0) {
+ ap->state = ANEG_STATE_ACK_DETECT_INIT;
+ }
+ break;
+
+ case ANEG_STATE_ACK_DETECT_INIT:
+ ap->txconfig |= ANEG_CFG_ACK;
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ACK_DETECT;
+
+ /* fallthru */
+ case ANEG_STATE_ACK_DETECT:
+ if (ap->ack_match != 0) {
+ if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
+ (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
+ ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
+ } else {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ } else if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK_INIT:
+ if (ap->rxconfig & ANEG_CFG_INVAL) {
+ ret = ANEG_FAILED;
+ break;
+ }
+ ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
+ MR_LP_ADV_HALF_DUPLEX |
+ MR_LP_ADV_SYM_PAUSE |
+ MR_LP_ADV_ASYM_PAUSE |
+ MR_LP_ADV_REMOTE_FAULT1 |
+ MR_LP_ADV_REMOTE_FAULT2 |
+ MR_LP_ADV_NEXT_PAGE |
+ MR_TOGGLE_RX |
+ MR_NP_RX);
+ if (ap->rxconfig & ANEG_CFG_FD)
+ ap->flags |= MR_LP_ADV_FULL_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_HD)
+ ap->flags |= MR_LP_ADV_HALF_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_PS1)
+ ap->flags |= MR_LP_ADV_SYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_PS2)
+ ap->flags |= MR_LP_ADV_ASYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_RF1)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
+ if (ap->rxconfig & ANEG_CFG_RF2)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_LP_ADV_NEXT_PAGE;
+
+ ap->link_time = ap->cur_time;
+
+ ap->flags ^= (MR_TOGGLE_TX);
+ if (ap->rxconfig & 0x0008)
+ ap->flags |= MR_TOGGLE_RX;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_NP_RX;
+ ap->flags |= MR_PAGE_RX;
+
+ ap->state = ANEG_STATE_COMPLETE_ACK;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
+ !(ap->flags & MR_NP_RX)) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ ret = ANEG_FAILED;
+ }
+ }
+ }
+ break;
+
+ case ANEG_STATE_IDLE_DETECT_INIT:
+ ap->link_time = ap->cur_time;
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_IDLE_DETECT;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_IDLE_DETECT:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ /* XXX another gem from the Broadcom driver :( */
+ ap->state = ANEG_STATE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_LINK_OK:
+ ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
+ /* ??? unimplemented */
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT:
+ /* ??? unimplemented */
+ break;
+
+ default:
+ ret = ANEG_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
+static int fiber_autoneg(struct tg3 *tp, u32 *flags)
+{
+ int res = 0;
+ struct tg3_fiber_aneginfo aninfo;
+ int status = ANEG_FAILED;
+ unsigned int tick;
+ u32 tmp;
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+ udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+ udelay(40);
+
+ memset(&aninfo, 0, sizeof(aninfo));
+ aninfo.flags |= MR_AN_ENABLE;
+ aninfo.state = ANEG_STATE_UNKNOWN;
+ aninfo.cur_time = 0;
+ tick = 0;
+ while (++tick < 195000) {
+ status = tg3_fiber_aneg_smachine(tp, &aninfo);
+ if (status == ANEG_DONE || status == ANEG_FAILED)
+ break;
+
+ udelay(1);
+ }
+
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ *flags = aninfo.flags;
+
+ if (status == ANEG_DONE &&
+ (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+ MR_LP_ADV_FULL_DUPLEX)))
+ res = 1;
+
+ return res;
+}
+
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+ u32 mac_status = tr32(MAC_STATUS);
+ int i;
+
+ /* Reset when initializing for the first time or when we have a link. */
+ if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
+ !(mac_status & MAC_STATUS_PCS_SYNCED))
+ return;
+
+ /* Set PLL lock range. */
+ tg3_writephy(tp, 0x16, 0x8007);
+
+ /* SW reset */
+ tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+ /* Wait for reset to complete. */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 500; i++)
+ udelay(10);
+
+ /* Config mode; select PMA/Ch 1 regs. */
+ tg3_writephy(tp, 0x10, 0x8411);
+
+ /* Enable auto-lock and comdet, select txclk for tx. */
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ tg3_writephy(tp, 0x18, 0x00a0);
+ tg3_writephy(tp, 0x16, 0x41ff);
+
+ /* Assert and deassert POR. */
+ tg3_writephy(tp, 0x13, 0x0400);
+ udelay(40);
+ tg3_writephy(tp, 0x13, 0x0000);
+
+ tg3_writephy(tp, 0x11, 0x0a50);
+ udelay(40);
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ /* Wait for signal to stabilize */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 15000; i++)
+ udelay(10);
+
+ /* Deselect the channel register so we can read the PHYID
+ * later.
+ */
+ tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+ u32 sg_dig_ctrl, sg_dig_status;
+ u32 serdes_cfg, expected_sg_dig_ctrl;
+ int workaround, port_a;
+ int current_link_up;
+
+ serdes_cfg = 0;
+ expected_sg_dig_ctrl = 0;
+ workaround = 0;
+ port_a = 1;
+ current_link_up = 0;
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ workaround = 1;
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ port_a = 0;
+
+ /* preserve bits 0-11,13,14 for signal pre-emphasis */
+ /* preserve bits 20-23 for voltage regulator */
+ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+ }
+
+ sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+ if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+ if (sg_dig_ctrl & (1 << 31)) {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+ tw32_f(SG_DIG_CTRL, 0x01388400);
+ }
+ if (mac_status & MAC_STATUS_PCS_SYNCED) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ }
+ goto out;
+ }
+
+ /* Want auto-negotiation. */
+ expected_sg_dig_ctrl = 0x81388400;
+
+ /* Pause capability */
+ expected_sg_dig_ctrl |= (1 << 11);
+
+ /* Asymmetric pause */
+ expected_sg_dig_ctrl |= (1 << 12);
+
+ if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+ if (workaround)
+ tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
+ udelay(5);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+ tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
+ } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ int i;
+
+ /* Give time to negotiate (~200ms) */
+ for (i = 0; i < 40000; i++) {
+ sg_dig_status = tr32(SG_DIG_STATUS);
+ if (sg_dig_status & (0x3))
+ break;
+ udelay(5);
+ }
+ mac_status = tr32(MAC_STATUS);
+
+ if ((sg_dig_status & (1 << 1)) &&
+ (mac_status & MAC_STATUS_PCS_SYNCED)) {
+ u32 local_adv, remote_adv;
+
+ local_adv = ADVERTISE_PAUSE_CAP;
+ remote_adv = 0;
+ if (sg_dig_status & (1 << 19))
+ remote_adv |= LPA_PAUSE_CAP;
+ if (sg_dig_status & (1 << 20))
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+ current_link_up = 1;
+ tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
+ } else if (!(sg_dig_status & (1 << 1))) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
+ tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
+ else {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+
+ tw32_f(SG_DIG_CTRL, 0x01388400);
+ udelay(40);
+
+ /* Link parallel detection - link is up
+ * only if we have PCS_SYNC and are not
+ * receiving config code words.
+ */
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG)) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ }
+ }
+ }
+ }
+
+out:
+ return current_link_up;
+}
+
+static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+ int current_link_up = 0;
+
+ if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
+ tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
+ goto out;
+ }
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ u32 flags;
+ int i;
+
+ if (fiber_autoneg(tp, &flags)) {
+ u32 local_adv, remote_adv;
+
+ local_adv = ADVERTISE_PAUSE_CAP;
+ remote_adv = 0;
+ if (flags & MR_LP_ADV_SYM_PAUSE)
+ remote_adv |= LPA_PAUSE_CAP;
+ if (flags & MR_LP_ADV_ASYM_PAUSE)
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+ tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+ current_link_up = 1;
+ }
+ for (i = 0; i < 30; i++) {
+ udelay(20);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ if ((tr32(MAC_STATUS) &
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if (current_link_up == 0 &&
+ (mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG))
+ current_link_up = 1;
+ } else {
+ /* Forcing 1000FD link up. */
+ current_link_up = 1;
+ tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+
+ tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+ udelay(40);
+ }
+
+out:
+ return current_link_up;
+}
+
+static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+{
+ u32 orig_pause_cfg;
+ u16 orig_active_speed;
+ u8 orig_active_duplex;
+ u32 mac_status;
+ int current_link_up;
+ int i;
+
+ orig_pause_cfg =
+ (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
+ TG3_FLAG_TX_PAUSE));
+ orig_active_speed = tp->link_config.active_speed;
+ orig_active_duplex = tp->link_config.active_duplex;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
+ netif_carrier_ok(tp->dev) &&
+ (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
+ mac_status = tr32(MAC_STATUS);
+ mac_status &= (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_RCVD_CFG);
+ if (mac_status == (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ return 0;
+ }
+ }
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ if (tp->phy_id == PHY_ID_BCM8002)
+ tg3_init_bcm8002(tp);
+
+ /* Enable link change event even when serdes polling. */
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ udelay(40);
+
+ current_link_up = 0;
+ mac_status = tr32(MAC_STATUS);
+
+ if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
+ current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+ else
+ current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tp->hw_status->status =
+ (SD_STATUS_UPDATED |
+ (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+
+ for (i = 0; i < 100; i++) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(5);
+ if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+ current_link_up = 0;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ tw32_f(MAC_MODE, (tp->mac_mode |
+ MAC_MODE_SEND_CONFIGS));
+ udelay(1);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ }
+ }
+
+ if (current_link_up == 1) {
+ tp->link_config.active_speed = SPEED_1000;
+ tp->link_config.active_duplex = DUPLEX_FULL;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON));
+ } else {
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE));
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ } else {
+ u32 now_pause_cfg =
+ tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
+ TG3_FLAG_TX_PAUSE);
+ if (orig_pause_cfg != now_pause_cfg ||
+ orig_active_speed != tp->link_config.active_speed ||
+ orig_active_duplex != tp->link_config.active_duplex)
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
+static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+{
+ int err;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ err = tg3_setup_fiber_phy(tp, force_reset);
+ } else {
+ err = tg3_setup_copper_phy(tp, force_reset);
+ }
+
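+ /* 1000 Mbps half duplex gets a larger slot time value below
+ * (0xff vs. 32), presumably to cover the extended slot time
+ * (carrier extension) used at gigabit half duplex; the IPG
+ * fields are identical in both cases.
+ */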
+ if (tp->link_config.active_speed == SPEED_1000 &&
+ tp->link_config.active_duplex == DUPLEX_HALF)
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+ else
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (netif_carrier_ok(tp->dev)) {
+ tw32(HOSTCC_STAT_COAL_TICKS,
+ DEFAULT_STAT_COAL_TICKS);
+ } else {
+ tw32(HOSTCC_STAT_COAL_TICKS, 0);
+ }
+ }
+
+ return err;
+}
+
+/* Tigon3 never reports partial packet sends. So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3 *tp)
+{
+ u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
+ u32 sw_idx = tp->tx_cons;
+
+ while (sw_idx != hw_idx) {
+ struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+ struct sk_buff *skb = ri->skb;
+ int i;
+
+ if (unlikely(skb == NULL))
+ BUG();
+
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(ri, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ ri->skb = NULL;
+
+ sw_idx = NEXT_TX(sw_idx);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ if (unlikely(sw_idx == hw_idx))
+ BUG();
+
+ ri = &tp->tx_buffers[sw_idx];
+ if (unlikely(ri->skb != NULL))
+ BUG();
+
+ pci_unmap_page(tp->pdev,
+ pci_unmap_addr(ri, mapping),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+
+ sw_idx = NEXT_TX(sw_idx);
+ }
+
+ dev_kfree_skb_irq(skb);
+ }
+
+ tp->tx_cons = sw_idx;
+
+ if (netif_queue_stopped(tp->dev) &&
+ (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+ netif_wake_queue(tp->dev);
+}
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address). Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
+ */
+static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
+ int src_idx, u32 dest_idx_unmasked)
+{
+ struct tg3_rx_buffer_desc *desc;
+ struct ring_info *map, *src_map;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int skb_size, dest_idx;
+
+ src_map = NULL;
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+ desc = &tp->rx_std[dest_idx];
+ map = &tp->rx_std_buffers[dest_idx];
+ if (src_idx >= 0)
+ src_map = &tp->rx_std_buffers[src_idx];
+ skb_size = RX_PKT_BUF_SZ;
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+ desc = &tp->rx_jumbo[dest_idx];
+ map = &tp->rx_jumbo_buffers[dest_idx];
+ if (src_idx >= 0)
+ src_map = &tp->rx_jumbo_buffers[src_idx];
+ skb_size = RX_JUMBO_PKT_BUF_SZ;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any of the map or rp information
+ * until we are sure we can commit to a new buffer.
+ *
+ * Callers depend upon this behavior and assume that
+ * we leave everything unchanged if we fail.
+ */
+ skb = dev_alloc_skb(skb_size);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb->dev = tp->dev;
+ skb_reserve(skb, tp->rx_offset);
+
+ mapping = pci_map_single(tp->pdev, skb->data,
+ skb_size - tp->rx_offset,
+ PCI_DMA_FROMDEVICE);
+
+ map->skb = skb;
+ pci_unmap_addr_set(map, mapping, mapping);
+
+ if (src_map != NULL)
+ src_map->skb = NULL;
+
+ desc->addr_hi = ((u64)mapping >> 32);
+ desc->addr_lo = ((u64)mapping & 0xffffffff);
+
+ return skb_size;
+}
+
+/* We only need to move over in the address because the other
+ * members of the RX descriptor are invariant. See notes above
+ * tg3_alloc_rx_skb for full details.
+ */
+static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
+ int src_idx, u32 dest_idx_unmasked)
+{
+ struct tg3_rx_buffer_desc *src_desc, *dest_desc;
+ struct ring_info *src_map, *dest_map;
+ int dest_idx;
+
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+ dest_desc = &tp->rx_std[dest_idx];
+ dest_map = &tp->rx_std_buffers[dest_idx];
+ src_desc = &tp->rx_std[src_idx];
+ src_map = &tp->rx_std_buffers[src_idx];
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+ dest_desc = &tp->rx_jumbo[dest_idx];
+ dest_map = &tp->rx_jumbo_buffers[dest_idx];
+ src_desc = &tp->rx_jumbo[src_idx];
+ src_map = &tp->rx_jumbo_buffers[src_idx];
+ break;
+
+ default:
+ return;
+ }
+
+ dest_map->skb = src_map->skb;
+ pci_unmap_addr_set(dest_map, mapping,
+ pci_unmap_addr(src_map, mapping));
+ dest_desc->addr_hi = src_desc->addr_hi;
+ dest_desc->addr_lo = src_desc->addr_lo;
+
+ src_map->skb = NULL;
+}
+
+#if TG3_VLAN_TAG_USED
+static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
+{
+ return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
+}
+#endif
+
+/* The RX ring scheme is composed of multiple rings which post fresh
+ * buffers to the chip, and one special ring the chip uses to report
+ * status back to the host.
+ *
+ * The special ring reports the status of received packets to the
+ * host. The chip does not write into the original descriptor the
+ * RX buffer was obtained from. The chip simply takes the original
+ * descriptor as provided by the host, updates the status and length
+ * field, then writes this into the next status ring entry.
+ *
+ * Each ring the host uses to post buffers to the chip is described
+ * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
+ * it is first placed into the on-chip RAM. When the packet's length
+ * is known, the chip walks down the TG3_BDINFO entries to select the ring.
+ * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
+ * whose MAXLEN covers the new packet's length is chosen.
+ *
+ * The "separate ring for rx status" scheme may sound queer, but it makes
+ * sense from a cache coherency perspective. If only the host writes
+ * to the buffer post rings, and only the chip writes to the rx status
+ * rings, then cache lines never move beyond shared-modified state.
+ * If both the host and chip were to write into the same ring, cache line
+ * eviction could occur since both entities want it in an exclusive state.
+ */
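+/* For example, a frame that fits within the standard ring's MAXLEN is
+ * reported against a buffer posted on the standard ring, while a jumbo
+ * frame that exceeds it falls through to the jumbo ring's TG3_BDINFO entry.
+ */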
+static int tg3_rx(struct tg3 *tp, int budget)
+{
+ u32 work_mask;
+ u32 rx_rcb_ptr = tp->rx_rcb_ptr;
+ u16 hw_idx, sw_idx;
+ int received;
+
+ hw_idx = tp->hw_status->idx[0].rx_producer;
+ /*
+ * We need to order the read of hw_idx and the read of
+ * the opaque cookie.
+ */
+ rmb();
+ sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
+ work_mask = 0;
+ received = 0;
+ while (sw_idx != hw_idx && budget > 0) {
+ struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
+ unsigned int len;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 opaque_key, desc_idx, *post_ptr;
+
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
+ mapping);
+ skb = tp->rx_std_buffers[desc_idx].skb;
+ post_ptr = &tp->rx_std_ptr;
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
+ mapping);
+ skb = tp->rx_jumbo_buffers[desc_idx].skb;
+ post_ptr = &tp->rx_jumbo_ptr;
+ } else {
+ goto next_pkt_nopost;
+ }
+
+ work_mask |= opaque_key;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ drop_it:
+ tg3_recycle_rx(tp, opaque_key,
+ desc_idx, *post_ptr);
+ drop_it_no_recycle:
+ /* Other statistics kept track of by card. */
+ tp->net_stats.rx_dropped++;
+ goto next_pkt;
+ }
+
+ len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
+
+ if (len > RX_COPY_THRESHOLD
+ && tp->rx_offset == 2
+ /* rx_offset != 2 iff this is a 5701 card running
+ * in PCI-X mode [see tg3_get_invariants()] */
+ ) {
+ int skb_size;
+
+ skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+ desc_idx, *post_ptr);
+ if (skb_size < 0)
+ goto drop_it;
+
+ pci_unmap_single(tp->pdev, dma_addr,
+ skb_size - tp->rx_offset,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, len);
+ } else {
+ struct sk_buff *copy_skb;
+
+ tg3_recycle_rx(tp, opaque_key,
+ desc_idx, *post_ptr);
+
+ copy_skb = dev_alloc_skb(len + 2);
+ if (copy_skb == NULL)
+ goto drop_it_no_recycle;
+
+ copy_skb->dev = tp->dev;
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+ memcpy(copy_skb->data, skb->data, len);
+ pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
+ (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT) == 0xffff))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->protocol = eth_type_trans(skb, tp->dev);
+#if TG3_VLAN_TAG_USED
+ if (tp->vlgrp != NULL &&
+ desc->type_flags & RXD_FLAG_VLAN) {
+ tg3_vlan_rx(tp, skb,
+ desc->err_vlan & RXD_VLAN_MASK);
+ } else
+#endif
+ netif_receive_skb(skb);
+
+ tp->dev->last_rx = jiffies;
+ received++;
+ budget--;
+
+next_pkt:
+ (*post_ptr)++;
+next_pkt_nopost:
+ rx_rcb_ptr++;
+ sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
+ }
+
+ /* ACK the status ring. */
+ tp->rx_rcb_ptr = rx_rcb_ptr;
+ tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
+ (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
+
+ /* Refill RX ring(s). */
+ if (work_mask & RXD_OPAQUE_RING_STD) {
+ sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+ tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
+ sw_idx);
+ }
+ if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+ sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
+ tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
+ sw_idx);
+ }
+ mmiowb();
+
+ return received;
+}
+
+static int tg3_poll(struct net_device *netdev, int *budget)
+{
+ struct tg3 *tp = netdev_priv(netdev);
+ struct tg3_hw_status *sblk = tp->hw_status;
+ unsigned long flags;
+ int done;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ /* handle link change and other phy events */
+ if (!(tp->tg3_flags &
+ (TG3_FLAG_USE_LINKCHG_REG |
+ TG3_FLAG_POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG) {
+ sblk->status = SD_STATUS_UPDATED |
+ (sblk->status & ~SD_STATUS_LINK_CHG);
+ tg3_setup_phy(tp, 0);
+ }
+ }
+
+ /* run TX completion thread */
+ if (sblk->idx[0].tx_consumer != tp->tx_cons) {
+ spin_lock(&tp->tx_lock);
+ tg3_tx(tp);
+ spin_unlock(&tp->tx_lock);
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with dev->poll()
+ */
+ done = 1;
+ if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
+ int orig_budget = *budget;
+ int work_done;
+
+ if (orig_budget > netdev->quota)
+ orig_budget = netdev->quota;
+
+ work_done = tg3_rx(tp, orig_budget);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ if (work_done >= orig_budget)
+ done = 0;
+ }
+
+ /* if no more work, tell net stack and NIC we're done */
+ if (done) {
+ spin_lock_irqsave(&tp->lock, flags);
+ __netif_rx_complete(netdev);
+ tg3_restart_ints(tp);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return (done ? 0 : 1);
+}
+
+static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
+{
+ struct tg3_hw_status *sblk = tp->hw_status;
+ unsigned int work_exists = 0;
+
+ /* check for phy events */
+ if (!(tp->tg3_flags &
+ (TG3_FLAG_USE_LINKCHG_REG |
+ TG3_FLAG_POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG)
+ work_exists = 1;
+ }
+ /* check for RX/TX work to do */
+ if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+ sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+ work_exists = 1;
+
+ return work_exists;
+}
+
+static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct tg3 *tp = netdev_priv(dev);
+ struct tg3_hw_status *sblk = tp->hw_status;
+ unsigned long flags;
+ unsigned int handled = 1;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ /* In INTx mode, it is possible for the interrupt to arrive at
+ * the CPU before the status block posted prior to the interrupt
+ * has reached host memory.
+ * Reading the PCI State register will confirm whether the
+ * interrupt is ours and will flush the status block.
+ */
+ if ((sblk->status & SD_STATUS_UPDATED) ||
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ /*
+ * writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ */
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ 0x00000001);
+ /*
+ * Flush PCI write. This also guarantees that our
+ * status block has been flushed to host memory.
+ */
+ tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+ sblk->status &= ~SD_STATUS_UPDATED;
+
+ if (likely(tg3_has_work(dev, tp)))
+ netif_rx_schedule(dev); /* schedule NAPI poll */
+ else {
+ /* no work, shared interrupt perhaps? re-enable
+ * interrupts, and flush that PCI write
+ */
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ 0x00000000);
+ tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+ }
+ } else { /* shared interrupt */
+ handled = 0;
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int tg3_init_hw(struct tg3 *);
+static int tg3_halt(struct tg3 *);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tg3_poll_controller(struct net_device *dev)
+{
+ tg3_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
+static void tg3_reset_task(void *_data)
+{
+ struct tg3 *tp = _data;
+ unsigned int restart_timer;
+
+ tg3_netif_stop(tp);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
+ tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
+
+ tg3_halt(tp);
+ tg3_init_hw(tp);
+
+ tg3_netif_start(tp);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ if (restart_timer)
+ mod_timer(&tp->timer, jiffies + 1);
+}
+
+static void tg3_tx_timeout(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&tp->reset_task);
+}
+
+static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
+
+static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+ u32 guilty_entry, int guilty_len,
+ u32 last_plus_one, u32 *start, u32 mss)
+{
+ struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
+ dma_addr_t new_addr;
+ u32 entry = *start;
+ int i;
+
+ if (!new_skb) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ /* New SKB is guaranteed to be linear. */
+ entry = *start;
+ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+ PCI_DMA_TODEVICE);
+ tg3_set_txd(tp, entry, new_addr, new_skb->len,
+ (skb->ip_summed == CHECKSUM_HW) ?
+ TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
+ *start = NEXT_TX(entry);
+
+ /* Now clean up the sw ring entries. */
+ i = 0;
+ while (entry != last_plus_one) {
+ int len;
+
+ if (i == 0)
+ len = skb_headlen(skb);
+ else
+ len = skb_shinfo(skb)->frags[i-1].size;
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(&tp->tx_buffers[entry], mapping),
+ len, PCI_DMA_TODEVICE);
+ if (i == 0) {
+ tp->tx_buffers[entry].skb = new_skb;
+ pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
+ } else {
+ tp->tx_buffers[entry].skb = NULL;
+ }
+ entry = NEXT_TX(entry);
+ i++;
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void tg3_set_txd(struct tg3 *tp, int entry,
+ dma_addr_t mapping, int len, u32 flags,
+ u32 mss_and_is_end)
+{
+ struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+ int is_end = (mss_and_is_end & 0x1);
+ u32 mss = (mss_and_is_end >> 1);
+ u32 vlan_tag = 0;
+
+ if (is_end)
+ flags |= TXD_FLAG_END;
+ if (flags & TXD_FLAG_VLAN) {
+ vlan_tag = flags >> 16;
+ flags &= 0xffff;
+ }
+ vlan_tag |= (mss << TXD_MSS_SHIFT);
+
+ txd->addr_hi = ((u64) mapping >> 32);
+ txd->addr_lo = ((u64) mapping & 0xffffffff);
+ txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
+ txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+}
+
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+ u32 base = (u32) mapping & 0xffffffff;
+
+ return ((base > 0xffffdcc0) &&
+ (base + len + 8 < base));
+}
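+
+/* Worked example for the test above: a mapping whose low 32 bits are
+ * 0xffffe000 with len 0x2000 satisfies both conditions (the base is above
+ * 0xffffdcc0, and base + len + 8 wraps past 4GB to 0x8 in 32-bit
+ * arithmetic), so tg3_start_xmit() takes the tigon3_4gb_hwbug_workaround()
+ * path for that descriptor.
+ */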
+
+static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ dma_addr_t mapping;
+ unsigned int i;
+ u32 len, entry, base_flags, mss;
+ int would_hit_hwbug;
+ unsigned long flags;
+
+ len = skb_headlen(skb);
+
+ /* No BH disabling for tx_lock here. We are running in BH disabled
+ * context and TX reclaim runs via tp->poll inside of a software
+ * interrupt. Rejoice!
+ *
+ * Actually, things are not so simple. If we are to take a hw
+ * IRQ here, we can deadlock, consider:
+ *
+ * CPU1 CPU2
+ * tg3_start_xmit
+ * take tp->tx_lock
+ * tg3_timer
+ * take tp->lock
+ * tg3_interrupt
+ * spin on tp->lock
+ * spin on tp->tx_lock
+ *
+ * So we really do need to disable interrupts when taking
+ * tx_lock here.
+ */
+ local_irq_save(flags);
+ if (!spin_trylock(&tp->tx_lock)) {
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* This is a hard error, log it. */
+ if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tp->tx_prod;
+ base_flags = 0;
+ if (skb->ip_summed == CHECKSUM_HW)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#if TG3_TSO_SUPPORT != 0
+ mss = 0;
+ if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
+ (mss = skb_shinfo(skb)->tso_size) != 0) {
+ int tcp_opt_len, ip_tcp_len;
+
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+ ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+
+ base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ skb->nh.iph->check = 0;
+ skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+ skb->h.th->check = 0;
+ base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+ }
+ else {
+ skb->h.th->check =
+ ~csum_tcpudp_magic(skb->nh.iph->saddr,
+ skb->nh.iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+ if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = ((skb->nh.iph->ihl - 5) +
+ (tcp_opt_len >> 2));
+ mss |= (tsflags << 11);
+ }
+ } else {
+ if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = ((skb->nh.iph->ihl - 5) +
+ (tcp_opt_len >> 2));
+ base_flags |= tsflags << 12;
+ }
+ }
+ }
+#else
+ mss = 0;
+#endif
+#if TG3_VLAN_TAG_USED
+ if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+ base_flags |= (TXD_FLAG_VLAN |
+ (vlan_tx_tag_get(skb) << 16));
+#endif
+
+ /* Queue skb data, a.k.a. the main skb fragment. */
+ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ tp->tx_buffers[entry].skb = skb;
+ pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
+
+ would_hit_hwbug = 0;
+
+ if (tg3_4g_overflow_test(mapping, len))
+ would_hit_hwbug = entry + 1;
+
+ tg3_set_txd(tp, entry, mapping, len, base_flags,
+ (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
+
+ entry = NEXT_TX(entry);
+
+ /* Now loop through additional data fragments, and queue them. */
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ unsigned int i, last;
+
+ last = skb_shinfo(skb)->nr_frags - 1;
+ for (i = 0; i <= last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+ mapping = pci_map_page(tp->pdev,
+ frag->page,
+ frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+
+ tp->tx_buffers[entry].skb = NULL;
+ pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
+
+ if (tg3_4g_overflow_test(mapping, len)) {
+ /* Only one should match. */
+ if (would_hit_hwbug)
+ BUG();
+ would_hit_hwbug = entry + 1;
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ tg3_set_txd(tp, entry, mapping, len,
+ base_flags, (i == last)|(mss << 1));
+ else
+ tg3_set_txd(tp, entry, mapping, len,
+ base_flags, (i == last));
+
+ entry = NEXT_TX(entry);
+ }
+ }
+
+ if (would_hit_hwbug) {
+ u32 last_plus_one = entry;
+ u32 start;
+ unsigned int len = 0;
+
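+		/* Convert back to a ring index, then rescan the descriptors
+		 * just queued to find the length of the buffer that crosses
+		 * the 4GB boundary before handing it to the workaround.
+		 */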
+ would_hit_hwbug -= 1;
+ entry = entry - 1 - skb_shinfo(skb)->nr_frags;
+ entry &= (TG3_TX_RING_SIZE - 1);
+ start = entry;
+ i = 0;
+ while (entry != last_plus_one) {
+ if (i == 0)
+ len = skb_headlen(skb);
+ else
+ len = skb_shinfo(skb)->frags[i-1].size;
+
+ if (entry == would_hit_hwbug)
+ break;
+
+ i++;
+ entry = NEXT_TX(entry);
+		}
+
+ /* If the workaround fails due to memory/mapping
+ * failure, silently drop this packet.
+ */
+ if (tigon3_4gb_hwbug_workaround(tp, skb,
+ entry, len,
+ last_plus_one,
+ &start, mss))
+ goto out_unlock;
+
+ entry = start;
+ }
+
+	/* Packets are ready, update the Tx producer idx locally and on the card. */
+ tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+
+ tp->tx_prod = entry;
+ if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+out_unlock:
+ mmiowb();
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+ dev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+ int new_mtu)
+{
+ dev->mtu = new_mtu;
+
+ if (new_mtu > ETH_DATA_LEN)
+ tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ /* We'll just catch it later when the
+		 * device is brought up.
+ */
+ tg3_set_mtu(dev, tp, new_mtu);
+ return 0;
+ }
+
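+	/* On a running device a new MTU requires a full halt and
+	 * re-init so the rings are rebuilt with the new jumbo setting.
+	 */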
+ tg3_netif_stop(tp);
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_halt(tp);
+
+ tg3_set_mtu(dev, tp, new_mtu);
+
+ tg3_init_hw(tp);
+
+ tg3_netif_start(tp);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking layer, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+ struct ring_info *rxp;
+ int i;
+
+ for (i = 0; i < TG3_RX_RING_SIZE; i++) {
+ rxp = &tp->rx_std_buffers[i];
+
+ if (rxp->skb == NULL)
+ continue;
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(rxp, mapping),
+ RX_PKT_BUF_SZ - tp->rx_offset,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(rxp->skb);
+ rxp->skb = NULL;
+ }
+
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ rxp = &tp->rx_jumbo_buffers[i];
+
+ if (rxp->skb == NULL)
+ continue;
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(rxp, mapping),
+ RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(rxp->skb);
+ rxp->skb = NULL;
+ }
+
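+	/* TX buffers: the linear part of each skb was mapped with
+	 * pci_map_single() and the page fragments with pci_map_page(),
+	 * so undo each with the matching unmap call.
+	 */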
+ for (i = 0; i < TG3_TX_RING_SIZE; ) {
+ struct tx_ring_info *txp;
+ struct sk_buff *skb;
+ int j;
+
+ txp = &tp->tx_buffers[i];
+ skb = txp->skb;
+
+ if (skb == NULL) {
+ i++;
+ continue;
+ }
+
+ pci_unmap_single(tp->pdev,
+ pci_unmap_addr(txp, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ txp->skb = NULL;
+
+ i++;
+
+ for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
+ txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
+ pci_unmap_page(tp->pdev,
+ pci_unmap_addr(txp, mapping),
+ skb_shinfo(skb)->frags[j].size,
+ PCI_DMA_TODEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking layer, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static void tg3_init_rings(struct tg3 *tp)
+{
+ u32 i;
+
+ /* Free up all the SKBs. */
+ tg3_free_rings(tp);
+
+ /* Zero out all descriptors. */
+ memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
+ memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
+ memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
+
+	/* Initialize invariants of the rings; we only set these
+	 * fields once.  This works because the card does not
+ * write into the rx buffer posting rings.
+ */
+ for (i = 0; i < TG3_RX_RING_SIZE; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tp->rx_std[i];
+ rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
+ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
+ rxd->opaque = (RXD_OPAQUE_RING_STD |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tp->rx_jumbo[i];
+ rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
+ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+ }
+
+ /* Now allocate fresh SKBs for each rx ring. */
+ for (i = 0; i < tp->rx_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
+ -1, i) < 0)
+ break;
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+ for (i = 0; i < tp->rx_jumbo_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
+ -1, i) < 0)
+ break;
+ }
+ }
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ if (tp->rx_std_buffers) {
+ kfree(tp->rx_std_buffers);
+ tp->rx_std_buffers = NULL;
+ }
+ if (tp->rx_std) {
+ pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
+ tp->rx_std, tp->rx_std_mapping);
+ tp->rx_std = NULL;
+ }
+ if (tp->rx_jumbo) {
+ pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
+ tp->rx_jumbo, tp->rx_jumbo_mapping);
+ tp->rx_jumbo = NULL;
+ }
+ if (tp->rx_rcb) {
+ pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
+ tp->rx_rcb, tp->rx_rcb_mapping);
+ tp->rx_rcb = NULL;
+ }
+ if (tp->tx_ring) {
+ pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ tp->tx_ring, tp->tx_desc_mapping);
+ tp->tx_ring = NULL;
+ }
+ if (tp->hw_status) {
+ pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
+ tp->hw_status, tp->status_mapping);
+ tp->hw_status = NULL;
+ }
+ if (tp->hw_stats) {
+ pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
+ tp->hw_stats = NULL;
+ }
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
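+	/* A single kmalloc() provides the std RX, jumbo RX and TX
+	 * shadow ring_info arrays; it is carved up just below.
+	 */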
+ tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
+ (TG3_RX_RING_SIZE +
+ TG3_RX_JUMBO_RING_SIZE)) +
+ (sizeof(struct tx_ring_info) *
+ TG3_TX_RING_SIZE),
+ GFP_KERNEL);
+ if (!tp->rx_std_buffers)
+ return -ENOMEM;
+
+ memset(tp->rx_std_buffers, 0,
+ (sizeof(struct ring_info) *
+ (TG3_RX_RING_SIZE +
+ TG3_RX_JUMBO_RING_SIZE)) +
+ (sizeof(struct tx_ring_info) *
+ TG3_TX_RING_SIZE));
+
+ tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
+ tp->tx_buffers = (struct tx_ring_info *)
+ &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
+
+ tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+ &tp->rx_std_mapping);
+ if (!tp->rx_std)
+ goto err_out;
+
+ tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
+ &tp->rx_jumbo_mapping);
+
+ if (!tp->rx_jumbo)
+ goto err_out;
+
+ tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
+ &tp->rx_rcb_mapping);
+ if (!tp->rx_rcb)
+ goto err_out;
+
+ tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ &tp->tx_desc_mapping);
+ if (!tp->tx_ring)
+ goto err_out;
+
+ tp->hw_status = pci_alloc_consistent(tp->pdev,
+ TG3_HW_STATUS_SIZE,
+ &tp->status_mapping);
+ if (!tp->hw_status)
+ goto err_out;
+
+ tp->hw_stats = pci_alloc_consistent(tp->pdev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping);
+ if (!tp->hw_stats)
+ goto err_out;
+
+ memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ return 0;
+
+err_out:
+ tg3_free_consistent(tp);
+ return -ENOMEM;
+}
+
+#define MAX_WAIT_CNT 1000
+
+/* To stop a block, clear the enable bit and poll till it
+ * clears. tp->lock is held.
+ */
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
+{
+ unsigned int i;
+ u32 val;
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ switch (ofs) {
+ case RCVLSC_MODE:
+ case DMAC_MODE:
+ case MBFREE_MODE:
+ case BUFMGR_MODE:
+ case MEMARB_MODE:
+ /* We can't enable/disable these bits of the
+			 * 5705/5750; just say success.
+ */
+ return 0;
+
+ default:
+ break;
+		}
+ }
+
+ val = tr32(ofs);
+ val &= ~enable_bit;
+ tw32_f(ofs, val);
+
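+	/* Poll for up to MAX_WAIT_CNT * 100us (100ms) for the
+	 * enable bit to clear.
+	 */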
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ val = tr32(ofs);
+ if ((val & enable_bit) == 0)
+ break;
+ }
+
+ if (i == MAX_WAIT_CNT) {
+ printk(KERN_ERR PFX "tg3_stop_block timed out, "
+ "ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_abort_hw(struct tg3 *tp)
+{
+ int i, err;
+
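+	/* Quiesce the chip: mask interrupts, stop the receiver, then
+	 * shut down the receive, send and DMA blocks before clearing
+	 * the status and statistics blocks.
+	 */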
+ tg3_disable_ints(tp);
+
+ tp->rx_mode &= ~RX_MODE_ENABLE;
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
+
+ err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
+ err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
+ err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+ err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
+ if (err)
+ goto out;
+
+ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tp->tx_mode &= ~TX_MODE_ENABLE;
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
+ break;
+ }
+ if (i >= MAX_WAIT_CNT) {
+ printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
+ "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
+ tp->dev->name, tr32(MAC_TX_MODE));
+ return -ENODEV;
+ }
+
+ err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
+ err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
+
+ tw32(FTQ_RESET, 0xffffffff);
+ tw32(FTQ_RESET, 0x00000000);
+
+ err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
+ err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
+ if (err)
+ goto out;
+
+ if (tp->hw_status)
+ memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+ if (tp->hw_stats)
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+out:
+ return err;
+}
+
+/* tp->lock is held. */
+static int tg3_nvram_lock(struct tg3 *tp)
+{
+ if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+ int i;
+
+ tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+ for (i = 0; i < 8000; i++) {
+ if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+ break;
+ udelay(20);
+ }
+ if (i == 8000)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_nvram_unlock(struct tg3 *tp)
+{
+ if (tp->tg3_flags & TG3_FLAG_NVRAM)
+ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
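+	/* Post the driver<->bootcode magic value; after a reset the
+	 * bootcode writes back its one's complement, which
+	 * tg3_chip_reset() polls for below.
+	 */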
+ if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+ if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+		}
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+ if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START_DONE);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD_DONE);
+ break;
+
+ default:
+ break;
+		}
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+		}
+ }
+}
+
+static void tg3_stop_fw(struct tg3 *);
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+{
+ u32 val;
+ u32 flags_save;
+ int i;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+ tg3_nvram_lock(tp);
+
+ /*
+ * We must avoid the readl() that normally takes place.
+ * It locks machines, causes machine checks, and other
+ * fun things. So, temporarily disable the 5701
+ * hardware workaround, while we do the reset.
+ */
+ flags_save = tp->tg3_flags;
+ tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
+
+ /* do the reset */
+ val = GRC_MISC_CFG_CORECLK_RESET;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ if (tr32(0x7e2c) == 0x60) {
+ tw32(0x7e2c, 0x20);
+ }
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ tw32(GRC_MISC_CFG, (1 << 29));
+ val |= (1 << 29);
+ }
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+ tw32(GRC_MISC_CFG, val);
+
+ /* restore 5701 hardware bug workaround flag */
+ tp->tg3_flags = flags_save;
+
+	/* Unfortunately, we have to delay before the PCI read back.
+	 * Some 575X chips will not even respond to a PCI cfg access
+	 * when the reset command is given to the chip.
+	 *
+	 * How do these hardware designers expect things to work
+	 * properly if the PCI write is posted for a long period
+	 * of time?  It is always necessary to have some method by
+	 * which a register read back can occur to push out the
+	 * write which does the reset.
+	 *
+	 * For most tg3 variants the trick below has worked.
+	 * Ho hum...
+	 */
+ udelay(120);
+
+ /* Flush PCI posted writes. The normal MMIO registers
+	 * are inaccessible at this time, so this is the only
+	 * way to do this reliably (actually, this is no longer
+	 * the case; see above).  I tried to use indirect
+	 * register read/write but this upset some 5701 variants.
+ */
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+ udelay(120);
+
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ int i;
+ u32 cfg_val;
+
+ /* Wait for link training to complete. */
+ for (i = 0; i < 5000; i++)
+ udelay(100);
+
+ pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+ pci_write_config_dword(tp->pdev, 0xc4,
+ cfg_val | (1 << 15));
+ }
+ /* Set PCIE max payload size and clear error status. */
+ pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
+ }
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_restore_state(tp->pdev);
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
+ val &= ~PCIX_CAPS_RELAXED_ORDERING;
+ pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ tg3_stop_fw(tp);
+ tw32(0x5000, 0x400);
+ }
+
+ tw32(GRC_MODE, tp->grc_mode);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ u32 val = tr32(0xc4);
+
+ tw32(0xc4, val | (1 << 15));
+ }
+
+ if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
+ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ } else
+ tw32_f(MAC_MODE, 0);
+ udelay(40);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+ if (i >= 100000) {
+ printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
+ "firmware will not restart magic=%08x\n",
+ tp->dev->name, val);
+ return -ENODEV;
+ }
+ }
+
+ if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ u32 val = tr32(0x7c00);
+
+ tw32(0x7c00, val | (1 << 25));
+ }
+
+ /* Reprobe ASF enable state. */
+ tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
+ tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+ }
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ u32 val;
+ int i;
+
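+		/* Post the PAUSE command in the firmware mailbox and raise
+		 * the RX CPU driver-event bit (bit 14); the firmware clears
+		 * the bit once it has taken the event.
+		 */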
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= (1 << 14);
+ tw32(GRC_RX_CPU_EVENT, val);
+
+ /* Wait for RX cpu to ACK the event. */
+ for (i = 0; i < 100; i++) {
+ if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
+ break;
+ udelay(1);
+ }
+ }
+}
+
+/* tp->lock is held. */
+static int tg3_halt(struct tg3 *tp)
+{
+ int err;
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+
+ tg3_abort_hw(tp);
+ err = tg3_chip_reset(tp);
+
+ tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
+ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define TG3_FW_RELEASE_MAJOR 0x0
+#define TG3_FW_RELASE_MINOR 0x0
+#define TG3_FW_RELEASE_FIX 0x0
+#define TG3_FW_START_ADDR 0x08000000
+#define TG3_FW_TEXT_ADDR 0x08000000
+#define TG3_FW_TEXT_LEN 0x9c0
+#define TG3_FW_RODATA_ADDR 0x080009c0
+#define TG3_FW_RODATA_LEN 0x60
+#define TG3_FW_DATA_ADDR 0x08000a40
+#define TG3_FW_DATA_LEN 0x20
+#define TG3_FW_SBSS_ADDR 0x08000a60
+#define TG3_FW_SBSS_LEN 0xc
+#define TG3_FW_BSS_ADDR 0x08000a70
+#define TG3_FW_BSS_LEN 0x10
+
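+/* Replacement bootcode for the 5701 A0, stored as raw 32-bit MIPS
+ * instruction words; tg3_load_5701_a0_firmware_fix() copies it into
+ * the RX/TX CPU scratch memory and then starts only the RX CPU.
+ */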
+static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
+ 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
+ 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
+ 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
+ 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
+ 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
+ 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
+ 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
+ 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
+ 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
+ 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
+ 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
+ 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
+ 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
+ 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
+ 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
+ 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
+ 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
+ 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
+ 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
+ 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
+ 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
+ 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
+ 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
+ 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
+ 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
+ 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
+ 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
+ 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
+ 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
+ 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
+ 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
+ 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
+ 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
+ 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
+ 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
+ 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
+ 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
+ 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
+ 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
+ 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
+ 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
+ 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
+ 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
+ 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
+ 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
+ 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
+ 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
+ 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
+ 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
+ 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
+ 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
+ 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
+ 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
+ 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
+ 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
+ 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
+ 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
+ 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
+ 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
+ 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
+ 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
+ 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
+ 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
+ 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
+ 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
+ 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
+ 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
+ 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
+ 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
+ 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
+ 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
+ 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
+ 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
+ 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
+ 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
+};
+
+static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
+ 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
+ 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
+ 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
+ 0x00000000
+};
+
+#if 0 /* All zeros, don't eat up space with it. */
+u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+#endif
+
+#define RX_CPU_SCRATCH_BASE 0x30000
+#define RX_CPU_SCRATCH_SIZE 0x04000
+#define TX_CPU_SCRATCH_BASE 0x34000
+#define TX_CPU_SCRATCH_SIZE 0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+ int i;
+
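+	/* The 5705-class chips have no separately loadable TX CPU
+	 * firmware (see tg3_load_firmware_cpu() below), so asking to
+	 * halt it indicates a driver bug.
+	 */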
+ if (offset == TX_CPU_BASE &&
+ (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ BUG();
+
+ if (offset == RX_CPU_BASE) {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+ } else {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+ }
+
+ if (i >= 10000) {
+ printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
+ "and %s CPU\n",
+ tp->dev->name,
+ (offset == RX_CPU_BASE ? "RX" : "TX"));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+struct fw_info {
+ unsigned int text_base;
+ unsigned int text_len;
+ u32 *text_data;
+ unsigned int rodata_base;
+ unsigned int rodata_len;
+ u32 *rodata_data;
+ unsigned int data_base;
+ unsigned int data_len;
+ u32 *data_data;
+};
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
+ int cpu_scratch_size, struct fw_info *info)
+{
+ int err, i;
+ u32 orig_tg3_flags = tp->tg3_flags;
+ void (*write_op)(struct tg3 *, u32, u32);
+
+ if (cpu_base == TX_CPU_BASE &&
+ (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
+ "TX cpu firmware on %s which is 5705.\n",
+ tp->dev->name);
+ return -EINVAL;
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ write_op = tg3_write_mem;
+ else
+ write_op = tg3_write_indirect_reg32;
+
+ /* Force use of PCI config space for indirect register
+ * write calls.
+ */
+ tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (err)
+ goto out;
+
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
+ for (i = 0; i < (info->text_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->text_base & 0xffff) +
+ (i * sizeof(u32))),
+ (info->text_data ?
+ info->text_data[i] : 0));
+ for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->rodata_base & 0xffff) +
+ (i * sizeof(u32))),
+ (info->rodata_data ?
+ info->rodata_data[i] : 0));
+ for (i = 0; i < (info->data_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->data_base & 0xffff) +
+ (i * sizeof(u32))),
+ (info->data_data ?
+ info->data_data[i] : 0));
+
+ err = 0;
+
+out:
+ tp->tg3_flags = orig_tg3_flags;
+ return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+ struct fw_info info;
+ int err, i;
+
+ info.text_base = TG3_FW_TEXT_ADDR;
+ info.text_len = TG3_FW_TEXT_LEN;
+ info.text_data = &tg3FwText[0];
+ info.rodata_base = TG3_FW_RODATA_ADDR;
+ info.rodata_len = TG3_FW_RODATA_LEN;
+ info.rodata_data = &tg3FwRodata[0];
+ info.data_base = TG3_FW_DATA_ADDR;
+ info.data_len = TG3_FW_DATA_LEN;
+ info.data_data = NULL;
+
+ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup only the RX cpu. */
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
+ break;
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
+ "to set RX CPU PC, is %08x should be %08x\n",
+ tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
+ TG3_FW_TEXT_ADDR);
+ return -ENODEV;
+ }
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ return 0;
+}
+
+#if TG3_TSO_SUPPORT != 0
+
+#define TG3_TSO_FW_RELEASE_MAJOR 0x1
+#define TG3_TSO_FW_RELASE_MINOR 0x6
+#define TG3_TSO_FW_RELEASE_FIX 0x0
+#define TG3_TSO_FW_START_ADDR 0x08000000
+#define TG3_TSO_FW_TEXT_ADDR 0x08000000
+#define TG3_TSO_FW_TEXT_LEN 0x1aa0
+#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
+#define TG3_TSO_FW_RODATA_LEN 0x60
+#define TG3_TSO_FW_DATA_ADDR 0x08001b20
+#define TG3_TSO_FW_DATA_LEN 0x30
+#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
+#define TG3_TSO_FW_SBSS_LEN 0x2c
+#define TG3_TSO_FW_BSS_ADDR 0x08001b80
+#define TG3_TSO_FW_BSS_LEN 0x894
+
+static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
+ 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
+ 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
+ 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
+ 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
+ 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
+ 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
+ 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
+ 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
+ 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
+ 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
+ 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
+ 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
+ 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
+ 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
+ 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
+ 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
+ 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
+ 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
+ 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
+ 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
+ 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
+ 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
+ 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
+ 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
+ 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
+ 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
+ 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
+ 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
+ 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
+ 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+ 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
+ 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
+ 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
+ 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
+ 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
+ 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
+ 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
+ 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
+ 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
+ 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
+ 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
+ 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
+ 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
+ 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
+ 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
+ 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
+ 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
+ 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
+ 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
+ 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
+ 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
+ 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
+ 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
+ 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
+ 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
+ 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
+ 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
+ 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
+ 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
+ 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
+ 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
+ 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
+ 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
+ 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
+ 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
+ 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
+ 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
+ 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
+ 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
+ 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
+ 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
+ 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
+ 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
+ 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
+ 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
+ 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
+ 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
+ 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
+ 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
+ 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
+ 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
+ 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
+ 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
+ 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
+ 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
+ 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
+ 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
+ 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
+ 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
+ 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
+ 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
+ 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
+ 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
+ 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
+ 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
+ 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
+ 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
+ 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
+ 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
+ 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
+ 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
+ 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
+ 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
+ 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
+ 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
+ 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
+ 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
+ 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
+ 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
+ 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
+ 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
+ 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
+ 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
+ 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
+ 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
+ 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
+ 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
+ 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
+ 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
+ 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
+ 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
+ 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
+ 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
+ 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
+ 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
+ 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
+ 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
+ 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
+ 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
+ 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
+ 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
+ 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
+ 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
+ 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
+ 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
+ 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
+ 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
+ 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
+ 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
+ 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
+ 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
+ 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
+ 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
+ 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
+ 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
+ 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
+ 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
+ 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
+ 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
+ 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
+ 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
+ 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
+ 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
+ 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
+ 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
+ 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
+ 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
+ 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
+ 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
+ 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
+ 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
+ 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
+ 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
+ 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
+ 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
+ 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
+ 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
+ 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
+ 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
+ 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
+ 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
+ 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
+ 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
+ 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
+ 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
+ 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
+ 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
+ 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
+ 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
+ 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
+ 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
+ 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
+ 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
+ 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
+ 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
+ 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
+ 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
+ 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
+ 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
+ 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
+ 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
+ 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
+ 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
+ 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
+ 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
+ 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
+ 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
+ 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
+ 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
+ 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
+ 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
+ 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
+ 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
+ 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
+ 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
+ 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
+ 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
+ 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
+ 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
+ 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
+ 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
+ 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
+ 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
+ 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
+ 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
+ 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
+ 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
+ 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
+ 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
+ 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
+ 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
+ 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
+ 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
+ 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
+ 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
+ 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
+ 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
+ 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
+ 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
+ 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
+ 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
+ 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
+ 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
+ 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
+ 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
+ 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
+ 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
+ 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
+ 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
+ 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
+ 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
+ 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
+ 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
+ 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
+ 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
+ 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
+ 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
+ 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
+ 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
+ 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
+ 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
+ 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
+ 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
+ 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
+ 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
+ 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
+ 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
+ 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
+ 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
+ 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
+ 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
+ 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
+ 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
+ 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
+ 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
+ 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
+ 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
+ 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
+ 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
+ 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
+ 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
+ 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
+ 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
+ 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
+ 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
+ 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
+ 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
+ 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
+ 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
+ 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
+ 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
+ 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
+ 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
+ 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
+};
+
+static u32 tg3TsoFwRodata[] = {
+ 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
+ 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
+ 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
+ 0x00000000,
+};
+
+static u32 tg3TsoFwData[] = {
+ 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000,
+};
+
+/* 5705 needs a special version of the TSO firmware. */
+#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
+#define TG3_TSO5_FW_RELASE_MINOR 0x2
+#define TG3_TSO5_FW_RELEASE_FIX 0x0
+#define TG3_TSO5_FW_START_ADDR 0x00010000
+#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
+#define TG3_TSO5_FW_TEXT_LEN 0xe90
+#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
+#define TG3_TSO5_FW_RODATA_LEN 0x50
+#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
+#define TG3_TSO5_FW_DATA_LEN 0x20
+#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
+#define TG3_TSO5_FW_SBSS_LEN 0x28
+#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
+#define TG3_TSO5_FW_BSS_LEN 0x88
+
+static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
+ 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
+ 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
+ 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
+ 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
+ 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
+ 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
+ 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+ 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
+ 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
+ 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
+ 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
+ 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
+ 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
+ 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
+ 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
+ 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
+ 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
+ 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
+ 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
+ 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
+ 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
+ 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
+ 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
+ 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
+ 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
+ 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
+ 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
+ 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
+ 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
+ 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
+ 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
+ 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
+ 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
+ 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
+ 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
+ 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
+ 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
+ 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
+ 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
+ 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
+ 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
+ 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
+ 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
+ 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
+ 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
+ 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
+ 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
+ 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
+ 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
+ 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
+ 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
+ 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
+ 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
+ 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
+ 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
+ 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
+ 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
+ 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
+ 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
+ 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
+ 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
+ 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
+ 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
+ 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
+ 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
+ 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
+ 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
+ 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
+ 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
+ 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
+ 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
+ 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
+ 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
+ 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
+ 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
+ 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
+ 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
+ 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
+ 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
+ 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
+ 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
+ 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
+ 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
+ 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
+ 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
+ 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
+ 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
+ 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
+ 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
+ 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
+ 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
+ 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
+ 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
+ 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
+ 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
+ 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
+ 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
+ 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
+ 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
+ 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
+ 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
+ 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
+ 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
+ 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
+ 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
+ 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
+ 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
+ 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
+ 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
+ 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
+ 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
+ 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
+ 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
+ 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
+ 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
+ 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
+ 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
+ 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
+ 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
+ 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
+ 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
+ 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
+ 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
+ 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
+ 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
+ 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
+ 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
+ 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
+ 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
+ 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
+ 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
+ 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
+ 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
+ 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
+ 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
+ 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
+ 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
+ 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
+ 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
+ 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
+ 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
+ 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
+ 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
+ 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
+ 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
+ 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
+ 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
+ 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
+ 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
+ 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
+ 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
+ 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
+ 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
+ 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
+ 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
+ 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
+ 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
+ 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
+ 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+ struct fw_info info;
+ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+ int err, i;
+
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ return 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ info.text_base = TG3_TSO5_FW_TEXT_ADDR;
+ info.text_len = TG3_TSO5_FW_TEXT_LEN;
+ info.text_data = &tg3Tso5FwText[0];
+ info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
+ info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
+ info.rodata_data = &tg3Tso5FwRodata[0];
+ info.data_base = TG3_TSO5_FW_DATA_ADDR;
+ info.data_len = TG3_TSO5_FW_DATA_LEN;
+ info.data_data = &tg3Tso5FwData[0];
+ cpu_base = RX_CPU_BASE;
+ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+ cpu_scratch_size = (info.text_len +
+ info.rodata_len +
+ info.data_len +
+ TG3_TSO5_FW_SBSS_LEN +
+ TG3_TSO5_FW_BSS_LEN);
+ } else {
+ info.text_base = TG3_TSO_FW_TEXT_ADDR;
+ info.text_len = TG3_TSO_FW_TEXT_LEN;
+ info.text_data = &tg3TsoFwText[0];
+ info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
+ info.rodata_len = TG3_TSO_FW_RODATA_LEN;
+ info.rodata_data = &tg3TsoFwRodata[0];
+ info.data_base = TG3_TSO_FW_DATA_ADDR;
+ info.data_len = TG3_TSO_FW_DATA_LEN;
+ info.data_data = &tg3TsoFwData[0];
+ cpu_base = TX_CPU_BASE;
+ cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+ cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+ }
+
+ err = tg3_load_firmware_cpu(tp, cpu_base,
+ cpu_scratch_base, cpu_scratch_size,
+ &info);
+ if (err)
+ return err;
+
+ /* Now startup the cpu. */
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, info.text_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(cpu_base + CPU_PC) == info.text_base)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, info.text_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
+ "to set CPU PC, is %08x should be %08x\n",
+ tp->dev->name, tr32(cpu_base + CPU_PC),
+ info.text_base);
+ return -ENODEV;
+ }
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+ return 0;
+}
+
+#endif /* TG3_TSO_SUPPORT != 0 */
+
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp)
+{
+ u32 addr_high, addr_low;
+ int i;
+
+ addr_high = ((tp->dev->dev_addr[0] << 8) |
+ tp->dev->dev_addr[1]);
+ addr_low = ((tp->dev->dev_addr[2] << 24) |
+ (tp->dev->dev_addr[3] << 16) |
+ (tp->dev->dev_addr[4] << 8) |
+ (tp->dev->dev_addr[5] << 0));
+ for (i = 0; i < 4; i++) {
+ tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ for (i = 0; i < 12; i++) {
+ tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
+ }
+ }
+
+ addr_high = (tp->dev->dev_addr[0] +
+ tp->dev->dev_addr[1] +
+ tp->dev->dev_addr[2] +
+ tp->dev->dev_addr[3] +
+ tp->dev->dev_addr[4] +
+ tp->dev->dev_addr[5]) &
+ TX_BACKOFF_SEED_MASK;
+ tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
+static int tg3_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&tp->lock);
+ __tg3_set_mac_addr(tp);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
+ dma_addr_t mapping, u32 maxlen_flags,
+ u32 nic_addr)
+{
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
+ ((u64) mapping >> 32));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
+ ((u64) mapping & 0xffffffff));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
+ maxlen_flags);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
+ nic_addr);
+}
+
+static void __tg3_set_rx_mode(struct net_device *);
+
+/* tp->lock is held. */
+static int tg3_reset_hw(struct tg3 *tp)
+{
+ u32 val, rdmac_mode;
+ int i, err, limit;
+
+ tg3_disable_ints(tp);
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
+
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
+ err = tg3_abort_hw(tp);
+ if (err)
+ return err;
+ }
+
+ err = tg3_chip_reset(tp);
+ if (err)
+ return err;
+
+ tg3_write_sig_legacy(tp, RESET_KIND_INIT);
+
+ /* This works around an issue with Athlon chipsets on
+ * B3 tigon3 silicon. This bit has no effect on any
+ * other revision. But do not set this on PCI Express
+ * chips.
+ */
+ if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_RETRY_SAME_DMA;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ /* Enable some hw fixes. */
+ val = tr32(TG3PCI_MSI_DATA);
+ val |= (1 << 26) | (1 << 28) | (1 << 29);
+ tw32(TG3PCI_MSI_DATA, val);
+ }
+
+ /* Descriptor ring init may access the NIC SRAM area to set up
+  * the TX descriptors, so we can only do this after the hardware
+  * has been successfully reset.
+  */
+ tg3_init_rings(tp);
+
+ /* This value is determined during the probe time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+ GRC_MODE_4X_NIC_SEND_RINGS |
+ GRC_MODE_NO_TX_PHDR_CSUM |
+ GRC_MODE_NO_RX_PHDR_CSUM);
+ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+ if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
+ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+ if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
+ tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
+
+ tw32(GRC_MODE,
+ tp->grc_mode |
+ (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+
+ /* Setup the timer prescaler register. The clock is always 66MHz. */
+ val = tr32(GRC_MISC_CFG);
+ val &= ~0xff;
+ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+
+ /* Initialize MBUF/DESC pool. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ /* Do nothing. */
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+ else
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+ }
+#if TG3_TSO_SUPPORT != 0
+ else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ int fw_len;
+
+ fw_len = (TG3_TSO5_FW_TEXT_LEN +
+ TG3_TSO5_FW_RODATA_LEN +
+ TG3_TSO5_FW_DATA_LEN +
+ TG3_TSO5_FW_SBSS_LEN +
+ TG3_TSO5_FW_BSS_LEN);
+ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+ tw32(BUFMGR_MB_POOL_ADDR,
+ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+ tw32(BUFMGR_MB_POOL_SIZE,
+ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+ }
+#endif
+
+ if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water);
+ } else {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water_jumbo);
+ }
+ tw32(BUFMGR_DMA_LOW_WATER,
+ tp->bufmgr_config.dma_low_water);
+ tw32(BUFMGR_DMA_HIGH_WATER,
+ tp->bufmgr_config.dma_high_water);
+
+ tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
+ for (i = 0; i < 2000; i++) {
+ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+ break;
+ udelay(10);
+ }
+ if (i >= 2000) {
+ printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
+ tp->dev->name);
+ return -ENODEV;
+ }
+
+ /* Setup replenish threshold. */
+ tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
+
+ /* Initialize TG3_BDINFO's at:
+ * RCVDBDI_STD_BD: standard eth size rx ring
+ * RCVDBDI_JUMBO_BD: jumbo frame rx ring
+ * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
+ *
+ * like so:
+ * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
+ * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
+ * ring attribute flags
+ * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
+ *
+ * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
+ * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
+ *
+ * The size of each ring is fixed in the firmware, but the location is
+ * configurable.
+ */
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->rx_std_mapping >> 32));
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->rx_std_mapping & 0xffffffff));
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_BUFFER_DESC);
+
+ /* Don't even try to program the JUMBO/MINI buffer descriptor
+ * configs on 5705.
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
+ } else {
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
+
+ tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Setup replenish threshold. */
+ tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
+
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->rx_jumbo_mapping >> 32));
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->rx_jumbo_mapping & 0xffffffff));
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+ } else {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+ }
+
+ }
+
+ /* There is only one send ring on 5705/5750, no need to explicitly
+ * disable the others.
+ */
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ /* Clear out send RCB ring in SRAM. */
+ for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+ }
+
+ tp->tx_prod = 0;
+ tp->tx_cons = 0;
+ tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
+ tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
+
+ tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
+ tp->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+
+ /* There is only one receive return ring on 5705/5750, no need
+ * to explicitly disable the others.
+ */
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
+ i += TG3_BDINFO_SIZE) {
+ tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+ }
+ }
+
+ tp->rx_rcb_ptr = 0;
+ tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
+
+ tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
+ tp->rx_rcb_mapping,
+ (TG3_RX_RCB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ 0);
+
+ tp->rx_std_ptr = tp->rx_pending;
+ tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
+ tp->rx_std_ptr);
+
+ tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
+ tp->rx_jumbo_pending : 0;
+ tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
+ tp->rx_jumbo_ptr);
+
+ /* Initialize MAC address and backoff seed. */
+ __tg3_set_mac_addr(tp);
+
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
+
+ /* The slot time is changed by tg3_setup_phy if we
+ * run at gigabit with half duplex.
+ */
+ tw32(MAC_TX_LENGTHS,
+ (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+
+ /* Receive rules. */
+ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
+ tw32(RCVLPC_CONFIG, 0x0181);
+
+ /* Calculate the RDMAC_MODE setting early; we need it to determine
+  * the RCVLPC_STATS_ENABLE mask.
+  */
+ rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
+ RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
+ RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+ RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+ RDMAC_MODE_LNGREAD_ENAB);
+ if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
+ rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
+ if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
+ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ }
+ }
+
+#if TG3_TSO_SUPPORT != 0
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ rdmac_mode |= (1 << 27);
+#endif
+
+ /* Receive/send statistics. */
+ if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else {
+ tw32(RCVLPC_STATS_ENABLE, 0xffffff);
+ }
+ tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
+ tw32(SNDDATAI_STATSENAB, 0xffffff);
+ tw32(SNDDATAI_STATSCTRL,
+ (SNDDATAI_SCTRL_ENABLE |
+ SNDDATAI_SCTRL_FASTUPD));
+
+ /* Setup host coalescing engine. */
+ tw32(HOSTCC_MODE, 0);
+ for (i = 0; i < 2000; i++) {
+ if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
+ break;
+ udelay(10);
+ }
+
+ tw32(HOSTCC_RXCOL_TICKS, 0);
+ tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
+ tw32(HOSTCC_RXMAX_FRAMES, 1);
+ tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ tw32(HOSTCC_RXCOAL_TICK_INT, 0);
+ tw32(HOSTCC_TXCOAL_TICK_INT, 0);
+ }
+ tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+ /* set status block DMA address */
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->status_mapping >> 32));
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->status_mapping & 0xffffffff));
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ /* Status/statistics block address. See tg3_timer,
+ * the tg3_periodic_fetch_stats call there, and
+ * tg3_get_stats to see how this works for 5705/5750 chips.
+ */
+ tw32(HOSTCC_STAT_COAL_TICKS,
+ DEFAULT_STAT_COAL_TICKS);
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->stats_mapping >> 32));
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->stats_mapping & 0xffffffff));
+ tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+ tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+ }
+
+ tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
+
+ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
+ tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
+
+ /* Clear statistics/status block in chip, and status block in ram. */
+ for (i = NIC_SRAM_STATS_BLK;
+ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+ i += sizeof(u32)) {
+ tg3_write_mem(tp, i, 0);
+ udelay(40);
+ }
+ memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+ udelay(40);
+
+ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1);
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(100);
+
+ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
+ tr32(MAILBOX_INTERRUPT_0);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
+ udelay(40);
+ }
+
+ val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
+ WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
+ WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
+ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
+ WDMAC_MODE_LNGREAD_ENAB);
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ /* nothing */
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+ val |= WDMAC_MODE_RX_ACCEL;
+ }
+ }
+
+ tw32_f(WDMAC_MODE, val);
+ udelay(40);
+
+ if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ val = tr32(TG3PCI_X_CAPS);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ val &= ~PCIX_CAPS_BURST_MASK;
+ val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
+ val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
+ if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
+ val |= (tp->split_mode_max_reqs <<
+ PCIX_CAPS_SPLIT_SHIFT);
+ }
+ tw32(TG3PCI_X_CAPS, val);
+ }
+
+ tw32_f(RDMAC_MODE, rdmac_mode);
+ udelay(40);
+
+ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
+ tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
+ tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+#if TG3_TSO_SUPPORT != 0
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+#endif
+ tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
+ tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ err = tg3_load_5701_a0_firmware_fix(tp);
+ if (err)
+ return err;
+ }
+
+#if TG3_TSO_SUPPORT != 0
+ if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ err = tg3_load_tso_firmware(tp);
+ if (err)
+ return err;
+ }
+#endif
+
+ tp->tx_mode = TX_MODE_ENABLE;
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+ udelay(100);
+
+ tp->rx_mode = RX_MODE_ENABLE;
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ if (tp->link_config.phy_is_low_power) {
+ tp->link_config.phy_is_low_power = 0;
+ tp->link_config.speed = tp->link_config.orig_speed;
+ tp->link_config.duplex = tp->link_config.orig_duplex;
+ tp->link_config.autoneg = tp->link_config.orig_autoneg;
+ }
+
+ tp->mi_mode = MAC_MI_MODE_BASE;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
+ !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
+ /* Set drive transmission level to 1.2V, but only if the
+  * signal pre-emphasis bit is not set.
+  */
+ val = tr32(MAC_SERDES_CFG);
+ val &= 0xfffff000;
+ val |= 0x880;
+ tw32(MAC_SERDES_CFG, val);
+ }
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ tw32(MAC_SERDES_CFG, 0x616000);
+ }
+
+ /* Prevent chip from dropping frames when flow control
+ * is enabled.
+ */
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+ /* Use hardware link auto-negotiation */
+ tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
+ }
+
+ err = tg3_setup_phy(tp, 1);
+ if (err)
+ return err;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+ u32 tmp;
+
+ /* Clear CRC stats. */
+ if (!tg3_readphy(tp, 0x1e, &tmp)) {
+ tg3_writephy(tp, 0x1e, tmp | 0x8000);
+ tg3_readphy(tp, 0x14, &tmp);
+ }
+ }
+
+ __tg3_set_rx_mode(tp->dev);
+
+ /* Initialize receive rules. */
+ tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ limit = 8;
+ else
+ limit = 16;
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
+ limit -= 4;
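+ /* The case labels below intentionally fall through so that every
+  * receive rule/value register above the limit gets cleared.
+  */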
+ switch (limit) {
+ case 16:
+ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ case 15:
+ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ case 14:
+ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ case 13:
+ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ case 12:
+ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ case 11:
+ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ case 10:
+ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ case 9:
+ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ case 8:
+ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ case 7:
+ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ case 6:
+ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ case 5:
+ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ case 4:
+ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
+ case 3:
+ /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
+ case 2:
+ case 1:
+ default:
+ break;
+ }
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
+
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+ tg3_enable_ints(tp);
+
+ return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing. Invoked with tp->lock held.
+ */
+static int tg3_init_hw(struct tg3 *tp)
+{
+ int err;
+
+ /* Force the chip into D0. */
+ err = tg3_set_power_state(tp, 0);
+ if (err)
+ goto out;
+
+ tg3_switch_clocks(tp);
+
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ err = tg3_reset_hw(tp);
+
+out:
+ return err;
+}
+
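+/* Accumulate a 32-bit MAC statistics register into a 64-bit software
+ * counter; an unsigned wrap of the low word carries into the high word.
+ */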
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do { u32 __val = tr32(REG); \
+ (PSTAT)->low += __val; \
+ if ((PSTAT)->low < __val) \
+ (PSTAT)->high += 1; \
+} while (0)
+
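+/* Fold the MAC TX/RX statistics registers into the 64-bit counters in
+ * tp->hw_stats.  Called from tg3_timer on 5705+ chips; see the statistics
+ * block comment in tg3_reset_hw.
+ */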
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+ struct tg3_hw_stats *sp = tp->hw_stats;
+
+ if (!netif_carrier_ok(tp->dev))
+ return;
+
+ TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+ TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+ TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+ TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+ TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+ TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+
+ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+ TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+ TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+ TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+ TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+}
+
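+/* Periodic driver timer, armed from tg3_open to fire every tp->timer_offset
+ * jiffies.  It nudges the status block/interrupt handshake, fetches MAC
+ * statistics on 5705+ chips, checks link state once per second and sends
+ * the ASF heartbeat every 120 seconds.  If the write DMA engine has
+ * stopped, the reset task is scheduled instead.
+ */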
+static void tg3_timer(unsigned long __opaque)
+{
+ struct tg3 *tp = (struct tg3 *) __opaque;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ spin_lock(&tp->tx_lock);
+
+ /* All of this garbage is necessary because, when using non-tagged
+  * IRQ status, the mailbox/status_block protocol the chip uses with
+  * the cpu is race prone.
+  */
+ if (tp->hw_status->status & SD_STATUS_UPDATED) {
+ tw32(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ } else {
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+ }
+
+ if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ schedule_work(&tp->reset_task);
+ return;
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ tg3_periodic_fetch_stats(tp);
+
+ /* This part only runs once per second. */
+ if (!--tp->timer_counter) {
+ if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
+ u32 mac_stat;
+ int phy_event;
+
+ mac_stat = tr32(MAC_STATUS);
+
+ phy_event = 0;
+ if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
+ if (mac_stat & MAC_STATUS_MI_INTERRUPT)
+ phy_event = 1;
+ } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
+ phy_event = 1;
+
+ if (phy_event)
+ tg3_setup_phy(tp, 0);
+ } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
+ u32 mac_stat = tr32(MAC_STATUS);
+ int need_setup = 0;
+
+ if (netif_carrier_ok(tp->dev) &&
+ (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
+ need_setup = 1;
+ }
+ if (!netif_carrier_ok(tp->dev) &&
+ (mac_stat & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET))) {
+ need_setup = 1;
+ }
+ if (need_setup) {
+ tw32_f(MAC_MODE,
+ (tp->mac_mode &
+ ~MAC_MODE_PORT_MODE_MASK));
+ udelay(40);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ tg3_setup_phy(tp, 0);
+ }
+ }
+
+ tp->timer_counter = tp->timer_multiplier;
+ }
+
+ /* Heartbeat is only sent once every 120 seconds. */
+ if (!--tp->asf_counter) {
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ u32 val;
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= (1 << 14);
+ tw32(GRC_RX_CPU_EVENT, val);
+ }
+ tp->asf_counter = tp->asf_multiplier;
+ }
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+}
+
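+/* Bring the interface up: allocate the DMA-consistent rings and status
+ * block, grab the shared interrupt, program the hardware via tg3_init_hw,
+ * start the periodic timer, then enable interrupts and the TX queue.
+ */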
+static int tg3_open(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_disable_ints(tp);
+ tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ /* The placement of this call is tied
+ * to the setup and use of Host TX descriptors.
+ */
+ err = tg3_alloc_consistent(tp);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, tg3_interrupt,
+ SA_SHIRQ, dev->name, dev);
+
+ if (err) {
+ tg3_free_consistent(tp);
+ return err;
+ }
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ err = tg3_init_hw(tp);
+ if (err) {
+ tg3_halt(tp);
+ tg3_free_rings(tp);
+ } else {
+ tp->timer_offset = HZ / 10;
+ tp->timer_counter = tp->timer_multiplier = 10;
+ tp->asf_counter = tp->asf_multiplier = (10 * 120);
+
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + tp->timer_offset;
+ tp->timer.data = (unsigned long) tp;
+ tp->timer.function = tg3_timer;
+ add_timer(&tp->timer);
+
+ tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ }
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ if (err) {
+ free_irq(dev->irq, dev);
+ tg3_free_consistent(tp);
+ return err;
+ }
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_enable_ints(tp);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+#if 0
+/*static*/ void tg3_dump_state(struct tg3 *tp)
+{
+ u32 val32, val32_2, val32_3, val32_4, val32_5;
+ u16 val16;
+ int i;
+
+ pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
+ printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
+ val16, val32);
+
+ /* MAC block */
+ printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
+ tr32(MAC_MODE), tr32(MAC_STATUS));
+ printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
+ tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
+ printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
+ tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
+ printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
+ tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
+
+ /* Send data initiator control block */
+ printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
+ tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
+ printk(" SNDDATAI_STATSCTRL[%08x]\n",
+ tr32(SNDDATAI_STATSCTRL));
+
+ /* Send data completion control block */
+ printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
+
+ /* Send BD ring selector block */
+ printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
+ tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
+
+ /* Send BD initiator control block */
+ printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
+ tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
+
+ /* Send BD completion control block */
+ printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
+
+ /* Receive list placement control block */
+ printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
+ tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
+ printk(" RCVLPC_STATSCTRL[%08x]\n",
+ tr32(RCVLPC_STATSCTRL));
+
+ /* Receive data and receive BD initiator control block */
+ printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
+ tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
+
+ /* Receive data completion control block */
+ printk("DEBUG: RCVDCC_MODE[%08x]\n",
+ tr32(RCVDCC_MODE));
+
+ /* Receive BD initiator control block */
+ printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
+ tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
+
+ /* Receive BD completion control block */
+ printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
+ tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
+
+ /* Receive list selector control block */
+ printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
+ tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
+
+ /* Mbuf cluster free block */
+ printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
+ tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
+
+ /* Host coalescing control block */
+ printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
+ tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
+ printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
+ tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
+ tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
+ printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
+ tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
+ tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
+ printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
+ tr32(HOSTCC_STATS_BLK_NIC_ADDR));
+ printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
+ tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
+
+ /* Memory arbiter control block */
+ printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
+ tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
+
+ /* Buffer manager control block */
+ printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
+ tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
+ printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
+ tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
+ printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
+ "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
+ tr32(BUFMGR_DMA_DESC_POOL_ADDR),
+ tr32(BUFMGR_DMA_DESC_POOL_SIZE));
+
+ /* Read DMA control block */
+ printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
+ tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
+
+ /* Write DMA control block */
+ printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
+ tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
+
+ /* DMA completion block */
+ printk("DEBUG: DMAC_MODE[%08x]\n",
+ tr32(DMAC_MODE));
+
+ /* GRC block */
+ printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
+ tr32(GRC_MODE), tr32(GRC_MISC_CFG));
+ printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
+ tr32(GRC_LOCAL_CTRL));
+
+ /* TG3_BDINFOs */
+ printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
+ tr32(RCVDBDI_JUMBO_BD + 0x0),
+ tr32(RCVDBDI_JUMBO_BD + 0x4),
+ tr32(RCVDBDI_JUMBO_BD + 0x8),
+ tr32(RCVDBDI_JUMBO_BD + 0xc));
+ printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
+ tr32(RCVDBDI_STD_BD + 0x0),
+ tr32(RCVDBDI_STD_BD + 0x4),
+ tr32(RCVDBDI_STD_BD + 0x8),
+ tr32(RCVDBDI_STD_BD + 0xc));
+ printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
+ tr32(RCVDBDI_MINI_BD + 0x0),
+ tr32(RCVDBDI_MINI_BD + 0x4),
+ tr32(RCVDBDI_MINI_BD + 0x8),
+ tr32(RCVDBDI_MINI_BD + 0xc));
+
+ tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
+ tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
+ tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
+ tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
+ printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
+ val32, val32_2, val32_3, val32_4);
+
+ tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
+ tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
+ tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
+ tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
+ printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
+ val32, val32_2, val32_3, val32_4);
+
+ tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
+ tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
+ tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
+ tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
+ tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
+ printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
+ val32, val32_2, val32_3, val32_4, val32_5);
+
+ /* SW status block */
+ printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ tp->hw_status->status,
+ tp->hw_status->status_tag,
+ tp->hw_status->rx_jumbo_consumer,
+ tp->hw_status->rx_consumer,
+ tp->hw_status->rx_mini_consumer,
+ tp->hw_status->idx[0].rx_producer,
+ tp->hw_status->idx[0].tx_consumer);
+
+ /* SW statistics block */
+ printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
+ ((u32 *)tp->hw_stats)[0],
+ ((u32 *)tp->hw_stats)[1],
+ ((u32 *)tp->hw_stats)[2],
+ ((u32 *)tp->hw_stats)[3]);
+
+ /* Mailboxes */
+ printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
+ tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
+ tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
+ tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
+ tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
+
+ /* NIC side send descriptors. */
+ for (i = 0; i < 6; i++) {
+ unsigned long txd;
+
+ txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
+ + (i * sizeof(struct tg3_tx_buffer_desc));
+ printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
+ i,
+ readl(txd + 0x0), readl(txd + 0x4),
+ readl(txd + 0x8), readl(txd + 0xc));
+ }
+
+ /* NIC side RX descriptors. */
+ for (i = 0; i < 6; i++) {
+ unsigned long rxd;
+
+ rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
+ + (i * sizeof(struct tg3_rx_buffer_desc));
+ printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
+ i,
+ readl(rxd + 0x0), readl(rxd + 0x4),
+ readl(rxd + 0x8), readl(rxd + 0xc));
+ rxd += (4 * sizeof(u32));
+ printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
+ i,
+ readl(rxd + 0x0), readl(rxd + 0x4),
+ readl(rxd + 0x8), readl(rxd + 0xc));
+ }
+
+ for (i = 0; i < 6; i++) {
+ unsigned long rxd;
+
+ rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
+ + (i * sizeof(struct tg3_rx_buffer_desc));
+ printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
+ i,
+ readl(rxd + 0x0), readl(rxd + 0x4),
+ readl(rxd + 0x8), readl(rxd + 0xc));
+ rxd += (4 * sizeof(u32));
+ printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
+ i,
+ readl(rxd + 0x0), readl(rxd + 0x4),
+ readl(rxd + 0x8), readl(rxd + 0xc));
+ }
+}
+#endif
+
+static struct net_device_stats *tg3_get_stats(struct net_device *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
+
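+/* Bring the interface down: stop the TX queue and timer, halt the chip,
+ * free the rings and the IRQ, and snapshot the counters so statistics
+ * keep accumulating across a close/open cycle.
+ */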
+static int tg3_close(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&tp->timer);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+#if 0
+ tg3_dump_state(tp);
+#endif
+
+ tg3_disable_ints(tp);
+
+ tg3_halt(tp);
+ tg3_free_rings(tp);
+ tp->tg3_flags &=
+ ~(TG3_FLAG_INIT_COMPLETE |
+ TG3_FLAG_GOT_SERDES_FLOWCTL);
+ netif_carrier_off(tp->dev);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ free_irq(dev->irq, dev);
+
+ memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
+ sizeof(tp->net_stats_prev));
+ memcpy(&tp->estats_prev, tg3_get_estats(tp),
+ sizeof(tp->estats_prev));
+
+ tg3_free_consistent(tp);
+
+ return 0;
+}
+
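+/* Read a 64-bit hardware statistics value; on 32-bit hosts only the low
+ * word is returned since unsigned long cannot hold the full counter.
+ */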
+static inline unsigned long get_stat64(tg3_stat64_t *val)
+{
+ unsigned long ret;
+
+#if (BITS_PER_LONG == 32)
+ ret = val->low;
+#else
+ ret = ((u64)val->high << 32) | ((u64)val->low);
+#endif
+ return ret;
+}
+
+static unsigned long calc_crc_errors(struct tg3 *tp)
+{
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (!tg3_readphy(tp, 0x1e, &val)) {
+ tg3_writephy(tp, 0x1e, val | 0x8000);
+ tg3_readphy(tp, 0x14, &val);
+ } else
+ val = 0;
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ tp->phy_crc_errors += val;
+
+ return tp->phy_crc_errors;
+ }
+
+ return get_stat64(&hw_stats->rx_fcs_errors);
+}
+
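+/* Add the current hardware counter to the snapshot taken at the last
+ * close so the ethtool statistics are cumulative across down/up cycles.
+ */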
+#define ESTAT_ADD(member) \
+ estats->member = old_estats->member + \
+ get_stat64(&hw_stats->member)
+
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+{
+ struct tg3_ethtool_stats *estats = &tp->estats;
+ struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_estats;
+
+ ESTAT_ADD(rx_octets);
+ ESTAT_ADD(rx_fragments);
+ ESTAT_ADD(rx_ucast_packets);
+ ESTAT_ADD(rx_mcast_packets);
+ ESTAT_ADD(rx_bcast_packets);
+ ESTAT_ADD(rx_fcs_errors);
+ ESTAT_ADD(rx_align_errors);
+ ESTAT_ADD(rx_xon_pause_rcvd);
+ ESTAT_ADD(rx_xoff_pause_rcvd);
+ ESTAT_ADD(rx_mac_ctrl_rcvd);
+ ESTAT_ADD(rx_xoff_entered);
+ ESTAT_ADD(rx_frame_too_long_errors);
+ ESTAT_ADD(rx_jabbers);
+ ESTAT_ADD(rx_undersize_packets);
+ ESTAT_ADD(rx_in_length_errors);
+ ESTAT_ADD(rx_out_length_errors);
+ ESTAT_ADD(rx_64_or_less_octet_packets);
+ ESTAT_ADD(rx_65_to_127_octet_packets);
+ ESTAT_ADD(rx_128_to_255_octet_packets);
+ ESTAT_ADD(rx_256_to_511_octet_packets);
+ ESTAT_ADD(rx_512_to_1023_octet_packets);
+ ESTAT_ADD(rx_1024_to_1522_octet_packets);
+ ESTAT_ADD(rx_1523_to_2047_octet_packets);
+ ESTAT_ADD(rx_2048_to_4095_octet_packets);
+ ESTAT_ADD(rx_4096_to_8191_octet_packets);
+ ESTAT_ADD(rx_8192_to_9022_octet_packets);
+
+ ESTAT_ADD(tx_octets);
+ ESTAT_ADD(tx_collisions);
+ ESTAT_ADD(tx_xon_sent);
+ ESTAT_ADD(tx_xoff_sent);
+ ESTAT_ADD(tx_flow_control);
+ ESTAT_ADD(tx_mac_errors);
+ ESTAT_ADD(tx_single_collisions);
+ ESTAT_ADD(tx_mult_collisions);
+ ESTAT_ADD(tx_deferred);
+ ESTAT_ADD(tx_excessive_collisions);
+ ESTAT_ADD(tx_late_collisions);
+ ESTAT_ADD(tx_collide_2times);
+ ESTAT_ADD(tx_collide_3times);
+ ESTAT_ADD(tx_collide_4times);
+ ESTAT_ADD(tx_collide_5times);
+ ESTAT_ADD(tx_collide_6times);
+ ESTAT_ADD(tx_collide_7times);
+ ESTAT_ADD(tx_collide_8times);
+ ESTAT_ADD(tx_collide_9times);
+ ESTAT_ADD(tx_collide_10times);
+ ESTAT_ADD(tx_collide_11times);
+ ESTAT_ADD(tx_collide_12times);
+ ESTAT_ADD(tx_collide_13times);
+ ESTAT_ADD(tx_collide_14times);
+ ESTAT_ADD(tx_collide_15times);
+ ESTAT_ADD(tx_ucast_packets);
+ ESTAT_ADD(tx_mcast_packets);
+ ESTAT_ADD(tx_bcast_packets);
+ ESTAT_ADD(tx_carrier_sense_errors);
+ ESTAT_ADD(tx_discards);
+ ESTAT_ADD(tx_errors);
+
+ ESTAT_ADD(dma_writeq_full);
+ ESTAT_ADD(dma_write_prioq_full);
+ ESTAT_ADD(rxbds_empty);
+ ESTAT_ADD(rx_discards);
+ ESTAT_ADD(rx_errors);
+ ESTAT_ADD(rx_threshold_hit);
+
+ ESTAT_ADD(dma_readq_full);
+ ESTAT_ADD(dma_read_prioq_full);
+ ESTAT_ADD(tx_comp_queue_full);
+
+ ESTAT_ADD(ring_set_send_prod_index);
+ ESTAT_ADD(ring_status_update);
+ ESTAT_ADD(nic_irqs);
+ ESTAT_ADD(nic_avoided_irqs);
+ ESTAT_ADD(nic_tx_threshold_hit);
+
+ return estats;
+}
+
+static struct net_device_stats *tg3_get_stats(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct net_device_stats *stats = &tp->net_stats;
+ struct net_device_stats *old_stats = &tp->net_stats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_stats;
+
+ stats->rx_packets = old_stats->rx_packets +
+ get_stat64(&hw_stats->rx_ucast_packets) +
+ get_stat64(&hw_stats->rx_mcast_packets) +
+ get_stat64(&hw_stats->rx_bcast_packets);
+
+ stats->tx_packets = old_stats->tx_packets +
+ get_stat64(&hw_stats->tx_ucast_packets) +
+ get_stat64(&hw_stats->tx_mcast_packets) +
+ get_stat64(&hw_stats->tx_bcast_packets);
+
+ stats->rx_bytes = old_stats->rx_bytes +
+ get_stat64(&hw_stats->rx_octets);
+ stats->tx_bytes = old_stats->tx_bytes +
+ get_stat64(&hw_stats->tx_octets);
+
+ stats->rx_errors = old_stats->rx_errors +
+ get_stat64(&hw_stats->rx_errors) +
+ get_stat64(&hw_stats->rx_discards);
+ stats->tx_errors = old_stats->tx_errors +
+ get_stat64(&hw_stats->tx_errors) +
+ get_stat64(&hw_stats->tx_mac_errors) +
+ get_stat64(&hw_stats->tx_carrier_sense_errors) +
+ get_stat64(&hw_stats->tx_discards);
+
+ stats->multicast = old_stats->multicast +
+ get_stat64(&hw_stats->rx_mcast_packets);
+ stats->collisions = old_stats->collisions +
+ get_stat64(&hw_stats->tx_collisions);
+
+ stats->rx_length_errors = old_stats->rx_length_errors +
+ get_stat64(&hw_stats->rx_frame_too_long_errors) +
+ get_stat64(&hw_stats->rx_undersize_packets);
+
+ stats->rx_over_errors = old_stats->rx_over_errors +
+ get_stat64(&hw_stats->rxbds_empty);
+ stats->rx_frame_errors = old_stats->rx_frame_errors +
+ get_stat64(&hw_stats->rx_align_errors);
+ stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+ get_stat64(&hw_stats->tx_discards);
+ stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+ get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+ stats->rx_crc_errors = old_stats->rx_crc_errors +
+ calc_crc_errors(tp);
+
+ return stats;
+}
+
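+/* Bit-by-bit CRC-32 using the reflected Ethernet polynomial 0xedb88320;
+ * used by __tg3_set_rx_mode below to hash multicast addresses into the
+ * 128-bit hash filter.
+ */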
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+ u32 reg;
+ u32 tmp;
+ int j, k;
+
+ reg = 0xffffffff;
+
+ for (j = 0; j < len; j++) {
+ reg ^= buf[j];
+
+ for (k = 0; k < 8; k++) {
+ tmp = reg & 0x01;
+
+ reg >>= 1;
+
+ if (tmp) {
+ reg ^= 0xedb88320;
+ }
+ }
+ }
+
+ return ~reg;
+}
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+ /* accept or reject all multicast frames */
+ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
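+/* Program the MAC receive filters (promiscuous, all-multicast or the
+ * 128-bit multicast hash spread across the four MAC_HASH_REG registers)
+ * from dev->flags and the multicast list.  The caller must hold tp->lock.
+ */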
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 rx_mode;
+
+ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+ RX_MODE_KEEP_VLAN_TAG);
+
+ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+ * flag clear.
+ */
+#if TG3_VLAN_TAG_USED
+ if (!tp->vlgrp &&
+ !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#else
+ /* By definition, VLAN is always disabled in this
+  * case.
+  */
+ if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode. */
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast. */
+ tg3_set_multi(tp, 1);
+ } else if (dev->mc_count < 1) {
+ /* Reject all multicast. */
+ tg3_set_multi(tp, 0);
+ } else {
+ /* Accept one or more multicast(s). */
+ struct dev_mc_list *mclist;
+ unsigned int i;
+ u32 mc_filter[4] = { 0, };
+ u32 regidx;
+ u32 bit;
+ u32 crc;
+
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+
+ crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
+ bit = ~crc & 0x7f;
+ regidx = (bit & 0x60) >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ tw32(MAC_HASH_REG_0, mc_filter[0]);
+ tw32(MAC_HASH_REG_1, mc_filter[1]);
+ tw32(MAC_HASH_REG_2, mc_filter[2]);
+ tw32(MAC_HASH_REG_3, mc_filter[3]);
+ }
+
+ if (rx_mode != tp->rx_mode) {
+ tp->rx_mode = rx_mode;
+ tw32_f(MAC_RX_MODE, rx_mode);
+ udelay(10);
+ }
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+ __tg3_set_rx_mode(dev);
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+}
+
+#define TG3_REGDUMP_LEN (32 * 1024)
+
+static int tg3_get_regs_len(struct net_device *dev)
+{
+ return TG3_REGDUMP_LEN;
+}
+
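+/* ethtool register dump: copy each register block into the 32kB buffer
+ * at its native offset, zero-filling the gaps, while holding tp->lock and
+ * tp->tx_lock.
+ */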
+static void tg3_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p;
+ struct tg3 *tp = netdev_priv(dev);
+ u8 *orig_p = _p;
+ int i;
+
+ regs->version = 0;
+
+ memset(p, 0, TG3_REGDUMP_LEN);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+#define __GET_REG32(reg) (*(p)++ = tr32(reg))
+#define GET_REG32_LOOP(base,len) \
+do { p = (u32 *)(orig_p + (base)); \
+ for (i = 0; i < len; i += 4) \
+ __GET_REG32((base) + i); \
+} while (0)
+#define GET_REG32_1(reg) \
+do { p = (u32 *)(orig_p + (reg)); \
+ __GET_REG32((reg)); \
+} while (0)
+
+ GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
+ GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
+ GET_REG32_LOOP(MAC_MODE, 0x4f0);
+ GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
+ GET_REG32_1(SNDDATAC_MODE);
+ GET_REG32_LOOP(SNDBDS_MODE, 0x80);
+ GET_REG32_LOOP(SNDBDI_MODE, 0x48);
+ GET_REG32_1(SNDBDC_MODE);
+ GET_REG32_LOOP(RCVLPC_MODE, 0x20);
+ GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
+ GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
+ GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
+ GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
+ GET_REG32_1(RCVDCC_MODE);
+ GET_REG32_LOOP(RCVBDI_MODE, 0x20);
+ GET_REG32_LOOP(RCVCC_MODE, 0x14);
+ GET_REG32_LOOP(RCVLSC_MODE, 0x08);
+ GET_REG32_1(MBFREE_MODE);
+ GET_REG32_LOOP(HOSTCC_MODE, 0x100);
+ GET_REG32_LOOP(MEMARB_MODE, 0x10);
+ GET_REG32_LOOP(BUFMGR_MODE, 0x58);
+ GET_REG32_LOOP(RDMAC_MODE, 0x08);
+ GET_REG32_LOOP(WDMAC_MODE, 0x08);
+ GET_REG32_LOOP(RX_CPU_BASE, 0x280);
+ GET_REG32_LOOP(TX_CPU_BASE, 0x280);
+ GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
+ GET_REG32_LOOP(FTQ_RESET, 0x120);
+ GET_REG32_LOOP(MSGINT_MODE, 0x0c);
+ GET_REG32_1(DMAC_MODE);
+ GET_REG32_LOOP(GRC_MODE, 0x4c);
+ if (tp->tg3_flags & TG3_FLAG_NVRAM)
+ GET_REG32_LOOP(NVRAM_CMD, 0x24);
+
+#undef __GET_REG32
+#undef GET_REG32_LOOP
+#undef GET_REG32_1
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+}
+
+static int tg3_get_eeprom_len(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ return tp->nvram_size;
+}
+
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
+
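+/* ethtool EEPROM read: NVRAM is accessed one 32-bit word at a time, so a
+ * request that is not 4-byte aligned at either end is satisfied by reading
+ * the surrounding words and copying out only the bytes asked for.
+ */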
+static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u8 *pd;
+ u32 i, offset, len, val, b_offset, b_count;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = TG3_EEPROM_MAGIC;
+
+ if (offset & 3) {
+ /* adjustments to start on required 4 byte boundary */
+ b_offset = offset & 3;
+ b_count = 4 - b_offset;
+ if (b_count > len) {
+ /* i.e. offset=1 len=2 */
+ b_count = len;
+ }
+ ret = tg3_nvram_read(tp, offset-b_offset, &val);
+ if (ret)
+ return ret;
+ val = cpu_to_le32(val);
+ memcpy(data, ((char*)&val) + b_offset, b_count);
+ len -= b_count;
+ offset += b_count;
+ eeprom->len += b_count;
+ }
+
+ /* read bytes upto the last 4 byte boundary */
+ pd = &data[eeprom->len];
+ for (i = 0; i < (len - (len & 3)); i += 4) {
+ ret = tg3_nvram_read(tp, offset + i, &val);
+ if (ret) {
+ eeprom->len += i;
+ return ret;
+ }
+ val = cpu_to_le32(val);
+ memcpy(pd + i, &val, 4);
+ }
+ eeprom->len += i;
+
+ if (len & 3) {
+ /* read last bytes not ending on 4 byte boundary */
+ pd = &data[eeprom->len];
+ b_count = len & 3;
+ b_offset = offset + len - b_count;
+ ret = tg3_nvram_read(tp, b_offset, &val);
+ if (ret)
+ return ret;
+ val = cpu_to_le32(val);
+ memcpy(pd, ((char*)&val), b_count);
+ eeprom->len += b_count;
+ }
+ return 0;
+}
+
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u32 offset, len, b_offset, odd_len, start, end;
+ u8 *buf;
+
+ if (eeprom->magic != TG3_EEPROM_MAGIC)
+ return -EINVAL;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+
+ if ((b_offset = (offset & 3))) {
+ /* adjustments to start on required 4 byte boundary */
+ ret = tg3_nvram_read(tp, offset-b_offset, &start);
+ if (ret)
+ return ret;
+ start = cpu_to_le32(start);
+ len += b_offset;
+ offset &= ~3;
+ }
+
+ odd_len = 0;
+ if ((len & 3) && ((len > 4) || (b_offset == 0))) {
+ /* adjustments to end on required 4 byte boundary */
+ odd_len = 1;
+ len = (len + 3) & ~3;
+ ret = tg3_nvram_read(tp, offset+len-4, &end);
+ if (ret)
+ return ret;
+ end = cpu_to_le32(end);
+ }
+
+ buf = data;
+ if (b_offset || odd_len) {
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == 0)
+ return -ENOMEM;
+ if (b_offset)
+ memcpy(buf, &start, 4);
+ if (odd_len)
+ memcpy(buf+len-4, &end, 4);
+ memcpy(buf + b_offset, data, eeprom->len);
+ }
+
+ ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+ if (buf != data)
+ kfree(buf);
+
+ return ret;
+}
+
+static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ cmd->supported = (SUPPORTED_Autoneg);
+
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
+ cmd->supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
+ else
+ cmd->supported |= SUPPORTED_FIBRE;
+
+ cmd->advertising = tp->link_config.advertising;
+ if (netif_running(dev)) {
+ cmd->speed = tp->link_config.active_speed;
+ cmd->duplex = tp->link_config.active_duplex;
+ }
+ cmd->port = 0;
+ cmd->phy_address = PHY_ADDR;
+ cmd->transceiver = 0;
+ cmd->autoneg = tp->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ /* These are the only valid advertisement bits allowed. */
+ if (cmd->autoneg == AUTONEG_ENABLE &&
+ (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE)))
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tp->link_config.autoneg = cmd->autoneg;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = cmd->advertising;
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ } else {
+ tp->link_config.advertising = 0;
+ tp->link_config.speed = cmd->speed;
+ tp->link_config.duplex = cmd->duplex;
+ }
+
+ if (netif_running(dev))
+ tg3_setup_phy(tp, 1);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pdev));
+}
+
+static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
+ wol->wolopts = WAKE_MAGIC;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ if ((wol->wolopts & WAKE_MAGIC) &&
+ tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
+ !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
+ return -EINVAL;
+
+ spin_lock_irq(&tp->lock);
+ if (wol->wolopts & WAKE_MAGIC)
+ tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static u32 tg3_get_msglevel(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ return tp->msg_enable;
+}
+
+static void tg3_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ tp->msg_enable = value;
+}
+
+#if TG3_TSO_SUPPORT != 0
+static int tg3_set_tso(struct net_device *dev, u32 value)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+ if (value)
+ return -EINVAL;
+ return 0;
+ }
+ return ethtool_op_set_tso(dev, value);
+}
+#endif
+
+static int tg3_nway_reset(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 bmcr;
+ int r;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_irq(&tp->lock);
+ r = -EINVAL;
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+ (bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
+ r = 0;
+ }
+ spin_unlock_irq(&tp->lock);
+
+ return r;
+}
+
+static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+
+ ering->rx_pending = tp->rx_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+ ering->tx_pending = tp->tx_pending;
+}
+
+static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
+ (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+ (ering->tx_pending > TG3_TX_RING_SIZE - 1))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tp->rx_pending = ering->rx_pending;
+
+ if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+ tp->tx_pending = ering->tx_pending;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp);
+ tg3_init_hw(tp);
+ tg3_netif_start(tp);
+ }
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
+ epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
+}
+
+static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+ if (epause->autoneg)
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+ if (epause->rx_pause)
+ tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
+ if (epause->tx_pause)
+ tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp);
+ tg3_init_hw(tp);
+ tg3_netif_start(tp);
+ }
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static u32 tg3_get_rx_csum(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
+}
+
+static int tg3_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+ if (data != 0)
+ return -EINVAL;
+ return 0;
+ }
+
+ spin_lock_irq(&tp->lock);
+ if (data)
+ tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static int tg3_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+ if (data != 0)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
+static int tg3_get_stats_count (struct net_device *dev)
+{
+ return TG3_NUM_STATS;
+}
+
+static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ WARN_ON(1); /* we need a WARN() */
+ break;
+ }
+}
+
+static void tg3_get_ethtool_stats (struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+}
+
+static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = PHY_ADDR;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u32 mii_regval;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ break; /* We have no PHY */
+
+ spin_lock_irq(&tp->lock);
+ err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ spin_unlock_irq(&tp->lock);
+
+ data->val_out = mii_regval;
+
+ return err;
+ }
+
+ case SIOCSMIIREG:
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ break; /* We have no PHY */
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ spin_lock_irq(&tp->lock);
+ err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irq(&tp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+#if TG3_VLAN_TAG_USED
+static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tp->vlgrp = grp;
+
+ /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
+ __tg3_set_rx_mode(dev);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+}
+
+static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+ if (tp->vlgrp)
+ tp->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+}
+#endif
+
+static struct ethtool_ops tg3_ethtool_ops = {
+ .get_settings = tg3_get_settings,
+ .set_settings = tg3_set_settings,
+ .get_drvinfo = tg3_get_drvinfo,
+ .get_regs_len = tg3_get_regs_len,
+ .get_regs = tg3_get_regs,
+ .get_wol = tg3_get_wol,
+ .set_wol = tg3_set_wol,
+ .get_msglevel = tg3_get_msglevel,
+ .set_msglevel = tg3_set_msglevel,
+ .nway_reset = tg3_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = tg3_get_eeprom_len,
+ .get_eeprom = tg3_get_eeprom,
+ .set_eeprom = tg3_set_eeprom,
+ .get_ringparam = tg3_get_ringparam,
+ .set_ringparam = tg3_set_ringparam,
+ .get_pauseparam = tg3_get_pauseparam,
+ .set_pauseparam = tg3_set_pauseparam,
+ .get_rx_csum = tg3_get_rx_csum,
+ .set_rx_csum = tg3_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = tg3_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#if TG3_TSO_SUPPORT != 0
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = tg3_set_tso,
+#endif
+ .get_strings = tg3_get_strings,
+ .get_stats_count = tg3_get_stats_count,
+ .get_ethtool_stats = tg3_get_ethtool_stats,
+};
+
+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+{
+ u32 cursize, val;
+
+ tp->nvram_size = EEPROM_CHIP_SIZE;
+
+ if (tg3_nvram_read(tp, 0, &val) != 0)
+ return;
+
+ if (swab32(val) != TG3_EEPROM_MAGIC)
+ return;
+
+ /*
+ * Size the chip by reading offsets at increasing powers of two.
+ * When we encounter our validation signature, we know the addressing
+ * has wrapped around, and thus have our chip size.
+ */
+ cursize = 0x800;
+
+ while (cursize < tp->nvram_size) {
+ if (tg3_nvram_read(tp, cursize, &val) != 0)
+ return;
+
+ if (swab32(val) == TG3_EEPROM_MAGIC)
+ break;
+
+ cursize <<= 1;
+ }
+
+ tp->nvram_size = cursize;
+}
+
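+/* The NVRAM advertises its size (in KB) in the upper 16 bits of the
+ * word at offset 0xf0; if that word reads as zero, assume 128KB.
+ */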
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+ if (val != 0) {
+ tp->nvram_size = (val >> 16) * 1024;
+ return;
+ }
+ }
+ tp->nvram_size = 0x20000;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ }
+ else {
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_ATMEL_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_ST:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ break;
+ case FLASH_VENDOR_SAIFUN:
+ tp->nvram_jedecnum = JEDEC_SAIFUN;
+ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_SST_SMALL:
+ case FLASH_VENDOR_SST_LARGE:
+ tp->nvram_jedecnum = JEDEC_SST;
+ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+ break;
+ }
+ }
+ else {
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ }
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void __devinit tg3_nvram_init(struct tg3 *tp)
+{
+ int j;
+
+ if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
+ return;
+
+ tw32_f(GRC_EEPROM_ADDR,
+ (EEPROM_ADDR_FSM_RESET |
+ (EEPROM_DEFAULT_CLOCK_PERIOD <<
+ EEPROM_ADDR_CLKPERD_SHIFT)));
+
+ /* XXX schedule_timeout() ... */
+ for (j = 0; j < 100; j++)
+ udelay(10);
+
+ /* Enable seeprom accesses. */
+ tw32_f(GRC_LOCAL_CTRL,
+ tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
+ udelay(100);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ tp->tg3_flags |= TG3_FLAG_NVRAM;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+ }
+
+ tg3_get_nvram_info(tp);
+ tg3_get_nvram_size(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+ }
+
+ } else {
+ tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
+
+ tg3_get_eeprom_size(tp);
+ }
+}
+
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 *val)
+{
+ u32 tmp;
+ int i;
+
+ if (offset > EEPROM_ADDR_ADDR_MASK ||
+ (offset % 4) != 0)
+ return -EINVAL;
+
+ tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+ EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR,
+ tmp |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+ EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+ for (i = 0; i < 10000; i++) {
+ tmp = tr32(GRC_EEPROM_ADDR);
+
+ if (tmp & EEPROM_ADDR_COMPLETE)
+ break;
+ udelay(100);
+ }
+ if (!(tmp & EEPROM_ADDR_COMPLETE))
+ return -EBUSY;
+
+ *val = tr32(GRC_EEPROM_DATA);
+ return 0;
+}
+
+#define NVRAM_CMD_TIMEOUT 10000
+
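+/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, giving up with
+ * -EBUSY after NVRAM_CMD_TIMEOUT polls.
+ */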
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+ int i;
+
+ tw32(NVRAM_CMD, nvram_cmd);
+ for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+ udelay(10);
+ if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+ udelay(10);
+ break;
+ }
+ }
+ if (i == NVRAM_CMD_TIMEOUT) {
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int ret;
+
+ if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+ printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
+ return -EINVAL;
+ }
+
+ if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
+ return tg3_nvram_read_using_eeprom(tp, offset, val);
+
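+	/* Buffered Atmel flash is addressed as (page << ATMEL_AT45DB0X1B_PAGE_POS)
+	 * plus the byte offset within the page, so translate the linear offset.
+	 */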
+ if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+ (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL)) {
+
+ offset = ((offset / tp->nvram_pagesize) <<
+ ATMEL_AT45DB0X1B_PAGE_POS) +
+ (offset % tp->nvram_pagesize);
+ }
+
+ if (offset > NVRAM_ADDR_MSK)
+ return -EINVAL;
+
+ tg3_nvram_lock(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+ }
+
+ tw32(NVRAM_ADDR, offset);
+ ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+ if (ret == 0)
+ *val = swab32(tr32(NVRAM_RDDATA));
+
+ tg3_nvram_unlock(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+ }
+
+ return ret;
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 len, u8 *buf)
+{
+ int i, j, rc = 0;
+ u32 val;
+
+ for (i = 0; i < len; i += 4) {
+ u32 addr, data;
+
+ addr = offset + i;
+
+ memcpy(&data, buf + i, 4);
+
+ tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
+
+ val = tr32(GRC_EEPROM_ADDR);
+ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR, val |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ (addr & EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_START |
+ EEPROM_ADDR_WRITE);
+
+ for (j = 0; j < 10000; j++) {
+ val = tr32(GRC_EEPROM_ADDR);
+
+ if (val & EEPROM_ADDR_COMPLETE)
+ break;
+ udelay(100);
+ }
+ if (!(val & EEPROM_ADDR_COMPLETE)) {
+ rc = -EBUSY;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int ret = 0;
+ u32 pagesize = tp->nvram_pagesize;
+ u32 pagemask = pagesize - 1;
+ u32 nvram_cmd;
+ u8 *tmp;
+
+ tmp = kmalloc(pagesize, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ while (len) {
+ int j;
+ u32 phy_addr, page_off, size, nvaccess;
+
+ phy_addr = offset & ~pagemask;
+
+ for (j = 0; j < pagesize; j += 4) {
+ if ((ret = tg3_nvram_read(tp, phy_addr + j,
+ (u32 *) (tmp + j))))
+ break;
+ }
+ if (ret)
+ break;
+
+ page_off = offset & pagemask;
+ size = pagesize;
+ if (len < size)
+ size = len;
+
+ len -= size;
+
+ memcpy(tmp + page_off, buf, size);
+
+ offset = offset + (pagesize - page_off);
+
+ nvaccess = tr32(NVRAM_ACCESS);
+ tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+
+ /*
+ * Before we can erase the flash page, we need
+ * to issue a special "write enable" command.
+ */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Erase the target page */
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Issue another write enable to start the write. */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ for (j = 0; j < pagesize; j += 4) {
+ u32 data;
+
+ data = *((u32 *) (tmp + j));
+ tw32(NVRAM_WRDATA, cpu_to_be32(data));
+
+ tw32(NVRAM_ADDR, phy_addr + j);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+ NVRAM_CMD_WR;
+
+ if (j == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ else if (j == (pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ if (ret)
+ break;
+ }
+
+ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+ tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+ kfree(tmp);
+
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < len; i += 4, offset += 4) {
+ u32 data, page_off, phy_addr, nvram_cmd;
+
+ memcpy(&data, buf + i, 4);
+ tw32(NVRAM_WRDATA, cpu_to_be32(data));
+
+ page_off = offset % tp->nvram_pagesize;
+
+ if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL)) {
+
+ phy_addr = ((offset / tp->nvram_pagesize) <<
+ ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
+ }
+ else {
+ phy_addr = offset;
+ }
+
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+ if ((page_off == 0) || (i == 0))
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ else if (page_off == (tp->nvram_pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if (i == (len - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if ((tp->nvram_jedecnum == JEDEC_ST) &&
+ (nvram_cmd & NVRAM_CMD_FIRST)) {
+
+ if ((ret = tg3_nvram_exec_cmd(tp,
+ NVRAM_CMD_WREN | NVRAM_CMD_GO |
+ NVRAM_CMD_DONE)))
+
+ break;
+ }
+ if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+ /* We always do complete word writes to eeprom. */
+ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+ }
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+ int ret;
+
+ if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+ printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
+ return -EINVAL;
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ GRC_LCLCTRL_GPIO_OE1);
+ udelay(40);
+ }
+
+ if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
+ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+ }
+ else {
+ u32 grc_mode;
+
+ tg3_nvram_lock(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+
+ tw32(NVRAM_WRITE1, 0x406);
+ }
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+ if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
+ !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+
+ ret = tg3_nvram_write_block_buffered(tp, offset, len,
+ buf);
+ }
+ else {
+ ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+ buf);
+ }
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+ }
+ tg3_nvram_unlock(tp);
+ }
+
+ if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
+ udelay(40);
+ }
+
+ return ret;
+}
+
+struct subsys_tbl_ent {
+ u16 subsys_vendor, subsys_devid;
+ u32 phy_id;
+};
+
+static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
+ /* Broadcom boards. */
+ { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
+ { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
+ { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
+ { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
+
+ /* 3com boards. */
+ { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
+ { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
+ { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
+ { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
+ { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
+
+ /* DELL boards. */
+ { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
+ { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
+ { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
+ { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
+
+ /* Compaq boards. */
+ { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
+ { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
+ { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
+ { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
+ { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
+
+ /* IBM boards. */
+ { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
+};
+
+static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
+ if ((subsys_id_to_phy_id[i].subsys_vendor ==
+ tp->pdev->subsystem_vendor) &&
+ (subsys_id_to_phy_id[i].subsys_devid ==
+ tp->pdev->subsystem_device))
+ return &subsys_id_to_phy_id[i];
+ }
+ return NULL;
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+ u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
+ u32 hw_phy_id, hw_phy_id_masked;
+ u32 val;
+ int eeprom_signature_found, eeprom_phy_serdes, err;
+
+ tp->phy_id = PHY_ID_INVALID;
+ eeprom_phy_id = PHY_ID_INVALID;
+ eeprom_phy_serdes = 0;
+ eeprom_signature_found = 0;
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg, led_cfg;
+ u32 nic_phy_id, ver, cfg2 = 0;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ tp->nic_sram_data_cfg = nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+ ver >>= NIC_SRAM_DATA_VER_SHIFT;
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
+ (ver > 0) && (ver < 0x100))
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
+
+ eeprom_signature_found = 1;
+
+ if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
+ NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+ eeprom_phy_serdes = 1;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+ if (nic_phy_id != 0) {
+ u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+ u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+
+ eeprom_phy_id = (id1 >> 16) << 10;
+ eeprom_phy_id |= (id2 & 0xfc00) << 16;
+ eeprom_phy_id |= (id2 & 0x03ff) << 0;
+ } else
+ eeprom_phy_id = 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+ SHASTA_EXT_LED_MODE_MASK);
+ } else
+ led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
+
+ switch (led_cfg) {
+ default:
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_MAC;
+ break;
+
+ case SHASTA_EXT_LED_SHARED:
+ tp->led_ctrl = LED_CTRL_MODE_SHARED;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+ case SHASTA_EXT_LED_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+ break;
+
+ case SHASTA_EXT_LED_COMBO:
+ tp->led_ctrl = LED_CTRL_MODE_COMBO;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+		}
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
+ (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
+ tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+ }
+ if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
+ tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
+
+ if (cfg2 & (1 << 17))
+ tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
+
+		/* SerDes signal pre-emphasis in register 0x590 is set by the
+		 * bootcode if bit 18 is set.
+		 */
+ if (cfg2 & (1 << 18))
+ tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
+ }
+
+ /* Reading the PHY ID register can conflict with ASF
+	 * firmware access to the PHY hardware.
+ */
+ err = 0;
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
+ } else {
+ /* Now read the physical PHY_ID from the chip and verify
+ * that it is sane. If it doesn't look good, we fall back
+ * to either the hard-coded table based PHY_ID and failing
+ * that the value found in the eeprom area.
+ */
+ err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+ err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
+ hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
+ hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+ hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
+
+ hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
+ }
+
+ if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
+ tp->phy_id = hw_phy_id;
+ if (hw_phy_id_masked == PHY_ID_BCM8002)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+ } else {
+ if (eeprom_signature_found) {
+ tp->phy_id = eeprom_phy_id;
+ if (eeprom_phy_serdes)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+ } else {
+ struct subsys_tbl_ent *p;
+
+ /* No eeprom signature? Try the hardcoded
+ * subsys device table.
+ */
+ p = lookup_by_subsys(tp);
+ if (!p)
+ return -ENODEV;
+
+ tp->phy_id = p->phy_id;
+ if (!tp->phy_id ||
+ tp->phy_id == PHY_ID_BCM8002)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+ }
+ }
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+ u32 bmsr, adv_reg, tg3_ctrl;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ goto skip_phy_reset;
+
+ err = tg3_phy_reset(tp);
+ if (err)
+ return err;
+
+ adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ tg3_ctrl = 0;
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+ tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
+ MII_TG3_CTRL_ADV_1000_FULL);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
+ MII_TG3_CTRL_ENABLE_AS_MASTER);
+ }
+
+ if (!tg3_copper_is_advertising_all(tp)) {
+ tg3_writephy(tp, MII_ADVERTISE, adv_reg);
+
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+ tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
+
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+ tg3_phy_set_wirespeed(tp);
+
+ tg3_writephy(tp, MII_ADVERTISE, adv_reg);
+ if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+ tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
+ }
+
+skip_phy_reset:
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+ }
+
+ if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
+ err = tg3_init_5401phy_dsp(tp);
+ }
+
+ if (!eeprom_signature_found)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ tp->link_config.advertising =
+ (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+ tp->link_config.advertising &=
+ ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ return err;
+}
+
+static void __devinit tg3_read_partno(struct tg3 *tp)
+{
+ unsigned char vpd_data[256];
+ int i;
+
+ if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+ /* Sun decided not to put the necessary bits in the
+ * NVRAM of their onboard tg3 parts :(
+ */
+ strcpy(tp->board_part_number, "Sun 570X");
+ return;
+ }
+
+ for (i = 0; i < 256; i += 4) {
+ u32 tmp;
+
+ if (tg3_nvram_read(tp, 0x100 + i, &tmp))
+ goto out_not_found;
+
+ vpd_data[i + 0] = ((tmp >> 0) & 0xff);
+ vpd_data[i + 1] = ((tmp >> 8) & 0xff);
+ vpd_data[i + 2] = ((tmp >> 16) & 0xff);
+ vpd_data[i + 3] = ((tmp >> 24) & 0xff);
+ }
+
+ /* Now parse and find the part number. */
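+	/* The data is standard PCI VPD: skip identifier-string (0x82) and
+	 * VPD-W (0x91) resources, then search the VPD-R (0x90) resource for
+	 * the "PN" keyword.
+	 */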
+ for (i = 0; i < 256; ) {
+ unsigned char val = vpd_data[i];
+ int block_end;
+
+ if (val == 0x82 || val == 0x91) {
+ i = (i + 3 +
+ (vpd_data[i + 1] +
+ (vpd_data[i + 2] << 8)));
+ continue;
+ }
+
+ if (val != 0x90)
+ goto out_not_found;
+
+ block_end = (i + 3 +
+ (vpd_data[i + 1] +
+ (vpd_data[i + 2] << 8)));
+ i += 3;
+ while (i < block_end) {
+ if (vpd_data[i + 0] == 'P' &&
+ vpd_data[i + 1] == 'N') {
+ int partno_len = vpd_data[i + 2];
+
+ if (partno_len > 24)
+ goto out_not_found;
+
+ memcpy(tp->board_part_number,
+ &vpd_data[i + 3],
+ partno_len);
+
+ /* Success. */
+ return;
+			}
+
+			/* Move past this keyword header and its data. */
+			i += 3 + vpd_data[i + 2];
+		}
+
+ /* Part number not found. */
+ goto out_not_found;
+ }
+
+out_not_found:
+ strcpy(tp->board_part_number, "none");
+}
+
+#ifdef CONFIG_SPARC64
+static int __devinit tg3_is_sun_570X(struct tg3 *tp)
+{
+ struct pci_dev *pdev = tp->pdev;
+ struct pcidev_cookie *pcp = pdev->sysdata;
+
+ if (pcp != NULL) {
+ int node = pcp->prom_node;
+ u32 venid;
+ int err;
+
+ err = prom_getproperty(node, "subsystem-vendor-id",
+ (char *) &venid, sizeof(venid));
+ if (err == 0 || err == -1)
+ return 0;
+ if (venid == PCI_VENDOR_ID_SUN)
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+ static struct pci_device_id write_reorder_chipsets[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801AA_8) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801AB_8) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801BA_11) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801BA_6) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { },
+ };
+ u32 misc_ctrl_reg;
+ u32 cacheline_sz_reg;
+ u32 pci_state_reg, grc_misc_cfg;
+ u32 val;
+ u16 pci_cmd;
+ int err;
+
+#ifdef CONFIG_SPARC64
+ if (tg3_is_sun_570X(tp))
+ tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
+#endif
+
+ /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
+ * reordering to the mailbox registers done by the host
+ * controller can cause major troubles. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(write_reorder_chipsets))
+ tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+
+ /* Force memory write invalidate off. If we leave it on,
+ * then on 5700_BX chips we have to enable a workaround.
+ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+	 * to match the cacheline size. The Broadcom driver has this
+	 * workaround but turns MWI off at all times, so it is never
+	 * used. This seems to suggest that the workaround is insufficient.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
+ * has the register indirect write enable bit set before
+ * we try to access any of the MMIO registers. It is also
+ * critical that the PCI-X hw workaround situation is decided
+ * before that as well.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ &misc_ctrl_reg);
+
+ tp->pci_chip_rev_id = (misc_ctrl_reg >>
+ MISC_HOST_CTRL_CHIPREV_SHIFT);
+
+ /* Initialize misc host control in PCI block. */
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
+ &cacheline_sz_reg);
+
+ tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
+ tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
+ tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
+ tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750))
+ tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
+
+ if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
+ tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ tp->pci_lat_timer < 64) {
+ tp->pci_lat_timer = 64;
+
+ cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
+ cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
+ cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
+ cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
+
+ pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
+ cacheline_sz_reg);
+ }
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
+ tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+
+ /* If this is a 5700 BX chipset, and we are in PCI-X
+ * mode, enable register write workaround.
+ *
+ * The workaround is to use indirect register accesses
+ * for all chip writes not to mailbox registers.
+ */
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ u32 pm_reg;
+ u16 pci_cmd;
+
+ tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+
+			/* The chip can have its power management PCI config
+ * space registers clobbered due to this bug.
+ * So explicitly force the chip into D0 here.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
+ &pm_reg);
+ pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+ pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
+ pm_reg);
+
+ /* Also, force SERR#/PERR# in PCI command. */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+
+ /* Back to back register writes can cause problems on this chip,
+ * the workaround is to read back all reg writes except those to
+ * mailbox regs. See tg3_write_indirect_reg32().
+ *
+ * PCI Express 5750_A0 rev chips need this workaround too.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
+ tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
+
+ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+ tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
+ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+ tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
+
+ /* Chip-specific fixup from Broadcom driver */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+ pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+ }
+
+ /* Force the chip into D0. */
+ err = tg3_set_power_state(tp, 0);
+ if (err) {
+ printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
+ pci_name(tp->pdev));
+ return err;
+ }
+
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
+ tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
+
+ /* Pseudo-header checksum is done by hardware logic and not
+	 * the offload processors, so make the chip do the pseudo-
+ * header checksums on receive. For transmit it is more
+ * convenient to do the pseudo-header checksum in software
+ * as Linux does that on transmit for us in all cases.
+ */
+ tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
+ tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
+
+ /* Derive initial jumbo mode from MTU assigned in
+ * ether_setup() via the alloc_etherdev() call
+ */
+ if (tp->dev->mtu > ETH_DATA_LEN)
+ tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
+
+ /* Determine WakeOnLan speed to use. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
+ } else {
+ tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
+ }
+
+ /* A few boards don't want Ethernet@WireSpeed phy feature */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
+ tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+
+ /* Only 5701 and later support tagged irq status mode.
+ * Also, 5788 chips cannot use tagged irq status.
+ *
+	 * However, since we are using NAPI, avoid tagged irq status
+ * because the interrupt condition is more difficult to
+ * fully clear in that mode.
+ */
+ tp->coalesce_mode = 0;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+
+ /* Initialize MAC MI mode, polling disabled. */
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
+ /* Initialize data/descriptor byte/word swapping. */
+ val = tr32(GRC_MODE);
+ val &= GRC_MODE_HOST_STACKUP;
+ tw32(GRC_MODE, val | tp->grc_mode);
+
+ tg3_switch_clocks(tp);
+
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
+ u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
+
+ if (chiprevid == CHIPREV_ID_5701_A0 ||
+ chiprevid == CHIPREV_ID_5701_B0 ||
+ chiprevid == CHIPREV_ID_5701_B2 ||
+ chiprevid == CHIPREV_ID_5701_B5) {
+ void __iomem *sram_base;
+
+ /* Write some dummy words into the SRAM status block
+ * area, see if it reads back correctly. If the return
+ * value is bad, force enable the PCIX workaround.
+ */
+ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
+
+ writel(0x00000000, sram_base);
+ writel(0x00000000, sram_base + 4);
+ writel(0xffffffff, sram_base + 4);
+ if (readl(sram_base) != 0x00000000)
+ tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+ }
+ }
+
+ udelay(50);
+ tg3_nvram_init(tp);
+
+ grc_misc_cfg = tr32(GRC_MISC_CFG);
+ grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+
+ /* Broadcom's driver says that CIOBE multisplit has a bug */
+#if 0
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
+ tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
+ tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
+ }
+#endif
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+ tp->tg3_flags2 |= TG3_FLG2_IS_5788;
+
+ /* these are limited to 10/100 only */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
+ (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
+ tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
+
+ err = tg3_phy_probe(tp);
+ if (err) {
+ printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
+ pci_name(tp->pdev), err);
+ /* ... but do not return immediately ... */
+ }
+
+ tg3_read_partno(tp);
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+ }
+
+ /* 5700 {AX,BX} chips have a broken status block link
+ * change bit implementation, so we must use the
+ * status register in those cases.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
+
+	/* The led_ctrl is set during tg3_phy_probe; here we might
+ * have to force the link status polling mechanism based
+ * upon subsystem IDs.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+ tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
+ TG3_FLAG_USE_LINKCHG_REG);
+ }
+
+ /* For all SERDES we poll the MAC status register. */
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
+
+ /* 5700 BX chips need to have their TX producer index mailboxes
+ * written twice to workaround a bug.
+ */
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
+ tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
+
+ /* It seems all chips can get confused if TX buffers
+ * straddle the 4GB address boundary in some cases.
+ */
+ tp->dev->hard_start_xmit = tg3_start_xmit;
+
+ tp->rx_offset = 2;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
+ tp->rx_offset = 0;
+
+ /* By default, disable wake-on-lan. User can change this
+ * using ETHTOOL_SWOL.
+ */
+ tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+
+ return err;
+}
+
+#ifdef CONFIG_SPARC64
+static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ struct pci_dev *pdev = tp->pdev;
+ struct pcidev_cookie *pcp = pdev->sysdata;
+
+ if (pcp != NULL) {
+ int node = pcp->prom_node;
+
+ if (prom_getproplen(node, "local-mac-address") == 6) {
+ prom_getproperty(node, "local-mac-address",
+ dev->dev_addr, 6);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ return 0;
+}
+#endif
+
+static int __devinit tg3_get_device_address(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ u32 hi, lo, mac_offset;
+
+#ifdef CONFIG_SPARC64
+ if (!tg3_get_macaddr_sparc(tp))
+ return 0;
+#endif
+
+ mac_offset = 0x7c;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ mac_offset = 0xcc;
+ if (tg3_nvram_lock(tp))
+ tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
+ else
+ tg3_nvram_unlock(tp);
+ }
+
+ /* First try to get it from MAC address mailbox. */
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
+ if ((hi >> 16) == 0x484b) {
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ dev->dev_addr[1] = (hi >> 0) & 0xff;
+
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[5] = (lo >> 0) & 0xff;
+ }
+ /* Next, try NVRAM. */
+	else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
+ !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+ !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
+ dev->dev_addr[0] = ((hi >> 16) & 0xff);
+ dev->dev_addr[1] = ((hi >> 24) & 0xff);
+ dev->dev_addr[2] = ((lo >> 0) & 0xff);
+ dev->dev_addr[3] = ((lo >> 8) & 0xff);
+ dev->dev_addr[4] = ((lo >> 16) & 0xff);
+ dev->dev_addr[5] = ((lo >> 24) & 0xff);
+ }
+ /* Finally just fetch it out of the MAC control regs. */
+ else {
+ hi = tr32(MAC_ADDR_0_HIGH);
+ lo = tr32(MAC_ADDR_0_LOW);
+
+ dev->dev_addr[5] = lo & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[1] = hi & 0xff;
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ }
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+#ifdef CONFIG_SPARC64
+ if (!tg3_get_default_macaddr_sparc(tp))
+ return 0;
+#endif
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
+{
+ struct tg3_internal_buffer_desc test_desc;
+ u32 sram_dma_descs;
+ int i, ret;
+
+ sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
+
+ tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
+ tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
+ tw32(RDMAC_STATUS, 0);
+ tw32(WDMAC_STATUS, 0);
+
+ tw32(BUFMGR_MODE, 0);
+ tw32(FTQ_RESET, 0);
+
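+	/* Build a single internal buffer descriptor pointing at the host test buffer. */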
+ test_desc.addr_hi = ((u64) buf_dma) >> 32;
+ test_desc.addr_lo = buf_dma & 0xffffffff;
+ test_desc.nic_mbuf = 0x00002100;
+ test_desc.len = size;
+
+ /*
+	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
+ * the *second* time the tg3 driver was getting loaded after an
+ * initial scan.
+ *
+ * Broadcom tells me:
+ * ...the DMA engine is connected to the GRC block and a DMA
+ * reset may affect the GRC block in some unpredictable way...
+ * The behavior of resets to individual blocks has not been tested.
+ *
+ * Broadcom noted the GRC reset will also reset all sub-components.
+ */
+ if (to_device) {
+ test_desc.cqid_sqid = (13 << 8) | 2;
+
+ tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
+ udelay(40);
+ } else {
+ test_desc.cqid_sqid = (16 << 8) | 7;
+
+ tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
+ udelay(40);
+ }
+ test_desc.flags = 0x00000005;
+
+ for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
+ u32 val;
+
+ val = *(((u32 *)&test_desc) + i);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
+ sram_dma_descs + (i * sizeof(u32)));
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+ }
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ if (to_device) {
+ tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
+ } else {
+ tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
+ }
+
+ ret = -ENODEV;
+ for (i = 0; i < 40; i++) {
+ u32 val;
+
+ if (to_device)
+ val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
+ else
+ val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
+ if ((val & 0xffff) == sram_dma_descs) {
+ ret = 0;
+ break;
+ }
+
+ udelay(100);
+ }
+
+ return ret;
+}
+
+#define TEST_BUFFER_SIZE 0x400
+
+static int __devinit tg3_test_dma(struct tg3 *tp)
+{
+ dma_addr_t buf_dma;
+ u32 *buf;
+ int ret;
+
+ buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_nofree;
+ }
+
+ tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+ (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
+
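+	/* On non-x86 platforms, choose the DMA write boundary setting based
+	 * on the PCI cache line size.
+	 */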
+#ifndef CONFIG_X86
+ {
+ u8 byte;
+ int cacheline_size;
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ switch (cacheline_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+ tp->dma_rwctrl |=
+ DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
+ break;
+ } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ tp->dma_rwctrl &=
+ ~(DMA_RWCTRL_PCI_WRITE_CMD);
+ tp->dma_rwctrl |=
+ DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+ break;
+ }
+ /* fallthrough */
+ case 256:
+ if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->dma_rwctrl |=
+ DMA_RWCTRL_WRITE_BNDRY_256;
+ else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->dma_rwctrl |=
+ DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
+		}
+ }
+#endif
+
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ /* DMA read watermark not used on PCIE */
+ tp->dma_rwctrl |= 0x00180000;
+ } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->dma_rwctrl |= 0x003f0000;
+ else
+ tp->dma_rwctrl |= 0x003f000f;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
+
+ if (ccval == 0x6 || ccval == 0x7)
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+			/* Set bit 23 to re-enable PCIX hw bug fix */
+ tp->dma_rwctrl |= 0x009f0000;
+ } else {
+ tp->dma_rwctrl |= 0x001b000f;
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->dma_rwctrl &= 0xfffffff0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ /* Remove this if it causes problems for some boards. */
+ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
+
+ /* On 5700/5701 chips, we need to set this bit.
+ * Otherwise the chip will issue cacheline transactions
+		 * to streamable DMA memory without all of the byte
+ * enables turned on. This is an error on several
+ * RISC PCI controllers, in particular sparc64.
+ *
+ * On 5703/5704 chips, this bit has been reassigned
+ * a different meaning. In particular, it is used
+ * on those chips to enable a PCI-X workaround.
+ */
+ tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+ }
+
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+#if 0
+ /* Unneeded, already done by tg3_get_invariants. */
+ tg3_switch_clocks(tp);
+#endif
+
+ ret = 0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ goto out;
+
+ while (1) {
+ u32 *p = buf, i;
+
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+ p[i] = i;
+
+ /* Send the buffer to the chip. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+ if (ret) {
+ printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
+ break;
+ }
+
+#if 0
+ /* validate data reached card RAM correctly. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ u32 val;
+ tg3_read_mem(tp, 0x2100 + (i*4), &val);
+ if (le32_to_cpu(val) != p[i]) {
+ printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
+ /* ret = -ENODEV here? */
+ }
+ p[i] = 0;
+ }
+#endif
+ /* Now read it back. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+ if (ret) {
+ printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
+
+ break;
+ }
+
+ /* Verify it. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ if (p[i] == i)
+ continue;
+
+ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
+ DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ break;
+ } else {
+ printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+
+ if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
+ /* Success. */
+ ret = 0;
+ break;
+ }
+ }
+
+out:
+ pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
+out_nofree:
+ return ret;
+}
+
+static void __devinit tg3_init_link_config(struct tg3 *tp)
+{
+ tp->link_config.advertising =
+ (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_MII);
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ netif_carrier_off(tp->dev);
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tp->link_config.phy_is_low_power = 0;
+ tp->link_config.orig_speed = SPEED_INVALID;
+ tp->link_config.orig_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
+static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
+{
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO;
+
+ tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
+ tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
+}
+
+static char * __devinit tg3_phy_string(struct tg3 *tp)
+{
+ switch (tp->phy_id & PHY_ID_MASK) {
+ case PHY_ID_BCM5400: return "5400";
+ case PHY_ID_BCM5401: return "5401";
+ case PHY_ID_BCM5411: return "5411";
+ case PHY_ID_BCM5701: return "5701";
+ case PHY_ID_BCM5703: return "5703";
+ case PHY_ID_BCM5704: return "5704";
+ case PHY_ID_BCM5705: return "5705";
+ case PHY_ID_BCM5750: return "5750";
+ case PHY_ID_BCM8002: return "8002/serdes";
+ case 0: return "serdes";
+ default: return "unknown";
+ };
+}
+
+static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
+{
+ struct pci_dev *peer;
+ unsigned int func, devnr = tp->pdev->devfn & ~7;
+
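+	/* The two ports of a 5704 appear as two PCI functions in the
+	 * same slot; scan the sibling functions for the one that is
+	 * not us.
+	 */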
+ for (func = 0; func < 8; func++) {
+ peer = pci_get_slot(tp->pdev->bus, devnr | func);
+ if (peer && peer != tp->pdev)
+ break;
+ pci_dev_put(peer);
+ }
+ if (!peer || peer == tp->pdev)
+ BUG();
+
+ /*
+ * We don't need to keep the refcount elevated; there's no way
+ * to remove one half of this device without removing the other
+ */
+ pci_dev_put(peer);
+
+ return peer;
+}
+
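+/* Probe one Tigon3 device: enable and map the PCI device, allocate the
+ * net_device, read the chip invariants and MAC address, run the DMA
+ * engine test, and finally register the interface.
+ */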
+static int __devinit tg3_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int tg3_version_printed = 0;
+ unsigned long tg3reg_base, tg3reg_len;
+ struct net_device *dev;
+ struct tg3 *tp;
+ int i, err, pci_using_dac, pm_cap;
+
+ if (tg3_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device, "
+ "aborting.\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "Cannot find proper PCI device "
+ "base address, aborting.\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
+ "aborting.\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+
+ /* Configure DMA attributes. */
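+	/* Prefer a full 64-bit mask so the chip can address high
+	 * memory directly; fall back to 32-bit DMA if the platform
+	 * cannot provide that.
+	 */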
+ err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+ if (!err) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
+ if (err < 0) {
+ printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_res;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, 0xffffffffULL);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_free_res;
+ }
+ pci_using_dac = 0;
+ }
+
+ tg3reg_base = pci_resource_start(pdev, 0);
+ tg3reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(*tp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_LLTX;
+#if TG3_VLAN_TAG_USED
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = tg3_vlan_rx_register;
+ dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
+#endif
+
+ tp = netdev_priv(dev);
+ tp->pdev = pdev;
+ tp->dev = dev;
+ tp->pm_cap = pm_cap;
+ tp->mac_mode = TG3_DEF_MAC_MODE;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->mi_mode = MAC_MI_MODE_BASE;
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
+ else
+ tp->msg_enable = TG3_DEF_MSG_ENABLE;
+
+ /* The word/byte swap controls here control register access byte
+ * swapping. DMA data byte swapping is controlled in the GRC_MODE
+ * setting below.
+ */
+ tp->misc_host_ctrl =
+ MISC_HOST_CTRL_MASK_PCI_INT |
+ MISC_HOST_CTRL_WORD_SWAP |
+ MISC_HOST_CTRL_INDIR_ACCESS |
+ MISC_HOST_CTRL_PCISTATE_RW;
+
+ /* The NONFRM (non-frame) byte/word swap controls take effect
+ * on descriptor entries, anything which isn't packet data.
+ *
+ * The StrongARM chips on the board (one for tx, one for rx)
+ * are running in big-endian mode.
+ */
+ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
+ GRC_MODE_WSWAP_NONFRM_DATA);
+#ifdef __BIG_ENDIAN
+ tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+#endif
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->tx_lock);
+ spin_lock_init(&tp->indirect_lock);
+ INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+
+ tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
+ if (tp->regs == 0UL) {
+ printk(KERN_ERR PFX "Cannot map device registers, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ tg3_init_link_config(tp);
+
+ tg3_init_bufmgr_config(tp);
+
+ tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+ tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+ tp->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+ dev->open = tg3_open;
+ dev->stop = tg3_close;
+ dev->get_stats = tg3_get_stats;
+ dev->set_multicast_list = tg3_set_rx_mode;
+ dev->set_mac_address = tg3_set_mac_addr;
+ dev->do_ioctl = tg3_ioctl;
+ dev->tx_timeout = tg3_tx_timeout;
+ dev->poll = tg3_poll;
+ dev->ethtool_ops = &tg3_ethtool_ops;
+ dev->weight = 64;
+ dev->watchdog_timeo = TG3_TX_TIMEOUT;
+ dev->change_mtu = tg3_change_mtu;
+ dev->irq = pdev->irq;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = tg3_poll_controller;
+#endif
+
+ err = tg3_get_invariants(tp);
+ if (err) {
+ printk(KERN_ERR PFX "Problem fetching invariants of chip, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
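+	/* 5705-class chips use their own buffer manager watermark
+	 * defaults.
+	 */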
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_5705;
+ }
+
+#if TG3_TSO_SUPPORT != 0
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+ tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ }
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+ tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+ } else {
+ tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ }
+
+ /* TSO is off by default, user can enable using ethtool. */
+#if 0
+ if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
+ dev->features |= NETIF_F_TSO;
+#endif
+
+#endif
+
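+	/* A 5705 A1 that is not TSO capable, on a bus that is not
+	 * running at high speed, is limited to 64 pending standard
+	 * RX descriptors.
+	 */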
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+ tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
+ tp->rx_pending = 63;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->pdev_peer = tg3_find_5704_peer(tp);
+
+ err = tg3_get_device_address(tp);
+ if (err) {
+ printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+	/*
+	 * Reset the chip in case the UNDI or EFI boot driver did not
+	 * shut it down cleanly; the DMA self test below will enable
+	 * WDMAC and we would otherwise see (spurious) pending DMA on
+	 * the PCI bus at that point.
+	 */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ pci_save_state(tp->pdev);
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
+ goto err_out_iounmap;
+ }
+
+	/* Tigon3 can only checksum IPv4 in hardware, and some chips
+	 * have buggy checksumming.
+	 */
+ if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+ } else
+ tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
+
+ if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+ dev->features &= ~NETIF_F_HIGHDMA;
+
+ /* flow control autonegotiation is default behavior */
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* Now that we have fully setup the chip, save away a snapshot
+ * of the PCI config space. We need to restore this after
+ * GRC_MISC_CFG core clock resets and some resume events.
+ */
+ pci_save_state(tp->pdev);
+
+ printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
+ dev->name,
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_phy_string(tp),
+ ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
+ ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
+ ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
+ ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
+ ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
+ (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i],
+ i == 5 ? '\n' : ':');
+
+	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
+	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
+	       "TSOcap[%d]\n",
+ dev->name,
+ (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
+ (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
+ (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+
+ return 0;
+
+err_out_iounmap:
+ iounmap(tp->regs);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit tg3_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct tg3 *tp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(tp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+ tg3_disable_ints(tp);
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ netif_device_detach(dev);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+ tg3_halt(tp);
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+ if (err) {
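+		/* Entering the low-power state failed; bring the
+		 * device back up so it remains usable.
+		 */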
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_init_hw(tp);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ netif_device_attach(dev);
+ tg3_netif_start(tp);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+ }
+
+ return err;
+}
+
+static int tg3_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_restore_state(tp->pdev);
+
+ err = tg3_set_power_state(tp, 0);
+ if (err)
+ return err;
+
+ netif_device_attach(dev);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_init_hw(tp);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_enable_ints(tp);
+
+ tg3_netif_start(tp);
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static struct pci_driver tg3_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = tg3_pci_tbl,
+ .probe = tg3_init_one,
+ .remove = __devexit_p(tg3_remove_one),
+ .suspend = tg3_suspend,
+ .resume = tg3_resume
+};
+
+static int __init tg3_init(void)
+{
+ return pci_module_init(&tg3_driver);
+}
+
+static void __exit tg3_cleanup(void)
+{
+ pci_unregister_driver(&tg3_driver);
+}
+
+module_init(tg3_init);
+module_exit(tg3_cleanup);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
new file mode 100644
index 000000000000..d48887d90325
--- /dev/null
+++ b/drivers/net/tg3.h
@@ -0,0 +1,2206 @@
+/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
+ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ */
+
+#ifndef _T3_H
+#define _T3_H
+
+#define TG3_64BIT_REG_HIGH 0x00UL
+#define TG3_64BIT_REG_LOW 0x04UL
+
+/* Descriptor block info. */
+#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */
+#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */
+#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */
+#define BDINFO_FLAGS_DISABLED 0x00000002
+#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000
+#define BDINFO_FLAGS_MAXLEN_SHIFT 16
+#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
+#define TG3_BDINFO_SIZE 0x10UL
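+/* Each ring's BD info block is TG3_BDINFO_SIZE (16) bytes: a 64-bit
+ * host address, 32 bits of maxlen/flags and a 32-bit NIC address.
+ */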
+
+#define RX_COPY_THRESHOLD 256
+
+#define RX_STD_MAX_SIZE 1536
+#define RX_STD_MAX_SIZE_5705 512
+#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
+
+/* First 256 bytes are a mirror of PCI config space. */
+#define TG3PCI_VENDOR 0x00000000
+#define TG3PCI_VENDOR_BROADCOM 0x14e4
+#define TG3PCI_DEVICE 0x00000002
+#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */
+#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */
+#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */
+#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */
+#define TG3PCI_COMMAND 0x00000004
+#define TG3PCI_STATUS 0x00000006
+#define TG3PCI_CCREVID 0x00000008
+#define TG3PCI_CACHELINESZ 0x0000000c
+#define TG3PCI_LATTIMER 0x0000000d
+#define TG3PCI_HEADERTYPE 0x0000000e
+#define TG3PCI_BIST 0x0000000f
+#define TG3PCI_BASE0_LOW 0x00000010
+#define TG3PCI_BASE0_HIGH 0x00000014
+/* 0x18 --> 0x2c unused */
+#define TG3PCI_SUBSYSVENID 0x0000002c
+#define TG3PCI_SUBSYSID 0x0000002e
+#define TG3PCI_ROMADDR 0x00000030
+#define TG3PCI_CAPLIST 0x00000034
+/* 0x35 --> 0x3c unused */
+#define TG3PCI_IRQ_LINE 0x0000003c
+#define TG3PCI_IRQ_PIN 0x0000003d
+#define TG3PCI_MIN_GNT 0x0000003e
+#define TG3PCI_MAX_LAT 0x0000003f
+#define TG3PCI_X_CAPS 0x00000040
+#define PCIX_CAPS_RELAXED_ORDERING 0x00020000
+#define PCIX_CAPS_SPLIT_MASK 0x00700000
+#define PCIX_CAPS_SPLIT_SHIFT 20
+#define PCIX_CAPS_BURST_MASK 0x000c0000
+#define PCIX_CAPS_BURST_SHIFT 18
+#define PCIX_CAPS_MAX_BURST_CPIOB 2
+#define TG3PCI_PM_CAP_PTR 0x00000041
+#define TG3PCI_X_COMMAND 0x00000042
+#define TG3PCI_X_STATUS 0x00000044
+#define TG3PCI_PM_CAP_ID 0x00000048
+#define TG3PCI_VPD_CAP_PTR 0x00000049
+#define TG3PCI_PM_CAPS 0x0000004a
+#define TG3PCI_PM_CTRL_STAT 0x0000004c
+#define TG3PCI_BR_SUPP_EXT 0x0000004e
+#define TG3PCI_PM_DATA 0x0000004f
+#define TG3PCI_VPD_CAP_ID 0x00000050
+#define TG3PCI_MSI_CAP_PTR 0x00000051
+#define TG3PCI_VPD_ADDR_FLAG 0x00000052
+#define VPD_ADDR_FLAG_WRITE 0x00008000
+#define TG3PCI_VPD_DATA 0x00000054
+#define TG3PCI_MSI_CAP_ID 0x00000058
+#define TG3PCI_NXT_CAP_PTR 0x00000059
+#define TG3PCI_MSI_CTRL 0x0000005a
+#define TG3PCI_MSI_ADDR_LOW 0x0000005c
+#define TG3PCI_MSI_ADDR_HIGH 0x00000060
+#define TG3PCI_MSI_DATA 0x00000064
+/* 0x66 --> 0x68 unused */
+#define TG3PCI_MISC_HOST_CTRL 0x00000068
+#define MISC_HOST_CTRL_CLEAR_INT 0x00000001
+#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002
+#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004
+#define MISC_HOST_CTRL_WORD_SWAP 0x00000008
+#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010
+#define MISC_HOST_CTRL_CLKREG_RW 0x00000020
+#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040
+#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080
+#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100
+#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
+#define MISC_HOST_CTRL_CHIPREV 0xffff0000
+#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
+#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
+ (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
+ MISC_HOST_CTRL_CHIPREV_SHIFT)
+#define CHIPREV_ID_5700_A0 0x7000
+#define CHIPREV_ID_5700_A1 0x7001
+#define CHIPREV_ID_5700_B0 0x7100
+#define CHIPREV_ID_5700_B1 0x7101
+#define CHIPREV_ID_5700_B3 0x7102
+#define CHIPREV_ID_5700_ALTIMA 0x7104
+#define CHIPREV_ID_5700_C0 0x7200
+#define CHIPREV_ID_5701_A0 0x0000
+#define CHIPREV_ID_5701_B0 0x0100
+#define CHIPREV_ID_5701_B2 0x0102
+#define CHIPREV_ID_5701_B5 0x0105
+#define CHIPREV_ID_5703_A0 0x1000
+#define CHIPREV_ID_5703_A1 0x1001
+#define CHIPREV_ID_5703_A2 0x1002
+#define CHIPREV_ID_5703_A3 0x1003
+#define CHIPREV_ID_5704_A0 0x2000
+#define CHIPREV_ID_5704_A1 0x2001
+#define CHIPREV_ID_5704_A2 0x2002
+#define CHIPREV_ID_5704_A3 0x2003
+#define CHIPREV_ID_5705_A0 0x3000
+#define CHIPREV_ID_5705_A1 0x3001
+#define CHIPREV_ID_5705_A2 0x3002
+#define CHIPREV_ID_5705_A3 0x3003
+#define CHIPREV_ID_5750_A0 0x4000
+#define CHIPREV_ID_5750_A1 0x4001
+#define CHIPREV_ID_5750_A3 0x4003
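+/* GET_ASIC_REV() takes the top nibble of the 16-bit chip revision id,
+ * GET_CHIP_REV() the top byte and GET_METAL_REV() the low byte.
+ */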
+#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+#define ASIC_REV_5700 0x07
+#define ASIC_REV_5701 0x00
+#define ASIC_REV_5703 0x01
+#define ASIC_REV_5704 0x02
+#define ASIC_REV_5705 0x03
+#define ASIC_REV_5750 0x04
+#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
+#define CHIPREV_5700_AX 0x70
+#define CHIPREV_5700_BX 0x71
+#define CHIPREV_5700_CX 0x72
+#define CHIPREV_5701_AX 0x00
+#define CHIPREV_5703_AX 0x10
+#define CHIPREV_5704_AX 0x20
+#define CHIPREV_5704_BX 0x21
+#define CHIPREV_5750_AX 0x40
+#define CHIPREV_5750_BX 0x41
+#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
+#define METAL_REV_A0 0x00
+#define METAL_REV_A1 0x01
+#define METAL_REV_B0 0x00
+#define METAL_REV_B1 0x01
+#define METAL_REV_B2 0x02
+#define TG3PCI_DMA_RW_CTRL 0x0000006c
+#define DMA_RWCTRL_MIN_DMA 0x000000ff
+#define DMA_RWCTRL_MIN_DMA_SHIFT 0
+#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
+#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
+#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
+#define DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100
+#define DMA_RWCTRL_READ_BNDRY_32 0x00000200
+#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200
+#define DMA_RWCTRL_READ_BNDRY_64 0x00000300
+#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300
+#define DMA_RWCTRL_READ_BNDRY_128 0x00000400
+#define DMA_RWCTRL_READ_BNDRY_256 0x00000500
+#define DMA_RWCTRL_READ_BNDRY_512 0x00000600
+#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700
+#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800
+#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000
+#define DMA_RWCTRL_WRITE_BNDRY_16 0x00000800
+#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800
+#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000
+#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000
+#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800
+#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800
+#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000
+#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800
+#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000
+#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800
+#define DMA_RWCTRL_ONE_DMA 0x00004000
+#define DMA_RWCTRL_READ_WATER 0x00070000
+#define DMA_RWCTRL_READ_WATER_SHIFT 16
+#define DMA_RWCTRL_WRITE_WATER 0x00380000
+#define DMA_RWCTRL_WRITE_WATER_SHIFT 19
+#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000
+#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000
+#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000
+#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24
+#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000
+#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28
+#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000
+#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000
+#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000
+#define TG3PCI_PCISTATE 0x00000070
+#define PCISTATE_FORCE_RESET 0x00000001
+#define PCISTATE_INT_NOT_ACTIVE 0x00000002
+#define PCISTATE_CONV_PCI_MODE 0x00000004
+#define PCISTATE_BUS_SPEED_HIGH 0x00000008
+#define PCISTATE_BUS_32BIT 0x00000010
+#define PCISTATE_ROM_ENABLE 0x00000020
+#define PCISTATE_ROM_RETRY_ENABLE 0x00000040
+#define PCISTATE_FLAT_VIEW 0x00000100
+#define PCISTATE_RETRY_SAME_DMA 0x00002000
+#define TG3PCI_CLOCK_CTRL 0x00000074
+#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200
+#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400
+#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800
+#define CLOCK_CTRL_ALTCLK 0x00001000
+#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000
+#define CLOCK_CTRL_44MHZ_CORE 0x00040000
+#define CLOCK_CTRL_625_CORE 0x00100000
+#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000
+#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000
+#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000
+#define TG3PCI_REG_BASE_ADDR 0x00000078
+#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c
+#define TG3PCI_REG_DATA 0x00000080
+#define TG3PCI_MEM_WIN_DATA 0x00000084
+#define TG3PCI_MODE_CTRL 0x00000088
+#define TG3PCI_MISC_CFG 0x0000008c
+#define TG3PCI_MISC_LOCAL_CTRL 0x00000090
+/* 0x94 --> 0x98 unused */
+#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
+#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
+#define TG3PCI_SND_PROD_IDX 0x000000a8 /* 64-bit */
+/* 0xb0 --> 0xb8 unused */
+#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
+#define DUAL_MAC_CTRL_CH_MASK 0x00000003
+#define DUAL_MAC_CTRL_ID 0x00000004
+/* 0xbc --> 0x100 unused */
+
+/* 0x100 --> 0x200 unused */
+
+/* Mailbox registers */
+#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */
+#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */
+#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */
+#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */
+#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */
+#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */
+#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */
+#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */
+#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */
+#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */
+#define MAILBOX_GENERAL_6 0x00000250 /* 64-bit */
+#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
+#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
+#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
+#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
+#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_10 0x000003d0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */
+
+/* MAC control registers */
+#define MAC_MODE 0x00000400
+#define MAC_MODE_RESET 0x00000001
+#define MAC_MODE_HALF_DUPLEX 0x00000002
+#define MAC_MODE_PORT_MODE_MASK 0x0000000c
+#define MAC_MODE_PORT_MODE_TBI 0x0000000c
+#define MAC_MODE_PORT_MODE_GMII 0x00000008
+#define MAC_MODE_PORT_MODE_MII 0x00000004
+#define MAC_MODE_PORT_MODE_NONE 0x00000000
+#define MAC_MODE_PORT_INT_LPBACK 0x00000010
+#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080
+#define MAC_MODE_TX_BURSTING 0x00000100
+#define MAC_MODE_MAX_DEFER 0x00000200
+#define MAC_MODE_LINK_POLARITY 0x00000400
+#define MAC_MODE_RXSTAT_ENABLE 0x00000800
+#define MAC_MODE_RXSTAT_CLEAR 0x00001000
+#define MAC_MODE_RXSTAT_FLUSH 0x00002000
+#define MAC_MODE_TXSTAT_ENABLE 0x00004000
+#define MAC_MODE_TXSTAT_CLEAR 0x00008000
+#define MAC_MODE_TXSTAT_FLUSH 0x00010000
+#define MAC_MODE_SEND_CONFIGS 0x00020000
+#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000
+#define MAC_MODE_ACPI_ENABLE 0x00080000
+#define MAC_MODE_MIP_ENABLE 0x00100000
+#define MAC_MODE_TDE_ENABLE 0x00200000
+#define MAC_MODE_RDE_ENABLE 0x00400000
+#define MAC_MODE_FHDE_ENABLE 0x00800000
+#define MAC_STATUS 0x00000404
+#define MAC_STATUS_PCS_SYNCED 0x00000001
+#define MAC_STATUS_SIGNAL_DET 0x00000002
+#define MAC_STATUS_RCVD_CFG 0x00000004
+#define MAC_STATUS_CFG_CHANGED 0x00000008
+#define MAC_STATUS_SYNC_CHANGED 0x00000010
+#define MAC_STATUS_PORT_DEC_ERR 0x00000400
+#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000
+#define MAC_STATUS_MI_COMPLETION 0x00400000
+#define MAC_STATUS_MI_INTERRUPT 0x00800000
+#define MAC_STATUS_AP_ERROR 0x01000000
+#define MAC_STATUS_ODI_ERROR 0x02000000
+#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000
+#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000
+#define MAC_EVENT 0x00000408
+#define MAC_EVENT_PORT_DECODE_ERR 0x00000400
+#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000
+#define MAC_EVENT_MI_COMPLETION 0x00400000
+#define MAC_EVENT_MI_INTERRUPT 0x00800000
+#define MAC_EVENT_AP_ERROR 0x01000000
+#define MAC_EVENT_ODI_ERROR 0x02000000
+#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000
+#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000
+#define MAC_LED_CTRL 0x0000040c
+#define LED_CTRL_LNKLED_OVERRIDE 0x00000001
+#define LED_CTRL_1000MBPS_ON 0x00000002
+#define LED_CTRL_100MBPS_ON 0x00000004
+#define LED_CTRL_10MBPS_ON 0x00000008
+#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010
+#define LED_CTRL_TRAFFIC_BLINK 0x00000020
+#define LED_CTRL_TRAFFIC_LED 0x00000040
+#define LED_CTRL_1000MBPS_STATUS 0x00000080
+#define LED_CTRL_100MBPS_STATUS 0x00000100
+#define LED_CTRL_10MBPS_STATUS 0x00000200
+#define LED_CTRL_TRAFFIC_STATUS 0x00000400
+#define LED_CTRL_MODE_MAC 0x00000000
+#define LED_CTRL_MODE_PHY_1 0x00000800
+#define LED_CTRL_MODE_PHY_2 0x00001000
+#define LED_CTRL_MODE_SHASTA_MAC 0x00002000
+#define LED_CTRL_MODE_SHARED 0x00004000
+#define LED_CTRL_MODE_COMBO 0x00008000
+#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000
+#define LED_CTRL_BLINK_RATE_SHIFT 19
+#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000
+#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000
+#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */
+#define MAC_ADDR_0_LOW 0x00000414 /* lower 4 bytes */
+#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */
+#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */
+#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */
+#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */
+#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */
+#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */
+#define MAC_ACPI_MBUF_PTR 0x00000430
+#define MAC_ACPI_LEN_OFFSET 0x00000434
+#define ACPI_LENOFF_LEN_MASK 0x0000ffff
+#define ACPI_LENOFF_LEN_SHIFT 0
+#define ACPI_LENOFF_OFF_MASK 0x0fff0000
+#define ACPI_LENOFF_OFF_SHIFT 16
+#define MAC_TX_BACKOFF_SEED 0x00000438
+#define TX_BACKOFF_SEED_MASK 0x000003ff
+#define MAC_RX_MTU_SIZE 0x0000043c
+#define RX_MTU_SIZE_MASK 0x0000ffff
+#define MAC_PCS_TEST 0x00000440
+#define PCS_TEST_PATTERN_MASK 0x000fffff
+#define PCS_TEST_PATTERN_SHIFT 0
+#define PCS_TEST_ENABLE 0x00100000
+#define MAC_TX_AUTO_NEG 0x00000444
+#define TX_AUTO_NEG_MASK 0x0000ffff
+#define TX_AUTO_NEG_SHIFT 0
+#define MAC_RX_AUTO_NEG 0x00000448
+#define RX_AUTO_NEG_MASK 0x0000ffff
+#define RX_AUTO_NEG_SHIFT 0
+#define MAC_MI_COM 0x0000044c
+#define MI_COM_CMD_MASK 0x0c000000
+#define MI_COM_CMD_WRITE 0x04000000
+#define MI_COM_CMD_READ 0x08000000
+#define MI_COM_READ_FAILED 0x10000000
+#define MI_COM_START 0x20000000
+#define MI_COM_BUSY 0x20000000
+#define MI_COM_PHY_ADDR_MASK 0x03e00000
+#define MI_COM_PHY_ADDR_SHIFT 21
+#define MI_COM_REG_ADDR_MASK 0x001f0000
+#define MI_COM_REG_ADDR_SHIFT 16
+#define MI_COM_DATA_MASK 0x0000ffff
+#define MAC_MI_STAT 0x00000450
+#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001
+#define MAC_MI_MODE 0x00000454
+#define MAC_MI_MODE_CLK_10MHZ 0x00000001
+#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002
+#define MAC_MI_MODE_AUTO_POLL 0x00000010
+#define MAC_MI_MODE_CORE_CLK_62MHZ 0x00008000
+#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */
+#define MAC_AUTO_POLL_STATUS 0x00000458
+#define MAC_AUTO_POLL_ERROR 0x00000001
+#define MAC_TX_MODE 0x0000045c
+#define TX_MODE_RESET 0x00000001
+#define TX_MODE_ENABLE 0x00000002
+#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010
+#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
+#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
+#define MAC_TX_STATUS 0x00000460
+#define TX_STATUS_XOFFED 0x00000001
+#define TX_STATUS_SENT_XOFF 0x00000002
+#define TX_STATUS_SENT_XON 0x00000004
+#define TX_STATUS_LINK_UP 0x00000008
+#define TX_STATUS_ODI_UNDERRUN 0x00000010
+#define TX_STATUS_ODI_OVERRUN 0x00000020
+#define MAC_TX_LENGTHS 0x00000464
+#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff
+#define TX_LENGTHS_SLOT_TIME_SHIFT 0
+#define TX_LENGTHS_IPG_MASK 0x00000f00
+#define TX_LENGTHS_IPG_SHIFT 8
+#define TX_LENGTHS_IPG_CRS_MASK 0x00003000
+#define TX_LENGTHS_IPG_CRS_SHIFT 12
+#define MAC_RX_MODE 0x00000468
+#define RX_MODE_RESET 0x00000001
+#define RX_MODE_ENABLE 0x00000002
+#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004
+#define RX_MODE_KEEP_MAC_CTRL 0x00000008
+#define RX_MODE_KEEP_PAUSE 0x00000010
+#define RX_MODE_ACCEPT_OVERSIZED 0x00000020
+#define RX_MODE_ACCEPT_RUNTS 0x00000040
+#define RX_MODE_LEN_CHECK 0x00000080
+#define RX_MODE_PROMISC 0x00000100
+#define RX_MODE_NO_CRC_CHECK 0x00000200
+#define RX_MODE_KEEP_VLAN_TAG 0x00000400
+#define MAC_RX_STATUS 0x0000046c
+#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
+#define RX_STATUS_XOFF_RCVD 0x00000002
+#define RX_STATUS_XON_RCVD 0x00000004
+#define MAC_HASH_REG_0 0x00000470
+#define MAC_HASH_REG_1 0x00000474
+#define MAC_HASH_REG_2 0x00000478
+#define MAC_HASH_REG_3 0x0000047c
+#define MAC_RCV_RULE_0 0x00000480
+#define MAC_RCV_VALUE_0 0x00000484
+#define MAC_RCV_RULE_1 0x00000488
+#define MAC_RCV_VALUE_1 0x0000048c
+#define MAC_RCV_RULE_2 0x00000490
+#define MAC_RCV_VALUE_2 0x00000494
+#define MAC_RCV_RULE_3 0x00000498
+#define MAC_RCV_VALUE_3 0x0000049c
+#define MAC_RCV_RULE_4 0x000004a0
+#define MAC_RCV_VALUE_4 0x000004a4
+#define MAC_RCV_RULE_5 0x000004a8
+#define MAC_RCV_VALUE_5 0x000004ac
+#define MAC_RCV_RULE_6 0x000004b0
+#define MAC_RCV_VALUE_6 0x000004b4
+#define MAC_RCV_RULE_7 0x000004b8
+#define MAC_RCV_VALUE_7 0x000004bc
+#define MAC_RCV_RULE_8 0x000004c0
+#define MAC_RCV_VALUE_8 0x000004c4
+#define MAC_RCV_RULE_9 0x000004c8
+#define MAC_RCV_VALUE_9 0x000004cc
+#define MAC_RCV_RULE_10 0x000004d0
+#define MAC_RCV_VALUE_10 0x000004d4
+#define MAC_RCV_RULE_11 0x000004d8
+#define MAC_RCV_VALUE_11 0x000004dc
+#define MAC_RCV_RULE_12 0x000004e0
+#define MAC_RCV_VALUE_12 0x000004e4
+#define MAC_RCV_RULE_13 0x000004e8
+#define MAC_RCV_VALUE_13 0x000004ec
+#define MAC_RCV_RULE_14 0x000004f0
+#define MAC_RCV_VALUE_14 0x000004f4
+#define MAC_RCV_RULE_15 0x000004f8
+#define MAC_RCV_VALUE_15 0x000004fc
+#define RCV_RULE_DISABLE_MASK 0x7fffffff
+#define MAC_RCV_RULE_CFG 0x00000500
+#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008
+#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504
+/* 0x508 --> 0x520 unused */
+#define MAC_HASHREGU_0 0x00000520
+#define MAC_HASHREGU_1 0x00000524
+#define MAC_HASHREGU_2 0x00000528
+#define MAC_HASHREGU_3 0x0000052c
+#define MAC_EXTADDR_0_HIGH 0x00000530
+#define MAC_EXTADDR_0_LOW 0x00000534
+#define MAC_EXTADDR_1_HIGH 0x00000538
+#define MAC_EXTADDR_1_LOW 0x0000053c
+#define MAC_EXTADDR_2_HIGH 0x00000540
+#define MAC_EXTADDR_2_LOW 0x00000544
+#define MAC_EXTADDR_3_HIGH 0x00000548
+#define MAC_EXTADDR_3_LOW 0x0000054c
+#define MAC_EXTADDR_4_HIGH 0x00000550
+#define MAC_EXTADDR_4_LOW 0x00000554
+#define MAC_EXTADDR_5_HIGH 0x00000558
+#define MAC_EXTADDR_5_LOW 0x0000055c
+#define MAC_EXTADDR_6_HIGH 0x00000560
+#define MAC_EXTADDR_6_LOW 0x00000564
+#define MAC_EXTADDR_7_HIGH 0x00000568
+#define MAC_EXTADDR_7_LOW 0x0000056c
+#define MAC_EXTADDR_8_HIGH 0x00000570
+#define MAC_EXTADDR_8_LOW 0x00000574
+#define MAC_EXTADDR_9_HIGH 0x00000578
+#define MAC_EXTADDR_9_LOW 0x0000057c
+#define MAC_EXTADDR_10_HIGH 0x00000580
+#define MAC_EXTADDR_10_LOW 0x00000584
+#define MAC_EXTADDR_11_HIGH 0x00000588
+#define MAC_EXTADDR_11_LOW 0x0000058c
+#define MAC_SERDES_CFG 0x00000590
+#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
+#define MAC_SERDES_STAT 0x00000594
+/* 0x598 --> 0x5b0 unused */
+#define SG_DIG_CTRL 0x000005b0
+#define SG_DIG_USING_HW_AUTONEG 0x80000000
+#define SG_DIG_SOFT_RESET 0x40000000
+#define SG_DIG_DISABLE_LINKRDY 0x20000000
+#define SG_DIG_CRC16_CLEAR_N 0x01000000
+#define SG_DIG_EN10B 0x00800000
+#define SG_DIG_CLEAR_STATUS 0x00400000
+#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000
+#define SG_DIG_LOCAL_LINK_STATUS 0x00100000
+#define SG_DIG_SPEED_STATUS_MASK 0x000c0000
+#define SG_DIG_SPEED_STATUS_SHIFT 18
+#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000
+#define SG_DIG_RESTART_AUTONEG 0x00010000
+#define SG_DIG_FIBER_MODE 0x00008000
+#define SG_DIG_REMOTE_FAULT_MASK 0x00006000
+#define SG_DIG_PAUSE_MASK 0x00001800
+#define SG_DIG_GBIC_ENABLE 0x00000400
+#define SG_DIG_CHECK_END_ENABLE 0x00000200
+#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100
+#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080
+#define SG_DIG_GMII_INPUT_SELECT 0x00000040
+#define SG_DIG_MRADV_CRC16_SELECT 0x00000020
+#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010
+#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008
+#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004
+#define SG_DIG_REMOTE_LOOPBACK 0x00000002
+#define SG_DIG_LOOPBACK 0x00000001
+#define SG_DIG_STATUS 0x000005b4
+#define SG_DIG_CRC16_BUS_MASK 0xffff0000
+#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0
+#define SG_DIG_COMMA_DETECTOR 0x00000008
+#define SG_DIG_MAC_ACK_STATUS 0x00000004
+#define SG_DIG_AUTONEG_COMPLETE 0x00000002
+#define SG_DIG_AUTONEG_ERROR 0x00000001
+/* 0x5b8 --> 0x600 unused */
+#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
+#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
+/* 0x624 --> 0x800 unused */
+#define MAC_TX_STATS_OCTETS 0x00000800
+#define MAC_TX_STATS_RESV1 0x00000804
+#define MAC_TX_STATS_COLLISIONS 0x00000808
+#define MAC_TX_STATS_XON_SENT 0x0000080c
+#define MAC_TX_STATS_XOFF_SENT 0x00000810
+#define MAC_TX_STATS_RESV2 0x00000814
+#define MAC_TX_STATS_MAC_ERRORS 0x00000818
+#define MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c
+#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820
+#define MAC_TX_STATS_DEFERRED 0x00000824
+#define MAC_TX_STATS_RESV3 0x00000828
+#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c
+#define MAC_TX_STATS_LATE_COL 0x00000830
+#define MAC_TX_STATS_RESV4_1 0x00000834
+#define MAC_TX_STATS_RESV4_2 0x00000838
+#define MAC_TX_STATS_RESV4_3 0x0000083c
+#define MAC_TX_STATS_RESV4_4 0x00000840
+#define MAC_TX_STATS_RESV4_5 0x00000844
+#define MAC_TX_STATS_RESV4_6 0x00000848
+#define MAC_TX_STATS_RESV4_7 0x0000084c
+#define MAC_TX_STATS_RESV4_8 0x00000850
+#define MAC_TX_STATS_RESV4_9 0x00000854
+#define MAC_TX_STATS_RESV4_10 0x00000858
+#define MAC_TX_STATS_RESV4_11 0x0000085c
+#define MAC_TX_STATS_RESV4_12 0x00000860
+#define MAC_TX_STATS_RESV4_13 0x00000864
+#define MAC_TX_STATS_RESV4_14 0x00000868
+#define MAC_TX_STATS_UCAST 0x0000086c
+#define MAC_TX_STATS_MCAST 0x00000870
+#define MAC_TX_STATS_BCAST 0x00000874
+#define MAC_TX_STATS_RESV5_1 0x00000878
+#define MAC_TX_STATS_RESV5_2 0x0000087c
+#define MAC_RX_STATS_OCTETS 0x00000880
+#define MAC_RX_STATS_RESV1 0x00000884
+#define MAC_RX_STATS_FRAGMENTS 0x00000888
+#define MAC_RX_STATS_UCAST 0x0000088c
+#define MAC_RX_STATS_MCAST 0x00000890
+#define MAC_RX_STATS_BCAST 0x00000894
+#define MAC_RX_STATS_FCS_ERRORS 0x00000898
+#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c
+#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0
+#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4
+#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8
+#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac
+#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0
+#define MAC_RX_STATS_JABBERS 0x000008b4
+#define MAC_RX_STATS_UNDERSIZE 0x000008b8
+/* 0x8bc --> 0xc00 unused */
+
+/* Send data initiator control registers */
+#define SNDDATAI_MODE 0x00000c00
+#define SNDDATAI_MODE_RESET 0x00000001
+#define SNDDATAI_MODE_ENABLE 0x00000002
+#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004
+#define SNDDATAI_STATUS 0x00000c04
+#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004
+#define SNDDATAI_STATSCTRL 0x00000c08
+#define SNDDATAI_SCTRL_ENABLE 0x00000001
+#define SNDDATAI_SCTRL_FASTUPD 0x00000002
+#define SNDDATAI_SCTRL_CLEAR 0x00000004
+#define SNDDATAI_SCTRL_FLUSH 0x00000008
+#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010
+#define SNDDATAI_STATSENAB 0x00000c0c
+#define SNDDATAI_STATSINCMASK 0x00000c10
+/* 0xc14 --> 0xc80 unused */
+#define SNDDATAI_COS_CNT_0 0x00000c80
+#define SNDDATAI_COS_CNT_1 0x00000c84
+#define SNDDATAI_COS_CNT_2 0x00000c88
+#define SNDDATAI_COS_CNT_3 0x00000c8c
+#define SNDDATAI_COS_CNT_4 0x00000c90
+#define SNDDATAI_COS_CNT_5 0x00000c94
+#define SNDDATAI_COS_CNT_6 0x00000c98
+#define SNDDATAI_COS_CNT_7 0x00000c9c
+#define SNDDATAI_COS_CNT_8 0x00000ca0
+#define SNDDATAI_COS_CNT_9 0x00000ca4
+#define SNDDATAI_COS_CNT_10 0x00000ca8
+#define SNDDATAI_COS_CNT_11 0x00000cac
+#define SNDDATAI_COS_CNT_12 0x00000cb0
+#define SNDDATAI_COS_CNT_13 0x00000cb4
+#define SNDDATAI_COS_CNT_14 0x00000cb8
+#define SNDDATAI_COS_CNT_15 0x00000cbc
+#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0
+#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4
+#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8
+#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc
+#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0
+#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4
+#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8
+#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc
+/* 0xce0 --> 0x1000 unused */
+
+/* Send data completion control registers */
+#define SNDDATAC_MODE 0x00001000
+#define SNDDATAC_MODE_RESET 0x00000001
+#define SNDDATAC_MODE_ENABLE 0x00000002
+/* 0x1004 --> 0x1400 unused */
+
+/* Send BD ring selector */
+#define SNDBDS_MODE 0x00001400
+#define SNDBDS_MODE_RESET 0x00000001
+#define SNDBDS_MODE_ENABLE 0x00000002
+#define SNDBDS_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDS_STATUS 0x00001404
+#define SNDBDS_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDS_HWDIAG 0x00001408
+/* 0x140c --> 0x1440 */
+#define SNDBDS_SEL_CON_IDX_0 0x00001440
+#define SNDBDS_SEL_CON_IDX_1 0x00001444
+#define SNDBDS_SEL_CON_IDX_2 0x00001448
+#define SNDBDS_SEL_CON_IDX_3 0x0000144c
+#define SNDBDS_SEL_CON_IDX_4 0x00001450
+#define SNDBDS_SEL_CON_IDX_5 0x00001454
+#define SNDBDS_SEL_CON_IDX_6 0x00001458
+#define SNDBDS_SEL_CON_IDX_7 0x0000145c
+#define SNDBDS_SEL_CON_IDX_8 0x00001460
+#define SNDBDS_SEL_CON_IDX_9 0x00001464
+#define SNDBDS_SEL_CON_IDX_10 0x00001468
+#define SNDBDS_SEL_CON_IDX_11 0x0000146c
+#define SNDBDS_SEL_CON_IDX_12 0x00001470
+#define SNDBDS_SEL_CON_IDX_13 0x00001474
+#define SNDBDS_SEL_CON_IDX_14 0x00001478
+#define SNDBDS_SEL_CON_IDX_15 0x0000147c
+/* 0x1480 --> 0x1800 unused */
+
+/* Send BD initiator control registers */
+#define SNDBDI_MODE 0x00001800
+#define SNDBDI_MODE_RESET 0x00000001
+#define SNDBDI_MODE_ENABLE 0x00000002
+#define SNDBDI_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDI_STATUS 0x00001804
+#define SNDBDI_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDI_IN_PROD_IDX_0 0x00001808
+#define SNDBDI_IN_PROD_IDX_1 0x0000180c
+#define SNDBDI_IN_PROD_IDX_2 0x00001810
+#define SNDBDI_IN_PROD_IDX_3 0x00001814
+#define SNDBDI_IN_PROD_IDX_4 0x00001818
+#define SNDBDI_IN_PROD_IDX_5 0x0000181c
+#define SNDBDI_IN_PROD_IDX_6 0x00001820
+#define SNDBDI_IN_PROD_IDX_7 0x00001824
+#define SNDBDI_IN_PROD_IDX_8 0x00001828
+#define SNDBDI_IN_PROD_IDX_9 0x0000182c
+#define SNDBDI_IN_PROD_IDX_10 0x00001830
+#define SNDBDI_IN_PROD_IDX_11 0x00001834
+#define SNDBDI_IN_PROD_IDX_12 0x00001838
+#define SNDBDI_IN_PROD_IDX_13 0x0000183c
+#define SNDBDI_IN_PROD_IDX_14 0x00001840
+#define SNDBDI_IN_PROD_IDX_15 0x00001844
+/* 0x1848 --> 0x1c00 unused */
+
+/* Send BD completion control registers */
+#define SNDBDC_MODE 0x00001c00
+#define SNDBDC_MODE_RESET 0x00000001
+#define SNDBDC_MODE_ENABLE 0x00000002
+#define SNDBDC_MODE_ATTN_ENABLE 0x00000004
+/* 0x1c04 --> 0x2000 unused */
+
+/* Receive list placement control registers */
+#define RCVLPC_MODE 0x00002000
+#define RCVLPC_MODE_RESET 0x00000001
+#define RCVLPC_MODE_ENABLE 0x00000002
+#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004
+#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008
+#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010
+#define RCVLPC_STATUS 0x00002004
+#define RCVLPC_STATUS_CLASS0 0x00000004
+#define RCVLPC_STATUS_MAPOOR 0x00000008
+#define RCVLPC_STATUS_STAT_OFLOW 0x00000010
+#define RCVLPC_LOCK 0x00002008
+#define RCVLPC_LOCK_REQ_MASK 0x0000ffff
+#define RCVLPC_LOCK_REQ_SHIFT 0
+#define RCVLPC_LOCK_GRANT_MASK 0xffff0000
+#define RCVLPC_LOCK_GRANT_SHIFT 16
+#define RCVLPC_NON_EMPTY_BITS 0x0000200c
+#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff
+#define RCVLPC_CONFIG 0x00002010
+#define RCVLPC_STATSCTRL 0x00002014
+#define RCVLPC_STATSCTRL_ENABLE 0x00000001
+#define RCVLPC_STATSCTRL_FASTUPD 0x00000002
+#define RCVLPC_STATS_ENABLE 0x00002018
+#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000
+#define RCVLPC_STATS_INCMASK 0x0000201c
+/* 0x2020 --> 0x2100 unused */
+#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */
+#define SELLST_TAIL 0x00000004
+#define SELLST_CONT 0x00000008
+#define SELLST_UNUSED 0x0000000c
+#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */
+#define RCVLPC_DROP_FILTER_CNT 0x00002240
+#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244
+#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248
+#define RCVLPC_NO_RCV_BD_CNT 0x0000224c
+#define RCVLPC_IN_DISCARDS_CNT 0x00002250
+#define RCVLPC_IN_ERRORS_CNT 0x00002254
+#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258
+/* 0x225c --> 0x2400 unused */
+
+/* Receive Data and Receive BD Initiator Control */
+#define RCVDBDI_MODE 0x00002400
+#define RCVDBDI_MODE_RESET 0x00000001
+#define RCVDBDI_MODE_ENABLE 0x00000002
+#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_MODE_INV_RING_SZ 0x00000010
+#define RCVDBDI_STATUS 0x00002404
+#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010
+#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408
+/* 0x240c --> 0x2440 unused */
+#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */
+#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */
+#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... */
+#define RCVDBDI_JUMBO_CON_IDX 0x00002470
+#define RCVDBDI_STD_CON_IDX 0x00002474
+#define RCVDBDI_MINI_CON_IDX 0x00002478
+/* 0x247c --> 0x2480 unused */
+#define RCVDBDI_BD_PROD_IDX_0 0x00002480
+#define RCVDBDI_BD_PROD_IDX_1 0x00002484
+#define RCVDBDI_BD_PROD_IDX_2 0x00002488
+#define RCVDBDI_BD_PROD_IDX_3 0x0000248c
+#define RCVDBDI_BD_PROD_IDX_4 0x00002490
+#define RCVDBDI_BD_PROD_IDX_5 0x00002494
+#define RCVDBDI_BD_PROD_IDX_6 0x00002498
+#define RCVDBDI_BD_PROD_IDX_7 0x0000249c
+#define RCVDBDI_BD_PROD_IDX_8 0x000024a0
+#define RCVDBDI_BD_PROD_IDX_9 0x000024a4
+#define RCVDBDI_BD_PROD_IDX_10 0x000024a8
+#define RCVDBDI_BD_PROD_IDX_11 0x000024ac
+#define RCVDBDI_BD_PROD_IDX_12 0x000024b0
+#define RCVDBDI_BD_PROD_IDX_13 0x000024b4
+#define RCVDBDI_BD_PROD_IDX_14 0x000024b8
+#define RCVDBDI_BD_PROD_IDX_15 0x000024bc
+#define RCVDBDI_HWDIAG 0x000024c0
+/* 0x24c4 --> 0x2800 unused */
+
+/* Receive Data Completion Control */
+#define RCVDCC_MODE 0x00002800
+#define RCVDCC_MODE_RESET 0x00000001
+#define RCVDCC_MODE_ENABLE 0x00000002
+#define RCVDCC_MODE_ATTN_ENABLE 0x00000004
+/* 0x2804 --> 0x2c00 unused */
+
+/* Receive BD Initiator Control Registers */
+#define RCVBDI_MODE 0x00002c00
+#define RCVBDI_MODE_RESET 0x00000001
+#define RCVBDI_MODE_ENABLE 0x00000002
+#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004
+#define RCVBDI_STATUS 0x00002c04
+#define RCVBDI_STATUS_RCB_ATTN 0x00000004
+#define RCVBDI_JUMBO_PROD_IDX 0x00002c08
+#define RCVBDI_STD_PROD_IDX 0x00002c0c
+#define RCVBDI_MINI_PROD_IDX 0x00002c10
+#define RCVBDI_MINI_THRESH 0x00002c14
+#define RCVBDI_STD_THRESH 0x00002c18
+#define RCVBDI_JUMBO_THRESH 0x00002c1c
+/* 0x2c20 --> 0x3000 unused */
+
+/* Receive BD Completion Control Registers */
+#define RCVCC_MODE 0x00003000
+#define RCVCC_MODE_RESET 0x00000001
+#define RCVCC_MODE_ENABLE 0x00000002
+#define RCVCC_MODE_ATTN_ENABLE 0x00000004
+#define RCVCC_STATUS 0x00003004
+#define RCVCC_STATUS_ERROR_ATTN 0x00000004
+#define RCVCC_JUMP_PROD_IDX 0x00003008
+#define RCVCC_STD_PROD_IDX 0x0000300c
+#define RCVCC_MINI_PROD_IDX 0x00003010
+/* 0x3014 --> 0x3400 unused */
+
+/* Receive list selector control registers */
+#define RCVLSC_MODE 0x00003400
+#define RCVLSC_MODE_RESET 0x00000001
+#define RCVLSC_MODE_ENABLE 0x00000002
+#define RCVLSC_MODE_ATTN_ENABLE 0x00000004
+#define RCVLSC_STATUS 0x00003404
+#define RCVLSC_STATUS_ERROR_ATTN 0x00000004
+/* 0x3408 --> 0x3800 unused */
+
+/* Mbuf cluster free registers */
+#define MBFREE_MODE 0x00003800
+#define MBFREE_MODE_RESET 0x00000001
+#define MBFREE_MODE_ENABLE 0x00000002
+#define MBFREE_STATUS 0x00003804
+/* 0x3808 --> 0x3c00 unused */
+
+/* Host coalescing control registers */
+#define HOSTCC_MODE 0x00003c00
+#define HOSTCC_MODE_RESET 0x00000001
+#define HOSTCC_MODE_ENABLE 0x00000002
+#define HOSTCC_MODE_ATTN 0x00000004
+#define HOSTCC_MODE_NOW 0x00000008
+#define HOSTCC_MODE_FULL_STATUS 0x00000000
+#define HOSTCC_MODE_64BYTE 0x00000080
+#define HOSTCC_MODE_32BYTE 0x00000100
+#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200
+#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400
+#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800
+#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000
+#define HOSTCC_STATUS 0x00003c04
+#define HOSTCC_STATUS_ERROR_ATTN 0x00000004
+#define HOSTCC_RXCOL_TICKS 0x00003c08
+#define LOW_RXCOL_TICKS 0x00000032
+#define DEFAULT_RXCOL_TICKS 0x00000048
+#define HIGH_RXCOL_TICKS 0x00000096
+#define HOSTCC_TXCOL_TICKS 0x00003c0c
+#define LOW_TXCOL_TICKS 0x00000096
+#define DEFAULT_TXCOL_TICKS 0x0000012c
+#define HIGH_TXCOL_TICKS 0x00000145
+#define HOSTCC_RXMAX_FRAMES 0x00003c10
+#define LOW_RXMAX_FRAMES 0x00000005
+#define DEFAULT_RXMAX_FRAMES 0x00000008
+#define HIGH_RXMAX_FRAMES 0x00000012
+#define HOSTCC_TXMAX_FRAMES 0x00003c14
+#define LOW_TXMAX_FRAMES 0x00000035
+#define DEFAULT_TXMAX_FRAMES 0x0000004b
+#define HIGH_TXMAX_FRAMES 0x00000052
+#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
+#define DEFAULT_RXCOAL_TICK_INT 0x00000019
+#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
+#define DEFAULT_TXCOAL_TICK_INT 0x00000019
+#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
+#define DEFAULT_RXCOAL_MAXF_INT 0x00000005
+#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
+#define DEFAULT_TXCOAL_MAXF_INT 0x00000005
+#define HOSTCC_STAT_COAL_TICKS 0x00003c28
+#define DEFAULT_STAT_COAL_TICKS 0x000f4240
+/* 0x3c2c --> 0x3c30 unused */
+#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */
+#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */
+#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40
+#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44
+#define HOSTCC_FLOW_ATTN 0x00003c48
+/* 0x3c4c --> 0x3c50 unused */
+#define HOSTCC_JUMBO_CON_IDX 0x00003c50
+#define HOSTCC_STD_CON_IDX 0x00003c54
+#define HOSTCC_MINI_CON_IDX 0x00003c58
+/* 0x3c5c --> 0x3c80 unused */
+#define HOSTCC_RET_PROD_IDX_0 0x00003c80
+#define HOSTCC_RET_PROD_IDX_1 0x00003c84
+#define HOSTCC_RET_PROD_IDX_2 0x00003c88
+#define HOSTCC_RET_PROD_IDX_3 0x00003c8c
+#define HOSTCC_RET_PROD_IDX_4 0x00003c90
+#define HOSTCC_RET_PROD_IDX_5 0x00003c94
+#define HOSTCC_RET_PROD_IDX_6 0x00003c98
+#define HOSTCC_RET_PROD_IDX_7 0x00003c9c
+#define HOSTCC_RET_PROD_IDX_8 0x00003ca0
+#define HOSTCC_RET_PROD_IDX_9 0x00003ca4
+#define HOSTCC_RET_PROD_IDX_10 0x00003ca8
+#define HOSTCC_RET_PROD_IDX_11 0x00003cac
+#define HOSTCC_RET_PROD_IDX_12 0x00003cb0
+#define HOSTCC_RET_PROD_IDX_13 0x00003cb4
+#define HOSTCC_RET_PROD_IDX_14 0x00003cb8
+#define HOSTCC_RET_PROD_IDX_15 0x00003cbc
+#define HOSTCC_SND_CON_IDX_0 0x00003cc0
+#define HOSTCC_SND_CON_IDX_1 0x00003cc4
+#define HOSTCC_SND_CON_IDX_2 0x00003cc8
+#define HOSTCC_SND_CON_IDX_3 0x00003ccc
+#define HOSTCC_SND_CON_IDX_4 0x00003cd0
+#define HOSTCC_SND_CON_IDX_5 0x00003cd4
+#define HOSTCC_SND_CON_IDX_6 0x00003cd8
+#define HOSTCC_SND_CON_IDX_7 0x00003cdc
+#define HOSTCC_SND_CON_IDX_8 0x00003ce0
+#define HOSTCC_SND_CON_IDX_9 0x00003ce4
+#define HOSTCC_SND_CON_IDX_10 0x00003ce8
+#define HOSTCC_SND_CON_IDX_11 0x00003cec
+#define HOSTCC_SND_CON_IDX_12 0x00003cf0
+#define HOSTCC_SND_CON_IDX_13 0x00003cf4
+#define HOSTCC_SND_CON_IDX_14 0x00003cf8
+#define HOSTCC_SND_CON_IDX_15 0x00003cfc
+/* 0x3d00 --> 0x4000 unused */
+
+/* Memory arbiter control registers */
+#define MEMARB_MODE 0x00004000
+#define MEMARB_MODE_RESET 0x00000001
+#define MEMARB_MODE_ENABLE 0x00000002
+#define MEMARB_STATUS 0x00004004
+#define MEMARB_TRAP_ADDR_LOW 0x00004008
+#define MEMARB_TRAP_ADDR_HIGH 0x0000400c
+/* 0x4010 --> 0x4400 unused */
+
+/* Buffer manager control registers */
+#define BUFMGR_MODE 0x00004400
+#define BUFMGR_MODE_RESET 0x00000001
+#define BUFMGR_MODE_ENABLE 0x00000002
+#define BUFMGR_MODE_ATTN_ENABLE 0x00000004
+#define BUFMGR_MODE_BM_TEST 0x00000008
+#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010
+#define BUFMGR_STATUS 0x00004404
+#define BUFMGR_STATUS_ERROR 0x00000004
+#define BUFMGR_STATUS_MBLOW 0x00000010
+#define BUFMGR_MB_POOL_ADDR 0x00004408
+#define BUFMGR_MB_POOL_SIZE 0x0000440c
+#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410
+#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050
+#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000
+#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130
+#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414
+#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
+#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+#define BUFMGR_MB_HIGH_WATER 0x00004418
+#define DEFAULT_MB_HIGH_WATER 0x00000060
+#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
+#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
+#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
+#define BUFMGR_MB_ALLOC_BIT 0x10000000
+#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
+#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424
+#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428
+#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c
+#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430
+#define BUFMGR_DMA_LOW_WATER 0x00004434
+#define DEFAULT_DMA_LOW_WATER 0x00000005
+#define BUFMGR_DMA_HIGH_WATER 0x00004438
+#define DEFAULT_DMA_HIGH_WATER 0x0000000a
+#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c
+#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440
+#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444
+#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448
+#define BUFMGR_HWDIAG_0 0x0000444c
+#define BUFMGR_HWDIAG_1 0x00004450
+#define BUFMGR_HWDIAG_2 0x00004454
+/* 0x4458 --> 0x4800 unused */
+
+/* Read DMA control registers */
+#define RDMAC_MODE 0x00004800
+#define RDMAC_MODE_RESET 0x00000001
+#define RDMAC_MODE_ENABLE 0x00000002
+#define RDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define RDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define RDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define RDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define RDMAC_MODE_SPLIT_ENABLE 0x00000800
+#define RDMAC_MODE_SPLIT_RESET 0x00001000
+#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
+#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_STATUS 0x00004804
+#define RDMAC_STATUS_TGTABORT 0x00000004
+#define RDMAC_STATUS_MSTABORT 0x00000008
+#define RDMAC_STATUS_PARITYERR 0x00000010
+#define RDMAC_STATUS_ADDROFLOW 0x00000020
+#define RDMAC_STATUS_FIFOOFLOW 0x00000040
+#define RDMAC_STATUS_FIFOURUN 0x00000080
+#define RDMAC_STATUS_FIFOOREAD 0x00000100
+#define RDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4808 --> 0x4c00 unused */
+
+/* Write DMA control registers */
+#define WDMAC_MODE 0x00004c00
+#define WDMAC_MODE_RESET 0x00000001
+#define WDMAC_MODE_ENABLE 0x00000002
+#define WDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define WDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define WDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define WDMAC_MODE_RX_ACCEL 0x00000400
+#define WDMAC_STATUS 0x00004c04
+#define WDMAC_STATUS_TGTABORT 0x00000004
+#define WDMAC_STATUS_MSTABORT 0x00000008
+#define WDMAC_STATUS_PARITYERR 0x00000010
+#define WDMAC_STATUS_ADDROFLOW 0x00000020
+#define WDMAC_STATUS_FIFOOFLOW 0x00000040
+#define WDMAC_STATUS_FIFOURUN 0x00000080
+#define WDMAC_STATUS_FIFOOREAD 0x00000100
+#define WDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4c08 --> 0x5000 unused */
+
+/* Per-cpu register offsets (arm9) */
+#define CPU_MODE 0x00000000
+#define CPU_MODE_RESET 0x00000001
+#define CPU_MODE_HALT 0x00000400
+#define CPU_STATE 0x00000004
+#define CPU_EVTMASK 0x00000008
+/* 0xc --> 0x1c reserved */
+#define CPU_PC 0x0000001c
+#define CPU_INSN 0x00000020
+#define CPU_SPAD_UFLOW 0x00000024
+#define CPU_WDOG_CLEAR 0x00000028
+#define CPU_WDOG_VECTOR 0x0000002c
+#define CPU_WDOG_PC 0x00000030
+#define CPU_HW_BP 0x00000034
+/* 0x38 --> 0x44 unused */
+#define CPU_WDOG_SAVED_STATE 0x00000044
+#define CPU_LAST_BRANCH_ADDR 0x00000048
+#define CPU_SPAD_UFLOW_SET 0x0000004c
+/* 0x50 --> 0x200 unused */
+#define CPU_R0 0x00000200
+#define CPU_R1 0x00000204
+#define CPU_R2 0x00000208
+#define CPU_R3 0x0000020c
+#define CPU_R4 0x00000210
+#define CPU_R5 0x00000214
+#define CPU_R6 0x00000218
+#define CPU_R7 0x0000021c
+#define CPU_R8 0x00000220
+#define CPU_R9 0x00000224
+#define CPU_R10 0x00000228
+#define CPU_R11 0x0000022c
+#define CPU_R12 0x00000230
+#define CPU_R13 0x00000234
+#define CPU_R14 0x00000238
+#define CPU_R15 0x0000023c
+#define CPU_R16 0x00000240
+#define CPU_R17 0x00000244
+#define CPU_R18 0x00000248
+#define CPU_R19 0x0000024c
+#define CPU_R20 0x00000250
+#define CPU_R21 0x00000254
+#define CPU_R22 0x00000258
+#define CPU_R23 0x0000025c
+#define CPU_R24 0x00000260
+#define CPU_R25 0x00000264
+#define CPU_R26 0x00000268
+#define CPU_R27 0x0000026c
+#define CPU_R28 0x00000270
+#define CPU_R29 0x00000274
+#define CPU_R30 0x00000278
+#define CPU_R31 0x0000027c
+/* 0x280 --> 0x400 unused */
+
+#define RX_CPU_BASE 0x00005000
+#define TX_CPU_BASE 0x00005400
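+/* The per-cpu register offsets above are applied relative to
+ * RX_CPU_BASE and TX_CPU_BASE.
+ */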
+
+/* Mailboxes */
+#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */
+#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */
+#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */
+#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */
+#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */
+#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */
+#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */
+#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */
+#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */
+#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */
+#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */
+#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */
+#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */
+#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */
+#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */
+#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_9 0x000059c8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */
+#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00
+#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04
+#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08
+#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c
+/* 0x5a10 --> 0x5c00 */
+
+/* Flow Through queues */
+#define FTQ_RESET 0x00005c00
+/* 0x5c04 --> 0x5c10 unused */
+#define FTQ_DMA_NORM_READ_CTL 0x00005c10
+#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14
+#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18
+#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c
+#define FTQ_DMA_HIGH_READ_CTL 0x00005c20
+#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24
+#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28
+#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c
+#define FTQ_DMA_COMP_DISC_CTL 0x00005c30
+#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34
+#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38
+#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c
+#define FTQ_SEND_BD_COMP_CTL 0x00005c40
+#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44
+#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48
+#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c
+#define FTQ_SEND_DATA_INIT_CTL 0x00005c50
+#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54
+#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58
+#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c
+#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60
+#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64
+#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68
+#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c
+#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70
+#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74
+#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78
+#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c
+#define FTQ_SWTYPE1_CTL 0x00005c80
+#define FTQ_SWTYPE1_FULL_CNT 0x00005c84
+#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88
+#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c
+#define FTQ_SEND_DATA_COMP_CTL 0x00005c90
+#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94
+#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98
+#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c
+#define FTQ_HOST_COAL_CTL 0x00005ca0
+#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4
+#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8
+#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac
+#define FTQ_MAC_TX_CTL 0x00005cb0
+#define FTQ_MAC_TX_FULL_CNT 0x00005cb4
+#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8
+#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc
+#define FTQ_MB_FREE_CTL 0x00005cc0
+#define FTQ_MB_FREE_FULL_CNT 0x00005cc4
+#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8
+#define FTQ_MB_FREE_WRITE_PEEK 0x00005ccc
+#define FTQ_RCVBD_COMP_CTL 0x00005cd0
+#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4
+#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8
+#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc
+#define FTQ_RCVLST_PLMT_CTL 0x00005ce0
+#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4
+#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8
+#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec
+#define FTQ_RCVDATA_INI_CTL 0x00005cf0
+#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4
+#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8
+#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc
+#define FTQ_RCVDATA_COMP_CTL 0x00005d00
+#define FTQ_RCVDATA_COMP_FULL_CNT 0x00005d04
+#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08
+#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c
+#define FTQ_SWTYPE2_CTL 0x00005d10
+#define FTQ_SWTYPE2_FULL_CNT 0x00005d14
+#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18
+#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c
+/* 0x5d20 --> 0x6000 unused */
+
+/* Message signaled interrupt registers */
+#define MSGINT_MODE 0x00006000
+#define MSGINT_MODE_RESET 0x00000001
+#define MSGINT_MODE_ENABLE 0x00000002
+#define MSGINT_STATUS 0x00006004
+#define MSGINT_FIFO 0x00006008
+/* 0x600c --> 0x6400 unused */
+
+/* DMA completion registers */
+#define DMAC_MODE 0x00006400
+#define DMAC_MODE_RESET 0x00000001
+#define DMAC_MODE_ENABLE 0x00000002
+/* 0x6404 --> 0x6800 unused */
+
+/* GRC registers */
+#define GRC_MODE 0x00006800
+#define GRC_MODE_UPD_ON_COAL 0x00000001
+#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002
+#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
+#define GRC_MODE_BSWAP_DATA 0x00000010
+#define GRC_MODE_WSWAP_DATA 0x00000020
+#define GRC_MODE_SPLITHDR 0x00000100
+#define GRC_MODE_NOFRM_CRACKING 0x00000200
+#define GRC_MODE_INCL_CRC 0x00000400
+#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800
+#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
+#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
+#define GRC_MODE_FORCE_PCI32BIT 0x00008000
+#define GRC_MODE_HOST_STACKUP 0x00010000
+#define GRC_MODE_HOST_SENDBDS 0x00020000
+#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
+#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
+#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
+#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
+#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000
+#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
+#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
+#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MISC_CFG 0x00006804
+#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
+#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
+#define GRC_MISC_CFG_PRESCALAR_SHIFT 1
+#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000
+#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000
+#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000
+#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000
+#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000
+#define GRC_LOCAL_CTRL 0x00006808
+#define GRC_LCLCTRL_INT_ACTIVE 0x00000001
+#define GRC_LCLCTRL_CLEARINT 0x00000002
+#define GRC_LCLCTRL_SETINT 0x00000004
+#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
+#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100
+#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200
+#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400
+#define GRC_LCLCTRL_GPIO_OE0 0x00000800
+#define GRC_LCLCTRL_GPIO_OE1 0x00001000
+#define GRC_LCLCTRL_GPIO_OE2 0x00002000
+#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000
+#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000
+#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000
+#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000
+#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000
+#define GRC_LCLCTRL_MEMSZ_256K 0x00000000
+#define GRC_LCLCTRL_MEMSZ_512K 0x00040000
+#define GRC_LCLCTRL_MEMSZ_1M 0x00080000
+#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000
+#define GRC_LCLCTRL_MEMSZ_4M 0x00100000
+#define GRC_LCLCTRL_MEMSZ_8M 0x00140000
+#define GRC_LCLCTRL_MEMSZ_16M 0x00180000
+#define GRC_LCLCTRL_BANK_SELECT 0x00200000
+#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000
+#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000
+#define GRC_TIMER 0x0000680c
+#define GRC_RX_CPU_EVENT 0x00006810
+#define GRC_RX_TIMER_REF 0x00006814
+#define GRC_RX_CPU_SEM 0x00006818
+#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c
+#define GRC_TX_CPU_EVENT 0x00006820
+#define GRC_TX_TIMER_REF 0x00006824
+#define GRC_TX_CPU_SEM 0x00006828
+#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c
+#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */
+#define GRC_EEPROM_ADDR 0x00006838
+#define EEPROM_ADDR_WRITE 0x00000000
+#define EEPROM_ADDR_READ 0x80000000
+#define EEPROM_ADDR_COMPLETE 0x40000000
+#define EEPROM_ADDR_FSM_RESET 0x20000000
+#define EEPROM_ADDR_DEVID_MASK 0x1c000000
+#define EEPROM_ADDR_DEVID_SHIFT 26
+#define EEPROM_ADDR_START 0x02000000
+#define EEPROM_ADDR_CLKPERD_SHIFT 16
+#define EEPROM_ADDR_ADDR_MASK 0x0000ffff
+#define EEPROM_ADDR_ADDR_SHIFT 0
+#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60
+#define EEPROM_CHIP_SIZE (64 * 1024)
+#define GRC_EEPROM_DATA 0x0000683c
+#define GRC_EEPROM_CTRL 0x00006840
+#define GRC_MDI_CTRL 0x00006844
+#define GRC_SEEPROM_DELAY 0x00006848
+/* 0x684c --> 0x6c00 unused */
+
+/* 0x6c00 --> 0x7000 unused */
+
+/* NVRAM Control registers */
+#define NVRAM_CMD 0x00007000
+#define NVRAM_CMD_RESET 0x00000001
+#define NVRAM_CMD_DONE 0x00000008
+#define NVRAM_CMD_GO 0x00000010
+#define NVRAM_CMD_WR 0x00000020
+#define NVRAM_CMD_RD 0x00000000
+#define NVRAM_CMD_ERASE 0x00000040
+#define NVRAM_CMD_FIRST 0x00000080
+#define NVRAM_CMD_LAST 0x00000100
+#define NVRAM_CMD_WREN 0x00010000
+#define NVRAM_CMD_WRDI 0x00020000
+#define NVRAM_STAT 0x00007004
+#define NVRAM_WRDATA 0x00007008
+#define NVRAM_ADDR 0x0000700c
+#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_RDDATA 0x00007010
+#define NVRAM_CFG1 0x00007014
+#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
+#define NVRAM_CFG1_BUFFERED_MODE 0x00000002
+#define NVRAM_CFG1_PASS_THRU 0x00000004
+#define NVRAM_CFG1_STATUS_BITS 0x00000070
+#define NVRAM_CFG1_BIT_BANG 0x00000008
+#define NVRAM_CFG1_FLASH_SIZE 0x02000000
+#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000
+#define NVRAM_CFG1_VENDOR_MASK 0x03000003
+#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000
+#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
+#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003
+#define FLASH_VENDOR_ST 0x03000001
+#define FLASH_VENDOR_SAIFUN 0x01000003
+#define FLASH_VENDOR_SST_SMALL 0x00000001
+#define FLASH_VENDOR_SST_LARGE 0x02000001
+#define NVRAM_CFG2 0x00007018
+#define NVRAM_CFG3 0x0000701c
+#define NVRAM_SWARB 0x00007020
+#define SWARB_REQ_SET0 0x00000001
+#define SWARB_REQ_SET1 0x00000002
+#define SWARB_REQ_SET2 0x00000004
+#define SWARB_REQ_SET3 0x00000008
+#define SWARB_REQ_CLR0 0x00000010
+#define SWARB_REQ_CLR1 0x00000020
+#define SWARB_REQ_CLR2 0x00000040
+#define SWARB_REQ_CLR3 0x00000080
+#define SWARB_GNT0 0x00000100
+#define SWARB_GNT1 0x00000200
+#define SWARB_GNT2 0x00000400
+#define SWARB_GNT3 0x00000800
+#define SWARB_REQ0 0x00001000
+#define SWARB_REQ1 0x00002000
+#define SWARB_REQ2 0x00004000
+#define SWARB_REQ3 0x00008000
+#define NVRAM_ACCESS 0x00007024
+#define ACCESS_ENABLE 0x00000001
+#define ACCESS_WR_ENABLE 0x00000002
+#define NVRAM_WRITE1 0x00007028
+/* 0x702c --> 0x7400 unused */
+
+/* 0x7400 --> 0x8000 unused */
+
+#define TG3_EEPROM_MAGIC 0x669955aa
+
+/* 32K Window into NIC internal memory */
+#define NIC_SRAM_WIN_BASE 0x00008000
+
+/* Offsets into first 32k of NIC internal memory. */
+#define NIC_SRAM_PAGE_ZERO 0x00000000
+#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_STATS_BLK 0x00000300
+#define NIC_SRAM_STATUS_BLK 0x00000b00
+
+#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */
+
+#define NIC_SRAM_DATA_SIG 0x00000b54
+#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */
+
+#define NIC_SRAM_DATA_CFG 0x00000b58
+#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c
+#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020
+#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040
+#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080
+#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100
+#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000
+#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000
+#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000
+
+#define NIC_SRAM_DATA_VER 0x00000b5c
+#define NIC_SRAM_DATA_VER_SHIFT 16
+
+#define NIC_SRAM_DATA_PHY_ID 0x00000b74
+#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000
+#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff
+
+#define NIC_SRAM_FW_CMD_MBOX 0x00000b78
+#define FWCMD_NICDRV_ALIVE 0x00000001
+#define FWCMD_NICDRV_PAUSE_FW 0x00000002
+#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003
+#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
+#define FWCMD_NICDRV_FIX_DMAR 0x00000005
+#define FWCMD_NICDRV_FIX_DMAW 0x00000006
+#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
+#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80
+#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00
+#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04
+#define DRV_STATE_START 0x00000001
+#define DRV_STATE_START_DONE 0x80000001
+#define DRV_STATE_UNLOAD 0x00000002
+#define DRV_STATE_UNLOAD_DONE 0x80000002
+#define DRV_STATE_WOL 0x00000003
+#define DRV_STATE_SUSPEND 0x00000004
+
+#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08
+
+#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14
+#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18
+
+#define NIC_SRAM_DATA_CFG_2 0x00000d38
+
+#define SHASTA_EXT_LED_MODE_MASK 0x00018000
+#define SHASTA_EXT_LED_LEGACY 0x00000000
+#define SHASTA_EXT_LED_SHARED 0x00008000
+#define SHASTA_EXT_LED_MAC 0x00010000
+#define SHASTA_EXT_LED_COMBO 0x00018000
+
+#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
+
+#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
+#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000
+#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */
+#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */
+#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */
+#define NIC_SRAM_MBUF_POOL_BASE 0x00008000
+#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000
+#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000
+#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
+#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+
+/* Currently this is fixed. */
+#define PHY_ADDR 0x01
+
+/* Tigon3 specific PHY MII registers. */
+#define TG3_BMCR_SPEED1000 0x0040
+
+#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */
+#define MII_TG3_CTRL_ADV_1000_HALF 0x0100
+#define MII_TG3_CTRL_ADV_1000_FULL 0x0200
+#define MII_TG3_CTRL_AS_MASTER 0x0800
+#define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000
+
+#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */
+#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
+#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002
+#define MII_TG3_EXT_CTRL_TBI 0x8000
+
+#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_LPASS 0x0100
+
+#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */
+
+#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
+
+#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
+
+#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
+#define MII_TG3_AUX_STAT_LPASS 0x0004
+#define MII_TG3_AUX_STAT_SPDMASK 0x0700
+#define MII_TG3_AUX_STAT_10HALF 0x0100
+#define MII_TG3_AUX_STAT_10FULL 0x0200
+#define MII_TG3_AUX_STAT_100HALF 0x0300
+#define MII_TG3_AUX_STAT_100_4 0x0400
+#define MII_TG3_AUX_STAT_100FULL 0x0500
+#define MII_TG3_AUX_STAT_1000HALF 0x0600
+#define MII_TG3_AUX_STAT_1000FULL 0x0700
+
+#define MII_TG3_ISTAT 0x1a /* IRQ status register */
+#define MII_TG3_IMASK 0x1b /* IRQ mask register */
+
+/* ISTAT/IMASK event bits */
+#define MII_TG3_INT_LINKCHG 0x0002
+#define MII_TG3_INT_SPEEDCHG 0x0004
+#define MII_TG3_INT_DUPLEXCHG 0x0008
+#define MII_TG3_INT_ANEG_PAGE_RX 0x0400
+
+/* There are two ways to manage the TX descriptors on the tigon3.
+ * Either the descriptors are in host DMA'able memory, or they
+ * exist only in the card's on-chip SRAM. All 16 send BDs operate in
+ * the same mode; they may not be configured individually.
+ *
+ * This driver always uses host memory TX descriptors.
+ *
+ * To use host memory TX descriptors:
+ * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
+ * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
+ * 2) Allocate DMA'able memory.
+ * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
+ * obtained in step 2
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
+ * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
+ * of TX descriptors. Leave flags field clear.
+ * 4) Access TX descriptors via host memory. The chip
+ * will refetch into local SRAM as needed when producer
+ * index mailboxes are updated.
+ *
+ * To use on-chip TX descriptors:
+ * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
+ * Make sure GRC_MODE_HOST_SENDBDS is clear.
+ * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to zero.
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
+ * c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
+ * 3) Access TX descriptors directly in on-chip SRAM
+ * using normal {read,write}l(). (and not using
+ * pointer dereferencing of ioremap()'d memory like
+ * the broken Broadcom driver does)
+ *
+ * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of
+ * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices.
+ */
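+/* Illustrative sketch (not part of the original header): programming send
+ * ring 0 for host memory TX descriptors along the lines of the steps above.
+ * It assumes the TG3_BDINFO_* offsets referenced above, a tg3_write_mem()
+ * style indirect NIC SRAM write helper and the tw32() register write macro
+ * from the driver proper, plus 'num_tx_bds' for the ring size:
+ *
+ *	tw32(GRC_MODE, (tp->grc_mode & ~GRC_MODE_4X_NIC_SEND_RINGS) |
+ *		       GRC_MODE_HOST_SENDBDS);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR,
+ *		      ((u64) tp->tx_desc_mapping >> 32));
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR + 4,
+ *		      ((u64) tp->tx_desc_mapping & 0xffffffff));
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_NIC_ADDR,
+ *		      NIC_SRAM_TX_BUFFER_DESC);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_MAXLEN_FLAGS,
+ *		      num_tx_bds << 16);	(len in upper 16 bits, flags clear)
+ */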
+struct tg3_tx_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+
+ u32 len_flags;
+#define TXD_FLAG_TCPUDP_CSUM 0x0001
+#define TXD_FLAG_IP_CSUM 0x0002
+#define TXD_FLAG_END 0x0004
+#define TXD_FLAG_IP_FRAG 0x0008
+#define TXD_FLAG_IP_FRAG_END 0x0010
+#define TXD_FLAG_VLAN 0x0040
+#define TXD_FLAG_COAL_NOW 0x0080
+#define TXD_FLAG_CPU_PRE_DMA 0x0100
+#define TXD_FLAG_CPU_POST_DMA 0x0200
+#define TXD_FLAG_ADD_SRC_ADDR 0x1000
+#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000
+#define TXD_FLAG_NO_CRC 0x8000
+#define TXD_LEN_SHIFT 16
+
+ u32 vlan_tag;
+#define TXD_VLAN_TAG_SHIFT 0
+#define TXD_MSS_SHIFT 16
+};
+
+#define TXD_ADDR 0x00UL /* 64-bit */
+#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */
+#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */
+#define TXD_SIZE 0x10UL
+
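+/* Illustrative sketch (not part of the original header): filling one host
+ * memory TX descriptor for a single-fragment frame using the layout above.
+ * 'entry', 'mapping', 'len', 'vlan_tag' and 'mss' are assumed to come from
+ * the caller ('mapping' via pci_map_single() on the skb data):
+ *
+ *	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+ *
+ *	txd->addr_hi   = ((u64) mapping >> 32);
+ *	txd->addr_lo   = ((u64) mapping & 0xffffffff);
+ *	txd->len_flags = (len << TXD_LEN_SHIFT) | TXD_FLAG_END;
+ *	txd->vlan_tag  = (vlan_tag << TXD_VLAN_TAG_SHIFT) |
+ *			 (mss << TXD_MSS_SHIFT);
+ */
+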
+struct tg3_rx_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+
+ u32 idx_len;
+#define RXD_IDX_MASK 0xffff0000
+#define RXD_IDX_SHIFT 16
+#define RXD_LEN_MASK 0x0000ffff
+#define RXD_LEN_SHIFT 0
+
+ u32 type_flags;
+#define RXD_TYPE_SHIFT 16
+#define RXD_FLAGS_SHIFT 0
+
+#define RXD_FLAG_END 0x0004
+#define RXD_FLAG_MINI 0x0800
+#define RXD_FLAG_JUMBO 0x0020
+#define RXD_FLAG_VLAN 0x0040
+#define RXD_FLAG_ERROR 0x0400
+#define RXD_FLAG_IP_CSUM 0x1000
+#define RXD_FLAG_TCPUDP_CSUM 0x2000
+#define RXD_FLAG_IS_TCP 0x4000
+
+ u32 ip_tcp_csum;
+#define RXD_IPCSUM_MASK 0xffff0000
+#define RXD_IPCSUM_SHIFT 16
+#define RXD_TCPCSUM_MASK 0x0000ffff
+#define RXD_TCPCSUM_SHIFT 0
+
+ u32 err_vlan;
+
+#define RXD_VLAN_MASK 0x0000ffff
+
+#define RXD_ERR_BAD_CRC 0x00010000
+#define RXD_ERR_COLLISION 0x00020000
+#define RXD_ERR_LINK_LOST 0x00040000
+#define RXD_ERR_PHY_DECODE 0x00080000
+#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000
+#define RXD_ERR_MAC_ABRT 0x00200000
+#define RXD_ERR_TOO_SMALL 0x00400000
+#define RXD_ERR_NO_RESOURCES 0x00800000
+#define RXD_ERR_HUGE_FRAME 0x01000000
+#define RXD_ERR_MASK 0xffff0000
+
+ u32 reserved;
+ u32 opaque;
+#define RXD_OPAQUE_INDEX_MASK 0x0000ffff
+#define RXD_OPAQUE_INDEX_SHIFT 0
+#define RXD_OPAQUE_RING_STD 0x00010000
+#define RXD_OPAQUE_RING_JUMBO 0x00020000
+#define RXD_OPAQUE_RING_MINI 0x00040000
+#define RXD_OPAQUE_RING_MASK 0x00070000
+};
+
+struct tg3_ext_rx_buffer_desc {
+ struct {
+ u32 addr_hi;
+ u32 addr_lo;
+ } addrlist[3];
+ u32 len2_len1;
+ u32 resv_len3;
+ struct tg3_rx_buffer_desc std;
+};
+
+/* We only use this when testing out the DMA engine
+ * at probe time. This is the internal format of buffer
+ * descriptors used by the chip at NIC_SRAM_DMA_DESCS.
+ */
+struct tg3_internal_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 nic_mbuf;
+ /* XXX FIX THIS */
+#ifdef __BIG_ENDIAN
+ u16 cqid_sqid;
+ u16 len;
+#else
+ u16 len;
+ u16 cqid_sqid;
+#endif
+ u32 flags;
+ u32 __cookie1;
+ u32 __cookie2;
+ u32 __cookie3;
+};
+
+#define TG3_HW_STATUS_SIZE 0x50
+struct tg3_hw_status {
+ u32 status;
+#define SD_STATUS_UPDATED 0x00000001
+#define SD_STATUS_LINK_CHG 0x00000002
+#define SD_STATUS_ERROR 0x00000004
+
+ u32 status_tag;
+
+#ifdef __BIG_ENDIAN
+ u16 rx_consumer;
+ u16 rx_jumbo_consumer;
+#else
+ u16 rx_jumbo_consumer;
+ u16 rx_consumer;
+#endif
+
+#ifdef __BIG_ENDIAN
+ u16 reserved;
+ u16 rx_mini_consumer;
+#else
+ u16 rx_mini_consumer;
+ u16 reserved;
+#endif
+ struct {
+#ifdef __BIG_ENDIAN
+ u16 tx_consumer;
+ u16 rx_producer;
+#else
+ u16 rx_producer;
+ u16 tx_consumer;
+#endif
+ } idx[16];
+};
+
+typedef struct {
+ u32 high, low;
+} tg3_stat64_t;
+
+struct tg3_hw_stats {
+ u8 __reserved0[0x400-0x300];
+
+ /* Statistics maintained by Receive MAC. */
+ tg3_stat64_t rx_octets;
+ u64 __reserved1;
+ tg3_stat64_t rx_fragments;
+ tg3_stat64_t rx_ucast_packets;
+ tg3_stat64_t rx_mcast_packets;
+ tg3_stat64_t rx_bcast_packets;
+ tg3_stat64_t rx_fcs_errors;
+ tg3_stat64_t rx_align_errors;
+ tg3_stat64_t rx_xon_pause_rcvd;
+ tg3_stat64_t rx_xoff_pause_rcvd;
+ tg3_stat64_t rx_mac_ctrl_rcvd;
+ tg3_stat64_t rx_xoff_entered;
+ tg3_stat64_t rx_frame_too_long_errors;
+ tg3_stat64_t rx_jabbers;
+ tg3_stat64_t rx_undersize_packets;
+ tg3_stat64_t rx_in_length_errors;
+ tg3_stat64_t rx_out_length_errors;
+ tg3_stat64_t rx_64_or_less_octet_packets;
+ tg3_stat64_t rx_65_to_127_octet_packets;
+ tg3_stat64_t rx_128_to_255_octet_packets;
+ tg3_stat64_t rx_256_to_511_octet_packets;
+ tg3_stat64_t rx_512_to_1023_octet_packets;
+ tg3_stat64_t rx_1024_to_1522_octet_packets;
+ tg3_stat64_t rx_1523_to_2047_octet_packets;
+ tg3_stat64_t rx_2048_to_4095_octet_packets;
+ tg3_stat64_t rx_4096_to_8191_octet_packets;
+ tg3_stat64_t rx_8192_to_9022_octet_packets;
+
+ u64 __unused0[37];
+
+ /* Statistics maintained by Transmit MAC. */
+ tg3_stat64_t tx_octets;
+ u64 __reserved2;
+ tg3_stat64_t tx_collisions;
+ tg3_stat64_t tx_xon_sent;
+ tg3_stat64_t tx_xoff_sent;
+ tg3_stat64_t tx_flow_control;
+ tg3_stat64_t tx_mac_errors;
+ tg3_stat64_t tx_single_collisions;
+ tg3_stat64_t tx_mult_collisions;
+ tg3_stat64_t tx_deferred;
+ u64 __reserved3;
+ tg3_stat64_t tx_excessive_collisions;
+ tg3_stat64_t tx_late_collisions;
+ tg3_stat64_t tx_collide_2times;
+ tg3_stat64_t tx_collide_3times;
+ tg3_stat64_t tx_collide_4times;
+ tg3_stat64_t tx_collide_5times;
+ tg3_stat64_t tx_collide_6times;
+ tg3_stat64_t tx_collide_7times;
+ tg3_stat64_t tx_collide_8times;
+ tg3_stat64_t tx_collide_9times;
+ tg3_stat64_t tx_collide_10times;
+ tg3_stat64_t tx_collide_11times;
+ tg3_stat64_t tx_collide_12times;
+ tg3_stat64_t tx_collide_13times;
+ tg3_stat64_t tx_collide_14times;
+ tg3_stat64_t tx_collide_15times;
+ tg3_stat64_t tx_ucast_packets;
+ tg3_stat64_t tx_mcast_packets;
+ tg3_stat64_t tx_bcast_packets;
+ tg3_stat64_t tx_carrier_sense_errors;
+ tg3_stat64_t tx_discards;
+ tg3_stat64_t tx_errors;
+
+ u64 __unused1[31];
+
+ /* Statistics maintained by Receive List Placement. */
+ tg3_stat64_t COS_rx_packets[16];
+ tg3_stat64_t COS_rx_filter_dropped;
+ tg3_stat64_t dma_writeq_full;
+ tg3_stat64_t dma_write_prioq_full;
+ tg3_stat64_t rxbds_empty;
+ tg3_stat64_t rx_discards;
+ tg3_stat64_t rx_errors;
+ tg3_stat64_t rx_threshold_hit;
+
+ u64 __unused2[9];
+
+ /* Statistics maintained by Send Data Initiator. */
+ tg3_stat64_t COS_out_packets[16];
+ tg3_stat64_t dma_readq_full;
+ tg3_stat64_t dma_read_prioq_full;
+ tg3_stat64_t tx_comp_queue_full;
+
+ /* Statistics maintained by Host Coalescing. */
+ tg3_stat64_t ring_set_send_prod_index;
+ tg3_stat64_t ring_status_update;
+ tg3_stat64_t nic_irqs;
+ tg3_stat64_t nic_avoided_irqs;
+ tg3_stat64_t nic_tx_threshold_hit;
+
+ u8 __reserved4[0xb00-0x9c0];
+};
+
+/* 'mapping' is superfluous as the chip does not write into
+ * the tx/rx post rings so we could just fetch it from there.
+ * But the cache behavior is better the way we are doing it now.
+ */
+struct ring_info {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapping)
+};
+
+struct tx_ring_info {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapping)
+ u32 prev_vlan_tag;
+};
+
+struct tg3_config_info {
+ u32 flags;
+};
+
+struct tg3_link_config {
+ /* Describes what we're trying to get. */
+ u32 advertising;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+
+ /* Describes what we actually have. */
+ u16 active_speed;
+ u8 active_duplex;
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ /* When we go in and out of low power mode we need
+ * to swap with this state.
+ */
+ int phy_is_low_power;
+ u16 orig_speed;
+ u8 orig_duplex;
+ u8 orig_autoneg;
+};
+
+struct tg3_bufmgr_config {
+ u32 mbuf_read_dma_low_water;
+ u32 mbuf_mac_rx_low_water;
+ u32 mbuf_high_water;
+
+ u32 mbuf_read_dma_low_water_jumbo;
+ u32 mbuf_mac_rx_low_water_jumbo;
+ u32 mbuf_high_water_jumbo;
+
+ u32 dma_low_water;
+ u32 dma_high_water;
+};
+
+struct tg3_ethtool_stats {
+ /* Statistics maintained by Receive MAC. */
+ u64 rx_octets;
+ u64 rx_fragments;
+ u64 rx_ucast_packets;
+ u64 rx_mcast_packets;
+ u64 rx_bcast_packets;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 rx_xon_pause_rcvd;
+ u64 rx_xoff_pause_rcvd;
+ u64 rx_mac_ctrl_rcvd;
+ u64 rx_xoff_entered;
+ u64 rx_frame_too_long_errors;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_in_length_errors;
+ u64 rx_out_length_errors;
+ u64 rx_64_or_less_octet_packets;
+ u64 rx_65_to_127_octet_packets;
+ u64 rx_128_to_255_octet_packets;
+ u64 rx_256_to_511_octet_packets;
+ u64 rx_512_to_1023_octet_packets;
+ u64 rx_1024_to_1522_octet_packets;
+ u64 rx_1523_to_2047_octet_packets;
+ u64 rx_2048_to_4095_octet_packets;
+ u64 rx_4096_to_8191_octet_packets;
+ u64 rx_8192_to_9022_octet_packets;
+
+ /* Statistics maintained by Transmit MAC. */
+ u64 tx_octets;
+ u64 tx_collisions;
+ u64 tx_xon_sent;
+ u64 tx_xoff_sent;
+ u64 tx_flow_control;
+ u64 tx_mac_errors;
+ u64 tx_single_collisions;
+ u64 tx_mult_collisions;
+ u64 tx_deferred;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_collide_2times;
+ u64 tx_collide_3times;
+ u64 tx_collide_4times;
+ u64 tx_collide_5times;
+ u64 tx_collide_6times;
+ u64 tx_collide_7times;
+ u64 tx_collide_8times;
+ u64 tx_collide_9times;
+ u64 tx_collide_10times;
+ u64 tx_collide_11times;
+ u64 tx_collide_12times;
+ u64 tx_collide_13times;
+ u64 tx_collide_14times;
+ u64 tx_collide_15times;
+ u64 tx_ucast_packets;
+ u64 tx_mcast_packets;
+ u64 tx_bcast_packets;
+ u64 tx_carrier_sense_errors;
+ u64 tx_discards;
+ u64 tx_errors;
+
+ /* Statistics maintained by Receive List Placement. */
+ u64 dma_writeq_full;
+ u64 dma_write_prioq_full;
+ u64 rxbds_empty;
+ u64 rx_discards;
+ u64 rx_errors;
+ u64 rx_threshold_hit;
+
+ /* Statistics maintained by Send Data Initiator. */
+ u64 dma_readq_full;
+ u64 dma_read_prioq_full;
+ u64 tx_comp_queue_full;
+
+ /* Statistics maintained by Host Coalescing. */
+ u64 ring_set_send_prod_index;
+ u64 ring_status_update;
+ u64 nic_irqs;
+ u64 nic_avoided_irqs;
+ u64 nic_tx_threshold_hit;
+};
+
+struct tg3 {
+ /* begin "general, frequently-used members" cacheline section */
+
+ /* SMP locking strategy:
+ *
+ * lock: Held during all operations except TX packet
+ * processing.
+ *
+ * tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx
+ *
+ * If you want to shut up all asynchronous processing you must
+ * acquire both locks, 'lock' taken before 'tx_lock'. IRQs must
+ * be disabled to take 'lock' but only softirq disabling is
+ * necessary for acquisition of 'tx_lock'. (An illustrative lock
+ * ordering sketch follows this structure definition.)
+ */
+ spinlock_t lock;
+ spinlock_t indirect_lock;
+
+ void __iomem *regs;
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct tg3_hw_status *hw_status;
+ dma_addr_t status_mapping;
+
+ u32 msg_enable;
+
+ /* begin "tx thread" cacheline section */
+ u32 tx_prod;
+ u32 tx_cons;
+ u32 tx_pending;
+
+ spinlock_t tx_lock;
+
+ struct tg3_tx_buffer_desc *tx_ring;
+ struct tx_ring_info *tx_buffers;
+ dma_addr_t tx_desc_mapping;
+
+ /* begin "rx thread" cacheline section */
+ u32 rx_rcb_ptr;
+ u32 rx_std_ptr;
+ u32 rx_jumbo_ptr;
+ u32 rx_pending;
+ u32 rx_jumbo_pending;
+#if TG3_VLAN_TAG_USED
+ struct vlan_group *vlgrp;
+#endif
+
+ struct tg3_rx_buffer_desc *rx_std;
+ struct ring_info *rx_std_buffers;
+ dma_addr_t rx_std_mapping;
+
+ struct tg3_rx_buffer_desc *rx_jumbo;
+ struct ring_info *rx_jumbo_buffers;
+ dma_addr_t rx_jumbo_mapping;
+
+ struct tg3_rx_buffer_desc *rx_rcb;
+ dma_addr_t rx_rcb_mapping;
+
+ /* begin "everything else" cacheline(s) section */
+ struct net_device_stats net_stats;
+ struct net_device_stats net_stats_prev;
+ struct tg3_ethtool_stats estats;
+ struct tg3_ethtool_stats estats_prev;
+
+ unsigned long phy_crc_errors;
+
+ u32 rx_offset;
+ u32 tg3_flags;
+#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
+#define TG3_FLAG_RX_CHECKSUMS 0x00000004
+#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
+#define TG3_FLAG_USE_MI_INTERRUPT 0x00000010
+#define TG3_FLAG_ENABLE_ASF 0x00000020
+#define TG3_FLAG_5701_REG_WRITE_BUG 0x00000040
+#define TG3_FLAG_POLL_SERDES 0x00000080
+#if defined(CONFIG_X86)
+#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100
+#else
+#define TG3_FLAG_MBOX_WRITE_REORDER 0 /* disables code too */
+#endif
+#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
+#define TG3_FLAG_WOL_SPEED_100MB 0x00000400
+#define TG3_FLAG_WOL_ENABLE 0x00000800
+#define TG3_FLAG_EEPROM_WRITE_PROT 0x00001000
+#define TG3_FLAG_NVRAM 0x00002000
+#define TG3_FLAG_NVRAM_BUFFERED 0x00004000
+#define TG3_FLAG_RX_PAUSE 0x00008000
+#define TG3_FLAG_TX_PAUSE 0x00010000
+#define TG3_FLAG_PCIX_MODE 0x00020000
+#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
+#define TG3_FLAG_PCI_32BIT 0x00080000
+#define TG3_FLAG_NO_TX_PSEUDO_CSUM 0x00100000
+#define TG3_FLAG_NO_RX_PSEUDO_CSUM 0x00200000
+#define TG3_FLAG_SERDES_WOL_CAP 0x00400000
+#define TG3_FLAG_JUMBO_ENABLE 0x00800000
+#define TG3_FLAG_10_100_ONLY 0x01000000
+#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
+#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
+#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000
+#define TG3_FLAG_SPLIT_MODE 0x40000000
+#define TG3_FLAG_INIT_COMPLETE 0x80000000
+ u32 tg3_flags2;
+#define TG3_FLG2_RESTART_TIMER 0x00000001
+#define TG3_FLG2_SUN_570X 0x00000002
+#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
+#define TG3_FLG2_IS_5788 0x00000008
+#define TG3_FLG2_MAX_RXPEND_64 0x00000010
+#define TG3_FLG2_TSO_CAPABLE 0x00000020
+#define TG3_FLG2_PHY_ADC_BUG 0x00000040
+#define TG3_FLG2_PHY_5704_A0_BUG 0x00000080
+#define TG3_FLG2_PHY_BER_BUG 0x00000100
+#define TG3_FLG2_PCI_EXPRESS 0x00000200
+#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400
+#define TG3_FLG2_HW_AUTONEG 0x00000800
+#define TG3_FLG2_PHY_JUST_INITTED 0x00001000
+#define TG3_FLG2_PHY_SERDES 0x00002000
+#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000
+#define TG3_FLG2_FLASH 0x00008000
+#define TG3_FLG2_HW_TSO 0x00010000
+#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
+#define TG3_FLG2_5705_PLUS 0x00040000
+
+ u32 split_mode_max_reqs;
+#define SPLIT_MODE_5704_MAX_REQ 3
+
+ struct timer_list timer;
+ u16 timer_counter;
+ u16 timer_multiplier;
+ u32 timer_offset;
+ u16 asf_counter;
+ u16 asf_multiplier;
+
+ struct tg3_link_config link_config;
+ struct tg3_bufmgr_config bufmgr_config;
+
+ /* cache h/w values, often passed straight to h/w */
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 mac_mode;
+ u32 mi_mode;
+ u32 misc_host_ctrl;
+ u32 grc_mode;
+ u32 grc_local_ctrl;
+ u32 dma_rwctrl;
+ u32 coalesce_mode;
+
+ /* PCI block */
+ u16 pci_chip_rev_id;
+ u8 pci_cacheline_sz;
+ u8 pci_lat_timer;
+ u8 pci_hdr_type;
+ u8 pci_bist;
+
+ int pm_cap;
+
+ /* PHY info */
+ u32 phy_id;
+#define PHY_ID_MASK 0xfffffff0
+#define PHY_ID_BCM5400 0x60008040
+#define PHY_ID_BCM5401 0x60008050
+#define PHY_ID_BCM5411 0x60008070
+#define PHY_ID_BCM5701 0x60008110
+#define PHY_ID_BCM5703 0x60008160
+#define PHY_ID_BCM5704 0x60008190
+#define PHY_ID_BCM5705 0x600081a0
+#define PHY_ID_BCM5750 0x60008180
+#define PHY_ID_BCM8002 0x60010140
+#define PHY_ID_INVALID 0xffffffff
+#define PHY_ID_REV_MASK 0x0000000f
+#define PHY_REV_BCM5401_B0 0x1
+#define PHY_REV_BCM5401_B2 0x3
+#define PHY_REV_BCM5401_C0 0x6
+#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
+
+ u32 led_ctrl;
+
+ char board_part_number[24];
+ u32 nic_sram_data_cfg;
+ u32 pci_clock_ctrl;
+ struct pci_dev *pdev_peer;
+
+ /* This macro assumes the passed PHY ID is already masked
+ * with PHY_ID_MASK.
+ */
+#define KNOWN_PHY_ID(X) \
+ ((X) == PHY_ID_BCM5400 || (X) == PHY_ID_BCM5401 || \
+ (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
+ (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
+ (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
+ (X) == PHY_ID_BCM8002)
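+	/* Illustrative usage (hypothetical call site):
+	 *	if (!KNOWN_PHY_ID(tp->phy_id & PHY_ID_MASK)) ...
+	 */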
+
+ struct tg3_hw_stats *hw_stats;
+ dma_addr_t stats_mapping;
+ struct work_struct reset_task;
+
+ u32 nvram_size;
+ u32 nvram_pagesize;
+ u32 nvram_jedecnum;
+
+#define JEDEC_ATMEL 0x1f
+#define JEDEC_ST 0x20
+#define JEDEC_SAIFUN 0x4f
+#define JEDEC_SST 0xbf
+
+#define ATMEL_AT24C64_CHIP_SIZE (64 * 1024)
+#define ATMEL_AT24C64_PAGE_SIZE (32)
+
+#define ATMEL_AT24C512_CHIP_SIZE (512 * 1024)
+#define ATMEL_AT24C512_PAGE_SIZE (128)
+
+#define ATMEL_AT45DB0X1B_PAGE_POS 9
+#define ATMEL_AT45DB0X1B_PAGE_SIZE 264
+
+#define ATMEL_AT25F512_PAGE_SIZE 256
+
+#define ST_M45PEX0_PAGE_SIZE 256
+
+#define SAIFUN_SA25F0XX_PAGE_SIZE 256
+
+#define SST_25VF0X0_PAGE_SIZE 4098
+
+
+};
+
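+/* Illustrative sketch (not part of the original header): helpers spelling out
+ * the lock ordering documented in the SMP locking strategy comment inside
+ * struct tg3.  The names are hypothetical and <linux/spinlock.h> is assumed
+ * to be included by the driver proper; with hard IRQs disabled for 'lock',
+ * softirqs cannot run either, which satisfies the 'tx_lock' requirement.
+ */
+static inline void tg3_full_lock_sketch(struct tg3 *tp)
+{
+	spin_lock_irq(&tp->lock);	/* 'lock' first, hard IRQs off */
+	spin_lock(&tp->tx_lock);	/* then 'tx_lock' */
+}
+
+static inline void tg3_full_unlock_sketch(struct tg3 *tp)
+{
+	spin_unlock(&tp->tx_lock);
+	spin_unlock_irq(&tp->lock);
+}
+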
+#endif /* !(_T3_H) */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
new file mode 100644
index 000000000000..a7ffa64502dd
--- /dev/null
+++ b/drivers/net/tlan.c
@@ -0,0 +1,3304 @@
+/*******************************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ * (C) 1999-2001 Torben Mathiasen
+ * (C) 2002 Samuel Chessman
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with columns>=132.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ * Change History
+ *
+ * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses
+ * new PCI BIOS interface.
+ * Alan Cox <alan@redhat.com>: Fixed the out of memory
+ * handling.
+ *
+ * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
+ *
+ * v1.1 Dec 20, 1999 - Removed linux version checking
+ * Patch from Tigran Aivazian.
+ * - v1.1 includes Alan's SMP updates.
+ * - We still have problems on SMP though,
+ * but I'm looking into that.
+ *
+ * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
+ * - Removed dependency of HZ being 100.
+ * - We now allow higher priority timers to
+ * overwrite timers like TLAN_TIMER_ACTIVITY
+ * Patch from John Cagle <john.cagle@compaq.com>.
+ * - Fixed a few compiler warnings.
+ *
+ * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
+ * - Removed call to pci_present().
+ * - Removed SA_INTERRUPT flag from irq handler.
+ * - Added __init and __initdata to reduce resident
+ * code size.
+ * - Driver now uses module_init/module_exit.
+ * - Rewrote init_module and tlan_probe to
+ * share a lot more code. We now use tlan_probe
+ * with builtin and module driver.
+ * - Driver ported to new net API.
+ * - tlan.txt has been reworked to reflect current
+ * driver (almost)
+ * - Other minor stuff
+ *
+ * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
+ * network cleanup in 2.3.43pre7 (Tigran & myself)
+ * - Minor stuff.
+ *
+ * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
+ * if no cable/link were present.
+ * - Cosmetic changes.
+ * - TODO: Port completely to new PCI/DMA API
+ * Auto-Neg fallback.
+ *
+ * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
+ * tested it though, as the kernel support is currently
+ * broken (2.3.99p4p3).
+ * - Updated tlan.txt accordingly.
+ * - Adjusted minimum/maximum frame length.
+ * - There is now a TLAN website up at
+ * http://tlan.kernel.dk
+ *
+ * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
+ * reports PHY information when used with Donald
+ * Becker's userspace MII diagnostics utility.
+ *
+ * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
+ * - Added link information to Auto-Neg and forced
+ * modes. When NIC operates with auto-neg the driver
+ * will report Link speed & duplex modes as well as
+ * link partner abilities. When forced link is used,
+ * the driver will report status of the established
+ * link.
+ * Please read tlan.txt for additional information.
+ * - Removed call to check_region(), and used
+ * return value of request_region() instead.
+ *
+ * v1.8a May 28, 2000 - Minor updates.
+ *
+ * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
+ * - Updated with timer fixes from Andrew Morton.
+ * - Fixed module race in TLan_Open.
+ * - Added routine to monitor PHY status.
+ * - Added activity led support for Proliant devices.
+ *
+ * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
+ * like the Compaq NetFlex3/E.
+ * - Rewrote tlan_probe to better handle multiple
+ * bus probes. Probing and device setup is now
+ * done through TLan_Probe and TLan_init_one. Actual
+ * hardware probe is done with kernel API and
+ * TLan_EisaProbe.
+ * - Adjusted debug information for probing.
+ * - Fixed bug that would cause general debug information
+ * to be printed after driver removal.
+ * - Added transmit timeout handling.
+ * - Fixed OOM return values in tlan_probe.
+ * - Fixed possible mem leak in tlan_exit
+ * (now tlan_remove_one).
+ * - Fixed timer bug in TLan_phyMonitor.
+ * - This driver version is alpha quality, please
+ * send me any bug issues you may encounter.
+ *
+ * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
+ * set for EISA cards.
+ * - Added support for NetFlex3/E with nibble-rate
+ * 10Base-T PHY. This is untested as I haven't got
+ * one of these cards.
+ * - Fixed timer being added twice.
+ * - Disabled PhyMonitoring by default as this is
+ * work in progress. Define MONITOR to enable it.
+ * - Now we don't display link info with PHYs that
+ * don't support it (level1).
+ * - Increased tx_timeout because of auto-neg.
+ * - Adjusted timers for forced speeds.
+ *
+ * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
+ *
+ * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
+ * when link can't be established.
+ * - Added the bbuf option as a kernel parameter.
+ * - Fixed ioaddr probe bug.
+ * - Fixed stupid deadlock with MII interrupts.
+ * - Added support for speed/duplex selection with
+ * multiple nics.
+ * - Added partly fix for TX Channel lockup with
+ * TLAN v1.0 silicon. This needs to be investigated
+ * further.
+ *
+ * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per
+ * interrupt. Thanks go to
+ * Adam Keys <adam@ti.com>
+ * Denis Beaudoin <dbeaudoin@ti.com>
+ * for providing the patch.
+ * - Fixed auto-neg output when using multiple
+ * adapters.
+ * - Converted to use new taskq interface.
+ *
+ * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
+ *
+ * Samuel Chessman <chessman@tux.org> New Maintainer!
+ *
+ * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
+ * 10T half duplex no loopback
+ * Thanks to Gunnar Eikman
+ *******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include "tlan.h"
+
+typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
+
+
+/* For removing EISA devices */
+static struct net_device *TLan_Eisa_Devices;
+
+static int TLanDevicesInstalled;
+
+/* Set speed, duplex and aui settings */
+static int aui[MAX_TLAN_BOARDS];
+static int duplex[MAX_TLAN_BOARDS];
+static int speed[MAX_TLAN_BOARDS];
+static int boards_found;
+
+MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
+MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
+MODULE_LICENSE("GPL");
+
+
+/* Define this to enable Link beat monitoring */
+#undef MONITOR
+
+/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+static int debug;
+
+static int bbuf;
+static u8 *TLanPadBuffer;
+static dma_addr_t TLanPadBufferDMA;
+static char TLanSignature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.15\n";
+static int tlan_have_pci;
+static int tlan_have_eisa;
+
+static const char *media[] = {
+ "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
+ "100baseTx-FD", "100baseT4", NULL
+};
+
+static struct board {
+ const char *deviceLabel;
+ u32 flags;
+ u16 addrOfs;
+} board_info[] = {
+ { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+ { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+};
+
+static struct pci_device_id tlan_pci_tbl[] = {
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
+
+static void TLan_EisaProbe( void );
+static void TLan_Eisa_Cleanup( void );
+static int TLan_Init( struct net_device * );
+static int TLan_Open( struct net_device *dev );
+static int TLan_StartTx( struct sk_buff *, struct net_device *);
+static irqreturn_t TLan_HandleInterrupt( int, void *, struct pt_regs *);
+static int TLan_Close( struct net_device *);
+static struct net_device_stats *TLan_GetStats( struct net_device *);
+static void TLan_SetMulticastList( struct net_device *);
+static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
+static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
+static void TLan_tx_timeout( struct net_device *dev);
+static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static u32 TLan_HandleInvalid( struct net_device *, u16 );
+static u32 TLan_HandleTxEOF( struct net_device *, u16 );
+static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
+static u32 TLan_HandleRxEOF( struct net_device *, u16 );
+static u32 TLan_HandleDummy( struct net_device *, u16 );
+static u32 TLan_HandleTxEOC( struct net_device *, u16 );
+static u32 TLan_HandleStatusCheck( struct net_device *, u16 );
+static u32 TLan_HandleRxEOC( struct net_device *, u16 );
+
+static void TLan_Timer( unsigned long );
+
+static void TLan_ResetLists( struct net_device * );
+static void TLan_FreeLists( struct net_device * );
+static void TLan_PrintDio( u16 );
+static void TLan_PrintList( TLanList *, char *, int );
+static void TLan_ReadAndClearStats( struct net_device *, int );
+static void TLan_ResetAdapter( struct net_device * );
+static void TLan_FinishReset( struct net_device * );
+static void TLan_SetMac( struct net_device *, int areg, char *mac );
+
+static void TLan_PhyPrint( struct net_device * );
+static void TLan_PhyDetect( struct net_device * );
+static void TLan_PhyPowerDown( struct net_device * );
+static void TLan_PhyPowerUp( struct net_device * );
+static void TLan_PhyReset( struct net_device * );
+static void TLan_PhyStartLink( struct net_device * );
+static void TLan_PhyFinishAutoNeg( struct net_device * );
+#ifdef MONITOR
+static void TLan_PhyMonitor( struct net_device * );
+#endif
+
+/*
+static int TLan_PhyNop( struct net_device * );
+static int TLan_PhyInternalCheck( struct net_device * );
+static int TLan_PhyInternalService( struct net_device * );
+static int TLan_PhyDp83840aCheck( struct net_device * );
+*/
+
+static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
+static void TLan_MiiSendData( u16, u32, unsigned );
+static void TLan_MiiSync( u16 );
+static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+
+static void TLan_EeSendStart( u16 );
+static int TLan_EeSendByte( u16, u8, int );
+static void TLan_EeReceiveByte( u16, u8 *, int );
+static int TLan_EeReadByte( struct net_device *, u8, u8 * );
+
+
+static void
+TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+{
+ unsigned long addr = (unsigned long)skb;
+ tag->buffer[9].address = (u32)addr;
+ addr >>= 31; /* >>= 32 is undefined for 32bit arch, stupid C */
+ addr >>= 1;
+ tag->buffer[8].address = (u32)addr;
+}
+
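+/* Reassemble the sk_buff pointer that TLan_StoreSKB() above split across
+ * two 32-bit buffer descriptor fields.
+ */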
+static struct sk_buff *
+TLan_GetSKB( struct tlan_list_tag *tag)
+{
+ unsigned long addr = tag->buffer[8].address;
+ addr <<= 31;
+ addr <<= 1;
+ addr |= tag->buffer[9].address;
+ return (struct sk_buff *) addr;
+}
+
+
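+/* Interrupt handler dispatch table, indexed by the interrupt type field
+ * that the adapter reports (see TLan_HandleInterrupt).
+ */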
+static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+ TLan_HandleInvalid,
+ TLan_HandleTxEOF,
+ TLan_HandleStatOverflow,
+ TLan_HandleRxEOF,
+ TLan_HandleDummy,
+ TLan_HandleTxEOC,
+ TLan_HandleStatusCheck,
+ TLan_HandleRxEOC
+};
+
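+/* Arm the per-device timer for event 'type', 'ticks' jiffies from now.
+ * An already pending timer is only replaced when it is the low-priority
+ * TLAN_TIMER_ACTIVITY timer.
+ */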
+static inline void
+TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+ if ( priv->timer.function != NULL &&
+ priv->timerType != TLAN_TIMER_ACTIVITY ) {
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+ priv->timer.function = &TLan_Timer;
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->timer.data = (unsigned long) dev;
+ priv->timerSetAt = jiffies;
+ priv->timerType = type;
+ mod_timer(&priv->timer, jiffies + ticks);
+
+} /* TLan_SetTimer */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Primary Functions
+
+ These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+
+ /***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+	 *	Parms:
+	 *		pdev - The PCI device being removed.
+	 *
+	 *	Unregisters the network device, frees the device struct and
+	 *	the DMA memory associated with it (lists and buffers), and
+	 *	unreserves the IO port regions associated with this device.
+ *
+ **************************************************************/
+
+
+static void __devexit tlan_remove_one( struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata( pdev );
+ TLanPrivateInfo *priv = netdev_priv(dev);
+
+ unregister_netdev( dev );
+
+ if ( priv->dmaStorage ) {
+ pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA );
+ }
+
+#ifdef CONFIG_PCI
+ pci_release_regions(pdev);
+#endif
+
+ free_netdev( dev );
+
+ pci_set_drvdata( pdev, NULL );
+}
+
+static struct pci_driver tlan_driver = {
+ .name = "tlan",
+ .id_table = tlan_pci_tbl,
+ .probe = tlan_init_one,
+ .remove = __devexit_p(tlan_remove_one),
+};
+
+static int __init tlan_probe(void)
+{
+ static int pad_allocated;
+
+ printk(KERN_INFO "%s", tlan_banner);
+
+ TLanPadBuffer = (u8 *) pci_alloc_consistent(NULL, TLAN_MIN_FRAME_SIZE, &TLanPadBufferDMA);
+
+ if (TLanPadBuffer == NULL) {
+ printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
+ return -ENOMEM;
+ }
+
+ memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
+ pad_allocated = 1;
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
+
+ /* Use new style PCI probing. Now the kernel will
+ do most of this for us */
+ pci_register_driver(&tlan_driver);
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
+ TLan_EisaProbe();
+
+ printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
+ TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
+
+ if (TLanDevicesInstalled == 0) {
+ pci_unregister_driver(&tlan_driver);
+ pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static int __devinit tlan_init_one( struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return TLan_probe1( pdev, -1, -1, 0, ent);
+}
+
+
+/*
+ ***************************************************************
+ * tlan_probe1
+ *
+ * Returns:
+ * 0 on success, error code on error
+ * Parms:
+ *	pdev - PCI device to set up (NULL for EISA adapters)
+ *	ioaddr, irq, rev - resources used when probing EISA adapters
+ *	ent - matching entry in tlan_pci_tbl (PCI only)
+ *
+ * The name is lower case to fit in with all the rest of
+ * the netcard_probe names. This function looks for
+ * another TLan based adapter, setting it up with the
+ * allocated device struct if one is found.
+ * tlan_probe has been ported to the new net API and
+ * now allocates its own device structure. This function
+ * is also used by modules.
+ *
+ **************************************************************/
+
+static int __devinit TLan_probe1(struct pci_dev *pdev,
+ long ioaddr, int irq, int rev, const struct pci_device_id *ent )
+{
+
+ struct net_device *dev;
+ TLanPrivateInfo *priv;
+ u8 pci_rev;
+ u16 device_id;
+ int reg, rc = -ENODEV;
+
+ if (pdev) {
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, TLanSignature);
+ if (rc) {
+ printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
+ goto err_out;
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+ if (dev == NULL) {
+ printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ priv->pciDev = pdev;
+
+ /* Is this a PCI device? */
+ if (pdev) {
+ u32 pci_io_base = 0;
+
+ priv->adapter = &board_info[ent->driver_data];
+
+ rc = pci_set_dma_mask(pdev, 0xFFFFFFFF);
+ if (rc) {
+ printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+ goto err_out_free_dev;
+ }
+
+ pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev);
+
+ for ( reg= 0; reg <= 5; reg ++ ) {
+ if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
+ pci_io_base = pci_resource_start(pdev, reg);
+ TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
+ pci_io_base);
+ break;
+ }
+ }
+ if (!pci_io_base) {
+ printk(KERN_ERR "TLAN: No IO mappings available\n");
+ rc = -EIO;
+ goto err_out_free_dev;
+ }
+
+ dev->base_addr = pci_io_base;
+ dev->irq = pdev->irq;
+ priv->adapterRev = pci_rev;
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+
+ } else { /* EISA card */
+ /* This is a hack. We need to know which board structure
+ * is suited for this adapter */
+ device_id = inw(ioaddr + EISA_ID2);
+ priv->is_eisa = 1;
+ if (device_id == 0x20F1) {
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapterRev = 23; /* TLAN 2.3 */
+ } else {
+ priv->adapter = &board_info[14];
+ priv->adapterRev = 10; /* TLAN 1.0 */
+ }
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ }
+
+ /* Kernel parameters */
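+	/* dev->mem_start encodes the settings passed on the kernel command
+	 * line: bit 0 = aui, bits 1-2 = duplex, bits 3-4 = speed, and
+	 * dev->mem_end carries the debug level.
+	 */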
+ if (dev->mem_start) {
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 : (dev->mem_start & 0x06) >> 1;
+ priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3;
+
+ if (priv->speed == 0x1) {
+ priv->speed = TLAN_SPEED_10;
+ } else if (priv->speed == 0x2) {
+ priv->speed = TLAN_SPEED_100;
+ }
+ debug = priv->debug = dev->mem_end;
+ } else {
+ priv->aui = aui[boards_found];
+ priv->speed = speed[boards_found];
+ priv->duplex = duplex[boards_found];
+ priv->debug = debug;
+ }
+
+ /* This will be used when we get an adapter error from
+ * within our irq handler */
+ INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+
+ spin_lock_init(&priv->lock);
+
+ rc = TLan_Init(dev);
+ if (rc) {
+ printk(KERN_ERR "TLAN: Could not set up device.\n");
+ goto err_out_free_dev;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "TLAN: Could not register device.\n");
+ goto err_out_uninit;
+ }
+
+
+ TLanDevicesInstalled++;
+ boards_found++;
+
+ /* pdev is NULL if this is an EISA device */
+ if (pdev)
+ tlan_have_pci++;
+ else {
+ priv->nextDevice = TLan_Eisa_Devices;
+ TLan_Eisa_Devices = dev;
+ tlan_have_eisa++;
+ }
+
+ printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->deviceLabel,
+ priv->adapterRev);
+ return 0;
+
+err_out_uninit:
+ pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
+ priv->dmaStorageDMA );
+err_out_free_dev:
+ free_netdev(dev);
+err_out_regions:
+#ifdef CONFIG_PCI
+ if (pdev)
+ pci_release_regions(pdev);
+#endif
+err_out:
+ if (pdev)
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+static void TLan_Eisa_Cleanup(void)
+{
+ struct net_device *dev;
+ TLanPrivateInfo *priv;
+
+ while( tlan_have_eisa ) {
+ dev = TLan_Eisa_Devices;
+ priv = netdev_priv(dev);
+ if (priv->dmaStorage) {
+ pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA );
+ }
+ release_region( dev->base_addr, 0x10);
+ unregister_netdev( dev );
+ TLan_Eisa_Devices = priv->nextDevice;
+ free_netdev( dev );
+ tlan_have_eisa--;
+ }
+}
+
+
+static void __exit tlan_exit(void)
+{
+ pci_unregister_driver(&tlan_driver);
+
+ if (tlan_have_eisa)
+ TLan_Eisa_Cleanup();
+
+ pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
+
+}
+
+
+/* Module loading/unloading */
+module_init(tlan_probe);
+module_exit(tlan_exit);
+
+
+
+ /**************************************************************
+ * TLan_EisaProbe
+ *
+	  *	Returns: Nothing
+	  *
+	  *	Parms:	 None
+	  *
+	  *
+	  *	This function probes for EISA devices and calls
+ * TLan_probe1 when one is found.
+ *
+ *************************************************************/
+
+static void __init TLan_EisaProbe (void)
+{
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
+ u16 device_id;
+
+ if (!EISA_bus) {
+ TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
+ return;
+ }
+
+ /* Loop through all slots of the EISA bus */
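+	/* Each slot decodes a 0x1000-byte IO window at slot * 0x1000;
+	 * the adapter is identified by the EISA_ID/EISA_ID2 words read
+	 * from that window below. */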
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+
+ TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+ goto out;
+
+ if (inw(ioaddr + EISA_ID) != 0x110E) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ device_id = inw(ioaddr + EISA_ID2);
+ if (device_id != 0x20F1 && device_id != 0x40F1) {
+ release_region (ioaddr, 0x10);
+ goto out;
+ }
+
+ if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */
+ release_region (ioaddr, 0x10);
+ goto out2;
+ }
+
+ if (debug == 0x10)
+ printk("Found one\n");
+
+
+ /* Get irq from board */
+ switch (inb(ioaddr + 0xCC0)) {
+ case(0x10):
+ irq=5;
+ break;
+ case(0x20):
+ irq=9;
+ break;
+ case(0x40):
+ irq=10;
+ break;
+ case(0x80):
+ irq=11;
+ break;
+ default:
+ goto out;
+ }
+
+
+ /* Setup the newly found eisa adapter */
+ rc = TLan_probe1( NULL, ioaddr, irq,
+ 12, NULL);
+ continue;
+
+ out:
+ if (debug == 0x10)
+ printk("None found\n");
+ continue;
+
+		out2:
+			if (debug == 0x10)
+ printk("Card found but it is not enabled, skipping\n");
+ continue;
+
+ }
+
+} /* TLan_EisaProbe */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void TLan_Poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ TLan_HandleInterrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+
+
+
+ /***************************************************************
+ * TLan_Init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+	 *	and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int TLan_Init( struct net_device *dev )
+{
+ int dma_size;
+ int err;
+ int i;
+ TLanPrivateInfo *priv;
+
+ priv = netdev_priv(dev);
+
+ if ( bbuf ) {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
+ } else {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) );
+ }
+ priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size, &priv->dmaStorageDMA);
+ priv->dmaSize = dma_size;
+
+ if ( priv->dmaStorage == NULL ) {
+ printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name );
+ return -ENOMEM;
+ }
+ memset( priv->dmaStorage, 0, dma_size );
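+	/* The single consistent allocation is carved up below: the RX
+	 * lists (aligned to an 8-byte boundary), then the TX lists, and,
+	 * when bounce buffers are enabled (bbuf), the RX and TX bounce
+	 * buffers. */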
+ priv->rxList = (TLanList *)
+ ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 );
+ priv->rxListDMA = ( ( ( (u32) priv->dmaStorageDMA ) + 7 ) & 0xFFFFFFF8 );
+ priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
+ priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+ if ( bbuf ) {
+ priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
+ priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
+ priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
+ priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
+ }
+
+ err = 0;
+ for ( i = 0; i < 6 ; i++ )
+ err |= TLan_EeReadByte( dev,
+ (u8) priv->adapter->addrOfs + i,
+ (u8 *) &dev->dev_addr[i] );
+ if ( err ) {
+ printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
+ dev->name,
+ err );
+ }
+ dev->addr_len = 6;
+
+ netif_carrier_off(dev);
+
+ /* Device methods */
+ dev->open = &TLan_Open;
+ dev->hard_start_xmit = &TLan_StartTx;
+ dev->stop = &TLan_Close;
+ dev->get_stats = &TLan_GetStats;
+ dev->set_multicast_list = &TLan_SetMulticastList;
+ dev->do_ioctl = &TLan_ioctl;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &TLan_Poll;
+#endif
+ dev->tx_timeout = &TLan_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 0;
+
+} /* TLan_Init */
+
+
+
+
+ /***************************************************************
+ * TLan_Open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+static int TLan_Open( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int err;
+
+ priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
+ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ, TLanSignature, dev );
+
+ if ( err ) {
+ printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq );
+ return err;
+ }
+
+ init_timer(&priv->timer);
+ netif_start_queue(dev);
+
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ TLan_ResetLists( dev );
+ TLan_ReadAndClearStats( dev, TLAN_IGNORE );
+ TLan_ResetAdapter( dev );
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev );
+
+ return 0;
+
+} /* TLan_Open */
+
+
+
+ /**************************************************************
+ * TLan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
+
+static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ u32 phy = priv->phy[priv->phyNum];
+
+ if (!priv->phyOnline)
+ return -EAGAIN;
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = phy;
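+			/* Fall through: SIOCGMIIPHY also performs the
+			 * register read handled by SIOCGMIIREG below. */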
+
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ TLan_MiiReadReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, &data->val_out);
+ return 0;
+
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ TLan_MiiWriteReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+} /* tlan_ioctl */
+
+
+ /***************************************************************
+ * TLan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
+
+static void TLan_tx_timeout(struct net_device *dev)
+{
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+
+	/* Ok, so we timed out; let's see what we can do about it... */
+ TLan_FreeLists( dev );
+ TLan_ResetLists( dev );
+ TLan_ReadAndClearStats( dev, TLAN_IGNORE );
+ TLan_ResetAdapter( dev );
+ dev->trans_start = jiffies;
+ netif_wake_queue( dev );
+
+}
+
+
+
+ /***************************************************************
+ * TLan_StartTx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ TLanList *tail_list;
+ dma_addr_t tail_list_phys;
+ u8 *tail_buffer;
+ int pad;
+ unsigned long flags;
+
+ if ( ! priv->phyOnline ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", dev->name );
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ tail_list = priv->txList + priv->txTail;
+ tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+
+ if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail );
+ netif_stop_queue(dev);
+ priv->txBusyCount++;
+ return 1;
+ }
+
+ tail_list->forward = 0;
+
+ if ( bbuf ) {
+ tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
+ memcpy( tail_buffer, skb->data, skb->len );
+ } else {
+ tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ TLan_StoreSKB(tail_list, skb);
+ }
+
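+	/* Frames shorter than TLAN_MIN_FRAME_SIZE are padded out using
+	 * the shared pad buffer as a second fragment of the TX list. */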
+ pad = TLAN_MIN_FRAME_SIZE - skb->len;
+
+ if ( pad > 0 ) {
+ tail_list->frameSize = (u16) skb->len + pad;
+ tail_list->buffer[0].count = (u32) skb->len;
+ tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
+ tail_list->buffer[1].address = TLanPadBufferDMA;
+ } else {
+ tail_list->frameSize = (u16) skb->len;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tail_list->cStat = TLAN_CSTAT_READY;
+ if ( ! priv->txInProgress ) {
+ priv->txInProgress = 1;
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
+ outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+ } else {
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail );
+ if ( priv->txTail == 0 ) {
+ ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys;
+ } else {
+ ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+
+ if ( bbuf )
+ dev_kfree_skb_any(skb);
+
+ dev->trans_start = jiffies;
+ return 0;
+
+} /* TLan_StartTx */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleInterrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+	 *		regs	The processor register state at the
+	 *			time of the interrupt (unused).
+	 *
+	 *	This function handles an interrupt generated by its
+	 *	assigned TLAN adapter.  The function deactivates
+	 *	interrupts on its adapter, records the type of
+	 *	interrupt, executes the appropriate subhandler, and
+	 *	acknowledges the interrupt to the adapter (thus
+	 *	re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 ack;
+ struct net_device *dev;
+ u32 host_cmd;
+ u16 host_int;
+ int type;
+ TLanPrivateInfo *priv;
+
+ dev = dev_id;
+ priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+
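+	/* Writing the value just read back to HOST_INT deactivates the
+	 * interrupt; the type field (TLAN_HI_IT_MASK) then selects the
+	 * subhandler from the TLanIntVector dispatch table. */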
+ host_int = inw( dev->base_addr + TLAN_HOST_INT );
+ outw( host_int, dev->base_addr + TLAN_HOST_INT );
+
+ type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
+
+ ack = TLanIntVector[type]( dev, host_int );
+
+ if ( ack ) {
+ host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
+ outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+} /* TLan_HandleInterrupt */
+
+
+
+
+ /***************************************************************
+ * TLan_Close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+	 *	its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+static int TLan_Close(struct net_device *dev)
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ priv->neg_be_verbose = 0;
+
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if ( priv->timer.function != NULL ) {
+ del_timer_sync( &priv->timer );
+ priv->timer.function = NULL;
+ }
+
+ free_irq( dev->irq, dev );
+ TLan_FreeLists( dev );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+
+ return 0;
+
+} /* TLan_Close */
+
+
+
+
+ /***************************************************************
+ * TLan_GetStats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+	 *	This function updates the device's statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int i;
+
+ /* Should only read stats if open ? */
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+
+ TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount );
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount );
+ if ( debug & TLAN_DEBUG_GNRL ) {
+ TLan_PrintDio( dev->base_addr );
+ TLan_PhyPrint( dev );
+ }
+ if ( debug & TLAN_DEBUG_LIST ) {
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
+ TLan_PrintList( priv->rxList + i, "RX", i );
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
+ TLan_PrintList( priv->txList + i, "TX", i );
+ }
+
+	return &priv->stats;
+
+} /* TLan_GetStats */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMulticastList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+	 *	mode is activated.  Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+static void TLan_SetMulticastList( struct net_device *dev )
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ if ( dev->flags & IFF_PROMISC ) {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ } else {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
+ if ( dev->flags & IFF_ALLMULTI ) {
+ for ( i = 0; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ } else {
+ for ( i = 0; i < dev->mc_count; i++ ) {
+ if ( i < 3 ) {
+ TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr );
+ } else {
+ offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ if ( offset < 32 )
+ hash1 |= ( 1 << offset );
+ else
+ hash2 |= ( 1 << ( offset - 32 ) );
+ }
+ dmi = dmi->next;
+ }
+ for ( ; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ }
+ }
+
+} /* TLan_SetMulticastList */
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Interrupt Vectors and Table
+
+ Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
+	Programmer's Guide" for more information on handling interrupts
+ generated by TLAN based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_HandleInvalid
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles invalid interrupts. This should
+ * never happen unless some other adapter is trying to use
+ * the IRQ line assigned to the device.
+ *
+ **************************************************************/
+
+u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
+{
+ /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
+ return 0;
+
+} /* TLan_HandleInvalid */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+	 *	contents of a buffer.  It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int eoc = 0;
+ TLanList *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 0;
+ u16 tmpCStat;
+
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+
+ while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
+ ack++;
+ if ( ! bbuf ) {
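+			/* The spare buffer slots (8 and 9) are used by
+			 * TLan_StoreSKB()/TLan_GetSKB() to remember the
+			 * sk_buff; clear them once the skb has been
+			 * unmapped and freed. */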
+ struct sk_buff *skb = TLan_GetSKB(head_list);
+ pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ head_list->buffer[8].address = 0;
+ head_list->buffer[9].address = 0;
+ }
+
+ if ( tmpCStat & TLAN_CSTAT_EOC )
+ eoc = 1;
+
+ priv->stats.tx_bytes += head_list->frameSize;
+
+ head_list->cStat = TLAN_CSTAT_UNUSED;
+ netif_start_queue(dev);
+ CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
+ head_list = priv->txList + priv->txHead;
+ }
+
+ if (!ack)
+ printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
+
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ priv->timer.function = &TLan_Timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timerSetAt = jiffies;
+ priv->timerType = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatOverflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+{
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+
+ return 1;
+
+} /* TLan_HandleStatOverflow */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+	 *	hold the frame, and sends it up the protocol stack.  It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u32 ack = 0;
+ int eoc = 0;
+ u8 *head_buffer;
+ TLanList *head_list;
+ struct sk_buff *skb;
+ TLanList *tail_list;
+ void *t;
+ u32 frameSize;
+ u16 tmpCStat;
+ dma_addr_t head_list_phys;
+
+ TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+
+ while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
+ frameSize = head_list->frameSize;
+ ack++;
+ if (tmpCStat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ if (bbuf) {
+ skb = dev_alloc_skb(frameSize + 7);
+ if (skb == NULL)
+ printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n");
+ else {
+ head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ t = (void *) skb_put(skb, frameSize);
+
+ priv->stats.rx_bytes += head_list->frameSize;
+
+ memcpy( t, head_buffer, frameSize );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ }
+ } else {
+ struct sk_buff *new_skb;
+
+ /*
+ * I changed the algorithm here. What we now do
+ * is allocate the new frame. If this fails we
+ * simply recycle the frame.
+ */
+
+ new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+
+ if ( new_skb != NULL ) {
+ skb = TLan_GetSKB(head_list);
+ pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ skb_trim( skb, frameSize );
+
+ priv->stats.rx_bytes += frameSize;
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+
+ new_skb->dev = dev;
+ skb_reserve( new_skb, 2 );
+ t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
+ head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ head_list->buffer[8].address = (u32) t;
+ TLan_StoreSKB(head_list, new_skb);
+ } else
+ printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
+ }
+
+ head_list->forward = 0;
+ head_list->cStat = 0;
+ tail_list = priv->rxList + priv->rxTail;
+ tail_list->forward = head_list_phys;
+
+ CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
+ CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
+ head_list = priv->rxList + priv->rxHead;
+ head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ }
+
+ if (!ack)
+ printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
+
+
+
+
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ priv->timer.function = &TLan_Timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timerSetAt = jiffies;
+ priv->timerType = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ dev->last_rx = jiffies;
+
+ return ack;
+
+} /* TLan_HandleRxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleDummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+{
+ printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ return 1;
+
+} /* TLan_HandleDummy */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ TLanList *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ host_int = 0;
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ netif_stop_queue(dev);
+ outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOC */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatusCheck
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if ( host_int & TLAN_HI_IV_MASK ) {
+ netif_stop_queue( dev );
+ error = inl( dev->base_addr + TLAN_CH_PARM );
+ printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+
+ schedule_work(&priv->tlan_tqueue);
+
+ netif_wake_queue(dev);
+ ack = 0;
+ } else {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
+ phy = priv->phy[priv->phyNum];
+
+ net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
+ if ( net_sts ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", dev->name, (unsigned) net_sts );
+ }
+ if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
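+			/* Polarity handling: if the PHY reports bad polarity
+			 * and swapping is not yet enabled, set TLAN_TC_SWAPOL;
+			 * if polarity is fine but swapping is still on,
+			 * clear it. */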
+ if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ }
+
+ if (debug) {
+ TLan_PhyPrint( dev );
+ }
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleStatusCheck */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+	 *	CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail );
+ head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ return ack;
+
+} /* TLan_HandleRxEOC */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Timer Function
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_Timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+	 *	delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+void TLan_Timer( unsigned long data )
+{
+ struct net_device *dev = (struct net_device *) data;
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u32 elapsed;
+ unsigned long flags = 0;
+
+ priv->timer.function = NULL;
+
+ switch ( priv->timerType ) {
+#ifdef MONITOR
+ case TLAN_TIMER_LINK_BEAT:
+ TLan_PhyMonitor( dev );
+ break;
+#endif
+ case TLAN_TIMER_PHY_PDOWN:
+ TLan_PhyPowerDown( dev );
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ TLan_PhyPowerUp( dev );
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ TLan_PhyReset( dev );
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ TLan_PhyStartLink( dev );
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ TLan_PhyFinishAutoNeg( dev );
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ TLan_FinishReset( dev );
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if ( priv->timer.function == NULL ) {
+ elapsed = jiffies - priv->timerSetAt;
+ if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ } else {
+ priv->timer.function = &TLan_Timer;
+ priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer( &priv->timer );
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+} /* TLan_Timer */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Adapter Related Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_ResetLists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+	 *				structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+void TLan_ResetLists( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int i;
+ TLanList *list;
+ dma_addr_t list_phys;
+ struct sk_buff *skb;
+ void *t = NULL;
+
+ priv->txHead = 0;
+ priv->txTail = 0;
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ list->cStat = TLAN_CSTAT_UNUSED;
+ if ( bbuf ) {
+ list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE );
+ } else {
+ list->buffer[0].address = 0;
+ }
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+
+ priv->rxHead = 0;
+ priv->rxTail = TLAN_NUM_RX_LISTS - 1;
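+	/* The RX lists are chained through their forward pointers, with
+	 * the last list's forward left at 0; completed lists are
+	 * re-appended at rxTail by the RX EOF handler. */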
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ list_phys = priv->rxListDMA + sizeof(TLanList) * i;
+ list->cStat = TLAN_CSTAT_READY;
+ list->frameSize = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ if ( bbuf ) {
+ list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE );
+ } else {
+ skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ /* If this ever happened it would be a problem */
+ } else {
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ }
+ list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ list->buffer[8].address = (u32) t;
+ TLan_StoreSKB(list, skb);
+ }
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ if ( i < TLAN_NUM_RX_LISTS - 1 )
+ list->forward = list_phys + sizeof(TLanList);
+ else
+ list->forward = 0;
+ }
+
+} /* TLan_ResetLists */
+
+
+void TLan_FreeLists( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int i;
+ TLanList *list;
+ struct sk_buff *skb;
+
+ if ( ! bbuf ) {
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ skb = TLan_GetSKB(list);
+ if ( skb ) {
+ pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any( skb );
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ skb = TLan_GetSKB(list);
+ if ( skb ) {
+ pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any( skb );
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+ }
+} /* TLan_FreeLists */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintDio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_PrintDio( u16 io_base )
+{
+ u32 data0, data1;
+ int i;
+
+ printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base );
+ printk( "TLAN: Off. +0 +4\n" );
+ for ( i = 0; i < 0x4C; i+= 8 ) {
+ data0 = TLan_DioRead32( io_base, i );
+ data1 = TLan_DioRead32( io_base, i + 0x4 );
+ printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ }
+
+} /* TLan_PrintDio */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the TLanList structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+void TLan_PrintList( TLanList *list, char *type, int num)
+{
+ int i;
+
+ printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list );
+ printk( "TLAN: Forward = 0x%08x\n", list->forward );
+ printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
+ printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
+ /* for ( i = 0; i < 10; i++ ) { */
+ for ( i = 0; i < 2; i++ ) {
+ printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address );
+ }
+
+} /* TLan_PrintList */
+
+
+
+
+ /***************************************************************
+ * TLan_ReadAndClearStats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to which to read stats.
+	 *	record	Flag indicating whether to add the
+	 *		values read to the device's stats struct.
+ *
+	 *	This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+void TLan_ReadAndClearStats( struct net_device *dev, int record )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
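+	/* Each statistics register packs several counters into one
+	 * 32-bit DIO word; the individual bytes are read out below, and
+	 * reading a register clears it as a side effect. */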
+ outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
+ def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
+ def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
+
+ outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
+ loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+
+ if ( record ) {
+ priv->stats.rx_packets += rx_good;
+ priv->stats.rx_errors += rx_over + crc + code;
+ priv->stats.tx_packets += tx_good;
+ priv->stats.tx_errors += tx_under + loss;
+ priv->stats.collisions += multi_col + single_col + excess_col + late_col;
+
+ priv->stats.rx_over_errors += rx_over;
+ priv->stats.rx_crc_errors += crc;
+ priv->stats.rx_frame_errors += code;
+
+ priv->stats.tx_aborted_errors += tx_under;
+ priv->stats.tx_carrier_errors += loss;
+ }
+
+} /* TLan_ReadAndClearStats */
+
+
+
+
+ /***************************************************************
+	 *	TLan_ResetAdapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+	 *	This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+void
+TLan_ResetAdapter( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlanFullDuplex = FALSE;
+ priv->phyOnline=0;
+ netif_carrier_off(dev);
+
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
+ TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
+ }
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
+ outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+
+/* 7. Setup the remaining registers. */
+
+ if ( priv->tlanRev >= 0x30 ) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ }
+ TLan_PhyDetect( dev );
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ data |= TLAN_NET_CFG_BIT;
+ if ( priv->aui == 1 ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
+ } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
+ priv->tlanFullDuplex = TRUE;
+ } else {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ }
+ }
+
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_CFG_PHY_EN;
+ }
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ TLan_FinishReset( dev );
+ } else {
+ TLan_PhyPowerDown( dev );
+ }
+
+} /* TLan_ResetAdapter */
+
+
+
+
+void
+TLan_FinishReset( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 partner;
+ u16 tlphy_ctl;
+ u16 tlphy_par;
+ u16 tlphy_id1, tlphy_id2;
+ int i;
+
+ phy = priv->phy[priv->phyNum];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if ( priv->tlanFullDuplex ) {
+ data |= TLAN_NET_CMD_DUPLEX;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_MASK_MASK7;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
+ TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+
+ if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) {
+ status = MII_GS_LINK;
+ printk( "TLAN: %s: Link forced.\n", dev->name );
+ } else {
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ udelay( 1000 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( (status & MII_GS_LINK) && /* We only support link info on Nat.Sem. PHY's */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2) ) {
+ TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
+
+ printk( "TLAN: %s: Link active with ", dev->name );
+ if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
+ printk( "forced 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+ } else {
+ printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+ printk("TLAN: Partner capability: ");
+ for (i = 5; i <= 10; i++)
+ if (partner & (1<<i))
+ printk("%s",media[i-5]);
+ printk("\n");
+ }
+
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+#ifdef MONITOR
+ /* We have link beat..for now anyway */
+ priv->link = 1;
+ /*Enabling link beat monitoring */
+ TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+#endif
+ } else if (status & MII_GS_LINK) {
+ printk( "TLAN: %s: Link active\n", dev->name );
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ }
+ }
+
+ if ( priv->phyNum == 0 ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
+ tlphy_ctl |= TLAN_TC_INTEN;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
+ sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
+ sio |= TLAN_NET_SIO_MINTEN;
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
+ }
+
+ if ( status & MII_GS_LINK ) {
+ TLan_SetMac( dev, 0, dev->dev_addr );
+ priv->phyOnline = 1;
+ outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
+ outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ }
+ outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ netif_carrier_on(dev);
+ } else {
+ printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
+ TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+ return;
+ }
+
+} /* TLan_FinishReset */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+	 *			i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+{
+ int i;
+
+ areg *= 6;
+
+ if ( mac != NULL ) {
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] );
+ } else {
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 );
+ }
+
+} /* TLan_SetMac */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver PHY Layer Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+ /*********************************************************************
+ * TLan_PhyPrint
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+	 *	This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+void TLan_PhyPrint( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 i, data0, data1, data2, data3, phy;
+
+ phy = priv->phy[priv->phyNum];
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
+ } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
+ printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
+ printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ for ( i = 0; i < 0x20; i+= 4 ) {
+ printk( "TLAN: 0x%02x", i );
+ TLan_MiiReadReg( dev, phy, i, &data0 );
+ printk( " 0x%04hx", data0 );
+ TLan_MiiReadReg( dev, phy, i + 1, &data1 );
+ printk( " 0x%04hx", data1 );
+ TLan_MiiReadReg( dev, phy, i + 2, &data2 );
+ printk( " 0x%04hx", data2 );
+ TLan_MiiReadReg( dev, phy, i + 3, &data3 );
+ printk( " 0x%04hx\n", data3 );
+ }
+ } else {
+ printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ }
+
+} /* TLan_PhyPrint */
+
+
+
+
+ /*********************************************************************
+ * TLan_PhyDetect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+	 *		for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+	 *	(e.g., AUI/Thinnet).  This function finds out if this TLAN
+	 *	chip has an internal PHY, and then finds the first external
+	 *	PHY (starting from address 0), if one exists.
+ *
+ ********************************************************************/
+
+void TLan_PhyDetect( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ priv->phyNum = 0xFFFF;
+ return;
+ }
+
+ TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+
+ if ( hi != 0xFFFF ) {
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ } else {
+ priv->phy[0] = TLAN_PHY_NONE;
+ }
+
+ priv->phy[1] = TLAN_PHY_NONE;
+ for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
+ if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo );
+ if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ if ( priv->phy[1] != TLAN_PHY_NONE ) {
+ priv->phyNum = 1;
+ } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
+ priv->phyNum = 0;
+ } else {
+ printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
+ }
+
+} /* TLan_PhyDetect */
+
+
+
+
+void TLan_PhyPowerDown( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+ if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ }
+
+ /* Wait for 50 ms and powerup
+	 * This is arbitrary.  It is intended to make sure the
+ * transceiver settles.
+ */
+ TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+
+} /* TLan_PhyPowerDown */
+
+
+
+
+void TLan_PhyPowerUp( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK;
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+ TLan_MiiSync(dev->base_addr);
+ /* Wait for 500 ms and reset the
+ * transceiver. The TLAN docs say both 50 ms and
+ * 500 ms, so do the longer, just in case.
+ */
+ TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+
+} /* TLan_PhyPowerUp */
+
+
+
+
+void TLan_PhyReset( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 phy;
+ u16 value;
+
+ phy = priv->phy[priv->phyNum];
+
+	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ while ( value & MII_GC_RESET ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ }
+
+ /* Wait for 500 ms and initialize.
+ * I don't remember why I wait this long.
+ * I've changed this to 50ms, as it seems long enough.
+ */
+ TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+
+} /* TLan_PhyReset */
+
+
+
+
+void TLan_PhyStartLink( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phyNum];
+ TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+
+ if ( ( status & MII_GS_AUTONEG ) &&
+ ( ! priv->aui ) ) {
+ ability = status >> 11;
+ if ( priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
+ } else if ( priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlanFullDuplex = TRUE;
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
+ } else if ( priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
+ } else if ( priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlanFullDuplex = TRUE;
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+ } else {
+
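+			/* Bits 11-15 of the MII status word report the PHY's
+			 * abilities; shifted into bits 5-9 of the advertisement
+			 * register, with the 802.3 selector in bit 0, they
+			 * advertise everything the PHY supports. */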
+ /* Set Auto-Neg advertisement */
+ TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+			/* Enable Auto-Neg */
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ /* Restart Auto-Neg */
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+			/* Wait for 2 sec for autonegotiation
+			 * to complete.  The max spec time is less than this
+			 * but the card needs additional time to start AN.
+			 * The extra margin should be plenty.
+			 */
+ printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
+ TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ }
+
+ if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ return;
+ } else if ( priv->phyNum == 0 ) {
+ control = 0;
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
+ if ( priv->aui ) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ control |= MII_GC_DUPLEX;
+ priv->tlanFullDuplex = TRUE;
+ }
+ if ( priv->speed == TLAN_SPEED_100 ) {
+ control |= MII_GC_SPEEDSEL;
+ }
+ }
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ }
+
+	/* Wait for 4 sec to give the transceiver time
+ * to establish link.
+ */
+ TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyStartLink */
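+
+/* For reference, the literal MII_GEN_CTL values written above decompose into
+ * the MII_GC_* flags from tlan.h:
+ *   0x0100 == MII_GC_DUPLEX
+ *   0x2000 == MII_GC_SPEEDSEL
+ *   0x2100 == MII_GC_SPEEDSEL | MII_GC_DUPLEX
+ *   0x1000 == MII_GC_AUTOENB
+ *   0x1200 == MII_GC_AUTOENB | MII_GC_AUTORSRT
+ */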
+
+
+
+
+void TLan_PhyFinishAutoNeg( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 an_adv;
+ u16 an_lpa;
+ u16 data;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ udelay( 1000 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+
+ if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ /* Wait for 8 sec to give the process
+ * more time. Perhaps we should fail after a while.
+ */
+ if (!priv->neg_be_verbose++) {
+ printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n");
+ printk(KERN_INFO "TLAN: Please check that your adapter has\n");
+ printk(KERN_INFO "TLAN: been properly connected to a HUB or Switch.\n");
+ printk(KERN_INFO "TLAN: Trying to establish link in the background...\n");
+ }
+ TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
+ TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ mode = an_adv & an_lpa & 0x03E0;
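+	/* The 0x03E0 mask covers the standard MII technology ability bits
+	 * shared by the advertisement and link partner registers; within it,
+	 * 0x0040 is 10BaseT full duplex, 0x0080 is 100BaseTX half duplex and
+	 * 0x0100 is 100BaseTX full duplex, which is what the checks below
+	 * test for.
+	 */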
+ if ( mode & 0x0100 ) {
+ priv->tlanFullDuplex = TRUE;
+ } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
+ priv->tlanFullDuplex = TRUE;
+ }
+
+ if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) {
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ return;
+ }
+
+ if ( priv->phyNum == 0 ) {
+ if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX );
+ printk( "TLAN: Starting internal PHY with FULL-DUPLEX\n" );
+ } else {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
+ printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ }
+ }
+
+	/* Wait for 100 ms.  No reason in particular.
+ */
+ TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyFinishAutoNeg */
+
+#ifdef MONITOR
+
+ /*********************************************************************
+ *
+	 *	TLan_PhyMonitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ * ******************************************************************/
+
+void TLan_PhyMonitor( struct net_device *dev )
+{
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ u16 phy;
+ u16 phy_status;
+
+ phy = priv->phy[priv->phyNum];
+
+ /* Get PHY status register */
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
+ dev->flags &= ~IFF_RUNNING;
+ TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+ return;
+ }
+ }
+
+	/* Link reestablished? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+ dev->flags |= IFF_RUNNING;
+ }
+
+ /* Setup a new monitor */
+ TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+}
+
+#endif /* MONITOR */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver MII Routines
+
+ These routines are based on the information in Chap. 2 of the
+ "ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_MiiReadReg
+ *
+ * Returns:
+ * 0 if ack received ok
+ * 1 otherwise.
+ *
+ * Parms:
+ * dev The device structure containing
+	 *			the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+	 *				retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+	 *	This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ int err;
+ int minten;
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ err = FALSE;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ TLan_MiiSync(dev->base_addr);
+
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+
+ nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
+ if (nack) { /* No ACK, so fake it */
+ for (i = 0; i < 16; i++) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = TRUE;
+ } else { /* ACK, so read data */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+
+ if ( minten )
+ TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+
+} /* TLan_MiiReadReg */
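+
+/* Illustrative sketch, not referenced anywhere in the driver: how
+ * TLan_MiiReadReg might be used to poll the generic PHY status register and
+ * test the link bit.  The helper name tlan_sketch_link_up is hypothetical and
+ * exists only for this example.
+ */
+#if 0
+static int tlan_sketch_link_up( struct net_device *dev )
+{
+	TLanPrivateInfo	*priv = netdev_priv(dev);
+	u16		status;
+
+	/* A non-zero return from TLan_MiiReadReg means the PHY did not ACK. */
+	if ( TLan_MiiReadReg( dev, priv->phy[priv->phyNum], MII_GEN_STS, &status ) )
+		return 0;
+
+	return ( status & MII_GS_LINK ) != 0;
+}
+#endif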
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSendData
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+	 *	This function sends a sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+{
+ u16 sio;
+ u32 i;
+
+ if ( num_bits == 0 )
+ return;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+
+ for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ if ( data & i )
+ TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSendData */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+	 *	This function syncs all PHYs on the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSync( u16 base_port )
+{
+ int i;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
+ for ( i = 0; i < 32; i++ ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSync */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiWriteReg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+	 *	writes the 16-bit register value to the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+{
+ u16 sio;
+ int minten;
+ unsigned long flags = 0;
+ TLanPrivateInfo *priv = netdev_priv(dev);
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ TLan_MiiSync( dev->base_addr );
+
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
+ TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+
+ if ( minten )
+ TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+} /* TLan_MiiWriteReg */
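+
+/* Illustrative sketch, not referenced anywhere in the driver: restarting
+ * autonegotiation on the active PHY with TLan_MiiWriteReg.  This mirrors the
+ * 0x1200 write done in TLan_PhyStartLink, spelled with the MII_GC_* names.
+ * The helper name is hypothetical.
+ */
+#if 0
+static void tlan_sketch_restart_autoneg( struct net_device *dev )
+{
+	TLanPrivateInfo	*priv = netdev_priv(dev);
+
+	/* MII_GC_AUTOENB | MII_GC_AUTORSRT == 0x1200 */
+	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL,
+			  MII_GC_AUTOENB | MII_GC_AUTORSRT );
+}
+#endif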
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Eeprom routines
+
+ The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+ EEPROM. These functions are based on information in Microchip's
+	data sheet.  I don't know how well these functions will work with
+ other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_EeSendStart
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_EeSendStart( u16 io_base )
+{
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+
+} /* TLan_EeSendStart */
+
+
+
+
+ /***************************************************************
+ * TLan_EeSendByte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+	 *				byte is sent and the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low, tx is enabled; */
+ for ( place = 0x80; place != 0; place >>= 1 ) {
+ if ( place & data )
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+
+ if ( ( ! err ) && stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+ return ( err );
+
+} /* TLan_EeSendByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReceiveByte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+	 *	over the serial link.  It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+{
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low, tx is enabled; */
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ for ( place = 0x80; place; place >>= 1 ) {
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ *data |= place;
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ if ( ! stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ } else {
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+} /* TLan_EeReceiveByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReadByte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+	 *	This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+{
+ int err;
+ TLanPrivateInfo *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+ int ret=0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
+ if (err)
+ {
+ ret=1;
+ goto fail;
+ }
+ err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
+ if (err)
+ {
+ ret=2;
+ goto fail;
+ }
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
+ if (err)
+ {
+ ret=3;
+ goto fail;
+ }
+ TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+fail:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+
+} /* TLan_EeReadByte */
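+
+/* Illustrative sketch, not referenced anywhere in the driver: reading six
+ * consecutive EEPROM bytes with TLan_EeReadByte, e.g. to fetch a station
+ * address.  The helper name and the addr_ofs starting offset are assumptions;
+ * the real offset is adapter specific.
+ */
+#if 0
+static int tlan_sketch_read_addr( struct net_device *dev, u8 addr_ofs, u8 *addr )
+{
+	int	i;
+	int	err;
+
+	for ( i = 0; i < 6; i++ ) {
+		err = TLan_EeReadByte( dev, addr_ofs + i, &addr[i] );
+		if ( err )
+			return err;	/* stage at which the read failed */
+	}
+
+	return 0;
+}
+#endif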
+
+
+
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
new file mode 100644
index 000000000000..5d32bc62bef8
--- /dev/null
+++ b/drivers/net/tlan.h
@@ -0,0 +1,540 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1999-2001 Torben Mathiasen
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with tabstop=4, columns>=132
+ *
+ *
+ * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
+ * New Maintainer
+ *
+ ********************************************************************/
+
+
+#include <asm/io.h>
+#include <asm/types.h>
+#include <linux/netdevice.h>
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define FALSE 0
+#define TRUE 1
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 32
+#define TLAN_NUM_TX_LISTS 64
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args );
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
+#define TLAN_DEBUG_PROBE 0x0010
+
+#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+typedef struct tlan_adapter_entry {
+ u16 vendorId;
+ u16 deviceId;
+ char *deviceLabel;
+ u32 flags;
+ u16 addrOfs;
+} TLanAdapterEntry;
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+ /*****************************************************************
+ * EISA Definitions
+ *
+ ****************************************************************/
+
+#define EISA_ID 0xc80 /* EISA ID Registers */
+#define EISA_ID0 0xc80 /* EISA ID Register 0 */
+#define EISA_ID1 0xc81 /* EISA ID Register 1 */
+#define EISA_ID2 0xc82 /* EISA ID Register 2 */
+#define EISA_ID3 0xc83 /* EISA ID Register 3 */
+#define EISA_CR 0xc84 /* EISA Control Register */
+#define EISA_REG0 0xc88 /* EISA Configuration Register 0 */
+#define EISA_REG1 0xc89 /* EISA Configuration Register 1 */
+#define EISA_REG2 0xc8a /* EISA Configuration Register 2 */
+#define EISA_REG3 0xc8f /* EISA Configuration Register 3 */
+#define EISA_APROM 0xc90 /* Ethernet Address PROM */
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+typedef struct tlan_buffer_ref_tag {
+ u32 count;
+ u32 address;
+} TLanBufferRef;
+
+
+typedef struct tlan_list_tag {
+ u32 forward;
+ u16 cStat;
+ u16 frameSize;
+ TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
+} TLanList;
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+typedef struct tlan_private_tag {
+ struct net_device *nextDevice;
+ struct pci_dev *pciDev;
+ void *dmaStorage;
+ dma_addr_t dmaStorageDMA;
+ unsigned int dmaSize;
+ u8 *padBuffer;
+ TLanList *rxList;
+ dma_addr_t rxListDMA;
+ u8 *rxBuffer;
+ dma_addr_t rxBufferDMA;
+ u32 rxHead;
+ u32 rxTail;
+ u32 rxEocCount;
+ TLanList *txList;
+ dma_addr_t txListDMA;
+ u8 *txBuffer;
+ dma_addr_t txBufferDMA;
+ u32 txHead;
+ u32 txInProgress;
+ u32 txTail;
+ u32 txBusyCount;
+ u32 phyOnline;
+ u32 timerSetAt;
+ u32 timerType;
+ struct timer_list timer;
+ struct net_device_stats stats;
+ struct board *adapter;
+ u32 adapterRev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phyNum;
+ u32 speed;
+ u8 tlanRev;
+ u8 tlanFullDuplex;
+ char devName[8];
+ spinlock_t lock;
+ u8 link;
+ u8 is_eisa;
+ struct work_struct tlan_tqueue;
+ u8 neg_be_verbose;
+} TLanPrivateInfo;
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_LINK_BEAT 1
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY (HZ/10)
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+#define TLAN_TLPHY_PAR 0x19
+#define TLAN_PHY_CIM_STAT 0x0020
+#define TLAN_PHY_SPEED_100 0x0040
+#define TLAN_PHY_DUPLEX_FULL 0x0080
+#define TLAN_PHY_AN_EN_STAT 0x0400
+
+/* National Sem. & Level1 PHY id's */
+#define NAT_SEM_ID1 0x2000
+#define NAT_SEM_ID2 0x5C01
+#define LEVEL1_ID1 0x7810
+#define LEVEL1_ID2 0x0000
+
+#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+
+/* Routines to access internal registers. */
+
+static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)));
+
+} /* TLan_DioRead8 */
+
+
+
+
+static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)));
+
+} /* TLan_DioRead16 */
+
+
+
+
+static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inl(base_addr + TLAN_DIO_DATA));
+
+} /* TLan_DioRead32 */
+
+
+
+
+static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
+#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
+#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
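+
+/* Illustrative sketch, not referenced anywhere in this header: using the DIO
+ * helpers above to drive the LED register, as a board flagged with
+ * TLAN_ADAPTER_ACTIVITY_LED might.  The helper name is hypothetical.
+ */
+#if 0
+static inline void TLan_SketchSetLeds( u16 base_addr, int activity )
+{
+	u8 leds = TLAN_LED_LINK;
+
+	if ( activity )
+		leds |= TLAN_LED_ACT;
+	TLan_DioWrite8( base_addr, TLAN_LED_REG, leds );
+}
+#endif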
+
+/*
+ * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
+ * the code below is about seven times as fast as the original code
+ *
+ * The original code was:
+ *
+ * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); }
+ *
+ * #define XOR8( a, b, c, d, e, f, g, h ) \
+ * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
+ * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ *
+ * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) );
+ * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1;
+ * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2;
+ * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3;
+ * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4;
+ * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ *
+ */
+static inline u32 TLan_HashFunc( const u8 *a )
+{
+ u8 hash;
+
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
+
+ return (hash & 077);
+}
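+
+/* Illustrative sketch, not referenced anywhere in this header: how the 6-bit
+ * value returned by TLan_HashFunc selects one bit of the 64-bit multicast
+ * hash held in the TLAN_HASH_1/TLAN_HASH_2 registers.  The helper name is
+ * hypothetical; the driver performs equivalent steps in its multicast
+ * handling.
+ */
+#if 0
+static inline void TLan_SketchHashSet( u16 base_addr, const u8 *mc_addr )
+{
+	u32 offset = TLan_HashFunc( mc_addr );		/* 0..63 */
+
+	if ( offset < 32 )
+		TLan_DioWrite32( base_addr, TLAN_HASH_1,
+				 TLan_DioRead32( base_addr, TLAN_HASH_1 ) | ( 1u << offset ) );
+	else
+		TLan_DioWrite32( base_addr, TLAN_HASH_2,
+				 TLan_DioRead32( base_addr, TLAN_HASH_2 ) | ( 1u << ( offset - 32 ) ) );
+}
+#endif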
+#endif
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
new file mode 100644
index 000000000000..0d1dcf421771
--- /dev/null
+++ b/drivers/net/tokenring/3c359.c
@@ -0,0 +1,1830 @@
+/*
+ * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
+ *
+ * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC
+ *
+ * Base Driver Olympic:
+ * Written 1999 Peter De Schrijver & Mike Phillips
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world.
+ *
+ * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel.
+ * 3/05/01 - Last clean up stuff before submission.
+ * 2/15/01 - Finally, update to new pci api.
+ *
+ * To Do:
+ */
+
+/*
+ * Technical Card Details
+ *
+ * All access to data is done with 16/8 bit transfers. The transfer
+ * method really sucks. You can only read or write one location at a time.
+ *
+ * Also, the microcode for the card must be uploaded if the card does not have
+ * the flashrom on board. This is a 28K bloat in the driver when compiled
+ * as a module.
+ *
+ * Rx is very simple, status into a ring of descriptors, dma data transfer,
+ * interrupts to tell us when a packet is received.
+ *
+ * Tx is a little more interesting. Similar scenario, descriptor and dma data
+ * transfers, but we don't have to interrupt the card to tell it another packet
+ * is ready for transmission; we are just doing simple memory writes, not io or mmio
+ * writes. The card can be set up to simply poll on the next
+ * descriptor pointer and when this value is non-zero will automatically download
+ * the next packet. The card then interrupts us when the packet is done.
+ *
+ */
+
+#define XL_DEBUG 0
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <net/checksum.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "3c359.h"
+
+static char version[] __devinitdata =
+"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ;
+
+MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
+MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+
+/* Module parameters */
+
+/* Ring Speed 0,4,16
+ * 0 = Autosense
+ * 4,16 = Selected speed only, no autosense
+ * This allows the card to be the first on the ring
+ * and become the active monitor.
+ *
+ * WARNING: Some hubs will allow you to insert
+ * at the wrong speed.
+ *
+ * The adapter will _not_ fail to open if there are no
+ * active monitors on the ring; it will simply open up at
+ * its last known ringspeed if no ringspeed is specified.
+ */
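+/* For example, loading the module with "ringspeed=16,4" forces the first
+ * adapter to 16 Mbps and the second to 4 Mbps, while any further adapters
+ * stay in autosense mode.
+ */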
+
+static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
+
+module_param_array(ringspeed, int, NULL, 0);
+MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
+
+/* Packet buffer size */
+
+static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
+
+module_param_array(pkt_buf_sz, int, NULL, 0) ;
+MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
+/* Message Level */
+
+static int message_level[XL_MAX_ADAPTERS] = {0,} ;
+
+module_param_array(message_level, int, NULL, 0) ;
+MODULE_PARM_DESC(message_level, "3c359: Level of reported messages \n") ;
+/*
+ * This is a real nasty way of doing this, but otherwise you
+ * will be stuck with 1555 lines of hex #'s in the code.
+ */
+
+#include "3c359_microcode.h"
+
+static struct pci_device_id xl_pci_tbl[] =
+{
+ {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ;
+
+static int xl_init(struct net_device *dev);
+static int xl_open(struct net_device *dev);
+static int xl_open_hw(struct net_device *dev) ;
+static int xl_hw_reset(struct net_device *dev);
+static int xl_xmit(struct sk_buff *skb, struct net_device *dev);
+static void xl_dn_comp(struct net_device *dev);
+static int xl_close(struct net_device *dev);
+static void xl_set_rx_mode(struct net_device *dev);
+static irqreturn_t xl_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats * xl_get_stats(struct net_device *dev);
+static int xl_set_mac_address(struct net_device *dev, void *addr) ;
+static void xl_arb_cmd(struct net_device *dev);
+static void xl_asb_cmd(struct net_device *dev) ;
+static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ;
+static void xl_wait_misr_flags(struct net_device *dev) ;
+static int xl_change_mtu(struct net_device *dev, int mtu);
+static void xl_srb_bh(struct net_device *dev) ;
+static void xl_asb_bh(struct net_device *dev) ;
+static void xl_reset(struct net_device *dev) ;
+static void xl_freemem(struct net_device *dev) ;
+
+
+/* EEProm Access Functions */
+static u16 xl_ee_read(struct net_device *dev, int ee_addr) ;
+static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ;
+
+/* Debugging functions */
+#if XL_DEBUG
+static void print_tx_state(struct net_device *dev) ;
+static void print_rx_state(struct net_device *dev) ;
+
+static void print_tx_state(struct net_device *dev)
+{
+
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+ struct xl_tx_desc *txd ;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+ int i ;
+
+ printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head,
+ xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
+ printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n");
+ for (i = 0; i < 16; i++) {
+ txd = &(xl_priv->xl_tx_ring[i]) ;
+ printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd),
+ txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
+ }
+
+ printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) );
+
+ printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x \n",netif_running(dev) ) ;
+}
+
+static void print_rx_state(struct net_device *dev)
+{
+
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+ struct xl_rx_desc *rxd ;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+ int i ;
+
+ printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ;
+ printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n");
+ for (i = 0; i < 16; i++) {
+ /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
+ rxd = &(xl_priv->xl_rx_ring[i]) ;
+ printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd),
+ rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
+ }
+
+ printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) );
+
+ printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x \n",netif_running(dev) ) ;
+}
+#endif
+
+/*
+ * Read values from the on-board EEProm. This looks very strange
+ * but you have to wait for the EEProm to get/set the value before
+ * passing/getting the next value from the nic. As with all requests
+ * on this nic it has to be done in two stages, a) tell the nic which
+ * memory address you want to access and b) pass/get the value from the nic.
+ * With the EEProm, you have to wait before and in between accesses a) and b).
+ * As this is only read at initialization time and the wait period is very
+ * small we shouldn't have to worry about scheduling issues.
+ */
+
+static u16 xl_ee_read(struct net_device *dev, int ee_addr)
+{
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+
+ /* Wait for EEProm to not be busy */
+ writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
+
+ /* Tell EEProm what we want to do and where */
+ writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
+
+ /* Wait for EEProm to not be busy */
+ writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
+
+ /* Tell EEProm what we want to do and where */
+ writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
+
+ /* Finally read the value from the EEProm */
+ writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ return readw(xl_mmio + MMIO_MACDATA) ;
+}
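+
+/*
+ * Illustrative sketch, not referenced anywhere in the driver: the two stage
+ * access pattern described above, i.e. a) select the register via
+ * MMIO_MAC_ACCESS_CMD and b) read the value back from MMIO_MACDATA.  The
+ * helper name is hypothetical; reg is any register usable with IO_WORD_READ,
+ * such as EECONTROL.
+ */
+#if 0
+static u16 xl_sketch_indirect_readw(struct net_device *dev, int reg)
+{
+	struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+
+	/* Stage a) tell the nic which register we want */
+	writel(IO_WORD_READ | reg, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+	/* Stage b) fetch the value */
+	return readw(xl_mmio + MMIO_MACDATA) ;
+}
+#endif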
+
+/*
+ * Write values to the onboard eeprom. As with eeprom read you need to
+ * set which location to write, wait, value to write, wait, with the
+ * added twist of having to enable eeprom writes as well.
+ */
+
+static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
+{
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+
+ /* Wait for EEProm to not be busy */
+ writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
+
+ /* Enable write/erase */
+ writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ;
+
+ /* Wait for EEProm to not be busy */
+ writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
+
+ /* Put the value we want to write into EEDATA */
+ writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(ee_value, xl_mmio + MMIO_MACDATA) ;
+
+ /* Tell EEProm to write eevalue into ee_addr */
+ writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ;
+
+ /* Wait for EEProm to not be busy, to ensure write gets done */
+ writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
+
+ return ;
+}
+
+int __devinit xl_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev ;
+ struct xl_private *xl_priv ;
+ static int card_no = -1 ;
+ int i ;
+
+ card_no++ ;
+
+ if (pci_enable_device(pdev)) {
+ return -ENODEV ;
+ }
+
+ pci_set_master(pdev);
+
+ if ((i = pci_request_regions(pdev,"3c359"))) {
+ return i ;
+ } ;
+
+ /*
+	 * Allowing alloc_trdev to allocate the dev->priv structure will align xl_private
+	 * on a 32 byte boundary, which we need for the rx/tx descriptors
+ */
+
+ dev = alloc_trdev(sizeof(struct xl_private)) ;
+ if (!dev) {
+ pci_release_regions(pdev) ;
+ return -ENOMEM ;
+ }
+ xl_priv = dev->priv ;
+
+#if XL_DEBUG
+ printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n",
+ pdev, dev, dev->priv, (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start) ;
+#endif
+
+ dev->irq=pdev->irq;
+ dev->base_addr=pci_resource_start(pdev,0) ;
+ xl_priv->xl_card_name = pci_name(pdev);
+ xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
+ xl_priv->pdev = pdev ;
+
+ if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
+ xl_priv->pkt_buf_sz = PKT_BUF_SZ ;
+ else
+ xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
+
+ dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ;
+ xl_priv->xl_ring_speed = ringspeed[card_no] ;
+ xl_priv->xl_message_level = message_level[card_no] ;
+ xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ;
+ xl_priv->xl_copy_all_options = 0 ;
+
+ if((i = xl_init(dev))) {
+ iounmap(xl_priv->xl_mmio) ;
+ free_netdev(dev) ;
+ pci_release_regions(pdev) ;
+ return i ;
+ }
+
+ dev->open=&xl_open;
+ dev->hard_start_xmit=&xl_xmit;
+ dev->change_mtu=&xl_change_mtu;
+ dev->stop=&xl_close;
+ dev->do_ioctl=NULL;
+ dev->set_multicast_list=&xl_set_rx_mode;
+ dev->get_stats=&xl_get_stats ;
+ dev->set_mac_address=&xl_set_mac_address ;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pci_set_drvdata(pdev,dev) ;
+ if ((i = register_netdev(dev))) {
+ printk(KERN_ERR "3C359, register netdev failed\n") ;
+ pci_set_drvdata(pdev,NULL) ;
+ iounmap(xl_priv->xl_mmio) ;
+ free_netdev(dev) ;
+ pci_release_regions(pdev) ;
+ return i ;
+ }
+
+ printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ;
+
+ return 0;
+}
+
+
+static int __init xl_init(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+
+ printk(KERN_INFO "%s \n", version);
+ printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
+ xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
+
+ spin_lock_init(&xl_priv->xl_lock) ;
+
+ return xl_hw_reset(dev) ;
+
+}
+
+
+/*
+ * Hardware reset. This needs to be a separate entity as we need to reset the card
+ * when we change the EEProm settings.
+ */
+
+static int xl_hw_reset(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+ unsigned long t ;
+ u16 i ;
+ u16 result_16 ;
+ u8 result_8 ;
+ u16 start ;
+ int j ;
+
+ /*
+ * Reset the card. If the card has got the microcode on board, we have
+ * missed the initialization interrupt, so we must always do this.
+ */
+
+ writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
+
+ /*
+ * Must wait for cmdInProgress bit (12) to clear before continuing with
+ * card configuration.
+ */
+
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 40*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Enable pmbar by setting bit in CPAttention
+ */
+
+ writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ result_8 = readb(xl_mmio + MMIO_MACDATA) ;
+ result_8 = result_8 | CPA_PMBARVIS ;
+ writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(result_8, xl_mmio + MMIO_MACDATA) ;
+
+ /*
+ * Read cpHold bit in pmbar, if cleared we have got Flashrom on board.
+ * If not, we need to upload the microcode to the card
+ */
+
+ writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
+
+#if XL_DEBUG
+ printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ;
+#endif
+
+ if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
+
+ /* Set PmBar, privateMemoryBase bits (8:2) to 0 */
+
+ writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
+ result_16 = readw(xl_mmio + MMIO_MACDATA) ;
+ result_16 = result_16 & ~((0x7F) << 2) ;
+ writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(result_16,xl_mmio + MMIO_MACDATA) ;
+
+ /* Set CPAttention, memWrEn bit */
+
+ writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ result_8 = readb(xl_mmio + MMIO_MACDATA) ;
+ result_8 = result_8 | CPA_MEMWREN ;
+ writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(result_8, xl_mmio + MMIO_MACDATA) ;
+
+ /*
+ * Now to write the microcode into the shared ram
+ * The microcode must finish at position 0xFFFF, so we must subtract
+ * to get the start position for the code
+ */
+
+ start = (0xFFFF - (mc_size) + 1 ) ; /* Looks strange but ensures compiler only uses 16 bit unsigned int for this */
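+		/* Worked example with a hypothetical size: if mc_size were
+		 * 0x7000 bytes, start would be 0xFFFF - 0x7000 + 1 = 0x9000,
+		 * so the last byte written lands exactly at 0xFFFF.
+		 */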
+
+ printk(KERN_INFO "3C359: Uploading Microcode: ");
+
+ for (i = start, j = 0; j < mc_size; i++, j++) {
+ writel(MEM_BYTE_WRITE | 0XD0000 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(microcode[j],xl_mmio + MMIO_MACDATA) ;
+ if (j % 1024 == 0)
+ printk(".");
+ }
+ printk("\n") ;
+
+ for (i=0;i < 16; i++) {
+ writel( (MEM_BYTE_WRITE | 0xDFFF0) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(microcode[mc_size - 16 + i], xl_mmio + MMIO_MACDATA) ;
+ }
+
+ /*
+ * Have to write the start address of the upload to FFF4, but
+ * the address must be >> 4. You do not want to know how long
+ * it took me to discover this.
+ */
+
+ writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(start >> 4, xl_mmio + MMIO_MACDATA);
+
+ /* Clear the CPAttention, memWrEn Bit */
+
+ writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ result_8 = readb(xl_mmio + MMIO_MACDATA) ;
+ result_8 = result_8 & ~CPA_MEMWREN ;
+ writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(result_8, xl_mmio + MMIO_MACDATA) ;
+
+ /* Clear the cpHold bit in pmbar */
+
+ writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
+ result_16 = readw(xl_mmio + MMIO_MACDATA) ;
+ result_16 = result_16 & ~PMB_CPHOLD ;
+ writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(result_16,xl_mmio + MMIO_MACDATA) ;
+
+
+ } /* If microcode upload required */
+
+ /*
+	 * The card should now go through a self test procedure and get itself ready
+	 * to be opened; we must wait for an srb response with the initialization
+ * information.
+ */
+
+#if XL_DEBUG
+ printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name);
+#endif
+
+ writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ;
+
+ t=jiffies;
+ while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) {
+ schedule();
+ if(jiffies-t > 15*HZ) {
+ printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh,
+ * DnPriReqThresh, read the tech docs if you want to know what
+ * values they need to be.
+ */
+
+ writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(0xD000, xl_mmio + MMIO_MACDATA) ;
+
+ writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(0X0020, xl_mmio + MMIO_MACDATA) ;
+
+ writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ;
+
+ writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ;
+ writeb(0x04, xl_mmio + DNPRIREQTHRESH) ;
+
+ /*
+ * Read WRBR to provide the location of the srb block, have to use byte reads not word reads.
+ * Tech docs have this wrong !!!!
+ */
+
+ writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ;
+ writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;
+
+#if XL_DEBUG
+ writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
+ printk(KERN_INFO "Default ring speed 4 mbps \n") ;
+ } else {
+ printk(KERN_INFO "Default ring speed 16 mbps \n") ;
+ }
+ printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
+#endif
+
+ return 0;
+}
+
+static int xl_open(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+ u8 i ;
+ u16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
+ int open_err ;
+
+ u16 switchsettings, switchsettings_eeprom ;
+
+ if(request_irq(dev->irq, &xl_interrupt, SA_SHIRQ , "3c359", dev)) {
+ return -EAGAIN;
+ }
+
+ /*
+ * Read the information from the EEPROM that we need. I know we
+ * should use ntohs, but the word gets stored reversed in the 16
+	 * bit field anyway and it all works itself out when we memcpy
+ * it into dev->dev_addr.
+ */
+
+ hwaddr[0] = xl_ee_read(dev,0x10) ;
+ hwaddr[1] = xl_ee_read(dev,0x11) ;
+ hwaddr[2] = xl_ee_read(dev,0x12) ;
+
+ /* Ring speed */
+
+ switchsettings_eeprom = xl_ee_read(dev,0x08) ;
+ switchsettings = switchsettings_eeprom ;
+
+ if (xl_priv->xl_ring_speed != 0) {
+ if (xl_priv->xl_ring_speed == 4)
+ switchsettings = switchsettings | 0x02 ;
+ else
+ switchsettings = switchsettings & ~0x02 ;
+ }
+
+ /* Only write EEProm if there has been a change */
+ if (switchsettings != switchsettings_eeprom) {
+ xl_ee_write(dev,0x08,switchsettings) ;
+ /* Hardware reset after changing EEProm */
+ xl_hw_reset(dev) ;
+ }
+
+ memcpy(dev->dev_addr,hwaddr,dev->addr_len) ;
+
+ open_err = xl_open_hw(dev) ;
+
+ /*
+ * This really needs to be cleaned up with better error reporting.
+ */
+
+ if (open_err != 0) { /* Something went wrong with the open command */
+ if (open_err & 0x07) { /* Wrong speed, retry at different speed */
+ printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ;
+ switchsettings = switchsettings ^ 2 ;
+ xl_ee_write(dev,0x08,switchsettings) ;
+ xl_hw_reset(dev) ;
+ open_err = xl_open_hw(dev) ;
+ if (open_err != 0) {
+ printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name);
+ free_irq(dev->irq,dev) ;
+ return -ENODEV ;
+ }
+ } else {
+ printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ;
+ free_irq(dev->irq,dev) ;
+ return -ENODEV ;
+ }
+ }
+
+ /*
+ * Now to set up the Rx and Tx buffer structures
+ */
+ /* These MUST be on 8 byte boundaries */
+ xl_priv->xl_tx_ring = kmalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL) ;
+ if (xl_priv->xl_tx_ring == NULL) {
+		printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
+ dev->name);
+ free_irq(dev->irq,dev);
+ return -ENOMEM;
+ }
+ xl_priv->xl_rx_ring = kmalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL) ;
+	if (xl_priv->xl_rx_ring == NULL) {
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
+ dev->name);
+ free_irq(dev->irq,dev);
+ kfree(xl_priv->xl_tx_ring);
+ return -ENOMEM;
+ }
+ memset(xl_priv->xl_tx_ring,0,sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) ;
+ memset(xl_priv->xl_rx_ring,0,sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) ;
+
+ /* Setup Rx Ring */
+ for (i=0 ; i < XL_RX_RING_SIZE ; i++) {
+ struct sk_buff *skb ;
+
+ skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
+ if (skb==NULL)
+ break ;
+
+ skb->dev = dev ;
+ xl_priv->xl_rx_ring[i].upfragaddr = pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
+ xl_priv->xl_rx_ring[i].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG;
+ xl_priv->rx_ring_skb[i] = skb ;
+ }
+
+ if (i==0) {
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
+ free_irq(dev->irq,dev) ;
+ return -EIO ;
+ }
+
+ xl_priv->rx_ring_no = i ;
+ xl_priv->rx_ring_tail = 0 ;
+ xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
+ for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
+ xl_priv->xl_rx_ring[i].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)) ;
+ }
+ xl_priv->xl_rx_ring[i].upnextptr = 0 ;
+
+ writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ;
+
+ /* Setup Tx Ring */
+
+ xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
+
+ xl_priv->tx_ring_head = 1 ;
+ xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
+ xl_priv->free_ring_entries = XL_TX_RING_SIZE ;
+
+ /*
+ * Setup the first dummy DPD entry for polling to start working.
+ */
+
+ xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY ;
+ xl_priv->xl_tx_ring[0].buffer = 0 ;
+ xl_priv->xl_tx_ring[0].buffer_length = 0 ;
+ xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
+
+ writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ;
+ writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ;
+ writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ;
+ writel(DNENABLE, xl_mmio + MMIO_COMMAND) ;
+ writeb(0x40, xl_mmio + MMIO_DNPOLL) ;
+
+ /*
+ * Enable interrupts on the card
+ */
+
+ writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
+ writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
+
+ netif_start_queue(dev) ;
+ return 0;
+
+}
+
+static int xl_open_hw(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
+ u16 vsoff ;
+ char ver_str[33];
+ int open_err ;
+ int i ;
+ unsigned long t ;
+
+ /*
+ * Okay, let's build up the Open.NIC srb command
+ *
+ */
+
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ;
+
+ /*
+	 * Use this as a test byte; if it comes back with the same value, the command didn't work.
+ */
+
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0xff,xl_mmio + MMIO_MACDATA) ;
+
+ /* Open options */
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0x00, xl_mmio + MMIO_MACDATA) ;
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0x00, xl_mmio + MMIO_MACDATA) ;
+
+ /*
+	 * Node address: be careful here. The docs say you can just put zeros here and it will use
+	 * the hardware address; it doesn't, so you must include the node address in the open command.
+ */
+
+ if (xl_priv->xl_laa[0]) { /* If using a LAA address */
+ for (i=10;i<16;i++) {
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(xl_priv->xl_laa[i],xl_mmio + MMIO_MACDATA) ;
+ }
+ memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ;
+ } else { /* Regular hardware address */
+ for (i=10;i<16;i++) {
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ;
+ }
+ }
+
+ /* Default everything else to 0 */
+ for (i = 16; i < 34; i++) {
+ writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0x00,xl_mmio + MMIO_MACDATA) ;
+ }
+
+ /*
+ * Set the csrb bit in the MISR register
+ */
+
+ xl_wait_misr_flags(dev) ;
+ writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
+ writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ;
+
+ /*
+ * Now wait for the command to run
+ */
+
+ t=jiffies;
+ while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
+ schedule();
+ if(jiffies-t > 40*HZ) {
+ printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
+ break ;
+ }
+ }
+
+ /*
+ * Let's interpret the open response
+ */
+
+ writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if (readb(xl_mmio + MMIO_MACDATA)!=0) {
+ open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ;
+ writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ open_err |= readb(xl_mmio + MMIO_MACDATA) ;
+ return open_err ;
+ } else {
+ writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ xl_priv->asb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
+ printk("ASB: %04x",xl_priv->asb ) ;
+ writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ printk(", SRB: %04x",ntohs(readw(xl_mmio + MMIO_MACDATA)) ) ;
+
+ writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ xl_priv->arb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(", ARB: %04x \n",xl_priv->arb ) ;
+ writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ vsoff = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+
+ /*
+		 * Interesting, sending the individual characters directly to printk was causing klogd to
+		 * use 100% of processor time, so we build up the string and print that instead.
+ */
+
+ for (i=0;i<0x20;i++) {
+ writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
+ }
+ ver_str[i] = '\0' ;
+ printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str);
+ }
+
+ /*
+ * Issue the AckInterrupt
+ */
+ writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+
+ return 0 ;
+}
+
+/*
+ * There are two ways of implementing rx on the 359 NIC, either
+ * interrupt driven or polling. We are going to use interrupts,
+ * it is the easier way of doing things.
+ *
+ * The Rx works with a ring of Rx descriptors. At initialise time the ring
+ * entries point to the next entry except for the last entry in the ring
+ * which points to 0. The card is programmed with the location of the first
+ * available descriptor and keeps reading the next_ptr until next_ptr is set
+ * to 0. Hopefully with a ring size of 16 the card will never get to read a next_ptr
+ * of 0. As the Rx interrupt is received we copy the frame up to the protocol layers
+ * and then point the end of the ring to our current position and point our current
+ * position to 0, therefore making the current position the last position on the ring.
+ * The last position on the ring therefore continually loops around the rx ring.
+ *
+ * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head
+ * expands as the card adds new packets and we go around eating the tail processing the
+ * packets.)
+ *
+ * Undoubtedly it could be streamlined and improved upon, but at the moment it works
+ * and the fast path through the routine is fine.
+ *
+ * adv_rx_ring could be inlined to increase performance, but it's called a *lot* of times
+ * in xl_rx so would increase the size of the function significantly.
+ */
+
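+/*
+ * Worked example of the wrap-around arithmetic below, assuming the default
+ * XL_RX_RING_SIZE of 16: if rx_ring_tail is 0 then prev_ring_loc is
+ * (0 + 16 - 1) & 15 = 15; if rx_ring_tail is 5 then prev_ring_loc is 4.
+ * The descriptor before the tail is re-chained onto the just-processed
+ * descriptor, which is cleared and becomes the new end of the chain
+ * (upnextptr = 0), and the tail then advances with the same mask.
+ */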
+static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ int prev_ring_loc ;
+
+ prev_ring_loc = (xl_priv->rx_ring_tail + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
+ xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * xl_priv->rx_ring_tail) ;
+ xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus = 0 ;
+ xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upnextptr = 0 ;
+ xl_priv->rx_ring_tail++ ;
+ xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1) ;
+
+ return ;
+}
+
+static void xl_rx(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ struct sk_buff *skb, *skb2 ;
+ int frame_length = 0, copy_len = 0 ;
+ int temp_ring_loc ;
+
+ /*
+ * Receive the next frame, loop around the ring until all frames
+ * have been received.
+ */
+
+ while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
+
+ if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
+
+ /*
+ * This is a pain, you need to go through all the descriptors until the last one
+ * for this frame to find the framelength
+ */
+
+ temp_ring_loc = xl_priv->rx_ring_tail ;
+
+ while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
+ temp_ring_loc++ ;
+ temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
+ }
+
+ frame_length = xl_priv->xl_rx_ring[temp_ring_loc].framestatus & 0x7FFF ;
+
+ skb = dev_alloc_skb(frame_length) ;
+
+ if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */
+ printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ;
+ while (xl_priv->rx_ring_tail != temp_ring_loc)
+ adv_rx_ring(dev) ;
+
+ adv_rx_ring(dev) ; /* One more time just for luck :) */
+ xl_priv->xl_stats.rx_dropped++ ;
+
+ writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
+ return ;
+ }
+
+ skb->dev = dev ;
+
+ while (xl_priv->rx_ring_tail != temp_ring_loc) {
+ copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ;
+ frame_length -= copy_len ;
+ pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ;
+ pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ adv_rx_ring(dev) ;
+ }
+
+ /* Now we have found the last fragment */
+ pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			memcpy(skb_put(skb,frame_length), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ;
+/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
+ pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ adv_rx_ring(dev) ;
+ skb->protocol = tr_type_trans(skb,dev) ;
+ netif_rx(skb) ;
+
+ } else { /* Single Descriptor Used, simply swap buffers over, fast path */
+
+ frame_length = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & 0x7FFF ;
+
+ skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
+
+ if (skb==NULL) { /* Still need to fix the rx ring */
+ printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
+ adv_rx_ring(dev) ;
+ xl_priv->xl_stats.rx_dropped++ ;
+ writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
+ return ;
+ }
+
+ skb->dev = dev ;
+
+ skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
+ pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ skb_put(skb2, frame_length) ;
+ skb2->protocol = tr_type_trans(skb2,dev) ;
+
+ xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
+ xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
+ xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG ;
+ adv_rx_ring(dev) ;
+ xl_priv->xl_stats.rx_packets++ ;
+ xl_priv->xl_stats.rx_bytes += frame_length ;
+
+ netif_rx(skb2) ;
+ } /* if multiple buffers */
+ dev->last_rx = jiffies ;
+ } /* while packet to do */
+
+ /* Clear the updComplete interrupt */
+ writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
+ return ;
+}
+
+/*
+ * This is ruthless; it doesn't care what state the card is in, it will
+ * completely reset the adapter.
+ */
+
+static void xl_reset(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ unsigned long t;
+
+ writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
+
+ /*
+ * Must wait for cmdInProgress bit (12) to clear before continuing with
+ * card configuration.
+ */
+
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ if(jiffies-t > 40*HZ) {
+ printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
+ break ;
+ }
+ }
+
+}
+
+static void xl_freemem(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv ;
+ int i ;
+
+ for (i=0;i<XL_RX_RING_SIZE;i++) {
+ dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
+ pci_unmap_single(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
+ xl_priv->rx_ring_tail++ ;
+ xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
+ }
+
+ /* unmap ring */
+ pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ;
+
+ pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ;
+
+ kfree(xl_priv->xl_rx_ring) ;
+ kfree(xl_priv->xl_tx_ring) ;
+
+ return ;
+}
+
+static irqreturn_t xl_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xl_private *xl_priv =(struct xl_private *)dev->priv;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ u16 intstatus, macstatus ;
+
+ if (!dev) {
+ printk(KERN_WARNING "Device structure dead, aaahhhh !\n") ;
+ return IRQ_NONE;
+ }
+
+ intstatus = readw(xl_mmio + MMIO_INTSTATUS) ;
+
+ if (!(intstatus & 1)) /* We didn't generate the interrupt */
+ return IRQ_NONE;
+
+ spin_lock(&xl_priv->xl_lock) ;
+
+ /*
+ * Process the interrupt
+ */
+ /*
+	 * Something fishy is going on here; we shouldn't get 0x0001 ints, but it's not fatal.
+ */
+ if (intstatus == 0x0001) {
+ writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+		printk(KERN_INFO "%s: 0001 int received\n",dev->name) ;
+ } else {
+ if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
+
+ /*
+ * Host Error.
+ * It may be possible to recover from this, but usually it means something
+ * is seriously fubar, so we just close the adapter.
+ */
+
+ if (intstatus & HOSTERRINT) {
+ printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ;
+ writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
+ printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ netif_stop_queue(dev) ;
+ xl_freemem(dev) ;
+ free_irq(dev->irq,dev);
+ xl_reset(dev) ;
+ writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+ spin_unlock(&xl_priv->xl_lock) ;
+ return IRQ_HANDLED;
+ } /* Host Error */
+
+ if (intstatus & SRBRINT ) { /* Srbc interrupt */
+ writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+ if (xl_priv->srb_queued)
+ xl_srb_bh(dev) ;
+ } /* SRBR Interrupt */
+
+ if (intstatus & TXUNDERRUN) { /* Issue DnReset command */
+ writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */
+ /* !!! FIX-ME !!!!
+ Must put a timeout check here ! */
+ /* Empty Loop */
+ }
+ printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ;
+ writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+ } /* TxUnderRun */
+
+ if (intstatus & ARBCINT ) { /* Arbc interrupt */
+ xl_arb_cmd(dev) ;
+ } /* Arbc */
+
+ if (intstatus & ASBFINT) {
+ if (xl_priv->asb_queued == 1) {
+ xl_asb_cmd(dev) ;
+ } else if (xl_priv->asb_queued == 2) {
+ xl_asb_bh(dev) ;
+ } else {
+ writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
+ }
+ } /* Asbf */
+
+ if (intstatus & UPCOMPINT ) /* UpComplete */
+ xl_rx(dev) ;
+
+ if (intstatus & DNCOMPINT ) /* DnComplete */
+ xl_dn_comp(dev) ;
+
+ if (intstatus & HARDERRINT ) { /* Hardware error */
+ writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ macstatus = readw(xl_mmio + MMIO_MACDATA) ;
+ printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
+ if (macstatus & (1<<14))
+ printk(KERN_WARNING "tchk error: Unrecoverable error \n") ;
+ if (macstatus & (1<<3))
+ printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ;
+ if (macstatus & (1<<2))
+ printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ;
+				printk(KERN_WARNING "Intstatus = %04x, macstatus = %04x\n",intstatus,macstatus) ;
+ printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ netif_stop_queue(dev) ;
+ xl_freemem(dev) ;
+ free_irq(dev->irq,dev);
+ xl_reset(dev) ;
+ writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+ spin_unlock(&xl_priv->xl_lock) ;
+ return IRQ_HANDLED;
+ }
+ } else {
+ printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ;
+ writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+ }
+ }
+
+ /* Turn interrupts back on */
+
+ writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
+ writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
+
+ spin_unlock(&xl_priv->xl_lock) ;
+ return IRQ_HANDLED;
+}
+
+/*
+ * Tx - Polling configuration
+ */
+
+static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ struct xl_tx_desc *txd ;
+ int tx_head, tx_tail, tx_prev ;
+ unsigned long flags ;
+
+ spin_lock_irqsave(&xl_priv->xl_lock,flags) ;
+
+ netif_stop_queue(dev) ;
+
+ if (xl_priv->free_ring_entries > 1 ) {
+ /*
+ * Set up the descriptor for the packet
+ */
+ tx_head = xl_priv->tx_ring_head ;
+ tx_tail = xl_priv->tx_ring_tail ;
+
+ txd = &(xl_priv->xl_tx_ring[tx_head]) ;
+ txd->dnnextptr = 0 ;
+ txd->framestartheader = skb->len | TXDNINDICATE ;
+ txd->buffer = pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE) ;
+ txd->buffer_length = skb->len | TXDNFRAGLAST ;
+ xl_priv->tx_ring_skb[tx_head] = skb ;
+ xl_priv->xl_stats.tx_packets++ ;
+ xl_priv->xl_stats.tx_bytes += skb->len ;
+
+ /*
+ * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1
+ * to ensure no negative numbers in unsigned locations.
+ */
+
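+		/*
+		 * e.g. with XL_TX_RING_SIZE = 16: tx_ring_head = 0 gives tx_prev = 15,
+		 * tx_ring_head = 5 gives tx_prev = 4.
+		 */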
+ tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ;
+
+ xl_priv->tx_ring_head++ ;
+ xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
+ xl_priv->free_ring_entries-- ;
+
+ xl_priv->xl_tx_ring[tx_prev].dnnextptr = xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head) ;
+
+ /* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
+ /* readl(xl_mmio + MMIO_DNLISTPTR) ; */
+
+ netif_wake_queue(dev) ;
+
+ spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
+
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
+ return 1;
+ }
+
+}
+
+/*
+ * The NIC has told us that a packet has been downloaded onto the card; we must
+ * find out which packet it has done, clear the skb and information for the packet,
+ * then advance around the ring for all transmitted packets.
+ */
+
+static void xl_dn_comp(struct net_device *dev)
+{
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ struct xl_tx_desc *txd ;
+
+
+ if (xl_priv->tx_ring_tail == 255) {/* First time */
+ xl_priv->xl_tx_ring[0].framestartheader = 0 ;
+ xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
+ xl_priv->tx_ring_tail = 1 ;
+ }
+
+ while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
+ txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
+ pci_unmap_single(xl_priv->pdev,txd->buffer, xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE) ;
+ txd->framestartheader = 0 ;
+ txd->buffer = 0xdeadbeef ;
+ txd->buffer_length = 0 ;
+ dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
+ xl_priv->tx_ring_tail++ ;
+ xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ;
+ xl_priv->free_ring_entries++ ;
+ }
+
+ netif_wake_queue(dev) ;
+
+ writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
+}
+
+/*
+ * Close the adapter properly.
+ * This srb reply cannot be handled from interrupt context as we have
+ * to free the interrupt from the driver.
+ */
+
+static int xl_close(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ unsigned long t ;
+
+ netif_stop_queue(dev) ;
+
+ /*
+ * Close the adapter, need to stall the rx and tx queues.
+ */
+
+ writew(DNSTALL, xl_mmio + MMIO_COMMAND) ;
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
+ break ;
+ }
+ }
+ writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ;
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
+ break ;
+ }
+ }
+ writew(UPSTALL, xl_mmio + MMIO_COMMAND) ;
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
+ break ;
+ }
+ }
+
+ /* Turn off interrupts, we will still get the indication though
+ * so we can trap it
+ */
+
+ writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ;
+
+ xl_srb_cmd(dev,CLOSE_NIC) ;
+
+ t=jiffies;
+ while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
+ break ;
+ }
+ }
+ /* Read the srb response from the adapter */
+
+ writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
+ if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
+ printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ;
+ } else {
+ writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if (readb(xl_mmio + MMIO_MACDATA)==0) {
+ printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ;
+ writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+
+ xl_freemem(dev) ;
+ free_irq(dev->irq,dev) ;
+ } else {
+ printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ;
+ }
+ }
+
+ /* Reset the upload and download logic */
+
+ writew(UPRESET, xl_mmio + MMIO_COMMAND) ;
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
+ break ;
+ }
+ }
+ writew(DNRESET, xl_mmio + MMIO_COMMAND) ;
+ t=jiffies;
+ while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
+ schedule();
+ if(jiffies-t > 10*HZ) {
+ printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
+ break ;
+ }
+ }
+ xl_hw_reset(dev) ;
+ return 0 ;
+}
+
+static void xl_set_rx_mode(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ struct dev_mc_list *dmi ;
+ unsigned char dev_mc_address[4] ;
+ u16 options ;
+ int i ;
+
+ if (dev->flags & IFF_PROMISC)
+ options = 0x0004 ;
+ else
+ options = 0x0000 ;
+
+ if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
+ xl_priv->xl_copy_all_options = options ;
+ xl_srb_cmd(dev, SET_RECEIVE_MODE) ;
+ return ;
+ }
+
+ dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
+
+ for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ dev_mc_address[0] |= dmi->dmi_addr[2] ;
+ dev_mc_address[1] |= dmi->dmi_addr[3] ;
+ dev_mc_address[2] |= dmi->dmi_addr[4] ;
+ dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ }
+
+ if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
+ memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ;
+ xl_srb_cmd(dev, SET_FUNC_ADDRESS) ;
+ }
+ return ;
+}
+
+
+/*
+ * We issued an srb command and now we must read
+ * the response from the completed command.
+ */
+
+static void xl_srb_bh(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ u8 srb_cmd, ret_code ;
+ int i ;
+
+ writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ srb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
+ writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ ret_code = readb(xl_mmio + MMIO_MACDATA) ;
+
+ /* Ret_code is standard across all commands */
+
+ switch (ret_code) {
+ case 1:
+ printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
+ break ;
+ case 4:
+ printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ;
+ break ;
+
+ case 6:
+ printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ;
+ break ;
+
+ case 0: /* Successful command execution */
+ switch (srb_cmd) {
+ case READ_LOG: /* Returns 14 bytes of data from the NIC */
+ if(xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ;
+ /*
+ * We still have to read the log even if message_level = 0 and we don't want
+ * to see it
+ */
+ for (i=0;i<14;i++) {
+ writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if(xl_priv->xl_message_level)
+ printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ;
+ }
+ printk("\n") ;
+ break ;
+ case SET_FUNC_ADDRESS:
+ if(xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ;
+ break ;
+ case CLOSE_NIC:
+ if(xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ;
+ break ;
+ case SET_MULTICAST_MODE:
+ if(xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ;
+ break ;
+ case SET_RECEIVE_MODE:
+ if(xl_priv->xl_message_level) {
+ if (xl_priv->xl_copy_all_options == 0x0004)
+ printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ;
+ else
+ printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ;
+ }
+ break ;
+
+ } /* switch */
+ break ;
+ } /* switch */
+ return ;
+}
+
+static struct net_device_stats * xl_get_stats(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ return (struct net_device_stats *) &xl_priv->xl_stats;
+}
+
+static int xl_set_mac_address (struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = addr ;
+ struct xl_private *xl_priv = (struct xl_private *)dev->priv ;
+
+ if (netif_running(dev)) {
+ printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
+ return -EIO ;
+ }
+
+ memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ;
+
+ if (xl_priv->xl_message_level) {
+ printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
+ xl_priv->xl_laa[1], xl_priv->xl_laa[2],
+ xl_priv->xl_laa[3], xl_priv->xl_laa[4],
+ xl_priv->xl_laa[5]);
+ }
+
+ return 0 ;
+}
+
+static void xl_arb_cmd(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ u8 arb_cmd ;
+ u16 lan_status, lan_status_diff ;
+
+ writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ arb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
+
+ if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
+ writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+
+ printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, ntohs(readw(xl_mmio + MMIO_MACDATA) )) ;
+
+ lan_status = ntohs(readw(xl_mmio + MMIO_MACDATA));
+
+ /* Acknowledge interrupt, this tells nic we are done with the arb */
+ writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+
+ lan_status_diff = xl_priv->xl_lan_status ^ lan_status ;
+
+ if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
+ if (lan_status_diff & LSC_LWF)
+ printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
+ if (lan_status_diff & LSC_ARW)
+ printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
+ if (lan_status_diff & LSC_FPE)
+ printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
+ if (lan_status_diff & LSC_RR)
+ printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
+
+ /* Adapter has been closed by the hardware */
+
+ netif_stop_queue(dev);
+ xl_freemem(dev) ;
+ free_irq(dev->irq,dev);
+
+ printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ } /* If serious error */
+
+ if (xl_priv->xl_message_level) {
+ if (lan_status_diff & LSC_SIG_LOSS)
+ printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ if (lan_status_diff & LSC_HARD_ERR)
+ printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ if (lan_status_diff & LSC_SOFT_ERR)
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ if (lan_status_diff & LSC_TRAN_BCN)
+			printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
+ if (lan_status_diff & LSC_SS)
+ printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ if (lan_status_diff & LSC_RING_REC)
+ printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
+ if (lan_status_diff & LSC_FDX_MODE)
+ printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
+ }
+
+ if (lan_status_diff & LSC_CO) {
+ if (xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ /* Issue READ.LOG command */
+ xl_srb_cmd(dev, READ_LOG) ;
+ }
+
+ /* There is no command in the tech docs to issue the read_sr_counters */
+ if (lan_status_diff & LSC_SR_CO) {
+ if (xl_priv->xl_message_level)
+ printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
+ }
+
+ xl_priv->xl_lan_status = lan_status ;
+
+ } /* Lan.change.status */
+ else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
+#if XL_DEBUG
+ printk(KERN_INFO "Received.Data \n") ;
+#endif
+ writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ xl_priv->mac_buffer = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+
+ /* Now we are going to be really basic here and not do anything
+ * with the data at all. The tech docs do not give me enough
+ * information to calculate the buffers properly so we're
+ * just going to tell the nic that we've dealt with the frame
+ * anyway.
+ */
+
+ dev->last_rx = jiffies ;
+ /* Acknowledge interrupt, this tells nic we are done with the arb */
+ writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
+
+ /* Is the ASB free ? */
+
+ xl_priv->asb_queued = 0 ;
+ writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if (readb(xl_mmio + MMIO_MACDATA) != 0xff) {
+ xl_priv->asb_queued = 1 ;
+
+ xl_wait_misr_flags(dev) ;
+
+ writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD);
+ writeb(0xff, xl_mmio + MMIO_MACDATA) ;
+ writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ;
+ return ;
+ /* Drop out and wait for the bottom half to be run */
+ }
+
+ xl_asb_cmd(dev) ;
+
+ } else {
+ printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ;
+ }
+
+ /* Acknowledge the arb interrupt */
+
+ writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
+
+ return ;
+}
+
+
+/*
+ * There is only one asb command, but we can get called from different
+ * places.
+ */
+
+static void xl_asb_cmd(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+
+ if (xl_priv->asb_queued == 1)
+ writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
+
+ writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0x81, xl_mmio + MMIO_MACDATA) ;
+
+ writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(ntohs(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
+
+ xl_wait_misr_flags(dev) ;
+
+ writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD);
+ writeb(0xff, xl_mmio + MMIO_MACDATA) ;
+
+ writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ;
+
+ xl_priv->asb_queued = 2 ;
+
+ return ;
+}
+
+/*
+ * This will only get called if there was an error
+ * from the asb cmd.
+ */
+static void xl_asb_bh(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+ u8 ret_code ;
+
+ writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ ret_code = readb(xl_mmio + MMIO_MACDATA) ;
+ switch (ret_code) {
+ case 0x01:
+ printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ;
+ break ;
+ case 0x26:
+ printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ;
+ break ;
+ case 0x40:
+ printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ;
+ break ;
+ }
+ xl_priv->asb_queued = 0 ;
+ writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
+ return ;
+}
+
+/*
+ * Issue srb commands to the nic
+ */
+
+static void xl_srb_cmd(struct net_device *dev, int srb_cmd)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+
+ switch (srb_cmd) {
+ case READ_LOG:
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ;
+ break;
+
+ case CLOSE_NIC:
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ;
+ break ;
+
+ case SET_RECEIVE_MODE:
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ;
+ writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ;
+ break ;
+
+ case SET_FUNC_ADDRESS:
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ;
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ;
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ;
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ;
+ writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
+ break ;
+ } /* switch */
+
+
+ xl_wait_misr_flags(dev) ;
+
+ /* Write 0xff to the CSRB flag */
+ writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
+ /* Set csrb bit in MISR register to process command */
+ writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ;
+ xl_priv->srb_queued = 1 ;
+
+ return ;
+}
+
+/*
+ * This is nasty: to use the MISR command you have to wait for 6 memory locations
+ * to be zero. This is the way the driver does it on other OSes, so we should be OK with
+ * the empty loop.
+ */
+
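+/*
+ * The six locations polled below are the MISR flag bytes MF_SSBF through MF_CSRB
+ * at 0xDFFE0 - 0xDFFE5 (see 3c359.h).
+ */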
+static void xl_wait_misr_flags(struct net_device *dev)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv ;
+ u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
+
+ int i ;
+
+ writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */
+ for (i=0; i<6; i++) {
+ writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ while (readb(xl_mmio + MMIO_MACDATA) != 0 ) {} ; /* Empty Loop */
+ }
+ }
+
+ writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
+ writeb(0x80, xl_mmio + MMIO_MACDATA) ;
+
+ return ;
+}
+
+/*
+ * Change mtu size, this should work the same as olympic
+ */
+
+static int xl_change_mtu(struct net_device *dev, int mtu)
+{
+ struct xl_private *xl_priv = (struct xl_private *) dev->priv;
+ u16 max_mtu ;
+
+ if (xl_priv->xl_ring_speed == 4)
+ max_mtu = 4500 ;
+ else
+ max_mtu = 18000 ;
+
+ if (mtu > max_mtu)
+ return -EINVAL ;
+ if (mtu < 100)
+ return -EINVAL ;
+
+ dev->mtu = mtu ;
+ xl_priv->pkt_buf_sz = mtu + TR_HLEN ;
+
+ return 0 ;
+}
+
+static void __devexit xl_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct xl_private *xl_priv=(struct xl_private *)dev->priv;
+
+ unregister_netdev(dev);
+ iounmap(xl_priv->xl_mmio) ;
+ pci_release_regions(pdev) ;
+ pci_set_drvdata(pdev,NULL) ;
+ free_netdev(dev);
+ return ;
+}
+
+static struct pci_driver xl_3c359_driver = {
+ .name = "3c359",
+ .id_table = xl_pci_tbl,
+ .probe = xl_probe,
+ .remove = __devexit_p(xl_remove_one),
+};
+
+static int __init xl_pci_init (void)
+{
+ return pci_module_init (&xl_3c359_driver);
+}
+
+
+static void __exit xl_pci_cleanup (void)
+{
+ pci_unregister_driver (&xl_3c359_driver);
+}
+
+module_init(xl_pci_init);
+module_exit(xl_pci_cleanup);
+
+MODULE_LICENSE("GPL") ;
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
new file mode 100644
index 000000000000..05c860368852
--- /dev/null
+++ b/drivers/net/tokenring/3c359.h
@@ -0,0 +1,290 @@
+/*
+ * 3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
+ *
+ * Linux driver for 3Com 3C359 Token Link PCI XL cards.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License Version 2 or (at your option)
+ * any later version, incorporated herein by reference.
+ */
+
+/* Memory Access Commands */
+#define IO_BYTE_READ 0x28 << 24
+#define IO_BYTE_WRITE 0x18 << 24
+#define IO_WORD_READ 0x20 << 24
+#define IO_WORD_WRITE 0x10 << 24
+#define MMIO_BYTE_READ 0x88 << 24
+#define MMIO_BYTE_WRITE 0x48 << 24
+#define MMIO_WORD_READ 0x80 << 24
+#define MMIO_WORD_WRITE 0x40 << 24
+#define MEM_BYTE_READ 0x8C << 24
+#define MEM_BYTE_WRITE 0x4C << 24
+#define MEM_WORD_READ 0x84 << 24
+#define MEM_WORD_WRITE 0x44 << 24
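+
+/*
+ * These command values are OR'd with an adapter address and written to
+ * MMIO_MAC_ACCESS_CMD; the data itself is then transferred a byte or word at a
+ * time through MMIO_MACDATA. A minimal sketch of the pattern used throughout
+ * 3c359.c, e.g. reading one byte of the SRB area:
+ *
+ *	writel(MEM_BYTE_READ | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
+ *	srb_cmd = readb(xl_mmio + MMIO_MACDATA);
+ */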
+
+#define PMBAR 0x1C80
+#define PMB_CPHOLD (1<<10)
+
+#define CPATTENTION 0x180D
+#define CPA_PMBARVIS (1<<7)
+#define CPA_MEMWREN (1<<6)
+
+#define SWITCHSETTINGS 0x1C88
+#define EECONTROL 0x1C8A
+#define EEDATA 0x1C8C
+#define EEREAD 0x0080
+#define EEWRITE 0x0040
+#define EEERASE 0x0060
+#define EE_ENABLE_WRITE 0x0030
+#define EEBUSY (1<<15)
+
+#define WRBR 0xCDE02
+#define WWOR 0xCDE04
+#define WWCR 0xCDE06
+#define MACSTATUS 0xCDE08
+#define MISR_RW 0xCDE0B
+#define MISR_AND 0xCDE2B
+#define MISR_SET 0xCDE4B
+#define RXBUFAREA 0xCDE10
+#define RXEARLYTHRESH 0xCDE12
+#define TXSTARTTHRESH 0x58
+#define DNPRIREQTHRESH 0x2C
+
+#define MISR_CSRB (1<<5)
+#define MISR_RASB (1<<4)
+#define MISR_SRBFR (1<<3)
+#define MISR_ASBFR (1<<2)
+#define MISR_ARBF (1<<1)
+
+/* MISR Flags memory locations */
+#define MF_SSBF 0xDFFE0
+#define MF_ARBF 0xDFFE1
+#define MF_ASBFR 0xDFFE2
+#define MF_SRBFR 0xDFFE3
+#define MF_RASB 0xDFFE4
+#define MF_CSRB 0xDFFE5
+
+#define MMIO_MACDATA 0x10
+#define MMIO_MAC_ACCESS_CMD 0x14
+#define MMIO_TIMER 0x1A
+#define MMIO_DMA_CTRL 0x20
+#define MMIO_DNLISTPTR 0x24
+#define MMIO_HASHFILTER 0x28
+#define MMIO_CONFIG 0x29
+#define MMIO_DNPRIREQTHRESH 0x2C
+#define MMIO_DNPOLL 0x2D
+#define MMIO_UPPKTSTATUS 0x30
+#define MMIO_FREETIMER 0x34
+#define MMIO_COUNTDOWN 0x36
+#define MMIO_UPLISTPTR 0x38
+#define MMIO_UPPOLL 0x3C
+#define MMIO_UPBURSTTHRESH 0x40
+#define MMIO_DNBURSTTHRESH 0x41
+#define MMIO_INTSTATUS_AUTO 0x56
+#define MMIO_TXSTARTTHRESH 0x58
+#define MMIO_INTERRUPTENABLE 0x5A
+#define MMIO_INDICATIONENABLE 0x5C
+#define MMIO_COMMAND 0x5E /* These two are meant to be the same */
+#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */
+#define INTSTAT_CMD_IN_PROGRESS (1<<12)
+#define INTSTAT_SRB (1<<14)
+#define INTSTAT_INTLATCH (1<<0)
+
+/* Indication / Interrupt Mask
+ * Annoyingly the bits to be set in the indication and interrupt enable
+ * do not match with the actual bits received in the interrupt, although
+ * they are in the same order.
+ * The mapping for the indication / interrupt are:
+ * Bit Indication / Interrupt
+ * 0 HostError
+ * 1 txcomplete
+ * 2 updneeded
+ * 3 rxcomplete
+ * 4 intrequested
+ * 5 macerror
+ * 6 dncomplete
+ * 7 upcomplete
+ * 8 txunderrun
+ * 9 asbf
+ * 10 srbr
+ * 11 arbc
+ *
+ * The only ones we don't want to receive are txcomplete and rxcomplete;
+ * we use dncomplete and upcomplete instead.
+ */
+
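+/*
+ * 0xFF5 = 1111 1111 0101b: every bit 0-11 from the table above except
+ * bit 1 (txcomplete) and bit 3 (rxcomplete).
+ */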
+#define INT_MASK 0xFF5
+
+/* Note the subtle difference here, IND and INT */
+
+#define SETINDENABLE (8<<12)
+#define SETINTENABLE (7<<12)
+#define SRBBIT (1<<10)
+#define ASBBIT (1<<9)
+#define ARBBIT (1<<11)
+
+#define SRB 0xDFE90
+#define ASB 0xDFED0
+#define ARB 0xD0000
+#define SCRATCH 0xDFEF0
+
+#define INT_REQUEST 0x6000 /* (6 << 12) */
+#define ACK_INTERRUPT 0x6800 /* (13 <<11) */
+#define GLOBAL_RESET 0x00
+#define DNDISABLE 0x5000
+#define DNENABLE 0x4800
+#define DNSTALL 0x3002
+#define DNRESET 0x5800
+#define DNUNSTALL 0x3003
+#define UPRESET 0x2800
+#define UPSTALL 0x3000
+#define UPUNSTALL 0x3001
+#define SETCONFIG 0x4000
+#define SETTXSTARTTHRESH 0x9800
+
+/* Received Interrupts */
+#define ASBFINT (1<<13)
+#define SRBRINT (1<<14)
+#define ARBCINT (1<<15)
+#define TXUNDERRUN (1<<11)
+
+#define UPCOMPINT (1<<10)
+#define DNCOMPINT (1<<9)
+#define HARDERRINT (1<<7)
+#define RXCOMPLETE (1<<4)
+#define TXCOMPINT (1<<2)
+#define HOSTERRINT (1<<1)
+
+/* Receive descriptor bits */
+#define RXOVERRUN (1<<19)
+#define RXFC (1<<21)
+#define RXAR (1<<22)
+#define RXUPDCOMPLETE (1<<23)
+#define RXUPDFULL (1<<24)
+#define RXUPLASTFRAG (1<<31)
+
+/* Transmit descriptor bits */
+#define TXDNCOMPLETE (1<<16)
+#define TXTXINDICATE (1<<27)
+#define TXDPDEMPTY (1<<29)
+#define TXDNINDICATE (1<<31)
+#define TXDNFRAGLAST (1<<31)
+
+/* Interrupts to Acknowledge */
+#define LATCH_ACK 1
+#define TXCOMPACK (1<<1)
+#define INTREQACK (1<<2)
+#define DNCOMPACK (1<<3)
+#define UPCOMPACK (1<<4)
+#define ASBFACK (1<<5)
+#define SRBRACK (1<<6)
+#define ARBCACK (1<<7)
+
+#define XL_IO_SPACE 128
+#define SRB_COMMAND_SIZE 50
+
+/* Adapter Commands */
+#define REQUEST_INT 0x00
+#define MODIFY_OPEN_PARMS 0x01
+#define RESTORE_OPEN_PARMS 0x02
+#define OPEN_NIC 0x03
+#define CLOSE_NIC 0x04
+#define SET_SLEEP_MODE 0x05
+#define SET_GROUP_ADDRESS 0x06
+#define SET_FUNC_ADDRESS 0x07
+#define READ_LOG 0x08
+#define SET_MULTICAST_MODE 0x0C
+#define CHANGE_WAKEUP_PATTERN 0x0D
+#define GET_STATISTICS 0x13
+#define SET_RECEIVE_MODE 0x1F
+
+/* ARB Commands */
+#define RECEIVE_DATA 0x81
+#define RING_STATUS_CHANGE 0x84
+
+/* ASB Commands */
+#define ASB_RECEIVE_DATE 0x81
+
+/* Defines for LAN STATUS CHANGE reports */
+#define LSC_SIG_LOSS 0x8000
+#define LSC_HARD_ERR 0x4000
+#define LSC_SOFT_ERR 0x2000
+#define LSC_TRAN_BCN 0x1000
+#define LSC_LWF 0x0800
+#define LSC_ARW 0x0400
+#define LSC_FPE 0x0200
+#define LSC_RR 0x0100
+#define LSC_CO 0x0080
+#define LSC_SS 0x0040
+#define LSC_RING_REC 0x0020
+#define LSC_SR_CO 0x0010
+#define LSC_FDX_MODE 0x0004
+
+#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
+
+/* 3c359 defaults for buffers */
+
+#define XL_RX_RING_SIZE 16 /* must be a power of 2 */
+#define XL_TX_RING_SIZE 16 /* must be a power of 2 */
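+/*
+ * The power-of-2 requirement exists because the driver wraps its ring indices
+ * with a mask, e.g. rx_ring_tail &= (XL_RX_RING_SIZE - 1), rather than a modulo.
+ */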
+
+#define PKT_BUF_SZ 4096 /* Default packet size */
+
+/* 3c359 data structures */
+
+struct xl_tx_desc {
+ u32 dnnextptr ;
+ u32 framestartheader ;
+ u32 buffer ;
+ u32 buffer_length ;
+};
+
+struct xl_rx_desc {
+ u32 upnextptr ;
+ u32 framestatus ;
+ u32 upfragaddr ;
+ u32 upfraglen ;
+};
+
+struct xl_private {
+
+
+ /* These two structures must be aligned on 8 byte boundaries */
+
+ /* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */
+ /* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */
+ struct xl_rx_desc *xl_rx_ring ;
+ struct xl_tx_desc *xl_tx_ring ;
+ struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], *rx_ring_skb[XL_RX_RING_SIZE];
+ int tx_ring_head, tx_ring_tail ;
+ int rx_ring_tail, rx_ring_no ;
+ int free_ring_entries ;
+
+ u16 srb;
+ u16 arb;
+ u16 asb;
+
+ u8 __iomem *xl_mmio;
+ char *xl_card_name;
+ struct pci_dev *pdev ;
+
+ spinlock_t xl_lock ;
+
+ volatile int srb_queued;
+ struct wait_queue *srb_wait;
+ volatile int asb_queued;
+
+ struct net_device_stats xl_stats ;
+
+ u16 mac_buffer ;
+ u16 xl_lan_status ;
+ u8 xl_ring_speed ;
+ u16 pkt_buf_sz ;
+ u8 xl_message_level;
+ u16 xl_copy_all_options ;
+ unsigned char xl_functional_addr[4] ;
+ u16 xl_addr_table_addr, xl_parms_addr ;
+ u8 xl_laa[6] ;
+ u32 rx_ring_dma_addr ;
+ u32 tx_ring_dma_addr ;
+};
+
diff --git a/drivers/net/tokenring/3c359_microcode.h b/drivers/net/tokenring/3c359_microcode.h
new file mode 100644
index 000000000000..81354afa3d34
--- /dev/null
+++ b/drivers/net/tokenring/3c359_microcode.h
@@ -0,0 +1,1581 @@
+
+/*
+ * The firmware this driver downloads into the tokenring card is a
+ * separate program and is not GPL'd source code, even though the Linux
+ * side driver and the routine that loads this data into the card are.
+ *
+ * This firmware is licensed to you strictly for use in conjunction
+ * with the use of 3Com 3C359 TokenRing adapters. There is no
+ * warranty expressed or implied about its fitness for any purpose.
+ */
+
+/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode.
+ *
+ * Notes:
+ * - Loaded from xl_init upon adapter initialization.
+ *
+ * Available from 3Com as part of their standard 3C359 driver.
+ *
+ * mc_size *must* match the microcode being used; each version is a
+ * different length.
+ */
+
+static int mc_size = 24880 ;
+
+u8 microcode[] = {
+ 0xfe,0x3a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x33,0x2f,0x30,0x32,0x2f,0x39,0x39,0x20,0x31
+,0x37,0x3a,0x31,0x33,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x41,0x42,0x43,0x44,0x45,0x46
+,0x00,0x00,0x07,0xff,0x02,0x00,0xfe,0x9f,0x06,0x00,0x00,0x7c,0x48,0x00,0x00,0x70
+,0x82,0x00,0xff,0xff,0x86,0x00,0xff,0xff,0x88,0x00,0xff,0xff,0x9a,0x00,0xff,0xff
+,0xff,0xff,0x11,0x00,0xc0,0x00,0xff,0xff,0xff,0xff,0x11,0x22,0x33,0x44,0x55,0x66
+,0x33,0x43,0x4f,0x4d,0x20,0x42,0x41,0x42,0x45,0x11,0x40,0xc0,0x00,0xff,0xff,0xff
+,0xff,0x11,0x22,0x33,0x44,0x55,0x66,0x53,0x74,0x61,0x72,0x74,0x20,0x6f,0x66,0x20
+,0x4c,0x4c,0x43,0x20,0x66,0x72,0x61,0x6d,0x65,0x2e,0x20,0x20,0x54,0x6f,0x74,0x61
+,0x6c,0x20,0x64,0x61,0x74,0x61,0x20,0x73,0x69,0x7a,0x65,0x20,0x69,0x73,0x20,0x78
+,0x78,0x78,0x20,0x20,0x20,0x42,0x41,0x42,0x45,0xe8,0xd2,0x01,0x83,0x3e,0xf7,0x34
+,0x00,0x75,0x21,0xe8,0x41,0x00,0x83,0x3e,0xf7,0x34,0x00,0x75,0x17,0xe8,0x82,0x00
+,0x83,0x3e,0xf7,0x34,0x00,0x75,0x0d,0xe8,0xbf,0x00,0x83,0x3e,0xf7,0x34,0x00,0x75
+,0x03,0xe8,0x41,0x02,0xc3,0x1e,0xb8,0x00,0xf0,0x8e,0xd8,0x33,0xf6,0xb9,0x00,0x80
+,0x33,0xdb,0xad,0x03,0xd8,0xe2,0xfb,0x1f,0xb8,0x00,0x00,0x83,0xfb,0x00,0x74,0x03
+,0xb8,0x22,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0xba,0x56,0x00,0xb0,0xff,0xee,0x33,0xc0
+,0x8e,0xc0,0x33,0xf6,0xb9,0xff,0x7f,0x83,0x3e,0xff,0x34,0x00,0x74,0x08,0x8d,0x3e
+,0x30,0x61,0xd1,0xef,0x2b,0xcf,0x26,0x8b,0x1c,0x26,0xc7,0x04,0xff,0xff,0x26,0x83
+,0x3c,0xff,0x75,0x17,0x26,0xc7,0x04,0x00,0x00,0x26,0x83,0x3c,0x00,0x75,0x0c,0x26
+,0x89,0x1c,0x46,0x46,0xe2,0xe0,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x24,0x00,0xa3,0xf7
+,0x34,0xc3,0xfa,0xb4,0xd7,0x9e,0x73,0x3a,0x75,0x38,0x79,0x36,0x7b,0x34,0x9f,0xb1
+,0x05,0xd2,0xec,0x73,0x2d,0xb0,0x40,0xd0,0xe0,0x71,0x27,0x79,0x25,0xd0,0xe0,0x73
+,0x21,0x7b,0x1f,0x32,0xc0,0x75,0x1b,0x32,0xe4,0x9e,0x72,0x16,0x74,0x14,0x78,0x12
+,0x7a,0x10,0x9f,0xd2,0xec,0x72,0x0b,0xd0,0xe4,0x70,0x07,0x75,0x05,0xb8,0x00,0x00
+,0xeb,0x03,0xb8,0x26,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0xba,0x5a,0x00,0x33,0xc0,0xef
+,0xef,0xef,0xef,0xb0,0x00,0xe6,0x56,0xb0,0x00,0xe6,0x54,0xba,0x52,0x00,0xb8,0x01
+,0x01,0xef,0xe8,0xca,0x00,0x3c,0x01,0x75,0x7f,0xe8,0x83,0x00,0xba,0x52,0x00,0xb8
+,0x02,0x02,0xef,0xe8,0xb9,0x00,0x3c,0x02,0x75,0x6e,0xe8,0x7a,0x00,0xba,0x52,0x00
+,0xb8,0x04,0x04,0xef,0xe8,0xa8,0x00,0x3c,0x04,0x75,0x5d,0xe8,0x71,0x00,0xba,0x52
+,0x00,0xb8,0x08,0x08,0xef,0xe8,0x97,0x00,0x3c,0x08,0x75,0x4c,0xe8,0x68,0x00,0xba
+,0x52,0x00,0xb8,0x10,0x10,0xef,0xe8,0x86,0x00,0x3c,0x10,0x75,0x3b,0xe8,0x5f,0x00
+,0xba,0x52,0x00,0xb8,0x20,0x20,0xef,0xe8,0x75,0x00,0x3c,0x20,0x75,0x2a,0xe8,0x56
+,0x00,0xba,0x52,0x00,0xb8,0x40,0x40,0xef,0xe8,0x64,0x00,0x3c,0x40,0x75,0x19,0xe8
+,0x4d,0x00,0xba,0x52,0x00,0xb8,0x80,0x80,0xef,0xe8,0x53,0x00,0x3c,0x80,0x75,0x08
+,0xe8,0x44,0x00,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x28,0x00,0xa3,0xf7,0x34,0xc3,0xba
+,0x5a,0x00,0xb8,0x00,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x01,0x80,0xef,0xc3,0xba
+,0x5a,0x00,0xb8,0x02,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x03,0x80,0xef,0xc3,0xba
+,0x5a,0x00,0xb8,0x04,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x05,0x80,0xef,0xc3,0xba
+,0x5a,0x00,0xb8,0x06,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x07,0x80,0xef,0xc3,0xb9
+,0xff,0xff,0xe4,0x58,0xe4,0x54,0x3c,0x00,0x75,0x03,0x49,0x75,0xf7,0xc3,0xfa,0x32
+,0xc0,0xe6,0x56,0xe4,0x56,0x3c,0x00,0x74,0x03,0xe9,0x82,0x00,0xb0,0xff,0xe6,0x56
+,0xe4,0x56,0x3c,0xff,0x75,0x78,0xba,0x52,0x00,0xb8,0xff,0xff,0xef,0xed,0x3c,0xff
+,0x75,0x6c,0xb8,0x00,0xff,0xef,0xed,0x3c,0x00,0x75,0x63,0xb0,0xff,0xe6,0x54,0xe4
+,0x54,0x3c,0xff,0x75,0x59,0x32,0xc0,0xe6,0x54,0xe4,0x54,0x3c,0x00,0x75,0x4f,0xb0
+,0x0f,0xe6,0x50,0xe4,0x50,0x24,0x0f,0x3c,0x0f,0x75,0x43,0xb0,0x00,0xe6,0x50,0xe4
+,0x50,0x24,0x0f,0x3c,0x00,0x75,0x37,0x8c,0xc8,0x8e,0xc0,0xbe,0x70,0x00,0x26,0x8b
+,0x14,0x26,0x8b,0x5c,0x02,0xb8,0x00,0x00,0xef,0xed,0x23,0xc3,0x3d,0x00,0x00,0x75
+,0x1d,0xb8,0xff,0xff,0x23,0xc3,0xef,0x8b,0xc8,0xed,0x23,0xc3,0x3b,0xc1,0x75,0x0e
+,0x83,0xc6,0x04,0x26,0x83,0x3c,0xff,0x75,0xd5,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x2a
+,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0x33,0xc0,0xbf,0x00,0x20,0xb9,0x17,0x00,0xf3,0xab
+,0xbf,0x00,0x30,0xb9,0x17,0x00,0xf3,0xab,0xbf,0x00,0x22,0xb9,0x40,0x00,0xf3,0xab
+,0xbf,0x00,0x32,0xb9,0x40,0x00,0xf3,0xab,0xfc,0x1e,0x8c,0xc8,0x8e,0xd8,0x33,0xc0
+,0x8e,0xc0,0xbe,0x92,0x00,0xbf,0x00,0x20,0xb9,0x17,0x00,0xf3,0xa4,0xbe,0xa9,0x00
+,0xbf,0x00,0x22,0xb9,0x40,0x00,0xf3,0xa4,0x1f,0xc7,0x06,0xfb,0x34,0x64,0x00,0xba
+,0x08,0x00,0xb8,0x0f,0x00,0xef,0xe8,0x82,0x01,0xe8,0x9b,0x01,0x72,0x0d,0xc7,0x06
+,0xf7,0x34,0x2c,0x00,0xc7,0x06,0xf9,0x34,0x04,0x00,0xc3,0xba,0x0a,0x00,0x33,0xc0
+,0xef,0xe8,0x98,0x01,0xe8,0xb5,0x01,0xb8,0x17,0x00,0xba,0x9c,0x00,0xef,0xb8,0x00
+,0x10,0xba,0x9a,0x00,0xef,0xb8,0x17,0x00,0xa9,0x01,0x00,0x74,0x01,0x40,0xba,0x8c
+,0x00,0xef,0xb8,0x00,0x18,0xba,0x86,0x00,0xef,0xb8,0x0c,0x00,0xba,0x82,0x00,0xef
+,0xba,0x02,0x00,0xed,0x25,0xf9,0xff,0x0d,0x02,0x00,0xef,0xba,0x06,0x00,0x33,0xc0
+,0xef,0xba,0x04,0x00,0xb8,0x60,0x00,0xef,0xba,0x00,0x00,0xb8,0x18,0x00,0xef,0xba
+,0x80,0x00,0xb9,0xff,0xff,0xed,0xa9,0x01,0x00,0x75,0x04,0xe2,0xf8,0xeb,0x3e,0xba
+,0x0a,0x00,0xed,0xa9,0x00,0x40,0x74,0x35,0xa9,0x00,0x20,0x74,0x30,0x33,0xc0,0xef
+,0x51,0xb9,0xc8,0x00,0xe2,0xfe,0x59,0x1e,0x06,0x1f,0x26,0x8b,0x0e,0x02,0x30,0x83
+,0xf9,0x17,0x75,0x18,0x49,0x49,0xbe,0x02,0x20,0xbf,0x06,0x30,0xf3,0xa6,0x1f,0x23
+,0xc9,0x75,0x0a,0xff,0x0e,0xfb,0x34,0x74,0x12,0xe9,0x4d,0xff,0x1f,0xb8,0x2c,0x00
+,0xbb,0x00,0x00,0xa3,0xf7,0x34,0x89,0x1e,0xf9,0x34,0xc3,0xc7,0x06,0xfb,0x34,0x64
+,0x00,0xe8,0xd3,0x00,0x72,0x0d,0xc7,0x06,0xf7,0x34,0x2c,0x00,0xc7,0x06,0xf9,0x34
+,0x04,0x00,0xc3,0xe8,0xd6,0x00,0xe8,0xf3,0x00,0xb8,0x03,0x00,0xba,0x82,0x00,0xef
+,0xb8,0x40,0x80,0xba,0x98,0x00,0xef,0xb8,0x00,0x11,0xba,0x96,0x00,0xef,0xb8,0x40
+,0x00,0xa9,0x01,0x00,0x74,0x01,0x40,0xba,0x92,0x00,0xef,0xb8,0x00,0x19,0xba,0x8e
+,0x00,0xef,0xba,0x02,0x00,0xed,0x25,0xf9,0xff,0x0d,0x06,0x00,0xef,0xba,0x06,0x00
+,0x33,0xc0,0xef,0xba,0x00,0x00,0xb8,0x18,0x00,0xef,0xba,0x80,0x00,0xb9,0xff,0xff
+,0xed,0xa9,0x20,0x00,0x75,0x04,0xe2,0xf8,0xeb,0x43,0xba,0x0a,0x00,0xed,0xa9,0x00
+,0x40,0x74,0x3a,0xa9,0x00,0x20,0x74,0x35,0x33,0xc0,0xef,0x51,0xb9,0xc8,0x00,0xe2
+,0xfe,0x59,0x1e,0x06,0x1f,0x26,0x8b,0x0e,0x02,0x32,0x83,0xf9,0x40,0x75,0x1d,0x49
+,0x49,0xbe,0x02,0x22,0xbf,0x06,0x32,0xf3,0xa6,0x1f,0x23,0xc9,0x75,0x0f,0xff,0x0e
+,0xfb,0x34,0x74,0x03,0xe9,0x5a,0xff,0xb8,0x00,0x00,0xeb,0x0b,0x1f,0xb8,0x2c,0x00
+,0xbb,0x02,0x00,0x89,0x1e,0xf9,0x34,0xa3,0xf7,0x34,0xc3,0xba,0x02,0x00,0xb8,0x00
+,0x9c,0xef,0xba,0x00,0x00,0xb8,0x00,0x84,0xef,0x33,0xc0,0xef,0xba,0x0a,0x00,0xef
+,0xba,0x0e,0x00,0x33,0xc0,0xef,0xc3,0xba,0x0a,0x00,0xb9,0xff,0xff,0xed,0x25,0x00
+,0x60,0x3d,0x00,0x60,0x74,0x04,0xe2,0xf5,0xf8,0xc3,0xf9,0xc3,0xb0,0x00,0xe6,0x56
+,0xb8,0x00,0xff,0xba,0x52,0x00,0xef,0xb9,0xff,0xff,0xba,0x58,0x00,0xed,0x25,0xef
+,0x00,0x74,0x08,0xba,0x5a,0x00,0x33,0xc0,0xef,0xe2,0xef,0xc3,0xba,0x80,0x00,0xed
+,0xba,0x84,0x00,0xef,0xba,0x80,0x00,0xed,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xc6,0x06,0xec,0x34,0x15,0x33,0xc0,0x8e,0xd8,0x8e,0xc0,0x1e,0x8c,0xc8,0xbe,0x40
+,0x54,0xbf,0x60,0xfe,0x8e,0xd8,0xb9,0x10,0x00,0xf3,0xa4,0x1f,0xc7,0x06,0x80,0x36
+,0x10,0x35,0xc7,0x06,0x8c,0x36,0x30,0x35,0x8d,0x06,0x38,0x35,0xa3,0x30,0x35,0xa3
+,0x32,0x35,0x05,0x33,0x01,0xa3,0x34,0x35,0xc7,0x06,0x36,0x35,0x50,0x01,0xc7,0x06
+,0x84,0x36,0x80,0xfe,0xc7,0x06,0x88,0x36,0xc0,0xfe,0xc6,0x06,0xc2,0xfe,0xff,0xc6
+,0x06,0x93,0x36,0x80,0xc6,0x06,0x92,0x36,0x00,0xc6,0x06,0x80,0xfe,0x80,0xc7,0x06
+,0x82,0xfe,0x54,0x50,0xc7,0x06,0x84,0xfe,0x2b,0x4d,0xe5,0xce,0xa9,0x02,0x00,0x75
+,0x08,0xc6,0x06,0x81,0xfe,0x23,0xe9,0x05,0x00,0xc6,0x06,0x81,0xfe,0x22,0xa1,0xf7
+,0x34,0xa3,0x86,0xfe,0xb8,0x48,0x34,0x86,0xe0,0xa3,0x88,0xfe,0x8d,0x06,0x4e,0x34
+,0x86,0xe0,0xa3,0x8a,0xfe,0xb8,0x58,0x34,0x86,0xe0,0xa3,0x8c,0xfe,0xb8,0x9c,0x34
+,0x86,0xe0,0xa3,0x8e,0xfe,0x8d,0x06,0x20,0x03,0x86,0xe0,0xa3,0x90,0xfe,0x33,0xc0
+,0xba,0x72,0x00,0xef,0x33,0xc0,0xba,0x74,0x00,0xef,0xba,0x76,0x00,0xef,0xb8,0x80
+,0xfe,0x86,0xe0,0xba,0x72,0x00,0xef,0xe8,0xbf,0x07,0xba,0x0c,0x01,0xb8,0x40,0x40
+,0xef,0xed,0xba,0x6a,0x00,0xb8,0x03,0x00,0xc1,0xe0,0x08,0x0d,0x03,0x00,0xef,0xb9
+,0x0a,0x00,0xe8,0x94,0x00,0xba,0x6a,0x00,0xb8,0x03,0x00,0xc1,0xe0,0x08,0xef,0xa1
+,0x32,0x34,0xa3,0xa2,0x33,0xc7,0x06,0xa6,0x33,0x04,0x00,0x8d,0x06,0xa0,0x33,0xc1
+,0xe8,0x04,0xcd,0x39,0xc7,0x06,0x90,0x36,0xff,0xff,0xe9,0xe3,0x00,0x63,0x0d,0x66
+,0x0d,0x66,0x0d,0x8a,0x0d,0xe6,0x0e,0x75,0x12,0x2e,0x0f,0x03,0x0f,0x50,0x0f,0x60
+,0x0d,0x60,0x0d,0x60,0x0d,0xed,0x0f,0xe9,0x12,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
+,0x0d,0x60,0x0d,0x22,0x10,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0xfe,0x10,0x60
+,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0xaf,0x0f,0x32,0x10,0x37
+,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
+,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
+,0x0d,0x64,0x0e,0x00,0x0f,0x95,0x09,0x60,0x0a,0x49,0xbb,0xff,0xff,0xba,0x6a,0x00
+,0xed,0xa9,0x00,0x20,0x74,0x38,0x80,0x3e,0x80,0xfe,0x12,0x75,0x31,0xe8,0x4a,0x00
+,0xa1,0x32,0x34,0xa3,0xa2,0x33,0xc7,0x06,0xa6,0x33,0x04,0x00,0x8d,0x06,0xa0,0x33
+,0xc1,0xe8,0x04,0xcd,0x39,0xe8,0x22,0x00,0xc7,0x06,0xf3,0x34,0x46,0x00,0xc7,0x06
+,0xf5,0x34,0xff,0xff,0xc7,0x06,0x90,0x36,0xff,0xff,0x58,0xe9,0x32,0x00,0x4b,0x83
+,0xfb,0x00,0x75,0xb9,0x83,0xf9,0x00,0x75,0xb0,0xc3,0x52,0xba,0x6a,0x00,0xb8,0x03
+,0x00,0xc1,0xe0,0x08,0x0d,0x03,0x00,0xef,0x5a,0xc3,0x52,0xba,0x6a,0x00,0xb8,0x03
+,0x00,0xc1,0xe0,0x08,0xef,0x5a,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x68,0x80,0x07,0xa1,0x90,0x36,0xcd,0x35,0x8b,0x36,0x24,0x02,0x2e,0xff,0xa4,0x35
+,0x0a,0xfa,0x8a,0x26,0x94,0x36,0x88,0x26,0xe8,0x34,0xc6,0x06,0x94,0x36,0x00,0xfb
+,0x22,0xe4,0x75,0x01,0xc3,0xf6,0xc4,0x20,0x74,0x7d,0xf6,0xc4,0x08,0x74,0x05,0x80
+,0x0e,0x92,0x36,0x04,0x80,0x26,0xe8,0x34,0xd7,0xc4,0x1e,0x84,0x36,0x26,0x8b,0x37
+,0x81,0xe6,0xff,0x00,0x83,0xfe,0x20,0x76,0x05,0xb0,0x01,0xe9,0x28,0x00,0x53,0x06
+,0xd1,0xe6,0x2e,0xff,0x94,0x9d,0x06,0x07,0x5b,0x26,0x88,0x47,0x02,0x3c,0xff,0x74
+,0x07,0x3c,0xfe,0x75,0x11,0xe9,0x3b,0x00,0xf6,0x06,0x92,0x36,0x08,0x75,0x34,0xf6
+,0x06,0x92,0x36,0x04,0x74,0x2d,0x80,0x26,0x92,0x36,0xf3,0x80,0x3e,0x95,0x36,0x00
+,0x75,0x21,0x26,0x80,0x3f,0x05,0x75,0x13,0xc6,0x06,0x95,0x36,0x00,0x26,0x80,0x7f
+,0x06,0x00,0x74,0x07,0x26,0x8b,0x47,0x04,0xa2,0x95,0x36,0xba,0x0c,0x01,0xb8,0x40
+,0x40,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x10,0x75,0x03,0xe9,0x5b,0x00,0xf6
+,0xc4,0x04,0x74,0x05,0x80,0x0e,0x92,0x36,0x01,0x80,0x26,0xe8,0x34,0xeb,0xc4,0x3e
+,0x88,0x36,0x26,0x8b,0x35,0x83,0xe6,0x7f,0x83,0xfe,0x12,0x72,0x08,0x26,0xc6,0x45
+,0x02,0x01,0xe9,0x24,0x00,0x83,0xc6,0x20,0xd1,0xe6,0x2e,0xff,0x94,0x9d,0x06,0xc4
+,0x3e,0x88,0x36,0x26,0x88,0x45,0x02,0x3c,0xff,0x75,0x0e,0xf6,0x06,0x92,0x36,0x01
+,0x74,0x14,0xf6,0x06,0x92,0x36,0x02,0x75,0x0d,0x80,0x26,0x92,0x36,0xfc,0xba,0x0c
+,0x01,0xb8,0x20,0x20,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x08,0x74,0x22,0x80
+,0x26,0xe8,0x34,0xf7,0x80,0x0e,0x92,0x36,0x04,0xf6,0x06,0x92,0x36,0x08,0x74,0x11
+,0x80,0x26,0x92,0x36,0xf3,0xba,0x0c,0x01,0xb8,0x40,0x40,0xef,0xed,0x8a,0x26,0xe8
+,0x34,0xf6,0xc4,0x04,0x74,0x22,0x80,0x26,0xe8,0x34,0xfb,0x80,0x0e,0x92,0x36,0x01
+,0xf6,0x06,0x92,0x36,0x02,0x75,0x11,0x80,0x26,0x92,0x36,0xfe,0xba,0x0c,0x01,0xb8
+,0x20,0x20,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x01,0x74,0x67,0x80,0x26,0xe8
+,0x34,0xfe,0x80,0x3e,0xe8,0xff,0x00,0x74,0x39,0x80,0x3e,0xe8,0xff,0x04,0x74,0x32
+,0x80,0x3e,0xe8,0xff,0x01,0x75,0x21,0xe5,0x80,0xa9,0x00,0x07,0x74,0x0a,0xba,0x9e
+,0x00,0xb8,0x00,0x02,0xef,0xe9,0xef,0xff,0xc6,0x06,0xe8,0xff,0x03,0xba,0x0c,0x01
+,0xb8,0x08,0x08,0xef,0xed,0xe9,0x28,0x00,0x80,0x3e,0xe8,0xff,0x03,0x74,0x06,0xe9
+,0x1e,0x00,0xe9,0x00,0x00,0xba,0x10,0x01,0xb8,0x02,0x02,0xef,0xed,0xe5,0x00,0x0d
+,0x18,0x00,0xe7,0x00,0xe5,0x82,0x0d,0x02,0x00,0xe7,0x82,0xc6,0x06,0xe8,0xff,0x04
+,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x02,0x74,0x0d,0x80,0x26,0xe8,0x34,0xfd,0x80,0x26
+,0x92,0x36,0xbf,0xe8,0x4f,0x0b,0xfa,0xa0,0xe8,0x34,0x08,0x06,0x94,0x36,0xc6,0x06
+,0xe8,0x34,0x00,0xfb,0xc3,0xe8,0xe7,0x0f,0xc4,0x1e,0x84,0x36,0x2e,0xff,0x16,0x01
+,0x07,0x26,0x88,0x47,0x02,0xe9,0x7e,0xfe,0xe8,0x2d,0x10,0xc4,0x1e,0x84,0x36,0x2e
+,0xff,0x16,0x03,0x07,0x26,0x88,0x47,0x02,0xe9,0x6b,0xfe,0x8e,0x06,0x26,0x02,0x2e
+,0xff,0x16,0x07,0x07,0xc3,0xc3,0x83,0x3e,0xf5,0x34,0x00,0x74,0x0f,0xff,0x0e,0xf3
+,0x34,0x75,0x09,0xe8,0xc4,0xfd,0xc7,0x06,0xf5,0x34,0x00,0x00,0xf6,0x06,0x93,0x36
+,0x20,0x74,0x30,0xa1,0xc2,0x34,0x3b,0x06,0xe9,0x34,0xa3,0xe9,0x34,0x74,0x24,0x80
+,0x3e,0x95,0x36,0x00,0x75,0x1d,0xf7,0x06,0xe6,0x34,0x20,0x00,0x74,0x12,0xa9,0x20
+,0x00,0x74,0x0d,0x83,0x26,0xc2,0x34,0xdf,0x83,0x26,0xe9,0x34,0xdf,0xe9,0x03,0x00
+,0xe8,0xdd,0x09,0xba,0x06,0x01,0xed,0x8b,0xd0,0x81,0xe2,0x00,0xc0,0xc1,0xea,0x0e
+,0x03,0x16,0x74,0x34,0xc1,0xe0,0x02,0x11,0x06,0x72,0x34,0x73,0x04,0xff,0x06,0x74
+,0x34,0xba,0x02,0x01,0xed,0x8b,0xd0,0x81,0xe2,0x00,0xc0,0xc1,0xea,0x0e,0x03,0x16
+,0x70,0x34,0xc1,0xe0,0x02,0x11,0x06,0x6e,0x34,0x73,0x04,0xff,0x06,0x70,0x34,0xc7
+,0x06,0xa6,0x33,0x04,0x00,0xc7,0x06,0xaa,0x33,0x00,0x00,0x8d,0x06,0xa0,0x33,0xc1
+,0xe8,0x04,0xcd,0x39,0xc3,0x95,0x09,0x95,0x09,0x65,0x09,0x78,0x09,0x95,0x09,0x95
+,0x09,0x91,0x07,0x95,0x09,0x96,0x09,0x8b,0x09,0x95,0x09,0x95,0x09,0x95,0x09,0x95
+,0x09,0x95,0x09,0x95,0x09,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x90
+,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xe9,0xcc,0x00,0x8c,0xc0,0x40,0x8e,0xc0,0x26
+,0x8b,0x0e,0x06,0x00,0x86,0xe9,0x26,0x89,0x0e,0x06,0x00,0x8c,0xc2,0xc1,0xe2,0x04
+,0xbe,0x0e,0x00,0x26,0xa1,0x04,0x00,0xd0,0xe0,0x24,0xc0,0x8a,0xe0,0xc0,0xec,0x04
+,0x0a,0xc4,0x26,0xa2,0x05,0x00,0x26,0xa1,0x08,0x00,0xa9,0x00,0xc0,0x74,0x03,0xe9
+,0x9e,0x00,0x26,0xf6,0x06,0x10,0x00,0x80,0x75,0x03,0xe9,0x0a,0x00,0x26,0xa0,0x16
+,0x00,0x24,0x1f,0x32,0xe4,0x03,0xf0,0x80,0x3e,0xec,0x34,0x06,0x72,0x5c,0x80,0x3e
+,0x95,0x36,0x00,0x75,0x66,0x8b,0xfa,0x33,0xdb,0x8e,0xc3,0x26,0x89,0x1d,0x26,0x88
+,0x5d,0x04,0x51,0x50,0xc4,0x1e,0x8c,0x36,0xb9,0x0f,0x00,0x33,0xc0,0xe8,0x21,0x09
+,0x58,0x59,0x0b,0xdb,0x74,0x34,0xfe,0x0e,0xe6,0x3a,0x26,0xc6,0x07,0x81,0x26,0xc6
+,0x47,0x01,0x00,0x26,0xc6,0x47,0x02,0xff,0x26,0xc7,0x47,0x04,0x00,0x00,0x26,0x89
+,0x4f,0x0a,0x86,0xf2,0x26,0x89,0x57,0x06,0x26,0x89,0x77,0x08,0x26,0xc6,0x47,0x09
+,0x00,0x26,0xc6,0x47,0x0c,0x02,0xe8,0x8c,0x09,0xc3,0xff,0x06,0xec,0x33,0x8c,0xc0
+,0x48,0x8e,0xc0,0xfa,0xe8,0x97,0x10,0xfb,0xe9,0xeb,0xff,0x8c,0xc0,0x48,0x8e,0xc0
+,0xfa,0xe8,0x8a,0x10,0xfb,0xc3,0x8c,0xc0,0x8e,0xc0,0xfa,0xe8,0x80,0x10,0xfb,0xc3
+,0x80,0x3e,0x95,0x36,0x00,0x75,0x03,0xe9,0xc2,0x00,0xbf,0x08,0x00,0x26,0xf6,0x06
+,0x10,0x00,0x80,0x75,0x05,0x03,0xfe,0xe9,0x0c,0x00,0x26,0xa0,0x16,0x00,0x24,0x1f
+,0x32,0xe4,0x03,0xf0,0x03,0xfe,0xa0,0x95,0x36,0x3c,0x00,0x75,0x03,0xe9,0x9c,0x00
+,0x3c,0x01,0x74,0x0b,0x3c,0x02,0x74,0x14,0x3c,0x03,0x74,0x1d,0xe9,0x8d,0x00,0xc6
+,0x06,0x96,0x36,0x01,0xe8,0x3c,0x01,0x72,0x27,0xe9,0x80,0x00,0xc6,0x06,0x96,0x36
+,0x02,0xe8,0x83,0x00,0x72,0x1a,0xe9,0x73,0x00,0xc6,0x06,0x96,0x36,0x01,0xe8,0x22
+,0x01,0x72,0x0d,0xc6,0x06,0x96,0x36,0x02,0xe8,0x6c,0x00,0x72,0x03,0xe9,0x5c,0x00
+,0x53,0x06,0x50,0xc4,0x1e,0x8c,0x36,0xb9,0x0b,0x00,0x33,0xc0,0xe8,0x42,0x08,0x58
+,0x26,0xc6,0x07,0x82,0x26,0xc6,0x47,0x02,0xff,0x8d,0x06,0xe0,0xfe,0x86,0xc4,0x26
+,0x89,0x47,0x06,0xa0,0x96,0x36,0x26,0x88,0x47,0x08,0xe8,0xc8,0x08,0x07,0x5b,0x83
+,0x26,0xad,0x36,0xfe,0xa1,0xad,0x36,0xe7,0x04,0xba,0x10,0x01,0xb8,0x80,0x80,0xef
+,0xed,0xba,0x10,0x01,0xb8,0x02,0x02,0xef,0xed,0x52,0xba,0xe0,0x00,0xb8,0x41,0x10
+,0xef,0x5a,0xb8,0x9c,0x03,0xcd,0x39,0xc6,0x06,0x95,0x36,0x00,0x8c,0xc0,0x48,0x8e
+,0xc0,0xfa,0xe8,0xa9,0x0f,0xfb,0xc3,0x1e,0x06,0x1f,0x06,0x33,0xc0,0x8e,0xc0,0x8b
+,0xf0,0x8d,0x3e,0x20,0xf3,0x51,0xb1,0x0a,0x26,0x83,0x7d,0x0c,0x01,0x75,0x2a,0x57
+,0x26,0x83,0x7d,0x0e,0x00,0x74,0x06,0xe8,0x2f,0x00,0xe9,0x03,0x00,0xe8,0x66,0x07
+,0x5f,0x73,0x16,0x33,0xc0,0x8e,0xd8,0x26,0x8b,0x4d,0x12,0x8d,0x75,0x20,0x8d,0x3e
+,0xe0,0xfe,0xf3,0xa4,0x59,0x07,0x1f,0xf9,0xc3,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20
+,0x01,0xe9,0xc4,0xff,0x59,0x07,0x1f,0xf8,0xc3,0x51,0x50,0x53,0x56,0x52,0x57,0x33
+,0xdb,0x26,0x8a,0x5d,0x0e,0x26,0x8b,0x4d,0x12,0x8d,0x7d,0x20,0x5a,0x87,0xd7,0x26
+,0x8a,0x45,0x14,0x87,0xd7,0x42,0x32,0xff,0x80,0xff,0x08,0x75,0x08,0xfe,0xcb,0x22
+,0xdb,0x75,0xea,0x33,0xdb,0x23,0xdb,0x74,0x06,0xfe,0xc7,0xd0,0xc8,0x73,0x0c,0x50
+,0x26,0x8a,0x05,0x38,0x04,0x58,0x74,0x03,0xe9,0x0a,0x00,0x49,0x46,0x47,0x23,0xc9
+,0x74,0x0a,0xe9,0xd3,0xff,0x5a,0x5e,0x5b,0x58,0x59,0xf8,0xc3,0x5a,0x5e,0x5b,0x58
+,0x59,0xf9,0xc3,0x1e,0x06,0x1f,0x06,0x33,0xc0,0x8e,0xc0,0x86,0xcd,0x2b,0xce,0x8b
+,0xf7,0x8b,0xc1,0x33,0xc9,0x80,0x3c,0xff,0x74,0x16,0x80,0xf9,0x06,0x73,0x09,0x32
+,0xc9,0x46,0x48,0x74,0x2e,0xe9,0xed,0xff,0x3d,0x60,0x00,0x73,0x0c,0xe9,0x23,0x00
+,0xfe,0xc1,0x46,0x48,0x74,0x1d,0xe9,0xdc,0xff,0xb8,0x10,0x00,0x8d,0x3e,0x18,0x34
+,0x32,0xed,0xb1,0x06,0xf3,0xa6,0x74,0x03,0xe9,0x08,0x00,0x48,0x23,0xc0,0x74,0x07
+,0xe9,0xe9,0xff,0x07,0x1f,0xf8,0xc3,0x8d,0x36,0x18,0x34,0x33,0xc0,0x8e,0xd8,0x8d
+,0x3e,0xe0,0xfe,0xb8,0x10,0x00,0xb9,0x06,0x00,0x56,0xf3,0xa4,0x5e,0x48,0x3d,0x00
+,0x00,0x75,0xf3,0x07,0x1f,0xf9,0xc3,0xff,0x06,0xe4,0x33,0xc6,0x06,0xeb,0x34,0x00
+,0x26,0x8b,0x45,0x06,0x86,0xe0,0xc1,0xe8,0x04,0x48,0x06,0x8e,0xc0,0xfe,0x06,0xe6
+,0x3a,0xfa,0xe8,0x69,0x0e,0xfb,0x07,0xb0,0xff,0xc3,0x00,0x00,0x00,0x00,0x00,0x00
+,0xb0,0x01,0xc3,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3
+,0x8b,0x0e,0x97,0x36,0x81,0xe1,0x80,0x30,0x26,0x8b,0x47,0x04,0x25,0x7f,0xcf,0x0b
+,0xc1,0xa3,0x97,0x36,0xa3,0xe6,0x34,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x74
+,0x03,0xb0,0x03,0xc3,0x26,0x8b,0x47,0x08,0xa3,0x97,0x36,0xa3,0xe6,0x34,0x26,0x8a
+,0x47,0x20,0xa2,0xfd,0x34,0x3c,0x01,0x75,0x06,0xc7,0x06,0xa1,0x36,0x00,0x00,0x26
+,0x8a,0x47,0x21,0xa2,0xfe,0x34,0x26,0x8b,0x47,0x0a,0xa3,0x18,0x34,0xa3,0x58,0x34
+,0x26,0x8b,0x47,0x0c,0xa3,0x1a,0x34,0xa3,0x5a,0x34,0x26,0x8b,0x47,0x0e,0xa3,0x1c
+,0x34,0xa3,0x5c,0x34,0xc6,0x06,0x2a,0x34,0xc0,0x26,0x8b,0x47,0x14,0x25,0x7f,0xff
+,0x09,0x06,0x2c,0x34,0x26,0x8b,0x47,0x16,0x25,0xff,0xfe,0x25,0xff,0xfc,0x09,0x06
+,0x2e,0x34,0xc6,0x06,0x00,0x34,0xc0,0x26,0x8b,0x47,0x10,0xa3,0x02,0x34,0x26,0x8b
+,0x47,0x12,0xa3,0x04,0x34,0x06,0x53,0xe8,0x84,0x0a,0x5b,0x07,0x3d,0x00,0x00,0x75
+,0x07,0x80,0x0e,0x92,0x36,0x08,0xb0,0xfe,0xc3,0xb9,0x00,0x01,0xa1,0xac,0x33,0x33
+,0xd2,0xf7,0xf9,0xa3,0xae,0x33,0x91,0x49,0x33,0xd2,0xf7,0xe9,0x05,0x00,0x3b,0xa3
+,0x46,0x34,0xbf,0x00,0x3b,0x89,0x3e,0x44,0x34,0xba,0x68,0x00,0xb8,0xe0,0xe0,0xef
+,0xa1,0xae,0x33,0xe7,0x62,0xa1,0xae,0x33,0xba,0x08,0x01,0xef,0xa1,0x44,0x34,0xe7
+,0x64,0xa1,0x44,0x34,0xba,0x0a,0x01,0xef,0xb8,0x00,0x01,0x2d,0x04,0x00,0x0d,0x00
+,0x10,0xe7,0x92,0xc3,0x3d,0x00,0x00,0x74,0x0a,0x26,0x89,0x47,0x07,0xe8,0x83,0x3a
+,0xb0,0x07,0xc3,0xa1,0xae,0x33,0x26,0x89,0x47,0x2b,0xa1,0x44,0x34,0x26,0x89,0x47
+,0x2d,0xa1,0x46,0x34,0x26,0x89,0x47,0x2f,0x80,0x0e,0x93,0x36,0x20,0xa1,0x88,0x36
+,0x86,0xe0,0x26,0x89,0x47,0x08,0xa1,0x84,0x36,0x86,0xe0,0x26,0x89,0x47,0x0a,0xa1
+,0x80,0x36,0x86,0xe0,0x26,0x89,0x47,0x0c,0xb8,0x60,0xfe,0x86,0xe0,0x26,0x89,0x47
+,0x0e,0xa0,0xa1,0x36,0x26,0x88,0x47,0x10,0x8b,0x36,0x88,0x36,0x26,0xc6,0x44,0x02
+,0xff,0xe5,0x9e,0xa9,0x00,0x08,0x74,0x0c,0xba,0x84,0x00,0xed,0x0d,0x08,0x00,0xef
+,0xba,0x8e,0x00,0xef,0xe5,0x02,0x25,0xf9,0xff,0xe7,0x02,0xba,0x10,0x01,0xb8,0x02
+,0x02,0xef,0xed,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x01,0xc3
+,0x80,0x26,0x93,0x36,0x9f,0xe8,0x8d,0x0a,0x80,0x0e,0x92,0x36,0x08,0xb0,0xfe,0xc3
+,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xc6,0x06,0x2a
+,0x34,0xc0,0x26,0x8b,0x47,0x06,0x25,0x7f,0xff,0xa3,0x2c,0x34,0x26,0x8b,0x47,0x08
+,0x25,0xff,0xfe,0x25,0xff,0xfc,0xa3,0x2e,0x34,0xcd,0x52,0xb0,0x00,0xc3,0xf6,0x06
+,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xc6,0x06,0x00,0x34,0xc0,0x26,0x8b,0x47
+,0x06,0xa3,0x02,0x34,0x26,0x8b,0x47,0x08,0xa3,0x04,0x34,0xcd,0x52,0xb0,0x00,0xc3
+,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x57,0x8d,0x7f,0x06,0x51,0xb9
+,0x07,0x00,0x33,0xc0,0xf3,0xab,0x59,0x8d,0x7f,0x06,0xa1,0x7a,0x34,0x03,0x06,0x39
+,0x37,0x26,0x88,0x05,0xa1,0x95,0x37,0x26,0x88,0x45,0x02,0xa1,0x80,0x34,0x03,0x06
+,0x76,0x34,0x26,0x88,0x45,0x07,0xa1,0xc6,0x34,0x26,0x88,0x45,0x09,0xa1,0xd8,0x33
+,0x26,0x88,0x45,0x0a,0x33,0xc0,0xa3,0x7a,0x34,0xa3,0x39,0x37,0xa3,0x95,0x37,0xa3
+,0x80,0x34,0xa3,0x76,0x34,0xa3,0xc6,0x34,0xa3,0xd8,0x33,0x5f,0xb0,0x00,0xc3,0xf6
+,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x26,0x8b,0x4f,0x04,0x83,0xf9,0x06
+,0x74,0x12,0x83,0xf9,0x04,0x74,0x0d,0x83,0xf9,0x00,0x74,0x08,0x83,0xf9,0x02,0x74
+,0x03,0xb0,0x01,0xc3,0x89,0x0e,0xe8,0x3a,0x83,0x26,0xab,0x36,0xf9,0x09,0x0e,0xab
+,0x36,0xe5,0x02,0x25,0xf9,0xff,0x0b,0xc1,0xe7,0x02,0xb0,0x00,0xc3,0xf6,0x06,0x93
+,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x26,0x8b,0x4f,0x04,0x80,0xf9,0xff,0x74,0x08
+,0x80,0xf9,0x00,0x74,0x10,0xb0,0x01,0xc3,0x83,0x0e,0xad,0x36,0x02,0xa1,0xad,0x36
+,0xe7,0x04,0xe9,0x0a,0x00,0x83,0x26,0xad,0x36,0xfd,0xa1,0xad,0x36,0xe7,0x04,0xb0
+,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xe8,0xd5,0x04,0xb0
+,0x00,0xc3,0xf6,0x06,0x93,0x36,0x80,0x75,0x03,0xb0,0x01,0xc3,0x26,0x83,0x7f,0x06
+,0x05,0x75,0x03,0xe9,0x9d,0x00,0x26,0x8b,0x57,0x04,0x26,0x8b,0x47,0x08,0x26,0x81
+,0x7f,0x06,0x00,0x80,0x75,0x08,0xed,0x26,0x89,0x47,0x0a,0xe9,0x9d,0x00,0x26,0x83
+,0x7f,0x06,0x01,0x75,0x04,0xef,0xe9,0x92,0x00,0x26,0x81,0x7f,0x06,0x01,0x80,0x75
+,0x09,0xef,0xed,0x26,0x89,0x47,0x0a,0xe9,0x81,0x00,0x26,0x83,0x7f,0x06,0x02,0x75
+,0x07,0x26,0x21,0x47,0x04,0xe9,0x73,0x00,0x26,0x81,0x7f,0x06,0x02,0x80,0x75,0x0c
+,0x26,0x21,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x5f,0x00,0x26,0x83,0x7f,0x06
+,0x03,0x75,0x07,0x26,0x09,0x47,0x04,0xe9,0x51,0x00,0x26,0x81,0x7f,0x06,0x03,0x80
+,0x75,0x0c,0x26,0x09,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x3d,0x00,0x26,0x83
+,0x7f,0x06,0x04,0x75,0x07,0x26,0x31,0x47,0x04,0xe9,0x2f,0x00,0x26,0x81,0x7f,0x06
+,0x04,0x80,0x75,0x0c,0x26,0x31,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x1b,0x00
+,0xb0,0x01,0xc3,0xfa,0x53,0x26,0x8b,0x4f,0x08,0x0b,0xc9,0x74,0x0c,0x8d,0x1e,0xe0
+,0xfe,0xe8,0x52,0xff,0x83,0xc3,0x08,0xe2,0xf8,0x5b,0xfb,0xb0,0x00,0xc3,0xf6,0x06
+,0x93,0x36,0x80,0x75,0x0a,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x01,0xc3,0x8d
+,0x3e,0xe0,0xfe,0xe5,0x00,0x26,0x89,0x05,0xe5,0x02,0x26,0x89,0x45,0x02,0xa1,0xad
+,0x36,0x26,0x89,0x45,0x04,0xe5,0x06,0x26,0x89,0x45,0x06,0xe5,0x08,0x26,0x89,0x45
+,0x08,0xe5,0x0a,0x26,0x89,0x45,0x0a,0xe5,0x0e,0x26,0x89,0x45,0x0c,0xe5,0x48,0x26
+,0x89,0x45,0x0e,0xe5,0x4a,0x26,0x89,0x45,0x10,0xe5,0x4c,0x26,0x89,0x45,0x12,0xa1
+,0xb7,0x36,0x26,0x89,0x45,0x14,0xe5,0x50,0x26,0x89,0x45,0x16,0xe5,0x52,0x26,0x89
+,0x45,0x18,0xe5,0x54,0x26,0x89,0x45,0x1a,0xe5,0x56,0x26,0x89,0x45,0x1c,0xe5,0x58
+,0x26,0x89,0x45,0x1e,0xe5,0x62,0x26,0x89,0x45,0x20,0xe5,0x64,0x26,0x89,0x45,0x22
+,0xe5,0x66,0x26,0x89,0x45,0x24,0xe5,0x68,0x26,0x89,0x45,0x26,0xe5,0x6a,0x26,0x89
+,0x45,0x28,0xe5,0x6c,0x26,0x89,0x45,0x2a,0xe5,0x70,0x26,0x89,0x45,0x2c,0xe5,0x72
+,0x26,0x89,0x45,0x2e,0xe5,0x74,0x26,0x89,0x45,0x30,0xe5,0x76,0x26,0x89,0x45,0x32
+,0xe5,0x7c,0x26,0x89,0x45,0x34,0xe5,0x7e,0x26,0x89,0x45,0x36,0xe5,0x80,0x26,0x89
+,0x45,0x38,0xe5,0x82,0x26,0x89,0x45,0x3a,0xe5,0x86,0x26,0x89,0x45,0x3c,0xe5,0x88
+,0x26,0x89,0x45,0x3e,0xe5,0x9a,0x26,0x89,0x45,0x40,0xe5,0x9e,0x26,0x89,0x45,0x42
+,0xe5,0xcc,0x26,0x89,0x45,0x44,0xe5,0xce,0x26,0x89,0x45,0x46,0xe5,0xd0,0x26,0x89
+,0x45,0x48,0xe5,0xd2,0x26,0x89,0x45,0x4a,0xba,0x00,0x01,0xed,0x11,0x06,0x66,0x34
+,0x73,0x04,0xff,0x06,0x68,0x34,0x26,0x89,0x45,0x4c,0xba,0x02,0x01,0xed,0xc1,0xe0
+,0x02,0x11,0x06,0x6e,0x34,0x73,0x04,0xff,0x06,0x70,0x34,0x26,0x89,0x45,0x4e,0xba
+,0x04,0x01,0xed,0x11,0x06,0x6a,0x34,0x73,0x04,0xff,0x06,0x6c,0x34,0x26,0x89,0x45
+,0x50,0xba,0x06,0x01,0xed,0xc1,0xe0,0x02,0x11,0x06,0x72,0x34,0x73,0x04,0xff,0x06
+,0x74,0x34,0x26,0x89,0x45,0x52,0xba,0x08,0x01,0xed,0x26,0x89,0x45,0x54,0xba,0x0a
+,0x01,0xed,0x26,0x89,0x45,0x56,0xba,0x0c,0x01,0xed,0x26,0x89,0x45,0x58,0xba,0x0e
+,0x01,0xed,0x01,0x06,0x7a,0x34,0x26,0x89,0x45,0x5e,0xba,0x10,0x01,0xed,0x26,0x89
+,0x45,0x5c,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x80,0x74,0x07,0xf6,0x06,0x93,0x36
+,0x20,0x75,0x03,0xb0,0x01,0xc3,0x26,0x80,0x7f,0x06,0x00,0x75,0x30,0x80,0x3e,0x95
+,0x36,0x00,0x74,0x52,0xc6,0x06,0x95,0x36,0x00,0x83,0x26,0xad,0x36,0xfe,0xa1,0xad
+,0x36,0xe7,0x04,0xba,0x10,0x01,0xb8,0x80,0x80,0xef,0xed,0xba,0x10,0x01,0xb8,0x02
+,0x02,0xef,0xed,0xba,0xe0,0x00,0xb8,0x00,0x10,0xef,0xb0,0x00,0xc3,0x26,0x8b,0x47
+,0x04,0x3d,0x00,0x00,0x74,0x20,0x3d,0x03,0x00,0x77,0x1b,0xba,0x10,0x01,0xb8,0x02
+,0x00,0xef,0xba,0xe0,0x00,0xb8,0x01,0x10,0xef,0x83,0x0e,0xad,0x36,0x01,0xa1,0xad
+,0x36,0xe7,0x04,0xb0,0x00,0xc3,0xb0,0x06,0xc3,0xf6,0x06,0x93,0x36,0x80,0x75,0x03
+,0xb0,0x01,0xc3,0x26,0x83,0x7f,0x04,0x01,0x74,0x0a,0x26,0x83,0x7f,0x04,0x02,0x74
+,0x19,0xb0,0x06,0xc3,0x26,0x83,0x7f,0x06,0x0c,0x77,0xf6,0x26,0x83,0x7f,0x0a,0x60
+,0x77,0xef,0xe8,0x10,0x00,0x72,0x0b,0xb0,0x46,0xc3,0xe8,0x4e,0x00,0x72,0x03,0xb0
+,0x46,0xc3,0xb0,0x00,0xc3,0x51,0xb1,0x0a,0x8b,0x3e,0x20,0xf3,0x26,0x83,0x7d,0x0c
+,0x02,0x75,0x03,0xe9,0x0e,0x00,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20,0x01,0xe9,0xeb
+,0xff,0x59,0xf8,0xc3,0x57,0x8d,0x7d,0x0e,0x8d,0x77,0x06,0xb9,0x12,0x00,0xf3,0xa4
+,0x8d,0x7d,0x20,0x8d,0x36,0xe0,0xfe,0x26,0x8b,0x4d,0x12,0xf3,0xa4,0xff,0x06,0x01
+,0x35,0x5f,0x26,0xc7,0x45,0x0c,0x01,0x00,0x59,0xf9,0xc3,0x51,0xb1,0x0a,0x8d,0x3e
+,0x20,0xf3,0x8d,0x36,0xe0,0xfe,0x26,0x83,0x7d,0x0c,0x01,0x75,0x1b,0x57,0xe8,0x25
+,0x00,0x5f,0x73,0x14,0x33,0xc0,0xb9,0x20,0x01,0xf3,0xaa,0x26,0xc7,0x45,0x0c,0x02
+,0x00,0xff,0x0e,0x01,0x35,0x59,0xf9,0xc3,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20,0x01
+,0xe9,0xd3,0xff,0x59,0xf8,0xc3,0x51,0x26,0x8b,0x4d,0x12,0x8d,0x7d,0x20,0xf3,0xa6
+,0x74,0x03,0x59,0xf8,0xc3,0x59,0xf9,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x80,0x3e,0xec,0x34,0x06,0x72,0x33,0xff,0x06,0xf0,0x33,0x50,0xc4,0x1e,0x8c,0x36
+,0xb9,0x0f,0x00,0x33,0xc0,0xe8,0x29,0x00,0x58,0x81,0x26,0xc2,0x34,0xdf,0x7f,0x81
+,0x26,0xe9,0x34,0xdf,0x7f,0x0b,0xdb,0x74,0x11,0x26,0xc6,0x07,0x84,0x26,0xc6,0x47
+,0x02,0xff,0x26,0x89,0x47,0x06,0xe8,0xac,0x00,0xc3,0xff,0x06,0xea,0x33,0xe9,0xf5
+,0xff,0x57,0x26,0x8b,0x3f,0x03,0xf9,0x26,0x3b,0x7f,0x02,0x74,0x16,0x26,0x3b,0x7f
+,0x04,0x7c,0x2a,0x3d,0x00,0x00,0x75,0x13,0x8d,0x7f,0x08,0x03,0xf9,0x26,0x3b,0x7f
+,0x02,0x7c,0x14,0xff,0x06,0xde,0x33,0x33,0xdb,0x5f,0xc3,0x26,0x8b,0x7f,0x02,0x26
+,0x89,0x3f,0x03,0xf9,0xe9,0x06,0x00,0x26,0x89,0x3f,0x26,0x29,0x0f,0x26,0xc7,0x05
+,0xff,0xff,0x26,0x87,0x3f,0x26,0x89,0x0d,0x8d,0x5d,0x02,0x50,0x8b,0xfb,0x83,0xe9
+,0x02,0x33,0xc0,0xf3,0xaa,0x58,0xfe,0x0e,0xec,0x34,0x5f,0xc3,0x8b,0x7c,0x02,0x3b
+,0x3c,0x74,0x2f,0x83,0x3d,0xff,0x75,0x0b,0x8d,0x7c,0x08,0x89,0x7c,0x02,0x83,0x3d
+,0xff,0x74,0x1e,0x8a,0x45,0x02,0x3c,0x81,0x75,0x0c,0x80,0x3e,0xeb,0x34,0x00,0x74
+,0x05,0x33,0xc0,0xe9,0x0b,0x00,0x8b,0x0d,0x01,0x4c,0x02,0x8d,0x75,0x02,0x83,0xe9
+,0x02,0xc3,0x80,0x3e,0xec,0x34,0x06,0x72,0x05,0x33,0xc0,0xe9,0xf3,0xff,0xff,0x06
+,0xee,0x33,0xe9,0xbe,0xff,0xf6,0x06,0x92,0x36,0x40,0x74,0x01,0xc3,0x57,0x56,0x51
+,0x52,0x8b,0x36,0x8c,0x36,0xe8,0xa4,0xff,0x75,0x03,0xe9,0x1a,0x00,0xe9,0x1c,0x00
+,0xfe,0x06,0xec,0x34,0xc4,0x3e,0x80,0x36,0xf3,0xa4,0x80,0x0e,0x92,0x36,0x40,0xba
+,0x0c,0x01,0xb8,0x80,0x80,0xef,0xed,0x5a,0x59,0x5e,0x5f,0xc3,0xff,0x06,0xe0,0x33
+,0x80,0x3c,0x81,0x75,0x0c,0xff,0x06,0xe2,0x33,0xc6,0x06,0xeb,0x34,0x01,0xe9,0xcf
+,0xff,0x80,0x3c,0x84,0x75,0x07,0xff,0x06,0xe6,0x33,0xe9,0xc3,0xff,0xff,0x06,0xe8
+,0x33,0xe9,0xbc,0xff,0x8d,0x3e,0xe0,0xfe,0xa1,0x72,0x34,0xc7,0x06,0x72,0x34,0x00
+,0x00,0x89,0x05,0xa1,0x74,0x34,0xc7,0x06,0x74,0x34,0x00,0x00,0x89,0x45,0x02,0xba
+,0x04,0x01,0xed,0x89,0x45,0x04,0xc7,0x45,0x06,0x00,0x00,0xa1,0x6e,0x34,0xc7,0x06
+,0x6e,0x34,0x00,0x00,0x89,0x45,0x08,0xa1,0x70,0x34,0xc7,0x06,0x70,0x34,0x00,0x00
+,0x89,0x45,0x0a,0xba,0x00,0x01,0xed,0x89,0x45,0x0c,0xc7,0x45,0x0e,0x00,0x00,0x32
+,0xe4,0xba,0x0e,0x01,0xec,0x89,0x45,0x10,0xa1,0x7e,0x34,0xc7,0x06,0x7e,0x34,0x00
+,0x00,0x89,0x45,0x12,0xa1,0x8c,0x34,0xc7,0x06,0x8c,0x34,0x00,0x00,0x89,0x45,0x14
+,0xa1,0x8a,0x34,0xc7,0x06,0x8a,0x34,0x00,0x00,0x89,0x45,0x16,0xa1,0x7c,0x34,0xc7
+,0x06,0x7c,0x34,0x00,0x00,0x89,0x45,0x18,0xa1,0x88,0x34,0xc7,0x06,0x88,0x34,0x00
+,0x00,0x89,0x45,0x1a,0xa1,0xca,0x33,0xc7,0x06,0xca,0x33,0x00,0x00,0x89,0x45,0x1c
+,0xa1,0x78,0x34,0xc7,0x06,0x78,0x34,0x00,0x00,0x89,0x45,0x1e,0xa1,0xc6,0x34,0xc7
+,0x06,0xc6,0x34,0x00,0x00,0x89,0x45,0x20,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xfa,0x33,0xc0,0x8e,0xd8,0x8e,0xc0,0xb8,0xa0,0x01,0xc1,0xe8,0x04,0x8e,0xd0,0x8d
+,0x26,0x80,0x00,0xe8,0x00,0x01,0xe8,0x10,0xeb,0x8b,0x1e,0xf7,0x34,0x8b,0x16,0xf9
+,0x34,0x8b,0x36,0xff,0x34,0x33,0xc0,0xb9,0xef,0xff,0x8d,0x3e,0x14,0x00,0x2b,0xcf
+,0x2b,0xce,0xd1,0xe9,0xf3,0xab,0x89,0x1e,0xf7,0x34,0x89,0x16,0xf9,0x34,0x83,0xfe
+,0x00,0x74,0x0c,0xb9,0xef,0xff,0xbf,0x80,0xfe,0x2b,0xcf,0xd1,0xe9,0xf3,0xab,0xb9
+,0xff,0xff,0x81,0xe9,0x00,0x3b,0x83,0xfe,0x00,0x74,0x03,0xe9,0x1b,0x00,0x51,0x1e
+,0xb8,0x00,0xe0,0x8e,0xd8,0x33,0xf6,0x8d,0x3e,0x00,0xd8,0xb9,0x00,0x0c,0xf3,0xa5
+,0x1f,0x59,0xbe,0xff,0xff,0x81,0xee,0x00,0xd8,0x2b,0xce,0x81,0xe1,0x00,0xff,0x89
+,0x0e,0xac,0x33,0x8d,0x06,0x20,0x02,0xc1,0xe8,0x04,0xa3,0x32,0x34,0x8e,0xd0,0x36
+,0xc7,0x06,0x1e,0x00,0x80,0x18,0x36,0xc7,0x06,0x22,0x00,0xff,0x7f,0x36,0xc7,0x06
+,0x0a,0x00,0xff,0xff,0x36,0xc7,0x06,0x1c,0x00,0x80,0x00,0x8d,0x06,0xa0,0x02,0xc1
+,0xe8,0x04,0xa3,0x30,0x34,0x8e,0xd0,0x36,0xc7,0x06,0x1e,0x00,0x50,0x28,0x36,0xc7
+,0x06,0x0a,0x00,0xff,0xff,0x36,0xc7,0x06,0x1c,0x00,0x80,0x00,0xb8,0xa0,0x01,0xc1
+,0xe8,0x04,0xa3,0x34,0x34,0xa3,0xf2,0x33,0x8e,0xd0,0x8d,0x26,0x80,0x00,0xb8,0x00
+,0x90,0xe7,0x02,0x8d,0x3e,0x70,0x01,0x8b,0xc7,0xc1,0xe8,0x04,0xb9,0x03,0x00,0x89
+,0x45,0x0e,0x89,0x45,0x02,0xc7,0x05,0xff,0xff,0x83,0xc7,0x10,0x05,0x01,0x00,0xe2
+,0xee,0xe8,0x5b,0x01,0xe5,0xce,0xa3,0xb5,0x36,0xe8,0x21,0x00,0xe8,0x45,0x01,0xa1
+,0x32,0x34,0x8c,0xcb,0xcd,0x37,0x0e,0x58,0xa9,0x00,0xf0,0x74,0x07,0x33,0xf6,0x89
+,0x36,0xff,0x34,0xc3,0x8d,0x36,0x30,0x61,0x89,0x36,0xff,0x34,0xc3,0x33,0xc0,0x8b
+,0xd0,0x8b,0xf2,0xb9,0x68,0x00,0x2e,0x80,0xbc,0xac,0x17,0x80,0x75,0x01,0xef,0x83
+,0xc2,0x02,0x46,0xe2,0xf1,0xb8,0x02,0x00,0xe7,0x50,0xb9,0x5a,0x00,0x33,0xff,0xc7
+,0x05,0x65,0x18,0x8c,0x4d,0x02,0x83,0xc7,0x04,0xe2,0xf4,0x33,0xc0,0x8e,0xc0,0x8c
+,0xc8,0x8e,0xd8,0x8d,0x3e,0x80,0x00,0x8d,0x36,0x9c,0x17,0xb9,0x08,0x00,0xe8,0x37
+,0x00,0x8d,0x36,0x20,0x21,0x8d,0x3e,0xc0,0x00,0xb9,0x0d,0x00,0xe8,0x29,0x00,0x8d
+,0x3e,0x40,0x01,0xb9,0x0a,0x00,0xe8,0x1f,0x00,0xe8,0x4b,0x0e,0x33,0xc0,0x8e,0xd8
+,0xc7,0x06,0x4e,0x37,0x6f,0x17,0xe7,0x48,0xe7,0x4c,0xb8,0x40,0x9c,0xe7,0x4a,0xe5
+,0x48,0x90,0xb8,0x00,0x70,0xe7,0x48,0xc3,0xa5,0x83,0xc7,0x02,0xe2,0xfa,0xc3,0xe5
+,0x4c,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe5,0x58,0xd1
+,0xe0,0x73,0x11,0x8b,0xf0,0xd1,0xe6,0x33,0xc0,0x8e,0xd8,0x8b,0xb4,0x80,0x00,0x83
+,0xc6,0x0b,0xff,0xe6,0x1f,0x07,0x5a,0x5f,0x5e,0x59,0x58,0xcf,0x58,0x1c,0xe4,0x1c
+,0x6c,0x1c,0x8e,0x1a,0xc0,0x1f,0x40,0x1a,0x44,0x1c,0x65,0x18,0x80,0x80,0x80,0xff
+,0x80,0x03,0x02,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
+,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
+,0x80,0x03,0x03,0x43,0x80,0x80,0x02,0x80,0x42,0x03,0x02,0xff,0x03,0x01,0x03,0x01
+,0x01,0x03,0x02,0x03,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x02,0x03,0x01,0x03
+,0x03,0xff,0x01,0x01,0xff,0x01,0xff,0x01,0x01,0x03,0x03,0x03,0xff,0xff,0xff,0xff
+,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
+,0xff,0xff,0xff,0x02,0xb8,0x0f,0x00,0xe7,0x84,0xb8,0x0f,0xf8,0xe7,0x82,0xc3,0xb9
+,0x08,0x00,0x89,0x0e,0xe6,0x3a,0x8d,0x06,0x20,0x03,0x8b,0xd0,0xc1,0xe8,0x04,0xa3
+,0x90,0x01,0x8b,0xc2,0x8b,0xd8,0xc1,0xe8,0x04,0x8e,0xc0,0x05,0x61,0x00,0x26,0xa3
+,0x00,0x00,0xa1,0x30,0x34,0x26,0xa3,0x02,0x00,0x83,0xc3,0x14,0xd1,0xeb,0x26,0x89
+,0x1e,0x08,0x00,0x81,0xc2,0x10,0x06,0xe2,0xd9,0x26,0xc7,0x06,0x00,0x00,0xff,0xff
+,0x8c,0x06,0x92,0x01,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8
+,0xe7,0x5a,0xff,0x06,0xbe,0x33,0xba,0xd2,0x00,0xed,0xcf,0x00,0x00,0x00,0x00,0x00
+,0x8c,0xcb,0xa1,0x30,0x34,0xcd,0x37,0xe9,0x06,0xed,0xb8,0x32,0x00,0xc3,0xe8,0x8c
+,0x01,0xfe,0x06,0xe2,0x34,0xe8,0x21,0x01,0x75,0xf0,0xe8,0x53,0x0e,0x81,0x0e,0xaf
+,0x36,0x00,0xc0,0xc7,0x06,0xad,0x36,0x60,0x00,0xf7,0x06,0xe6,0x34,0x80,0x00,0x75
+,0x1a,0xf7,0x06,0xe6,0x34,0x00,0x08,0x74,0x09,0xc7,0x06,0xab,0x36,0x0b,0x00,0xe9
+,0x0f,0x00,0xc7,0x06,0xab,0x36,0x03,0x00,0xe9,0x06,0x00,0xc7,0x06,0xab,0x36,0x11
+,0x9c,0xc7,0x06,0xa9,0x36,0x18,0x00,0xf7,0x06,0xe6,0x34,0x80,0x00,0x75,0x0d,0xf7
+,0x06,0xb5,0x36,0x02,0x00,0x74,0x05,0x83,0x0e,0xa9,0x36,0x20,0xa1,0xa9,0x36,0xe7
+,0x00,0xa1,0xab,0x36,0xe7,0x02,0xf7,0x06,0xe6,0x34,0x80,0x00,0x74,0x2e,0xe8,0xf2
+,0x2f,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56,0xa1,0xb1,0x36,0x0d,0x00,0x10,0xe7,0x08
+,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xb8,0x40,0x00,0xe7,0x4e,0x33
+,0xc0,0xe7,0x0e,0xc7,0x06,0x26,0x02,0x00,0x00,0xe9,0x23,0x00,0xc7,0x06,0x4e,0x37
+,0x3f,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x80,0x74,0x07,0x26
+,0x81,0x0e,0x08,0x00,0x00,0x80,0xc6,0x06,0xe0,0x34,0x01,0xb8,0x00,0x00,0xc3,0xfe
+,0x06,0xe1,0x34,0xc6,0x06,0xe0,0x34,0x00,0xa1,0x26,0x02,0x0b,0xc0,0x74,0x01,0xc3
+,0xe8,0x04,0x00,0xb8,0x00,0x00,0xc3,0xa1,0xa9,0x36,0xe7,0x00,0x8b,0x1e,0xab,0x36
+,0x83,0xe3,0x06,0xe5,0x02,0x25,0xf9,0xff,0x0b,0xc3,0x0d,0x10,0x00,0xe7,0x02,0xa1
+,0xad,0x36,0xe7,0x04,0xc3,0xb8,0x0a,0x00,0xe7,0x84,0xfe,0x06,0xe5,0x34,0xc6,0x06
+,0xe3,0x34,0x01,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x40,0x74,0x07
+,0x26,0x81,0x0e,0x08,0x00,0x00,0x40,0xc3,0xc7,0x06,0x4e,0x37,0x6f,0x17,0xfe,0x06
+,0xe4,0x34,0xc6,0x06,0xe3,0x34,0x00,0xc3,0xc3,0xf6,0x06,0x18,0x34,0x80,0x75,0x0d
+,0xa1,0x18,0x34,0x0b,0x06,0x1a,0x34,0x0b,0x06,0x1c,0x34,0x75,0x01,0xc3,0xa1,0x2e
+,0x34,0x25,0xff,0xfe,0x8b,0x16,0xe7,0x36,0x81,0xe2,0x00,0x01,0x0b,0xc2,0xa3,0x2e
+,0x34,0x8d,0x16,0x10,0x00,0xbf,0x00,0x00,0xb9,0x08,0x00,0x8b,0x85,0x00,0x34,0xef
+,0x83,0xc2,0x10,0x8b,0x85,0x02,0x34,0xef,0x83,0xc2,0x10,0x8b,0x85,0x04,0x34,0xef
+,0x83,0xc2,0xe2,0x83,0xc7,0x06,0x49,0x75,0xe2,0xb8,0x00,0x00,0x8e,0xc0,0xbe,0x00
+,0x34,0xbf,0xb9,0x36,0xb9,0x18,0x00,0xf3,0xa5,0xb8,0x00,0x00,0xc3,0x33,0xc0,0x8e
+,0xc0,0x8d,0x3e,0xb0,0x33,0xb9,0x08,0x00,0xf3,0xab,0x8d,0x3e,0x3e,0x34,0xb9,0x03
+,0x00,0xf3,0xab,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xba
+,0x33,0xe5,0x56,0x0d,0x20,0x00,0xe7,0x56,0xba,0x7a,0x00,0xed,0x08,0x26,0x94,0x36
+,0x33,0xc0,0xb1,0x08,0x32,0xed,0x06,0x8e,0xc0,0x8d,0x3e,0xe0,0xff,0xf3,0xaa,0x8e
+,0x06,0x32,0x34,0x26,0x81,0x0e,0x08,0x00,0x00,0x02,0x07,0xe5,0x56,0x25,0xdf,0xff
+,0xe7,0x56,0xe9,0xf8,0xfc,0x00,0xbd,0x1b,0x10,0x1b,0xd9,0x1a,0xf3,0x1a,0x50,0x51
+,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb6,0x33,0x53
+,0x06,0x51,0xe5,0x80,0xa3,0xb4,0x33,0x8b,0xd8,0x8b,0xc8,0x25,0x10,0x00,0xa3,0xed
+,0x34,0x0b,0xc0,0x74,0x14,0xff,0x06,0x80,0x34,0x80,0x3e,0xfe,0x34,0x00,0x74,0x03
+,0xe9,0x06,0x00,0xb8,0x80,0x00,0xe8,0x9d,0x04,0x83,0xe3,0x03,0xd1,0xe3,0x2e,0xff
+,0x97,0x86,0x1a,0x59,0x07,0x5b,0xe9,0xa4,0xfc,0xba,0x20,0x00,0x8e,0x06,0x3c,0x34
+,0x83,0x3e,0x3c,0x34,0x00,0x75,0x03,0xe9,0xf0,0x00,0xc7,0x06,0x3c,0x34,0x00,0x00
+,0xe9,0x2a,0x00,0xba,0x10,0x00,0x8e,0x06,0x3a,0x34,0x83,0x3e,0x3a,0x34,0x00,0x75
+,0x03,0xe9,0xd5,0xff,0xc7,0x06,0x3a,0x34,0x00,0x00,0xe8,0x10,0x00,0xe9,0xc9,0xff
+,0xba,0x10,0x00,0x8e,0x06,0x3a,0x34,0xc7,0x06,0x3a,0x34,0x00,0x00,0x26,0xa1,0x14
+,0x00,0x26,0xa3,0x0c,0x00,0x26,0xa1,0x16,0x00,0x26,0xa3,0x0e,0x00,0x26,0xc6,0x06
+,0x0a,0x00,0x00,0xc1,0xea,0x02,0x23,0xd1,0x74,0x1c,0xba,0x20,0x00,0x26,0xc7,0x06
+,0x0e,0x00,0xea,0x05,0x26,0x0b,0x16,0x0c,0x00,0x26,0x89,0x16,0x0c,0x00,0xff,0x06
+,0x86,0x34,0xff,0x06,0xdc,0x33,0x26,0xa1,0x0c,0x00,0xa9,0x00,0x37,0x74,0x16,0x26
+,0xc6,0x06,0x0a,0x00,0x02,0xa9,0x00,0x30,0x74,0x04,0xff,0x06,0x7a,0x34,0xff,0x06
+,0xda,0x33,0xe9,0x49,0x00,0xc0,0xec,0x07,0x83,0x16,0x8a,0x34,0x00,0x24,0x07,0x3c
+,0x07,0x75,0x04,0xff,0x06,0x8c,0x34,0xff,0x06,0x7e,0x34,0xa1,0x30,0x34,0x8c,0xc3
+,0x8e,0xc0,0x8e,0xdb,0x26,0x83,0x0e,0x08,0x00,0x40,0x8c,0xd8,0x26,0x87,0x06,0x16
+,0x00,0x26,0x83,0x3e,0x14,0x00,0xff,0x74,0x0a,0x8e,0xc0,0x26,0x8c,0x1e,0x00,0x00
+,0xe9,0x05,0x00,0x26,0x8c,0x1e,0x14,0x00,0x33,0xc0,0x8e,0xd8,0xc3,0xc3,0x8c,0xc0
+,0x87,0x06,0x92,0x01,0x3d,0xff,0xff,0x74,0x0d,0x8e,0xd8,0x8c,0x06,0x00,0x00,0x33
+,0xc0,0x8e,0xd8,0xe9,0x04,0x00,0x8c,0x06,0x90,0x01,0xe8,0x01,0x00,0xc3,0x06,0x83
+,0x3e,0x90,0x01,0xff,0x74,0x29,0x83,0x3e,0x3a,0x34,0x00,0x75,0x11,0xba,0x86,0x00
+,0xe8,0x1e,0x00,0x8c,0x06,0x3a,0x34,0x83,0x3e,0x90,0x01,0xff,0x74,0x11,0x83,0x3e
+,0x3c,0x34,0x00,0x75,0x0a,0xba,0x88,0x00,0xe8,0x06,0x00,0x8c,0x06,0x3c,0x34,0x07
+,0xc3,0xa1,0x90,0x01,0x8e,0xc0,0x26,0xa1,0x08,0x00,0xef,0x26,0xa1,0x00,0x00,0x26
+,0xc7,0x06,0x00,0x00,0xff,0xff,0xa3,0x90,0x01,0x3d,0xff,0xff,0x75,0x03,0xa3,0x92
+,0x01,0x83,0x3e,0xed,0x34,0x00,0x74,0x0b,0xb8,0x10,0x00,0xe7,0x84,0xc7,0x06,0xed
+,0x34,0x00,0x00,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7
+,0x5a,0xff,0x06,0xbc,0x33,0xe9,0x25,0xfb,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33
+,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb0,0x33,0xe9,0x11,0xfb,0x50,0x51,0x56,0x57
+,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb4,0x33,0x06,0xff,0x06
+,0x76,0x34,0x80,0x3e,0xfe,0x34,0x00,0x74,0x04,0x07,0xe9,0xf0,0xfa,0xb8,0x80,0x00
+,0xe8,0xd3,0x02,0x07,0xe9,0xe6,0xfa,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xc6,0x1d,0x08,0x1d,0x91,0x1e,0x5d,0x1e,0x73,0x1e,0x89,0x1e,0x91,0x1e,0xa8,0x1d
+,0x91,0x1e,0x91,0x1e,0xaf,0x1e,0xaf,0x1e,0x15,0x1d,0x15,0x1d,0x91,0x1e,0x99,0x1f
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x02,0x00,0x00
+,0x00,0x01,0x00,0x10,0x00,0x01,0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00
+,0x07,0xe9,0x99,0xfa,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7
+,0x5a,0xff,0x06,0xb2,0x33,0x06,0x68,0xf6,0x1c,0xe5,0x06,0xa3,0xb2,0x33,0x8b,0xf0
+,0x83,0xe6,0x1e,0x2e,0xff,0xa4,0xa0,0x1c,0xe5,0x0c,0xa9,0x80,0x00,0x74,0x06,0xe8
+,0xa4,0x01,0xe5,0x06,0xc3,0x53,0xe5,0x0c,0x8b,0xd8,0xa9,0x01,0x00,0x74,0x14,0x83
+,0x3e,0xe0,0x3a,0x00,0x74,0x0d,0x8e,0x06,0x38,0x34,0xe8,0xbf,0x06,0xc7,0x06,0xe0
+,0x3a,0x00,0x00,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7
+,0x02,0x8b,0xc3,0x5b,0xa9,0x01,0x00,0x74,0x01,0xc3,0x8b,0xd0,0xb8,0x00,0x08,0xe7
+,0x84,0x8b,0xc2,0x8e,0x06,0x38,0x34,0x26,0xa3,0x0c,0x00,0x8b,0xd0,0xc1,0xe0,0x03
+,0x83,0x16,0x88,0x34,0x00,0xff,0x06,0x7c,0x34,0x26,0x83,0x3e,0x06,0x00,0x0a,0x75
+,0x21,0x8b,0xc2,0x25,0x40,0x18,0x3d,0x40,0x00,0x74,0x0c,0x3d,0x00,0x10,0x75,0x12
+,0x26,0xfe,0x0e,0x0a,0x00,0x74,0x0b,0xf7,0x06,0xef,0x34,0x20,0x00,0x75,0x03,0xe9
+,0x5a,0x06,0x8c,0xc0,0x26,0x8e,0x06,0x02,0x00,0x26,0x83,0x0e,0x08,0x00,0x20,0x26
+,0xa3,0x12,0x00,0x26,0xa3,0x10,0x00,0xc3,0xff,0x06,0xc4,0x33,0xe5,0x0c,0xa9,0x01
+,0x00,0x75,0x01,0xc3,0xa9,0xf0,0x07,0x74,0x01,0xc3,0xff,0x06,0xd4,0x33,0xe5,0x00
+,0x0d,0x18,0x00,0xe7,0x00,0xc3,0xff,0x06,0xca,0x33,0x80,0x3e,0xa0,0x36,0x08,0x75
+,0x14,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x08,0x74,0x07,0x26,0x81
+,0x0e,0x08,0x00,0x00,0x08,0xe5,0x82,0x25,0xfd,0xff,0xe7,0x82,0xe5,0x0c,0x50,0xe5
+,0x80,0x25,0x00,0x07,0xa3,0xe4,0x3a,0xe5,0x8c,0x25,0x00,0x80,0xa3,0xe2,0x3a,0x58
+,0xa9,0x02,0x00,0x75,0x25,0x83,0x3e,0xe2,0x3a,0x00,0x75,0x1e,0x83,0x3e,0xe4,0x3a
+,0x00,0x75,0x17,0xe5,0x08,0x0d,0x00,0x04,0x25,0xff,0x04,0xe7,0x08,0xe8,0x6a,0x01
+,0xe5,0x82,0x0d,0x02,0x00,0xe7,0x82,0xe9,0x21,0x00,0xe8,0x1a,0x06,0x80,0x3e,0xe8
+,0xff,0x00,0x74,0x0a,0x80,0x3e,0xe8,0xff,0x04,0x74,0x03,0xe9,0x0d,0x00,0xc6,0x06
+,0xe8,0xff,0x01,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0x80,0x3e,0x9f,0x36,0x06
+,0x75,0x05,0x83,0x0e,0x99,0x36,0x40,0xb8,0x00,0x01,0xe9,0x09,0x01,0xff,0x06,0xcc
+,0x33,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36,0xe7,0x06,0xff,0x06,0xc6,0x34
+,0xe9,0x1e,0x00,0xff,0x06,0xce,0x33,0xff,0x06,0x95,0x37,0x81,0x26,0xaf,0x36,0xff
+,0xef,0xa1,0xaf,0x36,0xe7,0x06,0xe9,0x08,0x00,0xff,0x06,0xd0,0x33,0xff,0x06,0x7a
+,0x34,0xff,0x06,0xd2,0x33,0xd1,0xe6,0x8e,0x06,0x30,0x34,0x2e,0x8b,0x84,0xc0,0x1c
+,0x26,0x09,0x06,0x08,0x00,0x2e,0x8b,0x84,0xc2,0x1c,0x09,0x06,0x66,0x37,0xc3,0xe5
+,0x0c,0xa9,0x80,0x00,0x74,0x56,0x50,0xe8,0xf0,0x00,0x58,0xa9,0x00,0x01,0x75,0x07
+,0xff,0x06,0xc6,0x33,0xe9,0x08,0x00,0xff,0x06,0x78,0x34,0xff,0x06,0xc8,0x33,0xe5
+,0x82,0x25,0xfd,0xff,0xe7,0x82,0xe8,0x6e,0x05,0xba,0x10,0x01,0xed,0x80,0x3e,0xe8
+,0xff,0x00,0x74,0x0a,0x80,0x3e,0xe8,0xff,0x04,0x74,0x03,0xe9,0x1d,0x00,0xc6,0x06
+,0xe8,0xff,0x01,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0xe9,0x0d,0x00,0xc6,0x06
+,0xe8,0xff,0x03,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0xc3,0xa9,0x01,0x00,0x74
+,0x1c,0xe8,0x2c,0x00,0x83,0x3e,0xe0,0x3a,0x00,0x74,0x0f,0x06,0x8e,0x06,0x38,0x34
+,0xe8,0xc9,0x04,0xc7,0x06,0xe0,0x3a,0x00,0x00,0x07,0xe9,0x5d,0x00,0x8b,0xd0,0x8e
+,0x06,0x38,0x34,0x26,0xa3,0x0c,0x00,0xe8,0x06,0x00,0x68,0x69,0x1d,0xe9,0x4a,0x00
+,0xa9,0x00,0x04,0x74,0x0a,0xb8,0x00,0x04,0xff,0x06,0xd8,0x33,0xe9,0x17,0x00,0xa9
+,0x00,0x01,0x74,0x0a,0xff,0x06,0x39,0x37,0xb8,0x00,0x01,0xe9,0x08,0x00,0xa9,0x10
+,0x00,0xb8,0x10,0x00,0x74,0x1d,0x09,0x06,0x66,0x37,0x8c,0xc0,0x8e,0x06,0x30,0x34
+,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x01
+,0x8e,0xc0,0xc3,0xff,0x06,0xc2,0x33,0xe9,0xf8,0xff,0xe5,0x00,0x0d,0x18,0x00,0xe7
+,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7,0x02,0xc3,0x58,0xe9,0x43,0xfd,0xe5,0x08,0x0d
+,0x00,0x04,0x25,0xff,0x04,0xe7,0x08,0xe9,0xe0,0xff,0xe5,0x0e,0xa9,0x00,0x08,0x75
+,0x01,0xc3,0xe9,0xf5,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb8
+,0x33,0xe5,0x48,0x06,0x53,0x57,0xff,0x16,0x4e,0x37,0x5f,0x5b,0x83,0x3e,0x80,0x01
+,0xff,0x74,0x58,0x8e,0x06,0x80,0x01,0x26,0xff,0x0e,0x08,0x00,0x75,0x4d,0x26,0xa1
+,0x00,0x00,0xa3,0x80,0x01,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x8c,0xc0,0x26,0x8e
+,0x06,0x02,0x00,0x26,0x81,0x0e,0x08,0x00,0x80,0x00,0x8b,0xd0,0x26,0x87,0x06,0x1a
+,0x00,0x26,0x83,0x3e,0x18,0x00,0xff,0x74,0x0a,0x8e,0xc0,0x26,0x89,0x16,0x00,0x00
+,0xe9,0x05,0x00,0x26,0x89,0x16,0x18,0x00,0x83,0x3e,0x80,0x01,0xff,0x74,0x0c,0x8e
+,0x06,0x80,0x01,0x26,0x83,0x3e,0x08,0x00,0x00,0x74,0xb3,0x07,0xe9,0x3e,0xf7,0xe5
+,0x4c,0x90,0xe5,0x02,0xa9,0x00,0x20,0x74,0x0d,0x25,0xff,0xdf,0x0d,0x01,0x00,0xe7
+,0x02,0x0d,0x00,0x20,0xe7,0x02,0xe5,0x0a,0x8b,0xd8,0xa3,0xf4,0x33,0x25,0xc3,0x57
+,0x0d,0x00,0x10,0xe7,0x0a,0xf7,0x06,0x9b,0x36,0x00,0x80,0x74,0x37,0xf7,0xc3,0x00
+,0x80,0x74,0x06,0xf7,0xc3,0x00,0x08,0x74,0x5d,0x81,0x26,0xc2,0x34,0x7f,0xff,0xc7
+,0x06,0x35,0x37,0x05,0x00,0xb8,0x80,0x03,0xcd,0x39,0x81,0x26,0x9b,0x36,0xff,0x7f
+,0xc7,0x06,0x0f,0x37,0x04,0x00,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06
+,0x0f,0x37,0x03,0x00,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x2a,0xf7,0xc3,0x00,0x08
+,0x74,0x24,0x80,0x3e,0x9d,0x36,0x06,0x7c,0x1d,0xff,0x06,0x94,0x34,0x83,0x0e,0x66
+,0x37,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26
+,0x81,0x0e,0x08,0x00,0x00,0x01,0xf7,0xc3,0x00,0x20,0x75,0x3b,0xf7,0x06,0x9a,0x37
+,0x80,0x00,0x74,0x0b,0xff,0x06,0x89,0x37,0x33,0xc0,0xe7,0x0e,0xe9,0x04,0x00,0xff
+,0x06,0x3b,0x37,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x1c,0x80,0x26,0x9e,0x36,0xff
+,0x75,0x15,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x08,0x74,0x07,0x26
+,0x81,0x0e,0x08,0x00,0x00,0x08,0xc3,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x02,0x23,0x02,0x23,0x02,0x23,0x02,0x23,0x03,0x23,0xdd,0x22,0x02,0x23,0xfd,0x21
+,0x02,0x23,0xa4,0x24,0xf3,0x24,0x02,0x23,0x8d,0x22,0x7a,0x23,0x02,0x23,0x97,0x24
+,0x1b,0x24,0x75,0x24,0x02,0x23,0x02,0x23,0x8e,0x25,0xfb,0x8e,0x06,0x7e,0x01,0xfb
+,0x26,0x83,0x3e,0x00,0x00,0xff,0x74,0xf2,0x26,0x8e,0x06,0x00,0x00,0xfa,0x26,0x8b
+,0x1e,0x08,0x00,0x26,0x23,0x1e,0x0a,0x00,0x74,0xe5,0x8c,0xc0,0x8e,0xd0,0x26,0x8b
+,0x26,0x02,0x00,0x8c,0x16,0xf2,0x33,0x22,0xff,0x75,0x6a,0x26,0xa1,0x1c,0x00,0x8a
+,0xe3,0x8a,0xdc,0x22,0xd8,0x75,0x0d,0xd0,0xe8,0x24,0xf8,0x0a,0xc0,0x75,0xf2,0xb0
+,0x80,0xe9,0xed,0xff,0xd0,0xe8,0x24,0xf8,0x0a,0xc0,0x75,0x02,0xb0,0x80,0x32,0xe4
+,0x26,0xa3,0x1c,0x00,0xf7,0xc3,0x08,0x00,0x75,0x47,0x2e,0x8a,0x9f,0xc5,0x25,0x2e
+,0x8b,0xbf,0xc5,0x26,0x80,0xc3,0x10,0x26,0x8e,0x1d,0x26,0x8c,0x1e,0x06,0x00,0x8b
+,0x16,0x00,0x00,0xc7,0x06,0x00,0x00,0xff,0xff,0x26,0x89,0x15,0x83,0xfa,0xff,0x75
+,0x0a,0x2e,0x8b,0x97,0xcd,0x26,0x26,0x21,0x16,0x08,0x00,0x33,0xc0,0x8e,0xd8,0x26
+,0x89,0x1e,0x04,0x00,0xc3,0x8a,0xdf,0xb7,0x00,0x2e,0x8a,0x9f,0xc5,0x25,0xe9,0xe0
+,0xff,0x26,0x83,0x26,0x08,0x00,0xf7,0x83,0xc3,0x10,0xe9,0xde,0xff,0x60,0x06,0x1e
+,0x68,0x87,0x25,0x6a,0x00,0x1f,0x8e,0x06,0xf2,0x33,0x8b,0x0e,0x34,0x34,0x39,0x0e
+,0xf2,0x33,0x74,0x0e,0x26,0x81,0x0e,0x0a,0x00,0x00,0x02,0x26,0x81,0x0e,0x08,0x00
+,0x00,0x02,0x26,0x89,0x26,0x02,0x00,0xa3,0xf2,0x33,0x8e,0xd0,0x8d,0x26,0x80,0x00
+,0x36,0x89,0x26,0x02,0x00,0x36,0x89,0x1e,0x20,0x00,0x36,0xc7,0x06,0x08,0x00,0x00
+,0x00,0xb9,0x04,0x00,0xbe,0x00,0x00,0x2e,0x8b,0xbc,0xc5,0x26,0x36,0xc7,0x05,0xff
+,0xff,0x36,0xc7,0x45,0x02,0xff,0xff,0x83,0xc6,0x02,0xe2,0xeb,0x8e,0x06,0x7e,0x01
+,0x36,0x8b,0x0e,0x22,0x00,0x8c,0xc0,0x26,0x83,0x3e,0x00,0x00,0xff,0x26,0x8e,0x06
+,0x00,0x00,0x74,0x07,0x26,0x3b,0x0e,0x22,0x00,0x7d,0xea,0x36,0x8c,0x06,0x00,0x00
+,0x8e,0xc0,0x26,0x8c,0x16,0x00,0x00,0xfb,0x36,0xff,0x2e,0x1e,0x00,0x06,0x1e,0x68
+,0x8b,0x25,0x6a,0x00,0x1f,0x26,0x09,0x36,0x08,0x00,0xf7,0xc6,0x00,0xff,0x74,0x01
+,0xc3,0x56,0x52,0x2e,0x8b,0xb4,0xc5,0x25,0x81,0xe6,0xff,0x00,0x2e,0x8b,0xb4,0xc5
+,0x26,0x8c,0xc2,0x8e,0xc0,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x8e,0xc2,0x26,0x83
+,0x3c,0xff,0x74,0x0f,0x8b,0xd0,0x26,0x87,0x54,0x02,0x8e,0xc2,0x26,0xa3,0x00,0x00
+,0xe9,0x07,0x00,0x26,0x89,0x44,0x02,0x26,0x89,0x04,0x5a,0x5e,0xc3,0x06,0x1e,0x68
+,0x8b,0x25,0x6a,0x00,0x1f,0x8e,0x06,0xf2,0x33,0x26,0xa3,0x0a,0x00,0x26,0x89,0x26
+,0x02,0x00,0xa1,0x34,0x34,0x8e,0xd0,0x8d,0x26,0x80,0x00,0x8c,0x16,0xf2,0x33,0xe9
+,0x4d,0xfe,0xcf,0x50,0x1e,0x52,0x53,0x33,0xc0,0x8e,0xd8,0x26,0x83,0x3e,0x04,0x00
+,0xff,0x26,0xc7,0x06,0x04,0x00,0x00,0x00,0x74,0x03,0xe9,0x1a,0x00,0x83,0x3e,0xe6
+,0x3a,0x02,0x76,0x13,0xff,0x06,0xd6,0x33,0x8c,0xc0,0x8e,0x06,0x32,0x34,0xbe,0x40
+,0x00,0x68,0x3a,0x23,0xe9,0x5e,0xff,0xe8,0x84,0xf8,0x5b,0x5a,0x1f,0x58,0xcf,0xe8
+,0xe1,0x00,0x26,0xc6,0x06,0x18,0x00,0x10,0x26,0x8a,0x1e,0x29,0x00,0x88,0x1e,0x1b
+,0x37,0x26,0xc7,0x06,0x0c,0x00,0xff,0x7f,0x26,0xa1,0x0e,0x00,0xe7,0x9c,0x26,0xa1
+,0x08,0x00,0xe7,0x9a,0xe5,0x00,0x80,0xfb,0x08,0x74,0x09,0x0d,0x18,0xac,0xe7,0x00
+,0x07,0x1f,0x58,0xcf,0x0d,0x18,0x00,0xe9,0xf4,0xff,0x50,0x1e,0x06,0x33,0xc0,0x8e
+,0xd8,0x83,0x3e,0xa1,0x36,0x00,0x75,0xb7,0x26,0x8b,0x36,0x06,0x00,0x2e,0xff,0x94
+,0xdc,0x23,0x07,0x1f,0x58,0xcf,0xe8,0x8a,0x00,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00
+,0xe8,0x49,0x00,0xc3,0x53,0xf7,0x06,0xef,0x34,0x20,0x00,0x75,0x2d,0xe5,0x8c,0x25
+,0x00,0x70,0x8b,0xd8,0xe5,0x8c,0x25,0x00,0x70,0x3b,0xc3,0x74,0x05,0x8b,0xd8,0xe9
+,0xf2,0xff,0x3d,0x00,0x30,0x75,0x10,0xe5,0x02,0x25,0xef,0xff,0xe7,0x02,0xc7,0x06
+,0xe0,0x3a,0xff,0xff,0xe9,0x03,0x00,0xe8,0x12,0x00,0x5b,0xc3,0xa3,0x23,0x96,0x23
+,0xa4,0x23,0xa4,0x23,0x96,0x23,0xa4,0x23,0x96,0x23,0x96,0x23,0x26,0xa0,0x29,0x00
+,0xa2,0x1b,0x37,0x26,0xc7,0x06,0x0c,0x00,0xff,0x7f,0x26,0xa1,0x0e,0x00,0xe7,0x9c
+,0x26,0xa1,0x08,0x00,0xe7,0x9a,0xe5,0x00,0x25,0xff,0x53,0x26,0x8b,0x36,0x06,0x00
+,0x83,0xe6,0x0e,0x2e,0x0b,0x84,0xad,0x25,0xe7,0x00,0xc3,0x06,0x1e,0x68,0x8b,0x25
+,0x6a,0x00,0x1f,0x83,0x0e,0xef,0x34,0x20,0x83,0x0e,0x9b,0x36,0x08,0xe5,0x00,0x25
+,0xef,0xff,0x0d,0x08,0x00,0xe7,0x00,0xe5,0x00,0xa9,0x10,0x00,0x75,0x01,0xc3,0xe5
+,0x00,0xa9,0x10,0x00,0x75,0xf9,0xc3,0x50,0x53,0x51,0x56,0x06,0x1e,0x33,0xc0,0x8e
+,0xd8,0xb8,0x05,0x00,0xe7,0x84,0xe5,0x08,0x0d,0x00,0x04,0x25,0xff,0x04,0xe7,0x08
+,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7,0x02,0x1f,0x07
+,0x5e,0x59,0x5b,0x58,0xc3,0x50,0x1e,0x33,0xc0,0x8e,0xd8,0xc7,0x06,0xef,0x34,0x00
+,0x00,0x83,0x26,0x9b,0x36,0xf7,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d
+,0x11,0x00,0xe7,0x02,0x1f,0x58,0xcf,0x60,0x06,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f
+,0xe8,0x16,0xf5,0xc3,0x06,0x1e,0x68,0x8b,0x25,0x6a,0x00,0x1f,0x8e,0xc0,0x26,0x83
+,0x3e,0x0a,0x00,0x00,0x74,0x03,0xe8,0x43,0x00,0x26,0xc7,0x06,0x0a,0x00,0xff,0xff
+,0x26,0x8b,0x16,0x06,0x00,0x8e,0x1e,0x8e,0x01,0x8c,0xd8,0x8b,0xca,0x83,0x3e,0x00
+,0x00,0xff,0x8e,0x1e,0x00,0x00,0x74,0x0a,0x2b,0x16,0x08,0x00,0x73,0xeb,0x29,0x0e
+,0x08,0x00,0x26,0x89,0x0e,0x08,0x00,0x26,0x8c,0x1e,0x00,0x00,0x8e,0xd8,0x8c,0x06
+,0x00,0x00,0xc3,0x60,0x06,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f,0x8e,0xc0,0x8b,0xc8
+,0x8e,0x1e,0x8e,0x01,0x26,0xc7,0x06,0x0a,0x00,0x00,0x00,0x8c,0xd8,0x83,0x3e,0x00
+,0x00,0xff,0x74,0x25,0x3b,0x0e,0x00,0x00,0x8e,0x1e,0x00,0x00,0x75,0xed,0x8e,0xd8
+,0x26,0xa1,0x00,0x00,0xa3,0x00,0x00,0x3d,0xff,0xff,0x74,0x56,0x8e,0xd8,0x26,0xa1
+,0x08,0x00,0x01,0x06,0x08,0x00,0xe9,0x49,0x00,0x26,0x8e,0x1e,0x02,0x00,0xbe,0x18
+,0x00,0x83,0x3c,0xff,0x74,0x3c,0x39,0x0c,0x74,0x19,0x8e,0x1c,0xbe,0x00,0x00,0x83
+,0x3e,0x00,0x00,0xff,0x74,0x2c,0x39,0x0e,0x00,0x00,0x74,0x07,0x8e,0x1e,0x00,0x00
+,0xe9,0xec,0xff,0x26,0xa1,0x00,0x00,0x89,0x04,0x33,0xc9,0x8e,0xd9,0x3d,0xff,0xff
+,0x75,0x10,0x83,0xfe,0x18,0x75,0x0b,0x26,0x8e,0x1e,0x02,0x00,0x81,0x26,0x08,0x00
+,0x7f,0xff,0x33,0xc0,0x8e,0xd8,0xc3,0x1f,0x07,0x61,0xcf,0x1f,0x07,0xcf,0x60,0x06
+,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f,0xe5,0x06,0x25,0x1e,0x00,0x3d,0x1e,0x00,0x75
+,0xf6,0xb9,0x08,0x00,0xe5,0x58,0xe7,0x5a,0x23,0xc0,0xe0,0xf8,0xc3,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0xac,0x00,0x00,0x00,0xa8,0x00,0x8c,0x02,0x04,0x00
+,0x00,0x08,0x10,0x20,0x00,0xff,0x0e,0x0c,0x0c,0x0a,0x0a,0x0a,0x0a,0x08,0x08,0x08
+,0x08,0x08,0x08,0x08,0x08,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06
+,0x06,0x06,0x06,0x06,0x06,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04
+,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04
+,0x04,0x04,0x04,0x04,0x04,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
+,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
+,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
+,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
+,0x02,0x02,0x02,0x02,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x14,0x00,0x10,0x00,0x0c,0x00,0xff,0x7f,0xff
+,0xbf,0xff,0xdf,0xff,0xef,0xff,0xf7,0xff,0xfb,0xff,0xfd,0xff,0xfe,0x7f,0xff,0xbf
+,0xff,0xdf,0xff,0xef,0xff,0xf7,0xff,0xfb,0xff,0xfd,0xff,0xfe,0xff,0x00,0x00,0x00
+,0x80,0x3e,0xe2,0x34,0x01,0x76,0x03,0xe9,0xa5,0x00,0xb8,0x00,0x00,0xe7,0x4e,0xb9
+,0x28,0x00,0xe2,0xfe,0xc6,0x06,0x45,0x37,0x02,0xbf,0x3f,0x28,0x2e,0x8b,0x45,0x08
+,0xe7,0x4e,0xb9,0x28,0x00,0xe2,0xfe,0x2e,0x8b,0x1d,0xc7,0x06,0xb3,0x36,0x40,0x11
+,0xc7,0x06,0xb1,0x36,0x27,0x00,0xc7,0x06,0x46,0x37,0x02,0x00,0xc7,0x06,0x48,0x37
+,0x64,0x00,0xf7,0x06,0xb5,0x36,0x02,0x00,0x75,0x1c,0x2e,0x0b,0x5d,0x02,0x81,0x26
+,0xb3,0x36,0xff,0xfe,0xc7,0x06,0xb1,0x36,0x9c,0x00,0xc7,0x06,0x46,0x37,0x08,0x00
+,0xc7,0x06,0x48,0x37,0x90,0x01,0x89,0x1e,0xb7,0x36,0x89,0x1e,0xfe,0x33,0xbe,0x20
+,0x00,0x8b,0xc3,0xe7,0x4e,0xb9,0x28,0x00,0xe2,0xfe,0x2e,0x8b,0x45,0x04,0xe7,0x4e
+,0xb9,0x28,0x00,0xe2,0xfe,0xe5,0x4e,0x8b,0xcb,0x2e,0x23,0x45,0x06,0x2e,0x23,0x4d
+,0x06,0x3a,0xc1,0x74,0x36,0x4e,0x75,0xd9,0x80,0x3e,0x45,0x37,0x00,0x74,0x0b,0xc6
+,0x06,0x45,0x37,0x00,0xbf,0x2f,0x28,0xe9,0x72,0xff,0xc6,0x06,0x45,0x37,0x01,0xf7
+,0x06,0xb5,0x36,0x02,0x00,0x74,0x14,0xe5,0xce,0x25,0xfd,0xff,0xe7,0xce,0xe8,0x43
+,0x00,0xe5,0xce,0x0d,0x02,0x00,0xe7,0xce,0xe8,0x39,0x00,0x80,0x3e,0xe2,0x34,0x01
+,0x76,0x01,0xc3,0xb8,0xea,0x05,0xe7,0x8c,0xfa,0xe8,0x12,0xf4,0xfb,0x8d,0x06,0xd0
+,0x39,0x8b,0xd8,0xc1,0xe8,0x04,0xa3,0x38,0x34,0x8e,0xc0,0xa1,0x30,0x34,0x26,0xa3
+,0x02,0x00,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x83,0xc3,0x18,0xd1,0xeb,0x26,0x89
+,0x1e,0x08,0x00,0xc3,0xe5,0x02,0x0d,0x00,0x40,0xe7,0x02,0xe5,0x00,0x0d,0x04,0x00
+,0xe7,0x00,0xb8,0x00,0x00,0xe7,0x0a,0xe5,0x0a,0xa9,0x00,0x80,0x75,0x14,0xe5,0x08
+,0x0d,0x00,0x10,0xe7,0x08,0xe5,0x0a,0x0d,0x00,0x08,0xb9,0x05,0x00,0xe7,0x0a,0xe2
+,0xfc,0xc3,0xe5,0x08,0x0d,0x00,0x10,0xb9,0x05,0x00,0xe7,0x08,0xe2,0xfc,0xc3,0x04
+,0x0c,0x20,0x00,0x01,0x0c,0x7e,0xff,0x00,0x0c,0x02,0x00,0x10,0x00,0x40,0x00,0x0c
+,0xc6,0x01,0x00,0x00,0xc0,0xf7,0xff,0x00,0xc0,0x02,0x00,0x10,0x00,0x40,0x00,0x00
+,0x33,0xc0,0x8e,0xd8,0x8d,0x3e,0x72,0x49,0x8d,0x36,0xb0,0x37,0xb9,0x14,0x00,0x8b
+,0x1e,0x30,0x34,0x89,0x5c,0x02,0x2e,0x8b,0x45,0x02,0x89,0x44,0x06,0x2e,0x8b,0x05
+,0x89,0x44,0x04,0x83,0xc7,0x04,0x83,0xc6,0x10,0xe2,0xe8,0xc6,0x06,0x9e,0x36,0x0e
+,0xe8,0xfd,0x26,0x68,0x83,0x28,0xa1,0xaa,0x02,0xcd,0x35,0x83,0x3e,0xa1,0x36,0x00
+,0x74,0x03,0xe9,0x3b,0x27,0x33,0xff,0x8e,0x06,0xa6,0x02,0x8b,0x36,0xa4,0x02,0x2e
+,0xff,0xa4,0x2e,0x30,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37,0x37,0x01,0x00,0xc6
+,0x06,0xca,0x34,0x01,0xe9,0x7d,0x19,0x80,0x3e,0xa0,0x36,0x08,0x74,0xe6,0x80,0x26
+,0x9e,0x36,0xff,0x75,0x1a,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x12,0xf7,0x06,0x9b
+,0x36,0x03,0x00,0x75,0x0a,0x83,0x0e,0x66,0x37,0x10,0xc6,0x06,0xa0,0x36,0x08,0xe9
+,0xfb,0x01,0x80,0x3e,0x9e,0x36,0x02,0x75,0xce,0xc6,0x06,0xa0,0x36,0x06,0xe9,0xec
+,0x01,0xc3,0xe9,0xe8,0x01,0x26,0xc7,0x06,0x0a,0x00,0x00,0x00,0x26,0xff,0x26,0x04
+,0x00,0xa1,0xd1,0x36,0x26,0x39,0x06,0x1a,0x00,0x75,0x22,0xa1,0xd3,0x36,0x26,0x39
+,0x06,0x1c,0x00,0x75,0x18,0xa1,0xd5,0x36,0x26,0x39,0x06,0x1e,0x00,0x75,0x0e,0x26
+,0xf7,0x06,0x0c,0x00,0x40,0x00,0x74,0x05,0x83,0x0e,0x66,0x37,0x40,0x81,0x0e,0xaf
+,0x36,0x00,0x10,0xa1,0xaf,0x36,0xe7,0x06,0x80,0x3e,0x9d,0x36,0x02,0x75,0x06,0xcd
+,0x34,0xe9,0xa2,0x1a,0xc3,0xf7,0x06,0x9b,0x36,0x10,0x00,0x75,0x54,0x26,0xf6,0x06
+,0x0a,0x00,0xff,0x75,0x4c,0x26,0xa0,0x19,0x00,0x24,0xc0,0x3c,0x40,0x75,0x11,0x80
+,0x3e,0x95,0x36,0x00,0x74,0x3b,0x26,0xc7,0x06,0x04,0x00,0xff,0xff,0xe9,0x31,0x00
+,0xe8,0xf1,0x04,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74,0x2f,0x8b,0xd8,0xb8,0x7d,0x03
+,0xcd,0x3a,0x8b,0xc3,0xc6,0x06,0xa0,0x36,0x06,0xf7,0x06,0x9b,0x36,0x02,0x00,0x75
+,0x05,0xc6,0x06,0xa0,0x36,0x04,0x81,0x0e,0x9b,0x36,0x80,0x00,0x83,0x26,0x9b,0x36
+,0xfc,0xe9,0x23,0x01,0xe8,0x87,0x1d,0xe9,0x33,0x01,0x50,0x26,0xa1,0x0c,0x00,0x25
+,0x07,0x00,0x3d,0x07,0x00,0x75,0x03,0xe9,0x84,0x00,0x3d,0x05,0x00,0x75,0x03,0xe9
+,0x7c,0x00,0x83,0x3e,0xe8,0x3a,0x04,0x74,0x75,0x83,0x3e,0xe8,0x3a,0x02,0x74,0x6e
+,0xf7,0x06,0xe6,0x34,0x18,0x80,0x75,0x03,0xe9,0x6a,0x00,0xf7,0x06,0xe6,0x34,0x00
+,0x80,0x74,0x35,0x26,0x80,0x3e,0x29,0x00,0x02,0x75,0x2d,0x51,0x56,0x57,0x8d,0x36
+,0x3e,0x34,0x8d,0x3e,0x20,0x00,0xb9,0x06,0x00,0xf3,0xa6,0x5f,0x5e,0x59,0x74,0x45
+,0x26,0xa1,0x20,0x00,0xa3,0x3e,0x34,0x26,0xa1,0x22,0x00,0xa3,0x40,0x34,0x26,0xa1
+,0x24,0x00,0xa3,0x42,0x34,0xe9,0x26,0x00,0xf7,0x06,0xe6,0x34,0x08,0x00,0x74,0x0b
+,0x26,0x80,0x3e,0x19,0x00,0x00,0x74,0x03,0xe9,0x13,0x00,0xf7,0x06,0xe6,0x34,0x10
+,0x00,0x74,0x12,0x26,0xa0,0x28,0x00,0xc0,0xe8,0x04,0x22,0xc0,0x74,0x07,0x26,0xc7
+,0x06,0x04,0x00,0xff,0xff,0x58,0x23,0xc0,0x74,0x03,0xe9,0x57,0xff,0x81,0x26,0x9b
+,0x36,0xff,0xfe,0x83,0xfe,0x06,0x7f,0x24,0x26,0xa1,0x20,0x00,0x3b,0x06,0xd1,0x36
+,0x75,0x1a,0x26,0xa1,0x22,0x00,0x3b,0x06,0xd3,0x36,0x75,0x10,0x26,0xa1,0x24,0x00
+,0x3b,0x06,0xd5,0x36,0x75,0x06,0x81,0x0e,0x9b,0x36,0x00,0x01,0x26,0xa1,0x20,0x00
+,0x25,0x7f,0xff,0xa3,0xb8,0x34,0x26,0xa1,0x22,0x00,0xa3,0xba,0x34,0x26,0xa1,0x24
+,0x00,0xa3,0xbc,0x34,0x8b,0xc6,0x86,0xc4,0xa3,0xc0,0x34,0xd1,0xe6,0x80,0xfc,0x09
+,0x74,0x03,0xe8,0xaa,0x1c,0x8b,0xc6,0x2e,0xff,0xa4,0x30,0x49,0x26,0xa1,0x0c,0x00
+,0x3d,0xff,0x7f,0x74,0x0f,0x26,0xff,0x26,0x04,0x00,0x8e,0x06,0x38,0x34,0xe8,0x36
+,0x06,0xcd,0x50,0xc3,0xe9,0x16,0x00,0xcd,0x34,0xe9,0x11,0x00,0xcd,0x34,0x89,0x36
+,0x3d,0x37,0xa1,0x9d,0x36,0xa3,0x3f,0x37,0xc6,0x06,0xa0,0x36,0x0c,0xe8,0x8e,0x00
+,0xa1,0x9f,0x36,0x22,0xe4,0x75,0x32,0xf7,0x06,0x4c,0x37,0x01,0x00,0x75,0x2a,0xf6
+,0x06,0x9d,0x36,0x80,0x74,0x07,0x88,0x26,0x9e,0x36,0xe9,0x31,0x00,0x3a,0x06,0x9d
+,0x36,0xa3,0x9d,0x36,0x74,0x28,0x8b,0xf0,0x2e,0xff,0xa4,0x0d,0x2b,0x44,0x29,0xee
+,0x42,0x19,0x44,0xcd,0x44,0x2f,0x45,0x5a,0x45,0x3a,0x26,0x9e,0x36,0x75,0x01,0xc3
+,0x32,0xc0,0x86,0xc4,0x8b,0xf0,0xa2,0x9e,0x36,0x2e,0xff,0xa4,0x20,0x49,0x8b,0x2e
+,0x99,0x36,0x23,0xed,0x75,0x01,0xc3,0xbf,0x01,0x00,0xbe,0x00,0x00,0x85,0xfd,0x75
+,0x1a,0x46,0xd1,0xe7,0xe9,0xf6,0xff,0x2a,0x00,0x29,0x00,0x28,0x00,0x27,0x00,0x25
+,0x00,0x05,0x00,0x07,0x00,0x26,0x00,0x06,0x00,0x20,0x00,0xf7,0xd7,0x21,0x3e,0x99
+,0x36,0xd1,0xe6,0x2e,0x8b,0xb4,0x47,0x2b,0xe9,0x4f,0xff,0xe9,0x56,0xff,0x80,0x26
+,0x9e,0x36,0xff,0x75,0x17,0xf7,0x06,0x4c,0x37,0x01,0x00,0x75,0x0f,0xf6,0x06,0x9d
+,0x36,0x80,0x74,0x08,0xf7,0x06,0x66,0x37,0xff,0xff,0x75,0x07,0xc7,0x06,0x66,0x37
+,0x00,0x00,0xc3,0xf7,0x06,0x41,0x37,0x01,0x00,0x75,0x0b,0xb8,0x7f,0x03,0xcd,0x39
+,0xc7,0x06,0x41,0x37,0x01,0x00,0x33,0xf6,0xb8,0x00,0x40,0x85,0x06,0x66,0x37,0x74
+,0x21,0x80,0xbc,0x54,0x37,0xff,0x74,0x04,0xfe,0x84,0x54,0x37,0x80,0xbc,0x96,0x34
+,0xff,0x74,0x04,0xfe,0x84,0x96,0x34,0x31,0x06,0x66,0x37,0x83,0x3e,0x66,0x37,0x00
+,0x74,0x05,0x46,0xd1,0xe8,0x73,0xd4,0xc3,0xa1,0xf4,0x33,0xa9,0x00,0x88,0x74,0x0b
+,0xa9,0x00,0x10,0x75,0x09,0x8b,0x1e,0x43,0x37,0xff,0xe3,0xe9,0xd7,0x00,0xc7,0x06
+,0x35,0x37,0x05,0x00,0xc7,0x06,0x43,0x37,0x1e,0x2c,0xf7,0x06,0xf4,0x33,0x00,0x08
+,0x74,0x06,0xc7,0x06,0x43,0x37,0x10,0x2c,0xb8,0x80,0x03,0xcd,0x39,0xe9,0xcd,0xfe
+,0xa9,0x00,0x08,0x74,0xd9,0xff,0x0e,0x35,0x37,0x75,0xed,0xe9,0x66,0x00,0xa9,0x00
+,0x08,0x75,0xcb,0xff,0x0e,0x35,0x37,0x75,0xdf,0x81,0x0e,0xc2,0x34,0xc0,0x00,0xf6
+,0x06,0x9d,0x36,0x80,0x74,0x48,0x81,0x0e,0x9b,0x36,0x00,0x80,0xf7,0x06,0x9b,0x36
+,0x01,0x00,0x74,0x1e,0xb8,0x7d,0x03,0xcd,0x3a,0x81,0x0e,0x9b,0x36,0x80,0x00,0x83
+,0x26,0x9b,0x36,0xfe,0xc7,0x06,0x0f,0x37,0x02,0x00,0xc6,0x06,0xa0,0x36,0x04,0xe9
+,0x7b,0xfe,0x80,0x3e,0xa0,0x36,0x04,0x75,0x07,0x83,0x3e,0x0f,0x37,0x01,0x75,0x05
+,0xc6,0x06,0xa0,0x36,0x06,0xc7,0x06,0x0f,0x37,0x02,0x00,0xe9,0x5f,0xfe,0xbe,0x02
+,0x00,0xe9,0x4a,0xfe,0x80,0x26,0x9e,0x36,0xff,0x75,0x3a,0xf6,0x06,0x9d,0x36,0x80
+,0x74,0x2d,0xf7,0x06,0x9b,0x36,0x00,0x20,0x75,0x2b,0xc6,0x06,0xa0,0x36,0x06,0xff
+,0x06,0x94,0x34,0x83,0x0e,0x66,0x37,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a
+,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x01,0xe9,0x06,0x00,0xbe
+,0x04,0x00,0xe9,0x09,0xfe,0x81,0x0e,0xaf,0x36,0x00,0x08,0xa1,0xaf,0x36,0xe7,0x06
+,0xe5,0x0a,0xa9,0x00,0x80,0x74,0x0e,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36
+,0xe7,0x06,0xe9,0x09,0xff,0xe9,0xf5,0xfd,0xc7,0x06,0x41,0x37,0x00,0x00,0x83,0x0e
+,0x99,0x36,0x02,0xe9,0xe7,0xfd,0x80,0x26,0x9e,0x36,0xff,0x75,0x1d,0xf7,0x06,0x9b
+,0x36,0x00,0x40,0x75,0x05,0x83,0x0e,0x99,0x36,0x08,0x83,0x0e,0x99,0x36,0x20,0x81
+,0x26,0x9b,0x36,0xff,0xbf,0xb8,0x85,0x03,0xcd,0x39,0xe9,0xc0,0xfd,0x80,0x3e,0x9e
+,0x36,0x06,0x74,0x07,0x80,0x3e,0x9e,0x36,0x0a,0x75,0x34,0xf6,0x06,0x9d,0x36,0x80
+,0x75,0x06,0xbe,0x07,0x00,0xe9,0x96,0xfd,0xc6,0x06,0xa0,0x36,0x04,0x83,0x3e,0x0f
+,0x37,0x02,0x74,0x1b,0xc7,0x06,0x0f,0x37,0x04,0x00,0x80,0x3e,0x9e,0x36,0x06,0x75
+,0x0e,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06,0x0f,0x37,0x03,0x00,0xe9
+,0x7b,0xfd,0x80,0x3e,0x9d,0x36,0x04,0x75,0x12,0x81,0x0e,0xc2,0x34,0x00,0x40,0xff
+,0x06,0x92,0x34,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x62,0xfd,0xbe,0x05,0x00,0xe9,0x4d
+,0xfd,0xf6,0x06,0x9d,0x36,0x80,0x75,0x19,0x83,0x0e,0xc2,0x34,0x04,0xbe,0x06,0x00
+,0xe9,0x3b,0xfd,0x80,0x26,0x9e,0x36,0xff,0x75,0xc5,0xff,0x06,0x31,0x37,0xe9,0x00
+,0x00,0x83,0x26,0xc2,0x34,0xbf,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x2f,0xfd,0xe5,0x0a
+,0x50,0x25,0xc3,0xbf,0xe7,0x0a,0x58,0x80,0x26,0x9e,0x36,0xff,0x75,0x0d,0xa9,0x00
+,0x40,0x75,0x08,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x12,0xfd,0xb8,0x83,0x03,0xcd,0x39
+,0xc3,0xb8,0x7c,0x03,0xcd,0x39,0xf7,0x06,0xf4,0x33,0x00,0x10,0x75,0x09,0xc7,0x06
+,0x33,0x37,0x02,0x00,0xe9,0xf6,0xfc,0xff,0x0e,0x33,0x37,0x74,0x03,0xe9,0xed,0xfc
+,0xff,0x06,0x8e,0x34,0xe8,0xf7,0x19,0x83,0x0e,0xc2,0x34,0x08,0xbe,0x03,0x00,0xe9
+,0xcc,0xfc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x04,0x04,0x05
+,0x04,0x04,0x04,0x00,0x03,0x00,0x03,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x04,0x00,0x08,0x08,0x05,0x08,0x08,0x08,0x00,0x03,0x00,0x03,0x03,0x00,0x00
+,0x02,0x04,0x04,0x04,0x04,0x00,0x00,0x08,0x00,0x00,0x0a,0x14,0x00,0x00,0x1a,0x00
+,0x1c,0x00,0x1e,0x20,0x00,0x00,0x04,0x41,0x06,0x0b,0x08,0xc2,0xff,0xe7,0x04,0x03
+,0x06,0x04,0x04,0x05,0x04,0x06,0x04,0x87,0x04,0x03,0x06,0x04,0x04,0x85,0x4e,0xa2
+,0x04,0xcf,0x04,0xcd,0xc7,0x06,0xa2,0x37,0x00,0x00,0xc7,0x06,0xa6,0x37,0x00,0x00
+,0x26,0xa1,0x20,0x00,0x25,0x7f,0xff,0xa3,0xf5,0x36,0x26,0xa1,0x22,0x00,0xa3,0xf7
+,0x36,0x26,0xa1,0x24,0x00,0xa3,0xf9,0x36,0xe8,0x3b,0x19,0x8b,0xf0,0x26,0x8b,0x0e
+,0x0e,0x00,0x2b,0xc8,0x83,0xe9,0x0e,0xb8,0x01,0x80,0x83,0xf9,0x04,0x7c,0x51,0x26
+,0x8a,0x54,0x28,0x88,0x16,0x1c,0x37,0x40,0x26,0x8b,0x6c,0x26,0x86,0xcd,0x3b,0xcd
+,0x86,0xcd,0x89,0x0e,0xa4,0x37,0x75,0x38,0x40,0x32,0xff,0x26,0x8a,0x5c,0x29,0x80
+,0xfb,0x15,0x77,0x25,0x80,0xfb,0x0a,0x74,0x20,0x80,0xfb,0x01,0x74,0x1b,0xb8,0x04
+,0x80,0x2e,0x3a,0x97,0x02,0x2e,0x74,0x07,0x2e,0x3a,0x97,0x18,0x2e,0x75,0x11,0x33
+,0xc0,0x80,0xfb,0x09,0x75,0x4f,0x8b,0xf3,0xc3,0x26,0xc7,0x06,0x04,0x00,0xff,0xff
+,0x50,0x52,0xa1,0xa4,0x37,0x86,0xc4,0x26,0x3b,0x06,0x26,0x00,0x7c,0x32,0x26,0x81
+,0x3e,0x26,0x00,0x00,0x04,0x7e,0x29,0x8d,0x74,0x2a,0x26,0x8b,0x14,0x22,0xd2,0x74
+,0x1f,0x80,0xe6,0xbf,0x80,0xfe,0x09,0x75,0x17,0xc7,0x06,0xa2,0x37,0x01,0x00,0x80
+,0xfa,0x04,0x75,0x0c,0x26,0x8b,0x44,0x02,0xa3,0x03,0x37,0x86,0xc4,0xa3,0xd0,0x34
+,0x5a,0x58,0xe9,0xb1,0xff,0xbd,0x72,0x37,0x2e,0x8a,0x87,0x2e,0x2e,0x22,0xc0,0x74
+,0x16,0x05,0x44,0x2e,0x8b,0xf8,0x2e,0x8b,0x05,0x3e,0x89,0x46,0x00,0x83,0xc5,0x02
+,0x83,0xc7,0x02,0x22,0xe4,0x7d,0xef,0x8d,0x74,0x2a,0x83,0xe9,0x04,0x75,0x03,0xe9
+,0xa1,0x00,0x26,0x8b,0x14,0x22,0xd2,0x75,0x03,0xe9,0x7c,0x00,0xc7,0x06,0xa6,0x37
+,0x01,0x00,0xbf,0x72,0x37,0x8b,0x05,0x83,0xc7,0x02,0x80,0xe6,0xbf,0x80,0xe4,0x3f
+,0x80,0xfe,0x09,0x75,0x22,0x80,0xfa,0x04,0x75,0x5e,0xc7,0x06,0xa2,0x37,0x01,0x00
+,0x26,0x8b,0x44,0x02,0xa3,0x03,0x37,0x86,0xc4,0xa3,0xd0,0x34,0x86,0xc4,0xc7,0x06
+,0xa6,0x37,0x00,0x00,0xe9,0x47,0x00,0x3b,0xfd,0x7e,0x15,0x26,0x8b,0x04,0xa8,0x40
+,0x74,0x06,0xb8,0x07,0x80,0xe9,0x38,0xff,0x32,0xc0,0x26,0x8b,0x04,0xe9,0x2e,0x00
+,0x3a,0xf4,0x75,0xb1,0xc7,0x45,0xfe,0x00,0x00,0x80,0xfe,0x22,0x75,0x0d,0x3a,0xd0
+,0x77,0x16,0xc7,0x06,0xa6,0x37,0x00,0x00,0xe9,0x13,0x00,0x3a,0xd0,0x75,0x09,0xc7
+,0x06,0xa6,0x37,0x00,0x00,0xe9,0x06,0x00,0xb8,0x05,0x80,0xe9,0x02,0xff,0x32,0xf6
+,0x03,0xf2,0x2b,0xca,0xb8,0x05,0x80,0x23,0xc9,0x76,0x03,0xe9,0x64,0xff,0x74,0x03
+,0xe9,0xed,0xfe,0x33,0xc0,0xbf,0x72,0x37,0x8b,0x15,0x47,0x47,0x3b,0xfd,0x7f,0x1b
+,0xf6,0xc6,0x80,0x74,0x16,0xf7,0x06,0xa6,0x37,0x01,0x00,0x74,0x06,0xb8,0x08,0x80
+,0xe9,0xc3,0xfe,0xf6,0xc6,0x40,0x74,0xe0,0xb8,0x07,0x80,0xe9,0xb8,0xfe,0x7d,0x42
+,0xa3,0x45,0x44,0x29,0x44,0x29,0xb7,0x28,0xe2,0x28,0xee,0x2b,0xf2,0x28,0xf5,0x28
+,0x01,0x29,0xac,0x2a,0x44,0x29,0x44,0x29,0x44,0x29,0x44,0x29,0x44,0x29,0x00,0x00
+,0x73,0x36,0x00,0x00,0x03,0x36,0xc5,0x35,0x83,0x35,0x45,0x35,0x07,0x35,0xd2,0x34
+,0x45,0x34,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0xa6,0x38,0x00,0x00,0xe0,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xf2,0x33,0x00,0x00,0xa6,0x33,0x60,0x33,0xfd,0x32,0xbc,0x32,0x77,0x32,0x3c,0x32
+,0xfb,0x31,0x6a,0x31,0x0a,0x31,0xe0,0xe0,0x10,0x10,0x10,0xe0,0xe0,0xe0,0xe0,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0xe0,0x00,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0
+,0xe0,0x33,0xff,0x26,0xf6,0x06,0x1a,0x00,0x80,0x74,0x1b,0x26,0x80,0x26,0x1a,0x00
+,0x7f,0x26,0x8b,0x3e,0x26,0x00,0x83,0xe7,0x1f,0x74,0x0b,0x26,0x80,0x0e,0x20,0x00
+,0x80,0x26,0x01,0x3e,0x0e,0x00,0xc3,0x60,0x2e,0x8b,0x84,0xa6,0x30,0x26,0xa3,0x18
+,0x00,0xd1,0xe6,0x2e,0xff,0x94,0x50,0x30,0x61,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4
+,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x16,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26
+,0xc6,0x06,0x19,0x00,0x00,0xe8,0xbf,0x05,0xe8,0x98,0x05,0x26,0xc7,0x06,0x26,0x00
+,0x00,0x08,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29,0x00,0x2a,0xbf,0x2a
+,0x00,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x2a,0xa1,0x93,0x37,0x33,0xdb,0xa9
+,0x40,0x00,0x75,0x02,0xb3,0x01,0xa9,0x00,0x10,0x74,0x02,0xb7,0x88,0xa9,0x00,0x08
+,0x74,0x03,0x80,0xcf,0x44,0x26,0x89,0x5d,0x02,0xc3,0x83,0x0e,0xc2,0x34,0x20,0x26
+,0xc7,0x06,0x04,0x00,0x6b,0x2b,0x26,0xc7,0x06,0x0e,0x00,0x30,0x00,0x26,0xc7,0x06
+,0x06,0x00,0x0a,0x00,0x26,0xc7,0x06,0x0a,0x00,0x04,0x00,0x26,0xc6,0x06,0x19,0x00
+,0x00,0xe8,0x69,0x05,0xe8,0x2c,0x05,0x26,0xc7,0x06,0x26,0x00,0x00,0x22,0x26,0xc6
+,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x29,0xbf,0x2a,0x00,0x26,0xc6,0x05
+,0x08,0x26,0xc6,0x45,0x01,0x2d,0x8d,0x7d,0x02,0xbe,0x54,0x37,0xb9,0x03,0x00,0xf3
+,0xa5,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x2e,0x8d,0x7d,0x02,0xbe,0x5a,0x37
+,0xb9,0x03,0x00,0xf3,0xa5,0xe8,0xd4,0x05,0xe8,0x64,0x05,0xb9,0x06,0x00,0xbe,0x54
+,0x37,0x8d,0x2e,0x2c,0x00,0x26,0x8b,0x46,0x00,0x29,0x04,0x83,0xc6,0x02,0x83,0xc5
+,0x02,0x83,0xf9,0x04,0x75,0x02,0x45,0x45,0xe2,0xeb,0xc3,0x26,0xc7,0x06,0x04,0x00
+,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x24,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00
+,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0xe4,0x04,0xe8,0xa7,0x04,0x26,0xc7,0x06,0x26
+,0x00,0x00,0x16,0x26,0xc6,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x28,0xbf
+,0x2a,0x00,0xe8,0x5b,0x06,0xe8,0x74,0x05,0xe8,0x04,0x05,0xc3,0x26,0xc7,0x06,0x04
+,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1a,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
+,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0xa3,0x04,0xe8,0x66,0x04,0x26,0xc7,0x06
+,0x26,0x00,0x00,0x0c,0x26,0xc6,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x27
+,0xbf,0x2a,0x00,0xe8,0x21,0x05,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7
+,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0a,0x00,0x26,0xc7,0x06,0x0a
+,0x00,0x04,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x4b,0x04,0xe8,0x24,0x04,0x26
+,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29
+,0x00,0x26,0xbf,0x2a,0x00,0xe8,0xf4,0x04,0xe8,0x84,0x04,0xc3,0x26,0xc7,0x06,0x04
+,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
+,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x0d,0x04,0xe8,0xe6,0x03,0x26,0xc7,0x06
+,0x26,0x00,0x00,0x26,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29,0x00,0x25
+,0xbf,0x2a,0x00,0xe8,0xb6,0x04,0xe8,0x46,0x04,0xe8,0xfa,0x04,0xc3,0x26,0xc7,0x06
+,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x38,0x00,0xa1,0xa2,0x37,0x50,0x0b
+,0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
+,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x99,0x03,0xe8,0xa4,0xfd,0x26,0xc7,0x45
+,0x26,0x00,0x2a,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c
+,0x37,0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x24,0x83,0xc7,0x2a
+,0xe8,0x29,0x04,0xe8,0xa0,0x04,0xe8,0x22,0x05,0xe8,0xf8,0x03,0xe8,0x09,0x04,0xc3
+,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x32,0x00,0x26,0xc7
+,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x45,0x03,0xe8,0x50
+,0xfd,0x26,0xc7,0x45,0x26,0x00,0x24,0xa1,0x1c,0x37,0xc1,0xe0,0x04,0x26,0x88,0x45
+,0x28,0x26,0xc6,0x45,0x29,0x23,0x83,0xc7,0x2a,0xe8,0xe0,0x03,0xe8,0x6c,0x04,0xe8
+,0x8a,0x04,0xe8,0x9c,0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06
+,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00
+,0x00,0xe8,0xff,0x02,0xe8,0x0a,0xfd,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c,0x37
+,0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x22,0x83,0xc7,0x2a,0xe8
+,0x9a,0x03,0xe8,0xc7,0x03,0xe8,0x57,0x03,0xe8,0xf8,0x03,0xe8,0x78,0x04,0xe8,0x8a
+,0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0x74,0x45,0x26,0xc7,0x06,0x0e,0x00,0x3e,0x00
+,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc7,0x06,0x0a,0x00,0x04,0x00,0x26,0xc6
+,0x06,0x19,0x00,0x00,0xe8,0xfc,0x02,0xe8,0xa9,0x02,0x83,0x3e,0x8d,0x37,0x03,0x75
+,0x01,0x90,0x26,0xc7,0x06,0x26,0x00,0x00,0x30,0x26,0xc6,0x06,0x28,0x00,0x50,0x26
+,0xc6,0x06,0x29,0x00,0x20,0xbf,0x2a,0x00,0xe8,0xd0,0x03,0xe8,0x01,0x03,0xe8,0xb5
+,0x03,0xe8,0x9f,0x03,0xc3,0x26,0xc7,0x06,0x04,0x00,0x61,0x43,0xb9,0xf0,0x00,0x83
+,0xe9,0x02,0x26,0x89,0x0e,0x0e,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6
+,0x06,0x19,0x00,0x00,0x26,0xc7,0x06,0x1a,0x00,0x00,0x00,0x26,0xc7,0x06,0x1c,0x00
+,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x00,0xe8,0x47,0x02,0x83,0xe9,0x0e,0x86
+,0xcd,0x26,0x89,0x0e,0x26,0x00,0x86,0xcd,0x26,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6
+,0x06,0x29,0x00,0x08,0xbf,0x2a,0x00,0x83,0xe9,0x04,0x26,0x89,0x0d,0x26,0xc6,0x45
+,0x01,0x26,0x8d,0x7d,0x02,0x83,0xe9,0x02,0xbb,0x01,0x00,0xb8,0x30,0x30,0x4b,0x75
+,0x17,0xbb,0x0a,0x00,0x8a,0xc4,0x26,0x88,0x05,0xb0,0x31,0x80,0xc4,0x01,0x80,0xfc
+,0x3a,0x75,0x0a,0xb4,0x61,0xe9,0x05,0x00,0x26,0x88,0x05,0x04,0x01,0x47,0x49,0x75
+,0xdd,0xc3,0x26,0xc7,0x06,0x04,0x00,0x04,0x45,0x26,0xc7,0x06,0x0e,0x00,0x12,0x00
+,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x01,0xe8,0xe5,0x01
+,0xe8,0xd0,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x04,0x26,0xc6,0x06,0x28,0x00,0x00
+,0x26,0xc6,0x06,0x29,0x00,0x07,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7
+,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19
+,0x00,0x06,0xe8,0x04,0x02,0xe8,0x9b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26
+,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x06,0xbf,0x2a,0x00,0xe8,0x6b
+,0x02,0xe8,0xfb,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e
+,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x05
+,0xe8,0xc6,0x01,0xe8,0x5d,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06
+,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x05,0xbf,0x2a,0x00,0xe8,0x2d,0x02,0xe8
+,0xbd,0x01,0xc3,0xff,0x06,0x82,0x34,0x26,0xc7,0x06,0x04,0x00,0x3d,0x41,0x26,0xc7
+,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0e,0x00,0x26,0xc6,0x06,0x19
+,0x00,0x04,0xe8,0x84,0x01,0xe8,0x1b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26
+,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x04,0xbf,0x2a,0x00,0xe8,0xeb
+,0x01,0xe8,0x7b,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7,0x06,0x0e
+,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19,0x00,0x03
+,0xe8,0x46,0x01,0xe8,0xdd,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06
+,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x03,0xbf,0x2a,0x00,0xe8,0xad,0x01,0xe8
+,0x3d,0x01,0xc3,0xff,0x06,0x84,0x34,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7
+,0x06,0x0e,0x00,0x24,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19
+,0x00,0x02,0xe8,0x04,0x01,0xe8,0x9b,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x16,0x26
+,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x02,0xbf,0x2a,0x00,0x26,0xc6
+,0x05,0x04,0x26,0xc6,0x45,0x01,0x01,0xa1,0x0f,0x37,0x86,0xe0,0xf6,0x06,0x6f,0x37
+,0x01,0x75,0x0f,0x39,0x06,0xcc,0x34,0x74,0x09,0x8b,0xd8,0xb8,0x89,0x03,0xcd,0x39
+,0x8b,0xc3,0xa3,0xcc,0x34,0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xe8,0x3d,0x01,0xe8
+,0xcd,0x00,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1c
+,0x00,0xa1,0xa2,0x37,0x50,0x0b,0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x18,0x00
+,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x23,0x00
+,0xe8,0x2e,0xfa,0x26,0xc7,0x45,0x26,0x00,0x0e,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7
+,0x45,0x26,0x00,0x0a,0x26,0xc6,0x45,0x29,0x00,0x83,0xc7,0x2a,0xe8,0xbd,0x00,0xe8
+,0xff,0x00,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x20,0x00,0xf3
+,0xa5,0x59,0x5f,0x5e,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x1a
+,0x00,0xf3,0xa5,0x59,0x5f,0x5e,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7
+,0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x10,0xc3,0x26,0xc7,0x06
+,0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00
+,0x00,0x08,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00
+,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x02,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00
+,0x26,0xc7,0x06,0x1c,0x00,0xff,0xff,0x26,0xc7,0x06,0x1e,0x00,0xff,0xff,0xc3,0x26
+,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02,0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03
+,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x06,0xa1,0x0d,0x37
+,0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01
+,0x07,0xa1,0x0b,0x37,0x26,0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0xa1,0xa2,0x37,0x0b
+,0xc0,0x74,0x13,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x09,0xa1,0x03,0x37,0x26
+,0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02
+,0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06
+,0x26,0xc6,0x45,0x01,0x0b,0x8d,0x7d,0x02,0xbe,0xef,0x36,0xb9,0x02,0x00,0xf3,0xa5
+,0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x20,0xa1,0x68,0x37,0x26,0x89,0x45
+,0x02,0xa1,0x6a,0x37,0x26,0x88,0x65,0x05,0xc1,0xe0,0x04,0x26,0x88,0x45,0x04,0x83
+,0xc7,0x06,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45,0x02
+,0x00,0x00,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x14,0x26,0xc6,0x45,0x01,0x22,0x8d
+,0x7d,0x02,0xbe,0x1f,0x37,0xb9,0x09,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x0c,0x26
+,0xc6,0x45,0x01,0x23,0x8d,0x7d,0x02,0x1e,0x0e,0x1f,0x8d,0x36,0x40,0x54,0xb9,0x03
+,0x00,0xf3,0xa5,0x33,0xc0,0xb9,0x02,0x00,0xf3,0xab,0x1f,0xc3,0x26,0xc6,0x05,0x08
+,0x26,0xc6,0x45,0x01,0x28,0x8d,0x7d,0x02,0xbe,0xd1,0x36,0xb9,0x03,0x00,0xf3,0xa5
+,0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x29,0xa1,0xc2,0x34,0x86,0xe0,0x26
+,0x89,0x45,0x02,0xa1,0x9b,0x36,0x26,0x89,0x45,0x04,0x26,0x88,0x45,0x06,0x26,0x88
+,0x45,0x07,0x8d,0x7d,0x08,0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x2b,0x8d
+,0x7d,0x02,0xbe,0xbb,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06,0x26
+,0xc6,0x45,0x01,0x2c,0x8d,0x7d,0x02,0xbe,0xe5,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3
+,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x30,0xa1,0x37,0x37,0x86,0xe0,0x26,0x89
+,0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc7,0x06,0x0e,0x00,0x1e,0x00,0x26,0xc7,0x06
+,0x06,0x00,0x02,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x6c,0xfe,0xe8,0x03,0xfe
+,0x26,0xc7,0x06,0x26,0x00,0x00,0x10,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06
+,0x29,0x00,0x11,0xbf,0x2a,0x00,0xe8,0x35,0x00,0xe8,0x45,0x00,0xe8,0x55,0x00,0xc3
+,0x26,0xc7,0x06,0x0e,0x00,0x12,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6
+,0x06,0x19,0x00,0x00,0xe8,0x32,0xfe,0xe8,0xc9,0xfd,0x26,0xc7,0x06,0x26,0x00,0x00
+,0x04,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06,0x29,0x00,0x13,0xc3,0x26,0xc6
+,0x05,0x04,0x26,0xc6,0x45,0x01,0x0c,0x26,0xc7,0x45,0x02,0x00,0x01,0x83,0xc7,0x04
+,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x0e,0x26,0xc7,0x45,0x02,0x00,0x02
+,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45
+,0x02,0x00,0x00,0x83,0xc7,0x04,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xb3,0x39,0xc9,0x39,0x83,0x3a,0xb3,0x39,0xb3,0x39,0xb3,0x39,0x1c,0x3a,0x1c,0x3a
+,0xa3,0xb6,0x34,0xa1,0xe9,0x36,0xa3,0x11,0x37,0xa3,0xd2,0x34,0xa1,0xeb,0x36,0xa3
+,0x13,0x37,0xa3,0xd4,0x34,0xa1,0xed,0x36,0xa3,0x15,0x37,0xa3,0xd6,0x34,0xa1,0x01
+,0x37,0xa3,0xce,0x34,0xa1,0xf7,0x36,0xa3,0x17,0x37,0xa3,0xdc,0x34,0xa1,0xf9,0x36
+,0xa3,0x19,0x37,0xa3,0xde,0x34,0xf7,0x06,0x9b,0x36,0x02,0x00,0x75,0x0c,0x33,0xc0
+,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x50,0x39,0xe9,0x0f,0x01,0xbe,0x07,0x00
+,0xe9,0x19,0xf1,0xf6,0x06,0x9d,0x36,0x80,0x74,0xf3,0xc6,0x06,0xa0,0x36,0x02,0xc6
+,0x06,0x6e,0x37,0x08,0xc6,0x06,0x70,0x37,0x02,0xb8,0x88,0x03,0xcd,0x39,0xf6,0x06
+,0x6f,0x37,0x01,0x75,0x4a,0xa1,0xd1,0x36,0x3a,0x06,0xe9,0x36,0x75,0x41,0x3a,0x26
+,0xea,0x36,0x75,0x3b,0xa1,0xd3,0x36,0x3a,0x06,0xeb,0x36,0x75,0x32,0x3a,0x26,0xec
+,0x36,0x75,0x2c,0xa1,0xd5,0x36,0x3a,0x06,0xed,0x36,0x75,0x23,0x3a,0x26,0xee,0x36
+,0x75,0x1d,0xc6,0x06,0x70,0x37,0x02,0xfe,0x0e,0x6e,0x37,0x75,0x0f,0xb8,0x88,0x03
+,0xcd,0x3a,0x83,0x0e,0x9b,0x36,0x12,0xc6,0x06,0xa0,0x36,0x0c,0xe9,0xa8,0xf0,0xa1
+,0x05,0x37,0x26,0x3b,0x06,0x20,0x00,0x75,0x40,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22
+,0x00,0x75,0x36,0xa1,0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x2c,0xa0,0x9e,0x36
+,0x3c,0x02,0x75,0x08,0x26,0xf6,0x06,0x18,0x00,0x08,0x75,0x47,0xc6,0x06,0x6e,0x37
+,0x08,0xfe,0x0e,0x70,0x37,0x75,0x1c,0xc6,0x06,0x70,0x37,0x02,0xe5,0x02,0x0d,0x01
+,0x04,0x25,0xef,0xff,0xe7,0x02,0xe9,0x5e,0xf0,0xc6,0x06,0x70,0x37,0x02,0xc6,0x06
+,0x6e,0x37,0x08,0xe5,0x02,0x25,0xff,0xfb,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02
+,0xe9,0x44,0xf0,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x25,0x26,0xf6,0x06,0x18,0x00
+,0x08,0x75,0xed,0x81,0x26,0x9b,0x36,0x7f,0xff,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x84
+,0x03,0xcd,0x3a,0xc6,0x06,0xa0,0x36,0x06,0x83,0x26,0xc2,0x34,0xaf,0xe9,0x17,0xf0
+,0xa1,0x01,0x37,0x3a,0x26,0x0f,0x37,0x7f,0xc7,0xe9,0xf7,0xfe,0x83,0x26,0x9b,0x36
+,0xec,0xe8,0x2a,0x0d,0x81,0x0e,0x9b,0x36,0x80,0x00,0xbb,0xff,0x7f,0xcd,0x53,0xc6
+,0x06,0xa0,0x36,0x02,0xe9,0xf0,0xef,0x83,0x0e,0x9b,0x36,0x11,0xc6,0x06,0xa0,0x36
+,0x0c,0xe9,0xf9,0xef,0x44,0x3b,0x2c,0x3b,0xc7,0x2a,0x6b,0x3b,0x44,0x3b,0xc7,0x2a
+,0xc7,0x2a,0xc7,0x2a,0xa3,0xb6,0x34,0x81,0x0e,0xc2,0x34,0x00,0x20,0xf7,0x06,0x41
+,0x37,0x01,0x00,0x74,0x1b,0x8c,0xc3,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f,0x03
+,0xcd,0x3a,0x33,0xc0,0x8e,0xc0,0xbf,0x54,0x37,0xb9,0x06,0x00,0xf3,0xab,0x8e,0xc3
+,0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0xe4,0x3a,0xf7,0x06,0x9b,0x36
+,0x00,0x01,0x75,0x21,0x83,0x26,0xc2,0x34,0xbf,0xa1,0xa9,0x36,0xe7,0x00,0xa1,0x9b
+,0x36,0xe9,0x09,0x00,0xa1,0x9b,0x36,0x81,0x26,0x9b,0x36,0xff,0xdf,0xa9,0x00,0x20
+,0x75,0x06,0xe9,0x6e,0x00,0xe9,0x6f,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37
+,0x37,0x01,0x00,0xc6,0x06,0xca,0x34,0x01,0xe9,0x58,0x00,0x83,0x0e,0x9b,0x36,0x40
+,0xe8,0x58,0x00,0xa1,0x05,0x37,0x3b,0x06,0xe9,0x36,0x75,0x37,0xa1,0x07,0x37,0x3b
+,0x06,0xeb,0x36,0x75,0x2e,0xa1,0x09,0x37,0x3b,0x06,0xed,0x36,0x75,0x25,0xfe,0x0e
+,0x71,0x37,0x75,0x1c,0xb8,0x87,0x03,0xcd,0x3a,0x83,0x0e,0x99,0x36,0x10,0xa1,0x50
+,0x37,0xc7,0x06,0x50,0x37,0x00,0x00,0x09,0x06,0x99,0x36,0xc6,0x06,0xa0,0x36,0x08
+,0xe9,0x14,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37,0x37,0x03,0x00,0xc6,0x06
+,0xca,0x34,0x03,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0xfc,0xee,0xa1,0xd1,0x36,0x26,0x3b
+,0x06,0x20,0x00,0x75,0x15,0xa1,0xd3,0x36,0x26,0x3b,0x06,0x22,0x00,0x75,0x12,0xa1
+,0xd5,0x36,0x26,0x3b,0x06,0x24,0x00,0x75,0x0f,0xc3,0x8d,0x36,0x20,0x00,0xe9,0x0b
+,0x00,0x8d,0x36,0x22,0x00,0xe9,0x04,0x00,0x8d,0x36,0x24,0x00,0x83,0xc4,0x02,0xf7
+,0x06,0xe6,0x34,0x01,0x00,0x74,0x15,0x26,0x3a,0x04,0x77,0x08,0x72,0x0e,0x26,0x3a
+,0x64,0x01,0x72,0x08,0xc6,0x06,0xa0,0x36,0x06,0xe9,0xab,0xee,0xe8,0x7c,0x0a,0x8c
+,0xc0,0x3d,0xff,0xff,0x74,0x1b,0x26,0xc6,0x06,0x18,0x00,0x10,0x26,0xc7,0x06,0x04
+,0x00,0x49,0x3c,0x26,0xc7,0x06,0x06,0x00,0x0c,0x00,0xcd,0x50,0xb9,0x4e,0x00,0xe2
+,0xfe,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0x94,0xee,0xe9,0x7b,0xee,0x8f,0x3c,0x06,0x3d
+,0x06,0x3d,0x06,0x3d,0xd2,0x3c,0xea,0x3c,0x06,0x3d,0x06,0x3d,0xa3,0xb6,0x34,0x81
+,0x26,0xc2,0x34,0xaf,0xdf,0xc7,0x06,0x4c,0x37,0x00,0x00,0xb8,0x8a,0x03,0xcd,0x3a
+,0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74,0x05,0xc6,0x06
+,0x9f,0x36,0x06,0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x4c,0x3c,0xf7
+,0x06,0x9b,0x36,0x00,0x20,0x75,0x0e,0x81,0x26,0x9b,0x36,0xff,0xbf,0xb8,0x8b,0x03
+,0xcd,0x3a,0xe9,0x54,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x03,0xe9,0x17,0xee
+,0xc7,0x06,0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04
+,0x83,0x0e,0x50,0x37,0x04,0xf6,0x06,0x9d,0x36,0x80,0x75,0x2a,0xe8,0x1f,0x0b,0xe9
+,0x27,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x75,0xd3,0xc7,0x06,0x37,0x37,0x02,0x00
+,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0xc6,0x06,0xa0,0x36,0x00,0xf6
+,0x06,0x9d,0x36,0x80,0x74,0x03,0xe8,0xde,0x0a,0x81,0x26,0x9b,0x36,0x7c,0xff,0xbb
+,0xff,0xff,0xcd,0x53,0xcd,0x54,0xe9,0xbe,0xed,0xa3,0xb6,0x34,0xe8,0xad,0x01,0xb8
+,0x86,0x03,0xcd,0x39,0xc7,0x06,0x4c,0x37,0x00,0x00,0x81,0x26,0xc2,0x34,0xaf,0xdf
+,0xf6,0x06,0x9d,0x36,0x80,0x74,0x34,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x56,0xf7
+,0x06,0x9b,0x36,0x00,0x01,0x74,0x27,0xe8,0x35,0x01,0x72,0x1c,0xbe,0x00,0x40,0x85
+,0x36,0xc2,0x34,0x75,0x08,0x09,0x36,0xc2,0x34,0xff,0x06,0x92,0x34,0xe8,0x8b,0x01
+,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x6c,0xed,0xe9,0xb5,0x00,0xc7,0x06
+,0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0x83,0x0e
+,0x50,0x37,0x04,0x80,0x3e,0x9e,0x36,0x08,0x74,0x03,0xe8,0x5a,0x0a,0xe8,0xef,0x00
+,0x72,0xd6,0xe9,0xc8,0xff,0x80,0x3e,0x9e,0x36,0x0a,0x75,0x12,0xc6,0x06,0xa0,0x36
+,0x00,0xf7,0x06,0x9b,0x36,0x08,0x00,0x74,0x02,0xcd,0x54,0xe8,0x39,0x0a,0x81,0x26
+,0x9b,0x36,0xff,0xbf,0xe8,0xc8,0x00,0x72,0xaf,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x9c
+,0xff,0xf6,0x06,0x9e,0x36,0xff,0x75,0x58,0xa3,0xb6,0x34,0xe8,0xfe,0x00,0x81,0x26
+,0xc2,0x34,0xff,0xbf,0xf6,0x06,0x9d,0x36,0x80,0x74,0x48,0xf7,0x06,0x9b,0x36,0x00
+,0x20,0x74,0x22,0xf7,0x06,0x9b,0x36,0x00,0x40,0x75,0x08,0xe8,0x91,0x00,0x72,0x30
+,0xe9,0x22,0x00,0x26,0xa1,0x0c,0x00,0xa9,0x60,0x00,0x75,0x24,0x81,0x0e,0x66,0x37
+,0x00,0x08,0xe9,0xd2,0xec,0xc7,0x06,0x4c,0x37,0x00,0x00,0xe8,0x71,0x00,0x72,0x10
+,0xb8,0x8b,0x03,0xcd,0x39,0xe8,0xd3,0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00
+,0xe9,0xb4,0xec,0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74
+,0x46,0xc6,0x06,0x9f,0x36,0x06,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x0c,0x80,0x3e
+,0x9d,0x36,0x08,0x75,0x05,0xc6,0x06,0x9f,0x36,0x0a,0xe8,0x32,0x00,0x72,0xd1,0xe8
+,0x99,0x00,0x80,0x3e,0x9d,0x36,0x08,0x75,0x13,0x81,0x0e,0x99,0x36,0x80,0x00,0xf7
+,0x06,0x9b,0x36,0x00,0x20,0x75,0x08,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x68,0xec,0xc6
+,0x06,0x9f,0x36,0x0a,0xe9,0x60,0xec,0xb8,0x86,0x03,0xcd,0x3a,0xe9,0x58,0xec,0x26
+,0xa1,0x0c,0x00,0xa9,0x60,0x00,0x74,0x08,0x81,0x26,0xc2,0x34,0xff,0xbf,0xf9,0xc3
+,0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x13,0x81,0x0e,0x66,0x37,0x00,0x08,0xe8,0x4a
+,0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xf9,0xc3,0x81,0x0e,0x9b,0x36,0x00
+,0x40,0x80,0x26,0x6f,0x37,0xfe,0x81,0x26,0x9b,0x36,0x7f,0xff,0xc6,0x06,0xa0,0x36
+,0x00,0xf8,0xc3,0x81,0x0e,0x99,0x36,0x00,0x01,0xe9,0x21,0xec,0x26,0xa1,0x20,0x00
+,0xa3,0xfb,0x36,0xa3,0xaa,0x34,0x26,0xa1,0x22,0x00,0xa3,0xfd,0x36,0xa3,0xac,0x34
+,0x26,0xa1,0x24,0x00,0xa3,0xff,0x36,0xa3,0xae,0x34,0xc3,0xa1,0x05,0x37,0x26,0x3b
+,0x06,0x20,0x00,0x75,0x19,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22,0x00,0x75,0x0f,0xa1
+,0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x05,0xe8,0x02,0x00,0xf8,0xc3,0x51,0x1e
+,0x06,0x8b,0xc7,0x8d,0x36,0x20,0x00,0xbf,0x05,0x37,0xb9,0x03,0x00,0x1e,0x06,0x1f
+,0x07,0xf3,0xa5,0x8b,0xf8,0x8d,0x36,0x20,0x00,0xbf,0xa0,0x34,0xb9,0x03,0x00,0xf3
+,0xa5,0x07,0x1f,0x59,0x8b,0xf8,0xa1,0x07,0x37,0xa3,0xa6,0x34,0xa1,0x09,0x37,0xa3
+,0xa8,0x34,0xf9,0xc3,0xc6,0x06,0xb6,0x34,0x01,0xe9,0x8b,0xeb,0xe8,0x87,0x08,0x8b
+,0xf0,0x05,0x12,0x00,0x26,0x29,0x06,0x0e,0x00,0x26,0x8b,0x44,0x2a,0x26,0x3a,0x06
+,0x0e,0x00,0x75,0x5b,0x26,0x83,0x2e,0x0e,0x00,0x02,0x80,0xfc,0x27,0x75,0x50,0x26
+,0x8b,0x44,0x2c,0xa9,0xff,0xff,0x75,0x47,0x8b,0xfe,0x33,0xc0,0x26,0xf6,0x45,0x3c
+,0x80,0x74,0x06,0x26,0x8a,0x45,0x3a,0x24,0x1f,0x03,0xf8,0x26,0x80,0x7d,0x45,0x09
+,0x75,0x2d,0x8c,0xc2,0x8e,0x06,0x38,0x34,0x8e,0xda,0x8b,0x0e,0x0e,0x00,0x26,0x89
+,0x0e,0x0e,0x00,0x8d,0x74,0x2c,0xbf,0x18,0x00,0xf3,0xa4,0x33,0xc0,0x8e,0xd8,0x26
+,0xc7,0x06,0x04,0x00,0xb5,0x3f,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0xcd,0x50,0xb8
+,0x06,0x80,0xe9,0xef,0xe9,0x26,0xa1,0x0c,0x00,0xa3,0x93,0x37,0x83,0x0e,0x99,0x36
+,0x01,0xe9,0x00,0xeb,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e
+,0x00,0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36
+,0x26,0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3
+,0x1e,0x00,0xb8,0x0a,0x80,0xe8,0x36,0x07,0xe9,0xe2,0xea,0xff,0x06,0x90,0x34,0xbe
+,0x0a,0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e
+,0xc2,0x34,0x01,0xe9,0xb6,0xea,0x80,0x3e,0x9d,0x36,0x0a,0x75,0x0f,0x26,0xa1,0x0c
+,0x00,0x25,0x07,0x00,0x3d,0x04,0x00,0x75,0x03,0xe8,0x79,0x00,0xa1,0xf3,0x36,0x86
+,0xe0,0xe7,0x1e,0xa3,0xe3,0x36,0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37
+,0x7b,0x7f,0x83,0x0e,0x0d,0x37,0x48,0xe8,0x1e,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07
+,0x00,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20,0x00,0x75,0x06,0xb8
+,0x01,0x00,0xe9,0x3f,0xe9,0xe9,0x5f,0xea,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f
+,0x03,0xcd,0x3a,0xa1,0x1d,0x37,0xa3,0xc4,0x34,0x86,0xe0,0x68,0x7f,0x03,0x1f,0xa3
+,0x06,0x00,0x33,0xc0,0x8e,0xd8,0xa1,0x0b,0x37,0xa3,0xb2,0x34,0xa1,0x0d,0x37,0xa3
+,0xb4,0x34,0xa1,0xf3,0x36,0xa3,0xc8,0x34,0xa1,0xef,0x36,0xa3,0x9c,0x34,0xa1,0xf1
+,0x36,0xa3,0x9e,0x34,0xc3,0x80,0x0e,0x9d,0x36,0x80,0xbe,0x00,0x00,0xe8,0xb4,0x07
+,0xb8,0x7b,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00
+,0xa1,0xe5,0x36,0xe7,0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xb8,0x82,0x03,0xcd,0x3a,0xf7
+,0x06,0x9b,0x36,0x00,0x20,0x75,0x03,0xe8,0xfd,0x06,0xa1,0xd3,0x36,0xa3,0xef,0x36
+,0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3,0xf1,0x36,0xa3,0x9e,0x34,0xc3,0xf6,0x06,0x9d
+,0x36,0x80,0x74,0x31,0xbe,0x22,0x00,0xe9,0x17,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74
+,0x24,0xbe,0x23,0x00,0xe9,0x0a,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x17,0xbe,0x24
+,0x00,0x56,0xe8,0xa8,0x05,0x8c,0xc0,0x3d,0xff,0xff,0x5e,0x74,0x05,0xe8,0xd7,0xef
+,0xcd,0x50,0xe9,0x1f,0xe8,0xe9,0x9f,0xe9,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x8a,0x03,0xcd,0x39,0xe9,0xf7,0x00,0x80,0x3e,0xa0
+,0x36,0x08,0x75,0x2e,0xa9,0xd0,0x07,0x75,0x2c,0xa1,0xb1,0x36,0x0d,0x00,0x04,0xe7
+,0x08,0xe5,0x00,0x25,0xff,0x73,0xe7,0x00,0xb8,0x8a,0x03,0xcd,0x3a,0xe8,0xc3,0x06
+,0x33,0xc0,0xe7,0x0e,0xe5,0x0a,0x25,0xc3,0x17,0xe7,0x0a,0xcd,0x54,0xc6,0x06,0xa0
+,0x36,0x00,0xe9,0x68,0xe9,0xbe,0x04,0x00,0xe9,0x3f,0xe9,0x83,0x26,0x9b,0x36,0xbf
+,0xc6,0x06,0x71,0x37,0x03,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8
+,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x39,0x81,0x0e,0xc2,0x34,0x00,0x20,0xe9
+,0x92,0x00,0xe8,0x49,0x06,0xb8,0x87,0x03,0xcd,0x39,0xbb,0xff,0x7f,0xcd,0x53,0xb8
+,0x84,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x83
+,0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xc3,0xe5,0x00
+,0x25,0xff,0x53,0xe7,0x00,0x83,0x0e,0xc2,0x34,0x40,0x83,0x26,0xc2,0x34,0xef,0xe8
+,0x0c,0x06,0xbb,0xff,0x7f,0xcd,0x53,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd
+,0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a
+,0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xc3
+,0x83,0x0e,0xc2,0x34,0x50,0xe8,0x18,0x04,0xe8,0xd3,0x05,0xf6,0x06,0x6f,0x37,0x01
+,0x75,0x12,0xb8,0x89,0x03,0xcd,0x39,0x83,0x3e,0x0f,0x37,0x00,0x75,0x06,0xc7,0x06
+,0x0f,0x37,0x04,0x00,0xa1,0x9d,0x36,0x80,0xfc,0x08,0x74,0x05,0xb8,0x84,0x03,0xcd
+,0x39,0xe5,0x02,0x0d,0x01,0x08,0x25,0xef,0xff,0xe7,0x02,0xa1,0x9d,0x36,0x86,0xe0
+,0x32,0xe4,0x8b,0xf0,0xd1,0xee,0x33,0xc0,0x0d,0x20,0x00,0x09,0x06,0xad,0x36,0xa1
+,0xad,0x36,0xe7,0x04,0xe9,0x53,0xe8,0xe9,0x5a,0xe8,0x33,0xc0,0xa0,0x1b,0x37,0xd1
+,0xe0,0x3a,0x06,0xa0,0x36,0x75,0x03,0xe9,0xba,0xff,0xe9,0x60,0xe8,0xc7,0x06,0x41
+,0x37,0x00,0x00,0xe8,0xc1,0xe1,0xe8,0x6a,0x06,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56
+,0xa1,0xb1,0x36,0x0d,0x00,0x10,0xe7,0x08,0xe5,0x02,0x25,0xf9,0xff,0x0d,0x03,0x00
+,0xe7,0x02,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1,0xad,0x36,0xe7
+,0x04,0xe8,0x7c,0x03,0xe8,0x9f,0x03,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b
+,0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x99,0x36,0xa3,0x9b
+,0x36,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xa3,0x4c,0x37,0xa3,0xf3,0x36,0xa3,0xef,0x36
+,0xa3,0xf1,0x36,0xe8,0x82,0xfd,0xc6,0x06,0x9f,0x36,0x02,0xe9,0xef,0xe7,0xe5,0x02
+,0x0d,0x01,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xe8,0xf2
+,0x05,0xe5,0x0a,0x0d,0x40,0x00,0xe7,0x0a,0x33,0xc0,0xa3,0x81,0x37,0xa3,0x85,0x37
+,0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xe5,0x00,0x0d,0x00,0x84,0xe7,0x00
+,0xb8,0x8c,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff
+,0xe5,0x00,0x25,0xff,0x7b,0xe7,0x00,0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x7e,0x03
+,0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe,0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0xa7,0xed
+,0x83,0x26,0xef,0x34,0xdf,0xff,0x06,0x81,0x37,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20
+,0xc3,0xf7,0x06,0x9a,0x37,0x80,0x00,0x74,0x3d,0xa9,0xd0,0x07,0x74,0x10,0xa9,0x00
+,0x04,0x74,0x12,0x33,0xc0,0xe7,0x0e,0xff,0x06,0x87,0x37,0xe9,0xd2,0xff,0xff,0x06
+,0x85,0x37,0xe9,0xcb,0xff,0xff,0x06,0x83,0x37,0xe9,0xc4,0xff,0x83,0x26,0x9a,0x37
+,0x7f,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f,0x01,0xc3,0xbb,0xff
+,0x7f,0xcd,0x53,0xe9,0x00,0x00,0xe5,0x02,0x25,0xff,0xfb,0x25,0xef,0xff,0x0d,0x01
+,0x00,0xe7,0x02,0xa1,0x83,0x37,0x3b,0x06,0x46,0x37,0x7f,0x2a,0xa1,0x85,0x37,0x3b
+,0x06,0x48,0x37,0x7c,0x21,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f
+,0x15,0xc6,0x06,0x9f,0x36,0x04,0xe5,0x02,0x25,0xff,0xf7,0x0d,0x01,0x00,0x25,0xef
+,0xff,0xe7,0x02,0xe9,0xf7,0xe6,0xbe,0x01,0x00,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74
+,0x0a,0x83,0x26,0x9b,0x36,0xfc,0x83,0x0e,0xc2,0x34,0x04,0xe9,0xd0,0xe6,0xb8,0x7b
+,0x03,0xcd,0x39,0xe5,0x02,0x0d,0x01,0x60,0x25,0xef,0xff,0xe7,0x02,0xc7,0x06,0xf1
+,0x34,0x20,0x03,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0x81,0x26,0xc2,0x34,0x7f,0xff,0x80
+,0x0e,0x6f,0x37,0x01,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74,0xd2,0xb8,0x7b,0x03,0xcd
+,0x3a,0xb8,0x7d,0x03,0xcd,0x39,0x83,0x26,0x9b,0x36,0xef,0x33,0xc0,0xb0,0x8a,0xa2
+,0x9f,0x36,0xa2,0x9d,0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc7,0x06,0x0f,0x37,0x04
+,0x00,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06,0x0f,0x37,0x03,0x00,0xb8
+,0x8d,0x03,0xcd,0x39,0xe8,0x00,0xd5,0xe5,0x02,0x0d,0x01,0x40,0x25,0xef,0xff,0x8b
+,0xd8,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00,0x8b,0xc3,0x0d,0x00
+,0x20,0x25,0xf9,0xff,0x0b,0x06,0xe8,0x3a,0xe7,0x02,0xc3,0xff,0x0e,0xf1,0x34,0x75
+,0x01,0xc3,0xe5,0x4e,0xa9,0x01,0x00,0x75,0x12,0xe5,0x00,0xa9,0x00,0x04,0x75,0x05
+,0x0d,0x00,0x04,0xe7,0x00,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0xe5,0x00,0xa9,0x00,0x04
+,0x74,0xf3,0x25,0xff,0xfb,0xe7,0x00,0xe9,0xeb,0xff,0xc6,0x06,0xa0,0x36,0x04,0x83
+,0x26,0x9b,0x36,0xfc,0x81,0x0e,0x9b,0x36,0x80,0x00,0xe9,0x10,0xe6,0xb8,0x8e,0x03
+,0xcd,0x3a,0xcd,0x54,0x81,0x0e,0xaf,0x36,0x00,0x18,0xa1,0xaf,0x36,0xe7,0x06,0xb8
+,0x7b,0x03,0xcd,0x39,0xa1,0xd3,0x36,0xa3,0x8f,0x37,0xa1,0xd5,0x36,0xa3,0x91,0x37
+,0xc7,0x06,0x8b,0x37,0x02,0x00,0xc7,0x06,0x8d,0x37,0x02,0x00,0x83,0x0e,0x99,0x36
+,0x40,0xe9,0xd9,0xe5,0x80,0x3e,0x9f,0x36,0x06,0x75,0x15,0xa9,0xd0,0x07,0x75,0xec
+,0x25,0x00,0x18,0x75,0x0e,0xff,0x0e,0x8b,0x37,0x75,0xe1,0xc6,0x06,0x9f,0x36,0x08
+,0xe9,0xba,0xe5,0xff,0x0e,0x8d,0x37,0x75,0xd3,0xbe,0x08,0x00,0xe9,0x9f,0xe5,0xb8
+,0x7b,0x03,0xcd,0x39,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x08,0xc6,0x06,0x9f,0x36
+,0x0a,0xe9,0x0d,0x00,0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x0b,0xb8,0x8b,0x03,0xcd
+,0x39,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x83,0xe5,0xb8,0x7b,0x03,0xcd,0x39,0xc7
+,0x06,0x8b,0x37,0x04,0x00,0xc7,0x06,0x8d,0x37,0x04,0x00,0x81,0x0e,0x99,0x36,0x00
+,0x02,0xe9,0x69,0xe5,0xf6,0x06,0x9d,0x36,0x80,0x75,0x1b,0xa9,0xd0,0x07,0x75,0xeb
+,0xa9,0x00,0x18,0x75,0x0c,0xff,0x0e,0x8d,0x37,0x75,0xe0,0xe8,0x17,0xfb,0xe9,0x4c
+,0xe5,0xb8,0x82,0x03,0xcd,0x39,0xc3,0xff,0x0e,0x8b,0x37,0x75,0xce,0xbe,0x09,0x00
+,0xe9,0x2b,0xe5,0xc7,0x06,0x3d,0x37,0x00,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8
+,0x3c,0x02,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b
+,0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02
+,0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x50,0x00
+,0xe8,0x73,0x00,0xb8,0x81,0x03,0xcd,0x39,0xc3,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74
+,0x0d,0xc6,0x06,0x9f,0x36,0x02,0xc6,0x06,0xa0,0x36,0x00,0xe9,0xdf,0xe4,0x83,0x0e
+,0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xe7,0x02,0xe5,0x56,0x0d,0x02
+,0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0x8b,0x36,0x3d,0x37,0xe8,0x44,0x02
+,0xc6,0x06,0xa0,0x36,0x0e,0xe9,0xb5,0xe4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x06,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a
+,0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8
+,0x88,0x03,0xcd,0x3a,0x07,0xc3,0x06,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x7b,0x03,0xcd
+,0x3a,0xb8,0x82,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x3a
+,0xb8,0x7e,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0xb8,0x81,0x03,0xcd,0x3a,0xb8
+,0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x7d,0x03,0xcd,0x3a,0xb8,0x8d
+,0x03,0xcd,0x3a,0xc7,0x06,0x41,0x37,0x00,0x00,0x07,0xc3,0x06,0x8e,0x06,0x38,0x34
+,0x1f,0x8b,0x0e,0x0e,0x00,0x26,0x89,0x0e,0x0e,0x00,0xbe,0x18,0x00,0xbf,0x18,0x00
+,0xf3,0xa4,0x06,0x1e,0x07,0xcd,0x34,0x07,0x33,0xc0,0x8e,0xd8,0xc3,0x26,0xf6,0x06
+,0x20,0x00,0x80,0x74,0x44,0x33,0xc0,0x26,0xa0,0x26,0x00,0x24,0x1f,0x8b,0xf0,0x26
+,0x8b,0x5c,0x28,0x89,0x1e,0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04
+,0x26,0x88,0x5c,0x28,0x8b,0xc6,0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3
+,0xa4,0x8b,0xc8,0x83,0xc7,0x06,0xf3,0xa4,0x26,0x81,0x26,0x26,0x00,0x1f,0x80,0x26
+,0x81,0x36,0x26,0x00,0x00,0x80,0xe9,0xa9,0xff,0x26,0x8b,0x1e,0x28,0x00,0x89,0x1e
+,0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04,0x26,0x88,0x1e,0x28,0x00
+,0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3,0xa4,0xe9,0x84,0xff,0x86,0xc4
+,0xa3,0x68,0x37,0xe8,0x87,0xff,0xf7,0x06,0x6a,0x37,0x0f,0x00,0x74,0x10,0x80,0x3e
+,0x9e,0x36,0x00,0x75,0x09,0xbe,0x00,0x00,0xe8,0xac,0xe9,0xcd,0x50,0xc3,0xc3,0x50
+,0x56,0x06,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06,0x26,0xa0,0x26,0x00
+,0x24,0x1f,0x8b,0xf0,0x26,0x8b,0x5c,0x26,0x86,0xfb,0x83,0xeb,0x04,0x74,0x4f,0x83
+,0xc6,0x2a,0x8c,0xc0,0x8e,0xd8,0xb9,0x07,0x00,0x33,0xc0,0x8e,0xc0,0xbf,0x72,0x37
+,0xf3,0xab,0x33,0xc9,0x8a,0x0c,0x80,0xf9,0x00,0x75,0x03,0xe9,0x30,0x00,0x3b,0xd9
+,0x73,0x03,0xe9,0x29,0x00,0x2b,0xd9,0x8a,0x44,0x01,0x25,0x3f,0x00,0x74,0x19,0x3d
+,0x0b,0x00,0x7d,0x14,0xd1,0xe0,0x8b,0xf8,0x2e,0x8b,0xbd,0x5c,0x49,0x8d,0x74,0x02
+,0x83,0xe9,0x02,0xf3,0xa4,0xe9,0x02,0x00,0x03,0xf1,0x23,0xdb,0x75,0xc4,0x33,0xc0
+,0x8e,0xd8,0x07,0x5e,0x58,0xc3,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06
+,0x26,0xa0,0x26,0x00,0x24,0x1f,0xc3,0xe5,0x0a,0x25,0xc3,0xbf,0xe7,0x0a,0xb8,0x86
+,0x03,0xcd,0x39,0xb8,0x83,0x03,0xcd,0x39,0x81,0x26,0x9b,0x36,0x7c,0xdf,0xb8,0x85
+,0x03,0xcd,0x3a,0xe5,0x02,0x25,0xff,0xf3,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02
+,0xe5,0x00,0x25,0xff,0x53,0xe7,0x00,0xa1,0xe7,0x36,0x25,0xff,0xfe,0xa3,0xe7,0x36
+,0xe7,0x3e,0x83,0x26,0x99,0x36,0xcf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36
+,0xe7,0x06,0xc3,0xe5,0x02,0x0d,0x01,0x0c,0x25,0xef,0xff,0xe7,0x02,0xa1,0xe7,0x36
+,0x0d,0x00,0x01,0xe7,0x3e,0xa3,0xe7,0x36,0x81,0x0e,0x9b,0x36,0x00,0x20,0x83,0x0e
+,0x99,0x36,0x20,0x81,0x26,0x9b,0x36,0x7c,0xbf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1
+,0xaf,0x36,0xe7,0x06,0xb8,0x86,0x03,0xcd,0x39,0xb8,0x85,0x03,0xcd,0x39,0xb8,0x83
+,0x03,0xcd,0x3a,0xc3,0x0b,0xf6,0x75,0x49,0x06,0x8e,0x06,0x32,0x34,0x80,0x3e,0xe0
+,0x34,0x01,0x75,0x1b,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26,0xf7,0x06
+,0x0a,0x00,0x00,0x20,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x20,0x07,0xc3,0x80
+,0x3e,0xe3,0x34,0x01,0x75,0x19,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26
+,0xf7,0x06,0x0a,0x00,0x00,0x10,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x10,0x07
+,0xc3,0xe9,0xb4,0xff,0x50,0x51,0x57,0x33,0xc0,0xb9,0x06,0x00,0x8e,0xc0,0xbf,0xd1
+,0x36,0xf3,0xae,0x5f,0x74,0x0c,0x26,0xf6,0x06,0x00,0x00,0xc0,0x75,0x04,0xf8,0x59
+,0x58,0xc3,0xf9,0xe9,0xf9,0xff,0x8b,0x05,0x0b,0x45,0x02,0x0b,0x45,0x04,0xc3,0x52
+,0x50,0xe5,0x06,0x25,0x1e,0x00,0x3d,0x1e,0x00,0x75,0xf6,0xb8,0x01,0x80,0xe7,0x5a
+,0x58,0x5a,0xc3,0xe8,0xe9,0xff,0x50,0xe5,0x02,0x25,0xff,0x7f,0x0d,0x01,0x00,0x25
+,0xef,0xff,0xe7,0x02,0x0d,0x00,0x80,0xe7,0x02,0xa1,0xad,0x36,0xe7,0x04,0xa1,0xaf
+,0x36,0xe7,0x06,0x58,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x2e,0x2b,0xce,0x41,0x10,0x42,0x7b,0x41,0x30,0x41,0xa2,0x41,0xaf,0x45,0x44,0x29
+,0xc7,0x2a,0xc7,0x2a,0x60,0x39,0xf4,0x3a,0x5c,0x3c,0x09,0x3d,0xb1,0x3d,0x34,0x3f
+,0xc7,0x2a,0x3c,0x3f,0xc7,0x2a,0xc4,0x3f,0x16,0x40,0x16,0x40,0xed,0x40,0xfa,0x40
+,0x07,0x41,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xd6,0x52,0x00,0x00,0x01,0x37
+,0xe9,0x36,0xf3,0x36,0xef,0x36,0x1d,0x37,0x0d,0x37,0x0b,0x37,0x9c,0x37,0x03,0x37
+,0xfb,0x36,0x62,0x2d,0x40,0x06,0xd1,0x2d,0xf4,0x01,0xba,0x44,0x40,0x06,0x8c,0x43
+,0x64,0x00,0xe8,0x2c,0xc8,0x00,0xd8,0x2b,0x05,0x00,0xe9,0x45,0x50,0x00,0x97,0x45
+,0xfa,0x00,0xae,0x2d,0x04,0x01,0x6a,0x42,0x02,0x00,0xf6,0x2c,0xbc,0x02,0x93,0x2d
+,0xdc,0x05,0x1d,0x2d,0x64,0x00,0xa1,0x2d,0x14,0x00,0xd7,0x3a,0x08,0x07,0x81,0x2d
+,0x64,0x00,0xb3,0x3e,0x02,0x00,0x30,0x43,0x64,0x00,0xc5,0x2c,0xf4,0x01,0x8b,0x44
+,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x80,0x3e,0xfd,0x34,0x02,0x74,0x0c,0xe8,0x20,0x05,0xc7,0x06,0xa1,0x36,0x00,0x00
+,0xe9,0x9a,0xf8,0xff,0x06,0xc0,0x33,0xe8,0x10,0x05,0x8b,0x36,0x3d,0x37,0xe8,0x73
+,0xfe,0xc3,0xcd,0x34,0xe9,0xe8,0x05,0xc7,0x06,0xa3,0x36,0x00,0x00,0xc7,0x06,0x41
+,0x37,0x00,0x00,0xe8,0xed,0xfe,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56,0xa1,0xb1,0x36
+,0x0d,0x00,0x10,0xe7,0x08,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1
+,0xad,0x36,0xe7,0x04,0xe8,0x2b,0x09,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b
+,0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x9b,0x36,0xa3,0x9d
+,0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x05,0x37
+,0x00,0x00,0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xa3,0xf3
+,0x36,0xa3,0xef,0x36,0xa3,0xf1,0x36,0xe8,0xfe,0xf5,0xe5,0x02,0x25,0xf9,0xff,0x0d
+,0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02
+,0xb8,0x8f,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff
+,0xa1,0xa9,0x36,0xa3,0xa7,0x36,0x0d,0x00,0xa4,0x0d,0x00,0x08,0xe7,0x00,0xa3,0xa9
+,0x36,0xc7,0x06,0xa3,0x36,0x01,0x00,0xc7,0x06,0xa5,0x36,0x0c,0x00,0x83,0x3e,0xa5
+,0x36,0x00,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9,0x13,0xff,0xff,0x0e,0xa5
+,0x36,0xbe,0x11,0x00,0xe8,0x22,0x05,0xb8,0x90,0x03,0xcd,0x39,0xc3,0x83,0x3e,0xa3
+,0x36,0x01,0x74,0xd9,0xc3,0xb8,0x90,0x03,0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b
+,0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x01,0x74,0x03,0xe9,0xf0,0x04,0x3c
+,0x0f,0x75,0x1e,0x81,0xfb,0x00,0x02,0x75,0x18,0x26,0xa1,0x20,0x00,0xa3,0x05,0x37
+,0x26,0xa1,0x22,0x00,0xa3,0x07,0x37,0x26,0xa1,0x24,0x00,0xa3,0x09,0x37,0xe9,0x09
+,0x00,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xb6,0xfe,0xc7,0x06,0xa3,0x36,0x02,0x00
+,0xc6,0x06,0x9e,0x36,0xff,0xe8,0xcb,0xfd,0xe8,0x1c,0xd9,0x33,0xc0,0xa3,0x85,0x37
+,0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xb8,0x91,0x03,0xcd,0x39,0xb8,0x80
+,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xe5,0x00,0x25,0xff,0x53,0xe7,0x00
+,0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x92,0x03,0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe
+,0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0x8e,0xe5,0x26,0xc7,0x06,0x04,0x00,0x7d,0x4b
+,0x83,0x26,0xef,0x34,0xdf,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20,0xc3,0xf7,0x06,0x9a
+,0x37,0x80,0x00,0x74,0x32,0xa9,0xd0,0x07,0x74,0x0c,0xa9,0x00,0x04,0x74,0x0e,0x33
+,0xc0,0xe7,0x0e,0xe9,0xda,0xff,0xff,0x06,0x85,0x37,0xe9,0xd3,0xff,0xff,0x06,0x83
+,0x37,0xe9,0xcc,0xff,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0x36,0xfe,0x83,0x26,0x9a
+,0x37,0x7f,0xbb,0xff,0x7f,0xcd,0x53,0xe5,0x00,0x0d,0x00,0xac,0xe7,0x00,0xe5,0x02
+,0x25,0xff,0xfb,0x25,0xef,0xff,0x25,0xff,0xf7,0x0d,0x01,0x00,0xe7,0x02,0xa1,0x83
+,0x37,0x3b,0x06,0x46,0x37,0x7f,0xcd,0xa1,0x85,0x37,0x3b,0x06,0x48,0x37,0x7c,0xc4
+,0xc7,0x06,0xa3,0x36,0x03,0x00,0xbe,0x13,0x00,0xe8,0xfd,0x03,0xb8,0x93,0x03,0xcd
+,0x39,0xb8,0x94,0x03,0xcd,0x39,0xb8,0x96,0x03,0xcd,0x39,0xb8,0x95,0x03,0xcd,0x39
+,0xbe,0x06,0x00,0xe8,0xe3,0x03,0xe9,0xd6,0x03,0x83,0x3e,0xa3,0x36,0x03,0x74,0x01
+,0xc3,0xbe,0x13,0x00,0xe8,0xd2,0x03,0xb8,0x94,0x03,0xcd,0x39,0xc3,0xb8,0x94,0x03
+,0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b,0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3
+,0x36,0x03,0x74,0x03,0xe9,0xa8,0x03,0x3c,0x0d,0x75,0x3e,0x83,0xfb,0x00,0x75,0x39
+,0xe5,0x02,0x0d,0x00,0x20,0xe7,0x02,0xb8,0x93,0x03,0xcd,0x3a,0xc7,0x06,0xa3,0x36
+,0x04,0x00,0xbe,0x00,0x00,0xe8,0x0c,0xfc,0xc6,0x06,0x9d,0x36,0x80,0xc6,0x06,0x9e
+,0x36,0x00,0xc7,0x06,0x33,0x37,0x02,0x00,0xb8,0x9a,0x03,0xcd,0x39,0xe8,0xfc,0x00
+,0xc7,0x06,0x4c,0x37,0x00,0x00,0xe9,0x66,0x03,0xc7,0x06,0x3d,0x37,0x08,0x00,0xe9
+,0x61,0xfd,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9
+,0x51,0xfd,0xe9,0x4a,0x03,0x83,0x3e,0xa3,0x36,0x04,0x74,0x12,0x83,0x3e,0xa3,0x36
+,0x05,0x74,0x0b,0xcd,0x34,0xc7,0x06,0x3d,0x37,0x07,0x00,0xe9,0x35,0xfd,0xc7,0x06
+,0xa3,0x36,0x06,0x00,0xc6,0x06,0x9e,0x36,0xff,0xb8,0x9a,0x03,0xcd,0x3a,0xb8,0x99
+,0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03
+,0xcd,0x39,0xb8,0x9b,0x03,0xcd,0x39,0xe9,0x18,0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36
+,0x04,0x77,0x18,0x83,0x3e,0xa3,0x36,0x03,0x75,0x08,0xf7,0x06,0x9b,0x36,0x00,0x01
+,0x75,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xe8,0xfc,0xe9,0xe1,0x02,0xcd,0x34
+,0x83,0x3e,0xa3,0x36,0x02,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xd3,0xfc
+,0x83,0x3e,0xa3,0x36,0x04,0x77,0x05,0xb8,0x96,0x03,0xcd,0x39,0xe9,0xc0,0x02,0x83
+,0x3e,0xa3,0x36,0x03,0x75,0x10,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x50,0x3d,0x04
+,0x00,0x75,0x03,0xe8,0x36,0x00,0xa1,0xf3,0x36,0x86,0xe0,0xe7,0x1e,0xa3,0xe3,0x36
+,0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37,0x7b,0x7f,0x83,0x0e,0x0d,0x37
+,0x48,0xe8,0x14,0xf3,0x58,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20
+,0x00,0x75,0x06,0xb8,0x01,0x00,0xe9,0x7a,0x02,0xe9,0x86,0xfc,0xa1,0xe5,0x36,0xe7
+,0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xa1,0xd3,0x36,0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3
+,0x9e,0x34,0xc3,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e,0x00
+,0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36,0x26
+,0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3,0x1e
+,0x00,0xb8,0x0a,0x80,0xe9,0x2c,0x02,0xe9,0x38,0xfc,0xff,0x06,0x90,0x34,0xbe,0x0a
+,0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e,0xc2
+,0x34,0x01,0xcd,0x34,0xe9,0x0c,0xfc,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06
+,0x3d,0x37,0x05,0x00,0xe9,0xfc,0xfb,0xe5,0x02,0x0d,0x03,0x00,0x0d,0x00,0x88,0x0d
+,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x05,0x00,0xc6,0x06,0x9e
+,0x36,0xff,0xbe,0x02,0x00,0xe8,0xe1,0x01,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x9a,0x03
+,0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x39,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03,0xcd
+,0x39,0xe9,0xbb,0x01,0x83,0x3e,0xa3,0x36,0x03,0x74,0x0a,0x83,0x3e,0xa3,0x36,0x04
+,0x74,0x03,0xe9,0xaa,0x01,0xbe,0x06,0x00,0xe8,0xae,0x01,0xb8,0x95,0x03,0xcd,0x39
+,0xe9,0x9c,0x01,0x83,0x3e,0xa3,0x36,0x05,0x74,0x03,0xe9,0x92,0x01,0xbe,0x02,0x00
+,0xe8,0x96,0x01,0xb8,0x99,0x03,0xcd,0x39,0xe9,0x84,0x01,0xc7,0x06,0x0f,0x37,0x05
+,0x00,0xe9,0x7b,0x01,0xe5,0x02,0x25,0xff,0xdf,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x07
+,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xe9,0x65,0x01,0xe8,0xd5,0x04,0xc6,0x06,0x9d
+,0x36,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc7,0x06
+,0xa8,0x02,0x00,0x00,0xc7,0x06,0x4c,0x37,0x01,0x00,0xe5,0x02,0x25,0xf9,0xff,0x0d
+,0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02
+,0xe9,0x67,0xfc,0xb8,0x9a,0x03,0xcd,0x39,0xf7,0x06,0xf4,0x33,0x00,0x10,0x75,0x09
+,0xc7,0x06,0x33,0x37,0x02,0x00,0xe9,0x16,0x01,0xff,0x0e,0x33,0x37,0x74,0x03,0xe9
+,0x0d,0x01,0xff,0x06,0x8e,0x34,0x83,0x0e,0xc2,0x34,0x08,0xc7,0x06,0x3d,0x37,0x03
+,0x00,0xe9,0xff,0xfa,0xc3,0x52,0x50,0xba,0xe0,0x00,0xb8,0x00,0x10,0xef,0x58,0x5a
+,0xc3,0xc7,0x06,0x3d,0x37,0x00,0x00,0xe9,0xe9,0xfa,0xfa,0xe8,0x54,0x04,0xb8,0x80
+,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xd8,0x2b,0xb8,0x7f,0x03,0x8e,0xc0,0x26
+,0xc7,0x06,0x04,0x00,0xe8,0x2c,0x33,0xc0,0x8e,0xc0,0xa1,0xa7,0x36,0xa3,0xa9,0x36
+,0xa1,0xa9,0x36,0xe7,0x00,0xa1,0xab,0x36,0xe7,0x02,0xc7,0x06,0x05,0x37,0x00,0x00
+,0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xc6,0x06,0x9d,0x36
+,0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0xa3,0x36
+,0x00,0x00,0xc7,0x06,0x0f,0x37,0x00,0x00,0xc7,0x06,0xa8,0x02,0x00,0x00,0xc7,0x06
+,0x4c,0x37,0x01,0x00,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0xbb
+,0xff,0x7f,0xcd,0x53,0xe8,0x7c,0xf9,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xfb,0xc3
+,0x8d,0x3e,0xc0,0x53,0x8d,0x36,0xf0,0x38,0xb9,0x0e,0x00,0x8b,0x1e,0x30,0x34,0x89
+,0x5c,0x02,0x2e,0x8b,0x45,0x02,0x89,0x44,0x06,0x2e,0x8b,0x05,0x89,0x44,0x04,0x83
+,0xc7,0x04,0x83,0xc6,0x10,0xe2,0xe8,0xb8,0x80,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04
+,0x00,0xe2,0x51,0xb8,0x7f,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xb2,0x52,0x33
+,0xc0,0x8e,0xc0,0xc7,0x06,0xa1,0x36,0x01,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc3
+,0x33,0xff,0x8e,0x06,0xa6,0x02,0x8b,0x36,0xa4,0x02,0x2e,0xff,0xa4,0xa0,0x53,0xe8
+,0x8c,0xdb,0xc3,0xe8,0x48,0xf7,0xe9,0xf6,0xff,0x8e,0x06,0x38,0x34,0xe8,0x07,0xe1
+,0x26,0xc7,0x06,0x04,0x00,0xdf,0x4f,0xcd,0x50,0xc3,0x26,0xc7,0x06,0x0a,0x00,0x00
+,0x00,0x26,0xff,0x26,0x04,0x00,0xcd,0x34,0xe9,0xd4,0xff,0xa1,0xd1,0x36,0x26,0x39
+,0x06,0x1a,0x00,0x75,0x22,0xa1,0xd3,0x36,0x26,0x39,0x06,0x1c,0x00,0x75,0x18,0xa1
+,0xd5,0x36,0x26,0x39,0x06,0x1e,0x00,0x75,0x0e,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00
+,0x74,0x05,0x83,0x0e,0x66,0x37,0x40,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36
+,0xe7,0x06,0x83,0x3e,0xa3,0x36,0x02,0x75,0x05,0xcd,0x34,0xe9,0x56,0xfb,0x83,0x3e
+,0xa3,0x36,0x00,0x74,0xb1,0x83,0x3e,0xa3,0x36,0x05,0x77,0xaa,0x26,0xf6,0x06,0x0a
+,0x00,0xff,0x75,0xa2,0xe8,0xfd,0xdd,0x50,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xe9
+,0x8c,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x3d,0x07,0x00,0x75,0x03,0xe9,0x76
+,0x00,0x3d,0x05,0x00,0x75,0x03,0xe9,0x6e,0x00,0xf7,0x06,0xe6,0x34,0x18,0x80,0x75
+,0x03,0xe9,0x6a,0x00,0xf7,0x06,0xe6,0x34,0x00,0x80,0x74,0x35,0x26,0x80,0x3e,0x29
+,0x00,0x02,0x75,0x2d,0x51,0x56,0x57,0x8d,0x36,0x3e,0x34,0x8d,0x3e,0x20,0x00,0xb9
+,0x06,0x00,0xf3,0xa6,0x5f,0x5e,0x59,0x75,0x45,0x26,0xa1,0x20,0x00,0xa3,0x3e,0x34
+,0x26,0xa1,0x22,0x00,0xa3,0x40,0x34,0x26,0xa1,0x24,0x00,0xa3,0x42,0x34,0xe9,0x26
+,0x00,0xf7,0x06,0xe6,0x34,0x08,0x00,0x74,0x0b,0x26,0x80,0x3e,0x19,0x00,0x00,0x74
+,0x03,0xe9,0x13,0x00,0xf7,0x06,0xe6,0x34,0x10,0x00,0x74,0x12,0x26,0xa0,0x28,0x00
+,0xc0,0xe8,0x04,0x22,0xc0,0x74,0x07,0x26,0xc7,0x06,0x04,0x00,0xff,0xff,0x58,0x23
+,0xc0,0x74,0x03,0xe9,0xdd,0xfe,0x81,0x26,0x9b,0x36,0xff,0xfe,0x26,0xa1,0x20,0x00
+,0x3b,0x06,0xd1,0x36,0x75,0x1a,0x26,0xa1,0x22,0x00,0x3b,0x06,0xd3,0x36,0x75,0x10
+,0x26,0xa1,0x24,0x00,0x3b,0x06,0xd5,0x36,0x75,0x06,0x81,0x0e,0x9b,0x36,0x00,0x01
+,0x26,0xa1,0x20,0x00,0x25,0x7f,0xff,0xa3,0xb8,0x34,0x26,0xa1,0x22,0x00,0xa3,0xba
+,0x34,0x26,0xa1,0x24,0x00,0xa3,0xbc,0x34,0x8b,0xc6,0x86,0xc4,0xa3,0xc0,0x34,0xd1
+,0xe6,0x80,0xfc,0x09,0x74,0x03,0xe8,0xf6,0xf5,0xa1,0x05,0x37,0x0b,0x06,0x07,0x37
+,0x0b,0x06,0x09,0x37,0x74,0x3e,0x26,0xa1,0x20,0x00,0x3b,0x06,0x05,0x37,0x75,0x17
+,0x26,0xa1,0x22,0x00,0x3b,0x06,0x07,0x37,0x75,0x0d,0x26,0xa1,0x24,0x00,0x3b,0x06
+,0x09,0x37,0x75,0x03,0xe9,0x1d,0x00,0x26,0xa0,0x28,0x00,0x24,0x0f,0x3c,0x03,0x74
+,0x1b,0x3c,0x00,0x75,0x0f,0x83,0x3e,0xa3,0x36,0x04,0x74,0x10,0xf7,0x06,0x9b,0x36
+,0x00,0x01,0x74,0x08,0x2e,0xff,0x94,0xf8,0x53,0xe9,0x33,0xfe,0xcd,0x34,0xc7,0x06
+,0x3d,0x37,0x01,0x00,0xe9,0x2c,0xf8,0x83,0x3e,0xa3,0x36,0x05,0x74,0x10,0x83,0x3e
+,0xa3,0x36,0x01,0x7e,0x09,0x83,0xee,0x16,0x2e,0xff,0x94,0x24,0x54,0xc3,0xcd,0x34
+,0xc3,0x26,0xa1,0x0c,0x00,0x3d,0xff,0x7f,0x74,0x05,0x26,0xff,0x26,0x04,0x00,0xe9
+,0xfd,0xfd,0xa1,0xf4,0x33,0xa9,0x00,0x88,0x74,0x0b,0xa9,0x00,0x10,0x75,0x09,0x8b
+,0x1e,0x43,0x37,0xff,0xe3,0xe9,0x97,0x00,0xc7,0x06,0x35,0x37,0x05,0x00,0xc7,0x06
+,0x43,0x37,0x28,0x52,0xf7,0x06,0xf4,0x33,0x00,0x08,0x74,0x06,0xc7,0x06,0x43,0x37
+,0x1a,0x52,0xb8,0x80,0x03,0xcd,0x39,0xe9,0xc5,0xfd,0xa9,0x00,0x08,0x74,0xd9,0xff
+,0x0e,0x35,0x37,0x75,0xed,0xe9,0x30,0x00,0xa9,0x00,0x08,0x75,0xcb,0xff,0x0e,0x35
+,0x37,0x75,0xdf,0x81,0x0e,0xc2,0x34,0xc0,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x0f
+,0x81,0x0e,0x9b,0x36,0x00,0x80,0xc7,0x06,0x0f,0x37,0x02,0x00,0xe9,0x90,0xfd,0xc7
+,0x06,0x3d,0x37,0x02,0x00,0xe9,0x8b,0xf7,0x80,0x26,0x9e,0x36,0xff,0x75,0x30,0xf6
+,0x06,0x9d,0x36,0x80,0x74,0x20,0xff,0x06,0x94,0x34,0x83,0x0e,0x66,0x37,0x20,0x8e
+,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08
+,0x00,0x00,0x01,0xe9,0x09,0x00,0xc7,0x06,0x3d,0x37,0x04,0x00,0xe9,0x54,0xf7,0x81
+,0x0e,0xaf,0x36,0x00,0x08,0xa1,0xaf,0x36,0xe7,0x06,0xe5,0x0a,0xa9,0x00,0x80,0x74
+,0x0e,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36,0xe7,0x06,0xe9,0x49,0xff,0xe9
+,0x2d,0xfd,0xc7,0x06,0x41,0x37,0x00,0x00,0xbe,0x29,0x00,0xe8,0x2b,0xfd,0xe9,0x1e
+,0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x04,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00
+,0xe9,0x10,0xf7,0xe9,0x09,0xfd,0xcd,0x34,0xc3,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8
+,0x0c,0xf5,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b
+,0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02
+,0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x20,0xf3
+,0xe8,0x43,0xf3,0x83,0x0e,0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xd2
+,0xf5,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0xbe,0x00
+,0x00,0xe8,0x30,0xf5,0xc6,0x06,0xa0,0x36,0x0e,0xb8,0x9c,0x03,0xcd,0x39,0xb8,0x80
+,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xc7,0x06,0xa1,0x36,0x01,0x00,0xe9
+,0xa5,0xf6,0x06,0xb8,0x8f,0x03,0xcd,0x3a,0xb8,0x90,0x03,0xcd,0x3a,0xb8,0x91,0x03
+,0xcd,0x3a,0xb8,0x92,0x03,0xcd,0x3a,0xb8,0x93,0x03,0xcd,0x3a,0xb8,0x94,0x03,0xcd
+,0x3a,0xb8,0x95,0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x3a
+,0xb8,0x98,0x03,0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x3a,0xb8,0x9a,0x03,0xcd,0x3a,0xb8
+,0x9b,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0x07,0xc3
+,0xf7,0x49,0xf1,0x4e,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xf8,0x51,0xdf,0x4f
+,0xfa,0x4f,0x0b,0x50,0xd1,0x51,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f
+,0xe4,0x4e,0x06,0x00,0xcd,0x4a,0x04,0x00,0xe4,0x4e,0x19,0x00,0xad,0x4b,0xfa,0x00
+,0x82,0x4c,0x08,0x07,0x09,0x4c,0x14,0x00,0x24,0x4e,0x64,0x00,0xd7,0x4d,0xf4,0x01
+,0x64,0x4e,0xbc,0x02,0x7a,0x4e,0xe8,0x03,0x43,0x4e,0x02,0x00,0xb3,0x4e,0xf4,0x01
+,0x5b,0x4e,0xf4,0x01,0xe5,0x4e,0x14,0x00,0x06,0x50,0x06,0x50,0x95,0x4c,0xc1,0x52
+,0xc1,0x52,0xfe,0x4c,0xda,0x4c,0x06,0x50,0x06,0x50,0x06,0x50,0x06,0x50,0xb7,0x51
+,0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0x06,0x50,0xd5,0x4a,0x06,0x50
+,0x1d,0x4c,0x06,0x50,0x83,0x4d,0x1f,0x4d,0x1f,0x4d,0xed,0x40,0xfa,0x40,0x07,0x41
+,0x37,0x37,0x2e,0x37,0x37,0x20,0x20,0x79,0x79,0x2f,0x79,0x79,0x2f,0x79,0x79,0x20
+,0x30,0x31,0x2e,0x39,0x30,0x20,0x20,0x30,0x32,0x2f,0x31,0x37,0x2f,0x39,0x39,0x20
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+,0x90,0xea,0xc0,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x06
+} ;
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
new file mode 100644
index 000000000000..23d0fa4bbceb
--- /dev/null
+++ b/drivers/net/tokenring/Kconfig
@@ -0,0 +1,186 @@
+#
+# Token Ring driver configuration
+#
+
+menu "Token Ring devices"
+ depends on NETDEVICES
+
+# So far, we only have PCI, ISA, MCA and CCW token ring devices
+config TR
+ bool "Token Ring driver support"
+ depends on (PCI || ISA || MCA || CCW)
+ select LLC
+ help
+ Token Ring is IBM's way of communication on a local network; the
+ rest of the world uses Ethernet. To participate on a Token Ring
+	  network, you need a special Token Ring network card. If you are
+ connected to such a Token Ring network and want to use your Token
+ Ring card under Linux, say Y here and to the driver for your
+ particular card below and read the Token-Ring mini-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>. Most people can
+ say N here.
+
+config IBMTR
+ tristate "IBM Tropic chipset based adapter support"
+ depends on TR && (ISA || MCA)
+ ---help---
+ This is support for all IBM Token Ring cards that don't use DMA. If
+ you have such a beast, say Y and read the Token-Ring mini-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Warning: this driver will almost definitely fail if more than one
+ active Token Ring card is present.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ibmtr.
+
+config IBMOL
+ tristate "IBM Olympic chipset PCI adapter support"
+ depends on TR && PCI
+ ---help---
+ This is support for all non-Lanstreamer IBM PCI Token Ring Cards.
+	  Specifically this is all IBM PCI, PCI Wake On LAN, PCI II, PCI II
+	  Wake On LAN, and PCI 100/16/4 adapters.
+
+ If you have such an adapter, say Y and read the Token-Ring
+ mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module will be
+ called olympic.
+
+ Also read <file:Documentation/networking/olympic.txt> or check the
+ Linux Token Ring Project site for the latest information at
+ <http://www.linuxtr.net/>.
+
+config IBMLS
+ tristate "IBM Lanstreamer chipset PCI adapter support"
+ depends on TR && PCI && !64BIT
+ help
+ This is support for IBM Lanstreamer PCI Token Ring Cards.
+
+ If you have such an adapter, say Y and read the Token-Ring
+ mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module will be
+ called lanstreamer.
+
+config 3C359
+ tristate "3Com 3C359 Token Link Velocity XL adapter support"
+ depends on TR && PCI
+ ---help---
+	  This is support for the 3Com PCI Velocity XL cards, specifically
+	  the 3Com 3C359. Please note this is not for the 3C339 cards; for
+	  those you should use the tms380 driver instead.
+
+ If you have such an adapter, say Y and read the Token-Ring
+ mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c359.
+
+ Also read the file <file:Documentation/networking/3c359.txt> or check the
+ Linux Token Ring Project site for the latest information at
+	  <http://www.linuxtr.net>.
+
+config TMS380TR
+ tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
+ depends on TR && (PCI || ISA)
+ select FW_LOADER
+ ---help---
+ This driver provides generic support for token ring adapters
+ based on the Texas Instruments TMS380 series chipsets. This
+ includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect
+ TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591),
+ Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several
+ Madge adapters. If you say Y here, you will be asked to select
+ which cards to support below. If you're using modules, each
+ class of card will be supported by a separate module.
+
+ If you have such an adapter and would like to use it, say Y and
+ read the Token-Ring mini-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Also read the file <file:Documentation/networking/tms380tr.txt> or
+ check <http://www.auk.cx/tms380tr/>.
+
+ To compile this driver as a module, choose M here: the module will be
+ called tms380tr.
+
+config TMSPCI
+ tristate "Generic TMS380 PCI support"
+ depends on TR && TMS380TR && PCI
+ ---help---
+ This tms380 module supports generic TMS380-based PCI cards.
+
+ These cards are known to work:
+ - Compaq 4/16 TR PCI
+ - SysKonnect TR4/16 PCI (SK-4590/SK-4591)
+ - Thomas-Conrad TC4048 PCI 4/16
+ - 3Com Token Link Velocity
+
+ To compile this driver as a module, choose M here: the module will be
+ called tmspci.
+
+config SKISA
+ tristate "SysKonnect TR4/16 ISA support"
+ depends on TR && TMS380TR && ISA
+ help
+ This tms380 module supports SysKonnect TR4/16 ISA cards.
+
+ These cards are known to work:
+ - SysKonnect TR4/16 ISA (SK-4190)
+
+ To compile this driver as a module, choose M here: the module will be
+ called skisa.
+
+config PROTEON
+ tristate "Proteon ISA support"
+ depends on TR && TMS380TR && ISA
+ help
+ This tms380 module supports Proteon ISA cards.
+
+ These cards are known to work:
+ - Proteon 1392
+ - Proteon 1392 plus
+
+ To compile this driver as a module, choose M here: the module will be
+ called proteon.
+
+config ABYSS
+ tristate "Madge Smart 16/4 PCI Mk2 support"
+ depends on TR && TMS380TR && PCI
+ help
+ This tms380 module supports the Madge Smart 16/4 PCI Mk2
+ cards (51-02).
+
+ To compile this driver as a module, choose M here: the module will be
+ called abyss.
+
+config MADGEMC
+ tristate "Madge Smart 16/4 Ringnode MicroChannel"
+ depends on TR && TMS380TR && MCA_LEGACY
+ help
+ This tms380 module supports the Madge Smart 16/4 MC16 and MC32
+ MicroChannel adapters.
+
+ To compile this driver as a module, choose M here: the module will be
+ called madgemc.
+
+config SMCTR
+ tristate "SMC ISA/MCA adapter support"
+ depends on TR && (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
+ ---help---
+ This is support for the ISA and MCA SMC Token Ring cards,
+ specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
+ (8115T/A) adapters.
+
+ If you have such an adapter and would like to use it, say Y or M and
+ read the Token-Ring mini-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> and the file
+ <file:Documentation/networking/smctr.txt>.
+
+ To compile this driver as a module, choose M here: the module will be
+ called smctr.
+
+endmenu
+
diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile
new file mode 100644
index 000000000000..c88b0a5e5380
--- /dev/null
+++ b/drivers/net/tokenring/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for drivers/net/tokenring
+#
+
+obj-$(CONFIG_IBMTR) += ibmtr.o
+obj-$(CONFIG_IBMOL) += olympic.o
+obj-$(CONFIG_IBMLS) += lanstreamer.o
+obj-$(CONFIG_TMS380TR) += tms380tr.o
+obj-$(CONFIG_ABYSS) += abyss.o
+obj-$(CONFIG_MADGEMC) += madgemc.o
+obj-$(CONFIG_PROTEON) += proteon.o
+obj-$(CONFIG_TMSPCI) += tmspci.o
+obj-$(CONFIG_SKISA) += skisa.o
+obj-$(CONFIG_SMCTR) += smctr.o
+obj-$(CONFIG_3C359) += 3c359.o
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
new file mode 100644
index 000000000000..bd4a2bccf867
--- /dev/null
+++ b/drivers/net/tokenring/abyss.c
@@ -0,0 +1,481 @@
+/*
+ * abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card.
+ *
+ * Written 1999-2000 by Adam Fritzler
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver module supports the following cards:
+ * - Madge Smart 16/4 PCI Mk2
+ *
+ * Maintainer(s):
+ * AF Adam Fritzler mid@auk.cx
+ *
+ * Modification History:
+ * 30-Dec-99 AF Split off from the tms380tr driver.
+ * 22-Jan-00 AF Updated to use indirect read/writes
+ * 23-Nov-00 JG New PCI API, cleanups
+ *
+ *
+ * TODO:
+ * 1. See if we can use MMIO instead of inb/outb/inw/outw
+ * 2. Add support for Mk1 (has AT24 attached to the PCI
+ * config registers)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "tms380tr.h"
+#include "abyss.h" /* Madge-specific constants */
+
+static char version[] __devinitdata =
+"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n";
+
+#define ABYSS_IO_EXTENT 64
+
+static struct pci_device_id abyss_pci_tbl[] = {
+ { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, abyss_pci_tbl);
+
+MODULE_LICENSE("GPL");
+
+static int abyss_open(struct net_device *dev);
+static int abyss_close(struct net_device *dev);
+static void abyss_enable(struct net_device *dev);
+static int abyss_chipset_init(struct net_device *dev);
+static void abyss_read_eeprom(struct net_device *dev);
+static unsigned short abyss_setnselout_pins(struct net_device *dev);
+
+static void at24_writedatabyte(unsigned long regaddr, unsigned char byte);
+static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr);
+static int at24_sendcmd(unsigned long regaddr, unsigned char cmd);
+static unsigned char at24_readdatabit(unsigned long regaddr);
+static unsigned char at24_readdatabyte(unsigned long regaddr);
+static int at24_waitforack(unsigned long regaddr);
+static int at24_waitfornack(unsigned long regaddr);
+static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data);
+static void at24_start(unsigned long regaddr);
+static unsigned char at24_readb(unsigned long regaddr, unsigned char addr);
+
+static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg)
+{
+ return inb(dev->base_addr + reg);
+}
+
+static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg)
+{
+ return inw(dev->base_addr + reg);
+}
+
+static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outb(val, dev->base_addr + reg);
+}
+
+static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outw(val, dev->base_addr + reg);
+}
+
+static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int versionprinted;
+ struct net_device *dev;
+ struct net_local *tp;
+ int i, ret, pci_irq_line;
+ unsigned long pci_ioaddr;
+
+ if (versionprinted++ == 0)
+ printk("%s", version);
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+
+ /* Remove I/O space marker in bit 0. */
+ pci_irq_line = pdev->irq;
+ pci_ioaddr = pci_resource_start (pdev, 0);
+
+ /* At this point we have found a valid card. */
+
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+
+ if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) {
+ ret = -EBUSY;
+ goto err_out_trdev;
+ }
+
+ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+
+ dev->base_addr = pci_ioaddr;
+ dev->irq = pci_irq_line;
+
+ printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name);
+ printk("%s: IO: %#4lx IRQ: %d\n",
+ dev->name, pci_ioaddr, dev->irq);
+ /*
+	 * The TMS SIF registers lie 0x10 above the card base address.
+ */
+ dev->base_addr += 0x10;
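+	/*
+	 * From here on the SIF helpers above (abyss_sifreadb() etc.) hit
+	 * the TMS380 SIF directly, while the Madge glue-chip registers
+	 * are reached through the negative PCIBM2_* offsets in abyss.h.
+	 */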
+
+ ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+ if (ret) {
+ printk("%s: unable to get memory for dev->priv.\n",
+ dev->name);
+ goto err_out_irq;
+ }
+
+ abyss_read_eeprom(dev);
+
+ printk("%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (i = 1; i < 6; i++)
+ printk(":%2.2x", dev->dev_addr[i]);
+ printk("\n");
+
+ tp = netdev_priv(dev);
+ tp->setnselout = abyss_setnselout_pins;
+ tp->sifreadb = abyss_sifreadb;
+ tp->sifreadw = abyss_sifreadw;
+ tp->sifwriteb = abyss_sifwriteb;
+ tp->sifwritew = abyss_sifwritew;
+
+ memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);
+
+ dev->open = abyss_open;
+ dev->stop = abyss_close;
+
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto err_out_tmsdev;
+ return 0;
+
+err_out_tmsdev:
+ pci_set_drvdata(pdev, NULL);
+ tmsdev_term(dev);
+err_out_irq:
+ free_irq(pdev->irq, dev);
+err_out_region:
+ release_region(pci_ioaddr, ABYSS_IO_EXTENT);
+err_out_trdev:
+ free_netdev(dev);
+ return ret;
+}
+
+static unsigned short abyss_setnselout_pins(struct net_device *dev)
+{
+ unsigned short val = 0;
+ struct net_local *tp = netdev_priv(dev);
+
+ if(tp->DataRate == SPEED_4)
+ val |= 0x01; /* Set 4Mbps */
+ else
+ val |= 0x00; /* Set 16Mbps */
+
+ return val;
+}
+
+/*
+ * The following Madge boards should use this code:
+ * - Smart 16/4 PCI Mk2 (Abyss)
+ * - Smart 16/4 PCI Mk1 (PCI T)
+ * - Smart 16/4 Client Plus PnP (Big Apple)
+ * - Smart 16/4 Cardbus Mk2
+ *
+ * These access an Atmel AT24 SEEPROM using their glue chip registers.
+ *
+ */
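+/*
+ * The SEEPROM is driven by bit-banging the AT24_CLOCK and AT24_DATA
+ * lines in PCIBM2_SEEPROM_REG: bytes are clocked out MSB first (the
+ * three at24_setlines() calls per bit form one clock pulse with data
+ * held steady), and an ACK is sampled after each byte via
+ * at24_waitforack()/at24_readdatabit().
+ */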
+static void at24_writedatabyte(unsigned long regaddr, unsigned char byte)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ at24_setlines(regaddr, 0, (byte >> (7-i))&0x01);
+ at24_setlines(regaddr, 1, (byte >> (7-i))&0x01);
+ at24_setlines(regaddr, 0, (byte >> (7-i))&0x01);
+ }
+}
+
+static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr)
+{
+ if (at24_sendcmd(regaddr, cmd)) {
+ at24_writedatabyte(regaddr, addr);
+ return at24_waitforack(regaddr);
+ }
+ return 0;
+}
+
+static int at24_sendcmd(unsigned long regaddr, unsigned char cmd)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ at24_start(regaddr);
+ at24_writedatabyte(regaddr, cmd);
+ if (at24_waitforack(regaddr))
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned char at24_readdatabit(unsigned long regaddr)
+{
+ unsigned char val;
+
+ at24_setlines(regaddr, 0, 1);
+ at24_setlines(regaddr, 1, 1);
+ val = (inb(regaddr) & AT24_DATA)?1:0;
+ at24_setlines(regaddr, 1, 1);
+ at24_setlines(regaddr, 0, 1);
+ return val;
+}
+
+static unsigned char at24_readdatabyte(unsigned long regaddr)
+{
+ unsigned char data = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ data <<= 1;
+ data |= at24_readdatabit(regaddr);
+ }
+
+ return data;
+}
+
+static int at24_waitforack(unsigned long regaddr)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if ((at24_readdatabit(regaddr) & 0x01) == 0x00)
+ return 1;
+ }
+ return 0;
+}
+
+static int at24_waitfornack(unsigned long regaddr)
+{
+ int i;
+ for (i = 0; i < 10; i++) {
+ if ((at24_readdatabit(regaddr) & 0x01) == 0x01)
+ return 1;
+ }
+ return 0;
+}
+
+static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data)
+{
+ unsigned char val = AT24_ENABLE;
+ if (clock)
+ val |= AT24_CLOCK;
+ if (data)
+ val |= AT24_DATA;
+
+ outb(val, regaddr);
+ tms380tr_wait(20); /* Very necessary. */
+}
+
+static void at24_start(unsigned long regaddr)
+{
+ at24_setlines(regaddr, 0, 1);
+ at24_setlines(regaddr, 1, 1);
+ at24_setlines(regaddr, 1, 0);
+ at24_setlines(regaddr, 0, 1);
+}
+
+static unsigned char at24_readb(unsigned long regaddr, unsigned char addr)
+{
+ unsigned char data = 0xff;
+
+ if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) {
+ if (at24_sendcmd(regaddr, AT24_READ)) {
+ data = at24_readdatabyte(regaddr);
+ if (!at24_waitfornack(regaddr))
+ data = 0xff;
+ }
+ }
+ return data;
+}
+
+
+/*
+ * Enable basic functions of the Madge chipset needed
+ * for initialization.
+ */
+static void abyss_enable(struct net_device *dev)
+{
+ unsigned char reset_reg;
+ unsigned long ioaddr;
+
+ ioaddr = dev->base_addr;
+ reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
+ reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+ tms380tr_wait(100);
+}
+
+/*
+ * Enable the functions of the Madge chipset needed for
+ * full working order.
+ */
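+/*
+ * The *_NRES bits are "not reset" bits: clearing one holds that block
+ * in reset, setting it releases it.  The sequence below drops the chip,
+ * FIFO and SIF into reset, waits, then releases the chip, SIF and FIFO
+ * in turn before enabling interrupts and programming the FIFO threshold.
+ */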
+static int abyss_chipset_init(struct net_device *dev)
+{
+ unsigned char reset_reg;
+ unsigned long ioaddr;
+
+ ioaddr = dev->base_addr;
+
+ reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
+
+ reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+
+ reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES |
+ PCIBM2_RESET_REG_FIFO_NRES |
+ PCIBM2_RESET_REG_SIF_NRES);
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+
+ tms380tr_wait(100);
+
+ reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+
+ reset_reg |= PCIBM2_RESET_REG_SIF_NRES;
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+
+ reset_reg |= PCIBM2_RESET_REG_FIFO_NRES;
+ outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
+
+ outb(PCIBM2_INT_CONTROL_REG_SINTEN |
+ PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE,
+ ioaddr + PCIBM2_INT_CONTROL_REG);
+
+ outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD);
+
+ return 0;
+}
+
+static inline void abyss_chipset_close(struct net_device *dev)
+{
+ unsigned long ioaddr;
+
+ ioaddr = dev->base_addr;
+ outb(0, ioaddr + PCIBM2_RESET_REG);
+}
+
+/*
+ * Read configuration data from the AT24 SEEPROM on Madge cards.
+ *
+ */
+static void abyss_read_eeprom(struct net_device *dev)
+{
+ struct net_local *tp;
+ unsigned long ioaddr;
+ unsigned short val;
+ int i;
+
+ tp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ /* Must enable glue chip first */
+ abyss_enable(dev);
+
+ val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
+ PCIBM2_SEEPROM_RING_SPEED);
+ tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */
+ printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate);
+
+ val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
+ PCIBM2_SEEPROM_RAM_SIZE) * 128;
+ printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val);
+
+ dev->addr_len = 6;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
+ PCIBM2_SEEPROM_BIA+i);
+}
+
+static int abyss_open(struct net_device *dev)
+{
+ abyss_chipset_init(dev);
+ tms380tr_open(dev);
+ return 0;
+}
+
+static int abyss_close(struct net_device *dev)
+{
+ tms380tr_close(dev);
+ abyss_chipset_close(dev);
+ return 0;
+}
+
+static void __devexit abyss_detach (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ BUG();
+ unregister_netdev(dev);
+ release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver abyss_driver = {
+ .name = "abyss",
+ .id_table = abyss_pci_tbl,
+ .probe = abyss_attach,
+ .remove = __devexit_p(abyss_detach),
+};
+
+static int __init abyss_init (void)
+{
+ return pci_register_driver(&abyss_driver);
+}
+
+static void __exit abyss_rmmod (void)
+{
+ pci_unregister_driver (&abyss_driver);
+}
+
+module_init(abyss_init);
+module_exit(abyss_rmmod);
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c abyss.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c abyss.c"
+ * c-set-style "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h
new file mode 100644
index 000000000000..0ee6e4f085b1
--- /dev/null
+++ b/drivers/net/tokenring/abyss.h
@@ -0,0 +1,58 @@
+/*
+ * abyss.h: Header for the abyss tms380tr module
+ *
+ * Authors:
+ * - Adam Fritzler <mid@auk.cx>
+ */
+
+#ifndef __LINUX_MADGETR_H
+#define __LINUX_MADGETR_H
+
+#ifdef __KERNEL__
+
+/*
+ * For Madge Smart 16/4 PCI Mk2. Since we increment the base address
+ * to get everything correct for the TMS SIF, we do these as negatives
+ * as they fall below the SIF in addressing.
+ */
+#define PCIBM2_INT_STATUS_REG ((short)-15)/* 0x01 */
+#define PCIBM2_INT_CONTROL_REG ((short)-14)/* 0x02 */
+#define PCIBM2_RESET_REG ((short)-12)/* 0x04 */
+#define PCIBM2_SEEPROM_REG ((short)-9) /* 0x07 */
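+/*
+ * Example: abyss.c advances dev->base_addr by 0x10 to point at the SIF,
+ * so base_addr + PCIBM2_RESET_REG lands back on card base + 0x04, the
+ * raw offset noted in the comments above.
+ */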
+
+#define PCIBM2_INT_CONTROL_REG_SINTEN 0x02
+#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE 0x80
+#define PCIBM2_INT_STATUS_REG_PCI_ERR 0x80
+
+#define PCIBM2_RESET_REG_CHIP_NRES 0x01
+#define PCIBM2_RESET_REG_FIFO_NRES 0x02
+#define PCIBM2_RESET_REG_SIF_NRES 0x04
+
+#define PCIBM2_FIFO_THRESHOLD 0x21
+#define PCIBM2_BURST_LENGTH 0x22
+
+/*
+ * Bits in PCIBM2_SEEPROM_REG.
+ */
+#define AT24_ENABLE 0x04
+#define AT24_DATA 0x02
+#define AT24_CLOCK 0x01
+
+/*
+ * AT24 Commands.
+ */
+#define AT24_WRITE 0xA0
+#define AT24_READ 0xA1
+
+/*
+ * Addresses in AT24 SEEPROM.
+ */
+#define PCIBM2_SEEPROM_BIA 0x12
+#define PCIBM2_SEEPROM_RING_SPEED 0x18
+#define PCIBM2_SEEPROM_RAM_SIZE 0x1A
+#define PCIBM2_SEEPROM_HWF1 0x1C
+#define PCIBM2_SEEPROM_HWF2 0x1E
+
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_MADGETR_H */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
new file mode 100644
index 000000000000..c098863bdd9d
--- /dev/null
+++ b/drivers/net/tokenring/ibmtr.c
@@ -0,0 +1,1987 @@
+/* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for Linux
+ *
+ * Written 1993 by Mark Swanson and Peter De Schrijver.
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This device driver should work with any IBM Token Ring card that does
+ * not use DMA.
+ *
+ * I used Donald Becker's (becker@scyld.com) device driver work
+ * as a base for most of my initial work.
+ *
+ * Changes by Peter De Schrijver
+ * (Peter.Deschrijver@linux.cc.kuleuven.ac.be) :
+ *
+ * + changed name to ibmtr.c in anticipation of other tr boards.
+ * + changed reset code and adapter open code.
+ * + added SAP open code.
+ * + a first attempt to write interrupt, transmit and receive routines.
+ *
+ * Changes by David W. Morris (dwm@shell.portal.com) :
+ * 941003 dwm: - Restructure tok_probe for multiple adapters, devices.
+ * + Add comments, misc reorg for clarity.
+ * + Flatten interrupt handler levels.
+ *
+ * Changes by Farzad Farid (farzy@zen.via.ecp.fr)
+ * and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) :
+ * + multi ring support clean up.
+ * + RFC1042 compliance enhanced.
+ *
+ * Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) :
+ * + bug correction in tr_tx
+ * + removed redundant information display
+ * + some code reworking
+ *
+ * Changes by Michel Lespinasse (walken@via.ecp.fr),
+ * Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr)
+ * (February 18, 1996) :
+ *      + modified shared memory and MMIO access to port the driver to
+ *        the Alpha platform (structure access -> readb/writeb)
+ *
+ * Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com)
+ * (January 18 1996):
+ * + swapped WWOR and WWCR in ibmtr.h
+ * + moved some init code from tok_probe into trdev_init. The
+ * PCMCIA code can call trdev_init to complete initializing
+ * the driver.
+ * + added -DPCMCIA to support PCMCIA
+ * + detecting PCMCIA Card Removal in interrupt handler. If
+ * ISRP is FF, then a PCMCIA card has been removed
+ * 10/2000 Burt needed a new method to avoid crashing the OS
+ *
+ * Changes by Paul Norton (pnorton@cts.com) :
+ * + restructured the READ.LOG logic to prevent the transmit SRB
+ * from being rudely overwritten before the transmit cycle is
+ * complete. (August 15 1996)
+ * + completed multiple adapter support. (November 20 1996)
+ * + implemented csum_partial_copy in tr_rx and increased receive
+ * buffer size and count. Minor fixes. (March 15, 1997)
+ *
+ * Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk>
+ * + Now compiles ok as a module again.
+ *
+ * Changes by Paul Norton (pnorton@ieee.org) :
+ * + moved the header manipulation code in tr_tx and tr_rx to
+ * net/802/tr.c. (July 12 1997)
+ * + add retry and timeout on open if cable disconnected. (May 5 1998)
+ * + lifted 2000 byte mtu limit. now depends on shared-RAM size.
+ *        (May 25 1998)
+ * + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998)
+ *
+ * Changes by Joel Sloan (jjs@c-me.com) :
+ * + disable verbose debug messages by default - to enable verbose
+ * debugging, edit the IBMTR_DEBUG_MESSAGES define below
+ *
+ * Changes by Mike Phillips <phillim@amtrak.com> :
+ * + Added extra #ifdef's to work with new PCMCIA Token Ring Code.
+ * The PCMCIA code now just sets up the card so it can be recognized
+ * by ibmtr_probe. Also checks allocated memory vs. on-board memory
+ * for correct figure to use.
+ *
+ * Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) :
+ * + added spinlocks for SMP sanity (10 March 1999)
+ *
+ * Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting
+ * i.e. using functional address C0 00 00 04 00 00 to transmit and
+ * receive multicast packets.
+ *
+ *      Changes by Mike Sullivan (based on an original sram patch by Dave Grothe)
+ *      to support windowing into on-adapter shared RAM.
+ * i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging
+ * will shift this 16K window over the entire available shared RAM.
+ *
+ * Changes by Peter De Schrijver (p2@mind.be) :
+ * + fixed a problem with PCMCIA card removal
+ *
+ * Change by Mike Sullivan et al.:
+ * + added turbo card support. No need to use lanaid to configure
+ *        the adapter into ISA compatibility mode.
+ *
+ * Changes by Burt Silverman to allow the computer to behave nicely when
+ * a cable is pulled or not in place, or a PCMCIA card is removed hot.
+ */
+
+/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value
+in the event that chatty debug messages are desired - jjs 12/30/98 */
+
+#define IBMTR_DEBUG_MESSAGES 0
+
+#include <linux/module.h>
+
+#ifdef PCMCIA /* required for ibmtr_cs.c to build */
+#undef MODULE /* yes, really */
+#undef ENABLE_PAGING
+#else
+#define ENABLE_PAGING 1
+#endif
+
+#define FALSE 0
+#define TRUE (!FALSE)
+
+/* changes the output format of driver initialization */
+#define TR_VERBOSE 0
+
+/* some 95 OSes send many non-UI frames; this allows removing the warning */
+#define TR_FILTERNONUI 1
+
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/trdevice.h>
+#include <linux/ibmtr.h>
+
+#include <net/checksum.h>
+
+#include <asm/io.h>
+
+#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args)
+#define DPRINTD(format, args...) DummyCall("%s: " format, dev->name , ## args)
+
+/* version and credits */
+#ifndef PCMCIA
+static char version[] __initdata =
+ "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
+ " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
+ " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
+ " v2.2.1 02/08/00 Mike Sullivan <sullivam@us.ibm.com>\n"
+ " v2.2.2 07/27/00 Burt Silverman <burts@us.ibm.com>\n"
+ " v2.4.0 03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n";
+#endif
+
+/* this allows displaying full adapter information */
+
+char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" };
+
+static char pcchannelid[] __devinitdata = {
+ 0x05, 0x00, 0x04, 0x09,
+ 0x04, 0x03, 0x04, 0x0f,
+ 0x03, 0x06, 0x03, 0x01,
+ 0x03, 0x01, 0x03, 0x00,
+ 0x03, 0x09, 0x03, 0x09,
+ 0x03, 0x00, 0x02, 0x00
+};
+
+static char mcchannelid[] __devinitdata = {
+ 0x04, 0x0d, 0x04, 0x01,
+ 0x05, 0x02, 0x05, 0x03,
+ 0x03, 0x06, 0x03, 0x03,
+ 0x05, 0x08, 0x03, 0x04,
+ 0x03, 0x05, 0x03, 0x01,
+ 0x03, 0x08, 0x02, 0x00
+};
+
+char __devinit *adapter_def(char type)
+{
+ switch (type) {
+ case 0xF: return "PC Adapter | PC Adapter II | Adapter/A";
+ case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)";
+ case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
+ case 0xC: return "Auto 16/4 Adapter";
+ default: return "adapter (unknown type)";
+	}
+}
+
+#define TRC_INIT 0x01 /* Trace initialization & PROBEs */
+#define TRC_INITV 0x02 /* verbose init trace points */
+unsigned char ibmtr_debug_trace = 0;
+
+static int ibmtr_probe(struct net_device *dev);
+static int ibmtr_probe1(struct net_device *dev, int ioaddr);
+static unsigned char get_sram_size(struct tok_info *adapt_info);
+static int trdev_init(struct net_device *dev);
+static int tok_open(struct net_device *dev);
+static int tok_init_card(struct net_device *dev);
+void tok_open_adapter(unsigned long dev_addr);
+static void open_sap(unsigned char type, struct net_device *dev);
+static void tok_set_multicast_list(struct net_device *dev);
+static int tok_send_packet(struct sk_buff *skb, struct net_device *dev);
+static int tok_close(struct net_device *dev);
+irqreturn_t tok_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void initial_tok_int(struct net_device *dev);
+static void tr_tx(struct net_device *dev);
+static void tr_rx(struct net_device *dev);
+void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
+static void tok_rerun(unsigned long dev_addr);
+void ibmtr_readlog(struct net_device *dev);
+static struct net_device_stats *tok_get_stats(struct net_device *dev);
+int ibmtr_change_mtu(struct net_device *dev, int mtu);
+static void find_turbo_adapters(int *iolist);
+
+static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = {
+ 0xa20, 0xa24, 0, 0, 0
+};
+static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0};
+static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
+static int __devinitdata turbo_searched = 0;
+
+#ifndef PCMCIA
+static __u32 ibmtr_mem_base __initdata = 0xd0000;
+#endif
+
+static void __devinit PrtChanID(char *pcid, short stride)
+{
+ short i, j;
+ for (i = 0, j = 0; i < 24; i++, j += stride)
+ printk("%1x", ((int) pcid[j]) & 0x0f);
+ printk("\n");
+}
+
+static void __devinit HWPrtChanID(void __iomem *pcid, short stride)
+{
+ short i, j;
+ for (i = 0, j = 0; i < 24; i++, j += stride)
+ printk("%1x", ((int) readb(pcid + j)) & 0x0f);
+ printk("\n");
+}
+
+/* We have to ioremap every checked address, because isa_readb is
+ * going away.
+ */
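+/*
+ * Probe candidate shared-RAM windows from 0xC0000 up to 0xE0000 in 8KB
+ * (0x2000) steps, looking for the ISA channel-ID signature, to locate
+ * Turbo 16/4 adapters and pull their I/O port and IRQ from the
+ * interface table.
+ */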
+
+static void __devinit find_turbo_adapters(int *iolist)
+{
+ int ram_addr;
+ int index=0;
+ void __iomem *chanid;
+ int found_turbo=0;
+ unsigned char *tchanid, ctemp;
+ int i, j;
+ unsigned long jif;
+ void __iomem *ram_mapped ;
+
+ if (turbo_searched == 1) return;
+ turbo_searched=1;
+ for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) {
+
+ __u32 intf_tbl=0;
+
+ found_turbo=1;
+ ram_mapped = ioremap((u32)ram_addr,0x1fff) ;
+ if (ram_mapped==NULL)
+ continue ;
+ chanid=(CHANNEL_ID + ram_mapped);
+ tchanid=pcchannelid;
+ ctemp=readb(chanid) & 0x0f;
+ if (ctemp != *tchanid) continue;
+ for (i=2,j=1; i<=46; i=i+2,j++) {
+ if ((readb(chanid+i) & 0x0f) != tchanid[j]){
+ found_turbo=0;
+ break;
+ }
+ }
+ if (!found_turbo) continue;
+
+ writeb(0x90, ram_mapped+0x1E01);
+ for(i=2; i<0x0f; i++) {
+ writeb(0x00, ram_mapped+0x1E01+i);
+ }
+ writeb(0x00, ram_mapped+0x1E01);
+ for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif););
+ intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN));
+ if (intf_tbl) {
+#if IBMTR_DEBUG_MESSAGES
+ printk("ibmtr::find_turbo_adapters, Turbo found at "
+ "ram_addr %x\n",ram_addr);
+ printk("ibmtr::find_turbo_adapters, interface_table ");
+ for(i=0; i<6; i++) {
+ printk("%x:",readb(ram_addr+intf_tbl+i));
+ }
+ printk("\n");
+#endif
+ turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4));
+ turbo_irq[index]=readb(ram_mapped+intf_tbl+3);
+ outb(0, turbo_io[index] + ADAPTRESET);
+ for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif););
+ outb(0, turbo_io[index] + ADAPTRESETREL);
+ index++;
+ continue;
+ }
+#if IBMTR_DEBUG_MESSAGES
+ printk("ibmtr::find_turbo_adapters, ibmtr card found at"
+ " %x but not a Turbo model\n",ram_addr);
+#endif
+ iounmap(ram_mapped) ;
+ } /* for */
+ for(i=0; i<IBMTR_MAX_ADAPTERS; i++) {
+ if(!turbo_io[i]) break;
+ for (j=0; j<IBMTR_MAX_ADAPTERS; j++) {
+ if ( iolist[j] && iolist[j] != turbo_io[i]) continue;
+ iolist[j]=turbo_io[i];
+ break;
+ }
+ }
+}
+
+static void ibmtr_cleanup_card(struct net_device *dev)
+{
+ if (dev->base_addr) {
+ outb(0,dev->base_addr+ADAPTRESET);
+
+ schedule_timeout(TR_RST_TIME); /* wait 50ms */
+
+ outb(0,dev->base_addr+ADAPTRESETREL);
+ }
+
+#ifndef PCMCIA
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, IBMTR_IO_EXTENT);
+
+ {
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ iounmap(ti->mmio);
+ iounmap(ti->sram_virt);
+ }
+#endif
+}
+
+int ibmtr_probe_card(struct net_device *dev)
+{
+ int err = ibmtr_probe(dev);
+ if (!err) {
+ err = register_netdev(dev);
+ if (err)
+ ibmtr_cleanup_card(dev);
+ }
+ return err;
+}
+
+/****************************************************************************
+ * ibmtr_probe(): Routine specified in the network device structure
+ * to probe for an IBM Token Ring Adapter. Routine outline:
+ * I. Interrogate hardware to determine if an adapter exists
+ * and what the speeds and feeds are
+ * II. Setup data structures to control execution based upon
+ * adapter characteristics.
+ *
+ * We expect ibmtr_probe to be called once for each device entry
+ * which references it.
+ ****************************************************************************/
+
+static int ibmtr_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+
+ if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */
+ return -ENXIO;
+ if (base_addr > 0x1ff) { /* Check a single specified location. */
+ if (!ibmtr_probe1(dev, base_addr)) return 0;
+ return -ENODEV;
+ }
+ find_turbo_adapters(ibmtr_portlist);
+ for (i = 0; ibmtr_portlist[i]; i++) {
+ int ioaddr = ibmtr_portlist[i];
+
+ if (!ibmtr_probe1(dev, ioaddr)) return 0;
+ }
+ return -ENODEV;
+}
+
+/*****************************************************************************/
+
+static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
+{
+
+ unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0;
+ void __iomem * t_mmio = NULL;
+ struct tok_info *ti = dev->priv;
+ void __iomem *cd_chanid;
+ unsigned char *tchanid, ctemp;
+#ifndef PCMCIA
+ unsigned char t_irq=0;
+ unsigned long timeout;
+ static int version_printed;
+#endif
+
+ /* Query the adapter PIO base port which will return
+ * indication of where MMIO was placed. We also have a
+ * coded interrupt number.
+ */
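+	/*
+	 * e.g. a readback of 0xd0 puts the 2KB MMIO window at
+	 * ((0xd0 & 0xfc) << 11) + 0x80000 = 0xE8000 and encodes
+	 * interrupt code 0 in the two low bits.
+	 */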
+ segment = inb(PIOaddr);
+ if (segment < 0x40 || segment > 0xe0) {
+ /* Out of range values so we'll assume non-existent IO device
+ * but this is not necessarily a problem, esp if a turbo
+ * adapter is being used. */
+#if IBMTR_DEBUG_MESSAGES
+ DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, "
+ "Hardware Problem?\n",PIOaddr,segment);
+#endif
+ return -ENODEV;
+ }
+ /*
+ * Compute the linear base address of the MMIO area
+ * as LINUX doesn't care about segments
+ */
+ t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048);
+ if (!t_mmio) {
+ DPRINTK("Cannot remap mmiobase memory area") ;
+ return -ENODEV ;
+ }
+ intr = segment & 0x03; /* low bits is coded interrupt # */
+ if (ibmtr_debug_trace & TRC_INIT)
+ DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n"
+ , PIOaddr, (int) segment, t_mmio, (int) intr);
+
+ /*
+ * Now we will compare expected 'channelid' strings with
+	 * what is actually there, to learn whether this is an ISA or
+	 * MCA TR card, or not a TR card at all
+ */
+#ifdef PCMCIA
+ iounmap(t_mmio);
+ t_mmio = ti->mmio; /*BMS to get virtual address */
+ irq = ti->irq; /*BMS to display the irq! */
+#endif
+ cd_chanid = (CHANNEL_ID + t_mmio); /* for efficiency */
+ tchanid = pcchannelid;
+ cardpresent = TR_ISA; /* try ISA */
+
+ /* Suboptimize knowing first byte different */
+ ctemp = readb(cd_chanid) & 0x0f;
+ if (ctemp != *tchanid) { /* NOT ISA card, try MCA */
+ tchanid = mcchannelid;
+ cardpresent = TR_MCA;
+ if (ctemp != *tchanid) /* Neither ISA nor MCA */
+ cardpresent = NOTOK;
+ }
+ if (cardpresent != NOTOK) {
+ /* Know presumed type, try rest of ID */
+ for (i = 2, j = 1; i <= 46; i = i + 2, j++) {
+ if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue;
+ /* match failed, not TR card */
+ cardpresent = NOTOK;
+ break;
+ }
+ }
+ /*
+	 * If we have an ISA board, check for the ISA P&P version,
+ * as it has different IRQ settings
+ */
+ if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e))
+ cardpresent = TR_ISAPNP;
+ if (cardpresent == NOTOK) { /* "channel_id" did not match, report */
+ if (!(ibmtr_debug_trace & TRC_INIT)) {
+#ifndef PCMCIA
+ iounmap(t_mmio);
+#endif
+ return -ENODEV;
+ }
+ DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n",
+ PIOaddr);
+ DPRINTK("Expected for ISA: ");
+ PrtChanID(pcchannelid, 1);
+ DPRINTK(" found: ");
+/* BMS Note that this can be misleading, when hardware is flaky, because you
+   are reading it a second time here. So with my flaky hardware, I'll see
+   myself in this block, with the HW ID matching the ISA ID exactly! */
+ HWPrtChanID(cd_chanid, 2);
+ DPRINTK("Expected for MCA: ");
+ PrtChanID(mcchannelid, 1);
+ }
+ /* Now, setup some of the pl0 buffers for this driver.. */
+ /* If called from PCMCIA, it is already set up, so no need to
+ waste the memory, just use the existing structure */
+#ifndef PCMCIA
+ ti->mmio = t_mmio;
+ for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) {
+ if (turbo_io[i] != PIOaddr)
+ continue;
+#if IBMTR_DEBUG_MESSAGES
+ printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n",
+ PIOaddr);
+#endif
+ ti->turbo = 1;
+ t_irq = turbo_irq[i];
+ }
+#endif /* !PCMCIA */
+ ti->readlog_pending = 0;
+ init_waitqueue_head(&ti->wait_for_reset);
+
+ /* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP
+ * depending which card is inserted. */
+
+#ifndef PCMCIA
+ switch (cardpresent) {
+ case TR_ISA:
+ if (intr == 0) irq = 9; /* irq2 really is irq9 */
+ if (intr == 1) irq = 3;
+ if (intr == 2) irq = 6;
+ if (intr == 3) irq = 7;
+ ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
+ break;
+ case TR_MCA:
+ if (intr == 0) irq = 9;
+ if (intr == 1) irq = 3;
+ if (intr == 2) irq = 10;
+ if (intr == 3) irq = 11;
+ ti->global_int_enable = 0;
+ ti->adapter_int_enable = 0;
+ ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12;
+ break;
+ case TR_ISAPNP:
+ if (!t_irq) {
+ if (intr == 0) irq = 9;
+ if (intr == 1) irq = 3;
+ if (intr == 2) irq = 10;
+ if (intr == 3) irq = 11;
+ } else
+ irq=t_irq;
+ timeout = jiffies + TR_SPIN_INTERVAL;
+ while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){
+ if (!time_after(jiffies, timeout)) continue;
+ DPRINTK( "Hardware timeout during initialization.\n");
+ iounmap(t_mmio);
+ kfree(ti);
+ return -ENODEV;
+ }
+ ti->sram_phys =
+ ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12);
+ ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
+ break;
+ } /*end switch (cardpresent) */
+#endif /*not PCMCIA */
+
+ if (ibmtr_debug_trace & TRC_INIT) { /* just report int */
+ DPRINTK("irq=%d", irq);
+ printk(", sram_phys=0x%x", ti->sram_phys);
+ if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */
+ DPRINTK(", ti->mmio=%p", ti->mmio);
+ printk(", segment=%02X", segment);
+ }
+ printk(".\n");
+ }
+
+ /* Get hw address of token ring card */
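+	/*
+	 * The burned-in address sits one nibble per even AIP byte; pairs
+	 * of nibbles are recombined below into the six dev_addr bytes,
+	 * earlier nibble in the high half.
+	 */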
+ j = 0;
+ for (i = 0; i < 0x18; i = i + 2) {
+ /* technical reference states to do this */
+ temp = readb(ti->mmio + AIP + i) & 0x0f;
+ ti->hw_address[j] = temp;
+ if (j & 1)
+ dev->dev_addr[(j / 2)] =
+ ti->hw_address[j]+ (ti->hw_address[j - 1] << 4);
+ ++j;
+ }
+ /* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,... */
+ ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
+
+ /* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
+ ti->data_rate = readb(ti->mmio + AIPDATARATE);
+
+ /* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
+ ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
+
+ /* How much shared RAM is on adapter ? */
+ if (ti->turbo) {
+ ti->avail_shared_ram=127;
+ } else {
+ ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */
+ }
+ /* We need to set or do a bunch of work here based on previous results*/
+ /* Support paging? What sizes?: F=no, E=16k, D=32k, C=16 & 32k */
+ ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
+
+ /* Available DHB 4Mb size: F=2048, E=4096, D=4464 */
+ switch (readb(ti->mmio + AIP4MBDHB)) {
+ case 0xe: ti->dhb_size4mb = 4096; break;
+ case 0xd: ti->dhb_size4mb = 4464; break;
+ default: ti->dhb_size4mb = 2048; break;
+ }
+
+ /* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */
+ switch (readb(ti->mmio + AIP16MBDHB)) {
+ case 0xe: ti->dhb_size16mb = 4096; break;
+ case 0xd: ti->dhb_size16mb = 8192; break;
+ case 0xc: ti->dhb_size16mb = 16384; break;
+ case 0xb: ti->dhb_size16mb = 17960; break;
+ default: ti->dhb_size16mb = 2048; break;
+ }
+
+ /* We must figure out how much shared memory space this adapter
+ * will occupy so that if there are two adapters we can fit both
+ * in. Given a choice, we will limit this adapter to 32K. The
+	 * maximum space we will use for two adapters is 64K, so if the
+ * adapter we are working on demands 64K (it also doesn't support
+ * paging), then only one adapter can be supported.
+ */
+
+ /*
+ * determine how much of total RAM is mapped into PC space
+ */
+	ti->mapped_ram_size= /* sixteen to one hundred twenty-eight 512-byte blocks */
+ 1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4);
+ ti->page_mask = 0;
+ if (ti->turbo) ti->page_mask=0xf0;
+ else if (ti->shared_ram_paging == 0xf); /* No paging in adapter */
+ else {
+#ifdef ENABLE_PAGING
+ unsigned char pg_size = 0;
+ /* BMS: page size: PCMCIA, use configuration register;
+ ISAPNP, use LANAIDC config tool from www.ibm.com */
+ switch (ti->shared_ram_paging) {
+ case 0xf:
+ break;
+ case 0xe:
+ ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0;
+ pg_size = 32; /* 16KB page size */
+ break;
+ case 0xd:
+ ti->page_mask = (ti->mapped_ram_size == 64) ? 0x80 : 0;
+ pg_size = 64; /* 32KB page size */
+ break;
+ case 0xc:
+ switch (ti->mapped_ram_size) {
+ case 32:
+ ti->page_mask = 0xc0;
+ pg_size = 32;
+ break;
+ case 64:
+ ti->page_mask = 0x80;
+ pg_size = 64;
+ break;
+ }
+ break;
+ default:
+ DPRINTK("Unknown shared ram paging info %01X\n",
+ ti->shared_ram_paging);
+ iounmap(t_mmio);
+ kfree(ti);
+ return -ENODEV;
+ break;
+ } /*end switch shared_ram_paging */
+
+ if (ibmtr_debug_trace & TRC_INIT)
+ DPRINTK("Shared RAM paging code: %02X, "
+ "mapped RAM size: %dK, shared RAM size: %dK, "
+ "page mask: %02X\n:",
+ ti->shared_ram_paging, ti->mapped_ram_size / 2,
+ ti->avail_shared_ram / 2, ti->page_mask);
+#endif /*ENABLE_PAGING */
+ }
+
+#ifndef PCMCIA
+ /* finish figuring the shared RAM address */
+ if (cardpresent == TR_ISA) {
+ static __u32 ram_bndry_mask[] =
+ { 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 };
+ __u32 new_base, rrr_32, chk_base, rbm;
+
+ rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
+ rbm = ram_bndry_mask[rrr_32];
+ new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */
+ chk_base = new_base + (ti->mapped_ram_size << 9);
+ if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) {
+ DPRINTK("Shared RAM for this adapter (%05x) exceeds "
+ "driver limit (%05x), adapter not started.\n",
+ chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
+ iounmap(t_mmio);
+ kfree(ti);
+ return -ENODEV;
+ } else { /* seems cool, record what we have figured out */
+ ti->sram_base = new_base >> 12;
+ ibmtr_mem_base = chk_base;
+ }
+ }
+ else ti->sram_base = ti->sram_phys >> 12;
+
+ /* The PCMCIA has already got the interrupt line and the io port,
+ so no chance of anybody else getting it - MLP */
+ if (request_irq(dev->irq = irq, &tok_interrupt, 0, "ibmtr", dev) != 0) {
+ DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
+ irq);
+ iounmap(t_mmio);
+ kfree(ti);
+ return -ENODEV;
+ }
+ /*?? Now, allocate some of the PIO PORTs for this driver.. */
+ /* record PIOaddr range as busy */
+ if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) {
+ DPRINTK("Could not grab PIO range. Halting driver.\n");
+ free_irq(dev->irq, dev);
+ iounmap(t_mmio);
+ kfree(ti);
+ return -EBUSY;
+ }
+
+ if (!version_printed++) {
+ printk(version);
+ }
+#endif /* !PCMCIA */
+ DPRINTK("%s %s found\n",
+ channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
+ DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
+ irq, PIOaddr, ti->mapped_ram_size / 2);
+ DPRINTK("Hardware address : %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ if (ti->page_mask)
+ DPRINTK("Shared RAM paging enabled. "
+ "Page size: %uK Shared Ram size %dK\n",
+ ((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2);
+ else
+ DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n",
+ ti->page_mask);
+
+ /* Calculate the maximum DHB we can use */
+ /* two cases where avail_shared_ram doesn't equal mapped_ram_size:
+ 1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical)
+ 2. user has configured adapter for less than avail_shared_ram
+ but is not using paging (she should use paging, I believe)
+ */
+ if (!ti->page_mask) {
+ ti->avail_shared_ram=
+ min(ti->mapped_ram_size,ti->avail_shared_ram);
+ }
+
+ switch (ti->avail_shared_ram) {
+ case 16: /* 8KB shared RAM */
+ ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048);
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=2;
+ ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048);
+ ti->rbuf_len16 = 1032;
+ ti->rbuf_cnt16=2;
+ break;
+ case 32: /* 16KB shared RAM */
+ ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=4;
+ ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096);
+ ti->rbuf_len16 = 1032; /*1024 usable */
+ ti->rbuf_cnt16=4;
+ break;
+ case 64: /* 32KB shared RAM */
+ ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=6;
+ ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240);
+ ti->rbuf_len16 = 1032;
+ ti->rbuf_cnt16=6;
+ break;
+ case 127: /* 63.5KB shared RAM */
+ ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=6;
+ ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384);
+ ti->rbuf_len16 = 1032;
+ ti->rbuf_cnt16=16;
+ break;
+ case 128: /* 64KB shared RAM */
+ ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=6;
+ ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960);
+ ti->rbuf_len16 = 1032;
+ ti->rbuf_cnt16=16;
+ break;
+ default:
+ ti->dhb_size4mb = 2048;
+ ti->rbuf_len4 = 1032;
+ ti->rbuf_cnt4=2;
+ ti->dhb_size16mb = 2048;
+ ti->rbuf_len16 = 1032;
+ ti->rbuf_cnt16=2;
+ break;
+ }
+ /* this formula is not smart enough for the paging case
+ ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE -
+ ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH -
+ DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB -
+ SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>;
+ */
+ ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN;
+ ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN;
+ /*BMS assuming 18 bytes of Routing Information (usually works) */
+ DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n",
+ ti->maxmtu16, ti->maxmtu4);
+
+ dev->base_addr = PIOaddr; /* set the value for device */
+ dev->mem_start = ti->sram_base << 12;
+ dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1;
+ trdev_init(dev);
+ return 0; /* Return 0 to indicate we have found a Token Ring card. */
+} /*ibmtr_probe1() */
+
+/*****************************************************************************/
+
+/* query the adapter for the size of shared RAM */
+/* the function returns the RAM size in units of 512 bytes */
+
+static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
+{
+ unsigned char avail_sram_code;
+ static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 };
+ /* Adapter gives
+ 'F' -- use RRR bits 3,2
+ 'E' -- 8kb 'D' -- 16kb
+ 'C' -- 32kb 'A' -- 64KB
+ 'B' - 64KB less 512 bytes at top
+	   (WARNING ... must zero top bytes in INIT) */
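+	/* e.g. code 'E' gives avail_sram_code = 0xf - 0xe = 1, so
+	   size_code[1] = 16 units of 512 bytes = 8KB, matching the
+	   table above. */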
+
+ avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM);
+ if (avail_sram_code) return size_code[avail_sram_code];
+ else /* for code 'F', must compute size from RRR(3,2) bits */
+ return 1 <<
+ ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4);
+}
+
+/*****************************************************************************/
+
+static int __devinit trdev_init(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+
+ SET_PAGE(ti->srb_page);
+ ti->open_failure = NO ;
+ dev->open = tok_open;
+ dev->stop = tok_close;
+ dev->hard_start_xmit = tok_send_packet;
+ dev->get_stats = tok_get_stats;
+ dev->set_multicast_list = tok_set_multicast_list;
+ dev->change_mtu = ibmtr_change_mtu;
+
+ return 0;
+}
+
+/*****************************************************************************/
+
+static int tok_init_card(struct net_device *dev)
+{
+ struct tok_info *ti;
+ short PIOaddr;
+ unsigned long i;
+
+ PIOaddr = dev->base_addr;
+ ti = (struct tok_info *) dev->priv;
+ /* Special processing for first interrupt after reset */
+ ti->do_tok_int = FIRST_INT;
+ /* Reset adapter */
+ writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+ outb(0, PIOaddr + ADAPTRESET);
+
+ current->state=TASK_UNINTERRUPTIBLE;
+ schedule_timeout(TR_RST_TIME); /* wait 50ms */
+
+ outb(0, PIOaddr + ADAPTRESETREL);
+#ifdef ENABLE_PAGING
+ if (ti->page_mask)
+ writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN);
+#endif
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ);
+ return i? 0 : -EAGAIN;
+}
+
+/*****************************************************************************/
+static int tok_open(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ int i;
+
+	/* the case where we were left in a failure state during a previous open */
+ if (ti->open_failure == YES) {
+ DPRINTK("Last time you were disconnected, how about now?\n");
+ printk("You can't insert with an ICS connector half-cocked.\n");
+ }
+
+ ti->open_status = CLOSED; /* CLOSED or OPEN */
+ ti->sap_status = CLOSED; /* CLOSED or OPEN */
+ ti->open_failure = NO; /* NO or YES */
+ ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */
+ /* 12/2000 not typical Linux, but we can use RUNNING to let us know when
+ the network has crapped out or cables are disconnected. Useful because
+ the IFF_UP flag stays up the whole time, until ifconfig tr0 down.
+ */
+ dev->flags &= ~IFF_RUNNING;
+
+ ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
+ /* init the spinlock */
+ spin_lock_init(&ti->lock);
+ init_timer(&ti->tr_timer);
+
+ i = tok_init_card(dev);
+ if (i) return i;
+
+ while (1){
+ tok_open_adapter((unsigned long) dev);
+ i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ);
+ /* sig catch: estimate opening adapter takes more than .5 sec*/
+ if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */
+ if (i==0) break;
+ if (ti->open_status == OPEN && ti->sap_status==OPEN) {
+ netif_start_queue(dev);
+ DPRINTK("Adapter is up and running\n");
+ return 0;
+ }
+ current->state=TASK_INTERRUPTIBLE;
+ i=schedule_timeout(TR_RETRY_INTERVAL); /* wait 30 seconds */
+ if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
+ }
+ outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
+ DPRINTK("TERMINATED via signal\n"); /*BMS useful */
+ return -EAGAIN;
+}
+
+/*****************************************************************************/
+
+#define COMMAND_OFST 0
+#define OPEN_OPTIONS_OFST 8
+#define NUM_RCV_BUF_OFST 24
+#define RCV_BUF_LEN_OFST 26
+#define DHB_LENGTH_OFST 28
+#define NUM_DHB_OFST 30
+#define DLC_MAX_SAP_OFST 32
+#define DLC_MAX_STA_OFST 33
+
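+/*
+ * The *_OFST values above index fields of the DIR.OPEN.ADAPTER SRB in
+ * adapter shared RAM; 16-bit fields are written with htons() since the
+ * adapter expects big-endian values.
+ */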
+void tok_open_adapter(unsigned long dev_addr)
+{
+ struct net_device *dev = (struct net_device *) dev_addr;
+ struct tok_info *ti;
+ int i;
+
+ ti = (struct tok_info *) dev->priv;
+ SET_PAGE(ti->init_srb_page);
+ writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
+ for (i = 0; i < sizeof(struct dir_open_adapter); i++)
+ writeb(0, ti->init_srb + i);
+ writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST);
+ writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST);
+ if (ti->ring_speed == 16) {
+ writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST);
+ writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST);
+ writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST);
+ } else {
+ writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST);
+ writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST);
+ writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST);
+ }
+ writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST);
+ writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST);
+ writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST);
+ ti->srb = ti->init_srb; /* We use this one in the interrupt handler */
+ ti->srb_page = ti->init_srb_page;
+ DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n",
+ readb(ti->init_srb + NUM_DHB_OFST),
+ ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)),
+ ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)),
+ ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST)));
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+}
+
+/*****************************************************************************/
+
+static void open_sap(unsigned char type, struct net_device *dev)
+{
+ int i;
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+
+ SET_PAGE(ti->srb_page);
+ for (i = 0; i < sizeof(struct dlc_open_sap); i++)
+ writeb(0, ti->srb + i);
+
+#define MAX_I_FIELD_OFST 14
+#define SAP_VALUE_OFST 16
+#define SAP_OPTIONS_OFST 17
+#define STATION_COUNT_OFST 18
+
+ writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST);
+ writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST);
+ writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST);
+ writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST);
+ writeb(type, ti->srb + SAP_VALUE_OFST);
+ writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+}
+
+
+/*****************************************************************************/
+
+static void tok_set_multicast_list(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ struct dev_mc_list *mclist;
+ unsigned char address[4];
+
+ int i;
+
+ /*BMS the next line is CRUCIAL or you may be sad when you */
+ /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
+ if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
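+	/*
+	 * Fold every multicast address into one functional address by
+	 * OR-ing bytes 2-5 of each entry (RFC 1469 option 2, per the
+	 * changelog at the top of this file).
+	 */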
+ address[0] = address[1] = address[2] = address[3] = 0;
+ mclist = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++) {
+ address[0] |= mclist->dmi_addr[2];
+ address[1] |= mclist->dmi_addr[3];
+ address[2] |= mclist->dmi_addr[4];
+ address[3] |= mclist->dmi_addr[5];
+ mclist = mclist->next;
+ }
+ SET_PAGE(ti->srb_page);
+ for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
+ writeb(0, ti->srb + i);
+
+#define FUNCT_ADDRESS_OFST 6
+
+ writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST);
+ for (i = 0; i < 4; i++)
+ writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i);
+ writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+#if TR_VERBOSE
+ DPRINTK("Setting functional address: ");
+ for (i=0;i<4;i++) printk("%02X ", address[i]);
+ printk("\n");
+#endif
+}
+
+/*****************************************************************************/
+
+#define STATION_ID_OFST 4
+
+static int tok_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tok_info *ti;
+ unsigned long flags;
+ ti = (struct tok_info *) dev->priv;
+
+ netif_stop_queue(dev);
+
+ /* lock against other CPUs */
+ spin_lock_irqsave(&(ti->lock), flags);
+
+ /* Save skb; we'll need it when the adapter asks for the data */
+ ti->current_skb = skb;
+ SET_PAGE(ti->srb_page);
+ writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST);
+ writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
+ writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ spin_unlock_irqrestore(&(ti->lock), flags);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/*****************************************************************************/
+
+static int tok_close(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+
+ /* Important for PCMCIA hot unplug: otherwise we'd pull the card and */
+ /* unload the module from memory, and then, if a timer pops, ouch. */
+ del_timer_sync(&ti->tr_timer);
+ outb(0, dev->base_addr + ADAPTRESET);
+ ti->sram_phys |= 1;
+ ti->open_status = CLOSED;
+
+ netif_stop_queue(dev);
+ DPRINTK("Adapter is closed.\n");
+ return 0;
+}
+
+/*****************************************************************************/
+
+#define RETCODE_OFST 2
+#define OPEN_ERROR_CODE_OFST 6
+#define ASB_ADDRESS_OFST 8
+#define SRB_ADDRESS_OFST 10
+#define ARB_ADDRESS_OFST 12
+#define SSB_ADDRESS_OFST 14
+
+static char *printphase[]= {"Lobe media test","Physical insertion",
+ "Address verification","Roll call poll","Request Parameters"};
+static char *printerror[]={"Function failure","Signal loss","Reserved",
+ "Frequency error","Timeout","Ring failure","Ring beaconing",
+ "Duplicate node address",
+ "Parameter request-retry count exceeded","Remove received",
+ "IMPL force received","Duplicate modifier",
+ "No monitor detected","Monitor contention failed for RPL"};
+
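+
+/* map_address(): convert a 16-bit shared-RAM offset reported by the
+ adapter into a host virtual address.  With shared-RAM paging enabled
+ (page_mask set), the high bits of the offset select a page, returned
+ through *page so the caller can SET_PAGE() before using the address. */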
+static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page)
+{
+ if (ti->page_mask) {
+ *page = (index >> 8) & ti->page_mask;
+ index &= ~(ti->page_mask << 8);
+ }
+ return ti->sram_virt + index;
+}
+
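+
+/* dir_open_adapter(): completion handling for the DIR_OPEN_ADAPTER SRB
+ command.  Record the SRB/SSB/ARB/ASB offsets the adapter returned, then
+ either open the SAP on success or, depending on the error code, schedule
+ a delayed reopen or give up. */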
+void dir_open_adapter (struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ unsigned char ret_code;
+ __u16 err;
+
+ ti->srb = map_address(ti,
+ ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)),
+ &ti->srb_page);
+ ti->ssb = map_address(ti,
+ ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)),
+ &ti->ssb_page);
+ ti->arb = map_address(ti,
+ ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)),
+ &ti->arb_page);
+ ti->asb = map_address(ti,
+ ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)),
+ &ti->asb_page);
+ ti->current_skb = NULL;
+ ret_code = readb(ti->init_srb + RETCODE_OFST);
+ err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST));
+ if (!ret_code) {
+ ti->open_status = OPEN; /* TR adapter is now available */
+ if (ti->open_mode == AUTOMATIC) {
+ DPRINTK("Adapter reopened.\n");
+ }
+ writeb(~SRB_RESP_INT, ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD);
+ open_sap(EXTENDED_SAP, dev);
+ return;
+ }
+ ti->open_failure = YES;
+ if (ret_code == 7){
+ if (err == 0x24) {
+ if (!ti->auto_speedsave) {
+ DPRINTK("Open failed: Adapter speed must match "
+ "ring speed if Automatic Ring Speed Save is "
+ "disabled.\n");
+ ti->open_action = FAIL;
+ }else
+ DPRINTK("Retrying open to adjust to "
+ "ring speed, ");
+ } else if (err == 0x2d) {
+ DPRINTK("Physical Insertion: No Monitor Detected, ");
+ printk("retrying after %ds delay...\n",
+ TR_RETRY_INTERVAL/HZ);
+ } else if (err == 0x11) {
+ DPRINTK("Lobe Media Function Failure (0x11), ");
+ printk(" retrying after %ds delay...\n",
+ TR_RETRY_INTERVAL/HZ);
+ } else {
+ char **prphase = printphase;
+ char **prerror = printerror;
+ DPRINTK("TR Adapter misc open failure, error code = ");
+ printk("0x%x, Phase: %s, Error: %s\n",
+ err, prphase[err/16 -1], prerror[err%16 -1]);
+ printk(" retrying after %ds delay...\n",
+ TR_RETRY_INTERVAL/HZ);
+ }
+ } else DPRINTK("open failed: ret_code = %02X..., ", ret_code);
+ if (ti->open_action != FAIL) {
+ if (ti->open_mode==AUTOMATIC){
+ ti->open_action = REOPEN;
+ ibmtr_reset_timer(&(ti->tr_timer), dev);
+ return;
+ }
+ wake_up(&ti->wait_for_reset);
+ return;
+ }
+ DPRINTK("FAILURE, CAPUT\n");
+}
+
+/******************************************************************************/
+
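+
+/* tok_interrupt(): handles, in order, the very first adapter interrupt
+ (initial_tok_int), adapter-check and error conditions, and then the
+ SRB, ASB, ARB and SSB responses, re-enabling adapter interrupts on
+ the way out. */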
+irqreturn_t tok_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned char status;
+ /* unsigned char status_even ; */
+ struct tok_info *ti;
+ struct net_device *dev;
+#ifdef ENABLE_PAGING
+ unsigned char save_srpr;
+#endif
+
+ dev = dev_id;
+#if TR_VERBOSE
+ DPRINTK("Int from tok_driver, dev : %p irq%d regs=%p\n", dev,irq,regs);
+#endif
+ ti = (struct tok_info *) dev->priv;
+ if (ti->sram_phys & 1)
+ return IRQ_NONE; /* PCMCIA card extraction flag */
+ spin_lock(&(ti->lock));
+#ifdef ENABLE_PAGING
+ save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+
+ /* Disable interrupts till processing is finished */
+ writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+
+ /* Reset interrupt for ISA boards */
+ if (ti->adapter_int_enable)
+ outb(0, ti->adapter_int_enable);
+ else /* used for PCMCIA cards */
+ outb(0, ti->global_int_enable);
+ if (ti->do_tok_int == FIRST_INT){
+ initial_tok_int(dev);
+#ifdef ENABLE_PAGING
+ writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+ spin_unlock(&(ti->lock));
+ return IRQ_HANDLED;
+ }
+ /* Begin interrupt handler HERE inline to avoid the extra
+ levels of logic and call depth for the original solution. */
+ status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
+ /*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */
+ /*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */
+ /*BMS status,status_even); */
+
+ if (status & ADAP_CHK_INT) {
+ int i;
+ void __iomem *check_reason;
+ __u8 check_reason_page = 0;
+ check_reason = map_address(ti,
+ ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)),
+ &check_reason_page);
+ SET_PAGE(check_reason_page);
+
+ DPRINTK("Adapter check interrupt\n");
+ DPRINTK("8 reason bytes follow: ");
+ for (i = 0; i < 8; i++, check_reason++)
+ printk("%02X ", (int) readb(check_reason));
+ printk("\n");
+ writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
+ status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN);
+ DPRINTK("ISRA_EVEN == 0x%02x\n", status);
+ ti->open_status = CLOSED;
+ ti->sap_status = CLOSED;
+ ti->open_mode = AUTOMATIC;
+ dev->flags &= ~IFF_RUNNING;
+ netif_stop_queue(dev);
+ ti->open_action = RESTART;
+ outb(0, dev->base_addr + ADAPTRESET);
+ ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/
+ spin_unlock(&(ti->lock));
+ return IRQ_HANDLED;
+ }
+ if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
+ & (TCR_INT | ERR_INT | ACCESS_INT)) {
+ DPRINTK("adapter error: ISRP_EVEN : %02x\n",
+ (int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN));
+ writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
+ ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
+ status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/
+ DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+#ifdef ENABLE_PAGING
+ writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+ spin_unlock(&(ti->lock));
+ return IRQ_HANDLED;
+ }
+ if (status & SRB_RESP_INT) { /* SRB response */
+ SET_PAGE(ti->srb_page);
+#if TR_VERBOSE
+ DPRINTK("SRB resp: cmd=%02X rsp=%02X\n",
+ readb(ti->srb), readb(ti->srb + RETCODE_OFST));
+#endif
+ switch (readb(ti->srb)) { /* SRB command check */
+ case XMIT_DIR_FRAME:{
+ unsigned char xmit_ret_code;
+ xmit_ret_code = readb(ti->srb + RETCODE_OFST);
+ if (xmit_ret_code == 0xff) break;
+ DPRINTK("error on xmit_dir_frame request: %02X\n",
+ xmit_ret_code);
+ if (ti->current_skb) {
+ dev_kfree_skb_irq(ti->current_skb);
+ ti->current_skb = NULL;
+ }
+ /*dev->tbusy = 0;*/
+ netif_wake_queue(dev);
+ if (ti->readlog_pending)
+ ibmtr_readlog(dev);
+ break;
+ }
+ case XMIT_UI_FRAME:{
+ unsigned char xmit_ret_code;
+
+ xmit_ret_code = readb(ti->srb + RETCODE_OFST);
+ if (xmit_ret_code == 0xff) break;
+ DPRINTK("error on xmit_ui_frame request: %02X\n",
+ xmit_ret_code);
+ if (ti->current_skb) {
+ dev_kfree_skb_irq(ti->current_skb);
+ ti->current_skb = NULL;
+ }
+ netif_wake_queue(dev);
+ if (ti->readlog_pending)
+ ibmtr_readlog(dev);
+ break;
+ }
+ case DIR_OPEN_ADAPTER:
+ dir_open_adapter(dev);
+ break;
+ case DLC_OPEN_SAP:
+ if (readb(ti->srb + RETCODE_OFST)) {
+ DPRINTK("open_sap failed: ret_code = %02X, "
+ "retrying\n",
+ (int) readb(ti->srb + RETCODE_OFST));
+ ti->open_action = REOPEN;
+ ibmtr_reset_timer(&(ti->tr_timer), dev);
+ break;
+ }
+ ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST);
+ ti->sap_status = OPEN;/* TR adapter is now available */
+ if (ti->open_mode==MANUAL){
+ wake_up(&ti->wait_for_reset);
+ break;
+ }
+ netif_wake_queue(dev);
+ dev->flags |= IFF_RUNNING;/*BMS 12/2000*/
+ break;
+ case DIR_INTERRUPT:
+ case DIR_MOD_OPEN_PARAMS:
+ case DIR_SET_GRP_ADDR:
+ case DIR_SET_FUNC_ADDR:
+ case DLC_CLOSE_SAP:
+ if (readb(ti->srb + RETCODE_OFST))
+ DPRINTK("error on %02X: %02X\n",
+ (int) readb(ti->srb + COMMAND_OFST),
+ (int) readb(ti->srb + RETCODE_OFST));
+ break;
+ case DIR_READ_LOG:
+ if (readb(ti->srb + RETCODE_OFST)){
+ DPRINTK("error on dir_read_log: %02X\n",
+ (int) readb(ti->srb + RETCODE_OFST));
+ netif_wake_queue(dev);
+ break;
+ }
+#if IBMTR_DEBUG_MESSAGES
+
+#define LINE_ERRORS_OFST 0
+#define INTERNAL_ERRORS_OFST 1
+#define BURST_ERRORS_OFST 2
+#define AC_ERRORS_OFST 3
+#define ABORT_DELIMITERS_OFST 4
+#define LOST_FRAMES_OFST 6
+#define RECV_CONGEST_COUNT_OFST 7
+#define FRAME_COPIED_ERRORS_OFST 8
+#define FREQUENCY_ERRORS_OFST 9
+#define TOKEN_ERRORS_OFST 10
+
+ DPRINTK("Line errors %02X, Internal errors %02X, "
+ "Burst errors %02X\n" "A/C errors %02X, "
+ "Abort delimiters %02X, Lost frames %02X\n"
+ "Receive congestion count %02X, "
+ "Frame copied errors %02X\nFrequency errors %02X, "
+ "Token errors %02X\n",
+ (int) readb(ti->srb + LINE_ERRORS_OFST),
+ (int) readb(ti->srb + INTERNAL_ERRORS_OFST),
+ (int) readb(ti->srb + BURST_ERRORS_OFST),
+ (int) readb(ti->srb + AC_ERRORS_OFST),
+ (int) readb(ti->srb + ABORT_DELIMITERS_OFST),
+ (int) readb(ti->srb + LOST_FRAMES_OFST),
+ (int) readb(ti->srb + RECV_CONGEST_COUNT_OFST),
+ (int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST),
+ (int) readb(ti->srb + FREQUENCY_ERRORS_OFST),
+ (int) readb(ti->srb + TOKEN_ERRORS_OFST));
+#endif
+ netif_wake_queue(dev);
+ break;
+ default:
+ DPRINTK("Unknown command %02X encountered\n",
+ (int) readb(ti->srb));
+ } /* end switch SRB command check */
+ writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
+ } /* if SRB response */
+ if (status & ASB_FREE_INT) { /* ASB response */
+ SET_PAGE(ti->asb_page);
+#if TR_VERBOSE
+ DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb));
+#endif
+
+ switch (readb(ti->asb)) { /* ASB command check */
+ case REC_DATA:
+ case XMIT_UI_FRAME:
+ case XMIT_DIR_FRAME:
+ break;
+ default:
+ DPRINTK("unknown command in asb %02X\n",
+ (int) readb(ti->asb));
+ } /* switch ASB command check */
+ if (readb(ti->asb + 2) != 0xff) /* checks ret_code */
+ DPRINTK("ASB error %02X in cmd %02X\n",
+ (int) readb(ti->asb + 2), (int) readb(ti->asb));
+ writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
+ } /* if ASB response */
+
+#define STATUS_OFST 6
+#define NETW_STATUS_OFST 6
+
+ if (status & ARB_CMD_INT) { /* ARB response */
+ SET_PAGE(ti->arb_page);
+#if TR_VERBOSE
+ DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb));
+#endif
+
+ switch (readb(ti->arb)) { /* ARB command check */
+ case DLC_STATUS:
+ DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
+ ntohs(readw(ti->arb + STATUS_OFST)),
+ ntohs(readw(ti->arb+ STATION_ID_OFST)));
+ break;
+ case REC_DATA:
+ tr_rx(dev);
+ break;
+ case RING_STAT_CHANGE:{
+ unsigned short ring_status;
+ ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST));
+ if (ibmtr_debug_trace & TRC_INIT)
+ DPRINTK("Ring Status Change...(0x%x)\n",
+ ring_status);
+ if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
+ netif_stop_queue(dev);
+ dev->flags &= ~IFF_RUNNING;/*not typical Linux*/
+ DPRINTK("Remove received, or Auto-removal error"
+ ", or Lobe fault\n");
+ DPRINTK("We'll try to reopen the closed adapter"
+ " after a %d second delay.\n",
+ TR_RETRY_INTERVAL/HZ);
+ /* I was confused: I saw the TR reopening, but forgot that */
+ /* with an RJ45 in an RJ45/ICS adapter while the adapter is */
+ /* not in the ring, the TR will open, and then soon close */
+ /* and come here. */
+ ti->open_mode = AUTOMATIC;
+ ti->open_status = CLOSED; /*12/2000 BMS*/
+ ti->open_action = REOPEN;
+ ibmtr_reset_timer(&(ti->tr_timer), dev);
+ } else if (ring_status & LOG_OVERFLOW) {
+ if(netif_queue_stopped(dev))
+ ti->readlog_pending = 1;
+ else
+ ibmtr_readlog(dev);
+ }
+ break;
+ }
+ case XMIT_DATA_REQ:
+ tr_tx(dev);
+ break;
+ default:
+ DPRINTK("Unknown command %02X in arb\n",
+ (int) readb(ti->arb));
+ break;
+ } /* switch ARB command check */
+ writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD);
+ writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ } /* if ARB response */
+ if (status & SSB_RESP_INT) { /* SSB response */
+ unsigned char retcode;
+ SET_PAGE(ti->ssb_page);
+#if TR_VERBOSE
+ DPRINTK("SSB resp: cmd=%02X rsp=%02X\n",
+ readb(ti->ssb), readb(ti->ssb + 2));
+#endif
+
+ switch (readb(ti->ssb)) { /* SSB command check */
+ case XMIT_DIR_FRAME:
+ case XMIT_UI_FRAME:
+ retcode = readb(ti->ssb + 2);
+ if (retcode && (retcode != 0x22))/* checks ret_code */
+ DPRINTK("xmit ret_code: %02X xmit error code: "
+ "%02X\n",
+ (int)retcode, (int)readb(ti->ssb + 6));
+ else
+ ti->tr_stats.tx_packets++;
+ break;
+ case XMIT_XID_CMD:
+ DPRINTK("xmit xid ret_code: %02X\n",
+ (int) readb(ti->ssb + 2));
+ default:
+ DPRINTK("Unknown command %02X in ssb\n",
+ (int) readb(ti->ssb));
+ } /* SSB command check */
+ writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
+ writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ } /* if SSB response */
+#ifdef ENABLE_PAGING
+ writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ spin_unlock(&(ti->lock));
+ return IRQ_HANDLED;
+} /*tok_interrupt */
+
+/*****************************************************************************/
+
+#define INIT_STATUS_OFST 1
+#define INIT_STATUS_2_OFST 2
+#define ENCODED_ADDRESS_OFST 8
+
+static void initial_tok_int(struct net_device *dev)
+{
+
+ __u32 encoded_addr, hw_encoded_addr;
+ struct tok_info *ti;
+ unsigned char init_status; /*BMS 12/2000*/
+
+ ti = (struct tok_info *) dev->priv;
+
+ ti->do_tok_int = NOT_FIRST;
+
+ /* we assign the shared-ram address for ISA devices */
+ writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
+#ifndef PCMCIA
+ ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram);
+#endif
+ ti->init_srb = map_address(ti,
+ ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)),
+ &ti->init_srb_page);
+ if (ti->page_mask && ti->avail_shared_ram == 127) {
+ void __iomem *last_512;
+ __u8 last_512_page=0;
+ int i;
+ last_512 = map_address(ti, 0xfe00, &last_512_page);
+ /* initialize high section of ram (if necessary) */
+ SET_PAGE(last_512_page);
+ for (i = 0; i < 512; i++)
+ writeb(0, last_512 + i);
+ }
+ SET_PAGE(ti->init_srb_page);
+
+#if TR_VERBOSE
+ {
+ int i;
+
+ DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page);
+ DPRINTK("init_srb(%p):", ti->init_srb );
+ for (i = 0; i < 20; i++)
+ printk("%02X ", (int) readb(ti->init_srb + i));
+ printk("\n");
+ }
+#endif
+
+ hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST);
+ encoded_addr = ntohs(hw_encoded_addr);
+ init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/
+ readb(ti->init_srb+offsetof(struct srb_init_response,init_status));
+ /*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/
+ ti->ring_speed = init_status & 0x01 ? 16 : 4;
+ DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n",
+ ti->ring_speed, (unsigned int)dev->mem_start);
+ ti->auto_speedsave=readb(ti->init_srb+INIT_STATUS_2_OFST)&4?TRUE:FALSE;
+
+ if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset);
+ else tok_open_adapter((unsigned long)dev);
+
+} /*initial_tok_int() */
+
+/*****************************************************************************/
+
+#define CMD_CORRELATE_OFST 1
+#define DHB_ADDRESS_OFST 6
+
+#define FRAME_LENGTH_OFST 6
+#define HEADER_LENGTH_OFST 8
+#define RSAP_VALUE_OFST 9
+
+static void tr_tx(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data;
+ unsigned int hdr_len;
+ __u32 dhb=0,dhb_base;
+ void __iomem *dhbuf = NULL;
+ unsigned char xmit_command;
+ int i,dhb_len=0x4000,src_len,src_offset;
+ struct trllc *llc;
+ struct srb_xmit xsrb;
+ __u8 dhb_page = 0;
+ __u8 llc_ssap;
+
+ SET_PAGE(ti->asb_page);
+
+ if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n");
+
+ /* The adapter, in raising the transmit interrupt, is telling us it is
+ ready for data and providing a shared-memory address for us to fill.
+ Here we compute the effective address where we will place the data.
+ */
+ SET_PAGE(ti->arb_page);
+ dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST));
+ if (ti->page_mask) {
+ dhb_page = (dhb_base >> 8) & ti->page_mask;
+ dhb=dhb_base & ~(ti->page_mask << 8);
+ }
+ dhbuf = ti->sram_virt + dhb;
+
+ /* Figure out the size of the 802.5 header */
+ if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */
+ hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN;
+ else
+ hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8)
+ + sizeof(struct trh_hdr) - TR_MAXRIFLEN;
+
+ llc = (struct trllc *) (ti->current_skb->data + hdr_len);
+
+ llc_ssap = llc->ssap;
+ SET_PAGE(ti->srb_page);
+ memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb));
+ SET_PAGE(ti->asb_page);
+ xmit_command = xsrb.command;
+
+ writeb(xmit_command, ti->asb + COMMAND_OFST);
+ writew(xsrb.station_id, ti->asb + STATION_ID_OFST);
+ writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST);
+ writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST);
+ writeb(0, ti->asb + RETCODE_OFST);
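+ /* XID and TEST commands carry no payload from the skb: build a minimal
+ MAC-level frame (AC and FC bytes, all-ones destination, zero source)
+ directly in the DHB and answer through the ASB. */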
+ if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) {
+ writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST);
+ writeb(0x0e, ti->asb + HEADER_LENGTH_OFST);
+ SET_PAGE(dhb_page);
+ writeb(AC, dhbuf);
+ writeb(LLC_FRAME, dhbuf + 1);
+ for (i = 0; i < TR_ALEN; i++)
+ writeb((int) 0x0FF, dhbuf + i + 2);
+ for (i = 0; i < TR_ALEN; i++)
+ writeb(0, dhbuf + i + TR_ALEN + 2);
+ writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ return;
+ }
+ /*
+ * the token ring packet is copied from sk_buff to the adapter
+ * buffer identified in the command data received with the interrupt.
+ */
+ writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST);
+ writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST);
+ src_len=ti->current_skb->len;
+ src_offset=0;
+ dhb=dhb_base;
+ while(1) {
+ if (ti->page_mask) {
+ dhb_page=(dhb >> 8) & ti->page_mask;
+ dhb=dhb & ~(ti->page_mask << 8);
+ dhb_len=0x4000-dhb; /* remaining size of this page */
+ }
+ dhbuf = ti->sram_virt + dhb;
+ SET_PAGE(dhb_page);
+ if (src_len > dhb_len) {
+ memcpy_toio(dhbuf,&ti->current_skb->data[src_offset],
+ dhb_len);
+ src_len -= dhb_len;
+ src_offset += dhb_len;
+ dhb_base+=dhb_len;
+ dhb=dhb_base;
+ continue;
+ }
+ memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len);
+ break;
+ }
+ writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ ti->tr_stats.tx_bytes += ti->current_skb->len;
+ dev_kfree_skb_irq(ti->current_skb);
+ ti->current_skb = NULL;
+ netif_wake_queue(dev);
+ if (ti->readlog_pending)
+ ibmtr_readlog(dev);
+} /*tr_tx */
+
+/*****************************************************************************/
+
+
+#define RECEIVE_BUFFER_OFST 6
+#define LAN_HDR_LENGTH_OFST 8
+#define DLC_HDR_LENGTH_OFST 9
+
+#define DSAP_OFST 0
+#define SSAP_OFST 1
+#define LLC_OFST 2
+#define PROTID_OFST 3
+#define ETHERTYPE_OFST 6
+
+static void tr_rx(struct net_device *dev)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+ __u32 rbuffer;
+ void __iomem *rbuf, *rbufdata, *llc;
+ __u8 rbuffer_page = 0;
+ unsigned char *data;
+ unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length;
+ unsigned char dlc_hdr_len;
+ struct sk_buff *skb;
+ unsigned int skb_size = 0;
+ int IPv4_p = 0;
+ unsigned int chksum = 0;
+ struct iphdr *iph;
+ struct arb_rec_req rarb;
+
+ SET_PAGE(ti->arb_page);
+ memcpy_fromio(&rarb, ti->arb, sizeof(rarb));
+ rbuffer = ntohs(rarb.rec_buf_addr) ;
+ rbuf = map_address(ti, rbuffer, &rbuffer_page);
+
+ SET_PAGE(ti->asb_page);
+
+ if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n");
+
+ writeb(REC_DATA, ti->asb + COMMAND_OFST);
+ writew(rarb.station_id, ti->asb + STATION_ID_OFST);
+ writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST);
+
+ lan_hdr_len = rarb.lan_hdr_len;
+ if (lan_hdr_len > sizeof(struct trh_hdr)) {
+ DPRINTK("Linux cannot handle greater than 18 bytes RIF\n");
+ return;
+ } /*BMS I added this above just to be very safe */
+ dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST);
+ hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);
+
+ SET_PAGE(rbuffer_page);
+ llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len;
+
+#if TR_VERBOSE
+ DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
+ (__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len);
+ DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n",
+ llc, ntohs(rarb.rec_buf_addr), dev->mem_start);
+ DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
+ "ethertype: %04X\n",
+ (int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST),
+ (int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST),
+ (int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2),
+ (int) ntohs(readw(llc + ETHERTYPE_OFST)));
+#endif
+ if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
+ SET_PAGE(ti->asb_page);
+ writeb(DATA_LOST, ti->asb + RETCODE_OFST);
+ ti->tr_stats.rx_dropped++;
+ writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ return;
+ }
+ length = ntohs(rarb.frame_len);
+ if (readb(llc + DSAP_OFST) == EXTENDED_SAP &&
+ readb(llc + SSAP_OFST) == EXTENDED_SAP &&
+ length >= hdr_len) IPv4_p = 1;
+#if TR_VERBOSE
+#define SADDR_OFST 8
+#define DADDR_OFST 2
+
+ if (!IPv4_p) {
+
+ void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
+
+ DPRINTK("Probably non-IP frame received.\n");
+ DPRINTK("ssap: %02X dsap: %02X "
+ "saddr: %02X:%02X:%02X:%02X:%02X:%02X "
+ "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
+ readb(trhhdr+SADDR_OFST), readb(trhhdr+ SADDR_OFST+1),
+ readb(trhhdr+SADDR_OFST+2), readb(trhhdr+SADDR_OFST+3),
+ readb(trhhdr+SADDR_OFST+4), readb(trhhdr+SADDR_OFST+5),
+ readb(trhhdr+DADDR_OFST), readb(trhhdr+DADDR_OFST + 1),
+ readb(trhhdr+DADDR_OFST+2), readb(trhhdr+DADDR_OFST+3),
+ readb(trhhdr+DADDR_OFST+4), readb(trhhdr+DADDR_OFST+5));
+ }
+#endif
+
+ /*BMS handle the case she comes in with few hops but leaves with many */
+ skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);
+
+ if (!(skb = dev_alloc_skb(skb_size))) {
+ DPRINTK("out of memory. frame dropped.\n");
+ ti->tr_stats.rx_dropped++;
+ SET_PAGE(ti->asb_page);
+ writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
+ writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+ return;
+ }
+ /*BMS again, if she comes in with few but leaves with many */
+ skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
+ skb_put(skb, length);
+ skb->dev = dev;
+ data = skb->data;
+ rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
+ rbufdata = rbuf + offsetof(struct rec_buf, data);
+
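+ /* For SNAP (EXTENDED_SAP) frames treated as IPv4, the payload copy
+ below also accumulates a partial checksum into skb->csum so the
+ stack can reuse it (skb->ip_summed is set after the copy loop). */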
+ if (IPv4_p) {
+ /* Copy the headers without checksumming */
+ memcpy_fromio(data, rbufdata, hdr_len);
+
+ /* Watch for padded packets and bogons */
+ iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
+ ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
+ length -= hdr_len;
+ if ((ip_len <= length) && (ip_len > 7))
+ length = ip_len;
+ data += hdr_len;
+ rbuffer_len -= hdr_len;
+ rbufdata += hdr_len;
+ }
+ /* Copy the payload... */
+#define BUFFER_POINTER_OFST 2
+#define BUFFER_LENGTH_OFST 6
+ for (;;) {
+ if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
+ DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
+ length,rbuffer_len);
+ if (IPv4_p)
+ chksum=csum_partial_copy_nocheck((void*)rbufdata,
+ data,length<rbuffer_len?length:rbuffer_len,chksum);
+ else
+ memcpy_fromio(data, rbufdata, rbuffer_len);
+ rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ;
+ if (!rbuffer)
+ break;
+ rbuffer -= 2;
+ length -= rbuffer_len;
+ data += rbuffer_len;
+ rbuf = map_address(ti, rbuffer, &rbuffer_page);
+ SET_PAGE(rbuffer_page);
+ rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
+ rbufdata = rbuf + offsetof(struct rec_buf, data);
+ }
+
+ SET_PAGE(ti->asb_page);
+ writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
+
+ writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+
+ ti->tr_stats.rx_bytes += skb->len;
+ ti->tr_stats.rx_packets++;
+
+ skb->protocol = tr_type_trans(skb, dev);
+ if (IPv4_p) {
+ skb->csum = chksum;
+ skb->ip_summed = 1;
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+} /*tr_rx */
+
+/*****************************************************************************/
+
+void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
+{
+ tmr->expires = jiffies + TR_RETRY_INTERVAL;
+ tmr->data = (unsigned long) dev;
+ tmr->function = tok_rerun;
+ init_timer(tmr);
+ add_timer(tmr);
+}
+
+/*****************************************************************************/
+
+void tok_rerun(unsigned long dev_addr){
+
+ struct net_device *dev = (struct net_device *)dev_addr;
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+
+ if ( ti->open_action == RESTART){
+ ti->do_tok_int = FIRST_INT;
+ outb(0, dev->base_addr + ADAPTRESETREL);
+#ifdef ENABLE_PAGING
+ if (ti->page_mask)
+ writeb(SRPR_ENABLE_PAGING,
+ ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
+#endif
+
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ } else
+ tok_open_adapter(dev_addr);
+}
+
+/*****************************************************************************/
+
+void ibmtr_readlog(struct net_device *dev)
+{
+ struct tok_info *ti;
+
+ ti = (struct tok_info *) dev->priv;
+
+ ti->readlog_pending = 0;
+ SET_PAGE(ti->srb_page);
+ writeb(DIR_READ_LOG, ti->srb);
+ writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
+ writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
+
+ netif_stop_queue(dev);
+
+}
+
+/*****************************************************************************/
+
+/* tok_get_stats(): Basically a scaffold routine which will return
+ the address of the tr_statistics structure associated with
+ this device -- the tr.... structure is an ethernet look-alike,
+ so at least for this iteration it may suffice. */
+
+static struct net_device_stats *tok_get_stats(struct net_device *dev)
+{
+
+ struct tok_info *toki;
+ toki = (struct tok_info *) dev->priv;
+ return (struct net_device_stats *) &toki->tr_stats;
+}
+
+/*****************************************************************************/
+
+int ibmtr_change_mtu(struct net_device *dev, int mtu)
+{
+ struct tok_info *ti = (struct tok_info *) dev->priv;
+
+ if (ti->ring_speed == 16 && mtu > ti->maxmtu16)
+ return -EINVAL;
+ if (ti->ring_speed == 4 && mtu > ti->maxmtu4)
+ return -EINVAL;
+ dev->mtu = mtu;
+ return 0;
+}
+
+/*****************************************************************************/
+#ifdef MODULE
+
+/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */
+static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS];
+static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 };
+static int irq[IBMTR_MAX_ADAPTERS];
+static int mem[IBMTR_MAX_ADAPTERS];
+
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
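+
+/* Illustrative example (values are just the defaults above):
+ "modprobe ibmtr io=0xa20,0xa24" probes two ISA adapters at those
+ I/O bases. */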
+
+static int __init ibmtr_init(void)
+{
+ int i;
+ int count=0;
+
+ find_turbo_adapters(io);
+
+ for (i = 0; io[i] && (i < IBMTR_MAX_ADAPTERS); i++) {
+ struct net_device *dev;
+ irq[i] = 0;
+ mem[i] = 0;
+ dev = alloc_trdev(sizeof(struct tok_info));
+ if (dev == NULL) {
+ if (i == 0)
+ return -ENOMEM;
+ break;
+ }
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+ dev->mem_start = mem[i];
+
+ if (ibmtr_probe_card(dev)) {
+ free_netdev(dev);
+ continue;
+ }
+ dev_ibmtr[i] = dev;
+ count++;
+ }
+ if (count) return 0;
+ printk("ibmtr: register_netdev() returned non-zero.\n");
+ return -EIO;
+}
+module_init(ibmtr_init);
+
+static void __exit ibmtr_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){
+ if (!dev_ibmtr[i])
+ continue;
+ unregister_netdev(dev_ibmtr[i]);
+ ibmtr_cleanup_card(dev_ibmtr[i]);
+ free_netdev(dev_ibmtr[i]);
+ }
+}
+module_exit(ibmtr_cleanup);
+#endif
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
new file mode 100644
index 000000000000..99e0b03b69a8
--- /dev/null
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -0,0 +1,2011 @@
+/*
+ * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
+ *
+ * Written By: Mike Sullivan, IBM Corporation
+ *
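+
+/* The adapter filters multicast through a single 4-byte functional
+ address rather than a list of addresses, so approximate the multicast
+ list by OR-ing the low four bytes of every entry into one functional
+ address and programming it with DIR_SET_FUNC_ADDR. */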
+ * Copyright (C) 1999 IBM Corporation
+ *
+ * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
+ * chipset.
+ *
+ * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
+ * chipsets) written by:
+ * 1999 Peter De Schrijver All Rights Reserved
+ * 1999 Mike Phillips (phillim@amtrak.com)
+ *
+ * Base Driver Skeleton:
+ * Written 1993-94 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * 12/10/99 - Alpha Release 0.1.0
+ * First release to the public
+ * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
+ * malloc free checks, reviewed code. <alan@redhat.com>
+ * 03/13/00 - Added spinlocks for smp
+ * 03/08/01 - Added support for module_init() and module_exit()
+ * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
+ * calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
+ * 11/05/01 - Restructured the interrupt function, added delays, reduced
+ * the number of TX descriptors to 1, which together can prevent
+ * the card from locking up the box - <yoder1@us.ibm.com>
+ * 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
+ * 11/13/02 - Removed free_irq calls which could cause a hang, added
+ * netif_carrier_{on|off} - <yoder1@us.ibm.com>
+ *
+ * To Do:
+ *
+ *
+ * If Problems do Occur
+ * Most problems can be rectified by either closing and opening the interface
+ * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
+ * if compiled into the kernel).
+ */
+
+/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */
+
+#define STREAMER_DEBUG 0
+#define STREAMER_DEBUG_PACKETS 0
+
+/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
+ * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
+ * kernel.
+ * Intended to be used to create a ring-error reporting network module
+ * i.e. it will give you the source address of beaconers on the ring
+ */
+
+#define STREAMER_NETWORK_MONITOR 0
+
+/* #define CONFIG_PROC_FS */
+
+/*
+ * Allow or disallow ioctl's for debugging
+ */
+
+#define STREAMER_IOCTL 0
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <linux/bitops.h>
+
+#include <net/checksum.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "lanstreamer.h"
+
+#if (BITS_PER_LONG == 64)
+#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int
+#endif
+
+
+/* I've got to put some intelligence into the version number so that Peter and I know
+ * which version of the code somebody has got.
+ * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
+ * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
+ *
+ * Official releases will only have an a.b.c version number format.
+ */
+
+static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
+ " v0.5.3 11/13/02 - Kent Yoder";
+
+static struct pci_device_id streamer_pci_tbl[] = {
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
+ {} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);
+
+
+static char *open_maj_error[] = {
+ "No error", "Lobe Media Test", "Physical Insertion",
+ "Address Verification", "Neighbor Notification (Ring Poll)",
+ "Request Parameters", "FDX Registration Request",
+ "FDX Lobe Media Test", "FDX Duplicate Address Check",
+ "Unknown stage"
+};
+
+static char *open_min_error[] = {
+ "No error", "Function Failure", "Signal Lost", "Wire Fault",
+ "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
+ "Duplicate Node Address", "Request Parameters", "Remove Received",
+ "Reserved", "Reserved", "No Monitor Detected for RPL",
+ "Monitor Contention failure for RPL", "FDX Protocol Error"
+};
+
+/* Module parameters */
+
+/* Ring Speed 0,4,16
+ * 0 = Autosense
+ * 4,16 = Selected speed only, no autosense
+ * This allows the card to be the first on the ring
+ * and become the active monitor.
+ *
+ * WARNING: Some hubs will allow you to insert
+ * at the wrong speed
+ */
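+
+/* Illustrative example: "modprobe lanstreamer ringspeed=16,0" would
+ * force 16 Mbps on the first adapter and leave a second adapter in
+ * autosense mode (0).
+ */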
+
+static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };
+
+module_param_array(ringspeed, int, NULL, 0);
+
+/* Packet buffer size */
+
+static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };
+
+module_param_array(pkt_buf_sz, int, NULL, 0);
+
+/* Message Level */
+
+static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };
+
+module_param_array(message_level, int, NULL, 0);
+
+#if STREAMER_IOCTL
+static int streamer_ioctl(struct net_device *, struct ifreq *, int);
+#endif
+
+static int streamer_reset(struct net_device *dev);
+static int streamer_open(struct net_device *dev);
+static int streamer_xmit(struct sk_buff *skb, struct net_device *dev);
+static int streamer_close(struct net_device *dev);
+static void streamer_set_rx_mode(struct net_device *dev);
+static irqreturn_t streamer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs);
+static struct net_device_stats *streamer_get_stats(struct net_device *dev);
+static int streamer_set_mac_address(struct net_device *dev, void *addr);
+static void streamer_arb_cmd(struct net_device *dev);
+static int streamer_change_mtu(struct net_device *dev, int mtu);
+static void streamer_srb_bh(struct net_device *dev);
+static void streamer_asb_bh(struct net_device *dev);
+#if STREAMER_NETWORK_MONITOR
+#ifdef CONFIG_PROC_FS
+static int streamer_proc_info(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data);
+static int sprintf_info(char *buffer, struct net_device *dev);
+struct streamer_private *dev_streamer=NULL;
+#endif
+#endif
+
+static int __devinit streamer_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct streamer_private *streamer_priv;
+ unsigned long pio_start, pio_end, pio_flags, pio_len;
+ unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ int rc = 0;
+ static int card_no=-1;
+ u16 pcr;
+
+#if STREAMER_DEBUG
+ printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
+#endif
+
+ card_no++;
+ dev = alloc_trdev(sizeof(*streamer_priv));
+ if (dev==NULL) {
+ printk(KERN_ERR "lanstreamer: out of memory.\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ streamer_priv = dev->priv;
+
+#if STREAMER_NETWORK_MONITOR
+#ifdef CONFIG_PROC_FS
+ if (!dev_streamer)
+ create_proc_read_entry("net/streamer_tr", 0, 0,
+ streamer_proc_info, NULL);
+ streamer_priv->next = dev_streamer;
+ dev_streamer = streamer_priv;
+#endif
+#endif
+
+ rc = pci_set_dma_mask(pdev, 0xFFFFFFFFULL);
+ if (rc) {
+ printk(KERN_ERR "%s: No suitable PCI mapping available.\n",
+ dev->name);
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
+ rc=-EIO;
+ goto err_out;
+ }
+
+ pci_set_master(pdev);
+
+ rc = pci_set_mwi(pdev);
+ if (rc) {
+ printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n");
+ goto err_out_disable;
+ }
+
+ pio_start = pci_resource_start(pdev, 0);
+ pio_end = pci_resource_end(pdev, 0);
+ pio_flags = pci_resource_flags(pdev, 0);
+ pio_len = pci_resource_len(pdev, 0);
+
+ mmio_start = pci_resource_start(pdev, 1);
+ mmio_end = pci_resource_end(pdev, 1);
+ mmio_flags = pci_resource_flags(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
+
+#if STREAMER_DEBUG
+ printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
+ pio_start, pio_end, pio_len, pio_flags);
+ printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
+ mmio_start, mmio_end, mmio_flags, mmio_len);
+#endif
+
+ if (!request_region(pio_start, pio_len, "lanstreamer")) {
+ printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n",
+ pio_start);
+ rc= -EBUSY;
+ goto err_out_mwi;
+ }
+
+ if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
+ printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n",
+ mmio_start);
+ rc= -EBUSY;
+ goto err_out_free_pio;
+ }
+
+ streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
+ if (streamer_priv->streamer_mmio == NULL) {
+ printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n",
+ mmio_start);
+ rc= -EIO;
+ goto err_out_free_mmio;
+ }
+
+ init_waitqueue_head(&streamer_priv->srb_wait);
+ init_waitqueue_head(&streamer_priv->trb_wait);
+
+ dev->open = &streamer_open;
+ dev->hard_start_xmit = &streamer_xmit;
+ dev->change_mtu = &streamer_change_mtu;
+ dev->stop = &streamer_close;
+#if STREAMER_IOCTL
+ dev->do_ioctl = &streamer_ioctl;
+#else
+ dev->do_ioctl = NULL;
+#endif
+ dev->set_multicast_list = &streamer_set_rx_mode;
+ dev->get_stats = &streamer_get_stats;
+ dev->set_mac_address = &streamer_set_mac_address;
+ dev->irq = pdev->irq;
+ dev->base_addr=pio_start;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
+ streamer_priv->pci_dev = pdev;
+
+ if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
+ streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
+ else
+ streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];
+
+ streamer_priv->streamer_ring_speed = ringspeed[card_no];
+ streamer_priv->streamer_message_level = message_level[card_no];
+
+ pci_set_drvdata(pdev, dev);
+
+ spin_lock_init(&streamer_priv->streamer_lock);
+
+ pci_read_config_word (pdev, PCI_COMMAND, &pcr);
+ pcr |= PCI_COMMAND_SERR;
+ pci_write_config_word (pdev, PCI_COMMAND, pcr);
+
+ printk("%s \n", version);
+ printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
+ streamer_priv->streamer_card_name,
+ (unsigned int) dev->base_addr,
+ streamer_priv->streamer_mmio,
+ dev->irq);
+
+ if (streamer_reset(dev))
+ goto err_out_unmap;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_unmap;
+ return 0;
+
+err_out_unmap:
+ iounmap(streamer_priv->streamer_mmio);
+err_out_free_mmio:
+ release_mem_region(mmio_start, mmio_len);
+err_out_free_pio:
+ release_region(pio_start, pio_len);
+err_out_mwi:
+ pci_clear_mwi(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out:
+ free_netdev(dev);
+#if STREAMER_DEBUG
+ printk("lanstreamer: Exit error %x\n",rc);
+#endif
+ return rc;
+}
+
+static void __devexit streamer_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev=pci_get_drvdata(pdev);
+ struct streamer_private *streamer_priv;
+
+#if STREAMER_DEBUG
+ printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
+#endif
+
+ if (dev == NULL) {
+ printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
+ return;
+ }
+
+ streamer_priv=dev->priv;
+ if (streamer_priv == NULL) {
+ printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
+ return;
+ }
+
+#if STREAMER_NETWORK_MONITOR
+#ifdef CONFIG_PROC_FS
+ {
+ struct streamer_private **p, **next;
+
+ for (p = &dev_streamer; *p; p = next) {
+ next = &(*p)->next;
+ if (*p == streamer_priv) {
+ *p = *next;
+ break;
+ }
+ }
+ if (!dev_streamer)
+ remove_proc_entry("net/streamer_tr", NULL);
+ }
+#endif
+#endif
+
+ unregister_netdev(dev);
+ iounmap(streamer_priv->streamer_mmio);
+ release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
+ release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
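+/* streamer_reset(): soft-reset the adapter, wait for the solo-init SRB
+ * reply, then read the burned-in MAC address (UAA) and the address/
+ * parameter table offsets through the LAPA/LAPD register window.
+ */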
+static int streamer_reset(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv;
+ __u8 __iomem *streamer_mmio;
+ unsigned long t;
+ unsigned int uaa_addr;
+ struct sk_buff *skb = NULL;
+ __u16 misr;
+
+ streamer_priv = (struct streamer_private *) dev->priv;
+ streamer_mmio = streamer_priv->streamer_mmio;
+
+ writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
+ t = jiffies;
+ /* Hold soft reset bit for a while */
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ);
+
+ writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
+ streamer_mmio + BCTL);
+
+#if STREAMER_DEBUG
+ printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
+ printk("GPR: %x\n", readw(streamer_mmio + GPR));
+ printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
+#endif
+ writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );
+
+ if (streamer_priv->streamer_ring_speed == 0) { /* Autosense */
+ writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
+ streamer_mmio + GPR);
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
+ dev->name);
+ } else if (streamer_priv->streamer_ring_speed == 16) {
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
+ dev->name);
+ writew(GPR_16MBPS, streamer_mmio + GPR);
+ } else if (streamer_priv->streamer_ring_speed == 4) {
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
+ dev->name);
+ writew(0, streamer_mmio + GPR);
+ }
+
+ skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
+ if (!skb) {
+ printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
+ dev->name);
+ } else {
+ struct streamer_rx_desc *rx_ring;
+ u8 *data;
+
+ rx_ring=(struct streamer_rx_desc *)skb->data;
+ data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
+ rx_ring->forward=0;
+ rx_ring->status=0;
+ rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
+ 512, PCI_DMA_FROMDEVICE));
+ rx_ring->framelen_buflen=512;
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
+ streamer_mmio+RXBDA);
+ }
+
+#if STREAMER_DEBUG
+ printk("GPR = %x\n", readw(streamer_mmio + GPR));
+#endif
+ /* start solo init */
+ writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);
+
+ while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ/10);
+ if (jiffies - t > 40 * HZ) {
+ printk(KERN_ERR
+ "IBM PCI tokenring card not responding\n");
+ release_region(dev->base_addr, STREAMER_IO_SPACE);
+ if (skb)
+ dev_kfree_skb(skb);
+ return -1;
+ }
+ }
+ writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
+ misr = readw(streamer_mmio + MISR_RUM);
+ writew(~misr, streamer_mmio + MISR_RUM);
+
+ if (skb)
+ dev_kfree_skb(skb); /* release skb used for diagnostics */
+
+#if STREAMER_DEBUG
+ printk("LAPWWO: %x, LAPA: %x LAPE: %x\n",
+ readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
+ readw(streamer_mmio + LAPE));
+#endif
+
+#if STREAMER_DEBUG
+ {
+ int i;
+ writew(readw(streamer_mmio + LAPWWO),
+ streamer_mmio + LAPA);
+ printk("initialization response srb dump: ");
+ for (i = 0; i < 10; i++)
+ printk("%x:",
+ ntohs(readw(streamer_mmio + LAPDINC)));
+ printk("\n");
+ }
+#endif
+
+ writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
+ if (readw(streamer_mmio + LAPD)) {
+ printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",
+ ntohs(readw(streamer_mmio + LAPD)));
+ release_region(dev->base_addr, STREAMER_IO_SPACE);
+ return -1;
+ }
+
+ writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
+ uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
+ readw(streamer_mmio + LAPDINC); /* skip over Level.Addr field */
+ streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
+ streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));
+
+#if STREAMER_DEBUG
+ printk("UAA resides at %x\n", uaa_addr);
+#endif
+
+ /* setup uaa area for access with LAPD */
+ {
+ int i;
+ __u16 addr;
+ writew(uaa_addr, streamer_mmio + LAPA);
+ for (i = 0; i < 6; i += 2) {
+ addr=ntohs(readw(streamer_mmio+LAPDINC));
+ dev->dev_addr[i]= (addr >> 8) & 0xff;
+ dev->dev_addr[i+1]= addr & 0xff;
+ }
+#if STREAMER_DEBUG
+ printk("Adapter address: ");
+ for (i = 0; i < 6; i++) {
+ printk("%02x:", dev->dev_addr[i]);
+ }
+ printk("\n");
+#endif
+ }
+ return 0;
+}
+
+static int streamer_open(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ unsigned long flags;
+ char open_error[255];
+ int i, open_finished = 1;
+ __u16 srb_word;
+ __u16 srb_open;
+ int rc;
+
+ if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) {
+ rc=streamer_reset(dev);
+ }
+
+ if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ, "lanstreamer", dev)) {
+ return -EAGAIN;
+ }
+#if STREAMER_DEBUG
+ printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
+ printk("pending ints: %x\n", readw(streamer_mmio + SISR));
+#endif
+
+ writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
+ writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */
+
+ /* adapter is closed, so SRB is pointed to by LAPWWO */
+ writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
+
+#if STREAMER_DEBUG
+ printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
+ readw(streamer_mmio + LAPA));
+ printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
+ printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
+#endif
+ do {
+ int i;
+
+ for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
+ writew(0, streamer_mmio + LAPDINC);
+ }
+
+ writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA);
+ writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */
+ writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC);
+ writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);
+
+ writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
+#if STREAMER_NETWORK_MONITOR
+ /* If Network Monitor, instruct card to copy MAC frames through the ARB */
+ writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */
+#else
+ writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */
+#endif
+
+ if (streamer_priv->streamer_laa[0]) {
+ writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
+ writew(htons((streamer_priv->streamer_laa[0] << 8) |
+ streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC);
+ writew(htons((streamer_priv->streamer_laa[2] << 8) |
+ streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC);
+ writew(htons((streamer_priv->streamer_laa[4] << 8) |
+ streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC);
+ memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
+ }
+
+ /* save off srb open offset */
+ srb_open = readw(streamer_mmio + LAPWWO);
+#if STREAMER_DEBUG
+ writew(readw(streamer_mmio + LAPWWO),
+ streamer_mmio + LAPA);
+ printk("srb open request: \n");
+ for (i = 0; i < 16; i++) {
+ printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
+ }
+ printk("\n");
+#endif
+ spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
+ streamer_priv->srb_queued = 1;
+
+ /* signal solo that SRB command has been issued */
+ writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
+ spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
+
+ while (streamer_priv->srb_queued) {
+ interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
+ if (signal_pending(current)) {
+ printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
+ printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
+ readw(streamer_mmio + SISR),
+ readw(streamer_mmio + MISR_RUM),
+ readw(streamer_mmio + LISR));
+ streamer_priv->srb_queued = 0;
+ break;
+ }
+ }
+
+#if STREAMER_DEBUG
+ printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
+ printk("srb open response:\n");
+ writew(srb_open, streamer_mmio + LAPA);
+ for (i = 0; i < 10; i++) {
+ printk("%x:",
+ ntohs(readw(streamer_mmio + LAPDINC)));
+ }
+#endif
+
+ /* If we get the same return response as we set, the interrupt wasn't raised and the open
+ * timed out.
+ */
+ writew(srb_open + 2, streamer_mmio + LAPA);
+ srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
+ if (srb_word == STREAMER_CLEAR_RET_CODE) {
+ printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
+ dev->name);
+ return -EIO;
+ }
+
+ if (srb_word != 0) {
+ if (srb_word == 0x07) {
+ if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
+ printk(KERN_WARNING "%s: Retrying at different ring speed \n",
+ dev->name);
+ open_finished = 0;
+ } else {
+ __u16 error_code;
+
+ writew(srb_open + 6, streamer_mmio + LAPA);
+ error_code = ntohs(readw(streamer_mmio + LAPD));
+ strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
+ strcat(open_error, " - ");
+ strcat(open_error, open_min_error[(error_code & 0x0f)]);
+
+ if (!streamer_priv->streamer_ring_speed
+ && ((error_code & 0x0f) == 0x0d))
+ {
+ printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
+ free_irq(dev->irq, dev);
+ return -EIO;
+ }
+
+ printk(KERN_WARNING "%s: %s\n",
+ dev->name, open_error);
+ free_irq(dev->irq, dev);
+ return -EIO;
+
+ } /* if autosense && open_finished */
+ } else {
+ printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
+ dev->name, srb_word);
+ free_irq(dev->irq, dev);
+ return -EIO;
+ }
+ } else
+ open_finished = 1;
+ } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
+
+ writew(srb_open + 18, streamer_mmio + LAPA);
+ srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
+ if (srb_word & (1 << 3))
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);
+
+ if (srb_word & 1)
+ streamer_priv->streamer_ring_speed = 16;
+ else
+ streamer_priv->streamer_ring_speed = 4;
+
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Opened in %d Mbps mode\n",
+ dev->name,
+ streamer_priv->streamer_ring_speed);
+
+ writew(srb_open + 8, streamer_mmio + LAPA);
+ streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
+ streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
+ streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
+ readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */
+ streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));
+
+ streamer_priv->streamer_receive_options = 0x00;
+ streamer_priv->streamer_copy_all_options = 0;
+
+ /* setup rx ring */
+ /* enable rx channel */
+ writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);
+
+ /* setup rx descriptors */
+ streamer_priv->streamer_rx_ring=
+ kmalloc( sizeof(struct streamer_rx_desc)*
+ STREAMER_RX_RING_SIZE,GFP_KERNEL);
+ if (!streamer_priv->streamer_rx_ring) {
+ printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name);
+ return -EIO;
+ }
+
+ for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
+ if (skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ streamer_priv->streamer_rx_ring[i].forward =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
+ sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
+ streamer_priv->streamer_rx_ring[i].status = 0;
+ streamer_priv->streamer_rx_ring[i].buffer =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
+ streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+ streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
+ streamer_priv->rx_ring_skb[i] = skb;
+ }
+ streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
+ sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
+
+ if (i == 0) {
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
+ free_irq(dev->irq, dev);
+ return -EIO;
+ }
+
+ streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */
+
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
+ sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
+ streamer_mmio + RXBDA);
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
+ sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
+ streamer_mmio + RXLBDA);
+
+ /* set bus master interrupt event mask */
+ writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
+
+
+ /* setup tx ring */
+ streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)*
+ STREAMER_TX_RING_SIZE,GFP_KERNEL);
+ if (!streamer_priv->streamer_tx_ring) {
+ printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name);
+ return -EIO;
+ }
+
+ writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */
+ for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
+ streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
+ &streamer_priv->streamer_tx_ring[i + 1],
+ sizeof(struct streamer_tx_desc),
+ PCI_DMA_TODEVICE));
+ streamer_priv->streamer_tx_ring[i].status = 0;
+ streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
+ streamer_priv->streamer_tx_ring[i].buffer = 0;
+ streamer_priv->streamer_tx_ring[i].buflen = 0;
+ streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
+ streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
+ streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
+ }
+ streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
+ sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
+
+ streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
+ streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */
+ streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;
+
+ /* set Busmaster interrupt event mask (handle receives on interrupt only) */
+ writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
+ /* set system event interrupt mask */
+ writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);
+
+#if STREAMER_DEBUG
+ printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
+ printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
+#endif
+
+#if STREAMER_NETWORK_MONITOR
+
+ writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
+ printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)));
+ readw(streamer_mmio + LAPDINC);
+ readw(streamer_mmio + LAPDINC);
+ printk("%s: Functional Address: %04x:%04x\n", dev->name,
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)));
+
+ writew(streamer_priv->streamer_parms_addr + 4,
+ streamer_mmio + LAPA);
+ printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)));
+#endif
+
+ netif_start_queue(dev);
+ netif_carrier_on(dev);
+ return 0;
+}
+
+/*
+ * When we enter the rx routine we do not know how many frames have been
+ * queued on the rx channel. Therefore we start at the next rx status
+ * position and travel around the receive ring until we have completed
+ * all the frames.
+ *
+ * This means that we may process the frame before we receive the end
+ * of frame interrupt. This is why we always test the status instead
+ * of blindly processing the next frame.
+ *
+ */
+static void streamer_rx(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ struct streamer_rx_desc *rx_desc;
+ int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
+ struct sk_buff *skb, *skb2;
+
+ /* setup the next rx descriptor to be received */
+ rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
+ rx_ring_last_received = streamer_priv->rx_ring_last_received;
+
+ while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */
+ if (rx_ring_last_received != streamer_priv->rx_ring_last_received)
+ {
+ printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
+ rx_ring_last_received, streamer_priv->rx_ring_last_received);
+ }
+ streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
+ rx_ring_last_received = streamer_priv->rx_ring_last_received;
+
+ length = rx_desc->framelen_buflen & 0xffff; /* buffer length */
+ frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;
+
+ if (rx_desc->status & 0x7E830000) { /* errors */
+ if (streamer_priv->streamer_message_level) {
+ printk(KERN_WARNING "%s: Rx Error %x \n",
+ dev->name, rx_desc->status);
+ }
+ } else { /* received without errors */
+ if (rx_desc->status & 0x80000000) { /* frame complete */
+ buffer_cnt = 1;
+ skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
+ } else {
+ skb = dev_alloc_skb(frame_length);
+ }
+
+ if (skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
+ streamer_priv->streamer_stats.rx_dropped++;
+ } else { /* we allocated an skb OK */
+ skb->dev = dev;
+
+ if (buffer_cnt == 1) {
+ /* release the DMA mapping */
+ pci_unmap_single(streamer_priv->pci_dev,
+ le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
+ streamer_priv->pkt_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
+#if STREAMER_DEBUG_PACKETS
+ {
+ int i;
+ printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head);
+ for (i = 0; i < frame_length; i++)
+ {
+ printk("%x:", skb2->data[i]);
+ if (((i + 1) % 16) == 0)
+ printk("\n");
+ }
+ printk("\n");
+ }
+#endif
+ skb_put(skb2, length);
+ skb2->protocol = tr_type_trans(skb2, dev);
+ /* recycle this descriptor */
+ streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
+ streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
+ streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
+ /* place recycled descriptor back on the adapter */
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
+ &streamer_priv->streamer_rx_ring[rx_ring_last_received],
+ sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
+ streamer_mmio + RXLBDA);
+ /* pass the received skb up to the protocol */
+ netif_rx(skb2);
+ } else {
+ do { /* Walk the buffers */
+					pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE);
+ memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */
+ streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
+ streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
+
+ /* give descriptor back to the adapter */
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
+ &streamer_priv->streamer_rx_ring[rx_ring_last_received],
+ length, PCI_DMA_FROMDEVICE)),
+ streamer_mmio + RXLBDA);
+
+ if (rx_desc->status & 0x80000000)
+ break; /* this descriptor completes the frame */
+
+ /* else get the next pending descriptor */
+ if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
+ {
+ printk("RX Error rx_ring_last_received not the same %x %x\n",
+ rx_ring_last_received,
+ streamer_priv->rx_ring_last_received);
+ }
+ rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];
+
+ length = rx_desc->framelen_buflen & 0xffff; /* buffer length */
+ streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
+ rx_ring_last_received = streamer_priv->rx_ring_last_received;
+ } while (1);
+
+ skb->protocol = tr_type_trans(skb, dev);
+ /* send up to the protocol */
+ netif_rx(skb);
+ }
+ dev->last_rx = jiffies;
+ streamer_priv->streamer_stats.rx_packets++;
+ streamer_priv->streamer_stats.rx_bytes += length;
+ } /* if skb == null */
+ } /* end received without errors */
+
+ /* try the next one */
+ rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
+ } /* end for all completed rx descriptors */
+}
+
+static irqreturn_t streamer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ __u16 sisr;
+ __u16 misr;
+ u8 max_intr = MAX_INTR;
+
+ spin_lock(&streamer_priv->streamer_lock);
+ sisr = readw(streamer_mmio + SISR);
+
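+	/* Service each asserted interrupt source, re-reading SISR on every pass,
+	 * but bail out after MAX_INTR iterations so we can never spin here forever.
+	 */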
+ while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE |
+ SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR))
+ && (max_intr > 0)) {
+
+ if(sisr & SISR_PAR_ERR) {
+ writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+
+ else if(sisr & SISR_SERR_ERR) {
+ writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+
+ else if(sisr & SISR_MI) {
+ misr = readw(streamer_mmio + MISR_RUM);
+
+ if (misr & MISR_TX2_EOF) {
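+				/* Transmit channel 2 end of frame: reclaim each completed
+				 * tx descriptor, update the stats, free its skb and then
+				 * re-open the transmit queue.
+				 */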
+ while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
+ streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
+ streamer_priv->free_tx_ring_entries++;
+ streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
+ streamer_priv->streamer_stats.tx_packets++;
+ dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
+ }
+ netif_wake_queue(dev);
+ }
+
+ if (misr & MISR_RX_EOF) {
+ streamer_rx(dev);
+ }
+ /* MISR_RX_EOF */
+
+ if (misr & MISR_RX_NOBUF) {
+ /* According to the documentation, we don't have to do anything,
+ * but trapping it keeps it out of /var/log/messages.
+ */
+			} /* MISR_RX_NOBUF */
+
+ writew(~misr, streamer_mmio + MISR_RUM);
+ (void)readw(streamer_mmio + MISR_RUM);
+ }
+
+ else if (sisr & SISR_SRB_REPLY) {
+ if (streamer_priv->srb_queued == 1) {
+ wake_up_interruptible(&streamer_priv->srb_wait);
+ } else if (streamer_priv->srb_queued == 2) {
+ streamer_srb_bh(dev);
+ }
+ streamer_priv->srb_queued = 0;
+
+ writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+
+ else if (sisr & SISR_ADAPTER_CHECK) {
+ printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
+ writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
+ printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
+ dev->name, readw(streamer_mmio + LAPDINC),
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)),
+ ntohs(readw(streamer_mmio + LAPDINC)));
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
+ }
+
+ /* SISR_ADAPTER_CHECK */
+ else if (sisr & SISR_ASB_FREE) {
+ /* Wake up anything that is waiting for the asb response */
+ if (streamer_priv->asb_queued) {
+ streamer_asb_bh(dev);
+ }
+ writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+ /* SISR_ASB_FREE */
+ else if (sisr & SISR_ARB_CMD) {
+ streamer_arb_cmd(dev);
+ writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+ /* SISR_ARB_CMD */
+ else if (sisr & SISR_TRB_REPLY) {
+ /* Wake up anything that is waiting for the trb response */
+ if (streamer_priv->trb_queued) {
+ wake_up_interruptible(&streamer_priv->
+ trb_wait);
+ }
+ streamer_priv->trb_queued = 0;
+ writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
+ (void)readw(streamer_mmio + SISR_RUM);
+ }
+ /* SISR_TRB_REPLY */
+
+ sisr = readw(streamer_mmio + SISR);
+ max_intr--;
+ } /* while() */
+
+ spin_unlock(&streamer_priv->streamer_lock) ;
+ return IRQ_HANDLED;
+}
+
+static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ unsigned long flags ;
+
+ spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
+
+ if (streamer_priv->free_tx_ring_entries) {
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
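+		/* Low 16 bits hold the frame length; the 0x0002 in the upper
+		 * half-word is presumably the buffer-count part of
+		 * bufcnt_framelen - an assumption based on the field name,
+		 * not on chipset documentation.
+		 */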
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
+ cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
+ streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len;
+
+ streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
+ streamer_priv->free_tx_ring_entries--;
+#if STREAMER_DEBUG_PACKETS
+ {
+ int i;
+ printk("streamer_xmit packet print:\n");
+ for (i = 0; i < skb->len; i++) {
+ printk("%x:", skb->data[i]);
+ if (((i + 1) % 16) == 0)
+ printk("\n");
+ }
+ printk("\n");
+ }
+#endif
+
+ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
+ &streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
+ sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
+ streamer_mmio + TX2LFDA);
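+		/* Read back, presumably to flush the posted PCI write */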
+ (void)readl(streamer_mmio + TX2LFDA);
+
+ streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
+ spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
+ return 0;
+ } else {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
+ return 1;
+ }
+}
+
+
+static int streamer_close(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ unsigned long flags;
+ int i;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
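+	/* Issue a CLOSE.ADAPTER SRB; the loop below waits for the adapter to reply */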
+ writew(streamer_priv->srb, streamer_mmio + LAPA);
+ writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+
+ spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
+
+ streamer_priv->srb_queued = 1;
+ writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
+
+ spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
+
+ while (streamer_priv->srb_queued)
+ {
+		interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 60 * HZ);
+ if (signal_pending(current))
+ {
+ printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
+ printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
+ readw(streamer_mmio + SISR),
+ readw(streamer_mmio + MISR_RUM),
+ readw(streamer_mmio + LISR));
+ streamer_priv->srb_queued = 0;
+ break;
+ }
+ }
+
+ streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
+
+ for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
+ if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
+ dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
+ }
+ streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
+ }
+
+ /* reset tx/rx fifo's and busmaster logic */
+
+ /* TBD. Add graceful way to reset the LLC channel without doing a soft reset.
+ writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
+ udelay(1);
+ writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
+ */
+
+#if STREAMER_DEBUG
+ writew(streamer_priv->srb, streamer_mmio + LAPA);
+	printk("srb: ");
+ for (i = 0; i < 2; i++) {
+ printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
+ }
+ printk("\n");
+#endif
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static void streamer_set_rx_mode(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ __u8 options = 0;
+ struct dev_mc_list *dmi;
+ unsigned char dev_mc_address[5];
+ int i;
+
+ writel(streamer_priv->srb, streamer_mmio + LAPA);
+ options = streamer_priv->streamer_copy_all_options;
+
+ if (dev->flags & IFF_PROMISC)
+ options |= (3 << 5); /* All LLC and MAC frames, all through the main rx channel */
+ else
+ options &= ~(3 << 5);
+
+ /* Only issue the srb if there is a change in options */
+
+ if ((options ^ streamer_priv->streamer_copy_all_options))
+ {
+ /* Now to issue the srb command to alter the copy.all.options */
+ writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+ writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
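+		/* The remaining words decode to the ASCII string "JAMES" plus
+		 * padding spaces; presumably just filler for the rest of the
+		 * SRB (an observation, not documented behaviour).
+		 */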
+ writew(htons(0x4a41),streamer_mmio+LAPDINC);
+ writew(htons(0x4d45),streamer_mmio+LAPDINC);
+ writew(htons(0x5320),streamer_mmio+LAPDINC);
+ writew(0x2020, streamer_mmio + LAPDINC);
+
+ streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */
+
+ writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
+
+ streamer_priv->streamer_copy_all_options = options;
+ return;
+ }
+
+ /* Set the functional addresses we need for multicast */
+ writel(streamer_priv->srb,streamer_mmio+LAPA);
+ dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
+
+ for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
+ {
+ dev_mc_address[0] |= dmi->dmi_addr[2] ;
+ dev_mc_address[1] |= dmi->dmi_addr[3] ;
+ dev_mc_address[2] |= dmi->dmi_addr[4] ;
+ dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ }
+
+ writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+ writew(0,streamer_mmio+LAPDINC);
+ writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
+ writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
+ streamer_priv->srb_queued = 2 ;
+ writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
+}
+
+static void streamer_srb_bh(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ __u16 srb_word;
+
+ writew(streamer_priv->srb, streamer_mmio + LAPA);
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+
+ switch (srb_word) {
+
+ /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
+ * At some point we should do something if we get an error, such as
+ * resetting the IFF_PROMISC flag in dev
+ */
+
+ case SRB_MODIFY_RECEIVE_OPTIONS:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+
+ switch (srb_word) {
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+ default:
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
+ dev->name,
+ streamer_priv->streamer_copy_all_options,
+ streamer_priv->streamer_receive_options);
+ break;
+ } /* switch srb[2] */
+ break;
+
+
+ /* SRB_SET_GROUP_ADDRESS - Multicast group setting
+ */
+ case SRB_SET_GROUP_ADDRESS:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+ switch (srb_word) {
+ case 0x00:
+ break;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+ case 0x3c:
+ printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
+ break;
+ case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
+ printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
+ break;
+ case 0x55:
+ printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
+ break;
+ default:
+ break;
+ } /* switch srb[2] */
+ break;
+
+
+ /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
+ */
+ case SRB_RESET_GROUP_ADDRESS:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+ switch (srb_word) {
+ case 0x00:
+ break;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+ case 0x39: /* Must deal with this if individual multicast addresses used */
+ printk(KERN_INFO "%s: Group address not found \n", dev->name);
+ break;
+ default:
+ break;
+ } /* switch srb[2] */
+ break;
+
+
+ /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
+ */
+
+ case SRB_SET_FUNC_ADDRESS:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+ switch (srb_word) {
+ case 0x00:
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
+ break;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+ default:
+ break;
+ } /* switch srb[2] */
+ break;
+
+ /* SRB_READ_LOG - Read and reset the adapter error counters
+ */
+
+ case SRB_READ_LOG:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+ switch (srb_word) {
+ case 0x00:
+ {
+ int i;
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
+ printk("Read Log statistics: ");
+ writew(streamer_priv->srb + 6,
+ streamer_mmio + LAPA);
+ for (i = 0; i < 5; i++) {
+ printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
+ }
+ printk("\n");
+ }
+ break;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+
+ } /* switch srb[2] */
+ break;
+
+ /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
+
+ case SRB_READ_SR_COUNTERS:
+ srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
+ switch (srb_word) {
+ case 0x00:
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
+ break;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ break;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
+ break;
+ default:
+ break;
+ } /* switch srb[2] */
+ break;
+
+ default:
+ printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
+ break;
+ } /* switch srb[0] */
+}
+
+static struct net_device_stats *streamer_get_stats(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv;
+ streamer_priv = (struct streamer_private *) dev->priv;
+ return (struct net_device_stats *) &streamer_priv->streamer_stats;
+}
+
+static int streamer_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = addr;
+ struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
+
+ if (netif_running(dev))
+ {
+ printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
+ return -EIO;
+ }
+
+ memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len);
+
+ if (streamer_priv->streamer_message_level) {
+ printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",
+ dev->name, streamer_priv->streamer_laa[0],
+ streamer_priv->streamer_laa[1],
+ streamer_priv->streamer_laa[2],
+ streamer_priv->streamer_laa[3],
+ streamer_priv->streamer_laa[4],
+ streamer_priv->streamer_laa[5]);
+ }
+ return 0;
+}
+
+static void streamer_arb_cmd(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ __u8 header_len;
+ __u16 frame_len, buffer_len;
+ struct sk_buff *mac_frame;
+ __u8 frame_data[256];
+ __u16 buff_off;
+ __u16 lan_status = 0, lan_status_diff; /* Initialize to stop compiler warning */
+ __u8 fdx_prot_error;
+ __u16 next_ptr;
+ __u16 arb_word;
+
+#if STREAMER_NETWORK_MONITOR
+ struct trh_hdr *mac_hdr;
+#endif
+
+ writew(streamer_priv->arb, streamer_mmio + LAPA);
+ arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
+
+ if (arb_word == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
+ writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
+ streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
+ header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; /* 802.5 Token-Ring Header Length */
+ frame_len = ntohs(readw(streamer_mmio + LAPDINC));
+
+#if STREAMER_DEBUG
+ {
+ int i;
+ __u16 next;
+ __u8 status;
+ __u16 len;
+
+ writew(ntohs(buff_off), streamer_mmio + LAPA); /*setup window to frame data */
+ next = htons(readw(streamer_mmio + LAPDINC));
+ status =
+ ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
+ len = ntohs(readw(streamer_mmio + LAPDINC));
+
+ /* print out 1st 14 bytes of frame data */
+ for (i = 0; i < 7; i++) {
+ printk("Loc %d = %04x\n", i,
+ ntohs(readw
+ (streamer_mmio + LAPDINC)));
+ }
+
+ printk("next %04x, fs %02x, len %04x \n", next,
+ status, len);
+ }
+#endif
+ if (!(mac_frame = dev_alloc_skb(frame_len))) {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
+ dev->name);
+ goto drop_frame;
+ }
+ /* Walk the buffer chain, creating the frame */
+
+ do {
+ int i;
+ __u16 rx_word;
+
+ writew(htons(buff_off), streamer_mmio + LAPA); /* setup window to frame data */
+ next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
+ readw(streamer_mmio + LAPDINC); /* read thru status word */
+ buffer_len = ntohs(readw(streamer_mmio + LAPDINC));
+
+ if (buffer_len > 256)
+ break;
+
+ i = 0;
+ while (i < buffer_len) {
+ rx_word=ntohs(readw(streamer_mmio+LAPDINC));
+ frame_data[i]=rx_word >> 8;
+ frame_data[i+1]=rx_word & 0xff;
+ i += 2;
+ }
+
+ memcpy(skb_put(mac_frame, buffer_len),
+ frame_data, buffer_len);
+ } while (next_ptr && (buff_off = next_ptr));
+
+#if STREAMER_NETWORK_MONITOR
+ printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
+ dev->name);
+ mac_hdr = (struct trh_hdr *) mac_frame->data;
+ printk(KERN_WARNING
+ "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
+ dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1],
+ mac_hdr->daddr[2], mac_hdr->daddr[3],
+ mac_hdr->daddr[4], mac_hdr->daddr[5]);
+ printk(KERN_WARNING
+ "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
+ dev->name, mac_hdr->saddr[0], mac_hdr->saddr[1],
+ mac_hdr->saddr[2], mac_hdr->saddr[3],
+ mac_hdr->saddr[4], mac_hdr->saddr[5]);
+#endif
+ mac_frame->dev = dev;
+ mac_frame->protocol = tr_type_trans(mac_frame, dev);
+ netif_rx(mac_frame);
+
+ /* Now tell the card we have dealt with the received frame */
+drop_frame:
+ /* Set LISR Bit 1 */
+ writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
+
+ /* Is the ASB free ? */
+
+ if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE))
+ {
+ streamer_priv->asb_queued = 1;
+ writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
+ return;
+ /* Drop out and wait for the bottom half to be run */
+ }
+
+
+ writew(streamer_priv->asb, streamer_mmio + LAPA);
+ writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+ writew(0, streamer_mmio + LAPDINC);
+ writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
+
+ writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
+
+ streamer_priv->asb_queued = 2;
+ return;
+
+ } else if (arb_word == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
+ writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
+ lan_status = ntohs(readw(streamer_mmio + LAPDINC));
+ fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8;
+
+ /* Issue ARB Free */
+ writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
+
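+		/* Keep only the status bits that have newly come on since the
+		 * last lan.change.status report.
+		 */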
+ lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) &
+ lan_status;
+
+ if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR))
+ {
+ if (lan_status_diff & LSC_LWF)
+ printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
+ if (lan_status_diff & LSC_ARW)
+ printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
+ if (lan_status_diff & LSC_FPE)
+ printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
+ if (lan_status_diff & LSC_RR)
+ printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);
+
+ /* Adapter has been closed by the hardware */
+
+ /* reset tx/rx fifo's and busmaster logic */
+
+ /* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
+ udelay(1);
+ writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
+ }
+ /* If serious error */
+ if (streamer_priv->streamer_message_level) {
+ if (lan_status_diff & LSC_SIG_LOSS)
+ printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
+ if (lan_status_diff & LSC_HARD_ERR)
+ printk(KERN_INFO "%s: Beaconing \n", dev->name);
+ if (lan_status_diff & LSC_SOFT_ERR)
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
+ if (lan_status_diff & LSC_TRAN_BCN)
+			printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
+ if (lan_status_diff & LSC_SS)
+ printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ if (lan_status_diff & LSC_RING_REC)
+ printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
+ if (lan_status_diff & LSC_FDX_MODE)
+ printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
+ }
+
+ if (lan_status_diff & LSC_CO) {
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+
+ /* Issue READ.LOG command */
+
+ writew(streamer_priv->srb, streamer_mmio + LAPA);
+ writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+ writew(0, streamer_mmio + LAPDINC);
+ streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */
+
+ writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
+ }
+
+ if (lan_status_diff & LSC_SR_CO) {
+ if (streamer_priv->streamer_message_level)
+ printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
+
+ /* Issue a READ.SR.COUNTERS */
+ writew(streamer_priv->srb, streamer_mmio + LAPA);
+ writew(htons(SRB_READ_SR_COUNTERS << 8),
+ streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8),
+ streamer_mmio+LAPDINC);
+ streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */
+ writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
+
+ }
+ streamer_priv->streamer_lan_status = lan_status;
+ } /* Lan.change.status */
+ else
+ printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+}
+
+static void streamer_asb_bh(struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+
+ if (streamer_priv->asb_queued == 1)
+ {
+ /* Dropped through the first time */
+
+ writew(streamer_priv->asb, streamer_mmio + LAPA);
+ writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
+ writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
+ writew(0, streamer_mmio + LAPDINC);
+ writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
+
+ writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
+ streamer_priv->asb_queued = 2;
+
+ return;
+ }
+
+ if (streamer_priv->asb_queued == 2) {
+ __u8 rc;
+ writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
+ rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
+ switch (rc) {
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ break;
+ case 0x26:
+ printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ break;
+ case 0xFF:
+ /* Valid response, everything should be ok again */
+ break;
+ default:
+ printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
+ break;
+ }
+ }
+ streamer_priv->asb_queued = 0;
+}
+
+static int streamer_change_mtu(struct net_device *dev, int mtu)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u16 max_mtu;
+
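+	/* Token Ring frame-size ceilings: roughly 4.5KB at 4Mbps, 18KB at 16Mbps */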
+ if (streamer_priv->streamer_ring_speed == 4)
+ max_mtu = 4500;
+ else
+ max_mtu = 18000;
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+ if (mtu < 100)
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ streamer_priv->pkt_buf_sz = mtu + TR_HLEN;
+
+ return 0;
+}
+
+#if STREAMER_NETWORK_MONITOR
+#ifdef CONFIG_PROC_FS
+static int streamer_proc_info(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ struct streamer_private *sdev=NULL;
+ struct pci_dev *pci_device = NULL;
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct net_device *dev;
+
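+	/* Standard read_proc bookkeeping: build the report for each adapter and
+	 * hand back only the offset/length window the caller asked for.
+	 */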
+ size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");
+
+ pos += size;
+ len += size;
+
+ for(sdev=dev_streamer; sdev; sdev=sdev->next) {
+ pci_device=sdev->pci_dev;
+ dev=pci_get_drvdata(pci_device);
+
+ size = sprintf_info(buffer + len, dev);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ break;
+ } /* for */
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if (len > length)
+ len = length; /* Ending slop */
+ return len;
+}
+
+static int sprintf_info(char *buffer, struct net_device *dev)
+{
+ struct streamer_private *streamer_priv =
+ (struct streamer_private *) dev->priv;
+ __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+ struct streamer_adapter_addr_table sat;
+ struct streamer_parameters_table spt;
+ int size = 0;
+ int i;
+
+ writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
+ for (i = 0; i < 14; i += 2) {
+ __u16 io_word;
+ __u8 *datap = (__u8 *) & sat;
+ io_word=ntohs(readw(streamer_mmio+LAPDINC));
+		datap[i] = io_word >> 8;
+		datap[i+1] = io_word & 0xff;
+ }
+ writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
+ for (i = 0; i < 68; i += 2) {
+ __u16 io_word;
+ __u8 *datap = (__u8 *) & spt;
+ io_word=ntohs(readw(streamer_mmio+LAPDINC));
+		datap[i] = io_word >> 8;
+		datap[i+1] = io_word & 0xff;
+ }
+
+
+ size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
+
+ size += sprintf(buffer + size,
+ "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
+ dev->name, dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5], sat.node_addr[0], sat.node_addr[1],
+ sat.node_addr[2], sat.node_addr[3], sat.node_addr[4],
+ sat.node_addr[5], sat.func_addr[0], sat.func_addr[1],
+ sat.func_addr[2], sat.func_addr[3]);
+
+ size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
+
+ size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
+
+ size += sprintf(buffer + size,
+ "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n",
+ dev->name, spt.phys_addr[0], spt.phys_addr[1],
+ spt.phys_addr[2], spt.phys_addr[3],
+ spt.up_node_addr[0], spt.up_node_addr[1],
+ spt.up_node_addr[2], spt.up_node_addr[3],
+			spt.up_node_addr[4], spt.up_node_addr[5],
+ spt.poll_addr[0], spt.poll_addr[1], spt.poll_addr[2],
+ spt.poll_addr[3], spt.poll_addr[4], spt.poll_addr[5],
+ ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
+ ntohs(spt.att_code));
+
+ size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
+
+ size += sprintf(buffer + size,
+ "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n",
+ dev->name, spt.source_addr[0], spt.source_addr[1],
+ spt.source_addr[2], spt.source_addr[3],
+ spt.source_addr[4], spt.source_addr[5],
+ ntohs(spt.beacon_type), ntohs(spt.major_vector),
+ ntohs(spt.lan_status), ntohs(spt.local_ring),
+ ntohs(spt.mon_error), ntohs(spt.frame_correl));
+
+ size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
+ dev->name);
+
+ size += sprintf(buffer + size,
+ "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n",
+ dev->name, ntohs(spt.beacon_transmit),
+ ntohs(spt.beacon_receive), spt.beacon_naun[0],
+ spt.beacon_naun[1], spt.beacon_naun[2],
+ spt.beacon_naun[3], spt.beacon_naun[4],
+ spt.beacon_naun[5], spt.beacon_phys[0],
+ spt.beacon_phys[1], spt.beacon_phys[2],
+ spt.beacon_phys[3]);
+ return size;
+}
+#endif
+#endif
+
+#if STREAMER_IOCTL && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+static int streamer_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int i;
+ struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
+ u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
+
+ switch(cmd) {
+ case IOCTL_SISR_MASK:
+ writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);
+ break;
+ case IOCTL_SPIN_LOCK_TEST:
+ printk(KERN_INFO "spin_lock() called.\n");
+ spin_lock(&streamer_priv->streamer_lock);
+ spin_unlock(&streamer_priv->streamer_lock);
+ printk(KERN_INFO "spin_unlock() finished.\n");
+ break;
+ case IOCTL_PRINT_BDAS:
+ printk(KERN_INFO "bdas: RXBDA: %x RXLBDA: %x TX2FDA: %x TX2LFDA: %x\n",
+ readw(streamer_mmio + RXBDA),
+ readw(streamer_mmio + RXLBDA),
+ readw(streamer_mmio + TX2FDA),
+ readw(streamer_mmio + TX2LFDA));
+ break;
+ case IOCTL_PRINT_REGISTERS:
+ printk(KERN_INFO "registers:\n");
+ printk(KERN_INFO "SISR: %04x MISR: %04x LISR: %04x BCTL: %04x BMCTL: %04x\nmask %04x mask %04x\n",
+ readw(streamer_mmio + SISR),
+ readw(streamer_mmio + MISR_RUM),
+ readw(streamer_mmio + LISR),
+ readw(streamer_mmio + BCTL),
+ readw(streamer_mmio + BMCTL_SUM),
+ readw(streamer_mmio + SISR_MASK),
+ readw(streamer_mmio + MISR_MASK));
+ break;
+ case IOCTL_PRINT_RX_BUFS:
+ printk(KERN_INFO "Print rx bufs:\n");
+ for(i=0; i<STREAMER_RX_RING_SIZE; i++)
+ printk(KERN_INFO "rx_ring %d status: 0x%x\n", i,
+ streamer_priv->streamer_rx_ring[i].status);
+ break;
+ case IOCTL_PRINT_TX_BUFS:
+ printk(KERN_INFO "Print tx bufs:\n");
+ for(i=0; i<STREAMER_TX_RING_SIZE; i++)
+ printk(KERN_INFO "tx_ring %d status: 0x%x\n", i,
+ streamer_priv->streamer_tx_ring[i].status);
+ break;
+ case IOCTL_RX_CMD:
+ streamer_rx(dev);
+ printk(KERN_INFO "Sent rx command.\n");
+ break;
+ default:
+ printk(KERN_INFO "Bad ioctl!\n");
+ }
+ return 0;
+}
+#endif
+
+static struct pci_driver streamer_pci_driver = {
+ .name = "lanstreamer",
+ .id_table = streamer_pci_tbl,
+ .probe = streamer_init_one,
+ .remove = __devexit_p(streamer_remove_one),
+};
+
+static int __init streamer_init_module(void) {
+ return pci_module_init(&streamer_pci_driver);
+}
+
+static void __exit streamer_cleanup_module(void) {
+ pci_unregister_driver(&streamer_pci_driver);
+}
+
+module_init(streamer_init_module);
+module_exit(streamer_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
new file mode 100644
index 000000000000..5557d8e1e22d
--- /dev/null
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -0,0 +1,358 @@
+/*
+ * lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter
+ *
+ * Written By: Mike Sullivan, IBM Corporation
+ *
+ * Copyright (C) 1999 IBM Corporation
+ *
+ * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
+ * chipset.
+ *
+ * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
+ * chipsets) written by:
+ * 1999 Peter De Schrijver All Rights Reserved
+ * 1999 Mike Phillips (phillim@amtrak.com)
+ *
+ * Base Driver Skeleton:
+ * Written 1993-94 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * 12/10/99 - Alpha Release 0.1.0
+ * First release to the public
+ * 08/15/01 - Added ioctl() definitions and others - Kent Yoder <yoder1@us.ibm.com>
+ *
+ */
+
+#include <linux/version.h>
+
+#if STREAMER_IOCTL && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+#include <asm/ioctl.h>
+#define IOCTL_PRINT_RX_BUFS SIOCDEVPRIVATE
+#define IOCTL_PRINT_TX_BUFS SIOCDEVPRIVATE+1
+#define IOCTL_RX_CMD SIOCDEVPRIVATE+2
+#define IOCTL_TX_CMD SIOCDEVPRIVATE+3
+#define IOCTL_PRINT_REGISTERS SIOCDEVPRIVATE+4
+#define IOCTL_PRINT_BDAS SIOCDEVPRIVATE+5
+#define IOCTL_SPIN_LOCK_TEST SIOCDEVPRIVATE+6
+#define IOCTL_SISR_MASK SIOCDEVPRIVATE+7
+#endif
+
+/* MAX_INTR - the maximum number of times we can loop
+ * inside the interrupt function before returning
+ * control to the OS (maximum value is 256)
+ */
+#define MAX_INTR 5
+
+#define CLS 0x0C
+#define MLR 0x86
+#define LTR 0x0D
+
+#define BCTL 0x60
+#define BCTL_SOFTRESET (1<<15)
+#define BCTL_RX_FIFO_8 (1<<1)
+#define BCTL_TX_FIFO_8 (1<<3)
+
+#define GPR 0x4a
+#define GPR_AUTOSENSE (1<<2)
+#define GPR_16MBPS (1<<3)
+
+#define LISR 0x10
+#define LISR_SUM 0x12
+#define LISR_RUM 0x14
+
+#define LISR_LIE (1<<15)
+#define LISR_SLIM (1<<13)
+#define LISR_SLI (1<<12)
+#define LISR_BPEI (1<<9)
+#define LISR_BPE (1<<8)
+#define LISR_SRB_CMD (1<<5)
+#define LISR_ASB_REPLY (1<<4)
+#define LISR_ASB_FREE_REQ (1<<2)
+#define LISR_ARB_FREE (1<<1)
+#define LISR_TRB_FRAME (1<<0)
+
+#define SISR 0x16
+#define SISR_SUM 0x18
+#define SISR_RUM 0x1A
+#define SISR_MASK 0x54
+#define SISR_MASK_SUM 0x56
+#define SISR_MASK_RUM 0x58
+
+#define SISR_MI (1<<15)
+#define SISR_SERR_ERR (1<<14)
+#define SISR_TIMER (1<<11)
+#define SISR_LAP_PAR_ERR (1<<10)
+#define SISR_LAP_ACC_ERR (1<<9)
+#define SISR_PAR_ERR (1<<8)
+#define SISR_ADAPTER_CHECK (1<<6)
+#define SISR_SRB_REPLY (1<<5)
+#define SISR_ASB_FREE (1<<4)
+#define SISR_ARB_CMD (1<<3)
+#define SISR_TRB_REPLY (1<<2)
+
+#define MISR_RUM 0x5A
+#define MISR_MASK 0x5C
+#define MISR_MASK_RUM 0x5E
+
+#define MISR_TX2_IDLE (1<<15)
+#define MISR_TX2_NO_STATUS (1<<14)
+#define MISR_TX2_HALT (1<<13)
+#define MISR_TX2_EOF (1<<12)
+#define MISR_TX1_IDLE (1<<11)
+#define MISR_TX1_NO_STATUS (1<<10)
+#define MISR_TX1_HALT (1<<9)
+#define MISR_TX1_EOF (1<<8)
+#define MISR_RX_NOBUF (1<<5)
+#define MISR_RX_EOB (1<<4)
+#define MISR_RX_NO_STATUS (1<<2)
+#define MISR_RX_HALT (1<<1)
+#define MISR_RX_EOF (1<<0)
+
+#define LAPA 0x62
+#define LAPE 0x64
+#define LAPD 0x66
+#define LAPDINC 0x68
+#define LAPWWO 0x6A
+#define LAPWWC 0x6C
+#define LAPCTL 0x6E
+
+#define TIMER 0x4E4
+
+#define BMCTL_SUM 0x50
+#define BMCTL_RUM 0x52
+#define BMCTL_TX1_DIS (1<<14)
+#define BMCTL_TX2_DIS (1<<10)
+#define BMCTL_RX_DIS (1<<6)
+#define BMCTL_RX_ENABLED (1<<5)
+
+#define RXLBDA 0x90
+#define RXBDA 0x94
+#define RXSTAT 0x98
+#define RXDBA 0x9C
+
+#define TX1LFDA 0xA0
+#define TX1FDA 0xA4
+#define TX1STAT 0xA8
+#define TX1DBA 0xAC
+#define TX2LFDA 0xB0
+#define TX2FDA 0xB4
+#define TX2STAT 0xB8
+#define TX2DBA 0xBC
+
+#define STREAMER_IO_SPACE 256
+
+#define SRB_COMMAND_SIZE 50
+
+#define STREAMER_MAX_ADAPTERS 8	/* 0x08 __MODULE_STRING can't handle 0xnn */
+
+/* Defines for LAN STATUS CHANGE reports */
+#define LSC_SIG_LOSS 0x8000
+#define LSC_HARD_ERR 0x4000
+#define LSC_SOFT_ERR 0x2000
+#define LSC_TRAN_BCN 0x1000
+#define LSC_LWF 0x0800
+#define LSC_ARW 0x0400
+#define LSC_FPE 0x0200
+#define LSC_RR 0x0100
+#define LSC_CO 0x0080
+#define LSC_SS 0x0040
+#define LSC_RING_REC 0x0020
+#define LSC_SR_CO 0x0010
+#define LSC_FDX_MODE 0x0004
+
+/* Defines for OPEN ADAPTER command */
+
+#define OPEN_ADAPTER_EXT_WRAP (1<<15)
+#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
+#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
+#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
+#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
+#define OPEN_ADAPTER_ENABLE_EC (1<<10)
+#define OPEN_ADAPTER_CONTENDER (1<<8)
+#define OPEN_ADAPTER_PASS_BEACON (1<<7)
+#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
+#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
+#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
+#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
+
+
+/* Defines for SRB Commands */
+#define SRB_CLOSE_ADAPTER 0x04
+#define SRB_CONFIGURE_BRIDGE 0x0c
+#define SRB_CONFIGURE_HP_CHANNEL 0x13
+#define SRB_MODIFY_BRIDGE_PARMS 0x15
+#define SRB_MODIFY_OPEN_OPTIONS 0x01
+#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
+#define SRB_NO_OPERATION 0x00
+#define SRB_OPEN_ADAPTER 0x03
+#define SRB_READ_LOG 0x08
+#define SRB_READ_SR_COUNTERS 0x16
+#define SRB_RESET_GROUP_ADDRESS 0x02
+#define SRB_RESET_TARGET_SEGMETN 0x14
+#define SRB_SAVE_CONFIGURATION 0x1b
+#define SRB_SET_BRIDGE_PARMS 0x09
+#define SRB_SET_FUNC_ADDRESS 0x07
+#define SRB_SET_GROUP_ADDRESS 0x06
+#define SRB_SET_TARGET_SEGMENT 0x05
+
+/* Clear return code */
+#define STREAMER_CLEAR_RET_CODE 0xfe
+
+/* ARB Commands */
+#define ARB_RECEIVE_DATA 0x81
+#define ARB_LAN_CHANGE_STATUS 0x84
+
+/* ASB Response commands */
+#define ASB_RECEIVE_DATA 0x81
+
+
+/* Streamer defaults for buffers */
+
+#define STREAMER_RX_RING_SIZE 16 /* should be a power of 2 */
+/* Setting the number of TX descriptors to 1 is a workaround for an
+ * undocumented hardware problem with the lanstreamer board. Setting
+ * this to something higher may slightly increase the throughput you
+ * can get from the card, but at the risk of locking up the box. -
+ * <yoder1@us.ibm.com>
+ */
+#define STREAMER_TX_RING_SIZE 1 /* should be a power of 2 */
+
+#define PKT_BUF_SZ 4096 /* Default packet size */
+
+/* Streamer data structures */
+
+struct streamer_tx_desc {
+ __u32 forward;
+ __u32 status;
+ __u32 bufcnt_framelen;
+ __u32 buffer;
+ __u32 buflen;
+ __u32 rsvd1;
+ __u32 rsvd2;
+ __u32 rsvd3;
+};
+
+struct streamer_rx_desc {
+ __u32 forward;
+ __u32 status;
+ __u32 buffer;
+ __u32 framelen_buflen;
+};
+
+struct mac_receive_buffer {
+ __u16 next;
+ __u8 padding;
+ __u8 frame_status;
+ __u16 buffer_length;
+ __u8 frame_data;
+};
+
+struct streamer_private {
+
+ __u16 srb;
+ __u16 trb;
+ __u16 arb;
+ __u16 asb;
+
+ struct streamer_private *next;
+ struct pci_dev *pci_dev;
+ __u8 __iomem *streamer_mmio;
+ char *streamer_card_name;
+
+ spinlock_t streamer_lock;
+
+ volatile int srb_queued; /* True if an SRB is still posted */
+ wait_queue_head_t srb_wait;
+
+ volatile int asb_queued; /* True if an ASB is posted */
+
+ volatile int trb_queued; /* True if a TRB is posted */
+ wait_queue_head_t trb_wait;
+
+ struct streamer_rx_desc *streamer_rx_ring;
+ struct streamer_tx_desc *streamer_tx_ring;
+ struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE],
+ *rx_ring_skb[STREAMER_RX_RING_SIZE];
+ int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
+ free_tx_ring_entries;
+
+ struct net_device_stats streamer_stats;
+ __u16 streamer_lan_status;
+ __u8 streamer_ring_speed;
+ __u16 pkt_buf_sz;
+ __u8 streamer_receive_options, streamer_copy_all_options,
+ streamer_message_level;
+ __u16 streamer_addr_table_addr, streamer_parms_addr;
+ __u16 mac_rx_buffer;
+ __u8 streamer_laa[6];
+};
+
+struct streamer_adapter_addr_table {
+
+ __u8 node_addr[6];
+ __u8 reserved[4];
+ __u8 func_addr[4];
+};
+
+struct streamer_parameters_table {
+
+ __u8 phys_addr[4];
+ __u8 up_node_addr[6];
+ __u8 up_phys_addr[4];
+ __u8 poll_addr[6];
+ __u16 reserved;
+ __u16 acc_priority;
+ __u16 auth_source_class;
+ __u16 att_code;
+ __u8 source_addr[6];
+ __u16 beacon_type;
+ __u16 major_vector;
+ __u16 lan_status;
+ __u16 soft_error_time;
+ __u16 reserved1;
+ __u16 local_ring;
+ __u16 mon_error;
+ __u16 beacon_transmit;
+ __u16 beacon_receive;
+ __u16 frame_correl;
+ __u8 beacon_naun[6];
+ __u32 reserved2;
+ __u8 beacon_phys[4];
+};
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
new file mode 100644
index 000000000000..cfae2bbf2167
--- /dev/null
+++ b/drivers/net/tokenring/madgemc.c
@@ -0,0 +1,800 @@
+/*
+ * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card.
+ *
+ * Written 2000 by Adam Fritzler
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver module supports the following cards:
+ * - Madge Smart 16/4 Ringnode MC16
+ * - Madge Smart 16/4 Ringnode MC32 (??)
+ *
+ * Maintainer(s):
+ * AF Adam Fritzler mid@auk.cx
+ *
+ * Modification History:
+ * 16-Jan-00 AF Created
+ *
+ */
+static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
+
+#include <linux/module.h>
+#include <linux/mca-legacy.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "tms380tr.h"
+#include "madgemc.h" /* Madge-specific constants */
+
+#define MADGEMC_IO_EXTENT 32
+#define MADGEMC_SIF_OFFSET 0x08
+
+struct madgemc_card {
+ struct net_device *dev;
+
+ /*
+ * These are read from the BIA ROM.
+ */
+ unsigned int manid;
+ unsigned int cardtype;
+ unsigned int cardrev;
+ unsigned int ramsize;
+
+ /*
+ * These are read from the MCA POS registers.
+ */
+ unsigned int burstmode:2;
+ unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */
+ unsigned int arblevel:4;
+ unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
+ unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
+
+ struct madgemc_card *next;
+};
+static struct madgemc_card *madgemc_card_list;
+
+
+static int madgemc_open(struct net_device *dev);
+static int madgemc_close(struct net_device *dev);
+static int madgemc_chipset_init(struct net_device *dev);
+static void madgemc_read_rom(struct madgemc_card *card);
+static unsigned short madgemc_setnselout_pins(struct net_device *dev);
+static void madgemc_setcabletype(struct net_device *dev, int type);
+
+static int madgemc_mcaproc(char *buf, int slot, void *d);
+
+static void madgemc_setregpage(struct net_device *dev, int page);
+static void madgemc_setsifsel(struct net_device *dev, int val);
+static void madgemc_setint(struct net_device *dev, int val);
+
+static irqreturn_t madgemc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*
+ * These work around paging; however, they don't guarantee you're on the
+ * right page.
+ */
+#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
+#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
+#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
+#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
+
+/*
+ * Read a byte-length value from the register.
+ */
+static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg)
+{
+ unsigned short ret;
+ if (reg<0x8)
+ ret = SIFREADB(reg);
+ else {
+ madgemc_setregpage(dev, 1);
+ ret = SIFREADB(reg);
+ madgemc_setregpage(dev, 0);
+ }
+ return ret;
+}
+
+/*
+ * Write a byte-length value to a register.
+ */
+static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ if (reg<0x8)
+ SIFWRITEB(val, reg);
+ else {
+ madgemc_setregpage(dev, 1);
+ SIFWRITEB(val, reg);
+ madgemc_setregpage(dev, 0);
+ }
+ return;
+}
+
+/*
+ * Read a word-length value from a register
+ */
+static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg)
+{
+ unsigned short ret;
+ if (reg<0x8)
+ ret = SIFREADW(reg);
+ else {
+ madgemc_setregpage(dev, 1);
+ ret = SIFREADW(reg);
+ madgemc_setregpage(dev, 0);
+ }
+ return ret;
+}
+
+/*
+ * Write a word-length value to a register.
+ */
+static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ if (reg<0x8)
+ SIFWRITEW(val, reg);
+ else {
+ madgemc_setregpage(dev, 1);
+ SIFWRITEW(val, reg);
+ madgemc_setregpage(dev, 0);
+ }
+ return;
+}
+
+
+
+static int __init madgemc_probe(void)
+{
+ static int versionprinted;
+ struct net_device *dev;
+ struct net_local *tp;
+ struct madgemc_card *card;
+ int i,slot = 0;
+ __u8 posreg[4];
+
+ if (!MCA_bus)
+ return -1;
+
+ while (slot != MCA_NOTFOUND) {
+ /*
+ * Currently we only support the MC16/32 (MCA ID 002d)
+ */
+ slot = mca_find_unused_adapter(0x002d, slot);
+ if (slot == MCA_NOTFOUND)
+ break;
+
+ /*
+ * If we get here, we have an adapter.
+ */
+ if (versionprinted++ == 0)
+ printk("%s", version);
+
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (dev == NULL) {
+ printk("madgemc: unable to allocate dev space\n");
+ if (madgemc_card_list)
+ return 0;
+ return -1;
+ }
+
+ SET_MODULE_OWNER(dev);
+ dev->dma = 0;
+
+ /*
+ * Fetch MCA config registers
+ */
+ for(i=0;i<4;i++)
+ posreg[i] = mca_read_stored_pos(slot, i+2);
+
+ card = kmalloc(sizeof(struct madgemc_card), GFP_KERNEL);
+ if (card==NULL) {
+ printk("madgemc: unable to allocate card struct\n");
+ free_netdev(dev);
+ if (madgemc_card_list)
+ return 0;
+ return -1;
+ }
+ card->dev = dev;
+
+ /*
+ * Parse configuration information. This all comes
+ * directly from the publicly available @002d.ADF.
+ * Get it from Madge or your local ADF library.
+ */
+
+ /*
+ * Base address
+ */
+ dev->base_addr = 0x0a20 +
+ ((posreg[2] & MC16_POS2_ADDR2)?0x0400:0) +
+ ((posreg[0] & MC16_POS0_ADDR1)?0x1000:0) +
+ ((posreg[3] & MC16_POS3_ADDR3)?0x2000:0);
+
+ /*
+ * Interrupt line
+ */
+ switch(posreg[0] >> 6) { /* upper two bits */
+ case 0x1: dev->irq = 3; break;
+ case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
+ case 0x3: dev->irq = 10; break;
+ default: dev->irq = 0; break;
+ }
+
+ if (dev->irq == 0) {
+ printk("%s: invalid IRQ\n", dev->name);
+ goto getout1;
+ }
+
+ if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
+ "madgemc")) {
+ printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", slot, dev->base_addr);
+ dev->base_addr += MADGEMC_SIF_OFFSET;
+ goto getout1;
+ }
+ dev->base_addr += MADGEMC_SIF_OFFSET;
+
+ /*
+ * Arbitration Level
+ */
+ card->arblevel = ((posreg[0] >> 1) & 0x7) + 8;
+
+ /*
+ * Burst mode and Fairness
+ */
+ card->burstmode = ((posreg[2] >> 6) & 0x3);
+ card->fairness = ((posreg[2] >> 4) & 0x1);
+
+ /*
+ * Ring Speed
+ */
+ if ((posreg[1] >> 2)&0x1)
+ card->ringspeed = 2; /* not selected */
+ else if ((posreg[2] >> 5) & 0x1)
+ card->ringspeed = 1; /* 16Mb */
+ else
+ card->ringspeed = 0; /* 4Mb */
+
+ /*
+ * Cable type
+ */
+ if ((posreg[1] >> 6)&0x1)
+ card->cabletype = 1; /* STP/DB9 */
+ else
+ card->cabletype = 0; /* UTP/RJ-45 */
+
+
+ /*
+ * ROM Info. This requires us to actually twiddle
+ * bits on the card, so we must ensure above that
+ * the base address is free of conflict (request_region above).
+ */
+ madgemc_read_rom(card);
+
+ if (card->manid != 0x4d) { /* something went wrong */
+ printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
+ goto getout;
+ }
+
+ if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
+ printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
+ goto getout;
+ }
+
+ /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
+ if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
+ card->ramsize = 128;
+ else
+ card->ramsize = 256;
+
+ printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
+ dev->name,
+ (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
+ MADGEMC32_CARDNAME, card->cardrev,
+ dev->base_addr, dev->irq);
+
+ if (card->cardtype == 0x0d)
+ printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
+
+ if (card->ringspeed==2) { /* Unknown */
+ printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
+ card->ringspeed = 1; /* default to 16mb */
+ }
+
+ printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
+
+ printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
+ (card->ringspeed)?16:4,
+ card->cabletype?"STP/DB9":"UTP/RJ-45");
+ printk("%s: Arbitration Level: %d\n", dev->name,
+ card->arblevel);
+
+ printk("%s: Burst Mode: ", dev->name);
+ switch(card->burstmode) {
+ case 0: printk("Cycle steal"); break;
+ case 1: printk("Limited burst"); break;
+ case 2: printk("Delayed release"); break;
+ case 3: printk("Immediate release"); break;
+ }
+ printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
+
+
+ /*
+ * Enable SIF before we assign the interrupt handler,
+ * just in case we get spurious interrupts that need
+ * handling.
+ */
+ outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
+ madgemc_setsifsel(dev, 1);
+ if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
+ "madgemc", dev))
+ goto getout;
+
+ madgemc_chipset_init(dev); /* enables interrupts! */
+ madgemc_setcabletype(dev, card->cabletype);
+
+ /* Setup MCA structures */
+ mca_set_adapter_name(slot, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
+ mca_set_adapter_procfn(slot, madgemc_mcaproc, dev);
+ mca_mark_as_used(slot);
+
+ printk("%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (i = 1; i < 6; i++)
+ printk(":%2.2x", dev->dev_addr[i]);
+ printk("\n");
+
+ /* XXX is ISA_MAX_ADDRESS correct here? */
+ if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) {
+ printk("%s: unable to get memory for dev->priv.\n",
+ dev->name);
+ release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
+ MADGEMC_IO_EXTENT);
+
+ kfree(card);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ if (madgemc_card_list)
+ return 0;
+ return -1;
+ }
+ tp = netdev_priv(dev);
+
+ /*
+ * The MC16 is physically a 32bit card. However, Madge
+ * insists on calling it 16bit, so I'll assume here that
+ * they know what they're talking about. Cut off DMA
+ * at 16mb.
+ */
+ tp->setnselout = madgemc_setnselout_pins;
+ tp->sifwriteb = madgemc_sifwriteb;
+ tp->sifreadb = madgemc_sifreadb;
+ tp->sifwritew = madgemc_sifwritew;
+ tp->sifreadw = madgemc_sifreadw;
+ tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
+
+ memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
+
+ dev->open = madgemc_open;
+ dev->stop = madgemc_close;
+
+ if (register_netdev(dev) == 0) {
+ /* Enlist in the card list */
+ card->next = madgemc_card_list;
+ madgemc_card_list = card;
+ slot++;
+ continue; /* successful, try to find another */
+ }
+
+ free_irq(dev->irq, dev);
+ getout:
+ release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
+ MADGEMC_IO_EXTENT);
+ getout1:
+ kfree(card);
+ free_netdev(dev);
+ slot++;
+ }
+
+ if (madgemc_card_list)
+ return 0;
+ return -1;
+}
+
+/*
+ * Handle interrupts generated by the card
+ *
+ * The MicroChannel Madge cards need slightly more handling
+ * after an interrupt than other TMS380 cards do.
+ *
+ * First we must make sure it was this card that generated the
+ * interrupt (since interrupt sharing is allowed). Then,
+ * because we're using level-triggered interrupts (as is
+ * standard on MCA), we must toggle the interrupt line
+ * on the card in order to claim and acknowledge the interrupt.
+ * Once that is done, the interrupt can be handled by the
+ * normal tms380tr_interrupt() routine.
+ *
+ * There are two ways we can check whether the interrupt is ours,
+ * both with their own disadvantages...
+ *
+ * 1) Read in the SIFSTS register from the TMS controller. This
+ * is guaranteed to be accurate; however, there's a fairly
+ * large performance penalty for doing so: the Madge chips
+ * must request the register from the Eagle, the Eagle must
+ * read them from its internal bus, and then take the route
+ * back out again, for a 16bit read.
+ *
+ * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs.
+ * The major disadvantage here is that the accuracy of the
+ * bit is in question. However, it cuts out the extra read
+ * cycles it takes to read the Eagle's SIF, as it's only an
+ * 8bit read, and theoretically the Madge bit is directly
+ * connected to the interrupt latch coming out of the Eagle
+ * hardware (that statement is not verified).
+ *
+ * I can't determine which of these methods performs better. For now,
+ * we compromise: use the Madge way for the first interrupt, which
+ * should be the fast path, and then keep polling via the SIF method
+ * until we've exhausted all contiguous interrupts.
+ *
+ */
+static irqreturn_t madgemc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ int pending,reg1;
+ struct net_device *dev;
+
+ if (!dev_id) {
+ printk("madgemc_interrupt: was not passed a dev_id!\n");
+ return IRQ_NONE;
+ }
+
+ dev = (struct net_device *)dev_id;
+
+ /* Make sure its really us. -- the Madge way */
+ pending = inb(dev->base_addr + MC_CONTROL_REG0);
+ if (!(pending & MC_CONTROL_REG0_SINTR))
+ return IRQ_NONE; /* not our interrupt */
+
+ /*
+ * Since we're level-triggered, we may miss the rising edge
+ * of the next interrupt while we're off handling this one,
+ * so keep checking until the SIF verifies that it has nothing
+ * left for us to do.
+ */
+ pending = STS_SYSTEM_IRQ;
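+ /* Seed the loop: the SINTR check above already told us the
+ * interrupt is ours, so service it at least once before
+ * falling back to polling SIFSTS. */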
+ do {
+ if (pending & STS_SYSTEM_IRQ) {
+
+ /* Toggle the interrupt to reset the latch on card */
+ reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+ outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
+ dev->base_addr + MC_CONTROL_REG1);
+ outb(reg1, dev->base_addr + MC_CONTROL_REG1);
+
+ /* Continue handling as normal */
+ tms380tr_interrupt(irq, dev_id, regs);
+
+ pending = SIFREADW(SIFSTS); /* restart - the SIF way */
+
+ } else
+ return IRQ_HANDLED;
+ } while (1);
+
+ return IRQ_HANDLED; /* not reachable */
+}
+
+/*
+ * Set the card to the preferred ring speed.
+ *
+ * Unlike newer cards, the MC16/32 have their speed selection
+ * circuit connected to the Madge ASICs and not to the TMS380
+ * NSELOUT pins. Set the ASIC bits correctly here, and return
+ * zero to leave the TMS NSELOUT bits unaffected.
+ *
+ */
+unsigned short madgemc_setnselout_pins(struct net_device *dev)
+{
+ unsigned char reg1;
+ struct net_local *tp = netdev_priv(dev);
+
+ reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+
+ if(tp->DataRate == SPEED_16)
+ reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */
+ else if (reg1 & MC_CONTROL_REG1_SPEED_SEL)
+ reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */
+ outb(reg1, dev->base_addr + MC_CONTROL_REG1);
+
+ return 0; /* no change */
+}
+
+/*
+ * Set the register page. This equates to the SRSX line
+ * on the TMS380Cx6.
+ *
+ * Register selection is normally done via three contiguous
+ * bits. However, some boards (such as the MC16/32) use only
+ * two bits, plus a separate bit in the glue chip. This
+ * sets the SRSX bit (the top bit). See page 4-17 in the
+ * Yellow Book for which registers are affected.
+ *
+ */
+static void madgemc_setregpage(struct net_device *dev, int page)
+{
+ static int reg1;
+
+ reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+ if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) {
+ outb(reg1 ^ MC_CONTROL_REG1_SRSX,
+ dev->base_addr + MC_CONTROL_REG1);
+ }
+ else if (page == 1) {
+ outb(reg1 | MC_CONTROL_REG1_SRSX,
+ dev->base_addr + MC_CONTROL_REG1);
+ }
+ reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+
+ return;
+}
+
+/*
+ * The SIF registers are not mapped into register space by default.
+ * Set this to 1 to map them, or 0 to map the BIA ROM instead.
+ *
+ */
+static void madgemc_setsifsel(struct net_device *dev, int val)
+{
+ unsigned int reg0;
+
+ reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
+ if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) {
+ outb(reg0 ^ MC_CONTROL_REG0_SIFSEL,
+ dev->base_addr + MC_CONTROL_REG0);
+ } else if (val == 1) {
+ outb(reg0 | MC_CONTROL_REG0_SIFSEL,
+ dev->base_addr + MC_CONTROL_REG0);
+ }
+ reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
+
+ return;
+}
+
+/*
+ * Enable SIF interrupts
+ *
+ * This does not enable interrupts in the SIF, but rather
+ * enables SIF interrupts to be passed onto the host.
+ *
+ */
+static void madgemc_setint(struct net_device *dev, int val)
+{
+ unsigned int reg1;
+
+ reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
+ if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) {
+ outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
+ dev->base_addr + MC_CONTROL_REG1);
+ } else if (val == 1) {
+ outb(reg1 | MC_CONTROL_REG1_SINTEN,
+ dev->base_addr + MC_CONTROL_REG1);
+ }
+
+ return;
+}
+
+/*
+ * Cable type is set via control register 7. Bit zero high
+ * for UTP, low for STP.
+ */
+static void madgemc_setcabletype(struct net_device *dev, int type)
+{
+ outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP,
+ dev->base_addr + MC_CONTROL_REG7);
+}
+
+/*
+ * Enable the functions of the Madge chipset needed for
+ * full working order.
+ */
+static int madgemc_chipset_init(struct net_device *dev)
+{
+ outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */
+ tms380tr_wait(100); /* wait for card to reset */
+
+ /* bring back into normal operating mode */
+ outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1);
+
+ /* map SIF registers */
+ madgemc_setsifsel(dev, 1);
+
+ /* enable SIF interrupts */
+ madgemc_setint(dev, 1);
+
+ return 0;
+}
+
+/*
+ * Disable the board, and put back into power-up state.
+ */
+void madgemc_chipset_close(struct net_device *dev)
+{
+ /* disable interrupts */
+ madgemc_setint(dev, 0);
+ /* unmap SIF registers */
+ madgemc_setsifsel(dev, 0);
+
+ return;
+}
+
+/*
+ * Read the card type (MC16 or MC32) from the card.
+ *
+ * The configuration registers are stored in two separate
+ * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE)
+ * for page zero, or setting bit 3 for page one.
+ *
+ * Page zero contains the following data:
+ * Byte 0: Manufacturer ID (0x4D -- ASCII "M")
+ * Byte 1: Card type:
+ * 0x08 for MC16
+ * 0x0D for MC32
+ * Byte 2: Card revision
+ * Byte 3: Mirror of POS config register 0
+ * Byte 4: Mirror of POS 1
+ * Byte 5: Mirror of POS 2
+ *
+ * Page one contains the following data:
+ * Byte 0: Unused
+ * Byte 1-6: BIA, MSB to LSB.
+ *
+ * Note that to read the BIA, we must unmap the SIF registers
+ * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data
+ * will reside in the same logical location. For this reason,
+ * _never_ read the BIA while the Eagle processor is running!
+ * The SIF will be completely inaccessible until the BIA operation
+ * is complete.
+ *
+ */
+static void madgemc_read_rom(struct madgemc_card *card)
+{
+ unsigned long ioaddr;
+ unsigned char reg0, reg1, tmpreg0, i;
+
+ ioaddr = card->dev->base_addr;
+
+ reg0 = inb(ioaddr + MC_CONTROL_REG0);
+ reg1 = inb(ioaddr + MC_CONTROL_REG1);
+
+ /* Switch to page zero and unmap SIF */
+ tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL);
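+ /* PAGE (0x08) and SIFSEL (0x04) are distinct bits, so the
+ * addition above behaves as a bitwise OR of the two masks. */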
+ outb(tmpreg0, ioaddr + MC_CONTROL_REG0);
+
+ card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID);
+ card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID);
+ card->cardrev = inb(ioaddr + MC_ROM_REVISION);
+
+ /* Switch to rom page one */
+ outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
+
+ /* Read BIA */
+ card->dev->addr_len = 6;
+ for (i = 0; i < 6; i++)
+ card->dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
+
+ /* Restore original register values */
+ outb(reg0, ioaddr + MC_CONTROL_REG0);
+ outb(reg1, ioaddr + MC_CONTROL_REG1);
+
+ return;
+}
+
+static int madgemc_open(struct net_device *dev)
+{
+ /*
+ * Go ahead and reinitialize the chipset again, just to
+ * make sure we didn't get left in a bad state.
+ */
+ madgemc_chipset_init(dev);
+ tms380tr_open(dev);
+ return 0;
+}
+
+static int madgemc_close(struct net_device *dev)
+{
+ tms380tr_close(dev);
+ madgemc_chipset_close(dev);
+ return 0;
+}
+
+/*
+ * Give some details available from /proc/mca/slotX
+ */
+static int madgemc_mcaproc(char *buf, int slot, void *d)
+{
+ struct net_device *dev = (struct net_device *)d;
+ struct madgemc_card *curcard = madgemc_card_list;
+ int len = 0;
+
+ while (curcard) { /* search for card struct */
+ if (curcard->dev == dev)
+ break;
+ curcard = curcard->next;
+ }
+ len += sprintf(buf+len, "-------\n");
+ if (curcard) {
+ struct net_local *tp = netdev_priv(dev);
+ int i;
+
+ len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev);
+ len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize);
+ len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45");
+ len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4);
+ len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4);
+ len += sprintf(buf+len, "Device: %s\n", dev->name);
+ len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr);
+ len += sprintf(buf+len, "IRQ: %d\n", dev->irq);
+ len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel);
+ len += sprintf(buf+len, "Burst Mode: ");
+ switch(curcard->burstmode) {
+ case 0: len += sprintf(buf+len, "Cycle steal"); break;
+ case 1: len += sprintf(buf+len, "Limited burst"); break;
+ case 2: len += sprintf(buf+len, "Delayed release"); break;
+ case 3: len += sprintf(buf+len, "Immediate release"); break;
+ }
+ len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair");
+
+ len += sprintf(buf+len, "Ring Station Address: ");
+ len += sprintf(buf+len, "%2.2x", dev->dev_addr[0]);
+ for (i = 1; i < 6; i++)
+ len += sprintf(buf+len, " %2.2x", dev->dev_addr[i]);
+ len += sprintf(buf+len, "\n");
+ } else
+ len += sprintf(buf+len, "Card not configured\n");
+
+ return len;
+}
+
+static void __exit madgemc_exit(void)
+{
+ struct net_device *dev;
+ struct madgemc_card *this_card;
+
+ while (madgemc_card_list) {
+ dev = madgemc_card_list->dev;
+ unregister_netdev(dev);
+ release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ this_card = madgemc_card_list;
+ madgemc_card_list = this_card->next;
+ kfree(this_card);
+ }
+}
+
+module_init(madgemc_probe);
+module_exit(madgemc_exit);
+
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c madgemc.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c madgemc.c"
+ * c-file-style: "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h
new file mode 100644
index 000000000000..2dd822203809
--- /dev/null
+++ b/drivers/net/tokenring/madgemc.h
@@ -0,0 +1,70 @@
+/*
+ * madgemc.h: Header for the madgemc tms380tr module
+ *
+ * Authors:
+ * - Adam Fritzler <mid@auk.cx>
+ */
+
+#ifndef __LINUX_MADGEMC_H
+#define __LINUX_MADGEMC_H
+
+#ifdef __KERNEL__
+
+#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode"
+#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode"
+
+/*
+ * Bit definitions for the POS config registers
+ */
+#define MC16_POS0_ADDR1 0x20
+#define MC16_POS2_ADDR2 0x04
+#define MC16_POS3_ADDR3 0x20
+
+#define MC_CONTROL_REG0 ((long)-8) /* 0x00 */
+#define MC_CONTROL_REG1 ((long)-7) /* 0x01 */
+#define MC_ADAPTER_POS_REG0 ((long)-6) /* 0x02 */
+#define MC_ADAPTER_POS_REG1 ((long)-5) /* 0x03 */
+#define MC_ADAPTER_POS_REG2 ((long)-4) /* 0x04 */
+#define MC_ADAPTER_REG5_UNUSED ((long)-3) /* 0x05 */
+#define MC_ADAPTER_REG6_UNUSED ((long)-2) /* 0x06 */
+#define MC_CONTROL_REG7 ((long)-1) /* 0x07 */
+
+#define MC_CONTROL_REG0_UNKNOWN1 0x01
+#define MC_CONTROL_REG0_UNKNOWN2 0x02
+#define MC_CONTROL_REG0_SIFSEL 0x04
+#define MC_CONTROL_REG0_PAGE 0x08
+#define MC_CONTROL_REG0_TESTINTERRUPT 0x10
+#define MC_CONTROL_REG0_UNKNOWN20 0x20
+#define MC_CONTROL_REG0_SINTR 0x40
+#define MC_CONTROL_REG0_UNKNOWN80 0x80
+
+#define MC_CONTROL_REG1_SINTEN 0x01
+#define MC_CONTROL_REG1_BITOFDEATH 0x02
+#define MC_CONTROL_REG1_NSRESET 0x04
+#define MC_CONTROL_REG1_UNKNOWN8 0x08
+#define MC_CONTROL_REG1_UNKNOWN10 0x10
+#define MC_CONTROL_REG1_UNKNOWN20 0x20
+#define MC_CONTROL_REG1_SRSX 0x40
+#define MC_CONTROL_REG1_SPEED_SEL 0x80
+
+#define MC_CONTROL_REG7_CABLESTP 0x00
+#define MC_CONTROL_REG7_CABLEUTP 0x01
+
+/*
+ * ROM Page Zero
+ */
+#define MC_ROM_MANUFACTURERID 0x00
+#define MC_ROM_ADAPTERID 0x01
+#define MC_ROM_REVISION 0x02
+#define MC_ROM_CONFIG0 0x03
+#define MC_ROM_CONFIG1 0x04
+#define MC_ROM_CONFIG2 0x05
+
+/*
+ * ROM Page One
+ */
+#define MC_ROM_UNUSED_BYTE 0x00
+#define MC_ROM_BIA_START 0x01
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_MADGEMC_H */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
new file mode 100644
index 000000000000..9e7923192a49
--- /dev/null
+++ b/drivers/net/tokenring/olympic.c
@@ -0,0 +1,1786 @@
+/*
+ * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
+ * 1999/2000 Mike Phillips (mikep@linuxtr.net)
+ *
+ * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
+ * chipset.
+ *
+ * Base Driver Skeleton:
+ * Written 1993-94 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
+ * assistance and perseverance with the testing of this driver.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * 4/27/99 - Alpha Release 0.1.0
+ * First release to the public
+ *
+ * 6/8/99 - Official Release 0.2.0
+ * Merged into the kernel code
+ * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
+ * resource. Driver also reports the card name returned by
+ * the pci resource.
+ * 1/11/00 - Added spinlocks for smp
+ * 2/23/00 - Updated to dev_kfree_irq
+ * 3/10/00 - Fixed FDX enable which triggered other bugs also
+ * squashed.
+ * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
+ * The odd thing about the changes is that the fix for
+ * endian issues with the big-endian data in the arb, asb...
+ * was to always swab() the bytes, no matter what CPU.
+ * That's because the read[wl]() functions always swap the
+ * bytes on the way in on PPC.
+ * Fixing the hardware descriptors was another matter,
+ * because they weren't going through read[wl](), so all
+ * the results had to be stored in memory as le32 values. kdaaker
+ *
+ * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
+ *
+ * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
+ *
+ * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
+ * Change proc_fs behaviour, now one entry per adapter.
+ *
+ * 04/09/01 - Couple of bug fixes to the dma unmaps; ejecting the
+ * adapter while live no longer takes the system down with it.
+ *
+ * 06/02/01 - Clean up, copy skb for small packets
+ *
+ * 06/22/01 - Add EISR error handling routines
+ *
+ * 07/19/01 - Improve bad LAA reporting, strip out freemem
+ * into a separate function; it's called from 3
+ * different places now.
+ * 02/09/02 - Replaced sleep_on.
+ * 03/01/02 - Replace access to several registers from 32 bit to
+ * 16 bit. Fixes alignment errors on PPC 64 bit machines.
+ * Thanks to Al Trautman for this one.
+ * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
+ * silently ignored until the error checking code
+ * went into version 1.0.0
+ * 06/04/02 - Add correct start up sequence for the cardbus adapters.
+ * Required for strict compliance with pci power mgmt specs.
+ * To Do:
+ *
+ * Wake on lan
+ *
+ * If Problems do Occur
+ * Most problems can be rectified by either closing and opening the interface
+ * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
+ * if compiled into the kernel).
+ */
+
+/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
+
+#define OLYMPIC_DEBUG 0
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <net/checksum.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "olympic.h"
+
+/* I've got to put some intelligence into the version number so that Peter and I know
+ * which version of the code somebody has got.
+ * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
+ * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
+ *
+ * Official releases will only have an a.b.c version number format.
+ */
+
+static char version[] __devinitdata =
+"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
+
+static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
+ "Address Verification", "Neighbor Notification (Ring Poll)",
+ "Request Parameters","FDX Registration Request",
+ "FDX Duplicate Address Check", "Station registration Query Wait",
+ "Unknown stage"};
+
+static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
+ "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
+ "Duplicate Node Address","Request Parameters","Remove Received",
+ "Reserved", "Reserved", "No Monitor Detected for RPL",
+ "Monitor Contention failer for RPL", "FDX Protocol Error"};
+
+/* Module parameters */
+
+MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
+MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
+
+/* Ring Speed 0,4,16,100
+ * 0 = Autosense
+ * 4,16 = Selected speed only, no autosense
+ * This allows the card to be the first on the ring
+ * and become the active monitor.
+ * 100 = Nothing at present, 100mbps is autodetected
+ * if FDX is turned on. May be implemented in the future to
+ * fail if 100mbps is not detected.
+ *
+ * WARNING: Some hubs will allow you to insert
+ * at the wrong speed
+ */
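+/*
+ * Example (assuming the module is built as "olympic"):
+ *   modprobe olympic ringspeed=16,4
+ * forces the first adapter to 16Mbps and the second to 4Mbps.
+ */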
+
+static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
+module_param_array(ringspeed, int, NULL, 0);
+
+/* Packet buffer size */
+
+static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
+module_param_array(pkt_buf_sz, int, NULL, 0) ;
+
+/* Message Level */
+
+static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
+module_param_array(message_level, int, NULL, 0) ;
+
+/* Change network_monitor to receive mac frames through the arb channel.
+ * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
+ * device, i.e. tr0, tr1 etc.
+ * Intended to be used to create a ring-error reporting network module
+ * i.e. it will give you the source address of beaconers on the ring
+ */
+static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
+module_param_array(network_monitor, int, NULL, 0);
+
+static struct pci_device_id olympic_pci_tbl[] = {
+ {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
+
+
+static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int olympic_init(struct net_device *dev);
+static int olympic_open(struct net_device *dev);
+static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
+static int olympic_close(struct net_device *dev);
+static void olympic_set_rx_mode(struct net_device *dev);
+static void olympic_freemem(struct net_device *dev) ;
+static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats * olympic_get_stats(struct net_device *dev);
+static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
+static void olympic_arb_cmd(struct net_device *dev);
+static int olympic_change_mtu(struct net_device *dev, int mtu);
+static void olympic_srb_bh(struct net_device *dev) ;
+static void olympic_asb_bh(struct net_device *dev) ;
+static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
+
+static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev ;
+ struct olympic_private *olympic_priv;
+ static int card_no = -1 ;
+ int i ;
+
+ card_no++ ;
+
+ if ((i = pci_enable_device(pdev))) {
+ return i ;
+ }
+
+ pci_set_master(pdev);
+
+ if ((i = pci_request_regions(pdev,"olympic"))) {
+ goto op_disable_dev;
+ }
+
+ dev = alloc_trdev(sizeof(struct olympic_private)) ;
+ if (!dev) {
+ i = -ENOMEM;
+ goto op_free_dev;
+ }
+
+ olympic_priv = dev->priv ;
+
+ spin_lock_init(&olympic_priv->olympic_lock) ;
+
+ init_waitqueue_head(&olympic_priv->srb_wait);
+ init_waitqueue_head(&olympic_priv->trb_wait);
+#if OLYMPIC_DEBUG
+ printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
+#endif
+ dev->irq=pdev->irq;
+ dev->base_addr=pci_resource_start(pdev, 0);
+ olympic_priv->olympic_card_name = pci_name(pdev);
+ olympic_priv->pdev = pdev;
+ olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
+ olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
+ if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
+ goto op_free_iomap;
+ }
+
+ if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
+ olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
+ else
+ olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
+
+ dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
+ olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
+ olympic_priv->olympic_message_level = message_level[card_no] ;
+ olympic_priv->olympic_network_monitor = network_monitor[card_no];
+
+ if ((i = olympic_init(dev))) {
+ goto op_free_iomap;
+ }
+
+ dev->open=&olympic_open;
+ dev->hard_start_xmit=&olympic_xmit;
+ dev->change_mtu=&olympic_change_mtu;
+ dev->stop=&olympic_close;
+ dev->do_ioctl=NULL;
+ dev->set_multicast_list=&olympic_set_rx_mode;
+ dev->get_stats=&olympic_get_stats ;
+ dev->set_mac_address=&olympic_set_mac_address ;
+ SET_MODULE_OWNER(dev) ;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pci_set_drvdata(pdev,dev) ;
+ register_netdev(dev) ;
+ printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
+ if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
+ char proc_name[20] ;
+ strcpy(proc_name,"net/olympic_") ;
+ strcat(proc_name,dev->name) ;
+ create_proc_read_entry(proc_name,0,NULL,olympic_proc_info,(void *)dev) ;
+ printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
+ }
+ return 0 ;
+
+op_free_iomap:
+ if (olympic_priv->olympic_mmio)
+ iounmap(olympic_priv->olympic_mmio);
+ if (olympic_priv->olympic_lap)
+ iounmap(olympic_priv->olympic_lap);
+
+op_free_dev:
+ if (dev) free_netdev(dev); /* dev may be NULL if alloc_trdev() failed */
+ pci_release_regions(pdev);
+
+op_disable_dev:
+ pci_disable_device(pdev);
+ return i;
+}
+
+static int __devinit olympic_init(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv;
+ u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
+ unsigned long t;
+ unsigned int uaa_addr;
+
+ olympic_priv=(struct olympic_private *)dev->priv;
+ olympic_mmio=olympic_priv->olympic_mmio;
+
+ printk("%s \n", version);
+ printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
+
+ writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
+ t=jiffies;
+ while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
+ schedule();
+ if(jiffies-t > 40*HZ) {
+ printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
+ return -ENODEV;
+ }
+ }
+
+
+ /* Needed for cardbus */
+ if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
+ writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
+ }
+
+#if OLYMPIC_DEBUG
+ printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
+ printk("GPR: %x\n",readw(olympic_mmio+GPR));
+ printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
+#endif
+ /* Aaaahhh, you have got to be real careful setting GPR; the card
+ holds the previous values from flash memory, including autosense
+ and ring speed */
+
+ writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
+
+ if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
+ writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
+ } else if (olympic_priv->olympic_ring_speed == 16) {
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
+ writew(GPR_16MBPS, olympic_mmio+GPR);
+ } else if (olympic_priv->olympic_ring_speed == 4) {
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
+ writew(0, olympic_mmio+GPR);
+ }
+
+ writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
+
+#if OLYMPIC_DEBUG
+ printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
+#endif
+ /* Solo has been paused to meet the Cardbus power
+ * specs if the adapter is cardbus. Check to
+ * see it's been paused and then restart solo. The
+ * adapter should set the pause bit within 1 second.
+ */
+
+ if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
+ t=jiffies;
+ while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
+ schedule() ;
+ if(jiffies-t > 2*HZ) {
+ printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
+ return -ENODEV;
+ }
+ }
+ writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
+ }
+
+ /* start solo init */
+ writel((1<<15),olympic_mmio+SISR_MASK_SUM);
+
+ t=jiffies;
+ while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
+ schedule();
+ if(jiffies-t > 15*HZ) {
+ printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
+ return -ENODEV;
+ }
+ }
+
+ writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
+
+#if OLYMPIC_DEBUG
+ printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
+#endif
+
+ init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
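+ /* LAPWWO holds an address in adapter shared RAM; masking with
+ * ~0xf800 keeps the offset within the 2KB LAP window that was
+ * ioremap()ed in olympic_probe(). */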
+
+#if OLYMPIC_DEBUG
+{
+ int i;
+ printk("init_srb(%p): ",init_srb);
+ for(i=0;i<20;i++)
+ printk("%x ",readb(init_srb+i));
+ printk("\n");
+}
+#endif
+ if(readw(init_srb+6)) {
+ printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
+ return -ENODEV;
+ }
+
+ if (olympic_priv->olympic_message_level) {
+ if ( readb(init_srb +2) & 0x40) {
+ printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
+ } else {
+ printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
+ }
+ }
+
+ uaa_addr=swab16(readw(init_srb+8));
+
+#if OLYMPIC_DEBUG
+ printk("UAA resides at %x\n",uaa_addr);
+#endif
+
+ writel(uaa_addr,olympic_mmio+LAPA);
+ adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
+
+#if OLYMPIC_DEBUG
+ printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
+ readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
+#endif
+
+ memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
+
+ olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
+ olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
+
+ return 0;
+
+}
+
+static int olympic_open(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
+ unsigned long flags, t;
+ int i, open_finished = 1 ;
+ u8 resp, err;
+
+ DECLARE_WAITQUEUE(wait,current) ;
+
+ olympic_init(dev);
+
+ if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
+ return -EAGAIN;
+ }
+
+#if OLYMPIC_DEBUG
+ printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
+ printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
+#endif
+
+ writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
+
+ writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
+
+ writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
+
+ /* adapter is closed, so SRB is pointed to by LAPWWO */
+
+ writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
+ init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
+
+#if OLYMPIC_DEBUG
+ printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
+ printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
+ printk("Before the open command \n");
+#endif
+ do {
+ memset_io(init_srb,0,SRB_COMMAND_SIZE);
+
+ writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
+ writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
+
+ /* If Network Monitor, instruct card to copy MAC frames through the ARB */
+ if (olympic_priv->olympic_network_monitor)
+ writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
+ else
+ writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
+
+ /* Test the OR of the first 3 bytes, as it's entirely possible for
+ * someone to set the first 2 bytes to zero (strictly an error,
+ * since the first byte must have bit 6 set to 1) */
+
+ if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
+ writeb(olympic_priv->olympic_laa[0],init_srb+12);
+ writeb(olympic_priv->olympic_laa[1],init_srb+13);
+ writeb(olympic_priv->olympic_laa[2],init_srb+14);
+ writeb(olympic_priv->olympic_laa[3],init_srb+15);
+ writeb(olympic_priv->olympic_laa[4],init_srb+16);
+ writeb(olympic_priv->olympic_laa[5],init_srb+17);
+ memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
+ }
+ writeb(1,init_srb+30);
+
+ spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
+ olympic_priv->srb_queued=1;
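+ /* srb_queued == 1 means a task is sleeping on srb_wait for the
+ * SRB reply; elsewhere a value of 2 tells the interrupt handler
+ * to run olympic_srb_bh() instead of waking anyone. */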
+
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+ spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
+
+ t = jiffies ;
+
+ add_wait_queue(&olympic_priv->srb_wait,&wait) ;
+ set_current_state(TASK_INTERRUPTIBLE) ;
+
+ while(olympic_priv->srb_queued) {
+ schedule() ;
+ if(signal_pending(current)) {
+ printk(KERN_WARNING "%s: Signal received in open.\n",
+ dev->name);
+ printk(KERN_WARNING "SISR=%x LISR=%x\n",
+ readl(olympic_mmio+SISR),
+ readl(olympic_mmio+LISR));
+ olympic_priv->srb_queued=0;
+ break;
+ }
+ if ((jiffies-t) > 10*HZ) {
+ printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
+ olympic_priv->srb_queued=0;
+ break ;
+ }
+ set_current_state(TASK_INTERRUPTIBLE) ;
+ }
+ remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
+ set_current_state(TASK_RUNNING) ;
+ olympic_priv->srb_queued = 0 ;
+#if OLYMPIC_DEBUG
+ printk("init_srb(%p): ",init_srb);
+ for(i=0;i<20;i++)
+ printk("%02x ",readb(init_srb+i));
+ printk("\n");
+#endif
+
+ /* If we get the same return response as we set, the interrupt wasn't raised and the open
+ * timed out.
+ */
+
+ switch (resp = readb(init_srb+2)) {
+ case OLYMPIC_CLEAR_RET_CODE:
+ printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
+ goto out;
+ case 0:
+ open_finished = 1;
+ break;
+ case 0x07:
+ if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
+ printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
+ open_finished = 0 ;
+ continue;
+ }
+
+ err = readb(init_srb+7);
+
+ if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
+ printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
+ } else {
+ printk(KERN_WARNING "%s: %s - %s\n", dev->name,
+ open_maj_error[(err & 0xf0) >> 4],
+ open_min_error[(err & 0x0f)]);
+ }
+ goto out;
+
+ case 0x32:
+ printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ olympic_priv->olympic_laa[0],
+ olympic_priv->olympic_laa[1],
+ olympic_priv->olympic_laa[2],
+ olympic_priv->olympic_laa[3],
+ olympic_priv->olympic_laa[4],
+ olympic_priv->olympic_laa[5]) ;
+ goto out;
+
+ default:
+ printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
+ goto out;
+
+ }
+ } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
+
+ if (readb(init_srb+18) & (1<<3))
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
+
+ if (readb(init_srb+18) & (1<<1))
+ olympic_priv->olympic_ring_speed = 100 ;
+ else if (readb(init_srb+18) & 1)
+ olympic_priv->olympic_ring_speed = 16 ;
+ else
+ olympic_priv->olympic_ring_speed = 4 ;
+
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
+
+ olympic_priv->asb = swab16(readw(init_srb+8));
+ olympic_priv->srb = swab16(readw(init_srb+10));
+ olympic_priv->arb = swab16(readw(init_srb+12));
+ olympic_priv->trb = swab16(readw(init_srb+16));
+
+ olympic_priv->olympic_receive_options = 0x01 ;
+ olympic_priv->olympic_copy_all_options = 0 ;
+
+ /* setup rx ring */
+
+ writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end-of-frame generates interrupts */
+
+ writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
+
+ for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
+
+ struct sk_buff *skb;
+
+ skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
+ if(skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
+ skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
+ olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
+ olympic_priv->rx_ring_skb[i]=skb;
+ }
+
+ if (i==0) {
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
+ goto out;
+ }
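+ /* A partially filled ring is tolerated: the descriptor count and
+ * enqueue registers below are written with i, the number of rx
+ * buffers actually allocated. */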
+
+ olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
+ sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
+ writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
+ writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
+ writew(i, olympic_mmio+RXDESCQCNT);
+
+ olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
+ sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
+ writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
+
+ olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
+ olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
+
+ writew(i, olympic_mmio+RXSTATQCNT);
+
+#if OLYMPIC_DEBUG
+ printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
+ printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
+ printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
+ printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
+ printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
+
+ printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
+ printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
+ olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
+#endif
+
+ writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
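+ /* The top bit of RXENQ is flipped on every write while the low
+ * bits carry the number of buffers being handed to the adapter;
+ * presumably the toggle lets the hardware spot a fresh enqueue. */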
+
+#if OLYMPIC_DEBUG
+ printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
+ printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
+ printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
+#endif
+
+ writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
+
+ /* setup tx ring */
+
+ writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
+ for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
+ olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
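+ /* 0xdeadbeef marks a tx descriptor as unused; the tx-completion
+ * path in olympic_interrupt() writes it back after unmapping and
+ * freeing the skb. */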
+
+ olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
+ olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
+ sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
+ writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
+ writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
+ writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
+
+ olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
+ sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
+ writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
+ writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
+
+ olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
+ olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
+
+ writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
+ writel(0,olympic_mmio+EISR) ;
+ writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
+ writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
+
+#if OLYMPIC_DEBUG
+ printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
+ printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
+#endif
+
+ if (olympic_priv->olympic_network_monitor) {
+ u8 __iomem *oat ;
+ u8 __iomem *opt ;
+ oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
+ opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
+
+ printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
+ printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
+ printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
+ }
+
+ netif_start_queue(dev);
+ return 0;
+
+out:
+ free_irq(dev->irq, dev);
+ return -EIO;
+}
+
+/*
+ * When we enter the rx routine we do not know how many frames have been
+ * queued on the rx channel. Therefore we start at the next rx status
+ * position and travel around the receive ring until we have completed
+ * all the frames.
+ *
+ * This means that we may process the frame before we receive the end
+ * of frame interrupt. This is why we always test the status instead
+ * of blindly processing the next frame.
+ *
+ * We also remove the last 4 bytes of the packet; these are just
+ * token ring trailer info, and they upset protocols that don't
+ * check their own length, e.g. SNA.
+ *
+ */
+static void olympic_rx(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
+ struct olympic_rx_status *rx_status;
+ struct olympic_rx_desc *rx_desc ;
+ int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
+ struct sk_buff *skb, *skb2;
+ int i;
+
+ rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
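+ /* Ring indices are advanced with "& (OLYMPIC_RX_RING_SIZE - 1)"
+ * throughout, which relies on the ring sizes being powers of two. */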
+
+ while (rx_status->status_buffercnt) {
+ u32 l_status_buffercnt;
+
+ olympic_priv->rx_status_last_received++ ;
+ olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
+#if OLYMPIC_DEBUG
+ printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
+#endif
+ length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
+ buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
+ i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
+ frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
+
+#if OLYMPIC_DEBUG
+ printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
+#endif
+ l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
+ if(l_status_buffercnt & 0xC0000000) {
+ if (l_status_buffercnt & 0x3B000000) {
+ if (olympic_priv->olympic_message_level) {
+ if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
+ printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
+ if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
+ printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
+ if (l_status_buffercnt & (1<<27)) /* No receive buffers */
+ printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
+ if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
+ printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
+ if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
+ printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
+ }
+ olympic_priv->rx_ring_last_received += i ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
+ olympic_priv->olympic_stats.rx_errors++;
+ } else {
+
+ if (buffer_cnt == 1) {
+ skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
+ } else {
+ skb = dev_alloc_skb(length) ;
+ }
+
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
+ olympic_priv->olympic_stats.rx_dropped++ ;
+ /* Update counters even though we don't transfer the frame */
+ olympic_priv->rx_ring_last_received += i ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
+ } else {
+ skb->dev = dev ;
+
+ /* Optimise based upon number of buffers used.
+ If only one buffer is used we can simply swap the buffers around.
+ If more than one then we must use the new buffer and copy the information
+ first. Ideally all frames would be in a single buffer, this can be tuned by
+ altering the buffer size. If the length of the packet is less than
+ 1500 bytes we're going to copy it over anyway to stop packets getting
+ dropped from sockets with buffers smaller than our pkt_buf_sz. */
+
+ if (buffer_cnt==1) {
+ olympic_priv->rx_ring_last_received++ ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
+ rx_ring_last_received = olympic_priv->rx_ring_last_received ;
+ if (length > 1500) {
+ skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
+ /* unmap buffer */
+ pci_unmap_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ skb_put(skb2,length-4);
+ skb2->protocol = tr_type_trans(skb2,dev);
+ olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
+ cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
+ olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+ olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
+ cpu_to_le32(olympic_priv->pkt_buf_sz);
+ olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
+ netif_rx(skb2) ;
+ } else {
+ pci_dma_sync_single_for_cpu(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
+ pci_dma_sync_single_for_device(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ skb->protocol = tr_type_trans(skb,dev) ;
+ netif_rx(skb) ;
+ }
+ } else {
+ do { /* Walk the buffers */
+ olympic_priv->rx_ring_last_received++ ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
+ rx_ring_last_received = olympic_priv->rx_ring_last_received ;
+ pci_dma_sync_single_for_cpu(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
+ cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
+ memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
+ pci_dma_sync_single_for_device(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ } while (--i) ;
+ skb_trim(skb,skb->len-4) ;
+ skb->protocol = tr_type_trans(skb,dev);
+ netif_rx(skb) ;
+ }
+ dev->last_rx = jiffies ;
+ olympic_priv->olympic_stats.rx_packets++ ;
+ olympic_priv->olympic_stats.rx_bytes += length ;
+ } /* if skb == null */
+ } /* If status & 0x3b */
+
+ } else { /*if buffercnt & 0xC */
+ olympic_priv->rx_ring_last_received += i ;
+ olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
+ }
+
+ rx_status->fragmentcnt_framelen = 0 ;
+ rx_status->status_buffercnt = 0 ;
+ rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
+
+ writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
+ } /* while */
+
+}
+
+static void olympic_freemem(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ int i;
+
+ for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
+ if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
+ dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
+ olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
+ }
+ if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
+ pci_unmap_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
+ olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
+ }
+ olympic_priv->rx_status_last_received++;
+ olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
+ }
+ /* unmap rings */
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
+ sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
+ sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
+
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
+ sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
+ sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
+
+ return ;
+}
+
+static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev= (struct net_device *)dev_id;
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
+ u32 sisr;
+ u8 __iomem *adapter_check_area ;
+
+ /*
+ * Read sisr but don't reset it yet.
+ * The indication bit may have been set but the interrupt latch
+ * bit may not be set, so we'd lose the interrupt later.
+ */
+ sisr=readl(olympic_mmio+SISR) ;
+ if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
+ return IRQ_NONE;
+ sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */
+
+ spin_lock(&olympic_priv->olympic_lock);
+
+ /* Hotswap gives us this on removal */
+ if (sisr == 0xffffffff) {
+ printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
+ spin_unlock(&olympic_priv->olympic_lock) ;
+ return IRQ_NONE;
+ }
+
+ if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
+ SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
+
+ /* If we ever get this, the adapter is seriously dead. Only a reset is going to
+ * bring it back to life. We're talking pci bus errors and such like :( */
+ if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
+ printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
+ printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
+ printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
+ printk(KERN_ERR "or the linux-tr mailing list.\n") ;
+ wake_up_interruptible(&olympic_priv->srb_wait);
+ spin_unlock(&olympic_priv->olympic_lock) ;
+ return IRQ_HANDLED;
+ } /* SISR_ERR */
+
+ if(sisr & SISR_SRB_REPLY) {
+ if(olympic_priv->srb_queued==1) {
+ wake_up_interruptible(&olympic_priv->srb_wait);
+ } else if (olympic_priv->srb_queued==2) {
+ olympic_srb_bh(dev) ;
+ }
+ olympic_priv->srb_queued=0;
+ } /* SISR_SRB_REPLY */
+
+ /* We shouldn't ever miss the Tx interrupt, but you never know; hence the loop to ensure
+ we get all tx completions. */
+ if (sisr & SISR_TX1_EOF) {
+ while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
+ olympic_priv->tx_ring_last_status++;
+ olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
+ olympic_priv->free_tx_ring_entries++;
+ olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
+ olympic_priv->olympic_stats.tx_packets++ ;
+ pci_unmap_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
+ olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
+ olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
+ olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
+ }
+ netif_wake_queue(dev);
+ } /* SISR_TX1_EOF */
+
+ if (sisr & SISR_RX_STATUS) {
+ olympic_rx(dev);
+ } /* SISR_RX_STATUS */
+
+ if (sisr & SISR_ADAPTER_CHECK) {
+ netif_stop_queue(dev);
+ printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
+ writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
+ adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
+ printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
+ spin_unlock(&olympic_priv->olympic_lock) ;
+ return IRQ_HANDLED;
+ } /* SISR_ADAPTER_CHECK */
+
+ if (sisr & SISR_ASB_FREE) {
+ /* Wake up anything that is waiting for the asb response */
+ if (olympic_priv->asb_queued) {
+ olympic_asb_bh(dev) ;
+ }
+ } /* SISR_ASB_FREE */
+
+ if (sisr & SISR_ARB_CMD) {
+ olympic_arb_cmd(dev) ;
+ } /* SISR_ARB_CMD */
+
+ if (sisr & SISR_TRB_REPLY) {
+ /* Wake up anything that is waiting for the trb response */
+ if (olympic_priv->trb_queued) {
+ wake_up_interruptible(&olympic_priv->trb_wait);
+ }
+ olympic_priv->trb_queued = 0 ;
+ } /* SISR_TRB_REPLY */
+
+ if (sisr & SISR_RX_NOBUF) {
+ /* According to the documentation, we don't have to do anything, but trapping it keeps it out of
+ /var/log/messages. */
+ } /* SISR_RX_NOBUF */
+ } else {
+ printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
+ printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
+ } /* One of the interrupts we want */
+ writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
+
+ spin_unlock(&olympic_priv->olympic_lock) ;
+ return IRQ_HANDLED;
+}
+
+static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
+ unsigned long flags ;
+
+ spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
+
+ netif_stop_queue(dev);
+
+ if(olympic_priv->free_tx_ring_entries) {
+ olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
+ cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
+ olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
+ olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
+ olympic_priv->free_tx_ring_entries--;
+
+ olympic_priv->tx_ring_free++;
+ olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
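+		/* Flip the queue toggle bit (bit 15) read back from TXENQ_1 and
+		   write it back with a count of one to hand this descriptor to
+		   the adapter. */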
+ writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
+ return 1;
+ }
+
+}
+
+
+static int olympic_close(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
+ unsigned long t,flags;
+
+ DECLARE_WAITQUEUE(wait,current) ;
+
+ netif_stop_queue(dev);
+
+ writel(olympic_priv->srb,olympic_mmio+LAPA);
+ srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
+
+ writeb(SRB_CLOSE_ADAPTER,srb+0);
+ writeb(0,srb+1);
+ writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
+
+ add_wait_queue(&olympic_priv->srb_wait,&wait) ;
+ set_current_state(TASK_INTERRUPTIBLE) ;
+
+ spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
+ olympic_priv->srb_queued=1;
+
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+ spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
+
+ while(olympic_priv->srb_queued) {
+
+ t = schedule_timeout(60*HZ);
+
+ if(signal_pending(current)) {
+ printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
+ printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
+ olympic_priv->srb_queued=0;
+ break;
+ }
+
+ if (t == 0) {
+ printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
+ }
+ olympic_priv->srb_queued=0;
+ }
+ remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
+
+ olympic_priv->rx_status_last_received++;
+ olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
+
+ olympic_freemem(dev) ;
+
+ /* reset tx/rx fifo's and busmaster logic */
+
+ writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
+ udelay(1);
+ writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
+
+#if OLYMPIC_DEBUG
+ {
+ int i ;
+ printk("srb(%p): ",srb);
+ for(i=0;i<4;i++)
+ printk("%x ",readb(srb+i));
+ printk("\n");
+ }
+#endif
+ free_irq(dev->irq,dev);
+
+ return 0;
+
+}
+
+static void olympic_set_rx_mode(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
+ u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
+ u8 options = 0;
+ u8 __iomem *srb;
+ struct dev_mc_list *dmi ;
+ unsigned char dev_mc_address[4] ;
+ int i ;
+
+ writel(olympic_priv->srb,olympic_mmio+LAPA);
+ srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
+ options = olympic_priv->olympic_copy_all_options;
+
+ if (dev->flags&IFF_PROMISC)
+ options |= 0x61 ;
+ else
+ options &= ~0x61 ;
+
+ /* Only issue the srb if there is a change in options */
+
+ if ((options ^ olympic_priv->olympic_copy_all_options)) {
+
+ /* Now to issue the srb command to alter the copy.all.options */
+
+ writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
+ writeb(0,srb+1);
+ writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
+ writeb(0,srb+3);
+ writeb(olympic_priv->olympic_receive_options,srb+4);
+ writeb(options,srb+5);
+
+ olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
+
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+
+ olympic_priv->olympic_copy_all_options = options ;
+
+ return ;
+ }
+
+ /* Set the functional addresses we need for multicast */
+
+ dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
+
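+	/* Build the 4 byte functional address mask by OR-ing bytes 2-5 of
+	   every address on the multicast list. */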
+ for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ dev_mc_address[0] |= dmi->dmi_addr[2] ;
+ dev_mc_address[1] |= dmi->dmi_addr[3] ;
+ dev_mc_address[2] |= dmi->dmi_addr[4] ;
+ dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ }
+
+ writeb(SRB_SET_FUNC_ADDRESS,srb+0);
+ writeb(0,srb+1);
+ writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
+ writeb(0,srb+3);
+ writeb(0,srb+4);
+ writeb(0,srb+5);
+ writeb(dev_mc_address[0],srb+6);
+ writeb(dev_mc_address[1],srb+7);
+ writeb(dev_mc_address[2],srb+8);
+ writeb(dev_mc_address[3],srb+9);
+
+ olympic_priv->srb_queued = 2 ;
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+
+}
+
+static void olympic_srb_bh(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
+ u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
+ u8 __iomem *srb;
+
+ writel(olympic_priv->srb,olympic_mmio+LAPA);
+ srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
+
+ switch (readb(srb)) {
+
+ /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
+ * At some point we should do something if we get an error, such as
+ * resetting the IFF_PROMISC flag in dev
+ */
+
+ case SRB_MODIFY_RECEIVE_OPTIONS:
+ switch (readb(srb+2)) {
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
+ break ;
+ default:
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
+ break ;
+ } /* switch srb[2] */
+ break ;
+
+ /* SRB_SET_GROUP_ADDRESS - Multicast group setting
+ */
+
+ case SRB_SET_GROUP_ADDRESS:
+ switch (readb(srb+2)) {
+ case 0x00:
+ break ;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
+ break ;
+ case 0x3c:
+ printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
+ break ;
+ case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
+ printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
+ break ;
+ case 0x55:
+ printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
+ break ;
+ default:
+ break ;
+ } /* switch srb[2] */
+ break ;
+
+ /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
+ */
+
+ case SRB_RESET_GROUP_ADDRESS:
+ switch (readb(srb+2)) {
+ case 0x00:
+ break ;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
+ break ;
+ case 0x39: /* Must deal with this if individual multicast addresses used */
+ printk(KERN_INFO "%s: Group address not found \n",dev->name);
+ break ;
+ default:
+ break ;
+ } /* switch srb[2] */
+ break ;
+
+
+ /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
+ */
+
+ case SRB_SET_FUNC_ADDRESS:
+ switch (readb(srb+2)) {
+ case 0x00:
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
+ break ;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
+ break ;
+ default:
+ break ;
+ } /* switch srb[2] */
+ break ;
+
+ /* SRB_READ_LOG - Read and reset the adapter error counters
+ */
+
+ case SRB_READ_LOG:
+ switch (readb(srb+2)) {
+ case 0x00:
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
+ break ;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
+ break ;
+
+ } /* switch srb[2] */
+ break ;
+
+ /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
+
+ case SRB_READ_SR_COUNTERS:
+ switch (readb(srb+2)) {
+ case 0x00:
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
+ break ;
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ break ;
+ case 0x04:
+ printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
+ break ;
+ default:
+ break ;
+ } /* switch srb[2] */
+ break ;
+
+ default:
+ printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
+ break ;
+ } /* switch srb[0] */
+
+}
+
+static struct net_device_stats * olympic_get_stats(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv ;
+ olympic_priv=(struct olympic_private *) dev->priv;
+ return (struct net_device_stats *) &olympic_priv->olympic_stats;
+}
+
+static int olympic_set_mac_address (struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = addr ;
+ struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
+
+ if (netif_running(dev)) {
+ printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
+ return -EIO ;
+ }
+
+ memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
+
+ if (olympic_priv->olympic_message_level) {
+ printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
+ olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
+ olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
+ olympic_priv->olympic_laa[5]);
+ }
+
+ return 0 ;
+}
+
+static void olympic_arb_cmd(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
+ u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
+ u8 __iomem *arb_block, *asb_block, *srb ;
+ u8 header_len ;
+ u16 frame_len, buffer_len ;
+ struct sk_buff *mac_frame ;
+ u8 __iomem *buf_ptr ;
+ u8 __iomem *frame_data ;
+ u16 buff_off ;
+ u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
+ u8 fdx_prot_error ;
+ u16 next_ptr;
+
+ arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
+ asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
+ srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
+
+ if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
+
+ header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
+ frame_len = swab16(readw(arb_block + 10)) ;
+
+ buff_off = swab16(readw(arb_block + 6)) ;
+
+ buf_ptr = olympic_priv->olympic_lap + buff_off ;
+
+#if OLYMPIC_DEBUG
+{
+ int i;
+ frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
+
+ for (i=0 ; i < 14 ; i++) {
+ printk("Loc %d = %02x\n",i,readb(frame_data + i));
+ }
+
+ printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+}
+#endif
+ mac_frame = dev_alloc_skb(frame_len) ;
+ if (!mac_frame) {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
+ goto drop_frame;
+ }
+
+ /* Walk the buffer chain, creating the frame */
+
+ do {
+ frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
+ buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+ memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
+ next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
+ } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
+
+ if (olympic_priv->olympic_network_monitor) {
+ struct trh_hdr *mac_hdr ;
+ printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
+ mac_hdr = (struct trh_hdr *)mac_frame->data ;
+ printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
+ printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
+ }
+ mac_frame->dev = dev ;
+ mac_frame->protocol = tr_type_trans(mac_frame,dev);
+ netif_rx(mac_frame) ;
+ dev->last_rx = jiffies;
+
+drop_frame:
+ /* Now tell the card we have dealt with the received frame */
+
+ /* Set LISR Bit 1 */
+ writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
+
+ /* Is the ASB free ? */
+
+ if (readb(asb_block + 2) != 0xff) {
+ olympic_priv->asb_queued = 1 ;
+ writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
+ return ;
+ /* Drop out and wait for the bottom half to be run */
+ }
+
+ writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
+ writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
+ writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
+ writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
+
+ writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
+
+ olympic_priv->asb_queued = 2 ;
+
+ return ;
+
+ } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
+ lan_status = swab16(readw(arb_block+6));
+ fdx_prot_error = readb(arb_block+8) ;
+
+ /* Issue ARB Free */
+ writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
+
+ lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
+
+ if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
+ if (lan_status_diff & LSC_LWF)
+ printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
+ if (lan_status_diff & LSC_ARW)
+ printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
+ if (lan_status_diff & LSC_FPE)
+ printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
+ if (lan_status_diff & LSC_RR)
+ printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
+
+ /* Adapter has been closed by the hardware */
+
+ /* reset tx/rx fifo's and busmaster logic */
+
+ writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
+ udelay(1);
+ writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
+ netif_stop_queue(dev);
+ olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
+ printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ } /* If serious error */
+
+ if (olympic_priv->olympic_message_level) {
+ if (lan_status_diff & LSC_SIG_LOSS)
+ printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ if (lan_status_diff & LSC_HARD_ERR)
+ printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ if (lan_status_diff & LSC_SOFT_ERR)
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ if (lan_status_diff & LSC_TRAN_BCN)
+				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
+ if (lan_status_diff & LSC_SS)
+ printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ if (lan_status_diff & LSC_RING_REC)
+ printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
+ if (lan_status_diff & LSC_FDX_MODE)
+ printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
+ }
+
+ if (lan_status_diff & LSC_CO) {
+
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+
+ /* Issue READ.LOG command */
+
+ writeb(SRB_READ_LOG, srb);
+ writeb(0,srb+1);
+ writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
+ writeb(0,srb+3);
+ writeb(0,srb+4);
+ writeb(0,srb+5);
+
+ olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
+
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+
+ }
+
+ if (lan_status_diff & LSC_SR_CO) {
+
+ if (olympic_priv->olympic_message_level)
+ printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
+
+ /* Issue a READ.SR.COUNTERS */
+
+ writeb(SRB_READ_SR_COUNTERS,srb);
+ writeb(0,srb+1);
+ writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
+ writeb(0,srb+3);
+
+ olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
+
+ writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
+
+ }
+
+ olympic_priv->olympic_lan_status = lan_status ;
+
+ } /* Lan.change.status */
+ else
+ printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+}
+
+static void olympic_asb_bh(struct net_device *dev)
+{
+ struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
+ u8 __iomem *arb_block, *asb_block ;
+
+ arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
+ asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
+
+ if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
+
+ writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
+ writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
+ writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
+ writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
+
+ writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
+ olympic_priv->asb_queued = 2 ;
+
+ return ;
+ }
+
+ if (olympic_priv->asb_queued == 2) {
+ switch (readb(asb_block+2)) {
+ case 0x01:
+ printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ break ;
+ case 0x26:
+ printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ break ;
+ case 0xFF:
+ /* Valid response, everything should be ok again */
+ break ;
+ default:
+ printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
+ break ;
+ }
+ }
+ olympic_priv->asb_queued = 0 ;
+}
+
+static int olympic_change_mtu(struct net_device *dev, int mtu)
+{
+ struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
+ u16 max_mtu ;
+
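+	/* The maximum frame size depends on the ring speed: roughly 4.5KB
+	   at 4Mbps and 18KB at 16Mbps. */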
+ if (olympic_priv->olympic_ring_speed == 4)
+ max_mtu = 4500 ;
+ else
+ max_mtu = 18000 ;
+
+ if (mtu > max_mtu)
+ return -EINVAL ;
+ if (mtu < 100)
+ return -EINVAL ;
+
+ dev->mtu = mtu ;
+ olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
+
+ return 0 ;
+}
+
+static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+{
+ struct net_device *dev = (struct net_device *)data ;
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+ u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
+ u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
+ int size = 0 ;
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+
+ size = sprintf(buffer,
+ "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
+ size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
+ dev->name);
+
+ size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
+ dev->name,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
+ readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
+
+ size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
+
+ size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
+ dev->name) ;
+
+ size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n",
+ dev->name,
+ readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
+ readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
+ readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
+ readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
+
+ size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
+ dev->name) ;
+
+ size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n",
+ dev->name,
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
+ readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
+
+ size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
+ dev->name) ;
+
+ size += sprintf(buffer+size, "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n",
+ dev->name,
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
+ swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
+ readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
+
+ len=size;
+ pos=begin+size;
+ if (pos<offset) {
+ len=0;
+ begin=pos;
+ }
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+
+static void __devexit olympic_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev) ;
+ struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
+
+ if (olympic_priv->olympic_network_monitor) {
+ char proc_name[20] ;
+ strcpy(proc_name,"net/olympic_") ;
+ strcat(proc_name,dev->name) ;
+ remove_proc_entry(proc_name,NULL);
+ }
+ unregister_netdev(dev) ;
+ iounmap(olympic_priv->olympic_mmio) ;
+ iounmap(olympic_priv->olympic_lap) ;
+ pci_release_regions(pdev) ;
+ pci_set_drvdata(pdev,NULL) ;
+ free_netdev(dev) ;
+}
+
+static struct pci_driver olympic_driver = {
+ .name = "olympic",
+ .id_table = olympic_pci_tbl,
+ .probe = olympic_probe,
+ .remove = __devexit_p(olympic_remove_one),
+};
+
+static int __init olympic_pci_init(void)
+{
+ return pci_module_init (&olympic_driver) ;
+}
+
+static void __exit olympic_pci_cleanup(void)
+{
+ pci_unregister_driver(&olympic_driver) ;
+}
+
+
+module_init(olympic_pci_init) ;
+module_exit(olympic_pci_cleanup) ;
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
new file mode 100644
index 000000000000..2fc59c997468
--- /dev/null
+++ b/drivers/net/tokenring/olympic.h
@@ -0,0 +1,322 @@
+/*
+ * olympic.h (c) 1999 Peter De Schrijver All Rights Reserved
+ * 1999,2000 Mike Phillips (mikep@linuxtr.net)
+ *
+ * Linux driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset.
+ *
+ * Base Driver Skeleton:
+ * Written 1993-94 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ */
+
+#define CID 0x4e
+
+#define BCTL 0x70
+#define BCTL_SOFTRESET (1<<15)
+#define BCTL_MIMREB (1<<6)
+#define BCTL_MODE_INDICATOR (1<<5)
+
+#define GPR 0x4a
+#define GPR_OPTI_BF (1<<6)
+#define GPR_NEPTUNE_BF (1<<4)
+#define GPR_AUTOSENSE (1<<2)
+#define GPR_16MBPS (1<<3)
+
+#define PAG 0x85
+#define LBC 0x8e
+
+#define LISR 0x10
+#define LISR_SUM 0x14
+#define LISR_RWM 0x18
+
+#define LISR_LIE (1<<15)
+#define LISR_SLIM (1<<13)
+#define LISR_SLI (1<<12)
+#define LISR_PCMSRMASK (1<<11)
+#define LISR_PCMSRINT (1<<10)
+#define LISR_WOLMASK (1<<9)
+#define LISR_WOL (1<<8)
+#define LISR_SRB_CMD (1<<5)
+#define LISR_ASB_REPLY (1<<4)
+#define LISR_ASB_FREE_REQ (1<<2)
+#define LISR_ARB_FREE (1<<1)
+#define LISR_TRB_FRAME (1<<0)
+
+#define SISR 0x20
+#define SISR_SUM 0x24
+#define SISR_RWM 0x28
+#define SISR_RR 0x2C
+#define SISR_RESMASK 0x30
+#define SISR_MASK 0x54
+#define SISR_MASK_SUM 0x58
+#define SISR_MASK_RWM 0x5C
+
+#define SISR_TX2_IDLE (1<<31)
+#define SISR_TX2_HALT (1<<29)
+#define SISR_TX2_EOF (1<<28)
+#define SISR_TX1_IDLE (1<<27)
+#define SISR_TX1_HALT (1<<25)
+#define SISR_TX1_EOF (1<<24)
+#define SISR_TIMEOUT (1<<23)
+#define SISR_RX_NOBUF (1<<22)
+#define SISR_RX_STATUS (1<<21)
+#define SISR_RX_HALT (1<<18)
+#define SISR_RX_EOF_EARLY (1<<16)
+#define SISR_MI (1<<15)
+#define SISR_PI (1<<13)
+#define SISR_ERR (1<<9)
+#define SISR_ADAPTER_CHECK (1<<6)
+#define SISR_SRB_REPLY (1<<5)
+#define SISR_ASB_FREE (1<<4)
+#define SISR_ARB_CMD (1<<3)
+#define SISR_TRB_REPLY (1<<2)
+
+#define EISR 0x34
+#define EISR_RWM 0x38
+#define EISR_MASK 0x3c
+#define EISR_MASK_OPTIONS 0x001FFF7F
+
+#define LAPA 0x60
+#define LAPWWO 0x64
+#define LAPWWC 0x68
+#define LAPCTL 0x6C
+#define LAIPD 0x78
+#define LAIPDDINC 0x7C
+
+#define TIMER 0x50
+
+#define CLKCTL 0x74
+#define CLKCTL_PAUSE (1<<15)
+
+#define PM_CON 0x4
+
+#define BMCTL_SUM 0x40
+#define BMCTL_RWM 0x44
+#define BMCTL_TX2_DIS (1<<30)
+#define BMCTL_TX1_DIS (1<<26)
+#define BMCTL_RX_DIS (1<<22)
+
+#define BMASR 0xcc
+
+#define RXDESCQ 0x90
+#define RXDESCQCNT 0x94
+#define RXCDA 0x98
+#define RXENQ 0x9C
+#define RXSTATQ 0xA0
+#define RXSTATQCNT 0xA4
+#define RXCSA 0xA8
+#define RXCLEN 0xAC
+#define RXHLEN 0xAE
+
+#define TXDESCQ_1 0xb0
+#define TXDESCQ_2 0xd0
+#define TXDESCQCNT_1 0xb4
+#define TXDESCQCNT_2 0xd4
+#define TXCDA_1 0xb8
+#define TXCDA_2 0xd8
+#define TXENQ_1 0xbc
+#define TXENQ_2 0xdc
+#define TXSTATQ_1 0xc0
+#define TXSTATQ_2 0xe0
+#define TXSTATQCNT_1 0xc4
+#define TXSTATQCNT_2 0xe4
+#define TXCSA_1 0xc8
+#define TXCSA_2 0xe8
+/* Cardbus */
+#define FERMASK 0xf4
+#define FERMASK_INT_BIT (1<<15)
+
+#define OLYMPIC_IO_SPACE 256
+
+#define SRB_COMMAND_SIZE 50
+
+#define OLYMPIC_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
+
+/* Defines for LAN STATUS CHANGE reports */
+#define LSC_SIG_LOSS 0x8000
+#define LSC_HARD_ERR 0x4000
+#define LSC_SOFT_ERR 0x2000
+#define LSC_TRAN_BCN 0x1000
+#define LSC_LWF 0x0800
+#define LSC_ARW 0x0400
+#define LSC_FPE 0x0200
+#define LSC_RR 0x0100
+#define LSC_CO 0x0080
+#define LSC_SS 0x0040
+#define LSC_RING_REC 0x0020
+#define LSC_SR_CO 0x0010
+#define LSC_FDX_MODE 0x0004
+
+/* Defines for OPEN ADAPTER command */
+
+#define OPEN_ADAPTER_EXT_WRAP (1<<15)
+#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
+#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
+#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
+#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
+#define OPEN_ADAPTER_ENABLE_EC (1<<10)
+#define OPEN_ADAPTER_CONTENDER (1<<8)
+#define OPEN_ADAPTER_PASS_BEACON (1<<7)
+#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
+#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
+#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
+#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
+#define OPEN_ADAPTER_USE_OPTS2 (1<<0)
+
+#define OPEN_ADAPTER_2_ENABLE_ONNOW (1<<15)
+
+/* Defines for SRB Commands */
+
+#define SRB_ACCESS_REGISTER 0x1f
+#define SRB_CLOSE_ADAPTER 0x04
+#define SRB_CONFIGURE_BRIDGE 0x0c
+#define SRB_CONFIGURE_WAKEUP_EVENT 0x1a
+#define SRB_MODIFY_BRIDGE_PARMS 0x15
+#define SRB_MODIFY_OPEN_OPTIONS 0x01
+#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
+#define SRB_NO_OPERATION 0x00
+#define SRB_OPEN_ADAPTER 0x03
+#define SRB_READ_LOG 0x08
+#define SRB_READ_SR_COUNTERS 0x16
+#define SRB_RESET_GROUP_ADDRESS 0x02
+#define SRB_SAVE_CONFIGURATION 0x1b
+#define SRB_SET_BRIDGE_PARMS 0x09
+#define SRB_SET_BRIDGE_TARGETS 0x10
+#define SRB_SET_FUNC_ADDRESS 0x07
+#define SRB_SET_GROUP_ADDRESS 0x06
+#define SRB_SET_GROUP_ADDR_OPTIONS 0x11
+#define SRB_UPDATE_WAKEUP_PATTERN 0x19
+
+/* Clear return code */
+
+#define OLYMPIC_CLEAR_RET_CODE 0xfe
+
+/* ARB Commands */
+#define ARB_RECEIVE_DATA 0x81
+#define ARB_LAN_CHANGE_STATUS 0x84
+/* ASB Response commands */
+
+#define ASB_RECEIVE_DATA 0x81
+
+
+/* Olympic defaults for buffers */
+
+#define OLYMPIC_RX_RING_SIZE 16 /* should be a power of 2 */
+#define OLYMPIC_TX_RING_SIZE 8 /* should be a power of 2 */
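+/* Ring indices are wrapped with & (SIZE-1) in olympic.c, so both sizes must be powers of 2 */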
+
+#define PKT_BUF_SZ 4096 /* Default packet size */
+
+/* Olympic data structures */
+
+/* xxxx These structures are all little endian in hardware. */
+
+struct olympic_tx_desc {
+ u32 buffer;
+ u32 status_length;
+};
+
+struct olympic_tx_status {
+ u32 status;
+};
+
+struct olympic_rx_desc {
+ u32 buffer;
+ u32 res_length;
+};
+
+struct olympic_rx_status {
+ u32 fragmentcnt_framelen;
+ u32 status_buffercnt;
+};
+/* xxxx END These structures are all little endian in hardware. */
+/* xxxx There may be more, but I'm pretty sure about these */
+
+struct mac_receive_buffer {
+ u16 next ;
+ u8 padding ;
+ u8 frame_status ;
+ u16 buffer_length ;
+ u8 frame_data ;
+};
+
+struct olympic_private {
+
+ u16 srb; /* be16 */
+ u16 trb; /* be16 */
+ u16 arb; /* be16 */
+ u16 asb; /* be16 */
+
+ u8 __iomem *olympic_mmio;
+ u8 __iomem *olympic_lap;
+ struct pci_dev *pdev ;
+ char *olympic_card_name ;
+
+ spinlock_t olympic_lock ;
+
+ volatile int srb_queued; /* True if an SRB is still posted */
+ wait_queue_head_t srb_wait;
+
+ volatile int asb_queued; /* True if an ASB is posted */
+
+ volatile int trb_queued; /* True if a TRB is posted */
+ wait_queue_head_t trb_wait ;
+
+ /* These must be on a 4 byte boundary. */
+ struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE];
+ struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE];
+ struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE];
+ struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE];
+
+ struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
+ int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
+
+ struct net_device_stats olympic_stats ;
+ u16 olympic_lan_status ;
+ u8 olympic_ring_speed ;
+ u16 pkt_buf_sz ;
+ u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;
+ u16 olympic_addr_table_addr, olympic_parms_addr ;
+ u8 olympic_laa[6] ;
+ u32 rx_ring_dma_addr;
+ u32 rx_status_ring_dma_addr;
+ u32 tx_ring_dma_addr;
+ u32 tx_status_ring_dma_addr;
+};
+
+struct olympic_adapter_addr_table {
+
+ u8 node_addr[6] ;
+ u8 reserved[4] ;
+ u8 func_addr[4] ;
+} ;
+
+struct olympic_parameters_table {
+
+ u8 phys_addr[4] ;
+ u8 up_node_addr[6] ;
+ u8 up_phys_addr[4] ;
+ u8 poll_addr[6] ;
+ u16 reserved ;
+ u16 acc_priority ;
+ u16 auth_source_class ;
+ u16 att_code ;
+ u8 source_addr[6] ;
+ u16 beacon_type ;
+ u16 major_vector ;
+ u16 lan_status ;
+ u16 soft_error_time ;
+ u16 reserved1 ;
+ u16 local_ring ;
+ u16 mon_error ;
+ u16 beacon_transmit ;
+ u16 beacon_receive ;
+ u16 frame_correl ;
+ u8 beacon_naun[6] ;
+ u32 reserved2 ;
+ u8 beacon_phys[4] ;
+};
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
new file mode 100644
index 000000000000..675b063508e3
--- /dev/null
+++ b/drivers/net/tokenring/proteon.c
@@ -0,0 +1,432 @@
+/*
+ * proteon.c: A network driver for Proteon ISA token ring cards.
+ *
+ * Based on tmspci written 1999 by Adam Fritzler
+ *
+ * Written 2003 by Jochen Friedrich
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver module supports the following cards:
+ * - Proteon 1392, 1392+
+ *
+ * Maintainer(s):
+ * AF Adam Fritzler mid@auk.cx
+ * JF Jochen Friedrich jochen@scram.de
+ *
+ * Modification History:
+ * 02-Jan-03 JF Created
+ *
+ */
+static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pci.h>
+#include <asm/dma.h>
+
+#include "tms380tr.h"
+
+#define PROTEON_IO_EXTENT 32
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int portlist[] __initdata = {
+ 0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot.
+ 0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot.
+ 0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot.
+ 0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot.
+ 0
+};
+
+/* A zero-terminated list of IRQs to be probed. */
+static unsigned short irqlist[] = {
+ 7, 6, 5, 4, 3, 12, 11, 10, 9,
+ 0
+};
+
+/* A zero-terminated list of DMAs to be probed. */
+static int dmalist[] __initdata = {
+ 5, 6, 7,
+ 0
+};
+
+static char cardname[] = "Proteon 1392\0";
+
+struct net_device *proteon_probe(int unit);
+static int proteon_open(struct net_device *dev);
+static void proteon_read_eeprom(struct net_device *dev);
+static unsigned short proteon_setnselout_pins(struct net_device *dev);
+
+static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg)
+{
+ return inb(dev->base_addr + reg);
+}
+
+static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg)
+{
+ return inw(dev->base_addr + reg);
+}
+
+static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outb(val, dev->base_addr + reg);
+}
+
+static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outw(val, dev->base_addr + reg);
+}
+
+static int __init proteon_probe1(struct net_device *dev, int ioaddr)
+{
+ unsigned char chk1, chk2;
+ int i;
+
+ if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname))
+ return -ENODEV;
+
+
+ chk1 = inb(ioaddr + 0x1f); /* Get Proteon ID reg 1 */
+ if (chk1 != 0x1f)
+ goto nodev;
+
+ chk1 = inb(ioaddr + 0x1e) & 0x07; /* Get Proteon ID reg 0 */
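+	/* The low three bits of ID register 0 advance by one on each read;
+	   check that they do so for 16 consecutive reads before accepting
+	   the card. */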
+ for (i=0; i<16; i++) {
+ chk2 = inb(ioaddr + 0x1e) & 0x07;
+ if (((chk1 + 1) & 0x07) != chk2)
+ goto nodev;
+ chk1 = chk2;
+ }
+
+ dev->base_addr = ioaddr;
+ return (0);
+nodev:
+ release_region(ioaddr, PROTEON_IO_EXTENT);
+ return -ENODEV;
+}
+
+static int __init setup_card(struct net_device *dev)
+{
+ struct net_local *tp;
+ static int versionprinted;
+ const unsigned *port;
+ int j,err = 0;
+
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+ if (dev->base_addr) /* probe specific location */
+ err = proteon_probe1(dev, dev->base_addr);
+ else {
+ for (port = portlist; *port; port++) {
+ err = proteon_probe1(dev, *port);
+ if (!err)
+ break;
+ }
+ }
+ if (err)
+ goto out4;
+
+ /* At this point we have found a valid card. */
+
+ if (versionprinted++ == 0)
+ printk(KERN_DEBUG "%s", version);
+
+ err = -EIO;
+ if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+ goto out4;
+
+ dev->base_addr &= ~3;
+
+ proteon_read_eeprom(dev);
+
+ printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (j = 1; j < 6; j++)
+ printk(":%2.2x", dev->dev_addr[j]);
+ printk("\n");
+
+ tp = netdev_priv(dev);
+ tp->setnselout = proteon_setnselout_pins;
+
+ tp->sifreadb = proteon_sifreadb;
+ tp->sifreadw = proteon_sifreadw;
+ tp->sifwriteb = proteon_sifwriteb;
+ tp->sifwritew = proteon_sifwritew;
+
+ memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1);
+
+ tp->tmspriv = NULL;
+
+ dev->open = proteon_open;
+ dev->stop = tms380tr_close;
+
+ if (dev->irq == 0)
+ {
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+ if (!request_irq(dev->irq, tms380tr_interrupt, 0,
+ cardname, dev))
+ break;
+ }
+
+ if(irqlist[j] == 0)
+ {
+ printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+ goto out3;
+ }
+ }
+ else
+ {
+ for(j = 0; irqlist[j] != 0; j++)
+ if (irqlist[j] == dev->irq)
+ break;
+ if (irqlist[j] == 0)
+ {
+ printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
+ dev->name, dev->irq);
+ goto out3;
+ }
+ if (request_irq(dev->irq, tms380tr_interrupt, 0,
+ cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+ dev->name, dev->irq);
+ goto out3;
+ }
+ }
+
+ if (dev->dma == 0)
+ {
+ for(j = 0; dmalist[j] != 0; j++)
+ {
+ dev->dma = dmalist[j];
+ if (!request_dma(dev->dma, cardname))
+ break;
+ }
+
+ if(dmalist[j] == 0)
+ {
+ printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+ goto out2;
+ }
+ }
+ else
+ {
+ for(j = 0; dmalist[j] != 0; j++)
+ if (dmalist[j] == dev->dma)
+ break;
+ if (dmalist[j] == 0)
+ {
+ printk(KERN_INFO "%s: Illegal DMA %d specified\n",
+ dev->name, dev->dma);
+ goto out2;
+ }
+ if (request_dma(dev->dma, cardname))
+ {
+ printk(KERN_INFO "%s: Selected DMA %d not available\n",
+ dev->name, dev->dma);
+ goto out2;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ return 0;
+out:
+ free_dma(dev->dma);
+out2:
+ free_irq(dev->irq, dev);
+out3:
+ tmsdev_term(dev);
+out4:
+ release_region(dev->base_addr, PROTEON_IO_EXTENT);
+ return err;
+}
+
+struct net_device * __init proteon_probe(int unit)
+{
+ struct net_device *dev = alloc_trdev(sizeof(struct net_local));
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "tr%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ err = setup_card(dev);
+ if (err)
+ goto out;
+
+ return dev;
+
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * Reads MAC address from adapter RAM, which should've read it from
+ * the onboard ROM.
+ *
+ * Calling this on a board that does not support it can be a very
+ * dangerous thing. The Madge board, for instance, will lock your
+ * machine hard when this is called. Luckily, it's supported in a
+ * separate driver. --ASF
+ */
+static void proteon_read_eeprom(struct net_device *dev)
+{
+ int i;
+
+ /* Address: 0000:0000 */
+ proteon_sifwritew(dev, 0, SIFADX);
+ proteon_sifwritew(dev, 0, SIFADR);
+
+ /* Read six byte MAC address data */
+ dev->addr_len = 6;
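+	/* SIFINC is the auto-incrementing data port; each 16-bit read
+	   carries the MAC byte in its high half, hence the shift. */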
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
+}
+
+unsigned short proteon_setnselout_pins(struct net_device *dev)
+{
+ return 0;
+}
+
+static int proteon_open(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned short val = 0;
+ int i;
+
+ /* Proteon reset sequence */
+ outb(0, dev->base_addr + 0x11);
+ mdelay(20);
+ outb(0x04, dev->base_addr + 0x11);
+ mdelay(20);
+ outb(0, dev->base_addr + 0x11);
+ mdelay(100);
+
+ /* set control/status reg */
+ val = inb(dev->base_addr + 0x11);
+ val |= 0x78;
+ val &= 0xf9;
+ if(tp->DataRate == SPEED_4)
+ val |= 0x20;
+ else
+ val &= ~0x20;
+
+ outb(val, dev->base_addr + 0x11);
+ outb(0xff, dev->base_addr + 0x12);
+ for(i = 0; irqlist[i] != 0; i++)
+ {
+ if(irqlist[i] == dev->irq)
+ break;
+ }
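+	/* Register 0x13 encodes the IRQ table index in the low nibble and
+	   (7 - DMA channel) in the high nibble. */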
+ val = i;
+ i = (7 - dev->dma) << 4;
+ val |= i;
+ outb(val, dev->base_addr + 0x13);
+
+ return tms380tr_open(dev);
+}
+
+#ifdef MODULE
+
+#define ISATR_MAX_ADAPTERS 3
+
+static int io[ISATR_MAX_ADAPTERS];
+static int irq[ISATR_MAX_ADAPTERS];
+static int dma[ISATR_MAX_ADAPTERS];
+
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+
+static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS];
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int i, num = 0, err = 0;
+
+ for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (!dev)
+ continue;
+
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+ dev->dma = dma[i];
+ err = setup_card(dev);
+ if (!err) {
+ proteon_dev[i] = dev;
+ ++num;
+ } else {
+ free_netdev(dev);
+ }
+ }
+
+ printk(KERN_NOTICE "proteon.c: %d cards found.\n", num);
+ /* Probe for cards. */
+ if (num == 0) {
+ printk(KERN_NOTICE "proteon.c: No cards found.\n");
+ return (-ENODEV);
+ }
+ return (0);
+}
+
+void cleanup_module(void)
+{
+ int i;
+
+ for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
+ struct net_device *dev = proteon_dev[i];
+
+ if (!dev)
+ continue;
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, PROTEON_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ free_dma(dev->dma);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c proteon.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c proteon.c"
+ * c-set-style "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
new file mode 100644
index 000000000000..3fab54a26466
--- /dev/null
+++ b/drivers/net/tokenring/skisa.c
@@ -0,0 +1,442 @@
+/*
+ * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards.
+ *
+ * Based on tmspci written 1999 by Adam Fritzler
+ *
+ * Written 2000 by Jochen Friedrich
+ * Dedicated to my girlfriend Steffi Bopp
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver module supports the following cards:
+ * - SysKonnect TR4/16(+) ISA (SK-4190)
+ *
+ * Maintainer(s):
+ * AF Adam Fritzler mid@auk.cx
+ * JF Jochen Friedrich jochen@scram.de
+ *
+ * Modification History:
+ * 14-Jan-01 JF Created
+ * 28-Oct-02 JF Fixed probe of card for static compilation.
+ * Fixed module init to not make hotplug go wild.
+ * 09-Nov-02 JF Fixed early bail out on out of memory
+ * situations if multiple cards are found.
+ * Cleaned up some unnecessary console SPAM.
+ * 09-Dec-02 JF Fixed module reference counting.
+ * 02-Jan-03 JF Renamed to skisa.c
+ *
+ */
+static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pci.h>
+#include <asm/dma.h>
+
+#include "tms380tr.h"
+
+#define SK_ISA_IO_EXTENT 32
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int portlist[] __initdata = {
+ 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK
+ 0
+};
+
+/* A zero-terminated list of IRQs to be probed.
+ * Used again after initial probe for sktr_chipset_init, called from sktr_open.
+ */
+static const unsigned short irqlist[] = {
+ 3, 5, 9, 10, 11, 12, 15,
+ 0
+};
+
+/* A zero-terminated list of DMAs to be probed. */
+static int dmalist[] __initdata = {
+ 5, 6, 7,
+ 0
+};
+
+static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
+
+struct net_device *sk_isa_probe(int unit);
+static int sk_isa_open(struct net_device *dev);
+static void sk_isa_read_eeprom(struct net_device *dev);
+static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
+
+static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg)
+{
+ return inb(dev->base_addr + reg);
+}
+
+static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg)
+{
+ return inw(dev->base_addr + reg);
+}
+
+static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outb(val, dev->base_addr + reg);
+}
+
+static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outw(val, dev->base_addr + reg);
+}
+
+
+static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
+{
+ unsigned char old, chk1, chk2;
+
+ if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname))
+ return -ENODEV;
+
+ old = inb(ioaddr + SIFADR); /* Get the old SIFADR value */
+
+ chk1 = 0; /* Begin with check value 0 */
+ do {
+ /* Write new SIFADR value */
+ outb(chk1, ioaddr + SIFADR);
+
+ /* Read, invert and write */
+ chk2 = inb(ioaddr + SIFADD);
+ chk2 ^= 0x0FE;
+ outb(chk2, ioaddr + SIFADR);
+
+ /* Read, invert and compare */
+ chk2 = inb(ioaddr + SIFADD);
+ chk2 ^= 0x0FE;
+
+ if(chk1 != chk2) {
+ release_region(ioaddr, SK_ISA_IO_EXTENT);
+ return -ENODEV;
+ }
+
+ chk1 -= 2;
+	} while(chk1 != 0);	/* Repeat 128 times (all even byte values) */
+
+ /* Restore the SIFADR value */
+ outb(old, ioaddr + SIFADR);
+
+ dev->base_addr = ioaddr;
+ return 0;
+}
+
+static int __init setup_card(struct net_device *dev)
+{
+ struct net_local *tp;
+ static int versionprinted;
+ const unsigned *port;
+ int j, err = 0;
+
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+ if (dev->base_addr) /* probe specific location */
+ err = sk_isa_probe1(dev, dev->base_addr);
+ else {
+ for (port = portlist; *port; port++) {
+ err = sk_isa_probe1(dev, *port);
+ if (!err)
+ break;
+ }
+ }
+ if (err)
+ goto out4;
+
+ /* At this point we have found a valid card. */
+
+ if (versionprinted++ == 0)
+ printk(KERN_DEBUG "%s", version);
+
+ err = -EIO;
+ if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+ goto out4;
+
+ dev->base_addr &= ~3;
+
+ sk_isa_read_eeprom(dev);
+
+ printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (j = 1; j < 6; j++)
+ printk(":%2.2x", dev->dev_addr[j]);
+ printk("\n");
+
+ tp = netdev_priv(dev);
+ tp->setnselout = sk_isa_setnselout_pins;
+
+ tp->sifreadb = sk_isa_sifreadb;
+ tp->sifreadw = sk_isa_sifreadw;
+ tp->sifwriteb = sk_isa_sifwriteb;
+ tp->sifwritew = sk_isa_sifwritew;
+
+ memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1);
+
+ tp->tmspriv = NULL;
+
+ dev->open = sk_isa_open;
+ dev->stop = tms380tr_close;
+
+ if (dev->irq == 0)
+ {
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+ if (!request_irq(dev->irq, tms380tr_interrupt, 0,
+ isa_cardname, dev))
+ break;
+ }
+
+ if(irqlist[j] == 0)
+ {
+ printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+ goto out3;
+ }
+ }
+ else
+ {
+ for(j = 0; irqlist[j] != 0; j++)
+ if (irqlist[j] == dev->irq)
+ break;
+ if (irqlist[j] == 0)
+ {
+ printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
+ dev->name, dev->irq);
+ goto out3;
+ }
+ if (request_irq(dev->irq, tms380tr_interrupt, 0,
+ isa_cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+ dev->name, dev->irq);
+ goto out3;
+ }
+ }
+
+ if (dev->dma == 0)
+ {
+ for(j = 0; dmalist[j] != 0; j++)
+ {
+ dev->dma = dmalist[j];
+ if (!request_dma(dev->dma, isa_cardname))
+ break;
+ }
+
+ if(dmalist[j] == 0)
+ {
+ printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+ goto out2;
+ }
+ }
+ else
+ {
+ for(j = 0; dmalist[j] != 0; j++)
+ if (dmalist[j] == dev->dma)
+ break;
+ if (dmalist[j] == 0)
+ {
+ printk(KERN_INFO "%s: Illegal DMA %d specified\n",
+ dev->name, dev->dma);
+ goto out2;
+ }
+ if (request_dma(dev->dma, isa_cardname))
+ {
+ printk(KERN_INFO "%s: Selected DMA %d not available\n",
+ dev->name, dev->dma);
+ goto out2;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ return 0;
+out:
+ free_dma(dev->dma);
+out2:
+ free_irq(dev->irq, dev);
+out3:
+ tmsdev_term(dev);
+out4:
+ release_region(dev->base_addr, SK_ISA_IO_EXTENT);
+ return err;
+}
+
+struct net_device * __init sk_isa_probe(int unit)
+{
+ struct net_device *dev = alloc_trdev(sizeof(struct net_local));
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "tr%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ err = setup_card(dev);
+ if (err)
+ goto out;
+
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * Reads MAC address from adapter RAM, which should've read it from
+ * the onboard ROM.
+ *
+ * Calling this on a board that does not support it can be a very
+ * dangerous thing. The Madge board, for instance, will lock your
+ * machine hard when this is called. Luckily, it's supported in a
+ * separate driver. --ASF
+ */
+static void sk_isa_read_eeprom(struct net_device *dev)
+{
+ int i;
+
+ /* Address: 0000:0000 */
+ sk_isa_sifwritew(dev, 0, SIFADX);
+ sk_isa_sifwritew(dev, 0, SIFADR);
+
+ /* Read six byte MAC address data */
+ dev->addr_len = 6;
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8;
+}
+
+unsigned short sk_isa_setnselout_pins(struct net_device *dev)
+{
+ return 0;
+}
+
+static int sk_isa_open(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned short val = 0;
+ unsigned short oldval;
+ int i;
+
+ val = 0;
+ for(i = 0; irqlist[i] != 0; i++)
+ {
+ if(irqlist[i] == dev->irq)
+ break;
+ }
+
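+	/* Build the POS register value: cycle time at bit 2, IRQ table
+	   index at bit 4, DMA channel offset (dma - 5) in the low bits,
+	   plus the line speed bit. */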
+ val |= CYCLE_TIME << 2;
+ val |= i << 4;
+ i = dev->dma - 5;
+ val |= i;
+ if(tp->DataRate == SPEED_4)
+ val |= LINE_SPEED_BIT;
+ else
+ val &= ~LINE_SPEED_BIT;
+ oldval = sk_isa_sifreadb(dev, POSREG);
+ /* Leave cycle bits alone */
+ oldval |= 0xf3;
+ val &= oldval;
+ sk_isa_sifwriteb(dev, val, POSREG);
+
+ return tms380tr_open(dev);
+}
+
+#ifdef MODULE
+
+#define ISATR_MAX_ADAPTERS 3
+
+static int io[ISATR_MAX_ADAPTERS];
+static int irq[ISATR_MAX_ADAPTERS];
+static int dma[ISATR_MAX_ADAPTERS];
+
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+
+static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
+
+int init_module(void)
+{
+ struct net_device *dev;
+ int i, num = 0, err = 0;
+
+ for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (!dev)
+ continue;
+
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+ dev->dma = dma[i];
+ err = setup_card(dev);
+
+ if (!err) {
+ sk_isa_dev[i] = dev;
+ ++num;
+ } else {
+ free_netdev(dev);
+ }
+ }
+
+ printk(KERN_NOTICE "skisa.c: %d cards found.\n", num);
+ /* Probe for cards. */
+ if (num == 0) {
+ printk(KERN_NOTICE "skisa.c: No cards found.\n");
+ return (-ENODEV);
+ }
+ return (0);
+}
+
+void cleanup_module(void)
+{
+ int i;
+
+ for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
+ struct net_device *dev = sk_isa_dev[i];
+
+ if (!dev)
+ continue;
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, SK_ISA_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ free_dma(dev->dma);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c skisa.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c skisa.c"
+ * c-set-style "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
new file mode 100644
index 000000000000..5c8aeacb8318
--- /dev/null
+++ b/drivers/net/tokenring/smctr.c
@@ -0,0 +1,5742 @@
+/*
+ * smctr.c: A network driver for the SMC Token Ring Adapters.
+ *
+ * Written by Jay Schulist <jschlst@samba.org>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This device driver works with the following SMC adapters:
+ * - SMC TokenCard Elite (8115T, chips 825/584)
+ * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594)
+ *
+ * Source(s):
+ * - SMC TokenCard SDK.
+ *
+ * Maintainer(s):
+ * JS Jay Schulist <jschlst@samba.org>
+ *
+ * Changes:
+ * 07102000 JS Fixed a timing problem in smctr_wait_cmd();
+ *			Also added a bit more descriptive error msgs.
+ * 07122000 JS Fixed problem with detecting a card with
+ * module io/irq/mem specified.
+ *
+ * To do:
+ * 1. Multicast support.
+ *
+ * Initial 2.5 cleanup Alan Cox <alan@redhat.com> 2002/10/28
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mca-legacy.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/trdevice.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#if BITS_PER_LONG == 64
+#error FIXME: driver does not support 64-bit platforms
+#endif
+
+#include "smctr.h" /* Our Stuff */
+#include "smctr_firmware.h" /* SMC adapter firmware */
+
+static char version[] __initdata = KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n";
+static const char cardname[] = "smctr";
+
+
+#define SMCTR_IO_EXTENT 20
+
+#ifdef CONFIG_MCA_LEGACY
+static unsigned int smctr_posid = 0x6ec6;
+#endif
+
+static int ringspeed;
+
+/* SMC Name of the Adapter. */
+static char smctr_name[] = "SMC TokenCard";
+char *smctr_model = "Unknown";
+
+/* Use 0 for production, 1 for verification, 2 for debug, and
+ * 3 for very verbose debug.
+ */
+#ifndef SMCTR_DEBUG
+#define SMCTR_DEBUG 1
+#endif
+static unsigned int smctr_debug = SMCTR_DEBUG;
+
+/* smctr.c prototypes and functions are arranged alphabetically
+ * for clarity, maintainability and pure old-fashioned fun.
+ */
+/* A */
+static int smctr_alloc_shared_memory(struct net_device *dev);
+
+/* B */
+static int smctr_bypass_state(struct net_device *dev);
+
+/* C */
+static int smctr_checksum_firmware(struct net_device *dev);
+static int __init smctr_chk_isa(struct net_device *dev);
+static int smctr_chg_rx_mask(struct net_device *dev);
+static int smctr_clear_int(struct net_device *dev);
+static int smctr_clear_trc_reset(int ioaddr);
+static int smctr_close(struct net_device *dev);
+
+/* D */
+static int smctr_decode_firmware(struct net_device *dev);
+static int smctr_disable_16bit(struct net_device *dev);
+static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
+static int smctr_disable_bic_int(struct net_device *dev);
+
+/* E */
+static int smctr_enable_16bit(struct net_device *dev);
+static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
+static int smctr_enable_adapter_ram(struct net_device *dev);
+static int smctr_enable_bic_int(struct net_device *dev);
+
+/* G */
+static int __init smctr_get_boardid(struct net_device *dev, int mca);
+static int smctr_get_group_address(struct net_device *dev);
+static int smctr_get_functional_address(struct net_device *dev);
+static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
+static int smctr_get_physical_drop_number(struct net_device *dev);
+static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
+static int smctr_get_station_id(struct net_device *dev);
+static struct net_device_stats *smctr_get_stats(struct net_device *dev);
+static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
+ __u16 bytes_count);
+static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
+
+/* H */
+static int smctr_hardware_send_packet(struct net_device *dev,
+ struct net_local *tp);
+/* I */
+static int smctr_init_acbs(struct net_device *dev);
+static int smctr_init_adapter(struct net_device *dev);
+static int smctr_init_card_real(struct net_device *dev);
+static int smctr_init_rx_bdbs(struct net_device *dev);
+static int smctr_init_rx_fcbs(struct net_device *dev);
+static int smctr_init_shared_memory(struct net_device *dev);
+static int smctr_init_tx_bdbs(struct net_device *dev);
+static int smctr_init_tx_fcbs(struct net_device *dev);
+static int smctr_internal_self_test(struct net_device *dev);
+static irqreturn_t smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int smctr_issue_enable_int_cmd(struct net_device *dev,
+ __u16 interrupt_enable_mask);
+static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
+ __u16 ibits);
+static int smctr_issue_init_timers_cmd(struct net_device *dev);
+static int smctr_issue_init_txrx_cmd(struct net_device *dev);
+static int smctr_issue_insert_cmd(struct net_device *dev);
+static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
+static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
+static int smctr_issue_remove_cmd(struct net_device *dev);
+static int smctr_issue_resume_acb_cmd(struct net_device *dev);
+static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
+static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
+static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
+static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
+static int smctr_issue_test_hic_cmd(struct net_device *dev);
+static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
+static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
+static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
+static int smctr_issue_write_byte_cmd(struct net_device *dev,
+ short aword_cnt, void *byte);
+static int smctr_issue_write_word_cmd(struct net_device *dev,
+ short aword_cnt, void *word);
+
+/* J */
+static int smctr_join_complete_state(struct net_device *dev);
+
+/* L */
+static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
+static int smctr_load_firmware(struct net_device *dev);
+static int smctr_load_node_addr(struct net_device *dev);
+static int smctr_lobe_media_test(struct net_device *dev);
+static int smctr_lobe_media_test_cmd(struct net_device *dev);
+static int smctr_lobe_media_test_state(struct net_device *dev);
+
+/* M */
+static int smctr_make_8025_hdr(struct net_device *dev,
+ MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
+static int smctr_make_access_pri(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
+static int smctr_make_auth_funct_class(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_corr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv, __u16 correlator);
+static int smctr_make_funct_addr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_group_addr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_phy_drop_num(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
+static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
+static int smctr_make_ring_station_status(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_ring_station_version(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_tx_status_code(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
+static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+static int smctr_make_wrap_data(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv);
+
+/* O */
+static int smctr_open(struct net_device *dev);
+static int smctr_open_tr(struct net_device *dev);
+
+/* P */
+struct net_device *smctr_probe(int unit);
+static int __init smctr_probe1(struct net_device *dev, int ioaddr);
+static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
+ struct net_device *dev, __u16 rx_status);
+
+/* R */
+static int smctr_ram_memory_test(struct net_device *dev);
+static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator);
+static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator);
+static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
+static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
+ MAC_HEADER *rmf, __u16 *correlator);
+static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator);
+static int smctr_reset_adapter(struct net_device *dev);
+static int smctr_restart_tx_chain(struct net_device *dev, short queue);
+static int smctr_ring_status_chg(struct net_device *dev);
+static int smctr_rx_frame(struct net_device *dev);
+
+/* S */
+static int smctr_send_dat(struct net_device *dev);
+static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev);
+static int smctr_send_lobe_media_test(struct net_device *dev);
+static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator);
+static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator);
+static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator);
+static int smctr_send_rpt_tx_forward(struct net_device *dev,
+ MAC_HEADER *rmf, __u16 tx_fstatus);
+static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 rcode, __u16 correlator);
+static int smctr_send_rq_init(struct net_device *dev);
+static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *tx_fstatus);
+static int smctr_set_auth_access_pri(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv);
+static int smctr_set_auth_funct_class(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv);
+static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
+ __u16 *correlator);
+static int smctr_set_error_timer_value(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv);
+static int smctr_set_frame_forward(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv, __u8 dc_sc);
+static int smctr_set_local_ring_num(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv);
+static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
+static void smctr_set_multicast_list(struct net_device *dev);
+static int smctr_set_page(struct net_device *dev, __u8 *buf);
+static int smctr_set_phy_drop(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv);
+static int smctr_set_ring_speed(struct net_device *dev);
+static int smctr_set_rx_look_ahead(struct net_device *dev);
+static int smctr_set_trc_reset(int ioaddr);
+static int smctr_setup_single_cmd(struct net_device *dev,
+ __u16 command, __u16 subcommand);
+static int smctr_setup_single_cmd_w_data(struct net_device *dev,
+ __u16 command, __u16 subcommand);
+static char *smctr_malloc(struct net_device *dev, __u16 size);
+static int smctr_status_chg(struct net_device *dev);
+
+/* T */
+static void smctr_timeout(struct net_device *dev);
+static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
+ __u16 queue);
+static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
+static unsigned short smctr_tx_move_frame(struct net_device *dev,
+ struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
+
+/* U */
+static int smctr_update_err_stats(struct net_device *dev);
+static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
+static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
+ __u16 queue);
+
+/* W */
+static int smctr_wait_cmd(struct net_device *dev);
+static int smctr_wait_while_cbusy(struct net_device *dev);
+
+#define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X)
+#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X)
+#define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
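+
+/* Worked example of the boundary macros above (16-bit shared-memory offsets):
+ * TO_256_BYTE_BOUNDRY(0x0120) = ((0x0120 + 0xff) & 0xff00) - 0x0120 = 0xe0,
+ * i.e. the padding needed to reach the next 256 byte boundary (0x0200).
+ * TO_PARAGRAPH_BOUNDRY(0x0123) = ((0x0123 + 0x0f) & 0xfff0) - 0x0123 = 0x0d,
+ * i.e. the padding needed to reach the next 16 byte paragraph (0x0130).
+ * PARAGRAPH_BOUNDRY(X) presumably asks smctr_malloc() to consume that padding
+ * so that the next allocation starts paragraph aligned.
+ */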
+
+/* Allocate Adapter Shared Memory.
+ * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
+ * function "get_num_rx_bdbs" below!!!
+ *
+ * Order of memory allocation:
+ *
+ * 0. Initial System Configuration Block Pointer
+ * 1. System Configuration Block
+ * 2. System Control Block
+ * 3. Action Command Block
+ * 4. Interrupt Status Block
+ *
+ * 5. MAC TX FCB'S
+ * 6. NON-MAC TX FCB'S
+ * 7. MAC TX BDB'S
+ * 8. NON-MAC TX BDB'S
+ * 9. MAC RX FCB'S
+ * 10. NON-MAC RX FCB'S
+ * 11. MAC RX BDB'S
+ * 12. NON-MAC RX BDB'S
+ * 13. MAC TX Data Buffer( 1, 256 byte buffer)
+ * 14. MAC RX Data Buffer( 1, 256 byte buffer)
+ *
+ * 15. NON-MAC TX Data Buffer
+ * 16. NON-MAC RX Data Buffer
+ */
+static int smctr_alloc_shared_memory(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name);
+
+ /* Allocate initial System Configuration Block pointer.
+ * This pointer is located in the last page, last offset - 4.
+ */
+ tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
+ - (long)ISCP_BLOCK_SIZE);
+
+ /* Allocate System Control Blocks. */
+ tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
+ PARAGRAPH_BOUNDRY(tp->sh_mem_used);
+
+ tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
+ PARAGRAPH_BOUNDRY(tp->sh_mem_used);
+
+ tp->acb_head = (ACBlock *)smctr_malloc(dev,
+ sizeof(ACBlock)*tp->num_acbs);
+ PARAGRAPH_BOUNDRY(tp->sh_mem_used);
+
+ tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
+ PARAGRAPH_BOUNDRY(tp->sh_mem_used);
+
+ tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
+ PARAGRAPH_BOUNDRY(tp->sh_mem_used);
+
+ /* Allocate transmit FCBs. */
+ tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
+ sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);
+
+ tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
+ sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);
+
+ tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
+ sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);
+
+ /* Allocate transmit BDBs. */
+ tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
+ sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);
+
+ tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
+ sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);
+
+ tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
+ sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);
+
+ /* Allocate receive FCBs. */
+ tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
+ sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);
+
+ tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
+ sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);
+
+ /* Allocate receive BDBs. */
+ tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
+ sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);
+
+ tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
+
+ tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
+ sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);
+
+ tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
+
+ /* Allocate MAC transmit buffers.
+ * MAC Tx Buffers don't have to be on an ODD boundary.
+ */
+ tp->tx_buff_head[MAC_QUEUE]
+ = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
+ tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
+ tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
+
+ /* Allocate BUG transmit buffers. */
+ tp->tx_buff_head[BUG_QUEUE]
+ = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
+ tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
+ tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
+
+ /* Allocate MAC receive data buffers.
+ * MAC Rx buffer doesn't have to be on a 256 byte boundary.
+ */
+ tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
+ RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
+ tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
+
+ /* Allocate Non-MAC transmit buffers.
+ * ?? For maximum Netware performance, put Tx Buffers on
+ * ODD boundary and then restore malloc to even boundaries.
+ */
+ smctr_malloc(dev, 1L);
+ tp->tx_buff_head[NON_MAC_QUEUE]
+ = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
+ tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
+ tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
+ smctr_malloc(dev, 1L);
+
+ /* Allocate Non-MAC receive data buffers.
+ * To guarantee a minimum of 256 contiguous bytes of memory to
+ * UM_Receive_Packet's lookahead pointer, before a page
+ * change or ring end is encountered, place each rx buffer on
+ * a 256 byte boundary.
+ */
+ smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
+ tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
+ RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
+ tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
+
+ return (0);
+}
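+
+/* Note on the allocation scheme above: smctr_malloc() appears to be a simple
+ * bump allocator over tp->sh_mem_used within the adapter's shared RAM window
+ * (tp->ram_access); a size of 0 just returns the current allocation pointer,
+ * which is how the *_end[] markers are captured. This is why the same order
+ * and padding must be replayed byte for byte in smctr_get_num_rx_bdbs().
+ */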
+
+/* Enter Bypass state. */
+static int smctr_bypass_state(struct net_device *dev)
+{
+ int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name);
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
+
+ return (err);
+}
+
+static int smctr_checksum_firmware(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 i, checksum = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name);
+
+ smctr_enable_adapter_ctrl_store(dev);
+
+ for(i = 0; i < CS_RAM_SIZE; i += 2)
+ checksum += *((__u16 *)(tp->ram_access + i));
+
+ tp->microcode_version = *(__u16 *)(tp->ram_access
+ + CS_RAM_VERSION_OFFSET);
+ tp->microcode_version >>= 8;
+
+ smctr_disable_adapter_ctrl_store(dev);
+
+ if(checksum)
+ return (checksum);
+
+ return (0);
+}
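+
+/* The checksum above sums the control store as 16-bit words over CS_RAM_SIZE;
+ * a properly loaded firmware image presumably makes these words sum to zero
+ * (modulo 2^16), so any non-zero result is treated by the caller as
+ * "firmware missing or corrupt". The microcode version byte is read from
+ * CS_RAM_VERSION_OFFSET while the control store is mapped in.
+ */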
+
+static int __init smctr_chk_mca(struct net_device *dev)
+{
+#ifdef CONFIG_MCA_LEGACY
+ struct net_local *tp = netdev_priv(dev);
+ int current_slot;
+ __u8 r1, r2, r3, r4, r5;
+
+ current_slot = mca_find_unused_adapter(smctr_posid, 0);
+ if(current_slot == MCA_NOTFOUND)
+ return (-ENODEV);
+
+ mca_set_adapter_name(current_slot, smctr_name);
+ mca_mark_as_used(current_slot);
+ tp->slot_num = current_slot;
+
+ r1 = mca_read_stored_pos(tp->slot_num, 2);
+ r2 = mca_read_stored_pos(tp->slot_num, 3);
+
+ if(tp->slot_num)
+ outb((__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT), CNFG_POS_CONTROL_REG);
+ else
+ outb((__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT), CNFG_POS_CONTROL_REG);
+
+ r1 = inb(CNFG_POS_REG1);
+ r2 = inb(CNFG_POS_REG0);
+
+ tp->bic_type = BIC_594_CHIP;
+
+ /* IO */
+ r2 = mca_read_stored_pos(tp->slot_num, 2);
+ r2 &= 0xF0;
+ dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
+ request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);
+
+ /* IRQ */
+ r5 = mca_read_stored_pos(tp->slot_num, 5);
+ r5 &= 0xC;
+ switch(r5)
+ {
+ case 0:
+ dev->irq = 3;
+ break;
+
+ case 0x4:
+ dev->irq = 4;
+ break;
+
+ case 0x8:
+ dev->irq = 10;
+ break;
+
+ default:
+ dev->irq = 15;
+ break;
+ }
+ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev)) {
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ return -ENODEV;
+ }
+
+ /* Get RAM base */
+ r3 = mca_read_stored_pos(tp->slot_num, 3);
+ tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
+ if (r3 & 0x8)
+ tp->ram_base += 0x010000;
+ if (r3 & 0x80)
+ tp->ram_base += 0xF00000;
+
+ /* Get Ram Size */
+ r3 &= 0x30;
+ r3 >>= 4;
+
+ tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
+ tp->ram_size = (__u16)CNFG_SIZE_64KB;
+ tp->board_id |= TOKEN_MEDIA;
+
+ r4 = mca_read_stored_pos(tp->slot_num, 4);
+ tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000;
+ if (r4 & 0x8)
+ tp->rom_base += 0x010000;
+
+ /* Get ROM size. */
+ r4 >>= 4;
+ switch (r4) {
+ case 0:
+ tp->rom_size = CNFG_SIZE_8KB;
+ break;
+ case 1:
+ tp->rom_size = CNFG_SIZE_16KB;
+ break;
+ case 2:
+ tp->rom_size = CNFG_SIZE_32KB;
+ break;
+ default:
+ tp->rom_size = ROM_DISABLE;
+ }
+
+ /* Get Media Type. */
+ r5 = mca_read_stored_pos(tp->slot_num, 5);
+ r5 &= CNFG_MEDIA_TYPE_MASK;
+ switch(r5)
+ {
+ case (0):
+ tp->media_type = MEDIA_STP_4;
+ break;
+
+ case (1):
+ tp->media_type = MEDIA_STP_16;
+ break;
+
+ case (3):
+ tp->media_type = MEDIA_UTP_16;
+ break;
+
+ default:
+ tp->media_type = MEDIA_UTP_4;
+ break;
+ }
+ tp->media_menu = 14;
+
+ r2 = mca_read_stored_pos(tp->slot_num, 2);
+ if(!(r2 & 0x02))
+ tp->mode_bits |= EARLY_TOKEN_REL;
+
+ /* Disable slot */
+ outb(0, CNFG_POS_CONTROL_REG);
+
+ tp->board_id = smctr_get_boardid(dev, 1);
+ switch(tp->board_id & 0xffff)
+ {
+ case WD8115TA:
+ smctr_model = "8115T/A";
+ break;
+
+ case WD8115T:
+ if(tp->extra_info & CHIP_REV_MASK)
+ smctr_model = "8115T rev XE";
+ else
+ smctr_model = "8115T rev XD";
+ break;
+
+ default:
+ smctr_model = "Unknown";
+ break;
+ }
+
+ return (0);
+#else
+ return (-1);
+#endif /* CONFIG_MCA_LEGACY */
+}
+
+static int smctr_chg_rx_mask(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name);
+
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if(tp->mode_bits & LOOPING_MODE_MASK)
+ tp->config_word0 |= RX_OWN_BIT;
+ else
+ tp->config_word0 &= ~RX_OWN_BIT;
+
+ if(tp->receive_mask & PROMISCUOUS_MODE)
+ tp->config_word0 |= PROMISCUOUS_BIT;
+ else
+ tp->config_word0 &= ~PROMISCUOUS_BIT;
+
+ if(tp->receive_mask & ACCEPT_ERR_PACKETS)
+ tp->config_word0 |= SAVBAD_BIT;
+ else
+ tp->config_word0 &= ~SAVBAD_BIT;
+
+ if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
+ tp->config_word0 |= RXATMAC;
+ else
+ tp->config_word0 &= ~RXATMAC;
+
+ if(tp->receive_mask & ACCEPT_MULTI_PROM)
+ tp->config_word1 |= MULTICAST_ADDRESS_BIT;
+ else
+ tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
+
+ if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
+ tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
+ else
+ {
+ if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
+ tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
+ else
+ tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
+ }
+
+ if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
+ &tp->config_word0)))
+ {
+ return (err);
+ }
+
+ if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
+ &tp->config_word1)))
+ {
+ return (err);
+ }
+
+ smctr_disable_16bit(dev);
+
+ return (0);
+}
+
+static int smctr_clear_int(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
+
+ return (0);
+}
+
+static int smctr_clear_trc_reset(int ioaddr)
+{
+ __u8 r;
+
+ r = inb(ioaddr + MSR);
+ outb(~MSR_RST & r, ioaddr + MSR);
+
+ return (0);
+}
+
+/*
+ * The inverse routine to smctr_open().
+ */
+static int smctr_close(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int err;
+
+ netif_stop_queue(dev);
+
+ tp->cleanup = 1;
+
+ /* Check to see if adapter is already in a closed state. */
+ if(tp->status != OPEN)
+ return (0);
+
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if((err = smctr_issue_remove_cmd(dev)))
+ {
+ smctr_disable_16bit(dev);
+ return (err);
+ }
+
+ for(;;)
+ {
+ skb = skb_dequeue(&tp->SendSkbQueue);
+ if(skb == NULL)
+ break;
+ tp->QueueSkb++;
+ dev_kfree_skb(skb);
+ }
+
+
+ return (0);
+}
+
+static int smctr_decode_firmware(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ short bit = 0x80, shift = 12;
+ DECODE_TREE_NODE *tree;
+ short branch, tsize;
+ __u16 buff = 0;
+ long weight;
+ __u8 *ucode;
+ __u16 *mem;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name);
+
+ weight = *(long *)(tp->ptr_ucode + WEIGHT_OFFSET);
+ tsize = *(__u8 *)(tp->ptr_ucode + TREE_SIZE_OFFSET);
+ tree = (DECODE_TREE_NODE *)(tp->ptr_ucode + TREE_OFFSET);
+ ucode = (__u8 *)(tp->ptr_ucode + TREE_OFFSET
+ + (tsize * sizeof(DECODE_TREE_NODE)));
+ mem = (__u16 *)(tp->ram_access);
+
+ while(weight)
+ {
+ branch = ROOT;
+ while((tree + branch)->tag != LEAF && weight)
+ {
+ branch = *ucode & bit ? (tree + branch)->llink
+ : (tree + branch)->rlink;
+
+ bit >>= 1;
+ weight--;
+
+ if(bit == 0)
+ {
+ bit = 0x80;
+ ucode++;
+ }
+ }
+
+ buff |= (tree + branch)->info << shift;
+ shift -= 4;
+
+ if(shift < 0)
+ {
+ *(mem++) = SWAP_BYTES(buff);
+ buff = 0;
+ shift = 12;
+ }
+ }
+
+ /* The following assumes the Control Store Memory has
+ * been initialized to zero. If the last partial word
+ * is zero, it will not be written.
+ */
+ if(buff)
+ *(mem++) = SWAP_BYTES(buff);
+
+ return (0);
+}
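+
+/* smctr_decode_firmware() above is a Huffman-style expansion: it walks the
+ * decode tree shipped with the firmware image (tp->ptr_ucode) one input bit
+ * at a time, each LEAF node yields a 4-bit nibble, and four nibbles are
+ * packed into a 16-bit word which is byte-swapped and written to control
+ * store RAM (tp->ram_access). "weight" is the total number of encoded bits
+ * to consume.
+ */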
+
+static int smctr_disable_16bit(struct net_device *dev)
+{
+ return (0);
+}
+
+/*
+ * On Exit, Adapter is:
+ * 1. TRC is in a reset state and un-initialized.
+ * 2. Adapter memory is enabled.
+ * 3. Control Store memory is out of context (-WCSS is 1).
+ */
+static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name);
+
+ tp->trc_mask |= CSR_WCSS;
+ outb(tp->trc_mask, ioaddr + CSR);
+
+ return (0);
+}
+
+static int smctr_disable_bic_int(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
+ | CSR_MSKTINT | CSR_WCSS;
+ outb(tp->trc_mask, ioaddr + CSR);
+
+ return (0);
+}
+
+static int smctr_enable_16bit(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u8 r;
+
+ if(tp->adapter_bus == BUS_ISA16_TYPE)
+ {
+ r = inb(dev->base_addr + LAAR);
+ outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
+ }
+
+ return (0);
+}
+
+/*
+ * To enable the adapter control store memory:
+ * 1. Adapter must be in a RESET state.
+ * 2. Adapter memory must be enabled.
+ * 3. Control Store Memory is in context (-WCSS is 0).
+ */
+static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name);
+
+ smctr_set_trc_reset(ioaddr);
+ smctr_enable_adapter_ram(dev);
+
+ tp->trc_mask &= ~CSR_WCSS;
+ outb(tp->trc_mask, ioaddr + CSR);
+
+ return (0);
+}
+
+static int smctr_enable_adapter_ram(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ __u8 r;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name);
+
+ r = inb(ioaddr + MSR);
+ outb(MSR_MEMB | r, ioaddr + MSR);
+
+ return (0);
+}
+
+static int smctr_enable_bic_int(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ __u8 r;
+
+ switch(tp->bic_type)
+ {
+ case (BIC_584_CHIP):
+ tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
+ outb(tp->trc_mask, ioaddr + CSR);
+ r = inb(ioaddr + IRR);
+ outb(r | IRR_IEN, ioaddr + IRR);
+ break;
+
+ case (BIC_594_CHIP):
+ tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
+ outb(tp->trc_mask, ioaddr + CSR);
+ r = inb(ioaddr + IMCCR);
+ outb(r | IMCCR_EIL, ioaddr + IMCCR);
+ break;
+ }
+
+ return (0);
+}
+
+static int __init smctr_chk_isa(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ __u8 r1, r2, b, chksum = 0;
+ __u16 r;
+ int i;
+ int err = -ENODEV;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);
+
+ if((ioaddr & 0x1F) != 0)
+ goto out;
+
+ /* Grab the region so that no one else tries to probe our ioports. */
+ if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Checksum SMC node address */
+ for(i = 0; i < 8; i++)
+ {
+ b = inb(ioaddr + LAR0 + i);
+ chksum += b;
+ }
+
+ if (chksum != NODE_ADDR_CKSUM)
+ goto out2;
+
+ b = inb(ioaddr + BDID);
+ if(b != BRD_ID_8115T)
+ {
+ printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name);
+ goto out2;
+ }
+
+ /* Check for 8115T Board ID */
+ r2 = 0;
+ for(r = 0; r < 8; r++)
+ {
+ r1 = inb(ioaddr + 0x8 + r);
+ r2 += r1;
+ }
+
+ /* The value of RegF brings the sum up to 0xFF. */
+ if((r2 != 0xFF) && (r2 != 0xEE))
+ goto out2;
+
+ /* Get adapter ID */
+ tp->board_id = smctr_get_boardid(dev, 0);
+ switch(tp->board_id & 0xffff)
+ {
+ case WD8115TA:
+ smctr_model = "8115T/A";
+ break;
+
+ case WD8115T:
+ if(tp->extra_info & CHIP_REV_MASK)
+ smctr_model = "8115T rev XE";
+ else
+ smctr_model = "8115T rev XD";
+ break;
+
+ default:
+ smctr_model = "Unknown";
+ break;
+ }
+
+ /* Store BIC type. */
+ tp->bic_type = BIC_584_CHIP;
+ tp->nic_type = NIC_825_CHIP;
+
+ /* Copy Ram Size */
+ tp->ram_usable = CNFG_SIZE_16KB;
+ tp->ram_size = CNFG_SIZE_64KB;
+
+ /* Get 58x Ram Base */
+ r1 = inb(ioaddr);
+ r1 &= 0x3F;
+
+ r2 = inb(ioaddr + CNFG_LAAR_584);
+ r2 &= CNFG_LAAR_MASK;
+ r2 <<= 3;
+ r2 |= ((r1 & 0x38) >> 3);
+
+ tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);
+
+ /* Get 584 Irq */
+ r1 = 0;
+ r1 = inb(ioaddr + CNFG_ICR_583);
+ r1 &= CNFG_ICR_IR2_584;
+
+ r2 = inb(ioaddr + CNFG_IRR_583);
+ r2 &= CNFG_IRR_IRQS; /* 0x60 */
+ r2 >>= 5;
+
+ switch(r2)
+ {
+ case 0:
+ if(r1 == 0)
+ dev->irq = 2;
+ else
+ dev->irq = 10;
+ break;
+
+ case 1:
+ if(r1 == 0)
+ dev->irq = 3;
+ else
+ dev->irq = 11;
+ break;
+
+ case 2:
+ if(r1 == 0)
+ {
+ if(tp->extra_info & ALTERNATE_IRQ_BIT)
+ dev->irq = 5;
+ else
+ dev->irq = 4;
+ }
+ else
+ dev->irq = 15;
+ break;
+
+ case 3:
+ if(r1 == 0)
+ dev->irq = 7;
+ else
+ dev->irq = 4;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name);
+ goto out2;
+ }
+
+ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
+ goto out2;
+
+ /* Get 58x Rom Base */
+ r1 = inb(ioaddr + CNFG_BIO_583);
+ r1 &= 0x3E;
+ r1 |= 0x40;
+
+ tp->rom_base = (__u32)r1 << 13;
+
+ /* Get 58x Rom Size */
+ r1 = inb(ioaddr + CNFG_BIO_583);
+ r1 &= 0xC0;
+ if(r1 == 0)
+ tp->rom_size = ROM_DISABLE;
+ else
+ {
+ r1 >>= 6;
+ tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
+ }
+
+ /* Get 58x Boot Status */
+ r1 = inb(ioaddr + CNFG_GP2);
+
+ tp->mode_bits &= (~BOOT_STATUS_MASK);
+
+ if(r1 & CNFG_GP2_BOOT_NIBBLE)
+ tp->mode_bits |= BOOT_TYPE_1;
+
+ /* Get 58x Zero Wait State */
+ tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);
+
+ r1 = inb(ioaddr + CNFG_IRR_583);
+
+ if(r1 & CNFG_IRR_ZWS)
+ tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;
+
+ if(tp->board_id & BOARD_16BIT)
+ {
+ r1 = inb(ioaddr + CNFG_LAAR_584);
+
+ if(r1 & CNFG_LAAR_ZWS)
+ tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
+ }
+
+ /* Get 584 Media Menu */
+ tp->media_menu = 14;
+ r1 = inb(ioaddr + CNFG_IRR_583);
+
+ tp->mode_bits &= 0xf8ff; /* (~CNFG_INTERFACE_TYPE_MASK) */
+ if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
+ {
+ /* Get Advanced Features */
+ if(((r1 & 0x6) >> 1) == 0x3)
+ tp->media_type |= MEDIA_UTP_16;
+ else
+ {
+ if(((r1 & 0x6) >> 1) == 0x2)
+ tp->media_type |= MEDIA_STP_16;
+ else
+ {
+ if(((r1 & 0x6) >> 1) == 0x1)
+ tp->media_type |= MEDIA_UTP_4;
+
+ else
+ tp->media_type |= MEDIA_STP_4;
+ }
+ }
+
+ r1 = inb(ioaddr + CNFG_GP2);
+ if(!(r1 & 0x2) ) /* GP2_ETRD */
+ tp->mode_bits |= EARLY_TOKEN_REL;
+
+ /* see if the chip is corrupted
+ if(smctr_read_584_chksum(ioaddr))
+ {
+ printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name);
+ free_irq(dev->irq, dev);
+ goto out2;
+ }
+ */
+ }
+
+ return (0);
+
+out2:
+ release_region(ioaddr, SMCTR_IO_EXTENT);
+out:
+ return err;
+}
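+
+/* Summary of the ISA probe above: the node address bytes at LAR0..LAR7 must
+ * checksum to NODE_ADDR_CKSUM and BDID must read back as BRD_ID_8115T; the
+ * RAM base, ROM base/size, IRQ and media type are then decoded from the
+ * 583/584 configuration registers before the IRQ line is claimed.
+ */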
+
+static int __init smctr_get_boardid(struct net_device *dev, int mca)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ __u8 r, r1, IdByte;
+ __u16 BoardIdMask;
+
+ tp->board_id = BoardIdMask = 0;
+
+ if(mca)
+ {
+ BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
+ tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
+ }
+ else
+ {
+ BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
+ tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
+ + NIC_825_BIT + ALTERNATE_IRQ_BIT);
+ }
+
+ if(!mca)
+ {
+ r = inb(ioaddr + BID_REG_1);
+ r &= 0x0c;
+ outb(r, ioaddr + BID_REG_1);
+ r = inb(ioaddr + BID_REG_1);
+
+ if(r & BID_SIXTEEN_BIT_BIT)
+ {
+ tp->extra_info |= SLOT_16BIT;
+ tp->adapter_bus = BUS_ISA16_TYPE;
+ }
+ else
+ tp->adapter_bus = BUS_ISA8_TYPE;
+ }
+ else
+ tp->adapter_bus = BUS_MCA_TYPE;
+
+ /* Get Board Id Byte */
+ IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);
+
+ /* if Major version > 1.0 then
+ * return;
+ */
+ if(IdByte & 0xF8)
+ return (-1);
+
+ r1 = inb(ioaddr + BID_REG_1);
+ r1 &= BID_ICR_MASK;
+ r1 |= BID_OTHER_BIT;
+
+ outb(r1, ioaddr + BID_REG_1);
+ r1 = inb(ioaddr + BID_REG_3);
+
+ r1 &= BID_EAR_MASK;
+ r1 |= BID_ENGR_PAGE;
+
+ outb(r1, ioaddr + BID_REG_3);
+ r1 = inb(ioaddr + BID_REG_1);
+ r1 &= BID_ICR_MASK;
+ r1 |= (BID_RLA | BID_OTHER_BIT);
+
+ outb(r1, ioaddr + BID_REG_1);
+
+ r1 = inb(ioaddr + BID_REG_1);
+ while(r1 & BID_RECALL_DONE_MASK)
+ r1 = inb(ioaddr + BID_REG_1);
+
+ r = inb(ioaddr + BID_LAR_0 + BID_REG_6);
+
+ /* clear chip rev bits */
+ tp->extra_info &= ~CHIP_REV_MASK;
+ tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);
+
+ r1 = inb(ioaddr + BID_REG_1);
+ r1 &= BID_ICR_MASK;
+ r1 |= BID_OTHER_BIT;
+
+ outb(r1, ioaddr + BID_REG_1);
+ r1 = inb(ioaddr + BID_REG_3);
+
+ r1 &= BID_EAR_MASK;
+ r1 |= BID_EA6;
+
+ outb(r1, ioaddr + BID_REG_3);
+ r1 = inb(ioaddr + BID_REG_1);
+
+ r1 &= BID_ICR_MASK;
+ r1 |= BID_RLA;
+
+ outb(r1, ioaddr + BID_REG_1);
+ r1 = inb(ioaddr + BID_REG_1);
+
+ while(r1 & BID_RECALL_DONE_MASK)
+ r1 = inb(ioaddr + BID_REG_1);
+
+ return (BoardIdMask);
+}
+
+static int smctr_get_group_address(struct net_device *dev)
+{
+ smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
+
+ return(smctr_wait_cmd(dev));
+}
+
+static int smctr_get_functional_address(struct net_device *dev)
+{
+ smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
+
+ return(smctr_wait_cmd(dev));
+}
+
+/* Calculate number of Non-MAC receive BDB's and data buffers.
+ * This function must simulate allocating shared memory exactly
+ * as the smctr_alloc_shared_memory function above.
+ */
+static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int mem_used = 0;
+
+ /* Allocate System Control Blocks. */
+ mem_used += sizeof(SCGBlock);
+
+ mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
+ mem_used += sizeof(SCLBlock);
+
+ mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
+ mem_used += sizeof(ACBlock) * tp->num_acbs;
+
+ mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
+ mem_used += sizeof(ISBlock);
+
+ mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
+ mem_used += MISC_DATA_SIZE;
+
+ /* Allocate transmit FCB's. */
+ mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
+
+ mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
+ mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
+ mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
+
+ /* Allocate transmit BDBs. */
+ mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
+ mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
+ mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
+
+ /* Allocate receive FCBs. */
+ mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
+ mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
+
+ /* Allocate receive BDBs. */
+ mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
+
+ /* Allocate MAC transmit buffers.
+ * MAC transmit buffers don't have to be on an ODD boundary.
+ */
+ mem_used += tp->tx_buff_size[MAC_QUEUE];
+
+ /* Allocate BUG transmit buffers. */
+ mem_used += tp->tx_buff_size[BUG_QUEUE];
+
+ /* Allocate MAC receive data buffers.
+ * MAC receive buffers don't have to be on a 256 byte boundary.
+ */
+ mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
+
+ /* Allocate Non-MAC transmit buffers.
+ * For maximum Netware performance, put Tx Buffers on
+ * ODD boundary, and then restore malloc to even boundaries.
+ */
+ mem_used += 1L;
+ mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
+ mem_used += 1L;
+
+ /* CALCULATE NUMBER OF NON-MAC RX BDB'S
+ * AND NON-MAC RX DATA BUFFERS
+ *
+ * Make sure the mem_used offset at this point is the
+ * same as in smctr_alloc_shared_memory or the following
+ * boundary adjustment will be incorrect (i.e. skipping the
+ * allocation of the non-MAC receive buffers above must not
+ * change the 256 byte offset).
+ *
+ * Since this cannot be guaranteed, adding the full 256 bytes
+ * to the amount of shared memory used at this point guarantees
+ * that the rx data buffers do not overflow shared memory.
+ */
+ mem_used += 0x100;
+
+ return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
+}
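+
+/* The division above sizes the Non-MAC receive pool: whatever remains of the
+ * 64K shared memory window (0xffff - mem_used) is split into units of one
+ * BDB descriptor plus one RX_DATA_BUFFER_SIZE data buffer, so each returned
+ * unit funds exactly one receive BDB and its buffer in
+ * smctr_alloc_shared_memory().
+ */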
+
+static int smctr_get_physical_drop_number(struct net_device *dev)
+{
+ smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
+
+ return(smctr_wait_cmd(dev));
+}
+
+static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ BDBlock *bdb;
+
+ bdb = (BDBlock *)((__u32)tp->ram_access
+ + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));
+
+ tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
+
+ return ((__u8 *)bdb->data_block_ptr);
+}
+
+static int smctr_get_station_id(struct net_device *dev)
+{
+ smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
+
+ return(smctr_wait_cmd(dev));
+}
+
+/*
+ * Get the current statistics. This may be called with the card open
+ * or closed.
+ */
+static struct net_device_stats *smctr_get_stats(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ return ((struct net_device_stats *)&tp->MacStat);
+}
+
+static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
+ __u16 bytes_count)
+{
+ struct net_local *tp = netdev_priv(dev);
+ FCBlock *pFCB;
+ BDBlock *pbdb;
+ unsigned short alloc_size;
+ unsigned short *temp;
+
+ if(smctr_debug > 20)
+ printk(KERN_DEBUG "smctr_get_tx_fcb\n");
+
+ /* check if there are enough FCB blocks */
+ if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
+ return ((FCBlock *)(-1L));
+
+ /* round the input pkt size up to the nearest even number */
+ alloc_size = (bytes_count + 1) & 0xfffe;
+
+ /* check if enough mem */
+ if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
+ return ((FCBlock *)(-1L));
+
+ /* Check if the allocation would run past the end of the ring;
+ * if there is exactly enough memory to reach the end, allocate
+ * from the front instead. This avoids updating curr when curr == end.
+ */
+ if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
+ >= (unsigned long)(tp->tx_buff_end[queue]))
+ {
+ /* check if enough memory from ring head */
+ alloc_size = alloc_size +
+ (__u16)((__u32)tp->tx_buff_end[queue]
+ - (__u32)tp->tx_buff_curr[queue]);
+
+ if((tp->tx_buff_used[queue] + alloc_size)
+ > tp->tx_buff_size[queue])
+ {
+ return ((FCBlock *)(-1L));
+ }
+
+ /* ring wrap */
+ tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
+ }
+
+ tp->tx_buff_used[queue] += alloc_size;
+ tp->num_tx_fcbs_used[queue]++;
+ tp->tx_fcb_curr[queue]->frame_length = bytes_count;
+ tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
+ temp = tp->tx_buff_curr[queue];
+ tp->tx_buff_curr[queue]
+ = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));
+
+ pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
+ pbdb->buffer_length = bytes_count;
+ pbdb->data_block_ptr = temp;
+ pbdb->trc_data_block_ptr = TRC_POINTER(temp);
+
+ pFCB = tp->tx_fcb_curr[queue];
+ tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
+
+ return (pFCB);
+}
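+
+/* Transmit buffer space above is managed as a ring carved out of shared
+ * memory: the request is rounded up to an even byte count, and if it would
+ * run past tx_buff_end[] the allocation wraps to tx_buff_head[], with the
+ * skipped tail still charged against tx_buff_used[] so the accounting stays
+ * consistent until smctr_update_tx_chain() presumably releases it.
+ */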
+
+static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
+{
+ smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
+
+ return(smctr_wait_cmd(dev));
+}
+
+static int smctr_hardware_send_packet(struct net_device *dev,
+ struct net_local *tp)
+{
+ struct tr_statistics *tstat = &tp->MacStat;
+ struct sk_buff *skb;
+ FCBlock *fcb;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
+
+ if(tp->status != OPEN)
+ return (-1);
+
+ if(tp->monitor_state_ready != 1)
+ return (-1);
+
+ for(;;)
+ {
+ /* Send first buffer from queue */
+ skb = skb_dequeue(&tp->SendSkbQueue);
+ if(skb == NULL)
+ return (-1);
+
+ tp->QueueSkb++;
+
+ if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
+ return (-1);
+
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
+ == (FCBlock *)(-1L))
+ {
+ smctr_disable_16bit(dev);
+ return (-1);
+ }
+
+ smctr_tx_move_frame(dev, skb,
+ (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
+
+ smctr_set_page(dev, (__u8 *)fcb);
+
+ smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
+ dev_kfree_skb(skb);
+
+ tstat->tx_packets++;
+
+ smctr_disable_16bit(dev);
+ }
+
+ return (0);
+}
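+
+/* The send loop above drains tp->SendSkbQueue: each frame is copied into a
+ * freshly allocated Non-MAC transmit FCB's data buffer with
+ * smctr_tx_move_frame() and handed to the TRC via smctr_trc_send_packet().
+ * Note that the loop exits with -1 once the queue is empty, so the trailing
+ * return (0) is never reached.
+ */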
+
+static int smctr_init_acbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i;
+ ACBlock *acb;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);
+
+ acb = tp->acb_head;
+ acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
+ acb->cmd_info = ACB_CHAIN_END;
+ acb->cmd = 0;
+ acb->subcmd = 0;
+ acb->data_offset_lo = 0;
+ acb->data_offset_hi = 0;
+ acb->next_ptr
+ = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
+ acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
+
+ for(i = 1; i < tp->num_acbs; i++)
+ {
+ acb = acb->next_ptr;
+ acb->cmd_done_status
+ = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
+ acb->cmd_info = ACB_CHAIN_END;
+ acb->cmd = 0;
+ acb->subcmd = 0;
+ acb->data_offset_lo = 0;
+ acb->data_offset_hi = 0;
+ acb->next_ptr
+ = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
+ acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
+ }
+
+ acb->next_ptr = tp->acb_head;
+ acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
+ tp->acb_next = tp->acb_head->next_ptr;
+ tp->acb_curr = tp->acb_head->next_ptr;
+ tp->num_acbs_used = 0;
+
+ return (0);
+}
+
+static int smctr_init_adapter(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);
+
+ tp->status = CLOSED;
+ tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
+ skb_queue_head_init(&tp->SendSkbQueue);
+ tp->QueueSkb = MAX_TX_QUEUE;
+
+ if(!(tp->group_address_0 & 0x0080))
+ tp->group_address_0 |= 0x00C0;
+
+ if(!(tp->functional_address_0 & 0x00C0))
+ tp->functional_address_0 |= 0x00C0;
+
+ tp->functional_address[0] &= 0xFF7F;
+
+ if(tp->authorized_function_classes == 0)
+ tp->authorized_function_classes = 0x7FFF;
+
+ if(tp->authorized_access_priority == 0)
+ tp->authorized_access_priority = 0x06;
+
+ smctr_disable_bic_int(dev);
+ smctr_set_trc_reset(dev->base_addr);
+
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if(smctr_checksum_firmware(dev))
+ {
+ printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); return (-ENOENT);
+ }
+
+ if((err = smctr_ram_memory_test(dev)))
+ {
+ printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
+ return (-EIO);
+ }
+
+ smctr_set_rx_look_ahead(dev);
+ smctr_load_node_addr(dev);
+
+ /* Initialize adapter for Internal Self Test. */
+ smctr_reset_adapter(dev);
+ if((err = smctr_init_card_real(dev)))
+ {
+ printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
+ dev->name, err);
+ return (-EINVAL);
+ }
+
+ /* This routine clobbers the TRC's internal registers. */
+ if((err = smctr_internal_self_test(dev)))
+ {
+ printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
+ dev->name, err);
+ return (-EINVAL);
+ }
+
+ /* Re-Initialize adapter's internal registers */
+ smctr_reset_adapter(dev);
+ if((err = smctr_init_card_real(dev)))
+ {
+ printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
+ dev->name, err);
+ return (-EINVAL);
+ }
+
+ smctr_enable_bic_int(dev);
+
+ if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
+ return (err);
+
+ smctr_disable_16bit(dev);
+
+ return (0);
+}
+
+static int smctr_init_card_real(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name);
+
+ tp->sh_mem_used = 0;
+ tp->num_acbs = NUM_OF_ACBS;
+
+ /* Range Check Max Packet Size */
+ if(tp->max_packet_size < 256)
+ tp->max_packet_size = 256;
+ else
+ {
+ if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
+ tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
+ }
+
+ tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
+ / tp->max_packet_size) - 1;
+
+ if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
+ tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
+ else
+ {
+ if(tp->num_of_tx_buffs == 0)
+ tp->num_of_tx_buffs = 1;
+ }
+
+ /* Tx queue constants */
+ tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS;
+ tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS;
+ tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY;
+ tp->tx_buff_used [BUG_QUEUE] = 0;
+ tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING;
+
+ tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS;
+ tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS;
+ tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY;
+ tp->tx_buff_used [MAC_QUEUE] = 0;
+ tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING;
+
+ tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
+ tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
+ tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
+ tp->tx_buff_used [NON_MAC_QUEUE] = 0;
+ tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;
+
+ /* Receive Queue Constants */
+ tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
+ tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
+
+ if(tp->extra_info & CHIP_REV_MASK)
+ tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */
+ else
+ tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */
+
+ tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
+
+ smctr_alloc_shared_memory(dev);
+ smctr_init_shared_memory(dev);
+
+ if((err = smctr_issue_init_timers_cmd(dev)))
+ return (err);
+
+ if((err = smctr_issue_init_txrx_cmd(dev)))
+ {
+ printk(KERN_ERR "%s: Hardware failure\n", dev->name);
+ return (err);
+ }
+
+ return (0);
+}
+
+static int smctr_init_rx_bdbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, j;
+ BDBlock *bdb;
+ __u16 *buf;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name);
+
+ for(i = 0; i < NUM_RX_QS_USED; i++)
+ {
+ bdb = tp->rx_bdb_head[i];
+ buf = tp->rx_buff_head[i];
+ bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
+ bdb->buffer_length = RX_DATA_BUFFER_SIZE;
+ bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
+ bdb->data_block_ptr = buf;
+ bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
+
+ if(i == NON_MAC_QUEUE)
+ bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
+ else
+ bdb->trc_data_block_ptr = TRC_POINTER(buf);
+
+ for(j = 1; j < tp->num_rx_bdbs[i]; j++)
+ {
+ bdb->next_ptr->back_ptr = bdb;
+ bdb = bdb->next_ptr;
+ buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
+ bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
+ bdb->buffer_length = RX_DATA_BUFFER_SIZE;
+ bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
+ bdb->data_block_ptr = buf;
+ bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
+
+ if(i == NON_MAC_QUEUE)
+ bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
+ else
+ bdb->trc_data_block_ptr = TRC_POINTER(buf);
+ }
+
+ bdb->next_ptr = tp->rx_bdb_head[i];
+ bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);
+
+ tp->rx_bdb_head[i]->back_ptr = bdb;
+ tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
+ }
+
+ return (0);
+}
+
+static int smctr_init_rx_fcbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, j;
+ FCBlock *fcb;
+
+ for(i = 0; i < NUM_RX_QS_USED; i++)
+ {
+ fcb = tp->rx_fcb_head[i];
+ fcb->frame_status = 0;
+ fcb->frame_length = 0;
+ fcb->info = FCB_CHAIN_END;
+ fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
+ if(i == NON_MAC_QUEUE)
+ fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
+ else
+ fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
+
+ for(j = 1; j < tp->num_rx_fcbs[i]; j++)
+ {
+ fcb->next_ptr->back_ptr = fcb;
+ fcb = fcb->next_ptr;
+ fcb->frame_status = 0;
+ fcb->frame_length = 0;
+ fcb->info = FCB_WARNING;
+ fcb->next_ptr
+ = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
+
+ if(i == NON_MAC_QUEUE)
+ fcb->trc_next_ptr
+ = RX_FCB_TRC_POINTER(fcb->next_ptr);
+ else
+ fcb->trc_next_ptr
+ = TRC_POINTER(fcb->next_ptr);
+ }
+
+ fcb->next_ptr = tp->rx_fcb_head[i];
+
+ if(i == NON_MAC_QUEUE)
+ fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
+ else
+ fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
+
+ tp->rx_fcb_head[i]->back_ptr = fcb;
+ tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
+ }
+
+ return(0);
+}
+
+static int smctr_init_shared_memory(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i;
+ __u32 *iscpb;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name);
+
+ smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);
+
+ /* Initialize Initial System Configuration Point. (ISCP) */
+ iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
+ *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));
+
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ /* Initialize System Configuration Pointers. (SCP) */
+ tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
+ | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
+ | SCGB_BURST_LENGTH);
+
+ tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr);
+ tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head);
+ tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr);
+ tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2;
+
+ /* Initialize System Control Block. (SCB) */
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP;
+ tp->sclb_ptr->iack_code = 0;
+ tp->sclb_ptr->resume_control = 0;
+ tp->sclb_ptr->int_mask_control = 0;
+ tp->sclb_ptr->int_mask_state = 0;
+
+ /* Initialize Interrupt Status Block. (ISB) */
+ for(i = 0; i < NUM_OF_INTERRUPTS; i++)
+ {
+ tp->isb_ptr->IStatus[i].IType = 0xf0;
+ tp->isb_ptr->IStatus[i].ISubtype = 0;
+ }
+
+ tp->current_isb_index = 0;
+
+ /* Initialize Action Command Block. (ACB) */
+ smctr_init_acbs(dev);
+
+ /* Initialize transmit FCB's and BDB's. */
+ smctr_link_tx_fcbs_to_bdbs(dev);
+ smctr_init_tx_bdbs(dev);
+ smctr_init_tx_fcbs(dev);
+
+ /* Initialize receive FCB's and BDB's. */
+ smctr_init_rx_bdbs(dev);
+ smctr_init_rx_fcbs(dev);
+
+ return (0);
+}
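+
+/* Shared memory bring-up order above: point the ISCP at the SCGB, then fill
+ * in the SCGB (SCLB/ACB/ISB pointers and ISB size), put the SCLB in a valid
+ * NOP state, mark every ISB entry free (IType 0xf0), and finally build the
+ * ACB ring and the transmit/receive FCB and BDB chains.
+ */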
+
+static int smctr_init_tx_bdbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, j;
+ BDBlock *bdb;
+
+ for(i = 0; i < NUM_TX_QS_USED; i++)
+ {
+ bdb = tp->tx_bdb_head[i];
+ bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
+ bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
+ bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
+
+ for(j = 1; j < tp->num_tx_bdbs[i]; j++)
+ {
+ bdb->next_ptr->back_ptr = bdb;
+ bdb = bdb->next_ptr;
+ bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
+ bdb->next_ptr
+ = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
+ bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
+ }
+
+ bdb->next_ptr = tp->tx_bdb_head[i];
+ bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
+ tp->tx_bdb_head[i]->back_ptr = bdb;
+ }
+
+ return (0);
+}
+
+static int smctr_init_tx_fcbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, j;
+ FCBlock *fcb;
+
+ for(i = 0; i < NUM_TX_QS_USED; i++)
+ {
+ fcb = tp->tx_fcb_head[i];
+ fcb->frame_status = 0;
+ fcb->frame_length = 0;
+ fcb->info = FCB_CHAIN_END;
+ fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
+ fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
+
+ for(j = 1; j < tp->num_tx_fcbs[i]; j++)
+ {
+ fcb->next_ptr->back_ptr = fcb;
+ fcb = fcb->next_ptr;
+ fcb->frame_status = 0;
+ fcb->frame_length = 0;
+ fcb->info = FCB_CHAIN_END;
+ fcb->next_ptr
+ = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
+ fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
+ }
+
+ fcb->next_ptr = tp->tx_fcb_head[i];
+ fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);
+
+ tp->tx_fcb_head[i]->back_ptr = fcb;
+ tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr;
+ tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr;
+ tp->num_tx_fcbs_used[i] = 0;
+ }
+
+ return (0);
+}
+
+static int smctr_internal_self_test(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if((err = smctr_issue_test_internal_rom_cmd(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ if(tp->acb_head->cmd_done_status & 0xff)
+ return (-1);
+
+ if((err = smctr_issue_test_hic_cmd(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ if(tp->acb_head->cmd_done_status & 0xff)
+ return (-1);
+
+ if((err = smctr_issue_test_mac_reg_cmd(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ if(tp->acb_head->cmd_done_status & 0xff)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * The typical workload of the driver: Handle the network interface interrupts.
+ */
+static irqreturn_t smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *tp;
+ int ioaddr;
+ __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
+ __u16 err1, err = NOT_MY_INTERRUPT;
+ __u8 isb_type, isb_subtype;
+ __u16 isb_index;
+
+ if(dev == NULL)
+ {
+ printk(KERN_CRIT "%s: irq %d for unknown device.\n", dev->name, irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ tp = netdev_priv(dev);
+
+
+ if(tp->status == NOT_INITIALIZED)
+ return IRQ_NONE;
+
+ spin_lock(&tp->lock);
+
+ smctr_disable_bic_int(dev);
+ smctr_enable_16bit(dev);
+
+ smctr_clear_int(dev);
+
+ /* First read the LSB */
+ while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
+ {
+ isb_index = tp->current_isb_index;
+ isb_type = tp->isb_ptr->IStatus[isb_index].IType;
+ isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;
+
+ (tp->current_isb_index)++;
+ if(tp->current_isb_index == NUM_OF_INTERRUPTS)
+ tp->current_isb_index = 0;
+
+ if(isb_type >= 0x10)
+ {
+ smctr_disable_16bit(dev);
+ spin_unlock(&tp->lock);
+ return IRQ_HANDLED;
+ }
+
+ err = HARDWARE_FAILED;
+ interrupt_ack_code = isb_index;
+ tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
+
+ interrupt_unmask_bits |= (1 << (__u16)isb_type);
+
+ switch(isb_type)
+ {
+ case ISB_IMC_MAC_TYPE_3:
+ smctr_disable_16bit(dev);
+
+ switch(isb_subtype)
+ {
+ case 0:
+ tp->monitor_state = MS_MONITOR_FSM_INACTIVE;
+ break;
+
+ case 1:
+ tp->monitor_state = MS_REPEAT_BEACON_STATE;
+ break;
+
+ case 2:
+ tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE;
+ break;
+
+ case 3:
+ tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE;
+ break;
+
+ case 4:
+ tp->monitor_state = MS_STANDBY_MONITOR_STATE;
+ break;
+
+ case 5:
+ tp->monitor_state = MS_TRANSMIT_BEACON_STATE;
+ break;
+
+ case 6:
+ tp->monitor_state = MS_ACTIVE_MONITOR_STATE;
+ break;
+
+ case 7:
+ tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE;
+ break;
+
+ case 8: /* diagnostic state */
+ break;
+
+ case 9:
+ tp->monitor_state = MS_BEACON_TEST_STATE;
+ if(smctr_lobe_media_test(dev))
+ {
+ tp->ring_status_flags = RING_STATUS_CHANGED;
+ tp->ring_status = AUTO_REMOVAL_ERROR;
+ smctr_ring_status_chg(dev);
+ smctr_bypass_state(dev);
+ }
+ else
+ smctr_issue_insert_cmd(dev);
+ break;
+
+ /* case 0x0a-0xff, illegal states */
+ default:
+ break;
+ }
+
+ tp->ring_status_flags = MONITOR_STATE_CHANGED;
+ err = smctr_ring_status_chg(dev);
+
+ smctr_enable_16bit(dev);
+ break;
+
+ /* Type 0x02 - MAC Error Counters Interrupt
+ * One or more MAC Error Counter is half full
+ * MAC Error Counters
+ * Lost_FR_Error_Counter
+ * RCV_Congestion_Counter
+ * FR_copied_Error_Counter
+ * FREQ_Error_Counter
+ * Token_Error_Counter
+ * Line_Error_Counter
+ * Internal_Error_Count
+ */
+ case ISB_IMC_MAC_ERROR_COUNTERS:
+ /* Read 802.5 Error Counters */
+ err = smctr_issue_read_ring_status_cmd(dev);
+ break;
+
+ /* Type 0x04 - MAC Type 2 Interrupt
+ * HOST needs to enqueue MAC Frame for transmission
+ * SubType Bit 15 - RQ_INIT_PDU (Request Initialization)
+ * Changed from RQ_INIT_PDU to TRC_Status_Changed_Indicate
+ */
+ case ISB_IMC_MAC_TYPE_2:
+ err = smctr_issue_read_ring_status_cmd(dev);
+ break;
+
+
+ /* Type 0x05 - TX Frame Interrupt (FI). */
+ case ISB_IMC_TX_FRAME:
+ /* BUG QUEUE for TRC stuck receive BUG */
+ if(isb_subtype & TX_PENDING_PRIORITY_2)
+ {
+ if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
+ break;
+ }
+
+ /* NON-MAC frames only */
+ if(isb_subtype & TX_PENDING_PRIORITY_1)
+ {
+ if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
+ break;
+ }
+
+ /* MAC frames only */
+ if(isb_subtype & TX_PENDING_PRIORITY_0)
+ err = smctr_tx_complete(dev, MAC_QUEUE);
+ break;
+
+ /* Type 0x06 - TX END OF QUEUE (FE) */
+ case ISB_IMC_END_OF_TX_QUEUE:
+ /* BUG queue */
+ if(isb_subtype & TX_PENDING_PRIORITY_2)
+ {
+ /* OK to clear the Receive FIFO overrun
+ * interrupt mask now that send_BUG has completed.
+ */
+ interrupt_unmask_bits |= 0x800;
+
+ tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING;
+ if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
+ break;
+ if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS)
+ break;
+ }
+
+ /* NON-MAC queue only */
+ if(isb_subtype & TX_PENDING_PRIORITY_1)
+ {
+ tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING;
+ if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
+ break;
+ if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS)
+ break;
+ }
+
+ /* MAC queue only */
+ if(isb_subtype & TX_PENDING_PRIORITY_0)
+ {
+ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
+ if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS)
+ break;
+
+ err = smctr_restart_tx_chain(dev, MAC_QUEUE);
+ }
+ break;
+
+ /* Type 0x07 - NON-MAC RX Resource Interrupt
+ * Subtype bit 12 - (BW) BDB warning
+ * Subtype bit 13 - (FW) FCB warning
+ * Subtype bit 14 - (BE) BDB End of chain
+ * Subtype bit 15 - (FE) FCB End of chain
+ */
+ case ISB_IMC_NON_MAC_RX_RESOURCE:
+ tp->rx_fifo_overrun_count = 0;
+ tp->receive_queue_number = NON_MAC_QUEUE;
+ err1 = smctr_rx_frame(dev);
+
+ if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
+ {
+ if((err = smctr_issue_resume_rx_fcb_cmd(dev, NON_MAC_QUEUE)) != SUCCESS)
+ break;
+
+ if(tp->ptr_rx_fcb_overruns)
+ (*tp->ptr_rx_fcb_overruns)++;
+ }
+
+ if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
+ {
+ if((err = smctr_issue_resume_rx_bdb_cmd(dev, NON_MAC_QUEUE)) != SUCCESS)
+ break;
+
+ if(tp->ptr_rx_bdb_overruns)
+ (*tp->ptr_rx_bdb_overruns)++;
+ }
+ err = err1;
+ break;
+
+ /* Type 0x08 - MAC RX Resource Interrupt
+ * Subtype bit 12 - (BW) BDB warning
+ * Subtype bit 13 - (FW) FCB warning
+ * Subtype bit 14 - (BE) BDB End of chain
+ * Subtype bit 15 - (FE) FCB End of chain
+ */
+ case ISB_IMC_MAC_RX_RESOURCE:
+ tp->receive_queue_number = MAC_QUEUE;
+ err1 = smctr_rx_frame(dev);
+
+ if(isb_subtype & MAC_RX_RESOURCE_FE)
+ {
+ if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS)
+ break;
+
+ if(tp->ptr_rx_fcb_overruns)
+ (*tp->ptr_rx_fcb_overruns)++;
+ }
+
+ if(isb_subtype & MAC_RX_RESOURCE_BE)
+ {
+ if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS)
+ break;
+
+ if(tp->ptr_rx_bdb_overruns)
+ (*tp->ptr_rx_bdb_overruns)++;
+ }
+ err = err1;
+ break;
+
+ /* Type 0x09 - NON_MAC RX Frame Interrupt */
+ case ISB_IMC_NON_MAC_RX_FRAME:
+ tp->rx_fifo_overrun_count = 0;
+ tp->receive_queue_number = NON_MAC_QUEUE;
+ err = smctr_rx_frame(dev);
+ break;
+
+ /* Type 0x0A - MAC RX Frame Interrupt */
+ case ISB_IMC_MAC_RX_FRAME:
+ tp->receive_queue_number = MAC_QUEUE;
+ err = smctr_rx_frame(dev);
+ break;
+
+ /* Type 0x0B - TRC status
+ * TRC has encountered an error condition
+ * subtype bit 14 - transmit FIFO underrun
+ * subtype bit 15 - receive FIFO overrun
+ */
+ case ISB_IMC_TRC_FIFO_STATUS:
+ if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
+ {
+ if(tp->ptr_tx_fifo_underruns)
+ (*tp->ptr_tx_fifo_underruns)++;
+ }
+
+ if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
+ {
+ /* Update the stuck-receive overrun counter;
+ * if it reaches 3, it has to be cleared by
+ * sending back-to-back frames. We pick
+ * DAT (duplicate address MAC frame).
+ */
+ tp->rx_fifo_overrun_count++;
+
+ if(tp->rx_fifo_overrun_count >= 3)
+ {
+ tp->rx_fifo_overrun_count = 0;
+
+ /* delay clearing fifo overrun
+ * imask till send_BUG tx
+ * complete posted
+ */
+ interrupt_unmask_bits &= (~0x800);
+ printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev);
+ }
+
+ if(tp->ptr_rx_fifo_overruns)
+ (*tp->ptr_rx_fifo_overruns)++;
+ }
+
+ err = SUCCESS;
+ break;
+
+ /* Type 0x0C - Action Command Status Interrupt
+ * Subtype bit 14 - CB end of command chain (CE)
+ * Subtype bit 15 - CB command interrupt (CI)
+ */
+ case ISB_IMC_COMMAND_STATUS:
+ err = SUCCESS;
+ if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
+ {
+ printk(KERN_ERR "i1\n");
+ smctr_disable_16bit(dev);
+
+ /* XXXXXXXXXXXXXXXXX */
+ /* err = UM_Interrupt(dev); */
+
+ smctr_enable_16bit(dev);
+ }
+ else
+ {
+ if((tp->acb_head->cmd
+ == ACB_CMD_READ_TRC_STATUS)
+ && (tp->acb_head->subcmd
+ == RW_TRC_STATUS_BLOCK))
+ {
+ if(tp->ptr_bcn_type != 0)
+ {
+ *(tp->ptr_bcn_type)
+ = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
+ }
+
+ if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
+ {
+ smctr_update_err_stats(dev);
+ }
+
+ if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
+ {
+ tp->ring_status
+ = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
+ smctr_disable_16bit(dev);
+ err = smctr_ring_status_chg(dev);
+ smctr_enable_16bit(dev);
+ if((tp->ring_status & REMOVE_RECEIVED)
+ && (tp->config_word0 & NO_AUTOREMOVE))
+ {
+ smctr_issue_remove_cmd(dev);
+ }
+
+ if(err != SUCCESS)
+ {
+ tp->acb_pending = 0;
+ break;
+ }
+ }
+
+ if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
+ {
+ if(tp->ptr_una)
+ {
+ tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
+ tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
+ tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
+ }
+
+ }
+
+ if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT) {
+ err = smctr_send_rq_init(dev);
+ }
+ }
+ }
+
+ tp->acb_pending = 0;
+ break;
+
+ /* Type 0x0D - MAC Type 1 interrupt
+ * Subtype -- 00 FR_BCN received at S12
+ * 01 FR_BCN received at S21
+ * 02 FR_DAT(DA=MA, A<>0) received at S21
+ * 03 TSM_EXP at S21
+ * 04 FR_REMOVE received at S42
+ * 05 TBR_EXP, BR_FLAG_SET at S42
+ * 06 TBT_EXP at S53
+ */
+ case ISB_IMC_MAC_TYPE_1:
+ if(isb_subtype > 8)
+ {
+ err = HARDWARE_FAILED;
+ break;
+ }
+
+ err = SUCCESS;
+ switch(isb_subtype)
+ {
+ case 0:
+ tp->join_state = JS_BYPASS_STATE;
+ if(tp->status != CLOSED)
+ {
+ tp->status = CLOSED;
+ err = smctr_status_chg(dev);
+ }
+ break;
+
+ case 1:
+ tp->join_state = JS_LOBE_TEST_STATE;
+ break;
+
+ case 2:
+ tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE;
+ break;
+
+ case 3:
+ tp->join_state = JS_AWAIT_NEW_MONITOR_STATE;
+ break;
+
+ case 4:
+ tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE;
+ break;
+
+ case 5:
+ tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE;
+ break;
+
+ case 6:
+ tp->join_state = JS_REQUEST_INITIALIZATION_STATE;
+ break;
+
+ case 7:
+ tp->join_state = JS_JOIN_COMPLETE_STATE;
+ tp->status = OPEN;
+ err = smctr_status_chg(dev);
+ break;
+
+ case 8:
+ tp->join_state = JS_BYPASS_WAIT_STATE;
+ break;
+ }
+ break ;
+
+ /* Type 0x0E - TRC Initialization Sequence Interrupt
+		 * Subtype -- 00-FF Initialization sequence complete
+ */
+ case ISB_IMC_TRC_INTRNL_TST_STATUS:
+ tp->status = INITIALIZED;
+ smctr_disable_16bit(dev);
+ err = smctr_status_chg(dev);
+ smctr_enable_16bit(dev);
+ break;
+
+ /* other interrupt types, illegal */
+ default:
+ break;
+ }
+
+ if(err != SUCCESS)
+ break;
+ }
+
+	/* Check the ack code rather than the unmask bits here: while fixing
+	 * a stuck receive, DAT frames are sent and the FIFO overrun interrupt
+	 * is masked off temporarily (interrupt_unmask_bits = 0), but we still
+	 * want to issue an ack to the ISB.
+	 */
+ if(!(interrupt_ack_code & 0xff00))
+ smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits);
+
+ smctr_disable_16bit(dev);
+ smctr_enable_bic_int(dev);
+ spin_unlock(&tp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int smctr_issue_enable_int_cmd(struct net_device *dev,
+ __u16 interrupt_enable_mask)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_wait_while_cbusy(dev))
+ return (-1);
+
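+	/* Post the new interrupt mask and the ack code to the SCLB, then
+	 * signal the adapter via control attention.
+	 */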
+ tp->sclb_ptr->int_mask_control = ibits;
+	tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */
+	tp->sclb_ptr->resume_control = 0;
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_init_timers_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+ __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
+ tp->config_word1 = 0;
+
+ if((tp->media_type == MEDIA_STP_16)
+ || (tp->media_type == MEDIA_UTP_16)
+ || (tp->media_type == MEDIA_STP_16_UTP_16))
+ {
+ tp->config_word0 |= FREQ_16MB_BIT;
+ }
+
+ if(tp->mode_bits & EARLY_TOKEN_REL)
+ tp->config_word0 |= ETREN;
+
+ if(tp->mode_bits & LOOPING_MODE_MASK)
+ tp->config_word0 |= RX_OWN_BIT;
+ else
+ tp->config_word0 &= ~RX_OWN_BIT;
+
+ if(tp->receive_mask & PROMISCUOUS_MODE)
+ tp->config_word0 |= PROMISCUOUS_BIT;
+ else
+ tp->config_word0 &= ~PROMISCUOUS_BIT;
+
+ if(tp->receive_mask & ACCEPT_ERR_PACKETS)
+ tp->config_word0 |= SAVBAD_BIT;
+ else
+ tp->config_word0 &= ~SAVBAD_BIT;
+
+ if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
+ tp->config_word0 |= RXATMAC;
+ else
+ tp->config_word0 &= ~RXATMAC;
+
+ if(tp->receive_mask & ACCEPT_MULTI_PROM)
+ tp->config_word1 |= MULTICAST_ADDRESS_BIT;
+ else
+ tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
+
+ if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
+ tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
+ else
+ {
+ if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
+ tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
+ else
+ tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
+ }
+
+ if((tp->media_type == MEDIA_STP_16)
+ || (tp->media_type == MEDIA_UTP_16)
+ || (tp->media_type == MEDIA_STP_16_UTP_16))
+ {
+ tp->config_word1 |= INTERFRAME_SPACING_16;
+ }
+ else
+ tp->config_word1 |= INTERFRAME_SPACING_4;
+
+ *pTimer_Struc++ = tp->config_word0;
+ *pTimer_Struc++ = tp->config_word1;
+
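+	/* TRC protocol timer limits: the first table below applies to the
+	 * 4 Mbps media types, the second to everything else (16 Mbps).
+	 */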
+ if((tp->media_type == MEDIA_STP_4)
+ || (tp->media_type == MEDIA_UTP_4)
+ || (tp->media_type == MEDIA_STP_4_UTP_4))
+ {
+ *pTimer_Struc++ = 0x00FA; /* prescale */
+ *pTimer_Struc++ = 0x2710; /* TPT_limit */
+ *pTimer_Struc++ = 0x2710; /* TQP_limit */
+ *pTimer_Struc++ = 0x0A28; /* TNT_limit */
+ *pTimer_Struc++ = 0x3E80; /* TBT_limit */
+ *pTimer_Struc++ = 0x3A98; /* TSM_limit */
+ *pTimer_Struc++ = 0x1B58; /* TAM_limit */
+ *pTimer_Struc++ = 0x00C8; /* TBR_limit */
+ *pTimer_Struc++ = 0x07D0; /* TER_limit */
+ *pTimer_Struc++ = 0x000A; /* TGT_limit */
+ *pTimer_Struc++ = 0x1162; /* THT_limit */
+ *pTimer_Struc++ = 0x07D0; /* TRR_limit */
+ *pTimer_Struc++ = 0x1388; /* TVX_limit */
+ *pTimer_Struc++ = 0x0000; /* reserved */
+ }
+ else
+ {
+ *pTimer_Struc++ = 0x03E8; /* prescale */
+ *pTimer_Struc++ = 0x9C40; /* TPT_limit */
+ *pTimer_Struc++ = 0x9C40; /* TQP_limit */
+ *pTimer_Struc++ = 0x0A28; /* TNT_limit */
+ *pTimer_Struc++ = 0x3E80; /* TBT_limit */
+ *pTimer_Struc++ = 0x3A98; /* TSM_limit */
+ *pTimer_Struc++ = 0x1B58; /* TAM_limit */
+ *pTimer_Struc++ = 0x00C8; /* TBR_limit */
+ *pTimer_Struc++ = 0x07D0; /* TER_limit */
+ *pTimer_Struc++ = 0x000A; /* TGT_limit */
+ *pTimer_Struc++ = 0x4588; /* THT_limit */
+ *pTimer_Struc++ = 0x1F40; /* TRR_limit */
+ *pTimer_Struc++ = 0x4E20; /* TVX_limit */
+ *pTimer_Struc++ = 0x0000; /* reserved */
+ }
+
+ /* Set node address. */
+ *pTimer_Struc++ = dev->dev_addr[0] << 8
+ | (dev->dev_addr[1] & 0xFF);
+ *pTimer_Struc++ = dev->dev_addr[2] << 8
+ | (dev->dev_addr[3] & 0xFF);
+ *pTimer_Struc++ = dev->dev_addr[4] << 8
+ | (dev->dev_addr[5] & 0xFF);
+
+ /* Set group address. */
+ *pTimer_Struc++ = tp->group_address_0 << 8
+ | tp->group_address_0 >> 8;
+ *pTimer_Struc++ = tp->group_address[0] << 8
+ | tp->group_address[0] >> 8;
+ *pTimer_Struc++ = tp->group_address[1] << 8
+ | tp->group_address[1] >> 8;
+
+ /* Set functional address. */
+ *pTimer_Struc++ = tp->functional_address_0 << 8
+ | tp->functional_address_0 >> 8;
+ *pTimer_Struc++ = tp->functional_address[0] << 8
+ | tp->functional_address[0] >> 8;
+ *pTimer_Struc++ = tp->functional_address[1] << 8
+ | tp->functional_address[1] >> 8;
+
+ /* Set Bit-Wise group address. */
+ *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
+ | tp->bitwise_group_address[0] >> 8;
+ *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
+ | tp->bitwise_group_address[1] >> 8;
+
+ /* Set ring number address. */
+ *pTimer_Struc++ = tp->source_ring_number;
+ *pTimer_Struc++ = tp->target_ring_number;
+
+ /* Physical drop number. */
+ *pTimer_Struc++ = (unsigned short)0;
+ *pTimer_Struc++ = (unsigned short)0;
+
+ /* Product instance ID. */
+ for(i = 0; i < 9; i++)
+ *pTimer_Struc++ = (unsigned short)0;
+
+ err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
+
+ return (err);
+}
+
+static int smctr_issue_init_txrx_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+ void **txrx_ptrs = (void *)tp->misc_command_data;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ {
+ printk(KERN_ERR "%s: Hardware failure\n", dev->name);
+ return (err);
+ }
+
+ /* Initialize Transmit Queue Pointers that are used, to point to
+ * a single FCB.
+ */
+ for(i = 0; i < NUM_TX_QS_USED; i++)
+ *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);
+
+ /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
+ for(; i < MAX_TX_QS; i++)
+ *txrx_ptrs++ = (void *)0;
+
+ /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
+ * used, to point to a single FCB and a BDB chain of buffers.
+ */
+ for(i = 0; i < NUM_RX_QS_USED; i++)
+ {
+ *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
+ *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
+ }
+
+ /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
+ for(; i < MAX_RX_QS; i++)
+ {
+ *txrx_ptrs++ = (void *)0;
+ *txrx_ptrs++ = (void *)0;
+ }
+
+ err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
+
+ return (err);
+}
+
+static int smctr_issue_insert_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
+
+ return (err);
+}
+
+static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
+{
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
+ RW_TRC_STATUS_BLOCK);
+
+ return (err);
+}
+
+static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
+{
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
+ aword_cnt);
+
+ return (err);
+}
+
+static int smctr_issue_remove_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ tp->sclb_ptr->resume_control = 0;
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_resume_acb_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ tp->sclb_ptr->resume_control = SCLB_RC_ACB;
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
+
+ tp->acb_pending = 1;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if(queue == MAC_QUEUE)
+ tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
+ else
+ tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;
+
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
+
+ if(smctr_wait_while_cbusy(dev))
+ return (-1);
+
+ if(queue == MAC_QUEUE)
+ tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
+ else
+ tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;
+
+ tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
+
+ if(smctr_wait_while_cbusy(dev))
+ return (-1);
+
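+	/* The resume-control bits for the transmit FCB queues are
+	 * consecutive, so shift the queue 0 bit by the queue number.
+	 */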
+ tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
+ tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
+
+ smctr_set_ctrl_attention(dev);
+
+ return (0);
+}
+
+static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
+ TRC_INTERNAL_ROM_TEST);
+
+ return (err);
+}
+
+static int smctr_issue_test_hic_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
+ TRC_HOST_INTERFACE_REG_TEST);
+
+ return (err);
+}
+
+static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
+ TRC_MAC_REGISTERS_TEST);
+
+ return (err);
+}
+
+static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
+ TRC_INTERNAL_LOOPBACK);
+
+ return (err);
+}
+
+static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
+ TRC_TRI_LOOPBACK);
+
+ return (err);
+}
+
+static int smctr_issue_write_byte_cmd(struct net_device *dev,
+ short aword_cnt, void *byte)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int iword, ibyte;
+ int err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
+ iword++, ibyte += 2)
+ {
+ tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
+ | (*((__u8 *)byte + ibyte + 1));
+ }
+
+ return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
+ aword_cnt));
+}
+
+static int smctr_issue_write_word_cmd(struct net_device *dev,
+ short aword_cnt, void *word)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, err;
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
+ tp->misc_command_data[i] = *((__u16 *)word + i);
+
+ err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
+ aword_cnt);
+
+ return (err);
+}
+
+static int smctr_join_complete_state(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
+ JS_JOIN_COMPLETE_STATE);
+
+ return (err);
+}
+
+static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, j;
+ FCBlock *fcb;
+ BDBlock *bdb;
+
+ for(i = 0; i < NUM_TX_QS_USED; i++)
+ {
+ fcb = tp->tx_fcb_head[i];
+ bdb = tp->tx_bdb_head[i];
+
+ for(j = 0; j < tp->num_tx_fcbs[i]; j++)
+ {
+ fcb->bdb_ptr = bdb;
+ fcb->trc_bdb_ptr = TRC_POINTER(bdb);
+ fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
+ bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
+ }
+ }
+
+ return (0);
+}
+
+static int smctr_load_firmware(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 i, checksum = 0;
+ int err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name);
+
+ tp->ptr_ucode = smctr_code;
+ tp->num_of_tx_buffs = 4;
+ tp->mode_bits |= UMAC;
+ tp->receive_mask = 0;
+ tp->max_packet_size = 4177;
+
+ /* Can only upload the firmware once per adapter reset. */
+ if(tp->microcode_version != 0)
+ return (UCODE_PRESENT);
+
+	/* Verify that the firmware is present and of a supported version. */
+ if((tp->ptr_ucode == 0L)
+ || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET) < UCODE_VERSION))
+ {
+ return (UCODE_NOT_PRESENT);
+ }
+
+ /* UCODE_SIZE is not included in Checksum. */
+ for(i = 0; i < *((__u16 *)(tp->ptr_ucode + UCODE_SIZE_OFFSET)); i += 2)
+ checksum += *((__u16 *)(tp->ptr_ucode + 2 + i));
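+	/* The checksummed words of a valid image sum to zero (mod 2^16). */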
+ if(checksum)
+ return (UCODE_NOT_PRESENT);
+
+	/* At this point we have a valid firmware image, let's kick it on up. */
+ smctr_enable_adapter_ram(dev);
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if((smctr_checksum_firmware(dev))
+ || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET)
+ > tp->microcode_version))
+ {
+ smctr_enable_adapter_ctrl_store(dev);
+
+ /* Zero out ram space for firmware. */
+ for(i = 0; i < CS_RAM_SIZE; i += 2)
+ *((__u16 *)(tp->ram_access + i)) = 0;
+
+ smctr_decode_firmware(dev);
+
+		tp->microcode_version = *(tp->ptr_ucode + UCODE_VERSION_OFFSET);
+		*((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
+			= (tp->microcode_version << 8);
+ *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
+ = ~(tp->microcode_version << 8) + 1;
+
+ smctr_disable_adapter_ctrl_store(dev);
+
+ if(smctr_checksum_firmware(dev))
+ err = HARDWARE_FAILED;
+ }
+ else
+ err = UCODE_PRESENT;
+
+ smctr_disable_16bit(dev);
+
+ return (err);
+}
+
+static int smctr_load_node_addr(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned int i;
+ __u8 r;
+
+ for(i = 0; i < 6; i++)
+ {
+ r = inb(ioaddr + LAR0 + i);
+ dev->dev_addr[i] = (char)r;
+ }
+ dev->addr_len = 6;
+
+ return (0);
+}
+
+/* Lobe Media Test.
+ * During the transmission of the initial 1500 lobe media MAC frames,
+ * the phase lock loop in the 805 chip may lock, and then un-lock, causing
+ * the 825 to go into a PURGE state. When performing a PURGE, the MCT
+ * microcode will not transmit any frames given to it by the host, and
+ * will consequently cause a timeout.
+ *
+ * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
+ * queues other than the one used for the lobe_media_test should be
+ * disabled.
+ *
+ * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
+ * has any multicast or promiscuous bits set, the receive_mask needs to
+ * be changed to clear the multicast or promiscuous mode bits, the lobe_test
+ * run, and then the receive mask set back to its original value if the test
+ * is successful.
+ */
+static int smctr_lobe_media_test(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, perror = 0;
+ unsigned short saved_rcv_mask;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name);
+
+ /* Clear receive mask for lobe test. */
+ saved_rcv_mask = tp->receive_mask;
+ tp->receive_mask = 0;
+
+ smctr_chg_rx_mask(dev);
+
+ /* Setup the lobe media test. */
+ smctr_lobe_media_test_cmd(dev);
+ if(smctr_wait_cmd(dev))
+ {
+ smctr_reset_adapter(dev);
+ tp->status = CLOSED;
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+
+ /* Tx lobe media test frames. */
+ for(i = 0; i < 1500; ++i)
+ {
+ if(smctr_send_lobe_media_test(dev))
+ {
+ if(perror)
+ {
+ smctr_reset_adapter(dev);
+ tp->state = CLOSED;
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+ else
+ {
+ perror = 1;
+ if(smctr_lobe_media_test_cmd(dev))
+ {
+ smctr_reset_adapter(dev);
+ tp->state = CLOSED;
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+ }
+ }
+ }
+
+ if(smctr_send_dat(dev))
+ {
+ if(smctr_send_dat(dev))
+ {
+ smctr_reset_adapter(dev);
+ tp->state = CLOSED;
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+ }
+
+ /* Check if any frames received during test. */
+ if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status)
+ || (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
+ {
+ smctr_reset_adapter(dev);
+ tp->state = CLOSED;
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+
+	/* Restore the receive mask. */
+ tp->receive_mask = saved_rcv_mask;
+
+ smctr_chg_rx_mask(dev);
+
+ return (0);
+}
+
+static int smctr_lobe_media_test_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name);
+
+ /* Change to lobe media test state. */
+ if(tp->monitor_state != MS_BEACON_TEST_STATE)
+ {
+ smctr_lobe_media_test_state(dev);
+ if(smctr_wait_cmd(dev))
+ {
+ printk(KERN_ERR "Lobe Failed test state\n");
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+ }
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
+ TRC_LOBE_MEDIA_TEST);
+
+ return (err);
+}
+
+static int smctr_lobe_media_test_state(struct net_device *dev)
+{
+ int err;
+
+ err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
+ JS_LOBE_TEST_STATE);
+
+ return (err);
+}
+
+static int smctr_make_8025_hdr(struct net_device *dev,
+ MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
+{
+ tmf->ac = MSB(ac_fc); /* msb is access control */
+ tmf->fc = LSB(ac_fc); /* lsb is frame control */
+
+ tmf->sa[0] = dev->dev_addr[0];
+ tmf->sa[1] = dev->dev_addr[1];
+ tmf->sa[2] = dev->dev_addr[2];
+ tmf->sa[3] = dev->dev_addr[3];
+ tmf->sa[4] = dev->dev_addr[4];
+ tmf->sa[5] = dev->dev_addr[5];
+
+ switch(tmf->vc)
+ {
+ /* Send RQ_INIT to RPS */
+ case RQ_INIT:
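+			/* Ring Parameter Server functional address
+			 * C0-00-00-00-00-02.
+			 */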
+ tmf->da[0] = 0xc0;
+ tmf->da[1] = 0x00;
+ tmf->da[2] = 0x00;
+ tmf->da[3] = 0x00;
+ tmf->da[4] = 0x00;
+ tmf->da[5] = 0x02;
+ break;
+
+ /* Send RPT_TX_FORWARD to CRS */
+ case RPT_TX_FORWARD:
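+			/* Configuration Report Server functional address
+			 * C0-00-00-00-00-10.
+			 */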
+ tmf->da[0] = 0xc0;
+ tmf->da[1] = 0x00;
+ tmf->da[2] = 0x00;
+ tmf->da[3] = 0x00;
+ tmf->da[4] = 0x00;
+ tmf->da[5] = 0x10;
+ break;
+
+ /* Everything else goes to sender */
+ default:
+ tmf->da[0] = rmf->sa[0];
+ tmf->da[1] = rmf->sa[1];
+ tmf->da[2] = rmf->sa[2];
+ tmf->da[3] = rmf->sa[3];
+ tmf->da[4] = rmf->sa[4];
+ tmf->da[5] = rmf->sa[5];
+ break;
+ }
+
+ return (0);
+}
+
+static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
+ tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
+
+ tsv->svv[0] = MSB(tp->authorized_access_priority);
+ tsv->svv[1] = LSB(tp->authorized_access_priority);
+
+ return (0);
+}
+
+static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ tsv->svi = ADDRESS_MODIFER;
+ tsv->svl = S_ADDRESS_MODIFER;
+
+ tsv->svv[0] = 0;
+ tsv->svv[1] = 0;
+
+ return (0);
+}
+
+static int smctr_make_auth_funct_class(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tsv->svi = AUTHORIZED_FUNCTION_CLASS;
+ tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
+
+ tsv->svv[0] = MSB(tp->authorized_function_classes);
+ tsv->svv[1] = LSB(tp->authorized_function_classes);
+
+ return (0);
+}
+
+static int smctr_make_corr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv, __u16 correlator)
+{
+ tsv->svi = CORRELATOR;
+ tsv->svl = S_CORRELATOR;
+
+ tsv->svv[0] = MSB(correlator);
+ tsv->svv[1] = LSB(correlator);
+
+ return (0);
+}
+
+static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ smctr_get_functional_address(dev);
+
+ tsv->svi = FUNCTIONAL_ADDRESS;
+ tsv->svl = S_FUNCTIONAL_ADDRESS;
+
+ tsv->svv[0] = MSB(tp->misc_command_data[0]);
+ tsv->svv[1] = LSB(tp->misc_command_data[0]);
+
+ tsv->svv[2] = MSB(tp->misc_command_data[1]);
+ tsv->svv[3] = LSB(tp->misc_command_data[1]);
+
+ return (0);
+}
+
+static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ smctr_get_group_address(dev);
+
+ tsv->svi = GROUP_ADDRESS;
+ tsv->svl = S_GROUP_ADDRESS;
+
+ tsv->svv[0] = MSB(tp->misc_command_data[0]);
+ tsv->svv[1] = LSB(tp->misc_command_data[0]);
+
+ tsv->svv[2] = MSB(tp->misc_command_data[1]);
+ tsv->svv[3] = LSB(tp->misc_command_data[1]);
+
+ /* Set Group Address Sub-vector to all zeros if only the
+ * Group Address/Functional Address Indicator is set.
+ */
+ if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00
+ && tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
+ tsv->svv[0] = 0x00;
+
+ return (0);
+}
+
+static int smctr_make_phy_drop_num(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ smctr_get_physical_drop_number(dev);
+
+ tsv->svi = PHYSICAL_DROP;
+ tsv->svl = S_PHYSICAL_DROP;
+
+ tsv->svv[0] = MSB(tp->misc_command_data[0]);
+ tsv->svv[1] = LSB(tp->misc_command_data[0]);
+
+ tsv->svv[2] = MSB(tp->misc_command_data[1]);
+ tsv->svv[3] = LSB(tp->misc_command_data[1]);
+
+ return (0);
+}
+
+static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ int i;
+
+ tsv->svi = PRODUCT_INSTANCE_ID;
+ tsv->svl = S_PRODUCT_INSTANCE_ID;
+
+ for(i = 0; i < 18; i++)
+ tsv->svv[i] = 0xF0;
+
+ return (0);
+}
+
+static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ smctr_get_station_id(dev);
+
+ tsv->svi = STATION_IDENTIFER;
+ tsv->svl = S_STATION_IDENTIFER;
+
+ tsv->svv[0] = MSB(tp->misc_command_data[0]);
+ tsv->svv[1] = LSB(tp->misc_command_data[0]);
+
+ tsv->svv[2] = MSB(tp->misc_command_data[1]);
+ tsv->svv[3] = LSB(tp->misc_command_data[1]);
+
+ tsv->svv[4] = MSB(tp->misc_command_data[2]);
+ tsv->svv[5] = LSB(tp->misc_command_data[2]);
+
+ return (0);
+}
+
+static int smctr_make_ring_station_status(struct net_device *dev,
+ MAC_SUB_VECTOR * tsv)
+{
+ tsv->svi = RING_STATION_STATUS;
+ tsv->svl = S_RING_STATION_STATUS;
+
+ tsv->svv[0] = 0;
+ tsv->svv[1] = 0;
+ tsv->svv[2] = 0;
+ tsv->svv[3] = 0;
+ tsv->svv[4] = 0;
+ tsv->svv[5] = 0;
+
+ return (0);
+}
+
+static int smctr_make_ring_station_version(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tsv->svi = RING_STATION_VERSION_NUMBER;
+ tsv->svl = S_RING_STATION_VERSION_NUMBER;
+
+ tsv->svv[0] = 0xe2; /* EBCDIC - S */
+ tsv->svv[1] = 0xd4; /* EBCDIC - M */
+ tsv->svv[2] = 0xc3; /* EBCDIC - C */
+ tsv->svv[3] = 0x40; /* EBCDIC - */
+ tsv->svv[4] = 0xe5; /* EBCDIC - V */
+ tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
+ tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);
+ tsv->svv[7] = 0x40; /* EBCDIC - */
+ tsv->svv[8] = 0xe7; /* EBCDIC - X */
+
+ if(tp->extra_info & CHIP_REV_MASK)
+ tsv->svv[9] = 0xc5; /* EBCDIC - E */
+ else
+ tsv->svv[9] = 0xc4; /* EBCDIC - D */
+
+ return (0);
+}
+
+static int smctr_make_tx_status_code(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
+{
+ tsv->svi = TRANSMIT_STATUS_CODE;
+ tsv->svl = S_TRANSMIT_STATUS_CODE;
+
+	tsv->svv[0] = ((tx_fstatus & 0x0100) >> 6) | IBM_PASS_SOURCE_ADDR;
+
+ /* Stripped frame status of Transmitted Frame */
+ tsv->svv[1] = tx_fstatus & 0xff;
+
+ return (0);
+}
+
+static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
+ MAC_SUB_VECTOR *tsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ smctr_get_upstream_neighbor_addr(dev);
+
+ tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
+ tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;
+
+ tsv->svv[0] = MSB(tp->misc_command_data[0]);
+ tsv->svv[1] = LSB(tp->misc_command_data[0]);
+
+ tsv->svv[2] = MSB(tp->misc_command_data[1]);
+ tsv->svv[3] = LSB(tp->misc_command_data[1]);
+
+ tsv->svv[4] = MSB(tp->misc_command_data[2]);
+ tsv->svv[5] = LSB(tp->misc_command_data[2]);
+
+ return (0);
+}
+
+static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
+{
+ tsv->svi = WRAP_DATA;
+ tsv->svl = S_WRAP_DATA;
+
+ return (0);
+}
+
+/*
+ * Open/initialize the board. This is called sometime after
+ * booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int smctr_open(struct net_device *dev)
+{
+ int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_open\n", dev->name);
+
+ err = smctr_init_adapter(dev);
+ if(err < 0)
+ return (err);
+
+ return (err);
+}
+
+/* Interrupt driven open of Token card. */
+static int smctr_open_tr(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name);
+
+ /* Now we can actually open the adapter. */
+ if(tp->status == OPEN)
+ return (0);
+ if(tp->status != INITIALIZED)
+ return (-1);
+
+ /* FIXME: it would work a lot better if we masked the irq sources
+ on the card here, then we could skip the locking and poll nicely */
+ spin_lock_irqsave(&tp->lock, flags);
+
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
+ goto out;
+
+ if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
+ goto out;
+
+ if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
+ goto out;
+
+ if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
+ goto out;
+
+ tp->status = CLOSED;
+
+ /* Insert into the Ring or Enter Loopback Mode. */
+ if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
+ {
+ tp->status = CLOSED;
+
+ if(!(err = smctr_issue_trc_loopback_cmd(dev)))
+ {
+ if(!(err = smctr_wait_cmd(dev)))
+ tp->status = OPEN;
+ }
+
+ smctr_status_chg(dev);
+ }
+ else
+ {
+ if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
+ {
+ tp->status = CLOSED;
+ if(!(err = smctr_issue_tri_loopback_cmd(dev)))
+ {
+ if(!(err = smctr_wait_cmd(dev)))
+ tp->status = OPEN;
+ }
+
+ smctr_status_chg(dev);
+ }
+ else
+ {
+ if((tp->mode_bits & LOOPING_MODE_MASK)
+ == LOOPBACK_MODE_3)
+ {
+ tp->status = CLOSED;
+ if(!(err = smctr_lobe_media_test_cmd(dev)))
+ {
+ if(!(err = smctr_wait_cmd(dev)))
+ tp->status = OPEN;
+ }
+ smctr_status_chg(dev);
+ }
+ else
+ {
+ if(!(err = smctr_lobe_media_test(dev)))
+ err = smctr_issue_insert_cmd(dev);
+ else
+ {
+ if(err == LOBE_MEDIA_TEST_FAILED)
+ printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name);
+ }
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return (err);
+}
+
+/* Check for a network adapter of this type,
+ * and return device structure if one exists.
+ */
+struct net_device __init *smctr_probe(int unit)
+{
+ struct net_device *dev = alloc_trdev(sizeof(struct net_local));
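+	/* ISA I/O bases probed when no base address has been specified. */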
+ static const unsigned ports[] = {
+ 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
+ 0x320, 0x340, 0x360, 0x380, 0
+ };
+ const unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_MODULE_OWNER(dev);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "tr%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (dev->base_addr > 0x1ff) /* Check a single specified location. */
+ err = smctr_probe1(dev, dev->base_addr);
+ else if(dev->base_addr != 0) /* Don't probe at all. */
+		err = -ENXIO;
+ else {
+ for (port = ports; *port; port++) {
+ err = smctr_probe1(dev, *port);
+ if (!err)
+ break;
+ }
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+#ifdef CONFIG_MCA_LEGACY
+ { struct net_local *tp = netdev_priv(dev);
+ if (tp->slot_num)
+ mca_mark_as_unused(tp->slot_num);
+ }
+#endif
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ free_irq(dev->irq, dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+static int __init smctr_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned version_printed;
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+ __u32 *ram;
+
+ if(smctr_debug && version_printed++ == 0)
+ printk(version);
+
+ spin_lock_init(&tp->lock);
+ dev->base_addr = ioaddr;
+
+ /* Actually detect an adapter now. */
+ err = smctr_chk_isa(dev);
+ if(err < 0)
+ {
+ if ((err = smctr_chk_mca(dev)) < 0) {
+ err = -ENODEV;
+ goto out;
+ }
+ }
+
+ tp = netdev_priv(dev);
+ dev->mem_start = tp->ram_base;
+ dev->mem_end = dev->mem_start + 0x10000;
+ ram = (__u32 *)phys_to_virt(dev->mem_start);
+ tp->ram_access = *(__u32 *)&ram;
+ tp->status = NOT_INITIALIZED;
+
+ err = smctr_load_firmware(dev);
+ if(err != UCODE_PRESENT && err != SUCCESS)
+ {
+ printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Allow user to specify ring speed on module insert. */
+ if(ringspeed == 4)
+ tp->media_type = MEDIA_UTP_4;
+ else
+ tp->media_type = MEDIA_UTP_16;
+
+ printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
+ dev->name, smctr_name, smctr_model,
+ (unsigned int)dev->base_addr,
+ dev->irq, tp->rom_base, tp->ram_base);
+
+ dev->open = smctr_open;
+ dev->stop = smctr_close;
+ dev->hard_start_xmit = smctr_send_packet;
+ dev->tx_timeout = smctr_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = smctr_get_stats;
+ dev->set_multicast_list = &smctr_set_multicast_list;
+ return (0);
+
+out:
+ return err;
+}
+
+static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
+ struct net_device *dev, __u16 rx_status)
+{
+ struct net_local *tp = netdev_priv(dev);
+ struct sk_buff *skb;
+ __u16 rcode, correlator;
+ int err = 0;
+ __u8 xframe = 1;
+ __u16 tx_fstatus;
+
+ rmf->vl = SWAP_BYTES(rmf->vl);
+ if(rx_status & FCB_RX_STATUS_DA_MATCHED)
+ {
+ switch(rmf->vc)
+ {
+ /* Received MAC Frames Processed by RS. */
+ case INIT:
+ if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
+ {
+ return (rcode);
+ }
+
+ if((err = smctr_send_rsp(dev, rmf, rcode,
+ correlator)))
+ {
+ return (err);
+ }
+ break;
+
+ case CHG_PARM:
+ if((rcode = smctr_rcv_chg_param(dev, rmf,
+ &correlator)) ==HARDWARE_FAILED)
+ {
+ return (rcode);
+ }
+
+ if((err = smctr_send_rsp(dev, rmf, rcode,
+ correlator)))
+ {
+ return (err);
+ }
+ break;
+
+ case RQ_ADDR:
+ if((rcode = smctr_rcv_rq_addr_state_attch(dev,
+ rmf, &correlator)) != POSITIVE_ACK)
+ {
+ if(rcode == HARDWARE_FAILED)
+ return (rcode);
+ else
+ return (smctr_send_rsp(dev, rmf,
+ rcode, correlator));
+ }
+
+ if((err = smctr_send_rpt_addr(dev, rmf,
+ correlator)))
+ {
+ return (err);
+ }
+ break;
+
+ case RQ_ATTCH:
+ if((rcode = smctr_rcv_rq_addr_state_attch(dev,
+ rmf, &correlator)) != POSITIVE_ACK)
+ {
+ if(rcode == HARDWARE_FAILED)
+ return (rcode);
+ else
+ return (smctr_send_rsp(dev, rmf,
+ rcode,
+ correlator));
+ }
+
+ if((err = smctr_send_rpt_attch(dev, rmf,
+ correlator)))
+ {
+ return (err);
+ }
+ break;
+
+ case RQ_STATE:
+ if((rcode = smctr_rcv_rq_addr_state_attch(dev,
+ rmf, &correlator)) != POSITIVE_ACK)
+ {
+ if(rcode == HARDWARE_FAILED)
+ return (rcode);
+ else
+ return (smctr_send_rsp(dev, rmf,
+ rcode,
+ correlator));
+ }
+
+ if((err = smctr_send_rpt_state(dev, rmf,
+ correlator)))
+ {
+ return (err);
+ }
+ break;
+
+ case TX_FORWARD:
+ if((rcode = smctr_rcv_tx_forward(dev, rmf))
+ != POSITIVE_ACK)
+ {
+ if(rcode == HARDWARE_FAILED)
+ return (rcode);
+ else
+ return (smctr_send_rsp(dev, rmf,
+ rcode,
+ correlator));
+ }
+
+ if((err = smctr_send_tx_forward(dev, rmf,
+ &tx_fstatus)) == HARDWARE_FAILED)
+ {
+ return (err);
+ }
+
+ if(err == A_FRAME_WAS_FORWARDED)
+ {
+ if((err = smctr_send_rpt_tx_forward(dev,
+ rmf, tx_fstatus))
+ == HARDWARE_FAILED)
+ {
+ return (err);
+ }
+ }
+ break;
+
+ /* Received MAC Frames Processed by CRS/REM/RPS. */
+ case RSP:
+ case RQ_INIT:
+ case RPT_NEW_MON:
+ case RPT_SUA_CHG:
+ case RPT_ACTIVE_ERR:
+ case RPT_NN_INCMP:
+ case RPT_ERROR:
+ case RPT_ATTCH:
+ case RPT_STATE:
+ case RPT_ADDR:
+ break;
+
+ /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
+ default:
+ xframe = 0;
+ if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
+ {
+ rcode = smctr_rcv_unknown(dev, rmf,
+ &correlator);
+ if((err = smctr_send_rsp(dev, rmf,rcode,
+ correlator)))
+ {
+ return (err);
+ }
+ }
+
+ break;
+ }
+ }
+ else
+ {
+ /* 1. DA doesn't match (Promiscuous Mode).
+ * 2. Parse for Extended MAC Frame Type.
+ */
+ switch(rmf->vc)
+ {
+ case RSP:
+ case INIT:
+ case RQ_INIT:
+ case RQ_ADDR:
+ case RQ_ATTCH:
+ case RQ_STATE:
+ case CHG_PARM:
+ case RPT_ADDR:
+ case RPT_ERROR:
+ case RPT_ATTCH:
+ case RPT_STATE:
+ case RPT_NEW_MON:
+ case RPT_SUA_CHG:
+ case RPT_NN_INCMP:
+ case RPT_ACTIVE_ERR:
+ break;
+
+ default:
+ xframe = 0;
+ break;
+ }
+ }
+
+ /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
+ * ACCEPT_ATT_MAC_FRAMES is set.
+ */
+ if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
+ && (xframe == (__u8)0))
+ || ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES)
+ && (xframe == (__u8)1)))
+ {
+ rmf->vl = SWAP_BYTES(rmf->vl);
+
+ if (!(skb = dev_alloc_skb(size)))
+ return -ENOMEM;
+ skb->len = size;
+
+ /* Slide data into a sleek skb. */
+ skb_put(skb, skb->len);
+ memcpy(skb->data, rmf, skb->len);
+
+ /* Update Counters */
+ tp->MacStat.rx_packets++;
+ tp->MacStat.rx_bytes += skb->len;
+
+ /* Kick the packet on up. */
+ skb->dev = dev;
+ skb->protocol = tr_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ err = 0;
+ }
+
+ return (err);
+}
+
+/* Adapter RAM test. Incremental word ODD boundary data test. */
+static int smctr_ram_memory_test(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
+ word_read = 0, err_word = 0, err_pattern = 0;
+ unsigned int err_offset;
+ __u32 j, pword;
+ __u8 err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name);
+
+ start_pattern = 0x0001;
+ pages_of_ram = tp->ram_size / tp->ram_usable;
+ pword = tp->ram_access;
+
+ /* Incremental word ODD boundary test. */
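+	/* Each RAM page is filled with an incrementing 16-bit pattern starting
+	 * at an odd byte offset, then read back and verified.
+	 */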
+ for(page = 0; (page < pages_of_ram) && (~err);
+ page++, start_pattern += 0x8000)
+ {
+ smctr_set_page(dev, (__u8 *)(tp->ram_access
+ + (page * tp->ram_usable * 1024) + 1));
+ word_pattern = start_pattern;
+
+ for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
+ *(__u16 *)(pword + j) = word_pattern++;
+
+ word_pattern = start_pattern;
+
+ for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1
+ && (~err); j += 2, word_pattern++)
+ {
+ word_read = *(__u16 *)(pword + j);
+ if(word_read != word_pattern)
+ {
+ err = (__u8)1;
+ err_offset = j;
+ err_word = word_read;
+ err_pattern = word_pattern;
+ return (RAM_TEST_FAILED);
+ }
+ }
+ }
+
+ /* Zero out memory. */
+ for(page = 0; page < pages_of_ram && (~err); page++)
+ {
+ smctr_set_page(dev, (__u8 *)(tp->ram_access
+ + (page * tp->ram_usable * 1024)));
+ word_pattern = 0;
+
+ for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
+ *(__u16 *)(pword + j) = word_pattern;
+
+ for(j =0; j < (__u32)tp->ram_usable * 1024
+ && (~err); j += 2)
+ {
+ word_read = *(__u16 *)(pword + j);
+ if(word_read != word_pattern)
+ {
+ err = (__u8)1;
+ err_offset = j;
+ err_word = word_read;
+ err_pattern = word_pattern;
+ return (RAM_TEST_FAILED);
+ }
+ }
+ }
+
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+
+ return (0);
+}
+
+static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator)
+{
+ MAC_SUB_VECTOR *rsv;
+ signed short vlen;
+ __u16 rcode = POSITIVE_ACK;
+ unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
+
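+	/* A received MAC frame is a major vector (MVID/MVL) followed by a
+	 * chain of subvectors, each carrying its own length (svl) and
+	 * identifier (svi).
+	 */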
+ /* This Frame can only come from a CRS */
+ if((rmf->dc_sc & SC_MASK) != SC_CRS)
+ return(E_INAPPROPRIATE_SOURCE_CLASS);
+
+ /* Remove MVID Length from total length. */
+ vlen = (signed short)rmf->vl - 4;
+
+ /* Point to First SVID */
+ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
+
+ /* Search for Appropriate SVID's. */
+ while((vlen > 0) && (rcode == POSITIVE_ACK))
+ {
+ switch(rsv->svi)
+ {
+ case CORRELATOR:
+ svectors |= F_CORRELATOR;
+ rcode = smctr_set_corr(dev, rsv, correlator);
+ break;
+
+ case LOCAL_RING_NUMBER:
+ svectors |= F_LOCAL_RING_NUMBER;
+ rcode = smctr_set_local_ring_num(dev, rsv);
+ break;
+
+ case ASSIGN_PHYSICAL_DROP:
+ svectors |= F_ASSIGN_PHYSICAL_DROP;
+ rcode = smctr_set_phy_drop(dev, rsv);
+ break;
+
+ case ERROR_TIMER_VALUE:
+ svectors |= F_ERROR_TIMER_VALUE;
+ rcode = smctr_set_error_timer_value(dev, rsv);
+ break;
+
+ case AUTHORIZED_FUNCTION_CLASS:
+ svectors |= F_AUTHORIZED_FUNCTION_CLASS;
+ rcode = smctr_set_auth_funct_class(dev, rsv);
+ break;
+
+ case AUTHORIZED_ACCESS_PRIORITY:
+ svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
+ rcode = smctr_set_auth_access_pri(dev, rsv);
+ break;
+
+ default:
+ rcode = E_SUB_VECTOR_UNKNOWN;
+ break;
+ }
+
+		/* Let the sender know if the sum of the SV lengths is
+		 * larger than the length in the MVID length field.
+		 */
+ if((vlen -= rsv->svl) < 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+
+ rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
+ }
+
+ if(rcode == POSITIVE_ACK)
+ {
+		/* Let the sender know if the MVID length field
+		 * is larger than the sum of the SV lengths.
+		 */
+ if(vlen != 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+ else
+ {
+ /* Let Sender Know if Expected SVID Missing */
+ if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
+ rcode = E_MISSING_SUB_VECTOR;
+ }
+ }
+
+ return (rcode);
+}
+
+static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator)
+{
+ MAC_SUB_VECTOR *rsv;
+ signed short vlen;
+ __u16 rcode = POSITIVE_ACK;
+ unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
+
+ /* This Frame can only come from a RPS */
+ if((rmf->dc_sc & SC_MASK) != SC_RPS)
+ return (E_INAPPROPRIATE_SOURCE_CLASS);
+
+ /* Remove MVID Length from total length. */
+ vlen = (signed short)rmf->vl - 4;
+
+ /* Point to First SVID */
+ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
+
+ /* Search for Appropriate SVID's */
+ while((vlen > 0) && (rcode == POSITIVE_ACK))
+ {
+ switch(rsv->svi)
+ {
+ case CORRELATOR:
+ svectors |= F_CORRELATOR;
+ rcode = smctr_set_corr(dev, rsv, correlator);
+ break;
+
+ case LOCAL_RING_NUMBER:
+ svectors |= F_LOCAL_RING_NUMBER;
+ rcode = smctr_set_local_ring_num(dev, rsv);
+ break;
+
+ case ASSIGN_PHYSICAL_DROP:
+ svectors |= F_ASSIGN_PHYSICAL_DROP;
+ rcode = smctr_set_phy_drop(dev, rsv);
+ break;
+
+ case ERROR_TIMER_VALUE:
+ svectors |= F_ERROR_TIMER_VALUE;
+ rcode = smctr_set_error_timer_value(dev, rsv);
+ break;
+
+ default:
+ rcode = E_SUB_VECTOR_UNKNOWN;
+ break;
+ }
+
+		/* Let the sender know if the sum of the SV lengths is
+		 * larger than the length in the MVID length field.
+		 */
+ if((vlen -= rsv->svl) < 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+
+ rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
+ }
+
+ if(rcode == POSITIVE_ACK)
+ {
+		/* Let the sender know if the MVID length field
+		 * is larger than the sum of the SV lengths.
+		 */
+ if(vlen != 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+ else
+ {
+ /* Let Sender Know if Expected SV Missing */
+ if((svectors & R_INIT) ^ R_INIT)
+ rcode = E_MISSING_SUB_VECTOR;
+ }
+ }
+
+ return (rcode);
+}
+
+static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
+{
+ MAC_SUB_VECTOR *rsv;
+ signed short vlen;
+ __u16 rcode = POSITIVE_ACK;
+ unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
+
+ /* This Frame can only come from a CRS */
+ if((rmf->dc_sc & SC_MASK) != SC_CRS)
+ return (E_INAPPROPRIATE_SOURCE_CLASS);
+
+ /* Remove MVID Length from total length */
+ vlen = (signed short)rmf->vl - 4;
+
+ /* Point to First SVID */
+ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
+
+ /* Search for Appropriate SVID's */
+ while((vlen > 0) && (rcode == POSITIVE_ACK))
+ {
+ switch(rsv->svi)
+ {
+ case FRAME_FORWARD:
+ svectors |= F_FRAME_FORWARD;
+ rcode = smctr_set_frame_forward(dev, rsv,
+ rmf->dc_sc);
+ break;
+
+ default:
+ rcode = E_SUB_VECTOR_UNKNOWN;
+ break;
+ }
+
+		/* Let the sender know if the sum of the SV lengths is
+		 * larger than the length in the MVID length field.
+		 */
+ if((vlen -= rsv->svl) < 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+
+ rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
+ }
+
+ if(rcode == POSITIVE_ACK)
+ {
+		/* Let the sender know if the MVID length field
+		 * is larger than the sum of the SV lengths.
+		 */
+ if(vlen != 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+ else
+ {
+ /* Let Sender Know if Expected SV Missing */
+ if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD)
+ rcode = E_MISSING_SUB_VECTOR;
+ }
+ }
+
+ return (rcode);
+}
+
+static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
+ MAC_HEADER *rmf, __u16 *correlator)
+{
+ MAC_SUB_VECTOR *rsv;
+ signed short vlen;
+ __u16 rcode = POSITIVE_ACK;
+ unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
+
+ /* Remove MVID Length from total length */
+ vlen = (signed short)rmf->vl - 4;
+
+ /* Point to First SVID */
+ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
+
+ /* Search for Appropriate SVID's */
+ while((vlen > 0) && (rcode == POSITIVE_ACK))
+ {
+ switch(rsv->svi)
+ {
+ case CORRELATOR:
+ svectors |= F_CORRELATOR;
+ rcode = smctr_set_corr(dev, rsv, correlator);
+ break;
+
+ default:
+ rcode = E_SUB_VECTOR_UNKNOWN;
+ break;
+ }
+
+		/* Let the sender know if the sum of the SV lengths is
+		 * larger than the length in the MVID length field.
+		 */
+ if((vlen -= rsv->svl) < 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+
+ rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
+ }
+
+ if(rcode == POSITIVE_ACK)
+ {
+		/* Let the sender know if the MVID length field
+		 * is larger than the sum of the SV lengths.
+		 */
+ if(vlen != 0)
+ rcode = E_VECTOR_LENGTH_ERROR;
+ else
+ {
+ /* Let Sender Know if Expected SVID Missing */
+ if((svectors & R_RQ_ATTCH_STATE_ADDR)
+ ^ R_RQ_ATTCH_STATE_ADDR)
+ rcode = E_MISSING_SUB_VECTOR;
+ }
+ }
+
+ return (rcode);
+}
+
+static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *correlator)
+{
+ MAC_SUB_VECTOR *rsv;
+ signed short vlen;
+
+ *correlator = 0;
+
+ /* Remove MVID Length from total length */
+ vlen = (signed short)rmf->vl - 4;
+
+ /* Point to First SVID */
+ rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
+
+ /* Search for CORRELATOR for RSP to UNKNOWN */
+ while((vlen > 0) && (*correlator == 0))
+ {
+ switch(rsv->svi)
+ {
+ case CORRELATOR:
+ smctr_set_corr(dev, rsv, correlator);
+ break;
+
+ default:
+ break;
+ }
+
+ vlen -= rsv->svl;
+ rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
+ }
+
+ return (E_UNRECOGNIZED_VECTOR_ID);
+}
+
+/*
+ * Reset the 825 NIC and exit w:
+ * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
+ * 2. TINT masked.
+ * 3. CBUSY masked.
+ * 4. TINT clear.
+ * 5. CBUSY clear.
+ */
+static int smctr_reset_adapter(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+	/* Resetting the NIC will put it in a halted and un-initialized state. */
+	smctr_set_trc_reset(ioaddr);
+	mdelay(200); /* ~200 ms */
+
+	smctr_clear_trc_reset(ioaddr);
+	mdelay(200); /* ~200 ms */
+
+	/* Remove any latched interrupts that occurred prior to resetting the
+	 * adapter or that were possibly caused by line glitches due to the reset.
+	 */
+ outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
+
+ return (0);
+}
+
+static int smctr_restart_tx_chain(struct net_device *dev, short queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name);
+
+ if(tp->num_tx_fcbs_used[queue] != 0
+ && tp->tx_queue_status[queue] == NOT_TRANSMITING)
+ {
+ tp->tx_queue_status[queue] = TRANSMITING;
+ err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
+ }
+
+ return (err);
+}
+
+static int smctr_ring_status_chg(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name);
+
+	/* Check ring_status_flags: whenever the monitor state change is
+	 * flagged, check the value of monitor_state and enable the
+	 * transmit/receive timeout only if it is MS_ACTIVE_MONITOR_STATE
+	 * or MS_STANDBY_MONITOR_STATE.
+	 */
+ if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
+ {
+ if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE)
+ || (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
+ {
+ tp->monitor_state_ready = 1;
+ }
+ else
+ {
+ /* if adapter is NOT in either active monitor
+ * or standby monitor state => Disable
+ * transmit/receive timeout.
+ */
+ tp->monitor_state_ready = 0;
+
+ /* Ring speed problem, switching to auto mode. */
+ if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE
+ && !tp->cleanup)
+ {
+ printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
+ dev->name);
+ smctr_set_ring_speed(dev);
+ }
+ }
+ }
+
+ if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
+ return (0);
+
+ switch(tp->ring_status)
+ {
+ case RING_RECOVERY:
+ printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
+ tp->current_ring_status |= RING_RECOVERY;
+ break;
+
+ case SINGLE_STATION:
+			printk(KERN_INFO "%s: Single Station\n", dev->name);
+ tp->current_ring_status |= SINGLE_STATION;
+ break;
+
+ case COUNTER_OVERFLOW:
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
+ tp->current_ring_status |= COUNTER_OVERFLOW;
+ break;
+
+ case REMOVE_RECEIVED:
+ printk(KERN_INFO "%s: Remove Received\n", dev->name);
+ tp->current_ring_status |= REMOVE_RECEIVED;
+ break;
+
+ case AUTO_REMOVAL_ERROR:
+ printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
+ tp->current_ring_status |= AUTO_REMOVAL_ERROR;
+ break;
+
+ case LOBE_WIRE_FAULT:
+ printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
+ tp->current_ring_status |= LOBE_WIRE_FAULT;
+ break;
+
+ case TRANSMIT_BEACON:
+ printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
+ tp->current_ring_status |= TRANSMIT_BEACON;
+ break;
+
+ case SOFT_ERROR:
+ printk(KERN_INFO "%s: Soft Error\n", dev->name);
+ tp->current_ring_status |= SOFT_ERROR;
+ break;
+
+ case HARD_ERROR:
+ printk(KERN_INFO "%s: Hard Error\n", dev->name);
+ tp->current_ring_status |= HARD_ERROR;
+ break;
+
+ case SIGNAL_LOSS:
+ printk(KERN_INFO "%s: Signal Loss\n", dev->name);
+ tp->current_ring_status |= SIGNAL_LOSS;
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Unknown ring status change\n",
+ dev->name);
+ break;
+ }
+
+ return (0);
+}
+
+static int smctr_rx_frame(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 queue, status, rx_size, err = 0;
+ __u8 *pbuff;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name);
+
+ queue = tp->receive_queue_number;
+
+ while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
+ {
+ err = HARDWARE_FAILED;
+
+ if(((status & 0x007f) == 0)
+ || ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
+ {
+ /* frame length less the CRC (4 bytes) + FS (1 byte) */
+ rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;
+
+ pbuff = smctr_get_rx_pointer(dev, queue);
+
+ smctr_set_page(dev, pbuff);
+ smctr_disable_16bit(dev);
+
+ /* pbuff points to addr within one page */
+ pbuff = (__u8 *)PAGE_POINTER(pbuff);
+
+ if(queue == NON_MAC_QUEUE)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(rx_size);
+ if (skb) {
+ skb_put(skb, rx_size);
+
+ memcpy(skb->data, pbuff, rx_size);
+
+ /* Update Counters */
+ tp->MacStat.rx_packets++;
+ tp->MacStat.rx_bytes += skb->len;
+
+ /* Kick the packet on up. */
+ skb->dev = dev;
+ skb->protocol = tr_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ } else {
+ }
+ }
+ else
+ smctr_process_rx_packet((MAC_HEADER *)pbuff,
+ rx_size, dev, status);
+ }
+
+ smctr_enable_16bit(dev);
+ smctr_set_page(dev, (__u8 *)tp->ram_access);
+ smctr_update_rx_chain(dev, queue);
+
+ if(err != SUCCESS)
+ break;
+ }
+
+ return (err);
+}
+
+static int smctr_send_dat(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int i, err;
+ MAC_HEADER *tmf;
+ FCBlock *fcb;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name);
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
+ sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
+ {
+ return (OUT_OF_RESOURCES);
+ }
+
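+	/* The DAT (Duplicate Address Test) frame is addressed to the station's
+	 * own address, so SA and DA below are both set to dev->dev_addr.
+	 */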
+ /* Initialize DAT Data Fields. */
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->ac = MSB(AC_FC_DAT);
+ tmf->fc = LSB(AC_FC_DAT);
+
+ for(i = 0; i < 6; i++)
+ {
+ tmf->sa[i] = dev->dev_addr[i];
+ tmf->da[i] = dev->dev_addr[i];
+
+ }
+
+ tmf->vc = DAT;
+ tmf->dc_sc = DC_RS | SC_RS;
+ tmf->vl = 4;
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ /* Start Transmit. */
+ if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
+ return (err);
+
+ /* Wait for Transmit to Complete */
+ for(i = 0; i < 10000; i++)
+ {
+ if(fcb->frame_status & FCB_COMMAND_DONE)
+ break;
+ mdelay(1);
+ }
+
+ /* Check if GOOD frame Tx'ed. */
+ if(!(fcb->frame_status & FCB_COMMAND_DONE)
+ || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
+ {
+ return (INITIALIZE_FAILED);
+ }
+
+	/* De-allocate the Tx FCB and frame buffer.
+	 * The FCB must be de-allocated manually if executing with
+	 * interrupts disabled, otherwise the ISR (LM_Service_Events)
+	 * will de-allocate it when the interrupt occurs.
+	 */
+ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
+ smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
+
+ return (0);
+}
+
+static void smctr_timeout(struct net_device *dev)
+{
+ /*
+ * If we get here, some higher level has decided we are broken.
+ * There should really be a "kick me" function call instead.
+ *
+ * Resetting the token ring adapter takes a long time so just
+ * fake transmission time and go on trying. Our own timeout
+ * routine is in sktr_timer_chk()
+ */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * Gets skb from system, queues it and checks if it can be sent
+ */
+static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name);
+
+ /*
+ * Block a transmit overlap
+ */
+
+ netif_stop_queue(dev);
+
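+	/* QueueSkb counts the remaining transmit queue slots. */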
+ if(tp->QueueSkb == 0)
+ return (1); /* Return with tbusy set: queue full */
+
+ tp->QueueSkb--;
+ skb_queue_tail(&tp->SendSkbQueue, skb);
+ smctr_hardware_send_packet(dev, tp);
+ if(tp->QueueSkb > 0)
+ netif_wake_queue(dev);
+
+ return (0);
+}
+
+static int smctr_send_lobe_media_test(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ MAC_SUB_VECTOR *tsv;
+ MAC_HEADER *tmf;
+ FCBlock *fcb;
+ __u32 i;
+ int err;
+
+ if(smctr_debug > 15)
+ printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name);
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
+ + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
+ {
+ return (OUT_OF_RESOURCES);
+ }
+
+	/* Initialize the lobe media test MAC frame fields. */
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
+ tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);
+
+ for(i = 0; i < 6; i++)
+ {
+ tmf->da[i] = 0;
+ tmf->sa[i] = dev->dev_addr[i];
+ }
+
+ tmf->vc = LOBE_MEDIA_TEST;
+ tmf->dc_sc = DC_RS | SC_RS;
+ tmf->vl = 4;
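+	/* vl starts at 4 (the MVID/MVL header) and grows as each subvector
+	 * is appended below.
+	 */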
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_wrap_data(dev, tsv);
+ tmf->vl += tsv->svl;
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_wrap_data(dev, tsv);
+ tmf->vl += tsv->svl;
+
+ /* Start Transmit. */
+ tmf->vl = SWAP_BYTES(tmf->vl);
+ if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
+ return (err);
+
+ /* Wait for Transmit to Complete. (10 ms). */
+ for(i=0; i < 10000; i++)
+ {
+ if(fcb->frame_status & FCB_COMMAND_DONE)
+ break;
+ mdelay(1);
+ }
+
+ /* Check if GOOD frame Tx'ed */
+ if(!(fcb->frame_status & FCB_COMMAND_DONE)
+ || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
+ {
+ return (LOBE_MEDIA_TEST_FAILED);
+ }
+
+	/* De-allocate the Tx FCB and frame buffer.
+	 * The FCB must be de-allocated manually if executing with
+	 * interrupts disabled, otherwise the ISR (LM_Service_Events)
+	 * will de-allocate it when the interrupt occurs.
+	 */
+ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
+ smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
+
+ return (0);
+}
+
+static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator)
+{
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
+ + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
+ == (FCBlock *)(-1L))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RPT_ADDR;
+ tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_corr(dev, tsv, correlator);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_phy_drop_num(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_upstream_neighbor_addr(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_addr_mod(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_group_addr(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_funct_addr(dev, tsv);
+
+ tmf->vl += tsv->svl;
+
+ /* Subtract out MVID and MVL, which are
+ * included in both vl and MAC_HEADER.
+ */
+/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+ fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+*/
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+}
+
+static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator)
+{
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
+ + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
+ == (FCBlock *)(-1L))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RPT_ATTCH;
+ tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_corr(dev, tsv, correlator);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_product_id(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_funct_addr(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_auth_funct_class(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_access_pri(dev, tsv);
+
+ tmf->vl += tsv->svl;
+
+ /* Subtract out MVID and MVL, which are
+ * included in both vl and MAC_HEADER.
+ */
+/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+ fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+*/
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+}
+
+static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 correlator)
+{
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
+ + S_RING_STATION_STATUS + S_STATION_IDENTIFER))
+ == (FCBlock *)(-1L))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RPT_STATE;
+ tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_corr(dev, tsv, correlator);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_ring_station_version(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_ring_station_status(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_station_id(dev, tsv);
+
+ tmf->vl += tsv->svl;
+
+ /* Subtract out MVID and MVL, which are
+ * included in both vl and MAC_HEADER.
+ */
+/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+ fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+*/
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+}
+
+static int smctr_send_rpt_tx_forward(struct net_device *dev,
+ MAC_HEADER *rmf, __u16 tx_fstatus)
+{
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RPT_TX_FORWARD;
+ tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_tx_status_code(dev, tsv, tx_fstatus);
+
+ tmf->vl += tsv->svl;
+
+ /* Subtract out MVID and MVL, which are
+ * included in both vl and MAC_HEADER.
+ */
+/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+ fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+*/
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+}
+
+static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 rcode, __u16 correlator)
+{
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RSP;
+ tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_corr(dev, tsv, correlator);
+
+ return (0);
+}
+
+static int smctr_send_rq_init(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ MAC_HEADER *tmf;
+ MAC_SUB_VECTOR *tsv;
+ FCBlock *fcb;
+ unsigned int i, count = 0;
+ __u16 fstatus;
+ int err;
+
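+ /* Transmit a Request Initialization MAC frame to the Ring Parameter
+ * Server class (DC_RPS); retry while the AR/CR (AC) bits are not all
+ * set, making at most four error-free attempts.
+ */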
+ do {
+ if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
+ + S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
+ + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
+ == (FCBlock *)(-1L)))
+ {
+ return (0);
+ }
+
+ tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
+ tmf->vc = RQ_INIT;
+ tmf->dc_sc = DC_RPS | SC_RS;
+ tmf->vl = 4;
+
+ smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT);
+
+ tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
+ smctr_make_product_id(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_upstream_neighbor_addr(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_ring_station_version(dev, tsv);
+
+ tmf->vl += tsv->svl;
+ tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
+ smctr_make_addr_mod(dev, tsv);
+
+ tmf->vl += tsv->svl;
+
+ /* Subtract out MVID and MVL, which are
+ * included in both vl and MAC_HEADER.
+ */
+/* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+ fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
+*/
+ tmf->vl = SWAP_BYTES(tmf->vl);
+
+ if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
+ return (err);
+
+ /* Wait for Transmit to Complete */
+ for(i = 0; i < 10000; i++)
+ {
+ if(fcb->frame_status & FCB_COMMAND_DONE)
+ break;
+ mdelay(1);
+ }
+
+ /* Check if GOOD frame Tx'ed */
+ fstatus = fcb->frame_status;
+
+ if(!(fstatus & FCB_COMMAND_DONE))
+ return (HARDWARE_FAILED);
+
+ if(!(fstatus & FCB_TX_STATUS_E))
+ count++;
+
+ /* De-allocate the Tx FCB and Frame Buffer.
+ * The FCB must be de-allocated manually if executing with
+ * interrupts disabled, otherwise the ISR (LM_Service_Events)
+ * will de-allocate it when the interrupt occurs.
+ */
+ tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
+ smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
+ } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
+
+ return (smctr_join_complete_state(dev));
+}
+
+static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
+ __u16 *tx_fstatus)
+{
+ struct net_local *tp = netdev_priv(dev);
+ FCBlock *fcb;
+ unsigned int i;
+ int err;
+
+ /* Check if this is the END POINT of the Transmit Forward Chain. */
+ if(rmf->vl <= 18)
+ return (0);
+
+ /* Allocate Transmit FCB only by requesting 0 bytes
+ * of data buffer.
+ */
+ if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
+ return (0);
+
+ /* Set pointer to Transmit Frame Buffer to the data
+ * portion of the received TX Forward frame, making
+ * sure to skip over the Vector Code (vc) and Vector
+ * length (vl).
+ */
+ fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf
+ + sizeof(MAC_HEADER) + 2);
+ fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf
+ + sizeof(MAC_HEADER) + 2);
+
+ fcb->frame_length = rmf->vl - 4 - 2;
+ fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;
+
+ if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
+ return (err);
+
+ /* Wait for Transmit to Complete */
+ for(i = 0; i < 10000; i++)
+ {
+ if(fcb->frame_status & FCB_COMMAND_DONE)
+ break;
+ mdelay(1);
+ }
+
+ /* Check if GOOD frame Tx'ed */
+ if(!(fcb->frame_status & FCB_COMMAND_DONE))
+ {
+ if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
+ return (err);
+
+ for(i = 0; i < 10000; i++)
+ {
+ if(fcb->frame_status & FCB_COMMAND_DONE)
+ break;
+ mdelay(1);
+ }
+
+ if(!(fcb->frame_status & FCB_COMMAND_DONE))
+ return (HARDWARE_FAILED);
+ }
+
+ *tx_fstatus = fcb->frame_status;
+
+ return (A_FRAME_WAS_FORWARDED);
+}
+
+static int smctr_set_auth_access_pri(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
+
+ return (POSITIVE_ACK);
+}
+
+static int smctr_set_auth_funct_class(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
+
+ return (POSITIVE_ACK);
+}
+
+static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
+ __u16 *correlator)
+{
+ if(rsv->svl != S_CORRELATOR)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
+
+ return (POSITIVE_ACK);
+}
+
+static int smctr_set_error_timer_value(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv)
+{
+ __u16 err_tval;
+ int err;
+
+ if(rsv->svl != S_ERROR_TIMER_VALUE)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
+
+ smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
+
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ return (POSITIVE_ACK);
+}
+
+static int smctr_set_frame_forward(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv, __u8 dc_sc)
+{
+ if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ if((dc_sc & DC_MASK) != DC_CRS)
+ {
+ if(rsv->svl >= 2 && rsv->svl < 20)
+ return (E_TRANSMIT_FORWARD_INVALID);
+
+ if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
+ return (E_TRANSMIT_FORWARD_INVALID);
+ }
+
+ return (POSITIVE_ACK);
+}
+
+static int smctr_set_local_ring_num(struct net_device *dev,
+ MAC_SUB_VECTOR *rsv)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(rsv->svl != S_LOCAL_RING_NUMBER)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ if(tp->ptr_local_ring_num)
+ *(__u16 *)(tp->ptr_local_ring_num)
+ = (rsv->svv[0] << 8 | rsv->svv[1]);
+
+ return (POSITIVE_ACK);
+}
+
+static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
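+ /* Assert Control Attention to the TRC: the 585 BIC latches the CA bit
+ * in HWR, other BICs need CA pulsed (set then cleared) through the CSR.
+ */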
+ if(tp->bic_type == BIC_585_CHIP)
+ outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
+ else
+ {
+ outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
+ outb(tp->trc_mask, ioaddr + CSR);
+ }
+
+ return (0);
+}
+
+static void smctr_set_multicast_list(struct net_device *dev)
+{
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
+
+ return;
+}
+
+static int smctr_set_page(struct net_device *dev, __u8 *buf)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u8 amask;
+ __u32 tptr;
+
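+ /* Translate the shared-RAM pointer into a page number and latch it in
+ * the adapter's Page Register (PR).
+ */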
+ tptr = (__u32)buf - (__u32)tp->ram_access;
+ amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
+ outb(amask, dev->base_addr + PR);
+
+ return (0);
+}
+
+static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
+{
+ int err;
+
+ if(rsv->svl != S_PHYSICAL_DROP)
+ return (E_SUB_VECTOR_LENGTH_ERROR);
+
+ smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
+ if((err = smctr_wait_cmd(dev)))
+ return (err);
+
+ return (POSITIVE_ACK);
+}
+
+/* Reset the ring speed to the opposite of what it was. This auto-pilot
+ * mode requires a complete reset and re-init of the adapter.
+ */
+static int smctr_set_ring_speed(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ if(tp->media_type == MEDIA_UTP_16)
+ tp->media_type = MEDIA_UTP_4;
+ else
+ tp->media_type = MEDIA_UTP_16;
+
+ smctr_enable_16bit(dev);
+
+ /* Re-Initialize adapter's internal registers */
+ smctr_reset_adapter(dev);
+
+ if((err = smctr_init_card_real(dev)))
+ return (err);
+
+ smctr_enable_bic_int(dev);
+
+ if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
+ return (err);
+
+ smctr_disable_16bit(dev);
+
+ return (0);
+}
+
+static int smctr_set_rx_look_ahead(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 sword, rword;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name);
+
+ tp->adapter_flags &= ~(FORCED_16BIT_MODE);
+ tp->adapter_flags |= RX_VALID_LOOKAHEAD;
+
+ if(tp->adapter_bus == BUS_ISA16_TYPE)
+ {
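+ /* Probe shared RAM: write a test word in 16-bit mode, read it back in
+ * 8-bit mode, and force 16-bit mode if the value does not survive.
+ * The original word is restored afterwards.
+ */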
+ sword = *((__u16 *)(tp->ram_access));
+ *((__u16 *)(tp->ram_access)) = 0x1234;
+
+ smctr_disable_16bit(dev);
+ rword = *((__u16 *)(tp->ram_access));
+ smctr_enable_16bit(dev);
+
+ if(rword != 0x1234)
+ tp->adapter_flags |= FORCED_16BIT_MODE;
+
+ *((__u16 *)(tp->ram_access)) = sword;
+ }
+
+ return (0);
+}
+
+static int smctr_set_trc_reset(int ioaddr)
+{
+ __u8 r;
+
+ r = inb(ioaddr + MSR);
+ outb(MSR_RST | r, ioaddr + MSR);
+
+ return (0);
+}
+
+/*
+ * This function can be called whether or not the adapter is busy.
+ */
+static int smctr_setup_single_cmd(struct net_device *dev,
+ __u16 command, __u16 subcommand)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int err;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
+
+ if((err = smctr_wait_while_cbusy(dev)))
+ return (err);
+
+ if((err = (unsigned int)smctr_wait_cmd(dev)))
+ return (err);
+
+ tp->acb_head->cmd_done_status = 0;
+ tp->acb_head->cmd = command;
+ tp->acb_head->subcmd = subcommand;
+
+ err = smctr_issue_resume_acb_cmd(dev);
+
+ return (err);
+}
+
+/*
+ * This function must not be called while the adapter is busy.
+ */
+static int smctr_setup_single_cmd_w_data(struct net_device *dev,
+ __u16 command, __u16 subcommand)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE;
+ tp->acb_head->cmd = command;
+ tp->acb_head->subcmd = subcommand;
+ tp->acb_head->data_offset_lo
+ = (__u16)TRC_POINTER(tp->misc_command_data);
+
+ return(smctr_issue_resume_acb_cmd(dev));
+}
+
+static char *smctr_malloc(struct net_device *dev, __u16 size)
+{
+ struct net_local *tp = netdev_priv(dev);
+ char *m;
+
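+ /* Carve the next 'size' bytes out of the shared RAM window by advancing
+ * sh_mem_used; there is no per-allocation free.
+ */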
+ m = (char *)(tp->ram_access + tp->sh_mem_used);
+ tp->sh_mem_used += (__u32)size;
+
+ return (m);
+}
+
+static int smctr_status_chg(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name);
+
+ switch(tp->status)
+ {
+ case OPEN:
+ break;
+
+ case CLOSED:
+ break;
+
+ /* Interrupt driven open() completion. XXX */
+ case INITIALIZED:
+ tp->group_address_0 = 0;
+ tp->group_address[0] = 0;
+ tp->group_address[1] = 0;
+ tp->functional_address_0 = 0;
+ tp->functional_address[0] = 0;
+ tp->functional_address[1] = 0;
+ smctr_open_tr(dev);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: status change unknown %x\n",
+ dev->name, tp->status);
+ break;
+ }
+
+ return (0);
+}
+
+static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
+ __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err = 0;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name);
+
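+ /* Terminate the FCB chain at this frame (posting Tx frame status) and,
+ * if this queue is not already transmitting, issue a resume command so
+ * the TRC starts it.
+ */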
+ fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
+ if(tp->num_tx_fcbs[queue] != 1)
+ fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;
+
+ if(tp->tx_queue_status[queue] == NOT_TRANSMITING)
+ {
+ tp->tx_queue_status[queue] = TRANSMITING;
+ err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
+ }
+
+ return (err);
+}
+
+static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ __u16 status, err = 0;
+ int cstatus;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name);
+
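+ /* Reap completed transmit FCBs: any status bit in 0x7e00 is treated as
+ * a hardware failure; otherwise the FCB is reclaimed and, in UMAC mode,
+ * the completion status is classified from the AR/CR/E bits.
+ */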
+ while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
+ {
+ if(status & 0x7e00 )
+ {
+ err = HARDWARE_FAILED;
+ break;
+ }
+
+ if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
+ queue)) != SUCCESS)
+ break;
+
+ smctr_disable_16bit(dev);
+
+ if(tp->mode_bits & UMAC)
+ {
+ if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
+ cstatus = NO_SUCH_DESTINATION;
+ else
+ {
+ if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
+ cstatus = DEST_OUT_OF_RESOURCES;
+ else
+ {
+ if(status & FCB_TX_STATUS_E)
+ cstatus = MAX_COLLISIONS;
+ else
+ cstatus = SUCCESS;
+ }
+ }
+ }
+ else
+ cstatus = SUCCESS;
+
+ if(queue == BUG_QUEUE)
+ err = SUCCESS;
+
+ smctr_enable_16bit(dev);
+ if(err != SUCCESS)
+ break;
+ }
+
+ return (err);
+}
+
+static unsigned short smctr_tx_move_frame(struct net_device *dev,
+ struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int ram_usable;
+ __u32 flen, len, offset = 0;
+ __u8 *frag, *page;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name);
+
+ ram_usable = ((unsigned int)tp->ram_usable) << 10;
+ frag = skb->data;
+ flen = skb->len;
+
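+ /* Copy the frame into paged shared RAM, clamping each chunk to the
+ * current page boundary and to the remaining byte count.
+ */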
+ while(flen > 0 && bytes > 0)
+ {
+ smctr_set_page(dev, pbuff);
+
+ offset = SMC_PAGE_OFFSET(pbuff);
+
+ if(offset + flen > ram_usable)
+ len = ram_usable - offset;
+ else
+ len = flen;
+
+ if(len > bytes)
+ len = bytes;
+
+ page = (char *) (offset + tp->ram_access);
+ memcpy(page, frag, len);
+
+ flen -= len;
+ bytes -= len;
+ frag += len;
+ pbuff += len;
+ }
+
+ return (0);
+}
+
+/* Update the error statistic counters for this adapter. */
+static int smctr_update_err_stats(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ struct tr_statistics *tstat = &tp->MacStat;
+
+ if(tstat->internal_errors)
+ tstat->internal_errors
+ += *(tp->misc_command_data + 0) & 0x00ff;
+
+ if(tstat->line_errors)
+ tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
+
+ if(tstat->A_C_errors)
+ tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
+
+ if(tstat->burst_errors)
+ tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
+
+ if(tstat->abort_delimiters)
+ tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
+
+ if(tstat->recv_congest_count)
+ tstat->recv_congest_count
+ += *(tp->misc_command_data + 3) & 0x00ff;
+
+ if(tstat->lost_frames)
+ tstat->lost_frames
+ += *(tp->misc_command_data + 3) >> 8;
+
+ if(tstat->frequency_errors)
+ tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
+
+ if(tstat->frame_copied_errors)
+ tstat->frame_copied_errors
+ += *(tp->misc_command_data + 4) >> 8;
+
+ if(tstat->token_errors)
+ tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
+
+ return (0);
+}
+
+static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+ FCBlock *fcb;
+ BDBlock *bdb;
+ __u16 size, len;
+
+ fcb = tp->rx_fcb_curr[queue];
+ len = fcb->frame_length;
+
+ fcb->frame_status = 0;
+ fcb->info = FCB_CHAIN_END;
+ fcb->back_ptr->info = FCB_WARNING;
+
+ tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;
+
+ /* update RX BDBs */
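+ /* Convert the frame length into the span of BDBs it consumed (one
+ * BDBlock per RX_DATA_BUFFER_SIZE bytes, rounded up), then advance
+ * rx_bdb_curr, wrapping to the head of the ring if necessary.
+ */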
+ size = (len >> RX_BDB_SIZE_SHIFT);
+ if(len & RX_DATA_BUFFER_SIZE_MASK)
+ size += sizeof(BDBlock);
+ size &= (~RX_BDB_SIZE_MASK);
+
+ /* check if wrap around */
+ bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
+ if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
+ {
+ bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
+ + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
+ }
+
+ bdb->back_ptr->info = BDB_CHAIN_END;
+ tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
+ tp->rx_bdb_curr[queue] = bdb;
+
+ return (0);
+}
+
+static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
+ __u16 queue)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(smctr_debug > 20)
+ printk(KERN_DEBUG "smctr_update_tx_chain\n");
+
+ if(tp->num_tx_fcbs_used[queue] <= 0)
+ return (HARDWARE_FAILED);
+ else
+ {
+ if(tp->tx_buff_used[queue] < fcb->memory_alloc)
+ {
+ tp->tx_buff_used[queue] = 0;
+ return (HARDWARE_FAILED);
+ }
+
+ tp->tx_buff_used[queue] -= fcb->memory_alloc;
+
+ /* If all transmit buffers are cleared, tx_buff_curr[] must be
+ * reset to tx_buff_head[]; otherwise the tx buffer space becomes
+ * fragmented and cannot accommodate any buffer larger than
+ * (curr - head) or (end - curr), since we do not allow
+ * wrap-around allocation.
+ */
+ if(tp->tx_buff_used[queue] == 0)
+ tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
+
+ tp->num_tx_fcbs_used[queue]--;
+ fcb->frame_status = 0;
+ tp->tx_fcb_end[queue] = fcb->next_ptr;
+ netif_wake_queue(dev);
+ return (0);
+ }
+}
+
+static int smctr_wait_cmd(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int loop_count = 0x20000;
+
+ if(smctr_debug > 10)
+ printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name);
+
+ while(loop_count)
+ {
+ if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
+ break;
+ udelay(1);
+ loop_count--;
+ }
+
+ if(loop_count == 0)
+ return(HARDWARE_FAILED);
+
+ if(tp->acb_head->cmd_done_status & 0xff)
+ return(HARDWARE_FAILED);
+
+ return (0);
+}
+
+static int smctr_wait_while_cbusy(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int timeout = 0x20000;
+ int ioaddr = dev->base_addr;
+ __u8 r;
+
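+ /* Poll the command-busy flag (HWR_CBUSY on the 585 BIC, CSR_CBUSY
+ * otherwise) until the adapter is ready or the poll count expires.
+ */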
+ if(tp->bic_type == BIC_585_CHIP)
+ {
+ while(timeout)
+ {
+ r = inb(ioaddr + HWR);
+ if((r & HWR_CBUSY) == 0)
+ break;
+ timeout--;
+ }
+ }
+ else
+ {
+ while(timeout)
+ {
+ r = inb(ioaddr + CSR);
+ if((r & CSR_CBUSY) == 0)
+ break;
+ timeout--;
+ }
+ }
+
+ if(timeout)
+ return (0);
+ else
+ return (HARDWARE_FAILED);
+}
+
+#ifdef MODULE
+
+static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
+static int io[SMCTR_MAX_ADAPTERS];
+static int irq[SMCTR_MAX_ADAPTERS];
+
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(ringspeed, int, 0);
+
+static struct net_device *setup_card(int n)
+{
+ struct net_device *dev = alloc_trdev(sizeof(struct net_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->irq = irq[n];
+ err = smctr_probe1(dev, io[n]);
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+ out1:
+#ifdef CONFIG_MCA_LEGACY
+ { struct net_local *tp = netdev_priv(dev);
+ if (tp->slot_num)
+ mca_mark_as_unused(tp->slot_num);
+ }
+#endif
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ free_irq(dev->irq, dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+int init_module(void)
+{
+ int i, found = 0;
+ struct net_device *dev;
+
+ for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
+ dev = io[0]? setup_card(i) : smctr_probe(-1);
+ if (!IS_ERR(dev)) {
+ ++found;
+ dev_smctr[i] = dev;
+ }
+ }
+
+ return found ? 0 : -ENODEV;
+}
+
+void cleanup_module(void)
+{
+ int i;
+
+ for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
+ struct net_device *dev = dev_smctr[i];
+
+ if (dev) {
+
+ unregister_netdev(dev);
+#ifdef CONFIG_MCA_LEGACY
+ { struct net_local *tp = netdev_priv(dev);
+ if (tp->slot_num)
+ mca_mark_as_unused(tp->slot_num);
+ }
+#endif
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
new file mode 100644
index 000000000000..b306c7e4c793
--- /dev/null
+++ b/drivers/net/tokenring/smctr.h
@@ -0,0 +1,1588 @@
+/* smctr.h: SMC Token Ring driver header for Linux
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ */
+
+#ifndef __LINUX_SMCTR_H
+#define __LINUX_SMCTR_H
+
+#ifdef __KERNEL__
+
+#define MAX_TX_QUEUE 10
+
+#define SMC_HEADER_SIZE 14
+
+#define SMC_PAGE_OFFSET(X) (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask)
+
+#define INIT 0x0D
+#define RQ_ATTCH 0x10
+#define RQ_STATE 0x0F
+#define RQ_ADDR 0x0E
+#define CHG_PARM 0x0C
+#define RSP 0x00
+#define TX_FORWARD 0x09
+
+#define AC_FC_DAT ((3<<13) | 1)
+#define DAT 0x07
+
+#define RPT_NEW_MON 0x25
+#define RPT_SUA_CHG 0x26
+#define RPT_ACTIVE_ERR 0x28
+#define RPT_NN_INCMP 0x27
+#define RPT_ERROR 0x29
+
+#define RQ_INIT 0x20
+#define RPT_ATTCH 0x24
+#define RPT_STATE 0x23
+#define RPT_ADDR 0x22
+
+#define POSITIVE_ACK 0x0001
+#define A_FRAME_WAS_FORWARDED 0x8888
+
+#define GROUP_ADDRESS 0x2B
+#define PHYSICAL_DROP 0x0B
+#define AUTHORIZED_ACCESS_PRIORITY 0x07
+#define AUTHORIZED_FUNCTION_CLASS 0x06
+#define FUNCTIONAL_ADDRESS 0x2C
+#define RING_STATION_STATUS 0x29
+#define TRANSMIT_STATUS_CODE 0x2A
+#define IBM_PASS_SOURCE_ADDR 0x01
+#define AC_FC_RPT_TX_FORWARD ((0<<13) | 0)
+#define AC_FC_RPT_STATE ((0<<13) | 0)
+#define AC_FC_RPT_ADDR ((0<<13) | 0)
+#define CORRELATOR 0x09
+
+#define POSITIVE_ACK 0x0001 /* */
+#define E_MAC_DATA_INCOMPLETE 0x8001 /* not used */
+#define E_VECTOR_LENGTH_ERROR 0x8002 /* */
+#define E_UNRECOGNIZED_VECTOR_ID 0x8003 /* */
+#define E_INAPPROPRIATE_SOURCE_CLASS 0x8004 /* */
+#define E_SUB_VECTOR_LENGTH_ERROR 0x8005 /* */
+#define E_TRANSMIT_FORWARD_INVALID 0x8006 /* def. by IBM */
+#define E_MISSING_SUB_VECTOR 0x8007 /* */
+#define E_SUB_VECTOR_UNKNOWN 0x8008 /* */
+#define E_MAC_HEADER_TOO_LONG 0x8009 /* */
+#define E_FUNCTION_DISABLED 0x800A /* not used */
+
+#define A_FRAME_WAS_FORWARDED 0x8888 /* used by send_TX_FORWARD */
+
+#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
+#define LOCAL_RING_NUMBER 0x03
+#define ASSIGN_PHYSICAL_DROP 0x04
+#define ERROR_TIMER_VALUE 0x05
+#define AUTHORIZED_FUNCTION_CLASS 0x06
+#define AUTHORIZED_ACCESS_PRIORITY 0x07
+#define CORRELATOR 0x09
+#define PHYSICAL_DROP 0x0B
+#define RESPONSE_CODE 0x20
+#define ADDRESS_MODIFER 0x21
+#define PRODUCT_INSTANCE_ID 0x22
+#define RING_STATION_VERSION_NUMBER 0x23
+#define WRAP_DATA 0x26
+#define FRAME_FORWARD 0x27
+#define STATION_IDENTIFER 0x28
+#define RING_STATION_STATUS 0x29
+#define TRANSMIT_STATUS_CODE 0x2A
+#define GROUP_ADDRESS 0x2B
+#define FUNCTIONAL_ADDRESS 0x2C
+
+#define F_NO_SUB_VECTORS_FOUND 0x0000
+#define F_UPSTREAM_NEIGHBOR_ADDRESS 0x0001
+#define F_LOCAL_RING_NUMBER 0x0002
+#define F_ASSIGN_PHYSICAL_DROP 0x0004
+#define F_ERROR_TIMER_VALUE 0x0008
+#define F_AUTHORIZED_FUNCTION_CLASS 0x0010
+#define F_AUTHORIZED_ACCESS_PRIORITY 0x0020
+#define F_CORRELATOR 0x0040
+#define F_PHYSICAL_DROP 0x0080
+#define F_RESPONSE_CODE 0x0100
+#define F_PRODUCT_INSTANCE_ID 0x0200
+#define F_RING_STATION_VERSION_NUMBER 0x0400
+#define F_STATION_IDENTIFER 0x0800
+#define F_RING_STATION_STATUS 0x1000
+#define F_GROUP_ADDRESS 0x2000
+#define F_FUNCTIONAL_ADDRESS 0x4000
+#define F_FRAME_FORWARD 0x8000
+
+#define R_INIT 0x00
+#define R_RQ_ATTCH_STATE_ADDR 0x00
+#define R_CHG_PARM 0x00
+#define R_TX_FORWARD F_FRAME_FORWARD
+
+
+#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
+#define ADDRESS_MODIFER 0x21
+#define RING_STATION_VERSION_NUMBER 0x23
+#define PRODUCT_INSTANCE_ID 0x22
+
+#define RPT_TX_FORWARD 0x2A
+
+#define AC_FC_INIT (3<<13) | 0 /* */
+#define AC_FC_RQ_INIT ((3<<13) | 0) /* */
+#define AC_FC_RQ_ATTCH (3<<13) | 0 /* DC = SC of rx frame */
+#define AC_FC_RQ_STATE (3<<13) | 0 /* DC = SC of rx frame */
+#define AC_FC_RQ_ADDR (3<<13) | 0 /* DC = SC of rx frame */
+#define AC_FC_CHG_PARM (3<<13) | 0 /* */
+#define AC_FC_RSP (0<<13) | 0 /* DC = SC of rx frame */
+#define AC_FC_RPT_ATTCH (0<<13) | 0
+
+#define S_UPSTREAM_NEIGHBOR_ADDRESS 6 + 2
+#define S_LOCAL_RING_NUMBER 2 + 2
+#define S_ASSIGN_PHYSICAL_DROP 4 + 2
+#define S_ERROR_TIMER_VALUE 2 + 2
+#define S_AUTHORIZED_FUNCTION_CLASS 2 + 2
+#define S_AUTHORIZED_ACCESS_PRIORITY 2 + 2
+#define S_CORRELATOR 2 + 2
+#define S_PHYSICAL_DROP 4 + 2
+#define S_RESPONSE_CODE 4 + 2
+#define S_ADDRESS_MODIFER 2 + 2
+#define S_PRODUCT_INSTANCE_ID 18 + 2
+#define S_RING_STATION_VERSION_NUMBER 10 + 2
+#define S_STATION_IDENTIFER 6 + 2
+#define S_RING_STATION_STATUS 6 + 2
+#define S_GROUP_ADDRESS 4 + 2
+#define S_FUNCTIONAL_ADDRESS 4 + 2
+#define S_FRAME_FORWARD 252 + 2
+#define S_TRANSMIT_STATUS_CODE 2 + 2
+
+#define ISB_IMC_RES0 0x0000 /* */
+#define ISB_IMC_MAC_TYPE_3 0x0001 /* MAC_ARC_INDICATE */
+#define ISB_IMC_MAC_ERROR_COUNTERS 0x0002 /* */
+#define ISB_IMC_RES1 0x0003 /* */
+#define ISB_IMC_MAC_TYPE_2 0x0004 /* QUE_MAC_INDICATE */
+#define ISB_IMC_TX_FRAME 0x0005 /* */
+#define ISB_IMC_END_OF_TX_QUEUE 0x0006 /* */
+#define ISB_IMC_NON_MAC_RX_RESOURCE 0x0007 /* */
+#define ISB_IMC_MAC_RX_RESOURCE 0x0008 /* */
+#define ISB_IMC_NON_MAC_RX_FRAME 0x0009 /* */
+#define ISB_IMC_MAC_RX_FRAME 0x000A /* */
+#define ISB_IMC_TRC_FIFO_STATUS 0x000B /* */
+#define ISB_IMC_COMMAND_STATUS 0x000C /* */
+#define ISB_IMC_MAC_TYPE_1 0x000D /* Self Removed */
+#define ISB_IMC_TRC_INTRNL_TST_STATUS 0x000E /* */
+#define ISB_IMC_RES2 0x000F /* */
+
+#define NON_MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
+#define NON_MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
+#define NON_MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
+#define NON_MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
+#define RAW_NON_MAC_RX_RESOURCE_BW 0x1000 /* */
+#define RAW_NON_MAC_RX_RESOURCE_FW 0x2000 /* */
+#define RAW_NON_MAC_RX_RESOURCE_BE 0x4000 /* */
+#define RAW_NON_MAC_RX_RESOURCE_FE 0x8000 /* */
+
+#define MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
+#define MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
+#define MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
+#define MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
+#define RAW_MAC_RX_RESOURCE_BW 0x1000 /* */
+#define RAW_MAC_RX_RESOURCE_FW 0x2000 /* */
+#define RAW_MAC_RX_RESOURCE_BE 0x4000 /* */
+#define RAW_MAC_RX_RESOURCE_FE 0x8000 /* */
+
+#define TRC_FIFO_STATUS_TX_UNDERRUN 0x40 /* shifted right 8 bits */
+#define TRC_FIFO_STATUS_RX_OVERRUN 0x80 /* shifted right 8 bits */
+#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000 /* */
+#define RAW_TRC_FIFO_STATUS_RX_OVERRUN 0x8000 /* */
+
+#define CSR_CLRTINT 0x08
+
+#define MSB(X) ((__u8)((__u16) X >> 8))
+#define LSB(X) ((__u8)((__u16) X & 0xff))
+
+#define AC_FC_LOBE_MEDIA_TEST ((3<<13) | 0)
+#define S_WRAP_DATA 248 + 2 /* 500 + 2 */
+#define WRAP_DATA 0x26
+#define LOBE_MEDIA_TEST 0x08
+
+/* Destination Class (dc) */
+
+#define DC_MASK 0xF0
+#define DC_RS 0x00
+#define DC_CRS 0x40
+#define DC_RPS 0x50
+#define DC_REM 0x60
+
+/* Source Classes (sc) */
+
+#define SC_MASK 0x0F
+#define SC_RS 0x00
+#define SC_CRS 0x04
+#define SC_RPS 0x05
+#define SC_REM 0x06
+
+#define PR 0x11
+#define PR_PAGE_MASK 0x0C000
+
+#define MICROCHANNEL 0x0008
+#define INTERFACE_CHIP 0x0010
+#define BOARD_16BIT 0x0040
+#define PAGED_RAM 0x0080
+#define WD8115TA (TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM)
+#define WD8115T (TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM)
+
+#define BRD_ID_8316 0x50
+
+#define r587_SER 0x001
+#define SER_DIN 0x80
+#define SER_DOUT 0x40
+#define SER_CLK 0x20
+#define SER_ECS 0x10
+#define SER_E806 0x08
+#define SER_PNP 0x04
+#define SER_BIO 0x02
+#define SER_16B 0x01
+
+#define r587_IDR 0x004
+#define IDR_IRQ_MASK 0x0F0
+#define IDR_DCS_MASK 0x007
+#define IDR_RWS 0x008
+
+
+#define r587_BIO 0x003
+#define BIO_ENB 0x080
+#define BIO_MASK 0x03F
+
+#define r587_PCR 0x005
+#define PCR_RAMS 0x040
+
+
+
+#define NUM_ADDR_BITS 8
+
+#define ISA_MAX_ADDRESS 0x00ffffff
+
+#define SMCTR_MAX_ADAPTERS 7
+
+#define MC_TABLE_ENTRIES 16
+
+#define MAXFRAGMENTS 32
+
+#define CHIP_REV_MASK 0x3000
+
+#define MAX_TX_QS 8
+#define NUM_TX_QS_USED 3
+
+#define MAX_RX_QS 2
+#define NUM_RX_QS_USED 2
+
+#define INTEL_DATA_FORMAT 0x4000
+#define INTEL_ADDRESS_POINTER_FORMAT 0x8000
+#define PAGE_POINTER(X) ((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access)
+#define SWAP_WORDS(X) (((X & 0xFFFF) << 16) | (X >> 16))
+
+#define INTERFACE_CHIP 0x0010 /* Soft Config Adapter */
+#define ADVANCED_FEATURES 0x0020 /* Adv. netw. interface features */
+#define BOARD_16BIT 0x0040 /* 16 bit capability */
+#define PAGED_RAM 0x0080 /* Adapter has paged RAM */
+
+#define PAGED_ROM 0x0100 /* Adapter has paged ROM */
+
+#define RAM_SIZE_UNKNOWN 0x0000 /* Unknown RAM size */
+#define RAM_SIZE_0K 0x0001 /* 0K RAM */
+#define RAM_SIZE_8K 0x0002 /* 8k RAM */
+#define RAM_SIZE_16K 0x0003 /* 16k RAM */
+#define RAM_SIZE_32K 0x0004 /* 32k RAM */
+#define RAM_SIZE_64K 0x0005 /* 64k RAM */
+#define RAM_SIZE_RESERVED_6 0x0006 /* Reserved RAM size */
+#define RAM_SIZE_RESERVED_7 0x0007 /* Reserved RAM size */
+#define RAM_SIZE_MASK 0x0007 /* Isolates RAM Size */
+
+#define TOKEN_MEDIA 0x0005
+
+#define BID_REG_0 0x00
+#define BID_REG_1 0x01
+#define BID_REG_2 0x02
+#define BID_REG_3 0x03
+#define BID_REG_4 0x04
+#define BID_REG_5 0x05
+#define BID_REG_6 0x06
+#define BID_REG_7 0x07
+#define BID_LAR_0 0x08
+#define BID_LAR_1 0x09
+#define BID_LAR_2 0x0A
+#define BID_LAR_3 0x0B
+#define BID_LAR_4 0x0C
+#define BID_LAR_5 0x0D
+
+#define BID_BOARD_ID_BYTE 0x0E
+#define BID_CHCKSM_BYTE 0x0F
+#define BID_LAR_OFFSET 0x08
+
+#define BID_MSZ_583_BIT 0x08
+#define BID_SIXTEEN_BIT_BIT 0x01
+
+#define BID_BOARD_REV_MASK 0x1E
+
+#define BID_MEDIA_TYPE_BIT 0x01
+#define BID_SOFT_CONFIG_BIT 0x20
+#define BID_RAM_SIZE_BIT 0x40
+#define BID_BUS_TYPE_BIT 0x80
+
+#define BID_CR 0x10
+
+#define BID_TXP 0x04 /* Transmit Packet Command */
+
+#define BID_TCR_DIFF 0x0D /* Transmit Configuration Register */
+
+#define BID_TCR_VAL 0x18 /* Value to Test 8390 or 690 */
+#define BID_PS0 0x00 /* Register Page Select 0 */
+#define BID_PS1 0x40 /* Register Page Select 1 */
+#define BID_PS2 0x80 /* Register Page Select 2 */
+#define BID_PS_MASK 0x3F /* For Masking Off Page Select Bits */
+
+#define BID_EEPROM_0 0x08
+#define BID_EEPROM_1 0x09
+#define BID_EEPROM_2 0x0A
+#define BID_EEPROM_3 0x0B
+#define BID_EEPROM_4 0x0C
+#define BID_EEPROM_5 0x0D
+#define BID_EEPROM_6 0x0E
+#define BID_EEPROM_7 0x0F
+
+#define BID_OTHER_BIT 0x02
+#define BID_ICR_MASK 0x0C
+#define BID_EAR_MASK 0x0F
+#define BID_ENGR_PAGE 0x0A0
+#define BID_RLA 0x10
+#define BID_EA6 0x80
+#define BID_RECALL_DONE_MASK 0x10
+#define BID_BID_EEPROM_OVERRIDE 0xFFB0
+#define BID_EXTRA_EEPROM_OVERRIDE 0xFFD0
+#define BID_EEPROM_MEDIA_MASK 0x07
+#define BID_STARLAN_TYPE 0x00
+#define BID_ETHERNET_TYPE 0x01
+#define BID_TP_TYPE 0x02
+#define BID_EW_TYPE 0x03
+#define BID_TOKEN_RING_TYPE 0x04
+#define BID_UTP2_TYPE 0x05
+#define BID_EEPROM_IRQ_MASK 0x18
+#define BID_PRIMARY_IRQ 0x00
+#define BID_ALTERNATE_IRQ_1 0x08
+#define BID_ALTERNATE_IRQ_2 0x10
+#define BID_ALTERNATE_IRQ_3 0x18
+#define BID_EEPROM_RAM_SIZE_MASK 0xE0
+#define BID_EEPROM_RAM_SIZE_RES1 0x00
+#define BID_EEPROM_RAM_SIZE_RES2 0x20
+#define BID_EEPROM_RAM_SIZE_8K 0x40
+#define BID_EEPROM_RAM_SIZE_16K 0x60
+#define BID_EEPROM_RAM_SIZE_32K 0x80
+#define BID_EEPROM_RAM_SIZE_64K 0xA0
+#define BID_EEPROM_RAM_SIZE_RES3 0xC0
+#define BID_EEPROM_RAM_SIZE_RES4 0xE0
+#define BID_EEPROM_BUS_TYPE_MASK 0x07
+#define BID_EEPROM_BUS_TYPE_AT 0x00
+#define BID_EEPROM_BUS_TYPE_MCA 0x01
+#define BID_EEPROM_BUS_TYPE_EISA 0x02
+#define BID_EEPROM_BUS_TYPE_NEC 0x03
+#define BID_EEPROM_BUS_SIZE_MASK 0x18
+#define BID_EEPROM_BUS_SIZE_8BIT 0x00
+#define BID_EEPROM_BUS_SIZE_16BIT 0x08
+#define BID_EEPROM_BUS_SIZE_32BIT 0x10
+#define BID_EEPROM_BUS_SIZE_64BIT 0x18
+#define BID_EEPROM_BUS_MASTER 0x20
+#define BID_EEPROM_RAM_PAGING 0x40
+#define BID_EEPROM_ROM_PAGING 0x80
+#define BID_EEPROM_PAGING_MASK 0xC0
+#define BID_EEPROM_LOW_COST 0x08
+#define BID_EEPROM_IO_MAPPED 0x10
+#define BID_EEPROM_HMI 0x01
+#define BID_EEPROM_AUTO_MEDIA_DETECT 0x01
+#define BID_EEPROM_CHIP_REV_MASK 0x0C
+
+#define BID_EEPROM_LAN_ADDR 0x30
+
+#define BID_EEPROM_MEDIA_OPTION 0x54
+#define BID_EEPROM_MEDIA_UTP 0x01
+#define BID_EEPROM_4MB_RING 0x08
+#define BID_EEPROM_16MB_RING 0x10
+#define BID_EEPROM_MEDIA_STP 0x40
+
+#define BID_EEPROM_MISC_DATA 0x56
+#define BID_EEPROM_EARLY_TOKEN_RELEASE 0x02
+
+#define CNFG_ID_8003E 0x6fc0
+#define CNFG_ID_8003S 0x6fc1
+#define CNFG_ID_8003W 0x6fc2
+#define CNFG_ID_8115TRA 0x6ec6
+#define CNFG_ID_8013E 0x61C8
+#define CNFG_ID_8013W 0x61C9
+#define CNFG_ID_BISTRO03E 0xEFE5
+#define CNFG_ID_BISTRO13E 0xEFD5
+#define CNFG_ID_BISTRO13W 0xEFD4
+#define CNFG_MSR_583 0x0
+#define CNFG_ICR_583 0x1
+#define CNFG_IAR_583 0x2
+#define CNFG_BIO_583 0x3
+#define CNFG_EAR_583 0x3
+#define CNFG_IRR_583 0x4
+#define CNFG_LAAR_584 0x5
+#define CNFG_GP2 0x7
+#define CNFG_LAAR_MASK 0x1F
+#define CNFG_LAAR_ZWS 0x20
+#define CNFG_LAAR_L16E 0x40
+#define CNFG_ICR_IR2_584 0x04
+#define CNFG_ICR_MASK 0x08
+#define CNFG_ICR_MSZ 0x08
+#define CNFG_ICR_RLA 0x10
+#define CNFG_ICR_STO 0x80
+#define CNFG_IRR_IRQS 0x60
+#define CNFG_IRR_IEN 0x80
+#define CNFG_IRR_ZWS 0x01
+#define CNFG_GP2_BOOT_NIBBLE 0x0F
+#define CNFG_IRR_OUT2 0x04
+#define CNFG_IRR_OUT1 0x02
+
+#define CNFG_SIZE_8KB 8
+#define CNFG_SIZE_16KB 16
+#define CNFG_SIZE_32KB 32
+#define CNFG_SIZE_64KB 64
+#define CNFG_SIZE_128KB 128
+#define CNFG_SIZE_256KB 256
+#define ROM_DISABLE 0x0
+
+#define CNFG_SLOT_ENABLE_BIT 0x08
+
+#define CNFG_POS_CONTROL_REG 0x096
+#define CNFG_POS_REG0 0x100
+#define CNFG_POS_REG1 0x101
+#define CNFG_POS_REG2 0x102
+#define CNFG_POS_REG3 0x103
+#define CNFG_POS_REG4 0x104
+#define CNFG_POS_REG5 0x105
+
+#define CNFG_ADAPTER_TYPE_MASK 0x0e
+
+#define SLOT_16BIT 0x0008
+#define INTERFACE_5X3_CHIP 0x0000 /* 0000 = 583 or 593 chips */
+#define NIC_690_BIT 0x0010 /* NIC is 690 */
+#define ALTERNATE_IRQ_BIT 0x0020 /* Alternate IRQ is used */
+#define INTERFACE_584_CHIP 0x0040 /* 0001 = 584 chip */
+#define INTERFACE_594_CHIP 0x0080 /* 0010 = 594 chip */
+#define INTERFACE_585_CHIP 0x0100 /* 0100 = 585/790 chip */
+#define INTERFACE_CHIP_MASK 0x03C0 /* Isolates Intfc Chip Type */
+
+#define BOARD_16BIT 0x0040
+#define NODE_ADDR_CKSUM 0xEE
+#define BRD_ID_8115T 0x04
+
+#define NIC_825_BIT 0x0400 /* TRC 83C825 NIC */
+#define NIC_790_BIT 0x0800 /* NIC is 83C790 Ethernet */
+
+#define CHIP_REV_MASK 0x3000
+
+#define HWR_CBUSY 0x02
+#define HWR_CA 0x01
+
+#define MAC_QUEUE 0
+#define NON_MAC_QUEUE 1
+#define BUG_QUEUE 2 /* NO RECEIVE QUEUE, ONLY TX */
+
+#define NUM_MAC_TX_FCBS 8
+#define NUM_MAC_TX_BDBS NUM_MAC_TX_FCBS
+#define NUM_MAC_RX_FCBS 7
+#define NUM_MAC_RX_BDBS 8
+
+#define NUM_NON_MAC_TX_FCBS 6
+#define NUM_NON_MAC_TX_BDBS NUM_NON_MAC_TX_FCBS
+
+#define NUM_NON_MAC_RX_BDBS 0 /* CALCULATED DYNAMICALLY */
+
+#define NUM_BUG_TX_FCBS 8
+#define NUM_BUG_TX_BDBS NUM_BUG_TX_FCBS
+
+#define MAC_TX_BUFFER_MEMORY 1024
+#define NON_MAC_TX_BUFFER_MEMORY (20 * 1024)
+#define BUG_TX_BUFFER_MEMORY (NUM_BUG_TX_FCBS * 32)
+
+#define RX_BUFFER_MEMORY 0 /* CALCULATED DYNAMICALLY */
+#define RX_DATA_BUFFER_SIZE 256
+#define RX_BDB_SIZE_SHIFT 3 /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */
+#define RX_BDB_SIZE_MASK (sizeof(BDBlock) - 1)
+#define RX_DATA_BUFFER_SIZE_MASK (RX_DATA_BUFFER_SIZE-1)
+
+#define NUM_OF_INTERRUPTS 0x20
+
+#define NOT_TRANSMITING 0
+#define TRANSMITING 1
+
+#define TRC_INTERRUPT_ENABLE_MASK 0x7FF6
+
+#define UCODE_VERSION 0x58
+
+#define UCODE_SIZE_OFFSET 0x0000 /* WORD */
+#define UCODE_CHECKSUM_OFFSET 0x0002 /* WORD */
+#define UCODE_VERSION_OFFSET 0x0004 /* BYTE */
+
+#define CS_RAM_SIZE 0X2000
+#define CS_RAM_CHECKSUM_OFFSET 0x1FFE /* WORD 1FFE(MSB)-1FFF(LSB)*/
+#define CS_RAM_VERSION_OFFSET 0x1FFC /* WORD 1FFC(MSB)-1FFD(LSB)*/
+
+#define MISC_DATA_SIZE 128
+#define NUM_OF_ACBS 1
+
+#define ACB_COMMAND_NOT_DONE 0x0000 /* Init, command not done */
+#define ACB_COMMAND_DONE 0x8000 /* TRC says command done */
+#define ACB_COMMAND_STATUS_MASK 0x00FF /* low byte is status */
+#define ACB_COMMAND_SUCCESSFUL 0x0000 /* means cmd was successful */
+#define ACB_NOT_CHAIN_END 0x0000 /* tell TRC more CBs in chain */
+#define ACB_CHAIN_END 0x8000 /* tell TRC last CB in chain */
+#define ACB_COMMAND_NO_INTERRUPT 0x0000 /* tell TRC no INT after CB */
+#define ACB_COMMAND_INTERRUPT 0x2000 /* tell TRC to INT after CB */
+#define ACB_SUB_CMD_NOP 0x0000
+#define ACB_CMD_HIC_NOP 0x0080
+#define ACB_CMD_MCT_NOP 0x0000
+#define ACB_CMD_MCT_TEST 0x0001
+#define ACB_CMD_HIC_TEST 0x0081
+#define ACB_CMD_INSERT 0x0002
+#define ACB_CMD_REMOVE 0x0003
+#define ACB_CMD_MCT_WRITE_VALUE 0x0004
+#define ACB_CMD_HIC_WRITE_VALUE 0x0084
+#define ACB_CMD_MCT_READ_VALUE 0x0005
+#define ACB_CMD_HIC_READ_VALUE 0x0085
+#define ACB_CMD_INIT_TX_RX 0x0086
+#define ACB_CMD_INIT_TRC_TIMERS 0x0006
+#define ACB_CMD_READ_TRC_STATUS 0x0007
+#define ACB_CMD_CHANGE_JOIN_STATE 0x0008
+#define ACB_CMD_RESERVED_9 0x0009
+#define ACB_CMD_RESERVED_A 0x000A
+#define ACB_CMD_RESERVED_B 0x000B
+#define ACB_CMD_RESERVED_C 0x000C
+#define ACB_CMD_RESERVED_D 0x000D
+#define ACB_CMD_RESERVED_E 0x000E
+#define ACB_CMD_RESERVED_F 0x000F
+
+#define TRC_MAC_REGISTERS_TEST 0x0000
+#define TRC_INTERNAL_LOOPBACK 0x0001
+#define TRC_TRI_LOOPBACK 0x0002
+#define TRC_INTERNAL_ROM_TEST 0x0003
+#define TRC_LOBE_MEDIA_TEST 0x0004
+#define TRC_ANALOG_TEST 0x0005
+#define TRC_HOST_INTERFACE_REG_TEST 0x0003
+
+#define TEST_DMA_1 0x0000
+#define TEST_DMA_2 0x0001
+#define TEST_MCT_ROM 0x0002
+#define HIC_INTERNAL_DIAG 0x0003
+
+#define ABORT_TRANSMIT_PRIORITY_0 0x0001
+#define ABORT_TRANSMIT_PRIORITY_1 0x0002
+#define ABORT_TRANSMIT_PRIORITY_2 0x0004
+#define ABORT_TRANSMIT_PRIORITY_3 0x0008
+#define ABORT_TRANSMIT_PRIORITY_4 0x0010
+#define ABORT_TRANSMIT_PRIORITY_5 0x0020
+#define ABORT_TRANSMIT_PRIORITY_6 0x0040
+#define ABORT_TRANSMIT_PRIORITY_7 0x0080
+
+#define TX_PENDING_PRIORITY_0 0x0001
+#define TX_PENDING_PRIORITY_1 0x0002
+#define TX_PENDING_PRIORITY_2 0x0004
+#define TX_PENDING_PRIORITY_3 0x0008
+#define TX_PENDING_PRIORITY_4 0x0010
+#define TX_PENDING_PRIORITY_5 0x0020
+#define TX_PENDING_PRIORITY_6 0x0040
+#define TX_PENDING_PRIORITY_7 0x0080
+
+#define FCB_FRAME_LENGTH 0x100
+#define FCB_COMMAND_DONE 0x8000 /* FCB Word 0 */
+#define FCB_NOT_CHAIN_END 0x0000 /* FCB Word 1 */
+#define FCB_CHAIN_END 0x8000
+#define FCB_NO_WARNING 0x0000
+#define FCB_WARNING 0x4000
+#define FCB_INTERRUPT_DISABLE 0x0000
+#define FCB_INTERRUPT_ENABLE 0x2000
+
+#define FCB_ENABLE_IMA 0x0008
+#define FCB_ENABLE_TES 0x0004 /* Guarantee Tx before Int */
+#define FCB_ENABLE_TFS 0x0002 /* Post Tx Frame Status */
+#define FCB_ENABLE_NTC 0x0001 /* No Tx CRC */
+
+#define FCB_TX_STATUS_CR2 0x0004
+#define FCB_TX_STATUS_AR2 0x0008
+#define FCB_TX_STATUS_CR1 0x0040
+#define FCB_TX_STATUS_AR1 0x0080
+#define FCB_TX_AC_BITS (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2)
+#define FCB_TX_STATUS_E 0x0100
+
+#define FCB_RX_STATUS_ANY_ERROR 0x0001
+#define FCB_RX_STATUS_FCS_ERROR 0x0002
+
+#define FCB_RX_STATUS_IA_MATCHED 0x0400
+#define FCB_RX_STATUS_IGA_BSGA_MATCHED 0x0500
+#define FCB_RX_STATUS_FA_MATCHED 0x0600
+#define FCB_RX_STATUS_BA_MATCHED 0x0700
+#define FCB_RX_STATUS_DA_MATCHED 0x0400
+#define FCB_RX_STATUS_SOURCE_ROUTING 0x0800
+
+#define BDB_BUFFER_SIZE 0x100
+#define BDB_NOT_CHAIN_END 0x0000
+#define BDB_CHAIN_END 0x8000
+#define BDB_NO_WARNING 0x0000
+#define BDB_WARNING 0x4000
+
+#define ERROR_COUNTERS_CHANGED 0x0001
+#define TI_NDIS_RING_STATUS_CHANGED 0x0002
+#define UNA_CHANGED 0x0004
+#define READY_TO_SEND_RQ_INIT 0x0008
+
+#define SCGB_ADDRESS_POINTER_FORMAT INTEL_ADDRESS_POINTER_FORMAT
+#define SCGB_DATA_FORMAT INTEL_DATA_FORMAT
+#define SCGB_MULTI_WORD_CONTROL 0
+#define SCGB_BURST_LENGTH 0x000E /* DMA Burst Length */
+
+#define SCGB_CONFIG (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH)
+
+#define ISCP_BLOCK_SIZE 0x0A
+#define RAM_SIZE 0x10000
+#define INIT_SYS_CONFIG_PTR_OFFSET (RAM_SIZE-ISCP_BLOCK_SIZE)
+#define SCGP_BLOCK_OFFSET 0
+
+#define SCLB_NOT_VALID 0x0000 /* Initially, SCLB not valid */
+#define SCLB_VALID 0x8000 /* Host tells TRC SCLB valid */
+#define SCLB_PROCESSED 0x0000 /* TRC says SCLB processed */
+#define SCLB_RESUME_CONTROL_NOT_VALID 0x0000 /* Initially, RC not valid */
+#define SCLB_RESUME_CONTROL_VALID 0x4000 /* Host tells TRC RC valid */
+#define SCLB_IACK_CODE_NOT_VALID 0x0000 /* Initially, IACK not valid */
+#define SCLB_IACK_CODE_VALID 0x2000 /* Host tells TRC IACK valid */
+#define SCLB_CMD_NOP 0x0000
+#define SCLB_CMD_REMOVE 0x0001
+#define SCLB_CMD_SUSPEND_ACB_CHAIN 0x0002
+#define SCLB_CMD_SET_INTERRUPT_MASK 0x0003
+#define SCLB_CMD_CLEAR_INTERRUPT_MASK 0x0004
+#define SCLB_CMD_RESERVED_5 0x0005
+#define SCLB_CMD_RESERVED_6 0x0006
+#define SCLB_CMD_RESERVED_7 0x0007
+#define SCLB_CMD_RESERVED_8 0x0008
+#define SCLB_CMD_RESERVED_9 0x0009
+#define SCLB_CMD_RESERVED_A 0x000A
+#define SCLB_CMD_RESERVED_B 0x000B
+#define SCLB_CMD_RESERVED_C 0x000C
+#define SCLB_CMD_RESERVED_D 0x000D
+#define SCLB_CMD_RESERVED_E 0x000E
+#define SCLB_CMD_RESERVED_F 0x000F
+
+#define SCLB_RC_ACB 0x0001 /* Action Command Block Chain */
+#define SCLB_RC_RES0 0x0002 /* Always Zero */
+#define SCLB_RC_RES1 0x0004 /* Always Zero */
+#define SCLB_RC_RES2 0x0008 /* Always Zero */
+#define SCLB_RC_RX_MAC_FCB 0x0010 /* RX_MAC_FCB Chain */
+#define SCLB_RC_RX_MAC_BDB 0x0020 /* RX_MAC_BDB Chain */
+#define SCLB_RC_RX_NON_MAC_FCB 0x0040 /* RX_NON_MAC_FCB Chain */
+#define SCLB_RC_RX_NON_MAC_BDB 0x0080 /* RX_NON_MAC_BDB Chain */
+#define SCLB_RC_TFCB0 0x0100 /* TX Priority 0 FCB Chain */
+#define SCLB_RC_TFCB1 0x0200 /* TX Priority 1 FCB Chain */
+#define SCLB_RC_TFCB2 0x0400 /* TX Priority 2 FCB Chain */
+#define SCLB_RC_TFCB3 0x0800 /* TX Priority 3 FCB Chain */
+#define SCLB_RC_TFCB4 0x1000 /* TX Priority 4 FCB Chain */
+#define SCLB_RC_TFCB5 0x2000 /* TX Priority 5 FCB Chain */
+#define SCLB_RC_TFCB6 0x4000 /* TX Priority 6 FCB Chain */
+#define SCLB_RC_TFCB7 0x8000 /* TX Priority 7 FCB Chain */
+
+#define SCLB_IMC_RES0 0x0001 /* */
+#define SCLB_IMC_MAC_TYPE_3 0x0002 /* MAC_ARC_INDICATE */
+#define SCLB_IMC_MAC_ERROR_COUNTERS 0x0004 /* */
+#define SCLB_IMC_RES1 0x0008 /* */
+#define SCLB_IMC_MAC_TYPE_2 0x0010 /* QUE_MAC_INDICATE */
+#define SCLB_IMC_TX_FRAME 0x0020 /* */
+#define SCLB_IMC_END_OF_TX_QUEUE 0x0040 /* */
+#define SCLB_IMC_NON_MAC_RX_RESOURCE 0x0080 /* */
+#define SCLB_IMC_MAC_RX_RESOURCE 0x0100 /* */
+#define SCLB_IMC_NON_MAC_RX_FRAME 0x0200 /* */
+#define SCLB_IMC_MAC_RX_FRAME 0x0400 /* */
+#define SCLB_IMC_TRC_FIFO_STATUS 0x0800 /* */
+#define SCLB_IMC_COMMAND_STATUS 0x1000 /* */
+#define SCLB_IMC_MAC_TYPE_1 0x2000 /* Self Removed */
+#define SCLB_IMC_TRC_INTRNL_TST_STATUS 0x4000 /* */
+#define SCLB_IMC_RES2 0x8000 /* */
+
+#define DMA_TRIGGER 0x0004
+#define FREQ_16MB_BIT 0x0010
+#define THDREN 0x0020
+#define CFG0_RSV1 0x0040
+#define CFG0_RSV2 0x0080
+#define ETREN 0x0100
+#define RX_OWN_BIT 0x0200
+#define RXATMAC 0x0400
+#define PROMISCUOUS_BIT 0x0800
+#define USETPT 0x1000
+#define SAVBAD_BIT 0x2000
+#define ONEQUE 0x4000
+#define NO_AUTOREMOVE 0x8000
+
+#define RX_FCB_AREA_8316 0x00000000
+#define RX_BUFF_AREA_8316 0x00000000
+
+#define TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access)
+#define RX_FCB_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316)
+#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316)
+
+// Offset 0: MSR - Memory Select Register
+//
+#define r587_MSR 0x000 // Register Offset
+//#define MSR_RST 0x080 // LAN Controller Reset
+#define MSR_MENB 0x040 // Shared Memory Enable
+#define MSR_RA18 0x020 // Ram Address bit 18 (583, 584, 587)
+#define MSR_RA17 0x010 // Ram Address bit 17 (583, 584, 585/790)
+#define MSR_RA16 0x008 // Ram Address bit 16 (583, 584, 585/790)
+#define MSR_RA15 0x004 // Ram Address bit 15 (583, 584, 585/790)
+#define MSR_RA14 0x002 // Ram Address bit 14 (583, 584, 585/790)
+#define MSR_RA13 0x001 // Ram Address bit 13 (583, 584, 585/790)
+
+#define MSR_MASK 0x03F // Mask for Address bits RA18-RA13 (583, 584, 587)
+
+#define MSR 0x00
+#define IRR 0x04
+#define HWR 0x04
+#define LAAR 0x05
+#define IMCCR 0x05
+#define LAR0 0x08
+#define BDID 0x0E // Adapter ID byte register offset
+#define CSR 0x10
+#define PR 0x11
+
+#define MSR_RST 0x80
+#define MSR_MEMB 0x40
+#define MSR_0WS 0x20
+
+#define FORCED_16BIT_MODE 0x0002
+
+#define INTERFRAME_SPACING_16 0x0003 /* 6 bytes */
+#define INTERFRAME_SPACING_4 0x0001 /* 2 bytes */
+#define MULTICAST_ADDRESS_BIT 0x0010
+#define NON_SRC_ROUTING_BIT 0x0020
+
+#define LOOPING_MODE_MASK 0x0007
+
+/*
+ * Decode firmware defines.
+ */
+#define SWAP_BYTES(X) ((X & 0xff) << 8) | (X >> 8)
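+/* 16-bit byte swap, e.g. SWAP_BYTES(0x1234) == 0x3412. */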
+#define WEIGHT_OFFSET 5
+#define TREE_SIZE_OFFSET 9
+#define TREE_OFFSET 11
+
+/* The Huffman Encoding Tree is constructed of these nodes. */
+typedef struct {
+ __u8 llink; /* Short version of above node. */
+ __u8 tag;
+ __u8 info; /* This node is used on decodes. */
+ __u8 rlink;
+} DECODE_TREE_NODE;
+
+#define ROOT 0 /* Branch value. */
+#define LEAF 0 /* Tag field value. */
+#define BRANCH 1 /* Tag field value. */
+
+/*
+ * Multicast Table Structure
+ */
+typedef struct {
+ __u8 address[6];
+ __u8 instance_count;
+} McTable;
+
+/*
+ * Fragment Descriptor Definition
+ */
+typedef struct {
+ __u8 *fragment_ptr;
+ __u32 fragment_length;
+} FragmentStructure;
+
+/*
+ * Data Buffer Structure Definition
+ */
+typedef struct {
+ __u32 fragment_count;
+ FragmentStructure fragment_list[MAXFRAGMENTS];
+} DataBufferStructure;
+
+#pragma pack(1)
+typedef struct {
+ __u8 IType;
+ __u8 ISubtype;
+} Interrupt_Status_Word;
+
+#pragma pack(1)
+typedef struct BDBlockType {
+ __u16 info; /* 02 */
+ __u32 trc_next_ptr; /* 06 */
+ __u32 trc_data_block_ptr; /* 10 */
+ __u16 buffer_length; /* 12 */
+
+ __u16 *data_block_ptr; /* 16 */
+ struct BDBlockType *next_ptr; /* 20 */
+ struct BDBlockType *back_ptr; /* 24 */
+ __u8 filler[8]; /* 32 */
+} BDBlock;
+
+#pragma pack(1)
+typedef struct FCBlockType {
+ __u16 frame_status; /* 02 */
+ __u16 info; /* 04 */
+ __u32 trc_next_ptr; /* 08 */
+ __u32 trc_bdb_ptr; /* 12 */
+ __u16 frame_length; /* 14 */
+
+ BDBlock *bdb_ptr; /* 18 */
+ struct FCBlockType *next_ptr; /* 22 */
+ struct FCBlockType *back_ptr; /* 26 */
+ __u16 memory_alloc; /* 28 */
+ __u8 filler[4]; /* 32 */
+
+} FCBlock;
+
+#pragma pack(1)
+typedef struct SBlockType{
+ __u8 Internal_Error_Count;
+ __u8 Line_Error_Count;
+ __u8 AC_Error_Count;
+ __u8 Burst_Error_Count;
+ __u8 RESERVED_COUNTER_0;
+ __u8 AD_TRANS_Count;
+ __u8 RCV_Congestion_Count;
+ __u8 Lost_FR_Error_Count;
+ __u8 FREQ_Error_Count;
+ __u8 FR_Copied_Error_Count;
+ __u8 RESERVED_COUNTER_1;
+ __u8 Token_Error_Count;
+
+ __u16 TI_NDIS_Ring_Status;
+ __u16 BCN_Type;
+ __u16 Error_Code;
+ __u16 SA_of_Last_AMP_SMP[3];
+ __u16 UNA[3];
+ __u16 Ucode_Version_Number;
+ __u16 Status_CHG_Indicate;
+ __u16 RESERVED_STATUS_0;
+} SBlock;
+
+#pragma pack(1)
+typedef struct ACBlockType {
+ __u16 cmd_done_status; /* 02 */
+ __u16 cmd_info; /* 04 */
+ __u32 trc_next_ptr; /* 08 */
+ __u16 cmd; /* 10 */
+ __u16 subcmd; /* 12 */
+ __u16 data_offset_lo; /* 14 */
+ __u16 data_offset_hi; /* 16 */
+
+ struct ACBlockType *next_ptr; /* 20 */
+
+ __u8 filler[12]; /* 32 */
+} ACBlock;
+
+#define NUM_OF_INTERRUPTS 0x20
+
+#pragma pack(1)
+typedef struct {
+ Interrupt_Status_Word IStatus[NUM_OF_INTERRUPTS];
+} ISBlock;
+
+#pragma pack(1)
+typedef struct {
+ __u16 valid_command; /* 02 */
+ __u16 iack_code; /* 04 */
+ __u16 resume_control; /* 06 */
+ __u16 int_mask_control; /* 08 */
+ __u16 int_mask_state; /* 10 */
+
+ __u8 filler[6]; /* 16 */
+} SCLBlock;
+
+#pragma pack(1)
+typedef struct
+{
+ __u16 config; /* 02 */
+ __u32 trc_sclb_ptr; /* 06 */
+ __u32 trc_acb_ptr; /* 10 */
+ __u32 trc_isb_ptr; /* 14 */
+ __u16 isbsiz; /* 16 */
+
+ SCLBlock *sclb_ptr; /* 20 */
+ ACBlock *acb_ptr; /* 24 */
+ ISBlock *isb_ptr; /* 28 */
+
+ __u16 Non_Mac_Rx_Bdbs; /* 30 DEBUG */
+ __u8 filler[2]; /* 32 */
+
+} SCGBlock;
+
+#pragma pack(1)
+typedef struct
+{
+ __u32 trc_scgb_ptr;
+ SCGBlock *scgb_ptr;
+} ISCPBlock;
+#pragma pack()
+
+typedef struct net_local {
+ ISCPBlock *iscpb_ptr;
+ SCGBlock *scgb_ptr;
+ SCLBlock *sclb_ptr;
+ ISBlock *isb_ptr;
+
+ ACBlock *acb_head;
+ ACBlock *acb_curr;
+ ACBlock *acb_next;
+
+ __u8 adapter_name[12];
+
+ __u16 num_rx_bdbs [NUM_RX_QS_USED];
+ __u16 num_rx_fcbs [NUM_RX_QS_USED];
+
+ __u16 num_tx_bdbs [NUM_TX_QS_USED];
+ __u16 num_tx_fcbs [NUM_TX_QS_USED];
+
+ __u16 num_of_tx_buffs;
+
+ __u16 tx_buff_size [NUM_TX_QS_USED];
+ __u16 tx_buff_used [NUM_TX_QS_USED];
+ __u16 tx_queue_status [NUM_TX_QS_USED];
+
+ FCBlock *tx_fcb_head[NUM_TX_QS_USED];
+ FCBlock *tx_fcb_curr[NUM_TX_QS_USED];
+ FCBlock *tx_fcb_end[NUM_TX_QS_USED];
+ BDBlock *tx_bdb_head[NUM_TX_QS_USED];
+ __u16 *tx_buff_head[NUM_TX_QS_USED];
+ __u16 *tx_buff_end[NUM_TX_QS_USED];
+ __u16 *tx_buff_curr[NUM_TX_QS_USED];
+ __u16 num_tx_fcbs_used[NUM_TX_QS_USED];
+
+ FCBlock *rx_fcb_head[NUM_RX_QS_USED];
+ FCBlock *rx_fcb_curr[NUM_RX_QS_USED];
+ BDBlock *rx_bdb_head[NUM_RX_QS_USED];
+ BDBlock *rx_bdb_curr[NUM_RX_QS_USED];
+ BDBlock *rx_bdb_end[NUM_RX_QS_USED];
+ __u16 *rx_buff_head[NUM_RX_QS_USED];
+ __u16 *rx_buff_end[NUM_RX_QS_USED];
+
+ __u32 *ptr_local_ring_num;
+
+ __u32 sh_mem_used;
+
+ __u16 page_offset_mask;
+
+ __u16 authorized_function_classes;
+ __u16 authorized_access_priority;
+
+ __u16 num_acbs;
+ __u16 num_acbs_used;
+ __u16 acb_pending;
+
+ __u16 current_isb_index;
+
+ __u8 monitor_state;
+ __u8 monitor_state_ready;
+ __u16 ring_status;
+ __u8 ring_status_flags;
+ __u8 current_ring_status;
+ __u8 state;
+
+ __u8 join_state;
+
+ __u8 slot_num;
+ __u16 pos_id;
+
+ __u32 *ptr_una;
+ __u32 *ptr_bcn_type;
+ __u32 *ptr_tx_fifo_underruns;
+ __u32 *ptr_rx_fifo_underruns;
+ __u32 *ptr_rx_fifo_overruns;
+ __u32 *ptr_tx_fifo_overruns;
+ __u32 *ptr_tx_fcb_overruns;
+ __u32 *ptr_rx_fcb_overruns;
+ __u32 *ptr_tx_bdb_overruns;
+ __u32 *ptr_rx_bdb_overruns;
+
+ __u16 receive_queue_number;
+
+ __u8 rx_fifo_overrun_count;
+ __u8 tx_fifo_overrun_count;
+
+ __u16 adapter_flags;
+ __u16 adapter_flags1;
+ __u16 *misc_command_data;
+ __u16 max_packet_size;
+
+ __u16 config_word0;
+ __u16 config_word1;
+
+ __u8 trc_mask;
+
+ __u16 source_ring_number;
+ __u16 target_ring_number;
+
+ __u16 microcode_version;
+
+ __u16 bic_type;
+ __u16 nic_type;
+ __u16 board_id;
+
+ __u16 rom_size;
+ __u32 rom_base;
+ __u16 ram_size;
+ __u16 ram_usable;
+ __u32 ram_base;
+ __u32 ram_access;
+
+ __u16 extra_info;
+ __u16 mode_bits;
+ __u16 media_menu;
+ __u16 media_type;
+ __u16 adapter_bus;
+
+ __u16 status;
+ __u16 receive_mask;
+
+ __u16 group_address_0;
+ __u16 group_address[2];
+ __u16 functional_address_0;
+ __u16 functional_address[2];
+ __u16 bitwise_group_address[2];
+
+ __u8 *ptr_ucode;
+
+ __u8 cleanup;
+
+ struct sk_buff_head SendSkbQueue;
+ __u16 QueueSkb;
+
+ struct tr_statistics MacStat; /* MAC statistics structure */
+
+ spinlock_t lock;
+} NET_LOCAL;
+
+/************************************
+ * SNMP-ON-BOARD Agent Link Structure
+ ************************************/
+
+typedef struct {
+ __u8 LnkSigStr[12]; /* signature string "SmcLinkTable" */
+ __u8 LnkDrvTyp; /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */
+ __u8 LnkFlg; /* 0 if no agent linked, 1 if agent linked */
+ void *LnkNfo; /* routine which returns pointer to NIC info */
+ void *LnkAgtRcv; /* pointer to agent receive trap entry */
+ void *LnkAgtXmt; /* pointer to agent transmit trap entry */
+ void *LnkGet; /* pointer to NIC receive data copy routine */
+ void *LnkSnd; /* pointer to NIC send routine */
+ void *LnkRst; /* pointer to NIC driver reset routine */
+ void *LnkMib; /* pointer to MIB data base */
+ void *LnkMibAct; /* pointer to MIB action routine list */
+ __u16 LnkCntOffset; /* offset to error counters */
+ __u16 LnkCntNum; /* number of error counters */
+ __u16 LnkCntSize; /* size of error counters i.e. 32 = 32 bits */
+ void *LnkISR; /* pointer to interrupt vector */
+ __u8 LnkFrmTyp; /* 1=Ethernet, 2=Token Ring */
+ __u8 LnkDrvVer1 ; /* driver major version */
+ __u8 LnkDrvVer2 ; /* driver minor version */
+} AgentLink;
+
+/*
+ * Definitions for pcm_card_flags(bit_mapped)
+ */
+#define REG_COMPLETE 0x0001
+#define INSERTED 0x0002
+#define PCC_INSERTED 0x0004 /* 1=currently inserted, 0=cur removed */
+
+/*
+ * Adapter RAM test patterns
+ */
+#define RAM_PATTERN_1 0x55AA
+#define RAM_PATTERN_2 0x9249
+#define RAM_PATTERN_3 0xDB6D
+
+/*
+ * definitions for RAM test
+ */
+#define ROM_SIGNATURE 0xAA55
+#define MIN_ROM_SIZE 0x2000
+
+/*
+ * Return Codes
+ */
+#define SUCCESS 0x0000
+#define ADAPTER_AND_CONFIG 0x0001
+#define ADAPTER_NO_CONFIG 0x0002
+#define NOT_MY_INTERRUPT 0x0003
+#define FRAME_REJECTED 0x0004
+#define EVENTS_DISABLED 0x0005
+#define OUT_OF_RESOURCES 0x0006
+#define INVALID_PARAMETER 0x0007
+#define INVALID_FUNCTION 0x0008
+#define INITIALIZE_FAILED 0x0009
+#define CLOSE_FAILED 0x000A
+#define MAX_COLLISIONS 0x000B
+#define NO_SUCH_DESTINATION 0x000C
+#define BUFFER_TOO_SMALL_ERROR 0x000D
+#define ADAPTER_CLOSED 0x000E
+#define UCODE_NOT_PRESENT 0x000F
+#define FIFO_UNDERRUN 0x0010
+#define DEST_OUT_OF_RESOURCES 0x0011
+#define ADAPTER_NOT_INITIALIZED 0x0012
+#define PENDING 0x0013
+#define UCODE_PRESENT 0x0014
+#define NOT_INIT_BY_BRIDGE 0x0015
+
+#define OPEN_FAILED 0x0080
+#define HARDWARE_FAILED 0x0081
+#define SELF_TEST_FAILED 0x0082
+#define RAM_TEST_FAILED 0x0083
+#define RAM_CONFLICT 0x0084
+#define ROM_CONFLICT 0x0085
+#define UNKNOWN_ADAPTER 0x0086
+#define CONFIG_ERROR 0x0087
+#define CONFIG_WARNING 0x0088
+#define NO_FIXED_CNFG 0x0089
+#define EEROM_CKSUM_ERROR 0x008A
+#define ROM_SIGNATURE_ERROR 0x008B
+#define ROM_CHECKSUM_ERROR 0x008C
+#define ROM_SIZE_ERROR 0x008D
+#define UNSUPPORTED_NIC_CHIP 0x008E
+#define NIC_REG_ERROR 0x008F
+#define BIC_REG_ERROR 0x0090
+#define MICROCODE_TEST_ERROR 0x0091
+#define LOBE_MEDIA_TEST_FAILED 0x0092
+
+#define ADAPTER_FOUND_LAN_CORRUPT 0x009B
+
+#define ADAPTER_NOT_FOUND 0xFFFF
+
+#define ILLEGAL_FUNCTION INVALID_FUNCTION
+
+/* Errors */
+#define IO_BASE_INVALID 0x0001
+#define IO_BASE_RANGE 0x0002
+#define IRQ_INVALID 0x0004
+#define IRQ_RANGE 0x0008
+#define RAM_BASE_INVALID 0x0010
+#define RAM_BASE_RANGE 0x0020
+#define RAM_SIZE_RANGE 0x0040
+#define MEDIA_INVALID 0x0800
+
+/* Warnings */
+#define IRQ_MISMATCH 0x0080
+#define RAM_BASE_MISMATCH 0x0100
+#define RAM_SIZE_MISMATCH 0x0200
+#define BUS_MODE_MISMATCH 0x0400
+
+#define RX_CRC_ERROR 0x01
+#define RX_ALIGNMENT_ERROR 0x02
+#define RX_HW_FAILED 0x80
+
+/*
+ * Definitions for the field RING_STATUS_FLAGS
+ */
+#define RING_STATUS_CHANGED 0X01
+#define MONITOR_STATE_CHANGED 0X02
+#define JOIN_STATE_CHANGED 0X04
+
+/*
+ * Definitions for the field JOIN_STATE
+ */
+#define JS_BYPASS_STATE 0x00
+#define JS_LOBE_TEST_STATE 0x01
+#define JS_DETECT_MONITOR_PRESENT_STATE 0x02
+#define JS_AWAIT_NEW_MONITOR_STATE 0x03
+#define JS_DUPLICATE_ADDRESS_TEST_STATE 0x04
+#define JS_NEIGHBOR_NOTIFICATION_STATE 0x05
+#define JS_REQUEST_INITIALIZATION_STATE 0x06
+#define JS_JOIN_COMPLETE_STATE 0x07
+#define JS_BYPASS_WAIT_STATE 0x08
+
+/*
+ * Definitions for the field MONITOR_STATE
+ */
+#define MS_MONITOR_FSM_INACTIVE 0x00
+#define MS_REPEAT_BEACON_STATE 0x01
+#define MS_REPEAT_CLAIM_TOKEN_STATE 0x02
+#define MS_TRANSMIT_CLAIM_TOKEN_STATE 0x03
+#define MS_STANDBY_MONITOR_STATE 0x04
+#define MS_TRANSMIT_BEACON_STATE 0x05
+#define MS_ACTIVE_MONITOR_STATE 0x06
+#define MS_TRANSMIT_RING_PURGE_STATE 0x07
+#define MS_BEACON_TEST_STATE 0x09
+
+/*
+ * Definitions for the bit-field RING_STATUS
+ */
+#define SIGNAL_LOSS 0x8000
+#define HARD_ERROR 0x4000
+#define SOFT_ERROR 0x2000
+#define TRANSMIT_BEACON 0x1000
+#define LOBE_WIRE_FAULT 0x0800
+#define AUTO_REMOVAL_ERROR 0x0400
+#define REMOVE_RECEIVED 0x0100
+#define COUNTER_OVERFLOW 0x0080
+#define SINGLE_STATION 0x0040
+#define RING_RECOVERY 0x0020
+
+/*
+ * Definitions for the field BUS_TYPE
+ */
+#define AT_BUS 0x00
+#define MCA_BUS 0x01
+#define EISA_BUS 0x02
+#define PCI_BUS 0x03
+#define PCMCIA_BUS 0x04
+
+/*
+ * Definitions for adapter_flags
+ */
+#define RX_VALID_LOOKAHEAD 0x0001
+#define FORCED_16BIT_MODE 0x0002
+#define ADAPTER_DISABLED 0x0004
+#define TRANSMIT_CHAIN_INT 0x0008
+#define EARLY_RX_FRAME 0x0010
+#define EARLY_TX 0x0020
+#define EARLY_RX_COPY 0x0040
+#define USES_PHYSICAL_ADDR 0x0080 /* Rsvd for DEC PCI and 9232 */
+#define NEEDS_PHYSICAL_ADDR 0x0100 /* Reserved*/
+#define RX_STATUS_PENDING 0x0200
+#define ERX_DISABLED 0x0400 /* EARLY_RX_ENABLE rcv_mask */
+#define ENABLE_TX_PENDING 0x0800
+#define ENABLE_RX_PENDING 0x1000
+#define PERM_CLOSE 0x2000
+#define IO_MAPPED 0x4000 /* IOmapped bus interface 795 */
+#define ETX_DISABLED 0x8000
+
+
+/*
+ * Definitions for adapter_flags1
+ */
+#define TX_PHY_RX_VIRT 0x0001
+#define NEEDS_HOST_RAM 0x0002
+#define NEEDS_MEDIA_TYPE 0x0004
+#define EARLY_RX_DONE 0x0008
+#define PNP_BOOT_BIT            0x0010  /* set => activate PnP & config on power-up */
+                                        /* clear => regular PnP operation */
+#define PNP_ENABLE              0x0020  /* set => regular PnP operation */
+                                        /* clear => no PnP, overrides PNP_BOOT_BIT */
+#define SATURN_ENABLE 0x0040
+
+#define ADAPTER_REMOVABLE 0x0080 /* adapter is hot swappable */
+#define TX_PHY 0x0100 /* Uses physical address for tx bufs */
+#define RX_PHY 0x0200 /* Uses physical address for rx bufs */
+#define TX_VIRT 0x0400 /* Uses virtual addr for tx bufs */
+#define RX_VIRT 0x0800
+#define NEEDS_SERVICE 0x1000
+
+/*
+ * Adapter Status Codes
+ */
+#define OPEN 0x0001
+#define INITIALIZED 0x0002
+#define CLOSED 0x0003
+#define FAILED 0x0005
+#define NOT_INITIALIZED 0x0006
+#define IO_CONFLICT 0x0007
+#define CARD_REMOVED 0x0008
+#define CARD_INSERTED 0x0009
+
+/*
+ * Mode Bit Definitions
+ */
+#define INTERRUPT_STATUS_BIT 0x8000 /* PC Interrupt Line: 0 = Not Enabled */
+#define BOOT_STATUS_MASK 0x6000 /* Mask to isolate BOOT_STATUS */
+#define BOOT_INHIBIT 0x0000 /* BOOT_STATUS is 'inhibited' */
+#define BOOT_TYPE_1 0x2000 /* Unused BOOT_STATUS value */
+#define BOOT_TYPE_2 0x4000 /* Unused BOOT_STATUS value */
+#define BOOT_TYPE_3 0x6000 /* Unused BOOT_STATUS value */
+#define ZERO_WAIT_STATE_MASK 0x1800 /* Mask to isolate Wait State flags */
+#define ZERO_WAIT_STATE_8_BIT 0x1000 /* 0 = Disabled (Inserts Wait States) */
+#define ZERO_WAIT_STATE_16_BIT 0x0800 /* 0 = Disabled (Inserts Wait States) */
+#define LOOPING_MODE_MASK 0x0007
+#define LOOPBACK_MODE_0 0x0000
+#define LOOPBACK_MODE_1 0x0001
+#define LOOPBACK_MODE_2 0x0002
+#define LOOPBACK_MODE_3 0x0003
+#define LOOPBACK_MODE_4 0x0004
+#define LOOPBACK_MODE_5 0x0005
+#define LOOPBACK_MODE_6 0x0006
+#define LOOPBACK_MODE_7 0x0007
+#define AUTO_MEDIA_DETECT 0x0008
+#define MANUAL_CRC 0x0010
+#define EARLY_TOKEN_REL 0x0020 /* Early Token Release for Token Ring */
+#define UMAC 0x0040
+#define UTP2_PORT 0x0080 /* For 8216T2, 0=port A, 1=Port B. */
+#define BNC_10BT_INTERFACE 0x0600 /* BNC and UTP current media set */
+#define UTP_INTERFACE 0x0500 /* Ethernet UTP Only. */
+#define BNC_INTERFACE 0x0400
+#define AUI_INTERFACE 0x0300
+#define AUI_10BT_INTERFACE 0x0200
+#define STARLAN_10_INTERFACE 0x0100
+#define INTERFACE_TYPE_MASK 0x0700
+
+/*
+ * Media Type Bit Definitions
+ *
+ * legend: TP = Twisted Pair
+ * STP = Shielded twisted pair
+ * UTP = Unshielded twisted pair
+ */
+
+#define CNFG_MEDIA_TYPE_MASK 0x001e /* POS Register 3 Mask */
+
+#define MEDIA_S10 0x0000 /* Ethernet adapter, TP. */
+#define MEDIA_AUI_UTP 0x0001 /* Ethernet adapter, AUI/UTP media */
+#define MEDIA_BNC 0x0002 /* Ethernet adapter, BNC media. */
+#define MEDIA_AUI 0x0003 /* Ethernet Adapter, AUI media. */
+#define MEDIA_STP_16 0x0004 /* TokenRing adap, 16Mbit STP. */
+#define MEDIA_STP_4 0x0005 /* TokenRing adap, 4Mbit STP. */
+#define MEDIA_UTP_16 0x0006 /* TokenRing adap, 16Mbit UTP. */
+#define MEDIA_UTP_4 0x0007 /* TokenRing adap, 4Mbit UTP. */
+#define MEDIA_UTP               0x0008  /* Ethernet adapter, UTP media (no AUI) */
+#define MEDIA_BNC_UTP 0x0010 /* Ethernet adapter, BNC/UTP media */
+#define MEDIA_UTPFD 0x0011 /* Ethernet adapter, TP full duplex */
+#define MEDIA_UTPNL 0x0012 /* Ethernet adapter, TP with link integrity test disabled */
+#define MEDIA_AUI_BNC 0x0013 /* Ethernet adapter, AUI/BNC media */
+#define MEDIA_AUI_BNC_UTP 0x0014 /* Ethernet adapter, AUI_BNC/UTP */
+#define MEDIA_UTPA 0x0015 /* Ethernet UTP-10Mbps Ports A */
+#define MEDIA_UTPB 0x0016 /* Ethernet UTP-10Mbps Ports B */
+#define MEDIA_STP_16_UTP_16 0x0017 /* Token Ring STP-16Mbps/UTP-16Mbps */
+#define MEDIA_STP_4_UTP_4 0x0018 /* Token Ring STP-4Mbps/UTP-4Mbps */
+
+#define MEDIA_STP100_UTP100 0x0020 /* Ethernet STP-100Mbps/UTP-100Mbps */
+#define MEDIA_UTP100FD 0x0021 /* Ethernet UTP-100Mbps, full duplex */
+#define MEDIA_UTP100 0x0022 /* Ethernet UTP-100Mbps */
+
+
+#define MEDIA_UNKNOWN 0xFFFF /* Unknown adapter/media type */
+
+/*
+ * Definitions for the field:
+ * media_type2
+ */
+#define MEDIA_TYPE_MII 0x0001
+#define MEDIA_TYPE_UTP 0x0002
+#define MEDIA_TYPE_BNC 0x0004
+#define MEDIA_TYPE_AUI 0x0008
+#define MEDIA_TYPE_S10 0x0010
+#define MEDIA_TYPE_AUTO_SENSE 0x1000
+#define MEDIA_TYPE_AUTO_DETECT 0x4000
+#define MEDIA_TYPE_AUTO_NEGOTIATE 0x8000
+
+/*
+ * Definitions for the field:
+ * line_speed
+ */
+#define LINE_SPEED_UNKNOWN 0x0000
+#define LINE_SPEED_4 0x0001
+#define LINE_SPEED_10 0x0002
+#define LINE_SPEED_16 0x0004
+#define LINE_SPEED_100 0x0008
+#define LINE_SPEED_T4 0x0008 /* 100BaseT4 aliased for 9332BVT */
+#define LINE_SPEED_FULL_DUPLEX 0x8000
+
+/*
+ * Definitions for the field:
+ * bic_type (Bus interface chip type)
+ */
+#define BIC_NO_CHIP 0x0000 /* Bus interface chip not implemented */
+#define BIC_583_CHIP 0x0001 /* 83C583 bus interface chip */
+#define BIC_584_CHIP 0x0002 /* 83C584 bus interface chip */
+#define BIC_585_CHIP 0x0003 /* 83C585 bus interface chip */
+#define BIC_593_CHIP 0x0004 /* 83C593 bus interface chip */
+#define BIC_594_CHIP 0x0005 /* 83C594 bus interface chip */
+#define BIC_564_CHIP 0x0006 /* PCMCIA Bus interface chip */
+#define BIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */
+#define BIC_571_CHIP 0x0008 /* 83C571 EISA bus master i-face */
+#define BIC_587_CHIP 0x0009 /* Token Ring AT bus master i-face */
+#define BIC_574_CHIP 0x0010 /* FEAST bus interface chip */
+#define BIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
+#define BIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
+#define BIC_8432E_CHIP 0x0013 /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */
+#define BIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
+#define BIC_C94_CHIP 0x0015 /* 91C94 bus i-face in PCMCIA mode */
+#define BIC_X8020_CHIP 0x0016 /* Xilinx PCMCIA multi-func i-face */
+
+/*
+ * Definitions for the field:
+ *      nic_type (Network interface chip type)
+ */
+#define NIC_UNK_CHIP 0x0000 /* Unknown NIC chip */
+#define NIC_8390_CHIP 0x0001 /* DP8390 Ethernet NIC */
+#define NIC_690_CHIP 0x0002 /* 83C690 Ethernet NIC */
+#define NIC_825_CHIP 0x0003 /* 83C825 Token Ring NIC */
+/* #define NIC_???_CHIP 0x0004 */ /* Not used */
+/* #define NIC_???_CHIP 0x0005 */ /* Not used */
+/* #define NIC_???_CHIP 0x0006 */ /* Not used */
+#define NIC_790_CHIP 0x0007 /* 83C790 bus i-face/Ethernet NIC chip */
+#define NIC_C100_CHIP 0x0010 /* FEAST 100Mbps Ethernet NIC */
+#define NIC_8432_CHIP 0x0011 /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
+#define NIC_9332_CHIP 0x0012 /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
+#define NIC_8432E_CHIP 0x0013 /* 8432 enhanced bus iface/Ethernet NIC(DEC) */
+#define NIC_EPIC100_CHIP 0x0014 /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
+#define NIC_C94_CHIP 0x0015 /* 91C94 PC Card with multi func */
+
+/*
+ * Definitions for the field:
+ * adapter_type The adapter_type field describes the adapter/bus
+ * configuration.
+ */
+#define BUS_ISA16_TYPE 0x0001 /* 16 bit adap in 16 bit (E)ISA slot */
+#define BUS_ISA8_TYPE 0x0002 /* 8/16b adap in 8 bit XT/(E)ISA slot */
+#define BUS_MCA_TYPE 0x0003 /* Micro Channel adapter */
+
+/*
+ * Receive Mask definitions
+ */
+#define ACCEPT_MULTICAST 0x0001
+#define ACCEPT_BROADCAST 0x0002
+#define PROMISCUOUS_MODE 0x0004
+#define ACCEPT_SOURCE_ROUTING 0x0008
+#define ACCEPT_ERR_PACKETS 0x0010
+#define ACCEPT_ATT_MAC_FRAMES 0x0020
+#define ACCEPT_MULTI_PROM 0x0040
+#define TRANSMIT_ONLY 0x0080
+#define ACCEPT_EXT_MAC_FRAMES 0x0100
+#define EARLY_RX_ENABLE 0x0200
+#define PKT_SIZE_NOT_NEEDED 0x0400
+#define ACCEPT_SOURCE_ROUTING_SPANNING 0x0808
+
+#define ACCEPT_ALL_MAC_FRAMES 0x0120
+
+/*
+ * config_mode defs
+ */
+#define STORE_EEROM 0x0001 /* Store config in EEROM. */
+#define STORE_REGS 0x0002 /* Store config in register set. */
+
+/*
+ * equates for lmac_flags in adapter structure (Ethernet)
+ */
+#define MEM_DISABLE 0x0001
+#define RX_STATUS_POLL 0x0002
+#define USE_RE_BIT 0x0004
+/*#define RESERVED 0x0008 */
+/*#define RESERVED 0x0010 */
+/*#define RESERVED 0x0020 */
+/*#define RESERVED 0x0040 */
+/*#define RESERVED 0x0080 */
+/*#define RESERVED 0x0100 */
+/*#define RESERVED 0x0200 */
+/*#define RESERVED 0x0400 */
+/*#define RESERVED 0x0800 */
+/*#define RESERVED 0x1000 */
+/*#define RESERVED 0x2000 */
+/*#define RESERVED 0x4000 */
+/*#define RESERVED 0x8000 */
+
+/* media_opts & media_set Fields bit defs for Ethernet ... */
+#define MED_OPT_BNC 0x01
+#define MED_OPT_UTP 0x02
+#define MED_OPT_AUI 0x04
+#define MED_OPT_10MB 0x08
+#define MED_OPT_100MB 0x10
+#define MED_OPT_S10 0x20
+
+/* media_opts & media_set Fields bit defs for Token Ring ... */
+#define MED_OPT_4MB 0x08
+#define MED_OPT_16MB 0x10
+#define MED_OPT_STP 0x40
+
+#define MAX_8023_SIZE 1500 /* Max 802.3 size of frame. */
+#define DEFAULT_ERX_VALUE 4 /* Number of 16-byte blocks for 790B early Rx. */
+#define DEFAULT_ETX_VALUE 32 /* Number of bytes for 790B early Tx. */
+#define DEFAULT_TX_RETRIES 3 /* Number of transmit retries */
+#define LPBK_FRAME_SIZE 1024 /* Default loopback frame for Rx calibration test. */
+#define MAX_LOOKAHEAD_SIZE 252 /* Max lookahead size for ethernet. */
+
+#define RW_MAC_STATE 0x1101
+#define RW_SA_OF_LAST_AMP_OR_SMP 0x2803
+#define RW_PHYSICAL_DROP_NUMBER 0x3B02
+#define RW_UPSTREAM_NEIGHBOR_ADDRESS 0x3E03
+#define RW_PRODUCT_INSTANCE_ID 0x4B09
+
+#define RW_TRC_STATUS_BLOCK 0x5412
+
+#define RW_MAC_ERROR_COUNTERS_NO_CLEAR 0x8006
+#define RW_MAC_ERROR_COUNTER_CLEAR 0x7A06
+#define RW_CONFIG_REGISTER_0 0xA001
+#define RW_CONFIG_REGISTER_1 0xA101
+#define RW_PRESCALE_TIMER_THRESHOLD 0xA201
+#define RW_TPT_THRESHOLD 0xA301
+#define RW_TQP_THRESHOLD 0xA401
+#define RW_TNT_THRESHOLD 0xA501
+#define RW_TBT_THRESHOLD 0xA601
+#define RW_TSM_THRESHOLD 0xA701
+#define RW_TAM_THRESHOLD 0xA801
+#define RW_TBR_THRESHOLD 0xA901
+#define RW_TER_THRESHOLD 0xAA01
+#define RW_TGT_THRESHOLD 0xAB01
+#define RW_THT_THRESHOLD 0xAC01
+#define RW_TRR_THRESHOLD 0xAD01
+#define RW_TVX_THRESHOLD 0xAE01
+#define RW_INDIVIDUAL_MAC_ADDRESS 0xB003
+
+#define RW_INDIVIDUAL_GROUP_ADDRESS 0xB303 /* all of group addr */
+#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301 /* 1st word of group addr */
+#define RW_INDIVIDUAL_GROUP_ADDR 0xB402 /* 2nd-3rd word of group addr */
+#define RW_FUNCTIONAL_ADDRESS 0xB603 /* all of functional addr */
+#define RW_FUNCTIONAL_ADDR_WORD_0 0xB601 /* 1st word of func addr */
+#define RW_FUNCTIONAL_ADDR 0xB702 /* 2nd-3rd word func addr */
+
+#define RW_BIT_SIGNIFICANT_GROUP_ADDR 0xB902
+#define RW_SOURCE_RING_BRIDGE_NUMBER 0xBB01
+#define RW_TARGET_RING_NUMBER 0xBC01
+
+#define RW_HIC_INTERRUPT_MASK 0xC601
+
+#define SOURCE_ROUTING_SPANNING_BITS 0x00C0 /* Spanning Tree Frames */
+#define SOURCE_ROUTING_EXPLORER_BIT 0x0040 /* Explorer and Single Route */
+
+ /* write */
+
+#define CSR_MSK_ALL 0x80 // Bic 587 Only
+#define CSR_MSKTINT 0x20
+#define CSR_MSKCBUSY 0x10
+#define CSR_CLRTINT 0x08
+#define CSR_CLRCBUSY 0x04
+#define CSR_WCSS 0x02
+#define CSR_CA 0x01
+
+ /* read */
+
+#define CSR_TINT 0x20
+#define CSR_CINT 0x10
+#define CSR_TSTAT 0x08
+#define CSR_CSTAT 0x04
+#define CSR_FAULT 0x02
+#define CSR_CBUSY 0x01
+
+#define LAAR_MEM16ENB 0x80
+#define Zws16 0x20
+
+#define IRR_IEN 0x80
+#define Zws8 0x01
+
+#define IMCCR_EIL 0x04
+
+typedef struct {
+ __u8 ac; /* Access Control */
+ __u8 fc; /* Frame Control */
+ __u8 da[6]; /* Dest Addr */
+ __u8 sa[6]; /* Source Addr */
+
+ __u16 vl; /* Vector Length */
+ __u8 dc_sc; /* Dest/Source Class */
+ __u8 vc; /* Vector Code */
+ } MAC_HEADER;
+
+#define MAX_SUB_VECTOR_INFO (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2)
+
+typedef struct
+ {
+ __u8 svl; /* Sub-vector Length */
+ __u8 svi; /* Sub-vector Code */
+ __u8 svv[MAX_SUB_VECTOR_INFO]; /* Sub-vector Info */
+ } MAC_SUB_VECTOR;
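+
+/*
+ * Illustrative sketch, not part of the original driver: a MAC frame is
+ * laid out as a MAC_HEADER followed by MAC_SUB_VECTORs packed end to end,
+ * each svl byte giving that sub-vector's total length (2-byte header plus
+ * info).  Assuming the vector length vl is big-endian on the wire and
+ * counts the vl, dc_sc and vc bytes themselves, a hypothetical walk over
+ * the sub-vectors of a received frame could look like:
+ *
+ *	MAC_HEADER *rmf = (MAC_HEADER *)frame;
+ *	MAC_SUB_VECTOR *sv = (MAC_SUB_VECTOR *)(rmf + 1);
+ *	int left = ntohs(rmf->vl) - 4;
+ *	unsigned int nsv = 0;
+ *
+ *	while (left >= 2 && sv->svl >= 2 && sv->svl <= left) {
+ *		nsv++;
+ *		left -= sv->svl;
+ *		sv = (MAC_SUB_VECTOR *)((__u8 *)sv + sv->svl);
+ *	}
+ */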
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_SMCTR_H */
diff --git a/drivers/net/tokenring/smctr_firmware.h b/drivers/net/tokenring/smctr_firmware.h
new file mode 100644
index 000000000000..53f2cbc817c9
--- /dev/null
+++ b/drivers/net/tokenring/smctr_firmware.h
@@ -0,0 +1,979 @@
+/*
+ * The firmware this driver downloads into the tokenring card is a
+ * separate program and is not GPL'd source code, even though the Linux
+ * side driver and the routine that loads this data into the card are.
+ *
+ * This firmware is licensed to you strictly for use in conjunction
+ * with the use of SMC TokenRing adapters. There is no warranty
+ * expressed or implied about its fitness for any purpose.
+ */
+
+/* smctr_firmware.h: SMC TokenRing driver firmware dump for Linux.
+ *
+ * Notes:
+ * - This is an 8K binary image. (MCT.BIN v6.3C1 03/01/95)
+ *
+ * Authors:
+ * - Jay Schulist <jschlst@samba.org>
+ */
+
+#include <linux/config.h>
+
+#if defined(CONFIG_SMCTR) || defined(CONFIG_SMCTR_MODULE)
+
+unsigned char smctr_code[] = {
+ 0x0BC, 0x01D, 0x012, 0x03B, 0x063, 0x0B4, 0x0E9, 0x000,
+ 0x000, 0x01F, 0x000, 0x001, 0x001, 0x000, 0x002, 0x005,
+ 0x001, 0x000, 0x006, 0x003, 0x001, 0x000, 0x004, 0x009,
+ 0x001, 0x000, 0x00A, 0x007, 0x001, 0x000, 0x008, 0x00B,
+ 0x001, 0x000, 0x00C, 0x000, 0x000, 0x000, 0x000, 0x00F,
+ 0x001, 0x000, 0x010, 0x00D, 0x001, 0x000, 0x00E, 0x013,
+ 0x001, 0x000, 0x014, 0x011, 0x001, 0x000, 0x012, 0x000,
+ 0x000, 0x005, 0x000, 0x015, 0x001, 0x000, 0x016, 0x019,
+ 0x001, 0x000, 0x01A, 0x017, 0x001, 0x000, 0x018, 0x000,
+ 0x000, 0x00E, 0x000, 0x000, 0x000, 0x001, 0x000, 0x000,
+ 0x000, 0x004, 0x000, 0x01B, 0x001, 0x000, 0x01C, 0x000,
+ 0x000, 0x007, 0x000, 0x000, 0x000, 0x00F, 0x000, 0x000,
+ 0x000, 0x00B, 0x000, 0x01D, 0x001, 0x000, 0x01E, 0x000,
+ 0x000, 0x008, 0x000, 0x000, 0x000, 0x002, 0x000, 0x000,
+ 0x000, 0x00C, 0x000, 0x000, 0x000, 0x006, 0x000, 0x000,
+ 0x000, 0x00D, 0x000, 0x000, 0x000, 0x003, 0x000, 0x000,
+ 0x000, 0x00A, 0x000, 0x000, 0x000, 0x009, 0x000, 0x004,
+ 0x078, 0x0C6, 0x0BC, 0x001, 0x094, 0x004, 0x093, 0x080,
+ 0x0C8, 0x040, 0x062, 0x0E9, 0x0DA, 0x01C, 0x02C, 0x015,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x058,
+ 0x00B, 0x0E9, 0x0E5, 0x0D5, 0x095, 0x0C1, 0x09D, 0x077,
+ 0x0CE, 0x0BB, 0x0A0, 0x06E, 0x01C, 0x005, 0x0F6, 0x077,
+ 0x0C6, 0x002, 0x0FA, 0x096, 0x070, 0x0E8, 0x01D, 0x0C0,
+ 0x017, 0x00E, 0x002, 0x0FA, 0x058, 0x07D, 0x0C0, 0x05F,
+ 0x072, 0x0CE, 0x0EC, 0x0A4, 0x0C3, 0x084, 0x090, 0x07A,
+ 0x030, 0x0CD, 0x08D, 0x079, 0x019, 0x0E7, 0x06C, 0x024,
+ 0x027, 0x09C, 0x008, 0x039, 0x007, 0x038, 0x0A8, 0x04A,
+ 0x04C, 0x0EA, 0x04D, 0x098, 0x09B, 0x024, 0x04C, 0x0C0,
+ 0x026, 0x0D3, 0x0E7, 0x054, 0x05A, 0x04D, 0x0F2, 0x04C,
+ 0x00C, 0x013, 0x023, 0x049, 0x090, 0x032, 0x06E, 0x0A4,
+ 0x0DF, 0x093, 0x071, 0x013, 0x077, 0x026, 0x0E1, 0x026,
+ 0x0F8, 0x026, 0x00C, 0x04C, 0x012, 0x026, 0x008, 0x009,
+ 0x082, 0x082, 0x060, 0x0A9, 0x030, 0x079, 0x036, 0x0B0,
+ 0x0B2, 0x0A8, 0x0A7, 0x072, 0x064, 0x08F, 0x09B, 0x033,
+ 0x033, 0x0F9, 0x0B8, 0x039, 0x0D5, 0x011, 0x073, 0x0AA,
+ 0x075, 0x026, 0x05D, 0x026, 0x051, 0x093, 0x02A, 0x049,
+ 0x094, 0x0C9, 0x095, 0x089, 0x0BC, 0x04D, 0x0C8, 0x09B,
+ 0x080, 0x09B, 0x0A0, 0x099, 0x006, 0x04C, 0x086, 0x026,
+ 0x058, 0x09B, 0x0A4, 0x09B, 0x099, 0x037, 0x062, 0x06C,
+ 0x067, 0x09B, 0x033, 0x030, 0x0BF, 0x036, 0x066, 0x061,
+ 0x0BF, 0x036, 0x0EC, 0x0C5, 0x0BD, 0x066, 0x082, 0x05A,
+ 0x050, 0x031, 0x0D5, 0x09D, 0x098, 0x018, 0x029, 0x03C,
+ 0x098, 0x086, 0x04C, 0x017, 0x026, 0x03E, 0x02C, 0x0B8,
+ 0x069, 0x03B, 0x049, 0x02E, 0x0B4, 0x008, 0x043, 0x01A,
+ 0x0A4, 0x0F9, 0x0B3, 0x051, 0x0F1, 0x010, 0x0F3, 0x043,
+ 0x0CD, 0x008, 0x06F, 0x063, 0x079, 0x0B3, 0x033, 0x00E,
+ 0x013, 0x098, 0x049, 0x098, 0x004, 0x0DA, 0x07C, 0x0E0,
+ 0x052, 0x079, 0x031, 0x00C, 0x098, 0x02E, 0x04D, 0x0AC,
+ 0x02C, 0x084, 0x014, 0x0EE, 0x04C, 0x0FE, 0x067, 0x05E,
+ 0x0E4, 0x09A, 0x075, 0x029, 0x0D7, 0x0A9, 0x035, 0x03A,
+ 0x094, 0x05B, 0x0D5, 0x09B, 0x058, 0x0B4, 0x0AF, 0x075,
+ 0x066, 0x0AF, 0x014, 0x0A9, 0x0EF, 0x040, 0x095, 0x025,
+ 0x008, 0x0B9, 0x0AD, 0x042, 0x0FC, 0x0D8, 0x0D9, 0x08C,
+ 0x033, 0x00E, 0x013, 0x098, 0x066, 0x01E, 0x045, 0x0AC,
+ 0x0B0, 0x00C, 0x042, 0x0D3, 0x0CC, 0x0A6, 0x012, 0x062,
+ 0x0DE, 0x0B4, 0x0B1, 0x080, 0x049, 0x07D, 0x0A2, 0x0DE,
+ 0x0B4, 0x018, 0x0C0, 0x024, 0x084, 0x0E6, 0x054, 0x0F5,
+ 0x083, 0x046, 0x001, 0x068, 0x01A, 0x063, 0x00C, 0x0C6,
+ 0x012, 0x064, 0x0FA, 0x04C, 0x035, 0x01C, 0x02C, 0x00E,
+ 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA,
+ 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AD, 0x0D7, 0x002,
+ 0x070, 0x0E0, 0x04C, 0x0F3, 0x0A1, 0x0C1, 0x0D5, 0x0C0,
+ 0x03C, 0x0B9, 0x069, 0x039, 0x060, 0x04E, 0x058, 0x077,
+ 0x002, 0x067, 0x093, 0x03C, 0x099, 0x0E4, 0x0CF, 0x038,
+ 0x01C, 0x097, 0x02E, 0x040, 0x01B, 0x090, 0x031, 0x046,
+ 0x0A3, 0x05E, 0x00E, 0x088, 0x034, 0x06A, 0x035, 0x0E0,
+ 0x0E8, 0x0AA, 0x035, 0x01A, 0x0A9, 0x0F5, 0x015, 0x046,
+ 0x0A3, 0x0EA, 0x07D, 0x04A, 0x0A3, 0x051, 0x0AA, 0x09F,
+ 0x070, 0x054, 0x0A6, 0x057, 0x02E, 0x0B4, 0x0CD, 0x0C8,
+ 0x0A3, 0x00C, 0x0C1, 0x0DA, 0x0C6, 0x0E1, 0x0CB, 0x07A,
+ 0x0D4, 0x01C, 0x068, 0x0FF, 0x0CF, 0x055, 0x0A8, 0x0C0,
+ 0x02D, 0x085, 0x011, 0x017, 0x044, 0x02A, 0x030, 0x00B,
+ 0x04A, 0x088, 0x0C2, 0x04D, 0x0B5, 0x020, 0x0D5, 0x026,
+ 0x001, 0x069, 0x051, 0x069, 0x052, 0x019, 0x052, 0x060,
+ 0x016, 0x095, 0x016, 0x082, 0x096, 0x054, 0x098, 0x005,
+ 0x0A5, 0x045, 0x0F3, 0x0DD, 0x06A, 0x0F9, 0x028, 0x018,
+ 0x0EF, 0x000, 0x030, 0x030, 0x051, 0x04E, 0x044, 0x05D,
+ 0x012, 0x0D1, 0x043, 0x0E6, 0x012, 0x06F, 0x09E, 0x0BA,
+ 0x0CC, 0x0DF, 0x025, 0x003, 0x01D, 0x0E0, 0x006, 0x006,
+ 0x00A, 0x030, 0x0CC, 0x0A9, 0x0EB, 0x02D, 0x000, 0x086,
+ 0x0A6, 0x012, 0x065, 0x04F, 0x056, 0x0D6, 0x065, 0x049,
+ 0x05F, 0x03D, 0x0E8, 0x037, 0x0C9, 0x040, 0x0C7, 0x078,
+ 0x001, 0x081, 0x082, 0x08C, 0x033, 0x018, 0x049, 0x080,
+ 0x0AE, 0x040, 0x0C5, 0x018, 0x005, 0x09C, 0x06D, 0x018,
+ 0x066, 0x00E, 0x0F3, 0x0A0, 0x0C6, 0x012, 0x062, 0x0DE,
+ 0x0F5, 0x004, 0x0B4, 0x0AC, 0x06B, 0x0C6, 0x019, 0x091,
+ 0x073, 0x005, 0x048, 0x02E, 0x072, 0x094, 0x080, 0x073,
+ 0x0A1, 0x0C8, 0x047, 0x036, 0x066, 0x064, 0x02F, 0x036,
+ 0x066, 0x064, 0x007, 0x099, 0x002, 0x091, 0x08E, 0x072,
+ 0x0D1, 0x00F, 0x09D, 0x006, 0x031, 0x073, 0x0A0, 0x0C3,
+ 0x051, 0x06A, 0x01A, 0x020, 0x0BF, 0x03A, 0x00C, 0x02C,
+ 0x073, 0x087, 0x043, 0x05E, 0x060, 0x002, 0x023, 0x0FC,
+ 0x0E0, 0x0D6, 0x035, 0x0EF, 0x09E, 0x0F5, 0x0EF, 0x092,
+ 0x081, 0x08E, 0x0F0, 0x003, 0x003, 0x005, 0x018, 0x066,
+ 0x045, 0x0CC, 0x00B, 0x048, 0x02E, 0x070, 0x00A, 0x040,
+ 0x039, 0x0D0, 0x0E4, 0x023, 0x09B, 0x033, 0x032, 0x017,
+ 0x09B, 0x033, 0x032, 0x003, 0x0CC, 0x085, 0x048, 0x0C7,
+ 0x038, 0x014, 0x0A5, 0x0CE, 0x029, 0x07E, 0x0D2, 0x080,
+ 0x0A1, 0x0A8, 0x0B4, 0x048, 0x088, 0x02F, 0x0CE, 0x083,
+ 0x00B, 0x01C, 0x0E1, 0x0D0, 0x0D7, 0x098, 0x004, 0x088,
+ 0x087, 0x0CE, 0x096, 0x031, 0x073, 0x0A5, 0x08F, 0x0F3,
+ 0x083, 0x058, 0x0D7, 0x0BE, 0x07B, 0x082, 0x0AF, 0x092,
+ 0x081, 0x08E, 0x0F0, 0x003, 0x003, 0x005, 0x018, 0x066,
+ 0x045, 0x0CC, 0x015, 0x020, 0x0B9, 0x0C8, 0x029, 0x000,
+ 0x0E7, 0x043, 0x090, 0x08E, 0x06C, 0x0CC, 0x0C8, 0x05E,
+ 0x06C, 0x0CC, 0x0C8, 0x00F, 0x032, 0x005, 0x023, 0x01C,
+ 0x0E4, 0x050, 0x0D4, 0x05A, 0x017, 0x088, 0x02F, 0x0CE,
+ 0x083, 0x010, 0x0F9, 0x0D0, 0x023, 0x017, 0x03A, 0x004,
+ 0x035, 0x0E6, 0x000, 0x022, 0x016, 0x039, 0x0C3, 0x0A3,
+ 0x0FC, 0x0E0, 0x0D6, 0x035, 0x0E0, 0x0BF, 0x0F4, 0x018,
+ 0x0F2, 0x02D, 0x04D, 0x043, 0x051, 0x06E, 0x05A, 0x022,
+ 0x01F, 0x030, 0x0D4, 0x017, 0x0E7, 0x041, 0x091, 0x073,
+ 0x005, 0x048, 0x02E, 0x077, 0x069, 0x000, 0x0E7, 0x043,
+ 0x090, 0x08E, 0x06C, 0x0CC, 0x0C8, 0x05E, 0x06C, 0x0CC,
+ 0x0C8, 0x00F, 0x032, 0x005, 0x023, 0x01C, 0x0EF, 0x04C,
+ 0x04E, 0x006, 0x004, 0x0C9, 0x09E, 0x00B, 0x0FF, 0x041,
+ 0x08F, 0x022, 0x0D4, 0x0D4, 0x035, 0x016, 0x0E5, 0x0A2,
+ 0x021, 0x0F3, 0x05A, 0x082, 0x0FC, 0x0E8, 0x032, 0x02E,
+ 0x060, 0x0A9, 0x005, 0x0CE, 0x013, 0x048, 0x007, 0x03A,
+ 0x01C, 0x084, 0x073, 0x066, 0x066, 0x042, 0x0F3, 0x066,
+ 0x066, 0x040, 0x079, 0x090, 0x029, 0x018, 0x0E7, 0x00A,
+ 0x098, 0x09C, 0x00A, 0x09E, 0x0B5, 0x012, 0x05C, 0x07C,
+ 0x0C3, 0x031, 0x08B, 0x098, 0x02A, 0x07C, 0x0D3, 0x0ED,
+ 0x038, 0x0E9, 0x0D3, 0x04E, 0x074, 0x0ED, 0x049, 0x09E,
+ 0x00B, 0x0FF, 0x041, 0x08F, 0x022, 0x0D4, 0x0D4, 0x035,
+ 0x016, 0x0E5, 0x0A2, 0x02D, 0x0EB, 0x045, 0x033, 0x08F,
+ 0x0FC, 0x0F7, 0x0A0, 0x05F, 0x025, 0x003, 0x01D, 0x0E4,
+ 0x00E, 0x006, 0x00A, 0x030, 0x0CC, 0x00C, 0x0F3, 0x0EB,
+ 0x040, 0x0DE, 0x061, 0x0A8, 0x070, 0x092, 0x00A, 0x000,
+ 0x0E1, 0x024, 0x01E, 0x000, 0x0E1, 0x024, 0x01E, 0x000,
+ 0x0E1, 0x024, 0x01E, 0x000, 0x0E1, 0x024, 0x01E, 0x000,
+ 0x0E1, 0x024, 0x01E, 0x001, 0x00F, 0x098, 0x02A, 0x00B,
+ 0x0F3, 0x0A0, 0x0C8, 0x0B9, 0x0A2, 0x0A4, 0x017, 0x03A,
+ 0x069, 0x000, 0x0E7, 0x043, 0x090, 0x08E, 0x075, 0x048,
+ 0x05E, 0x070, 0x069, 0x001, 0x0E6, 0x000, 0x052, 0x031,
+ 0x0CC, 0x018, 0x014, 0x0A5, 0x0CC, 0x009, 0x082, 0x094,
+ 0x073, 0x00C, 0x0A0, 0x091, 0x0F5, 0x025, 0x0CC, 0x007,
+ 0x006, 0x084, 0x084, 0x09F, 0x030, 0x0A2, 0x0A4, 0x07D,
+ 0x050, 0x075, 0x0A6, 0x065, 0x001, 0x04A, 0x08E, 0x0B4,
+ 0x0CC, 0x0C4, 0x035, 0x054, 0x075, 0x066, 0x0A4, 0x097,
+ 0x07A, 0x089, 0x050, 0x053, 0x013, 0x080, 0x019, 0x0E3,
+ 0x049, 0x05C, 0x06D, 0x0CE, 0x0A9, 0x040, 0x035, 0x006,
+ 0x078, 0x0D2, 0x057, 0x006, 0x0F1, 0x0B3, 0x02A, 0x08D,
+ 0x097, 0x023, 0x062, 0x092, 0x05D, 0x069, 0x099, 0x01C,
+ 0x06A, 0x036, 0x0E6, 0x0CD, 0x046, 0x012, 0x06F, 0x09E,
+ 0x0E1, 0x0AB, 0x0E4, 0x0A3, 0x00C, 0x0C0, 0x0DE, 0x0AC,
+ 0x0D4, 0x00D, 0x028, 0x01B, 0x0D0, 0x012, 0x0A5, 0x000,
+ 0x0F8, 0x04B, 0x0AD, 0x033, 0x028, 0x006, 0x0A0, 0x0DE,
+ 0x014, 0x097, 0x03A, 0x089, 0x05D, 0x0C0, 0x00D, 0x0E3,
+ 0x006, 0x090, 0x092, 0x05D, 0x069, 0x098, 0x066, 0x0B9,
+ 0x019, 0x095, 0x0E4, 0x0A8, 0x0CF, 0x09D, 0x033, 0x018,
+ 0x049, 0x0BE, 0x07B, 0x086, 0x0AF, 0x092, 0x08C, 0x033,
+ 0x024, 0x014, 0x00C, 0x0F4, 0x083, 0x024, 0x021, 0x0C2,
+ 0x070, 0x0BF, 0x0F4, 0x018, 0x0F2, 0x02D, 0x04D, 0x043,
+ 0x051, 0x06E, 0x05A, 0x022, 0x01F, 0x032, 0x0A8, 0x02F,
+ 0x0CE, 0x083, 0x022, 0x0E6, 0x005, 0x0A4, 0x017, 0x03A,
+ 0x069, 0x000, 0x0E7, 0x043, 0x090, 0x08E, 0x075, 0x048,
+ 0x05E, 0x070, 0x069, 0x001, 0x0E6, 0x042, 0x0A4, 0x063,
+ 0x098, 0x002, 0x029, 0x04B, 0x09A, 0x029, 0x078, 0x0E9,
+ 0x040, 0x053, 0x013, 0x081, 0x081, 0x032, 0x067, 0x082,
+ 0x0FF, 0x0D0, 0x063, 0x0C8, 0x0B5, 0x035, 0x00D, 0x045,
+ 0x0AE, 0x050, 0x008, 0x07C, 0x0E0, 0x0D0, 0x05F, 0x09D,
+ 0x006, 0x045, 0x0CC, 0x001, 0x0A4, 0x017, 0x03A, 0x069,
+ 0x000, 0x0E7, 0x043, 0x090, 0x08E, 0x075, 0x048, 0x05E,
+ 0x070, 0x069, 0x001, 0x0E6, 0x059, 0x0A4, 0x063, 0x098,
+ 0x01C, 0x052, 0x097, 0x03B, 0x030, 0x052, 0x08E, 0x07D,
+ 0x02A, 0x009, 0x01F, 0x051, 0x0EB, 0x0A4, 0x0A4, 0x00A,
+ 0x0B9, 0x094, 0x087, 0x0AE, 0x0C5, 0x031, 0x038, 0x002,
+ 0x0FF, 0x0D0, 0x063, 0x0C8, 0x0B5, 0x035, 0x00D, 0x045,
+ 0x0AE, 0x050, 0x008, 0x07C, 0x0EA, 0x020, 0x0BF, 0x03A,
+ 0x00C, 0x08B, 0x09A, 0x016, 0x090, 0x05C, 0x0E9, 0x0A4,
+ 0x003, 0x09D, 0x00E, 0x042, 0x039, 0x0D5, 0x021, 0x079,
+ 0x095, 0x048, 0x00F, 0x030, 0x00A, 0x091, 0x08E, 0x060,
+ 0x0EB, 0x029, 0x073, 0x000, 0x009, 0x054, 0x004, 0x0CA,
+ 0x082, 0x065, 0x052, 0x065, 0x0E4, 0x0CA, 0x022, 0x065,
+ 0x072, 0x065, 0x009, 0x032, 0x0E0, 0x099, 0x072, 0x04C,
+ 0x0C4, 0x0E0, 0x00B, 0x0FF, 0x041, 0x08F, 0x022, 0x0D4,
+ 0x0D4, 0x035, 0x016, 0x0B9, 0x040, 0x021, 0x0F3, 0x08A,
+ 0x082, 0x0FC, 0x0E8, 0x032, 0x02E, 0x060, 0x0A9, 0x005,
+ 0x0CE, 0x09A, 0x040, 0x039, 0x0D0, 0x0E4, 0x023, 0x09D,
+ 0x052, 0x017, 0x099, 0x054, 0x061, 0x099, 0x001, 0x0E6,
+ 0x040, 0x0A4, 0x063, 0x098, 0x004, 0x0B1, 0x084, 0x098,
+ 0x018, 0x0EF, 0x02D, 0x003, 0x005, 0x031, 0x038, 0x002,
+ 0x0FF, 0x0D0, 0x063, 0x0C8, 0x0B5, 0x035, 0x00D, 0x045,
+ 0x0B9, 0x068, 0x088, 0x07C, 0x0E0, 0x050, 0x05F, 0x09D,
+ 0x006, 0x045, 0x0CC, 0x081, 0x048, 0x02E, 0x071, 0x034,
+ 0x08F, 0x048, 0x001, 0x048, 0x015, 0x021, 0x005, 0x021,
+ 0x0E9, 0x00A, 0x052, 0x003, 0x0CE, 0x05A, 0x046, 0x039,
+ 0x0CF, 0x047, 0x08E, 0x060, 0x0AB, 0x01A, 0x0F3, 0x053,
+ 0x043, 0x0EB, 0x035, 0x024, 0x0B8, 0x01B, 0x030, 0x007,
+ 0x009, 0x08A, 0x074, 0x02F, 0x07E, 0x041, 0x074, 0x01E,
+ 0x01D, 0x00D, 0x087, 0x046, 0x049, 0x0D5, 0x095, 0x0D1,
+ 0x0D5, 0x0D5, 0x0BB, 0x0A9, 0x04E, 0x082, 0x09D, 0x005,
+ 0x03A, 0x00A, 0x074, 0x014, 0x0E8, 0x029, 0x0D0, 0x042,
+ 0x074, 0x05B, 0x0CE, 0x050, 0x0C4, 0x007, 0x045, 0x0BC,
+ 0x0E2, 0x00C, 0x040, 0x074, 0x05B, 0x0CE, 0x083, 0x004,
+ 0x0F9, 0x095, 0x04D, 0x013, 0x063, 0x05E, 0x06F, 0x031,
+ 0x03B, 0x0A0, 0x08B, 0x0A2, 0x0C5, 0x039, 0x08D, 0x078,
+ 0x03A, 0x022, 0x0A0, 0x000, 0x06B, 0x0C1, 0x0D1, 0x054,
+ 0x060, 0x016, 0x0D9, 0x091, 0x0A2, 0x0E7, 0x043, 0x08C,
+ 0x024, 0x0DC, 0x01C, 0x0E0, 0x051, 0x017, 0x039, 0x06B,
+ 0x03B, 0x0CC, 0x04B, 0x042, 0x02E, 0x06B, 0x050, 0x0BF,
+ 0x036, 0x036, 0x065, 0x04F, 0x07A, 0x018, 0x055, 0x025,
+ 0x078, 0x098, 0x023, 0x0E7, 0x050, 0x03E, 0x0F3, 0x081,
+ 0x04C, 0x002, 0x06D, 0x03E, 0x071, 0x053, 0x0AF, 0x078,
+ 0x0A9, 0x0D4, 0x0A6, 0x029, 0x0B1, 0x0BC, 0x0D9, 0x099,
+ 0x0B2, 0x08E, 0x062, 0x08F, 0x022, 0x02E, 0x075, 0x016,
+ 0x0B0, 0x0B2, 0x0AB, 0x023, 0x028, 0x016, 0x054, 0x052,
+ 0x031, 0x0BC, 0x0D9, 0x099, 0x0B2, 0x08E, 0x066, 0x019,
+ 0x002, 0x02E, 0x075, 0x016, 0x050, 0x02C, 0x0A9, 0x0C8,
+ 0x0C6, 0x0F5, 0x020, 0x0D3, 0x0E4, 0x07F, 0x04F, 0x09C,
+ 0x00A, 0x0D6, 0x016, 0x07F, 0x090, 0x0EE, 0x04C, 0x0EB,
+ 0x0CF, 0x0E2, 0x088, 0x0BA, 0x02F, 0x042, 0x086, 0x0AE,
+ 0x0BD, 0x0E5, 0x0A7, 0x052, 0x09F, 0x093, 0x063, 0x079,
+ 0x0EB, 0x033, 0x008, 0x0F9, 0x094, 0x052, 0x047, 0x0CD,
+ 0x099, 0x025, 0x06F, 0x03A, 0x00C, 0x013, 0x0E6, 0x055,
+ 0x034, 0x04C, 0x05A, 0x04D, 0x0B5, 0x023, 0x095, 0x0A5,
+ 0x048, 0x011, 0x05A, 0x00A, 0x043, 0x095, 0x0AC, 0x02C,
+ 0x0BA, 0x024, 0x005, 0x049, 0x0B1, 0x0BC, 0x0CA, 0x0A7,
+ 0x072, 0x06C, 0x06B, 0x0C5, 0x0BD, 0x0E8, 0x031, 0x069,
+ 0x052, 0x05D, 0x006, 0x012, 0x065, 0x03E, 0x0B1, 0x050,
+ 0x04C, 0x07D, 0x04F, 0x0AC, 0x00A, 0x030, 0x00B, 0x036,
+ 0x064, 0x011, 0x073, 0x08A, 0x083, 0x08E, 0x075, 0x012,
+ 0x09F, 0x07B, 0x0D2, 0x099, 0x058, 0x0EE, 0x082, 0x02E,
+ 0x077, 0x0A0, 0x0E3, 0x09D, 0x05D, 0x04F, 0x0BC, 0x02A,
+ 0x053, 0x029, 0x053, 0x0DE, 0x093, 0x024, 0x0BA, 0x0B3,
+ 0x036, 0x0AA, 0x04A, 0x0C6, 0x079, 0x0D4, 0x0B9, 0x0DE,
+ 0x062, 0x05A, 0x011, 0x073, 0x050, 0x050, 0x0BF, 0x037,
+ 0x036, 0x06F, 0x013, 0x023, 0x0BA, 0x00C, 0x024, 0x0CE,
+ 0x0BD, 0x0E2, 0x0A7, 0x052, 0x0B2, 0x08E, 0x06B, 0x060,
+ 0x062, 0x02E, 0x075, 0x013, 0x030, 0x0AC, 0x0A0, 0x059,
+ 0x0CA, 0x064, 0x063, 0x079, 0x0B3, 0x033, 0x065, 0x01C,
+ 0x0CC, 0x032, 0x004, 0x05C, 0x0EA, 0x02C, 0x0A0, 0x059,
+ 0x0DF, 0x023, 0x01B, 0x0D4, 0x083, 0x052, 0x047, 0x0DD,
+ 0x079, 0x096, 0x0D4, 0x09E, 0x0B3, 0x052, 0x04B, 0x0A2,
+ 0x05A, 0x01A, 0x08D, 0x05D, 0x07B, 0x082, 0x0A7, 0x052,
+ 0x0B2, 0x08E, 0x066, 0x019, 0x002, 0x02E, 0x075, 0x016,
+ 0x050, 0x02C, 0x08C, 0x032, 0x01D, 0x07B, 0x08E, 0x0A7,
+ 0x052, 0x0B1, 0x0BC, 0x0D9, 0x099, 0x098, 0x004, 0x0DA,
+ 0x07C, 0x0E2, 0x0AC, 0x0FE, 0x066, 0x019, 0x002, 0x02E,
+ 0x065, 0x050, 0x0BF, 0x033, 0x066, 0x064, 0x0FE, 0x074,
+ 0x018, 0x086, 0x04C, 0x017, 0x026, 0x0D6, 0x016, 0x052,
+ 0x039, 0x018, 0x0DE, 0x07A, 0x0CC, 0x0C2, 0x03E, 0x065,
+ 0x014, 0x091, 0x0F3, 0x066, 0x049, 0x008, 0x06E, 0x083,
+ 0x009, 0x033, 0x0AF, 0x031, 0x0ED, 0x00D, 0x09D, 0x006,
+ 0x012, 0x062, 0x02A, 0x031, 0x08D, 0x06D, 0x0E7, 0x041,
+ 0x082, 0x07C, 0x0CA, 0x0A6, 0x089, 0x087, 0x009, 0x02E,
+ 0x029, 0x0B1, 0x0AF, 0x010, 0x039, 0x0D6, 0x064, 0x097,
+ 0x030, 0x01D, 0x042, 0x075, 0x093, 0x044, 0x002, 0x08C,
+ 0x024, 0x0D2, 0x07A, 0x0B3, 0x050, 0x0F6, 0x089, 0x005,
+ 0x043, 0x05E, 0x061, 0x098, 0x0C0, 0x02C, 0x092, 0x025,
+ 0x03C, 0x08B, 0x024, 0x089, 0x049, 0x005, 0x049, 0x0E7,
+ 0x00C, 0x0B9, 0x084, 0x098, 0x0B7, 0x0AD, 0x033, 0x044,
+ 0x0AE, 0x05A, 0x051, 0x086, 0x060, 0x09F, 0x038, 0x0A9,
+ 0x0A2, 0x06C, 0x06B, 0x0C4, 0x08E, 0x0F4, 0x05E, 0x049,
+ 0x046, 0x012, 0x062, 0x0DE, 0x0B4, 0x0CD, 0x021, 0x05C,
+ 0x0B4, 0x0A3, 0x00C, 0x0C1, 0x03E, 0x072, 0x029, 0x0A2,
+ 0x06C, 0x06B, 0x0C6, 0x012, 0x062, 0x047, 0x0F0, 0x0E8,
+ 0x0C3, 0x032, 0x004, 0x035, 0x040, 0x092, 0x0A4, 0x082,
+ 0x088, 0x010, 0x092, 0x07C, 0x0CB, 0x0D4, 0x02F, 0x0A4,
+ 0x002, 0x011, 0x084, 0x098, 0x0B7, 0x0AD, 0x033, 0x044,
+ 0x0AE, 0x05A, 0x051, 0x086, 0x060, 0x09F, 0x038, 0x0A9,
+ 0x0A2, 0x06C, 0x06B, 0x0C4, 0x08E, 0x0F4, 0x05E, 0x049,
+ 0x044, 0x008, 0x049, 0x03E, 0x065, 0x0EA, 0x017, 0x0D2,
+ 0x001, 0x008, 0x0C2, 0x04C, 0x05B, 0x0D6, 0x099, 0x0A4,
+ 0x02B, 0x096, 0x094, 0x061, 0x098, 0x027, 0x0CE, 0x045,
+ 0x034, 0x04D, 0x08D, 0x078, 0x081, 0x009, 0x027, 0x0CC,
+ 0x0BD, 0x012, 0x028, 0x06C, 0x058, 0x0AF, 0x0B6, 0x0F3,
+ 0x0A0, 0x0C1, 0x03E, 0x065, 0x053, 0x044, 0x0D8, 0x0D7,
+ 0x092, 0x08E, 0x07D, 0x04B, 0x0C2, 0x0FA, 0x061, 0x026,
+ 0x006, 0x03A, 0x0B3, 0x06B, 0x003, 0x005, 0x049, 0x0E7,
+ 0x00C, 0x0B9, 0x06F, 0x05A, 0x066, 0x095, 0x05C, 0x0B4,
+ 0x0A3, 0x00C, 0x0C1, 0x03E, 0x070, 0x029, 0x0A2, 0x06E,
+ 0x0A4, 0x0DF, 0x093, 0x071, 0x013, 0x077, 0x026, 0x0E1,
+ 0x026, 0x0F8, 0x026, 0x0C6, 0x0BC, 0x094, 0x073, 0x0F9,
+ 0x02F, 0x00B, 0x0E9, 0x084, 0x098, 0x018, 0x0EA, 0x0CC,
+ 0x0EC, 0x00C, 0x015, 0x027, 0x09C, 0x032, 0x0FF, 0x03D,
+ 0x056, 0x0AF, 0x092, 0x08B, 0x07A, 0x0D3, 0x035, 0x0D5,
+ 0x0CB, 0x04A, 0x030, 0x0CC, 0x013, 0x0E7, 0x002, 0x09A,
+ 0x026, 0x0C6, 0x0BC, 0x094, 0x073, 0x041, 0x097, 0x091,
+ 0x0F4, 0x083, 0x0CE, 0x004, 0x020, 0x062, 0x08B, 0x005,
+ 0x016, 0x049, 0x08C, 0x024, 0x0C0, 0x0C7, 0x056, 0x090,
+ 0x0C0, 0x0C1, 0x052, 0x079, 0x0C3, 0x02E, 0x05B, 0x0D5,
+ 0x0A6, 0x072, 0x0D2, 0x094, 0x0FA, 0x0AD, 0x058, 0x0C8,
+ 0x0FA, 0x09F, 0x054, 0x0B3, 0x032, 0x04B, 0x0B9, 0x054,
+ 0x0A6, 0x051, 0x086, 0x06B, 0x079, 0x0D0, 0x060, 0x09F,
+ 0x032, 0x005, 0x034, 0x04D, 0x08D, 0x07A, 0x04D, 0x01E,
+ 0x07A, 0x0B3, 0x051, 0x000, 0x0A9, 0x03D, 0x059, 0x0A8,
+ 0x07B, 0x044, 0x082, 0x0A1, 0x0AF, 0x04A, 0x08D, 0x052,
+ 0x0A9, 0x052, 0x041, 0x049, 0x04F, 0x03A, 0x02E, 0x040,
+ 0x0A4, 0x099, 0x050, 0x0BE, 0x090, 0x008, 0x052, 0x079,
+ 0x0C3, 0x02E, 0x061, 0x026, 0x02D, 0x0EB, 0x04C, 0x0D0,
+ 0x015, 0x0CB, 0x04A, 0x030, 0x0CC, 0x013, 0x0E7, 0x002,
+ 0x09A, 0x026, 0x0C6, 0x0BC, 0x048, 0x0FE, 0x01D, 0x025,
+ 0x046, 0x0A9, 0x054, 0x0A9, 0x020, 0x0A4, 0x0A7, 0x09D,
+ 0x017, 0x020, 0x052, 0x04C, 0x0A8, 0x05F, 0x048, 0x004,
+ 0x023, 0x009, 0x031, 0x06F, 0x05A, 0x066, 0x080, 0x0AE,
+ 0x05A, 0x051, 0x086, 0x060, 0x09F, 0x038, 0x014, 0x0D1,
+ 0x036, 0x035, 0x0E4, 0x0A7, 0x09D, 0x017, 0x020, 0x052,
+ 0x04C, 0x0A2, 0x045, 0x00D, 0x08B, 0x015, 0x0F4, 0x091,
+ 0x0DE, 0x08B, 0x0C9, 0x028, 0x0C2, 0x04C, 0x05B, 0x0D6,
+ 0x099, 0x0A9, 0x05C, 0x0B4, 0x0A3, 0x00C, 0x0D6, 0x0F3,
+ 0x0A0, 0x0C1, 0x03E, 0x064, 0x00A, 0x068, 0x09B, 0x01A,
+ 0x0F1, 0x06D, 0x04C, 0x0AA, 0x092, 0x0E0, 0x036, 0x094,
+ 0x070, 0x09B, 0x029, 0x078, 0x013, 0x0AE, 0x0B3, 0x0AA,
+ 0x085, 0x0D4, 0x043, 0x075, 0x009, 0x03A, 0x0C9, 0x0EB,
+ 0x035, 0x024, 0x0B8, 0x01B, 0x032, 0x08E, 0x013, 0x048,
+ 0x07E, 0x04E, 0x0FD, 0x040, 0x0FD, 0x040, 0x0FD, 0x040,
+ 0x0FD, 0x040, 0x0FD, 0x040, 0x0FC, 0x013, 0x0F4, 0x021,
+ 0x0F9, 0x017, 0x045, 0x08A, 0x030, 0x00B, 0x033, 0x05F,
+ 0x083, 0x0A2, 0x02A, 0x030, 0x00B, 0x033, 0x05F, 0x083,
+ 0x0A2, 0x0A8, 0x0C0, 0x02D, 0x0B3, 0x020, 0x070, 0x092,
+ 0x013, 0x09A, 0x0DE, 0x074, 0x018, 0x027, 0x0CC, 0x0AA,
+ 0x068, 0x09B, 0x01A, 0x0F7, 0x007, 0x045, 0x051, 0x080,
+ 0x05B, 0x066, 0x047, 0x007, 0x038, 0x0A8, 0x023, 0x0E7,
+ 0x051, 0x011, 0x03F, 0x0E0, 0x0E8, 0x085, 0x046, 0x001,
+ 0x06D, 0x099, 0x006, 0x012, 0x065, 0x04F, 0x07A, 0x020,
+ 0x024, 0x0BA, 0x0B3, 0x032, 0x015, 0x025, 0x07B, 0x0AD,
+ 0x033, 0x078, 0x0AE, 0x00E, 0x073, 0x0D0, 0x047, 0x0CE,
+ 0x0A7, 0x030, 0x0CC, 0x044, 0x0FF, 0x083, 0x0A2, 0x0A8,
+ 0x0C0, 0x02C, 0x0D9, 0x091, 0x0C1, 0x0D1, 0x015, 0x018,
+ 0x005, 0x09B, 0x032, 0x008, 0x0BA, 0x02C, 0x051, 0x080,
+ 0x059, 0x0B3, 0x020, 0x070, 0x092, 0x0E2, 0x098, 0x089,
+ 0x0FD, 0x0BC, 0x0EE, 0x018, 0x090, 0x0FC, 0x08B, 0x0A2,
+ 0x0C5, 0x02B, 0x00D, 0x078, 0x03A, 0x022, 0x0A5, 0x061,
+ 0x0AF, 0x007, 0x045, 0x051, 0x080, 0x05B, 0x066, 0x044,
+ 0x09E, 0x0B3, 0x052, 0x04B, 0x083, 0x0AD, 0x0C7, 0x009,
+ 0x0BE, 0x01F, 0x09F, 0x074, 0x065, 0x05D, 0x00A, 0x017,
+ 0x07C, 0x0AB, 0x0A0, 0x0C2, 0x04C, 0x038, 0x049, 0x012,
+ 0x02E, 0x038, 0x049, 0x007, 0x0A3, 0x00C, 0x0C1, 0x03E,
+ 0x065, 0x053, 0x044, 0x0D8, 0x0D7, 0x0AD, 0x0E7, 0x000,
+ 0x032, 0x04B, 0x09B, 0x033, 0x034, 0x04A, 0x003, 0x000,
+ 0x09D, 0x025, 0x0CE, 0x083, 0x024, 0x0B8, 0x019, 0x099,
+ 0x08C, 0x002, 0x012, 0x04B, 0x0A1, 0x099, 0x0D8, 0x0C0,
+ 0x027, 0x049, 0x073, 0x0CF, 0x0F9, 0x03C, 0x0F4, 0x07C,
+ 0x0E7, 0x098, 0x004, 0x0E9, 0x02E, 0x07F, 0x039, 0x0E3,
+ 0x04F, 0x046, 0x053, 0x0C0, 0x060, 0x013, 0x0A4, 0x0B9,
+ 0x0E5, 0x03C, 0x003, 0x0DE, 0x08F, 0x09C, 0x0F3, 0x000,
+ 0x09C, 0x06F, 0x0CF, 0x03E, 0x085, 0x0F9, 0x0A3, 0x036,
+ 0x002, 0x01E, 0x060, 0x038, 0x092, 0x03E, 0x063, 0x01A,
+ 0x010, 0x09F, 0x0CF, 0x018, 0x010, 0x092, 0x0BC, 0x0D0,
+ 0x0A4, 0x00C, 0x0DC, 0x0C0, 0x00F, 0x09C, 0x097, 0x034,
+ 0x062, 0x0B6, 0x0E7, 0x0F3, 0x0F3, 0x0A5, 0x0CF, 0x018,
+ 0x042, 0x034, 0x01C, 0x0C2, 0x0CA, 0x0FA, 0x08E, 0x068,
+ 0x052, 0x006, 0x0AF, 0x03C, 0x0A3, 0x00D, 0x0BF, 0x09E,
+ 0x050, 0x0E1, 0x0D1, 0x073, 0x0CA, 0x0E0, 0x03A, 0x0FC,
+ 0x0C1, 0x009, 0x01A, 0x01E, 0x06A, 0x05C, 0x05B, 0x08E,
+ 0x063, 0x04E, 0x077, 0x073, 0x0CC, 0x061, 0x067, 0x0DD,
+ 0x0E6, 0x06C, 0x048, 0x0D1, 0x0F3, 0x01B, 0x024, 0x069,
+ 0x051, 0x008, 0x0D4, 0x042, 0x01B, 0x0F4, 0x067, 0x0D1,
+ 0x080, 0x04E, 0x02F, 0x0D0, 0x08C, 0x0D8, 0x030, 0x009,
+ 0x0C2, 0x01E, 0x080, 0x01C, 0x046, 0x001, 0x03A, 0x047,
+ 0x0D0, 0x031, 0x0A1, 0x006, 0x001, 0x03A, 0x07F, 0x046,
+ 0x030, 0x021, 0x018, 0x004, 0x0E9, 0x05E, 0x084, 0x029,
+ 0x000, 0x0C0, 0x027, 0x0CD, 0x0D0, 0x000, 0x07C, 0x098,
+ 0x004, 0x0F9, 0x02E, 0x084, 0x062, 0x08C, 0x002, 0x07D,
+ 0x0BA, 0x03E, 0x07E, 0x04C, 0x002, 0x07D, 0x02E, 0x08C,
+ 0x061, 0x008, 0x030, 0x009, 0x0F4, 0x01D, 0x001, 0x065,
+ 0x073, 0x000, 0x09F, 0x051, 0x0D0, 0x085, 0x020, 0x018,
+ 0x004, 0x0FA, 0x0BD, 0x019, 0x046, 0x018, 0x0C0, 0x027,
+ 0x0DF, 0x0D1, 0x094, 0x038, 0x04C, 0x002, 0x07D, 0x017,
+ 0x046, 0x057, 0x001, 0x030, 0x009, 0x0F5, 0x0FA, 0x001,
+ 0x009, 0x006, 0x001, 0x03E, 0x087, 0x0A1, 0x04B, 0x088,
+ 0x0C0, 0x027, 0x0DC, 0x074, 0x00D, 0x039, 0x0D3, 0x000,
+ 0x09F, 0x073, 0x0D0, 0x030, 0x0B3, 0x098, 0x004, 0x0FB,
+ 0x0BD, 0x006, 0x0C4, 0x083, 0x000, 0x09F, 0x047, 0x0D0,
+ 0x036, 0x048, 0x0CC, 0x002, 0x071, 0x0BF, 0x03F, 0x09A,
+ 0x017, 0x0E6, 0x03F, 0x008, 0x021, 0x0E6, 0x092, 0x0A4,
+ 0x08F, 0x09A, 0x010, 0x031, 0x0A7, 0x0F3, 0x010, 0x0B1,
+ 0x084, 0x0AF, 0x03A, 0x0AC, 0x0DC, 0x0F7, 0x073, 0x0F2,
+ 0x05C, 0x0C6, 0x02A, 0x0DB, 0x09E, 0x07E, 0x07E, 0x097,
+ 0x031, 0x008, 0x063, 0x0D0, 0x073, 0x07B, 0x043, 0x0A8,
+ 0x0E6, 0x03D, 0x034, 0x0EA, 0x0F3, 0x0E3, 0x015, 0x0BF,
+ 0x09F, 0x018, 0x05F, 0x045, 0x0CF, 0x0E8, 0x09F, 0x05F,
+ 0x09A, 0x05B, 0x003, 0x0D0, 0x0F3, 0x0D3, 0x0CE, 0x037,
+ 0x01C, 0x0D0, 0x00F, 0x0BB, 0x09E, 0x068, 0x078, 0x03B,
+ 0x0BC, 0x0CA, 0x031, 0x0E8, 0x0F9, 0x0A2, 0x002, 0x012,
+ 0x0A2, 0x073, 0x051, 0x008, 0x06F, 0x0D1, 0x0F3, 0x046,
+ 0x001, 0x038, 0x0BF, 0x040, 0x0FC, 0x023, 0x000, 0x09C,
+ 0x021, 0x0E8, 0x049, 0x051, 0x080, 0x04E, 0x091, 0x0F4,
+ 0x021, 0x003, 0x019, 0x080, 0x04E, 0x09F, 0x0D0, 0x021,
+ 0x063, 0x006, 0x001, 0x03A, 0x056, 0x08C, 0x002, 0x074,
+ 0x0FE, 0x075, 0x049, 0x05E, 0x063, 0x0D3, 0x04A, 0x054,
+ 0x042, 0x035, 0x013, 0x0A7, 0x0D1, 0x080, 0x04E, 0x095,
+ 0x0E8, 0x01E, 0x09A, 0x04C, 0x002, 0x07C, 0x0DD, 0x01B,
+ 0x0B9, 0x0E6, 0x001, 0x03E, 0x04B, 0x0A0, 0x062, 0x0A3,
+ 0x000, 0x09F, 0x06E, 0x08C, 0x0FC, 0x0F3, 0x000, 0x09F,
+ 0x04B, 0x0A0, 0x042, 0x018, 0x0CC, 0x002, 0x07D, 0x007,
+ 0x043, 0x0DA, 0x013, 0x000, 0x09F, 0x051, 0x0D0, 0x03D,
+ 0x034, 0x098, 0x004, 0x0FA, 0x0BD, 0x01C, 0x062, 0x08C,
+ 0x002, 0x07D, 0x0FD, 0x01C, 0x061, 0x073, 0x000, 0x09F,
+ 0x045, 0x0D1, 0x0F4, 0x04E, 0x060, 0x013, 0x0EB, 0x0F4,
+ 0x025, 0x0B0, 0x033, 0x000, 0x09F, 0x043, 0x0D1, 0x0A7,
+ 0x09C, 0x018, 0x004, 0x0FB, 0x08E, 0x084, 0x003, 0x0E9,
+ 0x080, 0x04F, 0x0B9, 0x0E8, 0x043, 0x0C1, 0x030, 0x009,
+ 0x0F7, 0x07A, 0x00A, 0x031, 0x098, 0x004, 0x0FA, 0x03E,
+ 0x084, 0x040, 0x041, 0x080, 0x04E, 0x082, 0x0E7, 0x041,
+ 0x087, 0x009, 0x023, 0x004, 0x023, 0x000, 0x09D, 0x005,
+ 0x0CE, 0x096, 0x01C, 0x024, 0x08C, 0x010, 0x08C, 0x002,
+ 0x074, 0x017, 0x03A, 0x004, 0x038, 0x049, 0x018, 0x021,
+ 0x018, 0x004, 0x0E8, 0x02E, 0x074, 0x050, 0x0E1, 0x024,
+ 0x060, 0x084, 0x060, 0x013, 0x0A0, 0x0B9, 0x0D4, 0x011,
+ 0x0C2, 0x048, 0x0C1, 0x008, 0x0C0, 0x027, 0x041, 0x073,
+ 0x0A8, 0x023, 0x084, 0x091, 0x082, 0x011, 0x080, 0x04E,
+ 0x082, 0x0E7, 0x052, 0x08E, 0x012, 0x046, 0x008, 0x046,
+ 0x001, 0x03A, 0x00B, 0x09D, 0x040, 0x01C, 0x024, 0x08C,
+ 0x010, 0x08C, 0x002, 0x074, 0x017, 0x03A, 0x009, 0x00E,
+ 0x012, 0x046, 0x008, 0x046, 0x001, 0x03A, 0x00B, 0x098,
+ 0x06A, 0x01C, 0x024, 0x0B0, 0x0E1, 0x018, 0x004, 0x0E8,
+ 0x02E, 0x06B, 0x050, 0x0E1, 0x025, 0x087, 0x008, 0x0C0,
+ 0x027, 0x041, 0x073, 0x005, 0x043, 0x084, 0x096, 0x01C,
+ 0x023, 0x000, 0x09D, 0x005, 0x0CC, 0x0AA, 0x01C, 0x024,
+ 0x0B0, 0x0E1, 0x018, 0x004, 0x0E8, 0x02E, 0x070, 0x068,
+ 0x070, 0x092, 0x0C3, 0x084, 0x060, 0x013, 0x0E5, 0x044,
+ 0x0F9, 0x040, 0x09D, 0x005, 0x0CE, 0x05A, 0x01C, 0x024,
+ 0x0B0, 0x0E1, 0x018, 0x004, 0x0F9, 0x0D1, 0x03E, 0x070,
+ 0x027, 0x0CF, 0x013, 0x0E5, 0x044, 0x02C, 0x0A0, 0x042,
+ 0x0CB, 0x089, 0x0F2, 0x021, 0x03A, 0x00B, 0x09C, 0x00A,
+ 0x01C, 0x024, 0x0B0, 0x0E1, 0x018, 0x004, 0x0F9, 0x0D1,
+ 0x00B, 0x038, 0x010, 0x0B3, 0x0C4, 0x021, 0x039, 0x036,
+ 0x05C, 0x042, 0x0C8, 0x084, 0x02B, 0x079, 0x0D0, 0x061,
+ 0x0C2, 0x074, 0x015, 0x024, 0x0BA, 0x0D3, 0x031, 0x0E5,
+ 0x059, 0x008, 0x029, 0x008, 0x0E0, 0x066, 0x063, 0x042,
+ 0x095, 0x012, 0x081, 0x000, 0x029, 0x00B, 0x0C1, 0x051,
+ 0x024, 0x0B8, 0x019, 0x099, 0x090, 0x022, 0x090, 0x0B4,
+ 0x018, 0x0A0, 0x091, 0x041, 0x001, 0x041, 0x041, 0x041,
+ 0x052, 0x083, 0x0CA, 0x040, 0x028, 0x068, 0x029, 0x008,
+ 0x0BA, 0x016, 0x010, 0x09C, 0x099, 0x00B, 0x056, 0x094,
+ 0x090, 0x052, 0x015, 0x074, 0x0C0, 0x027, 0x01A, 0x02A,
+ 0x0D2, 0x090, 0x025, 0x0D3, 0x000, 0x09D, 0x028, 0x0AB,
+ 0x04A, 0x042, 0x017, 0x04C, 0x002, 0x070, 0x0D4, 0x084,
+ 0x02E, 0x098, 0x004, 0x0E1, 0x02A, 0x042, 0x017, 0x04C,
+ 0x002, 0x070, 0x082, 0x090, 0x04B, 0x0A6, 0x001, 0x038,
+ 0x051, 0x048, 0x042, 0x0E9, 0x080, 0x04E, 0x015, 0x0A4,
+ 0x021, 0x074, 0x0C0, 0x027, 0x00F, 0x0A4, 0x012, 0x0E9,
+ 0x080, 0x04E, 0x082, 0x0AC, 0x080, 0x0AC, 0x0A0, 0x0AC,
+ 0x0A9, 0x059, 0x0E5, 0x064, 0x045, 0x065, 0x0CA, 0x0C8,
+ 0x04A, 0x0CE, 0x00A, 0x0CE, 0x04A, 0x0CE, 0x095, 0x091,
+ 0x095, 0x094, 0x095, 0x093, 0x029, 0x025, 0x0C0, 0x0CC,
+ 0x0CC, 0x088, 0x0A4, 0x097, 0x056, 0x036, 0x064, 0x072,
+ 0x090, 0x054, 0x08A, 0x09C, 0x045, 0x008, 0x0B9, 0x0B7,
+ 0x066, 0x012, 0x093, 0x009, 0x0C9, 0x0B2, 0x074, 0x08E,
+ 0x0BA, 0x060, 0x013, 0x0E5, 0x034, 0x08E, 0x0BA, 0x060,
+ 0x013, 0x0E4, 0x074, 0x08E, 0x0BA, 0x060, 0x013, 0x0E5,
+ 0x069, 0x01D, 0x074, 0x0C0, 0x027, 0x0CA, 0x029, 0x01D,
+ 0x074, 0x0C0, 0x027, 0x0CE, 0x0D2, 0x025, 0x0D3, 0x000,
+ 0x09F, 0x038, 0x0A4, 0x04B, 0x0A6, 0x001, 0x03E, 0x05E,
+ 0x091, 0x02E, 0x098, 0x004, 0x0F9, 0x015, 0x022, 0x05D,
+ 0x030, 0x009, 0x0F3, 0x0E9, 0x012, 0x0E9, 0x080, 0x04F,
+ 0x090, 0x052, 0x025, 0x0D3, 0x000, 0x09D, 0x0C5, 0x048,
+ 0x025, 0x0D3, 0x000, 0x09C, 0x045, 0x0CE, 0x0CD, 0x009,
+ 0x0C9, 0x0B2, 0x01A, 0x044, 0x0BA, 0x060, 0x013, 0x0E7,
+ 0x034, 0x089, 0x074, 0x0C0, 0x027, 0x01C, 0x027, 0x0B7,
+ 0x09C, 0x080, 0x0C2, 0x0D7, 0x076, 0x059, 0x09B, 0x093,
+ 0x00C, 0x064, 0x0C3, 0x01D, 0x01B, 0x0F4, 0x045, 0x04B,
+ 0x0C7, 0x0C6, 0x03A, 0x037, 0x0E8, 0x081, 0x04B, 0x0C7,
+ 0x0C6, 0x03A, 0x037, 0x0E8, 0x091, 0x04B, 0x0C7, 0x0C6,
+ 0x032, 0x061, 0x08E, 0x0B3, 0x0BC, 0x0C3, 0x04A, 0x022,
+ 0x0E6, 0x0B5, 0x024, 0x097, 0x071, 0x0C9, 0x087, 0x0B4,
+ 0x031, 0x0AE, 0x073, 0x0A2, 0x0CF, 0x039, 0x0D2, 0x05D,
+ 0x004, 0x044, 0x042, 0x0C0, 0x0D6, 0x0DE, 0x071, 0x006,
+ 0x016, 0x0BB, 0x0DB, 0x0CE, 0x083, 0x00C, 0x064, 0x0C3,
+ 0x01D, 0x031, 0x013, 0x004, 0x0F9, 0x095, 0x04D, 0x013,
+ 0x032, 0x093, 0x063, 0x05E, 0x066, 0x014, 0x0CC, 0x029,
+ 0x02A, 0x053, 0x030, 0x0A6, 0x061, 0x04C, 0x0C2, 0x099,
+ 0x085, 0x03A, 0x072, 0x0CC, 0x0C2, 0x099, 0x085, 0x006,
+ 0x01B, 0x0B3, 0x00A, 0x066, 0x014, 0x014, 0x024, 0x099,
+ 0x085, 0x033, 0x00A, 0x008, 0x0B1, 0x086, 0x061, 0x04C,
+ 0x0C2, 0x084, 0x021, 0x068, 0x073, 0x03B, 0x030, 0x0A6,
+ 0x061, 0x041, 0x04E, 0x0A5, 0x098, 0x053, 0x030, 0x0AC,
+ 0x059, 0x076, 0x061, 0x04C, 0x0C2, 0x0B0, 0x08D, 0x0D6,
+ 0x061, 0x04C, 0x0C2, 0x0B0, 0x02C, 0x0F6, 0x061, 0x04C,
+ 0x0C2, 0x0B1, 0x08C, 0x0A5, 0x098, 0x053, 0x030, 0x0AC,
+ 0x00F, 0x024, 0x0CC, 0x029, 0x098, 0x056, 0x00F, 0x028,
+ 0x066, 0x015, 0x092, 0x01A, 0x019, 0x085, 0x033, 0x00A,
+ 0x0CA, 0x085, 0x00C, 0x0C2, 0x099, 0x085, 0x065, 0x0C3,
+ 0x0D9, 0x085, 0x033, 0x00A, 0x0CE, 0x070, 0x086, 0x061,
+ 0x04C, 0x0C2, 0x0B3, 0x097, 0x071, 0x00C, 0x099, 0x03B,
+ 0x0CC, 0x083, 0x058, 0x00B, 0x0EA, 0x077, 0x09D, 0x006,
+ 0x04A, 0x0BE, 0x004, 0x074, 0x060, 0x0E0, 0x0D1, 0x04E,
+ 0x038, 0x04C, 0x03E, 0x0EE, 0x03E, 0x0EE, 0x03E, 0x0EE,
+ 0x03E, 0x0EE, 0x030, 0x0BB, 0x0CA, 0x0E1, 0x01F, 0x077,
+ 0x01F, 0x077, 0x01F, 0x077, 0x01F, 0x077, 0x027, 0x070,
+ 0x08F, 0x0BB, 0x080, 0x00E, 0x011, 0x0F7, 0x071, 0x0F7,
+ 0x07C, 0x06F, 0x03C, 0x0B3, 0x036, 0x002, 0x0FB, 0x08D,
+ 0x0E6, 0x055, 0x070, 0x07F, 0x02D, 0x024, 0x069, 0x055,
+ 0x04F, 0x058, 0x0A9, 0x023, 0x01F, 0x054, 0x0F7, 0x08A,
+ 0x095, 0x025, 0x02B, 0x075, 0x00C, 0x0CC, 0x0AC, 0x056,
+ 0x051, 0x0CC, 0x051, 0x0E4, 0x045, 0x0CE, 0x0A2, 0x012,
+ 0x039, 0x0C0, 0x0A0, 0x0AF, 0x056, 0x06A, 0x049, 0x07F,
+ 0x002, 0x08C, 0x009, 0x0F8, 0x00B, 0x0EB, 0x0AF, 0x056,
+ 0x076, 0x067, 0x052, 0x0B2, 0x08E, 0x069, 0x0A7, 0x011,
+ 0x073, 0x0A8, 0x0B1, 0x0BC, 0x0CA, 0x0A0, 0x0A9, 0x036,
+ 0x050, 0x02C, 0x098, 0x0E7, 0x00A, 0x0F5, 0x066, 0x0A4,
+ 0x097, 0x0E2, 0x05A, 0x030, 0x027, 0x0BA, 0x0F7, 0x083,
+ 0x04E, 0x0A5, 0x033, 0x00A, 0x066, 0x015, 0x08D, 0x0E6,
+ 0x055, 0x039, 0x0D2, 0x0A7, 0x0AC, 0x054, 0x060, 0x016,
+ 0x070, 0x01B, 0x072, 0x08E, 0x062, 0x08F, 0x022, 0x02E,
+ 0x075, 0x016, 0x002, 0x0FB, 0x08D, 0x0E6, 0x00A, 0x095,
+ 0x03D, 0x062, 0x0A3, 0x000, 0x0B7, 0x001, 0x0B5, 0x053,
+ 0x0DE, 0x02A, 0x054, 0x094, 0x0AD, 0x0D4, 0x033, 0x032,
+ 0x0B1, 0x059, 0x047, 0x031, 0x047, 0x091, 0x017, 0x03A,
+ 0x088, 0x048, 0x0E7, 0x002, 0x0B0, 0x017, 0x0DC, 0x067,
+ 0x09D, 0x04B, 0x08D, 0x0E7, 0x052, 0x0AA, 0x07B, 0x0D4,
+ 0x0AA, 0x092, 0x0BD, 0x0D6, 0x099, 0x0BC, 0x056, 0x002,
+ 0x0FB, 0x08C, 0x0F3, 0x066, 0x066, 0x0C6, 0x0F3, 0x066,
+ 0x066, 0x062, 0x099, 0x02A, 0x0F8, 0x018, 0x068, 0x070,
+ 0x0B0, 0x08A, 0x00D, 0x055, 0x055, 0x055, 0x055, 0x052,
+ 0x032, 0x0E1, 0x040, 0x05C, 0x038, 0x00B, 0x0EA, 0x09B,
+ 0x087, 0x001, 0x07D, 0x0C0, 0x05F, 0x070, 0x017, 0x0DC,
+ 0x005, 0x0F5, 0x0DC, 0x09B, 0x001, 0x07D, 0x061, 0x04D,
+ 0x080, 0x0BE, 0x0A7, 0x079, 0x082, 0x0A2, 0x01F, 0x050,
+ 0x015, 0x02A, 0x08F, 0x08B, 0x01C, 0x0E5, 0x0A5, 0x013,
+ 0x084, 0x058, 0x0E7, 0x002, 0x091, 0x054, 0x005, 0x002,
+ 0x04B, 0x0BD, 0x022, 0x01A, 0x094, 0x07F, 0x09C, 0x01A,
+ 0x0C0, 0x05F, 0x042, 0x01A, 0x021, 0x0D1, 0x080, 0x059,
+ 0x0C0, 0x06D, 0x01C, 0x02C, 0x00A, 0x083, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x054, 0x01C, 0x0B8, 0x05C, 0x06E, 0x017, 0x09C,
+ 0x02F, 0x038, 0x05E, 0x070, 0x0E7, 0x0B8, 0x05E, 0x070,
+ 0x0BC, 0x0E1, 0x079, 0x0C2, 0x0F3, 0x085, 0x0E7, 0x00B,
+ 0x0CE, 0x017, 0x09C, 0x029, 0x09C, 0x029, 0x09C, 0x029,
+ 0x09C, 0x023, 0x00F, 0x058, 0x014, 0x0EE, 0x035, 0x077,
+ 0x026, 0x021, 0x093, 0x005, 0x0C9, 0x0B0, 0x017, 0x0D2,
+ 0x01D, 0x018, 0x08A, 0x021, 0x093, 0x005, 0x0C9, 0x0B0,
+ 0x017, 0x0D1, 0x087, 0x0AC, 0x00A, 0x074, 0x00F, 0x0AE,
+ 0x0F5, 0x05A, 0x082, 0x0A3, 0x0E4, 0x03A, 0x031, 0x014,
+ 0x0BB, 0x0D7, 0x059, 0x099, 0x074, 0x0A2, 0x019, 0x030,
+ 0x05C, 0x09B, 0x001, 0x07D, 0x018, 0x07A, 0x0C0, 0x0A7,
+ 0x040, 0x0F8, 0x043, 0x0D4, 0x063, 0x089, 0x025, 0x0D0,
+ 0x010, 0x0D6, 0x01C, 0x06A, 0x010, 0x0F5, 0x055, 0x089,
+ 0x025, 0x0D1, 0x051, 0x066, 0x01F, 0x051, 0x0F5, 0x091,
+ 0x049, 0x02E, 0x089, 0x015, 0x098, 0x06A, 0x0A3, 0x0E0,
+ 0x08A, 0x094, 0x065, 0x064, 0x00E, 0x013, 0x017, 0x038,
+ 0x0A8, 0x086, 0x04C, 0x017, 0x026, 0x0C0, 0x05F, 0x046,
+ 0x01E, 0x0B0, 0x028, 0x063, 0x01F, 0x008, 0x07A, 0x08C,
+ 0x071, 0x024, 0x0BA, 0x002, 0x01A, 0x0D0, 0x00D, 0x042,
+ 0x01E, 0x0AA, 0x0B1, 0x024, 0x0BA, 0x02A, 0x02D, 0x031,
+ 0x0F5, 0x01F, 0x058, 0x074, 0x092, 0x0E8, 0x087, 0x05A,
+ 0x063, 0x052, 0x0DE, 0x0F4, 0x051, 0x069, 0x04A, 0x03E,
+ 0x009, 0x069, 0x046, 0x050, 0x0F0, 0x0E1, 0x031, 0x073,
+ 0x005, 0x045, 0x0BD, 0x059, 0x08D, 0x08B, 0x04A, 0x07C,
+ 0x0D3, 0x0ED, 0x038, 0x0E9, 0x0D3, 0x04E, 0x074, 0x0ED,
+ 0x044, 0x032, 0x060, 0x0B9, 0x036, 0x002, 0x0FA, 0x05B,
+ 0x0DE, 0x08A, 0x02D, 0x029, 0x0D0, 0x0E1, 0x021, 0x0F5,
+ 0x0A3, 0x092, 0x021, 0x0F2, 0x019, 0x030, 0x05C, 0x09B,
+ 0x001, 0x07D, 0x021, 0x0F5, 0x0A0, 0x0C6, 0x001, 0x067,
+ 0x001, 0x0B4, 0x045, 0x0CE, 0x0A5, 0x012, 0x039, 0x0D4,
+ 0x01C, 0x005, 0x0F4, 0x040, 0x0A1, 0x0C2, 0x0C3, 0x050,
+ 0x06A, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA,
+ 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x0AA, 0x081, 0x0AF,
+ 0x086, 0x09F, 0x019, 0x01B, 0x0E7, 0x081, 0x0F3, 0x065,
+ 0x0F2, 0x080, 0x0BE, 0x070, 0x017, 0x0DF, 0x0DF, 0x038,
+ 0x00B, 0x0EB, 0x00D, 0x0C3, 0x080, 0x0BE, 0x0A7, 0x00F,
+ 0x095, 0x04F, 0x05A, 0x094, 0x0C0, 0x02C, 0x0D8, 0x0B1,
+ 0x0A7, 0x0CE, 0x05A, 0x011, 0x073, 0x0A8, 0x03A, 0x0C2,
+ 0x0CC, 0x0B6, 0x030, 0x017, 0x0DC, 0x06F, 0x035, 0x0A9,
+ 0x080, 0x04D, 0x0A7, 0x0CE, 0x02A, 0x018, 0x079, 0x0C5,
+ 0x049, 0x0DE, 0x061, 0x0A8, 0x022, 0x0E7, 0x050, 0x033,
+ 0x0F9, 0x098, 0x064, 0x008, 0x0B9, 0x095, 0x042, 0x0FC,
+ 0x0CC, 0x0D9, 0x095, 0x03D, 0x062, 0x0A2, 0x048, 0x0D4,
+ 0x048, 0x0E7, 0x002, 0x088, 0x0B9, 0x0C1, 0x0A0, 0x0E3,
+ 0x09D, 0x04E, 0x062, 0x0E6, 0x0CC, 0x0C6, 0x06B, 0x0CE,
+ 0x083, 0x010, 0x0C9, 0x082, 0x0E4, 0x0DA, 0x0C2, 0x0C8,
+ 0x01E, 0x0C3, 0x0B9, 0x036, 0x002, 0x0FA, 0x0A9, 0x0EB,
+ 0x04E, 0x030, 0x030, 0x0FA, 0x00D, 0x0F0, 0x0A9, 0x0EB,
+ 0x040, 0x0B9, 0x00F, 0x0AA, 0x07A, 0x0D2, 0x0C2, 0x0C8,
+ 0x0FA, 0x0A7, 0x0AD, 0x041, 0x00A, 0x047, 0x0D5, 0x03D,
+ 0x068, 0x0AC, 0x0F1, 0x0F5, 0x04F, 0x05A, 0x097, 0x054,
+ 0x07D, 0x04F, 0x0A8, 0x0AA, 0x055, 0x01F, 0x011, 0x073,
+ 0x05A, 0x0B0, 0x017, 0x0DE, 0x05D, 0x059, 0x0A9, 0x025,
+ 0x0D0, 0x055, 0x02A, 0x046, 0x0BC, 0x0B8, 0x022, 0x0AE,
+ 0x045, 0x029, 0x03E, 0x014, 0x0FA, 0x0E1, 0x099, 0x094,
+ 0x0CA, 0x04A, 0x0BE, 0x03D, 0x0D6, 0x099, 0x092, 0x05D,
+ 0x015, 0x017, 0x0C8, 0x0D7, 0x0DC, 0x015, 0x017, 0x08A,
+ 0x040, 0x01F, 0x00A, 0x09E, 0x0AC, 0x0C9, 0x065, 0x049,
+ 0x05C, 0x01D, 0x010, 0x068, 0x04A, 0x03E, 0x05B, 0x0DE,
+ 0x083, 0x016, 0x095, 0x080, 0x0BE, 0x091, 0x074, 0x058,
+ 0x0A4, 0x000, 0x07C, 0x038, 0x0E7, 0x056, 0x030, 0x017,
+ 0x0DF, 0x075, 0x0A6, 0x064, 0x097, 0x045, 0x020, 0x09D,
+ 0x003, 0x05F, 0x070, 0x054, 0x05E, 0x029, 0x01D, 0x0F0,
+ 0x0A9, 0x0EA, 0x0CC, 0x086, 0x054, 0x095, 0x0C1, 0x0D1,
+ 0x006, 0x083, 0x00F, 0x0AA, 0x07B, 0x0D0, 0x065, 0x049,
+ 0x045, 0x0BD, 0x0E9, 0x062, 0x0D2, 0x091, 0x0DF, 0x004,
+ 0x05D, 0x016, 0x029, 0x01C, 0x07D, 0x04F, 0x0AC, 0x01A,
+ 0x047, 0x01A, 0x0A9, 0x0F5, 0x067, 0x066, 0x053, 0x028,
+ 0x0B7, 0x0BD, 0x02C, 0x05A, 0x052, 0x03B, 0x0E3, 0x0DD,
+ 0x059, 0x0A9, 0x025, 0x0D1, 0x0A8, 0x0AC, 0x008, 0x06B,
+ 0x0EE, 0x008, 0x0AB, 0x0C5, 0x020, 0x02F, 0x085, 0x04F,
+ 0x056, 0x066, 0x075, 0x049, 0x05C, 0x01C, 0x018, 0x01D,
+ 0x081, 0x0C2, 0x064, 0x005, 0x0F0, 0x080, 0x0BE, 0x035,
+ 0x05C, 0x0D0, 0x017, 0x0C2, 0x055, 0x0F0, 0x095, 0x07C,
+ 0x025, 0x05F, 0x008, 0x00B, 0x0E1, 0x001, 0x07C, 0x07B,
+ 0x0AB, 0x035, 0x024, 0x0BA, 0x010, 0x055, 0x093, 0x01A,
+ 0x0FB, 0x082, 0x02A, 0x0F1, 0x048, 0x0D7, 0x0C2, 0x0A7,
+ 0x0AB, 0x031, 0x0B2, 0x0A4, 0x0AC, 0x063, 0x09D, 0x04A,
+ 0x08D, 0x07C, 0x07B, 0x0AB, 0x035, 0x024, 0x0BA, 0x010,
+ 0x054, 0x030, 0x08D, 0x07D, 0x0C1, 0x015, 0x078, 0x0AC,
+ 0x06F, 0x05A, 0x094, 0x060, 0x01A, 0x0E3, 0x079, 0x0D4,
+ 0x0AA, 0x04F, 0x085, 0x04F, 0x056, 0x066, 0x0D5, 0x049,
+ 0x058, 0x0C7, 0x03A, 0x095, 0x049, 0x0F0, 0x045, 0x0D1,
+ 0x062, 0x094, 0x086, 0x0BC, 0x01D, 0x013, 0x0D2, 0x090,
+ 0x0FF, 0x0CF, 0x07A, 0x083, 0x0F2, 0x050, 0x031, 0x0DE,
+ 0x000, 0x060, 0x060, 0x0A1, 0x017, 0x035, 0x0A8, 0x05F,
+ 0x09B, 0x01B, 0x037, 0x007, 0x044, 0x01A, 0x030, 0x00B,
+ 0x038, 0x00D, 0x0BC, 0x01C, 0x0E0, 0x0D0, 0x047, 0x0CE,
+ 0x0A0, 0x0AA, 0x07A, 0x0A1, 0x098, 0x06A, 0x092, 0x095,
+ 0x03D, 0x068, 0x031, 0x080, 0x05B, 0x080, 0x0DA, 0x0A9,
+ 0x0EF, 0x041, 0x095, 0x025, 0x016, 0x0F7, 0x0A5, 0x08B,
+ 0x04A, 0x0C6, 0x079, 0x0B3, 0x033, 0x060, 0x02F, 0x0AA,
+ 0x09E, 0x0B1, 0x051, 0x080, 0x059, 0x09E, 0x0CA, 0x0A7,
+ 0x0AC, 0x00A, 0x030, 0x00B, 0x067, 0x0B2, 0x0AD, 0x0D5,
+ 0x0DA, 0x092, 0x05D, 0x017, 0x0A3, 0x000, 0x0B3, 0x02D,
+ 0x095, 0x06E, 0x008, 0x0A9, 0x058, 0x0A1, 0x017, 0x03A,
+ 0x08B, 0x001, 0x07D, 0x054, 0x0F7, 0x08E, 0x095, 0x025,
+ 0x008, 0x01C, 0x0E0, 0x056, 0x002, 0x0FB, 0x0C1, 0x0D1,
+ 0x015, 0x018, 0x005, 0x092, 0x06B, 0x03C, 0x01D, 0x012,
+ 0x028, 0x0C0, 0x02C, 0x0A5, 0x06C, 0x011, 0x070, 0x017,
+ 0x0B2, 0x038, 0x04D, 0x080, 0x0BE, 0x0E0, 0x02F, 0x0B4,
+ 0x0EC, 0x04A, 0x0ED, 0x0B3, 0x09E, 0x002, 0x0FB, 0x080,
+ 0x0BE, 0x0E0, 0x02F, 0x0B1, 0x039, 0x093, 0x03E, 0x06D,
+ 0x0E7, 0x010, 0x060, 0x09F, 0x032, 0x0A9, 0x0A2, 0x06C,
+ 0x005, 0x0F4, 0x040, 0x0E6, 0x00A, 0x095, 0x03D, 0x06A,
+ 0x023, 0x000, 0x0B3, 0x080, 0x0DA, 0x0A7, 0x0D6, 0x02A,
+ 0x003, 0x00D, 0x070, 0x017, 0x0D2, 0x02E, 0x076, 0x029,
+ 0x04F, 0x0BC, 0x054, 0x0A6, 0x051, 0x06F, 0x07A, 0x058,
+ 0x0B4, 0x0AC, 0x005, 0x0F4, 0x08B, 0x0A2, 0x0F4, 0x00E,
+ 0x035, 0x00D, 0x049, 0x02E, 0x0B4, 0x0CC, 0x018, 0x0A5,
+ 0x0C8, 0x0F8, 0x04A, 0x097, 0x023, 0x0E1, 0x005, 0x02E,
+ 0x047, 0x0C2, 0x08A, 0x05C, 0x08F, 0x085, 0x069, 0x072,
+ 0x03E, 0x01F, 0x04A, 0x0C3, 0x055, 0x01F, 0x056, 0x043,
+ 0x032, 0x08C, 0x0A3, 0x05E, 0x060, 0x0A8, 0x045, 0x0CE,
+ 0x00D, 0x060, 0x02F, 0x0A3, 0x084, 0x09D, 0x0D8, 0x0F0,
+ 0x017, 0x0D2, 0x02E, 0x00E, 0x01B, 0x023, 0x084, 0x0D8,
+ 0x00B, 0x0EB, 0x089, 0x0F3, 0x080, 0x0BE, 0x0E0, 0x02F,
+ 0x0BB, 0x039, 0x085, 0x0DF, 0x022, 0x003, 0x0E7, 0x001,
+ 0x07D, 0x0C0, 0x05F, 0x070, 0x017, 0x0D1, 0x017, 0x038,
+ 0x014, 0x05B, 0x0D6, 0x0A2, 0x074, 0x00D, 0x04B, 0x07A,
+ 0x0B3, 0x031, 0x096, 0x094, 0x06B, 0x0CC, 0x035, 0x023,
+ 0x0D7, 0x049, 0x048, 0x015, 0x073, 0x029, 0x00F, 0x05D,
+ 0x08A, 0x0C0, 0x05F, 0x04D, 0x079, 0x084, 0x035, 0x080,
+ 0x0BE, 0x088, 0x01C, 0x0C3, 0x052, 0x09F, 0x059, 0x068,
+ 0x0C0, 0x02C, 0x0E0, 0x036, 0x0AA, 0x07B, 0x0CD, 0x04A,
+ 0x092, 0x0BE, 0x0F3, 0x081, 0x04A, 0x07D, 0x05B, 0x059,
+ 0x094, 0x0CA, 0x01C, 0x024, 0x0EE, 0x0C7, 0x080, 0x0BE,
+ 0x088, 0x01C, 0x0C3, 0x052, 0x09F, 0x059, 0x068, 0x0C0,
+ 0x02C, 0x0E0, 0x036, 0x0AA, 0x07B, 0x0CD, 0x04A, 0x092,
+ 0x0BE, 0x0F3, 0x081, 0x043, 0x084, 0x09C, 0x07B, 0x038,
+ 0x00B, 0x0EB, 0x0AF, 0x070, 0x0D4, 0x0EA, 0x053, 0x000,
+ 0x09B, 0x04F, 0x09C, 0x054, 0x030, 0x0F3, 0x08A, 0x094,
+ 0x0FA, 0x0B6, 0x0B3, 0x029, 0x094, 0x022, 0x0E6, 0x01A,
+ 0x085, 0x0F9, 0x0B0, 0x059, 0x093, 0x0F9, 0x0D2, 0x0C4,
+ 0x032, 0x060, 0x0B9, 0x036, 0x0B0, 0x0B3, 0x090, 0x0D9,
+ 0x077, 0x026, 0x01C, 0x027, 0x022, 0x0E8, 0x096, 0x0B4,
+ 0x023, 0x0EA, 0x09E, 0x0B5, 0x011, 0x080, 0x059, 0x065,
+ 0x086, 0x020, 0x073, 0x096, 0x08D, 0x079, 0x0AD, 0x058,
+ 0x00B, 0x0E9, 0x017, 0x044, 0x08A, 0x04A, 0x007, 0x0D7,
+ 0x07A, 0x082, 0x0A1, 0x090, 0x0FA, 0x0EF, 0x001, 0x054,
+ 0x0BA, 0x050, 0x0D4, 0x059, 0x01E, 0x02C, 0x0E9, 0x0F3,
+ 0x08A, 0x099, 0x085, 0x06B, 0x00B, 0x023, 0x015, 0x097,
+ 0x072, 0x061, 0x017, 0x030, 0x0D4, 0x02C, 0x073, 0x087,
+ 0x048, 0x0AA, 0x002, 0x081, 0x025, 0x0DE, 0x091, 0x00D,
+ 0x04A, 0x0C0, 0x05F, 0x07E, 0x0D2, 0x080, 0x0A5, 0x03E,
+ 0x0B2, 0x0D0, 0x0C8, 0x06B, 0x080, 0x0BE, 0x088, 0x01C,
+ 0x0EA, 0x009, 0x017, 0x044, 0x01A, 0x037, 0x01A, 0x091,
+ 0x074, 0x058, 0x0A3, 0x071, 0x0AF, 0x007, 0x044, 0x054,
+ 0x06E, 0x035, 0x0E0, 0x0E8, 0x0AA, 0x064, 0x00F, 0x090,
+ 0x0FA, 0x0D0, 0x063, 0x000, 0x0B3, 0x080, 0x0DA, 0x02C,
+ 0x073, 0x087, 0x048, 0x0AA, 0x002, 0x081, 0x025, 0x0DE,
+ 0x091, 0x00D, 0x04A, 0x0C0, 0x05F, 0x048, 0x0BA, 0x027,
+ 0x0A3, 0x000, 0x0B7, 0x001, 0x0B7, 0x04F, 0x09C, 0x0B4,
+ 0x06B, 0x0CC, 0x035, 0x016, 0x0F5, 0x066, 0x063, 0x02D,
+ 0x029, 0x01E, 0x0BA, 0x04A, 0x040, 0x0AB, 0x099, 0x048,
+ 0x07A, 0x0EC, 0x050, 0x08B, 0x09C, 0x008, 0x022, 0x0FC,
+ 0x0F9, 0x0B2, 0x055, 0x03D, 0x062, 0x0A9, 0x023, 0x051,
+ 0x023, 0x09C, 0x00A, 0x03C, 0x073, 0x00D, 0x044, 0x05C,
+ 0x0E1, 0x050, 0x071, 0x0CE, 0x0A1, 0x01F, 0x0E7, 0x015,
+ 0x06B, 0x00B, 0x025, 0x0ED, 0x00B, 0x093, 0x060, 0x02F,
+ 0x0AA, 0x09E, 0x0AC, 0x036, 0x065, 0x049, 0x05F, 0x07A,
+ 0x020, 0x050, 0x008, 0x07F, 0x0EF, 0x039, 0x014, 0x049,
+ 0x001, 0x011, 0x081, 0x004, 0x060, 0x040, 0x0CC, 0x059,
+ 0x0C0, 0x0AD, 0x023, 0x0EB, 0x041, 0x0B0, 0x081, 0x0F2,
+ 0x03A, 0x041, 0x0AA, 0x050, 0x043, 0x0E4, 0x0D4, 0x086,
+ 0x054, 0x0A0, 0x087, 0x0C1, 0x052, 0x0CA, 0x093, 0x001,
+ 0x032, 0x054, 0x09D, 0x024, 0x002, 0x000, 0x000, 0x052,
+ 0x0AF, 0x016, 0x046, 0x0A7, 0x091, 0x067, 0x008, 0x0B4,
+ 0x004, 0x051, 0x0F1, 0x065, 0x019, 0x0B4, 0x06E, 0x02D,
+ 0x0C0, 0x0AD, 0x049, 0x000, 0x092, 0x057, 0x01B, 0x074,
+ 0x045, 0x05F, 0x023, 0x051, 0x0B7, 0x044, 0x00A, 0x010,
+ 0x006, 0x0A3, 0x06E, 0x08B, 0x06B, 0x008, 0x01F, 0x019,
+ 0x0D1, 0x0E6, 0x080, 0x082, 0x080, 0x054, 0x004, 0x02A,
+ 0x045, 0x091, 0x0A9, 0x0E4, 0x059, 0x0C2, 0x02D, 0x001,
+ 0x014, 0x004, 0x050, 0x0D3, 0x0FC, 0x055, 0x084, 0x061,
+ 0x0D9, 0x080, 0x051, 0x02F, 0x0E2, 0x01F, 0x046, 0x05F,
+ 0x040, 0x0E0, 0x020, 0x015, 0x04A, 0x0BC, 0x059, 0x01A,
+ 0x09E, 0x045, 0x09C, 0x022, 0x0D0, 0x011, 0x048, 0x0CB,
+ 0x0E8, 0x014, 0x008, 0x001, 0x054, 0x015, 0x0E2, 0x0C8,
+ 0x0D4, 0x0F2, 0x02C, 0x0E1, 0x016, 0x080, 0x08A, 0x046,
+ 0x05F, 0x052, 0x07C, 0x0D9, 0x0A8, 0x0F8, 0x088, 0x0D0,
+ 0x05A, 0x03C, 0x0D2, 0x05C, 0x05B, 0x080, 0x0DA, 0x0A7,
+ 0x0D6, 0x05A, 0x008, 0x086, 0x0A4, 0x05D, 0x017, 0x0A0,
+ 0x0C3, 0x052, 0x02E, 0x088, 0x0A8, 0x022, 0x01F, 0x053,
+ 0x0EA, 0x0DA, 0x0CC, 0x0A6, 0x050, 0x0E1, 0x027, 0x076,
+ 0x03C, 0x005, 0x0F5, 0x04F, 0x0AB, 0x06B, 0x032, 0x099,
+ 0x043, 0x084, 0x09C, 0x07B, 0x038, 0x00B, 0x0E9, 0x027,
+ 0x0AC, 0x0D4, 0x092, 0x0E0, 0x00E, 0x0DA, 0x038, 0x04D,
+ 0x080, 0x0BE, 0x0E6, 0x07D, 0x050, 0x0BA, 0x051, 0x0AE,
+ 0x066, 0x0EF, 0x0BC, 0x0DC, 0x07B, 0x087, 0x01E, 0x002,
+ 0x0FA, 0x093, 0x0E6, 0x0CD, 0x047, 0x0C4, 0x043, 0x0CD,
+ 0x00F, 0x034, 0x09D, 0x0A3, 0x000, 0x0B0, 0x055, 0x001,
+ 0x0AE, 0x003, 0x084, 0x004, 0x0CE, 0x001, 0x0D0, 0x0E1,
+ 0x070, 0x002, 0x080, 0x00E, 0x089, 0x0E9, 0x022, 0x01F,
+ 0x0E0, 0x0E8, 0x096, 0x0B0, 0x011, 0x0F4, 0x0C2, 0x0CE,
+ 0x003, 0x06A, 0x044, 0x02D, 0x0C0, 0x06D, 0x048, 0x005,
+ 0x0B8, 0x00D, 0x0A3, 0x000, 0x0B7, 0x076, 0x0D5, 0x0DE,
+ 0x0B1, 0x050, 0x0DC, 0x07D, 0x077, 0x0BC, 0x054, 0x0BA,
+ 0x052, 0x07F, 0x058, 0x014, 0x034, 0x00F, 0x09A, 0x0F3,
+ 0x081, 0x058, 0x00B, 0x0EA, 0x0EF, 0x058, 0x014, 0x060,
+ 0x016, 0x0A5, 0x06C, 0x02E, 0x0F7, 0x081, 0x04B, 0x0A5,
+ 0x06F, 0x07D, 0x05D, 0x0EE, 0x0B5, 0x02E, 0x095, 0x080,
+ 0x0BE, 0x0F0, 0x073, 0x0BD, 0x004, 0x07C, 0x0EA, 0x0FE,
+ 0x0EB, 0x04C, 0x0DE, 0x029, 0x053, 0x0DD, 0x06A, 0x054,
+ 0x094, 0x0A9, 0x0EA, 0x00A, 0x08C, 0x002, 0x0D6, 0x04C,
+ 0x03C, 0x005, 0x0F4, 0x000, 0x0EA, 0x0CD, 0x056, 0x0AF,
+ 0x0C0, 0x047, 0x0D2, 0x09C, 0x08D, 0x029, 0x0CA, 0x0E0,
+ 0x02F, 0x0AE, 0x0BD, 0x075, 0x099, 0x09D, 0x04A, 0x0F9,
+ 0x0EF, 0x051, 0x07C, 0x094, 0x00C, 0x077, 0x080, 0x018,
+ 0x018, 0x029, 0x02A, 0x0F8, 0x0E0, 0x0E8, 0x0AA, 0x030,
+ 0x00B, 0x02A, 0x098, 0x07C, 0x01D, 0x011, 0x051, 0x080,
+ 0x059, 0x054, 0x0C3, 0x051, 0x0F5, 0x01B, 0x033, 0x024,
+ 0x0BB, 0x082, 0x0A5, 0x019, 0x05C, 0x01D, 0x010, 0x028,
+ 0x0C0, 0x02C, 0x09A, 0x0C7, 0x0C1, 0x0D1, 0x022, 0x08C,
+ 0x002, 0x0C9, 0x094, 0x064, 0x05C, 0x00C, 0x0D6, 0x08E,
+ 0x013, 0x060, 0x02F, 0x0B8, 0x00B, 0x0EA, 0x030, 0x0E3,
+ 0x0C0, 0x05F, 0x048, 0x0DC, 0x078, 0x00B, 0x0E8, 0x000,
+ 0x0E3, 0x0C0, 0x05F, 0x06C, 0x038, 0x0D5, 0x02E, 0x035,
+ 0x04F, 0x05A, 0x08A, 0x061, 0x0AA, 0x09F, 0x056, 0x01B,
+ 0x032, 0x099, 0x046, 0x042, 0x0C8, 0x001, 0x00C, 0x045,
+ 0x0CE, 0x0A5, 0x017, 0x0E6, 0x0C6, 0x0CE, 0x0A9, 0x0EB,
+ 0x015, 0x016, 0x046, 0x0A2, 0x047, 0x038, 0x014, 0x043,
+ 0x026, 0x022, 0x0E7, 0x03D, 0x060, 0x02F, 0x0AA, 0x09E,
+ 0x0B5, 0x012, 0x0E0, 0x07F, 0x001, 0x07D, 0x0E3, 0x0E7,
+ 0x002, 0x093, 0x0F9, 0x095, 0x044, 0x05C, 0x0E5, 0x0A0,
+ 0x0E3, 0x09D, 0x04A, 0x07F, 0x09C, 0x054, 0x0A9, 0x0EB,
+ 0x051, 0x005, 0x046, 0x0B9, 0x0FC, 0x0C0, 0x01B, 0x022,
+ 0x02E, 0x064, 0x054, 0x02F, 0x0CD, 0x046, 0x0CC, 0x0A7,
+ 0x0D5, 0x086, 0x0CC, 0x0A6, 0x050, 0x055, 0x0C6, 0x045,
+ 0x0CE, 0x05A, 0x00E, 0x039, 0x0D4, 0x0A7, 0x0F9, 0x0C5,
+ 0x04A, 0x09E, 0x0B5, 0x011, 0x080, 0x059, 0x0C0, 0x06D,
+ 0x0CF, 0x0E6, 0x000, 0x0D9, 0x011, 0x073, 0x022, 0x0A1,
+ 0x07E, 0x06A, 0x036, 0x065, 0x03E, 0x0AC, 0x036, 0x065,
+ 0x032, 0x0B0, 0x017, 0x0DD, 0x03E, 0x072, 0x0D2, 0x079,
+ 0x031, 0x00C, 0x098, 0x02E, 0x04C, 0x020, 0x073, 0x02A,
+ 0x08F, 0x0F3, 0x08A, 0x0AD, 0x0E7, 0x041, 0x082, 0x07C,
+ 0x0CA, 0x0A6, 0x089, 0x0B5, 0x085, 0x09F, 0x0B0, 0x0F0,
+ 0x017, 0x0D5, 0x01F, 0x054, 0x054, 0x025, 0x01A, 0x0A8,
+ 0x0FF, 0x02A, 0x094, 0x065, 0x011, 0x0D7, 0x049, 0x044,
+ 0x0D5, 0x0CC, 0x0A0, 0x055, 0x0D8, 0x0AE, 0x00E, 0x088,
+ 0x014, 0x060, 0x016, 0x04D, 0x063, 0x022, 0x0E0, 0x072,
+ 0x086, 0x038, 0x04D, 0x080, 0x0BE, 0x0E0, 0x02F, 0x0B8,
+ 0x00B, 0x0EE, 0x002, 0x0FB, 0x081, 0x038, 0x0F0, 0x017,
+ 0x0D7, 0x0D7, 0x01E, 0x002, 0x0FA, 0x0FA, 0x0E3, 0x0C0,
+ 0x05F, 0x04C, 0x085, 0x090, 0x002, 0x018, 0x0C8, 0x05B,
+ 0x080, 0x0DA, 0x030, 0x00B, 0x070, 0x01B, 0x04C, 0x022,
+ 0x0D3, 0x04C, 0x033, 0x003, 0x08C, 0x02E, 0x04C, 0x043,
+ 0x026, 0x0D0, 0x0F5, 0x063, 0x066, 0x0D0, 0x095, 0x0A7,
+ 0x0CE, 0x045, 0x033, 0x00A, 0x0D6, 0x016, 0x042, 0x038,
+ 0x06E, 0x0E4, 0x0CE, 0x0BD, 0x059, 0x02C, 0x0D2, 0x0AB,
+ 0x0BA, 0x094, 0x09D, 0x0E6, 0x01A, 0x0B0, 0x017, 0x0D5,
+ 0x04F, 0x05A, 0x08B, 0x009, 0x01A, 0x088, 0x0B9, 0x0C5,
+ 0x042, 0x047, 0x030, 0x0D4, 0x032, 0x016, 0x072, 0x088,
+ 0x065, 0x0BD, 0x059, 0x099, 0x025, 0x0A5, 0x060, 0x02F,
+ 0x0B8, 0x060, 0x0F3, 0x008, 0x0B7, 0x04A, 0x01A, 0x08F,
+ 0x0AB, 0x00D, 0x099, 0x046, 0x051, 0x0AF, 0x038, 0x0A8,
+ 0x08E, 0x090, 0x065, 0x013, 0x052, 0x018, 0x0A0, 0x054,
+ 0x0B1, 0x042, 0x02E, 0x061, 0x0A8, 0x048, 0x0E7, 0x02D,
+ 0x016, 0x0F7, 0x0A8, 0x005, 0x0A5, 0x060, 0x02F, 0x0A4,
+ 0x075, 0x0D2, 0x051, 0x035, 0x073, 0x028, 0x015, 0x076,
+ 0x02B, 0x083, 0x0A2, 0x005, 0x018, 0x005, 0x093, 0x058,
+ 0x0C8, 0x0B8, 0x006, 0x028, 0x063, 0x084, 0x0D8, 0x00B,
+ 0x0EE, 0x002, 0x0FB, 0x080, 0x0BE, 0x0E0, 0x02F, 0x0A0,
+ 0x043, 0x0A7, 0x001, 0x07D, 0x04C, 0x0E3, 0x0C0, 0x05F,
+ 0x070, 0x017, 0x0DC, 0x005, 0x0F4, 0x064, 0x02D, 0x0C0,
+ 0x06D, 0x018, 0x005, 0x0B8, 0x00D, 0x0A5, 0x0BD, 0x06A,
+ 0x023, 0x086, 0x0AA, 0x09E, 0x0B5, 0x011, 0x0A4, 0x06A,
+ 0x0A3, 0x0EA, 0x08A, 0x08D, 0x023, 0x0E1, 0x017, 0x038,
+ 0x034, 0x069, 0x071, 0x098, 0x045, 0x0A6, 0x098, 0x06A,
+ 0x03E, 0x0AC, 0x036, 0x065, 0x019, 0x046, 0x0BC, 0x0E2,
+ 0x0A2, 0x03A, 0x041, 0x094, 0x04D, 0x048, 0x062, 0x081,
+ 0x052, 0x0C5, 0x016, 0x0F7, 0x0A8, 0x08B, 0x04A, 0x054,
+ 0x0F5, 0x0A8, 0x08C, 0x002, 0x0DC, 0x006, 0x0D1, 0x003,
+ 0x09C, 0x0B4, 0x0A9, 0x0EE, 0x00A, 0x095, 0x025, 0x02A,
+ 0x07A, 0x0AD, 0x046, 0x001, 0x067, 0x001, 0x0B5, 0x0D7,
+ 0x0AC, 0x00A, 0x030, 0x00B, 0x06C, 0x049, 0x035, 0x0E6,
+ 0x0B5, 0x067, 0x0F3, 0x000, 0x06C, 0x088, 0x0B9, 0x091,
+ 0x050, 0x0BF, 0x031, 0x01B, 0x032, 0x0A7, 0x0B8, 0x068,
+ 0x095, 0x025, 0x07B, 0x0AD, 0x033, 0x078, 0x0A7, 0x0CD,
+ 0x03E, 0x0D3, 0x08E, 0x09D, 0x034, 0x0E7, 0x04E, 0x0D4,
+ 0x022, 0x0E7, 0x006, 0x084, 0x08E, 0x060, 0x0A8, 0x0FF,
+ 0x038, 0x0AB, 0x083, 0x09C, 0x02A, 0x008, 0x0F9, 0x0D4,
+ 0x020, 0x063, 0x0BC, 0x01A, 0x006, 0x00A, 0x0C0, 0x05F,
+ 0x046, 0x042, 0x0DC, 0x006, 0x0D1, 0x080, 0x05B, 0x080,
+ 0x0DA, 0x022, 0x0E6, 0x01A, 0x084, 0x08E, 0x072, 0x0D1,
+ 0x06F, 0x05A, 0x080, 0x087, 0x01A, 0x0AA, 0x07A, 0x0D4,
+ 0x048, 0x0C8, 0x0D5, 0x047, 0x0D5, 0x015, 0x023, 0x023,
+ 0x0E1, 0x017, 0x038, 0x034, 0x08C, 0x0BA, 0x04B, 0x07B,
+ 0x0D4, 0x002, 0x0D2, 0x08C, 0x022, 0x0DC, 0x006, 0x0D5,
+ 0x01F, 0x056, 0x01B, 0x032, 0x08C, 0x0A3, 0x05E, 0x071,
+ 0x051, 0x01D, 0x020, 0x0CA, 0x026, 0x0A4, 0x031, 0x040,
+ 0x0A9, 0x062, 0x0B0, 0x017, 0x0DF, 0x09E, 0x0F4, 0x0B7,
+ 0x0C9, 0x040, 0x0C7, 0x078, 0x001, 0x081, 0x082, 0x0B8,
+ 0x038, 0x039, 0x049, 0x01C, 0x026, 0x0C0, 0x05F, 0x070,
+ 0x017, 0x0D4, 0x0AB, 0x0E1, 0x02A, 0x0F8, 0x04A, 0x0BE,
+ 0x012, 0x0AF, 0x08F, 0x097, 0x04F, 0x0CB, 0x0A7, 0x001,
+ 0x07D, 0x0DA, 0x080, 0x0AA, 0x091, 0x064, 0x07F, 0x04A,
+ 0x081, 0x0D5, 0x022, 0x0C8, 0x0FE, 0x082, 0x080, 0x025,
+ 0x048, 0x0B2, 0x03E, 0x0BB, 0x0DC, 0x035, 0x02E, 0x094,
+ 0x007, 0x0E8, 0x08A, 0x09C, 0x003, 0x0E2, 0x04B, 0x0A5,
+ 0x077, 0x0AB, 0x0B3, 0x032, 0x0E9, 0x04B, 0x0BD, 0x059,
+ 0x086, 0x084, 0x097, 0x07A, 0x004, 0x0BA, 0x053, 0x0E1,
+ 0x032, 0x0EF, 0x050, 0x0D4, 0x0E6, 0x035, 0x053, 0x0EB,
+ 0x002, 0x09C, 0x0C7, 0x0D7, 0x07A, 0x0B3, 0x030, 0x0D2,
+ 0x05D, 0x0EA, 0x002, 0x0E9, 0x044, 0x05D, 0x016, 0x028,
+ 0x0C0, 0x02C, 0x0E0, 0x036, 0x091, 0x074, 0x045, 0x059,
+ 0x018, 0x0D5, 0x04F, 0x0AC, 0x00A, 0x0C4, 0x035, 0x030,
+ 0x08B, 0x038, 0x069, 0x02B, 0x0BD, 0x059, 0x098, 0x069,
+ 0x02E, 0x0F5, 0x012, 0x0E9, 0x058, 0x067, 0x04A, 0x0EF,
+ 0x050, 0x0D5, 0x08E, 0x03E, 0x01C, 0x0A4, 0x0B0, 0x0CE,
+ 0x093, 0x021, 0x06E, 0x01A, 0x048, 0x01F, 0x0A2, 0x02A,
+ 0x0C3, 0x00D, 0x057, 0x07A, 0x0B3, 0x00D, 0x009, 0x02E,
+ 0x0F4, 0x043, 0x05D, 0x028, 0x08B, 0x083, 0x020, 0x092,
+ 0x038, 0x04D, 0x080, 0x0BE, 0x0E0, 0x02F, 0x0AC, 0x017,
+ 0x049, 0x0B3, 0x0A5, 0x082, 0x0E9, 0x03E, 0x0E9, 0x036,
+ 0x074, 0x0E0, 0x02F, 0x0A6, 0x0CE, 0x09C, 0x005, 0x0F4,
+ 0x0C2, 0x02C, 0x08C, 0x052, 0x057, 0x07A, 0x0D4, 0x08D,
+ 0x048, 0x0FA, 0x0EF, 0x050, 0x0D5, 0x0AE, 0x035, 0x053,
+ 0x0EB, 0x002, 0x086, 0x021, 0x0AA, 0x0EF, 0x056, 0x066,
+ 0x01A, 0x04B, 0x0BD, 0x044, 0x0BA, 0x050, 0x0C4, 0x0E9,
+ 0x053, 0x0EB, 0x002, 0x086, 0x081, 0x0F5, 0x0DE, 0x0A1,
+ 0x0A8, 0x062, 0x01F, 0x05D, 0x0FE, 0x0A2, 0x05D, 0x029,
+ 0x077, 0x0A8, 0x06A, 0x061, 0x08D, 0x040, 0x0FD, 0x011,
+ 0x053, 0x00C, 0x06A, 0x0A7, 0x0D6, 0x005, 0x030, 0x0C7,
+ 0x0D7, 0x07F, 0x0A9, 0x057, 0x04A, 0x05D, 0x0EB, 0x048,
+ 0x01B, 0x00C, 0x07C, 0x08B, 0x09D, 0x08A, 0x053, 0x0EF,
+ 0x066, 0x094, 0x0CA, 0x054, 0x0F5, 0x0A0, 0x0C6, 0x001,
+ 0x06E, 0x003, 0x06A, 0x09F, 0x056, 0x076, 0x065, 0x032,
+ 0x08B, 0x07B, 0x0D2, 0x0C5, 0x0A5, 0x060, 0x02F, 0x0AA,
+ 0x07D, 0x065, 0x0A3, 0x000, 0x0B7, 0x001, 0x0B4, 0x0C8,
+ 0x05A, 0x007, 0x08F, 0x0ED, 0x001, 0x0D5, 0x027, 0x091,
+ 0x067, 0x001, 0x0B4, 0x08B, 0x09C, 0x054, 0x01C, 0x073,
+ 0x0A8, 0x084, 0x05C, 0x0C1, 0x050, 0x0BF, 0x036, 0x056,
+ 0x060, 0x0AB, 0x08C, 0x08B, 0x09C, 0x054, 0x01C, 0x073,
+ 0x0A8, 0x084, 0x05C, 0x0C1, 0x050, 0x0BF, 0x036, 0x056,
+ 0x06C, 0x005, 0x0F5, 0x053, 0x0D6, 0x0A2, 0x030, 0x00B,
+ 0x029, 0x05B, 0x019, 0x0FC, 0x0F6, 0x094, 0x045, 0x0CF,
+ 0x015, 0x00B, 0x0F3, 0x03C, 0x0B3, 0x02A, 0x07A, 0x0C5,
+ 0x046, 0x001, 0x064, 0x08A, 0x031, 0x023, 0x09C, 0x00A,
+ 0x05D, 0x0EA, 0x034, 0x033, 0x02E, 0x095, 0x0C7, 0x0CE,
+ 0x02A, 0x04F, 0x0E6, 0x050, 0x020, 0x0B9, 0x031, 0x00C,
+ 0x09B, 0x0EF, 0x039, 0x014, 0x045, 0x0CE, 0x045, 0x007,
+ 0x01C, 0x0EA, 0x046, 0x087, 0x0AB, 0x01B, 0x036, 0x084,
+ 0x0A7, 0x05E, 0x0AC, 0x096, 0x067, 0x052, 0x0B0, 0x017,
+ 0x0DC, 0x0FE, 0x07B, 0x04A, 0x022, 0x0E7, 0x08A, 0x085,
+ 0x0F9, 0x09E, 0x059, 0x097, 0x07A, 0x08D, 0x00C, 0x0CB,
+ 0x0A5, 0x027, 0x0F3, 0x0A0, 0x044, 0x032, 0x060, 0x0B9,
+ 0x037, 0x0DE, 0x072, 0x028, 0x08B, 0x09C, 0x08A, 0x00E,
+ 0x039, 0x0D4, 0x08C, 0x005, 0x0F7, 0x0E7, 0x0B8, 0x02A,
+ 0x0F9, 0x028, 0x018, 0x0EF, 0x000, 0x030, 0x030, 0x057,
+ 0x007, 0x044, 0x00A, 0x050, 0x08F, 0x0F0, 0x073, 0x091,
+ 0x041, 0x01F, 0x03A, 0x090, 0x045, 0x0C0, 0x0BB, 0x018,
+ 0x0E1, 0x036, 0x002, 0x0FB, 0x0FB, 0x09E, 0x002, 0x0FA,
+ 0x0EE, 0x0E7, 0x0F5, 0x0CF, 0x001, 0x07D, 0x010, 0x05C,
+ 0x0F0, 0x017, 0x0D1, 0x005, 0x0CF, 0x001, 0x07D, 0x053,
+ 0x0EB, 0x02D, 0x018, 0x005, 0x0B8, 0x00D, 0x0A6, 0x042,
+ 0x0DC, 0x006, 0x0D3, 0x017, 0x035, 0x0A8, 0x08B, 0x09C,
+ 0x00A, 0x00E, 0x039, 0x0D4, 0x00C, 0x0FE, 0x07B, 0x04A,
+ 0x022, 0x0E6, 0x055, 0x00B, 0x0F3, 0x031, 0x0B3, 0x060,
+ 0x02F, 0x0BC, 0x07C, 0x0E2, 0x0A4, 0x0FE, 0x065, 0x051,
+ 0x017, 0x038, 0x014, 0x01C, 0x073, 0x0A8, 0x019, 0x0FC,
+ 0x0F6, 0x094, 0x045, 0x0CC, 0x0AA, 0x017, 0x0E6, 0x063,
+ 0x066, 0x00A, 0x0B8, 0x0CC, 0x085, 0x0A1, 0x058, 0x0F6,
+ 0x0A2, 0x035, 0x048, 0x048, 0x07F, 0x04A, 0x089, 0x095,
+ 0x021, 0x021, 0x0FD, 0x005, 0x002, 0x054, 0x09E, 0x045,
+ 0x091, 0x00E, 0x03C, 0x005, 0x0F5, 0x007, 0x040, 0x055,
+ 0x048, 0x052, 0x03E, 0x086, 0x0A0, 0x075, 0x048, 0x052,
+ 0x03E, 0x0B5, 0x000, 0x04A, 0x09C, 0x000, 0x06B, 0x0C7,
+ 0x0CE, 0x045, 0x027, 0x0F3, 0x02A, 0x084, 0x037, 0x035,
+ 0x0DE, 0x0A0, 0x0AB, 0x023, 0x01A, 0x0AE, 0x0F5, 0x083,
+ 0x059, 0x018, 0x0D7, 0x043, 0x0DE, 0x02A, 0x0D0, 0x094,
+ 0x0EB, 0x0DE, 0x005, 0x03A, 0x095, 0x09F, 0x0CC, 0x0C3,
+ 0x020, 0x045, 0x0CC, 0x0AA, 0x017, 0x0E6, 0x066, 0x0CC,
+ 0x043, 0x026, 0x04F, 0x0E7, 0x041, 0x022, 0x02E, 0x070,
+ 0x068, 0x038, 0x0E7, 0x053, 0x0E0, 0x02F, 0x0AB, 0x0BC,
+ 0x012, 0x0D2, 0x0E9, 0x058, 0x00B, 0x0EA, 0x0A7, 0x0AD,
+ 0x045, 0x0A1, 0x01F, 0x0C0, 0x05F, 0x078, 0x039, 0x0C8,
+ 0x0A0, 0x08F, 0x09D, 0x048, 0x01C, 0x024, 0x0EE, 0x0C7,
+ 0x080, 0x0BE, 0x0BA, 0x0F5, 0x06D, 0x066, 0x049, 0x077,
+ 0x00D, 0x04E, 0x0A5, 0x030, 0x009, 0x0B4, 0x0F9, 0x0C5,
+ 0x043, 0x00F, 0x038, 0x0A9, 0x03F, 0x09D, 0x002, 0x0FB,
+ 0x0CE, 0x045, 0x011, 0x073, 0x091, 0x041, 0x0C7, 0x03A,
+ 0x091, 0x09F, 0x0CF, 0x069, 0x044, 0x05C, 0x0F1, 0x050,
+ 0x0BF, 0x033, 0x0CB, 0x032, 0x0A7, 0x0AC, 0x054, 0x090,
+ 0x08D, 0x044, 0x08E, 0x070, 0x029, 0x077, 0x0A8, 0x0D0,
+ 0x0CC, 0x0BA, 0x056, 0x0B0, 0x0B2, 0x09D, 0x08C, 0x086,
+ 0x04C, 0x017, 0x026, 0x077, 0x026, 0x01C, 0x027, 0x01C,
+ 0x024, 0x09E, 0x023, 0x061, 0x0BE, 0x08E, 0x012, 0x04F,
+ 0x011, 0x087, 0x01C, 0x0EA, 0x05C, 0x005, 0x0F5, 0x0D7,
+ 0x0B8, 0x06A, 0x075, 0x029, 0x077, 0x0AB, 0x00D, 0x099,
+ 0x074, 0x0A5, 0x04F, 0x072, 0x0A0, 0x0AA, 0x04A, 0x0C6,
+ 0x0F3, 0x066, 0x066, 0x0C6, 0x039, 0x082, 0x0AF, 0x075,
+ 0x0A6, 0x06F, 0x014, 0x06B, 0x0CE, 0x005, 0x070, 0x073,
+ 0x096, 0x082, 0x03E, 0x075, 0x028, 0x0E1, 0x03A, 0x0A7,
+ 0x0AD, 0x044, 0x060, 0x016, 0x052, 0x0B6, 0x01D, 0x07A,
+ 0x0B6, 0x0B3, 0x024, 0x0BB, 0x086, 0x0A7, 0x052, 0x098,
+ 0x004, 0x0DA, 0x07C, 0x0E2, 0x0A1, 0x087, 0x09C, 0x055,
+ 0x0F7, 0x09C, 0x0B5, 0x0AC, 0x02C, 0x095, 0x033, 0x0B9,
+ 0x031, 0x005, 0x0D9, 0x053, 0x0D6, 0x0A2, 0x030, 0x00B,
+ 0x029, 0x05B, 0x002, 0x02E, 0x061, 0x05A, 0x017, 0x0E6,
+ 0x09C, 0x0B3, 0x02A, 0x07A, 0x0C5, 0x040, 0x021, 0x0A8,
+ 0x091, 0x0CE, 0x005, 0x027, 0x0F3, 0x0A5, 0x088, 0x064,
+ 0x0C1, 0x072, 0x065, 0x04F, 0x058, 0x014, 0x00C, 0x08D,
+ 0x07E, 0x0F3, 0x081, 0x044, 0x05C, 0x0EF, 0x041, 0x0C7,
+ 0x03A, 0x0BE, 0x002, 0x0FA, 0x0A9, 0x0EA, 0x0CE, 0x0CC,
+ 0x0A9, 0x029, 0x053, 0x0D6, 0x0A2, 0x046, 0x047, 0x0DD,
+ 0x07A, 0x0C0, 0x0A3, 0x000, 0x086, 0x0E2, 0x09B, 0x029,
+ 0x078, 0x08B, 0x081, 0x009, 0x098, 0x070, 0x09B, 0x029,
+ 0x079, 0x05D, 0x0D9, 0x072, 0x0ED, 0x094, 0x0BC, 0x0B9,
+ 0x076, 0x013, 0x03B, 0x02A, 0x05D, 0x0B2, 0x097, 0x095,
+ 0x02E, 0x0D9, 0x04B, 0x0CA, 0x07D, 0x05B, 0x059, 0x094,
+ 0x0CA, 0x01C, 0x024, 0x0EE, 0x0C7, 0x094, 0x0BC, 0x0C0,
+ 0x026, 0x0D3, 0x0E7, 0x015, 0x00C, 0x03C, 0x0E2, 0x0AC,
+ 0x0FE, 0x07B, 0x04A, 0x022, 0x0E7, 0x08A, 0x085, 0x0F9,
+ 0x09E, 0x059, 0x097, 0x07A, 0x08D, 0x00C, 0x0CB, 0x0A5,
+ 0x027, 0x0F3, 0x0A0, 0x041, 0x072, 0x062, 0x019, 0x037,
+ 0x0DE, 0x070, 0x028, 0x08B, 0x09C, 0x08A, 0x00E, 0x039,
+ 0x0D4, 0x08D, 0x00F, 0x056, 0x036, 0x06D, 0x009, 0x04E,
+ 0x0BD, 0x059, 0x02C, 0x0CE, 0x0A5, 0x06B, 0x00B, 0x022,
+ 0x0D9, 0x09D, 0x0C9, 0x0B2, 0x097, 0x0BE, 0x0F3, 0x081,
+ 0x04A, 0x07D, 0x065, 0x0A3, 0x000, 0x093, 0x08F, 0x067,
+ 0x029, 0x078, 0x0C2, 0x04D, 0x0C1, 0x0D1, 0x006, 0x082,
+ 0x031, 0x0AF, 0x007, 0x038, 0x034, 0x011, 0x0F3, 0x0A8,
+ 0x02A, 0x09E, 0x0A8, 0x066, 0x01A, 0x0A4, 0x0A5, 0x04F,
+ 0x05A, 0x00C, 0x011, 0x08F, 0x0AA, 0x07B, 0x0D0, 0x065,
+ 0x049, 0x045, 0x0BD, 0x0E9, 0x062, 0x0D2, 0x0B1, 0x09E,
+ 0x06C, 0x0CC, 0x0C6, 0x019, 0x087, 0x009, 0x0C3, 0x08E,
+ 0x075, 0x041, 0x01F, 0x03A, 0x0A5, 0x013, 0x0D5, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055, 0x055,
+ 0x055, 0x055, 0x055, 0x05A, 0x0CC, 0x090
+ };
+
+#endif /* defined(CONFIG_SMCTR) || defined(CONFIG_SMCTR_MODULE) */
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
new file mode 100644
index 000000000000..df43b449e429
--- /dev/null
+++ b/drivers/net/tokenring/tms380tr.c
@@ -0,0 +1,2410 @@
+/*
+ * tms380tr.c: A network driver library for Texas Instruments TMS380-based
+ * Token Ring Adapters.
+ *
+ * Originally sktr.c: Written 1997 by Christoph Goos
+ *
+ * A fine result of the Linux Systems Network Architecture Project.
+ * http://www.linux-sna.org
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The following modules are currently available for card support:
+ * - tmspci (Generic PCI card support)
+ * - abyss (Madge PCI support)
+ * - tmsisa (SysKonnect TR4/16 ISA)
+ *
+ * Sources:
+ * - The hardware related parts of this driver are taken from
+ * the SysKonnect Token Ring driver for Windows NT.
+ * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this
+ * driver, as well as the 'skeleton.c' driver by Donald Becker.
+ * - Also various other drivers in the linux source tree were taken
+ * as samples for some tasks.
+ * - TI TMS380 Second-Generation Token Ring User's Guide
+ * - TI datasheets for respective chips
+ * - David Hein at Texas Instruments
+ * - Various Madge employees
+ *
+ * Maintainer(s):
+ * JS Jay Schulist jschlst@samba.org
+ * CG Christoph Goos cgoos@syskonnect.de
+ * AF Adam Fritzler mid@auk.cx
+ * MLP Mike Phillips phillim@amtrak.com
+ * JF Jochen Friedrich jochen@scram.de
+ *
+ * Modification History:
+ * 29-Aug-97 CG Created
+ * 04-Apr-98 CG Fixed problems caused by tok_timer_check
+ * 10-Apr-98 CG Fixed lockups at cable disconnection
+ * 27-May-98 JS Formatted to Linux Kernel Format
+ * 31-May-98 JS Hacked in PCI support
+ * 16-Jun-98 JS Modularized for multiple cards with one driver
+ * Sep-99 AF Renamed to tms380tr (supports more than SK's)
+ * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support
+ * Fixed a bug causing double copies on PCI
+ * Fixed for new multicast stuff (2.2/2.3)
+ * 25-Sep-99 AF Upped TPL_NUM from 3 to 9
+ * Removed extraneous 'No free TPL'
+ * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized
+ * parts of the initialization procedure.
+ * 30-Dec-99 AF Turned tms380tr into a library ala 8390.
+ * Madge support is provided in the abyss module
+ * Generic PCI support is in the tmspci module.
+ * 30-Nov-00 JF Updated PCI code to support IO MMU via
+ * pci_map_single(). Alpha uses this MMU for ISA
+ * as well.
+ * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some
+ * cleanup.
+ * 13-Jan-02 JF Add spinlock to fix race condition.
+ * 09-Nov-02 JF Fixed printks to not SPAM the console during
+ * normal operation.
+ * 30-Dec-02 JF Removed incorrect __init from
+ * tms380tr_init_card.
+ *
+ * To do:
+ * 1. Multi/Broadcast packet handling (this may have fixed itself)
+ * 2. Write a sktrisa module that includes the old ISA support (done)
+ * 3. Allow modules to load their own microcode
+ * 4. Speed up the BUD process -- freezing the kernel for 3+sec is
+ * quite unacceptable.
+ * 5. Still a few remaining stalls when the cable is unplugged.
+ */
+
+#ifdef MODULE
+static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n";
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/trdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "tms380tr.h" /* Our Stuff */
+
+/* Use 0 for production, 1 for verification, 2 for debug, and
+ * 3 for very verbose debug.
+ */
+#ifndef TMS380TR_DEBUG
+#define TMS380TR_DEBUG 0
+#endif
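+/* The guard above lets the debug level be overridden at build time,
+ * e.g. by compiling this file with -DTMS380TR_DEBUG=2 for more verbose
+ * output.
+ */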
+static unsigned int tms380tr_debug = TMS380TR_DEBUG;
+
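+/* Dummy device used only to give request_firmware() something to hang
+ * the "tms380tr.bin" firmware request on; see tms380tr_reset_adapter().
+ */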
+static struct device tms_device;
+
+/* Index to functions, as function prototypes.
+ * Alphabetical by function name.
+ */
+
+/* "A" */
+/* "B" */
+static int tms380tr_bringup_diags(struct net_device *dev);
+/* "C" */
+static void tms380tr_cancel_tx_queue(struct net_local* tp);
+static int tms380tr_chipset_init(struct net_device *dev);
+static void tms380tr_chk_irq(struct net_device *dev);
+static void tms380tr_chk_outstanding_cmds(struct net_device *dev);
+static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr);
+static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType);
+int tms380tr_close(struct net_device *dev);
+static void tms380tr_cmd_status_irq(struct net_device *dev);
+/* "D" */
+static void tms380tr_disable_interrupts(struct net_device *dev);
+#if TMS380TR_DEBUG > 0
+static void tms380tr_dump(unsigned char *Data, int length);
+#endif
+/* "E" */
+static void tms380tr_enable_interrupts(struct net_device *dev);
+static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command);
+static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue);
+/* "F" */
+/* "G" */
+static struct net_device_stats *tms380tr_get_stats(struct net_device *dev);
+/* "H" */
+static int tms380tr_hardware_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+/* "I" */
+static int tms380tr_init_adapter(struct net_device *dev);
+static void tms380tr_init_ipb(struct net_local *tp);
+static void tms380tr_init_net_local(struct net_device *dev);
+static void tms380tr_init_opb(struct net_device *dev);
+/* "M" */
+/* "O" */
+int tms380tr_open(struct net_device *dev);
+static void tms380tr_open_adapter(struct net_device *dev);
+/* "P" */
+/* "R" */
+static void tms380tr_rcv_status_irq(struct net_device *dev);
+static int tms380tr_read_ptr(struct net_device *dev);
+static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
+ unsigned short Address, int Length);
+static int tms380tr_reset_adapter(struct net_device *dev);
+static void tms380tr_reset_interrupt(struct net_device *dev);
+static void tms380tr_ring_status_irq(struct net_device *dev);
+/* "S" */
+static int tms380tr_send_packet(struct sk_buff *skb, struct net_device *dev);
+static void tms380tr_set_multicast_list(struct net_device *dev);
+static int tms380tr_set_mac_address(struct net_device *dev, void *addr);
+/* "T" */
+static void tms380tr_timer_chk(unsigned long data);
+static void tms380tr_timer_end_wait(unsigned long data);
+static void tms380tr_tx_status_irq(struct net_device *dev);
+/* "U" */
+static void tms380tr_update_rcv_stats(struct net_local *tp,
+ unsigned char DataPtr[], unsigned int Length);
+/* "W" */
+void tms380tr_wait(unsigned long time);
+static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status);
+static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status);
+
+#define SIFREADB(reg) (((struct net_local *)dev->priv)->sifreadb(dev, reg))
+#define SIFWRITEB(val, reg) (((struct net_local *)dev->priv)->sifwriteb(dev, val, reg))
+#define SIFREADW(reg) (((struct net_local *)dev->priv)->sifreadw(dev, reg))
+#define SIFWRITEW(val, reg) (((struct net_local *)dev->priv)->sifwritew(dev, val, reg))
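+/* The SIF (System Interface) registers are never accessed directly: the
+ * card-specific attachment modules (tmspci, abyss, tmsisa, ...) install
+ * their own sifreadb/sifwriteb/sifreadw/sifwritew routines in net_local,
+ * and the macros above simply dispatch through those function pointers.
+ */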
+
+
+
+#if 0 /* TMS380TR_DEBUG > 0 */
+static int madgemc_sifprobe(struct net_device *dev)
+{
+ unsigned char old, chk1, chk2;
+
+ old = SIFREADB(SIFADR); /* Get the old SIFADR value */
+
+ chk1 = 0; /* Begin with check value 0 */
+ do {
+ madgemc_setregpage(dev, 0);
+ /* Write new SIFADR value */
+ SIFWRITEB(chk1, SIFADR);
+ chk2 = SIFREADB(SIFADR);
+ if (chk2 != chk1)
+ return -1;
+
+ madgemc_setregpage(dev, 1);
+ /* Read, invert and write */
+ chk2 = SIFREADB(SIFADD);
+ if (chk2 != chk1)
+ return -1;
+
+ madgemc_setregpage(dev, 0);
+ chk2 ^= 0x0FE;
+ SIFWRITEB(chk2, SIFADR);
+
+ /* Read, invert and compare */
+ madgemc_setregpage(dev, 1);
+ chk2 = SIFREADB(SIFADD);
+ madgemc_setregpage(dev, 0);
+ chk2 ^= 0x0FE;
+
+ if(chk1 != chk2)
+ return (-1); /* No adapter */
+ chk1 -= 2;
+ } while(chk1 != 0); /* Repeat 128 times (all byte values) */
+
+ madgemc_setregpage(dev, 0); /* sanity */
+ /* Restore the SIFADR value */
+ SIFWRITEB(old, SIFADR);
+
+ return (0);
+}
+#endif
+
+/*
+ * Open/initialize the board. This is called sometime after
+ * booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+int tms380tr_open(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ /* init the spinlock */
+ spin_lock_init(&tp->lock);
+ init_timer(&tp->timer);
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+
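+	/* On ISA cards the DMA channel is only claimed and put into cascade
+	 * mode: the adapter is a bus master and drives the transfers itself,
+	 * the motherboard DMA controller merely grants it the bus.
+	 */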
+#ifdef CONFIG_ISA
+ if(dev->dma > 0)
+ {
+ unsigned long flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+#endif
+
+ err = tms380tr_chipset_init(dev);
+ if(err)
+ {
+ printk(KERN_INFO "%s: Chipset initialization error\n",
+ dev->name);
+ return (-1);
+ }
+
+ tp->timer.expires = jiffies + 30*HZ;
+ tp->timer.function = tms380tr_timer_end_wait;
+ tp->timer.data = (unsigned long)dev;
+ add_timer(&tp->timer);
+
+ printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n",
+ dev->name, tms380tr_read_ptr(dev));
+
+ tms380tr_enable_interrupts(dev);
+ tms380tr_open_adapter(dev);
+
+ netif_start_queue(dev);
+
+ /* Wait for interrupt from hardware. If interrupt does not come,
+ * there will be a timeout from the timer.
+ */
+ tp->Sleeping = 1;
+ interruptible_sleep_on(&tp->wait_for_tok_int);
+ del_timer(&tp->timer);
+
+ /* If AdapterVirtOpenFlag is 1, the adapter is now open for use */
+ if(tp->AdapterVirtOpenFlag == 0)
+ {
+ tms380tr_disable_interrupts(dev);
+ return (-1);
+ }
+
+ tp->StartTime = jiffies;
+
+ /* Start function control timer */
+ tp->timer.expires = jiffies + 2*HZ;
+ tp->timer.function = tms380tr_timer_chk;
+ tp->timer.data = (unsigned long)dev;
+ add_timer(&tp->timer);
+
+ return (0);
+}
+
+/*
+ * Timeout function while waiting for event
+ */
+static void tms380tr_timer_end_wait(unsigned long data)
+{
+ struct net_device *dev = (struct net_device*)data;
+ struct net_local *tp = netdev_priv(dev);
+
+ if(tp->Sleeping)
+ {
+ tp->Sleeping = 0;
+ wake_up_interruptible(&tp->wait_for_tok_int);
+ }
+
+ return;
+}
+
+/*
+ * Initialize the chipset
+ */
+static int tms380tr_chipset_init(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ tms380tr_init_ipb(tp);
+ tms380tr_init_opb(dev);
+ tms380tr_init_net_local(dev);
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
+ err = tms380tr_reset_adapter(dev);
+ if(err < 0)
+ return (-1);
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
+ err = tms380tr_bringup_diags(dev);
+ if(err < 0)
+ return (-1);
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
+ err = tms380tr_init_adapter(dev);
+ if(err < 0)
+ return (-1);
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Done!\n", dev->name);
+ return (0);
+}
+
+/*
+ * Initializes the net_local structure.
+ */
+static void tms380tr_init_net_local(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int i;
+ dma_addr_t dmabuf;
+
+ tp->scb.CMD = 0;
+ tp->scb.Parm[0] = 0;
+ tp->scb.Parm[1] = 0;
+
+ tp->ssb.STS = 0;
+ tp->ssb.Parm[0] = 0;
+ tp->ssb.Parm[1] = 0;
+ tp->ssb.Parm[2] = 0;
+
+ tp->CMDqueue = 0;
+
+ tp->AdapterOpenFlag = 0;
+ tp->AdapterVirtOpenFlag = 0;
+ tp->ScbInUse = 0;
+ tp->OpenCommandIssued = 0;
+ tp->ReOpenInProgress = 0;
+ tp->HaltInProgress = 0;
+ tp->TransmitHaltScheduled = 0;
+ tp->LobeWireFaultLogged = 0;
+ tp->LastOpenStatus = 0;
+ tp->MaxPacketSize = DEFAULT_PACKET_SIZE;
+
+ /* Create circular chain of transmit lists */
+ for (i = 0; i < TPL_NUM; i++)
+ {
+ tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
+ tp->Tpl[i].Status = 0;
+ tp->Tpl[i].FrameSize = 0;
+ tp->Tpl[i].FragList[0].DataCount = 0;
+ tp->Tpl[i].FragList[0].DataAddr = 0;
+ tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM];
+ tp->Tpl[i].MData = NULL;
+ tp->Tpl[i].TPLIndex = i;
+ tp->Tpl[i].DMABuff = 0;
+ tp->Tpl[i].BusyFlag = 0;
+ }
+
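+	/* TplFree == TplBusy means no transmits are pending; the two pointers
+	 * chase each other around the circular list as TPLs are queued and
+	 * completed.
+	 */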
+ tp->TplFree = tp->TplBusy = &tp->Tpl[0];
+
+ /* Create circular chain of receive lists */
+ for (i = 0; i < RPL_NUM; i++)
+ {
+ tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
+ tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
+ tp->Rpl[i].FrameSize = 0;
+ tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
+
+ /* Alloc skb and point adapter to data area */
+ tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize);
+ tp->Rpl[i].DMABuff = 0;
+
+ /* skb == NULL ? then use local buffer */
+ if(tp->Rpl[i].Skb == NULL)
+ {
+ tp->Rpl[i].SkbStat = SKB_UNAVAILABLE;
+ tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
+ tp->Rpl[i].MData = tp->LocalRxBuffers[i];
+ }
+ else /* SKB != NULL */
+ {
+ tp->Rpl[i].Skb->dev = dev;
+ skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
+
+ /* data unreachable for DMA ? then use local buffer */
+ dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+ if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
+ {
+ tp->Rpl[i].SkbStat = SKB_DATA_COPY;
+ tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
+ tp->Rpl[i].MData = tp->LocalRxBuffers[i];
+ }
+ else /* DMA directly in skb->data */
+ {
+ tp->Rpl[i].SkbStat = SKB_DMA_DIRECT;
+ tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf);
+ tp->Rpl[i].MData = tp->Rpl[i].Skb->data;
+ tp->Rpl[i].DMABuff = dmabuf;
+ }
+ }
+
+ tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM];
+ tp->Rpl[i].RPLIndex = i;
+ }
+
+ tp->RplHead = &tp->Rpl[0];
+ tp->RplTail = &tp->Rpl[RPL_NUM-1];
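+	/* The tail RPL is written without RX_VALID, so the adapter stops
+	 * there rather than wrapping onto receive buffers the driver has
+	 * not processed yet.
+	 */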
+ tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
+
+ return;
+}
+
+/*
+ * Initializes the initialisation parameter block.
+ */
+static void tms380tr_init_ipb(struct net_local *tp)
+{
+ tp->ipb.Init_Options = BURST_MODE;
+ tp->ipb.CMD_Status_IV = 0;
+ tp->ipb.TX_IV = 0;
+ tp->ipb.RX_IV = 0;
+ tp->ipb.Ring_Status_IV = 0;
+ tp->ipb.SCB_Clear_IV = 0;
+ tp->ipb.Adapter_CHK_IV = 0;
+ tp->ipb.RX_Burst_Size = BURST_SIZE;
+ tp->ipb.TX_Burst_Size = BURST_SIZE;
+ tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
+ tp->ipb.SCB_Addr = 0;
+ tp->ipb.SSB_Addr = 0;
+
+ return;
+}
+
+/*
+ * Initializes the open parameter block.
+ */
+static void tms380tr_init_opb(struct net_device *dev)
+{
+ struct net_local *tp;
+ unsigned long Addr;
+ unsigned short RplSize = RPL_SIZE;
+ unsigned short TplSize = TPL_SIZE;
+ unsigned short BufferSize = BUFFER_SIZE;
+ int i;
+
+ tp = netdev_priv(dev);
+
+ tp->ocpl.OPENOptions = 0;
+ tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION;
+ tp->ocpl.FullDuplex = 0;
+ tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF;
+
+ /*
+ * Set node address
+ *
+ * We go ahead and put it in the OPB even though on
+ * most of the generic adapters this isn't required.
+	 * It's simpler this way. -- ASF
+ */
+ for (i=0;i<6;i++)
+ tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i];
+
+ tp->ocpl.GroupAddr = 0;
+ tp->ocpl.FunctAddr = 0;
+ tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize);
+ tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize);
+ tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize);
+ tp->ocpl.Reserved = 0;
+ tp->ocpl.TXBufMin = TX_BUF_MIN;
+ tp->ocpl.TXBufMax = TX_BUF_MAX;
+
+ Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer);
+
+ tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
+ tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
+
+ return;
+}
+
+/*
+ * Send OPEN command to adapter
+ */
+static void tms380tr_open_adapter(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ if(tp->OpenCommandIssued)
+ return;
+
+ tp->OpenCommandIssued = 1;
+ tms380tr_exec_cmd(dev, OC_OPEN);
+
+ return;
+}
+
+/*
+ * Clear the adapter's interrupt flag. Clear system interrupt enable
+ * (SINTEN): disable adapter-to-system interrupts.
+ */
+static void tms380tr_disable_interrupts(struct net_device *dev)
+{
+ SIFWRITEB(0, SIFACL);
+
+ return;
+}
+
+/*
+ * Set the adapter's interrupt flag. Set system interrupt enable
+ * (SINTEN): enable adapter-to-system interrupts.
+ */
+static void tms380tr_enable_interrupts(struct net_device *dev)
+{
+ SIFWRITEB(ACL_SINTEN, SIFACL);
+
+ return;
+}
+
+/*
+ * Put command in command queue, try to execute it.
+ */
+static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tp->CMDqueue |= Command;
+ tms380tr_chk_outstanding_cmds(dev);
+
+ return;
+}
+
+static void tms380tr_timeout(struct net_device *dev)
+{
+ /*
+ * If we get here, some higher level has decided we are broken.
+ * There should really be a "kick me" function call instead.
+ *
+ * Resetting the token ring adapter takes a long time so just
+ * fake transmission time and go on trying. Our own timeout
+ * routine is in tms380tr_timer_chk()
+ */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * Gets skb from system, queues it and checks if it can be sent
+ */
+static int tms380tr_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ int err;
+
+ err = tms380tr_hardware_send_packet(skb, dev);
+ if(tp->TplFree->NextTPLPtr->BusyFlag)
+ netif_stop_queue(dev);
+ return (err);
+}
+
+/*
+ * Move frames into adapter tx queue
+ */
+static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ TPL *tpl;
+ short length;
+ unsigned char *buf;
+ unsigned long flags;
+ int i;
+ dma_addr_t dmabuf, newbuf;
+ struct net_local *tp = netdev_priv(dev);
+
+ /* Try to get a free TPL from the chain.
+ *
+ * NOTE: We *must* always leave one unused TPL in the chain,
+ * because otherwise the adapter might send frames twice.
+ */
+ spin_lock_irqsave(&tp->lock, flags);
+ if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */
+ if (tms380tr_debug > 0)
+ printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ return 1;
+ }
+
+ dmabuf = 0;
+
+ /* Is buffer reachable for Busmaster-DMA? */
+
+ length = skb->len;
+ dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE);
+ if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
+ /* Copy frame to local buffer */
+ pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE);
+ dmabuf = 0;
+ i = tp->TplFree->TPLIndex;
+ buf = tp->LocalTxBuffers[i];
+ memcpy(buf, skb->data, length);
+ newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer;
+ }
+ else {
+ /* Send direct from skb->data */
+ newbuf = dmabuf;
+ buf = skb->data;
+ }
+ /* Source address in packet? */
+ tms380tr_chk_src_addr(buf, dev->dev_addr);
+ tp->LastSendTime = jiffies;
+ tpl = tp->TplFree; /* Get the "free" TPL */
+ tpl->BusyFlag = 1; /* Mark TPL as busy */
+ tp->TplFree = tpl->NextTPLPtr;
+
+ /* Save the skb for delayed return of skb to system */
+ tpl->Skb = skb;
+ tpl->DMABuff = dmabuf;
+ tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length);
+ tpl->FragList[0].DataAddr = htonl(newbuf);
+
+ /* Write the data length in the transmit list. */
+ tpl->FrameSize = cpu_to_be16((unsigned short)length);
+ tpl->MData = buf;
+
+ /* Transmit the frame and set the status values. */
+ tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME
+ | TX_END_FRAME | TX_PASS_SRC_ADDR
+ | TX_FRAME_IRQ);
+
+ /* Let adapter send the frame. */
+ tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Write the given value to the 'Status' field of the specified TPL.
+ * NOTE: This function should be used whenever the status of any TPL must be
+ * modified by the driver, because the compiler may otherwise change the
+ * order of instructions such that writing the TPL status may be executed at
+ * an undesirable time. When this function is used, the status is always
+ * written when the function is called.
+ */
+static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
+{
+ tpl->Status = Status;
+}
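+/* Keeping the store in this out-of-line helper is what prevents the
+ * compiler from moving it; a compiler barrier or a volatile status field
+ * would be alternative ways of getting the same ordering.
+ */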
+
+static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
+{
+ unsigned char SRBit;
+
+	if((((unsigned long)frame[8]) & ~0x80) != 0)	/* First byte of source address already set (ignoring the SR bit)? */
+		return;
+	if((unsigned short)frame[12] != 0)	/* Fifth byte of source address already set? */
+ return;
+
+ SRBit = frame[8] & 0x80;
+ memcpy(&frame[8], hw_addr, 6);
+ frame[8] |= SRBit;
+
+ return;
+}
+
+/*
+ * The timer routine: Check if adapter still open and working, reopen if not.
+ */
+static void tms380tr_timer_chk(unsigned long data)
+{
+ struct net_device *dev = (struct net_device*)data;
+ struct net_local *tp = netdev_priv(dev);
+
+ if(tp->HaltInProgress)
+ return;
+
+ tms380tr_chk_outstanding_cmds(dev);
+ if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies)
+ && (tp->TplFree != tp->TplBusy))
+ {
+ /* Anything to send, but stalled too long */
+ tp->LastSendTime = jiffies;
+ tms380tr_exec_cmd(dev, OC_CLOSE); /* Does reopen automatically */
+ }
+
+ tp->timer.expires = jiffies + 2*HZ;
+ add_timer(&tp->timer);
+
+ if(tp->AdapterOpenFlag || tp->ReOpenInProgress)
+ return;
+ tp->ReOpenInProgress = 1;
+ tms380tr_open_adapter(dev);
+
+ return;
+}
+
+/*
+ * The typical workload of the driver: Handle the network interface interrupts.
+ */
+irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *tp;
+ unsigned short irq_type;
+ int handled = 0;
+
+ if(dev == NULL) {
+		printk(KERN_INFO "tms380tr: irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ tp = netdev_priv(dev);
+
+ irq_type = SIFREADW(SIFSTS);
+
+ while(irq_type & STS_SYSTEM_IRQ) {
+ handled = 1;
+ irq_type &= STS_IRQ_MASK;
+
+ if(!tms380tr_chk_ssb(tp, irq_type)) {
+ printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name);
+ break;
+ }
+
+ switch(irq_type) {
+ case STS_IRQ_RECEIVE_STATUS:
+ tms380tr_reset_interrupt(dev);
+ tms380tr_rcv_status_irq(dev);
+ break;
+
+ case STS_IRQ_TRANSMIT_STATUS:
+ /* Check if TRANSMIT.HALT command is complete */
+ if(tp->ssb.Parm[0] & COMMAND_COMPLETE) {
+ tp->TransmitCommandActive = 0;
+ tp->TransmitHaltScheduled = 0;
+
+ /* Issue a new transmit command. */
+ tms380tr_exec_cmd(dev, OC_TRANSMIT);
+ }
+
+ tms380tr_reset_interrupt(dev);
+ tms380tr_tx_status_irq(dev);
+ break;
+
+ case STS_IRQ_COMMAND_STATUS:
+ /* The SSB contains status of last command
+ * other than receive/transmit.
+ */
+ tms380tr_cmd_status_irq(dev);
+ break;
+
+ case STS_IRQ_SCB_CLEAR:
+ /* The SCB is free for another command. */
+ tp->ScbInUse = 0;
+ tms380tr_chk_outstanding_cmds(dev);
+ break;
+
+ case STS_IRQ_RING_STATUS:
+ tms380tr_ring_status_irq(dev);
+ break;
+
+ case STS_IRQ_ADAPTER_CHECK:
+ tms380tr_chk_irq(dev);
+ break;
+
+ case STS_IRQ_LLC_STATUS:
+ printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n");
+ break;
+
+ case STS_IRQ_TIMER:
+ printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n");
+ break;
+
+ case STS_IRQ_RECEIVE_PENDING:
+ printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n");
+ break;
+
+ default:
+ printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type);
+ break;
+ }
+
+ /* Reset system interrupt if not already done. */
+ if(irq_type != STS_IRQ_TRANSMIT_STATUS
+ && irq_type != STS_IRQ_RECEIVE_STATUS) {
+ tms380tr_reset_interrupt(dev);
+ }
+
+ irq_type = SIFREADW(SIFSTS);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command.
+ */
+static void tms380tr_reset_interrupt(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ SSB *ssb = &tp->ssb;
+
+ /*
+ * [Workaround for "Data Late"]
+ * Set all fields of the SSB to well-defined values so we can
+ * check if the adapter has written the SSB.
+ */
+
+ ssb->STS = (unsigned short) -1;
+ ssb->Parm[0] = (unsigned short) -1;
+ ssb->Parm[1] = (unsigned short) -1;
+ ssb->Parm[2] = (unsigned short) -1;
+
+ /* Free SSB by issuing SSB_CLEAR command after reading IRQ code
+ * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
+ */
+ tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
+
+ return;
+}
+
+/*
+ * Check if the SSB has actually been written by the adapter.
+ */
+static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType)
+{
+ SSB *ssb = &tp->ssb; /* The address of the SSB. */
+
+ /* C 0 1 2 INTERRUPT CODE
+ * - - - - --------------
+ * 1 1 1 1 TRANSMIT STATUS
+ * 1 1 1 1 RECEIVE STATUS
+ * 1 ? ? 0 COMMAND STATUS
+ * 0 0 0 0 SCB CLEAR
+ * 1 1 0 0 RING STATUS
+ * 0 0 0 0 ADAPTER CHECK
+ *
+ * 0 = SSB field not affected by interrupt
+ * 1 = SSB field is affected by interrupt
+ *
+ * C = SSB ADDRESS +0: COMMAND
+ * 0 = SSB ADDRESS +2: STATUS 0
+ * 1 = SSB ADDRESS +4: STATUS 1
+ * 2 = SSB ADDRESS +6: STATUS 2
+ */
+
+ /* Check if this interrupt does use the SSB. */
+
+ if(IrqType != STS_IRQ_TRANSMIT_STATUS
+ && IrqType != STS_IRQ_RECEIVE_STATUS
+ && IrqType != STS_IRQ_COMMAND_STATUS
+ && IrqType != STS_IRQ_RING_STATUS)
+ {
+ return (1); /* SSB not involved. */
+ }
+
+ /* Note: All fields of the SSB have been set to all ones (-1) after it
+ * has last been used by the software (see DriverIsr()).
+ *
+ * Check if the affected SSB fields are still unchanged.
+ */
+
+ if(ssb->STS == (unsigned short) -1)
+ return (0); /* Command field not yet available. */
+ if(IrqType == STS_IRQ_COMMAND_STATUS)
+ return (1); /* Status fields not always affected. */
+ if(ssb->Parm[0] == (unsigned short) -1)
+ return (0); /* Status 1 field not yet available. */
+ if(IrqType == STS_IRQ_RING_STATUS)
+ return (1); /* Status 2 & 3 fields not affected. */
+
+ /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
+ if(ssb->Parm[1] == (unsigned short) -1)
+ return (0); /* Status 2 field not yet available. */
+ if(ssb->Parm[2] == (unsigned short) -1)
+ return (0); /* Status 3 field not yet available. */
+
+ return (1); /* All SSB fields have been written by the adapter. */
+}
+
+/*
+ * Evaluates the command results status in the SSB status field.
+ */
+static void tms380tr_cmd_status_irq(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned short ssb_cmd, ssb_parm_0;
+ unsigned short ssb_parm_1;
+ char *open_err = "Open error -";
+ char *code_err = "Open code -";
+
+ /* Copy the ssb values to local variables */
+ ssb_cmd = tp->ssb.STS;
+ ssb_parm_0 = tp->ssb.Parm[0];
+ ssb_parm_1 = tp->ssb.Parm[1];
+
+ if(ssb_cmd == OPEN)
+ {
+ tp->Sleeping = 0;
+ if(!tp->ReOpenInProgress)
+ wake_up_interruptible(&tp->wait_for_tok_int);
+
+ tp->OpenCommandIssued = 0;
+ tp->ScbInUse = 0;
+
+ if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION)
+ {
+ /* Success, the adapter is open. */
+ tp->LobeWireFaultLogged = 0;
+ tp->AdapterOpenFlag = 1;
+ tp->AdapterVirtOpenFlag = 1;
+ tp->TransmitCommandActive = 0;
+ tms380tr_exec_cmd(dev, OC_TRANSMIT);
+ tms380tr_exec_cmd(dev, OC_RECEIVE);
+
+ if(tp->ReOpenInProgress)
+ tp->ReOpenInProgress = 0;
+
+ return;
+ }
+ else /* The adapter did not open. */
+ {
+ if(ssb_parm_0 & NODE_ADDR_ERROR)
+ printk(KERN_INFO "%s: Node address error\n",
+ dev->name);
+ if(ssb_parm_0 & LIST_SIZE_ERROR)
+ printk(KERN_INFO "%s: List size error\n",
+ dev->name);
+ if(ssb_parm_0 & BUF_SIZE_ERROR)
+ printk(KERN_INFO "%s: Buffer size error\n",
+ dev->name);
+ if(ssb_parm_0 & TX_BUF_COUNT_ERROR)
+ printk(KERN_INFO "%s: Tx buffer count error\n",
+ dev->name);
+ if(ssb_parm_0 & INVALID_OPEN_OPTION)
+ printk(KERN_INFO "%s: Invalid open option\n",
+ dev->name);
+ if(ssb_parm_0 & OPEN_ERROR)
+ {
+ /* Show the open phase. */
+ switch(ssb_parm_0 & OPEN_PHASES_MASK)
+ {
+ case LOBE_MEDIA_TEST:
+ if(!tp->LobeWireFaultLogged)
+ {
+ tp->LobeWireFaultLogged = 1;
+ printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err);
+ }
+ tp->ReOpenInProgress = 1;
+ tp->AdapterOpenFlag = 0;
+ tp->AdapterVirtOpenFlag = 1;
+ tms380tr_open_adapter(dev);
+ return;
+
+ case PHYSICAL_INSERTION:
+ printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err);
+ break;
+
+ case ADDRESS_VERIFICATION:
+ printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err);
+ break;
+
+ case PARTICIPATION_IN_RING_POLL:
+ printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err);
+ break;
+
+ case REQUEST_INITIALISATION:
+ printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err);
+ break;
+
+ case FULLDUPLEX_CHECK:
+ printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err);
+ break;
+ }
+
+ /* Show the open errors. */
+ switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK)
+ {
+ case OPEN_FUNCTION_FAILURE:
+ printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_FUNCTION_FAILURE;
+ break;
+
+ case OPEN_SIGNAL_LOSS:
+ printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_SIGNAL_LOSS;
+ break;
+
+ case OPEN_TIMEOUT:
+ printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_TIMEOUT;
+ break;
+
+ case OPEN_RING_FAILURE:
+ printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_RING_FAILURE;
+ break;
+
+ case OPEN_RING_BEACONING:
+ printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_RING_BEACONING;
+ break;
+
+ case OPEN_DUPLICATE_NODEADDR:
+ printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_DUPLICATE_NODEADDR;
+ break;
+
+ case OPEN_REQUEST_INIT:
+ printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_REQUEST_INIT;
+ break;
+
+ case OPEN_REMOVE_RECEIVED:
+ printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_REMOVE_RECEIVED;
+ break;
+
+ case OPEN_FULLDUPLEX_SET:
+ printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_FULLDUPLEX_SET;
+ break;
+
+ default:
+ printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err);
+ tp->LastOpenStatus =
+ OPEN_FUNCTION_FAILURE;
+ break;
+ }
+ }
+
+ tp->AdapterOpenFlag = 0;
+ tp->AdapterVirtOpenFlag = 0;
+
+ return;
+ }
+ }
+ else
+ {
+ if(ssb_cmd != READ_ERROR_LOG)
+ return;
+
+ /* Add values from the error log table to the MAC
+ * statistics counters and update the errorlogtable
+ * memory.
+ */
+ tp->MacStat.line_errors += tp->errorlogtable.Line_Error;
+ tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error;
+ tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error;
+ tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error;
+ tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error;
+ tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error;
+ tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error;
+ tp->MacStat.token_errors += tp->errorlogtable.Token_Error;
+ tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error;
+ tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error;
+ tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters;
+ tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
+ tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
+ }
+
+ return;
+}
+
+/*
+ * The inverse routine to tms380tr_open().
+ */
+int tms380tr_close(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ netif_stop_queue(dev);
+
+ del_timer(&tp->timer);
+
+ /* Flush the Tx and disable Rx here. */
+
+ tp->HaltInProgress = 1;
+ tms380tr_exec_cmd(dev, OC_CLOSE);
+ tp->timer.expires = jiffies + 1*HZ;
+ tp->timer.function = tms380tr_timer_end_wait;
+ tp->timer.data = (unsigned long)dev;
+ add_timer(&tp->timer);
+
+ tms380tr_enable_interrupts(dev);
+
+ tp->Sleeping = 1;
+ interruptible_sleep_on(&tp->wait_for_tok_int);
+ tp->TransmitCommandActive = 0;
+
+ del_timer(&tp->timer);
+ tms380tr_disable_interrupts(dev);
+
+#ifdef CONFIG_ISA
+ if(dev->dma > 0)
+ {
+ unsigned long flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+#endif
+
+ SIFWRITEW(0xFF00, SIFCMD);
+#if 0
+ if(dev->dma > 0) /* what the? */
+ SIFWRITEB(0xff, POSREG);
+#endif
+ tms380tr_cancel_tx_queue(tp);
+
+ return (0);
+}
+
+/*
+ * Get the current statistics. This may be called with the card open
+ * or closed.
+ */
+static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ return ((struct net_device_stats *)&tp->MacStat);
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+static void tms380tr_set_multicast_list(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned int OpenOptions;
+
+ OpenOptions = tp->ocpl.OPENOptions &
+ ~(PASS_ADAPTER_MAC_FRAMES
+ | PASS_ATTENTION_FRAMES
+ | PASS_BEACON_MAC_FRAMES
+ | COPY_ALL_MAC_FRAMES
+ | COPY_ALL_NON_MAC_FRAMES);
+
+ tp->ocpl.FunctAddr = 0;
+
+ if(dev->flags & IFF_PROMISC)
+ /* Enable promiscuous mode */
+ OpenOptions |= COPY_ALL_NON_MAC_FRAMES |
+ COPY_ALL_MAC_FRAMES;
+ else
+ {
+ if(dev->flags & IFF_ALLMULTI)
+ {
+ /* Disable promiscuous mode, use normal mode. */
+ tp->ocpl.FunctAddr = 0xFFFFFFFF;
+ }
+ else
+ {
+ int i;
+ struct dev_mc_list *mclist = dev->mc_list;
+ for (i=0; i< dev->mc_count; i++)
+ {
+ ((char *)(&tp->ocpl.FunctAddr))[0] |=
+ mclist->dmi_addr[2];
+ ((char *)(&tp->ocpl.FunctAddr))[1] |=
+ mclist->dmi_addr[3];
+ ((char *)(&tp->ocpl.FunctAddr))[2] |=
+ mclist->dmi_addr[4];
+ ((char *)(&tp->ocpl.FunctAddr))[3] |=
+ mclist->dmi_addr[5];
+ mclist = mclist->next;
+ }
+ }
+ tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
+ }
+
+ tp->ocpl.OPENOptions = OpenOptions;
+ tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
+ return;
+}
+
+/*
+ * Wait for some time (microseconds)
+ */
+void tms380tr_wait(unsigned long time)
+{
+#if 0
+ long tmp;
+
+ tmp = jiffies + time/(1000000/HZ);
+ do {
+ current->state = TASK_INTERRUPTIBLE;
+ tmp = schedule_timeout(tmp);
+ } while(time_after(tmp, jiffies));
+#else
+ udelay(time);
+#endif
+ return;
+}
+
+/*
+ * Write a command value to the SIFCMD register
+ */
+static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue)
+{
+ unsigned short cmd;
+ unsigned short SifStsValue;
+ unsigned long loop_counter;
+
+ WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER);
+ cmd = (unsigned short)WriteValue;
+	loop_counter = 0.5 * 800000;
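+	/* Poll (bounded by loop_counter) until the adapter has consumed any
+	 * previously issued command before writing the new one to SIFCMD.
+	 */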
+ do {
+ SifStsValue = SIFREADW(SIFSTS);
+ } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
+ SIFWRITEW(cmd, SIFCMD);
+
+ return;
+}
+
+/*
+ * Performs the adapter hardware reset, halts the adapter CPU, downloads
+ * the firmware and finally clears the halt bit so the adapter CPU can run.
+ */
+static int tms380tr_reset_adapter(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned short *fw_ptr;
+ unsigned short count, c, count2;
+ const struct firmware *fw_entry = NULL;
+
+ strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE);
+
+ if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
+ printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
+ dev->name, "tms380tr.bin");
+ return (-1);
+ }
+
+ fw_ptr = (unsigned short *)fw_entry->data;
+ count2 = fw_entry->size / 2;
+
+ /* Hardware adapter reset */
+ SIFWRITEW(ACL_ARESET, SIFACL);
+ tms380tr_wait(40);
+
+ c = SIFREADW(SIFACL);
+ tms380tr_wait(20);
+
+ if(dev->dma == 0) /* For PCI adapters */
+ {
+ c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1); /* Clear bits */
+ if(tp->setnselout)
+ c |= (*tp->setnselout)(dev);
+ }
+
+ /* In case a command is pending - forget it */
+ tp->ScbInUse = 0;
+
+ c &= ~ACL_ARESET; /* Clear adapter reset bit */
+ c |= ACL_CPHALT; /* Halt adapter CPU, allow download */
+ c |= ACL_BOOT;
+ c |= ACL_SINTEN;
+ c &= ~ACL_PSDMAEN; /* Clear pseudo dma bit */
+ SIFWRITEW(c, SIFACL);
+ tms380tr_wait(40);
+
+ count = 0;
+ /* Download firmware via DIO interface: */
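+	/* The firmware image is a sequence of records, each consisting of the
+	 * two 16-bit halves of the target address (SIFADX/SIFADD), a word
+	 * count, and that many data words; a record with a zero word count
+	 * marks the end of the image.
+	 */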
+ do {
+ if (count2 < 3) continue;
+
+ /* Download first address part */
+ SIFWRITEW(*fw_ptr, SIFADX);
+ fw_ptr++;
+ count2--;
+ /* Download second address part */
+ SIFWRITEW(*fw_ptr, SIFADD);
+ fw_ptr++;
+ count2--;
+
+ if((count = *fw_ptr) != 0) /* Load loop counter */
+ {
+ fw_ptr++; /* Download block data */
+ count2--;
+ if (count > count2) continue;
+
+ for(; count > 0; count--)
+ {
+ SIFWRITEW(*fw_ptr, SIFINC);
+ fw_ptr++;
+ count2--;
+ }
+ }
+ else /* Stop, if last block downloaded */
+ {
+ c = SIFREADW(SIFACL);
+ c &= (~ACL_CPHALT | ACL_SINTEN);
+
+ /* Clear CPHALT and start BUD */
+ SIFWRITEW(c, SIFACL);
+ if (fw_entry)
+ release_firmware(fw_entry);
+ return (1);
+ }
+ } while(count == 0);
+
+ if (fw_entry)
+ release_firmware(fw_entry);
+ printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
+ return (-1);
+}
+
+/*
+ * Starts the bring-up diagnostics (BUD) of the token ring adapter and
+ * evaluates the diagnostic results.
+ */
+static int tms380tr_bringup_diags(struct net_device *dev)
+{
+ int loop_cnt, retry_cnt;
+ unsigned short Status;
+
+ tms380tr_wait(HALF_SECOND);
+ tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
+ tms380tr_wait(HALF_SECOND);
+
+	retry_cnt = BUD_MAX_RETRIES; /* maximum number of retries */
+
+ do {
+ retry_cnt--;
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "BUD-Status: ");
+		loop_cnt = BUD_MAX_LOOPCNT; /* maximum: three seconds */
+ do { /* Inspect BUD results */
+ loop_cnt--;
+ tms380tr_wait(HALF_SECOND);
+ Status = SIFREADW(SIFSTS);
+ Status &= STS_MASK;
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG " %04X \n", Status);
+ /* BUD successfully completed */
+ if(Status == STS_INITIALIZE)
+ return (1);
+ /* Unrecoverable hardware error, BUD not completed? */
+ } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
+ != (STS_ERROR | STS_TEST)));
+
+ /* Error preventing completion of BUD */
+ if(retry_cnt > 0)
+ {
+ printk(KERN_INFO "%s: Adapter Software Reset.\n",
+ dev->name);
+ tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
+ tms380tr_wait(HALF_SECOND);
+ }
+ } while(retry_cnt > 0);
+
+ Status = SIFREADW(SIFSTS);
+
+ printk(KERN_INFO "%s: Hardware error\n", dev->name);
+ /* Hardware error occurred! */
+ Status &= 0x001f;
+ if (Status & 0x0010)
+ printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name);
+ else if ((Status & 0x000f) > 6)
+ printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name);
+ else
+ printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
+
+ return (-1);
+}
+
+/*
+ * Copies the initialisation data to adapter memory (beginning at address
+ * 1:0A00), starts the DMA test and evaluates the result bits.
+ */
+static int tms380tr_init_adapter(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B};
+ const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7,
+ 0xC5, 0xD9, 0xC3, 0xD4};
+ void *ptr = (void *)&tp->ipb;
+ unsigned short *ipb_ptr = (unsigned short *)ptr;
+ unsigned char *cb_ptr = (unsigned char *) &tp->scb;
+ unsigned char *sb_ptr = (unsigned char *) &tp->ssb;
+ unsigned short Status;
+ int i, loop_cnt, retry_cnt;
+
+ /* Normalize: byte order low/high, word order high/low! (only IPB!) */
+ tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer);
+ tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer);
+
+ if(tms380tr_debug > 3)
+ {
+ printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb);
+ printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer);
+ printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer);
+ printk(KERN_DEBUG "%s: buffer (tp) : %lx\n", dev->name, (long) tp);
+ }
+ /* Maximum: three initialization retries */
+ retry_cnt = INIT_MAX_RETRIES;
+
+ do {
+ retry_cnt--;
+
+ /* Transfer initialization block */
+ SIFWRITEW(0x0001, SIFADX);
+
+ /* To address 0001:0A00 of adapter RAM */
+ SIFWRITEW(0x0A00, SIFADD);
+
+ /* Write 11 words to adapter RAM */
+ for(i = 0; i < 11; i++)
+ SIFWRITEW(ipb_ptr[i], SIFINC);
+
+ /* Execute SCB adapter command */
+ tms380tr_exec_sifcmd(dev, CMD_EXECUTE);
+
+ loop_cnt = INIT_MAX_LOOPCNT; /* Maximum: 11 seconds */
+
+		/* Loop while the adapter still reports init/test status, no error is flagged and the loop count has not expired */
+ do {
+ Status = 0;
+ loop_cnt--;
+ tms380tr_wait(HALF_SECOND);
+
+ /* Mask interesting status bits */
+ Status = SIFREADW(SIFSTS);
+ Status &= STS_MASK;
+ } while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0)
+ && ((Status & STS_ERROR) == 0) && (loop_cnt != 0));
+
+ if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0)
+ {
+ /* Initialization completed without error */
+ i = 0;
+ do { /* Test if contents of SCB is valid */
+ if(SCB_Test[i] != *(cb_ptr + i))
+ {
+ printk(KERN_INFO "%s: DMA failed\n", dev->name);
+ /* DMA data error: wrong data in SCB */
+ return (-1);
+ }
+ i++;
+ } while(i < 6);
+
+ i = 0;
+ do { /* Test if contents of SSB is valid */
+ if(SSB_Test[i] != *(sb_ptr + i))
+ /* DMA data error: wrong data in SSB */
+ return (-1);
+ i++;
+ } while (i < 8);
+
+ return (1); /* Adapter successfully initialized */
+ }
+ else
+ {
+ if((Status & STS_ERROR) != 0)
+ {
+ /* Initialization error occurred */
+ Status = SIFREADW(SIFSTS);
+ Status &= STS_ERROR_MASK;
+ /* ShowInitialisationErrorCode(Status); */
+ printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
+ return (-1); /* Unrecoverable error */
+ }
+ else
+ {
+ if(retry_cnt > 0)
+ {
+ /* Reset adapter and try init again */
+ tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
+ tms380tr_wait(HALF_SECOND);
+ }
+ }
+ }
+ } while(retry_cnt > 0);
+
+ printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
+ return (-1);
+}
+
+/*
+ * Checks for outstanding commands in the command queue and tries to execute
+ * a command immediately. The corresponding command flag in the queue is cleared.
+ */
+static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned long Addr = 0;
+
+ if(tp->CMDqueue == 0)
+ return; /* No command execution */
+
+ /* If SCB in use: no command */
+ if(tp->ScbInUse == 1)
+ return;
+
+ /* Check if the adapter is open; this avoids a COMMAND_REJECT
+ * interrupt from the adapter!
+ */
+ if(tp->AdapterOpenFlag == 0)
+ {
+ if(tp->CMDqueue & OC_OPEN)
+ {
+ /* Execute OPEN command */
+ tp->CMDqueue ^= OC_OPEN;
+
+ Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = OPEN;
+ }
+ else
+ /* No OPEN command queued, but adapter closed. Note:
+ * We'll try to re-open the adapter in DriverPoll()
+ */
+ return; /* No adapter command issued */
+ }
+ else
+ {
+ /* Adapter is open; evaluate the command queue and try to execute
+ * outstanding commands in priority order. First check whether a
+ * CLOSE command is queued.
+ */
+ if(tp->CMDqueue & OC_CLOSE)
+ {
+ tp->CMDqueue ^= OC_CLOSE;
+ tp->AdapterOpenFlag = 0;
+ tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
+ tp->scb.Parm[1] = 0; /* but should be set to zero! */
+ tp->scb.CMD = CLOSE;
+ if(!tp->HaltInProgress)
+ tp->CMDqueue |= OC_OPEN; /* re-open adapter */
+ else
+ tp->CMDqueue = 0; /* no more commands */
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_RECEIVE)
+ {
+ tp->CMDqueue ^= OC_RECEIVE;
+ Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = RECEIVE;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_TRANSMIT_HALT)
+ {
+ /* NOTE: TRANSMIT.HALT must be checked
+ * before TRANSMIT.
+ */
+ tp->CMDqueue ^= OC_TRANSMIT_HALT;
+ tp->scb.CMD = TRANSMIT_HALT;
+
+ /* Parm[0] and Parm[1] are ignored
+ * but should be set to zero!
+ */
+ tp->scb.Parm[0] = 0;
+ tp->scb.Parm[1] = 0;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_TRANSMIT)
+ {
+ /* NOTE: TRANSMIT must be
+ * checked after TRANSMIT.HALT
+ */
+ if(tp->TransmitCommandActive)
+ {
+ if(!tp->TransmitHaltScheduled)
+ {
+ tp->TransmitHaltScheduled = 1;
+ tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ;
+ }
+ tp->TransmitCommandActive = 0;
+ return;
+ }
+
+ tp->CMDqueue ^= OC_TRANSMIT;
+ tms380tr_cancel_tx_queue(tp);
+ Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = TRANSMIT;
+ tp->TransmitCommandActive = 1;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS)
+ {
+ tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
+ tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
+ tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
+ tp->scb.Parm[1] = 0; /* is ignored but should be zero */
+ tp->scb.CMD = MODIFY_OPEN_PARMS;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_SET_FUNCT_ADDR)
+ {
+ tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
+ tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
+ tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
+ tp->scb.CMD = SET_FUNCT_ADDR;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_SET_GROUP_ADDR)
+ {
+ tp->CMDqueue ^= OC_SET_GROUP_ADDR;
+ tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
+ tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
+ tp->scb.CMD = SET_GROUP_ADDR;
+ }
+ else
+ {
+ if(tp->CMDqueue & OC_READ_ERROR_LOG)
+ {
+ tp->CMDqueue ^= OC_READ_ERROR_LOG;
+ Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
+ tp->scb.Parm[0] = LOWORD(Addr);
+ tp->scb.Parm[1] = HIWORD(Addr);
+ tp->scb.CMD = READ_ERROR_LOG;
+ }
+ else
+ {
+ printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
+ tp->CMDqueue = 0;
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ tp->ScbInUse = 1; /* Set semaphore: SCB in use. */
+
+ /* Execute SCB and generate IRQ when done. */
+ tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
+
+ return;
+}
+
+/*
+ * IRQ conditions: signal loss on the ring, transmit or receive of beacon
+ * frames (disabled if bit 1 of OPEN option is set); report error MAC
+ * frame transmit (disabled if bit 2 of OPEN option is set); open or short
+ * circuit fault on the lobe is detected; remove MAC frame received;
+ * error counter overflow (255); opened adapter is the only station in ring.
+ * After some of the IRQs the adapter is closed!
+ */
+static void tms380tr_ring_status_irq(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+
+ tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]);
+
+ /* First: fill up statistics */
+ if(tp->ssb.Parm[0] & SIGNAL_LOSS)
+ {
+ printk(KERN_INFO "%s: Signal Loss\n", dev->name);
+ tp->MacStat.line_errors++;
+ }
+
+ /* Adapter is closed, but initialized */
+ if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT)
+ {
+ printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n",
+ dev->name);
+ tp->MacStat.line_errors++;
+ }
+
+ if(tp->ssb.Parm[0] & RING_RECOVERY)
+ printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
+
+ /* Counter overflow: read error log */
+ if(tp->ssb.Parm[0] & COUNTER_OVERFLOW)
+ {
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
+ tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG);
+ }
+
+ /* Adapter is closed, but initialized */
+ if(tp->ssb.Parm[0] & REMOVE_RECEIVED)
+ printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n",
+ dev->name);
+
+ /* Adapter is closed, but initialized */
+ if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR)
+ printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n",
+ dev->name);
+
+ if(tp->ssb.Parm[0] & HARD_ERROR)
+ printk(KERN_INFO "%s: Hard Error\n", dev->name);
+
+ if(tp->ssb.Parm[0] & SOFT_ERROR)
+ printk(KERN_INFO "%s: Soft Error\n", dev->name);
+
+ if(tp->ssb.Parm[0] & TRANSMIT_BEACON)
+ printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
+
+ if(tp->ssb.Parm[0] & SINGLE_STATION)
+ printk(KERN_INFO "%s: Single Station\n", dev->name);
+
+ /* Check if adapter has been closed */
+ if(tp->ssb.Parm[0] & ADAPTER_CLOSED)
+ {
+ printk(KERN_INFO "%s: Adapter closed (Reopening), "
+ "CurrentRingStat %x\n",
+ dev->name, tp->CurrentRingStatus);
+ tp->AdapterOpenFlag = 0;
+ tms380tr_open_adapter(dev);
+ }
+
+ return;
+}
+
+/*
+ * Issued if adapter has encountered an unrecoverable hardware
+ * or software error.
+ */
+static void tms380tr_chk_irq(struct net_device *dev)
+{
+ int i;
+ unsigned short AdapterCheckBlock[4];
+ struct net_local *tp = netdev_priv(dev);
+
+ tp->AdapterOpenFlag = 0; /* Adapter closed now */
+
+ /* Page number of adapter memory */
+ SIFWRITEW(0x0001, SIFADX);
+ /* Address offset */
+ SIFWRITEW(CHECKADDR, SIFADR);
+
+ /* Reading 8 byte adapter check block. */
+ for(i = 0; i < 4; i++)
+ AdapterCheckBlock[i] = SIFREADW(SIFINC);
+
+ if(tms380tr_debug > 3)
+ {
+ printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name);
+ for (i = 0; i < 4; i++)
+ printk("%04X", AdapterCheckBlock[i]);
+ printk("\n");
+ }
+
+ switch(AdapterCheckBlock[0])
+ {
+ case DIO_PARITY:
+ printk(KERN_INFO "%s: DIO parity error\n", dev->name);
+ break;
+
+ case DMA_READ_ABORT:
+ printk(KERN_INFO "%s: DMA read operation aborted:\n",
+ dev->name);
+ switch (AdapterCheckBlock[1])
+ {
+ case 0:
+ printk(KERN_INFO "Timeout\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ case 1:
+ printk(KERN_INFO "Parity error\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ case 2:
+ printk(KERN_INFO "Bus error\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ default:
+ printk(KERN_INFO "Unknown error.\n");
+ break;
+ }
+ break;
+
+ case DMA_WRITE_ABORT:
+ printk(KERN_INFO "%s: DMA write operation aborted:\n",
+ dev->name);
+ switch (AdapterCheckBlock[1])
+ {
+ case 0:
+ printk(KERN_INFO "Timeout\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ case 1:
+ printk(KERN_INFO "Parity error\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ case 2:
+ printk(KERN_INFO "Bus error\n");
+ printk(KERN_INFO "Address: %04X %04X\n",
+ AdapterCheckBlock[2],
+ AdapterCheckBlock[3]);
+ break;
+
+ default:
+ printk(KERN_INFO "Unknown error.\n");
+ break;
+ }
+ break;
+
+ case ILLEGAL_OP_CODE:
+ printk(KERN_INFO "%s: Illegal operation code in firmware\n",
+ dev->name);
+ /* Parm[0-2]: adapter internal registers R13-R15 */
+ break;
+
+ case PARITY_ERRORS:
+ printk(KERN_INFO "%s: Adapter internal bus parity error\n",
+ dev->name);
+ /* Parm[0-2]: adapter internal registers R13-R15 */
+ break;
+
+ case RAM_DATA_ERROR:
+ printk(KERN_INFO "%s: RAM data error\n", dev->name);
+ /* Parm[0-1]: MSW/LSW address of RAM location. */
+ break;
+
+ case RAM_PARITY_ERROR:
+ printk(KERN_INFO "%s: RAM parity error\n", dev->name);
+ /* Parm[0-1]: MSW/LSW address of RAM location. */
+ break;
+
+ case RING_UNDERRUN:
+ printk(KERN_INFO "%s: Internal DMA underrun detected\n",
+ dev->name);
+ break;
+
+ case INVALID_IRQ:
+ printk(KERN_INFO "%s: Unrecognized interrupt detected\n",
+ dev->name);
+ /* Parm[0-2]: adapter internal registers R13-R15 */
+ break;
+
+ case INVALID_ERROR_IRQ:
+ printk(KERN_INFO "%s: Unrecognized error interrupt detected\n",
+ dev->name);
+ /* Parm[0-2]: adapter internal registers R13-R15 */
+ break;
+
+ case INVALID_XOP:
+ printk(KERN_INFO "%s: Unrecognized XOP request detected\n",
+ dev->name);
+ /* Parm[0-2]: adapter internal registers R13-R15 */
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Unknown status\n", dev->name);
+ break;
+ }
+
+ if(tms380tr_chipset_init(dev) == 1)
+ {
+ /* Restart of firmware successful */
+ tp->AdapterOpenFlag = 1;
+ }
+
+ return;
+}
+
+/*
+ * Copies the adapter's internal pointer table into the host system and
+ * returns the amount of adapter RAM (in KB).
+ */
+static int tms380tr_read_ptr(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned short adapterram;
+
+ tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr,
+ ADAPTER_INT_PTRS, 16);
+ tms380tr_read_ram(dev, (unsigned char *)&adapterram,
+ cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2);
+ return be16_to_cpu(adapterram);
+}
+
+/*
+ * Reads a number of bytes from adapter to system memory.
+ */
+static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
+ unsigned short Address, int Length)
+{
+ int i;
+ unsigned short old_sifadx, old_sifadr, InWord;
+
+ /* Save the current values */
+ old_sifadx = SIFREADW(SIFADX);
+ old_sifadr = SIFREADW(SIFADR);
+
+ /* Page number of adapter memory */
+ SIFWRITEW(0x0001, SIFADX);
+ /* Address offset in adapter RAM */
+ SIFWRITEW(Address, SIFADR);
+
+ /* Copy Length bytes from adapter memory to the system data area. */
+ i = 0;
+ for(;;)
+ {
+ InWord = SIFREADW(SIFINC);
+
+ *(Data + i) = HIBYTE(InWord); /* Write first byte */
+ if(++i == Length) /* All is done break */
+ break;
+
+ *(Data + i) = LOBYTE(InWord); /* Write second byte */
+ if (++i == Length) /* All is done break */
+ break;
+ }
+
+ /* Restore original values */
+ SIFWRITEW(old_sifadx, SIFADX);
+ SIFWRITEW(old_sifadr, SIFADR);
+
+ return;
+}
+
+/*
+ * Cancel all queued packets in the transmission queue.
+ */
+static void tms380tr_cancel_tx_queue(struct net_local* tp)
+{
+ TPL *tpl;
+
+ /*
+ * NOTE: There must not be an active TRANSMIT command pending when
+ * this function is called.
+ */
+ if(tp->TransmitCommandActive)
+ return;
+
+ for(;;)
+ {
+ tpl = tp->TplBusy;
+ if(!tpl->BusyFlag)
+ break;
+ /* "Remove" TPL from busy list. */
+ tp->TplBusy = tpl->NextTPLPtr;
+ tms380tr_write_tpl_status(tpl, 0); /* Clear VALID bit */
+ tpl->BusyFlag = 0; /* "free" TPL */
+
+ printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
+ if (tpl->DMABuff)
+ pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(tpl->Skb);
+ }
+
+ return;
+}
+
+/*
+ * This function is called whenever a transmit interrupt is generated by the
+ * adapter. For a command-complete interrupt, it checks whether a new
+ * transmit command has to be issued.
+ */
+static void tms380tr_tx_status_irq(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned char HighByte, HighAc, LowAc;
+ TPL *tpl;
+
+ /* NOTE: At this point the SSB from TRANSMIT STATUS is no longer
+ * available, because the CLEAR SSB command has already been issued.
+ *
+ * Process all complete transmissions.
+ */
+
+ for(;;)
+ {
+ tpl = tp->TplBusy;
+ if(!tpl->BusyFlag || (tpl->Status
+ & (TX_VALID | TX_FRAME_COMPLETE))
+ != TX_FRAME_COMPLETE)
+ {
+ break;
+ }
+
+ /* "Remove" TPL from busy list. */
+ tp->TplBusy = tpl->NextTPLPtr ;
+
+ /* Check the transmit status field only for directed frames */
+ if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0)
+ {
+ HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
+ HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte);
+ LowAc = GET_FRAME_STATUS_LOW_AC(HighByte);
+
+ if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
+ {
+ printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n",
+ dev->name,
+ *(unsigned long *)&tpl->MData[2+2]);
+ }
+ else
+ {
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Directed frame tx'd\n",
+ dev->name);
+ }
+ }
+ else
+ {
+ if(!DIRECTED_FRAME(tpl))
+ {
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Broadcast frame tx'd\n",
+ dev->name);
+ }
+ }
+
+ tp->MacStat.tx_packets++;
+ if (tpl->DMABuff)
+ pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(tpl->Skb);
+ tpl->BusyFlag = 0; /* "free" TPL */
+ }
+
+ if(!tp->TplFree->NextTPLPtr->BusyFlag)
+ netif_wake_queue(dev);
+ return;
+}
+
+/*
+ * Called if a frame receive interrupt is generated by the adapter.
+ * Checks if the frame is valid and indicates it to the system.
+ */
+static void tms380tr_rcv_status_irq(struct net_device *dev)
+{
+ struct net_local *tp = netdev_priv(dev);
+ unsigned char *ReceiveDataPtr;
+ struct sk_buff *skb;
+ unsigned int Length, Length2;
+ RPL *rpl;
+ RPL *SaveHead;
+ dma_addr_t dmabuf;
+
+ /* NOTE: At this point the SSB from RECEIVE STATUS is no longer
+ * available, because the CLEAR SSB command has already been issued.
+ *
+ * Process all complete receives.
+ */
+
+ for(;;)
+ {
+ rpl = tp->RplHead;
+ if(rpl->Status & RX_VALID)
+ break; /* RPL still in use by adapter */
+
+ /* Forward RPLHead pointer to next list. */
+ SaveHead = tp->RplHead;
+ tp->RplHead = rpl->NextRPLPtr;
+
+ /* Get the frame size (Byte swap for Intel).
+ * Do this early (see workaround comment below)
+ */
+ Length = be16_to_cpu((unsigned short)rpl->FrameSize);
+
+ /* Check if the Frame_Start, Frame_End and
+ * Frame_Complete bits are set.
+ */
+ if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
+ == VALID_SINGLE_BUFFER_FRAME)
+ {
+ ReceiveDataPtr = rpl->MData;
+
+ /* Workaround for delayed write of FrameSize on ISA
+ * (FrameSize is false but valid-bit is reset)
+ * Frame size is set to zero when the RPL is freed.
+ * Length2 is there because there have also been
+ * cases where the FrameSize was partially written
+ */
+ Length2 = be16_to_cpu((unsigned short)rpl->FrameSize);
+
+ if(Length == 0 || Length != Length2)
+ {
+ tp->RplHead = SaveHead;
+ break; /* Return to tms380tr_interrupt */
+ }
+ tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);
+
+ if(tms380tr_debug > 3)
+ printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
+ dev->name, Length, Length);
+
+ /* Indicate the received frame to the system; the
+ * adapter does the Source-Routing padding for
+ * us. See: OpenOptions in tms380tr_init_opb()
+ */
+ skb = rpl->Skb;
+ if(rpl->SkbStat == SKB_UNAVAILABLE)
+ {
+ /* Try again to allocate skb */
+ skb = dev_alloc_skb(tp->MaxPacketSize);
+ if(skb == NULL)
+ {
+ /* Update Stats ?? */
+ }
+ else
+ {
+ skb->dev = dev;
+ skb_put(skb, tp->MaxPacketSize);
+ rpl->SkbStat = SKB_DATA_COPY;
+ ReceiveDataPtr = rpl->MData;
+ }
+ }
+
+ if(skb && (rpl->SkbStat == SKB_DATA_COPY
+ || rpl->SkbStat == SKB_DMA_DIRECT))
+ {
+ if(rpl->SkbStat == SKB_DATA_COPY)
+ memcpy(skb->data, ReceiveDataPtr, Length);
+
+ /* Deliver frame to system */
+ rpl->Skb = NULL;
+ skb_trim(skb,Length);
+ skb->protocol = tr_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ }
+ else /* Invalid frame */
+ {
+ if(rpl->Skb != NULL)
+ dev_kfree_skb_irq(rpl->Skb);
+
+ /* Skip list. */
+ if(rpl->Status & RX_START_FRAME)
+ /* Frame start bit is set -> overflow. */
+ tp->MacStat.rx_errors++;
+ }
+ if (rpl->DMABuff)
+ pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+ rpl->DMABuff = 0;
+
+ /* Allocate new skb for rpl */
+ rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
+ /* skb == NULL ? then use local buffer */
+ if(rpl->Skb == NULL)
+ {
+ rpl->SkbStat = SKB_UNAVAILABLE;
+ rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
+ rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
+ }
+ else /* skb != NULL */
+ {
+ rpl->Skb->dev = dev;
+ skb_put(rpl->Skb, tp->MaxPacketSize);
+
+ /* Data unreachable for DMA ? then use local buffer */
+ dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+ if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
+ {
+ rpl->SkbStat = SKB_DATA_COPY;
+ rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
+ rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
+ }
+ else
+ {
+ /* DMA directly in skb->data */
+ rpl->SkbStat = SKB_DMA_DIRECT;
+ rpl->FragList[0].DataAddr = htonl(dmabuf);
+ rpl->MData = rpl->Skb->data;
+ rpl->DMABuff = dmabuf;
+ }
+ }
+
+ rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
+ rpl->FrameSize = 0;
+
+ /* Pass the last RPL back to the adapter */
+ tp->RplTail->FrameSize = 0;
+
+ /* Reset the CSTAT field in the list. */
+ tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);
+
+ /* Current RPL becomes last one in list. */
+ tp->RplTail = tp->RplTail->NextRPLPtr;
+
+ /* Inform adapter about RPL valid. */
+ tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
+ }
+
+ return;
+}
+
+/*
+ * This function should be used whenever the status of any RPL must be
+ * modified by the driver, because the compiler may otherwise change the
+ * order of instructions such that writing the RPL status may be executed
+ * at an undesirable time. When this function is used, the status is
+ * always written when the function is called.
+ */
+static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
+{
+ rpl->Status = Status;
+
+ return;
+}
+
+/*
+ * The function updates the statistics counters in tp->MacStat.
+ * It differentiates between directed and broadcast/multicast (= functional)
+ * frames.
+ */
+static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[],
+ unsigned int Length)
+{
+ tp->MacStat.rx_packets++;
+ tp->MacStat.rx_bytes += Length;
+
+ /* Test functional bit */
+ if(DataPtr[2] & GROUP_BIT)
+ tp->MacStat.multicast++;
+
+ return;
+}
+
+static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct net_local *tp = netdev_priv(dev);
+ struct sockaddr *saddr = addr;
+
+ if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) {
+ printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name);
+ return -EIO;
+ }
+ memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
+ return 0;
+}
+
+#if TMS380TR_DEBUG > 0
+/*
+ * Dump Packet (data)
+ */
+static void tms380tr_dump(unsigned char *Data, int length)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < length / 8; i++, j += 8)
+ {
+ printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ Data[j+0],Data[j+1],Data[j+2],Data[j+3],
+ Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
+ }
+
+ return;
+}
+#endif
+
+void tmsdev_term(struct net_device *dev)
+{
+ struct net_local *tp;
+
+ tp = netdev_priv(dev);
+ pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
+ PCI_DMA_BIDIRECTIONAL);
+}
+
+int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
+ struct pci_dev *pdev)
+{
+ struct net_local *tms_local;
+
+ memset(dev->priv, 0, sizeof(struct net_local));
+ tms_local = netdev_priv(dev);
+ init_waitqueue_head(&tms_local->wait_for_tok_int);
+ tms_local->dmalimit = dmalimit;
+ tms_local->pdev = pdev;
+ tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local,
+ sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL);
+ if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit)
+ {
+ printk(KERN_INFO "%s: Memory not accessible for DMA\n",
+ dev->name);
+ tmsdev_term(dev);
+ return -ENOMEM;
+ }
+
+ /* These can be overridden by the card driver if needed */
+ dev->open = tms380tr_open;
+ dev->stop = tms380tr_close;
+ dev->do_ioctl = NULL;
+ dev->hard_start_xmit = tms380tr_send_packet;
+ dev->tx_timeout = tms380tr_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = tms380tr_get_stats;
+ dev->set_multicast_list = &tms380tr_set_multicast_list;
+ dev->set_mac_address = tms380tr_set_mac_address;
+
+ return 0;
+}
+
+#ifdef MODULE
+
+EXPORT_SYMBOL(tms380tr_open);
+EXPORT_SYMBOL(tms380tr_close);
+EXPORT_SYMBOL(tms380tr_interrupt);
+EXPORT_SYMBOL(tmsdev_init);
+EXPORT_SYMBOL(tmsdev_term);
+EXPORT_SYMBOL(tms380tr_wait);
+
+struct module *TMS380_module = NULL;
+
+int init_module(void)
+{
+ printk(KERN_DEBUG "%s", version);
+
+ TMS380_module = &__this_module;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ TMS380_module = NULL;
+}
+#endif
+
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c tms380tr.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c tms380tr.c"
+ * c-set-style "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
new file mode 100644
index 000000000000..f2c5ba0f37a5
--- /dev/null
+++ b/drivers/net/tokenring/tms380tr.h
@@ -0,0 +1,1141 @@
+/*
+ * tms380tr.h: TI TMS380 Token Ring driver for Linux
+ *
+ * Authors:
+ * - Christoph Goos <cgoos@syskonnect.de>
+ * - Adam Fritzler <mid@auk.cx>
+ */
+
+#ifndef __LINUX_TMS380TR_H
+#define __LINUX_TMS380TR_H
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+
+/* module prototypes */
+int tms380tr_open(struct net_device *dev);
+int tms380tr_close(struct net_device *dev);
+irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
+ struct pci_dev *pdev);
+void tmsdev_term(struct net_device *dev);
+void tms380tr_wait(unsigned long time);
+
+#define TMS380TR_MAX_ADAPTERS 7
+
+#define SEND_TIMEOUT 10*HZ
+
+#define TR_RCF_LONGEST_FRAME_MASK 0x0070
+#define TR_RCF_FRAME4K 0x0030
+
+/*------------------------------------------------------------------*/
+/* Bit order for adapter communication with DMA */
+/* -------------------------------------------------------------- */
+/* Bit 8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7| */
+/* -------------------------------------------------------------- */
+/* The bytes in a word must be byte swapped. Also, if a double */
+/* word is used for storage, then the words, as well as the bytes, */
+/* must be swapped. */
+/* Bit order for adapter communication with DIO */
+/* -------------------------------------------------------------- */
+/* Bit 0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15| */
+/* -------------------------------------------------------------- */
+/*------------------------------------------------------------------*/
+
+/* Swap words of a long. */
+#define SWAPW(x) (((x) << 16) | ((x) >> 16))
+
+/* Get the low byte of a word. */
+#define LOBYTE(w) ((unsigned char)(w))
+
+/* Get the high byte of a word. */
+#define HIBYTE(w) ((unsigned char)((unsigned short)(w) >> 8))
+
+/* Get the low word of a long. */
+#define LOWORD(l) ((unsigned short)(l))
+
+/* Get the high word of a long. */
+#define HIWORD(l) ((unsigned short)((unsigned long)(l) >> 16))
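+
+/*
+ * Worked examples (illustrative values only) of the helper macros above,
+ * showing how a host value is split or swapped into the adapter's
+ * high-low representation:
+ *
+ *	SWAPW(0x12345678)  == 0x56781234   (words of the long exchanged)
+ *	LOBYTE(0x1234)     == 0x34
+ *	HIBYTE(0x1234)     == 0x12
+ *	LOWORD(0x12345678) == 0x5678
+ *	HIWORD(0x12345678) == 0x1234
+ */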
+
+
+
+/* Token ring adapter I/O addresses for normal mode. */
+
+/*
+ * The SIF registers. Common to all adapters.
+ */
+/* Basic SIF (SRSX = 0) */
+#define SIFDAT 0x00 /* SIF/DMA data. */
+#define SIFINC 0x02 /* IO Word data with auto increment. */
+#define SIFINH 0x03 /* IO Byte data with auto increment. */
+#define SIFADR 0x04 /* SIF/DMA Address. */
+#define SIFCMD 0x06 /* SIF Command. */
+#define SIFSTS 0x06 /* SIF Status. */
+
+/* "Extended" SIF (SRSX = 1) */
+#define SIFACL 0x08 /* SIF Adapter Control Register. */
+#define SIFADD 0x0a /* SIF/DMA Address. -- 0x0a */
+#define SIFADX 0x0c /* 0x0c */
+#define DMALEN 0x0e /* SIF DMA length. -- 0x0e */
+
+/*
+ * POS Registers. Only for ISA Adapters.
+ */
+#define POSREG 0x10 /* Adapter Program Option Select (POS)
+ * Register: base IO address + 16 byte.
+ */
+#define POSREG_2 24L /* only for TR4/16+ adapter
+ * base IO address + 24 byte. -- 0x18
+ */
+
+/* SIFCMD command codes (high-low) */
+#define CMD_INTERRUPT_ADAPTER 0x8000 /* Cause internal adapter interrupt */
+#define CMD_ADAPTER_RESET 0x4000 /* Hardware reset of adapter */
+#define CMD_SSB_CLEAR 0x2000 /* Acknowledge system interrupts
+ * to the adapter.
+ */
+#define CMD_EXECUTE 0x1000 /* Execute SCB command */
+#define CMD_SCB_REQUEST 0x0800 /* Request adapter to interrupt
+ * system when SCB is available for
+ * another command.
+ */
+#define CMD_RX_CONTINUE 0x0400 /* Continue receive after odd pointer
+ * stop. (odd pointer receive method)
+ */
+#define CMD_RX_VALID 0x0200 /* Now actual RPL is valid. */
+#define CMD_TX_VALID 0x0100 /* Now actual TPL is valid. (valid
+ * bit receive/transmit method)
+ */
+#define CMD_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
+ * interrupt is reset.
+ */
+#define CMD_CLEAR_SYSTEM_IRQ 0x0080 /* Clear SYSTEM_INTERRUPT bit.
+ * (write: 1=ignore, 0=reset)
+ */
+#define EXEC_SOFT_RESET 0xFF00 /* adapter soft reset. (restart
+ * adapter after hardware reset)
+ */
+
+
+/* ACL commands (high-low) */
+#define ACL_SWHLDA 0x0800 /* Software hold acknowledge. */
+#define ACL_SWDDIR 0x0400 /* Data transfer direction. */
+#define ACL_SWHRQ 0x0200 /* Pseudo DMA operation. */
+#define ACL_PSDMAEN 0x0100 /* Enable pseudo system DMA. */
+#define ACL_ARESET 0x0080 /* Adapter hardware reset command.
+ * (held in reset condition as
+ * long as bit is set)
+ */
+#define ACL_CPHALT 0x0040 /* Communication processor halt.
+ * (can only be set while ACL_ARESET
+ * bit is set; prevents adapter
+ * processor from executing code while
+ * downloading firmware)
+ */
+#define ACL_BOOT 0x0020
+#define ACL_SINTEN 0x0008 /* System interrupt enable/disable
+ * (1/0): can be written if ACL_ARESET
+ * is zero.
+ */
+#define ACL_PEN 0x0004
+
+#define ACL_NSELOUT0 0x0002
+#define ACL_NSELOUT1 0x0001 /* NSELOUTx have a card-specific
+ * meaning for setting ring speed.
+ */
+
+#define PS_DMA_MASK (ACL_SWHRQ | ACL_PSDMAEN)
+
+
+/* SIFSTS register return codes (high-low) */
+#define STS_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
+ * interrupt is valid.
+ */
+#define STS_INITIALIZE 0x0040 /* INITIALIZE status. (ready to
+ * initialize)
+ */
+#define STS_TEST 0x0020 /* TEST status. (BUD not completed) */
+#define STS_ERROR 0x0010 /* ERROR status. (unrecoverable
+ * HW error occurred)
+ */
+#define STS_MASK 0x00F0 /* Mask interesting status bits. */
+#define STS_ERROR_MASK 0x000F /* Get Error Code by masking the
+ * interrupt code bits.
+ */
+#define ADAPTER_INT_PTRS 0x0A00 /* Address offset of adapter internal
+ * pointers (01:0a00, high-low); read
+ * them after init and before open.
+ */
+
+
+/* Interrupt Codes (only MAC IRQs) */
+#define STS_IRQ_ADAPTER_CHECK 0x0000 /* unrecoverable hardware or
+ * software error.
+ */
+#define STS_IRQ_RING_STATUS 0x0004 /* SSB is updated with ring status. */
+#define STS_IRQ_LLC_STATUS 0x0005 /* Not used in MAC-only microcode */
+#define STS_IRQ_SCB_CLEAR 0x0006 /* SCB clear, following an
+ * SCB_REQUEST IRQ.
+ */
+#define STS_IRQ_TIMER 0x0007 /* Not normally used in MAC ucode */
+#define STS_IRQ_COMMAND_STATUS 0x0008 /* SSB is updated with command
+ * status.
+ */
+#define STS_IRQ_RECEIVE_STATUS 0x000A /* SSB is updated with receive
+ * status.
+ */
+#define STS_IRQ_TRANSMIT_STATUS 0x000C /* SSB is updated with transmit
+ * status
+ */
+#define STS_IRQ_RECEIVE_PENDING 0x000E /* Not used in MAC-only microcode */
+#define STS_IRQ_MASK 0x000F /* = STS_ERROR_MASK. */
+
+
+/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */
+#define COMMAND_COMPLETE 0x0080 /* TRANSMIT command completed
+ * (avoid this!); issue another transmit
+ * to send additional frames.
+ */
+#define FRAME_COMPLETE 0x0040 /* Frame has been transmitted;
+ * INTERRUPT_FRAME bit was set in the
+ * CSTAT request; indication of possibly
+ * more than one frame transmissions!
+ * SSB.Parm[0-1]: 32 bit pointer to
+ * TPL of last frame.
+ */
+#define LIST_ERROR 0x0020 /* Error in one of the TPLs that
+ * compose the frame; TRANSMIT
+ * terminated; Parm[1-2]: 32bit pointer
+ * to TPL which starts the error
+ * frame; error details in bits 8-13.
+ * (14?)
+ */
+#define FRAME_SIZE_ERROR 0x8000 /* FRAME_SIZE does not equal the sum of
+ * the valid DATA_COUNT fields;
+ * FRAME_SIZE less than header plus
+ * information field. (15 bytes +
+ * routing field) Or if FRAME_SIZE
+ * was specified as zero in one list.
+ */
+#define TX_THRESHOLD 0x4000 /* FRAME_SIZE greater than (BUFFER_SIZE
+ * - 9) * TX_BUF_MAX.
+ */
+#define ODD_ADDRESS 0x2000 /* Odd forward pointer value is
+ * read on a list without END_FRAME
+ * indication.
+ */
+#define FRAME_ERROR 0x1000 /* START_FRAME bit (not) anticipated,
+ * but (not) set.
+ */
+#define ACCESS_PRIORITY_ERROR 0x0800 /* Access priority requested has not
+ * been allowed.
+ */
+#define UNENABLED_MAC_FRAME 0x0400 /* MAC frame has source class of zero
+ * or MAC frame PCF ATTN field is
+ * greater than one.
+ */
+#define ILLEGAL_FRAME_FORMAT 0x0200 /* Bit 0 or FC field was set to one. */
+
+
+/*
+ * Since we need to support some functions even if the adapter is in a
+ * CLOSED state, we have a (pseudo-) command queue which holds commands
+ * that are outstanding to be executed.
+ *
+ * Each time a command completes, an interrupt occurs and the next
+ * command is executed. The command queue is actually a simple word with
+ * a bit for each outstanding command. Therefore the commands will not be
+ * executed in the order they have been queued.
+ *
+ * The following defines the command code bits and the command queue:
+ */
+#define OC_OPEN 0x0001 /* OPEN command */
+#define OC_TRANSMIT 0x0002 /* TRANSMIT command */
+#define OC_TRANSMIT_HALT 0x0004 /* TRANSMIT_HALT command */
+#define OC_RECEIVE 0x0008 /* RECEIVE command */
+#define OC_CLOSE 0x0010 /* CLOSE command */
+#define OC_SET_GROUP_ADDR 0x0020 /* SET_GROUP_ADDR command */
+#define OC_SET_FUNCT_ADDR 0x0040 /* SET_FUNCT_ADDR command */
+#define OC_READ_ERROR_LOG 0x0080 /* READ_ERROR_LOG command */
+#define OC_READ_ADAPTER 0x0100 /* READ_ADAPTER command */
+#define OC_MODIFY_OPEN_PARMS 0x0400 /* MODIFY_OPEN_PARMS command */
+#define OC_RESTORE_OPEN_PARMS 0x0800 /* RESTORE_OPEN_PARMS command */
+#define OC_SET_FIRST_16_GROUP 0x1000 /* SET_FIRST_16_GROUP command */
+#define OC_SET_BRIDGE_PARMS 0x2000 /* SET_BRIDGE_PARMS command */
+#define OC_CONFIG_BRIDGE_PARMS 0x4000 /* CONFIG_BRIDGE_PARMS command */
+
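+/*
+ * A minimal usage sketch of the command queue word (mirroring the pattern
+ * used in tms380tr_chk_outstanding_cmds() in tms380tr.c):
+ *
+ *	tp->CMDqueue |= OC_OPEN;          queue an OPEN command
+ *	...
+ *	if (tp->CMDqueue & OC_OPEN) {     command still outstanding?
+ *		tp->CMDqueue ^= OC_OPEN;  clear the flag ...
+ *		... fill the SCB and execute it ...
+ *	}
+ */
+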
+#define OPEN 0x0300 /* C: open command. S: completion. */
+#define TRANSMIT 0x0400 /* C: transmit command. S: completion
+ * status. (reject: COMMAND_REJECT if
+ * adapter not opened, TRANSMIT already
+ * issued or address passed in the SCB
+ * not word aligned)
+ */
+#define TRANSMIT_HALT 0x0500 /* C: interrupt TX TPL chain; if no
+ * TRANSMIT command issued, the command
+ * is ignored (completion with TRANSMIT
+ * status (0x0400)!)
+ */
+#define RECEIVE 0x0600 /* C: receive command. S: completion
+ * status. (reject: COMMAND_REJECT if
+ * adapter not opened, RECEIVE already
+ * issued or address passed in the SCB
+ * not word aligned)
+ */
+#define CLOSE 0x0700 /* C: close adapter. S: completion.
+ * (COMMAND_REJECT if adapter not open)
+ */
+#define SET_GROUP_ADDR 0x0800 /* C: alter adapter group address after
+ * OPEN. S: completion. (COMMAND_REJECT
+ * if adapter not open)
+ */
+#define SET_FUNCT_ADDR 0x0900 /* C: alter adapter functional address
+ * after OPEN. S: completion.
+ * (COMMAND_REJECT if adapter not open)
+ */
+#define READ_ERROR_LOG 0x0A00 /* C: read adapter error counters.
+ * S: completion. (command ignored
+ * if adapter not open!)
+ */
+#define READ_ADAPTER 0x0B00 /* C: read data from adapter memory.
+ * (important: after init and before
+ * open!) S: completion. (ADAPTER_CHECK
+ * interrupt if undefined storage area
+ * read)
+ */
+#define MODIFY_OPEN_PARMS 0x0D00 /* C: modify some adapter operational
+ * parameters. (bit corresponding to
+ * WRAP_INTERFACE is ignored)
+ * S: completion. (reject:
+ * COMMAND_REJECT)
+ */
+#define RESTORE_OPEN_PARMS 0x0E00 /* C: modify some adapter operational
+ * parameters. (bit corresponding
+ * to WRAP_INTERFACE is ignored)
+ * S: completion. (reject:
+ * COMMAND_REJECT)
+ */
+#define SET_FIRST_16_GROUP 0x0F00 /* C: alter the first two bytes in
+ * adapter group address.
+ * S: completion. (reject:
+ * COMMAND_REJECT)
+ */
+#define SET_BRIDGE_PARMS 0x1000 /* C: values and conditions for the
+ * adapter hardware to use when frames
+ * are copied for forwarding.
+ * S: completion. (reject:
+ * COMMAND_REJECT)
+ */
+#define CONFIG_BRIDGE_PARMS 0x1100 /* C: ..
+ * S: completion. (reject:
+ * COMMAND_REJECT)
+ */
+
+#define SPEED_4 4
+#define SPEED_16 16 /* Default transmission speed */
+
+
+/* Initialization Parameter Block (IPB); word alignment necessary! */
+#define BURST_SIZE 0x0018 /* Default burst size */
+#define BURST_MODE 0x9F00 /* Burst mode enable */
+#define DMA_RETRIES 0x0505 /* Magic DMA retry number... */
+
+#define CYCLE_TIME 3 /* Default AT-bus cycle time: 500 ns
+ * (later adapter version: fix cycle time!)
+ */
+#define LINE_SPEED_BIT 0x80
+
+/* Macro definition for the wait function. */
+#define ONE_SECOND_TICKS 1000000
+#define HALF_SECOND (ONE_SECOND_TICKS / 2)
+#define ONE_SECOND (ONE_SECOND_TICKS)
+#define TWO_SECONDS (ONE_SECOND_TICKS * 2)
+#define THREE_SECONDS (ONE_SECOND_TICKS * 3)
+#define FOUR_SECONDS (ONE_SECOND_TICKS * 4)
+#define FIVE_SECONDS (ONE_SECOND_TICKS * 5)
+
+#define BUFFER_SIZE 2048 /* Buffers on Adapter */
+
+#pragma pack(1)
+typedef struct {
+ unsigned short Init_Options; /* Initialize with burst mode;
+ * LLC disabled. (MAC only)
+ */
+
+ /* Interrupt vectors the adapter places on attached system bus. */
+ u_int8_t CMD_Status_IV; /* Interrupt vector: command status. */
+ u_int8_t TX_IV; /* Interrupt vector: transmit. */
+ u_int8_t RX_IV; /* Interrupt vector: receive. */
+ u_int8_t Ring_Status_IV; /* Interrupt vector: ring status. */
+ u_int8_t SCB_Clear_IV; /* Interrupt vector: SCB clear. */
+ u_int8_t Adapter_CHK_IV; /* Interrupt vector: adapter check. */
+
+ u_int16_t RX_Burst_Size; /* Max. number of transfer cycles. */
+ u_int16_t TX_Burst_Size; /* During DMA burst; even value! */
+ u_int16_t DMA_Abort_Thrhld; /* Number of DMA retries. */
+
+ u_int32_t SCB_Addr; /* SCB address: even, word aligned, high-low */
+ u_int32_t SSB_Addr; /* SSB address: even, word aligned, high-low */
+} IPB, *IPB_Ptr;
+#pragma pack()
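+
+/*
+ * The SCB_Addr/SSB_Addr fields of the IPB are stored word swapped
+ * (high-low), as done in tms380tr_init_adapter() in tms380tr.c:
+ *
+ *	tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer);
+ *	tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer);
+ */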
+
+/*
+ * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to
+ * be reopened)
+ */
+#define BUFFER_SIZE 2048 /* Buffers on Adapter. */
+#define TPL_SIZE 8+6*TX_FRAG_NUM /* Depending on fragments per TPL. */
+#define RPL_SIZE 14 /* (with TI firmware v2.26 handling
+ * up to nine fragments possible)
+ */
+#define TX_BUF_MIN 20 /* ??? (Stephan: calculation with */
+#define TX_BUF_MAX 40 /* BUFFER_SIZE and MAX_FRAME_SIZE) ???
+ */
+#define DISABLE_EARLY_TOKEN_RELEASE 0x1000
+
+/* OPEN Options (high-low) */
+#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test
+ * purposes; transmit data appears
+ * as receive data. (useful for
+ * testing; change: CLOSE necessary)
+ */
+#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON
+ * no RING.STATUS interrupt.
+ */
+#define DISABLE_SOFT_ERROR 0x0020 /* On SOFT_ERROR, no RING.STATUS
+ * interrupt.
+ */
+#define PASS_ADAPTER_MAC_FRAMES 0x0010 /* Passing unsupported MAC frames
+ * to system.
+ */
+#define PASS_ATTENTION_FRAMES 0x0008 /* All changed attention MAC frames are
+ * passed to the system.
+ */
+#define PAD_ROUTING_FIELD 0x0004 /* Routing field is padded to 18
+ * bytes.
+ */
+#define FRAME_HOLD 0x0002 /* Adapter waits for entire frame before
+ * initiating DMA transfer; otherwise:
+ * DMA transfer initiation if internal
+ * buffer filled.
+ */
+#define CONTENDER 0x0001 /* Adapter participates in the monitor
+ * contention process.
+ */
+#define PASS_BEACON_MAC_FRAMES 0x8000 /* Adapter passes beacon MAC frames
+ * to the system.
+ */
+#define EARLY_TOKEN_RELEASE 0x1000 /* Only valid in 16 Mbps operation;
+ * 0 = ETR. (no effect in 4 Mbps
+ * operation)
+ */
+#define COPY_ALL_MAC_FRAMES 0x0400 /* All MAC frames are copied to
+ * the system. (after OPEN: duplicate
+ * address test (DAT) MAC frame is
+ * first received frame copied to the
+ * system)
+ */
+#define COPY_ALL_NON_MAC_FRAMES 0x0200 /* All non MAC frames are copied to
+ * the system.
+ */
+#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer
+ * of each received frame; FrameSize
+ * of RPLs must contain internal
+ * BUFFER_SIZE bits for promiscuous mode.
+ */
+#define ENABLE_FULL_DUPLEX_SELECTION 0x2000
+ /* Enable the use of full-duplex
+ * settings with bits in byte 22 in
+ * ocpl. (new feature in firmware
+ * version 3.09)
+ */
+
+/* Full-duplex settings */
+#define OPEN_FULL_DUPLEX_OFF 0x0000
+#define OPEN_FULL_DUPLEX_ON 0x00c0
+#define OPEN_FULL_DUPLEX_AUTO 0x0080
+
+#define PROD_ID_SIZE 18 /* Length of product ID. */
+
+#define TX_FRAG_NUM 3 /* Number of fragments used in one TPL. */
+#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more
+ * fragments following.
+ */
+
+/* XXX is there some better way to do this? */
+#define ISA_MAX_ADDRESS 0x00ffffff
+#define PCI_MAX_ADDRESS 0xffffffff
+
+#pragma pack(1)
+typedef struct {
+ u_int16_t OPENOptions;
+ u_int8_t NodeAddr[6]; /* Adapter node address; use ROM
+ * address
+ */
+ u_int32_t GroupAddr; /* Multicast: high order
+ * bytes = 0xC000
+ */
+ u_int32_t FunctAddr; /* High order bytes = 0xC000 */
+ u_int16_t RxListSize; /* RPL size: 0 (=26), 14, 20 or
+ * 26 bytes read by the adapter.
+ * (Depending on the number of
+ * fragments/list)
+ */
+ u_int16_t TxListSize; /* TPL size */
+ u_int16_t BufSize; /* Is automatically rounded up to the
+ * nearest nK boundary.
+ */
+ u_int16_t FullDuplex;
+ u_int16_t Reserved;
+ u_int8_t TXBufMin; /* Number of adapter buffers reserved
+ * for transmission; a minimum of 2
+ * buffers must be allocated.
+ */
+ u_int8_t TXBufMax; /* Maximum number of adapter buffers
+ * for transmit; a minimum of 2 buffers
+ * must be available for receive.
+ * Default: 6
+ */
+ u_int16_t ProdIDAddr[2];/* Pointer to product ID. */
+} OPB, *OPB_Ptr;
+#pragma pack()
+
+/*
+ * SCB: adapter commands issued by the host system are started by writing
+ * CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (|SCB_REQUEST) to the SIFCMD IO
+ * register. (special case: | CMD_SYSTEM_IRQ for initialization)
+ */
+#pragma pack(1)
+typedef struct {
+ u_int16_t CMD; /* Command code */
+ u_int16_t Parm[2]; /* Pointer to Command Parameter Block */
+} SCB; /* System Command Block (32 bit physical address; big endian)*/
+#pragma pack()
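+
+/*
+ * Example (taken from tms380tr_chk_outstanding_cmds() in tms380tr.c):
+ * issuing an OPEN command through the SCB. Parm[] carries the 32 bit DMA
+ * address of the open parameter list in high-low order:
+ *
+ *	Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer);
+ *	tp->scb.Parm[0] = LOWORD(Addr);
+ *	tp->scb.Parm[1] = HIWORD(Addr);
+ *	tp->scb.CMD = OPEN;
+ *	tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
+ */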
+
+/*
+ * SSB: the adapter command return status; it can be evaluated after a
+ * COMMAND_STATUS adapter-to-system interrupt. After reading the SSB, its
+ * availability has to be signalled to the adapter by writing
+ * CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR
+ * in the SIFCMD IO register.
+ */
+#pragma pack(1)
+typedef struct {
+ u_int16_t STS; /* Status code */
+ u_int16_t Parm[3]; /* Parameter or pointer to Status Parameter
+ * Block.
+ */
+} SSB; /* System Status Block (big endian - physical address) */
+#pragma pack()
+
+typedef struct {
+ unsigned short BurnedInAddrPtr; /* Pointer to adapter burned in
+ * address. (BIA)
+ */
+ unsigned short SoftwareLevelPtr;/* Pointer to software level data. */
+ unsigned short AdapterAddrPtr; /* Pointer to adapter addresses. */
+ unsigned short AdapterParmsPtr; /* Pointer to adapter parameters. */
+ unsigned short MACBufferPtr; /* Pointer to MAC buffer. (internal) */
+ unsigned short LLCCountersPtr; /* Pointer to LLC counters. */
+ unsigned short SpeedFlagPtr; /* Pointer to data rate flag.
+ * (4/16 Mbps)
+ */
+ unsigned short AdapterRAMPtr; /* Pointer to adapter RAM found. (KB) */
+} INTPTRS; /* Adapter internal pointers */
+
+#pragma pack(1)
+typedef struct {
+ u_int8_t Line_Error; /* Line error: code violation in
+ * frame or in a token, or FCS error.
+ */
+ u_int8_t Internal_Error; /* IBM specific. (Reserved_1) */
+ u_int8_t Burst_Error;
+ u_int8_t ARI_FCI_Error; /* ARI/FCI bit zero in AMP or
+ * SMP MAC frame.
+ */
+ u_int8_t AbortDelimeters; /* IBM specific. (Reserved_2) */
+ u_int8_t Reserved_3;
+ u_int8_t Lost_Frame_Error; /* Receive of end of transmitted
+ * frame failed.
+ */
+ u_int8_t Rx_Congest_Error; /* Adapter in repeat mode has not
+ * enough buffer space to copy incoming
+ * frame.
+ */
+ u_int8_t Frame_Copied_Error; /* ARI bit not zero in frame
+ * addressed to adapter.
+ */
+ u_int8_t Frequency_Error; /* IBM specific. (Reserved_4) */
+ u_int8_t Token_Error; /* (active only in monitor station) */
+ u_int8_t Reserved_5;
+ u_int8_t DMA_Bus_Error; /* DMA bus errors not exceeding the
+ * abort thresholds.
+ */
+ u_int8_t DMA_Parity_Error; /* DMA parity errors not exceeding
+ * the abort thresholds.
+ */
+} ERRORTAB; /* Adapter error counters */
+#pragma pack()
+
+
+/*--------------------- Send and Receive definitions -------------------*/
+#pragma pack(1)
+typedef struct {
+ u_int16_t DataCount; /* Zero, even and odd values are
+ * permitted; the value is unaltered.
+ * Most significant bit set: more
+ * fragments follow; in the last
+ * fragment the most significant bit
+ * is not evaluated. (???)
+ */
+ u_int32_t DataAddr; /* Pointer to frame data fragment;
+ * even or odd.
+ */
+} Fragment;
+#pragma pack()
+
+#define MAX_FRAG_NUMBERS 9 /* Maximal number of fragments possible to use
+ * in one RPL/TPL. (depending on TI firmware
+ * version)
+ */
+
+/*
+ * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504
+ * The packet size can be one of the follows: 548, 1502, 2084, 4504, 8176,
+ * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide
+ * Page 2-27.
+ */
+#define HEADER_SIZE (1 + 1 + 6 + 6)
+#define SRC_SIZE 18
+#define MIN_DATA_SIZE 516
+#define DEFAULT_DATA_SIZE 4472
+#define MAX_DATA_SIZE 17800
+
+#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE)
+#define MIN_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE)
+#define MAX_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE)
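+
+/*
+ * For example, with the default data size:
+ *	DEFAULT_PACKET_SIZE = (1 + 1 + 6 + 6) + 18 + 4472 = 4504 bytes,
+ * which matches the 4504 byte frame size listed above.
+ */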
+
+/*
+ * Macros to deal with the frame status field.
+ */
+#define AC_NOT_RECOGNIZED 0x00
+#define GROUP_BIT 0x80
+#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8))
+#define GET_FRAME_STATUS_HIGH_AC(Fs) ((unsigned char)(((Fs) & 0xC0) >> 6))
+#define GET_FRAME_STATUS_LOW_AC(Fs) ((unsigned char)(((Fs) & 0x0C) >> 2))
+#define DIRECTED_FRAME(Context) (!((Context)->MData[2] & GROUP_BIT))
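+
+/*
+ * Usage sketch (as in tms380tr_tx_status_irq() in tms380tr.c): check whether
+ * the destination station recognized a directed frame by comparing the
+ * duplicated A/C bits of the returned frame status byte:
+ *
+ *	HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
+ *	HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte);
+ *	LowAc = GET_FRAME_STATUS_LOW_AC(HighByte);
+ *	if ((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
+ *		... the frame was not recognized by the destination ...
+ */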
+
+
+/*--------------------- Send Functions ---------------------------------*/
+/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */
+
+#define TX_VALID 0x0080 /* R: set via TRANSMIT.VALID interrupt.
+ * C: always reset to zero!
+ */
+#define TX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero.
+ * C: set to one.
+ */
+#define TX_START_FRAME 0x0020 /* R: start of a frame: 1
+ * C: unchanged.
+ */
+#define TX_END_FRAME 0x0010 /* R: end of a frame: 1
+ * C: unchanged.
+ */
+#define TX_FRAME_IRQ 0x0008 /* R: request interrupt generation
+ * after transmission.
+ * C: unchanged.
+ */
+#define TX_ERROR 0x0004 /* R: reserved.
+ * C: set to one if Error occurred.
+ */
+#define TX_INTERFRAME_WAIT 0x0004
+#define TX_PASS_CRC 0x0002 /* R: set if CRC value is already
+ * calculated. (valid only in
+ * FRAME_START TPL)
+ * C: unchanged.
+ */
+#define TX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
+ * source address and does not overwrite
+ * with the adapter node address.
+ * (valid only in FRAME_START TPL)
+ *
+ * C: unchanged.
+ */
+#define TX_STRIP_FS 0xFF00 /* R: reserved.
+ * C: if no Transmission Error,
+ * field contains copy of FS byte after
+ * stripping of frame.
+ */
+
+/*
+ * Structure of Transmit Parameter Lists (TPLs). (only one frame per TPL,
+ * but possibly multiple TPLs for one frame) The length of the TPLs has to be
+ * initialized in the OPL. (OPEN parameter list)
+ */
+#define TPL_NUM 3 /* Number of Transmit Parameter Lists.
+ * !! MUST BE >= 3 !!
+ */
+
+#pragma pack(1)
+typedef struct s_TPL TPL;
+
+struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */
+ u_int32_t NextTPLAddr; /* Pointer to next TPL in chain; if
+ * pointer is odd: this is the last
+ * TPL. Pointing to itself can cause
+ * problems!
+ */
+ volatile u_int16_t Status; /* Initialized by the adapter:
+ * CSTAT_REQUEST important: update least
+ * significant bit first! Set by the
+ * adapter: CSTAT_COMPLETE status.
+ */
+ u_int16_t FrameSize; /* Number of bytes to be transmitted
+ * as a frame including AC/FC,
+ * Destination, Source, Routing field
+ * not including CRC, FS, End Delimiter
+ * (valid only if START_FRAME bit in
+ * CSTAT nonzero); must not be zero in
+ * any list; maximum value: (BUFFER_SIZE
+ * - 8) * TX_BUF_MAX. The sum of the
+ * DataCount values in the frame data
+ * fragment list below must equal the
+ * FrameSize value in the START_FRAME
+ * TPL!
+ */
+
+ /* TPL/RPL size in OPEN parameter list depending on maximal
+ * numbers of fragments used in one parameter list.
+ */
+ Fragment FragList[TX_FRAG_NUM]; /* Maximum: nine frame fragments in one
+ * TPL. Current firmware version: 9
+ * fragments possible.
+ */
+#pragma pack()
+
+ /* Special proprietary data and precalculations */
+
+ TPL *NextTPLPtr; /* Pointer to next TPL in chain. */
+ unsigned char *MData;
+ struct sk_buff *Skb;
+ unsigned char TPLIndex;
+ volatile unsigned char BusyFlag;/* Flag: TPL busy? */
+ dma_addr_t DMABuff; /* DMA IO bus address from pci_map */
+};
+
+/* ---------------------Receive Functions-------------------------------*
+ * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values.
+ * (high-low)
+ */
+#define RX_VALID 0x0080 /* R: set; tell adapter with
+ * RECEIVE.VALID interrupt.
+ * C: reset to zero.
+ */
+#define RX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero,
+ * C: set to one.
+ */
+#define RX_START_FRAME 0x0020 /* R: must be reset to zero.
+ * C: set to one on the list.
+ */
+#define RX_END_FRAME 0x0010 /* R: must be reset to zero.
+ * C: set to one on the list
+ * that ends the frame.
+ */
+#define RX_FRAME_IRQ 0x0008 /* R: request interrupt generation
+ * after receive.
+ * C: unchanged.
+ */
+#define RX_INTERFRAME_WAIT 0x0004 /* R: after receiving a frame:
+ * interrupt and wait for a
+ * RECEIVE.CONTINUE.
+ * C: unchanged.
+ */
+#define RX_PASS_CRC 0x0002 /* R: if set, the adapter includes
+ * the CRC in data passed. (last four
+ * bytes; valid only if FRAME_START is
+ * set)
+ * C: set, if CRC is included in
+ * received data.
+ */
+#define RX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
+ * source address and does not
+ * overwrite with the adapter node
+ * address. (valid only if FRAME_START
+ * is set)
+ * C: unchanged.
+ */
+#define RX_RECEIVE_FS 0xFC00 /* R: reserved; must be reset to zero.
+ * C: on lists with START_FRAME, field
+ * contains frame status field from
+ * received frame; otherwise cleared.
+ */
+#define RX_ADDR_MATCH 0x0300 /* R: reserved; must be reset to zero.
+ * C: address match code mask.
+ */
+#define RX_STATUS_MASK 0x00FF /* Mask for receive status bits. */
+
+#define RX_INTERN_ADDR_MATCH 0x0100 /* C: internally address match. */
+#define RX_EXTERN_ADDR_MATCH 0x0200 /* C: externally matched via
+ * XMATCH/XFAIL interface.
+ */
+#define RX_INTEXT_ADDR_MATCH 0x0300 /* C: internally and externally
+ * matched.
+ */
+#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */
+
+/* Constants for Command Status Interrupt.
+ * COMMAND_REJECT status field bit functions (SSB.Parm[0])
+ */
+#define ILLEGAL_COMMAND 0x0080 /* Set if an unknown command
+ * is issued to the adapter
+ */
+#define ADDRESS_ERROR 0x0040 /* Set if any address field in
+ * the SCB is odd. (not word aligned)
+ */
+#define ADAPTER_OPEN 0x0020 /* Command issued illegal with
+ * open adapter.
+ */
+#define ADAPTER_CLOSE 0x0010 /* Command issued illegal with
+ * closed adapter.
+ */
+#define SAME_COMMAND 0x0008 /* Command issued with same command
+ * already executing.
+ */
+
+/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */
+#define NODE_ADDR_ERROR 0x0040 /* Wrong address or BIA read
+ * zero address.
+ */
+#define LIST_SIZE_ERROR 0x0020 /* If List_Size value not in 0,
+ * 14, 20, 26.
+ */
+#define BUF_SIZE_ERROR 0x0010 /* Not enough available memory for
+ * two buffers.
+ */
+#define TX_BUF_COUNT_ERROR 0x0004 /* Remaining receive buffers less than
+ * two.
+ */
+#define OPEN_ERROR 0x0002 /* Error during ring insertion; more
+ * information in bits 8-15.
+ */
+
+/* Standard return codes */
+#define GOOD_COMPLETION 0x0080 /* =OPEN_SUCCESSFULL */
+#define INVALID_OPEN_OPTION 0x0001 /* OPEN options are not supported by
+ * the adapter.
+ */
+
+/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB) */
+#define OPEN_PHASES_MASK 0xF000 /* Check only the bits 8-11. */
+#define LOBE_MEDIA_TEST 0x1000
+#define PHYSICAL_INSERTION 0x2000
+#define ADDRESS_VERIFICATION 0x3000
+#define PARTICIPATION_IN_RING_POLL 0x4000
+#define REQUEST_INITIALISATION 0x5000
+#define FULLDUPLEX_CHECK 0x6000
+
+/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */
+#define OPEN_ERROR_CODES_MASK 0x0F00 /* Check only the bits 12-15. */
+#define OPEN_FUNCTION_FAILURE 0x0100 /* Unable to transmit to itself or
+ * frames received before insertion.
+ */
+#define OPEN_SIGNAL_LOSS 0x0200 /* Signal loss condition detected at
+ * receiver.
+ */
+#define OPEN_TIMEOUT 0x0500 /* Insertion timer expired before
+ * logical insertion.
+ */
+#define OPEN_RING_FAILURE 0x0600 /* Unable to receive own ring purge
+ * MAC frames.
+ */
+#define OPEN_RING_BEACONING 0x0700 /* Beacon MAC frame received after
+ * ring insertion.
+ */
+#define OPEN_DUPLICATE_NODEADDR 0x0800 /* Other station in ring found
+ * with the same address.
+ */
+#define OPEN_REQUEST_INIT 0x0900 /* RPS present but does not respond. */
+#define OPEN_REMOVE_RECEIVED 0x0A00 /* Adapter received a remove adapter
+ * MAC frame.
+ */
+#define OPEN_FULLDUPLEX_SET 0x0D00 /* Got this with full duplex on when
+ * trying to connect to a normal ring.
+ */
+
+/* SET_BRIDGE_PARMS return codes: */
+#define BRIDGE_INVALID_MAX_LEN 0x4000 /* MAX_ROUTING_FIELD_LENGTH odd,
+ * less than 6 or > 30.
+ */
+#define BRIDGE_INVALID_SRC_RING 0x2000 /* SOURCE_RING number zero, too large
+ * or = TARGET_RING.
+ */
+#define BRIDGE_INVALID_TRG_RING 0x1000 /* TARGET_RING number zero, too large
+ * or = SOURCE_RING.
+ */
+#define BRIDGE_INVALID_BRDGE_NO 0x0800 /* BRIDGE_NUMBER too large. */
+#define BRIDGE_INVALID_OPTIONS 0x0400 /* Invalid bridge options. */
+#define BRIDGE_DIAGS_FAILED 0x0200 /* Diagnostics of TMS380SRA failed. */
+#define BRIDGE_NO_SRA 0x0100 /* The TMS380SRA does not exist in HW
+ * configuration.
+ */
+
+/*
+ * Bring Up Diagnostics error codes.
+ */
+#define BUD_INITIAL_ERROR 0x0
+#define BUD_CHECKSUM_ERROR 0x1
+#define BUD_ADAPTER_RAM_ERROR 0x2
+#define BUD_INSTRUCTION_ERROR 0x3
+#define BUD_CONTEXT_ERROR 0x4
+#define BUD_PROTOCOL_ERROR 0x5
+#define BUD_INTERFACE_ERROR 0x6
+
+/* BUD constants */
+#define BUD_MAX_RETRIES 3
+#define BUD_MAX_LOOPCNT 6
+#define BUD_TIMEOUT 3000
+
+/* Initialization constants */
+#define INIT_MAX_RETRIES 3 /* Maximum three retries. */
+#define INIT_MAX_LOOPCNT 22 /* Maximum loop counts. */
+
+/* RING STATUS field values (high/low) */
+#define SIGNAL_LOSS 0x0080 /* Loss of signal on the ring
+ * detected.
+ */
+#define HARD_ERROR 0x0040 /* Transmitting or receiving beacon
+ * frames.
+ */
+#define SOFT_ERROR 0x0020 /* Report error MAC frame
+ * transmitted.
+ */
+#define TRANSMIT_BEACON 0x0010 /* Transmitting beacon frames on the
+ * ring.
+ */
+#define LOBE_WIRE_FAULT 0x0008 /* Open or short circuit in the
+ * cable to concentrator; adapter
+ * closed.
+ */
+#define AUTO_REMOVAL_ERROR 0x0004 /* Lobe wrap test failed, deinserted;
+ * adapter closed.
+ */
+#define REMOVE_RECEIVED 0x0001 /* Received a remove ring station MAC
+ * frame request; adapter closed.
+ */
+#define COUNTER_OVERFLOW 0x8000 /* Overflow of one of the adapter's
+ * error counters; READ.ERROR.LOG.
+ */
+#define SINGLE_STATION 0x4000 /* Adapter is the only station on the
+ * ring.
+ */
+#define RING_RECOVERY 0x2000 /* Claim token MAC frames on the ring;
+ * reset after ring purge frame.
+ */
+
+#define ADAPTER_CLOSED (LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR |\
+ REMOVE_RECEIVED)
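+
+/*
+ * Example (from tms380tr_ring_status_irq() in tms380tr.c): the ring status
+ * word reported in SSB.Parm[0] is tested against this mask to decide
+ * whether the adapter has closed itself and must be reopened:
+ *
+ *	if (tp->ssb.Parm[0] & ADAPTER_CLOSED) {
+ *		tp->AdapterOpenFlag = 0;
+ *		tms380tr_open_adapter(dev);
+ *	}
+ */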
+
+/* Adapter_check_block.Status field bit assignments: */
+#define DIO_PARITY 0x8000 /* Adapter detects bad parity
+ * through direct I/O access.
+ */
+#define DMA_READ_ABORT 0x4000 /* Aborting DMA read operation
+ * from system Parm[0]: 0=timeout,
+ * 1=parity error, 2=bus error;
+ * Parm[1]: 32 bit pointer to host
+ * system address at failure.
+ */
+#define DMA_WRITE_ABORT 0x2000 /* Aborting DMA write operation
+ * to system. (parameters analogous to
+ * DMA_READ_ABORT)
+ */
+#define ILLEGAL_OP_CODE 0x1000 /* Illegal operation code in the
+ * adapter's firmware. Parm[0-2]:
+ * communications processor registers
+ * R13-R15.
+ */
+#define PARITY_ERRORS 0x0800 /* Adapter detects internal bus
+ * parity error.
+ */
+#define RAM_DATA_ERROR 0x0080 /* Valid only during RAM testing;
+ * RAM data error Parm[0-1]: 32 bit
+ * pointer to RAM location.
+ */
+#define RAM_PARITY_ERROR 0x0040 /* Valid only during RAM testing;
+ * RAM parity error Parm[0-1]: 32 bit
+ * pointer to RAM location.
+ */
+#define RING_UNDERRUN 0x0020 /* Internal DMA underrun when
+ * transmitting onto ring.
+ */
+#define INVALID_IRQ 0x0008 /* Unrecognized interrupt generated
+ * internal to adapter Parm[0-2]:
+ * adapter register R13-R15.
+ */
+#define INVALID_ERROR_IRQ 0x0004 /* Unrecognized error interrupt
+ * generated Parm[0-2]: adapter register
+ * R13-R15.
+ */
+#define INVALID_XOP 0x0002 /* Unrecognized XOP request in
+ * communication processor Parm[0-2]:
+ * adapter register R13-R15.
+ */
+#define CHECKADDR 0x05E0 /* Adapter check status information
+ * address offset.
+ */
+#define ROM_PAGE_0 0x0000 /* Adapter ROM page 0. */
+
+/*
+ * RECEIVE.STATUS interrupt result SSB values: (high-low)
+ * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0])
+ */
+#define RX_COMPLETE 0x0080 /* SSB.Parm[0]; SSB.Parm[1]: 32
+ * bit pointer to last RPL.
+ */
+#define RX_SUSPENDED 0x0040 /* SSB.Parm[0]; SSB.Parm[1]: 32
+ * bit pointer to RPL with odd
+ * forward pointer.
+ */
+
+/* Valid receive CSTAT: */
+#define RX_FRAME_CONTROL_BITS (RX_VALID | RX_START_FRAME | RX_END_FRAME | \
+ RX_FRAME_COMPLETE)
+#define VALID_SINGLE_BUFFER_FRAME (RX_START_FRAME | RX_END_FRAME | \
+ RX_FRAME_COMPLETE)
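+
+/*
+ * Usage sketch (as in tms380tr_rcv_status_irq() in tms380tr.c): a received
+ * frame fits completely into a single buffer when all three bits are set
+ * in the RPL status:
+ *
+ *	if ((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
+ *	    == VALID_SINGLE_BUFFER_FRAME)
+ *		... frame is complete in this buffer ...
+ */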
+
+typedef enum SKB_STAT SKB_STAT;
+enum SKB_STAT {
+ SKB_UNAVAILABLE,
+ SKB_DMA_DIRECT,
+ SKB_DATA_COPY
+};
+
+/* Receive Parameter List (RPL) The length of the RPLs has to be initialized
+ * in the OPL. (OPEN parameter list)
+ */
+#define RPL_NUM 3
+
+#define RX_FRAG_NUM 1 /* Maximal number of used fragments in one RPL.
+ * (up to firmware v2.24: 3, now: up to 9)
+ */
+
+#pragma pack(1)
+typedef struct s_RPL RPL;
+struct s_RPL { /* Receive Parameter List */
+ u_int32_t NextRPLAddr; /* Pointer to next RPL in chain
+ * (normalized = physical 32 bit
+ * address) if pointer is odd: this
+ * is last RPL. Pointing to itself can
+ * cause problems!
+ */
+ volatile u_int16_t Status; /* Set at creation of the Receive Parameter
+ * List; RECEIVE_CSTAT_COMPLETE is set by
+ * adapter in lists that start or end
+ * a frame.
+ */
+ volatile u_int16_t FrameSize; /* Number of bytes received as a
+ * frame including AC/FC, Destination,
+ * Source, Routing field not including
+ * CRC, FS (Frame Status), End Delimiter
+ * (valid only if START_FRAME bit in
+ * CSTAT nonzero); must not be zero in
+ * any list; maximum value: (BUFFER_SIZE
+ * - 8) * TX_BUF_MAX. The sum of the
+ * DataCount values in the frame data
+ * fragment list below must equal the
+ * FrameSize value in the START_FRAME
+ * TPL!
+ */
+
+ /* TPL/RPL size in OPEN parameter list depending on maximal numbers
+ * of fragments used in one parameter list.
+ */
+ Fragment FragList[RX_FRAG_NUM]; /* Maximum: nine frame fragments in
+ * one RPL. Current firmware version:
+ * 9 fragments possible.
+ */
+#pragma pack()
+
+ /* Special proprietary data and precalculations. */
+ RPL *NextRPLPtr; /* Logical pointer to next RPL in chain. */
+ unsigned char *MData;
+ struct sk_buff *Skb;
+ SKB_STAT SkbStat;
+ int RPLIndex;
+ dma_addr_t DMABuff; /* DMA IO bus address from pci_map */
+};
+
+/* Information that needs to be kept for each board. */
+typedef struct net_local {
+#pragma pack(1)
+ IPB ipb; /* Initialization Parameter Block. */
+ SCB scb; /* System Command Block: system to adapter
+ * communication.
+ */
+ SSB ssb; /* System Status Block: adapter to system
+ * communication.
+ */
+ OPB ocpl; /* Open Options Parameter Block. */
+
+ ERRORTAB errorlogtable; /* Adapter statistic error counters.
+ * (read from adapter memory)
+ */
+ unsigned char ProductID[PROD_ID_SIZE + 1]; /* Product ID */
+#pragma pack()
+
+ TPL Tpl[TPL_NUM];
+ TPL *TplFree;
+ TPL *TplBusy;
+ unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE];
+
+ RPL Rpl[RPL_NUM];
+ RPL *RplHead;
+ RPL *RplTail;
+ unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
+
+ struct pci_dev *pdev;
+ int DataRate;
+ unsigned char ScbInUse;
+ unsigned short CMDqueue;
+
+ unsigned long AdapterOpenFlag:1;
+ unsigned long AdapterVirtOpenFlag:1;
+ unsigned long OpenCommandIssued:1;
+ unsigned long TransmitCommandActive:1;
+ unsigned long TransmitHaltScheduled:1;
+ unsigned long HaltInProgress:1;
+ unsigned long LobeWireFaultLogged:1;
+ unsigned long ReOpenInProgress:1;
+ unsigned long Sleeping:1;
+
+ unsigned long LastOpenStatus;
+ unsigned short CurrentRingStatus;
+ unsigned long MaxPacketSize;
+
+ unsigned long StartTime;
+ unsigned long LastSendTime;
+
+ struct tr_statistics MacStat; /* MAC statistics structure */
+
+ unsigned long dmalimit; /* the max DMA address (i.e., ISA) */
+ dma_addr_t dmabuffer; /* the DMA bus address corresponding to
+ priv. May differ from virt_to_bus()
+ on architectures with an IOMMU (Alpha) */
+
+ struct timer_list timer;
+
+ wait_queue_head_t wait_for_tok_int;
+
+ INTPTRS intptrs; /* Internal adapter pointers. Must be read
+ * before the OPEN command.
+ */
+ unsigned short (*setnselout)(struct net_device *);
+ unsigned short (*sifreadb)(struct net_device *, unsigned short);
+ void (*sifwriteb)(struct net_device *, unsigned short, unsigned short);
+ unsigned short (*sifreadw)(struct net_device *, unsigned short);
+ void (*sifwritew)(struct net_device *, unsigned short, unsigned short);
+
+ spinlock_t lock; /* SMP protection */
+ void *tmspriv;
+} NET_LOCAL;
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_TMS380TR_H */
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
new file mode 100644
index 000000000000..37ddb5c2bec3
--- /dev/null
+++ b/drivers/net/tokenring/tmspci.c
@@ -0,0 +1,267 @@
+/*
+ * tmspci.c: A generic network driver for TMS380-based PCI token ring cards.
+ *
+ * Written 1999 by Adam Fritzler
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver module supports the following cards:
+ * - SysKonnect TR4/16(+) PCI (SK-4590)
+ * - SysKonnect TR4/16 PCI (SK-4591)
+ * - Compaq TR 4/16 PCI
+ * - Thomas-Conrad TC4048 4/16 PCI
+ * - 3Com 3C339 Token Link Velocity
+ *
+ * Maintainer(s):
+ * AF Adam Fritzler mid@auk.cx
+ *
+ * Modification History:
+ * 30-Dec-99 AF Split off from the tms380tr driver.
+ * 22-Jan-00 AF Updated to use indirect read/writes
+ * 23-Nov-00 JG New PCI API, cleanups
+ *
+ * TODO:
+ * 1. See if we can use MMIO instead of port accesses
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/trdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "tms380tr.h"
+
+static char version[] __devinitdata =
+"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n";
+
+#define TMS_PCI_IO_EXTENT 32
+
+struct card_info {
+ unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */
+ char *name;
+};
+
+static struct card_info card_info_table[] = {
+ { {0x03, 0x01}, "Compaq 4/16 TR PCI"},
+ { {0x03, 0x01}, "SK NET TR 4/16 PCI"},
+ { {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"},
+ { {0x03, 0x01}, "3Com Token Link Velocity"},
+};
+
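+/* The driver_data value in each entry below is an index into
+ * card_info_table above (see its use in tms_pci_attach). */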
+static struct pci_device_id tmspci_pci_tbl[] = {
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl);
+
+MODULE_LICENSE("GPL");
+
+static void tms_pci_read_eeprom(struct net_device *dev);
+static unsigned short tms_pci_setnselout_pins(struct net_device *dev);
+
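+/*
+ * SIF (System Interface) register accessors. The generic tms380tr core
+ * never touches the hardware directly; it goes through the sifreadb/
+ * sifreadw/sifwriteb/sifwritew function pointers in struct net_local,
+ * which tms_pci_attach() points at the port I/O wrappers below.
+ */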
+static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg)
+{
+ return inb(dev->base_addr + reg);
+}
+
+static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg)
+{
+ return inw(dev->base_addr + reg);
+}
+
+static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outb(val, dev->base_addr + reg);
+}
+
+static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
+{
+ outw(val, dev->base_addr + reg);
+}
+
+static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int versionprinted;
+ struct net_device *dev;
+ struct net_local *tp;
+ int i, ret;
+ unsigned int pci_irq_line;
+ unsigned long pci_ioaddr;
+ struct card_info *cardinfo = &card_info_table[ent->driver_data];
+
+ if (versionprinted++ == 0)
+ printk("%s", version);
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+
+ /* pci_resource_start() already strips the I/O space marker bit. */
+ pci_irq_line = pdev->irq;
+ pci_ioaddr = pci_resource_start (pdev, 0);
+
+ /* At this point we have found a valid card. */
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+
+ if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) {
+ ret = -EBUSY;
+ goto err_out_trdev;
+ }
+
+ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+
+ dev->base_addr = pci_ioaddr;
+ dev->irq = pci_irq_line;
+ dev->dma = 0;
+
+ printk("%s: %s\n", dev->name, cardinfo->name);
+ printk("%s: IO: %#4lx IRQ: %d\n",
+ dev->name, dev->base_addr, dev->irq);
+
+ tms_pci_read_eeprom(dev);
+
+ printk("%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (i = 1; i < 6; i++)
+ printk(":%2.2x", dev->dev_addr[i]);
+ printk("\n");
+
+ ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+ if (ret) {
+ printk("%s: unable to get memory for dev->priv.\n", dev->name);
+ goto err_out_irq;
+ }
+
+ tp = dev->priv;
+ tp->setnselout = tms_pci_setnselout_pins;
+
+ tp->sifreadb = tms_pci_sifreadb;
+ tp->sifreadw = tms_pci_sifreadw;
+ tp->sifwriteb = tms_pci_sifwriteb;
+ tp->sifwritew = tms_pci_sifwritew;
+
+ memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1);
+
+ tp->tmspriv = cardinfo;
+
+ dev->open = tms380tr_open;
+ dev->stop = tms380tr_close;
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto err_out_tmsdev;
+
+ return 0;
+
+err_out_tmsdev:
+ pci_set_drvdata(pdev, NULL);
+ tmsdev_term(dev);
+err_out_irq:
+ free_irq(pdev->irq, dev);
+err_out_region:
+ release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
+err_out_trdev:
+ free_netdev(dev);
+ return ret;
+}
+
+/*
+ * Reads the MAC address from adapter RAM, which should have read it
+ * from the onboard ROM.
+ *
+ * Calling this on a board that does not support it can be a very
+ * dangerous thing. The Madge board, for instance, will lock your
+ * machine hard when this is called. Luckily, it's supported in a
+ * separate driver. --ASF
+ */
+static void tms_pci_read_eeprom(struct net_device *dev)
+{
+ int i;
+
+ /* Address: 0000:0000 */
+ tms_pci_sifwritew(dev, 0, SIFADX);
+ tms_pci_sifwritew(dev, 0, SIFADR);
+
+ /* Read six byte MAC address data */
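+ /* Reading SIFINC auto-increments the SIF address register; each
+ * MAC address byte is taken from the upper half of the word read
+ * (hence the >> 8 below). */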
+ dev->addr_len = 6;
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8;
+}
+
+static unsigned short tms_pci_setnselout_pins(struct net_device *dev)
+{
+ unsigned short val = 0;
+ struct net_local *tp = dev->priv;
+ struct card_info *cardinfo = tp->tmspriv;
+
+ if(tp->DataRate == SPEED_4)
+ val |= cardinfo->nselout[0]; /* Set 4Mbps */
+ else
+ val |= cardinfo->nselout[1]; /* Set 16Mbps */
+ return val;
+}
+
+static void __devexit tms_pci_detach (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ BUG();
+ unregister_netdev(dev);
+ release_region(dev->base_addr, TMS_PCI_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver tms_pci_driver = {
+ .name = "tmspci",
+ .id_table = tmspci_pci_tbl,
+ .probe = tms_pci_attach,
+ .remove = __devexit_p(tms_pci_detach),
+};
+
+static int __init tms_pci_init (void)
+{
+ return pci_register_driver(&tms_pci_driver);
+}
+
+static void __exit tms_pci_rmmod (void)
+{
+ pci_unregister_driver (&tms_pci_driver);
+}
+
+module_init(tms_pci_init);
+module_exit(tms_pci_rmmod);
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c tmspci.c"
+ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/tokenring/ -c tmspci.c"
+ * c-set-style "K&R"
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
new file mode 100644
index 000000000000..5db694c4eb02
--- /dev/null
+++ b/drivers/net/tulip/21142.c
@@ -0,0 +1,245 @@
+/*
+ drivers/net/tulip/21142.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "tulip.h"
+
+
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
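+/* Per-media SIA register values (CSR13/14/15), indexed by dev->if_port. */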
+
+
+/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
+ of available transceivers. */
+void t21142_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr12 = ioread32(ioaddr + CSR12);
+ int next_tick = 60*HZ;
+ int new_csr6 = 0;
+
+ if (tulip_debug > 2)
+ printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
+ dev->name, csr12, medianame[dev->if_port]);
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ if (tulip_check_duplex(dev) < 0) {
+ netif_carrier_off(dev);
+ next_tick = 3*HZ;
+ } else {
+ netif_carrier_on(dev);
+ next_tick = 60*HZ;
+ }
+ } else if (tp->nwayset) {
+ /* Don't screw up a negotiated session! */
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ } else if (tp->medialock) {
+ ;
+ } else if (dev->if_port == 3) {
+ if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+ "trying NWay.\n", dev->name, csr12);
+ t21142_start_nway(dev);
+ next_tick = 3*HZ;
+ }
+ } else if ((csr12 & 0x7000) != 0x5000) {
+ /* Negotiation failed. Search media types. */
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+ dev->name, csr12);
+ if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ new_csr6 = 0x82420000;
+ dev->if_port = 0;
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(0x0003FFFF, ioaddr + CSR14);
+ iowrite16(t21142_csr15[dev->if_port], ioaddr + CSR15);
+ iowrite32(t21142_csr13[dev->if_port], ioaddr + CSR13);
+ } else {
+ /* Select 100mbps port to check for link beat. */
+ new_csr6 = 0x83860000;
+ dev->if_port = 3;
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(0x0003FF7F, ioaddr + CSR14);
+ iowrite16(8, ioaddr + CSR15);
+ iowrite32(1, ioaddr + CSR13);
+ }
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ if (new_csr6 != (tp->csr6 & ~0x00D5)) {
+ tp->csr6 &= 0x00D5;
+ tp->csr6 |= new_csr6;
+ iowrite32(0x0301, ioaddr + CSR12);
+ tulip_restart_rxtx(tp);
+ }
+ next_tick = 3*HZ;
+ }
+
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+void t21142_start_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
+ ((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
+
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
+ dev->name, csr14);
+ iowrite32(0x0001, ioaddr + CSR13);
+ udelay(100);
+ iowrite32(csr14, ioaddr + CSR14);
+ tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ if (tp->mtable && tp->mtable->csr15dir) {
+ iowrite32(tp->mtable->csr15dir, ioaddr + CSR15);
+ iowrite32(tp->mtable->csr15val, ioaddr + CSR15);
+ } else
+ iowrite16(0x0008, ioaddr + CSR15);
+ iowrite32(0x1301, ioaddr + CSR12); /* Trigger NWAY. */
+}
+
+
+
+void t21142_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr12 = ioread32(ioaddr + CSR12);
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+ "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
+
+ /* If NWay finished and we have a negotiated partner capability. */
+ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+ int setup_done = 0;
+ int negotiated = tp->sym_advertise & (csr12 >> 16);
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ tp->nwayset = 0;
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+ tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+ if (tulip_debug > 1) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+ dev->name, medianame[dev->if_port], tp->sym_advertise,
+ tp->lpar, negotiated);
+ else
+ printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+ " link beat status %4.4x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ }
+
+ if (tp->mtable) {
+ int i;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == dev->if_port) {
+ int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65));
+ tp->cur_index = i;
+ tulip_select_media(dev, startup);
+ setup_done = 1;
+ break;
+ }
+ }
+ if ( ! setup_done) {
+ tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff);
+ if (tp->full_duplex)
+ tp->csr6 |= 0x0200;
+ iowrite32(1, ioaddr + CSR13);
+ }
+#if 0 /* Restart shouldn't be needed. */
+ iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
+ dev->name, ioread32(ioaddr + CSR5));
+#endif
+ tulip_start_rxtx(tp);
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+ dev->name, tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
+ } else if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) ||
+ (tp->nway && (csr5 & (TPLnkFail)))) {
+ /* Link blew? Maybe restart NWay. */
+ del_timer_sync(&tp->timer);
+ t21142_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ t21142_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 5)
+ iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+ } else if (dev->if_port == 0 || dev->if_port == 4) {
+ if ((csr12 & 4) == 0)
+ printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+ dev->name);
+ } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ if (tulip_debug)
+ printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+ dev->name);
+ dev->if_port = 0;
+ } else if (tp->nwayset) {
+ if (tulip_debug)
+ printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+ dev->name, medianame[dev->if_port], tp->csr6);
+ } else { /* 100mbps link beat good. */
+ if (tulip_debug)
+ printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+ dev->name);
+ dev->if_port = 3;
+ tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+ iowrite32(0x0003FF7F, ioaddr + CSR14);
+ iowrite32(0x0301, ioaddr + CSR12);
+ tulip_restart_rxtx(tp);
+ }
+}
+
+
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
new file mode 100644
index 000000000000..e2cdaf876201
--- /dev/null
+++ b/drivers/net/tulip/Kconfig
@@ -0,0 +1,166 @@
+#
+# Tulip family network device configuration
+#
+
+menu "Tulip family network device support"
+ depends on NET_ETHERNET && (PCI || EISA || CARDBUS)
+
+config NET_TULIP
+ bool "\"Tulip\" family network device support"
+ help
+ This selects the "Tulip" family of EISA/PCI network cards.
+
+config DE2104X
+ tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)"
+ depends on NET_TULIP && PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21040 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, below. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called de2104x.
+
+config TULIP
+ tristate "DECchip Tulip (dc2114x) PCI support"
+ depends on NET_TULIP && PCI
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21140 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, below. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called tulip.
+
+config TULIP_MWI
+ bool "New bus configuration (EXPERIMENTAL)"
+ depends on TULIP && EXPERIMENTAL
+ help
+ This configures your Tulip card specifically for the card and the
+ cache line size of the system you are using.
+
+ This is experimental code, not yet tested on many boards.
+
+ If unsure, say N.
+
+config TULIP_MMIO
+ bool "Use PCI shared mem for NIC registers"
+ depends on TULIP
+ help
+ Use PCI shared memory for the NIC registers, rather than going through
+ the Tulip's PIO (programmed I/O ports). Faster, but could produce
+ obscure bugs if your mainboard has memory controller timing issues.
+ If in doubt, say N.
+
+config TULIP_NAPI
+ bool "Use NAPI RX polling "
+ depends on TULIP
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
+
+config TULIP_NAPI_HW_MITIGATION
+ bool "Use Interrupt Mitigation "
+ depends on TULIP_NAPI
+ ---help---
+ Use hardware to reduce RX interrupts. Not strictly necessary since
+ NAPI reduces RX interrupts by itself, but this further reduces RX
+ interrupts even at low traffic levels, at the cost of a small latency.
+
+ If in doubt, say Y.
+
+config DE4X5
+ tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
+ depends on NET_TULIP && (PCI || EISA)
+ select CRC32
+ ---help---
+ This is support for the DIGITAL series of PCI/EISA Ethernet cards.
+ These include the DE425, DE434, DE435, DE450 and DE500 models. If
+ you have a network card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. More specific
+ information is contained in
+ <file:Documentation/networking/de4x5.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called de4x5.
+
+config WINBOND_840
+ tristate "Winbond W89c840 Ethernet support"
+ depends on NET_TULIP && PCI
+ select CRC32
+ select MII
+ help
+ This driver is for the Winbond W89c840 chip. It also works with
+ the TX9882 chip on the Compex RL100-ATX board.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/drivers.html>.
+
+config DM9102
+ tristate "Davicom DM910x/DM980x support"
+ depends on NET_TULIP && PCI
+ select CRC32
+ ---help---
+ This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
+ Davicom (<http://www.davicom.com.tw/>). If you have such a network
+ (Ethernet) card, say Y. Some information is contained in the file
+ <file:Documentation/networking/dmfe.txt>.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called dmfe.
+
+config PCMCIA_XIRCOM
+ tristate "Xircom CardBus support (new driver)"
+ depends on NET_TULIP && CARDBUS
+ ---help---
+ This driver is for the Digital "Tulip" Ethernet CardBus adapters.
+ It should work with most DEC 21*4*-based chips/ethercards, as well
+ as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
+ ASIX.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called xircom_cb. If unsure, say N.
+
+config PCMCIA_XIRTULIP
+ tristate "Xircom Tulip-like CardBus support (old driver)"
+ depends on NET_TULIP && CARDBUS && BROKEN_ON_SMP
+ select CRC32
+ ---help---
+ This driver is for the Digital "Tulip" Ethernet CardBus adapters.
+ It should work with most DEC 21*4*-based chips/ethercards, as well
+ as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
+ ASIX.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module will
+ be called xircom_tulip_cb. If unsure, say N.
+
+endmenu
+
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
new file mode 100644
index 000000000000..8bb9b4683979
--- /dev/null
+++ b/drivers/net/tulip/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Linux "Tulip" family network device drivers.
+#
+
+obj-$(CONFIG_PCMCIA_XIRTULIP) += xircom_tulip_cb.o
+obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
+obj-$(CONFIG_DM9102) += dmfe.o
+obj-$(CONFIG_WINBOND_840) += winbond-840.o
+obj-$(CONFIG_DE2104X) += de2104x.o
+obj-$(CONFIG_TULIP) += tulip.o
+obj-$(CONFIG_DE4X5) += de4x5.o
+
+# Declare multi-part drivers.
+
+tulip-objs := eeprom.o interrupt.o media.o \
+ timer.o tulip_core.o \
+ 21142.o pnic.o pnic2.o
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
new file mode 100644
index 000000000000..008e19210e66
--- /dev/null
+++ b/drivers/net/tulip/de2104x.c
@@ -0,0 +1,2187 @@
+/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
+/*
+ Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
+
+ Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
+ Written/copyright 1994-2001 by Donald Becker. [tulip.c]
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+ TODO, in rough priority order:
+ * Support forcing media type with a module parameter,
+ like dl2k.c/sundance.c
+ * Constants (module parms?) for Rx work limit
+ * Complete reset on PciErr
+ * Jumbo frames / dev->change_mtu
+ * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
+ * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
+ * Implement Tx software interrupt mitigation via
+ Tx descriptor bit
+
+ */
+
+#define DRV_NAME "de2104x"
+#define DRV_VERSION "0.7"
+#define DRV_RELDATE "Mar 17, 2004"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/compiler.h>
+#include <linux/rtnetlink.h>
+#include <linux/crc32.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = -1;
+module_param (debug, int, 0);
+MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+ || defined(__sparc__) || defined(__ia64__) \
+ || defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+module_param (rx_copybreak, int, 0);
+MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
+
+#define PFX DRV_NAME ": "
+
+#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+#define DE_RX_RING_SIZE 64
+#define DE_TX_RING_SIZE 64
+#define DE_RING_BYTES \
+ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
+ (sizeof(struct de_desc) * DE_TX_RING_SIZE))
+#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
+#define TX_BUFFS_AVAIL(CP) \
+ (((CP)->tx_tail <= (CP)->tx_head) ? \
+ (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
+ (CP)->tx_tail - (CP)->tx_head - 1)
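+/* The ring sizes are powers of two, so NEXT_TX()/NEXT_RX() wrap by masking.
+ TX_BUFFS_AVAIL() keeps one descriptor slot unused, so tx_head == tx_tail
+ always means "ring empty" rather than "ring full". */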
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define RX_OFFSET 2
+
+#define DE_SETUP_SKB ((struct sk_buff *) 1)
+#define DE_DUMMY_SKB ((struct sk_buff *) 2)
+#define DE_SETUP_FRAME_WORDS 96
+#define DE_EEPROM_WORDS 256
+#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
+#define DE_MAX_MEDIA 5
+
+#define DE_MEDIA_TP_AUTO 0
+#define DE_MEDIA_BNC 1
+#define DE_MEDIA_AUI 2
+#define DE_MEDIA_TP 3
+#define DE_MEDIA_TP_FD 4
+#define DE_MEDIA_INVALID DE_MAX_MEDIA
+#define DE_MEDIA_FIRST 0
+#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
+#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
+
+#define DE_TIMER_LINK (60 * HZ)
+#define DE_TIMER_NO_LINK (5 * HZ)
+
+#define DE_NUM_REGS 16
+#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
+#define DE_REGS_VER 1
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+#define DE_UNALIGNED_16(a) (u16)(get_unaligned((u16 *)(a)))
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+enum {
+ /* NIC registers */
+ BusMode = 0x00,
+ TxPoll = 0x08,
+ RxPoll = 0x10,
+ RxRingAddr = 0x18,
+ TxRingAddr = 0x20,
+ MacStatus = 0x28,
+ MacMode = 0x30,
+ IntrMask = 0x38,
+ RxMissed = 0x40,
+ ROMCmd = 0x48,
+ CSR11 = 0x58,
+ SIAStatus = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+ PCIPM = 0x40,
+
+ /* BusMode bits */
+ CmdReset = (1 << 0),
+ CacheAlign16 = 0x00008000,
+ BurstLen4 = 0x00000400,
+
+ /* Rx/TxPoll bits */
+ NormalTxPoll = (1 << 0),
+ NormalRxPoll = (1 << 0),
+
+ /* Tx/Rx descriptor status bits */
+ DescOwn = (1 << 31),
+ RxError = (1 << 15),
+ RxErrLong = (1 << 7),
+ RxErrCRC = (1 << 1),
+ RxErrFIFO = (1 << 0),
+ RxErrRunt = (1 << 11),
+ RxErrFrame = (1 << 14),
+ RingEnd = (1 << 25),
+ FirstFrag = (1 << 29),
+ LastFrag = (1 << 30),
+ TxError = (1 << 15),
+ TxFIFOUnder = (1 << 1),
+ TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
+ TxMaxCol = (1 << 8),
+ TxOWC = (1 << 9),
+ TxJabber = (1 << 14),
+ SetupFrame = (1 << 27),
+ TxSwInt = (1 << 31),
+
+ /* MacStatus bits */
+ IntrOK = (1 << 16),
+ IntrErr = (1 << 15),
+ RxIntr = (1 << 6),
+ RxEmpty = (1 << 7),
+ TxIntr = (1 << 0),
+ TxEmpty = (1 << 2),
+ PciErr = (1 << 13),
+ TxState = (1 << 22) | (1 << 21) | (1 << 20),
+ RxState = (1 << 19) | (1 << 18) | (1 << 17),
+ LinkFail = (1 << 12),
+ LinkPass = (1 << 4),
+ RxStopped = (1 << 8),
+ TxStopped = (1 << 1),
+
+ /* MacMode bits */
+ TxEnable = (1 << 13),
+ RxEnable = (1 << 1),
+ RxTx = TxEnable | RxEnable,
+ FullDuplex = (1 << 9),
+ AcceptAllMulticast = (1 << 7),
+ AcceptAllPhys = (1 << 6),
+ BOCnt = (1 << 5),
+ MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
+ RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
+
+ /* ROMCmd bits */
+ EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
+ EE_CS = 0x01, /* EEPROM chip select. */
+ EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
+ EE_WRITE_0 = 0x01,
+ EE_WRITE_1 = 0x05,
+ EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
+ EE_ENB = (0x4800 | EE_CS),
+
+ /* The EEPROM commands include the always-set leading bit. */
+ EE_READ_CMD = 6,
+
+ /* RxMissed bits */
+ RxMissedOver = (1 << 16),
+ RxMissedMask = 0xffff,
+
+ /* SROM-related bits */
+ SROMC0InfoLeaf = 27,
+ MediaBlockMask = 0x3f,
+ MediaCustomCSRs = (1 << 6),
+
+ /* PCIPM bits */
+ PM_Sleep = (1 << 31),
+ PM_Snooze = (1 << 30),
+ PM_Mask = PM_Sleep | PM_Snooze,
+
+ /* SIAStatus bits */
+ NWayState = (1 << 14) | (1 << 13) | (1 << 12),
+ NWayRestart = (1 << 12),
+ NonselPortActive = (1 << 9),
+ LinkFailStatus = (1 << 2),
+ NetCxnErr = (1 << 1),
+};
+
+static const u32 de_intr_mask =
+ IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
+ LinkPass | LinkFail | PciErr;
+
+/*
+ * Set the programmable burst length to 4 longwords for all chips:
+ * DMA errors result without these values. Cache alignment is 16 longwords.
+ */
+static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
+
+struct de_srom_media_block {
+ u8 opts;
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+} __attribute__((packed));
+
+struct de_srom_info_leaf {
+ u16 default_media;
+ u8 n_blocks;
+ u8 unused;
+} __attribute__((packed));
+
+struct de_desc {
+ u32 opts1;
+ u32 opts2;
+ u32 addr1;
+ u32 addr2;
+};
+
+struct media_info {
+ u16 type; /* DE_MEDIA_xxx */
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+struct de_private {
+ unsigned tx_head;
+ unsigned tx_tail;
+ unsigned rx_tail;
+
+ void __iomem *regs;
+ struct net_device *dev;
+ spinlock_t lock;
+
+ struct de_desc *rx_ring;
+ struct de_desc *tx_ring;
+ struct ring_info tx_skb[DE_TX_RING_SIZE];
+ struct ring_info rx_skb[DE_RX_RING_SIZE];
+ unsigned rx_buf_sz;
+ dma_addr_t ring_dma;
+
+ u32 msg_enable;
+
+ struct net_device_stats net_stats;
+
+ struct pci_dev *pdev;
+
+ u16 setup_frame[DE_SETUP_FRAME_WORDS];
+
+ u32 media_type;
+ u32 media_supported;
+ u32 media_advertise;
+ struct media_info media[DE_MAX_MEDIA];
+ struct timer_list media_timer;
+
+ u8 *ee_data;
+ unsigned board_idx;
+ unsigned de21040 : 1;
+ unsigned media_lock : 1;
+};
+
+
+static void de_set_rx_mode (struct net_device *dev);
+static void de_tx (struct de_private *de);
+static void de_clean_rings (struct de_private *de);
+static void de_media_interrupt (struct de_private *de, u32 status);
+static void de21040_media_timer (unsigned long data);
+static void de21041_media_timer (unsigned long data);
+static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
+
+
+static struct pci_device_id de_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, de_pci_tbl);
+
+static const char * const media_name[DE_MAX_MEDIA] = {
+ "10baseT auto",
+ "BNC",
+ "AUI",
+ "10baseT-HD",
+ "10baseT-FD"
+};
+
+/* 21040 transceiver register settings:
+ * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */
+static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
+static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
+static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
+
+/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+#define dr32(reg) readl(de->regs + (reg))
+#define dw32(reg,val) writel((val), de->regs + (reg))
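+/* MMIO register accessors relative to de->regs; both macros expect a local
+ variable named "de" to be in scope, as in all of the functions below. */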
+
+
+static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
+ u32 status, u32 len)
+{
+ if (netif_msg_rx_err (de))
+ printk (KERN_DEBUG
+ "%s: rx err, slot %d status 0x%x len %d\n",
+ de->dev->name, rx_tail, status, len);
+
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (netif_msg_rx_err(de))
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ de->dev->name, status);
+ de->net_stats.rx_length_errors++;
+ }
+ } else if (status & RxError) {
+ /* There was a fatal error. */
+ de->net_stats.rx_errors++; /* end of a packet. */
+ if (status & 0x0890) de->net_stats.rx_length_errors++;
+ if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
+ if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
+ }
+}
+
+static void de_rx (struct de_private *de)
+{
+ unsigned rx_tail = de->rx_tail;
+ unsigned rx_work = DE_RX_RING_SIZE;
+ unsigned drop = 0;
+ int rc;
+
+ while (rx_work--) {
+ u32 status, len;
+ dma_addr_t mapping;
+ struct sk_buff *skb, *copy_skb;
+ unsigned copying_skb, buflen;
+
+ skb = de->rx_skb[rx_tail].skb;
+ if (!skb)
+ BUG();
+ rmb();
+ status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ len = ((status >> 16) & 0x7ff) - 4;
+ mapping = de->rx_skb[rx_tail].mapping;
+
+ if (unlikely(drop)) {
+ de->net_stats.rx_dropped++;
+ goto rx_next;
+ }
+
+ if (unlikely((status & 0x38008300) != 0x0300)) {
+ de_rx_err_acct(de, rx_tail, status, len);
+ goto rx_next;
+ }
+
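+ /* Copybreak: packets no larger than rx_copybreak are copied into a
+ freshly allocated skb so the full-size ring buffer can be reused
+ in place; larger packets hand the ring buffer itself up the stack
+ and a replacement buffer is mapped into the ring. */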
+ copying_skb = (len <= rx_copybreak);
+
+ if (unlikely(netif_msg_rx_status(de)))
+ printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
+ de->dev->name, rx_tail, status, len,
+ copying_skb);
+
+ buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
+ copy_skb = dev_alloc_skb (buflen);
+ if (unlikely(!copy_skb)) {
+ de->net_stats.rx_dropped++;
+ drop = 1;
+ rx_work = 100;
+ goto rx_next;
+ }
+ copy_skb->dev = de->dev;
+
+ if (!copying_skb) {
+ pci_unmap_single(de->pdev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+ skb_put(skb, len);
+
+ mapping =
+ de->rx_skb[rx_tail].mapping =
+ pci_map_single(de->pdev, copy_skb->tail,
+ buflen, PCI_DMA_FROMDEVICE);
+ de->rx_skb[rx_tail].skb = copy_skb;
+ } else {
+ pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ skb_reserve(copy_skb, RX_OFFSET);
+ memcpy(skb_put(copy_skb, len), skb->tail, len);
+
+ pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ skb->protocol = eth_type_trans (skb, de->dev);
+
+ de->net_stats.rx_packets++;
+ de->net_stats.rx_bytes += skb->len;
+ de->dev->last_rx = jiffies;
+ rc = netif_rx (skb);
+ if (rc == NET_RX_DROP)
+ drop = 1;
+
+rx_next:
+ de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
+ if (rx_tail == (DE_RX_RING_SIZE - 1))
+ de->rx_ring[rx_tail].opts2 =
+ cpu_to_le32(RingEnd | de->rx_buf_sz);
+ else
+ de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
+ de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+ rx_tail = NEXT_RX(rx_tail);
+ }
+
+ if (!rx_work)
+ printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+
+ de->rx_tail = rx_tail;
+}
+
+static irqreturn_t de_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct de_private *de = dev->priv;
+ u32 status;
+
+ status = dr32(MacStatus);
+ if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
+ return IRQ_NONE;
+
+ if (netif_msg_intr(de))
+ printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
+ dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
+
+ dw32(MacStatus, status);
+
+ if (status & (RxIntr | RxEmpty)) {
+ de_rx(de);
+ if (status & RxEmpty)
+ dw32(RxPoll, NormalRxPoll);
+ }
+
+ spin_lock(&de->lock);
+
+ if (status & (TxIntr | TxEmpty))
+ de_tx(de);
+
+ if (status & (LinkPass | LinkFail))
+ de_media_interrupt(de, status);
+
+ spin_unlock(&de->lock);
+
+ if (status & PciErr) {
+ u16 pci_status;
+
+ pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
+ printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
+ dev->name, status, pci_status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void de_tx (struct de_private *de)
+{
+ unsigned tx_head = de->tx_head;
+ unsigned tx_tail = de->tx_tail;
+
+ while (tx_tail != tx_head) {
+ struct sk_buff *skb;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ skb = de->tx_skb[tx_tail].skb;
+ if (!skb)
+ BUG();
+ if (unlikely(skb == DE_DUMMY_SKB))
+ goto next;
+
+ if (unlikely(skb == DE_SETUP_SKB)) {
+ pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
+ sizeof(de->setup_frame), PCI_DMA_TODEVICE);
+ goto next;
+ }
+
+ pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+
+ if (status & LastFrag) {
+ if (status & TxError) {
+ if (netif_msg_tx_err(de))
+ printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
+ de->dev->name, status);
+ de->net_stats.tx_errors++;
+ if (status & TxOWC)
+ de->net_stats.tx_window_errors++;
+ if (status & TxMaxCol)
+ de->net_stats.tx_aborted_errors++;
+ if (status & TxLinkFail)
+ de->net_stats.tx_carrier_errors++;
+ if (status & TxFIFOUnder)
+ de->net_stats.tx_fifo_errors++;
+ } else {
+ de->net_stats.tx_packets++;
+ de->net_stats.tx_bytes += skb->len;
+ if (netif_msg_tx_done(de))
+ printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
+ }
+ dev_kfree_skb_irq(skb);
+ }
+
+next:
+ de->tx_skb[tx_tail].skb = NULL;
+
+ tx_tail = NEXT_TX(tx_tail);
+ }
+
+ de->tx_tail = tx_tail;
+
+ if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
+ netif_wake_queue(de->dev);
+}
+
+static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ unsigned int entry, tx_free;
+ u32 mapping, len, flags = FirstFrag | LastFrag;
+ struct de_desc *txd;
+
+ spin_lock_irq(&de->lock);
+
+ tx_free = TX_BUFFS_AVAIL(de);
+ if (tx_free == 0) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&de->lock);
+ return 1;
+ }
+ tx_free--;
+
+ entry = de->tx_head;
+
+ txd = &de->tx_ring[entry];
+
+ len = skb->len;
+ mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (entry == (DE_TX_RING_SIZE - 1))
+ flags |= RingEnd;
+ if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
+ flags |= TxSwInt;
+ flags |= len;
+ txd->opts2 = cpu_to_le32(flags);
+ txd->addr1 = cpu_to_le32(mapping);
+
+ de->tx_skb[entry].skb = skb;
+ de->tx_skb[entry].mapping = mapping;
+ wmb();
+
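+ /* The barrier above makes the descriptor body and skb bookkeeping
+ visible before ownership is handed to the chip here. */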
+ txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+
+ de->tx_head = NEXT_TX(entry);
+ if (netif_msg_tx_queued(de))
+ printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ dev->name, entry, skb->len);
+
+ if (tx_free == 0)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&de->lock);
+
+ /* Trigger an immediate transmit demand. */
+ dw32(TxPoll, NormalTxPoll);
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling de->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+#undef set_bit_le
+#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
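+/* set_bit_le() sets bit i in a little-endian bit array; it is used below to
+ build the 512-bit multicast hash table inside the setup frame. */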
+
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ u16 hash_table[32];
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit_le(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+
+ set_bit_le(index, hash_table);
+
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &de->setup_frame[13*6];
+ }
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+ setup_frm = &de->setup_frame[15*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+
+static void __de_set_rx_mode (struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ u32 macmode;
+ unsigned int entry;
+ u32 mapping;
+ struct de_desc *txd;
+ struct de_desc *dummy_txd = NULL;
+
+ macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ macmode |= AcceptAllMulticast | AcceptAllPhys;
+ goto out;
+ }
+
+ if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ macmode |= AcceptAllMulticast;
+ goto out;
+ }
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ build_setup_frame_hash (de->setup_frame, dev);
+ else
+ build_setup_frame_perfect (de->setup_frame, dev);
+
+ /*
+ * Now add this frame to the Tx list.
+ */
+
+ entry = de->tx_head;
+
+ /* Avoid a chip erratum by prefixing a dummy entry. */
+ if (entry != 0) {
+ de->tx_skb[entry].skb = DE_DUMMY_SKB;
+
+ dummy_txd = &de->tx_ring[entry];
+ dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
+ cpu_to_le32(RingEnd) : 0;
+ dummy_txd->addr1 = 0;
+
+ /* Must set DescOwned later to avoid race with chip */
+
+ entry = NEXT_TX(entry);
+ }
+
+ de->tx_skb[entry].skb = DE_SETUP_SKB;
+ de->tx_skb[entry].mapping = mapping =
+ pci_map_single (de->pdev, de->setup_frame,
+ sizeof (de->setup_frame), PCI_DMA_TODEVICE);
+
+ /* Put the setup frame on the Tx list. */
+ txd = &de->tx_ring[entry];
+ if (entry == (DE_TX_RING_SIZE - 1))
+ txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
+ else
+ txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
+ txd->addr1 = cpu_to_le32(mapping);
+ wmb();
+
+ txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+
+ if (dummy_txd) {
+ dummy_txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+ }
+
+ de->tx_head = NEXT_TX(entry);
+
+ if (TX_BUFFS_AVAIL(de) < 0)
+ BUG();
+ if (TX_BUFFS_AVAIL(de) == 0)
+ netif_stop_queue(dev);
+
+ /* Trigger an immediate transmit demand. */
+ dw32(TxPoll, NormalTxPoll);
+
+out:
+ if (macmode != dr32(MacMode))
+ dw32(MacMode, macmode);
+}
+
+static void de_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct de_private *de = dev->priv;
+
+ spin_lock_irqsave (&de->lock, flags);
+ __de_set_rx_mode(dev);
+ spin_unlock_irqrestore (&de->lock, flags);
+}
+
+static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
+{
+ if (unlikely(rx_missed & RxMissedOver))
+ de->net_stats.rx_missed_errors += RxMissedMask;
+ else
+ de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+}
+
+static void __de_get_stats(struct de_private *de)
+{
+ u32 tmp = dr32(RxMissed); /* self-clearing */
+
+ de_rx_missed(de, tmp);
+}
+
+static struct net_device_stats *de_get_stats(struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+
+ /* The chip only needs to report frames it silently dropped. */
+ spin_lock_irq(&de->lock);
+ if (netif_running(dev) && netif_device_present(dev))
+ __de_get_stats(de);
+ spin_unlock_irq(&de->lock);
+
+ return &de->net_stats;
+}
+
+static inline int de_is_running (struct de_private *de)
+{
+ return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
+}
+
+static void de_stop_rxtx (struct de_private *de)
+{
+ u32 macmode;
+ unsigned int work = 1000;
+
+ macmode = dr32(MacMode);
+ if (macmode & RxTx) {
+ dw32(MacMode, macmode & ~RxTx);
+ dr32(MacMode);
+ }
+
+ while (--work > 0) {
+ if (!de_is_running(de))
+ return;
+ cpu_relax();
+ }
+
+ printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
+}
+
+static inline void de_start_rxtx (struct de_private *de)
+{
+ u32 macmode;
+
+ macmode = dr32(MacMode);
+ if ((macmode & RxTx) != RxTx) {
+ dw32(MacMode, macmode | RxTx);
+ dr32(MacMode);
+ }
+}
+
+static void de_stop_hw (struct de_private *de)
+{
+
+ udelay(5);
+ dw32(IntrMask, 0);
+
+ de_stop_rxtx(de);
+
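+ /* Writing the status bits back to MacStatus acknowledges any
+ interrupts that are still pending. */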
+ dw32(MacStatus, dr32(MacStatus));
+
+ udelay(10);
+
+ de->rx_tail = 0;
+ de->tx_head = de->tx_tail = 0;
+}
+
+static void de_link_up(struct de_private *de)
+{
+ if (!netif_carrier_ok(de->dev)) {
+ netif_carrier_on(de->dev);
+ if (netif_msg_link(de))
+ printk(KERN_INFO "%s: link up, media %s\n",
+ de->dev->name, media_name[de->media_type]);
+ }
+}
+
+static void de_link_down(struct de_private *de)
+{
+ if (netif_carrier_ok(de->dev)) {
+ netif_carrier_off(de->dev);
+ if (netif_msg_link(de))
+ printk(KERN_INFO "%s: link down\n", de->dev->name);
+ }
+}
+
+static void de_set_media (struct de_private *de)
+{
+ unsigned media = de->media_type;
+ u32 macmode = dr32(MacMode);
+
+ if (de_is_running(de))
+ BUG();
+
+ if (de->de21040)
+ dw32(CSR11, FULL_DUPLEX_MAGIC);
+ dw32(CSR13, 0); /* Reset phy */
+ dw32(CSR14, de->media[media].csr14);
+ dw32(CSR15, de->media[media].csr15);
+ dw32(CSR13, de->media[media].csr13);
+
+ /* must delay 10ms before writing to other registers,
+ * especially CSR6
+ */
+ mdelay(10);
+
+ if (media == DE_MEDIA_TP_FD)
+ macmode |= FullDuplex;
+ else
+ macmode &= ~FullDuplex;
+
+ if (netif_msg_link(de)) {
+ printk(KERN_INFO "%s: set link %s\n"
+ KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
+ KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ de->dev->name, media_name[media],
+ de->dev->name, dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15),
+ de->dev->name, macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
+ }
+ if (macmode != dr32(MacMode))
+ dw32(MacMode, macmode);
+}
+
+static void de_next_media (struct de_private *de, u32 *media,
+ unsigned int n_media)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_media; i++) {
+ if (de_ok_to_advertise(de, media[i])) {
+ de->media_type = media[i];
+ return;
+ }
+ }
+}
+
+static void de21040_media_timer (unsigned long data)
+{
+ struct de_private *de = (struct de_private *) data;
+ struct net_device *dev = de->dev;
+ u32 status = dr32(SIAStatus);
+ unsigned int carrier;
+ unsigned long flags;
+
+ carrier = (status & NetCxnErr) ? 0 : 1;
+
+ if (carrier) {
+ if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
+ goto no_link_yet;
+
+ de->media_timer.expires = jiffies + DE_TIMER_LINK;
+ add_timer(&de->media_timer);
+ if (!netif_carrier_ok(dev))
+ de_link_up(de);
+ else
+ if (netif_msg_timer(de))
+ printk(KERN_INFO "%s: %s link ok, status %x\n",
+ dev->name, media_name[de->media_type],
+ status);
+ return;
+ }
+
+ de_link_down(de);
+
+ if (de->media_lock)
+ return;
+
+ if (de->media_type == DE_MEDIA_AUI) {
+ u32 next_state = DE_MEDIA_TP;
+ de_next_media(de, &next_state, 1);
+ } else {
+ u32 next_state = DE_MEDIA_AUI;
+ de_next_media(de, &next_state, 1);
+ }
+
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_rxtx(de);
+ spin_unlock_irqrestore(&de->lock, flags);
+ de_set_media(de);
+ de_start_rxtx(de);
+
+no_link_yet:
+ de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
+ add_timer(&de->media_timer);
+
+ if (netif_msg_timer(de))
+ printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
+ dev->name, media_name[de->media_type], status);
+}
+
+static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
+{
+ switch (new_media) {
+ case DE_MEDIA_TP_AUTO:
+ if (!(de->media_advertise & ADVERTISED_Autoneg))
+ return 0;
+ if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
+ return 0;
+ break;
+ case DE_MEDIA_BNC:
+ if (!(de->media_advertise & ADVERTISED_BNC))
+ return 0;
+ break;
+ case DE_MEDIA_AUI:
+ if (!(de->media_advertise & ADVERTISED_AUI))
+ return 0;
+ break;
+ case DE_MEDIA_TP:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Half))
+ return 0;
+ break;
+ case DE_MEDIA_TP_FD:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Full))
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+
+static void de21041_media_timer (unsigned long data)
+{
+ struct de_private *de = (struct de_private *) data;
+ struct net_device *dev = de->dev;
+ u32 status = dr32(SIAStatus);
+ unsigned int carrier;
+ unsigned long flags;
+
+ carrier = (status & NetCxnErr) ? 0 : 1;
+
+ if (carrier) {
+ if ((de->media_type == DE_MEDIA_TP_AUTO ||
+ de->media_type == DE_MEDIA_TP ||
+ de->media_type == DE_MEDIA_TP_FD) &&
+ (status & LinkFailStatus))
+ goto no_link_yet;
+
+ de->media_timer.expires = jiffies + DE_TIMER_LINK;
+ add_timer(&de->media_timer);
+ if (!netif_carrier_ok(dev))
+ de_link_up(de);
+ else
+ if (netif_msg_timer(de))
+ printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
+ dev->name, media_name[de->media_type],
+ dr32(MacMode), status);
+ return;
+ }
+
+ de_link_down(de);
+
+ /* if media type locked, don't switch media */
+ if (de->media_lock)
+ goto set_media;
+
+ /* if activity detected, use that as hint for new media type */
+ if (status & NonselPortActive) {
+ unsigned int have_media = 1;
+
+ /* if AUI/BNC selected, then activity is on TP port */
+ if (de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) {
+ if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
+ de->media_type = DE_MEDIA_TP_AUTO;
+ else
+ have_media = 0;
+ }
+
+ /* TP selected. If there is only TP and BNC, then it's BNC */
+ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
+ de_ok_to_advertise(de, DE_MEDIA_BNC))
+ de->media_type = DE_MEDIA_BNC;
+
+ /* TP selected. If there is only TP and AUI, then it's AUI */
+ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
+ de_ok_to_advertise(de, DE_MEDIA_AUI))
+ de->media_type = DE_MEDIA_AUI;
+
+ /* otherwise, ignore the hint */
+ else
+ have_media = 0;
+
+ if (have_media)
+ goto set_media;
+ }
+
+ /*
+ * Absent or ambiguous activity hint, move to next advertised
+ * media state. If de->media_type is left unchanged, this
+ * simply resets the PHY and reloads the current media settings.
+ */
+ if (de->media_type == DE_MEDIA_AUI) {
+ u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ } else if (de->media_type == DE_MEDIA_BNC) {
+ u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ } else {
+ u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ }
+
+set_media:
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_rxtx(de);
+ spin_unlock_irqrestore(&de->lock, flags);
+ de_set_media(de);
+ de_start_rxtx(de);
+
+no_link_yet:
+ de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
+ add_timer(&de->media_timer);
+
+ if (netif_msg_timer(de))
+ printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
+ dev->name, media_name[de->media_type], status);
+}
+
+static void de_media_interrupt (struct de_private *de, u32 status)
+{
+ if (status & LinkPass) {
+ de_link_up(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
+ return;
+ }
+
+ if (!(status & LinkFail))
+ BUG();
+
+ if (netif_carrier_ok(de->dev)) {
+ de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
+ }
+}
+
+static int de_reset_mac (struct de_private *de)
+{
+ u32 status, tmp;
+
+ /*
+ * Reset MAC. de4x5.c and tulip.c examined for "advice"
+ * in this area.
+ */
+
+ if (dr32(BusMode) == 0xffffffff)
+ return -EBUSY;
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ dw32 (BusMode, CmdReset);
+ mdelay (1);
+
+ dw32 (BusMode, de_bus_mode);
+ mdelay (1);
+
+ for (tmp = 0; tmp < 5; tmp++) {
+ dr32 (BusMode);
+ mdelay (1);
+ }
+
+ mdelay (1);
+
+ status = dr32(MacStatus);
+ if (status & (RxState | TxState))
+ return -EBUSY;
+ if (status == 0xffffffff)
+ return -ENODEV;
+ return 0;
+}
+
+static void de_adapter_wake (struct de_private *de)
+{
+ u32 pmctl;
+
+ if (de->de21040)
+ return;
+
+ pci_read_config_dword(de->pdev, PCIPM, &pmctl);
+ if (pmctl & PM_Mask) {
+ pmctl &= ~PM_Mask;
+ pci_write_config_dword(de->pdev, PCIPM, pmctl);
+
+ /* de4x5.c delays, so we do too */
+ msleep(10);
+ }
+}
+
+static void de_adapter_sleep (struct de_private *de)
+{
+ u32 pmctl;
+
+ if (de->de21040)
+ return;
+
+ pci_read_config_dword(de->pdev, PCIPM, &pmctl);
+ pmctl |= PM_Sleep;
+ pci_write_config_dword(de->pdev, PCIPM, pmctl);
+}
+
+static int de_init_hw (struct de_private *de)
+{
+ struct net_device *dev = de->dev;
+ u32 macmode;
+ int rc;
+
+ de_adapter_wake(de);
+
+ macmode = dr32(MacMode) & ~MacModeClear;
+
+ rc = de_reset_mac(de);
+ if (rc)
+ return rc;
+
+ de_set_media(de); /* reset phy */
+
+ dw32(RxRingAddr, de->ring_dma);
+ dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
+
+ dw32(MacMode, RxTx | macmode);
+
+ dr32(RxMissed); /* self-clearing */
+
+ dw32(IntrMask, de_intr_mask);
+
+ de_set_rx_mode(dev);
+
+ return 0;
+}
+
+static int de_refill_rx (struct de_private *de)
+{
+ unsigned i;
+
+ for (i = 0; i < DE_RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(de->rx_buf_sz);
+ if (!skb)
+ goto err_out;
+
+ skb->dev = de->dev;
+
+ de->rx_skb[i].mapping = pci_map_single(de->pdev,
+ skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ de->rx_skb[i].skb = skb;
+
+ de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
+ if (i == (DE_RX_RING_SIZE - 1))
+ de->rx_ring[i].opts2 =
+ cpu_to_le32(RingEnd | de->rx_buf_sz);
+ else
+ de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
+ de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
+ de->rx_ring[i].addr2 = 0;
+ }
+
+ return 0;
+
+err_out:
+ de_clean_rings(de);
+ return -ENOMEM;
+}
+
+static int de_init_rings (struct de_private *de)
+{
+ memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
+ de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+
+ de->rx_tail = 0;
+ de->tx_head = de->tx_tail = 0;
+
+ return de_refill_rx (de);
+}
+
+static int de_alloc_rings (struct de_private *de)
+{
+ de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ if (!de->rx_ring)
+ return -ENOMEM;
+ de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
+ return de_init_rings(de);
+}
+
+static void de_clean_rings (struct de_private *de)
+{
+ unsigned i;
+
+ memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
+ de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+ wmb();
+ memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
+ de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+ wmb();
+
+ for (i = 0; i < DE_RX_RING_SIZE; i++) {
+ if (de->rx_skb[i].skb) {
+ pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
+ de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(de->rx_skb[i].skb);
+ }
+ }
+
+ for (i = 0; i < DE_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = de->tx_skb[i].skb;
+ if ((skb) && (skb != DE_DUMMY_SKB)) {
+ if (skb != DE_SETUP_SKB) {
+				/* unmap while skb->len is still valid */
+				pci_unmap_single(de->pdev,
+					de->tx_skb[i].mapping,
+					skb->len, PCI_DMA_TODEVICE);
+				dev_kfree_skb(skb);
+				de->net_stats.tx_dropped++;
+ } else {
+ pci_unmap_single(de->pdev,
+ de->tx_skb[i].mapping,
+ sizeof(de->setup_frame),
+ PCI_DMA_TODEVICE);
+ }
+ }
+ }
+
+ memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
+ memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
+}
+
+static void de_free_rings (struct de_private *de)
+{
+ de_clean_rings(de);
+ pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ de->rx_ring = NULL;
+ de->tx_ring = NULL;
+}
+
+static int de_open (struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ if (netif_msg_ifup(de))
+ printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+
+ de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ rc = de_alloc_rings(de);
+ if (rc) {
+ printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
+ dev->name, rc);
+ return rc;
+ }
+
+ rc = de_init_hw(de);
+ if (rc) {
+ printk(KERN_ERR "%s: h/w init failure, err=%d\n",
+ dev->name, rc);
+ goto err_out_free;
+ }
+
+ rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
+ if (rc) {
+ printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
+ dev->name, dev->irq, rc);
+ goto err_out_hw;
+ }
+
+ netif_start_queue(dev);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
+
+ return 0;
+
+err_out_hw:
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_hw(de);
+ spin_unlock_irqrestore(&de->lock, flags);
+
+err_out_free:
+ de_free_rings(de);
+ return rc;
+}
+
+static int de_close (struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ unsigned long flags;
+
+ if (netif_msg_ifdown(de))
+ printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+
+ del_timer_sync(&de->media_timer);
+
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&de->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ de_free_rings(de);
+ de_adapter_sleep(de);
+ pci_disable_device(de->pdev);
+ return 0;
+}
+
+static void de_tx_timeout (struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+
+ printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
+ dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
+ de->rx_tail, de->tx_head, de->tx_tail);
+
+ del_timer_sync(&de->media_timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&de->lock);
+
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&de->lock);
+ enable_irq(dev->irq);
+
+ /* Update the error counts. */
+ __de_get_stats(de);
+
+ synchronize_irq(dev->irq);
+ de_clean_rings(de);
+
+ de_init_hw(de);
+
+ netif_wake_queue(dev);
+}
+
+static void __de_get_regs(struct de_private *de, u8 *buf)
+{
+ int i;
+ u32 *rbuf = (u32 *)buf;
+
+ /* read all CSRs */
+ for (i = 0; i < DE_NUM_REGS; i++)
+ rbuf[i] = dr32(i * 8);
+
+ /* handle self-clearing RxMissed counter, CSR8 */
+ de_rx_missed(de, rbuf[8]);
+}
+
+static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = de->media_supported;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = 0;
+ ecmd->advertising = de->media_advertise;
+
+ switch (de->media_type) {
+ case DE_MEDIA_AUI:
+ ecmd->port = PORT_AUI;
+ ecmd->speed = 5;
+ break;
+ case DE_MEDIA_BNC:
+ ecmd->port = PORT_BNC;
+ ecmd->speed = 2;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ ecmd->speed = SPEED_10;
+ break;
+ }
+
+ if (dr32(MacMode) & FullDuplex)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+
+ if (de->media_lock)
+ ecmd->autoneg = AUTONEG_DISABLE;
+ else
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+{
+ u32 new_media;
+ unsigned int media_lock;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
+ return -EINVAL;
+ if (de->de21040 && ecmd->speed == 2)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
+ return -EINVAL;
+ if (de->de21040 && ecmd->port == PORT_BNC)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ if (ecmd->advertising & ~de->media_supported)
+ return -EINVAL;
+ if (ecmd->autoneg == AUTONEG_ENABLE &&
+ (!(ecmd->advertising & ADVERTISED_Autoneg)))
+ return -EINVAL;
+
+ switch (ecmd->port) {
+ case PORT_AUI:
+ new_media = DE_MEDIA_AUI;
+ if (!(ecmd->advertising & ADVERTISED_AUI))
+ return -EINVAL;
+ break;
+ case PORT_BNC:
+ new_media = DE_MEDIA_BNC;
+ if (!(ecmd->advertising & ADVERTISED_BNC))
+ return -EINVAL;
+ break;
+ default:
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ new_media = DE_MEDIA_TP_AUTO;
+ else if (ecmd->duplex == DUPLEX_FULL)
+ new_media = DE_MEDIA_TP_FD;
+ else
+ new_media = DE_MEDIA_TP;
+ if (!(ecmd->advertising & ADVERTISED_TP))
+ return -EINVAL;
+ if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
+ return -EINVAL;
+ break;
+ }
+
+ media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
+
+ if ((new_media == de->media_type) &&
+ (media_lock == de->media_lock) &&
+ (ecmd->advertising == de->media_advertise))
+ return 0; /* nothing to change */
+
+ de_link_down(de);
+ de_stop_rxtx(de);
+
+ de->media_type = new_media;
+ de->media_lock = media_lock;
+ de->media_advertise = ecmd->advertising;
+ de_set_media(de);
+
+ return 0;
+}
+
+static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
+{
+ struct de_private *de = dev->priv;
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(de->pdev));
+ info->eedump_len = DE_EEPROM_SIZE;
+}
+
+static int de_get_regs_len(struct net_device *dev)
+{
+ return DE_REGS_SIZE;
+}
+
+static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct de_private *de = dev->priv;
+ int rc;
+
+ spin_lock_irq(&de->lock);
+ rc = __de_get_settings(de, ecmd);
+ spin_unlock_irq(&de->lock);
+
+ return rc;
+}
+
+static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct de_private *de = dev->priv;
+ int rc;
+
+ spin_lock_irq(&de->lock);
+ rc = __de_set_settings(de, ecmd);
+ spin_unlock_irq(&de->lock);
+
+ return rc;
+}
+
+static u32 de_get_msglevel(struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+
+ return de->msg_enable;
+}
+
+static void de_set_msglevel(struct net_device *dev, u32 msglvl)
+{
+ struct de_private *de = dev->priv;
+
+ de->msg_enable = msglvl;
+}
+
+static int de_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct de_private *de = dev->priv;
+
+ if (!de->ee_data)
+ return -EOPNOTSUPP;
+ if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
+ (eeprom->len != DE_EEPROM_SIZE))
+ return -EINVAL;
+ memcpy(data, de->ee_data, eeprom->len);
+
+ return 0;
+}
+
+static int de_nway_reset(struct net_device *dev)
+{
+ struct de_private *de = dev->priv;
+ u32 status;
+
+ if (de->media_type != DE_MEDIA_TP_AUTO)
+ return -EINVAL;
+ if (netif_carrier_ok(de->dev))
+ de_link_down(de);
+
+ status = dr32(SIAStatus);
+ dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
+ if (netif_msg_link(de))
+ printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
+ de->dev->name, status, dr32(SIAStatus));
+ return 0;
+}
+
+static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *data)
+{
+ struct de_private *de = dev->priv;
+
+ regs->version = (DE_REGS_VER << 2) | de->de21040;
+
+ spin_lock_irq(&de->lock);
+ __de_get_regs(de, data);
+ spin_unlock_irq(&de->lock);
+}
+
+static struct ethtool_ops de_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_drvinfo = de_get_drvinfo,
+ .get_regs_len = de_get_regs_len,
+ .get_settings = de_get_settings,
+ .set_settings = de_set_settings,
+ .get_msglevel = de_get_msglevel,
+ .set_msglevel = de_set_msglevel,
+ .get_eeprom = de_get_eeprom,
+ .nway_reset = de_nway_reset,
+ .get_regs = de_get_regs,
+};
+
+static void __init de21040_get_mac_address (struct de_private *de)
+{
+ unsigned i;
+
+ dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
+
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
+ do
+ value = dr32(ROMCmd);
+ while (value < 0 && --boguscnt > 0);
+ de->dev->dev_addr[i] = value;
+ udelay(1);
+ if (boguscnt <= 0)
+ printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
+ }
+}
+
+static void __init de21040_get_media_info(struct de_private *de)
+{
+ unsigned int i;
+
+ de->media_type = DE_MEDIA_TP;
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half | SUPPORTED_AUI;
+ de->media_advertise = de->media_supported;
+
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ switch (i) {
+ case DE_MEDIA_AUI:
+ case DE_MEDIA_TP:
+ case DE_MEDIA_TP_FD:
+ de->media[i].type = i;
+ de->media[i].csr13 = t21040_csr13[i];
+ de->media[i].csr14 = t21040_csr14[i];
+ de->media[i].csr15 = t21040_csr15[i];
+ break;
+ default:
+ de->media[i].type = DE_MEDIA_INVALID;
+ break;
+ }
+ }
+}
+
+/* Note: this routine returns extra data bits for size detection. */
+static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void __iomem *ee_addr = regs + ROMCmd;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ writel(EE_ENB & ~EE_CS, ee_addr);
+ writel(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writel(EE_ENB | dataval, ee_addr);
+ readl(ee_addr);
+ writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ readl(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ writel(EE_ENB, ee_addr);
+ readl(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ readl(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ writel(EE_ENB, ee_addr);
+ readl(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+static void __init de21041_get_srom_info (struct de_private *de)
+{
+ unsigned i, sa_offset = 0, ofs;
+ u8 ee_data[DE_EEPROM_SIZE + 6] = {};
+ unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
+ struct de_srom_info_leaf *il;
+ void *bufp;
+
+ /* download entire eeprom */
+ for (i = 0; i < DE_EEPROM_WORDS; i++)
+ ((u16 *)ee_data)[i] =
+ le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
+
+ /* DEC now has a specification but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(eedata, eedata+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+
+ /* store MAC address */
+ for (i = 0; i < 6; i ++)
+ de->dev->dev_addr[i] = ee_data[i + sa_offset];
+
+ /* get offset of controller 0 info leaf. ignore 2nd byte. */
+ ofs = ee_data[SROMC0InfoLeaf];
+ if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
+ goto bad_srom;
+
+ /* get pointer to info leaf */
+ il = (struct de_srom_info_leaf *) &ee_data[ofs];
+
+ /* paranoia checks */
+ if (il->n_blocks == 0)
+ goto bad_srom;
+ if ((sizeof(ee_data) - ofs) <
+ (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
+ goto bad_srom;
+
+ /* get default media type */
+ switch (DE_UNALIGNED_16(&il->default_media)) {
+ case 0x0001: de->media_type = DE_MEDIA_BNC; break;
+ case 0x0002: de->media_type = DE_MEDIA_AUI; break;
+ case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
+ default: de->media_type = DE_MEDIA_TP_AUTO; break;
+ }
+
+ if (netif_msg_probe(de))
+ printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs,
+ media_name[de->media_type]);
+
+ /* init SIA register values to defaults */
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ de->media[i].type = DE_MEDIA_INVALID;
+ de->media[i].csr13 = 0xffff;
+ de->media[i].csr14 = 0xffff;
+ de->media[i].csr15 = 0xffff;
+ }
+
+	/* parse media blocks to see which media are supported,
+ * and if any custom CSR values are provided
+ */
+ bufp = ((void *)il) + sizeof(*il);
+ for (i = 0; i < il->n_blocks; i++) {
+ struct de_srom_media_block *ib = bufp;
+ unsigned idx;
+
+ /* index based on media type in media block */
+ switch(ib->opts & MediaBlockMask) {
+ case 0: /* 10baseT */
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
+ | SUPPORTED_Autoneg;
+ idx = DE_MEDIA_TP;
+ de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+ break;
+ case 1: /* BNC */
+ de->media_supported |= SUPPORTED_BNC;
+ idx = DE_MEDIA_BNC;
+ break;
+ case 2: /* AUI */
+ de->media_supported |= SUPPORTED_AUI;
+ idx = DE_MEDIA_AUI;
+ break;
+ case 4: /* 10baseT-FD */
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
+ | SUPPORTED_Autoneg;
+ idx = DE_MEDIA_TP_FD;
+ de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+ break;
+ default:
+ goto bad_srom;
+ }
+
+ de->media[idx].type = idx;
+
+ if (netif_msg_probe(de))
+ printk(KERN_INFO "de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
+
+ bufp += sizeof (ib->opts);
+
+ if (ib->opts & MediaCustomCSRs) {
+ de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
+ de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
+ de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
+ bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
+ sizeof(ib->csr15);
+
+ if (netif_msg_probe(de))
+ printk(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
+
+ } else if (netif_msg_probe(de))
+ printk("\n");
+
+ if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
+ break;
+ }
+
+ de->media_advertise = de->media_supported;
+
+fill_defaults:
+ /* fill in defaults, for cases where custom CSRs not used */
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ if (de->media[i].csr13 == 0xffff)
+ de->media[i].csr13 = t21041_csr13[i];
+ if (de->media[i].csr14 == 0xffff)
+ de->media[i].csr14 = t21041_csr14[i];
+ if (de->media[i].csr15 == 0xffff)
+ de->media[i].csr15 = t21041_csr15[i];
+ }
+
+ de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
+ if (de->ee_data)
+ memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
+
+ return;
+
+bad_srom:
+ /* for error cases, it's ok to assume we support all these */
+ for (i = 0; i < DE_MAX_MEDIA; i++)
+ de->media[i].type = i;
+ de->media_supported =
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_AUI |
+ SUPPORTED_BNC;
+ goto fill_defaults;
+}
+
+static int __init de_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct de_private *de;
+ int rc;
+ void __iomem *regs;
+ long pciaddr;
+ static int board_idx = -1;
+
+ board_idx++;
+
+#ifndef MODULE
+ if (board_idx == 0)
+ printk("%s", version);
+#endif
+
+ /* allocate a new ethernet device structure, and fill in defaults */
+ dev = alloc_etherdev(sizeof(struct de_private));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->open = de_open;
+ dev->stop = de_close;
+ dev->set_multicast_list = de_set_rx_mode;
+ dev->hard_start_xmit = de_start_xmit;
+ dev->get_stats = de_get_stats;
+ dev->ethtool_ops = &de_ethtool_ops;
+ dev->tx_timeout = de_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ de = dev->priv;
+ de->de21040 = ent->driver_data == 0 ? 1 : 0;
+ de->pdev = pdev;
+ de->dev = dev;
+ de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
+ de->board_idx = board_idx;
+ spin_lock_init (&de->lock);
+ init_timer(&de->media_timer);
+ if (de->de21040)
+ de->media_timer.function = de21040_media_timer;
+ else
+ de->media_timer.function = de21041_media_timer;
+ de->media_timer.data = (unsigned long) de;
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ /* wake up device, assign resources */
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_free;
+
+ /* reserve PCI resources to ensure driver atomicity */
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable;
+
+ /* check for invalid IRQ value */
+ if (pdev->irq < 2) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+ pdev->irq, pci_name(pdev));
+ goto err_out_res;
+ }
+
+ dev->irq = pdev->irq;
+
+ /* obtain and check validity of PCI I/O address */
+ pciaddr = pci_resource_start(pdev, 1);
+ if (!pciaddr) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
+ pci_name(pdev));
+ goto err_out_res;
+ }
+ if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
+ pci_resource_len(pdev, 1), pci_name(pdev));
+ goto err_out_res;
+ }
+
+ /* remap CSR registers */
+ regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+ if (!regs) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
+ pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+ goto err_out_res;
+ }
+ dev->base_addr = (unsigned long) regs;
+ de->regs = regs;
+
+ de_adapter_wake(de);
+
+ /* make sure hardware is not running */
+ rc = de_reset_mac(de);
+ if (rc) {
+ printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
+ pci_name(pdev));
+ goto err_out_iomap;
+ }
+
+ /* get MAC address, initialize default media type and
+ * get list of supported media
+ */
+ if (de->de21040) {
+ de21040_get_mac_address(de);
+ de21040_get_media_info(de);
+ } else {
+ de21041_get_srom_info(de);
+ }
+
+ /* register new network interface with kernel */
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_iomap;
+
+ /* print info about board and interface just registered */
+ printk (KERN_INFO "%s: %s at 0x%lx, "
+ "%02x:%02x:%02x:%02x:%02x:%02x, "
+ "IRQ %d\n",
+ dev->name,
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5],
+ dev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ /* enable busmastering */
+ pci_set_master(pdev);
+
+ /* put adapter to sleep */
+ de_adapter_sleep(de);
+
+ return 0;
+
+err_out_iomap:
+ if (de->ee_data)
+ kfree(de->ee_data);
+ iounmap(regs);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static void __exit de_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+	struct de_private *de;
+
+	if (!dev)
+		BUG();
+	de = dev->priv;
+ unregister_netdev(dev);
+ if (de->ee_data)
+ kfree(de->ee_data);
+ iounmap(de->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+#ifdef CONFIG_PM
+
+static int de_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct de_private *de = dev->priv;
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&de->media_timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&de->lock);
+
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&de->lock);
+ enable_irq(dev->irq);
+
+ /* Update the error counts. */
+ __de_get_stats(de);
+
+ synchronize_irq(dev->irq);
+ de_clean_rings(de);
+
+ de_adapter_sleep(de);
+ pci_disable_device(pdev);
+ } else {
+ netif_device_detach(dev);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static int de_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct de_private *de = dev->priv;
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out;
+ if (netif_running(dev)) {
+ pci_enable_device(pdev);
+ de_init_hw(de);
+ netif_device_attach(dev);
+ } else {
+ netif_device_attach(dev);
+ }
+out:
+ rtnl_unlock();
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver de_driver = {
+ .name = DRV_NAME,
+ .id_table = de_pci_tbl,
+ .probe = de_init_one,
+ .remove = __exit_p(de_remove_one),
+#ifdef CONFIG_PM
+ .suspend = de_suspend,
+ .resume = de_resume,
+#endif
+};
+
+static int __init de_init (void)
+{
+#ifdef MODULE
+ printk("%s", version);
+#endif
+ return pci_module_init (&de_driver);
+}
+
+static void __exit de_exit (void)
+{
+ pci_unregister_driver (&de_driver);
+}
+
+module_init(de_init);
+module_exit(de_exit);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
new file mode 100644
index 000000000000..93800c126e86
--- /dev/null
+++ b/drivers/net/tulip/de4x5.c
@@ -0,0 +1,5778 @@
+/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
+ ethernet driver for Linux.
+
+ Copyright 1994, 1995 Digital Equipment Corporation.
+
+ Testing resources for this driver have been made available
+ in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2 of the License, or (at your
+ option) any later version.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Originally, this driver was written for the Digital Equipment
+ Corporation series of EtherWORKS ethernet cards:
+
+ DE425 TP/COAX EISA
+ DE434 TP PCI
+ DE435 TP/COAX/AUI PCI
+ DE450 TP/COAX/AUI PCI
+ DE500 10/100 PCI Fasternet
+
+ but it will now attempt to support all cards which conform to the
+ Digital Semiconductor SROM Specification. The driver currently
+ recognises the following chips:
+
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
+
+ So far the driver is known to work with the following cards:
+
+ KINGSTON
+ Linksys
+ ZNYX342
+ SMC8432
+ SMC9332 (w/new SROM)
+ ZNYX31[45]
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+
+ The driver has been tested on a relatively busy network using the DE425,
+ DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
+ 16M of data to a DECstation 5000/200 as follows:
+
+ TCP UDP
+ TX RX TX RX
+ DE425 1030k 997k 1170k 1128k
+ DE434 1063k 995k 1170k 1125k
+ DE435 1063k 995k 1170k 1125k
+ DE500 1063k 998k 1170k 1125k in 10Mb/s mode
+
+ All values are typical (in kBytes/sec) from a sample of 4 for each
+ measurement. Their error is +/-20k on a quiet (private) network and also
+    depends on what load the CPU has.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'ewrk3.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious. With the module autoload of
+ every usable DECchip board, I pinched Donald's 'next_module' field to
+ link my modules together.
+
+    Up to 15 EISA cards can be supported under this driver, limited primarily
+ by the available IRQ lines. I have checked different configurations of
+ multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
+ problem yet (provided you have at least depca.c v0.38) ...
+
+ PCI support has been added to allow the driver to work with the DE434,
+ DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
+ to the differences in the EISA and PCI CSR address offsets from the base
+ address.
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). Loadable module support under PCI and EISA has been
+ achieved by letting the driver autoprobe as if it were compiled into the
+ kernel. Do make sure you're not sharing interrupts with anything that
+ cannot accommodate interrupt sharing!
+
+ To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) for fixed autoprobes (not recommended), edit the source code near
+ line 5594 to reflect the I/O address you're using, or assign these when
+ loading by:
+
+ insmod de4x5 io=0xghh where g = bus number
+ hh = device number
+
+ NB: autoprobing for modules is now supported by default. You may just
+ use:
+
+ insmod de4x5
+
+ to load all available boards. For a specific board, still use
+ the 'io=?' above.
+ 3) compile de4x5.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the de4x5 configuration turned off and reboot.
+ 5) insmod de4x5 [io=0xghh]
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ To unload a module, turn off the associated interface(s)
+ 'ifconfig eth?? down' then 'rmmod de4x5'.
+
+    Automedia detection is included so that in principle you can disconnect
+ from, e.g. TP, reconnect to BNC and things will still work (after a
+ pause whilst the driver figures out where its media went). My tests
+ using ping showed that it appears to work....
+
+ By default, the driver will now autodetect any DECchip based card.
+ Should you have a need to restrict the driver to DIGITAL only cards, you
+ can compile with a DEC_ONLY define, or if loading as a module, use the
+ 'dec_only=1' parameter.
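+
+    As a minimal illustration of the module case (the value shown is simply
+    an example of the parameter described above):
+
+        insmod de4x5 dec_only=1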
+
+ I've changed the timing routines to use the kernel timer and scheduling
+ functions so that the hangs and other assorted problems that occurred
+ while autosensing the media should be gone. A bonus for the DC21040
+ auto media sense algorithm is that it can now use one that is more in
+ line with the rest (the DC21040 chip doesn't have a hardware timer).
+ The downside is the 1 'jiffies' (10ms) resolution.
+
+ IEEE 802.3u MII interface code has been added in anticipation that some
+ products may use it in the future.
+
+ The SMC9332 card has a non-compliant SROM which needs fixing - I have
+ patched this driver to detect it because the SROM format used complies
+ to a previous DEC-STD format.
+
+ I have removed the buffer copies needed for receive on Intels. I cannot
+ remove them for Alphas since the Tulip hardware only does longword
+ aligned DMA transfers and the Alphas get alignment traps with non
+ longword aligned data copies (which makes them really slow). No comment.
+
+ I have added SROM decoding routines to make this driver work with any
+ card that supports the Digital Semiconductor SROM spec. This will help
+ all cards running the dc2114x series chips in particular. Cards using
+ the dc2104x chips should run correctly with the basic driver. I'm in
+ debt to <mjacob@feral.com> for the testing and feedback that helped get
+ this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
+ (with the latest SROM complying with the SROM spec V3: their first was
+ broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
+ (quad 21041 MAC) cards also appear to work despite their incorrectly
+ wired IRQs.
+
+ I have added a temporary fix for interrupt problems when some SCSI cards
+ share the same interrupt as the DECchip based cards. The problem occurs
+ because the SCSI card wants to grab the interrupt as a fast interrupt
+ (runs the service routine with interrupts turned off) vs. this card
+ which really needs to run the service routine with interrupts turned on.
+ This driver will now add the interrupt service routine as a fast
+ interrupt if it is bounced from the slow interrupt. THIS IS NOT A
+ RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
+ until people sort out their compatibility issues and the kernel
+ interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
+ INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
+ run on the same interrupt. PCMCIA/CardBus is another can of worms...
+
+ Finally, I think I have really fixed the module loading problem with
+ more than one DECchip based card. As a side effect, I don't mess with
+ the device structure any more which means that if more than 1 card in
+ 2.0.x is installed (4 in 2.1.x), the user will have to edit
+ linux/drivers/net/Space.c to make room for them. Hence, module loading
+ is the preferred way to use this driver, since it doesn't have this
+ limitation.
+
+ Where SROM media detection is used and full duplex is specified in the
+ SROM, the feature is ignored unless lp->params.fdx is set at compile
+ time OR during a module load (insmod de4x5 args='eth??:fdx' [see
+ below]). This is because there is no way to automatically detect full
+ duplex links except through autonegotiation. When I include the
+ autonegotiation feature in the SROM autoconf code, this detection will
+ occur automatically for that case.
+
+ Command line arguments are now allowed, similar to passing arguments
+ through LILO. This will allow a per adapter board set up of full duplex
+ and media. The only lexical constraints are: the board name (dev->name)
+ appears in the list before its parameters. The list of parameters ends
+ either at the end of the parameter list or with another board name. The
+ following parameters are allowed:
+
+ fdx for full duplex
+ autosense to set the media/speed; with the following
+ sub-parameters:
+ TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
+
+ Case sensitivity is important for the sub-parameters. They *must* be
+ upper case. Examples:
+
+ insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+
+ For a compiled in driver, at or above line 548, place e.g.
+ #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
+
+ Yes, I know full duplex isn't permissible on BNC or AUI; they're just
+ examples. By default, full duplex is turned off and AUTO is the default
+ autosense setting. In reality, I expect only the full duplex option to
+ be used. Note the use of single quotes in the two examples above and the
+ lack of commas to separate items. ALSO, you must get the requested media
+ correct in relation to what the adapter SROM says it has. There's no way
+ to determine this in advance other than by trial and error and common
+ sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
+
+ Changed the bus probing. EISA used to be done first, followed by PCI.
+ Most people probably don't even know what a de425 is today and the EISA
+ probe has messed up some SCSI cards in the past, so now PCI is always
+ probed first followed by EISA if a) the architecture allows EISA and
+ either b) there have been no PCI cards detected or c) an EISA probe is
+ forced by the user. To force a probe include "force_eisa" in your
+ insmod "args" line; for built-in kernels either change the driver to do
+ this automatically or include #define DE4X5_FORCE_EISA on or before
+ line 1040 in the driver.
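+
+    As a sketch of the module case (any per-board parameters would be
+    appended to the same "args" string, as described earlier):
+
+        insmod de4x5 args='force_eisa'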
+
+ TO DO:
+ ------
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 17-Nov-94 Initial writing. ALPHA code release.
+ 0.2 13-Jan-95 Added PCI support for DE435's.
+ 0.21 19-Jan-95 Added auto media detection.
+ 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
+ Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ Add request/release_region code.
+ Add loadable modules support for PCI.
+ Clean up loadable modules support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ Fix missed frame counter value and initialisation.
+ Fixed EISA probe.
+ 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
+ Change TX_BUFFS_AVAIL macro.
+ Change media autodetection to allow manual setting.
+ Completed DE500 (DC21140) support.
+ 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
+ 0.242 10-May-95 Minor changes.
+ 0.30 12-Jun-95 Timer fix for DC21140.
+ Portability changes.
+ Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
+ Add DE500 semi automatic autosense.
+ Add Link Fail interrupt TP failure detection.
+ Add timer based link change detection.
+ Plugged a memory leak in de4x5_queue_pkt().
+ 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
+ 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
+ suggestion by <heiko@colossus.escape.de>.
+ 0.33 8-Aug-95 Add shared interrupt support (not released yet).
+ 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
+ Fix de4x5_interrupt().
+ Fix dc21140_autoconf() mess.
+ No shared interrupt support.
+ 0.332 11-Sep-95 Added MII management interface routines.
+ 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
+ Add kernel timer code (h/w is too flaky).
+ Add MII based PHY autosense.
+ Add new multicasting code.
+ Add new autosense algorithms for media/mode
+ selection using kernel scheduling/timing.
+ Re-formatted.
+ Made changes suggested by <jeff@router.patch.net>:
+ Change driver to detect all DECchip based cards
+ with DEC_ONLY restriction a special case.
+ Changed driver to autoprobe as a module. No irq
+ checking is done now - assume BIOS is good!
+ Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
+ 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
+ only <niles@axp745gsfc.nasa.gov>
+ Fix for multiple PCI cards reported by <jos@xos.nl>
+ Duh, put the SA_SHIRQ flag into request_interrupt().
+ Fix SMC ethernet address in enet_det[].
+ Print chip name instead of "UNKNOWN" during boot.
+ 0.42 26-Apr-96 Fix MII write TA bit error.
+ Fix bug in dc21040 and dc21041 autosense code.
+ Remove buffer copies on receive for Intels.
+ Change sk_buff handling during media disconnects to
+ eliminate DUP packets.
+ Add dynamic TX thresholding.
+ Change all chips to use perfect multicast filtering.
+ Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 21-Jun-96 Fix unconnected media TX retry bug.
+ Add Accton to the list of broken cards.
+ Fix TX under-run bug for non DC21140 chips.
+ Fix boot command probe bug in alloc_device() as
+ reported by <koen.gadeyne@barco.com> and
+ <orava@nether.tky.hut.fi>.
+ Add cache locks to prevent a race condition as
+ reported by <csd@microplex.com> and
+ <baba@beckman.uiuc.edu>.
+ Upgraded alloc_device() code.
+ 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
+ with <csd@microplex.com>
+ 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
+ Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
+ and <michael@compurex.com>.
+ 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
+ with a loopback packet.
+ 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
+ by <bhat@mundook.cs.mu.OZ.AU>
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
+ 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
+ suggestion from <mjacob@feral.com>.
+ 0.5 30-Jan-97 Added SROM decoding functions.
+ Updated debug flags.
+ Fix sleep/wakeup calls for PCI cards, bug reported
+ by <cross@gweep.lkg.dec.com>.
+ Added multi-MAC, one SROM feature from discussion
+ with <mjacob@feral.com>.
+ Added full module autoprobe capability.
+ Added attempt to use an SMC9332 with broken SROM.
+ Added fix for ZYNX multi-mac cards that didn't
+ get their IRQs wired correctly.
+ 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
+ <paubert@iram.es>
+ Fix init_connection() to remove extra device reset.
+ Fix MAC/PHY reset ordering in dc21140m_autoconf().
+ Fix initialisation problem with lp->timeout in
+ typeX_infoblock() from <paubert@iram.es>.
+ Fix MII PHY reset problem from work done by
+ <paubert@iram.es>.
+ 0.52 26-Apr-97 Some changes may not credit the right people -
+ a disk crash meant I lost some mail.
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
+ <g.thomas@opengroup.org>.
+ Fix srom_exec() to return for COMPACT and type 1
+ infoblocks.
+ Added DC21142 and DC21143 functions.
+ Added byte counters from <phil@tazenda.demon.co.uk>
+ Added SA_INTERRUPT temporary fix from
+ <mjacob@feral.com>.
+ 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
+ module load: bug reported by
+ <Piete.Brooks@cl.cam.ac.uk>
+ Fix multi-MAC, one SROM, to work with 2114x chips:
+ bug reported by <cmetz@inner.net>.
+ Make above search independent of BIOS device scan
+ direction.
+ Completed DC2114[23] autosense functions.
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+                          <robin@intercore.com>
+ Fix type1_infoblock() bug introduced in 0.53, from
+ problem reports by
+ <parmee@postecss.ncrfran.france.ncr.com> and
+ <jo@ice.dillingen.baynet.de>.
+ Added argument list to set up each board from either
+ a module's command line or a compiled in #define.
+ Added generic MII PHY functionality to deal with
+ newer PHY chips.
+ Fix the mess in 2.1.67.
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ <redhat@cococo.net>.
+ Fix bug in pci_probe() for 64 bit systems reported
+ by <belliott@accessone.com>.
+ 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
+ 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
+ 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
+ 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
+ **Incompatible with 2.0.x from here.**
+ 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
+ from <lma@varesearch.com>
+ Add TP, AUI and BNC cases to 21140m_autoconf() for
+ case where a 21140 under SROM control uses, e.g. AUI
+ from problem report by <delchini@lpnp09.in2p3.fr>
+ Add MII parallel detection to 2114x_autoconf() for
+ case where no autonegotiation partner exists from
+ problem report by <mlapsley@ndirect.co.uk>.
+ Add ability to force connection type directly even
+ when using SROM control from problem report by
+ <earl@exis.net>.
+ Updated the PCI interface to conform with the latest
+ version. I hope nothing is broken...
+ Add TX done interrupt modification from suggestion
+ by <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix is_anc_capable() bug reported by
+ <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix type[13]_infoblock() bug: during MII search, PHY
+ lp->rst not run because lp->ibn not initialised -
+ from report & fix by <paubert@iram.es>.
+ Fix probe bug with EISA & PCI cards present from
+ report by <eirik@netcom.com>.
+ 0.541 24-Aug-98 Fix compiler problems associated with i386-string
+ ops from multiple bug reports and temporary fix
+ from <paubert@iram.es>.
+ Fix pci_probe() to correctly emulate the old
+ pcibios_find_class() function.
+ Add an_exception() for old ZYNX346 and fix compile
+ warning on PPC & SPARC, from <ecd@skynet.be>.
+ Fix lastPCI to correctly work with compiled in
+ kernels and modules from bug report by
+ <Zlatko.Calusic@CARNet.hr> et al.
+ 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
+ when media is unconnected.
+ Change dev->interrupt to lp->interrupt to ensure
+ alignment for Alpha's and avoid their unaligned
+ access traps. This flag is merely for log messages:
+ should do something more definitive though...
+ 0.543 30-Dec-98 Add SMP spin locking.
+ 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
+ a 21143 by <mmporter@home.com>.
+ Change PCI/EISA bus probing order.
+ 0.545 28-Nov-99 Further Moto SROM bug fix from
+ <mporter@eng.mcd.mot.com>
+ Remove double checking for DEBUG_RX in de4x5_dbg_rx()
+ from report by <geert@linux-m68k.org>
+ 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
+ was causing a page fault when initializing the
+ variable 'pb', on a non de4x5 PCI device, in this
+ case a PCI bridge (DEC chip 21152). The value of
+ 'pb' is now only initialized if a de4x5 chip is
+ present.
+ <france@handhelds.org>
+ 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
+ 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
+ generic DMA APIs. Fixed DE425 support on Alpha.
+ <maz@wild-wind.fr.eu.org>
+ =========================================================================
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/eisa.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_PPC_MULTIPLATFORM
+#include <asm/machdep.h>
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#include "de4x5.h"
+
+static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
+
+#define c_char const char
+#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a)))
+
+/*
+** MII Information
+*/
+struct phy_table {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time - 802.3u is confusing here */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+};
+
+struct mii_phy {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+ int addr; /* MII address for the PHY */
+ u_char *gep; /* Start of GEP sequence block in SROM */
+ u_char *rst; /* Start of reset sequence in SROM */
+ u_int mc; /* Media Capabilities */
+ u_int ana; /* NWay Advertisement */
+    u_int fdx;             /* Full DupleX capabilities for each media */
+ u_int ttm; /* Transmit Threshold Mode for each media */
+ u_int mci; /* 21142 MII Connector Interrupt info */
+};
+
+#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
+
+struct sia_phy {
+ u_char mc; /* Media Code */
+ u_char ext; /* csr13-15 valid when set */
+ int csr13; /* SIA Connectivity Register */
+ int csr14; /* SIA TX/RX Register */
+ int csr15; /* SIA General Register */
+ int gepc; /* SIA GEP Control Information */
+ int gep; /* SIA GEP Data */
+};
+
+/*
+** Define the known universe of PHY devices that can be
+** recognised by this driver.
+*/
+static struct phy_table phy_info[] = {
+ {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
+ {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
+ {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
+ {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
+ {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
+};
+
+/*
+** These GENERIC values assume that the PHY devices follow 802.3u and
+** allow parallel detection to set the link partner ability register.
+** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
+*/
+#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
+#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
+#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
+
+/*
+** Define special SROM detection cases
+*/
+static c_char enet_det[][ETH_ALEN] = {
+ {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
+};
+
+#define SMC 1
+#define ACCTON 2
+
+/*
+** SROM Repair definitions. If a broken SROM is detected a card may
+** use this information to help figure out what to do. This is a
+** "stab in the dark" and so far for SMC9332's only.
+*/
+static c_char srom_repair_info[][100] = {
+ {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
+ 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
+ 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
+ 0x00,0x18,}
+};
+
+
+#ifdef DE4X5_DEBUG
+static int de4x5_debug = DE4X5_DEBUG;
+#else
+/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
+static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
+#endif
+
+/*
+** Allow per adapter set up. For modules this is simply a command line
+** parameter, e.g.:
+** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+**
+** For a compiled in driver, place e.g.
+** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
+** here
+*/
+#ifdef DE4X5_PARM
+static char *args = DE4X5_PARM;
+#else
+static char *args;
+#endif
+
+struct parameters {
+ int fdx;
+ int autosense;
+};
+
+#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
+
+#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Ethernet Info
+*/
+#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
+#define IEEE802_3_SZ 1518 /* Packet + CRC */
+#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
+#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
+#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
+#define PKT_HDR_LEN 14 /* Addresses and data length info */
+#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
+#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
+
+
+/*
+** EISA bus defines
+*/
+#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
+
+#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
+
+#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
+#define DE4X5_NAME_LENGTH 8
+
+static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
+
+/*
+** Ethernet PROM defines for DC21040
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** PCI Bus defines
+*/
+#define PCI_MAX_BUS_NUM 8
+#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
+#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
+#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
+#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
+#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
+#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
+
+#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
+#define DE4X5_CACHE_ALIGN CAL_16LONG
+#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
+/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
+#define DESC_ALIGN
+
+#ifndef DEC_ONLY /* See README.de4x5 for using this */
+static int dec_only;
+#else
+static int dec_only = 1;
+#endif
+
+/*
+** DE4X5 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ imr |= lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Disable the IRQs */\
+}
+
+#define UNMASK_IRQs {\
+ imr |= lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
+}
+
+#define MASK_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Mask the IRQs */\
+}
+
+/*
+** DE4X5 START/STOP
+*/
+#define START_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr |= OMR_ST | OMR_SR;\
+ outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr &= ~(OMR_ST|OMR_SR);\
+ outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
+}
+
+/*
+** DE4X5 SIA RESET
+*/
+#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
+
+/*
+** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
+*/
+#define DE4X5_AUTOSENSE_MS 250
+
+/*
+** SROM Structure
+*/
+struct de4x5_srom {
+ char sub_vendor_id[2];
+ char sub_system_id[2];
+ char reserved[12];
+ char id_block_crc;
+ char reserved2;
+ char version;
+ char num_controllers;
+ char ieee_addr[6];
+ char info[100];
+ short chksum;
+};
+#define SUB_VENDOR_ID 0x500a
+
+/*
+** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
+** and have sizes of both a power of 2 and a multiple of 4.
+** A size of 256 bytes for each buffer could be chosen because over 90% of
+** all packets in our network are <256 bytes long and 64 longword alignment
+** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
+** descriptors are needed for machines with an ALPHA CPU.
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 32 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
+ /* Multiple of 4 for DC21040 */
+ /* Allows 512 byte alignment */
+struct de4x5_desc {
+ volatile s32 status;
+ u32 des1;
+ u32 buf;
+ u32 next;
+ DESC_ALIGN
+};
+
+/*
+** The DE4X5 private structure
+*/
+#define DE4X5_PKT_STAT_SZ 16
+#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DE4X5_PKT_STAT_SZ */
+
+struct pkt_stats {
+ u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
+ u_int unicast;
+ u_int multicast;
+ u_int broadcast;
+ u_int excessive_collisions;
+ u_int tx_underruns;
+ u_int excessive_underruns;
+ u_int rx_runt_frames;
+ u_int rx_collision;
+ u_int rx_dribble;
+ u_int rx_overflow;
+};
+
+struct de4x5_private {
+ char adapter_name[80]; /* Adapter name */
+ u_long interrupt; /* Aligned ISR flag */
+ struct de4x5_desc *rx_ring; /* RX descriptor ring */
+ struct de4x5_desc *tx_ring; /* TX descriptor ring */
+ struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
+ struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
+ int rx_new, rx_old; /* RX descriptor ring pointers */
+ int tx_new, tx_old; /* TX descriptor ring pointers */
+ char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
+ char frame[64]; /* Min sized packet for loopback*/
+ spinlock_t lock; /* Adapter specific spinlock */
+ struct net_device_stats stats; /* Public stats */
+ struct pkt_stats pktStats; /* Private stats counters */
+ char rxRingSize;
+ char txRingSize;
+ int bus; /* EISA or PCI */
+ int bus_num; /* PCI Bus number */
+ int device; /* Device number on PCI bus */
+ int state; /* Adapter OPENED or CLOSED */
+ int chipset; /* DC21040, DC21041 or DC21140 */
+ s32 irq_mask; /* Interrupt Mask (Enable) bits */
+ s32 irq_en; /* Summary interrupt bits */
+ int media; /* Media (eg TP), mode (eg 100B)*/
+ int c_media; /* Remember the last media conn */
+ int fdx; /* media full duplex flag */
+ int linkOK; /* Link is OK */
+ int autosense; /* Allow/disallow autosensing */
+ int tx_enable; /* Enable descriptor polling */
+ int setup_f; /* Setup frame filtering type */
+ int local_state; /* State within a 'media' state */
+ struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
+ struct sia_phy sia; /* SIA PHY Information */
+ int active; /* Index to active PHY device */
+ int mii_cnt; /* Number of attached PHY's */
+ int timeout; /* Scheduling counter */
+ struct timer_list timer; /* Timer info for kernel */
+ int tmp; /* Temporary global per card */
+ struct {
+ u_long lock; /* Lock the cache accesses */
+ s32 csr0; /* Saved Bus Mode Register */
+ s32 csr6; /* Saved Operating Mode Reg. */
+ s32 csr7; /* Saved IRQ Mask Register */
+ s32 gep; /* Saved General Purpose Reg. */
+ s32 gepc; /* Control info for GEP */
+ s32 csr13; /* Saved SIA Connectivity Reg. */
+ s32 csr14; /* Saved SIA TX/RX Register */
+ s32 csr15; /* Saved SIA General Register */
+ int save_cnt; /* Flag if state already saved */
+ struct sk_buff *skb; /* Save the (re-ordered) skb's */
+ } cache;
+ struct de4x5_srom srom; /* A copy of the SROM */
+ int cfrv; /* Card CFRV copy */
+ int rx_ovf; /* Check for 'RX overflow' tag */
+ int useSROM; /* For non-DEC card use SROM */
+ int useMII; /* Infoblock using the MII */
+ int asBitValid; /* Autosense bits in GEP? */
+ int asPolarity; /* 0 => asserted high */
+ int asBit; /* Autosense bit number in GEP */
+ int defMedium; /* SROM default medium */
+ int tcount; /* Last infoblock number */
+ int infoblock_init; /* Initialised this infoblock? */
+ int infoleaf_offset; /* SROM infoleaf for controller */
+ s32 infoblock_csr6; /* csr6 value in SROM infoblock */
+ int infoblock_media; /* infoblock media */
+ int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
+ u_char *rst; /* Pointer to Type 5 reset info */
+ u_char ibn; /* Infoblock number */
+ struct parameters params; /* Command line/ #defined params */
+ struct device *gendev; /* Generic device */
+ dma_addr_t dma_rings; /* DMA handle for rings */
+ int dma_size; /* Size of the DMA area */
+ char *rx_bufs; /* rx bufs on alpha, sparc, ... */
+};
+
+/*
+** To get around certain poxy cards that don't provide an SROM
+** for the second and more DECchip, I have to key off the first
+** chip's address. I'll assume there's not a bad SROM iff:
+**
+** o the chipset is the same
+** o the bus number is the same and > 0
+**    o the sum of all the returned hw address bytes is 0 or 0x5fa
+**      (i.e. six 0x00 bytes or six 0xff bytes, since 6 * 0xff = 0x5fa)
+**
+** Also have to save the irq for those cards whose hardware designers
+** can't follow the PCI to PCI Bridge Architecture spec.
+*/
+static struct {
+ int chipset;
+ int bus;
+ int irq;
+ u_char addr[ETH_ALEN];
+} last = {0,};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingSize-lp->tx_new-1:\
+ lp->tx_old -lp->tx_new-1)
+
+#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
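+
+/*
+** A worked example of the above, assuming an illustrative txRingSize of 8:
+**    tx_old = 3, tx_new = 3  =>  TX_BUFFS_AVAIL = 3+8-3-1 = 7  (empty ring)
+**    tx_old = 4, tx_new = 3  =>  TX_BUFFS_AVAIL = 4-3-1   = 0  (full ring)
+**    tx_old = 0, tx_new = 7  =>  TX_BUFFS_AVAIL = 0+8-7-1 = 0  (full, wrapped)
+** One descriptor is always left unused so that a full ring can be told
+** apart from an empty one.
+*/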
+
+/*
+** Public Functions
+*/
+static int de4x5_open(struct net_device *dev);
+static int de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de4x5_close(struct net_device *dev);
+static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
+static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
+static void set_multicast_list(struct net_device *dev);
+static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
+static int de4x5_init(struct net_device *dev);
+static int de4x5_sw_reset(struct net_device *dev);
+static int de4x5_rx(struct net_device *dev);
+static int de4x5_tx(struct net_device *dev);
+static int de4x5_ast(struct net_device *dev);
+static int de4x5_txur(struct net_device *dev);
+static int de4x5_rx_ovfc(struct net_device *dev);
+
+static int autoconf_media(struct net_device *dev);
+static void create_packet(struct net_device *dev, char *frame, int len);
+static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
+static int dc21040_autoconf(struct net_device *dev);
+static int dc21041_autoconf(struct net_device *dev);
+static int dc21140m_autoconf(struct net_device *dev);
+static int dc2114x_autoconf(struct net_device *dev);
+static int srom_autoconf(struct net_device *dev);
+static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
+static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
+static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
+static int test_for_100Mb(struct net_device *dev, int msec);
+static int wait_for_link(struct net_device *dev);
+static int test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec);
+static int is_spd_100(struct net_device *dev);
+static int is_100_up(struct net_device *dev);
+static int is_10_up(struct net_device *dev);
+static int is_anc_capable(struct net_device *dev);
+static int ping_media(struct net_device *dev, int msec);
+static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
+static void de4x5_free_rx_buffs(struct net_device *dev);
+static void de4x5_free_tx_buffs(struct net_device *dev);
+static void de4x5_save_skbs(struct net_device *dev);
+static void de4x5_rst_desc_ring(struct net_device *dev);
+static void de4x5_cache_state(struct net_device *dev, int flag);
+static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
+static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
+static struct sk_buff *de4x5_get_cache(struct net_device *dev);
+static void de4x5_setup_intr(struct net_device *dev);
+static void de4x5_init_connection(struct net_device *dev);
+static int de4x5_reset_phy(struct net_device *dev);
+static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
+static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
+static int test_tp(struct net_device *dev, s32 msec);
+static int EISA_signature(char *name, struct device *device);
+static int PCI_signature(char *name, struct de4x5_private *lp);
+static void DevicePresent(struct net_device *dev, u_long iobase);
+static void enet_addr_rst(u_long aprom_addr);
+static int de4x5_bad_srom(struct de4x5_private *lp);
+static short srom_rd(u_long address, u_char offset);
+static void srom_latch(u_int command, u_long address);
+static void srom_command(u_int command, u_long address);
+static void srom_address(u_int command, u_long address, u_char offset);
+static short srom_data(u_int command, u_long address);
+/*static void srom_busy(u_int command, u_long address);*/
+static void sendto_srom(u_int command, u_long addr);
+static int getfrom_srom(u_long addr);
+static int srom_map_media(struct net_device *dev);
+static int srom_infoleaf_info(struct net_device *dev);
+static void srom_init(struct net_device *dev);
+static void srom_exec(struct net_device *dev, u_char *p);
+static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
+static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
+static int mii_rdata(u_long ioaddr);
+static void mii_wdata(int data, int len, u_long ioaddr);
+static void mii_ta(u_long rw, u_long ioaddr);
+static int mii_swap(int data, int len);
+static void mii_address(u_char addr, u_long ioaddr);
+static void sendto_mii(u32 command, int data, u_long ioaddr);
+static int getfrom_mii(u32 command, u_long ioaddr);
+static int mii_get_oui(u_char phyaddr, u_long ioaddr);
+static int mii_get_phy(struct net_device *dev);
+static void SetMulticastFilter(struct net_device *dev);
+static int get_hw_addr(struct net_device *dev);
+static void srom_repair(struct net_device *dev, int card);
+static int test_bad_enet(struct net_device *dev, int status);
+static int an_exception(struct de4x5_private *lp);
+static char *build_setup_frame(struct net_device *dev, int mode);
+static void disable_ast(struct net_device *dev);
+static void enable_ast(struct net_device *dev, u32 time_out);
+static long de4x5_switch_mac_port(struct net_device *dev);
+static int gep_rd(struct net_device *dev);
+static void gep_wr(s32 data, struct net_device *dev);
+static void timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long msec);
+static void yawn(struct net_device *dev, int state);
+static void de4x5_parse_params(struct net_device *dev);
+static void de4x5_dbg_open(struct net_device *dev);
+static void de4x5_dbg_mii(struct net_device *dev, int k);
+static void de4x5_dbg_media(struct net_device *dev);
+static void de4x5_dbg_srom(struct de4x5_srom *p);
+static void de4x5_dbg_rx(struct sk_buff *skb, int len);
+static int de4x5_strncmp(char *a, char *b, int n);
+static int dc21041_infoleaf(struct net_device *dev);
+static int dc21140_infoleaf(struct net_device *dev);
+static int dc21142_infoleaf(struct net_device *dev);
+static int dc21143_infoleaf(struct net_device *dev);
+static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
+
+/*
+** Note that module autoprobing is now allowed under EISA and PCI. The
+** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
+** to "do the right thing".
+*/
+
+static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
+
+module_param(io, int, 0);
+module_param(de4x5_debug, int, 0);
+module_param(dec_only, int, 0);
+module_param(args, charp, 0);
+
+MODULE_PARM_DESC(io, "de4x5 I/O base address");
+MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
+MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
+MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
+MODULE_LICENSE("GPL");
+
+/*
+** List the SROM infoleaf functions and chipsets
+*/
+struct InfoLeaf {
+ int chipset;
+ int (*fn)(struct net_device *);
+};
+static struct InfoLeaf infoleaf_array[] = {
+ {DC21041, dc21041_infoleaf},
+ {DC21140, dc21140_infoleaf},
+ {DC21142, dc21142_infoleaf},
+ {DC21143, dc21143_infoleaf}
+};
+#define INFOLEAF_SIZE (sizeof(infoleaf_array)/sizeof(struct InfoLeaf))
+
+/*
+** List the SROM info block functions
+*/
+static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
+ type0_infoblock,
+ type1_infoblock,
+ type2_infoblock,
+ type3_infoblock,
+ type4_infoblock,
+ type5_infoblock,
+ compact_infoblock
+};
+
+#define COMPACT (sizeof(dc_infoblock)/sizeof(int *) - 1)
+
+/*
+** Miscellaneous defines...
+*/
+#define RESET_DE4X5 {\
+ int i;\
+ i=inl(DE4X5_BMR);\
+ mdelay(1);\
+ outl(i | BMR_SWR, DE4X5_BMR);\
+ mdelay(1);\
+ outl(i, DE4X5_BMR);\
+ mdelay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
+ mdelay(1);\
+}
+
+#define PHY_HARD_RESET {\
+ outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
+ mdelay(1); /* Assert for 1ms */\
+ outl(0x00, DE4X5_GEP);\
+ mdelay(2); /* Wait for 2ms */\
+}
+
+
+static int __devinit
+de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
+{
+ char name[DE4X5_NAME_LENGTH + 1];
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = NULL;
+ int i, status=0;
+
+ gendev->driver_data = dev;
+
+ /* Ensure we're not sleeping */
+ if (lp->bus == EISA) {
+ outb(WAKEUP, PCI_CFPM);
+ } else {
+ pdev = to_pci_dev (gendev);
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
+ }
+ mdelay(10);
+
+ RESET_DE4X5;
+
+ if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
+ return -ENXIO; /* Hardware could not reset */
+ }
+
+ /*
+ ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
+ */
+ lp->useSROM = FALSE;
+ if (lp->bus == PCI) {
+ PCI_signature(name, lp);
+ } else {
+ EISA_signature(name, gendev);
+ }
+
+ if (*name == '\0') { /* Not found a board signature */
+ return -ENXIO;
+ }
+
+ dev->base_addr = iobase;
+ printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase);
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ } else {
+ lp->cache.gepc = GEP_INIT;
+ lp->asBit = GEP_SLNK;
+ lp->asPolarity = GEP_SLNK;
+ lp->asBitValid = TRUE;
+ lp->timeout = -1;
+ lp->gendev = gendev;
+ spin_lock_init(&lp->lock);
+ init_timer(&lp->timer);
+ de4x5_parse_params(dev);
+
+ /*
+ ** Choose correct autosensing in case someone messed up
+ */
+ lp->autosense = lp->params.autosense;
+ if (lp->chipset != DC21140) {
+ if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
+ lp->params.autosense = TP;
+ }
+ if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
+ lp->params.autosense = BNC;
+ }
+ }
+ lp->fdx = lp->params.fdx;
+ sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id);
+
+ lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
+#if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY)
+ lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
+#endif
+ lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
+ &lp->dma_rings, GFP_ATOMIC);
+ if (lp->rx_ring == NULL) {
+ return -ENOMEM;
+ }
+
+ lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
+
+ /*
+ ** Set up the RX descriptor ring (Intels)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ */
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf = 0;
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+#else
+ {
+ dma_addr_t dma_rx_bufs;
+
+ dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
+ * sizeof(struct de4x5_desc);
+ dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
+ lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
+ + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf =
+ cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+ }
+#endif
+
+ barrier();
+
+ lp->rxRingSize = NUM_RX_DESC;
+ lp->txRingSize = NUM_TX_DESC;
+
+ /* Write the end of list marker to the descriptor lists */
+ lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
+ lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
+
+ /* Tell the adapter where the TX/RX rings are located. */
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ /* Initialise the IRQ mask and Enable/Disable */
+ lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
+ lp->irq_en = IMR_NIM | IMR_AIM;
+
+ /* Create a loopback packet frame for later media probing */
+ create_packet(dev, lp->frame, sizeof(lp->frame));
+
+ /* Check whether the RX overflow bug needs to be tested for */
+ i = lp->cfrv & 0x000000fe;
+ if ((lp->chipset == DC21140) && (i == 0x20)) {
+ lp->rx_ovf = 1;
+ }
+
+ /* Initialise the SROM pointers if possible */
+ if (lp->useSROM) {
+ lp->state = INITIALISED;
+ if (srom_infoleaf_info(dev)) {
+ dma_free_coherent (gendev, lp->dma_size,
+ lp->rx_ring, lp->dma_rings);
+ return -ENXIO;
+ }
+ srom_init(dev);
+ }
+
+ lp->state = CLOSED;
+
+ /*
+ ** Check for an MII interface
+ */
+ if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
+ mii_get_phy(dev);
+ }
+
+#ifndef __sparc_v9__
+ printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
+#else
+ printk(" and requires IRQ%x (provided by %s).\n", dev->irq,
+#endif
+ ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
+ }
+
+ if (de4x5_debug & DEBUG_VERSION) {
+ printk(version);
+ }
+
+ /* The DE4X5-specific entries in the device structure. */
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, gendev);
+ dev->open = &de4x5_open;
+ dev->hard_start_xmit = &de4x5_queue_pkt;
+ dev->stop = &de4x5_close;
+ dev->get_stats = &de4x5_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &de4x5_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic fields of the device structure. */
+ if ((status = register_netdev (dev))) {
+ dma_free_coherent (gendev, lp->dma_size,
+ lp->rx_ring, lp->dma_rings);
+ return status;
+ }
+
+ /* Let the adapter sleep to save power */
+ yawn(dev, SLEEP);
+
+ return status;
+}
+
+
+static int
+de4x5_open(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 omr;
+
+ /* Allocate the RX buffers */
+ for (i=0; i<lp->rxRingSize; i++) {
+ if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
+ de4x5_free_rx_buffs(dev);
+ return -EAGAIN;
+ }
+ }
+
+ /*
+ ** Wake up the adapter
+ */
+ yawn(dev, WAKEUP);
+
+ /*
+ ** Re-initialize the DE4X5...
+ */
+ status = de4x5_init(dev);
+ spin_lock_init(&lp->lock);
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
+ lp->adapter_name, dev)) {
+ printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+ if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
+ lp->adapter_name, dev)) {
+ printk("\n Cannot get IRQ- reconfigure your hardware.\n");
+ disable_ast(dev);
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+ yawn(dev, SLEEP);
+ lp->state = CLOSED;
+ return -EAGAIN;
+ } else {
+ printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
+ printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
+ }
+ }
+
+ lp->interrupt = UNMASK_INTERRUPTS;
+ dev->trans_start = jiffies;
+
+ START_DE4X5;
+
+ de4x5_setup_intr(dev);
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
+ printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
+ printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
+ printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
+ printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
+ printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
+ printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
+ printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
+ }
+
+ return status;
+}
+
+/*
+** Initialize the DE4X5 operating conditions. NB: a chip problem with the
+** DC21140 requires using perfect filtering mode for that chip. Since I can't
+** see why I'd want > 14 multicast addresses, I have changed all chips to use
+** the perfect filtering mode. Keep the DMA burst length at 8: there seem
+** to be data corruption problems if it is larger (UDP errors seen from a
+** ttcp source).
+*/
+static int
+de4x5_init(struct net_device *dev)
+{
+ /* Lock out other processes whilst setting up the hardware */
+ netif_stop_queue(dev);
+
+ de4x5_sw_reset(dev);
+
+ /* Autoconfigure the connected port */
+ autoconf_media(dev);
+
+ return 0;
+}
+
+static int
+de4x5_sw_reset(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 bmr, omr;
+
+ /* Select the MII or SRL port now and RESET the MAC */
+ if (!lp->useSROM) {
+ if (lp->phy[lp->active].id != 0) {
+ lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
+ } else {
+ lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
+ }
+ de4x5_switch_mac_port(dev);
+ }
+
+ /*
+ ** Set the programmable burst length to 8 longwords for all the DC21140
+ ** Fasternet chips and 4 longwords for all others: DMA errors result
+ ** without these values. Cache align 16 long.
+ */
+ bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
+ bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
+ outl(bmr, DE4X5_BMR);
+
+ omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
+ if (lp->chipset == DC21140) {
+ omr |= (OMR_SDP | OMR_SB);
+ }
+ lp->setup_f = PERFECT;
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+
+ /* Build the setup frame depending on filtering mode */
+ SetMulticastFilter(dev);
+
+ load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ /* Poll for setup frame completion (adapter interrupts are disabled now) */
+
+ for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
+ mdelay(1);
+ if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
+ }
+ outl(omr, DE4X5_OMR); /* Stop everything! */
+
+ if (j == 0) {
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ inl(DE4X5_STS));
+ status = -EIO;
+ }
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ return status;
+}
+
+/*
+** Writes a socket buffer address to the next available transmit descriptor.
+*/
+static int
+de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int status = 0;
+ u_long flags = 0;
+
+ netif_stop_queue(dev);
+ if (lp->tx_enable == NO) { /* Cannot send for now */
+ return -1;
+ }
+
+ /*
+ ** Clean out the TX ring asynchronously to interrupts - sometimes the
+ ** interrupts are lost by delayed descriptor status updates relative to
+ ** the irq assertion, especially with a busy PCI bus.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ de4x5_tx(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Test if cache is already locked - requeue skb if so */
+ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
+ return -1;
+
+ /* Transmit descriptor ring full or stale skb */
+ if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
+ if (lp->interrupt) {
+ de4x5_putb_cache(dev, skb); /* Requeue the buffer */
+ } else {
+ de4x5_put_cache(dev, skb);
+ }
+ if (de4x5_debug & DEBUG_TX) {
+ printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
+ }
+ } else if (skb->len > 0) {
+ /* If we already have stuff queued locally, use that first */
+ if (lp->cache.skb && !lp->interrupt) {
+ de4x5_put_cache(dev, skb);
+ skb = de4x5_get_cache(dev);
+ }
+
+ while (skb && !netif_queue_stopped(dev) &&
+ (u_long) lp->tx_skb[lp->tx_new] <= 1) {
+ spin_lock_irqsave(&lp->lock, flags);
+ netif_stop_queue(dev);
+ load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
+ lp->stats.tx_bytes += skb->len;
+ outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ dev->trans_start = jiffies;
+
+ if (TX_BUFFS_AVAIL) {
+ netif_start_queue(dev); /* Another pkt may be queued */
+ }
+ skb = de4x5_get_cache(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ if (skb) de4x5_putb_cache(dev, skb);
+ }
+
+ lp->cache.lock = 0;
+
+ return status;
+}
+
+/*
+** The DE4X5 interrupt handler.
+**
+** I/O Read/Writes through intermediate PCI bridges are never 'posted',
+** so that the asserted interrupt always has some real data to work with -
+** if these I/O accesses are ever changed to memory accesses, ensure the
+** STS write is read immediately to complete the transaction if the adapter
+** is not on bus 0. Lost interrupts can still occur when the PCI bus load
+** is high and descriptor status bits cannot be set before the associated
+** interrupt is asserted and this routine entered.
+*/
+static irqreturn_t
+de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct de4x5_private *lp;
+ s32 imr, omr, sts, limit;
+ u_long iobase;
+ unsigned int handled = 0;
+
+ if (dev == NULL) {
+ printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ lp = netdev_priv(dev);
+ spin_lock(&lp->lock);
+ iobase = dev->base_addr;
+
+ DISABLE_IRQs; /* Ensure non re-entrancy */
+
+ if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ synchronize_irq(dev->irq);
+
+ for (limit=0; limit<8; limit++) {
+ sts = inl(DE4X5_STS); /* Read IRQ status */
+ outl(sts, DE4X5_STS); /* Reset the board interrupts */
+
+ if (!(sts & lp->irq_mask)) break;/* All done */
+ handled = 1;
+
+ if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
+ de4x5_rx(dev);
+
+ if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
+ de4x5_tx(dev);
+
+ if (sts & STS_LNF) { /* TP Link has failed */
+ lp->irq_mask &= ~IMR_LFM;
+ }
+
+ if (sts & STS_UNF) { /* Transmit underrun */
+ de4x5_txur(dev);
+ }
+
+ if (sts & STS_SE) { /* Bus Error */
+ STOP_DE4X5;
+ printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
+ dev->name, sts);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* Load the TX ring with any locally stored packets */
+ if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
+ while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) {
+ de4x5_queue_pkt(de4x5_get_cache(dev), dev);
+ }
+ lp->cache.lock = 0;
+ }
+
+ lp->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int
+de4x5_rx(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
+ entry=lp->rx_new) {
+ status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
+
+ if (lp->rx_ovf) {
+ if (inl(DE4X5_MFC) & MFC_FOCM) {
+ de4x5_rx_ovfc(dev);
+ break;
+ }
+ }
+
+ if (status & RD_FS) { /* Remember the start of frame */
+ lp->rx_old = entry;
+ }
+
+ if (status & RD_LS) { /* Valid frame status */
+ if (lp->tx_enable) lp->linkOK++;
+ if (status & RD_ES) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
+ if (status & RD_CE) lp->stats.rx_crc_errors++;
+ if (status & RD_OF) lp->stats.rx_fifo_errors++;
+ if (status & RD_TL) lp->stats.rx_length_errors++;
+ if (status & RD_RF) lp->pktStats.rx_runt_frames++;
+ if (status & RD_CS) lp->pktStats.rx_collision++;
+ if (status & RD_DB) lp->pktStats.rx_dribble++;
+ if (status & RD_OF) lp->pktStats.rx_overflow++;
+ } else { /* A valid frame received */
+ struct sk_buff *skb;
+ short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
+ >> 16) - 4;
+
+ if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
+ printk("%s: Insufficient memory; nuking packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ } else {
+ de4x5_dbg_rx(skb, pkt_len);
+
+ /* Push up the protocol stack */
+ skb->protocol=eth_type_trans(skb,dev);
+ de4x5_local_stats(dev, skb->data, pkt_len);
+ netif_rx(skb);
+
+ /* Update stats */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ /* Change buffer ownership for this frame, back to the adapter */
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
+ lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+ lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ return 0;
+}
+
+static inline void
+de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
+{
+ dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
+ le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
+ DMA_TO_DEVICE);
+ if ((u_long) lp->tx_skb[entry] > 1)
+ dev_kfree_skb_irq(lp->tx_skb[entry]);
+ lp->tx_skb[entry] = NULL;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+de4x5_tx(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
+ if (status < 0) { /* Buffer not sent yet */
+ break;
+ } else if (status != 0x7fffffff) { /* Not setup frame */
+ if (status & TD_ES) { /* An error happened */
+ lp->stats.tx_errors++;
+ if (status & TD_NC) lp->stats.tx_carrier_errors++;
+ if (status & TD_LC) lp->stats.tx_window_errors++;
+ if (status & TD_UF) lp->stats.tx_fifo_errors++;
+ if (status & TD_EC) lp->pktStats.excessive_collisions++;
+ if (status & TD_DE) lp->stats.tx_aborted_errors++;
+
+ if (TX_PKT_PENDING) {
+ outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
+ }
+ } else { /* Packet sent */
+ lp->stats.tx_packets++;
+ if (lp->tx_enable) lp->linkOK++;
+ }
+ /* Update the collision counter */
+ lp->stats.collisions += ((status & TD_EC) ? 16 :
+ ((status & TD_CC) >> 3));
+
+ /* Free the buffer. */
+ if (lp->tx_skb[entry] != NULL)
+ de4x5_free_tx_buff(lp, entry);
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
+ }
+
+ /* Any resources available? */
+ if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
+ if (lp->interrupt)
+ netif_wake_queue(dev);
+ else
+ netif_start_queue(dev);
+ }
+
+ return 0;
+}
+
+static int
+de4x5_ast(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ disable_ast(dev);
+
+ if (lp->useSROM) {
+ next_tick = srom_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ next_tick = dc21140m_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ next_tick = dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21040) {
+ next_tick = dc21040_autoconf(dev);
+ }
+ lp->linkOK = 0;
+ enable_ast(dev, next_tick);
+
+ return 0;
+}
+
+static int
+de4x5_txur(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
+ omr &= ~(OMR_ST|OMR_SR);
+ outl(omr, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_TS);
+ if ((omr & OMR_TR) < OMR_TR) {
+ omr += 0x4000;
+ } else {
+ omr |= OMR_SF;
+ }
+ outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
+ }
+
+ return 0;
+}
+
+static int
+de4x5_rx_ovfc(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ outl(omr & ~OMR_SR, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_RS);
+
+ for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
+ lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ outl(omr, DE4X5_OMR);
+
+ return 0;
+}
+
+static int
+de4x5_close(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 imr, omr;
+
+ disable_ast(dev);
+
+ netif_stop_queue(dev);
+
+ if (de4x5_debug & DEBUG_CLOSE) {
+ printk("%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, inl(DE4X5_STS));
+ }
+
+ /*
+ ** We stop the DE4X5 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+ STOP_DE4X5;
+
+ /* Free the associated irq */
+ free_irq(dev->irq, dev);
+ lp->state = CLOSED;
+
+ /* Free any socket buffers */
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+
+ /* Put the adapter to sleep to save power */
+ yawn(dev, SLEEP);
+
+ return 0;
+}
+
+static struct net_device_stats *
+de4x5_get_stats(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
+
+ return &lp->stats;
+}
+
+static void
+de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DE4X5_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+
+ return;
+}
+
+/*
+** Removes the TD_IC flag from previous descriptor to improve TX performance.
+** If the flag is changed on a descriptor that is being read by the hardware,
+** I assume PCI transaction ordering will mean you are either successful or
+** just miss asserting the change to the hardware. Anyway you're messing with
+** a descriptor you don't own, but this shouldn't kill the chip provided
+** the descriptor register is read only to the hardware.
+*/
+static void
+load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
+ dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
+
+ lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
+ lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
+ lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
+ lp->tx_skb[lp->tx_new] = skb;
+ lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
+ barrier();
+
+ lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
+ barrier();
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ /* First, double check that the adapter is open */
+ if (lp->state == OPEN) {
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ u32 omr;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ SetMulticastFilter(dev);
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, (struct sk_buff *)1);
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+*/
+static void
+SetMulticastFilter(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i, j, bit, byte;
+ u16 hashcode;
+ u32 omr, crc;
+ char *pa;
+ unsigned char *addrs;
+
+ omr = inl(DE4X5_OMR);
+ omr &= ~(OMR_PR | OMR_PM);
+ pa = build_setup_frame(dev, ALL); /* Build the basic frame */
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ omr |= OMR_PM; /* Pass all multicasts */
+ } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = ether_crc_le(ETH_ALEN, addrs);
+ hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
+
+ byte <<= 1; /* calc offset into setup frame */
+ if (byte & 0x02) {
+ byte -= 1;
+ }
+ lp->setup_frame[byte] |= bit;
+ }
+ }
+ } else { /* Perfect filtering */
+ for (j=0; j<dev->mc_count; j++) {
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ for (i=0; i<ETH_ALEN; i++) {
+ *(pa + (i&1)) = *addrs++;
+ if (i & 0x01) pa += 4;
+ }
+ }
+ }
+ outl(omr, DE4X5_OMR);
+
+ return;
+}
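+
+/*
+** Editor's illustration (not driver code): how a single multicast address
+** is folded into the hash filter by SetMulticastFilter() above. The 9 LSBs
+** of the little-endian CRC select one of 512 filter bits; the byte index is
+** then remapped because the setup frame appears to use only the low 16 bits
+** of each 32-bit longword.
+*/
+#if 0 /* hypothetical helper, for illustration only */
+static void hash_filter_one(struct net_device *dev, u_char *addr)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u32 crc = ether_crc_le(ETH_ALEN, addr);
+ u16 hashcode = crc & HASH_BITS; /* 9 LSBs of the CRC */
+ int byte = hashcode >> 3; /* filter byte index */
+ int bit = 1 << (hashcode & 0x07); /* bit within that byte */
+
+ byte <<= 1; /* remap into the setup frame */
+ if (byte & 0x02) byte -= 1;
+ lp->setup_frame[byte] |= bit;
+}
+#endif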
+
+#ifdef CONFIG_EISA
+
+static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
+
+static int __init de4x5_eisa_probe (struct device *gendev)
+{
+ struct eisa_device *edev;
+ u_long iobase;
+ u_char irq, regval;
+ u_short vendor;
+ u32 cfid;
+ int status, device;
+ struct net_device *dev;
+ struct de4x5_private *lp;
+
+ edev = to_eisa_device (gendev);
+ iobase = edev->base_addr;
+
+ if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
+ return -EBUSY;
+
+ if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
+ DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
+ status = -EBUSY;
+ goto release_reg_1;
+ }
+
+ if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
+ status = -ENOMEM;
+ goto release_reg_2;
+ }
+ lp = netdev_priv(dev);
+
+ cfid = (u32) inl(PCI_CFID);
+ lp->cfrv = (u_short) inl(PCI_CFRV);
+ device = (cfid >> 8) & 0x00ffff00;
+ vendor = (u_short) cfid;
+
+ /* Read the EISA Configuration Registers */
+ regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
+#ifdef CONFIG_ALPHA
+ /* Looks like the Jensen firmware (rev 2.2) doesn't really
+ * care about the EISA configuration, and thus doesn't
+ * configure the PLX bridge properly. Oh well... Simply mimic
+ * the EISA config file to sort it out. */
+
+ /* EISA REG1: Assert DecChip 21040 HW Reset */
+ outb (ER1_IAM | 1, EISA_REG1);
+ mdelay (1);
+
+ /* EISA REG1: Deassert DecChip 21040 HW Reset */
+ outb (ER1_IAM, EISA_REG1);
+ mdelay (1);
+
+ /* EISA REG3: R/W Burst Transfer Enable */
+ outb (ER3_BWE | ER3_BRE, EISA_REG3);
+
+ /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
+ outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
+#endif
+ irq = de4x5_irq[(regval >> 1) & 0x03];
+
+ if (is_DC2114x) {
+ device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+ lp->bus = EISA;
+
+ /* Write the PCI Configuration Registers */
+ outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
+ outl(0x00006000, PCI_CFLT);
+ outl(iobase, PCI_CBIO);
+
+ DevicePresent(dev, EISA_APROM);
+
+ dev->irq = irq;
+
+ if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
+ return 0;
+ }
+
+ free_netdev (dev);
+ release_reg_2:
+ release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
+ release_reg_1:
+ release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
+
+ return status;
+}
+
+static int __devexit de4x5_eisa_remove (struct device *device)
+{
+ struct net_device *dev;
+ u_long iobase;
+
+ dev = device->driver_data;
+ iobase = dev->base_addr;
+
+ unregister_netdev (dev);
+ free_netdev (dev);
+ release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
+ release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
+
+ return 0;
+}
+
+static struct eisa_device_id de4x5_eisa_ids[] = {
+ { "DEC4250", 0 }, /* 0 is the board name index... */
+ { "" }
+};
+
+static struct eisa_driver de4x5_eisa_driver = {
+ .id_table = de4x5_eisa_ids,
+ .driver = {
+ .name = "de4x5",
+ .probe = de4x5_eisa_probe,
+ .remove = __devexit_p (de4x5_eisa_remove),
+ }
+};
+MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
+#endif
+
+#ifdef CONFIG_PCI
+
+/*
+** This function searches the current bus (which is >0) for a DECchip with an
+** SROM, so that in multiport cards that have one SROM shared between multiple
+** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
+** For single port cards this is a time waster...
+*/
+static void __devinit
+srom_search(struct net_device *dev, struct pci_dev *pdev)
+{
+ u_char pb;
+ u_short vendor, status;
+ u_int irq = 0, device;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int i, j, cfrv;
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct list_head *walk = &pdev->bus_list;
+
+ for (walk = walk->next; walk != &pdev->bus_list; walk = walk->next) {
+ struct pci_dev *this_dev = pci_dev_b(walk);
+
+ /* Skip the pci_bus list entry */
+ if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+
+ vendor = this_dev->vendor;
+ device = this_dev->device << 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
+
+ /* Get the chip configuration revision register */
+ pb = this_dev->bus->number;
+ pci_read_config_dword(this_dev, PCI_REVISION_ID, &cfrv);
+
+ /* Set the device number information */
+ lp->device = PCI_SLOT(this_dev->devfn);
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) {
+ device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+ iobase = pci_resource_start(this_dev, 0);
+
+ /* Fetch the IRQ to be used */
+ irq = this_dev->irq;
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
+
+ /* Check if I/O accesses are enabled */
+ pci_read_config_word(this_dev, PCI_COMMAND, &status);
+ if (!(status & PCI_COMMAND_IO)) continue;
+
+ /* Search for a valid SROM attached to this DECchip */
+ DevicePresent(dev, DE4X5_APROM);
+ for (j=0, i=0; i<ETH_ALEN; i++) {
+ j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ if ((j != 0) && (j != 0x5fa)) {
+ last.chipset = device;
+ last.bus = pb;
+ last.irq = irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ return;
+ }
+ }
+
+ return;
+}
+
+/*
+** PCI bus I/O device probe
+** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
+** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
+** enabled by the user first in the set up utility. Hence we just check for
+** enabled features and silently ignore the card if they're not.
+**
+** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
+** bit. Here, check for I/O accesses and then set BM. If you put the card in
+** a non BM slot, you're on your own (and complain to the PC vendor that your
+** PC doesn't conform to the PCI standard)!
+**
+** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
+** kernels use the V0.535[n] drivers.
+*/
+
+static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u_char pb, pbus = 0, dev_num, dnum = 0, timer;
+ u_short vendor, status;
+ u_int irq = 0, device;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int error;
+ struct net_device *dev;
+ struct de4x5_private *lp;
+
+ dev_num = PCI_SLOT(pdev->devfn);
+ pb = pdev->bus->number;
+
+ if (io) { /* probe a single PCI device */
+ pbus = (u_short)(io >> 8);
+ dnum = (u_short)(io & 0xff);
+ if ((pbus != pb) || (dnum != dev_num))
+ return -ENODEV;
+ }
+
+ vendor = pdev->vendor;
+ device = pdev->device << 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
+ return -ENODEV;
+
+ /* Ok, the device seems to be for us. */
+ if ((error = pci_enable_device (pdev)))
+ return error;
+
+ if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
+ error = -ENOMEM;
+ goto disable_dev;
+ }
+
+ lp = netdev_priv(dev);
+ lp->bus = PCI;
+ lp->bus_num = 0;
+
+ /* Search for an SROM on this bus */
+ if (lp->bus_num != pb) {
+ lp->bus_num = pb;
+ srom_search(dev, pdev);
+ }
+
+ /* Get the chip configuration revision register */
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &lp->cfrv);
+
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) {
+ device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+ iobase = pci_resource_start(pdev, 0);
+
+ /* Fetch the IRQ to be used */
+ irq = pdev->irq;
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ /* Check if I/O accesses and Bus Mastering are enabled */
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+#ifdef __powerpc__
+ if (!(status & PCI_COMMAND_IO)) {
+ status |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, status);
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+ }
+#endif /* __powerpc__ */
+ if (!(status & PCI_COMMAND_IO)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ if (!(status & PCI_COMMAND_MASTER)) {
+ status |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, status);
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+ }
+ if (!(status & PCI_COMMAND_MASTER)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ /* Check the latency timer for values >= 0x60 */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
+ if (timer < 0x60) {
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
+ }
+
+ DevicePresent(dev, DE4X5_APROM);
+
+ if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
+ error = -EBUSY;
+ goto free_dev;
+ }
+
+ dev->irq = irq;
+
+ if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
+ goto release;
+ }
+
+ return 0;
+
+ release:
+ release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
+ free_dev:
+ free_netdev (dev);
+ disable_dev:
+ pci_disable_device (pdev);
+ return error;
+}
+
+static void __devexit de4x5_pci_remove (struct pci_dev *pdev)
+{
+ struct net_device *dev;
+ u_long iobase;
+
+ dev = pdev->dev.driver_data;
+ iobase = dev->base_addr;
+
+ unregister_netdev (dev);
+ free_netdev (dev);
+ release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
+ pci_disable_device (pdev);
+}
+
+static struct pci_device_id de4x5_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { },
+};
+
+static struct pci_driver de4x5_pci_driver = {
+ .name = "de4x5",
+ .id_table = de4x5_pci_tbl,
+ .probe = de4x5_pci_probe,
+ .remove = __devexit_p (de4x5_pci_remove),
+};
+
+#endif
+
+/*
+** Auto configure the media here rather than setting the port at compile
+** time. This routine is called by de4x5_init() and when a loss of media is
+** detected (excessive collisions, loss of carrier, no carrier or link fail
+** [TP] or no recent receive activity) to check whether the user has been
+** sneaky and changed the port on us.
+*/
+static int
+autoconf_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ lp->linkOK = 0;
+ lp->c_media = AUTO; /* Bogus last media */
+ disable_ast(dev);
+ inl(DE4X5_MFC); /* Zero the lost frames counter */
+ lp->media = INIT;
+ lp->tcount = 0;
+
+ if (lp->useSROM) {
+ next_tick = srom_autoconf(dev);
+ } else if (lp->chipset == DC21040) {
+ next_tick = dc21040_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ next_tick = dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ next_tick = dc21140m_autoconf(dev);
+ }
+
+ enable_ast(dev, next_tick);
+
+ return (lp->media);
+}
+
+/*
+** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
+** from BNC as the port has a jumper to set thick or thin wire. When set for
+** BNC, the BNC port will indicate activity if it's not terminated correctly.
+** The only way to test for that is to place a loopback packet onto the
+** network and watch for errors. Since we're messing with the interrupt mask
+** register, disable the board interrupts and do not allow any more packets to
+** be queued to the hardware. Re-enable everything only when the media is
+** found.
+** I may have to "age out" locally queued packets so that the higher layer
+** timeouts don't effectively duplicate packets on the network.
+*/
+static int
+dc21040_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ s32 imr;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = NO;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev);
+ if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
+ lp->media = TP;
+ } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
+ lp->media = BNC_AUI;
+ } else if (lp->autosense == EXT_SIA) {
+ lp->media = EXT_SIA;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21040_autoconf(dev);
+ break;
+
+ case TP:
+ next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
+ TP_SUSPECT, test_tp);
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
+ break;
+
+ case BNC:
+ case AUI:
+ case BNC_AUI:
+ next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
+ BNC_AUI_SUSPECT, ping_media);
+ break;
+
+ case BNC_AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
+ break;
+
+ case EXT_SIA:
+ next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
+ NC, EXT_SIA_SUSPECT, ping_media);
+ break;
+
+ case EXT_SIA_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
+ break;
+
+ case NC:
+ /* default to TP for all */
+ reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = NO;
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
+ int next_state, int suspect_state,
+ int (*fn)(struct net_device *, int))
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 0:
+ reset_init_sia(dev, csr13, csr14, csr15);
+ lp->local_state++;
+ next_tick = 500;
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else {
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->local_state = 0;
+ lp->media = next_state;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = suspect_state;
+ next_tick = 3000;
+ }
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
+ int (*fn)(struct net_device *, int),
+ int (*asfn)(struct net_device *))
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 1:
+ if (lp->linkOK) {
+ lp->media = prev_state;
+ } else {
+ lp->local_state++;
+ next_tick = asfn(dev);
+ }
+ break;
+
+ case 2:
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else if (!linkBad) {
+ lp->local_state--;
+ lp->media = prev_state;
+ } else {
+ lp->media = INIT;
+ lp->tcount++;
+ }
+ }
+
+ return next_tick;
+}
+
+/*
+** Autoconfigure the media when using the DC21041. AUI needs to be tested
+** before BNC, because the BNC port will indicate activity if it's not
+** terminated correctly. The only way to test for that is to place a loopback
+** packet onto the network and watch for errors. Since we're messing with
+** the interrupt mask register, disable the board interrupts and do not allow
+** any more packets to be queued to the hardware. Re-enable everything only
+** when the media is found.
+*/
+static int
+dc21041_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, irqs, irq_mask, imr, omr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = NO;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
+ lp->media = TP; /* On chip auto negotiation is broken */
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21041_autoconf(dev);
+ break;
+
+ case TP_NW:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts & STS_LNP) {
+ lp->media = ANS;
+ } else {
+ lp->media = AUI;
+ }
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ if (!lp->tx_enable) {
+ irqs = STS_LNP;
+ irq_mask = IMR_LPM;
+ sts = test_ans(dev, irqs, irq_mask, 3000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ lp->media = TP;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = ANS_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case ANS_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
+ break;
+
+ case TP:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ if (inl(DE4X5_SISR) & SISR_NRA) {
+ lp->media = AUI; /* Non selected port activity */
+ } else {
+ lp->media = BNC;
+ }
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = TP_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->media = NC;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
+ break;
+
+ case NC:
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = NO;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** Some autonegotiation chips are broken in that they do not return the
+** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
+** register, except at the first power up negotiation.
+*/
+static int
+dc21140m_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int ana, anlpa, cap, cr, slnk, sr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ u_long imr, omr, iobase = dev->base_addr;
+
+ switch(lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->useSROM) {
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ return next_tick;
+ }
+ srom_exec(dev, lp->phy[lp->active].gep);
+ if (lp->infoblock_media == ANS) {
+ ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ } else {
+ lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
+ SET_10Mb;
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if ((lp->autosense == AUTO) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ } else if (lp->autosense == AUTO) {
+ lp->media = SPD_DET;
+ } else if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = NC;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc21140m_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (lp->timeout < 0) {
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ (~gep_rd(dev) & GEP_LNP));
+ SET_100Mb_PDET;
+ }
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ next_tick = slnk & ~TIMER_CB;
+ } else {
+ if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = NC;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case _100Mb: /* Set 100Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case BNC:
+ case AUI:
+ case _10Mb: /* Set 10Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case NC:
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = FALSE;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** This routine may be merged into dc21140m_autoconf() at some point, as I'm
+** changing how the media is determined - but I'm trying to keep it backwards
+** compatible with the de500-xa and de500-aa.
+** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
+** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
+** This routine only has to figure out whether 10Mb/s or 100Mb/s is
+** active.
+** When autonegotiation is working, the ANS part searches the SROM for
+** the highest common speed (TP) link that both ends can run, and whether
+** that link can be full duplex. That infoblock is then executed and the
+** link speed is set.
+**
+** Only _10Mb and _100Mb are tested here.
+*/
+static int
+dc2114x_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if (lp->params.autosense & ~AUTO) {
+ srom_map_media(dev); /* Fixed media requested */
+ if (lp->media != lp->params.autosense) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ lp->media = INIT;
+ }
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = SPD_DET;
+ if ((lp->infoblock_media == ANS) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc2114x_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->tcount++;
+ lp->media = INIT;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ if (lp->media == _100Mb) {
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ lp->media = SPD_DET;
+ return (slnk & ~TIMER_CB);
+ }
+ } else {
+ if (wait_for_link(dev) < 0) {
+ lp->media = SPD_DET;
+ return PDET_LINK_WAIT;
+ }
+ }
+ if (lp->media == ANS) { /* Do MII parallel detection */
+ if (is_spd_100(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = _10Mb;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
+ (((lp->media == _10Mb) || (lp->media == TP) ||
+ (lp->media == BNC) || (lp->media == AUI)) &&
+ is_10_up(dev))) {
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->tcount++;
+ lp->media = INIT;
+ }
+ break;
+
+ case _10Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case _100Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ default:
+ lp->tcount++;
+ printk("Huh?: media:%02x\n", lp->media);
+ lp->media = INIT;
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+srom_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ return lp->infoleaf_fn(dev);
+}
+
+/*
+** This mapping keeps the original media codes and FDX flag unchanged.
+** While it isn't strictly necessary, it keeps the media handling consistent
+** for the moment. The early return avoids a media state / SROM media space
+** clash.
+*/
+static int
+srom_map_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ lp->fdx = 0;
+ if (lp->infoblock_media == lp->media)
+ return 0;
+
+ switch(lp->infoblock_media) {
+ case SROM_10BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
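+ /* fall through */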
+ case SROM_10BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = TP;
+ }
+ break;
+
+ case SROM_10BASE2:
+ lp->media = BNC;
+ break;
+
+ case SROM_10BASE5:
+ lp->media = AUI;
+ break;
+
+ case SROM_100BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
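+ /* fall through */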
+ case SROM_100BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASET4:
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASEFF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
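+ /* fall through */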
+ case SROM_100BASEF:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case ANS:
+ lp->media = ANS;
+ lp->fdx = lp->params.fdx;
+ break;
+
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ lp->infoblock_media);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+de4x5_init_connection(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_long flags = 0;
+
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media; /* Stop scrolling media messages */
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ de4x5_rst_desc_ring(dev);
+ de4x5_setup_intr(dev);
+ lp->tx_enable = YES;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ outl(POLL_DEMAND, DE4X5_TPD);
+
+ netif_wake_queue(dev);
+
+ return;
+}
+
+/*
+** General PHY reset function. Some MII devices don't reset correctly
+** since their MII address pins can float at voltages that are dependent
+** on the signal pin use. Do a double reset to ensure a reset.
+*/
+static int
+de4x5_reset_phy(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int next_tick = 0;
+
+ if ((lp->useSROM) || (lp->phy[lp->active].id)) {
+ if (lp->timeout < 0) {
+ if (lp->useSROM) {
+ if (lp->phy[lp->active].rst) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].rst);
+ } else if (lp->rst) { /* Type 5 infoblock reset */
+ srom_exec(dev, lp->rst);
+ srom_exec(dev, lp->rst);
+ }
+ } else {
+ PHY_HARD_RESET;
+ }
+ if (lp->useMII) {
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ }
+ if (lp->useMII) {
+ next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
+ }
+ } else if (lp->chipset == DC21140) {
+ PHY_HARD_RESET;
+ }
+
+ return next_tick;
+}
+
+static int
+test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, csr12;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
+ reset_init_sia(dev, csr13, csr14, csr15);
+ }
+
+ /* set up the interrupt mask */
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* clear csr12 NRA and SRA bits */
+ if ((lp->chipset == DC21041) || lp->useSROM) {
+ csr12 = inl(DE4X5_SISR);
+ outl(csr12, DE4X5_SISR);
+ }
+ }
+
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && --lp->timeout) {
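+ /* Expected interrupt not seen yet: tell the caller to re-poll in 100ms */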
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+static int
+test_tp(struct net_device *dev, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
+
+ if (sisr && --lp->timeout) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** Samples the 100Mb Link State Signal. The sample interval is important
+** because too fast a rate can give erroneous results and confuse the
+** speed sense algorithm.
+*/
+#define SAMPLE_INTERVAL 500 /* ms */
+#define SAMPLE_DELAY 2000 /* ms */
+static int
+test_for_100Mb(struct net_device *dev, int msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
+
+ if (lp->timeout < 0) {
+ if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
+ if (msec > SAMPLE_DELAY) {
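+ /* Defer the first sample by SAMPLE_DELAY ms before polling at SAMPLE_INTERVAL */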
+ lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
+ gep = SAMPLE_DELAY | TIMER_CB;
+ return gep;
+ } else {
+ lp->timeout = msec/SAMPLE_INTERVAL;
+ }
+ }
+
+ if (lp->phy[lp->active].id || lp->useSROM) {
+ gep = is_100_up(dev) | is_spd_100(dev);
+ } else {
+ gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
+ }
+ if (!(gep & ret) && --lp->timeout) {
+ gep = SAMPLE_INTERVAL | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return gep;
+}
+
+static int
+wait_for_link(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->timeout < 0) {
+ lp->timeout = 1;
+ }
+
+ if (lp->timeout--) {
+ return TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return 0;
+}
+
+/*
+** Poll an MII register until the bits selected by 'mask' reach the state
+** given by 'pol' (set when pol is true, clear when false), or the timeout
+** expires.
+*/
+static int
+test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int test;
+ u_long iobase = dev->base_addr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ if (pol) pol = ~0;
+ reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
+ test = (reg ^ pol) & mask;
+
+ if (test && --lp->timeout) {
+ reg = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return reg;
+}
+
+static int
+is_spd_100(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int spd;
+
+ if (lp->useMII) {
+ spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
+ spd = ~(spd ^ lp->phy[lp->active].spd.value);
+ spd &= lp->phy[lp->active].spd.mask;
+ } else if (!lp->useSROM) { /* de500-xa */
+ spd = ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+
+ return spd;
+}
+
+static int
+is_100_up(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+static int
+is_10_up(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_LNP);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return (((lp->chipset & ~0x00ff) == DC2114x) ?
+ (~inl(DE4X5_SISR)&SISR_LS10):
+ 0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+static int
+is_anc_capable(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
+ } else {
+ return 0;
+ }
+}
+
+/*
+** Send a packet onto the media and watch for send errors that indicate the
+** media is bad or unconnected.
+*/
+static int
+ping_media(struct net_device *dev, int msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+
+ lp->tmp = lp->tx_new; /* Remember the ring position */
+ load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD);
+ }
+
+ sisr = inl(DE4X5_SISR);
+
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ (--lp->timeout)) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ if ((!(sisr & SISR_NCR)) &&
+ !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
+ lp->timeout) {
+ sisr = 0;
+ } else {
+ sisr = 1;
+ }
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** This function does two things: on Intel-type platforms it allocates another
+** buffer to replace the one about to be passed up. On Alphas (and the other
+** copy-only platforms) it allocates a buffer into which the packet is copied.
+*/
+static struct sk_buff *
+de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct sk_buff *p;
+
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ struct sk_buff *ret;
+ u_long i=0, tmp;
+
+ p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ tmp = virt_to_bus(p->data);
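+ /* Advance the buffer start to the next DMA alignment boundary (DE4X5_ALIGN is a mask) */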
+ i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
+ skb_reserve(p, i);
+ lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
+
+ ret = lp->rx_skb[index];
+ lp->rx_skb[index] = p;
+
+ if ((u_long) ret > 1) {
+ skb_put(ret, len);
+ }
+
+ return ret;
+
+#else
+ if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
+
+ p = dev_alloc_skb(len + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ skb_reserve(p, 2); /* Align */
+ if (index < lp->rx_old) { /* Wrapped buffer */
+ short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
+ memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
+ memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
+ } else { /* Linear buffer */
+ memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
+ }
+
+ return p;
+#endif
+}
+
+static void
+de4x5_free_rx_buffs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i<lp->rxRingSize; i++) {
+ if ((u_long) lp->rx_skb[i] > 1) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ }
+ lp->rx_ring[i].status = 0;
+ lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
+ }
+
+ return;
+}
+
+static void
+de4x5_free_tx_buffs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i<lp->txRingSize; i++) {
+ if (lp->tx_skb[i])
+ de4x5_free_tx_buff(lp, i);
+ lp->tx_ring[i].status = 0;
+ }
+
+ /* Unload the locally queued packets */
+ while (lp->cache.skb) {
+ dev_kfree_skb(de4x5_get_cache(dev));
+ }
+
+ return;
+}
+
+/*
+** When a user pulls a connection, the DECchip can end up in a
+** 'running - waiting for end of transmission' state. This means that we
+** have to perform a chip soft reset to ensure that we can synchronize
+** the hardware and software and make any media probes using a loopback
+** packet meaningful.
+*/
+static void
+de4x5_save_skbs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ if (!lp->cache.save_cnt) {
+ STOP_DE4X5;
+ de4x5_tx(dev); /* Flush any sent skb's */
+ de4x5_free_tx_buffs(dev);
+ de4x5_cache_state(dev, DE4X5_SAVE_STATE);
+ de4x5_sw_reset(dev);
+ de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
+ lp->cache.save_cnt++;
+ START_DE4X5;
+ }
+
+ return;
+}
+
+static void
+de4x5_rst_desc_ring(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i;
+ s32 omr;
+
+ if (lp->cache.save_cnt) {
+ STOP_DE4X5;
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+ lp->cache.save_cnt--;
+ START_DE4X5;
+ }
+
+ return;
+}
+
+static void
+de4x5_cache_state(struct net_device *dev, int flag)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ switch(flag) {
+ case DE4X5_SAVE_STATE:
+ lp->cache.csr0 = inl(DE4X5_BMR);
+ lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
+ lp->cache.csr7 = inl(DE4X5_IMR);
+ break;
+
+ case DE4X5_RESTORE_STATE:
+ outl(lp->cache.csr0, DE4X5_BMR);
+ outl(lp->cache.csr6, DE4X5_OMR);
+ outl(lp->cache.csr7, DE4X5_IMR);
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ lp->cache.csr15);
+ }
+ break;
+ }
+
+ return;
+}
+
+static void
+de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct sk_buff *p;
+
+ if (lp->cache.skb) {
+ for (p=lp->cache.skb; p->next; p=p->next);
+ p->next = skb;
+ } else {
+ lp->cache.skb = skb;
+ }
+ skb->next = NULL;
+
+ return;
+}
+
+static void
+de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct sk_buff *p = lp->cache.skb;
+
+ lp->cache.skb = skb;
+ skb->next = p;
+
+ return;
+}
+
+static struct sk_buff *
+de4x5_get_cache(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct sk_buff *p = lp->cache.skb;
+
+ if (p) {
+ lp->cache.skb = p->next;
+ p->next = NULL;
+ }
+
+ return p;
+}
+
+/*
+** Check the Auto Negotiation State. Return OK when a link pass interrupt
+** is received and the auto-negotiation status is NWAY OK.
+*/
+static int
+test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, ans;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+ }
+
+ ans = inl(DE4X5_SISR) & SISR_ANS;
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+static void
+de4x5_setup_intr(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 imr, sts;
+
+ if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
+ imr = 0;
+ UNMASK_IRQs;
+ sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
+ outl(sts, DE4X5_STS);
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+/*
+** Reset the SIA and reprogram CSR13-15, either by executing the SROM
+** reset/GEP sequences (type 3 infoblocks), from the cached SROM values,
+** or from the values passed in.
+*/
+static void
+reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ RESET_SIA;
+ if (lp->useSROM) {
+ if (lp->ibn == 3) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].gep);
+ outl(1, DE4X5_SICR);
+ return;
+ } else {
+ csr15 = lp->cache.csr15;
+ csr14 = lp->cache.csr14;
+ csr13 = lp->cache.csr13;
+ outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
+ outl(csr15 | lp->cache.gep, DE4X5_SIGR);
+ }
+ } else {
+ outl(csr15, DE4X5_SIGR);
+ }
+ outl(csr14, DE4X5_STRR);
+ outl(csr13, DE4X5_SICR);
+
+ mdelay(10);
+
+ return;
+}
+
+/*
+** Create a loopback ethernet packet
+*/
+static void
+create_packet(struct net_device *dev, char *frame, int len)
+{
+ int i;
+ char *buf = frame;
+
+ for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
+ *buf++ = dev->dev_addr[i];
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
+ *buf++ = dev->dev_addr[i];
+ }
+
+ *buf++ = 0; /* Packet length (2 bytes) */
+ *buf++ = 1;
+
+ return;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int
+EISA_signature(char *name, struct device *device)
+{
+ int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
+ struct eisa_device *edev;
+
+ *name = '\0';
+ edev = to_eisa_device (device);
+ i = edev->id.driver_data;
+
+ if (i >= 0 && i < siglen) {
+ strcpy (name, de4x5_signatures[i]);
+ status = 1;
+ }
+
+ return status; /* 1 if a device name was copied into 'name' */
+}
+
+/*
+** Look for a particular board name in the PCI configuration space
+*/
+static int
+PCI_signature(char *name, struct de4x5_private *lp)
+{
+ int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
+
+ if (lp->chipset == DC21040) {
+ strcpy(name, "DE434/5");
+ return status;
+ } else { /* Search for a DEC name in the SROM */
+ int i = *((char *)&lp->srom + 19) * 3;
+ strncpy(name, (char *)&lp->srom + 26 + i, 8);
+ }
+ name[8] = '\0';
+ for (i=0; i<siglen; i++) {
+ if (strstr(name,de4x5_signatures[i])!=NULL) break;
+ }
+ if (i == siglen) {
+ if (dec_only) {
+ *name = '\0';
+ } else { /* Use chip name to avoid confusion */
+ strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
+ ((lp->chipset == DC21041) ? "DC21041" :
+ ((lp->chipset == DC21140) ? "DC21140" :
+ ((lp->chipset == DC21142) ? "DC21142" :
+ ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
+ )))))));
+ }
+ if (lp->chipset != DC21041) {
+ lp->useSROM = TRUE; /* card is not recognisably DEC */
+ }
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ lp->useSROM = TRUE;
+ }
+
+ return status;
+}
+
+/*
+** Set up the Ethernet PROM counter to the start of the Ethernet address on
+** the DC21040, else read the SROM for the other chips.
+** The SROM may not be present in a multi-MAC card, so first read the
+** MAC address and check for a bad address. If there is a bad one then exit
+** immediately with the prior srom contents intact (the h/w address will
+** be fixed up later).
+*/
+static void
+DevicePresent(struct net_device *dev, u_long aprom_addr)
+{
+ int i, j=0;
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->chipset == DC21040) {
+ if (lp->bus == EISA) {
+ enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ } else {
+ outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ }
+ } else { /* Read new srom */
+ u_short tmp, *p = (short *)((char *)&lp->srom + SROM_HWADD);
+ for (i=0; i<(ETH_ALEN>>1); i++) {
+ tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
+ *p = le16_to_cpu(tmp);
+ j += *p++;
+ }
+ if ((j == 0) || (j == 0x2fffd)) {
+ return;
+ }
+
+ p=(short *)&lp->srom;
+ for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
+ tmp = srom_rd(aprom_addr, i);
+ *p++ = le16_to_cpu(tmp);
+ }
+ de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ }
+
+ return;
+}
+
+/*
+** Since the write on the Enet PROM register doesn't seem to reset the PROM
+** pointer correctly (at least on my DE425 EISA card), this routine should do
+** it...from depca.c.
+*/
+static void
+enet_addr_rst(u_long aprom_addr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ int i, j;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(aprom_addr);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** For the bad status case with no SROM, add one to the previous
+** address. However, we need to add with a backwards carry in case one
+** or more of the bytes is 0xff. Only the last 3 bytes should be checked,
+** as the first three are invariant - they are assigned to an organisation.
+*/
+static int
+get_hw_addr(struct net_device *dev)
+{
+ u_long iobase = dev->base_addr;
+ int broken, i, k, tmp, status = 0;
+ u_short j,chksum;
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ broken = de4x5_bad_srom(lp);
+
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1;
+ if (k > 0xffff) k-=0xffff;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
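+ /* The inl() loops below busy-wait until the APROM returns valid data (top bit clear) */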
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_char) tmp;
+ dev->dev_addr[i++] = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_short) (tmp << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ } else if (!broken) {
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ } else if ((broken == SMC) || (broken == ACCTON)) {
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ }
+ } else {
+ k += (u_char) (tmp = inb(EISA_APROM));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ }
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum |= (u_short) (tmp << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+ } else {
+ chksum = (u_char) inb(EISA_APROM);
+ chksum |= (u_short) (inb(EISA_APROM) << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+
+ /* If possible, try to fix a broken card - SMC only so far */
+ srom_repair(dev, broken);
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+ /*
+ ** If the address starts with 00 a0, we have to bit-reverse
+ ** each byte of the address.
+ */
+ if ( (_machine & _MACH_Pmac) &&
+ (dev->dev_addr[0] == 0) &&
+ (dev->dev_addr[1] == 0xa0) )
+ {
+ for (i = 0; i < ETH_ALEN; ++i)
+ {
+ int x = dev->dev_addr[i];
+ x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
+ x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
+ dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ }
+ }
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+ /* Test for a bad enet address */
+ status = test_bad_enet(dev, status);
+
+ return status;
+}
+
+/*
+** Test for enet addresses in the first 32 bytes. The built-in strncmp
+** didn't seem to work here...?
+*/
+static int
+de4x5_bad_srom(struct de4x5_private *lp)
+{
+ int i, status = 0;
+
+ for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
+ if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
+ !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (i == 0) {
+ status = SMC;
+ } else if (i == 1) {
+ status = ACCTON;
+ }
+ break;
+ }
+ }
+
+ return status;
+}
+
+static int
+de4x5_strncmp(char *a, char *b, int n)
+{
+ int ret=0;
+
+ for (;n && !ret;n--) {
+ ret = *a++ - *b++;
+ }
+
+ return ret;
+}
+
+static void
+srom_repair(struct net_device *dev, int card)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ switch(card) {
+ case SMC:
+ memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
+ memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
+ memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
+ lp->useSROM = TRUE;
+ break;
+ }
+
+ return;
+}
+
+/*
+** Assume that the IRQs do not follow the PCI spec - this seems
+** to be true so far (2 for 2).
+*/
+static int
+test_bad_enet(struct net_device *dev, int status)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i, tmp;
+
+ for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
+ if ((tmp == 0) || (tmp == 0x5fa)) {
+ if ((lp->chipset == last.chipset) &&
+ (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
+ for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
+ for (i=ETH_ALEN-1; i>2; --i) {
+ dev->dev_addr[i] += 1;
+ if (dev->dev_addr[i] != 0) break;
+ }
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ if (!an_exception(lp)) {
+ dev->irq = last.irq;
+ }
+
+ status = 0;
+ }
+ } else if (!status) {
+ last.chipset = lp->chipset;
+ last.bus = lp->bus_num;
+ last.irq = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ }
+
+ return status;
+}
+
+/*
+** List of board exceptions with correctly wired IRQs
+*/
+static int
+an_exception(struct de4x5_private *lp)
+{
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+** SROM Read
+*/
+static short
+srom_rd(u_long addr, u_char offset)
+{
+ sendto_srom(SROM_RD | SROM_SR, addr);
+
+ srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
+ srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
+ srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
+
+ return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
+}
+
+static void
+srom_latch(u_int command, u_long addr)
+{
+ sendto_srom(command, addr);
+ sendto_srom(command | DT_CLK, addr);
+ sendto_srom(command, addr);
+
+ return;
+}
+
+static void
+srom_command(u_int command, u_long addr)
+{
+ srom_latch(command, addr);
+ srom_latch(command, addr);
+ srom_latch((command & 0x0000ff00) | DT_CS, addr);
+
+ return;
+}
+
+static void
+srom_address(u_int command, u_long addr, u_char offset)
+{
+ int i, a;
+
+ a = offset << 2;
+ for (i=0; i<6; i++, a <<= 1) {
+ srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
+ }
+ udelay(1);
+
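+ /* Read and discard the dummy data-out bit that follows the address */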
+ i = (getfrom_srom(addr) >> 3) & 0x01;
+
+ return;
+}
+
+static short
+srom_data(u_int command, u_long addr)
+{
+ int i;
+ short word = 0;
+ s32 tmp;
+
+ for (i=0; i<16; i++) {
+ sendto_srom(command | DT_CLK, addr);
+ tmp = getfrom_srom(addr);
+ sendto_srom(command, addr);
+
+ word = (word << 1) | ((tmp >> 3) & 0x01);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return word;
+}
+
+/*
+static void
+srom_busy(u_int command, u_long addr)
+{
+ sendto_srom((command & 0x0000ff00) | DT_CS, addr);
+
+ while (!((getfrom_srom(addr) >> 3) & 0x01)) {
+ mdelay(1);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return;
+}
+*/
+
+static void
+sendto_srom(u_int command, u_long addr)
+{
+ outl(command, addr);
+ udelay(1);
+
+ return;
+}
+
+static int
+getfrom_srom(u_long addr)
+{
+ s32 tmp;
+
+ tmp = inl(addr);
+ udelay(1);
+
+ return tmp;
+}
+
+static int
+srom_infoleaf_info(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i, count;
+ u_char *p;
+
+ /* Find the infoleaf decoder function that matches this chipset */
+ for (i=0; i<INFOLEAF_SIZE; i++) {
+ if (lp->chipset == infoleaf_array[i].chipset) break;
+ }
+ if (i == INFOLEAF_SIZE) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ dev->name);
+ return -ENXIO;
+ }
+
+ lp->infoleaf_fn = infoleaf_array[i].fn;
+
+ /* Find the information offset that this function should use */
+ count = *((u_char *)&lp->srom + 19);
+ p = (u_char *)&lp->srom + 26;
+
+ if (count > 1) {
+ for (i=count; i; --i, p+=3) {
+ if (lp->device == *p) break;
+ }
+ if (i == 0) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ dev->name, lp->device);
+ return -ENXIO;
+ }
+ }
+
+ lp->infoleaf_offset = TWIDDLE(p+1);
+
+ return 0;
+}
+
+/*
+** This routine loads any type 1 or 3 MII info into the mii device
+** struct and executes any type 5 code to reset PHY devices for this
+** controller.
+** The info for the MII devices will be valid since the index used
+** will follow the discovery process from MII address 1-31 then 0.
+*/
+static void
+srom_init(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ u_char count;
+
+ p+=2;
+ if (lp->chipset == DC21140) {
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+ gep_wr(lp->cache.gepc, dev);
+ }
+
+ /* Block count */
+ count = *p++;
+
+ /* Jump the infoblocks to find types */
+ for (;count; --count) {
+ if (*p < 128) {
+ p += COMPACT_LEN;
+ } else if (*(p+1) == 5) {
+ type5_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 4) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 3) {
+ type3_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 2) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 1) {
+ type1_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else {
+ p += ((*p & BLOCK_LEN) + 1);
+ }
+ }
+
+ return;
+}
+
+/*
+** A generic routine that writes GEP control, data and reset information
+** to the GEP register (21140) or csr15 GEP portion (2114[23]).
+*/
+static void
+srom_exec(struct net_device *dev, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char count = (p ? *p++ : 0);
+ u_short *w = (u_short *)p;
+
+ if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
+
+ if (lp->chipset != DC21140) RESET_SIA;
+
+ while (count--) {
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ *p++ : TWIDDLE(w++)), dev);
+ mdelay(2); /* 2ms per action */
+ }
+
+ if (lp->chipset != DC21140) {
+ outl(lp->cache.csr14, DE4X5_STRR);
+ outl(lp->cache.csr13, DE4X5_SICR);
+ }
+
+ return;
+}
+
+/*
+** Basically this function is a NOP since it will never be called,
+** unless I implement the DC21041 SROM functions. There's no need
+** since the existing code will be satisfactory for all boards.
+*/
+static int
+dc21041_infoleaf(struct net_device *dev)
+{
+ return DE4X5_AUTOSENSE_MS;
+}
+
+static int
+dc21140_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* GEP control */
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21142_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21143_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** The compact infoblock is only designed for DC21140[A] chips, so
+** we'll reuse the dc21140m_autoconf function. Non MII media only.
+*/
+static int
+compact_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+COMPACT_LEN) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
+ } else {
+ return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = COMPACT;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ lp->infoblock_media = (*p++) & COMPACT_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/*
+** This block describes non MII media for the DC21140[A] only.
+*/
+static int
+type0_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 0;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ p+=2;
+ lp->infoblock_media = (*p++) & BLOCK0_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/* These functions are under construction! */
+
+static int
+type1_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 1;
+ lp->active = *p++;
+ lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
+ lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p);
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 1;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+static int
+type2_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 2;
+ lp->active = 0;
+ p += 2;
+ lp->infoblock_media = (*p) & MEDIA_CODE;
+
+ if ((*p++) & EXT_FIELD) {
+ lp->cache.csr13 = TWIDDLE(p); p += 2;
+ lp->cache.csr14 = TWIDDLE(p); p += 2;
+ lp->cache.csr15 = TWIDDLE(p); p += 2;
+ } else {
+ lp->cache.csr13 = CSR13;
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ }
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16);
+ lp->infoblock_csr6 = OMR_SIA;
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type3_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 3;
+ lp->active = *p++;
+ if (MOTO_SROM_BUG) lp->active = 0;
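+ /* Type 3 GEP/reset sequences are stored as 16 bit words, hence the 2 * length stride */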
+ lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
+ lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].mci = *p;
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 3;
+ lp->active = *p;
+ if (MOTO_SROM_BUG) lp->active = 0;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type4_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 4;
+ lp->active = 0;
+ p+=2;
+ lp->infoblock_media = (*p++) & MEDIA_CODE;
+ lp->cache.csr13 = CSR13; /* Hard coded defaults */
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** This block type provides information for resetting external devices
+** (chips) through the General Purpose Register.
+*/
+static int
+type5_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ /* Must be initializing to run this code */
+ if ((lp->state == INITIALISED) || (lp->media == INIT)) {
+ p+=2;
+ lp->rst = p;
+ srom_exec(dev, lp->rst);
+ }
+
+ return DE4X5_AUTOSENSE_MS;
+}
+
+/*
+** MII Read/Write
+*/
+
+static int
+mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to read */
+ mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
+
+ return mii_rdata(ioaddr); /* Read data */
+}
+
+static void
+mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to write */
+ mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
+ data = mii_swap(data, 16); /* Swap data bit ordering */
+ mii_wdata(data, 16, ioaddr); /* Write data */
+
+ return;
+}
+
+static int
+mii_rdata(u_long ioaddr)
+{
+ int i;
+ s32 tmp = 0;
+
+ for (i=0; i<16; i++) {
+ tmp <<= 1;
+ tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
+ }
+
+ return tmp;
+}
+
+static void
+mii_wdata(int data, int len, u_long ioaddr)
+{
+ int i;
+
+ for (i=0; i<len; i++) {
+ sendto_mii(MII_MWR | MII_WR, data, ioaddr);
+ data >>= 1;
+ }
+
+ return;
+}
+
+static void
+mii_address(u_char addr, u_long ioaddr)
+{
+ int i;
+
+ addr = mii_swap(addr, 5);
+ for (i=0; i<5; i++) {
+ sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
+ addr >>= 1;
+ }
+
+ return;
+}
+
+static void
+mii_ta(u_long rw, u_long ioaddr)
+{
+ if (rw == MII_STWR) {
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ } else {
+ getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
+ }
+
+ return;
+}
+
+static int
+mii_swap(int data, int len)
+{
+ int i, tmp = 0;
+
+ for (i=0; i<len; i++) {
+ tmp <<= 1;
+ tmp |= (data & 1);
+ data >>= 1;
+ }
+
+ return tmp;
+}
+
+static void
+sendto_mii(u32 command, int data, u_long ioaddr)
+{
+ u32 j;
+
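+ /* Put the data bit on the MII management data line, then raise MDC to clock it */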
+ j = (data & 1) << 17;
+ outl(command | j, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC | j, ioaddr);
+ udelay(1);
+
+ return;
+}
+
+static int
+getfrom_mii(u32 command, u_long ioaddr)
+{
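+ /* Toggle MDC, then sample the returned management data bit (bit 19) */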
+ outl(command, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC, ioaddr);
+ udelay(1);
+
+ return ((inl(ioaddr) >> 19) & 1);
+}
+
+/*
+** Here are 3 ways to calculate the OUI from the ID registers.
+*/
+static int
+mii_get_oui(u_char phyaddr, u_long ioaddr)
+{
+ int r2, r3;
+
+ /* Read r2 and r3 */
+ r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
+ r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+
+#if 0 /* SEEQ and Cypress way: shuffle, bit reverse and byte swap r2/r3 */
+ {
+ union {
+ u_short reg;
+ u_char breg[2];
+ } a;
+ int i, ret=0;
+
+ /* Shuffle r2 and r3 */
+ a.reg=0;
+ r3 = ((r3>>10)|(r2<<6))&0x0ff;
+ r2 = ((r2>>2)&0x3fff);
+
+ /* Bit reverse r3 */
+ for (i=0;i<8;i++) {
+ ret<<=1;
+ ret |= (r3&1);
+ r3>>=1;
+ }
+
+ /* Bit reverse r2 */
+ for (i=0;i<16;i++) {
+ a.reg<<=1;
+ a.reg |= (r2&1);
+ r2>>=1;
+ }
+
+ /* Swap r2 bytes */
+ i=a.breg[0];
+ a.breg[0]=a.breg[1];
+ a.breg[1]=i;
+
+ return ((a.reg<<8)|ret);
+ }
+#elif 0
+ return ((r2<<6)|(u_int)(r3>>10)); /* NATIONAL and BROADCOM way */
+#else
+ return r2; /* (I did it) My way */
+#endif
+}
+
+/*
+** The SROM spec forces us to search addresses [1-31 0]. Bummer.
+*/
+static int
+mii_get_phy(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
+ int id;
+
+ lp->active = 0;
+ lp->useMII = TRUE;
+
+ /* Search the MII address space for possible PHY devices */
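+ /* Scan addresses 1-31 then 0; stop when address 1 comes round again */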
+ for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
+ lp->phy[lp->active].addr = i;
+ if (i==0) n++; /* Count cycles */
+ while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
+ id = mii_get_oui(i, DE4X5_MII);
+ if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
+ for (j=0; j<limit; j++) { /* Search PHY table */
+ if (id != phy_info[j].id) continue; /* ID match? */
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
+ if (k < DE4X5_MAX_PHY) {
+ memcpy((char *)&lp->phy[k],
+ (char *)&phy_info[j], sizeof(struct phy_table));
+ lp->phy[k].addr = i;
+ lp->mii_cnt++;
+ lp->active++;
+ } else {
+ goto purgatory; /* Stop the search */
+ }
+ break;
+ }
+ if ((j == limit) && (i < DE4X5_MAX_MII)) {
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
+ lp->phy[k].addr = i;
+ lp->phy[k].id = id;
+ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+ lp->mii_cnt++;
+ lp->active++;
+ printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ j = de4x5_debug;
+ de4x5_debug |= DEBUG_MII;
+ de4x5_dbg_mii(dev, k);
+ de4x5_debug = j;
+ printk("\n");
+ }
+ }
+ purgatory:
+ lp->active = 0;
+ if (lp->phy[0].id) { /* Reset the PHY devices */
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
+ while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
+
+ de4x5_dbg_mii(dev, k);
+ }
+ }
+ if (!lp->mii_cnt) lp->useMII = FALSE;
+
+ return lp->mii_cnt;
+}
+
+static char *
+build_setup_frame(struct net_device *dev, int mode)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+ char *pa = lp->setup_frame;
+
+ /* Initialise the setup frame */
+ if (mode == ALL) {
+ memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
+ }
+
+ if (lp->setup_f == HASH_PERF) {
+ for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
+ *(pa + i) = dev->dev_addr[i]; /* Host address */
+ if (i & 0x01) pa += 2;
+ }
+ *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
+ } else {
+ for (i=0; i<ETH_ALEN; i++) { /* Host address */
+ *(pa + (i&1)) = dev->dev_addr[i];
+ if (i & 0x01) pa += 4;
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
+ *(pa + (i&1)) = (char) 0xff;
+ if (i & 0x01) pa += 4;
+ }
+ }
+
+ return pa; /* Points to the next entry */
+}
+
+static void
+enable_ast(struct net_device *dev, u32 time_out)
+{
+ timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
+
+ return;
+}
+
+static void
+disable_ast(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ del_timer(&lp->timer);
+
+ return;
+}
+
+static long
+de4x5_switch_mac_port(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ STOP_DE4X5;
+
+ /* Assert the OMR_PS bit in CSR6 */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
+ OMR_FDX));
+ omr |= lp->infoblock_csr6;
+ if (omr & OMR_PS) omr |= OMR_HBD;
+ outl(omr, DE4X5_OMR);
+
+ /* Soft Reset */
+ RESET_DE4X5;
+
+ /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else if ((lp->chipset & ~0x0ff) == DC2114x) {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
+ }
+
+ /* Restore CSR6 */
+ outl(omr, DE4X5_OMR);
+
+ /* Reset CSR8 */
+ inl(DE4X5_MFC);
+
+ return omr;
+}
+
+static void
+gep_wr(s32 data, struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ outl(data, DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
+ }
+
+ return;
+}
+
+static int
+gep_rd(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ return inl(DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SIGR) & 0x000fffff);
+ }
+
+ return 0;
+}
+
+static void
+timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int dt;
+
+ /* First, cancel any pending timer events */
+ del_timer(&lp->timer);
+
+ /* Convert msec to ticks */
+ dt = (msec * HZ) / 1000;
+ if (dt==0) dt=1;
+
+ /* Set up timer */
+ init_timer(&lp->timer);
+ lp->timer.expires = jiffies + dt;
+ lp->timer.function = fn;
+ lp->timer.data = data;
+ add_timer(&lp->timer);
+
+ return;
+}
+
+static void
+yawn(struct net_device *dev, int state)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
+
+ if(lp->bus == EISA) {
+ switch(state) {
+ case WAKEUP:
+ outb(WAKEUP, PCI_CFPM);
+ mdelay(10);
+ break;
+
+ case SNOOZE:
+ outb(SNOOZE, PCI_CFPM);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ outb(SLEEP, PCI_CFPM);
+ break;
+ }
+ } else {
+ struct pci_dev *pdev = to_pci_dev (lp->gendev);
+ switch(state) {
+ case WAKEUP:
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
+ mdelay(10);
+ break;
+
+ case SNOOZE:
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
+ break;
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_parse_params(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ char *p, *q, t;
+
+ lp->params.fdx = 0;
+ lp->params.autosense = AUTO;
+
+ if (args == NULL) return;
+
+ if ((p = strstr(args, dev->name))) {
+ if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
+ t = *q;
+ *q = '\0';
+
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+
+ if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
+ if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "TP_NW")) {
+ lp->params.autosense = TP_NW;
+ } else if (strstr(p, "BNC")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "AUI")) {
+ lp->params.autosense = AUI;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "10Mb")) {
+ lp->params.autosense = _10Mb;
+ } else if (strstr(p, "100Mb")) {
+ lp->params.autosense = _100Mb;
+ } else if (strstr(p, "AUTO")) {
+ lp->params.autosense = AUTO;
+ }
+ }
+ *q = t;
+ }
+
+ return;
+}
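
de4x5_parse_params() looks for this interface's name in the module-level args string and then scans the text up to the next "eth" token for an "fdx" flag and an autosense medium. A plausible example of the string it parses, assuming the colon-separated form this driver family documents; the exact wording below is illustrative, not quoted from this commit.

/* Illustrative only -- assumed load-time parameter format:
 *
 *     insmod de4x5 args='eth0:fdx autosense=AUTO eth1:autosense=BNC'
 *
 * For "eth0" the code above would set lp->params.fdx = 1 and
 * lp->params.autosense = AUTO; for "eth1" it would leave fdx at 0 and
 * select BNC.
 */
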
+
+static void
+de4x5_dbg_open(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
+ printk("\tphysical address: ");
+ for (i=0;i<6;i++) {
+ printk("%2.2x:",(short)dev->dev_addr[i]);
+ }
+ printk("\n");
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
+ printk("Descriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_mii(struct net_device *dev, int k)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (de4x5_debug & DEBUG_MII) {
+ printk("\nMII device address: %d\n", lp->phy[k].addr);
+ printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
+ }
+ printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
+ printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
+ } else {
+ printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->media != lp->c_media) {
+ if (de4x5_debug & DEBUG_MEDIA) {
+ printk("%s: media is %s%s\n", dev->name,
+ (lp->media == NC ? "unconnected, link down or incompatible connection" :
+ (lp->media == TP ? "TP" :
+ (lp->media == ANS ? "TP/Nway" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == _100Mb ? "100Mb/s" :
+ (lp->media == _10Mb ? "10Mb/s" :
+ "???"
+ ))))))))), (lp->fdx?" full duplex.":"."));
+ }
+ lp->c_media = lp->media;
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_srom(struct de4x5_srom *p)
+{
+ int i;
+
+ if (de4x5_debug & DEBUG_SROM) {
+ printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
+ printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
+ printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
+ printk("SROM version: %02x\n", (u_char)(p->version));
+ printk("# controllers: %02x\n", (u_char)(p->num_controllers));
+
+ printk("Hardware Address: ");
+ for (i=0;i<ETH_ALEN-1;i++) {
+ printk("%02x:", (u_char)*(p->ieee_addr+i));
+ }
+ printk("%02x\n", (u_char)*(p->ieee_addr+i));
+ printk("CRC checksum: %04x\n", (u_short)(p->chksum));
+ for (i=0; i<64; i++) {
+ printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_rx(struct sk_buff *skb, int len)
+{
+ int i, j;
+
+ if (de4x5_debug & DEBUG_RX) {
+ printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n",
+ (u_char)skb->data[0],
+ (u_char)skb->data[1],
+ (u_char)skb->data[2],
+ (u_char)skb->data[3],
+ (u_char)skb->data[4],
+ (u_char)skb->data[5],
+ (u_char)skb->data[6],
+ (u_char)skb->data[7],
+ (u_char)skb->data[8],
+ (u_char)skb->data[9],
+ (u_char)skb->data[10],
+ (u_char)skb->data[11],
+ (u_char)skb->data[12],
+ (u_char)skb->data[13],
+ len);
+ for (j=0; len>0;j+=16, len-=16) {
+ printk(" %03x: ",j);
+ for (i=0; i<16 && i<len; i++) {
+ printk("%02x ",(u_char)skb->data[i+j]);
+ }
+ printk("\n");
+ }
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and
+** CAP_NET_ADMIN is required in those cases. In the normal course of events
+** this function is only used for my testing.
+*/
+static int
+de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 omr;
+ union {
+ u8 addr[144];
+ u16 sval[72];
+ u32 lval[36];
+ } tmp;
+ u_long flags = 0;
+
+ switch(ioc->cmd) {
+ case DE4X5_GET_HWADDR: /* Get the hardware address */
+ ioc->len = ETH_ALEN;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
+ if (netif_queue_stopped(dev))
+ return -EBUSY;
+ netif_stop_queue(dev);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ build_setup_frame(dev, PHYS_ADDR_ONLY);
+ /* Set up the descriptor and give ownership to the card */
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, (struct sk_buff *)1);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ netif_wake_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DE4X5_SET_PROM: /* Set Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ dev->flags |= IFF_PROMISC;
+ break;
+
+ case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ omr = inl(DE4X5_OMR);
+ omr &= ~OMR_PR;
+ outl(omr, DE4X5_OMR);
+ dev->flags &= ~IFF_PROMISC;
+ break;
+
+ case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ printk("%s: Boo!\n", dev->name);
+ break;
+
+ case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PM;
+ outl(omr, DE4X5_OMR);
+ break;
+
+ case DE4X5_GET_STATS: /* Get the driver statistics */
+ {
+ struct pkt_stats statbuf;
+ ioc->len = sizeof(statbuf);
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(&statbuf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
+ break;
+ }
+ case DE4X5_CLR_STATS: /* Zero out the driver statistics */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case DE4X5_GET_OMR: /* Get the OMR Register contents */
+ tmp.addr[0] = inl(DE4X5_OMR);
+ if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_OMR: /* Set the OMR Register contents */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
+ outl(tmp.addr[0], DE4X5_OMR);
+ break;
+
+ case DE4X5_GET_REG: /* Get the DE4X5 Registers */
+ j = 0;
+ tmp.lval[0] = inl(DE4X5_STS); j+=4;
+ tmp.lval[1] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[3] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[4] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[5] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[6] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
+ ioc->len = j;
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
+/*
+ case DE4X5_DUMP:
+ j = 0;
+ tmp.addr[j++] = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[j++] = dev->dev_addr[i];
+ }
+ tmp.addr[j++] = lp->rxRingSize;
+ tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
+ tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+
+ for (i=0;i<lp->rxRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
+ }
+ for (i=0;i<lp->txRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
+ }
+
+ tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
+ if (lp->chipset == DC21140) {
+ tmp.lval[j>>2] = gep_rd(dev); j+=4;
+ } else {
+ tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ }
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ } else {
+ tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ }
+
+ tmp.addr[j++] = lp->txRingSize;
+ tmp.addr[j++] = netif_queue_stopped(dev);
+
+ ioc->len = j;
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+*/
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return status;
+}
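
The private ioctl above is reached through SIOCDEVPRIVATE with a struct de4x5_ioctl overlaid on ifr_ifru (see de4x5.h later in this patch). A minimal user-space sketch, assumed usage only: the program, the "eth0" interface name, and the local struct copy are illustrative and do not ship with the driver.

/* User-space sketch (assumed usage; not part of the driver sources).
 * It mirrors struct de4x5_ioctl from de4x5.h and asks the driver for its
 * hardware address via DE4X5_GET_HWADDR.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

struct de4x5_ioctl {                    /* must match the driver's layout */
    unsigned short cmd;
    unsigned short len;
    unsigned char  *data;
};

#define DE4X5_GET_HWADDR 0x01           /* from de4x5.h */

int main(void)
{
    unsigned char addr[6];
    struct ifreq ifr;
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
    int s = socket(AF_INET, SOCK_DGRAM, 0);

    if (s < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* interface name is an example */
    ioc->cmd  = DE4X5_GET_HWADDR;
    ioc->len  = sizeof(addr);
    ioc->data = addr;

    if (ioctl(s, SIOCDEVPRIVATE, &ifr) == 0)
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

    close(s);
    return 0;
}
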
+
+static int __init de4x5_module_init (void)
+{
+ int err = 0;
+
+#ifdef CONFIG_PCI
+ err = pci_module_init (&de4x5_pci_driver);
+#endif
+#ifdef CONFIG_EISA
+ err |= eisa_driver_register (&de4x5_eisa_driver);
+#endif
+
+ return err;
+}
+
+static void __exit de4x5_module_exit (void)
+{
+#ifdef CONFIG_PCI
+ pci_unregister_driver (&de4x5_pci_driver);
+#endif
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&de4x5_eisa_driver);
+#endif
+}
+
+module_init (de4x5_module_init);
+module_exit (de4x5_module_exit);
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
new file mode 100644
index 000000000000..ad37a4074302
--- /dev/null
+++ b/drivers/net/tulip/de4x5.h
@@ -0,0 +1,1029 @@
+/*
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** DC21040 CSR<1..15> Register Address Map
+*/
+#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
+#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
+#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
+#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
+#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
+#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
+#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
+#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
+#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
+#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
+#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
+#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
+#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
+#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
+#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
+#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
+#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
+#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
+#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
+#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
+#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
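
The `<< lp->bus` term in every CSR macro doubles the register spacing when the board sits on EISA (lp->bus == 1, defined further down in this header) compared with PCI (lp->bus == 0), so the same macro resolves to two different I/O addresses. A small worked example; the helper name is illustrative and not part of the driver.

/* Illustrative helper (not part of the driver): reproduces the address
 * arithmetic of the macros above.  bus is 0 for PCI, 1 for EISA.
 */
static inline u_long example_csr_addr(u_long iobase, int bus, int offset)
{
    return iobase + (offset << bus);
}
/* example_csr_addr(iobase, 0, 0x030) == iobase + 0x30   (DE4X5_OMR on PCI)  */
/* example_csr_addr(iobase, 1, 0x030) == iobase + 0x60   (DE4X5_OMR on EISA) */
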
+
+/*
+** EISA Register Address Map
+*/
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase+0x0c84 /* EISA Control Register */
+#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
+#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
+#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
+#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
+#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
+
+/*
+** PCI/EISA Configuration Registers Address Map
+*/
+#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
+#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
+#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
+#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
+#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
+#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
+#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
+#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
+#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
+#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
+#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
+
+/*
+** EISA Configuration Register 0 bit definitions
+*/
+#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
+#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
+#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
+#define ER0_ISTS 0x10 /* Interrupt Status (X) */
+#define ER0_LI 0x08 /* Latch Interrupts */
+#define ER0_INTL 0x06 /* INTerrupt Level */
+#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
+
+/*
+** EISA Configuration Register 1 bit definitions
+*/
+#define ER1_IAM 0xe0 /* ISA Address Mode */
+#define ER1_IAE 0x10 /* ISA Addressing Enable */
+#define ER1_UPIN 0x0f /* User Pins */
+
+/*
+** EISA Configuration Register 2 bit definitions
+*/
+#define ER2_BRS 0xc0 /* Boot ROM Size */
+#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
+
+/*
+** EISA Configuration Register 3 bit definitions
+*/
+#define ER3_BWE 0x40 /* Burst Write Enable */
+#define ER3_BRE 0x04 /* Burst Read Enable */
+#define ER3_LSR 0x02 /* Local Software Reset */
+
+/*
+** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
+** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
+** the configuration revision register step number.
+*/
+#define CFID_DID 0xff00 /* Device ID */
+#define CFID_VID 0x00ff /* Vendor ID */
+#define DC21040_DID 0x0200 /* Unique Device ID # */
+#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
+#define DC21041_DID 0x1400 /* Unique Device ID # */
+#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
+#define DC21140_DID 0x0900 /* Unique Device ID # */
+#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
+#define DC2114x_DID 0x1900 /* Unique Device ID # */
+#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
+
+/*
+** Chipset defines
+*/
+#define DC21040 DC21040_DID
+#define DC21041 DC21041_DID
+#define DC21140 DC21140_DID
+#define DC2114x DC2114x_DID
+#define DC21142 (DC2114x_DID | 0x0010)
+#define DC21143 (DC2114x_DID | 0x0030)
+#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
+
+#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
+#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
+#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
+#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
+#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
+#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
+
+/*
+** PCI Configuration Command/Status Register (PCI_CFCS)
+*/
+#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
+#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
+#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
+#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
+#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
+#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
+#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
+#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
+#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
+#define CFCS_MO 0x00000004 /* Master Operation (C) */
+#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
+#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
+
+/*
+** PCI Configuration Revision Register (PCI_CFRV)
+*/
+#define CFRV_BC 0xff000000 /* Base Class */
+#define CFRV_SC 0x00ff0000 /* Subclass */
+#define CFRV_RN 0x000000f0 /* Revision Number */
+#define CFRV_SN 0x0000000f /* Step Number */
+#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
+#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
+#define STEP_NUMBER 0x00000020 /* Increments for future chips */
+#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
+#define CFRV_MASK 0xffff0000 /* Register mask */
+
+/*
+** PCI Configuration Latency Timer Register (PCI_CFLT)
+*/
+#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
+
+/*
+** PCI Configuration Base I/O Address Register (PCI_CBIO)
+*/
+#define CBIO_MASK -128 /* Base I/O Address Mask */
+#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
+
+/*
+** PCI Configuration Card Information Structure Register (PCI_CCIS)
+*/
+#define CCIS_ROMI 0xf0000000 /* ROM Image */
+#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
+#define CCIS_ASI 0x00000007 /* Address Space Indicator */
+
+/*
+** PCI Configuration Subsystem ID Register (PCI_SSID)
+*/
+#define SSID_SSID 0xffff0000 /* Subsystem ID */
+#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
+
+/*
+** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
+*/
+#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
+#define CBER_ROME 0x00000001 /* ROM Enable */
+
+/*
+** PCI Configuration Interrupt Register (PCI_CFIT)
+*/
+#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
+#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
+#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
+#define CFIT_IRQL 0x000000ff /* Interrupt Line */
+
+/*
+** PCI Configuration Power Management Area Register (PCI_CFPM)
+*/
+#define SLEEP 0x80 /* Power Saving Sleep Mode */
+#define SNOOZE 0x40 /* Power Saving Snooze Mode */
+#define WAKEUP 0x00 /* Power Saving Wakeup */
+
+#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
+#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
+
+/*
+** DC21040 Bus Mode Register (DE4X5_BMR)
+*/
+#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
+#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
+#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
+#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
+#define BMR_CAL 0x0000c000 /* Cache Alignment */
+#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
+#define BMR_BLE 0x00000080 /* Big/Little Endian */
+#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
+#define BMR_BAR 0x00000002 /* Bus ARbitration */
+#define BMR_SWR 0x00000001 /* Software Reset */
+
+ /* Timings here are for 10BASE-T/AUI only */
+#define TAP_NOPOLL 0x00000000 /* No automatic polling */
+#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
+#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
+#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
+#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
+#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
+#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
+#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
+
+#define CAL_NOUSE 0x00000000 /* Not used */
+#define CAL_8LONG 0x00004000 /* 8-longword alignment */
+#define CAL_16LONG 0x00008000 /* 16-longword alignment */
+#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
+
+#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
+#define PBL_1 0x00000100 /* 1 longword DMA burst length */
+#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
+#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
+#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
+#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
+#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
+
+#define DSL_0 0x00000000 /* 0 longword / descriptor */
+#define DSL_1 0x00000004 /* 1 longword / descriptor */
+#define DSL_2 0x00000008 /* 2 longwords / descriptor */
+#define DSL_4 0x00000010 /* 4 longwords / descriptor */
+#define DSL_8 0x00000020 /* 8 longwords / descriptor */
+#define DSL_16 0x00000040 /* 16 longwords / descriptor */
+#define DSL_32 0x00000080 /* 32 longwords / descriptor */
+
+/*
+** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
+*/
+#define TPD 0x00000001 /* Transmit Poll Demand */
+
+/*
+** DC21040 Receive Poll Demand Register (DE4X5_RPD)
+*/
+#define RPD 0x00000001 /* Receive Poll Demand */
+
+/*
+** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
+*/
+#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
+
+/*
+** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
+*/
+#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
+
+/*
+** Status Register (DE4X5_STS)
+*/
+#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
+#define STS_BE 0x03800000 /* Bus Error Bits */
+#define STS_TS 0x00700000 /* Transmit Process State */
+#define STS_RS 0x000e0000 /* Receive Process State */
+#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define STS_ER 0x00004000 /* Early Receive */
+#define STS_FBE 0x00002000 /* Fatal Bus Error */
+#define STS_SE 0x00002000 /* System Error */
+#define STS_LNF 0x00001000 /* Link Fail */
+#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
+#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
+#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define STS_AT 0x00000400 /* AUI/TP Pin */
+#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
+#define STS_RPS 0x00000100 /* Receive Process Stopped */
+#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define STS_RI 0x00000040 /* Receive Interrupt */
+#define STS_UNF 0x00000020 /* Transmit Underflow */
+#define STS_LNP 0x00000010 /* Link Pass */
+#define STS_ANC 0x00000010 /* Autonegotiation Complete */
+#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
+#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define STS_TPS 0x00000002 /* Transmit Process Stopped */
+#define STS_TI 0x00000001 /* Transmit Interrupt */
+
+#define EB_PAR 0x00000000 /* Parity Error */
+#define EB_MA 0x00800000 /* Master Abort */
+#define EB_TA 0x01000000 /* Target Abort */
+#define EB_RES0 0x01800000 /* Reserved */
+#define EB_RES1 0x02000000 /* Reserved */
+
+#define TS_STOP 0x00000000 /* Stopped */
+#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
+#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
+#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
+#define TS_RES 0x00400000 /* Reserved */
+#define TS_SPKT 0x00500000 /* Setup Packet */
+#define TS_SUSP 0x00600000 /* Suspended */
+#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
+
+#define RS_STOP 0x00000000 /* Stopped */
+#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
+#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
+#define RS_SUSP 0x00080000 /* Suspended */
+#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
+#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
+#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
+
+#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
+
+/*
+** Operation Mode Register (DE4X5_OMR)
+*/
+#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
+#define OMR_RA 0x40000000 /* Receive All */
+#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
+#define OMR_SCR 0x01000000 /* Scrambler Mode */
+#define OMR_PCS 0x00800000 /* PCS Function */
+#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
+#define OMR_SF 0x00200000 /* Store and Forward */
+#define OMR_HBD 0x00080000 /* HeartBeat Disable */
+#define OMR_PS 0x00040000 /* Port Select */
+#define OMR_CA 0x00020000 /* Capture Effect Enable */
+#define OMR_BP 0x00010000 /* Back Pressure */
+#define OMR_TR 0x0000c000 /* Threshold Control Bits */
+#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
+#define OMR_FC 0x00001000 /* Force Collision Mode */
+#define OMR_OM 0x00000c00 /* Operating Mode */
+#define OMR_FDX 0x00000200 /* Full Duplex Mode */
+#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
+#define OMR_PM 0x00000080 /* Pass All Multicast */
+#define OMR_PR 0x00000040 /* Promiscuous Mode */
+#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
+#define OMR_IF 0x00000010 /* Inverse Filtering */
+#define OMR_PB 0x00000008 /* Pass Bad Frames */
+#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
+#define OMR_SR 0x00000002 /* Start/Stop Receive */
+#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
+
+#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
+#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
+#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
+#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
+
+#define OMR_DEF (OMR_SDP)
+#define OMR_SIA (OMR_SDP | OMR_TTM)
+#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
+#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
+#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
+
+/*
+** DC21040 Interrupt Mask Register (DE4X5_IMR)
+*/
+#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
+#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
+#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
+#define IMR_ERM 0x00004000 /* Early Receive Mask */
+#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
+#define IMR_SEM 0x00002000 /* System Error Mask */
+#define IMR_LFM 0x00001000 /* Link Fail Mask */
+#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
+#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
+#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
+#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
+#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
+#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
+#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
+#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
+#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
+#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
+#define IMR_LPM 0x00000010 /* Link Pass */
+#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
+#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
+#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
+#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
+
+/*
+** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
+*/
+#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
+#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
+#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
+#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
+#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
+
+/*
+** DC21040 Ethernet Address PROM (DE4X5_APROM)
+*/
+#define APROM_DN 0x80000000 /* Data Not Valid */
+#define APROM_DT 0x000000ff /* Address Byte */
+
+/*
+** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
+*/
+#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define BROM_RD 0x00004000 /* Read from Boot ROM */
+#define BROM_WR 0x00002000 /* Write to Boot ROM */
+#define BROM_BR 0x00001000 /* Select Boot ROM when set */
+#define BROM_SR 0x00000800 /* Select Serial ROM when set */
+#define BROM_REG 0x00000400 /* External Register Select */
+#define BROM_DT 0x000000ff /* Data Byte */
+
+/*
+** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
+*/
+#define MII_MDI 0x00080000 /* MII Management Data In */
+#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
+#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
+#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
+#define MII_MDT 0x00020000 /* MII Management Data Out */
+#define MII_MDC 0x00010000 /* MII Management Clock */
+#define MII_RD 0x00004000 /* Read from MII */
+#define MII_WR 0x00002000 /* Write to MII */
+#define MII_SEL 0x00000800 /* Select MII when RESET */
+
+#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define SROM_RD 0x00004000 /* Read from Boot ROM */
+#define SROM_WR 0x00002000 /* Write to Boot ROM */
+#define SROM_BR 0x00001000 /* Select Boot ROM when set */
+#define SROM_SR 0x00000800 /* Select Serial ROM when set */
+#define SROM_REG 0x00000400 /* External Register Select */
+#define SROM_DT 0x000000ff /* Data Byte */
+
+#define DT_OUT 0x00000008 /* Serial Data Out */
+#define DT_IN 0x00000004 /* Serial Data In */
+#define DT_CLK 0x00000002 /* Serial ROM Clock */
+#define DT_CS 0x00000001 /* Serial ROM Chip Select */
+
+#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
+#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
+#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
+#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
+
+#define MII_CR 0x00 /* MII Management Control Register */
+#define MII_SR 0x01 /* MII Management Status Register */
+#define MII_ID0 0x02 /* PHY Identifier Register 0 */
+#define MII_ID1 0x03 /* PHY Identifier Register 1 */
+#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
+#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
+#define MII_ANE 0x06 /* Auto Negotiation Expansion */
+#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
+
+#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
+
+/*
+** MII Management Control Register
+*/
+#define MII_CR_RST 0x8000 /* RESET the PHY chip */
+#define MII_CR_LPBK 0x4000 /* Loopback enable */
+#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
+#define MII_CR_10 0x0000 /* Set 10Mb/s */
+#define MII_CR_100 0x2000 /* Set 100Mb/s */
+#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
+#define MII_CR_PD 0x0800 /* Power Down */
+#define MII_CR_ISOL 0x0400 /* Isolate Mode */
+#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
+#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
+#define MII_CR_CTE 0x0080 /* Collision Test Enable */
+
+/*
+** MII Management Status Register
+*/
+#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
+#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
+#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
+#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
+#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
+#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
+#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
+#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
+#define MII_SR_LKS 0x0004 /* Link Status */
+#define MII_SR_JABD 0x0002 /* Jabber Detect */
+#define MII_SR_XC 0x0001 /* Extended Capabilities */
+
+/*
+** MII Management Auto Negotiation Advertisement Register
+*/
+#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** MII Management Auto Negotiation Remote End Register
+*/
+#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
+#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
+#define MII_ANLPA_RF 0x2000 /* Remote Fault */
+#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** SROM Media Definitions (ABG SROM Section)
+*/
+#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
+#define MEDIA_MII 0x0040 /* MII Present on the adapter */
+#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
+#define MEDIA_AUI 0x0004 /* AUI Media present */
+#define MEDIA_TP 0x0002 /* TP Media present */
+#define MEDIA_BNC 0x0001 /* BNC Media present */
+
+/*
+** SROM Definitions (Digital Semiconductor Format)
+*/
+#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
+#define SROM_SSID 0x0002 /* Sub-system ID offset */
+#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
+#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
+#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
+#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
+#define SROM_SFV 0x0012 /* SROM Format Version offset */
+#define SROM_CCNT 0x0013 /* Controller Count offset */
+#define SROM_HWADD 0x0014 /* Hardware Address offset */
+#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
+#define SROM_CRC 0x007e /* SROM CRC offset */
+
+/*
+** SROM Media Connection Definitions
+*/
+#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
+#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
+#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
+#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
+#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
+#define SROM_100BT4 0x0006 /* 100BASE-T4 */
+#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
+#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
+#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
+#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
+#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
+#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
+#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
+#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
+#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
+#define SROM_PAO 0x8800 /* Powerup Autosense Only */
+#define SROM_NSMI 0xffff /* No Selected Media Information */
+
+/*
+** SROM Media Definitions
+*/
+#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
+#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
+#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
+#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
+#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
+#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
+
+#define BLOCK_LEN 0x7f /* Extended blocks length mask */
+#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
+#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
+
+/*
+** SROM Compact Format Block Masks
+*/
+#define COMPACT_FI 0x80 /* Format Indicator */
+#define COMPACT_LEN 0x04 /* Length */
+#define COMPACT_MC 0x3f /* Media Code */
+
+/*
+** SROM Extended Format Block Type 0 Masks
+*/
+#define BLOCK0_FI 0x80 /* Format Indicator */
+#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
+#define BLOCK0_MC 0x3f /* Media Code */
+
+/*
+** DC21040 Full Duplex Register (DE4X5_FDR)
+*/
+#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
+
+/*
+** DC21041 General Purpose Timer Register (DE4X5_GPT)
+*/
+#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
+#define GPT_VAL 0x0000ffff /* Timer Value */
+
+/*
+** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
+*/
+/* Valid ONLY for DE500 hardware */
+#define GEP_LNP 0x00000080 /* Link Pass (input) */
+#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
+#define GEP_SDET 0x00000020 /* Signal Detect (input) */
+#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
+#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
+#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
+#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
+#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
+#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
+#define GEP_CTRL 0x00000100 /* GEP control bit */
+
+/*
+** SIA Register Defaults
+*/
+#define CSR13 0x00000001
+#define CSR14 0x0003ff7f /* Autonegotiation disabled */
+#define CSR15 0x00000008
+
+/*
+** SIA Status Register (DE4X5_SISR)
+*/
+#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
+#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
+#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
+#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
+#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
+#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
+#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
+#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
+#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
+#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
+#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
+#define SISR_DAO 0x00000080 /* PLL All One */
+#define SISR_DAZ 0x00000040 /* PLL All Zero */
+#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
+#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
+#define SISR_APS 0x00000008 /* Auto Polarity State */
+#define SISR_LKF 0x00000004 /* Link Fail Status */
+#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
+#define SISR_NCR 0x00000002 /* Network Connection Error */
+#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
+#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
+#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
+
+#define ANS_NDIS 0x00000000 /* Nway disable */
+#define ANS_TDIS 0x00001000 /* Transmit Disable */
+#define ANS_ADET 0x00002000 /* Ability Detect */
+#define ANS_ACK 0x00003000 /* Acknowledge */
+#define ANS_CACK 0x00004000 /* Complete Acknowledge */
+#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
+#define ANS_LCHK 0x00006000 /* Link Check */
+
+#define SISR_RST 0x00000301 /* CSR12 reset */
+#define SISR_ANR 0x00001301 /* Autonegotiation restart */
+
+/*
+** SIA Connectivity Register (DE4X5_SICR)
+*/
+#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
+#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
+#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
+#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
+#define SICR_IE 0x00001000 /* Input Enable */
+#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
+#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
+#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
+#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
+#define SICR_ASE 0x00000080 /* APLL Start Enable*/
+#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
+#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
+#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
+#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
+#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
+#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
+#define SICR_SRL 0x00000001 /* SIA Reset */
+#define SIA_RESET 0x00000000 /* SIA Reset Value */
+
+/*
+** SIA Transmit and Receive Register (DE4X5_STRR)
+*/
+#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
+#define STRR_SPP 0x00004000 /* Set Polarity Plus */
+#define STRR_APE 0x00002000 /* Auto Polarity Enable */
+#define STRR_LTE 0x00001000 /* Link Test Enable */
+#define STRR_SQE 0x00000800 /* Signal Quality Enable */
+#define STRR_CLD 0x00000400 /* Collision Detect Enable */
+#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
+#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
+#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
+#define STRR_HDE 0x00000040 /* Half Duplex Enable */
+#define STRR_CPEN 0x00000030 /* Compensation Enable */
+#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
+#define STRR_DREN 0x00000004 /* Driver Enable */
+#define STRR_LBK 0x00000002 /* Loopback Enable */
+#define STRR_ECEN 0x00000001 /* Encoder Enable */
+#define STRR_RESET 0xffffffff /* Reset value for STRR */
+
+/*
+** SIA General Register (DE4X5_SIGR)
+*/
+#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
+#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
+#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
+#define SIGR_CWE 0x08000000 /* Control Write Enable */
+#define SIGR_RME 0x04000000 /* Receive Match Enable */
+#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
+#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
+#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
+#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
+#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
+#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
+#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
+#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
+#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
+#define SIGR_FRL 0x00002000 /* Force Receiver Low */
+#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
+#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
+#define SIGR_FLF 0x00000400 /* Force Link Fail */
+#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
+#define SIGR_TSCK 0x00000100 /* Test Clock */
+#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
+#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
+#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
+#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
+#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
+#define SIGR_JCK 0x00000004 /* Jabber Clock */
+#define SIGR_HUJ 0x00000002 /* Host Unjab */
+#define SIGR_JBD 0x00000001 /* Jabber Disable */
+#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
+
+/*
+** Receive Descriptor Bit Summary
+*/
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_FF 0x40000000 /* Filtering Fail */
+#define RD_FL 0x3fff0000 /* Frame Length */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_LE 0x00004000 /* Length Error */
+#define RD_DT 0x00003000 /* Data Type */
+#define RD_RF 0x00000800 /* Runt Frame */
+#define RD_MF 0x00000400 /* Multicast Frame */
+#define RD_FS 0x00000200 /* First Descriptor */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_TL 0x00000080 /* Frame Too Long */
+#define RD_CS 0x00000040 /* Collision Seen */
+#define RD_FT 0x00000020 /* Frame Type */
+#define RD_RJ 0x00000010 /* Receive Watchdog */
+#define RD_RE 0x00000008 /* Report on MII Error */
+#define RD_DB 0x00000004 /* Dribbling Bit */
+#define RD_CE 0x00000002 /* CRC Error */
+#define RD_OF 0x00000001 /* Overflow */
+
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_RCH 0x01000000 /* Second Address Chained */
+#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define RD_RBS1 0x000007ff /* Buffer 1 Size */
+
+/*
+** Transmit Descriptor Bit Summary
+*/
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
+#define TD_LO 0x00000800 /* Loss Of Carrier */
+#define TD_NC 0x00000400 /* No Carrier */
+#define TD_LC 0x00000200 /* Late Collision */
+#define TD_EC 0x00000100 /* Excessive Collisions */
+#define TD_HF 0x00000080 /* Heartbeat Fail */
+#define TD_CC 0x00000078 /* Collision Counter */
+#define TD_LF 0x00000004 /* Link Fail */
+#define TD_UF 0x00000002 /* Underflow Error */
+#define TD_DE 0x00000001 /* Deferred */
+
+#define TD_IC 0x80000000 /* Interrupt On Completion */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_FT1 0x10000000 /* Filtering Type */
+#define TD_SET 0x08000000 /* Setup Packet */
+#define TD_AC 0x04000000 /* Add CRC Disable */
+#define TD_TER 0x02000000 /* Transmit End Of Ring */
+#define TD_TCH 0x01000000 /* Second Address Chained */
+#define TD_DPD 0x00800000 /* Disabled Padding */
+#define TD_FT0 0x00400000 /* Filtering Type */
+#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
+#define TD_TBS1 0x000007ff /* Buffer 1 Size */
+
+#define PERFECT_F 0x00000000
+#define HASH_F TD_FT0
+#define INVERSE_F TD_FT1
+#define HASH_O_F (TD_FT1 | TD_FT0)
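
These status and control bits live in the little-endian descriptor words that the debug and ioctl code in de4x5.c reads back as lp->rx_ring[i].status and lp->rx_ring[i].buf. A sketch of how a receive descriptor might be handed back to the chip; the four-word layout below is an assumption for illustration, since the real descriptor struct is declared in de4x5.c, not in this header.

/* Sketch only: a Tulip-style four-word descriptor layout is assumed here
 * purely for illustration; the driver's real descriptor struct is defined
 * in de4x5.c.
 */
struct example_rx_desc {
    volatile s32 status;        /* R_OWN and RD_* status bits      */
    u32 des1;                   /* RD_RCH / RD_RER, buffer sizes   */
    u32 buf;                    /* buffer 1 bus address            */
    u32 next;                   /* buffer 2 / chain bus address    */
};

static inline void example_rx_give_to_chip(struct example_rx_desc *rd,
                                           u32 buf, int len)
{
    rd->des1   = cpu_to_le32(RD_RCH | (len & RD_RBS1));  /* chained, size */
    rd->buf    = cpu_to_le32(buf);
    rd->status = cpu_to_le32(R_OWN);                     /* chip owns it  */
}
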
+
+/*
+** Media / mode state machine definitions
+** User selectable:
+*/
+#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
+#define TP_NW 0x0002 /* 10Base-T with Nway */
+#define BNC 0x0004 /* Thinwire */
+#define AUI 0x0008 /* Thickwire */
+#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
+#define _10Mb 0x0040 /* 10Mb/s Ethernet */
+#define _100Mb 0x0080 /* 100Mb/s Ethernet */
+#define AUTO 0x4000 /* Auto sense the media or speed */
+
+/*
+** Internal states
+*/
+#define NC 0x0000 /* No Connection */
+#define ANS 0x0020 /* Intermediate AutoNegotiation State */
+#define SPD_DET 0x0100 /* Parallel speed detection */
+#define INIT 0x0200 /* Initial state */
+#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
+#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
+#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
+#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
+#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
+#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
+#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
+#define MII 0x1000 /* MII on the 21143 */
+
+#define TIMER_CB 0x80000000 /* Timer callback detection */
+
+/*
+** DE4X5 DEBUG Options
+*/
+#define DEBUG_NONE 0x0000 /* No DEBUG messages */
+#define DEBUG_VERSION 0x0001 /* Print version message */
+#define DEBUG_MEDIA 0x0002 /* Print media messages */
+#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
+#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
+#define DEBUG_SROM 0x0010 /* Print SROM messages */
+#define DEBUG_MII 0x0020 /* Print MII messages */
+#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
+#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
+#define DEBUG_PCICFG 0x0100
+#define DEBUG_ALL 0x01ff
+
+/*
+** Miscellaneous
+*/
+#define PCI 0
+#define EISA 1
+
+#define HASH_TABLE_LEN 512 /* Bits */
+#define HASH_BITS 0x01ff /* 9 LS bits */
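
With a 512-bit table only the nine least-significant bits of the multicast hash select a bit position, which is exactly what HASH_BITS masks off. A plausible sketch of the index computation; the driver's actual setup-frame construction is in de4x5.c, and ether_crc_le() is the kernel's little-endian Ethernet CRC helper.

/* Plausible sketch only: index the 512-bit hash table with the low 9 bits
 * of the little-endian CRC-32 of a multicast address.  See de4x5.c for the
 * setup frame that actually loads the table into the chip.
 */
static inline int example_hash_index(const unsigned char *mc_addr)
{
    return ether_crc_le(ETH_ALEN, mc_addr) & HASH_BITS;  /* 0..511 */
}
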
+
+#define SETUP_FRAME_LEN 192 /* Bytes */
+#define IMPERF_PA_OFFSET 156 /* Bytes */
+
+#define POLL_DEMAND 1
+
+#define LOST_MEDIA_THRESHOLD 3
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define DE4X5_STRLEN 8
+
+#define DE4X5_INIT 0 /* Initialisation time */
+#define DE4X5_RUN 1 /* Run time */
+
+#define DE4X5_SAVE_STATE 0
+#define DE4X5_RESTORE_STATE 1
+
+/*
+** Address Filtering Modes
+*/
+#define PERFECT 0 /* 16 perfect physical addresses */
+#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
+#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
+#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
+
+#define ALL 0 /* Clear out all the setup frame */
+#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
+
+/*
+** Booleans
+*/
+#define NO 0
+#define FALSE 0
+
+#define YES ~0
+#define TRUE ~0
+
+/*
+** Adapter state
+*/
+#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
+#define CLOSED 1 /* Ready for opening */
+#define OPEN 2 /* Running */
+
+/*
+** Various wait times
+*/
+#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
+#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
+
+/*
+** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
+** the vendors seem split 50-50 on how to calculate the OUI register values
+** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
+*/
+#define NATIONAL_TX 0x2000
+#define BROADCOM_T4 0x03e0
+#define SEEQ_T4 0x0016
+#define CYPRESS_T4 0x0014
+
+/*
+** Speed Selection stuff
+*/
+#define SET_10Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+#define SET_100Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ int fdx=0;\
+ if (lp->phy[lp->active].id == NATIONAL_TX) {\
+ mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
+ 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
+ if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ if (fdx) omr |= OMR_FDX;\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/* FIX ME so I don't jam 10Mb networks */
+#define SET_100Mb_PDET {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
+ lp->cache.gep = (GEP_FDXD | GEP_MODE);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
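
SET_10Mb, SET_100Mb and SET_100Mb_PDET are statement blocks, not functions: they expand in place inside the media state machine in de4x5.c and assume that lp, dev, iobase, omr and (for SET_100Mb) sr are already in scope at the expansion site. A minimal sketch of the kind of context they expect; the function below is illustrative and not part of the driver.

/* Illustrative expansion context only -- not a driver function. */
static void example_select_speed(struct net_device *dev, int want_100)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 omr, sr = 0;                 /* names the macros reference */

    if (want_100) {
        SET_100Mb;                   /* expands to a brace-enclosed block */
    } else {
        SET_10Mb;
    }
}
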
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DE4X5IOCTL SIOCDEVPRIVATE
+
+struct de4x5_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
+#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
+#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
+#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
+#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
+#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
+#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
+#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
+#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
+#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
+
+#define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((s32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008))
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
new file mode 100644
index 000000000000..e25f33df223e
--- /dev/null
+++ b/drivers/net/tulip/dmfe.c
@@ -0,0 +1,2066 @@
+/*
+ A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
+ ethernet driver for Linux.
+ Copyright (C) 1997 Sten Wang
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ DAVICOM Web-Site: www.davicom.com.tw
+
+ Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
+ Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
+
+ (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
+
+ Marcelo Tosatti <marcelo@conectiva.com.br> :
+ Made it compile in 2.3 (device to net_device)
+
+ Alan Cox <alan@redhat.com> :
+ Cleaned up for kernel merge.
+ Removed the back compatibility support
+ Reformatted, fixing spelling etc as I went
+ Removed IRQ 0-15 assumption
+
+ Jeff Garzik <jgarzik@pobox.com> :
+ Updated to use new PCI driver API.
+ Resource usage cleanups.
+ Report driver version to user.
+
+ Tobias Ringstrom <tori@unhappy.mine.nu> :
+ Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
+ Andrew Morton and Frank Davis for the SMP safety fixes.
+
+ Vojtech Pavlik <vojtech@suse.cz> :
+ Cleaned up pointer arithmetics.
+ Fixed a lot of 64bit issues.
+ Cleaned up printk()s a bit.
+ Fixed some obvious big endian problems.
+
+ Tobias Ringstrom <tori@unhappy.mine.nu> :
+ Use time_after for jiffies calculation. Added ethtool
+ support. Updated PCI resource allocation. Do not
+ forget to unmap PCI mapped skbs.
+
+ Alan Cox <alan@redhat.com>
+ Added new PCI identifiers provided by Clear Zhang at ALi
+ for their 1563 ethernet device.
+
+ TODO
+
+ Implement pci_driver::suspend() and pci_driver::resume()
+ power management methods.
+
+ Check on 64 bit boxes.
+ Check and fix on big endian boxes.
+
+ Test and make sure PCI latency is now correct for all cases.
+*/
+
+#define DRV_NAME "dmfe"
+#define DRV_VERSION "1.36.4"
+#define DRV_RELDATE "2002-01-17"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
+#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
+#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
+#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
+
+#define DM9102_IO_SIZE 0x80
+#define DM9102A_IO_SIZE 0x100
+#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packets sent at one time */
+#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
+#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
+#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC 0x600
+#define RX_ALLOC_SIZE 0x620
+#define DM910X_RESET 1
+#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
+#define CR6_DEFAULT 0x00080000 /* HD */
+#define CR7_DEFAULT 0x180c1
+#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
+#define DMFE_MAX_MULTICAST 14
+#define RX_COPY_SIZE 100
+#define MAX_CHECK_PACKET 0x8000
+#define DM9801_NOISE_FLOOR 8
+#define DM9802_NOISE_FLOOR 5
+
+#define DMFE_10MHF 0
+#define DMFE_100MHF 1
+#define DMFE_10MFD 4
+#define DMFE_100MFD 5
+#define DMFE_AUTO 8
+#define DMFE_1M_HPNA 0x10
+
+#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
+#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
+#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
+#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
+#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
+#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
+
+#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
+#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
+#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
+
+#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+
+#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ 0x4800
+#define CR9_SRCS 0x1
+#define CR9_SRCLK 0x2
+#define CR9_CRDOUT 0x8
+#define SROM_DATA_0 0x0
+#define SROM_DATA_1 0x4
+#define PHY_DATA_1 0x20000
+#define PHY_DATA_0 0x00000
+#define MDCLKH 0x10000
+
+#define PHY_POWER_DOWN 0x800
+
+#define SROM_V41_CODE 0x14
+
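+/* Clock one command/address bit into the SROM: present the data with chip select asserted, pulse SRCLK high, then drop it again, with 5 us settling delays */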
+#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
+
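+/* I/O window size to validate: 0x100 bytes for the DM9132 and DM9102A revisions >= 0x02000030, 0x80 bytes for older DM9102 parts */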
+#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
+#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
+
+/* Sten Check */
+#define DEVICE net_device
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+ u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+ char *tx_buf_ptr; /* Data for us */
+ struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+ u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+ struct sk_buff *rx_skb_ptr; /* Data for us */
+ struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct dmfe_board_info {
+ u32 chip_id; /* Chip vendor/Device ID */
+ u32 chip_revision; /* Chip revision */
+ struct DEVICE *next_dev; /* next device */
+ struct pci_dev *pdev; /* PCI device */
+ spinlock_t lock;
+
+ long ioaddr; /* I/O base address */
+ u32 cr0_data;
+ u32 cr5_data;
+ u32 cr6_data;
+ u32 cr7_data;
+ u32 cr15_data;
+
+ /* pointer for memory physical address */
+ dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
+ dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
+ dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
+ dma_addr_t first_tx_desc_dma;
+ dma_addr_t first_rx_desc_dma;
+
+ /* descriptor pointer */
+ unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
+ unsigned char *buf_pool_start; /* Tx buffer pool align dword */
+ unsigned char *desc_pool_ptr; /* descriptor pool memory */
+ struct tx_desc *first_tx_desc;
+ struct tx_desc *tx_insert_ptr;
+ struct tx_desc *tx_remove_ptr;
+ struct rx_desc *first_rx_desc;
+ struct rx_desc *rx_insert_ptr;
+ struct rx_desc *rx_ready_ptr; /* next received packet to process */
+ unsigned long tx_packet_cnt; /* transmitted packet count */
+ unsigned long tx_queue_cnt; /* wait to send packet count */
+ unsigned long rx_avail_cnt; /* available rx descriptor count */
+ unsigned long interval_rx_cnt; /* rx packet count per timer interval */
+
+ u16 HPNA_command; /* For HPNA register 16 */
+ u16 HPNA_timer; /* For HPNA remote device check */
+ u16 dbug_cnt;
+ u16 NIC_capability; /* NIC media capability */
+ u16 PHY_reg4; /* Saved Phyxcer register 4 value */
+
+ u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
+ u8 chip_type; /* Keep DM9102A chip type */
+ u8 media_mode; /* user specified media mode */
+ u8 op_mode; /* actual working media mode */
+ u8 phy_addr;
+ u8 link_failed; /* Link failure flag */
+ u8 wait_reset; /* Hardware failed, need to reset */
+ u8 dm910x_chk_mode; /* Operating mode check */
+ u8 first_in_callback; /* Flag to record state */
+ struct timer_list timer;
+
+ /* System defined statistic counter */
+ struct net_device_stats stats;
+
+ /* Driver defined statistic counter */
+ unsigned long tx_fifo_underrun;
+ unsigned long tx_loss_carrier;
+ unsigned long tx_no_carrier;
+ unsigned long tx_late_collision;
+ unsigned long tx_excessive_collision;
+ unsigned long tx_jabber_timeout;
+ unsigned long reset_count;
+ unsigned long reset_cr8;
+ unsigned long reset_fatal;
+ unsigned long reset_TXtimeout;
+
+ /* NIC SROM data */
+ unsigned char srom[128];
+};
+
+enum dmfe_offsets {
+ DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+ DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+ DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+ DCR15 = 0x78
+};
+
+enum dmfe_CR6_bits {
+ CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+ CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+ CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static char version[] __devinitdata =
+ KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static int dmfe_debug;
+static unsigned char dmfe_media_mode = DMFE_AUTO;
+static u32 dmfe_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static unsigned char mode = 8;
+static u8 chkmode = 1;
+static u8 HPNA_mode; /* Default: Low Power/High Speed */
+static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
+static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
+static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
+static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
+ 4: TX pause packet */
+
+
+/* function declaration ------------------------------------- */
+static int dmfe_open(struct DEVICE *);
+static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
+static int dmfe_stop(struct DEVICE *);
+static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
+static void dmfe_set_filter_mode(struct DEVICE *);
+static struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long ,int);
+static irqreturn_t dmfe_interrupt(int , void *, struct pt_regs *);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_dmfe (struct net_device *dev);
+#endif
+static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
+static void allocate_rx_buffer(struct dmfe_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct DEVICE * ,int);
+static void dm9132_id_table(struct DEVICE * ,int);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_write_1bit(unsigned long, u32);
+static u16 phy_read_1bit(unsigned long);
+static u8 dmfe_sense_speed(struct dmfe_board_info *);
+static void dmfe_process_mode(struct dmfe_board_info *);
+static void dmfe_timer(unsigned long);
+static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
+static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
+static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_free_rxbuffer(struct dmfe_board_info *);
+static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_parse_srom(struct dmfe_board_info *);
+static void dmfe_program_DM9801(struct dmfe_board_info *, int);
+static void dmfe_program_DM9802(struct dmfe_board_info *);
+static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
+static void dmfe_set_phyxcer(struct dmfe_board_info *);
+
+/* DM910X network board routines --------------------------- */
+
+/*
+ * Search for a DM910X board, allocate space and register it
+ */
+
+static int __devinit dmfe_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dmfe_board_info *db; /* board information structure */
+ struct net_device *dev;
+ u32 dev_rev, pci_pmr;
+ int i, err;
+
+ DMFE_DBUG(0, "dmfe_init_one()", 0);
+
+ if (!printed_version++)
+ printk(version);
+
+ /* Init network device */
+ dev = alloc_etherdev(sizeof(*db));
+ if (dev == NULL)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_set_dma_mask(pdev, 0xffffffff)) {
+ printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ err = -ENODEV;
+ goto err_out_free;
+ }
+
+ /* Enable Master/IO access, Disable memory access */
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_out_free;
+
+ if (!pci_resource_start(pdev, 0)) {
+ printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* Read Chip revision */
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+
+ if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
+ printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
+
+ /* Set Latency Timer 80h */
+ /* FIXME: setting values > 32 breaks some SiS 559x stuff.
+ Need a PCI quirk.. */
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+#endif
+
+ if (pci_request_regions(pdev, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* Init system & device */
+ db = netdev_priv(dev);
+
+ /* Allocate Tx/Rx descriptor memory */
+ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+
+ db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+ db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+ db->buf_pool_start = db->buf_pool_ptr;
+ db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+ db->chip_id = ent->driver_data;
+ db->ioaddr = pci_resource_start(pdev, 0);
+ db->chip_revision = dev_rev;
+
+ db->pdev = pdev;
+
+ dev->base_addr = db->ioaddr;
+ dev->irq = pdev->irq;
+ pci_set_drvdata(pdev, dev);
+ dev->open = &dmfe_open;
+ dev->hard_start_xmit = &dmfe_start_xmit;
+ dev->stop = &dmfe_stop;
+ dev->get_stats = &dmfe_get_stats;
+ dev->set_multicast_list = &dmfe_set_filter_mode;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &poll_dmfe;
+#endif
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ spin_lock_init(&db->lock);
+
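+ /* Config offset 0x50 is the power management register; together with the revision it identifies the DM9102A E3 stepping */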
+ pci_read_config_dword(pdev, 0x50, &pci_pmr);
+ pci_pmr &= 0x70000;
+ if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
+ db->chip_type = 1; /* DM9102A E3 */
+ else
+ db->chip_type = 0;
+
+ /* read 64 word srom data */
+ for (i = 0; i < 64; i++)
+ ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+
+ /* Set Node address */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = db->srom[20 + i];
+
+ err = register_netdev (dev);
+ if (err)
+ goto err_out_res;
+
+ printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
+ dev->name,
+ ent->driver_data >> 16,
+ pci_name(pdev));
+ for (i = 0; i < 6; i++)
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", irq %d.\n", dev->irq);
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_out_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ return err;
+}
+
+
+static void __devexit dmfe_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_remove_one()", 0);
+
+ if (dev) {
+ pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+ db->desc_pool_dma_ptr);
+ pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ unregister_netdev(dev);
+ pci_release_regions(pdev);
+ free_netdev(dev); /* free board information */
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
+}
+
+
+/*
+ * Open the interface.
+ * The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int dmfe_open(struct DEVICE *dev)
+{
+ int ret;
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_open", 0);
+
+ ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
+ if (ret)
+ return ret;
+
+ /* system variable init */
+ db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
+ db->tx_packet_cnt = 0;
+ db->tx_queue_cnt = 0;
+ db->rx_avail_cnt = 0;
+ db->link_failed = 1;
+ db->wait_reset = 0;
+
+ db->first_in_callback = 0;
+ db->NIC_capability = 0xf; /* All capability*/
+ db->PHY_reg4 = 0x1e0;
+
+ /* CR6 operation mode decision */
+ if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
+ (db->chip_revision >= 0x02000030) ) {
+ db->cr6_data |= DMFE_TXTH_256;
+ db->cr0_data = CR0_DEFAULT;
+ db->dm910x_chk_mode=4; /* Enter the normal mode */
+ } else {
+ db->cr6_data |= CR6_SFT; /* Store & Forward mode */
+ db->cr0_data = 0;
+ db->dm910x_chk_mode = 1; /* Enter the check mode */
+ }
+
+ /* Initialize the DM910X board */
+ dmfe_init_dm910x(dev);
+
+ /* Activate the system interface */
+ netif_wake_queue(dev);
+
+ /* Set up and activate the timer process */
+ init_timer(&db->timer);
+ db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
+ db->timer.data = (unsigned long)dev;
+ db->timer.function = &dmfe_timer;
+ add_timer(&db->timer);
+
+ return 0;
+}
+
+
+/* Initialize the DM910X board:
+ * Reset the DM910X board
+ * Initialize the TX/Rx descriptor chain structure
+ * Send the set-up frame
+ * Enable Tx/Rx machine
+ */
+
+static void dmfe_init_dm910x(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = db->ioaddr;
+
+ DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
+
+ /* Reset DM910x MAC controller */
+ outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
+ udelay(100);
+ outl(db->cr0_data, ioaddr + DCR0);
+ udelay(5);
+
+ /* Phy addr : DM9102(A)/DM9132/DM9801, phy address = 1 */
+ db->phy_addr = 1;
+
+ /* Parse SROM and media mode */
+ dmfe_parse_srom(db);
+ db->media_mode = dmfe_media_mode;
+
+ /* RESET Phyxcer Chip by GPR port bit 7 */
+ outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
+ if (db->chip_id == PCI_DM9009_ID) {
+ outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
+ mdelay(300); /* Delay 300 ms */
+ }
+ outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
+
+ /* Process Phyxcer Media Mode */
+ if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
+ dmfe_set_phyxcer(db);
+
+ /* Media Mode Process */
+ if ( !(db->media_mode & DMFE_AUTO) )
+ db->op_mode = db->media_mode; /* Force Mode */
+
+ /* Initialize Transmit/Receive descriptors and CR3/4 */
+ dmfe_descriptor_init(db, ioaddr);
+
+ /* Init CR6 to program DM910x operation */
+ update_cr6(db->cr6_data, ioaddr);
+
+ /* Send setup frame */
+ if (db->chip_id == PCI_DM9132_ID)
+ dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ else
+ send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+
+ /* Init CR7, interrupt active bit */
+ db->cr7_data = CR7_DEFAULT;
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ /* Init CR15, Tx jabber and Rx watchdog timer */
+ outl(db->cr15_data, ioaddr + DCR15);
+
+ /* Enable DM910X Tx/Rx function */
+ db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
+ update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ * Hardware start transmission.
+ * Send a packet to media from the upper layer.
+ */
+
+static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ struct tx_desc *txptr;
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_start_xmit", 0);
+
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
+ /* Too large packet check */
+ if (skb->len > MAX_PACKET_SIZE) {
+ printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* No Tx resource check; this should never happen normally */
+ if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
+ spin_unlock_irqrestore(&db->lock, flags);
+ printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
+ return 1;
+ }
+
+ /* Disable NIC interrupt */
+ outl(0, dev->base_addr + DCR7);
+
+ /* transmit this packet */
+ txptr = db->tx_insert_ptr;
+ memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
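+ /* TDES1: frame length in the low bits plus control bits (interrupt on completion, first/last segment, chained descriptor) */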
+ txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
+
+ /* Point to next transmit free descriptor */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+
+ /* Transmit Packet Process */
+ if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
+ txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+ db->tx_packet_cnt++; /* Ready to send */
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ dev->trans_start = jiffies; /* saved time stamp */
+ } else {
+ db->tx_queue_cnt++; /* queue TX packet */
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ }
+
+ /* Tx resource check */
+ if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
+ netif_wake_queue(dev);
+
+ /* Restore CR7 to enable interrupt */
+ spin_unlock_irqrestore(&db->lock, flags);
+ outl(db->cr7_data, dev->base_addr + DCR7);
+
+ /* free this SKB */
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+
+/*
+ * Stop the interface.
+ * The interface is stopped when it is brought down.
+ */
+
+static int dmfe_stop(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ DMFE_DBUG(0, "dmfe_stop", 0);
+
+ /* disable system */
+ netif_stop_queue(dev);
+
+ /* delete the timer */
+ del_timer_sync(&db->timer);
+
+ /* Reset & stop DM910X board */
+ outl(DM910X_RESET, ioaddr + DCR0);
+ udelay(5);
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+ /* free interrupt */
+ free_irq(dev->irq, dev);
+
+ /* free allocated rx buffer */
+ dmfe_free_rxbuffer(db);
+
+#if 0
+ /* show statistic counter */
+ printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
+#endif
+
+ return 0;
+}
+
+
+/*
+ * DM9102 interrupt handler
+ * pass received packets to the upper layer and free transmitted packets
+ */
+
+static irqreturn_t dmfe_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct DEVICE *dev = dev_id;
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_interrupt()", 0);
+
+ if (!dev) {
+ DMFE_DBUG(1, "dmfe_interrupt() without DEVICE arg", 0);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Got DM910X status */
+ db->cr5_data = inl(ioaddr + DCR5);
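+ /* Writing the status bits back acknowledges them (write-one-to-clear) */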
+ outl(db->cr5_data, ioaddr + DCR5);
+ if ( !(db->cr5_data & 0xc1) ) {
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Disable all interrupt in CR7 to solve the interrupt edge problem */
+ outl(0, ioaddr + DCR7);
+
+ /* Check system status */
+ if (db->cr5_data & 0x2000) {
+ /* A system bus error happened */
+ DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+ db->reset_fatal++;
+ db->wait_reset = 1; /* Need to RESET */
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Receive the incoming packets */
+ if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+ dmfe_rx_packet(dev, db);
+
+ /* reallocate rx descriptor buffer */
+ if (db->rx_avail_cnt<RX_DESC_CNT)
+ allocate_rx_buffer(db);
+
+ /* Free the transmitted descriptor */
+ if ( db->cr5_data & 0x01)
+ dmfe_free_tx_pkt(dev, db);
+
+ /* Mode Check */
+ if (db->dm910x_chk_mode & 0x2) {
+ db->dm910x_chk_mode = 0x4;
+ db->cr6_data |= 0x100;
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+
+ /* Restore CR7 to enable interrupt mask */
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_dmfe (struct net_device *dev)
+{
+ /* disable_irq here is not very nice, but with the lockless
+ interrupt handler we have no other choice. */
+ disable_irq(dev->irq);
+ dmfe_interrupt (dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/*
+ * Free TX resource after TX complete
+ */
+
+static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+{
+ struct tx_desc *txptr;
+ unsigned long ioaddr = dev->base_addr;
+ u32 tdes0;
+
+ txptr = db->tx_remove_ptr;
+ while(db->tx_packet_cnt) {
+ tdes0 = le32_to_cpu(txptr->tdes0);
+ /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ if (tdes0 & 0x80000000)
+ break;
+
+ /* A packet sent completed */
+ db->tx_packet_cnt--;
+ db->stats.tx_packets++;
+
+ /* Transmit statistic counter */
+ if ( tdes0 != 0x7fffffff ) {
+ /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ db->stats.collisions += (tdes0 >> 3) & 0xf;
+ db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+ if (tdes0 & TDES0_ERR_MASK) {
+ db->stats.tx_errors++;
+
+ if (tdes0 & 0x0002) { /* UnderRun */
+ db->tx_fifo_underrun++;
+ if ( !(db->cr6_data & CR6_SFT) ) {
+ db->cr6_data = db->cr6_data | CR6_SFT;
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ }
+ if (tdes0 & 0x0100)
+ db->tx_excessive_collision++;
+ if (tdes0 & 0x0200)
+ db->tx_late_collision++;
+ if (tdes0 & 0x0400)
+ db->tx_no_carrier++;
+ if (tdes0 & 0x0800)
+ db->tx_loss_carrier++;
+ if (tdes0 & 0x4000)
+ db->tx_jabber_timeout++;
+ }
+ }
+
+ txptr = txptr->next_tx_desc;
+ }/* End of while */
+
+ /* Update TX remove pointer to next */
+ db->tx_remove_ptr = txptr;
+
+ /* Send the Tx packet in queue */
+ if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
+ txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+ db->tx_packet_cnt++; /* Ready to send */
+ db->tx_queue_cnt--;
+ outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
+ dev->trans_start = jiffies; /* saved time stamp */
+ }
+
+ /* Resource available check */
+ if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
+ netif_wake_queue(dev); /* Active upper layer, send again */
+}
+
+
+/*
+ * Calculate the CRC value of the Rx packet
+ * flag = 1 : return the reverse CRC (for the received packet CRC)
+ * 0 : return the normal CRC (for Hash Table index)
+ */
+
+static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
+{
+ u32 crc = crc32(~0, Data, Len);
+ if (flag) crc = ~crc;
+ return crc;
+}
+
+
+/*
+ * Receive incoming packets and pass them to the upper layer
+ */
+
+static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb;
+ int rxlen;
+ u32 rdes0;
+
+ rxptr = db->rx_ready_ptr;
+
+ while(db->rx_avail_cnt) {
+ rdes0 = le32_to_cpu(rxptr->rdes0);
+ if (rdes0 & 0x80000000) /* packet owner check */
+ break;
+
+ db->rx_avail_cnt--;
+ db->interval_rx_cnt++;
+
+ pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if ( (rdes0 & 0x300) != 0x300) {
+ /* A packet without First/Last flag */
+ /* reuse this SKB */
+ DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else {
+ /* A packet with First/Last flag */
+ rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
+
+ /* error summary bit check */
+ if (rdes0 & 0x8000) {
+ /* This is an error packet */
+ //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+ db->stats.rx_errors++;
+ if (rdes0 & 1)
+ db->stats.rx_fifo_errors++;
+ if (rdes0 & 2)
+ db->stats.rx_crc_errors++;
+ if (rdes0 & 0x80)
+ db->stats.rx_length_errors++;
+ }
+
+ if ( !(rdes0 & 0x8000) ||
+ ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+ skb = rxptr->rx_skb_ptr;
+
+ /* Check whether the received packet CRC needs to be verified */
+ if ( (db->dm910x_chk_mode & 1) &&
+ (cal_CRC(skb->tail, rxlen, 1) !=
+ (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
+ /* Found an erroneous received packet */
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ db->dm910x_chk_mode = 3;
+ } else {
+ /* Good packet, send to upper layer */
+ /* Short packets are copied into a new SKB */
+ if ( (rxlen < RX_COPY_SIZE) &&
+ ( (skb = dev_alloc_skb(rxlen + 2) )
+ != NULL) ) {
+ /* size less than COPY_SIZE, allocate a rxlen SKB */
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16byte align */
+ memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else {
+ skb->dev = dev;
+ skb_put(skb, rxlen);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ db->stats.rx_packets++;
+ db->stats.rx_bytes += rxlen;
+ }
+ } else {
+ /* Reuse SKB buffer when the packet is error */
+ DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ }
+ }
+
+ rxptr = rxptr->next_rx_desc;
+ }
+
+ db->rx_ready_ptr = rxptr;
+}
+
+
+/*
+ * Get statistics from driver.
+ */
+
+static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_get_stats", 0);
+ return &db->stats;
+}
+
+
+/*
+ * Set DM910X multicast address
+ */
+
+static void dmfe_set_filter_mode(struct DEVICE * dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+ DMFE_DBUG(0, "Enable PROM Mode", 0);
+ db->cr6_data |= CR6_PM | CR6_PBF;
+ update_cr6(db->cr6_data, db->ioaddr);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ db->cr6_data &= ~(CR6_PM | CR6_PBF);
+ db->cr6_data |= CR6_PAM;
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ if (db->chip_id == PCI_DM9132_ID)
+ dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ else
+ send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dmfe_board_info *np = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (np->pdev)
+ strcpy(info->bus_info, pci_name(np->pdev));
+ else
+ sprintf(info->bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/*
+ * A periodic timer routine
+ * Dynamic media sense, allocate Rx buffer...
+ */
+
+static void dmfe_timer(unsigned long data)
+{
+ u32 tmp_cr8;
+ unsigned char tmp_cr12;
+ struct DEVICE *dev = (struct DEVICE *) data;
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_timer()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Media mode processing when the link was OK before entering this routine */
+ if (db->first_in_callback == 0) {
+ db->first_in_callback = 1;
+ if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
+ db->cr6_data &= ~0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ db->cr6_data |= 0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+ db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+ }
+
+
+ /* Operating Mode Check */
+ if ( (db->dm910x_chk_mode & 0x1) &&
+ (db->stats.rx_packets > MAX_CHECK_PACKET) )
+ db->dm910x_chk_mode = 0x4;
+
+ /* Dynamic reset DM910X : system error or transmit time-out */
+ tmp_cr8 = inl(db->ioaddr + DCR8);
+ if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+ db->reset_cr8++;
+ db->wait_reset = 1;
+ }
+ db->interval_rx_cnt = 0;
+
+ /* TX polling kick monitor */
+ if ( db->tx_packet_cnt &&
+ time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
+ outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
+
+ /* TX Timeout */
+ if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
+ db->reset_TXtimeout++;
+ db->wait_reset = 1;
+ printk(KERN_WARNING "%s: Tx timeout - resetting\n",
+ dev->name);
+ }
+ }
+
+ if (db->wait_reset) {
+ DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+ db->reset_count++;
+ dmfe_dynamic_reset(dev);
+ db->first_in_callback = 0;
+ db->timer.expires = DMFE_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ /* Link status check, Dynamic media type change */
+ if (db->chip_id == PCI_DM9132_ID)
+ tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
+ else
+ tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
+
+ if ( ((db->chip_id == PCI_DM9102_ID) &&
+ (db->chip_revision == 0x02000030)) ||
+ ((db->chip_id == PCI_DM9132_ID) &&
+ (db->chip_revision == 0x02000010)) ) {
+ /* DM9102A Chip */
+ if (tmp_cr12 & 2)
+ tmp_cr12 = 0x0; /* Link failed */
+ else
+ tmp_cr12 = 0x3; /* Link OK */
+ }
+
+ if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+ /* Link Failed */
+ DMFE_DBUG(0, "Link Failed", tmp_cr12);
+ db->link_failed = 1;
+
+ /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
+ /* AUTO mode or forced 1M HomeRun/LongRun does not need this */
+ if ( !(db->media_mode & 0x38) )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+
+ /* AUTO mode, if INT phyxcer link failed, select EXT device */
+ if (db->media_mode & DMFE_AUTO) {
+ /* 10/100M link failed, use 1M Home-Net */
+ db->cr6_data|=0x00040000; /* bit18=1, MII */
+ db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ } else
+ if ((tmp_cr12 & 0x3) && db->link_failed) {
+ DMFE_DBUG(0, "Link link OK", tmp_cr12);
+ db->link_failed = 0;
+
+ /* Auto Sense Speed */
+ if ( (db->media_mode & DMFE_AUTO) &&
+ dmfe_sense_speed(db) )
+ db->link_failed = 1;
+ dmfe_process_mode(db);
+ /* SHOW_MEDIA_TYPE(db->op_mode); */
+ }
+
+ /* HPNA remote command check */
+ if (db->HPNA_command & 0xf00) {
+ db->HPNA_timer--;
+ if (!db->HPNA_timer)
+ dmfe_HPNA_remote_cmd_chk(db);
+ }
+
+ /* Timer active again */
+ db->timer.expires = DMFE_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+
+/*
+ * Dynamically reset the DM910X board:
+ * Stop the DM910X board
+ * Free Tx/Rx allocated memory
+ * Reset the DM910X board
+ * Re-initialize the DM910X board
+ */
+
+static void dmfe_dynamic_reset(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
+
+ /* Stop the MAC controller */
+ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
+ update_cr6(db->cr6_data, dev->base_addr);
+ outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
+ outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+ /* Disable upper layer interface */
+ netif_stop_queue(dev);
+
+ /* Free allocated Rx buffers */
+ dmfe_free_rxbuffer(db);
+
+ /* system variable init */
+ db->tx_packet_cnt = 0;
+ db->tx_queue_cnt = 0;
+ db->rx_avail_cnt = 0;
+ db->link_failed = 1;
+ db->wait_reset = 0;
+
+ /* Re-initialize the DM910X board */
+ dmfe_init_dm910x(dev);
+
+ /* Restart upper layer interface */
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Free all allocated rx buffers
+ */
+
+static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
+{
+ DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
+
+ /* free allocated rx buffer */
+ while (db->rx_avail_cnt) {
+ dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+ db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+ db->rx_avail_cnt--;
+ }
+}
+
+
+/*
+ * Reuse the SK buffer
+ */
+
+static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
+{
+ struct rx_desc *rxptr = db->rx_insert_ptr;
+
+ if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+ rxptr->rx_skb_ptr = skb;
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ db->rx_avail_cnt++;
+ db->rx_insert_ptr = rxptr->next_rx_desc;
+ } else
+ DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ * Initialize transmit/Receive descriptor
+ * Using Chain structure, and allocate Tx/Rx buffer
+ */
+
+static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
+{
+ struct tx_desc *tmp_tx;
+ struct rx_desc *tmp_rx;
+ unsigned char *tmp_buf;
+ dma_addr_t tmp_tx_dma, tmp_rx_dma;
+ dma_addr_t tmp_buf_dma;
+ int i;
+
+ DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
+
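+ /* The descriptor pool is one DMA-coherent block: TX_DESC_CNT tx descriptors followed by RX_DESC_CNT rx descriptors */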
+ /* tx descriptor start pointer */
+ db->tx_insert_ptr = db->first_tx_desc;
+ db->tx_remove_ptr = db->first_tx_desc;
+ outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
+
+ /* rx descriptor start pointer */
+ db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->rx_insert_ptr = db->first_rx_desc;
+ db->rx_ready_ptr = db->first_rx_desc;
+ outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
+
+ /* Init Transmit chain */
+ tmp_buf = db->buf_pool_start;
+ tmp_buf_dma = db->buf_pool_dma_start;
+ tmp_tx_dma = db->first_tx_desc_dma;
+ for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+ tmp_tx->tx_buf_ptr = tmp_buf;
+ tmp_tx->tdes0 = cpu_to_le32(0);
+ tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
+ tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+ tmp_tx_dma += sizeof(struct tx_desc);
+ tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+ tmp_tx->next_tx_desc = tmp_tx + 1;
+ tmp_buf = tmp_buf + TX_BUF_ALLOC;
+ tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+ }
+ (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+ tmp_tx->next_tx_desc = db->first_tx_desc;
+
+ /* Init Receive descriptor chain */
+ tmp_rx_dma=db->first_rx_desc_dma;
+ for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+ tmp_rx->rdes0 = cpu_to_le32(0);
+ tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+ tmp_rx_dma += sizeof(struct rx_desc);
+ tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+ tmp_rx->next_rx_desc = tmp_rx + 1;
+ }
+ (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+ tmp_rx->next_rx_desc = db->first_rx_desc;
+
+ /* pre-allocate Rx buffer */
+ allocate_rx_buffer(db);
+}
+
+
+/*
+ * Update CR6 value
+ * First stop the DM910X, then write the value and restart
+ */
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+ u32 cr6_tmp;
+
+ cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
+ outl(cr6_tmp, ioaddr + DCR6);
+ udelay(5);
+ outl(cr6_data, ioaddr + DCR6);
+ udelay(5);
+}
+
+
+/*
+ * Send a setup frame for DM9132
+ * This setup frame initializes the DM910X address filter mode
+*/
+
+static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
+{
+ struct dev_mc_list *mcptr;
+ u16 * addrptr;
+ unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
+ u32 hash_val;
+ u16 i, hash_table[4];
+
+ DMFE_DBUG(0, "dm9132_id_table()", 0);
+
+ /* Node address */
+ addrptr = (u16 *) dev->dev_addr;
+ outw(addrptr[0], ioaddr);
+ ioaddr += 4;
+ outw(addrptr[1], ioaddr);
+ ioaddr += 4;
+ outw(addrptr[2], ioaddr);
+ ioaddr += 4;
+
+ /* Clear Hash Table */
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0x0;
+
+ /* broadcast address */
+ hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
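+ /* The low 6 bits of the CRC select one of the 64 bits in the 4 x 16-bit hash filter */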
+ for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
+ }
+
+ /* Write the hash table to MAC MD table */
+ for (i = 0; i < 4; i++, ioaddr += 4)
+ outw(hash_table[i], ioaddr);
+}
+
+
+/*
+ * Send a setup frame for DM9102/DM9102A
+ * This setup frame initializes the DM910X address filter mode
+ */
+
+static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ struct dev_mc_list *mcptr;
+ struct tx_desc *txptr;
+ u16 * addrptr;
+ u32 * suptr;
+ int i;
+
+ DMFE_DBUG(0, "send_filter_frame()", 0);
+
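+ /* Build a setup frame of 16 perfect-filter entries, each three 16-bit words: node address, broadcast, then the multicast list padded with broadcast entries */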
+ txptr = db->tx_insert_ptr;
+ suptr = (u32 *) txptr->tx_buf_ptr;
+
+ /* Node address */
+ addrptr = (u16 *) dev->dev_addr;
+ *suptr++ = addrptr[0];
+ *suptr++ = addrptr[1];
+ *suptr++ = addrptr[2];
+
+ /* broadcast address */
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+
+ /* fit the multicast address */
+ for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ addrptr = (u16 *) mcptr->dmi_addr;
+ *suptr++ = addrptr[0];
+ *suptr++ = addrptr[1];
+ *suptr++ = addrptr[2];
+ }
+
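+ /* Pad the remaining perfect-filter slots with the broadcast address */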
+ for (; i<14; i++) {
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ }
+
+ /* prepare the setup frame */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+ txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+ /* Resource Check and Send the setup packet */
+ if (!db->tx_packet_cnt) {
+ /* Resource Empty */
+ db->tx_packet_cnt++;
+ txptr->tdes0 = cpu_to_le32(0x80000000);
+ update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ update_cr6(db->cr6_data, dev->base_addr);
+ dev->trans_start = jiffies;
+ } else
+ db->tx_queue_cnt++; /* Put in TX queue */
+}
+
+
+/*
+ * Allocate rx buffers,
+ * filling as many Rx descriptors as possible
+ */
+
+static void allocate_rx_buffer(struct dmfe_board_info *db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb;
+
+ rxptr = db->rx_insert_ptr;
+
+ while(db->rx_avail_cnt < RX_DESC_CNT) {
+ if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ break;
+ rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ rxptr = rxptr->next_rx_desc;
+ db->rx_avail_cnt++;
+ }
+
+ db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ * Read one word of data from the serial ROM
+ */
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+ int i;
+ u16 srom_data = 0;
+ long cr9_ioaddr = ioaddr + DCR9;
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+ /* Send the Read Command 110b */
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+ /* Send the offset */
+ for (i = 5; i >= 0; i--) {
+ srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+ SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+ }
+
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
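+ /* Clock the 16 data bits out of the SROM, MSB first */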
+ for (i = 16; i > 0; i--) {
+ outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+ udelay(5);
+ srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+ udelay(5);
+ }
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ return srom_data;
+}
+
+
+/*
+ * Auto sense the media mode
+ */
+
+static u8 dmfe_sense_speed(struct dmfe_board_info * db)
+{
+ u8 ErrFlag = 0;
+ u16 phy_mode;
+
+ /* CR6 bit18=0, select 10/100M */
+ update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
+
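+ /* Read the MII status register twice: the link bit is latched, so only the second read reflects the current state */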
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+
+ if ( (phy_mode & 0x24) == 0x24 ) {
+ if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
+ else /* DM9102/DM9102A */
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
+ /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+ switch (phy_mode) {
+ case 0x1000: db->op_mode = DMFE_10MHF; break;
+ case 0x2000: db->op_mode = DMFE_10MFD; break;
+ case 0x4000: db->op_mode = DMFE_100MHF; break;
+ case 0x8000: db->op_mode = DMFE_100MFD; break;
+ default: db->op_mode = DMFE_10MHF;
+ ErrFlag = 1;
+ break;
+ }
+ } else {
+ db->op_mode = DMFE_10MHF;
+ DMFE_DBUG(0, "Link Failed :", phy_mode);
+ ErrFlag = 1;
+ }
+
+ return ErrFlag;
+}
+
+
+/*
+ * Set 10/100 phyxcer capability
+ * AUTO mode : phyxcer register4 is NIC capability
+ * Force mode: phyxcer register4 is the force media
+ */
+
+static void dmfe_set_phyxcer(struct dmfe_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Select 10/100M phyxcer */
+ db->cr6_data &= ~0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+
+ /* DM9009 Chip: Phyxcer reg18 bit12=0 */
+ if (db->chip_id == PCI_DM9009_ID) {
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
+ phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
+ }
+
+ /* Phyxcer capability setting */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+ if (db->media_mode & DMFE_AUTO) {
+ /* AUTO Mode */
+ phy_reg |= db->PHY_reg4;
+ } else {
+ /* Force Mode */
+ switch(db->media_mode) {
+ case DMFE_10MHF: phy_reg |= 0x20; break;
+ case DMFE_10MFD: phy_reg |= 0x40; break;
+ case DMFE_100MHF: phy_reg |= 0x80; break;
+ case DMFE_100MFD: phy_reg |= 0x100; break;
+ }
+ if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
+ }
+
+ /* Write new capability to Phyxcer Reg4 */
+ if ( !(phy_reg & 0x01e0)) {
+ phy_reg|=db->PHY_reg4;
+ db->media_mode|=DMFE_AUTO;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+ /* Restart Auto-Negotiation */
+ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
+ if ( !db->chip_type )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+}
+
+
+/*
+ * Process op-mode
+ * AUTO mode : PHY controller in Auto-negotiation Mode
+ * Force mode: PHY controller in force mode with HUB
+ * N-way force capability with SWITCH
+ */
+
+static void dmfe_process_mode(struct dmfe_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Full Duplex Mode Check */
+ if (db->op_mode & 0x4)
+ db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
+ else
+ db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
+
+ /* Transceiver Selection */
+ if (db->op_mode & 0x10) /* 1M HomePNA */
+ db->cr6_data |= 0x40000;/* External MII select */
+ else
+ db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
+
+ update_cr6(db->cr6_data, db->ioaddr);
+
+ /* 10/100M phyxcer force mode is needed */
+ if ( !(db->media_mode & 0x18)) {
+ /* Force Mode */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+ if ( !(phy_reg & 0x1) ) {
+ /* partner without N-Way capability */
+ phy_reg = 0x0;
+ switch(db->op_mode) {
+ case DMFE_10MHF: phy_reg = 0x0; break;
+ case DMFE_10MFD: phy_reg = 0x100; break;
+ case DMFE_100MHF: phy_reg = 0x2000; break;
+ case DMFE_100MFD: phy_reg = 0x2100; break;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
+ mdelay(20);
+ phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ }
+ }
+}
+
+
+/*
+ * Write a word to Phy register
+ */
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+{
+ u16 i;
+ unsigned long ioaddr;
+
+ if (chip_id == PCI_DM9132_ID) {
+ ioaddr = iobase + 0x80 + offset * 4;
+ outw(phy_data, ioaddr);
+ } else {
+ /* DM9102/DM9102A Chip */
+ ioaddr = iobase + DCR9;
+
+ /* Send the synchronization preamble (35 clock cycles) to the PHY controller */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send write command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Send the write turnaround bits (10) */
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+
+ /* Write the data word to the PHY controller */
+ for ( i = 0x8000; i > 0; i >>= 1)
+ phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+ }
+}
+
+
+/*
+ * Read a word of data from a phy register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+ int i;
+ u16 phy_data;
+ unsigned long ioaddr;
+
+ if (chip_id == PCI_DM9132_ID) {
+ /* DM9132 Chip */
+ ioaddr = iobase + 0x80 + offset * 4;
+ phy_data = inw(ioaddr);
+ } else {
+ /* DM9102/DM9102A Chip */
+ ioaddr = iobase + DCR9;
+
+ /* Send the synchronization preamble (35 clock cycles) to the PHY controller */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send read command(10) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Skip transition state */
+ phy_read_1bit(ioaddr);
+
+ /* read 16bit data */
+ for (phy_data = 0, i = 0; i < 16; i++) {
+ phy_data <<= 1;
+ phy_data |= phy_read_1bit(ioaddr);
+ }
+ }
+
+ return phy_data;
+}
+
+
+/*
+ * Write one bit data to Phy Controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
+{
+ outl(phy_data, ioaddr); /* MII Clock Low */
+ udelay(1);
+ outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
+ udelay(1);
+ outl(phy_data, ioaddr); /* MII Clock Low */
+ udelay(1);
+}
+
+
+/*
+ * Read one bit phy data from PHY controller
+ */
+
+static u16 phy_read_1bit(unsigned long ioaddr)
+{
+ u16 phy_data;
+
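+ /* Raise then lower the MII clock; the bit driven by the PHY is sampled from bit 19 while the clock is high */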
+ outl(0x50000, ioaddr);
+ udelay(1);
+ phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+ outl(0x40000, ioaddr);
+ udelay(1);
+
+ return phy_data;
+}
+
+
+/*
+ * Parse the SROM and set the media mode
+ */
+
+static void dmfe_parse_srom(struct dmfe_board_info * db)
+{
+ char * srom = db->srom;
+ int dmfe_mode, tmp_reg;
+
+ DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
+
+ /* Init CR15 */
+ db->cr15_data = CR15_DEFAULT;
+
+ /* Check SROM Version */
+ if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
+ /* SROM V4.01 */
+ /* Get the NIC's supported media modes */
+ db->NIC_capability = le16_to_cpup(srom + 34);
+ db->PHY_reg4 = 0;
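+ /* Map each supported media bit onto the matching MII register 4 advertisement bit */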
+ for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
+ switch( db->NIC_capability & tmp_reg ) {
+ case 0x1: db->PHY_reg4 |= 0x0020; break;
+ case 0x2: db->PHY_reg4 |= 0x0040; break;
+ case 0x4: db->PHY_reg4 |= 0x0080; break;
+ case 0x8: db->PHY_reg4 |= 0x0100; break;
+ }
+ }
+
+ /* Check whether a media mode is forced */
+ dmfe_mode = le32_to_cpup(srom + 34) & le32_to_cpup(srom + 36);
+ switch(dmfe_mode) {
+ case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
+ case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
+ case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
+ case 0x100:
+ case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
+ }
+
+ /* Special Function setting */
+ /* VLAN function */
+ if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
+ db->cr15_data |= 0x40;
+
+ /* Flow Control */
+ if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
+ db->cr15_data |= 0x400;
+
+ /* TX pause packet */
+ if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
+ db->cr15_data |= 0x9800;
+ }
+
+ /* Parse HPNA parameter */
+ db->HPNA_command = 1;
+
+ /* Accept remote command or not */
+ if (HPNA_rx_cmd == 0)
+ db->HPNA_command |= 0x8000;
+
+ /* Issue remote command & operation mode */
+ if (HPNA_tx_cmd == 1)
+ switch(HPNA_mode) { /* Issue Remote Command */
+ case 0: db->HPNA_command |= 0x0904; break;
+ case 1: db->HPNA_command |= 0x0a00; break;
+ case 2: db->HPNA_command |= 0x0506; break;
+ case 3: db->HPNA_command |= 0x0602; break;
+ }
+ else
+ switch(HPNA_mode) { /* Don't Issue */
+ case 0: db->HPNA_command |= 0x0004; break;
+ case 1: db->HPNA_command |= 0x0000; break;
+ case 2: db->HPNA_command |= 0x0006; break;
+ case 3: db->HPNA_command |= 0x0002; break;
+ }
+
+ /* Check whether a DM9801 or DM9802 is present */
+ db->HPNA_present = 0;
+ update_cr6(db->cr6_data|0x40000, db->ioaddr);
+ tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
+ if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
+ /* DM9801 or DM9802 present */
+ db->HPNA_timer = 8;
+ if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
+ /* DM9801 HomeRun */
+ db->HPNA_present = 1;
+ dmfe_program_DM9801(db, tmp_reg);
+ } else {
+ /* DM9802 LongRun */
+ db->HPNA_present = 2;
+ dmfe_program_DM9802(db);
+ }
+ }
+
+}
+
+
+/*
+ * Init HomeRun DM9801
+ */
+
+static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
+{
+ uint reg17, reg25;
+
+ if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
+ switch(HPNA_rev) {
+ case 0xb900: /* DM9801 E3 */
+ db->HPNA_command |= 0x1000;
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
+ reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ break;
+ case 0xb901: /* DM9801 E4 */
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
+ break;
+ case 0xb902: /* DM9801 E5 */
+ case 0xb903: /* DM9801 E6 */
+ default:
+ db->HPNA_command |= 0x1000;
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
+ break;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
+}
+
+
+/*
+ * Init LongRun DM9802
+ */
+
+static void dmfe_program_DM9802(struct dmfe_board_info * db)
+{
+ uint phy_reg;
+
+ if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
+ phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
+}
+
+
+/*
+ * Check remote HPNA power and speed status. If not correct,
+ * issue command again.
+*/
+
+static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
+{
+ uint phy_reg;
+
+ /* Got remote device status */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
+ switch(phy_reg) {
+ case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
+ case 0x20: phy_reg = 0x0900;break; /* LP/HS */
+ case 0x40: phy_reg = 0x0600;break; /* HP/LS */
+ case 0x60: phy_reg = 0x0500;break; /* HP/HS */
+ }
+
+ /* Check whether the remote device status matches our setting */
+ if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ db->HPNA_timer=8;
+ } else
+ db->HPNA_timer=600; /* Matched; check again every 10 minutes */
+}
+
+
+
+static struct pci_device_id dmfe_pci_tbl[] = {
+ { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
+ { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
+ { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
+ { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
+
+
+static struct pci_driver dmfe_driver = {
+ .name = "dmfe",
+ .id_table = dmfe_pci_tbl,
+ .probe = dmfe_init_one,
+ .remove = __devexit_p(dmfe_remove_one),
+};
+
+MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
+MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(debug, int, 0);
+module_param(mode, byte, 0);
+module_param(cr6set, int, 0);
+module_param(chkmode, byte, 0);
+module_param(HPNA_mode, byte, 0);
+module_param(HPNA_rx_cmd, byte, 0);
+module_param(HPNA_tx_cmd, byte, 0);
+module_param(HPNA_NoiseFloor, byte, 0);
+module_param(SF_mode, byte, 0);
+MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
+
+/* Description:
+ * when the user runs insmod to add the module, the system invokes init_module()
+ * to initialize and register it.
+ */
+
+static int __init dmfe_init_module(void)
+{
+ int rc;
+
+ printk(version);
+ printed_version = 1;
+
+ DMFE_DBUG(0, "init_module() ", debug);
+
+ if (debug)
+ dmfe_debug = debug; /* set debug flag */
+ if (cr6set)
+ dmfe_cr6_user_set = cr6set;
+
+ switch(mode) {
+ case DMFE_10MHF:
+ case DMFE_100MHF:
+ case DMFE_10MFD:
+ case DMFE_100MFD:
+ case DMFE_1M_HPNA:
+ dmfe_media_mode = mode;
+ break;
+ default:dmfe_media_mode = DMFE_AUTO;
+ break;
+ }
+
+ if (HPNA_mode > 4)
+ HPNA_mode = 0; /* Default: LP/HS */
+ if (HPNA_rx_cmd > 1)
+ HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
+ if (HPNA_tx_cmd > 1)
+ HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
+ if (HPNA_NoiseFloor > 15)
+ HPNA_NoiseFloor = 0;
+
+ rc = pci_module_init(&dmfe_driver);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+
+/*
+ * Description:
+ * when the user runs rmmod to remove the module, the system invokes cleanup_module()
+ * to unregister all registered services.
+ */
+
+static void __exit dmfe_cleanup_module(void)
+{
+ DMFE_DBUG(0, "dmfe_clean_module() ", debug);
+ pci_unregister_driver(&dmfe_driver);
+}
+
+module_init(dmfe_init_module);
+module_exit(dmfe_cleanup_module);
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
new file mode 100644
index 000000000000..ac5bf49ff60f
--- /dev/null
+++ b/drivers/net/tulip/eeprom.c
@@ -0,0 +1,357 @@
+/*
+ drivers/net/tulip/eeprom.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/init.h>
+#include <asm/unaligned.h>
+
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+ additional cards, so I'll be verbose about what is going on.
+ */
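+
+/* Layout summary (descriptive note, as the parser below reads the SROM):
+ * an old-style, station-address-only EEPROM is detected by bytes 0-7
+ * matching bytes 16-23; ee_data[19] > 1 marks a multiport board; and
+ * ee_data[27] holds the offset of the media info list whose leaves are
+ * walked to build the mediatable. */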
+
+/* Known cards that have old-style EEPROMs. */
+static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
+ {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+ 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+ {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0903, 0x006D, /* 100baseTx */
+ 0x0905, 0x006D, /* 100baseTx-FD */ }},
+ {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+ 0x0107, 0x8021, /* 100baseFx */
+ 0x0108, 0x8021, /* 100baseFx-FD */
+ 0x0100, 0x009E, /* 10baseT */
+ 0x0104, 0x009E, /* 10baseT-FD */
+ 0x0103, 0x006D, /* 100baseTx */
+ 0x0105, 0x006D, /* 100baseTx-FD */ }},
+ {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+ 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+ 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+ {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+ 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
+ 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
+ 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+ 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+ 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+ }},
+ {"NetWinder", 0x00, 0x10, 0x57,
+ /* Default media = MII
+ * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
+ */
+ { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
+ },
+ {NULL}};
+
+
+static const char *block_name[] __devinitdata = {
+ "21140 non-MII",
+ "21140 MII PHY",
+ "21142 Serial PHY",
+ "21142 MII PHY",
+ "21143 SYM PHY",
+ "21143 reset method"
+};
+
+
+/**
+ * tulip_build_fake_mediatable - Build a fake mediatable entry.
+ * @tp: Ptr to the tulip private data.
+ *
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
+ * SROM and cannot be handled under the fixup routine.  These cards
+ * still need a valid mediatable entry for correct csr12 setup and
+ * mii handling.
+ *
+ * Since this is currently a parisc-linux specific function, the
+ * #ifdef __hppa__ should completely optimize this function away for
+ * non-parisc hardware.
+ */
+static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
+{
+#ifdef CONFIG_GSC
+ if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) {
+ static unsigned char leafdata[] =
+ { 0x01, /* phy number */
+ 0x02, /* gpr setup sequence length */
+ 0x02, 0x00, /* gpr setup sequence */
+ 0x02, /* phy reset sequence length */
+ 0x01, 0x00, /* phy reset sequence */
+ 0x00, 0x78, /* media capabilities */
+ 0x00, 0xe0, /* nway advertisement */
+ 0x00, 0x05, /* fdx bit map */
+ 0x00, 0x06 /* ttm bit map */
+ };
+
+ tp->mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL);
+
+ if (tp->mtable == NULL)
+ return; /* Horrible, impossible failure. */
+
+ tp->mtable->defaultmedia = 0x800;
+ tp->mtable->leafcount = 1;
+ tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */
+ tp->mtable->has_nonmii = 0;
+ tp->mtable->has_reset = 0;
+ tp->mtable->has_mii = 1;
+ tp->mtable->csr15dir = tp->mtable->csr15val = 0;
+ tp->mtable->mleaf[0].type = 1;
+ tp->mtable->mleaf[0].media = 11;
+ tp->mtable->mleaf[0].leafdata = &leafdata[0];
+ tp->flags |= HAS_PHY_IRQ;
+ tp->csr12_shadow = -1;
+ }
+#endif
+}
+
+void __devinit tulip_parse_eeprom(struct net_device *dev)
+{
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable;
+ static unsigned char *last_ee_data;
+ static int controller_index;
+ struct tulip_private *tp = netdev_priv(dev);
+ unsigned char *ee_data = tp->eeprom;
+ int i;
+
+ tp->mtable = NULL;
+ /* Detect an old-style (SA only) EEPROM layout:
+ memcmp(eedata, eedata+16, 8). */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ break;
+ if (i >= 8) {
+ if (ee_data[0] == 0xff) {
+ if (last_mediatable) {
+ controller_index++;
+ printk(KERN_INFO "%s: Controller %d of multiport board.\n",
+ dev->name, controller_index);
+ tp->mtable = last_mediatable;
+ ee_data = last_ee_data;
+ goto subsequent_board;
+ } else
+ printk(KERN_INFO "%s: Missing EEPROM, this interface may "
+ "not work correctly!\n",
+ dev->name);
+ return;
+ }
+ /* Do a fix-up based on the vendor half of the station address prefix. */
+ for (i = 0; eeprom_fixups[i].name; i++) {
+ if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+ && dev->dev_addr[1] == eeprom_fixups[i].addr1
+ && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+ i++; /* An Accton EN1207, not an outlaw Maxtech. */
+ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+ sizeof(eeprom_fixups[i].newtable));
+ printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
+ " substitute media control info.\n",
+ dev->name, eeprom_fixups[i].name);
+ break;
+ }
+ }
+ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+ printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+ "information.\n",
+ dev->name);
+ return;
+ }
+ }
+
+ controller_index = 0;
+ if (ee_data[19] > 1) { /* Multiport board. */
+ last_ee_data = ee_data;
+ }
+subsequent_board:
+
+ if (ee_data[27] == 0) { /* No valid media table. */
+ tulip_build_fake_mediatable(tp);
+ } else {
+ unsigned char *p = (void *)ee_data + ee_data[27];
+ unsigned char csr12dir = 0;
+ int count, new_advertise = 0;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+ p += 2;
+ if (tp->flags & CSR12_IN_SROM)
+ csr12dir = *p++;
+ count = *p++;
+
+ /* there is no phy information, don't even try to build mtable */
+ if (count == 0) {
+ if (tulip_debug > 0)
+ printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ return;
+ }
+
+ mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
+ GFP_KERNEL);
+ if (mtable == NULL)
+ return; /* Horrible, impossible failure. */
+ last_mediatable = tp->mtable = mtable;
+ mtable->defaultmedia = media;
+ mtable->leafcount = count;
+ mtable->csr12dir = csr12dir;
+ mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+ if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+ leaf->type = 0;
+ leaf->media = p[0] & 0x3f;
+ leaf->leafdata = p;
+ if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+ leaf->type = p[1];
+ if (p[1] == 0x05) {
+ mtable->has_reset = i;
+ leaf->media = p[2] & 0x0f;
+ } else if (tp->chip_id == DM910X && p[1] == 0x80) {
+ /* Hack to ignore Davicom delay period block */
+ mtable->leafcount--;
+ count--;
+ i--;
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ continue;
+ } else if (p[1] & 1) {
+ int gpr_len, reset_len;
+
+ mtable->has_mii = 1;
+ leaf->media = 11;
+ gpr_len=p[3]*2;
+ reset_len=p[4+gpr_len]*2;
+ new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
+ } else {
+ mtable->has_nonmii = 1;
+ leaf->media = p[2] & MEDIA_MASK;
+ /* Davicom's media number for 100BaseTX is strange */
+ if (tp->chip_id == DM910X && leaf->media == 1)
+ leaf->media = 3;
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ case 3: new_advertise |= 0x0080; break;
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+ if (p[1] == 2 && leaf->media == 0) {
+ if (p[2] & 0x40) {
+ u32 base15 = get_unaligned((u16*)&p[7]);
+ mtable->csr15dir =
+ (get_unaligned((u16*)&p[9])<<16) + base15;
+ mtable->csr15val =
+ (get_unaligned((u16*)&p[11])<<16) + base15;
+ } else {
+ mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+ mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+ }
+ }
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+ if (tulip_debug > 1 && leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
+ "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+ dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+ printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
+ "by a %s (%d) block.\n",
+ dev->name, i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
+ }
+ if (new_advertise)
+ tp->sym_advertise = new_advertise;
+ }
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ Even at 33 MHz, current PCI implementations don't overrun the EEPROM clock.
+ We add a bus turn-around to ensure that this remains true. */
+#define eeprom_delay() ioread32(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD (6)
+
+/* Note: this routine returns extra data bits for size detection. */
+int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ee_addr = tp->base_addr + CSR9;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ iowrite32(EE_ENB & ~EE_CS, ee_addr);
+ iowrite32(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ iowrite32(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ iowrite32(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ iowrite32(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ iowrite32(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ iowrite32(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ iowrite32(EE_ENB & ~EE_CS, ee_addr);
+ return (tp->flags & HAS_SWAPPED_SEEPROM) ? swab16(retval) : retval;
+}
+
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
new file mode 100644
index 000000000000..afb5cda9d8e1
--- /dev/null
+++ b/drivers/net/tulip/interrupt.c
@@ -0,0 +1,786 @@
+/*
+ drivers/net/tulip/interrupt.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/config.h>
+#include <linux/etherdevice.h>
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+#define MIT_SIZE 15
+#define MIT_TABLE 15 /* We use 0 or max */
+
+static unsigned int mit_table[MIT_SIZE+1] =
+{
+ /* CSR11 21143 hardware Mitigation Control Interrupt
+ We use only RX mitigation; other techniques are used for
+ TX intr. mitigation.
+
+ 31 Cycle Size (timer control)
+ 30:27 TX timer in 16 * Cycle size
+ 26:24 TX No pkts before Int.
+ 23:20 RX timer in Cycle size
+ 19:17 RX No pkts before Int.
+ 16 Continuous Mode (CM)
+ */
+
+ 0x0, /* IM disabled */
+ 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
+ 0x80150000,
+ 0x80270000,
+ 0x80370000,
+ 0x80490000,
+ 0x80590000,
+ 0x80690000,
+ 0x807B0000,
+ 0x808B0000,
+ 0x809D0000,
+ 0x80AD0000,
+ 0x80BD0000,
+ 0x80CF0000,
+ 0x80DF0000,
+// 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
+ 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
+};
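+
+/* Worked example (added note; decoding per the bit layout above):
+ * 0x80150000 = bit 31 set (cycle size), RX timer = 1 (bits 23:20),
+ * 2 RX packets before interrupt (bits 19:17), CM = 1 (bit 16) -
+ * which matches the "RX time = 1, RX pkts = 2, CM = 1" annotation. */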
+#endif
+
+
+int tulip_refill_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry;
+ int refilled = 0;
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break;
+
+ mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[entry].mapping = mapping;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+ refilled++;
+ }
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+ if(tp->chip_id == LC82C168) {
+ if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
+ /* Rx stopped due to out of buffers,
+ * restart it
+ */
+ iowrite32(0x01, tp->base_addr + CSR2);
+ }
+ }
+ return refilled;
+}
+
+#ifdef CONFIG_TULIP_NAPI
+
+void oom_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
+int tulip_poll(struct net_device *dev, int *budget)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = *budget;
+ int received = 0;
+
+ if (!netif_running(dev))
+ goto done;
+
+ if (rx_work_limit > dev->quota)
+ rx_work_limit = dev->quota;
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+
+/* that one buffer is needed for mit activation; or might be a
+ bug in the ring buffer code; check later -- JHS*/
+
+ if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
+#endif
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+
+ do {
+ if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
+ printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+ break;
+ }
+ /* Acknowledge current RX interrupt sources. */
+ iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
+
+
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+
+ if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
+ break;
+
+ if (tulip_debug > 5)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+ goto not_done;
+
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tulip_rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+#if ! defined(__alpha__)
+ eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+ pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ tp->rx_buffers[entry].skb->tail,
+ pkt_len);
+#endif
+ pci_dma_sync_single_for_device(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ } else { /* Pass up the skb already on the Rx ring. */
+ char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
+ pkt_len);
+
+#ifndef final_version
+ if (tp->rx_buffers[entry].mapping !=
+ le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
+ dev->name,
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
+ }
+#endif
+
+ pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+ tp->stats.rx_bytes += pkt_len;
+ }
+ received++;
+
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
+ tulip_refill_rx(dev);
+
+ }
+
+ /* New ack strategy... irq does not ack Rx any longer
+ hopefully this helps */
+
+ /* Really bad things can happen here... If a new packet arrives
+ * and an irq arrives (tx or just due to an occasionally unset
+ * mask), it will be acked by the irq handler, but the new poll
+ * thread is not scheduled. It is a major hole in the design.
+ * No idea how to fix this if "playing with fire" fails
+ * tomorrow (night 011029). If it does not fail, we have won,
+ * finally: the amount of IO did not increase at all. */
+ } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
+
+done:
+
+ #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+
+ /* We use this simplistic scheme for IM. It's proven by
+ real life installations. We could keep IM enabled
+ continuously, but this would cause unnecessary latency.
+ Unfortunately we can't use all the NET_RX_* feedback here.
+ That would turn on IM for devices that are not contributing
+ to backlog congestion, at the cost of unnecessary latency.
+
+ We monitor the device RX-ring and have:
+
+ HW Interrupt Mitigation either ON or OFF.
+
+ ON: More than 1 pkt received (per intr.) OR we are dropping
+ OFF: Only 1 pkt received
+
+ Note. We only use the min and max (0, 15) settings from mit_table */
+
+
+ if( tp->flags & HAS_INTR_MITIGATION) {
+ if( received > 1 ) {
+ if( ! tp->mit_on ) {
+ tp->mit_on = 1;
+ iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
+ }
+ }
+ else {
+ if( tp->mit_on ) {
+ tp->mit_on = 0;
+ iowrite32(0, tp->base_addr + CSR11);
+ }
+ }
+ }
+
+#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
+
+ dev->quota -= received;
+ *budget -= received;
+
+ tulip_refill_rx(dev);
+
+ /* If RX ring is not full we are out of memory. */
+ if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
+
+ /* Remove us from polling list and enable RX intr. */
+
+ netif_rx_complete(dev);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+
+ /* The last op happens after poll completion. Which means the following:
+ * 1. it can race with disabling irqs in irq handler
+ * 2. it can race with disabling/enabling irqs in other poll threads
+ * 3. if an irq raised after beginning loop, it will be immediately
+ * triggered here.
+ *
+ * Summarizing: the logic results in some redundant irqs both
+ * due to races in masking and due to too late acking of already
+ * processed irqs. But it must not result in losing events.
+ */
+
+ return 0;
+
+ not_done:
+ if (!received) {
+
+ received = dev->quota; /* Should not happen */
+ }
+ dev->quota -= received;
+ *budget -= received;
+
+ if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
+ tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
+ tulip_refill_rx(dev);
+
+ if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
+
+ return 1;
+
+
+ oom: /* Executed with RX ints disabled */
+
+
+ /* Start timer, stop polling, but do not enable rx interrupts. */
+ mod_timer(&tp->oom_timer, jiffies+1);
+
+ /* Think: timer_pending() was an explicit signature of bug.
+ * Timer can be pending now but fired and completed
+ * before we did netif_rx_complete(). See? We would lose it. */
+
+ /* remove ourselves from the polling list */
+ netif_rx_complete(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_TULIP_NAPI */
+
+static int tulip_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int received = 0;
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+ if (tulip_debug > 5)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tulip_rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+#if ! defined(__alpha__)
+ eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+ pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ tp->rx_buffers[entry].skb->tail,
+ pkt_len);
+#endif
+ pci_dma_sync_single_for_device(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ } else { /* Pass up the skb already on the Rx ring. */
+ char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
+ pkt_len);
+
+#ifndef final_version
+ if (tp->rx_buffers[entry].mapping !=
+ le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
+ dev->name,
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
+ }
+#endif
+
+ pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+ tp->stats.rx_bytes += pkt_len;
+ }
+ received++;
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+ return received;
+}
+#endif /* CONFIG_TULIP_NAPI */
+
+static inline unsigned int phy_interrupt (struct net_device *dev)
+{
+#ifdef __hppa__
+ struct tulip_private *tp = netdev_priv(dev);
+ int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;
+
+ if (csr12 != tp->csr12_shadow) {
+ /* ack interrupt */
+ iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
+ tp->csr12_shadow = csr12;
+ /* do link change stuff */
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ /* clear irq ack bit */
+ iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
+
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr5;
+ int missed;
+ int rx = 0;
+ int tx = 0;
+ int oi = 0;
+ int maxrx = RX_RING_SIZE;
+ int maxtx = TX_RING_SIZE;
+ int maxoi = TX_RING_SIZE;
+#ifdef CONFIG_TULIP_NAPI
+ int rxd = 0;
+#else
+ int entry;
+#endif
+ unsigned int work_count = tulip_max_interrupt_work;
+ unsigned int handled = 0;
+
+ /* Let's see whether the interrupt really is for us */
+ csr5 = ioread32(ioaddr + CSR5);
+
+ if (tp->flags & HAS_PHY_IRQ)
+ handled = phy_interrupt (dev);
+
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ return IRQ_RETVAL(handled);
+
+ tp->nir++;
+
+ do {
+
+#ifdef CONFIG_TULIP_NAPI
+
+ if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
+ rxd++;
+ /* Mask RX intrs and add the device to poll list. */
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
+ netif_rx_schedule(dev);
+
+ if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
+ break;
+ }
+
+ /* Acknowledge the interrupt sources we handle here ASAP
+ the poll function does Rx and RxNoBuf acking */
+
+ iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
+
+#else
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+
+ if (csr5 & (RxIntr | RxNoBuf)) {
+ rx += tulip_rx(dev);
+ tulip_refill_rx(dev);
+ }
+
+#endif /* CONFIG_TULIP_NAPI */
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, ioread32(ioaddr + CSR5));
+
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+ unsigned int dirty_tx;
+
+ spin_lock(&tp->lock);
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ if (status & 0x8000) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+ if (status & 0x0200) tp->stats.tx_window_errors++;
+ if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ tp->stats.tx_heartbeat_errors++;
+ } else {
+ tp->stats.tx_bytes +=
+ tp->tx_buffers[entry].skb->len;
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tx++;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+ dev->name, dirty_tx, tp->cur_tx);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+ netif_wake_queue(dev);
+
+ tp->dirty_tx = dirty_tx;
+ if (csr5 & TxDied) {
+ if (tulip_debug > 2)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
+ tulip_restart_rxtx(tp);
+ }
+ spin_unlock(&tp->lock);
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ iowrite32(0, ioaddr + CSR1);
+ }
+ if (csr5 & (RxDied | RxNoBuf)) {
+ if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
+ iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
+ }
+ }
+ if (csr5 & RxDied) { /* Missed a Rx frame. */
+ tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ tp->stats.rx_errors++;
+ tulip_start_rxtx(tp);
+ }
+ /*
+ * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
+ * call is ever done under the spinlock
+ */
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+ if (csr5 & SytemError) {
+ int error = (csr5 >> 23) & 7;
+ /* oops, we hit a PCI error. The code produced corresponds
+ * to the reason:
+ * 0 - parity error
+ * 1 - master abort
+ * 2 - target abort
+ * Note that on parity error, we should do a software reset
+ * of the chip to get it back into a sane state (according
+ * to the 21142/3 docs that is).
+ * -- rmk
+ */
+ printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
+ dev->name, tp->nir, error);
+ }
+ /* Clear all error sources, included undocumented ones! */
+ iowrite32(0x0800f7ba, ioaddr + CSR5);
+ oi++;
+ }
+ if (csr5 & TimerInt) {
+
+ if (tulip_debug > 2)
+ printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+ dev->name, csr5);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tp->ttimer = 0;
+ oi++;
+ }
+ if (tx > maxtx || rx > maxrx || oi > maxoi) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+ "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+
+ /* Acknowledge all interrupt sources. */
+ iowrite32(0x8001ffff, ioaddr + CSR5);
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ /* Josip Loncaric at ICASE did extensive experimentation
+ to develop a good interrupt mitigation setting.*/
+ iowrite32(0x8b240000, ioaddr + CSR11);
+ } else if (tp->chip_id == LC82C168) {
+ /* the LC82C168 doesn't have a hw timer.*/
+ iowrite32(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ /* Mask all interrupting sources, set timer to
+ re-enable. */
+ iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
+ iowrite32(0x0012, ioaddr + CSR11);
+ }
+ break;
+ }
+
+ work_count--;
+ if (work_count == 0)
+ break;
+
+ csr5 = ioread32(ioaddr + CSR5);
+
+#ifdef CONFIG_TULIP_NAPI
+ if (rxd)
+ csr5 &= ~RxPollInt;
+ } while ((csr5 & (TxNoBuf |
+ TxDied |
+ TxIntr |
+ TimerInt |
+ /* Abnormal intr. */
+ RxDied |
+ TxFIFOUnderflow |
+ TxJabber |
+ TPLnkFail |
+ SytemError )) != 0);
+#else
+ } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
+
+ tulip_refill_rx(dev);
+
+ /* check if the card is in suspend mode */
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ if (tp->chip_id == LC82C168) {
+ iowrite32(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+ ioaddr + CSR7);
+ iowrite32(TimerInt, ioaddr + CSR5);
+ iowrite32(12, ioaddr + CSR11);
+ tp->ttimer = 1;
+ }
+ }
+ }
+#endif /* CONFIG_TULIP_NAPI */
+
+ if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
+ tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+ }
+
+ if (tulip_debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + CSR5));
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
new file mode 100644
index 000000000000..edae09a4b021
--- /dev/null
+++ b/drivers/net/tulip/media.c
@@ -0,0 +1,562 @@
+/*
+ drivers/net/tulip/media.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include "tulip.h"
+
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() ioread32(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+ MDIO protocol. It is just different enough from the EEPROM protocol
+ to not share code.  The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+static const unsigned char comet_miireg2offset[32] = {
+ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
+ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
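+
+/* Descriptive note: on COMET chips the internal PHY (ID 30) is not reached
+ * over bit-banged MDIO; its MII registers are memory-mapped at the CSR
+ * offsets listed above, and tulip_mdio_read()/tulip_mdio_write() below use
+ * this table to access them directly. */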
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
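+
+/* Frame format note (descriptive, as I read the code below): after a
+ * preamble of at least 32 ones, tulip_mdio_read() shifts out a 16-bit
+ * command word - leading one bits that merge into the preamble, the start
+ * and read-opcode bits, the 5-bit PHY address and the 5-bit register
+ * address - then clocks in 19 more bits (turnaround, 16 data bits and a
+ * wire-idle bit) and returns (retval >> 1) & 0xffff as the register value. */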
+
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ void __iomem *ioaddr = tp->base_addr;
+ void __iomem *mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return 0xffff;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ return ioread32(ioaddr + comet_miireg2offset[location]);
+ return 0xffff;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ int i = 1000;
+ iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ ioread32(ioaddr + 0xA0);
+ ioread32(ioaddr + 0xA0);
+ while (--i > 0) {
+ barrier();
+ if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000))
+ break;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xffff;
+ }
+
+ if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
+ int value;
+ int i = 1000;
+
+ value = ioread32(ioaddr + CSR9);
+ iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
+
+ value = (phy_id << 21) | (location << 16) | 0x08000000;
+ iowrite32(value, ioaddr + CSR10);
+
+ while(--i > 0) {
+ mdio_delay();
+ if(ioread32(ioaddr + CSR10) & 0x10000000)
+ break;
+ }
+ retval = ioread32(ioaddr + CSR10);
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xFFFF;
+ }
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ iowrite32(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ iowrite32(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return (retval>>1) & 0xffff;
+}
+
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+ int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+ void __iomem *ioaddr = tp->base_addr;
+ void __iomem *mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ iowrite32(val, ioaddr + comet_miireg2offset[location]);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ int i = 1000;
+ iowrite32(cmd, ioaddr + 0xA0);
+ do {
+ barrier();
+ if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000))
+ break;
+ } while (--i > 0);
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+ if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
+ int value;
+ int i = 1000;
+
+ value = ioread32(ioaddr + CSR9);
+ iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
+
+ value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
+ iowrite32(value, ioaddr + CSR10);
+
+ while(--i > 0) {
+ if (ioread32(ioaddr + CSR10) & 0x10000000)
+ break;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ }
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ iowrite32(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite32(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+}
+
+
+/* Set up the transceiver control registers for the selected media type. */
+void tulip_select_media(struct net_device *dev, int startup)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ struct mediatable *mtable = tp->mtable;
+ u32 new_csr6;
+ int i;
+
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+ " with control setting %2.2x.\n",
+ dev->name, p[1]);
+ dev->if_port = p[0];
+ if (startup)
+ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ iowrite32(p[1], ioaddr + CSR12);
+ new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+ break;
+ case 2: case 4: {
+ u16 setup[5];
+ u32 csr13val, csr14val, csr15dir, csr15val;
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ dev->if_port = p[0] & MEDIA_MASK;
+ if (tulip_media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+ "%4.4x/%4.4x.\n",
+ dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
+ csr13val = setup[0];
+ csr14val = setup[1];
+ csr15dir = (setup[3]<<16) | setup[2];
+ csr15val = (setup[4]<<16) | setup[2];
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(csr14val, ioaddr + CSR14);
+ iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
+ iowrite32(csr15val, ioaddr + CSR15); /* Data */
+ iowrite32(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+ csr14val = 0;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+ csr14val = t21142_csr14[dev->if_port];
+ if (startup) {
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(csr14val, ioaddr + CSR14);
+ }
+ iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
+ iowrite32(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) iowrite32(csr13val, ioaddr + CSR13);
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
+ dev->name, csr15dir, csr15val);
+ if (mleaf->type == 4)
+ new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+ }
+ case 1: case 3: {
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info, tmp_info;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+ if (mleaf->type == 3) { /* 21142 */
+ u16 *init_sequence = (u16*)(p+2);
+ u16 *reset_sequence = &((u16*)(p+3))[init_length];
+ int reset_length = p[2 + init_length*2];
+ misc_info = reset_sequence + reset_length;
+ if (startup)
+ for (i = 0; i < reset_length; i++)
+ iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+ for (i = 0; i < init_length; i++)
+ iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+ } else {
+ u8 *init_sequence = p + 2;
+ u8 *reset_sequence = p + 3 + init_length;
+ int reset_length = p[2 + init_length];
+ misc_info = (u16*)(reset_sequence + reset_length);
+ if (startup) {
+ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ for (i = 0; i < reset_length; i++)
+ iowrite32(reset_sequence[i], ioaddr + CSR12);
+ }
+ for (i = 0; i < init_length; i++)
+ iowrite32(init_sequence[i], ioaddr + CSR12);
+ }
+ tmp_info = get_u16(&misc_info[1]);
+ if (tmp_info)
+ tp->advertising[phy_num] = tmp_info | 1;
+ if (tmp_info && startup < 2) {
+ if (tp->mii_advertise == 0)
+ tp->mii_advertise = tp->advertising[phy_num];
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
+ dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+ }
+ break;
+ }
+ case 5: case 6: {
+ u16 setup[5];
+
+ new_csr6 = 0; /* FIXME */
+
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+
+ break;
+ }
+ default:
+ printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
+ dev->name, mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+ dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ iowrite32(0x0001, ioaddr + CSR15);
+ iowrite32(0x0201B07A, ioaddr + 0xB8);
+ } else if (startup) {
+ /* Start with 10mbps to do autonegotiation. */
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x0001B078, ioaddr + 0xB8);
+ iowrite32(0x0201B078, ioaddr + 0xB8);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ iowrite32(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ /* Trigger autonegotiation. */
+ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ } else {
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x1F078, ioaddr + 0xB8);
+ }
+ } else { /* Unknown chip type with no media table. */
+ if (tp->default_port == 0)
+ dev->if_port = tp->mii_cnt ? 11 : 3;
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (tulip_media_cap[dev->if_port] & MediaIsFx) {
+ new_csr6 = 0x02860000;
+ } else
+ new_csr6 = 0x03860000;
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: No media description table, assuming "
+ "%s transceiver, CSR12 %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
+ }
+
+ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
+ return;
+}
+
+/*
+ Check the MII negotiated duplex and change the CSR6 setting if
+ required.
+ Return 0 if everything is OK.
+ Return < 0 if the transceiver is missing or has no link beat.
+ */
+int tulip_check_duplex(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ unsigned int bmsr, lpa, negotiated, new_csr6;
+
+ bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
+ "%4.4x.\n", dev->name, bmsr, lpa);
+ if (bmsr == 0xffff)
+ return -2;
+ if ((bmsr & BMSR_LSTATUS) == 0) {
+ int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ if ((new_bmsr & BMSR_LSTATUS) == 0) {
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: No link beat on the MII interface,"
+ " status %4.4x.\n", dev->name, new_bmsr);
+ return -1;
+ }
+ }
+ negotiated = lpa & tp->advertising[0];
+ tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
+
+ new_csr6 = tp->csr6;
+
+ if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
+ else new_csr6 |= TxThreshold;
+ if (tp->full_duplex) new_csr6 |= FullDuplex;
+ else new_csr6 &= ~FullDuplex;
+
+ if (new_csr6 != tp->csr6) {
+ tp->csr6 = new_csr6;
+ tulip_restart_rxtx(tp);
+
+ if (tulip_debug > 0)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII"
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
+ return 1;
+ }
+
+ return 0;
+}
+
+void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int phyn, phy_idx = 0;
+ int mii_reg0;
+ int mii_advert;
+ unsigned int to_advert, new_bmcr, ane_switch;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ int phy = phyn & 0x1f;
+ int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & BMSR_100BASE4) == 0
+ && (mii_status & 0x7800) != 0)) {
+ /* preserve Becker logic, gain indentation level */
+ } else {
+ continue;
+ }
+
+ mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR);
+ mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE);
+ ane_switch = 0;
+
+ /* if not advertising at all, generate an
+ * advertising value from the capability
+ * bits in BMSR
+ */
+ if ((mii_advert & ADVERTISE_ALL) == 0) {
+ unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR);
+ mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
+ }
+
+ if (tp->mii_advertise) {
+ tp->advertising[phy_idx] =
+ to_advert = tp->mii_advertise;
+ } else if (tp->advertising[phy_idx]) {
+ to_advert = tp->advertising[phy_idx];
+ } else {
+ tp->advertising[phy_idx] =
+ tp->mii_advertise =
+ to_advert = mii_advert;
+ }
+
+ tp->phys[phy_idx++] = phy;
+
+ printk (KERN_INFO "tulip%d: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ board_idx, phy, mii_reg0, mii_status, mii_advert);
+
+ /* Fixup for DLink with miswired PHY. */
+ if (mii_advert != to_advert) {
+ printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
+ " previously advertising %4.4x.\n",
+ board_idx, to_advert, phy, mii_advert);
+ tulip_mdio_write (dev, phy, 4, to_advert);
+ }
+
+ /* Enable autonegotiation: some boards default to off. */
+ if (tp->default_port == 0) {
+ new_bmcr = mii_reg0 | BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0) {
+ new_bmcr |= BMCR_ANRESTART;
+ ane_switch = 1;
+ }
+ }
+ /* ...or disable nway, if forcing media */
+ else {
+ new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0)
+ ane_switch = 1;
+ }
+
+ /* clear out bits we never want at this point */
+ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
+ BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
+ BMCR_RESET);
+
+ if (tp->full_duplex)
+ new_bmcr |= BMCR_FULLDPLX;
+ if (tulip_media_cap[tp->default_port] & MediaIs100)
+ new_bmcr |= BMCR_SPEED100;
+
+ if (new_bmcr != mii_reg0) {
+ /* some phys need the ANE switch to
+ * happen before forced media settings
+ * will "take." However, we write the
+ * same value twice in order not to
+ * confuse the sane phys.
+ */
+ if (ane_switch) {
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ udelay (10);
+ }
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+ printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ board_idx);
+ tp->phys[0] = 1;
+ }
+}
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
new file mode 100644
index 000000000000..d9980bde7508
--- /dev/null
+++ b/drivers/net/tulip/pnic.c
@@ -0,0 +1,172 @@
+/*
+ drivers/net/tulip/pnic.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "tulip.h"
+
+
+void pnic_do_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ u32 phy_reg = ioread32(ioaddr + 0xB8);
+ u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+ if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ if (phy_reg & 0x20000000) dev->if_port = 5;
+ else if (phy_reg & 0x40000000) dev->if_port = 3;
+ else if (phy_reg & 0x10000000) dev->if_port = 4;
+ else if (phy_reg & 0x08000000) dev->if_port = 0;
+ tp->nwayset = 1;
+ new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+ iowrite32(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+ if (dev->if_port & 1)
+ iowrite32(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+ new_csr6 |= 0x00000200;
+ }
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int phy_reg = ioread32(ioaddr + 0xB8);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+ dev->name, phy_reg, csr5);
+ if (ioread32(ioaddr + CSR5) & TPLnkFail) {
+ iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+ /* If we use an external MII, then we mustn't use the
+ * internal negotiation.
+ */
+ if (tulip_media_cap[dev->if_port] & MediaIsMII)
+ return;
+ if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) {
+ tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ iowrite32(0x30, ioaddr + CSR12);
+ iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ dev->trans_start = jiffies;
+ }
+ } else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ } else {
+ pnic_do_nway(dev);
+ }
+ iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+ }
+}
+
+void pnic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+ if(!ioread32(ioaddr + CSR7)) {
+ /* the timer was called due to a work overflow
+ * in the interrupt handler. Skip the connection
+ * checks, the nic is definitely talking to
+ * its link partner.
+ */
+ goto too_good_connection;
+ }
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock_irq(&tp->lock);
+ if (tulip_check_duplex(dev) > 0)
+ next_tick = 3*HZ;
+ spin_unlock_irq(&tp->lock);
+ } else {
+ int csr12 = ioread32(ioaddr + CSR12);
+ int new_csr6 = tp->csr6 & ~0x40C40200;
+ int phy_reg = ioread32(ioaddr + 0xB8);
+ int csr5 = ioread32(ioaddr + CSR5);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+ "CSR5 %8.8x.\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
+ if (phy_reg & 0x04000000) { /* Remote link fault */
+ iowrite32(0x0201F078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ tp->nwayset = 0;
+ } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+ "CSR5 %8.8x, PHY %3.3x.\n",
+ dev->name, medianame[dev->if_port], csr12,
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+ next_tick = 1*HZ;
+ } else if (dev->if_port == 0) {
+ dev->if_port = 3;
+ iowrite32(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ iowrite32(0x1F868, ioaddr + 0xB8);
+ } else {
+ dev->if_port = 0;
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x1F078, ioaddr + 0xB8);
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+ "%s-duplex, CSR6 %8.8x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half", new_csr6);
+ }
+ }
+ }
+too_good_connection:
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ if(!ioread32(ioaddr + CSR7)) {
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ disable_irq(dev->irq);
+ tulip_refill_rx(dev);
+ enable_irq(dev->irq);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+}
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
new file mode 100644
index 000000000000..55f4a9a631bc
--- /dev/null
+++ b/drivers/net/tulip/pnic2.c
@@ -0,0 +1,407 @@
+/*
+ drivers/net/tulip/pnic2.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+ Modified to help support PNIC_II by Kevin B. Hendricks
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+
+/* Understanding the PNIC_II - everything in this file is based
+ * on the PNIC_II_PDF datasheet, which is sorely lacking in detail.
+ *
+ * As I understand things, here are the registers and bits that
+ * explain the masks and constants used in this file that are
+ * either different from the 21142/3 or important for basic operation.
+ *
+ *
+ * CSR 6 (mask = 0xfe3bd1fd of bits not to change)
+ * -----
+ * Bit 24 - SCR
+ * Bit 23 - PCS
+ * Bit 22 - TTM (Transmit Threshold Mode)
+ * Bit 18 - Port Select
+ * Bit 13 - Start - 1, Stop - 0 Transmissions
+ * Bit 11:10 - Loop Back Operation Mode
+ * Bit 9 - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
+ * Bit 1 - Start - 1, Stop - 0 Receive
+ *
+ *
+ * CSR 14 (mask = 0xfff0ee39 of bits not to change)
+ * ------
+ * Bit 19 - PAUSE-Pause
+ * Bit 18 - Advertise T4
+ * Bit 17 - Advertise 100baseTx-FD
+ * Bit 16 - Advertise 100baseTx-HD
+ * Bit 12 - LTE - Link Test Enable
+ * Bit 7 - ANE - Auto Negotiate Enable
+ * Bit 6 - HDE - Advertise 10baseT-HD
+ * Bit 2 - Reset to Power down - kept as 1 for normal operation
+ * Bit 1 - Loop Back enable for 10baseT MCC
+ *
+ *
+ * CSR 12
+ * ------
+ * Bit 25 - Partner can do T4
+ * Bit 24 - Partner can do 100baseTx-FD
+ * Bit 23 - Partner can do 100baseTx-HD
+ * Bit 22 - Partner can do 10baseT-FD
+ * Bit 21 - Partner can do 10baseT-HD
+ * Bit 15 - LPN is 1 if all above bits are valid, otherwise 0
+ * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
+ * Bit 3 - Autopolarity state
+ * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
+ * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
+ *
+ *
+ * Data Port Selection Info
+ *-------------------------
+ *
+ * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT
+ * 1 0 0 (X) 0 (X) 1 NWAY
+ * 0 0 1 0 (X) 0 10baseT
+ * 0 1 0 1 1 (X) 100baseT
+ *
+ *
+ */
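+/* Illustrative sketch, not part of the driver: the enum and the names below
+ * are hypothetical and only spell out how the Data Port Selection table
+ * above maps onto the CSR6 values used later in this file (0x01000000 for
+ * NWAY, 0x00400000 for 10baseT, 0x01840000 for 100baseT).
+ */
+enum pnic2_csr6_port_sketch {
+	PNIC2_CSR6_PS  = 1 << 18,	/* Port Select */
+	PNIC2_CSR6_TTM = 1 << 22,	/* Transmit Threshold Mode */
+	PNIC2_CSR6_PCS = 1 << 23,	/* PCS function */
+	PNIC2_CSR6_SCR = 1 << 24,	/* Scrambler */
+
+	PNIC2_CSR6_PORT_NWAY     = PNIC2_CSR6_SCR,		/* 0x01000000 */
+	PNIC2_CSR6_PORT_10BASET  = PNIC2_CSR6_TTM,		/* 0x00400000 */
+	PNIC2_CSR6_PORT_100BASET = PNIC2_CSR6_PS | PNIC2_CSR6_PCS
+				   | PNIC2_CSR6_SCR,		/* 0x01840000 */
+};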
+
+
+
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/delay.h>
+
+
+void pnic2_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 3)
+ printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
+ dev->name,ioread32(ioaddr + CSR12));
+
+ if (next_tick) {
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ }
+}
+
+
+void pnic2_start_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14;
+ int csr12;
+
+ /* set up what to advertise during the negotiation */
+
+ /* load in csr14 and mask off bits not to touch
+ * comment at top of file explains mask value
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39);
+
+	/* bit 17 - advertise 100baseTx-FD */
+ if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
+
+ /* bit 16 - advertise 100baseTx-HD */
+ if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
+
+ /* bit 6 - advertise 10baseT-HD */
+ if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
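+	/* Note: tp->sym_advertise follows the standard MII advertisement
+	 * register bit layout (0x0100 = 100baseTx-FD, 0x0080 = 100baseTx-HD,
+	 * 0x0040 = 10baseT-FD, 0x0020 = 10baseT-HD), so the 0x01E0 loaded at
+	 * startup advertises all four.
+	 */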
+
+ /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
+ * and bit 0 Don't PowerDown 10baseT
+ */
+ csr14 |= 0x00001184;
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
+ "csr14=%8.8x.\n", dev->name, csr14);
+
+ /* tell pnic2_lnk_change we are doing an nway negotiation */
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+
+ /* now we have to set up csr6 for NWAY state */
+
+ tp->csr6 = ioread32(ioaddr + CSR6);
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: On Entry to Nway, "
+ "csr6=%8.8x.\n", dev->name, tp->csr6);
+
+ /* mask off any bits not to touch
+ * comment at top of file explains mask value
+ */
+ tp->csr6 = tp->csr6 & 0xfe3bd1fd;
+
+ /* don't forget that bit 9 is also used for advertising */
+ /* advertise 10baseT-FD for the negotiation (bit 9) */
+ if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
+
+ /* set bit 24 for nway negotiation mode ...
+ * see Data Port Selection comment at top of file
+ * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
+ */
+ tp->csr6 |= 0x01000000;
+ iowrite32(csr14, ioaddr + CSR14);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ udelay(100);
+
+ /* all set up so now force the negotiation to begin */
+
+ /* read in current values and mask off all but the
+ * Autonegotiation bits 14:12. Writing a 001 to those bits
+ * should start the autonegotiation
+ */
+ csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff);
+ csr12 |= 0x1000;
+ iowrite32(csr12, ioaddr + CSR12);
+}
+
+
+
+void pnic2_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14;
+
+	/* read the status register to find out what is up */
+ int csr12 = ioread32(ioaddr + CSR12);
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
+ " CSR5 %x, %8.8x.\n", dev->name, csr12,
+ csr5, ioread32(ioaddr + CSR14));
+
+	/* If NWay finished and we have a negotiated partner capability,
+	 * check bits 14:12 for bit pattern 101 - all is good
+ */
+ if (tp->nway && !tp->nwayset) {
+
+ /* we did an auto negotiation */
+
+ if ((csr12 & 0x7000) == 0x5000) {
+
+ /* negotiation ended successfully */
+
+			/* get the link partner's reply and mask out all but
+			 * bits 24-21, which show the partner's capabilities,
+			 * and match those to what we advertised
+ *
+ * then begin to interpret the results of the negotiation.
+ * Always go in this order : (we are ignoring T4 for now)
+ * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD
+ */
+
+ int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise;
+ tp->lpar = (csr12 >> 16);
+ tp->nwayset = 1;
+
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: funny autonegotiate result "
+ "csr12 %8.8x advertising %4.4x\n",
+ dev->name, csr12, tp->sym_advertise);
+ tp->nwayset = 0;
+ /* so check if 100baseTx link state is okay */
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+
+ /* now record the duplex that was negotiated */
+ tp->full_duplex = 0;
+ if ((dev->if_port == 4) || (dev->if_port == 5))
+ tp->full_duplex = 1;
+
+ if (tulip_debug > 1) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar, negotiated);
+ }
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we can properly end nway mode and
+ * set duplex (ie. use csr6<9> again)
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14,ioaddr + CSR14);
+
+
+ /* now set the data port and operating mode
+ * (see the Data Port Selection comments at
+			 * the top of the file)
+ */
+
+ /* get current csr6 and mask off bits not to touch */
+ /* see comment at top of file */
+
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+
+ /* so if using if_port 3 or 5 then select the 100baseT
+ * port else select the 10baseT port.
+ * See the Data Port Selection table at the top
+ * of the file which was taken from the PNIC_II.PDF
+ * datasheet
+ */
+ if (dev->if_port & 1) tp->csr6 |= 0x01840000;
+ else tp->csr6 |= 0x00400000;
+
+ /* now set the full duplex bit appropriately */
+ if (tp->full_duplex) tp->csr6 |= 0x00000200;
+
+ iowrite32(1, ioaddr + CSR13);
+
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
+ "%8.8x.\n", dev->name, tp->csr6,
+ ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+
+ /* now the following actually writes out the
+ * new csr6 values
+ */
+ tulip_start_rxtx(tp);
+
+ return;
+
+ } else {
+ printk(KERN_INFO "%s: Autonegotiation failed, "
+ "using %s, link beat status %4.4x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we don't forget
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14,ioaddr + CSR14);
+
+ /* what should we do when autonegotiate fails?
+			 * should we try again or default to the baseline
+			 * case? I just don't know.
+ *
+ * for now default to some baseline case
+ */
+
+ dev->if_port = 0;
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+
+ return;
+
+ }
+ }
+
+ if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
+
+ /* Link blew? Maybe restart NWay. */
+
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Ugh! Link blew?\n", dev->name);
+
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+
+ return;
+ }
+
+
+ if (dev->if_port == 3 || dev->if_port == 5) {
+
+ /* we are at 100mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+
+ /* check 100 link beat */
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed then try doing an nway to get in sync */
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+ if (dev->if_port == 0 || dev->if_port == 4) {
+
+ /* we are at 10mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
+
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed, try doing an nway to get in sync */
+ if ((csr12 & 4) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+
+ if (tulip_debug > 1)
+ printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+
+ /* if all else fails default to trying 10baseT-HD */
+ dev->if_port = 0;
+
+ /* make sure autonegotiate enable is off */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14,ioaddr + CSR14);
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+}
+
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
new file mode 100644
index 000000000000..691568283553
--- /dev/null
+++ b/drivers/net/tulip/timer.c
@@ -0,0 +1,175 @@
+/*
+ drivers/net/tulip/timer.c
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/pci.h>
+#include "tulip.h"
+
+
+void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ u32 csr12 = ioread32(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+
+ if (tulip_debug > 2) {
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+ " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ }
+ switch (tp->chip_id) {
+ case DC21140:
+ case DC21142:
+ case MX98713:
+ case COMPEX9881:
+ case DM910X:
+ case ULI526X:
+ default: {
+ struct medialeaf *mleaf;
+ unsigned char *p;
+ if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
+ /* Not much that can be done.
+		   Assume this is a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+ "CSR12 0x%2.2x.\n",
+ dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+ p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: case 4: {
+ /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
+ if (p[offset+1] & 0x80) {
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG"%s: Transceiver monitor tick "
+ "CSR12=%#2.2x, no media sense.\n",
+ dev->name, csr12);
+ if (mleaf->type == 4) {
+ if (mleaf->media == 3 && (csr12 & 0x02))
+ goto select_next_media;
+ }
+ break;
+ }
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+ " bit %d is %d, expecting %d.\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
+ /* Check that the specified bit has the proper value. */
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ netif_carrier_on(dev);
+ break;
+ }
+ netif_carrier_off(dev);
+ if (tp->medialock)
+ break;
+ select_next_media:
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (tulip_media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: No link beat on media %s,"
+ " trying transceiver type %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ tulip_select_media(dev, 0);
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+ case 1: case 3: /* 21140, 21142 MII */
+ actually_mii:
+ if (tulip_check_duplex(dev) < 0) {
+ netif_carrier_off(dev);
+ next_tick = 3*HZ;
+ } else {
+ netif_carrier_on(dev);
+ next_tick = 60*HZ;
+ }
+ break;
+ case 2: /* 21142 serial block has no link beat. */
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+void mxic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 3) {
+ printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
+ ioread32(ioaddr + CSR12));
+ }
+ if (next_tick) {
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ }
+}
+
+
+void comet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+ "%4.4x.\n",
+ dev->name,
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ if (tulip_check_duplex(dev) < 0)
+ { netif_carrier_off(dev); }
+ else
+ { netif_carrier_on(dev); }
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
new file mode 100644
index 000000000000..20346d847d9e
--- /dev/null
+++ b/drivers/net/tulip/tulip.h
@@ -0,0 +1,493 @@
+/*
+ drivers/net/tulip/tulip.h
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#ifndef __NET_TULIP_H__
+#define __NET_TULIP_H__
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+
+
+/* undefine, or define to various debugging levels (>4 == obscene levels) */
+#define TULIP_DEBUG 1
+
+/* undefine USE_IO_OPS for MMIO, define for PIO */
+#ifdef CONFIG_TULIP_MMIO
+# undef USE_IO_OPS
+#else
+# define USE_IO_OPS 1
+#endif
+
+
+
+struct tulip_chip_table {
+ char *chip_name;
+ int io_size;
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+ void (*media_timer) (unsigned long data);
+};
+
+
+enum tbl_flag {
+ HAS_MII = 0x0001,
+ HAS_MEDIA_TABLE = 0x0002,
+ CSR12_IN_SROM = 0x0004,
+ ALWAYS_CHECK_MII = 0x0008,
+ HAS_ACPI = 0x0010,
+ MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */
+ HAS_PNICNWAY = 0x0080,
+ HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION = 0x0100,
+ IS_ASIX = 0x0200,
+ HAS_8023X = 0x0400,
+ COMET_MAC_ADDR = 0x0800,
+ HAS_PCI_MWI = 0x1000,
+ HAS_PHY_IRQ = 0x2000,
+ HAS_SWAPPED_SEEPROM = 0x4000,
+ NEEDS_FAKE_MEDIA_TABLE = 0x8000,
+};
+
+
+/* chip types. careful! order is VERY IMPORTANT here, as these
+ * are used throughout the driver as indices into arrays */
+/* Note 21142 == 21143. */
+enum chips {
+ DC21040 = 0,
+ DC21041 = 1,
+ DC21140 = 2,
+ DC21142 = 3, DC21143 = 3,
+ LC82C168,
+ MX98713,
+ MX98715,
+ MX98725,
+ AX88140,
+ PNIC2,
+ COMET,
+ COMPEX9881,
+ I21145,
+ DM910X,
+ CONEXANT,
+ ULI526X
+};
+
+
+enum MediaIs {
+ MediaIsFD = 1,
+ MediaAlwaysFD = 2,
+ MediaIsMII = 4,
+ MediaIsFx = 8,
+ MediaIs100 = 16
+};
+
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0 = 0,
+ CSR1 = 0x08,
+ CSR2 = 0x10,
+ CSR3 = 0x18,
+ CSR4 = 0x20,
+ CSR5 = 0x28,
+ CSR6 = 0x30,
+ CSR7 = 0x38,
+ CSR8 = 0x40,
+ CSR9 = 0x48,
+ CSR10 = 0x50,
+ CSR11 = 0x58,
+ CSR12 = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+};
+
+/* register offset and bits for CFDD PCI config reg */
+enum pci_cfg_driver_reg {
+ CFDD = 0x40,
+ CFDD_Sleep = (1 << 31),
+ CFDD_Snooze = (1 << 30),
+};
+
+#define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber)
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ TimerInt = 0x800,
+ SytemError = 0x2000,
+ TPLnkFail = 0x1000,
+ TPLnkPass = 0x10,
+ NormalIntr = 0x10000,
+ AbnormalIntr = 0x8000,
+ RxJabber = 0x200,
+ RxDied = 0x100,
+ RxNoBuf = 0x80,
+ RxIntr = 0x40,
+ TxFIFOUnderflow = 0x20,
+ TxJabber = 0x08,
+ TxNoBuf = 0x04,
+ TxDied = 0x02,
+ TxIntr = 0x01,
+};
+
+/* bit mask for CSR5 TX/RX process state */
+#define CSR5_TS 0x00700000
+#define CSR5_RS 0x000e0000
+
+enum tulip_mode_bits {
+ TxThreshold = (1 << 22),
+ FullDuplex = (1 << 9),
+ TxOn = 0x2000,
+ AcceptBroadcast = 0x0100,
+ AcceptAllMulticast = 0x0080,
+ AcceptAllPhys = 0x0040,
+ AcceptRunt = 0x0008,
+ RxOn = 0x0002,
+ RxTx = (TxOn | RxOn),
+};
+
+
+enum tulip_busconfig_bits {
+ MWI = (1 << 24),
+ MRL = (1 << 23),
+ MRM = (1 << 21),
+ CALShift = 14,
+ BurstLenShift = 8,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+
+struct tulip_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2; /* We use only buffer 1. */
+};
+
+
+enum desc_status_bits {
+ DescOwned = 0x80000000,
+ RxDescFatalErr = 0x8000,
+ RxWholePkt = 0x0300,
+};
+
+
+enum t21143_csr6_bits {
+ csr6_sc = (1<<31),
+ csr6_ra = (1<<30),
+ csr6_ign_dest_msb = (1<<26),
+ csr6_mbo = (1<<25),
+ csr6_scr = (1<<24), /* scramble mode flag: can't be set */
+ csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
+ csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
+ csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */
+ csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */
+	csr6_ps = (1<<18),	/* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
+ csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */
+ csr6_trh = (1<<15), /* Transmit Threshold high bit */
+ csr6_trl = (1<<14), /* Transmit Threshold low bit */
+
+ /***************************************************************
+ * This table shows transmit threshold values based on media *
+ * and these two registers (from PNIC1 & 2 docs) Note: this is *
+ * all meaningless if sf is set. *
+ ***************************************************************/
+
+ /***********************************
+ * (trh,trl) * 100BaseTX * 10BaseT *
+ ***********************************
+ * (0,0) * 128 * 72 *
+ * (0,1) * 256 * 96 *
+ * (1,0) * 512 * 128 *
+ * (1,1) * 1024 * 160 *
+ ***********************************/
+
+ csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */
+ csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
+ csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
+ /* set both and you get (PHY) loopback */
+	csr6_fd = (1<<9),	/* Full duplex mode, disables heartbeat, no loopback */
+ csr6_pm = (1<<7), /* Pass All Multicast */
+ csr6_pr = (1<<6), /* Promiscuous mode */
+ csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */
+ csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */
+ csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */
+ csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */
+ csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */
+
+ csr6_mask_capture = (csr6_sc | csr6_ca),
+ csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
+ csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
+ csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl),
+ csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
+ csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
+ csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
+ csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
+};
+
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 128
+#define MEDIA_MASK 31
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
+
+#if defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+#define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */
+#else
+#define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */
+#endif
+
+
+/* Ring-wrap flag in length field, use for last ring entry.
+ 0x01000000 means chain on buffer2 address,
+ 0x02000000 means use the ring start address in CSR2/3.
+ Note: Some work-alike chips do not function correctly in chained mode.
+ The ASIX chip works only in chained mode.
+	Thus we indicate ring mode, but always write the 'next' field for
+ chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
+
+
+#define EEPROM_SIZE 512 /* 2 << EEPROM_ADDRLEN */
+
+
+#define RUN_AT(x) (jiffies + (x))
+
+#if defined(__i386__) /* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
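+/* For example, get_u16(&tp->eeprom[0]) reads a little-endian 16-bit word
+ * from the EEPROM image regardless of alignment: on i386 a plain (possibly
+ * unaligned) load is safe, elsewhere the value is assembled byte by byte.
+ * (Illustrative note only; the offset 0 is arbitrary.)
+ */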
+
+struct medialeaf {
+ u8 type;
+ u8 media;
+ unsigned char *leafdata;
+};
+
+
+struct mediatable {
+ u16 defaultmedia;
+ u8 leafcount;
+ u8 csr12dir; /* General purpose pin directions. */
+ unsigned has_mii:1;
+ unsigned has_nonmii:1;
+ unsigned has_reset:6;
+ u32 csr15dir;
+ u32 csr15val; /* 21143 NWay setting. */
+ struct medialeaf mleaf[0];
+};
+
+
+struct mediainfo {
+ struct mediainfo *next;
+ int info_type;
+ int index;
+ unsigned char *info;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+
+struct tulip_private {
+ const char *product_name;
+ struct net_device *next_module;
+ struct tulip_rx_desc *rx_ring;
+ struct tulip_tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct ring_info tx_buffers[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct ring_info rx_buffers[RX_RING_SIZE];
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+ int chip_id;
+ int revision;
+ int flags;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ struct timer_list oom_timer; /* Out of memory timer. */
+ u32 mc_filter[2];
+ spinlock_t lock;
+ spinlock_t mii_lock;
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+ int mit_on;
+#endif
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int csr0; /* CSR0 setting. */
+ unsigned int csr6; /* Current CSR6 control settings. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+ void (*link_change) (struct net_device * dev, int csr5);
+ u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
+ u16 lpar; /* 21143 Link partner ability. */
+ u16 advertising[4];
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+ struct pci_dev *pdev;
+ int ttimer;
+ int susp_rx;
+ unsigned long nir;
+ void __iomem *base_addr;
+ int csr12_shadow;
+ int pad0; /* Used for 8-byte alignment */
+};
+
+
+struct eeprom_fixup {
+ char *name;
+ unsigned char addr0;
+ unsigned char addr1;
+ unsigned char addr2;
+ u16 newtable[32]; /* Max length below. */
+};
+
+
+/* 21142.c */
+extern u16 t21142_csr14[];
+void t21142_timer(unsigned long data);
+void t21142_start_nway(struct net_device *dev);
+void t21142_lnk_change(struct net_device *dev, int csr5);
+
+
+/* pnic2.c */
+void pnic2_lnk_change(struct net_device *dev, int csr5);
+void pnic2_timer(unsigned long data);
+void pnic2_start_nway(struct net_device *dev);
+
+/* eeprom.c */
+void tulip_parse_eeprom(struct net_device *dev);
+int tulip_read_eeprom(struct net_device *dev, int location, int addr_len);
+
+/* interrupt.c */
+extern unsigned int tulip_max_interrupt_work;
+extern int tulip_rx_copybreak;
+irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+int tulip_refill_rx(struct net_device *dev);
+#ifdef CONFIG_TULIP_NAPI
+int tulip_poll(struct net_device *dev, int *budget);
+#endif
+
+
+/* media.c */
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location);
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value);
+void tulip_select_media(struct net_device *dev, int startup);
+int tulip_check_duplex(struct net_device *dev);
+void tulip_find_mii (struct net_device *dev, int board_idx);
+
+/* pnic.c */
+void pnic_do_nway(struct net_device *dev);
+void pnic_lnk_change(struct net_device *dev, int csr5);
+void pnic_timer(unsigned long data);
+
+/* timer.c */
+void tulip_timer(unsigned long data);
+void mxic_timer(unsigned long data);
+void comet_timer(unsigned long data);
+
+/* tulip_core.c */
+extern int tulip_debug;
+extern const char * const medianame[];
+extern const char tulip_media_cap[];
+extern struct tulip_chip_table tulip_tbl[];
+void oom_timer(unsigned long data);
+extern u8 t21040_csr13[];
+
+static inline void tulip_start_rxtx(struct tulip_private *tp)
+{
+ void __iomem *ioaddr = tp->base_addr;
+ iowrite32(tp->csr6 | RxTx, ioaddr + CSR6);
+ barrier();
+ (void) ioread32(ioaddr + CSR6); /* mmio sync */
+}
+
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
+{
+ void __iomem *ioaddr = tp->base_addr;
+ u32 csr6 = ioread32(ioaddr + CSR6);
+
+ if (csr6 & RxTx) {
+ unsigned i=1300/10;
+ iowrite32(csr6 & ~RxTx, ioaddr + CSR6);
+ barrier();
+ /* wait until in-flight frame completes.
+ * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
+ * Typically expect this loop to end in < 50 us on 100BT.
+ */
+ while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS)))
+ udelay(10);
+
+ if (!i)
+ printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed\n",
+ pci_name(tp->pdev));
+ }
+}
+
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
+{
+ if(!(tp->chip_id == ULI526X &&
+ (tp->revision == 0x40 || tp->revision == 0x50))) {
+ tulip_stop_rxtx(tp);
+ udelay(5);
+ }
+ tulip_start_rxtx(tp);
+}
+
+#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
new file mode 100644
index 000000000000..d098b3ba3538
--- /dev/null
+++ b/drivers/net/tulip/tulip_core.c
@@ -0,0 +1,1861 @@
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
+
+/*
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver, or visit the project
+ Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+#include <linux/config.h>
+
+#define DRV_NAME "tulip"
+#ifdef CONFIG_TULIP_NAPI
+#define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */
+#else
+#define DRV_VERSION "1.1.13"
+#endif
+#define DRV_RELDATE "May 11, 2002"
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#ifdef __sparc__
+#include <asm/pbm.h>
+#endif
+
+static char version[] __devinitdata =
+ "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
+
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static unsigned int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
+
+/* The possible media types that can be set in options[] are: */
+const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+ "","","","", "","","","", "","","","Transceiver reset",
+};
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+	|| defined(__sparc__) || defined(__ia64__) \
+ || defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
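+/* Worked example against the table above, using enum tulip_busconfig_bits
+ * from tulip.h: 0x01A00000 == MWI | MRL | MRM; 0x8000 == 16-longword cache
+ * alignment with unlimited burst; 0x9000 adds a 16-longword burst limit;
+ * 0xE000 == 32-longword alignment with a 32-longword burst limit; and
+ * 0x4800 == 8-longword alignment with an 8-longword burst limit.
+ */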
+
+#if defined(__alpha__) || defined(__ia64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0x8000;
+#elif defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+static int csr0 = 0x01A00000 | 0x9000;
+#elif defined(__arm__) || defined(__sh__)
+static int csr0 = 0x01A00000 | 0x4800;
+#elif defined(__mips__)
+static int csr0 = 0x00200000 | 0x4000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+
+MODULE_AUTHOR("The Linux Kernel Team");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+module_param(tulip_debug, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(csr0, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+
+#define PFX DRV_NAME ": "
+
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+
+
+/*
+ * This table is used during operation for capabilities and the media timer.
+ *
+ * It is indexed via the values in 'enum chips'
+ */
+
+struct tulip_chip_table tulip_tbl[] = {
+ { }, /* placeholder for array, slot unused currently */
+ { }, /* placeholder for array, slot unused currently */
+
+ /* DC21140 */
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
+
+ /* DC21142, DC21143 */
+ { "Digital DS21143 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
+ | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
+
+ /* LC82C168 */
+ { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer },
+
+ /* MX98713 */
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+
+ /* MX98715 */
+ { "Macronix 98715 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+
+ /* MX98725 */
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+
+ /* AX88140 */
+ { "ASIX AX88140", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
+ | IS_ASIX, tulip_timer },
+
+ /* PNIC2 */
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+ HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
+
+ /* COMET */
+ { "ADMtek Comet", 256, 0x0001abef,
+ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+
+ /* COMPEX9881 */
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+
+ /* I21145 */
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
+ | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
+
+ /* DM910X */
+ { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
+ tulip_timer },
+
+ /* RS7112 */
+ { "Conexant LANfinity", 256, 0x0001ebef,
+ HAS_MII | HAS_ACPI, tulip_timer },
+
+ /* ULi526X */
+ { "ULi M5261/M5263", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
+};
+
+
+static struct pci_device_id tulip_pci_tbl[] = {
+ { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
+ { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
+ { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
+ { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
+ { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
+ { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
+ { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
+ { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
+ { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+ { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+ { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+ { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+ { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
+ { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
+ { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
+ { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
+
+
+/* A full-duplex map for media types. */
+const char tulip_media_cap[32] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
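+/* Illustrative sketch; the helper below is hypothetical and unused by the
+ * driver. Each entry above is an OR of the MediaIs* flags from tulip.h,
+ * indexed by dev->if_port: e.g. tulip_media_cap[5] == 19 ==
+ * MediaIs100 | MediaAlwaysFD | MediaIsFD for "100baseTx-FDX", and
+ * tulip_media_cap[13] == 20 == MediaIs100 | MediaIsMII for "MII 100baseTx".
+ */
+static inline int tulip_media_is_full_duplex(int if_port)
+{
+	return (tulip_media_cap[if_port & MEDIA_MASK] & MediaIsFD) != 0;
+}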
+
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int tulip_open(struct net_device *dev);
+static int tulip_close(struct net_device *dev);
+static void tulip_up(struct net_device *dev);
+static void tulip_down(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_tulip(struct net_device *dev);
+#endif
+
+static void tulip_set_power_state (struct tulip_private *tp,
+ int sleep, int snooze)
+{
+ if (tp->flags & HAS_ACPI) {
+ u32 tmp, newtmp;
+ pci_read_config_dword (tp->pdev, CFDD, &tmp);
+ newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
+ if (sleep)
+ newtmp |= CFDD_Sleep;
+ else if (snooze)
+ newtmp |= CFDD_Snooze;
+ if (tmp != newtmp)
+ pci_write_config_dword (tp->pdev, CFDD, newtmp);
+ }
+
+}
+
+
+static void tulip_up(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 3*HZ;
+ int i;
+
+ /* Wake the chip from sleep/snooze mode. */
+ tulip_set_power_state (tp, 0, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+ iowrite32(0x00040000, ioaddr + CSR6);
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ iowrite32(0x00000001, ioaddr + CSR0);
+ udelay(100);
+
+ /* Deassert reset.
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ iowrite32(tp->csr0, ioaddr + CSR0);
+ udelay(100);
+
+ if (tulip_debug > 1)
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+
+ iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
+ iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr));
+ u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4)));
+ if (tp->chip_id == AX88140) {
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(addr_low, ioaddr + CSR14);
+ iowrite32(1, ioaddr + CSR13);
+ iowrite32(addr_high, ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(addr_low, ioaddr + 0xA4);
+ iowrite32(addr_high, ioaddr + 0xA8);
+ iowrite32(0, ioaddr + 0xAC);
+ iowrite32(0, ioaddr + 0xB0);
+ }
+ } else {
+ /* This is set_rx_mode(), but without starting the transmitter. */
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ u16 *setup_frm = &tp->setup_frame[15*6];
+ dma_addr_t mapping;
+
+ /* 21140 bug: you must add the broadcast address. */
+ memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+ /* Fill the final entry of the table with our physical address. */
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+
+ mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ tp->tx_buffers[tp->cur_tx].skb = NULL;
+ tp->tx_buffers[tp->cur_tx].mapping = mapping;
+
+ /* Put the setup frame on the Tx list. */
+ tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
+ tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
+ tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
+
+ tp->cur_tx++;
+ }
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ /* Allow selecting a default media. */
+ i = 0;
+ if (tp->mtable == NULL)
+ goto media_picked;
+ if (dev->if_port) {
+ int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
+ (dev->if_port == 12 ? 0 : dev->if_port);
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using user-specified media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ goto media_picked;
+ }
+ }
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+ dev->name, medianame[looking_for]);
+ goto media_picked;
+ }
+ }
+ /* Start sensing first non-full-duplex media. */
+ for (i = tp->mtable->leafcount - 1;
+ (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+ ;
+media_picked:
+
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+
+ if (dev->if_port) {
+ if (tp->chip_id == DC21143 &&
+ (tulip_media_cap[dev->if_port] & MediaIsMII)) {
+ /* We must reset the media CSRs when we force-select MII mode. */
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ iowrite32(0x0008, ioaddr + CSR15);
+ }
+ tulip_select_media(dev, 1);
+ } else if (tp->chip_id == DC21142) {
+ if (tp->mii_cnt) {
+ tulip_select_media(dev, 1);
+ if (tulip_debug > 1)
+ printk(KERN_INFO "%s: Using MII transceiver %d, status "
+ "%4.4x.\n",
+ dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ iowrite32(csr6_mask_defstate, ioaddr + CSR6);
+ tp->csr6 = csr6_mask_hdcap;
+ dev->if_port = 11;
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ } else
+ t21142_start_nway(dev);
+ } else if (tp->chip_id == PNIC2) {
+ /* for initial startup advertise 10/100 Full and Half */
+ tp->sym_advertise = 0x01E0;
+ /* enable autonegotiate end interrupt */
+ iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
+ iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
+ pnic2_start_nway(dev);
+ } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
+ iowrite32(0x0001, ioaddr + CSR15);
+ } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+ else {
+ /* Start with 10mbps to do autonegotiation. */
+ iowrite32(0x32, ioaddr + CSR12);
+ tp->csr6 = 0x00420000;
+ iowrite32(0x0001B078, ioaddr + 0xB8);
+ iowrite32(0x0201B078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ }
+ } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
+ && ! tp->medialock) {
+ dev->if_port = 0;
+ tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
+ iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
+ } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+ tp->csr6 = 0x01a80200;
+ iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
+ iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
+ } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
+ /* Enable automatic Tx underrun recovery. */
+ iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+ } else if (tp->chip_id == AX88140) {
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+ } else
+ tulip_select_media(dev, 1);
+
+ /* Start the chip's Tx to process setup frame. */
+ tulip_stop_rxtx(tp);
+ barrier();
+ udelay(5);
+ iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tulip_start_rxtx(tp);
+ iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
+
+ if (tulip_debug > 2) {
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+ dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
+ }
+
+	/* Set the timer to check for link beat and perhaps switch
+ to an alternate media type. */
+ tp->timer.expires = RUN_AT(next_tick);
+ add_timer(&tp->timer);
+#ifdef CONFIG_TULIP_NAPI
+ init_timer(&tp->oom_timer);
+ tp->oom_timer.data = (unsigned long)dev;
+ tp->oom_timer.function = oom_timer;
+#endif
+}
+
+static int
+tulip_open(struct net_device *dev)
+{
+ int retval;
+
+ if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
+ return retval;
+
+ tulip_init_ring (dev);
+
+ tulip_up (dev);
+
+ netif_start_queue (dev);
+
+ return 0;
+}
+
+
+static void tulip_tx_timeout(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ if (tulip_debug > 1)
+ printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
+ dev->name);
+ } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
+ || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
+ || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
+ printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ if ( ! tp->medialock && tp->mtable) {
+ do
+ --tp->cur_index;
+ while (tp->cur_index >= 0
+ && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
+ & MediaIsFD));
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ tulip_select_media(dev, 0);
+ printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+ "media.\n", dev->name, medianame[dev->if_port]);
+ }
+ } else if (tp->chip_id == PNIC2) {
+ printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
+ "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
+ dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
+ } else {
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+ "%8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev->if_port = 0;
+ }
+
+#if defined(way_too_many_messages)
+ if (tulip_debug > 3) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
+ "%2.2x %2.2x %2.2x.\n",
+ i, (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ if (j < 100) printk(" %2.2x", buf[j]);
+ printk(" j=%d.\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+	/* Stop and restart the chip's Tx processes. */
+
+ tulip_restart_rxtx(tp);
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, ioaddr + CSR1);
+
+ tp->stats.tx_errors++;
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void tulip_init_ring(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+
+ tp->susp_rx = 0;
+ tp->ttimer = 0;
+ tp->nir = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+ tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+ tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t mapping;
+
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ tp->rx_buffers[i].skb = skb;
+ if (skb == NULL)
+ break;
+ mapping = pci_map_single(tp->pdev, skb->tail,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[i].mapping = mapping;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
+ tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ tp->tx_ring[i].status = 0x00000000;
+ tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
+ }
+ tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
+}
+
+static int
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry;
+ u32 flag;
+ dma_addr_t mapping;
+
+ spin_lock_irq(&tp->lock);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_buffers[entry].skb = skb;
+ mapping = pci_map_single(tp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ tp->tx_buffers[entry].mapping = mapping;
+ tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+ if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+ } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+ } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ flag = 0xe0000000; /* Tx-done intr. */
+ netif_stop_queue(dev);
+ }
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
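+	/* In the 2114x Tx descriptor the length word also carries control
+	 * bits: bit 31 requests an interrupt on completion, bit 30 marks the
+	 * last segment and bit 29 the first. So 0x60000000 above describes a
+	 * complete single-buffer packet with no Tx-done interrupt, and
+	 * 0xe0000000 additionally requests the interrupt; bit 25 is the
+	 * ring-wrap (DESC_RING_WRAP) flag.
+	 */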
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ /* if we were using Transmit Automatic Polling, we would need a
+ * wmb() here. */
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ wmb();
+
+ tp->cur_tx++;
+
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, tp->base_addr + CSR1);
+
+ spin_unlock_irq(&tp->lock);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+static void tulip_clean_tx_ring(struct tulip_private *tp)
+{
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0) {
+ tp->stats.tx_errors++; /* It wasn't Txed */
+ tp->tx_ring[entry].status = 0;
+ }
+
+ /* Check for Tx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ }
+}
+
+static void tulip_down (struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ unsigned long flags;
+
+ del_timer_sync (&tp->timer);
+#ifdef CONFIG_TULIP_NAPI
+ del_timer_sync (&tp->oom_timer);
+#endif
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32 (0x00000000, ioaddr + CSR7);
+
+ /* Stop the Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ /* prepare receive buffers */
+ tulip_refill_rx(dev);
+
+ /* release any unconsumed transmit buffers */
+ tulip_clean_tx_ring(tp);
+
+ if (ioread32 (ioaddr + CSR6) != 0xffffffff)
+ tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ dev->if_port = tp->saved_if_port;
+
+ /* Leave the driver in snooze, not sleep, mode. */
+ tulip_set_power_state (tp, 0, 1);
+}
+
+
+static int tulip_close (struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int i;
+
+ netif_stop_queue (dev);
+
+ tulip_down (dev);
+
+ if (tulip_debug > 1)
+ printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, ioread32 (ioaddr + CSR5));
+
+ free_irq (dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_buffers[i].skb;
+ dma_addr_t mapping = tp->rx_buffers[i].mapping;
+
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+
+ tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
+ tp->rx_ring[i].length = 0;
+ tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+ pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb (skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->tx_buffers[i].skb;
+
+ if (skb != NULL) {
+ pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb (skb);
+ }
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+
+ if (netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &tp->stats;
+}
+
+
+static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tulip_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = tulip_get_drvinfo
+};
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+ const unsigned int phy_idx = 0;
+ int phy = tp->phys[phy_idx] & 0x1f;
+ unsigned int regnum = data->reg_num;
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ if (tp->mii_cnt)
+ data->phy_id = phy;
+ else if (tp->flags & HAS_NWAY)
+ data->phy_id = 32;
+ else if (tp->chip_id == COMET)
+ data->phy_id = 1;
+ else
+ return -ENODEV;
+
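+	/* no break: deliberately fall through so SIOCGMIIPHY also returns
+	   the contents of the requested register */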
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ int csr12 = ioread32 (ioaddr + CSR12);
+ int csr14 = ioread32 (ioaddr + CSR14);
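+			/* Internal-NWay chips have no real MII PHY (pseudo PHY
+			   id 32); synthesize BMCR/BMSR/advertise/LPA style
+			   values from CSR12 and CSR14 instead. */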
+ switch (regnum) {
+ case 0:
+ if (((csr14<<5) & 0x1000) ||
+ (dev->if_port == 5 && tp->nwayset))
+ data->val_out = 0x1000;
+ else
+ data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+ | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+ break;
+ case 1:
+ data->val_out =
+ 0x1848 +
+ ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
+ ((csr12&0x06) == 6 ? 0 : 4);
+ data->val_out |= 0x6048;
+ break;
+ case 4:
+ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
+ data->val_out =
+ ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
+ ((csr14 >> 1) & 0x20) + 1;
+ data->val_out |= ((csr14 >> 9) & 0x03C0);
+ break;
+ case 5: data->val_out = tp->lpar; break;
+ default: data->val_out = 0; break;
+ }
+ } else {
+ data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable (CAP_NET_ADMIN))
+ return -EPERM;
+ if (regnum & ~0x1f)
+ return -EINVAL;
+ if (data->phy_id == phy) {
+ u16 value = data->val_in;
+ switch (regnum) {
+ case 0: /* Check for autonegotiation on or reset. */
+ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (tp->full_duplex_lock)
+ tp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4:
+ tp->advertising[phy_idx] =
+ tp->mii_advertise = data->val_in;
+ break;
+ }
+ }
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ u16 value = data->val_in;
+ if (regnum == 0) {
+ if ((value & 0x1200) == 0x1200) {
+ if (tp->chip_id == PNIC2) {
+ pnic2_start_nway (dev);
+ } else {
+ t21142_start_nway (dev);
+ }
+ }
+ } else if (regnum == 4)
+ tp->sym_advertise = value;
+ } else {
+ tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+#undef set_bit_le
+#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
+
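+/* The 192-byte setup frame is an array of 16-bit values, each written twice
+   because only the low-address shortword of every slot is used (see the note
+   in set_rx_mode).  The hash form below carries a 512-bit multicast hash
+   table plus our station address; the perfect form carries up to 14 multicast
+   addresses, broadcast padding, and our station address in the last of the
+   16 perfect-filter slots. */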
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ u16 hash_table[32];
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit_le(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+
+ set_bit_le(index, hash_table);
+
+ }
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &tp->setup_frame[13*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ struct dev_mc_list *mclist;
+ int i;
+ u16 *eaddrs;
+
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+ setup_frm = &tp->setup_frame[15*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr6;
+
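+	/* 0x00D5 covers the CSR6 address-filtering mode bits: hash/perfect,
+	   hash-only, inverse filtering, promiscuous and pass-all-multicast. */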
+ csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
+
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct dev_mc_list *mclist;
+ int i;
+ if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else {
+ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
+ int filterbit;
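+				/* 64-bin multicast hash: the Comet uses the low
+				   six bits of the little-endian CRC, the other
+				   chips the top six bits of the big-endian CRC. */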
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ if (tp->flags & COMET_MAC_ADDR)
+ filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ else
+ filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit &= 0x3f;
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ if (tulip_debug > 2) {
+ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1],
+ mclist->dmi_addr[2], mclist->dmi_addr[3],
+ mclist->dmi_addr[4], mclist->dmi_addr[5],
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ }
+ }
+ if (mc_filter[0] == tp->mc_filter[0] &&
+ mc_filter[1] == tp->mc_filter[1])
+ ; /* No change. */
+ else if (tp->flags & IS_ASIX) {
+ iowrite32(2, ioaddr + CSR13);
+ iowrite32(mc_filter[0], ioaddr + CSR14);
+ iowrite32(3, ioaddr + CSR13);
+ iowrite32(mc_filter[1], ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(mc_filter[0], ioaddr + 0xAC);
+ iowrite32(mc_filter[1], ioaddr + 0xB0);
+ }
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ unsigned long flags;
+ u32 tx_flags = 0x08000000 | 192;
+
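+		/* Tx descriptor control word: the setup-frame bit plus the
+		   192 byte setup frame length; the hash form below also sets
+		   the bit that selects hash filtering. */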
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ build_setup_frame_hash(tp->setup_frame, dev);
+ tx_flags = 0x08400000 | 192;
+ } else {
+ build_setup_frame_perfect(tp->setup_frame, dev);
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ } else {
+ unsigned int entry;
+ int dummy = -1;
+
+ /* Now add this frame to the Tx list. */
+
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+				/* Avoid a chip erratum by prefixing a dummy entry.  Don't do
+				   this on the ULI526X as it triggers a different problem. */
+				if (!(tp->chip_id == ULI526X &&
+				      (tp->revision == 0x40 || tp->revision == 0x50))) {
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+ tp->tx_ring[entry].buffer1 = 0;
+ /* Must set DescOwned later to avoid race with chip */
+ dummy = entry;
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+ }
+
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping =
+ pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE-1)
+ tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
+ tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+ tp->tx_ring[entry].buffer1 =
+ cpu_to_le32(tp->tx_buffers[entry].mapping);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (dummy >= 0)
+ tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
+ netif_stop_queue(dev);
+
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, ioaddr + CSR1);
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ iowrite32(csr6, ioaddr + CSR6);
+}
+
+#ifdef CONFIG_TULIP_MWI
+static void __devinit tulip_mwi_config (struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ u8 cache;
+ u16 pci_command;
+ u32 csr0;
+
+ if (tulip_debug > 3)
+ printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
+
+ tp->csr0 = csr0 = 0;
+
+ /* if we have any cache line size at all, we can do MRM */
+ csr0 |= MRM;
+
+ /* ...and barring hardware bugs, MWI */
+ if (!(tp->chip_id == DC21143 && tp->revision == 65))
+ csr0 |= MWI;
+
+ /* set or disable MWI in the standard PCI command bit.
+ * Check for the case where mwi is desired but not available
+ */
+ if (csr0 & MWI) pci_set_mwi(pdev);
+ else pci_clear_mwi(pdev);
+
+ /* read result from hardware (in case bit refused to enable) */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+ csr0 &= ~MWI;
+
+ /* if cache line size hardwired to zero, no MWI */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if ((csr0 & MWI) && (cache == 0)) {
+ csr0 &= ~MWI;
+ pci_clear_mwi(pdev);
+ }
+
+ /* assign per-cacheline-size cache alignment and
+ * burst length values
+ */
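+	/* PCI_CACHE_LINE_SIZE is in 32-bit words, so 8/16/32 here correspond
+	   to 32-, 64- and 128-byte cache lines. */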
+ switch (cache) {
+ case 8:
+ csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 16:
+ csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 32:
+ csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+ break;
+ default:
+ cache = 0;
+ break;
+ }
+
+ /* if we have a good cache line size, we by now have a good
+ * csr0, so save it and exit
+ */
+ if (cache)
+ goto out;
+
+ /* we don't have a good csr0 or cache line size, disable MWI */
+ if (csr0 & MWI) {
+ pci_clear_mwi(pdev);
+ csr0 &= ~MWI;
+ }
+
+ /* sane defaults for burst length and cache alignment
+ * originally from de4x5 driver
+ */
+ csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+
+out:
+ tp->csr0 = csr0;
+ if (tulip_debug > 2)
+ printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
+ pci_name(pdev), cache, csr0);
+}
+#endif
+
+/*
+ * Chips that have the MRM/reserved-bit quirk and the burst quirk: the
+ * DM910X and the on-chip ULi devices.
+ */
+
+static int tulip_uli_dm_quirk(struct pci_dev *pdev)
+{
+	/* Davicom DM9102/DM9102A */
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+		return 1;
+	/* ULi M5261 */
+	if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
+		return 1;
+	/* ULi M5263 */
+	if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
+		return 1;
+ return 0;
+}
+
+static int __devinit tulip_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct tulip_private *tp;
+ /* See note below on the multiport cards. */
+ static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static struct pci_device_id early_486_chipsets[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+ { },
+ };
+ static int last_irq;
+ static int multiport_cnt; /* For four-port boards w/one EEPROM */
+ u8 chip_rev;
+ int i, irq;
+ unsigned short sum;
+ unsigned char *ee_data;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ static int board_idx = -1;
+ int chip_idx = ent->driver_data;
+ const char *chip_name = tulip_tbl[chip_idx].chip_name;
+ unsigned int eeprom_missing = 0;
+ unsigned int force_csr0 = 0;
+
+#ifndef MODULE
+ static int did_version; /* Already printed version info. */
+ if (tulip_debug > 0 && did_version++ == 0)
+ printk (KERN_INFO "%s", version);
+#endif
+
+ board_idx++;
+
+ /*
+	 *	LanMedia boards wire a Tulip chip to a WAN interface and need a
+	 *	very different driver (the lmc driver)
+ */
+
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
+ printk (KERN_ERR PFX "skipping LMC card.\n");
+ return -ENODEV;
+ }
+
+ /*
+	 *	Early DM9100s need software CRC and the dmfe driver
+ */
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+ {
+ u32 dev_rev;
+ /* Read Chip revision */
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+ if(dev_rev < 0x02000030)
+ {
+ printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Looks for early PCI chipsets where people report hangs
+ * without the workarounds being on.
+ */
+
+ /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
+ aligned. Aries might need this too. The Saturn errata are not
+ pretty reading but thankfully it's an old 486 chipset.
+
+ 2. The dreaded SiS496 486 chipset. Same workaround as Intel
+ Saturn.
+ */
+
+ if (pci_dev_present(early_486_chipsets)) {
+ csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+ force_csr0 = 1;
+ }
+
+ /* bugfix: the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == AX88140) {
+ if ((csr0 & 0x3f00) == 0)
+ csr0 |= 0x2000;
+ }
+
+ /* PNIC doesn't have MWI/MRL/MRM... */
+ if (chip_idx == LC82C168)
+ csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
+
+ /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
+ if (tulip_uli_dm_quirk(pdev)) {
+ csr0 &= ~0x01f100ff;
+#if defined(__sparc__)
+ csr0 = (csr0 & ~0xff00) | 0xe000;
+#endif
+ }
+ /*
+ * And back to business
+ */
+
+ i = pci_enable_device(pdev);
+ if (i) {
+ printk (KERN_ERR PFX
+ "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
+ return i;
+ }
+
+ irq = pdev->irq;
+
+ /* alloc_etherdev ensures aligned and zeroed private structures */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (!dev) {
+ printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+ printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
+ "aborting\n", pci_name(pdev),
+ pci_resource_len (pdev, 0),
+ pci_resource_start (pdev, 0));
+ goto err_out_free_netdev;
+ }
+
+ /* grab all resources from both PIO and MMIO regions, as we
+ * don't want anyone else messing around with our hardware */
+ if (pci_request_regions (pdev, "tulip"))
+ goto err_out_free_netdev;
+
+#ifndef USE_IO_OPS
+ ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
+#else
+ ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
+#endif
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
+
+ /*
+ * initialize private data structure 'tp'
+ * it is zeroed and aligned in alloc_etherdev
+ */
+ tp = netdev_priv(dev);
+
+ tp->rx_ring = pci_alloc_consistent(pdev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma);
+ if (!tp->rx_ring)
+ goto err_out_mtable;
+ tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
+ tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
+
+ tp->chip_id = chip_idx;
+ tp->flags = tulip_tbl[chip_idx].flags;
+ tp->pdev = pdev;
+ tp->base_addr = ioaddr;
+ tp->revision = chip_rev;
+ tp->csr0 = csr0;
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->mii_lock);
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ dev->base_addr = (unsigned long)ioaddr;
+
+#ifdef CONFIG_TULIP_MWI
+ if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ tulip_mwi_config (pdev, dev);
+#else
+ /* MWI is broken for DC21143 rev 65... */
+ if (chip_idx == DC21143 && chip_rev == 65)
+ tp->csr0 &= ~MWI;
+#endif
+
+ /* Stop the chip's Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ pci_set_master(pdev);
+
+#ifdef CONFIG_GSC
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
+ switch (pdev->subsystem_device) {
+ default:
+ break;
+ case 0x1061:
+ case 0x1062:
+ case 0x1063:
+ case 0x1098:
+ case 0x1099:
+ case 0x10EE:
+ tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
+ chip_name = "GSC DS21140 Tulip";
+ }
+ }
+#endif
+
+ /* Clear the missed-packet counter. */
+ ioread32(ioaddr + CSR8);
+
+ /* The station address ROM is read byte serially. The register must
+ be polled, waiting for the value to be read bit serially from the
+ EEPROM.
+ */
+ ee_data = tp->eeprom;
+ sum = 0;
+ if (chip_idx == LC82C168) {
+ for (i = 0; i < 3; i++) {
+ int value, boguscnt = 100000;
+ iowrite32(0x600 | i, ioaddr + 0x98);
+ do
+ value = ioread32(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
+ sum += value & 0xffff;
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+ put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
+ put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4));
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
+
+ for (i = 0; i < sizeof(tp->eeprom); i+=2) {
+ u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
+ ee_data[i] = data & 0xff;
+ ee_data[i + 1] = data >> 8;
+ }
+
+ /* DEC now has a specification (see Notes) but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(ee_data, ee_data+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+ if (chip_idx == CONEXANT) {
+ /* Check that the tuple type and length is correct. */
+ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
+ sa_offset = 0x19A;
+ } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
+ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+#ifdef CONFIG_DDB5476
+ if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
+ /* DDB5476 MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef CONFIG_DDB5477
+ if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
+ /* DDB5477 MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef CONFIG_MIPS_COBALT
+ if ((pdev->bus->number == 0) &&
+ ((PCI_SLOT(pdev->devfn) == 7) ||
+ (PCI_SLOT(pdev->devfn) == 12))) {
+ /* Cobalt MAC address in first EEPROM locations. */
+ sa_offset = 0;
+ /* No media table either */
+ tp->flags &= ~HAS_MEDIA_TABLE;
+ }
+#endif
+#ifdef CONFIG_GSC
+ /* Check to see if we have a broken srom */
+ if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
+ /* pci_vendor_id and subsystem_id are swapped */
+ ee_data[0] = ee_data[2];
+ ee_data[1] = ee_data[3];
+ ee_data[2] = 0x61;
+ ee_data[3] = 0x10;
+
+			/* HSC-PCI boards need to be byte-swapped and shifted
+ * up 1 word. This shift needs to happen at the end
+ * of the MAC first because of the 2 byte overlap.
+ */
+ for (i = 4; i >= 0; i -= 2) {
+ ee_data[17 + i + 3] = ee_data[17 + i];
+ ee_data[16 + i + 5] = ee_data[16 + i];
+ }
+ }
+#endif
+
+ for (i = 0; i < 6; i ++) {
+ dev->dev_addr[i] = ee_data[i + sa_offset];
+ sum += ee_data[i + sa_offset];
+ }
+ }
+ /* Lite-On boards have the address byte-swapped. */
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
+ && dev->dev_addr[1] == 0x00)
+ for (i = 0; i < 6; i+=2) {
+ char tmp = dev->dev_addr[i];
+ dev->dev_addr[i] = dev->dev_addr[i+1];
+ dev->dev_addr[i+1] = tmp;
+ }
+ /* On the Zynx 315 Etherarray and other multiport boards only the
+ first Tulip has an EEPROM.
+ On Sparc systems the mac address is held in the OBP property
+ "local-mac-address".
+ The addresses of the subsequent ports are derived from the first.
+ Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+ that here as well. */
+ if (sum == 0 || sum == 6*0xff) {
+#if defined(__sparc__)
+ struct pcidev_cookie *pcp = pdev->sysdata;
+#endif
+ eeprom_missing = 1;
+ for (i = 0; i < 5; i++)
+ dev->dev_addr[i] = last_phys_addr[i];
+ dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__sparc__)
+ if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
+ "local-mac-address") == 6) {
+ prom_getproperty(pcp->prom_node, "local-mac-address",
+ dev->dev_addr, 6);
+ }
+#endif
+#if defined(__i386__) /* Patch up x86 BIOS bug. */
+ if (last_irq)
+ irq = last_irq;
+#endif
+ }
+
+ for (i = 0; i < 6; i++)
+ last_phys_addr[i] = dev->dev_addr[i];
+ last_irq = irq;
+ dev->irq = irq;
+
+ /* The lower four bits are the media type. */
+ if (board_idx >= 0 && board_idx < MAX_UNITS) {
+ if (options[board_idx] & MEDIA_MASK)
+ tp->default_port = options[board_idx] & MEDIA_MASK;
+ if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
+ tp->full_duplex = 1;
+ if (mtu[board_idx] > 0)
+ dev->mtu = mtu[board_idx];
+ }
+ if (dev->mem_start & MEDIA_MASK)
+ tp->default_port = dev->mem_start & MEDIA_MASK;
+ if (tp->default_port) {
+ printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->full_duplex_lock = 1;
+
+ if (tulip_media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ tp->mii_advertise = media2advert[tp->default_port - 9];
+ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+ }
+
+ if (tp->flags & HAS_MEDIA_TABLE) {
+ sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ tulip_parse_eeprom(dev);
+ strcpy(dev->name, "eth%d"); /* un-hack */
+ }
+
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+ tulip_select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+ }
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs
+ later, but takes much time. */
+ tulip_find_mii (dev, board_idx);
+ }
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->open = tulip_open;
+ dev->hard_start_xmit = tulip_start_xmit;
+ dev->tx_timeout = tulip_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_TULIP_NAPI
+ dev->poll = tulip_poll;
+ dev->weight = 16;
+#endif
+ dev->stop = tulip_close;
+ dev->get_stats = tulip_get_stats;
+ dev->do_ioctl = private_ioctl;
+ dev->set_multicast_list = set_rx_mode;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &poll_tulip;
+#endif
+ SET_ETHTOOL_OPS(dev, &ops);
+
+ if (register_netdev(dev))
+ goto err_out_free_ring;
+
+ printk(KERN_INFO "%s: %s rev %d at %p,",
+ dev->name, chip_name, chip_rev, ioaddr);
+ pci_set_drvdata(pdev, dev);
+
+ if (eeprom_missing)
+ printk(" EEPROM not present,");
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", IRQ %d.\n", irq);
+
+ if (tp->chip_id == PNIC2)
+ tp->link_change = pnic2_lnk_change;
+ else if (tp->flags & HAS_NWAY)
+ tp->link_change = t21142_lnk_change;
+ else if (tp->flags & HAS_PNICNWAY)
+ tp->link_change = pnic_lnk_change;
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ switch (chip_idx) {
+ case DC21140:
+ case DM910X:
+ case ULI526X:
+ default:
+ if (tp->mtable)
+ iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+ break;
+ case DC21142:
+ if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
+ iowrite32(csr6_mask_defstate, ioaddr + CSR6);
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
+ } else
+ t21142_start_nway(dev);
+ break;
+ case PNIC2:
+		/* just do a reset for sanity's sake */
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+ tp->nway = 1;
+ tp->nwayset = 0;
+ iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
+ iowrite32(0x30, ioaddr + CSR12);
+ iowrite32(0x0001F078, ioaddr + CSR6);
+ iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
+ }
+ break;
+ case MX98713:
+ case COMPEX9881:
+ iowrite32(0x00000000, ioaddr + CSR6);
+ iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ iowrite32(0x00000001, ioaddr + CSR13);
+ break;
+ case MX98715:
+ case MX98725:
+ iowrite32(0x01a80000, ioaddr + CSR6);
+ iowrite32(0xFFFFFFFF, ioaddr + CSR14);
+ iowrite32(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+ /* No initialization necessary. */
+ break;
+ }
+
+ /* put the chip in snooze mode until opened */
+ tulip_set_power_state (tp, 0, 1);
+
+ return 0;
+
+err_out_free_ring:
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+
+err_out_mtable:
+ if (tp->mtable)
+ kfree (tp->mtable);
+ pci_iounmap(pdev, ioaddr);
+
+err_out_free_res:
+ pci_release_regions (pdev);
+
+err_out_free_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+
+#ifdef CONFIG_PM
+
+static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev && netif_running (dev) && netif_device_present (dev)) {
+ netif_device_detach (dev);
+ tulip_down (dev);
+ /* pci_power_off(pdev, -1); */
+ }
+ return 0;
+}
+
+
+static int tulip_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev && netif_running (dev) && !netif_device_present (dev)) {
+#if 1
+ pci_enable_device (pdev);
+#endif
+ /* pci_power_on(pdev); */
+ tulip_up (dev);
+ netif_device_attach (dev);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static void __devexit tulip_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct tulip_private *tp;
+
+ if (!dev)
+ return;
+
+ tp = netdev_priv(dev);
+ unregister_netdev(dev);
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+ if (tp->mtable)
+ kfree (tp->mtable);
+ pci_iounmap(pdev, tp->base_addr);
+ free_netdev (dev);
+ pci_release_regions (pdev);
+ pci_set_drvdata (pdev, NULL);
+
+ /* pci_power_off (pdev, -1); */
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_tulip (struct net_device *dev)
+{
+ /* disable_irq here is not very nice, but with the lockless
+ interrupt handler we have no other choice. */
+ disable_irq(dev->irq);
+ tulip_interrupt (dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct pci_driver tulip_driver = {
+ .name = DRV_NAME,
+ .id_table = tulip_pci_tbl,
+ .probe = tulip_init_one,
+ .remove = __devexit_p(tulip_remove_one),
+#ifdef CONFIG_PM
+ .suspend = tulip_suspend,
+ .resume = tulip_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init tulip_init (void)
+{
+#ifdef MODULE
+ printk (KERN_INFO "%s", version);
+#endif
+
+ /* copy module parms into globals */
+ tulip_rx_copybreak = rx_copybreak;
+ tulip_max_interrupt_work = max_interrupt_work;
+
+ /* probe for and init boards */
+ return pci_module_init (&tulip_driver);
+}
+
+
+static void __exit tulip_cleanup (void)
+{
+ pci_unregister_driver (&tulip_driver);
+}
+
+
+module_init(tulip_init);
+module_exit(tulip_cleanup);
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
new file mode 100644
index 000000000000..f7e64ee11b1d
--- /dev/null
+++ b/drivers/net/tulip/winbond-840.c
@@ -0,0 +1,1716 @@
+/* winbond-840.c: A Linux PCI network adapter device driver. */
+/*
+ Written 1998-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/drivers.html
+
+ Do not remove the copyright information.
+ Do not change the version information unless an improvement has been made.
+ Merely removing my name, as Compex has done in the past, does not count
+ as an improvement.
+
+ Changelog:
+ * ported to 2.4
+ ???
+ * spin lock update, memory barriers, new style dma mappings
+ limit each tx buffer to < 1024 bytes
+ remove DescIntr from Rx descriptors (that's an Tx flag)
+	remove DescIntr from Rx descriptors (that's a Tx flag)
+ synchronize tx_q_bytes
+ software reset in tx_timeout
+ Copyright (C) 2000 Manfred Spraul
+ * further cleanups
+ power management.
+ support for big endian descriptors
+ Copyright (C) 2001 Manfred Spraul
+ * ethtool support (jgarzik)
+ * Replace some MII-related magic numbers with constants (jgarzik)
+
+ TODO:
+ * enable pci_power_off
+ * Wake-On-LAN
+*/
+
+#define DRV_NAME "winbond-840"
+#define DRV_VERSION "1.01-d"
+#define DRV_RELDATE "Nov-17-2001"
+
+
+/* Automatically extracted configuration info:
+probe-func: winbond840_probe
+config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
+
+c-help-name: Winbond W89c840 PCI Ethernet support
+c-help-symbol: CONFIG_WINBOND_840
+c-help: This driver is for the Winbond W89c840 chip. It also works with
+c-help: the TX9882 chip on the Compex RL100-ATX board.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The '840 uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define TX_QUEUE_LEN_RESTART 5
+#define RX_RING_SIZE 32
+
+#define TX_BUFLIMIT (1024-128)
+
+/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
+ To avoid overflowing we don't queue again until we have room for a
+ full-size packet.
+ */
+#define TX_FIFO_SIZE (2048)
+#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
+
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/rtnetlink.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
+KERN_INFO " http://www.scyld.com/network/drivers.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Winbond w89c840 chip.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+This chip is very similar to the Digital 21*4* "Tulip" family. The first
+twelve registers and the descriptor format are nearly identical. Read a
+Tulip manual for operational details.
+
+A significant difference is that the multicast filter and station address are
+stored in registers rather than loaded through a pseudo-transmit packet.
+
+Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
+full-sized packet we must use both data buffers in a descriptor. Thus the
+driver uses ring mode where descriptors are implicitly sequential in memory,
+rather than using the second descriptor address as a chain pointer to
+subsequent descriptors.
+
+IV. Notes
+
+If you are going to almost clone a Tulip, why not go all the way and avoid
+the need for a new driver?
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+http://www.winbond.com.tw/
+
+IVc. Errata
+
+A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
+correctly detect a full FIFO, and queuing more than 2048 bytes may result in
+silent data corruption.
+
+Test with 'ping -s 10000' on a fast computer.
+
+*/
+
+
+
+/*
+ PCI probe table.
+*/
+enum pci_id_flags_bits {
+ /* Set PCI command register bits before calling probe1(). */
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ /* Read and map the single following PCI BAR. */
+ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+};
+enum chip_capability_flags {
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
+#ifdef USE_IO_OPS
+#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
+#else
+#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
+#endif
+
+static struct pci_device_id w840_pci_tbl[] = {
+ { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
+ { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
+
+struct pci_id_info {
+ const char *name;
+ struct match_info {
+ int pci, pci_mask, subsystem, subsystem_mask;
+ int revision, revision_mask; /* Only 8 bits. */
+ } id;
+ enum pci_id_flags_bits pci_flags;
+ int io_size; /* Needed for I/O region check or ioremap(). */
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+static struct pci_id_info pci_id_tbl[] = {
+	{"Winbond W89c840",			/* Sometimes a Level-One switch card. */
+ { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ {"Winbond W89c840", { 0x08401050, 0xffffffff, },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {NULL,}, /* 0 terminated list. */
+};
+
+/* This driver was written to use PCI memory space, however some x86 systems
+ work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
+ accesses instead of memory space. */
+
+/* Offsets to the Command and Status Registers, "CSRs".
+ While similar to the Tulip, these registers are longword aligned.
+ Note: It's not useful to define symbolic names for every register bit in
+ the device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+*/
+enum w840_offsets {
+ PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
+ RxRingPtr=0x0C, TxRingPtr=0x10,
+ IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
+ RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
+ CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
+ MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
+ CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ NormalIntr=0x10000, AbnormalIntr=0x8000,
+ IntrPCIErr=0x2000, TimerInt=0x800,
+ IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
+ TxFIFOUnderflow=0x20, RxErrIntr=0x10,
+ TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
+};
+
+/* Bits in the NetworkConfig register. */
+enum rx_mode_bits {
+ AcceptErr=0x80, AcceptRunt=0x40,
+ AcceptBroadcast=0x20, AcceptMulticast=0x10,
+ AcceptAllPhys=0x08, AcceptMyPhys=0x02,
+};
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
+ MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct w840_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+struct w840_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
+ DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
+ DescIntr=0x80000000,
+};
+
+#define MII_CNT 1 /* winbond only supports one MII */
+struct netdev_private {
+ struct w840_rx_desc *rx_ring;
+ dma_addr_t rx_addr[RX_RING_SIZE];
+ struct w840_tx_desc *tx_ring;
+ dma_addr_t tx_addr[TX_RING_SIZE];
+ dma_addr_t ring_dma_addr;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int csr6;
+ struct w840_rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_q_bytes;
+ unsigned int tx_full; /* The Tx queue is full. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
+ u32 mii;
+ struct mii_if_info mii_if;
+ void __iomem *base_addr;
+};
+
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static int update_link(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void init_rxtx_rings(struct net_device *dev);
+static void free_rxtx_rings(struct netdev_private *np);
+static void init_registers(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static int alloc_ringdesc(struct net_device *dev);
+static void free_ringdesc(struct netdev_private *np);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static u32 __set_rx_mode(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+
+
+
+static int __devinit w840_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int find_cnt;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ void __iomem *ioaddr;
+ int bar = 1;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ if (pci_set_dma_mask(pdev,0xFFFFffff)) {
+ printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
+ pci_name(pdev));
+ return -EIO;
+ }
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+#ifdef USE_IO_OPS
+ bar = 0;
+#endif
+ ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size);
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+
+ /* Reset the chip to erase previous misconfiguration.
+ No hold time required! */
+ iowrite32(0x00000001, ioaddr + PCIBusCfg);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ spin_lock_init(&np->lock);
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->base_addr = ioaddr;
+
+ pci_set_drvdata(pdev, dev);
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii_if.full_duplex = 1;
+ if (option & 15)
+ printk(KERN_INFO "%s: ignoring user supplied media type %d",
+ dev->name, option & 15);
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->mii_if.full_duplex = 1;
+
+ if (np->mii_if.full_duplex)
+ np->mii_if.force_media = 1;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->tx_timeout = &tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_cleardev;
+
+ printk(KERN_INFO "%s: %s at %p, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
+ mdio_read(dev, phy, MII_PHYSID2);
+ printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ np->mii_if.phy_id = np->phys[0];
+ if (phy_idx == 0) {
+ printk(KERN_WARNING "%s: MII PHY not found -- this device may "
+ "not operate correctly.\n", dev->name);
+ }
+ }
+
+ find_cnt++;
+ return 0;
+
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
+ often serial bit streams generated by the host processor.
+   The example below is for the common 93c46 EEPROM, 64 16-bit words. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
+   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
+   made udelay() unreliable.
+   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+   deprecated.
+*/
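+/* A dummy read of the register flushes the previous posted write and
+   provides the required signal setup time. */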
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
+ EE_ChipSelect=0x801, EE_DataIn=0x08,
+};
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(void __iomem *addr, int location)
+{
+ int i;
+ int retval = 0;
+ void __iomem *ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+ iowrite32(EE_ChipSelect, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ iowrite32(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ iowrite32(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ iowrite32(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
+ iowrite32(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ iowrite32(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back 33 MHz PCI cycles. */
+#define mdio_delay(mdio_addr) ioread32(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 1;
+
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite32(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite32(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 20; i > 0; i--) {
+ iowrite32(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (location == 4 && phy_id == np->phys[0])
+ np->mii_if.advertising = value;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite32(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite32(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int i;
+
+ iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+ netif_device_detach(dev);
+ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ if (i)
+ goto out_err;
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ if((i=alloc_ringdesc(dev)))
+ goto out_err;
+
+ spin_lock_irq(&np->lock);
+ netif_device_attach(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+
+ netif_start_queue(dev);
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 1*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+ return 0;
+out_err:
+ netif_device_attach(dev);
+ return i;
+}
+
+#define MII_DAVICOM_DM9101 0x0181b800
+
+static int update_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int duplex, fasteth, result, mii_reg;
+
+	/* BMSR */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (mii_reg == 0xffff)
+ return np->csr6;
+ /* reread: the link status bit is sticky */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
+ if (!(mii_reg & 0x4)) {
+ if (netif_carrier_ok(dev)) {
+ if (debug)
+ printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
+ dev->name, np->phys[0]);
+ netif_carrier_off(dev);
+ }
+ return np->csr6;
+ }
+ if (!netif_carrier_ok(dev)) {
+ if (debug)
+ printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
+ dev->name, np->phys[0]);
+ netif_carrier_on(dev);
+ }
+
+ if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
+ /* If the link partner doesn't support autonegotiation
+		 * the MII detects its abilities with the "parallel detection".
+ * Some MIIs update the LPA register to the result of the parallel
+ * detection, some don't.
+ * The Davicom PHY [at least 0181b800] doesn't.
+		 * Instead, bits 8 and 13 of the BMCR are updated to the result
+		 * of the negotiation.
+ */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
+ duplex = mii_reg & BMCR_FULLDPLX;
+ fasteth = mii_reg & BMCR_SPEED100;
+ } else {
+ int negotiated;
+ mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
+ negotiated = mii_reg & np->mii_if.advertising;
+
+ duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
+ fasteth = negotiated & 0x380;
+ }
+ duplex |= np->mii_if.force_media;
+ /* remove fastether and fullduplex */
+ result = np->csr6 & ~0x20000200;
+ if (duplex)
+ result |= 0x200;
+ if (fasteth)
+ result |= 0x20000000;
+ if (result != np->csr6 && debug)
+ printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
+ dev->name, fasteth ? 100 : 10,
+ duplex ? "full" : "half", np->phys[0]);
+ return result;
+}
+
+#define RXTX_TIMEOUT 2000
+static inline void update_csr6(struct net_device *dev, int new)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int limit = RXTX_TIMEOUT;
+
+ if (!netif_device_present(dev))
+ new = 0;
+ if (new==np->csr6)
+ return;
+ /* stop both Tx and Rx processes */
+ iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
+ /* wait until they have really stopped */
+ for (;;) {
+ int csr5 = ioread32(ioaddr + IntrStatus);
+ int t;
+
+ t = (csr5 >> 17) & 0x07;
+ if (t==0||t==1) {
+ /* rx stopped */
+ t = (csr5 >> 20) & 0x07;
+ if (t==0||t==1)
+ break;
+ }
+
+ limit--;
+ if(!limit) {
+ printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
+ dev->name, csr5);
+ break;
+ }
+ udelay(1);
+ }
+ np->csr6 = new;
+ /* and restart them with the new configuration */
+ iowrite32(np->csr6, ioaddr + NetworkConfig);
+ if (new & 0x200)
+ np->mii_if.full_duplex = 1;
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ spin_lock_irq(&np->lock);
+ update_csr6(dev, update_link(dev));
+ spin_unlock_irq(&np->lock);
+ np->timer.expires = jiffies + 10*HZ;
+ add_timer(&np->timer);
+}
+
+static void init_rxtx_rings(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->rx_head_desc = &np->rx_ring[0];
+ np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
+
+	/* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].length = np->rx_buf_sz;
+ np->rx_ring[i].status = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].length |= DescEndRing;
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
+ skb->len,PCI_DMA_FROMDEVICE);
+
+ np->rx_ring[i].buffer1 = np->rx_addr[i];
+ np->rx_ring[i].status = DescOwn;
+ }
+
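+	/* dirty_rx trails cur_rx by the number of ring slots still lacking a
+	   buffer (i may be below RX_RING_SIZE if an allocation failed above). */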
+ np->cur_rx = 0;
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Initialize the Tx descriptors */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+ np->tx_full = 0;
+ np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
+
+ iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
+ iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
+ np->base_addr + TxRingPtr);
+
+}
+
+static void free_rxtx_rings(struct netdev_private* np)
+{
+ int i;
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->rx_addr[i],
+ np->rx_skbuff[i]->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_addr[i],
+ np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ }
+ np->tx_skbuff[i] = NULL;
+ }
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+#ifdef __BIG_ENDIAN
+ i = (1<<20); /* Big-endian descriptors */
+#else
+ i = 0;
+#endif
+ i |= (0x04<<2); /* skip length 4 u32 */
+ i |= 0x02; /* give Rx priority */
+
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword cache alignment, 8 longword burst.
+ 586: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 <not allowed> 0000 align to cache 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords */
+
+#if defined (__i386__) && !defined(MODULE)
+ /* When not a module we can work around broken '486 PCI boards. */
+ if (boot_cpu_data.x86 <= 4) {
+ i |= 0x4800;
+ printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
+ "alignment to 8 longwords.\n", dev->name);
+ } else {
+ i |= 0xE000;
+ }
+#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+ i |= 0xE000;
+#elif defined(__sparc__)
+ i |= 0x4800;
+#else
+#warning Processor architecture undefined
+ i |= 0x4800;
+#endif
+ iowrite32(i, ioaddr + PCIBusCfg);
+
+ np->csr6 = 0;
+ /* 128 byte Tx threshold;
+ Transmit on; Receive on; */
+ update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(0x1A0F5, ioaddr + IntrStatus);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
+
+ iowrite32(0, ioaddr + RxStartDemand);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
+
+ {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ /*
+ * Under high load dirty_tx and the internal tx descriptor pointer
+ * come out of sync, thus perform a software reset and reinitialize
+ * everything.
+ */
+
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ udelay(1);
+
+ free_rxtx_rings(np);
+ init_rxtx_rings(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ netif_wake_queue(dev);
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static int alloc_ringdesc(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ np->rx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct w840_rx_desc)*RX_RING_SIZE +
+ sizeof(struct w840_tx_desc)*TX_RING_SIZE,
+ &np->ring_dma_addr);
+ if(!np->rx_ring)
+ return -ENOMEM;
+ init_rxtx_rings(dev);
+ return 0;
+}
+
+static void free_ringdesc(struct netdev_private *np)
+{
+ pci_free_consistent(np->pci_dev,
+ sizeof(struct w840_rx_desc)*RX_RING_SIZE +
+ sizeof(struct w840_tx_desc)*TX_RING_SIZE,
+ np->rx_ring, np->ring_dma_addr);
+
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned entry;
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_addr[entry] = pci_map_single(np->pci_dev,
+ skb->data,skb->len, PCI_DMA_TODEVICE);
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buffer1 = np->tx_addr[entry];
+ if (skb->len < TX_BUFLIMIT) {
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ } else {
+ int len = skb->len - TX_BUFLIMIT;
+
+ np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
+ np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
+ }
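+ /* Note: the length field packs both buffer sizes -- buffer1's length in the
+    low bits and buffer2's starting at bit 11, hence the << 11 in the
+    overflow case above. */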
+ if(entry == TX_RING_SIZE-1)
+ np->tx_ring[entry].length |= DescEndRing;
+
+ /* Now acquire the irq spinlock.
+ * The difficult race is the ordering between
+ * increasing np->cur_tx and setting DescOwn:
+ * - if np->cur_tx is increased first the interrupt
+ * handler could consider the packet as transmitted
+ * since DescOwn is cleared.
+ * - If DescOwn is set first the NIC could report the
+ * packet as sent, but the interrupt handler would ignore it
+ * since the np->cur_tx was not yet increased.
+ */
+ spin_lock_irq(&np->lock);
+ np->cur_tx++;
+
+ wmb(); /* flush length, buffer1, buffer2 */
+ np->tx_ring[entry].status = DescOwn;
+ wmb(); /* flush status and kick the hardware */
+ iowrite32(0, np->base_addr + TxStartDemand);
+ np->tx_q_bytes += skb->len;
+ /* Work around horrible bug in the chip by marking the queue as full
+ when we do not have FIFO room for a maximum sized packet. */
+ if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
+ ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
+ netif_stop_queue(dev);
+ wmb();
+ np->tx_full = 1;
+ }
+ spin_unlock_irq(&np->lock);
+
+ dev->trans_start = jiffies;
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+static void netdev_tx_done(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int tx_status = np->tx_ring[entry].status;
+
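+ /* A negative status means the DescOwn bit is still set, i.e. the chip has
+    not finished with this descriptor yet. */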
+ if (tx_status < 0)
+ break;
+ if (tx_status & 0x8000) { /* There was an error, log it. */
+#ifndef final_version
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+#endif
+ np->stats.tx_errors++;
+ if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x0200) np->stats.tx_window_errors++;
+ if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
+ if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
+ np->stats.tx_heartbeat_errors++;
+ } else {
+#ifndef final_version
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
+ dev->name, entry, tx_status);
+#endif
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ np->stats.collisions += (tx_status >> 3) & 15;
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev,np->tx_addr[entry],
+ np->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ np->tx_q_bytes -= np->tx_skbuff[entry]->len;
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ }
+ if (np->tx_full &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
+ np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
+ /* The ring is no longer full, clear tbusy. */
+ np->tx_full = 0;
+ wmb();
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int work_limit = max_interrupt_work;
+ int handled = 0;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+ do {
+ u32 intr_status = ioread32(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDone | RxNoBuf))
+ netdev_rx(dev);
+ if (intr_status & RxNoBuf)
+ iowrite32(0, ioaddr + RxStartDemand);
+
+ if (intr_status & (TxIdle | IntrTxDone) &&
+ np->cur_tx != np->dirty_tx) {
+ spin_lock(&np->lock);
+ netdev_tx_done(dev);
+ spin_unlock(&np->lock);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
+ TimerInt | IntrTxStopped))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ /* Set the timer to re-enable the other interrupts after
+ 10*82usec ticks. */
+ spin_lock(&np->lock);
+ if (netif_device_present(dev)) {
+ iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ iowrite32(10, ioaddr + GPTimer);
+ }
+ spin_unlock(&np->lock);
+ break;
+ }
+ } while (1);
+
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (debug > 4) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (--work_limit >= 0) {
+ struct w840_rx_desc *desc = np->rx_head_desc;
+ s32 status = desc->status;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ status);
+ if (status < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x status %4.4x!\n",
+ dev->name, np->cur_rx, status);
+ np->stats.rx_length_errors++;
+ }
+ } else if (status & 0x8000) {
+ /* There was a fatal error. */
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ np->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) np->stats.rx_length_errors++;
+ if (status & 0x004C) np->stats.rx_frame_errors++;
+ if (status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ int pkt_len = ((status >> 16) & 0x7ff) - 4;
+
+#ifndef final_version
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, status);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
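+ /* Below rx_copybreak the frame is copied into a small fresh skb so the
+    full-sized ring buffer stays mapped for the chip; larger frames hand the
+    ring skb upstream and the slot is refilled in the loop further down. */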
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (debug > 5)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += pkt_len;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_addr[entry] = pci_map_single(np->pci_dev,
+ skb->tail,
+ skb->len, PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].buffer1 = np->rx_addr[entry];
+ }
+ wmb();
+ np->rx_ring[entry].status = DescOwn;
+ }
+
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
+ dev->name, intr_status);
+ if (intr_status == 0xffffffff)
+ return;
+ spin_lock(&np->lock);
+ if (intr_status & TxFIFOUnderflow) {
+ int new;
+ /* Bump up the Tx threshold */
+#if 0
+ /* This causes lots of dropped packets,
+ * and under high load even tx_timeouts
+ */
+ new = np->csr6 + 0x4000;
+#else
+ new = (np->csr6 >> 14)&0x7f;
+ if (new < 64)
+ new *= 2;
+ else
+ new = 127; /* load full packet before starting */
+ new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
+#endif
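+ /* The threshold lives in the seven bits at CSR6 bit 14; each underflow
+    doubles it, and the cap of 127 makes the chip buffer the whole frame
+    before transmitting (see the comment above). */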
+ printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
+ dev->name, new);
+ update_csr6(dev, new);
+ }
+ if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
+ np->stats.rx_errors++;
+ }
+ if (intr_status & TimerInt) {
+ /* Re-enable other interrupts. */
+ if (netif_device_present(dev))
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
+ }
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ iowrite32(0, ioaddr + RxStartDemand);
+ spin_unlock(&np->lock);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ /* The chip only needs to report frames it silently dropped. */
+ spin_lock_irq(&np->lock);
+ if (netif_running(dev) && netif_device_present(dev))
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ spin_unlock_irq(&np->lock);
+
+ return &np->stats;
+}
+
+
+static u32 __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
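+ /* Hash each address into one of the 64 filter bits: the top six bits of the
+    Ethernet CRC (inverted) pick the bit, bit 5 selects MulticastFilter0/1 and
+    bits 4..0 the position within that register. */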
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ filterbit &= 0x3f;
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
+ return rx_mode;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 rx_mode = __set_rx_mode(dev);
+ spin_lock_irq(&np->lock);
+ update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
+ spin_unlock_irq(&np->lock);
+}
+
+static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(rq);
+ struct netdev_private *np = netdev_priv(dev);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = np->phys[0] & 0x1f;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irq(&np->lock);
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ spin_unlock_irq(&np->lock);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irq(&np->lock);
+ mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irq(&np->lock);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ netif_stop_queue(dev);
+
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
+ "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Stop the chip's Tx and Rx processes. */
+ spin_lock_irq(&np->lock);
+ netif_device_detach(dev);
+ update_csr6(dev, 0);
+ iowrite32(0x0000, ioaddr + IntrEnable);
+ spin_unlock_irq(&np->lock);
+
+ free_irq(dev->irq, dev);
+ wmb();
+ netif_device_attach(dev);
+
+ if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+
+#ifdef __i386__
+ if (debug > 2) {
+ int i;
+
+ printk(KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ del_timer_sync(&np->timer);
+
+ free_rxtx_rings(np);
+ free_ringdesc(np);
+
+ return 0;
+}
+
+static void __devexit w840_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ pci_release_regions(pdev);
+ pci_iounmap(pdev, np->base_addr);
+ free_netdev(dev);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * suspend/resume synchronization:
+ * - open, close, do_ioctl:
+ * rtnl_lock, & netif_device_detach after the rtnl_unlock.
+ * - get_stats:
+ * spin_lock_irq(np->lock), doesn't touch hw if not present
+ * - hard_start_xmit:
+ * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+ * - tx_timeout:
+ * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * - set_multicast_list
+ * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * - interrupt handler
+ * doesn't touch hw if not present, synchronize_irq waits for
+ * running instances of the interrupt handler.
+ *
+ * Disabling hw requires clearing csr6 & IntrEnable.
+ * update_csr6 and all functions that write IntrEnable check netif_device_present
+ * before setting any bits.
+ *
+ * Detach must occur with np->lock held (spin_lock_irq()); interrupts from a detached
+ * device would cause an irq storm.
+ */
+static int w840_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&np->timer);
+
+ spin_lock_irq(&np->lock);
+ netif_device_detach(dev);
+ update_csr6(dev, 0);
+ iowrite32(0, ioaddr + IntrEnable);
+ netif_stop_queue(dev);
+ spin_unlock_irq(&np->lock);
+
+ spin_unlock_wait(&dev->xmit_lock);
+ synchronize_irq(dev->irq);
+
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+
+ /* no more hardware accesses behind this line. */
+
+ if (np->csr6) BUG();
+ if (ioread32(ioaddr + IntrEnable)) BUG();
+
+ /* pci_power_off(pdev, -1); */
+
+ free_rxtx_rings(np);
+ } else {
+ netif_device_detach(dev);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static int w840_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out; /* device not suspended */
+ if (netif_running(dev)) {
+ pci_enable_device(pdev);
+ /* pci_power_on(pdev); */
+
+ spin_lock_irq(&np->lock);
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ ioread32(np->base_addr+PCIBusCfg);
+ udelay(1);
+ netif_device_attach(dev);
+ init_rxtx_rings(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+
+ netif_wake_queue(dev);
+
+ mod_timer(&np->timer, jiffies + 1*HZ);
+ } else {
+ netif_device_attach(dev);
+ }
+out:
+ rtnl_unlock();
+ return 0;
+}
+#endif
+
+static struct pci_driver w840_driver = {
+ .name = DRV_NAME,
+ .id_table = w840_pci_tbl,
+ .probe = w840_probe1,
+ .remove = __devexit_p(w840_remove1),
+#ifdef CONFIG_PM
+ .suspend = w840_suspend,
+ .resume = w840_resume,
+#endif
+};
+
+static int __init w840_init(void)
+{
+ printk(version);
+ return pci_module_init(&w840_driver);
+}
+
+static void __exit w840_exit(void)
+{
+ pci_unregister_driver(&w840_driver);
+}
+
+module_init(w840_init);
+module_exit(w840_exit);
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
new file mode 100644
index 000000000000..26cc4f6378c7
--- /dev/null
+++ b/drivers/net/tulip/xircom_cb.c
@@ -0,0 +1,1277 @@
+/*
+ * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
+ *
+ * This software is (C) by the respective authors, and licensed under the GPL
+ * License.
+ *
+ * Written by Arjan van de Ven for Red Hat, Inc.
+ * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#ifdef DEBUG
+#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
+#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
+#else
+#define enter(x) do {} while (0)
+#define leave(x) do {} while (0)
+#endif
+
+
+MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
+MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
+MODULE_LICENSE("GPL");
+
+
+
+/* IO registers on the card, offsets */
+#define CSR0 0x00
+#define CSR1 0x08
+#define CSR2 0x10
+#define CSR3 0x18
+#define CSR4 0x20
+#define CSR5 0x28
+#define CSR6 0x30
+#define CSR7 0x38
+#define CSR8 0x40
+#define CSR9 0x48
+#define CSR10 0x50
+#define CSR11 0x58
+#define CSR12 0x60
+#define CSR13 0x68
+#define CSR14 0x70
+#define CSR15 0x78
+#define CSR16 0x80
+
+/* PCI registers */
+#define PCI_POWERMGMT 0x40
+
+/* Offsets of the buffers within the descriptor pages, in bytes */
+
+#define NUMDESCRIPTORS 4
+
+static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144};
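+/* Each of the two 8192-byte DMA blocks allocated in xircom_probe holds the
+   four descriptors in its first 128 bytes, with the four packet buffers at
+   the byte offsets listed above. */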
+
+
+struct xircom_private {
+ /* Send and receive buffers, kernel-addressable and dma addressable forms */
+
+ unsigned int *rx_buffer;
+ unsigned int *tx_buffer;
+
+ dma_addr_t rx_dma_handle;
+ dma_addr_t tx_dma_handle;
+
+ struct sk_buff *tx_skb[4];
+
+ unsigned long io_port;
+ int open;
+
+ /* transmit_used is the rotating counter that indicates which transmit
+ descriptor has to be used next */
+ int transmit_used;
+
+ /* Spinlock to serialize register operations.
+ It must be held while manipulating the following registers:
+ CSR0, CSR6, CSR7, CSR9, CSR10, CSR15
+ */
+ spinlock_t lock;
+
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ struct net_device_stats stats;
+};
+
+
+/* Function prototypes */
+static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void xircom_remove(struct pci_dev *pdev);
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int xircom_open(struct net_device *dev);
+static int xircom_close(struct net_device *dev);
+static void xircom_up(struct xircom_private *card);
+static struct net_device_stats *xircom_get_stats(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xircom_poll_controller(struct net_device *dev);
+#endif
+
+static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset);
+static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset);
+static void read_mac_address(struct xircom_private *card);
+static void transceiver_voodoo(struct xircom_private *card);
+static void initialize_card(struct xircom_private *card);
+static void trigger_transmit(struct xircom_private *card);
+static void trigger_receive(struct xircom_private *card);
+static void setup_descriptors(struct xircom_private *card);
+static void remove_descriptors(struct xircom_private *card);
+static int link_status_changed(struct xircom_private *card);
+static void activate_receiver(struct xircom_private *card);
+static void deactivate_receiver(struct xircom_private *card);
+static void activate_transmitter(struct xircom_private *card);
+static void deactivate_transmitter(struct xircom_private *card);
+static void enable_transmit_interrupt(struct xircom_private *card);
+static void enable_receive_interrupt(struct xircom_private *card);
+static void enable_link_interrupt(struct xircom_private *card);
+static void disable_all_interrupts(struct xircom_private *card);
+static int link_status(struct xircom_private *card);
+
+
+
+static struct pci_device_id xircom_pci_table[] = {
+ {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,},
+};
+MODULE_DEVICE_TABLE(pci, xircom_pci_table);
+
+static struct pci_driver xircom_ops = {
+ .name = "xircom_cb",
+ .id_table = xircom_pci_table,
+ .probe = xircom_probe,
+ .remove = xircom_remove,
+ .suspend =NULL,
+ .resume =NULL
+};
+
+
+#ifdef DEBUG
+static void print_binary(unsigned int number)
+{
+ int i,i2;
+ char buffer[64];
+ memset(buffer,0,64);
+ i2=0;
+ for (i=31;i>=0;i--) {
+ if (number & (1<<i))
+ buffer[i2++]='1';
+ else
+ buffer[i2++]='0';
+ if ((i&3)==0)
+ buffer[i2++]=' ';
+ }
+ printk("%s\n",buffer);
+}
+#endif
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct xircom_private *private = netdev_priv(dev);
+
+ strcpy(info->driver, "xircom_cb");
+ strcpy(info->bus_info, pci_name(private->pdev));
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/* xircom_probe is the code that gets called on device insertion.
+ it sets up the hardware and registers the device with the network layer.
+
+ TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
+ first two packets that get sent, and pump hates that.
+
+ */
+static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *dev = NULL;
+ struct xircom_private *private;
+ unsigned char chip_rev;
+ unsigned long flags;
+ unsigned short tmp16;
+ enter("xircom_probe");
+
+ /* First do the PCI initialisation */
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ /* disable all powermanagement */
+ pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
+
+ pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
+
+ /* clear PCI status, if any */
+ pci_read_config_word (pdev,PCI_STATUS, &tmp16);
+ pci_write_config_word (pdev, PCI_STATUS,tmp16);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+
+ if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
+ printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
+ return -ENODEV;
+ }
+
+ /*
+ Before changing the hardware, allocate the memory.
+ This way, we can fail gracefully if not enough memory
+ is available.
+ */
+ dev = alloc_etherdev(sizeof(struct xircom_private));
+ if (!dev) {
+ printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n");
+ goto device_fail;
+ }
+ private = netdev_priv(dev);
+
+ /* Allocate the send/receive buffers */
+ private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
+ if (private->rx_buffer == NULL) {
+ printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
+ goto rx_buf_fail;
+ }
+ private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
+ if (private->tx_buffer == NULL) {
+ printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
+ goto tx_buf_fail;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+
+ private->dev = dev;
+ private->pdev = pdev;
+ private->io_port = pci_resource_start(pdev, 0);
+ spin_lock_init(&private->lock);
+ dev->irq = pdev->irq;
+ dev->base_addr = private->io_port;
+
+ initialize_card(private);
+ read_mac_address(private);
+ setup_descriptors(private);
+
+ dev->open = &xircom_open;
+ dev->hard_start_xmit = &xircom_start_xmit;
+ dev->stop = &xircom_close;
+ dev->get_stats = &xircom_get_stats;
+ dev->priv = private;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = &xircom_poll_controller;
+#endif
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ pci_set_drvdata(pdev, dev);
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
+ goto reg_fail;
+ }
+
+ printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
+ /* start the transmitter to get a heartbeat */
+ /* TODO: send 2 dummy packets here */
+ transceiver_voodoo(private);
+
+ spin_lock_irqsave(&private->lock,flags);
+ activate_transmitter(private);
+ activate_receiver(private);
+ spin_unlock_irqrestore(&private->lock,flags);
+
+ trigger_receive(private);
+
+ leave("xircom_probe");
+ return 0;
+
+reg_fail:
+ pci_free_consistent(pdev, 8192, private->tx_buffer, private->tx_dma_handle);
+tx_buf_fail:
+ pci_free_consistent(pdev, 8192, private->rx_buffer, private->rx_dma_handle);
+rx_buf_fail:
+ free_netdev(dev);
+device_fail:
+ return -ENODEV;
+}
+
+
+/*
+ xircom_remove is called on module-unload or on device-eject.
+ it unregisters the irq, io-region and network device.
+ Interrupts and such are already stopped in the "ifconfig ethX down"
+ code.
+ */
+static void __devexit xircom_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct xircom_private *card = netdev_priv(dev);
+
+ enter("xircom_remove");
+ pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
+ pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
+
+ release_region(dev->base_addr, 128);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+ leave("xircom_remove");
+}
+
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct xircom_private *card = netdev_priv(dev);
+ unsigned int status;
+ int i;
+
+ enter("xircom_interrupt\n");
+
+ spin_lock(&card->lock);
+ status = inl(card->io_port+CSR5);
+
+#ifdef DEBUG
+ print_binary(status);
+ printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
+ printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
+#endif
+ /* Handle shared irq and hotplug */
+ if (status == 0 || status == 0xffffffff) {
+ spin_unlock(&card->lock);
+ return IRQ_NONE;
+ }
+
+ if (link_status_changed(card)) {
+ int newlink;
+ printk(KERN_DEBUG "xircom_cb: Link status has changed \n");
+ newlink = link_status(card);
+ printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink);
+ if (newlink)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ }
+
+ /* Clear all remaining interrupts */
+ status |= 0xffffffff; /* FIXME: make this clear only the
+ real existing bits */
+ outl(status,card->io_port+CSR5);
+
+
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
+
+
+ spin_unlock(&card->lock);
+ leave("xircom_interrupt");
+ return IRQ_HANDLED;
+}
+
+static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xircom_private *card;
+ unsigned long flags;
+ int nextdescriptor;
+ int desc;
+ enter("xircom_start_xmit");
+
+ card = netdev_priv(dev);
+ spin_lock_irqsave(&card->lock,flags);
+
+ /* First see if we can free some descriptors */
+ for (desc=0;desc<NUMDESCRIPTORS;desc++)
+ investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
+
+
+ nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
+ desc = card->transmit_used;
+
+ /* only send the packet if the descriptor is free */
+ if (card->tx_buffer[4*desc]==0) {
+ /* Copy the packet data; zero the memory first as the card
+ sometimes sends more than you ask it to. */
+
+ memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
+ memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
+
+
+ /* FIXME: The specification tells us that the length we send HAS to be a multiple of
+ 4 bytes. */
+
+ card->tx_buffer[4*desc+1] = skb->len;
+ if (desc == NUMDESCRIPTORS-1)
+ card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */
+
+ card->tx_buffer[4*desc+1] |= 0xF0000000;
+ /* 0xF0... means want interrupts*/
+ card->tx_skb[desc] = skb;
+
+ wmb();
+ /* This gives the descriptor to the card */
+ card->tx_buffer[4*desc] = 0x80000000;
+ trigger_transmit(card);
+ if (((int)card->tx_buffer[nextdescriptor*4])<0) { /* next descriptor is occupied... */
+ netif_stop_queue(dev);
+ }
+ card->transmit_used = nextdescriptor;
+ leave("xircom-start_xmit - sent");
+ spin_unlock_irqrestore(&card->lock,flags);
+ return 0;
+ }
+
+
+
+ /* Uh oh... no free descriptor... drop the packet */
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&card->lock,flags);
+ trigger_transmit(card);
+
+ return -EIO;
+}
+
+
+
+
+static int xircom_open(struct net_device *dev)
+{
+ struct xircom_private *xp = netdev_priv(dev);
+ int retval;
+ enter("xircom_open");
+ printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+ retval = request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval) {
+ leave("xircom_open - No IRQ");
+ return retval;
+ }
+
+ xircom_up(xp);
+ xp->open = 1;
+ leave("xircom_open");
+ return 0;
+}
+
+static int xircom_close(struct net_device *dev)
+{
+ struct xircom_private *card;
+ unsigned long flags;
+
+ enter("xircom_close");
+ card = netdev_priv(dev);
+ netif_stop_queue(dev); /* we don't want new packets */
+
+
+ spin_lock_irqsave(&card->lock,flags);
+
+ disable_all_interrupts(card);
+#if 0
+ /* We can enable this again once we send dummy packets on ifconfig ethX up */
+ deactivate_receiver(card);
+ deactivate_transmitter(card);
+#endif
+ remove_descriptors(card);
+
+ spin_unlock_irqrestore(&card->lock,flags);
+
+ card->open = 0;
+ free_irq(dev->irq,dev);
+
+ leave("xircom_close");
+
+ return 0;
+
+}
+
+
+
+static struct net_device_stats *xircom_get_stats(struct net_device *dev)
+{
+ struct xircom_private *card = netdev_priv(dev);
+ return &card->stats;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xircom_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xircom_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+
+static void initialize_card(struct xircom_private *card)
+{
+ unsigned int val;
+ unsigned long flags;
+ enter("initialize_card");
+
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ /* First: reset the card */
+ val = inl(card->io_port + CSR0);
+ val |= 0x01; /* Software reset */
+ outl(val, card->io_port + CSR0);
+
+ udelay(100); /* give the card some time to reset */
+
+ val = inl(card->io_port + CSR0);
+ val &= ~0x01; /* disable Software reset */
+ outl(val, card->io_port + CSR0);
+
+
+ val = 0; /* Value 0x00 is a safe and conservative value
+ for the PCI configuration settings */
+ outl(val, card->io_port + CSR0);
+
+
+ disable_all_interrupts(card);
+ deactivate_receiver(card);
+ deactivate_transmitter(card);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ leave("initialize_card");
+}
+
+/*
+trigger_transmit causes the card to check for frames to be transmitted.
+This is accomplished by writing to the CSR1 port. The documentation
+claims that the act of writing is sufficient and that the value is
+ignored; I chose zero.
+*/
+static void trigger_transmit(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("trigger_transmit");
+
+ val = 0;
+ outl(val, card->io_port + CSR1);
+
+ leave("trigger_transmit");
+}
+
+/*
+trigger_receive causes the card to check for empty frames in the
+descriptor list in which packets can be received.
+This is accomplished by writing to the CSR2 port. The documentation
+claims that the act of writing is sufficient and that the value is
+ignored; I chose zero.
+*/
+static void trigger_receive(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("trigger_receive");
+
+ val = 0;
+ outl(val, card->io_port + CSR2);
+
+ leave("trigger_receive");
+}
+
+/*
+setup_descriptors initializes the send and receive buffers to be valid
+descriptors and programs the addresses into the card.
+*/
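+/* Each descriptor is four 32-bit words: word 0 holds status/ownership, word 1
+   the control bits and buffer lengths, word 2 the buffer address and word 3
+   the (unused) second buffer address -- hence the i*4 + n indexing below. */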
+static void setup_descriptors(struct xircom_private *card)
+{
+ unsigned int val;
+ unsigned int address;
+ int i;
+ enter("setup_descriptors");
+
+
+ if (card->rx_buffer == NULL)
+ BUG();
+ if (card->tx_buffer == NULL)
+ BUG();
+
+ /* Receive descriptors */
+ memset(card->rx_buffer, 0, 128); /* clear the descriptors */
+ for (i=0;i<NUMDESCRIPTORS;i++ ) {
+
+ /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */
+ card->rx_buffer[i*4 + 0] = 0x80000000;
+ /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
+ card->rx_buffer[i*4 + 1] = 1536;
+ if (i==NUMDESCRIPTORS-1)
+ card->rx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
+
+ /* Rx Descr2: address of the buffer
+ we store the buffer at the 2nd half of the page */
+
+ address = (unsigned long) card->rx_dma_handle;
+ card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
+ /* Rx Desc3: address of 2nd buffer -> 0 */
+ card->rx_buffer[i*4 + 3] = 0;
+ }
+
+ wmb();
+ /* Write the receive descriptor ring address to the card */
+ address = (unsigned long) card->rx_dma_handle;
+ val = cpu_to_le32(address);
+ outl(val, card->io_port + CSR3); /* Receive descr list address */
+
+
+ /* transmit descriptors */
+ memset(card->tx_buffer, 0, 128); /* clear the descriptors */
+
+ for (i=0;i<NUMDESCRIPTORS;i++ ) {
+ /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
+ card->tx_buffer[i*4 + 0] = 0x00000000;
+ /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
+ card->tx_buffer[i*4 + 1] = 1536;
+ if (i==NUMDESCRIPTORS-1)
+ card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
+
+ /* Tx Descr2: address of the buffer
+ we store the buffer at the 2nd half of the page */
+ address = (unsigned long) card->tx_dma_handle;
+ card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
+ /* Tx Desc3: address of 2nd buffer -> 0 */
+ card->tx_buffer[i*4 + 3] = 0;
+ }
+
+ wmb();
+ /* write the transmit descriptor ring address to the card */
+ address = (unsigned long) card->tx_dma_handle;
+ val =cpu_to_le32(address);
+ outl(val, card->io_port + CSR4); /* xmit descr list address */
+
+ leave("setup_descriptors");
+}
+
+/*
+remove_descriptors informs the card the descriptors are no longer
+valid by setting the address in the card to 0x00.
+*/
+static void remove_descriptors(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("remove_descriptors");
+
+ val = 0;
+ outl(val, card->io_port + CSR3); /* Receive descriptor address */
+ outl(val, card->io_port + CSR4); /* Send descriptor address */
+
+ leave("remove_descriptors");
+}
+
+/*
+link_status_changed returns 1 if the card has indicated that
+the link status has changed. The new link status has to be read from CSR12.
+
+This function also clears the status-bit.
+*/
+static int link_status_changed(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("link_status_changed");
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (1 << 27)) == 0) { /* no change */
+ leave("link_status_changed - nochange");
+ return 0;
+ }
+
+ /* clear the event by writing a 1 to the bit in the
+ status register. */
+ val = (1 << 27);
+ outl(val, card->io_port + CSR5);
+
+ leave("link_status_changed - changed");
+ return 1;
+}
+
+
+/*
+transmit_active returns 1 if the transmitter on the card is
+in a non-stopped state.
+*/
+static int transmit_active(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("transmit_active");
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (7 << 20)) == 0) { /* transmitter disabled */
+ leave("transmit_active - inactive");
+ return 0;
+ }
+
+ leave("transmit_active - active");
+ return 1;
+}
+
+/*
+receive_active returns 1 if the receiver on the card is
+in a non-stopped state.
+*/
+static int receive_active(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("receive_active");
+
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (7 << 17)) == 0) { /* receiver disabled */
+ leave("receive_active - inactive");
+ return 0;
+ }
+
+ leave("receive_active - active");
+ return 1;
+}
+
+/*
+activate_receiver enables the receiver on the card.
+Before being allowed to activate the receiver, the receiver
+must be completely de-activated. To achieve this,
+this code actually disables the receiver first; then it waits for the
+receiver to become inactive, then it activates the receiver and then
+it waits for the receiver to be active.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void activate_receiver(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+ enter("activate_receiver");
+
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+
+ /* If the "active" bit is set and the receiver is already
+ active, no need to do the expensive thing */
+ if ((val&2) && (receive_active(card)))
+ return;
+
+
+ val = val & ~2; /* disable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ }
+
+ /* enable the receiver */
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val | 2; /* enable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ /* now wait for the card to activate again */
+ counter = 10;
+ while (counter > 0) {
+ if (receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n");
+ }
+
+ leave("activate_receiver");
+}
+
+/*
+deactivate_receiver disables the receiver on the card.
+To achieve this, the code disables the receiver first;
+then it waits for the receiver to become inactive.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void deactivate_receiver(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+ enter("deactivate_receiver");
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val & ~2; /* disable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ }
+
+
+ leave("deactivate_receiver");
+}
+
+
+/*
+activate_transmitter enables the transmitter on the card.
+Before being allowed to activate the transmitter, the transmitter
+must be completely de-activated. To achieve this,
+this code actually disables the transmitter first; then it waits for the
+transmitter to become inactive, then it activates the transmitter and then
+it waits for the transmitter to be active again.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void activate_transmitter(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+ enter("activate_transmitter");
+
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+
+ /* If the "active" bit is set and the receiver is already
+ active, no need to do the expensive thing */
+ if ((val&(1<<13)) && (transmit_active(card)))
+ return;
+
+ val = val & ~(1 << 13); /* disable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ }
+
+ /* enable the transmitter */
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val | (1 << 13); /* enable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ /* now wait for the card to activate again */
+ counter = 10;
+ while (counter > 0) {
+ if (transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n");
+ }
+
+ leave("activate_transmitter");
+}
+
+/*
+deactivate_transmitter disables the transmitter on the card.
+To achieve this, the code disables the transmitter first;
+then it waits for the transmitter to become inactive.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void deactivate_transmitter(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+ enter("deactivate_transmitter");
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val & ~2; /* disable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ counter = 20;
+ while (counter > 0) {
+ if (!transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ }
+
+
+ leave("deactivate_transmitter");
+}
+
+
+/*
+enable_transmit_interrupt enables the transmit interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_transmit_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_transmit_interrupt");
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val |= 1; /* enable the transmit interrupt */
+ outl(val, card->io_port + CSR7);
+
+ leave("enable_transmit_interrupt");
+}
+
+
+/*
+enable_receive_interrupt enables the receive interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_receive_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_receive_interrupt");
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val = val | (1 << 6); /* enable the receive interrupt */
+ outl(val, card->io_port + CSR7);
+
+ leave("enable_receive_interrupt");
+}
+
+/*
+enable_link_interrupt enables the link status change interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_link_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_link_interrupt");
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val = val | (1 << 27); /* enable the link status chage interrupt */
+ outl(val, card->io_port + CSR7);
+
+ leave("enable_link_interrupt");
+}
+
+
+
+/*
+disable_all_interrupts disables all interrupts
+
+must be called with the lock held and interrupts disabled.
+*/
+static void disable_all_interrupts(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_all_interrupts");
+
+ val = 0; /* disable all interrupts */
+ outl(val, card->io_port + CSR7);
+
+ leave("disable_all_interrupts");
+}
+
+/*
+enable_common_interrupts enables several weird interrupts
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_common_interrupts(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_link_interrupt");
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val |= (1<<16); /* Normal Interrupt Summary */
+ val |= (1<<15); /* Abnormal Interrupt Summary */
+ val |= (1<<13); /* Fatal bus error */
+ val |= (1<<8); /* Receive Process Stopped */
+ val |= (1<<7); /* Receive Buffer Unavailable */
+ val |= (1<<5); /* Transmit Underflow */
+ val |= (1<<2); /* Transmit Buffer Unavailable */
+ val |= (1<<1); /* Transmit Process Stopped */
+ outl(val, card->io_port + CSR7);
+
+ leave("enable_link_interrupt");
+}
+
+/*
+enable_promisc starts promisc mode
+
+must be called with the lock held and interrupts disabled.
+*/
+static int enable_promisc(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("enable_promisc");
+
+ val = inl(card->io_port + CSR6);
+ val = val | (1 << 6);
+ outl(val, card->io_port + CSR6);
+
+ leave("enable_promisc");
+ return 1;
+}
+
+
+
+
+/*
+link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for.. guess what.
+
+Must be called in locked state with interrupts disabled
+*/
+static int link_status(struct xircom_private *card)
+{
+ unsigned int val;
+ enter("link_status");
+
+ val = inb(card->io_port + CSR12);
+
+ if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
+ return 10;
+ if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
+ return 100;
+
+ /* If we get here -> no link at all */
+
+ leave("link_status");
+ return 0;
+}
+
+
+
+
+
+/*
+ read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
+
+ This function will take the spinlock itself and can, as a result, not be called with the lock held.
+ */
+static void read_mac_address(struct xircom_private *card)
+{
+ unsigned char j, tuple, link, data_id, data_count;
+ unsigned long flags;
+ int i;
+
+ enter("read_mac_address");
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
+ for (i = 0x100; i < 0x1f7; i += link + 2) {
+ outl(i, card->io_port + CSR10);
+ tuple = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 1, card->io_port + CSR10);
+ link = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 2, card->io_port + CSR10);
+ data_id = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 3, card->io_port + CSR10);
+ data_count = inl(card->io_port + CSR9) & 0xff;
+ if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ /*
+ * This is it. We have the data we want.
+ */
+ for (j = 0; j < 6; j++) {
+ outl(i + j + 4, card->io_port + CSR10);
+ card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
+ }
+ break;
+ } else if (link == 0) {
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+#ifdef DEBUG
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', card->dev->dev_addr[i]);
+ printk("\n");
+#endif
+ leave("read_mac_address");
+}
+
+
+/*
+ transceiver_voodoo() enables the external UTP plug thingy.
+ it's called voodoo as I stole this code and cannot cross-reference
+ it with the specification.
+ */
+static void transceiver_voodoo(struct xircom_private *card)
+{
+ unsigned long flags;
+
+ enter("transceiver_voodoo");
+
+ /* disable all powermanagement */
+ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
+
+ setup_descriptors(card);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ outl(0x0008, card->io_port + CSR15);
+ udelay(25);
+ outl(0xa8050000, card->io_port + CSR15);
+ udelay(25);
+ outl(0xa00f0000, card->io_port + CSR15);
+ udelay(25);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ netif_start_queue(card->dev);
+ leave("transceiver_voodoo");
+}
+
+
+static void xircom_up(struct xircom_private *card)
+{
+ unsigned long flags;
+ int i;
+
+ enter("xircom_up");
+
+ /* disable all powermanagement */
+ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
+
+ setup_descriptors(card);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+
+ enable_link_interrupt(card);
+ enable_transmit_interrupt(card);
+ enable_receive_interrupt(card);
+ enable_common_interrupts(card);
+ enable_promisc(card);
+
+ /* The card can have received packets already, read them away now */
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
+
+
+ spin_unlock_irqrestore(&card->lock, flags);
+ trigger_receive(card);
+ trigger_transmit(card);
+ netif_start_queue(card->dev);
+ leave("xircom_up");
+}
+
+/* Bufferoffset is in BYTES */
+static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
+{
+ int status;
+
+ enter("investigate_read_descriptor");
+ status = card->rx_buffer[4*descnr];
+
+ if ((status > 0)) { /* packet received */
+
+ /* TODO: discard error packets */
+
+ short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1518) {
+ printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len);
+ pkt_len = 1518;
+ }
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ card->stats.rx_dropped++;
+ goto out;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ card->stats.rx_packets++;
+ card->stats.rx_bytes += pkt_len;
+
+ out:
+ /* give the buffer back to the card */
+ card->rx_buffer[4*descnr] = 0x80000000;
+ trigger_receive(card);
+ }
+
+ leave("investigate_read_descriptor");
+
+}
+
+
+/* Bufferoffset is in BYTES */
+static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset)
+{
+ int status;
+
+ enter("investigate_write_descriptor");
+
+ status = card->tx_buffer[4*descnr];
+#if 0
+ if (status & 0x8000) { /* Major error */
+ printk(KERN_ERR "Major transmit error status %x \n", status);
+ card->tx_buffer[4*descnr] = 0;
+ netif_wake_queue (dev);
+ }
+#endif
+ if (status > 0) { /* bit 31 is 0 when done */
+ if (card->tx_skb[descnr]!=NULL) {
+ card->stats.tx_bytes += card->tx_skb[descnr]->len;
+ dev_kfree_skb_irq(card->tx_skb[descnr]);
+ }
+ card->tx_skb[descnr] = NULL;
+ /* Bit 8 in the status field is 1 if there was a collision */
+ if (status&(1<<8))
+ card->stats.collisions++;
+ card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
+ netif_wake_queue (dev);
+ card->stats.tx_packets++;
+ }
+
+ leave("investigate_write_descriptor");
+
+}
+
+
+static int __init xircom_init(void)
+{
+ pci_register_driver(&xircom_ops);
+ return 0;
+}
+
+static void __exit xircom_exit(void)
+{
+ pci_unregister_driver(&xircom_ops);
+}
+
+module_init(xircom_init)
+module_exit(xircom_exit)
+
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
new file mode 100644
index 000000000000..32ccb26890c3
--- /dev/null
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -0,0 +1,1748 @@
+/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
+/*
+ Written/copyright 1994-1999 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ -----------------------------------------------------------
+
+ Linux kernel-specific changes:
+
+ LK1.0 (Ion Badulescu)
+ - Major cleanup
+ - Use 2.4 PCI API
+ - Support ethtool
+ - Rewrite perfect filter/hash code
+ - Use interrupts for media changes
+
+ LK1.1 (Ion Badulescu)
+ - Disallow negotiation of unsupported full-duplex modes
+*/
+
+#define DRV_NAME "xircom_tulip_cb"
+#define DRV_VERSION "0.91+LK1.1"
+#define DRV_RELDATE "October 11, 2001"
+
+#define CARDBUS 1
+
+/* A few user-configurable values. */
+
+#define xircom_debug debug
+#ifdef XIRCOM_DEBUG
+static int xircom_debug = XIRCOM_DEBUG;
+#else
+static int xircom_debug = 1;
+#endif
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 25;
+
+#define MAX_UNITS 4
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#ifdef __alpha__
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__powerpc__)
+static int csr0 = 0x01B00000 | 0x8000;
+#elif defined(__sparc__)
+static int csr0 = 0x01B00080 | 0x8000;
+#elif defined(__i386__)
+static int csr0 = 0x01A00000 | 0x8000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4 * HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define PKT_SETUP_SZ 192 /* Size of the setup frame */
+
+/* PCI registers */
+#define PCI_POWERMGMT 0x40
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+
+#include <asm/io.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/uaccess.h>
+
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
+KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(debug, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(csr0, int, 0);
+
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+
+#define RUN_AT(x) (jiffies + (x))
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver was forked from the driver for the DECchip "Tulip",
+Digital's single-chip ethernet controllers for PCI. It supports Xircom's
+almost-Tulip-compatible CBE-100 CardBus adapters.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
+This driver uses statically allocated rings of Rx and Tx descriptors, set at
+compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
+for the Rx ring buffers at open() time and passes the skb->data field to the
+Xircom as receive data buffers. When an incoming frame is less than
+RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
+copied to the new skbuff. When the incoming frame is larger, the skbuff is
+passed directly up the protocol stack and replaced by a newly allocated
+skbuff.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data. A subtle aspect of this
+choice is that the Xircom only receives into longword aligned buffers, thus
+the IP header at offset 14 isn't longword aligned for further processing.
+Copied frames are put into the new skbuff at an offset of "+2", thus copying
+has the beneficial effect of aligning the IP header and preloading the
+cache.
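+
+(In this driver that decision lives in xircom_rx() below: frames shorter than
+rx_copybreak are copied into a freshly allocated skbuff at an offset of 2,
+while larger frames are passed up in the ring skbuff and replaced.)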
+
+IIIc. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'tp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
+http://www.national.com/pf/DP/DP83840A.html
+
+IVc. Errata
+
+*/
+
+/* A full-duplex map for media types. */
+enum MediaIs {
+ MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
+ MediaIs100=16};
+static const char media_cap[] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum xircom_offsets {
+ CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
+ CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
+ CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ LinkChange=0x08000000,
+ NormalIntr=0x10000, NormalIntrMask=0x00014045,
+ AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
+ ReservedIntrMask=0xe0001a18,
+ EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
+ EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
+ TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
+};
+
+enum csr0_control_bits {
+ EnableMWI=0x01000000, EnableMRL=0x00800000,
+ EnableMRM=0x00200000, EqualBusPrio=0x02,
+ SoftwareReset=0x01,
+};
+
+enum csr6_control_bits {
+ ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
+ HashFilterBit=0x01, FullDuplexBit=0x0200,
+ TxThresh10=0x400000, TxStoreForw=0x200000,
+ TxThreshMask=0xc000, TxThreshShift=14,
+ EnableTx=0x2000, EnableRx=0x02,
+ ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
+ EnableTxRx=(EnableTx | EnableRx),
+};
+
+
+enum tbl_flag {
+ HAS_MII=1, HAS_ACPI=2,
+};
+static struct xircom_chip_table {
+ char *chip_name;
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+} xircom_tbl[] = {
+ { "Xircom Cardbus Adapter",
+ LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
+ RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
+ HAS_MII | HAS_ACPI, },
+ { NULL, },
+};
+/* This matches the table above. */
+enum chips {
+ X3201_3,
+};
+
+
+/* The Xircom Rx and Tx buffer descriptors. */
+struct xircom_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2;
+};
+
+struct xircom_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2; /* We use only buffer 1. */
+};
+
+enum tx_desc0_status_bits {
+ Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
+ Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
+};
+enum tx_desc1_status_bits {
+ Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
+ Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
+ Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
+ Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
+};
+enum rx_desc0_status_bits {
+ Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
+ Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
+ Rx0HugeFrame=0x80, Rx0CRCError=0x02,
+ Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
+};
+enum rx_desc1_status_bits {
+ Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
+};
+
+struct xircom_private {
+ struct xircom_rx_desc rx_ring[RX_RING_SIZE];
+ struct xircom_tx_desc tx_ring[TX_RING_SIZE];
+	/* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+#ifdef CARDBUS
+ /* The X3201-3 requires 4-byte aligned tx bufs */
+ struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
+#endif
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
+ int chip_id;
+ struct net_device_stats stats;
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int speed100:1;
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int autoneg:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int open:1;
+ unsigned int csr0; /* CSR0 setting. */
+ unsigned int csr6; /* Current CSR6 control settings. */
+ u16 to_advertise; /* NWay capabilities advertised. */
+ u16 advertising[4];
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ int saved_if_port;
+ struct pci_dev *pdev;
+ spinlock_t lock;
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static void xircom_up(struct net_device *dev);
+static void xircom_down(struct net_device *dev);
+static int xircom_open(struct net_device *dev);
+static void xircom_tx_timeout(struct net_device *dev);
+static void xircom_init_ring(struct net_device *dev);
+static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int xircom_rx(struct net_device *dev);
+static void xircom_media_change(struct net_device *dev);
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int xircom_close(struct net_device *dev);
+static struct net_device_stats *xircom_get_stats(struct net_device *dev);
+static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static struct ethtool_ops ops;
+
+
+/* The Xircom cards are picky about when certain bits in CSR6 can be
+ manipulated. Keith Owens <kaos@ocs.com.au>. */
+static void outl_CSR6(u32 newcsr6, long ioaddr)
+{
+ const int strict_bits =
+ TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
+ int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ /* mask out the reserved bits that always read 0 on the Xircom cards */
+ newcsr6 &= ~ReservedZeroMask;
+ /* or in the reserved bits that always read 1 */
+ newcsr6 |= ReservedOneMask;
+ currcsr6 = inl(ioaddr + CSR6);
+ if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
+ ((currcsr6 & ~EnableTxRx) == 0)) {
+ outl(newcsr6, ioaddr + CSR6); /* safe */
+ restore_flags(flags);
+ return;
+ }
+ /* make sure the transmitter and receiver are stopped first */
+ currcsr6 &= ~EnableTxRx;
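+	/* CSR5 bits 22:20 report the transmit process state and bits 19:17 the
+	 * receive process state (21x4x convention); wait below until both are
+	 * stopped or suspended before rewriting CSR6. */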
+ while (1) {
+ csr5 = inl(ioaddr + CSR5);
+ if (csr5 == 0xffffffff)
+ break; /* cannot read csr5, card removed? */
+ csr5_22_20 = csr5 & 0x700000;
+ csr5_19_17 = csr5 & 0x0e0000;
+ if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
+ (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
+ break; /* both are stopped or suspended */
+ if (!--attempts) {
+			printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts, "
+ "csr5=0x%08x\n", csr5);
+ outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
+ restore_flags(flags);
+ return;
+ }
+ outl(currcsr6, ioaddr + CSR6);
+ udelay(1);
+ }
+ /* now it is safe to change csr6 */
+ outl(newcsr6, ioaddr + CSR6);
+ restore_flags(flags);
+}
+
+
+static void __devinit read_mac_address(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int i, j;
+ unsigned char tuple, link, data_id, data_count;
+
+ /* Xircom has its address stored in the CIS;
+	 * we access it through the boot ROM interface for now.
+	 * This might not work, as the CIS is not parsed, but I
+ * (danilo) use the offset I found on my card's CIS !!!
+ *
+ * Doug Ledford: I changed this routine around so that it
+ * walks the CIS memory space, parsing the config items, and
+ * finds the proper lan_node_id tuple and uses the data
+ * stored there.
+ */
+ outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
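+	/* Walk the CIS tuple chain: tuple 0x22 is (as far as I can tell) the
+	 * CISTPL_FUNCE tuple, and a data_id of 0x04 with a 6-byte payload is
+	 * the LAN node ID, i.e. the MAC address. */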
+ for (i = 0x100; i < 0x1f7; i += link+2) {
+ outl(i, ioaddr + CSR10);
+ tuple = inl(ioaddr + CSR9) & 0xff;
+ outl(i + 1, ioaddr + CSR10);
+ link = inl(ioaddr + CSR9) & 0xff;
+ outl(i + 2, ioaddr + CSR10);
+ data_id = inl(ioaddr + CSR9) & 0xff;
+ outl(i + 3, ioaddr + CSR10);
+ data_count = inl(ioaddr + CSR9) & 0xff;
+ if ( (tuple == 0x22) &&
+ (data_id == 0x04) && (data_count == 0x06) ) {
+ /*
+ * This is it. We have the data we want.
+ */
+ for (j = 0; j < 6; j++) {
+ outl(i + j + 4, ioaddr + CSR10);
+ dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
+ }
+ break;
+ } else if (link == 0) {
+ break;
+ }
+ }
+}
+
+
+/*
+ * Locate the MII interfaces and initialize them.
+ * We disable full-duplex modes here,
+ * because we don't know how to handle them.
+ */
+static void find_mii_transceivers(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ int phy, phy_idx;
+
+ if (media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ tp->to_advertise = media2advert[tp->default_port - 9];
+ } else
+ tp->to_advertise =
+ /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
+ /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+ for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
+ ((mii_status & BMSR_100BASE4) == 0 &&
+ (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
+ int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
+ int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
+ int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
+ tp->phys[phy_idx] = phy;
+ tp->advertising[phy_idx++] = reg4;
+ printk(KERN_INFO "%s: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_reg0, mii_status, mii_advert);
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
+ dev->name);
+ tp->phys[0] = 0;
+ }
+}
+
+
+/*
+ * To quote Arjan van de Ven:
+ * transceiver_voodoo() enables the external UTP plug thingy.
+ * it's called voodoo as I stole this code and cannot cross-reference
+ * it with the specification.
+ * Actually it seems to go like this:
+ * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
+ * so any prior MII settings are lost.
+ * - GPIO0 enables the TP port so the MII can talk to the network.
+ * - a software reset will reset both GPIO pins.
+ * I also moved the software reset here, because doing it in xircom_up()
+ * required enabling the GPIO pins each time, which reset the MII each time.
+ * Thus we couldn't control the MII -- which sucks because we don't know
+ * how to handle full-duplex modes so we *must* disable them.
+ */
+static void transceiver_voodoo(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ outl(SoftwareReset, ioaddr + CSR0);
+ udelay(2);
+
+ /* Deassert reset. */
+ outl(tp->csr0, ioaddr + CSR0);
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ outl(0x0008, ioaddr + CSR15);
+ udelay(5); /* The delays are Xircom-recommended to give the
+ * chipset time to reset the actual hardware
+ * on the PCMCIA card
+ */
+ outl(0xa8050000, ioaddr + CSR15);
+ udelay(5);
+ outl(0xa00f0000, ioaddr + CSR15);
+ udelay(5);
+
+ outl_CSR6(0, ioaddr);
+ //outl_CSR6(FullDuplexBit, ioaddr);
+}
+
+
+static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct xircom_private *tp;
+ static int board_idx = -1;
+ int chip_idx = id->driver_data;
+ long ioaddr;
+ int i;
+ u8 chip_rev;
+
+/* When built into the kernel, we only print the version if a device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));
+
+ board_idx++;
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start(pdev, 0);
+ dev = alloc_etherdev(sizeof(*tp));
+ if (!dev) {
+ printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->base_addr = ioaddr;
+ dev->irq = pdev->irq;
+
+ if (pci_request_regions(pdev, dev->name)) {
+ printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
+ goto err_out_free_netdev;
+ }
+
+ /* Bring the chip out of sleep mode.
+ Caution: Snooze mode does not work with some boards! */
+ if (xircom_tbl[chip_idx].flags & HAS_ACPI)
+ pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
+
+ /* Stop the chip's Tx and Rx processes. */
+ outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
+ /* Clear the missed-packet counter. */
+ (volatile int)inl(ioaddr + CSR8);
+
+ tp = netdev_priv(dev);
+
+ spin_lock_init(&tp->lock);
+ tp->pdev = pdev;
+ tp->chip_id = chip_idx;
+ /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
+ /* XXX: is this necessary for Xircom? */
+ tp->csr0 = csr0 & ~EnableMWI;
+
+ pci_set_drvdata(pdev, dev);
+
+ /* The lower four bits are the media type. */
+ if (board_idx >= 0 && board_idx < MAX_UNITS) {
+ tp->default_port = options[board_idx] & 15;
+ if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
+ tp->full_duplex = 1;
+ if (mtu[board_idx] > 0)
+ dev->mtu = mtu[board_idx];
+ }
+ if (dev->mem_start)
+ tp->default_port = dev->mem_start;
+ if (tp->default_port) {
+ if (media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->autoneg = 0;
+ else
+ tp->autoneg = 1;
+ tp->speed100 = 1;
+
+ /* The Xircom-specific entries in the device structure. */
+ dev->open = &xircom_open;
+ dev->hard_start_xmit = &xircom_start_xmit;
+ dev->stop = &xircom_close;
+ dev->get_stats = &xircom_get_stats;
+ dev->do_ioctl = &xircom_ioctl;
+#ifdef HAVE_MULTICAST
+ dev->set_multicast_list = &set_rx_mode;
+#endif
+ dev->tx_timeout = xircom_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ SET_ETHTOOL_OPS(dev, &ops);
+
+ transceiver_voodoo(dev);
+
+ read_mac_address(dev);
+
+ if (register_netdev(dev))
+ goto err_out_cleardev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+ printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+ dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", IRQ %d.\n", dev->irq);
+
+ if (xircom_tbl[chip_idx].flags & HAS_MII) {
+ find_mii_transceivers(dev);
+ check_duplex(dev);
+ }
+
+ return 0;
+
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() inl(mdio_addr)
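+/* (The dummy CSR9 read doubles as the delay: it forces the preceding PCI
+   write to complete before the next access is issued.) */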
+
+/* Read and write the MII registers using software-generated serial
+ MDIO protocol. It is just different enough from the EEPROM protocol
+   to not share code.  The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
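+	/* The 16 bits shifted out below are: two trailing preamble bits,
+	 * start (01), read opcode (10), 5-bit PHY address, 5-bit register. */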
+ int retval = 0;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ int i;
+ int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
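+	/* 32-bit write frame: start (01), write opcode (01), 5-bit PHY address,
+	 * 5-bit register, turnaround (10), then the 16 data bits; 0x5002 << 16
+	 * supplies the fixed start/opcode/turnaround bits. */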
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+
+static void
+xircom_up(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+
+ xircom_init_ring(dev);
+ /* Clear the tx ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_skbuff[i] = NULL;
+ tp->tx_ring[i].status = 0;
+ }
+
+ if (xircom_debug > 1)
+ printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */
+
+ set_rx_mode(dev);
+
+ /* Start the chip's Tx to process setup frame. */
+ outl_CSR6(tp->csr6, ioaddr);
+ outl_CSR6(tp->csr6 | EnableTx, ioaddr);
+
+	/* Acknowledge all outstanding interrupt sources */
+ outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ /* Enable interrupts by setting the interrupt mask. */
+ outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ /* Enable Rx */
+ outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
+ /* Rx poll demand */
+ outl(0, ioaddr + CSR2);
+
+ /* Tell the net layer we're ready */
+ netif_start_queue (dev);
+
+ /* Check current media state */
+ xircom_media_change(dev);
+
+ if (xircom_debug > 2) {
+ printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+ dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
+ inl(ioaddr + CSR6));
+ }
+}
+
+
+static int
+xircom_open(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+
+ if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
+ return -EAGAIN;
+
+ xircom_up(dev);
+ tp->open = 1;
+
+ return 0;
+}
+
+
+static void xircom_tx_timeout(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ if (xircom_debug > 1)
+ printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
+ dev->name);
+ }
+
+#if defined(way_too_many_messages)
+ if (xircom_debug > 3) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
+ "%2.2x %2.2x %2.2x.\n",
+ i, (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ if (j < 100) printk(" %2.2x", buf[j]);
+ printk(" j=%d.\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+	/* Stop and restart the chip's Tx/Rx processes. */
+ outl_CSR6(tp->csr6 | EnableRx, ioaddr);
+ outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+ tp->stats.tx_errors++;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void xircom_init_ring(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ int i;
+
+ tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0;
+ tp->rx_ring[i].length = PKT_BUF_SZ;
+ tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
+ tp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
+ tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ tp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
+ tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_skbuff[i] = NULL;
+ tp->tx_ring[i].status = 0;
+ tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
+#ifdef CARDBUS
+ if (tp->chip_id == X3201_3)
+ tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
+#endif /* CARDBUS */
+ }
+ tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
+}
+
+
+static int
+xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ int entry;
+ u32 flag;
+
+	/* Caution: the write order is important here; set the base address
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_skbuff[entry] = skb;
+#ifdef CARDBUS
+ if (tp->chip_id == X3201_3) {
+ memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
+ tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
+ } else
+#endif
+ tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
+
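+	/* Ask for a Tx-done interrupt only at the ring midpoint, when the ring
+	 * is nearly full, and on the wrap entry; other packets complete silently
+	 * and are reaped on the next interrupt. */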
+ if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+ flag = Tx1WholePkt; /* No interrupt */
+ } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+ flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
+ } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+ flag = Tx1WholePkt; /* No Tx-done intr. */
+ } else {
+ /* Leave room for set_rx_mode() to fill entries. */
+ flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
+ tp->tx_full = 1;
+ }
+ if (entry == TX_RING_SIZE - 1)
+ flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;
+
+ tp->tx_ring[entry].length = skb->len | flag;
+ tp->tx_ring[entry].status = Tx0DescOwned; /* Pass ownership to the chip. */
+ tp->cur_tx++;
+ if (tp->tx_full)
+ netif_stop_queue (dev);
+ else
+ netif_wake_queue (dev);
+
+ /* Trigger an immediate transmit demand. */
+ outl(0, dev->base_addr + CSR1);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+
+static void xircom_media_change(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ u16 reg0, reg1, reg4, reg5;
+ u32 csr6 = inl(ioaddr + CSR6), newcsr6;
+
+ /* reset status first */
+ mdio_read(dev, tp->phys[0], MII_BMCR);
+ mdio_read(dev, tp->phys[0], MII_BMSR);
+
+ reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
+ reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
+
+ if (reg1 & BMSR_LSTATUS) {
+ /* link is up */
+ if (reg0 & BMCR_ANENABLE) {
+ /* autonegotiation is enabled */
+ reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
+ reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
+ if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
+ tp->speed100 = 1;
+ tp->full_duplex = 1;
+ } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
+ tp->speed100 = 1;
+ tp->full_duplex = 0;
+ } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
+ tp->speed100 = 0;
+ tp->full_duplex = 1;
+ } else {
+ tp->speed100 = 0;
+ tp->full_duplex = 0;
+ }
+ } else {
+ /* autonegotiation is disabled */
+ if (reg0 & BMCR_SPEED100)
+ tp->speed100 = 1;
+ else
+ tp->speed100 = 0;
+ if (reg0 & BMCR_FULLDPLX)
+ tp->full_duplex = 1;
+ else
+ tp->full_duplex = 0;
+ }
+ printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
+ dev->name,
+ tp->speed100 ? "100" : "10",
+ tp->full_duplex ? "full" : "half");
+ netif_carrier_on(dev);
+ newcsr6 = csr6 & ~FullDuplexBit;
+ if (tp->full_duplex)
+ newcsr6 |= FullDuplexBit;
+ if (newcsr6 != csr6)
+			outl_CSR6(newcsr6, ioaddr);	/* outl_CSR6() adds the CSR6 offset itself */
+ } else {
+ printk(KERN_DEBUG "%s: Link is down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+}
+
+
+static void check_duplex(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ u16 reg0;
+
+ mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
+ udelay(500);
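+	/* Wait for the PHY to come out of reset before touching its registers. */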
+ while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
+
+ reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
+ mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
+
+ if (tp->autoneg) {
+ reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
+ reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+ if (tp->speed100)
+ reg0 |= BMCR_SPEED100;
+ if (tp->full_duplex)
+ reg0 |= BMCR_FULLDPLX;
+ printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
+ dev->name,
+ tp->speed100 ? "100" : "10",
+ tp->full_duplex ? "full" : "half");
+ }
+ mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int csr5, work_budget = max_interrupt_work;
+ int handled = 0;
+
+ spin_lock (&tp->lock);
+
+ do {
+ csr5 = inl(ioaddr + CSR5);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+ if (xircom_debug > 4)
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, inl(dev->base_addr + CSR5));
+
+ if (csr5 == 0xffffffff)
+ break; /* all bits set, assume PCMCIA card removed */
+
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+ handled = 1;
+
+ if (csr5 & (RxIntr | RxNoBuf))
+ work_budget -= xircom_rx(dev);
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = tp->tx_ring[entry].status;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_skbuff[entry] == NULL)
+ continue;
+
+ if (status & Tx0DescError) {
+					/* There was a major error; log it. */
+#ifndef final_version
+ if (xircom_debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ tp->stats.tx_errors++;
+ if (status & Tx0ManyColl) {
+ tp->stats.tx_aborted_errors++;
+ }
+ if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
+ if (status & Tx0LateColl) tp->stats.tx_window_errors++;
+ if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
+ } else {
+ tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = NULL;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (tp->tx_full &&
+ tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+ /* The ring is no longer full */
+ tp->tx_full = 0;
+
+ if (tp->tx_full)
+ netif_stop_queue (dev);
+ else
+ netif_wake_queue (dev);
+
+ tp->dirty_tx = dirty_tx;
+ if (csr5 & TxDied) {
+ if (xircom_debug > 2)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+ outl_CSR6(tp->csr6 | EnableRx, ioaddr);
+ outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
+ }
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ if (csr5 & LinkChange)
+ xircom_media_change(dev);
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & TxThreshMask) != TxThreshMask)
+ tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= TxStoreForw; /* Store-n-forward. */
+ /* Restart the transmit process. */
+ outl_CSR6(tp->csr6 | EnableRx, ioaddr);
+ outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
+ }
+ if (csr5 & RxDied) { /* Missed a Rx frame. */
+ tp->stats.rx_errors++;
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
+ }
+			/* Clear all error sources, including undocumented ones! */
+ outl(0x0800f7ba, ioaddr + CSR5);
+ }
+ if (--work_budget < 0) {
+ if (xircom_debug > 1)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+ "csr5=0x%8.8x.\n", dev->name, csr5);
+ /* Acknowledge all interrupt sources. */
+ outl(0x8001ffff, ioaddr + CSR5);
+ break;
+ }
+ } while (1);
+
+ if (xircom_debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, inl(ioaddr + CSR5));
+
+ spin_unlock (&tp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int
+xircom_rx(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int work_done = 0;
+
+ if (xircom_debug > 4)
+ printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (tp->rx_ring[entry].status >= 0) {
+ s32 status = tp->rx_ring[entry].status;
+
+ if (xircom_debug > 5)
+ printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ if (--rx_work_limit < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (xircom_debug > 1)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & Rx0DescError) {
+ /* There was a fatal error. */
+ if (xircom_debug > 2)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
+ if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if ! defined(__alpha__)
+ eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
+ pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
+#endif
+ work_done++;
+ } else { /* Pass up the skb already on the Rx ring. */
+ skb_put(skb = tp->rx_skbuff[entry], pkt_len);
+ tp->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+ tp->stats.rx_bytes += pkt_len;
+ }
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+ work_done++;
+ }
+ tp->rx_ring[entry].status = Rx0DescOwned;
+ }
+
+ return work_done;
+}
+
+
+static void
+xircom_down(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct xircom_private *tp = netdev_priv(dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0, ioaddr + CSR7);
+ /* Stop the chip's Tx and Rx processes. */
+ outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
+
+ if (inl(ioaddr + CSR6) != 0xffffffff)
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ dev->if_port = tp->saved_if_port;
+}
+
+
+static int
+xircom_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct xircom_private *tp = netdev_priv(dev);
+ int i;
+
+ if (xircom_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inl(ioaddr + CSR5));
+
+ netif_stop_queue(dev);
+
+ if (netif_device_present(dev))
+ xircom_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_skbuff[i];
+ tp->rx_skbuff[i] = NULL;
+ tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
+ tp->rx_ring[i].length = 0;
+ tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+ dev_kfree_skb(skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (tp->tx_skbuff[i])
+ dev_kfree_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = NULL;
+ }
+
+ tp->open = 0;
+ return 0;
+}
+
+
+static struct net_device_stats *xircom_get_stats(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (netif_device_present(dev))
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ return &tp->stats;
+}
+
+static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ ecmd->supported =
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII;
+
+ ecmd->advertising = ADVERTISED_MII;
+ if (tp->advertising[0] & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (tp->advertising[0] & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (tp->advertising[0] & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (tp->advertising[0] & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (tp->autoneg) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->port = PORT_MII;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = tp->phys[0];
+ ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
+ ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->maxtxpkt = TX_RING_SIZE / 2;
+ ecmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ u16 autoneg, speed100, full_duplex;
+
+ autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
+ speed100 = (ecmd->speed == SPEED_100);
+ full_duplex = (ecmd->duplex == DUPLEX_FULL);
+
+ tp->autoneg = autoneg;
+ if (speed100 != tp->speed100 ||
+ full_duplex != tp->full_duplex) {
+ tp->speed100 = speed100;
+ tp->full_duplex = full_duplex;
+ /* change advertising bits */
+ tp->advertising[0] &= ~(ADVERTISE_10HALF |
+ ADVERTISE_10FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_100FULL |
+ ADVERTISE_100BASE4);
+ if (speed100) {
+ if (full_duplex)
+ tp->advertising[0] |= ADVERTISE_100FULL;
+ else
+ tp->advertising[0] |= ADVERTISE_100HALF;
+ } else {
+ if (full_duplex)
+ tp->advertising[0] |= ADVERTISE_10FULL;
+ else
+ tp->advertising[0] |= ADVERTISE_10HALF;
+ }
+ }
+ check_duplex(dev);
+ return 0;
+}
+
+static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pdev));
+}
+
+static struct ethtool_ops ops = {
+ .get_settings = xircom_get_settings,
+ .set_settings = xircom_set_settings,
+ .get_drvinfo = xircom_get_drvinfo,
+};
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ u16 *data = (u16 *)&rq->ifr_ifru;
+ int phy = tp->phys[0] & 0x1f;
+ unsigned long flags;
+
+ switch(cmd) {
+ /* Legacy mii-diag interface */
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ if (tp->mii_cnt)
+ data[0] = phy;
+ else
+ return -ENODEV;
+ return 0;
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ save_flags(flags);
+ cli();
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ restore_flags(flags);
+ return 0;
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ save_flags(flags);
+ cli();
+ if (data[0] == tp->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ if (value & (BMCR_RESET | BMCR_ANENABLE))
+ /* Autonegotiation. */
+ tp->autoneg = 1;
+ else {
+ tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
+ tp->autoneg = 0;
+ }
+ break;
+ case 4:
+ tp->advertising[0] = value;
+ break;
+ }
+ check_duplex(dev);
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ restore_flags(flags);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct xircom_private *tp = netdev_priv(dev);
+ struct dev_mc_list *mclist;
+ long ioaddr = dev->base_addr;
+ int csr6 = inl(ioaddr + CSR6);
+ u16 *eaddrs, *setup_frm;
+ u32 tx_flags;
+ int i;
+
+ tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
+ csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= PromiscBit;
+ csr6 |= PromiscBit;
+ goto out;
+ }
+
+ if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AllMultiBit;
+ csr6 |= AllMultiBit;
+ goto out;
+ }
+
+ tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
+
+ /* Note that only the low-address shortword of setup_frame is valid! */
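+	/* (That is why setup_frm advances by two u16s per stored shortword: each
+	 * address word occupies the low half of a 32-bit setup-frame slot.) */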
+ setup_frm = tp->setup_frame;
+ mclist = dev->mc_list;
+
+ /* Fill the first entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
+ *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
+ *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
+
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
+ u32 hash, hash2;
+
+ tx_flags |= Tx1HashSetup;
+ tp->csr6 |= HashFilterBit;
+ csr6 |= HashFilterBit;
+
+		/* Fill the 3 unused entries with the broadcast address.
+ At least one entry *must* contain the broadcast address!!!*/
+ for (i = 0; i < 3; i++) {
+ *setup_frm = 0xffff; setup_frm += 2;
+ *setup_frm = 0xffff; setup_frm += 2;
+ *setup_frm = 0xffff; setup_frm += 2;
+ }
+
+ /* Truly brain-damaged hash filter layout */
+ /* XXX: not sure if I should take the last or the first 9 bits */
+ for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
+ u32 *hptr;
+ hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ if (hash < 384) {
+ hash2 = hash + ((hash >> 4) << 4) +
+ ((hash >> 5) << 5);
+ } else {
+ hash -= 384;
+ hash2 = 64 + hash + (hash >> 4) * 80;
+ }
+ hptr = &hash_table[hash2 & ~0x1f];
+ *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
+ }
+ } else {
+ /* We have <= 14 mcast addresses so we can use Xircom's
+ wonderful 16-address perfect filter. */
+ for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
+ *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
+ *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
+ }
+ /* Fill the unused entries with the broadcast address.
+ At least one entry *must* contain the broadcast address!!!*/
+ for (; i < 15; i++) {
+ *setup_frm = 0xffff; setup_frm += 2;
+ *setup_frm = 0xffff; setup_frm += 2;
+ *setup_frm = 0xffff; setup_frm += 2;
+ }
+ }
+
+ /* Now add this frame to the Tx list. */
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ /* XXX: Huh? All it means is that the Tx list is full...*/
+ } else {
+ unsigned long flags;
+ unsigned int entry;
+ int dummy = -1;
+
+ save_flags(flags); cli();
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+			/* Avoid a chip erratum by prefixing a dummy entry. */
+ tp->tx_skbuff[entry] = NULL;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
+ tp->tx_ring[entry].buffer1 = 0;
+ /* race with chip, set Tx0DescOwned later */
+ dummy = entry;
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+
+ tp->tx_skbuff[entry] = NULL;
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE - 1)
+ tx_flags |= Tx1RingWrap; /* Wrap ring. */
+ tp->tx_ring[entry].length = tx_flags;
+ tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
+ tp->tx_ring[entry].status = Tx0DescOwned;
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
+ tp->tx_full = 1;
+ netif_stop_queue (dev);
+ }
+ if (dummy >= 0)
+ tp->tx_ring[dummy].status = Tx0DescOwned;
+ restore_flags(flags);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ }
+
+out:
+ outl_CSR6(csr6, ioaddr);
+}
+
+
+static struct pci_device_id xircom_pci_table[] = {
+ { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
+ {0},
+};
+MODULE_DEVICE_TABLE(pci, xircom_pci_table);
+
+
+#ifdef CONFIG_PM
+static int xircom_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct xircom_private *tp = netdev_priv(dev);
+ printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
+ if (tp->open)
+ xircom_down(dev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, 3);
+
+ return 0;
+}
+
+
+static int xircom_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct xircom_private *tp = netdev_priv(dev);
+ printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
+
+ pci_set_power_state(pdev,0);
+ pci_enable_device(pdev);
+ pci_restore_state(pdev);
+
+ /* Bring the chip out of sleep mode.
+ Caution: Snooze mode does not work with some boards! */
+ if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
+ pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
+
+ transceiver_voodoo(dev);
+ if (xircom_tbl[tp->chip_id].flags & HAS_MII)
+ check_duplex(dev);
+
+ if (tp->open)
+ xircom_up(dev);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+
+static void __devexit xircom_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
+ unregister_netdev(dev);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
+static struct pci_driver xircom_driver = {
+ .name = DRV_NAME,
+ .id_table = xircom_pci_table,
+ .probe = xircom_init_one,
+ .remove = __devexit_p(xircom_remove_one),
+#ifdef CONFIG_PM
+ .suspend = xircom_suspend,
+ .resume = xircom_resume
+#endif /* CONFIG_PM */
+};
+
+
+static int __init xircom_init(void)
+{
+/* When built as a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init(&xircom_driver);
+}
+
+
+static void __exit xircom_exit(void)
+{
+ pci_unregister_driver(&xircom_driver);
+}
+
+module_init(xircom_init)
+module_exit(xircom_exit)
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
new file mode 100644
index 000000000000..7bfee366297b
--- /dev/null
+++ b/drivers/net/tun.c
@@ -0,0 +1,883 @@
+/*
+ * TUN - Universal TUN/TAP device driver.
+ * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
+ */
+
+/*
+ * Changes:
+ *
+ * Mark Smith <markzzzsmith@yahoo.com.au>
+ * Use random_ether_addr() for tap MAC address.
+ *
+ * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
+ * Fixes in packet dropping, queue length setting and queue wakeup.
+ * Increased default tx queue length.
+ * Added ethtool API.
+ * Minor cleanups
+ *
+ * Daniel Podlejski <underley@underley.eu.org>
+ * Modifications for 2.3.99-pre5 kernel.
+ */
+
+#define DRV_NAME "tun"
+#define DRV_VERSION "1.6"
+#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
+#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/miscdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_tun.h>
+#include <linux/crc32.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifdef TUN_DEBUG
+static int debug;
+#endif
+
+/* Network device part of the driver */
+
+static LIST_HEAD(tun_dev_list);
+static struct ethtool_ops tun_ethtool_ops;
+
+/* Net device open. */
+static int tun_net_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Net device close. */
+static int tun_net_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/* Net device start xmit */
+static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
+
+ /* Drop packet if interface is not attached */
+ if (!tun->attached)
+ goto drop;
+
+ /* Packet dropping */
+ if (skb_queue_len(&tun->readq) >= dev->tx_queue_len) {
+ if (!(tun->flags & TUN_ONE_QUEUE)) {
+ /* Normal queueing mode. */
+ /* Packet scheduler handles dropping of further packets. */
+ netif_stop_queue(dev);
+
+ /* We won't see all dropped packets individually, so overrun
+ * error is more appropriate. */
+ tun->stats.tx_fifo_errors++;
+ } else {
+ /* Single queue mode.
+ * Driver handles dropping of all packets itself. */
+ goto drop;
+ }
+ }
+
+ /* Queue packet */
+ skb_queue_tail(&tun->readq, skb);
+ dev->trans_start = jiffies;
+
+ /* Notify and wake up reader process */
+ if (tun->flags & TUN_FASYNC)
+ kill_fasync(&tun->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible(&tun->read_wait);
+ return 0;
+
+drop:
+ tun->stats.tx_dropped++;
+ kfree_skb(skb);
+ return 0;
+}
+
+/** Add the specified Ethernet address to this multicast filter. */
+static void
+add_multi(u32* filter, const u8* addr)
+{
+ int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
+ filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+}
+
+/** Remove the specified Ethernet address from this multicast filter. */
+static void
+del_multi(u32* filter, const u8* addr)
+{
+ int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
+ filter[bit_nr >> 5] &= ~(1 << (bit_nr & 31));
+}
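+
+/* Both helpers implement a 64-bit hash filter of the kind many Ethernet MACs
+ * provide in hardware: the top six bits of the address CRC (ether_crc() >> 26)
+ * select one of 64 bits, filter[bit_nr >> 5] picks the 32-bit word and
+ * 1 << (bit_nr & 31) the bit within it. Distinct groups can hash to the same
+ * bit, so the filter may pass extra multicast traffic, but it never drops
+ * traffic for a group that was added. */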
+
+/** Update the list of multicast groups to which the network device belongs.
+ * This list is used to filter packets being sent from the character device to
+ * the network device. */
+static void
+tun_net_mclist(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ const struct dev_mc_list *mclist;
+ int i;
+ DBG(KERN_DEBUG "%s: tun_net_mclist: mc_count %d\n",
+ dev->name, dev->mc_count);
+ memset(tun->chr_filter, 0, sizeof tun->chr_filter);
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count && mclist != NULL;
+ i++, mclist = mclist->next) {
+ add_multi(tun->net_filter, mclist->dmi_addr);
+ DBG(KERN_DEBUG "%s: tun_net_mclist: %x:%x:%x:%x:%x:%x\n",
+ dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1], mclist->dmi_addr[2],
+ mclist->dmi_addr[3], mclist->dmi_addr[4], mclist->dmi_addr[5]);
+ }
+}
+
+static struct net_device_stats *tun_net_stats(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ return &tun->stats;
+}
+
+/* Initialize net device. */
+static void tun_net_init(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ switch (tun->flags & TUN_TYPE_MASK) {
+ case TUN_TUN_DEV:
+ /* Point-to-Point TUN Device */
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = 1500;
+
+ /* Zero header length */
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
+ break;
+
+ case TUN_TAP_DEV:
+ /* Ethernet TAP Device */
+ dev->set_multicast_list = tun_net_mclist;
+
+ ether_setup(dev);
+ random_ether_addr(dev->dev_addr);
+ dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
+ break;
+ }
+}
+
+/* Character device part */
+
+/* Poll */
+static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+{
+ struct tun_struct *tun = file->private_data;
+ unsigned int mask = POLLOUT | POLLWRNORM;
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
+
+ poll_wait(file, &tun->read_wait, wait);
+
+ if (skb_queue_len(&tun->readq))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+/* Get packet from user space buffer */
+static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
+{
+ struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) };
+ struct sk_buff *skb;
+ size_t len = count, align = 0;
+
+ if (!(tun->flags & TUN_NO_PI)) {
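+ /* len and count are size_t: if the write is shorter than the packet
+ * information header, the subtraction below wraps around and the
+ * "> count" test rejects it. */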
+ if ((len -= sizeof(pi)) > count)
+ return -EINVAL;
+
+ if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
+ return -EFAULT;
+ }
+
+ if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV)
+ align = NET_IP_ALIGN;
+
+ if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
+ tun->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ if (align)
+ skb_reserve(skb, align);
+ if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+ tun->stats.rx_dropped++;
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ skb->dev = tun->dev;
+ switch (tun->flags & TUN_TYPE_MASK) {
+ case TUN_TUN_DEV:
+ skb->mac.raw = skb->data;
+ skb->protocol = pi.proto;
+ break;
+ case TUN_TAP_DEV:
+ skb->protocol = eth_type_trans(skb, tun->dev);
+ break;
+ };
+
+ if (tun->flags & TUN_NOCHECKSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_rx_ni(skb);
+ tun->dev->last_rx = jiffies;
+
+ tun->stats.rx_packets++;
+ tun->stats.rx_bytes += len;
+
+ return count;
+}
+
+static inline size_t iov_total(const struct iovec *iv, unsigned long count)
+{
+ unsigned long i;
+ size_t len;
+
+ for (i = 0, len = 0; i < count; i++)
+ len += iv[i].iov_len;
+
+ return len;
+}
+
+/* Writev */
+static ssize_t tun_chr_writev(struct file * file, const struct iovec *iv,
+ unsigned long count, loff_t *pos)
+{
+ struct tun_struct *tun = file->private_data;
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
+
+ return tun_get_user(tun, (struct iovec *) iv, iov_total(iv, count));
+}
+
+/* Write */
+static ssize_t tun_chr_write(struct file * file, const char __user * buf,
+ size_t count, loff_t *pos)
+{
+ struct iovec iv = { (void __user *) buf, count };
+ return tun_chr_writev(file, &iv, 1, pos);
+}
+
+/* Put packet to the user space buffer */
+static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
+ struct sk_buff *skb,
+ struct iovec *iv, int len)
+{
+ struct tun_pi pi = { 0, skb->protocol };
+ ssize_t total = 0;
+
+ if (!(tun->flags & TUN_NO_PI)) {
+ if ((len -= sizeof(pi)) < 0)
+ return -EINVAL;
+
+ if (len < skb->len) {
+ /* Packet will be stripped */
+ pi.flags |= TUN_PKT_STRIP;
+ }
+
+ if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
+ return -EFAULT;
+ total += sizeof(pi);
+ }
+
+ len = min_t(int, skb->len, len);
+
+ skb_copy_datagram_iovec(skb, 0, iv, len);
+ total += len;
+
+ tun->stats.tx_packets++;
+ tun->stats.tx_bytes += len;
+
+ return total;
+}
+
+/* Readv */
+static ssize_t tun_chr_readv(struct file *file, const struct iovec *iv,
+ unsigned long count, loff_t *pos)
+{
+ struct tun_struct *tun = file->private_data;
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb;
+ ssize_t len, ret = 0;
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
+
+ len = iov_total(iv, count);
+ if (len < 0)
+ return -EINVAL;
+
+ add_wait_queue(&tun->read_wait, &wait);
+ while (len) {
+ const u8 ones[ ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 addr[ ETH_ALEN];
+ int bit_nr;
+
+ current->state = TASK_INTERRUPTIBLE;
+
+ /* Read frames from the queue */
+ if (!(skb=skb_dequeue(&tun->readq))) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ /* Nothing to read, let's sleep */
+ schedule();
+ continue;
+ }
+ netif_wake_queue(tun->dev);
+
+ /** Decide whether to accept this packet. This code is designed to
+ * behave identically to an Ethernet interface. Accept the packet if
+ * - we are promiscuous.
+ * - the packet is addressed to us.
+ * - the packet is broadcast.
+ * - the packet is multicast and
+ * - we are multicast promiscuous.
+ * - we belong to the multicast group.
+ */
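+ /* In the test below, 01:00:5e:xx:xx:xx is the IPv4 multicast MAC prefix
+ * and 33:33:xx:xx:xx:xx the IPv6 one; for those, acceptance depends on
+ * IFF_ALLMULTI or on the chr_filter hash bit set via SIOCADDMULTI. */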
+ memcpy(addr, skb->data,
+ min_t(size_t, sizeof addr, skb->len));
+ bit_nr = ether_crc(sizeof addr, addr) >> 26;
+ if ((tun->if_flags & IFF_PROMISC) ||
+ memcmp(addr, tun->dev_addr, sizeof addr) == 0 ||
+ memcmp(addr, ones, sizeof addr) == 0 ||
+ (((addr[0] == 1 && addr[1] == 0 && addr[2] == 0x5e) ||
+ (addr[0] == 0x33 && addr[1] == 0x33)) &&
+ ((tun->if_flags & IFF_ALLMULTI) ||
+ (tun->chr_filter[bit_nr >> 5] & (1 << (bit_nr & 31)))))) {
+ DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %x:%x:%x:%x:%x:%x\n",
+ tun->dev->name, addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+ ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
+ kfree_skb(skb);
+ break;
+ } else {
+ DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %x:%x:%x:%x:%x:%x\n",
+ tun->dev->name, addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+ kfree_skb(skb);
+ continue;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&tun->read_wait, &wait);
+
+ return ret;
+}
+
+/* Read */
+static ssize_t tun_chr_read(struct file * file, char __user * buf,
+ size_t count, loff_t *pos)
+{
+ struct iovec iv = { buf, count };
+ return tun_chr_readv(file, &iv, 1, pos);
+}
+
+static void tun_setup(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ skb_queue_head_init(&tun->readq);
+ init_waitqueue_head(&tun->read_wait);
+
+ tun->owner = -1;
+
+ SET_MODULE_OWNER(dev);
+ dev->open = tun_net_open;
+ dev->hard_start_xmit = tun_net_xmit;
+ dev->stop = tun_net_close;
+ dev->get_stats = tun_net_stats;
+ dev->ethtool_ops = &tun_ethtool_ops;
+ dev->destructor = free_netdev;
+}
+
+static struct tun_struct *tun_get_by_name(const char *name)
+{
+ struct tun_struct *tun;
+
+ ASSERT_RTNL();
+ list_for_each_entry(tun, &tun_dev_list, list) {
+ if (!strncmp(tun->dev->name, name, IFNAMSIZ))
+ return tun;
+ }
+
+ return NULL;
+}
+
+static int tun_set_iff(struct file *file, struct ifreq *ifr)
+{
+ struct tun_struct *tun;
+ struct net_device *dev;
+ int err;
+
+ tun = tun_get_by_name(ifr->ifr_name);
+ if (tun) {
+ if (tun->attached)
+ return -EBUSY;
+
+ /* Check permissions */
+ if (tun->owner != -1 &&
+ current->euid != tun->owner && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+ else if (__dev_get_by_name(ifr->ifr_name))
+ return -EINVAL;
+ else {
+ char *name;
+ unsigned long flags = 0;
+
+ err = -EINVAL;
+
+ /* Set dev type */
+ if (ifr->ifr_flags & IFF_TUN) {
+ /* TUN device */
+ flags |= TUN_TUN_DEV;
+ name = "tun%d";
+ } else if (ifr->ifr_flags & IFF_TAP) {
+ /* TAP device */
+ flags |= TUN_TAP_DEV;
+ name = "tap%d";
+ } else
+ goto failed;
+
+ if (*ifr->ifr_name)
+ name = ifr->ifr_name;
+
+ dev = alloc_netdev(sizeof(struct tun_struct), name,
+ tun_setup);
+ if (!dev)
+ return -ENOMEM;
+
+ tun = netdev_priv(dev);
+ tun->dev = dev;
+ tun->flags = flags;
+ /* Be promiscuous by default to maintain previous behaviour. */
+ tun->if_flags = IFF_PROMISC;
+ /* Generate random Ethernet address. */
+ *(u16 *)tun->dev_addr = htons(0x00FF);
+ get_random_bytes(tun->dev_addr + sizeof(u16), 4);
+ memset(tun->chr_filter, 0, sizeof tun->chr_filter);
+
+ tun_net_init(dev);
+
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto err_free_dev;
+ }
+
+ err = register_netdevice(tun->dev);
+ if (err < 0)
+ goto err_free_dev;
+
+ list_add(&tun->list, &tun_dev_list);
+ }
+
+ DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
+
+ if (ifr->ifr_flags & IFF_NO_PI)
+ tun->flags |= TUN_NO_PI;
+
+ if (ifr->ifr_flags & IFF_ONE_QUEUE)
+ tun->flags |= TUN_ONE_QUEUE;
+
+ file->private_data = tun;
+ tun->attached = 1;
+
+ strcpy(ifr->ifr_name, tun->dev->name);
+ return 0;
+
+ err_free_dev:
+ free_netdev(dev);
+ failed:
+ return err;
+}
+
+static int tun_chr_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tun_struct *tun = file->private_data;
+ void __user* argp = (void __user*)arg;
+ struct ifreq ifr;
+
+ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
+ if (copy_from_user(&ifr, argp, sizeof ifr))
+ return -EFAULT;
+
+ if (cmd == TUNSETIFF && !tun) {
+ int err;
+
+ ifr.ifr_name[IFNAMSIZ-1] = '\0';
+
+ rtnl_lock();
+ err = tun_set_iff(file, &ifr);
+ rtnl_unlock();
+
+ if (err)
+ return err;
+
+ if (copy_to_user(argp, &ifr, sizeof(ifr)))
+ return -EFAULT;
+ return 0;
+ }
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
+
+ switch (cmd) {
+ case TUNSETNOCSUM:
+ /* Disable/Enable checksum */
+ if (arg)
+ tun->flags |= TUN_NOCHECKSUM;
+ else
+ tun->flags &= ~TUN_NOCHECKSUM;
+
+ DBG(KERN_INFO "%s: checksum %s\n",
+ tun->dev->name, arg ? "disabled" : "enabled");
+ break;
+
+ case TUNSETPERSIST:
+ /* Disable/Enable persist mode */
+ if (arg)
+ tun->flags |= TUN_PERSIST;
+ else
+ tun->flags &= ~TUN_PERSIST;
+
+ DBG(KERN_INFO "%s: persist %s\n",
+ tun->dev->name, arg ? "disabled" : "enabled");
+ break;
+
+ case TUNSETOWNER:
+ /* Set owner of the device */
+ tun->owner = (uid_t) arg;
+
+ DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
+ break;
+
+#ifdef TUN_DEBUG
+ case TUNSETDEBUG:
+ tun->debug = arg;
+ break;
+#endif
+
+ case SIOCGIFFLAGS:
+ ifr.ifr_flags = tun->if_flags;
+ if (copy_to_user( argp, &ifr, sizeof ifr))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFFLAGS:
+ /** Set the character device's interface flags. Currently only
+ * IFF_PROMISC and IFF_ALLMULTI are used. */
+ tun->if_flags = ifr.ifr_flags;
+ DBG(KERN_INFO "%s: interface flags 0x%lx\n",
+ tun->dev->name, tun->if_flags);
+ return 0;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr,
+ min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
+ if (copy_to_user( argp, &ifr, sizeof ifr))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ /** Set the character device's hardware address. This is used when
+ * filtering packets being sent from the network device to the character
+ * device. */
+ memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data,
+ min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
+ DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n",
+ tun->dev->name,
+ tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2],
+ tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]);
+ return 0;
+
+ case SIOCADDMULTI:
+ /** Add the specified group to the character device's multicast filter
+ * list. */
+ add_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data);
+ DBG(KERN_DEBUG "%s: add multi: %x:%x:%x:%x:%x:%x\n",
+ tun->dev->name,
+ (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1],
+ (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3],
+ (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]);
+ return 0;
+
+ case SIOCDELMULTI:
+ /** Remove the specified group from the character device's multicast
+ * filter list. */
+ del_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data);
+ DBG(KERN_DEBUG "%s: del multi: %x:%x:%x:%x:%x:%x\n",
+ tun->dev->name,
+ (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1],
+ (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3],
+ (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]);
+ return 0;
+
+ default:
+ return -EINVAL;
+ };
+
+ return 0;
+}
+
+static int tun_chr_fasync(int fd, struct file *file, int on)
+{
+ struct tun_struct *tun = file->private_data;
+ int ret;
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
+
+ if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
+ return ret;
+
+ if (on) {
+ ret = f_setown(file, current->pid, 0);
+ if (ret)
+ return ret;
+ tun->flags |= TUN_FASYNC;
+ } else
+ tun->flags &= ~TUN_FASYNC;
+
+ return 0;
+}
+
+static int tun_chr_open(struct inode *inode, struct file * file)
+{
+ DBG1(KERN_INFO "tunX: tun_chr_open\n");
+ file->private_data = NULL;
+ return 0;
+}
+
+static int tun_chr_close(struct inode *inode, struct file *file)
+{
+ struct tun_struct *tun = file->private_data;
+
+ if (!tun)
+ return 0;
+
+ DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
+
+ tun_chr_fasync(-1, file, 0);
+
+ rtnl_lock();
+
+ /* Detach from net device */
+ file->private_data = NULL;
+ tun->attached = 0;
+
+ /* Drop read queue */
+ skb_queue_purge(&tun->readq);
+
+ if (!(tun->flags & TUN_PERSIST)) {
+ list_del(&tun->list);
+ unregister_netdevice(tun->dev);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static struct file_operations tun_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = tun_chr_read,
+ .readv = tun_chr_readv,
+ .write = tun_chr_write,
+ .writev = tun_chr_writev,
+ .poll = tun_chr_poll,
+ .ioctl = tun_chr_ioctl,
+ .open = tun_chr_open,
+ .release = tun_chr_close,
+ .fasync = tun_chr_fasync
+};
+
+static struct miscdevice tun_miscdev = {
+ .minor = TUN_MINOR,
+ .name = "tun",
+ .fops = &tun_fops,
+ .devfs_name = "net/tun",
+};
+
+/* ethtool interface */
+
+static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->speed = SPEED_10;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_TP;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+
+ switch (tun->flags & TUN_TYPE_MASK) {
+ case TUN_TUN_DEV:
+ strcpy(info->bus_info, "tun");
+ break;
+ case TUN_TAP_DEV:
+ strcpy(info->bus_info, "tap");
+ break;
+ }
+}
+
+static u32 tun_get_msglevel(struct net_device *dev)
+{
+#ifdef TUN_DEBUG
+ struct tun_struct *tun = netdev_priv(dev);
+ return tun->debug;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static void tun_set_msglevel(struct net_device *dev, u32 value)
+{
+#ifdef TUN_DEBUG
+ struct tun_struct *tun = netdev_priv(dev);
+ tun->debug = value;
+#endif
+}
+
+static u32 tun_get_link(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ return tun->attached;
+}
+
+static u32 tun_get_rx_csum(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ return (tun->flags & TUN_NOCHECKSUM) == 0;
+}
+
+static int tun_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ if (data)
+ tun->flags &= ~TUN_NOCHECKSUM;
+ else
+ tun->flags |= TUN_NOCHECKSUM;
+ return 0;
+}
+
+static struct ethtool_ops tun_ethtool_ops = {
+ .get_settings = tun_get_settings,
+ .get_drvinfo = tun_get_drvinfo,
+ .get_msglevel = tun_get_msglevel,
+ .set_msglevel = tun_set_msglevel,
+ .get_link = tun_get_link,
+ .get_rx_csum = tun_get_rx_csum,
+ .set_rx_csum = tun_set_rx_csum
+};
+
+static int __init tun_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
+
+ ret = misc_register(&tun_miscdev);
+ if (ret)
+ printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
+ return ret;
+}
+
+static void tun_cleanup(void)
+{
+ struct tun_struct *tun, *nxt;
+
+ misc_deregister(&tun_miscdev);
+
+ rtnl_lock();
+ list_for_each_entry_safe(tun, nxt, &tun_dev_list, list) {
+ DBG(KERN_INFO "%s cleaned up\n", tun->dev->name);
+ unregister_netdevice(tun->dev);
+ }
+ rtnl_unlock();
+
+}
+
+module_init(tun_init);
+module_exit(tun_cleanup);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(TUN_MINOR);
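
For reference, here is a minimal userspace sketch (not part of this commit) of how a program attaches to the driver added above. It assumes the miscdevice is reachable as /dev/net/tun, matching the devfs_name registered in tun_miscdev, and uses only TUNSETIFF and the flags from linux/if_tun.h; the function and interface names are illustrative.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open the TUN character device and bind it to a network interface.
 * On success the returned fd carries raw IP packets via read()/write(). */
static int tun_alloc(char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* point-to-point, no tun_pi header */
	if (name && *name)
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	if (name)
		strcpy(name, ifr.ifr_name);	/* the kernel may have picked e.g. "tun0" */
	return fd;
}

Opening the device and issuing TUNSETIFF corresponds to tun_chr_open() followed by the TUNSETIFF branch of tun_chr_ioctl() above; once attached, read() and write() on the descriptor exercise tun_chr_read() and tun_chr_write().
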
diff --git a/drivers/net/typhoon-firmware.h b/drivers/net/typhoon-firmware.h
new file mode 100644
index 000000000000..2bf47d93b784
--- /dev/null
+++ b/drivers/net/typhoon-firmware.h
@@ -0,0 +1,3778 @@
+/*
+ * Copyright 1999-2004 3Com Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms of the 3c990img.h
+ * microcode software are permitted provided that the following conditions
+ * are met:
+ * 1. Redistribution of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistribution in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of 3Com may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY 3COM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * USER ACKNOWLEDGES AND AGREES THAT PURCHASE OR USE OF THE 3c990img.h
+ * MICROCODE SOFTWARE WILL NOT CREATE OR GIVE GROUNDS FOR A LICENSE BY
+ * IMPLICATION, ESTOPPEL, OR OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS
+ * (PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT)
+ * EMBODIED IN ANY OTHER 3COM HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+ * COMBINATION WITH THE 3c990img.h MICROCODE SOFTWARE
+ */
+
+ /* ver 03.001.008 */
+static const u8 typhoon_firmware_image[] = {
+0x54, 0x59, 0x50, 0x48, 0x4f, 0x4f, 0x4e, 0x00, 0x02, 0x00, 0x00, 0x00,
+0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x99, 0xb1, 0xd4,
+0x4c, 0xb8, 0xd0, 0x4b, 0x32, 0x02, 0xd4, 0xee, 0x73, 0x7e, 0x0b, 0x13,
+0x9b, 0xc0, 0xae, 0xf4, 0x40, 0x01, 0x00, 0x00, 0xe8, 0xfc, 0x00, 0x00,
+0x00, 0x00, 0xff, 0xff, 0x39, 0x00, 0x00, 0xea, 0x05, 0x00, 0x00, 0xea,
+0x04, 0x00, 0x00, 0xea, 0x03, 0x00, 0x00, 0xea, 0x02, 0x00, 0x00, 0xea,
+0x01, 0x00, 0x00, 0xea, 0x32, 0x02, 0x00, 0xea, 0xc5, 0x14, 0x00, 0xea,
+0x07, 0x00, 0x2d, 0xe9, 0x0e, 0x00, 0xa0, 0xe1, 0x00, 0x10, 0x0f, 0xe1,
+0xd0, 0x20, 0x9f, 0xe5, 0x12, 0xff, 0x2f, 0xe1, 0xfe, 0xff, 0xff, 0xea,
+0x01, 0x00, 0x80, 0xe0, 0x04, 0x20, 0x81, 0xe4, 0x01, 0x00, 0x50, 0xe1,
+0xfc, 0xff, 0xff, 0x1a, 0x0e, 0xf0, 0xa0, 0xe1, 0x00, 0xa0, 0xa0, 0xe1,
+0x0e, 0xb0, 0xa0, 0xe1, 0x00, 0x00, 0xa0, 0xe3, 0xa8, 0x10, 0x9f, 0xe5,
+0x00, 0x00, 0x81, 0xe5, 0xa4, 0x10, 0x9f, 0xe5, 0x00, 0x00, 0x81, 0xe5,
+0x01, 0x16, 0xa0, 0xe3, 0x00, 0x00, 0x91, 0xe5, 0x01, 0x00, 0x80, 0xe3,
+0x00, 0x00, 0x81, 0xe5, 0xd7, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x88, 0xd0, 0x9f, 0xe5, 0xdb, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x7c, 0xd0, 0x9f, 0xe5, 0xd2, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x74, 0xd0, 0x9f, 0xe5, 0xd1, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x6c, 0xd0, 0x9f, 0xe5, 0x9b, 0x14, 0x00, 0xeb, 0xd3, 0x00, 0xa0, 0xe3,
+0x00, 0xf0, 0x21, 0xe1, 0x60, 0xd0, 0x9f, 0xe5, 0x60, 0x00, 0x9f, 0xe5,
+0x60, 0x10, 0x9f, 0xe5, 0x60, 0x20, 0x9f, 0xe5, 0xdb, 0xff, 0xff, 0xeb,
+0x5c, 0x00, 0x9f, 0xe5, 0x5c, 0x10, 0x9f, 0xe5, 0x00, 0x20, 0xa0, 0xe3,
+0xd7, 0xff, 0xff, 0xeb, 0x54, 0x00, 0x9f, 0xe5, 0x54, 0x10, 0x9f, 0xe5,
+0xd4, 0xff, 0xff, 0xeb, 0x0a, 0x00, 0xa0, 0xe1, 0x0b, 0xf0, 0xa0, 0xe1,
+0xd3, 0x10, 0xa0, 0xe3, 0x01, 0xf0, 0x21, 0xe1, 0xd4, 0xff, 0xff, 0xeb,
+0x3c, 0xa0, 0x9f, 0xe5, 0x1a, 0xff, 0x2f, 0xe1, 0xc6, 0xff, 0xff, 0xea,
+0x15, 0x21, 0xff, 0xff, 0x0c, 0x00, 0x10, 0x00, 0x1c, 0x00, 0x10, 0x00,
+0x3c, 0x38, 0x00, 0x80, 0xfc, 0x37, 0x00, 0x80, 0xfc, 0x3f, 0x00, 0x80,
+0x7c, 0x34, 0x00, 0x80, 0x80, 0x0f, 0x00, 0x00, 0x80, 0x30, 0x00, 0x80,
+0xad, 0xde, 0xad, 0xde, 0xb0, 0xbb, 0x00, 0x00, 0x24, 0xab, 0x20, 0x40,
+0x48, 0x29, 0x00, 0x00, 0x28, 0x05, 0x00, 0x80, 0xbd, 0xba, 0x21, 0x40,
+0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x58, 0x57, 0x00, 0x00, 0x86, 0x4b, 0x00, 0x00, 0x60, 0x01, 0xff, 0xff,
+0xb0, 0xb5, 0x07, 0x1c, 0x12, 0x4d, 0x00, 0x24, 0x28, 0x68, 0x00, 0x28,
+0x1e, 0xd0, 0x38, 0x1c, 0x10, 0x49, 0x04, 0xf0, 0x7b, 0xfd, 0x29, 0x68,
+0xc0, 0x46, 0x08, 0x60, 0x00, 0x28, 0x15, 0xd0, 0x38, 0x01, 0x0d, 0x49,
+0x40, 0x18, 0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x41, 0x6b, 0x80, 0x29,
+0x0c, 0xd2, 0x01, 0x31, 0x41, 0x63, 0x28, 0x68, 0xc1, 0x69, 0xc0, 0x46,
+0x29, 0x60, 0x39, 0x07, 0x41, 0x60, 0x04, 0x62, 0xc7, 0x62, 0xb0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x20, 0x1c, 0xfa, 0xe7, 0xe8, 0x17, 0x00, 0x80,
+0xee, 0x05, 0x00, 0x00, 0xa0, 0x1c, 0x00, 0x80, 0x02, 0x49, 0x0a, 0x68,
+0xc0, 0x46, 0xc2, 0x61, 0x08, 0x60, 0x70, 0x47,
+0xe8, 0x17, 0x00, 0x80, 0x70, 0x47, 0x00, 0x00, 0x70, 0x47, 0x00, 0x00,
+0x70, 0x47, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe1, 0x00, 0x10, 0xa0, 0xe1,
+0xc0, 0x10, 0x81, 0xe3, 0x01, 0xf0, 0x21, 0xe1, 0x1e, 0xff, 0x2f, 0xe1,
+0x00, 0xf0, 0x21, 0xe1, 0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x00, 0x0f, 0xe1,
+0xc0, 0x00, 0x80, 0xe3, 0x00, 0xf0, 0x21, 0xe1, 0x1e, 0xff, 0x2f, 0xe1,
+0x00, 0x00, 0x0f, 0xe1, 0xc0, 0x00, 0xc0, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x00, 0x0f, 0xe1, 0x40, 0x00, 0x80, 0xe3,
+0x00, 0xf0, 0x21, 0xe1, 0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x00, 0x0f, 0xe1,
+0x80, 0x00, 0x10, 0xe3, 0x80, 0x00, 0x80, 0xe3, 0x00, 0xf0, 0x21, 0xe1,
+0x00, 0x00, 0x00, 0x12, 0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x00, 0x50, 0xe3,
+0x00, 0x00, 0x0f, 0xe1, 0x80, 0x00, 0xc0, 0x13, 0x00, 0xf0, 0x21, 0xe1,
+0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x00, 0x0f, 0xe1, 0x80, 0x00, 0xc0, 0xe3,
+0x00, 0xf0, 0x21, 0xe1, 0x1e, 0xff, 0x2f, 0xe1, 0x91, 0x00, 0x00, 0xe0,
+0x1e, 0xff, 0x2f, 0xe1, 0x01, 0x20, 0x80, 0xe0, 0x01, 0x00, 0x80, 0xe0,
+0x1e, 0xff, 0x2f, 0xe1, 0x80, 0xb5, 0x08, 0x4f, 0x64, 0x28, 0x04, 0xd3,
+0x64, 0x20, 0x38, 0x63, 0x00, 0x20, 0xc0, 0x43, 0x03, 0xe0, 0x38, 0x63,
+0x04, 0x49, 0x05, 0xf0, 0x01, 0xfb, 0x78, 0x63, 0xb8, 0x63, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x88, 0x13, 0x00, 0x00,
+0x80, 0xb4, 0x10, 0x4b, 0x00, 0x22, 0x1f, 0x6b, 0x64, 0x2f, 0x03, 0xd2,
+0x09, 0x68, 0x09, 0x68, 0x49, 0x08, 0x02, 0xd2, 0x10, 0x1c, 0x80, 0xbc,
+0x70, 0x47, 0x19, 0x1c, 0xdb, 0x6b, 0x4f, 0x6b, 0xbb, 0x42, 0x05, 0xd2,
+0x40, 0x68, 0x00, 0x04, 0x00, 0x0c, 0x18, 0x18, 0xc8, 0x63, 0xf1, 0xe7,
+0x41, 0x68, 0x05, 0x4b, 0x19, 0x43, 0x41, 0x60, 0x04, 0x48, 0xc1, 0x6b,
+0x01, 0x31, 0xc1, 0x63, 0x02, 0x20, 0xe8, 0xe7, 0x68, 0x0e, 0x00, 0x80,
+0x00, 0x00, 0x00, 0x80, 0x0c, 0x2b, 0x00, 0x80, 0x90, 0xb5, 0x07, 0x1c,
+0x15, 0x4c, 0x00, 0x20, 0x21, 0x6b, 0x64, 0x29, 0x0b, 0xd2, 0xb9, 0x6e,
+0x49, 0x08, 0x08, 0xd3, 0x21, 0x6c, 0xa2, 0x6b, 0x91, 0x42, 0x07, 0xd2,
+0xfa, 0x1d, 0x39, 0x32, 0x52, 0x8b, 0x89, 0x18, 0x21, 0x64, 0x90, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x78, 0x6a, 0x39, 0x6b, 0xc0, 0x46, 0x48, 0x62,
+0x38, 0x6b, 0x02, 0xf0, 0x2d, 0xfe, 0x38, 0x1c, 0x02, 0xf0, 0xe8, 0xfa,
+0x01, 0x20, 0xbb, 0x23, 0x1b, 0x01, 0xe1, 0x18, 0xc8, 0x73, 0x05, 0x49,
+0x0a, 0x6c, 0x12, 0x18, 0x0a, 0x64, 0x04, 0x49, 0x8a, 0x6d, 0x12, 0x18,
+0x8a, 0x65, 0xe4, 0xe7, 0x68, 0x0e, 0x00, 0x80, 0x0c, 0x2b, 0x00, 0x80,
+0xa4, 0x2a, 0x00, 0x80, 0x80, 0xb4, 0x0a, 0x48, 0xc0, 0x6d, 0x02, 0x23,
+0x18, 0x40, 0x09, 0x4a, 0x00, 0x21, 0x00, 0x28, 0x03, 0xd0, 0xd1, 0x63,
+0x11, 0x64, 0x80, 0xbc, 0x70, 0x47, 0x06, 0x48, 0x07, 0x68, 0x7b, 0x1c,
+0x03, 0x60, 0x0a, 0x2f, 0xf7, 0xd3, 0x01, 0x60, 0xf3, 0xe7, 0x00, 0x00,
+0xa4, 0x2a, 0x00, 0x80, 0x68, 0x0e, 0x00, 0x80, 0xe0, 0x01, 0x00, 0x80,
+0x70, 0x47, 0x02, 0x04, 0x12, 0x0c, 0x00, 0x0c, 0x10, 0x18, 0x0a, 0x04,
+0x12, 0x0c, 0x09, 0x0c, 0x51, 0x18, 0x08, 0x18, 0x01, 0x0c, 0x05, 0xd0,
+0x01, 0x04, 0x09, 0x0c, 0x00, 0x0c, 0x08, 0x18, 0x01, 0x0c, 0xf9, 0xd1,
+0x00, 0x04, 0x00, 0x0c, 0x70, 0x47, 0x80, 0xb4, 0x00, 0x22, 0x00, 0x29,
+0x18, 0xd0, 0x4f, 0x08, 0x7b, 0x1e, 0x00, 0x2f,
+0x06, 0xd0, 0x07, 0x88, 0xba, 0x18, 0x02, 0x30, 0x1f, 0x1c, 0x01, 0x3b,
+0x00, 0x2f, 0xf8, 0xd1, 0x49, 0x08, 0x03, 0xd3, 0x00, 0x88, 0x00, 0x06,
+0x00, 0x0e, 0x82, 0x18, 0x10, 0x0c, 0x05, 0xd0, 0x10, 0x04, 0x00, 0x0c,
+0x11, 0x0c, 0x42, 0x18, 0x10, 0x0c, 0xf9, 0xd1, 0x10, 0x04, 0x00, 0x0c,
+0x80, 0xbc, 0x70, 0x47, 0x80, 0xb5, 0x83, 0x89, 0xc7, 0x89, 0xfb, 0x18,
+0x07, 0x8a, 0xfb, 0x18, 0x47, 0x8a, 0xfb, 0x18, 0x40, 0x7a, 0x00, 0x02,
+0xc7, 0x18, 0x38, 0x0c, 0x05, 0xd0, 0x38, 0x04, 0x00, 0x0c, 0x3b, 0x0c,
+0xc7, 0x18, 0x38, 0x0c, 0xf9, 0xd1, 0x08, 0x1c, 0x11, 0x1c, 0xff, 0xf7,
+0xc8, 0xff, 0x01, 0x1c, 0x38, 0x1c, 0xff, 0xf7, 0xb0, 0xff, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x90, 0xb5, 0x02, 0x23, 0x82, 0x68, 0x1a, 0x40,
+0x00, 0x27, 0x00, 0x2a, 0x0f, 0xd0, 0x0a, 0x4a, 0x93, 0x69, 0x01, 0x33,
+0x93, 0x61, 0x0a, 0x68, 0x8b, 0x68, 0x9a, 0x18, 0x00, 0x68, 0x1c, 0x18,
+0x57, 0x81, 0x09, 0x69, 0x10, 0x1c, 0xff, 0xf7, 0xac, 0xff, 0xc0, 0x43,
+0x60, 0x81, 0x38, 0x1c, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x0c, 0x2b, 0x00, 0x80, 0x90, 0xb5, 0x04, 0x23, 0x82, 0x68, 0x1a, 0x40,
+0x00, 0x27, 0x00, 0x2a, 0x11, 0xd0, 0x4a, 0x68, 0x52, 0x09, 0x0e, 0xd3,
+0x09, 0x4a, 0x13, 0x6a, 0x01, 0x33, 0x13, 0x62, 0xcb, 0x68, 0x02, 0x68,
+0x9c, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x3a, 0x1a, 0x43, 0x12, 0x68,
+0x00, 0xf0, 0x2e, 0xf8, 0x20, 0x82, 0x38, 0x1c, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x0c, 0x2b, 0x00, 0x80, 0x90, 0xb5, 0x80, 0x23,
+0x82, 0x68, 0x1a, 0x40, 0x00, 0x24, 0x00, 0x2a, 0x15, 0xd0, 0x4a, 0x68,
+0x92, 0x09, 0x12, 0xd3, 0x0b, 0x4a, 0xd3, 0x69, 0x01, 0x33, 0xd3, 0x61,
+0xcb, 0x68, 0x02, 0x68, 0x9f, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x3a,
+0x1a, 0x43, 0x12, 0x68, 0x00, 0xf0, 0x0e, 0xf8, 0x00, 0x28, 0x00, 0xd1,
+0x04, 0x48, 0xc0, 0x46, 0xf8, 0x80, 0x20, 0x1c, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x0c, 0x2b, 0x00, 0x80, 0xff, 0xff, 0x00, 0x00,
+0xb0, 0xb5, 0x14, 0x1c, 0x05, 0x1c, 0x0f, 0x1c, 0x38, 0x69, 0xb9, 0x68,
+0x41, 0x18, 0x38, 0x68, 0xff, 0xf7, 0x53, 0xff, 0xc0, 0x43, 0x01, 0x04,
+0x09, 0x0c, 0x20, 0x1c, 0xff, 0xf7, 0x39, 0xff, 0x04, 0x1c, 0xb8, 0x68,
+0x79, 0x69, 0x40, 0x18, 0x69, 0x68, 0x88, 0x42, 0x0c, 0xd2, 0x2a, 0x68,
+0x12, 0x18, 0x09, 0x1a, 0x10, 0x1c, 0x00, 0xf0, 0x05, 0xf9, 0xc0, 0x43,
+0x01, 0x04, 0x09, 0x0c, 0x20, 0x1c, 0xff, 0xf7, 0x26, 0xff, 0x04, 0x1c,
+0xe0, 0x43, 0x00, 0x04, 0x00, 0x0c, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x80, 0xb5, 0x07, 0x1c, 0xb8, 0x6b, 0xc0, 0x08, 0x1a, 0xd3, 0xb8, 0x6a,
+0xf9, 0x6b, 0x40, 0x18, 0x79, 0x6c, 0x00, 0xf0, 0xed, 0xf8, 0xc0, 0x43,
+0x01, 0x04, 0x09, 0x0c, 0x0a, 0x48, 0x07, 0xd0, 0x20, 0x23, 0xb9, 0x69,
+0x19, 0x43, 0xb9, 0x61, 0x01, 0x6b, 0x01, 0x31, 0x01, 0x63, 0x07, 0xe0,
+0xff, 0x23, 0x01, 0x33, 0xb9, 0x69, 0x19, 0x43, 0xb9, 0x61, 0x41, 0x6a,
+0x01, 0x31, 0x41, 0x62, 0x00, 0x20, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x0c, 0x2b, 0x00, 0x80, 0x80, 0xb5, 0x07, 0x1c, 0xb8, 0x6b, 0x41, 0x09,
+0x1c, 0xd3, 0xc0, 0x08, 0x1a, 0xd3, 0xf8, 0x1d, 0x39, 0x30, 0x00, 0x7b,
+0x06, 0x28, 0x15, 0xd1, 0x38, 0x1c, 0x00, 0xf0, 0x53, 0xf8, 0x01, 0x1c,
+0x0a, 0x48, 0x07, 0xd0, 0x40, 0x23, 0xb9, 0x69,
+0x19, 0x43, 0xb9, 0x61, 0x81, 0x6b, 0x01, 0x31, 0x81, 0x63, 0x07, 0xe0,
+0x01, 0x23, 0x9b, 0x02, 0xb9, 0x69, 0x19, 0x43, 0xb9, 0x61, 0xc1, 0x6a,
+0x01, 0x31, 0xc1, 0x62, 0x00, 0x20, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x0c, 0x2b, 0x00, 0x80, 0xb0, 0xb5, 0x07, 0x1c, 0xb8, 0x6b, 0x81, 0x09,
+0x2c, 0xd3, 0xc0, 0x08, 0x2a, 0xd3, 0xf8, 0x1d, 0x39, 0x30, 0x00, 0x7b,
+0x11, 0x28, 0x25, 0xd1, 0xb8, 0x6a, 0x39, 0x6c, 0x40, 0x18, 0x01, 0x23,
+0x9b, 0x07, 0x06, 0x30, 0x18, 0x43, 0x00, 0x68, 0x05, 0x04, 0x2d, 0x0c,
+0x0f, 0x4c, 0x11, 0xd0, 0x38, 0x1c, 0x00, 0xf0, 0x1f, 0xf8, 0x00, 0x28,
+0x0c, 0xd0, 0xa8, 0x42, 0x02, 0xd1, 0x0c, 0x4b, 0x98, 0x42, 0x07, 0xd0,
+0x80, 0x23, 0xb8, 0x69, 0x18, 0x43, 0xb8, 0x61, 0x60, 0x6b, 0x01, 0x30,
+0x60, 0x63, 0x07, 0xe0, 0x01, 0x23, 0x5b, 0x02, 0xb8, 0x69, 0x18, 0x43,
+0xb8, 0x61, 0xa0, 0x6a, 0x01, 0x30, 0xa0, 0x62, 0x00, 0x20, 0xb0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x0c, 0x2b, 0x00, 0x80, 0xff, 0xff, 0x00, 0x00,
+0xf0, 0xb5, 0xff, 0xb0, 0x99, 0xb0, 0x04, 0x1c, 0xe0, 0x6b, 0x61, 0x6c,
+0x09, 0x18, 0x03, 0xaa, 0x85, 0x18, 0xa3, 0x6a, 0x00, 0x20, 0x8a, 0x08,
+0x01, 0x32, 0x97, 0x92, 0x07, 0xd0, 0x82, 0x00, 0x9f, 0x58, 0x03, 0xae,
+0xb7, 0x50, 0x97, 0x9a, 0x01, 0x30, 0x82, 0x42, 0xf7, 0xd8, 0x60, 0x6a,
+0x01, 0x23, 0x9b, 0x07, 0x04, 0x30, 0x18, 0x43, 0x00, 0x68, 0xc0, 0x46,
+0x02, 0x90, 0x02, 0xaf, 0x3f, 0x88, 0x03, 0xa8, 0xff, 0xf7, 0x87, 0xfe,
+0xc0, 0x43, 0x01, 0x04, 0x09, 0x0c, 0x38, 0x1c, 0xff, 0xf7, 0x6d, 0xfe,
+0x07, 0x1c, 0xe0, 0x6b, 0xa1, 0x6c, 0x40, 0x18, 0x61, 0x6a, 0x01, 0x23,
+0x9b, 0x07, 0x08, 0x31, 0x19, 0x43, 0x09, 0x68, 0xc0, 0x46, 0x01, 0x91,
+0x01, 0xa9, 0x09, 0x88, 0x01, 0x31, 0x88, 0x42, 0x0c, 0xd2, 0xa2, 0x6a,
+0x12, 0x18, 0x09, 0x1a, 0x10, 0x1c, 0x00, 0xf0, 0x2f, 0xf8, 0xc0, 0x43,
+0x01, 0x04, 0x09, 0x0c, 0x38, 0x1c, 0xff, 0xf7, 0x50, 0xfe, 0x07, 0x1c,
+0xa8, 0x89, 0xe9, 0x89, 0x08, 0x18, 0x29, 0x8a, 0x08, 0x18, 0x69, 0x8a,
+0x08, 0x18, 0x69, 0x7a, 0x09, 0x02, 0x08, 0x18, 0xa1, 0x6c, 0x62, 0x6c,
+0x89, 0x1a, 0x0a, 0x04, 0x12, 0x0c, 0x11, 0x02, 0x12, 0x0a, 0x11, 0x43,
+0x09, 0x04, 0x09, 0x0c, 0x09, 0x18, 0x08, 0x0c, 0x05, 0xd0, 0x08, 0x04,
+0x00, 0x0c, 0x09, 0x0c, 0x41, 0x18, 0x08, 0x0c, 0xf9, 0xd1, 0x38, 0x1c,
+0xff, 0xf7, 0x2f, 0xfe, 0xc0, 0x43, 0x00, 0x04, 0x00, 0x0c, 0x7f, 0xb0,
+0x19, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xb0, 0xb4, 0x00, 0x22,
+0x00, 0x29, 0x2e, 0xd0, 0x83, 0x07, 0x9b, 0x0f, 0xdc, 0x00, 0x47, 0x18,
+0x04, 0x25, 0xef, 0x1b, 0xbf, 0x07, 0xbf, 0x0f, 0xff, 0x00, 0x80, 0x08,
+0x80, 0x00, 0x59, 0x18, 0x03, 0x31, 0x89, 0x08, 0x4d, 0x1e, 0x02, 0xc8,
+0xe1, 0x40, 0xa1, 0x40, 0x6b, 0x1e, 0x00, 0x2d, 0x09, 0xd0, 0x0c, 0x04,
+0x24, 0x0c, 0xa2, 0x18, 0x09, 0x0c, 0x8a, 0x18, 0x02, 0xc8, 0x1c, 0x1c,
+0x01, 0x3b, 0x00, 0x2c, 0xf5, 0xd1, 0xb9, 0x40, 0x08, 0x1c, 0xf8, 0x40,
+0x01, 0x04, 0x09, 0x0c, 0x89, 0x18, 0x00, 0x0c, 0x42, 0x18, 0x10, 0x0c,
+0x05, 0xd0, 0x10, 0x04, 0x00, 0x0c, 0x11, 0x0c, 0x42, 0x18, 0x10, 0x0c,
+0xf9, 0xd1, 0x10, 0x04, 0x00, 0x0c, 0xb0, 0xbc, 0x70, 0x47, 0x00, 0x00,
+0x90, 0xb4, 0x00, 0x20, 0x01, 0x27, 0x11, 0x49, 0x42, 0x00, 0x12, 0x18,
+0xd2, 0x00, 0x53, 0x18, 0x9c, 0x68, 0x01, 0x23,
+0x9b, 0x07, 0x23, 0x43, 0x1b, 0x68, 0x1b, 0x03, 0x1b, 0x0b, 0x8a, 0x58,
+0x12, 0x03, 0x12, 0x0b, 0x93, 0x42, 0x0c, 0xd1, 0x01, 0x30, 0x04, 0x28,
+0xec, 0xd3, 0x08, 0x48, 0xc0, 0x6a, 0x01, 0x03, 0x09, 0x0b, 0x07, 0x48,
+0x00, 0x6f, 0x00, 0x03, 0x00, 0x0b, 0x81, 0x42, 0x02, 0xd0, 0x38, 0x1c,
+0x90, 0xbc, 0x70, 0x47, 0x00, 0x20, 0xfb, 0xe7, 0xa8, 0x03, 0x00, 0x80,
+0x00, 0x40, 0x14, 0x40, 0x68, 0x0e, 0x00, 0x80, 0x98, 0xb4, 0x14, 0x4a,
+0xc0, 0x46, 0x00, 0x92, 0x83, 0x00, 0x13, 0x48, 0xc0, 0x58, 0x07, 0x03,
+0x3f, 0x0b, 0x12, 0x48, 0xc0, 0x58, 0x02, 0x03, 0x12, 0x0b, 0x11, 0x48,
+0xc0, 0x58, 0x00, 0x03, 0x00, 0x0b, 0x10, 0x4c, 0xe4, 0x58, 0x01, 0x23,
+0x9b, 0x07, 0x23, 0x43, 0x1b, 0x68, 0x9b, 0x00, 0xcc, 0x00, 0x01, 0x21,
+0x98, 0x42, 0x01, 0xd1, 0x08, 0x1c, 0x09, 0xe0, 0x98, 0x42, 0x03, 0xd9,
+0x10, 0x1a, 0xda, 0x1b, 0x80, 0x18, 0x00, 0xe0, 0x18, 0x1a, 0x84, 0x42,
+0xf4, 0xd3, 0x00, 0x20, 0x98, 0xbc, 0x70, 0x47, 0x55, 0x55, 0x55, 0x55,
+0x20, 0x04, 0x00, 0x80, 0x28, 0x04, 0x00, 0x80, 0x08, 0x04, 0x00, 0x80,
+0x18, 0x04, 0x00, 0x80, 0x80, 0xb4, 0x13, 0x04, 0x00, 0xd0, 0x01, 0x3a,
+0x80, 0x00, 0x0b, 0x1c, 0x13, 0x49, 0x0f, 0x58, 0xc0, 0x46, 0x3b, 0x60,
+0x0b, 0x58, 0xc0, 0x46, 0x5a, 0x60, 0x0a, 0x58, 0x08, 0x32, 0x10, 0x4b,
+0x1b, 0x58, 0x9a, 0x42, 0x01, 0xd3, 0x0f, 0x4a, 0x12, 0x58, 0x0f, 0x4b,
+0x1f, 0x58, 0x01, 0x23, 0x9b, 0x07, 0x3b, 0x43, 0x1b, 0x68, 0x9b, 0x00,
+0x17, 0x03, 0x3f, 0x0b, 0x9f, 0x42, 0x06, 0xd1, 0x0a, 0x48, 0xc1, 0x68,
+0x01, 0x31, 0xc1, 0x60, 0x01, 0x20, 0x80, 0xbc, 0x70, 0x47, 0x08, 0x4b,
+0x1b, 0x58, 0xc0, 0x46, 0x1a, 0x60, 0x0a, 0x50, 0x00, 0x20, 0xf6, 0xe7,
+0x08, 0x04, 0x00, 0x80, 0x28, 0x04, 0x00, 0x80, 0x20, 0x04, 0x00, 0x80,
+0x18, 0x04, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40, 0x10, 0x04, 0x00, 0x80,
+0xff, 0x5f, 0x2d, 0xe9, 0x48, 0xfe, 0xff, 0xeb, 0x01, 0xb6, 0xa0, 0xe3,
+0x01, 0xb1, 0x8b, 0xe2, 0x02, 0x8a, 0xa0, 0xe3, 0x01, 0x7a, 0xa0, 0xe3,
+0x01, 0xa9, 0xa0, 0xe3, 0x01, 0x56, 0xa0, 0xe3, 0xc8, 0x60, 0x9f, 0xe5,
+0xc8, 0x90, 0x9f, 0xe5, 0x14, 0x40, 0x9b, 0xe5, 0x00, 0x00, 0x54, 0xe3,
+0x2c, 0x00, 0x00, 0x0a, 0x03, 0x0a, 0x14, 0xe3, 0x11, 0x00, 0x00, 0x0a,
+0x0c, 0x00, 0x96, 0xe5, 0x00, 0x00, 0x50, 0xe3, 0x21, 0x00, 0x00, 0x0a,
+0x01, 0x0a, 0x14, 0xe3, 0x05, 0x00, 0x00, 0x0a, 0x1c, 0x00, 0x96, 0xe5,
+0x01, 0x0a, 0xc0, 0xe3, 0x1c, 0x00, 0x86, 0xe5, 0x1c, 0x00, 0x85, 0xe5,
+0x14, 0x70, 0x85, 0xe5, 0x06, 0x00, 0x00, 0xea, 0x02, 0x0a, 0x14, 0xe3,
+0x04, 0x00, 0x00, 0x0a, 0x1c, 0x00, 0x96, 0xe5, 0x02, 0x0a, 0xc0, 0xe3,
+0x1c, 0x00, 0x86, 0xe5, 0x1c, 0x00, 0x85, 0xe5, 0x14, 0x80, 0x85, 0xe5,
+0x01, 0x09, 0x14, 0xe3, 0x04, 0x00, 0x00, 0x0a, 0x1c, 0x00, 0x96, 0xe5,
+0x01, 0x09, 0xc0, 0xe3, 0x1c, 0x00, 0x86, 0xe5, 0x1c, 0x00, 0x85, 0xe5,
+0x14, 0xa0, 0x85, 0xe5, 0x02, 0x00, 0x14, 0xe3, 0x40, 0x00, 0x00, 0x1b,
+0x01, 0x00, 0x14, 0xe3, 0x54, 0x00, 0x00, 0x1b, 0x02, 0x0b, 0x14, 0xe3,
+0x67, 0x00, 0x00, 0x1b, 0x01, 0x0b, 0x14, 0xe3, 0x20, 0x00, 0x00, 0x1b,
+0x18, 0x00, 0x99, 0xe5, 0x01, 0x00, 0x80, 0xe2, 0x18, 0x00, 0x89, 0xe5,
+0xd5, 0xff, 0xff, 0xea, 0x1c, 0x00, 0x96, 0xe5, 0x01, 0x0a, 0xc0, 0xe3,
+0x1c, 0x00, 0x86, 0xe5, 0x1c, 0x00, 0x85, 0xe5,
+0x14, 0x70, 0x85, 0xe5, 0xe1, 0xff, 0xff, 0xea, 0xff, 0x5f, 0xbd, 0xe8,
+0x04, 0xf0, 0x5e, 0xe2, 0x68, 0x0e, 0x00, 0x80, 0x08, 0x83, 0x20, 0x40,
+0x10, 0x10, 0x1f, 0xe5, 0x14, 0x30, 0x91, 0xe5, 0x00, 0x20, 0xc3, 0xe1,
+0x14, 0x20, 0x81, 0xe5, 0x01, 0x16, 0xa0, 0xe3, 0x0c, 0x20, 0x81, 0xe5,
+0x0b, 0x12, 0xa0, 0xe3, 0x00, 0x00, 0x81, 0xe5, 0x18, 0x10, 0x9f, 0xe5,
+0xb0, 0x24, 0xd1, 0xe1, 0x01, 0x20, 0x82, 0xe2, 0xb0, 0x24, 0xc1, 0xe1,
+0x3c, 0x20, 0x91, 0xe5, 0x00, 0x00, 0x82, 0xe1, 0x3c, 0x00, 0x81, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0xa0, 0x82, 0x20, 0x40, 0xff, 0xff, 0xff, 0xea,
+0xfe, 0xff, 0xff, 0xea, 0x01, 0x0b, 0xa0, 0xe3, 0x01, 0x16, 0xa0, 0xe3,
+0x14, 0x00, 0x81, 0xe5, 0x00, 0x1a, 0x81, 0xe1, 0x24, 0x20, 0x91, 0xe5,
+0x70, 0x00, 0x1f, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x24, 0x20, 0x80, 0xe5,
+0x28, 0x10, 0x91, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x28, 0x10, 0x80, 0xe5,
+0x2c, 0x20, 0x90, 0xe5, 0x01, 0x20, 0x82, 0xe2, 0x2c, 0x20, 0x80, 0xe5,
+0x3f, 0x00, 0x01, 0xe2, 0x3f, 0x00, 0x50, 0xe3, 0x1e, 0xff, 0x2f, 0x11,
+0x18, 0x00, 0x9f, 0xe5, 0x00, 0x10, 0x90, 0xe5, 0x01, 0x10, 0x81, 0xe2,
+0x00, 0x10, 0x80, 0xe5, 0x02, 0x18, 0xa0, 0xe3, 0x0b, 0x02, 0xa0, 0xe3,
+0x00, 0x10, 0x80, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0x30, 0x04, 0x00, 0x80,
+0x01, 0x06, 0xa0, 0xe3, 0x01, 0x01, 0x80, 0xe2, 0x00, 0x10, 0x90, 0xe5,
+0x01, 0x08, 0x11, 0xe3, 0x0b, 0x10, 0xa0, 0xe3, 0x02, 0x19, 0x81, 0xe2,
+0x05, 0x00, 0x00, 0x1a, 0x00, 0x20, 0x90, 0xe5, 0x42, 0x28, 0xb0, 0xe1,
+0x05, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x90, 0xe5, 0x02, 0x0c, 0x10, 0xe3,
+0x02, 0x00, 0x00, 0x0a, 0x06, 0x07, 0xa0, 0xe3, 0x4c, 0x11, 0x80, 0xe5,
+0x03, 0x00, 0x00, 0xea, 0x0c, 0x00, 0x9f, 0xe5, 0x00, 0x00, 0x00, 0x00,
+0x40, 0x10, 0x80, 0xe5, 0xff, 0xff, 0xff, 0xea, 0xfe, 0xff, 0xff, 0xea,
+0x00, 0x00, 0x00, 0x80, 0x01, 0x06, 0xa0, 0xe3, 0x01, 0x01, 0x80, 0xe2,
+0x00, 0x10, 0x90, 0xe5, 0x01, 0x08, 0x11, 0xe3, 0x0c, 0x10, 0xa0, 0xe3,
+0x02, 0x19, 0x81, 0xe2, 0x05, 0x00, 0x00, 0x1a, 0x00, 0x20, 0x90, 0xe5,
+0x42, 0x28, 0xb0, 0xe1, 0x05, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x90, 0xe5,
+0x02, 0x0c, 0x10, 0xe3, 0x02, 0x00, 0x00, 0x0a, 0x06, 0x07, 0xa0, 0xe3,
+0x4c, 0x11, 0x80, 0xe5, 0x03, 0x00, 0x00, 0xea, 0x4c, 0x00, 0x1f, 0xe5,
+0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x80, 0xe5, 0xff, 0xff, 0xff, 0xea,
+0xfe, 0xff, 0xff, 0xea, 0x02, 0x1b, 0xa0, 0xe3, 0x01, 0x06, 0xa0, 0xe3,
+0x14, 0x10, 0x80, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0x80, 0x21, 0x1f, 0xe5,
+0x14, 0x30, 0x92, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x80, 0xe5,
+0x1c, 0x00, 0x92, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x14, 0x10, 0x82, 0xe5, 0x01, 0x06, 0xa0, 0xe3,
+0x1c, 0x10, 0x82, 0xe5, 0x0c, 0x10, 0x80, 0xe5, 0x1c, 0x10, 0x92, 0xe5,
+0x00, 0x00, 0x00, 0x00, 0x1c, 0x10, 0x80, 0xe5, 0x1e, 0xff, 0x2f, 0xe1,
+0xc0, 0x21, 0x1f, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x10, 0x82, 0xe5,
+0x01, 0x16, 0xa0, 0xe3, 0x14, 0x00, 0x82, 0xe5, 0x0c, 0x00, 0x81, 0xe5,
+0x1c, 0x00, 0x92, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x81, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0x80, 0xb5, 0x0f, 0x1c, 0x38, 0x1c, 0x00, 0xf0,
+0x17, 0xf8, 0x00, 0x28, 0x02, 0xd0, 0x38, 0x1c,
+0x00, 0xf0, 0x92, 0xf8, 0x00, 0x20, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x80, 0xb5, 0x0f, 0x1c, 0x38, 0x1c, 0x00, 0xf0, 0x09, 0xf8, 0x00, 0x28,
+0x02, 0xd0, 0x38, 0x1c, 0x00, 0xf0, 0x84, 0xf8, 0x00, 0x20, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0xf0, 0xb4, 0x07, 0x68, 0x3a, 0x78, 0xd2, 0x07,
+0xd2, 0x0f, 0x00, 0x24, 0x00, 0x2a, 0x03, 0xd0, 0xff, 0x22, 0x01, 0x32,
+0x42, 0x60, 0x00, 0xe0, 0x44, 0x60, 0x3a, 0x7b, 0x7b, 0x7b, 0x1b, 0x02,
+0x1a, 0x43, 0x81, 0x2a, 0x08, 0xd1, 0x01, 0x23, 0x5b, 0x02, 0x42, 0x68,
+0x1a, 0x43, 0x42, 0x60, 0x04, 0x22, 0xbf, 0x18, 0x82, 0x60, 0x00, 0xe0,
+0x84, 0x60, 0x3a, 0x7b, 0x7b, 0x7b, 0x1b, 0x02, 0x1a, 0x43, 0x08, 0x2a,
+0x06, 0xd1, 0x06, 0x23, 0x41, 0x68, 0x19, 0x43, 0x41, 0x60, 0x81, 0x68,
+0x0e, 0x31, 0x3c, 0xe0, 0xc1, 0x23, 0xdb, 0x00, 0x9a, 0x42, 0x03, 0xd1,
+0x41, 0x68, 0x24, 0x4b, 0x19, 0x43, 0x3e, 0xe0, 0x23, 0x4b, 0x9a, 0x42,
+0x04, 0xd1, 0x01, 0x23, 0x1b, 0x03, 0x41, 0x68, 0x19, 0x43, 0x36, 0xe0,
+0x13, 0x02, 0x12, 0x0a, 0x12, 0x06, 0x12, 0x0e, 0x1a, 0x43, 0x12, 0x04,
+0x12, 0x0c, 0x2e, 0x3a, 0x1c, 0x4b, 0x9a, 0x42, 0x2d, 0xd8, 0x01, 0x25,
+0x42, 0x68, 0x15, 0x43, 0x45, 0x60, 0xba, 0x7b, 0xfb, 0x7b, 0x1b, 0x02,
+0x1a, 0x43, 0x18, 0x4b, 0x9a, 0x42, 0x22, 0xd1, 0xfb, 0x1d, 0x09, 0x33,
+0x44, 0xcb, 0x9b, 0x07, 0xdb, 0x0e, 0xda, 0x40, 0x5b, 0x42, 0x20, 0x33,
+0x9e, 0x40, 0x16, 0x43, 0x03, 0x2e, 0x18, 0xd1, 0x39, 0x7d, 0x7b, 0x7d,
+0x1b, 0x02, 0x19, 0x43, 0x08, 0x29, 0x07, 0xd1, 0x04, 0x21, 0x29, 0x43,
+0x41, 0x60, 0x81, 0x68, 0x16, 0x31, 0x81, 0x60, 0x01, 0x21, 0x0a, 0xe0,
+0xc1, 0x23, 0xdb, 0x00, 0x99, 0x42, 0x04, 0xd1, 0x01, 0x21, 0x89, 0x03,
+0x29, 0x43, 0x41, 0x60, 0x00, 0xe0, 0x84, 0x60, 0x00, 0x21, 0x08, 0x1c,
+0xf0, 0xbc, 0x70, 0x47, 0x02, 0x40, 0x00, 0x00, 0x81, 0x80, 0x00, 0x00,
+0xae, 0x05, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0x80, 0xb4, 0x42, 0x68,
+0xd1, 0x08, 0x3f, 0xd3, 0x01, 0x68, 0x83, 0x68, 0x59, 0x18, 0x02, 0x39,
+0x8f, 0x78, 0x3f, 0x07, 0x3f, 0x0f, 0x05, 0x2f, 0x03, 0xd1, 0xda, 0x1d,
+0x0d, 0x32, 0xc2, 0x60, 0x05, 0xe0, 0xbf, 0x00, 0xdb, 0x19, 0xc3, 0x60,
+0x08, 0x23, 0x1a, 0x43, 0x42, 0x60, 0x8a, 0x78, 0x12, 0x07, 0x12, 0x0f,
+0x92, 0x00, 0x02, 0x61, 0x0a, 0x79, 0x4b, 0x79, 0x1b, 0x02, 0x1a, 0x43,
+0x13, 0x02, 0x12, 0x0a, 0x12, 0x06, 0x12, 0x0e, 0x1a, 0x43, 0x12, 0x04,
+0x12, 0x0c, 0x42, 0x61, 0xca, 0x7a, 0x06, 0x2a, 0x03, 0xd1, 0x10, 0x23,
+0x42, 0x68, 0x1a, 0x43, 0x10, 0xe0, 0x11, 0x2a, 0x03, 0xd1, 0x20, 0x23,
+0x42, 0x68, 0x1a, 0x43, 0x0a, 0xe0, 0x33, 0x2a, 0x03, 0xd1, 0x40, 0x23,
+0x42, 0x68, 0x1a, 0x43, 0x04, 0xe0, 0x32, 0x2a, 0x03, 0xd1, 0x80, 0x23,
+0x42, 0x68, 0x1a, 0x43, 0x42, 0x60, 0xc9, 0x7a, 0xc0, 0x46, 0x01, 0x76,
+0x80, 0xbc, 0x70, 0x47, 0x0a, 0x78, 0xc0, 0x46, 0x02, 0x60, 0x4b, 0x78,
+0x1b, 0x02, 0x1a, 0x43, 0x02, 0x60, 0x8b, 0x78, 0x1b, 0x04, 0x1a, 0x43,
+0x02, 0x60, 0xc9, 0x78, 0x09, 0x06, 0x11, 0x43, 0x01, 0x60, 0x70, 0x47,
+0x80, 0xb5, 0x07, 0x1c, 0x48, 0x68, 0x80, 0x09, 0x26, 0xd3, 0xb8, 0x6a,
+0xc9, 0x68, 0x40, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x02, 0x30, 0x18, 0x43,
+0x00, 0x68, 0x00, 0x04, 0x00, 0x0c, 0x11, 0x23, 0x9b, 0x02, 0x98, 0x42,
+0x18, 0xd1, 0x78, 0x6a, 0x39, 0x6b, 0xc0, 0x46,
+0x48, 0x62, 0x38, 0x6b, 0x02, 0xf0, 0xda, 0xf8, 0x38, 0x1c, 0x01, 0xf0,
+0x95, 0xfd, 0x01, 0x20, 0x07, 0x49, 0xc0, 0x46, 0xc8, 0x73, 0x07, 0x49,
+0x4a, 0x6c, 0x12, 0x18, 0x4a, 0x64, 0x06, 0x49, 0x8a, 0x6d, 0x12, 0x18,
+0x8a, 0x65, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x20, 0xfa, 0xe7,
+0x18, 0x1a, 0x00, 0x80, 0x0c, 0x2b, 0x00, 0x80, 0xa4, 0x2a, 0x00, 0x80,
+0x81, 0x07, 0x19, 0xd0, 0x80, 0x08, 0x80, 0x00, 0x01, 0x23, 0x9b, 0x07,
+0x01, 0x1d, 0x18, 0x43, 0x00, 0x68, 0x19, 0x43, 0x09, 0x68, 0x02, 0x02,
+0x12, 0x0e, 0x12, 0x06, 0x00, 0x0a, 0xff, 0x23, 0x1b, 0x04, 0x18, 0x40,
+0x10, 0x43, 0x0a, 0x0a, 0x12, 0x06, 0x12, 0x0e, 0x10, 0x43, 0x09, 0x02,
+0x1b, 0x0a, 0x19, 0x40, 0x08, 0x43, 0x70, 0x47, 0x01, 0x23, 0x9b, 0x07,
+0x18, 0x43, 0x00, 0x68, 0x01, 0x06, 0x02, 0x02, 0xff, 0x23, 0x1b, 0x04,
+0x1a, 0x40, 0x11, 0x43, 0x02, 0x0a, 0x1b, 0x0a, 0x1a, 0x40, 0x11, 0x43,
+0x00, 0x0e, 0x08, 0x43, 0xed, 0xe7, 0x00, 0x00, 0xf0, 0xb5, 0x04, 0x23,
+0x81, 0x6b, 0x19, 0x40, 0x00, 0x22, 0x00, 0x29, 0x46, 0xd0, 0xc7, 0x1d,
+0x39, 0x37, 0x39, 0x7b, 0x33, 0x29, 0x01, 0xd0, 0x32, 0x29, 0x3f, 0xd1,
+0x01, 0x6b, 0xc0, 0x46, 0x4a, 0x65, 0xc4, 0x1d, 0x2d, 0x34, 0xcd, 0x1d,
+0x2d, 0x35, 0x00, 0x22, 0x93, 0x00, 0xe6, 0x58, 0xc0, 0x46, 0xee, 0x50,
+0x01, 0x32, 0x07, 0x2a, 0xf8, 0xd3, 0x82, 0x6a, 0xc0, 0x46, 0x4a, 0x63,
+0x82, 0x6a, 0xc0, 0x46, 0x8a, 0x62, 0x7a, 0x8b, 0xcb, 0x1d, 0x39, 0x33,
+0x5a, 0x83, 0x40, 0x6a, 0xc0, 0x46, 0x48, 0x62, 0x12, 0x48, 0x01, 0x27,
+0x42, 0x68, 0x00, 0x2a, 0x10, 0xd1, 0xc2, 0x68, 0x00, 0x2a, 0x13, 0xd1,
+0x42, 0x69, 0x00, 0x2a, 0x0d, 0xd1, 0x01, 0x61, 0xc1, 0x60, 0x01, 0x6a,
+0x02, 0x29, 0x02, 0xd3, 0x20, 0x30, 0x07, 0x71, 0x0c, 0xe0, 0x00, 0xf0,
+0x13, 0xf8, 0x09, 0xe0, 0xc2, 0x68, 0x00, 0x2a, 0x02, 0xd1, 0x01, 0x61,
+0xc1, 0x60, 0x03, 0xe0, 0x02, 0x69, 0xc0, 0x46, 0x51, 0x65, 0x01, 0x61,
+0x38, 0x1c, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x10, 0x1c, 0xfa, 0xe7,
+0x6c, 0x06, 0x00, 0x80, 0x80, 0xb5, 0x1e, 0x49, 0x00, 0x22, 0xcb, 0x68,
+0x00, 0x2b, 0x34, 0xd0, 0xc8, 0x1d, 0xf9, 0x30, 0x83, 0x62, 0xcb, 0x68,
+0x9b, 0x6a, 0xc0, 0x46, 0xc3, 0x62, 0xcf, 0x69, 0x7b, 0x00, 0xdf, 0x19,
+0x7f, 0x02, 0x17, 0x4b, 0xff, 0x18, 0xff, 0x37, 0x65, 0x37, 0x83, 0x63,
+0x07, 0x63, 0xcb, 0x1d, 0xff, 0x33, 0x5a, 0x33, 0x1a, 0x72, 0xcb, 0x69,
+0x00, 0x2b, 0x01, 0xd0, 0xca, 0x61, 0x01, 0xe0, 0x01, 0x23, 0xcb, 0x61,
+0x0f, 0x1c, 0xc9, 0x68, 0x49, 0x6a, 0x09, 0x89, 0x01, 0x31, 0x41, 0x63,
+0xf8, 0x1d, 0xff, 0x30, 0x3a, 0x30, 0x42, 0x60, 0x02, 0x82, 0x82, 0x60,
+0xc2, 0x60, 0x38, 0x1c, 0x00, 0xf0, 0xce, 0xfa, 0x38, 0x6a, 0x01, 0x30,
+0x38, 0x62, 0x38, 0x1c, 0x00, 0xf0, 0x0a, 0xf8, 0x80, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x10, 0x1c, 0xfa, 0xe7, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80,
+0xac, 0xab, 0x20, 0x40, 0xf0, 0xb5, 0x07, 0x1c, 0xf9, 0x1d, 0xf9, 0x31,
+0x88, 0x6a, 0xc2, 0x1d, 0x2d, 0x32, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x32,
+0x1a, 0x43, 0xc8, 0x6a, 0x12, 0x68, 0x12, 0x04, 0x12, 0x0c, 0x80, 0x18,
+0x82, 0x79, 0xc3, 0x79, 0x1b, 0x02, 0x1a, 0x43, 0x13, 0x02, 0x12, 0x0a,
+0x12, 0x06, 0x12, 0x0e, 0x1a, 0x43, 0x12, 0x04, 0x12, 0x0c, 0x02, 0x38,
+0x92, 0x04, 0x92, 0x0c, 0x00, 0x26, 0x25, 0x4d,
+0xec, 0x1d, 0xff, 0x34, 0x3a, 0x34, 0x00, 0x2a, 0x04, 0xd0, 0x20, 0x8a,
+0x01, 0x23, 0x9b, 0x02, 0x18, 0x43, 0x2b, 0xe0, 0x01, 0x23, 0x9b, 0x07,
+0xc2, 0x1d, 0x0d, 0x32, 0x1a, 0x43, 0x12, 0x68, 0x12, 0x04, 0x12, 0x30,
+0x18, 0x43, 0x00, 0x68, 0x00, 0x04, 0x00, 0x0c, 0x10, 0x43, 0x03, 0x1c,
+0xf8, 0x1d, 0xff, 0x30, 0x4a, 0x30, 0x82, 0x78, 0xc8, 0x6b, 0x19, 0x1c,
+0x02, 0xf0, 0x02, 0xf8, 0x00, 0x28, 0x04, 0xda, 0x20, 0x8a, 0xff, 0x23,
+0x01, 0x33, 0x18, 0x43, 0x0e, 0xe0, 0xf9, 0x1d, 0xff, 0x31, 0x3a, 0x31,
+0x08, 0x60, 0x01, 0x04, 0x09, 0x0c, 0x38, 0x1c, 0x00, 0xf0, 0x1c, 0xf8,
+0x00, 0x28, 0x14, 0xd1, 0x20, 0x8a, 0x01, 0x23, 0x5b, 0x02, 0x18, 0x43,
+0x20, 0x82, 0x21, 0x8a, 0x38, 0x1c, 0x00, 0xf0, 0xa2, 0xfb, 0xe8, 0x68,
+0x01, 0x23, 0x9b, 0x07, 0x54, 0x30, 0x18, 0x43, 0x00, 0x68, 0xc0, 0x46,
+0xe8, 0x60, 0x30, 0x1c, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x01, 0x20,
+0xfa, 0xe7, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80, 0xf8, 0xb5, 0x07, 0x1c,
+0xfc, 0x1d, 0xf9, 0x34, 0xa0, 0x6b, 0xa6, 0x6a, 0xc5, 0x1d, 0x0d, 0x35,
+0x38, 0x48, 0xc0, 0x6a, 0x4b, 0x00, 0x59, 0x18, 0x49, 0x01, 0x42, 0x18,
+0x01, 0x20, 0x80, 0x07, 0x10, 0x43, 0x00, 0x68, 0x00, 0x04, 0x00, 0x0c,
+0x00, 0x90, 0x01, 0x23, 0x9b, 0x07, 0xd0, 0x1d, 0x05, 0x30, 0x18, 0x43,
+0x00, 0x68, 0x38, 0x1c, 0x29, 0x1c, 0x00, 0xf0, 0xc2, 0xfa, 0xa8, 0x88,
+0x41, 0x07, 0x01, 0xd0, 0x00, 0x20, 0x51, 0xe0, 0x29, 0x89, 0x09, 0x18,
+0x60, 0x6b, 0x81, 0x42, 0xf8, 0xd8, 0x69, 0x89, 0xea, 0x88, 0x89, 0x18,
+0x81, 0x42, 0xf3, 0xd8, 0x00, 0x98, 0x01, 0x28, 0x25, 0xd1, 0xe0, 0x6a,
+0xf1, 0x6b, 0x40, 0x18, 0x71, 0x6c, 0xfa, 0x1d, 0xcd, 0x32, 0x01, 0xf0,
+0x33, 0xf9, 0xfa, 0x1d, 0xff, 0x32, 0x3a, 0x32, 0xe0, 0x6a, 0x51, 0x69,
+0x40, 0x18, 0xc3, 0x1d, 0x03, 0x33, 0x00, 0x20, 0x81, 0x00, 0x5e, 0x58,
+0xc9, 0x19, 0xff, 0x31, 0x01, 0x31, 0x4e, 0x61, 0x01, 0x30, 0x04, 0x28,
+0xf6, 0xd3, 0xe0, 0x6a, 0x51, 0x69, 0x40, 0x18, 0xc1, 0x1d, 0x05, 0x31,
+0x00, 0x20, 0x00, 0x22, 0x43, 0x00, 0xca, 0x52, 0x01, 0x30, 0x06, 0x28,
+0xfa, 0xd3, 0x29, 0x1c, 0x11, 0x4a, 0x00, 0x20, 0xff, 0xf7, 0xae, 0xfb,
+0x01, 0x22, 0x52, 0x04, 0x60, 0x6b, 0x02, 0x43, 0x01, 0x20, 0x21, 0x6b,
+0xff, 0xf7, 0xa6, 0xfb, 0x01, 0x22, 0x52, 0x04, 0x60, 0x6b, 0x02, 0x43,
+0x00, 0x20, 0xe1, 0x6a, 0xff, 0xf7, 0x9e, 0xfb, 0xa1, 0x6b, 0x08, 0x4a,
+0x01, 0x20, 0xff, 0xf7, 0x99, 0xfb, 0x03, 0x20, 0x06, 0x49, 0xc0, 0x46,
+0x48, 0x62, 0x01, 0x20, 0xf8, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x4c, 0x2a, 0x00, 0x80, 0x54, 0x00, 0x03, 0x00, 0x14, 0x00, 0x0f, 0x00,
+0x6c, 0x07, 0x00, 0x80, 0xf0, 0xb5, 0x8d, 0xb0, 0x00, 0x20, 0xb5, 0x4a,
+0xd5, 0x1d, 0xf9, 0x35, 0x68, 0x62, 0x01, 0x20, 0x00, 0x05, 0xb3, 0x49,
+0xc0, 0x46, 0x08, 0x60, 0xa8, 0x6a, 0xc4, 0x1d, 0x2d, 0x34, 0xb1, 0x48,
+0xc0, 0x6a, 0xd7, 0x1d, 0xff, 0x37, 0x3a, 0x37, 0x39, 0x68, 0x4b, 0x00,
+0x59, 0x18, 0x49, 0x01, 0x40, 0x18, 0x01, 0x23, 0x9b, 0x07, 0xc1, 0x1d,
+0x05, 0x31, 0x19, 0x43, 0x09, 0x68, 0x08, 0x30, 0x18, 0x43, 0x00, 0x68,
+0xc0, 0x46, 0x09, 0x90, 0xff, 0x23, 0x1b, 0x02, 0x18, 0x40, 0x00, 0x0a,
+0x0a, 0x90, 0x0a, 0x98, 0xa4, 0x4e, 0x01, 0x28, 0x59, 0xd1, 0x28, 0x6b,
+0xa2, 0x68, 0x80, 0x18, 0xa2, 0x4a, 0x21, 0x69,
+0x09, 0x04, 0x09, 0x0c, 0x01, 0xf0, 0x26, 0xf9, 0x28, 0x6b, 0x79, 0x69,
+0x40, 0x18, 0xc1, 0x1d, 0x05, 0x31, 0x00, 0x20, 0x82, 0x00, 0x98, 0x4b,
+0xd3, 0x18, 0xff, 0x33, 0x01, 0x33, 0x5b, 0x69, 0xc0, 0x46, 0x8b, 0x50,
+0x01, 0x30, 0x04, 0x28, 0xf4, 0xd3, 0x00, 0x20, 0x31, 0x1c, 0x82, 0x00,
+0x56, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68, 0x04, 0xae,
+0xb3, 0x50, 0x01, 0x30, 0x03, 0x28, 0xf4, 0xd3, 0x00, 0x20, 0x08, 0x90,
+0x90, 0x49, 0x42, 0x00, 0x8b, 0x5a, 0xb2, 0x5a, 0x93, 0x42, 0x13, 0xd0,
+0x8e, 0x48, 0xc1, 0x89, 0x01, 0x31, 0xc1, 0x81, 0xb8, 0x68, 0x00, 0x28,
+0x03, 0xd1, 0x38, 0x8a, 0x10, 0x23, 0x18, 0x43, 0x71, 0xe0, 0x38, 0x8a,
+0x40, 0x23, 0x18, 0x43, 0x6d, 0xe0, 0x00, 0xf0, 0x11, 0xf9, 0x01, 0xf0,
+0x67, 0xff, 0xf5, 0xe0, 0x01, 0x30, 0x06, 0x28, 0xe3, 0xd3, 0x08, 0x98,
+0x00, 0x28, 0x0c, 0xd1, 0xb8, 0x68, 0x41, 0x1c, 0xb9, 0x60, 0x00, 0x28,
+0x03, 0xd1, 0x38, 0x8a, 0x01, 0x23, 0x18, 0x43, 0x02, 0xe0, 0x38, 0x8a,
+0x04, 0x23, 0x18, 0x43, 0x38, 0x82, 0x78, 0x68, 0x01, 0x30, 0x78, 0x60,
+0x62, 0xe0, 0x0a, 0x98, 0x02, 0x28, 0x5f, 0xd1, 0x09, 0x98, 0x40, 0x0c,
+0x73, 0xd3, 0x01, 0x23, 0x9b, 0x07, 0xe0, 0x1d, 0x01, 0x30, 0x18, 0x43,
+0x00, 0x68, 0xe1, 0x1d, 0x0d, 0x31, 0x19, 0x43, 0x09, 0x68, 0x40, 0x18,
+0x0c, 0x38, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x21, 0x8a, 0x00, 0x6b, 0x4b,
+0xd6, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68, 0x04, 0xae,
+0xb3, 0x50, 0x01, 0x31, 0x03, 0x29, 0xf3, 0xd3, 0x00, 0x21, 0x83, 0x1e,
+0x0c, 0x93, 0x68, 0x4a, 0x16, 0x6b, 0xc0, 0x46, 0x0b, 0x96, 0x8a, 0x00,
+0x0c, 0x9b, 0x9b, 0x18, 0x0b, 0x9e, 0x9e, 0x19, 0x01, 0x23, 0x9b, 0x07,
+0x33, 0x43, 0x1b, 0x68, 0x6e, 0x46, 0xb3, 0x50, 0x01, 0x31, 0x04, 0x29,
+0xf1, 0xd3, 0x69, 0x46, 0x8b, 0x1c, 0x07, 0x93, 0x00, 0x21, 0x08, 0x91,
+0x04, 0xae, 0x4a, 0x00, 0x07, 0x9b, 0x9b, 0x5a, 0xb2, 0x5a, 0x93, 0x42,
+0x11, 0xd0, 0x58, 0x48, 0xc1, 0x89, 0x01, 0x31, 0xc1, 0x81, 0xf8, 0x68,
+0x41, 0x1c, 0xf9, 0x60, 0x00, 0x28, 0x03, 0xd1, 0x38, 0x8a, 0x20, 0x23,
+0x18, 0x43, 0x02, 0xe0, 0x38, 0x8a, 0x80, 0x23, 0x18, 0x43, 0x38, 0x82,
+0x8f, 0xe7, 0x01, 0x31, 0x06, 0x29, 0xe4, 0xd3, 0x08, 0x99, 0x00, 0x29,
+0x0d, 0xd1, 0xf9, 0x68, 0x4a, 0x1c, 0xfa, 0x60, 0x00, 0x29, 0x04, 0xd1,
+0x39, 0x8a, 0x02, 0x23, 0x19, 0x43, 0x03, 0xe0, 0x0c, 0xe0, 0x39, 0x8a,
+0x08, 0x23, 0x19, 0x43, 0x39, 0x82, 0x29, 0x6b, 0x08, 0x18, 0x01, 0x23,
+0x9b, 0x07, 0x01, 0x38, 0x18, 0x43, 0x00, 0x68, 0xc0, 0x46, 0x20, 0x76,
+0x01, 0x23, 0x9b, 0x07, 0xe0, 0x1d, 0x11, 0x30, 0x18, 0x43, 0x00, 0x68,
+0x01, 0x06, 0x09, 0x0e, 0x00, 0xe0, 0x19, 0xe0, 0x35, 0x48, 0x2a, 0x6b,
+0xc0, 0x46, 0xea, 0x62, 0x04, 0x29, 0x4f, 0xd1, 0x01, 0x21, 0xc6, 0x1d,
+0xff, 0x36, 0x5a, 0x36, 0x31, 0x72, 0x0a, 0x99, 0x02, 0x29, 0x1e, 0xd1,
+0x09, 0x99, 0x09, 0x0e, 0x49, 0x06, 0x1a, 0xd1, 0xe1, 0x1d, 0x05, 0x31,
+0x19, 0x43, 0x09, 0x68, 0x09, 0x06, 0x09, 0x0e, 0x08, 0x39, 0x1a, 0xe0,
+0x01, 0x23, 0x9b, 0x07, 0xe0, 0x1d, 0x01, 0x30, 0x18, 0x43, 0x00, 0x68,
+0xe1, 0x1d, 0x0d, 0x31, 0x19, 0x43, 0x09, 0x68, 0x40, 0x18, 0x00, 0x04,
+0x00, 0x0c, 0xf9, 0x68, 0x4a, 0x1c, 0xfa, 0x60, 0x00, 0x29, 0xbc, 0xd1,
+0xb6, 0xe7, 0x01, 0x23, 0x9b, 0x07, 0xe1, 0x1d,
+0x05, 0x31, 0x19, 0x43, 0x09, 0x68, 0x09, 0x06, 0x09, 0x0e, 0xa1, 0x60,
+0xe8, 0x6a, 0xc0, 0x46, 0x20, 0x60, 0x20, 0x1c, 0xff, 0xf7, 0x88, 0xfc,
+0x20, 0x7e, 0x33, 0x28, 0x01, 0xd0, 0x32, 0x28, 0x11, 0xd1, 0x01, 0x21,
+0x14, 0x4c, 0xc0, 0x46, 0xf9, 0x60, 0xb9, 0x60, 0x20, 0x1c, 0x00, 0xf0,
+0x85, 0xf8, 0x28, 0x6b, 0xa9, 0x6a, 0xc0, 0x46, 0x88, 0x62, 0x20, 0x1c,
+0xff, 0xf7, 0xc0, 0xfd, 0x00, 0x28, 0x11, 0xd1, 0x0e, 0xe0, 0x00, 0x20,
+0x30, 0x72, 0x11, 0xe0, 0x33, 0x29, 0x01, 0xd0, 0x32, 0x29, 0x0d, 0xd1,
+0x07, 0x1c, 0x00, 0xf0, 0x71, 0xf8, 0x38, 0x1c, 0xff, 0xf7, 0xb0, 0xfd,
+0x00, 0x28, 0x01, 0xd1, 0x01, 0xf0, 0x70, 0xfe, 0x0d, 0xb0, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0xf0, 0x12, 0xf8, 0xf6, 0xe7, 0x00, 0x00,
+0x6c, 0x06, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0x4c, 0x2a, 0x00, 0x80,
+0xac, 0xab, 0x20, 0x40, 0x40, 0x07, 0x00, 0x80, 0x82, 0x07, 0x00, 0x80,
+0x0c, 0x2b, 0x00, 0x80, 0x6c, 0x07, 0x00, 0x80, 0xf0, 0xb5, 0x25, 0x48,
+0x41, 0x68, 0x01, 0x31, 0x41, 0x60, 0x24, 0x4f, 0xf9, 0x1d, 0xf9, 0x31,
+0x00, 0x24, 0x88, 0x6a, 0xfa, 0x68, 0xc0, 0x46, 0x94, 0x61, 0x04, 0x22,
+0xfb, 0x68, 0xc0, 0x46, 0xda, 0x60, 0x10, 0x22, 0xfb, 0x68, 0xc0, 0x46,
+0x9a, 0x61, 0xfa, 0x1d, 0xff, 0x32, 0x5a, 0x32, 0x13, 0x7a, 0x1b, 0x4a,
+0x00, 0x2b, 0x0b, 0xd0, 0x15, 0x8a, 0x2e, 0x0a, 0x36, 0x02, 0x33, 0x23,
+0x2b, 0x40, 0x9b, 0x00, 0x1e, 0x43, 0xcc, 0x23, 0x2b, 0x40, 0x9b, 0x08,
+0x33, 0x43, 0x13, 0x82, 0x12, 0x8a, 0xfb, 0x68, 0xc0, 0x46, 0xda, 0x83,
+0x4a, 0x6b, 0xfb, 0x68, 0xc0, 0x46, 0xda, 0x81, 0x0a, 0x6b, 0xc0, 0x46,
+0x82, 0x62, 0xc4, 0x62, 0xc3, 0x1d, 0x39, 0x33, 0x4a, 0x6b, 0xc0, 0x46,
+0x5a, 0x83, 0x04, 0x23, 0x02, 0x68, 0x1a, 0x43, 0x02, 0x60, 0x88, 0x6a,
+0x01, 0xf0, 0x32, 0xfa, 0xf8, 0x68, 0x01, 0x23, 0x9b, 0x07, 0x54, 0x30,
+0x18, 0x43, 0x00, 0x68, 0xc0, 0x46, 0xf8, 0x60, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x0c, 0x2b, 0x00, 0x80, 0x6c, 0x06, 0x00, 0x80,
+0xac, 0x07, 0x00, 0x80, 0x80, 0xb5, 0xc1, 0x1d, 0xf9, 0x31, 0x8a, 0x6a,
+0x01, 0x23, 0x9b, 0x07, 0xd1, 0x1d, 0x45, 0x31, 0x19, 0x43, 0x09, 0x68,
+0x0b, 0x06, 0x1b, 0x0e, 0x01, 0x27, 0xc1, 0x1d, 0xff, 0x31, 0x4a, 0x31,
+0x33, 0x2b, 0x05, 0xd1, 0x8b, 0x70, 0x01, 0x1c, 0x10, 0x1c, 0x00, 0xf0,
+0x0f, 0xf8, 0x06, 0xe0, 0x32, 0x2b, 0x08, 0xd1, 0x8b, 0x70, 0x01, 0x1c,
+0x10, 0x1c, 0x00, 0xf0, 0x3c, 0xf8, 0x38, 0x1c, 0x80, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x20, 0x88, 0x70, 0xf9, 0xe7, 0x90, 0xb4, 0xca, 0x1d,
+0xf9, 0x32, 0x33, 0x27, 0xcc, 0x1d, 0xff, 0x34, 0x4a, 0x34, 0xd3, 0x6a,
+0xc0, 0x46, 0xa7, 0x70, 0xff, 0x31, 0x41, 0x31, 0x07, 0x6c, 0xc0, 0x46,
+0x4f, 0x61, 0xfb, 0x18, 0x39, 0x1c, 0x9f, 0x1e, 0x01, 0x23, 0x9b, 0x07,
+0xfc, 0x1c, 0x23, 0x43, 0x1b, 0x68, 0x1b, 0x06, 0x1b, 0x0e, 0x9b, 0x00,
+0x1b, 0x04, 0x1b, 0x0c, 0xc9, 0x18, 0x08, 0x31, 0x01, 0x64, 0x01, 0x23,
+0x9b, 0x07, 0xb9, 0x1c, 0x19, 0x43, 0x09, 0x68, 0x34, 0x30, 0x01, 0x76,
+0xf8, 0x1d, 0x01, 0x30, 0x18, 0x43, 0x00, 0x68, 0x00, 0x04, 0xb9, 0x1d,
+0x19, 0x43, 0xd0, 0x63, 0x09, 0x68, 0x09, 0x04, 0x09, 0x0c, 0x08, 0x43,
+0xd0, 0x63, 0x90, 0xbc, 0x70, 0x47, 0xb0, 0xb5, 0xca, 0x1d, 0xf9, 0x32,
+0xc5, 0x1d, 0x2d, 0x35, 0x32, 0x20, 0xcf, 0x1d,
+0xff, 0x37, 0x4a, 0x37, 0xd3, 0x6a, 0xc0, 0x46, 0xb8, 0x70, 0xcc, 0x1d,
+0xff, 0x34, 0x3a, 0x34, 0xe8, 0x68, 0xc0, 0x46, 0x60, 0x61, 0x10, 0x30,
+0xe8, 0x60, 0x60, 0x69, 0xc0, 0x18, 0x87, 0x1e, 0x01, 0x23, 0x9b, 0x07,
+0x38, 0x1d, 0x18, 0x43, 0x00, 0x68, 0x00, 0x04, 0xb9, 0x1c, 0x19, 0x43,
+0xd0, 0x63, 0x09, 0x68, 0x09, 0x04, 0x09, 0x0c, 0x08, 0x43, 0xd0, 0x63,
+0xf8, 0x1d, 0x03, 0x30, 0xff, 0xf7, 0xfc, 0xfb, 0x20, 0x62, 0xf8, 0x1d,
+0x07, 0x30, 0xff, 0xf7, 0xf7, 0xfb, 0x60, 0x62, 0x00, 0x20, 0x28, 0x76,
+0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xf7, 0xb5, 0x81, 0xb0, 0x01, 0x98,
+0xc7, 0x1d, 0xf9, 0x37, 0xb8, 0x6a, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d,
+0x05, 0x34, 0x23, 0x43, 0x1c, 0x68, 0xff, 0x23, 0xfe, 0x33, 0x23, 0x40,
+0x7f, 0x6b, 0x3f, 0x04, 0x3b, 0x43, 0x0b, 0x60, 0x34, 0x30, 0x1c, 0x1c,
+0x80, 0x23, 0x23, 0x40, 0x01, 0x9f, 0xff, 0x37, 0x41, 0x37, 0x00, 0x2b,
+0x3c, 0xd0, 0x0c, 0x23, 0x00, 0x93, 0x00, 0x23, 0x9d, 0x00, 0xae, 0x18,
+0x36, 0x69, 0x6d, 0x18, 0x6e, 0x61, 0x01, 0x33, 0x05, 0x2b, 0xf7, 0xd3,
+0x00, 0x23, 0x9d, 0x00, 0xae, 0x18, 0x76, 0x6a, 0x6d, 0x18, 0xae, 0x62,
+0x01, 0x33, 0x05, 0x2b, 0xf7, 0xd3, 0x01, 0x9b, 0xff, 0x33, 0x51, 0x33,
+0x9b, 0x78, 0x33, 0x2b, 0x0e, 0xd1, 0x01, 0x23, 0x9b, 0x07, 0xc5, 0x1d,
+0x01, 0x35, 0x2b, 0x43, 0x1b, 0x68, 0xc0, 0x46, 0x4b, 0x81, 0x01, 0x23,
+0x9b, 0x07, 0xc5, 0x1d, 0x0d, 0x35, 0x2b, 0x43, 0x1b, 0x68, 0x16, 0xe0,
+0x7b, 0x69, 0xc0, 0x46, 0x4b, 0x81, 0x01, 0x23, 0x9b, 0x07, 0xc5, 0x1d,
+0x0d, 0x35, 0x2b, 0x43, 0x1b, 0x68, 0x7d, 0x69, 0x5d, 0x1b, 0x01, 0x23,
+0x9b, 0x07, 0xc6, 0x1d, 0x01, 0x36, 0x33, 0x43, 0x1b, 0x68, 0xeb, 0x18,
+0x0c, 0x3b, 0x02, 0xe0, 0x00, 0x23, 0x00, 0x93, 0x4b, 0x81, 0xcb, 0x80,
+0x63, 0x09, 0x49, 0xd3, 0x01, 0x23, 0x9b, 0x07, 0xc4, 0x1d, 0x05, 0x34,
+0x23, 0x43, 0x1b, 0x68, 0xc0, 0x46, 0x0b, 0x81, 0x01, 0x23, 0x9b, 0x07,
+0xc4, 0x1d, 0x0d, 0x34, 0x23, 0x43, 0x1b, 0x68, 0x0c, 0x89, 0x1b, 0x1b,
+0x00, 0x9c, 0x1c, 0x1b, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x30, 0x18, 0x43,
+0x00, 0x68, 0x20, 0x18, 0x88, 0x80, 0x38, 0x6a, 0x04, 0x0e, 0xff, 0x23,
+0x1b, 0x04, 0x03, 0x40, 0x1b, 0x0a, 0x1c, 0x43, 0xff, 0x23, 0x1b, 0x02,
+0x03, 0x40, 0x1b, 0x02, 0x23, 0x43, 0x00, 0x06, 0x18, 0x43, 0xc8, 0x60,
+0x78, 0x6a, 0x07, 0x0e, 0xff, 0x23, 0x1b, 0x04, 0x03, 0x40, 0x1b, 0x0a,
+0x1f, 0x43, 0xff, 0x23, 0x1b, 0x02, 0x03, 0x40, 0x1b, 0x02, 0x3b, 0x43,
+0x00, 0x06, 0x18, 0x43, 0x08, 0x61, 0xd0, 0x6b, 0xc0, 0x46, 0xc8, 0x63,
+0x90, 0x6b, 0xc0, 0x46, 0x08, 0x64, 0x50, 0x6c, 0xc0, 0x46, 0x48, 0x64,
+0x10, 0x6c, 0xc0, 0x46, 0x88, 0x64, 0xd0, 0x6c, 0xc0, 0x46, 0xc8, 0x64,
+0x90, 0x6c, 0xc0, 0x46, 0x08, 0x65, 0x02, 0xe0, 0x00, 0x23, 0x0b, 0x81,
+0x8b, 0x80, 0x04, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0x0f, 0x4a, 0x93, 0x89, 0x01, 0x33, 0x93, 0x81, 0xc2, 0x1d, 0xf9, 0x32,
+0x04, 0x23, 0x90, 0x6a, 0xc0, 0x46, 0xc3, 0x60, 0x10, 0x23, 0x83, 0x61,
+0xcb, 0x0a, 0x01, 0xd3, 0x18, 0x23, 0x83, 0x61, 0xc1, 0x83, 0x51, 0x6b,
+0xc0, 0x46, 0xc1, 0x81, 0x51, 0x6b, 0xc2, 0x1d, 0x39, 0x32, 0x51, 0x83,
+0x04, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x01, 0xf0, 0xc2, 0xf8,
+0x08, 0xbc, 0x18, 0x47, 0x0c, 0x2b, 0x00, 0x80,
+0xb0, 0xb5, 0x1b, 0x4c, 0x20, 0x6a, 0x02, 0x28, 0x1b, 0xd2, 0x00, 0x20,
+0xe7, 0x1d, 0x19, 0x37, 0x38, 0x71, 0xe1, 0x68, 0xe0, 0x1d, 0xf9, 0x30,
+0x00, 0x29, 0x15, 0xd0, 0x42, 0x6a, 0x00, 0x2a, 0x12, 0xd1, 0x01, 0x25,
+0x0a, 0xe0, 0xff, 0xf7, 0x89, 0xfb, 0x00, 0x28, 0x09, 0xd1, 0x20, 0x6a,
+0x02, 0x28, 0x00, 0xd3, 0x3d, 0x71, 0xe0, 0x68, 0x00, 0x28, 0x02, 0xd0,
+0x38, 0x79, 0x00, 0x28, 0xf1, 0xd0, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x40, 0x6a, 0x00, 0x28, 0xf9, 0xd1, 0x00, 0x29, 0xf7, 0xd1, 0x60, 0x69,
+0x00, 0x28, 0x04, 0xd0, 0x06, 0x48, 0x00, 0x68, 0x03, 0xf0, 0xa8, 0xfc,
+0xef, 0xe7, 0x60, 0x68, 0x00, 0x28, 0xec, 0xd0, 0x00, 0xf0, 0x5a, 0xf8,
+0xe9, 0xe7, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80, 0x34, 0x04, 0x00, 0x80,
+0xb0, 0xb5, 0x07, 0x1c, 0x20, 0x23, 0xb8, 0x68, 0x18, 0x40, 0x01, 0x24,
+0x00, 0x25, 0x00, 0x28, 0x0b, 0xd1, 0x38, 0x6a, 0x00, 0x28, 0x03, 0xd1,
+0x28, 0x1c, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x1f, 0x48, 0x01, 0x6e,
+0x01, 0x31, 0x01, 0x66, 0x03, 0xe0, 0x48, 0x68, 0xc4, 0x23, 0x18, 0x40,
+0x03, 0xd1, 0x38, 0x6a, 0x00, 0xf0, 0x0c, 0xfc, 0x2f, 0xe0, 0x38, 0x1c,
+0x00, 0xf0, 0x1c, 0xfc, 0x38, 0x1c, 0x00, 0xf0, 0x7b, 0xfa, 0xb8, 0x68,
+0xc0, 0x08, 0x02, 0xd3, 0x38, 0x6a, 0x00, 0xf0, 0xd1, 0xfb, 0xb8, 0x68,
+0x39, 0x6a, 0xc0, 0x46, 0x88, 0x60, 0x38, 0x6a, 0xc0, 0x46, 0xc5, 0x60,
+0x10, 0x48, 0x41, 0x68, 0x00, 0x29, 0x11, 0xd1, 0xc1, 0x68, 0x00, 0x29,
+0x09, 0xd1, 0x41, 0x69, 0x00, 0x29, 0x06, 0xd1, 0x39, 0x6a, 0xc0, 0x46,
+0x81, 0x60, 0x41, 0x60, 0x00, 0xf0, 0x14, 0xf8, 0x0b, 0xe0, 0x39, 0x6a,
+0xc0, 0x46, 0x81, 0x60, 0x41, 0x60, 0x06, 0xe0, 0x39, 0x6a, 0x82, 0x68,
+0xc0, 0x46, 0xd1, 0x60, 0x39, 0x6a, 0xc0, 0x46, 0x81, 0x60, 0x20, 0x1c,
+0xbd, 0xe7, 0x00, 0x00, 0xa4, 0x2a, 0x00, 0x80, 0x6c, 0x06, 0x00, 0x80,
+0x90, 0xb5, 0x0b, 0x4c, 0x67, 0x68, 0x00, 0x2f, 0x0f, 0xd0, 0x38, 0x1c,
+0x00, 0xf0, 0x12, 0xf8, 0x00, 0x28, 0x0a, 0xd1, 0x60, 0x68, 0xc0, 0x68,
+0xc0, 0x46, 0x60, 0x60, 0x38, 0x1c, 0x00, 0xf0, 0xc3, 0xfb, 0x00, 0x20,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x01, 0x20, 0xfa, 0xe7, 0x00, 0x00,
+0x6c, 0x06, 0x00, 0x80, 0xf0, 0xb5, 0x07, 0x1c, 0xfe, 0x1d, 0x49, 0x36,
+0x30, 0x78, 0x40, 0x00, 0xc0, 0x19, 0x85, 0x8b, 0x33, 0x4c, 0x34, 0x4b,
+0x9d, 0x42, 0x3c, 0xd0, 0x38, 0x1c, 0x21, 0x1c, 0x2a, 0x1c, 0x00, 0xf0,
+0x1d, 0xf9, 0x31, 0x48, 0x80, 0x6a, 0x58, 0x21, 0x69, 0x43, 0x40, 0x18,
+0x01, 0x23, 0x9b, 0x07, 0x18, 0x43, 0x00, 0x68, 0x00, 0x04, 0x00, 0x0c,
+0x2c, 0x4d, 0x01, 0x28, 0x1a, 0xd1, 0x30, 0x78, 0xc0, 0x19, 0xc1, 0x1d,
+0x19, 0x31, 0x08, 0x7a, 0x3a, 0x68, 0x80, 0x18, 0x09, 0x7b, 0xea, 0x1d,
+0x21, 0x32, 0x00, 0xf0, 0xe3, 0xfc, 0x30, 0x78, 0xc0, 0x19, 0x20, 0x30,
+0x00, 0x79, 0x39, 0x68, 0x40, 0x18, 0xc1, 0x1d, 0x05, 0x31, 0x00, 0x20,
+0x00, 0x23, 0x42, 0x00, 0x8b, 0x52, 0x01, 0x30, 0x06, 0x28, 0xfa, 0xd3,
+0xa0, 0x88, 0x41, 0x07, 0x0b, 0xd1, 0x21, 0x89, 0x09, 0x18, 0x78, 0x68,
+0x00, 0x04, 0x00, 0x0c, 0x81, 0x42, 0x04, 0xd8, 0x61, 0x89, 0xe2, 0x88,
+0x89, 0x18, 0x81, 0x42, 0x03, 0xd9, 0x00, 0x20, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x21, 0x1c, 0x14, 0x4a, 0x00, 0x20, 0xfe, 0xf7, 0x5a, 0xff,
+0x01, 0x22, 0x52, 0x04, 0x78, 0x68, 0x02, 0x43,
+0x01, 0x20, 0x39, 0x68, 0xfe, 0xf7, 0x52, 0xff, 0x01, 0x22, 0x52, 0x04,
+0x78, 0x68, 0x02, 0x43, 0x00, 0x20, 0x39, 0x68, 0xfe, 0xf7, 0x4a, 0xff,
+0x0b, 0x49, 0x0c, 0x4a, 0x01, 0x20, 0xfe, 0xf7, 0x45, 0xff, 0x01, 0x20,
+0xe9, 0x1d, 0x19, 0x31, 0x48, 0x71, 0x02, 0x21, 0xea, 0x1d, 0xf9, 0x32,
+0x51, 0x62, 0xd9, 0xe7, 0x28, 0xac, 0x20, 0x40, 0xff, 0xff, 0x00, 0x00,
+0x4c, 0x2a, 0x00, 0x80, 0x6c, 0x06, 0x00, 0x80, 0x54, 0x00, 0x03, 0x00,
+0x14, 0xac, 0x20, 0x40, 0x14, 0x00, 0x07, 0x00, 0xf0, 0xb5, 0x83, 0xb0,
+0x00, 0x21, 0x4f, 0x48, 0xc2, 0x1d, 0xf9, 0x32, 0x51, 0x62, 0x01, 0x21,
+0xc9, 0x04, 0x4d, 0x4a, 0xc0, 0x46, 0x11, 0x60, 0xc1, 0x1d, 0x19, 0x31,
+0x49, 0x79, 0x00, 0x29, 0x04, 0xd1, 0x4a, 0x48, 0x00, 0x68, 0x03, 0xf0,
+0x9b, 0xfb, 0x87, 0xe0, 0x45, 0x48, 0x47, 0x68, 0xfc, 0x1d, 0x49, 0x34,
+0x21, 0x78, 0x48, 0x00, 0xc0, 0x19, 0x80, 0x8b, 0x44, 0x4a, 0x92, 0x6a,
+0x58, 0x23, 0x58, 0x43, 0x15, 0x18, 0x01, 0x23, 0x9b, 0x07, 0xea, 0x1d,
+0x05, 0x32, 0x1a, 0x43, 0x12, 0x68, 0x08, 0x35, 0x2b, 0x43, 0x1d, 0x68,
+0xff, 0x23, 0x1b, 0x02, 0x2b, 0x40, 0x1b, 0x0a, 0x3c, 0x4d, 0x01, 0x2b,
+0x24, 0xd1, 0xc8, 0x19, 0xc1, 0x1d, 0x19, 0x31, 0x08, 0x7a, 0x3a, 0x68,
+0x80, 0x18, 0x39, 0x4a, 0x09, 0x7b, 0x00, 0xf0, 0xc5, 0xfc, 0x20, 0x78,
+0xc0, 0x19, 0x20, 0x30, 0x00, 0x79, 0x39, 0x68, 0x41, 0x18, 0x00, 0x20,
+0x82, 0x00, 0x53, 0x19, 0x9b, 0x6e, 0x6e, 0x46, 0xb3, 0x50, 0x01, 0x30,
+0x03, 0x28, 0xf7, 0xd3, 0xca, 0x1d, 0x05, 0x32, 0x69, 0x46, 0x00, 0x20,
+0x43, 0x00, 0xcd, 0x5a, 0xc0, 0x46, 0xd5, 0x52, 0x01, 0x30, 0x06, 0x28,
+0xf8, 0xd3, 0x2d, 0xe0, 0x02, 0x2b, 0x2b, 0xd1, 0x11, 0x0a, 0x29, 0xd3,
+0x00, 0x21, 0x8a, 0x00, 0x53, 0x19, 0x9b, 0x6e, 0x6e, 0x46, 0xb3, 0x50,
+0x01, 0x31, 0x03, 0x29, 0xf7, 0xd3, 0x21, 0x78, 0x49, 0x00, 0xc9, 0x19,
+0x09, 0x8f, 0x3a, 0x68, 0x8b, 0x18, 0x6a, 0x46, 0x00, 0x21, 0x4d, 0x00,
+0x56, 0x5b, 0xc0, 0x46, 0x5e, 0x53, 0x01, 0x31, 0x06, 0x29, 0xf8, 0xd3,
+0x19, 0x49, 0x8a, 0x6a, 0x13, 0x18, 0x1a, 0x6d, 0x00, 0x9d, 0x55, 0x40,
+0x19, 0x4a, 0xd6, 0x68, 0x75, 0x40, 0x1d, 0x65, 0x89, 0x6a, 0x08, 0x18,
+0x41, 0x6d, 0x02, 0x9b, 0x59, 0x40, 0x92, 0x69, 0x51, 0x40, 0x41, 0x65,
+0x20, 0x78, 0x41, 0x1e, 0x21, 0x70, 0x00, 0x28, 0x0d, 0xd0, 0x38, 0x1c,
+0xff, 0xf7, 0xf4, 0xfe, 0x00, 0x28, 0x0d, 0xd1, 0x08, 0x4a, 0x50, 0x68,
+0xc0, 0x68, 0xc0, 0x46, 0x50, 0x60, 0x38, 0x1c, 0x00, 0xf0, 0xa4, 0xfa,
+0x02, 0xe0, 0x38, 0x1c, 0x00, 0xf0, 0x73, 0xfa, 0x01, 0xf0, 0xde, 0xfa,
+0x03, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x6c, 0x06, 0x00, 0x80,
+0x00, 0x00, 0x00, 0xb0, 0x38, 0x04, 0x00, 0x80, 0x4c, 0x2a, 0x00, 0x80,
+0xac, 0xab, 0x20, 0x40, 0x94, 0x06, 0x00, 0x80, 0x08, 0x83, 0x20, 0x40,
+0xf0, 0xb5, 0x82, 0xb0, 0x69, 0x4b, 0x9f, 0x6a, 0x58, 0x23, 0x5a, 0x43,
+0xba, 0x18, 0xc3, 0x1d, 0x49, 0x33, 0x1f, 0x78, 0x01, 0x23, 0x9b, 0x07,
+0xd4, 0x1d, 0x01, 0x34, 0x23, 0x43, 0x1d, 0x68, 0x43, 0x68, 0x1c, 0x04,
+0x01, 0x23, 0x9b, 0x07, 0xd6, 0x1d, 0x05, 0x36, 0x33, 0x43, 0x1b, 0x68,
+0x1c, 0x43, 0x42, 0x23, 0x1c, 0x43, 0x0c, 0x60, 0xff, 0x26, 0x36, 0x02,
+0x2e, 0x40, 0x01, 0x23, 0x5b, 0x02, 0x9e, 0x42, 0x74, 0xd1, 0x6b, 0x0c,
+0x2b, 0xd3, 0xc3, 0x19, 0x20, 0x33, 0x1b, 0x79,
+0xc0, 0x46, 0x4b, 0x81, 0x7b, 0x00, 0x1b, 0x18, 0x1b, 0x8f, 0x4c, 0x89,
+0x1b, 0x1b, 0xcb, 0x80, 0x00, 0x24, 0xa6, 0x00, 0x01, 0x96, 0xb3, 0x18,
+0xde, 0x1d, 0x09, 0x36, 0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68,
+0x01, 0x9e, 0x76, 0x18, 0x73, 0x61, 0x01, 0x34, 0x05, 0x2c, 0xf0, 0xd3,
+0x00, 0x24, 0xa6, 0x00, 0x00, 0x96, 0xb3, 0x18, 0xde, 0x1d, 0x1d, 0x36,
+0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68, 0x00, 0x9e, 0x76, 0x18,
+0xb3, 0x62, 0x01, 0x34, 0x05, 0x2c, 0xf0, 0xd3, 0x06, 0xe0, 0x00, 0x23,
+0x4b, 0x81, 0xcb, 0x80, 0x40, 0x23, 0x9c, 0x43, 0x0c, 0x60, 0x23, 0x1c,
+0x6b, 0x0e, 0x4a, 0xd3, 0xc3, 0x19, 0x20, 0x33, 0x1b, 0x79, 0x10, 0x33,
+0x0b, 0x81, 0x7b, 0x00, 0x1b, 0x18, 0x1b, 0x8f, 0x0f, 0x89, 0xdb, 0x1b,
+0x8b, 0x80, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d, 0x35, 0x34, 0x23, 0x43,
+0x1b, 0x68, 0xc0, 0x46, 0xcb, 0x63, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d,
+0x31, 0x34, 0x23, 0x43, 0x1b, 0x68, 0xc0, 0x46, 0x0b, 0x64, 0xab, 0x0e,
+0x21, 0xd2, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d, 0x3d, 0x34, 0x23, 0x43,
+0x1b, 0x68, 0xc0, 0x46, 0x4b, 0x64, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d,
+0x39, 0x34, 0x23, 0x43, 0x1b, 0x68, 0xc0, 0x46, 0x8b, 0x64, 0x01, 0x23,
+0x9b, 0x07, 0xd4, 0x1d, 0x45, 0x34, 0x23, 0x43, 0x1b, 0x68, 0xc0, 0x46,
+0xcb, 0x64, 0x01, 0x23, 0x9b, 0x07, 0xd4, 0x1d, 0x41, 0x34, 0x23, 0x43,
+0x1b, 0x68, 0xc0, 0x46, 0x0b, 0x65, 0x00, 0xe0, 0x0f, 0xe0, 0xfb, 0x1f,
+0x01, 0x3b, 0x1b, 0x04, 0x1b, 0x0c, 0x07, 0x68, 0xff, 0x18, 0x03, 0x69,
+0x08, 0x1c, 0x39, 0x1c, 0x00, 0xf0, 0x34, 0xf8, 0x2c, 0xe0, 0x00, 0x23,
+0x0b, 0x81, 0x8b, 0x80, 0x28, 0xe0, 0x00, 0x23, 0x8b, 0x80, 0x0b, 0x81,
+0xc3, 0x19, 0x20, 0x33, 0x1b, 0x7a, 0xc0, 0x46, 0x4b, 0x81, 0x7b, 0x00,
+0x18, 0x18, 0x00, 0x8e, 0xc0, 0x46, 0xc8, 0x80, 0x00, 0x20, 0x87, 0x00,
+0xbb, 0x18, 0xdc, 0x1d, 0x09, 0x34, 0x01, 0x23, 0x9b, 0x07, 0x23, 0x43,
+0x1b, 0x68, 0x7f, 0x18, 0x7b, 0x61, 0x01, 0x30, 0x05, 0x28, 0xf2, 0xd3,
+0x00, 0x20, 0x87, 0x00, 0xbb, 0x18, 0xdc, 0x1d, 0x1d, 0x34, 0x01, 0x23,
+0x9b, 0x07, 0x23, 0x43, 0x1b, 0x68, 0x7f, 0x18, 0xbb, 0x62, 0x01, 0x30,
+0x05, 0x28, 0xf2, 0xd3, 0x02, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x4c, 0x2a, 0x00, 0x80, 0x80, 0xb4, 0x1f, 0x1c, 0x3b, 0x0c, 0x18, 0xd2,
+0x17, 0x6d, 0x11, 0x4b, 0xc0, 0x46, 0xdf, 0x60, 0x52, 0x6d, 0xc0, 0x46,
+0x1a, 0x61, 0xc7, 0x60, 0x1a, 0x69, 0xc0, 0x46, 0x02, 0x61, 0xd8, 0x68,
+0xc0, 0x46, 0x08, 0x80, 0xd8, 0x68, 0x00, 0x0c, 0x48, 0x80, 0x18, 0x69,
+0xc0, 0x46, 0x88, 0x80, 0x18, 0x69, 0x00, 0x0c, 0xc8, 0x80, 0x80, 0xbc,
+0x70, 0x47, 0x4a, 0x88, 0x12, 0x04, 0x0b, 0x88, 0x1a, 0x43, 0xc2, 0x60,
+0x8a, 0x88, 0xc9, 0x88, 0x09, 0x04, 0x11, 0x43, 0x01, 0x61, 0xf2, 0xe7,
+0x2c, 0x07, 0x00, 0x80, 0xf1, 0xb5, 0x88, 0xb0, 0x00, 0x22, 0x08, 0x98,
+0x00, 0x6a, 0x08, 0x9b, 0x99, 0x68, 0x49, 0x0a, 0x02, 0xd3, 0x01, 0x27,
+0xff, 0x03, 0x00, 0xe0, 0x00, 0x27, 0x03, 0x8b, 0x00, 0x2b, 0x19, 0xd0,
+0xa3, 0x49, 0x89, 0x6a, 0x1c, 0x1c, 0x58, 0x23, 0x63, 0x43, 0xc9, 0x18,
+0x01, 0x23, 0x9b, 0x07, 0x58, 0x39, 0x19, 0x43, 0x09, 0x68, 0x09, 0x04,
+0x09, 0x0c, 0x02, 0x29, 0x02, 0xd1, 0x08, 0x23, 0x1f, 0x43, 0x07, 0xe0,
+0x41, 0x8b, 0x00, 0x29, 0x02, 0xd0, 0x0c, 0x23,
+0x1f, 0x43, 0x01, 0xe0, 0x04, 0x23, 0x1f, 0x43, 0x83, 0x8a, 0x00, 0x2b,
+0x18, 0xd0, 0x95, 0x49, 0x89, 0x6a, 0x1c, 0x1c, 0x58, 0x23, 0x63, 0x43,
+0xc9, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x58, 0x39, 0x19, 0x43, 0x09, 0x68,
+0x09, 0x04, 0x09, 0x0c, 0x02, 0x29, 0x01, 0xd1, 0x0f, 0x43, 0x07, 0xe0,
+0xc1, 0x8a, 0x00, 0x29, 0x02, 0xd0, 0x03, 0x23, 0x1f, 0x43, 0x01, 0xe0,
+0x01, 0x23, 0x1f, 0x43, 0xc1, 0x1d, 0x39, 0x31, 0x07, 0x91, 0x4b, 0x89,
+0x0c, 0x89, 0x1c, 0x19, 0x24, 0x04, 0x24, 0x0c, 0x08, 0x9d, 0x2d, 0x68,
+0xc0, 0x46, 0x01, 0x95, 0xc9, 0x88, 0x7d, 0x08, 0x1a, 0xd3, 0x1a, 0x1c,
+0xc3, 0x1d, 0x19, 0x33, 0x1a, 0x72, 0x07, 0x9a, 0x92, 0x89, 0xc0, 0x46,
+0x1a, 0x73, 0x07, 0x9a, 0x12, 0x89, 0xc0, 0x46, 0x02, 0x86, 0x04, 0x87,
+0x82, 0x8a, 0x01, 0x3a, 0x82, 0x83, 0x01, 0x22, 0x19, 0x71, 0x08, 0x9b,
+0x1b, 0x68, 0x5b, 0x18, 0x5b, 0x78, 0x9b, 0x00, 0x1b, 0x04, 0x1b, 0x0c,
+0x08, 0x33, 0x59, 0x18, 0xbb, 0x08, 0x47, 0xd3, 0x07, 0x9b, 0x5b, 0x89,
+0x85, 0x18, 0x06, 0x95, 0x20, 0x35, 0x2b, 0x72, 0x07, 0x9b, 0x9b, 0x89,
+0xc0, 0x46, 0x2b, 0x73, 0x07, 0x9b, 0x1b, 0x89, 0x2e, 0x1c, 0x55, 0x00,
+0x2d, 0x18, 0x05, 0x95, 0x2b, 0x86, 0x00, 0x2a, 0x01, 0xd0, 0xc3, 0x8a,
+0x00, 0xe0, 0x83, 0x8a, 0x01, 0x3b, 0x05, 0x9d, 0xc0, 0x46, 0xab, 0x83,
+0x31, 0x71, 0x65, 0x4b, 0x9d, 0x6a, 0x05, 0x9b, 0x9e, 0x8b, 0x58, 0x23,
+0x73, 0x43, 0xeb, 0x18, 0xdd, 0x1d, 0x01, 0x35, 0x01, 0x23, 0x9b, 0x07,
+0x2b, 0x43, 0x1d, 0x68, 0x2b, 0x0e, 0x5b, 0x06, 0x01, 0xd1, 0x08, 0x31,
+0x00, 0xe0, 0x10, 0x31, 0x81, 0x23, 0x5b, 0x02, 0x1d, 0x40, 0x9d, 0x42,
+0x03, 0xd1, 0xe3, 0x1f, 0x05, 0x3b, 0x1c, 0x04, 0x24, 0x0c, 0x05, 0x9b,
+0xc0, 0x46, 0x1c, 0x87, 0x08, 0x9b, 0x1b, 0x68, 0x1b, 0x19, 0x10, 0x3b,
+0x9b, 0x7b, 0x06, 0x9d, 0x40, 0x35, 0x2b, 0x70, 0x2b, 0x78, 0x02, 0x33,
+0xe3, 0x1a, 0x1c, 0x04, 0x24, 0x0c, 0x01, 0x32, 0xbb, 0x08, 0x9b, 0x07,
+0x6d, 0xd0, 0x83, 0x18, 0x20, 0x33, 0x04, 0x93, 0x19, 0x72, 0x01, 0x9b,
+0x5d, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x2b, 0x43, 0x1b, 0x68, 0x1b, 0x07,
+0x1b, 0x0f, 0x9b, 0x00, 0x04, 0x9e, 0xc0, 0x46, 0x33, 0x73, 0x00, 0x95,
+0x2b, 0x78, 0x1b, 0x07, 0x1b, 0x0f, 0x9b, 0x00, 0x04, 0x9d, 0xc0, 0x46,
+0x2b, 0x73, 0x00, 0x9d, 0xeb, 0x78, 0xad, 0x78, 0x1b, 0x02, 0x1d, 0x43,
+0x2b, 0x02, 0x2d, 0x0a, 0x2d, 0x06, 0x2d, 0x0e, 0x2b, 0x43, 0x55, 0x00,
+0x2d, 0x18, 0x2b, 0x86, 0x04, 0x9b, 0xc0, 0x46, 0x59, 0x72, 0x04, 0x9b,
+0x1b, 0x7b, 0x2e, 0x1c, 0x04, 0x9d, 0xc0, 0x46, 0x6b, 0x73, 0x33, 0x8e,
+0xc0, 0x46, 0x73, 0x86, 0x00, 0x9d, 0x2b, 0x78, 0x1b, 0x07, 0x1b, 0x0f,
+0x9b, 0x00, 0x1b, 0x04, 0x1b, 0x0c, 0x59, 0x18, 0x04, 0x25, 0x3d, 0x40,
+0x0e, 0xd0, 0x34, 0x87, 0x03, 0x8b, 0x01, 0x3b, 0xb3, 0x83, 0x13, 0x1c,
+0x1b, 0x18, 0x20, 0x33, 0x19, 0x71, 0x01, 0x9b, 0x5b, 0x18, 0x5b, 0x78,
+0x9b, 0x00, 0x59, 0x18, 0x08, 0x31, 0x01, 0x32, 0x3b, 0x09, 0x37, 0xd3,
+0x00, 0x2d, 0x01, 0xd0, 0x43, 0x8b, 0x00, 0xe0, 0x03, 0x8b, 0x55, 0x00,
+0x2d, 0x18, 0x01, 0x3b, 0xab, 0x83, 0x83, 0x18, 0x03, 0x93, 0x20, 0x33,
+0x19, 0x71, 0x20, 0x4b, 0x9d, 0x6a, 0x53, 0x00, 0x1b, 0x18, 0x02, 0x93,
+0x9e, 0x8b, 0x58, 0x23, 0x73, 0x43, 0xeb, 0x18, 0xdd, 0x1d, 0x01, 0x35,
+0x01, 0x23, 0x9b, 0x07, 0x2b, 0x43, 0x1d, 0x68,
+0x2b, 0x0e, 0x5b, 0x06, 0x02, 0xd1, 0x08, 0x31, 0x01, 0xe0, 0x15, 0xe0,
+0x10, 0x31, 0x81, 0x23, 0x5b, 0x02, 0x1d, 0x40, 0x9d, 0x42, 0x03, 0xd1,
+0xe3, 0x1f, 0x05, 0x3b, 0x1c, 0x04, 0x24, 0x0c, 0x02, 0x9b, 0xc0, 0x46,
+0x1c, 0x87, 0x08, 0x9b, 0x1b, 0x68, 0x1b, 0x19, 0x10, 0x3b, 0x9b, 0x7b,
+0x03, 0x9c, 0x40, 0x34, 0x23, 0x70, 0x01, 0x32, 0x07, 0x9b, 0xc0, 0x46,
+0xd9, 0x80, 0x51, 0x1e, 0xc3, 0x1d, 0x49, 0x33, 0x19, 0x70, 0x07, 0x61,
+0x04, 0x2a, 0x06, 0xd2, 0x06, 0x49, 0x53, 0x00, 0x1b, 0x18, 0x99, 0x83,
+0x01, 0x32, 0x04, 0x2a, 0xf9, 0xd3, 0x09, 0xb0, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x4c, 0x2a, 0x00, 0x80, 0xff, 0xff, 0x00, 0x00,
+0x70, 0x47, 0x80, 0xb5, 0x8c, 0xb0, 0x07, 0x1c, 0x12, 0x48, 0x01, 0x68,
+0x01, 0x31, 0x01, 0x60, 0x38, 0x68, 0xc0, 0x46, 0x00, 0x90, 0x78, 0x68,
+0xc0, 0x46, 0x01, 0x90, 0xb8, 0x68, 0xc0, 0x46, 0x02, 0x90, 0x0d, 0x48,
+0x41, 0x68, 0xc9, 0x68, 0xc0, 0x46, 0x41, 0x60, 0x38, 0x1c, 0x00, 0xf0,
+0x4f, 0xf8, 0xb8, 0x68, 0x40, 0x09, 0x06, 0xd3, 0x10, 0x23, 0x02, 0x98,
+0x18, 0x43, 0x02, 0x90, 0x68, 0x46, 0x02, 0xf0, 0xe1, 0xff, 0x68, 0x46,
+0x02, 0xf0, 0x9a, 0xfe, 0x0c, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x0c, 0x2b, 0x00, 0x80, 0x6c, 0x06, 0x00, 0x80, 0x00, 0xb5, 0x8c, 0xb0,
+0x01, 0x68, 0xc0, 0x46, 0x00, 0x91, 0x41, 0x68, 0x05, 0x4b, 0x19, 0x43,
+0x01, 0x91, 0x00, 0xf0, 0x2f, 0xf8, 0x68, 0x46, 0x02, 0xf0, 0x84, 0xfe,
+0x0c, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0,
+0x02, 0x6a, 0x03, 0x68, 0xc0, 0x46, 0x13, 0x60, 0x40, 0x68, 0xc0, 0x46,
+0x50, 0x60, 0x40, 0x32, 0x48, 0x68, 0xc0, 0x46, 0x90, 0x80, 0xc8, 0x68,
+0xc0, 0x46, 0xd0, 0x80, 0x48, 0x69, 0xc0, 0x46, 0x10, 0x81, 0x88, 0x68,
+0xc0, 0x46, 0x50, 0x81, 0x08, 0x7e, 0xc0, 0x46, 0x90, 0x73, 0x08, 0x69,
+0xc0, 0x46, 0x90, 0x81, 0x70, 0x47, 0x04, 0x49, 0x08, 0x68, 0x00, 0x28,
+0x00, 0xd1, 0x70, 0x47, 0xc2, 0x68, 0xc0, 0x46, 0x0a, 0x60, 0xfa, 0xe7,
+0x6c, 0x06, 0x00, 0x80, 0x02, 0x49, 0x0a, 0x68, 0xc0, 0x46, 0xc2, 0x60,
+0x08, 0x60, 0x70, 0x47, 0x6c, 0x06, 0x00, 0x80, 0xb0, 0xb4, 0x00, 0x22,
+0x12, 0x4f, 0x7c, 0x7f, 0x01, 0x34, 0x7c, 0x77, 0x03, 0x23, 0xfc, 0x1d,
+0x19, 0x34, 0x38, 0x62, 0x79, 0x62, 0x23, 0x72, 0x0e, 0x4c, 0x25, 0x68,
+0x6b, 0x0c, 0x05, 0xd2, 0x23, 0x68, 0x1b, 0x0c, 0x10, 0xd1, 0x24, 0x68,
+0xa3, 0x0a, 0x0d, 0xd3, 0x01, 0x23, 0x0a, 0x4f, 0xc0, 0x46, 0xfb, 0x62,
+0x09, 0x4f, 0x0a, 0x4b, 0xc0, 0x46, 0xdf, 0x60, 0x99, 0x60, 0x58, 0x60,
+0x10, 0x1c, 0x18, 0x60, 0x01, 0x32, 0xfb, 0xe7, 0x10, 0x1c, 0x38, 0x64,
+0x01, 0x32, 0xfb, 0xe7, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x10, 0x40,
+0xc0, 0x00, 0x18, 0x00, 0x02, 0x81, 0x00, 0x00, 0x40, 0x01, 0x18, 0x00,
+0xf0, 0xb5, 0x47, 0x4f, 0x38, 0x68, 0x47, 0x4e, 0x47, 0x4d, 0x07, 0x23,
+0x5b, 0x02, 0xec, 0x18, 0x00, 0x28, 0x1d, 0xd1, 0x20, 0x6b, 0x01, 0x30,
+0x20, 0x63, 0x44, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x43, 0x48, 0x41, 0x69,
+0x00, 0x29, 0x13, 0xd0, 0xc1, 0x1d, 0x69, 0x31, 0x09, 0x7b, 0x00, 0x29,
+0x0e, 0xd0, 0x01, 0x23, 0x9b, 0x07, 0x01, 0x6d, 0x19, 0x43, 0x09, 0x68,
+0xc0, 0x46, 0x81, 0x61, 0xc2, 0x69, 0x91, 0x42, 0x04, 0xd0, 0xf1, 0x6c,
+0x01, 0x31, 0xf1, 0x64, 0x01, 0xf0, 0x50, 0xfe,
+0x38, 0x68, 0x01, 0x28, 0x17, 0xd1, 0x37, 0x48, 0x41, 0x69, 0x00, 0x29,
+0x13, 0xd0, 0xc1, 0x1d, 0x69, 0x31, 0x09, 0x7b, 0x00, 0x29, 0x0e, 0xd0,
+0x01, 0x23, 0x9b, 0x07, 0x01, 0x6d, 0x19, 0x43, 0x09, 0x68, 0xc0, 0x46,
+0x81, 0x61, 0xc2, 0x69, 0x91, 0x42, 0x04, 0xd0, 0xf1, 0x6c, 0x01, 0x31,
+0xf1, 0x64, 0x01, 0xf0, 0x35, 0xfe, 0x38, 0x68, 0x02, 0x28, 0x2f, 0xd1,
+0xbb, 0x23, 0x1b, 0x01, 0xee, 0x18, 0x70, 0x7b, 0x00, 0x28, 0x03, 0xd0,
+0x00, 0x20, 0x70, 0x73, 0x00, 0xf0, 0x4a, 0xfd, 0x30, 0x7b, 0x00, 0x28,
+0x02, 0xd0, 0x78, 0x68, 0x02, 0xf0, 0xaa, 0xff, 0x1b, 0x23, 0xdb, 0x01,
+0xe8, 0x18, 0xc0, 0x8b, 0x04, 0x26, 0x06, 0x40, 0xe0, 0x6a, 0xb0, 0x42,
+0x14, 0xd0, 0xf8, 0x68, 0x01, 0x30, 0xf8, 0x60, 0x19, 0x28, 0x11, 0xd3,
+0x1b, 0x48, 0x01, 0x7b, 0x00, 0x29, 0x0d, 0xd1, 0xff, 0x30, 0x41, 0x30,
+0x40, 0x78, 0x00, 0x28, 0x08, 0xd1, 0xb8, 0x68, 0x02, 0xf0, 0x90, 0xff,
+0x00, 0x20, 0xf8, 0x60, 0xe6, 0x62, 0x01, 0xe0, 0x00, 0x20, 0xf8, 0x60,
+0x38, 0x68, 0x03, 0x28, 0x0b, 0xd1, 0xec, 0x1d, 0x79, 0x34, 0xe0, 0x6b,
+0x80, 0x08, 0x02, 0xd3, 0x02, 0x20, 0x02, 0xf0, 0x07, 0xfc, 0x02, 0x23,
+0xe0, 0x6b, 0x98, 0x43, 0xe0, 0x63, 0x38, 0x68, 0x01, 0x30, 0x38, 0x60,
+0x03, 0x28, 0x01, 0xd9, 0x00, 0x20, 0x38, 0x60, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x3c, 0x04, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x40, 0x01, 0x18, 0x00, 0x64, 0x2d, 0x00, 0x80,
+0xe4, 0x2c, 0x00, 0x80, 0x28, 0x05, 0x00, 0x80, 0xb0, 0xb4, 0x1d, 0x48,
+0x84, 0x8a, 0x1d, 0x4a, 0x13, 0x8a, 0xc1, 0x1d, 0x09, 0x31, 0x01, 0x27,
+0x9c, 0x42, 0x03, 0xd1, 0x43, 0x8a, 0x54, 0x8a, 0xa3, 0x42, 0x10, 0xd0,
+0x0b, 0x78, 0x00, 0x2b, 0x0d, 0xd0, 0x4b, 0x78, 0x00, 0x2b, 0x0a, 0xd0,
+0x44, 0x8b, 0x93, 0x8a, 0x9c, 0x42, 0x04, 0xdc, 0x13, 0x4b, 0xc0, 0x46,
+0x5f, 0x60, 0x97, 0x82, 0x01, 0xe0, 0x01, 0x33, 0x93, 0x82, 0xc3, 0x8b,
+0x5c, 0x1c, 0xc4, 0x83, 0x84, 0x8b, 0xa3, 0x42, 0x0e, 0xdb, 0x84, 0x8a,
+0x05, 0x8b, 0x00, 0x23, 0xac, 0x42, 0x05, 0xda, 0x44, 0x8a, 0xc5, 0x8a,
+0xac, 0x42, 0x01, 0xda, 0x4b, 0x70, 0x00, 0xe0, 0x4f, 0x70, 0x43, 0x82,
+0x83, 0x82, 0xc3, 0x83, 0x41, 0x8a, 0xc0, 0x46, 0x51, 0x82, 0x80, 0x8a,
+0xc0, 0x46, 0x10, 0x82, 0xb0, 0xbc, 0x70, 0x47, 0xe8, 0x0e, 0x00, 0x80,
+0x3c, 0x04, 0x00, 0x80, 0x40, 0x01, 0x18, 0x00, 0xf7, 0xb5, 0x91, 0xb0,
+0x6b, 0x46, 0x84, 0x1e, 0x12, 0x99, 0x14, 0x29, 0x1a, 0xd9, 0x00, 0x20,
+0x81, 0x00, 0x67, 0x58, 0xc0, 0x46, 0x57, 0x50, 0x01, 0x30, 0x00, 0x06,
+0x00, 0x0e, 0x10, 0x28, 0xf6, 0xd3, 0x00, 0x21, 0x05, 0x20, 0x87, 0x00,
+0xd6, 0x59, 0x4f, 0x1c, 0x3d, 0x06, 0x2d, 0x0e, 0x0f, 0x1c, 0xbf, 0x00,
+0xde, 0x51, 0x29, 0x1c, 0x01, 0x30, 0x00, 0x06, 0x00, 0x0e, 0x10, 0x28,
+0xf1, 0xd3, 0x09, 0xe0, 0x00, 0x20, 0x81, 0x00, 0x63, 0x58, 0xc0, 0x46,
+0x53, 0x50, 0x01, 0x30, 0x00, 0x06, 0x00, 0x0e, 0x06, 0x28, 0xf6, 0xd3,
+0x00, 0x20, 0xe0, 0x70, 0x20, 0x72, 0x60, 0x72, 0xa0, 0x72, 0x20, 0x73,
+0x60, 0x73, 0x12, 0x99, 0x14, 0x29, 0x37, 0xd9, 0x69, 0x46, 0x8e, 0x1c,
+0x91, 0x78, 0x09, 0x07, 0x09, 0x0f, 0x89, 0x00, 0x14, 0x39, 0x0d, 0x06,
+0x2d, 0x16, 0x00, 0x27, 0x00, 0x2d, 0x1b, 0xdd, 0xf0, 0x19, 0x10, 0xa9,
+0x00, 0xf0, 0x3d, 0xf8, 0x00, 0x28, 0x0e, 0xd0,
+0x00, 0x20, 0x10, 0xa9, 0x09, 0x78, 0x00, 0x29, 0x09, 0xdd, 0x00, 0x22,
+0x39, 0x18, 0x72, 0x54, 0x01, 0x30, 0x00, 0x06, 0x00, 0x0e, 0x10, 0xa9,
+0x09, 0x78, 0x88, 0x42, 0xf6, 0xdb, 0x10, 0xa8, 0x00, 0x78, 0x38, 0x18,
+0x07, 0x06, 0x3f, 0x0e, 0xaf, 0x42, 0xe3, 0xdb, 0x68, 0x46, 0xe2, 0x1d,
+0x0d, 0x32, 0x00, 0x21, 0xab, 0x08, 0x5f, 0x1c, 0x08, 0xd0, 0x8b, 0x00,
+0xc4, 0x58, 0xc0, 0x46, 0xd4, 0x50, 0x01, 0x31, 0x09, 0x06, 0x09, 0x0e,
+0x8f, 0x42, 0xf6, 0xd8, 0x14, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x90, 0xb4, 0x87, 0x1e, 0x00, 0x20, 0x89, 0x08, 0x4b, 0x1c, 0x08, 0xd0,
+0x81, 0x00, 0x54, 0x58, 0xc0, 0x46, 0x7c, 0x50, 0x01, 0x30, 0x00, 0x06,
+0x00, 0x0e, 0x83, 0x42, 0xf6, 0xd8, 0x90, 0xbc, 0x70, 0x47, 0x80, 0xb4,
+0x02, 0x78, 0xd2, 0x06, 0xd2, 0x0e, 0x00, 0x23, 0x01, 0x27, 0x01, 0x2a,
+0x01, 0xdc, 0x0f, 0x70, 0x11, 0xe0, 0x40, 0x78, 0xc0, 0x46, 0x08, 0x70,
+0x14, 0x2a, 0x04, 0xd1, 0x08, 0x48, 0x01, 0x7a, 0x01, 0x31, 0x01, 0x72,
+0x07, 0xe0, 0x02, 0x2a, 0x05, 0xd0, 0x05, 0x2a, 0x03, 0xd0, 0x06, 0x2a,
+0x01, 0xd0, 0x15, 0x2a, 0x02, 0xd1, 0x18, 0x1c, 0x80, 0xbc, 0x70, 0x47,
+0x38, 0x1c, 0xfb, 0xe7, 0xe0, 0x82, 0x20, 0x40, 0x00, 0xb5, 0x0f, 0x48,
+0x01, 0x23, 0x1b, 0x06, 0x41, 0x69, 0x99, 0x43, 0x1a, 0x09, 0x41, 0x61,
+0xd1, 0x60, 0x00, 0x21, 0xa1, 0x22, 0x52, 0x03, 0x91, 0x61, 0x19, 0x1c,
+0x09, 0x4a, 0xc0, 0x46, 0x11, 0x60, 0x1b, 0x23, 0xdb, 0x01, 0xc0, 0x18,
+0x80, 0x69, 0x00, 0x28, 0x03, 0xd0, 0x02, 0xf0, 0x61, 0xfe, 0x08, 0xbc,
+0x18, 0x47, 0x04, 0x48, 0x41, 0x88, 0x01, 0x31, 0x41, 0x80, 0xf8, 0xe7,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0xe0, 0x82, 0x20, 0x40,
+0x70, 0x47, 0x00, 0x00, 0xf0, 0xb5, 0x86, 0xb0, 0x95, 0x4a, 0xd0, 0x68,
+0xd7, 0x1d, 0x79, 0x37, 0x01, 0x28, 0x09, 0xd1, 0x38, 0x89, 0x00, 0x28,
+0x06, 0xd1, 0xd0, 0x6f, 0x02, 0x23, 0x01, 0x68, 0x99, 0x43, 0x01, 0x60,
+0x14, 0x20, 0x38, 0x81, 0x8e, 0x4c, 0x61, 0x6a, 0x8e, 0x48, 0xc3, 0x6b,
+0x59, 0x18, 0xc1, 0x63, 0xa0, 0x6a, 0x19, 0x23, 0xdb, 0x01, 0xd4, 0x18,
+0xa0, 0x62, 0x21, 0x6a, 0x09, 0x03, 0x09, 0x0b, 0x81, 0x42, 0x05, 0xd1,
+0x01, 0x20, 0x40, 0x04, 0x87, 0x49, 0xc0, 0x46, 0x08, 0x60, 0xf3, 0xe0,
+0xbb, 0x8a, 0x58, 0x1c, 0xb8, 0x82, 0x3d, 0x8b, 0x01, 0x20, 0x00, 0x21,
+0xab, 0x42, 0x04, 0xdb, 0xd3, 0x1d, 0x89, 0x33, 0x58, 0x70, 0xb9, 0x82,
+0xf9, 0x83, 0x33, 0x23, 0x9b, 0x01, 0xd3, 0x18, 0x05, 0x93, 0x5b, 0x69,
+0x0f, 0x2b, 0x73, 0xd2, 0x00, 0x21, 0x7c, 0x4f, 0xc0, 0x46, 0x39, 0x61,
+0x21, 0x6a, 0x8a, 0x68, 0x12, 0x04, 0x12, 0x0c, 0x4b, 0x68, 0x1e, 0x0c,
+0x36, 0x04, 0xfd, 0x1f, 0x09, 0x3d, 0x00, 0x2e, 0x05, 0xd1, 0x3b, 0x2a,
+0x03, 0xd3, 0x01, 0x23, 0xdb, 0x02, 0x9a, 0x42, 0x01, 0xd9, 0xa8, 0x73,
+0xc8, 0xe0, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x31, 0x19, 0x43, 0x09, 0x68,
+0xc0, 0x46, 0x03, 0x91, 0x03, 0xa9, 0x09, 0x88, 0x01, 0x31, 0x09, 0x04,
+0x09, 0x0c, 0x79, 0x82, 0x49, 0x09, 0x05, 0x31, 0x09, 0x06, 0x09, 0x0e,
+0x69, 0x4e, 0xc0, 0x46, 0x02, 0x96, 0x69, 0x48, 0x43, 0x6a, 0xc0, 0x46,
+0x01, 0x93, 0x83, 0x6a, 0xc0, 0x46, 0x00, 0x93, 0xc2, 0x1d, 0x11, 0x32,
+0x80, 0x69, 0x00, 0x03, 0x00, 0x0b, 0x92, 0x68, 0xb3, 0x07, 0x1a, 0x43,
+0x12, 0x68, 0x90, 0x42, 0x01, 0xd1, 0x01, 0x20,
+0x0d, 0xe0, 0x90, 0x42, 0x05, 0xd9, 0x00, 0x9b, 0x18, 0x1a, 0x01, 0x9b,
+0xd2, 0x1a, 0x82, 0x18, 0x00, 0xe0, 0x12, 0x1a, 0x01, 0x20, 0x09, 0x01,
+0x91, 0x42, 0x00, 0xd3, 0x00, 0x20, 0x01, 0x28, 0x65, 0xd1, 0x51, 0x49,
+0x20, 0x69, 0x00, 0x28, 0x62, 0xd0, 0x05, 0x99, 0x48, 0x69, 0x01, 0x30,
+0x48, 0x61, 0x02, 0x20, 0x21, 0x6a, 0xc0, 0x46, 0x08, 0x60, 0x00, 0xf0,
+0xa7, 0xfc, 0x78, 0x63, 0xbe, 0x60, 0x49, 0x49, 0x22, 0x6a, 0xa3, 0x6b,
+0xd3, 0x18, 0x66, 0x6b, 0xb3, 0x42, 0x00, 0xd9, 0x22, 0x6b, 0xc0, 0x46,
+0xba, 0x62, 0xba, 0x6a, 0x0c, 0x32, 0xfa, 0x62, 0x00, 0x22, 0xfa, 0x61,
+0x03, 0xaa, 0x52, 0x88, 0xd2, 0x09, 0x03, 0xd3, 0x01, 0x22, 0x00, 0xe0,
+0x7b, 0xe0, 0x00, 0xe0, 0x00, 0x22, 0x7a, 0x60, 0x7a, 0x68, 0xc0, 0x46,
+0x02, 0x60, 0x78, 0x8a, 0x41, 0x4e, 0x60, 0x28, 0x04, 0xdc, 0xb0, 0x83,
+0x78, 0x8a, 0xc0, 0x46, 0xf0, 0x83, 0x08, 0xe0, 0x60, 0x20, 0xb0, 0x83,
+0x79, 0x8a, 0xf8, 0x6a, 0x42, 0x18, 0x63, 0x6b, 0x9a, 0x42, 0x03, 0xd8,
+0xf1, 0x83, 0x00, 0x22, 0x3a, 0x63, 0x05, 0xe0, 0x21, 0x6b, 0xc0, 0x46,
+0x39, 0x63, 0x61, 0x6b, 0x08, 0x1a, 0xf0, 0x83, 0x2d, 0x49, 0x78, 0x6b,
+0x42, 0x68, 0xc0, 0x46, 0xba, 0x60, 0x82, 0x68, 0xc0, 0x46, 0xfa, 0x60,
+0x02, 0x69, 0xc0, 0x46, 0x7a, 0x61, 0x40, 0x69, 0xc0, 0x46, 0xb8, 0x61,
+0x2e, 0x4b, 0xc8, 0x18, 0x04, 0x90, 0x00, 0xf0, 0x37, 0xf9, 0x04, 0x98,
+0x00, 0xf0, 0x88, 0xf8, 0x00, 0xf0, 0xf6, 0xfa, 0x78, 0x8a, 0xf1, 0x8b,
+0x88, 0x42, 0x04, 0xd1, 0xf9, 0x6a, 0x08, 0x18, 0x04, 0xe0, 0x38, 0xe0,
+0x32, 0xe0, 0x3a, 0x6b, 0x10, 0x18, 0x40, 0x1a, 0x81, 0x07, 0x02, 0xd0,
+0x80, 0x08, 0x80, 0x00, 0x04, 0x30, 0x61, 0x6b, 0x09, 0x1a, 0xa2, 0x6b,
+0x91, 0x42, 0x00, 0xd2, 0x20, 0x6b, 0xc0, 0x46, 0x20, 0x62, 0xe8, 0x7b,
+0x00, 0x28, 0x08, 0xd0, 0x00, 0x22, 0xea, 0x73, 0x05, 0x99, 0x48, 0x69,
+0x01, 0x38, 0x48, 0x61, 0x78, 0x6b, 0x00, 0xf0, 0x73, 0xfa, 0x18, 0x48,
+0x80, 0x6a, 0x80, 0x06, 0x80, 0x0e, 0x01, 0x28, 0x0a, 0xd1, 0x20, 0x6a,
+0x00, 0x03, 0x00, 0x0b, 0x0b, 0x4c, 0xa1, 0x6a, 0x88, 0x42, 0x03, 0xd0,
+0x06, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x01, 0x20, 0x40, 0x04,
+0x08, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x06, 0xe0, 0xe0, 0x68, 0x00, 0x28,
+0x01, 0xd0, 0x00, 0xf0, 0xb5, 0xfa, 0x01, 0x20, 0xa8, 0x73, 0xed, 0xe7,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0x40, 0x14, 0x40, 0xa4, 0x2a, 0x00, 0x80,
+0x00, 0x00, 0x00, 0xb0, 0x28, 0x1a, 0x00, 0x80, 0x55, 0x55, 0x55, 0x55,
+0xa8, 0x03, 0x00, 0x80, 0x68, 0x1a, 0x00, 0x80, 0xc4, 0x0b, 0x00, 0x00,
+0x00, 0x00, 0x10, 0x40, 0x80, 0xb5, 0x07, 0x1c, 0x78, 0x6a, 0x40, 0x89,
+0xff, 0x21, 0x01, 0x31, 0x01, 0x40, 0x10, 0x48, 0x02, 0xd1, 0x81, 0x6c,
+0x01, 0x31, 0x81, 0x64, 0x79, 0x6a, 0x49, 0x89, 0x49, 0x0b, 0x02, 0xd2,
+0x41, 0x6c, 0x01, 0x31, 0x41, 0x64, 0x0b, 0x48, 0x41, 0x6a, 0x01, 0x31,
+0x41, 0x62, 0x78, 0x6a, 0x39, 0x6b, 0xc0, 0x46, 0x48, 0x62, 0x38, 0x6b,
+0x00, 0xf0, 0xf8, 0xfb, 0x38, 0x1c, 0x00, 0xf0, 0xb3, 0xf8, 0x01, 0x20,
+0x04, 0x49, 0xc0, 0x46, 0xc8, 0x73, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xa4, 0x2a, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40, 0x18, 0x1a, 0x00, 0x80,
+0xf8, 0xb5, 0x07, 0x1c, 0x00, 0x22, 0xf9, 0x1d, 0x61, 0x31, 0x0d, 0x1c,
+0x78, 0x6a, 0xc0, 0x46, 0x00, 0x90, 0x40, 0x89,
+0x03, 0x0c, 0x01, 0xd2, 0x40, 0x0a, 0x03, 0xd2, 0x38, 0x1c, 0xff, 0xf7,
+0xc1, 0xff, 0x67, 0xe0, 0x35, 0x48, 0xc0, 0x6b, 0x00, 0x09, 0x1f, 0xd3,
+0x08, 0x78, 0x40, 0x08, 0x1c, 0xd2, 0x00, 0x20, 0x43, 0x00, 0xcc, 0x5a,
+0x31, 0x4e, 0x9e, 0x19, 0x33, 0x23, 0x9b, 0x01, 0xf3, 0x18, 0x1b, 0x88,
+0x9c, 0x42, 0x0e, 0xd0, 0xb8, 0x69, 0x39, 0x6b, 0xc0, 0x46, 0x88, 0x61,
+0xf8, 0x68, 0x39, 0x6b, 0xc0, 0x46, 0xc8, 0x60, 0x38, 0x1c, 0x00, 0xf0,
+0x27, 0xf9, 0x38, 0x1c, 0x00, 0xf0, 0x74, 0xf8, 0x46, 0xe0, 0x01, 0x30,
+0x03, 0x28, 0xe3, 0xdb, 0x02, 0x20, 0x43, 0x00, 0x5c, 0x18, 0xe4, 0x88,
+0x22, 0x4e, 0x9e, 0x19, 0x33, 0x23, 0x9b, 0x01, 0xf3, 0x18, 0x1b, 0x88,
+0x9c, 0x42, 0x03, 0xd1, 0x01, 0x23, 0x01, 0x38, 0xd8, 0x42, 0xf0, 0xdc,
+0x01, 0x23, 0xd8, 0x42, 0xc4, 0xd0, 0x1b, 0x4e, 0x0b, 0x23, 0x1b, 0x02,
+0xf0, 0x18, 0x40, 0x69, 0x00, 0x28, 0x24, 0xd0, 0x7d, 0x63, 0x00, 0x98,
+0x40, 0x89, 0x00, 0x0c, 0x1f, 0xd2, 0x00, 0x24, 0x2d, 0x23, 0x9b, 0x01,
+0xf0, 0x18, 0xc0, 0x6b, 0x35, 0x1c, 0x00, 0x28, 0x17, 0xd0, 0xfe, 0x1d,
+0x2d, 0x36, 0xa2, 0x00, 0x52, 0x19, 0x2d, 0x23, 0x9b, 0x01, 0xd2, 0x18,
+0xd2, 0x6b, 0x38, 0x1c, 0x31, 0x1c, 0x02, 0xf0, 0x7b, 0xfc, 0x01, 0x28,
+0x0e, 0xd0, 0x01, 0x34, 0xa0, 0x00, 0x40, 0x19, 0x2d, 0x23, 0x9b, 0x01,
+0xc0, 0x18, 0xc0, 0x6b, 0x00, 0x28, 0xea, 0xd1, 0x01, 0xe0, 0x01, 0x2a,
+0x02, 0xd0, 0x38, 0x1c, 0x00, 0xf0, 0x08, 0xf8, 0xf8, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0xe8, 0x1a, 0x00, 0x80, 0x68, 0x0e, 0x00, 0x80,
+0x80, 0xb5, 0x07, 0x1c, 0xb8, 0x69, 0x39, 0x6b, 0xc0, 0x46, 0x88, 0x61,
+0xf8, 0x68, 0x39, 0x6b, 0xc0, 0x46, 0xc8, 0x60, 0x78, 0x6a, 0x40, 0x89,
+0x01, 0x0c, 0x0e, 0xd2, 0x40, 0x0a, 0x0c, 0xd3, 0x38, 0x68, 0x40, 0x08,
+0x02, 0xd3, 0x38, 0x1c, 0x02, 0xf0, 0x0c, 0xfc, 0x38, 0x1c, 0x00, 0xf0,
+0xbb, 0xf8, 0x38, 0x1c, 0x00, 0xf0, 0x08, 0xf8, 0x02, 0xe0, 0x38, 0x1c,
+0xff, 0xf7, 0x30, 0xff, 0x01, 0x20, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x01, 0x21, 0x00, 0x6b, 0x40, 0x6a, 0xc0, 0x46, 0x01, 0x60, 0x70, 0x47,
+0xb0, 0xb4, 0xc1, 0x1d, 0x39, 0x31, 0x09, 0x8b, 0x89, 0x08, 0x09, 0x04,
+0x09, 0x0c, 0x84, 0x6a, 0xc2, 0x1d, 0x61, 0x32, 0x00, 0x20, 0x00, 0x29,
+0x0c, 0xdd, 0x87, 0x00, 0x3d, 0x19, 0x01, 0x23, 0x9b, 0x07, 0x2b, 0x43,
+0x1b, 0x68, 0xc0, 0x46, 0xd3, 0x51, 0x01, 0x30, 0x00, 0x04, 0x00, 0x0c,
+0x88, 0x42, 0xf2, 0xdb, 0xb0, 0xbc, 0x70, 0x47, 0xf0, 0xb5, 0xa0, 0xb0,
+0x01, 0x23, 0x9b, 0x07, 0xc1, 0x1d, 0x21, 0x31, 0x19, 0x43, 0x09, 0x68,
+0xc0, 0x46, 0x0b, 0x91, 0xc1, 0x1d, 0x53, 0x31, 0x19, 0x43, 0x1f, 0x91,
+0x09, 0x68, 0x01, 0xaf, 0xfa, 0x1d, 0x39, 0x32, 0x1e, 0x92, 0x17, 0xab,
+0x59, 0x80, 0x3a, 0x49, 0x01, 0x23, 0x9b, 0x07, 0x0a, 0x6a, 0x13, 0x43,
+0xcc, 0x1d, 0x11, 0x34, 0x89, 0x69, 0x09, 0x03, 0x09, 0x0b, 0x22, 0x69,
+0xe5, 0x68, 0xc0, 0x46, 0x1d, 0x95, 0xfc, 0x1d, 0x39, 0x34, 0x64, 0x8b,
+0x64, 0x09, 0x05, 0x34, 0x24, 0x06, 0x24, 0x0e, 0x1c, 0x94, 0x56, 0x1a,
+0x1b, 0x96, 0x1c, 0x9c, 0x2e, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0x01, 0x26,
+0x1d, 0x9d, 0x1a, 0x68, 0x91, 0x42, 0x01, 0xd1, 0x32, 0x1c, 0x0b, 0xe0,
+0x91, 0x42, 0x03, 0xd9, 0x52, 0x1b, 0x1b, 0x9e, 0xb5, 0x18, 0x00, 0xe0,
+0x55, 0x1a, 0x01, 0x22, 0x24, 0x01, 0xac, 0x42,
+0x00, 0xd3, 0x00, 0x22, 0x01, 0x2a, 0xe6, 0xd1, 0x91, 0x07, 0x01, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0x39, 0x60, 0x93, 0x07, 0x01, 0x1d, 0x19, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0x79, 0x60, 0xc1, 0x1d, 0x01, 0x31, 0x19, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0xb9, 0x60, 0x1f, 0x99, 0x09, 0x68, 0x1e, 0x9a,
+0xc0, 0x46, 0x51, 0x83, 0xc1, 0x1d, 0x1d, 0x31, 0x19, 0x43, 0x09, 0x68,
+0xc0, 0x46, 0x38, 0x63, 0x79, 0x62, 0xc1, 0x1d, 0x11, 0x31, 0x19, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0xb9, 0x61, 0xc1, 0x1d, 0x05, 0x31, 0x19, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0xf9, 0x60, 0xc1, 0x1d, 0x17, 0x31, 0x19, 0x43,
+0x09, 0x68, 0xc0, 0x46, 0xf9, 0x83, 0x0e, 0x30, 0x18, 0x43, 0x00, 0x68,
+0xc0, 0x46, 0xf8, 0x81, 0x38, 0x68, 0x40, 0x08, 0x02, 0xd3, 0x38, 0x1c,
+0x02, 0xf0, 0x5c, 0xfb, 0x38, 0x1c, 0x00, 0xf0, 0x0b, 0xf8, 0x38, 0x1c,
+0xff, 0xf7, 0x58, 0xff, 0x20, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xa8, 0x03, 0x00, 0x80, 0x55, 0x55, 0x55, 0x55, 0xf8, 0xb5, 0x07, 0x1c,
+0xf8, 0x1d, 0x39, 0x30, 0x41, 0x8b, 0x39, 0x4a, 0x91, 0x42, 0x00, 0xdd,
+0x42, 0x83, 0x42, 0x8b, 0xc0, 0x46, 0x00, 0x92, 0x01, 0x20, 0x3a, 0x1d,
+0x06, 0xca, 0xbb, 0x6a, 0x02, 0xf0, 0x0e, 0xff, 0x33, 0x4a, 0xc0, 0x46,
+0x00, 0x92, 0x33, 0x4e, 0x30, 0x6a, 0x33, 0x4c, 0xe1, 0x6d, 0x41, 0x18,
+0x38, 0x6b, 0xc3, 0x1d, 0x05, 0x33, 0x01, 0x20, 0x72, 0x6a, 0x02, 0xf0,
+0xfb, 0xfe, 0xe0, 0x6d, 0x18, 0x30, 0x00, 0x25, 0xb1, 0x6a, 0x81, 0x42,
+0x01, 0xd8, 0xe5, 0x65, 0x00, 0xe0, 0xe0, 0x65, 0x2f, 0x23, 0x9b, 0x01,
+0x20, 0x1c, 0xe1, 0x6d, 0xe4, 0x18, 0x22, 0x68, 0x92, 0x00, 0x27, 0x4b,
+0xc0, 0x46, 0x99, 0x50, 0x26, 0x48, 0xc1, 0x6b, 0x4a, 0x08, 0x05, 0xd3,
+0x49, 0x08, 0x49, 0x00, 0xc1, 0x63, 0x01, 0x20, 0x01, 0xf0, 0xd6, 0xff,
+0x22, 0x4a, 0x1f, 0x48, 0xc1, 0x1d, 0x89, 0x31, 0x0b, 0x78, 0x00, 0x2b,
+0x02, 0xd0, 0x49, 0x78, 0x00, 0x29, 0x00, 0xd1, 0x1e, 0x4a, 0xc0, 0x46,
+0x00, 0x92, 0x20, 0x68, 0x80, 0x00, 0x19, 0x4b, 0xc3, 0x18, 0x05, 0xce,
+0xc1, 0x1d, 0x11, 0x31, 0x01, 0x20, 0x02, 0xf0, 0xc7, 0xfe, 0x14, 0x48,
+0x21, 0x68, 0x01, 0x31, 0x21, 0x60, 0x17, 0x29, 0x00, 0xd3, 0x25, 0x60,
+0x39, 0x6b, 0xc0, 0x46, 0x0d, 0x65, 0x79, 0x6a, 0x3a, 0x6b, 0xc0, 0x46,
+0x51, 0x62, 0x33, 0x23, 0x9b, 0x01, 0xc0, 0x18, 0x81, 0x68, 0x00, 0x29,
+0x03, 0xd1, 0x39, 0x6b, 0xc0, 0x46, 0x81, 0x60, 0x04, 0xe0, 0x39, 0x6b,
+0xc2, 0x68, 0xc0, 0x46, 0x11, 0x65, 0x39, 0x6b, 0xc0, 0x46, 0xc1, 0x60,
+0xf8, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xea, 0x05, 0x00, 0x00,
+0x18, 0x00, 0x14, 0x02, 0x7c, 0x29, 0x00, 0x80, 0x68, 0x0e, 0x00, 0x80,
+0x44, 0x82, 0x20, 0x40, 0xe8, 0x0e, 0x00, 0x80, 0x04, 0x00, 0x00, 0x02,
+0x04, 0x00, 0x00, 0x03, 0xf0, 0xb5, 0x11, 0x4e, 0xff, 0x25, 0x01, 0x35,
+0x10, 0x4f, 0xc0, 0x46, 0x35, 0x60, 0x78, 0x69, 0x01, 0x38, 0x78, 0x61,
+0xbc, 0x68, 0x00, 0x2c, 0x10, 0xd0, 0x20, 0x6d, 0xc0, 0x46, 0xb8, 0x60,
+0x20, 0x1c, 0x00, 0xf0, 0x21, 0xf8, 0x20, 0x1c, 0x00, 0xf0, 0x04, 0xfa,
+0x08, 0x48, 0x80, 0x6a, 0x00, 0x0c, 0x00, 0x07, 0xe9, 0xd1, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x05, 0x48, 0xc1, 0x79, 0x01, 0x31, 0xc1, 0x71,
+0xf7, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x28, 0x1b, 0x00, 0x80,
+0x00, 0x00, 0x10, 0x40, 0xa0, 0x82, 0x20, 0x40,
+0x01, 0x20, 0x80, 0x03, 0x01, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x70, 0x47,
+0x00, 0x00, 0x00, 0xb0, 0x90, 0xb5, 0x07, 0x1c, 0x38, 0x68, 0xc0, 0x08,
+0x09, 0xd3, 0x1d, 0x48, 0x01, 0x6a, 0x01, 0x39, 0x01, 0x62, 0x20, 0x30,
+0x00, 0x79, 0x00, 0x28, 0x01, 0xd0, 0xfe, 0xf7, 0xe9, 0xfd, 0x01, 0x23,
+0x9b, 0x07, 0xf8, 0x1d, 0x1d, 0x30, 0x18, 0x43, 0x00, 0x68, 0x16, 0x4c,
+0x61, 0x6a, 0x81, 0x42, 0x21, 0xd1, 0x01, 0x1c, 0x19, 0x43, 0x09, 0x68,
+0x09, 0x04, 0x09, 0x0c, 0x01, 0x29, 0x1a, 0xd1, 0x00, 0xf0, 0x22, 0xf8,
+0x60, 0x62, 0x60, 0x6a, 0x21, 0x6a, 0x88, 0x42, 0x05, 0xd0, 0x01, 0x21,
+0x89, 0x07, 0x01, 0x43, 0x09, 0x68, 0x09, 0x04, 0xf2, 0xd0, 0x51, 0x21,
+0x89, 0x03, 0x62, 0x6a, 0x23, 0x6b, 0x9a, 0x42, 0x02, 0xd1, 0x60, 0x6b,
+0xa2, 0x6b, 0x80, 0x1a, 0x04, 0x38, 0xc8, 0x60, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x20, 0x79, 0x6a, 0xc0, 0x46, 0x08, 0x60, 0xf7, 0xe7,
+0x6c, 0x06, 0x00, 0x80, 0xe8, 0x1a, 0x00, 0x80, 0x01, 0x23, 0x9b, 0x07,
+0xc1, 0x1d, 0x01, 0x31, 0x19, 0x43, 0x09, 0x68, 0x09, 0x04, 0x09, 0x0c,
+0x08, 0x18, 0x0d, 0x30, 0x81, 0x07, 0x02, 0xd0, 0x80, 0x08, 0x80, 0x00,
+0x04, 0x30, 0x04, 0x49, 0x8a, 0x6b, 0x12, 0x18, 0x4b, 0x6b, 0x9a, 0x42,
+0x00, 0xd9, 0x08, 0x6b, 0x70, 0x47, 0x00, 0x00, 0xe8, 0x1a, 0x00, 0x80,
+0x00, 0xb5, 0x04, 0x48, 0xc0, 0x68, 0x10, 0x28, 0x01, 0xd3, 0x00, 0xf0,
+0x05, 0xf8, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xe8, 0x1a, 0x00, 0x80,
+0x88, 0xb5, 0x0c, 0x4f, 0x38, 0x79, 0x00, 0x28, 0x11, 0xd1, 0x0b, 0x49,
+0x10, 0x20, 0x02, 0xf0, 0xf5, 0xfd, 0x00, 0x28, 0x0b, 0xd0, 0x01, 0x20,
+0x38, 0x71, 0x08, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0x07, 0x48, 0x42, 0x68,
+0x07, 0x4b, 0x01, 0x68, 0x00, 0x20, 0x02, 0xf0, 0xdf, 0xfd, 0x88, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0xf8, 0x1a, 0x00, 0x80, 0xf5, 0x2c, 0xff, 0xff,
+0x10, 0x00, 0x35, 0x02, 0x7c, 0x29, 0x00, 0x80, 0x44, 0x80, 0x20, 0x40,
+0x90, 0xb5, 0x01, 0x20, 0x40, 0x02, 0x10, 0x49, 0xc0, 0x46, 0x08, 0x60,
+0x0f, 0x4f, 0x10, 0x21, 0xf8, 0x1d, 0x3d, 0x30, 0x02, 0xf0, 0x4c, 0xfc,
+0x19, 0x23, 0xdb, 0x01, 0xfc, 0x18, 0xe0, 0x68, 0x00, 0x28, 0x01, 0xd0,
+0x00, 0xf0, 0x14, 0xf8, 0x00, 0x20, 0xc9, 0x23, 0x1b, 0x01, 0xf9, 0x18,
+0x08, 0x71, 0xe0, 0x68, 0x10, 0x28, 0x04, 0xd3, 0x01, 0x20, 0xbb, 0x23,
+0x1b, 0x01, 0xf9, 0x18, 0x48, 0x73, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0x00, 0x00, 0xb0, 0x68, 0x0e, 0x00, 0x80, 0xf8, 0xb5, 0x37, 0x48,
+0x19, 0x23, 0xdb, 0x01, 0xc1, 0x18, 0xc9, 0x68, 0x35, 0x4d, 0x10, 0x29,
+0x00, 0xd9, 0x10, 0x21, 0x69, 0x62, 0x32, 0x48, 0xc1, 0x6c, 0x00, 0x6e,
+0x81, 0x42, 0x07, 0xd9, 0x08, 0x1a, 0x07, 0x09, 0x00, 0x24, 0x68, 0x6a,
+0xb8, 0x42, 0x12, 0xd2, 0x07, 0x1c, 0x10, 0xe0, 0x81, 0x42, 0x2a, 0xd2,
+0x2c, 0x4a, 0x52, 0x6b, 0x10, 0x1a, 0x07, 0x09, 0x68, 0x6a, 0xb8, 0x42,
+0x05, 0xd9, 0x0c, 0x09, 0x39, 0x19, 0x88, 0x42, 0x03, 0xd2, 0xc4, 0x1b,
+0x01, 0xe0, 0x00, 0x24, 0x07, 0x1c, 0x3e, 0x19, 0x30, 0x01, 0x25, 0x49,
+0x02, 0xf0, 0x84, 0xfd, 0x00, 0x28, 0x3d, 0xd0, 0x23, 0x48, 0x00, 0x2c,
+0x1a, 0xd1, 0x1e, 0x49, 0x3a, 0x01, 0x6f, 0x62, 0x09, 0x6e, 0x8c, 0x18,
+0x1d, 0x4d, 0x6b, 0x6b, 0xa3, 0x42, 0x00, 0xd8, 0xe4, 0x1a, 0x1e, 0x4b,
+0x1a, 0x43, 0x00, 0x92, 0xea, 0x6a, 0x51, 0x18,
+0x2a, 0x6b, 0x03, 0x1c, 0x20, 0xe0, 0x1b, 0x48, 0x01, 0x6b, 0x01, 0x31,
+0x01, 0x63, 0x00, 0x20, 0x68, 0x62, 0xf8, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x10, 0x49, 0x24, 0x01, 0x3f, 0x01, 0x11, 0x22, 0x52, 0x05, 0x3a, 0x43,
+0x6e, 0x62, 0x00, 0x92, 0x0e, 0x4d, 0xea, 0x6a, 0x09, 0x6e, 0x51, 0x18,
+0x03, 0x1c, 0x06, 0x1c, 0x00, 0x20, 0x2a, 0x6b, 0x02, 0xf0, 0x4a, 0xfd,
+0x0c, 0x4a, 0x22, 0x43, 0x00, 0x92, 0xbb, 0x19, 0xe9, 0x6a, 0x2a, 0x6b,
+0x00, 0x20, 0x02, 0xf0, 0x41, 0xfd, 0x03, 0x48, 0xc0, 0x46, 0x04, 0x66,
+0x00, 0xf0, 0x10, 0xf8, 0x01, 0x20, 0xda, 0xe7, 0x68, 0x0e, 0x00, 0x80,
+0x28, 0x1b, 0x00, 0x80, 0x7c, 0x29, 0x00, 0x80, 0x5d, 0x2e, 0xff, 0xff,
+0x44, 0x80, 0x20, 0x40, 0x00, 0x00, 0x36, 0x02, 0xa0, 0x82, 0x20, 0x40,
+0x04, 0x48, 0x01, 0x6e, 0x04, 0x4a, 0x80, 0x30, 0xd1, 0x60, 0x02, 0x23,
+0xc1, 0x6b, 0x19, 0x43, 0xc1, 0x63, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0x90, 0xee, 0x20, 0x40, 0xf0, 0xb5, 0x84, 0xb0, 0x01, 0x20, 0x80, 0x02,
+0x1c, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x00, 0x27, 0x1b, 0x4e, 0x33, 0x23,
+0x9b, 0x01, 0xf5, 0x18, 0x68, 0x6a, 0x00, 0x28, 0x1d, 0xd9, 0x19, 0x4c,
+0x68, 0x46, 0x10, 0x21, 0x02, 0xf0, 0x90, 0xfb, 0x68, 0x46, 0x00, 0xf0,
+0x33, 0xf8, 0x00, 0x28, 0x04, 0xd0, 0x15, 0x49, 0x48, 0x69, 0x01, 0x30,
+0x48, 0x61, 0x0a, 0xe0, 0x13, 0x49, 0x60, 0x7b, 0x01, 0x30, 0x60, 0x73,
+0x88, 0x79, 0x01, 0x30, 0x88, 0x71, 0x11, 0x48, 0x00, 0x68, 0x02, 0xf0,
+0x65, 0xf9, 0x68, 0x6a, 0x01, 0x37, 0xb8, 0x42, 0xe2, 0xd8, 0xbb, 0x23,
+0x1b, 0x01, 0xf0, 0x18, 0x81, 0x7b, 0x00, 0x29, 0x03, 0xd0, 0x00, 0x21,
+0x81, 0x73, 0xff, 0xf7, 0x05, 0xfb, 0xff, 0xf7, 0xe3, 0xfe, 0x04, 0xb0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0,
+0x68, 0x0e, 0x00, 0x80, 0xb0, 0x82, 0x20, 0x40, 0x08, 0x83, 0x20, 0x40,
+0xa0, 0x82, 0x20, 0x40, 0x58, 0x04, 0x00, 0x80, 0x90, 0xb4, 0x17, 0x4f,
+0x19, 0x23, 0xdb, 0x01, 0xf9, 0x18, 0x00, 0x22, 0xcb, 0x68, 0x00, 0x2b,
+0x23, 0xd0, 0x01, 0x3b, 0xcb, 0x60, 0x33, 0x23, 0x9b, 0x01, 0xff, 0x18,
+0xbb, 0x69, 0x1c, 0x6d, 0xc0, 0x46, 0xbc, 0x61, 0x04, 0x68, 0xc0, 0x46,
+0x5c, 0x60, 0x44, 0x68, 0xc0, 0x46, 0x9c, 0x60, 0x84, 0x68, 0xc0, 0x46,
+0x1c, 0x61, 0xc0, 0x68, 0xc0, 0x46, 0x58, 0x61, 0x1a, 0x65, 0x08, 0x69,
+0x42, 0x1c, 0x0a, 0x61, 0x00, 0x28, 0x03, 0xd0, 0x38, 0x6a, 0xc0, 0x46,
+0x03, 0x65, 0x00, 0xe0, 0xfb, 0x61, 0x3b, 0x62, 0x18, 0x1c, 0x90, 0xbc,
+0x70, 0x47, 0x10, 0x1c, 0xfb, 0xe7, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x0a, 0x4a, 0x33, 0x23, 0x9b, 0x01, 0xd1, 0x18, 0xc8, 0x69, 0x19, 0x23,
+0xdb, 0x01, 0xd2, 0x18, 0x13, 0x69, 0x00, 0x2b, 0x06, 0xd0, 0x01, 0x3b,
+0x13, 0x61, 0xca, 0x69, 0x12, 0x6d, 0xc0, 0x46, 0xca, 0x61, 0x70, 0x47,
+0x00, 0x21, 0x11, 0x61, 0xfb, 0xe7, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x06, 0x4a, 0x11, 0x69, 0x4b, 0x1c, 0x13, 0x61, 0x40, 0x32, 0x00, 0x29,
+0x01, 0xd0, 0xd1, 0x69, 0x00, 0xe0, 0x00, 0x21, 0x01, 0x65, 0xd0, 0x61,
+0x70, 0x47, 0x00, 0x00, 0xe8, 0x1a, 0x00, 0x80, 0x06, 0x4a, 0xd1, 0x68,
+0x4b, 0x1c, 0xd3, 0x60, 0x40, 0x32, 0x00, 0x29, 0x01, 0xd0, 0x91, 0x69,
+0x00, 0xe0, 0x00, 0x21, 0x01, 0x65, 0x90, 0x61, 0x70, 0x47, 0x00, 0x00,
+0xe8, 0x1a, 0x00, 0x80, 0x90, 0xb4, 0x00, 0x21,
+0x0f, 0x4a, 0x97, 0x89, 0x92, 0x6a, 0x4b, 0x00, 0x1b, 0x18, 0x9b, 0x8a,
+0x00, 0x2b, 0x12, 0xd0, 0xbb, 0x42, 0x10, 0xdc, 0x1c, 0x1c, 0x58, 0x23,
+0x63, 0x43, 0xd3, 0x18, 0xdc, 0x1f, 0x49, 0x3c, 0x01, 0x23, 0x9b, 0x07,
+0x23, 0x43, 0x1b, 0x68, 0x1b, 0x06, 0x1b, 0x0e, 0x03, 0x2b, 0x02, 0xd0,
+0x00, 0x20, 0x90, 0xbc, 0x70, 0x47, 0x01, 0x31, 0x04, 0x29, 0xe4, 0xd3,
+0x01, 0x20, 0xf8, 0xe7, 0x4c, 0x2a, 0x00, 0x80, 0xf7, 0xb5, 0x86, 0xb0,
+0x3d, 0x4a, 0x07, 0x1c, 0xd1, 0x69, 0x8f, 0x40, 0x03, 0x1c, 0x14, 0x6a,
+0xe3, 0x40, 0x5f, 0x40, 0x07, 0x9e, 0x8e, 0x40, 0x77, 0x40, 0xcf, 0x40,
+0x94, 0x69, 0xc0, 0x46, 0x05, 0x94, 0x03, 0x1c, 0xa3, 0x40, 0x00, 0x25,
+0x14, 0x69, 0xc0, 0x46, 0x04, 0x94, 0x00, 0x2c, 0x5d, 0xd9, 0x1c, 0x1c,
+0x32, 0x4e, 0x26, 0x43, 0x94, 0x69, 0xe6, 0x40, 0x33, 0x1c, 0x03, 0x96,
+0x53, 0x6a, 0xc0, 0x46, 0x02, 0x93, 0xd2, 0x6a, 0xc0, 0x46, 0x01, 0x92,
+0xbb, 0x00, 0x02, 0x9a, 0xd2, 0x58, 0x13, 0x1c, 0x05, 0x9c, 0xe3, 0x40,
+0x03, 0x9c, 0xa3, 0x42, 0x3e, 0xd1, 0x8a, 0x40, 0xca, 0x40, 0x14, 0x1c,
+0x63, 0x00, 0x1b, 0x19, 0x5b, 0x01, 0x01, 0x9a, 0xd2, 0x18, 0x01, 0x23,
+0x9b, 0x07, 0xd6, 0x1d, 0x01, 0x36, 0x33, 0x43, 0x1b, 0x68, 0x1b, 0x06,
+0x1b, 0x0e, 0x03, 0x2b, 0x2c, 0xd1, 0x01, 0x23, 0x9b, 0x07, 0xd6, 0x1d,
+0x51, 0x36, 0x33, 0x43, 0x1b, 0x68, 0x07, 0x9e, 0x1e, 0x40, 0x00, 0x96,
+0x01, 0x23, 0x9b, 0x07, 0xd6, 0x1d, 0x49, 0x36, 0x33, 0x43, 0x1b, 0x68,
+0x83, 0x42, 0x1b, 0xd1, 0x01, 0x23, 0x9b, 0x07, 0xd6, 0x1d, 0x4d, 0x36,
+0x33, 0x43, 0x1b, 0x68, 0x00, 0x9e, 0xb3, 0x42, 0x12, 0xd1, 0x01, 0x23,
+0x9b, 0x07, 0x1a, 0x43, 0x12, 0x68, 0x12, 0x04, 0x12, 0x0c, 0x08, 0x9b,
+0x32, 0x2b, 0x04, 0xd1, 0x02, 0x2a, 0x07, 0xd1, 0x20, 0x04, 0x00, 0x14,
+0x0f, 0xe0, 0x08, 0x9b, 0x33, 0x2b, 0x01, 0xd1, 0x01, 0x2a, 0xf7, 0xd0,
+0x04, 0x9a, 0x01, 0x37, 0x97, 0x42, 0x00, 0xd3, 0x00, 0x27, 0x04, 0x9a,
+0x01, 0x35, 0xaa, 0x42, 0xae, 0xd8, 0x00, 0x20, 0xc0, 0x43, 0x09, 0xb0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x4c, 0x2a, 0x00, 0x80,
+0x00, 0x00, 0x00, 0x80, 0xf0, 0xb5, 0x27, 0x4d, 0x68, 0x69, 0x00, 0x28,
+0x06, 0xd0, 0x26, 0x48, 0x00, 0x68, 0x02, 0xf0, 0x2b, 0xf8, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x23, 0x4c, 0x00, 0x26, 0xa0, 0x68, 0x23, 0x4f,
+0x00, 0x28, 0x16, 0xd0, 0x0f, 0xe0, 0x28, 0x6a, 0x02, 0x28, 0x02, 0xd3,
+0x01, 0x20, 0x38, 0x71, 0x0f, 0xe0, 0xa6, 0x60, 0xfd, 0xf7, 0xde, 0xfe,
+0x00, 0x28, 0xea, 0xd1, 0x28, 0x6a, 0x02, 0x28, 0x01, 0xd3, 0x01, 0x20,
+0x38, 0x71, 0xe8, 0x68, 0x00, 0x28, 0x02, 0xd0, 0x38, 0x79, 0x00, 0x28,
+0xe9, 0xd0, 0x68, 0x68, 0x00, 0x28, 0x1b, 0xd0, 0x01, 0x20, 0xa0, 0x60,
+0xfe, 0xf7, 0xbc, 0xfb, 0x00, 0x28, 0xd6, 0xd1, 0x68, 0x68, 0x00, 0x28,
+0xf6, 0xd1, 0x11, 0xe0, 0x00, 0x28, 0xd0, 0xd1, 0x28, 0x6a, 0x02, 0x28,
+0x02, 0xd3, 0x01, 0x20, 0x38, 0x71, 0xca, 0xe7, 0xa6, 0x60, 0xfd, 0xf7,
+0xb9, 0xfe, 0x00, 0x28, 0xc5, 0xd1, 0x28, 0x6a, 0x02, 0x28, 0x01, 0xd3,
+0x01, 0x20, 0x38, 0x71, 0xe8, 0x68, 0x00, 0x28, 0xbd, 0xd0, 0x38, 0x79,
+0x00, 0x28, 0xe7, 0xd0, 0xb9, 0xe7, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80,
+0x5c, 0x04, 0x00, 0x80, 0x4c, 0x2a, 0x00, 0x80, 0x8c, 0x06, 0x00, 0x80,
+0x70, 0x47, 0x00, 0x00, 0x70, 0x47, 0x00, 0x00,
+0x70, 0x47, 0x00, 0x00, 0x90, 0xb5, 0x40, 0x20, 0x1d, 0x49, 0xc0, 0x46,
+0x08, 0x60, 0x01, 0xf0, 0x9d, 0xfc, 0x03, 0x23, 0x1b, 0x07, 0x41, 0x68,
+0x19, 0x40, 0x0c, 0x0f, 0x61, 0x01, 0x09, 0x1b, 0x89, 0x00, 0x18, 0x4a,
+0x8f, 0x18, 0x01, 0x21, 0x39, 0x80, 0x81, 0x6a, 0xc0, 0x46, 0x79, 0x65,
+0x41, 0x6a, 0xc0, 0x46, 0x79, 0x67, 0xb9, 0x6c, 0xfa, 0x6c, 0x89, 0x18,
+0xb9, 0x64, 0x00, 0x21, 0xf9, 0x64, 0xba, 0x6b, 0x3b, 0x6d, 0xd2, 0x18,
+0xba, 0x63, 0x39, 0x65, 0x42, 0x6a, 0x20, 0x32, 0x51, 0x71, 0x79, 0x6d,
+0x7a, 0x6f, 0xd2, 0x6d, 0xc0, 0x46, 0x11, 0x60, 0xfc, 0xf7, 0xca, 0xff,
+0x20, 0x01, 0x09, 0x49, 0x40, 0x18, 0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18,
+0x41, 0x6b, 0x01, 0x39, 0x41, 0x63, 0x78, 0x6f, 0x01, 0xf0, 0xc6, 0xfb,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0,
+0x5c, 0x2b, 0x00, 0x80, 0xa0, 0x1c, 0x00, 0x80, 0xf0, 0xb5, 0x40, 0x20,
+0x12, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x01, 0xf0, 0x59, 0xfc, 0x07, 0x1c,
+0x40, 0x68, 0x03, 0x23, 0x1b, 0x07, 0x18, 0x40, 0x06, 0x0f, 0x70, 0x01,
+0x80, 0x1b, 0x80, 0x00, 0x0c, 0x49, 0x44, 0x18, 0xb8, 0x6a, 0xc0, 0x46,
+0x60, 0x65, 0x78, 0x6a, 0xc0, 0x46, 0x60, 0x67, 0x80, 0x6f, 0x05, 0x1d,
+0xe5, 0x63, 0xb9, 0x69, 0x28, 0x1c, 0x02, 0xf0, 0x89, 0xf9, 0x38, 0x1c,
+0x21, 0x1c, 0x32, 0x1c, 0x2b, 0x1c, 0x00, 0xf0, 0x20, 0xf8, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0xb0, 0x5c, 0x2b, 0x00, 0x80,
+0xf0, 0xb5, 0x4b, 0x6f, 0x9b, 0x6f, 0x1f, 0x1d, 0xcf, 0x63, 0x05, 0x68,
+0x00, 0x23, 0x84, 0x69, 0xa4, 0x08, 0x08, 0xd0, 0x9c, 0x00, 0x2e, 0x59,
+0xc0, 0x46, 0x3e, 0x51, 0x84, 0x69, 0xa4, 0x08, 0x01, 0x33, 0x9c, 0x42,
+0xf6, 0xd8, 0x3b, 0x1c, 0x00, 0xf0, 0x03, 0xf8, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0xff, 0xb5, 0x81, 0xb0, 0x04, 0x1c, 0x1d, 0x1c, 0x0f, 0x1c,
+0x46, 0x48, 0x01, 0x69, 0x01, 0x31, 0x01, 0x61, 0xf9, 0x1d, 0x51, 0x31,
+0xbd, 0x65, 0x00, 0x91, 0x20, 0x1c, 0xfd, 0xf7, 0x5d, 0xfc, 0xf8, 0x6d,
+0x40, 0x09, 0x36, 0xd2, 0xb8, 0x6d, 0x06, 0x7b, 0x43, 0x7b, 0x1b, 0x02,
+0x1e, 0x43, 0x17, 0x21, 0x49, 0x02, 0x01, 0x73, 0x0b, 0x0a, 0x43, 0x73,
+0x00, 0x99, 0x20, 0x1c, 0xfd, 0xf7, 0x4c, 0xfc, 0xb8, 0x6d, 0xc0, 0x46,
+0x06, 0x73, 0x33, 0x0a, 0x43, 0x73, 0xf8, 0x6d, 0x40, 0x09, 0x20, 0xd2,
+0x60, 0x68, 0x01, 0x04, 0x09, 0x0c, 0x03, 0x98, 0x01, 0xf0, 0xcc, 0xfc,
+0x60, 0x68, 0x32, 0x4b, 0x18, 0x43, 0x60, 0x60, 0x20, 0x1c, 0x01, 0xf0,
+0x35, 0xfd, 0x00, 0x25, 0x7d, 0x60, 0xbd, 0x60, 0x3d, 0x64, 0x7d, 0x64,
+0x20, 0x1c, 0xfc, 0xf7, 0x31, 0xff, 0x38, 0x88, 0x40, 0x23, 0x18, 0x43,
+0x38, 0x80, 0x7d, 0x62, 0x29, 0x48, 0xc0, 0x46, 0xb8, 0x62, 0x38, 0x1c,
+0x00, 0xf0, 0xa0, 0xfb, 0x44, 0xe0, 0x20, 0x68, 0x01, 0x23, 0x9b, 0x07,
+0x08, 0x38, 0x18, 0x43, 0x00, 0x68, 0xc0, 0x46, 0x78, 0x64, 0x60, 0x68,
+0x02, 0x04, 0x12, 0x0c, 0x78, 0x6e, 0x01, 0x26, 0xc1, 0x1d, 0x0d, 0x31,
+0x8a, 0x42, 0x02, 0xd2, 0x3a, 0x64, 0x08, 0x1c, 0x0e, 0xe0, 0x41, 0x19,
+0x89, 0x89, 0xf0, 0x23, 0x19, 0x40, 0x09, 0x09, 0x89, 0x00, 0x40, 0x18,
+0xf8, 0x60, 0xf9, 0x61, 0x61, 0x68, 0x09, 0x04, 0x09, 0x0c, 0x81, 0x42,
+0x16, 0xd2, 0x39, 0x64, 0x63, 0x68, 0x19, 0x04, 0x09, 0x0c, 0x40, 0x1a,
+0x03, 0x30, 0x80, 0x08, 0x82, 0x00, 0xa0, 0x61,
+0x20, 0x68, 0x09, 0x18, 0x9b, 0x18, 0x63, 0x60, 0xc3, 0x1f, 0x05, 0x3b,
+0x38, 0x1c, 0x00, 0xf0, 0xb6, 0xfa, 0x7e, 0x80, 0x20, 0x1c, 0x00, 0xf0,
+0xbf, 0xfb, 0x0b, 0xe0, 0xb9, 0x68, 0x08, 0x1a, 0x00, 0x25, 0x78, 0x62,
+0xbd, 0x62, 0x38, 0x1c, 0x00, 0xf0, 0x3c, 0xfc, 0x20, 0x1c, 0x39, 0x1c,
+0x00, 0xf0, 0x64, 0xf8, 0x05, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x0c, 0x2b, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, 0x00, 0xc0,
+0xf0, 0xb5, 0x04, 0x1c, 0x0f, 0x1c, 0x38, 0x6c, 0xf9, 0x6b, 0x0d, 0x18,
+0x21, 0x68, 0x41, 0x18, 0x00, 0x20, 0xa2, 0x69, 0x00, 0x2a, 0x0b, 0xd9,
+0x82, 0x00, 0x56, 0x18, 0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68,
+0xc0, 0x46, 0xab, 0x50, 0xa2, 0x69, 0x01, 0x30, 0x82, 0x42, 0xf3, 0xd8,
+0x78, 0x6e, 0xf9, 0x6b, 0x09, 0x18, 0x89, 0x89, 0xf0, 0x23, 0x19, 0x40,
+0x09, 0x09, 0x89, 0x00, 0x40, 0x18, 0xf8, 0x60, 0xf9, 0x61, 0x20, 0x68,
+0x01, 0x23, 0x9b, 0x07, 0x08, 0x38, 0x18, 0x43, 0x01, 0x68, 0x78, 0x6c,
+0xfc, 0xf7, 0x95, 0xff, 0x78, 0x64, 0x60, 0x68, 0x01, 0x04, 0x09, 0x0c,
+0xf8, 0x68, 0x81, 0x42, 0x19, 0xd2, 0x39, 0x64, 0x63, 0x68, 0x19, 0x04,
+0x09, 0x0c, 0x40, 0x1a, 0x03, 0x30, 0x80, 0x08, 0x82, 0x00, 0xa0, 0x61,
+0x20, 0x68, 0x09, 0x18, 0x9b, 0x18, 0x63, 0x60, 0xc3, 0x1f, 0x05, 0x3b,
+0x38, 0x1c, 0x00, 0xf0, 0x56, 0xfa, 0x01, 0x20, 0x78, 0x80, 0x20, 0x1c,
+0x00, 0xf0, 0x5e, 0xfb, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xb9, 0x68,
+0x08, 0x1a, 0x78, 0x62, 0x00, 0x20, 0xb8, 0x62, 0x38, 0x1c, 0x00, 0xf0,
+0xd9, 0xfb, 0x20, 0x1c, 0x39, 0x1c, 0x00, 0xf0, 0x01, 0xf8, 0xef, 0xe7,
+0xf0, 0xb5, 0x84, 0xb0, 0x04, 0x1c, 0x0f, 0x1c, 0x8e, 0x48, 0x41, 0x69,
+0x01, 0x31, 0x41, 0x61, 0x03, 0x20, 0x00, 0x07, 0x61, 0x68, 0x08, 0x40,
+0x06, 0x0f, 0x0a, 0x04, 0x12, 0x0c, 0x20, 0x68, 0x11, 0x18, 0xfb, 0x68,
+0xd2, 0x1a, 0x7b, 0x68, 0x9d, 0x1a, 0xc3, 0x1f, 0x05, 0x3b, 0x38, 0x1c,
+0x2a, 0x1c, 0x00, 0xf0, 0x26, 0xfa, 0x00, 0x20, 0x78, 0x80, 0x20, 0x1c,
+0x00, 0xf0, 0x2e, 0xfb, 0x60, 0x68, 0x40, 0x19, 0x01, 0x04, 0x09, 0x0c,
+0x60, 0x60, 0x30, 0x1c, 0x01, 0xf0, 0xe0, 0xfb, 0x7d, 0x4e, 0x0b, 0x23,
+0x1b, 0x02, 0xf0, 0x18, 0x00, 0x69, 0x00, 0x28, 0x19, 0xd0, 0x00, 0x25,
+0x2d, 0x23, 0x9b, 0x01, 0xf0, 0x18, 0xc0, 0x68, 0x00, 0x28, 0x12, 0xd0,
+0xaa, 0x00, 0x92, 0x19, 0x2d, 0x23, 0x9b, 0x01, 0xd2, 0x18, 0xd2, 0x68,
+0x20, 0x1c, 0x39, 0x1c, 0x01, 0xf0, 0x1c, 0xfe, 0x01, 0x35, 0xa8, 0x00,
+0x80, 0x19, 0x2d, 0x23, 0x9b, 0x01, 0xc0, 0x18, 0xc0, 0x68, 0x00, 0x28,
+0xec, 0xd1, 0xf8, 0x6b, 0x01, 0x1f, 0x8a, 0x1c, 0xfa, 0x63, 0xfa, 0x68,
+0x7d, 0x6c, 0x00, 0xf0, 0xbb, 0xf9, 0xc0, 0x43, 0x01, 0x04, 0x09, 0x0c,
+0x28, 0x1c, 0xfc, 0xf7, 0x10, 0xff, 0x03, 0x90, 0xf9, 0x6b, 0x3a, 0x6e,
+0x8e, 0x18, 0x20, 0x68, 0x12, 0x18, 0x01, 0x92, 0x7a, 0x6e, 0x8d, 0x18,
+0x11, 0x18, 0x02, 0x91, 0xc8, 0x1d, 0x09, 0x30, 0xe0, 0x60, 0xb1, 0x88,
+0x08, 0x02, 0x09, 0x0a, 0x09, 0x06, 0x09, 0x0e, 0x08, 0x43, 0x00, 0x04,
+0x00, 0x0c, 0x78, 0x61, 0x68, 0x68, 0x01, 0x0e, 0xff, 0x22, 0x12, 0x04,
+0x02, 0x40, 0x12, 0x0a, 0x11, 0x43, 0xff, 0x22, 0x12, 0x02, 0x02, 0x40,
+0x12, 0x02, 0x11, 0x43, 0x00, 0x06, 0x08, 0x43, 0x38, 0x61, 0xa8, 0x89,
+0x09, 0x23, 0x1b, 0x02, 0x18, 0x40, 0xb8, 0x61,
+0xa8, 0x89, 0x98, 0x43, 0xa8, 0x81, 0xa8, 0x89, 0x02, 0x99, 0xc0, 0x46,
+0x88, 0x81, 0x00, 0x20, 0x70, 0x80, 0xb0, 0x80, 0x70, 0x81, 0x68, 0x60,
+0x28, 0x82, 0xb9, 0x6e, 0x30, 0x1c, 0xfc, 0xf7, 0xe8, 0xfe, 0x38, 0x86,
+0xfa, 0x69, 0x30, 0x1c, 0x29, 0x1c, 0xfc, 0xf7, 0x03, 0xff, 0x78, 0x86,
+0x3d, 0x8e, 0x78, 0x8e, 0x03, 0x99, 0xfc, 0xf7, 0xc8, 0xfe, 0x00, 0x90,
+0x60, 0x68, 0x00, 0x04, 0x00, 0x0c, 0x39, 0x6e, 0x41, 0x1a, 0x09, 0x04,
+0x09, 0x0c, 0x7a, 0x6e, 0x82, 0x1a, 0x13, 0x04, 0x1b, 0x0c, 0x1a, 0x02,
+0x1b, 0x0a, 0x1a, 0x43, 0x16, 0x04, 0x36, 0x0c, 0xba, 0x68, 0x82, 0x42,
+0x01, 0xd2, 0x00, 0x20, 0x00, 0xe0, 0x10, 0x1a, 0xb8, 0x60, 0x08, 0x02,
+0x09, 0x12, 0x09, 0x06, 0x09, 0x0e, 0x08, 0x43, 0x01, 0x04, 0x09, 0x0c,
+0x01, 0x98, 0xc0, 0x46, 0x41, 0x80, 0x28, 0x1c, 0xfc, 0xf7, 0xa3, 0xfe,
+0x05, 0x1c, 0x00, 0x98, 0x31, 0x1c, 0xfc, 0xf7, 0x9e, 0xfe, 0x06, 0x1c,
+0x78, 0x69, 0x00, 0x04, 0x00, 0x0c, 0x01, 0x02, 0x00, 0x0a, 0x08, 0x43,
+0x01, 0x04, 0x09, 0x0c, 0x01, 0x98, 0xc0, 0x46, 0x81, 0x80, 0x28, 0x1c,
+0xfc, 0xf7, 0x8f, 0xfe, 0x79, 0x69, 0x01, 0x31, 0xc0, 0x43, 0x79, 0x61,
+0x01, 0x9a, 0xc0, 0x46, 0x50, 0x81, 0x38, 0x69, 0x01, 0x0e, 0xff, 0x22,
+0x12, 0x04, 0x02, 0x40, 0x12, 0x0a, 0x11, 0x43, 0xff, 0x22, 0x12, 0x02,
+0x02, 0x40, 0x12, 0x02, 0x11, 0x43, 0x00, 0x06, 0x01, 0x43, 0x30, 0x1c,
+0xfc, 0xf7, 0x77, 0xfe, 0x39, 0x69, 0x7a, 0x68, 0x89, 0x18, 0x39, 0x61,
+0xb9, 0x68, 0x00, 0x29, 0x09, 0xd1, 0x02, 0x99, 0x89, 0x89, 0xba, 0x69,
+0x11, 0x43, 0x02, 0x9a, 0xc0, 0x46, 0x91, 0x81, 0xb9, 0x69, 0xfc, 0xf7,
+0x66, 0xfe, 0x20, 0x82, 0x00, 0x20, 0x60, 0x82, 0xf8, 0x6d, 0x41, 0x08,
+0x16, 0xd3, 0x80, 0x0a, 0x0a, 0xd3, 0x60, 0x68, 0x10, 0x38, 0x01, 0x04,
+0x09, 0x0c, 0x08, 0x02, 0x09, 0x0a, 0x08, 0x43, 0x21, 0x68, 0xc0, 0x46,
+0x08, 0x82, 0x09, 0xe0, 0x60, 0x68, 0x0c, 0x38, 0x01, 0x04, 0x09, 0x0c,
+0x08, 0x02, 0x09, 0x0a, 0x08, 0x43, 0x21, 0x68, 0xc0, 0x46, 0x88, 0x81,
+0x04, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x0c, 0x2b, 0x00, 0x80,
+0x68, 0x0e, 0x00, 0x80, 0xf1, 0xb5, 0x84, 0xb0, 0x6e, 0x4d, 0x28, 0x69,
+0x01, 0x22, 0x04, 0x99, 0x8a, 0x40, 0x90, 0x43, 0x28, 0x61, 0x04, 0x98,
+0x43, 0x01, 0x18, 0x1a, 0x80, 0x00, 0x16, 0x1c, 0x69, 0x49, 0x44, 0x18,
+0xe0, 0x6b, 0xc0, 0x46, 0x00, 0x90, 0xa0, 0x68, 0x00, 0x28, 0x01, 0xd1,
+0x00, 0x26, 0x26, 0xe0, 0x65, 0x48, 0x41, 0x69, 0x01, 0x31, 0x41, 0x61,
+0x04, 0x98, 0xfc, 0xf7, 0x09, 0xfd, 0x07, 0x1c, 0x03, 0xd1, 0x28, 0x69,
+0x30, 0x43, 0x28, 0x61, 0xb5, 0xe0, 0xa0, 0x68, 0x65, 0x68, 0xa8, 0x42,
+0x00, 0xd2, 0x05, 0x1c, 0xa1, 0x6c, 0xa9, 0x42, 0x16, 0xd2, 0x40, 0x1a,
+0x62, 0x6a, 0x10, 0x1a, 0x00, 0x26, 0x60, 0x62, 0xa6, 0x60, 0xa6, 0x62,
+0x20, 0x88, 0x48, 0x23, 0x18, 0x43, 0x20, 0x80, 0x0d, 0x1c, 0x09, 0xd1,
+0x38, 0x1c, 0xfc, 0xf7, 0x19, 0xfd, 0x03, 0x20, 0x60, 0x80, 0x66, 0x60,
+0x20, 0x1c, 0x00, 0xf0, 0x8d, 0xf9, 0x96, 0xe0, 0xe1, 0x68, 0x38, 0x68,
+0x09, 0x18, 0xc3, 0x1f, 0x05, 0x3b, 0x20, 0x1c, 0x02, 0x39, 0x2a, 0x1c,
+0x00, 0xf0, 0xcd, 0xf8, 0x38, 0x1c, 0x00, 0xf0, 0xd7, 0xf9, 0xe0, 0x68,
+0x46, 0x19, 0x78, 0x68, 0x30, 0x43, 0x78, 0x60, 0x04, 0x98, 0x31, 0x1c,
+0x01, 0xf0, 0x88, 0xfa, 0x21, 0x6e, 0x00, 0x98,
+0x08, 0x18, 0x01, 0x90, 0x70, 0x1a, 0x00, 0x04, 0x00, 0x0c, 0x61, 0x6e,
+0x71, 0x1a, 0x0a, 0x04, 0x12, 0x0c, 0x11, 0x02, 0x12, 0x0a, 0x11, 0x43,
+0x09, 0x04, 0x09, 0x0c, 0x02, 0x91, 0x01, 0x02, 0x00, 0x0a, 0x08, 0x43,
+0x01, 0x04, 0x09, 0x0c, 0x01, 0x98, 0xc0, 0x46, 0x41, 0x80, 0x20, 0x8e,
+0xfc, 0xf7, 0xcb, 0xfd, 0x06, 0x1c, 0x60, 0x8e, 0x02, 0x99, 0xfc, 0xf7,
+0xc6, 0xfd, 0x03, 0x90, 0x60, 0x69, 0x01, 0x04, 0x09, 0x0c, 0x08, 0x02,
+0x09, 0x0a, 0x08, 0x43, 0x01, 0x04, 0x09, 0x0c, 0x01, 0x98, 0xc0, 0x46,
+0x81, 0x80, 0x30, 0x1c, 0xfc, 0xf7, 0xb7, 0xfd, 0x61, 0x69, 0x01, 0x31,
+0xc0, 0x43, 0x61, 0x61, 0x01, 0x99, 0xc0, 0x46, 0x48, 0x81, 0x60, 0x6e,
+0x00, 0x99, 0x46, 0x18, 0x20, 0x69, 0x01, 0x0e, 0xff, 0x22, 0x12, 0x04,
+0x02, 0x40, 0x12, 0x0a, 0x11, 0x43, 0xff, 0x22, 0x12, 0x02, 0x02, 0x40,
+0x12, 0x02, 0x11, 0x43, 0x00, 0x06, 0x01, 0x43, 0x71, 0x60, 0x03, 0x98,
+0xfc, 0xf7, 0x9b, 0xfd, 0x21, 0x69, 0x49, 0x19, 0x21, 0x61, 0xa1, 0x68,
+0x49, 0x1b, 0xa1, 0x60, 0x06, 0xd1, 0xb1, 0x89, 0xa2, 0x69, 0x11, 0x43,
+0xb1, 0x81, 0xa1, 0x69, 0xfc, 0xf7, 0x8d, 0xfd, 0x38, 0x82, 0x61, 0x6e,
+0x38, 0x68, 0x09, 0x18, 0x0e, 0x31, 0xf9, 0x60, 0xe2, 0x68, 0x00, 0x99,
+0x04, 0x38, 0x00, 0xf0, 0x4c, 0xf8, 0x02, 0x20, 0x78, 0x82, 0xe0, 0x6d,
+0x41, 0x08, 0x16, 0xd3, 0x80, 0x0a, 0x0a, 0xd3, 0x78, 0x68, 0x10, 0x38,
+0x01, 0x04, 0x09, 0x0c, 0x08, 0x02, 0x09, 0x0a, 0x08, 0x43, 0x39, 0x68,
+0xc0, 0x46, 0xc8, 0x81, 0x09, 0xe0, 0x78, 0x68, 0x0c, 0x38, 0x01, 0x04,
+0x09, 0x0c, 0x08, 0x02, 0x09, 0x0a, 0x08, 0x43, 0x39, 0x68, 0xc0, 0x46,
+0x48, 0x81, 0x05, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0xd0, 0x2c, 0x00, 0x80, 0x5c, 0x2b, 0x00, 0x80, 0x0c, 0x2b, 0x00, 0x80,
+0xf7, 0xb5, 0x03, 0x1c, 0x0f, 0x1c, 0x00, 0x20, 0x1c, 0x68, 0x26, 0x04,
+0x31, 0x1c, 0x1d, 0x1d, 0xfc, 0xf7, 0x51, 0xfd, 0x40, 0xc7, 0x02, 0x9a,
+0xd1, 0x1c, 0x89, 0x08, 0x01, 0x39, 0x4a, 0x1e, 0x02, 0x92, 0x00, 0x29,
+0x0d, 0xd0, 0x21, 0x0c, 0x10, 0xcd, 0x22, 0x04, 0x0a, 0x43, 0x11, 0x1c,
+0x16, 0x1c, 0xfc, 0xf7, 0x40, 0xfd, 0x40, 0xc7, 0x02, 0x99, 0x4a, 0x1e,
+0x02, 0x92, 0x00, 0x29, 0xf1, 0xd1, 0x03, 0xb0, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x80, 0x08, 0x80, 0x00, 0x89, 0x08, 0x89, 0x00, 0x03, 0x32,
+0x93, 0x08, 0x5a, 0x1e, 0x00, 0x2b, 0x05, 0xd0, 0x08, 0xc9, 0x08, 0xc0,
+0x13, 0x1c, 0x01, 0x3a, 0x00, 0x2b, 0xf9, 0xd1, 0x70, 0x47, 0xff, 0xb5,
+0x86, 0xb0, 0x17, 0x1c, 0x00, 0x26, 0x06, 0x98, 0x80, 0x6c, 0xc0, 0x1b,
+0x06, 0x99, 0xc0, 0x46, 0x88, 0x64, 0x01, 0x20, 0xc0, 0x05, 0x06, 0x99,
+0x89, 0x6b, 0xc0, 0x46, 0x01, 0x91, 0x06, 0x99, 0x4c, 0x6b, 0x67, 0xe0,
+0x21, 0x68, 0xc0, 0x46, 0x02, 0x91, 0x61, 0x68, 0xc0, 0x46, 0x03, 0x91,
+0xa1, 0x68, 0xc0, 0x46, 0x04, 0x91, 0x02, 0xa9, 0x49, 0x88, 0xb9, 0x42,
+0x08, 0xd2, 0x02, 0xad, 0x6d, 0x88, 0x02, 0xa9, 0x49, 0x88, 0x7f, 0x1a,
+0x00, 0x21, 0x02, 0xab, 0x59, 0x80, 0x19, 0xe0, 0x02, 0xa9, 0x49, 0x88,
+0xc9, 0x1b, 0x02, 0xab, 0x59, 0x80, 0x3d, 0x1c, 0x00, 0x27, 0x01, 0x21,
+0x49, 0x06, 0x07, 0x9b, 0x9a, 0x07, 0x92, 0x0f, 0x0d, 0xd0, 0xeb, 0x06,
+0xdb, 0x0e, 0x08, 0xd0, 0x1e, 0x2b, 0x08, 0xd3, 0x1e, 0x2b, 0x02, 0xd1,
+0x03, 0x2a, 0x04, 0xd1, 0x01, 0xe0, 0x02, 0x2a,
+0x01, 0xd3, 0x01, 0x26, 0x00, 0x21, 0x29, 0x43, 0x01, 0x43, 0x0a, 0x1c,
+0x00, 0x91, 0x00, 0x20, 0x03, 0x99, 0x04, 0x9a, 0x07, 0x9b, 0x01, 0xf0,
+0x5b, 0xff, 0x07, 0x99, 0x49, 0x19, 0x07, 0x91, 0x00, 0x2e, 0x0a, 0xd0,
+0x1d, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0x1d, 0x48, 0x01, 0x6d, 0x42, 0x6d,
+0x00, 0x20, 0x07, 0x9b, 0x01, 0xf0, 0x4c, 0xff, 0x00, 0x26, 0x02, 0xa8,
+0x40, 0x88, 0x00, 0x28, 0x0c, 0xd0, 0x03, 0x98, 0x40, 0x19, 0x03, 0x90,
+0x02, 0x98, 0xc0, 0x46, 0x20, 0x60, 0x03, 0x98, 0xc0, 0x46, 0x60, 0x60,
+0x04, 0x98, 0xc0, 0x46, 0xa0, 0x60, 0x03, 0xe0, 0x01, 0x98, 0x01, 0x38,
+0x01, 0x90, 0x10, 0x34, 0x06, 0x98, 0xc0, 0x46, 0x44, 0x63, 0x01, 0x98,
+0x06, 0x99, 0xc0, 0x46, 0x88, 0x63, 0x00, 0x20, 0x00, 0x2f, 0x02, 0xd0,
+0x01, 0x99, 0x00, 0x29, 0x92, 0xd1, 0x09, 0x4a, 0xc0, 0x46, 0x00, 0x92,
+0x06, 0x48, 0x01, 0x6d, 0x42, 0x6d, 0x00, 0x20, 0x09, 0x9b, 0x01, 0xf0,
+0x1f, 0xff, 0x0a, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x01, 0x00, 0x00, 0x02, 0x7c, 0x29, 0x00, 0x80, 0x04, 0x00, 0x53, 0x02,
+0x90, 0xb5, 0x0c, 0x1c, 0x07, 0x1c, 0x38, 0x68, 0x01, 0x23, 0x9b, 0x07,
+0x08, 0x38, 0x18, 0x43, 0x01, 0x68, 0x38, 0x8a, 0xfc, 0xf7, 0x85, 0xfc,
+0xc0, 0x43, 0xf9, 0x68, 0xc0, 0x46, 0x08, 0x80, 0x78, 0x8a, 0x39, 0x68,
+0x08, 0x1a, 0x38, 0x60, 0x38, 0x1c, 0x01, 0xf0, 0x8b, 0xf9, 0x38, 0x1c,
+0xfc, 0xf7, 0x8c, 0xfb, 0x20, 0x1c, 0xff, 0xf7, 0x33, 0xfe, 0x90, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x80, 0xb5, 0x01, 0x88, 0x8a, 0x09, 0x21, 0xd3,
+0xca, 0x09, 0x1f, 0xd2, 0x8a, 0x08, 0x1d, 0xd3, 0x00, 0x21, 0x01, 0x80,
+0x41, 0x80, 0x47, 0x6f, 0x40, 0x6d, 0xfa, 0x1d, 0x19, 0x32, 0x51, 0x71,
+0xfa, 0x6d, 0xc0, 0x46, 0x10, 0x60, 0x3a, 0x6e, 0xc0, 0x46, 0x10, 0x60,
+0x0c, 0x48, 0xc0, 0x46, 0x81, 0x63, 0xc1, 0x6b, 0x49, 0x08, 0x49, 0x00,
+0xc1, 0x63, 0x01, 0x20, 0x00, 0xf0, 0xcc, 0xff, 0x38, 0x1c, 0x00, 0xf0,
+0x6b, 0xff, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x80, 0x23, 0x19, 0x43,
+0x01, 0x80, 0x01, 0x88, 0x49, 0x09, 0xf6, 0xd2, 0x00, 0xf0, 0xb0, 0xf8,
+0xf3, 0xe7, 0x00, 0x00, 0xe8, 0x0e, 0x00, 0x80, 0xf0, 0xb5, 0x07, 0x1c,
+0x10, 0x1c, 0x0d, 0x1c, 0x00, 0x24, 0x5e, 0x1e, 0x00, 0x2b, 0x19, 0xd0,
+0x01, 0x68, 0xc0, 0x46, 0x39, 0x60, 0x41, 0x88, 0x0c, 0x19, 0x41, 0x68,
+0xc0, 0x46, 0x79, 0x60, 0x81, 0x68, 0xc0, 0x46, 0xb9, 0x60, 0xc1, 0x68,
+0xc0, 0x46, 0xf9, 0x60, 0x10, 0x30, 0x10, 0x37, 0xe9, 0x6a, 0x81, 0x42,
+0x02, 0xd8, 0x28, 0x1c, 0x00, 0xf0, 0xec, 0xff, 0x31, 0x1c, 0x01, 0x3e,
+0x00, 0x29, 0xe5, 0xd1, 0x20, 0x1c, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0x21, 0xc1, 0x61, 0x05, 0x49, 0x0a, 0x68, 0x00, 0x2a, 0x01, 0xd1,
+0x08, 0x60, 0x02, 0xe0, 0x4a, 0x68, 0xc0, 0x46, 0xd0, 0x61, 0x48, 0x60,
+0x70, 0x47, 0x00, 0x00, 0xd0, 0x2c, 0x00, 0x80, 0x03, 0x49, 0x08, 0x68,
+0x00, 0x28, 0x02, 0xd0, 0xc2, 0x69, 0xc0, 0x46, 0x0a, 0x60, 0x70, 0x47,
+0xd0, 0x2c, 0x00, 0x80, 0x00, 0x21, 0x81, 0x67, 0x05, 0x49, 0x8a, 0x68,
+0x00, 0x2a, 0x01, 0xd1, 0x88, 0x60, 0x02, 0xe0, 0xca, 0x68, 0xc0, 0x46,
+0x90, 0x67, 0xc8, 0x60, 0x70, 0x47, 0x00, 0x00, 0xd0, 0x2c, 0x00, 0x80,
+0x03, 0x49, 0x88, 0x68, 0x00, 0x28, 0x02, 0xd0, 0x82, 0x6f, 0xc0, 0x46,
+0x8a, 0x60, 0x70, 0x47, 0xd0, 0x2c, 0x00, 0x80,
+0x00, 0xb5, 0x80, 0x20, 0x13, 0x49, 0xc0, 0x46, 0x08, 0x60, 0xff, 0xf7,
+0xd5, 0xff, 0x00, 0x28, 0x1b, 0xd0, 0x03, 0x23, 0x1b, 0x07, 0x41, 0x68,
+0x19, 0x40, 0x0a, 0x0f, 0x51, 0x01, 0x89, 0x1a, 0x89, 0x00, 0x0d, 0x4b,
+0xc9, 0x18, 0x4b, 0x88, 0x00, 0x2b, 0x04, 0xd1, 0x11, 0x1c, 0xff, 0xf7,
+0x3b, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x01, 0x2b, 0x02, 0xd1, 0xff, 0xf7,
+0x05, 0xfc, 0xf8, 0xe7, 0x02, 0x2b, 0xf6, 0xd1, 0xff, 0xf7, 0x4e, 0xfb,
+0xf3, 0xe7, 0x04, 0x48, 0x01, 0x6d, 0x01, 0x31, 0x01, 0x65, 0xee, 0xe7,
+0x00, 0x00, 0x00, 0xb0, 0x5c, 0x2b, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40,
+0x00, 0xb5, 0x20, 0x20, 0x0d, 0x49, 0xc0, 0x46, 0x08, 0x60, 0xff, 0xf7,
+0xbf, 0xff, 0x00, 0x28, 0x0e, 0xd0, 0x01, 0x88, 0x20, 0x23, 0x19, 0x43,
+0x01, 0x80, 0x01, 0x88, 0x10, 0x23, 0x99, 0x43, 0x01, 0x80, 0x01, 0x88,
+0x09, 0x0a, 0x01, 0xd3, 0xff, 0xf7, 0x2e, 0xff, 0x08, 0xbc, 0x18, 0x47,
+0x03, 0x48, 0x01, 0x6d, 0x01, 0x31, 0x01, 0x65, 0xf8, 0xe7, 0x00, 0x00,
+0x00, 0x00, 0x00, 0xb0, 0xa0, 0x82, 0x20, 0x40, 0x98, 0xb5, 0x07, 0x1c,
+0x22, 0x48, 0xc0, 0x46, 0x00, 0x90, 0x22, 0x48, 0xc3, 0x1d, 0x41, 0x33,
+0x41, 0x6d, 0x82, 0x6d, 0x80, 0x6c, 0x00, 0x03, 0x00, 0x0b, 0x9c, 0x68,
+0x01, 0x23, 0x9b, 0x07, 0x23, 0x43, 0x1b, 0x68, 0x98, 0x42, 0x00, 0xd1,
+0x0c, 0xe0, 0x98, 0x42, 0x03, 0xd9, 0x10, 0x1a, 0x59, 0x1a, 0x41, 0x18,
+0x00, 0xe0, 0x19, 0x1a, 0x01, 0x20, 0x10, 0x29, 0x00, 0xd8, 0x00, 0x20,
+0x00, 0x28, 0x1f, 0xd0, 0x78, 0x6a, 0xf9, 0x6a, 0xc0, 0x46, 0x08, 0x60,
+0xb8, 0x6a, 0xf9, 0x6a, 0xc0, 0x46, 0x48, 0x60, 0x10, 0x4a, 0xc0, 0x46,
+0x00, 0x92, 0xfb, 0x6a, 0x0f, 0x48, 0x42, 0x6d, 0x03, 0x20, 0x39, 0x6a,
+0x01, 0xf0, 0xe2, 0xfd, 0x38, 0x88, 0x10, 0x23, 0x18, 0x43, 0x38, 0x80,
+0x38, 0x88, 0x40, 0x23, 0x98, 0x43, 0x38, 0x80, 0x38, 0x1c, 0xff, 0xf7,
+0x55, 0xff, 0x98, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x38, 0x88, 0x40, 0x23,
+0x18, 0x43, 0x38, 0x80, 0xf7, 0xe7, 0x00, 0x00, 0x55, 0x55, 0x55, 0x55,
+0xa8, 0x03, 0x00, 0x80, 0x08, 0x00, 0x11, 0x02, 0x7c, 0x29, 0x00, 0x80,
+0xb0, 0xb5, 0x40, 0x20, 0x2c, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x00, 0xf0,
+0xfd, 0xfe, 0x07, 0x1c, 0x40, 0x68, 0x03, 0x23, 0x1b, 0x07, 0x18, 0x40,
+0x05, 0x0f, 0x68, 0x01, 0x40, 0x1b, 0x80, 0x00, 0x26, 0x49, 0x44, 0x18,
+0x20, 0x88, 0x02, 0x23, 0x18, 0x43, 0x20, 0x80, 0x20, 0x88, 0x41, 0x08,
+0x34, 0xd3, 0x40, 0x08, 0x40, 0x00, 0x20, 0x80, 0xa0, 0x6c, 0xe1, 0x6c,
+0x40, 0x18, 0xa0, 0x64, 0x00, 0x20, 0xe0, 0x64, 0xa1, 0x6b, 0x22, 0x6d,
+0x89, 0x18, 0xa1, 0x63, 0x20, 0x65, 0xb8, 0x6a, 0xc0, 0x46, 0x60, 0x65,
+0x03, 0x23, 0x1b, 0x07, 0x78, 0x68, 0x18, 0x40, 0x78, 0x60, 0x61, 0x68,
+0x36, 0x31, 0x94, 0x29, 0x04, 0xd8, 0x38, 0x23, 0x18, 0x43, 0x78, 0x60,
+0x38, 0x20, 0x03, 0xe0, 0x94, 0x23, 0x18, 0x43, 0x78, 0x60, 0x94, 0x20,
+0xb8, 0x61, 0x39, 0x68, 0x78, 0x68, 0x02, 0x04, 0x12, 0x0c, 0x20, 0x1c,
+0xcb, 0x1f, 0x05, 0x3b, 0xff, 0xf7, 0xd7, 0xfd, 0x02, 0x20, 0x60, 0x80,
+0x38, 0x1c, 0xff, 0xf7, 0xdf, 0xfe, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x38, 0x1c, 0xfc, 0xf7, 0x07, 0xfa, 0x28, 0x01, 0x06, 0x49, 0x40, 0x18,
+0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x41, 0x6b, 0x01, 0x39, 0x41, 0x63,
+0xef, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0,
+0x5c, 0x2b, 0x00, 0x80, 0xa0, 0x1c, 0x00, 0x80, 0x90, 0xb5, 0x00, 0x27,
+0x0f, 0x4c, 0x0d, 0xe0, 0x42, 0x6b, 0x01, 0x3a, 0x42, 0x63, 0x00, 0x2a,
+0x05, 0xdc, 0x02, 0x6b, 0xc0, 0x46, 0x42, 0x63, 0xc0, 0x6a, 0x01, 0xf0,
+0xc6, 0xf9, 0x01, 0x37, 0x0b, 0x2f, 0x07, 0xd2, 0x38, 0x01, 0x00, 0x19,
+0x33, 0x23, 0x9b, 0x01, 0xc0, 0x18, 0x81, 0x6a, 0x00, 0x29, 0xe9, 0xd1,
+0x01, 0x20, 0x40, 0x06, 0x03, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x90, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+0x10, 0x48, 0xc1, 0x68, 0x01, 0x31, 0xc1, 0x60, 0x0f, 0x49, 0xc8, 0x68,
+0x01, 0x28, 0x17, 0xd1, 0xc8, 0x1d, 0x79, 0x30, 0x02, 0x89, 0x00, 0x2a,
+0x12, 0xd0, 0x01, 0x3a, 0x02, 0x81, 0x02, 0x89, 0x00, 0x2a, 0x0d, 0xd1,
+0x42, 0x89, 0x00, 0x2a, 0x08, 0xd1, 0xc9, 0x6f, 0x02, 0x23, 0x0a, 0x68,
+0x1a, 0x43, 0x0a, 0x60, 0x04, 0x21, 0x01, 0x81, 0x01, 0x21, 0x00, 0xe0,
+0x00, 0x21, 0x41, 0x81, 0x70, 0x47, 0x00, 0x00, 0x08, 0x83, 0x20, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0xb0, 0xb5, 0x07, 0x1c, 0x01, 0x23, 0xf8, 0x1d,
+0x69, 0x30, 0x03, 0x73, 0x1e, 0x48, 0xc2, 0x1d, 0x79, 0x32, 0x54, 0x8a,
+0x61, 0x1c, 0x51, 0x82, 0xd5, 0x8a, 0x00, 0x21, 0xac, 0x42, 0x04, 0xdb,
+0xc4, 0x1d, 0x89, 0x34, 0x63, 0x70, 0x51, 0x82, 0xd1, 0x83, 0x01, 0x23,
+0x9b, 0x07, 0x3a, 0x6d, 0x1a, 0x43, 0x12, 0x68, 0xc0, 0x46, 0xba, 0x61,
+0xfb, 0x69, 0x9a, 0x42, 0x06, 0xd1, 0xf8, 0x6c, 0x12, 0x49, 0xc0, 0x46,
+0x08, 0x60, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x79, 0x61, 0x41, 0x69,
+0xfa, 0x6c, 0x91, 0x43, 0x41, 0x61, 0x01, 0x20, 0x00, 0x05, 0xc1, 0x60,
+0x38, 0x69, 0x02, 0x28, 0xf1, 0xd0, 0xb8, 0x69, 0xf9, 0x69, 0x41, 0x1a,
+0x01, 0xd5, 0x78, 0x6d, 0x41, 0x18, 0x38, 0x1c, 0x00, 0xf0, 0x0e, 0xf8,
+0xf9, 0x69, 0x09, 0x18, 0xf9, 0x61, 0x78, 0x6d, 0x81, 0x42, 0xe2, 0xd3,
+0x08, 0x1a, 0xf8, 0x61, 0xdf, 0xe7, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x00, 0x00, 0x00, 0xb0, 0xf8, 0xb5, 0x04, 0x1c, 0x0f, 0x1c, 0xff, 0x23,
+0x21, 0x33, 0x9f, 0x42, 0x01, 0xd9, 0xff, 0x27, 0x21, 0x37, 0xe1, 0x6e,
+0x38, 0x1c, 0x01, 0xf0, 0xcb, 0xfc, 0x2d, 0x4d, 0x00, 0x28, 0x13, 0xd1,
+0xe0, 0x1d, 0x49, 0x30, 0x01, 0x7a, 0x01, 0x23, 0x19, 0x43, 0x01, 0x72,
+0x29, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0x29, 0x48, 0x01, 0x6d, 0x42, 0x6d,
+0x00, 0x20, 0x2b, 0x1c, 0x01, 0xf0, 0xb0, 0xfc, 0x00, 0x20, 0xf8, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x20, 0x69, 0x01, 0x30, 0x20, 0x61, 0x23, 0x49,
+0xc8, 0x1d, 0xb9, 0x30, 0x02, 0x6b, 0x92, 0x00, 0x51, 0x18, 0xc0, 0x31,
+0x0f, 0x61, 0x01, 0x6b, 0x01, 0x31, 0x89, 0x07, 0x89, 0x0f, 0x01, 0x63,
+0x20, 0x6b, 0xc2, 0x19, 0x61, 0x6d, 0x8a, 0x42, 0x03, 0xd8, 0x23, 0x22,
+0x12, 0x05, 0x3a, 0x43, 0x05, 0xe0, 0x09, 0x1a, 0x7e, 0x1a, 0x07, 0xd1,
+0x23, 0x22, 0x12, 0x05, 0x0a, 0x43, 0x00, 0x92, 0x61, 0x6e, 0x09, 0x18,
+0xa2, 0x6e, 0x10, 0xe0, 0x11, 0x22, 0x52, 0x05, 0x0a, 0x43, 0x00, 0x92,
+0x61, 0x6e, 0x09, 0x18, 0x00, 0x20, 0xa2, 0x6e, 0x2b, 0x1c, 0x01, 0xf0,
+0x7d, 0xfc, 0x23, 0x22, 0x12, 0x05, 0x32, 0x43, 0x00, 0x92, 0x61, 0x6e,
+0xa2, 0x6e, 0x00, 0x20, 0x2b, 0x1c, 0x01, 0xf0, 0x73, 0xfc, 0x20, 0x6b,
+0xc0, 0x19, 0x00, 0x09, 0x00, 0x01, 0x61, 0x6d, 0x81, 0x42, 0x00, 0xd8,
+0x40, 0x1a, 0x20, 0x63, 0x38, 0x1c, 0xb8, 0xe7,
+0x44, 0x80, 0x20, 0x40, 0x04, 0x00, 0x1b, 0x02, 0x7c, 0x29, 0x00, 0x80,
+0x68, 0x0e, 0x00, 0x80, 0x80, 0xb5, 0x01, 0x20, 0xc0, 0x03, 0x0d, 0x49,
+0xc0, 0x46, 0x08, 0x60, 0x0c, 0x49, 0xc8, 0x1d, 0x49, 0x30, 0x02, 0x7a,
+0x00, 0x27, 0x00, 0x2a, 0x03, 0xd0, 0x07, 0x72, 0x08, 0x1c, 0xff, 0xf7,
+0x37, 0xff, 0x08, 0x49, 0xc8, 0x1d, 0x49, 0x30, 0x02, 0x7a, 0x00, 0x2a,
+0x03, 0xd0, 0x07, 0x72, 0x08, 0x1c, 0xff, 0xf7, 0x2d, 0xff, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0xb0, 0x64, 0x2d, 0x00, 0x80,
+0xe4, 0x2c, 0x00, 0x80, 0x90, 0xb5, 0x07, 0x1c, 0x10, 0x20, 0x18, 0x49,
+0xc0, 0x46, 0x08, 0x60, 0xf8, 0x68, 0x01, 0x30, 0xf8, 0x60, 0x16, 0x48,
+0xc4, 0x1d, 0xb9, 0x34, 0x61, 0x6b, 0x89, 0x00, 0x09, 0x18, 0xc0, 0x31,
+0x09, 0x69, 0x7a, 0x68, 0x92, 0x00, 0xd2, 0x19, 0x51, 0x64, 0x61, 0x6b,
+0x89, 0x00, 0x08, 0x18, 0xc0, 0x30, 0x01, 0x69, 0x78, 0x68, 0x80, 0x00,
+0xc0, 0x19, 0xc0, 0x6b, 0x01, 0xf0, 0xa2, 0xfa, 0x01, 0x23, 0x78, 0x68,
+0x58, 0x40, 0x78, 0x60, 0x60, 0x6b, 0x01, 0x30, 0x80, 0x07, 0x80, 0x0f,
+0x60, 0x63, 0xf8, 0x1d, 0x19, 0x30, 0x40, 0x79, 0x00, 0x28, 0x02, 0xd1,
+0x38, 0x1c, 0x00, 0xf0, 0x07, 0xf8, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0x00, 0x00, 0xb0, 0x68, 0x0e, 0x00, 0x80, 0x90, 0xb5, 0x07, 0x1c,
+0x39, 0x48, 0xc0, 0x68, 0x00, 0x28, 0x05, 0xd0, 0xb8, 0x6a, 0xc0, 0x68,
+0x80, 0x09, 0x01, 0xd3, 0x02, 0x20, 0x00, 0xe0, 0x78, 0x6f, 0xfc, 0xf7,
+0x59, 0xf8, 0x04, 0x1c, 0x06, 0xd1, 0x01, 0x20, 0xf9, 0x1d, 0x19, 0x31,
+0x08, 0x71, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xf8, 0x6c, 0x2f, 0x49,
+0xc0, 0x46, 0x08, 0x60, 0xba, 0x6a, 0x38, 0x1c, 0x21, 0x1c, 0x00, 0xf0,
+0x59, 0xf8, 0x67, 0x62, 0x00, 0x28, 0x03, 0xd1, 0x20, 0x1c, 0x00, 0xf0,
+0x0b, 0xfd, 0xec, 0xe7, 0xf9, 0x6d, 0x09, 0x68, 0x09, 0x18, 0x09, 0x09,
+0x09, 0x01, 0x7a, 0x6d, 0x8a, 0x42, 0x00, 0xd8, 0x89, 0x1a, 0xa1, 0x62,
+0xb9, 0x68, 0x89, 0x00, 0xc9, 0x19, 0x4a, 0x6c, 0x00, 0x2a, 0x07, 0xd0,
+0x4a, 0x6c, 0x12, 0x1a, 0x4a, 0x64, 0x80, 0x08, 0x80, 0x00, 0xb9, 0x6a,
+0x08, 0x18, 0xb8, 0x62, 0x38, 0x68, 0xb9, 0x6a, 0x80, 0x00, 0xc0, 0x19,
+0x42, 0x6b, 0x91, 0x42, 0x0e, 0xd3, 0x00, 0x21, 0x41, 0x64, 0xb8, 0x6a,
+0x39, 0x68, 0x89, 0x00, 0xc9, 0x19, 0x49, 0x6b, 0x40, 0x1a, 0xb8, 0x62,
+0xb9, 0x68, 0x89, 0x00, 0xc9, 0x19, 0xc9, 0x6b, 0x40, 0x18, 0xb8, 0x62,
+0xb8, 0x68, 0x81, 0x00, 0xc9, 0x19, 0x49, 0x6c, 0x00, 0x29, 0xb8, 0xd1,
+0xb9, 0x6a, 0xfa, 0x6b, 0x91, 0x42, 0xb4, 0xd0, 0x3a, 0x6c, 0x91, 0x42,
+0xb1, 0xd0, 0x01, 0x23, 0x58, 0x40, 0xb8, 0x60, 0x80, 0x00, 0xc0, 0x19,
+0xc0, 0x6b, 0xc0, 0x46, 0xb8, 0x62, 0xf8, 0x68, 0x00, 0x28, 0x01, 0xd0,
+0x01, 0x38, 0xf8, 0x60, 0x38, 0x69, 0x00, 0x28, 0xa1, 0xd0, 0x01, 0x38,
+0x38, 0x61, 0x9e, 0xe7, 0x68, 0x19, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+0xf7, 0xb5, 0x90, 0xb0, 0x04, 0x1c, 0x0d, 0x1c, 0x00, 0x20, 0x05, 0x90,
+0x02, 0x90, 0x00, 0x22, 0x01, 0x92, 0xf9, 0x48, 0xc0, 0x6a, 0xc0, 0x46,
+0xa8, 0x61, 0xa0, 0x68, 0x81, 0x00, 0x09, 0x19, 0x49, 0x6b, 0xc0, 0x46,
+0x20, 0x60, 0xe1, 0x62, 0x12, 0x9a, 0xd0, 0x68, 0xc0, 0x46, 0xa8, 0x60,
+0x12, 0x9a, 0x51, 0x78, 0xc0, 0x46, 0x0c, 0x91, 0xf0, 0x48, 0xc0, 0x46,
+0x03, 0x90, 0xd7, 0x1d, 0x09, 0x37, 0xe0, 0x6a,
+0xc1, 0x1b, 0x09, 0x09, 0xe3, 0x1d, 0x19, 0x33, 0x0c, 0x9a, 0xc0, 0x46,
+0x0f, 0x93, 0xeb, 0x4b, 0xc0, 0x46, 0x0e, 0x93, 0x91, 0x42, 0x01, 0xd3,
+0xb8, 0x42, 0x21, 0xd8, 0xe1, 0x68, 0x02, 0x29, 0x1e, 0xd2, 0x01, 0x20,
+0x0f, 0x99, 0xc0, 0x46, 0x48, 0x71, 0x00, 0x20, 0x03, 0x99, 0x01, 0xf0,
+0x57, 0xfb, 0x00, 0x28, 0x03, 0xd1, 0x0e, 0x9b, 0xd8, 0x6b, 0x01, 0x30,
+0xd8, 0x63, 0x01, 0x20, 0x80, 0x06, 0x00, 0x27, 0x68, 0x60, 0xaf, 0x61,
+0xdd, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0xdd, 0x48, 0x01, 0x6d, 0x42, 0x6d,
+0xdc, 0x4b, 0x00, 0x20, 0x01, 0xf0, 0x3a, 0xfb, 0x38, 0x1c, 0x5c, 0xe3,
+0xb8, 0x42, 0x03, 0xd8, 0x20, 0x1c, 0x00, 0xf0, 0x7b, 0xfc, 0x07, 0x1c,
+0xd7, 0x48, 0xc0, 0x68, 0x00, 0x28, 0x64, 0xd0, 0x38, 0x78, 0x40, 0x07,
+0x40, 0x0f, 0x03, 0x28, 0x60, 0xd1, 0x05, 0x98, 0x01, 0x30, 0x00, 0x06,
+0x00, 0x0e, 0x05, 0x90, 0x38, 0x78, 0xf0, 0x23, 0x18, 0x40, 0x58, 0xd1,
+0xe0, 0x6a, 0xc0, 0x1b, 0x00, 0x09, 0x0c, 0x99, 0x88, 0x42, 0x02, 0xd2,
+0xe0, 0x68, 0x02, 0x28, 0x05, 0xd3, 0xcb, 0x49, 0x88, 0x68, 0x00, 0xf0,
+0x83, 0xff, 0x06, 0x1c, 0x06, 0xd1, 0x03, 0x9b, 0x28, 0x1c, 0x39, 0x1c,
+0x22, 0x1c, 0x00, 0xf0, 0x8b, 0xfc, 0x16, 0xe1, 0x2e, 0x62, 0xf8, 0x68,
+0x00, 0x28, 0x0d, 0xd0, 0xb8, 0x89, 0x00, 0x28, 0x03, 0xd0, 0xc1, 0x49,
+0xc9, 0x68, 0x00, 0xf0, 0x70, 0xff, 0xf8, 0x89, 0x00, 0x28, 0x03, 0xd0,
+0xbd, 0x49, 0xc9, 0x68, 0x00, 0xf0, 0x69, 0xff, 0x7a, 0x68, 0xc0, 0x46,
+0x72, 0x61, 0xb9, 0x68, 0xc0, 0x46, 0xb1, 0x61, 0x30, 0x1c, 0xb8, 0x49,
+0x09, 0x68, 0x00, 0xf0, 0x5e, 0xff, 0x00, 0x28, 0x17, 0xd1, 0x30, 0x1c,
+0xb4, 0x49, 0x49, 0x68, 0x00, 0xf0, 0x57, 0xff, 0x10, 0x37, 0xe0, 0x6a,
+0xb8, 0x42, 0x03, 0xd8, 0x20, 0x1c, 0x00, 0xf0, 0x27, 0xfc, 0x07, 0x1c,
+0x68, 0x68, 0xaf, 0x4b, 0x18, 0x43, 0x68, 0x60, 0x00, 0x20, 0xa8, 0x61,
+0xac, 0x23, 0xa8, 0x68, 0x98, 0x43, 0xa8, 0x60, 0xb0, 0xe0, 0xa8, 0x69,
+0xa8, 0x28, 0x01, 0xd2, 0xa8, 0x20, 0xa8, 0x61, 0x10, 0x37, 0xe0, 0x6a,
+0xb8, 0x42, 0x6c, 0xd8, 0x9c, 0xe0, 0xa5, 0xe0, 0xa4, 0xe0, 0x10, 0x28,
+0x68, 0xd1, 0x03, 0x23, 0x1b, 0x07, 0x68, 0x68, 0x18, 0x40, 0x01, 0x0f,
+0x48, 0x01, 0x40, 0x1a, 0x80, 0x00, 0xa0, 0x4a, 0x82, 0x18, 0x01, 0x92,
+0x78, 0x88, 0x42, 0x0b, 0x31, 0xd3, 0x82, 0x0b, 0x2f, 0xd3, 0x9d, 0x48,
+0xc0, 0x46, 0x03, 0x90, 0x02, 0x20, 0x01, 0x9a, 0xc0, 0x46, 0x10, 0x80,
+0x78, 0x88, 0x00, 0x05, 0x00, 0x0d, 0x01, 0x9a, 0xc0, 0x46, 0x50, 0x60,
+0xb8, 0x68, 0x01, 0x9a, 0xc0, 0x46, 0x90, 0x60, 0x78, 0x68, 0x01, 0x9a,
+0xc0, 0x46, 0x10, 0x62, 0x00, 0x20, 0x01, 0x9a, 0xc0, 0x46, 0x90, 0x64,
+0x01, 0x9a, 0xc0, 0x46, 0x90, 0x63, 0x88, 0x02, 0x8f, 0x49, 0x40, 0x18,
+0x01, 0x9a, 0xc0, 0x46, 0x50, 0x63, 0x01, 0x9a, 0x50, 0x68, 0x36, 0x30,
+0x94, 0x28, 0x01, 0xd8, 0x38, 0x20, 0x00, 0xe0, 0x94, 0x20, 0xa8, 0x61,
+0x10, 0x37, 0xe0, 0x6a, 0xb8, 0x42, 0x28, 0xd8, 0x58, 0xe0, 0x7a, 0x88,
+0x92, 0x0b, 0x03, 0xd3, 0x85, 0x48, 0xc0, 0x46, 0x03, 0x90, 0x23, 0xe0,
+0x01, 0x22, 0x12, 0x03, 0x02, 0x40, 0x83, 0x4b, 0x1d, 0xd0, 0x03, 0x93,
+0x00, 0x05, 0x00, 0x0d, 0x01, 0x9a, 0xc0, 0x46, 0x50, 0x60, 0xb8, 0x68,
+0x01, 0x9a, 0xc0, 0x46, 0x90, 0x60, 0x78, 0x68, 0x01, 0x9a, 0xc0, 0x46,
+0x10, 0x62, 0x00, 0x20, 0x01, 0x9a, 0xc0, 0x46,
+0x90, 0x64, 0x01, 0x9a, 0xc0, 0x46, 0x90, 0x63, 0x88, 0x02, 0x75, 0x49,
+0x40, 0x18, 0x01, 0x9a, 0xc0, 0x46, 0x50, 0x63, 0x02, 0xe0, 0x33, 0xe0,
+0x2a, 0xe0, 0x03, 0x93, 0x01, 0x20, 0x0f, 0x99, 0xc0, 0x46, 0x48, 0x71,
+0x12, 0x9a, 0x50, 0x78, 0x05, 0x99, 0x43, 0x1a, 0x0b, 0x93, 0x10, 0x37,
+0xe0, 0x6a, 0xb8, 0x42, 0x03, 0xd8, 0x20, 0x1c, 0x00, 0xf0, 0x92, 0xfb,
+0x07, 0x1c, 0x01, 0x9a, 0x50, 0x6b, 0x91, 0x6b, 0x09, 0x01, 0x40, 0x18,
+0x0b, 0x9b, 0x21, 0x1c, 0x3a, 0x1c, 0xff, 0xf7, 0x7d, 0xfb, 0x01, 0x9a,
+0xc0, 0x46, 0xd0, 0x64, 0x01, 0x9a, 0x0b, 0x9b, 0xc0, 0x46, 0x13, 0x65,
+0x01, 0x23, 0x5b, 0x06, 0x68, 0x68, 0x18, 0x43, 0x68, 0x60, 0x00, 0x20,
+0xa8, 0x61, 0x0d, 0xe0, 0x10, 0x37, 0xe0, 0x6a, 0xb8, 0x42, 0x03, 0xd8,
+0x20, 0x1c, 0x00, 0xf0, 0x71, 0xfb, 0x07, 0x1c, 0x38, 0x78, 0x40, 0x07,
+0x40, 0x0f, 0x03, 0x28, 0x00, 0xd1, 0xf8, 0xe6, 0xa8, 0x69, 0x03, 0x99,
+0x01, 0xf0, 0x26, 0xfa, 0x00, 0x28, 0x2a, 0xd1, 0x38, 0x1c, 0x21, 0x1c,
+0x00, 0xf0, 0x79, 0xfb, 0xa8, 0x68, 0x80, 0x09, 0x04, 0xd3, 0x30, 0x1c,
+0x49, 0x49, 0x49, 0x68, 0x00, 0xf0, 0x81, 0xfe, 0x41, 0x49, 0x00, 0x20,
+0x01, 0xf0, 0x14, 0xfa, 0x00, 0x28, 0x04, 0xd1, 0x0e, 0x9b, 0xd8, 0x6b,
+0x01, 0x30, 0xd8, 0x63, 0x11, 0xe0, 0x01, 0x20, 0x0f, 0x99, 0xc0, 0x46,
+0x48, 0x71, 0x80, 0x06, 0x00, 0x27, 0x68, 0x60, 0xaf, 0x61, 0x3a, 0x4a,
+0xc0, 0x46, 0x00, 0x92, 0x39, 0x48, 0x01, 0x6d, 0x42, 0x6d, 0x39, 0x4b,
+0x00, 0x20, 0x01, 0xf0, 0xf3, 0xf9, 0x00, 0x20, 0x15, 0xe2, 0x05, 0x98,
+0x0c, 0x99, 0x08, 0x1a, 0x00, 0x04, 0x00, 0x0c, 0x0c, 0x90, 0x0b, 0x90,
+0x0c, 0x98, 0x00, 0x28, 0x03, 0xd0, 0x01, 0x20, 0x0f, 0x99, 0xc0, 0x46,
+0x48, 0x71, 0x28, 0x68, 0xc0, 0x46, 0x04, 0x90, 0x00, 0x26, 0x00, 0x20,
+0x08, 0x90, 0x00, 0x22, 0x0a, 0x92, 0x0c, 0x98, 0x01, 0x38, 0x0d, 0x90,
+0xa3, 0xe0, 0x78, 0x88, 0x8a, 0x1b, 0x12, 0x04, 0x12, 0x0c, 0x90, 0x42,
+0x05, 0xdd, 0x07, 0x92, 0x80, 0x1a, 0x00, 0x04, 0x00, 0x0c, 0x08, 0x90,
+0x00, 0xe0, 0x07, 0x90, 0x08, 0x98, 0x00, 0x28, 0x07, 0xd1, 0x0d, 0x98,
+0x0a, 0x9a, 0x90, 0x42, 0x07, 0xdd, 0x07, 0x98, 0x30, 0x18, 0x88, 0x42,
+0x03, 0xd8, 0x01, 0x20, 0x40, 0x05, 0x06, 0x90, 0x1c, 0xe0, 0x11, 0x20,
+0x40, 0x05, 0x06, 0x90, 0xa8, 0x68, 0x8c, 0x23, 0x18, 0x40, 0x02, 0xd1,
+0x20, 0x48, 0xc0, 0x46, 0x06, 0x90, 0xb1, 0x07, 0x89, 0x0f, 0x0f, 0xd0,
+0x07, 0x98, 0xc0, 0x06, 0xc0, 0x0e, 0x08, 0xd0, 0x1e, 0x28, 0x09, 0xdb,
+0x1e, 0x28, 0x02, 0xd1, 0x03, 0x29, 0x05, 0xd1, 0x01, 0xe0, 0x02, 0x29,
+0x02, 0xd3, 0x01, 0x20, 0x02, 0x90, 0xde, 0xe7, 0x0a, 0x9a, 0x00, 0x2a,
+0x04, 0xd1, 0x01, 0x23, 0xdb, 0x05, 0x06, 0x98, 0x18, 0x43, 0x06, 0x90,
+0x07, 0x98, 0x06, 0x99, 0x08, 0x43, 0x02, 0x1c, 0x00, 0x90, 0x04, 0x98,
+0x83, 0x19, 0x1d, 0xe0, 0xe8, 0x0e, 0x00, 0x80, 0x01, 0x49, 0xff, 0xff,
+0x28, 0x0f, 0x00, 0x80, 0x04, 0x00, 0x12, 0x02, 0x7c, 0x29, 0x00, 0x80,
+0x44, 0x80, 0x20, 0x40, 0x68, 0x19, 0x00, 0x80, 0x60, 0x04, 0x00, 0x80,
+0x00, 0x00, 0x00, 0x80, 0x5c, 0x2b, 0x00, 0x80, 0x55, 0x32, 0xff, 0xff,
+0xac, 0x5e, 0x21, 0x40, 0x0d, 0x3d, 0xff, 0xff, 0xcd, 0x31, 0xff, 0xff,
+0x00, 0x00, 0x32, 0x02, 0x00, 0x20, 0x3a, 0x1d, 0x06, 0xca, 0x01, 0xf0,
+0x6b, 0xf9, 0x07, 0x98, 0x36, 0x18, 0x02, 0x98,
+0x00, 0x28, 0x16, 0xd0, 0xa8, 0x68, 0x8c, 0x23, 0x18, 0x40, 0x04, 0xd1,
+0x09, 0x23, 0x5b, 0x04, 0x06, 0x98, 0x18, 0x43, 0x06, 0x90, 0x06, 0x98,
+0xc2, 0x4a, 0x02, 0x43, 0x00, 0x92, 0x04, 0x98, 0x83, 0x19, 0xc1, 0x48,
+0x01, 0x6d, 0x42, 0x6d, 0x00, 0x20, 0x01, 0xf0, 0x51, 0xf9, 0x00, 0x20,
+0x02, 0x90, 0x08, 0x98, 0x00, 0x28, 0x0b, 0xd1, 0x0b, 0x9b, 0x01, 0x3b,
+0x0b, 0x93, 0x10, 0x37, 0xe0, 0x6a, 0xb8, 0x42, 0x0c, 0xd8, 0x20, 0x1c,
+0x00, 0xf0, 0x8a, 0xfa, 0x07, 0x1c, 0x07, 0xe0, 0x78, 0x68, 0x07, 0x9a,
+0x80, 0x18, 0x78, 0x60, 0x78, 0x88, 0x07, 0x9a, 0x80, 0x1a, 0x78, 0x80,
+0x0a, 0x9a, 0x50, 0x1c, 0x02, 0x04, 0x12, 0x0c, 0x0a, 0x92, 0x0c, 0x98,
+0x0a, 0x9a, 0x82, 0x42, 0x03, 0xda, 0xa9, 0x69, 0xb1, 0x42, 0x00, 0xd9,
+0x53, 0xe7, 0xa8, 0x69, 0xb0, 0x42, 0x6b, 0xd1, 0xa8, 0x68, 0x01, 0x09,
+0x69, 0xd2, 0x08, 0x9a, 0x00, 0x2a, 0x56, 0xd0, 0x0c, 0x99, 0x0a, 0x9a,
+0x8a, 0x42, 0x3e, 0xdb, 0xb1, 0x07, 0x89, 0x0f, 0x0c, 0xd0, 0x08, 0x9a,
+0xd2, 0x06, 0xd2, 0x0e, 0x0b, 0xd0, 0x1e, 0x2a, 0x06, 0xdb, 0x1e, 0x2a,
+0x02, 0xd1, 0x03, 0x29, 0x05, 0xd0, 0x01, 0xe0, 0x02, 0x29, 0x02, 0xd2,
+0x02, 0x99, 0x00, 0x29, 0x21, 0xd0, 0x08, 0x9a, 0xc0, 0x46, 0x00, 0x92,
+0x04, 0x98, 0x83, 0x19, 0x00, 0x20, 0x3a, 0x1d, 0x06, 0xca, 0x01, 0xf0,
+0x01, 0xf9, 0x08, 0x98, 0x36, 0x18, 0xa8, 0x68, 0x8c, 0x23, 0x18, 0x40,
+0x02, 0xd0, 0x01, 0x20, 0x40, 0x06, 0x00, 0xe0, 0x92, 0x48, 0x01, 0x22,
+0x02, 0x43, 0x00, 0x92, 0x04, 0x98, 0x83, 0x19, 0x8e, 0x48, 0x01, 0x6d,
+0x42, 0x6d, 0x00, 0x20, 0x01, 0xf0, 0xec, 0xf8, 0x00, 0x20, 0x02, 0x90,
+0x15, 0xe0, 0x8c, 0x23, 0x18, 0x40, 0x02, 0xd0, 0x01, 0x20, 0x40, 0x06,
+0x00, 0xe0, 0x88, 0x48, 0x08, 0x9a, 0x02, 0x43, 0x00, 0xe0, 0x08, 0x9a,
+0xc0, 0x46, 0x00, 0x92, 0x04, 0x98, 0x83, 0x19, 0x00, 0x20, 0x3a, 0x1d,
+0x06, 0xca, 0x01, 0xf0, 0xd5, 0xf8, 0x08, 0x98, 0x36, 0x18, 0x10, 0x37,
+0xe0, 0x6a, 0xb8, 0x42, 0x03, 0xd8, 0x20, 0x1c, 0x00, 0xf0, 0x14, 0xfa,
+0x07, 0x1c, 0x68, 0x68, 0x80, 0x0e, 0x6b, 0xd2, 0x0a, 0x98, 0xc0, 0x46,
+0x09, 0x90, 0x0c, 0x99, 0x88, 0x42, 0x5c, 0xda, 0x0d, 0x98, 0x09, 0x99,
+0x88, 0x42, 0x03, 0xd0, 0x7a, 0x88, 0x1e, 0xe0, 0x5f, 0xe0, 0x5e, 0xe0,
+0x78, 0x88, 0x01, 0x22, 0x52, 0x06, 0x02, 0x43, 0xa9, 0x68, 0x8c, 0x23,
+0x19, 0x40, 0x02, 0xd1, 0x09, 0x23, 0x5b, 0x04, 0x1a, 0x43, 0xb1, 0x07,
+0x89, 0x0f, 0x0e, 0xd0, 0xc3, 0x06, 0xdb, 0x0e, 0x08, 0xd0, 0x1e, 0x2b,
+0x09, 0xdb, 0x1e, 0x2b, 0x02, 0xd1, 0x03, 0x29, 0x05, 0xd1, 0x01, 0xe0,
+0x02, 0x29, 0x02, 0xd3, 0x01, 0x21, 0x02, 0x91, 0x02, 0x1c, 0x09, 0x98,
+0x00, 0x28, 0x02, 0xd1, 0x01, 0x23, 0xdb, 0x05, 0x1a, 0x43, 0x00, 0x92,
+0x04, 0x98, 0x83, 0x19, 0x00, 0x20, 0x3a, 0x1d, 0x06, 0xca, 0x01, 0xf0,
+0x8f, 0xf8, 0x78, 0x88, 0x86, 0x19, 0x10, 0x37, 0x02, 0x98, 0x00, 0x28,
+0x14, 0xd0, 0xa8, 0x68, 0x8c, 0x23, 0x18, 0x40, 0x02, 0xd0, 0x01, 0x20,
+0x40, 0x06, 0x00, 0xe0, 0x57, 0x48, 0x01, 0x22, 0x02, 0x43, 0x00, 0x92,
+0x04, 0x98, 0x83, 0x19, 0x53, 0x48, 0x01, 0x6d, 0x42, 0x6d, 0x00, 0x20,
+0x01, 0xf0, 0x76, 0xf8, 0x00, 0x20, 0x02, 0x90, 0xe0, 0x6a, 0xb8, 0x42,
+0x03, 0xd8, 0x20, 0x1c, 0x00, 0xf0, 0xb6, 0xf9, 0x07, 0x1c, 0x09, 0x98,
+0x01, 0x30, 0x00, 0x04, 0x00, 0x0c, 0x09, 0x90,
+0x0c, 0x99, 0x88, 0x42, 0xa2, 0xdb, 0x68, 0x68, 0x30, 0x43, 0x01, 0x04,
+0x09, 0x0c, 0x68, 0x60, 0xe8, 0x6a, 0x00, 0xf0, 0x7b, 0xfa, 0x28, 0xe0,
+0x27, 0xe0, 0xa8, 0x68, 0x00, 0x09, 0x14, 0xd3, 0x68, 0x68, 0x80, 0x0e,
+0x15, 0xd2, 0x01, 0x9a, 0x00, 0x2a, 0x12, 0xd0, 0x01, 0x9a, 0x50, 0x6b,
+0x0b, 0x9b, 0x21, 0x1c, 0x3a, 0x1c, 0xff, 0xf7, 0x89, 0xf9, 0x01, 0x9a,
+0xc0, 0x46, 0x90, 0x64, 0x01, 0x9a, 0x0b, 0x9b, 0xc0, 0x46, 0x93, 0x63,
+0x03, 0xe0, 0xe8, 0x6a, 0x31, 0x1c, 0x00, 0xf0, 0x5d, 0xfa, 0x68, 0x68,
+0x30, 0x43, 0x68, 0x60, 0xa8, 0x69, 0xb0, 0x42, 0x05, 0xd9, 0x00, 0x04,
+0x00, 0x0c, 0x80, 0x1b, 0x00, 0xf0, 0xee, 0xf9, 0xae, 0x61, 0xa8, 0x68,
+0x8c, 0x23, 0x18, 0x40, 0x0b, 0xd0, 0x2f, 0x4a, 0xc0, 0x46, 0x00, 0x92,
+0x04, 0x98, 0xc3, 0x1f, 0x05, 0x3b, 0x2a, 0x48, 0x01, 0x6d, 0x42, 0x6d,
+0x00, 0x20, 0x01, 0xf0, 0x23, 0xf8, 0x01, 0x23, 0x9b, 0x07, 0x20, 0x6d,
+0x18, 0x43, 0x00, 0x68, 0xc0, 0x46, 0xa0, 0x61, 0xe1, 0x69, 0x81, 0x42,
+0x12, 0xd0, 0x22, 0x69, 0x02, 0x2a, 0x0f, 0xd2, 0x41, 0x1a, 0x01, 0xd5,
+0x60, 0x6d, 0x41, 0x18, 0x20, 0x1c, 0xff, 0xf7, 0x3f, 0xfb, 0xe1, 0x69,
+0x40, 0x18, 0xe0, 0x61, 0x61, 0x6d, 0x88, 0x42, 0x24, 0xd3, 0x40, 0x1a,
+0xe0, 0x61, 0x21, 0xe0, 0x81, 0x42, 0x1f, 0xd1, 0x20, 0x69, 0x02, 0x28,
+0x1c, 0xd2, 0x01, 0x20, 0x60, 0x61, 0x18, 0x48, 0x41, 0x69, 0xe2, 0x6c,
+0x0a, 0x43, 0x42, 0x61, 0x81, 0x69, 0xe3, 0x6c, 0x99, 0x43, 0x81, 0x61,
+0x01, 0x21, 0x09, 0x05, 0xca, 0x60, 0x80, 0x69, 0xc0, 0x46, 0x08, 0x61,
+0x8b, 0x02, 0x20, 0x6d, 0x18, 0x43, 0x00, 0x68, 0xc0, 0x46, 0xa0, 0x61,
+0xe1, 0x69, 0x81, 0x42, 0x02, 0xd0, 0x20, 0x1c, 0xff, 0xf7, 0xcc, 0xfa,
+0x28, 0x1c, 0x00, 0xf0, 0x0f, 0xf9, 0x0c, 0x98, 0x05, 0x99, 0x40, 0x18,
+0x00, 0x01, 0x10, 0x30, 0x68, 0x61, 0x13, 0xb0, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x7c, 0x29, 0x00, 0x80,
+0x00, 0x00, 0x12, 0x02, 0x04, 0x00, 0x52, 0x02, 0x68, 0x0e, 0x00, 0x80,
+0xf0, 0xb5, 0x40, 0x20, 0x2d, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x00, 0xf0,
+0x03, 0xf9, 0x07, 0x1c, 0x81, 0x69, 0x44, 0x6a, 0xa0, 0x6f, 0x00, 0xf0,
+0x45, 0xfe, 0x00, 0x20, 0xe1, 0x1d, 0x19, 0x31, 0x48, 0x71, 0x79, 0x68,
+0xc9, 0x0e, 0x09, 0xd3, 0xf8, 0x6a, 0x00, 0x01, 0x24, 0x49, 0x40, 0x18,
+0x24, 0x4b, 0xc0, 0x18, 0x01, 0x68, 0x01, 0x39, 0x01, 0x60, 0x36, 0xe0,
+0xe1, 0x6d, 0x09, 0x68, 0x22, 0x6e, 0xc0, 0x46, 0x11, 0x60, 0x20, 0x4e,
+0xf5, 0x1d, 0x79, 0x35, 0x01, 0x23, 0xe9, 0x6b, 0x19, 0x43, 0xe9, 0x63,
+0xb9, 0x6a, 0xe2, 0x6d, 0xc0, 0x46, 0x11, 0x60, 0xb9, 0x6a, 0x22, 0x6e,
+0xc0, 0x46, 0x11, 0x60, 0x61, 0x69, 0x00, 0x29, 0x04, 0xd1, 0xa9, 0x6b,
+0x01, 0x31, 0xa9, 0x63, 0x08, 0x29, 0x07, 0xd3, 0xa8, 0x63, 0x01, 0x20,
+0x00, 0xf0, 0x86, 0xf8, 0xe8, 0x6b, 0x40, 0x08, 0x40, 0x00, 0xe8, 0x63,
+0x78, 0x68, 0x81, 0x0e, 0x0f, 0xd2, 0x0b, 0x23, 0x1b, 0x02, 0xf1, 0x18,
+0xc9, 0x68, 0x00, 0x29, 0x06, 0xd0, 0x00, 0x08, 0x04, 0xd2, 0x20, 0x1c,
+0x39, 0x1c, 0x00, 0xf0, 0x43, 0xf8, 0x02, 0xe0, 0x38, 0x1c, 0x00, 0xf0,
+0x05, 0xfa, 0x38, 0x1c, 0xfb, 0xf7, 0x06, 0xfc, 0x20, 0x1c, 0x00, 0xf0,
+0x0b, 0xf8, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0xb0,
+0xa0, 0x1c, 0x00, 0x80, 0xb4, 0x0c, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0x80, 0xb5, 0x07, 0x1c, 0xf8, 0x1d, 0x19, 0x30,
+0x01, 0x79, 0x00, 0x29, 0x04, 0xd0, 0x00, 0x21, 0x01, 0x71, 0x38, 0x1c,
+0xff, 0xf7, 0x56, 0xfb, 0xf8, 0x68, 0x02, 0x28, 0x0d, 0xd0, 0xb8, 0x68,
+0x80, 0x00, 0xc2, 0x19, 0x50, 0x6c, 0x00, 0x28, 0x11, 0xd0, 0xb8, 0x6a,
+0x41, 0x78, 0x09, 0x01, 0x10, 0x31, 0x52, 0x6b, 0x10, 0x1a, 0x88, 0x42,
+0x05, 0xd3, 0x38, 0x1c, 0xff, 0xf7, 0x42, 0xfb, 0x80, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x38, 0x1c, 0xff, 0xf7, 0x28, 0xfa, 0xf8, 0xe7, 0x78, 0x68,
+0x80, 0x00, 0xc0, 0x19, 0xc0, 0x6b, 0xc0, 0x46, 0xb8, 0x62, 0xf1, 0xe7,
+0xb0, 0xb5, 0x87, 0xb0, 0x0f, 0x1c, 0x80, 0x6f, 0xc0, 0x46, 0x00, 0x90,
+0x00, 0x24, 0x13, 0x4d, 0x0b, 0x23, 0x1b, 0x02, 0xe8, 0x18, 0x80, 0x69,
+0x00, 0x28, 0x17, 0xd0, 0x69, 0x46, 0xa2, 0x00, 0x52, 0x19, 0x0b, 0x23,
+0x1b, 0x02, 0xd2, 0x18, 0x92, 0x69, 0x38, 0x1c, 0x00, 0xf0, 0x92, 0xfb,
+0x00, 0x28, 0x09, 0xd1, 0x01, 0x34, 0xa0, 0x00, 0x40, 0x19, 0x0b, 0x23,
+0x1b, 0x02, 0xc0, 0x18, 0x80, 0x69, 0x00, 0x28, 0xea, 0xd1, 0x01, 0xe0,
+0x01, 0x28, 0x02, 0xd0, 0x38, 0x1c, 0x00, 0xf0, 0x9d, 0xf9, 0x07, 0xb0,
+0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0xb8, 0xb5, 0xc2, 0x07, 0xd2, 0x0f, 0x16, 0x4c, 0x16, 0x49, 0x01, 0xd0,
+0x08, 0x22, 0x08, 0xe0, 0x82, 0x08, 0x05, 0xd3, 0x0c, 0x22, 0xa4, 0x18,
+0x0b, 0x68, 0xdf, 0x1d, 0x15, 0x37, 0x03, 0xe0, 0x1c, 0x22, 0x0b, 0x68,
+0xdf, 0x1d, 0x09, 0x37, 0x0f, 0x4b, 0x1d, 0x78, 0x00, 0x2d, 0x13, 0xd0,
+0x5b, 0x78, 0x00, 0x2b, 0x10, 0xd0, 0x01, 0x23, 0x5b, 0x06, 0x1a, 0x43,
+0x00, 0x28, 0x01, 0xd1, 0x5b, 0x08, 0x1a, 0x43, 0x00, 0x92, 0x4a, 0x68,
+0x01, 0x20, 0x39, 0x1c, 0x23, 0x1c, 0x00, 0xf0, 0xdf, 0xfe, 0xb8, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x03, 0x23, 0x1b, 0x06, 0x1a, 0x43, 0xf1, 0xe7,
+0x90, 0xee, 0x20, 0x40, 0x7c, 0x29, 0x00, 0x80, 0xf8, 0x0e, 0x00, 0x80,
+0x00, 0x21, 0xc1, 0x61, 0x05, 0x49, 0x8a, 0x68, 0x00, 0x2a, 0x01, 0xd1,
+0x88, 0x60, 0x02, 0xe0, 0xca, 0x68, 0xc0, 0x46, 0xd0, 0x61, 0xc8, 0x60,
+0x70, 0x47, 0x00, 0x00, 0x28, 0x0f, 0x00, 0x80, 0x03, 0x49, 0x88, 0x68,
+0x00, 0x28, 0x02, 0xd0, 0xc2, 0x69, 0xc0, 0x46, 0x8a, 0x60, 0x70, 0x47,
+0x28, 0x0f, 0x00, 0x80, 0x01, 0x1c, 0x01, 0x23, 0x88, 0x68, 0x58, 0x40,
+0x88, 0x60, 0xca, 0x68, 0x01, 0x3a, 0xca, 0x60, 0x0a, 0x69, 0x01, 0x3a,
+0x80, 0x00, 0x0a, 0x61, 0x42, 0x18, 0xd0, 0x6b, 0x53, 0x6b, 0xc0, 0x46,
+0xcb, 0x62, 0x0b, 0x68, 0x9b, 0x00, 0x59, 0x18, 0x49, 0x6c, 0x53, 0x6c,
+0xc9, 0x18, 0x51, 0x64, 0x70, 0x47, 0x8a, 0x68, 0x92, 0x00, 0x52, 0x18,
+0xd3, 0x6b, 0x83, 0x42, 0x17, 0xd1, 0xd0, 0x1d, 0x3d, 0x30, 0x0a, 0x68,
+0x92, 0x00, 0x52, 0x18, 0x52, 0x6c, 0x03, 0x68, 0x9a, 0x1a, 0x02, 0x60,
+0x01, 0x23, 0x88, 0x68, 0x58, 0x40, 0x88, 0x60, 0xca, 0x68, 0x01, 0x32,
+0xca, 0x60, 0x0a, 0x69, 0x01, 0x32, 0x80, 0x00, 0x40, 0x18, 0x0a, 0x61,
+0x40, 0x6b, 0xc0, 0x46, 0xc8, 0x62, 0x70, 0x47, 0xb8, 0xb5, 0x04, 0x1c,
+0x1d, 0x1c, 0x17, 0x1c, 0x08, 0x1c, 0x39, 0x1c, 0xff, 0xf7, 0xd9, 0xff,
+0x00, 0x20, 0x29, 0x1c, 0x00, 0xf0, 0x7c, 0xfe, 0x01, 0x20, 0xf9, 0x1d,
+0x19, 0x31, 0x48, 0x71, 0x80, 0x06, 0x60, 0x60, 0x00, 0x20, 0xa0, 0x61,
+0x06, 0x4a, 0xc0, 0x46, 0x00, 0x92, 0x06, 0x48,
+0x01, 0x6d, 0x42, 0x6d, 0x05, 0x4b, 0x00, 0x20, 0x00, 0xf0, 0x62, 0xfe,
+0xb8, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x04, 0x00, 0x12, 0x02,
+0x7c, 0x29, 0x00, 0x80, 0x44, 0x80, 0x20, 0x40, 0x06, 0x49, 0x0a, 0x68,
+0x10, 0x18, 0x08, 0x60, 0x01, 0x23, 0x5b, 0x02, 0x98, 0x42, 0x03, 0xd9,
+0x03, 0x49, 0x0a, 0x79, 0x01, 0x32, 0x0a, 0x71, 0x70, 0x47, 0x00, 0x00,
+0xe4, 0x2d, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40, 0x80, 0x08, 0x80, 0x00,
+0x06, 0x49, 0x0a, 0x68, 0x10, 0x18, 0x08, 0x60, 0x01, 0x23, 0x5b, 0x02,
+0x98, 0x42, 0x03, 0xd9, 0x03, 0x49, 0x0a, 0x79, 0x01, 0x32, 0x0a, 0x71,
+0x70, 0x47, 0x00, 0x00, 0xe4, 0x2d, 0x00, 0x80, 0xa0, 0x82, 0x20, 0x40,
+0x03, 0x30, 0x80, 0x08, 0x80, 0x00, 0x06, 0x49, 0x0a, 0x68, 0x10, 0x18,
+0x08, 0x60, 0x01, 0x23, 0x5b, 0x02, 0x98, 0x42, 0x03, 0xd9, 0x03, 0x49,
+0x0a, 0x79, 0x01, 0x32, 0x0a, 0x71, 0x70, 0x47, 0xe4, 0x2d, 0x00, 0x80,
+0xa0, 0x82, 0x20, 0x40, 0x02, 0x48, 0x41, 0x79, 0x01, 0x31, 0x41, 0x71,
+0x70, 0x47, 0x00, 0x00, 0xa0, 0x82, 0x20, 0x40, 0x90, 0xb4, 0x82, 0x00,
+0x17, 0x4b, 0x9a, 0x58, 0x8b, 0x07, 0x02, 0xd0, 0x89, 0x08, 0x0b, 0x1d,
+0x01, 0xe0, 0x89, 0x08, 0xcb, 0x1c, 0x11, 0x69, 0xd7, 0x68, 0x12, 0x4c,
+0x80, 0x00, 0x20, 0x58, 0x40, 0x68, 0xb9, 0x42, 0x03, 0xd1, 0x81, 0x42,
+0x19, 0xd9, 0x11, 0x68, 0x17, 0xe0, 0x00, 0x24, 0xb9, 0x42, 0x09, 0xd9,
+0x81, 0x42, 0x12, 0xd9, 0x11, 0x68, 0x78, 0x1a, 0x00, 0xd5, 0x03, 0x30,
+0x80, 0x10, 0x98, 0x42, 0x0b, 0xd8, 0x07, 0xe0, 0x81, 0x42, 0x05, 0xd8,
+0x78, 0x1a, 0x00, 0xd5, 0x03, 0x30, 0x80, 0x10, 0x98, 0x42, 0x02, 0xd8,
+0x20, 0x1c, 0x90, 0xbc, 0x70, 0x47, 0xc8, 0x1d, 0x05, 0x30, 0xfa, 0xe7,
+0x70, 0x04, 0x00, 0x80, 0x80, 0xb5, 0x80, 0x00, 0x0f, 0x4a, 0x17, 0x58,
+0x88, 0x07, 0x02, 0xd0, 0x88, 0x08, 0x04, 0x30, 0x01, 0xe0, 0x88, 0x08,
+0x03, 0x30, 0x39, 0x69, 0x7a, 0x68, 0x91, 0x42, 0x09, 0xd9, 0x39, 0x68,
+0xc0, 0x46, 0x39, 0x61, 0xf9, 0x68, 0x7a, 0x68, 0x91, 0x42, 0x02, 0xd9,
+0x39, 0x68, 0xc0, 0x46, 0xf9, 0x60, 0x81, 0x00, 0x38, 0x69, 0x00, 0xf0,
+0xd1, 0xfd, 0x38, 0x61, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x70, 0x04, 0x00, 0x80, 0x90, 0xb5, 0x03, 0x21, 0x09, 0x07, 0x01, 0x40,
+0x0c, 0x0f, 0x01, 0x04, 0x09, 0x0c, 0x01, 0x22, 0x92, 0x07, 0x02, 0x40,
+0xa3, 0x00, 0x1c, 0x4f, 0xff, 0x58, 0x89, 0x07, 0x89, 0x0f, 0x00, 0x04,
+0x00, 0x0c, 0x80, 0x08, 0x00, 0x29, 0x00, 0xd0, 0x01, 0x30, 0x00, 0x2a,
+0x01, 0xd0, 0x02, 0x30, 0x00, 0xe0, 0x03, 0x30, 0xf9, 0x68, 0x7a, 0x68,
+0x91, 0x42, 0x02, 0xd9, 0x39, 0x68, 0xc0, 0x46, 0xf9, 0x60, 0x81, 0x00,
+0xf8, 0x68, 0x00, 0xf0, 0xa5, 0xfd, 0xf8, 0x60, 0x0f, 0x48, 0x00, 0x69,
+0x00, 0x28, 0x05, 0xd0, 0x01, 0x20, 0xa0, 0x40, 0x02, 0xd0, 0x20, 0x1c,
+0xfe, 0xf7, 0xca, 0xfc, 0x0b, 0x49, 0xc8, 0x1d, 0x19, 0x30, 0x03, 0x79,
+0x00, 0x22, 0x00, 0x2b, 0x05, 0xd1, 0x09, 0x49, 0xc8, 0x1d, 0x19, 0x30,
+0x03, 0x79, 0x00, 0x2b, 0x03, 0xd0, 0x02, 0x71, 0x08, 0x1c, 0xff, 0xf7,
+0x79, 0xf9, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x70, 0x04, 0x00, 0x80,
+0xd0, 0x2c, 0x00, 0x80, 0x64, 0x2d, 0x00, 0x80, 0xe4, 0x2c, 0x00, 0x80,
+0xb0, 0xb5, 0x2b, 0x49, 0x09, 0x79, 0x00, 0x29, 0x03, 0xd1, 0x41, 0x68,
+0x29, 0x4b, 0x19, 0x43, 0x41, 0x60, 0x81, 0x68,
+0x49, 0x08, 0x02, 0xd3, 0x09, 0x21, 0x09, 0x04, 0x01, 0xe0, 0x0d, 0x21,
+0x09, 0x04, 0x0c, 0xc8, 0x08, 0x38, 0x19, 0x43, 0x87, 0x68, 0xbb, 0x0a,
+0x03, 0xd3, 0x43, 0x68, 0x5b, 0x08, 0x00, 0xd3, 0x01, 0x31, 0x40, 0x68,
+0x03, 0x23, 0x1b, 0x07, 0x18, 0x40, 0x07, 0x0f, 0xf8, 0x00, 0x1d, 0x4c,
+0x00, 0x19, 0x23, 0x68, 0xc0, 0x18, 0x50, 0x30, 0x00, 0x79, 0x01, 0x28,
+0x10, 0xd1, 0x60, 0x68, 0x01, 0x28, 0x0d, 0xd0, 0x10, 0x1c, 0x00, 0xf0,
+0x71, 0xf8, 0x38, 0x01, 0x00, 0x19, 0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18,
+0x41, 0x6b, 0x01, 0x39, 0x41, 0x63, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x38, 0x01, 0x00, 0x19, 0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x03, 0x6b,
+0x5d, 0x1c, 0x05, 0x63, 0xbd, 0x02, 0x2d, 0x19, 0xdb, 0x00, 0xeb, 0x18,
+0x80, 0x33, 0x19, 0x63, 0xda, 0x62, 0x81, 0x6b, 0x01, 0x31, 0x81, 0x63,
+0x01, 0x21, 0xb9, 0x40, 0x22, 0x68, 0x11, 0x43, 0x21, 0x60, 0x01, 0x6b,
+0x80, 0x29, 0xe2, 0xd3, 0x00, 0x21, 0x01, 0x63, 0xdf, 0xe7, 0x00, 0x00,
+0x28, 0x0f, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0xa0, 0x1c, 0x00, 0x80,
+0xf0, 0xb5, 0x1f, 0x4e, 0x70, 0x68, 0x00, 0x28, 0x36, 0xd1, 0x00, 0x24,
+0xb1, 0x68, 0x48, 0x1c, 0xc9, 0x00, 0x89, 0x19, 0xb0, 0x60, 0x32, 0x68,
+0x89, 0x18, 0x60, 0x31, 0x0d, 0x7b, 0x08, 0x28, 0x00, 0xd3, 0xb4, 0x60,
+0x28, 0x01, 0x80, 0x19, 0x19, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x87, 0x6b,
+0x00, 0x2f, 0x21, 0xd0, 0xc1, 0x6a, 0x4b, 0x1c, 0xaa, 0x02, 0x92, 0x19,
+0xc9, 0x00, 0x51, 0x18, 0x80, 0x31, 0xc3, 0x62, 0xca, 0x6a, 0x09, 0x6b,
+0x01, 0x3f, 0x87, 0x63, 0x80, 0x2b, 0x00, 0xd3, 0xc4, 0x62, 0x00, 0x2f,
+0x06, 0xd1, 0x01, 0x27, 0xaf, 0x40, 0x3b, 0x1c, 0xdb, 0x43, 0x37, 0x68,
+0x3b, 0x40, 0x33, 0x60, 0x43, 0x6b, 0x01, 0x3b, 0x43, 0x63, 0x10, 0x1c,
+0x37, 0x1c, 0x00, 0xf0, 0x09, 0xf8, 0x78, 0x68, 0x00, 0x28, 0xc9, 0xd0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xa0, 0x1c, 0x00, 0x80,
+0xf0, 0xb5, 0xcd, 0x0f, 0xed, 0x07, 0x01, 0x24, 0x00, 0x27, 0x2e, 0x4b,
+0x2e, 0x4a, 0x00, 0x2d, 0x1d, 0xd0, 0xd8, 0x6a, 0x01, 0x30, 0xd8, 0x62,
+0x10, 0x1c, 0x52, 0x69, 0x00, 0x2a, 0x12, 0xd0, 0x02, 0x69, 0x53, 0x1c,
+0x92, 0x00, 0x12, 0x18, 0x03, 0x61, 0x91, 0x61, 0x41, 0x69, 0x01, 0x31,
+0x41, 0x61, 0x02, 0x69, 0x0f, 0x2a, 0x00, 0xd3, 0x07, 0x61, 0x0f, 0x29,
+0x00, 0xd3, 0x44, 0x60, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x08, 0x1c,
+0xff, 0xf7, 0xee, 0xfe, 0xf8, 0xe7, 0x15, 0x69, 0x6e, 0x1c, 0xad, 0x00,
+0xad, 0x18, 0x16, 0x61, 0xa9, 0x61, 0x55, 0x69, 0x01, 0x35, 0x55, 0x61,
+0x16, 0x69, 0x0f, 0x2e, 0x00, 0xd3, 0x17, 0x61, 0x0f, 0x2d, 0x00, 0xd3,
+0x54, 0x60, 0x8c, 0x02, 0xa4, 0x0a, 0x16, 0x4f, 0x3a, 0x6f, 0xfd, 0x68,
+0xf9, 0x1d, 0x79, 0x31, 0x01, 0x2d, 0x0c, 0xd1, 0xdb, 0x6d, 0x5b, 0x08,
+0x09, 0xd3, 0x0b, 0x89, 0x00, 0x2b, 0x06, 0xd1, 0xfd, 0x6f, 0x03, 0x3b,
+0x2e, 0x68, 0x33, 0x40, 0x2b, 0x60, 0x14, 0x23, 0x0b, 0x81, 0x10, 0x60,
+0x80, 0x07, 0x80, 0x0a, 0x20, 0x43, 0x03, 0x04, 0x00, 0xd0, 0x01, 0x38,
+0x50, 0x60, 0x09, 0x6a, 0x08, 0x32, 0x91, 0x42, 0x00, 0xd8, 0x07, 0x4a,
+0x00, 0x0d, 0x02, 0xd3, 0x51, 0x20, 0x80, 0x03, 0x82, 0x61, 0x3a, 0x67,
+0xbe, 0xe7, 0x00, 0x00, 0xa4, 0x2a, 0x00, 0x80, 0xa0, 0x1c, 0x00, 0x80,
+0x68, 0x0e, 0x00, 0x80, 0x24, 0xa7, 0x20, 0x40,
+0xb0, 0xb5, 0x00, 0x28, 0x04, 0xd1, 0x01, 0x20, 0xc0, 0x05, 0x16, 0x49,
+0xc0, 0x46, 0x08, 0x60, 0x15, 0x4c, 0x00, 0x25, 0x67, 0x69, 0x00, 0x2f,
+0x16, 0xd0, 0xe0, 0x68, 0x41, 0x1c, 0x80, 0x00, 0x00, 0x19, 0xe1, 0x60,
+0x80, 0x69, 0x01, 0x3f, 0xff, 0xf7, 0x94, 0xfe, 0xe0, 0x68, 0x0f, 0x28,
+0x00, 0xd3, 0xe5, 0x60, 0xe0, 0x68, 0x80, 0x00, 0x00, 0x19, 0x80, 0x69,
+0x00, 0x08, 0x01, 0xd3, 0x00, 0x2f, 0xea, 0xd1, 0x67, 0x61, 0x03, 0xe0,
+0x08, 0x48, 0x01, 0x6d, 0x01, 0x31, 0x01, 0x65, 0x65, 0x60, 0x20, 0x68,
+0x00, 0x28, 0x01, 0xd0, 0xff, 0xf7, 0x26, 0xff, 0xb0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xa0, 0x1c, 0x00, 0x80,
+0xa0, 0x82, 0x20, 0x40, 0x00, 0x20, 0x70, 0x47, 0xb0, 0xb4, 0x10, 0x23,
+0x82, 0x68, 0x13, 0x40, 0x00, 0x21, 0x00, 0x2b, 0x15, 0xd0, 0x0c, 0x4b,
+0x1a, 0x40, 0x12, 0x01, 0x81, 0x24, 0x14, 0x43, 0x02, 0x68, 0x15, 0x68,
+0x13, 0x1d, 0x80, 0xcb, 0x1b, 0x68, 0x04, 0x3a, 0x02, 0x60, 0x20, 0xc2,
+0x80, 0xc2, 0x08, 0xc2, 0x14, 0x60, 0x42, 0x68, 0x01, 0x23, 0x9b, 0x07,
+0x04, 0x32, 0x1a, 0x43, 0x42, 0x60, 0x08, 0x1c, 0xb0, 0xbc, 0x70, 0x47,
+0x00, 0xf0, 0xff, 0x0f, 0xf0, 0xb4, 0x82, 0x68, 0x53, 0x09, 0x34, 0xd3,
+0x1b, 0x4b, 0x1a, 0x40, 0x12, 0x01, 0x81, 0x26, 0x16, 0x43, 0x03, 0x68,
+0x1d, 0x68, 0x1f, 0x1d, 0x10, 0xcf, 0x3f, 0x68, 0x04, 0x3b, 0x03, 0x60,
+0x20, 0xc3, 0x10, 0xc3, 0x80, 0xc3, 0x1e, 0x60, 0x43, 0x68, 0x1f, 0x1d,
+0x01, 0x23, 0x9b, 0x07, 0x3b, 0x43, 0x43, 0x60, 0xcb, 0x6b, 0x18, 0x1f,
+0xc8, 0x63, 0x80, 0xcb, 0x80, 0xc0, 0x1c, 0x68, 0x1f, 0x1d, 0x03, 0x1d,
+0x04, 0x60, 0x38, 0x1c, 0x3f, 0x68, 0xc0, 0x46, 0x1f, 0x60, 0x1f, 0x1d,
+0x43, 0x68, 0x1c, 0x04, 0x24, 0x0c, 0x81, 0x23, 0x23, 0x43, 0x3b, 0x60,
+0x40, 0x68, 0x00, 0x0c, 0x00, 0x04, 0x10, 0x43, 0x78, 0x60, 0x08, 0x6e,
+0x04, 0x30, 0x08, 0x66, 0x48, 0x6e, 0x04, 0x30, 0x48, 0x66, 0x00, 0x20,
+0xf0, 0xbc, 0x70, 0x47, 0x00, 0xf0, 0xff, 0x0f, 0x80, 0xb4, 0x81, 0x6a,
+0x01, 0x23, 0x9b, 0x07, 0xca, 0x1d, 0x05, 0x32, 0x1a, 0x43, 0x12, 0x68,
+0xcf, 0x1d, 0x01, 0x37, 0x3b, 0x43, 0x1b, 0x68, 0xc0, 0x46, 0xcb, 0x60,
+0x01, 0x23, 0x9b, 0x07, 0x0f, 0x1d, 0x3b, 0x43, 0x1b, 0x68, 0xc0, 0x46,
+0x8b, 0x60, 0x01, 0x23, 0x9b, 0x07, 0x0b, 0x43, 0x1b, 0x68, 0x0c, 0xc1,
+0x02, 0x62, 0x01, 0x6b, 0xc0, 0x46, 0x0a, 0x62, 0x04, 0x23, 0x81, 0x69,
+0x19, 0x43, 0x81, 0x61, 0x02, 0x6b, 0xc0, 0x46, 0x91, 0x61, 0x81, 0x6a,
+0x04, 0x31, 0x81, 0x62, 0x02, 0x6b, 0xc0, 0x46, 0x91, 0x62, 0xc1, 0x1d,
+0x39, 0x31, 0x4a, 0x8b, 0x04, 0x3a, 0x4a, 0x83, 0x49, 0x8b, 0x02, 0x6b,
+0x40, 0x32, 0x51, 0x83, 0xc1, 0x89, 0x04, 0x39, 0xc1, 0x81, 0xc1, 0x68,
+0x00, 0x6b, 0xc0, 0x46, 0xc1, 0x60, 0x00, 0x20, 0x80, 0xbc, 0x70, 0x47,
+0x00, 0x47, 0x08, 0x47, 0x10, 0x47, 0x18, 0x47, 0x20, 0x47, 0x28, 0x47,
+0x30, 0x47, 0x38, 0x47, 0x30, 0x40, 0x2d, 0xe9, 0x0c, 0xc0, 0x9d, 0xe5,
+0x0c, 0x48, 0xa0, 0xe1, 0x24, 0x48, 0xb0, 0xe1, 0x1e, 0x00, 0x00, 0x0a,
+0x01, 0xc0, 0x4c, 0xe2, 0x18, 0x40, 0xa0, 0xe3, 0x64, 0x51, 0x9f, 0xe5,
+0x94, 0x50, 0x20, 0xe0, 0x00, 0x50, 0x90, 0xe5, 0x14, 0x40, 0x90, 0xe5,
+0x00, 0x30, 0x85, 0xe5, 0x04, 0xc0, 0x85, 0xe5, 0x08, 0x10, 0x85, 0xe5,
+0x0c, 0x20, 0x85, 0xe5, 0x10, 0x10, 0x90, 0xe5,
+0x10, 0x50, 0x85, 0xe2, 0x01, 0x00, 0x55, 0xe1, 0x0c, 0x50, 0x90, 0x55,
+0x04, 0x00, 0x55, 0xe1, 0x05, 0x00, 0x00, 0x0a, 0x04, 0x10, 0x90, 0xe5,
+0x00, 0x50, 0x80, 0xe5, 0x00, 0x50, 0x81, 0xe5, 0x00, 0x00, 0xa0, 0xe3,
+0x30, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1, 0x00, 0x30, 0x93, 0xe5,
+0x08, 0x20, 0x90, 0xe5, 0x01, 0x31, 0x83, 0xe3, 0x02, 0x36, 0x83, 0xe3,
+0x03, 0x00, 0x55, 0xe1, 0x14, 0x30, 0x80, 0xe5, 0xf2, 0xff, 0xff, 0x1a,
+0x01, 0x00, 0xa0, 0xe3, 0xf4, 0xff, 0xff, 0xea, 0x01, 0x06, 0x1c, 0xe3,
+0xf1, 0xff, 0xff, 0x0a, 0xec, 0x10, 0x9f, 0xe5, 0x02, 0xc6, 0xcc, 0xe3,
+0x54, 0x20, 0x91, 0xe5, 0xe4, 0x30, 0x9f, 0xe5, 0x50, 0x10, 0x91, 0xe5,
+0xd9, 0xff, 0xff, 0xea, 0xf0, 0x47, 0x2d, 0xe9, 0x20, 0xc0, 0x9d, 0xe5,
+0x0c, 0x68, 0xa0, 0xe1, 0x26, 0x68, 0xb0, 0xe1, 0x25, 0x00, 0x00, 0x0a,
+0x18, 0x40, 0xa0, 0xe3, 0xb8, 0x50, 0x9f, 0xe5, 0x94, 0x00, 0x00, 0xe0,
+0x05, 0x00, 0x80, 0xe0, 0x08, 0x40, 0x90, 0xe5, 0x04, 0x80, 0x90, 0xe5,
+0x00, 0x70, 0xa0, 0xe3, 0x1f, 0xc0, 0xa0, 0xe3, 0x02, 0xc4, 0x8c, 0xe3,
+0x00, 0x50, 0x90, 0xe5, 0x10, 0x90, 0x90, 0xe5, 0x14, 0xa0, 0x90, 0xe5,
+0x00, 0x30, 0x85, 0xe5, 0x04, 0xc0, 0x85, 0xe5, 0x08, 0x10, 0x85, 0xe5,
+0x0c, 0x20, 0x85, 0xe5, 0x10, 0x50, 0x85, 0xe2, 0x09, 0x00, 0x55, 0xe1,
+0x0c, 0x50, 0x90, 0x55, 0x0a, 0x00, 0x55, 0xe1, 0x15, 0x00, 0x00, 0x0a,
+0x03, 0x70, 0x17, 0xe2, 0x20, 0x10, 0x81, 0xe2, 0x20, 0x30, 0x83, 0xe2,
+0x0a, 0x00, 0x00, 0x0a, 0x00, 0x60, 0x96, 0xe2, 0x01, 0x70, 0x87, 0xe2,
+0x09, 0x00, 0x00, 0x0a, 0x20, 0x60, 0x46, 0xe2, 0x20, 0x00, 0x56, 0xe3,
+0xec, 0xff, 0xff, 0xca, 0x00, 0x70, 0xa0, 0xe3, 0x01, 0xc0, 0x46, 0xe2,
+0x02, 0xc4, 0x8c, 0xe3, 0x00, 0x60, 0xa0, 0xe3, 0xe7, 0xff, 0xff, 0xea,
+0x00, 0x50, 0x88, 0xe5, 0xf2, 0xff, 0xff, 0xea, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x50, 0x80, 0xe5, 0x01, 0x00, 0xa0, 0xe1, 0xf0, 0x47, 0xbd, 0xe8,
+0x1e, 0xff, 0x2f, 0xe1, 0x00, 0xa0, 0x94, 0xe5, 0x0a, 0x00, 0x55, 0xe1,
+0x14, 0xa0, 0x80, 0xe5, 0xe5, 0xff, 0xff, 0x1a, 0x01, 0x10, 0xa0, 0xe3,
+0xf5, 0xff, 0xff, 0xea, 0xa8, 0x03, 0x00, 0x80, 0x7c, 0x29, 0x00, 0x80,
+0x00, 0x80, 0x20, 0x40, 0x68, 0x82, 0x9f, 0xe5, 0x0b, 0x92, 0xa0, 0xe3,
+0x64, 0xa2, 0x9f, 0xe5, 0x58, 0xb0, 0x9a, 0xe5, 0x0e, 0xf0, 0xa0, 0xe1,
+0x54, 0xb0, 0x9a, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0x3f, 0x40, 0x2d, 0xe9,
+0x00, 0x00, 0x4f, 0xe1, 0x1f, 0x00, 0x00, 0xe2, 0x12, 0x00, 0x50, 0xe3,
+0x54, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x0f, 0xe1, 0x80, 0x00, 0xc0, 0xe3,
+0x00, 0xf0, 0x21, 0xe1, 0x04, 0x50, 0xa0, 0xe3, 0x00, 0x40, 0x99, 0xe5,
+0x09, 0x00, 0x00, 0xea, 0x02, 0x00, 0x14, 0xe3, 0x53, 0x00, 0x00, 0x1b,
+0x80, 0x00, 0x14, 0xe3, 0x59, 0x00, 0x00, 0x1b, 0x20, 0x00, 0x14, 0xe3,
+0x59, 0x00, 0x00, 0x1b, 0x02, 0x07, 0x14, 0xe3, 0x59, 0x00, 0x00, 0x1b,
+0x01, 0x06, 0x14, 0xe3, 0x59, 0x00, 0x00, 0x1b, 0x08, 0x00, 0x14, 0xe3,
+0x45, 0x00, 0x00, 0x1b, 0x02, 0x05, 0x14, 0xe3, 0x4a, 0x00, 0x00, 0x1b,
+0x02, 0x08, 0x14, 0xe3, 0x4b, 0x00, 0x00, 0x1b, 0xe5, 0x0e, 0x14, 0xe3,
+0x07, 0x00, 0x00, 0x0a, 0x04, 0x20, 0x98, 0xe5, 0x0c, 0x10, 0x98, 0xe5,
+0x04, 0x30, 0x52, 0xe2, 0x3c, 0x30, 0xa0, 0xb3, 0x04, 0x30, 0x88, 0xe5,
+0x02, 0x00, 0x91, 0xe7, 0x0f, 0xe0, 0xa0, 0xe1,
+0x10, 0xff, 0x2f, 0xe1, 0x01, 0x50, 0x55, 0xe2, 0x03, 0x00, 0x00, 0x0a,
+0x00, 0x40, 0x99, 0xe5, 0x0c, 0x00, 0x9a, 0xe5, 0x00, 0x00, 0x14, 0xe1,
+0x1b, 0xff, 0x2f, 0x11, 0x08, 0x00, 0x9a, 0xe5, 0x00, 0x00, 0x14, 0xe1,
+0x0b, 0x00, 0x00, 0x0a, 0x01, 0x0c, 0x14, 0xe3, 0x98, 0x01, 0x9f, 0x15,
+0x0f, 0xe0, 0xa0, 0x11, 0x10, 0xff, 0x2f, 0x11, 0x02, 0x04, 0x14, 0xe3,
+0x8c, 0x01, 0x9f, 0x15, 0x0f, 0xe0, 0xa0, 0x11, 0x10, 0xff, 0x2f, 0x11,
+0x01, 0x09, 0x14, 0xe3, 0x80, 0x01, 0x9f, 0x15, 0x0f, 0xe0, 0xa0, 0x11,
+0x10, 0xff, 0x2f, 0x11, 0x04, 0x00, 0x9a, 0xe5, 0x00, 0x00, 0x14, 0xe1,
+0x16, 0x00, 0x00, 0x0a, 0x54, 0xe0, 0x8f, 0xe2, 0x04, 0x00, 0x14, 0xe3,
+0x40, 0x00, 0x9a, 0x15, 0x10, 0xff, 0x2f, 0x11, 0x02, 0x0a, 0x14, 0xe3,
+0x44, 0x00, 0x9a, 0x15, 0x10, 0xff, 0x2f, 0x11, 0x02, 0x09, 0x14, 0xe3,
+0x48, 0x00, 0x9a, 0x15, 0x10, 0xff, 0x2f, 0x11, 0x01, 0x02, 0x14, 0xe3,
+0x4c, 0x00, 0x9a, 0x15, 0x10, 0xff, 0x2f, 0x11, 0x01, 0x04, 0x14, 0xe3,
+0x50, 0x00, 0x9a, 0x15, 0x10, 0xff, 0x2f, 0x11, 0x01, 0x0a, 0x14, 0xe3,
+0x21, 0x00, 0x00, 0x1b, 0x02, 0x00, 0x14, 0xe3, 0x0e, 0x00, 0x00, 0x1b,
+0x10, 0x00, 0x9a, 0xe5, 0x00, 0x00, 0x14, 0xe1, 0x1c, 0x00, 0x00, 0x1b,
+0x00, 0x40, 0x99, 0xe5, 0x04, 0x50, 0xa0, 0xe3, 0x00, 0x40, 0x94, 0xe2,
+0x1b, 0xff, 0x2f, 0x11, 0x3f, 0x40, 0xbd, 0xe8, 0x04, 0xf0, 0x5e, 0xe2,
+0xc0, 0x00, 0x80, 0xe3, 0x00, 0xf0, 0x61, 0xe1, 0xfa, 0xff, 0xff, 0xea,
+0x18, 0x00, 0x9a, 0xe5, 0x1c, 0x10, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1,
+0x54, 0xb0, 0x9a, 0xe5, 0x1c, 0x10, 0x9a, 0xe5, 0x14, 0x00, 0x9a, 0xe5,
+0x11, 0xff, 0x2f, 0xe1, 0x20, 0x10, 0x9a, 0xe5, 0x00, 0x00, 0xa0, 0xe3,
+0x11, 0xff, 0x2f, 0xe1, 0x24, 0x10, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1,
+0x28, 0x10, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1, 0x2c, 0x10, 0x9a, 0xe5,
+0x11, 0xff, 0x2f, 0xe1, 0x30, 0x10, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1,
+0x34, 0x10, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1, 0xfe, 0xff, 0xff, 0xea,
+0x38, 0xe0, 0x9a, 0xe5, 0x3c, 0x10, 0x9a, 0xe5, 0x18, 0x00, 0x9a, 0xe5,
+0x11, 0xff, 0x2f, 0xe1, 0x38, 0xe0, 0x9a, 0xe5, 0x3c, 0x10, 0x9a, 0xe5,
+0x14, 0x00, 0x9a, 0xe5, 0x11, 0xff, 0x2f, 0xe1, 0x64, 0x20, 0x9f, 0xe5,
+0x00, 0x30, 0x92, 0xe5, 0x00, 0x30, 0x53, 0xe0, 0x0a, 0x00, 0x00, 0xba,
+0x00, 0x30, 0x82, 0xe5, 0x0c, 0x00, 0x92, 0xe5, 0x08, 0x30, 0x92, 0xe5,
+0x00, 0x10, 0x91, 0xe2, 0x03, 0x00, 0x00, 0x0a, 0x03, 0x10, 0x80, 0xe7,
+0x04, 0x30, 0x53, 0xe2, 0x3c, 0x30, 0xa0, 0xb3, 0x08, 0x30, 0x82, 0xe5,
+0x01, 0x00, 0xa0, 0xe3, 0x1e, 0xff, 0x2f, 0xe1, 0x3c, 0x10, 0x9f, 0xe5,
+0x00, 0x00, 0x91, 0xe5, 0x01, 0x00, 0x80, 0xe2, 0x00, 0x00, 0x81, 0xe5,
+0x00, 0x00, 0xa0, 0xe3, 0xf8, 0xff, 0xff, 0xea, 0x10, 0x00, 0x9f, 0xe5,
+0x08, 0x10, 0x90, 0xe5, 0x04, 0x10, 0x51, 0xe2, 0x3c, 0x10, 0xa0, 0xb3,
+0x08, 0x10, 0x80, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0xe4, 0x2d, 0x00, 0x80,
+0xcc, 0x04, 0x00, 0x80, 0x71, 0x2b, 0xff, 0xff, 0xd1, 0x3d, 0xff, 0xff,
+0xc9, 0x2b, 0xff, 0xff, 0xa0, 0x82, 0x20, 0x40, 0xc9, 0x1c, 0x89, 0x08,
+0x89, 0x00, 0x01, 0x23, 0x85, 0x4a, 0x5b, 0x07, 0x18, 0x43, 0x13, 0x68,
+0x5b, 0x18, 0x13, 0x60, 0x00, 0x1f, 0x81, 0xa3, 0x5b, 0x1a, 0x18, 0x47,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5, 0x04, 0x20, 0xa0, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0xe4, 0x2d, 0x00, 0x80,
+0x98, 0x00, 0x9f, 0xe5, 0x98, 0x10, 0x9f, 0xe5, 0x01, 0x20, 0x40, 0xe0,
+0x94, 0x30, 0x9f, 0xe5, 0x00, 0x00, 0x91, 0xe5, 0x03, 0x00, 0x50, 0xe1,
+0x03, 0x00, 0x00, 0x1a, 0x04, 0x10, 0x81, 0xe2, 0x04, 0x20, 0x52, 0xe2,
+0x00, 0x00, 0x00, 0x0a, 0xf8, 0xff, 0xff, 0xea, 0x78, 0x00, 0x9f, 0xe5,
+0x00, 0x20, 0x80, 0xe5, 0x74, 0x00, 0x9f, 0xe5, 0x74, 0x10, 0x9f, 0xe5,
+0x01, 0x20, 0x40, 0xe0, 0x60, 0x30, 0x9f, 0xe5, 0x00, 0x00, 0x91, 0xe5,
+0x03, 0x00, 0x50, 0xe1, 0x03, 0x00, 0x00, 0x1a, 0x04, 0x10, 0x81, 0xe2,
+0x04, 0x20, 0x52, 0xe2, 0x00, 0x00, 0x00, 0x0a, 0xf8, 0xff, 0xff, 0xea,
+0x50, 0x00, 0x9f, 0xe5, 0x00, 0x20, 0x80, 0xe5, 0x4c, 0x00, 0x9f, 0xe5,
+0x4c, 0x10, 0x9f, 0xe5, 0x01, 0x20, 0x40, 0xe0, 0x2c, 0x30, 0x9f, 0xe5,
+0x00, 0x00, 0x91, 0xe5, 0x03, 0x00, 0x50, 0xe1, 0x03, 0x00, 0x00, 0x1a,
+0x04, 0x10, 0x81, 0xe2, 0x04, 0x20, 0x52, 0xe2, 0x00, 0x00, 0x00, 0x0a,
+0xf8, 0xff, 0xff, 0xea, 0x28, 0x00, 0x9f, 0xe5, 0x00, 0x20, 0x80, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0x7c, 0x34, 0x00, 0x80, 0x80, 0x30, 0x00, 0x80,
+0xad, 0xde, 0xad, 0xde, 0xc0, 0x04, 0x00, 0x80, 0xfc, 0x37, 0x00, 0x80,
+0x80, 0x34, 0x00, 0x80, 0xc4, 0x04, 0x00, 0x80, 0xfc, 0x3f, 0x00, 0x80,
+0x40, 0x38, 0x00, 0x80, 0xc8, 0x04, 0x00, 0x80, 0x78, 0x47, 0x00, 0x00,
+0x71, 0xea, 0xff, 0xea, 0x78, 0x47, 0x00, 0x00, 0x39, 0xfe, 0xff, 0xea,
+0x78, 0x47, 0x00, 0x00, 0x63, 0xfe, 0xff, 0xea, 0x78, 0x47, 0x00, 0x00,
+0x1b, 0xff, 0xff, 0xea, 0x78, 0x47, 0x00, 0x00, 0x6b, 0xea, 0xff, 0xea,
+0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+0x28, 0x04, 0x00, 0x00, 0xf8, 0x3d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x80,
+0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x0b, 0xff, 0xff,
+0x00, 0x00, 0x00, 0x00, 0xd5, 0x0b, 0xff, 0xff, 0x03, 0xff, 0x06, 0x54,
+0x03, 0x00, 0x00, 0x00, 0x75, 0x04, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+0xa1, 0x05, 0xff, 0xff, 0x04, 0xff, 0x07, 0x54, 0x03, 0x00, 0x00, 0x00,
+0xb5, 0x04, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x05, 0xff, 0xff,
+0x05, 0xff, 0x05, 0x54, 0x03, 0x00, 0x00, 0x00, 0x39, 0x04, 0xff, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x55, 0x05, 0xff, 0xff, 0x01, 0xff, 0x04, 0x00,
+0x03, 0x00, 0x00, 0x00, 0x41, 0x18, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+0x61, 0x0e, 0xff, 0xff, 0x02, 0xff, 0x02, 0x08, 0x00, 0x00, 0x00, 0x00,
+0xa1, 0x02, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x02, 0xff, 0xff,
+0xff, 0xff, 0x01, 0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x9d, 0x0d, 0xff, 0xff, 0x06, 0x00, 0xff, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x3d, 0x50, 0xff, 0xff, 0x81, 0x50, 0xff, 0xff,
+0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x48, 0x05, 0x00, 0x80, 0x11, 0x75, 0x21, 0x40, 0x1b, 0x75, 0x21, 0x40,
+0x31, 0x75, 0x21, 0x40, 0x49, 0x75, 0x21, 0x40,
+0x55, 0x75, 0x21, 0x40, 0x63, 0x75, 0x21, 0x40, 0x7d, 0x75, 0x21, 0x40,
+0xa9, 0x75, 0x21, 0x40, 0x6d, 0x76, 0x21, 0x40, 0xc5, 0x76, 0x21, 0x40,
+0xd3, 0x76, 0x21, 0x40, 0xdd, 0x76, 0x21, 0x40, 0xe7, 0x76, 0x21, 0x40,
+0x99, 0x77, 0x21, 0x40, 0xa7, 0x77, 0x21, 0x40, 0xb5, 0x77, 0x21, 0x40,
+0x61, 0x78, 0x21, 0x40, 0x5f, 0x7c, 0x21, 0x40, 0xe9, 0x7c, 0x21, 0x40,
+0x89, 0x7d, 0x21, 0x40, 0xbd, 0x7e, 0x21, 0x40, 0xc9, 0x7e, 0x21, 0x40,
+0x29, 0x7f, 0x21, 0x40, 0x8d, 0x7f, 0x21, 0x40, 0xb9, 0x7f, 0x21, 0x40,
+0xdd, 0x7f, 0x21, 0x40, 0x1d, 0x80, 0x21, 0x40, 0x45, 0x80, 0x21, 0x40,
+0x8d, 0x80, 0x21, 0x40, 0x9d, 0x80, 0x21, 0x40, 0xc5, 0x80, 0x21, 0x40,
+0xd5, 0x80, 0x21, 0x40, 0x1d, 0x81, 0x21, 0x40, 0x5b, 0x81, 0x21, 0x40,
+0xb1, 0x81, 0x21, 0x40, 0x11, 0x82, 0x21, 0x40, 0x1b, 0x82, 0x21, 0x40,
+0x1f, 0x82, 0x21, 0x40, 0x8d, 0x82, 0x21, 0x40, 0xd9, 0x82, 0x21, 0x40,
+0x31, 0x83, 0x21, 0x40, 0x6d, 0x83, 0x21, 0x40, 0xd1, 0x83, 0x21, 0x40,
+0x09, 0x84, 0x21, 0x40, 0x19, 0x84, 0x21, 0x40, 0x51, 0x84, 0x21, 0x40,
+0x61, 0x84, 0x21, 0x40, 0x75, 0x84, 0x21, 0x40, 0x9d, 0x84, 0x21, 0x40,
+0xa7, 0x84, 0x21, 0x40, 0xb1, 0x84, 0x21, 0x40, 0x15, 0x85, 0x21, 0x40,
+0x45, 0x85, 0x21, 0x40, 0x51, 0x85, 0x21, 0x40, 0xc5, 0x85, 0x21, 0x40,
+0xcf, 0x85, 0x21, 0x40, 0xd9, 0x85, 0x21, 0x40, 0xe3, 0x85, 0x21, 0x40,
+0xed, 0x85, 0x21, 0x40, 0xf7, 0x85, 0x21, 0x40, 0x01, 0x86, 0x21, 0x40,
+0x0b, 0x86, 0x21, 0x40, 0x15, 0x86, 0x21, 0x40, 0x01, 0x89, 0x21, 0x40,
+0x1f, 0x86, 0x21, 0x40, 0x29, 0x86, 0x21, 0x40, 0x33, 0x86, 0x21, 0x40,
+0x3d, 0x86, 0x21, 0x40, 0x65, 0x86, 0x21, 0x40, 0x6f, 0x86, 0x21, 0x40,
+0xd1, 0x86, 0x21, 0x40, 0xdb, 0x86, 0x21, 0x40, 0xe5, 0x86, 0x21, 0x40,
+0xef, 0x86, 0x21, 0x40, 0xf9, 0x86, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40,
+0x03, 0x87, 0x21, 0x40, 0x69, 0x87, 0x21, 0x40, 0xb5, 0x87, 0x21, 0x40,
+0xf9, 0x87, 0x21, 0x40, 0x09, 0x88, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40,
+0x55, 0x88, 0x21, 0x40, 0x59, 0x88, 0x21, 0x40, 0x5d, 0x88, 0x21, 0x40,
+0xb5, 0x88, 0x21, 0x40, 0xdd, 0x88, 0x21, 0x40, 0xe9, 0x88, 0x21, 0x40,
+0xed, 0x88, 0x21, 0x40, 0xf1, 0x88, 0x21, 0x40, 0xf5, 0x88, 0x21, 0x40,
+0xf9, 0x88, 0x21, 0x40, 0xfd, 0x88, 0x21, 0x40, 0x2d, 0x85, 0x21, 0x40,
+0x89, 0x85, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40,
+0x0d, 0x89, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40, 0xe1, 0x74, 0x21, 0x40,
+0x9d, 0x74, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40,
+0x9d, 0x74, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40, 0x9d, 0x74, 0x21, 0x40,
+0x6b, 0x78, 0x21, 0x40, 0xf5, 0x7b, 0x21, 0x40, 0x31, 0x7c, 0x21, 0x40,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x5c, 0x01, 0x18, 0x40, 0x58, 0x01, 0x18, 0x40,
+0x24, 0xa3, 0x20, 0x40, 0x24, 0xa7, 0x20, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6c, 0x01, 0x18, 0x40, 0x68, 0x01, 0x18, 0x40,
+0x24, 0x83, 0x20, 0x40, 0x24, 0xa3, 0x20, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x7c, 0x01, 0x18, 0x40, 0x78, 0x01, 0x18, 0x40,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x8c, 0x01, 0x18, 0x40,
+0x88, 0x01, 0x18, 0x40, 0x24, 0xa9, 0x20, 0x40, 0x24, 0xab, 0x20, 0x40,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x08, 0x00, 0x12, 0x00, 0x18, 0x00, 0x12, 0x00, 0x0c, 0x00, 0x12, 0x00,
+0x1c, 0x00, 0x12, 0x00, 0x24, 0xa8, 0x20, 0x40, 0xa4, 0xa8, 0x20, 0x40,
+0xa4, 0xa8, 0x20, 0x40, 0x24, 0xa9, 0x20, 0x40, 0x00, 0x00, 0x00, 0x00,
+0xd1, 0xa8, 0x21, 0x40, 0x2d, 0xaa, 0x21, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x89, 0x70, 0x21, 0x40, 0xc9, 0xa1, 0x21, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x57, 0x89, 0x21, 0x40, 0xd1, 0xa8, 0x21, 0x40, 0xc5, 0x2f, 0xff, 0xff,
+0x05, 0x21, 0xff, 0xff, 0xef, 0x20, 0xff, 0xff, 0x59, 0xa7, 0x21, 0x40,
+0x34, 0x2e, 0x00, 0x80, 0x48, 0x2e, 0x00, 0x80, 0x5c, 0x2e, 0x00, 0x80,
+0x30, 0x33, 0x3a, 0x31, 0x31, 0x3a, 0x31, 0x31, 0x00, 0x30, 0x37, 0x2f,
+0x32, 0x33, 0x2f, 0x30, 0x31, 0x00, 0x30, 0x30, 0x30, 0x30, 0x31, 0x35,
+0x36, 0x39, 0x00, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74,
+0x20, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x30, 0x31, 0x20, 0x33, 0x43,
+0x6f, 0x6d, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69,
+0x6f, 0x6e, 0x0a, 0x00, 0x08, 0x10, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x53, 0xff, 0xff,
+0x27, 0xf0, 0x7d, 0xfd, 0x00, 0x01, 0x00, 0x02, 0xda, 0x0e, 0x82, 0x00,
+0x01, 0x40, 0x64, 0x04, 0x64, 0x2d, 0x00, 0x80, 0xe4, 0x2c, 0x00, 0x80,
+0x69, 0x3e, 0xff, 0xff, 0xc9, 0x4f, 0xff, 0xff, 0xd5, 0x24, 0xff, 0xff,
+0xc9, 0x3b, 0xff, 0xff, 0x29, 0x3c, 0xff, 0xff, 0x19, 0x1a, 0xff, 0xff,
+0x65, 0x11, 0xff, 0xff, 0xcc, 0x53, 0xff, 0xff, 0x21, 0x40, 0xff, 0xff,
+0x89, 0x70, 0x21, 0x40, 0x49, 0x72, 0x21, 0x40, 0xd9, 0x3f, 0xff, 0xff,
+0x21, 0x9a, 0x21, 0x40, 0x85, 0x24, 0xff, 0xff, 0x64, 0x53, 0xff, 0xff,
+0x8c, 0x53, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
+0x80, 0x30, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
+0x00, 0x00, 0x20, 0x40, 0xb0, 0x50, 0x00, 0x00, 0x7b, 0x0e, 0x00, 0x00,
+0x00, 0x6e, 0x21, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0xed, 0x89, 0x21, 0x40, 0x8b, 0x89, 0x21, 0x40, 0xa5, 0x8c, 0x21, 0x40,
+0x05, 0x8d, 0x21, 0x40, 0xcd, 0x8d, 0x21, 0x40, 0x8b, 0x8b, 0x21, 0x40,
+0xa9, 0x8e, 0x21, 0x40, 0x15, 0x8f, 0x21, 0x40, 0x69, 0x8b, 0x21, 0x40,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x59, 0xbd, 0x21, 0x40, 0xc1, 0xbd, 0x21, 0x40, 0x2d, 0xbe, 0x21, 0x40,
+0x00, 0x20, 0x0a, 0x4a, 0x0b, 0x23, 0x1b, 0x02, 0xd1, 0x18, 0x2d, 0x23,
+0x9b, 0x01, 0xd3, 0x18, 0x88, 0x61, 0xd8, 0x60, 0xd8, 0x63, 0x80, 0x32,
+0xc8, 0x60, 0x08, 0x61, 0x48, 0x61, 0xd0, 0x62, 0x03, 0x48, 0xc0, 0x46,
+0x48, 0x60, 0x88, 0x60, 0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0xfe, 0x03, 0x00, 0x00, 0xf0, 0xb5, 0x84, 0xb0, 0x0c, 0x1c, 0x05, 0x1c,
+0x00, 0x23, 0x00, 0x93, 0xff, 0xf7, 0xde, 0xff, 0x68, 0x49, 0x0b, 0x23,
+0x1b, 0x02, 0xcf, 0x18, 0x78, 0x68, 0x28, 0x40,
+0x00, 0x22, 0xf8, 0x60, 0x3a, 0x61, 0xba, 0x68, 0x22, 0x40, 0x7a, 0x61,
+0x0c, 0x1c, 0x41, 0x09, 0x03, 0xd2, 0x51, 0x09, 0x01, 0xd2, 0x80, 0x0a,
+0x02, 0xd3, 0x60, 0x48, 0x00, 0xf0, 0xc2, 0xf8, 0x01, 0x20, 0xf9, 0x68,
+0x49, 0x09, 0x03, 0xd2, 0x79, 0x69, 0x49, 0x09, 0x00, 0xd2, 0x00, 0x20,
+0x00, 0x06, 0x00, 0x0e, 0x03, 0xf0, 0xd4, 0xfa, 0xf8, 0x68, 0x00, 0x28,
+0x70, 0xd0, 0x00, 0x23, 0x02, 0x93, 0x01, 0x93, 0x54, 0x4a, 0x01, 0x23,
+0x18, 0x43, 0xf8, 0x60, 0x00, 0x20, 0xd5, 0x1d, 0x79, 0x35, 0x03, 0x95,
+0x01, 0x24, 0x00, 0x21, 0x4f, 0x4d, 0xfa, 0x68, 0x22, 0x40, 0x39, 0xd0,
+0x8a, 0x00, 0x52, 0x18, 0x92, 0x00, 0x4e, 0x4b, 0x9b, 0x5c, 0x1e, 0x1c,
+0x83, 0x42, 0x04, 0xd0, 0x4b, 0x4b, 0xd3, 0x18, 0x5b, 0x78, 0x83, 0x42,
+0x2c, 0xd1, 0x49, 0x4b, 0xd2, 0x18, 0xd3, 0x78, 0x03, 0x9d, 0xed, 0x6a,
+0xab, 0x42, 0x02, 0xd9, 0x03, 0x9d, 0xc0, 0x46, 0xeb, 0x62, 0x53, 0x68,
+0x5b, 0x08, 0x01, 0xd3, 0x01, 0x23, 0x00, 0x93, 0x86, 0x42, 0x0a, 0xd1,
+0x95, 0x68, 0x02, 0x9b, 0x5e, 0x1c, 0x02, 0x96, 0x9b, 0x00, 0x3c, 0x4e,
+0x9e, 0x19, 0x0b, 0x23, 0x1b, 0x02, 0xf3, 0x18, 0x9d, 0x61, 0x53, 0x78,
+0x83, 0x42, 0x0d, 0xd1, 0xd2, 0x68, 0x01, 0x9b, 0x5d, 0x1c, 0x01, 0x95,
+0x9b, 0x00, 0x35, 0x4d, 0x5d, 0x19, 0x2d, 0x23, 0x9b, 0x01, 0xeb, 0x18,
+0xda, 0x60, 0x3a, 0x69, 0x01, 0x32, 0x3a, 0x61, 0x64, 0x00, 0x01, 0x31,
+0x0b, 0x29, 0xbd, 0xd3, 0x01, 0x30, 0x09, 0x28, 0xb8, 0xd3, 0x00, 0x20,
+0x02, 0x9b, 0x99, 0x00, 0x2b, 0x4a, 0x89, 0x18, 0x0b, 0x23, 0x1b, 0x02,
+0xc9, 0x18, 0x88, 0x61, 0x01, 0x9b, 0x99, 0x00, 0x89, 0x18, 0x2d, 0x23,
+0x9b, 0x01, 0xc9, 0x18, 0xc8, 0x60, 0x00, 0x9b, 0x00, 0x2b, 0x0c, 0xd1,
+0x81, 0x00, 0x89, 0x18, 0x0b, 0x23, 0x1b, 0x02, 0xc9, 0x18, 0xcb, 0x69,
+0xc0, 0x46, 0x8b, 0x61, 0x01, 0x30, 0x0b, 0x28, 0xf4, 0xd3, 0x08, 0xe0,
+0x07, 0xe0, 0x03, 0x9d, 0xe8, 0x6a, 0x30, 0x28, 0x03, 0xd2, 0x30, 0x20,
+0x03, 0x9d, 0xc0, 0x46, 0xe8, 0x62, 0x19, 0x4a, 0x78, 0x69, 0x00, 0x28,
+0x2a, 0xd0, 0x00, 0x21, 0x01, 0x23, 0x18, 0x43, 0x78, 0x61, 0x00, 0x20,
+0x01, 0x24, 0x00, 0x22, 0x13, 0x4e, 0x7b, 0x69, 0x23, 0x40, 0x10, 0xd0,
+0x93, 0x00, 0x9b, 0x18, 0x9b, 0x00, 0x12, 0x4d, 0x5b, 0x19, 0x9d, 0x78,
+0x85, 0x42, 0x08, 0xd1, 0x1d, 0x69, 0x0b, 0x1c, 0x9b, 0x00, 0x9e, 0x19,
+0x2d, 0x23, 0x9b, 0x01, 0xf3, 0x18, 0xdd, 0x63, 0x01, 0x31, 0x64, 0x00,
+0x01, 0x32, 0x0b, 0x2a, 0xe6, 0xd3, 0x01, 0x30, 0x09, 0x28, 0xe1, 0xd3,
+0x00, 0x20, 0x89, 0x00, 0x04, 0x4a, 0x89, 0x18, 0x2d, 0x23, 0x9b, 0x01,
+0xc9, 0x18, 0xc8, 0x63, 0x04, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x30, 0x53, 0xff, 0xff, 0x00, 0x01, 0x00, 0x80,
+0x00, 0x47, 0x08, 0x47, 0x10, 0x47, 0x18, 0x47, 0x78, 0x47, 0xc0, 0x46,
+0x18, 0xc0, 0x9f, 0xe5, 0x1c, 0xff, 0x2f, 0xe1, 0x78, 0x47, 0xc0, 0x46,
+0x10, 0xc0, 0x9f, 0xe5, 0x1c, 0xff, 0x2f, 0xe1, 0x78, 0x47, 0xc0, 0x46,
+0x08, 0xc0, 0x9f, 0xe5, 0x1c, 0xff, 0x2f, 0xe1, 0x38, 0x52, 0xff, 0xff,
+0x88, 0x51, 0xff, 0xff, 0xd5, 0xb0, 0x21, 0x40, 0xf0, 0xb5, 0x04, 0x20,
+0x1a, 0x49, 0x01, 0x25, 0x08, 0x60, 0x1a, 0x4f, 0xbb, 0x23, 0x1b, 0x01,
+0xf8, 0x18, 0x05, 0x73, 0x18, 0x48, 0x41, 0x6b, 0x2c, 0x05, 0x00, 0x20,
+0x7a, 0x6e, 0x17, 0x4b, 0x8a, 0x42, 0x1d, 0xd0,
+0x19, 0x7b, 0x00, 0x29, 0x17, 0xd1, 0xd9, 0x1d, 0xff, 0x31, 0x3a, 0x31,
+0x49, 0x78, 0x1e, 0x1c, 0x00, 0x29, 0x10, 0xd1, 0xb0, 0x60, 0x10, 0x20,
+0x70, 0x60, 0x10, 0x4a, 0x10, 0x49, 0xff, 0xf7, 0xc3, 0xff, 0x00, 0x28,
+0x07, 0xd0, 0x35, 0x73, 0x04, 0x23, 0xb8, 0x69, 0x18, 0x43, 0xb8, 0x61,
+0x20, 0x61, 0x00, 0xf0, 0x17, 0xf8, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x18, 0x73, 0x04, 0x23, 0xb8, 0x69, 0x98, 0x43, 0xb8, 0x61, 0x20, 0x61,
+0xf5, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x68, 0x0e, 0x00, 0x80,
+0x00, 0x01, 0x18, 0x40, 0x28, 0x05, 0x00, 0x80, 0x20, 0x55, 0xff, 0xff,
+0x7d, 0x71, 0x21, 0x40, 0xf8, 0xb5, 0x15, 0x4f, 0x39, 0x6c, 0x15, 0x48,
+0x40, 0x6e, 0x0c, 0x1a, 0x14, 0x4e, 0x71, 0x68, 0x14, 0x4d, 0xa1, 0x42,
+0x06, 0xd8, 0x14, 0x4a, 0x0a, 0x43, 0x00, 0x92, 0xb9, 0x6b, 0x09, 0x18,
+0xfa, 0x6b, 0x11, 0xe0, 0x11, 0x22, 0x52, 0x05, 0x22, 0x43, 0x00, 0x92,
+0xb9, 0x6b, 0x09, 0x18, 0x00, 0x20, 0xfa, 0x6b, 0x2b, 0x1c, 0xff, 0xf7,
+0x8d, 0xff, 0x70, 0x68, 0x00, 0x1b, 0x0a, 0x4a, 0x02, 0x43, 0x00, 0x92,
+0xb9, 0x6b, 0xfa, 0x6b, 0x00, 0x20, 0x2b, 0x1c, 0xff, 0xf7, 0x82, 0xff,
+0xf8, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x7c, 0x29, 0x00, 0x80,
+0x68, 0x0e, 0x00, 0x80, 0x28, 0x05, 0x00, 0x80, 0x44, 0x80, 0x20, 0x40,
+0x00, 0x00, 0x37, 0x02, 0xf0, 0xb5, 0x2b, 0x4f, 0xb8, 0x68, 0x79, 0x68,
+0xc0, 0x19, 0x20, 0x30, 0x29, 0x4a, 0xff, 0xf7, 0x63, 0xff, 0x01, 0x20,
+0xc0, 0x02, 0x28, 0x49, 0xc0, 0x46, 0x08, 0x60, 0xb9, 0x68, 0x38, 0x1c,
+0x26, 0x4d, 0x00, 0x24, 0x26, 0x4e, 0xef, 0x1d, 0x79, 0x37, 0x00, 0x29,
+0x31, 0xd1, 0x31, 0x68, 0x0a, 0x78, 0x12, 0x0a, 0x03, 0xd2, 0x04, 0x73,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x49, 0x78, 0x00, 0x29, 0x0c, 0xd1,
+0x05, 0x1c, 0x40, 0x68, 0x00, 0xf0, 0x3e, 0xf9, 0x30, 0x68, 0x00, 0xf0,
+0x67, 0xf8, 0x00, 0x28, 0x26, 0xd1, 0x2c, 0x73, 0xff, 0xf7, 0x58, 0xff,
+0x22, 0xe0, 0x09, 0x01, 0x07, 0x1c, 0x41, 0x60, 0x08, 0x1c, 0x17, 0x4a,
+0x17, 0x49, 0xff, 0xf7, 0x35, 0xff, 0x00, 0x28, 0x07, 0xd1, 0x3c, 0x73,
+0x04, 0x23, 0xa8, 0x69, 0x98, 0x43, 0x99, 0x04, 0xa8, 0x61, 0x08, 0x61,
+0xda, 0xe7, 0x10, 0x20, 0x00, 0xf0, 0x20, 0xf9, 0x10, 0x20, 0xb8, 0x60,
+0xff, 0xf7, 0x82, 0xff, 0xd2, 0xe7, 0x05, 0x1c, 0x40, 0x68, 0x00, 0xf0,
+0x17, 0xf9, 0x30, 0x68, 0x00, 0xf0, 0x40, 0xf8, 0x00, 0x28, 0xd8, 0xd0,
+0x02, 0x23, 0xf8, 0x6b, 0x18, 0x43, 0xf8, 0x63, 0xc4, 0xe7, 0x00, 0x00,
+0x28, 0x05, 0x00, 0x80, 0xa5, 0x55, 0xff, 0xff, 0x00, 0x00, 0x00, 0xb0,
+0x68, 0x0e, 0x00, 0x80, 0xe4, 0x01, 0x00, 0x80, 0x20, 0x55, 0xff, 0xff,
+0x7d, 0x71, 0x21, 0x40, 0x90, 0xb5, 0x01, 0x20, 0x40, 0x03, 0x10, 0x49,
+0x00, 0x27, 0x08, 0x60, 0x0f, 0x4c, 0xe0, 0x1d, 0xff, 0x30, 0x3a, 0x30,
+0x47, 0x70, 0xe0, 0x69, 0x80, 0x00, 0x00, 0x19, 0x00, 0x69, 0x00, 0xf0,
+0xd7, 0xf8, 0xe0, 0x69, 0x00, 0x28, 0x01, 0xd0, 0xe7, 0x61, 0x01, 0xe0,
+0x01, 0x20, 0xe0, 0x61, 0x07, 0x48, 0x02, 0x23, 0xc1, 0x6b, 0x19, 0x43,
+0xc1, 0x63, 0x27, 0x73, 0xff, 0xf7, 0x00, 0xff, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x28, 0x05, 0x00, 0x80,
+0xe8, 0x0e, 0x00, 0x80, 0x80, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x78, 0x88,
+0x6d, 0x28, 0x03, 0xdb, 0x38, 0x1c, 0x00, 0xf0,
+0xf7, 0xf8, 0x17, 0xe0, 0x80, 0x00, 0x0d, 0x49, 0x09, 0x58, 0x38, 0x1c,
+0xff, 0xf7, 0xcb, 0xfe, 0x00, 0x28, 0x0f, 0xd1, 0x39, 0x78, 0xc9, 0x09,
+0x0c, 0xd3, 0x69, 0x46, 0x38, 0x1c, 0x00, 0xf0, 0xcf, 0xf8, 0x68, 0x46,
+0x00, 0x21, 0x00, 0xf0, 0x0b, 0xf8, 0x00, 0x28, 0x01, 0xd1, 0x01, 0x20,
+0x00, 0xe0, 0x00, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xe8, 0x01, 0x00, 0x80, 0xf0, 0xb5, 0x82, 0xb0, 0x02, 0x1c, 0x41, 0x4b,
+0xdd, 0x1d, 0xff, 0x35, 0x3a, 0x35, 0x2f, 0x78, 0x00, 0x2f, 0x01, 0xd0,
+0x00, 0x27, 0x00, 0xe0, 0x01, 0x27, 0x2f, 0x70, 0x2f, 0x78, 0xfb, 0x00,
+0xdb, 0x19, 0x5b, 0x01, 0x3a, 0x4f, 0xdc, 0x19, 0x40, 0x78, 0x00, 0x01,
+0xc7, 0x1d, 0x09, 0x37, 0x00, 0x20, 0x83, 0x00, 0xd6, 0x58, 0xc0, 0x46,
+0xe6, 0x50, 0x01, 0x30, 0x04, 0x28, 0xf8, 0xd3, 0x00, 0x29, 0x0f, 0xd0,
+0x00, 0x22, 0xbb, 0x08, 0x01, 0x93, 0x83, 0x42, 0x0b, 0xd9, 0x13, 0x1c,
+0x9b, 0x00, 0xcb, 0x58, 0x86, 0x00, 0xa3, 0x51, 0x01, 0x9b, 0x01, 0x30,
+0x01, 0x32, 0x83, 0x42, 0xf5, 0xd8, 0x00, 0xe0, 0x10, 0x27, 0x2b, 0x48,
+0x02, 0x6d, 0x80, 0x6e, 0x2a, 0x49, 0x82, 0x42, 0x03, 0xd8, 0x82, 0x1a,
+0xcb, 0x6c, 0x9a, 0x1a, 0x00, 0xe0, 0x12, 0x1a, 0xba, 0x42, 0x05, 0xd8,
+0x26, 0x48, 0x81, 0x6b, 0x01, 0x31, 0x81, 0x63, 0x01, 0x20, 0x37, 0xe0,
+0xc3, 0x19, 0xca, 0x6c, 0x93, 0x42, 0x08, 0xd8, 0x22, 0x4a, 0x3a, 0x43,
+0x00, 0x92, 0x0a, 0x1c, 0x49, 0x6c, 0x09, 0x18, 0x92, 0x6c, 0x23, 0x1c,
+0x12, 0xe0, 0x16, 0x1a, 0x00, 0x96, 0x1b, 0x49, 0x49, 0x6c, 0x09, 0x18,
+0x19, 0x48, 0x82, 0x6c, 0x03, 0x20, 0x23, 0x1c, 0xff, 0xf7, 0x5e, 0xfe,
+0xb8, 0x1b, 0x18, 0x4a, 0x02, 0x43, 0x00, 0x92, 0xa3, 0x19, 0x14, 0x48,
+0x82, 0x6c, 0x41, 0x6c, 0x03, 0x20, 0xff, 0xf7, 0x53, 0xfe, 0x01, 0x20,
+0x0d, 0x49, 0xc0, 0x46, 0x68, 0x70, 0x8a, 0x69, 0x92, 0x00, 0x52, 0x18,
+0x17, 0x61, 0x8a, 0x69, 0x00, 0x2a, 0x02, 0xd0, 0x00, 0x27, 0x8f, 0x61,
+0x00, 0xe0, 0x88, 0x61, 0x0c, 0x48, 0x02, 0x23, 0xc1, 0x6b, 0x19, 0x43,
+0xc1, 0x63, 0x00, 0x20, 0x01, 0x27, 0x0a, 0x49, 0xc0, 0x46, 0x4f, 0x73,
+0x02, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x28, 0x05, 0x00, 0x80,
+0x50, 0xba, 0x20, 0x40, 0x68, 0x0e, 0x00, 0x80, 0x7c, 0x29, 0x00, 0x80,
+0xa0, 0x82, 0x20, 0x40, 0x00, 0x00, 0x19, 0x02, 0xe8, 0x0e, 0x00, 0x80,
+0x18, 0x1a, 0x00, 0x80, 0x07, 0x49, 0x8a, 0x6e, 0x10, 0x18, 0x07, 0x4a,
+0xd2, 0x6c, 0x13, 0x04, 0x1b, 0x0c, 0x83, 0x42, 0x00, 0xd8, 0x80, 0x1a,
+0x88, 0x66, 0x88, 0x6e, 0x03, 0x49, 0xc0, 0x46, 0x48, 0x61, 0x70, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x7c, 0x29, 0x00, 0x80, 0x90, 0xee, 0x20, 0x40,
+0x06, 0x49, 0x4a, 0x6e, 0x10, 0x18, 0x06, 0x4a, 0x12, 0x6c, 0x82, 0x42,
+0x00, 0xd8, 0x80, 0x1a, 0x48, 0x66, 0x48, 0x6e, 0x03, 0x49, 0xc0, 0x46,
+0x08, 0x61, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x7c, 0x29, 0x00, 0x80,
+0x90, 0xee, 0x20, 0x40, 0x05, 0x22, 0x0a, 0x60, 0x82, 0x88, 0xc0, 0x46,
+0x8a, 0x80, 0x00, 0x22, 0x4a, 0x70, 0x40, 0x88, 0xc0, 0x46, 0x48, 0x80,
+0xca, 0x80, 0x8a, 0x60, 0xca, 0x60, 0x70, 0x47, 0x05, 0x22, 0x02, 0x60,
+0x00, 0x22, 0x82, 0x80, 0x42, 0x70, 0x41, 0x80, 0xc2, 0x80, 0x82, 0x60,
+0xc2, 0x60, 0x70, 0x47, 0x80, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x0e, 0x48,
+0x41, 0x6b, 0x01, 0x31, 0x41, 0x63, 0x69, 0x46,
+0x38, 0x1c, 0xff, 0xf7, 0xdd, 0xff, 0x38, 0x68, 0xc0, 0x46, 0x00, 0x90,
+0x45, 0x20, 0x00, 0xab, 0x18, 0x70, 0x01, 0x27, 0xdf, 0x80, 0x68, 0x46,
+0x00, 0x21, 0xff, 0xf7, 0x11, 0xff, 0x00, 0x28, 0x01, 0xd1, 0x38, 0x1c,
+0x00, 0xe0, 0x00, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xa0, 0x82, 0x20, 0x40, 0x00, 0xb5, 0x84, 0xb0, 0xc1, 0x88, 0x09, 0x4a,
+0xc0, 0x46, 0x91, 0x81, 0x69, 0x46, 0xff, 0xf7, 0xbd, 0xff, 0x01, 0x20,
+0x40, 0x02, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7,
+0xf5, 0xfe, 0x01, 0x20, 0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0xe8, 0x0e, 0x00, 0x80, 0x00, 0xb5, 0xff, 0xf7, 0xc3, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x01, 0x20, 0x03, 0x49, 0xc0, 0x46, 0x08, 0x71, 0xa1, 0x21,
+0x49, 0x03, 0x88, 0x60, 0x00, 0x20, 0x70, 0x47, 0x28, 0x0f, 0x00, 0x80,
+0x00, 0x20, 0x04, 0x49, 0xc0, 0x46, 0x08, 0x71, 0xff, 0x21, 0xa1, 0x22,
+0x52, 0x03, 0x01, 0x31, 0x91, 0x60, 0x70, 0x47, 0x28, 0x0f, 0x00, 0x80,
+0x02, 0x20, 0xa1, 0x21, 0x49, 0x03, 0x88, 0x60, 0x00, 0x20, 0x70, 0x47,
+0x01, 0x20, 0x40, 0x02, 0xa1, 0x21, 0x49, 0x03, 0x88, 0x60, 0x00, 0x20,
+0x70, 0x47, 0xc0, 0x88, 0xc0, 0x06, 0xc0, 0x0e, 0xa1, 0x21, 0x49, 0x03,
+0x48, 0x61, 0x02, 0x49, 0xc0, 0x46, 0xc8, 0x63, 0x00, 0x20, 0x70, 0x47,
+0xe8, 0x1a, 0x00, 0x80, 0x80, 0xb5, 0x84, 0xb0, 0x08, 0x49, 0x0f, 0x6b,
+0x69, 0x46, 0xff, 0xf7, 0x71, 0xff, 0xf8, 0x06, 0xc0, 0x0e, 0x01, 0xab,
+0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7, 0xa9, 0xfe, 0x01, 0x20,
+0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x80, 0x00, 0x14, 0x40,
+0x80, 0xb5, 0x85, 0xb0, 0x07, 0x1c, 0x69, 0x46, 0x38, 0x1c, 0xff, 0xf7,
+0x5b, 0xff, 0xf8, 0x88, 0x04, 0xa9, 0x03, 0xf0, 0xc9, 0xff, 0x01, 0xab,
+0x58, 0x80, 0x01, 0xa8, 0x40, 0x88, 0x00, 0x28, 0x0f, 0xd0, 0x01, 0xa8,
+0x40, 0x88, 0x80, 0x08, 0x03, 0x38, 0x80, 0x08, 0x01, 0x30, 0x04, 0x3b,
+0x58, 0x70, 0x04, 0x98, 0x01, 0x68, 0xc0, 0x46, 0x02, 0x91, 0x40, 0x68,
+0xc0, 0x46, 0x03, 0x90, 0x05, 0xe0, 0x00, 0xa8, 0x00, 0x78, 0x40, 0x23,
+0x18, 0x43, 0x00, 0xab, 0x18, 0x70, 0x04, 0x98, 0xc1, 0x1d, 0x01, 0x31,
+0x68, 0x46, 0xff, 0xf7, 0x75, 0xfe, 0x01, 0x20, 0x05, 0xb0, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x90, 0xb5, 0x84, 0xb0, 0x14, 0x4f, 0x39, 0x7b,
+0x00, 0x29, 0x20, 0xd1, 0xf9, 0x1d, 0xff, 0x31, 0x3a, 0x31, 0x49, 0x78,
+0x00, 0x29, 0x1a, 0xd1, 0x10, 0x49, 0x05, 0x22, 0x00, 0x92, 0x08, 0x22,
+0x00, 0xab, 0x5a, 0x80, 0x98, 0x80, 0x06, 0x20, 0x00, 0xab, 0x58, 0x70,
+0x00, 0x24, 0xdc, 0x80, 0x08, 0x68, 0xc0, 0x46, 0x02, 0x90, 0x48, 0x68,
+0xc0, 0x46, 0x03, 0x90, 0x01, 0x20, 0x38, 0x73, 0x68, 0x46, 0x08, 0x31,
+0xff, 0xf7, 0x4c, 0xfe, 0x00, 0x28, 0x00, 0xd0, 0x3c, 0x73, 0x04, 0xb0,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x28, 0x05, 0x00, 0x80,
+0xa4, 0x2a, 0x00, 0x80, 0x90, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x69, 0x46,
+0x38, 0x1c, 0xff, 0xf7, 0xf9, 0xfe, 0xba, 0x68, 0x0d, 0x4c, 0x0e, 0x48,
+0x00, 0x2a, 0x05, 0xd1, 0x0d, 0x49, 0xff, 0xf7, 0xe4, 0xfc, 0x00, 0x28,
+0x0c, 0xda, 0x05, 0xe0, 0xb9, 0x88, 0x0b, 0x4b, 0xff, 0xf7, 0xdf, 0xfc,
+0x00, 0x28, 0x05, 0xda, 0x01, 0xab, 0x5c, 0x80, 0x68, 0x46, 0x00, 0x21,
+0xff, 0xf7, 0x22, 0xfe, 0x00, 0x20, 0x04, 0xb0,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
+0x0d, 0x76, 0x21, 0x40, 0xc1, 0xbd, 0x21, 0x40, 0x59, 0xbd, 0x21, 0x40,
+0x00, 0xb5, 0xc0, 0x88, 0x03, 0xf0, 0x2e, 0xff, 0x00, 0x20, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0xb5, 0xff, 0xf7, 0xe2, 0xfe, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0xb5, 0xff, 0xf7, 0xdd, 0xfe, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0x01, 0x1c, 0x02, 0x20, 0x00, 0xf0, 0x02, 0xf8, 0x08, 0xbc, 0x18, 0x47,
+0xb0, 0xb5, 0xc6, 0xb0, 0x07, 0x1c, 0x08, 0x1c, 0x69, 0x46, 0xff, 0xf7,
+0xb5, 0xfe, 0x21, 0x48, 0xff, 0xf7, 0xa4, 0xfc, 0x04, 0x1c, 0x20, 0x4a,
+0x00, 0x21, 0x38, 0x1c, 0xff, 0xf7, 0xa0, 0xfc, 0x00, 0x28, 0x27, 0xd0,
+0x04, 0xa9, 0x1d, 0x4a, 0x38, 0x1c, 0xff, 0xf7, 0x99, 0xfc, 0x04, 0xa8,
+0x00, 0x23, 0x01, 0x2f, 0x06, 0xd1, 0x0c, 0xaa, 0x02, 0x32, 0x00, 0x21,
+0x13, 0x60, 0x01, 0x31, 0x10, 0x29, 0xfb, 0xd3, 0x01, 0x68, 0x04, 0x29,
+0x04, 0xd9, 0x89, 0x08, 0x03, 0x39, 0x89, 0x08, 0x01, 0x31, 0x00, 0xe0,
+0x19, 0x1c, 0x00, 0xab, 0x59, 0x70, 0x06, 0xa9, 0x09, 0x78, 0xc0, 0x46,
+0xd9, 0x80, 0x00, 0x68, 0xc0, 0x46, 0x02, 0x90, 0x07, 0x98, 0xc0, 0x46,
+0x03, 0x90, 0x04, 0x33, 0x08, 0xad, 0x02, 0xe0, 0x45, 0x20, 0x00, 0xab,
+0x18, 0x70, 0x09, 0x49, 0x20, 0x1c, 0xff, 0xf7, 0x6e, 0xfc, 0x68, 0x46,
+0x29, 0x1c, 0xff, 0xf7, 0xb7, 0xfd, 0x01, 0x20, 0x46, 0xb0, 0xb0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x24, 0x02, 0xff, 0xff, 0x59, 0xb1, 0x21, 0x40,
+0x9d, 0xaf, 0x21, 0x40, 0x3c, 0x02, 0xff, 0xff, 0x00, 0xb5, 0x01, 0x1c,
+0x02, 0x20, 0x00, 0xf0, 0x10, 0xf8, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0x01, 0x1c, 0x01, 0x20, 0xff, 0xf7, 0xa2, 0xff, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0xb5, 0x01, 0x1c, 0x01, 0x20, 0x00, 0xf0, 0x02, 0xf8, 0x08, 0xbc,
+0x18, 0x47, 0xf0, 0xb5, 0xc7, 0xb0, 0x04, 0x1c, 0x0f, 0x1c, 0x38, 0x1c,
+0x01, 0xa9, 0xff, 0xf7, 0x4d, 0xfe, 0x21, 0x48, 0xff, 0xf7, 0x3c, 0xfc,
+0x00, 0x90, 0x78, 0x78, 0x00, 0x01, 0xba, 0x68, 0x04, 0x30, 0xfc, 0x2a,
+0x25, 0xd8, 0xff, 0x23, 0x09, 0x33, 0x98, 0x42, 0x21, 0xd8, 0x19, 0x2c,
+0x1f, 0xd8, 0xfd, 0x88, 0xf8, 0x68, 0xc0, 0x46, 0x05, 0x90, 0xf9, 0x1d,
+0x09, 0x31, 0x06, 0xab, 0x00, 0x20, 0x7e, 0x78, 0x00, 0x2e, 0x0d, 0xdd,
+0x40, 0xc9, 0x40, 0xc3, 0x40, 0xc9, 0x40, 0xc3, 0x40, 0xc9, 0x40, 0xc3,
+0x40, 0xc9, 0x40, 0xc3, 0x01, 0x30, 0x00, 0x04, 0x00, 0x0c, 0x7e, 0x78,
+0x86, 0x42, 0xf1, 0xdc, 0x20, 0x1c, 0x05, 0xa9, 0x2b, 0x1c, 0xff, 0xf7,
+0x21, 0xfc, 0x00, 0x28, 0x05, 0xd0, 0x01, 0xa8, 0x00, 0x78, 0x40, 0x23,
+0x18, 0x43, 0x01, 0xab, 0x18, 0x70, 0x07, 0x49, 0x00, 0x98, 0xff, 0xf7,
+0x06, 0xfc, 0x00, 0x21, 0x01, 0xa8, 0xff, 0xf7, 0x4f, 0xfd, 0x01, 0x20,
+0x47, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x24, 0x02, 0xff, 0xff,
+0x3c, 0x02, 0xff, 0xff, 0x00, 0xb5, 0xff, 0xf7, 0x1b, 0xfe, 0x08, 0xbc,
+0x18, 0x47, 0xf0, 0xb5, 0xc6, 0xb0, 0x07, 0x1c, 0xfc, 0x88, 0x25, 0x4d,
+0x68, 0x68, 0x01, 0x30, 0x69, 0x46, 0x68, 0x60, 0x38, 0x1c, 0xff, 0xf7,
+0xf5, 0xfd, 0x10, 0x2c, 0x08, 0xd3, 0x00, 0xa8, 0x00, 0x78, 0x40, 0x23,
+0x18, 0x43, 0x00, 0xab, 0x18, 0x70, 0x02, 0x20, 0xd8, 0x80, 0x17, 0xe0,
+0x78, 0x78, 0x82, 0x00, 0xfb, 0x1d, 0x09, 0x33, 0x00, 0x20, 0xb9, 0x68,
+0x00, 0x2a, 0x15, 0xd9, 0x40, 0xcb, 0x0f, 0x1c,
+0x01, 0x31, 0xbe, 0x42, 0x0d, 0xd0, 0x00, 0xaa, 0x12, 0x78, 0x40, 0x23,
+0x1a, 0x43, 0x00, 0xab, 0x1a, 0x70, 0x04, 0x22, 0xda, 0x80, 0x02, 0x90,
+0x03, 0x91, 0x04, 0x33, 0x68, 0x46, 0x00, 0x21, 0x15, 0xe0, 0x01, 0x30,
+0x90, 0x42, 0xe9, 0xd3, 0x00, 0xab, 0x5c, 0x70, 0x02, 0x94, 0x69, 0x68,
+0xc0, 0x46, 0x03, 0x91, 0xa2, 0x00, 0x00, 0x20, 0x10, 0x33, 0x00, 0x2a,
+0x05, 0xd9, 0x0f, 0x1c, 0x80, 0xc3, 0x01, 0x30, 0x01, 0x31, 0x90, 0x42,
+0xf9, 0xd3, 0x68, 0x46, 0x04, 0xa9, 0xff, 0xf7, 0xf7, 0xfc, 0x01, 0x20,
+0x46, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x9c, 0x03, 0x00, 0x80,
+0x90, 0xb4, 0x23, 0x48, 0x00, 0x68, 0x01, 0x21, 0x42, 0x09, 0x00, 0xd3,
+0x00, 0x21, 0x00, 0x27, 0x3a, 0x1c, 0x43, 0x0b, 0x00, 0xd2, 0x02, 0x22,
+0x11, 0x43, 0x1e, 0x4a, 0x20, 0x24, 0xd3, 0x68, 0x01, 0x2b, 0x2e, 0xd1,
+0x80, 0x0a, 0x00, 0xd2, 0x00, 0x24, 0x0c, 0x43, 0x20, 0x1c, 0x1b, 0x23,
+0xdb, 0x01, 0xd1, 0x18, 0x89, 0x8b, 0x09, 0x0b, 0x00, 0xd2, 0x04, 0x27,
+0x38, 0x43, 0xd1, 0x6f, 0x09, 0x68, 0x09, 0x0a, 0x07, 0xd2, 0xd1, 0x1d,
+0x79, 0x31, 0x09, 0x68, 0x09, 0x68, 0x09, 0x0a, 0x01, 0xd3, 0x08, 0x23,
+0x18, 0x43, 0xe3, 0x23, 0x1b, 0x01, 0xd1, 0x18, 0x89, 0x79, 0x03, 0x29,
+0x02, 0xd1, 0xff, 0x23, 0x01, 0x33, 0x18, 0x43, 0x0b, 0x49, 0x09, 0x6a,
+0x10, 0x22, 0x4b, 0x0a, 0x00, 0xd2, 0x00, 0x22, 0x10, 0x43, 0x89, 0x07,
+0x89, 0x0f, 0x89, 0x01, 0x08, 0x43, 0x90, 0xbc, 0x70, 0x47, 0x40, 0x0c,
+0x00, 0xd2, 0x00, 0x24, 0x0c, 0x43, 0x20, 0x1c, 0xec, 0xe7, 0x00, 0x00,
+0x00, 0x00, 0x10, 0x40, 0x68, 0x0e, 0x00, 0x80, 0xc0, 0x00, 0x18, 0x40,
+0xf0, 0xb5, 0x3a, 0x4c, 0x20, 0x1c, 0x04, 0xf0, 0x07, 0xfa, 0x39, 0x48,
+0xe3, 0x23, 0x1b, 0x01, 0xc7, 0x18, 0xb9, 0x79, 0x37, 0x4e, 0xc5, 0x1d,
+0x79, 0x35, 0x06, 0x29, 0x62, 0xd2, 0x02, 0xa3, 0x5b, 0x5c, 0x5b, 0x00,
+0x9f, 0x44, 0x00, 0x1c, 0x03, 0x0e, 0x1e, 0x37, 0x4e, 0x55, 0x01, 0x20,
+0xb8, 0x71, 0x00, 0x20, 0xb0, 0x60, 0xff, 0xf7, 0x95, 0xff, 0x05, 0x23,
+0x98, 0x43, 0x00, 0xf0, 0x6f, 0xf8, 0x0c, 0xe0, 0xff, 0xf7, 0x8e, 0xff,
+0xc0, 0x08, 0x06, 0xd3, 0xb0, 0x68, 0x41, 0x1c, 0xb1, 0x60, 0x0a, 0x28,
+0x03, 0xd9, 0x04, 0x20, 0x00, 0xe0, 0x02, 0x20, 0xb8, 0x71, 0x64, 0x22,
+0x20, 0x1c, 0x2b, 0xe0, 0x06, 0x1c, 0xc0, 0x6f, 0x80, 0x23, 0x01, 0x68,
+0x19, 0x43, 0x01, 0x60, 0x03, 0x20, 0xb8, 0x71, 0x20, 0x1c, 0x20, 0x4a,
+0x00, 0x21, 0x04, 0xf0, 0x99, 0xf9, 0xf0, 0x6f, 0x04, 0x23, 0x01, 0x68,
+0x99, 0x43, 0x01, 0x60, 0x28, 0x68, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x05, 0x21, 0xb9, 0x71, 0x29, 0x68,
+0x04, 0x23, 0x0a, 0x68, 0x9a, 0x43, 0x0a, 0x60, 0xc0, 0x6f, 0x01, 0x68,
+0x19, 0x43, 0x01, 0x60, 0xff, 0xf7, 0x5a, 0xff, 0x08, 0x23, 0x18, 0x43,
+0x00, 0xf0, 0x34, 0xf8, 0x20, 0x1c, 0x10, 0x4a, 0x00, 0x21, 0x04, 0xf0,
+0x77, 0xf9, 0xe5, 0xe7, 0xff, 0xf7, 0x4e, 0xff, 0x04, 0x23, 0x18, 0x43,
+0x00, 0xf0, 0x28, 0xf8, 0xde, 0xe7, 0x00, 0x20, 0x29, 0x68, 0x60, 0x23,
+0x0a, 0x68, 0x9a, 0x43, 0x0a, 0x60, 0xff, 0xf7, 0xe3, 0xfa, 0xd5, 0xe7,
+0x06, 0x20, 0xb8, 0x71, 0xd2, 0xe7, 0x00, 0x00, 0xa9, 0x79, 0x21, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x9c, 0x03, 0x00, 0x80, 0x30, 0x75, 0x00, 0x00,
+0x10, 0x27, 0x00, 0x00, 0x00, 0xb5, 0x00, 0x20,
+0x04, 0x49, 0xc0, 0x46, 0x88, 0x71, 0x04, 0x48, 0x01, 0x22, 0x00, 0x21,
+0x04, 0xf0, 0x4e, 0xf9, 0x08, 0xbc, 0x18, 0x47, 0x98, 0x1c, 0x00, 0x80,
+0xa9, 0x79, 0x21, 0x40, 0x90, 0xb5, 0x07, 0x1c, 0x31, 0x48, 0x00, 0x68,
+0x79, 0x08, 0x03, 0xd3, 0x10, 0x23, 0x01, 0x1c, 0x99, 0x43, 0x01, 0xe0,
+0x10, 0x21, 0x01, 0x43, 0x2d, 0x4c, 0xe2, 0x68, 0x01, 0x2a, 0x05, 0xd1,
+0x22, 0x79, 0x00, 0x2a, 0x02, 0xd0, 0x01, 0x23, 0x9b, 0x02, 0x19, 0x43,
+0x81, 0x42, 0x02, 0xd0, 0x01, 0x20, 0x00, 0x05, 0x01, 0x60, 0xe0, 0x68,
+0x01, 0x28, 0x20, 0xd1, 0x1b, 0x23, 0xdb, 0x01, 0xe0, 0x18, 0x80, 0x8b,
+0xf9, 0x08, 0x04, 0xd3, 0x01, 0x23, 0xdb, 0x02, 0x01, 0x1c, 0x99, 0x43,
+0x01, 0xe0, 0x01, 0x21, 0xc9, 0x02, 0x81, 0x42, 0x02, 0xd0, 0x00, 0x20,
+0x02, 0xf0, 0x1a, 0xfb, 0x38, 0x09, 0x07, 0xd3, 0xe0, 0x6f, 0x80, 0x23,
+0x01, 0x68, 0x99, 0x43, 0x01, 0x60, 0xe0, 0x18, 0x00, 0x68, 0x00, 0xe0,
+0xe0, 0x6f, 0x80, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x15, 0x48,
+0x01, 0x6a, 0x78, 0x09, 0x03, 0xd3, 0xff, 0x20, 0x01, 0x30, 0x08, 0x43,
+0x03, 0xe0, 0xff, 0x23, 0x08, 0x1c, 0x01, 0x33, 0x98, 0x43, 0x80, 0x08,
+0x80, 0x00, 0xba, 0x09, 0x92, 0x07, 0x92, 0x0f, 0x10, 0x43, 0x88, 0x42,
+0x02, 0xd0, 0x0c, 0x49, 0xc0, 0x46, 0x08, 0x62, 0xe1, 0x68, 0x01, 0x29,
+0x08, 0xd1, 0x79, 0x0a, 0x06, 0xd3, 0xff, 0x23, 0x04, 0x33, 0x18, 0x40,
+0x03, 0x28, 0x01, 0xd1, 0xff, 0xf7, 0x8e, 0xff, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x68, 0x0e, 0x00, 0x80,
+0xc0, 0x00, 0x18, 0x40, 0xc0, 0x00, 0x18, 0x00, 0x80, 0xb5, 0xff, 0xf7,
+0xb1, 0xfe, 0x80, 0x09, 0x1b, 0xd2, 0x0f, 0x48, 0xe3, 0x23, 0x1b, 0x01,
+0xc1, 0x18, 0x4a, 0x79, 0x00, 0x2a, 0x14, 0xd1, 0x01, 0x22, 0x4a, 0x71,
+0x00, 0x27, 0x80, 0x30, 0x00, 0x68, 0x60, 0x23, 0x01, 0x68, 0x99, 0x43,
+0x01, 0x60, 0x08, 0x48, 0x06, 0xe0, 0x02, 0x20, 0x02, 0xf0, 0x8c, 0xfc,
+0x07, 0x20, 0x02, 0xf0, 0x5b, 0xfc, 0x38, 0x1c, 0xff, 0xf7, 0x36, 0xfa,
+0xf5, 0xe7, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0xf4, 0x01, 0xff, 0xff, 0x00, 0xb5, 0x84, 0xb0, 0x69, 0x46, 0xff, 0xf7,
+0x37, 0xfc, 0xff, 0xf7, 0x85, 0xfe, 0x01, 0xab, 0x58, 0x80, 0x08, 0x48,
+0x00, 0x68, 0xc0, 0x46, 0x02, 0x90, 0x07, 0x48, 0x00, 0x6a, 0xc0, 0x46,
+0x03, 0x90, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7, 0x67, 0xfb, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40,
+0xc0, 0x00, 0x18, 0x40, 0x80, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x69, 0x46,
+0x38, 0x1c, 0xff, 0xf7, 0x17, 0xfc, 0xf8, 0x88, 0xff, 0xf7, 0x42, 0xff,
+0xff, 0xf7, 0x62, 0xfe, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21,
+0xff, 0xf7, 0x4c, 0xfb, 0x01, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0xb0, 0xb5, 0xc6, 0xb0, 0xc7, 0x88, 0x69, 0x46, 0xff, 0xf7,
+0x01, 0xfc, 0x01, 0x24, 0x1a, 0x4b, 0x9f, 0x42, 0x0a, 0xd9, 0x00, 0xa8,
+0x00, 0x78, 0x40, 0x23, 0x18, 0x43, 0x00, 0xab, 0x18, 0x70, 0x02, 0x20,
+0xd8, 0x80, 0x68, 0x46, 0x00, 0x21, 0x20, 0xe0, 0x14, 0x48, 0xff, 0xf7,
+0xe1, 0xf9, 0x05, 0x1c, 0x13, 0x4a, 0x38, 0x1c, 0x04, 0xa9, 0xff, 0xf7,
+0xdd, 0xf9, 0x12, 0x49, 0x28, 0x1c, 0xff, 0xf7, 0xd8, 0xf9, 0x01, 0x2f,
+0x06, 0xd1, 0x0c, 0xa9, 0x00, 0x20, 0x00, 0x22,
+0x0a, 0x60, 0x01, 0x30, 0x10, 0x28, 0xfb, 0xd3, 0x10, 0x20, 0x00, 0xab,
+0x58, 0x70, 0x04, 0x98, 0xc0, 0x46, 0x02, 0x90, 0x05, 0x98, 0xc0, 0x46,
+0x03, 0x90, 0x68, 0x46, 0x06, 0xa9, 0xff, 0xf7, 0x0f, 0xfb, 0x20, 0x1c,
+0x46, 0xb0, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xff, 0x01, 0x00, 0x00,
+0x24, 0x02, 0xff, 0xff, 0x9d, 0xaf, 0x21, 0x40, 0x3c, 0x02, 0xff, 0xff,
+0xf0, 0xb5, 0xc6, 0xb0, 0x07, 0x1c, 0x69, 0x46, 0x38, 0x1c, 0xff, 0xf7,
+0xbb, 0xfb, 0xfc, 0x88, 0x78, 0x78, 0x01, 0x25, 0x10, 0x28, 0x01, 0xd1,
+0x19, 0x2c, 0x09, 0xd9, 0x00, 0xa8, 0x00, 0x78, 0x40, 0x23, 0x18, 0x43,
+0x00, 0xab, 0x18, 0x70, 0x02, 0x20, 0xd8, 0x80, 0x04, 0x33, 0x27, 0xe0,
+0xb8, 0x68, 0xc0, 0x46, 0x04, 0x90, 0xf8, 0x68, 0xc0, 0x46, 0x05, 0x90,
+0x06, 0xaa, 0xfb, 0x1d, 0x09, 0x33, 0x00, 0x21, 0x78, 0x78, 0x00, 0x28,
+0x0d, 0xdd, 0x00, 0x20, 0x40, 0xcb, 0x40, 0xc2, 0x01, 0x30, 0x00, 0x04,
+0x00, 0x0c, 0x04, 0x28, 0xf8, 0xdb, 0x48, 0x1c, 0x01, 0x04, 0x09, 0x0c,
+0x78, 0x78, 0x88, 0x42, 0xf1, 0xdc, 0x0b, 0x48, 0xff, 0xf7, 0x7e, 0xf9,
+0x07, 0x1c, 0x0a, 0x4a, 0x20, 0x1c, 0x04, 0xa9, 0xff, 0xf7, 0x7a, 0xf9,
+0x08, 0x49, 0x38, 0x1c, 0xff, 0xf7, 0x75, 0xf9, 0x68, 0x46, 0x00, 0x21,
+0xff, 0xf7, 0xbe, 0xfa, 0x28, 0x1c, 0x46, 0xb0, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x24, 0x02, 0xff, 0xff, 0xc5, 0xaf, 0x21, 0x40,
+0x3c, 0x02, 0xff, 0xff, 0xf0, 0xb5, 0x84, 0xb0, 0x04, 0x1c, 0x00, 0x27,
+0xe6, 0x88, 0xa2, 0x68, 0x47, 0x49, 0x08, 0x79, 0x00, 0x28, 0x08, 0xd0,
+0x00, 0x2e, 0x01, 0xd0, 0x01, 0x2e, 0x01, 0xd1, 0x01, 0x27, 0x01, 0xe0,
+0x04, 0x2e, 0x00, 0xd1, 0x03, 0x26, 0x01, 0x25, 0x41, 0x48, 0x05, 0x2e,
+0x66, 0xd2, 0x02, 0xa3, 0x9b, 0x5d, 0x5b, 0x00, 0x9f, 0x44, 0x00, 0x1c,
+0x03, 0x06, 0x08, 0x0c, 0x10, 0x00, 0x05, 0x80, 0x00, 0x23, 0x03, 0xe0,
+0x05, 0x80, 0x05, 0xe0, 0x00, 0x23, 0x03, 0x80, 0x43, 0x80, 0x06, 0xe0,
+0x00, 0x23, 0x03, 0x80, 0x45, 0x80, 0x02, 0xe0, 0xff, 0x23, 0x01, 0x33,
+0x03, 0x80, 0xcb, 0x1d, 0x79, 0x33, 0x9e, 0x89, 0x01, 0x23, 0x5b, 0x02,
+0x9e, 0x42, 0x02, 0xdb, 0xd2, 0x07, 0xd2, 0x0f, 0x00, 0xe0, 0x01, 0x22,
+0x6d, 0x23, 0x5b, 0x01, 0xc9, 0x18, 0x89, 0x88, 0xff, 0x23, 0xe1, 0x33,
+0x99, 0x43, 0x01, 0x23, 0x19, 0x43, 0x06, 0x88, 0xff, 0x33, 0x9e, 0x42,
+0x0d, 0xd1, 0xff, 0x20, 0xe1, 0x30, 0x08, 0x43, 0x00, 0x2a, 0x04, 0xd1,
+0x01, 0x23, 0x9b, 0x02, 0x98, 0x43, 0x01, 0x1c, 0x20, 0xe0, 0x01, 0x21,
+0x89, 0x02, 0x01, 0x43, 0x1c, 0xe0, 0x01, 0x2e, 0x0a, 0xd1, 0x40, 0x88,
+0x01, 0x28, 0x04, 0xd1, 0x60, 0x23, 0x19, 0x43, 0x00, 0x2a, 0x13, 0xd0,
+0x0c, 0xe0, 0x20, 0x23, 0x19, 0x43, 0x0f, 0xe0, 0x00, 0x2e, 0x0d, 0xd1,
+0x40, 0x88, 0x01, 0x28, 0x08, 0xd1, 0xff, 0x23, 0x81, 0x33, 0x19, 0x43,
+0x00, 0x2a, 0x05, 0xd0, 0x01, 0x23, 0x9b, 0x02, 0x19, 0x43, 0x01, 0xe0,
+0x80, 0x23, 0x19, 0x43, 0x04, 0x20, 0x02, 0xf0, 0x75, 0xf9, 0x09, 0x21,
+0x49, 0x02, 0x00, 0x20, 0x02, 0xf0, 0x70, 0xf9, 0x00, 0x2f, 0x02, 0xd1,
+0x00, 0x20, 0x12, 0xe0, 0xff, 0xe7, 0x69, 0x46, 0x20, 0x1c, 0xff, 0xf7,
+0xef, 0xfa, 0x00, 0xa8, 0x00, 0x78, 0x40, 0x23, 0x18, 0x43, 0x00, 0xab,
+0x18, 0x70, 0x02, 0x20, 0xd8, 0x80, 0x68, 0x46, 0x00, 0x21, 0x04, 0x33,
+0xff, 0xf7, 0x22, 0xfa, 0x28, 0x1c, 0x04, 0xb0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x88, 0x1c, 0x00, 0x80, 0xc0, 0x88, 0x51, 0x21, 0x89, 0x03, 0x08, 0x62,
+0x00, 0x20, 0x70, 0x47, 0x80, 0xb5, 0x16, 0x4f, 0xf8, 0x68, 0x01, 0x28,
+0x07, 0xd1, 0x37, 0x23, 0x9b, 0x01, 0xf8, 0x18, 0x40, 0x8a, 0x80, 0x21,
+0x01, 0x43, 0x1b, 0x20, 0x07, 0xe0, 0x6d, 0x23, 0x5b, 0x01, 0xf8, 0x18,
+0x80, 0x8b, 0x01, 0x21, 0x49, 0x03, 0x01, 0x43, 0x10, 0x20, 0x02, 0xf0,
+0x33, 0xf9, 0x01, 0x20, 0x71, 0x23, 0x5b, 0x01, 0xf9, 0x18, 0x08, 0x80,
+0x48, 0x80, 0x1b, 0x23, 0xdb, 0x01, 0xf8, 0x18, 0x80, 0x8b, 0x01, 0x23,
+0x1b, 0x03, 0x98, 0x43, 0x41, 0x21, 0x09, 0x02, 0x01, 0x43, 0x00, 0x20,
+0x02, 0xf0, 0x20, 0xf9, 0x00, 0x20, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x80, 0xb5, 0x17, 0x4f, 0xf8, 0x68, 0x01, 0x28,
+0x08, 0xd1, 0x37, 0x23, 0x9b, 0x01, 0xf8, 0x18, 0x40, 0x8a, 0x80, 0x23,
+0x98, 0x43, 0x01, 0x1c, 0x1b, 0x20, 0x08, 0xe0, 0x6d, 0x23, 0x5b, 0x01,
+0xf8, 0x18, 0x80, 0x8b, 0x01, 0x23, 0x5b, 0x03, 0x98, 0x43, 0x01, 0x1c,
+0x10, 0x20, 0x02, 0xf0, 0x01, 0xf9, 0xff, 0x20, 0x71, 0x23, 0x5b, 0x01,
+0xf9, 0x18, 0x01, 0x30, 0x08, 0x80, 0x1b, 0x23, 0xdb, 0x01, 0xf8, 0x18,
+0x80, 0x8b, 0x41, 0x23, 0x1b, 0x02, 0x98, 0x43, 0x09, 0x21, 0x49, 0x02,
+0x01, 0x43, 0x00, 0x20, 0x02, 0xf0, 0xee, 0xf8, 0x00, 0x20, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x80, 0xb5, 0x84, 0xb0,
+0x08, 0x49, 0xcf, 0x6a, 0x69, 0x46, 0xff, 0xf7, 0x69, 0xfa, 0xb8, 0x05,
+0x80, 0x0d, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7,
+0xa1, 0xf9, 0x01, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x40, 0x00, 0x14, 0x40, 0xc0, 0x88, 0x9f, 0x23, 0x18, 0x40, 0x05, 0x49,
+0xc9, 0x6a, 0x1b, 0x23, 0x5b, 0x01, 0x19, 0x40, 0x08, 0x43, 0x03, 0x49,
+0xc0, 0x46, 0xc8, 0x62, 0x00, 0x20, 0x70, 0x47, 0x40, 0x00, 0x14, 0x40,
+0x40, 0x00, 0x14, 0x00, 0x80, 0xb5, 0x84, 0xb0, 0x0d, 0x49, 0x0f, 0x6a,
+0x01, 0x2f, 0x01, 0xd1, 0xff, 0x03, 0x07, 0xe0, 0x02, 0x2f, 0x01, 0xd1,
+0x3f, 0x03, 0x03, 0xe0, 0x00, 0x2f, 0x01, 0xd1, 0x01, 0x27, 0xff, 0x02,
+0x69, 0x46, 0xff, 0xf7, 0x35, 0xfa, 0x01, 0xab, 0x5f, 0x80, 0x68, 0x46,
+0x00, 0x21, 0xff, 0xf7, 0x6f, 0xf9, 0x01, 0x20, 0x04, 0xb0, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0x20, 0x14, 0x40, 0xc2, 0x88, 0xa1, 0x20,
+0x40, 0x03, 0x00, 0x21, 0x01, 0x23, 0x5b, 0x03, 0x9a, 0x42, 0x01, 0xd1,
+0x02, 0x22, 0x04, 0xe0, 0x01, 0x23, 0xdb, 0x03, 0x9a, 0x42, 0x02, 0xd1,
+0x01, 0x22, 0x02, 0x62, 0x00, 0xe0, 0x01, 0x62, 0x08, 0x1c, 0x70, 0x47,
+0x90, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x02, 0xf0, 0x9f, 0xf8, 0x69, 0x46,
+0x04, 0x1c, 0x38, 0x1c, 0xff, 0xf7, 0x0a, 0xfa, 0x01, 0xab, 0x5c, 0x80,
+0x09, 0x4f, 0xf8, 0x6d, 0xc0, 0x46, 0x02, 0x90, 0x68, 0x46, 0x00, 0x21,
+0xff, 0xf7, 0x40, 0xf9, 0xf8, 0x6d, 0xc0, 0x07, 0xc0, 0x0f, 0x05, 0x49,
+0xc0, 0x46, 0xc8, 0x62, 0x01, 0x20, 0x04, 0xb0, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0xa4, 0x2a, 0x00, 0x80, 0x68, 0x1c, 0x00, 0x80,
+0xc0, 0x88, 0x02, 0x49, 0xc0, 0x46, 0x48, 0x61, 0x00, 0x20, 0x70, 0x47,
+0x80, 0x00, 0x14, 0x00, 0x00, 0xb5, 0x84, 0xb0, 0x69, 0x46, 0xff, 0xf7,
+0xe3, 0xf9, 0x06, 0x48, 0xc0, 0x68, 0x01, 0xab,
+0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7, 0x1b, 0xf9, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x80, 0x00, 0x14, 0x40,
+0xc0, 0x88, 0x02, 0x49, 0xc0, 0x46, 0xc8, 0x60, 0x00, 0x20, 0x70, 0x47,
+0x80, 0x00, 0x14, 0x00, 0x80, 0xb5, 0x84, 0xb0, 0x69, 0x46, 0x87, 0x68,
+0xff, 0xf7, 0xc6, 0xf9, 0x20, 0x2f, 0x07, 0xd2, 0x78, 0x00, 0x0c, 0x49,
+0x40, 0x18, 0x1b, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x80, 0x8b, 0x06, 0xe0,
+0x00, 0xa8, 0x00, 0x78, 0x40, 0x23, 0x18, 0x43, 0x00, 0xab, 0x18, 0x70,
+0x02, 0x20, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7,
+0xef, 0xf8, 0x01, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0xb5, 0x84, 0xb0, 0xc1, 0x88, 0x82, 0x68,
+0x20, 0x2a, 0x04, 0xd2, 0x10, 0x1c, 0x02, 0xf0, 0x17, 0xf8, 0x00, 0x20,
+0x10, 0xe0, 0x69, 0x46, 0xff, 0xf7, 0x9a, 0xf9, 0x00, 0xa8, 0x00, 0x78,
+0x40, 0x23, 0x18, 0x43, 0x00, 0xab, 0x18, 0x70, 0x02, 0x20, 0xd8, 0x80,
+0x68, 0x46, 0x00, 0x21, 0x04, 0x33, 0xff, 0xf7, 0xcd, 0xf8, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x90, 0xb5, 0x84, 0xb0, 0xc7, 0x88,
+0x69, 0x46, 0xff, 0xf7, 0x83, 0xf9, 0x10, 0x48, 0xfe, 0xf7, 0x72, 0xff,
+0x02, 0x20, 0x39, 0x1c, 0x02, 0xf0, 0xf2, 0xff, 0x00, 0x28, 0x06, 0xd0,
+0x02, 0x20, 0x39, 0x1c, 0x02, 0xf0, 0x36, 0xff, 0x01, 0xab, 0x58, 0x80,
+0x02, 0xe0, 0x45, 0x20, 0x00, 0xab, 0x18, 0x70, 0x07, 0x49, 0x20, 0x1c,
+0xfe, 0xf7, 0x5f, 0xff, 0x68, 0x46, 0x00, 0x21, 0xff, 0xf7, 0xa8, 0xf8,
+0x01, 0x20, 0x04, 0xb0, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x24, 0x02, 0xff, 0xff, 0x3c, 0x02, 0xff, 0xff, 0xb0, 0xb5, 0x84, 0xb0,
+0xc7, 0x88, 0x69, 0x46, 0x84, 0x68, 0xff, 0xf7, 0x57, 0xf9, 0x10, 0x48,
+0xfe, 0xf7, 0x46, 0xff, 0x0f, 0x4a, 0x02, 0x20, 0x39, 0x1c, 0xfe, 0xf7,
+0x43, 0xff, 0x00, 0x28, 0x06, 0xd0, 0x0d, 0x4b, 0x02, 0x20, 0x39, 0x1c,
+0x22, 0x1c, 0xfe, 0xf7, 0x3c, 0xff, 0x02, 0xe0, 0x45, 0x20, 0x00, 0xab,
+0x18, 0x70, 0x09, 0x49, 0x28, 0x1c, 0xfe, 0xf7, 0x32, 0xff, 0x68, 0x46,
+0x00, 0x21, 0xff, 0xf7, 0x7b, 0xf8, 0x01, 0x20, 0x04, 0xb0, 0xb0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x24, 0x02, 0xff, 0xff, 0x59, 0xb1, 0x21, 0x40,
+0x59, 0xb0, 0x21, 0x40, 0x3c, 0x02, 0xff, 0xff, 0x00, 0xb5, 0xff, 0xf7,
+0x43, 0xf9, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x20, 0x70, 0x47, 0x80, 0xb4,
+0xc2, 0x88, 0x19, 0x4b, 0xa1, 0x21, 0x49, 0x03, 0x00, 0x2a, 0x03, 0xd1,
+0x18, 0x6b, 0x10, 0x23, 0x98, 0x43, 0x04, 0xe0, 0x01, 0x2a, 0x04, 0xd1,
+0x18, 0x6b, 0x10, 0x23, 0x18, 0x43, 0x48, 0x61, 0x1f, 0xe0, 0x02, 0x2a,
+0x1d, 0xd1, 0xc2, 0x68, 0x87, 0x68, 0x00, 0x20, 0x3b, 0x1c, 0xc3, 0x40,
+0xdb, 0x07, 0xdb, 0x0f, 0x9b, 0x02, 0x03, 0x43, 0x0b, 0x61, 0x01, 0x30,
+0x00, 0x04, 0x00, 0x0c, 0x20, 0x28, 0xf3, 0xdb, 0x00, 0x20, 0x13, 0x1c,
+0xc3, 0x40, 0xdb, 0x07, 0xdb, 0x0f, 0x9b, 0x02, 0xc7, 0x1d, 0x19, 0x37,
+0x3b, 0x43, 0x0b, 0x61, 0x01, 0x30, 0x00, 0x04, 0x00, 0x0c, 0x20, 0x28,
+0xf1, 0xdb, 0x00, 0x20, 0x80, 0xbc, 0x70, 0x47, 0x80, 0x00, 0x14, 0x40,
+0x80, 0xb4, 0xc2, 0x88, 0x81, 0x68, 0x10, 0x02, 0x12, 0x0a, 0x10, 0x43,
+0x02, 0x04, 0x12, 0x0c, 0x0c, 0x48, 0xc0, 0x46, 0x02, 0x60, 0x0c, 0x4b,
+0xc0, 0x46, 0x1a, 0x80, 0x0a, 0x0c, 0x17, 0x02,
+0x12, 0x12, 0x3a, 0x43, 0x12, 0x04, 0x12, 0x0c, 0x42, 0x60, 0x5a, 0x80,
+0x09, 0x04, 0x09, 0x0c, 0x0a, 0x02, 0x09, 0x0a, 0x11, 0x43, 0x09, 0x04,
+0x09, 0x0c, 0x81, 0x60, 0x99, 0x80, 0x00, 0x20, 0x80, 0xbc, 0x70, 0x47,
+0x40, 0x00, 0x14, 0x00, 0x28, 0x1b, 0x00, 0x80, 0xb0, 0xb5, 0x84, 0xb0,
+0x13, 0x49, 0x0a, 0x68, 0x12, 0x04, 0x12, 0x0c, 0x13, 0x02, 0x12, 0x12,
+0x13, 0x43, 0x4a, 0x68, 0x12, 0x04, 0x12, 0x0c, 0x1f, 0x1c, 0x13, 0x02,
+0x12, 0x12, 0x13, 0x43, 0x89, 0x68, 0x09, 0x04, 0x09, 0x0c, 0x0a, 0x02,
+0x09, 0x12, 0x11, 0x43, 0x0c, 0x04, 0x24, 0x0c, 0x69, 0x46, 0x1d, 0x1c,
+0xff, 0xf7, 0xae, 0xf8, 0x01, 0xab, 0x5f, 0x80, 0x28, 0x04, 0x20, 0x43,
+0x02, 0x90, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0xe5, 0xff, 0x01, 0x20,
+0x04, 0xb0, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x40, 0x00, 0x14, 0x40,
+0xc1, 0x88, 0x82, 0x68, 0x08, 0x02, 0x09, 0x0a, 0x08, 0x43, 0x00, 0x04,
+0x00, 0x0c, 0x0a, 0x49, 0xc0, 0x46, 0xc8, 0x60, 0x10, 0x0c, 0x03, 0x02,
+0x00, 0x12, 0x18, 0x43, 0x00, 0x04, 0x00, 0x0c, 0x08, 0x61, 0x10, 0x04,
+0x00, 0x0c, 0x02, 0x02, 0x00, 0x0a, 0x10, 0x43, 0x00, 0x04, 0x00, 0x0c,
+0x48, 0x61, 0x00, 0x20, 0x70, 0x47, 0x00, 0x00, 0x40, 0x00, 0x14, 0x00,
+0x90, 0xb5, 0x84, 0xb0, 0x16, 0x4b, 0xd9, 0x68, 0x09, 0x04, 0x09, 0x0c,
+0x0a, 0x02, 0x09, 0x12, 0x11, 0x43, 0x1a, 0x69, 0x12, 0x04, 0x12, 0x0c,
+0x17, 0x02, 0x12, 0x12, 0x3a, 0x43, 0x5b, 0x69, 0x1b, 0x04, 0x1b, 0x0c,
+0x1f, 0x02, 0x1b, 0x12, 0x3b, 0x43, 0x1f, 0x04, 0x3f, 0x0c, 0x05, 0x23,
+0x00, 0x93, 0x84, 0x88, 0x01, 0xab, 0x1c, 0x80, 0x00, 0x24, 0x04, 0x3b,
+0x5c, 0x70, 0x40, 0x88, 0x00, 0xab, 0x58, 0x80, 0xd9, 0x80, 0x10, 0x04,
+0x38, 0x43, 0x02, 0x90, 0x03, 0x94, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7,
+0x95, 0xff, 0x01, 0x20, 0x04, 0xb0, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x40, 0x00, 0x14, 0x40, 0x00, 0xb5, 0x84, 0xb0, 0x0b, 0x49, 0x8a, 0x6a,
+0x05, 0x21, 0x00, 0x91, 0x81, 0x88, 0x01, 0xab, 0x19, 0x80, 0x00, 0x21,
+0x04, 0x3b, 0x59, 0x70, 0x40, 0x88, 0x00, 0xab, 0x58, 0x80, 0xda, 0x80,
+0x02, 0x91, 0x03, 0x91, 0x68, 0x46, 0xfe, 0xf7, 0x79, 0xff, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x40,
+0xc0, 0x88, 0x02, 0x49, 0xc0, 0x46, 0x88, 0x62, 0x00, 0x20, 0x70, 0x47,
+0xc0, 0x00, 0x14, 0x00, 0x00, 0xb5, 0x84, 0xb0, 0x0b, 0x49, 0x0a, 0x6a,
+0x05, 0x21, 0x00, 0x91, 0x81, 0x88, 0x01, 0xab, 0x19, 0x80, 0x00, 0x21,
+0x04, 0x3b, 0x59, 0x70, 0x40, 0x88, 0x00, 0xab, 0x58, 0x80, 0xda, 0x80,
+0x02, 0x91, 0x03, 0x91, 0x68, 0x46, 0xfe, 0xf7, 0x55, 0xff, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x40,
+0xc0, 0x88, 0x02, 0x49, 0xc0, 0x46, 0x08, 0x62, 0x00, 0x20, 0x70, 0x47,
+0xc0, 0x00, 0x14, 0x00, 0x00, 0xb5, 0xc0, 0x88, 0x02, 0x49, 0xfe, 0xf7,
+0xf4, 0xfd, 0x00, 0x20, 0x08, 0xbc, 0x18, 0x47, 0x75, 0x02, 0xff, 0xff,
+0x00, 0xb5, 0x84, 0xb0, 0x69, 0x46, 0xfe, 0xf7, 0xf7, 0xff, 0x06, 0x48,
+0x00, 0x6b, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7,
+0x2f, 0xff, 0x01, 0x20, 0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0xb5, 0xfe, 0xf7, 0xfd, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0xf8, 0xff,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0xf3, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x80, 0xb5, 0x07, 0x1c, 0x10, 0x48, 0xfe, 0xf7, 0xc6, 0xfd,
+0x01, 0x20, 0x40, 0x02, 0xa1, 0x21, 0x49, 0x03, 0x88, 0x60, 0x00, 0x21,
+0x0c, 0x48, 0xc0, 0x46, 0x01, 0x71, 0x0c, 0x48, 0x02, 0x68, 0x52, 0x0c,
+0x05, 0xd2, 0x02, 0x68, 0x12, 0x0c, 0x06, 0xd1, 0x00, 0x68, 0x80, 0x0a,
+0x03, 0xd3, 0x08, 0x48, 0xc0, 0x46, 0xc7, 0x60, 0x02, 0xe0, 0x07, 0x48,
+0xc0, 0x46, 0x07, 0x64, 0x08, 0x1c, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xd5, 0x94, 0x21, 0x40, 0x28, 0x0f, 0x00, 0x80, 0x00, 0x00, 0x10, 0x40,
+0x40, 0x01, 0x18, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0xb5, 0x01, 0x20,
+0x03, 0x49, 0xc0, 0x46, 0x08, 0x72, 0x12, 0x20, 0xff, 0xf7, 0xcb, 0xff,
+0x08, 0xbc, 0x18, 0x47, 0x88, 0x1c, 0x00, 0x80, 0x00, 0xb5, 0x01, 0x20,
+0x03, 0x49, 0xc0, 0x46, 0x48, 0x72, 0x15, 0x20, 0xff, 0xf7, 0xbf, 0xff,
+0x08, 0xbc, 0x18, 0x47, 0x88, 0x1c, 0x00, 0x80, 0x00, 0xb5, 0x01, 0xf0,
+0xf9, 0xff, 0x01, 0x20, 0x08, 0xbc, 0x18, 0x47, 0x80, 0xb5, 0x84, 0xb0,
+0x07, 0x1c, 0xf8, 0x88, 0x02, 0xf0, 0xfe, 0xf8, 0x00, 0x28, 0x0c, 0xd1,
+0x69, 0x46, 0x38, 0x1c, 0xfe, 0xf7, 0x82, 0xff, 0x06, 0x48, 0x01, 0xab,
+0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0xbb, 0xfe, 0x01, 0x20,
+0x00, 0xe0, 0x00, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0xff, 0xff, 0x00, 0x00, 0x80, 0xb5, 0x84, 0xb0, 0x69, 0x46, 0xfe, 0xf7,
+0x6d, 0xff, 0x01, 0x27, 0x01, 0xab, 0x5f, 0x80, 0x09, 0x48, 0x81, 0x89,
+0x09, 0x04, 0xc2, 0x89, 0x11, 0x43, 0x02, 0x91, 0x81, 0x88, 0x09, 0x04,
+0xc0, 0x88, 0x08, 0x43, 0x03, 0x90, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7,
+0x9b, 0xfe, 0x38, 0x1c, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x4c, 0x2a, 0x00, 0x80, 0x00, 0xb5, 0xfe, 0xf7, 0x69, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0x64, 0xff, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0xb5, 0xfe, 0xf7, 0x5f, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0xfe, 0xf7, 0x5a, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7,
+0x55, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0x50, 0xff,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0x4b, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0x46, 0xff, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0xb5, 0xfe, 0xf7, 0x41, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0xfe, 0xf7, 0x3c, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7,
+0x37, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0x32, 0xff,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0x8c, 0xb0, 0x08, 0xa9, 0xfe, 0xf7,
+0x13, 0xff, 0x69, 0x46, 0x08, 0xa8, 0x02, 0xf0, 0xa9, 0xff, 0x02, 0x20,
+0x08, 0xab, 0x58, 0x70, 0x69, 0x46, 0x08, 0xa8, 0xfe, 0xf7, 0x48, 0xfe,
+0x01, 0x20, 0x0c, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7,
+0x19, 0xff, 0x08, 0xbc, 0x18, 0x47, 0x90, 0xb5, 0x84, 0xb0, 0x07, 0x1c,
+0x69, 0x46, 0x38, 0x1c, 0xfe, 0xf7, 0xf8, 0xfe, 0xfa, 0x88, 0x12, 0x49,
+0x01, 0x24, 0xc8, 0x1d, 0x89, 0x30, 0x00, 0x2a, 0x0f, 0xd0, 0x04, 0x70,
+0x44, 0x70, 0xb8, 0x68, 0x00, 0x0c, 0x80, 0x31, 0xc8, 0x82, 0xb8, 0x68,
+0xc0, 0x46, 0x08, 0x83, 0xf8, 0x68, 0x00, 0x0c, 0x48, 0x83, 0xf8, 0x68,
+0xc0, 0x46, 0x88, 0x83, 0x02, 0xe0, 0x00, 0x21,
+0x01, 0x70, 0x41, 0x70, 0x06, 0x48, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46,
+0x00, 0x21, 0xfe, 0xf7, 0x17, 0xfe, 0x20, 0x1c, 0x04, 0xb0, 0x90, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0xff, 0xff, 0x00, 0x00,
+0x00, 0xb5, 0xfe, 0xf7, 0xe3, 0xfe, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5,
+0xfe, 0xf7, 0xde, 0xfe, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7,
+0xd9, 0xfe, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0xd4, 0xfe,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0xfe, 0xf7, 0xcf, 0xfe, 0x08, 0xbc,
+0x18, 0x47, 0x90, 0xb5, 0x84, 0xb0, 0x07, 0x1c, 0x69, 0x46, 0x38, 0x1c,
+0xfe, 0xf7, 0xae, 0xfe, 0xf8, 0x88, 0x03, 0x24, 0xe4, 0x04, 0x04, 0x43,
+0x03, 0x23, 0xdb, 0x04, 0x9c, 0x42, 0x02, 0xd3, 0x0f, 0x4b, 0x9c, 0x42,
+0x06, 0xd9, 0x0f, 0x48, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46, 0x00, 0x21,
+0xfe, 0xf7, 0xdc, 0xfd, 0x01, 0x20, 0x80, 0x07, 0x20, 0x43, 0x00, 0x68,
+0x00, 0x21, 0x00, 0xab, 0x59, 0x70, 0xfa, 0x88, 0xc0, 0x46, 0xda, 0x80,
+0x02, 0x90, 0x03, 0x91, 0x68, 0x46, 0x04, 0x33, 0xfe, 0xf7, 0xcc, 0xfd,
+0x01, 0x20, 0x04, 0xb0, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0xe0, 0x00, 0x18, 0x00, 0xff, 0xff, 0x00, 0x00, 0x80, 0xb5, 0x84, 0xb0,
+0x07, 0x1c, 0x69, 0x46, 0x38, 0x1c, 0xfe, 0xf7, 0x7b, 0xfe, 0xf8, 0x88,
+0x03, 0x23, 0xdb, 0x04, 0x18, 0x43, 0x98, 0x42, 0x02, 0xd3, 0x0a, 0x4b,
+0x98, 0x42, 0x08, 0xd9, 0x09, 0x48, 0x01, 0xab, 0x58, 0x80, 0x68, 0x46,
+0x00, 0x21, 0xfe, 0xf7, 0xab, 0xfd, 0x01, 0x20, 0x03, 0xe0, 0xb9, 0x68,
+0xc0, 0x46, 0x01, 0x60, 0x00, 0x20, 0x04, 0xb0, 0x80, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0xe0, 0x00, 0x18, 0x00, 0xff, 0xff, 0x00, 0x00,
+0x80, 0xb5, 0x86, 0xb0, 0x02, 0xa9, 0xfe, 0xf7, 0x57, 0xfe, 0x01, 0x27,
+0x02, 0xab, 0x5f, 0x70, 0x00, 0x20, 0xd8, 0x80, 0x0a, 0x48, 0x41, 0x68,
+0xc0, 0x46, 0x04, 0x91, 0x81, 0x68, 0xc0, 0x46, 0x05, 0x91, 0xc1, 0x68,
+0xc0, 0x46, 0x00, 0x91, 0x40, 0x69, 0xc0, 0x46, 0x01, 0x90, 0x69, 0x46,
+0x02, 0xa8, 0xfe, 0xf7, 0x81, 0xfd, 0x38, 0x1c, 0x06, 0xb0, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x19, 0x00, 0x80, 0x00, 0xb5, 0xc1, 0x68,
+0x80, 0x68, 0xfe, 0xf7, 0x47, 0xfb, 0x00, 0x20, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0x20, 0x70, 0x47, 0x90, 0xb5, 0x84, 0xb0, 0x04, 0x1c, 0x0f, 0x1c,
+0x68, 0x46, 0x50, 0x21, 0xfe, 0xf7, 0x36, 0xfe, 0x01, 0xab, 0x5c, 0x80,
+0x02, 0x97, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0x61, 0xfd, 0x04, 0xb0,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x80, 0xb5, 0x84, 0xb0, 0x07, 0x1c,
+0x68, 0x46, 0x51, 0x21, 0xfe, 0xf7, 0x24, 0xfe, 0x01, 0xab, 0x5f, 0x80,
+0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0x50, 0xfd, 0x04, 0xb0, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x00, 0x20, 0x70, 0x47, 0x00, 0x20, 0x70, 0x47,
+0x90, 0xb5, 0x84, 0xb0, 0x00, 0x27, 0x12, 0x49, 0x09, 0x68, 0x12, 0x4a,
+0x12, 0x6b, 0x10, 0x23, 0x1a, 0x40, 0x01, 0x24, 0x00, 0x2a, 0x00, 0xd0,
+0x01, 0x27, 0x8a, 0x0c, 0x03, 0xd3, 0x3a, 0x04, 0x12, 0x0c, 0x02, 0x27,
+0x17, 0x43, 0xc9, 0x0c, 0x03, 0xd3, 0x39, 0x04, 0x09, 0x0c, 0x04, 0x27,
+0x0f, 0x43, 0x69, 0x46, 0xfe, 0xf7, 0xec, 0xfd, 0x01, 0xab, 0x5f, 0x80,
+0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0x26, 0xfd, 0x20, 0x1c, 0x04, 0xb0,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x00, 0x00, 0x10, 0x40, 0xc0, 0x00, 0x18, 0x40, 0x00, 0xb5, 0x84, 0xb0,
+0x69, 0x46, 0xfe, 0xf7, 0xd7, 0xfd, 0x06, 0x48, 0xc0, 0x6d, 0x01, 0xab,
+0x58, 0x80, 0x68, 0x46, 0x00, 0x21, 0xfe, 0xf7, 0x0f, 0xfd, 0x01, 0x20,
+0x04, 0xb0, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0xa4, 0x2a, 0x00, 0x80,
+0x00, 0xb5, 0xfe, 0xf7, 0xdd, 0xfd, 0x08, 0xbc, 0x18, 0x47, 0x70, 0x47,
+0x00, 0x20, 0x70, 0x47, 0x00, 0x20, 0x70, 0x47, 0x00, 0x20, 0x70, 0x47,
+0x00, 0x20, 0x70, 0x47, 0x00, 0x20, 0x70, 0x47, 0x00, 0x20, 0x70, 0x47,
+0x00, 0xb5, 0xfe, 0xf7, 0xcb, 0xfd, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x80, 0xb5, 0x85, 0xb0, 0x01, 0xa9, 0xfe, 0xf7, 0xab, 0xfd, 0x00, 0x20,
+0x01, 0xab, 0x58, 0x70, 0x0c, 0x49, 0xc9, 0x68, 0x01, 0x27, 0x01, 0x29,
+0x02, 0xd1, 0x03, 0x97, 0x04, 0x97, 0x01, 0xe0, 0x03, 0x97, 0x04, 0x90,
+0x68, 0x46, 0x01, 0xf0, 0x33, 0xfd, 0x02, 0xab, 0x00, 0x98, 0xc0, 0x46,
+0x58, 0x80, 0x00, 0x21, 0x01, 0xa8, 0xfe, 0xf7, 0xd3, 0xfc, 0x38, 0x1c,
+0x05, 0xb0, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0x70, 0x47, 0x04, 0x49, 0x00, 0x20, 0x00, 0x22, 0x0a, 0x70, 0x01, 0x30,
+0x01, 0x31, 0x68, 0x28, 0xfa, 0xd3, 0x70, 0x47, 0xa0, 0x82, 0x20, 0x40,
+0x00, 0x22, 0x88, 0x42, 0x03, 0xd3, 0x40, 0x1a, 0x01, 0x32, 0x88, 0x42,
+0xfb, 0xd2, 0x10, 0x1c, 0x70, 0x47, 0x88, 0x42, 0x02, 0xd3, 0x40, 0x1a,
+0x88, 0x42, 0xfc, 0xd2, 0x70, 0x47, 0x90, 0xb4, 0x01, 0x1c, 0xff, 0x27,
+0x04, 0x29, 0x27, 0xda, 0x00, 0x20, 0x14, 0x4a, 0x43, 0x00, 0x1b, 0x18,
+0xdb, 0x00, 0xd4, 0x58, 0x63, 0x0c, 0x1a, 0xd2, 0x4b, 0x00, 0x59, 0x18,
+0xc9, 0x00, 0x57, 0x58, 0x43, 0x00, 0x1b, 0x18, 0xdb, 0x00, 0xd7, 0x50,
+0x89, 0x18, 0x9a, 0x18, 0x4f, 0x68, 0xc0, 0x46, 0x57, 0x60, 0x8b, 0x68,
+0xc0, 0x46, 0x93, 0x60, 0x0b, 0x69, 0xc0, 0x46, 0x13, 0x61, 0x4b, 0x69,
+0xc0, 0x46, 0x53, 0x61, 0xc9, 0x68, 0xc0, 0x46, 0xd1, 0x60, 0x90, 0xbc,
+0x70, 0x47, 0x01, 0x30, 0x00, 0x06, 0x00, 0x0e, 0x04, 0x28, 0xd9, 0xdb,
+0x38, 0x1c, 0xf6, 0xe7, 0x40, 0xab, 0x20, 0x40, 0xf7, 0xb5, 0xc4, 0xb0,
+0x04, 0x1c, 0x00, 0x20, 0x46, 0x9a, 0x11, 0x21, 0x11, 0x40, 0x6e, 0xd0,
+0x00, 0x27, 0x79, 0x00, 0xc9, 0x19, 0xc9, 0x00, 0x57, 0x4a, 0x51, 0x58,
+0x49, 0x0c, 0x03, 0xd2, 0x01, 0x30, 0x00, 0x06, 0x00, 0x0e, 0x04, 0xe0,
+0x79, 0x1c, 0x0f, 0x06, 0x3f, 0x0e, 0x04, 0x2f, 0xef, 0xdb, 0x00, 0x28,
+0x5b, 0xd0, 0x00, 0x26, 0x00, 0x22, 0x00, 0x92, 0x40, 0x23, 0x00, 0x21,
+0x00, 0x20, 0x02, 0xaa, 0x00, 0xf0, 0x88, 0xfa, 0x04, 0xa9, 0x00, 0x20,
+0x82, 0x00, 0x8a, 0x58, 0x12, 0x06, 0x12, 0x0e, 0xa2, 0x42, 0x03, 0xd1,
+0x72, 0x1c, 0x16, 0x06, 0x36, 0x0e, 0x04, 0xe0, 0x01, 0x30, 0x00, 0x06,
+0x00, 0x0e, 0x10, 0x28, 0xf0, 0xdb, 0x00, 0x2e, 0x3d, 0xd0, 0x04, 0x2c,
+0x3e, 0xd1, 0x80, 0x00, 0x08, 0x58, 0x40, 0x01, 0x80, 0x0d, 0x00, 0x22,
+0x00, 0x92, 0x10, 0x23, 0x00, 0x21, 0x02, 0xaa, 0x00, 0xf0, 0x68, 0xfa,
+0x00, 0x21, 0x01, 0x91, 0x02, 0xa8, 0x05, 0x99, 0x49, 0x0c, 0x89, 0x05,
+0x29, 0xd0, 0xc1, 0x68, 0x0a, 0x06, 0x12, 0x0e, 0x45, 0x9b, 0x9a, 0x42,
+0x11, 0xd1, 0xc0, 0x68, 0x40, 0x01, 0x86, 0x0d, 0x00, 0x22, 0x00, 0x92,
+0x0c, 0x23, 0x00, 0x21, 0x30, 0x1c, 0x02, 0xaa, 0x00, 0xf0, 0x50, 0xfa,
+0x01, 0x99, 0x02, 0x9d, 0x48, 0x1c, 0x01, 0x06,
+0x09, 0x0e, 0x01, 0x91, 0x0e, 0xe0, 0x48, 0x01, 0x86, 0x0d, 0x00, 0x22,
+0x00, 0x92, 0x10, 0x23, 0x00, 0x21, 0x30, 0x1c, 0x02, 0xaa, 0x00, 0xf0,
+0x3f, 0xfa, 0x02, 0xa8, 0x05, 0x99, 0x49, 0x0c, 0x89, 0x05, 0xd8, 0xd1,
+0x01, 0x99, 0x00, 0x29, 0x0f, 0xd1, 0xff, 0x20, 0x3d, 0xe0, 0x40, 0xe0,
+0x80, 0x00, 0x08, 0x58, 0x40, 0x01, 0x86, 0x0d, 0x00, 0x22, 0x00, 0x92,
+0x0c, 0x23, 0x00, 0x21, 0x30, 0x1c, 0x02, 0xaa, 0x00, 0xf0, 0x28, 0xfa,
+0x02, 0x9d, 0x01, 0x20, 0x00, 0x04, 0x46, 0x9a, 0x10, 0x43, 0x79, 0x00,
+0xc9, 0x19, 0xc9, 0x00, 0x17, 0x4a, 0xc0, 0x46, 0x50, 0x50, 0x30, 0x1c,
+0x8e, 0x18, 0x70, 0x60, 0x10, 0x20, 0x04, 0x2c, 0x00, 0xd0, 0x0c, 0x20,
+0x04, 0x1c, 0xb0, 0x60, 0x00, 0x20, 0x20, 0x21, 0x46, 0x9a, 0x11, 0x40,
+0x20, 0x29, 0x00, 0xd0, 0x28, 0x1c, 0x30, 0x61, 0x28, 0x19, 0xff, 0x21,
+0xff, 0x30, 0x08, 0x30, 0x09, 0x31, 0xff, 0xf7, 0x19, 0xff, 0x43, 0x01,
+0x18, 0x18, 0xc0, 0x00, 0x00, 0x1b, 0x70, 0x61, 0x00, 0x20, 0x50, 0x21,
+0x46, 0x9a, 0x11, 0x40, 0x50, 0x29, 0x00, 0xd1, 0x28, 0x1c, 0xf0, 0x60,
+0x38, 0x1c, 0x47, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xff, 0x20,
+0xf9, 0xe7, 0x00, 0x00, 0x40, 0xab, 0x20, 0x40, 0x80, 0xb4, 0x00, 0x23,
+0x00, 0x22, 0x00, 0x29, 0x06, 0xd9, 0x87, 0x5c, 0x7b, 0x40, 0x1b, 0x06,
+0x1b, 0x0e, 0x01, 0x32, 0x8a, 0x42, 0xf8, 0xd3, 0xd8, 0x43, 0x00, 0x06,
+0x00, 0x0e, 0x80, 0xbc, 0x70, 0x47, 0xf0, 0xb5, 0xc6, 0xb0, 0x04, 0x28,
+0x07, 0xda, 0x41, 0x00, 0x09, 0x18, 0xc9, 0x00, 0x45, 0x91, 0x41, 0x4a,
+0x51, 0x58, 0x4b, 0x0c, 0x02, 0xd2, 0x00, 0x20, 0xc0, 0x43, 0x76, 0xe0,
+0x01, 0x23, 0x5b, 0x04, 0x19, 0x40, 0x43, 0x00, 0x18, 0x18, 0xc0, 0x00,
+0x3a, 0x4a, 0x14, 0x18, 0x00, 0x29, 0x61, 0xd0, 0x00, 0x21, 0x02, 0x91,
+0x20, 0x69, 0xa1, 0x68, 0x45, 0x18, 0x30, 0xd0, 0xff, 0x21, 0x68, 0x1e,
+0x09, 0x31, 0xff, 0xf7, 0xcd, 0xfe, 0x61, 0x68, 0x40, 0x18, 0x01, 0x90,
+0x01, 0x98, 0x81, 0x42, 0x02, 0xd1, 0xa6, 0x68, 0xaf, 0x1b, 0x09, 0xe0,
+0x00, 0x26, 0xff, 0x21, 0x28, 0x1c, 0x09, 0x31, 0xff, 0xf7, 0xc7, 0xfe,
+0x07, 0x1c, 0x01, 0xd1, 0xff, 0x27, 0x09, 0x37, 0x00, 0x22, 0x00, 0x92,
+0x01, 0x98, 0x31, 0x1c, 0x03, 0xaa, 0x3b, 0x1c, 0x00, 0xf0, 0x9e, 0xf9,
+0x03, 0xa8, 0x39, 0x1c, 0xff, 0xf7, 0xac, 0xff, 0xc0, 0x43, 0x02, 0x99,
+0x48, 0x40, 0x01, 0x06, 0x09, 0x0e, 0x02, 0x91, 0xed, 0x1b, 0xa0, 0x68,
+0xa8, 0x42, 0x00, 0xd1, 0x00, 0x25, 0x00, 0x2d, 0xce, 0xd8, 0x02, 0x99,
+0xcf, 0x43, 0x00, 0x22, 0x00, 0x92, 0x0c, 0x23, 0x00, 0x21, 0x60, 0x68,
+0x03, 0xaa, 0x00, 0xf0, 0x83, 0xf9, 0x20, 0x69, 0xc0, 0x46, 0x03, 0x90,
+0x05, 0x98, 0x00, 0x0a, 0x00, 0x02, 0x39, 0x06, 0x09, 0x0e, 0x08, 0x43,
+0x05, 0x90, 0xff, 0x23, 0x1b, 0x02, 0x98, 0x43, 0x05, 0x90, 0x0c, 0x21,
+0x03, 0xa8, 0xff, 0xf7, 0x83, 0xff, 0xff, 0x23, 0x1b, 0x02, 0x05, 0x99,
+0x99, 0x43, 0x00, 0x06, 0x00, 0x0e, 0x00, 0x02, 0x08, 0x43, 0x05, 0x90,
+0x0c, 0x23, 0x00, 0x21, 0x60, 0x68, 0x03, 0xaa, 0x00, 0xf0, 0xca, 0xf9,
+0x00, 0x20, 0x45, 0x99, 0x06, 0x4a, 0xc0, 0x46, 0x50, 0x50, 0xc1, 0x43,
+0x61, 0x60, 0xa1, 0x60, 0xe1, 0x60, 0x21, 0x61, 0x61, 0x61, 0x46, 0xb0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x40, 0xab, 0x20, 0x40,
+0xb0, 0xb4, 0x4c, 0x42, 0x00, 0x29, 0x00, 0xdb,
+0x0c, 0x1c, 0x00, 0x27, 0xff, 0x43, 0x04, 0x28, 0x21, 0xda, 0x12, 0x4d,
+0x43, 0x00, 0x18, 0x18, 0xc0, 0x00, 0x40, 0x19, 0x01, 0x2a, 0x05, 0xd0,
+0x02, 0x2a, 0x09, 0xd0, 0x03, 0x2a, 0x16, 0xd1, 0x01, 0x69, 0x0b, 0xe0,
+0x00, 0x29, 0x12, 0xdb, 0x02, 0x69, 0x8a, 0x42, 0x0f, 0xd3, 0x05, 0xe0,
+0x00, 0x29, 0x07, 0xda, 0xc1, 0x68, 0xa1, 0x42, 0x09, 0xd3, 0x09, 0x1b,
+0xc1, 0x60, 0xc0, 0x68, 0xb0, 0xbc, 0x70, 0x47, 0xc1, 0x68, 0x09, 0x19,
+0x02, 0x69, 0x91, 0x42, 0xf6, 0xd9, 0x38, 0x1c, 0xf6, 0xe7, 0x00, 0x00,
+0x40, 0xab, 0x20, 0x40, 0xf0, 0xb5, 0x84, 0xb0, 0x17, 0x1c, 0x0d, 0x1c,
+0x00, 0x21, 0x02, 0x91, 0x42, 0x00, 0x12, 0x18, 0xd2, 0x00, 0x2c, 0x49,
+0x8b, 0x58, 0x1b, 0x06, 0x1b, 0x0e, 0x01, 0x93, 0x00, 0x23, 0xdb, 0x43,
+0x04, 0x28, 0x02, 0xda, 0x01, 0x98, 0x40, 0x08, 0x01, 0xd2, 0x18, 0x1c,
+0x46, 0xe0, 0x54, 0x18, 0xe0, 0x68, 0xc2, 0x19, 0x21, 0x69, 0x8a, 0x42,
+0x00, 0xd9, 0x0f, 0x1a, 0x00, 0x2f, 0x3c, 0xd9, 0xa0, 0x68, 0xe1, 0x68,
+0x40, 0x18, 0xff, 0x21, 0x09, 0x31, 0xff, 0xf7, 0x0d, 0xfe, 0x61, 0x68,
+0x46, 0x18, 0xa0, 0x68, 0xe1, 0x68, 0x40, 0x18, 0xff, 0x21, 0x09, 0x31,
+0xff, 0xf7, 0x0d, 0xfe, 0xc2, 0x19, 0xff, 0x21, 0x09, 0x31, 0x8a, 0x42,
+0x14, 0xd9, 0x01, 0x9a, 0xc0, 0x46, 0x00, 0x92, 0x0b, 0x1a, 0x03, 0x93,
+0x01, 0x1c, 0x30, 0x1c, 0x2a, 0x1c, 0x00, 0xf0, 0xe1, 0xf8, 0xe0, 0x68,
+0x03, 0x9b, 0xc0, 0x18, 0xe0, 0x60, 0x03, 0x9b, 0x5d, 0x19, 0xff, 0x1a,
+0x02, 0x98, 0x18, 0x18, 0x02, 0x90, 0x10, 0xe0, 0x01, 0x9a, 0xc0, 0x46,
+0x00, 0x92, 0x01, 0x1c, 0x30, 0x1c, 0x2a, 0x1c, 0x3b, 0x1c, 0x00, 0xf0,
+0xcd, 0xf8, 0xe0, 0x68, 0xc0, 0x19, 0xed, 0x19, 0xe0, 0x60, 0x02, 0x98,
+0xc0, 0x19, 0x02, 0x90, 0x00, 0x27, 0x00, 0x2f, 0xc2, 0xd8, 0x02, 0x98,
+0x04, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x40, 0xab, 0x20, 0x40,
+0xf0, 0xb5, 0x83, 0xb0, 0x17, 0x1c, 0x0d, 0x1c, 0x00, 0x21, 0x01, 0x91,
+0x42, 0x00, 0x12, 0x18, 0xd2, 0x00, 0x02, 0x92, 0x30, 0x49, 0x8a, 0x58,
+0x12, 0x06, 0x12, 0x0e, 0x00, 0x24, 0xe4, 0x43, 0x04, 0x28, 0x01, 0xda,
+0x50, 0x09, 0x01, 0xd2, 0x20, 0x1c, 0x51, 0xe0, 0x02, 0x9a, 0x54, 0x18,
+0xe0, 0x68, 0xc2, 0x19, 0x60, 0x69, 0x82, 0x42, 0x01, 0xd9, 0x22, 0x69,
+0x87, 0x1a, 0x00, 0x2f, 0x45, 0xd9, 0x25, 0x4e, 0xa0, 0x68, 0xe1, 0x68,
+0x40, 0x18, 0xff, 0x21, 0x09, 0x31, 0xff, 0xf7, 0xa7, 0xfd, 0x61, 0x68,
+0x40, 0x18, 0x00, 0x90, 0xa0, 0x68, 0xe1, 0x68, 0x40, 0x18, 0xff, 0x21,
+0x09, 0x31, 0xff, 0xf7, 0xa6, 0xfd, 0x02, 0x9a, 0xb1, 0x58, 0x01, 0x23,
+0x5b, 0x04, 0x19, 0x43, 0xb1, 0x50, 0xc1, 0x19, 0xff, 0x22, 0x09, 0x32,
+0x91, 0x42, 0x13, 0xd9, 0x13, 0x1a, 0x01, 0x1c, 0x00, 0x98, 0x2a, 0x1c,
+0x1e, 0x1c, 0x00, 0xf0, 0xdf, 0xf8, 0xe0, 0x68, 0x80, 0x19, 0x75, 0x19,
+0xe0, 0x60, 0x21, 0x69, 0x88, 0x42, 0x00, 0xd9, 0x20, 0x61, 0xbf, 0x1b,
+0x01, 0x98, 0x30, 0x18, 0x01, 0x90, 0x12, 0xe0, 0x01, 0x1c, 0x00, 0x9e,
+0x30, 0x1c, 0x2a, 0x1c, 0x3b, 0x1c, 0x00, 0xf0, 0xcb, 0xf8, 0xe0, 0x68,
+0xc0, 0x19, 0xed, 0x19, 0xe0, 0x60, 0x21, 0x69, 0x88, 0x42, 0x00, 0xd9,
+0x20, 0x61, 0x01, 0x98, 0xc0, 0x19, 0x01, 0x90, 0x00, 0x27, 0x00, 0x2f,
+0xb9, 0xd8, 0x01, 0x98, 0x03, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x40, 0xab, 0x20, 0x40, 0xb0, 0xb5, 0xc3, 0xb0,
+0x0c, 0x1c, 0x00, 0x27, 0xfa, 0x43, 0x04, 0x28, 0x06, 0xda, 0x41, 0x00,
+0x09, 0x18, 0xc9, 0x00, 0x14, 0x48, 0x45, 0x58, 0x6b, 0x0c, 0x04, 0xd2,
+0x10, 0x1c, 0x43, 0xb0, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x62, 0x09,
+0x1b, 0xd3, 0x00, 0x22, 0x00, 0x92, 0x08, 0x18, 0x40, 0x68, 0x0c, 0x23,
+0x00, 0x21, 0x01, 0xaa, 0x00, 0xf0, 0x30, 0xf8, 0x11, 0x2c, 0x0d, 0xd0,
+0x12, 0x2c, 0x0d, 0xd0, 0x13, 0x2c, 0x05, 0xd0, 0x14, 0x2c, 0x0a, 0xd1,
+0x03, 0x98, 0x00, 0x04, 0x07, 0x0e, 0x06, 0xe0, 0x03, 0x98, 0x07, 0x06,
+0x3f, 0x0e, 0x02, 0xe0, 0x01, 0x9f, 0x00, 0xe0, 0x02, 0x9f, 0x38, 0x1c,
+0xdb, 0xe7, 0x00, 0x00, 0x40, 0xab, 0x20, 0x40, 0x03, 0x49, 0x00, 0x20,
+0x00, 0x22, 0x0a, 0x54, 0x01, 0x30, 0x60, 0x28, 0xfb, 0xd3, 0x70, 0x47,
+0x40, 0xab, 0x20, 0x40, 0x00, 0xb5, 0x02, 0xf0, 0x6f, 0xfa, 0x57, 0x20,
+0x02, 0xf0, 0xcc, 0xf9, 0x02, 0xf0, 0x40, 0xf9, 0x00, 0x0a, 0xfb, 0xd3,
+0x02, 0xf0, 0x4e, 0xfa, 0x08, 0xbc, 0x18, 0x47, 0xf0, 0xb5, 0x82, 0xb0,
+0x07, 0x9d, 0x14, 0x1c, 0x1f, 0x1c, 0x30, 0x4a, 0xd2, 0x6f, 0x20, 0x23,
+0x16, 0x68, 0x9e, 0x43, 0x16, 0x60, 0x33, 0x1c, 0xff, 0x22, 0x01, 0x32,
+0x2a, 0x40, 0x40, 0x02, 0x08, 0x43, 0x05, 0x0a, 0x06, 0x1c, 0x00, 0x0c,
+0x01, 0x90, 0x00, 0x2a, 0x20, 0xd0, 0x02, 0xf0, 0x4b, 0xfa, 0x53, 0x20,
+0x02, 0xf0, 0xa8, 0xf9, 0x01, 0x98, 0xc0, 0x46, 0x00, 0x90, 0x02, 0xf0,
+0xa3, 0xf9, 0x28, 0x1c, 0x02, 0xf0, 0xa0, 0xf9, 0x30, 0x1c, 0x02, 0xf0,
+0x9d, 0xf9, 0x02, 0xf0, 0x23, 0xfa, 0xff, 0xf7, 0xc7, 0xff, 0x02, 0xf0,
+0x37, 0xfa, 0x54, 0x20, 0x02, 0xf0, 0x94, 0xf9, 0x00, 0x98, 0x02, 0xf0,
+0x91, 0xf9, 0x28, 0x1c, 0x02, 0xf0, 0x8e, 0xf9, 0x30, 0x1c, 0x14, 0xe0,
+0x02, 0xf0, 0x2a, 0xfa, 0x52, 0x20, 0x02, 0xf0, 0x87, 0xf9, 0x01, 0x98,
+0x02, 0xf0, 0x84, 0xf9, 0x28, 0x1c, 0x02, 0xf0, 0x81, 0xf9, 0x30, 0x1c,
+0x02, 0xf0, 0x7e, 0xf9, 0x00, 0x20, 0x02, 0xf0, 0x7b, 0xf9, 0x00, 0x20,
+0x02, 0xf0, 0x78, 0xf9, 0x00, 0x20, 0x02, 0xf0, 0x75, 0xf9, 0x00, 0x20,
+0x02, 0xf0, 0x72, 0xf9, 0x00, 0x2f, 0x05, 0xd9, 0x02, 0xf0, 0xe4, 0xf8,
+0x20, 0x70, 0x01, 0x34, 0x01, 0x3f, 0xf9, 0xd1, 0x02, 0xf0, 0xf0, 0xf9,
+0x04, 0x4a, 0xd0, 0x6f, 0x20, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60,
+0x02, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0xf0, 0xb5, 0x82, 0xb0, 0x14, 0x1c, 0x1f, 0x1c, 0x42, 0x02, 0x0a, 0x43,
+0x15, 0x1c, 0x01, 0x28, 0x54, 0xd0, 0x2c, 0x49, 0xc8, 0x6f, 0x20, 0x23,
+0x02, 0x68, 0x9a, 0x43, 0x02, 0x60, 0xc8, 0x6f, 0x40, 0x23, 0x01, 0x68,
+0x19, 0x43, 0x01, 0x60, 0x02, 0xf0, 0xe6, 0xf9, 0x53, 0x20, 0x02, 0xf0,
+0x43, 0xf9, 0x28, 0x0c, 0x06, 0x1c, 0x02, 0xf0, 0x3f, 0xf9, 0x28, 0x0a,
+0x01, 0x90, 0x00, 0x90, 0x02, 0xf0, 0x3a, 0xf9, 0x28, 0x1c, 0x02, 0xf0,
+0x37, 0xf9, 0x02, 0xf0, 0xbd, 0xf9, 0xff, 0xf7, 0x61, 0xff, 0x02, 0xf0,
+0xd1, 0xf9, 0x84, 0x20, 0x02, 0xf0, 0x2e, 0xf9, 0x30, 0x1c, 0x02, 0xf0,
+0x2b, 0xf9, 0x00, 0x98, 0x02, 0xf0, 0x28, 0xf9, 0x28, 0x1c, 0x02, 0xf0,
+0x25, 0xf9, 0x00, 0x2f, 0x05, 0xd9, 0x20, 0x78, 0x01, 0x34, 0x02, 0xf0,
+0x1f, 0xf9, 0x01, 0x3f, 0xf9, 0xd1, 0x02, 0xf0, 0xa3, 0xf9, 0x02, 0xf0,
+0xb9, 0xf9, 0x83, 0x20, 0x02, 0xf0, 0x16, 0xf9, 0x30, 0x1c, 0x02, 0xf0,
+0x13, 0xf9, 0x01, 0x98, 0x02, 0xf0, 0x10, 0xf9,
+0x28, 0x1c, 0x02, 0xf0, 0x0d, 0xf9, 0x02, 0xf0, 0x93, 0xf9, 0xff, 0xf7,
+0x37, 0xff, 0x07, 0x49, 0xc8, 0x6f, 0x40, 0x23, 0x02, 0x68, 0x9a, 0x43,
+0x02, 0x60, 0xc8, 0x6f, 0x20, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60,
+0x02, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0x70, 0x47, 0x00, 0x00, 0x80, 0xb5, 0x01, 0xf0, 0x8f, 0xf8, 0x06, 0x4f,
+0xc0, 0x46, 0xf8, 0x60, 0x01, 0xf0, 0xf2, 0xf8, 0x78, 0x80, 0x01, 0xf0,
+0xb1, 0xf8, 0x38, 0x71, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0xb5, 0x01, 0xf0, 0x05, 0xf9, 0x02, 0x49,
+0xc0, 0x46, 0x08, 0x80, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0x0b, 0x48, 0xc1, 0x68, 0x01, 0x29, 0x11, 0xd1, 0xc1, 0x6f, 0x02, 0x23,
+0x0a, 0x68, 0x1a, 0x43, 0x0a, 0x60, 0xc1, 0x6f, 0x80, 0x23, 0x0a, 0x68,
+0x1a, 0x43, 0x0a, 0x60, 0xc1, 0x18, 0x08, 0x68, 0x82, 0x23, 0x02, 0x68,
+0x1a, 0x43, 0x02, 0x60, 0x00, 0x20, 0x08, 0x81, 0x70, 0x47, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0xf0, 0xb4, 0x4a, 0x49, 0xca, 0x1d, 0x9d, 0x32,
+0x00, 0x20, 0x00, 0x27, 0x83, 0x00, 0xd7, 0x50, 0x01, 0x30, 0x17, 0x28,
+0xfa, 0xd3, 0x46, 0x4c, 0x00, 0x20, 0x82, 0x00, 0xa7, 0x50, 0x01, 0x30,
+0x20, 0x28, 0xfa, 0xd3, 0x43, 0x4a, 0x00, 0x20, 0x83, 0x00, 0xd7, 0x50,
+0x01, 0x30, 0x20, 0x28, 0xfa, 0xd3, 0xa7, 0x61, 0x97, 0x61, 0x4f, 0x65,
+0x8f, 0x65, 0x3f, 0x4d, 0xc0, 0x46, 0x2f, 0x60, 0x6f, 0x60, 0xaf, 0x60,
+0xaf, 0x61, 0xef, 0x60, 0x2f, 0x61, 0x6f, 0x61, 0x00, 0x20, 0xc1, 0x00,
+0x09, 0x18, 0x49, 0x01, 0x35, 0x4b, 0xc9, 0x18, 0x86, 0x00, 0xcb, 0x1d,
+0xf9, 0x33, 0x34, 0x4c, 0x34, 0x19, 0xe3, 0x63, 0x11, 0x23, 0x5b, 0x01,
+0xcb, 0x18, 0x63, 0x63, 0x0d, 0x23, 0x9b, 0x01, 0xcb, 0x18, 0xb4, 0x18,
+0xe3, 0x63, 0x23, 0x23, 0x5b, 0x01, 0xc9, 0x18, 0x61, 0x63, 0x01, 0x30,
+0x02, 0x28, 0xe4, 0xdb, 0x29, 0x48, 0xc1, 0x1d, 0xf9, 0x31, 0x29, 0x4c,
+0xc0, 0x46, 0xa1, 0x62, 0x61, 0x6b, 0x0d, 0x23, 0x9b, 0x01, 0xe1, 0x62,
+0xc1, 0x18, 0x91, 0x62, 0x51, 0x6b, 0xc0, 0x46, 0xd1, 0x62, 0x08, 0x21,
+0xe1, 0x64, 0x25, 0x49, 0xc0, 0x46, 0x21, 0x65, 0x24, 0x49, 0x0b, 0x69,
+0xc0, 0x46, 0x63, 0x65, 0xc3, 0x1d, 0x4d, 0x33, 0xe3, 0x65, 0x25, 0x66,
+0x8b, 0x68, 0xc0, 0x46, 0x63, 0x66, 0xcb, 0x68, 0xc0, 0x46, 0xa3, 0x66,
+0x1e, 0x4b, 0xc0, 0x46, 0xe3, 0x66, 0x27, 0x67, 0x0b, 0x23, 0xdb, 0x01,
+0xc3, 0x18, 0xa3, 0x67, 0x67, 0x67, 0x01, 0x26, 0xe3, 0x1d, 0x69, 0x33,
+0x66, 0x61, 0xe7, 0x61, 0x1f, 0x73, 0x02, 0x23, 0xd3, 0x64, 0x17, 0x4b,
+0xc0, 0x46, 0x13, 0x65, 0xcb, 0x69, 0xc0, 0x46, 0x53, 0x65, 0xc3, 0x1d,
+0x51, 0x33, 0xd3, 0x65, 0x2b, 0x1d, 0x13, 0x66, 0x4b, 0x69, 0xc0, 0x46,
+0x53, 0x66, 0x89, 0x69, 0xc0, 0x46, 0x91, 0x66, 0x0f, 0x49, 0xc0, 0x46,
+0xd1, 0x66, 0x16, 0x67, 0x0f, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x90, 0x67,
+0x56, 0x67, 0xd7, 0x61, 0xd0, 0x1d, 0x69, 0x30, 0x56, 0x61, 0x07, 0x73,
+0xf0, 0xbc, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80, 0xe4, 0x2c, 0x00, 0x80,
+0x64, 0x2d, 0x00, 0x80, 0x90, 0xee, 0x20, 0x40, 0x30, 0x01, 0x18, 0x00,
+0x7c, 0x29, 0x00, 0x80, 0x00, 0x55, 0xff, 0xff, 0x38, 0x01, 0x18, 0x00,
+0x10, 0x55, 0xff, 0xff, 0x90, 0xb4, 0x00, 0x21, 0x1e, 0x4a, 0xbb, 0x23,
+0x1b, 0x01, 0xd7, 0x18, 0xf9, 0x73, 0x19, 0x23,
+0xdb, 0x01, 0xd0, 0x18, 0x01, 0x24, 0xcd, 0x23, 0x1b, 0x01, 0xd3, 0x18,
+0xc1, 0x61, 0x1c, 0x70, 0x33, 0x23, 0x9b, 0x01, 0xd3, 0x18, 0x99, 0x60,
+0xb9, 0x73, 0x59, 0x61, 0x2f, 0x23, 0x9b, 0x01, 0xd3, 0x18, 0x19, 0x60,
+0x13, 0x4b, 0x51, 0x27, 0xbf, 0x03, 0x03, 0x63, 0x3b, 0x60, 0x84, 0x69,
+0xe4, 0x18, 0x44, 0x63, 0x04, 0x3c, 0x7c, 0x60, 0x01, 0x24, 0xe4, 0x02,
+0x84, 0x63, 0x0e, 0x4c, 0xc0, 0x46, 0xbc, 0x60, 0x04, 0x6b, 0xc0, 0x46,
+0x44, 0x62, 0x84, 0x69, 0xe4, 0x18, 0x0b, 0x4b, 0xe3, 0x18, 0xfb, 0x60,
+0x03, 0x6b, 0xc0, 0x46, 0x83, 0x62, 0x43, 0x6a, 0xc0, 0x46, 0x03, 0x62,
+0xc1, 0x63, 0x51, 0x64, 0x91, 0x64, 0xd1, 0x65, 0xd1, 0x66, 0x90, 0xbc,
+0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0x00, 0x00, 0x20, 0x40,
+0xfc, 0x07, 0x00, 0x00, 0xfc, 0xf7, 0xff, 0xff, 0x90, 0xb4, 0x00, 0x22,
+0x1b, 0x49, 0xc9, 0x23, 0x1b, 0x01, 0xc8, 0x18, 0x02, 0x71, 0x01, 0x20,
+0xbb, 0x23, 0x1b, 0x01, 0xcb, 0x18, 0x58, 0x73, 0x17, 0x48, 0x03, 0x1c,
+0x00, 0x27, 0xdc, 0x1d, 0xc1, 0x34, 0x1c, 0x65, 0x23, 0x1c, 0x01, 0x37,
+0x3f, 0x2f, 0xf8, 0xd3, 0x1a, 0x65, 0x19, 0x23, 0xdb, 0x01, 0xcf, 0x18,
+0x33, 0x23, 0x9b, 0x01, 0xcb, 0x18, 0x3a, 0x61, 0x98, 0x61, 0x40, 0x20,
+0xf8, 0x60, 0xda, 0x61, 0x1a, 0x62, 0xca, 0x64, 0x0a, 0x66, 0x0c, 0x48,
+0xc0, 0x46, 0xc2, 0x60, 0x0b, 0x48, 0x00, 0x6b, 0xc0, 0x06, 0xc0, 0x0e,
+0xf8, 0x63, 0x0a, 0x48, 0x01, 0x68, 0xc0, 0x46, 0x19, 0x80, 0x41, 0x68,
+0xc0, 0x46, 0x59, 0x80, 0x80, 0x68, 0xc0, 0x46, 0x98, 0x80, 0x90, 0xbc,
+0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0x90, 0xbc, 0x20, 0x40,
+0x90, 0xee, 0x20, 0x40, 0x80, 0x00, 0x14, 0x40, 0x40, 0x00, 0x14, 0x40,
+0x00, 0x20, 0x0a, 0x49, 0xc0, 0x46, 0x08, 0x73, 0xcb, 0x1d, 0xff, 0x33,
+0x3a, 0x33, 0x88, 0x61, 0xc8, 0x61, 0x18, 0x70, 0x06, 0x4a, 0xc0, 0x46,
+0x10, 0x65, 0x50, 0x66, 0x90, 0x66, 0x08, 0x70, 0x58, 0x70, 0xbb, 0x23,
+0x1b, 0x01, 0xd1, 0x18, 0x08, 0x73, 0x70, 0x47, 0x28, 0x05, 0x00, 0x80,
+0x68, 0x0e, 0x00, 0x80, 0xf0, 0xb4, 0x2f, 0x49, 0x2f, 0x4a, 0xc0, 0x46,
+0x11, 0x61, 0x01, 0x23, 0x9b, 0x02, 0xc8, 0x18, 0x50, 0x61, 0x2d, 0x48,
+0xc0, 0x46, 0x10, 0x62, 0xdb, 0x00, 0xc3, 0x18, 0x53, 0x62, 0x00, 0x23,
+0x13, 0x63, 0x53, 0x63, 0x29, 0x4a, 0x2a, 0x4f, 0xd4, 0x1d, 0xff, 0x34,
+0xfa, 0x34, 0x14, 0xc7, 0x08, 0x3f, 0x3b, 0x61, 0x1c, 0x1f, 0x7c, 0x61,
+0x26, 0x4f, 0xc0, 0x46, 0x39, 0x60, 0xb8, 0x61, 0x79, 0x61, 0xf8, 0x62,
+0x3b, 0x63, 0x7b, 0x64, 0xba, 0x64, 0xfa, 0x65, 0x22, 0x4f, 0xfe, 0x1d,
+0xf9, 0x36, 0x22, 0x4d, 0xec, 0x1d, 0x79, 0x34, 0x26, 0x62, 0x51, 0x26,
+0xb6, 0x03, 0x37, 0x61, 0x24, 0x6a, 0xc0, 0x46, 0x74, 0x61, 0x2f, 0x67,
+0x1d, 0x4d, 0x09, 0x27, 0x7f, 0x04, 0xec, 0x1d, 0x75, 0x34, 0x7c, 0x60,
+0x3d, 0x60, 0x1b, 0x4c, 0xc0, 0x46, 0x3c, 0x61, 0xe6, 0x1d, 0x75, 0x36,
+0x7e, 0x61, 0x19, 0x4f, 0xc0, 0x46, 0x7c, 0x60, 0x3d, 0x60, 0x0f, 0x1c,
+0x00, 0x21, 0xff, 0x24, 0x01, 0x34, 0x1d, 0x1c, 0x8b, 0x00, 0xfd, 0x50,
+0x01, 0x31, 0xa1, 0x42, 0xfa, 0xd3, 0x01, 0x1c, 0x00, 0x20, 0x01, 0x27,
+0xff, 0x02, 0x83, 0x00, 0xcd, 0x50, 0x01, 0x30, 0xb8, 0x42, 0xfa, 0xd3,
+0x00, 0x20, 0x81, 0x00, 0x55, 0x50, 0x01, 0x30, 0x80, 0x28, 0xfa, 0xd3,
+0xf0, 0xbc, 0x70, 0x47, 0x24, 0xa3, 0x20, 0x40,
+0x40, 0x01, 0x18, 0x00, 0x24, 0x83, 0x20, 0x40, 0x24, 0xa9, 0x20, 0x40,
+0x80, 0x01, 0x18, 0x00, 0xa8, 0x03, 0x00, 0x80, 0x24, 0xa7, 0x20, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x24, 0xa8, 0x20, 0x40, 0xa4, 0xa8, 0x20, 0x40,
+0x08, 0x04, 0x00, 0x80, 0xb8, 0xb5, 0x2c, 0x48, 0xfd, 0xf7, 0xba, 0xfd,
+0x01, 0x20, 0x2b, 0x49, 0x0a, 0x68, 0x52, 0x0c, 0x06, 0xd2, 0x0a, 0x68,
+0x12, 0x0c, 0x02, 0xd1, 0x0a, 0x68, 0x92, 0x0a, 0x00, 0xd2, 0x00, 0x20,
+0x04, 0x06, 0x24, 0x0e, 0x25, 0x4a, 0xd7, 0x1d, 0x0d, 0x37, 0x00, 0x23,
+0x00, 0x20, 0x9d, 0x00, 0x78, 0x51, 0x01, 0x33, 0x04, 0x2b, 0xfa, 0xd3,
+0x01, 0x27, 0x3f, 0x05, 0x50, 0x61, 0xf8, 0x60, 0xd0, 0x61, 0xf8, 0x61,
+0x00, 0x23, 0xdb, 0x43, 0x93, 0x61, 0x3b, 0x61, 0x13, 0x62, 0x3b, 0x62,
+0x00, 0x27, 0x1b, 0x4b, 0x8d, 0x68, 0xc0, 0x46, 0x00, 0x95, 0x8d, 0x69,
+0xc0, 0x46, 0x00, 0x95, 0x00, 0x2c, 0x0b, 0xd0, 0xdd, 0x6b, 0xc0, 0x46,
+0x00, 0x95, 0x9d, 0x6b, 0xc0, 0x46, 0x00, 0x95, 0x5d, 0x6b, 0xc0, 0x46,
+0x00, 0x95, 0x1d, 0x6b, 0xc0, 0x46, 0x00, 0x95, 0x01, 0x37, 0x40, 0x2f,
+0xe8, 0xd3, 0x00, 0x27, 0x6c, 0x46, 0x01, 0x23, 0x5b, 0x07, 0x1c, 0x43,
+0x01, 0xe0, 0x20, 0x60, 0x01, 0x37, 0x0d, 0x68, 0x2b, 0x09, 0x02, 0xd2,
+0x80, 0x2f, 0xf8, 0xd3, 0x01, 0xe0, 0x80, 0x2f, 0x03, 0xd3, 0x08, 0x49,
+0x4b, 0x6e, 0x01, 0x33, 0x4b, 0x66, 0xd0, 0x62, 0xb8, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0xf4, 0x01, 0xff, 0xff, 0x00, 0x00, 0x10, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0x01, 0x18, 0x40, 0xa0, 0x82, 0x20, 0x40,
+0x90, 0xb4, 0x00, 0x21, 0x0e, 0x4f, 0x0f, 0x4a, 0x00, 0x20, 0x4c, 0x01,
+0x64, 0x1a, 0xa4, 0x00, 0xa3, 0x18, 0x58, 0x60, 0x98, 0x60, 0x18, 0x64,
+0x58, 0x64, 0x10, 0x53, 0x58, 0x80, 0xcc, 0x00, 0xe4, 0x19, 0x98, 0x67,
+0xdc, 0x62, 0x01, 0x31, 0x03, 0x29, 0xee, 0xd3, 0x06, 0x49, 0xc0, 0x46,
+0x08, 0x60, 0x48, 0x60, 0x88, 0x60, 0xc8, 0x60, 0x08, 0x61, 0x90, 0xbc,
+0x70, 0x47, 0x00, 0x00, 0xac, 0x66, 0x21, 0x40, 0x5c, 0x2b, 0x00, 0x80,
+0xd0, 0x2c, 0x00, 0x80, 0x64, 0x21, 0x05, 0x48, 0xc0, 0x46, 0x01, 0x63,
+0x00, 0x21, 0xc9, 0x43, 0x41, 0x63, 0x81, 0x63, 0x00, 0x21, 0xc1, 0x63,
+0x01, 0x64, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x80, 0xb4, 0x01, 0x20,
+0x40, 0x02, 0x0a, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x3c, 0x20, 0x48, 0x60,
+0x88, 0x60, 0x08, 0x48, 0xc0, 0x46, 0xc8, 0x60, 0x00, 0x20, 0x07, 0x4a,
+0x87, 0x00, 0xcb, 0x68, 0xc0, 0x46, 0xda, 0x51, 0x01, 0x30, 0x10, 0x28,
+0xf8, 0xd3, 0x80, 0xbc, 0x70, 0x47, 0x00, 0x00, 0xe4, 0x2d, 0x00, 0x80,
+0xf4, 0x2d, 0x00, 0x80, 0x5d, 0x4c, 0xff, 0xff, 0x12, 0x49, 0x13, 0x48,
+0x67, 0x23, 0x9b, 0x01, 0xca, 0x18, 0x06, 0xc0, 0x08, 0x38, 0x11, 0x4b,
+0xca, 0x18, 0xc1, 0x60, 0x82, 0x60, 0x01, 0x61, 0x0f, 0x49, 0x10, 0x48,
+0xa7, 0x23, 0x9b, 0x01, 0xca, 0x18, 0x06, 0xc0, 0x08, 0x38, 0x0e, 0x4b,
+0xca, 0x18, 0xc1, 0x60, 0x82, 0x60, 0x01, 0x61, 0x0c, 0x48, 0x0d, 0x49,
+0x67, 0x23, 0x9b, 0x01, 0xc2, 0x18, 0x05, 0xc1, 0x08, 0x39, 0x05, 0x4b,
+0xc2, 0x18, 0xc8, 0x60, 0x8a, 0x60, 0x08, 0x61, 0x70, 0x47, 0x00, 0x00,
+0xac, 0x1e, 0x21, 0x40, 0x48, 0x2e, 0x00, 0x80, 0xfc, 0x1f, 0x00, 0x00,
+0xac, 0xee, 0x20, 0x40, 0x34, 0x2e, 0x00, 0x80, 0xfc, 0x2f, 0x00, 0x00,
+0xac, 0x3e, 0x21, 0x40, 0x5c, 0x2e, 0x00, 0x80,
+0x90, 0xb4, 0x00, 0x21, 0x40, 0x4c, 0x00, 0x20, 0x0a, 0x01, 0x12, 0x19,
+0x19, 0x23, 0xdb, 0x01, 0xd2, 0x18, 0xd0, 0x62, 0x10, 0x63, 0x50, 0x63,
+0x90, 0x63, 0x01, 0x31, 0x03, 0x29, 0xf3, 0xd3, 0x3a, 0x49, 0xc0, 0x46,
+0x08, 0x63, 0x48, 0x63, 0x88, 0x63, 0x20, 0x60, 0x01, 0x21, 0xe3, 0x1d,
+0x59, 0x33, 0x60, 0x60, 0x19, 0x71, 0x18, 0x72, 0x98, 0x71, 0x98, 0x72,
+0x59, 0x71, 0x58, 0x72, 0xd8, 0x71, 0xd8, 0x72, 0xe2, 0x1d, 0x49, 0x32,
+0x11, 0x73, 0x19, 0x70, 0x90, 0x73, 0x98, 0x70, 0x51, 0x73, 0x59, 0x70,
+0xd0, 0x73, 0xd8, 0x70, 0x11, 0x71, 0x11, 0x72, 0x90, 0x71, 0x90, 0x72,
+0x50, 0x71, 0x50, 0x72, 0xd0, 0x71, 0xd0, 0x72, 0x18, 0x73, 0x02, 0x22,
+0xe7, 0x1d, 0x69, 0x37, 0x3a, 0x70, 0x99, 0x73, 0xba, 0x70, 0x58, 0x73,
+0x78, 0x70, 0xd8, 0x73, 0xf8, 0x70, 0x39, 0x71, 0x3a, 0x72, 0xb9, 0x71,
+0xb9, 0x72, 0x78, 0x71, 0x7a, 0x72, 0xf9, 0x71, 0xf9, 0x72, 0x39, 0x73,
+0xe3, 0x1d, 0x79, 0x33, 0x1a, 0x70, 0xb9, 0x73, 0x99, 0x70, 0x78, 0x73,
+0x5a, 0x70, 0xf9, 0x73, 0xd9, 0x70, 0x1a, 0x71, 0x1a, 0x72, 0x99, 0x71,
+0x9a, 0x72, 0x58, 0x71, 0x5a, 0x72, 0xd9, 0x71, 0xda, 0x72, 0x19, 0x73,
+0xe7, 0x1d, 0x89, 0x37, 0x3a, 0x70, 0x99, 0x73, 0xb9, 0x70, 0x58, 0x73,
+0x7a, 0x70, 0xd9, 0x73, 0xf9, 0x70, 0x39, 0x71, 0x3a, 0x72, 0xb9, 0x71,
+0xb9, 0x72, 0x78, 0x71, 0x7a, 0x72, 0xf9, 0x71, 0xf9, 0x72, 0x3a, 0x73,
+0xe3, 0x1d, 0x99, 0x33, 0x1a, 0x70, 0xb9, 0x73, 0x9a, 0x70, 0x78, 0x73,
+0x5a, 0x70, 0xf9, 0x73, 0xda, 0x70, 0x19, 0x71, 0x1a, 0x72, 0x99, 0x71,
+0x99, 0x72, 0x58, 0x71, 0x5a, 0x72, 0xd9, 0x71, 0xd9, 0x72, 0x20, 0x61,
+0xe0, 0x60, 0x60, 0x61, 0xa0, 0x60, 0x90, 0xbc, 0x70, 0x47, 0x00, 0x00,
+0xa0, 0x1c, 0x00, 0x80, 0xe8, 0x19, 0x00, 0x80, 0x81, 0x20, 0x00, 0x02,
+0x01, 0x49, 0xc0, 0x46, 0x88, 0x62, 0x70, 0x47, 0xc0, 0x00, 0x14, 0x00,
+0x09, 0x49, 0x0a, 0x4b, 0xc8, 0x18, 0x04, 0x3b, 0xc9, 0x18, 0x08, 0x60,
+0x00, 0x21, 0xc2, 0x1d, 0x29, 0x32, 0xc2, 0x61, 0x10, 0x1c, 0x01, 0x31,
+0x08, 0x29, 0xf8, 0xd3, 0xc1, 0x1f, 0x29, 0x39, 0x00, 0x20, 0xc8, 0x61,
+0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0x84, 0x09, 0x00, 0x00,
+0x06, 0x48, 0x07, 0x49, 0xc0, 0x46, 0x08, 0x80, 0x48, 0x80, 0x00, 0x20,
+0x88, 0x80, 0xc8, 0x80, 0x88, 0x60, 0x04, 0x49, 0xc0, 0x46, 0x48, 0x61,
+0x88, 0x61, 0x70, 0x47, 0xff, 0xff, 0x00, 0x00, 0x4c, 0x2a, 0x00, 0x80,
+0x6c, 0x06, 0x00, 0x80, 0x00, 0x21, 0x06, 0x48, 0xc2, 0x1d, 0x19, 0x32,
+0xc1, 0x60, 0x01, 0x61, 0xc1, 0x61, 0x01, 0x62, 0x11, 0x71, 0xff, 0x30,
+0x01, 0x30, 0x41, 0x62, 0x70, 0x47, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80,
+0x09, 0x48, 0x0a, 0x4b, 0xc0, 0x46, 0x18, 0x60, 0x00, 0x21, 0xc2, 0x1d,
+0x4d, 0x32, 0xc2, 0x60, 0x10, 0x1c, 0x01, 0x31, 0x14, 0x29, 0xf8, 0xd3,
+0xc1, 0x1f, 0x4d, 0x39, 0x00, 0x20, 0xc8, 0x60, 0x58, 0x60, 0x98, 0x60,
+0x70, 0x47, 0x00, 0x00, 0xd8, 0x07, 0x00, 0x80, 0x6c, 0x06, 0x00, 0x80,
+0x00, 0xb5, 0x0b, 0x49, 0x0b, 0x48, 0xfd, 0xf7, 0xea, 0xfb, 0x0b, 0x48,
+0x00, 0x6a, 0x01, 0x23, 0xdb, 0x03, 0x98, 0x43, 0x09, 0x49, 0xc0, 0x46,
+0x08, 0x62, 0x09, 0x48, 0xc1, 0x68, 0x01, 0x29, 0x04, 0xd1, 0xc0, 0x6f,
+0x80, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x08, 0xbc, 0x18, 0x47,
+0xc1, 0xbd, 0x21, 0x40, 0x75, 0x98, 0x21, 0x40,
+0xc0, 0x00, 0x18, 0x40, 0xc0, 0x00, 0x18, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x00, 0xb5, 0x0f, 0x48, 0xc1, 0x68, 0x01, 0x29, 0x04, 0xd1, 0xc0, 0x6f,
+0x80, 0x23, 0x01, 0x68, 0x99, 0x43, 0x01, 0x60, 0x0b, 0x4b, 0x0c, 0x48,
+0x0c, 0x4a, 0x00, 0x21, 0xfd, 0xf7, 0xbf, 0xfb, 0x0b, 0x48, 0x41, 0x8d,
+0x01, 0x31, 0x41, 0x85, 0x00, 0x21, 0xc1, 0x85, 0x09, 0x48, 0x00, 0x6a,
+0x01, 0x23, 0xdb, 0x03, 0x18, 0x43, 0x08, 0x49, 0xc0, 0x46, 0x08, 0x62,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x59, 0xbd, 0x21, 0x40,
+0x75, 0x98, 0x21, 0x40, 0xb8, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+0xc0, 0x00, 0x18, 0x40, 0xc0, 0x00, 0x18, 0x00, 0xf0, 0xb5, 0x1b, 0x4c,
+0x10, 0x26, 0xe0, 0x68, 0x01, 0x28, 0x08, 0xd1, 0x60, 0x88, 0x00, 0x28,
+0x05, 0xd1, 0x20, 0x79, 0x00, 0x28, 0x02, 0xd1, 0x19, 0x20, 0xa0, 0x67,
+0x00, 0xe0, 0xa6, 0x67, 0x00, 0x20, 0x07, 0x23, 0x5b, 0x02, 0xe5, 0x18,
+0xc1, 0x43, 0xe8, 0x61, 0x69, 0x62, 0x59, 0x08, 0xa1, 0x27, 0x7f, 0x03,
+0x79, 0x60, 0x0f, 0x21, 0x79, 0x60, 0xe1, 0x1d, 0xb9, 0x31, 0x08, 0x71,
+0x01, 0x20, 0xb8, 0x60, 0x40, 0x02, 0xb8, 0x60, 0x00, 0xf0, 0x4c, 0xfa,
+0x00, 0xf0, 0xf0, 0xfa, 0x04, 0x20, 0xb8, 0x60, 0x07, 0x20, 0x78, 0x61,
+0x7e, 0x60, 0x1b, 0x23, 0xdb, 0x01, 0xe0, 0x18, 0xc0, 0x8b, 0x04, 0x23,
+0x18, 0x40, 0xe8, 0x62, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0x90, 0xb4, 0x02, 0x1c, 0x00, 0x20, 0xff, 0x23,
+0x01, 0x33, 0x9a, 0x42, 0x08, 0xd0, 0x01, 0x29, 0x00, 0xd1, 0x01, 0x20,
+0x00, 0x2a, 0x01, 0xd1, 0x02, 0x23, 0x18, 0x43, 0x90, 0xbc, 0x70, 0x47,
+0x1b, 0x4a, 0xd7, 0x68, 0x1a, 0x4b, 0x19, 0x79, 0x1c, 0x1c, 0x37, 0x23,
+0x9b, 0x01, 0xe3, 0x18, 0x01, 0x2f, 0x0d, 0xd1, 0x57, 0x88, 0x00, 0x2f,
+0x0a, 0xd1, 0x00, 0x29, 0x0a, 0xd1, 0x59, 0x8b, 0x0a, 0x09, 0x00, 0xd3,
+0x02, 0x20, 0x49, 0x09, 0xe8, 0xd3, 0x01, 0x23, 0x18, 0x43, 0xe5, 0xe7,
+0x00, 0x29, 0x03, 0xd0, 0x98, 0x8a, 0x80, 0x07, 0x80, 0x0f, 0xdf, 0xe7,
+0x6d, 0x23, 0x5b, 0x01, 0xd1, 0x18, 0x8a, 0x88, 0xff, 0x27, 0x01, 0x37,
+0x17, 0x40, 0x0a, 0x49, 0xc9, 0x88, 0x03, 0xd0, 0x4b, 0x0a, 0x01, 0xd3,
+0x03, 0x20, 0xd1, 0xe7, 0x13, 0x0a, 0x03, 0xd3, 0x0b, 0x0a, 0x01, 0xd3,
+0x02, 0x20, 0xcb, 0xe7, 0xd2, 0x09, 0xc9, 0xd3, 0xc9, 0x09, 0xc7, 0xd3,
+0x01, 0x20, 0xc5, 0xe7, 0x68, 0x0e, 0x00, 0x80, 0x08, 0x1c, 0x00, 0x80,
+0xf0, 0xb5, 0xc1, 0xb0, 0x01, 0x20, 0x00, 0x07, 0x52, 0x49, 0xc0, 0x46,
+0x08, 0x60, 0x52, 0x48, 0x42, 0x69, 0x40, 0x0d, 0xa1, 0x21, 0x49, 0x03,
+0x48, 0x60, 0x50, 0x48, 0xc0, 0x6a, 0x50, 0x4b, 0x18, 0x43, 0x00, 0x21,
+0x03, 0x03, 0x1b, 0x0b, 0x4e, 0x4c, 0x27, 0x6f, 0x3d, 0x03, 0x2d, 0x0b,
+0xe7, 0x1d, 0x79, 0x37, 0xab, 0x42, 0x1c, 0xd0, 0xe3, 0x1d, 0x79, 0x33,
+0x1b, 0x6a, 0xc0, 0x46, 0x40, 0x93, 0x01, 0x23, 0x9b, 0x07, 0x03, 0x43,
+0x1b, 0x68, 0xcc, 0x00, 0x6e, 0x46, 0x33, 0x51, 0x01, 0x23, 0x9b, 0x07,
+0x06, 0x1d, 0x33, 0x43, 0x1b, 0x68, 0x6c, 0x44, 0x63, 0x60, 0x08, 0x30,
+0x01, 0x31, 0x40, 0x9b, 0x83, 0x42, 0x00, 0xd8, 0x3f, 0x48, 0x03, 0x03,
+0x1b, 0x0b, 0xab, 0x42, 0xe7, 0xd1, 0x00, 0x20, 0x01, 0x23, 0x1b, 0x03,
+0x13, 0x40, 0x3c, 0x4c, 0x03, 0xd0, 0x63, 0x6a, 0x01, 0x33, 0x63, 0x62,
+0x09, 0xe0, 0x13, 0x0b, 0x03, 0xd3, 0x23, 0x6a,
+0x01, 0x33, 0x23, 0x62, 0x03, 0xe0, 0x37, 0x4b, 0x5c, 0x6d, 0x01, 0x34,
+0x5c, 0x65, 0x00, 0x29, 0x09, 0xd0, 0x03, 0x1c, 0xdc, 0x00, 0x23, 0x1c,
+0x6b, 0x44, 0x5c, 0x68, 0x01, 0x30, 0x23, 0x0d, 0x01, 0xd2, 0x88, 0x42,
+0xf5, 0xd1, 0x30, 0x4c, 0x25, 0x68, 0x6b, 0x0c, 0x05, 0xd2, 0x23, 0x68,
+0x1b, 0x0c, 0x08, 0xd1, 0x24, 0x68, 0xa3, 0x0a, 0x05, 0xd3, 0x20, 0x24,
+0x2b, 0x4b, 0xc0, 0x46, 0x5c, 0x62, 0x00, 0x24, 0x5c, 0x62, 0x25, 0x4b,
+0x23, 0x4c, 0x51, 0x26, 0xb6, 0x03, 0x23, 0x67, 0x33, 0x61, 0x3d, 0x6a,
+0xc0, 0x46, 0x75, 0x61, 0x02, 0x25, 0xa1, 0x26, 0x76, 0x03, 0x75, 0x60,
+0x01, 0x25, 0xb5, 0x60, 0xe6, 0x1d, 0xb9, 0x36, 0x35, 0x71, 0x88, 0x42,
+0x21, 0xd0, 0x25, 0x1c, 0xc3, 0x00, 0x6c, 0x46, 0xe4, 0x58, 0x2e, 0x6f,
+0x6b, 0x44, 0x34, 0x60, 0x5b, 0x68, 0x2c, 0x6f, 0xc0, 0x46, 0x63, 0x60,
+0x2b, 0x6f, 0x08, 0x33, 0x2b, 0x67, 0x3c, 0x6a, 0xa3, 0x42, 0x02, 0xd3,
+0x12, 0x4b, 0xc0, 0x46, 0x2b, 0x67, 0x03, 0x1c, 0xdb, 0x00, 0x6b, 0x44,
+0x5c, 0x68, 0x01, 0x30, 0x23, 0x0d, 0x04, 0xd3, 0x51, 0x24, 0xa4, 0x03,
+0x2b, 0x6f, 0xc0, 0x46, 0xa3, 0x61, 0x88, 0x42, 0xde, 0xd1, 0x10, 0x0b,
+0x03, 0xd3, 0x0e, 0x49, 0x01, 0x20, 0xfd, 0xf7, 0x74, 0xfa, 0x41, 0xb0,
+0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0,
+0x00, 0x01, 0x14, 0x40, 0x00, 0x40, 0x14, 0x40, 0x00, 0x00, 0x20, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x24, 0xa7, 0x20, 0x40, 0xa4, 0x2a, 0x00, 0x80,
+0xa0, 0x82, 0x20, 0x40, 0x00, 0x00, 0x10, 0x40, 0xc0, 0x00, 0x18, 0x00,
+0xc9, 0x4f, 0xff, 0xff, 0xf0, 0xb4, 0x00, 0x21, 0x00, 0x23, 0x07, 0x22,
+0x06, 0x24, 0x47, 0x4f, 0xc0, 0x46, 0x3c, 0x61, 0x3a, 0x61, 0x01, 0x33,
+0x20, 0x2b, 0xf9, 0xd3, 0x04, 0x25, 0x3d, 0x61, 0x05, 0x23, 0x3b, 0x61,
+0x3c, 0x61, 0x3a, 0x61, 0x3c, 0x61, 0x3a, 0x61, 0x3d, 0x61, 0x3b, 0x61,
+0x3f, 0x4d, 0xab, 0x6f, 0xde, 0x08, 0x02, 0x23, 0x1e, 0x40, 0x04, 0x23,
+0x33, 0x43, 0x3b, 0x61, 0x05, 0x23, 0x33, 0x43, 0x3b, 0x61, 0xab, 0x6f,
+0x9e, 0x08, 0x02, 0x23, 0x1e, 0x40, 0x04, 0x23, 0x33, 0x43, 0x3b, 0x61,
+0x05, 0x23, 0x33, 0x43, 0x3b, 0x61, 0xab, 0x6f, 0x5e, 0x08, 0x02, 0x23,
+0x1e, 0x40, 0x04, 0x23, 0x33, 0x43, 0x3b, 0x61, 0x05, 0x23, 0x33, 0x43,
+0x3b, 0x61, 0x02, 0x23, 0xae, 0x6f, 0x1e, 0x40, 0x04, 0x23, 0x33, 0x43,
+0x3b, 0x61, 0x05, 0x23, 0x33, 0x43, 0x3b, 0x61, 0xab, 0x6f, 0x5d, 0x00,
+0x02, 0x23, 0x1d, 0x40, 0x04, 0x23, 0x2b, 0x43, 0x3b, 0x61, 0x05, 0x23,
+0x2b, 0x43, 0x3b, 0x61, 0xc5, 0x08, 0x02, 0x23, 0x1d, 0x40, 0x04, 0x23,
+0x2b, 0x43, 0x3b, 0x61, 0x05, 0x23, 0x2b, 0x43, 0x3b, 0x61, 0x85, 0x08,
+0x02, 0x23, 0x1d, 0x40, 0x04, 0x23, 0x2b, 0x43, 0x3b, 0x61, 0x05, 0x23,
+0x2b, 0x43, 0x3b, 0x61, 0x45, 0x08, 0x02, 0x23, 0x1d, 0x40, 0x04, 0x23,
+0x2b, 0x43, 0x3b, 0x61, 0x05, 0x23, 0x2b, 0x43, 0x3b, 0x61, 0x02, 0x25,
+0x05, 0x40, 0x04, 0x23, 0x2b, 0x43, 0x3b, 0x61, 0x05, 0x23, 0x2b, 0x43,
+0x3b, 0x61, 0x40, 0x00, 0x02, 0x23, 0x18, 0x40, 0x04, 0x23, 0x03, 0x43,
+0x3b, 0x61, 0x05, 0x23, 0x18, 0x43, 0x38, 0x61, 0x00, 0x25, 0x3d, 0x61,
+0x01, 0x23, 0x3b, 0x61, 0x3d, 0x61, 0x3b, 0x61, 0x00, 0x20, 0x3d, 0x61,
+0x0d, 0x4b, 0x1b, 0x69, 0x49, 0x00, 0x1e, 0x1c, 0x02, 0x23, 0x33, 0x40,
+0x19, 0x43, 0x01, 0x23, 0x3b, 0x61, 0x01, 0x30,
+0x10, 0x28, 0xf2, 0xd3, 0x02, 0x20, 0x38, 0x61, 0x03, 0x20, 0x38, 0x61,
+0x3c, 0x61, 0x3a, 0x61, 0x3c, 0x61, 0x3a, 0x61, 0x38, 0x61, 0x48, 0x08,
+0xf0, 0xbc, 0x70, 0x47, 0x80, 0x00, 0x14, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x80, 0x00, 0x14, 0x40, 0xf0, 0xb4, 0x00, 0x24, 0x07, 0x23, 0x06, 0x27,
+0x44, 0x4a, 0xc0, 0x46, 0x17, 0x61, 0x13, 0x61, 0x01, 0x34, 0x20, 0x2c,
+0xf9, 0xd3, 0x04, 0x26, 0x16, 0x61, 0x05, 0x24, 0x14, 0x61, 0x17, 0x61,
+0x07, 0x23, 0x13, 0x61, 0x16, 0x61, 0x14, 0x61, 0x17, 0x61, 0x13, 0x61,
+0x3c, 0x4b, 0x9b, 0x6f, 0xdd, 0x08, 0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c,
+0x33, 0x43, 0x13, 0x61, 0x25, 0x43, 0x15, 0x61, 0x37, 0x4b, 0x9b, 0x6f,
+0x9d, 0x08, 0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61,
+0x25, 0x43, 0x15, 0x61, 0x32, 0x4b, 0x9b, 0x6f, 0x5d, 0x08, 0x02, 0x23,
+0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61, 0x25, 0x43, 0x15, 0x61,
+0x2d, 0x4b, 0x9d, 0x6f, 0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43,
+0x13, 0x61, 0x25, 0x43, 0x15, 0x61, 0x29, 0x4b, 0x9b, 0x6f, 0x5d, 0x00,
+0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61, 0x25, 0x43,
+0x15, 0x61, 0xc5, 0x08, 0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43,
+0x13, 0x61, 0x25, 0x43, 0x15, 0x61, 0x85, 0x08, 0x02, 0x23, 0x1d, 0x40,
+0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61, 0x25, 0x43, 0x15, 0x61, 0x45, 0x08,
+0x02, 0x23, 0x1d, 0x40, 0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61, 0x25, 0x43,
+0x15, 0x61, 0x02, 0x25, 0x05, 0x40, 0x2b, 0x1c, 0x33, 0x43, 0x13, 0x61,
+0x25, 0x43, 0x15, 0x61, 0x40, 0x00, 0x02, 0x23, 0x18, 0x40, 0x03, 0x1c,
+0x33, 0x43, 0x13, 0x61, 0x20, 0x43, 0x10, 0x61, 0x17, 0x61, 0x07, 0x23,
+0x13, 0x61, 0x16, 0x61, 0x14, 0x61, 0x4c, 0x00, 0x00, 0x20, 0x0f, 0x21,
+0x25, 0x1c, 0xcd, 0x40, 0x02, 0x23, 0x1d, 0x40, 0x04, 0x23, 0x2b, 0x43,
+0x13, 0x61, 0x05, 0x23, 0x2b, 0x43, 0x13, 0x61, 0x01, 0x30, 0x01, 0x39,
+0x10, 0x28, 0xf1, 0xd3, 0x17, 0x61, 0x07, 0x23, 0x13, 0x61, 0x17, 0x61,
+0x13, 0x61, 0x03, 0x20, 0x10, 0x61, 0xf0, 0xbc, 0x70, 0x47, 0x00, 0x00,
+0x80, 0x00, 0x14, 0x00, 0x68, 0x0e, 0x00, 0x80, 0xf0, 0xb5, 0x4f, 0x4d,
+0x08, 0x21, 0x02, 0x20, 0x2a, 0x1c, 0xfd, 0xf7, 0x27, 0xf9, 0x4d, 0x4c,
+0x71, 0x23, 0x5b, 0x01, 0xe7, 0x18, 0x38, 0x80, 0x1a, 0x21, 0x02, 0x20,
+0x2a, 0x1c, 0xfd, 0xf7, 0x1d, 0xf9, 0x78, 0x80, 0x20, 0x79, 0x00, 0x28,
+0x0b, 0xd0, 0x00, 0x20, 0x38, 0x80, 0xe0, 0x68, 0x01, 0x28, 0x10, 0xd1,
+0x44, 0x48, 0x00, 0x68, 0x01, 0x23, 0x9b, 0x02, 0x18, 0x43, 0x99, 0x02,
+0x08, 0x60, 0xe0, 0x68, 0x01, 0x28, 0x06, 0xd1, 0x60, 0x88, 0x00, 0x28,
+0x03, 0xd1, 0xf9, 0x21, 0x12, 0x20, 0xff, 0xf7, 0x43, 0xff, 0x01, 0x21,
+0xc9, 0x03, 0x00, 0x20, 0xff, 0xf7, 0x3e, 0xff, 0x00, 0x25, 0x7d, 0x26,
+0xf6, 0x00, 0x00, 0xe0, 0x01, 0x35, 0x00, 0x20, 0xff, 0xf7, 0x9c, 0xfe,
+0x00, 0x0c, 0x01, 0xd3, 0xb5, 0x42, 0xf7, 0xd3, 0x00, 0x25, 0x05, 0xe0,
+0x03, 0x21, 0x09, 0x03, 0x00, 0x20, 0xff, 0xf7, 0x2b, 0xff, 0x01, 0x35,
+0x00, 0x20, 0xff, 0xf7, 0x8d, 0xfe, 0x40, 0x0b, 0x01, 0xd2, 0xb5, 0x42,
+0xf2, 0xd3, 0x04, 0x20, 0xff, 0xf7, 0x86, 0xfe, 0xff, 0x23, 0xe1, 0x33,
+0x98, 0x43, 0x01, 0x21, 0x01, 0x43, 0x38, 0x88, 0xff, 0x23, 0x01, 0x33,
+0x98, 0x42, 0x03, 0xd1, 0x2f, 0x23, 0x5b, 0x01,
+0x19, 0x43, 0x16, 0xe0, 0x01, 0x28, 0x09, 0xd1, 0x78, 0x88, 0x01, 0x28,
+0x03, 0xd1, 0x23, 0x23, 0x5b, 0x01, 0x19, 0x43, 0x0d, 0xe0, 0x20, 0x23,
+0x19, 0x43, 0x0a, 0xe0, 0x00, 0x28, 0x08, 0xd1, 0x78, 0x88, 0x01, 0x28,
+0x03, 0xd1, 0x0b, 0x23, 0xdb, 0x01, 0x19, 0x43, 0x01, 0xe0, 0x80, 0x23,
+0x19, 0x43, 0x04, 0x20, 0xff, 0xf7, 0xf8, 0xfe, 0x09, 0x21, 0x49, 0x02,
+0x00, 0x20, 0xff, 0xf7, 0xf3, 0xfe, 0xe0, 0x68, 0x00, 0x28, 0x0c, 0xd1,
+0x00, 0x21, 0x1b, 0x20, 0xff, 0xf7, 0xec, 0xfe, 0x1a, 0x20, 0xff, 0xf7,
+0x4f, 0xfe, 0x01, 0x21, 0xc9, 0x03, 0x01, 0x43, 0x1a, 0x20, 0xff, 0xf7,
+0xe3, 0xfe, 0x00, 0x27, 0x03, 0xe0, 0x08, 0x2f, 0x01, 0xd3, 0x0f, 0x2f,
+0x08, 0xd9, 0x38, 0x1c, 0xff, 0xf7, 0x40, 0xfe, 0x79, 0x00, 0x09, 0x19,
+0x1b, 0x23, 0xdb, 0x01, 0xc9, 0x18, 0x88, 0x83, 0x01, 0x37, 0x20, 0x2f,
+0xef, 0xd3, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xed, 0xaf, 0x21, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0x00, 0x10, 0x40, 0x81, 0xb0, 0x13, 0x48,
+0x01, 0x68, 0xc0, 0x46, 0x00, 0x91, 0x41, 0x68, 0xc0, 0x46, 0x00, 0x91,
+0x81, 0x68, 0xc0, 0x46, 0x00, 0x91, 0xc1, 0x68, 0xc0, 0x46, 0x00, 0x91,
+0x01, 0x69, 0xc0, 0x46, 0x00, 0x91, 0x41, 0x69, 0xc0, 0x46, 0x00, 0x91,
+0x81, 0x69, 0xc0, 0x46, 0x00, 0x91, 0xc1, 0x69, 0xc0, 0x46, 0x00, 0x91,
+0x01, 0x6a, 0xc0, 0x46, 0x00, 0x91, 0x41, 0x6a, 0xc0, 0x46, 0x00, 0x91,
+0x81, 0x6a, 0xc0, 0x46, 0x00, 0x91, 0xc0, 0x6a, 0xc0, 0x46, 0x00, 0x90,
+0x01, 0xb0, 0x70, 0x47, 0x00, 0x08, 0x14, 0x40, 0xf0, 0xb5, 0x83, 0xb0,
+0x68, 0x4d, 0x1b, 0x23, 0xdb, 0x01, 0xef, 0x18, 0xf8, 0x8b, 0x04, 0x22,
+0x02, 0x40, 0x02, 0x92, 0x71, 0x23, 0x5b, 0x01, 0xe8, 0x18, 0x01, 0x88,
+0xc0, 0x46, 0x01, 0x91, 0x40, 0x88, 0xc0, 0x46, 0x00, 0x90, 0x00, 0x24,
+0x03, 0xe0, 0x08, 0x2c, 0x01, 0xd3, 0x0f, 0x2c, 0x08, 0xd9, 0x20, 0x1c,
+0xff, 0xf7, 0xe8, 0xfd, 0x61, 0x00, 0x49, 0x19, 0x1b, 0x23, 0xdb, 0x01,
+0xc9, 0x18, 0x88, 0x83, 0x01, 0x34, 0x20, 0x2c, 0xef, 0xd3, 0x58, 0x4c,
+0xe0, 0x69, 0x00, 0x28, 0x15, 0xd0, 0x57, 0x4e, 0x20, 0x25, 0x01, 0x3d,
+0x53, 0x49, 0xe0, 0x69, 0x30, 0x40, 0x0b, 0xd0, 0x68, 0x00, 0x40, 0x18,
+0x37, 0x23, 0x9b, 0x01, 0xc0, 0x18, 0x81, 0x8b, 0x28, 0x1c, 0xff, 0xf7,
+0x65, 0xfe, 0xe0, 0x69, 0xb0, 0x43, 0xe0, 0x61, 0x76, 0x08, 0x00, 0x2d,
+0xeb, 0xd1, 0x01, 0x20, 0xff, 0xf7, 0xc2, 0xfd, 0x48, 0x49, 0xc0, 0x46,
+0xf8, 0x83, 0xf8, 0x8b, 0xc2, 0x08, 0x25, 0xd3, 0xca, 0x68, 0x01, 0x2a,
+0x13, 0xd1, 0x0a, 0x79, 0x00, 0x2a, 0x1f, 0xd1, 0x49, 0x88, 0x00, 0x29,
+0x1c, 0xd1, 0x01, 0x99, 0x43, 0x4a, 0x00, 0x29, 0x05, 0xd0, 0x01, 0x29,
+0x16, 0xd1, 0x51, 0x8b, 0xc9, 0x08, 0x13, 0xd2, 0x0f, 0xe0, 0x51, 0x8b,
+0x09, 0x09, 0x0f, 0xd2, 0x0b, 0xe0, 0x0a, 0x79, 0x00, 0x2a, 0x0b, 0xd1,
+0x6d, 0x23, 0x5b, 0x01, 0xc9, 0x18, 0x8a, 0x88, 0xc9, 0x88, 0x11, 0x40,
+0x49, 0x09, 0x09, 0x07, 0x02, 0xd1, 0x04, 0x23, 0x98, 0x43, 0xf8, 0x83,
+0xf8, 0x8b, 0x04, 0x21, 0x01, 0x40, 0x02, 0x9a, 0x1f, 0xd0, 0xb9, 0x8b,
+0x4a, 0x0b, 0x27, 0xd3, 0x80, 0x09, 0x25, 0xd3, 0xff, 0x23, 0x01, 0x98,
+0x01, 0x33, 0x98, 0x42, 0x20, 0xd0, 0x00, 0x25, 0x00, 0x98, 0x01, 0x28,
+0x00, 0xd1, 0x05, 0x02, 0x01, 0x98, 0x00, 0x28, 0x02, 0xd1, 0x01, 0x23,
+0x5b, 0x03, 0x1d, 0x43, 0xa9, 0x42, 0x13, 0xd0,
+0x00, 0x20, 0x29, 0x1c, 0xff, 0xf7, 0x10, 0xfe, 0xbd, 0x83, 0x00, 0x20,
+0xc0, 0x43, 0x60, 0x62, 0x0a, 0xe0, 0xb8, 0x8b, 0x40, 0x0b, 0x07, 0xd2,
+0x09, 0x21, 0x49, 0x02, 0x00, 0x20, 0xff, 0xf7, 0x03, 0xfe, 0x09, 0x20,
+0x40, 0x02, 0xb8, 0x83, 0xf8, 0x8b, 0xc0, 0x08, 0x2d, 0xd3, 0x1d, 0x48,
+0xc7, 0x6a, 0x01, 0x98, 0x00, 0x99, 0xff, 0xf7, 0x51, 0xfc, 0xc2, 0x07,
+0xd2, 0x0f, 0x1a, 0x49, 0x03, 0xd0, 0x04, 0x23, 0xcd, 0x6d, 0x2b, 0x43,
+0x03, 0xe0, 0x04, 0x23, 0xcd, 0x6d, 0x9d, 0x43, 0x2b, 0x1c, 0xcb, 0x65,
+0x83, 0x08, 0x03, 0xd3, 0x02, 0x23, 0xcd, 0x6d, 0x2b, 0x43, 0x03, 0xe0,
+0x02, 0x23, 0xcd, 0x6d, 0x9d, 0x43, 0x2b, 0x1c, 0xcb, 0x65, 0x61, 0x6a,
+0x81, 0x42, 0x0c, 0xd0, 0x60, 0x62, 0x0e, 0x48, 0x00, 0x2a, 0x03, 0xd0,
+0xff, 0x21, 0x21, 0x31, 0x39, 0x43, 0x03, 0xe0, 0xff, 0x23, 0x21, 0x33,
+0x9f, 0x43, 0x39, 0x1c, 0xc1, 0x62, 0x03, 0xb0, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0x68, 0x1c, 0x00, 0x80,
+0x00, 0x00, 0x00, 0x80, 0x28, 0x1c, 0x00, 0x80, 0x40, 0x00, 0x14, 0x40,
+0xa4, 0x2a, 0x00, 0x80, 0x40, 0x00, 0x14, 0x00, 0x90, 0xb4, 0x01, 0x22,
+0x20, 0x28, 0x0f, 0xd2, 0x43, 0x00, 0x0f, 0x1c, 0x07, 0x49, 0x5c, 0x18,
+0x37, 0x23, 0x9b, 0x01, 0xe3, 0x18, 0x9f, 0x83, 0x82, 0x40, 0x07, 0x23,
+0x5b, 0x02, 0xc9, 0x18, 0x10, 0x1c, 0xca, 0x69, 0x10, 0x43, 0xc8, 0x61,
+0x90, 0xbc, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x0b, 0x48, 0x40, 0x69,
+0x0b, 0x49, 0xc9, 0x8b, 0x04, 0x22, 0x0a, 0x40, 0x0a, 0x49, 0x06, 0xd0,
+0x01, 0x23, 0xdb, 0x02, 0x98, 0x43, 0x01, 0x23, 0xca, 0x6d, 0x1a, 0x43,
+0x05, 0xe0, 0x01, 0x23, 0xdb, 0x02, 0x18, 0x43, 0xca, 0x6d, 0x52, 0x08,
+0x52, 0x00, 0xca, 0x65, 0x70, 0x47, 0x00, 0x00, 0x80, 0x00, 0x14, 0x40,
+0xe8, 0x1b, 0x00, 0x80, 0xa4, 0x2a, 0x00, 0x80, 0x00, 0xb5, 0x84, 0xb0,
+0xff, 0xf7, 0xde, 0xff, 0x01, 0x1c, 0x05, 0x20, 0x00, 0x90, 0x00, 0x20,
+0x01, 0xab, 0x18, 0x80, 0x04, 0x3b, 0x58, 0x70, 0x1b, 0x22, 0x00, 0xab,
+0x5a, 0x80, 0xd9, 0x80, 0x05, 0x49, 0xc9, 0x6d, 0xc0, 0x46, 0x02, 0x91,
+0x03, 0x90, 0x68, 0x46, 0x00, 0x21, 0xfd, 0xf7, 0x79, 0xf8, 0x04, 0xb0,
+0x08, 0xbc, 0x18, 0x47, 0xa4, 0x2a, 0x00, 0x80, 0x0f, 0x48, 0x01, 0x68,
+0x49, 0x0c, 0x05, 0xd2, 0x01, 0x68, 0x09, 0x0c, 0x06, 0xd1, 0x00, 0x68,
+0x80, 0x0a, 0x03, 0xd3, 0x0b, 0x48, 0x00, 0x68, 0x00, 0x0c, 0x01, 0xe0,
+0x0a, 0x48, 0x80, 0x6c, 0x00, 0x04, 0x00, 0x0c, 0x09, 0x4b, 0x98, 0x42,
+0x05, 0xd0, 0x02, 0x33, 0x98, 0x42, 0x02, 0xd0, 0x07, 0x4b, 0x98, 0x42,
+0x01, 0xd1, 0x01, 0x20, 0x70, 0x47, 0x00, 0x20, 0xfc, 0xe7, 0x00, 0x00,
+0x00, 0x00, 0x10, 0x40, 0x00, 0x00, 0x18, 0x40, 0x00, 0x00, 0x00, 0x80,
+0x04, 0x99, 0x00, 0x00, 0x07, 0x99, 0x00, 0x00, 0x90, 0xb4, 0x01, 0x24,
+0x21, 0x1c, 0x18, 0x48, 0x02, 0x68, 0x52, 0x0c, 0x06, 0xd2, 0x02, 0x68,
+0x12, 0x0c, 0x02, 0xd1, 0x00, 0x68, 0x80, 0x0a, 0x00, 0xd2, 0x00, 0x21,
+0x09, 0x06, 0x09, 0x0e, 0x12, 0x4f, 0x13, 0x4a, 0x02, 0xd0, 0x38, 0x68,
+0x00, 0x0c, 0x00, 0xe0, 0x90, 0x6c, 0x00, 0x04, 0x00, 0x0c, 0x10, 0x4b,
+0x98, 0x42, 0x08, 0xd0, 0x02, 0x33, 0x98, 0x42, 0x05, 0xd0, 0x0e, 0x4b,
+0x98, 0x42, 0x02, 0xd0, 0x02, 0x3b, 0x98, 0x42, 0x0c, 0xd1, 0x00, 0x29,
+0x02, 0xd0, 0xf8, 0x6a, 0x00, 0x0c, 0x00, 0xe0,
+0xd0, 0x6c, 0x40, 0x0a, 0x00, 0xd2, 0x00, 0x24, 0x20, 0x06, 0x00, 0x0e,
+0x90, 0xbc, 0x70, 0x47, 0x00, 0x20, 0xfb, 0xe7, 0x00, 0x00, 0x10, 0x40,
+0x00, 0x00, 0x18, 0x40, 0x00, 0x00, 0x00, 0x80, 0x04, 0x99, 0x00, 0x00,
+0x07, 0x99, 0x00, 0x00, 0x0c, 0x48, 0x01, 0x68, 0x49, 0x0c, 0x05, 0xd2,
+0x01, 0x68, 0x09, 0x0c, 0x05, 0xd1, 0x00, 0x68, 0x80, 0x0a, 0x02, 0xd3,
+0x08, 0x48, 0x80, 0x68, 0x01, 0xe0, 0x08, 0x48, 0x40, 0x6c, 0x00, 0x04,
+0x00, 0x0c, 0x00, 0x21, 0x03, 0x28, 0x03, 0xd0, 0x40, 0x08, 0x01, 0xd3,
+0x01, 0x20, 0x70, 0x47, 0x08, 0x1c, 0xfc, 0xe7, 0x00, 0x00, 0x10, 0x40,
+0x00, 0x00, 0x18, 0x40, 0x00, 0x00, 0x00, 0x80, 0xf0, 0xb5, 0x01, 0x27,
+0x1a, 0x4c, 0x25, 0x68, 0xff, 0xf7, 0x72, 0xff, 0x03, 0x1c, 0x19, 0x4a,
+0x02, 0x21, 0x01, 0x26, 0x18, 0x48, 0x01, 0x2b, 0x1b, 0xd1, 0xcb, 0x04,
+0x1e, 0x60, 0x55, 0x23, 0x03, 0x60, 0x00, 0x23, 0x43, 0x60, 0x06, 0x68,
+0x55, 0x2e, 0x1b, 0xd1, 0xaa, 0x26, 0x06, 0x60, 0x43, 0x60, 0x03, 0x68,
+0xaa, 0x2b, 0x15, 0xd1, 0x09, 0x23, 0x03, 0x60, 0x05, 0x23, 0x0f, 0x4f,
+0xc0, 0x46, 0x3b, 0x60, 0x03, 0x23, 0x0e, 0x4f, 0xc0, 0x46, 0x3b, 0x60,
+0x11, 0x60, 0x07, 0x68, 0x08, 0xe0, 0x08, 0x23, 0x23, 0x60, 0x04, 0x23,
+0x0a, 0x4f, 0xc0, 0x46, 0x3b, 0x60, 0x11, 0x60, 0x06, 0x60, 0x27, 0x68,
+0xc0, 0x46, 0x25, 0x60, 0x38, 0x1c, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0x00, 0x20, 0x40, 0x00, 0x00, 0x24, 0x40, 0x00, 0x00, 0x22, 0x40,
+0x00, 0x00, 0x2a, 0x40, 0x00, 0x00, 0x26, 0x40, 0x00, 0x00, 0x28, 0x40,
+0x80, 0xb5, 0x07, 0x1c, 0xff, 0xf7, 0x30, 0xff, 0x01, 0x28, 0x05, 0xd1,
+0x19, 0x48, 0x00, 0x68, 0x19, 0x49, 0x49, 0x6b, 0x08, 0x40, 0x22, 0xe0,
+0x18, 0x48, 0x01, 0x68, 0x49, 0x0c, 0x05, 0xd2, 0x01, 0x68, 0x09, 0x0c,
+0x06, 0xd1, 0x00, 0x68, 0x80, 0x0a, 0x03, 0xd3, 0x14, 0x48, 0x00, 0x68,
+0x00, 0x0c, 0x01, 0xe0, 0x13, 0x48, 0x80, 0x6c, 0x00, 0x04, 0x00, 0x0c,
+0x12, 0x4b, 0xc0, 0x18, 0x08, 0x28, 0x0b, 0xd2, 0x01, 0xa3, 0x1b, 0x5c,
+0x5b, 0x00, 0x9f, 0x44, 0x05, 0x03, 0x07, 0x03, 0x07, 0x07, 0x05, 0x03,
+0x03, 0x20, 0x02, 0xe0, 0x01, 0x20, 0x00, 0xe0, 0x00, 0x20, 0x01, 0x21,
+0x38, 0x60, 0x80, 0x07, 0x00, 0xd1, 0x00, 0x21, 0x08, 0x06, 0x00, 0x0e,
+0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x34, 0x6e, 0x21, 0x40,
+0x00, 0x00, 0x11, 0x40, 0x00, 0x00, 0x10, 0x40, 0x00, 0x00, 0x18, 0x40,
+0x00, 0x00, 0x00, 0x80, 0xfe, 0x66, 0xff, 0xff, 0xf0, 0xb5, 0x82, 0xb0,
+0x07, 0x1c, 0x01, 0x20, 0x01, 0x90, 0xff, 0xf7, 0xe7, 0xfe, 0x01, 0x28,
+0x13, 0xd1, 0x38, 0x2f, 0x01, 0xd0, 0xa8, 0x2f, 0x07, 0xd1, 0x00, 0x26,
+0xf6, 0x43, 0x34, 0x1c, 0xa8, 0x2f, 0x02, 0xd1, 0x30, 0x1c, 0x00, 0x96,
+0x35, 0x1c, 0x11, 0x20, 0x00, 0x04, 0x06, 0x62, 0x44, 0x62, 0x85, 0x62,
+0x00, 0x99, 0xc0, 0x46, 0xc1, 0x62, 0x00, 0x21, 0x08, 0x48, 0xc0, 0x46,
+0x01, 0x60, 0x38, 0x2f, 0x01, 0xd0, 0xa8, 0x2f, 0x05, 0xd1, 0x01, 0x21,
+0x01, 0x60, 0xa8, 0x2f, 0x01, 0xd1, 0x03, 0x21, 0x01, 0x60, 0x01, 0x98,
+0x02, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x34, 0x6e, 0x21, 0x40,
+0x70, 0x47, 0x00, 0x00, 0x70, 0x47, 0x00, 0x00, 0x90, 0xb5, 0x07, 0x1c,
+0x12, 0x4c, 0x21, 0x68, 0x12, 0x48, 0x81, 0x42, 0x0b, 0xd0, 0x00, 0x23,
+0x21, 0x1c, 0xe2, 0x1d, 0xc1, 0x32, 0x00, 0xe0,
+0x08, 0xc1, 0x91, 0x42, 0xfc, 0xd3, 0x20, 0x60, 0xc8, 0x20, 0xa0, 0x80,
+0x67, 0x72, 0x38, 0x01, 0x00, 0xf0, 0x18, 0xf8, 0x27, 0x72, 0x0a, 0x48,
+0xc0, 0x46, 0xe0, 0x60, 0x09, 0x2f, 0x00, 0xdb, 0x00, 0x27, 0xe0, 0x19,
+0x01, 0x7d, 0x01, 0x31, 0x01, 0x75, 0xe0, 0x88, 0x01, 0x30, 0xe0, 0x80,
+0x01, 0x20, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x00, 0x80,
+0xee, 0xff, 0xc0, 0xd0, 0x08, 0x10, 0x00, 0x03, 0x80, 0xb4, 0x08, 0x4a,
+0xd1, 0x1d, 0x89, 0x31, 0x0b, 0x7a, 0x20, 0x2b, 0x01, 0xd3, 0x00, 0x23,
+0x0b, 0x72, 0x07, 0x1c, 0x08, 0x7a, 0x43, 0x1c, 0x0b, 0x72, 0x80, 0x18,
+0x90, 0x30, 0x47, 0x72, 0x80, 0xbc, 0x70, 0x47, 0x00, 0x00, 0x00, 0x80,
+0x07, 0x49, 0x01, 0x22, 0x12, 0x04, 0x08, 0x68, 0x02, 0x40, 0x01, 0x20,
+0x00, 0x2a, 0x06, 0xd1, 0x0a, 0x68, 0x12, 0x0c, 0x02, 0xd1, 0x09, 0x68,
+0x89, 0x0a, 0x00, 0xd2, 0x00, 0x20, 0x70, 0x47, 0x00, 0x00, 0x10, 0x40,
+0x90, 0xb5, 0x07, 0x1c, 0x09, 0x4c, 0x38, 0x1c, 0x21, 0x1c, 0xfc, 0xf7,
+0x91, 0xff, 0x38, 0x1c, 0x00, 0xf0, 0x0e, 0xf8, 0x01, 0x23, 0xd8, 0x42,
+0x01, 0xd1, 0x00, 0x0c, 0xe0, 0x80, 0x00, 0x21, 0x20, 0x1c, 0xfc, 0xf7,
+0xc5, 0xfe, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xc4, 0x66, 0x21, 0x40,
+0xf8, 0xb5, 0x07, 0x1c, 0x79, 0x7a, 0x76, 0x48, 0x00, 0x23, 0x76, 0x4c,
+0x01, 0x29, 0x5d, 0xd1, 0xa2, 0x88, 0xc0, 0x46, 0x00, 0x92, 0xa1, 0x89,
+0x8a, 0x42, 0x74, 0xda, 0xfa, 0x7a, 0x00, 0x2a, 0x15, 0xd0, 0x7a, 0x6c,
+0x00, 0x2a, 0x12, 0xd0, 0x8a, 0x42, 0x10, 0xd8, 0x00, 0x9a, 0x51, 0x1c,
+0xa1, 0x80, 0xa1, 0x88, 0xc0, 0x46, 0x41, 0x81, 0x78, 0x6c, 0x6b, 0x4e,
+0xc0, 0x46, 0xf0, 0x80, 0xa0, 0x6a, 0x58, 0x23, 0x79, 0x6c, 0x59, 0x43,
+0x40, 0x18, 0xc1, 0x1a, 0x28, 0xe0, 0x22, 0x88, 0x01, 0x32, 0x12, 0x04,
+0x12, 0x0c, 0x22, 0x80, 0x8a, 0x42, 0x00, 0xdb, 0x23, 0x80, 0x00, 0x22,
+0x00, 0x29, 0x69, 0xdd, 0x5f, 0x4c, 0xa4, 0x6a, 0x5e, 0x4b, 0x1d, 0x88,
+0x58, 0x23, 0x6b, 0x43, 0xe3, 0x18, 0xde, 0x1d, 0x01, 0x36, 0x01, 0x23,
+0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68, 0x1b, 0x06, 0x15, 0xd1, 0x58, 0x49,
+0x00, 0x9a, 0x01, 0x32, 0x8a, 0x80, 0x8a, 0x88, 0xc0, 0x46, 0x42, 0x81,
+0x08, 0x88, 0x01, 0x30, 0x54, 0x4e, 0xc0, 0x46, 0xf0, 0x80, 0x58, 0x20,
+0x68, 0x43, 0x21, 0x18, 0x38, 0x1c, 0x00, 0xf0, 0x39, 0xfb, 0xf0, 0x88,
+0x00, 0x04, 0x00, 0x14, 0x95, 0xe0, 0x4d, 0x4b, 0x01, 0x35, 0x2d, 0x04,
+0x2d, 0x0c, 0x1d, 0x80, 0x8d, 0x42, 0x01, 0xdb, 0x00, 0x25, 0x1d, 0x80,
+0x01, 0x32, 0x12, 0x04, 0x12, 0x14, 0x91, 0x42, 0xce, 0xdc, 0x81, 0xe0,
+0xe1, 0x88, 0xe2, 0x89, 0x91, 0x42, 0x18, 0xda, 0xf9, 0x7a, 0x00, 0x29,
+0x2f, 0xd0, 0x79, 0x6c, 0x49, 0x04, 0x49, 0x0c, 0x79, 0x64, 0x2a, 0xd0,
+0xe2, 0x89, 0x91, 0x42, 0x27, 0xd8, 0xe1, 0x88, 0x01, 0x31, 0xe1, 0x80,
+0xe1, 0x88, 0xc0, 0x46, 0x81, 0x81, 0x01, 0x23, 0xdb, 0x03, 0x78, 0x6c,
+0x18, 0x43, 0x3a, 0x4e, 0xc0, 0x46, 0xf0, 0x80, 0x00, 0xe0, 0x63, 0xe0,
+0xe0, 0x6a, 0x79, 0x6c, 0x4b, 0x00, 0x59, 0x18, 0x49, 0x01, 0x40, 0x18,
+0xc1, 0x1f, 0x59, 0x39, 0x38, 0x1c, 0x00, 0xf0, 0x0f, 0xfb, 0xe0, 0x6a,
+0x79, 0x6c, 0x4a, 0x00, 0x52, 0x18, 0x52, 0x01, 0x80, 0x18, 0x01, 0x39,
+0x09, 0x04, 0x09, 0x0c, 0x60, 0x38, 0x00, 0xf0, 0x89, 0xfb, 0xb6, 0xe7,
+0x4a, 0xe0, 0x61, 0x88, 0x01, 0x31, 0x09, 0x04,
+0x09, 0x0c, 0x61, 0x80, 0xe2, 0x89, 0x91, 0x42, 0x00, 0xdb, 0x63, 0x80,
+0x00, 0x21, 0x00, 0x2a, 0x3e, 0xdd, 0x24, 0x4c, 0xe4, 0x6a, 0x23, 0x4b,
+0x5d, 0x88, 0x6b, 0x00, 0x5b, 0x19, 0x5b, 0x01, 0xe3, 0x18, 0xde, 0x1d,
+0x01, 0x36, 0x01, 0x23, 0x9b, 0x07, 0x33, 0x43, 0x1b, 0x68, 0x1b, 0x06,
+0x20, 0xd1, 0x1c, 0x4e, 0xf1, 0x88, 0x01, 0x31, 0xf1, 0x80, 0xf1, 0x88,
+0xc0, 0x46, 0x81, 0x81, 0x70, 0x88, 0x01, 0x23, 0xdb, 0x03, 0x01, 0x30,
+0x18, 0x43, 0x17, 0x49, 0xc0, 0x46, 0xc8, 0x80, 0x68, 0x00, 0x40, 0x19,
+0x40, 0x01, 0x21, 0x18, 0x38, 0x1c, 0x00, 0xf0, 0xcf, 0xfa, 0x71, 0x88,
+0x4a, 0x00, 0x52, 0x18, 0x52, 0x01, 0xf0, 0x6a, 0x80, 0x18, 0x00, 0xf0,
+0x4d, 0xfb, 0x0e, 0x49, 0xc8, 0x88, 0x79, 0xe7, 0x0b, 0x4b, 0x01, 0x35,
+0x2d, 0x04, 0x2d, 0x0c, 0x5d, 0x80, 0x95, 0x42, 0x01, 0xdb, 0x00, 0x25,
+0x5d, 0x80, 0x01, 0x31, 0x09, 0x04, 0x09, 0x14, 0x8a, 0x42, 0xc2, 0xdc,
+0x01, 0x89, 0x01, 0x31, 0x01, 0x81, 0x00, 0x20, 0xc0, 0x43, 0xf8, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x4c, 0x2b, 0x00, 0x80, 0x4c, 0x2a, 0x00, 0x80,
+0xc4, 0x66, 0x21, 0x40, 0xf0, 0xb4, 0x06, 0x1c, 0x01, 0x23, 0xdb, 0x03,
+0x33, 0x40, 0x01, 0x24, 0x44, 0x4f, 0x00, 0x20, 0x44, 0x4a, 0x45, 0x4d,
+0xd1, 0x1d, 0x39, 0x31, 0x00, 0x2b, 0x41, 0xd0, 0xe3, 0x03, 0xf3, 0x1a,
+0x73, 0xd0, 0xee, 0x89, 0x9e, 0x42, 0x71, 0xd3, 0xee, 0x88, 0x00, 0x2e,
+0x6d, 0xd0, 0xed, 0x6a, 0x5e, 0x1e, 0x73, 0x00, 0x9b, 0x19, 0x5b, 0x01,
+0xed, 0x18, 0xae, 0x68, 0x36, 0x06, 0x36, 0x0e, 0x03, 0x2e, 0x02, 0xd0,
+0xce, 0x89, 0x01, 0x36, 0xce, 0x81, 0x40, 0x35, 0xad, 0x8b, 0xad, 0x00,
+0x35, 0x4e, 0x76, 0x6a, 0xc0, 0x46, 0x70, 0x51, 0x55, 0x89, 0x01, 0x35,
+0x55, 0x81, 0x32, 0x4e, 0xf2, 0x6a, 0xd2, 0x18, 0x90, 0x60, 0xf2, 0x6a,
+0xd2, 0x18, 0x90, 0x63, 0xf2, 0x6a, 0xd2, 0x18, 0xd0, 0x63, 0xf2, 0x6a,
+0xd2, 0x18, 0x10, 0x64, 0xf2, 0x6a, 0xd2, 0x18, 0x50, 0x64, 0xf2, 0x6a,
+0xd2, 0x18, 0x90, 0x64, 0xf2, 0x6a, 0xd2, 0x18, 0xd0, 0x64, 0xf0, 0x88,
+0x01, 0x38, 0xf0, 0x80, 0xf0, 0x88, 0xc0, 0x46, 0x88, 0x81, 0x24, 0x49,
+0x00, 0x28, 0x39, 0xd1, 0x4f, 0x80, 0x37, 0xe0, 0x00, 0x2e, 0x38, 0xd9,
+0xab, 0x89, 0xb3, 0x42, 0x30, 0xd3, 0xab, 0x88, 0x00, 0x2b, 0x2c, 0xd0,
+0x53, 0x89, 0x01, 0x33, 0x53, 0x81, 0x2a, 0x1c, 0xad, 0x6a, 0x58, 0x23,
+0x01, 0x3e, 0x73, 0x43, 0xed, 0x18, 0xae, 0x68, 0x36, 0x06, 0x36, 0x0e,
+0x03, 0x2e, 0x02, 0xd0, 0xce, 0x89, 0x01, 0x36, 0xce, 0x81, 0xa8, 0x60,
+0x95, 0x6a, 0xed, 0x18, 0xa8, 0x63, 0x95, 0x6a, 0xed, 0x18, 0xe8, 0x63,
+0x95, 0x6a, 0xed, 0x18, 0x28, 0x64, 0x95, 0x6a, 0xed, 0x18, 0x68, 0x64,
+0x95, 0x6a, 0xed, 0x18, 0xa8, 0x64, 0x95, 0x6a, 0xeb, 0x18, 0xd8, 0x64,
+0x90, 0x88, 0x01, 0x38, 0x90, 0x80, 0x90, 0x88, 0xc0, 0x46, 0x48, 0x81,
+0x00, 0x28, 0x03, 0xd1, 0x01, 0xe0, 0x04, 0xe0, 0x03, 0xe0, 0x17, 0x80,
+0x20, 0x1c, 0xf0, 0xbc, 0x70, 0x47, 0xca, 0x89, 0x01, 0x32, 0xca, 0x81,
+0xf9, 0xe7, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x0c, 0x2b, 0x00, 0x80,
+0x4c, 0x2a, 0x00, 0x80, 0x00, 0xb5, 0x00, 0x21, 0x41, 0x60, 0x10, 0x49,
+0x4a, 0x68, 0x00, 0x2a, 0x10, 0xd1, 0xca, 0x68, 0x00, 0x2a, 0x04, 0xd0,
+0xca, 0x1d, 0x19, 0x32, 0x12, 0x79, 0x00, 0x2a, 0x08, 0xd0, 0x4a, 0x69,
+0x00, 0x2a, 0x0b, 0xd1, 0x88, 0x61, 0x48, 0x61,
+0x00, 0xf0, 0x10, 0xf8, 0x08, 0xbc, 0x18, 0x47, 0x4a, 0x69, 0x00, 0x2a,
+0x02, 0xd1, 0x88, 0x61, 0x48, 0x61, 0xf7, 0xe7, 0x8a, 0x69, 0xc0, 0x46,
+0x50, 0x60, 0x88, 0x61, 0xf2, 0xe7, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80,
+0xb0, 0xb5, 0x2a, 0x48, 0x40, 0x69, 0x00, 0x28, 0x4c, 0xd0, 0x08, 0x22,
+0xc1, 0x68, 0x0a, 0x40, 0x00, 0x27, 0x27, 0x4b, 0xd9, 0x1d, 0xb9, 0x31,
+0x00, 0x2a, 0x11, 0xd0, 0x04, 0x22, 0x25, 0x4c, 0xc0, 0x46, 0x0c, 0x61,
+0x24, 0x4c, 0xc0, 0x46, 0x4c, 0x62, 0x24, 0x4c, 0xc0, 0x46, 0x8c, 0x62,
+0x23, 0x4c, 0xc0, 0x46, 0xcc, 0x62, 0x23, 0x4c, 0xc0, 0x46, 0x0c, 0x63,
+0x4f, 0x63, 0x12, 0xe0, 0x05, 0x22, 0x21, 0x4c, 0xc0, 0x46, 0x0c, 0x61,
+0x20, 0x4c, 0xc0, 0x46, 0x4c, 0x62, 0x20, 0x4c, 0xc0, 0x46, 0x8c, 0x62,
+0x1f, 0x4c, 0xc0, 0x46, 0xcc, 0x62, 0x1f, 0x4c, 0xc0, 0x46, 0x0c, 0x63,
+0x1e, 0x4c, 0xc0, 0x46, 0x4c, 0x63, 0x40, 0x24, 0xcc, 0x82, 0x4f, 0x83,
+0x1c, 0x4f, 0x00, 0x21, 0x00, 0x2a, 0x0c, 0xd9, 0x8c, 0x00, 0x05, 0x19,
+0x6d, 0x6a, 0x7d, 0x40, 0xe4, 0x18, 0xff, 0x34, 0x01, 0x34, 0x65, 0x62,
+0x01, 0x31, 0x91, 0x42, 0xf4, 0xd3, 0x10, 0x29, 0x07, 0xd2, 0x8a, 0x00,
+0xd2, 0x18, 0xff, 0x32, 0x01, 0x32, 0x57, 0x62, 0x01, 0x31, 0x10, 0x29,
+0xf7, 0xd3, 0x11, 0x49, 0x00, 0xf0, 0x22, 0xf8, 0xb0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x6c, 0x06, 0x00, 0x80, 0xac, 0xab, 0x20, 0x40,
+0x28, 0x01, 0x40, 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x20, 0x01, 0x40, 0x00,
+0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba, 0xdc, 0xfe,
+0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, 0x36, 0x36, 0x36, 0x36,
+0x30, 0x80, 0x20, 0x40, 0xb0, 0xb5, 0x0f, 0x1c, 0x15, 0x4d, 0xe9, 0x1d,
+0xc9, 0x31, 0x15, 0x4c, 0x23, 0x1c, 0x15, 0x4a, 0x00, 0x20, 0xfc, 0xf7,
+0x44, 0xfb, 0xe9, 0x1d, 0xff, 0x31, 0x1e, 0x31, 0x23, 0x1c, 0x0d, 0x1c,
+0x11, 0x4a, 0x01, 0x20, 0xfc, 0xf7, 0x3b, 0xfb, 0x29, 0x1c, 0x23, 0x1c,
+0x0e, 0x4a, 0x00, 0x20, 0xfc, 0xf7, 0x35, 0xfb, 0x39, 0x1c, 0x23, 0x1c,
+0x0c, 0x4a, 0x01, 0x20, 0xfc, 0xf7, 0x2f, 0xfb, 0x00, 0x21, 0x0b, 0x48,
+0xc2, 0x1d, 0x19, 0x32, 0x51, 0x71, 0x01, 0x21, 0xff, 0x30, 0x01, 0x30,
+0x41, 0x62, 0x08, 0x1c, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0xac, 0xab, 0x20, 0x40, 0x75, 0x08, 0xff, 0xff, 0x28, 0x00, 0x03, 0x00,
+0x40, 0x00, 0x02, 0x00, 0x14, 0x00, 0x07, 0x00, 0x6c, 0x06, 0x00, 0x80,
+0xf0, 0xb5, 0x37, 0x4a, 0x50, 0x69, 0x01, 0x23, 0x9b, 0x07, 0x08, 0x30,
+0x18, 0x43, 0x00, 0x68, 0x01, 0x06, 0x09, 0x0e, 0x33, 0x4b, 0x01, 0x29,
+0x49, 0xd1, 0x1f, 0x68, 0x19, 0x1c, 0x32, 0x4b, 0x9f, 0x42, 0x04, 0xd1,
+0xff, 0xf7, 0x3e, 0xff, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x23,
+0x9f, 0x00, 0xcc, 0x59, 0x55, 0x69, 0xef, 0x19, 0x3c, 0x61, 0x01, 0x33,
+0x05, 0x2b, 0xf7, 0xd3, 0x00, 0x0a, 0x00, 0x02, 0x02, 0x23, 0x18, 0x43,
+0x53, 0x69, 0xc0, 0x46, 0x98, 0x60, 0x50, 0x69, 0x08, 0x23, 0xc2, 0x68,
+0x13, 0x40, 0x25, 0x4f, 0xfa, 0x1d, 0xb9, 0x32, 0x00, 0x2b, 0x02, 0xd0,
+0x04, 0x23, 0x23, 0x4c, 0x01, 0xe0, 0x05, 0x23, 0x22, 0x4c, 0xc0, 0x46,
+0x14, 0x61, 0x40, 0x24, 0xd4, 0x82, 0x00, 0x24, 0x54, 0x83, 0x20, 0x4c,
+0x00, 0x22, 0x00, 0x2b, 0x0c, 0xd9, 0x95, 0x00,
+0x46, 0x19, 0x76, 0x6a, 0x66, 0x40, 0xed, 0x19, 0xff, 0x35, 0x01, 0x35,
+0x6e, 0x62, 0x01, 0x32, 0x9a, 0x42, 0xf4, 0xd3, 0x10, 0x2a, 0x07, 0xd2,
+0x93, 0x00, 0xdb, 0x19, 0xff, 0x33, 0x01, 0x33, 0x5c, 0x62, 0x01, 0x32,
+0x10, 0x2a, 0xf7, 0xd3, 0xff, 0xf7, 0x70, 0xff, 0xbc, 0xe7, 0x00, 0x21,
+0x8f, 0x00, 0xdc, 0x59, 0x55, 0x69, 0xef, 0x19, 0x7c, 0x62, 0x01, 0x31,
+0x05, 0x29, 0xf7, 0xd3, 0x00, 0x0a, 0x00, 0x02, 0x03, 0x23, 0x18, 0x43,
+0x51, 0x69, 0xc0, 0x46, 0x88, 0x60, 0x50, 0x69, 0x40, 0x68, 0xc0, 0x46,
+0x50, 0x61, 0x09, 0x48, 0xfc, 0xf7, 0xa4, 0xfa, 0xa4, 0xe7, 0x00, 0x00,
+0x6c, 0x06, 0x00, 0x80, 0x30, 0x80, 0x20, 0x40, 0x67, 0x45, 0x23, 0x01,
+0xac, 0xab, 0x20, 0x40, 0x28, 0x01, 0x40, 0x00, 0x20, 0x01, 0x40, 0x00,
+0x5c, 0x5c, 0x5c, 0x5c, 0x11, 0x31, 0xff, 0xff, 0xf0, 0xb5, 0x07, 0x1c,
+0x3b, 0x48, 0x3c, 0x4c, 0x08, 0x21, 0x20, 0x60, 0xa1, 0x80, 0x00, 0x20,
+0x20, 0x81, 0xe1, 0x80, 0x60, 0x81, 0x39, 0x48, 0xc0, 0x46, 0xe0, 0x60,
+0x38, 0x48, 0xc0, 0x46, 0x20, 0x61, 0x38, 0x48, 0xc0, 0x46, 0x60, 0x61,
+0x37, 0x48, 0xc0, 0x46, 0xa0, 0x61, 0x37, 0x48, 0xc0, 0x46, 0xe0, 0x61,
+0x36, 0x48, 0xc0, 0x46, 0x20, 0x62, 0x36, 0x48, 0xc0, 0x46, 0x60, 0x62,
+0x35, 0x48, 0xc0, 0x46, 0xa0, 0x62, 0x35, 0x48, 0xc0, 0x46, 0xe0, 0x62,
+0x34, 0x48, 0xc0, 0x46, 0x20, 0x63, 0x34, 0x48, 0xc0, 0x46, 0x60, 0x63,
+0x33, 0x48, 0xc0, 0x46, 0xa0, 0x63, 0x33, 0x48, 0xc0, 0x46, 0xe0, 0x63,
+0x32, 0x48, 0xc0, 0x46, 0x20, 0x64, 0x32, 0x48, 0xc0, 0x46, 0x60, 0x64,
+0x31, 0x48, 0xc0, 0x46, 0xa0, 0x64, 0x31, 0x48, 0xc0, 0x46, 0xe0, 0x64,
+0x30, 0x48, 0xc0, 0x46, 0x20, 0x65, 0x30, 0x49, 0xc8, 0x68, 0x02, 0x04,
+0x89, 0x69, 0x4a, 0x40, 0xe3, 0x1d, 0x79, 0x33, 0x09, 0x04, 0xc9, 0x43,
+0xc0, 0x43, 0x48, 0x40, 0xe1, 0x1d, 0xb9, 0x31, 0xda, 0x63, 0x08, 0x60,
+0x29, 0x4d, 0x21, 0x1c, 0x2b, 0x1c, 0x29, 0x4a, 0x00, 0x20, 0xfc, 0xf7,
+0x3e, 0xfa, 0x28, 0x4a, 0xe1, 0x1d, 0xb5, 0x31, 0x01, 0x20, 0x2b, 0x1c,
+0x0e, 0x1c, 0xfc, 0xf7, 0x36, 0xfa, 0x24, 0x4a, 0x00, 0x20, 0x31, 0x1c,
+0x2b, 0x1c, 0xfc, 0xf7, 0x30, 0xfa, 0xe1, 0x1d, 0x4d, 0x31, 0x2b, 0x1c,
+0x20, 0x4a, 0x01, 0x20, 0xfc, 0xf7, 0x29, 0xfa, 0xe0, 0x1d, 0x5d, 0x30,
+0x01, 0x68, 0x00, 0x29, 0xfc, 0xd0, 0x60, 0x6d, 0xc0, 0x46, 0x38, 0x65,
+0x20, 0x6e, 0xc0, 0x46, 0x78, 0x65, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x80, 0x00, 0x08, 0x00, 0x8c, 0xb9, 0x20, 0x40, 0x81, 0x81, 0x48, 0xbd,
+0x79, 0x56, 0x23, 0x8c, 0x93, 0x0c, 0x82, 0x95, 0x1d, 0x0e, 0x12, 0xcf,
+0x9b, 0x3b, 0xc0, 0xe9, 0xe6, 0x55, 0x7c, 0x82, 0x99, 0xf6, 0x78, 0x02,
+0xd1, 0xd7, 0x25, 0x73, 0x72, 0x8c, 0x33, 0x10, 0xf7, 0x03, 0xf1, 0x42,
+0x6c, 0x9b, 0x4a, 0xa7, 0x82, 0x8e, 0x23, 0xa9, 0x90, 0xb1, 0x82, 0x8e,
+0xdc, 0x3f, 0xfb, 0x29, 0x00, 0x62, 0x22, 0x45, 0x88, 0x2b, 0xf1, 0x85,
+0x12, 0x61, 0xd1, 0x73, 0x6e, 0xb1, 0x11, 0x16, 0x08, 0x83, 0x20, 0x40,
+0x75, 0x08, 0xff, 0xff, 0x54, 0x00, 0x03, 0x00, 0x08, 0x00, 0x02, 0x00,
+0x14, 0x00, 0x03, 0x00, 0x80, 0xb5, 0x0f, 0x1c, 0x39, 0x1c, 0x00, 0xf0,
+0x33, 0xf8, 0x38, 0x1c, 0xff, 0xf7, 0x4c, 0xff, 0x03, 0x48, 0x01, 0x89,
+0x01, 0x31, 0x01, 0x81, 0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x0c, 0x2b, 0x00, 0x80, 0x90, 0xb5, 0x04, 0x1c,
+0x0f, 0x1c, 0x20, 0x1c, 0x39, 0x1c, 0x00, 0xf0, 0x1f, 0xf8, 0xe0, 0x68,
+0x01, 0x0e, 0xff, 0x22, 0x12, 0x04, 0x02, 0x40, 0x12, 0x0a, 0x11, 0x43,
+0xff, 0x22, 0x12, 0x02, 0x02, 0x40, 0x12, 0x02, 0x11, 0x43, 0x00, 0x06,
+0x08, 0x43, 0x38, 0x65, 0x20, 0x69, 0xc0, 0x46, 0x78, 0x65, 0x60, 0x69,
+0xc0, 0x46, 0xb8, 0x65, 0x03, 0x48, 0x01, 0x89, 0x01, 0x31, 0x01, 0x81,
+0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x0c, 0x2b, 0x00, 0x80,
+0x90, 0xb5, 0x00, 0x22, 0x93, 0x00, 0x1f, 0x18, 0xbf, 0x69, 0x5b, 0x18,
+0x5f, 0x62, 0x01, 0x32, 0x05, 0x2a, 0xf7, 0xd3, 0x07, 0x7a, 0xfb, 0x08,
+0x03, 0xd3, 0x00, 0x23, 0x92, 0x00, 0x52, 0x18, 0x13, 0x62, 0x07, 0x6b,
+0xc0, 0x46, 0x8f, 0x63, 0xc7, 0x6a, 0xc0, 0x46, 0xcf, 0x63, 0x87, 0x6b,
+0xc0, 0x46, 0x0f, 0x64, 0x47, 0x6b, 0xc0, 0x46, 0x4f, 0x64, 0x07, 0x6c,
+0xc0, 0x46, 0x8f, 0x64, 0xc2, 0x6b, 0xc0, 0x46, 0xca, 0x64, 0xc2, 0x88,
+0xc0, 0x46, 0x0a, 0x80, 0x82, 0x7a, 0x12, 0x06, 0x03, 0x7a, 0x1b, 0x04,
+0x1a, 0x43, 0xc3, 0x88, 0x1b, 0x02, 0x1a, 0x43, 0x43, 0x7a, 0xdb, 0x07,
+0x1a, 0x43, 0x8a, 0x60, 0x17, 0x1c, 0x83, 0x7a, 0x5a, 0x08, 0x05, 0xd3,
+0x14, 0x22, 0x1c, 0x1c, 0xa3, 0x08, 0x02, 0xd2, 0x15, 0x22, 0x00, 0xe0,
+0x00, 0x22, 0x00, 0x7a, 0x43, 0x08, 0x10, 0xd3, 0xc0, 0x08, 0x02, 0xd3,
+0x88, 0x20, 0x10, 0x43, 0x01, 0xe0, 0x80, 0x20, 0x10, 0x43, 0x3a, 0x0a,
+0x12, 0x02, 0x01, 0x23, 0x1a, 0x43, 0xc8, 0x60, 0x8a, 0x60, 0x08, 0x1c,
+0xff, 0xf7, 0x78, 0xfd, 0x05, 0xe0, 0x38, 0x0a, 0x00, 0x02, 0x03, 0x23,
+0x18, 0x43, 0x88, 0x60, 0xca, 0x60, 0x03, 0x48, 0x01, 0x89, 0x01, 0x31,
+0x01, 0x81, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x0c, 0x2b, 0x00, 0x80,
+0xf0, 0xb4, 0x02, 0x6d, 0x14, 0x4c, 0x15, 0x1c, 0xe7, 0x69, 0xbd, 0x40,
+0x13, 0x1c, 0x26, 0x6a, 0xf3, 0x40, 0x5d, 0x40, 0x2e, 0x1c, 0x45, 0x6d,
+0xbd, 0x40, 0x6e, 0x40, 0x2b, 0x1c, 0x35, 0x1c, 0xfd, 0x40, 0x2f, 0x1c,
+0xbb, 0x00, 0x65, 0x6a, 0xeb, 0x58, 0x00, 0x2b, 0x08, 0xd0, 0x23, 0x69,
+0x01, 0x37, 0x9f, 0x42, 0x00, 0xd3, 0x00, 0x27, 0xbe, 0x00, 0xae, 0x59,
+0x00, 0x2e, 0xf7, 0xd1, 0xa4, 0x69, 0xa2, 0x40, 0x11, 0x43, 0x05, 0x4b,
+0x19, 0x43, 0xba, 0x00, 0xa9, 0x50, 0x40, 0x30, 0x87, 0x83, 0xf0, 0xbc,
+0x70, 0x47, 0x00, 0x00, 0x4c, 0x2a, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+0x80, 0xb4, 0x00, 0x22, 0x00, 0x23, 0x00, 0x29, 0x05, 0xd9, 0x07, 0x78,
+0x7a, 0x40, 0x01, 0x30, 0x01, 0x33, 0x8b, 0x42, 0xf9, 0xd3, 0xd0, 0x43,
+0x00, 0x06, 0x00, 0x0e, 0x80, 0xbc, 0x70, 0x47, 0xf0, 0xb5, 0x07, 0x1c,
+0x00, 0x24, 0xff, 0x26, 0x09, 0x36, 0x20, 0x1c, 0x00, 0xf0, 0x9a, 0xf8,
+0x00, 0xf0, 0xb8, 0xf9, 0x05, 0x1c, 0x00, 0xf0, 0xc7, 0xfa, 0x3d, 0x70,
+0x28, 0x1c, 0x01, 0x37, 0x01, 0x34, 0xb4, 0x42, 0xf1, 0xd3, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x80, 0xb5, 0x00, 0xf0, 0x93, 0xf8, 0x00, 0xf0,
+0xa7, 0xf9, 0x07, 0x1c, 0x00, 0xf0, 0xb6, 0xfa, 0x38, 0x0a, 0xf6, 0xd3,
+0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xf3, 0xb5, 0x82, 0xb0, 0x02, 0x98,
+0x41, 0x02, 0x53, 0x20, 0x00, 0xf0, 0x64, 0xf8, 0x00, 0xf0, 0xa8, 0xfa,
+0xff, 0xf7, 0xe8, 0xff, 0x00, 0x24, 0x00, 0x20, 0x01, 0x90, 0x2e, 0x20,
+0x00, 0x90, 0x00, 0x25, 0x00, 0x27, 0x02, 0x98, 0x01, 0x28, 0x04, 0xd1,
+0x00, 0x98, 0x84, 0x42, 0x01, 0xd3, 0x00, 0x26,
+0x09, 0xe0, 0x01, 0x98, 0x41, 0x1c, 0x01, 0x91, 0x00, 0xf0, 0x60, 0xf8,
+0x00, 0xf0, 0x7e, 0xf9, 0x06, 0x1c, 0x00, 0xf0, 0x8d, 0xfa, 0xf8, 0x00,
+0x86, 0x40, 0x35, 0x43, 0x01, 0x34, 0x01, 0x37, 0x04, 0x2f, 0xe6, 0xd3,
+0x03, 0x99, 0x20, 0xc1, 0x03, 0x91, 0xff, 0x23, 0x09, 0x33, 0x9c, 0x42,
+0xdd, 0xd3, 0x04, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0xf0, 0xb5,
+0x04, 0x1c, 0x0f, 0x1c, 0x01, 0x2c, 0x2a, 0xd0, 0x16, 0x48, 0xc0, 0x6f,
+0x40, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x00, 0x26, 0x20, 0xcf,
+0xb1, 0x00, 0x84, 0x20, 0x00, 0xf0, 0x24, 0xf8, 0x28, 0x1c, 0x00, 0xf0,
+0xdf, 0xf9, 0x28, 0x0a, 0x00, 0xf0, 0xdc, 0xf9, 0x28, 0x0c, 0x00, 0xf0,
+0xd9, 0xf9, 0x28, 0x0e, 0x00, 0xf0, 0xd6, 0xf9, 0x00, 0xf0, 0x5c, 0xfa,
+0x01, 0x36, 0x42, 0x2e, 0xe9, 0xd3, 0x61, 0x02, 0x83, 0x20, 0x00, 0xf0,
+0x0f, 0xf8, 0x00, 0xf0, 0x53, 0xfa, 0xff, 0xf7, 0x93, 0xff, 0x04, 0x48,
+0xc0, 0x6f, 0x40, 0x23, 0x01, 0x68, 0x99, 0x43, 0x01, 0x60, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x90, 0xb5, 0x04, 0x1c,
+0x0f, 0x1c, 0x00, 0xf0, 0x59, 0xfa, 0x20, 0x1c, 0x00, 0xf0, 0xb6, 0xf9,
+0x38, 0x0c, 0x00, 0xf0, 0xb3, 0xf9, 0x38, 0x0a, 0x00, 0xf0, 0xb0, 0xf9,
+0x38, 0x1c, 0x00, 0xf0, 0xad, 0xf9, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x00, 0xb5, 0x01, 0x1c, 0x54, 0x20, 0xff, 0xf7, 0xe7, 0xff, 0x00, 0x20,
+0x00, 0xf0, 0xa2, 0xf9, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0x00, 0xf0,
+0x3d, 0xfa, 0x57, 0x20, 0x00, 0xf0, 0x9a, 0xf9, 0x08, 0xbc, 0x18, 0x47,
+0x90, 0xb5, 0x08, 0x4f, 0xfa, 0x6f, 0x20, 0x23, 0x14, 0x68, 0x9c, 0x43,
+0x14, 0x60, 0x23, 0x1c, 0xff, 0xf7, 0x65, 0xff, 0xf8, 0x6f, 0x20, 0x23,
+0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x90, 0xb5, 0x08, 0x4f, 0xfa, 0x6f, 0x20, 0x23,
+0x14, 0x68, 0x9c, 0x43, 0x14, 0x60, 0x23, 0x1c, 0xff, 0xf7, 0x87, 0xff,
+0xf8, 0x6f, 0x20, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0x90, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0xf0, 0xb5, 0x04, 0x1c,
+0x0f, 0x1c, 0x18, 0x4e, 0xf0, 0x6f, 0x20, 0x23, 0x01, 0x68, 0x99, 0x43,
+0x01, 0x60, 0x61, 0x02, 0x53, 0x20, 0xff, 0xf7, 0xa5, 0xff, 0x00, 0xf0,
+0xe9, 0xf9, 0xff, 0xf7, 0x29, 0xff, 0xf8, 0x1d, 0x05, 0x30, 0x01, 0x2c,
+0x03, 0xd1, 0x22, 0x2f, 0x01, 0xd3, 0x00, 0x27, 0x0f, 0xe0, 0x44, 0x1c,
+0xff, 0xf7, 0xaa, 0xff, 0x00, 0xf0, 0xc8, 0xf8, 0x07, 0x1c, 0x00, 0xf0,
+0xd7, 0xf9, 0x20, 0x1c, 0xff, 0xf7, 0xa2, 0xff, 0x00, 0xf0, 0xc0, 0xf8,
+0x05, 0x1c, 0x00, 0xf0, 0xcf, 0xf9, 0xf0, 0x6f, 0x20, 0x23, 0x01, 0x68,
+0x19, 0x43, 0x01, 0x60, 0x28, 0x02, 0x38, 0x43, 0xf0, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0xf0, 0xb5, 0xc2, 0xb0,
+0x14, 0x1c, 0x0d, 0x1c, 0x07, 0x1c, 0x01, 0x2f, 0x2f, 0xd0, 0x79, 0x02,
+0x19, 0x4e, 0xf0, 0x6f, 0x20, 0x23, 0x02, 0x68, 0x9a, 0x43, 0x02, 0x60,
+0x53, 0x20, 0xff, 0xf7, 0x6b, 0xff, 0x00, 0xf0, 0xaf, 0xf9, 0xff, 0xf7,
+0xef, 0xfe, 0x68, 0x46, 0xff, 0xf7, 0xd6, 0xfe, 0x6a, 0x46, 0xe8, 0x1d,
+0x05, 0x30, 0x14, 0x54, 0x21, 0x0a, 0x68, 0x44, 0x41, 0x70, 0x68, 0x46,
+0x00, 0x99, 0x0c, 0x30, 0xff, 0xf7, 0xba, 0xfe, 0x02, 0xab, 0x18, 0x70,
+0x00, 0x20, 0x58, 0x70, 0x68, 0x46, 0x0c, 0x21,
+0xff, 0xf7, 0xb2, 0xfe, 0x02, 0xab, 0x58, 0x70, 0x69, 0x46, 0x38, 0x1c,
+0xff, 0xf7, 0x15, 0xff, 0xf0, 0x6f, 0x20, 0x23, 0x01, 0x68, 0x19, 0x43,
+0x01, 0x60, 0x42, 0xb0, 0xf0, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0xff, 0xb5, 0xc2, 0xb0, 0x07, 0x1c, 0x01, 0x2f,
+0x01, 0xd1, 0x01, 0x20, 0x36, 0xe0, 0x6b, 0x46, 0x00, 0x20, 0xc4, 0x43,
+0x10, 0xc3, 0x01, 0x30, 0x42, 0x28, 0xfb, 0xd3, 0x68, 0x46, 0x0c, 0x30,
+0x03, 0x1c, 0x00, 0x24, 0x00, 0x2a, 0x0a, 0xd9, 0x0e, 0x88, 0xc0, 0x46,
+0x06, 0x70, 0x0e, 0x88, 0x36, 0x12, 0x46, 0x70, 0x02, 0x30, 0x02, 0x31,
+0x02, 0x34, 0x94, 0x42, 0xf4, 0xd3, 0x00, 0x92, 0x18, 0x1c, 0x11, 0x1c,
+0xff, 0xf7, 0x7c, 0xfe, 0x04, 0x1c, 0x00, 0x20, 0x01, 0x90, 0x02, 0xab,
+0x1c, 0x70, 0x58, 0x70, 0x9d, 0x70, 0x68, 0x46, 0x0c, 0x21, 0xff, 0xf7,
+0x71, 0xfe, 0x02, 0xab, 0x58, 0x70, 0x45, 0x9b, 0x1d, 0x06, 0x2d, 0x0e,
+0xac, 0x42, 0x03, 0xd1, 0x69, 0x46, 0x38, 0x1c, 0xff, 0xf7, 0x3e, 0xff,
+0x01, 0x20, 0xac, 0x42, 0x00, 0xd1, 0x00, 0x20, 0x46, 0xb0, 0xf0, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0xb0, 0xb5, 0xc2, 0xb0, 0x0f, 0x1c, 0x41, 0x02,
+0x14, 0x4c, 0xe0, 0x6f, 0x20, 0x23, 0x02, 0x68, 0x9a, 0x43, 0x02, 0x60,
+0x53, 0x20, 0xff, 0xf7, 0xef, 0xfe, 0x00, 0xf0, 0x33, 0xf9, 0xff, 0xf7,
+0x73, 0xfe, 0x68, 0x46, 0xff, 0xf7, 0x5a, 0xfe, 0xe0, 0x6f, 0x20, 0x23,
+0x01, 0x68, 0x19, 0x43, 0x02, 0xad, 0x01, 0x60, 0x6d, 0x78, 0x00, 0x24,
+0x02, 0xab, 0x5c, 0x70, 0x68, 0x46, 0x0c, 0x21, 0xff, 0xf7, 0x3c, 0xfe,
+0xa8, 0x42, 0x02, 0xd1, 0x00, 0x98, 0x87, 0x42, 0x01, 0xd3, 0x20, 0x1c,
+0x00, 0xe0, 0x01, 0x20, 0x42, 0xb0, 0xb0, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0xfc, 0x46, 0x60, 0x47, 0x00, 0x00, 0xa0, 0xe3,
+0xb4, 0x22, 0x9f, 0xe5, 0xb4, 0x32, 0x9f, 0xe5, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5,
+0x81, 0x03, 0x80, 0xe1, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5, 0x01, 0x03, 0x80, 0xe1,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x93, 0xe5, 0x81, 0x02, 0x80, 0xe1, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5,
+0x01, 0x02, 0x80, 0xe1, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5, 0x81, 0x01, 0x80, 0xe1,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x93, 0xe5, 0x01, 0x01, 0x80, 0xe1, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5,
+0x81, 0x00, 0x80, 0xe1, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x93, 0xe5,
+0x01, 0x00, 0x80, 0xe1, 0x1e, 0xff, 0x2f, 0xe1, 0xfc, 0x46, 0x60, 0x47,
+0xa4, 0x21, 0x9f, 0xe5, 0xa8, 0x31, 0x9f, 0xe5, 0xa0, 0x13, 0xa0, 0xe1,
+0x00, 0x10, 0x83, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x20, 0x13, 0xa0, 0xe1, 0x00, 0x10, 0x83, 0xe5,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0xa0, 0x12, 0xa0, 0xe1, 0x00, 0x10, 0x83, 0xe5, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x20, 0x12, 0xa0, 0xe1,
+0x00, 0x10, 0x83, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0xa0, 0x11, 0xa0, 0xe1, 0x00, 0x10, 0x83, 0xe5,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x20, 0x11, 0xa0, 0xe1, 0x00, 0x10, 0x83, 0xe5, 0x01, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5, 0xa0, 0x10, 0xa0, 0xe1,
+0x00, 0x10, 0x83, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0xa0, 0xe1, 0x00, 0x10, 0x83, 0xe5,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5, 0x00, 0x10, 0x82, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0xfc, 0x46, 0x60, 0x47, 0xa0, 0x30, 0x9f, 0xe5,
+0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0xfc, 0x46, 0x60, 0x47, 0x70, 0x30, 0x9f, 0xe5,
+0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x1e, 0xff, 0x2f, 0xe1, 0xfc, 0x46, 0x60, 0x47, 0x34, 0x20, 0x9f, 0xe5,
+0x3c, 0x30, 0x9f, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x82, 0xe5,
+0x00, 0x10, 0x82, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5, 0x00, 0x10, 0x83, 0xe5,
+0x00, 0x10, 0x83, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0xf8, 0x00, 0x18, 0x40,
+0x04, 0x01, 0x18, 0x40, 0x00, 0x01, 0x18, 0x40, 0xfc, 0x00, 0x18, 0x40,
+0x80, 0xb5, 0x00, 0xf0, 0x0c, 0xf8, 0x00, 0x27, 0x38, 0x1c, 0x00, 0xf0,
+0x47, 0xf8, 0x78, 0x1c, 0x07, 0x04, 0x3f, 0x0c, 0x0c, 0x2f, 0xf7, 0xdd,
+0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x1d, 0x48,
+0x02, 0x68, 0x1d, 0x49, 0x8b, 0x69, 0xd2, 0x18, 0x02, 0x60, 0x02, 0x66,
+0x8a, 0x6a, 0x43, 0x68, 0x9b, 0x18, 0x43, 0x60, 0x93, 0x42, 0x02, 0xd2,
+0x82, 0x68, 0x01, 0x32, 0x82, 0x60, 0xc2, 0x68, 0x0b, 0x6a, 0xd2, 0x18,
+0xc2, 0x60, 0x42, 0x69, 0xcb, 0x68, 0xd2, 0x18, 0x42, 0x61, 0xc2, 0x69,
+0x8b, 0x68, 0xd2, 0x18, 0xc2, 0x61, 0x02, 0x69, 0x0b, 0x69, 0xd2, 0x18,
+0x02, 0x61, 0x82, 0x69, 0x0b, 0x68, 0xd2, 0x18, 0x82, 0x61, 0x02, 0x6b,
+0xcb, 0x69, 0xd2, 0x18, 0x02, 0x63, 0x4a, 0x6a, 0x43, 0x6b, 0x9b, 0x18,
+0x43, 0x63, 0x93, 0x42, 0x02, 0xd2, 0x82, 0x6b, 0x01, 0x32, 0x82, 0x63,
+0xc2, 0x6b, 0x4b, 0x69, 0xd2, 0x18, 0xc2, 0x63, 0x02, 0x6c, 0xc9, 0x6a,
+0x51, 0x18, 0x01, 0x64, 0x70, 0x47, 0x00, 0x00, 0xa4, 0x2a, 0x00, 0x80,
+0x00, 0x08, 0x14, 0x40, 0x88, 0xb5, 0x69, 0x46, 0x00, 0xf0, 0x17, 0xf8,
+0x81, 0x08, 0x0a, 0xd0, 0x00, 0x20, 0x00, 0x29, 0x07, 0xd9, 0x00, 0x22,
+0x83, 0x00, 0x00, 0x9f, 0xc0, 0x46, 0xfa, 0x50, 0x01, 0x30, 0x88, 0x42,
+0xf8, 0xd3, 0x88, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0xb5, 0x00, 0xf0,
+0x04, 0xf8, 0x00, 0x04, 0x00, 0x0c, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x22,
+0x00, 0x28, 0x0a, 0xd0, 0x01, 0x28, 0x0a, 0xd0, 0x02, 0x28, 0x0c, 0xd0,
+0x03, 0x28, 0x02, 0xd1, 0x07, 0x48, 0x1c, 0x22, 0x08, 0x60, 0x10, 0x1c,
+0x70, 0x47, 0x06, 0x48, 0x04, 0xe0, 0x06, 0x48, 0x50, 0x22, 0x08, 0x60,
+0xf7, 0xe7, 0x05, 0x48, 0x68, 0x22, 0x08, 0x60, 0xf3, 0xe7, 0x00, 0x00,
+0x08, 0x83, 0x20, 0x40, 0xa4, 0x2a, 0x00, 0x80, 0x0c, 0x2b, 0x00, 0x80,
+0xa0, 0x82, 0x20, 0x40, 0x80, 0xb4, 0x03, 0x22, 0xc2, 0x80, 0x15, 0x4a,
+0xc0, 0x46, 0x82, 0x60, 0x14, 0x4a, 0x12, 0x88, 0x01, 0x32, 0xc2, 0x60,
+0x00, 0x20, 0x13, 0x4a, 0x13, 0x5c, 0xc0, 0x46, 0x0b, 0x70, 0x01, 0x30,
+0x01, 0x31, 0x08, 0x28, 0xf8, 0xd3, 0x20, 0x22, 0x0a, 0x70, 0x01, 0x31,
+0x00, 0x20, 0x0e, 0x4b, 0x1f, 0x5c, 0xc0, 0x46, 0x0f, 0x70, 0x01, 0x30,
+0x01, 0x31, 0x08, 0x28, 0xf8, 0xd3, 0x0a, 0x70, 0x01, 0x31, 0x00, 0x20,
+0x09, 0x4a, 0x13, 0x5c, 0xc0, 0x46, 0x0b, 0x70, 0x01, 0x30, 0x01, 0x31,
+0x08, 0x28, 0xf8, 0xd3, 0x00, 0x20, 0x08, 0x70, 0x80, 0xbc, 0x70, 0x47,
+0x08, 0x10, 0x00, 0x03, 0x68, 0x0e, 0x00, 0x80, 0x7c, 0x04, 0x00, 0x80,
+0x85, 0x04, 0x00, 0x80, 0x8e, 0x04, 0x00, 0x80, 0x00, 0xb5, 0x01, 0x23,
+0x0a, 0x48, 0xc1, 0x1d, 0x89, 0x31, 0x4b, 0x70, 0x00, 0x22, 0x0a, 0x70,
+0x64, 0x21, 0x80, 0x30, 0xc1, 0x82, 0x01, 0x83, 0x43, 0x83, 0x7d, 0x21,
+0xc9, 0x00, 0x81, 0x83, 0xc2, 0x83, 0x04, 0x48, 0x01, 0x22, 0x00, 0x21,
+0x00, 0xf0, 0x8e, 0xfb, 0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0xb5, 0x22, 0xff, 0xff, 0x00, 0xb5, 0xff, 0xf7, 0xe1, 0xff, 0x13, 0x48,
+0x02, 0x22, 0x00, 0x21, 0x00, 0xf0, 0x80, 0xfb, 0x01, 0x23, 0xd8, 0x42,
+0x0a, 0xd1, 0x10, 0x48, 0xc1, 0x1d, 0x39, 0x31, 0xca, 0x88, 0x01, 0x32,
+0xca, 0x80, 0x81, 0x79, 0x01, 0x31, 0x81, 0x71, 0xfd, 0xf7, 0x70, 0xf9,
+0x0b, 0x48, 0xc0, 0x68, 0x01, 0x28, 0x05, 0xd1, 0x0a, 0x48, 0x7d, 0x22,
+0xd2, 0x00, 0x00, 0x21, 0x00, 0xf0, 0x68, 0xfb, 0x08, 0x48, 0xfb, 0xf7,
+0xe1, 0xfc, 0x08, 0x48, 0x28, 0x22, 0x00, 0x21, 0x00, 0xf0, 0x60, 0xfb,
+0x08, 0xbc, 0x18, 0x47, 0x79, 0x21, 0xff, 0xff, 0xa0, 0x82, 0x20, 0x40,
+0x68, 0x0e, 0x00, 0x80, 0xa5, 0x7b, 0x21, 0x40,
+0x95, 0x2c, 0xff, 0xff, 0x59, 0x03, 0xff, 0xff, 0x00, 0xb5, 0x10, 0x20,
+0x0f, 0x49, 0xc0, 0x46, 0x08, 0x60, 0x0f, 0x4a, 0x0f, 0x48, 0x64, 0x21,
+0xfb, 0xf7, 0xc6, 0xfc, 0x0e, 0x48, 0x01, 0x22, 0x12, 0x04, 0x01, 0x68,
+0x0a, 0x40, 0x08, 0x21, 0x00, 0x2a, 0x05, 0xd1, 0x02, 0x68, 0x12, 0x0c,
+0x07, 0xd1, 0x00, 0x68, 0x80, 0x0a, 0x04, 0xd3, 0x08, 0x48, 0xc0, 0x46,
+0xc1, 0x60, 0x08, 0xbc, 0x18, 0x47, 0x07, 0x48, 0xc0, 0x46, 0x01, 0x64,
+0xf9, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xa5, 0x55, 0xff, 0xff,
+0x7c, 0x29, 0x00, 0x80, 0x00, 0x00, 0x10, 0x40, 0x40, 0x01, 0x18, 0x00,
+0x00, 0x00, 0x00, 0x80, 0xf8, 0xb5, 0x27, 0x48, 0x01, 0x22, 0x12, 0x04,
+0x01, 0x68, 0x0a, 0x40, 0x07, 0x21, 0x00, 0x2a, 0x05, 0xd1, 0x02, 0x68,
+0x12, 0x0c, 0x06, 0xd1, 0x00, 0x68, 0x80, 0x0a, 0x03, 0xd3, 0x21, 0x48,
+0xc0, 0x46, 0xc1, 0x60, 0x02, 0xe0, 0x20, 0x48, 0xc0, 0x46, 0x01, 0x64,
+0x1f, 0x48, 0xfb, 0xf7, 0x87, 0xfc, 0x1f, 0x48, 0xc1, 0x6b, 0xff, 0x29,
+0xfc, 0xd1, 0x81, 0x6b, 0x42, 0x6b, 0x16, 0x1c, 0x0f, 0x1c, 0x1c, 0x4c,
+0x10, 0x23, 0x60, 0x69, 0x18, 0x43, 0x60, 0x61, 0xa1, 0x69, 0x99, 0x43,
+0x1d, 0x04, 0xa1, 0x61, 0xe8, 0x60, 0xa0, 0x69, 0xc0, 0x46, 0x28, 0x61,
+0x16, 0x4a, 0x17, 0x49, 0x64, 0x20, 0xfb, 0xf7, 0x6f, 0xfc, 0x16, 0x4a,
+0xc0, 0x46, 0x00, 0x92, 0x15, 0x4b, 0x00, 0x20, 0x39, 0x1c, 0x32, 0x1c,
+0xfb, 0xf7, 0x6e, 0xfc, 0x13, 0x48, 0xc1, 0x68, 0x08, 0x29, 0xfc, 0xd1,
+0x12, 0x48, 0xfb, 0xf7, 0x5d, 0xfc, 0x10, 0x23, 0x60, 0x69, 0x98, 0x43,
+0x60, 0x61, 0xe8, 0x60, 0x01, 0x20, 0xe3, 0x23, 0x1b, 0x01, 0xe1, 0x18,
+0xc8, 0x71, 0xf8, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00, 0x10, 0x40,
+0x40, 0x01, 0x18, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x02, 0xff, 0xff,
+0x00, 0x01, 0x18, 0x40, 0x68, 0x0e, 0x00, 0x80, 0x20, 0x55, 0xff, 0xff,
+0xb5, 0xb6, 0x21, 0x40, 0x64, 0x00, 0x30, 0x02, 0x44, 0x80, 0x20, 0x40,
+0x40, 0x01, 0x18, 0x40, 0xf4, 0x01, 0xff, 0xff, 0x00, 0xb5, 0xfd, 0xf7,
+0x01, 0xff, 0x06, 0x48, 0xfb, 0xf7, 0x32, 0xfc, 0xfd, 0xf7, 0xd6, 0xfe,
+0xfe, 0xf7, 0x04, 0xf8, 0xfe, 0xf7, 0x16, 0xf8, 0xfe, 0xf7, 0x24, 0xf8,
+0x08, 0xbc, 0x18, 0x47, 0x91, 0x03, 0xff, 0xff, 0x90, 0xb5, 0xfd, 0xf7,
+0x6b, 0xfc, 0x34, 0x4f, 0x00, 0x24, 0xf9, 0x68, 0xf8, 0x1d, 0x79, 0x30,
+0x01, 0x29, 0x0f, 0xd1, 0x31, 0x49, 0xc0, 0x46, 0xf9, 0x67, 0x31, 0x49,
+0xc0, 0x46, 0x01, 0x60, 0x30, 0x49, 0xc0, 0x46, 0x0c, 0x60, 0x4c, 0x60,
+0x8c, 0x60, 0xcc, 0x60, 0x0c, 0x61, 0x4c, 0x61, 0x8c, 0x61, 0x04, 0xe0,
+0xf9, 0x1d, 0x7d, 0x31, 0xf9, 0x67, 0x12, 0xc0, 0x08, 0x38, 0x00, 0x68,
+0x60, 0x23, 0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0xf8, 0x6f, 0x20, 0x23,
+0x01, 0x68, 0x19, 0x43, 0x01, 0x60, 0xf8, 0x6f, 0x40, 0x23, 0x01, 0x68,
+0x99, 0x43, 0x01, 0x60, 0x00, 0xf0, 0x54, 0xf8, 0xfd, 0xf7, 0x4e, 0xfc,
+0x00, 0xf0, 0x5e, 0xf9, 0xfd, 0xf7, 0x73, 0xf8, 0xff, 0xf7, 0x0c, 0xfe,
+0xfd, 0xf7, 0x2e, 0xfe, 0xfd, 0xf7, 0xb6, 0xfd, 0xfd, 0xf7, 0xc2, 0xfe,
+0xfd, 0xf7, 0x54, 0xfd, 0xfd, 0xf7, 0x0a, 0xfd, 0xfd, 0xf7, 0x94, 0xfd,
+0x00, 0xf0, 0x1a, 0xfa, 0xfd, 0xf7, 0x9c, 0xff, 0xfd, 0xf7, 0x0a, 0xff,
+0xfd, 0xf7, 0xd2, 0xfe, 0xfd, 0xf7, 0x3c, 0xfc, 0xfb, 0xf7, 0xdc, 0xfa,
+0xff, 0xf7, 0x9c, 0xff, 0x71, 0x23, 0x5b, 0x01,
+0xf8, 0x18, 0x04, 0x72, 0x44, 0x72, 0x07, 0x23, 0x5b, 0x02, 0xf8, 0x18,
+0x04, 0x63, 0xf8, 0x68, 0x01, 0x28, 0x02, 0xd1, 0xa8, 0x20, 0xfe, 0xf7,
+0xb1, 0xfd, 0x09, 0x48, 0xc0, 0x46, 0x44, 0x62, 0x00, 0xf0, 0x18, 0xfa,
+0x07, 0x48, 0xfb, 0xf7, 0xbd, 0xfb, 0x90, 0xbc, 0x08, 0xbc, 0x18, 0x47,
+0x68, 0x0e, 0x00, 0x80, 0x00, 0x01, 0x11, 0x40, 0x04, 0x01, 0x11, 0x40,
+0x00, 0x01, 0x11, 0x00, 0xc0, 0x00, 0x18, 0x00, 0x15, 0x8f, 0x21, 0x40,
+0x00, 0xb5, 0x04, 0x48, 0xfb, 0xf7, 0xaa, 0xfb, 0xfd, 0xf7, 0x5e, 0xff,
+0xfd, 0xf7, 0x24, 0xfc, 0x08, 0xbc, 0x18, 0x47, 0x15, 0x99, 0x21, 0x40,
+0xfa, 0x21, 0x03, 0x48, 0xc0, 0x46, 0x41, 0x62, 0x40, 0x21, 0x41, 0x62,
+0x70, 0x47, 0x00, 0x00, 0xc0, 0x00, 0x18, 0x00, 0x07, 0x48, 0x41, 0x69,
+0x07, 0x4b, 0x19, 0x43, 0x41, 0x61, 0x82, 0x69, 0x9a, 0x43, 0x82, 0x61,
+0x01, 0x22, 0x12, 0x05, 0xd1, 0x60, 0x80, 0x69, 0xc0, 0x46, 0x10, 0x61,
+0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80, 0xfe, 0xaf, 0x9a, 0x10,
+0x00, 0xb5, 0x02, 0x48, 0xfb, 0xf7, 0x80, 0xfb, 0x08, 0xbc, 0x18, 0x47,
+0xc8, 0x57, 0xff, 0xff, 0xf0, 0xb5, 0x24, 0x4c, 0x01, 0x21, 0x09, 0x04,
+0x20, 0x68, 0x01, 0x40, 0x09, 0x20, 0x22, 0x4e, 0x22, 0x4d, 0x00, 0x29,
+0x05, 0xd1, 0x21, 0x68, 0x09, 0x0c, 0x04, 0xd1, 0x21, 0x68, 0x89, 0x0a,
+0x01, 0xd3, 0xf0, 0x60, 0x00, 0xe0, 0x28, 0x64, 0x1d, 0x48, 0xfb, 0xf7,
+0x65, 0xfb, 0x1d, 0x4f, 0x1d, 0x49, 0x88, 0x69, 0x01, 0x30, 0x88, 0x61,
+0x38, 0x7a, 0x00, 0x28, 0x02, 0xd1, 0x78, 0x7a, 0x00, 0x28, 0x1f, 0xd0,
+0x19, 0x48, 0xfb, 0xf7, 0x57, 0xfb, 0x19, 0x48, 0xfb, 0xf7, 0x54, 0xfb,
+0x00, 0x28, 0xfa, 0xd1, 0x38, 0x7a, 0x00, 0x28, 0x02, 0xd0, 0x16, 0x48,
+0xfb, 0xf7, 0x4c, 0xfb, 0x01, 0x21, 0x09, 0x04, 0x20, 0x68, 0x01, 0x40,
+0x14, 0x20, 0x00, 0x29, 0x05, 0xd1, 0x21, 0x68, 0x09, 0x0c, 0x04, 0xd1,
+0x21, 0x68, 0x89, 0x0a, 0x01, 0xd3, 0xf0, 0x60, 0x01, 0xe0, 0x28, 0x64,
+0xff, 0xe7, 0xfe, 0xe7, 0xff, 0xf7, 0x65, 0xfd, 0x0b, 0x48, 0xfb, 0xf7,
+0x35, 0xfb, 0xff, 0xf7, 0xaf, 0xff, 0xcd, 0xe7, 0x00, 0x00, 0x10, 0x40,
+0x40, 0x01, 0x18, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x02, 0xff, 0xff,
+0x88, 0x1c, 0x00, 0x80, 0x08, 0x83, 0x20, 0x40, 0xf4, 0x01, 0xff, 0xff,
+0xb5, 0x07, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x99, 0x9f, 0x21, 0x40,
+0x00, 0x20, 0x07, 0x4a, 0x01, 0x21, 0x09, 0x05, 0x50, 0x61, 0xc8, 0x60,
+0xd0, 0x61, 0xc8, 0x61, 0x03, 0x23, 0xdb, 0x04, 0x03, 0x4a, 0x01, 0x21,
+0xd1, 0x63, 0x58, 0x60, 0xfc, 0xe7, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0xc0, 0x00, 0x18, 0x00, 0x80, 0xb5, 0xc0, 0xb0, 0x01, 0x22, 0x00, 0x21,
+0x0a, 0x20, 0xfc, 0xf7, 0xd1, 0xff, 0x07, 0x1c, 0xff, 0x2f, 0x28, 0xd0,
+0x69, 0x46, 0xff, 0x22, 0x38, 0x1c, 0x01, 0x32, 0xfd, 0xf7, 0x54, 0xf9,
+0xff, 0x23, 0x01, 0x33, 0x98, 0x42, 0x1b, 0xd1, 0x0d, 0x98, 0x00, 0x09,
+0x18, 0xd3, 0x38, 0x1c, 0xfd, 0xf7, 0x8d, 0xf8, 0x0e, 0x49, 0x01, 0x22,
+0x12, 0x04, 0x08, 0x68, 0x02, 0x40, 0x0d, 0x48, 0x05, 0xd1, 0x0a, 0x68,
+0x12, 0x0c, 0x06, 0xd1, 0x09, 0x68, 0x89, 0x0a, 0x03, 0xd3, 0x0a, 0x49,
+0xc0, 0x46, 0xc8, 0x60, 0x02, 0xe0, 0x09, 0x49, 0xc0, 0x46, 0x08, 0x64,
+0xff, 0xf7, 0xbc, 0xff, 0x38, 0x1c, 0xfd, 0xf7, 0x74, 0xf8, 0x40, 0xb0,
+0x80, 0xbc, 0x08, 0xbc, 0x18, 0x47, 0x00, 0x00,
+0x00, 0x00, 0x10, 0x40, 0x07, 0x80, 0x00, 0x00, 0x40, 0x01, 0x18, 0x00,
+0x00, 0x00, 0x00, 0x80, 0x00, 0xb5, 0x17, 0x49, 0x01, 0x22, 0x12, 0x04,
+0x08, 0x68, 0x02, 0x40, 0x06, 0x20, 0x00, 0x2a, 0x05, 0xd1, 0x0a, 0x68,
+0x12, 0x0c, 0x06, 0xd1, 0x09, 0x68, 0x89, 0x0a, 0x03, 0xd3, 0x11, 0x49,
+0xc0, 0x46, 0xc8, 0x60, 0x02, 0xe0, 0x10, 0x49, 0xc0, 0x46, 0x08, 0x64,
+0x03, 0x20, 0xfe, 0xf7, 0xd3, 0xfc, 0xfb, 0xf7, 0x0d, 0xff, 0x01, 0x23,
+0x18, 0x43, 0xfb, 0xf7, 0xe7, 0xff, 0xff, 0xf7, 0x83, 0xfe, 0xff, 0xf7,
+0x9d, 0xff, 0xff, 0xf7, 0x05, 0xfe, 0xff, 0xf7, 0xf5, 0xfe, 0xff, 0xf7,
+0x09, 0xff, 0xff, 0xf7, 0x9b, 0xfd, 0xff, 0xf7, 0x21, 0xff, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x40, 0x01, 0x18, 0x00,
+0x00, 0x00, 0x00, 0x80, 0xf0, 0xb4, 0x46, 0x4a, 0x01, 0x21, 0xc9, 0x03,
+0x45, 0x4d, 0x19, 0x23, 0xdb, 0x01, 0xec, 0x18, 0xa1, 0x61, 0x28, 0x88,
+0x40, 0x04, 0x43, 0x4b, 0xc0, 0x18, 0x87, 0x1a, 0x04, 0x20, 0xaf, 0x60,
+0x41, 0x4e, 0xc0, 0x46, 0xb0, 0x61, 0x08, 0x20, 0xc8, 0x23, 0x43, 0x43,
+0xbb, 0x42, 0x21, 0xd9, 0x41, 0x00, 0x3d, 0x4e, 0xc0, 0x46, 0x31, 0x61,
+0xb6, 0x69, 0x20, 0x23, 0x9b, 0x1b, 0x3a, 0x4e, 0xc0, 0x46, 0xf3, 0x61,
+0x10, 0x3b, 0x33, 0x62, 0x8b, 0x00, 0xff, 0x1a, 0x40, 0x08, 0x81, 0x42,
+0x17, 0xd3, 0xb8, 0x23, 0x43, 0x43, 0xbb, 0x42, 0x08, 0xd9, 0x41, 0x1e,
+0x32, 0x4b, 0xc0, 0x46, 0x99, 0x81, 0xd9, 0x81, 0x40, 0x00, 0x02, 0x38,
+0x58, 0x61, 0x0a, 0xe0, 0x01, 0x30, 0x81, 0x42, 0xef, 0xd2, 0x06, 0xe0,
+0x2c, 0x4e, 0xb3, 0x69, 0x01, 0x33, 0xb3, 0x61, 0x40, 0x00, 0x88, 0x42,
+0xd2, 0xd9, 0x2a, 0x49, 0x00, 0x20, 0xa3, 0x69, 0x9b, 0x08, 0x07, 0xd0,
+0x28, 0x4b, 0x87, 0x00, 0xcb, 0x51, 0xa7, 0x69, 0xbf, 0x08, 0x01, 0x30,
+0x87, 0x42, 0xf8, 0xd8, 0x22, 0x49, 0xc0, 0x46, 0x8a, 0x62, 0x8c, 0x89,
+0x58, 0x20, 0x60, 0x43, 0x87, 0x18, 0x00, 0x20, 0x00, 0x22, 0x00, 0x2c,
+0x0a, 0xdd, 0x58, 0x23, 0x43, 0x43, 0x8c, 0x6a, 0xe3, 0x18, 0x01, 0x30,
+0x00, 0x04, 0x00, 0x0c, 0x9a, 0x60, 0x8b, 0x89, 0x83, 0x42, 0xf4, 0xdc,
+0xcf, 0x62, 0xcc, 0x89, 0x60, 0x00, 0x00, 0x19, 0x40, 0x01, 0xc7, 0x19,
+0x00, 0x20, 0x00, 0x2c, 0x0b, 0xdd, 0x43, 0x00, 0x1b, 0x18, 0x5b, 0x01,
+0xcc, 0x6a, 0xe3, 0x18, 0x01, 0x30, 0x00, 0x04, 0x00, 0x0c, 0x9a, 0x60,
+0xcb, 0x89, 0x83, 0x42, 0xf3, 0xdc, 0x4f, 0x62, 0x00, 0x20, 0x0b, 0x69,
+0x00, 0x2b, 0x07, 0xd9, 0x87, 0x00, 0x4b, 0x6a, 0xc0, 0x46, 0xda, 0x51,
+0x0b, 0x69, 0x01, 0x30, 0x83, 0x42, 0xf7, 0xd8, 0x49, 0x6a, 0x80, 0x00,
+0x08, 0x18, 0x04, 0x38, 0x28, 0x61, 0xf0, 0xbc, 0x70, 0x47, 0x00, 0x00,
+0xb0, 0xbe, 0x21, 0x40, 0x68, 0x0e, 0x00, 0x80, 0x00, 0x00, 0x20, 0x40,
+0x4c, 0x2a, 0x00, 0x80, 0x00, 0x00, 0x20, 0x40, 0x00, 0xad, 0xde, 0x00,
+0x0a, 0x48, 0x01, 0x23, 0x1b, 0x06, 0x41, 0x69, 0x99, 0x43, 0x1a, 0x09,
+0x41, 0x61, 0xd1, 0x60, 0x00, 0x21, 0xa1, 0x22, 0x52, 0x03, 0x91, 0x61,
+0x1b, 0x23, 0xdb, 0x01, 0xc0, 0x18, 0x81, 0x61, 0x01, 0x20, 0x00, 0x06,
+0x59, 0x05, 0x08, 0x60, 0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x80, 0xb4, 0x02, 0x1c, 0x0b, 0x48, 0x1b, 0x23, 0xdb, 0x01, 0xc3, 0x18,
+0x9a, 0x61, 0x01, 0x23, 0x1b, 0x06, 0x42, 0x69, 0x1a, 0x43, 0x42, 0x61,
+0x87, 0x69, 0x9f, 0x43, 0x01, 0x23, 0x1b, 0x05,
+0x87, 0x61, 0xda, 0x60, 0x80, 0x69, 0xc0, 0x46, 0x18, 0x61, 0xa1, 0x20,
+0x40, 0x03, 0x81, 0x61, 0x80, 0xbc, 0x70, 0x47, 0x68, 0x0e, 0x00, 0x80,
+0x80, 0xb5, 0xff, 0xf7, 0xc9, 0xff, 0x00, 0x20, 0x00, 0xf0, 0x20, 0xf8,
+0x00, 0x20, 0x09, 0x49, 0x00, 0x22, 0x03, 0x01, 0x5f, 0x18, 0x33, 0x23,
+0x9b, 0x01, 0xfb, 0x18, 0x9a, 0x62, 0x01, 0x30, 0x0b, 0x28, 0xf6, 0xd3,
+0x04, 0x48, 0x01, 0x22, 0x00, 0x21, 0x00, 0xf0, 0x33, 0xf8, 0x80, 0xbc,
+0x08, 0xbc, 0x18, 0x47, 0x68, 0x0e, 0x00, 0x80, 0x1d, 0x3e, 0xff, 0xff,
+0x00, 0xb5, 0x02, 0x48, 0x00, 0xf0, 0x04, 0xf8, 0x08, 0xbc, 0x18, 0x47,
+0xa8, 0x61, 0x00, 0x00, 0x80, 0xb4, 0x01, 0x22, 0x12, 0x05, 0x0f, 0x4b,
+0xa1, 0x21, 0x49, 0x03, 0x00, 0x28, 0x0e, 0xd0, 0xc8, 0x61, 0x18, 0x1c,
+0x59, 0x69, 0x53, 0x01, 0x19, 0x43, 0x41, 0x61, 0x87, 0x69, 0x9f, 0x43,
+0x87, 0x61, 0xd1, 0x60, 0x80, 0x69, 0xc0, 0x46, 0x10, 0x61, 0x80, 0xbc,
+0x70, 0x47, 0x18, 0x1c, 0x5f, 0x69, 0x01, 0x23, 0x5b, 0x06, 0x9f, 0x43,
+0x47, 0x61, 0xd7, 0x60, 0x00, 0x20, 0xc8, 0x61, 0xf3, 0xe7, 0x00, 0x00,
+0x68, 0x0e, 0x00, 0x80, 0xb0, 0xb4, 0x07, 0x1c, 0x00, 0x20, 0x17, 0x4c,
+0x03, 0x01, 0x1d, 0x19, 0x33, 0x23, 0x9b, 0x01, 0xeb, 0x18, 0x9d, 0x6a,
+0xbd, 0x42, 0x05, 0xd1, 0x1d, 0x6b, 0x95, 0x42, 0x02, 0xd1, 0xdb, 0x6a,
+0x8b, 0x42, 0x1c, 0xd0, 0x01, 0x30, 0x0b, 0x28, 0xee, 0xd3, 0x00, 0x20,
+0x03, 0x01, 0x1d, 0x19, 0x33, 0x23, 0x9b, 0x01, 0xeb, 0x18, 0x9b, 0x6a,
+0x00, 0x2b, 0x09, 0xd1, 0x03, 0x01, 0x1c, 0x19, 0x33, 0x23, 0x9b, 0x01,
+0xe3, 0x18, 0x1a, 0x63, 0xd9, 0x62, 0x5a, 0x63, 0x9f, 0x62, 0x02, 0xe0,
+0x01, 0x30, 0x0b, 0x28, 0xea, 0xd3, 0x0b, 0x28, 0x01, 0xd1, 0x00, 0x20,
+0xc0, 0x43, 0xb0, 0xbc, 0x70, 0x47, 0x00, 0x00, 0x68, 0x0e, 0x00, 0x80,
+0x90, 0xb4, 0x01, 0x1c, 0x00, 0x22, 0x01, 0x20, 0x16, 0x4f, 0x01, 0xe0,
+0x00, 0x2a, 0x07, 0xd1, 0x03, 0x01, 0xdc, 0x19, 0x33, 0x23, 0x9b, 0x01,
+0xe3, 0x18, 0x9b, 0x69, 0x8b, 0x42, 0x11, 0xd1, 0x02, 0x01, 0xd2, 0x19,
+0x33, 0x23, 0x9b, 0x01, 0xd2, 0x18, 0x93, 0x6a, 0xc0, 0x46, 0x93, 0x61,
+0xd3, 0x6a, 0xc0, 0x46, 0xd3, 0x61, 0x13, 0x6b, 0xc0, 0x46, 0x13, 0x62,
+0x53, 0x6b, 0xc0, 0x46, 0x53, 0x62, 0x01, 0x22, 0x01, 0x30, 0x0b, 0x28,
+0xe0, 0xd3, 0x07, 0x4b, 0x00, 0x2a, 0x02, 0xd1, 0x9a, 0x68, 0x8a, 0x42,
+0x03, 0xd1, 0x00, 0x21, 0x99, 0x60, 0x90, 0xbc, 0x70, 0x47, 0x00, 0x20,
+0xc0, 0x43, 0xfa, 0xe7, 0x68, 0x0e, 0x00, 0x80, 0xe8, 0x1b, 0x00, 0x80,
+0x0b, 0x28, 0x17, 0xda, 0x0c, 0x49, 0x01, 0x23, 0x5b, 0x06, 0x8a, 0x69,
+0x13, 0x43, 0x01, 0x22, 0x12, 0x05, 0x8b, 0x61, 0x13, 0x61, 0x00, 0x01,
+0x40, 0x18, 0x33, 0x23, 0x9b, 0x01, 0xc0, 0x18, 0x03, 0x6b, 0xc0, 0x46,
+0x43, 0x63, 0x53, 0x01, 0x88, 0x69, 0x98, 0x43, 0x88, 0x61, 0x10, 0x61,
+0x01, 0x20, 0x70, 0x47, 0x00, 0x20, 0xfc, 0xe7, 0x68, 0x0e, 0x00, 0x80,
+0x90, 0xb4, 0x08, 0x4a, 0xd0, 0x69, 0x00, 0x21, 0x07, 0x4f, 0xd3, 0x69,
+0x83, 0x42, 0x02, 0xd9, 0xfc, 0x1a, 0x20, 0x18, 0x00, 0xe0, 0xc0, 0x1a,
+0x09, 0x18, 0x18, 0x1c, 0xb9, 0x42, 0xf4, 0xd9, 0x90, 0xbc, 0x70, 0x47,
+0x00, 0x20, 0x14, 0x40, 0xa8, 0x61, 0x00, 0x00, 0x90, 0xb5, 0x07, 0x1c,
+0x00, 0x24, 0x00, 0x2f, 0x04, 0xd3, 0xff, 0xf7, 0xe3, 0xff, 0x01, 0x34,
+0xbc, 0x42, 0xfa, 0xd9, 0x90, 0xbc, 0x08, 0xbc,
+0x18, 0x47, 0x00, 0x00,
+};
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
new file mode 100644
index 000000000000..b0d337f7f545
--- /dev/null
+++ b/drivers/net/typhoon.c
@@ -0,0 +1,2673 @@
+/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
+/*
+ Written 2002-2004 by David Dillow <dave@thedillows.org>
+ Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
+ Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This software is available on a public web site. It may enable
+ cryptographic capabilities of the 3Com hardware, and may be
+ exported from the United States under License Exception "TSU"
+ pursuant to 15 C.F.R. Section 740.13(e).
+
+ This work was funded by the National Library of Medicine under
+ the Department of Energy project number 0274DD06D1 and NLM project
+ number Y1-LM-2015-01.
+
+ This driver is designed for the 3Com 3CR990 Family of cards with the
+ 3XP Processor. It has been tested on x86 and sparc64.
+
+ KNOWN ISSUES:
+ *) The current firmware always strips the VLAN tag off, even if
+ we tell it not to. You should filter VLANs at the switch
+ as a workaround (good practice in any event) until we can
+ get this fixed.
+ *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
+ issue. Hopefully 3Com will fix it.
+ *) Waiting for a command response takes 8ms due to non-preemptable
+ polling. Only significant for getting stats and creating
+ SAs, but an ugly wart nevertheless.
+
+ TODO:
+ *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
+ *) Add more support for ethtool (especially for NIC stats)
+ *) Allow disabling of RX checksum offloading
+ *) Fix MAC changing to work while the interface is up
+ (Need to put commands on the TX ring, which changes
+ the locking)
+ *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
+ http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
+*/
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ * Setting to > 1518 effectively disables this feature.
+ */
+static int rx_copybreak = 200;
+
+/* Should we use MMIO or Port IO?
+ * 0: Port IO
+ * 1: MMIO
+ * 2: Try MMIO, fallback to Port IO
+ */
+static unsigned int use_mmio = 2;
+
+/* end user-configurable values */
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ */
+static const int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * There are no ill effects from too-large receive rings.
+ *
+ * We don't currently use the Hi Tx ring, so don't make it very big.
+ *
+ * Beware that if we start using the Hi Tx ring, we will need to change
+ * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
+ */
+#define TXHI_ENTRIES 2
+#define TXLO_ENTRIES 128
+#define RX_ENTRIES 32
+#define COMMAND_ENTRIES 16
+#define RESPONSE_ENTRIES 32
+
+#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
+#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
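+
+/* A minimal illustration of the point above, assuming sizeof(struct
+ * cmd_desc) is 16 bytes (a power of two): an expression such as
+ *   offset % (COMMAND_ENTRIES * sizeof(struct cmd_desc))
+ * can be compiled down to
+ *   offset & (COMMAND_ENTRIES * sizeof(struct cmd_desc) - 1)
+ * i.e. a cheap mask rather than a division, which is why the ring sizes
+ * above are kept powers of two.
+ */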
+
+/* The 3XP will preload and remove 64 entries from the free buffer
+ * list, and we need one entry to keep the ring from wrapping, so
+ * to keep this a power of two, we use 128 entries.
+ */
+#define RXFREE_ENTRIES 128
+#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
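+
+/* Put differently: of the 128 slots above, at most RXENT_ENTRIES == 127
+ * receive buffers can ever be outstanding at once, because one slot must
+ * stay empty so that a write index equal to the cleared index always
+ * means "empty" rather than "full".
+ */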
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536
+
+#define DRV_MODULE_NAME "typhoon"
+#define DRV_MODULE_VERSION "1.5.7"
+#define DRV_MODULE_RELDATE "05/01/07"
+#define PFX DRV_MODULE_NAME ": "
+#define ERR_PFX KERN_ERR PFX
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/in6.h>
+#include <asm/checksum.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+
+#include "typhoon.h"
+#include "typhoon-firmware.h"
+
+static char version[] __devinitdata =
+ "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
+MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
+ "the buffer given back to the NIC. Default "
+ "is 200.");
+MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
+ "Default is to try MMIO and fallback to PIO.");
+module_param(rx_copybreak, int, 0);
+module_param(use_mmio, int, 0);
+
+#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
+#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
+#undef NETIF_F_TSO
+#endif
+
+#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
+#error TX ring too small!
+#endif
+
+struct typhoon_card_info {
+ char *name;
+ int capabilities;
+};
+
+#define TYPHOON_CRYPTO_NONE 0x00
+#define TYPHOON_CRYPTO_DES 0x01
+#define TYPHOON_CRYPTO_3DES 0x02
+#define TYPHOON_CRYPTO_VARIABLE 0x04
+#define TYPHOON_FIBER 0x08
+#define TYPHOON_WAKEUP_NEEDS_RESET 0x10
+
+enum typhoon_cards {
+ TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
+ TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
+ TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
+ TYPHOON_FXM,
+};
+
+/* directly indexed by enum typhoon_cards, above */
+static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
+ { "3Com Typhoon (3C990-TX)",
+ TYPHOON_CRYPTO_NONE},
+ { "3Com Typhoon (3CR990-TX-95)",
+ TYPHOON_CRYPTO_DES},
+ { "3Com Typhoon (3CR990-TX-97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
+ { "3Com Typhoon (3C990SVR)",
+ TYPHOON_CRYPTO_NONE},
+ { "3Com Typhoon (3CR990SVR95)",
+ TYPHOON_CRYPTO_DES},
+ { "3Com Typhoon (3CR990SVR97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
+ { "3Com Typhoon2 (3C990B-TX-M)",
+ TYPHOON_CRYPTO_VARIABLE},
+ { "3Com Typhoon2 (3C990BSVR)",
+ TYPHOON_CRYPTO_VARIABLE},
+ { "3Com Typhoon (3CR990-FX-95)",
+ TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-95 Server)",
+ TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-97 Server)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
+ { "3Com Typhoon2 (3C990B-FX-97)",
+ TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
+};
+
+/* Notes on the new subsystem numbering scheme:
+ * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
+ * bit 4 indicates if this card has secured firmware (we don't support it)
+ * bit 8 indicates if this is a (0) copper or (1) fiber card
+ * bits 12-16 indicate card type: (0) client and (1) server
+ */
+static struct pci_device_id typhoon_pci_tbl[] = {
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
+
+/* Define the shared memory area
+ * Align everything the 3XP will normally be using.
+ * We'll need to move/align txHi if we start using that ring.
+ */
+#define __3xp_aligned ____cacheline_aligned
+struct typhoon_shared {
+ struct typhoon_interface iface;
+ struct typhoon_indexes indexes __3xp_aligned;
+ struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
+ struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
+ struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
+ struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
+ struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
+ struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
+ u32 zeroWord;
+ struct tx_desc txHi[TXHI_ENTRIES];
+} __attribute__ ((packed));
+
+struct rxbuff_ent {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+struct typhoon {
+ /* Tx cache line section */
+ struct transmit_ring txLoRing ____cacheline_aligned;
+ struct pci_dev * tx_pdev;
+ void __iomem *tx_ioaddr;
+ u32 txlo_dma_addr;
+
+ /* Irq/Rx cache line section */
+ void __iomem *ioaddr ____cacheline_aligned;
+ struct typhoon_indexes *indexes;
+ u8 awaiting_resp;
+ u8 duplex;
+ u8 speed;
+ u8 card_state;
+ struct basic_ring rxLoRing;
+ struct pci_dev * pdev;
+ struct net_device * dev;
+ spinlock_t state_lock;
+ struct vlan_group * vlgrp;
+ struct basic_ring rxHiRing;
+ struct basic_ring rxBuffRing;
+ struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
+
+ /* general section */
+ spinlock_t command_lock ____cacheline_aligned;
+ struct basic_ring cmdRing;
+ struct basic_ring respRing;
+ struct net_device_stats stats;
+ struct net_device_stats stats_saved;
+ const char * name;
+ struct typhoon_shared * shared;
+ dma_addr_t shared_dma;
+ u16 xcvr_select;
+ u16 wol_events;
+ u32 offload;
+
+ /* unused stuff (future use) */
+ int capabilities;
+ struct transmit_ring txHiRing;
+};
+
+enum completion_wait_values {
+ NoWait = 0, WaitNoSleep, WaitSleep,
+};
+
+/* These are the values for the typhoon.card_state variable.
+ * These determine where the statistics will come from in get_stats().
+ * The sleep image does not support the statistics we need.
+ */
+enum state_values {
+ Sleeping = 0, Running,
+};
+
+/* PCI writes are not guaranteed to be posted in order, but outstanding writes
+ * cannot pass a read, so this forces current writes to post.
+ */
+#define typhoon_post_pci_writes(x) \
+ do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
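+
+/* Typical usage (see typhoon_reset() below): write to a command or reset
+ * register, call typhoon_post_pci_writes() to flush the posted write out
+ * to the card, then delay or poll for the card's response.
+ */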
+
+/* We'll wait up to six seconds for a reset, and half a second normally.
+ */
+#define TYPHOON_UDELAY 50
+#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
+#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
+#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
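+
+/* Worked out: TYPHOON_WAIT_TIMEOUT is (1000000 / 2) / 50 = 10000 polls of
+ * udelay(50), i.e. the half second mentioned above, and
+ * TYPHOON_RESET_TIMEOUT_NOSLEEP is (6 * 1000000) / 50 = 120000 polls,
+ * roughly six seconds of busy-waiting.
+ */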
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
+#define typhoon_synchronize_irq(x) synchronize_irq()
+#else
+#define typhoon_synchronize_irq(x) synchronize_irq(x)
+#endif
+
+#if defined(NETIF_F_TSO)
+#define skb_tso_size(x) (skb_shinfo(x)->tso_size)
+#define TSO_NUM_DESCRIPTORS 2
+#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
+#else
+#define NETIF_F_TSO 0
+#define skb_tso_size(x) 0
+#define TSO_NUM_DESCRIPTORS 0
+#define TSO_OFFLOAD_ON 0
+#endif
+
+static inline void
+typhoon_inc_index(u32 *index, const int count, const int num_entries)
+{
+ /* Increment a ring index -- we can use this for all rings except
+ * the Rx rings, as they use different size descriptors;
+ * otherwise, everything is the same size as a cmd_desc
+ */
+ *index += count * sizeof(struct cmd_desc);
+ *index %= num_entries * sizeof(struct cmd_desc);
+}
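+
+/* Example of the wrap (again assuming a 16-byte cmd_desc): the command
+ * ring is COMMAND_ENTRIES * 16 == 256 bytes, so an index sitting at the
+ * last slot (offset 240) advanced by one entry becomes
+ * (240 + 16) % 256 == 0 -- callers never handle the wrap themselves.
+ */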
+
+static inline void
+typhoon_inc_cmd_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, COMMAND_ENTRIES);
+}
+
+static inline void
+typhoon_inc_resp_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, RESPONSE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rxfree_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, RXFREE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_tx_index(u32 *index, const int count)
+{
+ /* if we start using the Hi Tx ring, this needs updating */
+ typhoon_inc_index(index, count, TXLO_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rx_index(u32 *index, const int count)
+{
+ /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
+ *index += count * sizeof(struct rx_desc);
+ *index %= RX_ENTRIES * sizeof(struct rx_desc);
+}
+
+static int
+typhoon_reset(void __iomem *ioaddr, int wait_type)
+{
+ int i, err = 0;
+ int timeout;
+
+ if(wait_type == WaitNoSleep)
+ timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
+ else
+ timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+ iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
+ typhoon_post_pci_writes(ioaddr);
+ udelay(1);
+ iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
+
+ if(wait_type != NoWait) {
+ for(i = 0; i < timeout; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
+ TYPHOON_STATUS_WAITING_FOR_HOST)
+ goto out;
+
+ if(wait_type == WaitSleep) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ } else
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+ }
+
+out:
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+ /* The 3XP seems to need a little extra time to complete the load
+ * of the sleep image before we can reliably boot it. Failure to
+ * do this occasionally results in a hung adapter after boot in
+ * typhoon_init_one() while trying to read the MAC address or
+ * putting the card to sleep. 3Com's driver waits 5ms, but
+ * that seems to be overkill. However, if we can sleep, we might
+ * as well give it that much time. Otherwise, we'll give it 500us,
+ * which should be enough (I've seen it work well at 100us, but still
+ * saw occasional problems.)
+ */
+ if(wait_type == WaitSleep)
+ msleep(5);
+ else
+ udelay(500);
+ return err;
+}
+
+static int
+typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
+{
+ int i, err = 0;
+
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
+ goto out;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+
+out:
+ return err;
+}
+
+static inline void
+typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
+{
+ if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+}
+
+static inline void
+typhoon_hello(struct typhoon *tp)
+{
+ struct basic_ring *ring = &tp->cmdRing;
+ struct cmd_desc *cmd;
+
+ /* We only get a hello request if we've not sent anything to the
+ * card in a long while. If the lock is held, then we're in the
+ * process of issuing a command, so we don't need to respond.
+ */
+ if(spin_trylock(&tp->command_lock)) {
+ cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
+ typhoon_inc_cmd_index(&ring->lastWrite, 1);
+
+ INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
+ smp_wmb();
+ iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
+ spin_unlock(&tp->command_lock);
+ }
+}
+
+static int
+typhoon_process_response(struct typhoon *tp, int resp_size,
+ struct resp_desc *resp_save)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct resp_desc *resp;
+ u8 *base = tp->respRing.ringBase;
+ int count, len, wrap_len;
+ u32 cleared;
+ u32 ready;
+
+ cleared = le32_to_cpu(indexes->respCleared);
+ ready = le32_to_cpu(indexes->respReady);
+ while(cleared != ready) {
+ resp = (struct resp_desc *)(base + cleared);
+ count = resp->numDesc + 1;
+ if(resp_save && resp->seqNo) {
+ if(count > resp_size) {
+ resp_save->flags = TYPHOON_RESP_ERROR;
+ goto cleanup;
+ }
+
+ wrap_len = 0;
+ len = count * sizeof(*resp);
+ if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
+ wrap_len = cleared + len - RESPONSE_RING_SIZE;
+ len = RESPONSE_RING_SIZE - cleared;
+ }
+
+ memcpy(resp_save, resp, len);
+ if(unlikely(wrap_len)) {
+ resp_save += len / sizeof(*resp);
+ memcpy(resp_save, base, wrap_len);
+ }
+
+ resp_save = NULL;
+ } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
+ typhoon_media_status(tp->dev, resp);
+ } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
+ typhoon_hello(tp);
+ } else {
+ printk(KERN_ERR "%s: dumping unexpected response "
+ "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
+ tp->name, le16_to_cpu(resp->cmd),
+ resp->numDesc, resp->flags,
+ le16_to_cpu(resp->parm1),
+ le32_to_cpu(resp->parm2),
+ le32_to_cpu(resp->parm3));
+ }
+
+cleanup:
+ typhoon_inc_resp_index(&cleared, count);
+ }
+
+ indexes->respCleared = cpu_to_le32(cleared);
+ wmb();
+ return (resp_save == NULL);
+}
+
+static inline int
+typhoon_num_free(int lastWrite, int lastRead, int ringSize)
+{
+ /* this works for all descriptors but rx_desc, as they are a
+ * different size than the cmd_desc -- everyone else is the same
+ */
+ lastWrite /= sizeof(struct cmd_desc);
+ lastRead /= sizeof(struct cmd_desc);
+ return (ringSize + lastRead - lastWrite - 1) % ringSize;
+}
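+
+/* Worked example: on a 16-entry ring with lastRead at entry 3 and
+ * lastWrite at entry 5 (after the division above), the result is
+ * (16 + 3 - 5 - 1) % 16 == 13 free slots. The "- 1" keeps one slot
+ * permanently unused, so a full ring is never mistaken for an empty one
+ * (lastWrite == lastRead).
+ */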
+
+static inline int
+typhoon_num_free_cmd(struct typhoon *tp)
+{
+ int lastWrite = tp->cmdRing.lastWrite;
+ int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
+
+ return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
+}
+
+static inline int
+typhoon_num_free_resp(struct typhoon *tp)
+{
+ int respReady = le32_to_cpu(tp->indexes->respReady);
+ int respCleared = le32_to_cpu(tp->indexes->respCleared);
+
+ return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
+}
+
+static inline int
+typhoon_num_free_tx(struct transmit_ring *ring)
+{
+ /* if we start using the Hi Tx ring, this needs updating */
+ return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
+}
+
+static int
+typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
+ int num_resp, struct resp_desc *resp)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct basic_ring *ring = &tp->cmdRing;
+ struct resp_desc local_resp;
+ int i, err = 0;
+ int got_resp;
+ int freeCmd, freeResp;
+ int len, wrap_len;
+
+ spin_lock(&tp->command_lock);
+
+ freeCmd = typhoon_num_free_cmd(tp);
+ freeResp = typhoon_num_free_resp(tp);
+
+ if(freeCmd < num_cmd || freeResp < num_resp) {
+ printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
+ "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
+ freeResp, num_resp);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if(cmd->flags & TYPHOON_CMD_RESPOND) {
+ /* If we're expecting a response, but the caller hasn't given
+ * us a place to put it, we'll provide one.
+ */
+ tp->awaiting_resp = 1;
+ if(resp == NULL) {
+ resp = &local_resp;
+ num_resp = 1;
+ }
+ }
+
+ wrap_len = 0;
+ len = num_cmd * sizeof(*cmd);
+ if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
+ wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
+ len = COMMAND_RING_SIZE - ring->lastWrite;
+ }
+
+ memcpy(ring->ringBase + ring->lastWrite, cmd, len);
+ if(unlikely(wrap_len)) {
+ struct cmd_desc *wrap_ptr = cmd;
+ wrap_ptr += len / sizeof(*cmd);
+ memcpy(ring->ringBase, wrap_ptr, wrap_len);
+ }
+
+ typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
+
+ /* "I feel a presence... another warrior is on the the mesa."
+ */
+ wmb();
+ iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
+ typhoon_post_pci_writes(tp->ioaddr);
+
+ if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
+ goto out;
+
+ /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
+ * preempt or do anything other than take interrupts. So, don't
+ * wait for a response unless you have to.
+ *
+ * I've thought about trying to sleep here, but we're called
+ * from many contexts that don't allow that. Also, given the way
+ * 3Com has implemented irq coalescing, we would likely timeout --
+ * this has been observed in real life!
+ *
+ * The big killer is we have to wait to get stats from the card,
+ * though we could go to a periodic refresh of those if we don't
+ * mind them getting somewhat stale. The rest of the waiting
+ * commands occur during open/close/suspend/resume, so they aren't
+ * time critical. Creating SAs in the future will also have to
+ * wait here.
+ */
+ got_resp = 0;
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
+ if(indexes->respCleared != indexes->respReady)
+ got_resp = typhoon_process_response(tp, num_resp,
+ resp);
+ udelay(TYPHOON_UDELAY);
+ }
+
+ if(!got_resp) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Collect the error response even if we don't care about the
+ * rest of the response
+ */
+ if(resp->flags & TYPHOON_RESP_ERROR)
+ err = -EIO;
+
+out:
+ if(tp->awaiting_resp) {
+ tp->awaiting_resp = 0;
+ smp_wmb();
+
+ /* Ugh. If a response was added to the ring between
+ * the call to typhoon_process_response() and the clearing
+ * of tp->awaiting_resp, we could have missed the interrupt
+ * and it could hang in the ring an indeterminate amount of
+ * time. So, check for it, and interrupt ourselves if this
+ * is the case.
+ */
+ if(indexes->respCleared != indexes->respReady)
+ iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
+ }
+
+ spin_unlock(&tp->command_lock);
+ return err;
+}
+
+static void
+typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+ int err;
+
+ spin_lock_bh(&tp->state_lock);
+ if(!tp->vlgrp != !grp) {
+ /* We've either been turned on for the first time, or we've
+ * been turned off. Update the 3XP.
+ */
+ if(grp)
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
+ else
+ tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
+
+ /* If the interface is up, the runtime is running -- and we
+ * must be up for the vlan core to call us.
+ *
+ * Do the command outside of the spin lock, as it is slow.
+ */
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
+ TYPHOON_CMD_SET_OFFLOAD_TASKS);
+ xp_cmd.parm2 = tp->offload;
+ xp_cmd.parm3 = tp->offload;
+ spin_unlock_bh(&tp->state_lock);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ printk("%s: vlan offload error %d\n", tp->name, -err);
+ spin_lock_bh(&tp->state_lock);
+ }
+
+ /* now make the change visible */
+ tp->vlgrp = grp;
+ spin_unlock_bh(&tp->state_lock);
+}
+
+static void
+typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ spin_lock_bh(&tp->state_lock);
+ if(tp->vlgrp)
+ tp->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock_bh(&tp->state_lock);
+}
+
+static inline void
+typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
+ u32 ring_dma)
+{
+ struct tcpopt_desc *tcpd;
+ u32 tcpd_offset = ring_dma;
+
+ tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
+ tcpd_offset += txRing->lastWrite;
+ tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
+ tcpd->numDesc = 1;
+ tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
+ tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
+ tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
+ tcpd->bytesTx = cpu_to_le32(skb->len);
+ tcpd->status = 0;
+}
+
+static int
+typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct transmit_ring *txRing;
+ struct tx_desc *txd, *first_txd;
+ dma_addr_t skb_dma;
+ int numDesc;
+
+ /* we have two rings to choose from, but we only use txLo for now
+ * If we start using the Hi ring as well, we'll need to update
+ * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
+ * and TXHI_ENTRIES to match, as well as update the TSO code below
+ * to get the right DMA address
+ */
+ txRing = &tp->txLoRing;
+
+ /* We need one descriptor for each fragment of the sk_buff, plus the
+ * one for the ->data area of it.
+ *
+ * The docs say a maximum of 16 fragment descriptors per TCP option
+ * descriptor, then make a new packet descriptor and option descriptor
+ * for the next 16 fragments. The engineers say just an option
+ * descriptor is needed. I've tested up to 26 fragments with a single
+ * packet descriptor/option descriptor combo, so I use that for now.
+ *
+ * If problems develop with TSO, check this first.
+ */
+ numDesc = skb_shinfo(skb)->nr_frags + 1;
+ if(skb_tso_size(skb))
+ numDesc++;
+
+ /* When checking for free space in the ring, we need to also
+ * account for the initial Tx descriptor, and we always must leave
+ * at least one descriptor unused in the ring so that it doesn't
+ * wrap and look empty.
+ *
+ * The only time we should loop here is when we hit the race
+ * between marking the queue awake and updating the cleared index.
+ * Just loop and it will appear. This comes from the acenic driver.
+ */
+ while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
+ smp_rmb();
+
+ first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
+ first_txd->numDesc = 0;
+ first_txd->len = 0;
+ first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
+ first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
+ first_txd->processFlags = 0;
+
+ if(skb->ip_summed == CHECKSUM_HW) {
+ /* The 3XP will figure out if this is UDP/TCP */
+ first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
+ first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
+ first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
+ }
+
+ if(vlan_tx_tag_present(skb)) {
+ first_txd->processFlags |=
+ TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
+ first_txd->processFlags |=
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+ TYPHOON_TX_PF_VLAN_TAG_SHIFT);
+ }
+
+ if(skb_tso_size(skb)) {
+ first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
+ first_txd->numDesc++;
+
+ typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
+ }
+
+ txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ /* No need to worry about padding the packet -- the firmware pads
+ * it with zeros to ETH_ZLEN for us.
+ */
+ if(skb_shinfo(skb)->nr_frags == 0) {
+ skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(skb->len);
+ txd->addr = cpu_to_le32(skb_dma);
+ txd->addrHi = 0;
+ first_txd->numDesc++;
+ } else {
+ int i, len;
+
+ len = skb_headlen(skb);
+ skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(len);
+ txd->addr = cpu_to_le32(skb_dma);
+ txd->addrHi = 0;
+ first_txd->numDesc++;
+
+ for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *frag_addr;
+
+ txd = (struct tx_desc *) (txRing->ringBase +
+ txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ len = frag->size;
+ frag_addr = (void *) page_address(frag->page) +
+ frag->page_offset;
+ skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(len);
+ txd->addr = cpu_to_le32(skb_dma);
+ txd->addrHi = 0;
+ first_txd->numDesc++;
+ }
+ }
+
+ /* Kick the 3XP
+ */
+ wmb();
+ iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
+
+ dev->trans_start = jiffies;
+
+ /* If we don't have room to put the worst case packet on the
+ * queue, then we must stop the queue. We need 2 extra
+ * descriptors -- one to prevent ring wrap, and one for the
+ * Tx header.
+ */
+ numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
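+ /* Illustrative worst case, assuming 4K pages where MAX_SKB_FRAGS is
+ * typically 18: 18 fragments + 2 TSO descriptors + 1 linear data
+ * descriptor = 21, and the check below adds 2 more slots -- one kept
+ * empty to prevent ring wrap and one for the Tx header descriptor.
+ */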
+
+ if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
+ netif_stop_queue(dev);
+
+ /* A Tx complete IRQ could have gotten in between, making
+ * the ring free again. Only need to recheck here, since
+ * Tx is serialized.
+ */
+ if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
+ netif_wake_queue(dev);
+ }
+
+ return 0;
+}
+
+static void
+typhoon_set_rx_mode(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+ u32 mc_filter[2];
+ u16 filter;
+
+ filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
+ if(dev->flags & IFF_PROMISC) {
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ filter |= TYPHOON_RX_FILTER_PROMISCOUS;
+ } else if((dev->mc_count > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ filter |= TYPHOON_RX_FILTER_ALL_MCAST;
+ } else if(dev->mc_count) {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd,
+ TYPHOON_CMD_SET_MULTICAST_HASH);
+ xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
+ xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
+ xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ filter |= TYPHOON_RX_FILTER_MCAST_HASH;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ xp_cmd.parm1 = filter;
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+}
+
+static int
+typhoon_do_get_stats(struct typhoon *tp)
+{
+ struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *saved = &tp->stats_saved;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[7];
+ struct stats_resp *s = (struct stats_resp *) xp_resp;
+ int err;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
+ if(err < 0)
+ return err;
+
+ /* 3Com's Linux driver uses txMultipleCollisions as its
+ * collisions value, but there is some other collision info as well...
+ *
+ * The extra status reported would be a good candidate for
+ * ethtool_ops->get_{strings,stats}()
+ */
+ stats->tx_packets = le32_to_cpu(s->txPackets);
+ stats->tx_bytes = le32_to_cpu(s->txBytes);
+ stats->tx_errors = le32_to_cpu(s->txCarrierLost);
+ stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
+ stats->collisions = le32_to_cpu(s->txMultipleCollisions);
+ stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
+ stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
+ stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
+ stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
+ le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
+ stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
+ stats->rx_length_errors = le32_to_cpu(s->rxOversized);
+ tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
+ SPEED_100 : SPEED_10;
+ tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ /* add in the saved statistics
+ */
+ stats->tx_packets += saved->tx_packets;
+ stats->tx_bytes += saved->tx_bytes;
+ stats->tx_errors += saved->tx_errors;
+ stats->collisions += saved->collisions;
+ stats->rx_packets += saved->rx_packets;
+ stats->rx_bytes += saved->rx_bytes;
+ stats->rx_fifo_errors += saved->rx_fifo_errors;
+ stats->rx_errors += saved->rx_errors;
+ stats->rx_crc_errors += saved->rx_crc_errors;
+ stats->rx_length_errors += saved->rx_length_errors;
+
+ return 0;
+}
+
+static struct net_device_stats *
+typhoon_get_stats(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *saved = &tp->stats_saved;
+
+ smp_rmb();
+ if(tp->card_state == Sleeping)
+ return saved;
+
+ if(typhoon_do_get_stats(tp) < 0) {
+ printk(KERN_ERR "%s: error getting stats\n", dev->name);
+ return saved;
+ }
+
+ return stats;
+}
+
+static int
+typhoon_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = (struct sockaddr *) addr;
+
+ if(netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static void
+typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct pci_dev *pci_dev = tp->pdev;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[3];
+
+ smp_rmb();
+ if(tp->card_state == Sleeping) {
+ strcpy(info->fw_version, "Sleep image");
+ } else {
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ strcpy(info->fw_version, "Unknown runtime");
+ } else {
+ u32 sleep_ver = xp_resp[0].parm2;
+ snprintf(info->fw_version, 32, "%02x.%03x.%03x",
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver & 0xfff);
+ }
+ }
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->bus_info, pci_name(pci_dev));
+}
+
+static int
+typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg;
+
+ switch (tp->xcvr_select) {
+ case TYPHOON_XCVR_10HALF:
+ cmd->advertising = ADVERTISED_10baseT_Half;
+ break;
+ case TYPHOON_XCVR_10FULL:
+ cmd->advertising = ADVERTISED_10baseT_Full;
+ break;
+ case TYPHOON_XCVR_100HALF:
+ cmd->advertising = ADVERTISED_100baseT_Half;
+ break;
+ case TYPHOON_XCVR_100FULL:
+ cmd->advertising = ADVERTISED_100baseT_Full;
+ break;
+ case TYPHOON_XCVR_AUTONEG:
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ break;
+ }
+
+ if(tp->capabilities & TYPHOON_FIBER) {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ } else {
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ cmd->port = PORT_TP;
+ }
+
+ /* need to get stats to make these link speed/duplex valid */
+ typhoon_do_get_stats(tp);
+ cmd->speed = tp->speed;
+ cmd->duplex = tp->duplex;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ return 0;
+}
+
+static int
+typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+ int xcvr;
+ int err;
+
+ err = -EINVAL;
+ if(cmd->autoneg == AUTONEG_ENABLE) {
+ xcvr = TYPHOON_XCVR_AUTONEG;
+ } else {
+ if(cmd->duplex == DUPLEX_HALF) {
+ if(cmd->speed == SPEED_10)
+ xcvr = TYPHOON_XCVR_10HALF;
+ else if(cmd->speed == SPEED_100)
+ xcvr = TYPHOON_XCVR_100HALF;
+ else
+ goto out;
+ } else if(cmd->duplex == DUPLEX_FULL) {
+ if(cmd->speed == SPEED_10)
+ xcvr = TYPHOON_XCVR_10FULL;
+ else if(cmd->speed == SPEED_100)
+ xcvr = TYPHOON_XCVR_100FULL;
+ else
+ goto out;
+ } else
+ goto out;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
+ xp_cmd.parm1 = cpu_to_le16(xcvr);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto out;
+
+ tp->xcvr_select = xcvr;
+ if(cmd->autoneg == AUTONEG_ENABLE) {
+ tp->speed = 0xff; /* invalid */
+ tp->duplex = 0xff; /* invalid */
+ } else {
+ tp->speed = cmd->speed;
+ tp->duplex = cmd->duplex;
+ }
+
+out:
+ return err;
+}
+
+static void
+typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ wol->supported = WAKE_PHY | WAKE_MAGIC;
+ wol->wolopts = 0;
+ if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
+ wol->wolopts |= WAKE_PHY;
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ wol->wolopts |= WAKE_MAGIC;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
+ tp->wol_events = 0;
+ if(wol->wolopts & WAKE_PHY)
+ tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
+ if(wol->wolopts & WAKE_MAGIC)
+ tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
+
+ return 0;
+}
+
+static u32
+typhoon_get_rx_csum(struct net_device *dev)
+{
+ /* For now, we don't allow turning off RX checksums.
+ */
+ return 1;
+}
+
+static void
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ ering->rx_max_pending = RXENT_ENTRIES;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TXLO_ENTRIES - 1;
+
+ ering->rx_pending = RXENT_ENTRIES;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = TXLO_ENTRIES - 1;
+}
+
+static struct ethtool_ops typhoon_ethtool_ops = {
+ .get_settings = typhoon_get_settings,
+ .set_settings = typhoon_set_settings,
+ .get_drvinfo = typhoon_get_drvinfo,
+ .get_wol = typhoon_get_wol,
+ .set_wol = typhoon_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = typhoon_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+ .get_ringparam = typhoon_get_ringparam,
+};
+
+static int
+typhoon_wait_interrupt(void __iomem *ioaddr)
+{
+ int i, err = 0;
+
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
+ TYPHOON_INTR_BOOTCMD)
+ goto out;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+
+out:
+ iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
+ return err;
+}
+
+#define shared_offset(x) offsetof(struct typhoon_shared, x)
+
+static void
+typhoon_init_interface(struct typhoon *tp)
+{
+ struct typhoon_interface *iface = &tp->shared->iface;
+ dma_addr_t shared_dma;
+
+ memset(tp->shared, 0, sizeof(struct typhoon_shared));
+
+ /* The *Hi members of iface are all init'd to zero by the memset().
+ */
+ shared_dma = tp->shared_dma + shared_offset(indexes);
+ iface->ringIndex = cpu_to_le32(shared_dma);
+
+ shared_dma = tp->shared_dma + shared_offset(txLo);
+ iface->txLoAddr = cpu_to_le32(shared_dma);
+ iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(txHi);
+ iface->txHiAddr = cpu_to_le32(shared_dma);
+ iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(rxBuff);
+ iface->rxBuffAddr = cpu_to_le32(shared_dma);
+ iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
+ sizeof(struct rx_free));
+
+ shared_dma = tp->shared_dma + shared_offset(rxLo);
+ iface->rxLoAddr = cpu_to_le32(shared_dma);
+ iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(rxHi);
+ iface->rxHiAddr = cpu_to_le32(shared_dma);
+ iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(cmd);
+ iface->cmdAddr = cpu_to_le32(shared_dma);
+ iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
+
+ shared_dma = tp->shared_dma + shared_offset(resp);
+ iface->respAddr = cpu_to_le32(shared_dma);
+ iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
+
+ shared_dma = tp->shared_dma + shared_offset(zeroWord);
+ iface->zeroAddr = cpu_to_le32(shared_dma);
+
+ tp->indexes = &tp->shared->indexes;
+ tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
+ tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
+ tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
+ tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
+ tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
+ tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
+ tp->respRing.ringBase = (u8 *) tp->shared->resp;
+
+ tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
+ tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
+
+ tp->txlo_dma_addr = iface->txLoAddr;
+ tp->card_state = Sleeping;
+ smp_wmb();
+
+ tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
+ tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+
+ spin_lock_init(&tp->command_lock);
+ spin_lock_init(&tp->state_lock);
+}
+
+static void
+typhoon_init_rings(struct typhoon *tp)
+{
+ memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
+
+ tp->txLoRing.lastWrite = 0;
+ tp->txHiRing.lastWrite = 0;
+ tp->rxLoRing.lastWrite = 0;
+ tp->rxHiRing.lastWrite = 0;
+ tp->rxBuffRing.lastWrite = 0;
+ tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
+
+ tp->txLoRing.lastRead = 0;
+ tp->txHiRing.lastRead = 0;
+}
+
+static int
+typhoon_download_firmware(struct typhoon *tp)
+{
+ void __iomem *ioaddr = tp->ioaddr;
+ struct pci_dev *pdev = tp->pdev;
+ struct typhoon_file_header *fHdr;
+ struct typhoon_section_header *sHdr;
+ u8 *image_data;
+ void *dpage;
+ dma_addr_t dpage_dma;
+ unsigned int csum;
+ u32 irqEnabled;
+ u32 irqMasked;
+ u32 numSections;
+ u32 section_len;
+ u32 len;
+ u32 load_addr;
+ u32 hmac;
+ int i;
+ int err;
+
+ err = -EINVAL;
+ fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
+ image_data = (u8 *) fHdr;
+
+ if(memcmp(fHdr->tag, "TYPHOON", 8)) {
+ printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
+ goto err_out;
+ }
+
+ /* Cannot just map the firmware image using pci_map_single() as
+ * the firmware is part of the kernel/module image, so we allocate
+ * some consistent memory to copy the sections into, as it is simpler,
+ * and short-lived. If we ever split out and require a userland
+ * firmware loader, then we can revisit this.
+ */
+ err = -ENOMEM;
+ dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
+ if(!dpage) {
+ printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
+ goto err_out;
+ }
+
+ irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
+ iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
+ ioaddr + TYPHOON_REG_INTR_ENABLE);
+ irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
+ ioaddr + TYPHOON_REG_INTR_MASK);
+
+ err = -ETIMEDOUT;
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ printk(KERN_ERR "%s: card ready timeout\n", tp->name);
+ goto err_out_irq;
+ }
+
+ numSections = le32_to_cpu(fHdr->numSections);
+ load_addr = le32_to_cpu(fHdr->startAddr);
+
+ iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
+ hmac = le32_to_cpu(fHdr->hmacDigest[0]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
+ hmac = le32_to_cpu(fHdr->hmacDigest[1]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
+ hmac = le32_to_cpu(fHdr->hmacDigest[2]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
+ hmac = le32_to_cpu(fHdr->hmacDigest[3]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
+ hmac = le32_to_cpu(fHdr->hmacDigest[4]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
+
+ image_data += sizeof(struct typhoon_file_header);
+
+ /* The ioread32() in typhoon_wait_interrupt() will force the
+ * last write to the command register to post, so
+ * we don't need a typhoon_post_pci_writes() after it.
+ */
+ for(i = 0; i < numSections; i++) {
+ sHdr = (struct typhoon_section_header *) image_data;
+ image_data += sizeof(struct typhoon_section_header);
+ load_addr = le32_to_cpu(sHdr->startAddr);
+ section_len = le32_to_cpu(sHdr->len);
+
+ while(section_len) {
+ len = min_t(u32, section_len, PAGE_SIZE);
+
+ if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
+ printk(KERN_ERR "%s: segment ready timeout\n",
+ tp->name);
+ goto err_out_irq;
+ }
+
+ /* Do a pseudo IPv4 checksum on the data -- first
+ * need to convert each u16 to cpu order before
+ * summing. Fortunately, due to the properties of
+ * the checksum, we can do this once, at the end.
+ */
+ csum = csum_partial_copy_nocheck(image_data, dpage,
+ len, 0);
+ csum = csum_fold(csum);
+ csum = le16_to_cpu(csum);
+
+ iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
+ iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
+ iowrite32(load_addr,
+ ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
+ iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
+ iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
+ ioaddr + TYPHOON_REG_COMMAND);
+
+ image_data += len;
+ load_addr += len;
+ section_len -= len;
+ }
+ }
+
+ if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
+ printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
+ goto err_out_irq;
+ }
+
+ iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
+ tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ goto err_out_irq;
+ }
+
+ err = 0;
+
+err_out_irq:
+ iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
+
+err_out:
+ return err;
+}
+
+static int
+typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
+{
+ void __iomem *ioaddr = tp->ioaddr;
+
+ if(typhoon_wait_status(ioaddr, initial_status) < 0) {
+ printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
+ goto out_timeout;
+ }
+
+ iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
+ iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
+ ioaddr + TYPHOON_REG_COMMAND);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
+ printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
+ tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ goto out_timeout;
+ }
+
+ /* Clear the Transmit and Command ready registers
+ */
+ iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
+ iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
+ iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
+
+ return 0;
+
+out_timeout:
+ return -ETIMEDOUT;
+}
+
+static u32
+typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
+ volatile u32 * index)
+{
+ u32 lastRead = txRing->lastRead;
+ struct tx_desc *tx;
+ dma_addr_t skb_dma;
+ int dma_len;
+ int type;
+
+ while(lastRead != le32_to_cpu(*index)) {
+ tx = (struct tx_desc *) (txRing->ringBase + lastRead);
+ type = tx->flags & TYPHOON_TYPE_MASK;
+
+ if(type == TYPHOON_TX_DESC) {
+ /* This tx_desc describes a packet.
+ */
+ unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
+ struct sk_buff *skb = (struct sk_buff *) ptr;
+ dev_kfree_skb_irq(skb);
+ } else if(type == TYPHOON_FRAG_DESC) {
+ /* This tx_desc describes a memory mapping. Free it.
+ */
+ skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
+ dma_len = le16_to_cpu(tx->len);
+ pci_unmap_single(tp->pdev, skb_dma, dma_len,
+ PCI_DMA_TODEVICE);
+ }
+
+ tx->flags = 0;
+ typhoon_inc_tx_index(&lastRead, 1);
+ }
+
+ return lastRead;
+}
+
+static void
+typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
+ volatile u32 * index)
+{
+ u32 lastRead;
+ int numDesc = MAX_SKB_FRAGS + 1;
+
+ /* This will need changing if we start to use the Hi Tx ring. */
+ lastRead = typhoon_clean_tx(tp, txRing, index);
+ if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
+ lastRead, TXLO_ENTRIES) > (numDesc + 2))
+ netif_wake_queue(tp->dev);
+
+ txRing->lastRead = lastRead;
+ smp_wmb();
+}
+
+static void
+typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
+ struct basic_ring *ring = &tp->rxBuffRing;
+ struct rx_free *r;
+
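+ /* The indices here are byte offsets into the free ring; treating the
+ * ring as full when advancing lastWrite by one rx_free entry would make
+ * it equal rxBuffCleared keeps a full ring distinguishable from an
+ * empty one.
+ */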
+ if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ indexes->rxBuffCleared) {
+ /* no room in ring, just drop the skb
+ */
+ dev_kfree_skb_any(rxb->skb);
+ rxb->skb = NULL;
+ return;
+ }
+
+ r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
+ typhoon_inc_rxfree_index(&ring->lastWrite, 1);
+ r->virtAddr = idx;
+ r->physAddr = cpu_to_le32(rxb->dma_addr);
+
+ /* Tell the card about it */
+ wmb();
+ indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
+}
+
+static int
+typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
+ struct basic_ring *ring = &tp->rxBuffRing;
+ struct rx_free *r;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ rxb->skb = NULL;
+
+ if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ indexes->rxBuffCleared)
+ return -ENOMEM;
+
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if(!skb)
+ return -ENOMEM;
+
+#if 0
+ /* Please, 3com, fix the firmware to allow DMA to an unaligned
+ * address! Pretty please?
+ */
+ skb_reserve(skb, 2);
+#endif
+
+ skb->dev = tp->dev;
+ dma_addr = pci_map_single(tp->pdev, skb->tail,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ /* Since no card does 64 bit DAC, the high bits will never
+ * change from zero.
+ */
+ r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
+ typhoon_inc_rxfree_index(&ring->lastWrite, 1);
+ r->virtAddr = idx;
+ r->physAddr = cpu_to_le32(dma_addr);
+ rxb->skb = skb;
+ rxb->dma_addr = dma_addr;
+
+ /* Tell the card about it */
+ wmb();
+ indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
+ return 0;
+}
+
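+/* Process up to 'budget' frames from a receive ring. Frames smaller than
+ * rx_copybreak are copied into a freshly allocated skb and the original
+ * buffer is recycled; larger frames are passed up directly and a new
+ * buffer is allocated to replace them. Returns the number of frames
+ * received.
+ */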
+static int
+typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
+ volatile u32 * cleared, int budget)
+{
+ struct rx_desc *rx;
+ struct sk_buff *skb, *new_skb;
+ struct rxbuff_ent *rxb;
+ dma_addr_t dma_addr;
+ u32 local_ready;
+ u32 rxaddr;
+ int pkt_len;
+ u32 idx;
+ u32 csum_bits;
+ int received;
+
+ received = 0;
+ local_ready = le32_to_cpu(*ready);
+ rxaddr = le32_to_cpu(*cleared);
+ while(rxaddr != local_ready && budget > 0) {
+ rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
+ idx = rx->addr;
+ rxb = &tp->rxbuffers[idx];
+ skb = rxb->skb;
+ dma_addr = rxb->dma_addr;
+
+ typhoon_inc_rx_index(&rxaddr, 1);
+
+ if(rx->flags & TYPHOON_RX_ERROR) {
+ typhoon_recycle_rx_skb(tp, idx);
+ continue;
+ }
+
+ pkt_len = le16_to_cpu(rx->frameLen);
+
+ if(pkt_len < rx_copybreak &&
+ (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ new_skb->dev = tp->dev;
+ skb_reserve(new_skb, 2);
+ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
+ PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
+ pci_dma_sync_single_for_device(tp->pdev, dma_addr,
+ PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ skb_put(new_skb, pkt_len);
+ typhoon_recycle_rx_skb(tp, idx);
+ } else {
+ new_skb = skb;
+ skb_put(new_skb, pkt_len);
+ pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ typhoon_alloc_rx_skb(tp, idx);
+ }
+ new_skb->protocol = eth_type_trans(new_skb, tp->dev);
+ csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
+ TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
+ if(csum_bits ==
+ (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
+ || csum_bits ==
+ (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
+ new_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ new_skb->ip_summed = CHECKSUM_NONE;
+
+ spin_lock(&tp->state_lock);
+ if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
+ vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
+ ntohl(rx->vlanTag) & 0xffff);
+ else
+ netif_receive_skb(new_skb);
+ spin_unlock(&tp->state_lock);
+
+ tp->dev->last_rx = jiffies;
+ received++;
+ budget--;
+ }
+ *cleared = cpu_to_le32(rxaddr);
+
+ return received;
+}
+
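+/* Top up the free buffer ring, allocating a fresh skb for any receive
+ * entry that does not currently have one.
+ */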
+static void
+typhoon_fill_free_ring(struct typhoon *tp)
+{
+ u32 i;
+
+ for(i = 0; i < RXENT_ENTRIES; i++) {
+ struct rxbuff_ent *rxb = &tp->rxbuffers[i];
+ if(rxb->skb)
+ continue;
+ if(typhoon_alloc_rx_skb(tp, i) < 0)
+ break;
+ }
+}
+
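+/* NAPI poll routine: handle any pending command responses, reclaim
+ * completed transmits, then service both receive rings within the quota.
+ * Returns 0 and unmasks interrupts once all work is done, or 1 to be
+ * polled again.
+ */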
+static int
+typhoon_poll(struct net_device *dev, int *total_budget)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct typhoon_indexes *indexes = tp->indexes;
+ int orig_budget = *total_budget;
+ int budget, work_done, done;
+
+ rmb();
+ if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
+ typhoon_process_response(tp, 0, NULL);
+
+ if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
+ typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
+
+ if(orig_budget > dev->quota)
+ orig_budget = dev->quota;
+
+ budget = orig_budget;
+ work_done = 0;
+ done = 1;
+
+ if(indexes->rxHiCleared != indexes->rxHiReady) {
+ work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
+ &indexes->rxHiCleared, budget);
+ budget -= work_done;
+ }
+
+ if(indexes->rxLoCleared != indexes->rxLoReady) {
+ work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
+ &indexes->rxLoCleared, budget);
+ }
+
+ if(work_done) {
+ *total_budget -= work_done;
+ dev->quota -= work_done;
+
+ if(work_done >= orig_budget)
+ done = 0;
+ }
+
+ if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
+ /* rxBuff ring is empty, try to fill it. */
+ typhoon_fill_free_ring(tp);
+ }
+
+ if(done) {
+ netif_rx_complete(dev);
+ iowrite32(TYPHOON_INTR_NONE,
+ tp->ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(tp->ioaddr);
+ }
+
+ return (done ? 0 : 1);
+}
+
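+/* Interrupt handler: acknowledge the interrupt, mask further interrupts,
+ * and schedule the NAPI poll routine to do the real work.
+ */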
+static irqreturn_t
+typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct typhoon *tp = dev->priv;
+ void __iomem *ioaddr = tp->ioaddr;
+ u32 intr_status;
+
+ intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if(!(intr_status & TYPHOON_INTR_HOST_INT))
+ return IRQ_NONE;
+
+ iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+ if(netif_rx_schedule_prep(dev)) {
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(ioaddr);
+ __netif_rx_schedule(dev);
+ } else {
+ printk(KERN_ERR "%s: Error, poll already scheduled\n",
+ dev->name);
+ }
+ return IRQ_HANDLED;
+}
+
+static void
+typhoon_free_rx_rings(struct typhoon *tp)
+{
+ u32 i;
+
+ for(i = 0; i < RXENT_ENTRIES; i++) {
+ struct rxbuff_ent *rxb = &tp->rxbuffers[i];
+ if(rxb->skb) {
+ pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rxb->skb);
+ rxb->skb = NULL;
+ }
+ }
+}
+
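+/* Put the 3XP into its low-power sleep state: enable the requested wake
+ * events, issue the sleep command, wait for the chip to report that it is
+ * sleeping, and then power down the PCI device.
+ */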
+static int
+typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
+{
+ struct pci_dev *pdev = tp->pdev;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int err;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
+ xp_cmd.parm1 = events;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0) {
+ printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
+ tp->name, err);
+ return err;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0) {
+ printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
+ tp->name, err);
+ return err;
+ }
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
+ return -ETIMEDOUT;
+
+ /* Since we cannot monitor the status of the link while sleeping,
+ * tell the world it went away.
+ */
+ netif_carrier_off(tp->dev);
+
+ pci_enable_wake(tp->pdev, state, 1);
+ pci_disable_device(pdev);
+ return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
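+/* Bring the 3XP back out of sleep: restore PCI state, issue the wakeup
+ * command, and fall back to a full reset if the Sleep Image needs one or
+ * the wakeup times out.
+ */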
+static int
+typhoon_wakeup(struct typhoon *tp, int wait_type)
+{
+ struct pci_dev *pdev = tp->pdev;
+ void __iomem *ioaddr = tp->ioaddr;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Post 2.x.x versions of the Sleep Image require a reset before
+ * we can download the Runtime Image. But let's not make users of
+ * the old firmware pay for the reset.
+ */
+ iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
+ (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
+ return typhoon_reset(ioaddr, wait_type);
+
+ return 0;
+}
+
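+/* Bring the 3XP from the Sleep Image to a running state: download and
+ * boot the Runtime Image, then issue the commands that set the packet
+ * size, MAC address, transceiver, offloads, and receive filter before
+ * enabling Tx, Rx, and interrupts.
+ */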
+static int
+typhoon_start_runtime(struct typhoon *tp)
+{
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int err;
+
+ typhoon_init_rings(tp);
+ typhoon_fill_free_ring(tp);
+
+ err = typhoon_download_firmware(tp);
+ if(err < 0) {
+ printk("%s: cannot load runtime on 3XP\n", tp->name);
+ goto error_out;
+ }
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ printk("%s: cannot boot 3XP\n", tp->name);
+ err = -EIO;
+ goto error_out;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
+ xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
+ xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
+ xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ /* Disable IRQ coalescing -- we can reenable it when 3Com gives
+ * us some more information on how to control it.
+ */
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
+ xp_cmd.parm1 = 0;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
+ xp_cmd.parm1 = tp->xcvr_select;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
+ xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
+ spin_lock_bh(&tp->state_lock);
+ xp_cmd.parm2 = tp->offload;
+ xp_cmd.parm3 = tp->offload;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ spin_unlock_bh(&tp->state_lock);
+ if(err < 0)
+ goto error_out;
+
+ typhoon_set_rx_mode(dev);
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ tp->card_state = Running;
+ smp_wmb();
+
+ iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(ioaddr);
+
+ return 0;
+
+error_out:
+ typhoon_reset(ioaddr, WaitNoSleep);
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+ return err;
+}
+
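+/* Quiesce a running 3XP: disable Rx, wait for outstanding transmits to
+ * drain, snapshot the statistics, halt the firmware, and finally reset
+ * the chip.
+ */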
+static int
+typhoon_stop_runtime(struct typhoon *tp, int wait_type)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct transmit_ring *txLo = &tp->txLoRing;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int i;
+
+ /* Disable interrupts early, since we can't schedule a poll
+ * when called with !netif_running(). This will be posted
+ * when we force the posting of the command.
+ */
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ /* Wait 1/2 sec for any outstanding transmits to complete.
+ * We'll clean up after the reset if this times out.
+ */
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
+ break;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ if(i == TYPHOON_WAIT_TIMEOUT)
+ printk(KERN_ERR
+ "%s: halt timed out waiting for Tx to complete\n",
+ tp->name);
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ /* save the statistics so when we bring the interface up again,
+ * the values reported to userspace are correct.
+ */
+ tp->card_state = Sleeping;
+ smp_wmb();
+ typhoon_do_get_stats(tp);
+ memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
+ printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
+ tp->name);
+
+ if(typhoon_reset(ioaddr, wait_type) < 0) {
+ printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
+ return -ETIMEDOUT;
+ }
+
+ /* cleanup any outstanding Tx packets */
+ if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
+ indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
+ typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
+ }
+
+ return 0;
+}
+
+static void
+typhoon_tx_timeout(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
+ printk(KERN_WARNING "%s: could not reset in tx timeout\n",
+ dev->name);
+ goto truly_dead;
+ }
+
+ /* If we ever start using the Hi ring, it will need cleaning too */
+ typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
+ typhoon_free_rx_rings(tp);
+
+ if(typhoon_start_runtime(tp) < 0) {
+ printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
+ dev->name);
+ goto truly_dead;
+ }
+
+ netif_wake_queue(dev);
+ return;
+
+truly_dead:
+ /* Reset the hardware, and turn off carrier to avoid more timeouts */
+ typhoon_reset(tp->ioaddr, NoWait);
+ netif_carrier_off(dev);
+}
+
+static int
+typhoon_open(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ int err;
+
+ err = typhoon_wakeup(tp, WaitSleep);
+ if(err < 0) {
+ printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
+ goto out_sleep;
+ }
+
+ err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if(err < 0)
+ goto out_sleep;
+
+ err = typhoon_start_runtime(tp);
+ if(err < 0)
+ goto out_irq;
+
+ netif_start_queue(dev);
+ return 0;
+
+out_irq:
+ free_irq(dev->irq, dev);
+
+out_sleep:
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ printk(KERN_ERR "%s: unable to reboot into sleep img\n",
+ dev->name);
+ typhoon_reset(tp->ioaddr, NoWait);
+ goto out;
+ }
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
+
+out:
+ return err;
+}
+
+static int
+typhoon_close(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if(typhoon_stop_runtime(tp, WaitSleep) < 0)
+ printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+
+ /* Make sure there is no irq handler running on a different CPU. */
+ typhoon_synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
+ printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+typhoon_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+
+ /* If we're down, we'll resume when the interface is brought up.
+ */
+ if(!netif_running(dev))
+ return 0;
+
+ if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
+ printk(KERN_ERR "%s: critical: could not wake up in resume\n",
+ dev->name);
+ goto reset;
+ }
+
+ if(typhoon_start_runtime(tp) < 0) {
+ printk(KERN_ERR "%s: critical: could not start runtime in "
+ "resume\n", dev->name);
+ goto reset;
+ }
+
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ return 0;
+
+reset:
+ typhoon_reset(tp->ioaddr, NoWait);
+ return -EBUSY;
+}
+
+static int
+typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+
+ /* If we're down, we're already suspended.
+ */
+ if(!netif_running(dev))
+ return 0;
+
+ spin_lock_bh(&tp->state_lock);
+ if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
+ spin_unlock_bh(&tp->state_lock);
+ printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
+ dev->name);
+ return -EBUSY;
+ }
+ spin_unlock_bh(&tp->state_lock);
+
+ netif_device_detach(dev);
+
+ if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
+ printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+ goto need_resume;
+ }
+
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+ goto need_resume;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
+ xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
+ xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ printk(KERN_ERR "%s: unable to set mac address in suspend\n",
+ dev->name);
+ goto need_resume;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
+ dev->name);
+ goto need_resume;
+ }
+
+ if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
+ printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+ goto need_resume;
+ }
+
+ return 0;
+
+need_resume:
+ typhoon_resume(pdev);
+ return -EBUSY;
+}
+
+static int
+typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return pci_enable_wake(pdev, state, enable);
+}
+#endif
+
+static int __devinit
+typhoon_test_mmio(struct pci_dev *pdev)
+{
+ void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
+ int mode = 0;
+ u32 val;
+
+ if(!ioaddr)
+ goto out;
+
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_HOST)
+ goto out_unmap;
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ /* Ok, see if we can change our interrupt status register by
+ * sending ourselves an interrupt. If so, then MMIO works.
+ * The 50usec delay is arbitrary -- it could probably be smaller.
+ */
+ val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if((val & TYPHOON_INTR_SELF) == 0) {
+ iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
+ ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ udelay(50);
+ val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if(val & TYPHOON_INTR_SELF)
+ mode = 1;
+ }
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
+ ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+
+out_unmap:
+ pci_iounmap(pdev, ioaddr);
+
+out:
+ if(!mode)
+ printk(KERN_INFO PFX "falling back to port IO\n");
+ return mode;
+}
+
+static int __devinit
+typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int did_version = 0;
+ struct net_device *dev;
+ struct typhoon *tp;
+ int card_id = (int) ent->driver_data;
+ void __iomem *ioaddr;
+ void *shared;
+ dma_addr_t shared_dma;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[3];
+ int i;
+ int err = 0;
+
+ if(!did_version++)
+ printk(KERN_INFO "%s", version);
+
+ dev = alloc_etherdev(sizeof(*tp));
+ if(dev == NULL) {
+ printk(ERR_PFX "%s: unable to alloc new net device\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto error_out;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = pci_enable_device(pdev);
+ if(err < 0) {
+ printk(ERR_PFX "%s: unable to enable device\n",
+ pci_name(pdev));
+ goto error_out_dev;
+ }
+
+ err = pci_set_mwi(pdev);
+ if(err < 0) {
+ printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
+ goto error_out_disable;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if(err < 0) {
+ printk(ERR_PFX "%s: No usable DMA configuration\n",
+ pci_name(pdev));
+ goto error_out_mwi;
+ }
+
+ /* sanity checks on IO and MMIO BARs
+ */
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+ printk(ERR_PFX
+ "%s: region #1 not a PCI IO resource, aborting\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(pci_resource_len(pdev, 0) < 128) {
+ printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ printk(ERR_PFX
+ "%s: region #1 not a PCI MMIO resource, aborting\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(pci_resource_len(pdev, 1) < 128) {
+ printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+
+ err = pci_request_regions(pdev, "typhoon");
+ if(err < 0) {
+ printk(ERR_PFX "%s: could not request regions\n",
+ pci_name(pdev));
+ goto error_out_mwi;
+ }
+
+ /* map our registers
+ */
+ if(use_mmio != 0 && use_mmio != 1)
+ use_mmio = typhoon_test_mmio(pdev);
+
+ ioaddr = pci_iomap(pdev, use_mmio, 128);
+ if (!ioaddr) {
+ printk(ERR_PFX "%s: cannot remap registers, aborting\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto error_out_regions;
+ }
+
+ /* allocate pci dma space for rx and tx descriptor rings
+ */
+ shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
+ &shared_dma);
+ if(!shared) {
+ printk(ERR_PFX "%s: could not allocate DMA memory\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto error_out_remap;
+ }
+
+ dev->irq = pdev->irq;
+ tp = netdev_priv(dev);
+ tp->shared = (struct typhoon_shared *) shared;
+ tp->shared_dma = shared_dma;
+ tp->pdev = pdev;
+ tp->tx_pdev = pdev;
+ tp->ioaddr = ioaddr;
+ tp->tx_ioaddr = ioaddr;
+ tp->dev = dev;
+
+ /* Init sequence:
+ * 1) Reset the adapter to clear any bad juju
+ * 2) Reload the sleep image
+ * 3) Boot the sleep image
+ * 4) Get the hardware address.
+ * 5) Put the card to sleep.
+ */
+ if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+ printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
+ err = -EIO;
+ goto error_out_dma;
+ }
+
+ /* Now that we've reset the 3XP and are sure it's not going to
+ * write all over memory, enable bus mastering, and save our
+ * state for resuming after a suspend.
+ */
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ /* dev->name is not valid until we register, but we need to
+ * use some common routines to initialize the card. So that those
+ * routines print the right name, we keep our own pointer to the name.
+ */
+ tp->name = pci_name(pdev);
+
+ typhoon_init_interface(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+ printk(ERR_PFX "%s: cannot read MAC address\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+
+ if(!is_valid_ether_addr(dev->dev_addr)) {
+ printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
+ "aborting\n", pci_name(pdev));
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ /* Read the Sleep Image version last, so the response is valid
+ * later when we print out the version reported.
+ */
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ printk(ERR_PFX "%s: Could not get Sleep Image version\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ tp->capabilities = typhoon_card_info[card_id].capabilities;
+ tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
+
+ /* Typhoon 1.0 Sleep Images return one response descriptor to the
+ * READ_VERSIONS command. Those versions are OK after waking up
+ * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
+ * seem to need a little extra help to get started. Since we don't
+ * know how to nudge it along, just kick it.
+ */
+ if(xp_resp[0].numDesc != 0)
+ tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+ printk(ERR_PFX "%s: cannot put adapter to sleep\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = typhoon_open;
+ dev->hard_start_xmit = typhoon_start_tx;
+ dev->stop = typhoon_close;
+ dev->set_multicast_list = typhoon_set_rx_mode;
+ dev->tx_timeout = typhoon_tx_timeout;
+ dev->poll = typhoon_poll;
+ dev->weight = 16;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->get_stats = typhoon_get_stats;
+ dev->set_mac_address = typhoon_set_mac_address;
+ dev->vlan_rx_register = typhoon_vlan_rx_register;
+ dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
+ SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+
+ /* We can handle scatter gather, up to 16 entries, and
+ * we can do IP checksumming (only version 4, doh...)
+ */
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_TSO;
+
+ err = register_netdev(dev);
+ if(err < 0)
+ goto error_out_reset;
+
+ /* fixup our local name */
+ tp->name = dev->name;
+
+ pci_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "%s: %s at %s 0x%lx, ",
+ dev->name, typhoon_card_info[card_id].name,
+ use_mmio ? "MMIO" : "IO", pci_resource_start(pdev, use_mmio));
+ for(i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x\n", dev->dev_addr[i]);
+
+ /* xp_resp still contains the response to the READ_VERSIONS command.
+ * For debugging, let the user know what version he has.
+ */
+ if(xp_resp[0].numDesc == 0) {
+ /* This is the Typhoon 1.0 type Sleep Image; the last 16 bits
+ * of the version are the month/day of the build.
+ */
+ u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
+ printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
+ "%02u/%02u/2000\n", dev->name, monthday >> 8,
+ monthday & 0xff);
+ } else if(xp_resp[0].numDesc == 2) {
+ /* This is the Typhoon 1.1+ type Sleep Image
+ */
+ u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
+ u8 *ver_string = (u8 *) &xp_resp[1];
+ ver_string[25] = 0;
+ printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
+ "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
+ (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
+ ver_string);
+ } else {
+ printk(KERN_WARNING "%s: Unknown Sleep Image version "
+ "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
+ le32_to_cpu(xp_resp[0].parm2));
+ }
+
+ return 0;
+
+error_out_reset:
+ typhoon_reset(ioaddr, NoWait);
+
+error_out_dma:
+ pci_free_consistent(pdev, sizeof(struct typhoon_shared),
+ shared, shared_dma);
+error_out_remap:
+ pci_iounmap(pdev, ioaddr);
+error_out_regions:
+ pci_release_regions(pdev);
+error_out_mwi:
+ pci_clear_mwi(pdev);
+error_out_disable:
+ pci_disable_device(pdev);
+error_out_dev:
+ free_netdev(dev);
+error_out:
+ return err;
+}
+
+static void __devexit
+typhoon_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ typhoon_reset(tp->ioaddr, NoWait);
+ pci_iounmap(pdev, tp->ioaddr);
+ pci_free_consistent(pdev, sizeof(struct typhoon_shared),
+ tp->shared, tp->shared_dma);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+static struct pci_driver typhoon_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = typhoon_pci_tbl,
+ .probe = typhoon_init_one,
+ .remove = __devexit_p(typhoon_remove_one),
+#ifdef CONFIG_PM
+ .suspend = typhoon_suspend,
+ .resume = typhoon_resume,
+ .enable_wake = typhoon_enable_wake,
+#endif
+};
+
+static int __init
+typhoon_init(void)
+{
+ return pci_module_init(&typhoon_driver);
+}
+
+static void __exit
+typhoon_cleanup(void)
+{
+ pci_unregister_driver(&typhoon_driver);
+}
+
+module_init(typhoon_init);
+module_exit(typhoon_cleanup);
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
new file mode 100644
index 000000000000..738ee71d8dfb
--- /dev/null
+++ b/drivers/net/typhoon.h
@@ -0,0 +1,619 @@
+/* typhoon.h: chip info for the 3Com 3CR990 family of controllers */
+/*
+ Written 2002-2003 by David Dillow <dave@thedillows.org>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This software is available on a public web site. It may enable
+ cryptographic capabilities of the 3Com hardware, and may be
+ exported from the United States under License Exception "TSU"
+ pursuant to 15 C.F.R. Section 740.13(e).
+
+ This work was funded by the National Library of Medicine under
+ the Department of Energy project number 0274DD06D1 and NLM project
+ number Y1-LM-2015-01.
+*/
+
+/* All Typhoon ring positions are specified in bytes, and point to the
+ * first "clean" entry in the ring -- ie the next entry we use for whatever
+ * purpose.
+ */
+
+/* The Typhoon basic ring
+ * ringBase: where this ring lives (our virtual address)
+ * lastWrite: the next entry we'll use
+ */
+struct basic_ring {
+ u8 *ringBase;
+ u32 lastWrite;
+};
+
+/* The Typhoon transmit ring -- same as a basic ring, plus:
+ * lastRead: where we're at in regard to cleaning up the ring
+ * writeRegister: register to use for writing (different for Hi & Lo rings)
+ */
+struct transmit_ring {
+ u8 *ringBase;
+ u32 lastWrite;
+ u32 lastRead;
+ int writeRegister;
+};
+
+/* The host<->Typhoon ring index structure
+ * This indicates the current positions in the rings
+ *
+ * All values must be in little endian format for the 3XP
+ *
+ * rxHiCleared: entry we've cleared to in the Hi receive ring
+ * rxLoCleared: entry we've cleared to in the Lo receive ring
+ * rxBuffReady: next entry we'll put a free buffer in
+ * respCleared: entry we've cleared to in the response ring
+ *
+ * txLoCleared: entry the NIC has cleared to in the Lo transmit ring
+ * txHiCleared: entry the NIC has cleared to in the Hi transmit ring
+ * rxLoReady: entry the NIC has filled to in the Lo receive ring
+ * rxBuffCleared: entry the NIC has cleared in the free buffer ring
+ * cmdCleared: entry the NIC has cleared in the command ring
+ * respReady: entry the NIC has filled to in the response ring
+ * rxHiReady: entry the NIC has filled to in the Hi receive ring
+ */
+struct typhoon_indexes {
+ /* The first four are written by the host, and read by the NIC */
+ volatile u32 rxHiCleared;
+ volatile u32 rxLoCleared;
+ volatile u32 rxBuffReady;
+ volatile u32 respCleared;
+
+ /* The remaining are written by the NIC, and read by the host */
+ volatile u32 txLoCleared;
+ volatile u32 txHiCleared;
+ volatile u32 rxLoReady;
+ volatile u32 rxBuffCleared;
+ volatile u32 cmdCleared;
+ volatile u32 respReady;
+ volatile u32 rxHiReady;
+} __attribute__ ((packed));
+
+/* The host<->Typhoon interface
+ * Our means of communicating where things are
+ *
+ * All values must be in little endian format for the 3XP
+ *
+ * ringIndex: 64 bit bus address of the index structure
+ * txLoAddr: 64 bit bus address of the Lo transmit ring
+ * txLoSize: size (in bytes) of the Lo transmit ring
+ * txHi*: as above for the Hi priority transmit ring
+ * rxLo*: as above for the Lo priority receive ring
+ * rxBuff*: as above for the free buffer ring
+ * cmd*: as above for the command ring
+ * resp*: as above for the response ring
+ * zeroAddr: 64 bit bus address of a zero word (for DMA)
+ * rxHi*: as above for the Hi Priority receive ring
+ *
+ * While there is room for 64 bit addresses, current versions of the 3XP
+ * only do 32 bit addresses, so the *Hi for each of the above will always
+ * be zero.
+ */
+struct typhoon_interface {
+ u32 ringIndex;
+ u32 ringIndexHi;
+ u32 txLoAddr;
+ u32 txLoAddrHi;
+ u32 txLoSize;
+ u32 txHiAddr;
+ u32 txHiAddrHi;
+ u32 txHiSize;
+ u32 rxLoAddr;
+ u32 rxLoAddrHi;
+ u32 rxLoSize;
+ u32 rxBuffAddr;
+ u32 rxBuffAddrHi;
+ u32 rxBuffSize;
+ u32 cmdAddr;
+ u32 cmdAddrHi;
+ u32 cmdSize;
+ u32 respAddr;
+ u32 respAddrHi;
+ u32 respSize;
+ u32 zeroAddr;
+ u32 zeroAddrHi;
+ u32 rxHiAddr;
+ u32 rxHiAddrHi;
+ u32 rxHiSize;
+} __attribute__ ((packed));
+
+/* The Typhoon transmit/fragment descriptor
+ *
+ * A packet is described by a packet descriptor, followed by option descriptors,
+ * if any, then one or more fragment descriptors.
+ *
+ * Packet descriptor:
+ * flags: Descriptor type
+ * len: zero, or length of this packet
+ * addr*: 8 bytes of opaque data to the firmware -- for skb pointer
+ * processFlags: Determine offload tasks to perform on this packet.
+ *
+ * Fragment descriptor:
+ * flags: Descriptor type
+ * len: length of this fragment
+ * addr: low bytes of DMA address for this part of the packet
+ * addrHi: hi bytes of DMA address for this part of the packet
+ * processFlags: must be zero
+ *
+ * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux
+ * driver uses it.
+ */
+struct tx_desc {
+ u8 flags;
+#define TYPHOON_TYPE_MASK 0x07
+#define TYPHOON_FRAG_DESC 0x00
+#define TYPHOON_TX_DESC 0x01
+#define TYPHOON_CMD_DESC 0x02
+#define TYPHOON_OPT_DESC 0x03
+#define TYPHOON_RX_DESC 0x04
+#define TYPHOON_RESP_DESC 0x05
+#define TYPHOON_OPT_TYPE_MASK 0xf0
+#define TYPHOON_OPT_IPSEC 0x00
+#define TYPHOON_OPT_TCP_SEG 0x10
+#define TYPHOON_CMD_RESPOND 0x40
+#define TYPHOON_RESP_ERROR 0x40
+#define TYPHOON_RX_ERROR 0x40
+#define TYPHOON_DESC_VALID 0x80
+ u8 numDesc;
+ u16 len;
+ u32 addr;
+ u32 addrHi;
+ u32 processFlags;
+#define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001)
+#define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008)
+#define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010)
+#define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020)
+#define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040)
+#define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080)
+#define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100)
+#define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00)
+#define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000)
+#define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000)
+#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
+} __attribute__ ((packed));
+
+/* The TCP Segmentation offload option descriptor
+ *
+ * flags: descriptor type
+ * numDesc: must be 1
+ * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor,
+ * 13 is last TSO descriptor; set both if there is only one TSO descriptor
+ * respAddrLo: low bytes of address of the bytesTx field of this descriptor
+ * bytesTx: total number of bytes in this TSO request
+ * status: 0 on completion
+ */
+struct tcpopt_desc {
+ u8 flags;
+ u8 numDesc;
+ u16 mss_flags;
+#define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000)
+#define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000)
+ u32 respAddrLo;
+ u32 bytesTx;
+ u32 status;
+} __attribute__ ((packed));
+
+/* The IPSEC Offload descriptor
+ *
+ * flags: descriptor type
+ * numDesc: must be 1
+ * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV
+ * sa1, sa2: Security Association IDs for this packet
+ * reserved: set to 0
+ */
+struct ipsec_desc {
+ u8 flags;
+ u8 numDesc;
+ u16 ipsecFlags;
+#define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000)
+#define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001)
+ u32 sa1;
+ u32 sa2;
+ u32 reserved;
+} __attribute__ ((packed));
+
+/* The Typhoon receive descriptor (Updated by NIC)
+ *
+ * flags: Descriptor type, error indication
+ * numDesc: Always zero
+ * frameLen: the size of the packet received
+ * addr: low 32 bits of the virtual addr passed in for this buffer
+ * addrHi: high 32 bits of the virtual addr passed in for this buffer
+ * rxStatus: Error if set in flags, otherwise result of offload processing
+ * filterResults: results of filtering on packet, not used
+ * ipsecResults: Results of IPSEC processing
+ * vlanTag: the 802.1q TCI from the packet
+ */
+struct rx_desc {
+ u8 flags;
+ u8 numDesc;
+ u16 frameLen;
+ u32 addr;
+ u32 addrHi;
+ u32 rxStatus;
+#define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000)
+#define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001)
+#define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003)
+#define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005)
+#define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006)
+#define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007)
+#define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003)
+#define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000)
+#define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001)
+#define TYPHOON_RX_PROTO_IPX __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008)
+#define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010)
+#define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020)
+#define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040)
+#define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080)
+#define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100)
+#define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200)
+#define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400)
+ u16 filterResults;
+#define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff)
+#define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000)
+ u16 ipsecResults;
+#define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001)
+#define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002)
+#define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004)
+#define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008)
+#define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010)
+#define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020)
+#define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040)
+#define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080)
+#define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100)
+#define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200)
+ u32 vlanTag;
+} __attribute__ ((packed));
+
+/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
+ *
+ * physAddr: low 32 bits of the bus address of the buffer
+ * physAddrHi: high 32 bits of the bus address of the buffer, always zero
+ * virtAddr: low 32 bits of the skb address
+ * virtAddrHi: high 32 bits of the skb address, always zero
+ *
+ * the virt* address is basically two 32 bit cookies, just passed back
+ * from the NIC
+ */
+struct rx_free {
+ u32 physAddr;
+ u32 physAddrHi;
+ u32 virtAddr;
+ u32 virtAddrHi;
+} __attribute__ ((packed));
+
+/* The Typhoon command descriptor, used for commands and responses
+ *
+ * flags: descriptor type
+ * numDesc: number of descriptors following in this command/response,
+ * ie, zero for a one descriptor command
+ * cmd: the command
+ * seqNo: sequence number (unused)
+ * parm1: use varies by command
+ * parm2: use varies by command
+ * parm3: use varies by command
+ */
+struct cmd_desc {
+ u8 flags;
+ u8 numDesc;
+ u16 cmd;
+#define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001)
+#define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002)
+#define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003)
+#define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004)
+#define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005)
+#define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007)
+#define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013)
+#define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a)
+#define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b)
+#define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023)
+#define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025)
+#define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026)
+#define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027)
+#define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b)
+#define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034)
+#define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035)
+#define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043)
+#define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045)
+#define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049)
+#define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f)
+#define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057)
+#define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d)
+#define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e)
+#define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067)
+#define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069)
+ u16 seqNo;
+ u16 parm1;
+ u32 parm2;
+ u32 parm3;
+} __attribute__ ((packed));
+
+/* The Typhoon response descriptor, see command descriptor for details
+ */
+struct resp_desc {
+ u8 flags;
+ u8 numDesc;
+ u16 cmd;
+ u16 seqNo;
+ u16 parm1;
+ u32 parm2;
+ u32 parm3;
+} __attribute__ ((packed));
+
+#define INIT_COMMAND_NO_RESPONSE(x, command) \
+ do { struct cmd_desc *_ptr = (x); \
+ memset(_ptr, 0, sizeof(struct cmd_desc)); \
+ _ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \
+ _ptr->cmd = command; \
+ } while(0)
+
+/* We set seqNo to 1 if we're expecting a response from this command */
+#define INIT_COMMAND_WITH_RESPONSE(x, command) \
+ do { struct cmd_desc *_ptr = (x); \
+ memset(_ptr, 0, sizeof(struct cmd_desc)); \
+ _ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \
+ _ptr->flags |= TYPHOON_DESC_VALID; \
+ _ptr->cmd = command; \
+ _ptr->seqNo = 1; \
+ } while(0)
+
+/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
+ */
+#define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001)
+#define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002)
+#define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004)
+#define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008)
+#define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010)
+
+/* TYPHOON_CMD_READ_STATS response format
+ */
+struct stats_resp {
+ u8 flags;
+ u8 numDesc;
+ u16 cmd;
+ u16 seqNo;
+ u16 unused;
+ u32 txPackets;
+ u64 txBytes;
+ u32 txDeferred;
+ u32 txLateCollisions;
+ u32 txCollisions;
+ u32 txCarrierLost;
+ u32 txMultipleCollisions;
+ u32 txExcessiveCollisions;
+ u32 txFifoUnderruns;
+ u32 txMulticastTxOverflows;
+ u32 txFiltered;
+ u32 rxPacketsGood;
+ u64 rxBytesGood;
+ u32 rxFifoOverruns;
+ u32 BadSSD;
+ u32 rxCrcErrors;
+ u32 rxOversized;
+ u32 rxBroadcast;
+ u32 rxMulticast;
+ u32 rxOverflow;
+ u32 rxFiltered;
+ u32 linkStatus;
+#define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000)
+ u32 unused2;
+ u32 unused3;
+} __attribute__ ((packed));
+
+/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
+ */
+#define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000)
+#define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001)
+#define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002)
+#define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003)
+#define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004)
+
+/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1)
+ */
+#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004)
+#define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010)
+#define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020)
+#define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400)
+#define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800)
+
+/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
+ */
+#define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000)
+#define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001)
+#define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002)
+
+/* TYPHOON_CMD_CREATE_SA descriptor and settings
+ */
+struct sa_descriptor {
+ u8 flags;
+ u8 numDesc;
+ u16 cmd;
+ u16 seqNo;
+ u16 mode;
+#define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000)
+#define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001)
+#define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002)
+ u8 hashFlags;
+#define TYPHOON_SA_HASH_ENABLE 0x01
+#define TYPHOON_SA_HASH_SHA1 0x02
+#define TYPHOON_SA_HASH_MD5 0x04
+ u8 direction;
+#define TYPHOON_SA_DIR_RX 0x00
+#define TYPHOON_SA_DIR_TX 0x01
+ u8 encryptionFlags;
+#define TYPHOON_SA_ENCRYPT_ENABLE 0x01
+#define TYPHOON_SA_ENCRYPT_DES 0x02
+#define TYPHOON_SA_ENCRYPT_3DES 0x00
+#define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00
+#define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04
+#define TYPHOON_SA_ENCRYPT_CBC 0x08
+#define TYPHOON_SA_ENCRYPT_ECB 0x00
+ u8 specifyIndex;
+#define TYPHOON_SA_SPECIFY_INDEX 0x01
+#define TYPHOON_SA_GENERATE_INDEX 0x00
+ u32 SPI;
+ u32 destAddr;
+ u32 destMask;
+ u8 integKey[20];
+ u8 confKey[24];
+ u32 index;
+ u32 unused;
+ u32 unused2;
+} __attribute__ ((packed));
+
+/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
+ * This is all for IPv4.
+ */
+#define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002)
+#define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004)
+#define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008)
+#define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010)
+#define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020)
+#define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040)
+#define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080)
+#define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100)
+#define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200)
+
+/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
+ */
+#define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01)
+#define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02)
+#define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04)
+#define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08)
+
+/* These are used to load the firmware image on the NIC
+ */
+struct typhoon_file_header {
+ u8 tag[8];
+ u32 version;
+ u32 numSections;
+ u32 startAddr;
+ u32 hmacDigest[5];
+} __attribute__ ((packed));
+
+struct typhoon_section_header {
+ u32 len;
+ u16 checksum;
+ u16 reserved;
+ u32 startAddr;
+} __attribute__ ((packed));
+
+/* The Typhoon Register offsets
+ */
+#define TYPHOON_REG_SOFT_RESET 0x00
+#define TYPHOON_REG_INTR_STATUS 0x04
+#define TYPHOON_REG_INTR_ENABLE 0x08
+#define TYPHOON_REG_INTR_MASK 0x0c
+#define TYPHOON_REG_SELF_INTERRUPT 0x10
+#define TYPHOON_REG_HOST2ARM7 0x14
+#define TYPHOON_REG_HOST2ARM6 0x18
+#define TYPHOON_REG_HOST2ARM5 0x1c
+#define TYPHOON_REG_HOST2ARM4 0x20
+#define TYPHOON_REG_HOST2ARM3 0x24
+#define TYPHOON_REG_HOST2ARM2 0x28
+#define TYPHOON_REG_HOST2ARM1 0x2c
+#define TYPHOON_REG_HOST2ARM0 0x30
+#define TYPHOON_REG_ARM2HOST3 0x34
+#define TYPHOON_REG_ARM2HOST2 0x38
+#define TYPHOON_REG_ARM2HOST1 0x3c
+#define TYPHOON_REG_ARM2HOST0 0x40
+
+#define TYPHOON_REG_BOOT_DATA_LO TYPHOON_REG_HOST2ARM5
+#define TYPHOON_REG_BOOT_DATA_HI TYPHOON_REG_HOST2ARM4
+#define TYPHOON_REG_BOOT_DEST_ADDR TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_BOOT_CHECKSUM TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_BOOT_LENGTH TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR TYPHOON_REG_HOST2ARM1
+#define TYPHOON_REG_DOWNLOAD_HMAC_0 TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_DOWNLOAD_HMAC_1 TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_DOWNLOAD_HMAC_2 TYPHOON_REG_HOST2ARM4
+#define TYPHOON_REG_DOWNLOAD_HMAC_3 TYPHOON_REG_HOST2ARM5
+#define TYPHOON_REG_DOWNLOAD_HMAC_4 TYPHOON_REG_HOST2ARM6
+
+#define TYPHOON_REG_BOOT_RECORD_ADDR_HI TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_BOOT_RECORD_ADDR_LO TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_TX_LO_READY TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_CMD_READY TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_TX_HI_READY TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_COMMAND TYPHOON_REG_HOST2ARM0
+#define TYPHOON_REG_HEARTBEAT TYPHOON_REG_ARM2HOST3
+#define TYPHOON_REG_STATUS TYPHOON_REG_ARM2HOST0
+
+/* 3XP Reset values (TYPHOON_REG_SOFT_RESET)
+ */
+#define TYPHOON_RESET_ALL 0x7f
+#define TYPHOON_RESET_NONE 0x00
+
+/* 3XP irq bits (TYPHOON_REG_INTR{STATUS,ENABLE,MASK})
+ *
+ * Some of these came from OpenBSD, as the 3Com docs have it wrong
+ * (INTR_SELF) or don't list it at all (INTR_*_ABORT)
+ *
+ * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq
+ * about every 8ms, so don't do it.
+ */
+#define TYPHOON_INTR_HOST_INT 0x00000001
+#define TYPHOON_INTR_ARM2HOST0 0x00000002
+#define TYPHOON_INTR_ARM2HOST1 0x00000004
+#define TYPHOON_INTR_ARM2HOST2 0x00000008
+#define TYPHOON_INTR_ARM2HOST3 0x00000010
+#define TYPHOON_INTR_DMA0 0x00000020
+#define TYPHOON_INTR_DMA1 0x00000040
+#define TYPHOON_INTR_DMA2 0x00000080
+#define TYPHOON_INTR_DMA3 0x00000100
+#define TYPHOON_INTR_MASTER_ABORT 0x00000200
+#define TYPHOON_INTR_TARGET_ABORT 0x00000400
+#define TYPHOON_INTR_SELF 0x00000800
+#define TYPHOON_INTR_RESERVED 0xfffff000
+
+#define TYPHOON_INTR_BOOTCMD TYPHOON_INTR_ARM2HOST0
+
+#define TYPHOON_INTR_ENABLE_ALL 0xffffffef
+#define TYPHOON_INTR_ALL 0xffffffff
+#define TYPHOON_INTR_NONE 0x00000000
+
+/* The commands for the 3XP chip (TYPHOON_REG_COMMAND)
+ */
+#define TYPHOON_BOOTCMD_BOOT 0x00
+#define TYPHOON_BOOTCMD_WAKEUP 0xfa
+#define TYPHOON_BOOTCMD_DNLD_COMPLETE 0xfb
+#define TYPHOON_BOOTCMD_SEG_AVAILABLE 0xfc
+#define TYPHOON_BOOTCMD_RUNTIME_IMAGE 0xfd
+#define TYPHOON_BOOTCMD_REG_BOOT_RECORD 0xff
+
+/* 3XP Status values (TYPHOON_REG_STATUS)
+ */
+#define TYPHOON_STATUS_WAITING_FOR_BOOT 0x07
+#define TYPHOON_STATUS_SECOND_INIT 0x08
+#define TYPHOON_STATUS_RUNNING 0x09
+#define TYPHOON_STATUS_WAITING_FOR_HOST 0x0d
+#define TYPHOON_STATUS_WAITING_FOR_SEGMENT 0x10
+#define TYPHOON_STATUS_SLEEPING 0x11
+#define TYPHOON_STATUS_HALTED 0x14
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
new file mode 100644
index 000000000000..7b57d552094a
--- /dev/null
+++ b/drivers/net/via-rhine.c
@@ -0,0 +1,2035 @@
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+ Written 1998-2001 by Donald Becker.
+
+ Current Maintainer: Roger Luethi <rl@hellgate.ch>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86C100A Rhine-I.
+ It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
+ and management NIC 6105M).
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This driver contains some changes from the original Donald Becker
+ version. He may or may not be interested in bug reports on this
+ code. You can find his versions at:
+ http://www.scyld.com/network/via-rhine.html
+
+
+ Linux kernel version history:
+
+ LK1.1.0:
+ - Jeff Garzik: softnet 'n stuff
+
+ LK1.1.1:
+ - Justin Guyett: softnet and locking fixes
+ - Jeff Garzik: use PCI interface
+
+ LK1.1.2:
+ - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
+
+ LK1.1.3:
+ - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
+ code) update "Theory of Operation" with
+ softnet/locking changes
+ - Dave Miller: PCI DMA and endian fixups
+ - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
+
+ LK1.1.4:
+ - Urban Widmark: fix gcc 2.95.2 problem and
+ remove writel's to fixed address 0x7c
+
+ LK1.1.5:
+ - Urban Widmark: mdio locking, bounce buffer changes
+ merges from Beckers 1.05 version
+ added netif_running_on/off support
+
+ LK1.1.6:
+ - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
+ set netif_running_on/off on startup, del_timer_sync
+
+ LK1.1.7:
+ - Manfred Spraul: added reset into tx_timeout
+
+ LK1.1.9:
+ - Urban Widmark: merges from Beckers 1.10 version
+ (media selection + eeprom reload)
+ - David Vrabel: merges from D-Link "1.11" version
+ (disable WOL and PME on startup)
+
+ LK1.1.10:
+ - Manfred Spraul: use "singlecopy" for unaligned buffers
+ don't allocate bounce buffers for !ReqTxAlign cards
+
+ LK1.1.11:
+ - David Woodhouse: Set dev->base_addr before the first time we call
+ wait_for_reset(). It's a lot happier that way.
+ Free np->tx_bufs only if we actually allocated it.
+
+ LK1.1.12:
+ - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
+
+ LK1.1.13 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+ LK1.1.14 (Ivan G.):
+ - fixes comments for Rhine-III
+ - removes W_MAX_TIMEOUT (unused)
+ - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
+ is R-I and has Davicom chip, flag is referenced in kernel driver)
+ - sends chip_id as a parameter to wait_for_reset since np is not
+ initialized on first call
+ - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
+ for Rhine-III's (documentation says same bit is correct)
+ - transmit frame queue message is off by one - fixed
+ - adds IntrNormalSummary to "Something Wicked" exclusion list
+ so normal interrupts will not trigger the message (src: Donald Becker)
+ (Roger Luethi)
+ - show confused chip where to continue after Tx error
+ - location of collision counter is chip specific
+ - allow selecting backoff algorithm (module parameter)
+
+ LK1.1.15 (jgarzik):
+ - Use new MII lib helper generic_mii_ioctl
+
+ LK1.1.16 (Roger Luethi)
+ - Etherleak fix
+ - Handle Tx buffer underrun
+ - Fix bugs in full duplex handling
+ - New reset code uses "force reset" cmd on Rhine-II
+ - Various clean ups
+
+ LK1.1.17 (Roger Luethi)
+ - Fix race in via_rhine_start_tx()
+ - On errors, wait for Tx engine to turn off before scavenging
+ - Handle Tx descriptor write-back race on Rhine-II
+ - Force flushing for PCI posted writes
+ - More reset code changes
+
+ LK1.1.18 (Roger Luethi)
+ - No filtering multicast in promisc mode (Edward Peng)
+ - Fix for Rhine-I Tx timeouts
+
+ LK1.1.19 (Roger Luethi)
+ - Increase Tx threshold for unspecified errors
+
+ LK1.2.0-2.6 (Roger Luethi)
+ - Massive clean-up
+ - Rewrite PHY, media handling (remove options, full_duplex, backoff)
+ - Fix Tx engine race for good
+
+*/
+
+#define DRV_NAME "via-rhine"
+#define DRV_VERSION "1.2.0-2.6"
+#define DRV_RELDATE "June-10-2004"
+
+
+/* A few user-configurable values.
+ These may be modified when a driver module is loaded. */
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/*
+ * In case you are looking for 'options[]' or 'full_duplex[]', they
+ * are gone. Use ethtool(8) instead.
+ */
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Rhine has a 64 element 8390-like hash table. */
+static const int multicast_filter_limit = 32;
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 16
+
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
+
+/* This driver was written to use PCI memory space. Some early versions
+ of the Rhine may only work correctly with I/O space accesses. */
+#ifdef CONFIG_VIA_RHINE_MMIO
+#define USE_MMIO
+#else
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver supports the VIA VT86C100A (Rhine-I), VT6102 (Rhine-II) and
+VT6105/VT6105M (Rhine-III) PCI Fast Ethernet controllers.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. For most of these settings, this driver assumes that they are
+correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of rhine_rx().
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->priv->lock spinlock. The other thread is the interrupt handler, which
+is single threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. It locks the
+dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
+is not available it stops the transmit queue by calling netif_stop_queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. If enough entries in the Tx ring
+become available again, the transmit queue is woken up if it was stopped.
+
+IV. Notes
+
+IVb. References
+
+Preliminary VT86C100A manual from http://www.via.com.tw/
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
+
+IVc. Errata
+
+The VT86C100A manual is not a reliable source of information.
+The 3043 chip does not handle unaligned transmit or receive buffers, resulting
+in significant performance degradation for bounce buffer copies on transmit
+and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+*/
+
+
+/* This table drives the PCI probe routines. It's mostly boilerplate in all
+ of the drivers, and will likely be provided by some future kernel.
+ Note the matching code -- the first table entry matches all 56** cards but
+ the second only the 1234 card.
+*/
+
+enum rhine_revs {
+ VT86C100A = 0x00,
+ VTunknown0 = 0x20,
+ VT6102 = 0x40,
+ VT8231 = 0x50, /* Integrated MAC */
+ VT8233 = 0x60, /* Integrated MAC */
+ VT8235 = 0x74, /* Integrated MAC */
+ VT8237 = 0x78, /* Integrated MAC */
+ VTunknown1 = 0x7C,
+ VT6105 = 0x80,
+ VT6105_B0 = 0x83,
+ VT6105L = 0x8A,
+ VT6107 = 0x8C,
+ VTunknown2 = 0x8E,
+ VT6105M = 0x90, /* Management adapter */
+};
+
+enum rhine_quirks {
+ rqWOL = 0x0001, /* Wake-On-LAN support */
+ rqForceReset = 0x0002,
+ rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
+ rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
+ rqRhineI = 0x0100, /* See comment below */
+};
+/*
+ * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
+ * MMIO as well as for the collision counter and the Tx FIFO underflow
+ * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
+ */
+
+/* Beware of PCI posted writes */
+#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
+
+static struct pci_device_id rhine_pci_tbl[] =
+{
+ {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
+ {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
+ {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
+ {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+
+
+/* Offsets to the device registers. */
+enum register_offsets {
+ StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ ChipCmd1=0x09,
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+ ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
+ RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
+ StickyHW=0x83, IntrStatus2=0x84,
+ WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
+ WOLcrClr1=0xA6, WOLcgClr=0xA7,
+ PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
+};
+
+/* Bits in ConfigD */
+enum backoff_bits {
+ BackOptional=0x01, BackModify=0x02,
+ BackCaptureEffect=0x04, BackRandom=0x08
+};
+
+#ifdef USE_MMIO
+/* Registers we check to make sure MMIO and PIO accesses yield the same values. */
+static const int mmio_verify_registers[] = {
+ RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
+ 0
+};
+#endif
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+ IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
+ IntrPCIErr=0x0040,
+ IntrStatsMax=0x0080, IntrRxEarly=0x0100,
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+ IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
+ IntrTxErrSummary=0x082218,
+};
+
+/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
+enum wol_bits {
+ WOLucast = 0x10,
+ WOLmagic = 0x20,
+ WOLbmcast = 0x30,
+ WOLlnkon = 0x40,
+ WOLlnkoff = 0x80,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ s32 rx_status;
+ u32 desc_length; /* Chain flag, Buffer/frame length */
+ u32 addr;
+ u32 next_desc;
+};
+struct tx_desc {
+ s32 tx_status;
+ u32 desc_length; /* Chain flag, Tx Config, Frame length */
+ u32 addr;
+ u32 next_desc;
+};
+
+/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
+#define TXDESC 0x00e08000
+
+enum rx_status_bits {
+ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
+};
+
+/* Bits in *_desc.*_status */
+enum desc_status_bits {
+ DescOwn=0x80000000
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+ CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
+ CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
+ Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
+ Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
+};
+
+struct rhine_private {
+ /* Descriptor rings */
+ struct rx_desc *rx_ring;
+ struct tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
+
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
+
+ /* Tx bounce buffers */
+ unsigned char *tx_buf[TX_RING_SIZE];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+
+ struct pci_dev *pdev;
+ long pioaddr;
+ struct net_device_stats stats;
+ spinlock_t lock;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ u32 quirks;
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ u8 wolopts;
+
+ u8 tx_thresh, rx_thresh;
+
+ struct mii_if_info mii_if;
+ void __iomem *base;
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int rhine_open(struct net_device *dev);
+static void rhine_tx_timeout(struct net_device *dev);
+static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void rhine_tx(struct net_device *dev);
+static void rhine_rx(struct net_device *dev);
+static void rhine_error(struct net_device *dev, int intr_status);
+static void rhine_set_rx_mode(struct net_device *dev);
+static struct net_device_stats *rhine_get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct ethtool_ops netdev_ethtool_ops;
+static int rhine_close(struct net_device *dev);
+static void rhine_shutdown (struct device *gdev);
+
+#define RHINE_WAIT_FOR(condition) do { \
+ int i=1024; \
+ while (!(condition) && --i) \
+ ; \
+ if (debug > 1 && i < 512) \
+ printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
+ DRV_NAME, 1024-i, __func__, __LINE__); \
+} while(0)
+
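+/*
+ * Read the 16 bit interrupt status and, on chips with the Tx descriptor
+ * write-back race (Rhine-II), fold IntrStatus2 into the upper bits.
+ */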
+static inline u32 get_intr_status(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 intr_status;
+
+ intr_status = ioread16(ioaddr + IntrStatus);
+ /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
+ if (rp->quirks & rqStatusWBRace)
+ intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
+ return intr_status;
+}
+
+/*
+ * Get power related registers into sane state.
+ * Notify user about past WOL event.
+ */
+static void rhine_power_init(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u16 wolstat;
+
+ if (rp->quirks & rqWOL) {
+ /* Make sure chip is in power state D0 */
+ iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
+
+ /* Disable "force PME-enable" */
+ iowrite8(0x80, ioaddr + WOLcgClr);
+
+ /* Clear power-event config bits (WOL) */
+ iowrite8(0xFF, ioaddr + WOLcrClr);
+ /* More recent cards can manage two additional patterns */
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x03, ioaddr + WOLcrClr1);
+
+ /* Save power-event status bits */
+ wolstat = ioread8(ioaddr + PwrcsrSet);
+ if (rp->quirks & rq6patterns)
+ wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
+
+ /* Clear power-event status bits */
+ iowrite8(0xFF, ioaddr + PwrcsrClr);
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x03, ioaddr + PwrcsrClr1);
+
+ if (wolstat) {
+ char *reason;
+ switch (wolstat) {
+ case WOLmagic:
+ reason = "Magic packet";
+ break;
+ case WOLlnkon:
+ reason = "Link went up";
+ break;
+ case WOLlnkoff:
+ reason = "Link went down";
+ break;
+ case WOLucast:
+ reason = "Unicast packet";
+ break;
+ case WOLbmcast:
+ reason = "Multicast/broadcast packet";
+ break;
+ default:
+ reason = "Unknown";
+ }
+ printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
+ DRV_NAME, reason);
+ }
+ }
+}
+
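+/*
+ * Soft-reset the chip; fall back to the force-reset command (where
+ * available) if the first attempt does not complete.
+ */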
+static void rhine_chip_reset(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
+ IOSYNC;
+
+ if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
+ printk(KERN_INFO "%s: Reset not complete yet. "
+ "Trying harder.\n", DRV_NAME);
+
+ /* Force reset */
+ if (rp->quirks & rqForceReset)
+ iowrite8(0x40, ioaddr + MiscCmd);
+
+ /* Reset can take somewhat longer (rare) */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
+ }
+
+ if (debug > 1)
+ printk(KERN_INFO "%s: Reset %s.\n", dev->name,
+ (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
+ "failed" : "succeeded");
+}
+
+#ifdef USE_MMIO
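+/* Enable memory-mapped I/O by setting the chip-specific configuration bit. */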
+static void enable_mmio(long pioaddr, u32 quirks)
+{
+ int n;
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved ... */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
+}
+#endif
+
+/*
+ * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
+ * (plus 0x6C for Rhine-I/II)
+ */
+static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ outb(0x20, pioaddr + MACRegEEcsr);
+ RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+
+#ifdef USE_MMIO
+ /*
+ * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
+ * MMIO. If reloading EEPROM was done first this could be avoided, but
+ * it is not known if that still works with the "win98-reboot" problem.
+ */
+ enable_mmio(pioaddr, rp->quirks);
+#endif
+
+ /* Turn off EEPROM-controlled wake-up (magic packet) */
+ if (rp->quirks & rqWOL)
+ iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rhine_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ rhine_interrupt(dev->irq, (void *)dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
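+/* Bring the chip to a known state: soft reset, then reload the EEPROM bytes. */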
+static void rhine_hw_init(struct net_device *dev, long pioaddr)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ rhine_chip_reset(dev);
+
+ /* Rhine-I needs extra time to recuperate before EEPROM reload */
+ if (rp->quirks & rqRhineI)
+ msleep(5);
+
+ /* Reload EEPROM controlled bytes cleared by soft reset */
+ rhine_reload_eeprom(pioaddr, dev);
+}
+
+static int __devinit rhine_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct rhine_private *rp;
+ int i, rc;
+ u8 pci_rev;
+ u32 quirks;
+ long pioaddr;
+ long memaddr;
+ void __iomem *ioaddr;
+ int io_size, phy_id;
+ const char *name;
+#ifdef USE_MMIO
+ int bar = 1;
+#else
+ int bar = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
+
+ io_size = 256;
+ phy_id = 0;
+ quirks = 0;
+ name = "Rhine";
+ if (pci_rev < VTunknown0) {
+ quirks = rqRhineI;
+ io_size = 128;
+ }
+ else if (pci_rev >= VT6102) {
+ quirks = rqWOL | rqForceReset;
+ if (pci_rev < VT6105) {
+ name = "Rhine II";
+ quirks |= rqStatusWBRace; /* Rhine-II exclusive */
+ }
+ else {
+ phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
+ if (pci_rev >= VT6105_B0)
+ quirks |= rq6patterns;
+ if (pci_rev < VT6105M)
+ name = "Rhine III";
+ else
+ name = "Rhine III (Management Adapter)";
+ }
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ /* this should always be supported */
+ rc = pci_set_dma_mask(pdev, 0xffffffff);
+ if (rc) {
+ printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
+ "the card!?\n");
+ goto err_out;
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ printk(KERN_ERR "Insufficient PCI resources, aborting\n");
+ goto err_out;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof(struct rhine_private));
+ if (!dev) {
+ rc = -ENOMEM;
+ printk(KERN_ERR "alloc_etherdev failed\n");
+ goto err_out;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ rp = netdev_priv(dev);
+ rp->quirks = quirks;
+ rp->pioaddr = pioaddr;
+ rp->pdev = pdev;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_free_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
+ "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+#ifdef USE_MMIO
+ enable_mmio(pioaddr, quirks);
+
+ /* Check that selected MMIO registers match the PIO ones */
+ i = 0;
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+ if (a != b) {
+ rc = -EIO;
+ printk(KERN_ERR "MMIO do not match PIO [%02x] "
+ "(%02x != %02x)\n", reg, a, b);
+ goto err_out_unmap;
+ }
+ }
+#endif /* USE_MMIO */
+
+ dev->base_addr = (unsigned long)ioaddr;
+ rp->base = ioaddr;
+
+ /* Get chip registers into a sane state */
+ rhine_power_init(dev);
+ rhine_hw_init(dev, pioaddr);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EIO;
+ printk(KERN_ERR "Invalid MAC address\n");
+ goto err_out_unmap;
+ }
+
+ /* For Rhine-I/II, phy_id is loaded from EEPROM */
+ if (!phy_id)
+ phy_id = ioread8(ioaddr + 0x6C);
+
+ dev->irq = pdev->irq;
+
+ spin_lock_init(&rp->lock);
+ rp->mii_if.dev = dev;
+ rp->mii_if.mdio_read = mdio_read;
+ rp->mii_if.mdio_write = mdio_write;
+ rp->mii_if.phy_id_mask = 0x1f;
+ rp->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = rhine_open;
+ dev->hard_start_xmit = rhine_start_tx;
+ dev->stop = rhine_close;
+ dev->get_stats = rhine_get_stats;
+ dev->set_multicast_list = rhine_set_rx_mode;
+ dev->do_ioctl = netdev_ioctl;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->tx_timeout = rhine_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = rhine_poll;
+#endif
+ if (rp->quirks & rqRhineI)
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+ /* dev->name not defined before register_netdev()! */
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_unmap;
+
+ printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
+ dev->name, name,
+#ifdef USE_MMIO
+ memaddr
+#else
+ (long)ioaddr
+#endif
+ );
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ {
+ u16 mii_cmd;
+ int mii_status = mdio_read(dev, phy_id, 1);
+ mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
+ mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
+ printk(KERN_INFO "%s: MII PHY found at address "
+ "%d, status 0x%4.4x advertising %4.4x "
+ "Link %4.4x.\n", dev->name, phy_id,
+ mii_status, rp->mii_if.advertising,
+ mdio_read(dev, phy_id, 5));
+
+ /* set IFF_RUNNING */
+ if (mii_status & BMSR_LSTATUS)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ }
+ }
+ rp->mii_if.phy_id = phy_id;
+
+ return 0;
+
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
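+/*
+ * Allocate DMA-coherent memory for the Rx and Tx descriptor rings and,
+ * on Rhine-I, for the Tx bounce buffers.
+ */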
+static int alloc_ring(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void *ring;
+ dma_addr_t ring_dma;
+
+ ring = pci_alloc_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma);
+ if (!ring) {
+ printk(KERN_ERR "Could not allocate DMA memory.\n");
+ return -ENOMEM;
+ }
+ if (rp->quirks & rqRhineI) {
+ rp->tx_bufs = pci_alloc_consistent(rp->pdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma);
+ if (rp->tx_bufs == NULL) {
+ pci_free_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
+ return -ENOMEM;
+ }
+ }
+
+ rp->rx_ring = ring;
+ rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
+ rp->rx_ring_dma = ring_dma;
+ rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
+
+ return 0;
+}
+
+static void free_ring(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ pci_free_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
+ rp->tx_ring = NULL;
+
+ if (rp->tx_bufs)
+ pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
+
+ rp->tx_bufs = NULL;
+
+}
+
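+/* Initialize the Rx ring and hand freshly allocated skbuffs to the chip. */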
+static void alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int i;
+
+ rp->dirty_rx = rp->cur_rx = 0;
+
+ rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ rp->rx_head_desc = &rp->rx_ring[0];
+ next = rp->rx_ring_dma;
+
+ /* Init the ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rp->rx_ring[i].rx_status = 0;
+ rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
+ next += sizeof(struct rx_desc);
+ rp->rx_ring[i].next_desc = cpu_to_le32(next);
+ rp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
+ rp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+
+ rp->rx_skbuff_dma[i] =
+ pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
+ rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
+ rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+}
+
+static void free_rbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rp->rx_ring[i].rx_status = 0;
+ rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (rp->rx_skbuff[i]) {
+ pci_unmap_single(rp->pdev,
+ rp->rx_skbuff_dma[i],
+ rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rp->rx_skbuff[i]);
+ }
+ rp->rx_skbuff[i] = NULL;
+ }
+}
+
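+/* Initialize the Tx ring; tx_buf[] slots point into the bounce buffer area. */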
+static void alloc_tbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int i;
+
+ rp->dirty_tx = rp->cur_tx = 0;
+ next = rp->tx_ring_dma;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ rp->tx_skbuff[i] = NULL;
+ rp->tx_ring[i].tx_status = 0;
+ rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ next += sizeof(struct tx_desc);
+ rp->tx_ring[i].next_desc = cpu_to_le32(next);
+ rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
+ }
+ rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
+
+}
+
+static void free_tbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ rp->tx_ring[i].tx_status = 0;
+ rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (rp->tx_skbuff[i]) {
+ if (rp->tx_skbuff_dma[i]) {
+ pci_unmap_single(rp->pdev,
+ rp->tx_skbuff_dma[i],
+ rp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(rp->tx_skbuff[i]);
+ }
+ rp->tx_skbuff[i] = NULL;
+ rp->tx_buf[i] = NULL;
+ }
+}
+
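+/* Let the MII library track the link and mirror the duplex setting into ChipCmd1. */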
+static void rhine_check_media(struct net_device *dev, unsigned int init_media)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ mii_check_media(&rp->mii_if, debug, init_media);
+
+ if (rp->mii_if.full_duplex)
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+ else
+ iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+}
+
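+/*
+ * Program the station address, FIFO thresholds, ring base addresses, Rx mode
+ * and the interrupt mask, then start the Tx/Rx engines.
+ */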
+static void init_registers(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure initial FIFO thresholds. */
+ iowrite8(0x20, ioaddr + TxConfig);
+ rp->tx_thresh = 0x20;
+ rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
+
+ iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
+ iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
+
+ rhine_set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
+ ioaddr + ChipCmd);
+ rhine_check_media(dev, 1);
+}
+
+/* Enable MII link status auto-polling (required for IntrLinkChange) */
+static void rhine_enable_linkmon(void __iomem *ioaddr)
+{
+ iowrite8(0, ioaddr + MIICmd);
+ iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
+ iowrite8(0x80, ioaddr + MIICmd);
+
+ RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
+
+ iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
+}
+
+/* Disable MII link status auto-polling (required for MDIO access) */
+static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
+{
+ iowrite8(0, ioaddr + MIICmd);
+
+ if (quirks & rqRhineI) {
+ iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
+
+ /* Can be called from ISR. Evil. */
+ mdelay(1);
+
+ /* 0x80 must be set immediately before turning it off */
+ iowrite8(0x80, ioaddr + MIICmd);
+
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
+
+ /* Heh. Now clear 0x80 again. */
+ iowrite8(0, ioaddr + MIICmd);
+ }
+ else
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
+}
+
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int result;
+
+ rhine_disable_linkmon(ioaddr, rp->quirks);
+
+ /* rhine_disable_linkmon already cleared MIICmd */
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+ result = ioread16(ioaddr + MIIData);
+
+ rhine_enable_linkmon(ioaddr);
+ return result;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ rhine_disable_linkmon(ioaddr, rp->quirks);
+
+ /* rhine_disable_linkmon already cleared MIICmd */
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite16(value, ioaddr + MIIData);
+ iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
+
+ rhine_enable_linkmon(ioaddr);
+}
+
+static int rhine_open(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int rc;
+
+ rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
+ dev);
+ if (rc)
+ return rc;
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
+ dev->name, rp->pdev->irq);
+
+ rc = alloc_ring(dev);
+ if (rc) {
+ free_irq(rp->pdev->irq, dev);
+ return rc;
+ }
+ alloc_rbufs(dev);
+ alloc_tbufs(dev);
+ rhine_chip_reset(dev);
+ init_registers(dev);
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
+ "MII status: %4.4x.\n",
+ dev->name, ioread16(ioaddr + ChipCmd),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
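+/* Tx watchdog: rebuild both rings and reinitialize the chip after a transmit hang. */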
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ /* protect against concurrent rx interrupts */
+ disable_irq(rp->pdev->irq);
+
+ spin_lock(&rp->lock);
+
+ /* clear all descriptors */
+ free_tbufs(dev);
+ free_rbufs(dev);
+ alloc_tbufs(dev);
+ alloc_rbufs(dev);
+
+ /* Reinitialize the hardware. */
+ rhine_chip_reset(dev);
+ init_registers(dev);
+
+ spin_unlock(&rp->lock);
+ enable_irq(rp->pdev->irq);
+
+ dev->trans_start = jiffies;
+ rp->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ unsigned entry;
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = rp->cur_tx % TX_RING_SIZE;
+
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ rp->tx_skbuff[entry] = skb;
+
+ if ((rp->quirks & rqRhineI) &&
+ (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
+ /* Must use alignment buffer. */
+ if (skb->len > PKT_BUF_SZ) {
+ /* packet too long, drop it */
+ dev_kfree_skb(skb);
+ rp->tx_skbuff[entry] = NULL;
+ rp->stats.tx_dropped++;
+ return 0;
+ }
+ skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
+ rp->tx_skbuff_dma[entry] = 0;
+ rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
+ (rp->tx_buf[entry] -
+ rp->tx_bufs));
+ } else {
+ rp->tx_skbuff_dma[entry] =
+ pci_map_single(rp->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
+ }
+
+ rp->tx_ring[entry].desc_length =
+ cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+ /* lock eth irq */
+ spin_lock_irq(&rp->lock);
+ wmb();
+ rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ wmb();
+
+ rp->cur_tx++;
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel */
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
+ IOSYNC;
+
+ if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+ netif_stop_queue(dev);
+
+ dev->trans_start = jiffies;
+
+ spin_unlock_irq(&rp->lock);
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, rp->cur_tx-1, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = dev_instance;
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 intr_status;
+ int boguscnt = max_interrupt_work;
+ int handled = 0;
+
+ while ((intr_status = get_intr_status(dev))) {
+ handled = 1;
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ if (intr_status & IntrTxDescRace)
+ iowrite8(0x08, ioaddr + IntrStatus2);
+ iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
+ IOSYNC;
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+ rhine_rx(dev);
+
+ if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
+ if (intr_status & IntrTxErrSummary) {
+ /* Avoid scavenging before Tx engine turned off */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
+ if (debug > 2 &&
+ ioread8(ioaddr+ChipCmd) & CmdTxOn)
+ printk(KERN_WARNING "%s: "
+ "rhine_interrupt() Tx engine"
+ "still on.\n", dev->name);
+ }
+ rhine_tx(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange |
+ IntrStatsMax | IntrTxError | IntrTxAborted |
+ IntrTxUnderrun | IntrTxDescRace))
+ rhine_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=%#8.8x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ }
+
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity. */
+static void rhine_tx(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+
+ spin_lock(&rp->lock);
+
+ /* find and cleanup dirty tx descriptors */
+ while (rp->dirty_tx != rp->cur_tx) {
+ txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+ if (debug > 6)
+ printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
+ entry, txstatus);
+ if (txstatus & DescOwn)
+ break;
+ if (txstatus & 0x8000) {
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, "
+ "Tx status %8.8x.\n",
+ dev->name, txstatus);
+ rp->stats.tx_errors++;
+ if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200) rp->stats.tx_window_errors++;
+ if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
+ if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
+ (txstatus & 0x0800) || (txstatus & 0x1000)) {
+ rp->stats.tx_fifo_errors++;
+ rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ break; /* Keep the skb - we try again */
+ }
+ /* Transmitter restarted in 'abnormal' handler. */
+ } else {
+ if (rp->quirks & rqRhineI)
+ rp->stats.collisions += (txstatus >> 3) & 0x0F;
+ else
+ rp->stats.collisions += txstatus & 0x0F;
+ if (debug > 6)
+ printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF,
+ txstatus & 0xF);
+ rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
+ rp->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ if (rp->tx_skbuff_dma[entry]) {
+ pci_unmap_single(rp->pdev,
+ rp->tx_skbuff_dma[entry],
+ rp->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ rp->tx_skbuff[entry] = NULL;
+ entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ }
+ if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+ netif_wake_queue(dev);
+
+ spin_unlock(&rp->lock);
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity and better register allocation. */
+static void rhine_rx(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int entry = rp->cur_rx % RX_RING_SIZE;
+ int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
+ dev->name, entry,
+ le32_to_cpu(rp->rx_head_desc->rx_status));
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ struct rx_desc *desc = rp->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->rx_status);
+ int data_size = desc_status >> 16;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
+ if ((desc_status & RxWholePkt) != RxWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet "
+ "frame spanned multiple buffers, entry "
+ "%#x length %d status %8.8x!\n",
+ dev->name, entry, data_size,
+ desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet "
+ "frame %p vs %p.\n", dev->name,
+ rp->rx_head_desc, &rp->rx_ring[entry]);
+ rp->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+ /* There was an error. */
+ if (debug > 2)
+ printk(KERN_DEBUG " rhine_rx() Rx "
+ "error was %8.8x.\n",
+ desc_status);
+ rp->stats.rx_errors++;
+ if (desc_status & 0x0030) rp->stats.rx_length_errors++;
+ if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
+ if (desc_status & 0x0002) {
+ /* this can also be updated outside the interrupt handler */
+ spin_lock(&rp->lock);
+ rp->stats.rx_crc_errors++;
+ spin_unlock(&rp->lock);
+ }
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int pkt_len = data_size - 4;
+
+ /* Check if the packet is long enough to accept without
+ copying to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ eth_copy_and_sum(skb,
+ rp->rx_skbuff[entry]->tail,
+ pkt_len, 0);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ skb = rp->rx_skbuff[entry];
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Inconsistent Rx "
+ "descriptor chain.\n",
+ dev->name);
+ break;
+ }
+ rp->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ rp->stats.rx_bytes += pkt_len;
+ rp->stats.rx_packets++;
+ }
+ entry = (++rp->cur_rx) % RX_RING_SIZE;
+ rp->rx_head_desc = &rp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = rp->dirty_rx % RX_RING_SIZE;
+ if (rp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(rp->rx_buf_sz);
+ rp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ rp->rx_skbuff_dma[entry] =
+ pci_map_single(rp->pdev, skb->tail,
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
+ }
+ rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+}
+
+/*
+ * Clears the "tally counters" for CRC errors and missed frames(?).
+ * It has been reported that some chips need a write of 0 to clear
+ * these, for others the counters are set to 1 when written to and
+ * instead cleared when read. So we clear them both ways ...
+ */
+static inline void clear_tally_counters(void __iomem *ioaddr)
+{
+ iowrite32(0, ioaddr + RxMissed);
+ ioread16(ioaddr + RxCRCErrs);
+ ioread16(ioaddr + RxMissed);
+}
+
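+/* Restart the Tx engine at the first descriptor not yet reclaimed by rhine_tx(). */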
+static void rhine_restart_tx(struct net_device *dev) {
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int entry = rp->dirty_tx % TX_RING_SIZE;
+ u32 intr_status;
+
+ /*
+ * If new errors occurred, we need to sort them out before doing Tx.
+ * In that case the ISR will be back here RSN anyway.
+ */
+ intr_status = get_intr_status(dev);
+
+ if ((intr_status & IntrTxErrSummary) == 0) {
+
+ /* We know better than the chip where it should continue. */
+ iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
+ ioaddr + TxRingPtr);
+
+ iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
+ ioaddr + ChipCmd);
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
+ IOSYNC;
+ }
+ else {
+ /* This should never happen */
+ if (debug > 1)
+ printk(KERN_WARNING "%s: rhine_restart_tx() "
+ "Another error occured %8.8x.\n",
+ dev->name, intr_status);
+ }
+
+}
+
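+/*
+ * Handle abnormal interrupt sources: link changes, tally counter overflow
+ * and the various Tx error conditions (raising the Tx threshold as needed).
+ */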
+static void rhine_error(struct net_device *dev, int intr_status)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ spin_lock(&rp->lock);
+
+ if (intr_status & IntrLinkChange)
+ rhine_check_media(dev, 0);
+ if (intr_status & IntrStatsMax) {
+ rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ }
+ if (intr_status & IntrTxAborted) {
+ if (debug > 1)
+ printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
+ dev->name, intr_status);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if (rp->tx_thresh < 0xE0)
+ iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ if (debug > 1)
+ printk(KERN_INFO "%s: Transmitter underrun, Tx "
+ "threshold now %2.2x.\n",
+ dev->name, rp->tx_thresh);
+ }
+ if (intr_status & IntrTxDescRace) {
+ if (debug > 2)
+ printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
+ dev->name);
+ }
+ if ((intr_status & IntrTxError) &&
+ (intr_status & (IntrTxAborted |
+ IntrTxUnderrun | IntrTxDescRace)) == 0) {
+ if (rp->tx_thresh < 0xE0) {
+ iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ }
+ if (debug > 1)
+ printk(KERN_INFO "%s: Unspecified error. Tx "
+ "threshold now %2.2x.\n",
+ dev->name, rp->tx_thresh);
+ }
+ if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
+ IntrTxError))
+ rhine_restart_tx(dev);
+
+ if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
+ IntrTxError | IntrTxAborted | IntrNormalSummary |
+ IntrTxDescRace)) {
+ if (debug > 1)
+ printk(KERN_ERR "%s: Something Wicked happened! "
+ "%8.8x.\n", dev->name, intr_status);
+ }
+
+ spin_unlock(&rp->lock);
+}
+
+static struct net_device_stats *rhine_get_stats(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rp->lock, flags);
+ rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ return &rp->stats;
+}
+
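+/*
+ * Program the Rx filter: promiscuous, all-multicast, or a CRC-based hash
+ * of the multicast list in the 64 entry filter.
+ */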
+static void rhine_set_rx_mode(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ rx_mode = 0x1C;
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
+ } else if ((dev->mc_count > multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ }
+ iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(rp->pdev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&rp->lock);
+ rc = mii_ethtool_gset(&rp->mii_if, cmd);
+ spin_unlock_irq(&rp->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&rp->lock);
+ rc = mii_ethtool_sset(&rp->mii_if, cmd);
+ spin_unlock_irq(&rp->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return mii_nway_restart(&rp->mii_if);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return mii_link_ok(&rp->mii_if);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ if (!(rp->quirks & rqWOL))
+ return;
+
+ spin_lock_irq(&rp->lock);
+ wol->supported = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+ wol->wolopts = rp->wolopts;
+ spin_unlock_irq(&rp->lock);
+}
+
+static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ u32 support = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+
+ if (!(rp->quirks & rqWOL))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ spin_lock_irq(&rp->lock);
+ rp->wolopts = wol->wolopts;
+ spin_unlock_irq(&rp->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_wol = rhine_get_wol,
+ .set_wol = rhine_set_wol,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&rp->lock);
+ rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&rp->lock);
+
+ return rc;
+}
+
+static int rhine_close(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ spin_lock_irq(&rp->lock);
+
+ netif_stop_queue(dev);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, "
+ "status was %4.4x.\n",
+ dev->name, ioread16(ioaddr + ChipCmd));
+
+ /* Switch to loopback mode to avoid hardware races. */
+ iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(CmdStop, ioaddr + ChipCmd);
+
+ spin_unlock_irq(&rp->lock);
+
+ free_irq(rp->pdev->irq, dev);
+ free_rbufs(dev);
+ free_tbufs(dev);
+ free_ring(dev);
+
+ return 0;
+}
+
+
+static void __devexit rhine_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ pci_iounmap(pdev, rp->base);
+ pci_release_regions(pdev);
+
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
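+/* On WOL-capable chips, arm Wake-on-LAN according to wolopts and enter power state D3. */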
+static void rhine_shutdown (struct device *gendev)
+{
+ struct pci_dev *pdev = to_pci_dev(gendev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ if (!(rp->quirks & rqWOL))
+ return; /* Nothing to do for non-WOL adapters */
+
+ rhine_power_init(dev);
+
+ /* Make sure we use pattern 0, 1 and not 4, 5 */
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x04, ioaddr + 0xA7);
+
+ if (rp->wolopts & WAKE_MAGIC) {
+ iowrite8(WOLmagic, ioaddr + WOLcrSet);
+ /*
+ * Turn EEPROM-controlled wake-up back on -- some hardware may
+ * not cooperate otherwise.
+ */
+ iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
+ }
+
+ if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
+ iowrite8(WOLbmcast, ioaddr + WOLcgSet);
+
+ if (rp->wolopts & WAKE_PHY)
+ iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
+
+ if (rp->wolopts & WAKE_UCAST)
+ iowrite8(WOLucast, ioaddr + WOLcrSet);
+
+ if (rp->wolopts) {
+ /* Enable legacy WOL (for old motherboards) */
+ iowrite8(0x01, ioaddr + PwcfgSet);
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
+ }
+
+ /* Hit power state D3 (sleep) */
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+
+ /* TODO: Check use of pci_enable_wake() */
+
+}
+
+#ifdef CONFIG_PM
+static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+
+ spin_lock_irqsave(&rp->lock, flags);
+ rhine_shutdown(&pdev->dev);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static int rhine_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
+ printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
+
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (debug > 1)
+ printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
+ dev->name, ret ? "failed" : "succeeded", ret);
+
+ pci_restore_state(pdev);
+
+ spin_lock_irqsave(&rp->lock, flags);
+#ifdef USE_MMIO
+ enable_mmio(rp->pioaddr, rp->quirks);
+#endif
+ rhine_power_init(dev);
+ free_tbufs(dev);
+ free_rbufs(dev);
+ alloc_tbufs(dev);
+ alloc_rbufs(dev);
+ init_registers(dev);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ netif_device_attach(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver rhine_driver = {
+ .name = DRV_NAME,
+ .id_table = rhine_pci_tbl,
+ .probe = rhine_init_one,
+ .remove = __devexit_p(rhine_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rhine_suspend,
+ .resume = rhine_resume,
+#endif /* CONFIG_PM */
+ .driver = {
+ .shutdown = rhine_shutdown,
+ }
+};
+
+
+static int __init rhine_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init(&rhine_driver);
+}
+
+
+static void __exit rhine_cleanup(void)
+{
+ pci_unregister_driver(&rhine_driver);
+}
+
+
+module_init(rhine_init);
+module_exit(rhine_cleanup);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
new file mode 100644
index 000000000000..15e710283493
--- /dev/null
+++ b/drivers/net/via-velocity.c
@@ -0,0 +1,3303 @@
+/*
+ * This code is derived from the VIA reference driver (copyright message
+ * below) provided to Red Hat by VIA Networking Technologies, Inc. for
+ * addition to the Linux kernel.
+ *
+ * The code has been merged into one source file, cleaned up to follow
+ * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
+ * for 64-bit hardware platforms.
+ *
+ * TODO
+ * Big-endian support
+ * rx_copybreak/alignment
+ * Scatter gather
+ * More testing
+ *
+ * The changes are (c) Copyright 2004, Red Hat Inc. <alan@redhat.com>
+ * Additional fixes and clean up: Francois Romieu
+ *
+ * This source has not been verified for use in safety critical systems.
+ *
+ * Please direct queries about the revamped driver to the linux-kernel
+ * list not VIA.
+ *
+ * Original code:
+ *
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This software may be redistributed and/or modified under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * Author: Chuang Liang-Shing, AJ Jiang
+ *
+ * Date: Jan 24, 2003
+ *
+ * MODULE_LICENSE("GPL");
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+#include <linux/if.h>
+#include <linux/config.h>
+#include <asm/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/inetdevice.h>
+#include <linux/reboot.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/crc-ccitt.h>
+#include <linux/crc32.h>
+
+#include "via-velocity.h"
+
+
+static int velocity_nics = 0;
+static int msglevel = MSG_LEVEL_INFO;
+
+
+static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static struct ethtool_ops velocity_ethtool_ops;
+
+/*
+ Define module options
+*/
+
+MODULE_AUTHOR("VIA Networking Technologies, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
+
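+/*
+ * Helper: declare a per-adapter integer module parameter array (one entry
+ * per unit) together with its description.
+ */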
+#define VELOCITY_PARAM(N,D) \
+ static int N[MAX_UNITS]=OPTION_DEFAULT;\
+ module_param_array(N, int, NULL, 0); \
+ MODULE_PARM_DESC(N, D);
+
+#define RX_DESC_MIN 64
+#define RX_DESC_MAX 255
+#define RX_DESC_DEF 64
+VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
+
+#define TX_DESC_MIN 16
+#define TX_DESC_MAX 256
+#define TX_DESC_DEF 64
+VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+#define VLAN_ID_MIN 0
+#define VLAN_ID_MAX 4095
+#define VLAN_ID_DEF 0
+/* VID_setting[] is used for setting the VID of the NIC.
+ 0: default VID.
+ 1-4094: other VIDs.
+*/
+VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");
+
+#define RX_THRESH_MIN 0
+#define RX_THRESH_MAX 3
+#define RX_THRESH_DEF 0
+/* rx_thresh[] is used for controlling the receive fifo threshold.
+ 0: indicate the rxfifo threshold is 128 bytes.
+ 1: indicate the rxfifo threshold is 512 bytes.
+ 2: indicate the rxfifo threshold is 1024 bytes.
+ 3: indicate the rxfifo threshold is store & forward.
+*/
+VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
+
+#define DMA_LENGTH_MIN 0
+#define DMA_LENGTH_MAX 7
+#define DMA_LENGTH_DEF 0
+
+/* DMA_length[] is used for controlling the DMA length
+ 0: 8 DWORDs
+ 1: 16 DWORDs
+ 2: 32 DWORDs
+ 3: 64 DWORDs
+ 4: 128 DWORDs
+ 5: 256 DWORDs
+ 6: SF(flush till empty)
+ 7: SF(flush till empty)
+*/
+VELOCITY_PARAM(DMA_length, "DMA length");
+
+#define TAGGING_DEF 0
+/* enable_tagging[] is used for enabling 802.1Q VID tagging.
+ 0: disable VID setting (default).
+ 1: enable VID setting.
+*/
+VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");
+
+#define IP_ALIG_DEF 0
+/* IP_byte_align[] is used to control DWORD byte alignment of the IP header.
+ 0: the IP header will not be DWORD byte aligned. (Default)
+ 1: the IP header will be DWORD byte aligned.
+ In some environments the IP header should be DWORD byte aligned,
+ or the packet will be dropped when we receive it. (eg: IPVS)
+*/
+VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
+
+#define TX_CSUM_DEF 1
+/* txcsum_offload[] is used for setting the checksum offload ability of the NIC.
+ (We only support RX checksum offload now)
+ 0: disable checksum offload.
+ 1: enable checksum offload. (Default)
+*/
+VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
+
+#define FLOW_CNTL_DEF 1
+#define FLOW_CNTL_MIN 1
+#define FLOW_CNTL_MAX 5
+
+/* flow_control[] is used for setting the flow control ability of the NIC.
+ 1: hardware default - AUTO (default). Use hardware default value in ANAR.
+ 2: enable TX flow control.
+ 3: enable RX flow control.
+ 4: enable RX/TX flow control.
+ 5: disable
+*/
+VELOCITY_PARAM(flow_control, "Enable flow control ability");
+
+#define MED_LNK_DEF 0
+#define MED_LNK_MIN 0
+#define MED_LNK_MAX 4
+/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
+ 0: indicate autonegotiation for both speed and duplex mode
+ 1: indicate 100Mbps half duplex mode
+ 2: indicate 100Mbps full duplex mode
+ 3: indicate 10Mbps half duplex mode
+ 4: indicate 10Mbps full duplex mode
+
+ Note:
+   if the EEPROM has been set to force mode, this option is ignored
+   by the driver.
+*/
+VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
+
+#define VAL_PKT_LEN_DEF 0
+/* ValPktLen[] is used for controlling whether frames with an invalid layer 2 length are dropped.
+   0: Receive frames with invalid layer 2 length (Default)
+   1: Drop frames with invalid layer 2 length
+*/
+VELOCITY_PARAM(ValPktLen, "Receive or drop frames with invalid layer 2 length");
+
+#define WOL_OPT_DEF 0
+#define WOL_OPT_MIN 0
+#define WOL_OPT_MAX 7
+/* wol_opts[] is used for controlling wake-on-LAN behavior.
+   0: Wake up if a magic packet is received. (Default)
+   1: Wake up if the link status changes.
+   2: Wake up if an ARP packet is received.
+   4: Wake up if any unicast packet is received.
+   These values can be summed to enable more than one option.
+*/
+VELOCITY_PARAM(wol_opts, "Wake On Lan options");
+
+#define INT_WORKS_DEF 20
+#define INT_WORKS_MIN 10
+#define INT_WORKS_MAX 64
+
+VELOCITY_PARAM(int_works, "Number of packets handled per interrupt service");
+
+static int rx_copybreak = 200;
+module_param(rx_copybreak, int, 0644);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
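+
+/*
+   Example (hypothetical values, assuming the module is built as
+   "via-velocity"): loading it with
+
+       modprobe via-velocity TxDescriptors=128,64 speed_duplex=0,2 wol_opts=1
+
+   would give the first adapter 128 TX descriptors and the second 64,
+   leave the first at auto-negotiation while forcing the second to
+   100Mbps full duplex, and select link-status wake-up on the first.
+*/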
+
+static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
+static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
+static void velocity_print_info(struct velocity_info *vptr);
+static int velocity_open(struct net_device *dev);
+static int velocity_change_mtu(struct net_device *dev, int mtu);
+static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
+static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs);
+static void velocity_set_multi(struct net_device *dev);
+static struct net_device_stats *velocity_get_stats(struct net_device *dev);
+static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int velocity_close(struct net_device *dev);
+static int velocity_receive_frame(struct velocity_info *, int idx);
+static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
+static void velocity_free_rd_ring(struct velocity_info *vptr);
+static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
+static int velocity_soft_reset(struct velocity_info *vptr);
+static void mii_init(struct velocity_info *vptr, u32 mii_status);
+static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
+static void velocity_print_link_status(struct velocity_info *vptr);
+static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs);
+static void velocity_shutdown(struct velocity_info *vptr);
+static void enable_flow_control_ability(struct velocity_info *vptr);
+static void enable_mii_autopoll(struct mac_regs __iomem * regs);
+static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 * pdata);
+static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
+static u32 mii_check_media_mode(struct mac_regs __iomem * regs);
+static u32 check_connection_type(struct mac_regs __iomem * regs);
+static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
+
+#ifdef CONFIG_PM
+
+static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
+static int velocity_resume(struct pci_dev *pdev);
+
+static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
+
+static struct notifier_block velocity_inetaddr_notifier = {
+ .notifier_call = velocity_netdev_event,
+};
+
+static DEFINE_SPINLOCK(velocity_dev_list_lock);
+static LIST_HEAD(velocity_dev_list);
+
+static void velocity_register_notifier(void)
+{
+ register_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+static void velocity_unregister_notifier(void)
+{
+ unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+#else /* CONFIG_PM */
+
+#define velocity_register_notifier() do {} while (0)
+#define velocity_unregister_notifier() do {} while (0)
+
+#endif /* !CONFIG_PM */
+
+/*
+ * Internal board variants. At the moment we have only one
+ */
+
+static struct velocity_info_tbl chip_info_table[] = {
+ {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL},
+ {0, NULL}
+};
+
+/*
+ * Describe the PCI device identifiers that we support in this
+ * device driver. Used for hotplug autoloading.
+ */
+
+static struct pci_device_id velocity_id_table[] __devinitdata = {
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table},
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, velocity_id_table);
+
+/**
+ * get_chip_name - identifier to name
+ * @id: chip identifier
+ *
+ * Given a chip identifier return a suitable description. Returns
+ * a pointer to a static string valid while the driver is loaded.
+ */
+
+static char __devinit *get_chip_name(enum chip_type chip_id)
+{
+ int i;
+ for (i = 0; chip_info_table[i].name != NULL; i++)
+ if (chip_info_table[i].chip_id == chip_id)
+ break;
+ return chip_info_table[i].name;
+}
+
+/**
+ * velocity_remove1 - device unplug
+ * @pdev: PCI device being removed
+ *
+ * Device unload callback. Called on an unplug or on module
+ * unload for each active device that is present. Disconnects
+ * the device from the network layer and frees all the resources
+ */
+
+static void __devexit velocity_remove1(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = dev->priv;
+
+#ifdef CONFIG_PM
+ unsigned long flags;
+
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
+ if (!list_empty(&velocity_dev_list))
+ list_del(&vptr->list);
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+#endif
+ unregister_netdev(dev);
+ iounmap(vptr->mac_regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ velocity_nics--;
+}
+
+/**
+ * velocity_set_int_opt - parser for integer options
+ * @opt: pointer to option value
+ * @val: value the user requested (or -1 for default)
+ * @min: lowest value allowed
+ * @max: highest value allowed
+ * @def: default value
+ * @name: property name
+ * @dev: device name
+ *
+ * Set an integer property in the module options. This function does
+ * all the verification and checking as well as reporting so that
+ * we don't duplicate code for each option.
+ */
+
+static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname)
+{
+ if (val == -1)
+ *opt = def;
+ else if (val < min || val > max) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
+ devname, name, min, max);
+ *opt = def;
+ } else {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
+ devname, name, val);
+ *opt = val;
+ }
+}
+
+/**
+ * velocity_set_bool_opt - parser for boolean options
+ * @opt: pointer to option value
+ * @val: value the user requested (or -1 for default)
+ * @def: default value (yes/no)
+ * @flag: numeric value to set for true.
+ * @name: property name
+ * @dev: device name
+ *
+ * Set a boolean property in the module options. This function does
+ * all the verification and checking as well as reporting so that
+ * we don't duplicate code for each option.
+ */
+
+static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname)
+{
+ (*opt) &= (~flag);
+ if (val == -1)
+ *opt |= (def ? flag : 0);
+ else if (val < 0 || val > 1) {
+ printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
+ devname, name);
+ *opt |= (def ? flag : 0);
+ } else {
+ printk(KERN_INFO "%s: set parameter %s to %s\n",
+ devname, name, val ? "TRUE" : "FALSE");
+ *opt |= (val ? flag : 0);
+ }
+}
+
+/**
+ * velocity_get_options - set options on device
+ * @opts: option structure for the device
+ * @index: index of option to use in module options array
+ * @devname: device name
+ *
+ * Turn the module and command options into a single structure
+ * for the current device
+ */
+
+static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
+{
+
+ velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
+ velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
+ velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
+ velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
+ velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname);
+ velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname);
+ velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
+ velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
+ velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
+ velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
+ velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
+ velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
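+ /*
+ * The hardware hands receive descriptors back in blocks of four
+ * (see velocity_give_many_rx_descs()), so round the RX ring size
+ * down to a multiple of four.
+ */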
+ opts->numrx = (opts->numrx & ~3);
+}
+
+/**
+ * velocity_init_cam_filter - initialise CAM
+ * @vptr: velocity to program
+ *
+ * Initialize the content addressable memory used for filters. Load
+ * appropriately according to the presence of VLAN
+ */
+
+static void velocity_init_cam_filter(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
+ WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
+ WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
+
+ /* Disable all CAMs */
+ memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
+ memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
+ mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
+ mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
+
+ /* Enable first VCAM */
+ if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
+ /* If Tagging option is enabled and VLAN ID is not zero, then
+ turn on MCFG_RTGOPT also */
+ if (vptr->options.vid != 0)
+ WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+
+ mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM);
+ vptr->vCAMmask[0] |= 1;
+ mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
+ } else {
+ u16 temp = 0;
+ mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
+ temp = 1;
+ mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
+ }
+}
+
+/**
+ * velocity_rx_reset - handle a receive reset
+ * @vptr: velocity we are resetting
+ *
+ * Reset the ownership and status for the receive ring side.
+ * Hand all the receive queue to the NIC.
+ */
+
+static void velocity_rx_reset(struct velocity_info *vptr)
+{
+
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ int i;
+
+ vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+
+ /*
+ * Init state, all RD entries belong to the NIC
+ */
+ for (i = 0; i < vptr->options.numrx; ++i)
+ vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+
+ writew(vptr->options.numrx, &regs->RBRDU);
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+ writew(0, &regs->RDIdx);
+ writew(vptr->options.numrx - 1, &regs->RDCSize);
+}
+
+/**
+ * velocity_init_registers - initialise MAC registers
+ * @vptr: velocity to init
+ * @type: type of initialisation (hot or cold)
+ *
+ * Initialise the MAC on a reset or on first set up on the
+ * hardware.
+ */
+
+static void velocity_init_registers(struct velocity_info *vptr,
+ enum velocity_init_type type)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ int i, mii_status;
+
+ mac_wol_reset(regs);
+
+ switch (type) {
+ case VELOCITY_INIT_RESET:
+ case VELOCITY_INIT_WOL:
+
+ netif_stop_queue(vptr->dev);
+
+ /*
+		 * Reset RX to keep the RX pointer on a 4X (multiple of four) boundary
+ */
+ velocity_rx_reset(vptr);
+ mac_rx_queue_run(regs);
+ mac_rx_queue_wake(regs);
+
+ mii_status = velocity_get_opt_media_mode(vptr);
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
+ velocity_print_link_status(vptr);
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
+ netif_wake_queue(vptr->dev);
+ }
+
+ enable_flow_control_ability(vptr);
+
+ mac_clear_isr(regs);
+ writel(CR0_STOP, &regs->CR0Clr);
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
+ &regs->CR0Set);
+
+ break;
+
+ case VELOCITY_INIT_COLD:
+ default:
+ /*
+ * Do reset
+ */
+ velocity_soft_reset(vptr);
+ mdelay(5);
+
+ mac_eeprom_reload(regs);
+ for (i = 0; i < 6; i++) {
+ writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
+ }
+ /*
+ * clear Pre_ACPI bit.
+ */
+ BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
+ mac_set_rx_thresh(regs, vptr->options.rx_thresh);
+ mac_set_dma_length(regs, vptr->options.DMA_length);
+
+ writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
+ /*
+ * Back off algorithm use original IEEE standard
+ */
+ BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
+
+ /*
+ * Init CAM filter
+ */
+ velocity_init_cam_filter(vptr);
+
+ /*
+ * Set packet filter: Receive directed and broadcast address
+ */
+ velocity_set_multi(vptr->dev);
+
+ /*
+ * Enable MII auto-polling
+ */
+ enable_mii_autopoll(regs);
+
+ vptr->int_mask = INT_MASK_DEF;
+
+ writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+ writew(vptr->options.numrx - 1, &regs->RDCSize);
+ mac_rx_queue_run(regs);
+ mac_rx_queue_wake(regs);
+
+ writew(vptr->options.numtx - 1, &regs->TDCSize);
+
+ for (i = 0; i < vptr->num_txq; i++) {
+ writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ mac_tx_queue_run(regs, i);
+ }
+
+ init_flow_control_register(vptr);
+
+ writel(CR0_STOP, &regs->CR0Clr);
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
+
+ mii_status = velocity_get_opt_media_mode(vptr);
+ netif_stop_queue(vptr->dev);
+
+ mii_init(vptr, mii_status);
+
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
+ velocity_print_link_status(vptr);
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
+ netif_wake_queue(vptr->dev);
+ }
+
+ enable_flow_control_ability(vptr);
+ mac_hw_mibs_init(regs);
+ mac_write_int_mask(vptr->int_mask, regs);
+ mac_clear_isr(regs);
+
+ }
+}
+
+/**
+ * velocity_soft_reset - soft reset
+ * @vptr: velocity to reset
+ *
+ * Kick off a soft reset of the velocity adapter and then poll
+ * until the reset sequence has completed before returning.
+ */
+
+static int velocity_soft_reset(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ int i = 0;
+
+ writel(CR0_SFRST, &regs->CR0Set);
+
+ for (i = 0; i < W_MAX_TIMEOUT; i++) {
+ udelay(5);
+ if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
+ break;
+ }
+
+ if (i == W_MAX_TIMEOUT) {
+ writel(CR0_FORSRST, &regs->CR0Set);
+ /* FIXME: PCI POSTING */
+ /* delay 2ms */
+ mdelay(2);
+ }
+ return 0;
+}
+
+/**
+ * velocity_found1 - set up discovered velocity card
+ * @pdev: PCI device
+ * @ent: PCI device table entry that matched
+ *
+ * Configure a discovered adapter from scratch. Return a negative
+ * errno error code on failure paths.
+ */
+
+static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int first = 1;
+ struct net_device *dev;
+ int i;
+ struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data;
+ struct velocity_info *vptr;
+ struct mac_regs __iomem * regs;
+ int ret = -ENOMEM;
+
+ if (velocity_nics >= MAX_UNITS) {
+ printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n",
+ velocity_nics);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct velocity_info));
+
+ if (dev == NULL) {
+ printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n");
+ goto out;
+ }
+
+ /* Chain it all together */
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ vptr = dev->priv;
+
+
+ if (first) {
+ printk(KERN_INFO "%s Ver. %s\n",
+ VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
+ printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
+ printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
+ first = 0;
+ }
+
+ velocity_init_info(pdev, vptr, info);
+
+ vptr->dev = dev;
+
+ dev->irq = pdev->irq;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ goto err_free_dev;
+
+ ret = velocity_get_pci_info(vptr, pdev);
+ if (ret < 0) {
+ printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n");
+ goto err_disable;
+ }
+
+ ret = pci_request_regions(pdev, VELOCITY_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR VELOCITY_NAME ": Failed to request PCI regions.\n");
+ goto err_disable;
+ }
+
+ regs = ioremap(vptr->memaddr, vptr->io_size);
+ if (regs == NULL) {
+ ret = -EIO;
+ goto err_release_res;
+ }
+
+ vptr->mac_regs = regs;
+
+ mac_wol_reset(regs);
+
+ dev->base_addr = vptr->ioaddr;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(&regs->PAR[i]);
+
+
+ velocity_get_options(&vptr->options, velocity_nics, dev->name);
+
+ /*
+	 * Mask out the options that cannot be set on this chip
+ */
+
+ vptr->options.flags &= info->flags;
+
+ /*
+	 * Enable the chip specific capabilities
+ */
+
+ vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
+
+ vptr->wol_opts = vptr->options.wol_opts;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+
+ vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
+
+ dev->irq = pdev->irq;
+ dev->open = velocity_open;
+ dev->hard_start_xmit = velocity_xmit;
+ dev->stop = velocity_close;
+ dev->get_stats = velocity_get_stats;
+ dev->set_multicast_list = velocity_set_multi;
+ dev->do_ioctl = velocity_ioctl;
+ dev->ethtool_ops = &velocity_ethtool_ops;
+ dev->change_mtu = velocity_change_mtu;
+#ifdef VELOCITY_ZERO_COPY_SUPPORT
+ dev->features |= NETIF_F_SG;
+#endif
+
+ if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
+ dev->features |= NETIF_F_HW_CSUM;
+ }
+
+ ret = register_netdev(dev);
+ if (ret < 0)
+ goto err_iounmap;
+
+ velocity_print_info(vptr);
+ pci_set_drvdata(pdev, dev);
+
+ /* and leave the chip powered down */
+
+ pci_set_power_state(pdev, PCI_D3hot);
+#ifdef CONFIG_PM
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
+ list_add(&vptr->list, &velocity_dev_list);
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+ }
+#endif
+ velocity_nics++;
+out:
+ return ret;
+
+err_iounmap:
+ iounmap(regs);
+err_release_res:
+ pci_release_regions(pdev);
+err_disable:
+ pci_disable_device(pdev);
+err_free_dev:
+ free_netdev(dev);
+ goto out;
+}
+
+/**
+ * velocity_print_info - per driver data
+ * @vptr: velocity
+ *
+ * Print per driver data as the kernel driver finds Velocity
+ * hardware
+ */
+
+static void __devinit velocity_print_info(struct velocity_info *vptr)
+{
+ struct net_device *dev = vptr->dev;
+
+ printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
+ printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+}
+
+/**
+ * velocity_init_info - init private data
+ * @pdev: PCI device
+ * @vptr: Velocity info
+ * @info: Board type
+ *
+ * Set up the initial velocity_info struct for the device that has been
+ * discovered.
+ */
+
+static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info)
+{
+ memset(vptr, 0, sizeof(struct velocity_info));
+
+ vptr->pdev = pdev;
+ vptr->chip_id = info->chip_id;
+ vptr->io_size = info->io_size;
+ vptr->num_txq = info->txqueue;
+ vptr->multicast_limit = MCAM_SIZE;
+ spin_lock_init(&vptr->lock);
+ INIT_LIST_HEAD(&vptr->list);
+}
+
+/**
+ * velocity_get_pci_info - retrieve PCI info for device
+ * @vptr: velocity device
+ * @pdev: PCI device it matches
+ *
+ * Retrieve the PCI configuration space data that interests us from
+ * the kernel PCI layer
+ */
+
+static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
+{
+
+ if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
+ return -EIO;
+
+ pci_set_master(pdev);
+
+ vptr->ioaddr = pci_resource_start(pdev, 0);
+ vptr->memaddr = pci_resource_start(pdev, 1);
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO))
+ {
+ printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n",
+ pci_name(pdev));
+ return -EINVAL;
+ }
+
+ if((pci_resource_flags(pdev, 1) & IORESOURCE_IO))
+ {
+ printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n",
+ pci_name(pdev));
+ return -EINVAL;
+ }
+
+ if(pci_resource_len(pdev, 1) < 256)
+ {
+ printk(KERN_ERR "%s: region #1 is too small.\n",
+ pci_name(pdev));
+ return -EINVAL;
+ }
+ vptr->pdev = pdev;
+
+ return 0;
+}
+
+/**
+ * velocity_init_rings - set up DMA rings
+ * @vptr: Velocity to set up
+ *
+ * Allocate PCI mapped DMA rings for the receive and transmit layer
+ * to use.
+ */
+
+static int velocity_init_rings(struct velocity_info *vptr)
+{
+ int i;
+ unsigned int psize;
+ unsigned int tsize;
+ dma_addr_t pool_dma;
+ u8 *pool;
+
+ /*
+	 * Allocate all RD/TD rings from a single pool
+ */
+
+ psize = vptr->options.numrx * sizeof(struct rx_desc) +
+ vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+
+ /*
+	 * pci_alloc_consistent() fulfills the requirement for 64-byte
+ * alignment
+ */
+ pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
+
+ if (pool == NULL) {
+ printk(KERN_ERR "%s : DMA memory allocation failed.\n",
+ vptr->dev->name);
+ return -ENOMEM;
+ }
+
+ memset(pool, 0, psize);
+
+ vptr->rd_ring = (struct rx_desc *) pool;
+
+ vptr->rd_pool_dma = pool_dma;
+
+ tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
+ vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
+ &vptr->tx_bufs_dma);
+
+ if (vptr->tx_bufs == NULL) {
+ printk(KERN_ERR "%s: DMA memory allocation failed.\n",
+ vptr->dev->name);
+ pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
+ return -ENOMEM;
+ }
+
+ memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
+
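+ /*
+ * Carve the pool up: the RX ring occupies the start, followed by
+ * one TX ring per queue, each of options.numtx descriptors.
+ */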
+ i = vptr->options.numrx * sizeof(struct rx_desc);
+ pool += i;
+ pool_dma += i;
+ for (i = 0; i < vptr->num_txq; i++) {
+ int offset = vptr->options.numtx * sizeof(struct tx_desc);
+
+ vptr->td_pool_dma[i] = pool_dma;
+ vptr->td_rings[i] = (struct tx_desc *) pool;
+ pool += offset;
+ pool_dma += offset;
+ }
+ return 0;
+}
+
+/**
+ * velocity_free_rings - free PCI ring pointers
+ * @vptr: Velocity to free from
+ *
+ * Clean up the PCI ring buffers allocated to this velocity.
+ */
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+ int size;
+
+ size = vptr->options.numrx * sizeof(struct rx_desc) +
+ vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+
+ pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
+
+ size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
+
+ pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
+}
+
+static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int avail, dirty, unusable;
+
+ /*
+	 * The number of RDs handed back must be a multiple of four per the hardware spec
+ * (programming guide rev 1.20, p.13)
+ */
+ if (vptr->rd_filled < 4)
+ return;
+
+ wmb();
+
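+ /*
+ * Hand descriptors back to the NIC only in multiples of four:
+ * walk backwards over the most recently refilled entries
+ * (skipping the unaligned remainder), flip them to NIC ownership
+ * and tell the chip how many were returned via RBRDU. The
+ * remainder stays counted in rd_filled for the next pass.
+ */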
+ unusable = vptr->rd_filled & 0x0003;
+ dirty = vptr->rd_dirty - unusable;
+ for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+ dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
+ vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ }
+
+ writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
+ vptr->rd_filled = unusable;
+}
+
+static int velocity_rx_refill(struct velocity_info *vptr)
+{
+ int dirty = vptr->rd_dirty, done = 0, ret = 0;
+
+ do {
+ struct rx_desc *rd = vptr->rd_ring + dirty;
+
+ /* Fine for an all zero Rx desc at init time as well */
+ if (rd->rdesc0.owner == OWNED_BY_NIC)
+ break;
+
+ if (!vptr->rd_info[dirty].skb) {
+ ret = velocity_alloc_rx_buf(vptr, dirty);
+ if (ret < 0)
+ break;
+ }
+ done++;
+ dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+ } while (dirty != vptr->rd_curr);
+
+ if (done) {
+ vptr->rd_dirty = dirty;
+ vptr->rd_filled += done;
+ velocity_give_many_rx_descs(vptr);
+ }
+
+ return ret;
+}
+
+/**
+ * velocity_init_rd_ring - set up receive ring
+ * @vptr: velocity to configure
+ *
+ * Allocate and set up the receive buffers for each ring slot and
+ * assign them to the network adapter.
+ */
+
+static int velocity_init_rd_ring(struct velocity_info *vptr)
+{
+ int ret = -ENOMEM;
+ unsigned int rsize = sizeof(struct velocity_rd_info) *
+ vptr->options.numrx;
+
+ vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
+ if(vptr->rd_info == NULL)
+ goto out;
+ memset(vptr->rd_info, 0, rsize);
+
+ vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+
+ ret = velocity_rx_refill(vptr);
+ if (ret < 0) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+ velocity_free_rd_ring(vptr);
+ }
+out:
+ return ret;
+}
+
+/**
+ * velocity_free_rd_ring - free receive ring
+ * @vptr: velocity to clean up
+ *
+ * Free the receive buffers for each ring slot and any
+ * attached socket buffers that need to go away.
+ */
+
+static void velocity_free_rd_ring(struct velocity_info *vptr)
+{
+ int i;
+
+ if (vptr->rd_info == NULL)
+ return;
+
+ for (i = 0; i < vptr->options.numrx; i++) {
+ struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
+
+ if (!rd_info->skb)
+ continue;
+ pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ rd_info->skb_dma = (dma_addr_t) NULL;
+
+ dev_kfree_skb(rd_info->skb);
+ rd_info->skb = NULL;
+ }
+
+ kfree(vptr->rd_info);
+ vptr->rd_info = NULL;
+}
+
+/**
+ * velocity_init_td_ring - set up transmit ring
+ * @vptr: velocity
+ *
+ * Set up the transmit ring and chain the ring pointers together.
+ * Returns zero on success or a negative posix errno code for
+ * failure.
+ */
+
+static int velocity_init_td_ring(struct velocity_info *vptr)
+{
+ int i, j;
+ dma_addr_t curr;
+ struct tx_desc *td;
+ struct velocity_td_info *td_info;
+ unsigned int tsize = sizeof(struct velocity_td_info) *
+ vptr->options.numtx;
+
+ /* Init the TD ring entries */
+ for (j = 0; j < vptr->num_txq; j++) {
+ curr = vptr->td_pool_dma[j];
+
+ vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL);
+ if(vptr->td_infos[j] == NULL)
+ {
+ while(--j >= 0)
+ kfree(vptr->td_infos[j]);
+ return -ENOMEM;
+ }
+ memset(vptr->td_infos[j], 0, tsize);
+
+ for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
+ td = &(vptr->td_rings[j][i]);
+ td_info = &(vptr->td_infos[j][i]);
+ td_info->buf = vptr->tx_bufs +
+ (j * vptr->options.numtx + i) * PKT_BUF_SZ;
+ td_info->buf_dma = vptr->tx_bufs_dma +
+ (j * vptr->options.numtx + i) * PKT_BUF_SZ;
+ }
+ vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+ }
+ return 0;
+}
+
+/*
+ * FIXME: could we merge this with velocity_free_tx_buf ?
+ */
+
+static void velocity_free_td_ring_entry(struct velocity_info *vptr,
+ int q, int n)
+{
+ struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+ int i;
+
+ if (td_info == NULL)
+ return;
+
+ if (td_info->skb) {
+ for (i = 0; i < td_info->nskb_dma; i++)
+ {
+ if (td_info->skb_dma[i]) {
+ pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
+ td_info->skb->len, PCI_DMA_TODEVICE);
+ td_info->skb_dma[i] = (dma_addr_t) NULL;
+ }
+ }
+ dev_kfree_skb(td_info->skb);
+ td_info->skb = NULL;
+ }
+}
+
+/**
+ * velocity_free_td_ring - free td ring
+ * @vptr: velocity
+ *
+ * Free up the transmit ring for this particular velocity adapter.
+ * We free the ring contents but not the ring itself.
+ */
+
+static void velocity_free_td_ring(struct velocity_info *vptr)
+{
+ int i, j;
+
+ for (j = 0; j < vptr->num_txq; j++) {
+ if (vptr->td_infos[j] == NULL)
+ continue;
+ for (i = 0; i < vptr->options.numtx; i++) {
+ velocity_free_td_ring_entry(vptr, j, i);
+
+ }
+ if (vptr->td_infos[j]) {
+ kfree(vptr->td_infos[j]);
+ vptr->td_infos[j] = NULL;
+ }
+ }
+}
+
+/**
+ * velocity_rx_srv - service RX interrupt
+ * @vptr: velocity
+ * @status: adapter status (unused)
+ *
+ * Walk the receive ring of the velocity adapter and remove
+ * any received packets from the receive queue. Hand the ring
+ * slots back to the adapter for reuse.
+ */
+
+static int velocity_rx_srv(struct velocity_info *vptr, int status)
+{
+ struct net_device_stats *stats = &vptr->stats;
+ int rd_curr = vptr->rd_curr;
+ int works = 0;
+
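+ /*
+ * Service a bounded number of received frames per call; the
+ * caller (velocity_intr) accumulates the returned work count
+ * against the int_works budget.
+ */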
+ do {
+ struct rx_desc *rd = vptr->rd_ring + rd_curr;
+
+ if (!vptr->rd_info[rd_curr].skb)
+ break;
+
+ if (rd->rdesc0.owner == OWNED_BY_NIC)
+ break;
+
+ rmb();
+
+ /*
+ * Don't drop CE or RL error frame although RXOK is off
+ */
+ if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (velocity_receive_frame(vptr, rd_curr) < 0)
+ stats->rx_dropped++;
+ } else {
+ if (rd->rdesc0.RSR & RSR_CRC)
+ stats->rx_crc_errors++;
+ if (rd->rdesc0.RSR & RSR_FAE)
+ stats->rx_frame_errors++;
+
+ stats->rx_dropped++;
+ }
+
+ rd->inten = 1;
+
+ vptr->dev->last_rx = jiffies;
+
+ rd_curr++;
+ if (rd_curr >= vptr->options.numrx)
+ rd_curr = 0;
+ } while (++works <= 15);
+
+ vptr->rd_curr = rd_curr;
+
+ if (works > 0 && velocity_rx_refill(vptr) < 0) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s: rx buf allocation failure\n", vptr->dev->name);
+ }
+
+ VAR_USED(stats);
+ return works;
+}
+
+/**
+ * velocity_rx_csum - checksum process
+ * @rd: receive packet descriptor
+ * @skb: network layer packet buffer
+ *
+ * Process the status bits for the received packet and determine
+ * if the checksum was computed and verified by the hardware
+ */
+
+static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (rd->rdesc1.CSM & CSM_IPKT) {
+ if (rd->rdesc1.CSM & CSM_IPOK) {
+ if ((rd->rdesc1.CSM & CSM_TCPKT) ||
+ (rd->rdesc1.CSM & CSM_UDPKT)) {
+ if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
+ return;
+ }
+ }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+}
+
+/**
+ * velocity_rx_copy - in place Rx copy for small packets
+ * @rx_skb: network layer packet buffer candidate
+ * @pkt_size: received data size
+ * @vptr: velocity adapter
+ *
+ * Replace the current skb that is scheduled for Rx processing by a
+ * shorter, immediately allocated skb, if the received packet is small
+ * enough. This function returns a negative value if the received
+ * packet is too big or if memory is exhausted.
+ */
+static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+ struct velocity_info *vptr)
+{
+ int ret = -1;
+
+ if (pkt_size < rx_copybreak) {
+ struct sk_buff *new_skb;
+
+ new_skb = dev_alloc_skb(pkt_size + 2);
+ if (new_skb) {
+ new_skb->dev = vptr->dev;
+ new_skb->ip_summed = rx_skb[0]->ip_summed;
+
+ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
+ skb_reserve(new_skb, 2);
+
+ memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+ *rx_skb = new_skb;
+ ret = 0;
+ }
+
+ }
+ return ret;
+}
+
+/**
+ * velocity_iph_realign - IP header alignment
+ * @vptr: velocity we are handling
+ * @skb: network layer packet buffer
+ * @pkt_size: received data size
+ *
+ * Align IP header on a 2 bytes boundary. This behavior can be
+ * configured by the user.
+ */
+static inline void velocity_iph_realign(struct velocity_info *vptr,
+ struct sk_buff *skb, int pkt_size)
+{
+ /* FIXME - memmove ? */
+ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
+ int i;
+
+ for (i = pkt_size; i >= 0; i--)
+ *(skb->data + i + 2) = *(skb->data + i);
+ skb_reserve(skb, 2);
+ }
+}
+
+/**
+ * velocity_receive_frame - received packet processor
+ * @vptr: velocity we are handling
+ * @idx: ring index
+ *
+ * A packet has arrived. We process the packet and if appropriate
+ * pass the frame up the network stack
+ */
+
+static int velocity_receive_frame(struct velocity_info *vptr, int idx)
+{
+ void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
+ struct net_device_stats *stats = &vptr->stats;
+ struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+ struct rx_desc *rd = &(vptr->rd_ring[idx]);
+ int pkt_len = rd->rdesc0.len;
+ struct sk_buff *skb;
+
+ if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
+ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
+ stats->rx_length_errors++;
+ return -EINVAL;
+ }
+
+ if (rd->rdesc0.RSR & RSR_MAR)
+ vptr->stats.multicast++;
+
+ skb = rd_info->skb;
+ skb->dev = vptr->dev;
+
+ pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+ vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ /*
+ * Drop frame not meeting IEEE 802.3
+ */
+
+ if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
+ if (rd->rdesc0.RSR & RSR_RL) {
+ stats->rx_length_errors++;
+ return -EINVAL;
+ }
+ }
+
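+ /*
+ * pci_action decides how to finish with the DMA buffer: when the
+ * frame is small enough to be copied into a fresh skb
+ * (velocity_rx_copy), the original buffer stays mapped and is only
+ * synced back to the device; otherwise it is unmapped and the
+ * original skb is handed up the stack.
+ */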
+ pci_action = pci_dma_sync_single_for_device;
+
+ velocity_rx_csum(rd, skb);
+
+ if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
+ velocity_iph_realign(vptr, skb, pkt_len);
+ pci_action = pci_unmap_single;
+ rd_info->skb = NULL;
+ }
+
+ pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ stats->rx_bytes += pkt_len;
+ netif_rx(skb);
+
+ return 0;
+}
+
+/**
+ * velocity_alloc_rx_buf - allocate aligned receive buffer
+ * @vptr: velocity
+ * @idx: ring index
+ *
+ * Allocate a new full sized buffer for the reception of a frame and
+ * map it into PCI space for the hardware to use. The hardware
+ * requires *64* byte alignment of the buffer which makes life
+ * less fun than would be ideal.
+ */
+
+static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
+{
+ struct rx_desc *rd = &(vptr->rd_ring[idx]);
+ struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+
+ rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+ if (rd_info->skb == NULL)
+ return -ENOMEM;
+
+ /*
+ * Do the gymnastics to get the buffer head for data at
+ * 64byte alignment.
+ */
+ skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
+ rd_info->skb->dev = vptr->dev;
+ rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ /*
+ * Fill in the descriptor to match
+ */
+
+ *((u32 *) & (rd->rdesc0)) = 0;
+ rd->len = cpu_to_le32(vptr->rx_buf_sz);
+ rd->inten = 1;
+ rd->pa_low = cpu_to_le32(rd_info->skb_dma);
+ rd->pa_high = 0;
+ return 0;
+}
+
+/**
+ * velocity_tx_srv - transmit interrupt service
+ * @vptr: velocity adapter
+ * @status: adapter status (unused)
+ *
+ * Scan the queues looking for transmitted packets that
+ * we can complete and clean up. Update any statistics as
+ * necessary.
+ */
+
+static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+{
+ struct tx_desc *td;
+ int qnum;
+ int full = 0;
+ int idx;
+ int works = 0;
+ struct velocity_td_info *tdinfo;
+ struct net_device_stats *stats = &vptr->stats;
+
+ for (qnum = 0; qnum < vptr->num_txq; qnum++) {
+ for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+ idx = (idx + 1) % vptr->options.numtx) {
+
+ /*
+ * Get Tx Descriptor
+ */
+ td = &(vptr->td_rings[qnum][idx]);
+ tdinfo = &(vptr->td_infos[qnum][idx]);
+
+ if (td->tdesc0.owner == OWNED_BY_NIC)
+ break;
+
+ if ((works++ > 15))
+ break;
+
+ if (td->tdesc0.TSR & TSR0_TERR) {
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ if (td->tdesc0.TSR & TSR0_CDH)
+ stats->tx_heartbeat_errors++;
+ if (td->tdesc0.TSR & TSR0_CRS)
+ stats->tx_carrier_errors++;
+ if (td->tdesc0.TSR & TSR0_ABT)
+ stats->tx_aborted_errors++;
+ if (td->tdesc0.TSR & TSR0_OWC)
+ stats->tx_window_errors++;
+ } else {
+ stats->tx_packets++;
+ stats->tx_bytes += tdinfo->skb->len;
+ }
+ velocity_free_tx_buf(vptr, tdinfo);
+ vptr->td_used[qnum]--;
+ }
+ vptr->td_tail[qnum] = idx;
+
+ if (AVAIL_TD(vptr, qnum) < 1) {
+ full = 1;
+ }
+ }
+ /*
+ * Look to see if we should kick the transmit network
+ * layer for more work.
+ */
+ if (netif_queue_stopped(vptr->dev) && (full == 0)
+ && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+ netif_wake_queue(vptr->dev);
+ }
+ return works;
+}
+
+/**
+ * velocity_print_link_status - link status reporting
+ * @vptr: velocity to report on
+ *
+ * Turn the link status of the velocity card into a kernel log
+ * description of the new link state, detailing speed and duplex
+ * status
+ */
+
+static void velocity_print_link_status(struct velocity_info *vptr)
+{
+
+ if (vptr->mii_status & VELOCITY_LINK_FAIL) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
+ } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
+
+ if (vptr->mii_status & VELOCITY_SPEED_1000)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
+ else if (vptr->mii_status & VELOCITY_SPEED_100)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
+ else
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
+
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
+ else
+ VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
+ } else {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
+ switch (vptr->options.spd_dpx) {
+ case SPD_DPX_100_HALF:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
+ break;
+ case SPD_DPX_100_FULL:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
+ break;
+ case SPD_DPX_10_HALF:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
+ break;
+ case SPD_DPX_10_FULL:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * velocity_error - handle error from controller
+ * @vptr: velocity
+ * @status: card status
+ *
+ * Process an error report from the hardware and attempt to recover
+ * the card itself. At the moment we cannot recover from some
+ * theoretically impossible errors but this could be fixed using
+ * the pci_device_failed logic to bounce the hardware
+ *
+ */
+
+static void velocity_error(struct velocity_info *vptr, int status)
+{
+
+ if (status & ISR_TXSTLI) {
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ printk(KERN_ERR "TD structure errror TDindex=%hx\n", readw(&regs->TDIdx[0]));
+ BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
+ writew(TRDCSR_RUN, &regs->TDCSRClr);
+ netif_stop_queue(vptr->dev);
+
+ /* FIXME: port over the pci_device_failed code and use it
+ here */
+ }
+
+ if (status & ISR_SRCI) {
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ int linked;
+
+ if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+ vptr->mii_status = check_connection_type(regs);
+
+ /*
+ * If it is a 3119, disable frame bursting in
+ * halfduplex mode and enable it in fullduplex
+ * mode
+ */
+ if (vptr->rev_id < REV_ID_VT3216_A0) {
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+ else
+ BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+ }
+ /*
+ * Only enable CD heart beat counter in 10HD mode
+ */
+ if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
+ BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+ } else {
+ BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+ }
+ }
+ /*
+ * Get link status from PHYSR0
+ */
+ linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
+
+ if (linked) {
+ vptr->mii_status &= ~VELOCITY_LINK_FAIL;
+ } else {
+ vptr->mii_status |= VELOCITY_LINK_FAIL;
+ }
+
+ velocity_print_link_status(vptr);
+ enable_flow_control_ability(vptr);
+
+ /*
+ * Re-enable auto-polling because SRCI will disable
+ * auto-polling
+ */
+
+ enable_mii_autopoll(regs);
+
+ if (vptr->mii_status & VELOCITY_LINK_FAIL)
+ netif_stop_queue(vptr->dev);
+ else
+ netif_wake_queue(vptr->dev);
+
+ }
+ if (status & ISR_MIBFI)
+ velocity_update_hw_mibs(vptr);
+ if (status & ISR_LSTEI)
+ mac_rx_queue_wake(vptr->mac_regs);
+}
+
+/**
+ * velocity_free_tx_buf - free transmit buffer
+ * @vptr: velocity
+ * @tdinfo: buffer
+ *
+ *	Release a transmit buffer. If the buffer was preallocated then
+ * recycle it, if not then unmap the buffer.
+ */
+
+static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+{
+ struct sk_buff *skb = tdinfo->skb;
+ int i;
+
+ /*
+ * Don't unmap the pre-allocated tx_bufs
+ */
+ if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
+
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+#ifdef VELOCITY_ZERO_COPY_SUPPORT
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+#else
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
+#endif
+ tdinfo->skb_dma[i] = 0;
+ }
+ }
+ dev_kfree_skb_irq(skb);
+ tdinfo->skb = NULL;
+}
+
+/**
+ * velocity_open - interface activation callback
+ * @dev: network layer device to open
+ *
+ * Called when the network layer brings the interface up. Returns
+ * a negative posix error code on failure, or zero on success.
+ *
+ * All the ring allocation and set up is done on open for this
+ * adapter to minimise memory usage when inactive
+ */
+
+static int velocity_open(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ int ret;
+
+ vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ ret = velocity_init_rings(vptr);
+ if (ret < 0)
+ goto out;
+
+ ret = velocity_init_rd_ring(vptr);
+ if (ret < 0)
+ goto err_free_desc_rings;
+
+ ret = velocity_init_td_ring(vptr);
+ if (ret < 0)
+ goto err_free_rd_ring;
+
+ /* Ensure chip is running */
+ pci_set_power_state(vptr->pdev, PCI_D0);
+
+ velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
+ ret = request_irq(vptr->pdev->irq, &velocity_intr, SA_SHIRQ,
+ dev->name, dev);
+ if (ret < 0) {
+ /* Power down the chip */
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+ goto err_free_td_ring;
+ }
+
+ mac_enable_int(vptr->mac_regs);
+ netif_start_queue(dev);
+ vptr->flags |= VELOCITY_FLAGS_OPENED;
+out:
+ return ret;
+
+err_free_td_ring:
+ velocity_free_td_ring(vptr);
+err_free_rd_ring:
+ velocity_free_rd_ring(vptr);
+err_free_desc_rings:
+ velocity_free_rings(vptr);
+ goto out;
+}
+
+/**
+ * velocity_change_mtu - MTU change callback
+ * @dev: network device
+ * @new_mtu: desired MTU
+ *
+ * Handle requests from the networking layer for MTU change on
+ * this interface. It gets called on a change by the network layer.
+ * Return zero for success or negative posix error code.
+ */
+
+static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct velocity_info *vptr = dev->priv;
+ unsigned long flags;
+ int oldmtu = dev->mtu;
+ int ret = 0;
+
+ if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
+ vptr->dev->name);
+ return -EINVAL;
+ }
+
+ if (new_mtu != oldmtu) {
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ netif_stop_queue(dev);
+ velocity_shutdown(vptr);
+
+ velocity_free_td_ring(vptr);
+ velocity_free_rd_ring(vptr);
+
+ dev->mtu = new_mtu;
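+ /*
+ * Size the receive buffers for the new MTU: 4K for MTUs up
+ * to 4096, 8K up to 8192, and 9K (jumbo) above that.
+ */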
+ if (new_mtu > 8192)
+ vptr->rx_buf_sz = 9 * 1024;
+ else if (new_mtu > 4096)
+ vptr->rx_buf_sz = 8192;
+ else
+ vptr->rx_buf_sz = 4 * 1024;
+
+ ret = velocity_init_rd_ring(vptr);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = velocity_init_td_ring(vptr);
+ if (ret < 0)
+ goto out_unlock;
+
+ velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
+ mac_enable_int(vptr->mac_regs);
+ netif_start_queue(dev);
+out_unlock:
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ }
+
+ return ret;
+}
+
+/**
+ * velocity_shutdown - shut down the chip
+ * @vptr: velocity to deactivate
+ *
+ * Shuts down the internal operations of the velocity and
+ * disables interrupts, autopolling, transmit and receive
+ */
+
+static void velocity_shutdown(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ mac_disable_int(regs);
+ writel(CR0_STOP, &regs->CR0Set);
+ writew(0xFFFF, &regs->TDCSRClr);
+ writeb(0xFF, &regs->RDCSRClr);
+ safe_disable_mii_autopoll(regs);
+ mac_clear_isr(regs);
+}
+
+/**
+ * velocity_close - close adapter callback
+ * @dev: network device
+ *
+ * Callback from the network layer when the velocity is being
+ * deactivated by the network layer
+ */
+
+static int velocity_close(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+
+ netif_stop_queue(dev);
+ velocity_shutdown(vptr);
+
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
+ velocity_get_ip(vptr);
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+
+ /* Power down the chip */
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+
+ /* Free the resources */
+ velocity_free_td_ring(vptr);
+ velocity_free_rd_ring(vptr);
+ velocity_free_rings(vptr);
+
+ vptr->flags &= (~VELOCITY_FLAGS_OPENED);
+ return 0;
+}
+
+/**
+ * velocity_xmit - transmit packet callback
+ * @skb: buffer to transmit
+ * @dev: network device
+ *
+ *	Called by the network layer to request a packet is queued to
+ * the velocity. Returns zero on success.
+ */
+
+static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ int qnum = 0;
+ struct tx_desc *td_ptr;
+ struct velocity_td_info *tdinfo;
+ unsigned long flags;
+ int index;
+
+ int pktlen = skb->len;
+
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ index = vptr->td_curr[qnum];
+ td_ptr = &(vptr->td_rings[qnum][index]);
+ tdinfo = &(vptr->td_infos[qnum][index]);
+
+ td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
+ td_ptr->tdesc1.TCR = TCR0_TIC;
+ td_ptr->td_buf[0].queue = 0;
+
+ /*
+ * Pad short frames.
+ */
+ if (pktlen < ETH_ZLEN) {
+ /* Cannot occur until ZC support */
+ if (skb_linearize(skb, GFP_ATOMIC)) {
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ return 0;
+ }
+ pktlen = ETH_ZLEN;
+ memcpy(tdinfo->buf, skb->data, skb->len);
+ memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
+ tdinfo->skb = skb;
+ tdinfo->skb_dma[0] = tdinfo->buf_dma;
+ td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+ td_ptr->td_buf[0].pa_high = 0;
+ td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ tdinfo->nskb_dma = 1;
+ td_ptr->tdesc1.CMDZ = 2;
+ } else
+#ifdef VELOCITY_ZERO_COPY_SUPPORT
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ tdinfo->skb = skb;
+ if (nfrags > 6) {
+ skb_linearize(skb, GFP_ATOMIC);
+ memcpy(tdinfo->buf, skb->data, skb->len);
+ tdinfo->skb_dma[0] = tdinfo->buf_dma;
+ td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+ td_ptr->td_buf[0].pa_high = 0;
+ td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ tdinfo->nskb_dma = 1;
+ td_ptr->tdesc1.CMDZ = 2;
+ } else {
+ int i = 0;
+ tdinfo->nskb_dma = 0;
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+
+ td_ptr->tdesc0.pktsize = pktlen;
+
+ /* FIXME: support 48bit DMA later */
+ td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma[i]);
+ td_ptr->td_buf[i].pa_high = 0;
+ td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = (void *) page_address(frag->page) + frag->page_offset;
+
+ tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
+
+ td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+ td_ptr->td_buf[i + 1].pa_high = 0;
+ td_ptr->td_buf[i + 1].bufsize = frag->size;
+ }
+ tdinfo->nskb_dma = i - 1;
+ td_ptr->tdesc1.CMDZ = i;
+ }
+
+ } else
+#endif
+ {
+ /*
+ * Map the linear network buffer into PCI space and
+ * add it to the transmit ring.
+ */
+ tdinfo->skb = skb;
+ tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+ td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+ td_ptr->td_buf[0].pa_high = 0;
+ td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ tdinfo->nskb_dma = 1;
+ td_ptr->tdesc1.CMDZ = 2;
+ }
+
+ if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
+ td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
+ td_ptr->tdesc1.pqinf.priority = 0;
+ td_ptr->tdesc1.pqinf.CFI = 0;
+ td_ptr->tdesc1.TCR |= TCR0_VETAG;
+ }
+
+ /*
+ * Handle hardware checksum
+ */
+ if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
+ && (skb->ip_summed == CHECKSUM_HW)) {
+ struct iphdr *ip = skb->nh.iph;
+ if (ip->protocol == IPPROTO_TCP)
+ td_ptr->tdesc1.TCR |= TCR0_TCPCK;
+ else if (ip->protocol == IPPROTO_UDP)
+ td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
+ td_ptr->tdesc1.TCR |= TCR0_IPCK;
+ }
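+ /*
+ * Hand the descriptor to the NIC: mark it NIC owned, advance the
+ * ring indices, stop the queue if the ring is now full, then chain
+ * this descriptor behind the previous one (td_buf[0].queue) and
+ * kick the transmit queue.
+ */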
+ {
+
+ int prev = index - 1;
+
+ if (prev < 0)
+ prev = vptr->options.numtx - 1;
+ td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ vptr->td_used[qnum]++;
+ vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+
+ if (AVAIL_TD(vptr, qnum) < 1)
+ netif_stop_queue(dev);
+
+ td_ptr = &(vptr->td_rings[qnum][prev]);
+ td_ptr->td_buf[0].queue = 1;
+ mac_tx_queue_wake(vptr->mac_regs, qnum);
+ }
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ return 0;
+}
+
+/**
+ * velocity_intr - interrupt callback
+ * @irq: interrupt number
+ * @dev_instance: interrupting device
+ * @pt_regs: CPU register state at interrupt
+ *
+ * Called whenever an interrupt is generated by the velocity
+ * adapter IRQ line. We may not be the source of the interrupt
+ * and need to identify initially if we are, and if not exit as
+ * efficiently as possible.
+ */
+
+static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct velocity_info *vptr = dev->priv;
+ u32 isr_status;
+ int max_count = 0;
+
+
+ spin_lock(&vptr->lock);
+ isr_status = mac_read_isr(vptr->mac_regs);
+
+ /* Not us ? */
+ if (isr_status == 0) {
+ spin_unlock(&vptr->lock);
+ return IRQ_NONE;
+ }
+
+ mac_disable_int(vptr->mac_regs);
+
+ /*
+ * Keep processing the ISR until we have completed
+ * processing and the isr_status becomes zero
+ */
+
+ while (isr_status != 0) {
+ mac_write_isr(vptr->mac_regs, isr_status);
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+ if (isr_status & (ISR_PRXI | ISR_PPRXI))
+ max_count += velocity_rx_srv(vptr, isr_status);
+ if (isr_status & (ISR_PTXI | ISR_PPTXI))
+ max_count += velocity_tx_srv(vptr, isr_status);
+ isr_status = mac_read_isr(vptr->mac_regs);
+ if (max_count > vptr->options.int_works)
+ {
+ printk(KERN_WARNING "%s: excessive work at interrupt.\n",
+ dev->name);
+ max_count = 0;
+ }
+ }
+ spin_unlock(&vptr->lock);
+ mac_enable_int(vptr->mac_regs);
+ return IRQ_HANDLED;
+
+}
+
+
+/**
+ * velocity_set_multi - filter list change callback
+ * @dev: network device
+ *
+ * Called by the network layer when the filter lists need to change
+ * for a velocity adapter. Reload the CAMs with the new address
+ * filter ruleset.
+ */
+
+static void velocity_set_multi(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ u8 rx_mode;
+ int i;
+ struct dev_mc_list *mclist;
+
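+ /*
+ * Three cases: promiscuous mode accepts everything; too many
+ * multicast entries (or IFF_ALLMULTI) accepts all multicast;
+ * otherwise each multicast address is programmed into a CAM slot.
+ */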
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ writel(0xffffffff, &regs->MARCAM[0]);
+ writel(0xffffffff, &regs->MARCAM[4]);
+ rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
+ } else if ((dev->mc_count > vptr->multicast_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ writel(0xffffffff, &regs->MARCAM[0]);
+ writel(0xffffffff, &regs->MARCAM[4]);
+ rx_mode = (RCR_AM | RCR_AB);
+ } else {
+ int offset = MCAM_SIZE - vptr->multicast_limit;
+ mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
+
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM);
+ vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+ }
+
+ mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
+ rx_mode = (RCR_AM | RCR_AB);
+ }
+ if (dev->mtu > 1500)
+ rx_mode |= RCR_AL;
+
+ BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
+
+}
+
+/**
+ * velocity_get_stats - statistics callback
+ * @dev: network device
+ *
+ * Callback from the network layer to allow driver statistics
+ * to be resynchronized with hardware collected state. In the
+ * case of the velocity we need to pull the MIB counters from
+ * the hardware into the counters before letting the network
+ * layer display them.
+ */
+
+static struct net_device_stats *velocity_get_stats(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+
+ /* If the interface is down, don't touch the hardware */
+ if(!netif_running(dev))
+ return &vptr->stats;
+
+ spin_lock_irq(&vptr->lock);
+ velocity_update_hw_mibs(vptr);
+ spin_unlock_irq(&vptr->lock);
+
+ vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
+ vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
+ vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
+
+// unsigned long rx_dropped; /* no space in linux buffers */
+ vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
+ /* detailed rx_errors: */
+// unsigned long rx_length_errors;
+// unsigned long rx_over_errors; /* receiver ring buff overflow */
+ vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
+// unsigned long rx_frame_errors; /* recv'd frame alignment error */
+// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
+// unsigned long rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+// unsigned long tx_fifo_errors;
+
+ return &vptr->stats;
+}
+
+
+/**
+ * velocity_ioctl - ioctl entry point
+ * @dev: network device
+ * @rq: interface request ioctl
+ * @cmd: command code
+ *
+ * Called when the user issues an ioctl request to the network
+ * device in question. The velocity interface supports MII.
+ */
+
+static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct velocity_info *vptr = dev->priv;
+ int ret;
+
+ /* If we are asked for information and the device is power
+ saving then we need to bring the device back up to talk to it */
+
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D0);
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCSMIIREG: /* Write to MII PHY register. */
+ ret = velocity_mii_ioctl(dev, rq, cmd);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+
+
+ return ret;
+}
+
+/*
+ * Definition for our device driver. The PCI layer interface
+ * uses this to handle all our card discovery and plugging
+ */
+
+static struct pci_driver velocity_driver = {
+ .name = VELOCITY_NAME,
+ .id_table = velocity_id_table,
+ .probe = velocity_found1,
+ .remove = __devexit_p(velocity_remove1),
+#ifdef CONFIG_PM
+ .suspend = velocity_suspend,
+ .resume = velocity_resume,
+#endif
+};
+
+/**
+ * velocity_init_module - load time function
+ *
+ * Called when the velocity module is loaded. The PCI driver
+ * is registered with the PCI layer, and in turn will call
+ * the probe functions for each velocity adapter installed
+ * in the system.
+ */
+
+static int __init velocity_init_module(void)
+{
+ int ret;
+
+ velocity_register_notifier();
+ ret = pci_module_init(&velocity_driver);
+ if (ret < 0)
+ velocity_unregister_notifier();
+ return ret;
+}
+
+/**
+ * velocity_cleanup - module unload
+ *
+ *	Called when the velocity module is unloaded. It cleans up the
+ *	notifiers and unregisters the PCI driver interface for this
+ *	hardware, which in turn cleans up all discovered interfaces
+ *	before returning from the function.
+ */
+
+static void __exit velocity_cleanup_module(void)
+{
+ velocity_unregister_notifier();
+ pci_unregister_driver(&velocity_driver);
+}
+
+module_init(velocity_init_module);
+module_exit(velocity_cleanup_module);
+
+
+/*
+ * MII access , media link mode setting functions
+ */
+
+
+/**
+ * mii_init - set up MII
+ * @vptr: velocity adapter
+ * @mii_status: link status
+ *
+ * Set up the PHY for the current link state.
+ */
+
+static void mii_init(struct velocity_info *vptr, u32 mii_status)
+{
+ u16 BMCR;
+
+ switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
+ case PHYID_CICADA_CS8201:
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ /*
+ * Turn on the ECHODIS bit in NWay-forced full mode and turn it
+ * off in NWay-forced half mode to work around the NWay-forced
+ * vs. legacy-forced issue.
+ */
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ else
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ /*
+ * Turn on Link/Activity LED enable bit for CIS8201
+ */
+ MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ break;
+ case PHYID_VT3216_32BIT:
+ case PHYID_VT3216_64BIT:
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ /*
+ * Turn on the ECHODIS bit in NWay-forced full mode and turn it
+ * off in NWay-forced half mode to work around the NWay-forced
+ * vs. legacy-forced issue.
+ */
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ else
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ break;
+
+ case PHYID_MARVELL_1000:
+ case PHYID_MARVELL_1000S:
+ /*
+ * Assert CRS on Transmit
+ */
+ MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ break;
+ default:
+ ;
+ }
+ velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
+ if (BMCR & BMCR_ISO) {
+ BMCR &= ~BMCR_ISO;
+ velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ }
+}
+
+/**
+ * safe_disable_mii_autopoll - autopoll off
+ * @regs: velocity registers
+ *
+ * Turn off the autopoll and wait for it to disable on the chip
+ */
+
+static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs)
+{
+ u16 ww;
+
+ /* turn off MAUTO */
+ writeb(0, &regs->MIICR);
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ udelay(1);
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+}
+
+/**
+ * enable_mii_autopoll - turn on autopolling
+ * @regs: velocity registers
+ *
+ * Enable the MII link status autopoll feature on the Velocity
+ * hardware. Wait for it to enable.
+ */
+
+static void enable_mii_autopoll(struct mac_regs __iomem * regs)
+{
+ int ii;
+
+ writeb(0, &(regs->MIICR));
+ writeb(MIIADR_SWMPL, &regs->MIIADR);
+
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+ udelay(1);
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+
+ writeb(MIICR_MAUTO, &regs->MIICR);
+
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+ udelay(1);
+ if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+
+}
+
+/**
+ * velocity_mii_read - read MII data
+ * @regs: velocity registers
+ * @index: MII register index
+ * @data: buffer for received data
+ *
+ * Perform a single read of an MII 16bit register. Returns zero
+ * on success or -ETIMEDOUT if the PHY did not respond.
+ */
+
+static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
+{
+ u16 ww;
+
+ /*
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
+ */
+ safe_disable_mii_autopoll(regs);
+
+ writeb(index, &regs->MIIADR);
+
+ BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
+
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ if (!(readb(&regs->MIICR) & MIICR_RCMD))
+ break;
+ }
+
+ *data = readw(&regs->MIIDATA);
+
+ enable_mii_autopoll(regs);
+ if (ww == W_MAX_TIMEOUT)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+/**
+ * velocity_mii_write - write MII data
+ * @regs: velocity registers
+ * @index: MII register index
+ * @data: 16bit data for the MII register
+ *
+ * Perform a single write to an MII 16bit register. Returns zero
+ * on success or -ETIMEDOUT if the PHY did not respond.
+ */
+
+static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
+{
+ u16 ww;
+
+ /*
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
+ */
+ safe_disable_mii_autopoll(regs);
+
+ /* MII reg offset */
+ writeb(mii_addr, &regs->MIIADR);
+ /* set MII data */
+ writew(data, &regs->MIIDATA);
+
+ /* turn on MIICR_WCMD */
+ BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
+
+ /* W_MAX_TIMEOUT is the timeout period */
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ udelay(5);
+ if (!(readb(&regs->MIICR) & MIICR_WCMD))
+ break;
+ }
+ enable_mii_autopoll(regs);
+
+ if (ww == W_MAX_TIMEOUT)
+ return -ETIMEDOUT;
+ return 0;
+}
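+
+/*
+ * Illustrative sketch only (not part of the driver; the helper name is
+ * hypothetical): combining the two accessors above into a
+ * read-modify-write of a PHY register, which is essentially what the
+ * MII_REG_BITS_ON/OFF macros used throughout this file boil down to.
+ */
+#if 0
+static int velocity_mii_set_bits(struct mac_regs __iomem *regs, u8 index, u16 bits)
+{
+ u16 tmp;
+ int err;
+
+ err = velocity_mii_read(regs, index, &tmp);
+ if (err < 0)
+ return err;
+ return velocity_mii_write(regs, index, tmp | bits);
+}
+#endif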
+
+/**
+ * velocity_get_opt_media_mode - get media selection
+ * @vptr: velocity adapter
+ *
+ * Get the media mode stored in EEPROM or module options and load
+ * mii_status accordingly. The requested link state information
+ * is also returned.
+ */
+
+static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
+{
+ u32 status = 0;
+
+ switch (vptr->options.spd_dpx) {
+ case SPD_DPX_AUTO:
+ status = VELOCITY_AUTONEG_ENABLE;
+ break;
+ case SPD_DPX_100_FULL:
+ status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
+ break;
+ case SPD_DPX_10_FULL:
+ status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
+ break;
+ case SPD_DPX_100_HALF:
+ status = VELOCITY_SPEED_100;
+ break;
+ case SPD_DPX_10_HALF:
+ status = VELOCITY_SPEED_10;
+ break;
+ }
+ vptr->mii_status = status;
+ return status;
+}
+
+/**
+ * mii_set_auto_on - autonegotiate on
+ * @vptr: velocity
+ *
+ * Enable autonegotiation on this interface
+ */
+
+static void mii_set_auto_on(struct velocity_info *vptr)
+{
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ else
+ MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+}
+
+
+/*
+static void mii_set_auto_off(struct velocity_info * vptr)
+{
+ MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+}
+*/
+
+/**
+ * set_mii_flow_control - flow control setup
+ * @vptr: velocity interface
+ *
+ * Set up the flow control on this interface according to
+ * the supplied user/eeprom options.
+ */
+
+static void set_mii_flow_control(struct velocity_info *vptr)
+{
+ /* Enable or disable PAUSE in ANAR */
+ switch (vptr->options.flow_cntl) {
+ case FLOW_CNTL_TX:
+ MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_RX:
+ MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_TX_RX:
+ MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_DISABLE:
+ MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * velocity_set_media_mode - set media mode
+ * @vptr: velocity adapter
+ * @mii_status: requested MII link state
+ *
+ * Check the media link state and configure the PHY flow control and
+ * the velocity hardware setup accordingly. In particular
+ * we need to set up CD polling and frame bursting.
+ */
+
+static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
+{
+ u32 curr_status;
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
+ curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
+
+ /* Set mii link status */
+ set_mii_flow_control(vptr);
+
+ /*
+ Check if new status is consistent with current status
+ if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
+ || (mii_status==curr_status)) {
+ vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
+ vptr->mii_status=check_connection_type(vptr->mac_regs);
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
+ return 0;
+ }
+ */
+
+ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ }
+
+ /*
+ * If connection type is AUTO
+ */
+ if (mii_status & VELOCITY_AUTONEG_ENABLE) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
+ /* clear force MAC mode bit */
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+ /* set duplex mode of MAC according to duplex mode of MII */
+ MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+
+ /* enable AUTO-NEGO mode */
+ mii_set_auto_on(vptr);
+ } else {
+ u16 ANAR;
+ u8 CHIPGCR;
+
+ /*
+ * 1. if it's 3119, disable frame bursting in halfduplex mode
+ * and enable it in fullduplex mode
+ * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
+ * 3. only enable CD heart beat counter in 10HD mode
+ */
+
+ /* set force MAC mode bit */
+ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
+
+ CHIPGCR = readb(&regs->CHIPGCR);
+ CHIPGCR &= ~CHIPGCR_FCGMII;
+
+ if (mii_status & VELOCITY_DUPLEX_FULL) {
+ CHIPGCR |= CHIPGCR_FCFDX;
+ writeb(CHIPGCR, &regs->CHIPGCR);
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
+ if (vptr->rev_id < REV_ID_VT3216_A0)
+ BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+ } else {
+ CHIPGCR &= ~CHIPGCR_FCFDX;
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
+ writeb(CHIPGCR, &regs->CHIPGCR);
+ if (vptr->rev_id < REV_ID_VT3216_A0)
+ BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+ }
+
+ MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+
+ if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) {
+ BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+ } else {
+ BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+ }
+ /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
+ ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ if (mii_status & VELOCITY_SPEED_100) {
+ if (mii_status & VELOCITY_DUPLEX_FULL)
+ ANAR |= ANAR_TXFD;
+ else
+ ANAR |= ANAR_TX;
+ } else {
+ if (mii_status & VELOCITY_DUPLEX_FULL)
+ ANAR |= ANAR_10FD;
+ else
+ ANAR |= ANAR_10;
+ }
+ velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ /* enable AUTO-NEGO mode */
+ mii_set_auto_on(vptr);
+ /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ }
+ /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
+ /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
+ return VELOCITY_LINK_CHANGE;
+}
+
+/**
+ * mii_check_media_mode - check media state
+ * @regs: velocity registers
+ *
+ * Check the current MII status and determine the link status
+ * accordingly
+ */
+
+static u32 mii_check_media_mode(struct mac_regs __iomem * regs)
+{
+ u32 status = 0;
+ u16 ANAR;
+
+ if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ status |= VELOCITY_LINK_FAIL;
+
+ if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
+ else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ status |= (VELOCITY_SPEED_1000);
+ else {
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+ if (ANAR & ANAR_TXFD)
+ status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
+ else if (ANAR & ANAR_TX)
+ status |= VELOCITY_SPEED_100;
+ else if (ANAR & ANAR_10FD)
+ status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
+ else
+ status |= (VELOCITY_SPEED_10);
+ }
+
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+ if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
+ == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
+ if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ status |= VELOCITY_AUTONEG_ENABLE;
+ }
+ }
+
+ return status;
+}
+
+static u32 check_connection_type(struct mac_regs __iomem * regs)
+{
+ u32 status = 0;
+ u8 PHYSR0;
+ u16 ANAR;
+ PHYSR0 = readb(&regs->PHYSR0);
+
+ /*
+ if (!(PHYSR0 & PHYSR0_LINKGD))
+ status|=VELOCITY_LINK_FAIL;
+ */
+
+ if (PHYSR0 & PHYSR0_FDPX)
+ status |= VELOCITY_DUPLEX_FULL;
+
+ if (PHYSR0 & PHYSR0_SPDG)
+ status |= VELOCITY_SPEED_1000;
+ if (PHYSR0 & PHYSR0_SPD10)
+ status |= VELOCITY_SPEED_10;
+ else
+ status |= VELOCITY_SPEED_100;
+
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+ if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
+ == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
+ if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ status |= VELOCITY_AUTONEG_ENABLE;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * enable_flow_control_ability - flow control
+ * @vptr: velocity to configure
+ *
+ * Set up flow control according to the flow control options
+ * determined by the eeprom/configuration.
+ */
+
+static void enable_flow_control_ability(struct velocity_info *vptr)
+{
+
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ switch (vptr->options.flow_cntl) {
+
+ case FLOW_CNTL_DEFAULT:
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ else
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ else
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_TX:
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_RX:
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_TX_RX:
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ break;
+
+ case FLOW_CNTL_DISABLE:
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+
+/**
+ * velocity_ethtool_up - pre hook for ethtool
+ * @dev: network device
+ *
+ * Called before an ethtool operation. We need to make sure the
+ * chip is out of D3 state before we poke at it.
+ */
+
+static int velocity_ethtool_up(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D0);
+ return 0;
+}
+
+/**
+ * velocity_ethtool_down - post hook for ethtool
+ * @dev: network device
+ *
+ * Called after an ethtool operation. Restore the chip back to D3
+ * state if it isn't running.
+ */
+
+static void velocity_ethtool_down(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+}
+
+static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct velocity_info *vptr = dev->priv;
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ u32 status;
+ status = check_connection_type(vptr->mac_regs);
+
+ cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+ if (status & VELOCITY_SPEED_100)
+ cmd->speed = SPEED_100;
+ else
+ cmd->speed = SPEED_10;
+ cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->port = PORT_TP;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+
+ if (status & VELOCITY_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct velocity_info *vptr = dev->priv;
+ u32 curr_status;
+ u32 new_status = 0;
+ int ret = 0;
+
+ curr_status = check_connection_type(vptr->mac_regs);
+ curr_status &= (~VELOCITY_LINK_FAIL);
+
+ new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+ new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
+ new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
+ new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+
+ if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
+ ret = -EINVAL;
+ else
+ velocity_set_media_mode(vptr, new_status);
+
+ return ret;
+}
+
+static u32 velocity_get_link(struct net_device *dev)
+{
+ struct velocity_info *vptr = dev->priv;
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ /* ethtool expects a non-zero return when the link is up */
+ return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
+}
+
+static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct velocity_info *vptr = dev->priv;
+ strcpy(info->driver, VELOCITY_NAME);
+ strcpy(info->version, VELOCITY_VERSION);
+ strcpy(info->bus_info, pci_name(vptr->pdev));
+}
+
+static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct velocity_info *vptr = dev->priv;
+ wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
+ wol->wolopts |= WAKE_MAGIC;
+ /*
+ if (vptr->wol_opts & VELOCITY_WOL_PHY)
+ wol.wolopts|=WAKE_PHY;
+ */
+ if (vptr->wol_opts & VELOCITY_WOL_UCAST)
+ wol->wolopts |= WAKE_UCAST;
+ if (vptr->wol_opts & VELOCITY_WOL_ARP)
+ wol->wolopts |= WAKE_ARP;
+ memcpy(&wol->sopass, vptr->wol_passwd, 6);
+}
+
+static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct velocity_info *vptr = dev->priv;
+
+ if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
+ return -EFAULT;
+ vptr->wol_opts = VELOCITY_WOL_MAGIC;
+
+ /*
+ if (wol.wolopts & WAKE_PHY) {
+ vptr->wol_opts|=VELOCITY_WOL_PHY;
+ vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ */
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ vptr->wol_opts |= VELOCITY_WOL_MAGIC;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ if (wol->wolopts & WAKE_UCAST) {
+ vptr->wol_opts |= VELOCITY_WOL_UCAST;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ if (wol->wolopts & WAKE_ARP) {
+ vptr->wol_opts |= VELOCITY_WOL_ARP;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ memcpy(vptr->wol_passwd, wol->sopass, 6);
+ return 0;
+}
+
+static u32 velocity_get_msglevel(struct net_device *dev)
+{
+ return msglevel;
+}
+
+static void velocity_set_msglevel(struct net_device *dev, u32 value)
+{
+ msglevel = value;
+}
+
+static struct ethtool_ops velocity_ethtool_ops = {
+ .get_settings = velocity_get_settings,
+ .set_settings = velocity_set_settings,
+ .get_drvinfo = velocity_get_drvinfo,
+ .get_wol = velocity_ethtool_get_wol,
+ .set_wol = velocity_ethtool_set_wol,
+ .get_msglevel = velocity_get_msglevel,
+ .set_msglevel = velocity_set_msglevel,
+ .get_link = velocity_get_link,
+ .begin = velocity_ethtool_up,
+ .complete = velocity_ethtool_down
+};
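+
+/*
+ * Sketch (illustration only, hypothetical helper name): the ops table
+ * above only takes effect once it is attached to the net_device,
+ * presumably from the probe path, along the lines of:
+ */
+#if 0
+static void velocity_attach_ethtool(struct net_device *dev)
+{
+ dev->ethtool_ops = &velocity_ethtool_ops;
+}
+#endif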
+
+/**
+ * velocity_mii_ioctl - MII ioctl handler
+ * @dev: network device
+ * @ifr: the ifreq block for the ioctl
+ * @cmd: the command
+ *
+ * Process MII requests made via ioctl from the network layer. These
+ * are used by tools like kudzu to interrogate the link state of the
+ * hardware
+ */
+
+static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct velocity_info *vptr = dev->priv;
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ unsigned long flags;
+ struct mii_ioctl_data *miidata = if_mii(ifr);
+ int err;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
+ break;
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
+ return -ETIMEDOUT;
+ break;
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&vptr->lock, flags);
+ err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ check_connection_type(vptr->mac_regs);
+ if(err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
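+
+/*
+ * Userspace sketch (illustration only, not driver code; the helper name
+ * and error handling are hypothetical): how a tool such as mii-tool
+ * might exercise the SIOCGMIIPHY/SIOCGMIIREG path handled above to read
+ * the PHY's basic mode status register.
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+
+static int read_bmsr(int sock, const char *ifname, unsigned short *val)
+{
+ struct ifreq ifr;
+ struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
+
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+ if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0) /* fills in mii->phy_id */
+ return -1;
+ mii->reg_num = MII_BMSR;
+ if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
+ return -1;
+ *val = mii->val_out;
+ return 0;
+}
+#endif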
+
+#ifdef CONFIG_PM
+
+/**
+ * velocity_save_context - save registers
+ * @vptr: velocity
+ * @context: buffer for stored context
+ *
+ * Retrieve the current configuration from the velocity hardware
+ * and stash it in the context structure, for use by the context
+ * restore functions. This allows us to save things we need across
+ * power down states
+ */
+
+static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ u16 i;
+ u8 __iomem *ptr = (u8 __iomem *)regs;
+
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+ for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+}
+
+/**
+ * velocity_restore_context - restore registers
+ * @vptr: velocity
+ * @context: buffer for stored context
+ *
+ * Reload the register configuration from the velocity context
+ * created by velocity_save_context.
+ */
+
+static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ int i;
+ u8 __iomem *ptr = (u8 __iomem *)regs;
+
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) {
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+ }
+
+ /* Just skip cr0 */
+ for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
+ /* Clear */
+ writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
+ /* Set */
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
+ }
+
+ for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) {
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+ }
+
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) {
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+ }
+
+ for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) {
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
+ }
+
+}
+
+/**
+ * wol_calc_crc - WOL CRC
+ * @size: number of mask bytes (one mask byte covers 8 pattern bytes)
+ * @pattern: data pattern
+ * @mask_pattern: mask of pattern bytes to include in the CRC
+ *
+ * Compute the wake on lan crc hashes for the packet header
+ * we are interested in.
+ */
+
+static u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern)
+{
+ u16 crc = 0xFFFF;
+ u8 mask;
+ int i, j;
+
+ for (i = 0; i < size; i++) {
+ mask = mask_pattern[i];
+
+ /* Skip this loop if the mask equals to zero */
+ if (mask == 0x00)
+ continue;
+
+ for (j = 0; j < 8; j++) {
+ if ((mask & 0x01) == 0) {
+ mask >>= 1;
+ continue;
+ }
+ mask >>= 1;
+ crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
+ }
+ }
+ /* Finally, invert the result once to get the correct data */
+ crc = ~crc;
+ return bitreverse(crc) >> 16;
+}
+
+/**
+ * velocity_set_wol - set up for wake on lan
+ * @vptr: velocity to set WOL status on
+ *
+ * Set a card up for wake on lan either by unicast or by
+ * ARP packet.
+ *
+ * FIXME: check static buffer is safe here
+ */
+
+static int velocity_set_wol(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+ static u8 buf[256];
+ int i;
+
+ static u32 mask_pattern[2][4] = {
+ {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
+ {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
+ };
+
+ writew(0xFFFF, &regs->WOLCRClr);
+ writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
+ writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
+
+ /*
+ if (vptr->wol_opts & VELOCITY_WOL_PHY)
+ writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
+ */
+
+ if (vptr->wol_opts & VELOCITY_WOL_UCAST) {
+ writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
+ }
+
+ if (vptr->wol_opts & VELOCITY_WOL_ARP) {
+ struct arp_packet *arp = (struct arp_packet *) buf;
+ u16 crc;
+ memset(buf, 0, sizeof(struct arp_packet) + 7);
+
+ for (i = 0; i < 4; i++)
+ writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
+
+ arp->type = htons(ETH_P_ARP);
+ arp->ar_op = htons(1);
+
+ memcpy(arp->ar_tip, vptr->ip_addr, 4);
+
+ crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
+ (u8 *) & mask_pattern[0][0]);
+
+ writew(crc, &regs->PatternCRC[0]);
+ writew(WOLCR_ARP_EN, &regs->WOLCRSet);
+ }
+
+ BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
+ BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
+
+ writew(0x0FFF, &regs->WOLSRClr);
+
+ if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
+ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+
+ MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ }
+
+ if (vptr->mii_status & VELOCITY_SPEED_1000)
+ MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+
+ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
+
+ {
+ u8 GCR;
+ GCR = readb(&regs->CHIPGCR);
+ GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
+ writeb(GCR, &regs->CHIPGCR);
+ }
+
+ BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
+ /* Turn on SWPTAG just before entering power mode */
+ BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
+ /* Go to bed ..... */
+ BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
+
+ return 0;
+}
+
+static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = netdev_priv(dev);
+ unsigned long flags;
+
+ if(!netif_running(vptr->dev))
+ return 0;
+
+ netif_device_detach(vptr->dev);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ pci_save_state(pdev);
+#ifdef ETHTOOL_GWOL
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
+ velocity_get_ip(vptr);
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ velocity_set_wol(vptr);
+ pci_enable_wake(pdev, 3, 1);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+#else
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+#endif
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ return 0;
+}
+
+static int velocity_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+
+ if(!netif_running(vptr->dev))
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, 0, 0);
+ pci_restore_state(pdev);
+
+ mac_wol_reset(vptr->mac_regs);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ velocity_restore_context(vptr, &vptr->context);
+ velocity_init_registers(vptr, VELOCITY_INIT_WOL);
+ mac_disable_int(vptr->mac_regs);
+
+ velocity_tx_srv(vptr, 0);
+
+ for (i = 0; i < vptr->num_txq; i++) {
+ if (vptr->td_used[i]) {
+ mac_tx_queue_wake(vptr->mac_regs, i);
+ }
+ }
+
+ mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ netif_device_attach(vptr->dev);
+
+ return 0;
+}
+
+static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+
+ if (ifa) {
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct velocity_info *vptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
+ list_for_each_entry(vptr, &velocity_dev_list, list) {
+ if (vptr->dev == dev) {
+ velocity_get_ip(vptr);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+ }
+ return NOTIFY_DONE;
+}
+#endif
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
new file mode 100644
index 000000000000..1b70b7c97580
--- /dev/null
+++ b/drivers/net/via-velocity.h
@@ -0,0 +1,1879 @@
+/*
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This software may be redistributed and/or modified under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * File: via-velocity.h
+ *
+ * Purpose: Header file to define driver's private structures.
+ *
+ * Author: Chuang Liang-Shing, AJ Jiang
+ *
+ * Date: Jan 24, 2003
+ */
+
+
+#ifndef VELOCITY_H
+#define VELOCITY_H
+
+#define VELOCITY_TX_CSUM_SUPPORT
+
+#define VELOCITY_NAME "via-velocity"
+#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
+#define VELOCITY_VERSION "1.13"
+
+#define PKT_BUF_SZ 1540
+
+#define MAX_UNITS 8
+#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1}
+
+#define REV_ID_VT6110 (0)
+
+#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0)
+#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0)
+#define DWORD_REG_BITS_ON(x,p) do { writel(readl((p))|(x),(p));} while (0)
+
+#define BYTE_REG_BITS_IS_ON(x,p) (readb((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x,p) (readw((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x,p) (readl((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x,p) do { writeb(readb((p)) & (~(x)),(p));} while (0)
+#define WORD_REG_BITS_OFF(x,p) do { writew(readw((p)) & (~(x)),(p));} while (0)
+#define DWORD_REG_BITS_OFF(x,p) do { writel(readl((p)) & (~(x)),(p));} while (0)
+
+#define BYTE_REG_BITS_SET(x,m,p) do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0)
+#define WORD_REG_BITS_SET(x,m,p) do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0)
+#define DWORD_REG_BITS_SET(x,m,p) do { writel( (readl((p)) & (~(m)))|(x),(p));} while (0)
+
+#define VAR_USED(p) do {(p)=(p);} while (0)
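+
+/*
+ * Usage sketch (illustration only, hypothetical function name): the
+ * macros above wrap MMIO read-modify-write cycles on the register
+ * block. For instance, the EEPROM reload poll that mac_eeprom_reload()
+ * performs later in this file is built from them roughly as follows.
+ */
+#if 0
+static void example_eeprom_reload(struct mac_regs __iomem *regs)
+{
+ int i = 0;
+
+ BYTE_REG_BITS_ON(EECSR_RELOAD, &regs->EECSR);
+ while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD, &regs->EECSR)) {
+ udelay(10);
+ if (i++ > 0x1000)
+ break;
+ }
+}
+#endif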
+
+/*
+ * Purpose: Structures for MAX RX/TX descriptors.
+ */
+
+
+#define B_OWNED_BY_CHIP 1
+#define B_OWNED_BY_HOST 0
+
+/*
+ * Bits in the RSR0 register
+ */
+
+#define RSR_DETAG 0x0080
+#define RSR_SNTAG 0x0040
+#define RSR_RXER 0x0020
+#define RSR_RL 0x0010
+#define RSR_CE 0x0008
+#define RSR_FAE 0x0004
+#define RSR_CRC 0x0002
+#define RSR_VIDM 0x0001
+
+/*
+ * Bits in the RSR1 register
+ */
+
+#define RSR_RXOK 0x8000 // rx OK
+#define RSR_PFT 0x4000 // Perfect filtering address match
+#define RSR_MAR 0x2000 // MAC accept multicast address packet
+#define RSR_BAR 0x1000 // MAC accept broadcast address packet
+#define RSR_PHY 0x0800 // MAC accept physical address packet
+#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator
+#define RSR_STP 0x0200 // start of packet
+#define RSR_EDP 0x0100 // end of packet
+
+/*
+ * Bits in the RSR1 register
+ */
+
+#define RSR1_RXOK 0x80 // rx OK
+#define RSR1_PFT 0x40 // Perfect filtering address match
+#define RSR1_MAR 0x20 // MAC accept multicast address packet
+#define RSR1_BAR 0x10 // MAC accept broadcast address packet
+#define RSR1_PHY 0x08 // MAC accept physical address packet
+#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
+#define RSR1_STP 0x02 // start of packet
+#define RSR1_EDP 0x01 // end of packet
+
+/*
+ * Bits in the CSM register
+ */
+
+#define CSM_IPOK 0x40 //IP checksum validation ok
+#define CSM_TUPOK 0x20 //TCP/UDP checksum validation ok
+#define CSM_FRAG 0x10 //Fragment IP datagram
+#define CSM_IPKT 0x04 //Received an IP packet
+#define CSM_TCPKT 0x02 //Received a TCP packet
+#define CSM_UDPKT 0x01 //Received a UDP packet
+
+/*
+ * Bits in the TSR0 register
+ */
+
+#define TSR0_ABT 0x0080 // Tx abort because of excessive collision
+#define TSR0_OWT 0x0040 // Jumbo frame Tx abort
+#define TSR0_OWC 0x0020 // Out of window collision
+#define TSR0_COLS 0x0010 // experience collision in this transmit event
+#define TSR0_NCR3 0x0008 // collision retry counter[3]
+#define TSR0_NCR2 0x0004 // collision retry counter[2]
+#define TSR0_NCR1 0x0002 // collision retry counter[1]
+#define TSR0_NCR0 0x0001 // collision retry counter[0]
+#define TSR0_TERR 0x8000 //
+#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode
+#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode
+#define TSR0_LNKFL 0x1000 // packet serviced during link down
+#define TSR0_SHDN 0x0400 // shutdown case
+#define TSR0_CRS 0x0200 // carrier sense lost
+#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat)
+
+/*
+ * Bits in the TSR1 register
+ */
+
+#define TSR1_TERR 0x80 //
+#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
+#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
+#define TSR1_LNKFL 0x10 // packet serviced during link down
+#define TSR1_SHDN 0x04 // shutdown case
+#define TSR1_CRS 0x02 // carrier sense lost
+#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
+
+//
+// Bits in the TCR0 register
+//
+#define TCR0_TIC 0x80 // assert interrupt immediately once this descriptor has been sent
+#define TCR0_PIC 0x40 // priority interrupt request, INA# is issued over adaptive interrupt scheme
+#define TCR0_VETAG 0x20 // enable VLAN tag
+#define TCR0_IPCK 0x10 // request IP checksum calculation.
+#define TCR0_UDPCK 0x08 // request UDP checksum calculation.
+#define TCR0_TCPCK 0x04 // request TCP checksum calculation.
+#define TCR0_JMBO 0x02 // indicate a jumbo packet in GMAC side
+#define TCR0_CRC 0x01 // disable CRC generation
+
+#define TCPLS_NORMAL 3
+#define TCPLS_START 2
+#define TCPLS_END 1
+#define TCPLS_MED 0
+
+
+// max transmit or receive buffer size
+#define CB_RX_BUF_SIZE 2048UL // max buffer size
+ // NOTE: must be multiple of 4
+
+#define CB_MAX_RD_NUM 512 // MAX # of RD
+#define CB_MAX_TD_NUM 256 // MAX # of TD
+
+#define CB_INIT_RD_NUM_3119 128 // init # of RD, for setup VT3119
+#define CB_INIT_TD_NUM_3119 64 // init # of TD, for setup VT3119
+
+#define CB_INIT_RD_NUM 128 // init # of RD, for setup default
+#define CB_INIT_TD_NUM 64 // init # of TD, for setup default
+
+// for 3119
+#define CB_TD_RING_NUM 4 // # of TD rings.
+#define CB_MAX_SEG_PER_PKT 7 // max data seg per packet (Tx)
+
+
+/*
+ * If collisions exceed 15 times, tx will abort, and
+ * if the tx fifo underflows, tx will fail;
+ * we should try to resend it
+ */
+
+#define CB_MAX_TX_ABORT_RETRY 3
+
+/*
+ * Receive descriptor
+ */
+
+struct rdesc0 {
+ u16 RSR; /* Receive status */
+ u16 len:14; /* Received packet length */
+ u16 reserved:1;
+ u16 owner:1; /* Who owns this buffer ? */
+};
+
+struct rdesc1 {
+ u16 PQTAG;
+ u8 CSM;
+ u8 IPKT;
+};
+
+struct rx_desc {
+ struct rdesc0 rdesc0;
+ struct rdesc1 rdesc1;
+ u32 pa_low; /* Low 32 bit PCI address */
+ u16 pa_high; /* Next 16 bit PCI address (48 total) */
+ u16 len:15; /* Frame size */
+ u16 inten:1; /* Enable interrupt */
+} __attribute__ ((__packed__));
+
+/*
+ * Transmit descriptor
+ */
+
+struct tdesc0 {
+ u16 TSR; /* Transmit status register */
+ u16 pktsize:14; /* Size of frame */
+ u16 reserved:1;
+ u16 owner:1; /* Who owns the buffer */
+};
+
+struct pqinf { /* Priority queue info */
+ u16 VID:12;
+ u16 CFI:1;
+ u16 priority:3;
+} __attribute__ ((__packed__));
+
+struct tdesc1 {
+ struct pqinf pqinf;
+ u8 TCR;
+ u8 TCPLS:2;
+ u8 reserved:2;
+ u8 CMDZ:4;
+} __attribute__ ((__packed__));
+
+struct td_buf {
+ u32 pa_low;
+ u16 pa_high;
+ u16 bufsize:14;
+ u16 reserved:1;
+ u16 queue:1;
+} __attribute__ ((__packed__));
+
+struct tx_desc {
+ struct tdesc0 tdesc0;
+ struct tdesc1 tdesc1;
+ struct td_buf td_buf[7];
+};
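+
+/*
+ * Sketch (illustration only, hypothetical helper name): rx_desc and
+ * td_buf both carry a 48 bit bus address split into a 32 bit low part
+ * and a 16 bit high part, so filling in a buffer pointer from a
+ * dma_addr_t looks roughly like this.
+ */
+#if 0
+static inline void velocity_set_td_buf_addr(struct td_buf *tdb, dma_addr_t addr, u16 len)
+{
+ tdb->pa_low = (u32) addr;
+ tdb->pa_high = (u16) (((u64) addr >> 32) & 0xffff);
+ tdb->bufsize = len;
+}
+#endif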
+
+struct velocity_rd_info {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+};
+
+/**
+ * alloc_rd_info - allocate an rd info block
+ *
+ * Allocate and initialize a receive info structure used for keeping
+ * track of kernel side information related to each receive
+ * descriptor we are using
+ */
+
+static inline struct velocity_rd_info *alloc_rd_info(void)
+{
+ struct velocity_rd_info *ptr;
+
+ ptr = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC);
+ if (ptr != NULL)
+ memset(ptr, 0, sizeof(struct velocity_rd_info));
+ return ptr;
+}
+
+/*
+ * Used to track transmit side buffers.
+ */
+
+struct velocity_td_info {
+ struct sk_buff *skb;
+ u8 *buf;
+ int nskb_dma;
+ dma_addr_t skb_dma[7];
+ dma_addr_t buf_dma;
+};
+
+enum velocity_owner {
+ OWNED_BY_HOST = 0,
+ OWNED_BY_NIC = 1
+};
+
+
+/*
+ * MAC registers and macros.
+ */
+
+
+#define MCAM_SIZE 64
+#define VCAM_SIZE 64
+#define TX_QUEUE_NO 4
+
+#define MAX_HW_MIB_COUNTER 32
+#define VELOCITY_MIN_MTU (1514-14)
+#define VELOCITY_MAX_MTU (9000)
+
+/*
+ * Registers in the MAC
+ */
+
+#define MAC_REG_PAR 0x00 // physical address
+#define MAC_REG_RCR 0x06
+#define MAC_REG_TCR 0x07
+#define MAC_REG_CR0_SET 0x08
+#define MAC_REG_CR1_SET 0x09
+#define MAC_REG_CR2_SET 0x0A
+#define MAC_REG_CR3_SET 0x0B
+#define MAC_REG_CR0_CLR 0x0C
+#define MAC_REG_CR1_CLR 0x0D
+#define MAC_REG_CR2_CLR 0x0E
+#define MAC_REG_CR3_CLR 0x0F
+#define MAC_REG_MAR 0x10
+#define MAC_REG_CAM 0x10
+#define MAC_REG_DEC_BASE_HI 0x18
+#define MAC_REG_DBF_BASE_HI 0x1C
+#define MAC_REG_ISR_CTL 0x20
+#define MAC_REG_ISR_HOTMR 0x20
+#define MAC_REG_ISR_TSUPTHR 0x20
+#define MAC_REG_ISR_RSUPTHR 0x20
+#define MAC_REG_ISR_CTL1 0x21
+#define MAC_REG_TXE_SR 0x22
+#define MAC_REG_RXE_SR 0x23
+#define MAC_REG_ISR 0x24
+#define MAC_REG_ISR0 0x24
+#define MAC_REG_ISR1 0x25
+#define MAC_REG_ISR2 0x26
+#define MAC_REG_ISR3 0x27
+#define MAC_REG_IMR 0x28
+#define MAC_REG_IMR0 0x28
+#define MAC_REG_IMR1 0x29
+#define MAC_REG_IMR2 0x2A
+#define MAC_REG_IMR3 0x2B
+#define MAC_REG_TDCSR_SET 0x30
+#define MAC_REG_RDCSR_SET 0x32
+#define MAC_REG_TDCSR_CLR 0x34
+#define MAC_REG_RDCSR_CLR 0x36
+#define MAC_REG_RDBASE_LO 0x38
+#define MAC_REG_RDINDX 0x3C
+#define MAC_REG_TDBASE_LO 0x40
+#define MAC_REG_RDCSIZE 0x50
+#define MAC_REG_TDCSIZE 0x52
+#define MAC_REG_TDINDX 0x54
+#define MAC_REG_TDIDX0 0x54
+#define MAC_REG_TDIDX1 0x56
+#define MAC_REG_TDIDX2 0x58
+#define MAC_REG_TDIDX3 0x5A
+#define MAC_REG_PAUSE_TIMER 0x5C
+#define MAC_REG_RBRDU 0x5E
+#define MAC_REG_FIFO_TEST0 0x60
+#define MAC_REG_FIFO_TEST1 0x64
+#define MAC_REG_CAMADDR 0x68
+#define MAC_REG_CAMCR 0x69
+#define MAC_REG_GFTEST 0x6A
+#define MAC_REG_FTSTCMD 0x6B
+#define MAC_REG_MIICFG 0x6C
+#define MAC_REG_MIISR 0x6D
+#define MAC_REG_PHYSR0 0x6E
+#define MAC_REG_PHYSR1 0x6F
+#define MAC_REG_MIICR 0x70
+#define MAC_REG_MIIADR 0x71
+#define MAC_REG_MIIDATA 0x72
+#define MAC_REG_SOFT_TIMER0 0x74
+#define MAC_REG_SOFT_TIMER1 0x76
+#define MAC_REG_CFGA 0x78
+#define MAC_REG_CFGB 0x79
+#define MAC_REG_CFGC 0x7A
+#define MAC_REG_CFGD 0x7B
+#define MAC_REG_DCFG0 0x7C
+#define MAC_REG_DCFG1 0x7D
+#define MAC_REG_MCFG0 0x7E
+#define MAC_REG_MCFG1 0x7F
+
+#define MAC_REG_TBIST 0x80
+#define MAC_REG_RBIST 0x81
+#define MAC_REG_PMCC 0x82
+#define MAC_REG_STICKHW 0x83
+#define MAC_REG_MIBCR 0x84
+#define MAC_REG_EERSV 0x85
+#define MAC_REG_REVID 0x86
+#define MAC_REG_MIBREAD 0x88
+#define MAC_REG_BPMA 0x8C
+#define MAC_REG_EEWR_DATA 0x8C
+#define MAC_REG_BPMD_WR 0x8F
+#define MAC_REG_BPCMD 0x90
+#define MAC_REG_BPMD_RD 0x91
+#define MAC_REG_EECHKSUM 0x92
+#define MAC_REG_EECSR 0x93
+#define MAC_REG_EERD_DATA 0x94
+#define MAC_REG_EADDR 0x96
+#define MAC_REG_EMBCMD 0x97
+#define MAC_REG_JMPSR0 0x98
+#define MAC_REG_JMPSR1 0x99
+#define MAC_REG_JMPSR2 0x9A
+#define MAC_REG_JMPSR3 0x9B
+#define MAC_REG_CHIPGSR 0x9C
+#define MAC_REG_TESTCFG 0x9D
+#define MAC_REG_DEBUG 0x9E
+#define MAC_REG_CHIPGCR 0x9F
+#define MAC_REG_WOLCR0_SET 0xA0
+#define MAC_REG_WOLCR1_SET 0xA1
+#define MAC_REG_PWCFG_SET 0xA2
+#define MAC_REG_WOLCFG_SET 0xA3
+#define MAC_REG_WOLCR0_CLR 0xA4
+#define MAC_REG_WOLCR1_CLR 0xA5
+#define MAC_REG_PWCFG_CLR 0xA6
+#define MAC_REG_WOLCFG_CLR 0xA7
+#define MAC_REG_WOLSR0_SET 0xA8
+#define MAC_REG_WOLSR1_SET 0xA9
+#define MAC_REG_WOLSR0_CLR 0xAC
+#define MAC_REG_WOLSR1_CLR 0xAD
+#define MAC_REG_PATRN_CRC0 0xB0
+#define MAC_REG_PATRN_CRC1 0xB2
+#define MAC_REG_PATRN_CRC2 0xB4
+#define MAC_REG_PATRN_CRC3 0xB6
+#define MAC_REG_PATRN_CRC4 0xB8
+#define MAC_REG_PATRN_CRC5 0xBA
+#define MAC_REG_PATRN_CRC6 0xBC
+#define MAC_REG_PATRN_CRC7 0xBE
+#define MAC_REG_BYTEMSK0_0 0xC0
+#define MAC_REG_BYTEMSK0_1 0xC4
+#define MAC_REG_BYTEMSK0_2 0xC8
+#define MAC_REG_BYTEMSK0_3 0xCC
+#define MAC_REG_BYTEMSK1_0 0xD0
+#define MAC_REG_BYTEMSK1_1 0xD4
+#define MAC_REG_BYTEMSK1_2 0xD8
+#define MAC_REG_BYTEMSK1_3 0xDC
+#define MAC_REG_BYTEMSK2_0 0xE0
+#define MAC_REG_BYTEMSK2_1 0xE4
+#define MAC_REG_BYTEMSK2_2 0xE8
+#define MAC_REG_BYTEMSK2_3 0xEC
+#define MAC_REG_BYTEMSK3_0 0xF0
+#define MAC_REG_BYTEMSK3_1 0xF4
+#define MAC_REG_BYTEMSK3_2 0xF8
+#define MAC_REG_BYTEMSK3_3 0xFC
+
+/*
+ * Bits in the RCR register
+ */
+
+#define RCR_AS 0x80
+#define RCR_AP 0x40
+#define RCR_AL 0x20
+#define RCR_PROM 0x10
+#define RCR_AB 0x08
+#define RCR_AM 0x04
+#define RCR_AR 0x02
+#define RCR_SEP 0x01
+
+/*
+ * Bits in the TCR register
+ */
+
+#define TCR_TB2BDIS 0x80
+#define TCR_COLTMC1 0x08
+#define TCR_COLTMC0 0x04
+#define TCR_LB1 0x02 /* loopback[1] */
+#define TCR_LB0 0x01 /* loopback[0] */
+
+/*
+ * Bits in the CR0 register
+ */
+
+#define CR0_TXON 0x00000008UL
+#define CR0_RXON 0x00000004UL
+#define CR0_STOP 0x00000002UL /* stop MAC, default = 1 */
+#define CR0_STRT 0x00000001UL /* start MAC */
+#define CR0_SFRST 0x00008000UL /* software reset */
+#define CR0_TM1EN 0x00004000UL
+#define CR0_TM0EN 0x00002000UL
+#define CR0_DPOLL 0x00000800UL /* disable rx/tx auto polling */
+#define CR0_DISAU 0x00000100UL
+#define CR0_XONEN 0x00800000UL
+#define CR0_FDXTFCEN 0x00400000UL /* full-duplex TX flow control enable */
+#define CR0_FDXRFCEN 0x00200000UL /* full-duplex RX flow control enable */
+#define CR0_HDXFCEN 0x00100000UL /* half-duplex flow control enable */
+#define CR0_XHITH1 0x00080000UL /* TX XON high threshold 1 */
+#define CR0_XHITH0 0x00040000UL /* TX XON high threshold 0 */
+#define CR0_XLTH1 0x00020000UL /* TX pause frame low threshold 1 */
+#define CR0_XLTH0 0x00010000UL /* TX pause frame low threshold 0 */
+#define CR0_GSPRST 0x80000000UL
+#define CR0_FORSRST 0x40000000UL
+#define CR0_FPHYRST 0x20000000UL
+#define CR0_DIAG 0x10000000UL
+#define CR0_INTPCTL 0x04000000UL
+#define CR0_GINTMSK1 0x02000000UL
+#define CR0_GINTMSK0 0x01000000UL
+
+/*
+ * Bits in the CR1 register
+ */
+
+#define CR1_SFRST 0x80 /* software reset */
+#define CR1_TM1EN 0x40
+#define CR1_TM0EN 0x20
+#define CR1_DPOLL 0x08 /* disable rx/tx auto polling */
+#define CR1_DISAU 0x01
+
+/*
+ * Bits in the CR2 register
+ */
+
+#define CR2_XONEN 0x80
+#define CR2_FDXTFCEN 0x40 /* full-duplex TX flow control enable */
+#define CR2_FDXRFCEN 0x20 /* full-duplex RX flow control enable */
+#define CR2_HDXFCEN 0x10 /* half-duplex flow control enable */
+#define CR2_XHITH1 0x08 /* TX XON high threshold 1 */
+#define CR2_XHITH0 0x04 /* TX XON high threshold 0 */
+#define CR2_XLTH1 0x02 /* TX pause frame low threshold 1 */
+#define CR2_XLTH0 0x01 /* TX pause frame low threshold 0 */
+
+/*
+ * Bits in the CR3 register
+ */
+
+#define CR3_GSPRST 0x80
+#define CR3_FORSRST 0x40
+#define CR3_FPHYRST 0x20
+#define CR3_DIAG 0x10
+#define CR3_INTPCTL 0x04
+#define CR3_GINTMSK1 0x02
+#define CR3_GINTMSK0 0x01
+
+#define ISRCTL_UDPINT 0x8000
+#define ISRCTL_TSUPDIS 0x4000
+#define ISRCTL_RSUPDIS 0x2000
+#define ISRCTL_PMSK1 0x1000
+#define ISRCTL_PMSK0 0x0800
+#define ISRCTL_INTPD 0x0400
+#define ISRCTL_HCRLD 0x0200
+#define ISRCTL_SCRLD 0x0100
+
+/*
+ * Bits in the ISR_CTL1 register
+ */
+
+#define ISRCTL1_UDPINT 0x80
+#define ISRCTL1_TSUPDIS 0x40
+#define ISRCTL1_RSUPDIS 0x20
+#define ISRCTL1_PMSK1 0x10
+#define ISRCTL1_PMSK0 0x08
+#define ISRCTL1_INTPD 0x04
+#define ISRCTL1_HCRLD 0x02
+#define ISRCTL1_SCRLD 0x01
+
+/*
+ * Bits in the TXE_SR register
+ */
+
+#define TXESR_TFDBS 0x08
+#define TXESR_TDWBS 0x04
+#define TXESR_TDRBS 0x02
+#define TXESR_TDSTR 0x01
+
+/*
+ * Bits in the RXE_SR register
+ */
+
+#define RXESR_RFDBS 0x08
+#define RXESR_RDWBS 0x04
+#define RXESR_RDRBS 0x02
+#define RXESR_RDSTR 0x01
+
+/*
+ * Bits in the ISR register
+ */
+
+#define ISR_ISR3 0x80000000UL
+#define ISR_ISR2 0x40000000UL
+#define ISR_ISR1 0x20000000UL
+#define ISR_ISR0 0x10000000UL
+#define ISR_TXSTLI 0x02000000UL
+#define ISR_RXSTLI 0x01000000UL
+#define ISR_HFLD 0x00800000UL
+#define ISR_UDPI 0x00400000UL
+#define ISR_MIBFI 0x00200000UL
+#define ISR_SHDNI 0x00100000UL
+#define ISR_PHYI 0x00080000UL
+#define ISR_PWEI 0x00040000UL
+#define ISR_TMR1I 0x00020000UL
+#define ISR_TMR0I 0x00010000UL
+#define ISR_SRCI 0x00008000UL
+#define ISR_LSTPEI 0x00004000UL
+#define ISR_LSTEI 0x00002000UL
+#define ISR_OVFI 0x00001000UL
+#define ISR_FLONI 0x00000800UL
+#define ISR_RACEI 0x00000400UL
+#define ISR_TXWB1I 0x00000200UL
+#define ISR_TXWB0I 0x00000100UL
+#define ISR_PTX3I 0x00000080UL
+#define ISR_PTX2I 0x00000040UL
+#define ISR_PTX1I 0x00000020UL
+#define ISR_PTX0I 0x00000010UL
+#define ISR_PTXI 0x00000008UL
+#define ISR_PRXI 0x00000004UL
+#define ISR_PPTXI 0x00000002UL
+#define ISR_PPRXI 0x00000001UL
+
+/*
+ * Bits in the IMR register
+ */
+
+#define IMR_TXSTLM 0x02000000UL
+#define IMR_UDPIM 0x00400000UL
+#define IMR_MIBFIM 0x00200000UL
+#define IMR_SHDNIM 0x00100000UL
+#define IMR_PHYIM 0x00080000UL
+#define IMR_PWEIM 0x00040000UL
+#define IMR_TMR1IM 0x00020000UL
+#define IMR_TMR0IM 0x00010000UL
+
+#define IMR_SRCIM 0x00008000UL
+#define IMR_LSTPEIM 0x00004000UL
+#define IMR_LSTEIM 0x00002000UL
+#define IMR_OVFIM 0x00001000UL
+#define IMR_FLONIM 0x00000800UL
+#define IMR_RACEIM 0x00000400UL
+#define IMR_TXWB1IM 0x00000200UL
+#define IMR_TXWB0IM 0x00000100UL
+
+#define IMR_PTX3IM 0x00000080UL
+#define IMR_PTX2IM 0x00000040UL
+#define IMR_PTX1IM 0x00000020UL
+#define IMR_PTX0IM 0x00000010UL
+#define IMR_PTXIM 0x00000008UL
+#define IMR_PRXIM 0x00000004UL
+#define IMR_PPTXIM 0x00000002UL
+#define IMR_PPRXIM 0x00000001UL
+
+/* 0x0013FB0FUL = initial value of IMR */
+
+#define INT_MASK_DEF (IMR_PPTXIM|IMR_PPRXIM|IMR_PTXIM|IMR_PRXIM|\
+ IMR_PWEIM|IMR_TXWB0IM|IMR_TXWB1IM|IMR_FLONIM|\
+ IMR_OVFIM|IMR_LSTEIM|IMR_LSTPEIM|IMR_SRCIM|IMR_MIBFIM|\
+ IMR_SHDNIM|IMR_TMR1IM|IMR_TMR0IM|IMR_TXSTLM)
+
+/*
+ * Bits in the TDCSR0/1, RDCSR0 register
+ */
+
+#define TRDCSR_DEAD 0x0008
+#define TRDCSR_WAK 0x0004
+#define TRDCSR_ACT 0x0002
+#define TRDCSR_RUN 0x0001
+
+/*
+ * Bits in the CAMADDR register
+ */
+
+#define CAMADDR_CAMEN 0x80
+#define CAMADDR_VCAMSL 0x40
+
+/*
+ * Bits in the CAMCR register
+ */
+
+#define CAMCR_PS1 0x80
+#define CAMCR_PS0 0x40
+#define CAMCR_AITRPKT 0x20
+#define CAMCR_AITR16 0x10
+#define CAMCR_CAMRD 0x08
+#define CAMCR_CAMWR 0x04
+#define CAMCR_PS_CAM_MASK 0x40
+#define CAMCR_PS_CAM_DATA 0x80
+#define CAMCR_PS_MAR 0x00
+
+/*
+ * Bits in the MIICFG register
+ */
+
+#define MIICFG_MPO1 0x80
+#define MIICFG_MPO0 0x40
+#define MIICFG_MFDC 0x20
+
+/*
+ * Bits in the MIISR register
+ */
+
+#define MIISR_MIDLE 0x80
+
+/*
+ * Bits in the PHYSR0 register
+ */
+
+#define PHYSR0_PHYRST 0x80
+#define PHYSR0_LINKGD 0x40
+#define PHYSR0_FDPX 0x10
+#define PHYSR0_SPDG 0x08
+#define PHYSR0_SPD10 0x04
+#define PHYSR0_RXFLC 0x02
+#define PHYSR0_TXFLC 0x01
+
+/*
+ * Bits in the PHYSR1 register
+ */
+
+#define PHYSR1_PHYTBI 0x01
+
+/*
+ * Bits in the MIICR register
+ */
+
+#define MIICR_MAUTO 0x80
+#define MIICR_RCMD 0x40
+#define MIICR_WCMD 0x20
+#define MIICR_MDPM 0x10
+#define MIICR_MOUT 0x08
+#define MIICR_MDO 0x04
+#define MIICR_MDI 0x02
+#define MIICR_MDC 0x01
+
+/*
+ * Bits in the MIIADR register
+ */
+
+#define MIIADR_SWMPL 0x80
+
+/*
+ * Bits in the CFGA register
+ */
+
+#define CFGA_PMHCTG 0x08
+#define CFGA_GPIO1PD 0x04
+#define CFGA_ABSHDN 0x02
+#define CFGA_PACPI 0x01
+
+/*
+ * Bits in the CFGB register
+ */
+
+#define CFGB_GTCKOPT 0x80
+#define CFGB_MIIOPT 0x40
+#define CFGB_CRSEOPT 0x20
+#define CFGB_OFSET 0x10
+#define CFGB_CRANDOM 0x08
+#define CFGB_CAP 0x04
+#define CFGB_MBA 0x02
+#define CFGB_BAKOPT 0x01
+
+/*
+ * Bits in the CFGC register
+ */
+
+#define CFGC_EELOAD 0x80
+#define CFGC_BROPT 0x40
+#define CFGC_DLYEN 0x20
+#define CFGC_DTSEL 0x10
+#define CFGC_BTSEL 0x08
+#define CFGC_BPS2 0x04 /* bootrom select[2] */
+#define CFGC_BPS1 0x02 /* bootrom select[1] */
+#define CFGC_BPS0 0x01 /* bootrom select[0] */
+
+/*
+ * Bits in the CFGD register
+ */
+
+#define CFGD_IODIS 0x80
+#define CFGD_MSLVDACEN 0x40
+#define CFGD_CFGDACEN 0x20
+#define CFGD_PCI64EN 0x10
+#define CFGD_HTMRL4 0x08
+
+/*
+ * Bits in the DCFG1 register
+ */
+
+#define DCFG_XMWI 0x8000
+#define DCFG_XMRM 0x4000
+#define DCFG_XMRL 0x2000
+#define DCFG_PERDIS 0x1000
+#define DCFG_MRWAIT 0x0400
+#define DCFG_MWWAIT 0x0200
+#define DCFG_LATMEN 0x0100
+
+/*
+ * Bits in the MCFG0 register
+ */
+
+#define MCFG_RXARB 0x0080
+#define MCFG_RFT1 0x0020
+#define MCFG_RFT0 0x0010
+#define MCFG_LOWTHOPT 0x0008
+#define MCFG_PQEN 0x0004
+#define MCFG_RTGOPT 0x0002
+#define MCFG_VIDFR 0x0001
+
+/*
+ * Bits in the MCFG1 register
+ */
+
+#define MCFG_TXARB 0x8000
+#define MCFG_TXQBK1 0x0800
+#define MCFG_TXQBK0 0x0400
+#define MCFG_TXQNOBK 0x0200
+#define MCFG_SNAPOPT 0x0100
+
+/*
+ * Bits in the PMCC register
+ */
+
+#define PMCC_DSI 0x80
+#define PMCC_D2_DIS 0x40
+#define PMCC_D1_DIS 0x20
+#define PMCC_D3C_EN 0x10
+#define PMCC_D3H_EN 0x08
+#define PMCC_D2_EN 0x04
+#define PMCC_D1_EN 0x02
+#define PMCC_D0_EN 0x01
+
+/*
+ * Bits in STICKHW
+ */
+
+#define STICKHW_SWPTAG 0x10
+#define STICKHW_WOLSR 0x08
+#define STICKHW_WOLEN 0x04
+#define STICKHW_DS1 0x02 /* R/W by software/cfg cycle */
+#define STICKHW_DS0 0x01 /* suspend well DS write port */
+
+/*
+ * Bits in the MIBCR register
+ */
+
+#define MIBCR_MIBISTOK 0x80
+#define MIBCR_MIBISTGO 0x40
+#define MIBCR_MIBINC 0x20
+#define MIBCR_MIBHI 0x10
+#define MIBCR_MIBFRZ 0x08
+#define MIBCR_MIBFLSH 0x04
+#define MIBCR_MPTRINI 0x02
+#define MIBCR_MIBCLR 0x01
+
+/*
+ * Bits in the EERSV register
+ */
+
+#define EERSV_BOOT_RPL ((u8) 0x01) /* Boot method selection for VT6110 */
+
+#define EERSV_BOOT_MASK ((u8) 0x06)
+#define EERSV_BOOT_INT19 ((u8) 0x00)
+#define EERSV_BOOT_INT18 ((u8) 0x02)
+#define EERSV_BOOT_LOCAL ((u8) 0x04)
+#define EERSV_BOOT_BEV ((u8) 0x06)
+
+
+/*
+ * Bits in BPCMD
+ */
+
+#define BPCMD_BPDNE 0x80
+#define BPCMD_EBPWR 0x02
+#define BPCMD_EBPRD 0x01
+
+/*
+ * Bits in the EECSR register
+ */
+
+#define EECSR_EMBP 0x40 /* eeprom embedded programming */
+#define EECSR_RELOAD 0x20 /* eeprom content reload */
+#define EECSR_DPM 0x10 /* eeprom direct programming */
+#define EECSR_ECS 0x08 /* eeprom CS pin */
+#define EECSR_ECK 0x04 /* eeprom CK pin */
+#define EECSR_EDI 0x02 /* eeprom DI pin */
+#define EECSR_EDO 0x01 /* eeprom DO pin */
+
+/*
+ * Bits in the EMBCMD register
+ */
+
+#define EMBCMD_EDONE 0x80
+#define EMBCMD_EWDIS 0x08
+#define EMBCMD_EWEN 0x04
+#define EMBCMD_EWR 0x02
+#define EMBCMD_ERD 0x01
+
+/*
+ * Bits in TESTCFG register
+ */
+
+#define TESTCFG_HBDIS 0x80
+
+/*
+ * Bits in CHIPGCR register
+ */
+
+#define CHIPGCR_FCGMII 0x80
+#define CHIPGCR_FCFDX 0x40
+#define CHIPGCR_FCRESV 0x20
+#define CHIPGCR_FCMODE 0x10
+#define CHIPGCR_LPSOPT 0x08
+#define CHIPGCR_TM1US 0x04
+#define CHIPGCR_TM0US 0x02
+#define CHIPGCR_PHYINTEN 0x01
+
+/*
+ * Bits in WOLCR0
+ */
+
+#define WOLCR_MSWOLEN7 0x0080 /* enable pattern match filtering */
+#define WOLCR_MSWOLEN6 0x0040
+#define WOLCR_MSWOLEN5 0x0020
+#define WOLCR_MSWOLEN4 0x0010
+#define WOLCR_MSWOLEN3 0x0008
+#define WOLCR_MSWOLEN2 0x0004
+#define WOLCR_MSWOLEN1 0x0002
+#define WOLCR_MSWOLEN0 0x0001
+#define WOLCR_ARP_EN 0x0001
+
+/*
+ * Bits in WOLCR1
+ */
+
+#define WOLCR_LINKOFF_EN 0x0800 /* link off detected enable */
+#define WOLCR_LINKON_EN 0x0400 /* link on detected enable */
+#define WOLCR_MAGIC_EN 0x0200 /* magic packet filter enable */
+#define WOLCR_UNICAST_EN 0x0100 /* unicast filter enable */
+
+
+/*
+ * Bits in PWCFG
+ */
+
+#define PWCFG_PHYPWOPT 0x80 /* internal MII I/F timing */
+#define PWCFG_PCISTICK 0x40 /* PCI sticky R/W enable */
+#define PWCFG_WOLTYPE 0x20 /* pulse(1) or button (0) */
+#define PWCFG_LEGCY_WOL 0x10
+#define PWCFG_PMCSR_PME_SR 0x08
+#define PWCFG_PMCSR_PME_EN 0x04 /* control by PCISTICK */
+#define PWCFG_LEGACY_WOLSR 0x02 /* Legacy WOL_SR shadow */
+#define PWCFG_LEGACY_WOLEN 0x01 /* Legacy WOL_EN shadow */
+
+/*
+ * Bits in WOLCFG
+ */
+
+#define WOLCFG_PMEOVR 0x80 /* for legacy use, force PMEEN always */
+#define WOLCFG_SAM 0x20 /* accept multicast case reset, default=0 */
+#define WOLCFG_SAB 0x10 /* accept broadcast case reset, default=0 */
+#define WOLCFG_SMIIACC 0x08 /* ?? */
+#define WOLCFG_SGENWH 0x02
+#define WOLCFG_PHYINTEN 0x01 /* 0:PHYINT trigger enable, 1:use internal MII
+ to report status change */
+/*
+ * Bits in WOLSR1
+ */
+
+#define WOLSR_LINKOFF_INT 0x0800
+#define WOLSR_LINKON_INT 0x0400
+#define WOLSR_MAGIC_INT 0x0200
+#define WOLSR_UNICAST_INT 0x0100
+
+/*
+ * Ethernet address filter type
+ */
+
+#define PKT_TYPE_NONE 0x0000 /* Turn off receiver */
+#define PKT_TYPE_DIRECTED 0x0001 /* obsolete, directed address is always accepted */
+#define PKT_TYPE_MULTICAST 0x0002
+#define PKT_TYPE_ALL_MULTICAST 0x0004
+#define PKT_TYPE_BROADCAST 0x0008
+#define PKT_TYPE_PROMISCUOUS 0x0020
+#define PKT_TYPE_LONG 0x2000 /* NOTE.... the definition of LONG is >2048 bytes in our chip */
+#define PKT_TYPE_RUNT 0x4000
+#define PKT_TYPE_ERROR 0x8000 /* Accept error packets, e.g. CRC error */
+
+/*
+ * Loopback mode
+ */
+
+#define MAC_LB_NONE 0x00
+#define MAC_LB_INTERNAL 0x01
+#define MAC_LB_EXTERNAL 0x02
+
+/*
+ * Enabled mask value of irq
+ */
+
+#if defined(_SIM)
+#define IMR_MASK_VALUE 0x0033FF0FUL /* initial value of IMR
+ set IMR0 to 0x0F according to spec */
+
+#else
+#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
+ ignore MIBFI,RACEI to
+ reduce intr. frequency
+ NOTE.... do not enable NoBuf int mask in the driver
+ when (1) NoBuf -> RxThreshold = SF
+ (2) OK -> RxThreshold = original value
+ */
+#endif
+
+/*
+ * Revision id
+ */
+
+#define REV_ID_VT3119_A0 0x00
+#define REV_ID_VT3119_A1 0x01
+#define REV_ID_VT3216_A0 0x10
+
+/*
+ * Max time out delay time
+ */
+
+#define W_MAX_TIMEOUT 0x0FFFU
+
+
+/*
+ * MAC registers as a structure. Cannot be directly accessed this
+ * way but generates offsets for readl/writel() calls
+ */
+
+struct mac_regs {
+ volatile u8 PAR[6]; /* 0x00 */
+ volatile u8 RCR;
+ volatile u8 TCR;
+
+ volatile u32 CR0Set; /* 0x08 */
+ volatile u32 CR0Clr; /* 0x0C */
+
+ volatile u8 MARCAM[8]; /* 0x10 */
+
+ volatile u32 DecBaseHi; /* 0x18 */
+ volatile u16 DbfBaseHi; /* 0x1C */
+ volatile u16 reserved_1E;
+
+ volatile u16 ISRCTL; /* 0x20 */
+ volatile u8 TXESR;
+ volatile u8 RXESR;
+
+ volatile u32 ISR; /* 0x24 */
+ volatile u32 IMR;
+
+ volatile u32 TDStatusPort; /* 0x2C */
+
+ volatile u16 TDCSRSet; /* 0x30 */
+ volatile u8 RDCSRSet;
+ volatile u8 reserved_33;
+ volatile u16 TDCSRClr;
+ volatile u8 RDCSRClr;
+ volatile u8 reserved_37;
+
+ volatile u32 RDBaseLo; /* 0x38 */
+ volatile u16 RDIdx; /* 0x3C */
+ volatile u16 reserved_3E;
+
+ volatile u32 TDBaseLo[4]; /* 0x40 */
+
+ volatile u16 RDCSize; /* 0x50 */
+ volatile u16 TDCSize; /* 0x52 */
+ volatile u16 TDIdx[4]; /* 0x54 */
+ volatile u16 tx_pause_timer; /* 0x5C */
+ volatile u16 RBRDU; /* 0x5E */
+
+ volatile u32 FIFOTest0; /* 0x60 */
+ volatile u32 FIFOTest1; /* 0x64 */
+
+ volatile u8 CAMADDR; /* 0x68 */
+ volatile u8 CAMCR; /* 0x69 */
+ volatile u8 GFTEST; /* 0x6A */
+ volatile u8 FTSTCMD; /* 0x6B */
+
+ volatile u8 MIICFG; /* 0x6C */
+ volatile u8 MIISR;
+ volatile u8 PHYSR0;
+ volatile u8 PHYSR1;
+ volatile u8 MIICR;
+ volatile u8 MIIADR;
+ volatile u16 MIIDATA;
+
+ volatile u16 SoftTimer0; /* 0x74 */
+ volatile u16 SoftTimer1;
+
+ volatile u8 CFGA; /* 0x78 */
+ volatile u8 CFGB;
+ volatile u8 CFGC;
+ volatile u8 CFGD;
+
+ volatile u16 DCFG; /* 0x7C */
+ volatile u16 MCFG;
+
+ volatile u8 TBIST; /* 0x80 */
+ volatile u8 RBIST;
+ volatile u8 PMCPORT;
+ volatile u8 STICKHW;
+
+ volatile u8 MIBCR; /* 0x84 */
+ volatile u8 reserved_85;
+ volatile u8 rev_id;
+ volatile u8 PORSTS;
+
+ volatile u32 MIBData; /* 0x88 */
+
+ volatile u16 EEWrData;
+
+ volatile u8 reserved_8E;
+ volatile u8 BPMDWr;
+ volatile u8 BPCMD;
+ volatile u8 BPMDRd;
+
+ volatile u8 EECHKSUM; /* 0x92 */
+ volatile u8 EECSR;
+
+ volatile u16 EERdData; /* 0x94 */
+ volatile u8 EADDR;
+ volatile u8 EMBCMD;
+
+
+ volatile u8 JMPSR0; /* 0x98 */
+ volatile u8 JMPSR1;
+ volatile u8 JMPSR2;
+ volatile u8 JMPSR3;
+ volatile u8 CHIPGSR; /* 0x9C */
+ volatile u8 TESTCFG;
+ volatile u8 DEBUG;
+ volatile u8 CHIPGCR;
+
+ volatile u16 WOLCRSet; /* 0xA0 */
+ volatile u8 PWCFGSet;
+ volatile u8 WOLCFGSet;
+
+ volatile u16 WOLCRClr; /* 0xA4 */
+ volatile u8 PWCFGCLR;
+ volatile u8 WOLCFGClr;
+
+ volatile u16 WOLSRSet; /* 0xA8 */
+ volatile u16 reserved_AA;
+
+ volatile u16 WOLSRClr; /* 0xAC */
+ volatile u16 reserved_AE;
+
+ volatile u16 PatternCRC[8]; /* 0xB0 */
+ volatile u32 ByteMask[4][4]; /* 0xC0 */
+} __attribute__ ((__packed__));
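+
+/*
+ * Sketch (illustration only; the helper name and the BAR number are
+ * assumptions, not taken from this driver): as the comment above says,
+ * the structure is never instantiated, it only generates offsets for
+ * readb()/readl() style accessors on an ioremap()ed register window.
+ */
+#if 0
+static u8 example_read_rev_id(struct pci_dev *pdev)
+{
+ struct mac_regs __iomem *regs;
+ u8 rev = 0xff;
+
+ regs = ioremap(pci_resource_start(pdev, 1), sizeof(struct mac_regs));
+ if (regs) {
+ rev = readb(&regs->rev_id);
+ iounmap(regs);
+ }
+ return rev;
+}
+#endif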
+
+
+enum hw_mib {
+ HW_MIB_ifRxAllPkts = 0,
+ HW_MIB_ifRxOkPkts,
+ HW_MIB_ifTxOkPkts,
+ HW_MIB_ifRxErrorPkts,
+ HW_MIB_ifRxRuntOkPkt,
+ HW_MIB_ifRxRuntErrPkt,
+ HW_MIB_ifRx64Pkts,
+ HW_MIB_ifTx64Pkts,
+ HW_MIB_ifRx65To127Pkts,
+ HW_MIB_ifTx65To127Pkts,
+ HW_MIB_ifRx128To255Pkts,
+ HW_MIB_ifTx128To255Pkts,
+ HW_MIB_ifRx256To511Pkts,
+ HW_MIB_ifTx256To511Pkts,
+ HW_MIB_ifRx512To1023Pkts,
+ HW_MIB_ifTx512To1023Pkts,
+ HW_MIB_ifRx1024To1518Pkts,
+ HW_MIB_ifTx1024To1518Pkts,
+ HW_MIB_ifTxEtherCollisions,
+ HW_MIB_ifRxPktCRCE,
+ HW_MIB_ifRxJumboPkts,
+ HW_MIB_ifTxJumboPkts,
+ HW_MIB_ifRxMacControlFrames,
+ HW_MIB_ifTxMacControlFrames,
+ HW_MIB_ifRxPktFAE,
+ HW_MIB_ifRxLongOkPkt,
+ HW_MIB_ifRxLongPktErrPkt,
+ HW_MIB_ifTXSQEErrors,
+ HW_MIB_ifRxNobuf,
+ HW_MIB_ifRxSymbolErrors,
+ HW_MIB_ifInRangeLengthErrors,
+ HW_MIB_ifLateCollisions,
+ HW_MIB_SIZE
+};
+
+enum chip_type {
+ CHIP_TYPE_VT6110 = 1,
+};
+
+struct velocity_info_tbl {
+ enum chip_type chip_id;
+ char *name;
+ int io_size;
+ int txqueue;
+ u32 flags;
+};
+
+#define mac_hw_mibs_init(regs) {\
+ BYTE_REG_BITS_ON(MIBCR_MIBFRZ,&((regs)->MIBCR));\
+ BYTE_REG_BITS_ON(MIBCR_MIBCLR,&((regs)->MIBCR));\
+ do {}\
+ while (BYTE_REG_BITS_IS_ON(MIBCR_MIBCLR,&((regs)->MIBCR)));\
+ BYTE_REG_BITS_OFF(MIBCR_MIBFRZ,&((regs)->MIBCR));\
+}
+
+#define mac_read_isr(regs) readl(&((regs)->ISR))
+#define mac_write_isr(regs, x) writel((x),&((regs)->ISR))
+#define mac_clear_isr(regs) writel(0xffffffffL,&((regs)->ISR))
+
+#define mac_write_int_mask(mask, regs) writel((mask),&((regs)->IMR));
+#define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr))
+#define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set))
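+
+/*
+ * Typical interrupt-path ordering for the helpers above (illustrative
+ * sketch only, the local variable name is hypothetical):
+ *
+ *	u32 isr_status = mac_read_isr(regs);
+ *	mac_disable_int(regs);
+ *	mac_write_isr(regs, isr_status);	(acks the handled bits)
+ *	... handle the reported events ...
+ *	mac_enable_int(regs);
+ */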
+
+#define mac_hw_mibs_read(regs, MIBs) {\
+ int i;\
+ BYTE_REG_BITS_ON(MIBCR_MPTRINI,&((regs)->MIBCR));\
+ for (i=0;i<HW_MIB_SIZE;i++) {\
+ (MIBs)[i]=readl(&((regs)->MIBData));\
+ }\
+}
+
+#define mac_set_dma_length(regs, n) {\
+ BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\
+}
+
+#define mac_set_rx_thresh(regs, n) {\
+ BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\
+}
+
+#define mac_rx_queue_run(regs) {\
+ writeb(TRDCSR_RUN, &((regs)->RDCSRSet));\
+}
+
+#define mac_rx_queue_wake(regs) {\
+ writeb(TRDCSR_WAK, &((regs)->RDCSRSet));\
+}
+
+#define mac_tx_queue_run(regs, n) {\
+ writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\
+}
+
+#define mac_tx_queue_wake(regs, n) {\
+ writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\
+}
+
+#define mac_eeprom_reload(regs) {\
+ int i=0;\
+ BYTE_REG_BITS_ON(EECSR_RELOAD,&((regs)->EECSR));\
+ do {\
+ udelay(10);\
+ if (i++>0x1000) {\
+ break;\
+ }\
+ }while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&((regs)->EECSR)));\
+}
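+
+/*
+ * Note on the reload loop above: with udelay(10) per pass and a bail-out
+ * after 0x1000 passes, the EEPROM gets roughly 4096 * 10us = ~41 ms to
+ * clear EECSR_RELOAD before the macro gives up.
+ */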
+
+enum velocity_cam_type {
+ VELOCITY_VLAN_ID_CAM = 0,
+ VELOCITY_MULTICAST_CAM
+};
+
+/**
+ * mac_get_cam_mask - Read a CAM mask
+ * @regs: register block for this velocity
+ * @mask: buffer to store mask
+ * @cam_type: CAM to fetch
+ *
+ * Fetch the mask bits of the selected CAM and store them into the
+ * provided mask buffer.
+ */
+
+static inline void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ writeb(CAMADDR_VCAMSL, &regs->CAMADDR);
+ else
+ writeb(0, &regs->CAMADDR);
+
+ /* read mask */
+ for (i = 0; i < 8; i++)
+ *mask++ = readb(&(regs->MARCAM[i]));
+
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+}
+
+/**
+ * mac_set_cam_mask - Set a CAM mask
+ * @regs: register block for this velocity
+ * @mask: CAM mask to load
+ * @cam_type: CAM to store
+ *
+ * Store a new mask into a CAM
+ */
+
+static inline void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
+ else
+ writeb(CAMADDR_CAMEN, &regs->CAMADDR);
+
+ for (i = 0; i < 8; i++) {
+ writeb(*mask++, &(regs->MARCAM[i]));
+ }
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+/**
+ * mac_set_cam - set CAM data
+ * @regs: register block of this velocity
+ * @idx: Cam index
+ * @addr: 2 or 6 bytes of CAM data
+ * @cam_type: CAM to load
+ *
+ * Load an address or vlan tag into a CAM
+ */
+
+static inline void mac_set_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type)
+{
+ int i;
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ idx &= (64 - 1);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
+ else
+ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ writew(*((u16 *) addr), &regs->MARCAM[0]);
+ else {
+ for (i = 0; i < 6; i++) {
+ writeb(*addr++, &(regs->MARCAM[i]));
+ }
+ }
+ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
+
+ udelay(10);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
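+
+/*
+ * Usage sketch with hypothetical values: programming multicast CAM
+ * entry 0 with a 6-byte address and enabling it in the mask might look
+ * like
+ *
+ *	mac_set_cam(regs, 0, addr, VELOCITY_MULTICAST_CAM);
+ *	mcam_mask[0] |= 0x01;
+ *	mac_set_cam_mask(regs, mcam_mask, VELOCITY_MULTICAST_CAM);
+ */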
+
+/**
+ * mac_get_cam - fetch CAM data
+ * @regs: register block of this velocity
+ * @idx: Cam index
+ * @addr: buffer to hold up to 6 bytes of CAM data
+ * @cam_type: CAM to load
+ *
+ * Load an address or vlan tag from a CAM into the buffer provided by
+ * the caller. VLAN tags are 2 bytes, address CAM entries are 6 bytes.
+ */
+
+static inline void mac_get_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type)
+{
+ int i;
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ idx &= (64 - 1);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
+ else
+ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
+
+ BYTE_REG_BITS_ON(CAMCR_CAMRD, &regs->CAMCR);
+
+ udelay(10);
+
+ if (cam_type == VELOCITY_VLAN_ID_CAM)
+ *((u16 *) addr) = readw(&(regs->MARCAM[0]));
+ else
+ for (i = 0; i < 6; i++, addr++)
+ *((u8 *) addr) = readb(&(regs->MARCAM[i]));
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+/**
+ * mac_wol_reset - reset WOL after exiting low power
+ * @regs: register block of this velocity
+ *
+ * Called after we drop out of wake on lan mode in order to
+ * reset the Wake on LAN features. This function does not restore
+ * the rest of the chip state affected by sleep/wakeup.
+ */
+
+static inline void mac_wol_reset(struct mac_regs __iomem *regs)
+{
+
+ /* Turn off SWPTAG right after leaving power mode */
+ BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
+ /* clear sticky bits */
+ BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
+
+ BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+ /* disable force PME-enable */
+ writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
+ /* disable power-event config bit */
+ writew(0xFFFF, &regs->WOLCRClr);
+ /* clear power status */
+ writew(0xFFFF, &regs->WOLSRClr);
+}
+
+
+/*
+ * Header for WOL definitions. Used to compute hashes
+ */
+
+typedef u8 MCAM_ADDR[ETH_ALEN];
+
+struct arp_packet {
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 type;
+ u16 ar_hrd;
+ u16 ar_pro;
+ u8 ar_hln;
+ u8 ar_pln;
+ u16 ar_op;
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+} __attribute__ ((__packed__));
+
+struct _magic_packet {
+ u8 dest_mac[6];
+ u8 src_mac[6];
+ u16 type;
+ u8 MAC[16][6];
+ u8 password[6];
+} __attribute__ ((__packed__));
+
+/*
+ * Store for chip context when saving and restoring status. Not
+ * all fields are saved/restored currently.
+ */
+
+struct velocity_context {
+ u8 mac_reg[256];
+ MCAM_ADDR cam_addr[MCAM_SIZE];
+ u16 vcam[VCAM_SIZE];
+ u32 cammask[2];
+ u32 patcrc[2];
+ u32 pattern[8];
+};
+
+
+/*
+ * MII registers.
+ */
+
+
+/*
+ * Registers in the MII (offset unit is WORD)
+ */
+
+#define MII_REG_BMCR 0x00 // Basic mode control register
+#define MII_REG_BMSR 0x01 //
+#define MII_REG_PHYID1 0x02 // OUI
+#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
+#define MII_REG_ANAR 0x04 //
+#define MII_REG_ANLPAR 0x05 //
+#define MII_REG_G1000CR 0x09 //
+#define MII_REG_G1000SR 0x0A //
+#define MII_REG_MODCFG 0x10 //
+#define MII_REG_TCSR 0x16 //
+#define MII_REG_PLED 0x1B //
+// NS, MYSON only
+#define MII_REG_PCR 0x17 //
+// ESI only
+#define MII_REG_PCSR 0x17 //
+#define MII_REG_AUXCR 0x1C //
+
+// Marvell 88E1000/88E1000S
+#define MII_REG_PSCR 0x10 // PHY specific control register
+
+//
+// Bits in the BMCR register
+//
+#define BMCR_RESET 0x8000 //
+#define BMCR_LBK 0x4000 //
+#define BMCR_SPEED100 0x2000 //
+#define BMCR_AUTO 0x1000 //
+#define BMCR_PD 0x0800 //
+#define BMCR_ISO 0x0400 //
+#define BMCR_REAUTO 0x0200 //
+#define BMCR_FDX 0x0100 //
+#define BMCR_SPEED1G 0x0040 //
+//
+// Bits in the BMSR register
+//
+#define BMSR_AUTOCM 0x0020 //
+#define BMSR_LNK 0x0004 //
+
+//
+// Bits in the ANAR register
+//
+#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
+#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
+#define ANAR_T4 0x0200 //
+#define ANAR_TXFD 0x0100 //
+#define ANAR_TX 0x0080 //
+#define ANAR_10FD 0x0040 //
+#define ANAR_10 0x0020 //
+//
+// Bits in the ANLPAR register
+//
+#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
+#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
+#define ANLPAR_T4 0x0200 //
+#define ANLPAR_TXFD 0x0100 //
+#define ANLPAR_TX 0x0080 //
+#define ANLPAR_10FD 0x0040 //
+#define ANLPAR_10 0x0020 //
+
+//
+// Bits in the G1000CR register
+//
+#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
+#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
+
+//
+// Bits in the G1000SR register
+//
+#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
+#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
+
+#define TCSR_ECHODIS 0x2000 //
+#define AUXCR_MDPPS 0x0004 //
+
+// Bits in the PLED register
+#define PLED_LALBE 0x0004 //
+
+// Marvell 88E1000/88E1000S Bits in the PHY specific control register (10h)
+#define PSCR_ACRSTX 0x0800 // Assert CRS on Transmit
+
+#define PHYID_CICADA_CS8201 0x000FC410UL
+#define PHYID_VT3216_32BIT 0x000FC610UL
+#define PHYID_VT3216_64BIT 0x000FC600UL
+#define PHYID_MARVELL_1000 0x01410C50UL
+#define PHYID_MARVELL_1000S 0x01410C40UL
+
+#define PHYID_REV_ID_MASK 0x0000000FUL
+
+#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
+#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
+
+#define MII_REG_BITS_ON(x,i,p) do {\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ (w)|=(x);\
+ velocity_mii_write((p),(i),(w));\
+} while (0)
+
+#define MII_REG_BITS_OFF(x,i,p) do {\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ (w)&=(~(x));\
+ velocity_mii_write((p),(i),(w));\
+} while (0)
+
+#define MII_REG_BITS_IS_ON(x,i,p) ({\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ ((int) ((w) & (x)));})
+
+#define MII_GET_PHY_ID(p) ({\
+ u32 id;\
+ velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
+ (id);})
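+
+/*
+ * Illustrative example of the bit helpers above: restarting
+ * autonegotiation amounts to setting BMCR_REAUTO in the BMCR register,
+ * e.g. (assuming a struct velocity_info pointer called vptr)
+ *
+ *	MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ */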
+
+/*
+ * Inline debug routine
+ */
+
+
+enum velocity_msg_level {
+ MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation.
+ MSG_LEVEL_NOTICE = 1, //Errors the user should be notified about.
+ MSG_LEVEL_INFO = 2, //Normal message.
+ MSG_LEVEL_VERBOSE = 3, //Will report all trivial errors.
+ MSG_LEVEL_DEBUG = 4 //Only for debugging purposes.
+};
+
+#ifdef VELOCITY_DEBUG
+#define ASSERT(x) { \
+ if (!(x)) { \
+ printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
+ __FUNCTION__, __LINE__);\
+ BUG(); \
+ }\
+}
+#define VELOCITY_DBG(p,args...) printk(p, ##args)
+#else
+#define ASSERT(x)
+#define VELOCITY_DBG(x)
+#endif
+
+#define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0)
+
+#define VELOCITY_PRT_CAMMASK(p,t) {\
+ int i;\
+ if ((t)==VELOCITY_MULTICAST_CAM) {\
+ for (i=0;i<(MCAM_SIZE/8);i++)\
+ printk("%02X",(p)->mCAMmask[i]);\
+ }\
+ else {\
+ for (i=0;i<(VCAM_SIZE/8);i++)\
+ printk("%02X",(p)->vCAMmask[i]);\
+ }\
+ printk("\n");\
+}
+
+
+
+#define VELOCITY_WOL_MAGIC 0x00000000UL
+#define VELOCITY_WOL_PHY 0x00000001UL
+#define VELOCITY_WOL_ARP 0x00000002UL
+#define VELOCITY_WOL_UCAST 0x00000004UL
+#define VELOCITY_WOL_BCAST 0x00000010UL
+#define VELOCITY_WOL_MCAST 0x00000020UL
+#define VELOCITY_WOL_MAGIC_SEC 0x00000040UL
+
+/*
+ * Flags for options
+ */
+
+#define VELOCITY_FLAGS_TAGGING 0x00000001UL
+#define VELOCITY_FLAGS_TX_CSUM 0x00000002UL
+#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL
+#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL
+#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL
+
+#define VELOCITY_FLAGS_FLOW_CTRL 0x01000000UL
+
+/*
+ * Flags for driver status
+ */
+
+#define VELOCITY_FLAGS_OPENED 0x00010000UL
+#define VELOCITY_FLAGS_VMNS_CONNECTED 0x00020000UL
+#define VELOCITY_FLAGS_VMNS_COMMITTED 0x00040000UL
+#define VELOCITY_FLAGS_WOL_ENABLED 0x00080000UL
+
+/*
+ * Flags for MII status
+ */
+
+#define VELOCITY_LINK_FAIL 0x00000001UL
+#define VELOCITY_SPEED_10 0x00000002UL
+#define VELOCITY_SPEED_100 0x00000004UL
+#define VELOCITY_SPEED_1000 0x00000008UL
+#define VELOCITY_DUPLEX_FULL 0x00000010UL
+#define VELOCITY_AUTONEG_ENABLE 0x00000020UL
+#define VELOCITY_FORCED_BY_EEPROM 0x00000040UL
+
+/*
+ * For velocity_set_media_duplex
+ */
+
+#define VELOCITY_LINK_CHANGE 0x00000001UL
+
+enum speed_opt {
+ SPD_DPX_AUTO = 0,
+ SPD_DPX_100_HALF = 1,
+ SPD_DPX_100_FULL = 2,
+ SPD_DPX_10_HALF = 3,
+ SPD_DPX_10_FULL = 4
+};
+
+enum velocity_init_type {
+ VELOCITY_INIT_COLD = 0,
+ VELOCITY_INIT_RESET,
+ VELOCITY_INIT_WOL
+};
+
+enum velocity_flow_cntl_type {
+ FLOW_CNTL_DEFAULT = 1,
+ FLOW_CNTL_TX,
+ FLOW_CNTL_RX,
+ FLOW_CNTL_TX_RX,
+ FLOW_CNTL_DISABLE,
+};
+
+struct velocity_opt {
+ int numrx; /* Number of RX descriptors */
+ int numtx; /* Number of TX descriptors */
+ enum speed_opt spd_dpx; /* Media link mode */
+ int vid; /* vlan id */
+ int DMA_length; /* DMA length */
+ int rx_thresh; /* RX_THRESH */
+ int flow_cntl;
+ int wol_opts; /* Wake on lan options */
+ int td_int_count;
+ int int_works;
+ int rx_bandwidth_hi;
+ int rx_bandwidth_lo;
+ int rx_bandwidth_en;
+ u32 flags;
+};
+
+struct velocity_info {
+ struct list_head list;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ struct net_device_stats stats;
+
+ dma_addr_t rd_pool_dma;
+ dma_addr_t td_pool_dma[TX_QUEUE_NO];
+
+ dma_addr_t tx_bufs_dma;
+ u8 *tx_bufs;
+
+ u8 ip_addr[4];
+ enum chip_type chip_id;
+
+ struct mac_regs __iomem * mac_regs;
+ unsigned long memaddr;
+ unsigned long ioaddr;
+ u32 io_size;
+
+ u8 rev_id;
+
+#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)]))
+
+ int num_txq;
+
+ volatile int td_used[TX_QUEUE_NO];
+ int td_curr[TX_QUEUE_NO];
+ int td_tail[TX_QUEUE_NO];
+ struct tx_desc *td_rings[TX_QUEUE_NO];
+ struct velocity_td_info *td_infos[TX_QUEUE_NO];
+
+ int rd_curr;
+ int rd_dirty;
+ u32 rd_filled;
+ struct rx_desc *rd_ring;
+ struct velocity_rd_info *rd_info; /* It's an array */
+
+#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
+ u32 mib_counter[MAX_HW_MIB_COUNTER];
+ struct velocity_opt options;
+
+ u32 int_mask;
+
+ u32 flags;
+
+ int rx_buf_sz;
+ u32 mii_status;
+ u32 phy_id;
+ int multicast_limit;
+
+ u8 vCAMmask[(VCAM_SIZE / 8)];
+ u8 mCAMmask[(MCAM_SIZE / 8)];
+
+ spinlock_t lock;
+
+ int wol_opts;
+ u8 wol_passwd[6];
+
+ struct velocity_context context;
+
+ u32 ticks;
+ u32 rx_bytes;
+
+};
+
+/**
+ * velocity_get_ip - find an IP address for the device
+ * @vptr: Velocity to query
+ *
+ * Dig out an IP address for this interface so that we can
+ * configure wakeup with WOL for ARP. If there are multiple IP
+ * addresses on this chain then we use the first - multi-IP WOL is not
+ * supported.
+ *
+ * CHECK ME: locking
+ */
+
+static inline int velocity_get_ip(struct velocity_info *vptr)
+{
+ struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
+ struct in_ifaddr *ifa;
+
+ if (in_dev != NULL) {
+ ifa = (struct in_ifaddr *) in_dev->ifa_list;
+ if (ifa != NULL) {
+ memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * velocity_update_hw_mibs - fetch MIB counters from chip
+ * @vptr: velocity to update
+ *
+ * The velocity hardware keeps certain counters on the hardware
+ * side. We need to read these when the user asks for statistics
+ * or when they overflow (causing an interrupt). Reading a
+ * statistic clears it, so we keep running master counters in
+ * the driver.
+ */
+
+static inline void velocity_update_hw_mibs(struct velocity_info *vptr)
+{
+ u32 tmp;
+ int i;
+ BYTE_REG_BITS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR));
+
+ while (BYTE_REG_BITS_IS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR)));
+
+ BYTE_REG_BITS_ON(MIBCR_MPTRINI, &(vptr->mac_regs->MIBCR));
+ for (i = 0; i < HW_MIB_SIZE; i++) {
+ tmp = readl(&(vptr->mac_regs->MIBData)) & 0x00FFFFFFUL;
+ vptr->mib_counter[i] += tmp;
+ }
+}
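+
+/*
+ * Only the low 24 bits of MIBData are kept (the 0x00FFFFFF mask above),
+ * which suggests the hardware counters are 24 bits wide; the 32-bit
+ * software totals in mib_counter[] carry the accumulated values.
+ */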
+
+/**
+ * init_flow_control_register - set up flow control
+ * @vptr: velocity to configure
+ *
+ * Configure the flow control registers for this velocity device.
+ */
+
+static inline void init_flow_control_register(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ /* Set {XHITH1, XHITH0, XLTH1, XLTH0} in FlowCR1 to {1, 0, 1, 1}
+ depending on RD=64, and turn on XNOEN in FlowCR1 */
+ writel((CR0_XONEN | CR0_XHITH1 | CR0_XLTH1 | CR0_XLTH0), &regs->CR0Set);
+ writel((CR0_FDXTFCEN | CR0_FDXRFCEN | CR0_HDXFCEN | CR0_XHITH0), &regs->CR0Clr);
+
+ /* Set TxPauseTimer to 0xFFFF */
+ writew(0xFFFF, &regs->tx_pause_timer);
+
+ /* Initialize RBRDU to Rx buffer count. */
+ writew(vptr->options.numrx, &regs->RBRDU);
+}
+
+
+#endif
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
new file mode 100644
index 000000000000..35791934a602
--- /dev/null
+++ b/drivers/net/wan/Kconfig
@@ -0,0 +1,607 @@
+#
+# wan devices configuration
+#
+
+menu "Wan interfaces"
+ depends on NETDEVICES
+
+config WAN
+ bool "Wan interfaces support"
+ ---help---
+ Wide Area Networks (WANs), such as X.25, Frame Relay and leased
+ lines, are used to interconnect Local Area Networks (LANs) over vast
+ distances with data transfer rates significantly higher than those
+ achievable with commonly used asynchronous modem connections.
+
+ Usually, a quite expensive external device called a `WAN router' is
+ needed to connect to a WAN. As an alternative, a relatively
+ inexpensive WAN interface card can allow your Linux box to directly
+ connect to a WAN.
+
+ If you have one of those cards and wish to use it under Linux,
+ say Y here and also to the WAN driver for your card.
+
+ If unsure, say N.
+
+# There is no way to detect a comtrol sv11 - force it modular for now.
+config HOSTESS_SV11
+ tristate "Comtrol Hostess SV-11 support"
+ depends on WAN && ISA && m
+ help
+ Driver for Comtrol Hostess SV-11 network card which
+ operates on low speed synchronous serial links at up to
+ 256Kbps, supporting PPP and Cisco HDLC.
+
+ The driver will be compiled as a module: the
+ module will be called hostess_sv11.
+
+# The COSA/SRP driver has not been tested as non-modular yet.
+config COSA
+ tristate "COSA/SRP sync serial boards support"
+ depends on WAN && ISA && m
+ ---help---
+ Driver for COSA and SRP synchronous serial boards.
+
+ These boards allow you to connect synchronous serial devices (for example
+ base-band modems, or any other device with the X.21, V.24, V.35 or
+ V.36 interface) to your Linux box. The cards can work as a
+ character device, a synchronous PPP network device, or a Cisco HDLC
+ network device.
+
+ You will need user-space utilities for the COSA or SRP boards to
+ download the firmware to the cards and to set them up. Look at
+ <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
+ read the comment at the top of <file:drivers/net/wan/cosa.c> for
+ details about the cards and the driver itself.
+
+ The driver will be compiled as a module: the
+ module will be called cosa.
+
+config DSCC4
+ tristate "Etinc PCISYNC serial board support"
+ depends on WAN && PCI && m
+ help
+ Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
+ DSCC4 chipset.
+
+ This is supposed to work with the four port card. Take a look at
+ <http://www.cogenit.fr/dscc4/> for further information about the
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dscc4.
+
+config DSCC4_PCISYNC
+ bool "Etinc PCISYNC features"
+ depends on DSCC4
+ help
+ Due to Etinc's design choice for its PCISYNC cards, some operations
+ are only allowed on specific ports of the DSCC4. This option is the
+ only way for the driver to know that it shouldn't return a success
+ code for these operations.
+
+ Please say Y if your card is an Etinc's PCISYNC.
+
+config DSCC4_PCI_RST
+ bool "Hard reset support"
+ depends on DSCC4
+ help
+ Various DSCC4 bugs forbid any reliable software reset of the ASIC.
+ As a replacement, some vendors provide a way to assert the PCI #RST
+ pin of DSCC4 through the GPIO port of the card. If you choose Y,
+ the driver will make use of this feature before module removal
+ (i.e. rmmod). The feature is known to be available on Commtech's
+ cards. Contact your manufacturer for details.
+
+ Say Y if your card supports this feature.
+
+#
+# Lan Media's board. Currently 1000, 1200, 5200, 5245
+#
+config LANMEDIA
+ tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
+ depends on WAN && PCI
+ ---help---
+ Driver for the following Lan Media family of serial boards:
+
+ - LMC 1000 board allows you to connect synchronous serial devices
+ (for example base-band modems, or any other device with the X.21,
+ V.24, V.35 or V.36 interface) to your Linux box.
+
+ - LMC 1200 with on board DSU board allows you to connect your Linux
+ box directly to a T1 or E1 circuit.
+
+ - LMC 5200 board provides a HSSI interface capable of running up to
+ 52 Mbits per second.
+
+ - LMC 5245 board connects directly to a T3 circuit saving the
+ additional external hardware.
+
+ To change settings such as syncPPP vs Cisco HDLC or clock source you
+ will need lmcctl. It is available at <ftp://ftp.lanmedia.com/>
+ (broken link).
+
+ To compile this driver as a module, choose M here: the
+ module will be called lmc.
+
+# There is no way to detect a Sealevel board. Force it modular
+config SEALEVEL_4021
+ tristate "Sealevel Systems 4021 support"
+ depends on WAN && ISA && m
+ help
+ This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
+
+ The driver will be compiled as a module: the
+ module will be called sealevel.
+
+config SYNCLINK_SYNCPPP
+ tristate "SyncLink HDLC/SYNCPPP support"
+ depends on WAN
+ help
+ Enables HDLC/SYNCPPP support for the SyncLink WAN driver.
+
+ Normally the SyncLink WAN driver works with the main PPP driver
+ <file:drivers/net/ppp_generic.c> and pppd program.
+ HDLC/SYNCPPP support allows use of the Cisco HDLC/PPP driver
+ <file:drivers/net/wan/syncppp.c>. The SyncLink WAN driver (in
+ character devices) must also be enabled.
+
+# Generic HDLC
+config HDLC
+ tristate "Generic HDLC layer"
+ depends on WAN
+ help
+ Say Y to this option if your Linux box contains a WAN (Wide Area
+ Network) card supported by this driver and you are planning to
+ connect the box to a WAN.
+
+ You will need supporting software from
+ <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+ Generic HDLC driver currently supports raw HDLC, Cisco HDLC, Frame
+ Relay, synchronous Point-to-Point Protocol (PPP) and X.25.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hdlc.
+
+ If unsure, say N.
+
+config HDLC_RAW
+ bool "Raw HDLC support"
+ depends on HDLC
+ help
+ Generic HDLC driver supporting raw HDLC over WAN connections.
+
+ If unsure, say N.
+
+config HDLC_RAW_ETH
+ bool "Raw HDLC Ethernet device support"
+ depends on HDLC
+ help
+ Generic HDLC driver supporting raw HDLC Ethernet device emulation
+ over WAN connections.
+
+ You will need it for Ethernet over HDLC bridges.
+
+ If unsure, say N.
+
+config HDLC_CISCO
+ bool "Cisco HDLC support"
+ depends on HDLC
+ help
+ Generic HDLC driver supporting Cisco HDLC over WAN connections.
+
+ If unsure, say N.
+
+config HDLC_FR
+ bool "Frame Relay support"
+ depends on HDLC
+ help
+ Generic HDLC driver supporting Frame Relay over WAN connections.
+
+ If unsure, say N.
+
+config HDLC_PPP
+ bool "Synchronous Point-to-Point Protocol (PPP) support"
+ depends on HDLC
+ help
+ Generic HDLC driver supporting PPP over WAN connections.
+
+ If unsure, say N.
+
+config HDLC_X25
+ bool "X.25 protocol support"
+ depends on HDLC && (LAPB=m && HDLC=m || LAPB=y)
+ help
+ Generic HDLC driver supporting X.25 over WAN connections.
+
+ If unsure, say N.
+
+comment "X.25/LAPB support is disabled"
+ depends on WAN && HDLC && (LAPB!=m || HDLC!=m) && LAPB!=y
+
+config PCI200SYN
+ tristate "Goramo PCI200SYN support"
+ depends on HDLC && PCI
+ help
+ Driver for PCI200SYN cards by Goramo sp. j.
+
+ If you have such a card, say Y here and see
+ <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+ To compile this as a module, choose M here: the
+ module will be called pci200syn.
+
+ If unsure, say N.
+
+config WANXL
+ tristate "SBE Inc. wanXL support"
+ depends on HDLC && PCI
+ help
+ Driver for wanXL PCI cards by SBE Inc.
+
+ If you have such a card, say Y here and see
+ <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+ To compile this as a module, choose M here: the
+ module will be called wanxl.
+
+ If unsure, say N.
+
+config WANXL_BUILD_FIRMWARE
+ bool "rebuild wanXL firmware"
+ depends on WANXL && !PREVENT_FIRMWARE_BUILD
+ help
+ Allows you to rebuild firmware run by the QUICC processor.
+ It requires as68k, ld68k and hexdump programs.
+
+ You should never need this option, say N.
+
+config PC300
+ tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)"
+ depends on HDLC && PCI
+ ---help---
+ Driver for the Cyclades-PC300 synchronous communication boards.
+
+ These boards provide synchronous serial interfaces to your
+ Linux box (interfaces currently available are RS-232/V.35, X.21 and
+ T1/E1). If you wish to support Multilink PPP, please select the
+ option later and read the file README.mlppp provided by the PC300
+ package.
+
+ To compile this as a module, choose M here: the module
+ will be called pc300.
+
+ If unsure, say N.
+
+config PC300_MLPPP
+ bool "Cyclades-PC300 MLPPP support"
+ depends on PC300 && PPP_MULTILINK && PPP_SYNC_TTY && HDLC_PPP
+ help
+ Multilink PPP over the PC300 synchronous communication boards.
+
+comment "Cyclades-PC300 MLPPP support is disabled."
+ depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
+
+comment "Refer to the file README.mlppp, provided by PC300 package."
+ depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
+
+config N2
+ tristate "SDL RISCom/N2 support"
+ depends on HDLC && ISA
+ help
+ Driver for RISCom/N2 single or dual channel ISA cards by
+ SDL Communications Inc.
+
+ If you have such a card, say Y here and see
+ <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+ Note that N2csu and N2dds cards are not supported by this driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called n2.
+
+ If unsure, say N.
+
+config C101
+ tristate "Moxa C101 support"
+ depends on HDLC && ISA
+ help
+ Driver for C101 SuperSync ISA cards by Moxa Technologies Co., Ltd.
+
+ If you have such a card, say Y here and see
+ <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called c101.
+
+ If unsure, say N.
+
+config FARSYNC
+ tristate "FarSync T-Series support"
+ depends on HDLC && PCI
+ ---help---
+ Support for the FarSync T-Series X.21 (and V.35/V.24) cards by
+ FarSite Communications Ltd.
+
+ Synchronous communication is supported on all ports at speeds up to
+ 8Mb/s (128K on V.24) using synchronous PPP, Cisco HDLC, raw HDLC,
+ Frame Relay or X.25/LAPB.
+
+ If you want the module to be automatically loaded when the interface
+ is referenced then you should add "alias hdlcX farsync" to
+ /etc/modprobe.conf for each interface, where X is 0, 1, 2, ..., or
+ simply use "alias hdlc* farsync" to indicate all of them.
+
+ To compile this driver as a module, choose M here: the
+ module will be called farsync.
+
+config DLCI
+ tristate "Frame Relay DLCI support"
+ depends on WAN
+ ---help---
+ Support for the Frame Relay protocol.
+
+ Frame Relay is a fast low-cost way to connect to a remote Internet
+ access provider or to form a private wide area network. The one
+ physical line from your box to the local "switch" (i.e. the entry
+ point to the Frame Relay network, usually at the phone company) can
+ carry several logical point-to-point connections to other computers
+ connected to the Frame Relay network. For a general explanation of
+ the protocol, check out <http://www.mplsforum.org/>.
+
+ To use frame relay, you need supporting hardware (called FRAD) and
+ certain programs from the net-tools package as explained in
+ <file:Documentation/networking/framerelay.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dlci.
+
+config DLCI_COUNT
+ int "Max open DLCI"
+ depends on DLCI
+ default "24"
+ help
+ Maximum number of logical point-to-point frame relay connections
+ (the identifiers of which are called DLCIs) that the driver can
+ handle.
+
+ The default is probably fine.
+
+config DLCI_MAX
+ int "Max DLCI per device"
+ depends on DLCI
+ default "8"
+ help
+ How many logical point-to-point frame relay connections (the
+ identifiers of which are called DLCIs) should be handled by each
+ of your hardware frame relay access devices.
+
+ Go with the default.
+
+config SDLA
+ tristate "SDLA (Sangoma S502/S508) support"
+ depends on DLCI && ISA
+ help
+ Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access
+ Devices.
+
+ These are multi-protocol cards, but only Frame Relay is supported
+ by the driver at this time. Please read
+ <file:Documentation/networking/framerelay.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sdla.
+
+# Wan router core.
+config WAN_ROUTER_DRIVERS
+ bool "WAN router drivers"
+ depends on WAN && WAN_ROUTER
+ ---help---
+ Connect LAN to WAN via Linux box.
+
+ Select the driver for your card and remember to say Y to "Wan Router."
+ You will need the wan-tools package which is available from
+ <ftp://ftp.sangoma.com/>. For more information read:
+ <file:Documentation/networking/wan-router.txt>.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about WAN router drivers.
+
+ If unsure, say N.
+
+config VENDOR_SANGOMA
+ tristate "Sangoma WANPIPE(tm) multiprotocol cards"
+ depends on WAN_ROUTER_DRIVERS && WAN_ROUTER && (PCI || ISA) && BROKEN
+ ---help---
+ Driver for S514-PCI/ISA Synchronous Data Link Adapters (SDLA).
+
+ WANPIPE from Sangoma Technologies Inc. <http://www.sangoma.com/>
+ is a family of intelligent multiprotocol WAN adapters with data
+ transfer rates up to 4Mbps. Cards support:
+
+ - X.25, Frame Relay, PPP, Cisco HDLC protocols.
+
+ - API for protocols like HDLC (LAPB), HDLC Streaming, X.25,
+ Frame Relay and BiSync.
+
+ - Ethernet Bridging over Frame Relay protocol.
+
+ - MULTILINK PPP
+
+ - Async PPP (Modem Dialup)
+
+ The next questions will ask you about the protocols you want
+ the driver to support.
+
+ If you have one or more of these cards, say M to this option;
+ and read <file:Documentation/networking/wanpipe.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wanpipe.
+
+config WANPIPE_CHDLC
+ bool "WANPIPE Cisco HDLC support"
+ depends on VENDOR_SANGOMA
+ ---help---
+ Connect a WANPIPE card to a leased line using the Cisco HDLC.
+
+ - Supports Dual Port Cisco HDLC on the S514-PCI/S508-ISA cards
+ which allows the user to build applications using the HDLC streaming API.
+
+ - CHDLC Streaming MULTILINK PPP that can bind multiple WANPIPE T1
+ cards into a single logical channel.
+
+ Say Y and the Cisco HDLC support, HDLC streaming API and
+ MULTILINK PPP will be included in the driver.
+
+config WANPIPE_FR
+ bool "WANPIPE Frame Relay support"
+ depends on VENDOR_SANGOMA
+ help
+ Connect a WANPIPE card to a Frame Relay network, or use the Frame Relay
+ API to develop custom applications.
+
+ Contains the Ethernet Bridging over Frame Relay feature, where
+ a WANPIPE frame relay link can be directly connected to the Linux
+ kernel bridge. The Frame Relay option is supported on S514-PCI
+ and S508-ISA cards.
+
+ Say Y and the Frame Relay support will be included in the driver.
+
+config WANPIPE_X25
+ bool "WANPIPE X.25 support"
+ depends on VENDOR_SANGOMA
+ help
+ Connect a WANPIPE card to an X.25 network.
+
+ Includes the X.25 API support for custom applications over the
+ X.25 protocol. The X.25 option is supported on S514-PCI and
+ S508-ISA cards.
+
+ Say Y and the X.25 support will be included in the driver.
+
+config WANPIPE_PPP
+ bool "WANPIPE PPP support"
+ depends on VENDOR_SANGOMA
+ help
+ Connect a WANPIPE card to a leased line using Point-to-Point
+ Protocol (PPP).
+
+ The PPP option is supported on S514-PCI/S508-ISA cards.
+
+ Say Y and the PPP support will be included in the driver.
+
+config WANPIPE_MULTPPP
+ bool "WANPIPE Multi-Port PPP support"
+ depends on VENDOR_SANGOMA
+ help
+ Connect a WANPIPE card to a leased line using Point-to-Point
+ Protocol (PPP).
+
+ Uses in-kernel SyncPPP protocol over the Sangoma HDLC Streaming
+ adapter. In this case each Sangoma adapter port can support an
+ independent PPP connection. For example, a single Quad-Port PCI
+ adapter can support up to four independent PPP links. The PPP
+ option is supported on S514-PCI/S508-ISA cards.
+
+ Say Y and the Multi-Port PPP support will be included in the driver.
+
+config CYCLADES_SYNC
+ tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
+ depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
+ ---help---
+ Cyclom 2X from Cyclades Corporation <http://www.cyclades.com/> is an
+ intelligent multiprotocol WAN adapter with data transfer rates up to
+ 512 Kbps. These cards support the X.25 and SNA related protocols.
+
+ While no documentation is available at this time, please grab the
+ wanconfig tarball from
+ <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes
+ to make it compile with the current wanrouter include files; efforts
+ are being made to use the original package available at
+ <ftp://ftp.sangoma.com/>).
+
+ Feel free to contact me or the cycsyn-devel mailing list at
+ <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for
+ additional details, I hope to have documentation available as soon as
+ possible. (Cyclades Brazil is writing the Documentation).
+
+ The next questions will ask you about the protocols you want the
+ driver to support (for now only X.25 is supported).
+
+ If you have one or more of these cards, say Y to this option.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cyclomx.
+
+config CYCLOMX_X25
+ bool "Cyclom 2X X.25 support (EXPERIMENTAL)"
+ depends on CYCLADES_SYNC
+ help
+ Connect a Cyclom 2X card to an X.25 network.
+
+ Enabling X.25 support will enlarge your kernel by about 11 kB.
+
+# X.25 network drivers
+config LAPBETHER
+ tristate "LAPB over Ethernet driver (EXPERIMENTAL)"
+ depends on WAN && LAPB && X25
+ ---help---
+ Driver for a pseudo device (typically called /dev/lapb0) which allows
+ you to open an LAPB point-to-point connection to some other computer
+ on your Ethernet network.
+
+ In order to do this, you need to say Y or M to the driver for your
+ Ethernet card as well as to "LAPB Data Link Driver".
+
+ To compile this driver as a module, choose M here: the
+ module will be called lapbether.
+
+ If unsure, say N.
+
+config X25_ASY
+ tristate "X.25 async driver (EXPERIMENTAL)"
+ depends on WAN && LAPB && X25
+ ---help---
+ Send and receive X.25 frames over regular asynchronous serial
+ lines such as telephone lines equipped with ordinary modems.
+
+ Experts should note that this driver doesn't currently comply with
+ the asynchronous HDLC framing protocols in CCITT recommendation X.25.
+
+ To compile this driver as a module, choose M here: the
+ module will be called x25_asy.
+
+ If unsure, say N.
+
+config SBNI
+ tristate "Granch SBNI12 Leased Line adapter support"
+ depends on WAN && X86
+ ---help---
+ Driver for ISA SBNI12-xx cards which are low cost alternatives to
+ leased line modems.
+
+ You can find more information and the latest versions of drivers and
+ utilities at <http://www.granch.ru/>. If you have any question you
+ can send email to <sbni@granch.ru>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sbni.
+
+ If unsure, say N.
+
+config SBNI_MULTILINE
+ bool "Multiple line feature support"
+ depends on SBNI
+ help
+ Schedule traffic for some parallel lines, via SBNI12 adapters.
+
+ If you have two computers connected with two parallel lines it's
+ possible to nearly double the transfer rate. You should have
+ a program named 'sbniconfig' to configure adapters.
+
+ If unsure, say N.
+
+endmenu
+
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
new file mode 100644
index 000000000000..ce6c56b903e7
--- /dev/null
+++ b/drivers/net/wan/Makefile
@@ -0,0 +1,86 @@
+#
+# Makefile for the Linux network (wan) device drivers.
+#
+# 3 Aug 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+wanpipe-y := sdlamain.o sdla_ft1.o
+wanpipe-$(CONFIG_WANPIPE_X25) += sdla_x25.o
+wanpipe-$(CONFIG_WANPIPE_FR) += sdla_fr.o
+wanpipe-$(CONFIG_WANPIPE_CHDLC) += sdla_chdlc.o
+wanpipe-$(CONFIG_WANPIPE_PPP) += sdla_ppp.o
+wanpipe-$(CONFIG_WANPIPE_MULTPPP) += wanpipe_multppp.o
+wanpipe-objs := $(wanpipe-y)
+
+cyclomx-y := cycx_main.o
+cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o
+cyclomx-objs := $(cyclomx-y)
+
+hdlc-y := hdlc_generic.o
+hdlc-$(CONFIG_HDLC_RAW) += hdlc_raw.o
+hdlc-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
+hdlc-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o
+hdlc-$(CONFIG_HDLC_FR) += hdlc_fr.o
+hdlc-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
+hdlc-$(CONFIG_HDLC_X25) += hdlc_x25.o
+hdlc-objs := $(hdlc-y)
+
+pc300-y := pc300_drv.o
+pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
+pc300-objs := $(pc300-y)
+
+obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o
+obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o
+obj-$(CONFIG_COSA) += syncppp.o cosa.o
+obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o
+obj-$(CONFIG_DSCC4) += dscc4.o
+obj-$(CONFIG_LANMEDIA) += syncppp.o
+obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o
+obj-$(CONFIG_X25_ASY) += x25_asy.o
+
+obj-$(CONFIG_LANMEDIA) += lmc/
+
+obj-$(CONFIG_DLCI) += dlci.o
+obj-$(CONFIG_SDLA) += sdla.o
+ifeq ($(CONFIG_WANPIPE_MULTPPP),y)
+ obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o syncppp.o
+else
+ obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o
+endif
+obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
+obj-$(CONFIG_LAPBETHER) += lapbether.o
+obj-$(CONFIG_SBNI) += sbni.o
+obj-$(CONFIG_PC300) += pc300.o
+obj-$(CONFIG_HDLC) += hdlc.o
+ifeq ($(CONFIG_HDLC_PPP),y)
+ obj-$(CONFIG_HDLC) += syncppp.o
+endif
+obj-$(CONFIG_N2) += n2.o
+obj-$(CONFIG_C101) += c101.o
+obj-$(CONFIG_WANXL) += wanxl.o
+obj-$(CONFIG_PCI200SYN) += pci200syn.o
+
+clean-files := wanxlfw.inc
+$(obj)/wanxl.o: $(obj)/wanxlfw.inc
+
+ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
+ifeq ($(ARCH),m68k)
+ AS68K = $(AS)
+ LD68K = $(LD)
+else
+ AS68K = as68k
+ LD68K = ld68k
+endif
+
+quiet_cmd_build_wanxlfw = BLD FW $@
+ cmd_build_wanxlfw = \
+ $(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
+ $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
+ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
+ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
+
+$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
+ $(call if_changed_dep,build_wanxlfw)
+targets += wanxlfw.inc
+endif
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
new file mode 100644
index 000000000000..43d854ace233
--- /dev/null
+++ b/drivers/net/wan/c101.c
@@ -0,0 +1,446 @@
+/*
+ * Moxa C101 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see http://hq.pm.waw.pl/hdlc/
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * Moxa C101 User's Manual
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hd64570.h"
+
+
+static const char* version = "Moxa C101 driver version: 1.15";
+static const char* devname = "C101";
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define C101_PAGE 0x1D00
+#define C101_DTR 0x1E00
+#define C101_SCA 0x1F00
+#define C101_WINDOW_SIZE 0x2000
+#define C101_MAPPED_RAM_SIZE 0x4000
+
+#define RAM_SIZE (256 * 1024)
+#define TX_RING_BUFFERS 10
+#define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \
+ (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS)
+
+#define CLOCK_BASE 9830400 /* 9.8304 MHz */
+#define PAGE0_ALWAYS_MAPPED
+
+static char *hw; /* pointer to hw=xxx command line string */
+
+
+typedef struct card_s {
+ struct net_device *dev;
+ spinlock_t lock; /* TX lock */
+ u8 __iomem *win0base; /* ISA window base address */
+ u32 phy_winbase; /* ISA physical base address */
+ sync_serial_settings settings;
+ int rxpart; /* partial frame received, next frame invalid*/
+ unsigned short encoding;
+ unsigned short parity;
+ u16 rx_ring_buffers; /* number of buffers in a ring */
+ u16 tx_ring_buffers;
+ u16 buff_offset; /* offset of first buffer of first channel */
+ u16 rxin; /* rx ring buffer 'in' pointer */
+ u16 txin; /* tx ring buffer 'in' and 'last' pointers */
+ u16 txlast;
+ u8 rxs, txs, tmc; /* SCA registers */
+ u8 irq; /* IRQ (3-15) */
+ u8 page;
+
+ struct card_s *next_card;
+}card_t;
+
+typedef card_t port_t;
+
+static card_t *first_card;
+static card_t **new_card = &first_card;
+
+
+#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg))
+#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
+#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg))
+
+/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
+#define sca_outw(value, reg, card) do { \
+ writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
+ writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\
+} while(0)
+
+#define port_to_card(port) (port)
+#define log_node(port) (0)
+#define phy_node(port) (0)
+#define winsize(card) (C101_WINDOW_SIZE)
+#define win0base(card) ((card)->win0base)
+#define winbase(card) ((card)->win0base + 0x2000)
+#define get_port(card, port) (card)
+static void sca_msci_intr(port_t *port);
+
+
+static inline u8 sca_get_page(card_t *card)
+{
+ return card->page;
+}
+
+static inline void openwin(card_t *card, u8 page)
+{
+ card->page = page;
+ writeb(page, card->win0base + C101_PAGE);
+}
+
+
+#include "hd6457x.c"
+
+
+static void sca_msci_intr(port_t *port)
+{
+ struct net_device *dev = port_to_dev(port);
+ card_t* card = port_to_card(port);
+ u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */
+
+ /* Reset MSCI TX underrun status bit */
+ sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card);
+
+ if (stat & ST1_UDRN) {
+ struct net_device_stats *stats = hdlc_stats(dev);
+ stats->tx_errors++; /* TX Underrun error detected */
+ stats->tx_fifo_errors++;
+ }
+
+ /* Reset MSCI CDCD status bit - uses ch#2 DCD input */
+ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card);
+
+ if (stat & ST1_CDCD)
+ hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD),
+ dev);
+}
+
+
+static void c101_set_iface(port_t *port)
+{
+ u8 rxs = port->rxs & CLK_BRG_MASK;
+ u8 txs = port->txs & CLK_BRG_MASK;
+
+ switch(port->settings.clock_type) {
+ case CLOCK_INT:
+ rxs |= CLK_BRG_RX; /* TX clock */
+ txs |= CLK_RXCLK_TX; /* BRG output */
+ break;
+
+ case CLOCK_TXINT:
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_BRG_TX; /* BRG output */
+ break;
+
+ case CLOCK_TXFROMRX:
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_RXCLK_TX; /* RX clock */
+ break;
+
+ default: /* EXTernal clock */
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_LINE_TX; /* TXC input */
+ }
+
+ port->rxs = rxs;
+ port->txs = txs;
+ sca_out(rxs, MSCI1_OFFSET + RXS, port);
+ sca_out(txs, MSCI1_OFFSET + TXS, port);
+ sca_set_port(port);
+}
+
+
+static int c101_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ int result;
+
+ result = hdlc_open(dev);
+ if (result)
+ return result;
+
+ writeb(1, port->win0base + C101_DTR);
+ sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
+ sca_open(dev);
+ /* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
+ sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
+ sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
+
+ hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev);
+ printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
+
+ /* enable MSCI1 CDCD interrupt */
+ sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
+ sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
+ sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
+ c101_set_iface(port);
+ return 0;
+}
+
+
+static int c101_close(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+
+ sca_close(dev);
+ writeb(0, port->win0base + C101_DTR);
+ sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
+ hdlc_close(dev);
+ return 0;
+}
+
+
+static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+ if (cmd == SIOCDEVPRIVATE) {
+ sca_dump_rings(dev);
+ printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
+ sca_in(MSCI1_OFFSET + ST0, port),
+ sca_in(MSCI1_OFFSET + ST1, port),
+ sca_in(MSCI1_OFFSET + ST2, port),
+ sca_in(MSCI1_OFFSET + ST3, port));
+ return 0;
+ }
+#endif
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(line, &port->settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ if (new_line.clock_type != CLOCK_EXT &&
+ new_line.clock_type != CLOCK_TXFROMRX &&
+ new_line.clock_type != CLOCK_INT &&
+ new_line.clock_type != CLOCK_TXINT)
+ return -EINVAL; /* No such clock setting */
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ memcpy(&port->settings, &new_line, size); /* Update settings */
+ c101_set_iface(port);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+
+
+static void c101_destroy_card(card_t *card)
+{
+ readb(card->win0base + C101_PAGE); /* Resets SCA? */
+
+ if (card->irq)
+ free_irq(card->irq, card);
+
+ if (card->win0base) {
+ iounmap(card->win0base);
+ release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
+ }
+
+ free_netdev(card->dev);
+
+ kfree(card);
+}
+
+
+
+static int __init c101_run(unsigned long irq, unsigned long winbase)
+{
+ struct net_device *dev;
+ hdlc_device *hdlc;
+ card_t *card;
+ int result;
+
+ if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
+ printk(KERN_ERR "c101: invalid IRQ value\n");
+ return -ENODEV;
+ }
+
+ if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
+ printk(KERN_ERR "c101: invalid RAM value\n");
+ return -ENODEV;
+ }
+
+ card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ if (card == NULL) {
+ printk(KERN_ERR "c101: unable to allocate memory\n");
+ return -ENOBUFS;
+ }
+ memset(card, 0, sizeof(card_t));
+
+ card->dev = alloc_hdlcdev(card);
+ if (!card->dev) {
+ printk(KERN_ERR "c101: unable to allocate memory\n");
+ kfree(card);
+ return -ENOBUFS;
+ }
+
+ if (request_irq(irq, sca_intr, 0, devname, card)) {
+ printk(KERN_ERR "c101: could not allocate IRQ\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->irq = irq;
+
+ if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
+ printk(KERN_ERR "c101: could not request RAM window\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->phy_winbase = winbase;
+ card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
+ if (!card->win0base) {
+ printk(KERN_ERR "c101: could not map I/O address\n");
+ c101_destroy_card(card);
+ return -EBUSY;
+ }
+
+ card->tx_ring_buffers = TX_RING_BUFFERS;
+ card->rx_ring_buffers = RX_RING_BUFFERS;
+ card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */
+
+ readb(card->win0base + C101_PAGE); /* Resets SCA? */
+ udelay(100);
+ writeb(0, card->win0base + C101_PAGE);
+ writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */
+
+ sca_init(card, 0);
+
+ dev = port_to_dev(card);
+ hdlc = dev_to_hdlc(dev);
+
+ spin_lock_init(&card->lock);
+ SET_MODULE_OWNER(dev);
+ dev->irq = irq;
+ dev->mem_start = winbase;
+ dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
+ dev->tx_queue_len = 50;
+ dev->do_ioctl = c101_ioctl;
+ dev->open = c101_open;
+ dev->stop = c101_close;
+ hdlc->attach = sca_attach;
+ hdlc->xmit = sca_xmit;
+ card->settings.clock_type = CLOCK_EXT;
+
+ result = register_hdlc_device(dev);
+ if (result) {
+ printk(KERN_WARNING "c101: unable to register hdlc device\n");
+ c101_destroy_card(card);
+ return result;
+ }
+
+ sca_init_sync_port(card); /* Set up C101 memory */
+ hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev);
+
+ printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
+ " using %u TX + %u RX packets rings\n",
+ dev->name, card->irq,
+ card->tx_ring_buffers, card->rx_ring_buffers);
+
+ *new_card = card;
+ new_card = &card->next_card;
+ return 0;
+}
+
+
+
+static int __init c101_init(void)
+{
+ if (hw == NULL) {
+#ifdef MODULE
+ printk(KERN_INFO "c101: no card initialized\n");
+#endif
+ return -ENOSYS; /* no parameters specified, abort */
+ }
+
+ printk(KERN_INFO "%s\n", version);
+
+ do {
+ unsigned long irq, ram;
+
+ irq = simple_strtoul(hw, &hw, 0);
+
+ if (*hw++ != ',')
+ break;
+ ram = simple_strtoul(hw, &hw, 0);
+
+ if (*hw == ':' || *hw == '\x0')
+ c101_run(irq, ram);
+
+ if (*hw == '\x0')
+ return first_card ? 0 : -ENOSYS;
+ }while(*hw++ == ':');
+
+ printk(KERN_ERR "c101: invalid hardware parameters\n");
+ return first_card ? 0 : -ENOSYS;
+}
+
+
+static void __exit c101_cleanup(void)
+{
+ card_t *card = first_card;
+
+ while (card) {
+ card_t *ptr = card;
+ card = card->next_card;
+ unregister_hdlc_device(port_to_dev(ptr));
+ c101_destroy_card(ptr);
+ }
+}
+
+
+module_init(c101_init);
+module_exit(c101_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Moxa C101 serial port driver");
+MODULE_LICENSE("GPL v2");
+module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */
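+
+/*
+ * Example with hypothetical values: a single card on IRQ 10 with its RAM
+ * window at 0xC8000 would be loaded as
+ *
+ *	modprobe c101 hw=10,0xC8000
+ *
+ * and several cards are chained with ':', e.g. hw=10,0xC8000:11,0xD0000.
+ */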
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
new file mode 100644
index 000000000000..921a573372e9
--- /dev/null
+++ b/drivers/net/wan/cosa.c
@@ -0,0 +1,2100 @@
+/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
+
+/*
+ * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * The driver for the SRP and COSA synchronous serial cards.
+ *
+ * HARDWARE INFO
+ *
+ * Both cards are developed at the Institute of Computer Science,
+ * Masaryk University (http://www.ics.muni.cz/). The hardware is
+ * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
+ * and the photo of both cards is available at
+ * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
+ * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
+ * For Linux-specific utilities, see below in the "Software info" section.
+ * If you want to order the card, contact Jiri Novotny.
+ *
+ * The SRP (serial port?, the Czech word "srp" means "sickle") card
+ * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
+ * with V.24 interfaces up to 80kb/s each.
+ *
+ * The COSA (communication serial adapter?, the Czech word "kosa" means
+ * "scythe") is a next-generation sync/async board with two interfaces
+ * - currently any of V.24, X.21, V.35 and V.36 can be selected.
+ * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
+ * The 8-channels version is in development.
+ *
+ * Both types have downloadable firmware and communicate via ISA DMA.
+ * COSA can be also a bus-mastering device.
+ *
+ * SOFTWARE INFO
+ *
+ * The homepage of the Linux driver is at http://www.fi.muni.cz/~kas/cosa/.
+ * The CVS tree of Linux driver can be viewed there, as well as the
+ * firmware binaries and user-space utilities for downloading the firmware
+ * into the card and setting up the card.
+ *
+ * The Linux driver (unlike the present *BSD drivers :-) can work even
+ * for the COSA and SRP in one computer and allows each channel to work
+ * in one of the three modes (character device, Cisco HDLC, Sync PPP).
+ *
+ * AUTHOR
+ *
+ * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
+ *
+ * You can mail me bugfixes and even success reports. I am especially
+ * interested in the SMP and/or multi-channel success/failure reports
+ * (I wonder if I did the locking properly :-).
+ *
+ * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
+ *
+ * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
+ * The skeleton.c by Donald Becker
+ * The SDL Riscom/N2 driver by Mike Natale
+ * The Comtrol Hostess SV11 driver by Alan Cox
+ * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
+ */
+/*
+ * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
+ * fixed a deadlock in cosa_sppp_open
+ */
+
+/* ---------- Headers, macros, data structures ---------- */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+
+#undef COSA_SLOW_IO /* for testing purposes only */
+#undef REALLY_SLOW_IO
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/syncppp.h>
+#include "cosa.h"
+
+/* Maximum length of the identification string. */
+#define COSA_MAX_ID_STRING 128
+
+/* Maximum length of the channel name */
+#define COSA_MAX_NAME (sizeof("cosaXXXcXXX")+1)
+
+/* Per-channel data structure */
+
+struct channel_data {
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ int usage; /* Usage count; >0 for chrdev, -1 for netdev */
+ int num; /* Number of the channel */
+ struct cosa_data *cosa; /* Pointer to the per-card structure */
+ int txsize; /* Size of transmitted data */
+ char *txbuf; /* Transmit buffer */
+ char name[COSA_MAX_NAME]; /* channel name */
+
+ /* The HW layer interface */
+ /* routine called from the RX interrupt */
+ char *(*setup_rx)(struct channel_data *channel, int size);
+ /* routine called when the RX is done (from the EOT interrupt) */
+ int (*rx_done)(struct channel_data *channel);
+ /* routine called when the TX is done (from the EOT interrupt) */
+ int (*tx_done)(struct channel_data *channel, int size);
+
+ /* Character device parts */
+ struct semaphore rsem, wsem;
+ char *rxdata;
+ int rxsize;
+ wait_queue_head_t txwaitq, rxwaitq;
+ int tx_status, rx_status;
+
+ /* SPPP/HDLC device parts */
+ struct ppp_device pppdev;
+ struct sk_buff *rx_skb, *tx_skb;
+ struct net_device_stats stats;
+};
+
+/* cosa->firmware_status bits */
+#define COSA_FW_RESET (1<<0) /* Is the ROM monitor active? */
+#define COSA_FW_DOWNLOAD (1<<1) /* Is the microcode downloaded? */
+#define COSA_FW_START (1<<2) /* Is the microcode running? */
+
+struct cosa_data {
+ int num; /* Card number */
+ char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
+ unsigned int datareg, statusreg; /* I/O ports */
+ unsigned short irq, dma; /* IRQ and DMA number */
+ unsigned short startaddr; /* Firmware start address */
+ unsigned short busmaster; /* Use busmastering? */
+ int nchannels; /* # of channels on this card */
+ int driver_status; /* For communicating with firmware */
+ int firmware_status; /* Downloaded, reset, etc. */
+ long int rxbitmap, txbitmap; /* Bitmap of channels willing to send/receive data */
+ long int rxtx; /* RX or TX in progress? */
+ int enabled;
+ int usage; /* usage count */
+ int txchan, txsize, rxsize;
+ struct channel_data *rxchan;
+ char *bouncebuf;
+ char *txbuf, *rxbuf;
+ struct channel_data *chan;
+ spinlock_t lock; /* For exclusive operations on this structure */
+ char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
+ char *type; /* card type */
+};
+
+/*
+ * Define this if you want all the possible ports to be autoprobed.
+ * It is here, but it is probably not a good idea to use it.
+ */
+/* #define COSA_ISA_AUTOPROBE 1 */
+
+/*
+ * Character device major number. 117 was allocated for us.
+ * The value of 0 means to allocate the first free one.
+ */
+static int cosa_major = 117;
+
+/*
+ * Encoding of the minor numbers:
+ * The lowest CARD_MINOR_BITS bits select the channel on a single card,
+ * the highest bits select the card number.
+ */
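+/*
+ * A worked example of the encoding above (the numbers are illustrative
+ * only): with CARD_MINOR_BITS == 4, minor number 18 (0x12) selects card
+ * 18 >> 4 == 1 and channel 18 & 0x0f == 2 -- the same arithmetic that
+ * cosa_open() performs below.
+ */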
+#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
+ * for the single card */
+/*
+ * The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
+ * macro doesn't like anything other than the raw number as an argument :-(
+ */
+#define MAX_CARDS 16
+/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
+
+#define DRIVER_RX_READY 0x0001
+#define DRIVER_TX_READY 0x0002
+#define DRIVER_TXMAP_SHIFT 2
+#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
+
+/*
+ * Bits in cosa->rxtx - they indicate whether a transmit or receive is
+ * in progress. These values are the bit numbers.
+ */
+#define TXBIT 0
+#define RXBIT 1
+#define IRQBIT 2
+
+#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
+
+#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
+#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
+#undef DEBUG_IO //1 /* Dump the I/O traffic */
+
+#define TX_TIMEOUT (5*HZ)
+
+/* Maybe the following should be allocated dynamically */
+static struct cosa_data cosa_cards[MAX_CARDS];
+static int nr_cards;
+
+#ifdef COSA_ISA_AUTOPROBE
+static int io[MAX_CARDS+1] = { 0x220, 0x228, 0x210, 0x218, 0, };
+/* NOTE: DMA is not autoprobed!!! */
+static int dma[MAX_CARDS+1] = { 1, 7, 1, 7, 1, 7, 1, 7, 0, };
+#else
+static int io[MAX_CARDS+1];
+static int dma[MAX_CARDS+1];
+#endif
+/* IRQ can be safely autoprobed */
+static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, };
+
+/* for class stuff*/
+static struct class_simple *cosa_class;
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
+
+MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
+MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
+MODULE_LICENSE("GPL");
+#endif
+
+/* I use this mainly for testing purposes */
+#ifdef COSA_SLOW_IO
+#define cosa_outb outb_p
+#define cosa_outw outw_p
+#define cosa_inb inb_p
+#define cosa_inw inw_p
+#else
+#define cosa_outb outb
+#define cosa_outw outw
+#define cosa_inb inb
+#define cosa_inw inw
+#endif
+
+#define is_8bit(cosa) (!(cosa->datareg & 0x08))
+
+#define cosa_getstatus(cosa) (cosa_inb(cosa->statusreg))
+#define cosa_putstatus(cosa, stat) (cosa_outb(stat, cosa->statusreg))
+#define cosa_getdata16(cosa) (cosa_inw(cosa->datareg))
+#define cosa_getdata8(cosa) (cosa_inb(cosa->datareg))
+#define cosa_putdata16(cosa, dt) (cosa_outw(dt, cosa->datareg))
+#define cosa_putdata8(cosa, dt) (cosa_outb(dt, cosa->datareg))
+
+/* Initialization stuff */
+static int cosa_probe(int ioaddr, int irq, int dma);
+
+/* HW interface */
+static void cosa_enable_rx(struct channel_data *chan);
+static void cosa_disable_rx(struct channel_data *chan);
+static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
+static void cosa_kick(struct cosa_data *cosa);
+static int cosa_dma_able(struct channel_data *chan, char *buf, int len);
+
+/* SPPP/HDLC stuff */
+static void sppp_channel_init(struct channel_data *chan);
+static void sppp_channel_delete(struct channel_data *chan);
+static int cosa_sppp_open(struct net_device *d);
+static int cosa_sppp_close(struct net_device *d);
+static void cosa_sppp_timeout(struct net_device *d);
+static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d);
+static char *sppp_setup_rx(struct channel_data *channel, int size);
+static int sppp_rx_done(struct channel_data *channel);
+static int sppp_tx_done(struct channel_data *channel, int size);
+static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static struct net_device_stats *cosa_net_stats(struct net_device *dev);
+
+/* Character device */
+static void chardev_channel_init(struct channel_data *chan);
+static char *chrdev_setup_rx(struct channel_data *channel, int size);
+static int chrdev_rx_done(struct channel_data *channel);
+static int chrdev_tx_done(struct channel_data *channel, int size);
+static ssize_t cosa_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos);
+static ssize_t cosa_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static unsigned int cosa_poll(struct file *file, poll_table *poll);
+static int cosa_open(struct inode *inode, struct file *file);
+static int cosa_release(struct inode *inode, struct file *file);
+static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+#ifdef COSA_FASYNC_WORKING
+static int cosa_fasync(struct inode *inode, struct file *file, int on);
+#endif
+
+static struct file_operations cosa_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = cosa_read,
+ .write = cosa_write,
+ .poll = cosa_poll,
+ .ioctl = cosa_chardev_ioctl,
+ .open = cosa_open,
+ .release = cosa_release,
+#ifdef COSA_FASYNC_WORKING
+ .fasync = cosa_fasync,
+#endif
+};
+
+/* Ioctls */
+static int cosa_start(struct cosa_data *cosa, int address);
+static int cosa_reset(struct cosa_data *cosa);
+static int cosa_download(struct cosa_data *cosa, void __user *a);
+static int cosa_readmem(struct cosa_data *cosa, void __user *a);
+
+/* COSA/SRP ROM monitor */
+static int download(struct cosa_data *cosa, const char __user *data, int len, int addr);
+static int startmicrocode(struct cosa_data *cosa, int address);
+static int readmem(struct cosa_data *cosa, char __user *data, int len, int addr);
+static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
+
+/* Auxiliary functions */
+static int get_wait_data(struct cosa_data *cosa);
+static int put_wait_data(struct cosa_data *cosa, int data);
+static int puthexnumber(struct cosa_data *cosa, int number);
+static void put_driver_status(struct cosa_data *cosa);
+static void put_driver_status_nolock(struct cosa_data *cosa);
+
+/* Interrupt handling */
+static irqreturn_t cosa_interrupt(int irq, void *cosa, struct pt_regs *regs);
+
+/* I/O ops debugging */
+#ifdef DEBUG_IO
+static void debug_data_in(struct cosa_data *cosa, int data);
+static void debug_data_out(struct cosa_data *cosa, int data);
+static void debug_data_cmd(struct cosa_data *cosa, int data);
+static void debug_status_in(struct cosa_data *cosa, int status);
+static void debug_status_out(struct cosa_data *cosa, int status);
+#endif
+
+
+/* ---------- Initialization stuff ---------- */
+
+static int __init cosa_init(void)
+{
+ int i, err = 0;
+
+ printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
+#ifdef CONFIG_SMP
+ printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
+#endif
+ if (cosa_major > 0) {
+ if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
+ printk(KERN_WARNING "cosa: unable to get major %d\n",
+ cosa_major);
+ err = -EIO;
+ goto out;
+ }
+ } else {
+ if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
+ printk(KERN_WARNING "cosa: unable to register chardev\n");
+ err = -EIO;
+ goto out;
+ }
+ }
+ for (i=0; i<MAX_CARDS; i++)
+ cosa_cards[i].num = -1;
+ for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
+ cosa_probe(io[i], irq[i], dma[i]);
+ if (!nr_cards) {
+ printk(KERN_WARNING "cosa: no devices found.\n");
+ unregister_chrdev(cosa_major, "cosa");
+ err = -ENODEV;
+ goto out;
+ }
+ devfs_mk_dir("cosa");
+ cosa_class = class_simple_create(THIS_MODULE, "cosa");
+ if (IS_ERR(cosa_class)) {
+ err = PTR_ERR(cosa_class);
+ goto out_chrdev;
+ }
+ for (i=0; i<nr_cards; i++) {
+ class_simple_device_add(cosa_class, MKDEV(cosa_major, i),
+ NULL, "cosa%d", i);
+ err = devfs_mk_cdev(MKDEV(cosa_major, i),
+ S_IFCHR|S_IRUSR|S_IWUSR,
+ "cosa/%d", i);
+ if (err) {
+ class_simple_device_remove(MKDEV(cosa_major, i));
+ goto out_chrdev;
+ }
+ }
+ err = 0;
+ goto out;
+
+out_chrdev:
+ unregister_chrdev(cosa_major, "cosa");
+out:
+ return err;
+}
+module_init(cosa_init);
+
+static void __exit cosa_exit(void)
+{
+ struct cosa_data *cosa;
+ int i;
+ printk(KERN_INFO "Unloading the cosa module\n");
+
+ for (i=0; i<nr_cards; i++) {
+ class_simple_device_remove(MKDEV(cosa_major, i));
+ devfs_remove("cosa/%d", i);
+ }
+ class_simple_destroy(cosa_class);
+ devfs_remove("cosa");
+ for (cosa=cosa_cards; nr_cards--; cosa++) {
+ /* Clean up the per-channel data */
+ for (i=0; i<cosa->nchannels; i++) {
+ /* Chardev driver has no alloc'd per-channel data */
+ sppp_channel_delete(cosa->chan+i);
+ }
+ /* Clean up the per-card data */
+ kfree(cosa->chan);
+ kfree(cosa->bouncebuf);
+ free_irq(cosa->irq, cosa);
+ free_dma(cosa->dma);
+ release_region(cosa->datareg,is_8bit(cosa)?2:4);
+ }
+ unregister_chrdev(cosa_major, "cosa");
+}
+module_exit(cosa_exit);
+
+/*
+ * This function should register all the net devices needed for the
+ * single channel.
+ */
+static __inline__ void channel_init(struct channel_data *chan)
+{
+ sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
+
+ /* Initialize the chardev data structures */
+ chardev_channel_init(chan);
+
+ /* Register the sppp interface */
+ sppp_channel_init(chan);
+}
+
+static int cosa_probe(int base, int irq, int dma)
+{
+ struct cosa_data *cosa = cosa_cards+nr_cards;
+ int i, err = 0;
+
+ memset(cosa, 0, sizeof(struct cosa_data));
+
+ /* Checking validity of parameters: */
+ /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
+ if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
+ printk (KERN_INFO "cosa_probe: invalid IRQ %d\n", irq);
+ return -1;
+ }
+ /* I/O address should be between 0x100 and 0x3ff and should be
+ * a multiple of 8. */
+ if (base < 0x100 || base > 0x3ff || base & 0x7) {
+ printk (KERN_INFO "cosa_probe: invalid I/O address 0x%x\n",
+ base);
+ return -1;
+ }
+ /* DMA should be 0,1 or 3-7 */
+ if (dma < 0 || dma == 4 || dma > 7) {
+ printk (KERN_INFO "cosa_probe: invalid DMA %d\n", dma);
+ return -1;
+ }
+ /* and finally, on the 16-bit COSA the DMA should be 4-7 and the
+ * I/O base should not be a multiple of 0x10 */
+ if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
+ printk (KERN_INFO "cosa_probe: 8/16 bit base and DMA mismatch"
+ " (base=0x%x, dma=%d)\n", base, dma);
+ return -1;
+ }
+
+ cosa->dma = dma;
+ cosa->datareg = base;
+ cosa->statusreg = is_8bit(cosa)?base+1:base+2;
+ spin_lock_init(&cosa->lock);
+
+ if (!request_region(base, is_8bit(cosa)?2:4,"cosa"))
+ return -1;
+
+ if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
+ printk(KERN_DEBUG "cosa: probe at 0x%x failed.\n", base);
+ err = -1;
+ goto err_out;
+ }
+
+ /* Test the validity of identification string */
+ if (!strncmp(cosa->id_string, "SRP", 3))
+ cosa->type = "srp";
+ else if (!strncmp(cosa->id_string, "COSA", 4))
+ cosa->type = is_8bit(cosa)? "cosa8": "cosa16";
+ else {
+/* Print a warning only if we are not autoprobing */
+#ifndef COSA_ISA_AUTOPROBE
+ printk(KERN_INFO "cosa: valid signature not found at 0x%x.\n",
+ base);
+#endif
+ err = -1;
+ goto err_out;
+ }
+ /* Update the name of the region now that we know the card type */
+ release_region(base, is_8bit(cosa)?2:4);
+ if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
+ printk(KERN_DEBUG "cosa: changing name at 0x%x failed.\n", base);
+ return -1;
+ }
+
+ /* Now do IRQ autoprobe */
+ if (irq < 0) {
+ unsigned long irqs;
+/* printk(KERN_INFO "IRQ autoprobe\n"); */
+ irqs = probe_irq_on();
+ /*
+ * Enable interrupt on tx buffer empty (it sure is)
+ * really sure ?
+ * FIXME: When this code is not used as module, we should
+ * probably call udelay() instead of the interruptible sleep.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ cosa_putstatus(cosa, SR_TX_INT_ENA);
+ schedule_timeout(30);
+ irq = probe_irq_off(irqs);
+ /* Disable all IRQs from the card */
+ cosa_putstatus(cosa, 0);
+ /* Empty the received data register */
+ cosa_getdata8(cosa);
+
+ if (irq < 0) {
+ printk (KERN_INFO "cosa IRQ autoprobe: multiple interrupts obtained (%d, board at 0x%x)\n",
+ irq, cosa->datareg);
+ err = -1;
+ goto err_out;
+ }
+ if (irq == 0) {
+ printk (KERN_INFO "cosa IRQ autoprobe: no interrupt obtained (board at 0x%x)\n",
+ cosa->datareg);
+ /* return -1; */
+ }
+ }
+
+ cosa->irq = irq;
+ cosa->num = nr_cards;
+ cosa->usage = 0;
+ cosa->nchannels = 2; /* FIXME: how to determine this? */
+
+ if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
+ err = -1;
+ goto err_out;
+ }
+ if (request_dma(cosa->dma, cosa->type)) {
+ err = -1;
+ goto err_out1;
+ }
+
+ cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL|GFP_DMA);
+ if (!cosa->bouncebuf) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ sprintf(cosa->name, "cosa%d", cosa->num);
+
+ /* Initialize the per-channel data */
+ cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels,
+ GFP_KERNEL);
+ if (!cosa->chan) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+ memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels);
+ for (i=0; i<cosa->nchannels; i++) {
+ cosa->chan[i].cosa = cosa;
+ cosa->chan[i].num = i;
+ channel_init(cosa->chan+i);
+ }
+
+ printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
+ cosa->num, cosa->id_string, cosa->type,
+ cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
+
+ return nr_cards++;
+err_out3:
+ kfree(cosa->bouncebuf);
+err_out2:
+ free_dma(cosa->dma);
+err_out1:
+ free_irq(cosa->irq, cosa);
+err_out:
+ release_region(cosa->datareg,is_8bit(cosa)?2:4);
+ printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
+ cosa->num);
+ return err;
+}
+
+
+/*---------- SPPP/HDLC netdevice ---------- */
+
+static void cosa_setup(struct net_device *d)
+{
+ d->open = cosa_sppp_open;
+ d->stop = cosa_sppp_close;
+ d->hard_start_xmit = cosa_sppp_tx;
+ d->do_ioctl = cosa_sppp_ioctl;
+ d->get_stats = cosa_net_stats;
+ d->tx_timeout = cosa_sppp_timeout;
+ d->watchdog_timeo = TX_TIMEOUT;
+}
+
+static void sppp_channel_init(struct channel_data *chan)
+{
+ struct net_device *d;
+ chan->if_ptr = &chan->pppdev;
+ d = alloc_netdev(0, chan->name, cosa_setup);
+ if (!d) {
+ printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
+ return;
+ }
+ chan->pppdev.dev = d;
+ d->base_addr = chan->cosa->datareg;
+ d->irq = chan->cosa->irq;
+ d->dma = chan->cosa->dma;
+ d->priv = chan;
+ sppp_attach(&chan->pppdev);
+ if (register_netdev(d)) {
+ printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
+ sppp_detach(d);
+ free_netdev(d);
+ chan->pppdev.dev = NULL;
+ return;
+ }
+}
+
+static void sppp_channel_delete(struct channel_data *chan)
+{
+ unregister_netdev(chan->pppdev.dev);
+ sppp_detach(chan->pppdev.dev);
+ free_netdev(chan->pppdev.dev);
+ chan->pppdev.dev = NULL;
+}
+
+static int cosa_sppp_open(struct net_device *d)
+{
+ struct channel_data *chan = d->priv;
+ int err;
+ unsigned long flags;
+
+ if (!(chan->cosa->firmware_status & COSA_FW_START)) {
+ printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
+ chan->cosa->name, chan->cosa->firmware_status);
+ return -EPERM;
+ }
+ spin_lock_irqsave(&chan->cosa->lock, flags);
+ if (chan->usage != 0) {
+ printk(KERN_WARNING "%s: sppp_open called with usage count %d\n",
+ chan->name, chan->usage);
+ spin_unlock_irqrestore(&chan->cosa->lock, flags);
+ return -EBUSY;
+ }
+ chan->setup_rx = sppp_setup_rx;
+ chan->tx_done = sppp_tx_done;
+ chan->rx_done = sppp_rx_done;
+ chan->usage=-1;
+ chan->cosa->usage++;
+ spin_unlock_irqrestore(&chan->cosa->lock, flags);
+
+ err = sppp_open(d);
+ if (err) {
+ spin_lock_irqsave(&chan->cosa->lock, flags);
+ chan->usage=0;
+ chan->cosa->usage--;
+
+ spin_unlock_irqrestore(&chan->cosa->lock, flags);
+ return err;
+ }
+
+ netif_start_queue(d);
+ cosa_enable_rx(chan);
+ return 0;
+}
+
+static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct channel_data *chan = dev->priv;
+
+ netif_stop_queue(dev);
+
+ chan->tx_skb = skb;
+ cosa_start_tx(chan, skb->data, skb->len);
+ return 0;
+}
+
+static void cosa_sppp_timeout(struct net_device *dev)
+{
+ struct channel_data *chan = dev->priv;
+
+ if (test_bit(RXBIT, &chan->cosa->rxtx)) {
+ chan->stats.rx_errors++;
+ chan->stats.rx_missed_errors++;
+ } else {
+ chan->stats.tx_errors++;
+ chan->stats.tx_aborted_errors++;
+ }
+ cosa_kick(chan->cosa);
+ if (chan->tx_skb) {
+ dev_kfree_skb(chan->tx_skb);
+ chan->tx_skb = NULL;
+ }
+ netif_wake_queue(dev);
+}
+
+static int cosa_sppp_close(struct net_device *d)
+{
+ struct channel_data *chan = d->priv;
+ unsigned long flags;
+
+ netif_stop_queue(d);
+ sppp_close(d);
+ cosa_disable_rx(chan);
+ spin_lock_irqsave(&chan->cosa->lock, flags);
+ if (chan->rx_skb) {
+ kfree_skb(chan->rx_skb);
+ chan->rx_skb = NULL;
+ }
+ if (chan->tx_skb) {
+ kfree_skb(chan->tx_skb);
+ chan->tx_skb = NULL;
+ }
+ chan->usage=0;
+ chan->cosa->usage--;
+ spin_unlock_irqrestore(&chan->cosa->lock, flags);
+ return 0;
+}
+
+static char *sppp_setup_rx(struct channel_data *chan, int size)
+{
+ /*
+ * We can safely fall back to non-dma-able memory, because we have
+ * the cosa->bouncebuf pre-allocated.
+ */
+ if (chan->rx_skb)
+ kfree_skb(chan->rx_skb);
+ chan->rx_skb = dev_alloc_skb(size);
+ if (chan->rx_skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
+ chan->name);
+ chan->stats.rx_dropped++;
+ return NULL;
+ }
+ chan->pppdev.dev->trans_start = jiffies;
+ return skb_put(chan->rx_skb, size);
+}
+
+static int sppp_rx_done(struct channel_data *chan)
+{
+ if (!chan->rx_skb) {
+ printk(KERN_WARNING "%s: rx_done with empty skb!\n",
+ chan->name);
+ chan->stats.rx_errors++;
+ chan->stats.rx_frame_errors++;
+ return 0;
+ }
+ chan->rx_skb->protocol = htons(ETH_P_WAN_PPP);
+ chan->rx_skb->dev = chan->pppdev.dev;
+ chan->rx_skb->mac.raw = chan->rx_skb->data;
+ chan->stats.rx_packets++;
+ chan->stats.rx_bytes += chan->cosa->rxsize;
+ netif_rx(chan->rx_skb);
+ chan->rx_skb = NULL;
+ chan->pppdev.dev->last_rx = jiffies;
+ return 0;
+}
+
+/* ARGSUSED */
+static int sppp_tx_done(struct channel_data *chan, int size)
+{
+ if (!chan->tx_skb) {
+ printk(KERN_WARNING "%s: tx_done with empty skb!\n",
+ chan->name);
+ chan->stats.tx_errors++;
+ chan->stats.tx_aborted_errors++;
+ return 1;
+ }
+ dev_kfree_skb_irq(chan->tx_skb);
+ chan->tx_skb = NULL;
+ chan->stats.tx_packets++;
+ chan->stats.tx_bytes += size;
+ netif_wake_queue(chan->pppdev.dev);
+ return 1;
+}
+
+static struct net_device_stats *cosa_net_stats(struct net_device *dev)
+{
+ struct channel_data *chan = dev->priv;
+ return &chan->stats;
+}
+
+
+/*---------- Character device ---------- */
+
+static void chardev_channel_init(struct channel_data *chan)
+{
+ init_MUTEX(&chan->rsem);
+ init_MUTEX(&chan->wsem);
+}
+
+static ssize_t cosa_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ struct channel_data *chan = file->private_data;
+ struct cosa_data *cosa = chan->cosa;
+ char *kbuf;
+
+ if (!(cosa->firmware_status & COSA_FW_START)) {
+ printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
+ cosa->name, cosa->firmware_status);
+ return -EPERM;
+ }
+ if (down_interruptible(&chan->rsem))
+ return -ERESTARTSYS;
+
+ if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
+ printk(KERN_INFO "%s: cosa_read() - OOM\n", cosa->name);
+ up(&chan->rsem);
+ return -ENOMEM;
+ }
+
+ chan->rx_status = 0;
+ cosa_enable_rx(chan);
+ spin_lock_irqsave(&cosa->lock, flags);
+ add_wait_queue(&chan->rxwaitq, &wait);
+ while(!chan->rx_status) {
+ current->state = TASK_INTERRUPTIBLE;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ schedule();
+ spin_lock_irqsave(&cosa->lock, flags);
+ if (signal_pending(current) && chan->rx_status == 0) {
+ chan->rx_status = 1;
+ remove_wait_queue(&chan->rxwaitq, &wait);
+ current->state = TASK_RUNNING;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ up(&chan->rsem);
+ return -ERESTARTSYS;
+ }
+ }
+ remove_wait_queue(&chan->rxwaitq, &wait);
+ current->state = TASK_RUNNING;
+ kbuf = chan->rxdata;
+ count = chan->rxsize;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ up(&chan->rsem);
+
+ if (copy_to_user(buf, kbuf, count)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ kfree(kbuf);
+ return count;
+}
+
+static char *chrdev_setup_rx(struct channel_data *chan, int size)
+{
+ /* Expect size <= COSA_MTU */
+ chan->rxsize = size;
+ return chan->rxdata;
+}
+
+static int chrdev_rx_done(struct channel_data *chan)
+{
+ if (chan->rx_status) { /* Reader has died */
+ kfree(chan->rxdata);
+ up(&chan->wsem);
+ }
+ chan->rx_status = 1;
+ wake_up_interruptible(&chan->rxwaitq);
+ return 1;
+}
+
+
+static ssize_t cosa_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct channel_data *chan = file->private_data;
+ struct cosa_data *cosa = chan->cosa;
+ unsigned long flags;
+ char *kbuf;
+
+ if (!(cosa->firmware_status & COSA_FW_START)) {
+ printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
+ cosa->name, cosa->firmware_status);
+ return -EPERM;
+ }
+ if (down_interruptible(&chan->wsem))
+ return -ERESTARTSYS;
+
+ if (count > COSA_MTU)
+ count = COSA_MTU;
+
+ /* Allocate the buffer */
+ if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
+ printk(KERN_NOTICE "%s: cosa_write() OOM - dropping packet\n",
+ cosa->name);
+ up(&chan->wsem);
+ return -ENOMEM;
+ }
+ if (copy_from_user(kbuf, buf, count)) {
+ up(&chan->wsem);
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ chan->tx_status=0;
+ cosa_start_tx(chan, kbuf, count);
+
+ spin_lock_irqsave(&cosa->lock, flags);
+ add_wait_queue(&chan->txwaitq, &wait);
+ while(!chan->tx_status) {
+ current->state = TASK_INTERRUPTIBLE;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ schedule();
+ spin_lock_irqsave(&cosa->lock, flags);
+ if (signal_pending(current) && chan->tx_status == 0) {
+ chan->tx_status = 1;
+ remove_wait_queue(&chan->txwaitq, &wait);
+ current->state = TASK_RUNNING;
+ chan->tx_status = 1;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return -ERESTARTSYS;
+ }
+ }
+ remove_wait_queue(&chan->txwaitq, &wait);
+ current->state = TASK_RUNNING;
+ up(&chan->wsem);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ kfree(kbuf);
+ return count;
+}
+
+static int chrdev_tx_done(struct channel_data *chan, int size)
+{
+ if (chan->tx_status) { /* Writer was interrupted */
+ kfree(chan->txbuf);
+ up(&chan->wsem);
+ }
+ chan->tx_status = 1;
+ wake_up_interruptible(&chan->txwaitq);
+ return 1;
+}
+
+static unsigned int cosa_poll(struct file *file, poll_table *poll)
+{
+ printk(KERN_INFO "cosa_poll is here\n");
+ return 0;
+}
+
+static int cosa_open(struct inode *inode, struct file *file)
+{
+ struct cosa_data *cosa;
+ struct channel_data *chan;
+ unsigned long flags;
+ int n;
+
+ if ((n=iminor(file->f_dentry->d_inode)>>CARD_MINOR_BITS)
+ >= nr_cards)
+ return -ENODEV;
+ cosa = cosa_cards+n;
+
+ if ((n=iminor(file->f_dentry->d_inode)
+ & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels)
+ return -ENODEV;
+ chan = cosa->chan + n;
+
+ file->private_data = chan;
+
+ spin_lock_irqsave(&cosa->lock, flags);
+
+ if (chan->usage < 0) { /* in netdev mode */
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return -EBUSY;
+ }
+ cosa->usage++;
+ chan->usage++;
+
+ chan->tx_done = chrdev_tx_done;
+ chan->setup_rx = chrdev_setup_rx;
+ chan->rx_done = chrdev_rx_done;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return 0;
+}
+
+static int cosa_release(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel = file->private_data;
+ struct cosa_data *cosa;
+ unsigned long flags;
+
+ cosa = channel->cosa;
+ spin_lock_irqsave(&cosa->lock, flags);
+ cosa->usage--;
+ channel->usage--;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return 0;
+}
+
+#ifdef COSA_FASYNC_WORKING
+static struct fasync_struct *fasync[256] = { NULL, };
+
+/* To be done ... */
+static int cosa_fasync(struct inode *inode, struct file *file, int on)
+{
+ int port = iminor(inode);
+ int rv = fasync_helper(inode, file, on, &fasync[port]);
+ return rv < 0 ? rv : 0;
+}
+#endif
+
+
+/* ---------- Ioctls ---------- */
+
+/*
+ * Ioctl subroutines can safely be made inline, because they are called
+ * only from cosa_ioctl().
+ */
+static inline int cosa_reset(struct cosa_data *cosa)
+{
+ char idstring[COSA_MAX_ID_STRING];
+ if (cosa->usage > 1)
+ printk(KERN_INFO "cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ cosa->num, cosa->usage);
+ cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
+ if (cosa_reset_and_read_id(cosa, idstring) < 0) {
+ printk(KERN_NOTICE "cosa%d: reset failed\n", cosa->num);
+ return -EIO;
+ }
+ printk(KERN_INFO "cosa%d: resetting device: %s\n", cosa->num,
+ idstring);
+ cosa->firmware_status |= COSA_FW_RESET;
+ return 0;
+}
+
+/* High-level function to download data into COSA memory. Calls download() */
+static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
+{
+ struct cosa_download d;
+ int i;
+
+ if (cosa->usage > 1)
+ printk(KERN_INFO "%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ cosa->name, cosa->usage);
+ if (!(cosa->firmware_status & COSA_FW_RESET)) {
+ printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
+ cosa->name, cosa->firmware_status);
+ return -EPERM;
+ }
+
+ if (copy_from_user(&d, arg, sizeof(d)))
+ return -EFAULT;
+
+ if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
+ return -EINVAL;
+ if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
+ return -EINVAL;
+
+
+ /* If something fails, force the user to reset the card */
+ cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_DOWNLOAD);
+
+ i = download(cosa, d.code, d.len, d.addr);
+ if (i < 0) {
+ printk(KERN_NOTICE "cosa%d: microcode download failed: %d\n",
+ cosa->num, i);
+ return -EIO;
+ }
+ printk(KERN_INFO "cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
+ cosa->num, d.len, d.addr);
+ cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
+ return 0;
+}
+
+/* High-level function to read COSA memory. Calls readmem() */
+static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
+{
+ struct cosa_download d;
+ int i;
+
+ if (cosa->usage > 1)
+ printk(KERN_INFO "cosa%d: WARNING: readmem requested with "
+ "cosa->usage > 1 (%d). Odd things may happen.\n",
+ cosa->num, cosa->usage);
+ if (!(cosa->firmware_status & COSA_FW_RESET)) {
+ printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
+ cosa->name, cosa->firmware_status);
+ return -EPERM;
+ }
+
+ if (copy_from_user(&d, arg, sizeof(d)))
+ return -EFAULT;
+
+ /* If something fails, force the user to reset the card */
+ cosa->firmware_status &= ~COSA_FW_RESET;
+
+ i = readmem(cosa, d.code, d.len, d.addr);
+ if (i < 0) {
+ printk(KERN_NOTICE "cosa%d: reading memory failed: %d\n",
+ cosa->num, i);
+ return -EIO;
+ }
+ printk(KERN_INFO "cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
+ cosa->num, d.len, d.addr);
+ cosa->firmware_status |= COSA_FW_RESET;
+ return 0;
+}
+
+/* High-level function to start microcode. Calls startmicrocode(). */
+static inline int cosa_start(struct cosa_data *cosa, int address)
+{
+ int i;
+
+ if (cosa->usage > 1)
+ printk(KERN_INFO "cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ cosa->num, cosa->usage);
+
+ if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
+ != (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
+ printk(KERN_NOTICE "%s: download the microcode and/or reset the card first (status %d).\n",
+ cosa->name, cosa->firmware_status);
+ return -EPERM;
+ }
+ cosa->firmware_status &= ~COSA_FW_RESET;
+ if ((i=startmicrocode(cosa, address)) < 0) {
+ printk(KERN_NOTICE "cosa%d: start microcode at 0x%04x failed: %d\n",
+ cosa->num, address, i);
+ return -EIO;
+ }
+ printk(KERN_INFO "cosa%d: starting microcode at 0x%04x\n",
+ cosa->num, address);
+ cosa->startaddr = address;
+ cosa->firmware_status |= COSA_FW_START;
+ return 0;
+}
+
+/* Buffer of size at least COSA_MAX_ID_STRING is expected */
+static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
+{
+ int l = strlen(cosa->id_string)+1;
+ if (copy_to_user(string, cosa->id_string, l))
+ return -EFAULT;
+ return l;
+}
+
+/* Buffer of size at least COSA_MAX_ID_STRING is expected */
+static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
+{
+ int l = strlen(cosa->type)+1;
+ if (copy_to_user(string, cosa->type, l))
+ return -EFAULT;
+ return l;
+}
+
+static int cosa_ioctl_common(struct cosa_data *cosa,
+ struct channel_data *channel, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ switch(cmd) {
+ case COSAIORSET: /* Reset the device */
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+ return cosa_reset(cosa);
+ case COSAIOSTRT: /* Start the firmware */
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return cosa_start(cosa, arg);
+ case COSAIODOWNLD: /* Download the firmware */
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ return cosa_download(cosa, argp);
+ case COSAIORMEM:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return cosa_readmem(cosa, argp);
+ case COSAIORTYPE:
+ return cosa_gettype(cosa, argp);
+ case COSAIORIDSTR:
+ return cosa_getidstr(cosa, argp);
+ case COSAIONRCARDS:
+ return nr_cards;
+ case COSAIONRCHANS:
+ return cosa->nchannels;
+ case COSAIOBMSET:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ if (is_8bit(cosa))
+ return -EINVAL;
+ if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
+ return -EINVAL;
+ cosa->busmaster = arg;
+ return 0;
+ case COSAIOBMGET:
+ return cosa->busmaster;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ int rv;
+ struct channel_data *chan = dev->priv;
+ rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
+ if (rv == -ENOIOCTLCMD) {
+ return sppp_do_ioctl(dev, ifr, cmd);
+ }
+ return rv;
+}
+
+static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct channel_data *channel = file->private_data;
+ struct cosa_data *cosa = channel->cosa;
+ return cosa_ioctl_common(cosa, channel, cmd, arg);
+}
+
+
+/*---------- HW layer interface ---------- */
+
+/*
+ * The higher layer can bind itself to the HW layer by setting the callbacks
+ * in the channel_data structure and by using these routines.
+ */
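+/*
+ * As an illustration, the character device layer later in this file binds
+ * itself roughly like this (see cosa_open() and cosa_read()):
+ *
+ *     chan->setup_rx = chrdev_setup_rx;
+ *     chan->rx_done = chrdev_rx_done;
+ *     chan->tx_done = chrdev_tx_done;
+ *     cosa_enable_rx(chan);
+ */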
+static void cosa_enable_rx(struct channel_data *chan)
+{
+ struct cosa_data *cosa = chan->cosa;
+
+ if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
+ put_driver_status(cosa);
+}
+
+static void cosa_disable_rx(struct channel_data *chan)
+{
+ struct cosa_data *cosa = chan->cosa;
+
+ if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
+ put_driver_status(cosa);
+}
+
+/*
+ * FIXME: This routine probably should check for cosa_start_tx() called when
+ * the previous transmit is still unfinished. In this case the non-zero
+ * return value should indicate to the caller that queueing up
+ * the transmit has failed.
+ */
+static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
+{
+ struct cosa_data *cosa = chan->cosa;
+ unsigned long flags;
+#ifdef DEBUG_DATA
+ int i;
+
+ printk(KERN_INFO "cosa%dc%d: starting tx(0x%x)", chan->cosa->num,
+ chan->num, len);
+ for (i=0; i<len; i++)
+ printk(" %02x", buf[i]&0xff);
+ printk("\n");
+#endif
+ spin_lock_irqsave(&cosa->lock, flags);
+ chan->txbuf = buf;
+ chan->txsize = len;
+ if (len > COSA_MTU)
+ chan->txsize = COSA_MTU;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+
+ /* Tell the firmware we are ready */
+ set_bit(chan->num, &cosa->txbitmap);
+ put_driver_status(cosa);
+
+ return 0;
+}
+
+static void put_driver_status(struct cosa_data *cosa)
+{
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&cosa->lock, flags);
+
+ status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
+ | (cosa->txbitmap ? DRIVER_TX_READY : 0)
+ | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
+ &DRIVER_TXMAP_MASK : 0);
+ if (!cosa->rxtx) {
+ if (cosa->rxbitmap|cosa->txbitmap) {
+ if (!cosa->enabled) {
+ cosa_putstatus(cosa, SR_RX_INT_ENA);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_RX_INT_ENA);
+#endif
+ cosa->enabled = 1;
+ }
+ } else if (cosa->enabled) {
+ cosa->enabled = 0;
+ cosa_putstatus(cosa, 0);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, 0);
+#endif
+ }
+ cosa_putdata8(cosa, status);
+#ifdef DEBUG_IO
+ debug_data_cmd(cosa, status);
+#endif
+ }
+ spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static void put_driver_status_nolock(struct cosa_data *cosa)
+{
+ int status;
+
+ status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
+ | (cosa->txbitmap ? DRIVER_TX_READY : 0)
+ | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
+ &DRIVER_TXMAP_MASK : 0);
+
+ if (cosa->rxbitmap|cosa->txbitmap) {
+ cosa_putstatus(cosa, SR_RX_INT_ENA);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_RX_INT_ENA);
+#endif
+ cosa->enabled = 1;
+ } else {
+ cosa_putstatus(cosa, 0);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, 0);
+#endif
+ cosa->enabled = 0;
+ }
+ cosa_putdata8(cosa, status);
+#ifdef DEBUG_IO
+ debug_data_cmd(cosa, status);
+#endif
+}
+
+/*
+ * The "kickme" function: When the DMA times out, this is called to
+ * clean up the driver status.
+ * FIXME: Preliminary support, the interface is probably wrong.
+ */
+static void cosa_kick(struct cosa_data *cosa)
+{
+ unsigned long flags, flags1;
+ char *s = "(probably) IRQ";
+
+ if (test_bit(RXBIT, &cosa->rxtx))
+ s = "RX DMA";
+ if (test_bit(TXBIT, &cosa->rxtx))
+ s = "TX DMA";
+
+ printk(KERN_INFO "%s: %s timeout - restarting.\n", cosa->name, s);
+ spin_lock_irqsave(&cosa->lock, flags);
+ cosa->rxtx = 0;
+
+ flags1 = claim_dma_lock();
+ disable_dma(cosa->dma);
+ clear_dma_ff(cosa->dma);
+ release_dma_lock(flags1);
+
+ /* FIXME: Anything else? */
+ udelay(100);
+ cosa_putstatus(cosa, 0);
+ udelay(100);
+ (void) cosa_getdata8(cosa);
+ udelay(100);
+ cosa_putdata8(cosa, 0);
+ udelay(100);
+ put_driver_status_nolock(cosa);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+/*
+ * Check if the whole buffer is DMA-able, i.e. it lies below 16M of
+ * physical memory and doesn't span a 64k boundary. For now it seems
+ * SKBs never violate this, but we check it anyway.
+ */
+static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
+{
+ static int count;
+ unsigned long b = (unsigned long)buf;
+ if (b+len >= MAX_DMA_ADDRESS)
+ return 0;
+ if ((b^ (b+len)) & 0x10000) {
+ if (count++ < 5)
+ printk(KERN_INFO "%s: packet spanning a 64k boundary\n",
+ chan->name);
+ return 0;
+ }
+ return 1;
+}
+
+
+/* ---------- The SRP/COSA ROM monitor functions ---------- */
+
+/*
+ * Downloading SRP microcode: say "w" to the SRP monitor, which answers
+ * with "w=". The driver then sends the 4-digit hex start address of the
+ * microcode, followed by a single space. The monitor replies with " =",
+ * and the driver sends the 4-digit hex address of the last byte, again
+ * followed by a single space; the monitor replies with a space and the
+ * download begins. After the download the monitor replies with "\r\n."
+ * (CR LF dot).
+ */
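+/*
+ * An example of the resulting chat (the addresses are illustrative only,
+ * here the SRP load address 0x4400 and a 0x100-byte image):
+ *
+ *     driver: "w"        monitor: "w="
+ *     driver: "4400 "    monitor echoes each digit, then sends " ="
+ *     driver: "44FF "    monitor echoes each digit, then sends " "
+ *     driver: 0x100 raw microcode bytes
+ *     monitor: "\r\n."
+ */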
+static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
+{
+ int i;
+
+ if (put_wait_data(cosa, 'w') == -1) return -1;
+ if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
+ if (get_wait_data(cosa) != '=') return -3;
+
+ if (puthexnumber(cosa, address) < 0) return -4;
+ if (put_wait_data(cosa, ' ') == -1) return -10;
+ if (get_wait_data(cosa) != ' ') return -11;
+ if (get_wait_data(cosa) != '=') return -12;
+
+ if (puthexnumber(cosa, address+length-1) < 0) return -13;
+ if (put_wait_data(cosa, ' ') == -1) return -18;
+ if (get_wait_data(cosa) != ' ') return -19;
+
+ while (length--) {
+ char c;
+#ifndef SRP_DOWNLOAD_AT_BOOT
+ if (get_user(c, microcode))
+ return -23; /* ??? */
+#else
+ c = *microcode;
+#endif
+ if (put_wait_data(cosa, c) == -1)
+ return -20;
+ microcode++;
+ }
+
+ if (get_wait_data(cosa) != '\r') return -21;
+ if (get_wait_data(cosa) != '\n') return -22;
+ if (get_wait_data(cosa) != '.') return -23;
+#if 0
+ printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
+#endif
+ return 0;
+}
+
+
+/*
+ * Starting microcode is done via the "g" command of the SRP monitor.
+ * The chat should be the following: "g" "g=" "<addr><CR>"
+ * "<CR><CR><LF><CR><LF>".
+ */
+static int startmicrocode(struct cosa_data *cosa, int address)
+{
+ if (put_wait_data(cosa, 'g') == -1) return -1;
+ if (get_wait_data(cosa) != 'g') return -2;
+ if (get_wait_data(cosa) != '=') return -3;
+
+ if (puthexnumber(cosa, address) < 0) return -4;
+ if (put_wait_data(cosa, '\r') == -1) return -5;
+
+ if (get_wait_data(cosa) != '\r') return -6;
+ if (get_wait_data(cosa) != '\r') return -7;
+ if (get_wait_data(cosa) != '\n') return -8;
+ if (get_wait_data(cosa) != '\r') return -9;
+ if (get_wait_data(cosa) != '\n') return -10;
+#if 0
+ printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
+#endif
+ return 0;
+}
+
+/*
+ * Reading memory is done via the "r" command of the SRP monitor.
+ * The chat is the following: "r" "r=" "<addr> " " =" "<last_byte> " " ".
+ * Then the driver can read the data, and the conversation is finished
+ * by the SRP monitor sending "<CR><LF>." (dot at the end).
+ *
+ * This routine is not needed during the normal operation and serves
+ * for debugging purposes only.
+ */
+static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
+{
+ if (put_wait_data(cosa, 'r') == -1) return -1;
+ if ((get_wait_data(cosa)) != 'r') return -2;
+ if ((get_wait_data(cosa)) != '=') return -3;
+
+ if (puthexnumber(cosa, address) < 0) return -4;
+ if (put_wait_data(cosa, ' ') == -1) return -5;
+ if (get_wait_data(cosa) != ' ') return -6;
+ if (get_wait_data(cosa) != '=') return -7;
+
+ if (puthexnumber(cosa, address+length-1) < 0) return -8;
+ if (put_wait_data(cosa, ' ') == -1) return -9;
+ if (get_wait_data(cosa) != ' ') return -10;
+
+ while (length--) {
+ char c;
+ int i;
+ if ((i=get_wait_data(cosa)) == -1) {
+ printk (KERN_INFO "cosa: 0x%04x bytes remaining\n",
+ length);
+ return -11;
+ }
+ c=i;
+#if 1
+ if (put_user(c, microcode))
+ return -23; /* ??? */
+#else
+ *microcode = c;
+#endif
+ microcode++;
+ }
+
+ if (get_wait_data(cosa) != '\r') return -21;
+ if (get_wait_data(cosa) != '\n') return -22;
+ if (get_wait_data(cosa) != '.') return -23;
+#if 0
+ printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
+#endif
+ return 0;
+}
+
+/*
+ * This function resets the device and reads the initial prompt
+ * of the device's ROM monitor.
+ */
+static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
+{
+ int i=0, id=0, prev=0, curr=0;
+
+ /* Reset the card ... */
+ cosa_putstatus(cosa, 0);
+ cosa_getdata8(cosa);
+ cosa_putstatus(cosa, SR_RST);
+#ifdef MODULE
+ msleep(500);
+#else
+ udelay(5*100000);
+#endif
+ /* Disable all IRQs from the card */
+ cosa_putstatus(cosa, 0);
+
+ /*
+ * Try to read the ID string. The card then prints out the
+ * identification string ended by the "\n\x2e".
+ *
+ * The following loop is indexed through i (instead of id)
+ * to avoid looping forever when for any reason
+ * the port returns '\r', '\n' or '\x2e' permanently.
+ */
+ for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
+ if ((curr = get_wait_data(cosa)) == -1) {
+ return -1;
+ }
+ curr &= 0xff;
+ if (curr != '\r' && curr != '\n' && curr != 0x2e)
+ idstring[id++] = curr;
+ if (curr == 0x2e && prev == '\n')
+ break;
+ }
+ /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
+ idstring[id] = '\0';
+ return id;
+}
+
+
+/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
+
+/*
+ * This routine gets the data byte from the card waiting for the SR_RX_RDY
+ * bit to be set in a loop. It should be used in the exceptional cases
+ * only (for example when resetting the card or downloading the firmware).
+ */
+static int get_wait_data(struct cosa_data *cosa)
+{
+ int retries = 1000;
+
+ while (--retries) {
+ /* read data and return them */
+ if (cosa_getstatus(cosa) & SR_RX_RDY) {
+ short r;
+ r = cosa_getdata8(cosa);
+#if 0
+ printk(KERN_INFO "cosa: get_wait_data returning after %d retries\n", 999-retries);
+#endif
+ return r;
+ }
+ /* sleep if not ready to read */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
+ cosa_getstatus(cosa));
+ return -1;
+}
+
+/*
+ * This routine puts the data byte to the card waiting for the SR_TX_RDY
+ * bit to be set in a loop. It should be used in the exceptional cases
+ * only (for example when resetting the card or downloading the firmware).
+ */
+static int put_wait_data(struct cosa_data *cosa, int data)
+{
+ int retries = 1000;
+ while (--retries) {
+ /* read data and return them */
+ if (cosa_getstatus(cosa) & SR_TX_RDY) {
+ cosa_putdata8(cosa, data);
+#if 0
+ printk(KERN_INFO "Putdata: %d retries\n", 999-retries);
+#endif
+ return 0;
+ }
+#if 0
+ /* sleep if not ready to read */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+#endif
+ }
+ printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
+ cosa->num, cosa_getstatus(cosa));
+ return -1;
+}
+
+/*
+ * The following routine puts the hexadecimal number into the SRP monitor
+ * and verifies the proper echo of the sent bytes. It returns 0 on success;
+ * a negative value of -1, -3, -5 or -7 means that put_wait_data() failed,
+ * and -2, -4, -6 or -8 means that reading the echo failed.
+ */
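+/*
+ * For example (just to illustrate the encoding above), failing to write
+ * the third digit (i == 2) returns -1 - 2*2 == -5, while a bad echo of
+ * that digit returns -2 - 2*2 == -6.
+ */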
+static int puthexnumber(struct cosa_data *cosa, int number)
+{
+ char temp[5];
+ int i;
+
+ /* Well, I should probably replace this by something faster. */
+ sprintf(temp, "%04X", number);
+ for (i=0; i<4; i++) {
+ if (put_wait_data(cosa, temp[i]) == -1) {
+ printk(KERN_NOTICE "cosa%d: puthexnumber failed to write byte %d\n",
+ cosa->num, i);
+ return -1-2*i;
+ }
+ if (get_wait_data(cosa) != temp[i]) {
+ printk(KERN_NOTICE "cosa%d: puthexnumber failed to read echo of byte %d\n",
+ cosa->num, i);
+ return -2-2*i;
+ }
+ }
+ return 0;
+}
+
+
+/* ---------- Interrupt routines ---------- */
+
+/*
+ * There are three types of interrupt:
+ * At the beginning of transmit - this is handled in tx_interrupt(),
+ * at the beginning of receive - it is in rx_interrupt() and
+ * at the end of transmit/receive - it is the eot_interrupt() function.
+ * These functions are multiplexed by cosa_interrupt() according to the
+ * COSA status byte. I have moved the rx/tx/eot interrupt handling into
+ * separate functions to make it more readable. These functions are inline,
+ * so there should be no overhead of function call.
+ *
+ * In the COSA bus-master mode, we need to tell the card the address of a
+ * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
+ * It's time to use the bottom half :-(
+ */
+
+/*
+ * Transmit interrupt routine - called when COSA is willing to obtain
+ * data from the OS. The trickiest part of the routine is the selection
+ * of the channel we (the OS) want to send a packet for. For SRP we should
+ * probably use a round-robin approach. The newer COSA firmwares have a
+ * simple flow control - bits 2 and 3 of the status word set to 1 mean
+ * that channel 0 or 1 respectively doesn't want to receive data.
+ *
+ * It seems there is a bug in COSA firmware (need to trace it further):
+ * When the driver status says that the kernel has no more data for transmit
+ * (e.g. at the end of TX DMA) and then the kernel changes its mind
+ * (e.g. new packet is queued to hard_start_xmit()), the card issues
+ * the TX interrupt but does not mark the channel as ready-to-transmit.
+ * The fix seems to be to push the packet to COSA despite its request.
+ * We first try to obey the card's opinion, and then fall back to forced TX.
+ */
+static inline void tx_interrupt(struct cosa_data *cosa, int status)
+{
+ unsigned long flags, flags1;
+#ifdef DEBUG_IRQS
+ printk(KERN_INFO "cosa%d: SR_DOWN_REQUEST status=0x%04x\n",
+ cosa->num, status);
+#endif
+ spin_lock_irqsave(&cosa->lock, flags);
+ set_bit(TXBIT, &cosa->rxtx);
+ if (!test_bit(IRQBIT, &cosa->rxtx)) {
+ /* flow control, see the comment above */
+ int i=0;
+ if (!cosa->txbitmap) {
+ printk(KERN_WARNING "%s: No channel wants data "
+ "in TX IRQ. Expect DMA timeout.",
+ cosa->name);
+ put_driver_status_nolock(cosa);
+ clear_bit(TXBIT, &cosa->rxtx);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return;
+ }
+ while(1) {
+ cosa->txchan++;
+ i++;
+ if (cosa->txchan >= cosa->nchannels)
+ cosa->txchan = 0;
+ if (!(cosa->txbitmap & (1<<cosa->txchan)))
+ continue;
+ if (~status & (1 << (cosa->txchan+DRIVER_TXMAP_SHIFT)))
+ break;
+ /* in second pass, accept first ready-to-TX channel */
+ if (i > cosa->nchannels) {
+ /* Can be safely ignored */
+#ifdef DEBUG_IRQS
+ printk(KERN_DEBUG "%s: Forcing TX "
+ "to not-ready channel %d\n",
+ cosa->name, cosa->txchan);
+#endif
+ break;
+ }
+ }
+
+ cosa->txsize = cosa->chan[cosa->txchan].txsize;
+ if (cosa_dma_able(cosa->chan+cosa->txchan,
+ cosa->chan[cosa->txchan].txbuf, cosa->txsize)) {
+ cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
+ } else {
+ memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
+ cosa->txsize);
+ cosa->txbuf = cosa->bouncebuf;
+ }
+ }
+
+ if (is_8bit(cosa)) {
+ if (!test_bit(IRQBIT, &cosa->rxtx)) {
+ cosa_putstatus(cosa, SR_TX_INT_ENA);
+ cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0)|
+ ((cosa->txsize >> 8) & 0x1f));
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_TX_INT_ENA);
+ debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0)|
+ ((cosa->txsize >> 8) & 0x1f));
+ debug_data_in(cosa, cosa_getdata8(cosa));
+#else
+ cosa_getdata8(cosa);
+#endif
+ set_bit(IRQBIT, &cosa->rxtx);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return;
+ } else {
+ clear_bit(IRQBIT, &cosa->rxtx);
+ cosa_putstatus(cosa, 0);
+ cosa_putdata8(cosa, cosa->txsize&0xff);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, 0);
+ debug_data_out(cosa, cosa->txsize&0xff);
+#endif
+ }
+ } else {
+ cosa_putstatus(cosa, SR_TX_INT_ENA);
+ cosa_putdata16(cosa, ((cosa->txchan<<13) & 0xe000)
+ | (cosa->txsize & 0x1fff));
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_TX_INT_ENA);
+ debug_data_out(cosa, ((cosa->txchan<<13) & 0xe000)
+ | (cosa->txsize & 0x1fff));
+ debug_data_in(cosa, cosa_getdata8(cosa));
+ debug_status_out(cosa, 0);
+#else
+ cosa_getdata8(cosa);
+#endif
+ cosa_putstatus(cosa, 0);
+ }
+
+ if (cosa->busmaster) {
+ unsigned long addr = virt_to_bus(cosa->txbuf);
+ int count=0;
+ printk(KERN_INFO "busmaster IRQ\n");
+ while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ count++;
+ udelay(10);
+ if (count > 1000) break;
+ }
+ printk(KERN_INFO "status %x\n", cosa_getstatus(cosa));
+ printk(KERN_INFO "ready after %d loops\n", count);
+ cosa_putdata16(cosa, (addr >> 16)&0xffff);
+
+ count = 0;
+ while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ count++;
+ if (count > 1000) break;
+ udelay(10);
+ }
+ printk(KERN_INFO "ready after %d loops\n", count);
+ cosa_putdata16(cosa, addr &0xffff);
+ flags1 = claim_dma_lock();
+ set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
+ enable_dma(cosa->dma);
+ release_dma_lock(flags1);
+ } else {
+ /* start the DMA */
+ flags1 = claim_dma_lock();
+ disable_dma(cosa->dma);
+ clear_dma_ff(cosa->dma);
+ set_dma_mode(cosa->dma, DMA_MODE_WRITE);
+ set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
+ set_dma_count(cosa->dma, cosa->txsize);
+ enable_dma(cosa->dma);
+ release_dma_lock(flags1);
+ }
+ cosa_putstatus(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+#endif
+ spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static inline void rx_interrupt(struct cosa_data *cosa, int status)
+{
+ unsigned long flags;
+#ifdef DEBUG_IRQS
+ printk(KERN_INFO "cosa%d: SR_UP_REQUEST\n", cosa->num);
+#endif
+
+ spin_lock_irqsave(&cosa->lock, flags);
+ set_bit(RXBIT, &cosa->rxtx);
+
+ if (is_8bit(cosa)) {
+ if (!test_bit(IRQBIT, &cosa->rxtx)) {
+ set_bit(IRQBIT, &cosa->rxtx);
+ put_driver_status_nolock(cosa);
+ cosa->rxsize = cosa_getdata8(cosa) <<8;
+#ifdef DEBUG_IO
+ debug_data_in(cosa, cosa->rxsize >> 8);
+#endif
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ return;
+ } else {
+ clear_bit(IRQBIT, &cosa->rxtx);
+ cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
+#ifdef DEBUG_IO
+ debug_data_in(cosa, cosa->rxsize & 0xff);
+#endif
+#if 0
+ printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
+ cosa->num, cosa->rxsize);
+#endif
+ }
+ } else {
+ cosa->rxsize = cosa_getdata16(cosa);
+#ifdef DEBUG_IO
+ debug_data_in(cosa, cosa->rxsize);
+#endif
+#if 0
+ printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
+ cosa->num, cosa->rxsize);
+#endif
+ }
+ if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
+ printk(KERN_WARNING "%s: rx for unknown channel (0x%04x)\n",
+ cosa->name, cosa->rxsize);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+ goto reject;
+ }
+ cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
+ cosa->rxsize &= 0x1fff;
+ spin_unlock_irqrestore(&cosa->lock, flags);
+
+ cosa->rxbuf = NULL;
+ if (cosa->rxchan->setup_rx)
+ cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
+
+ if (!cosa->rxbuf) {
+reject: /* Reject the packet */
+ printk(KERN_INFO "cosa%d: rejecting packet on channel %d\n",
+ cosa->num, cosa->rxchan->num);
+ cosa->rxbuf = cosa->bouncebuf;
+ }
+
+ /* start the DMA */
+ flags = claim_dma_lock();
+ disable_dma(cosa->dma);
+ clear_dma_ff(cosa->dma);
+ set_dma_mode(cosa->dma, DMA_MODE_READ);
+ if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) {
+ set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
+ } else {
+ set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
+ }
+ set_dma_count(cosa->dma, (cosa->rxsize&0x1fff));
+ enable_dma(cosa->dma);
+ release_dma_lock(flags);
+ spin_lock_irqsave(&cosa->lock, flags);
+ cosa_putstatus(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ if (!is_8bit(cosa) && (status & SR_TX_RDY))
+ cosa_putdata8(cosa, DRIVER_RX_READY);
+#ifdef DEBUG_IO
+ debug_status_out(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ if (!is_8bit(cosa) && (status & SR_TX_RDY))
+ debug_data_cmd(cosa, DRIVER_RX_READY);
+#endif
+ spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static inline void eot_interrupt(struct cosa_data *cosa, int status)
+{
+ unsigned long flags, flags1;
+ spin_lock_irqsave(&cosa->lock, flags);
+ flags1 = claim_dma_lock();
+ disable_dma(cosa->dma);
+ clear_dma_ff(cosa->dma);
+ release_dma_lock(flags1);
+ if (test_bit(TXBIT, &cosa->rxtx)) {
+ struct channel_data *chan = cosa->chan+cosa->txchan;
+ if (chan->tx_done)
+ if (chan->tx_done(chan, cosa->txsize))
+ clear_bit(chan->num, &cosa->txbitmap);
+ } else if (test_bit(RXBIT, &cosa->rxtx)) {
+#ifdef DEBUG_DATA
+ {
+ int i;
+ printk(KERN_INFO "cosa%dc%d: done rx(0x%x)", cosa->num,
+ cosa->rxchan->num, cosa->rxsize);
+ for (i=0; i<cosa->rxsize; i++)
+ printk (" %02x", cosa->rxbuf[i]&0xff);
+ printk("\n");
+ }
+#endif
+ /* Packet for unknown channel? */
+ if (cosa->rxbuf == cosa->bouncebuf)
+ goto out;
+ if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
+ memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
+ if (cosa->rxchan->rx_done)
+ if (cosa->rxchan->rx_done(cosa->rxchan))
+ clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
+ } else {
+ printk(KERN_NOTICE "cosa%d: unexpected EOT interrupt\n",
+ cosa->num);
+ }
+ /*
+ * Clear the RXBIT, TXBIT and IRQBIT (the last of these should be
+ * clear already anyway). We should do it as soon as possible
+ * so that we can tell the COSA we are done and give it time
+ * to recover.
+ */
+out:
+ cosa->rxtx = 0;
+ put_driver_status_nolock(cosa);
+ spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static irqreturn_t cosa_interrupt(int irq, void *cosa_, struct pt_regs *regs)
+{
+ unsigned status;
+ int count = 0;
+ struct cosa_data *cosa = cosa_;
+again:
+ status = cosa_getstatus(cosa);
+#ifdef DEBUG_IRQS
+ printk(KERN_INFO "cosa%d: got IRQ, status 0x%02x\n", cosa->num,
+ status & 0xff);
+#endif
+#ifdef DEBUG_IO
+ debug_status_in(cosa, status);
+#endif
+ switch (status & SR_CMD_FROM_SRP_MASK) {
+ case SR_DOWN_REQUEST:
+ tx_interrupt(cosa, status);
+ break;
+ case SR_UP_REQUEST:
+ rx_interrupt(cosa, status);
+ break;
+ case SR_END_OF_TRANSFER:
+ eot_interrupt(cosa, status);
+ break;
+ default:
+ /* We may be too fast for SRP. Try to wait a bit more. */
+ if (count++ < 100) {
+ udelay(100);
+ goto again;
+ }
+ printk(KERN_INFO "cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
+ cosa->num, status & 0xff, count);
+ }
+#ifdef DEBUG_IRQS
+ if (count)
+ printk(KERN_INFO "%s: %d-times got unknown status in IRQ\n",
+ cosa->name, count);
+ else
+ printk(KERN_INFO "%s: returning from IRQ\n", cosa->name);
+#endif
+ return IRQ_HANDLED;
+}
+
+
+/* ---------- I/O debugging routines ---------- */
+/*
+ * These routines can be used to monitor COSA/SRP I/O and to printk()
+ * the data being transferred on the data and status I/O port in a
+ * readable way.
+ */
+
+#ifdef DEBUG_IO
+static void debug_status_in(struct cosa_data *cosa, int status)
+{
+ char *s;
+ switch(status & SR_CMD_FROM_SRP_MASK) {
+ case SR_UP_REQUEST:
+ s = "RX_REQ";
+ break;
+ case SR_DOWN_REQUEST:
+ s = "TX_REQ";
+ break;
+ case SR_END_OF_TRANSFER:
+ s = "ET_REQ";
+ break;
+ default:
+ s = "NO_REQ";
+ break;
+ }
+ printk(KERN_INFO "%s: IO: status -> 0x%02x (%s%s%s%s)\n",
+ cosa->name,
+ status,
+ status & SR_USR_RQ ? "USR_RQ|":"",
+ status & SR_TX_RDY ? "TX_RDY|":"",
+ status & SR_RX_RDY ? "RX_RDY|":"",
+ s);
+}
+
+static void debug_status_out(struct cosa_data *cosa, int status)
+{
+ printk(KERN_INFO "%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
+ cosa->name,
+ status,
+ status & SR_RX_DMA_ENA ? "RXDMA|":"!rxdma|",
+ status & SR_TX_DMA_ENA ? "TXDMA|":"!txdma|",
+ status & SR_RST ? "RESET|":"",
+ status & SR_USR_INT_ENA ? "USRINT|":"!usrint|",
+ status & SR_TX_INT_ENA ? "TXINT|":"!txint|",
+ status & SR_RX_INT_ENA ? "RXINT":"!rxint");
+}
+
+static void debug_data_in(struct cosa_data *cosa, int data)
+{
+ printk(KERN_INFO "%s: IO: data -> 0x%04x\n", cosa->name, data);
+}
+
+static void debug_data_out(struct cosa_data *cosa, int data)
+{
+ printk(KERN_INFO "%s: IO: data <- 0x%04x\n", cosa->name, data);
+}
+
+static void debug_data_cmd(struct cosa_data *cosa, int data)
+{
+ printk(KERN_INFO "%s: IO: data <- 0x%04x (%s|%s)\n",
+ cosa->name, data,
+ data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
+ data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
+}
+#endif
+
+/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
new file mode 100644
index 000000000000..028f3d96b971
--- /dev/null
+++ b/drivers/net/wan/cosa.h
@@ -0,0 +1,117 @@
+/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
+
+/*
+ * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef COSA_H__
+#define COSA_H__
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+/* status register - output bits */
+#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
+#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
+#define SR_RST 0x10 /* SRP reset */
+#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
+#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
+#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
+
+/* status register - input bits */
+#define SR_USR_RQ 0x20 /* user interrupt request pending */
+#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
+#define SR_RX_RDY 0x80 /* receiver data ready */
+
+#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
+ up to PC */
+#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
+ from PC to SRP */
+#define SR_END_OF_TRANSFER 0x03 /* SRP signals end of
+ transfer (up or down) */
+
+#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
+
+/* bits in driver status byte definitions : */
+#define SR_RDY_RCV 0x01 /* ready to receive packet */
+#define SR_RDY_SND 0x02 /* ready to send packet */
+#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */
+
+/* ???? */
+#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
+#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
+
+#endif /* __KERNEL__ */
+
+#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
+#define SR_START_ADDR 0x4400 /* SRP microcode start address */
+
+#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */
+#define COSA_MAX_FIRMWARE_SIZE 0x10000
+
+/* ioctls */
+struct cosa_download {
+ int addr, len;
+ char __user *code;
+};
+
+/* Reset the device */
+#define COSAIORSET _IO('C',0xf0)
+
+/* Start microcode at given address */
+#define COSAIOSTRT _IOW('C',0xf1, int)
+
+/* Read the block from the device memory */
+#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
+ /* actually the struct cosa_download itself; this is to keep
+ * the ioctl number the same as in 2.4 in order to keep the user-space
+ * utils compatible. */
+
+/* Write the block to the device memory (i.e. download the microcode) */
+#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
+ /* actually the struct cosa_download itself; this is to keep
+ * the ioctl number the same as in 2.4 in order to keep the user-space
+ * utils compatible. */
+
+/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */
+#define COSAIORTYPE _IOR('C',0xf3, char *)
+
+/* Read the device identification string */
+#define COSAIORIDSTR _IOR('C',0xf4, char *)
+/* Maximum length of the identification string. */
+#define COSA_MAX_ID_STRING 128
+
+/* Increment/decrement the module usage count :-) */
+/* #define COSAIOMINC _IO('C',0xf5) */
+/* #define COSAIOMDEC _IO('C',0xf6) */
+
+/* Get the total number of cards installed */
+#define COSAIONRCARDS _IO('C',0xf7)
+
+/* Get the number of channels on this card */
+#define COSAIONRCHANS _IO('C',0xf8)
+
+/* Set the driver for the bus-master operations */
+#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
+
+#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
+#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
+
+/* Gets the busmaster status */
+#define COSAIOBMGET _IO('C', 0xfa)
+
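+/* Illustrative sketch (not part of the original interface): a hypothetical
+ * user-space firmware download could drive the ioctls above roughly as
+ * follows; the device node, fd and the fw_buf/fw_len/start_addr variables
+ * are assumptions made only for this example.
+ *
+ *   struct cosa_download d = {
+ *           .addr = COSA_LOAD_ADDR,
+ *           .len  = fw_len,
+ *           .code = fw_buf,
+ *   };
+ *
+ *   ioctl(fd, COSAIORSET);              reset the card
+ *   ioctl(fd, COSAIODOWNLD, &d);        download the microcode
+ *   ioctl(fd, COSAIOSTRT, &start_addr); start it at start_addr
+ */
+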
+#endif /* !COSA_H__ */
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
new file mode 100644
index 000000000000..6e74af62ca08
--- /dev/null
+++ b/drivers/net/wan/cycx_drv.c
@@ -0,0 +1,586 @@
+/*
+* cycx_drv.c Cyclom 2X Support Module.
+*
+* This module is a library of common hardware specific
+* functions used by the Cyclades Cyclom 2X sync card.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/11/11 acme set_current_state(TASK_INTERRUPTIBLE), code
+* cleanup
+* 1999/11/08 acme init_cyc2x deleted, doing nothing
+* 1999/11/06 acme back to read[bw], write[bw] and memcpy_to and
+* fromio to use dpmbase ioremaped
+* 1999/10/26 acme use isa_read[bw], isa_write[bw] & isa_memcpy_to
+* & fromio
+* 1999/10/23 acme cleanup to only supports cyclom2x: all the other
+* boards are no longer manufactured by cyclades,
+* if someone wants to support them... be my guest!
+* 1999/05/28 acme cycx_intack & cycx_intde gone for good
+* 1999/05/18 acme lots of unlogged work, submitting to Linus...
+* 1999/01/03 acme more judicious use of data types
+* 1999/01/03 acme judicious use of data types :>
+* cycx_inten trying to reset pending interrupts
+* from cyclom 2x - I think this isn't the way to
+* go, but for now...
+* 1999/01/02 acme cycx_intack ok, I think there's nothing to do
+* to ack an int in cycx_drv.c, only handle it in
+* cyx_isr (or in the other protocols: cyp_isr,
+* cyf_isr, when they get implemented.
+* Dec 31, 1998 acme cycx_data_boot & cycx_code_boot fixed, crossing
+* fingers to see x25_configure in cycx_x25.c
+* work... :)
+* Dec 26, 1998 acme load implementation fixed, seems to work! :)
+* cycx_2x_dpmbase_options with all the possible
+* DPM addresses (20).
+* cycx_intr implemented (test this!)
+* general code cleanup
+* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation.
+* Aug 8, 1998 acme Initial version.
+*/
+
+#include <linux/init.h> /* __init */
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/sched.h> /* for jiffies, HZ, etc. */
+#include <linux/cycx_drv.h> /* API definitions */
+#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
+#include <linux/delay.h> /* udelay */
+#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
+
+#define MOD_VERSION 0
+#define MOD_RELEASE 6
+
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
+MODULE_LICENSE("GPL");
+
+/* Hardware-specific functions */
+static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len);
+static void cycx_bootcfg(struct cycx_hw *hw);
+
+static int reset_cyc2x(void __iomem *addr);
+static int detect_cyc2x(void __iomem *addr);
+
+/* Miscellaneous functions */
+static void delay_cycx(int sec);
+static int get_option_index(long *optlist, long optval);
+static u16 checksum(u8 *buf, u32 len);
+
+#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
+
+/* Global Data */
+
+/* private data */
+static char modname[] = "cycx_drv";
+static char fullname[] = "Cyclom 2X Support Module";
+static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+ "<acme@conectiva.com.br>";
+
+/* Hardware configuration options.
+ * These are arrays of configuration options used by verification routines.
+ * The first element of each array is its size (i.e. number of options).
+ */
+static long cyc2x_dpmbase_options[] = {
+ 20,
+ 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
+ 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
+ 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
+};
+
+static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
+
+/* Kernel Loadable Module Entry Points */
+/* Module 'insert' entry point.
+ * o print announcement
+ * o initialize static data
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process */
+
+int __init cycx_drv_init(void)
+{
+ printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
+ copyright);
+
+ return 0;
+}
+
+/* Module 'remove' entry point.
+ * o release all remaining system resources */
+void cycx_drv_cleanup(void)
+{
+}
+
+/* Kernel APIs */
+/* Set up adapter.
+ * o detect adapter type
+ * o verify hardware configuration options
+ * o check for hardware conflicts
+ * o set up adapter shared memory
+ * o test adapter memory
+ * o load firmware
+ * Return: 0 ok.
+ * < 0 error */
+EXPORT_SYMBOL(cycx_setup);
+int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
+{
+ int err;
+
+ /* Verify IRQ configuration options */
+ if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
+ printk(KERN_ERR "%s: IRQ %d is invalid!\n", modname, hw->irq);
+ return -EINVAL;
+ }
+
+ /* Setup adapter dual-port memory window and test memory */
+ if (!dpmbase) {
+ printk(KERN_ERR "%s: you must specify the dpm address!\n",
+ modname);
+ return -EINVAL;
+ } else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
+ printk(KERN_ERR "%s: memory address 0x%lX is invalid!\n",
+ modname, dpmbase);
+ return -EINVAL;
+ }
+
+ hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE);
+ hw->dpmsize = CYCX_WINDOWSIZE;
+
+ if (!detect_cyc2x(hw->dpmbase)) {
+ printk(KERN_ERR "%s: adapter Cyclom 2X not found at "
+ "address 0x%lX!\n", modname, dpmbase);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s: found Cyclom 2X card at address 0x%lX.\n",
+ modname, dpmbase);
+
+ /* Load firmware. If loader fails then shut down adapter */
+ err = load_cyc2x(hw, cfm, len);
+
+ if (err)
+ cycx_down(hw); /* shutdown adapter */
+
+ return err;
+}
+
+EXPORT_SYMBOL(cycx_down);
+int cycx_down(struct cycx_hw *hw)
+{
+ iounmap(hw->dpmbase);
+ return 0;
+}
+
+/* Enable interrupt generation. */
+EXPORT_SYMBOL(cycx_inten);
+void cycx_inten(struct cycx_hw *hw)
+{
+ writeb(0, hw->dpmbase);
+}
+
+/* Generate an interrupt to adapter's CPU. */
+EXPORT_SYMBOL(cycx_intr);
+void cycx_intr(struct cycx_hw *hw)
+{
+ writew(0, hw->dpmbase + GEN_CYCX_INTR);
+}
+
+/* Execute Adapter Command.
+ * o Set exec flag.
+ * o Busy-wait until flag is reset. */
+EXPORT_SYMBOL(cycx_exec);
+int cycx_exec(void __iomem *addr)
+{
+ u16 i = 0;
+ /* wait till addr content is zeroed */
+
+ while (readw(addr)) {
+ udelay(1000);
+
+ if (++i > 50)
+ return -1;
+ }
+
+ return 0;
+}
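+
+/* Note: the loop above polls once per millisecond and gives up after 50
+ * iterations, so a firmware command that has not completed within about
+ * 50 ms is reported as a failure. */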
+
+/* Read absolute adapter memory.
+ * Transfer data from adapter's memory to data buffer. */
+EXPORT_SYMBOL(cycx_peek);
+int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
+{
+ if (len == 1)
+ *(u8*)buf = readb(hw->dpmbase + addr);
+ else
+ memcpy_fromio(buf, hw->dpmbase + addr, len);
+
+ return 0;
+}
+
+/* Write Absolute Adapter Memory.
+ * Transfer data from data buffer to adapter's memory. */
+EXPORT_SYMBOL(cycx_poke);
+int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
+{
+ if (len == 1)
+ writeb(*(u8*)buf, hw->dpmbase + addr);
+ else
+ memcpy_toio(hw->dpmbase + addr, buf, len);
+
+ return 0;
+}
+
+/* Hardware-Specific Functions */
+
+/* Load Aux Routines */
+/* Check for on-board dual-port memory.
+ Return 1 if memory exists at addr and 0 if not. */
+static int memory_exists(void __iomem *addr)
+{
+ int tries = 0;
+
+ for (; tries < 3 ; tries++) {
+ writew(TEST_PATTERN, addr + 0x10);
+
+ if (readw(addr + 0x10) == TEST_PATTERN)
+ if (readw(addr + 0x10) == TEST_PATTERN)
+ return 1;
+
+ delay_cycx(1);
+ }
+
+ return 0;
+}
+
+/* Load reset code. */
+static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt)
+{
+ void __iomem *pt_code = addr + RESET_OFFSET;
+ u16 i; /*, j; */
+
+ for (i = 0 ; i < cnt ; i++) {
+/* for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */
+ writeb(*buffer++, pt_code++);
+ }
+}
+
+/* Load buffer using boot interface.
+ * o copy data from buffer to Cyclom-X memory
+ * o wait for reset code to copy it to the right portion of memory */
+static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt)
+{
+ memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
+ writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
+
+ return wait_cyc(addr);
+}
+
+/* Set up entry point and kick start Cyclom-X CPU. */
+static void cycx_start(void __iomem *addr)
+{
+ /* put in 0x30 offset the jump instruction to the code entry point */
+ writeb(0xea, addr + 0x30);
+ writeb(0x00, addr + 0x31);
+ writeb(0xc4, addr + 0x32);
+ writeb(0x00, addr + 0x33);
+ writeb(0x00, addr + 0x34);
+
+ /* cmd to start executing code */
+ writew(GEN_START, addr + CMD_OFFSET);
+}
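+
+/* Editorial note: the five bytes written at offset 0x30 above decode as a
+ * far "jmp 0000:C400" (opcode 0xea, 16-bit offset 0xc400, 16-bit segment
+ * 0x0000), presumably landing in the region that code.bin is loaded into
+ * (cf. the 0xc400 value passed to GEN_SET_SEG in cycx_code_boot()). */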
+
+/* Load and boot reset code. */
+static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
+{
+ void __iomem *pt_start = addr + START_OFFSET;
+
+ writeb(0xea, pt_start++); /* jmp to f000:3f00 */
+ writeb(0x00, pt_start++);
+ writeb(0xfc, pt_start++);
+ writeb(0x00, pt_start++);
+ writeb(0xf0, pt_start);
+ reset_load(addr, code, len);
+
+ /* 80186 was in hold, go */
+ writeb(0, addr + START_CPU);
+ delay_cycx(1);
+}
+
+/* Load data.bin file through boot (reset) interface. */
+static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
+{
+ void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
+ u32 i;
+
+ /* boot buffer length */
+ writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+ writew(GEN_DEFPAR, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0)
+ return -1;
+
+ writew(0, pt_boot_cmd + sizeof(u16));
+ writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
+ writew(GEN_SET_SEG, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0)
+ return -1;
+
+ for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+ if (buffer_load(addr, code + i,
+ min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
+ printk(KERN_ERR "%s: Error !!\n", modname);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* Load code.bin file through boot (reset) interface. */
+static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
+{
+ void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
+ u32 i;
+
+ /* boot buffer length */
+ writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+ writew(GEN_DEFPAR, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0)
+ return -1;
+
+ writew(0x0000, pt_boot_cmd + sizeof(u16));
+ writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
+ writew(GEN_SET_SEG, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0)
+ return -1;
+
+ for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+ if (buffer_load(addr, code + i,
+ min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
+ printk(KERN_ERR "%s: Error !!\n", modname);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Load adapter from the memory image of the CYCX firmware module.
+ * o verify firmware integrity and compatibility
+ * o start adapter up */
+static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
+{
+ int i, j;
+ struct cycx_fw_header *img_hdr;
+ u8 *reset_image,
+ *data_image,
+ *code_image;
+ void __iomem *pt_cycld = hw->dpmbase + 0x400;
+ u16 cksum;
+
+ /* Announce */
+ printk(KERN_INFO "%s: firmware signature=\"%s\"\n", modname,
+ cfm->signature);
+
+ /* Verify firmware signature */
+ if (strcmp(cfm->signature, CFM_SIGNATURE)) {
+ printk(KERN_ERR "%s:load_cyc2x: not Cyclom-2X firmware!\n",
+ modname);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s: firmware version=%u\n", modname, cfm->version);
+
+ /* Verify firmware module format version */
+ if (cfm->version != CFM_VERSION) {
+ printk(KERN_ERR "%s:%s: firmware format %u rejected! "
+ "Expecting %u.\n",
+ modname, __FUNCTION__, cfm->version, CFM_VERSION);
+ return -EINVAL;
+ }
+
+ /* Verify firmware module length and checksum */
+ cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) +
+ cfm->info.codesize);
+/*
+ FIXME cfm->info.codesize is off by 2
+ if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
+*/
+ if (cksum != cfm->checksum) {
+ printk(KERN_ERR "%s:%s: firmware corrupted!\n",
+ modname, __FUNCTION__);
+ printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
+ len - (int)sizeof(struct cycx_firmware) - 1,
+ cfm->info.codesize);
+ printk(KERN_ERR " chksum = 0x%x (expected 0x%x)\n",
+ cksum, cfm->checksum);
+ return -EINVAL;
+ }
+
+ /* If everything is ok, set reset, data and code pointers */
+ img_hdr = (struct cycx_fw_header *)&cfm->image;
+#ifdef FIRMWARE_DEBUG
+ printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname);
+ printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
+ printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
+ printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
+#endif
+ reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
+ data_image = reset_image + img_hdr->reset_size;
+ code_image = data_image + img_hdr->data_size;
+
+ /*---- Start load ----*/
+ /* Announce */
+ printk(KERN_INFO "%s: loading firmware %s (ID=%u)...\n", modname,
+ cfm->descr[0] ? cfm->descr : "unknown firmware",
+ cfm->info.codeid);
+
+ for (i = 0 ; i < 5 ; i++) {
+ /* Reset Cyclom hardware */
+ if (!reset_cyc2x(hw->dpmbase)) {
+ printk(KERN_ERR "%s: dpm problem or board not found\n",
+ modname);
+ return -EINVAL;
+ }
+
+ /* Load reset.bin */
+ cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
+ /* reset is waiting for boot */
+ writew(GEN_POWER_ON, pt_cycld);
+ delay_cycx(1);
+
+ for (j = 0 ; j < 3 ; j++)
+ if (!readw(pt_cycld))
+ goto reset_loaded;
+ else
+ delay_cycx(1);
+ }
+
+ printk(KERN_ERR "%s: reset not started.\n", modname);
+ return -EINVAL;
+
+reset_loaded:
+ /* Load data.bin */
+ if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
+ printk(KERN_ERR "%s: cannot load data file.\n", modname);
+ return -EINVAL;
+ }
+
+ /* Load code.bin */
+ if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
+ printk(KERN_ERR "%s: cannot load code file.\n", modname);
+ return -EINVAL;
+ }
+
+ /* Prepare boot-time configuration data */
+ cycx_bootcfg(hw);
+
+ /* kick-off CPU */
+ cycx_start(hw->dpmbase);
+
+ /* Arthur Ganzert's tip: wait a while after the firmware loading...
+ Mon Apr 26 17:17:12 EST 1999 - acme */
+ delay_cycx(7);
+ printk(KERN_INFO "%s: firmware loaded!\n", modname);
+
+ /* enable interrupts */
+ cycx_inten(hw);
+
+ return 0;
+}
+
+/* Prepare boot-time firmware configuration data.
+ * o initialize configuration data area
+ From async.doc - V_3.4.0 - 07/18/1994
+ - As of now, only static buffers are available to the user.
+ So, the bit VD_RXDIRC must be set in 'valid'. That means that user
+ wants to use the static transmission and reception buffers. */
+static void cycx_bootcfg(struct cycx_hw *hw)
+{
+ /* use fixed buffers */
+ writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
+}
+
+/* Detect Cyclom 2x adapter.
+ * The following test is used to detect the Cyclom 2X adapter:
+ * reset the board and check that its dual-port memory responds.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test. */
+static int detect_cyc2x(void __iomem *addr)
+{
+ reset_cyc2x(addr);
+
+ return memory_exists(addr);
+}
+
+/* Miscellaneous */
+/* Get option's index into the options list.
+ * Return option's index (1 .. N) or zero if option is invalid. */
+static int get_option_index(long *optlist, long optval)
+{
+ int i = 1;
+
+ for (; i <= optlist[0]; ++i)
+ if (optlist[i] == optval)
+ return i;
+
+ return 0;
+}
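+
+/* Example: with cycx_2x_irq_options[] above (element 0 holds the count 7),
+ * get_option_index(cycx_2x_irq_options, 9) returns 3, while an unsupported
+ * value such as 4 returns 0. */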
+
+/* Reset adapter's CPU. */
+static int reset_cyc2x(void __iomem *addr)
+{
+ writeb(0, addr + RST_ENABLE);
+ delay_cycx(2);
+ writeb(0, addr + RST_DISABLE);
+ delay_cycx(2);
+
+ return memory_exists(addr);
+}
+
+/* Delay */
+static void delay_cycx(int sec)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(sec * HZ);
+}
+
+/* Calculate 16-bit CRC using CCITT polynomial. */
+static u16 checksum(u8 *buf, u32 len)
+{
+ u16 crc = 0;
+ u16 mask, flag;
+
+ for (; len; --len, ++buf)
+ for (mask = 0x80; mask; mask >>= 1) {
+ flag = (crc & 0x8000);
+ crc <<= 1;
+ crc |= ((*buf & mask) ? 1 : 0);
+
+ if (flag)
+ crc ^= 0x1021;
+ }
+
+ return crc;
+}
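+
+/* The loop above is a straight bit-serial CRC: message bits are taken
+ * MSB-first, shifted into the low end of the 16-bit register, and the
+ * register is XORed with the CCITT polynomial 0x1021
+ * (x^16 + x^12 + x^5 + 1) whenever a one bit falls out of the top. The
+ * register starts at zero and no final XOR is applied; the result is
+ * compared against cfm->checksum in load_cyc2x(). */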
+
+module_init(cycx_drv_init);
+module_exit(cycx_drv_cleanup);
+
+/* End */
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
new file mode 100644
index 000000000000..7b48064364dc
--- /dev/null
+++ b/drivers/net/wan/cycx_main.c
@@ -0,0 +1,351 @@
+/*
+* cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
+* Jaspreet Singh <jaspreet@sangoma.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Please look at the bitkeeper changelog (or any other scm tool that ends up
+* importing bitkeeper changelog or that replaces bitkeeper in the future as
+* main tool for linux development).
+*
+* 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks,
+* some cleanups
+* 2000/07/13 acme remove useless #ifdef MODULE and crap
+* #if KERNEL_VERSION > blah
+* 2000/07/06 acme __exit at cyclomx_cleanup
+* 2000/04/02 acme dprintk and cycx_debug
+* module_init/module_exit
+* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
+* and cyclomx_close to cyclomx_mod_dec_use_count
+* 2000/01/08 acme cleanup
+* 1999/11/06 acme cycx_down back to life (it needs to be
+* called to iounmap the dpmbase)
+* 1999/08/09 acme removed references to enable_tx_int
+* use spinlocks instead of cli/sti in
+* cyclomx_set_state
+* 1999/05/19 acme works directly linked into the kernel
+* init_waitqueue_head for 2.3.* kernel
+* 1999/05/18 acme major cleanup (polling not needed), etc
+* 1998/08/28 acme minor cleanup (ioctls for firmware deleted)
+* queue_task activated
+* 1998/08/08 acme Initial version.
+*/
+
+#include <linux/config.h> /* OS configuration options */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/module.h> /* support for loadable modules */
+#include <linux/ioport.h> /* request_region(), release_region() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/cyclomx.h> /* cyclomx common user API definitions */
+#include <linux/init.h> /* __init (when not using as a module) */
+
+unsigned int cycx_debug;
+
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
+MODULE_LICENSE("GPL");
+module_param(cycx_debug, int, 0);
+MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");
+
+/* Defines & Macros */
+
+#define CYCX_DRV_VERSION 0 /* version number */
+#define CYCX_DRV_RELEASE 11 /* release (minor version) number */
+#define CYCX_MAX_CARDS 1 /* max number of adapters */
+
+#define CONFIG_CYCX_CARDS 1
+
+/* Function Prototypes */
+
+/* WAN link driver entry points */
+static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
+static int cycx_wan_shutdown(struct wan_device *wandev);
+
+/* Miscellaneous functions */
+static irqreturn_t cycx_isr(int irq, void *dev_id, struct pt_regs *regs);
+
+/* Global Data
+ * Note: All data must be explicitly initialized!!!
+ */
+
+/* private data */
+static char cycx_drvname[] = "cyclomx";
+static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
+static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+ "<acme@conectiva.com.br>";
+static int cycx_ncards = CONFIG_CYCX_CARDS;
+static struct cycx_device *cycx_card_array; /* adapter data space */
+
+/* Kernel Loadable Module Entry Points */
+
+/*
+ * Module 'insert' entry point.
+ * o print announcement
+ * o allocate adapter data space
+ * o initialize static data
+ * o register all cards with WAN router
+ * o calibrate Cyclom 2X shared memory access delay.
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process
+ */
+int __init cycx_init(void)
+{
+ int cnt, err = -ENOMEM;
+
+ printk(KERN_INFO "%s v%u.%u %s\n",
+ cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
+ cycx_copyright);
+
+ /* Verify number of cards and allocate adapter data space */
+ cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
+ cycx_ncards = max_t(int, cycx_ncards, 1);
+ cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards,
+ GFP_KERNEL);
+ if (!cycx_card_array)
+ goto out;
+
+ memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards);
+
+ /* Register adapters with WAN router */
+ for (cnt = 0; cnt < cycx_ncards; ++cnt) {
+ struct cycx_device *card = &cycx_card_array[cnt];
+ struct wan_device *wandev = &card->wandev;
+
+ sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
+ wandev->magic = ROUTER_MAGIC;
+ wandev->name = card->devname;
+ wandev->private = card;
+ wandev->setup = cycx_wan_setup;
+ wandev->shutdown = cycx_wan_shutdown;
+ err = register_wan_device(wandev);
+
+ if (err) {
+ printk(KERN_ERR "%s: %s registration failed with "
+ "error %d!\n",
+ cycx_drvname, card->devname, err);
+ break;
+ }
+ }
+
+ err = -ENODEV;
+ if (!cnt) {
+ kfree(cycx_card_array);
+ goto out;
+ }
+ err = 0;
+ cycx_ncards = cnt; /* adjust actual number of cards */
+out: return err;
+}
+
+/*
+ * Module 'remove' entry point.
+ * o unregister all adapters from the WAN router
+ * o release all remaining system resources
+ */
+static void __exit cycx_exit(void)
+{
+ int i = 0;
+
+ for (; i < cycx_ncards; ++i) {
+ struct cycx_device *card = &cycx_card_array[i];
+ unregister_wan_device(card->devname);
+ }
+
+ kfree(cycx_card_array);
+}
+
+/* WAN Device Driver Entry Points */
+/*
+ * Setup/configure WAN link driver.
+ * o check adapter state
+ * o make sure firmware is present in configuration
+ * o allocate interrupt vector
+ * o setup Cyclom 2X hardware
+ * o call appropriate routine to perform protocol-specific initialization
+ *
+ * This function is called when router handles ROUTER_SETUP IOCTL. The
+ * configuration structure is in kernel memory (including extended data, if
+ * any).
+ */
+static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
+{
+ int rc = -EFAULT;
+ struct cycx_device *card;
+ int irq;
+
+ /* Sanity checks */
+
+ if (!wandev || !wandev->private || !conf)
+ goto out;
+
+ card = wandev->private;
+ rc = -EBUSY;
+ if (wandev->state != WAN_UNCONFIGURED)
+ goto out;
+
+ rc = -EINVAL;
+ if (!conf->data_size || !conf->data) {
+ printk(KERN_ERR "%s: firmware not found in configuration "
+ "data!\n", wandev->name);
+ goto out;
+ }
+
+ if (conf->irq <= 0) {
+ printk(KERN_ERR "%s: can't configure without IRQ!\n",
+ wandev->name);
+ goto out;
+ }
+
+ /* Allocate IRQ */
+ irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
+
+ if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
+ printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
+ wandev->name, irq);
+ goto out;
+ }
+
+ /* Configure hardware, load firmware, etc. */
+ memset(&card->hw, 0, sizeof(card->hw));
+ card->hw.irq = irq;
+ card->hw.dpmsize = CYCX_WINDOWSIZE;
+ card->hw.fwid = CFID_X25_2X;
+ spin_lock_init(&card->lock);
+ init_waitqueue_head(&card->wait_stats);
+
+ rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
+ if (rc)
+ goto out_irq;
+
+ /* Initialize WAN device data space */
+ wandev->irq = irq;
+ wandev->dma = wandev->ioport = 0;
+ wandev->maddr = (unsigned long)card->hw.dpmbase;
+ wandev->msize = card->hw.dpmsize;
+ wandev->hw_opt[2] = 0;
+ wandev->hw_opt[3] = card->hw.fwid;
+
+ /* Protocol-specific initialization */
+ switch (card->hw.fwid) {
+#ifdef CONFIG_CYCLOMX_X25
+ case CFID_X25_2X:
+ rc = cycx_x25_wan_init(card, conf);
+ break;
+#endif
+ default:
+ printk(KERN_ERR "%s: this firmware is not supported!\n",
+ wandev->name);
+ rc = -EINVAL;
+ }
+
+ if (rc) {
+ cycx_down(&card->hw);
+ goto out_irq;
+ }
+
+ rc = 0;
+out:
+ return rc;
+out_irq:
+ free_irq(irq, card);
+ goto out;
+}
+
+/*
+ * Shut down WAN link driver.
+ * o shut down adapter hardware
+ * o release system resources.
+ *
+ * This function is called by the router when device is being unregistered or
+ * when it handles ROUTER_DOWN IOCTL.
+ */
+static int cycx_wan_shutdown(struct wan_device *wandev)
+{
+ int ret = -EFAULT;
+ struct cycx_device *card;
+
+ /* sanity checks */
+ if (!wandev || !wandev->private)
+ goto out;
+
+ ret = 0;
+ if (wandev->state == WAN_UNCONFIGURED)
+ goto out;
+
+ card = wandev->private;
+ wandev->state = WAN_UNCONFIGURED;
+ cycx_down(&card->hw);
+ printk(KERN_INFO "%s: irq %d being freed!\n", wandev->name,
+ wandev->irq);
+ free_irq(wandev->irq, card);
+out: return ret;
+}
+
+/* Miscellaneous */
+/*
+ * Cyclom 2X Interrupt Service Routine.
+ * o acknowledge Cyclom 2X hardware interrupt.
+ * o call protocol-specific interrupt service routine, if any.
+ */
+static irqreturn_t cycx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct cycx_device *card = (struct cycx_device *)dev_id;
+
+ if (!card || card->wandev.state == WAN_UNCONFIGURED)
+ goto out;
+
+ if (card->in_isr) {
+ printk(KERN_WARNING "%s: interrupt re-entrancy on IRQ %d!\n",
+ card->devname, card->wandev.irq);
+ goto out;
+ }
+
+ if (card->isr)
+ card->isr(card);
+ return IRQ_HANDLED;
+out:
+ return IRQ_NONE;
+}
+
+/* Set WAN device state. */
+void cycx_set_state(struct cycx_device *card, int state)
+{
+ unsigned long flags;
+ char *string_state = NULL;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (card->wandev.state != state) {
+ switch (state) {
+ case WAN_CONNECTED:
+ string_state = "connected!";
+ break;
+ case WAN_DISCONNECTED:
+ string_state = "disconnected!";
+ break;
+ }
+ printk(KERN_INFO "%s: link %s\n", card->devname, string_state);
+ card->wandev.state = state;
+ }
+
+ card->state_tick = jiffies;
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+module_init(cycx_init);
+module_exit(cycx_exit);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
new file mode 100644
index 000000000000..5b48cd8568f5
--- /dev/null
+++ b/drivers/net/wan/cycx_x25.c
@@ -0,0 +1,1609 @@
+/*
+* cycx_x25.c Cyclom 2X WAN Link Driver. X.25 module.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 2001/01/12 acme use dev_kfree_skb_irq on interrupt context
+* 2000/04/02 acme dprintk, cycx_debug
+* fixed the bug introduced in get_dev_by_lcn and
+* get_dev_by_dte_addr by the anonymous hacker
+* that converted this driver to softnet
+* 2000/01/08 acme cleanup
+* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack know
+* that we have a X.25 stack implemented in
+* firmware onboard
+* 1999/10/18 acme support for X.25 sockets in if_send,
+* beware: socket(AF_X25...) IS WORK IN PROGRESS,
+* TCP/IP over X.25 via wanrouter not affected,
+* working.
+* 1999/10/09 acme chan_disc renamed to chan_disconnect,
+* began adding support for X.25 sockets:
+* conf->protocol in new_if
+* 1999/10/05 acme fixed return E... to return -E...
+* 1999/08/10 acme serialized access to the card thru a spinlock
+* in x25_exec
+* 1999/08/09 acme removed per channel spinlocks
+* removed references to enable_tx_int
+* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated
+* if_send simplified
+* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration
+* use spinlocks instead of cli/sti in some points
+* 1999/05/24 acme finished the x25_get_stat function
+* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works,
+* AFAIT, with ARPHRD_ETHER). This seems to be
+* needed to use socket(AF_X25)...
+* Now the config file must specify a peer media
+* address for svc channels over a crossover cable.
+* Removed hold_timeout from x25_channel_t,
+* not used.
+* A little enhancement in the DEBUG processing
+* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr,
+* instead of chan_disc.
+* 1999/05/16 marcelo fixed timer initialization in SVCs
+* 1999/01/05 acme x25_configure now get (most of) all
+* parameters...
+* 1999/01/05 acme pktlen now (correctly) uses log2 (value
+* configured)
+* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc)
+* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge
+* indication (interrupt from cyclom 2x)
+* 1999/01/02 acme cyx_isr: first hackings...
+* 1999/01/0203 acme when initializing an array don't give less
+* elements than declared...
+* example: char send_cmd[6] = "?\xFF\x10";
+* you'll gonna lose a couple hours, 'cause your
+* brain won't admit that there's an error in the
+* above declaration... the side effect is that
+* memset is put into the unresolved symbols
+* instead of using the inline memset functions...
+* 1999/01/02 acme began chan_connect, chan_send, x25_send
+* 1998/12/31 acme x25_configure
+* this code can be compiled as non module
+* 1998/12/27 acme code cleanup
+* IPX code wiped out! let's decrease code
+* complexity for now, remember: I'm learning! :)
+* bps_to_speed_code OK
+* 1998/12/26 acme Minimal debug code cleanup
+* 1998/08/08 acme Initial version.
+*/
+
+#define CYCLOMX_X25_DEBUG 1
+
+#include <linux/errno.h> /* return codes */
+#include <linux/if_arp.h> /* ARPHRD_HWX25 */
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/module.h>
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/wanrouter.h> /* WAN router definitions */
+
+#include <asm/byteorder.h> /* htons(), etc. */
+
+#include <linux/cyclomx.h> /* Cyclom 2X common user API definitions */
+#include <linux/cycx_x25.h> /* X.25 firmware API definitions */
+
+#include <net/x25device.h>
+
+/* Defines & Macros */
+#define CYCX_X25_MAX_CMD_RETRY 5
+#define CYCX_X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */
+
+/* Data Structures */
+/* This is an extension of the 'struct net_device' we create for each network
+ interface to keep the rest of X.25 channel-specific data. */
+struct cycx_x25_channel {
+ /* This member must be first. */
+ struct net_device *slave; /* WAN slave */
+
+ char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
+ char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
+ char *local_addr; /* local media address, ASCIIZ -
+ svc thru crossover cable */
+ s16 lcn; /* logical channel number/conn.req.key*/
+ u8 link;
+ struct timer_list timer; /* timer used for svc channel disc. */
+ u16 protocol; /* ethertype, 0 - multiplexed */
+ u8 svc; /* 0 - permanent, 1 - switched */
+ u8 state; /* channel state */
+ u8 drop_sequence; /* mark sequence for dropping */
+ u32 idle_tmout; /* sec, before disconnecting */
+ struct sk_buff *rx_skb; /* receive socket buffer */
+ struct cycx_device *card; /* -> owner */
+ struct net_device_stats ifstats;/* interface statistics */
+};
+
+/* Function Prototypes */
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int cycx_wan_update(struct wan_device *wandev),
+ cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
+ wanif_conf_t *conf),
+ cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev);
+
+/* Network device interface */
+static int cycx_netdevice_init(struct net_device *dev),
+ cycx_netdevice_open(struct net_device *dev),
+ cycx_netdevice_stop(struct net_device *dev),
+ cycx_netdevice_hard_header(struct sk_buff *skb,
+ struct net_device *dev, u16 type,
+ void *daddr, void *saddr, unsigned len),
+ cycx_netdevice_rebuild_header(struct sk_buff *skb),
+ cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+
+static struct net_device_stats *
+ cycx_netdevice_get_stats(struct net_device *dev);
+
+/* Interrupt handlers */
+static void cycx_x25_irq_handler(struct cycx_device *card),
+ cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_log(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_stat(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_connect_confirm(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_connect(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_disconnect(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd),
+ cycx_x25_irq_spurious(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd);
+
+/* X.25 firmware interface functions */
+static int cycx_x25_configure(struct cycx_device *card,
+ struct cycx_x25_config *conf),
+ cycx_x25_get_stats(struct cycx_device *card),
+ cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
+ int len, void *buf),
+ cycx_x25_connect_response(struct cycx_device *card,
+ struct cycx_x25_channel *chan),
+ cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
+ u8 lcn);
+
+/* channel functions */
+static int cycx_x25_chan_connect(struct net_device *dev),
+ cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb);
+
+static void cycx_x25_chan_disconnect(struct net_device *dev),
+ cycx_x25_chan_send_event(struct net_device *dev, u8 event);
+
+/* Miscellaneous functions */
+static void cycx_x25_set_chan_state(struct net_device *dev, u8 state),
+ cycx_x25_chan_timer(unsigned long d);
+
+static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
+ reset_timer(struct net_device *dev);
+
+static u8 bps_to_speed_code(u32 bps);
+static u8 cycx_log2(u32 n);
+
+static unsigned dec_to_uint(u8 *str, int len);
+
+static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
+ s16 lcn);
+static struct net_device *
+ cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
+
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len);
+static void cycx_x25_dump_config(struct cycx_x25_config *conf);
+static void cycx_x25_dump_stats(struct cycx_x25_stats *stats);
+static void cycx_x25_dump_devs(struct wan_device *wandev);
+#else
+#define hex_dump(msg, p, len)
+#define cycx_x25_dump_config(conf)
+#define cycx_x25_dump_stats(stats)
+#define cycx_x25_dump_devs(wandev)
+#endif
+/* Public Functions */
+
+/* X.25 Protocol Initialization routine.
+ *
+ * This routine is called by the main Cyclom 2X module during setup. At this
+ * point adapter is completely initialized and X.25 firmware is running.
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure. */
+int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
+{
+ struct cycx_x25_config cfg;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_X25) {
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Initialize protocol-specific fields */
+ card->mbox = card->hw.dpmbase + X25_MBOX_OFFS;
+ card->u.x.connection_keys = 0;
+ spin_lock_init(&card->u.x.lock);
+
+ /* Configure adapter. Here we set reasonable defaults, then parse
+ * device configuration structure and set configuration options.
+ * Most configuration options are verified and corrected (if
+ * necessary) since we can't rely on the adapter to do so and don't
+ * want it to fail either. */
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.link = 0;
+ cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
+ cfg.speed = bps_to_speed_code(conf->bps);
+ cfg.n3win = 7;
+ cfg.n2win = 2;
+ cfg.n2 = 5;
+ cfg.nvc = 1;
+ cfg.npvc = 1;
+ cfg.flags = 0x02; /* default = V35 */
+ cfg.t1 = 10; /* line carrier timeout */
+ cfg.t2 = 29; /* tx timeout */
+ cfg.t21 = 180; /* CALL timeout */
+ cfg.t23 = 180; /* CLEAR timeout */
+
+ /* adjust MTU */
+ if (!conf->mtu || conf->mtu >= 512)
+ card->wandev.mtu = 512;
+ else if (conf->mtu >= 256)
+ card->wandev.mtu = 256;
+ else if (conf->mtu >= 128)
+ card->wandev.mtu = 128;
+ else
+ card->wandev.mtu = 64;
+
+ cfg.pktlen = cycx_log2(card->wandev.mtu);
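+ /* (e.g. the default MTU of 512 gives pktlen 9, since 2^9 = 512; the
+ * firmware expects the packet length as a power-of-two exponent,
+ * cf. the 1999/01/05 changelog entry above.) */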
+
+ if (conf->station == WANOPT_DTE) {
+ cfg.locaddr = 3; /* DTE */
+ cfg.remaddr = 1; /* DCE */
+ } else {
+ cfg.locaddr = 1; /* DCE */
+ cfg.remaddr = 3; /* DTE */
+ }
+
+ if (conf->interface == WANOPT_RS232)
+ cfg.flags = 0; /* FIXME just reset the 2nd bit */
+
+ if (conf->u.x25.hi_pvc) {
+ card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
+ card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+ }
+
+ if (conf->u.x25.hi_svc) {
+ card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
+ card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
+ }
+
+ if (card->u.x.lo_pvc == 255)
+ cfg.npvc = 0;
+ else
+ cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
+
+ cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
+
+ if (conf->u.x25.hdlc_window)
+ cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
+
+ if (conf->u.x25.pkt_window)
+ cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
+
+ if (conf->u.x25.t1)
+ cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
+
+ if (conf->u.x25.t2)
+ cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
+
+ if (conf->u.x25.t11_t21)
+ cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
+
+ if (conf->u.x25.t13_t23)
+ cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
+
+ if (conf->u.x25.n2)
+ cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
+
+ /* initialize adapter */
+ if (cycx_x25_configure(card, &cfg))
+ return -EIO;
+
+ /* Initialize protocol-specific fields of adapter data space */
+ card->wandev.bps = conf->bps;
+ card->wandev.interface = conf->interface;
+ card->wandev.clocking = conf->clocking;
+ card->wandev.station = conf->station;
+ card->isr = cycx_x25_irq_handler;
+ card->exec = NULL;
+ card->wandev.update = cycx_wan_update;
+ card->wandev.new_if = cycx_wan_new_if;
+ card->wandev.del_if = cycx_wan_del_if;
+ card->wandev.state = WAN_DISCONNECTED;
+
+ return 0;
+}
+
+/* WAN Device Driver Entry Points */
+/* Update device status & statistics. */
+static int cycx_wan_update(struct wan_device *wandev)
+{
+ /* sanity checks */
+ if (!wandev || !wandev->private)
+ return -EFAULT;
+
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ cycx_x25_get_stats(wandev->private);
+
+ return 0;
+}
+
+/* Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created) */
+static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
+ wanif_conf_t *conf)
+{
+ struct cycx_device *card = wandev->private;
+ struct cycx_x25_channel *chan;
+ int err = 0;
+
+ if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
+ printk(KERN_INFO "%s: invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+ /* allocate and initialize private data */
+ chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ memset(chan, 0, sizeof(*chan));
+ strcpy(chan->name, conf->name);
+ chan->card = card;
+ chan->link = conf->port;
+ chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP;
+ chan->rx_skb = NULL;
+ /* only used in svc connected thru crossover cable */
+ chan->local_addr = NULL;
+
+ if (conf->addr[0] == '@') { /* SVC */
+ int len = strlen(conf->local_addr);
+
+ if (len) {
+ if (len > WAN_ADDRESS_SZ) {
+ printk(KERN_ERR "%s: %s local addr too long!\n",
+ wandev->name, chan->name);
+ kfree(chan);
+ return -EINVAL;
+ } else {
+ chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!chan->local_addr) {
+ kfree(chan);
+ return -ENOMEM;
+ }
+ }
+
+ strncpy(chan->local_addr, conf->local_addr,
+ WAN_ADDRESS_SZ);
+ }
+
+ chan->svc = 1;
+ strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
+ init_timer(&chan->timer);
+ chan->timer.function = cycx_x25_chan_timer;
+ chan->timer.data = (unsigned long)dev;
+
+ /* Set channel timeouts (default if not specified) */
+ chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
+ } else if (is_digit(conf->addr[0])) { /* PVC */
+ s16 lcn = dec_to_uint(conf->addr, 0);
+
+ if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
+ chan->lcn = lcn;
+ else {
+ printk(KERN_ERR
+ "%s: PVC %u is out of range on interface %s!\n",
+ wandev->name, lcn, chan->name);
+ err = -EINVAL;
+ }
+ } else {
+ printk(KERN_ERR "%s: invalid media address on interface %s!\n",
+ wandev->name, chan->name);
+ err = -EINVAL;
+ }
+
+ if (err) {
+ if (chan->local_addr)
+ kfree(chan->local_addr);
+
+ kfree(chan);
+ return err;
+ }
+
+ /* prepare network device data space for registration */
+ strcpy(dev->name, chan->name);
+ dev->init = cycx_netdevice_init;
+ dev->priv = chan;
+
+ return 0;
+}
+
+/* Delete logical channel. */
+static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
+{
+ if (dev->priv) {
+ struct cycx_x25_channel *chan = dev->priv;
+
+ if (chan->svc) {
+ if (chan->local_addr)
+ kfree(chan->local_addr);
+
+ if (chan->state == WAN_CONNECTED)
+ del_timer(&chan->timer);
+ }
+
+ kfree(chan);
+ dev->priv = NULL;
+ }
+
+ return 0;
+}
+
+/* Network Device Interface */
+/* Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration. */
+static int cycx_netdevice_init(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_device *card = chan->card;
+ struct wan_device *wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = cycx_netdevice_open;
+ dev->stop = cycx_netdevice_stop;
+ dev->hard_header = cycx_netdevice_hard_header;
+ dev->rebuild_header = cycx_netdevice_rebuild_header;
+ dev->hard_start_xmit = cycx_netdevice_hard_start_xmit;
+ dev->get_stats = cycx_netdevice_get_stats;
+
+ /* Initialize media-specific parameters */
+ dev->mtu = CYCX_X25_CHAN_MTU;
+ dev->type = ARPHRD_HWX25; /* ARP h/w type */
+ dev->hard_header_len = 0; /* media header length */
+ dev->addr_len = 0; /* hardware address length */
+
+ if (!chan->svc)
+ *(u16*)dev->dev_addr = htons(chan->lcn);
+
+ /* Initialize hardware parameters (just for reference) */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = (unsigned long)wandev->maddr;
+ dev->mem_end = (unsigned long)(wandev->maddr +
+ wandev->msize - 1);
+ dev->flags |= IFF_NOARP;
+
+ /* Set transmit buffer queue length */
+ dev->tx_queue_len = 10;
+ SET_MODULE_OWNER(dev);
+
+ /* Initialize socket buffers */
+ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+
+ return 0;
+}
+
+/* Open network interface.
+ * o prevent module from unloading by incrementing use count
+ * o if link is disconnected then initiate connection
+ *
+ * Return 0 if O.k. or errno. */
+static int cycx_netdevice_open(struct net_device *dev)
+{
+ if (netif_running(dev))
+ return -EBUSY; /* only one open is allowed */
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Close network interface.
+ * o reset flags.
+ * o if there are no more open channels then disconnect the physical link. */
+static int cycx_netdevice_stop(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+
+ netif_stop_queue(dev);
+
+ if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
+ cycx_x25_chan_disconnect(dev);
+
+ return 0;
+}
+
+/* Build media header.
+ * o encapsulate packet according to encapsulation type.
+ *
+ * The trick here is to put packet type (Ethertype) into 'protocol' field of
+ * the socket buffer, so that we don't forget it. If encapsulation fails,
+ * set skb->protocol to 0 and discard packet later.
+ *
+ * Return: media header length. */
+static int cycx_netdevice_hard_header(struct sk_buff *skb,
+ struct net_device *dev, u16 type,
+ void *daddr, void *saddr, unsigned len)
+{
+ skb->protocol = type;
+
+ return dev->hard_header_len;
+}
+
+/* Re-build media header.
+ * Return: 1 physical address resolved.
+ * 0 physical address not resolved */
+static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
+{
+ return 1;
+}
+
+/* Send a packet on a network interface.
+ * o set busy flag (marks start of the transmission).
+ * o check link state. If link is not up, then drop the packet.
+ * o check channel status. If it's down then initiate a call.
+ * o pass a packet to corresponding WAN device.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ * protocol stack and can be used for flow control with protocol layer. */
+static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_device *card = chan->card;
+
+ if (!chan->svc)
+ chan->protocol = skb->protocol;
+
+ if (card->wandev.state != WAN_CONNECTED)
+ ++chan->ifstats.tx_dropped;
+ else if (chan->svc && chan->protocol &&
+ chan->protocol != skb->protocol) {
+ printk(KERN_INFO
+ "%s: unsupported Ethertype 0x%04X on interface %s!\n",
+ card->devname, skb->protocol, dev->name);
+ ++chan->ifstats.tx_errors;
+ } else if (chan->protocol == ETH_P_IP) {
+ switch (chan->state) {
+ case WAN_DISCONNECTED:
+ if (cycx_x25_chan_connect(dev)) {
+ netif_stop_queue(dev);
+ return -EBUSY;
+ }
+ /* fall thru */
+ case WAN_CONNECTED:
+ reset_timer(dev);
+ dev->trans_start = jiffies;
+ netif_stop_queue(dev);
+
+ if (cycx_x25_chan_send(dev, skb))
+ return -EBUSY;
+
+ break;
+ default:
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ }
+ } else { /* chan->protocol == ETH_P_X25 */
+ switch (skb->data[0]) {
+ case 0: break;
+ case 1: /* Connect request */
+ cycx_x25_chan_connect(dev);
+ goto free_packet;
+ case 2: /* Disconnect request */
+ cycx_x25_chan_disconnect(dev);
+ goto free_packet;
+ default:
+ printk(KERN_INFO
+ "%s: unknown %d x25-iface request on %s!\n",
+ card->devname, skb->data[0], dev->name);
+ ++chan->ifstats.tx_errors;
+ goto free_packet;
+ }
+
+ skb_pull(skb, 1); /* Remove control byte */
+ reset_timer(dev);
+ dev->trans_start = jiffies;
+ netif_stop_queue(dev);
+
+ if (cycx_x25_chan_send(dev, skb)) {
+ /* prepare for future retransmissions */
+ skb_push(skb, 1);
+ return -EBUSY;
+ }
+ }
+
+free_packet:
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/* Get Ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats */
+static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+
+ return chan ? &chan->ifstats : NULL;
+}
+
+/* Interrupt Handlers */
+/* X.25 Interrupt Service Routine. */
+static void cycx_x25_irq_handler(struct cycx_device *card)
+{
+ struct cycx_x25_cmd cmd;
+ u16 z = 0;
+
+ card->in_isr = 1;
+ card->buff_int_mode_unbusy = 0;
+ cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
+
+ switch (cmd.command) {
+ case X25_DATA_INDICATION:
+ cycx_x25_irq_rx(card, &cmd);
+ break;
+ case X25_ACK_FROM_VC:
+ cycx_x25_irq_tx(card, &cmd);
+ break;
+ case X25_LOG:
+ cycx_x25_irq_log(card, &cmd);
+ break;
+ case X25_STATISTIC:
+ cycx_x25_irq_stat(card, &cmd);
+ break;
+ case X25_CONNECT_CONFIRM:
+ cycx_x25_irq_connect_confirm(card, &cmd);
+ break;
+ case X25_CONNECT_INDICATION:
+ cycx_x25_irq_connect(card, &cmd);
+ break;
+ case X25_DISCONNECT_INDICATION:
+ cycx_x25_irq_disconnect(card, &cmd);
+ break;
+ case X25_DISCONNECT_CONFIRM:
+ cycx_x25_irq_disconnect_confirm(card, &cmd);
+ break;
+ case X25_LINE_ON:
+ cycx_set_state(card, WAN_CONNECTED);
+ break;
+ case X25_LINE_OFF:
+ cycx_set_state(card, WAN_DISCONNECTED);
+ break;
+ default:
+ cycx_x25_irq_spurious(card, &cmd);
+ break;
+ }
+
+ cycx_poke(&card->hw, 0, &z, sizeof(z));
+ cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
+ card->in_isr = 0;
+}
+
+/* Transmit interrupt handler.
+ * o Release socket buffer
+ * o Clear 'tbusy' flag */
+static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+ struct net_device *dev;
+ struct wan_device *wandev = &card->wandev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+
+ /* unbusy device and then dev_tint(); */
+ dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+ if (dev) {
+ card->buff_int_mode_unbusy = 1;
+ netif_wake_queue(dev);
+ } else
+ printk(KERN_ERR "%s:ackvc for inexistent lcn %d\n",
+ card->devname, lcn);
+}
+
+/* Receive interrupt handler.
+ * This routine handles fragmented IP packets using M-bit according to the
+ * RFC1356.
+ * o map logical channel number to network interface.
+ * o allocate socket buffer or append received packet to the existing one.
+ * o if M-bit is reset (i.e. it's the last packet in a sequence) then
+ * decapsulate packet and pass socket buffer to the protocol stack.
+ *
+ * Notes:
+ * 1. When allocating a socket buffer, if M-bit is set then more data is
+ * coming and we have to allocate buffer for the maximum IP packet size
+ * expected on this channel.
+ * 2. If something goes wrong and an X.25 packet has to be dropped (e.g. no
+ * socket buffers available), the whole packet sequence must be discarded. */
+static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+ struct wan_device *wandev = &card->wandev;
+ struct net_device *dev;
+ struct cycx_x25_channel *chan;
+ struct sk_buff *skb;
+ u8 bitm, lcn;
+ int pktlen = cmd->len - 5;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
+ bitm &= 0x10;
+
+ dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+ if (!dev) {
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
+ card->devname, lcn);
+ return;
+ }
+
+ chan = dev->priv;
+ reset_timer(dev);
+
+ if (chan->drop_sequence) {
+ if (!bitm)
+ chan->drop_sequence = 0;
+ else
+ return;
+ }
+
+ if ((skb = chan->rx_skb) == NULL) {
+ /* Allocate new socket buffer */
+ int bufsize = bitm ? dev->mtu : pktlen;
+
+ if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
+ bufsize +
+ dev->hard_header_len)) == NULL) {
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ chan->drop_sequence = 1;
+ ++chan->ifstats.rx_dropped;
+ return;
+ }
+
+ if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */
+ /* 0 = data packet (dev_alloc_skb zeroed skb->data) */
+ skb_put(skb, 1);
+
+ skb->dev = dev;
+ skb->protocol = htons(chan->protocol);
+ chan->rx_skb = skb;
+ }
+
+ if (skb_tailroom(skb) < pktlen) {
+ /* No room for the packet. Call off the whole thing! */
+ dev_kfree_skb_irq(skb);
+ chan->rx_skb = NULL;
+
+ if (bitm)
+ chan->drop_sequence = 1;
+
+ printk(KERN_INFO "%s: unexpectedly long packet sequence "
+ "on interface %s!\n", card->devname, dev->name);
+ ++chan->ifstats.rx_length_errors;
+ return;
+ }
+
+ /* Append packet to the socket buffer */
+ cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
+
+ if (bitm)
+ return; /* more data is coming */
+
+ chan->rx_skb = NULL; /* dequeue packet */
+
+ ++chan->ifstats.rx_packets;
+ chan->ifstats.rx_bytes += pktlen;
+
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies; /* timestamp */
+}
+
+/* Connect interrupt handler. */
+static void cycx_x25_irq_connect(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ struct wan_device *wandev = &card->wandev;
+ struct net_device *dev = NULL;
+ struct cycx_x25_channel *chan;
+ u8 d[32],
+ loc[24],
+ rem[24];
+ u8 lcn, sizeloc, sizerem;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc));
+ cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6);
+
+ sizerem = sizeloc >> 4;
+ sizeloc &= 0x0F;
+
+ loc[0] = rem[0] = '\0';
+
+ if (sizeloc)
+ nibble_to_byte(d, loc, sizeloc, 0);
+
+ if (sizerem)
+ nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
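+ /* (Example: a size byte of 0x74 read above means a 4-digit local and a
+ * 7-digit remote address, packed as BCD nibbles in d[], with the
+ * remote address starting at nibble offset sizeloc.) */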
+
+ dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
+ __FUNCTION__, lcn, loc, rem);
+
+ dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
+ if (!dev) {
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s: connect not expected: remote %s!\n",
+ card->devname, rem);
+ return;
+ }
+
+ chan = dev->priv;
+ chan->lcn = lcn;
+ cycx_x25_connect_response(card, chan);
+ cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+}
+
+/* Connect confirm interrupt handler. */
+static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ struct wan_device *wandev = &card->wandev;
+ struct net_device *dev;
+ struct cycx_x25_channel *chan;
+ u8 lcn, key;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
+ dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
+ card->devname, __FUNCTION__, lcn, key);
+
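+ /* the pending channel was parked under lcn == -key by
+ x25_place_call(), so look it up by the negative key */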
+ dev = cycx_x25_get_dev_by_lcn(wandev, -key);
+ if (!dev) {
+ /* Invalid channel, discard packet */
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ printk(KERN_INFO "%s: connect confirm not expected: lcn %d, "
+ "key=%d!\n", card->devname, lcn, key);
+ return;
+ }
+
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ chan = dev->priv;
+ chan->lcn = lcn;
+ cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+}
+
+/* Disconnect confirm interrupt handler. */
+static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ struct wan_device *wandev = &card->wandev;
+ struct net_device *dev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
+ card->devname, __FUNCTION__, lcn);
+ dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+ if (!dev) {
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s:disconnect confirm not expected!:lcn %d\n",
+ card->devname, lcn);
+ return;
+ }
+
+ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+}
+
+/* Disconnect interrupt handler. */
+static void cycx_x25_irq_disconnect(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ struct wan_device *wandev = &card->wandev;
+ struct net_device *dev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn);
+
+ dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+ if (dev) {
+ struct cycx_x25_channel *chan = dev->priv;
+
+ cycx_x25_disconnect_response(card, chan->link, lcn);
+ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+ } else
+ cycx_x25_disconnect_response(card, 0, lcn);
+}
+
+/* LOG interrupt handler. */
+static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+#if CYCLOMX_X25_DEBUG
+ char bf[20];
+ u16 size, toread, link, msg_code;
+ u8 code, routine;
+
+ cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
+ cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
+ cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
+ /* at most 20 bytes are available... thanks to Daniela :) */
+ toread = size < 20 ? size : 20;
+ cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
+ cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
+ cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
+
+ printk(KERN_INFO "cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
+ printk(KERN_INFO "cmd->buf=0x%X\n", cmd->buf);
+ printk(KERN_INFO "Log message code=0x%X\n", msg_code);
+ printk(KERN_INFO "Link=%d\n", link);
+ printk(KERN_INFO "log code=0x%X\n", code);
+ printk(KERN_INFO "log routine=0x%X\n", routine);
+ printk(KERN_INFO "Message size=%d\n", size);
+ hex_dump("Message", bf, toread);
+#endif
+}
+
+/* STATISTIC interrupt handler. */
+static void cycx_x25_irq_stat(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
+ sizeof(card->u.x.stats));
+ hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats,
+ sizeof(card->u.x.stats));
+ cycx_x25_dump_stats(&card->u.x.stats);
+ wake_up_interruptible(&card->wait_stats);
+}
+
+/* Spurious interrupt handler.
+ * o print a warning
+ * If the number of spurious interrupts exceeds some limit, then ??? */
+static void cycx_x25_irq_spurious(struct cycx_device *card,
+ struct cycx_x25_cmd *cmd)
+{
+ printk(KERN_INFO "%s: spurious interrupt (0x%X)!\n",
+ card->devname, cmd->command);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len)
+{
+ unsigned char hex[1024],
+ * phex = hex;
+
+ if (len >= (sizeof(hex) / 2))
+ len = (sizeof(hex) / 2) - 1;
+
+ while (len--) {
+ sprintf(phex, "%02x", *p++);
+ phex += 2;
+ }
+
+ printk(KERN_INFO "%s: %s\n", msg, hex);
+}
+#endif
+
+/* Cyclom 2X Firmware-Specific Functions */
+/* Exec X.25 command. */
+static int x25_exec(struct cycx_device *card, int command, int link,
+ void *d1, int len1, void *d2, int len2)
+{
+ struct cycx_x25_cmd c;
+ unsigned long flags;
+ u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
+ u8 retry = CYCX_X25_MAX_CMD_RETRY;
+ int err = 0;
+
+ c.command = command;
+ c.link = link;
+ c.len = len1 + len2;
+
+ spin_lock_irqsave(&card->u.x.lock, flags);
+
+ /* write command */
+ cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
+
+ /* write X.25 data */
+ if (d1) {
+ cycx_poke(&card->hw, addr, d1, len1);
+
+ if (d2) {
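+ /* payloads over 254 bytes don't fit after d1: the first 249
+ bytes go right after d1, the rest into the per-link
+ buffer at 0xA00 + 0x400 * link */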
+ if (len2 > 254) {
+ u32 addr1 = 0xA00 + 0x400 * link;
+
+ cycx_poke(&card->hw, addr + len1, d2, 249);
+ cycx_poke(&card->hw, addr1, ((u8*)d2) + 249,
+ len2 - 249);
+ } else
+ cycx_poke(&card->hw, addr + len1, d2, len2);
+ }
+ }
+
+ /* generate interruption, executing command */
+ cycx_intr(&card->hw);
+
+ /* wait till card->mbox == 0 */
+ do {
+ err = cycx_exec(card->mbox);
+ } while (retry-- && err);
+
+ spin_unlock_irqrestore(&card->u.x.lock, flags);
+
+ return err;
+}
+
+/* Configure adapter. */
+static int cycx_x25_configure(struct cycx_device *card,
+ struct cycx_x25_config *conf)
+{
+ struct {
+ u16 nlinks;
+ struct cycx_x25_config conf[2];
+ } x25_cmd_conf;
+
+ memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
+ x25_cmd_conf.nlinks = 2;
+ x25_cmd_conf.conf[0] = *conf;
+ /* FIXME: we need to find a way in the wanrouter framework
+ to configure the second link. For now use the same config
+ as the first link, forcing the interface type to RS232,
+ the speed to 38400 bps and the clock to external */
+ x25_cmd_conf.conf[1] = *conf;
+ x25_cmd_conf.conf[1].link = 1;
+ x25_cmd_conf.conf[1].speed = 5; /* 38400 */
+ x25_cmd_conf.conf[1].clock = 8;
+ x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
+
+ cycx_x25_dump_config(&x25_cmd_conf.conf[0]);
+ cycx_x25_dump_config(&x25_cmd_conf.conf[1]);
+
+ return x25_exec(card, X25_CONFIG, 0,
+ &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
+}
+
+/* Get protocol statistics. */
+static int cycx_x25_get_stats(struct cycx_device *card)
+{
+ /* the firmware expects 20 in the size field!!!
+ thanks to Daniela */
+ int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
+
+ if (err)
+ return err;
+
+ interruptible_sleep_on(&card->wait_stats);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
+ card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
+ card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
+ card->wandev.stats.rx_length_errors = 0; /* not available from fw */
+ card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
+ card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
+ card->wandev.stats.rx_dropped = 0; /* not available from fw */
+ card->wandev.stats.rx_errors = 0; /* not available from fw */
+ card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
+ card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
+ card->wandev.stats.tx_dropped = 0; /* not available from fw */
+ card->wandev.stats.collisions = 0; /* not available from fw */
+ card->wandev.stats.tx_errors = 0; /* not available from fw */
+
+ cycx_x25_dump_devs(&card->wandev);
+
+ return 0;
+}
+
+/* Pack an ASCII decimal string into BCD nibbles. Returns the number of
+ * complete bytes written to d; a trailing half byte is flagged via *nibble. */
+static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
+{
+ int i = 0;
+
+ if (*nibble && *s) {
+ d[i] |= *s++ - '0';
+ *nibble = 0;
+ ++i;
+ }
+
+ while (*s) {
+ d[i] = (*s - '0') << 4;
+ if (*(s + 1))
+ d[i] |= *(s + 1) - '0';
+ else {
+ *nibble = 1;
+ break;
+ }
+ ++i;
+ s += 2;
+ }
+
+ return i;
+}
+
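+/* Unpack BCD nibbles back into an ASCII decimal string (the inverse of
+ * byte_to_nibble). */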
+static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
+{
+ if (nibble) {
+ *d++ = '0' + (*s++ & 0x0F);
+ --len;
+ }
+
+ while (len) {
+ *d++ = '0' + (*s >> 4);
+
+ if (--len) {
+ *d++ = '0' + (*s & 0x0F);
+ --len;
+ } else break;
+
+ ++s;
+ }
+
+ *d = '\0';
+}
+
+/* Place X.25 call. */
+static int x25_place_call(struct cycx_device *card,
+ struct cycx_x25_channel *chan)
+{
+ int err = 0,
+ len;
+ char d[64],
+ nibble = 0,
+ mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
+ remotelen = strlen(chan->addr);
+ u8 key;
+
+ if (card->u.x.connection_keys == ~0U) {
+ printk(KERN_INFO "%s: too many simultaneous connection "
+ "requests!\n", card->devname);
+ return -EAGAIN;
+ }
+
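+ /* grab a free connection key; until the CONNECT CONFIRM arrives the
+ channel is parked under lcn == -key (see
+ cycx_x25_irq_connect_confirm()) */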
+ key = ffz(card->u.x.connection_keys);
+ set_bit(key, (void*)&card->u.x.connection_keys);
+ ++key;
+ dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
+ memset(d, 0, sizeof(d));
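+ /* d[5] packs the local (high nibble) and remote (low nibble) address
+ lengths; the BCD-packed remote, then local, digits follow from d[6] */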
+ d[1] = key; /* user key */
+ d[2] = 0x10;
+ d[4] = 0x0B;
+
+ len = byte_to_nibble(chan->addr, d + 6, &nibble);
+
+ if (chan->local_addr)
+ len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble);
+
+ if (nibble)
+ ++len;
+
+ d[5] = mylen << 4 | remotelen;
+ d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */
+
+ if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
+ &d, 7 + len + 1, NULL, 0)) != 0)
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ else
+ chan->lcn = -key;
+
+ return err;
+}
+
+/* Place X.25 CONNECT RESPONSE. */
+static int cycx_x25_connect_response(struct cycx_device *card,
+ struct cycx_x25_channel *chan)
+{
+ u8 d[8];
+
+ memset(d, 0, sizeof(d));
+ d[0] = d[3] = chan->lcn;
+ d[2] = 0x10;
+ d[4] = 0x0F;
+ d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */
+
+ return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0);
+}
+
+/* Place X.25 DISCONNECT RESPONSE. */
+static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
+ u8 lcn)
+{
+ char d[5];
+
+ memset(d, 0, sizeof(d));
+ d[0] = d[3] = lcn;
+ d[2] = 0x10;
+ d[4] = 0x17;
+
+ return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0);
+}
+
+/* Clear X.25 call. */
+static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause,
+ u8 diagn)
+{
+ u8 d[7];
+
+ memset(d, 0, sizeof(d));
+ d[0] = d[3] = lcn;
+ d[2] = 0x10;
+ d[4] = 0x13;
+ d[5] = cause;
+ d[6] = diagn;
+
+ return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0);
+}
+
+/* Send X.25 data packet. */
+static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
+ int len, void *buf)
+{
+ u8 d[] = "?\xFF\x10??";
+
+ d[0] = d[3] = lcn;
+ d[4] = bitm;
+
+ return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len);
+}
+
+/* Miscellaneous */
+/* Find network device by its channel number. */
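+/* Channels are chained through chan->slave, starting at wandev->dev. */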
+static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
+ s16 lcn)
+{
+ struct net_device *dev = wandev->dev;
+ struct cycx_x25_channel *chan;
+
+ while (dev) {
+ chan = (struct cycx_x25_channel*)dev->priv;
+
+ if (chan->lcn == lcn)
+ break;
+ dev = chan->slave;
+ }
+ return dev;
+}
+
+/* Find network device by its remote dte address. */
+static struct net_device *
+ cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte)
+{
+ struct net_device *dev = wandev->dev;
+ struct cycx_x25_channel *chan;
+
+ while (dev) {
+ chan = (struct cycx_x25_channel*)dev->priv;
+
+ if (!strcmp(chan->addr, dte))
+ break;
+ dev = chan->slave;
+ }
+ return dev;
+}
+
+/* Initiate connection on the logical channel.
+ * o for PVC we just get channel configuration
+ * o for SVCs place an X.25 call
+ *
+ * Return: 0 connected
+ * >0 connection in progress
+ * <0 failure */
+static int cycx_x25_chan_connect(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_device *card = chan->card;
+
+ if (chan->svc) {
+ if (!chan->addr[0])
+ return -EINVAL; /* no destination address */
+
+ dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
+ card->devname, chan->addr);
+
+ if (x25_place_call(card, chan))
+ return -EIO;
+
+ cycx_x25_set_chan_state(dev, WAN_CONNECTING);
+ return 1;
+ } else
+ cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+
+ return 0;
+}
+
+/* Disconnect logical channel.
+ * o if SVC then clear X.25 call */
+static void cycx_x25_chan_disconnect(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+
+ if (chan->svc) {
+ x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
+ cycx_x25_set_chan_state(dev, WAN_DISCONNECTING);
+ } else
+ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+}
+
+/* Called by kernel timer */
+static void cycx_x25_chan_timer(unsigned long d)
+{
+ struct net_device *dev = (struct net_device *)d;
+ struct cycx_x25_channel *chan = dev->priv;
+
+ if (chan->state == WAN_CONNECTED)
+ cycx_x25_chan_disconnect(dev);
+ else
+ printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
+ chan->card->devname, __FUNCTION__, dev->name);
+}
+
+/* Set logical channel state. */
+static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_device *card = chan->card;
+ unsigned long flags;
+ char *string_state = NULL;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (chan->state != state) {
+ if (chan->svc && chan->state == WAN_CONNECTED)
+ del_timer(&chan->timer);
+
+ switch (state) {
+ case WAN_CONNECTED:
+ string_state = "connected!";
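+ /* expose the LCN as the interface hardware address */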
+ *(u16*)dev->dev_addr = htons(chan->lcn);
+ netif_wake_queue(dev);
+ reset_timer(dev);
+
+ if (chan->protocol == ETH_P_X25)
+ cycx_x25_chan_send_event(dev, 1);
+
+ break;
+ case WAN_CONNECTING:
+ string_state = "connecting...";
+ break;
+ case WAN_DISCONNECTING:
+ string_state = "disconnecting...";
+ break;
+ case WAN_DISCONNECTED:
+ string_state = "disconnected!";
+
+ if (chan->svc) {
+ *(unsigned short*)dev->dev_addr = 0;
+ chan->lcn = 0;
+ }
+
+ if (chan->protocol == ETH_P_X25)
+ cycx_x25_chan_send_event(dev, 2);
+
+ netif_wake_queue(dev);
+ break;
+ }
+
+ printk(KERN_INFO "%s: interface %s %s\n", card->devname,
+ dev->name, string_state);
+ chan->state = state;
+ }
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/* Send packet on a logical channel.
+ * When this function is called, tx_skb field of the channel data space
+ * points to the transmit socket buffer. When transmission is complete,
+ * release socket buffer and reset 'tbusy' flag.
+ *
+ * Return: 0 - transmission complete
+ * 1 - busy
+ *
+ * Notes:
+ * 1. If packet length is greater than MTU for this channel, we'll fragment
+ * the packet into 'complete sequence' using M-bit.
+ * 2. When transmission is complete, an event notification should be issued
+ * to the router. */
+static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+ struct cycx_device *card = chan->card;
+ int bitm = 0; /* final packet */
+ unsigned len = skb->len;
+
+ if (skb->len > card->wandev.mtu) {
+ len = card->wandev.mtu;
+ bitm = 0x10; /* set M-bit (more data) */
+ }
+
+ if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
+ return 1;
+
+ if (bitm) {
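+ /* an MTU-sized head went out with the M-bit set: pull it and
+ report "busy" so the stack requeues the skb and the tail
+ is sent on the next call */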
+ skb_pull(skb, len);
+ return 1;
+ }
+
+ ++chan->ifstats.tx_packets;
+ chan->ifstats.tx_bytes += len;
+
+ return 0;
+}
+
+/* Send event (connection, disconnection, etc) to X.25 socket layer */
+
+static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
+{
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ if ((skb = dev_alloc_skb(1)) == NULL) {
+ printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = event;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies; /* timestamp */
+}
+
+/* Convert line speed in bps to a number used by cyclom 2x code. */
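+/* The returned code matches cycx_x25_config.speed (e.g. 5 == 38400 bps). */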
+static u8 bps_to_speed_code(u32 bps)
+{
+ u8 number = 0; /* defaults to the lowest (1200) speed ;> */
+
+ if (bps >= 512000) number = 8;
+ else if (bps >= 256000) number = 7;
+ else if (bps >= 64000) number = 6;
+ else if (bps >= 38400) number = 5;
+ else if (bps >= 19200) number = 4;
+ else if (bps >= 9600) number = 3;
+ else if (bps >= 4800) number = 2;
+ else if (bps >= 2400) number = 1;
+
+ return number;
+}
+
+/* log base 2 */
+static u8 cycx_log2(u32 n)
+{
+ u8 log = 0;
+
+ if (!n)
+ return 0;
+
+ while (n > 1) {
+ n >>= 1;
+ ++log;
+ }
+
+ return log;
+}
+
+/* Convert decimal string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted. */
+static unsigned dec_to_uint(u8 *str, int len)
+{
+ unsigned val = 0;
+
+ if (!len)
+ len = strlen(str);
+
+ for (; len && is_digit(*str); ++str, --len)
+ val = (val * 10) + (*str - (unsigned) '0');
+
+ return val;
+}
+
+static void reset_timer(struct net_device *dev)
+{
+ struct cycx_x25_channel *chan = dev->priv;
+
+ if (chan->svc)
+ mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void cycx_x25_dump_config(struct cycx_x25_config *conf)
+{
+ printk(KERN_INFO "X.25 configuration\n");
+ printk(KERN_INFO "-----------------\n");
+ printk(KERN_INFO "link number=%d\n", conf->link);
+ printk(KERN_INFO "line speed=%d\n", conf->speed);
+ printk(KERN_INFO "clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
+ printk(KERN_INFO "# level 2 retransm.=%d\n", conf->n2);
+ printk(KERN_INFO "level 2 window=%d\n", conf->n2win);
+ printk(KERN_INFO "level 3 window=%d\n", conf->n3win);
+ printk(KERN_INFO "# logical channels=%d\n", conf->nvc);
+ printk(KERN_INFO "level 3 pkt len=%d\n", conf->pktlen);
+ printk(KERN_INFO "my address=%d\n", conf->locaddr);
+ printk(KERN_INFO "remote address=%d\n", conf->remaddr);
+ printk(KERN_INFO "t1=%d seconds\n", conf->t1);
+ printk(KERN_INFO "t2=%d seconds\n", conf->t2);
+ printk(KERN_INFO "t21=%d seconds\n", conf->t21);
+ printk(KERN_INFO "# PVCs=%d\n", conf->npvc);
+ printk(KERN_INFO "t23=%d seconds\n", conf->t23);
+ printk(KERN_INFO "flags=0x%x\n", conf->flags);
+}
+
+static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
+{
+ printk(KERN_INFO "X.25 statistics\n");
+ printk(KERN_INFO "--------------\n");
+ printk(KERN_INFO "rx_crc_errors=%d\n", stats->rx_crc_errors);
+ printk(KERN_INFO "rx_over_errors=%d\n", stats->rx_over_errors);
+ printk(KERN_INFO "n2_tx_frames=%d\n", stats->n2_tx_frames);
+ printk(KERN_INFO "n2_rx_frames=%d\n", stats->n2_rx_frames);
+ printk(KERN_INFO "tx_timeouts=%d\n", stats->tx_timeouts);
+ printk(KERN_INFO "rx_timeouts=%d\n", stats->rx_timeouts);
+ printk(KERN_INFO "n3_tx_packets=%d\n", stats->n3_tx_packets);
+ printk(KERN_INFO "n3_rx_packets=%d\n", stats->n3_rx_packets);
+ printk(KERN_INFO "tx_aborts=%d\n", stats->tx_aborts);
+ printk(KERN_INFO "rx_aborts=%d\n", stats->rx_aborts);
+}
+
+static void cycx_x25_dump_devs(struct wan_device *wandev)
+{
+ struct net_device *dev = wandev->dev;
+
+ printk(KERN_INFO "X.25 dev states\n");
+ printk(KERN_INFO "name: addr: txoff: protocol:\n");
+ printk(KERN_INFO "---------------------------------------\n");
+
+ while(dev) {
+ struct cycx_x25_channel *chan = dev->priv;
+
+ printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
+ chan->name, chan->addr, netif_queue_stopped(dev),
+ chan->protocol == ETH_P_IP ? "IP" : "X25");
+ dev = chan->slave;
+ }
+}
+
+#endif /* CYCLOMX_X25_DEBUG */
+/* End */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
new file mode 100644
index 000000000000..6e1ec5bf22fc
--- /dev/null
+++ b/drivers/net/wan/dlci.c
@@ -0,0 +1,566 @@
+/*
+ * DLCI Implementation of Frame Relay protocol for Linux, according to
+ * RFC 1490. This generic device provides en/decapsulation for an
+ * underlying hardware driver. Routes & IPs are assigned to these
+ * interfaces. Requires 'dlcicfg' program to create usable
+ * interfaces, the initial one, 'dlci' is for IOCTL use only.
+ *
+ * Version: @(#)dlci.c 0.35 4 Jan 1997
+ *
+ * Author: Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ *
+ * 0.15 Mike McLagan Packet freeing, bug in kmalloc call
+ * DLCI_RET handling
+ * 0.20 Mike McLagan More conservative on which packets
+ * are returned for retry and which
+ * are dropped. If DLCI_RET_DROP is
+ * returned from the FRAD, the packet is
+ * sent back to Linux for re-transmission
+ * 0.25 Mike McLagan Converted to use SIOC IOCTL calls
+ * 0.30 Jim Freeman Fixed to allow IPX traffic
+ * 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_frad.h>
+#include <linux/bitops.h>
+
+#include <net/sock.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
+
+static LIST_HEAD(dlci_devs);
+
+static void dlci_setup(struct net_device *);
+
+/*
+ * these encapsulate the RFC 1490 requirements as well as
+ * deal with packet transmission and reception, working with
+ * the upper network layers
+ */
+
+static int dlci_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ struct frhdr hdr;
+ struct dlci_local *dlp;
+ unsigned int hlen;
+ char *dest;
+
+ dlp = dev->priv;
+
+ hdr.control = FRAD_I_UI;
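+ /* RFC 1490: IP uses the short one-byte NLPID form; other protocols
+ are wrapped in a SNAP header (zero OUI, EtherType in PID) */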
+ switch(type)
+ {
+ case ETH_P_IP:
+ hdr.IP_NLPID = FRAD_P_IP;
+ hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
+ break;
+
+ /* feel free to add other types, if necessary */
+
+ default:
+ hdr.pad = FRAD_P_PADDING;
+ hdr.NLPID = FRAD_P_SNAP;
+ memset(hdr.OUI, 0, sizeof(hdr.OUI));
+ hdr.PID = htons(type);
+ hlen = sizeof(hdr);
+ break;
+ }
+
+ dest = skb_push(skb, hlen);
+ if (!dest)
+ return(0);
+
+ memcpy(dest, &hdr, hlen);
+
+ return(hlen);
+}
+
+static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dlci_local *dlp;
+ struct frhdr *hdr;
+ int process, header;
+
+ dlp = dev->priv;
+ if (!pskb_may_pull(skb, sizeof(*hdr))) {
+ printk(KERN_NOTICE "%s: invalid data no header\n",
+ dev->name);
+ dlp->stats.rx_errors++;
+ kfree_skb(skb);
+ return;
+ }
+
+ hdr = (struct frhdr *) skb->data;
+ process = 0;
+ header = 0;
+ skb->dev = dev;
+
+ if (hdr->control != FRAD_I_UI)
+ {
+ printk(KERN_NOTICE "%s: Invalid header flag 0x%02X.\n", dev->name, hdr->control);
+ dlp->stats.rx_errors++;
+ }
+ else
+ switch(hdr->IP_NLPID)
+ {
+ case FRAD_P_PADDING:
+ if (hdr->NLPID != FRAD_P_SNAP)
+ {
+ printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->NLPID);
+ dlp->stats.rx_errors++;
+ break;
+ }
+
+ if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
+ {
+ printk(KERN_NOTICE "%s: Unsupported organizationally unique identifier 0x%02X-%02X-%02X.\n", dev->name, hdr->OUI[0], hdr->OUI[1], hdr->OUI[2]);
+ dlp->stats.rx_errors++;
+ break;
+ }
+
+ /* at this point, it's an EtherType frame */
+ header = sizeof(struct frhdr);
+ /* Already in network order ! */
+ skb->protocol = hdr->PID;
+ process = 1;
+ break;
+
+ case FRAD_P_IP:
+ header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
+ skb->protocol = htons(ETH_P_IP);
+ process = 1;
+ break;
+
+ case FRAD_P_SNAP:
+ case FRAD_P_Q933:
+ case FRAD_P_CLNP:
+ printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->pad);
+ dlp->stats.rx_errors++;
+ break;
+
+ default:
+ printk(KERN_NOTICE "%s: Invalid pad byte 0x%02X.\n", dev->name, hdr->pad);
+ dlp->stats.rx_errors++;
+ break;
+ }
+
+ if (process)
+ {
+ /* we've set up the protocol, so discard the header */
+ skb->mac.raw = skb->data;
+ skb_pull(skb, header);
+ dlp->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ dlp->stats.rx_packets++;
+ dev->last_rx = jiffies;
+ }
+ else
+ dev_kfree_skb(skb);
+}
+
+static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dlci_local *dlp;
+ int ret;
+
+ ret = 0;
+
+ if (!skb || !dev)
+ return(0);
+
+ dlp = dev->priv;
+
+ netif_stop_queue(dev);
+
+ ret = dlp->slave->hard_start_xmit(skb, dlp->slave);
+ switch (ret)
+ {
+ case DLCI_RET_OK:
+ dlp->stats.tx_packets++;
+ ret = 0;
+ break;
+ case DLCI_RET_ERR:
+ dlp->stats.tx_errors++;
+ ret = 0;
+ break;
+ case DLCI_RET_DROP:
+ dlp->stats.tx_dropped++;
+ ret = 1;
+ break;
+ }
+ /* Alan Cox recommends always returning 0, and always freeing the packet */
+ /* experience suggests a slightly more conservative approach */
+
+ if (!ret)
+ {
+ dev_kfree_skb(skb);
+ netif_wake_queue(dev);
+ }
+ return(ret);
+}
+
+static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get)
+{
+ struct dlci_conf config;
+ struct dlci_local *dlp;
+ struct frad_local *flp;
+ int err;
+
+ dlp = dev->priv;
+
+ flp = dlp->slave->priv;
+
+ if (!get)
+ {
+ if(copy_from_user(&config, conf, sizeof(struct dlci_conf)))
+ return -EFAULT;
+ if (config.flags & ~DLCI_VALID_FLAGS)
+ return(-EINVAL);
+ memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
+ dlp->configured = 1;
+ }
+
+ err = (*flp->dlci_conf)(dlp->slave, dev, get);
+ if (err)
+ return(err);
+
+ if (get)
+ {
+ if(copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
+ return -EFAULT;
+ }
+
+ return(0);
+}
+
+static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dlci_local *dlp;
+
+ if (!capable(CAP_NET_ADMIN))
+ return(-EPERM);
+
+ dlp = dev->priv;
+
+ switch(cmd)
+ {
+ case DLCI_GET_SLAVE:
+ if (!*(short *)(dev->dev_addr))
+ return(-EINVAL);
+
+ strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
+ break;
+
+ case DLCI_GET_CONF:
+ case DLCI_SET_CONF:
+ if (!*(short *)(dev->dev_addr))
+ return(-EINVAL);
+
+ return(dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF));
+ break;
+
+ default:
+ return(-EOPNOTSUPP);
+ }
+ return(0);
+}
+
+static int dlci_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dlci_local *dlp;
+
+ dlp = dev->priv;
+
+ return((*dlp->slave->change_mtu)(dlp->slave, new_mtu));
+}
+
+static int dlci_open(struct net_device *dev)
+{
+ struct dlci_local *dlp;
+ struct frad_local *flp;
+ int err;
+
+ dlp = dev->priv;
+
+ if (!*(short *)(dev->dev_addr))
+ return(-EINVAL);
+
+ if (!netif_running(dlp->slave))
+ return(-ENOTCONN);
+
+ flp = dlp->slave->priv;
+ err = (*flp->activate)(dlp->slave, dev);
+ if (err)
+ return(err);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int dlci_close(struct net_device *dev)
+{
+ struct dlci_local *dlp;
+ struct frad_local *flp;
+ int err;
+
+ netif_stop_queue(dev);
+
+ dlp = dev->priv;
+
+ flp = dlp->slave->priv;
+ err = (*flp->deactivate)(dlp->slave, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *dlci_get_stats(struct net_device *dev)
+{
+ struct dlci_local *dlp;
+
+ dlp = dev->priv;
+
+ return(&dlp->stats);
+}
+
+static int dlci_add(struct dlci_add *dlci)
+{
+ struct net_device *master, *slave;
+ struct dlci_local *dlp;
+ struct frad_local *flp;
+ int err = -EINVAL;
+
+
+ /* validate slave device */
+ slave = dev_get_by_name(dlci->devname);
+ if (!slave)
+ return -ENODEV;
+
+ if (slave->type != ARPHRD_FRAD || slave->priv == NULL)
+ goto err1;
+
+ /* create device name */
+ master = alloc_netdev( sizeof(struct dlci_local), "dlci%d",
+ dlci_setup);
+ if (!master) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ /* make sure same slave not already registered */
+ rtnl_lock();
+ list_for_each_entry(dlp, &dlci_devs, list) {
+ if (dlp->slave == slave) {
+ err = -EBUSY;
+ goto err2;
+ }
+ }
+
+ err = dev_alloc_name(master, master->name);
+ if (err < 0)
+ goto err2;
+
+ *(short *)(master->dev_addr) = dlci->dlci;
+
+ dlp = (struct dlci_local *) master->priv;
+ dlp->slave = slave;
+ dlp->master = master;
+
+ flp = slave->priv;
+ err = (*flp->assoc)(slave, master);
+ if (err < 0)
+ goto err2;
+
+ err = register_netdevice(master);
+ if (err < 0)
+ goto err2;
+
+ strcpy(dlci->devname, master->name);
+
+ list_add(&dlp->list, &dlci_devs);
+ rtnl_unlock();
+
+ return(0);
+
+ err2:
+ rtnl_unlock();
+ free_netdev(master);
+ err1:
+ dev_put(slave);
+ return(err);
+}
+
+static int dlci_del(struct dlci_add *dlci)
+{
+ struct dlci_local *dlp;
+ struct frad_local *flp;
+ struct net_device *master, *slave;
+ int err;
+
+ /* validate slave device */
+ master = __dev_get_by_name(dlci->devname);
+ if (!master)
+ return(-ENODEV);
+
+ if (netif_running(master)) {
+ return(-EBUSY);
+ }
+
+ dlp = master->priv;
+ slave = dlp->slave;
+ flp = slave->priv;
+
+ rtnl_lock();
+ err = (*flp->deassoc)(slave, master);
+ if (!err) {
+ list_del(&dlp->list);
+
+ unregister_netdevice(master);
+
+ dev_put(slave);
+ }
+ rtnl_unlock();
+
+ return(err);
+}
+
+static int dlci_ioctl(unsigned int cmd, void __user *arg)
+{
+ struct dlci_add add;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return(-EPERM);
+
+ if(copy_from_user(&add, arg, sizeof(struct dlci_add)))
+ return -EFAULT;
+
+ switch (cmd)
+ {
+ case SIOCADDDLCI:
+ err = dlci_add(&add);
+
+ if (!err)
+ if(copy_to_user(arg, &add, sizeof(struct dlci_add)))
+ return -EFAULT;
+ break;
+
+ case SIOCDELDLCI:
+ err = dlci_del(&add);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return(err);
+}
+
+static void dlci_setup(struct net_device *dev)
+{
+ struct dlci_local *dlp = dev->priv;
+
+ dev->flags = 0;
+ dev->open = dlci_open;
+ dev->stop = dlci_close;
+ dev->do_ioctl = dlci_dev_ioctl;
+ dev->hard_start_xmit = dlci_transmit;
+ dev->hard_header = dlci_header;
+ dev->get_stats = dlci_get_stats;
+ dev->change_mtu = dlci_change_mtu;
+ dev->destructor = free_netdev;
+
+ dlp->receive = dlci_receive;
+
+ dev->type = ARPHRD_DLCI;
+ dev->hard_header_len = sizeof(struct frhdr);
+ dev->addr_len = sizeof(short);
+
+}
+
+/* if slave is unregistering, then cleanup master */
+static int dlci_dev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+
+ if (event == NETDEV_UNREGISTER) {
+ struct dlci_local *dlp;
+
+ list_for_each_entry(dlp, &dlci_devs, list) {
+ if (dlp->slave == dev) {
+ list_del(&dlp->list);
+ unregister_netdevice(dlp->master);
+ dev_put(dlp->slave);
+ break;
+ }
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dlci_notifier = {
+ .notifier_call = dlci_dev_event,
+};
+
+static int __init init_dlci(void)
+{
+ dlci_ioctl_set(dlci_ioctl);
+ register_netdevice_notifier(&dlci_notifier);
+
+ printk("%s.\n", version);
+
+ return 0;
+}
+
+static void __exit dlci_exit(void)
+{
+ struct dlci_local *dlp, *nxt;
+
+ dlci_ioctl_set(NULL);
+ unregister_netdevice_notifier(&dlci_notifier);
+
+ rtnl_lock();
+ list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
+ unregister_netdevice(dlp->master);
+ dev_put(dlp->slave);
+ }
+ rtnl_unlock();
+}
+
+module_init(init_dlci);
+module_exit(dlci_exit);
+
+MODULE_AUTHOR("Mike McLagan");
+MODULE_DESCRIPTION("Frame Relay DLCI layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
new file mode 100644
index 000000000000..520a77a798e2
--- /dev/null
+++ b/drivers/net/wan/dscc4.c
@@ -0,0 +1,2074 @@
+/*
+ * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License.
+ *
+ * The author may be reached as romieu@cogenit.fr.
+ * Specific bug reports/asian food will be welcome.
+ *
+ * Special thanks to the nice people at CS-Telecom for the hardware and the
+ * access to the test/measure tools.
+ *
+ *
+ * Theory of Operation
+ *
+ * I. Board Compatibility
+ *
+ * This device driver is designed for the Siemens PEB20534 4-port serial
+ * controller as found on Etinc PCISYNC cards. The documentation for the
+ * chipset is available at http://www.infineon.com:
+ * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
+ * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
+ * - Application Hint "Management of DSCC4 on-chip FIFO resources".
+ * - Errata sheet DS5 (courtesy of Michael Skerritt).
+ * Jens David has built an adapter based on the same chipset. Take a look
+ * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
+ * driver.
+ * Sample code (2 revisions) is available at Infineon.
+ *
+ * II. Board-specific settings
+ *
+ * Pcisync can transmit some clock signal to the outside world on the
+ * *first two* ports provided you put a quartz and a line driver on it and
+ * remove the jumpers. The operation is described on Etinc web site. If you
+ * go DCE on these ports, don't forget to use an adequate cable.
+ *
+ * Sharing of the PCI interrupt line for this board is possible.
+ *
+ * III. Driver operation
+ *
+ * The rx/tx operations are based on a linked list of descriptors. The driver
+ * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
+ * I tried to fix it, the more it started to look like (convoluted) software
+ * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
+ * this an RFC 2119 MUST.
+ *
+ * Tx direction
+ * When the tx ring is full, the xmit routine issues a call to netdev_stop.
+ * The device is supposed to be enabled again during an ALLS irq (we could
+ * use HI but as it's easy to lose events, it's fscked).
+ *
+ * Rx direction
+ * The received frames aren't supposed to span over multiple receiving areas.
+ * I may implement it some day but it isn't the highest ranked item.
+ *
+ * IV. Notes
+ * The current error (XDU, RFO) recovery code is untested.
+ * So far, RDO takes its RX channel down and the right sequence to enable it
+ * again is still a mystery. If RDO happens, plan a reboot. More details
+ * in the code (NB: as this happens, TX still works).
+ * Don't mess the cables during operation, especially on DTE ports. I don't
+ * suggest it for DCE either but at least one can get some messages instead
+ * of a complete instant freeze.
+ * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
+ * the documentation/chipset releases.
+ *
+ * TODO:
+ * - test X25.
+ * - use polling at high irq/s,
+ * - performance analysis,
+ * - endianness.
+ *
+ * 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
+ * - Contribution to support the new generic HDLC layer.
+ *
+ * 2002/01 Ueimor
+ * - old style interface removal
+ * - dscc4_release_ring fix (related to DMA mapping)
+ * - hard_start_xmit fix (hint: TxSizeMax)
+ * - misc crapectomy.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <net/syncppp.h>
+#include <linux/hdlc.h>
+
+/* Version */
+static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
+static int debug;
+static int quartz;
+
+#ifdef CONFIG_DSCC4_PCI_RST
+static DECLARE_MUTEX(dscc4_sem);
+static u32 dscc4_pci_config_store[16];
+#endif
+
+#define DRV_NAME "dscc4"
+
+#undef DSCC4_POLLING
+
+/* Module parameters */
+
+MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
+MODULE_LICENSE("GPL");
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug,"Enable/disable extra messages");
+module_param(quartz, int, 0);
+MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
+
+/* Structures */
+
+struct thingie {
+ int define;
+ u32 bits;
+};
+
+struct TxFD {
+ u32 state;
+ u32 next;
+ u32 data;
+ u32 complete;
+ u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
+};
+
+struct RxFD {
+ u32 state1;
+ u32 next;
+ u32 data;
+ u32 state2;
+ u32 end;
+};
+
+#define DUMMY_SKB_SIZE 64
+#define TX_LOW 8
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 32
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
+#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */
+#define TX_TIMEOUT (HZ/10)
+#define DSCC4_HZ_MAX 33000000
+#define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */
+#define dev_per_card 4
+#define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */
+
+#define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
+#define TO_SIZE(state) (((state) >> 16) & 0x1fff)
+
+/*
+ * Given the operating range of Linux HDLC, the 2 defines below could be
+ * made simpler. However, they are a fine reminder of the limitations of
+ * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
+ */
+#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
+#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
+#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */
+#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
+
+struct dscc4_pci_priv {
+ u32 *iqcfg;
+ int cfg_cur;
+ spinlock_t lock;
+ struct pci_dev *pdev;
+
+ struct dscc4_dev_priv *root;
+ dma_addr_t iqcfg_dma;
+ u32 xtal_hz;
+};
+
+struct dscc4_dev_priv {
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ struct RxFD *rx_fd;
+ struct TxFD *tx_fd;
+ u32 *iqrx;
+ u32 *iqtx;
+
+ /* FIXME: check all the volatile are required */
+ volatile u32 tx_current;
+ u32 rx_current;
+ u32 iqtx_current;
+ u32 iqrx_current;
+
+ volatile u32 tx_dirty;
+ volatile u32 ltda;
+ u32 rx_dirty;
+ u32 lrda;
+
+ dma_addr_t tx_fd_dma;
+ dma_addr_t rx_fd_dma;
+ dma_addr_t iqtx_dma;
+ dma_addr_t iqrx_dma;
+
+ u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */
+
+ struct timer_list timer;
+
+ struct dscc4_pci_priv *pci_priv;
+ spinlock_t lock;
+
+ int dev_id;
+ volatile u32 flags;
+ u32 timer_help;
+
+ unsigned short encoding;
+ unsigned short parity;
+ struct net_device *dev;
+ sync_serial_settings settings;
+ void __iomem *base_addr;
+ u32 __pad __attribute__ ((aligned (4)));
+};
+
+/* GLOBAL registers definitions */
+#define GCMDR 0x00
+#define GSTAR 0x04
+#define GMODE 0x08
+#define IQLENR0 0x0C
+#define IQLENR1 0x10
+#define IQRX0 0x14
+#define IQTX0 0x24
+#define IQCFG 0x3c
+#define FIFOCR1 0x44
+#define FIFOCR2 0x48
+#define FIFOCR3 0x4c
+#define FIFOCR4 0x34
+#define CH0CFG 0x50
+#define CH0BRDA 0x54
+#define CH0BTDA 0x58
+#define CH0FRDA 0x98
+#define CH0FTDA 0xb0
+#define CH0LRDA 0xc8
+#define CH0LTDA 0xe0
+
+/* SCC registers definitions */
+#define SCC_START 0x0100
+#define SCC_OFFSET 0x80
+#define CMDR 0x00
+#define STAR 0x04
+#define CCR0 0x08
+#define CCR1 0x0c
+#define CCR2 0x10
+#define BRR 0x2C
+#define RLCR 0x40
+#define IMR 0x54
+#define ISR 0x58
+
+#define GPDIR 0x0400
+#define GPDATA 0x0404
+#define GPIM 0x0408
+
+/* Bit masks */
+#define EncodingMask 0x00700000
+#define CrcMask 0x00000003
+
+#define IntRxScc0 0x10000000
+#define IntTxScc0 0x01000000
+
+#define TxPollCmd 0x00000400
+#define RxActivate 0x08000000
+#define MTFi 0x04000000
+#define Rdr 0x00400000
+#define Rdt 0x00200000
+#define Idr 0x00100000
+#define Idt 0x00080000
+#define TxSccRes 0x01000000
+#define RxSccRes 0x00010000
+#define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */
+#define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */
+
+#define Ccr0ClockMask 0x0000003f
+#define Ccr1LoopMask 0x00000200
+#define IsrMask 0x000fffff
+#define BrrExpMask 0x00000f00
+#define BrrMultMask 0x0000003f
+#define EncodingMask 0x00700000
+#define Hold 0x40000000
+#define SccBusy 0x10000000
+#define PowerUp 0x80000000
+#define Vis 0x00001000
+#define FrameOk (FrameVfr | FrameCrc)
+#define FrameVfr 0x80
+#define FrameRdo 0x40
+#define FrameCrc 0x20
+#define FrameRab 0x10
+#define FrameAborted 0x00000200
+#define FrameEnd 0x80000000
+#define DataComplete 0x40000000
+#define LengthCheck 0x00008000
+#define SccEvt 0x02000000
+#define NoAck 0x00000200
+#define Action 0x00000001
+#define HiDesc 0x20000000
+
+/* SCC events */
+#define RxEvt 0xf0000000
+#define TxEvt 0x0f000000
+#define Alls 0x00040000
+#define Xdu 0x00010000
+#define Cts 0x00004000
+#define Xmr 0x00002000
+#define Xpr 0x00001000
+#define Rdo 0x00000080
+#define Rfs 0x00000040
+#define Cd 0x00000004
+#define Rfo 0x00000002
+#define Flex 0x00000001
+
+/* DMA core events */
+#define Cfg 0x00200000
+#define Hi 0x00040000
+#define Fi 0x00020000
+#define Err 0x00010000
+#define Arf 0x00000002
+#define ArAck 0x00000001
+
+/* State flags */
+#define Ready 0x00000000
+#define NeedIDR 0x00000001
+#define NeedIDT 0x00000002
+#define RdoSet 0x00000004
+#define FakeReset 0x00000008
+
+/* Don't mask RDO. Ever. */
+#ifdef DSCC4_POLLING
+#define EventsMask 0xfffeef7f
+#else
+#define EventsMask 0xfffa8f7a
+#endif
+
+/* Functions prototypes */
+static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
+static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
+static int dscc4_open(struct net_device *);
+static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
+static int dscc4_close(struct net_device *);
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int dscc4_init_ring(struct net_device *);
+static void dscc4_release_ring(struct dscc4_dev_priv *);
+static void dscc4_timer(unsigned long);
+static void dscc4_tx_timeout(struct net_device *);
+static irqreturn_t dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
+static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
+static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
+#ifdef DSCC4_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
+#endif
+
+static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
+{
+ return dev_to_hdlc(dev)->priv;
+}
+
+static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
+{
+ return p->dev;
+}
+
+static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, int offset)
+{
+ u32 state;
+
+ /* Cf scc_writel for concern regarding thread-safety */
+ state = dpriv->scc_regs[offset >> 2];
+ state &= ~mask;
+ state |= value;
+ dpriv->scc_regs[offset >> 2] = state;
+ writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
+}
+
+static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, int offset)
+{
+ /*
+ * Thread-UNsafe.
+ * As of 2002/02/16, there are no threads racing for access.
+ */
+ dpriv->scc_regs[offset >> 2] = bits;
+ writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
+}
+
+static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
+{
+ return dpriv->scc_regs[offset >> 2];
+}
+
+static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ /* Cf errata DS5 p.4 */
+ readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
+ return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
+}
+
+static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ dpriv->ltda = dpriv->tx_fd_dma +
+ ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
+ writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
+ /* Flush posted writes *NOW* */
+ readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
+}
+
+static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ dpriv->lrda = dpriv->rx_fd_dma +
+ ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
+ writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+}
+
+static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
+{
+ return dpriv->tx_current == dpriv->tx_dirty;
+}
+
+static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
+}
+
+int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
+ const char *msg)
+{
+ int ret = 0;
+
+ if (debug > 1) {
+ if (SOURCE_ID(state) != dpriv->dev_id) {
+ printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+ dev->name, msg, SOURCE_ID(state), state );
+ ret = -1;
+ }
+ if (state & 0x0df80c00) {
+ printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+ dev->name, msg, state);
+ ret = -1;
+ }
+ }
+ return ret;
+}
+
+void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv,
+ char *msg)
+{
+ printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
+ dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
+}
+
+static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
+{
+ struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct TxFD *tx_fd = dpriv->tx_fd;
+ struct RxFD *rx_fd = dpriv->rx_fd;
+ struct sk_buff **skbuff;
+ int i;
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
+
+ skbuff = dpriv->tx_skbuff;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ tx_fd++;
+ }
+
+ skbuff = dpriv->rx_skbuff;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, rx_fd->data,
+ RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ rx_fd++;
+ }
+}
+
+inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
+ struct RxFD *rx_fd = dpriv->rx_fd + dirty;
+ const int len = RX_MAX(HDLC_MAX_MRU);
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = dev_alloc_skb(len);
+ dpriv->rx_skbuff[dirty] = skb;
+ if (skb) {
+ skb->protocol = hdlc_type_trans(skb, dev);
+ rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
+ len, PCI_DMA_FROMDEVICE);
+ } else {
+ rx_fd->data = (u32) NULL;
+ ret = -1;
+ }
+ return ret;
+}
+
+/*
+ * IRQ/thread/whatever safe
+ */
+static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, char *msg)
+{
+ s8 i = 0;
+
+ do {
+ if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
+ printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
+ msg, i);
+ goto done;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ rmb();
+ } while (++i > 0);
+ printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+done:
+ return (i >= 0) ? i : -EAGAIN;
+}
+
+static int dscc4_do_action(struct net_device *dev, char *msg)
+{
+ void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
+ s16 i = 0;
+
+ writel(Action, ioaddr + GCMDR);
+ ioaddr += GSTAR;
+ do {
+ u32 state = readl(ioaddr);
+
+ if (state & ArAck) {
+ printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
+ writel(ArAck, ioaddr);
+ goto done;
+ } else if (state & Arf) {
+ printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
+ writel(Arf, ioaddr);
+ i = -1;
+ goto done;
+ }
+ rmb();
+ } while (++i > 0);
+ printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+done:
+ return i;
+}
+
+static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
+{
+ int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+ s8 i = 0;
+
+ do {
+ if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
+ (dpriv->iqtx[cur] & Xpr))
+ break;
+ smp_rmb();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ } while (++i > 0);
+
+ return (i >= 0 ) ? i : -EAGAIN;
+}
+
+#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
+static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
+ /* Cf errata DS5 p.6 */
+ writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+ scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
+ readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+ writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
+ writel(Action, dpriv->base_addr + GCMDR);
+ spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
+}
+
+#endif
+
+#if 0
+static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ u16 i = 0;
+
+ /* Cf errata DS5 p.7 */
+ scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
+ scc_writel(0x00050000, dpriv, dev, CCR2);
+ /*
+ * Must be longer than the time required to fill the fifo.
+ */
+ while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
+ udelay(1);
+ wmb();
+ }
+
+ writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
+ if (dscc4_do_action(dev, "Rdt") < 0)
+ printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
+}
+#endif
+
+/* TODO: (ab)use this function to refill a completely depleted RX ring. */
+static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct sk_buff *skb;
+ int pkt_len;
+
+ skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
+ if (!skb) {
+ printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
+ goto refill;
+ }
+ pkt_len = TO_SIZE(rx_fd->state2);
+ pci_unmap_single(pdev, rx_fd->data, RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
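+ /* the chip appends a status byte to each received frame; strip it
+ from the payload and check the VFR and CRC bits */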
+ if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ skb_put(skb, pkt_len);
+ if (netif_running(dev))
+ skb->protocol = hdlc_type_trans(skb, dev);
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ } else {
+ if (skb->data[pkt_len] & FrameRdo)
+ stats->rx_fifo_errors++;
+ else if (!(skb->data[pkt_len] | ~FrameCrc))
+ stats->rx_crc_errors++;
+ else if (!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
+ stats->rx_length_errors++;
+ else
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ }
+refill:
+ while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
+ if (try_get_rx_skb(dpriv, dev) < 0)
+ break;
+ dpriv->rx_dirty++;
+ }
+ dscc4_rx_update(dpriv, dev);
+ rx_fd->state2 = 0x00000000;
+ rx_fd->end = 0xbabeface;
+}
+
+static void dscc4_free1(struct pci_dev *pdev)
+{
+ struct dscc4_pci_priv *ppriv;
+ struct dscc4_dev_priv *root;
+ int i;
+
+ ppriv = pci_get_drvdata(pdev);
+ root = ppriv->root;
+
+ for (i = 0; i < dev_per_card; i++)
+ unregister_hdlc_device(dscc4_to_dev(root + i));
+
+ pci_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < dev_per_card; i++)
+ free_netdev(root[i].dev);
+ kfree(root);
+ kfree(ppriv);
+}
+
+static int __devinit dscc4_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dscc4_pci_priv *priv;
+ struct dscc4_dev_priv *dpriv;
+ void __iomem *ioaddr;
+ int i, rc;
+
+ printk(KERN_DEBUG "%s", version);
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0)
+ goto out;
+
+ rc = pci_request_region(pdev, 0, "registers");
+ if (rc < 0) {
+ printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
+ DRV_NAME);
+ goto err_disable_0;
+ }
+ rc = pci_request_region(pdev, 1, "LBI interface");
+ if (rc < 0) {
+ printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
+ DRV_NAME);
+ goto err_free_mmio_region_1;
+ }
+
+ ioaddr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!ioaddr) {
+ printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
+ DRV_NAME, pci_resource_len(pdev, 0),
+ pci_resource_start(pdev, 0));
+ rc = -EIO;
+ goto err_free_mmio_regions_2;
+ }
+ printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
+ pci_resource_start(pdev, 0),
+ pci_resource_start(pdev, 1), pdev->irq);
+
+ /* Cf errata DS5 p.2 */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
+ pci_set_master(pdev);
+
+ rc = dscc4_found1(pdev, ioaddr);
+ if (rc < 0)
+ goto err_iounmap_3;
+
+ priv = pci_get_drvdata(pdev);
+
+ rc = request_irq(pdev->irq, dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root);
+ if (rc < 0) {
+ printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
+ goto err_release_4;
+ }
+
+ /* power up/little endian/dma core controlled via lrda/ltda */
+ writel(0x00000001, ioaddr + GMODE);
+ /* Shared interrupt queue */
+ {
+ u32 bits;
+
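+ /* the length code ((entries / 32) - 1) fits in 4 bits; replicate
+ it into all eight nibbles of IQLENR0, presumably one per
+ rx/tx interrupt queue */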
+ bits = (IRQ_RING_SIZE >> 5) - 1;
+ bits |= bits << 4;
+ bits |= bits << 8;
+ bits |= bits << 16;
+ writel(bits, ioaddr + IQLENR0);
+ }
+ /* Global interrupt queue */
+ writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+ priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
+ if (!priv->iqcfg)
+ goto err_free_irq_5;
+ writel(priv->iqcfg_dma, ioaddr + IQCFG);
+
+ rc = -ENOMEM;
+
+ /*
+ * SCC 0-3 private rx/tx irq structures
+ * IQRX/TXi needs to be set soon. Learned it the hard way...
+ */
+ for (i = 0; i < dev_per_card; i++) {
+ dpriv = priv->root + i;
+ dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
+ if (!dpriv->iqtx)
+ goto err_free_iqtx_6;
+ writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
+ }
+ for (i = 0; i < dev_per_card; i++) {
+ dpriv = priv->root + i;
+ dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
+ if (!dpriv->iqrx)
+ goto err_free_iqrx_7;
+ writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
+ }
+
+ /* Cf application hint. Beware of hard-lock condition on threshold. */
+ writel(0x42104000, ioaddr + FIFOCR1);
+ //writel(0x9ce69800, ioaddr + FIFOCR2);
+ writel(0xdef6d800, ioaddr + FIFOCR2);
+ //writel(0x11111111, ioaddr + FIFOCR4);
+ writel(0x18181818, ioaddr + FIFOCR4);
+ // FIXME: should depend on the chipset revision
+ writel(0x0000000e, ioaddr + FIFOCR3);
+
+ writel(0xff200001, ioaddr + GCMDR);
+
+ rc = 0;
+out:
+ return rc;
+
+err_free_iqrx_7:
+ while (--i >= 0) {
+ dpriv = priv->root + i;
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqrx, dpriv->iqrx_dma);
+ }
+ i = dev_per_card;
+err_free_iqtx_6:
+ while (--i >= 0) {
+ dpriv = priv->root + i;
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqtx, dpriv->iqtx_dma);
+ }
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
+ priv->iqcfg_dma);
+err_free_irq_5:
+ free_irq(pdev->irq, priv->root);
+err_release_4:
+ dscc4_free1(pdev);
+err_iounmap_3:
+ iounmap (ioaddr);
+err_free_mmio_regions_2:
+ pci_release_region(pdev, 1);
+err_free_mmio_region_1:
+ pci_release_region(pdev, 0);
+err_disable_0:
+ pci_disable_device(pdev);
+ goto out;
+};
+
+/*
+ * Let's hope the default values are decent enough to protect my
+ * feet from the user's gun - Ueimor
+ */
+static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ /* No interrupts, SCC core disabled. Let's relax */
+ scc_writel(0x00000000, dpriv, dev, CCR0);
+
+ scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);
+
+ /*
+ * No address recognition/crc-CCITT/cts enabled
+ * Shared flags transmission disabled - cf errata DS5 p.11
+ * Carrier detect disabled - cf errata p.14
+ * FIXME: carrier detection/polarity may be handled more gracefully.
+ */
+ scc_writel(0x02408000, dpriv, dev, CCR1);
+
+ /* crc not forwarded - Cf errata DS5 p.11 */
+ scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
+ // crc forwarded
+ //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
+}
+
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
+{
+ int ret = 0;
+
+ if ((hz < 0) || (hz > DSCC4_HZ_MAX))
+ ret = -EOPNOTSUPP;
+ else
+ dpriv->pci_priv->xtal_hz = hz;
+
+ return ret;
+}
+
+static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
+{
+ struct dscc4_pci_priv *ppriv;
+ struct dscc4_dev_priv *root;
+ int i, ret = -ENOMEM;
+
+ root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+ if (!root) {
+ printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
+ goto err_out;
+ }
+ memset(root, 0, dev_per_card*sizeof(*root));
+
+ for (i = 0; i < dev_per_card; i++) {
+ root[i].dev = alloc_hdlcdev(root + i);
+ if (!root[i].dev)
+ goto err_free_dev;
+ }
+
+ ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
+ if (!ppriv) {
+ printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
+ goto err_free_dev;
+ }
+ memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
+
+ ppriv->root = root;
+ spin_lock_init(&ppriv->lock);
+
+ for (i = 0; i < dev_per_card; i++) {
+ struct dscc4_dev_priv *dpriv = root + i;
+ struct net_device *d = dscc4_to_dev(dpriv);
+ hdlc_device *hdlc = dev_to_hdlc(d);
+
+ d->base_addr = (unsigned long)ioaddr;
+ d->init = NULL;
+ d->irq = pdev->irq;
+ d->open = dscc4_open;
+ d->stop = dscc4_close;
+ d->set_multicast_list = NULL;
+ d->do_ioctl = dscc4_ioctl;
+ d->tx_timeout = dscc4_tx_timeout;
+ d->watchdog_timeo = TX_TIMEOUT;
+ SET_MODULE_OWNER(d);
+ SET_NETDEV_DEV(d, &pdev->dev);
+
+ dpriv->dev_id = i;
+ dpriv->pci_priv = ppriv;
+ dpriv->base_addr = ioaddr;
+ spin_lock_init(&dpriv->lock);
+
+ hdlc->xmit = dscc4_start_xmit;
+ hdlc->attach = dscc4_hdlc_attach;
+
+ dscc4_init_registers(dpriv, d);
+ dpriv->parity = PARITY_CRC16_PR0_CCITT;
+ dpriv->encoding = ENCODING_NRZ;
+
+ ret = dscc4_init_ring(d);
+ if (ret < 0)
+ goto err_unregister;
+
+ ret = register_hdlc_device(d);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
+ dscc4_release_ring(dpriv);
+ goto err_unregister;
+ }
+ }
+
+ ret = dscc4_set_quartz(root, quartz);
+ if (ret < 0)
+ goto err_unregister;
+
+ pci_set_drvdata(pdev, ppriv);
+ return ret;
+
+err_unregister:
+ while (i-- > 0) {
+ dscc4_release_ring(root + i);
+ unregister_hdlc_device(dscc4_to_dev(root + i));
+ }
+ kfree(ppriv);
+ i = dev_per_card;
+err_free_dev:
+ while (i-- > 0)
+ free_netdev(root[i].dev);
+ kfree(root);
+err_out:
+ return ret;
+};
+
+/* FIXME: get rid of the unneeded code */
+static void dscc4_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+// struct dscc4_pci_priv *ppriv;
+
+ goto done;
+done:
+ dpriv->timer.expires = jiffies + TX_TIMEOUT;
+ add_timer(&dpriv->timer);
+}
+
+static void dscc4_tx_timeout(struct net_device *dev)
+{
+ /* FIXME: something is missing there */
+}
+
+static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
+{
+ sync_serial_settings *settings = &dpriv->settings;
+
+ if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
+ struct net_device *dev = dscc4_to_dev(dpriv);
+
+ printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DSCC4_PCI_RST
+/*
+ * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
+ * to provide a safe way to reset the ASIC without rebooting the whole
+ * machine.
+ *
+ * This code doesn't need to be efficient. Keep It Simple
+ */
+static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
+{
+ int i;
+
+ down(&dscc4_sem);
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
+
+ /* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
+ writel(0x001c0000, ioaddr + GMODE);
+ /* Configure GPIO port as output */
+ writel(0x0000ffff, ioaddr + GPDIR);
+ /* Disable interrupts */
+ writel(0x0000ffff, ioaddr + GPIM);
+
+ writel(0x0000ffff, ioaddr + GPDATA);
+ writel(0x00000000, ioaddr + GPDATA);
+
+ /* Flush posted writes */
+ readl(ioaddr + GSTAR);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
+ up(&dscc4_sem);
+}
+#else
+#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
+#endif /* CONFIG_DSCC4_PCI_RST */
+
+static int dscc4_open(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+ struct dscc4_pci_priv *ppriv;
+ int ret = -EAGAIN;
+
+ if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
+ goto err;
+
+ if ((ret = hdlc_open(dev)))
+ goto err;
+
+ ppriv = dpriv->pci_priv;
+
+ /*
+ * Due to various bugs, there is no way to reliably reset a
+ * specific port (manufacturer-dependent special PCI #RST wiring
+ * apart: it affects all ports). Thus the device goes into the most
+ * silent mode possible at dscc4_close() time and simply claims to
+ * be up if it's opened again. It still isn't possible to change
+ * the HDLC configuration without rebooting but at least the ports
+ * can be up/down ifconfig'ed without killing the host.
+ */
+ if (dpriv->flags & FakeReset) {
+ dpriv->flags &= ~FakeReset;
+ scc_patchl(0, PowerUp, dpriv, dev, CCR0);
+ scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
+ scc_writel(EventsMask, dpriv, dev, IMR);
+ printk(KERN_INFO "%s: up again.\n", dev->name);
+ goto done;
+ }
+
+ /* IDT+IDR during XPR */
+ dpriv->flags = NeedIDR | NeedIDT;
+
+ scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);
+
+ /*
+ * The following is a bit paranoid...
+ *
+ * NB: the datasheet "...CEC will stay active if the SCC is in
+ * power-down mode or..." and CCR2.RAC = 1 are two different
+ * situations.
+ */
+ if (scc_readl_star(dpriv, dev) & SccBusy) {
+ printk(KERN_ERR "%s busy. Try later\n", dev->name);
+ ret = -EAGAIN;
+ goto err_out;
+ } else
+ printk(KERN_INFO "%s: available. Good\n", dev->name);
+
+ scc_writel(EventsMask, dpriv, dev, IMR);
+
+ /* Posted write is flushed in the wait_ack loop */
+ scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);
+
+ if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
+ goto err_disable_scc_events;
+
+ /*
+ * I would expect XPR near CE completion (before ? after ?).
+ * At worst, this code won't see a late XPR and people
+ * will have to re-issue an ifconfig (this is harmless).
+ * WARNING, a really missing XPR usually means a hardware
+ * reset is needed. Suggestions anyone ?
+ */
+ if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
+ printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
+ goto err_disable_scc_events;
+ }
+
+ if (debug > 2)
+ dscc4_tx_print(dev, dpriv, "Open");
+
+done:
+ netif_start_queue(dev);
+
+ init_timer(&dpriv->timer);
+ dpriv->timer.expires = jiffies + 10*HZ;
+ dpriv->timer.data = (unsigned long)dev;
+ dpriv->timer.function = &dscc4_timer;
+ add_timer(&dpriv->timer);
+ netif_carrier_on(dev);
+
+ return 0;
+
+err_disable_scc_events:
+ scc_writel(0xffffffff, dpriv, dev, IMR);
+ scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
+err_out:
+ hdlc_close(dev);
+err:
+ return ret;
+}
+
+#ifdef DSCC4_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ /* FIXME: it's gonna be easy (TM), for sure */
+}
+#endif /* DSCC4_POLLING */
+
+static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+ struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
+ struct TxFD *tx_fd;
+ int next;
+
+ next = dpriv->tx_current%TX_RING_SIZE;
+ dpriv->tx_skbuff[next] = skb;
+ tx_fd = dpriv->tx_fd + next;
+ tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
+ tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_fd->complete = 0x00000000;
+ tx_fd->jiffies = jiffies;
+ mb();
+
+#ifdef DSCC4_POLLING
+ spin_lock(&dpriv->lock);
+ while (dscc4_tx_poll(dpriv, dev));
+ spin_unlock(&dpriv->lock);
+#endif
+
+ dev->trans_start = jiffies;
+
+ if (debug > 2)
+ dscc4_tx_print(dev, dpriv, "Xmit");
+ /* To be cleaned(unsigned int)/optimized. Later, ok ? */
+ if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
+ netif_stop_queue(dev);
+
+ if (dscc4_tx_quiescent(dpriv, dev))
+ dscc4_do_tx(dpriv, dev);
+
+ return 0;
+}
+
+static int dscc4_close(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+
+ del_timer_sync(&dpriv->timer);
+ netif_stop_queue(dev);
+
+ scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
+ scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
+ scc_writel(0xffffffff, dpriv, dev, IMR);
+
+ dpriv->flags |= FakeReset;
+
+ hdlc_close(dev);
+
+ return 0;
+}
+
+static inline int dscc4_check_clock_ability(int port)
+{
+ int ret = 0;
+
+#ifdef CONFIG_DSCC4_PCISYNC
+ if (port >= 2)
+ ret = -1;
+#endif
+ return ret;
+}
+
+/*
+ * DS1 p.137: "There are a total of 13 different clocking modes..."
+ * ^^
+ * Design choices:
+ * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
+ * Clock mode 3b _should_ work but the testing seems to make this point
+ * dubious (DIY testing requires setting CCR0 at 0x00000033).
+ * This is supposed to provide least surprise "DTE like" behavior.
+ * - if line rate is specified, clocks are assumed to be locally generated.
+ * A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
+ *   between these is automagically done according to the required frequency
+ * scaling. Of course some rounding may take place.
+ * - no high speed mode (40Mb/s). May be trivial to do but I don't have an
+ * appropriate external clocking device for testing.
+ * - no time-slot/clock mode 5: shameless laziness.
+ *
+ * The clock signal wiring can be (is ?) manufacturer dependent. Good luck.
+ *
+ * BIG FAT WARNING: if the device isn't provided enough clocking signal, it
+ * won't pass the init sequence. For example, straight back-to-back DTE without
+ * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
+ * called.
+ *
+ * Typos lurk in the datasheet (e.g. a missing divider in clock mode 7a,
+ * figure 51 p.153 of DS0)
+ *
+ * Clock mode related bits of CCR0:
+ * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
+ * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
+ * | | +-------- High Speed: say 0
+ * | | | +-+-+-- Clock Mode: 0..7
+ * | | | | | |
+ * -+-+-+-+-+-+-+-+
+ * x|x|5|4|3|2|1|0| lower bits
+ *
+ * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
+ * +-+-+-+------------------ M (0..15)
+ * | | | | +-+-+-+-+-+-- N (0..63)
+ * 0 0 0 0 | | | | 0 0 | | | | | |
+ * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
+ *
+ */
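+/*
+ * Worked example (illustrative only - the numbers are assumed, not taken
+ * from any particular board): with a hypothetical 14.7456 MHz quartz and a
+ * requested rate of 64000 bps, divider = 14745600 / 64000 = 230. Assuming
+ * this does not exceed BRR_DIVIDER_MAX, clock mode 7b is selected; shifting
+ * the divider until it fits in 6 bits yields n = 57, m = 2, hence
+ * BRR = (2 << 8) | 57 = 0x0239 and the rate reported back to the user is
+ * 14745600 / (57 << 2) = 64673 bps, slightly off because of rounding.
+ */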
+static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+ int ret = -1;
+ u32 brr;
+
+ *state &= ~Ccr0ClockMask;
+ if (*bps) { /* Clock generated - required for DCE */
+ u32 n = 0, m = 0, divider;
+ int xtal;
+
+ xtal = dpriv->pci_priv->xtal_hz;
+ if (!xtal)
+ goto done;
+ if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
+ goto done;
+ divider = xtal / *bps;
+ if (divider > BRR_DIVIDER_MAX) {
+ divider >>= 4;
+ *state |= 0x00000036; /* Clock mode 6b (BRG/16) */
+ } else
+ *state |= 0x00000037; /* Clock mode 7b (BRG) */
+ if (divider >> 22) {
+ n = 63;
+ m = 15;
+ } else if (divider) {
+ /* Extraction of the 6 highest weighted bits */
+ m = 0;
+ while (0xffffffc0 & divider) {
+ m++;
+ divider >>= 1;
+ }
+ n = divider;
+ }
+ brr = (m << 8) | n;
+ divider = n << m;
+ if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
+ divider <<= 4;
+ *bps = xtal / divider;
+ } else {
+ /*
+ * External clock - DTE
+ * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
+ * Nothing more to be done
+ */
+ brr = 0;
+ }
+ scc_writel(brr, dpriv, dev, BRR);
+ ret = 0;
+done:
+ return ret;
+}
+
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+ const size_t size = sizeof(dpriv->settings);
+ int ret = 0;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return -EOPNOTSUPP;
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(line, &dpriv->settings, size))
+ return -EFAULT;
+ break;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dpriv->flags & FakeReset) {
+ printk(KERN_INFO "%s: please reset the device"
+ " before this command\n", dev->name);
+ return -EPERM;
+ }
+ if (copy_from_user(&dpriv->settings, line, size))
+ return -EFAULT;
+ ret = dscc4_set_iface(dpriv, dev);
+ break;
+
+ default:
+ ret = hdlc_ioctl(dev, ifr, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static int dscc4_match(struct thingie *p, int value)
+{
+ int i;
+
+ for (i = 0; p[i].define != -1; i++) {
+ if (value == p[i].define)
+ break;
+ }
+ if (p[i].define == -1)
+ return -1;
+ else
+ return i;
+}
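+
+/*
+ * Illustrative use of dscc4_match() (based on the encoding table defined in
+ * dscc4_encoding_setting() below): dscc4_match(encoding, ENCODING_FM_MARK)
+ * returns 2, so the caller picks encoding[2].bits (0x00400000) to patch into
+ * CCR0, while an unsupported value runs into the { -1, 0 } sentinel and
+ * yields -1, which the callers turn into -EOPNOTSUPP.
+ */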
+
+static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ sync_serial_settings *settings = &dpriv->settings;
+ int ret = -EOPNOTSUPP;
+ u32 bps, state;
+
+ bps = settings->clock_rate;
+ state = scc_readl(dpriv, CCR0);
+ if (dscc4_set_clock(dev, &bps, &state) < 0)
+ goto done;
+ if (bps) { /* DCE */
+ printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
+ if (settings->clock_rate != bps) {
+ printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
+ dev->name, settings->clock_rate, bps);
+ settings->clock_rate = bps;
+ }
+ } else { /* DTE */
+ state |= PowerUp | Vis;
+ printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
+ }
+ scc_writel(state, dpriv, dev, CCR0);
+ ret = 0;
+done:
+ return ret;
+}
+
+static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ struct thingie encoding[] = {
+ { ENCODING_NRZ, 0x00000000 },
+ { ENCODING_NRZI, 0x00200000 },
+ { ENCODING_FM_MARK, 0x00400000 },
+ { ENCODING_FM_SPACE, 0x00500000 },
+ { ENCODING_MANCHESTER, 0x00600000 },
+ { -1, 0}
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(encoding, dpriv->encoding);
+ if (i >= 0)
+ scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
+ else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
+
+static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ sync_serial_settings *settings = &dpriv->settings;
+ u32 state;
+
+ state = scc_readl(dpriv, CCR1);
+ if (settings->loopback) {
+ printk(KERN_DEBUG "%s: loopback\n", dev->name);
+ state |= 0x00000100;
+ } else {
+ printk(KERN_DEBUG "%s: normal\n", dev->name);
+ state &= ~0x00000100;
+ }
+ scc_writel(state, dpriv, dev, CCR1);
+ return 0;
+}
+
+static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
+{
+ struct thingie crc[] = {
+ { PARITY_CRC16_PR0_CCITT, 0x00000010 },
+ { PARITY_CRC16_PR1_CCITT, 0x00000000 },
+ { PARITY_CRC32_PR0_CCITT, 0x00000011 },
+ { PARITY_CRC32_PR1_CCITT, 0x00000001 }
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(crc, dpriv->parity);
+ if (i >= 0)
+ scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
+ else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
+
+static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ struct {
+ int (*action)(struct dscc4_dev_priv *, struct net_device *);
+ } *p, do_setting[] = {
+ { dscc4_encoding_setting },
+ { dscc4_clock_setting },
+ { dscc4_loopback_setting },
+ { dscc4_crc_setting },
+ { NULL }
+ };
+ int ret = 0;
+
+ for (p = do_setting; p->action; p++) {
+ if ((ret = p->action(dpriv, dev)) < 0)
+ break;
+ }
+ return ret;
+}
+
+static irqreturn_t dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
+{
+ struct dscc4_dev_priv *root = token;
+ struct dscc4_pci_priv *priv;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ u32 state;
+ unsigned long flags;
+ int i, handled = 1;
+
+ priv = root->pci_priv;
+ dev = dscc4_to_dev(root);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ioaddr = root->base_addr;
+
+ state = readl(ioaddr + GSTAR);
+ if (!state) {
+ handled = 0;
+ goto out;
+ }
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
+ writel(state, ioaddr + GSTAR);
+
+ if (state & Arf) {
+ printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
+ dev->name);
+ goto out;
+ }
+ state &= ~ArAck;
+ if (state & Cfg) {
+ if (debug > 0)
+ printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
+ if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
+ printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
+ if (!(state &= ~Cfg))
+ goto out;
+ }
+ if (state & RxEvt) {
+ i = dev_per_card - 1;
+ do {
+ dscc4_rx_irq(priv, root + i);
+ } while (--i >= 0);
+ state &= ~RxEvt;
+ }
+ if (state & TxEvt) {
+ i = dev_per_card - 1;
+ do {
+ dscc4_tx_irq(priv, root + i);
+ } while (--i >= 0);
+ state &= ~TxEvt;
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
+ struct dscc4_dev_priv *dpriv)
+{
+ struct net_device *dev = dscc4_to_dev(dpriv);
+ u32 state;
+ int cur, loop = 0;
+
+try:
+ cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+ state = dpriv->iqtx[cur];
+ if (!state) {
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
+ state);
+ if ((debug > 1) && (loop > 1))
+ printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
+ if (loop && netif_queue_stopped(dev))
+ if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
+ netif_wake_queue(dev);
+
+ if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
+ !dscc4_tx_done(dpriv))
+ dscc4_do_tx(dpriv, dev);
+ return;
+ }
+ loop++;
+ dpriv->iqtx[cur] = 0;
+ dpriv->iqtx_current++;
+
+ if (state_check(state, dpriv, dev, "Tx") < 0)
+ return;
+
+ if (state & SccEvt) {
+ if (state & Alls) {
+ struct net_device_stats *stats = hdlc_stats(dev);
+ struct sk_buff *skb;
+ struct TxFD *tx_fd;
+
+ if (debug > 2)
+ dscc4_tx_print(dev, dpriv, "Alls");
+ /*
+ * DataComplete can't be trusted for Tx completion.
+ * Cf errata DS5 p.8
+ */
+ cur = dpriv->tx_dirty%TX_RING_SIZE;
+ tx_fd = dpriv->tx_fd + cur;
+ skb = dpriv->tx_skbuff[cur];
+ if (skb) {
+ pci_unmap_single(ppriv->pdev, tx_fd->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (tx_fd->state & FrameEnd) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ }
+ dev_kfree_skb_irq(skb);
+ dpriv->tx_skbuff[cur] = NULL;
+ ++dpriv->tx_dirty;
+ } else {
+ if (debug > 1)
+ printk(KERN_ERR "%s Tx: NULL skb %d\n",
+ dev->name, cur);
+ }
+ /*
+ * If the driver ends up sending crap on the wire, it
+ * will be way easier to diagnose than the (not so)
+ * random freeze induced by null sized tx frames.
+ */
+ tx_fd->data = tx_fd->next;
+ tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
+ tx_fd->complete = 0x00000000;
+ tx_fd->jiffies = 0;
+
+ if (!(state &= ~Alls))
+ goto try;
+ }
+ /*
+ * Transmit Data Underrun
+ */
+ if (state & Xdu) {
+ printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
+ dpriv->flags = NeedIDT;
+ /* Tx reset */
+ writel(MTFi | Rdt,
+ dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
+ writel(Action, dpriv->base_addr + GCMDR);
+ return;
+ }
+ if (state & Cts) {
+ printk(KERN_INFO "%s: CTS transition\n", dev->name);
+ if (!(state &= ~Cts)) /* DEBUG */
+ goto try;
+ }
+ if (state & Xmr) {
+ /* Frame needs to be sent again - FIXME */
+ printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
+ if (!(state &= ~Xmr)) /* DEBUG */
+ goto try;
+ }
+ if (state & Xpr) {
+ void __iomem *scc_addr;
+ unsigned long ring;
+ int i;
+
+ /*
+ * - the busy condition happens (sometimes);
+ * - it doesn't seem to make the handler unreliable.
+ */
+ for (i = 1; i; i <<= 1) {
+ if (!(scc_readl_star(dpriv, dev) & SccBusy))
+ break;
+ }
+ if (!i)
+ printk(KERN_INFO "%s busy in irq\n", dev->name);
+
+ scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
+ /* Keep this order: IDT before IDR */
+ if (dpriv->flags & NeedIDT) {
+ if (debug > 2)
+ dscc4_tx_print(dev, dpriv, "Xpr");
+ ring = dpriv->tx_fd_dma +
+ (dpriv->tx_dirty%TX_RING_SIZE)*
+ sizeof(struct TxFD);
+ writel(ring, scc_addr + CH0BTDA);
+ dscc4_do_tx(dpriv, dev);
+ writel(MTFi | Idt, scc_addr + CH0CFG);
+ if (dscc4_do_action(dev, "IDT") < 0)
+ goto err_xpr;
+ dpriv->flags &= ~NeedIDT;
+ }
+ if (dpriv->flags & NeedIDR) {
+ ring = dpriv->rx_fd_dma +
+ (dpriv->rx_current%RX_RING_SIZE)*
+ sizeof(struct RxFD);
+ writel(ring, scc_addr + CH0BRDA);
+ dscc4_rx_update(dpriv, dev);
+ writel(MTFi | Idr, scc_addr + CH0CFG);
+ if (dscc4_do_action(dev, "IDR") < 0)
+ goto err_xpr;
+ dpriv->flags &= ~NeedIDR;
+ smp_wmb();
+ /* Activate receiver and misc */
+ scc_writel(0x08050008, dpriv, dev, CCR2);
+ }
+ err_xpr:
+ if (!(state &= ~Xpr))
+ goto try;
+ }
+ if (state & Cd) {
+ if (debug > 0)
+ printk(KERN_INFO "%s: CD transition\n", dev->name);
+ if (!(state &= ~Cd)) /* DEBUG */
+ goto try;
+ }
+ } else { /* ! SccEvt */
+ if (state & Hi) {
+#ifdef DSCC4_POLLING
+ while (!dscc4_tx_poll(dpriv, dev));
+#endif
+ printk(KERN_INFO "%s: Tx Hi\n", dev->name);
+ state &= ~Hi;
+ }
+ if (state & Err) {
+ printk(KERN_INFO "%s: Tx ERR\n", dev->name);
+ hdlc_stats(dev)->tx_errors++;
+ state &= ~Err;
+ }
+ }
+ goto try;
+}
+
+static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
+ struct dscc4_dev_priv *dpriv)
+{
+ struct net_device *dev = dscc4_to_dev(dpriv);
+ u32 state;
+ int cur;
+
+try:
+ cur = dpriv->iqrx_current%IRQ_RING_SIZE;
+ state = dpriv->iqrx[cur];
+ if (!state)
+ return;
+ dpriv->iqrx[cur] = 0;
+ dpriv->iqrx_current++;
+
+ if (state_check(state, dpriv, dev, "Rx") < 0)
+ return;
+
+ if (!(state & SccEvt)){
+ struct RxFD *rx_fd;
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
+ state);
+ state &= 0x00ffffff;
+ if (state & Err) { /* Hold or reset */
+ printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
+ cur = dpriv->rx_current%RX_RING_SIZE;
+ rx_fd = dpriv->rx_fd + cur;
+ /*
+ * Presume we're not facing a DMAC receiver reset.
+ * As we use the rx size-filtering feature of the
+ * DSCC4, the beginning of a new frame is waiting in
+ * the rx fifo. I bet a Receive Data Overflow will
+ * happen most of the time but let's try and avoid it.
+ * Btw (as for RDO) if one experiences ERR whereas
+ * the system looks rather idle, there may be a
+ * problem with latency. In this case, increasing
+ * RX_RING_SIZE may help.
+ */
+ //while (dpriv->rx_needs_refill) {
+ while (!(rx_fd->state1 & Hold)) {
+ rx_fd++;
+ cur++;
+ if (!(cur = cur%RX_RING_SIZE))
+ rx_fd = dpriv->rx_fd;
+ }
+ //dpriv->rx_needs_refill--;
+ try_get_rx_skb(dpriv, dev);
+ if (!rx_fd->data)
+ goto try;
+ rx_fd->state1 &= ~Hold;
+ rx_fd->state2 = 0x00000000;
+ rx_fd->end = 0xbabeface;
+ //}
+ goto try;
+ }
+ if (state & Fi) {
+ dscc4_rx_skb(dpriv, dev);
+ goto try;
+ }
+ if (state & Hi ) { /* HI bit */
+ printk(KERN_INFO "%s: Rx Hi\n", dev->name);
+ state &= ~Hi;
+ goto try;
+ }
+ } else { /* SccEvt */
+ if (debug > 1) {
+ //FIXME: verify that all the events are covered here
+ static struct {
+ u32 mask;
+ const char *irq_name;
+ } evts[] = {
+ { 0x00008000, "TIN"},
+ { 0x00000020, "RSC"},
+ { 0x00000010, "PCE"},
+ { 0x00000008, "PLLA"},
+ { 0, NULL}
+ }, *evt;
+
+ for (evt = evts; evt->irq_name; evt++) {
+ if (state & evt->mask) {
+ printk(KERN_DEBUG "%s: %s\n",
+ dev->name, evt->irq_name);
+ if (!(state &= ~evt->mask))
+ goto try;
+ }
+ }
+ } else {
+ if (!(state &= ~0x0000c03c))
+ goto try;
+ }
+ if (state & Cts) {
+ printk(KERN_INFO "%s: CTS transition\n", dev->name);
+ if (!(state &= ~Cts)) /* DEBUG */
+ goto try;
+ }
+ /*
+ * Receive Data Overflow (FIXME: fscked)
+ */
+ if (state & Rdo) {
+ struct RxFD *rx_fd;
+ void __iomem *scc_addr;
+ int cur;
+
+ //if (debug)
+ // dscc4_rx_dump(dpriv);
+ scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
+
+ scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
+ /*
+ * This has no effect. Why ?
+ * ORed with TxSccRes, one sees the CFG ack (for
+ * the TX part only).
+ */
+ scc_writel(RxSccRes, dpriv, dev, CMDR);
+ dpriv->flags |= RdoSet;
+
+ /*
+ * Let's try and save something in the received data.
+ * rx_current must be incremented at least once to
+ * avoid HOLD in the BRDA-to-be-pointed desc.
+ */
+ do {
+ cur = dpriv->rx_current++%RX_RING_SIZE;
+ rx_fd = dpriv->rx_fd + cur;
+ if (!(rx_fd->state2 & DataComplete))
+ break;
+ if (rx_fd->state2 & FrameAborted) {
+ hdlc_stats(dev)->rx_over_errors++;
+ rx_fd->state1 |= Hold;
+ rx_fd->state2 = 0x00000000;
+ rx_fd->end = 0xbabeface;
+ } else
+ dscc4_rx_skb(dpriv, dev);
+ } while (1);
+
+ if (debug > 0) {
+ if (dpriv->flags & RdoSet)
+ printk(KERN_DEBUG
+ "%s: no RDO in Rx data\n", DRV_NAME);
+ }
+#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
+ /*
+ * FIXME: must the reset be this violent ?
+ */
+#warning "FIXME: CH0BRDA"
+ writel(dpriv->rx_fd_dma +
+ (dpriv->rx_current%RX_RING_SIZE)*
+ sizeof(struct RxFD), scc_addr + CH0BRDA);
+ writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
+ if (dscc4_do_action(dev, "RDR") < 0) {
+ printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
+ dev->name, "RDR");
+ goto rdo_end;
+ }
+ writel(MTFi|Idr, scc_addr + CH0CFG);
+ if (dscc4_do_action(dev, "IDR") < 0) {
+ printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
+ dev->name, "IDR");
+ goto rdo_end;
+ }
+ rdo_end:
+#endif
+ scc_patchl(0, RxActivate, dpriv, dev, CCR2);
+ goto try;
+ }
+ if (state & Cd) {
+ printk(KERN_INFO "%s: CD transition\n", dev->name);
+ if (!(state &= ~Cd)) /* DEBUG */
+ goto try;
+ }
+ if (state & Flex) {
+ printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
+ if (!(state &= ~Flex))
+ goto try;
+ }
+ }
+}
+
+/*
+ * I had expected the following to work for the first descriptor
+ * (tx_fd->state = 0xc0000000)
+ * - Hold=1 (don't try and branch to the next descriptor);
+ * - No=0 (I want an empty data section, i.e. size=0);
+ * - Fe=1 (required by No=0 or we got an Err irq and must reset).
+ * It failed and locked solid. Thus the introduction of a dummy skb.
+ * Problem is acknowledged in errata sheet DS5. Joy :o/
+ */
+struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(DUMMY_SKB_SIZE);
+ if (skb) {
+ int last = dpriv->tx_dirty%TX_RING_SIZE;
+ struct TxFD *tx_fd = dpriv->tx_fd + last;
+
+ skb->len = DUMMY_SKB_SIZE;
+ memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE);
+ tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
+ tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
+ DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
+ dpriv->tx_skbuff[last] = skb;
+ }
+ return skb;
+}
+
+static int dscc4_init_ring(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+ struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct TxFD *tx_fd;
+ struct RxFD *rx_fd;
+ void *ring;
+ int i;
+
+ ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
+ if (!ring)
+ goto err_out;
+ dpriv->rx_fd = rx_fd = (struct RxFD *) ring;
+
+ ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
+ if (!ring)
+ goto err_free_dma_rx;
+ dpriv->tx_fd = tx_fd = (struct TxFD *) ring;
+
+ memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
+ dpriv->tx_dirty = 0xffffffff;
+ i = dpriv->tx_current = 0;
+ do {
+ tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
+ tx_fd->complete = 0x00000000;
+ /* FIXME: NULL should be ok - to be tried */
+ tx_fd->data = dpriv->tx_fd_dma;
+ (tx_fd++)->next = (u32)(dpriv->tx_fd_dma +
+ (++i%TX_RING_SIZE)*sizeof(*tx_fd));
+ } while (i < TX_RING_SIZE);
+
+ if (dscc4_init_dummy_skb(dpriv) < 0)
+ goto err_free_dma_tx;
+
+ memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
+ i = dpriv->rx_dirty = dpriv->rx_current = 0;
+ do {
+ /* size set by the host. Multiple of 4 bytes please */
+ rx_fd->state1 = HiDesc;
+ rx_fd->state2 = 0x00000000;
+ rx_fd->end = 0xbabeface;
+ rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
+ // FIXME: return value is checked but the handling looks suspect
+ if (try_get_rx_skb(dpriv, dev) >= 0)
+ dpriv->rx_dirty++;
+ (rx_fd++)->next = (u32)(dpriv->rx_fd_dma +
+ (++i%RX_RING_SIZE)*sizeof(*rx_fd));
+ } while (i < RX_RING_SIZE);
+
+ return 0;
+
+err_free_dma_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
+err_free_dma_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
+err_out:
+ return -ENOMEM;
+}
+
+static void __devexit dscc4_remove_one(struct pci_dev *pdev)
+{
+ struct dscc4_pci_priv *ppriv;
+ struct dscc4_dev_priv *root;
+ void __iomem *ioaddr;
+ int i;
+
+ ppriv = pci_get_drvdata(pdev);
+ root = ppriv->root;
+
+ ioaddr = root->base_addr;
+
+ dscc4_pci_reset(pdev, ioaddr);
+
+ free_irq(pdev->irq, root);
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
+ ppriv->iqcfg_dma);
+ for (i = 0; i < dev_per_card; i++) {
+ struct dscc4_dev_priv *dpriv = root + i;
+
+ dscc4_release_ring(dpriv);
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqrx, dpriv->iqrx_dma);
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqtx, dpriv->iqtx_dma);
+ }
+
+ dscc4_free1(pdev);
+
+ iounmap(ioaddr);
+
+ pci_release_region(pdev, 1);
+ pci_release_region(pdev, 0);
+
+ pci_disable_device(pdev);
+}
+
+static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK &&
+ encoding != ENCODING_FM_SPACE &&
+ encoding != ENCODING_MANCHESTER)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC16_PR0_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT &&
+ parity != PARITY_CRC32_PR0_CCITT &&
+ parity != PARITY_CRC32_PR1_CCITT)
+ return -EINVAL;
+
+ dpriv->encoding = encoding;
+ dpriv->parity = parity;
+ return 0;
+}
+
+#ifndef MODULE
+static int __init dscc4_setup(char *str)
+{
+ int *args[] = { &debug, &quartz, NULL }, **p = args;
+
+ while (*p && (get_option(&str, *p) == 2))
+ p++;
+ return 1;
+}
+
+__setup("dscc4.setup=", dscc4_setup);
+#endif
+
+static struct pci_device_id dscc4_pci_tbl[] = {
+ { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
+
+static struct pci_driver dscc4_driver = {
+ .name = DRV_NAME,
+ .id_table = dscc4_pci_tbl,
+ .probe = dscc4_init_one,
+ .remove = __devexit_p(dscc4_remove_one),
+};
+
+static int __init dscc4_init_module(void)
+{
+ return pci_module_init(&dscc4_driver);
+}
+
+static void __exit dscc4_cleanup_module(void)
+{
+ pci_unregister_driver(&dscc4_driver);
+}
+
+module_init(dscc4_init_module);
+module_exit(dscc4_cleanup_module);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
new file mode 100644
index 000000000000..7575b799ce53
--- /dev/null
+++ b/drivers/net/wan/farsync.c
@@ -0,0 +1,2712 @@
+/*
+ * FarSync WAN driver for Linux (2.6.x kernel version)
+ *
+ * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
+ *
+ * Copyright (C) 2001-2004 FarSite Communications Ltd.
+ * www.farsite.co.uk
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
+ * Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/if.h>
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "farsync.h"
+
+/*
+ * Module info
+ */
+MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
+MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
+MODULE_LICENSE("GPL");
+
+/* Driver configuration and global parameters
+ * ==========================================
+ */
+
+/* Number of ports (per card) and cards supported
+ */
+#define FST_MAX_PORTS 4
+#define FST_MAX_CARDS 32
+
+/* Default parameters for the link
+ */
+#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
+ * useful; the syncppp module forces
+ * this down assuming a slower line, I
+ * guess.
+ */
+#define FST_TXQ_DEPTH 16 /* This one is for the buffering
+ * of frames on the way down to the card
+ * so that we can keep the card busy
+ * and maximise throughput
+ */
+#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
+ * network layer */
+#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
+ * control from network layer */
+#define FST_MAX_MTU 8000 /* Huge but possible */
+#define FST_DEF_MTU 1500 /* Common sane value */
+
+#define FST_TX_TIMEOUT (2*HZ)
+
+#ifdef ARPHRD_RAWHDLC
+#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
+#else
+#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
+#endif
+
+/*
+ * Module parameters and associated variables
+ */
+int fst_txq_low = FST_LOW_WATER_MARK;
+int fst_txq_high = FST_HIGH_WATER_MARK;
+int fst_max_reads = 7;
+int fst_excluded_cards = 0;
+int fst_excluded_list[FST_MAX_CARDS];
+
+module_param(fst_txq_low, int, 0);
+module_param(fst_txq_high, int, 0);
+module_param(fst_max_reads, int, 0);
+module_param(fst_excluded_cards, int, 0);
+module_param_array(fst_excluded_list, int, NULL, 0);
+
+/* Card shared memory layout
+ * =========================
+ */
+#pragma pack(1)
+
+/* This information is derived in part from the FarSite FarSync Smc.h
+ * file. Unfortunately various name clashes and the non-portability of the
+ * bit field declarations in that file have meant that I have chosen to
+ * recreate the information here.
+ *
+ * The SMC (Shared Memory Configuration) has a version number that is
+ * incremented every time there is a significant change. This number can
+ * be used to check that we have not got out of step with the firmware
+ * contained in the .CDE files.
+ */
+#define SMC_VERSION 24
+
+#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
+
+#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
+ * configuration structure */
+#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
+ * buffers */
+
+#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
+#define LEN_RX_BUFFER 8192
+
+#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */
+#define LEN_SMALL_RX_BUFFER 256
+
+#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */
+#define NUM_RX_BUFFER 8
+
+/* Interrupt retry time in milliseconds */
+#define INT_RETRY_TIME 2
+
+/* The Am186CH/CC processors support a SmartDMA mode using circular pools
+ * of buffer descriptors. The structure is almost identical to that used
+ * in the LANCE Ethernet controllers. Details available as PDF from the
+ * AMD web site: http://www.amd.com/products/epd/processors/\
+ * 2.16bitcont/3.am186cxfa/a21914/21914.pdf
+ */
+struct txdesc { /* Transmit descriptor */
+ volatile u16 ladr; /* Low order address of packet. This is a
+ * linear address in the Am186 memory space
+ */
+ volatile u8 hadr; /* High order address. Low 4 bits only, high 4
+ * bits must be zero
+ */
+ volatile u8 bits; /* Status and config */
+ volatile u16 bcnt; /* 2s complement of packet size in low 15 bits.
+ * Transmit terminal count interrupt enable in
+ * top bit.
+ */
+ u16 unused; /* Not used in Tx */
+};
+
+struct rxdesc { /* Receive descriptor */
+ volatile u16 ladr; /* Low order address of packet */
+ volatile u8 hadr; /* High order address */
+ volatile u8 bits; /* Status and config */
+ volatile u16 bcnt; /* 2s complement of buffer size in low 15 bits.
+ * Receive terminal count interrupt enable in
+ * top bit.
+ */
+ volatile u16 mcnt; /* Message byte count (15 bits) */
+};
+
+/* Convert a length into the 15 bit 2's complement */
+/* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */
+/* Since we need to set the high bit to enable the completion interrupt this
+ * can be made a lot simpler
+ */
+#define cnv_bcnt(len) (-(len))
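+
+/*
+ * Example (illustration only): cnv_bcnt(1500) == -1500, i.e. 0xFA24 in the
+ * 16-bit bcnt field. The low 15 bits hold the 2's complement byte count and
+ * the top bit is set, enabling the terminal count interrupt described above.
+ */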
+
+/* Status and config bits for the above */
+#define DMA_OWN 0x80 /* SmartDMA owns the descriptor */
+#define TX_STP 0x02 /* Tx: start of packet */
+#define TX_ENP 0x01 /* Tx: end of packet */
+#define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */
+#define RX_FRAM 0x20 /* Rx: framing error */
+#define RX_OFLO 0x10 /* Rx: overflow error */
+#define RX_CRC 0x08 /* Rx: CRC error */
+#define RX_HBUF 0x04 /* Rx: buffer error */
+#define RX_STP 0x02 /* Rx: start of packet */
+#define RX_ENP 0x01 /* Rx: end of packet */
+
+/* Interrupts from the card are caused by various events which are presented
+ * in a circular buffer, as several events may be processed on one physical int
+ */
+#define MAX_CIRBUFF 32
+
+struct cirbuff {
+ u8 rdindex; /* read, then increment and wrap */
+ u8 wrindex; /* write, then increment and wrap */
+ u8 evntbuff[MAX_CIRBUFF];
+};
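+
+/*
+ * Consumption sketch (not the driver's actual ISR code, just the protocol
+ * implied by the field comments above): the host reads evntbuff[rdindex],
+ * then advances rdindex modulo MAX_CIRBUFF, e.g.
+ * rdindex = (rdindex + 1) % MAX_CIRBUFF, and keeps going until rdindex
+ * catches up with wrindex, at which point the circular buffer is empty.
+ */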
+
+/* Interrupt event codes.
+ * Where appropriate the two low order bits indicate the port number
+ */
+#define CTLA_CHG 0x18 /* Control signal changed */
+#define CTLB_CHG 0x19
+#define CTLC_CHG 0x1A
+#define CTLD_CHG 0x1B
+
+#define INIT_CPLT 0x20 /* Initialisation complete */
+#define INIT_FAIL 0x21 /* Initialisation failed */
+
+#define ABTA_SENT 0x24 /* Abort sent */
+#define ABTB_SENT 0x25
+#define ABTC_SENT 0x26
+#define ABTD_SENT 0x27
+
+#define TXA_UNDF 0x28 /* Transmission underflow */
+#define TXB_UNDF 0x29
+#define TXC_UNDF 0x2A
+#define TXD_UNDF 0x2B
+
+#define F56_INT 0x2C
+#define M32_INT 0x2D
+
+#define TE1_ALMA 0x30
+
+/* Port physical configuration. See farsync.h for field values */
+struct port_cfg {
+ u16 lineInterface; /* Physical interface type */
+ u8 x25op; /* Unused at present */
+ u8 internalClock; /* 1 => internal clock, 0 => external */
+ u8 transparentMode; /* 1 => on, 0 => off */
+ u8 invertClock; /* 0 => normal, 1 => inverted */
+ u8 padBytes[6]; /* Padding */
+ u32 lineSpeed; /* Speed in bps */
+};
+
+/* TE1 port physical configuration */
+struct su_config {
+ u32 dataRate;
+ u8 clocking;
+ u8 framing;
+ u8 structure;
+ u8 interface;
+ u8 coding;
+ u8 lineBuildOut;
+ u8 equalizer;
+ u8 transparentMode;
+ u8 loopMode;
+ u8 range;
+ u8 txBufferMode;
+ u8 rxBufferMode;
+ u8 startingSlot;
+ u8 losThreshold;
+ u8 enableIdleCode;
+ u8 idleCode;
+ u8 spare[44];
+};
+
+/* TE1 Status */
+struct su_status {
+ u32 receiveBufferDelay;
+ u32 framingErrorCount;
+ u32 codeViolationCount;
+ u32 crcErrorCount;
+ u32 lineAttenuation;
+ u8 portStarted;
+ u8 lossOfSignal;
+ u8 receiveRemoteAlarm;
+ u8 alarmIndicationSignal;
+ u8 spare[40];
+};
+
+/* Finally sling all the above together into the shared memory structure.
+ * Sorry it's a hodgepodge of arrays, structures and unused bits; it's been
+ * evolving under NT for some time so I guess we're stuck with it.
+ * The structure starts at offset SMC_BASE.
+ * See farsync.h for some field values.
+ */
+struct fst_shared {
+ /* DMA descriptor rings */
+ struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
+ struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];
+
+ /* Obsolete small buffers */
+ u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
+ u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];
+
+ u8 taskStatus; /* 0x00 => initialising, 0x01 => running,
+ * 0xFF => halted
+ */
+
+ u8 interruptHandshake; /* Set to 0x01 by adapter to signal interrupt,
+ * set to 0xEE by host to acknowledge interrupt
+ */
+
+ u16 smcVersion; /* Must match SMC_VERSION */
+
+ u32 smcFirmwareVersion; /* 0xIIVVRRBB where II = product ID, VV = major
+ * version, RR = revision and BB = build
+ */
+
+ u16 txa_done; /* Obsolete completion flags */
+ u16 rxa_done;
+ u16 txb_done;
+ u16 rxb_done;
+ u16 txc_done;
+ u16 rxc_done;
+ u16 txd_done;
+ u16 rxd_done;
+
+ u16 mailbox[4]; /* Diagnostics mailbox. Not used */
+
+ struct cirbuff interruptEvent; /* interrupt causes */
+
+ u32 v24IpSts[FST_MAX_PORTS]; /* V.24 control input status */
+ u32 v24OpSts[FST_MAX_PORTS]; /* V.24 control output status */
+
+ struct port_cfg portConfig[FST_MAX_PORTS];
+
+ u16 clockStatus[FST_MAX_PORTS]; /* lsb: 0=> present, 1=> absent */
+
+ u16 cableStatus; /* lsb: 0=> present, 1=> absent */
+
+ u16 txDescrIndex[FST_MAX_PORTS]; /* transmit descriptor ring index */
+ u16 rxDescrIndex[FST_MAX_PORTS]; /* receive descriptor ring index */
+
+ u16 portMailbox[FST_MAX_PORTS][2]; /* command, modifier */
+ u16 cardMailbox[4]; /* Not used */
+
+ /* Number of times the card thinks the host has
+ * missed an interrupt by not acknowledging
+ * within 2 ms (I guess NT has problems)
+ */
+ u32 interruptRetryCount;
+
+ /* Driver private data used as an ID. We'll not
+ * use this as I'd rather keep such things
+ * in main memory rather than on the PCI bus
+ */
+ u32 portHandle[FST_MAX_PORTS];
+
+ /* Count of Tx underflows for stats */
+ u32 transmitBufferUnderflow[FST_MAX_PORTS];
+
+ /* Debounced V.24 control input status */
+ u32 v24DebouncedSts[FST_MAX_PORTS];
+
+ /* Adapter debounce timers. Don't touch */
+ u32 ctsTimer[FST_MAX_PORTS];
+ u32 ctsTimerRun[FST_MAX_PORTS];
+ u32 dcdTimer[FST_MAX_PORTS];
+ u32 dcdTimerRun[FST_MAX_PORTS];
+
+ u32 numberOfPorts; /* Number of ports detected at startup */
+
+ u16 _reserved[64];
+
+ u16 cardMode; /* Bit-mask to enable features:
+ * Bit 0: 1 enables LED identify mode
+ */
+
+ u16 portScheduleOffset;
+
+ struct su_config suConfig; /* TE1 Bits */
+ struct su_status suStatus;
+
+ u32 endOfSmcSignature; /* endOfSmcSignature MUST be the last member of
+ * the structure and marks the end of shared
+ * memory. Adapter code initializes it as
+ * END_SIG.
+ */
+};
+
+/* endOfSmcSignature value */
+#define END_SIG 0x12345678
+
+/* Mailbox values. (portMailbox) */
+#define NOP 0 /* No operation */
+#define ACK 1 /* Positive acknowledgement to PC driver */
+#define NAK 2 /* Negative acknowledgement to PC driver */
+#define STARTPORT 3 /* Start an HDLC port */
+#define STOPPORT 4 /* Stop an HDLC port */
+#define ABORTTX 5 /* Abort the transmitter for a port */
+#define SETV24O 6 /* Set V24 outputs */
+
+/* PLX Chip Register Offsets */
+#define CNTRL_9052 0x50 /* Control Register */
+#define CNTRL_9054 0x6c /* Control Register */
+
+#define INTCSR_9052 0x4c /* Interrupt control/status register */
+#define INTCSR_9054 0x68 /* Interrupt control/status register */
+
+/* 9054 DMA Registers */
+/*
+ * Note that we will be using DMA Channel 0 for copying rx data
+ * and Channel 1 for copying tx data
+ */
+#define DMAMODE0 0x80
+#define DMAPADR0 0x84
+#define DMALADR0 0x88
+#define DMASIZ0 0x8c
+#define DMADPR0 0x90
+#define DMAMODE1 0x94
+#define DMAPADR1 0x98
+#define DMALADR1 0x9c
+#define DMASIZ1 0xa0
+#define DMADPR1 0xa4
+#define DMACSR0 0xa8
+#define DMACSR1 0xa9
+#define DMAARB 0xac
+#define DMATHR 0xb0
+#define DMADAC0 0xb4
+#define DMADAC1 0xb8
+#define DMAMARBR 0xac
+
+#define FST_MIN_DMA_LEN 64
+#define FST_RX_DMA_INT 0x01
+#define FST_TX_DMA_INT 0x02
+#define FST_CARD_INT 0x04
+
+/* Larger buffers are positioned in memory at offset BFM_BASE */
+struct buf_window {
+ u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
+ u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
+};
+
+/* Calculate offset of a buffer object within the shared memory window */
+#define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X))
+
+#pragma pack()
+
+/* Device driver private information
+ * =================================
+ */
+/* Per port (line or channel) information
+ */
+struct fst_port_info {
+ struct net_device *dev; /* Device struct - must be first */
+ struct fst_card_info *card; /* Card we're associated with */
+ int index; /* Port index on the card */
+ int hwif; /* Line hardware (lineInterface copy) */
+ int run; /* Port is running */
+ int mode; /* Normal or FarSync raw */
+ int rxpos; /* Next Rx buffer to use */
+ int txpos; /* Next Tx buffer to use */
+ int txipos; /* Next Tx buffer to check for free */
+ int start; /* Indication of start/stop to network */
+ /*
+ * A sixteen entry transmit queue
+ */
+ int txqs; /* index to get next buffer to tx */
+ int txqe; /* index to queue next packet */
+ struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */
+ int rxqdepth;
+};
+
+/* Per card information
+ */
+struct fst_card_info {
+ char __iomem *mem; /* Card memory mapped to kernel space */
+ char __iomem *ctlmem; /* Control memory for PCI cards */
+ unsigned int phys_mem; /* Physical memory window address */
+ unsigned int phys_ctlmem; /* Physical control memory address */
+ unsigned int irq; /* Interrupt request line number */
+ unsigned int nports; /* Number of serial ports */
+ unsigned int type; /* Type index of card */
+ unsigned int state; /* State of card */
+ spinlock_t card_lock; /* Lock for SMP access */
+ unsigned short pci_conf; /* PCI card config in I/O space */
+ /* Per port info */
+ struct fst_port_info ports[FST_MAX_PORTS];
+ struct pci_dev *device; /* Information about the pci device */
+ int card_no; /* Inst of the card on the system */
+ int family; /* TxP or TxU */
+ int dmarx_in_progress;
+ int dmatx_in_progress;
+ unsigned long int_count;
+ unsigned long int_time_ave;
+ void *rx_dma_handle_host;
+ dma_addr_t rx_dma_handle_card;
+ void *tx_dma_handle_host;
+ dma_addr_t tx_dma_handle_card;
+ struct sk_buff *dma_skb_rx;
+ struct fst_port_info *dma_port_rx;
+ struct fst_port_info *dma_port_tx;
+ int dma_len_rx;
+ int dma_len_tx;
+ int dma_txpos;
+ int dma_rxpos;
+};
+
+/* Convert an HDLC device pointer into a port info pointer and similar */
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+#define port_to_dev(P) ((P)->dev)
+
+
+/*
+ * Shared memory window access macros
+ *
+ * We have a nice memory based structure above, which could be directly
+ * mapped on i386 but might not work on other architectures unless we use
+ * the readb,w,l and writeb,w,l macros. Unfortunately these macros take
+ * physical offsets so we have to convert. The only saving grace is that
+ * this should all collapse back to a simple indirection eventually.
+ */
+#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
+
+#define FST_RDB(C,E) readb ((C)->mem + WIN_OFFSET(E))
+#define FST_RDW(C,E) readw ((C)->mem + WIN_OFFSET(E))
+#define FST_RDL(C,E) readl ((C)->mem + WIN_OFFSET(E))
+
+#define FST_WRB(C,E,B) writeb ((B), (C)->mem + WIN_OFFSET(E))
+#define FST_WRW(C,E,W) writew ((W), (C)->mem + WIN_OFFSET(E))
+#define FST_WRL(C,E,L) writel ((L), (C)->mem + WIN_OFFSET(E))
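+
+/*
+ * Usage example (illustrative): FST_RDB(card, taskStatus) is effectively
+ * readb(card->mem + SMC_BASE + offsetof(struct fst_shared, taskStatus)),
+ * i.e. it reads the firmware task status byte through the shared memory
+ * window; FST_WRW(card, cardMode, 0x0001) would set bit 0 of cardMode
+ * (LED identify mode) in the same way.
+ */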
+
+/*
+ * Debug support
+ */
+#if FST_DEBUG
+
+static int fst_debug_mask = { FST_DEBUG };
+
+/* Most common debug activity is to print something if the corresponding bit
+ * is set in the debug mask. Note: this uses a non-ANSI extension in GCC to
+ * support variable numbers of macro parameters. The inverted if prevents us
+ * from eating someone else's else clause.
+ */
+#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \
+ ; \
+ else \
+ printk ( KERN_DEBUG FST_NAME ": " fmt, ## A )
+
+#else
+#define dbg(X...) /* NOP */
+#endif
+
+/* Printing short cuts
+ */
+#define printk_err(fmt,A...) printk ( KERN_ERR FST_NAME ": " fmt, ## A )
+#define printk_warn(fmt,A...) printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
+#define printk_info(fmt,A...) printk ( KERN_INFO FST_NAME ": " fmt, ## A )
+
+/*
+ * PCI ID lookup table
+ */
+static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
+
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
+ {0,} /* End */
+};
+
+MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
+
+/*
+ * Device Driver Work Queues
+ *
+ * So that we don't spend too much time processing events in the
+ * Interrupt Service routine, we will declare a work queue per Card
+ * and make the ISR schedule a task in the queue for later execution.
+ * In the 2.4 kernel we used to use the immediate queue for BHs.
+ * Now that they are gone, tasklets seem to be much better than work
+ * queues.
+ */
+
+static void do_bottom_half_tx(struct fst_card_info *card);
+static void do_bottom_half_rx(struct fst_card_info *card);
+static void fst_process_tx_work_q(unsigned long work_q);
+static void fst_process_int_work_q(unsigned long work_q);
+
+DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
+DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+
+struct fst_card_info *fst_card_array[FST_MAX_CARDS];
+spinlock_t fst_work_q_lock;
+u64 fst_work_txq;
+u64 fst_work_intq;
+
+static void
+fst_q_work_item(u64 * queue, int card_index)
+{
+ unsigned long flags;
+ u64 mask;
+
+ /*
+ * Grab the queue exclusively
+ */
+ spin_lock_irqsave(&fst_work_q_lock, flags);
+
+ /*
+ * Making an entry in the queue is simply a matter of setting
+ * a bit for the card indicating that there is work to do in the
+ * bottom half for the card. Note the limitation of 64 cards.
+ * That ought to be enough
+ */
+ mask = 1 << card_index;
+ *queue |= mask;
+ spin_unlock_irqrestore(&fst_work_q_lock, flags);
+}
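+
+/*
+ * Likely usage pattern (an assumption - the interrupt handler itself is not
+ * shown here): the ISR calls fst_q_work_item(&fst_work_txq, card->card_no)
+ * to flag the card and then schedules the matching tasklet, e.g.
+ * tasklet_schedule(&fst_tx_task), so that fst_process_tx_work_q() runs the
+ * bottom half outside interrupt context.
+ */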
+
+static void
+fst_process_tx_work_q(unsigned long /*void **/work_q)
+{
+ unsigned long flags;
+ u64 work_txq;
+ int i;
+
+ /*
+ * Grab the queue exclusively
+ */
+ dbg(DBG_TX, "fst_process_tx_work_q\n");
+ spin_lock_irqsave(&fst_work_q_lock, flags);
+ work_txq = fst_work_txq;
+ fst_work_txq = 0;
+ spin_unlock_irqrestore(&fst_work_q_lock, flags);
+
+ /*
+ * Call the bottom half for each card with work waiting
+ */
+ for (i = 0; i < FST_MAX_CARDS; i++) {
+ if (work_txq & 0x01) {
+ if (fst_card_array[i] != NULL) {
+ dbg(DBG_TX, "Calling tx bh for card %d\n", i);
+ do_bottom_half_tx(fst_card_array[i]);
+ }
+ }
+ work_txq = work_txq >> 1;
+ }
+}
+
+static void
+fst_process_int_work_q(unsigned long /*void **/work_q)
+{
+ unsigned long flags;
+ u64 work_intq;
+ int i;
+
+ /*
+ * Grab the queue exclusively
+ */
+ dbg(DBG_INTR, "fst_process_int_work_q\n");
+ spin_lock_irqsave(&fst_work_q_lock, flags);
+ work_intq = fst_work_intq;
+ fst_work_intq = 0;
+ spin_unlock_irqrestore(&fst_work_q_lock, flags);
+
+ /*
+ * Call the bottom half for each card with work waiting
+ */
+ for (i = 0; i < FST_MAX_CARDS; i++) {
+ if (work_intq & 0x01) {
+ if (fst_card_array[i] != NULL) {
+ dbg(DBG_INTR,
+ "Calling rx & tx bh for card %d\n", i);
+ do_bottom_half_rx(fst_card_array[i]);
+ do_bottom_half_tx(fst_card_array[i]);
+ }
+ }
+ work_intq = work_intq >> 1;
+ }
+}
+
+/* Card control functions
+ * ======================
+ */
+/* Place the processor in reset state
+ *
+ * Used to be a simple write to card control space but a glitch in the latest
+ * AMD Am186CH processor means that we now have to do it by asserting and de-
+ * asserting the PLX chip PCI Adapter Software Reset, i.e. bit 30 of the
+ * CNTRL register at offset CNTRL_9052. Note the updates for the TXU.
+ */
+static inline void
+fst_cpureset(struct fst_card_info *card)
+{
+ unsigned char interrupt_line_register;
+ unsigned long j = jiffies + 1;
+ unsigned int regval;
+
+ if (card->family == FST_FAMILY_TXU) {
+ if (pci_read_config_byte
+ (card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
+ dbg(DBG_ASS,
+ "Error in reading interrupt line register\n");
+ }
+ /*
+ * Assert PLX software reset and Am186 hardware reset
+ * and then deassert the PLX software reset while keeping the 186 in reset
+ */
+ outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
+ outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+ /*
+ * We are delaying here to allow the 9054 to reset itself
+ */
+ j = jiffies + 1;
+ while (jiffies < j)
+ /* Do nothing */ ;
+ outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
+ /*
+ * We are delaying here to allow the 9054 to reload its eeprom
+ */
+ j = jiffies + 1;
+ while (jiffies < j)
+ /* Do nothing */ ;
+ outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+
+ if (pci_write_config_byte
+ (card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
+ dbg(DBG_ASS,
+ "Error in writing interrupt line register\n");
+ }
+
+ } else {
+ regval = inl(card->pci_conf + CNTRL_9052);
+
+ outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
+ outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
+ }
+}
+
+/* Release the processor from reset
+ */
+static inline void
+fst_cpurelease(struct fst_card_info *card)
+{
+ if (card->family == FST_FAMILY_TXU) {
+ /*
+ * Force posted writes to complete
+ */
+ (void) readb(card->mem);
+
+ /*
+ * Release LRESET DO = 1
+ * Then release Local Hold, DO = 1
+ */
+ outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
+ outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+ } else {
+ (void) readb(card->ctlmem);
+ }
+}
+
+/* Clear the cards interrupt flag
+ */
+static inline void
+fst_clear_intr(struct fst_card_info *card)
+{
+ if (card->family == FST_FAMILY_TXU) {
+ (void) readb(card->ctlmem);
+ } else {
+ /* Poke the appropriate PLX chip register (same as enabling interrupts)
+ */
+ outw(0x0543, card->pci_conf + INTCSR_9052);
+ }
+}
+
+/* Enable card interrupts
+ */
+static inline void
+fst_enable_intr(struct fst_card_info *card)
+{
+ if (card->family == FST_FAMILY_TXU) {
+ outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
+ } else {
+ outw(0x0543, card->pci_conf + INTCSR_9052);
+ }
+}
+
+/* Disable card interrupts
+ */
+static inline void
+fst_disable_intr(struct fst_card_info *card)
+{
+ if (card->family == FST_FAMILY_TXU) {
+ outl(0x00000000, card->pci_conf + INTCSR_9054);
+ } else {
+ outw(0x0000, card->pci_conf + INTCSR_9052);
+ }
+}
+
+/* Process the result of trying to pass a received frame up the stack
+ */
+static void
+fst_process_rx_status(int rx_status, char *name)
+{
+ switch (rx_status) {
+ case NET_RX_SUCCESS:
+ {
+ /*
+ * Nothing to do here
+ */
+ break;
+ }
+
+ case NET_RX_CN_LOW:
+ {
+ dbg(DBG_ASS, "%s: Receive Low Congestion\n", name);
+ break;
+ }
+
+ case NET_RX_CN_MOD:
+ {
+ dbg(DBG_ASS, "%s: Receive Moderate Congestion\n", name);
+ break;
+ }
+
+ case NET_RX_CN_HIGH:
+ {
+ dbg(DBG_ASS, "%s: Receive High Congestion\n", name);
+ break;
+ }
+
+ case NET_RX_DROP:
+ {
+ dbg(DBG_ASS, "%s: Received packet dropped\n", name);
+ break;
+ }
+ }
+}
+
+/* Initialise DMA for PLX 9054
+ */
+static inline void
+fst_init_dma(struct fst_card_info *card)
+{
+ /*
+ * This is only required for the PLX 9054
+ */
+ if (card->family == FST_FAMILY_TXU) {
+ pci_set_master(card->device);
+ outl(0x00020441, card->pci_conf + DMAMODE0);
+ outl(0x00020441, card->pci_conf + DMAMODE1);
+ outl(0x0, card->pci_conf + DMATHR);
+ }
+}
+
+/* Tx dma complete interrupt
+ */
+static void
+fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
+ int len, int txpos)
+{
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ /*
+ * Everything is now set, just tell the card to go
+ */
+ dbg(DBG_TX, "fst_tx_dma_complete\n");
+ FST_WRB(card, txDescrRing[port->index][txpos].bits,
+ DMA_OWN | TX_STP | TX_ENP);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ dev->trans_start = jiffies;
+}
+
+/*
+ * Mark it for our own raw sockets interface
+ */
+static unsigned short farsync_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+ return htons(ETH_P_CUST);
+}
+
+/* Rx dma complete interrupt
+ */
+static void
+fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
+ int len, struct sk_buff *skb, int rxp)
+{
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+ int pi;
+ int rx_status;
+
+ dbg(DBG_TX, "fst_rx_dma_complete\n");
+ pi = port->index;
+ memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
+
+ /* Reset buffer descriptor */
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+ /* Update stats */
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ /* Push upstream */
+ dbg(DBG_RX, "Pushing the frame up the stack\n");
+ if (port->mode == FST_RAW)
+ skb->protocol = farsync_type_trans(skb, dev);
+ else
+ skb->protocol = hdlc_type_trans(skb, dev);
+ rx_status = netif_rx(skb);
+ fst_process_rx_status(rx_status, port_to_dev(port)->name);
+ if (rx_status == NET_RX_DROP)
+ stats->rx_dropped++;
+ dev->last_rx = jiffies;
+}
+
+/*
+ * Receive a frame through the DMA
+ */
+static inline void
+fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
+ unsigned char *mem, int len)
+{
+ /*
+ * This routine will setup the DMA and start it
+ */
+
+ dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+ if (card->dmarx_in_progress) {
+ dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
+ }
+
+ outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */
+ outl(len, card->pci_conf + DMASIZ0); /* for this length */
+ outl(0x0000000c, card->pci_conf + DMADPR0); /* In this direction */
+
+ /*
+ * We use the dmarx_in_progress flag to flag the channel as busy
+ */
+ card->dmarx_in_progress = 1;
+ outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
+}
+
+/*
+ * Send a frame through the DMA
+ */
+static inline void
+fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
+ unsigned char *mem, int len)
+{
+ /*
+ * This routine will setup the DMA and start it.
+ */
+
+ dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
+ if (card->dmatx_in_progress) {
+ dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
+ }
+
+ outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */
+ outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */
+ outl(len, card->pci_conf + DMASIZ1); /* for this length */
+ outl(0x00000004, card->pci_conf + DMADPR1); /* In this direction */
+
+ /*
+ * We use the dmatx_in_progress to flag the channel as busy
+ */
+ card->dmatx_in_progress = 1;
+ outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
+}
+
+/* Issue a Mailbox command for a port.
+ * Note we issue them on a fire and forget basis, not expecting to see an
+ * error and not waiting for completion.
+ */
+static void
+fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
+{
+ struct fst_card_info *card;
+ unsigned short mbval;
+ unsigned long flags;
+ int safety;
+
+ card = port->card;
+ spin_lock_irqsave(&card->card_lock, flags);
+ mbval = FST_RDW(card, portMailbox[port->index][0]);
+
+ safety = 0;
+ /* Wait for any previous command to complete */
+ while (mbval > NAK) {
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ schedule_timeout(1);
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ if (++safety > 2000) {
+ printk_err("Mailbox safety timeout\n");
+ break;
+ }
+
+ mbval = FST_RDW(card, portMailbox[port->index][0]);
+ }
+ if (safety > 0) {
+ dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
+ }
+ if (mbval == NAK) {
+ dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
+ }
+
+ FST_WRW(card, portMailbox[port->index][0], cmd);
+
+ if (cmd == ABORTTX || cmd == STARTPORT) {
+ port->txpos = 0;
+ port->txipos = 0;
+ port->start = 0;
+ }
+
+ spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/* Port output signals control
+ */
+static inline void
+fst_op_raise(struct fst_port_info *port, unsigned int outputs)
+{
+ outputs |= FST_RDL(port->card, v24OpSts[port->index]);
+ FST_WRL(port->card, v24OpSts[port->index], outputs);
+
+ if (port->run)
+ fst_issue_cmd(port, SETV24O);
+}
+
+static inline void
+fst_op_lower(struct fst_port_info *port, unsigned int outputs)
+{
+ outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
+ FST_WRL(port->card, v24OpSts[port->index], outputs);
+
+ if (port->run)
+ fst_issue_cmd(port, SETV24O);
+}
+
+/*
+ * Setup port Rx buffers
+ */
+static void
+fst_rx_config(struct fst_port_info *port)
+{
+ int i;
+ int pi;
+ unsigned int offset;
+ unsigned long flags;
+ struct fst_card_info *card;
+
+ pi = port->index;
+ card = port->card;
+ spin_lock_irqsave(&card->card_lock, flags);
+ for (i = 0; i < NUM_RX_BUFFER; i++) {
+ offset = BUF_OFFSET(rxBuffer[pi][i][0]);
+
+ FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
+ FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
+ FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
+ FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
+ }
+ port->rxpos = 0;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/*
+ * Setup port Tx buffers
+ */
+static void
+fst_tx_config(struct fst_port_info *port)
+{
+ int i;
+ int pi;
+ unsigned int offset;
+ unsigned long flags;
+ struct fst_card_info *card;
+
+ pi = port->index;
+ card = port->card;
+ spin_lock_irqsave(&card->card_lock, flags);
+ for (i = 0; i < NUM_TX_BUFFER; i++) {
+ offset = BUF_OFFSET(txBuffer[pi][i][0]);
+
+ FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
+ FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
+ FST_WRB(card, txDescrRing[pi][i].bits, 0);
+ }
+ port->txpos = 0;
+ port->txipos = 0;
+ port->start = 0;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/* TE1 Alarm change interrupt event
+ */
+static void
+fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
+{
+ u8 los;
+ u8 rra;
+ u8 ais;
+
+ los = FST_RDB(card, suStatus.lossOfSignal);
+ rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
+ ais = FST_RDB(card, suStatus.alarmIndicationSignal);
+
+ if (los) {
+ /*
+ * Lost the link
+ */
+ if (netif_carrier_ok(port_to_dev(port))) {
+ dbg(DBG_INTR, "Net carrier off\n");
+ netif_carrier_off(port_to_dev(port));
+ }
+ } else {
+ /*
+ * Link available
+ */
+ if (!netif_carrier_ok(port_to_dev(port))) {
+ dbg(DBG_INTR, "Net carrier on\n");
+ netif_carrier_on(port_to_dev(port));
+ }
+ }
+
+ if (los)
+ dbg(DBG_INTR, "Assert LOS Alarm\n");
+ else
+ dbg(DBG_INTR, "De-assert LOS Alarm\n");
+ if (rra)
+ dbg(DBG_INTR, "Assert RRA Alarm\n");
+ else
+ dbg(DBG_INTR, "De-assert RRA Alarm\n");
+
+ if (ais)
+ dbg(DBG_INTR, "Assert AIS Alarm\n");
+ else
+ dbg(DBG_INTR, "De-assert AIS Alarm\n");
+}
+
+/* Control signal change interrupt event
+ */
+static void
+fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
+{
+ int signals;
+
+ signals = FST_RDL(card, v24DebouncedSts[port->index]);
+
+ if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ ? IPSTS_INDICATE : IPSTS_DCD)) {
+ if (!netif_carrier_ok(port_to_dev(port))) {
+ dbg(DBG_INTR, "DCD active\n");
+ netif_carrier_on(port_to_dev(port));
+ }
+ } else {
+ if (netif_carrier_ok(port_to_dev(port))) {
+ dbg(DBG_INTR, "DCD lost\n");
+ netif_carrier_off(port_to_dev(port));
+ }
+ }
+}
+
+/* Log Rx Errors
+ */
+static void
+fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
+ unsigned char dmabits, int rxp, unsigned short len)
+{
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ /*
+ * Increment the appropriate error counter
+ */
+ stats->rx_errors++;
+ if (dmabits & RX_OFLO) {
+ stats->rx_fifo_errors++;
+ dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
+ card->card_no, port->index, rxp);
+ }
+ if (dmabits & RX_CRC) {
+ stats->rx_crc_errors++;
+ dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
+ card->card_no, port->index);
+ }
+ if (dmabits & RX_FRAM) {
+ stats->rx_frame_errors++;
+ dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
+ card->card_no, port->index);
+ }
+ if (dmabits == (RX_STP | RX_ENP)) {
+ stats->rx_length_errors++;
+ dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
+ len, card->card_no, port->index);
+ }
+}
+
+/* Rx Error Recovery
+ */
+static void
+fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
+ unsigned char dmabits, int rxp, unsigned short len)
+{
+ int i;
+ int pi;
+
+ pi = port->index;
+ /*
+ * Discard buffer descriptors until we see the start of the
+ * next frame. Note that for long frames this could be in
+ * a subsequent interrupt.
+ */
+ i = 0;
+ while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+ rxp = (rxp+1) % NUM_RX_BUFFER;
+ if (++i > NUM_RX_BUFFER) {
+ dbg(DBG_ASS, "intr_rx: Discarding more bufs"
+ " than we have\n");
+ break;
+ }
+ dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
+ dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
+ }
+ dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
+
+ /* Discard the terminal buffer */
+ if (!(dmabits & DMA_OWN)) {
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+ rxp = (rxp+1) % NUM_RX_BUFFER;
+ }
+ port->rxpos = rxp;
+}
+
+/* Rx complete interrupt
+ */
+static void
+fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
+{
+ unsigned char dmabits;
+ int pi;
+ int rxp;
+ int rx_status;
+ unsigned short len;
+ struct sk_buff *skb;
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ /* Check we have a buffer to process */
+ pi = port->index;
+ rxp = port->rxpos;
+ dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
+ if (dmabits & DMA_OWN) {
+ dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
+ pi, rxp);
+ return;
+ }
+ if (card->dmarx_in_progress) {
+ return;
+ }
+
+ /* Get buffer length */
+ len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
+ /* Discard the CRC */
+ len -= 2;
+ if (len == 0) {
+ /*
+ * This seems to happen on the TE1 interface sometimes
+ * so throw the frame away and log the event.
+ */
+ printk_err("Frame received with 0 length. Card %d Port %d\n",
+ card->card_no, port->index);
+ /* Return descriptor to card */
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+ rxp = (rxp+1) % NUM_RX_BUFFER;
+ port->rxpos = rxp;
+ return;
+ }
+
+ /* Check buffer length and for other errors. We insist on one packet
+ * in one buffer. This simplifies things greatly and, since we've
+ * allocated 8K, it shouldn't be a real-world limitation
+ */
+ dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len);
+ if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
+ fst_log_rx_error(card, port, dmabits, rxp, len);
+ fst_recover_rx_error(card, port, dmabits, rxp, len);
+ return;
+ }
+
+ /* Allocate SKB */
+ if ((skb = dev_alloc_skb(len)) == NULL) {
+ dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
+
+ stats->rx_dropped++;
+
+ /* Return descriptor to card */
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+ rxp = (rxp+1) % NUM_RX_BUFFER;
+ port->rxpos = rxp;
+ return;
+ }
+
+ /*
+ * We know the length we need to receive, len.
+ * It's not worth using the DMA for reads of less than
+ * FST_MIN_DMA_LEN
+ */
+
+ if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
+ memcpy_fromio(skb_put(skb, len),
+ card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
+ len);
+
+ /* Reset buffer descriptor */
+ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+ /* Update stats */
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ /* Push upstream */
+ dbg(DBG_RX, "Pushing frame up the stack\n");
+ if (port->mode == FST_RAW)
+ skb->protocol = farsync_type_trans(skb, dev);
+ else
+ skb->protocol = hdlc_type_trans(skb, dev);
+ rx_status = netif_rx(skb);
+ fst_process_rx_status(rx_status, port_to_dev(port)->name);
+ if (rx_status == NET_RX_DROP) {
+ stats->rx_dropped++;
+ }
+ dev->last_rx = jiffies;
+ } else {
+ card->dma_skb_rx = skb;
+ card->dma_port_rx = port;
+ card->dma_len_rx = len;
+ card->dma_rxpos = rxp;
+ fst_rx_dma(card, (char *) card->rx_dma_handle_card,
+ (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+ }
+ if (rxp != port->rxpos) {
+ dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
+ dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
+ }
+ rxp = (rxp+1) % NUM_RX_BUFFER;
+ port->rxpos = rxp;
+}
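+
+/*
+ * Note on the receive path above: frames shorter than FST_MIN_DMA_LEN,
+ * and all frames on the TXP family, are copied straight out of shared
+ * memory with memcpy_fromio() and pushed upstream immediately; longer
+ * frames on the TXU family are fetched with fst_rx_dma() and handed
+ * upstream later from fst_rx_dma_complete() when the DMA interrupt
+ * arrives.
+ */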
+
+/*
+ * The bottom halves of the ISR
+ *
+ */
+
+static void
+do_bottom_half_tx(struct fst_card_info *card)
+{
+ struct fst_port_info *port;
+ int pi;
+ int txq_length;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct net_device *dev;
+ struct net_device_stats *stats;
+
+ /*
+ * Find a free buffer for the transmit
+ * Step through each port on this card
+ */
+
+ dbg(DBG_TX, "do_bottom_half_tx\n");
+ for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
+ if (!port->run)
+ continue;
+
+ dev = port_to_dev(port);
+ stats = hdlc_stats(dev);
+ while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits)
+ & DMA_OWN) && !(card->dmatx_in_progress)) {
+ /*
+ * There doesn't seem to be a txdone event per se.
+ * We have to deduce it by checking the DMA_OWN
+ * bit on the next buffer we think we can use
+ */
+ spin_lock_irqsave(&card->card_lock, flags);
+ if ((txq_length = port->txqe - port->txqs) < 0) {
+ /*
+ * This is the case where one has wrapped and the
+ * maths gives us a negative number
+ */
+ txq_length = txq_length + FST_TXQ_DEPTH;
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ if (txq_length > 0) {
+ /*
+ * There is something to send
+ */
+ spin_lock_irqsave(&card->card_lock, flags);
+ skb = port->txq[port->txqs];
+ port->txqs++;
+ if (port->txqs == FST_TXQ_DEPTH) {
+ port->txqs = 0;
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ /*
+ * copy the data and set the required indicators on the
+ * card.
+ */
+ FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
+ cnv_bcnt(skb->len));
+ if ((skb->len < FST_MIN_DMA_LEN)
+ || (card->family == FST_FAMILY_TXP)) {
+ /* Enqueue the packet with normal io */
+ memcpy_toio(card->mem +
+ BUF_OFFSET(txBuffer[pi][port->txpos][0]),
+ skb->data, skb->len);
+ FST_WRB(card,
+ txDescrRing[pi][port->txpos].bits,
+ DMA_OWN | TX_STP | TX_ENP);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ } else {
+ /* Or do it through dma */
+ memcpy(card->tx_dma_handle_host,
+ skb->data, skb->len);
+ card->dma_port_tx = port;
+ card->dma_len_tx = skb->len;
+ card->dma_txpos = port->txpos;
+ fst_tx_dma(card,
+ (char *) card->tx_dma_handle_card,
+ (char *) BUF_OFFSET(txBuffer[pi][port->txpos][0]),
+ skb->len);
+ }
+ if (++port->txpos >= NUM_TX_BUFFER)
+ port->txpos = 0;
+ /*
+ * If we have flow control on, can we now release it?
+ */
+ if (port->start) {
+ if (txq_length < fst_txq_low) {
+ netif_wake_queue(port_to_dev(port));
+ port->start = 0;
+ }
+ }
+ dev_kfree_skb(skb);
+ } else {
+ /*
+ * Nothing to send so break out of the while loop
+ */
+ break;
+ }
+ }
+ }
+}
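+
+/*
+ * Illustrative helper, not referenced by the driver: the circular tx
+ * queue occupancy calculation that do_bottom_half_tx() and
+ * fst_start_xmit() perform inline. txqe is the next free slot and txqs
+ * the next slot to transmit; once txqe has wrapped past the end of the
+ * array the difference goes negative and adding FST_TXQ_DEPTH recovers
+ * the true occupancy (e.g. txqe = 2, txqs = 14, depth 16 gives 4).
+ */
+static inline int
+fst_txq_occupancy(struct fst_port_info *port)
+{
+ int len = port->txqe - port->txqs;
+
+ if (len < 0)
+ len += FST_TXQ_DEPTH;
+ return len;
+}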
+
+static void
+do_bottom_half_rx(struct fst_card_info *card)
+{
+ struct fst_port_info *port;
+ int pi;
+ int rx_count = 0;
+
+ /* Check for rx completions on all ports on this card */
+ dbg(DBG_RX, "do_bottom_half_rx\n");
+ for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
+ if (!port->run)
+ continue;
+
+ while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
+ & DMA_OWN) && !(card->dmarx_in_progress)) {
+ if (rx_count > fst_max_reads) {
+ /*
+ * Don't spend forever in receive processing
+ * Schedule another event
+ */
+ fst_q_work_item(&fst_work_intq, card->card_no);
+ tasklet_schedule(&fst_int_task);
+ break; /* Leave the loop */
+ }
+ fst_intr_rx(card, port);
+ rx_count++;
+ }
+ }
+}
+
+/*
+ * The interrupt service routine
+ * Dev_id is our fst_card_info pointer
+ */
+irqreturn_t
+fst_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct fst_card_info *card;
+ struct fst_port_info *port;
+ int rdidx; /* Event buffer indices */
+ int wridx;
+ int event; /* Actual event for processing */
+ unsigned int dma_intcsr = 0;
+ unsigned int do_card_interrupt;
+ unsigned int int_retry_count;
+
+ if ((card = dev_id) == NULL) {
+ dbg(DBG_INTR, "intr: spurious %d\n", irq);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Check to see if the interrupt was for this card
+ * return if not
+ * Note that the call to clear the interrupt is important
+ */
+ dbg(DBG_INTR, "intr: %d %p\n", irq, card);
+ if (card->state != FST_RUNNING) {
+ printk_err
+ ("Interrupt received for card %d in a non running state (%d)\n",
+ card->card_no, card->state);
+
+ /*
+ * It is possible to really be running, i.e. we have re-loaded
+ * a running card
+ * Clear and reprime the interrupt source
+ */
+ fst_clear_intr(card);
+ return IRQ_HANDLED;
+ }
+
+ /* Clear and reprime the interrupt source */
+ fst_clear_intr(card);
+
+ /*
+ * Is the interrupt for this card (handshake == 1)
+ */
+ do_card_interrupt = 0;
+ if (FST_RDB(card, interruptHandshake) == 1) {
+ do_card_interrupt += FST_CARD_INT;
+ /* Set the software acknowledge */
+ FST_WRB(card, interruptHandshake, 0xEE);
+ }
+ if (card->family == FST_FAMILY_TXU) {
+ /*
+ * Is it a DMA Interrupt
+ */
+ dma_intcsr = inl(card->pci_conf + INTCSR_9054);
+ if (dma_intcsr & 0x00200000) {
+ /*
+ * DMA Channel 0 (Rx transfer complete)
+ */
+ dbg(DBG_RX, "DMA Rx xfer complete\n");
+ outb(0x8, card->pci_conf + DMACSR0);
+ fst_rx_dma_complete(card, card->dma_port_rx,
+ card->dma_len_rx, card->dma_skb_rx,
+ card->dma_rxpos);
+ card->dmarx_in_progress = 0;
+ do_card_interrupt += FST_RX_DMA_INT;
+ }
+ if (dma_intcsr & 0x00400000) {
+ /*
+ * DMA Channel 1 (Tx transfer complete)
+ */
+ dbg(DBG_TX, "DMA Tx xfer complete\n");
+ outb(0x8, card->pci_conf + DMACSR1);
+ fst_tx_dma_complete(card, card->dma_port_tx,
+ card->dma_len_tx, card->dma_txpos);
+ card->dmatx_in_progress = 0;
+ do_card_interrupt += FST_TX_DMA_INT;
+ }
+ }
+
+ /*
+ * Have we been missing Interrupts
+ */
+ int_retry_count = FST_RDL(card, interruptRetryCount);
+ if (int_retry_count) {
+ dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
+ card->card_no, int_retry_count);
+ FST_WRL(card, interruptRetryCount, 0);
+ }
+
+ if (!do_card_interrupt) {
+ return IRQ_HANDLED;
+ }
+
+ /* Schedule the bottom half of the ISR */
+ fst_q_work_item(&fst_work_intq, card->card_no);
+ tasklet_schedule(&fst_int_task);
+
+ /* Drain the event queue */
+ rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
+ wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
+ while (rdidx != wridx) {
+ event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
+ port = &card->ports[event & 0x03];
+
+ dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);
+
+ switch (event) {
+ case TE1_ALMA:
+ dbg(DBG_INTR, "TE1 Alarm intr\n");
+ if (port->run)
+ fst_intr_te1_alarm(card, port);
+ break;
+
+ case CTLA_CHG:
+ case CTLB_CHG:
+ case CTLC_CHG:
+ case CTLD_CHG:
+ if (port->run)
+ fst_intr_ctlchg(card, port);
+ break;
+
+ case ABTA_SENT:
+ case ABTB_SENT:
+ case ABTC_SENT:
+ case ABTD_SENT:
+ dbg(DBG_TX, "Abort complete port %d\n", port->index);
+ break;
+
+ case TXA_UNDF:
+ case TXB_UNDF:
+ case TXC_UNDF:
+ case TXD_UNDF:
+ /* Difficult to see how we'd get this given that we
+ * always load up the entire packet for DMA.
+ */
+ dbg(DBG_TX, "Tx underflow port %d\n", port->index);
+ hdlc_stats(port_to_dev(port))->tx_errors++;
+ hdlc_stats(port_to_dev(port))->tx_fifo_errors++;
+ dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
+ card->card_no, port->index);
+ break;
+
+ case INIT_CPLT:
+ dbg(DBG_INIT, "Card init OK intr\n");
+ break;
+
+ case INIT_FAIL:
+ dbg(DBG_INIT, "Card init FAILED intr\n");
+ card->state = FST_IFAILED;
+ break;
+
+ default:
+ printk_err("intr: unknown card event %d. ignored\n",
+ event);
+ break;
+ }
+
+ /* Bump and wrap the index */
+ if (++rdidx >= MAX_CIRBUFF)
+ rdidx = 0;
+ }
+ FST_WRB(card, interruptEvent.rdindex, rdidx);
+ return IRQ_HANDLED;
+}
+
+/* Check that the shared memory configuration is one that we can handle
+ * and that some basic parameters are correct
+ */
+static void
+check_started_ok(struct fst_card_info *card)
+{
+ int i;
+
+ /* Check structure version and end marker */
+ if (FST_RDW(card, smcVersion) != SMC_VERSION) {
+ printk_err("Bad shared memory version %d expected %d\n",
+ FST_RDW(card, smcVersion), SMC_VERSION);
+ card->state = FST_BADVERSION;
+ return;
+ }
+ if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
+ printk_err("Missing shared memory signature\n");
+ card->state = FST_BADVERSION;
+ return;
+ }
+ /* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
+ if ((i = FST_RDB(card, taskStatus)) == 0x01) {
+ card->state = FST_RUNNING;
+ } else if (i == 0xFF) {
+ printk_err("Firmware initialisation failed. Card halted\n");
+ card->state = FST_HALTED;
+ return;
+ } else if (i != 0x00) {
+ printk_err("Unknown firmware status 0x%x\n", i);
+ card->state = FST_HALTED;
+ return;
+ }
+
+ /* Finally check the number of ports reported by firmware against the
+ * number we assumed at card detection. A mismatch should never happen with
+ * existing firmware etc. so we just report it for the moment.
+ */
+ if (FST_RDL(card, numberOfPorts) != card->nports) {
+ printk_warn("Port count mismatch on card %d."
+ " Firmware thinks %d we say %d\n", card->card_no,
+ FST_RDL(card, numberOfPorts), card->nports);
+ }
+}
+
+static int
+set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
+ struct fstioc_info *info)
+{
+ int err;
+ unsigned char my_framing;
+
+ /* Set things according to the user set valid flags
+ * Several of the old options have been invalidated/replaced by the
+ * generic hdlc package.
+ */
+ err = 0;
+ if (info->valid & FSTVAL_PROTO) {
+ if (info->proto == FST_RAW)
+ port->mode = FST_RAW;
+ else
+ port->mode = FST_GEN_HDLC;
+ }
+
+ if (info->valid & FSTVAL_CABLE)
+ err = -EINVAL;
+
+ if (info->valid & FSTVAL_SPEED)
+ err = -EINVAL;
+
+ if (info->valid & FSTVAL_PHASE)
+ FST_WRB(card, portConfig[port->index].invertClock,
+ info->invertClock);
+ if (info->valid & FSTVAL_MODE)
+ FST_WRW(card, cardMode, info->cardMode);
+ if (info->valid & FSTVAL_TE1) {
+ FST_WRL(card, suConfig.dataRate, info->lineSpeed);
+ FST_WRB(card, suConfig.clocking, info->clockSource);
+ my_framing = FRAMING_E1;
+ if (info->framing == E1)
+ my_framing = FRAMING_E1;
+ if (info->framing == T1)
+ my_framing = FRAMING_T1;
+ if (info->framing == J1)
+ my_framing = FRAMING_J1;
+ FST_WRB(card, suConfig.framing, my_framing);
+ FST_WRB(card, suConfig.structure, info->structure);
+ FST_WRB(card, suConfig.interface, info->interface);
+ FST_WRB(card, suConfig.coding, info->coding);
+ FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
+ FST_WRB(card, suConfig.equalizer, info->equalizer);
+ FST_WRB(card, suConfig.transparentMode, info->transparentMode);
+ FST_WRB(card, suConfig.loopMode, info->loopMode);
+ FST_WRB(card, suConfig.range, info->range);
+ FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
+ FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
+ FST_WRB(card, suConfig.startingSlot, info->startingSlot);
+ FST_WRB(card, suConfig.losThreshold, info->losThreshold);
+ if (info->idleCode)
+ FST_WRB(card, suConfig.enableIdleCode, 1);
+ else
+ FST_WRB(card, suConfig.enableIdleCode, 0);
+ FST_WRB(card, suConfig.idleCode, info->idleCode);
+#if FST_DEBUG
+ if (info->valid & FSTVAL_TE1) {
+ printk("Setting TE1 data\n");
+ printk("Line Speed = %d\n", info->lineSpeed);
+ printk("Start slot = %d\n", info->startingSlot);
+ printk("Clock source = %d\n", info->clockSource);
+ printk("Framing = %d\n", my_framing);
+ printk("Structure = %d\n", info->structure);
+ printk("interface = %d\n", info->interface);
+ printk("Coding = %d\n", info->coding);
+ printk("Line build out = %d\n", info->lineBuildOut);
+ printk("Equaliser = %d\n", info->equalizer);
+ printk("Transparent mode = %d\n",
+ info->transparentMode);
+ printk("Loop mode = %d\n", info->loopMode);
+ printk("Range = %d\n", info->range);
+ printk("Tx Buffer mode = %d\n", info->txBufferMode);
+ printk("Rx Buffer mode = %d\n", info->rxBufferMode);
+ printk("LOS Threshold = %d\n", info->losThreshold);
+ printk("Idle Code = %d\n", info->idleCode);
+ }
+#endif
+ }
+#if FST_DEBUG
+ if (info->valid & FSTVAL_DEBUG) {
+ fst_debug_mask = info->debug;
+ }
+#endif
+
+ return err;
+}
+
+static void
+gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
+ struct fstioc_info *info)
+{
+ int i;
+
+ memset(info, 0, sizeof (struct fstioc_info));
+
+ i = port->index;
+ info->kernelVersion = LINUX_VERSION_CODE;
+ info->nports = card->nports;
+ info->type = card->type;
+ info->state = card->state;
+ info->proto = FST_GEN_HDLC;
+ info->index = i;
+#if FST_DEBUG
+ info->debug = fst_debug_mask;
+#endif
+
+ /* Only mark information as valid if card is running.
+ * Copy the data anyway in case it is useful for diagnostics
+ */
+ info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
+#if FST_DEBUG
+ | FSTVAL_DEBUG
+#endif
+ ;
+
+ info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
+ info->internalClock = FST_RDB(card, portConfig[i].internalClock);
+ info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
+ info->invertClock = FST_RDB(card, portConfig[i].invertClock);
+ info->v24IpSts = FST_RDL(card, v24IpSts[i]);
+ info->v24OpSts = FST_RDL(card, v24OpSts[i]);
+ info->clockStatus = FST_RDW(card, clockStatus[i]);
+ info->cableStatus = FST_RDW(card, cableStatus);
+ info->cardMode = FST_RDW(card, cardMode);
+ info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
+
+ /*
+ * The T2U can report cable presence for both A or B
+ * in bits 0 and 1 of cableStatus. See which port we are and
+ * do the mapping.
+ */
+ if (card->family == FST_FAMILY_TXU) {
+ if (port->index == 0) {
+ /*
+ * Port A
+ */
+ info->cableStatus = info->cableStatus & 1;
+ } else {
+ /*
+ * Port B
+ */
+ info->cableStatus = info->cableStatus >> 1;
+ info->cableStatus = info->cableStatus & 1;
+ }
+ }
+ /*
+ * Some additional bits if we are TE1
+ */
+ if (card->type == FST_TYPE_TE1) {
+ info->lineSpeed = FST_RDL(card, suConfig.dataRate);
+ info->clockSource = FST_RDB(card, suConfig.clocking);
+ info->framing = FST_RDB(card, suConfig.framing);
+ info->structure = FST_RDB(card, suConfig.structure);
+ info->interface = FST_RDB(card, suConfig.interface);
+ info->coding = FST_RDB(card, suConfig.coding);
+ info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
+ info->equalizer = FST_RDB(card, suConfig.equalizer);
+ info->loopMode = FST_RDB(card, suConfig.loopMode);
+ info->range = FST_RDB(card, suConfig.range);
+ info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
+ info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
+ info->startingSlot = FST_RDB(card, suConfig.startingSlot);
+ info->losThreshold = FST_RDB(card, suConfig.losThreshold);
+ if (FST_RDB(card, suConfig.enableIdleCode))
+ info->idleCode = FST_RDB(card, suConfig.idleCode);
+ else
+ info->idleCode = 0;
+ info->receiveBufferDelay =
+ FST_RDL(card, suStatus.receiveBufferDelay);
+ info->framingErrorCount =
+ FST_RDL(card, suStatus.framingErrorCount);
+ info->codeViolationCount =
+ FST_RDL(card, suStatus.codeViolationCount);
+ info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
+ info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
+ info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
+ info->receiveRemoteAlarm =
+ FST_RDB(card, suStatus.receiveRemoteAlarm);
+ info->alarmIndicationSignal =
+ FST_RDB(card, suStatus.alarmIndicationSignal);
+ }
+}
+
+static int
+fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
+ struct ifreq *ifr)
+{
+ sync_serial_settings sync;
+ int i;
+
+ if (ifr->ifr_settings.size != sizeof (sync)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user
+ (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
+ return -EFAULT;
+ }
+
+ if (sync.loopback)
+ return -EINVAL;
+
+ i = port->index;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_IFACE_V35:
+ FST_WRW(card, portConfig[i].lineInterface, V35);
+ port->hwif = V35;
+ break;
+
+ case IF_IFACE_V24:
+ FST_WRW(card, portConfig[i].lineInterface, V24);
+ port->hwif = V24;
+ break;
+
+ case IF_IFACE_X21:
+ FST_WRW(card, portConfig[i].lineInterface, X21);
+ port->hwif = X21;
+ break;
+
+ case IF_IFACE_X21D:
+ FST_WRW(card, portConfig[i].lineInterface, X21D);
+ port->hwif = X21D;
+ break;
+
+ case IF_IFACE_T1:
+ FST_WRW(card, portConfig[i].lineInterface, T1);
+ port->hwif = T1;
+ break;
+
+ case IF_IFACE_E1:
+ FST_WRW(card, portConfig[i].lineInterface, E1);
+ port->hwif = E1;
+ break;
+
+ case IF_IFACE_SYNC_SERIAL:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (sync.clock_type) {
+ case CLOCK_EXT:
+ FST_WRB(card, portConfig[i].internalClock, EXTCLK);
+ break;
+
+ case CLOCK_INT:
+ FST_WRB(card, portConfig[i].internalClock, INTCLK);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
+ return 0;
+}
+
+static int
+fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+ struct ifreq *ifr)
+{
+ sync_serial_settings sync;
+ int i;
+
+ /* First check what line type is set, we'll default to reporting X.21
+ * if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
+ * changed
+ */
+ switch (port->hwif) {
+ case E1:
+ ifr->ifr_settings.type = IF_IFACE_E1;
+ break;
+ case T1:
+ ifr->ifr_settings.type = IF_IFACE_T1;
+ break;
+ case V35:
+ ifr->ifr_settings.type = IF_IFACE_V35;
+ break;
+ case V24:
+ ifr->ifr_settings.type = IF_IFACE_V24;
+ break;
+ case X21D:
+ ifr->ifr_settings.type = IF_IFACE_X21D;
+ break;
+ case X21:
+ default:
+ ifr->ifr_settings.type = IF_IFACE_X21;
+ break;
+ }
+ if (ifr->ifr_settings.size == 0) {
+ return 0; /* only type requested */
+ }
+ if (ifr->ifr_settings.size < sizeof (sync)) {
+ return -ENOMEM;
+ }
+
+ i = port->index;
+ sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+ /* Luckily the card and Linux use the same encoding here */
+ sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+ INTCLK ? CLOCK_INT : CLOCK_EXT;
+ sync.loopback = 0;
+
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
+ return -EFAULT;
+ }
+
+ ifr->ifr_settings.size = sizeof (sync);
+ return 0;
+}
+
+static int
+fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct fst_card_info *card;
+ struct fst_port_info *port;
+ struct fstioc_write wrthdr;
+ struct fstioc_info info;
+ unsigned long flags;
+
+ dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
+
+ port = dev_to_port(dev);
+ card = port->card;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case FSTCPURESET:
+ fst_cpureset(card);
+ card->state = FST_RESET;
+ return 0;
+
+ case FSTCPURELEASE:
+ fst_cpurelease(card);
+ card->state = FST_STARTING;
+ return 0;
+
+ case FSTWRITE: /* Code write (download) */
+
+ /* First copy in the header with the length and offset of data
+ * to write
+ */
+ if (ifr->ifr_data == NULL) {
+ return -EINVAL;
+ }
+ if (copy_from_user(&wrthdr, ifr->ifr_data,
+ sizeof (struct fstioc_write))) {
+ return -EFAULT;
+ }
+
+ /* Sanity check the parameters. We don't support partial writes
+ * when going over the top
+ */
+ if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE
+ || wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
+ return -ENXIO;
+ }
+
+ /* Now copy the data to the card.
+ * This will probably break on some architectures.
+ * I'll fix it when I have something to test on.
+ */
+ if (copy_from_user(card->mem + wrthdr.offset,
+ ifr->ifr_data + sizeof (struct fstioc_write),
+ wrthdr.size)) {
+ return -EFAULT;
+ }
+
+ /* Writes to the memory of a card in the reset state constitute
+ * a download
+ */
+ if (card->state == FST_RESET) {
+ card->state = FST_DOWNLOAD;
+ }
+ return 0;
+
+ case FSTGETCONF:
+
+ /* If card has just been started check the shared memory config
+ * version and marker
+ */
+ if (card->state == FST_STARTING) {
+ check_started_ok(card);
+
+ /* If everything checked out enable card interrupts */
+ if (card->state == FST_RUNNING) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ fst_enable_intr(card);
+ FST_WRB(card, interruptHandshake, 0xEE);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ }
+ }
+
+ if (ifr->ifr_data == NULL) {
+ return -EINVAL;
+ }
+
+ gather_conf_info(card, port, &info);
+
+ if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case FSTSETCONF:
+
+ /*
+ * Most of the settings have been moved to the generic ioctls
+ * this just covers debug and board ident now
+ */
+
+ if (card->state != FST_RUNNING) {
+ printk_err
+ ("Attempt to configure card %d in non-running state (%d)\n",
+ card->card_no, card->state);
+ return -EIO;
+ }
+ if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
+ return -EFAULT;
+ }
+
+ return set_conf_from_info(card, port, &info);
+
+ case SIOCWANDEV:
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ return fst_get_iface(card, port, ifr);
+
+ case IF_IFACE_SYNC_SERIAL:
+ case IF_IFACE_V35:
+ case IF_IFACE_V24:
+ case IF_IFACE_X21:
+ case IF_IFACE_X21D:
+ case IF_IFACE_T1:
+ case IF_IFACE_E1:
+ return fst_set_iface(card, port, ifr);
+
+ case IF_PROTO_RAW:
+ port->mode = FST_RAW;
+ return 0;
+
+ case IF_GET_PROTO:
+ if (port->mode == FST_RAW) {
+ ifr->ifr_settings.type = IF_PROTO_RAW;
+ return 0;
+ }
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ default:
+ port->mode = FST_GEN_HDLC;
+ dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
+ ifr->ifr_settings.type);
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+
+ default:
+ /* Not one of ours. Pass through to HDLC package */
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+static void
+fst_openport(struct fst_port_info *port)
+{
+ int signals;
+ int txq_length;
+
+ /* Only init things if card is actually running. This allows open to
+ * succeed for downloads etc.
+ */
+ if (port->card->state == FST_RUNNING) {
+ if (port->run) {
+ dbg(DBG_OPEN, "open: found port already running\n");
+
+ fst_issue_cmd(port, STOPPORT);
+ port->run = 0;
+ }
+
+ fst_rx_config(port);
+ fst_tx_config(port);
+ fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
+
+ fst_issue_cmd(port, STARTPORT);
+ port->run = 1;
+
+ signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
+ if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ ? IPSTS_INDICATE : IPSTS_DCD))
+ netif_carrier_on(port_to_dev(port));
+ else
+ netif_carrier_off(port_to_dev(port));
+
+ txq_length = port->txqe - port->txqs;
+ port->txqe = 0;
+ port->txqs = 0;
+ }
+
+}
+
+static void
+fst_closeport(struct fst_port_info *port)
+{
+ if (port->card->state == FST_RUNNING) {
+ if (port->run) {
+ port->run = 0;
+ fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);
+
+ fst_issue_cmd(port, STOPPORT);
+ } else {
+ dbg(DBG_OPEN, "close: port not running\n");
+ }
+ }
+}
+
+static int
+fst_open(struct net_device *dev)
+{
+ int err;
+ struct fst_port_info *port;
+
+ port = dev_to_port(dev);
+ if (!try_module_get(THIS_MODULE))
+ return -EBUSY;
+
+ if (port->mode != FST_RAW) {
+ err = hdlc_open(dev);
+ if (err)
+ return err;
+ }
+
+ fst_openport(port);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int
+fst_close(struct net_device *dev)
+{
+ struct fst_port_info *port;
+ struct fst_card_info *card;
+ unsigned char tx_dma_done;
+ unsigned char rx_dma_done;
+
+ port = dev_to_port(dev);
+ card = port->card;
+
+ tx_dma_done = inb(card->pci_conf + DMACSR1);
+ rx_dma_done = inb(card->pci_conf + DMACSR0);
+ dbg(DBG_OPEN,
+ "Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
+ card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress,
+ rx_dma_done);
+
+ netif_stop_queue(dev);
+ fst_closeport(dev_to_port(dev));
+ if (port->mode != FST_RAW) {
+ hdlc_close(dev);
+ }
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static int
+fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
+{
+ /*
+ * Setting currently fixed in FarSync card so we check and forget
+ */
+ if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+fst_tx_timeout(struct net_device *dev)
+{
+ struct fst_port_info *port;
+ struct fst_card_info *card;
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ port = dev_to_port(dev);
+ card = port->card;
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+ dbg(DBG_ASS, "Tx timeout card %d port %d\n",
+ card->card_no, port->index);
+ fst_issue_cmd(port, ABORTTX);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ port->start = 0;
+}
+
+static int
+fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fst_card_info *card;
+ struct fst_port_info *port;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ unsigned long flags;
+ int txq_length;
+
+ port = dev_to_port(dev);
+ card = port->card;
+ dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
+
+ /* Drop packet with error if we don't have carrier */
+ if (!netif_carrier_ok(dev)) {
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ stats->tx_carrier_errors++;
+ dbg(DBG_ASS,
+ "Tried to transmit but no carrier on card %d port %d\n",
+ card->card_no, port->index);
+ return 0;
+ }
+
+ /* Drop it if it's too big! MTU failure? */
+ if (skb->len > LEN_TX_BUFFER) {
+ dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
+ LEN_TX_BUFFER);
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ return 0;
+ }
+
+ /*
+ * We are always going to queue the packet
+ * so that the bottom half is the only place we tx from
+ * Check there is room in the port txq
+ */
+ spin_lock_irqsave(&card->card_lock, flags);
+ if ((txq_length = port->txqe - port->txqs) < 0) {
+ /*
+ * This is the case where the next free has wrapped but the
+ * last used hasn't
+ */
+ txq_length = txq_length + FST_TXQ_DEPTH;
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ if (txq_length > fst_txq_high) {
+ /*
+ * We have got enough buffers in the pipeline. Ask the network
+ * layer to stop sending frames down
+ */
+ netif_stop_queue(dev);
+ port->start = 1; /* I'm using this to signal stop sent up */
+ }
+
+ if (txq_length == FST_TXQ_DEPTH - 1) {
+ /*
+ * This shouldn't have happened but such is life
+ */
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
+ card->card_no, port->index);
+ return 0;
+ }
+
+ /*
+ * queue the buffer
+ */
+ spin_lock_irqsave(&card->card_lock, flags);
+ port->txq[port->txqe] = skb;
+ port->txqe++;
+ if (port->txqe == FST_TXQ_DEPTH)
+ port->txqe = 0;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ /* Schedule the bottom half which now does transmit processing */
+ fst_q_work_item(&fst_work_txq, card->card_no);
+ tasklet_schedule(&fst_tx_task);
+
+ return 0;
+}
+
+/*
+ * Card setup having checked hardware resources.
+ * Should be pretty bizarre if we get an error here (kernel memory
+ * exhaustion is one possibility). If we do see a problem we report it
+ * via a printk and leave the corresponding interface and all that follow
+ * disabled.
+ */
+static char *type_strings[] __devinitdata = {
+ "no hardware", /* Should never be seen */
+ "FarSync T2P",
+ "FarSync T4P",
+ "FarSync T1U",
+ "FarSync T2U",
+ "FarSync T4U",
+ "FarSync TE1"
+};
+
+static void __devinit
+fst_init_card(struct fst_card_info *card)
+{
+ int i;
+ int err;
+
+ /* We're working on a number of ports based on the card ID. If the
+ * firmware detects something different later (should never happen)
+ * we'll have to revise it in some way then.
+ */
+ for (i = 0; i < card->nports; i++) {
+ err = register_hdlc_device(card->ports[i].dev);
+ if (err < 0) {
+ int j;
+ printk_err ("Cannot register HDLC device for port %d"
+ " (errno %d)\n", i, -err );
+ for (j = i; j < card->nports; j++) {
+ free_netdev(card->ports[j].dev);
+ card->ports[j].dev = NULL;
+ }
+ card->nports = i;
+ break;
+ }
+ }
+
+ printk_info("%s-%s: %s IRQ%d, %d ports\n",
+ port_to_dev(&card->ports[0])->name,
+ port_to_dev(&card->ports[card->nports - 1])->name,
+ type_strings[card->type], card->irq, card->nports);
+}
+
+/*
+ * Initialise card when detected.
+ * Returns 0 to indicate success, or errno otherwise.
+ */
+static int __devinit
+fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int firsttime_done = 0;
+ static int no_of_cards_added = 0;
+ struct fst_card_info *card;
+ int err = 0;
+ int i;
+
+ if (!firsttime_done) {
+ printk_info("FarSync WAN driver " FST_USER_VERSION
+ " (c) 2001-2004 FarSite Communications Ltd.\n");
+ firsttime_done = 1;
+ dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
+ }
+
+ /*
+ * We are going to be clever and allow certain cards not to be
+ * configured. An exclude list can be provided in /etc/modules.conf
+ */
+ if (fst_excluded_cards != 0) {
+ /*
+ * There are cards to exclude
+ *
+ */
+ for (i = 0; i < fst_excluded_cards; i++) {
+ if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
+ printk_info("FarSync PCI device %d not assigned\n",
+ (pdev->devfn) >> 3);
+ return -EBUSY;
+ }
+ }
+ }
+
+ /* Allocate driver private data */
+ card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
+ if (card == NULL) {
+ printk_err("FarSync card found but insufficient memory for"
+ " driver storage\n");
+ return -ENOMEM;
+ }
+ memset(card, 0, sizeof (struct fst_card_info));
+
+ /* Try to enable the device */
+ if ((err = pci_enable_device(pdev)) != 0) {
+ printk_err("Failed to enable card. Err %d\n", -err);
+ kfree(card);
+ return err;
+ }
+
+ if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
+ printk_err("Failed to allocate regions. Err %d\n", -err);
+ pci_disable_device(pdev);
+ kfree(card);
+ return err;
+ }
+
+ /* Get virtual addresses of memory regions */
+ card->pci_conf = pci_resource_start(pdev, 1);
+ card->phys_mem = pci_resource_start(pdev, 2);
+ card->phys_ctlmem = pci_resource_start(pdev, 3);
+ if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
+ printk_err("Physical memory remap failed\n");
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(card);
+ return -ENODEV;
+ }
+ if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
+ printk_err("Control memory remap failed\n");
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(card);
+ return -ENODEV;
+ }
+ dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
+
+ /* Register the interrupt handler */
+ if (request_irq(pdev->irq, fst_intr, SA_SHIRQ, FST_DEV_NAME, card)) {
+ printk_err("Unable to register interrupt %d\n", card->irq);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ iounmap(card->ctlmem);
+ iounmap(card->mem);
+ kfree(card);
+ return -ENODEV;
+ }
+
+ /* Record info we need */
+ card->irq = pdev->irq;
+ card->type = ent->driver_data;
+ card->family = ((ent->driver_data == FST_TYPE_T2P) ||
+ (ent->driver_data == FST_TYPE_T4P))
+ ? FST_FAMILY_TXP : FST_FAMILY_TXU;
+ if ((ent->driver_data == FST_TYPE_T1U) ||
+ (ent->driver_data == FST_TYPE_TE1))
+ card->nports = 1;
+ else
+ card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
+ (ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
+
+ card->state = FST_UNINIT;
+ spin_lock_init ( &card->card_lock );
+
+ for ( i = 0 ; i < card->nports ; i++ ) {
+ struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
+ hdlc_device *hdlc;
+ if (!dev) {
+ while (i--)
+ free_netdev(card->ports[i].dev);
+ printk_err ("FarSync: out of memory\n");
+ free_irq(card->irq, card);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ iounmap(card->ctlmem);
+ iounmap(card->mem);
+ kfree(card);
+ return -ENODEV;
+ }
+ card->ports[i].dev = dev;
+ card->ports[i].card = card;
+ card->ports[i].index = i;
+ card->ports[i].run = 0;
+
+ hdlc = dev_to_hdlc(dev);
+
+ /* Fill in the net device info */
+ /* Since this is a PCI setup this is purely
+ * informational. Give them the buffer addresses
+ * and basic card I/O.
+ */
+ dev->mem_start = card->phys_mem
+ + BUF_OFFSET ( txBuffer[i][0][0]);
+ dev->mem_end = card->phys_mem
+ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+ dev->base_addr = card->pci_conf;
+ dev->irq = card->irq;
+
+ dev->tx_queue_len = FST_TX_QUEUE_LEN;
+ dev->open = fst_open;
+ dev->stop = fst_close;
+ dev->do_ioctl = fst_ioctl;
+ dev->watchdog_timeo = FST_TX_TIMEOUT;
+ dev->tx_timeout = fst_tx_timeout;
+ hdlc->attach = fst_attach;
+ hdlc->xmit = fst_start_xmit;
+ }
+
+ card->device = pdev;
+
+ dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type,
+ card->nports, card->irq);
+ dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
+ card->pci_conf, card->phys_mem, card->phys_ctlmem);
+
+ /* Reset the card's processor */
+ fst_cpureset(card);
+ card->state = FST_RESET;
+
+ /* Initialise DMA (if required) */
+ fst_init_dma(card);
+
+ /* Record driver data for later use */
+ pci_set_drvdata(pdev, card);
+
+ /* Remainder of card setup */
+ fst_card_array[no_of_cards_added] = card;
+ card->card_no = no_of_cards_added++; /* Record instance and bump it */
+ fst_init_card(card);
+ if (card->family == FST_FAMILY_TXU) {
+ /*
+ * Allocate a dma buffer for transmit and receives
+ */
+ card->rx_dma_handle_host =
+ pci_alloc_consistent(card->device, FST_MAX_MTU,
+ &card->rx_dma_handle_card);
+ if (card->rx_dma_handle_host == NULL) {
+ printk_err("Could not allocate rx dma buffer\n");
+ fst_disable_intr(card);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ iounmap(card->ctlmem);
+ iounmap(card->mem);
+ kfree(card);
+ return -ENOMEM;
+ }
+ card->tx_dma_handle_host =
+ pci_alloc_consistent(card->device, FST_MAX_MTU,
+ &card->tx_dma_handle_card);
+ if (card->tx_dma_handle_host == NULL) {
+ printk_err("Could not allocate tx dma buffer\n");
+ fst_disable_intr(card);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ iounmap(card->ctlmem);
+ iounmap(card->mem);
+ kfree(card);
+ return -ENOMEM;
+ }
+ }
+ return 0; /* Success */
+}
+
+/*
+ * Cleanup and close down a card
+ */
+static void __devexit
+fst_remove_one(struct pci_dev *pdev)
+{
+ struct fst_card_info *card;
+ int i;
+
+ card = pci_get_drvdata(pdev);
+
+ for (i = 0; i < card->nports; i++) {
+ struct net_device *dev = port_to_dev(&card->ports[i]);
+ unregister_hdlc_device(dev);
+ }
+
+ fst_disable_intr(card);
+ free_irq(card->irq, card);
+
+ iounmap(card->ctlmem);
+ iounmap(card->mem);
+ pci_release_regions(pdev);
+ if (card->family == FST_FAMILY_TXU) {
+ /*
+ * Free dma buffers
+ */
+ pci_free_consistent(card->device, FST_MAX_MTU,
+ card->rx_dma_handle_host,
+ card->rx_dma_handle_card);
+ pci_free_consistent(card->device, FST_MAX_MTU,
+ card->tx_dma_handle_host,
+ card->tx_dma_handle_card);
+ }
+ fst_card_array[card->card_no] = NULL;
+}
+
+static struct pci_driver fst_driver = {
+ .name = FST_NAME,
+ .id_table = fst_pci_dev_id,
+ .probe = fst_add_one,
+ .remove = __devexit_p(fst_remove_one),
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+static int __init
+fst_init(void)
+{
+ int i;
+
+ for (i = 0; i < FST_MAX_CARDS; i++)
+ fst_card_array[i] = NULL;
+ spin_lock_init(&fst_work_q_lock);
+ return pci_module_init(&fst_driver);
+}
+
+static void __exit
+fst_cleanup_module(void)
+{
+ printk_info("FarSync WAN driver unloading\n");
+ pci_unregister_driver(&fst_driver);
+}
+
+module_init(fst_init);
+module_exit(fst_cleanup_module);
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
new file mode 100644
index 000000000000..d871dafa87a1
--- /dev/null
+++ b/drivers/net/wan/farsync.h
@@ -0,0 +1,357 @@
+/*
+ * FarSync X21 driver for Linux
+ *
+ * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
+ *
+ * Copyright (C) 2001 FarSite Communications Ltd.
+ * www.farsite.co.uk
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
+ *
+ * For the most part this file only contains structures and information
+ * that is visible to applications outside the driver. Shared memory
+ * layout etc is internal to the driver and described within farsync.c.
+ * Overlap exists in that the values used for some fields within the
+ * ioctl interface extend into the card's firmware interface, so values in
+ * this file may not be changed arbitrarily.
+ */
+
+/* What's in a name
+ *
+ * The project name for this driver is Oscar. The driver is intended to be
+ * used with the FarSite T-Series cards (T2P & T4P) running in the high
+ * speed frame shifter mode. This is sometimes referred to as X.21 mode
+ * which is a complete misnomer as the card continues to support V.24 and
+ * V.35 as well as X.21.
+ *
+ * A short common prefix is useful for routines within the driver to avoid
+ * conflict with other similar drivers and I have chosen to use "fst_" for this
+ * purpose (FarSite T-series).
+ *
+ * Finally the device driver needs a short network interface name. Since
+ * "hdlc" is already in use I've chosen the even less informative "sync"
+ * for the present.
+ */
+#define FST_NAME "fst" /* In debug/info etc */
+#define FST_NDEV_NAME "sync" /* For net interface */
+#define FST_DEV_NAME "farsync" /* For misc interfaces */
+
+
+/* User version number
+ *
+ * This version number is incremented with each official release of the
+ * package and is a simplified number for normal user reference.
+ * Individual files are tracked by the version control system and may
+ * have individual versions (or IDs) that move much faster than the
+ * release version as individual updates are tracked.
+ */
+#define FST_USER_VERSION "1.04"
+
+
+/* Ioctl call command values
+ *
+ * The first three private ioctls are used by the sync-PPP module;
+ * allowing a little room for expansion, we start our numbering at 10.
+ */
+#define FSTWRITE (SIOCDEVPRIVATE+10)
+#define FSTCPURESET (SIOCDEVPRIVATE+11)
+#define FSTCPURELEASE (SIOCDEVPRIVATE+12)
+#define FSTGETCONF (SIOCDEVPRIVATE+13)
+#define FSTSETCONF (SIOCDEVPRIVATE+14)
+
+
+/* FSTWRITE
+ *
+ * Used to write a block of data (firmware etc) before the card is running
+ */
+struct fstioc_write {
+ unsigned int size;
+ unsigned int offset;
+ unsigned char data[0];
+};
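+
+/* Illustrative user-space sketch (not part of the interface; the "sync0"
+ * interface name, sock_fd, fw_image and fw_len are assumptions): the
+ * ioctl is issued through a struct ifreq whose ifr_data points at a
+ * fstioc_write header followed immediately by the data to be written.
+ *
+ *     struct fstioc_write *wr = malloc(sizeof(*wr) + fw_len);
+ *     struct ifreq ifr;
+ *
+ *     wr->size = fw_len;
+ *     wr->offset = 0;                 // card memory offset to write at
+ *     memcpy(wr->data, fw_image, fw_len);
+ *     strncpy(ifr.ifr_name, "sync0", IFNAMSIZ);
+ *     ifr.ifr_data = (char *) wr;
+ *     ioctl(sock_fd, FSTWRITE, &ifr); // sock_fd: any AF_INET socket
+ */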
+
+
+/* FSTCPURESET and FSTCPURELEASE
+ *
+ * These take no additional data.
+ * FSTCPURESET forces the card's CPU into a reset state and holds it there.
+ * FSTCPURELEASE releases the CPU from this reset state allowing it to run;
+ * the reset vector should be set up before this ioctl is issued.
+ */
+
+/* FSTGETCONF and FSTSETCONF
+ *
+ * Get and set a card/port's configuration.
+ * In order to allow selective setting of items and for the kernel to
+ * indicate a partial status response the first field "valid" is a bitmask
+ * indicating which other fields in the structure are valid.
+ * Many of the field names in this structure match those used in the
+ * firmware shared memory configuration interface and come originally from
+ * the NT header file Smc.h
+ *
+ * When used with FSTGETCONF this structure should be zeroed before use.
+ * This is to allow for possible future expansion when some of the fields
+ * might be used to indicate a different (expanded) structure.
+ */
+struct fstioc_info {
+ unsigned int valid; /* Bits of structure that are valid */
+ unsigned int nports; /* Number of serial ports */
+ unsigned int type; /* Type index of card */
+ unsigned int state; /* State of card */
+ unsigned int index; /* Index of port ioctl was issued on */
+ unsigned int smcFirmwareVersion;
+ unsigned long kernelVersion; /* What Kernel version we are working with */
+ unsigned short lineInterface; /* Physical interface type */
+ unsigned char proto; /* Line protocol */
+ unsigned char internalClock; /* 1 => internal clock, 0 => external */
+ unsigned int lineSpeed; /* Speed in bps */
+ unsigned int v24IpSts; /* V.24 control input status */
+ unsigned int v24OpSts; /* V.24 control output status */
+ unsigned short clockStatus; /* lsb: 0=> present, 1=> absent */
+ unsigned short cableStatus; /* lsb: 0=> present, 1=> absent */
+ unsigned short cardMode; /* lsb: LED id mode */
+ unsigned short debug; /* Debug flags */
+ unsigned char transparentMode; /* Not used always 0 */
+ unsigned char invertClock; /* Invert clock feature for syncing */
+ unsigned char startingSlot; /* Time slot to use for start of tx */
+ unsigned char clockSource; /* External or internal */
+ unsigned char framing; /* E1, T1 or J1 */
+ unsigned char structure; /* unframed, double, crc4, f4, f12, */
+ /* f24 f72 */
+ unsigned char interface; /* rj48c or bnc */
+ unsigned char coding; /* hdb3 b8zs */
+ unsigned char lineBuildOut; /* 0, -7.5, -15, -22 */
+ unsigned char equalizer; /* short or long haul settings */
+ unsigned char loopMode; /* various loopbacks */
+ unsigned char range; /* cable lengths */
+ unsigned char txBufferMode; /* tx elastic buffer depth */
+ unsigned char rxBufferMode; /* rx elastic buffer depth */
+ unsigned char losThreshold; /* Attenuation on LOS signal */
+ unsigned char idleCode; /* Value to send as idle timeslot */
+ unsigned int receiveBufferDelay; /* delay through rx buffer timeslots */
+ unsigned int framingErrorCount; /* framing errors */
+ unsigned int codeViolationCount; /* code violations */
+ unsigned int crcErrorCount; /* CRC errors */
+ int lineAttenuation; /* in dB */
+ unsigned short lossOfSignal;
+ unsigned short receiveRemoteAlarm;
+ unsigned short alarmIndicationSignal;
+};
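+
+/* Illustrative user-space sketch (not part of the interface; "sync0" and
+ * sock_fd are assumptions): for FSTGETCONF zero the structure first, pass
+ * it via ifr_data, and only trust fields whose bit is set in "valid".
+ *
+ *     struct fstioc_info info;
+ *     struct ifreq ifr;
+ *
+ *     memset(&info, 0, sizeof(info));
+ *     strncpy(ifr.ifr_name, "sync0", IFNAMSIZ);
+ *     ifr.ifr_data = (char *) &info;
+ *     if (ioctl(sock_fd, FSTGETCONF, &ifr) == 0 &&
+ *         (info.valid & FSTVAL_SPEED))
+ *             printf("line speed %u bps\n", info.lineSpeed);
+ */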
+
+/* "valid" bitmask */
+#define FSTVAL_NONE 0x00000000 /* Nothing valid (firmware not running).
+ * Slight misnomer. In fact nports,
+ * type, state and index will be set
+ * based on hardware detected.
+ */
+#define FSTVAL_OMODEM 0x0000001F /* First 5 bits correspond to the
+ * output status bits defined for
+ * v24OpSts
+ */
+#define FSTVAL_SPEED 0x00000020 /* internalClock, lineSpeed, clockStatus
+ */
+#define FSTVAL_CABLE 0x00000040 /* lineInterface, cableStatus */
+#define FSTVAL_IMODEM 0x00000080 /* v24IpSts */
+#define FSTVAL_CARD 0x00000100 /* nports, type, state, index,
+ * smcFirmwareVersion
+ */
+#define FSTVAL_PROTO 0x00000200 /* proto */
+#define FSTVAL_MODE 0x00000400 /* cardMode */
+#define FSTVAL_PHASE 0x00000800 /* Clock phase */
+#define FSTVAL_TE1 0x00001000 /* T1E1 Configuration */
+#define FSTVAL_DEBUG 0x80000000 /* debug */
+#define FSTVAL_ALL 0x00001FFF /* Note: does not include DEBUG flag */
+
+/* "type" */
+#define FST_TYPE_NONE 0 /* Probably should never happen */
+#define FST_TYPE_T2P 1 /* T2P X21 2 port card */
+#define FST_TYPE_T4P 2 /* T4P X21 4 port card */
+#define FST_TYPE_T1U 3 /* T1U X21 1 port card */
+#define FST_TYPE_T2U 4 /* T2U X21 2 port card */
+#define FST_TYPE_T4U 5 /* T4U X21 4 port card */
+#define FST_TYPE_TE1 6 /* T1E1 X21 1 port card */
+
+/* "family" */
+#define FST_FAMILY_TXP 0 /* T2P or T4P */
+#define FST_FAMILY_TXU 1 /* T1U or T2U or T4U */
+
+/* "state" */
+#define FST_UNINIT 0 /* Raw uninitialised state following
+ * system startup */
+#define FST_RESET 1 /* Processor held in reset state */
+#define FST_DOWNLOAD 2 /* Card being downloaded */
+#define FST_STARTING 3 /* Released following download */
+#define FST_RUNNING 4 /* Processor running */
+#define FST_BADVERSION 5 /* Bad shared memory version detected */
+#define FST_HALTED 6 /* Processor flagged a halt */
+#define FST_IFAILED 7 /* Firmware issued initialisation failed
+ * interrupt
+ */
+/* "lineInterface" */
+#define V24 1
+#define X21 2
+#define V35 3
+#define X21D 4
+#define T1 5
+#define E1 6
+#define J1 7
+
+/* "proto" */
+#define FST_HDLC 1 /* Cisco compatible HDLC */
+#define FST_PPP 2 /* Sync PPP */
+#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
+#define FST_RAW 4 /* Two way raw packets */
+#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
+
+/* "internalClock" */
+#define INTCLK 1
+#define EXTCLK 0
+
+/* "v24IpSts" bitmask */
+#define IPSTS_CTS 0x00000001 /* Clear To Send (Indicate for X.21) */
+#define IPSTS_INDICATE IPSTS_CTS
+#define IPSTS_DSR 0x00000002 /* Data Set Ready (T2P Port A) */
+#define IPSTS_DCD 0x00000004 /* Data Carrier Detect */
+#define IPSTS_RI 0x00000008 /* Ring Indicator (T2P Port A) */
+#define IPSTS_TMI 0x00000010 /* Test Mode Indicator (Not Supported)*/
+
+/* "v24OpSts" bitmask */
+#define OPSTS_RTS 0x00000001 /* Request To Send (Control for X.21) */
+#define OPSTS_CONTROL OPSTS_RTS
+#define OPSTS_DTR 0x00000002 /* Data Terminal Ready */
+#define OPSTS_DSRS 0x00000004 /* Data Signalling Rate Select (Not
+ * Supported) */
+#define OPSTS_SS 0x00000008 /* Select Standby (Not Supported) */
+#define OPSTS_LL 0x00000010 /* Maintenance Test (Not Supported) */
+
+/* "cardMode" bitmask */
+#define CARD_MODE_IDENTIFY 0x0001
+
+/*
+ * Constants for T1/E1 configuration
+ */
+
+/*
+ * Clock source
+ */
+#define CLOCKING_SLAVE 0
+#define CLOCKING_MASTER 1
+
+/*
+ * Framing
+ */
+#define FRAMING_E1 0
+#define FRAMING_J1 1
+#define FRAMING_T1 2
+
+/*
+ * Structure
+ */
+#define STRUCTURE_UNFRAMED 0
+#define STRUCTURE_E1_DOUBLE 1
+#define STRUCTURE_E1_CRC4 2
+#define STRUCTURE_E1_CRC4M 3
+#define STRUCTURE_T1_4 4
+#define STRUCTURE_T1_12 5
+#define STRUCTURE_T1_24 6
+#define STRUCTURE_T1_72 7
+
+/*
+ * Interface
+ */
+#define INTERFACE_RJ48C 0
+#define INTERFACE_BNC 1
+
+/*
+ * Coding
+ */
+
+#define CODING_HDB3 0
+#define CODING_NRZ 1
+#define CODING_CMI 2
+#define CODING_CMI_HDB3 3
+#define CODING_CMI_B8ZS 4
+#define CODING_AMI 5
+#define CODING_AMI_ZCS 6
+#define CODING_B8ZS 7
+
+/*
+ * Line Build Out
+ */
+#define LBO_0dB 0
+#define LBO_7dB5 1
+#define LBO_15dB 2
+#define LBO_22dB5 3
+
+/*
+ * Range for long haul t1 > 655ft
+ */
+#define RANGE_0_133_FT 0
+#define RANGE_0_40_M RANGE_0_133_FT
+#define RANGE_133_266_FT 1
+#define RANGE_40_81_M RANGE_133_266_FT
+#define RANGE_266_399_FT 2
+#define RANGE_81_122_M RANGE_266_399_FT
+#define RANGE_399_533_FT 3
+#define RANGE_122_162_M RANGE_399_533_FT
+#define RANGE_533_655_FT 4
+#define RANGE_162_200_M RANGE_533_655_FT
+/*
+ * Receive Equaliser
+ */
+#define EQUALIZER_SHORT 0
+#define EQUALIZER_LONG 1
+
+/*
+ * Loop modes
+ */
+#define LOOP_NONE 0
+#define LOOP_LOCAL 1
+#define LOOP_PAYLOAD_EXC_TS0 2
+#define LOOP_PAYLOAD_INC_TS0 3
+#define LOOP_REMOTE 4
+
+/*
+ * Buffer modes
+ */
+#define BUFFER_2_FRAME 0
+#define BUFFER_1_FRAME 1
+#define BUFFER_96_BIT 2
+#define BUFFER_NONE 3
+
+/* Debug support
+ *
+ * These should only be enabled for development kernels, production code
+ * should define FST_DEBUG=0 in order to exclude the code.
+ * Setting FST_DEBUG=1 will include all the debug code but in a disabled
+ * state, use the FSTSETCONF ioctl to enable specific debug actions, or
+ * FST_DEBUG can be set to prime the debug selection.
+ */
+#define FST_DEBUG 0x0000
+#if FST_DEBUG
+
+extern int fst_debug_mask; /* Bit mask of actions to debug, bits
+ * listed below. Note: Bit 0 is used
+ * to trigger the inclusion of this
+ * code, without enabling any actions.
+ */
+#define DBG_INIT 0x0002 /* Card detection and initialisation */
+#define DBG_OPEN 0x0004 /* Open and close sequences */
+#define DBG_PCI 0x0008 /* PCI config operations */
+#define DBG_IOCTL 0x0010 /* Ioctls and other config */
+#define DBG_INTR 0x0020 /* Interrupt routines (be careful) */
+#define DBG_TX 0x0040 /* Packet transmission */
+#define DBG_RX 0x0080 /* Packet reception */
+#define DBG_CMD 0x0100 /* Port command issuing */
+
+#define DBG_ASS 0xFFFF /* Assert-like statements. Code that
+ * should never be reached, if you see
+ * one of these then I've been an ass
+ */
+#endif /* FST_DEBUG */
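+
+/* Illustrative user-space sketch (an assumed usage; "sync0", sock_fd and
+ * the chosen mask are placeholders). It is only effective when the driver
+ * is built with FST_DEBUG non-zero and the card is in the running state,
+ * since FSTSETCONF is rejected otherwise.
+ *
+ *     struct fstioc_info info = { 0 };
+ *     struct ifreq ifr;
+ *
+ *     info.valid = FSTVAL_DEBUG;
+ *     info.debug = DBG_TX | DBG_RX;
+ *     strncpy(ifr.ifr_name, "sync0", IFNAMSIZ);
+ *     ifr.ifr_data = (char *) &info;
+ *     ioctl(sock_fd, FSTSETCONF, &ifr);
+ */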
+
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
new file mode 100644
index 000000000000..3839662ff201
--- /dev/null
+++ b/drivers/net/wan/hd64570.h
@@ -0,0 +1,241 @@
+#ifndef __HD64570_H
+#define __HD64570_H
+
+/* SCA HD64570 register definitions - all addresses for mode 0 (8086 MPU)
+ and 1 (64180 MPU). For modes 2 and 3, XOR the address with 0x01.
+
+ Source: HD64570 SCA User's Manual
+*/
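/*
 * Illustrative sketch, not part of this patch: adjusting a register address
 * for CPU modes 2 and 3 as described in the note above.  The helper name is
 * hypothetical.
 */
static inline unsigned int hd64570_reg_addr(unsigned int reg, int cpu_mode)
{
	/* modes 0 and 1 use the addresses below as-is; 2 and 3 XOR bit 0 */
	return (cpu_mode >= 2) ? (reg ^ 0x01) : reg;
}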
+
+
+
+/* SCA Control Registers */
+#define LPR 0x00 /* Low Power */
+
+/* Wait controller registers */
+#define PABR0 0x02 /* Physical Address Boundary 0 */
+#define PABR1 0x03 /* Physical Address Boundary 1 */
+#define WCRL 0x04 /* Wait Control L */
+#define WCRM 0x05 /* Wait Control M */
+#define WCRH 0x06 /* Wait Control H */
+
+#define PCR 0x08 /* DMA Priority Control */
+#define DMER 0x09 /* DMA Master Enable */
+
+
+/* Interrupt registers */
+#define ISR0 0x10 /* Interrupt Status 0 */
+#define ISR1 0x11 /* Interrupt Status 1 */
+#define ISR2 0x12 /* Interrupt Status 2 */
+
+#define IER0 0x14 /* Interrupt Enable 0 */
+#define IER1 0x15 /* Interrupt Enable 1 */
+#define IER2 0x16 /* Interrupt Enable 2 */
+
+#define ITCR 0x18 /* Interrupt Control */
+#define IVR 0x1A /* Interrupt Vector */
+#define IMVR 0x1C /* Interrupt Modified Vector */
+
+
+
+/* MSCI channel (port) 0 registers - offset 0x20
+ MSCI channel (port) 1 registers - offset 0x40 */
+
+#define MSCI0_OFFSET 0x20
+#define MSCI1_OFFSET 0x40
+
+#define TRBL 0x00 /* TX/RX buffer L */
+#define TRBH 0x01 /* TX/RX buffer H */
+#define ST0 0x02 /* Status 0 */
+#define ST1 0x03 /* Status 1 */
+#define ST2 0x04 /* Status 2 */
+#define ST3 0x05 /* Status 3 */
+#define FST 0x06 /* Frame Status */
+#define IE0 0x08 /* Interrupt Enable 0 */
+#define IE1 0x09 /* Interrupt Enable 1 */
+#define IE2 0x0A /* Interrupt Enable 2 */
+#define FIE 0x0B /* Frame Interrupt Enable */
+#define CMD 0x0C /* Command */
+#define MD0 0x0E /* Mode 0 */
+#define MD1 0x0F /* Mode 1 */
+#define MD2 0x10 /* Mode 2 */
+#define CTL 0x11 /* Control */
+#define SA0 0x12 /* Sync/Address 0 */
+#define SA1 0x13 /* Sync/Address 1 */
+#define IDL 0x14 /* Idle Pattern */
+#define TMC 0x15 /* Time Constant */
+#define RXS 0x16 /* RX Clock Source */
+#define TXS 0x17 /* TX Clock Source */
+#define TRC0 0x18 /* TX Ready Control 0 */
+#define TRC1 0x19 /* TX Ready Control 1 */
+#define RRC 0x1A /* RX Ready Control */
+#define CST0 0x1C /* Current Status 0 */
+#define CST1 0x1D /* Current Status 1 */
+
+
+/* Timer channel 0 (port 0 RX) registers - offset 0x60
+ Timer channel 1 (port 0 TX) registers - offset 0x68
+ Timer channel 2 (port 1 RX) registers - offset 0x70
+ Timer channel 3 (port 1 TX) registers - offset 0x78
+*/
+
+#define TIMER0RX_OFFSET 0x60
+#define TIMER0TX_OFFSET 0x68
+#define TIMER1RX_OFFSET 0x70
+#define TIMER1TX_OFFSET 0x78
+
+#define TCNTL 0x00 /* Up-counter L */
+#define TCNTH 0x01 /* Up-counter H */
+#define TCONRL 0x02 /* Constant L */
+#define TCONRH 0x03 /* Constant H */
+#define TCSR 0x04 /* Control/Status */
+#define TEPR 0x05 /* Expand Prescale */
+
+
+
+/* DMA channel 0 (port 0 RX) registers - offset 0x80
+ DMA channel 1 (port 0 TX) registers - offset 0xA0
+ DMA channel 2 (port 1 RX) registers - offset 0xC0
+ DMA channel 3 (port 1 TX) registers - offset 0xE0
+*/
+
+#define DMAC0RX_OFFSET 0x80
+#define DMAC0TX_OFFSET 0xA0
+#define DMAC1RX_OFFSET 0xC0
+#define DMAC1TX_OFFSET 0xE0
+
+#define BARL 0x00 /* Buffer Address L (chained block) */
+#define BARH 0x01 /* Buffer Address H (chained block) */
+#define BARB 0x02 /* Buffer Address B (chained block) */
+
+#define DARL 0x00 /* RX Destination Addr L (single block) */
+#define DARH 0x01 /* RX Destination Addr H (single block) */
+#define DARB 0x02 /* RX Destination Addr B (single block) */
+
+#define SARL 0x04 /* TX Source Address L (single block) */
+#define SARH 0x05 /* TX Source Address H (single block) */
+#define SARB 0x06 /* TX Source Address B (single block) */
+
+#define CPB 0x06 /* Chain Pointer Base (chained block) */
+
+#define CDAL 0x08 /* Current Descriptor Addr L (chained block) */
+#define CDAH 0x09 /* Current Descriptor Addr H (chained block) */
+#define EDAL 0x0A /* Error Descriptor Addr L (chained block) */
+#define EDAH 0x0B /* Error Descriptor Addr H (chained block) */
+#define BFLL 0x0C /* RX Receive Buffer Length L (chained block)*/
+#define BFLH 0x0D /* RX Receive Buffer Length H (chained block)*/
+#define BCRL 0x0E /* Byte Count L */
+#define BCRH 0x0F /* Byte Count H */
+#define DSR 0x10 /* DMA Status */
+#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DMR 0x11 /* DMA Mode */
+#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define FCT 0x13 /* Frame End Interrupt Counter */
+#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DIR 0x14 /* DMA Interrupt Enable */
+#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DCR 0x15 /* DMA Command */
+#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+
+
+
+
+/* Descriptor Structure */
+
+typedef struct {
+ u16 cp; /* Chain Pointer */
+ u32 bp; /* Buffer Pointer (24 bits) */
+ u16 len; /* Data Length */
+ u8 stat; /* Status */
+ u8 unused; /* pads to 2-byte boundary */
+}__attribute__ ((packed)) pkt_desc;
+
+
+/* Packet Descriptor Status bits */
+
+#define ST_TX_EOM 0x80 /* End of frame */
+#define ST_TX_EOT 0x01 /* End of transmission */
+
+#define ST_RX_EOM 0x80 /* End of frame */
+#define ST_RX_SHORT 0x40 /* Short frame */
+#define ST_RX_ABORT 0x20 /* Abort */
+#define ST_RX_RESBIT 0x10 /* Residual bit */
+#define ST_RX_OVERRUN 0x08 /* Overrun */
+#define ST_RX_CRC 0x04 /* CRC */
+
+#define ST_ERROR_MASK 0x7C
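/*
 * Illustrative sketch, not part of this patch: a received descriptor is
 * usually accepted only when EOM is set and none of the bits covered by
 * ST_ERROR_MASK are.  The helper name is hypothetical.
 */
static inline int sca_rx_desc_ok(u8 stat)
{
	return (stat & ST_RX_EOM) && !(stat & ST_ERROR_MASK);
}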
+
+#define DIR_EOTE 0x80 /* Transfer completed */
+#define DIR_EOME 0x40 /* Frame Transfer Completed (chained-block) */
+#define DIR_BOFE 0x20 /* Buffer Overflow/Underflow (chained-block)*/
+#define DIR_COFE 0x10 /* Counter Overflow (chained-block) */
+
+
+#define DSR_EOT 0x80 /* Transfer completed */
+#define DSR_EOM 0x40 /* Frame Transfer Completed (chained-block) */
+#define DSR_BOF 0x20 /* Buffer Overflow/Underflow (chained-block)*/
+#define DSR_COF 0x10 /* Counter Overflow (chained-block) */
+#define DSR_DE 0x02 /* DMA Enable */
+#define DSR_DWE 0x01 /* DMA Write Disable */
+
+/* DMA Master Enable Register (DMER) bits */
+#define DMER_DME 0x80 /* DMA Master Enable */
+
+
+#define CMD_RESET 0x21 /* Reset Channel */
+#define CMD_TX_ENABLE 0x02 /* Start transmitter */
+#define CMD_RX_ENABLE 0x12 /* Start receiver */
+
+#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */
+#define MD0_CRC_ENA 0x04 /* Enable CRC code calculation */
+#define MD0_CRC_CCITT 0x02 /* CCITT CRC instead of CRC-16 */
+#define MD0_CRC_PR1 0x01 /* Initial all-ones instead of all-zeros */
+
+#define MD0_CRC_NONE 0x00
+#define MD0_CRC_16_0 0x04
+#define MD0_CRC_16 0x05
+#define MD0_CRC_ITU_0 0x06
+#define MD0_CRC_ITU 0x07
+
+#define MD2_NRZ 0x00
+#define MD2_NRZI 0x20
+#define MD2_MANCHESTER 0x80
+#define MD2_FM_MARK 0xA0
+#define MD2_FM_SPACE 0xC0
+#define MD2_LOOPBACK 0x03 /* Local data Loopback */
+
+#define CTL_NORTS 0x01
+#define CTL_IDLE 0x10 /* Transmit an idle pattern */
+#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmission */
+
+#define ST0_TXRDY 0x02 /* TX ready */
+#define ST0_RXRDY 0x01 /* RX ready */
+
+#define ST1_UDRN 0x80 /* MSCI TX underrun */
+#define ST1_CDCD 0x04 /* DCD level changed */
+
+#define ST3_CTS 0x08 /* modem input - /CTS */
+#define ST3_DCD 0x04 /* modem input - /DCD */
+
+#define IE0_TXINT 0x80 /* TX INT MSCI interrupt enable */
+#define IE0_RXINTA 0x40 /* RX INT A MSCI interrupt enable */
+#define IE1_UDRN 0x80 /* TX underrun MSCI interrupt enable */
+#define IE1_CDCD 0x04 /* DCD level changed */
+
+#define DCR_ABORT 0x01 /* Software abort command */
+#define DCR_CLEAR_EOF 0x02 /* Clear EOF interrupt */
+
+/* TX and RX Clock Source - RXS and TXS */
+#define CLK_BRG_MASK 0x0F
+#define CLK_LINE_RX 0x00 /* TX/RX clock line input */
+#define CLK_LINE_TX 0x00 /* TX/RX line input */
+#define CLK_BRG_RX 0x40 /* internal baud rate generator */
+#define CLK_BRG_TX 0x40 /* internal baud rate generator */
+#define CLK_RXCLK_TX 0x60 /* TX clock from RX clock */
+
+#endif
diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h
new file mode 100644
index 000000000000..96567c2dc4db
--- /dev/null
+++ b/drivers/net/wan/hd64572.h
@@ -0,0 +1,527 @@
+/*
+ * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for
+ * CPU modes 0 & 2.
+ *
+ * Author: Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright: (c) 2000-2001 Cyclades Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Log: hd64572.h,v $
+ * Revision 3.1 2001/06/15 12:41:10 regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1 2001/06/13 20:24:49 daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 1.0 2000/01/25 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef __HD64572_H
+#define __HD64572_H
+
+/* Illegal Access Register */
+#define ILAR 0x00
+
+/* Wait Controller Registers */
+#define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */
+#define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */
+#define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */
+#define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */
+#define WCRL 0x24 /* Wait Control Register L */
+#define WCRM 0x25 /* Wait Control Register M */
+#define WCRH 0x26 /* Wait Control Register H */
+
+/* Interrupt Registers */
+#define IVR 0x60 /* Interrupt Vector Register */
+#define IMVR 0x64 /* Interrupt Modified Vector Register */
+#define ITCR 0x68 /* Interrupt Control Register */
+#define ISR0 0x6c /* Interrupt Status Register 0 */
+#define ISR1 0x70 /* Interrupt Status Register 1 */
+#define IER0 0x74 /* Interrupt Enable Register 0 */
+#define IER1 0x78 /* Interrupt Enable Register 1 */
+
+/* Register Access Macros (chan is 0 or 1 in _any_ case) */
+#define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */
+#define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */
+#define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */
+#define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */
+#define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */
+#define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */
+#define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */
+#define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */
+#define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */
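/*
 * Usage sketch, not part of this patch (assumes an sca_in()-style byte
 * accessor supplied by the card driver): Mode register 0 of MSCI channel 1
 * lives at M_REG(MD0, 1) == 0x138 + 0x80 == 0x1b8, and the Tx DMA status
 * register of channel 0 at DSR_TX(0) == 0x49, so a read looks like:
 *
 *	u8 md0 = sca_in(M_REG(MD0, 1), card);
 */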
+
+/* MSCI Channel Registers */
+#define MSCI0_OFFSET 0x00
+#define MSCI1_OFFSET 0x80
+
+#define MD0 0x138 /* Mode reg 0 */
+#define MD1 0x139 /* Mode reg 1 */
+#define MD2 0x13a /* Mode reg 2 */
+#define MD3 0x13b /* Mode reg 3 */
+#define CTL 0x130 /* Control reg */
+#define RXS 0x13c /* RX clock source */
+#define TXS 0x13d /* TX clock source */
+#define EXS 0x13e /* External clock input selection */
+#define TMCT 0x144 /* Time constant (Tx) */
+#define TMCR 0x145 /* Time constant (Rx) */
+#define CMD 0x128 /* Command reg */
+#define ST0 0x118 /* Status reg 0 */
+#define ST1 0x119 /* Status reg 1 */
+#define ST2 0x11a /* Status reg 2 */
+#define ST3 0x11b /* Status reg 3 */
+#define ST4 0x11c /* Status reg 4 */
+#define FST 0x11d /* frame Status reg */
+#define IE0 0x120 /* Interrupt enable reg 0 */
+#define IE1 0x121 /* Interrupt enable reg 1 */
+#define IE2 0x122 /* Interrupt enable reg 2 */
+#define IE4 0x124 /* Interrupt enable reg 4 */
+#define FIE 0x125 /* Frame Interrupt enable reg */
+#define SA0 0x140 /* Syn Address reg 0 */
+#define SA1 0x141 /* Syn Address reg 1 */
+#define IDL 0x142 /* Idle register */
+#define TRBL 0x100 /* TX/RX buffer reg L */
+#define TRBK 0x101 /* TX/RX buffer reg K */
+#define TRBJ 0x102 /* TX/RX buffer reg J */
+#define TRBH 0x103 /* TX/RX buffer reg H */
+#define TRC0 0x148 /* TX Ready control reg 0 */
+#define TRC1 0x149 /* TX Ready control reg 1 */
+#define RRC 0x14a /* RX Ready control reg */
+#define CST0 0x108 /* Current Status Register 0 */
+#define CST1 0x109 /* Current Status Register 1 */
+#define CST2 0x10a /* Current Status Register 2 */
+#define CST3 0x10b /* Current Status Register 3 */
+#define GPO 0x131 /* General Purpose Output Pin Ctl Reg */
+#define TFS 0x14b /* Tx Start Threshold Ctl Reg */
+#define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */
+#define TBN 0x110 /* Tx Buffer Number Reg */
+#define RBN 0x111 /* Rx Buffer Number Reg */
+#define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */
+#define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */
+#define TCR 0x152 /* Tx DMA Critical Request Reg */
+#define RNR 0x154 /* Rx DMA Request Ctl Reg */
+#define RCR 0x156 /* Rx DMA Critical Request Reg */
+
+/* Timer Registers */
+#define TIMER0RX_OFFSET 0x00
+#define TIMER0TX_OFFSET 0x10
+#define TIMER1RX_OFFSET 0x20
+#define TIMER1TX_OFFSET 0x30
+
+#define TCNTL 0x200 /* Timer Upcounter L */
+#define TCNTH 0x201 /* Timer Upcounter H */
+#define TCONRL 0x204 /* Timer Constant Register L */
+#define TCONRH 0x205 /* Timer Constant Register H */
+#define TCSR 0x206 /* Timer Control/Status Register */
+#define TEPR 0x207 /* Timer Expand Prescale Register */
+
+/* DMA registers */
+#define PCR 0x40 /* DMA priority control reg */
+#define DRR 0x44 /* DMA reset reg */
+#define DMER 0x07 /* DMA Master Enable reg */
+#define BTCR 0x08 /* Burst Tx Ctl Reg */
+#define BOLR 0x0c /* Back-off Length Reg */
+#define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */
+#define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */
+#define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */
+#define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */
+#define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */
+#define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */
+#define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */
+#define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */
+#define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */
+#define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */
+
+/* DMA Channel Registers */
+#define DMAC0RX_OFFSET 0x00
+#define DMAC0TX_OFFSET 0x20
+#define DMAC1RX_OFFSET 0x40
+#define DMAC1TX_OFFSET 0x60
+
+#define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */
+#define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */
+#define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */
+#define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */
+#define SARL 0x80 /* Source Addr Register L (single-block, TX only) */
+#define SARH 0x81 /* Source Addr Register H (single-block, TX only) */
+#define SARB 0x82 /* Source Addr Register B (single-block, TX only) */
+#define SARBH 0x83 /* Source Addr Register BH (single-block, TX only) */
+#define BARL 0x80 /* Buffer Addr Register L (chained-block) */
+#define BARH 0x81 /* Buffer Addr Register H (chained-block) */
+#define BARB 0x82 /* Buffer Addr Register B (chained-block) */
+#define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */
+#define CDAL 0x84 /* Current Descriptor Addr Register L */
+#define CDAH 0x85 /* Current Descriptor Addr Register H */
+#define CDAB 0x86 /* Current Descriptor Addr Register B */
+#define CDABH 0x87 /* Current Descriptor Addr Register BH */
+#define EDAL 0x88 /* Error Descriptor Addr Register L */
+#define EDAH 0x89 /* Error Descriptor Addr Register H */
+#define EDAB 0x8a /* Error Descriptor Addr Register B */
+#define EDABH 0x8b /* Error Descriptor Addr Register BH */
+#define BFLL 0x90 /* RX Buffer Length L (only RX) */
+#define BFLH 0x91 /* RX Buffer Length H (only RX) */
+#define BCRL 0x8c /* Byte Count Register L */
+#define BCRH 0x8d /* Byte Count Register H */
+
+/* Block Descriptor Structure */
+typedef struct {
+ unsigned long next; /* pointer to next block descriptor */
+ unsigned long ptbuf; /* buffer pointer */
+ unsigned short len; /* data length */
+ unsigned char status; /* status */
+ unsigned char filler[5]; /* alignment filler (16 bytes) */
+} pcsca_bd_t;
+
+/* Block Descriptor Structure */
+typedef struct {
+ u32 cp; /* pointer to next block descriptor */
+ u32 bp; /* buffer pointer */
+ u16 len; /* data length */
+ u8 stat; /* status */
+ u8 unused; /* pads to 4-byte boundary */
+}pkt_desc;
+
+
+/*
+ Descriptor Status definitions:
+
+ Bit Transmission Reception
+
+ 7 EOM EOM
+ 6 - Short Frame
+ 5 - Abort
+ 4 - Residual bit
+ 3 Underrun Overrun
+ 2 - CRC
+ 1 Ownership Ownership
+ 0 EOT -
+*/
+#define DST_EOT 0x01 /* End of transmit command */
+#define DST_OSB 0x02 /* Ownership bit */
+#define DST_CRC 0x04 /* CRC Error */
+#define DST_OVR 0x08 /* Overrun */
+#define DST_UDR 0x08 /* Underrun */
+#define DST_RBIT 0x10 /* Residual bit */
+#define DST_ABT 0x20 /* Abort */
+#define DST_SHRT 0x40 /* Short Frame */
+#define DST_EOM 0x80 /* End of Message */
+
+/* Packet Descriptor Status bits */
+
+#define ST_TX_EOM 0x80 /* End of frame */
+#define ST_TX_UNDRRUN 0x08
+#define ST_TX_OWNRSHP 0x02
+#define ST_TX_EOT 0x01 /* End of transmission */
+
+#define ST_RX_EOM 0x80 /* End of frame */
+#define ST_RX_SHORT 0x40 /* Short frame */
+#define ST_RX_ABORT 0x20 /* Abort */
+#define ST_RX_RESBIT 0x10 /* Residual bit */
+#define ST_RX_OVERRUN 0x08 /* Overrun */
+#define ST_RX_CRC 0x04 /* CRC */
+#define ST_RX_OWNRSHP 0x02
+
+#define ST_ERROR_MASK 0x7C
+
+/* Status Counter Registers */
+#define CMCR 0x158 /* Counter Master Ctl Reg */
+#define TECNTL 0x160 /* Tx EOM Counter L */
+#define TECNTM 0x161 /* Tx EOM Counter M */
+#define TECNTH 0x162 /* Tx EOM Counter H */
+#define TECCR 0x163 /* Tx EOM Counter Ctl Reg */
+#define URCNTL 0x164 /* Underrun Counter L */
+#define URCNTH 0x165 /* Underrun Counter H */
+#define URCCR 0x167 /* Underrun Counter Ctl Reg */
+#define RECNTL 0x168 /* Rx EOM Counter L */
+#define RECNTM 0x169 /* Rx EOM Counter M */
+#define RECNTH 0x16a /* Rx EOM Counter H */
+#define RECCR 0x16b /* Rx EOM Counter Ctl Reg */
+#define ORCNTL 0x16c /* Overrun Counter L */
+#define ORCNTH 0x16d /* Overrun Counter H */
+#define ORCCR 0x16f /* Overrun Counter Ctl Reg */
+#define CECNTL 0x170 /* CRC Counter L */
+#define CECNTH 0x171 /* CRC Counter H */
+#define CECCR 0x173 /* CRC Counter Ctl Reg */
+#define ABCNTL 0x174 /* Abort frame Counter L */
+#define ABCNTH 0x175 /* Abort frame Counter H */
+#define ABCCR 0x177 /* Abort frame Counter Ctl Reg */
+#define SHCNTL 0x178 /* Short frame Counter L */
+#define SHCNTH 0x179 /* Short frame Counter H */
+#define SHCCR 0x17b /* Short frame Counter Ctl Reg */
+#define RSCNTL 0x17c /* Residual bit Counter L */
+#define RSCNTH 0x17d /* Residual bit Counter H */
+#define RSCCR 0x17f /* Residual bit Counter Ctl Reg */
+
+/* Register Programming Constants */
+
+#define IR0_DMIC 0x00000001
+#define IR0_DMIB 0x00000002
+#define IR0_DMIA 0x00000004
+#define IR0_EFT 0x00000008
+#define IR0_DMAREQ 0x00010000
+#define IR0_TXINT 0x00020000
+#define IR0_RXINTB 0x00040000
+#define IR0_RXINTA 0x00080000
+#define IR0_TXRDY 0x00100000
+#define IR0_RXRDY 0x00200000
+
+#define MD0_CRC16_0 0x00
+#define MD0_CRC16_1 0x01
+#define MD0_CRC32 0x02
+#define MD0_CRC_CCITT 0x03
+#define MD0_CRCC0 0x04
+#define MD0_CRCC1 0x08
+#define MD0_AUTO_ENA 0x10
+#define MD0_ASYNC 0x00
+#define MD0_BY_MSYNC 0x20
+#define MD0_BY_BISYNC 0x40
+#define MD0_BY_EXT 0x60
+#define MD0_BIT_SYNC 0x80
+#define MD0_TRANSP 0xc0
+
+#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */
+
+#define MD0_CRC_NONE 0x00
+#define MD0_CRC_16_0 0x04
+#define MD0_CRC_16 0x05
+#define MD0_CRC_ITU32 0x06
+#define MD0_CRC_ITU 0x07
+
+#define MD1_NOADDR 0x00
+#define MD1_SADDR1 0x40
+#define MD1_SADDR2 0x80
+#define MD1_DADDR 0xc0
+
+#define MD2_NRZI_IEEE 0x40
+#define MD2_MANCHESTER 0x80
+#define MD2_FM_MARK 0xA0
+#define MD2_FM_SPACE 0xC0
+#define MD2_LOOPBACK 0x03 /* Local data Loopback */
+
+#define MD2_F_DUPLEX 0x00
+#define MD2_AUTO_ECHO 0x01
+#define MD2_LOOP_HI_Z 0x02
+#define MD2_LOOP_MIR 0x03
+#define MD2_ADPLL_X8 0x00
+#define MD2_ADPLL_X16 0x08
+#define MD2_ADPLL_X32 0x10
+#define MD2_NRZ 0x00
+#define MD2_NRZI 0x20
+#define MD2_NRZ_IEEE 0x40
+#define MD2_MANCH 0x00
+#define MD2_FM1 0x20
+#define MD2_FM0 0x40
+#define MD2_FM 0x80
+
+#define CTL_RTS 0x01
+#define CTL_DTR 0x02
+#define CTL_SYN 0x04
+#define CTL_IDLC 0x10
+#define CTL_UDRNC 0x20
+#define CTL_URSKP 0x40
+#define CTL_URCT 0x80
+
+#define CTL_NORTS 0x01
+#define CTL_NODTR 0x02
+#define CTL_IDLE 0x10
+
+#define RXS_BR0 0x01
+#define RXS_BR1 0x02
+#define RXS_BR2 0x04
+#define RXS_BR3 0x08
+#define RXS_ECLK 0x00
+#define RXS_ECLK_NS 0x20
+#define RXS_IBRG 0x40
+#define RXS_PLL1 0x50
+#define RXS_PLL2 0x60
+#define RXS_PLL3 0x70
+#define RXS_DRTXC 0x80
+
+#define TXS_BR0 0x01
+#define TXS_BR1 0x02
+#define TXS_BR2 0x04
+#define TXS_BR3 0x08
+#define TXS_ECLK 0x00
+#define TXS_IBRG 0x40
+#define TXS_RCLK 0x60
+#define TXS_DTRXC 0x80
+
+#define EXS_RES0 0x01
+#define EXS_RES1 0x02
+#define EXS_RES2 0x04
+#define EXS_TES0 0x10
+#define EXS_TES1 0x20
+#define EXS_TES2 0x40
+
+#define CLK_BRG_MASK 0x0F
+#define CLK_PIN_OUT 0x80
+#define CLK_LINE 0x00 /* clock line input */
+#define CLK_BRG 0x40 /* internal baud rate generator */
+#define CLK_TX_RXCLK 0x60 /* TX clock from RX clock */
+
+#define CMD_RX_RST 0x11
+#define CMD_RX_ENA 0x12
+#define CMD_RX_DIS 0x13
+#define CMD_RX_CRC_INIT 0x14
+#define CMD_RX_MSG_REJ 0x15
+#define CMD_RX_MP_SRCH 0x16
+#define CMD_RX_CRC_EXC 0x17
+#define CMD_RX_CRC_FRC 0x18
+#define CMD_TX_RST 0x01
+#define CMD_TX_ENA 0x02
+#define CMD_TX_DISA 0x03
+#define CMD_TX_CRC_INIT 0x04
+#define CMD_TX_CRC_EXC 0x05
+#define CMD_TX_EOM 0x06
+#define CMD_TX_ABORT 0x07
+#define CMD_TX_MP_ON 0x08
+#define CMD_TX_BUF_CLR 0x09
+#define CMD_TX_DISB 0x0b
+#define CMD_CH_RST 0x21
+#define CMD_SRCH_MODE 0x31
+#define CMD_NOP 0x00
+
+#define CMD_RESET 0x21
+#define CMD_TX_ENABLE 0x02
+#define CMD_RX_ENABLE 0x12
+
+#define ST0_RXRDY 0x01
+#define ST0_TXRDY 0x02
+#define ST0_RXINTB 0x20
+#define ST0_RXINTA 0x40
+#define ST0_TXINT 0x80
+
+#define ST1_IDLE 0x01
+#define ST1_ABORT 0x02
+#define ST1_CDCD 0x04
+#define ST1_CCTS 0x08
+#define ST1_SYN_FLAG 0x10
+#define ST1_CLMD 0x20
+#define ST1_TXIDLE 0x40
+#define ST1_UDRN 0x80
+
+#define ST2_CRCE 0x04
+#define ST2_ONRN 0x08
+#define ST2_RBIT 0x10
+#define ST2_ABORT 0x20
+#define ST2_SHORT 0x40
+#define ST2_EOM 0x80
+
+#define ST3_RX_ENA 0x01
+#define ST3_TX_ENA 0x02
+#define ST3_DCD 0x04
+#define ST3_CTS 0x08
+#define ST3_SRCH_MODE 0x10
+#define ST3_SLOOP 0x20
+#define ST3_GPI 0x80
+
+#define ST4_RDNR 0x01
+#define ST4_RDCR 0x02
+#define ST4_TDNR 0x04
+#define ST4_TDCR 0x08
+#define ST4_OCLM 0x20
+#define ST4_CFT 0x40
+#define ST4_CGPI 0x80
+
+#define FST_CRCEF 0x04
+#define FST_OVRNF 0x08
+#define FST_RBIF 0x10
+#define FST_ABTF 0x20
+#define FST_SHRTF 0x40
+#define FST_EOMF 0x80
+
+#define IE0_RXRDY 0x01
+#define IE0_TXRDY 0x02
+#define IE0_RXINTB 0x20
+#define IE0_RXINTA 0x40
+#define IE0_TXINT 0x80
+#define IE0_UDRN 0x00008000 /* TX underrun MSCI interrupt enable */
+#define IE0_CDCD 0x00000400 /* CD level change interrupt enable */
+
+#define IE1_IDLD 0x01
+#define IE1_ABTD 0x02
+#define IE1_CDCD 0x04
+#define IE1_CCTS 0x08
+#define IE1_SYNCD 0x10
+#define IE1_CLMD 0x20
+#define IE1_IDL 0x40
+#define IE1_UDRN 0x80
+
+#define IE2_CRCE 0x04
+#define IE2_OVRN 0x08
+#define IE2_RBIT 0x10
+#define IE2_ABT 0x20
+#define IE2_SHRT 0x40
+#define IE2_EOM 0x80
+
+#define IE4_RDNR 0x01
+#define IE4_RDCR 0x02
+#define IE4_TDNR 0x04
+#define IE4_TDCR 0x08
+#define IE4_OCLM 0x20
+#define IE4_CFT 0x40
+#define IE4_CGPI 0x80
+
+#define FIE_CRCEF 0x04
+#define FIE_OVRNF 0x08
+#define FIE_RBIF 0x10
+#define FIE_ABTF 0x20
+#define FIE_SHRTF 0x40
+#define FIE_EOMF 0x80
+
+#define DSR_DWE 0x01
+#define DSR_DE 0x02
+#define DSR_REF 0x04
+#define DSR_UDRF 0x04
+#define DSR_COA 0x08
+#define DSR_COF 0x10
+#define DSR_BOF 0x20
+#define DSR_EOM 0x40
+#define DSR_EOT 0x80
+
+#define DIR_REF 0x04
+#define DIR_UDRF 0x04
+#define DIR_COA 0x08
+#define DIR_COF 0x10
+#define DIR_BOF 0x20
+#define DIR_EOM 0x40
+#define DIR_EOT 0x80
+
+#define DIR_REFE 0x04
+#define DIR_UDRFE 0x04
+#define DIR_COAE 0x08
+#define DIR_COFE 0x10
+#define DIR_BOFE 0x20
+#define DIR_EOME 0x40
+#define DIR_EOTE 0x80
+
+#define DMR_CNTE 0x02
+#define DMR_NF 0x04
+#define DMR_SEOME 0x08
+#define DMR_TMOD 0x10
+
+#define DMER_DME 0x80 /* DMA Master Enable */
+
+#define DCR_SW_ABT 0x01
+#define DCR_FCT_CLR 0x02
+
+#define DCR_ABORT 0x01
+#define DCR_CLEAR_EOF 0x02
+
+#define PCR_COTE 0x80
+#define PCR_PR0 0x01
+#define PCR_PR1 0x02
+#define PCR_PR2 0x04
+#define PCR_CCC 0x08
+#define PCR_BRC 0x10
+#define PCR_OSB 0x40
+#define PCR_BURST 0x80
+
+#endif /* (__HD64572_H) */
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c
new file mode 100644
index 000000000000..d3743321a977
--- /dev/null
+++ b/drivers/net/wan/hd6457x.c
@@ -0,0 +1,853 @@
+/*
+ * Hitachi SCA HD64570 and HD64572 common driver for Linux
+ *
+ * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * Hitachi HD64572 SCA-II User's Manual
+ *
+ * We use the following SCA memory map:
+ *
+ * Packet buffer descriptor rings - starting from winbase or win0base:
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
+ *
+ * Packet data buffers - starting from winbase + buff_offset:
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
+ */
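/*
 * Worked example, not part of the patch itself: with rx_ring_buffers =
 * tx_ring_buffers = 8 and two logical channels in use, channel #1's TX
 * descriptor ring starts 24 * sizeof(pkt_desc) bytes into the window and
 * its data buffers start at buff_offset + 24 * HDLC_MAX_MRU, which is
 * exactly what desc_offset() and buffer_offset() below compute.
 */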
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/hdlc.h>
+
+#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
+ (defined (__HD64570_H) && defined (__HD64572_H))
+#error Either hd64570.h or hd64572.h must be included
+#endif
+
+#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
+#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01)
+#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
+#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
+
+#ifdef __HD64570_H /* HD64570 */
+#define sca_outa(value, reg, card) sca_outw(value, reg, card)
+#define sca_ina(reg, card) sca_inw(reg, card)
+#define writea(value, ptr) writew(value, ptr)
+
+#else /* HD64572 */
+#define sca_outa(value, reg, card) sca_outl(value, reg, card)
+#define sca_ina(reg, card) sca_inl(reg, card)
+#define writea(value, ptr) writel(value, ptr)
+#endif
+
+static inline struct net_device *port_to_dev(port_t *port)
+{
+ return port->dev;
+}
+
+static inline int sca_intr_status(card_t *card)
+{
+ u8 result = 0;
+
+#ifdef __HD64570_H /* HD64570 */
+ u8 isr0 = sca_in(ISR0, card);
+ u8 isr1 = sca_in(ISR1, card);
+
+ if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
+ if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
+ if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
+ if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
+ if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
+ if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
+
+#else /* HD64572 */
+ u32 isr0 = sca_inl(ISR0, card);
+
+ if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
+ if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
+ if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
+ if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
+ if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
+ if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
+
+#endif /* HD64570 vs HD64572 */
+
+ if (!(result & SCA_INTR_DMAC_TX(0)))
+ if (sca_in(DSR_TX(0), card) & DSR_EOM)
+ result |= SCA_INTR_DMAC_TX(0);
+ if (!(result & SCA_INTR_DMAC_TX(1)))
+ if (sca_in(DSR_TX(1), card) & DSR_EOM)
+ result |= SCA_INTR_DMAC_TX(1);
+
+ return result;
+}
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+ return dev_to_hdlc(dev)->priv;
+}
+
+static inline u16 next_desc(port_t *port, u16 desc, int transmit)
+{
+ return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
+ : port_to_card(port)->rx_ring_buffers);
+}
+
+
+
+static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
+{
+ u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
+ u16 tx_buffs = port_to_card(port)->tx_ring_buffers;
+
+ desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
+ return log_node(port) * (rx_buffs + tx_buffs) +
+ transmit * rx_buffs + desc;
+}
+
+
+
+static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+{
+ /* Descriptor offset always fits in 16 bits */
+ return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
+}
+
+
+
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, int transmit)
+{
+#ifdef PAGE0_ALWAYS_MAPPED
+ return (pkt_desc __iomem *)(win0base(port_to_card(port))
+ + desc_offset(port, desc, transmit));
+#else
+ return (pkt_desc __iomem *)(winbase(port_to_card(port))
+ + desc_offset(port, desc, transmit));
+#endif
+}
+
+
+
+static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
+{
+ return port_to_card(port)->buff_offset +
+ desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
+}
+
+
+
+static void sca_init_sync_port(port_t *port)
+{
+ card_t *card = port_to_card(port);
+ int transmit, i;
+
+ port->rxin = 0;
+ port->txin = 0;
+ port->txlast = 0;
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ openwin(card, 0);
+#endif
+
+ for (transmit = 0; transmit < 2; transmit++) {
+ u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
+ u16 buffs = transmit ? card->tx_ring_buffers
+ : card->rx_ring_buffers;
+
+ for (i = 0; i < buffs; i++) {
+ pkt_desc __iomem *desc = desc_address(port, i, transmit);
+ u16 chain_off = desc_offset(port, i + 1, transmit);
+ u32 buff_off = buffer_offset(port, i, transmit);
+
+ writea(chain_off, &desc->cp);
+ writel(buff_off, &desc->bp);
+ writew(0, &desc->len);
+ writeb(0, &desc->stat);
+ }
+
+ /* DMA disable - to halt state */
+ sca_out(0, transmit ? DSR_TX(phy_node(port)) :
+ DSR_RX(phy_node(port)), card);
+ /* software ABORT - to initial state */
+ sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
+ DCR_RX(phy_node(port)), card);
+
+#ifdef __HD64570_H
+ sca_out(0, dmac + CPB, card); /* pointer base */
+#endif
+ /* current desc addr */
+ sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
+ if (!transmit)
+ sca_outa(desc_offset(port, buffs - 1, transmit),
+ dmac + EDAL, card);
+ else
+ sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
+ card);
+
+ /* clear frame end interrupt counter */
+ sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
+ DCR_RX(phy_node(port)), card);
+
+ if (!transmit) { /* Receive */
+ /* set buffer length */
+ sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
+ /* Chain mode, Multi-frame */
+ sca_out(0x14, DMR_RX(phy_node(port)), card);
+ sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
+ card);
+ /* DMA enable */
+ sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+ } else { /* Transmit */
+ /* Chain mode, Multi-frame */
+ sca_out(0x14, DMR_TX(phy_node(port)), card);
+ /* enable underflow interrupts */
+ sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
+ }
+ }
+
+ hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
+ port_to_dev(port));
+}
+
+
+
+#ifdef NEED_SCA_MSCI_INTR
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+ u16 msci = get_msci(port);
+ card_t* card = port_to_card(port);
+ u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
+
+ /* Reset MSCI TX underrun and CDCD status bit */
+ sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);
+
+ if (stat & ST1_UDRN) {
+ struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
+ stats->tx_errors++; /* TX Underrun error detected */
+ stats->tx_fifo_errors++;
+ }
+
+ if (stat & ST1_CDCD)
+ hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
+ port_to_dev(port));
+}
+#endif
+
+
+
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
+{
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+ struct sk_buff *skb;
+ u16 len;
+ u32 buff;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ u32 maxlen;
+ u8 page;
+#endif
+
+ len = readw(&desc->len);
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ buff = buffer_offset(port, rxin, 0);
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ page = buff / winsize(card);
+ buff = buff % winsize(card);
+ maxlen = winsize(card) - buff;
+
+ openwin(card, page);
+
+ if (len > maxlen) {
+ memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
+ openwin(card, page + 1);
+ memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
+ } else
+#endif
+ memcpy_fromio(skb->data, winbase(card) + buff, len);
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ /* select pkt_desc table page back */
+ openwin(card, 0);
+#endif
+ skb_put(skb, len);
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
+ debug_frame(skb);
+#endif
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+ skb->protocol = hdlc_type_trans(skb, dev);
+ netif_rx(skb);
+}
+
+
+
+/* Receive DMA interrupt service */
+static inline void sca_rx_intr(port_t *port)
+{
+ u16 dmac = get_dmac_rx(port);
+ card_t *card = port_to_card(port);
+ u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
+ struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
+
+ /* Reset DSR status bits */
+ sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+ DSR_RX(phy_node(port)), card);
+
+ if (stat & DSR_BOF)
+ stats->rx_over_errors++; /* Dropped one or more frames */
+
+ while (1) {
+ u32 desc_off = desc_offset(port, port->rxin, 0);
+ pkt_desc __iomem *desc;
+ u32 cda = sca_ina(dmac + CDAL, card);
+
+ if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+ break; /* No frame received */
+
+ desc = desc_address(port, port->rxin, 0);
+ stat = readb(&desc->stat);
+ if (!(stat & ST_RX_EOM))
+ port->rxpart = 1; /* partial frame received */
+ else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+ stats->rx_errors++;
+ if (stat & ST_RX_OVERRUN) stats->rx_fifo_errors++;
+ else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+ ST_RX_RESBIT)) || port->rxpart)
+ stats->rx_frame_errors++;
+ else if (stat & ST_RX_CRC) stats->rx_crc_errors++;
+ if (stat & ST_RX_EOM)
+ port->rxpart = 0; /* received last fragment */
+ } else
+ sca_rx(card, port, desc, port->rxin);
+
+ /* Set new error descriptor address */
+ sca_outa(desc_off, dmac + EDAL, card);
+ port->rxin = next_desc(port, port->rxin, 0);
+ }
+
+ /* make sure RX DMA is enabled */
+ sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+}
+
+
+
+/* Transmit DMA interrupt service */
+static inline void sca_tx_intr(port_t *port)
+{
+ struct net_device *dev = port_to_dev(port);
+ struct net_device_stats *stats = hdlc_stats(dev);
+ u16 dmac = get_dmac_tx(port);
+ card_t* card = port_to_card(port);
+ u8 stat;
+
+ spin_lock(&port->lock);
+
+ stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */
+
+ /* Reset DSR status bits */
+ sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+ DSR_TX(phy_node(port)), card);
+
+ while (1) {
+ pkt_desc __iomem *desc;
+
+ u32 desc_off = desc_offset(port, port->txlast, 1);
+ u32 cda = sca_ina(dmac + CDAL, card);
+ if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+ break; /* Transmitter is or will be sending this frame */
+
+ desc = desc_address(port, port->txlast, 1);
+ stats->tx_packets++;
+ stats->tx_bytes += readw(&desc->len);
+ writeb(0, &desc->stat); /* Free descriptor */
+ port->txlast = next_desc(port, port->txlast, 1);
+ }
+
+ netif_wake_queue(dev);
+ spin_unlock(&port->lock);
+}
+
+
+
+static irqreturn_t sca_intr(int irq, void* dev_id, struct pt_regs *regs)
+{
+ card_t *card = dev_id;
+ int i;
+ u8 stat;
+ int handled = 0;
+
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ u8 page = sca_get_page(card);
+#endif
+
+ while((stat = sca_intr_status(card)) != 0) {
+ handled = 1;
+ for (i = 0; i < 2; i++) {
+ port_t *port = get_port(card, i);
+ if (port) {
+ if (stat & SCA_INTR_MSCI(i))
+ sca_msci_intr(port);
+
+ if (stat & SCA_INTR_DMAC_RX(i))
+ sca_rx_intr(port);
+
+ if (stat & SCA_INTR_DMAC_TX(i))
+ sca_tx_intr(port);
+ }
+ }
+ }
+
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ openwin(card, page); /* Restore original page */
+#endif
+ return IRQ_RETVAL(handled);
+}
+
+
+
+static void sca_set_port(port_t *port)
+{
+ card_t* card = port_to_card(port);
+ u16 msci = get_msci(port);
+ u8 md2 = sca_in(msci + MD2, card);
+ unsigned int tmc, br = 10, brv = 1024;
+
+
+ if (port->settings.clock_rate > 0) {
+ /* Try lower br for better accuracy*/
+ do {
+ br--;
+ brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+ /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+ tmc = CLOCK_BASE / brv / port->settings.clock_rate;
+ }while (br > 1 && tmc <= 128);
+
+ if (tmc < 1) {
+ tmc = 1;
+ br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
+ brv = 1;
+ } else if (tmc > 255)
+ tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+ port->settings.clock_rate = CLOCK_BASE / brv / tmc;
+ } else {
+ br = 9; /* Minimum clock rate */
+ tmc = 256; /* 8bit = 0 */
+ port->settings.clock_rate = CLOCK_BASE / (256 * 512);
+ }
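	/*
	 * Worked example, not part of this patch (CLOCK_BASE == 9830400 Hz
	 * assumed purely for illustration): asking for clock_rate = 9600
	 * makes the loop stop at br = 2 (brv = 4) with tmc = 256 (programmed
	 * as 0), and the reported rate 9830400 / 4 / 256 is exactly 9600 bps.
	 */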
+
+ port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+ port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+ port->tmc = tmc;
+
+ /* baud divisor - time constant*/
+#ifdef __HD64570_H
+ sca_out(port->tmc, msci + TMC, card);
+#else
+ sca_out(port->tmc, msci + TMCR, card);
+ sca_out(port->tmc, msci + TMCT, card);
+#endif
+
+ /* Set BRG bits */
+ sca_out(port->rxs, msci + RXS, card);
+ sca_out(port->txs, msci + TXS, card);
+
+ if (port->settings.loopback)
+ md2 |= MD2_LOOPBACK;
+ else
+ md2 &= ~MD2_LOOPBACK;
+
+ sca_out(md2, msci + MD2, card);
+
+}
+
+
+
+static void sca_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t* card = port_to_card(port);
+ u16 msci = get_msci(port);
+ u8 md0, md2;
+
+ switch(port->encoding) {
+ case ENCODING_NRZ: md2 = MD2_NRZ; break;
+ case ENCODING_NRZI: md2 = MD2_NRZI; break;
+ case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
+ case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
+ default: md2 = MD2_MANCHESTER;
+ }
+
+ if (port->settings.loopback)
+ md2 |= MD2_LOOPBACK;
+
+ switch(port->parity) {
+ case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
+ case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
+#ifdef __HD64570_H
+ case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
+#else
+ case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
+#endif
+ case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
+ default: md0 = MD0_HDLC | MD0_CRC_NONE;
+ }
+
+ sca_out(CMD_RESET, msci + CMD, card);
+ sca_out(md0, msci + MD0, card);
+ sca_out(0x00, msci + MD1, card); /* no address field check */
+ sca_out(md2, msci + MD2, card);
+ sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+#ifdef __HD64570_H
+ sca_out(CTL_IDLE, msci + CTL, card);
+#else
+ /* Skip the rest of underrun frame */
+ sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
+#endif
+
+#ifdef __HD64570_H
+ /* Allow at least 8 bytes before requesting RX DMA operation */
+ /* TX with higher priority and possibly with shorter transfers */
+ sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
+ sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
+ sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
+#else
+ sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
+ sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
+ sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
+ sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
+ sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
+#endif
+
+/* We're using the following interrupts:
+ - TXINT (DMAC completed all transmissions, underrun or DCD change)
+ - all DMA interrupts
+*/
+
+ hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev);
+
+#ifdef __HD64570_H
+ /* MSCI TX INT and RX INT A IRQ enable */
+ sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
+ sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
+ sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
+ IER0, card); /* TXINT and RXINT */
+ /* enable DMA IRQ */
+ sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
+ IER1, card);
+#else
+ /* MSCI TXINT and RXINTA interrupt enable */
+ sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
+ card);
+ /* DMA & MSCI IRQ enable */
+ sca_outl(sca_inl(IER0, card) |
+ (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
+#endif
+
+#ifdef __HD64570_H
+ sca_out(port->tmc, msci + TMC, card); /* Restore registers */
+#else
+ sca_out(port->tmc, msci + TMCR, card);
+ sca_out(port->tmc, msci + TMCT, card);
+#endif
+ sca_out(port->rxs, msci + RXS, card);
+ sca_out(port->txs, msci + TXS, card);
+ sca_out(CMD_TX_ENABLE, msci + CMD, card);
+ sca_out(CMD_RX_ENABLE, msci + CMD, card);
+
+ netif_start_queue(dev);
+}
+
+
+
+static void sca_close(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t* card = port_to_card(port);
+
+ /* reset channel */
+ sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
+#ifdef __HD64570_H
+ /* disable MSCI interrupts */
+ sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
+ IER0, card);
+ /* disable DMA interrupts */
+ sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
+ IER1, card);
+#else
+ /* disable DMA & MSCI IRQ */
+ sca_outl(sca_inl(IER0, card) &
+ (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
+#endif
+ netif_stop_queue(dev);
+}
+
+
+
+static int sca_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK &&
+ encoding != ENCODING_FM_SPACE &&
+ encoding != ENCODING_MANCHESTER)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC16_PR0 &&
+ parity != PARITY_CRC16_PR1 &&
+#ifdef __HD64570_H
+ parity != PARITY_CRC16_PR0_CCITT &&
+#else
+ parity != PARITY_CRC32_PR1_CCITT &&
+#endif
+ parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+
+ dev_to_port(dev)->encoding = encoding;
+ dev_to_port(dev)->parity = parity;
+ return 0;
+}
+
+
+
+#ifdef DEBUG_RINGS
+static void sca_dump_rings(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t *card = port_to_card(port);
+ u16 cnt;
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ u8 page;
+#endif
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ page = sca_get_page(card);
+ openwin(card, 0);
+#endif
+
+ printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
+ sca_ina(get_dmac_rx(port) + CDAL, card),
+ sca_ina(get_dmac_rx(port) + EDAL, card),
+ sca_in(DSR_RX(phy_node(port)), card), port->rxin,
+ sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in");
+ for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
+ printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+
+ printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+ "last=%u %sactive",
+ sca_ina(get_dmac_tx(port) + CDAL, card),
+ sca_ina(get_dmac_tx(port) + EDAL, card),
+ sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
+ sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
+
+ for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
+ printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+ printk("\n");
+
+ printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
+ "ST: %02x %02x %02x %02x"
+#ifdef __HD64572_H
+ " %02x"
+#endif
+ ", FST: %02x CST: %02x %02x\n",
+ sca_in(get_msci(port) + MD0, card),
+ sca_in(get_msci(port) + MD1, card),
+ sca_in(get_msci(port) + MD2, card),
+ sca_in(get_msci(port) + ST0, card),
+ sca_in(get_msci(port) + ST1, card),
+ sca_in(get_msci(port) + ST2, card),
+ sca_in(get_msci(port) + ST3, card),
+#ifdef __HD64572_H
+ sca_in(get_msci(port) + ST4, card),
+#endif
+ sca_in(get_msci(port) + FST, card),
+ sca_in(get_msci(port) + CST0, card),
+ sca_in(get_msci(port) + CST1, card));
+
+#ifdef __HD64572_H
+ printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
+ sca_inl(ISR0, card), sca_inl(ISR1, card));
+#else
+ printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
+ sca_in(ISR1, card), sca_in(ISR2, card));
+#endif
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ openwin(card, page); /* Restore original page */
+#endif
+}
+#endif /* DEBUG_RINGS */
+
+
+
+static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ card_t *card = port_to_card(port);
+ pkt_desc __iomem *desc;
+ u32 buff, len;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ u8 page;
+ u32 maxlen;
+#endif
+
+ spin_lock_irq(&port->lock);
+
+ desc = desc_address(port, port->txin + 1, 1);
+ if (readb(&desc->stat)) { /* allow 1 packet gap */
+ /* should never happen - previous xmit should stop queue */
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+ netif_stop_queue(dev);
+ spin_unlock_irq(&port->lock);
+ return 1; /* request packet to be queued */
+ }
+
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+ debug_frame(skb);
+#endif
+
+ desc = desc_address(port, port->txin, 1);
+ buff = buffer_offset(port, port->txin, 1);
+ len = skb->len;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ page = buff / winsize(card);
+ buff = buff % winsize(card);
+ maxlen = winsize(card) - buff;
+
+ openwin(card, page);
+ if (len > maxlen) {
+ memcpy_toio(winbase(card) + buff, skb->data, maxlen);
+ openwin(card, page + 1);
+ memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
+ }
+ else
+#endif
+ memcpy_toio(winbase(card) + buff, skb->data, len);
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+ openwin(card, 0); /* select pkt_desc table page back */
+#endif
+ writew(len, &desc->len);
+ writeb(ST_TX_EOM, &desc->stat);
+ dev->trans_start = jiffies;
+
+ port->txin = next_desc(port, port->txin, 1);
+ sca_outa(desc_offset(port, port->txin, 1),
+ get_dmac_tx(port) + EDAL, card);
+
+ sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
+
+ desc = desc_address(port, port->txin + 1, 1);
+ if (readb(&desc->stat)) /* allow 1 packet gap */
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&port->lock);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+
+#ifdef NEED_DETECT_RAM
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
+{
+ /* Round RAM size to 32 bits, fill from end to start */
+ u32 i = ramsize &= ~3;
+
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ u32 size = winsize(card);
+
+ openwin(card, (i - 4) / size); /* select last window */
+#endif
+ do {
+ i -= 4;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ if ((i + 4) % size == 0)
+ openwin(card, i / size);
+ writel(i ^ 0x12345678, rambase + i % size);
+#else
+ writel(i ^ 0x12345678, rambase + i);
+#endif
+ }while (i > 0);
+
+ for (i = 0; i < ramsize ; i += 4) {
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+ if (i % size == 0)
+ openwin(card, i / size);
+
+ if (readl(rambase + i % size) != (i ^ 0x12345678))
+ break;
+#else
+ if (readl(rambase + i) != (i ^ 0x12345678))
+ break;
+#endif
+ }
+
+ return i;
+}
+#endif /* NEED_DETECT_RAM */
+
+
+
+static void __devinit sca_init(card_t *card, int wait_states)
+{
+ sca_out(wait_states, WCRL, card); /* Wait Control */
+ sca_out(wait_states, WCRM, card);
+ sca_out(wait_states, WCRH, card);
+
+ sca_out(0, DMER, card); /* DMA Master disable */
+ sca_out(0x03, PCR, card); /* DMA priority */
+ sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+ sca_out(0, DSR_TX(0), card);
+ sca_out(0, DSR_RX(1), card);
+ sca_out(0, DSR_TX(1), card);
+ sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
new file mode 100644
index 000000000000..c1b6896d7007
--- /dev/null
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -0,0 +1,330 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Cisco HDLC support
+ *
+ * Copyright (C) 2000 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+#undef DEBUG_HARD_HEADER
+
+#define CISCO_MULTICAST 0x8F /* Cisco multicast address */
+#define CISCO_UNICAST 0x0F /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
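/*
 * Wire-format sketch, not part of this patch: a keepalive request as built
 * below is an hdlc_header { address 0x8F, control 0x00, protocol 0x8035 }
 * followed by a cisco_packet { type, par1 (our tx sequence), par2 (last rx
 * sequence), rel, time }, with the 32-bit fields in network byte order.
 */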
+
+static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
+ u16 type, void *daddr, void *saddr,
+ unsigned int len)
+{
+ hdlc_header *data;
+#ifdef DEBUG_HARD_HEADER
+ printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+#endif
+
+ skb_push(skb, sizeof(hdlc_header));
+ data = (hdlc_header*)skb->data;
+ if (type == CISCO_KEEPALIVE)
+ data->address = CISCO_MULTICAST;
+ else
+ data->address = CISCO_UNICAST;
+ data->control = 0;
+ data->protocol = htons(type);
+
+ return sizeof(hdlc_header);
+}
+
+
+
+static void cisco_keepalive_send(struct net_device *dev, u32 type,
+ u32 par1, u32 par2)
+{
+ struct sk_buff *skb;
+ cisco_packet *data;
+
+ skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
+ if (!skb) {
+ printk(KERN_WARNING
+ "%s: Memory squeeze on cisco_keepalive_send()\n",
+ dev->name);
+ return;
+ }
+ skb_reserve(skb, 4);
+ cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
+ data = (cisco_packet*)skb->tail;
+
+ data->type = htonl(type);
+ data->par1 = htonl(par1);
+ data->par2 = htonl(par2);
+ data->rel = 0xFFFF;
+ /* we will need do_div here if 1000 % HZ != 0 */
+ data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
+
+ skb_put(skb, sizeof(cisco_packet));
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb->nh.raw = skb->data;
+
+ dev_queue_xmit(skb);
+}
+
+
+
+static unsigned short cisco_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ hdlc_header *data = (hdlc_header*)skb->data;
+
+ if (skb->len < sizeof(hdlc_header))
+ return __constant_htons(ETH_P_HDLC);
+
+ if (data->address != CISCO_MULTICAST &&
+ data->address != CISCO_UNICAST)
+ return __constant_htons(ETH_P_HDLC);
+
+ switch(data->protocol) {
+ case __constant_htons(ETH_P_IP):
+ case __constant_htons(ETH_P_IPX):
+ case __constant_htons(ETH_P_IPV6):
+ skb_pull(skb, sizeof(hdlc_header));
+ return data->protocol;
+ default:
+ return __constant_htons(ETH_P_HDLC);
+ }
+}
+
+
+static int cisco_rx(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ hdlc_header *data = (hdlc_header*)skb->data;
+ cisco_packet *cisco_data;
+ struct in_device *in_dev;
+ u32 addr, mask;
+
+ if (skb->len < sizeof(hdlc_header))
+ goto rx_error;
+
+ if (data->address != CISCO_MULTICAST &&
+ data->address != CISCO_UNICAST)
+ goto rx_error;
+
+ switch(ntohs(data->protocol)) {
+ case CISCO_SYS_INFO:
+ /* Packet is not needed, drop it. */
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+
+ case CISCO_KEEPALIVE:
+ if (skb->len != sizeof(hdlc_header) + CISCO_PACKET_LEN &&
+ skb->len != sizeof(hdlc_header) + CISCO_BIG_PACKET_LEN) {
+ printk(KERN_INFO "%s: Invalid length of Cisco "
+ "control packet (%d bytes)\n",
+ dev->name, skb->len);
+ goto rx_error;
+ }
+
+ cisco_data = (cisco_packet*)(skb->data + sizeof(hdlc_header));
+
+ switch(ntohl (cisco_data->type)) {
+ case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
+ in_dev = dev->ip_ptr;
+ addr = 0;
+ mask = ~0; /* is the mask correct? */
+
+ if (in_dev != NULL) {
+ struct in_ifaddr **ifap = &in_dev->ifa_list;
+
+ while (*ifap != NULL) {
+ if (strcmp(dev->name,
+ (*ifap)->ifa_label) == 0) {
+ addr = (*ifap)->ifa_local;
+ mask = (*ifap)->ifa_mask;
+ break;
+ }
+ ifap = &(*ifap)->ifa_next;
+ }
+
+ cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
+ addr, mask);
+ }
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+
+ case CISCO_ADDR_REPLY:
+ printk(KERN_INFO "%s: Unexpected Cisco IP address "
+ "reply\n", dev->name);
+ goto rx_error;
+
+ case CISCO_KEEPALIVE_REQ:
+ hdlc->state.cisco.rxseq = ntohl(cisco_data->par1);
+ if (hdlc->state.cisco.request_sent &&
+ ntohl(cisco_data->par2)==hdlc->state.cisco.txseq) {
+ hdlc->state.cisco.last_poll = jiffies;
+ if (!hdlc->state.cisco.up) {
+ u32 sec, min, hrs, days;
+ sec = ntohl(cisco_data->time) / 1000;
+ min = sec / 60; sec -= min * 60;
+ hrs = min / 60; min -= hrs * 60;
+ days = hrs / 24; hrs -= days * 24;
+ printk(KERN_INFO "%s: Link up (peer "
+ "uptime %ud%uh%um%us)\n",
+ dev->name, days, hrs,
+ min, sec);
+ netif_carrier_on(dev);
+ hdlc->state.cisco.up = 1;
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+ } /* switch(keepalive type) */
+ } /* switch(protocol) */
+
+ printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
+ data->protocol);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+
+ rx_error:
+ hdlc->stats.rx_errors++; /* Mark error */
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void cisco_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ if (hdlc->state.cisco.up &&
+ time_after(jiffies, hdlc->state.cisco.last_poll +
+ hdlc->state.cisco.settings.timeout * HZ)) {
+ hdlc->state.cisco.up = 0;
+ printk(KERN_INFO "%s: Link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+
+ cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
+ ++hdlc->state.cisco.txseq,
+ hdlc->state.cisco.rxseq);
+ hdlc->state.cisco.request_sent = 1;
+ hdlc->state.cisco.timer.expires = jiffies +
+ hdlc->state.cisco.settings.interval * HZ;
+ hdlc->state.cisco.timer.function = cisco_timer;
+ hdlc->state.cisco.timer.data = arg;
+ add_timer(&hdlc->state.cisco.timer);
+}
+
+
+
+static void cisco_start(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ hdlc->state.cisco.up = 0;
+ hdlc->state.cisco.request_sent = 0;
+ hdlc->state.cisco.txseq = hdlc->state.cisco.rxseq = 0;
+
+ init_timer(&hdlc->state.cisco.timer);
+ hdlc->state.cisco.timer.expires = jiffies + HZ; /*First poll after 1s*/
+ hdlc->state.cisco.timer.function = cisco_timer;
+ hdlc->state.cisco.timer.data = (unsigned long)dev;
+ add_timer(&hdlc->state.cisco.timer);
+}
+
+
+
+static void cisco_stop(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ del_timer_sync(&hdlc->state.cisco.timer);
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ hdlc->state.cisco.up = 0;
+ hdlc->state.cisco.request_sent = 0;
+}
+
+
+
+int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
+ const size_t size = sizeof(cisco_proto);
+ cisco_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_CISCO;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(cisco_s, &hdlc->state.cisco.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_CISCO:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, cisco_s, size))
+ return -EFAULT;
+
+ if (new_settings.interval < 1 ||
+ new_settings.timeout < 2)
+ return -EINVAL;
+
+ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.cisco.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.start = cisco_start;
+ hdlc->proto.stop = cisco_stop;
+ hdlc->proto.netif_rx = cisco_rx;
+ hdlc->proto.type_trans = cisco_type_trans;
+ hdlc->proto.id = IF_PROTO_CISCO;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = cisco_hard_header;
+ dev->hard_header_cache = NULL;
+ dev->type = ARPHRD_CISCO;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
new file mode 100644
index 000000000000..7f450b51a6cb
--- /dev/null
+++ b/drivers/net/wan/hdlc_fr.c
@@ -0,0 +1,1237 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Frame Relay support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+
+ Theory of PVC state
+
+ DCE mode:
+
+ (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
+ 0,x -> 1,1 if "link reliable" when sending FULL STATUS
+ 1,1 -> 1,0 if received FULL STATUS ACK
+
+ (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
+ -> 1 when "PVC up" and (exist,new) = 1,0
+
+ DTE mode:
+ (exist,new,active) = FULL STATUS if "link reliable"
+ = 0, 0, 0 if "link unreliable"
+ No LMI:
+ active = open and "link reliable"
+ exist = new = not used
+
+*/
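+
+/*
+ * Illustrative DCE-side walk-through of the rules above (no additional
+ * behaviour): a freshly created PVC starts at exist,new = 0,0.  Once the
+ * link is reliable, the next FULL STATUS we send advertises it as 1,1;
+ * when the DTE acknowledges that FULL STATUS, new drops back to 0.  Only
+ * then, and only while the PVC interface is up, does active become 1.
+ */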
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/random.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
+
+#undef DEBUG_PKT
+#undef DEBUG_ECN
+#undef DEBUG_LINK
+
+#define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */
+
+#define PVC_STATE_NEW 0x01
+#define PVC_STATE_ACTIVE 0x02
+#define PVC_STATE_FECN 0x08 /* FECN condition */
+#define PVC_STATE_BECN 0x10 /* BECN condition */
+
+
+#define FR_UI 0x03
+#define FR_PAD 0x00
+
+#define NLPID_IP 0xCC
+#define NLPID_IPV6 0x8E
+#define NLPID_SNAP 0x80
+#define NLPID_PAD 0x00
+#define NLPID_Q933 0x08
+
+
+#define LMI_DLCI 0 /* LMI DLCI */
+#define LMI_PROTO 0x08
+#define LMI_CALLREF 0x00 /* Call Reference */
+#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI lockshift */
+#define LMI_REPTYPE 1 /* report type */
+#define LMI_CCITT_REPTYPE 0x51
+#define LMI_ALIVE 3 /* keep alive */
+#define LMI_CCITT_ALIVE 0x53
+#define LMI_PVCSTAT 7 /* pvc status */
+#define LMI_CCITT_PVCSTAT 0x57
+#define LMI_FULLREP 0 /* full report */
+#define LMI_INTEGRITY 1 /* link integrity report */
+#define LMI_SINGLE 2 /* single pvc report */
+#define LMI_STATUS_ENQUIRY 0x75
+#define LMI_STATUS 0x7D /* reply */
+
+#define LMI_REPT_LEN 1 /* report type element length */
+#define LMI_INTEG_LEN 2 /* link integrity element length */
+
+#define LMI_LENGTH 13 /* standard LMI frame length */
+#define LMI_ANSI_LENGTH 14
+
+
+typedef struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned ea1: 1;
+ unsigned cr: 1;
+ unsigned dlcih: 6;
+
+ unsigned ea2: 1;
+ unsigned de: 1;
+ unsigned becn: 1;
+ unsigned fecn: 1;
+ unsigned dlcil: 4;
+#else
+ unsigned dlcih: 6;
+ unsigned cr: 1;
+ unsigned ea1: 1;
+
+ unsigned dlcil: 4;
+ unsigned fecn: 1;
+ unsigned becn: 1;
+ unsigned de: 1;
+ unsigned ea2: 1;
+#endif
+}__attribute__ ((packed)) fr_hdr;
+
+
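+/*
+ * Q.922 address helpers: the 10-bit DLCI is split across the two address
+ * octets - the upper 6 bits sit in bits 7..2 of the first octet, the
+ * lower 4 bits in bits 7..4 of the second octet, whose EA bit (bit 0)
+ * is set to mark the end of the address field.
+ */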
+static inline u16 q922_to_dlci(u8 *hdr)
+{
+ return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
+}
+
+
+
+static inline void dlci_to_q922(u8 *hdr, u16 dlci)
+{
+ hdr[0] = (dlci >> 2) & 0xFC;
+ hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
+}
+
+
+
+static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
+{
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ if (pvc->dlci == dlci)
+ return pvc;
+ if (pvc->dlci > dlci)
+ return NULL; /* the list is sorted */
+ pvc = pvc->next;
+ }
+
+ return NULL;
+}
+
+
+static inline pvc_device* add_pvc(struct net_device *dev, u16 dlci)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc;
+
+ while (*pvc_p) {
+ if ((*pvc_p)->dlci == dlci)
+ return *pvc_p;
+ if ((*pvc_p)->dlci > dlci)
+ break; /* the list is sorted */
+ pvc_p = &(*pvc_p)->next;
+ }
+
+ pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
+ if (!pvc)
+ return NULL;
+
+ memset(pvc, 0, sizeof(pvc_device));
+ pvc->dlci = dlci;
+ pvc->master = dev;
+ pvc->next = *pvc_p; /* Put it in the chain */
+ *pvc_p = pvc;
+ return pvc;
+}
+
+
+static inline int pvc_is_used(pvc_device *pvc)
+{
+ return pvc->main != NULL || pvc->ether != NULL;
+}
+
+
+static inline void pvc_carrier(int on, pvc_device *pvc)
+{
+ if (on) {
+ if (pvc->main)
+ if (!netif_carrier_ok(pvc->main))
+ netif_carrier_on(pvc->main);
+ if (pvc->ether)
+ if (!netif_carrier_ok(pvc->ether))
+ netif_carrier_on(pvc->ether);
+ } else {
+ if (pvc->main)
+ if (netif_carrier_ok(pvc->main))
+ netif_carrier_off(pvc->main);
+ if (pvc->ether)
+ if (netif_carrier_ok(pvc->ether))
+ netif_carrier_off(pvc->ether);
+ }
+}
+
+
+static inline void delete_unused_pvcs(hdlc_device *hdlc)
+{
+ pvc_device **pvc_p = &hdlc->state.fr.first_pvc;
+
+ while (*pvc_p) {
+ if (!pvc_is_used(*pvc_p)) {
+ pvc_device *pvc = *pvc_p;
+ *pvc_p = pvc->next;
+ kfree(pvc);
+ continue;
+ }
+ pvc_p = &(*pvc_p)->next;
+ }
+}
+
+
+static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
+{
+ if (type == ARPHRD_ETHER)
+ return &pvc->ether;
+ else
+ return &pvc->main;
+}
+
+
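+/*
+ * PVC status information element helpers: octets 0 and 1 carry the
+ * 10-bit DLCI (upper 6 bits in the low bits of octet 0, lower 4 bits in
+ * bits 6..3 of octet 1), while octet 2 carries the "new" (0x08) and
+ * "active" (0x02) flags used by the LMI code below.
+ */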
+static inline u16 status_to_dlci(u8 *status, int *active, int *new)
+{
+ *new = (status[2] & 0x08) ? 1 : 0;
+ *active = (status[2] & 0x02) ? 1 : 0;
+
+ return ((status[0] & 0x3F) << 4) | ((status[1] & 0x78) >> 3);
+}
+
+
+static inline void dlci_to_status(u16 dlci, u8 *status, int active, int new)
+{
+ status[0] = (dlci >> 4) & 0x3F;
+ status[1] = ((dlci << 3) & 0x78) | 0x80;
+ status[2] = 0x80;
+
+ if (new)
+ status[2] |= 0x08;
+ else if (active)
+ status[2] |= 0x02;
+}
+
+
+
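+/*
+ * Prepend the Frame Relay encapsulation: Q.922 address and UI control,
+ * followed by either a one-byte NLPID (IP, IPv6, LMI) or a padded SNAP
+ * header.  Bridged Ethernet uses OUI 00-80-C2 with PID 0x0007 (frame
+ * without preserved FCS); any other protocol goes out as SNAP with a
+ * zero OUI and its Ethertype as the PID.
+ */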
+static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+{
+ u16 head_len;
+ struct sk_buff *skb = *skb_p;
+
+ switch (skb->protocol) {
+ case __constant_ntohs(ETH_P_IP):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_IP;
+ break;
+
+ case __constant_ntohs(ETH_P_IPV6):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_IPV6;
+ break;
+
+ case __constant_ntohs(LMI_PROTO):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = LMI_PROTO;
+ break;
+
+ case __constant_ntohs(ETH_P_802_3):
+ head_len = 10;
+ if (skb_headroom(skb) < head_len) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb,
+ head_len);
+ if (!skb2)
+ return -ENOBUFS;
+ dev_kfree_skb(skb);
+ skb = *skb_p = skb2;
+ }
+ skb_push(skb, head_len);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ skb->data[5] = FR_PAD;
+ skb->data[6] = 0x80;
+ skb->data[7] = 0xC2;
+ skb->data[8] = 0x00;
+ skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
+ break;
+
+ default:
+ head_len = 10;
+ skb_push(skb, head_len);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ skb->data[5] = FR_PAD;
+ skb->data[6] = FR_PAD;
+ skb->data[7] = FR_PAD;
+ *(u16*)(skb->data + 8) = skb->protocol;
+ }
+
+ dlci_to_q922(skb->data, dlci);
+ skb->data[2] = FR_UI;
+ return 0;
+}
+
+
+
+static int pvc_open(struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+
+ if ((pvc->master->flags & IFF_UP) == 0)
+ return -EIO; /* Master must be UP in order to activate PVC */
+
+ if (pvc->open_count++ == 0) {
+ hdlc_device *hdlc = dev_to_hdlc(pvc->master);
+ if (hdlc->state.fr.settings.lmi == LMI_NONE)
+ pvc->state.active = hdlc->carrier;
+
+ pvc_carrier(pvc->state.active, pvc);
+ hdlc->state.fr.dce_changed = 1;
+ }
+ return 0;
+}
+
+
+
+static int pvc_close(struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+
+ if (--pvc->open_count == 0) {
+ hdlc_device *hdlc = dev_to_hdlc(pvc->master);
+ if (hdlc->state.fr.settings.lmi == LMI_NONE)
+ pvc->state.active = 0;
+
+ if (hdlc->state.fr.settings.dce) {
+ hdlc->state.fr.dce_changed = 1;
+ pvc->state.active = 0;
+ }
+ }
+ return 0;
+}
+
+
+
+int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+ fr_proto_pvc_info info;
+
+ if (ifr->ifr_settings.type == IF_GET_PROTO) {
+ if (dev->type == ARPHRD_ETHER)
+ ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
+ else
+ ifr->ifr_settings.type = IF_PROTO_FR_PVC;
+
+ if (ifr->ifr_settings.size < sizeof(info)) {
+ /* data size wanted */
+ ifr->ifr_settings.size = sizeof(info);
+ return -ENOBUFS;
+ }
+
+ info.dlci = pvc->dlci;
+ memcpy(info.master, pvc->master->name, IFNAMSIZ);
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
+ &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+
+static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+
+
+static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+ struct net_device_stats *stats = pvc_get_stats(dev);
+
+ if (pvc->state.active) {
+ if (dev->type == ARPHRD_ETHER) {
+ int pad = ETH_ZLEN - skb->len;
+ if (pad > 0) { /* Pad the frame with zeros */
+ int len = skb->len;
+ if (skb_tailroom(skb) < pad)
+ if (pskb_expand_head(skb, 0, pad,
+ GFP_ATOMIC)) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ skb_put(skb, pad);
+ memset(skb->data + len, 0, pad);
+ }
+ skb->protocol = __constant_htons(ETH_P_802_3);
+ }
+ if (!fr_hard_header(&skb, pvc->dlci)) {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ if (pvc->state.fecn) /* TX Congestion counter */
+ stats->tx_compressed++;
+ skb->dev = pvc->master;
+ dev_queue_xmit(skb);
+ return 0;
+ }
+ }
+
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+
+static int pvc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+
+static inline void fr_log_dlci_active(pvc_device *pvc)
+{
+ printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
+ pvc->master->name,
+ pvc->dlci,
+ pvc->main ? pvc->main->name : "",
+ pvc->main && pvc->ether ? " " : "",
+ pvc->ether ? pvc->ether->name : "",
+ pvc->state.new ? " new" : "",
+ !pvc->state.exist ? "deleted" :
+ pvc->state.active ? "active" : "inactive");
+}
+
+
+
+static inline u8 fr_lmi_nextseq(u8 x)
+{
+ x++;
+ return x ? x : 1;
+}
+
+
+
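+/*
+ * Build and queue an LMI frame: call reference, STATUS (DCE) or STATUS
+ * ENQUIRY (DTE) message type, an optional ANSI lockshift octet, the
+ * report type IE (full report or link integrity only), the link
+ * integrity IE carrying our tx/rx sequence numbers and, for a DCE full
+ * report, one PVC status IE per known PVC.
+ */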
+static void fr_lmi_send(struct net_device *dev, int fullrep)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct sk_buff *skb;
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+ int len = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? LMI_ANSI_LENGTH
+ : LMI_LENGTH;
+ int stat_len = 3;
+ u8 *data;
+ int i = 0;
+
+ if (hdlc->state.fr.settings.dce && fullrep) {
+ len += hdlc->state.fr.dce_pvc_count * (2 + stat_len);
+ if (len > HDLC_MAX_MRU) {
+ printk(KERN_WARNING "%s: Too many PVCs while sending "
+ "LMI full report\n", dev->name);
+ return;
+ }
+ }
+
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
+ dev->name);
+ return;
+ }
+ memset(skb->data, 0, len);
+ skb_reserve(skb, 4);
+ skb->protocol = __constant_htons(LMI_PROTO);
+ fr_hard_header(&skb, LMI_DLCI);
+ data = skb->tail;
+ data[i++] = LMI_CALLREF;
+ data[i++] = hdlc->state.fr.settings.dce
+ ? LMI_STATUS : LMI_STATUS_ENQUIRY;
+ if (hdlc->state.fr.settings.lmi == LMI_ANSI)
+ data[i++] = LMI_ANSI_LOCKSHIFT;
+ data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_REPTYPE : LMI_REPTYPE;
+ data[i++] = LMI_REPT_LEN;
+ data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
+
+ data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_ALIVE : LMI_ALIVE;
+ data[i++] = LMI_INTEG_LEN;
+ data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq);
+ data[i++] = hdlc->state.fr.rxseq;
+
+ if (hdlc->state.fr.settings.dce && fullrep) {
+ while (pvc) {
+ data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT;
+ data[i++] = stat_len;
+
+ /* LMI start/restart */
+ if (hdlc->state.fr.reliable && !pvc->state.exist) {
+ pvc->state.exist = pvc->state.new = 1;
+ fr_log_dlci_active(pvc);
+ }
+
+ /* ifconfig PVC up */
+ if (pvc->open_count && !pvc->state.active &&
+ pvc->state.exist && !pvc->state.new) {
+ pvc_carrier(1, pvc);
+ pvc->state.active = 1;
+ fr_log_dlci_active(pvc);
+ }
+
+ dlci_to_status(pvc->dlci, data + i,
+ pvc->state.active, pvc->state.new);
+ i += stat_len;
+ pvc = pvc->next;
+ }
+ }
+
+ skb_put(skb, i);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb->nh.raw = skb->data;
+
+ dev_queue_xmit(skb);
+}
+
+
+
+static void fr_set_link_state(int reliable, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ hdlc->state.fr.reliable = reliable;
+ if (reliable) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ hdlc->state.fr.n391cnt = 0; /* Request full status */
+ hdlc->state.fr.dce_changed = 1;
+
+ if (hdlc->state.fr.settings.lmi == LMI_NONE) {
+ while (pvc) { /* Activate all PVCs */
+ pvc_carrier(1, pvc);
+ pvc->state.exist = pvc->state.active = 1;
+ pvc->state.new = 0;
+ pvc = pvc->next;
+ }
+ }
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+
+ while (pvc) { /* Deactivate all PVCs */
+ pvc_carrier(0, pvc);
+ pvc->state.exist = pvc->state.active = 0;
+ pvc->state.new = 0;
+ pvc = pvc->next;
+ }
+ }
+}
+
+
+
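+/*
+ * LMI polling timer.  DCE: the link stays reliable as long as a status
+ * enquiry arrived within the last T392 seconds.  DTE: send a status
+ * enquiry every T391 seconds (a full status request every N391 polls)
+ * and declare the link unreliable when at least N392 of the last N393
+ * polls went unanswered.
+ */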
+static void fr_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int i, cnt = 0, reliable;
+ u32 list;
+
+ if (hdlc->state.fr.settings.dce)
+ reliable = hdlc->state.fr.request &&
+ time_before(jiffies, hdlc->state.fr.last_poll +
+ hdlc->state.fr.settings.t392 * HZ);
+ else {
+ hdlc->state.fr.last_errors <<= 1; /* Shift the list */
+ if (hdlc->state.fr.request) {
+ if (hdlc->state.fr.reliable)
+ printk(KERN_INFO "%s: No LMI status reply "
+ "received\n", dev->name);
+ hdlc->state.fr.last_errors |= 1;
+ }
+
+ list = hdlc->state.fr.last_errors;
+ for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1)
+ cnt += (list & 1); /* errors count */
+
+ reliable = (cnt < hdlc->state.fr.settings.n392);
+ }
+
+ if (hdlc->state.fr.reliable != reliable) {
+ printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
+ reliable ? "" : "un");
+ fr_set_link_state(reliable, dev);
+ }
+
+ if (hdlc->state.fr.settings.dce)
+ hdlc->state.fr.timer.expires = jiffies +
+ hdlc->state.fr.settings.t392 * HZ;
+ else {
+ if (hdlc->state.fr.n391cnt)
+ hdlc->state.fr.n391cnt--;
+
+ fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0);
+
+ hdlc->state.fr.last_poll = jiffies;
+ hdlc->state.fr.request = 1;
+ hdlc->state.fr.timer.expires = jiffies +
+ hdlc->state.fr.settings.t391 * HZ;
+ }
+
+ hdlc->state.fr.timer.function = fr_timer;
+ hdlc->state.fr.timer.data = arg;
+ add_timer(&hdlc->state.fr.timer);
+}
+
+
+
+static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int stat_len;
+ pvc_device *pvc;
+ int reptype = -1, error, no_ram;
+ u8 rxseq, txseq;
+ int i;
+
+ if (skb->len < ((hdlc->state.fr.settings.lmi == LMI_ANSI)
+ ? LMI_ANSI_LENGTH : LMI_LENGTH)) {
+ printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
+ return 1;
+ }
+
+ if (skb->data[5] != (!hdlc->state.fr.settings.dce ?
+ LMI_STATUS : LMI_STATUS_ENQUIRY)) {
+ printk(KERN_INFO "%s: LMI msgtype=%x, Not LMI status %s\n",
+ dev->name, skb->data[5],
+ hdlc->state.fr.settings.dce ? "enquiry" : "reply");
+ return 1;
+ }
+
+ i = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? 7 : 6;
+
+ if (skb->data[i] !=
+ ((hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_REPTYPE : LMI_REPTYPE)) {
+ printk(KERN_INFO "%s: Not a report type=%x\n",
+ dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ i++; /* Skip length field */
+
+ reptype = skb->data[i++];
+
+ if (skb->data[i]!=
+ ((hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_ALIVE : LMI_ALIVE)) {
+ printk(KERN_INFO "%s: Unsupported status element=%x\n",
+ dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ i++; /* Skip length field */
+
+ hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */
+ rxseq = skb->data[i++]; /* Should confirm our sequence */
+
+ txseq = hdlc->state.fr.txseq;
+
+ if (hdlc->state.fr.settings.dce) {
+ if (reptype != LMI_FULLREP && reptype != LMI_INTEGRITY) {
+ printk(KERN_INFO "%s: Unsupported report type=%x\n",
+ dev->name, reptype);
+ return 1;
+ }
+ hdlc->state.fr.last_poll = jiffies;
+ }
+
+ error = 0;
+ if (!hdlc->state.fr.reliable)
+ error = 1;
+
+ if (rxseq == 0 || rxseq != txseq) {
+ hdlc->state.fr.n391cnt = 0; /* Ask for full report next time */
+ error = 1;
+ }
+
+ if (hdlc->state.fr.settings.dce) {
+ if (hdlc->state.fr.fullrep_sent && !error) {
+/* Stop sending full report - the last one has been confirmed by DTE */
+ hdlc->state.fr.fullrep_sent = 0;
+ pvc = hdlc->state.fr.first_pvc;
+ while (pvc) {
+ if (pvc->state.new) {
+ pvc->state.new = 0;
+
+/* Tell DTE that new PVC is now active */
+ hdlc->state.fr.dce_changed = 1;
+ }
+ pvc = pvc->next;
+ }
+ }
+
+ if (hdlc->state.fr.dce_changed) {
+ reptype = LMI_FULLREP;
+ hdlc->state.fr.fullrep_sent = 1;
+ hdlc->state.fr.dce_changed = 0;
+ }
+
+ fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
+ return 0;
+ }
+
+ /* DTE */
+
+ hdlc->state.fr.request = 0; /* got response, no request pending */
+
+ if (error)
+ return 0;
+
+ if (reptype != LMI_FULLREP)
+ return 0;
+
+ stat_len = 3;
+ pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ pvc->state.deleted = 1;
+ pvc = pvc->next;
+ }
+
+ no_ram = 0;
+ while (skb->len >= i + 2 + stat_len) {
+ u16 dlci;
+ unsigned int active, new;
+
+ if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT)
+ ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) {
+ printk(KERN_WARNING "%s: Invalid PVCSTAT ID: %x\n",
+ dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ if (skb->data[i] != stat_len) {
+ printk(KERN_WARNING "%s: Invalid PVCSTAT length: %x\n",
+ dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ dlci = status_to_dlci(skb->data + i, &active, &new);
+
+ pvc = add_pvc(dev, dlci);
+
+ if (!pvc && !no_ram) {
+ printk(KERN_WARNING
+ "%s: Memory squeeze on fr_lmi_recv()\n",
+ dev->name);
+ no_ram = 1;
+ }
+
+ if (pvc) {
+ pvc->state.exist = 1;
+ pvc->state.deleted = 0;
+ if (active != pvc->state.active ||
+ new != pvc->state.new ||
+ !pvc->state.exist) {
+ pvc->state.new = new;
+ pvc->state.active = active;
+ pvc_carrier(active, pvc);
+ fr_log_dlci_active(pvc);
+ }
+ }
+
+ i += stat_len;
+ }
+
+ pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ if (pvc->state.deleted && pvc->state.exist) {
+ pvc_carrier(0, pvc);
+ pvc->state.active = pvc->state.new = 0;
+ pvc->state.exist = 0;
+ fr_log_dlci_active(pvc);
+ }
+ pvc = pvc->next;
+ }
+
+ /* Next full report after N391 polls */
+ hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391;
+
+ return 0;
+}
+
+
+
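+/*
+ * Receive path: frames on the LMI DLCI go to fr_lmi_recv(), everything
+ * else is matched to a PVC, FECN/BECN changes are tracked, the FR
+ * encapsulation is stripped and the packet is handed to the PVC's main
+ * (routed) or ether (bridged) interface.
+ */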
+static int fr_rx(struct sk_buff *skb)
+{
+ struct net_device *ndev = skb->dev;
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ fr_hdr *fh = (fr_hdr*)skb->data;
+ u8 *data = skb->data;
+ u16 dlci;
+ pvc_device *pvc;
+ struct net_device *dev = NULL;
+
+ if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+ goto rx_error;
+
+ dlci = q922_to_dlci(skb->data);
+
+ if (dlci == LMI_DLCI) {
+ if (hdlc->state.fr.settings.lmi == LMI_NONE)
+ goto rx_error; /* LMI packet with no LMI? */
+
+ if (data[3] == LMI_PROTO) {
+ if (fr_lmi_recv(ndev, skb))
+ goto rx_error;
+ else {
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+ }
+ }
+
+ printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
+ ndev->name);
+ goto rx_error;
+ }
+
+ pvc = find_pvc(hdlc, dlci);
+ if (!pvc) {
+#ifdef DEBUG_PKT
+ printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
+ ndev->name, dlci);
+#endif
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ if (pvc->state.fecn != fh->fecn) {
+#ifdef DEBUG_ECN
+ printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name,
+ dlci, fh->fecn ? "N" : "FF");
+#endif
+ pvc->state.fecn ^= 1;
+ }
+
+ if (pvc->state.becn != fh->becn) {
+#ifdef DEBUG_ECN
+ printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name,
+ dlci, fh->becn ? "N" : "FF");
+#endif
+ pvc->state.becn ^= 1;
+ }
+
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ hdlc->stats.rx_dropped++;
+ return NET_RX_DROP;
+ }
+
+ if (data[3] == NLPID_IP) {
+ skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+ dev = pvc->main;
+ skb->protocol = htons(ETH_P_IP);
+
+ } else if (data[3] == NLPID_IPV6) {
+ skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+ dev = pvc->main;
+ skb->protocol = htons(ETH_P_IPV6);
+
+ } else if (skb->len > 10 && data[3] == FR_PAD &&
+ data[4] == NLPID_SNAP && data[5] == FR_PAD) {
+ u16 oui = ntohs(*(u16*)(data + 6));
+ u16 pid = ntohs(*(u16*)(data + 8));
+ skb_pull(skb, 10);
+
+ switch ((((u32)oui) << 16) | pid) {
+ case ETH_P_ARP: /* routed frame with SNAP */
+ case ETH_P_IPX:
+ case ETH_P_IP: /* a long variant */
+ case ETH_P_IPV6:
+ dev = pvc->main;
+ skb->protocol = htons(pid);
+ break;
+
+ case 0x80C20007: /* bridged Ethernet frame */
+ if ((dev = pvc->ether) != NULL)
+ skb->protocol = eth_type_trans(skb, dev);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
+ "PID=%x\n", ndev->name, oui, pid);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+ } else {
+ printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
+ "length = %i\n", ndev->name, data[3], skb->len);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ if (dev) {
+ struct net_device_stats *stats = pvc_get_stats(dev);
+ stats->rx_packets++; /* PVC traffic */
+ stats->rx_bytes += skb->len;
+ if (pvc->state.becn)
+ stats->rx_compressed++;
+ skb->dev = dev;
+ netif_rx(skb);
+ return NET_RX_SUCCESS;
+ } else {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ rx_error:
+ hdlc->stats.rx_errors++; /* Mark error */
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void fr_start(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "fr_start\n");
+#endif
+ if (hdlc->state.fr.settings.lmi != LMI_NONE) {
+ hdlc->state.fr.reliable = 0;
+ hdlc->state.fr.dce_changed = 1;
+ hdlc->state.fr.request = 0;
+ hdlc->state.fr.fullrep_sent = 0;
+ hdlc->state.fr.last_errors = 0xFFFFFFFF;
+ hdlc->state.fr.n391cnt = 0;
+ hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0;
+
+ init_timer(&hdlc->state.fr.timer);
+ /* First poll after 1 s */
+ hdlc->state.fr.timer.expires = jiffies + HZ;
+ hdlc->state.fr.timer.function = fr_timer;
+ hdlc->state.fr.timer.data = (unsigned long)dev;
+ add_timer(&hdlc->state.fr.timer);
+ } else
+ fr_set_link_state(1, dev);
+}
+
+
+
+static void fr_stop(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "fr_stop\n");
+#endif
+ if (hdlc->state.fr.settings.lmi != LMI_NONE)
+ del_timer_sync(&hdlc->state.fr.timer);
+ fr_set_link_state(0, dev);
+}
+
+
+
+static void fr_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) { /* Shutdown all PVCs for this FRAD */
+ if (pvc->main)
+ dev_close(pvc->main);
+ if (pvc->ether)
+ dev_close(pvc->ether);
+ pvc = pvc->next;
+ }
+}
+
+static void dlci_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_DLCI;
+ dev->flags = IFF_POINTOPOINT;
+ dev->hard_header_len = 10;
+ dev->addr_len = 2;
+}
+
+static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type)
+{
+ hdlc_device *hdlc = dev_to_hdlc(master);
+ pvc_device *pvc = NULL;
+ struct net_device *dev;
+ int result, used;
+ char * prefix = "pvc%d";
+
+ if (type == ARPHRD_ETHER)
+ prefix = "pvceth%d";
+
+ if ((pvc = add_pvc(master, dlci)) == NULL) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
+ master->name);
+ return -ENOBUFS;
+ }
+
+ if (*get_dev_p(pvc, type))
+ return -EEXIST;
+
+ used = pvc_is_used(pvc);
+
+ if (type == ARPHRD_ETHER)
+ dev = alloc_netdev(sizeof(struct net_device_stats),
+ "pvceth%d", ether_setup);
+ else
+ dev = alloc_netdev(sizeof(struct net_device_stats),
+ "pvc%d", dlci_setup);
+
+ if (!dev) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
+ master->name);
+ delete_unused_pvcs(hdlc);
+ return -ENOBUFS;
+ }
+
+ if (type == ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, "\x00\x01", 2);
+ get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ } else {
+ *(u16*)dev->dev_addr = htons(dlci);
+ dlci_to_q922(dev->broadcast, dlci);
+ }
+ dev->hard_start_xmit = pvc_xmit;
+ dev->get_stats = pvc_get_stats;
+ dev->open = pvc_open;
+ dev->stop = pvc_close;
+ dev->do_ioctl = pvc_ioctl;
+ dev->change_mtu = pvc_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+ dev->tx_queue_len = 0;
+ dev->priv = pvc;
+
+ result = dev_alloc_name(dev, dev->name);
+ if (result < 0) {
+ free_netdev(dev);
+ delete_unused_pvcs(hdlc);
+ return result;
+ }
+
+ if (register_netdevice(dev) != 0) {
+ free_netdev(dev);
+ delete_unused_pvcs(hdlc);
+ return -EIO;
+ }
+
+ dev->destructor = free_netdev;
+ *get_dev_p(pvc, type) = dev;
+ if (!used) {
+ hdlc->state.fr.dce_changed = 1;
+ hdlc->state.fr.dce_pvc_count++;
+ }
+ return 0;
+}
+
+
+
+static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
+{
+ pvc_device *pvc;
+ struct net_device *dev;
+
+ if ((pvc = find_pvc(hdlc, dlci)) == NULL)
+ return -ENOENT;
+
+ if ((dev = *get_dev_p(pvc, type)) == NULL)
+ return -ENOENT;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY; /* PVC in use */
+
+ unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
+ *get_dev_p(pvc, type) = NULL;
+
+ if (!pvc_is_used(pvc)) {
+ hdlc->state.fr.dce_pvc_count--;
+ hdlc->state.fr.dce_changed = 1;
+ }
+ delete_unused_pvcs(hdlc);
+ return 0;
+}
+
+
+
+static void fr_destroy(hdlc_device *hdlc)
+{
+ pvc_device *pvc;
+
+ pvc = hdlc->state.fr.first_pvc;
+ hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */
+ hdlc->state.fr.dce_pvc_count = 0;
+ hdlc->state.fr.dce_changed = 1;
+
+ while (pvc) {
+ pvc_device *next = pvc->next;
+ /* destructors will free_netdev() main and ether */
+ if (pvc->main)
+ unregister_netdevice(pvc->main);
+
+ if (pvc->ether)
+ unregister_netdevice(pvc->ether);
+
+ kfree(pvc);
+ pvc = next;
+ }
+}
+
+
+
+int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
+ const size_t size = sizeof(fr_proto);
+ fr_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ fr_proto_pvc pvc;
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_FR;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(fr_s, &hdlc->state.fr.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_FR:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, fr_s, size))
+ return -EFAULT;
+
+ if (new_settings.lmi == LMI_DEFAULT)
+ new_settings.lmi = LMI_ANSI;
+
+ if ((new_settings.lmi != LMI_NONE &&
+ new_settings.lmi != LMI_ANSI &&
+ new_settings.lmi != LMI_CCITT) ||
+ new_settings.t391 < 1 ||
+ new_settings.t392 < 2 ||
+ new_settings.n391 < 1 ||
+ new_settings.n392 < 1 ||
+ new_settings.n393 < new_settings.n392 ||
+ new_settings.n393 > 32 ||
+ (new_settings.dce != 0 &&
+ new_settings.dce != 1))
+ return -EINVAL;
+
+ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+ if (result)
+ return result;
+
+ if (hdlc->proto.id != IF_PROTO_FR) {
+ hdlc_proto_detach(hdlc);
+ hdlc->state.fr.first_pvc = NULL;
+ hdlc->state.fr.dce_pvc_count = 0;
+ }
+ memcpy(&hdlc->state.fr.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.close = fr_close;
+ hdlc->proto.start = fr_start;
+ hdlc->proto.stop = fr_stop;
+ hdlc->proto.detach = fr_destroy;
+ hdlc->proto.netif_rx = fr_rx;
+ hdlc->proto.id = IF_PROTO_FR;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_FRAD;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+
+ case IF_PROTO_FR_ADD_PVC:
+ case IF_PROTO_FR_DEL_PVC:
+ case IF_PROTO_FR_ADD_ETH_PVC:
+ case IF_PROTO_FR_DEL_ETH_PVC:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
+ sizeof(fr_proto_pvc)))
+ return -EFAULT;
+
+ if (pvc.dlci <= 0 || pvc.dlci >= 1024)
+ return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
+
+ if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
+ ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
+ result = ARPHRD_ETHER; /* bridged Ethernet device */
+ else
+ result = ARPHRD_DLCI;
+
+ if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
+ ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
+ return fr_add_pvc(dev, pvc.dlci, result);
+ else
+ return fr_del_pvc(hdlc, pvc.dlci, result);
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
new file mode 100644
index 000000000000..6ed064cb4469
--- /dev/null
+++ b/drivers/net/wan/hdlc_generic.c
@@ -0,0 +1,343 @@
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Currently supported:
+ * * raw IP-in-HDLC
+ * * Cisco HDLC
+ * * Frame Relay with ANSI or CCITT LMI (both user and network side)
+ * * PPP
+ * * X.25
+ *
+ * Use the sethdlc utility to set line parameters, protocol and PVCs.
+ *
+ * How it works:
+ * - proto.open(), close(), start(), stop() calls are serialized.
+ * The order is: open, [ start, stop ... ] close ...
+ * - proto.start() and stop() are called with spin_lock_irq held.
+ */
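+
+/*
+ * Example configuration with the sethdlc utility (illustrative only;
+ * exact syntax depends on the sethdlc version, see its documentation):
+ *
+ *   sethdlc hdlc0 fr lmi ansi
+ *   sethdlc hdlc0 create 99
+ *   ifconfig hdlc0 up
+ *
+ * and then bring up and address the PVC interface that "create"
+ * registered.
+ */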
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static const char* version = "HDLC support module revision 1.17";
+
+#undef DEBUG_LINK
+
+
+static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+
+static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
+{
+ return hdlc_stats(dev);
+}
+
+
+
+static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *p)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.netif_rx)
+ return hdlc->proto.netif_rx(skb);
+
+ hdlc->stats.rx_dropped++; /* Shouldn't happen */
+ dev_kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void __hdlc_set_carrier_on(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.start)
+ return hdlc->proto.start(dev);
+#ifdef DEBUG_LINK
+ if (netif_carrier_ok(dev))
+ printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
+#endif
+ netif_carrier_on(dev);
+}
+
+
+
+static void __hdlc_set_carrier_off(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.stop)
+ return hdlc->proto.stop(dev);
+
+#ifdef DEBUG_LINK
+ if (!netif_carrier_ok(dev))
+ printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
+#endif
+ netif_carrier_off(dev);
+}
+
+
+
+void hdlc_set_carrier(int on, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ unsigned long flags;
+ on = on ? 1 : 0;
+
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
+#endif
+
+ spin_lock_irqsave(&hdlc->state_lock, flags);
+
+ if (hdlc->carrier == on)
+ goto carrier_exit; /* no change in DCD line level */
+
+#ifdef DEBUG_LINK
+ printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
+#endif
+ hdlc->carrier = on;
+
+ if (!hdlc->open)
+ goto carrier_exit;
+
+ if (hdlc->carrier)
+ __hdlc_set_carrier_on(dev);
+ else
+ __hdlc_set_carrier_off(dev);
+
+carrier_exit:
+ spin_unlock_irqrestore(&hdlc->state_lock, flags);
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being opened */
+int hdlc_open(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_open() carrier %i open %i\n",
+ hdlc->carrier, hdlc->open);
+#endif
+
+ if (hdlc->proto.id == -1)
+ return -ENOSYS; /* no protocol attached */
+
+ if (hdlc->proto.open) {
+ int result = hdlc->proto.open(dev);
+ if (result)
+ return result;
+ }
+
+ spin_lock_irq(&hdlc->state_lock);
+
+ if (hdlc->carrier)
+ __hdlc_set_carrier_on(dev);
+
+ hdlc->open = 1;
+
+ spin_unlock_irq(&hdlc->state_lock);
+ return 0;
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being closed */
+void hdlc_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_close() carrier %i open %i\n",
+ hdlc->carrier, hdlc->open);
+#endif
+
+ spin_lock_irq(&hdlc->state_lock);
+
+ hdlc->open = 0;
+ if (hdlc->carrier)
+ __hdlc_set_carrier_off(dev);
+
+ spin_unlock_irq(&hdlc->state_lock);
+
+ if (hdlc->proto.close)
+ hdlc->proto.close(dev);
+}
+
+
+
+#ifndef CONFIG_HDLC_RAW
+#define hdlc_raw_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_RAW_ETH
+#define hdlc_raw_eth_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_PPP
+#define hdlc_ppp_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_CISCO
+#define hdlc_cisco_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_FR
+#define hdlc_fr_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_X25
+#define hdlc_x25_ioctl(dev, ifr) -ENOSYS
+#endif
+
+
+int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ unsigned int proto;
+
+ if (cmd != SIOCWANDEV)
+ return -EINVAL;
+
+ switch(ifr->ifr_settings.type) {
+ case IF_PROTO_HDLC:
+ case IF_PROTO_HDLC_ETH:
+ case IF_PROTO_PPP:
+ case IF_PROTO_CISCO:
+ case IF_PROTO_FR:
+ case IF_PROTO_X25:
+ proto = ifr->ifr_settings.type;
+ break;
+
+ default:
+ proto = hdlc->proto.id;
+ }
+
+ switch(proto) {
+ case IF_PROTO_HDLC: return hdlc_raw_ioctl(dev, ifr);
+ case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(dev, ifr);
+ case IF_PROTO_PPP: return hdlc_ppp_ioctl(dev, ifr);
+ case IF_PROTO_CISCO: return hdlc_cisco_ioctl(dev, ifr);
+ case IF_PROTO_FR: return hdlc_fr_ioctl(dev, ifr);
+ case IF_PROTO_X25: return hdlc_x25_ioctl(dev, ifr);
+ default: return -EINVAL;
+ }
+}
+
+static void hdlc_setup(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ dev->get_stats = hdlc_get_stats;
+ dev->change_mtu = hdlc_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+
+ dev->type = ARPHRD_RAWHDLC;
+ dev->hard_header_len = 16;
+
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ hdlc->proto.id = -1;
+ hdlc->proto.detach = NULL;
+ hdlc->carrier = 1;
+ hdlc->open = 0;
+ spin_lock_init(&hdlc->state_lock);
+}
+
+struct net_device *alloc_hdlcdev(void *priv)
+{
+ struct net_device *dev;
+ dev = alloc_netdev(sizeof(hdlc_device), "hdlc%d", hdlc_setup);
+ if (dev)
+ dev_to_hdlc(dev)->priv = priv;
+ return dev;
+}
+
+int register_hdlc_device(struct net_device *dev)
+{
+ int result = dev_alloc_name(dev, "hdlc%d");
+ if (result < 0)
+ return result;
+
+ result = register_netdev(dev);
+ if (result != 0)
+ return -EIO;
+
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev); /* no carrier until DCD goes up */
+
+ return 0;
+}
+
+
+
+void unregister_hdlc_device(struct net_device *dev)
+{
+ rtnl_lock();
+ hdlc_proto_detach(dev_to_hdlc(dev));
+ unregister_netdevice(dev);
+ rtnl_unlock();
+}
+
+
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("HDLC support module");
+MODULE_LICENSE("GPL v2");
+
+EXPORT_SYMBOL(hdlc_open);
+EXPORT_SYMBOL(hdlc_close);
+EXPORT_SYMBOL(hdlc_set_carrier);
+EXPORT_SYMBOL(hdlc_ioctl);
+EXPORT_SYMBOL(alloc_hdlcdev);
+EXPORT_SYMBOL(register_hdlc_device);
+EXPORT_SYMBOL(unregister_hdlc_device);
+
+static struct packet_type hdlc_packet_type = {
+ .type = __constant_htons(ETH_P_HDLC),
+ .func = hdlc_rcv,
+};
+
+
+static int __init hdlc_module_init(void)
+{
+ printk(KERN_INFO "%s\n", version);
+ dev_add_pack(&hdlc_packet_type);
+ return 0;
+}
+
+
+
+static void __exit hdlc_module_exit(void)
+{
+ dev_remove_pack(&hdlc_packet_type);
+}
+
+
+module_init(hdlc_module_init);
+module_exit(hdlc_module_exit);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
new file mode 100644
index 000000000000..7cd6195a2e46
--- /dev/null
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -0,0 +1,115 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Point-to-point protocol support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static int ppp_open(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ void *old_ioctl;
+ int result;
+
+ dev->priv = &hdlc->state.ppp.syncppp_ptr;
+ hdlc->state.ppp.syncppp_ptr = &hdlc->state.ppp.pppdev;
+ hdlc->state.ppp.pppdev.dev = dev;
+
+ old_ioctl = dev->do_ioctl;
+ hdlc->state.ppp.old_change_mtu = dev->change_mtu;
+ sppp_attach(&hdlc->state.ppp.pppdev);
+ /* sppp_attach nukes them. We don't need syncppp's ioctl */
+ dev->do_ioctl = old_ioctl;
+ hdlc->state.ppp.pppdev.sppp.pp_flags &= ~PP_CISCO;
+ dev->type = ARPHRD_PPP;
+ result = sppp_open(dev);
+ if (result) {
+ sppp_detach(dev);
+ return result;
+ }
+
+ return 0;
+}
+
+
+
+static void ppp_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ sppp_close(dev);
+ sppp_detach(dev);
+ dev->rebuild_header = NULL;
+ dev->change_mtu = hdlc->state.ppp.old_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+ dev->hard_header_len = 16;
+}
+
+
+
+static unsigned short ppp_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return __constant_htons(ETH_P_WAN_PPP);
+}
+
+
+
+int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_PPP;
+ return 0; /* return protocol only, no settable parameters */
+
+ case IF_PROTO_PPP:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* no settable parameters */
+
+ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.open = ppp_open;
+ hdlc->proto.close = ppp_close;
+ hdlc->proto.type_trans = ppp_type_trans;
+ hdlc->proto.id = IF_PROTO_PPP;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_PPP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
new file mode 100644
index 000000000000..c41fb70b6929
--- /dev/null
+++ b/drivers/net/wan/hdlc_raw.c
@@ -0,0 +1,90 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static unsigned short raw_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return __constant_htons(ETH_P_IP);
+}
+
+
+
+int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ const size_t size = sizeof(raw_hdlc_proto);
+ raw_hdlc_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_HDLC;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_HDLC:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, raw_s, size))
+ return -EFAULT;
+
+ if (new_settings.encoding == ENCODING_DEFAULT)
+ new_settings.encoding = ENCODING_NRZ;
+
+ if (new_settings.parity == PARITY_DEFAULT)
+ new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+ result = hdlc->attach(dev, new_settings.encoding,
+ new_settings.parity);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.type_trans = raw_type_trans;
+ hdlc->proto.id = IF_PROTO_HDLC;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_RAWHDLC;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
new file mode 100644
index 000000000000..b1285cc8fee6
--- /dev/null
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -0,0 +1,107 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC Ethernet emulation support
+ *
+ * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/random.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
+
+
+static int eth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int pad = ETH_ZLEN - skb->len;
+ if (pad > 0) { /* Pad the frame with zeros */
+ int len = skb->len;
+ if (skb_tailroom(skb) < pad)
+ if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
+ hdlc_stats(dev)->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ skb_put(skb, pad);
+ memset(skb->data + len, 0, pad);
+ }
+ return dev_to_hdlc(dev)->xmit(skb, dev);
+}
+
+
+int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ const size_t size = sizeof(raw_hdlc_proto);
+ raw_hdlc_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+ void *old_ch_mtu;
+ int old_qlen;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_HDLC_ETH:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, raw_s, size))
+ return -EFAULT;
+
+ if (new_settings.encoding == ENCODING_DEFAULT)
+ new_settings.encoding = ENCODING_NRZ;
+
+ if (new_settings.parity == PARITY_DEFAULT)
+ new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+ result = hdlc->attach(dev, new_settings.encoding,
+ new_settings.parity);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.type_trans = eth_type_trans;
+ hdlc->proto.id = IF_PROTO_HDLC_ETH;
+ dev->hard_start_xmit = eth_tx;
+ old_ch_mtu = dev->change_mtu;
+ old_qlen = dev->tx_queue_len;
+ ether_setup(dev);
+ dev->change_mtu = old_ch_mtu;
+ dev->tx_queue_len = old_qlen;
+ memcpy(dev->dev_addr, "\x00\x01", 2);
+ get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
new file mode 100644
index 000000000000..07e5eef1fe0f
--- /dev/null
+++ b/drivers/net/wan/hdlc_x25.c
@@ -0,0 +1,219 @@
+/*
+ * Generic HDLC support routines for Linux
+ * X.25 support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+#include <net/x25device.h>
+
+/* These functions are callbacks called by LAPB layer */
+
+static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
+{
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ if ((skb = dev_alloc_skb(1)) == NULL) {
+ printk(KERN_ERR "%s: out of memory\n", dev->name);
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = code;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ netif_rx(skb);
+}
+
+
+
+static void x25_connected(struct net_device *dev, int reason)
+{
+ x25_connect_disconnect(dev, reason, 1);
+}
+
+
+
+static void x25_disconnected(struct net_device *dev, int reason)
+{
+ x25_connect_disconnect(dev, reason, 2);
+}
+
+
+
+static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+ unsigned char *ptr;
+
+ skb_push(skb, 1);
+
+ if (skb_cow(skb, 1))
+ return NET_RX_DROP;
+
+ ptr = skb->data;
+ *ptr = 0;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ return netif_rx(skb);
+}
+
+
+
+static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ hdlc->xmit(skb, dev); /* Ignore return value :-( */
+}
+
+
+
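+/*
+ * Frames handed down by the X.25 layer start with a one-byte pseudo
+ * header: 0 means data to transmit, 1 requests a LAPB connect and
+ * 2 a disconnect.  The receive direction above uses the same
+ * convention.
+ */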
+static int x25_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int result;
+
+
+ /* X.25 to LAPB */
+ switch (skb->data[0]) {
+ case 0: /* Data to be transmitted */
+ skb_pull(skb, 1);
+ if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+ dev_kfree_skb(skb);
+ return 0;
+
+ case 1:
+ if ((result = lapb_connect_request(dev))!= LAPB_OK) {
+ if (result == LAPB_CONNECTED)
+ /* Send connect confirm. msg to level 3 */
+ x25_connected(dev, 0);
+ else
+ printk(KERN_ERR "%s: LAPB connect request "
+ "failed, error code = %i\n",
+ dev->name, result);
+ }
+ break;
+
+ case 2:
+ if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
+ if (result == LAPB_NOTCONNECTED)
+ /* Send disconnect confirm. msg to level 3 */
+ x25_disconnected(dev, 0);
+ else
+ printk(KERN_ERR "%s: LAPB disconnect request "
+ "failed, error code = %i\n",
+ dev->name, result);
+ }
+ break;
+
+ default: /* to be defined */
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+
+static int x25_open(struct net_device *dev)
+{
+ struct lapb_register_struct cb;
+ int result;
+
+ cb.connect_confirmation = x25_connected;
+ cb.connect_indication = x25_connected;
+ cb.disconnect_confirmation = x25_disconnected;
+ cb.disconnect_indication = x25_disconnected;
+ cb.data_indication = x25_data_indication;
+ cb.data_transmit = x25_data_transmit;
+
+ result = lapb_register(dev, &cb);
+ if (result != LAPB_OK)
+ return result;
+ return 0;
+}
+
+
+
+static void x25_close(struct net_device *dev)
+{
+ lapb_unregister(dev);
+}
+
+
+
+static int x25_rx(struct sk_buff *skb)
+{
+ hdlc_device *hdlc = dev_to_hdlc(skb->dev);
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ hdlc->stats.rx_dropped++;
+ return NET_RX_DROP;
+ }
+
+ if (lapb_data_received(skb->dev, skb) == LAPB_OK)
+ return NET_RX_SUCCESS;
+
+ hdlc->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+}
+
+
+
+int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_X25;
+ return 0; /* return protocol only, no settable parameters */
+
+ case IF_PROTO_X25:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.open = x25_open;
+ hdlc->proto.close = x25_close;
+ hdlc->proto.netif_rx = x25_rx;
+ hdlc->proto.type_trans = NULL;
+ hdlc->proto.id = IF_PROTO_X25;
+ dev->hard_start_xmit = x25_xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_X25;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
new file mode 100644
index 000000000000..7db1d1d0bb34
--- /dev/null
+++ b/drivers/net/wan/hostess_sv11.c
@@ -0,0 +1,420 @@
+/*
+ * Comtrol SV11 card driver
+ *
+ * This is a slightly odd Z85230 synchronous driver. All you basically
+ * need to know is:
+ *
+ * It's a genuine Z85230
+ *
+ * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
+ * use these facilities
+ *
+ * The control port is at io+1, the data port at io+3, and DMA is
+ * turned off by writing 0 to io+4
+ *
+ * The hardware does the bus handling to avoid the need for delays between
+ * touching control registers.
+ *
+ * Port B isn't wired (why - beats me)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <net/syncppp.h>
+#include "z85230.h"
+
+static int dma;
+
+struct sv11_device
+{
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ struct z8530_dev sync;
+ struct ppp_device netdev;
+};
+
+/*
+ * Network driver support routines
+ */
+
+/*
+ * Frame receive. Simple for our card as we do sync ppp and there
+ * is no funny garbage involved
+ */
+
+static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
+{
+ /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
+ skb_trim(skb, skb->len-2);
+ skb->protocol=__constant_htons(ETH_P_WAN_PPP);
+ skb->mac.raw=skb->data;
+ skb->dev=c->netdevice;
+ /*
+ * Send it to the PPP layer. We don't have time to process
+ * it right now.
+ */
+ netif_rx(skb);
+ c->netdevice->last_rx = jiffies;
+}
+
+/*
+ * We've been placed in the UP state
+ */
+
+static int hostess_open(struct net_device *d)
+{
+ struct sv11_device *sv11=d->priv;
+ int err = -1;
+
+ /*
+ * Link layer up
+ */
+ switch(dma)
+ {
+ case 0:
+ err=z8530_sync_open(d, &sv11->sync.chanA);
+ break;
+ case 1:
+ err=z8530_sync_dma_open(d, &sv11->sync.chanA);
+ break;
+ case 2:
+ err=z8530_sync_txdma_open(d, &sv11->sync.chanA);
+ break;
+ }
+
+ if(err)
+ return err;
+ /*
+ * Begin PPP
+ */
+ err=sppp_open(d);
+ if(err)
+ {
+ switch(dma)
+ {
+ case 0:
+ z8530_sync_close(d, &sv11->sync.chanA);
+ break;
+ case 1:
+ z8530_sync_dma_close(d, &sv11->sync.chanA);
+ break;
+ case 2:
+ z8530_sync_txdma_close(d, &sv11->sync.chanA);
+ break;
+ }
+ return err;
+ }
+ sv11->sync.chanA.rx_function=hostess_input;
+
+ /*
+ * Go go go
+ */
+
+ netif_start_queue(d);
+ return 0;
+}
+
+static int hostess_close(struct net_device *d)
+{
+ struct sv11_device *sv11=d->priv;
+ /*
+ * Discard new frames
+ */
+ sv11->sync.chanA.rx_function=z8530_null_rx;
+ /*
+ * PPP off
+ */
+ sppp_close(d);
+ /*
+ * Link layer down
+ */
+ netif_stop_queue(d);
+
+ switch(dma)
+ {
+ case 0:
+ z8530_sync_close(d, &sv11->sync.chanA);
+ break;
+ case 1:
+ z8530_sync_dma_close(d, &sv11->sync.chanA);
+ break;
+ case 2:
+ z8530_sync_txdma_close(d, &sv11->sync.chanA);
+ break;
+ }
+ return 0;
+}
+
+static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
+{
+ /* struct sv11_device *sv11=d->priv;
+ z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
+ return sppp_do_ioctl(d, ifr,cmd);
+}
+
+static struct net_device_stats *hostess_get_stats(struct net_device *d)
+{
+ struct sv11_device *sv11=d->priv;
+ if(sv11)
+ return z8530_get_stats(&sv11->sync.chanA);
+ else
+ return NULL;
+}
+
+/*
+ * Passed PPP frames, fire them downwind.
+ */
+
+static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
+{
+ struct sv11_device *sv11=d->priv;
+ return z8530_queue_xmit(&sv11->sync.chanA, skb);
+}
+
+static int hostess_neigh_setup(struct neighbour *n)
+{
+ if (n->nud_state == NUD_NONE) {
+ n->ops = &arp_broken_ops;
+ n->output = n->ops->output;
+ }
+ return 0;
+}
+
+static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+{
+ if (p->tbl->family == AF_INET) {
+ p->neigh_setup = hostess_neigh_setup;
+ p->ucast_probes = 0;
+ p->mcast_probes = 0;
+ }
+ return 0;
+}
+
+static void sv11_setup(struct net_device *dev)
+{
+ dev->open = hostess_open;
+ dev->stop = hostess_close;
+ dev->hard_start_xmit = hostess_queue_xmit;
+ dev->get_stats = hostess_get_stats;
+ dev->do_ioctl = hostess_ioctl;
+ dev->neigh_setup = hostess_neigh_setup_dev;
+}
+
+/*
+ * Description block for a Comtrol Hostess SV11 card
+ */
+
+static struct sv11_device *sv11_init(int iobase, int irq)
+{
+ struct z8530_dev *dev;
+ struct sv11_device *sv;
+
+ /*
+ * Get the needed I/O space
+ */
+
+ if(!request_region(iobase, 8, "Comtrol SV11"))
+ {
+ printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase);
+ return NULL;
+ }
+
+ sv=(struct sv11_device *)kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
+ if(!sv)
+ goto fail3;
+
+ memset(sv, 0, sizeof(*sv));
+ sv->if_ptr=&sv->netdev;
+
+ sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
+ if(!sv->netdev.dev)
+ goto fail2;
+
+ SET_MODULE_OWNER(sv->netdev.dev);
+
+ dev=&sv->sync;
+
+ /*
+ * Stuff in the I/O addressing
+ */
+
+ dev->active = 0;
+
+ dev->chanA.ctrlio=iobase+1;
+ dev->chanA.dataio=iobase+3;
+ dev->chanB.ctrlio=-1;
+ dev->chanB.dataio=-1;
+ dev->chanA.irqs=&z8530_nop;
+ dev->chanB.irqs=&z8530_nop;
+
+ outb(0, iobase+4); /* DMA off */
+
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "Hostess SV11", dev)<0)
+ {
+ printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
+ goto fail1;
+ }
+
+ dev->irq=irq;
+ dev->chanA.private=sv;
+ dev->chanA.netdevice=sv->netdev.dev;
+ dev->chanA.dev=dev;
+ dev->chanB.dev=dev;
+
+ if(dma)
+ {
+ /*
+ * You can have DMA off, or channels 1 and 3 - that's the lot
+ * on the Comtrol.
+ */
+ dev->chanA.txdma=3;
+ dev->chanA.rxdma=1;
+ outb(0x03|0x08, iobase+4); /* DMA on */
+ if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0)
+ goto fail;
+
+ if(dma==1)
+ {
+ if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0)
+ goto dmafail;
+ }
+ }
+
+ /* Kill our private IRQ line; the Hostess can end up chattering
+ until the configuration is set */
+ disable_irq(irq);
+
+ /*
+ * Begin normal initialise
+ */
+
+ if(z8530_init(dev)!=0)
+ {
+ printk(KERN_ERR "Z8530 series device not found.\n");
+ enable_irq(irq);
+ goto dmafail2;
+ }
+ z8530_channel_load(&dev->chanB, z8530_dead_port);
+ if(dev->type==Z85C30)
+ z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
+ else
+ z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
+
+ enable_irq(irq);
+
+
+ /*
+ * Now we can take the IRQ
+ */
+ if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
+ {
+ struct net_device *d=dev->chanA.netdevice;
+
+ /*
+ * Initialise the PPP components
+ */
+ sppp_attach(&sv->netdev);
+
+ /*
+ * Local fields
+ */
+
+ d->base_addr = iobase;
+ d->irq = irq;
+ d->priv = sv;
+
+ if(register_netdev(d))
+ {
+ printk(KERN_ERR "%s: unable to register device.\n",
+ d->name);
+ sppp_detach(d);
+ goto dmafail2;
+ }
+
+ z8530_describe(dev, "I/O", iobase);
+ dev->active=1;
+ return sv;
+ }
+dmafail2:
+ if(dma==1)
+ free_dma(dev->chanA.rxdma);
+dmafail:
+ if(dma)
+ free_dma(dev->chanA.txdma);
+fail:
+ free_irq(irq, dev);
+fail1:
+ free_netdev(sv->netdev.dev);
+fail2:
+ kfree(sv);
+fail3:
+ release_region(iobase,8);
+ return NULL;
+}
+
+static void sv11_shutdown(struct sv11_device *dev)
+{
+ sppp_detach(dev->netdev.dev);
+ unregister_netdev(dev->netdev.dev);
+ z8530_shutdown(&dev->sync);
+ free_irq(dev->sync.irq, dev);
+ if(dma)
+ {
+ if(dma==1)
+ free_dma(dev->sync.chanA.rxdma);
+ free_dma(dev->sync.chanA.txdma);
+ }
+ release_region(dev->sync.chanA.ctrlio-1, 8);
+ free_netdev(dev->netdev.dev);
+ kfree(dev);
+}
+
+#ifdef MODULE
+
+static int io=0x200;
+static int irq=9;
+
+module_param(io, int, 0);
+MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
+module_param(dma, int, 0);
+MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
+
+static struct sv11_device *sv11_unit;
+
+int init_module(void)
+{
+ printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n");
+ printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
+ if((sv11_unit=sv11_init(io,irq))==NULL)
+ return -ENODEV;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ if(sv11_unit)
+ sv11_shutdown(sv11_unit);
+}
+
+#endif
+
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
new file mode 100644
index 000000000000..7f2e3653c5e5
--- /dev/null
+++ b/drivers/net/wan/lapbether.c
@@ -0,0 +1,465 @@
+/*
+ * "LAPB via ethernet" driver release 001
+ *
+ * This code REQUIRES 2.1.15 or higher/ NET3.038
+ *
+ * This module:
+ * This module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is a "pseudo" network driver to allow LAPB over Ethernet.
+ *
+ * This driver can use any ethernet destination address, and can be
+ * limited to accept frames from one dedicated ethernet card only.
+ *
+ * History
+ * LAPBETH 001 Jonathan Naylor Cloned from bpqether.c
+ * 2000-10-29 Henner Eisen lapb_data_indication() return status.
+ * 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/stat.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/lapb.h>
+#include <linux/init.h>
+
+#include <net/x25device.h>
+
+static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+/* If this number is made larger, check that the temporary string buffer
+ * in lapbeth_new_device is large enough to store the probe device name.*/
+#define MAXLAPBDEV 100
+
+struct lapbethdev {
+ struct list_head node;
+ struct net_device *ethdev; /* link to ethernet device */
+ struct net_device *axdev; /* lapbeth device (lapb#) */
+ struct net_device_stats stats; /* some statistics */
+};
+
+static struct list_head lapbeth_devices = LIST_HEAD_INIT(lapbeth_devices);
+
+/* ------------------------------------------------------------------------ */
+
+/*
+ * Get the LAPB device for the ethernet device
+ */
+static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
+{
+ struct lapbethdev *lapbeth;
+
+ list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+ if (lapbeth->ethdev == dev)
+ return lapbeth;
+ }
+ return NULL;
+}
+
+static __inline__ int dev_is_ethdev(struct net_device *dev)
+{
+ return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+}
+
+/* ------------------------------------------------------------------------ */
+
+/*
+ * Receive a LAPB frame via an ethernet interface.
+ */
+static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
+{
+ int len, err;
+ struct lapbethdev *lapbeth;
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+
+ if (!pskb_may_pull(skb, 2))
+ goto drop;
+
+ rcu_read_lock();
+ lapbeth = lapbeth_get_x25_dev(dev);
+ if (!lapbeth)
+ goto drop_unlock;
+ if (!netif_running(lapbeth->axdev))
+ goto drop_unlock;
+
+ lapbeth->stats.rx_packets++;
+
+ len = skb->data[0] + skb->data[1] * 256;
+ lapbeth->stats.rx_bytes += len;
+
+ skb_pull(skb, 2); /* Remove the length bytes */
+ skb_trim(skb, len); /* Set the length of the data */
+
+ if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
+ printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
+ goto drop_unlock;
+ }
+out:
+ rcu_read_unlock();
+ return 0;
+drop_unlock:
+ kfree_skb(skb);
+ goto out;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+ unsigned char *ptr;
+
+ skb_push(skb, 1);
+
+ if (skb_cow(skb, 1))
+ return NET_RX_DROP;
+
+ ptr = skb->data;
+ *ptr = 0x00;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ skb->dev->last_rx = jiffies;
+ return netif_rx(skb);
+}
+
+/*
+ * Send a LAPB frame via an ethernet interface
+ */
+static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int err = -ENODEV;
+
+ /*
+	 * Just to be *really* sure not to send anything if the interface
+	 * is down; the ethernet device may have gone away.
+ */
+ if (!netif_running(dev)) {
+ goto drop;
+ }
+
+ switch (skb->data[0]) {
+ case 0x00:
+ err = 0;
+ break;
+ case 0x01:
+ if ((err = lapb_connect_request(dev)) != LAPB_OK)
+ printk(KERN_ERR "lapbeth: lapb_connect_request "
+ "error: %d\n", err);
+ goto drop_ok;
+ case 0x02:
+ if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
+ printk(KERN_ERR "lapbeth: lapb_disconnect_request "
+ "err: %d\n", err);
+ /* Fall thru */
+ default:
+ goto drop_ok;
+ }
+
+ skb_pull(skb, 1);
+
+ if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
+ printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
+ err = -ENOMEM;
+ goto drop;
+ }
+ err = 0;
+out:
+ return err;
+drop_ok:
+ err = 0;
+drop:
+ kfree_skb(skb);
+ goto out;
+}
+
+static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
+{
+ struct lapbethdev *lapbeth = netdev_priv(ndev);
+ unsigned char *ptr;
+ struct net_device *dev;
+ int size = skb->len;
+
+ skb->protocol = htons(ETH_P_X25);
+
+ ptr = skb_push(skb, 2);
+
+ *ptr++ = size % 256;
+ *ptr++ = size / 256;
+
+ lapbeth->stats.tx_packets++;
+ lapbeth->stats.tx_bytes += size;
+
+ skb->dev = dev = lapbeth->ethdev;
+
+ dev->hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
+
+ dev_queue_xmit(skb);
+}
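
lapbeth_rcv() and lapbeth_data_transmit() above together define the on-wire framing: each LAPB frame rides in an ETH_P_DEC ethernet frame whose payload starts with a two-byte little-endian length, followed by the LAPB data itself. A minimal stand-alone sketch of that framing, kept outside the kernel helpers (the function and buffer names here are illustrative, not part of the driver):

    #include <stddef.h>
    #include <string.h>

    /* Prepend the 2-byte length header, as lapbeth_data_transmit() does.
     * 'out' must have room for len + 2 bytes. */
    static size_t lapb_frame_build(unsigned char *out,
                                   const unsigned char *lapb, size_t len)
    {
            out[0] = len % 256;                /* low byte  */
            out[1] = len / 256;                /* high byte */
            memcpy(out + 2, lapb, len);
            return len + 2;
    }

    /* Strip the header again, mirroring lapbeth_rcv(); returns the payload
     * length, or 0 if the buffer is too short or the length is bogus. */
    static size_t lapb_frame_parse(const unsigned char *in, size_t buflen,
                                   const unsigned char **payload)
    {
            size_t len;

            if (buflen < 2)
                    return 0;
            len = in[0] + in[1] * 256;
            if (len > buflen - 2)
                    return 0;
            *payload = in + 2;
            return len;
    }

The kernel paths do the same arithmetic directly on skb->data, with skb_push()/skb_pull() moving the two header bytes.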
+
+static void lapbeth_connected(struct net_device *dev, int reason)
+{
+ unsigned char *ptr;
+ struct sk_buff *skb = dev_alloc_skb(1);
+
+ if (!skb) {
+ printk(KERN_ERR "lapbeth: out of memory\n");
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = 0x01;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+}
+
+static void lapbeth_disconnected(struct net_device *dev, int reason)
+{
+ unsigned char *ptr;
+ struct sk_buff *skb = dev_alloc_skb(1);
+
+ if (!skb) {
+ printk(KERN_ERR "lapbeth: out of memory\n");
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = 0x02;
+
+ skb->protocol = x25_type_trans(skb, dev);
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+}
+
+/*
+ * Statistics
+ */
+static struct net_device_stats *lapbeth_get_stats(struct net_device *dev)
+{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
+ return &lapbeth->stats;
+}
+
+/*
+ *	Set the device hardware address
+ */
+static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ return 0;
+}
+
+
+static struct lapb_register_struct lapbeth_callbacks = {
+ .connect_confirmation = lapbeth_connected,
+ .connect_indication = lapbeth_connected,
+ .disconnect_confirmation = lapbeth_disconnected,
+ .disconnect_indication = lapbeth_disconnected,
+ .data_indication = lapbeth_data_indication,
+ .data_transmit = lapbeth_data_transmit,
+
+};
+
+/*
+ * open/close a device
+ */
+static int lapbeth_open(struct net_device *dev)
+{
+ int err;
+
+ if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+ printk(KERN_ERR "lapbeth: lapb_register error - %d\n", err);
+ return -ENODEV;
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int lapbeth_close(struct net_device *dev)
+{
+ int err;
+
+ netif_stop_queue(dev);
+
+ if ((err = lapb_unregister(dev)) != LAPB_OK)
+ printk(KERN_ERR "lapbeth: lapb_unregister error - %d\n", err);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void lapbeth_setup(struct net_device *dev)
+{
+ dev->hard_start_xmit = lapbeth_xmit;
+ dev->open = lapbeth_open;
+ dev->stop = lapbeth_close;
+ dev->destructor = free_netdev;
+ dev->set_mac_address = lapbeth_set_mac_address;
+ dev->get_stats = lapbeth_get_stats;
+ dev->type = ARPHRD_X25;
+ dev->hard_header_len = 3;
+ dev->mtu = 1000;
+ dev->addr_len = 0;
+ SET_MODULE_OWNER(dev);
+}
+
+/*
+ * Setup a new device.
+ */
+static int lapbeth_new_device(struct net_device *dev)
+{
+ struct net_device *ndev;
+ struct lapbethdev *lapbeth;
+ int rc = -ENOMEM;
+
+ ASSERT_RTNL();
+
+ ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d",
+ lapbeth_setup);
+ if (!ndev)
+ goto out;
+
+ lapbeth = netdev_priv(ndev);
+ lapbeth->axdev = ndev;
+
+ dev_hold(dev);
+ lapbeth->ethdev = dev;
+
+ rc = dev_alloc_name(ndev, ndev->name);
+ if (rc < 0)
+ goto fail;
+
+ rc = -EIO;
+ if (register_netdevice(ndev))
+ goto fail;
+
+ list_add_rcu(&lapbeth->node, &lapbeth_devices);
+ rc = 0;
+out:
+ return rc;
+fail:
+ dev_put(dev);
+	free_netdev(ndev);	/* also frees the lapbethdev private area */
+ goto out;
+}
+
+/*
+ * Free a lapb network device.
+ */
+static void lapbeth_free_device(struct lapbethdev *lapbeth)
+{
+ dev_put(lapbeth->ethdev);
+ list_del_rcu(&lapbeth->node);
+ unregister_netdevice(lapbeth->axdev);
+}
+
+/*
+ * Handle device status changes.
+ *
+ * Called from notifier with RTNL held.
+ */
+static int lapbeth_device_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct lapbethdev *lapbeth;
+ struct net_device *dev = ptr;
+
+ if (!dev_is_ethdev(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ /* New ethernet device -> new LAPB interface */
+ if (lapbeth_get_x25_dev(dev) == NULL)
+ lapbeth_new_device(dev);
+ break;
+ case NETDEV_DOWN:
+ /* ethernet device closed -> close LAPB interface */
+ lapbeth = lapbeth_get_x25_dev(dev);
+ if (lapbeth)
+ dev_close(lapbeth->axdev);
+ break;
+ case NETDEV_UNREGISTER:
+ /* ethernet device disappears -> remove LAPB interface */
+ lapbeth = lapbeth_get_x25_dev(dev);
+ if (lapbeth)
+ lapbeth_free_device(lapbeth);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct packet_type lapbeth_packet_type = {
+ .type = __constant_htons(ETH_P_DEC),
+ .func = lapbeth_rcv,
+};
+
+static struct notifier_block lapbeth_dev_notifier = {
+ .notifier_call = lapbeth_device_event,
+};
+
+static char banner[] __initdata = KERN_INFO "LAPB Ethernet driver version 0.02\n";
+
+static int __init lapbeth_init_driver(void)
+{
+ dev_add_pack(&lapbeth_packet_type);
+
+ register_netdevice_notifier(&lapbeth_dev_notifier);
+
+ printk(banner);
+
+ return 0;
+}
+module_init(lapbeth_init_driver);
+
+static void __exit lapbeth_cleanup_driver(void)
+{
+ struct lapbethdev *lapbeth;
+ struct list_head *entry, *tmp;
+
+ dev_remove_pack(&lapbeth_packet_type);
+ unregister_netdevice_notifier(&lapbeth_dev_notifier);
+
+ rtnl_lock();
+ list_for_each_safe(entry, tmp, &lapbeth_devices) {
+ lapbeth = list_entry(entry, struct lapbethdev, node);
+
+ unregister_netdevice(lapbeth->axdev);
+ }
+ rtnl_unlock();
+}
+module_exit(lapbeth_cleanup_driver);
+
+MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
+MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
new file mode 100644
index 000000000000..dabdcfed4efd
--- /dev/null
+++ b/drivers/net/wan/lmc/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Lan Media 21140 based WAN cards
+# Specifically the 1000,1200,5200,5245
+#
+
+obj-$(CONFIG_LANMEDIA) += lmc.o
+
+lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
+
+# Uncomment to enable debugging and have every packet
+# echoed to KERN_DEBUG in hex
+#
+# DBGDEF = \
+# -DDEBUG \
+# -DLMC_PACKET_LOG
+
+EXTRA_CFLAGS += -I. $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
new file mode 100644
index 000000000000..882e58c1bfd7
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc.h
@@ -0,0 +1,33 @@
+#ifndef _LMC_H_
+#define _LMC_H_
+
+#include "lmc_var.h"
+
+/*
+ * prototypes for everyone
+ */
+int lmc_probe(struct net_device * dev);
+unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
+ devaddr, unsigned regno);
+void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
+ unsigned regno, unsigned data);
+void lmc_led_on(lmc_softc_t * const, u_int32_t);
+void lmc_led_off(lmc_softc_t * const, u_int32_t);
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits);
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits);
+
+int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+extern lmc_media_t lmc_ds3_media;
+extern lmc_media_t lmc_ssi_media;
+extern lmc_media_t lmc_t1_media;
+extern lmc_media_t lmc_hssi_media;
+
+#ifdef _DBG_EVENTLOG
+static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 );
+#endif
+
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
new file mode 100644
index 000000000000..9dccd9546a17
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -0,0 +1,85 @@
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include "lmc_debug.h"
+
+/*
+ * Prints up to 240 octets of data using printk, 20 per line
+ */
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
+{
+#ifdef DEBUG
+#ifdef LMC_PACKET_LOG
+ int iNewLine = 1;
+ char str[80], *pstr;
+
+ sprintf(str, KERN_DEBUG "lmc: %s: ", type);
+ pstr = str+strlen(str);
+
+ if(iLen > 240){
+ printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
+ iLen = 240;
+ }
+ else{
+ printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
+ }
+
+ while(iLen > 0)
+ {
+ sprintf(pstr, "%02x ", *ucData);
+ pstr+=3;
+ ucData++;
+ if( !(iNewLine % 20))
+ {
+ sprintf(pstr, "\n");
+ printk(str);
+ sprintf(str, KERN_DEBUG "lmc: %s: ", type);
+ pstr=str+strlen(str);
+ }
+ iNewLine++;
+ iLen--;
+ }
+ sprintf(pstr, "\n");
+ printk(str);
+#endif
+#endif
+}
+
+#ifdef DEBUG
+u_int32_t lmcEventLogIndex = 0;
+u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+#endif
+
+void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
+{
+#ifdef DEBUG
+ lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
+ lmcEventLogBuf[lmcEventLogIndex++] = arg2;
+ lmcEventLogBuf[lmcEventLogIndex++] = arg3;
+ lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
+
+ lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
+#endif
+}
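
lmcEventLog() above keeps a flat ring of fixed four-word records (event number, two arguments, jiffies) and wraps the write index with a bitmask, which is why lmc_debug.h requires LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS to be a power of two: 1024 * 4 = 4096, so the mask is 0x0fff. A hedged sketch of how a consumer of the LMCIOCDUMPEVENTLOG dump might walk the records newest-first; the record layout follows the function above, the walker itself is illustrative:

    #include <stdio.h>

    #define LOG_WORDS (1024 * 4)        /* LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS */
    #define LOG_MASK  (LOG_WORDS - 1)   /* 0x0fff; only valid for powers of two */

    static void walk_eventlog(const unsigned int *buf, unsigned int next_index)
    {
            unsigned int i, pos;

            /* next_index is the slot the next record would use; step back one
             * 4-word record at a time to print newest entries first. */
            for (i = 0; i < LOG_WORDS / 4; i++) {
                    pos = (next_index - 4 * (i + 1)) & LOG_MASK;
                    if (buf[pos] == 0)          /* unused slot: log not full yet */
                            break;
                    printf("event %u args %u %u at jiffies %u\n",
                           buf[pos], buf[pos + 1], buf[pos + 2], buf[pos + 3]);
            }
    }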
+
+void lmc_trace(struct net_device *dev, char *msg){
+#ifdef LMC_TRACE
+ unsigned long j = jiffies + 3; /* Wait for 50 ms */
+
+ if(in_interrupt()){
+ printk("%s: * %s\n", dev->name, msg);
+// while(time_before(jiffies, j+10))
+// ;
+ }
+ else {
+ printk("%s: %s\n", dev->name, msg);
+ while(time_before(jiffies, j))
+ schedule();
+ }
+#endif
+}
+
+
+/* --------------------------- end lmc_debug.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
new file mode 100644
index 000000000000..cf3563859bf3
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -0,0 +1,52 @@
+#ifndef _LMC_DEBUG_H_
+#define _LMC_DEBUG_H_
+
+#ifdef DEBUG
+#ifdef LMC_PACKET_LOG
+#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
+#else
+#define LMC_CONSOLE_LOG(x,y,z)
+#endif
+#else
+#define LMC_CONSOLE_LOG(x,y,z)
+#endif
+
+
+
+/* Debug --- Event log definitions --- */
+/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
+#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
+#define LMC_EVENTLOGARGS 4 /* number of args for each event */
+
+/* event indicators */
+#define LMC_EVENT_XMT 1
+#define LMC_EVENT_XMTEND 2
+#define LMC_EVENT_XMTINT 3
+#define LMC_EVENT_RCVINT 4
+#define LMC_EVENT_RCVEND 5
+#define LMC_EVENT_INT 6
+#define LMC_EVENT_XMTINTTMO 7
+#define LMC_EVENT_XMTPRCTMO 8
+#define LMC_EVENT_INTEND 9
+#define LMC_EVENT_RESET1 10
+#define LMC_EVENT_RESET2 11
+#define LMC_EVENT_FORCEDRESET 12
+#define LMC_EVENT_WATCHDOG 13
+#define LMC_EVENT_BADPKTSURGE 14
+#define LMC_EVENT_TBUSY0 15
+#define LMC_EVENT_TBUSY1 16
+
+
+#ifdef DEBUG
+extern u_int32_t lmcEventLogIndex;
+extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
+#else
+#define LMC_EVENT_LOG(x,y,z)
+#endif /* end ifdef DEBUG */
+
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
+void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3);
+void lmc_trace(struct net_device *dev, char *msg);
+
+#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
new file mode 100644
index 000000000000..57dd861cd3db
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -0,0 +1,257 @@
+#ifndef _LMC_IOCTL_H_
+#define _LMC_IOCTL_H_
+/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */
+
+ /*
+ * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+ * All rights reserved. www.lanmedia.com
+ *
+ * This code is written by:
+ * Andrew Stanley-Jones (asj@cban.com)
+ * Rob Braun (bbraun@vix.com),
+ * Michael Graff (explorer@vix.com) and
+ * Matt Thomas (matt@3am-software.com).
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License version 2, incorporated herein by reference.
+ */
+
+#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */
+#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */
+#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5
+#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6
+#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7
+#define LMCIOCGETXINFO SIOCDEVPRIVATE+8
+#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9
+#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10
+#define LMCIOCRESET SIOCDEVPRIVATE+11
+#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12
+#define LMCIOCIFTYPE SIOCDEVPRIVATE+13
+#define LMCIOCXILINX SIOCDEVPRIVATE+14
+
+#define LMC_CARDTYPE_UNKNOWN -1
+#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */
+#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */
+#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */
+#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */
+
+#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */
+#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */
+#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */
+#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */
+
+#define LMC_CTL_OFF 0 /* generic OFF value */
+#define LMC_CTL_ON 1 /* generic ON value */
+
+#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */
+#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */
+
+#define LMC_CTL_CRC_LENGTH_16 16
+#define LMC_CTL_CRC_LENGTH_32 32
+#define LMC_CTL_CRC_BYTESIZE_2 2
+#define LMC_CTL_CRC_BYTESIZE_4 4
+
+
+#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */
+#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */
+
+#define LMC_CTL_CIRCUIT_TYPE_E1 0
+#define LMC_CTL_CIRCUIT_TYPE_T1 1
+
+/*
+ * IFTYPE defines
+ */
+#define LMC_PPP 1 /* use sppp interface */
+#define LMC_NET 2 /* use direct net interface */
+#define LMC_RAW 3 /* use direct net interface */
+
+/*
+ * These are not in the least IOCTL related, but I want them common.
+ */
+/*
+ * assignments for the GPIO register on the DEC chip (common)
+ */
+#define LMC_GEP_INIT 0x01 /* 0: */
+#define LMC_GEP_RESET 0x02 /* 1: */
+#define LMC_GEP_MODE 0x10 /* 4: */
+#define LMC_GEP_DP 0x20 /* 5: */
+#define LMC_GEP_DATA 0x40 /* 6: serial out */
+#define LMC_GEP_CLK 0x80 /* 7: serial clock */
+
+/*
+ * HSSI GPIO assignments
+ */
+#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */
+#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */
+
+/*
+ * T1 GPIO assignments
+ */
+#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */
+#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */
+
+/*
+ * Common MII16 bits
+ */
+#define LMC_MII16_LED0 0x0080
+#define LMC_MII16_LED1 0x0100
+#define LMC_MII16_LED2 0x0200
+#define LMC_MII16_LED3 0x0400 /* Error, and the red one */
+#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */
+#define LMC_MII16_FIFO_RESET 0x0800
+
+/*
+ * definitions for HSSI
+ */
+#define LMC_MII16_HSSI_TA 0x0001
+#define LMC_MII16_HSSI_CA 0x0002
+#define LMC_MII16_HSSI_LA 0x0004
+#define LMC_MII16_HSSI_LB 0x0008
+#define LMC_MII16_HSSI_LC 0x0010
+#define LMC_MII16_HSSI_TM 0x0020
+#define LMC_MII16_HSSI_CRC 0x0040
+
+/*
+ * assignments for the MII register 16 (DS3)
+ */
+#define LMC_MII16_DS3_ZERO 0x0001
+#define LMC_MII16_DS3_TRLBK 0x0002
+#define LMC_MII16_DS3_LNLBK 0x0004
+#define LMC_MII16_DS3_RAIS 0x0008
+#define LMC_MII16_DS3_TAIS 0x0010
+#define LMC_MII16_DS3_BIST 0x0020
+#define LMC_MII16_DS3_DLOS 0x0040
+#define LMC_MII16_DS3_CRC 0x1000
+#define LMC_MII16_DS3_SCRAM 0x2000
+#define LMC_MII16_DS3_SCRAM_LARS 0x4000
+
+/* Note: 2 pairs of LEDs were swapped by mistake
+ * in Xilinx code for DS3 & DS1 adapters */
+#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */
+#define LMC_DS3_LED1 0x0080 /* bit 07 blue */
+#define LMC_DS3_LED2 0x0400 /* bit 10 green */
+#define LMC_DS3_LED3 0x0200 /* bit 09 red */
+
+/*
+ * framer register 0 and 7 (7 is latched and reset on read)
+ */
+#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */
+#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */
+#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */
+#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */
+#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */
+
+/*
+ * Framer register 9 contains the blue alarm signal
+ */
+#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */
+
+/*
+ * Framer register 0x10 contains xbit error
+ */
+#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */
+
+/*
+ * And SSI, LMC1000
+ */
+#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */
+#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */
+#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */
+#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */
+#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */
+#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */
+#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */
+
+/*
+ * bits 0x0080 through 0x0800 are generic, and described
+ * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
+ */
+#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */
+#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */
+#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */
+#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */
+
+/*
+ * Some of the MII16 bits are mirrored in the MII17 register as well,
+ * but let's keep things separate for now, and get only the cable from
+ * the MII17.
+ */
+#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */
+#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */
+
+/*
+ * And T1, LMC1200
+ */
+#define LMC_MII16_T1_UNUSED1 0x0003
+#define LMC_MII16_T1_XOE 0x0004
+#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */
+#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */
+#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */
+#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */
+
+#define LMC_MII16_T1_LED0 0x0100
+#define LMC_MII16_T1_LED1 0x0080
+#define LMC_MII16_T1_LED2 0x0400
+#define LMC_MII16_T1_LED3 0x0200
+#define LMC_MII16_T1_FIFO_RESET 0x0800
+
+#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */
+#define LMC_MII16_T1_UNUSED2 0xe000
+
+
+/* 8370 framer registers */
+
+#define T1FRAMER_ALARM1_STATUS 0x47
+#define T1FRAMER_ALARM2_STATUS 0x48
+#define T1FRAMER_FERR_LSB 0x50
+#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */
+#define T1FRAMER_LCV_LSB 0x54
+#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */
+#define T1FRAMER_AERR 0x5A
+
+/* mask for the above AERR register */
+#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */
+#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */
+#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */
+
+/* 8370 framer register ALM1 (0x47) values
+ * used to determine link status
+ */
+
+#define T1F_SIGFRZ 0x01 /* signaling freeze */
+#define T1F_RLOF 0x02 /* receive loss of frame alignment */
+#define T1F_RLOS 0x04 /* receive loss of signal */
+#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */
+#define T1F_RAIS 0x10 /* receive alarm indication signal */
+#define T1F_UNUSED 0x20
+#define T1F_RYEL 0x40 /* receive yellow alarm */
+#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */
+
+#define LMC_T1F_WRITE 0
+#define LMC_T1F_READ 1
+
+typedef struct lmc_st1f_control {
+ int command;
+ int address;
+ int value;
+ char __user *data;
+} lmc_t1f_control;
+
+enum lmc_xilinx_c {
+ lmc_xilinx_reset = 1,
+ lmc_xilinx_load_prom = 2,
+ lmc_xilinx_load = 3
+};
+
+struct lmc_xilinx_control {
+ enum lmc_xilinx_c command;
+ int len;
+ char __user *data;
+};
+
+/* ------------------ end T1 defs ------------------- */
+
+#define LMC_MII_LedMask 0x0780
+#define LMC_MII_LedBitPos 7
+
+#endif
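
The LMCIOC* numbers defined at the top of this header are private device ioctls (SIOCDEVPRIVATE+n) that a management tool issues against the interface, with ifr_data pointing at a driver-defined structure; this is how the lmcctl utility mentioned in lmc_main.c talks to the driver. A minimal userspace sketch for LMCIOCGINFO, assuming the lmc_ctl_t definition comes in via lmc_var.h (which is not part of this hunk) and with error handling trimmed to the essentials:

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>          /* SIOCDEVPRIVATE */
    #include <net/if.h>

    #define LMCIOCGINFO (SIOCDEVPRIVATE + 3)    /* same value as in lmc_ioctl.h */

    /* Fill 'ctl' (an lmc_ctl_t from lmc_var.h, treated as opaque here)
     * with the current driver state of interface 'ifname'. */
    static int read_lmc_state(const char *ifname, void *ctl)
    {
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int rc = -1;

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = ctl;         /* the driver copies an lmc_ctl_t back here */
            if (ioctl(fd, LMCIOCGINFO, &ifr) == 0)
                    rc = 0;
            close(fd);
            return rc;
    }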
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
new file mode 100644
index 000000000000..15e545f66cd7
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -0,0 +1,2201 @@
+ /*
+ * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+ * All rights reserved. www.lanmedia.com
+ *
+ * This code is written by:
+ * Andrew Stanley-Jones (asj@cban.com)
+ * Rob Braun (bbraun@vix.com),
+ * Michael Graff (explorer@vix.com) and
+ * Matt Thomas (matt@3am-software.com).
+ *
+ * With Help By:
+ * David Boggs
+ * Ron Crane
+ * Alan Cox
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License version 2, incorporated herein by reference.
+ *
+ * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
+ *
+ * To control link specific options lmcctl is required.
+ * It can be obtained from ftp.lanmedia.com.
+ *
+ * Linux driver notes:
+ * Linux uses the per-device private structure (lmc_softc_t) to pass
+ * private information around.
+ *
+ * The initialization portion of this driver consists of the lmc_reset()
+ * and lmc_dec_reset() functions, the LED controls and the
+ * lmc_initcsrs() function.
+ *
+ * The watchdog function runs every second and checks to see if
+ * we still have link, and that the timing source is what we expected
+ * it to be. If link is lost, the interface is marked down, and
+ * we no longer can transmit.
+ *
+ */
+
+/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/bitops.h>
+
+#include <net/syncppp.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+//#include <asm/spinlock.h>
+
+#define DRIVER_MAJOR_VERSION 1
+#define DRIVER_MINOR_VERSION 34
+#define DRIVER_SUB_VERSION 0
+
+#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_ioctl.h"
+#include "lmc_debug.h"
+#include "lmc_proto.h"
+
+static int lmc_first_load = 0;
+
+static int LMC_PKT_BUF_SZ = 1542;
+
+static struct pci_device_id lmc_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+ PCI_VENDOR_ID_LMC, PCI_ANY_ID },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+ PCI_ANY_ID, PCI_VENDOR_ID_LMC },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
+MODULE_LICENSE("GPL");
+
+
+static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int lmc_rx (struct net_device *dev);
+static int lmc_open(struct net_device *dev);
+static int lmc_close(struct net_device *dev);
+static struct net_device_stats *lmc_get_stats(struct net_device *dev);
+static irqreturn_t lmc_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
+static void lmc_softreset(lmc_softc_t * const);
+static void lmc_running_reset(struct net_device *dev);
+static int lmc_ifdown(struct net_device * const);
+static void lmc_watchdog(unsigned long data);
+static void lmc_reset(lmc_softc_t * const sc);
+static void lmc_dec_reset(lmc_softc_t * const sc);
+static void lmc_driver_timeout(struct net_device *dev);
+
+/*
+ * linux reserves 16 device specific IOCTLs. We call them
+ * LMCIOC* to control various bits of our world.
+ */
+int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
+{
+ lmc_softc_t *sc;
+ lmc_ctl_t ctl;
+ int ret;
+ u_int16_t regVal;
+ unsigned long flags;
+
+ struct sppp *sp;
+
+ ret = -EOPNOTSUPP;
+
+ sc = dev->priv;
+
+ lmc_trace(dev, "lmc_ioctl in");
+
+ /*
+ * Most functions mess with the structure
+ * Disable interrupts while we do the polling
+ */
+ spin_lock_irqsave(&sc->lmc_lock, flags);
+
+ switch (cmd) {
+ /*
+ * Return current driver state. Since we keep this up
+       * to date internally, just copy this out to the user.
+ */
+ case LMCIOCGINFO: /*fold01*/
+ if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof (lmc_ctl_t)))
+ return -EFAULT;
+ ret = 0;
+ break;
+
+ case LMCIOCSINFO: /*fold01*/
+ sp = &((struct ppp_device *) dev)->sppp;
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ if(dev->flags & IFF_UP){
+ ret = -EBUSY;
+ break;
+ }
+
+ if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
+ return -EFAULT;
+
+ sc->lmc_media->set_status (sc, &ctl);
+
+ if(ctl.crc_length != sc->ictl.crc_length) {
+ sc->lmc_media->set_crc_length(sc, ctl.crc_length);
+ if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
+ sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
+ else
+ sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
+ }
+
+ if (ctl.keepalive_onoff == LMC_CTL_OFF)
+ sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
+ else
+ sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
+
+ ret = 0;
+ break;
+
+ case LMCIOCIFTYPE: /*fold01*/
+ {
+ u_int16_t old_type = sc->if_type;
+ u_int16_t new_type;
+
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
+ return -EFAULT;
+
+
+ if (new_type == old_type)
+ {
+ ret = 0 ;
+ break; /* no change */
+ }
+
+ lmc_proto_close(sc);
+ lmc_proto_detach(sc);
+
+ sc->if_type = new_type;
+// lmc_proto_init(sc);
+ lmc_proto_attach(sc);
+ lmc_proto_open(sc);
+
+ ret = 0 ;
+ break ;
+ }
+
+ case LMCIOCGETXINFO: /*fold01*/
+ sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
+
+ sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
+ sc->lmc_xinfo.PciSlotNumber = 0;
+ sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
+ sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
+ sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
+ sc->lmc_xinfo.XilinxRevisionNumber =
+ lmc_mii_readreg (sc, 0, 3) & 0xf;
+ sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
+ sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
+ sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
+
+ sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
+
+ if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
+ sizeof (struct lmc_xinfo)))
+ return -EFAULT;
+ ret = 0;
+
+ break;
+
+ case LMCIOCGETLMCSTATS: /*fold01*/
+ if (sc->lmc_cardtype == LMC_CARDTYPE_T1){
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB);
+ sc->stats.framingBitErrorCount +=
+ lmc_mii_readreg (sc, 0, 18) & 0xff;
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB);
+ sc->stats.framingBitErrorCount +=
+ (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB);
+ sc->stats.lineCodeViolationCount +=
+ lmc_mii_readreg (sc, 0, 18) & 0xff;
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB);
+ sc->stats.lineCodeViolationCount +=
+ (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR);
+ regVal = lmc_mii_readreg (sc, 0, 18) & 0xff;
+
+ sc->stats.lossOfFrameCount +=
+ (regVal & T1FRAMER_LOF_MASK) >> 4;
+ sc->stats.changeOfFrameAlignmentCount +=
+ (regVal & T1FRAMER_COFA_MASK) >> 2;
+ sc->stats.severelyErroredFrameCount +=
+ regVal & T1FRAMER_SEF_MASK;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &sc->stats,
+ sizeof (struct lmc_statistics)))
+ return -EFAULT;
+
+ ret = 0;
+ break;
+
+ case LMCIOCCLEARLMCSTATS: /*fold01*/
+ if (!capable(CAP_NET_ADMIN)){
+ ret = -EPERM;
+ break;
+ }
+
+ memset (&sc->stats, 0, sizeof (struct lmc_statistics));
+ sc->stats.check = STATCHECK;
+ sc->stats.version_size = (DRIVER_VERSION << 16) +
+ sizeof (struct lmc_statistics);
+ sc->stats.lmc_cardtype = sc->lmc_cardtype;
+ ret = 0;
+ break;
+
+ case LMCIOCSETCIRCUIT: /*fold01*/
+ if (!capable(CAP_NET_ADMIN)){
+ ret = -EPERM;
+ break;
+ }
+
+ if(dev->flags & IFF_UP){
+ ret = -EBUSY;
+ break;
+ }
+
+ if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
+ return -EFAULT;
+ sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
+ sc->ictl.circuit_type = ctl.circuit_type;
+ ret = 0;
+
+ break;
+
+ case LMCIOCRESET: /*fold01*/
+ if (!capable(CAP_NET_ADMIN)){
+ ret = -EPERM;
+ break;
+ }
+
+ /* Reset driver and bring back to current state */
+ printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
+ lmc_running_reset (dev);
+ printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
+
+ LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
+
+ ret = 0;
+ break;
+
+#ifdef DEBUG
+ case LMCIOCDUMPEVENTLOG:
+ if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
+ return -EFAULT;
+ if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
+ return -EFAULT;
+
+ ret = 0;
+ break;
+#endif /* end ifdef DEBUG */
+ case LMCIOCT1CONTROL: /*fold01*/
+ if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case LMCIOCXILINX: /*fold01*/
+ {
+ struct lmc_xilinx_control xc; /*fold02*/
+
+ if (!capable(CAP_NET_ADMIN)){
+ ret = -EPERM;
+ break;
+ }
+
+ /*
+             * Stop the transmitter while we restart the hardware
+ */
+ netif_stop_queue(dev);
+
+ if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
+ return -EFAULT;
+ switch(xc.command){
+ case lmc_xilinx_reset: /*fold02*/
+ {
+ u16 mii;
+ mii = lmc_mii_readreg (sc, 0, 16);
+
+ /*
+ * Make all of them 0 and make input
+ */
+ lmc_gpio_mkinput(sc, 0xff);
+
+ /*
+ * make the reset output
+ */
+ lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
+
+ /*
+ * RESET low to force configuration. This also forces
+ * the transmitter clock to be internal, but we expect to reset
+ * that later anyway.
+ */
+
+ sc->lmc_gpio &= ~LMC_GEP_RESET;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+ /*
+ * hold for more than 10 microseconds
+ */
+ udelay(50);
+
+ sc->lmc_gpio |= LMC_GEP_RESET;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+ /*
+ * stop driving Xilinx-related signals
+ */
+ lmc_gpio_mkinput(sc, 0xff);
+
+                    /* Reset the framer hardware */
+ sc->lmc_media->set_link_status (sc, 1);
+ sc->lmc_media->set_status (sc, NULL);
+// lmc_softreset(sc);
+
+ {
+ int i;
+ for(i = 0; i < 5; i++){
+ lmc_led_on(sc, LMC_DS3_LED0);
+ mdelay(100);
+ lmc_led_off(sc, LMC_DS3_LED0);
+ lmc_led_on(sc, LMC_DS3_LED1);
+ mdelay(100);
+ lmc_led_off(sc, LMC_DS3_LED1);
+ lmc_led_on(sc, LMC_DS3_LED3);
+ mdelay(100);
+ lmc_led_off(sc, LMC_DS3_LED3);
+ lmc_led_on(sc, LMC_DS3_LED2);
+ mdelay(100);
+ lmc_led_off(sc, LMC_DS3_LED2);
+ }
+ }
+
+
+
+ ret = 0x0;
+
+ }
+
+ break;
+ case lmc_xilinx_load_prom: /*fold02*/
+ {
+ u16 mii;
+ int timeout = 500000;
+ mii = lmc_mii_readreg (sc, 0, 16);
+
+ /*
+ * Make all of them 0 and make input
+ */
+ lmc_gpio_mkinput(sc, 0xff);
+
+ /*
+ * make the reset output
+ */
+ lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
+
+ /*
+ * RESET low to force configuration. This also forces
+ * the transmitter clock to be internal, but we expect to reset
+ * that later anyway.
+ */
+
+ sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+ /*
+ * hold for more than 10 microseconds
+ */
+ udelay(50);
+
+ sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * busy wait for the chip to reset
+ */
+ while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
+ (timeout-- > 0))
+ ;
+
+
+ /*
+ * stop driving Xilinx-related signals
+ */
+ lmc_gpio_mkinput(sc, 0xff);
+
+ ret = 0x0;
+
+
+ break;
+
+ }
+
+ case lmc_xilinx_load: /*fold02*/
+ {
+ char *data;
+ int pos;
+ int timeout = 500000;
+
+ if(xc.data == 0x0){
+ ret = -EINVAL;
+ break;
+ }
+
+ data = kmalloc(xc.len, GFP_KERNEL);
+ if(data == 0x0){
+ printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name);
+ ret = -ENOMEM;
+ break;
+ }
+
+ if(copy_from_user(data, xc.data, xc.len))
+ {
+ kfree(data);
+ ret = -ENOMEM;
+ break;
+ }
+
+ printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
+
+ lmc_gpio_mkinput(sc, 0xff);
+
+ /*
+                     * Clear the Xilinx and start programming from the DEC
+ */
+
+ /*
+                     * Set output as:
+ * Reset: 0 (active)
+ * DP: 0 (active)
+ * Mode: 1
+ *
+ */
+ sc->lmc_gpio = 0x00;
+ sc->lmc_gpio &= ~LMC_GEP_DP;
+ sc->lmc_gpio &= ~LMC_GEP_RESET;
+ sc->lmc_gpio |= LMC_GEP_MODE;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+ lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
+
+ /*
+                     * Wait at least 10 us (20 to be safe)
+ */
+ udelay(50);
+
+ /*
+ * Clear reset and activate programming lines
+ * Reset: Input
+ * DP: Input
+ * Clock: Output
+ * Data: Output
+ * Mode: Output
+ */
+ lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
+
+ /*
+ * Set LOAD, DATA, Clock to 1
+ */
+ sc->lmc_gpio = 0x00;
+ sc->lmc_gpio |= LMC_GEP_MODE;
+ sc->lmc_gpio |= LMC_GEP_DATA;
+ sc->lmc_gpio |= LMC_GEP_CLK;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+ lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
+
+ /*
+ * busy wait for the chip to reset
+ */
+ while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
+ (timeout-- > 0))
+ ;
+
+                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
+
+ for(pos = 0; pos < xc.len; pos++){
+ switch(data[pos]){
+ case 0:
+ sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
+ break;
+ case 1:
+ sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
+ break;
+ default:
+ printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
+ sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
+ }
+ sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
+ sc->lmc_gpio |= LMC_GEP_MODE;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+ udelay(1);
+
+                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
+ sc->lmc_gpio |= LMC_GEP_MODE;
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+ udelay(1);
+ }
+ if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
+ printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
+ }
+ else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
+ printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
+ }
+ else {
+ printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
+ }
+
+ lmc_gpio_mkinput(sc, 0xff);
+
+ sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+ sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+ kfree(data);
+
+ ret = 0;
+
+ break;
+ }
+ default: /*fold02*/
+ ret = -EBADE;
+ break;
+ }
+
+ netif_wake_queue(dev);
+ sc->lmc_txfull = 0;
+
+ }
+ break;
+ default: /*fold01*/
+ /* If we don't know what to do, give the protocol a shot. */
+ ret = lmc_proto_ioctl (sc, ifr, cmd);
+ break;
+ }
+
+ spin_unlock_irqrestore(&sc->lmc_lock, flags); /*fold01*/
+
+ lmc_trace(dev, "lmc_ioctl out");
+
+ return ret;
+}
+
+
+/* the watchdog process that cruises around */
+static void lmc_watchdog (unsigned long data) /*fold00*/
+{
+ struct net_device *dev = (struct net_device *) data;
+ lmc_softc_t *sc;
+ int link_status;
+ u_int32_t ticks;
+ unsigned long flags;
+
+ sc = dev->priv;
+
+ lmc_trace(dev, "lmc_watchdog in");
+
+ spin_lock_irqsave(&sc->lmc_lock, flags);
+
+ if(sc->check != 0xBEAFCAFE){
+        printk("LMC: Corrupt net_device struct, breaking out\n");
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
+ return;
+ }
+
+
+ /* Make sure the tx jabber and rx watchdog are off,
+ * and the transmit and receive processes are running.
+ */
+
+ LMC_CSR_WRITE (sc, csr_15, 0x00000011);
+ sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
+ LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+ if (sc->lmc_ok == 0)
+ goto kick_timer;
+
+ LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
+
+ /* --- begin time out check -----------------------------------
+     * check for a transmit interrupt timeout:
+     * has the packet-xmt vs. xmt-serviced threshold been exceeded? */
+ if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+ sc->stats.tx_packets > sc->lasttx_packets &&
+ sc->tx_TimeoutInd == 0)
+ {
+
+ /* wait for the watchdog to come around again */
+ sc->tx_TimeoutInd = 1;
+ }
+ else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+ sc->stats.tx_packets > sc->lasttx_packets &&
+ sc->tx_TimeoutInd)
+ {
+
+ LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
+
+ sc->tx_TimeoutDisplay = 1;
+ sc->stats.tx_TimeoutCnt++;
+
+ /* DEC chip is stuck, hit it with a RESET!!!! */
+ lmc_running_reset (dev);
+
+
+ /* look at receive & transmit process state to make sure they are running */
+ LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+
+ /* look at: DSR - 02 for Reg 16
+ * CTS - 08
+ * DCD - 10
+ * RI - 20
+ * for Reg 17
+ */
+ LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
+
+ /* reset the transmit timeout detection flag */
+ sc->tx_TimeoutInd = 0;
+ sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+ sc->lasttx_packets = sc->stats.tx_packets;
+ }
+ else
+ {
+ sc->tx_TimeoutInd = 0;
+ sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+ sc->lasttx_packets = sc->stats.tx_packets;
+ }
+
+ /* --- end time out check ----------------------------------- */
+
+
+ link_status = sc->lmc_media->get_link_status (sc);
+
+ /*
+ * hardware level link lost, but the interface is marked as up.
+ * Mark it as down.
+ */
+ if ((link_status == 0) && (sc->last_link_status != 0)) {
+ printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
+ sc->last_link_status = 0;
+ /* lmc_reset (sc); Why reset??? The link can go down ok */
+
+ /* Inform the world that link has been lost */
+ dev->flags &= ~IFF_RUNNING;
+ }
+
+ /*
+ * hardware link is up, but the interface is marked as down.
+ * Bring it back up again.
+ */
+ if (link_status != 0 && sc->last_link_status == 0) {
+ printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
+ sc->last_link_status = 1;
+ /* lmc_reset (sc); Again why reset??? */
+
+ /* Inform the world that link protocol is back up. */
+ dev->flags |= IFF_RUNNING;
+
+ /* Now we have to tell the syncppp that we had an outage
+ * and that it should deal. Calling sppp_reopen here
+ * should do the trick, but we may have to call sppp_close
+ * when the link goes down, and call sppp_open here.
+ * Subject to more testing.
+ * --bbraun
+ */
+
+ lmc_proto_reopen(sc);
+
+ }
+
+ /* Call media specific watchdog functions */
+ sc->lmc_media->watchdog(sc);
+
+ /*
+ * Poke the transmitter to make sure it
+ * never stops, even if we run out of mem
+ */
+ LMC_CSR_WRITE(sc, csr_rxpoll, 0);
+
+ /*
+ * Check for code that failed
+ * and try and fix it as appropriate
+ */
+ if(sc->failed_ring == 1){
+ /*
+         * Failed to setup the recv/xmit rings
+ * Try again
+ */
+ sc->failed_ring = 0;
+ lmc_softreset(sc);
+ }
+ if(sc->failed_recv_alloc == 1){
+ /*
+ * We failed to alloc mem in the
+ * interrupt handler, go through the rings
+ * and rebuild them
+ */
+ sc->failed_recv_alloc = 0;
+ lmc_softreset(sc);
+ }
+
+
+ /*
+ * remember the timer value
+ */
+kick_timer:
+
+ ticks = LMC_CSR_READ (sc, csr_gp_timer);
+ LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
+ sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
+
+ /*
+ * restart this timer.
+ */
+ sc->timer.expires = jiffies + (HZ);
+ add_timer (&sc->timer);
+
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+ lmc_trace(dev, "lmc_watchdog out");
+
+}
+
+static void lmc_setup(struct net_device * const dev) /*fold00*/
+{
+ lmc_trace(dev, "lmc_setup in");
+
+ dev->type = ARPHRD_HDLC;
+ dev->hard_start_xmit = lmc_start_xmit;
+ dev->open = lmc_open;
+ dev->stop = lmc_close;
+ dev->get_stats = lmc_get_stats;
+ dev->do_ioctl = lmc_ioctl;
+ dev->tx_timeout = lmc_driver_timeout;
+ dev->watchdog_timeo = (HZ); /* 1 second */
+
+ lmc_trace(dev, "lmc_setup out");
+}
+
+
+static int __devinit lmc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ lmc_softc_t *sc;
+ u16 subdevice;
+ u_int16_t AdapModelNum;
+ int err = -ENOMEM;
+ static int cards_found;
+#ifndef GCOM
+ /* We name by type not by vendor */
+ static const char lmcname[] = "hdlc%d";
+#else
+ /*
+ * GCOM uses LMC vendor name so that clients can know which card
+ * to attach to.
+ */
+ static const char lmcname[] = "lmc%d";
+#endif
+
+
+ /*
+ * Allocate our own device structure
+ */
+ dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
+ if (!dev) {
+ printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
+ goto out1;
+ }
+
+ lmc_trace(dev, "lmc_init_one in");
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
+ goto out2;
+ }
+
+ if (pci_request_regions(pdev, "lmc")) {
+ printk(KERN_ERR "lmc: pci_request_region failed\n");
+ err = -EIO;
+ goto out3;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ if(lmc_first_load == 0){
+ printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
+ DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
+ lmc_first_load = 1;
+ }
+
+ sc = dev->priv;
+ sc->lmc_device = dev;
+ sc->name = dev->name;
+
+ /* Initialize the sppp layer */
+ /* An ioctl can cause a subsequent detach for raw frame interface */
+ sc->if_type = LMC_PPP;
+ sc->check = 0xBEAFCAFE;
+ dev->base_addr = pci_resource_start(pdev, 0);
+ dev->irq = pdev->irq;
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /*
+ * This will get the protocol layer ready and do any 1 time init's
+ * Must have a valid sc and dev structure
+ */
+ lmc_proto_init(sc);
+
+ lmc_proto_attach(sc);
+
+ /*
+ * Why were we changing this???
+ dev->tx_queue_len = 100;
+ */
+
+    /* Init the spin lock so we can call it later */
+
+ spin_lock_init(&sc->lmc_lock);
+ pci_set_master(pdev);
+
+ printk ("%s: detected at %lx, irq %d\n", dev->name,
+ dev->base_addr, dev->irq);
+
+ if (register_netdev (dev) != 0) {
+ printk (KERN_ERR "%s: register_netdev failed.\n", dev->name);
+ goto out4;
+ }
+
+ sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
+ sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+
+ /*
+ *
+     * Check either the subvendor or the subdevice; some systems reverse
+     * the setting in the BIOS, which seems to be version and arch dependent.
+     * Fix the error by exchanging the two values.
+ */
+ if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
+ subdevice = pdev->subsystem_vendor;
+
+ switch (subdevice) {
+ case PCI_DEVICE_ID_LMC_HSSI:
+ printk ("%s: LMC HSSI\n", dev->name);
+ sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
+ sc->lmc_media = &lmc_hssi_media;
+ break;
+ case PCI_DEVICE_ID_LMC_DS3:
+ printk ("%s: LMC DS3\n", dev->name);
+ sc->lmc_cardtype = LMC_CARDTYPE_DS3;
+ sc->lmc_media = &lmc_ds3_media;
+ break;
+ case PCI_DEVICE_ID_LMC_SSI:
+ printk ("%s: LMC SSI\n", dev->name);
+ sc->lmc_cardtype = LMC_CARDTYPE_SSI;
+ sc->lmc_media = &lmc_ssi_media;
+ break;
+ case PCI_DEVICE_ID_LMC_T1:
+ printk ("%s: LMC T1\n", dev->name);
+ sc->lmc_cardtype = LMC_CARDTYPE_T1;
+ sc->lmc_media = &lmc_t1_media;
+ break;
+ default:
+        printk (KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
+ break;
+ }
+
+ lmc_initcsrs (sc, dev->base_addr, 8);
+
+ lmc_gpio_mkinput (sc, 0xff);
+ sc->lmc_gpio = 0; /* drive no signals yet */
+
+ sc->lmc_media->defaults (sc);
+
+ sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
+
+ /* verify that the PCI Sub System ID matches the Adapter Model number
+ * from the MII register
+ */
+ AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
+
+ if ((AdapModelNum == LMC_ADAP_T1
+ && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */
+ (AdapModelNum == LMC_ADAP_SSI
+ && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */
+ (AdapModelNum == LMC_ADAP_DS3
+ && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */
+ (AdapModelNum == LMC_ADAP_HSSI
+ && subdevice == PCI_DEVICE_ID_LMC_HSSI))
+ { /* detect LMC5200 */
+
+ }
+ else {
+ printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
+ dev->name, AdapModelNum, subdevice);
+// return (NULL);
+ }
+ /*
+ * reset clock
+ */
+ LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
+
+ sc->board_idx = cards_found++;
+ sc->stats.check = STATCHECK;
+ sc->stats.version_size = (DRIVER_VERSION << 16) +
+ sizeof (struct lmc_statistics);
+ sc->stats.lmc_cardtype = sc->lmc_cardtype;
+
+ sc->lmc_ok = 0;
+ sc->last_link_status = 0;
+
+ lmc_trace(dev, "lmc_init_one out");
+ return 0;
+
+ out4:
+ lmc_proto_detach(sc);
+ out3:
+ if (pdev) {
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+ out2:
+ free_netdev(dev);
+ out1:
+ return err;
+}
+
+/*
+ * Called from pci when removing module.
+ */
+static void __devexit lmc_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ lmc_softc_t *sc = dev->priv;
+
+ printk("%s: removing...\n", dev->name);
+ lmc_proto_detach(sc);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
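
lmc_pci_tbl, lmc_init_one() and lmc_remove_one() are the three pieces the PCI core needs, but the registration call itself lies outside this excerpt. A sketch of how they would typically be tied together in a 2.6-era driver follows; the lmc_driver name and the init/exit wrappers are illustrative, not taken from the patch:

    /* Registration glue for the probe/remove pair above (illustrative names). */
    static struct pci_driver lmc_driver = {
            .name     = "lmc",
            .id_table = lmc_pci_tbl,            /* match table defined earlier */
            .probe    = lmc_init_one,           /* called for each matching card */
            .remove   = __devexit_p(lmc_remove_one),
    };

    static int __init lmc_init_module(void)
    {
            /* pci_module_init() was the 2.6.12-era wrapper around
             * pci_register_driver(). */
            return pci_module_init(&lmc_driver);
    }

    static void __exit lmc_exit_module(void)
    {
            pci_unregister_driver(&lmc_driver);
    }

    module_init(lmc_init_module);
    module_exit(lmc_exit_module);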
+
+/* After this is called, packets can be sent.
+ * Does not initialize the addresses
+ */
+static int lmc_open (struct net_device *dev) /*fold00*/
+{
+ lmc_softc_t *sc = dev->priv;
+
+ lmc_trace(dev, "lmc_open in");
+
+ lmc_led_on(sc, LMC_DS3_LED0);
+
+ lmc_dec_reset (sc);
+ lmc_reset (sc);
+
+ LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+ LMC_EVENT_LOG(LMC_EVENT_RESET2,
+ lmc_mii_readreg (sc, 0, 16),
+ lmc_mii_readreg (sc, 0, 17));
+
+
+ if (sc->lmc_ok){
+ lmc_trace(dev, "lmc_open lmc_ok out");
+ return (0);
+ }
+
+ lmc_softreset (sc);
+
+ /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
+ if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ, dev->name, dev)){
+ printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
+ lmc_trace(dev, "lmc_open irq failed out");
+ return -EAGAIN;
+ }
+ sc->got_irq = 1;
+
+ /* Assert Terminal Active */
+ sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
+ sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
+
+ /*
+ * reset to last state.
+ */
+ sc->lmc_media->set_status (sc, NULL);
+
+ /* setup default bits to be used in tulip_desc_t transmit descriptor
+ * -baz */
+ sc->TxDescriptControlInit = (
+ LMC_TDES_INTERRUPT_ON_COMPLETION
+ | LMC_TDES_FIRST_SEGMENT
+ | LMC_TDES_LAST_SEGMENT
+ | LMC_TDES_SECOND_ADDR_CHAINED
+ | LMC_TDES_DISABLE_PADDING
+ );
+
+ if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
+ /* disable 32 bit CRC generated by ASIC */
+ sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
+ }
+ sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
+    /* Acknowledge the Terminal Active and light LEDs */
+
+ /* dev->flags |= IFF_UP; */
+
+ lmc_proto_open(sc);
+
+ dev->do_ioctl = lmc_ioctl;
+
+
+ netif_start_queue(dev);
+
+ sc->stats.tx_tbusy0++ ;
+
+ /*
+ * select what interrupts we want to get
+ */
+ sc->lmc_intrmask = 0;
+ /* Should be using the default interrupt mask defined in the .h file. */
+ sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
+ | TULIP_STS_RXINTR
+ | TULIP_STS_TXINTR
+ | TULIP_STS_ABNRMLINTR
+ | TULIP_STS_SYSERROR
+ | TULIP_STS_TXSTOPPED
+ | TULIP_STS_TXUNDERFLOW
+ | TULIP_STS_RXSTOPPED
+ | TULIP_STS_RXNOBUF
+ );
+ LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
+
+ sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
+ sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
+ LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+ sc->lmc_ok = 1; /* Run watchdog */
+
+ /*
+ * Set the if up now - pfb
+ */
+
+ sc->last_link_status = 1;
+
+ /*
+ * Setup a timer for the watchdog on probe, and start it running.
+ * Since lmc_ok == 0, it will be a NOP for now.
+ */
+ init_timer (&sc->timer);
+ sc->timer.expires = jiffies + HZ;
+ sc->timer.data = (unsigned long) dev;
+ sc->timer.function = &lmc_watchdog;
+ add_timer (&sc->timer);
+
+ lmc_trace(dev, "lmc_open out");
+
+ return (0);
+}
+
+/* Total reset to compensate for the AdTran DSU doing bad things
+ * under heavy load
+ */
+
+static void lmc_running_reset (struct net_device *dev) /*fold00*/
+{
+
+ lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
+
+    lmc_trace(dev, "lmc_running_reset in");
+
+ /* stop interrupts */
+ /* Clear the interrupt mask */
+ LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
+
+ lmc_dec_reset (sc);
+ lmc_reset (sc);
+ lmc_softreset (sc);
+ /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
+ sc->lmc_media->set_link_status (sc, 1);
+ sc->lmc_media->set_status (sc, NULL);
+
+ //dev->flags |= IFF_RUNNING;
+
+ netif_wake_queue(dev);
+
+ sc->lmc_txfull = 0;
+ sc->stats.tx_tbusy0++ ;
+
+ sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
+ LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
+
+ sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
+ LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+    lmc_trace(dev, "lmc_running_reset out");
+}
+
+
+/* This is what is called when you ifconfig down a device.
+ * This disables the timer for the watchdog and keepalives,
+ * and disables the irq for dev.
+ */
+static int lmc_close (struct net_device *dev) /*fold00*/
+{
+ /* not calling release_region() as we should */
+ lmc_softc_t *sc;
+
+ lmc_trace(dev, "lmc_close in");
+
+ sc = dev->priv;
+ sc->lmc_ok = 0;
+ sc->lmc_media->set_link_status (sc, 0);
+ del_timer (&sc->timer);
+ lmc_proto_close(sc);
+ lmc_ifdown (dev);
+
+ lmc_trace(dev, "lmc_close out");
+
+ return 0;
+}
+
+/* Ends the transfer of packets */
+/* When the interface goes down, this is called */
+static int lmc_ifdown (struct net_device *dev) /*fold00*/
+{
+ lmc_softc_t *sc = dev->priv;
+ u32 csr6;
+ int i;
+
+ lmc_trace(dev, "lmc_ifdown in");
+
+ /* Don't let anything else go on right now */
+ // dev->start = 0;
+ netif_stop_queue(dev);
+ sc->stats.tx_tbusy1++ ;
+
+ /* stop interrupts */
+ /* Clear the interrupt mask */
+ LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
+
+ /* Stop Tx and Rx on the chip */
+ csr6 = LMC_CSR_READ (sc, csr_command);
+ csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
+ csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
+ LMC_CSR_WRITE (sc, csr_command, csr6);
+
+ dev->flags &= ~IFF_RUNNING;
+
+ sc->stats.rx_missed_errors +=
+ LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
+
+ /* release the interrupt */
+ if(sc->got_irq == 1){
+ free_irq (dev->irq, dev);
+ sc->got_irq = 0;
+ }
+
+ /* free skbuffs in the Rx queue */
+ for (i = 0; i < LMC_RXDESCS; i++)
+ {
+ struct sk_buff *skb = sc->lmc_rxq[i];
+ sc->lmc_rxq[i] = NULL;
+ sc->lmc_rxring[i].status = 0;
+ sc->lmc_rxring[i].length = 0;
+ sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
+ if (skb != NULL)
+ dev_kfree_skb(skb);
+ sc->lmc_rxq[i] = NULL;
+ }
+
+ for (i = 0; i < LMC_TXDESCS; i++)
+ {
+ if (sc->lmc_txq[i] != NULL)
+ dev_kfree_skb(sc->lmc_txq[i]);
+ sc->lmc_txq[i] = NULL;
+ }
+
+ lmc_led_off (sc, LMC_MII16_LED_ALL);
+
+ netif_wake_queue(dev);
+ sc->stats.tx_tbusy0++ ;
+
+ lmc_trace(dev, "lmc_ifdown out");
+
+ return 0;
+}
+
+/* Interrupt handling routine. This will take an incoming packet, or clean
+ * up after a transmit.
+ */
+static irqreturn_t lmc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) /*fold00*/
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ lmc_softc_t *sc;
+ u32 csr;
+ int i;
+ s32 stat;
+ unsigned int badtx;
+ u32 firstcsr;
+ int max_work = LMC_RXDESCS;
+ int handled = 0;
+
+ lmc_trace(dev, "lmc_interrupt in");
+
+ sc = dev->priv;
+
+ spin_lock(&sc->lmc_lock);
+
+ /*
+ * Read the csr to find what interrupts we have (if any)
+ */
+ csr = LMC_CSR_READ (sc, csr_status);
+
+ /*
+ * Make sure this is our interrupt
+ */
+ if ( ! (csr & sc->lmc_intrmask)) {
+ goto lmc_int_fail_out;
+ }
+
+ firstcsr = csr;
+
+ /* always go through this loop at least once */
+ while (csr & sc->lmc_intrmask) {
+ handled = 1;
+
+ /*
+ * Clear interrupt bits; we handle all cases below
+ */
+ LMC_CSR_WRITE (sc, csr_status, csr);
+
+ /*
+ * One of
+ * - Transmit process timed out CSR5<1>
+ * - Transmit jabber timeout CSR5<3>
+ * - Transmit underflow CSR5<5>
+ * - Receive buffer unavailable CSR5<7>
+ * - Receive process stopped CSR5<8>
+ * - Receive watchdog timeout CSR5<9>
+ * - Early transmit interrupt CSR5<10>
+ *
+ * Is this really right? Should we do a running reset for jabber?
+ * (being a WAN card and all)
+ */
+ if (csr & TULIP_STS_ABNRMLINTR){
+ lmc_running_reset (dev);
+ break;
+ }
+
+ if (csr & TULIP_STS_RXINTR){
+ lmc_trace(dev, "rx interrupt");
+ lmc_rx (dev);
+
+ }
+ if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
+
+ int n_compl = 0 ;
+ /* reset the transmit timeout detection flag -baz */
+ sc->stats.tx_NoCompleteCnt = 0;
+
+ badtx = sc->lmc_taint_tx;
+ i = badtx % LMC_TXDESCS;
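+ /*
+ * Walk from the oldest unreclaimed descriptor (lmc_taint_tx) toward
+ * lmc_next_tx, freeing buffers the chip has finished with.
+ */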
+
+ while ((badtx < sc->lmc_next_tx)) {
+ stat = sc->lmc_txring[i].status;
+
+ LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
+ sc->lmc_txring[i].length);
+ /*
+ * If bit 31 is 1 the Tulip owns it, so break out of the loop
+ */
+ if (stat & 0x80000000)
+ break;
+
+ n_compl++ ; /* i.e., have an empty slot in ring */
+ /*
+ * If we have no skbuff, or have already cleared it,
+ * continue to the next buffer
+ */
+ if (sc->lmc_txq[i] == NULL)
+ continue;
+
+ /*
+ * Check the total error summary to look for any errors
+ */
+ if (stat & 0x8000) {
+ sc->stats.tx_errors++;
+ if (stat & 0x4104)
+ sc->stats.tx_aborted_errors++;
+ if (stat & 0x0C00)
+ sc->stats.tx_carrier_errors++;
+ if (stat & 0x0200)
+ sc->stats.tx_window_errors++;
+ if (stat & 0x0002)
+ sc->stats.tx_fifo_errors++;
+ }
+ else {
+
+ sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
+
+ sc->stats.tx_packets++;
+ }
+
+ // dev_kfree_skb(sc->lmc_txq[i]);
+ dev_kfree_skb_irq(sc->lmc_txq[i]);
+ sc->lmc_txq[i] = NULL;
+
+ badtx++;
+ i = badtx % LMC_TXDESCS;
+ }
+
+ if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
+ {
+ printk ("%s: out of sync pointer\n", dev->name);
+ badtx += LMC_TXDESCS;
+ }
+ LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
+ sc->lmc_txfull = 0;
+ netif_wake_queue(dev);
+ sc->stats.tx_tbusy0++ ;
+
+
+#ifdef DEBUG
+ sc->stats.dirtyTx = badtx;
+ sc->stats.lmc_next_tx = sc->lmc_next_tx;
+ sc->stats.lmc_txfull = sc->lmc_txfull;
+#endif
+ sc->lmc_taint_tx = badtx;
+
+ /*
+ * Why was there a break here???
+ */
+ } /* end handle transmit interrupt */
+
+ if (csr & TULIP_STS_SYSERROR) {
+ u32 error;
+ printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
+ error = csr>>23 & 0x7;
+ switch(error){
+ case 0x000:
+ printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
+ break;
+ case 0x001:
+ printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
+ break;
+ case 0x010:
+ printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
+ break;
+ default:
+ printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
+ }
+ lmc_dec_reset (sc);
+ lmc_reset (sc);
+ LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+ LMC_EVENT_LOG(LMC_EVENT_RESET2,
+ lmc_mii_readreg (sc, 0, 16),
+ lmc_mii_readreg (sc, 0, 17));
+
+ }
+
+
+ if(max_work-- <= 0)
+ break;
+
+ /*
+ * Get current csr status to make sure
+ * we've cleared all interrupts
+ */
+ csr = LMC_CSR_READ (sc, csr_status);
+ } /* end interrupt loop */
+ LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
+
+lmc_int_fail_out:
+
+ spin_unlock(&sc->lmc_lock);
+
+ lmc_trace(dev, "lmc_interrupt out");
+ return IRQ_RETVAL(handled);
+}
+
+static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
+{
+ lmc_softc_t *sc;
+ u32 flag;
+ int entry;
+ int ret = 0;
+ unsigned long flags;
+
+ lmc_trace(dev, "lmc_start_xmit in");
+
+ sc = dev->priv;
+
+ spin_lock_irqsave(&sc->lmc_lock, flags);
+
+ /* normal path, tbusy known to be zero */
+
+ entry = sc->lmc_next_tx % LMC_TXDESCS;
+
+ sc->lmc_txq[entry] = skb;
+ sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
+
+ LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
+
+#ifndef GCOM
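+ /*
+ * The flag values below are Tulip transmit-descriptor (TDES1) control
+ * bits: 0x60000000 marks a single-buffer frame (first + last segment),
+ * and 0xe0000000 additionally requests an interrupt on completion.
+ */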
+ /* If the queue is less than half full, don't interrupt */
+ if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
+ {
+ /* Do not interrupt on completion of this packet */
+ flag = 0x60000000;
+ netif_wake_queue(dev);
+ }
+ else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
+ {
+ /* This generates an interrupt on completion of this packet */
+ flag = 0xe0000000;
+ netif_wake_queue(dev);
+ }
+ else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
+ {
+ /* Do not interrupt on completion of this packet */
+ flag = 0x60000000;
+ netif_wake_queue(dev);
+ }
+ else
+ {
+ /* This generates an interrupt on completion of this packet */
+ flag = 0xe0000000;
+ sc->lmc_txfull = 1;
+ netif_stop_queue(dev);
+ }
+#else
+ flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
+
+ if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
+ { /* ring full, go busy */
+ sc->lmc_txfull = 1;
+ netif_stop_queue(dev);
+ sc->stats.tx_tbusy1++ ;
+ LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
+ }
+#endif
+
+
+ if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
+ flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
+
+ /* don't pad small packets either */
+ flag = sc->lmc_txring[entry].length = (skb->len) | flag |
+ sc->TxDescriptControlInit;
+
+ /* set the transmit timeout flag to be checked in
+ * the watchdog timer handler. -baz
+ */
+
+ sc->stats.tx_NoCompleteCnt++;
+ sc->lmc_next_tx++;
+
+ /* give ownership to the chip */
+ LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
+ sc->lmc_txring[entry].status = 0x80000000;
+
+ /* send now! */
+ LMC_CSR_WRITE (sc, csr_txpoll, 0);
+
+ dev->trans_start = jiffies;
+
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+ lmc_trace(dev, "lmc_start_xmit_out");
+ return ret;
+}
+
+
+static int lmc_rx (struct net_device *dev) /*fold00*/
+{
+ lmc_softc_t *sc;
+ int i;
+ int rx_work_limit = LMC_RXDESCS;
+ unsigned int next_rx;
+ int rxIntLoopCnt; /* debug -baz */
+ int localLengthErrCnt = 0;
+ long stat;
+ struct sk_buff *skb, *nsb;
+ u16 len;
+
+ lmc_trace(dev, "lmc_rx in");
+
+ sc = dev->priv;
+
+ lmc_led_on(sc, LMC_DS3_LED3);
+
+ rxIntLoopCnt = 0; /* debug -baz */
+
+ i = sc->lmc_next_rx % LMC_RXDESCS;
+ next_rx = sc->lmc_next_rx;
+
+ while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
+ {
+ rxIntLoopCnt++; /* debug -baz */
+ len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
+ if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
+ if ((stat & 0x0000ffff) != 0x7fff) {
+ /* Oversized frame */
+ sc->stats.rx_length_errors++;
+ goto skip_packet;
+ }
+ }
+
+ if(stat & 0x00000008){ /* Catch a dribbling bit error */
+ sc->stats.rx_errors++;
+ sc->stats.rx_frame_errors++;
+ goto skip_packet;
+ }
+
+
+ if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
+ sc->stats.rx_errors++;
+ sc->stats.rx_crc_errors++;
+ goto skip_packet;
+ }
+
+
+ if (len > LMC_PKT_BUF_SZ){
+ sc->stats.rx_length_errors++;
+ localLengthErrCnt++;
+ goto skip_packet;
+ }
+
+ if (len < sc->lmc_crcSize + 2) {
+ sc->stats.rx_length_errors++;
+ sc->stats.rx_SmallPktCnt++;
+ localLengthErrCnt++;
+ goto skip_packet;
+ }
+
+ if(stat & 0x00004000){
+ printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
+ }
+
+ len -= sc->lmc_crcSize;
+
+ skb = sc->lmc_rxq[i];
+
+ /*
+ * We ran out of memory at some point
+ * just allocate an skb buff and continue.
+ */
+
+ if(skb == 0x0){
+ nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+ if (nsb) {
+ sc->lmc_rxq[i] = nsb;
+ nsb->dev = dev;
+ sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
+ }
+ sc->failed_recv_alloc = 1;
+ goto skip_packet;
+ }
+
+ dev->last_rx = jiffies;
+ sc->stats.rx_packets++;
+ sc->stats.rx_bytes += len;
+
+ LMC_CONSOLE_LOG("recv", skb->data, len);
+
+ /*
+ * I'm not sure of the sanity of this
+ * Packets could be arriving at a constant
+ * 44.210mbits/sec and we're going to copy
+ * them into a new buffer??
+ */
+
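+ /*
+ * Large frames are handed straight up and replaced with a fresh ring
+ * buffer; small frames are copied into a right-sized skb so the ring
+ * buffer can be reused.
+ */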
+ if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
+ /*
+ * If it's a large packet don't copy it just hand it up
+ */
+ give_it_anyways:
+
+ sc->lmc_rxq[i] = NULL;
+ sc->lmc_rxring[i].buffer1 = 0x0;
+
+ skb_put (skb, len);
+ skb->protocol = lmc_proto_type(sc, skb);
+ skb->protocol = htons(ETH_P_WAN_PPP);
+ skb->mac.raw = skb->data;
+// skb->nh.raw = skb->data;
+ skb->dev = dev;
+ lmc_proto_netif(sc, skb);
+
+ /*
+ * This skb will be destroyed by the upper layers, make a new one
+ */
+ nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+ if (nsb) {
+ sc->lmc_rxq[i] = nsb;
+ nsb->dev = dev;
+ sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
+ /* Transferred to 21140 below */
+ }
+ else {
+ /*
+ * We've run out of memory, stop trying to allocate
+ * memory and exit the interrupt handler
+ *
+ * The chip may run out of receive buffers and stop,
+ * in which case we'll try to allocate the buffer
+ * again (once a second).
+ */
+ sc->stats.rx_BuffAllocErr++;
+ LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
+ sc->failed_recv_alloc = 1;
+ goto skip_out_of_mem;
+ }
+ }
+ else {
+ nsb = dev_alloc_skb(len);
+ if(!nsb) {
+ goto give_it_anyways;
+ }
+ memcpy(skb_put(nsb, len), skb->data, len);
+
+ nsb->protocol = lmc_proto_type(sc, skb);
+ nsb->mac.raw = nsb->data;
+// nsb->nh.raw = nsb->data;
+ nsb->dev = dev;
+ lmc_proto_netif(sc, nsb);
+ }
+
+ skip_packet:
+ LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
+ sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
+
+ sc->lmc_next_rx++;
+ i = sc->lmc_next_rx % LMC_RXDESCS;
+ rx_work_limit--;
+ if (rx_work_limit < 0)
+ break;
+ }
+
+ /* detect condition for LMC1000 where DSU cable attaches and fills
+ * descriptors with bogus packets
+ *
+ if (localLengthErrCnt > LMC_RXDESCS - 3) {
+ sc->stats.rx_BadPktSurgeCnt++;
+ LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE,
+ localLengthErrCnt,
+ sc->stats.rx_BadPktSurgeCnt);
+ } */
+
+ /* save max count of receive descriptors serviced */
+ if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
+ sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
+ }
+
+#ifdef DEBUG
+ if (rxIntLoopCnt == 0)
+ {
+ for (i = 0; i < LMC_RXDESCS; i++)
+ {
+ if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
+ != DESC_OWNED_BY_DC21X4)
+ {
+ rxIntLoopCnt++;
+ }
+ }
+ LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
+ }
+#endif
+
+
+ lmc_led_off(sc, LMC_DS3_LED3);
+
+skip_out_of_mem:
+
+ lmc_trace(dev, "lmc_rx out");
+
+ return 0;
+}
+
+static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
+{
+ lmc_softc_t *sc = dev->priv;
+ unsigned long flags;
+
+ lmc_trace(dev, "lmc_get_stats in");
+
+
+ spin_lock_irqsave(&sc->lmc_lock, flags);
+
+ sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
+
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+ lmc_trace(dev, "lmc_get_stats out");
+
+ return (struct net_device_stats *) &sc->stats;
+}
+
+static struct pci_driver lmc_driver = {
+ .name = "lmc",
+ .id_table = lmc_pci_tbl,
+ .probe = lmc_init_one,
+ .remove = __devexit_p(lmc_remove_one),
+};
+
+static int __init init_lmc(void)
+{
+ return pci_module_init(&lmc_driver);
+}
+
+static void __exit exit_lmc(void)
+{
+ pci_unregister_driver(&lmc_driver);
+}
+
+module_init(init_lmc);
+module_exit(exit_lmc);
+
+unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
+{
+ int i;
+ int command = (0xf6 << 10) | (devaddr << 5) | regno;
+ int retval = 0;
+
+ lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
+
+ LMC_MII_SYNC (sc);
+
+ lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
+
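+ /*
+ * Bit-bang the MII read frame on CSR9: bit 16 clocks MDC, bit 17
+ * drives data out, bit 18 selects read mode and bit 19 samples the
+ * data coming back (see the read loop below).
+ */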
+ for (i = 15; i >= 0; i--)
+ {
+ int dataval = (command & (1 << i)) ? 0x20000 : 0;
+
+ LMC_CSR_WRITE (sc, csr_9, dataval);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ }
+
+ lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
+
+ for (i = 19; i > 0; i--)
+ {
+ LMC_CSR_WRITE (sc, csr_9, 0x40000);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
+ LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ }
+
+ lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
+
+ return (retval >> 1) & 0xffff;
+}
+
+void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
+{
+ int i = 32;
+ int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
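+ /*
+ * 32-bit MDIO write frame: start (01), write opcode (01), PHY address,
+ * register address, turnaround (10), then 16 bits of data.
+ */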
+
+ lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
+
+ LMC_MII_SYNC (sc);
+
+ i = 31;
+ while (i >= 0)
+ {
+ int datav;
+
+ if (command & (1 << i))
+ datav = 0x20000;
+ else
+ datav = 0x00000;
+
+ LMC_CSR_WRITE (sc, csr_9, datav);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ i--;
+ }
+
+ i = 2;
+ while (i > 0)
+ {
+ LMC_CSR_WRITE (sc, csr_9, 0x40000);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ LMC_CSR_WRITE (sc, csr_9, 0x50000);
+ lmc_delay ();
+ /* __SLOW_DOWN_IO; */
+ i--;
+ }
+
+ lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
+}
+
+static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
+{
+ int i;
+
+ lmc_trace(sc->lmc_device, "lmc_softreset in");
+
+ /* Initialize the receive rings and buffers. */
+ sc->lmc_txfull = 0;
+ sc->lmc_next_rx = 0;
+ sc->lmc_next_tx = 0;
+ sc->lmc_taint_rx = 0;
+ sc->lmc_taint_tx = 0;
+
+ /*
+ * Setup each one of the receiver buffers
+ * allocate an skbuff for each one, setup the descriptor table
+ * and point each buffer at the next one
+ */
+
+ for (i = 0; i < LMC_RXDESCS; i++)
+ {
+ struct sk_buff *skb;
+
+ if (sc->lmc_rxq[i] == NULL)
+ {
+ skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+ if(skb == NULL){
+ printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
+ sc->failed_ring = 1;
+ break;
+ }
+ else{
+ sc->lmc_rxq[i] = skb;
+ }
+ }
+ else
+ {
+ skb = sc->lmc_rxq[i];
+ }
+
+ skb->dev = sc->lmc_device;
+
+ /* owned by 21140 */
+ sc->lmc_rxring[i].status = 0x80000000;
+
+ /* used to be PKT_BUF_SZ; now uses the skb size since we lose some to head room */
+ sc->lmc_rxring[i].length = skb->end - skb->data;
+
+ /* used to be tail, which looks odd (why point at the end of the
+ * packet?), but since nothing has been written yet, tail == data
+ */
+ sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
+
+ /* This is fair since the structure is static and we have the next address */
+ sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
+
+ }
+
+ /*
+ * Sets end of ring
+ */
+ sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
+ sc->lmc_rxring[i - 1].buffer2 = virt_to_bus (&sc->lmc_rxring[0]); /* Point back to the start */
+ LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
+
+
+ /* Initialize the transmit rings and buffers */
+ for (i = 0; i < LMC_TXDESCS; i++)
+ {
+ if (sc->lmc_txq[i] != NULL){ /* have buffer */
+ dev_kfree_skb(sc->lmc_txq[i]); /* free it */
+ sc->stats.tx_dropped++; /* We just dropped a packet */
+ }
+ sc->lmc_txq[i] = NULL;
+ sc->lmc_txring[i].status = 0x00000000;
+ sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
+ }
+ sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
+ LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
+
+ lmc_trace(sc->lmc_device, "lmc_softreset out");
+}
+
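+/*
+ * GPIO direction helpers: lmc_gpio_io tracks which general-purpose pins
+ * are outputs.  Writing the GP CSR with TULIP_GP_PINSET set programs
+ * pin direction rather than pin data.
+ */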
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
+ sc->lmc_gpio_io &= ~bits;
+ LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
+ lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
+}
+
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
+ sc->lmc_gpio_io |= bits;
+ LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
+ lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
+}
+
+void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_led_on in");
+ if((~sc->lmc_miireg16) & led){ /* Already on! */
+ lmc_trace(sc->lmc_device, "lmc_led_on aon out");
+ return;
+ }
+
+ sc->lmc_miireg16 &= ~led;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+ lmc_trace(sc->lmc_device, "lmc_led_on out");
+}
+
+void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_led_off in");
+ if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
+ lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
+ return;
+ }
+
+ sc->lmc_miireg16 |= led;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+ lmc_trace(sc->lmc_device, "lmc_led_off out");
+}
+
+static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_reset in");
+ sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+ sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
+ lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+ /*
+ * make some of the GPIO pins be outputs
+ */
+ lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
+
+ /*
+ * RESET low to force state reset. This also forces
+ * the transmitter clock to be internal, but we expect to reset
+ * that later anyway.
+ */
+ sc->lmc_gpio &= ~(LMC_GEP_RESET);
+ LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * hold for more than 10 microseconds
+ */
+ udelay(50);
+
+ /*
+ * stop driving Xilinx-related signals
+ */
+ lmc_gpio_mkinput(sc, LMC_GEP_RESET);
+
+ /*
+ * Call media specific init routine
+ */
+ sc->lmc_media->init(sc);
+
+ sc->stats.resetCount++;
+ lmc_trace(sc->lmc_device, "lmc_reset out");
+}
+
+static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
+{
+ u_int32_t val;
+ lmc_trace(sc->lmc_device, "lmc_dec_reset in");
+
+ /*
+ * disable all interrupts
+ */
+ sc->lmc_intrmask = 0;
+ LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
+
+ /*
+ * Reset the chip with a software reset command.
+ * Wait 10 microseconds (actually 50 PCI cycles, which at
+ * 33 MHz is under two microseconds, but wait a bit
+ * longer anyway)
+ */
+ LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
+ udelay(25);
+#ifdef __sparc__
+ sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
+ sc->lmc_busmode = 0x00100000;
+ sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
+ LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
+#endif
+ sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
+
+ /*
+ * We want:
+ * no ethernet address in frames we write
+ * disable padding (txdesc, padding disable)
+ * ignore runt frames (rdes0 bit 15)
+ * no receiver watchdog or transmitter jabber timer
+ * (csr15 bit 0,14 == 1)
+ * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
+ */
+
+ sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
+ | TULIP_CMD_FULLDUPLEX
+ | TULIP_CMD_PASSBADPKT
+ | TULIP_CMD_NOHEARTBEAT
+ | TULIP_CMD_PORTSELECT
+ | TULIP_CMD_RECEIVEALL
+ | TULIP_CMD_MUSTBEONE
+ );
+ sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
+ | TULIP_CMD_THRESHOLDCTL
+ | TULIP_CMD_STOREFWD
+ | TULIP_CMD_TXTHRSHLDCTL
+ );
+
+ LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
+
+ /*
+ * disable receiver watchdog and transmit jabber
+ */
+ val = LMC_CSR_READ(sc, csr_sia_general);
+ val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
+ LMC_CSR_WRITE(sc, csr_sia_general, val);
+
+ lmc_trace(sc->lmc_device, "lmc_dec_reset out");
+}
+
+static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
+ size_t csr_size)
+{
+ lmc_trace(sc->lmc_device, "lmc_initcsrs in");
+ sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
+ sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
+ sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
+ sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size;
+ sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size;
+ sc->lmc_csrs.csr_status = csr_base + 5 * csr_size;
+ sc->lmc_csrs.csr_command = csr_base + 6 * csr_size;
+ sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size;
+ sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
+ sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size;
+ sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size;
+ sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size;
+ sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size;
+ sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
+ sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
+ sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
+ lmc_trace(sc->lmc_device, "lmc_initcsrs out");
+}
+
+static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
+ lmc_softc_t *sc;
+ u32 csr6;
+ unsigned long flags;
+
+ lmc_trace(dev, "lmc_driver_timeout in");
+
+ sc = dev->priv;
+
+ spin_lock_irqsave(&sc->lmc_lock, flags);
+
+ printk("%s: Xmitter busy|\n", dev->name);
+
+ sc->stats.tx_tbusy_calls++ ;
+ if (jiffies - dev->trans_start < TX_TIMEOUT) {
+ goto bug_out;
+ }
+
+ /*
+ * Chip seems to have locked up, so reset it.
+ * This wipes out the whole descriptor table
+ * and starts from scratch.
+ */
+
+ LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
+ LMC_CSR_READ (sc, csr_status),
+ sc->stats.tx_ProcTimeout);
+
+ lmc_running_reset (dev);
+
+ LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+ LMC_EVENT_LOG(LMC_EVENT_RESET2,
+ lmc_mii_readreg (sc, 0, 16),
+ lmc_mii_readreg (sc, 0, 17));
+
+ /* restart the tx processes */
+ csr6 = LMC_CSR_READ (sc, csr_command);
+ LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
+ LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
+
+ /* immediate transmit */
+ LMC_CSR_WRITE (sc, csr_txpoll, 0);
+
+ sc->stats.tx_errors++;
+ sc->stats.tx_ProcTimeout++; /* -baz */
+
+ dev->trans_start = jiffies;
+
+bug_out:
+
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+ lmc_trace(dev, "lmc_driver_timout out");
+
+
+}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
new file mode 100644
index 000000000000..f55ce76b00ed
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -0,0 +1,1246 @@
+/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/bitops.h>
+
+#include <net/syncppp.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/uaccess.h>
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_ioctl.h"
+#include "lmc_debug.h"
+
+#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
+
+ /*
+ * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+ * All rights reserved. www.lanmedia.com
+ *
+ * This code is written by:
+ * Andrew Stanley-Jones (asj@cban.com)
+ * Rob Braun (bbraun@vix.com),
+ * Michael Graff (explorer@vix.com) and
+ * Matt Thomas (matt@3am-software.com).
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License version 2, incorporated herein by reference.
+ */
+
+/*
+ * For lack of a better place, put the SSI cable stuff here.
+ */
+char *lmc_t1_cables[] = {
+ "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35",
+ "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL
+};
+
+/*
+ * protocol independent method.
+ */
+static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
+
+/*
+ * media independent methods to check on media status, link, light LEDs,
+ * etc.
+ */
+static void lmc_ds3_init (lmc_softc_t * const);
+static void lmc_ds3_default (lmc_softc_t * const);
+static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
+static int lmc_ds3_get_link_status (lmc_softc_t * const);
+static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
+static void lmc_ds3_set_scram (lmc_softc_t * const, int);
+static void lmc_ds3_watchdog (lmc_softc_t * const);
+
+static void lmc_hssi_init (lmc_softc_t * const);
+static void lmc_hssi_default (lmc_softc_t * const);
+static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_hssi_set_clock (lmc_softc_t * const, int);
+static int lmc_hssi_get_link_status (lmc_softc_t * const);
+static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
+static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
+static void lmc_hssi_watchdog (lmc_softc_t * const);
+
+static void lmc_ssi_init (lmc_softc_t * const);
+static void lmc_ssi_default (lmc_softc_t * const);
+static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_ssi_set_clock (lmc_softc_t * const, int);
+static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
+static int lmc_ssi_get_link_status (lmc_softc_t * const);
+static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
+static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
+static void lmc_ssi_watchdog (lmc_softc_t * const);
+
+static void lmc_t1_init (lmc_softc_t * const);
+static void lmc_t1_default (lmc_softc_t * const);
+static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static int lmc_t1_get_link_status (lmc_softc_t * const);
+static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
+static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
+static void lmc_t1_set_clock (lmc_softc_t * const, int);
+static void lmc_t1_watchdog (lmc_softc_t * const);
+
+static void lmc_dummy_set_1 (lmc_softc_t * const, int);
+static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
+
+static inline void write_av9110_bit (lmc_softc_t *, int);
+static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t,
+ u_int32_t, u_int32_t);
+
+lmc_media_t lmc_ds3_media = {
+ lmc_ds3_init, /* special media init stuff */
+ lmc_ds3_default, /* reset to default state */
+ lmc_ds3_set_status, /* reset status to state provided */
+ lmc_dummy_set_1, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_ds3_set_100ft, /* set cable length */
+ lmc_ds3_set_scram, /* set scrambler */
+ lmc_ds3_get_link_status, /* get link status */
+ lmc_dummy_set_1, /* set link status */
+ lmc_ds3_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_ds3_watchdog
+};
+
+lmc_media_t lmc_hssi_media = {
+ lmc_hssi_init, /* special media init stuff */
+ lmc_hssi_default, /* reset to default state */
+ lmc_hssi_set_status, /* reset status to state provided */
+ lmc_hssi_set_clock, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_hssi_get_link_status, /* get link status */
+ lmc_hssi_set_link_status, /* set link status */
+ lmc_hssi_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_hssi_watchdog
+};
+
+lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
+ lmc_ssi_default, /* reset to default state */
+ lmc_ssi_set_status, /* reset status to state provided */
+ lmc_ssi_set_clock, /* set clock source */
+ lmc_ssi_set_speed, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_ssi_get_link_status, /* get link status */
+ lmc_ssi_set_link_status, /* set link status */
+ lmc_ssi_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_ssi_watchdog
+};
+
+lmc_media_t lmc_t1_media = {
+ lmc_t1_init, /* special media init stuff */
+ lmc_t1_default, /* reset to default state */
+ lmc_t1_set_status, /* reset status to state provided */
+ lmc_t1_set_clock, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_t1_get_link_status, /* get link status */
+ lmc_dummy_set_1, /* set link status */
+ lmc_t1_set_crc_length, /* set CRC length */
+ lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+ lmc_t1_watchdog
+};
+
+static void
+lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
+{
+}
+
+static void
+lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
+{
+}
+
+/*
+ * HSSI methods
+ */
+
+static void
+lmc_hssi_init (lmc_softc_t * const sc)
+{
+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
+
+ lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
+}
+
+static void
+lmc_hssi_default (lmc_softc_t * const sc)
+{
+ sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+ sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+ sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it. This will
+ * always reset the card if needed.
+ */
+static void
+lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ if (ctl == NULL)
+ {
+ sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
+ lmc_set_protocol (sc, NULL);
+
+ return;
+ }
+
+ /*
+ * check for change in clock source
+ */
+ if (ctl->clock_source && !sc->ictl.clock_source)
+ {
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
+ sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
+ }
+ else if (!ctl->clock_source && sc->ictl.clock_source)
+ {
+ sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+ }
+
+ lmc_set_protocol (sc, ctl);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
+{
+ int old;
+ old = sc->ictl.clock_source;
+ if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+ {
+ sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+ if(old != ie)
+ printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+ }
+ else
+ {
+ sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+ if(old != ie)
+ printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+ }
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_hssi_get_link_status (lmc_softc_t * const sc)
+{
+ /*
+ * We're using the same code as SSI since
+ * they're practically the same
+ */
+ return lmc_ssi_get_link_status(sc);
+}
+
+static void
+lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_LINK_UP)
+ sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
+ else
+ sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_CTL_CRC_LENGTH_32)
+ {
+ /* 32 bit */
+ sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+ }
+ else
+ {
+ /* 16 bit */
+ sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_hssi_watchdog (lmc_softc_t * const sc)
+{
+ /* HSSI is blank */
+}
+
+/*
+ * DS3 methods
+ */
+
+/*
+ * Set cable length
+ */
+static void
+lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
+{
+ if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
+ {
+ sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
+ sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
+ }
+ else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
+ {
+ sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
+ sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
+ }
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_ds3_default (lmc_softc_t * const sc)
+{
+ sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+ sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+ sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
+ sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
+ sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it. This will
+ * always reset the card if needed.
+ */
+static void
+lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ if (ctl == NULL)
+ {
+ sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
+ sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
+ lmc_set_protocol (sc, NULL);
+
+ return;
+ }
+
+ /*
+ * check for change in cable length setting
+ */
+ if (ctl->cable_length && !sc->ictl.cable_length)
+ lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
+ else if (!ctl->cable_length && sc->ictl.cable_length)
+ lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
+
+ /*
+ * Check for change in scrambler setting (requires reset)
+ */
+ if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
+ lmc_ds3_set_scram (sc, LMC_CTL_ON);
+ else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
+ lmc_ds3_set_scram (sc, LMC_CTL_OFF);
+
+ lmc_set_protocol (sc, ctl);
+}
+
+static void
+lmc_ds3_init (lmc_softc_t * const sc)
+{
+ int i;
+
+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
+
+ /* writes zeros everywhere */
+ for (i = 0; i < 21; i++)
+ {
+ lmc_mii_writereg (sc, 0, 17, i);
+ lmc_mii_writereg (sc, 0, 18, 0);
+ }
+
+ /* set some essential bits */
+ lmc_mii_writereg (sc, 0, 17, 1);
+ lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */
+
+ lmc_mii_writereg (sc, 0, 17, 5);
+ lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */
+
+ lmc_mii_writereg (sc, 0, 17, 14);
+ lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */
+
+ /* clear counters and latched bits */
+ for (i = 0; i < 21; i++)
+ {
+ lmc_mii_writereg (sc, 0, 17, i);
+ lmc_mii_readreg (sc, 0, 18);
+ }
+}
+
+/*
+ * 1 == DS3 payload scrambled, 0 == not scrambled
+ */
+static void
+lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
+{
+ if (ie == LMC_CTL_ON)
+ {
+ sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
+ sc->ictl.scrambler_onoff = LMC_CTL_ON;
+ }
+ else
+ {
+ sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
+ sc->ictl.scrambler_onoff = LMC_CTL_OFF;
+ }
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_ds3_get_link_status (lmc_softc_t * const sc)
+{
+ u_int16_t link_status, link_status_11;
+ int ret = 1;
+
+ lmc_mii_writereg (sc, 0, 17, 7);
+ link_status = lmc_mii_readreg (sc, 0, 18);
+
+ /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
+ * led0 yellow = far-end adapter is in Red alarm condition
+ * led1 blue = received an Alarm Indication signal
+ * (upstream failure)
+ * led2 Green = power to adapter, Gate Array loaded & driver
+ * attached
+ * led3 red = Loss of Signal (LOS) or out of frame (OOF)
+ * conditions detected on T3 receive signal
+ */
+
+ lmc_led_on(sc, LMC_DS3_LED2);
+
+ if ((link_status & LMC_FRAMER_REG0_DLOS) ||
+ (link_status & LMC_FRAMER_REG0_OOFS)){
+ ret = 0;
+ if(sc->last_led_err[3] != 1){
+ u16 r1;
+ lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
+ r1 = lmc_mii_readreg (sc, 0, 18);
+ r1 &= 0xfe;
+ lmc_mii_writereg(sc, 0, 18, r1);
+ printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */
+ sc->last_led_err[3] = 1;
+ }
+ else {
+ lmc_led_off(sc, LMC_DS3_LED3); /* turn off red LED */
+ if(sc->last_led_err[3] == 1){
+ u16 r1;
+ lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
+ r1 = lmc_mii_readreg (sc, 0, 18);
+ r1 |= 0x01;
+ lmc_mii_writereg(sc, 0, 18, r1);
+ }
+ sc->last_led_err[3] = 0;
+ }
+
+ lmc_mii_writereg(sc, 0, 17, 0x10);
+ link_status_11 = lmc_mii_readreg(sc, 0, 18);
+ if((link_status & LMC_FRAMER_REG0_AIS) ||
+ (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
+ ret = 0;
+ if(sc->last_led_err[0] != 1){
+ printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
+ printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED0);
+ sc->last_led_err[0] = 1;
+ }
+ else {
+ lmc_led_off(sc, LMC_DS3_LED0);
+ sc->last_led_err[0] = 0;
+ }
+
+ lmc_mii_writereg (sc, 0, 17, 9);
+ link_status = lmc_mii_readreg (sc, 0, 18);
+
+ if(link_status & LMC_FRAMER_REG9_RBLUE){
+ ret = 0;
+ if(sc->last_led_err[1] != 1){
+ printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED1);
+ sc->last_led_err[1] = 1;
+ }
+ else {
+ lmc_led_off(sc, LMC_DS3_LED1);
+ sc->last_led_err[1] = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_CTL_CRC_LENGTH_32)
+ {
+ /* 32 bit */
+ sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+ }
+ else
+ {
+ /* 16 bit */
+ sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_ds3_watchdog (lmc_softc_t * const sc)
+{
+
+}
+
+
+/*
+ * SSI methods
+ */
+
+static void
+lmc_ssi_init (lmc_softc_t * const sc)
+{
+ u_int16_t mii17;
+ int cable;
+
+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
+
+ mii17 = lmc_mii_readreg (sc, 0, 17);
+
+ cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
+ sc->ictl.cable_type = cable;
+
+ lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
+}
+
+static void
+lmc_ssi_default (lmc_softc_t * const sc)
+{
+ sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+ /*
+ * make TXCLOCK always be an output
+ */
+ lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
+
+ sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+ sc->lmc_media->set_speed (sc, NULL);
+ sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it. This will
+ * always reset the card if needed.
+ */
+static void
+lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ if (ctl == NULL)
+ {
+ sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
+ sc->lmc_media->set_speed (sc, &sc->ictl);
+ lmc_set_protocol (sc, NULL);
+
+ return;
+ }
+
+ /*
+ * check for change in clock source
+ */
+ if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
+ && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
+ {
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
+ sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
+ }
+ else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
+ && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
+ {
+ sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+ sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+ }
+
+ if (ctl->clock_rate != sc->ictl.clock_rate)
+ sc->lmc_media->set_speed (sc, ctl);
+
+ lmc_set_protocol (sc, ctl);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
+{
+ int old;
+ old = ie;
+ if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+ {
+ sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+ if(ie != old)
+ printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+ }
+ else
+ {
+ sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+ if(ie != old)
+ printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+ }
+}
+
+static void
+lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ lmc_ctl_t *ictl = &sc->ictl;
+ lmc_av9110_t *av;
+
+ /* original settings for clock rate of:
+ * 100 kHz (8,25,0,0,2) were incorrect;
+ * they should have been 80,125,1,3,3.
+ * There are 17 param combinations that produce this frequency.
+ * For 1.5 MHz use 120,100,1,1,2 (226 param combinations).
+ */
+ if (ctl == NULL)
+ {
+ av = &ictl->cardspec.ssi;
+ ictl->clock_rate = 1500000;
+ av->f = ictl->clock_rate;
+ av->n = 120;
+ av->m = 100;
+ av->v = 1;
+ av->x = 1;
+ av->r = 2;
+
+ write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
+ return;
+ }
+
+ av = &ctl->cardspec.ssi;
+
+ if (av->f == 0)
+ return;
+
+ ictl->clock_rate = av->f; /* really, this is the rate we are running at */
+ ictl->cardspec.ssi = *av;
+
+ write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_ssi_get_link_status (lmc_softc_t * const sc)
+{
+ u_int16_t link_status;
+ u_int32_t ticks;
+ int ret = 1;
+ int hw_hdsk = 1;
+
+ /*
+ * missing CTS? Hmm. If we require CTS on, we may never get the
+ * link to come up, so omit it in this test.
+ *
+ * Also, it seems that with a loopback cable, DCD isn't asserted,
+ * so just check for things like this:
+ * DSR _must_ be asserted.
+ * One of DCD or CTS must be asserted.
+ */
+
+ /* LMC 1000 (SSI) LED definitions
+ * led0 Green = power to adapter, Gate Array loaded &
+ * driver attached
+ * led1 Green = DSR and DTR and RTS and CTS are set
+ * led2 Green = Cable detected
+ * led3 red = No timing is available from the
+ * cable or the on-board frequency
+ * generator.
+ */
+
+ link_status = lmc_mii_readreg (sc, 0, 16);
+
+ /* Is the transmit clock still available */
+ ticks = LMC_CSR_READ (sc, csr_gp_timer);
+ ticks = 0x0000ffff - (ticks & 0x0000ffff);
+
+ lmc_led_on (sc, LMC_MII16_LED0);
+
+ /* ====== transmit clock determination ===== */
+ if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
+ lmc_led_off(sc, LMC_MII16_LED3);
+ }
+ else if (ticks == 0 ) { /* no clock found ? */
+ ret = 0;
+ if(sc->last_led_err[3] != 1){
+ sc->stats.tx_lossOfClockCnt++;
+ printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
+ }
+ sc->last_led_err[3] = 1;
+ lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
+ }
+ else {
+ if(sc->last_led_err[3] == 1)
+ printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
+ sc->last_led_err[3] = 0;
+ lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */
+ }
+
+ if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
+ ret = 0;
+ hw_hdsk = 0;
+ }
+
+#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
+ if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
+ ret = 0;
+ hw_hdsk = 0;
+ }
+#endif
+
+ if(hw_hdsk == 0){
+ if(sc->last_led_err[1] != 1)
+ printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
+ sc->last_led_err[1] = 1;
+ lmc_led_off(sc, LMC_MII16_LED1);
+ }
+ else {
+ if(sc->last_led_err[1] != 0)
+ printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
+ sc->last_led_err[1] = 0;
+ lmc_led_on(sc, LMC_MII16_LED1);
+ }
+
+ if(ret == 1) {
+ lmc_led_on(sc, LMC_MII16_LED2); /* Overall good status? */
+ }
+
+ return ret;
+}
+
+static void
+lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_LINK_UP)
+ {
+ sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
+ printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
+ }
+ else
+ {
+ sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
+ printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_CTL_CRC_LENGTH_32)
+ {
+ /* 32 bit */
+ sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+ sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
+
+ }
+ else
+ {
+ /* 16 bit */
+ sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+ sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * These are bits to program the ssi frequency generator
+ */
+static inline void
+write_av9110_bit (lmc_softc_t * sc, int c)
+{
+ /*
+ * set the data bit as we need it.
+ */
+ sc->lmc_gpio &= ~(LMC_GEP_CLK);
+ if (c & 0x01)
+ sc->lmc_gpio |= LMC_GEP_DATA;
+ else
+ sc->lmc_gpio &= ~(LMC_GEP_DATA);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * set the clock to high
+ */
+ sc->lmc_gpio |= LMC_GEP_CLK;
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * set the clock to low again.
+ */
+ sc->lmc_gpio &= ~(LMC_GEP_CLK);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+}
+
+static void
+write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
+ u_int32_t x, u_int32_t r)
+{
+ int i;
+
+#if 0
+ printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
+ LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
+#endif
+
+ sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
+ sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
+ * as outputs.
+ */
+ lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
+ | LMC_GEP_SSI_GENERATOR));
+
+ sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+ /*
+ * a shifting we will go...
+ */
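+ /*
+ * The programming word is shifted out LSB first:
+ * N (7 bits), M (7), V (1), X (2), R (2), then the
+ * fixed 0x17 control tail (5 bits).
+ */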
+ for (i = 0; i < 7; i++)
+ write_av9110_bit (sc, n >> i);
+ for (i = 0; i < 7; i++)
+ write_av9110_bit (sc, m >> i);
+ for (i = 0; i < 1; i++)
+ write_av9110_bit (sc, v >> i);
+ for (i = 0; i < 2; i++)
+ write_av9110_bit (sc, x >> i);
+ for (i = 0; i < 2; i++)
+ write_av9110_bit (sc, r >> i);
+ for (i = 0; i < 5; i++)
+ write_av9110_bit (sc, 0x17 >> i);
+
+ /*
+ * stop driving serial-related signals
+ */
+ lmc_gpio_mkinput (sc,
+ (LMC_GEP_DATA | LMC_GEP_CLK
+ | LMC_GEP_SSI_GENERATOR));
+}
+
+static void
+lmc_ssi_watchdog (lmc_softc_t * const sc)
+{
+ u_int16_t mii17;
+ struct ssicsr2
+ {
+ unsigned short dtr:1, dsr:1, rts:1, cable:3, crc:1, led0:1, led1:1,
+ led2:1, led3:1, fifo:1, ll:1, rl:1, tm:1, loop:1;
+ };
+ struct ssicsr2 *ssicsr;
+ mii17 = lmc_mii_readreg (sc, 0, 17);
+ ssicsr = (struct ssicsr2 *) &mii17;
+ if (ssicsr->cable == 7)
+ {
+ lmc_led_off (sc, LMC_MII16_LED2);
+ }
+ else
+ {
+ lmc_led_on (sc, LMC_MII16_LED2);
+ }
+
+}
+
+/*
+ * T1 methods
+ */
+
+/*
+ * The framer regs are multiplexed through MII regs 17 & 18:
+ * write the register address to MII reg 17 and the data to MII reg 18.
+ */
+static void
+lmc_t1_write (lmc_softc_t * const sc, int a, int d)
+{
+ lmc_mii_writereg (sc, 0, 17, a);
+ lmc_mii_writereg (sc, 0, 18, d);
+}
+
+/* Save a warning
+static int
+lmc_t1_read (lmc_softc_t * const sc, int a)
+{
+ lmc_mii_writereg (sc, 0, 17, a);
+ return lmc_mii_readreg (sc, 0, 18);
+}
+*/
+
+
+static void
+lmc_t1_init (lmc_softc_t * const sc)
+{
+ u_int16_t mii16;
+ int i;
+
+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
+ mii16 = lmc_mii_readreg (sc, 0, 16);
+
+ /* reset 8370 */
+ mii16 &= ~LMC_MII16_T1_RST;
+ lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
+ lmc_mii_writereg (sc, 0, 16, mii16);
+
+ /* set T1 or E1 line. lmc_t1_set_circuit_type() uses sc->lmc_miireg16, so keep it updated */
+ sc->lmc_miireg16 = mii16;
+ lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
+ mii16 = sc->lmc_miireg16;
+
+ lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */
+ lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */
+ lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */
+ lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */
+ lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */
+ lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */
+ lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */
+ lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */
+ lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */
+ lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */
+ lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */
+ lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */
+ lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */
+ lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */
+ lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */
+ lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */
+ lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */
+ lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */
+ lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */
+ lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */
+ lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */
+ lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */
+ lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */
+ lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */
+ lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */
+ lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */
+ lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */
+ lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */
+ for (i = 0; i < 32; i++)
+ {
+ lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */
+ lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */
+ lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */
+ }
+ for (i = 1; i < 25; i++)
+ {
+ lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */
+ }
+
+ mii16 |= LMC_MII16_T1_XOE;
+ lmc_mii_writereg (sc, 0, 16, mii16);
+ sc->lmc_miireg16 = mii16;
+}
+
+static void
+lmc_t1_default (lmc_softc_t * const sc)
+{
+ sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+ sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+ sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
+ sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+ /* Right now we can only clock from our internal source */
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+}
+/*
+ * Given a user provided state, set ourselves up to match it. This will
+ * always reset the card if needed.
+ */
+static void
+lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ if (ctl == NULL)
+ {
+ sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
+ lmc_set_protocol (sc, NULL);
+
+ return;
+ }
+ /*
+ * check for change in circuit type
+ */
+ if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
+     && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
+ sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
+ else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
+     && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
+ sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
+ lmc_set_protocol (sc, ctl);
+}
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_t1_get_link_status (lmc_softc_t * const sc)
+{
+ u_int16_t link_status;
+ int ret = 1;
+
+ /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
+ * led0 yellow = far-end adapter is in Red alarm condition
+ * led1 blue = received an Alarm Indication signal
+ * (upstream failure)
+ * led2 Green = power to adapter, Gate Array loaded & driver
+ * attached
+ * led3 red = Loss of Signal (LOS) or out of frame (OOF)
+ * conditions detected on T3 receive signal
+ */
+ lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in");
+ lmc_led_on(sc, LMC_DS3_LED2);
+
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
+ link_status = lmc_mii_readreg (sc, 0, 18);
+
+
+ if (link_status & T1F_RAIS) { /* turn on blue LED */
+ ret = 0;
+ if(sc->last_led_err[1] != 1){
+ printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED1);
+ sc->last_led_err[1] = 1;
+ }
+ else {
+ if(sc->last_led_err[1] != 0){
+ printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
+ }
+ lmc_led_off (sc, LMC_DS3_LED1);
+ sc->last_led_err[1] = 0;
+ }
+
+ /*
+ * Yellow Alarm is nasty stuff: it looks at data patterns
+ * inside the channel and can confuse them with HDLC framing,
+ * so ignore plain yellow alarms.
+ *
+ * Do listen to the MultiFrame Yellow alarm which, while implemented
+ * in different ways, isn't carried in the channel and is hence
+ * somewhat more reliable.
+ */
+
+ if (link_status & T1F_RMYEL) {
+ ret = 0;
+ if(sc->last_led_err[0] != 1){
+ printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED0);
+ sc->last_led_err[0] = 1;
+ }
+ else {
+ if(sc->last_led_err[0] != 0){
+ printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
+ }
+ lmc_led_off(sc, LMC_DS3_LED0);
+ sc->last_led_err[0] = 0;
+ }
+
+ /*
+ * Loss of signal and loss of frame
+ * Use the green bit to identify which one lit the led
+ */
+ if(link_status & T1F_RLOF){
+ ret = 0;
+ if(sc->last_led_err[3] != 1){
+ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED3);
+ sc->last_led_err[3] = 1;
+
+ }
+ else {
+ if(sc->last_led_err[3] != 0){
+ printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
+ }
+ if( ! (link_status & T1F_RLOS))
+ lmc_led_off(sc, LMC_DS3_LED3);
+ sc->last_led_err[3] = 0;
+ }
+
+ if(link_status & T1F_RLOS){
+ ret = 0;
+ if(sc->last_led_err[2] != 1){
+ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
+ }
+ lmc_led_on(sc, LMC_DS3_LED3);
+ sc->last_led_err[2] = 1;
+
+ }
+ else {
+ if(sc->last_led_err[2] != 0){
+ printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
+ }
+ if( ! (link_status & T1F_RLOF))
+ lmc_led_off(sc, LMC_DS3_LED3);
+ sc->last_led_err[2] = 0;
+ }
+
+ sc->lmc_xinfo.t1_alarm1_status = link_status;
+
+ lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
+ sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
+
+
+ lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out");
+
+ return ret;
+}
+
+/*
+ * 1 == T1 Circuit Type , 0 == E1 Circuit Type
+ */
+static void
+lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
+{
+ if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
+ sc->lmc_miireg16 |= LMC_MII16_T1_Z;
+ sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
+ printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
+ }
+ else {
+ sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
+ sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
+ printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
+{
+ if (state == LMC_CTL_CRC_LENGTH_32)
+ {
+ /* 32 bit */
+ sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+ sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
+
+ }
+ else
+ {
+ /* 16 bit */
+ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
+ sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+ sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
+
+ }
+
+ lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
+{
+ int old;
+ old = ie;
+ if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+ {
+ sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+ if(old != ie)
+ printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+ }
+ else
+ {
+ sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
+ LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+ if(old != ie)
+ printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+ }
+}
+
+static void
+lmc_t1_watchdog (lmc_softc_t * const sc)
+{
+}
+
+static void
+lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+ if (ctl == 0)
+ {
+ sc->ictl.keepalive_onoff = LMC_CTL_ON;
+
+ return;
+ }
+}
diff --git a/drivers/net/wan/lmc/lmc_media.h b/drivers/net/wan/lmc/lmc_media.h
new file mode 100644
index 000000000000..ddcc00403563
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_media.h
@@ -0,0 +1,65 @@
+#ifndef _LMC_MEDIA_H_
+#define _LMC_MEDIA_H_
+
+lmc_media_t lmc_ds3_media = {
+ lmc_ds3_init, /* special media init stuff */
+ lmc_ds3_default, /* reset to default state */
+ lmc_ds3_set_status, /* reset status to state provided */
+ lmc_dummy_set_1, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_ds3_set_100ft, /* set cable length */
+ lmc_ds3_set_scram, /* set scrambler */
+ lmc_ds3_get_link_status, /* get link status */
+ lmc_dummy_set_1, /* set link status */
+ lmc_ds3_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_ds3_watchdog
+};
+
+lmc_media_t lmc_hssi_media = {
+ lmc_hssi_init, /* special media init stuff */
+ lmc_hssi_default, /* reset to default state */
+ lmc_hssi_set_status, /* reset status to state provided */
+ lmc_hssi_set_clock, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_hssi_get_link_status, /* get link status */
+ lmc_hssi_set_link_status, /* set link status */
+ lmc_hssi_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_hssi_watchdog
+};
+
+lmc_media_t lmc_ssi_media = {
+  lmc_ssi_init,                 /* special media init stuff */
+ lmc_ssi_default, /* reset to default state */
+ lmc_ssi_set_status, /* reset status to state provided */
+ lmc_ssi_set_clock, /* set clock source */
+ lmc_ssi_set_speed, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_ssi_get_link_status, /* get link status */
+ lmc_ssi_set_link_status, /* set link status */
+ lmc_ssi_set_crc_length, /* set CRC length */
+ lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ lmc_ssi_watchdog
+};
+
+lmc_media_t lmc_t1_media = {
+ lmc_t1_init, /* special media init stuff */
+ lmc_t1_default, /* reset to default state */
+ lmc_t1_set_status, /* reset status to state provided */
+ lmc_t1_set_clock, /* set clock source */
+ lmc_dummy_set2_1, /* set line speed */
+ lmc_dummy_set_1, /* set cable length */
+ lmc_dummy_set_1, /* set scrambler */
+ lmc_t1_get_link_status, /* get link status */
+ lmc_dummy_set_1, /* set link status */
+ lmc_t1_set_crc_length, /* set CRC length */
+ lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+ lmc_t1_watchdog
+};
+
+
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_prot.h b/drivers/net/wan/lmc/lmc_prot.h
new file mode 100644
index 000000000000..f3b1df9e2cdb
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_prot.h
@@ -0,0 +1,15 @@
+#ifndef _LMC_PROTO_H_
+#define _LMC_PROTO_H_
+
+void lmc_proto_init(lmc_softc_t *sc);
+void lmc_proto_attach(lmc_softc_t *sc);
+void lmc_proto_detach(lmc_softc_t *sc);
+void lmc_proto_reopen(lmc_softc_t *sc);
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
+void lmc_proto_open(lmc_softc_t *sc);
+void lmc_proto_close(lmc_softc_t *sc);
+unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
+
+
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
new file mode 100644
index 000000000000..74876c0073e8
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -0,0 +1,249 @@
+ /*
+ * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+ * All rights reserved. www.lanmedia.com
+ *
+ * This code is written by:
+ * Andrew Stanley-Jones (asj@cban.com)
+ * Rob Braun (bbraun@vix.com),
+ * Michael Graff (explorer@vix.com) and
+ * Matt Thomas (matt@3am-software.com).
+ *
+ * With Help By:
+ * David Boggs
+ * Ron Crane
+ *       Alan Cox
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License version 2, incorporated herein by reference.
+ *
+ * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/bitops.h>
+
+#include <net/syncppp.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/smp.h>
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_debug.h"
+#include "lmc_ioctl.h"
+#include "lmc_proto.h"
+
+/*
+ * The compile-time macro SPPPSTUB causes the module to be
+ * compiled without referencing any of the sync ppp routines.
+ */
+#ifdef SPPPSTUB
+#define SPPP_detach(d) (void)0
+#define SPPP_open(d) 0
+#define SPPP_reopen(d) (void)0
+#define SPPP_close(d) (void)0
+#define SPPP_attach(d) (void)0
+#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
+#else
+#define SPPP_attach(x) sppp_attach((x)->pd)
+#define SPPP_detach(x) sppp_detach((x)->pd->dev)
+#define SPPP_open(x) sppp_open((x)->pd->dev)
+#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
+#define SPPP_close(x) sppp_close((x)->pd->dev)
+#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
+#endif
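+
+/*
+ * A minimal sketch of what the macros above buy us, assuming SPPPSTUB
+ * is not defined: the protocol hooks below simply forward to syncppp,
+ * e.g. lmc_proto_open() ends up calling sppp_open(sc->pd->dev); with
+ * SPPPSTUB defined the same call collapses to the constant 0.
+ */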
+
+// init
+void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_init in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
+ if (!sc->pd) {
+ printk("lmc_proto_init(): kmalloc failure!\n");
+ return;
+ }
+ sc->pd->dev = sc->lmc_device;
+ sc->if_ptr = sc->pd;
+ break;
+ case LMC_RAW:
+ break;
+ default:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_init out");
+}
+
+// attach
+void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_attach in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ {
+ struct net_device *dev = sc->lmc_device;
+ SPPP_attach(sc);
+ dev->do_ioctl = lmc_ioctl;
+ }
+ break;
+ case LMC_NET:
+ {
+ struct net_device *dev = sc->lmc_device;
+ /*
+             * Set a few basics here since this mode doesn't use sync_ppp
+ */
+ dev->flags |= IFF_POINTOPOINT;
+ dev->hard_header = NULL;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ }
+ case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */
+ {
+ }
+ default:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_attach out");
+}
+
+// detach
+void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
+{
+ switch(sc->if_type){
+ case LMC_PPP:
+ SPPP_detach(sc);
+ break;
+ case LMC_RAW: /* Tell someone we're detaching? */
+ break;
+ default:
+ break;
+ }
+
+}
+
+// reopen
+void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_reopen in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ SPPP_reopen(sc);
+ break;
+        case LMC_RAW: /* Reset the interface after being down, prepare to receive packets again */
+ break;
+ default:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
+}
+
+
+// ioctl
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_proto_ioctl in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ return SPPP_do_ioctl (sc, ifr, cmd);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
+}
+
+// open
+void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
+{
+ int ret;
+
+ lmc_trace(sc->lmc_device, "lmc_proto_open in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ ret = SPPP_open(sc);
+ if(ret < 0)
+ printk("%s: syncPPP open failed: %d\n", sc->name, ret);
+ break;
+ case LMC_RAW: /* We're about to start getting packets! */
+ break;
+ default:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_open out");
+}
+
+// close
+
+void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_close in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ SPPP_close(sc);
+ break;
+ case LMC_RAW: /* Interface going down */
+ break;
+ default:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_close out");
+}
+
+unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_type in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ return htons(ETH_P_WAN_PPP);
+ break;
+ case LMC_NET:
+ return htons(ETH_P_802_2);
+ break;
+    case LMC_RAW: /* Packet type for the skbuff isn't really meaningful here */
+ return htons(ETH_P_802_2);
+ break;
+ default:
+ printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
+ return htons(ETH_P_802_2);
+ break;
+ }
+    lmc_trace(sc->lmc_device, "lmc_proto_type out");
+
+}
+
+void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
+{
+ lmc_trace(sc->lmc_device, "lmc_proto_netif in");
+ switch(sc->if_type){
+ case LMC_PPP:
+ case LMC_NET:
+ default:
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ break;
+ case LMC_RAW:
+ break;
+ }
+ lmc_trace(sc->lmc_device, "lmc_proto_netif out");
+}
+
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
new file mode 100644
index 000000000000..080a55773349
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -0,0 +1,16 @@
+#ifndef _LMC_PROTO_H_
+#define _LMC_PROTO_H_
+
+void lmc_proto_init(lmc_softc_t *sc);
+void lmc_proto_attach(lmc_softc_t *sc);
+void lmc_proto_detach(lmc_softc_t *sc);
+void lmc_proto_reopen(lmc_softc_t *sc);
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
+void lmc_proto_open(lmc_softc_t *sc);
+void lmc_proto_close(lmc_softc_t *sc);
+unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
+void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
+int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
+
+#endif
+
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
new file mode 100644
index 000000000000..6d003a39bfad
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -0,0 +1,570 @@
+#ifndef _LMC_VAR_H_
+#define _LMC_VAR_H_
+
+/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
+
+ /*
+ * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+ * All rights reserved. www.lanmedia.com
+ *
+ * This code is written by:
+ * Andrew Stanley-Jones (asj@cban.com)
+ * Rob Braun (bbraun@vix.com),
+ * Michael Graff (explorer@vix.com) and
+ * Matt Thomas (matt@3am-software.com).
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License version 2, incorporated herein by reference.
+ */
+
+#include <linux/timer.h>
+
+#ifndef __KERNEL__
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+#endif
+
+/*
+ * basic definitions used in lmc include files
+ */
+
+typedef struct lmc___softc lmc_softc_t;
+typedef struct lmc___media lmc_media_t;
+typedef struct lmc___ctl lmc_ctl_t;
+
+#define lmc_csrptr_t unsigned long
+#define u_int16_t u16
+#define u_int8_t u8
+#define tulip_uint32_t u32
+
+#define LMC_REG_RANGE 0x80
+
+#define LMC_PRINTF_FMT "%s"
+#define LMC_PRINTF_ARGS (sc->lmc_device->name)
+
+#define TX_TIMEOUT (2*HZ)
+
+#define LMC_TXDESCS 32
+#define LMC_RXDESCS 32
+
+#define LMC_LINK_UP 1
+#define LMC_LINK_DOWN 0
+
+/* These macros for generic read and write to and from the dec chip */
+#define LMC_CSR_READ(sc, csr) \
+ inl((sc)->lmc_csrs.csr)
+#define LMC_CSR_WRITE(sc, reg, val) \
+ outl((val), (sc)->lmc_csrs.reg)
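+
+/*
+ * Illustrative use only (assuming a probed softc "sc" and a local
+ * u_int32_t "status"); each named CSR is just an I/O port held in
+ * lmc_csrs, so for example:
+ *
+ *	status = LMC_CSR_READ (sc, csr_status);
+ *	LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+ */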
+
+//#ifdef _LINUX_DELAY_H
+// #define SLOW_DOWN_IO udelay(2);
+// #undef __SLOW_DOWN_IO
+// #define __SLOW_DOWN_IO udelay(2);
+//#endif
+
+#define DELAY(n) SLOW_DOWN_IO
+
+#define lmc_delay() inl(sc->lmc_csrs.csr_9)
+
+/* This macro sync's up with the mii so that reads and writes can take place */
+#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
+ LMC_CSR_WRITE((sc), csr_9, 0x20000); \
+ lmc_delay(); \
+ LMC_CSR_WRITE((sc), csr_9, 0x30000); \
+ lmc_delay(); \
+ n--; }} while(0)
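+
+/*
+ * Usage sketch (illustrative, not driver code): the MII lines are
+ * bit-banged through CSR9, so a sync normally precedes any framer
+ * register access, e.g.
+ *
+ *	LMC_MII_SYNC (sc);
+ *	link_status = lmc_mii_readreg (sc, 0, 16);
+ */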
+
+struct lmc_regfile_t {
+ lmc_csrptr_t csr_busmode; /* CSR0 */
+ lmc_csrptr_t csr_txpoll; /* CSR1 */
+ lmc_csrptr_t csr_rxpoll; /* CSR2 */
+ lmc_csrptr_t csr_rxlist; /* CSR3 */
+ lmc_csrptr_t csr_txlist; /* CSR4 */
+ lmc_csrptr_t csr_status; /* CSR5 */
+ lmc_csrptr_t csr_command; /* CSR6 */
+ lmc_csrptr_t csr_intr; /* CSR7 */
+ lmc_csrptr_t csr_missed_frames; /* CSR8 */
+ lmc_csrptr_t csr_9; /* CSR9 */
+ lmc_csrptr_t csr_10; /* CSR10 */
+ lmc_csrptr_t csr_11; /* CSR11 */
+ lmc_csrptr_t csr_12; /* CSR12 */
+ lmc_csrptr_t csr_13; /* CSR13 */
+ lmc_csrptr_t csr_14; /* CSR14 */
+ lmc_csrptr_t csr_15; /* CSR15 */
+};
+
+#define csr_enetrom csr_9 /* 21040 */
+#define csr_reserved csr_10 /* 21040 */
+#define csr_full_duplex csr_11 /* 21040 */
+#define csr_bootrom csr_10 /* 21041/21140A/?? */
+#define csr_gp csr_12 /* 21140* */
+#define csr_watchdog csr_15 /* 21140* */
+#define csr_gp_timer csr_11 /* 21041/21140* */
+#define csr_srom_mii csr_9 /* 21041/21140* */
+#define csr_sia_status csr_12 /* 2104x */
+#define csr_sia_connectivity csr_13 /* 2104x */
+#define csr_sia_tx_rx csr_14 /* 2104x */
+#define csr_sia_general csr_15 /* 2104x */
+
+/* tulip length/control transmit descriptor definitions
+ * used to define bits in the second tulip_desc_t field (length)
+ * for the transmit descriptor -baz */
+
+#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF))
+#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800))
+#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000))
+#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000))
+#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000))
+#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000))
+#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000))
+#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000))
+#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000))
+#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000))
+#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000))
+#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000))
+
+#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
+#define TDES_COLLISION_COUNT_BIT_NUMBER 3
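+
+/*
+ * Illustrative composition of a transmit descriptor "length" word (a
+ * sketch; "len" stands for the frame length handed to the chip):
+ *
+ *	desc->length = (len & LMC_TDES_FIRST_BUFFER_SIZE)
+ *			| LMC_TDES_FIRST_SEGMENT
+ *			| LMC_TDES_LAST_SEGMENT
+ *			| LMC_TDES_INTERRUPT_ON_COMPLETION;
+ */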
+
+/* Constants for the RCV descriptor RDES */
+
+#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001))
+#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002))
+#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004))
+#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008))
+#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010))
+#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020))
+#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040))
+#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080))
+#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100))
+#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200))
+#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400))
+#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800))
+#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000))
+#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000))
+#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000))
+#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000))
+#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000))
+
+#define RDES_FRAME_LENGTH_BIT_NUMBER 16
+
+#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \
+ LMC_RDES_OVERFLOW \
+ | LMC_RDES_DRIBBLING_BIT \
+ | LMC_RDES_REPORT_ON_MII_ERR \
+ | LMC_RDES_COLLISION_SEEN ) )
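+
+/*
+ * Illustrative use of the RDES bits (a sketch; "status" is the first
+ * word of a completed receive descriptor, "len" a local):
+ *
+ *	if (status & LMC_RDES_ERROR_SUMMARY)
+ *		sc->stats.rx_errors++;
+ *	len = (status & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER;
+ */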
+
+
+/*
+ * Ioctl info
+ */
+
+typedef struct {
+ u_int32_t n;
+ u_int32_t m;
+ u_int32_t v;
+ u_int32_t x;
+ u_int32_t r;
+ u_int32_t f;
+ u_int32_t exact;
+} lmc_av9110_t;
+
+/*
+ * Common structure passed to the ioctl code.
+ */
+struct lmc___ctl {
+ u_int32_t cardtype;
+ u_int32_t clock_source; /* HSSI, T1 */
+ u_int32_t clock_rate; /* T1 */
+ u_int32_t crc_length;
+ u_int32_t cable_length; /* DS3 */
+ u_int32_t scrambler_onoff; /* DS3 */
+ u_int32_t cable_type; /* T1 */
+ u_int32_t keepalive_onoff; /* protocol */
+ u_int32_t ticks; /* ticks/sec */
+ union {
+ lmc_av9110_t ssi;
+ } cardspec;
+ u_int32_t circuit_type; /* T1 or E1 */
+};
+
+
+/*
+ * Careful, look at the data sheet; there's more to this
+ * structure than meets the eye. It should probably be:
+ *
+ * struct tulip_desc_t {
+ * u8 own:1;
+ * u32 status:31;
+ * u32 control:10;
+ * u32 buffer1;
+ * u32 buffer2;
+ * };
+ * You could also expand status control to provide more bit information
+ */
+
+struct tulip_desc_t {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+/*
+ * media independent methods to check on media status, link, light LEDs,
+ * etc.
+ */
+struct lmc___media {
+ void (* init)(lmc_softc_t * const);
+ void (* defaults)(lmc_softc_t * const);
+ void (* set_status)(lmc_softc_t * const, lmc_ctl_t *);
+ void (* set_clock_source)(lmc_softc_t * const, int);
+ void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
+ void (* set_cable_length)(lmc_softc_t * const, int);
+ void (* set_scrambler)(lmc_softc_t * const, int);
+ int (* get_link_status)(lmc_softc_t * const);
+ void (* set_link_status)(lmc_softc_t * const, int);
+ void (* set_crc_length)(lmc_softc_t * const, int);
+ void (* set_circuit_type)(lmc_softc_t * const, int);
+ void (* watchdog)(lmc_softc_t * const);
+};
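+
+/*
+ * The per-card tables in lmc_media.h fill in these slots, so generic
+ * code can dispatch without caring which media is fitted, e.g.
+ * (illustrative):
+ *
+ *	sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
+ */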
+
+
+#define STATCHECK 0xBEEFCAFE
+
+/* Included in this structure are first
+ * - standard net_device_stats
+ * - some other counters used for debug and driver performance
+ * evaluation -baz
+ */
+struct lmc_statistics
+{
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* no space in linux buffers */
+ unsigned long tx_dropped; /* no space available in linux */
+ unsigned long multicast; /* multicast packets received */
+ unsigned long collisions;
+
+ /* detailed rx_errors: */
+ unsigned long rx_length_errors;
+ unsigned long rx_over_errors; /* receiver ring buff overflow */
+ unsigned long rx_crc_errors; /* recved pkt with crc error */
+ unsigned long rx_frame_errors; /* recv'd frame alignment error */
+ unsigned long rx_fifo_errors; /* recv'r fifo overrun */
+ unsigned long rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ unsigned long tx_aborted_errors;
+ unsigned long tx_carrier_errors;
+ unsigned long tx_fifo_errors;
+ unsigned long tx_heartbeat_errors;
+ unsigned long tx_window_errors;
+
+ /* for cslip etc */
+ unsigned long rx_compressed;
+ unsigned long tx_compressed;
+
+ /* -------------------------------------
+ * Custom stats & counters follow -baz */
+ u_int32_t version_size;
+ u_int32_t lmc_cardtype;
+
+ u_int32_t tx_ProcTimeout;
+ u_int32_t tx_IntTimeout;
+ u_int32_t tx_NoCompleteCnt;
+ u_int32_t tx_MaxXmtsB4Int;
+ u_int32_t tx_TimeoutCnt;
+ u_int32_t tx_OutOfSyncPtr;
+ u_int32_t tx_tbusy0;
+ u_int32_t tx_tbusy1;
+ u_int32_t tx_tbusy_calls;
+ u_int32_t resetCount;
+ u_int32_t lmc_txfull;
+ u_int32_t tbusy;
+ u_int32_t dirtyTx;
+ u_int32_t lmc_next_tx;
+ u_int32_t otherTypeCnt;
+ u_int32_t lastType;
+ u_int32_t lastTypeOK;
+ u_int32_t txLoopCnt;
+ u_int32_t usedXmtDescripCnt;
+ u_int32_t txIndexCnt;
+ u_int32_t rxIntLoopCnt;
+
+ u_int32_t rx_SmallPktCnt;
+ u_int32_t rx_BadPktSurgeCnt;
+ u_int32_t rx_BuffAllocErr;
+ u_int32_t tx_lossOfClockCnt;
+
+ /* T1 error counters */
+ u_int32_t framingBitErrorCount;
+ u_int32_t lineCodeViolationCount;
+
+ u_int32_t lossOfFrameCount;
+ u_int32_t changeOfFrameAlignmentCount;
+ u_int32_t severelyErroredFrameCount;
+
+ u_int32_t check;
+};
+
+
+typedef struct lmc_xinfo {
+ u_int32_t Magic0; /* BEEFCAFE */
+
+ u_int32_t PciCardType;
+ u_int32_t PciSlotNumber; /* PCI slot number */
+
+ u_int16_t DriverMajorVersion;
+ u_int16_t DriverMinorVersion;
+ u_int16_t DriverSubVersion;
+
+ u_int16_t XilinxRevisionNumber;
+ u_int16_t MaxFrameSize;
+
+ u_int16_t t1_alarm1_status;
+ u_int16_t t1_alarm2_status;
+
+ int link_status;
+ u_int32_t mii_reg16;
+
+ u_int32_t Magic1; /* DEADBEEF */
+} LMC_XINFO;
+
+
+/*
+ * forward decl
+ */
+struct lmc___softc {
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ char *name;
+ u8 board_idx;
+ struct lmc_statistics stats;
+ struct net_device *lmc_device;
+
+ int hang, rxdesc, bad_packet, some_counter;
+ u_int32_t txgo;
+ struct lmc_regfile_t lmc_csrs;
+ volatile u_int32_t lmc_txtick;
+ volatile u_int32_t lmc_rxtick;
+ u_int32_t lmc_flags;
+ u_int32_t lmc_intrmask; /* our copy of csr_intr */
+ u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */
+ u_int32_t lmc_busmode; /* our copy of csr_busmode */
+ u_int32_t lmc_gpio_io; /* state of in/out settings */
+ u_int32_t lmc_gpio; /* state of outputs */
+ struct sk_buff* lmc_txq[LMC_TXDESCS];
+ struct sk_buff* lmc_rxq[LMC_RXDESCS];
+ volatile
+ struct tulip_desc_t lmc_rxring[LMC_RXDESCS];
+ volatile
+ struct tulip_desc_t lmc_txring[LMC_TXDESCS];
+ unsigned int lmc_next_rx, lmc_next_tx;
+ volatile
+ unsigned int lmc_taint_tx, lmc_taint_rx;
+ int lmc_tx_start, lmc_txfull;
+ int lmc_txbusy;
+ u_int16_t lmc_miireg16;
+ int lmc_ok;
+ int last_link_status;
+ int lmc_cardtype;
+ u_int32_t last_frameerr;
+ lmc_media_t *lmc_media;
+ struct timer_list timer;
+ lmc_ctl_t ictl;
+ u_int32_t TxDescriptControlInit;
+
+ int tx_TimeoutInd; /* additional driver state */
+ int tx_TimeoutDisplay;
+ unsigned int lastlmc_taint_tx;
+ int lasttx_packets;
+ u_int32_t tx_clockState;
+ u_int32_t lmc_crcSize;
+ LMC_XINFO lmc_xinfo;
+ char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
+ char lmc_timing; /* for HSSI and SSI */
+ int got_irq;
+
+ char last_led_err[4];
+
+ u32 last_int;
+ u32 num_int;
+
+ spinlock_t lmc_lock;
+ u_int16_t if_type; /* PPP or NET */
+ struct ppp_device *pd;
+
+ /* Failure cases */
+ u8 failed_ring;
+ u8 failed_recv_alloc;
+
+ /* Structure check */
+ u32 check;
+};
+
+#define LMC_PCI_TIME 1
+#define LMC_EXT_TIME 0
+
+#define PKT_BUF_SZ 1542 /* was 1536 */
+
+/* CSR5 settings */
+#define TIMER_INT 0x00000800
+#define TP_LINK_FAIL 0x00001000
+#define TP_LINK_PASS 0x00000010
+#define NORMAL_INT 0x00010000
+#define ABNORMAL_INT 0x00008000
+#define RX_JABBER_INT 0x00000200
+#define RX_DIED 0x00000100
+#define RX_NOBUFF 0x00000080
+#define RX_INT 0x00000040
+#define TX_FIFO_UNDER 0x00000020
+#define TX_JABBER 0x00000008
+#define TX_NOBUFF 0x00000004
+#define TX_DIED 0x00000002
+#define TX_INT 0x00000001
+
+/* CSR6 settings */
+#define OPERATION_MODE 0x00000200 /* Full Duplex */
+#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
+#define RECIEVE_ALL              0x40000000     /* Receive All */
+#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
+
+/* Dec control registers CSR6 as well */
+#define LMC_DEC_ST 0x00002000
+#define LMC_DEC_SR 0x00000002
+
+/* CSR15 settings */
+#define RECV_WATCHDOG_DISABLE 0x00000010
+#define JABBER_DISABLE 0x00000001
+
+/* More settings */
+/*
+ * aSR6 -- Command (Operation Mode) Register
+ */
+#define TULIP_CMD_RECEIVEALL    0x40000000L     /* (RW)  Receive all frames? */
+#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
+#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
+#define TULIP_CMD_STOREFWD      0x00200000L     /* (RW)  Store and Forward (21140) */
+#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
+#define TULIP_CMD_PORTSELECT    0x00040000L     /* (RW)  Port Select (100Mb) (21140) */
+#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
+#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */
+#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */
+#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */
+#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */
+
+#define TULIP_GP_PINSET 0x00000100L
+#define TULIP_BUSMODE_SWRESET 0x00000001L
+#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
+#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
+
+#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
+#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
+#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
+#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
+#define TULIP_STS_GTE           0x00000800L     /* (RW)  General Purpose Timer Exp */
+#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
+#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
+#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
+#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
+#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */
+#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */
+#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */
+#define TULIP_STS_TXNOBUF 0x00000004L
+#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */
+#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */
+
+#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */
+
+#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */
+#define TULIP_STS_RXNOBUF 0x00000080L
+
+#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */
+#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */
+#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */
+#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */
+#define TULIP_DSTS_RxMIIERR 0x00000008
+#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR)
+
+#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \
+ | TULIP_STS_RXINTR \
+ | TULIP_STS_TXINTR \
+ | TULIP_STS_ABNRMLINTR \
+ | TULIP_STS_SYSERROR \
+ | TULIP_STS_TXSTOPPED \
+ | TULIP_STS_TXUNDERFLOW\
+ | TULIP_STS_RXSTOPPED )
+
+#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000))
+#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000))
+
+#ifndef TULIP_CMD_RECEIVEALL
+#define TULIP_CMD_RECEIVEALL 0x40000000L
+#endif
+
+/* Adapter module number */
+#define LMC_ADAP_HSSI 2
+#define LMC_ADAP_DS3 3
+#define LMC_ADAP_SSI 4
+#define LMC_ADAP_T1 5
+
+#define HDLC_HDR_LEN 4
+#define HDLC_ADDR_LEN 1
+#define HDLC_SLARP 0x8035
+#define LMC_MTU 1500
+#define SLARP_LINECHECK 2
+
+#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
+#define LMC_CRC_LEN_32 4
+
+#ifdef LMC_HDLC
+/* definition of an hdlc header. */
+struct hdlc_hdr
+{
+ u8 address;
+ u8 control;
+ u16 type;
+};
+
+/* definition of a slarp header. */
+struct slarp
+{
+ long code;
+ union sl
+ {
+ struct
+ {
+ ulong address;
+ ulong mask;
+ ushort unused;
+ } add;
+ struct
+ {
+ ulong mysequence;
+ ulong yoursequence;
+ ushort reliability;
+ ulong time;
+ } chk;
+ } t;
+};
+#endif /* LMC_HDLC */
+
+
+#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
new file mode 100644
index 000000000000..cd32751b64eb
--- /dev/null
+++ b/drivers/net/wan/n2.c
@@ -0,0 +1,562 @@
+/*
+ * SDL Inc. RISCom/N2 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see http://hq.pm.waw.pl/hdlc/
+ *
+ * Note: integrated CSU/DSU/DDS are not supported by this driver
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * SDL Inc. PPP/HDLC/CISCO driver
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#include "hd64570.h"
+
+
+static const char* version = "SDL RISCom/N2 driver version: 1.15";
+static const char* devname = "RISCom/N2";
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define USE_WINDOWSIZE 16384
+#define USE_BUS16BITS 1
+#define CLOCK_BASE 9830400 /* 9.8304 MHz */
+#define MAX_PAGES 16 /* 16 RAM pages at max */
+#define MAX_RAM_SIZE 0x80000 /* 512 KB */
+#if MAX_RAM_SIZE > MAX_PAGES * USE_WINDOWSIZE
+#undef MAX_RAM_SIZE
+#define MAX_RAM_SIZE (MAX_PAGES * USE_WINDOWSIZE)
+#endif
+#define N2_IOPORTS 0x10
+#define NEED_DETECT_RAM
+#define NEED_SCA_MSCI_INTR
+#define MAX_TX_BUFFERS 10
+
+static char *hw = NULL; /* pointer to hw=xxx command line string */
+
+/* RISCom/N2 Board Registers */
+
+/* PC Control Register */
+#define N2_PCR 0
+#define PCR_RUNSCA 1 /* Run 64570 */
+#define PCR_VPM 2 /* Enable VPM - needed if using RAM above 1 MB */
+#define PCR_ENWIN 4 /* Open window */
+#define PCR_BUS16 8 /* 16-bit bus */
+
+
+/* Memory Base Address Register */
+#define N2_BAR 2
+
+
+/* Page Scan Register */
+#define N2_PSR 4
+#define WIN16K 0x00
+#define WIN32K 0x20
+#define WIN64K 0x40
+#define PSR_WINBITS 0x60
+#define PSR_DMAEN 0x80
+#define PSR_PAGEBITS 0x0F
+
+
+/* Modem Control Reg */
+#define N2_MCR 6
+#define CLOCK_OUT_PORT1 0x80
+#define CLOCK_OUT_PORT0 0x40
+#define TX422_PORT1 0x20
+#define TX422_PORT0 0x10
+#define DSR_PORT1 0x08
+#define DSR_PORT0 0x04
+#define DTR_PORT1 0x02
+#define DTR_PORT0 0x01
+
+
+typedef struct port_s {
+ struct net_device *dev;
+ struct card_s *card;
+ spinlock_t lock; /* TX lock */
+ sync_serial_settings settings;
+ int valid; /* port enabled */
+ int rxpart; /* partial frame received, next frame invalid*/
+ unsigned short encoding;
+ unsigned short parity;
+ u16 rxin; /* rx ring buffer 'in' pointer */
+ u16 txin; /* tx ring buffer 'in' and 'last' pointers */
+ u16 txlast;
+ u8 rxs, txs, tmc; /* SCA registers */
+ u8 phy_node; /* physical port # - 0 or 1 */
+ u8 log_node; /* logical port # */
+}port_t;
+
+
+
+typedef struct card_s {
+ u8 __iomem *winbase; /* ISA window base address */
+ u32 phy_winbase; /* ISA physical base address */
+ u32 ram_size; /* number of bytes */
+ u16 io; /* IO Base address */
+ u16 buff_offset; /* offset of first buffer of first channel */
+ u16 rx_ring_buffers; /* number of buffers in a ring */
+ u16 tx_ring_buffers;
+ u8 irq; /* IRQ (3-15) */
+
+ port_t ports[2];
+ struct card_s *next_card;
+}card_t;
+
+
+static card_t *first_card;
+static card_t **new_card = &first_card;
+
+
+#define sca_reg(reg, card) (0x8000 | (card)->io | \
+ ((reg) & 0x0F) | (((reg) & 0xF0) << 6))
+#define sca_in(reg, card) inb(sca_reg(reg, card))
+#define sca_out(value, reg, card) outb(value, sca_reg(reg, card))
+#define sca_inw(reg, card) inw(sca_reg(reg, card))
+#define sca_outw(value, reg, card) outw(value, sca_reg(reg, card))
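+
+/* The macros above fold an SCA register number into the board's ISA I/O
+ * map: the low nibble selects a byte within a 16-byte window and the
+ * high nibble is shifted up by 6 bits. For example (worked out, not
+ * driver code): sca_reg(0x2E, card) == 0x8000 | card->io | 0x0E | 0x800.
+ */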
+
+#define port_to_card(port) ((port)->card)
+#define log_node(port) ((port)->log_node)
+#define phy_node(port) ((port)->phy_node)
+#define winsize(card) (USE_WINDOWSIZE)
+#define winbase(card) ((card)->winbase)
+#define get_port(card, port) ((card)->ports[port].valid ? \
+ &(card)->ports[port] : NULL)
+
+
+
+static __inline__ u8 sca_get_page(card_t *card)
+{
+ return inb(card->io + N2_PSR) & PSR_PAGEBITS;
+}
+
+
+static __inline__ void openwin(card_t *card, u8 page)
+{
+ u8 psr = inb(card->io + N2_PSR);
+ outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
+}
+
+
+
+#include "hd6457x.c"
+
+
+
+static void n2_set_iface(port_t *port)
+{
+ card_t *card = port->card;
+ int io = card->io;
+ u8 mcr = inb(io + N2_MCR);
+ u8 msci = get_msci(port);
+ u8 rxs = port->rxs & CLK_BRG_MASK;
+ u8 txs = port->txs & CLK_BRG_MASK;
+
+ switch(port->settings.clock_type) {
+ case CLOCK_INT:
+ mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+ rxs |= CLK_BRG_RX; /* BRG output */
+ txs |= CLK_RXCLK_TX; /* RX clock */
+ break;
+
+ case CLOCK_TXINT:
+ mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_BRG_TX; /* BRG output */
+ break;
+
+ case CLOCK_TXFROMRX:
+ mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_RXCLK_TX; /* RX clock */
+ break;
+
+ default: /* Clock EXTernal */
+ mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_LINE_TX; /* TXC input */
+ }
+
+ outb(mcr, io + N2_MCR);
+ port->rxs = rxs;
+ port->txs = txs;
+ sca_out(rxs, msci + RXS, card);
+ sca_out(txs, msci + TXS, card);
+ sca_set_port(port);
+}
+
+
+
+static int n2_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ int io = port->card->io;
+ u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
+ int result;
+
+ result = hdlc_open(dev);
+ if (result)
+ return result;
+
+ mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
+ outb(mcr, io + N2_MCR);
+
+ outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */
+ outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */
+ sca_open(dev);
+ n2_set_iface(port);
+ return 0;
+}
+
+
+
+static int n2_close(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ int io = port->card->io;
+ u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+
+ sca_close(dev);
+ mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
+ outb(mcr, io + N2_MCR);
+ hdlc_close(dev);
+ return 0;
+}
+
+
+
+static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+ if (cmd == SIOCDEVPRIVATE) {
+ sca_dump_rings(dev);
+ return 0;
+ }
+#endif
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(line, &port->settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ if (new_line.clock_type != CLOCK_EXT &&
+ new_line.clock_type != CLOCK_TXFROMRX &&
+ new_line.clock_type != CLOCK_INT &&
+ new_line.clock_type != CLOCK_TXINT)
+ return -EINVAL; /* No such clock setting */
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ memcpy(&port->settings, &new_line, size); /* Update settings */
+ n2_set_iface(port);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+
+
+static void n2_destroy_card(card_t *card)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < 2; cnt++)
+ if (card->ports[cnt].card) {
+ struct net_device *dev = port_to_dev(&card->ports[cnt]);
+ unregister_hdlc_device(dev);
+ }
+
+ if (card->irq)
+ free_irq(card->irq, card);
+
+ if (card->winbase) {
+ iounmap(card->winbase);
+ release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
+ }
+
+ if (card->io)
+ release_region(card->io, N2_IOPORTS);
+ if (card->ports[0].dev)
+ free_netdev(card->ports[0].dev);
+ if (card->ports[1].dev)
+ free_netdev(card->ports[1].dev);
+ kfree(card);
+}
+
+
+
+static int __init n2_run(unsigned long io, unsigned long irq,
+ unsigned long winbase, long valid0, long valid1)
+{
+ card_t *card;
+ u8 cnt, pcr;
+ int i;
+
+ if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
+ printk(KERN_ERR "n2: invalid I/O port value\n");
+ return -ENODEV;
+ }
+
+ if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
+ printk(KERN_ERR "n2: invalid IRQ value\n");
+ return -ENODEV;
+ }
+
+ if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
+ printk(KERN_ERR "n2: invalid RAM value\n");
+ return -ENODEV;
+ }
+
+ card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ if (card == NULL) {
+ printk(KERN_ERR "n2: unable to allocate memory\n");
+ return -ENOBUFS;
+ }
+ memset(card, 0, sizeof(card_t));
+
+ card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
+ card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
+ if (!card->ports[0].dev || !card->ports[1].dev) {
+ printk(KERN_ERR "n2: unable to allocate memory\n");
+ n2_destroy_card(card);
+ return -ENOMEM;
+ }
+
+ if (!request_region(io, N2_IOPORTS, devname)) {
+ printk(KERN_ERR "n2: I/O port region in use\n");
+ n2_destroy_card(card);
+ return -EBUSY;
+ }
+ card->io = io;
+
+ if (request_irq(irq, &sca_intr, 0, devname, card)) {
+ printk(KERN_ERR "n2: could not allocate IRQ\n");
+ n2_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->irq = irq;
+
+ if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
+ printk(KERN_ERR "n2: could not request RAM window\n");
+ n2_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->phy_winbase = winbase;
+ card->winbase = ioremap(winbase, USE_WINDOWSIZE);
+
+ outb(0, io + N2_PCR);
+ outb(winbase >> 12, io + N2_BAR);
+
+ switch (USE_WINDOWSIZE) {
+ case 16384:
+ outb(WIN16K, io + N2_PSR);
+ break;
+
+ case 32768:
+ outb(WIN32K, io + N2_PSR);
+ break;
+
+ case 65536:
+ outb(WIN64K, io + N2_PSR);
+ break;
+
+ default:
+ printk(KERN_ERR "n2: invalid window size\n");
+ n2_destroy_card(card);
+ return -ENODEV;
+ }
+
+ pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
+ outb(pcr, io + N2_PCR);
+
+ card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);
+
+ /* number of TX + RX buffers for one port */
+ i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) +
+ HDLC_MAX_MRU));
+
+ card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
+ card->rx_ring_buffers = i - card->tx_ring_buffers;
+
+ card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
+ (card->tx_ring_buffers + card->rx_ring_buffers);
+
+ printk(KERN_INFO "n2: RISCom/N2 %u KB RAM, IRQ%u, "
+	       "using %u TX + %u RX packet rings\n", card->ram_size / 1024,
+ card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+
+ if (card->tx_ring_buffers < 1) {
+ printk(KERN_ERR "n2: RAM test failed\n");
+ n2_destroy_card(card);
+ return -EIO;
+ }
+
+ pcr |= PCR_RUNSCA; /* run SCA */
+ outb(pcr, io + N2_PCR);
+ outb(0, io + N2_MCR);
+
+ sca_init(card, 0);
+ for (cnt = 0; cnt < 2; cnt++) {
+ port_t *port = &card->ports[cnt];
+ struct net_device *dev = port_to_dev(port);
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
+ continue;
+
+ port->phy_node = cnt;
+ port->valid = 1;
+
+ if ((cnt == 1) && valid0)
+ port->log_node = 1;
+
+ spin_lock_init(&port->lock);
+ SET_MODULE_OWNER(dev);
+ dev->irq = irq;
+ dev->mem_start = winbase;
+ dev->mem_end = winbase + USE_WINDOWSIZE - 1;
+ dev->tx_queue_len = 50;
+ dev->do_ioctl = n2_ioctl;
+ dev->open = n2_open;
+ dev->stop = n2_close;
+ hdlc->attach = sca_attach;
+ hdlc->xmit = sca_xmit;
+ port->settings.clock_type = CLOCK_EXT;
+ port->card = card;
+
+ if (register_hdlc_device(dev)) {
+ printk(KERN_WARNING "n2: unable to register hdlc "
+ "device\n");
+ port->card = NULL;
+ n2_destroy_card(card);
+ return -ENOBUFS;
+ }
+ sca_init_sync_port(port); /* Set up SCA memory */
+
+ printk(KERN_INFO "%s: RISCom/N2 node %d\n",
+ dev->name, port->phy_node);
+ }
+
+ *new_card = card;
+ new_card = &card->next_card;
+
+ return 0;
+}
+
+
+
+static int __init n2_init(void)
+{
+ if (hw==NULL) {
+#ifdef MODULE
+ printk(KERN_INFO "n2: no card initialized\n");
+#endif
+ return -ENOSYS; /* no parameters specified, abort */
+ }
+
+ printk(KERN_INFO "%s\n", version);
+
+ do {
+ unsigned long io, irq, ram;
+ long valid[2] = { 0, 0 }; /* Default = both ports disabled */
+
+ io = simple_strtoul(hw, &hw, 0);
+
+ if (*hw++ != ',')
+ break;
+ irq = simple_strtoul(hw, &hw, 0);
+
+ if (*hw++ != ',')
+ break;
+ ram = simple_strtoul(hw, &hw, 0);
+
+ if (*hw++ != ',')
+ break;
+ while(1) {
+ if (*hw == '0' && !valid[0])
+ valid[0] = 1; /* Port 0 enabled */
+ else if (*hw == '1' && !valid[1])
+ valid[1] = 1; /* Port 1 enabled */
+ else
+ break;
+ hw++;
+ }
+
+ if (!valid[0] && !valid[1])
+ break; /* at least one port must be used */
+
+ if (*hw == ':' || *hw == '\x0')
+ n2_run(io, irq, ram, valid[0], valid[1]);
+
+ if (*hw == '\x0')
+ return first_card ? 0 : -ENOSYS;
+ }while(*hw++ == ':');
+
+ printk(KERN_ERR "n2: invalid hardware parameters\n");
+ return first_card ? 0 : -ENOSYS;
+}
+
+
+static void __exit n2_cleanup(void)
+{
+ card_t *card = first_card;
+
+ while (card) {
+ card_t *ptr = card;
+ card = card->next_card;
+ n2_destroy_card(ptr);
+ }
+}
+
+
+module_init(n2_init);
+module_exit(n2_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("RISCom/N2 serial port driver");
+MODULE_LICENSE("GPL v2");
+module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */
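+
+/* Example invocation (illustrative values only): one card at io 0x300,
+ * IRQ 10, RAM window 0xd0000 with both ports enabled, a second card at
+ * io 0x310, IRQ 11, RAM 0xd4000 with port 0 only, matching the parsing
+ * loop in n2_init() above:
+ *
+ *	modprobe n2 hw=0x300,10,0xd0000,01:0x310,11,0xd4000,0
+ */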
diff --git a/drivers/net/wan/pc300-falc-lh.h b/drivers/net/wan/pc300-falc-lh.h
new file mode 100644
index 000000000000..01ed23ca76c7
--- /dev/null
+++ b/drivers/net/wan/pc300-falc-lh.h
@@ -0,0 +1,1238 @@
+/*
+ * falc.h Description of the Siemens FALC T1/E1 framer.
+ *
+ * Author: Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright: (c) 2000-2001 Cyclades Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Log: falc-lh.h,v $
+ * Revision 3.1 2001/06/15 12:41:10 regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1 2001/06/13 20:24:47 daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 1.1 2000/05/15 ivan
+ * Included DJA bits for the LIM2 register.
+ *
+ * Revision 1.0 2000/02/22 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef _FALC_LH_H
+#define _FALC_LH_H
+
+#define NUM_OF_T1_CHANNELS 24
+#define NUM_OF_E1_CHANNELS 32
+
+/*>>>>>>>>>>>>>>>>> FALC Register Bits (Transmit Mode) <<<<<<<<<<<<<<<<<<< */
+
+/* CMDR (Command Register)
+ ---------------- E1 & T1 ------------------------------ */
+#define CMDR_RMC 0x80
+#define CMDR_RRES 0x40
+#define CMDR_XREP 0x20
+#define CMDR_XRES 0x10
+#define CMDR_XHF 0x08
+#define CMDR_XTF 0x04
+#define CMDR_XME 0x02
+#define CMDR_SRES 0x01
+
+/* MODE (Mode Register)
+ ----------------- E1 & T1 ----------------------------- */
+#define MODE_MDS2 0x80
+#define MODE_MDS1 0x40
+#define MODE_MDS0 0x20
+#define MODE_BRAC 0x10
+#define MODE_HRAC 0x08
+
+/* IPC (Interrupt Port Configuration)
+ ----------------- E1 & T1 ----------------------------- */
+#define IPC_VIS 0x80
+#define IPC_SCI 0x04
+#define IPC_IC1 0x02
+#define IPC_IC0 0x01
+
+/* CCR1 (Common Configuration Register 1)
+ ----------------- E1 & T1 ----------------------------- */
+#define CCR1_SFLG 0x80
+#define CCR1_XTS16RA 0x40
+#define CCR1_BRM 0x40
+#define CCR1_CASSYM 0x20
+#define CCR1_EDLX 0x20
+#define CCR1_EITS 0x10
+#define CCR1_ITF 0x08
+#define CCR1_RFT1 0x02
+#define CCR1_RFT0 0x01
+
+/* CCR3 (Common Configuration Register 3)
+ ---------------- E1 & T1 ------------------------------ */
+
+#define CCR3_PRE1 0x80
+#define CCR3_PRE0 0x40
+#define CCR3_EPT 0x20
+#define CCR3_RADD 0x10
+#define CCR3_RCRC 0x04
+#define CCR3_XCRC 0x02
+
+
+/* RTR1-4 (Receive Timeslot Register 1-4)
+ ---------------- E1 & T1 ------------------------------ */
+
+#define RTR1_TS0 0x80
+#define RTR1_TS1 0x40
+#define RTR1_TS2 0x20
+#define RTR1_TS3 0x10
+#define RTR1_TS4 0x08
+#define RTR1_TS5 0x04
+#define RTR1_TS6 0x02
+#define RTR1_TS7 0x01
+
+#define RTR2_TS8 0x80
+#define RTR2_TS9 0x40
+#define RTR2_TS10 0x20
+#define RTR2_TS11 0x10
+#define RTR2_TS12 0x08
+#define RTR2_TS13 0x04
+#define RTR2_TS14 0x02
+#define RTR2_TS15 0x01
+
+#define RTR3_TS16 0x80
+#define RTR3_TS17 0x40
+#define RTR3_TS18 0x20
+#define RTR3_TS19 0x10
+#define RTR3_TS20 0x08
+#define RTR3_TS21 0x04
+#define RTR3_TS22 0x02
+#define RTR3_TS23 0x01
+
+#define RTR4_TS24 0x80
+#define RTR4_TS25 0x40
+#define RTR4_TS26 0x20
+#define RTR4_TS27 0x10
+#define RTR4_TS28 0x08
+#define RTR4_TS29 0x04
+#define RTR4_TS30 0x02
+#define RTR4_TS31 0x01
+
+
+/* TTR1-4 (Transmit Timeslot Register 1-4)
+ ---------------- E1 & T1 ------------------------------ */
+
+#define TTR1_TS0 0x80
+#define TTR1_TS1 0x40
+#define TTR1_TS2 0x20
+#define TTR1_TS3 0x10
+#define TTR1_TS4 0x08
+#define TTR1_TS5 0x04
+#define TTR1_TS6 0x02
+#define TTR1_TS7 0x01
+
+#define TTR2_TS8 0x80
+#define TTR2_TS9 0x40
+#define TTR2_TS10 0x20
+#define TTR2_TS11 0x10
+#define TTR2_TS12 0x08
+#define TTR2_TS13 0x04
+#define TTR2_TS14 0x02
+#define TTR2_TS15 0x01
+
+#define TTR3_TS16 0x80
+#define TTR3_TS17 0x40
+#define TTR3_TS18 0x20
+#define TTR3_TS19 0x10
+#define TTR3_TS20 0x08
+#define TTR3_TS21 0x04
+#define TTR3_TS22 0x02
+#define TTR3_TS23 0x01
+
+#define TTR4_TS24 0x80
+#define TTR4_TS25 0x40
+#define TTR4_TS26 0x20
+#define TTR4_TS27 0x10
+#define TTR4_TS28 0x08
+#define TTR4_TS29 0x04
+#define TTR4_TS30 0x02
+#define TTR4_TS31 0x01
+
+
+
+/* IMR0-4 (Interrupt Mask Register 0-4)
+
+ ----------------- E1 & T1 ----------------------------- */
+
+#define IMR0_RME 0x80
+#define IMR0_RFS 0x40
+#define IMR0_T8MS 0x20
+#define IMR0_ISF 0x20
+#define IMR0_RMB 0x10
+#define IMR0_CASC 0x08
+#define IMR0_RSC 0x08
+#define IMR0_CRC6 0x04
+#define IMR0_CRC4 0x04
+#define IMR0_PDEN 0x02
+#define IMR0_RPF 0x01
+
+#define IMR1_CASE 0x80
+#define IMR1_RDO 0x40
+#define IMR1_ALLS 0x20
+#define IMR1_XDU 0x10
+#define IMR1_XMB 0x08
+#define IMR1_XLSC 0x02
+#define IMR1_XPR 0x01
+#define IMR1_LLBSC 0x80
+
+#define IMR2_FAR 0x80
+#define IMR2_LFA 0x40
+#define IMR2_MFAR 0x20
+#define IMR2_T400MS 0x10
+#define IMR2_LMFA 0x10
+#define IMR2_AIS 0x08
+#define IMR2_LOS 0x04
+#define IMR2_RAR 0x02
+#define IMR2_RA 0x01
+
+#define IMR3_ES 0x80
+#define IMR3_SEC 0x40
+#define IMR3_LMFA16 0x20
+#define IMR3_AIS16 0x10
+#define IMR3_RA16 0x08
+#define IMR3_API 0x04
+#define IMR3_XSLP 0x20
+#define IMR3_XSLN 0x10
+#define IMR3_LLBSC 0x08
+#define IMR3_XRS 0x04
+#define IMR3_SLN 0x02
+#define IMR3_SLP 0x01
+
+#define IMR4_LFA 0x80
+#define IMR4_FER 0x40
+#define IMR4_CER 0x20
+#define IMR4_AIS 0x10
+#define IMR4_LOS 0x08
+#define IMR4_CVE 0x04
+#define IMR4_SLIP 0x02
+#define IMR4_EBE 0x01
+
+/* FMR0-5 for E1 and T1 (Framer Mode Register ) */
+
+#define FMR0_XC1 0x80
+#define FMR0_XC0 0x40
+#define FMR0_RC1 0x20
+#define FMR0_RC0 0x10
+#define FMR0_EXTD 0x08
+#define FMR0_ALM 0x04
+#define E1_FMR0_FRS 0x02
+#define T1_FMR0_FRS 0x08
+#define FMR0_SRAF 0x04
+#define FMR0_EXLS 0x02
+#define FMR0_SIM 0x01
+
+#define FMR1_MFCS 0x80
+#define FMR1_AFR 0x40
+#define FMR1_ENSA 0x20
+#define FMR1_CTM 0x80
+#define FMR1_SIGM 0x40
+#define FMR1_EDL 0x20
+#define FMR1_PMOD 0x10
+#define FMR1_XFS 0x08
+#define FMR1_CRC 0x08
+#define FMR1_ECM 0x04
+#define FMR1_IMOD 0x02
+#define FMR1_XAIS 0x01
+
+#define FMR2_RFS1 0x80
+#define FMR2_RFS0 0x40
+#define FMR2_MCSP 0x40
+#define FMR2_RTM 0x20
+#define FMR2_SSP 0x20
+#define FMR2_DAIS 0x10
+#define FMR2_SAIS 0x08
+#define FMR2_PLB 0x04
+#define FMR2_AXRA 0x02
+#define FMR2_ALMF 0x01
+#define FMR2_EXZE 0x01
+
+#define LOOP_RTM 0x40
+#define LOOP_SFM 0x40
+#define LOOP_ECLB 0x20
+#define LOOP_CLA 0x1f
+
+/*--------------------- E1 ----------------------------*/
+#define FMR3_XLD 0x20
+#define FMR3_XLU 0x10
+
+/*--------------------- T1 ----------------------------*/
+#define FMR4_AIS3 0x80
+#define FMR4_TM 0x40
+#define FMR4_XRA 0x20
+#define FMR4_SSC1 0x10
+#define FMR4_SSC0 0x08
+#define FMR4_AUTO 0x04
+#define FMR4_FM1 0x02
+#define FMR4_FM0 0x01
+
+#define FMR5_SRS 0x80
+#define FMR5_EIBR 0x40
+#define FMR5_XLD 0x20
+#define FMR5_XLU 0x10
+
+
+/* LOOP (Channel Loop Back)
+
+ ------------------ E1 & T1 ---------------------------- */
+
+#define LOOP_SFM 0x40
+#define LOOP_ECLB 0x20
+#define LOOP_CLA4 0x10
+#define LOOP_CLA3 0x08
+#define LOOP_CLA2 0x04
+#define LOOP_CLA1 0x02
+#define LOOP_CLA0 0x01
+
+
+
+/* XSW (Transmit Service Word Pulseframe)
+
+ ------------------- E1 --------------------------- */
+
+#define XSW_XSIS 0x80
+#define XSW_XTM 0x40
+#define XSW_XRA 0x20
+#define XSW_XY0 0x10
+#define XSW_XY1 0x08
+#define XSW_XY2 0x04
+#define XSW_XY3 0x02
+#define XSW_XY4 0x01
+
+
+/* XSP (Transmit Spare Bits)
+
+ ------------------- E1 --------------------------- */
+
+#define XSP_XAP 0x80
+#define XSP_CASEN 0x40
+#define XSP_TT0 0x20
+#define XSP_EBP 0x10
+#define XSP_AXS 0x08
+#define XSP_XSIF 0x04
+#define XSP_XS13 0x02
+#define XSP_XS15 0x01
+
+
+/* XC0/1 (Transmit Control 0/1)
+ ------------------ E1 & T1 ---------------------------- */
+
+#define XC0_SA8E 0x80
+#define XC0_SA7E 0x40
+#define XC0_SA6E 0x20
+#define XC0_SA5E 0x10
+#define XC0_SA4E 0x08
+#define XC0_BRM 0x80
+#define XC0_MFBS 0x40
+#define XC0_SFRZ 0x10
+#define XC0_XCO2 0x04
+#define XC0_XCO1 0x02
+#define XC0_XCO0 0x01
+
+#define XC1_XTO5 0x20
+#define XC1_XTO4 0x10
+#define XC1_XTO3 0x08
+#define XC1_XTO2 0x04
+#define XC1_XTO1 0x02
+#define XC1_XTO0 0x01
+
+
+/* RC0/1 (Receive Control 0/1)
+ ------------------ E1 & T1 ---------------------------- */
+
+#define RC0_SICS 0x40
+#define RC0_CRCI 0x20
+#define RC0_XCRCI 0x10
+#define RC0_RDIS 0x08
+#define RC0_RCO2 0x04
+#define RC0_RCO1 0x02
+#define RC0_RCO0 0x01
+
+#define RC1_SWD 0x80
+#define RC1_ASY4 0x40
+#define RC1_RRAM 0x40
+#define RC1_RTO5 0x20
+#define RC1_RTO4 0x10
+#define RC1_RTO3 0x08
+#define RC1_RTO2 0x04
+#define RC1_RTO1 0x02
+#define RC1_RTO0 0x01
+
+
+
+/* XPM0-2 (Transmit Pulse Mask 0-2)
+ --------------------- E1 & T1 ------------------------- */
+
+#define XPM0_XP12 0x80
+#define XPM0_XP11 0x40
+#define XPM0_XP10 0x20
+#define XPM0_XP04 0x10
+#define XPM0_XP03 0x08
+#define XPM0_XP02 0x04
+#define XPM0_XP01 0x02
+#define XPM0_XP00 0x01
+
+#define XPM1_XP30 0x80
+#define XPM1_XP24 0x40
+#define XPM1_XP23 0x20
+#define XPM1_XP22 0x10
+#define XPM1_XP21 0x08
+#define XPM1_XP20 0x04
+#define XPM1_XP14 0x02
+#define XPM1_XP13 0x01
+
+#define XPM2_XLHP 0x80
+#define XPM2_XLT 0x40
+#define XPM2_DAXLT 0x20
+#define XPM2_XP34 0x08
+#define XPM2_XP33 0x04
+#define XPM2_XP32 0x02
+#define XPM2_XP31 0x01
+
+
+/* TSWM (Transparent Service Word Mask)
+ ------------------ E1 ---------------------------- */
+
+#define TSWM_TSIS 0x80
+#define TSWM_TSIF 0x40
+#define TSWM_TRA 0x20
+#define TSWM_TSA4 0x10
+#define TSWM_TSA5 0x08
+#define TSWM_TSA6 0x04
+#define TSWM_TSA7 0x02
+#define TSWM_TSA8 0x01
+
+/* IDLE <Idle Channel Code Register>
+
+ ------------------ E1 & T1 ----------------------- */
+
+#define IDLE_IDL7 0x80
+#define IDLE_IDL6 0x40
+#define IDLE_IDL5 0x20
+#define IDLE_IDL4 0x10
+#define IDLE_IDL3 0x08
+#define IDLE_IDL2 0x04
+#define IDLE_IDL1 0x02
+#define IDLE_IDL0 0x01
+
+
+/* XSA4-8 <Transmit SA4-8 Register(Read/Write) >
+ -------------------E1 ----------------------------- */
+
+#define XSA4_XS47 0x80
+#define XSA4_XS46 0x40
+#define XSA4_XS45 0x20
+#define XSA4_XS44 0x10
+#define XSA4_XS43 0x08
+#define XSA4_XS42 0x04
+#define XSA4_XS41 0x02
+#define XSA4_XS40 0x01
+
+#define XSA5_XS57 0x80
+#define XSA5_XS56 0x40
+#define XSA5_XS55 0x20
+#define XSA5_XS54 0x10
+#define XSA5_XS53 0x08
+#define XSA5_XS52 0x04
+#define XSA5_XS51 0x02
+#define XSA5_XS50 0x01
+
+#define XSA6_XS67 0x80
+#define XSA6_XS66 0x40
+#define XSA6_XS65 0x20
+#define XSA6_XS64 0x10
+#define XSA6_XS63 0x08
+#define XSA6_XS62 0x04
+#define XSA6_XS61 0x02
+#define XSA6_XS60 0x01
+
+#define XSA7_XS77 0x80
+#define XSA7_XS76 0x40
+#define XSA7_XS75 0x20
+#define XSA7_XS74 0x10
+#define XSA7_XS73 0x08
+#define XSA7_XS72 0x04
+#define XSA7_XS71 0x02
+#define XSA7_XS70 0x01
+
+#define XSA8_XS87 0x80
+#define XSA8_XS86 0x40
+#define XSA8_XS85 0x20
+#define XSA8_XS84 0x10
+#define XSA8_XS83 0x08
+#define XSA8_XS82 0x04
+#define XSA8_XS81 0x02
+#define XSA8_XS80 0x01
+
+
+/* XDL1-3 (Transmit DL-Bit Register1-3 (read/write))
+ ----------------------- T1 --------------------- */
+
+#define XDL1_XDL17 0x80
+#define XDL1_XDL16 0x40
+#define XDL1_XDL15 0x20
+#define XDL1_XDL14 0x10
+#define XDL1_XDL13 0x08
+#define XDL1_XDL12 0x04
+#define XDL1_XDL11 0x02
+#define XDL1_XDL10 0x01
+
+#define XDL2_XDL27 0x80
+#define XDL2_XDL26 0x40
+#define XDL2_XDL25 0x20
+#define XDL2_XDL24 0x10
+#define XDL2_XDL23 0x08
+#define XDL2_XDL22 0x04
+#define XDL2_XDL21 0x02
+#define XDL2_XDL20 0x01
+
+#define XDL3_XDL37 0x80
+#define XDL3_XDL36 0x40
+#define XDL3_XDL35 0x20
+#define XDL3_XDL34 0x10
+#define XDL3_XDL33 0x08
+#define XDL3_XDL32 0x04
+#define XDL3_XDL31 0x02
+#define XDL3_XDL30 0x01
+
+
+/* ICB1-4 (Idle Channel Register 1-4)
+ ------------------ E1 ---------------------------- */
+
+#define E1_ICB1_IC0 0x80
+#define E1_ICB1_IC1 0x40
+#define E1_ICB1_IC2 0x20
+#define E1_ICB1_IC3 0x10
+#define E1_ICB1_IC4 0x08
+#define E1_ICB1_IC5 0x04
+#define E1_ICB1_IC6 0x02
+#define E1_ICB1_IC7 0x01
+
+#define E1_ICB2_IC8 0x80
+#define E1_ICB2_IC9 0x40
+#define E1_ICB2_IC10 0x20
+#define E1_ICB2_IC11 0x10
+#define E1_ICB2_IC12 0x08
+#define E1_ICB2_IC13 0x04
+#define E1_ICB2_IC14 0x02
+#define E1_ICB2_IC15 0x01
+
+#define E1_ICB3_IC16 0x80
+#define E1_ICB3_IC17 0x40
+#define E1_ICB3_IC18 0x20
+#define E1_ICB3_IC19 0x10
+#define E1_ICB3_IC20 0x08
+#define E1_ICB3_IC21 0x04
+#define E1_ICB3_IC22 0x02
+#define E1_ICB3_IC23 0x01
+
+#define E1_ICB4_IC24 0x80
+#define E1_ICB4_IC25 0x40
+#define E1_ICB4_IC26 0x20
+#define E1_ICB4_IC27 0x10
+#define E1_ICB4_IC28 0x08
+#define E1_ICB4_IC29 0x04
+#define E1_ICB4_IC30 0x02
+#define E1_ICB4_IC31 0x01
+
+/* ICB1-4 (Idle Channel Register 1-4)
+ ------------------ T1 ---------------------------- */
+
+#define T1_ICB1_IC1 0x80
+#define T1_ICB1_IC2 0x40
+#define T1_ICB1_IC3 0x20
+#define T1_ICB1_IC4 0x10
+#define T1_ICB1_IC5 0x08
+#define T1_ICB1_IC6 0x04
+#define T1_ICB1_IC7 0x02
+#define T1_ICB1_IC8 0x01
+
+#define T1_ICB2_IC9 0x80
+#define T1_ICB2_IC10 0x40
+#define T1_ICB2_IC11 0x20
+#define T1_ICB2_IC12 0x10
+#define T1_ICB2_IC13 0x08
+#define T1_ICB2_IC14 0x04
+#define T1_ICB2_IC15 0x02
+#define T1_ICB2_IC16 0x01
+
+#define T1_ICB3_IC17 0x80
+#define T1_ICB3_IC18 0x40
+#define T1_ICB3_IC19 0x20
+#define T1_ICB3_IC20 0x10
+#define T1_ICB3_IC21 0x08
+#define T1_ICB3_IC22 0x04
+#define T1_ICB3_IC23 0x02
+#define T1_ICB3_IC24 0x01
+
+/* FMR3 (Framer Mode Register 3)
+ --------------------E1------------------------ */
+
+#define FMR3_CMI 0x08
+#define FMR3_SYNSA 0x04
+#define FMR3_CFRZ 0x02
+#define FMR3_EXTIW 0x01
+
+
+
+/* CCB1-3 (Clear Channel Register)
+ ------------------- T1 ----------------------- */
+
+#define CCB1_CH1 0x80
+#define CCB1_CH2 0x40
+#define CCB1_CH3 0x20
+#define CCB1_CH4 0x10
+#define CCB1_CH5 0x08
+#define CCB1_CH6 0x04
+#define CCB1_CH7 0x02
+#define CCB1_CH8 0x01
+
+#define CCB2_CH9 0x80
+#define CCB2_CH10 0x40
+#define CCB2_CH11 0x20
+#define CCB2_CH12 0x10
+#define CCB2_CH13 0x08
+#define CCB2_CH14 0x04
+#define CCB2_CH15 0x02
+#define CCB2_CH16 0x01
+
+#define CCB3_CH17 0x80
+#define CCB3_CH18 0x40
+#define CCB3_CH19 0x20
+#define CCB3_CH20 0x10
+#define CCB3_CH21 0x08
+#define CCB3_CH22 0x04
+#define CCB3_CH23 0x02
+#define CCB3_CH24 0x01
+
+
+/* LIM0/1 (Line Interface Mode 0/1)
+ ------------------- E1 & T1 --------------------------- */
+
+#define LIM0_XFB 0x80
+#define LIM0_XDOS 0x40
+#define LIM0_SCL1 0x20
+#define LIM0_SCL0 0x10
+#define LIM0_EQON 0x08
+#define LIM0_ELOS 0x04
+#define LIM0_LL 0x02
+#define LIM0_MAS 0x01
+
+#define LIM1_EFSC 0x80
+#define LIM1_RIL2 0x40
+#define LIM1_RIL1 0x20
+#define LIM1_RIL0 0x10
+#define LIM1_DCOC 0x08
+#define LIM1_JATT 0x04
+#define LIM1_RL 0x02
+#define LIM1_DRS 0x01
+
+
+/* PCDR (Pulse Count Detection Register(Read/Write))
+ ------------------ E1 & T1 ------------------------- */
+
+#define PCDR_PCD7 0x80
+#define PCDR_PCD6 0x40
+#define PCDR_PCD5 0x20
+#define PCDR_PCD4 0x10
+#define PCDR_PCD3 0x08
+#define PCDR_PCD2 0x04
+#define PCDR_PCD1 0x02
+#define PCDR_PCD0 0x01
+
+#define PCRR_PCR7 0x80
+#define PCRR_PCR6 0x40
+#define PCRR_PCR5 0x20
+#define PCRR_PCR4 0x10
+#define PCRR_PCR3 0x08
+#define PCRR_PCR2 0x04
+#define PCRR_PCR1 0x02
+#define PCRR_PCR0 0x01
+
+
+/* LIM2 (Line Interface Mode 2)
+
+ ------------------ E1 & T1 ---------------------------- */
+
+#define LIM2_DJA2 0x20
+#define LIM2_DJA1 0x10
+#define LIM2_LOS2 0x02
+#define LIM2_LOS1 0x01
+
+/* LCR1 (Loop Code Register 1) */
+
+#define LCR1_EPRM 0x80
+#define LCR1_XPRBS 0x40
+
+/* SIC1 (System Interface Control 1) */
+#define SIC1_SRSC 0x80
+#define SIC1_RBS1 0x20
+#define SIC1_RBS0 0x10
+#define SIC1_SXSC 0x08
+#define SIC1_XBS1 0x02
+#define SIC1_XBS0 0x01
+
+/* DEC (Disable Error Counter)
+ ------------------ E1 & T1 ---------------------------- */
+
+#define DEC_DCEC3 0x20
+#define DEC_DBEC 0x10
+#define DEC_DCEC1 0x08
+#define DEC_DCEC 0x08
+#define DEC_DEBC 0x04
+#define DEC_DCVC 0x02
+#define DEC_DFEC 0x01
+
+
+/* FALC Register Bits (Receive Mode)
+ ---------------------------------------------------------------------------- */
+
+
+/* FRS0/1 (Framer Receive Status Register 0/1)
+ ----------------- E1 & T1 ---------------------------------- */
+
+#define FRS0_LOS 0x80
+#define FRS0_AIS 0x40
+#define FRS0_LFA 0x20
+#define FRS0_RRA 0x10
+#define FRS0_API 0x08
+#define FRS0_NMF 0x04
+#define FRS0_LMFA 0x02
+#define FRS0_FSRF 0x01
+
+#define FRS1_TS16RA 0x40
+#define FRS1_TS16LOS 0x20
+#define FRS1_TS16AIS 0x10
+#define FRS1_TS16LFA 0x08
+#define FRS1_EXZD 0x80
+#define FRS1_LLBDD 0x10
+#define FRS1_LLBAD 0x08
+#define FRS1_XLS 0x02
+#define FRS1_XLO 0x01
+#define FRS1_PDEN 0x40
+
+/* FRS2/3 (Framer Receive Status Register 2/3)
+ ----------------- T1 ---------------------------------- */
+
+#define FRS2_ESC2 0x80
+#define FRS2_ESC1 0x40
+#define FRS2_ESC0 0x20
+
+#define FRS3_FEH5 0x20
+#define FRS3_FEH4 0x10
+#define FRS3_FEH3 0x08
+#define FRS3_FEH2 0x04
+#define FRS3_FEH1 0x02
+#define FRS3_FEH0 0x01
+
+
+/* RSW (Receive Service Word Pulseframe)
+ ----------------- E1 ------------------------------ */
+
+#define RSW_RSI 0x80
+#define RSW_RRA 0x20
+#define RSW_RYO 0x10
+#define RSW_RY1 0x08
+#define RSW_RY2 0x04
+#define RSW_RY3 0x02
+#define RSW_RY4 0x01
+
+
+/* RSP (Receive Spare Bits / Additional Status)
+ ---------------- E1 ------------------------------- */
+
+#define RSP_SI1 0x80
+#define RSP_SI2 0x40
+#define RSP_LLBDD 0x10
+#define RSP_LLBAD 0x08
+#define RSP_RSIF 0x04
+#define RSP_RS13 0x02
+#define RSP_RS15 0x01
+
+
+/* FECL (Framing Error Counter)
+ ---------------- E1 & T1 -------------------------- */
+
+#define FECL_FE7 0x80
+#define FECL_FE6 0x40
+#define FECL_FE5 0x20
+#define FECL_FE4 0x10
+#define FECL_FE3 0x08
+#define FECL_FE2 0x04
+#define FECL_FE1 0x02
+#define FECL_FE0 0x01
+
+#define FECH_FE15 0x80
+#define FECH_FE14 0x40
+#define FECH_FE13 0x20
+#define FECH_FE12 0x10
+#define FECH_FE11 0x08
+#define FECH_FE10 0x04
+#define FECH_FE9 0x02
+#define FECH_FE8 0x01
+
+
+/* CVCl (Code Violation Counter)
+ ----------------- E1 ------------------------- */
+
+#define CVCL_CV7 0x80
+#define CVCL_CV6 0x40
+#define CVCL_CV5 0x20
+#define CVCL_CV4 0x10
+#define CVCL_CV3 0x08
+#define CVCL_CV2 0x04
+#define CVCL_CV1 0x02
+#define CVCL_CV0 0x01
+
+#define CVCH_CV15 0x80
+#define CVCH_CV14 0x40
+#define CVCH_CV13 0x20
+#define CVCH_CV12 0x10
+#define CVCH_CV11 0x08
+#define CVCH_CV10 0x04
+#define CVCH_CV9 0x02
+#define CVCH_CV8 0x01
+
+
+/* CEC1-3L (CRC Error Counter)
+ ------------------ E1 ----------------------------- */
+
+#define CEC1L_CR7 0x80
+#define CEC1L_CR6 0x40
+#define CEC1L_CR5 0x20
+#define CEC1L_CR4 0x10
+#define CEC1L_CR3 0x08
+#define CEC1L_CR2 0x04
+#define CEC1L_CR1 0x02
+#define CEC1L_CR0 0x01
+
+#define CEC1H_CR15 0x80
+#define CEC1H_CR14 0x40
+#define CEC1H_CR13 0x20
+#define CEC1H_CR12 0x10
+#define CEC1H_CR11 0x08
+#define CEC1H_CR10 0x04
+#define CEC1H_CR9 0x02
+#define CEC1H_CR8 0x01
+
+#define CEC2L_CR7 0x80
+#define CEC2L_CR6 0x40
+#define CEC2L_CR5 0x20
+#define CEC2L_CR4 0x10
+#define CEC2L_CR3 0x08
+#define CEC2L_CR2 0x04
+#define CEC2L_CR1 0x02
+#define CEC2L_CR0 0x01
+
+#define CEC2H_CR15 0x80
+#define CEC2H_CR14 0x40
+#define CEC2H_CR13 0x20
+#define CEC2H_CR12 0x10
+#define CEC2H_CR11 0x08
+#define CEC2H_CR10 0x04
+#define CEC2H_CR9 0x02
+#define CEC2H_CR8 0x01
+
+#define CEC3L_CR7 0x80
+#define CEC3L_CR6 0x40
+#define CEC3L_CR5 0x20
+#define CEC3L_CR4 0x10
+#define CEC3L_CR3 0x08
+#define CEC3L_CR2 0x04
+#define CEC3L_CR1 0x02
+#define CEC3L_CR0 0x01
+
+#define CEC3H_CR15 0x80
+#define CEC3H_CR14 0x40
+#define CEC3H_CR13 0x20
+#define CEC3H_CR12 0x10
+#define CEC3H_CR11 0x08
+#define CEC3H_CR10 0x04
+#define CEC3H_CR9 0x02
+#define CEC3H_CR8 0x01
+
+
+/* CECL (CRC Error Counter)
+
+ ------------------ T1 ----------------------------- */
+
+#define CECL_CR7 0x80
+#define CECL_CR6 0x40
+#define CECL_CR5 0x20
+#define CECL_CR4 0x10
+#define CECL_CR3 0x08
+#define CECL_CR2 0x04
+#define CECL_CR1 0x02
+#define CECL_CR0 0x01
+
+#define CECH_CR15 0x80
+#define CECH_CR14 0x40
+#define CECH_CR13 0x20
+#define CECH_CR12 0x10
+#define CECH_CR11 0x08
+#define CECH_CR10 0x04
+#define CECH_CR9 0x02
+#define CECH_CR8 0x01
+
+/* EBCL (E Bit Error Counter)
+ ------------------- E1 & T1 ------------------------- */
+
+#define EBCL_EB7 0x80
+#define EBCL_EB6 0x40
+#define EBCL_EB5 0x20
+#define EBCL_EB4 0x10
+#define EBCL_EB3 0x08
+#define EBCL_EB2 0x04
+#define EBCL_EB1 0x02
+#define EBCL_EB0 0x01
+
+#define EBCH_EB15 0x80
+#define EBCH_EB14 0x40
+#define EBCH_EB13 0x20
+#define EBCH_EB12 0x10
+#define EBCH_EB11 0x08
+#define EBCH_EB10 0x04
+#define EBCH_EB9 0x02
+#define EBCH_EB8 0x01
+
+
+/* RSA4-8 (Receive Sa4-8-Bit Register)
+ -------------------- E1 --------------------------- */
+
+#define RSA4_RS47 0x80
+#define RSA4_RS46 0x40
+#define RSA4_RS45 0x20
+#define RSA4_RS44 0x10
+#define RSA4_RS43 0x08
+#define RSA4_RS42 0x04
+#define RSA4_RS41 0x02
+#define RSA4_RS40 0x01
+
+#define RSA5_RS57 0x80
+#define RSA5_RS56 0x40
+#define RSA5_RS55 0x20
+#define RSA5_RS54 0x10
+#define RSA5_RS53 0x08
+#define RSA5_RS52 0x04
+#define RSA5_RS51 0x02
+#define RSA5_RS50 0x01
+
+#define RSA6_RS67 0x80
+#define RSA6_RS66 0x40
+#define RSA6_RS65 0x20
+#define RSA6_RS64 0x10
+#define RSA6_RS63 0x08
+#define RSA6_RS62 0x04
+#define RSA6_RS61 0x02
+#define RSA6_RS60 0x01
+
+#define RSA7_RS77 0x80
+#define RSA7_RS76 0x40
+#define RSA7_RS75 0x20
+#define RSA7_RS74 0x10
+#define RSA7_RS73 0x08
+#define RSA7_RS72 0x04
+#define RSA7_RS71 0x02
+#define RSA7_RS70 0x01
+
+#define RSA8_RS87 0x80
+#define RSA8_RS86 0x40
+#define RSA8_RS85 0x20
+#define RSA8_RS84 0x10
+#define RSA8_RS83 0x08
+#define RSA8_RS82 0x04
+#define RSA8_RS81 0x02
+#define RSA8_RS80 0x01
+
+/* RSA6S (Receive Sa6 Bit Status Register)
+ ------------------------ T1 ------------------------- */
+
+#define RSA6S_SX 0x20
+#define RSA6S_SF 0x10
+#define RSA6S_SE 0x08
+#define RSA6S_SC 0x04
+#define RSA6S_SA 0x02
+#define RSA6S_S8 0x01
+
+
+/* RDL1-3 (Receive DL-Bit Register 1-3)
+ ------------------------ T1 ------------------------- */
+
+#define RDL1_RDL17 0x80
+#define RDL1_RDL16 0x40
+#define RDL1_RDL15 0x20
+#define RDL1_RDL14 0x10
+#define RDL1_RDL13 0x08
+#define RDL1_RDL12 0x04
+#define RDL1_RDL11 0x02
+#define RDL1_RDL10 0x01
+
+#define RDL2_RDL27 0x80
+#define RDL2_RDL26 0x40
+#define RDL2_RDL25 0x20
+#define RDL2_RDL24 0x10
+#define RDL2_RDL23 0x08
+#define RDL2_RDL22 0x04
+#define RDL2_RDL21 0x02
+#define RDL2_RDL20 0x01
+
+#define RDL3_RDL37 0x80
+#define RDL3_RDL36 0x40
+#define RDL3_RDL35 0x20
+#define RDL3_RDL34 0x10
+#define RDL3_RDL33 0x08
+#define RDL3_RDL32 0x04
+#define RDL3_RDL31 0x02
+#define RDL3_RDL30 0x01
+
+
+/* SIS (Signaling Status Register)
+
+ -------------------- E1 & T1 -------------------------- */
+
+#define SIS_XDOV 0x80
+#define SIS_XFW 0x40
+#define SIS_XREP 0x20
+#define SIS_RLI 0x08
+#define SIS_CEC 0x04
+#define SIS_BOM 0x01
+
+
+/* RSIS (Receive Signaling Status Register)
+
+ -------------------- E1 & T1 --------------------------- */
+
+#define RSIS_VFR 0x80
+#define RSIS_RDO 0x40
+#define RSIS_CRC16 0x20
+#define RSIS_RAB 0x10
+#define RSIS_HA1 0x08
+#define RSIS_HA0 0x04
+#define RSIS_HFR 0x02
+#define RSIS_LA 0x01
+
+
+/* RBCL/H (Receive Byte Count Low/High)
+
+ ------------------- E1 & T1 ----------------------- */
+
+#define RBCL_RBC7 0x80
+#define RBCL_RBC6 0x40
+#define RBCL_RBC5 0x20
+#define RBCL_RBC4 0x10
+#define RBCL_RBC3 0x08
+#define RBCL_RBC2 0x04
+#define RBCL_RBC1 0x02
+#define RBCL_RBC0 0x01
+
+#define RBCH_OV 0x10
+#define RBCH_RBC11 0x08
+#define RBCH_RBC10 0x04
+#define RBCH_RBC9 0x02
+#define RBCH_RBC8 0x01
+
+
+/* ISR0-3 (Interrupt Status Register 0-3)
+
+ ------------------ E1 & T1 ------------------------------ */
+
+#define FISR0_RME 0x80
+#define FISR0_RFS 0x40
+#define FISR0_T8MS 0x20
+#define FISR0_ISF 0x20
+#define FISR0_RMB 0x10
+#define FISR0_CASC 0x08
+#define FISR0_RSC 0x08
+#define FISR0_CRC6 0x04
+#define FISR0_CRC4 0x04
+#define FISR0_PDEN 0x02
+#define FISR0_RPF 0x01
+
+#define FISR1_CASE 0x80
+#define FISR1_LLBSC 0x80
+#define FISR1_RDO 0x40
+#define FISR1_ALLS 0x20
+#define FISR1_XDU 0x10
+#define FISR1_XMB 0x08
+#define FISR1_XLSC 0x02
+#define FISR1_XPR 0x01
+
+#define FISR2_FAR 0x80
+#define FISR2_LFA 0x40
+#define FISR2_MFAR 0x20
+#define FISR2_T400MS 0x10
+#define FISR2_LMFA 0x10
+#define FISR2_AIS 0x08
+#define FISR2_LOS 0x04
+#define FISR2_RAR 0x02
+#define FISR2_RA 0x01
+
+#define FISR3_ES 0x80
+#define FISR3_SEC 0x40
+#define FISR3_LMFA16 0x20
+#define FISR3_AIS16 0x10
+#define FISR3_RA16 0x08
+#define FISR3_API 0x04
+#define FISR3_XSLP 0x20
+#define FISR3_XSLN 0x10
+#define FISR3_LLBSC 0x08
+#define FISR3_XRS 0x04
+#define FISR3_SLN 0x02
+#define FISR3_SLP 0x01
+
+
+/* GIS (Global Interrupt Status Register)
+
+ --------------------- E1 & T1 --------------------- */
+
+#define GIS_ISR3 0x08
+#define GIS_ISR2 0x04
+#define GIS_ISR1 0x02
+#define GIS_ISR0 0x01
+
+
+/* VSTR (Version Status Register)
+
+ --------------------- E1 & T1 --------------------- */
+
+#define VSTR_VN3 0x08
+#define VSTR_VN2 0x04
+#define VSTR_VN1 0x02
+#define VSTR_VN0 0x01
+
+
+/*>>>>>>>>>>>>>>>>>>>>> Local Control Structures <<<<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Write-only Registers (E1/T1 control mode write registers) */
+#define XFIFOH 0x00 /* Tx FIFO High Byte */
+#define XFIFOL 0x01 /* Tx FIFO Low Byte */
+#define CMDR 0x02 /* Command Reg */
+#define DEC 0x60 /* Disable Error Counter */
+#define TEST2 0x62 /* Manuf. Test Reg 2 */
+#define XS(nbr) (0x70 + (nbr)) /* Tx CAS Reg (0 to 15) */
+
+/* Read-write Registers (E1/T1 status mode read registers) */
+#define MODE 0x03 /* Mode Reg */
+#define RAH1 0x04 /* Receive Address High 1 */
+#define RAH2 0x05 /* Receive Address High 2 */
+#define RAL1 0x06 /* Receive Address Low 1 */
+#define RAL2 0x07 /* Receive Address Low 2 */
+#define IPC 0x08 /* Interrupt Port Configuration */
+#define CCR1 0x09 /* Common Configuration Reg 1 */
+#define CCR3 0x0A /* Common Configuration Reg 3 */
+#define PRE 0x0B /* Preamble Reg */
+#define RTR1 0x0C /* Receive Timeslot Reg 1 */
+#define RTR2 0x0D /* Receive Timeslot Reg 2 */
+#define RTR3 0x0E /* Receive Timeslot Reg 3 */
+#define RTR4 0x0F /* Receive Timeslot Reg 4 */
+#define TTR1 0x10 /* Transmit Timeslot Reg 1 */
+#define TTR2 0x11 /* Transmit Timeslot Reg 2 */
+#define TTR3 0x12 /* Transmit Timeslot Reg 3 */
+#define TTR4 0x13 /* Transmit Timeslot Reg 4 */
+#define IMR0 0x14 /* Interrupt Mask Reg 0 */
+#define IMR1 0x15 /* Interrupt Mask Reg 1 */
+#define IMR2 0x16 /* Interrupt Mask Reg 2 */
+#define IMR3 0x17 /* Interrupt Mask Reg 3 */
+#define IMR4 0x18 /* Interrupt Mask Reg 4 */
+#define IMR5 0x19 /* Interrupt Mask Reg 5 */
+#define FMR0 0x1A /* Framer Mode Register 0 */
+#define FMR1 0x1B /* Framer Mode Register 1 */
+#define FMR2 0x1C /* Framer Mode Register 2 */
+#define LOOP 0x1D /* Channel Loop Back */
+#define XSW 0x1E /* Transmit Service Word */
+#define FMR4 0x1E /* Framer Mode Reg 4 */
+#define XSP 0x1F /* Transmit Spare Bits */
+#define FMR5 0x1F /* Framer Mode Reg 5 */
+#define XC0 0x20 /* Transmit Control 0 */
+#define XC1 0x21 /* Transmit Control 1 */
+#define RC0 0x22 /* Receive Control 0 */
+#define RC1 0x23 /* Receive Control 1 */
+#define XPM0 0x24 /* Transmit Pulse Mask 0 */
+#define XPM1 0x25 /* Transmit Pulse Mask 1 */
+#define XPM2 0x26 /* Transmit Pulse Mask 2 */
+#define TSWM 0x27 /* Transparent Service Word Mask */
+#define TEST1 0x28 /* Manuf. Test Reg 1 */
+#define IDLE 0x29 /* Idle Channel Code */
+#define XSA4 0x2A /* Transmit SA4 Bit Reg */
+#define XDL1 0x2A /* Transmit DL-Bit Reg 1 */
+#define XSA5 0x2B /* Transmit SA5 Bit Reg */
+#define XDL2 0x2B /* Transmit DL-Bit Reg 2 */
+#define XSA6 0x2C /* Transmit SA6 Bit Reg */
+#define XDL3 0x2C /* Transmit DL-Bit Reg 3 */
+#define XSA7 0x2D /* Transmit SA7 Bit Reg */
+#define CCB1 0x2D /* Clear Channel Reg 1 */
+#define XSA8 0x2E /* Transmit SA8 Bit Reg */
+#define CCB2 0x2E /* Clear Channel Reg 2 */
+#define FMR3 0x2F /* Framer Mode Reg. 3 */
+#define CCB3 0x2F /* Clear Channel Reg 3 */
+#define ICB1 0x30 /* Idle Channel Reg 1 */
+#define ICB2 0x31 /* Idle Channel Reg 2 */
+#define ICB3 0x32 /* Idle Channel Reg 3 */
+#define ICB4 0x33 /* Idle Channel Reg 4 */
+#define LIM0 0x34 /* Line Interface Mode 0 */
+#define LIM1 0x35 /* Line Interface Mode 1 */
+#define PCDR 0x36 /* Pulse Count Detection */
+#define PCRR 0x37 /* Pulse Count Recovery */
+#define LIM2 0x38 /* Line Interface Mode Reg 2 */
+#define LCR1 0x39 /* Loop Code Reg 1 */
+#define LCR2 0x3A /* Loop Code Reg 2 */
+#define LCR3 0x3B /* Loop Code Reg 3 */
+#define SIC1 0x3C /* System Interface Control 1 */
+
+/* Read-only Registers (E1/T1 control mode read registers) */
+#define RFIFOH 0x00 /* Rx FIFO High Byte */
+#define RFIFOL 0x01 /* Rx FIFO Low Byte */
+#define FRS0 0x4C /* Framer Receive Status 0 */
+#define FRS1 0x4D /* Framer Receive Status 1 */
+#define RSW 0x4E /* Receive Service Word */
+#define FRS2 0x4E /* Framer Receive Status 2 */
+#define RSP 0x4F /* Receive Spare Bits */
+#define FRS3 0x4F /* Framer Receive Status 3 */
+#define FECL 0x50 /* Framing Error Counter */
+#define FECH 0x51 /* Framing Error Counter */
+#define CVCL 0x52 /* Code Violation Counter */
+#define CVCH 0x53 /* Code Violation Counter */
+#define CECL 0x54 /* CRC Error Counter 1 */
+#define CECH 0x55 /* CRC Error Counter 1 */
+#define EBCL 0x56 /* E-Bit Error Counter */
+#define EBCH 0x57 /* E-Bit Error Counter */
+#define BECL 0x58 /* Bit Error Counter Low */
+#define BECH 0x59 /* Bit Error Counter High */
+#define CEC3 0x5A /* CRC Error Counter 3 (16-bit) */
+#define RSA4 0x5C /* Receive SA4 Bit Reg */
+#define RDL1 0x5C /* Receive DL-Bit Reg 1 */
+#define RSA5 0x5D /* Receive SA5 Bit Reg */
+#define RDL2 0x5D /* Receive DL-Bit Reg 2 */
+#define RSA6 0x5E /* Receive SA6 Bit Reg */
+#define RDL3 0x5E /* Receive DL-Bit Reg 3 */
+#define RSA7 0x5F /* Receive SA7 Bit Reg */
+#define RSA8 0x60 /* Receive SA8 Bit Reg */
+#define RSA6S 0x61 /* Receive SA6 Bit Status Reg */
+#define TSR0 0x62 /* Manuf. Test Reg 0 */
+#define TSR1 0x63 /* Manuf. Test Reg 1 */
+#define SIS 0x64 /* Signaling Status Reg */
+#define RSIS 0x65 /* Receive Signaling Status Reg */
+#define RBCL 0x66 /* Receive Byte Count Low */
+#define RBCH 0x67 /* Receive Byte Count High */
+#define FISR0 0x68 /* Interrupt Status Reg 0 */
+#define FISR1 0x69 /* Interrupt Status Reg 1 */
+#define FISR2 0x6A /* Interrupt Status Reg 2 */
+#define FISR3 0x6B /* Interrupt Status Reg 3 */
+#define GIS 0x6E /* Global Interrupt Status */
+#define VSTR 0x6F /* Version Status */
+#define RS(nbr) (0x70 + (nbr)) /* Rx CAS Reg (0 to 15) */
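+
+/* Note (illustrative): RS(0)..RS(15) span offsets 0x70-0x7F of the read map,
+ * mirroring the XS(0)..XS(15) transmit CAS registers at the same offsets in
+ * the write map above. */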
+
+#endif /* _FALC_LH_H */
+
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
new file mode 100644
index 000000000000..73401b0f0151
--- /dev/null
+++ b/drivers/net/wan/pc300.h
@@ -0,0 +1,497 @@
+/*
+ * pc300.h Cyclades-PC300(tm) Kernel API Definitions.
+ *
+ * Author: Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright: (c) 1999-2002 Cyclades Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Log: pc300.h,v $
+ * Revision 3.12 2002/03/07 14:17:09 henrique
+ * License data fixed
+ *
+ * Revision 3.11 2002/01/28 21:09:39 daniela
+ * Included ';' after pc300hw.bus.
+ *
+ * Revision 3.10 2002/01/17 17:58:52 ivan
+ * Support for PC300-TE/M (PMC).
+ *
+ * Revision 3.9 2001/09/28 13:30:53 daniela
+ * Renamed dma_start routine to rx_dma_start.
+ *
+ * Revision 3.8 2001/09/24 13:03:45 daniela
+ * Fixed BOF interrupt treatment. Created dma_start routine.
+ *
+ * Revision 3.7 2001/08/10 17:19:58 daniela
+ * Fixed IOCTLs defines.
+ *
+ * Revision 3.6 2001/07/18 19:24:42 daniela
+ * Included kernel version.
+ *
+ * Revision 3.5 2001/07/05 18:38:08 daniela
+ * DMA transmission bug fix.
+ *
+ * Revision 3.4 2001/06/26 17:10:40 daniela
+ * New configuration parameters (line code, CRC calculation and clock).
+ *
+ * Revision 3.3 2001/06/22 13:13:02 regina
+ * MLPPP implementation
+ *
+ * Revision 3.2 2001/06/18 17:56:09 daniela
+ * Increased DEF_MTU and TX_QUEUE_LEN.
+ *
+ * Revision 3.1 2001/06/15 12:41:10 regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1 2001/06/13 20:25:06 daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 2.3 2001/03/05 daniela
+ * Created struct pc300conf, to provide the hardware information to pc300util.
+ * Inclusion of 'alloc_ramsize' field on structure 'pc300hw'.
+ *
+ * Revision 2.2 2000/12/22 daniela
+ * Structures and defines to support pc300util: statistics, status,
+ * loopback tests, trace.
+ *
+ * Revision 2.1 2000/09/28 ivan
+ * Inclusion of 'iophys' and 'iosize' fields on structure 'pc300hw', to
+ * allow release of I/O region at module unload.
+ * Changed location of include files.
+ *
+ * Revision 2.0 2000/03/27 ivan
+ * Added support for the PC300/TE cards.
+ *
+ * Revision 1.1 2000/01/31 ivan
+ * Replaced 'pc300[drv|sca].h' former PC300 driver include files.
+ *
+ * Revision 1.0 1999/12/16 ivan
+ * First official release.
+ * Inclusion of 'nchan' field on structure 'pc300hw', to allow variable
+ * number of ports per card.
+ * Inclusion of 'if_ptr' field on structure 'pc300dev'.
+ *
+ * Revision 0.6 1999/11/17 ivan
+ * Changed X.25-specific function names to comply with adopted convention.
+ *
+ * Revision 0.5 1999/11/16 Daniela Squassoni
+ * X.25 support.
+ *
+ * Revision 0.4 1999/11/15 ivan
+ * Inclusion of 'clock' field on structure 'pc300hw'.
+ *
+ * Revision 0.3 1999/11/10 ivan
+ * IOCTL name changing.
+ * Inclusion of driver function prototypes.
+ *
+ * Revision 0.2 1999/11/03 ivan
+ * Inclusion of 'tx_skb' and union 'ifu' on structure 'pc300dev'.
+ *
+ * Revision 0.1 1999/01/15 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef _PC300_H
+#define _PC300_H
+
+#include <linux/hdlc.h>
+#include "hd64572.h"
+#include "pc300-falc-lh.h"
+
+#ifndef CY_TYPES
+#define CY_TYPES
+typedef __u64 ucdouble; /* 64 bits, unsigned */
+typedef __u32 uclong; /* 32 bits, unsigned */
+typedef __u16 ucshort; /* 16 bits, unsigned */
+typedef __u8 ucchar; /* 8 bits, unsigned */
+#endif /* CY_TYPES */
+
+#define PC300_PROTO_MLPPP 1
+
+#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
+
+#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
+#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
+
+#define PC300_MAXCARDS 4 /* Max number of cards per system */
+#define PC300_MAXCHAN 2 /* Number of channels per card */
+
+#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */
+#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */
+#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */
+#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */
+
+#define PC300_OSC_CLOCK 24576000
+#define PC300_PCI_CLOCK 33000000
+
+#define BD_DEF_LEN 0x0800 /* DMA buffer length (2KB) */
+#define DMA_TX_MEMSZ 0x8000 /* Total DMA Tx memory size (32KB/ch) */
+#define DMA_RX_MEMSZ 0x10000 /* Total DMA Rx memory size (64KB/ch) */
+
+#define N_DMA_TX_BUF (DMA_TX_MEMSZ / BD_DEF_LEN) /* DMA Tx buffers */
+#define N_DMA_RX_BUF (DMA_RX_MEMSZ / BD_DEF_LEN) /* DMA Rx buffers */
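+
+/* Worked out from the sizes above (illustrative): with BD_DEF_LEN = 0x0800
+ * (2 KB), N_DMA_TX_BUF evaluates to 0x8000/0x0800 = 16 Tx buffers per channel
+ * and N_DMA_RX_BUF to 0x10000/0x0800 = 32 Rx buffers per channel; the
+ * descriptor and buffer offsets below are all derived from these counts. */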
+
+/* DMA Buffer Offsets */
+#define DMA_TX_BASE ((N_DMA_TX_BUF + N_DMA_RX_BUF) * \
+ PC300_MAXCHAN * sizeof(pcsca_bd_t))
+#define DMA_RX_BASE (DMA_TX_BASE + PC300_MAXCHAN*DMA_TX_MEMSZ)
+
+/* DMA Descriptor Offsets */
+#define DMA_TX_BD_BASE 0x0000
+#define DMA_RX_BD_BASE (DMA_TX_BD_BASE + ((PC300_MAXCHAN*DMA_TX_MEMSZ / \
+ BD_DEF_LEN) * sizeof(pcsca_bd_t)))
+
+/* DMA Descriptor Macros */
+#define TX_BD_ADDR(chan, n) (DMA_TX_BD_BASE + \
+ ((N_DMA_TX_BUF*chan) + n) * sizeof(pcsca_bd_t))
+#define RX_BD_ADDR(chan, n) (DMA_RX_BD_BASE + \
+ ((N_DMA_RX_BUF*chan) + n) * sizeof(pcsca_bd_t))
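+
+/* Illustrative expansion of the macros above (assuming the 16 Tx / 32 Rx
+ * buffer counts derived earlier): TX_BD_ADDR(1, 0) resolves to
+ * 16 * sizeof(pcsca_bd_t), i.e. channel 1's Tx ring starts right after the
+ * 16 Tx descriptors of channel 0; DMA_RX_BD_BASE then places the Rx rings
+ * after all 2*16 Tx descriptors, and DMA_TX_BASE puts the data buffers
+ * after the whole descriptor area. */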
+
+/* Macro to access the FALC registers (TE only) */
+#define F_REG(reg, chan) (0x200*(chan) + ((reg)<<2))
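+
+/* Illustrative expansion: each channel occupies a 0x200-byte FALC window and
+ * every register sits on a 32-bit boundary, so e.g. F_REG(CMDR, 0) == 0x08
+ * and F_REG(SIS, 1) == 0x200 + (0x64 << 2) == 0x390. */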
+
+/***************************************
+ * Memory access functions/macros *
+ * (required to support Alpha systems) *
+ ***************************************/
+#ifdef __KERNEL__
+#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
+#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
+#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();}
+
+#define cpc_readb(port) readb(port)
+#define cpc_readw(port) readw(port)
+#define cpc_readl(port) readl(port)
+
+#else /* __KERNEL__ */
+#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
+#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
+#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
+
+#define cpc_readb(port) (*(volatile ucchar *)(port))
+#define cpc_readw(port) (*(volatile ucshort *)(port))
+#define cpc_readl(port) (*(volatile uclong *)(port))
+
+#endif /* __KERNEL__ */
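+
+/* Illustrative usage (a sketch mirroring the read-modify-write pattern used
+ * throughout the driver, not an additional API):
+ *
+ *	cpc_writeb(falcbase + F_REG(IPC, ch),
+ *		   cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI);
+ *
+ * On the kernel side each cpc_write*() is followed by mb(), so MMIO writes
+ * are not reordered (the Alpha case mentioned above). */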
+
+/****** Data Structures *****************************************************/
+
+/*
+ * RUNTIME_9050 - PLX PCI9050-1 local configuration and shared runtime
+ * registers. This structure can be used to access the 9050 registers
+ * (memory mapped).
+ */
+struct RUNTIME_9050 {
+ uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
+ uclong loc_rom_range; /* 10h : Local ROM Range */
+ uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
+ uclong loc_rom_base; /* 24h : Local ROM Base */
+ uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
+ uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */
+ uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
+ uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
+ uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
+};
+
+#define PLX_9050_LINT1_ENABLE 0x01
+#define PLX_9050_LINT1_POL 0x02
+#define PLX_9050_LINT1_STATUS 0x04
+#define PLX_9050_LINT2_ENABLE 0x08
+#define PLX_9050_LINT2_POL 0x10
+#define PLX_9050_LINT2_STATUS 0x20
+#define PLX_9050_INTR_ENABLE 0x40
+#define PLX_9050_SW_INTR 0x80
+
+/* Masks to access the init_ctrl PLX register */
+#define PC300_CLKSEL_MASK (0x00000004UL)
+#define PC300_CHMEDIA_MASK(chan) (0x00000020UL<<(chan*3))
+#define PC300_CTYPE_MASK (0x00000800UL)
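+
+/* Illustrative expansion: PC300_CHMEDIA_MASK(0) selects bit 0x00000020 and
+ * PC300_CHMEDIA_MASK(1) bit 0x00000100 of the PLX init_ctrl register. */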
+
+/* CPLD Registers (base addr = falcbase, TE only) */
+/* CPLD v. 0 */
+#define CPLD_REG1 0x140 /* Chip resets, DCD/CTS status */
+#define CPLD_REG2 0x144 /* Clock enable, LED control */
+/* CPLD v. 2 or higher */
+#define CPLD_V2_REG1 0x100 /* Chip resets, DCD/CTS status */
+#define CPLD_V2_REG2 0x104 /* Clock enable, LED control */
+#define CPLD_ID_REG 0x108 /* CPLD version */
+
+/* CPLD Register bit description: for the FALC bits, they should always be
+ set based on the channel (use (bit<<(2*ch)) to access the correct bit for
+ that channel) */
+#define CPLD_REG1_FALC_RESET 0x01
+#define CPLD_REG1_SCA_RESET 0x02
+#define CPLD_REG1_GLOBAL_CLK 0x08
+#define CPLD_REG1_FALC_DCD 0x10
+#define CPLD_REG1_FALC_CTS 0x20
+
+#define CPLD_REG2_FALC_TX_CLK 0x01
+#define CPLD_REG2_FALC_RX_CLK 0x02
+#define CPLD_REG2_FALC_LED1 0x10
+#define CPLD_REG2_FALC_LED2 0x20
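+
+/* Illustrative expansion of the (bit << (2*ch)) convention described above:
+ * for channel 1 the DCD status bit is CPLD_REG1_FALC_DCD << 2 == 0x40 and the
+ * CTS bit is CPLD_REG1_FALC_CTS << 2 == 0x80; falc_enable_comm() and
+ * falc_disable_comm() in pc300_drv.c apply the same shift. */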
+
+/* Structure with FALC-related fields (TE only) */
+#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
+
+typedef struct falc {
+ ucchar sync; /* If true FALC is synchronized */
+ ucchar active; /* if TRUE then already active */
+ ucchar loop_active; /* if TRUE a line loopback UP was received */
+ ucchar loop_gen; /* if TRUE a line loopback UP was issued */
+
+ ucchar num_channels;
+ ucchar offset; /* 1 for T1, 0 for E1 */
+ ucchar full_bandwidth;
+
+ ucchar xmb_cause;
+ ucchar multiframe_mode;
+
+ /* Statistics */
+ ucshort pden; /* Pulse Density violation count */
+ ucshort los; /* Loss of Signal count */
+ ucshort losr; /* Loss of Signal recovery count */
+ ucshort lfa; /* Loss of frame alignment count */
+ ucshort farec; /* Frame Alignment Recovery count */
+ ucshort lmfa; /* Loss of multiframe alignment count */
+ ucshort ais; /* Remote Alarm indication Signal count */
+ ucshort sec; /* One-second timer */
+ ucshort es; /* Errored second */
+ ucshort rai; /* remote alarm received */
+ ucshort bec;
+ ucshort fec;
+ ucshort cvc;
+ ucshort cec;
+ ucshort ebc;
+
+ /* Status */
+ ucchar red_alarm;
+ ucchar blue_alarm;
+ ucchar loss_fa;
+ ucchar yellow_alarm;
+ ucchar loss_mfa;
+ ucchar prbs;
+} falc_t;
+
+typedef struct falc_status {
+ ucchar sync; /* If true FALC is synchronized */
+ ucchar red_alarm;
+ ucchar blue_alarm;
+ ucchar loss_fa;
+ ucchar yellow_alarm;
+ ucchar loss_mfa;
+ ucchar prbs;
+} falc_status_t;
+
+typedef struct rsv_x21_status {
+ ucchar dcd;
+ ucchar dsr;
+ ucchar cts;
+ ucchar rts;
+ ucchar dtr;
+} rsv_x21_status_t;
+
+typedef struct pc300stats {
+ int hw_type;
+ uclong line_on;
+ uclong line_off;
+ struct net_device_stats gen_stats;
+ falc_t te_stats;
+} pc300stats_t;
+
+typedef struct pc300status {
+ int hw_type;
+ rsv_x21_status_t gen_status;
+ falc_status_t te_status;
+} pc300status_t;
+
+typedef struct pc300loopback {
+ char loop_type;
+ char loop_on;
+} pc300loopback_t;
+
+typedef struct pc300patterntst {
+ char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
+ ucshort num_errors;
+} pc300patterntst_t;
+
+typedef struct pc300dev {
+ void *if_ptr; /* General purpose pointer */
+ struct pc300ch *chan;
+ ucchar trace_on;
+ uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
+ uclong line_off;
+#ifdef __KERNEL__
+ char name[16];
+ struct net_device *dev;
+
+ void *private;
+ struct sk_buff *tx_skb;
+ union { /* This union has all the protocol-specific structures */
+ struct ppp_device pppdev;
+ }ifu;
+#ifdef CONFIG_PC300_MLPPP
+ void *cpc_tty; /* information to PC300 TTY driver */
+#endif
+#endif /* __KERNEL__ */
+}pc300dev_t;
+
+typedef struct pc300hw {
+ int type; /* RSV, X21, etc. */
+ int bus; /* Bus (PCI, PMC, etc.) */
+ int nchan; /* number of channels */
+ int irq; /* interrupt request level */
+ uclong clock; /* Board clock */
+ ucchar cpld_id; /* CPLD ID (TE only) */
+ ucshort cpld_reg1; /* CPLD reg 1 (TE only) */
+ ucshort cpld_reg2; /* CPLD reg 2 (TE only) */
+ ucshort gpioc_reg; /* PLX GPIOC reg */
+ ucshort intctl_reg; /* PLX Int Ctrl/Status reg */
+ uclong iophys; /* PLX registers I/O base */
+ uclong iosize; /* PLX registers I/O size */
+ uclong plxphys; /* PLX registers MMIO base (physical) */
+ void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
+ uclong plxsize; /* PLX registers MMIO size */
+ uclong scaphys; /* SCA registers MMIO base (physical) */
+ void __iomem * scabase; /* SCA registers MMIO base (virtual) */
+ uclong scasize; /* SCA registers MMIO size */
+ uclong ramphys; /* On-board RAM MMIO base (physical) */
+ void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
+ uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
+ uclong ramsize; /* On-board RAM MMIO size */
+ uclong falcphys; /* FALC registers MMIO base (physical) */
+ void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
+ uclong falcsize; /* FALC registers MMIO size */
+} pc300hw_t;
+
+typedef struct pc300chconf {
+ sync_serial_settings phys_settings; /* Clock type/rate (in bps),
+ loopback mode */
+ raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
+ uclong media; /* HW media (RS232, V.35, etc.) */
+ uclong proto; /* Protocol (PPP, X.25, etc.) */
+ ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
+
+ /* TE-specific parameters */
+ ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */
+ ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */
+ ucchar lbo; /* Line Build Out */
+ ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */
+ uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
+} pc300chconf_t;
+
+typedef struct pc300ch {
+ struct pc300 *card;
+ int channel;
+ pc300dev_t d;
+ pc300chconf_t conf;
+ ucchar tx_first_bd; /* First TX DMA block descr. w/ data */
+ ucchar tx_next_bd; /* Next free TX DMA block descriptor */
+ ucchar rx_first_bd; /* First free RX DMA block descriptor */
+ ucchar rx_last_bd; /* Last free RX DMA block descriptor */
+ ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */
+ falc_t falc; /* FALC structure (TE only) */
+} pc300ch_t;
+
+typedef struct pc300 {
+ pc300hw_t hw; /* hardware config. */
+ pc300ch_t chan[PC300_MAXCHAN];
+#ifdef __KERNEL__
+ spinlock_t card_lock;
+#endif /* __KERNEL__ */
+} pc300_t;
+
+typedef struct pc300conf {
+ pc300hw_t hw;
+ pc300chconf_t conf;
+} pc300conf_t;
+
+/* DEV ioctl() commands */
+#define N_SPPP_IOCTLS 2
+
+enum pc300_ioctl_cmds {
+ SIOCCPCRESERVED = (SIOCDEVPRIVATE + N_SPPP_IOCTLS),
+ SIOCGPC300CONF,
+ SIOCSPC300CONF,
+ SIOCGPC300STATUS,
+ SIOCGPC300FALCSTATUS,
+ SIOCGPC300UTILSTATS,
+ SIOCGPC300UTILSTATUS,
+ SIOCSPC300TRACE,
+ SIOCSPC300LOOPBACK,
+ SIOCSPC300PATTERNTEST,
+};
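+
+/* Illustrative only: with N_SPPP_IOCTLS == 2, SIOCCPCRESERVED lands on
+ * SIOCDEVPRIVATE + 2 and SIOCGPC300CONF on SIOCDEVPRIVATE + 3, and so on.
+ * A userland tool (pc300util, presumably) would issue them through a plain
+ * ioctl(2) on the hdlcN interface, along the lines of:
+ *
+ *	struct ifreq ifr;
+ *	pc300conf_t conf;
+ *	strcpy(ifr.ifr_name, "hdlc0");
+ *	ifr.ifr_data = (void *)&conf;
+ *	ioctl(fd, SIOCGPC300CONF, &ifr);
+ *
+ * (hypothetical sketch; the exact ifr_data layout is whatever cpc_ioctl()
+ * expects). */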
+
+/* Loopback types - PC300/TE boards */
+enum pc300_loopback_cmds {
+ PC300LOCLOOP = 1,
+ PC300REMLOOP,
+ PC300PAYLOADLOOP,
+ PC300GENLOOPUP,
+ PC300GENLOOPDOWN,
+};
+
+/* Control Constant Definitions */
+#define PC300_RSV 0x01
+#define PC300_X21 0x02
+#define PC300_TE 0x03
+
+#define PC300_PCI 0x00
+#define PC300_PMC 0x01
+
+#define PC300_LC_AMI 0x01
+#define PC300_LC_B8ZS 0x02
+#define PC300_LC_NRZ 0x03
+#define PC300_LC_HDB3 0x04
+
+/* Framing (T1) */
+#define PC300_FR_ESF 0x01
+#define PC300_FR_D4 0x02
+#define PC300_FR_ESF_JAPAN 0x03
+
+/* Framing (E1) */
+#define PC300_FR_MF_CRC4 0x04
+#define PC300_FR_MF_NON_CRC4 0x05
+#define PC300_FR_UNFRAMED 0x06
+
+#define PC300_LBO_0_DB 0x00
+#define PC300_LBO_7_5_DB 0x01
+#define PC300_LBO_15_DB 0x02
+#define PC300_LBO_22_5_DB 0x03
+
+#define PC300_RX_SENS_SH 0x01
+#define PC300_RX_SENS_LH 0x02
+
+#define PC300_TX_TIMEOUT (2*HZ)
+#define PC300_TX_QUEUE_LEN 100
+#define PC300_DEF_MTU 1600
+
+#ifdef __KERNEL__
+/* Function Prototypes */
+int dma_buf_write(pc300_t *, int, ucchar *, int);
+int dma_buf_read(pc300_t *, int, struct sk_buff *);
+void tx_dma_start(pc300_t *, int);
+void rx_dma_start(pc300_t *, int);
+void tx_dma_stop(pc300_t *, int);
+void rx_dma_stop(pc300_t *, int);
+int cpc_queue_xmit(struct sk_buff *, struct net_device *);
+void cpc_net_rx(struct net_device *);
+void cpc_sca_status(pc300_t *, int);
+int cpc_change_mtu(struct net_device *, int);
+int cpc_ioctl(struct net_device *, struct ifreq *, int);
+int ch_config(pc300dev_t *);
+int rx_config(pc300dev_t *);
+int tx_config(pc300dev_t *);
+void cpc_opench(pc300dev_t *);
+void cpc_closech(pc300dev_t *);
+int cpc_open(struct net_device *dev);
+int cpc_close(struct net_device *dev);
+int cpc_set_media(hdlc_device *, int);
+#endif /* __KERNEL__ */
+
+#endif /* _PC300_H */
+
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
new file mode 100644
index 000000000000..d67be2587d4d
--- /dev/null
+++ b/drivers/net/wan/pc300_drv.c
@@ -0,0 +1,3692 @@
+#define USE_PCI_CLOCK
+static char rcsid[] =
+"Revision: 3.4.5 Date: 2002/03/07 ";
+
+/*
+ * pc300.c Cyclades-PC300(tm) Driver.
+ *
+ * Author: Ivan Passos <ivan@cyclades.com>
+ * Maintainer: PC300 Maintainer <pc300@cyclades.com>
+ *
+ * Copyright: (c) 1999-2003 Cyclades Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Using tabstop = 4.
+ *
+ * $Log: pc300_drv.c,v $
+ * Revision 3.23 2002/03/20 13:58:40 henrique
+ * Fixed orthographic mistakes
+ *
+ * Revision 3.22 2002/03/13 16:56:56 henrique
+ * Take out the debug messages
+ *
+ * Revision 3.21 2002/03/07 14:17:09 henrique
+ * License data fixed
+ *
+ * Revision 3.20 2002/01/17 17:58:52 ivan
+ * Support for PC300-TE/M (PMC).
+ *
+ * Revision 3.19 2002/01/03 17:08:47 daniela
+ * Enables DMA reception when the SCA-II disables it improperly.
+ *
+ * Revision 3.18 2001/12/03 18:47:50 daniela
+ * Esthetic changes.
+ *
+ * Revision 3.17 2001/10/19 16:50:13 henrique
+ * Patch to kernel 2.4.12 and new generic hdlc.
+ *
+ * Revision 3.16 2001/10/16 15:12:31 regina
+ * clear statistics
+ *
+ * Revision 3.11 to 3.15 2001/10/11 20:26:04 daniela
+ * More DMA fixes for noisy lines.
+ * Return the size of bad frames in dma_get_rx_frame_size, so that the Rx buffer
+ * descriptors can be cleaned by dma_buf_read (called in cpc_net_rx).
+ * Renamed dma_start routine to rx_dma_start. Improved Rx statistics.
+ * Fixed BOF interrupt treatment. Created dma_start routine.
+ * Changed min and max to cpc_min and cpc_max.
+ *
+ * Revision 3.10 2001/08/06 12:01:51 regina
+ * Fixed problem in DSR_DE bit.
+ *
+ * Revision 3.9 2001/07/18 19:27:26 daniela
+ * Added some history comments.
+ *
+ * Revision 3.8 2001/07/12 13:11:19 regina
+ * bug fix - DCD-OFF in pc300 tty driver
+ *
+ * Revision 3.3 to 3.7 2001/07/06 15:00:20 daniela
+ * Removing kernel 2.4.3 and previous support.
+ * DMA transmission bug fix.
+ * MTU check in cpc_net_rx fixed.
+ * Boot messages reviewed.
+ * New configuration parameters (line code, CRC calculation and clock).
+ *
+ * Revision 3.2 2001/06/22 13:13:02 regina
+ * MLPPP implementation. Changed the header of message trace to include
+ * the device name. New format : "hdlcX[R/T]: ".
+ * Default configuration changed.
+ *
+ * Revision 3.1 2001/06/15 regina
+ * in cpc_queue_xmit, netif_stop_queue is called if there is no free descriptor
+ * upping major version number
+ *
+ * Revision 1.1.1.1 2001/06/13 20:25:04 daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 3.0.1.2 2001/06/08 daniela
+ * Did some changes in the DMA programming implementation to avoid the
+ * occurrence of a SCA-II bug when CDA is accessed during a DMA transfer.
+ *
+ * Revision 3.0.1.1 2001/05/02 daniela
+ * Added kernel 2.4.3 support.
+ *
+ * Revision 3.0.1.0 2001/03/13 daniela, henrique
+ * Added Frame Relay Support.
+ * Driver now uses HDLC generic driver to provide protocol support.
+ *
+ * Revision 3.0.0.8 2001/03/02 daniela
+ * Fixed ram size detection.
+ * Changed SIOCGPC300CONF ioctl, to give hw information to pc300util.
+ *
+ * Revision 3.0.0.7 2001/02/23 daniela
+ * netif_stop_queue called before the SCA-II transmission commands in
+ * cpc_queue_xmit, and with interrupts disabled to avoid race conditions with
+ * transmission interrupts.
+ * Fixed falc_check_status for Unframed E1.
+ *
+ * Revision 3.0.0.6 2000/12/13 daniela
+ * Implemented pc300util support: trace, statistics, status and loopback
+ * tests for the PC300 TE boards.
+ *
+ * Revision 3.0.0.5 2000/12/12 ivan
+ * Added support for Unframed E1.
+ * Implemented monitor mode.
+ * Fixed DCD sensitivity on the second channel.
+ * Driver now complies with new PCI kernel architecture.
+ *
+ * Revision 3.0.0.4 2000/09/28 ivan
+ * Implemented DCD sensitivity.
+ * Moved hardware-specific open to the end of cpc_open, to avoid race
+ * conditions with early reception interrupts.
+ * Included code for [request|release]_mem_region().
+ * Changed location of pc300.h .
+ * Minor code revision (contrib. of Jeff Garzik).
+ *
+ * Revision 3.0.0.3 2000/07/03 ivan
+ * Previous bugfix for the framing errors with external clock made X21
+ * boards stop working. This version fixes it.
+ *
+ * Revision 3.0.0.2 2000/06/23 ivan
+ * Revisited cpc_queue_xmit to prevent race conditions on Tx DMA buffer
+ * handling when Tx timeouts occur.
+ * Revisited Rx statistics.
+ * Fixed a bug in the SCA-II programming that would cause framing errors
+ * when external clock was configured.
+ *
+ * Revision 3.0.0.1 2000/05/26 ivan
+ * Added logic in the SCA interrupt handler so that no board can monopolize
+ * the driver.
+ * Request PLX I/O region, although driver doesn't use it, to avoid
+ * problems with other drivers accessing it.
+ *
+ * Revision 3.0.0.0 2000/05/15 ivan
+ * Did some changes in the DMA programming implementation to avoid the
+ * occurrence of a SCA-II bug in the second channel.
+ * Implemented workaround for PLX9050 bug that would cause a system lockup
+ * in certain systems, depending on the MMIO addresses allocated to the
+ * board.
+ * Fixed the FALC chip programming to avoid synchronization problems in the
+ * second channel (TE only).
+ * Implemented a cleaner and faster Tx DMA descriptor cleanup procedure in
+ * cpc_queue_xmit().
+ * Changed the built-in driver implementation so that the driver can use the
+ * general 'hdlcN' naming convention instead of proprietary device names.
+ * Driver load messages are now device-centric, instead of board-centric.
+ * Dynamic allocation of net_device structures.
+ * Code is now compliant with the new module interface (module_[init|exit]).
+ * Make use of the PCI helper functions to access PCI resources.
+ *
+ * Revision 2.0.0.0 2000/04/15 ivan
+ * Added support for the PC300/TE boards (T1/FT1/E1/FE1).
+ *
+ * Revision 1.1.0.0 2000/02/28 ivan
+ * Major changes in the driver architecture.
+ * Softnet compliancy implemented.
+ * Driver now reports physical instead of virtual memory addresses.
+ * Added cpc_change_mtu function.
+ *
+ * Revision 1.0.0.0 1999/12/16 ivan
+ * First official release.
+ * Support for 1- and 2-channel boards (which use distinct PCI Device ID's).
+ * Support for monolithic installation (i.e., drv built into the kernel).
+ * X.25 additional checking when lapb_[dis]connect_request returns an error.
+ * SCA programming now covers X.21 as well.
+ *
+ * Revision 0.3.1.0 1999/11/18 ivan
+ * Made X.25 support configuration-dependent (as it depends on external
+ * modules to work).
+ * Changed X.25-specific function names to comply with adopted convention.
+ * Fixed typos in X.25 functions that would cause compile errors (Daniela).
+ * Fixed bug in ch_config that would disable interrupts on a previously
+ * enabled channel if the other channel on the same board was enabled later.
+ *
+ * Revision 0.3.0.0 1999/11/16 daniela
+ * X.25 support.
+ *
+ * Revision 0.2.3.0 1999/11/15 ivan
+ * Function cpc_ch_status now provides more detailed information.
+ * Added support for X.21 clock configuration.
+ * Changed TNR1 setting in order to prevent Tx FIFO overaccesses by the SCA.
+ * Now using PCI clock instead of internal oscillator clock for the SCA.
+ *
+ * Revision 0.2.2.0 1999/11/10 ivan
+ * Changed the *_dma_buf_check functions so that they would print only
+ * the useful info instead of the whole buffer descriptor bank.
+ * Fixed bug in cpc_queue_xmit that would eventually crash the system
+ * in case of a packet drop.
+ * Implemented TX underrun handling.
+ * Improved SCA fine tuning to boost up its performance.
+ *
+ * Revision 0.2.1.0 1999/11/03 ivan
+ * Added functions *dma_buf_pt_init to allow independent initialization
+ * of the next-descr. and DMA buffer pointers on the DMA descriptors.
+ * Kernel buffer release and tbusy clearing is now done in the interrupt
+ * handler.
+ * Fixed bug in cpc_open that would cause an interface reopen to fail.
+ * Added a protocol-specific code section in cpc_net_rx.
+ * Removed printk level defs (they might be added back after the beta phase).
+ *
+ * Revision 0.2.0.0 1999/10/28 ivan
+ * Revisited the code so that new protocols can be easily added / supported.
+ *
+ * Revision 0.1.0.1 1999/10/20 ivan
+ * Mostly "esthetic" changes.
+ *
+ * Revision 0.1.0.0 1999/10/11 ivan
+ * Initial version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/if.h>
+
+#include <net/syncppp.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "pc300.h"
+
+#define CPC_LOCK(card,flags) \
+ do { \
+ spin_lock_irqsave(&card->card_lock, flags); \
+ } while (0)
+
+#define CPC_UNLOCK(card,flags) \
+ do { \
+ spin_unlock_irqrestore(&card->card_lock, flags); \
+ } while (0)
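+
+/*
+ * Illustrative usage of the locking macros above (a sketch, not code from
+ * the original driver):
+ *
+ *	unsigned long flags;
+ *
+ *	CPC_LOCK(card, flags);
+ *	... touch card->chan[ch] state / SCA registers ...
+ *	CPC_UNLOCK(card, flags);
+ *
+ * i.e. an irqsave spinlock on card->card_lock, serializing per-card hardware
+ * access against the interrupt handler.
+ */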
+
+#undef PC300_DEBUG_PCI
+#undef PC300_DEBUG_INTR
+#undef PC300_DEBUG_TX
+#undef PC300_DEBUG_RX
+#undef PC300_DEBUG_OTHER
+
+static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+ /* PC300/RSV or PC300/X21, 2 chan */
+ {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
+ /* PC300/RSV or PC300/X21, 1 chan */
+ {0x120e, 0x301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x301},
+ /* PC300/TE, 2 chan */
+ {0x120e, 0x310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x310},
+ /* PC300/TE, 1 chan */
+ {0x120e, 0x311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x311},
+ /* PC300/TE-M, 2 chan */
+ {0x120e, 0x320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x320},
+ /* PC300/TE-M, 1 chan */
+ {0x120e, 0x321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x321},
+ /* End of table */
+ {0,},
+};
+MODULE_DEVICE_TABLE(pci, cpc_pci_dev_id);
+
+#ifndef cpc_min
+#define cpc_min(a,b) (((a)<(b))?(a):(b))
+#endif
+#ifndef cpc_max
+#define cpc_max(a,b) (((a)>(b))?(a):(b))
+#endif
+
+/* prototypes */
+static void tx_dma_buf_pt_init(pc300_t *, int);
+static void tx_dma_buf_init(pc300_t *, int);
+static void rx_dma_buf_pt_init(pc300_t *, int);
+static void rx_dma_buf_init(pc300_t *, int);
+static void tx_dma_buf_check(pc300_t *, int);
+static void rx_dma_buf_check(pc300_t *, int);
+static irqreturn_t cpc_intr(int, void *, struct pt_regs *);
+static struct net_device_stats *cpc_get_stats(struct net_device *);
+static int clock_rate_calc(uclong, uclong, int *);
+static uclong detect_ram(pc300_t *);
+static void plx_init(pc300_t *);
+static void cpc_trace(struct net_device *, struct sk_buff *, char);
+static int cpc_attach(struct net_device *, unsigned short, unsigned short);
+
+#ifdef CONFIG_PC300_MLPPP
+void cpc_tty_init(pc300dev_t * dev);
+void cpc_tty_unregister_service(pc300dev_t * pc300dev);
+void cpc_tty_receive(pc300dev_t * pc300dev);
+void cpc_tty_trigger_poll(pc300dev_t * pc300dev);
+void cpc_tty_reset_var(void);
+#endif
+
+/************************/
+/*** DMA Routines ***/
+/************************/
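+/*
+ * Note on the layout (descriptive, derived from the helpers below): each
+ * channel owns a circular ring of block descriptors in on-board RAM. The
+ * *_pt_init() helpers chain descriptor i to descriptor
+ * (i + 1) & (N_DMA_*_BUF - 1) and point it at its own BD_DEF_LEN data
+ * buffer, so the SCA can walk the ring without any wrap handling in the
+ * driver.
+ */
+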
+static void tx_dma_buf_pt_init(pc300_t * card, int ch)
+{
+ int i;
+ int ch_factor = ch * N_DMA_TX_BUF;
+ volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+ + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+ for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
+ cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE +
+ (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
+ cpc_writel(&ptdescr->ptbuf,
+ (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
+ }
+}
+
+static void tx_dma_buf_init(pc300_t * card, int ch)
+{
+ int i;
+ int ch_factor = ch * N_DMA_TX_BUF;
+ volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+ + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+ for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
+ memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
+ cpc_writew(&ptdescr->len, 0);
+ cpc_writeb(&ptdescr->status, DST_OSB);
+ }
+ tx_dma_buf_pt_init(card, ch);
+}
+
+static void rx_dma_buf_pt_init(pc300_t * card, int ch)
+{
+ int i;
+ int ch_factor = ch * N_DMA_RX_BUF;
+ volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+ + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+ for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
+ cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE +
+ (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
+ cpc_writel(&ptdescr->ptbuf,
+ (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
+ }
+}
+
+static void rx_dma_buf_init(pc300_t * card, int ch)
+{
+ int i;
+ int ch_factor = ch * N_DMA_RX_BUF;
+ volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+ + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+ for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
+ memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
+ cpc_writew(&ptdescr->len, 0);
+ cpc_writeb(&ptdescr->status, 0);
+ }
+ rx_dma_buf_pt_init(card, ch);
+}
+
+static void tx_dma_buf_check(pc300_t * card, int ch)
+{
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int i;
+ ucshort first_bd = card->chan[ch].tx_first_bd;
+ ucshort next_bd = card->chan[ch].tx_next_bd;
+
+ printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
+ first_bd, TX_BD_ADDR(ch, first_bd),
+ next_bd, TX_BD_ADDR(ch, next_bd));
+ for (i = first_bd,
+ ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, first_bd));
+ i != ((next_bd + 1) & (N_DMA_TX_BUF - 1));
+ i = (i + 1) & (N_DMA_TX_BUF - 1),
+ ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i))) {
+ printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+ ch, i, cpc_readl(&ptdescr->next),
+ cpc_readl(&ptdescr->ptbuf),
+ cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
+ }
+ printk("\n");
+}
+
+#ifdef PC300_DEBUG_OTHER
+/* Show all TX buffer descriptors */
+static void tx1_dma_buf_check(pc300_t * card, int ch)
+{
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int i;
+ ucshort first_bd = card->chan[ch].tx_first_bd;
+ ucshort next_bd = card->chan[ch].tx_next_bd;
+ uclong scabase = card->hw.scabase;
+
+ printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
+ printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
+ first_bd, TX_BD_ADDR(ch, first_bd),
+ next_bd, TX_BD_ADDR(ch, next_bd));
+ printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
+ cpc_readl(scabase + DTX_REG(CDAL, ch)),
+ cpc_readl(scabase + DTX_REG(EDAL, ch)));
+ for (i = 0; i < N_DMA_TX_BUF; i++) {
+ ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i));
+ printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+ ch, i, cpc_readl(&ptdescr->next),
+ cpc_readl(&ptdescr->ptbuf),
+ cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
+ }
+ printk("\n");
+}
+#endif
+
+static void rx_dma_buf_check(pc300_t * card, int ch)
+{
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int i;
+ ucshort first_bd = card->chan[ch].rx_first_bd;
+ ucshort last_bd = card->chan[ch].rx_last_bd;
+ int ch_factor;
+
+ ch_factor = ch * N_DMA_RX_BUF;
+ printk("#CH%d: f_bd = %d, l_bd = %d\n", ch, first_bd, last_bd);
+ for (i = 0, ptdescr = (card->hw.rambase +
+ DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+ i < N_DMA_RX_BUF; i++, ptdescr++) {
+ if (cpc_readb(&ptdescr->status) & DST_OSB)
+ printk ("\n CH%d RX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+ ch, i, cpc_readl(&ptdescr->next),
+ cpc_readl(&ptdescr->ptbuf),
+ cpc_readb(&ptdescr->status),
+ cpc_readw(&ptdescr->len));
+ }
+ printk("\n");
+}
+
+int dma_get_rx_frame_size(pc300_t * card, int ch)
+{
+ volatile pcsca_bd_t __iomem *ptdescr;
+ ucshort first_bd = card->chan[ch].rx_first_bd;
+ int rcvd = 0;
+ volatile ucchar status;
+
+ ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
+ while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+ rcvd += cpc_readw(&ptdescr->len);
+ first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
+ if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
+ /* Return the size of a good frame or incomplete bad frame
+ * (dma_buf_read will clean the buffer descriptors in this case). */
+ return (rcvd);
+ }
+ ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
+ }
+ return (-1);
+}
+
+/*
+ * dma_buf_write: writes a frame to the Tx DMA buffers
+ * NOTE: this function writes one frame at a time.
+ */
+int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
+{
+ int i, nchar;
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int tosend = len;
+ ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1;
+
+ if (nbuf >= card->chan[ch].nfree_tx_bd) {
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nbuf; i++) {
+ ptdescr = (card->hw.rambase +
+ TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
+ nchar = cpc_min(BD_DEF_LEN, tosend);
+ if (cpc_readb(&ptdescr->status) & DST_OSB) {
+ memcpy_toio((card->hw.rambase + cpc_readl(&ptdescr->ptbuf)),
+ &ptdata[len - tosend], nchar);
+ cpc_writew(&ptdescr->len, nchar);
+ card->chan[ch].nfree_tx_bd--;
+ if ((i + 1) == nbuf) {
+ /* This must be the last BD to be used */
+ cpc_writeb(&ptdescr->status, DST_EOM);
+ } else {
+ cpc_writeb(&ptdescr->status, 0);
+ }
+ } else {
+ return -ENOMEM;
+ }
+ tosend -= nchar;
+ card->chan[ch].tx_next_bd =
+ (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
+ }
+ /* If it gets to here, it means we have sent the whole frame */
+ return 0;
+}
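+
+/*
+ * Illustrative calling pattern for dma_buf_write() (a sketch; the real
+ * caller is presumably cpc_queue_xmit() further down in this file):
+ *
+ *	if (dma_buf_write(card, ch, skb->data, skb->len) == 0)
+ *		tx_dma_start(card, ch);		// descriptors filled, kick the SCA
+ *	else
+ *		netif_stop_queue(dev);		// -ENOMEM: no free Tx descriptors
+ */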
+
+/*
+ * dma_buf_read: reads a frame from the Rx DMA buffers
+ * NOTE: this function reads one frame at a time.
+ */
+int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
+{
+ int nchar;
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int rcvd = 0;
+ volatile ucchar status;
+
+ ptdescr = (card->hw.rambase +
+ RX_BD_ADDR(ch, chan->rx_first_bd));
+ while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+ nchar = cpc_readw(&ptdescr->len);
+ if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT))
+ || (nchar > BD_DEF_LEN)) {
+
+ if (nchar > BD_DEF_LEN)
+ status |= DST_RBIT;
+ rcvd = -status;
+ /* Discard remaining descriptors used by the bad frame */
+ while (chan->rx_first_bd != chan->rx_last_bd) {
+ cpc_writeb(&ptdescr->status, 0);
+ chan->rx_first_bd = (chan->rx_first_bd+1) & (N_DMA_RX_BUF-1);
+ if (status & DST_EOM)
+ break;
+ ptdescr = (card->hw.rambase +
+ cpc_readl(&ptdescr->next));
+ status = cpc_readb(&ptdescr->status);
+ }
+ break;
+ }
+ if (nchar != 0) {
+ if (skb) {
+ memcpy_fromio(skb_put(skb, nchar),
+ (card->hw.rambase+cpc_readl(&ptdescr->ptbuf)),nchar);
+ }
+ rcvd += nchar;
+ }
+ cpc_writeb(&ptdescr->status, 0);
+ cpc_writeb(&ptdescr->len, 0);
+ chan->rx_first_bd = (chan->rx_first_bd + 1) & (N_DMA_RX_BUF - 1);
+
+ if (status & DST_EOM)
+ break;
+
+ ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
+ }
+
+ if (rcvd != 0) {
+ /* Update pointer */
+ chan->rx_last_bd = (chan->rx_first_bd - 1) & (N_DMA_RX_BUF - 1);
+ /* Update EDA */
+ cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
+ RX_BD_ADDR(ch, chan->rx_last_bd));
+ }
+ return (rcvd);
+}
+
+void tx_dma_stop(pc300_t * card, int ch)
+{
+ void __iomem *scabase = card->hw.scabase;
+ ucchar drr_ena_bit = 1 << (5 + 2 * ch);
+ ucchar drr_rst_bit = 1 << (1 + 2 * ch);
+
+ /* Disable DMA */
+ cpc_writeb(scabase + DRR, drr_ena_bit);
+ cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
+}
+
+void rx_dma_stop(pc300_t * card, int ch)
+{
+ void __iomem *scabase = card->hw.scabase;
+ ucchar drr_ena_bit = 1 << (4 + 2 * ch);
+ ucchar drr_rst_bit = 1 << (2 * ch);
+
+ /* Disable DMA */
+ cpc_writeb(scabase + DRR, drr_ena_bit);
+ cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
+}
+
+void rx_dma_start(pc300_t * card, int ch)
+{
+ void __iomem *scabase = card->hw.scabase;
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+
+ /* Start DMA */
+ cpc_writel(scabase + DRX_REG(CDAL, ch),
+ RX_BD_ADDR(ch, chan->rx_first_bd));
+ if (cpc_readl(scabase + DRX_REG(CDAL,ch)) !=
+ RX_BD_ADDR(ch, chan->rx_first_bd)) {
+ cpc_writel(scabase + DRX_REG(CDAL, ch),
+ RX_BD_ADDR(ch, chan->rx_first_bd));
+ }
+ cpc_writel(scabase + DRX_REG(EDAL, ch),
+ RX_BD_ADDR(ch, chan->rx_last_bd));
+ cpc_writew(scabase + DRX_REG(BFLL, ch), BD_DEF_LEN);
+ cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
+ if (!(cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+ cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
+ }
+}
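+
+/* Note (descriptive): the write/verify/re-write sequences above appear to be
+ * the workaround for the SCA-II behaviour mentioned in the revision history
+ * ("Enables DMA reception when the SCA-II disables it improperly" and the
+ * CDA-access-during-DMA issue): if the first MMIO write does not stick, the
+ * register is simply written again. */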
+
+/*************************/
+/*** FALC Routines ***/
+/*************************/
+void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
+{
+ void __iomem *falcbase = card->hw.falcbase;
+ unsigned long i = 0;
+
+ while (cpc_readb(falcbase + F_REG(SIS, ch)) & SIS_CEC) {
+ if (i++ >= PC300_FALC_MAXLOOP) {
+ printk("%s: FALC command locked(cmd=0x%x).\n",
+ card->chan[ch].d.name, cmd);
+ break;
+ }
+ }
+ cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
+}
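+
+/* Note (descriptive): SIS_CEC is the FALC "command executing" flag, so the
+ * loop above just polls (up to PC300_FALC_MAXLOOP iterations) until the
+ * previous command has completed before writing the new one to CMDR. */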
+
+void falc_intr_enable(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ /* Interrupt pins are open-drain */
+ cpc_writeb(falcbase + F_REG(IPC, ch),
+ cpc_readb(falcbase + F_REG(IPC, ch)) & ~IPC_IC0);
+ /* Counters updated each second */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_ECM);
+ /* Enable SEC and ES interrupts */
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) & ~(IMR3_SEC | IMR3_ES));
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(IMR4, ch),
+ cpc_readb(falcbase + F_REG(IMR4, ch)) & ~(IMR4_LOS));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR4, ch),
+ cpc_readb(falcbase + F_REG(IMR4, ch)) &
+ ~(IMR4_LFA | IMR4_AIS | IMR4_LOS | IMR4_SLIP));
+ }
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
+ } else {
+ cpc_writeb(falcbase + F_REG(IPC, ch),
+ cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI);
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) & ~(IMR2_LOS));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) &
+ ~(IMR2_FAR | IMR2_LFA | IMR2_AIS | IMR2_LOS));
+ if (pfalc->multiframe_mode) {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) &
+ ~(IMR2_T400MS | IMR2_MFAR));
+ } else {
+ cpc_writeb(falcbase + F_REG(IMR2, ch),
+ cpc_readb(falcbase + F_REG(IMR2, ch)) |
+ IMR2_T400MS | IMR2_MFAR);
+ }
+ }
+ }
+}
+
+void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
+{
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar tshf = card->chan[ch].falc.offset;
+
+ cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+ cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
+ ~(0x80 >> ((timeslot - tshf) & 0x07)));
+ cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) |
+ (0x80 >> (timeslot & 0x07)));
+ cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) |
+ (0x80 >> (timeslot & 0x07)));
+}
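+
+/* Illustrative example of the bit arithmetic above: on E1 (falc.offset == 0),
+ * opening timeslot 5 clears bit 0x80 >> 5 == 0x04 in ICB1 (the slot is no
+ * longer idle-coded) and sets the same 0x04 bit in TTR1/RTR1 to enable it for
+ * transmit and receive; on T1 the ICB index is shifted by the offset of 1. */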
+
+void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
+{
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar tshf = card->chan[ch].falc.offset;
+
+ cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+ cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
+ (0x80 >> ((timeslot - tshf) & 0x07)));
+ cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) &
+ ~(0x80 >> (timeslot & 0x07)));
+ cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+ cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) &
+ ~(0x80 >> (timeslot & 0x07)));
+}
+
+void falc_close_all_timeslots(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ cpc_writeb(falcbase + F_REG(ICB1, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR1, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR1, ch), 0);
+ cpc_writeb(falcbase + F_REG(ICB2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR2, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR2, ch), 0);
+ cpc_writeb(falcbase + F_REG(ICB3, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR3, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR3, ch), 0);
+ if (conf->media == IF_IFACE_E1) {
+ cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR4, ch), 0);
+ cpc_writeb(falcbase + F_REG(RTR4, ch), 0);
+ }
+}
+
+void falc_open_all_timeslots(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ cpc_writeb(falcbase + F_REG(ICB1, ch), 0);
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(TTR1, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RTR1, ch), 0xff);
+ } else {
+ /* Timeslot 0 is never enabled */
+ cpc_writeb(falcbase + F_REG(TTR1, ch), 0x7f);
+ cpc_writeb(falcbase + F_REG(RTR1, ch), 0x7f);
+ }
+ cpc_writeb(falcbase + F_REG(ICB2, ch), 0);
+ cpc_writeb(falcbase + F_REG(TTR2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RTR2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(ICB3, ch), 0);
+ cpc_writeb(falcbase + F_REG(TTR3, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RTR3, ch), 0xff);
+ if (conf->media == IF_IFACE_E1) {
+ cpc_writeb(falcbase + F_REG(ICB4, ch), 0);
+ cpc_writeb(falcbase + F_REG(TTR4, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RTR4, ch), 0xff);
+ } else {
+ cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(TTR4, ch), 0x80);
+ cpc_writeb(falcbase + F_REG(RTR4, ch), 0x80);
+ }
+}
+
+void falc_init_timeslot(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ int tslot;
+
+ for (tslot = 0; tslot < pfalc->num_channels; tslot++) {
+ if (conf->tslot_bitmap & (1 << tslot)) {
+ // Channel enabled
+ falc_open_timeslot(card, ch, tslot + 1);
+ } else {
+ // Channel disabled
+ falc_close_timeslot(card, ch, tslot + 1);
+ }
+ }
+}
+
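+/*
+ * falc_enable_comm()/falc_disable_comm() bring the channel up or down:
+ * they open the timeslots (all of them, or only those selected by
+ * conf->tslot_bitmap when full_bandwidth is not set) and then turn the
+ * CTS/DCD outputs on or off through CPLD register 1.  On disable the
+ * timeslots are only closed when no payload loop is active
+ * (loop_active == 2).
+ */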
+void falc_enable_comm(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+
+ if (pfalc->full_bandwidth) {
+ falc_open_all_timeslots(card, ch);
+ } else {
+ falc_init_timeslot(card, ch);
+ }
+ // CTS/DCD ON
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+ ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
+}
+
+void falc_disable_comm(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+
+ if (pfalc->loop_active != 2) {
+ falc_close_all_timeslots(card, ch);
+ }
+ // CTS/DCD OFF
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+ ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
+}
+
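+/*
+ * falc_init_t1(): programs the framer for T1 operation (PCM 24 mode,
+ * master/slave clocking, AMI/B8ZS/NRZ line code, ESF or D4 framing,
+ * line build-out, LOS detect/recovery thresholds) and finally closes
+ * all timeslots; falc_check_status() reopens them via falc_enable_comm()
+ * once the line is in sync.
+ */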
+void falc_init_t1(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+
+ /* Switch to T1 mode (PCM 24) */
+ cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
+
+ /* Wait 20 us for setup */
+ udelay(20);
+
+ /* Transmit Buffer Size (1 frame) */
+ cpc_writeb(falcbase + F_REG(SIC1, ch), SIC1_XBS0);
+
+ /* Clock mode */
+ if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
+ } else { /* Slave mode */
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
+ cpc_writeb(falcbase + F_REG(LOOP, ch),
+ cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_RTM);
+ }
+
+ cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) &
+ ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
+
+ switch (conf->lcode) {
+ case PC300_LC_AMI:
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) |
+ FMR0_XC1 | FMR0_RC1);
+ /* Clear Channel register to ON for all channels */
+ cpc_writeb(falcbase + F_REG(CCB1, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(CCB2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(CCB3, ch), 0xff);
+ break;
+
+ case PC300_LC_B8ZS:
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) |
+ FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
+ break;
+
+ case PC300_LC_NRZ:
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) | 0x00);
+ break;
+ }
+
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_ELOS);
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
+ /* Set interface mode to 2 MBPS */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
+
+ switch (conf->fr_mode) {
+ case PC300_FR_ESF:
+ pfalc->multiframe_mode = 0;
+ cpc_writeb(falcbase + F_REG(FMR4, ch),
+ cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_FM1);
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) |
+ FMR1_CRC | FMR1_EDL);
+ cpc_writeb(falcbase + F_REG(XDL1, ch), 0);
+ cpc_writeb(falcbase + F_REG(XDL2, ch), 0);
+ cpc_writeb(falcbase + F_REG(XDL3, ch), 0);
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) & ~FMR0_SRAF);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2,ch)) | FMR2_MCSP | FMR2_SSP);
+ break;
+
+ case PC300_FR_D4:
+ pfalc->multiframe_mode = 1;
+ cpc_writeb(falcbase + F_REG(FMR4, ch),
+ cpc_readb(falcbase + F_REG(FMR4, ch)) &
+ ~(FMR4_FM1 | FMR4_FM0));
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) | FMR0_SRAF);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_SSP);
+ break;
+ }
+
+ /* Enable Automatic Resynchronization */
+ cpc_writeb(falcbase + F_REG(FMR4, ch),
+ cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_AUTO);
+
+ /* Transmit Automatic Remote Alarm */
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+ /* Channel translation mode 1 : one to one */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_CTM);
+
+ /* No signaling */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_SIGM);
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) &
+ ~(FMR5_EIBR | FMR5_SRS));
+ cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
+
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
+
+ switch (conf->lbo) {
+ /* Provides proper Line Build Out */
+ case PC300_LBO_0_DB:
+ cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
+ cpc_writeb(falcbase + F_REG(XPM0, ch), 0x5a);
+ cpc_writeb(falcbase + F_REG(XPM1, ch), 0x8f);
+ cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+ break;
+ case PC300_LBO_7_5_DB:
+ cpc_writeb(falcbase + F_REG(LIM2, ch), (0x40 | LIM2_LOS1 | dja));
+ cpc_writeb(falcbase + F_REG(XPM0, ch), 0x11);
+ cpc_writeb(falcbase + F_REG(XPM1, ch), 0x02);
+ cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+ break;
+ case PC300_LBO_15_DB:
+ cpc_writeb(falcbase + F_REG(LIM2, ch), (0x80 | LIM2_LOS1 | dja));
+ cpc_writeb(falcbase + F_REG(XPM0, ch), 0x8e);
+ cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
+ cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+ break;
+ case PC300_LBO_22_5_DB:
+ cpc_writeb(falcbase + F_REG(LIM2, ch), (0xc0 | LIM2_LOS1 | dja));
+ cpc_writeb(falcbase + F_REG(XPM0, ch), 0x09);
+ cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
+ cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+ break;
+ }
+
+ /* Transmit Clock-Slot Offset */
+ cpc_writeb(falcbase + F_REG(XC0, ch),
+ cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
+ /* Transmit Time-slot Offset */
+ cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
+ /* Receive Clock-Slot offset */
+ cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
+ /* Receive Time-slot offset */
+ cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
+
+ /* LOS Detection after 176 consecutive 0s */
+ cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
+ /* LOS Recovery after 22 ones in the time window of PCD */
+ cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
+
+ cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
+
+ if (conf->fr_mode == PC300_FR_ESF_JAPAN) {
+ cpc_writeb(falcbase + F_REG(RC1, ch),
+ cpc_readb(falcbase + F_REG(RC1, ch)) | 0x80);
+ }
+
+ falc_close_all_timeslots(card, ch);
+}
+
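+/*
+ * falc_init_e1(): E1 counterpart of falc_init_t1() (PCM 30 mode,
+ * AMI/HDB3/NRZ line code, CRC4 multiframe, non-CRC4/D4 or unframed
+ * operation).  In unframed mode there is no frame alignment to wait for,
+ * so sync is declared immediately and the sync LED (CPLD_REG2_FALC_LED2)
+ * is lit right here.
+ */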
+void falc_init_e1(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+
+ /* Switch to E1 mode (PCM 30) */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_PMOD);
+
+ /* Clock mode */
+ if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
+ } else { /* Slave mode */
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
+ }
+ cpc_writeb(falcbase + F_REG(LOOP, ch),
+ cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_SFM);
+
+ cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) &
+ ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
+
+ switch (conf->lcode) {
+ case PC300_LC_AMI:
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) |
+ FMR0_XC1 | FMR0_RC1);
+ break;
+
+ case PC300_LC_HDB3:
+ cpc_writeb(falcbase + F_REG(FMR0, ch),
+ cpc_readb(falcbase + F_REG(FMR0, ch)) |
+ FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
+ break;
+
+ case PC300_LC_NRZ:
+ break;
+ }
+
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
+ /* Set interface mode to 2 MBPS */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
+
+ cpc_writeb(falcbase + F_REG(XPM0, ch), 0x18);
+ cpc_writeb(falcbase + F_REG(XPM1, ch), 0x03);
+ cpc_writeb(falcbase + F_REG(XPM2, ch), 0x00);
+
+ switch (conf->fr_mode) {
+ case PC300_FR_MF_CRC4:
+ pfalc->multiframe_mode = 1;
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_XFS);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_RFS1);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_RFS0);
+ cpc_writeb(falcbase + F_REG(FMR3, ch),
+ cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_EXTIW);
+
+ /* MultiFrame Resynchronization */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_MFCS);
+
+ /* Automatic Loss of Multiframe > 914 CRC errors */
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_ALMF);
+
+ /* S1 and SI1/SI2 spare Bits set to 1 */
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_AXS);
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_EBP);
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XS13 | XSP_XS15);
+
+ /* Automatic Force Resynchronization */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
+
+ /* Transmit Automatic Remote Alarm */
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+ /* Transmit Spare Bits for National Use (Y, Sn, Sa) */
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) |
+ XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
+ break;
+
+ case PC300_FR_MF_NON_CRC4:
+ case PC300_FR_D4:
+ pfalc->multiframe_mode = 0;
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) &
+ ~(FMR2_RFS1 | FMR2_RFS0));
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XSIS);
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XSIF);
+
+ /* Automatic Force Resynchronization */
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
+
+ /* Transmit Automatic Remote Alarm */
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+ /* Transmit Spare Bits for National Use (Y, Sn, Sa) */
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) |
+ XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
+ break;
+
+ case PC300_FR_UNFRAMED:
+ pfalc->multiframe_mode = 0;
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) &
+ ~(FMR2_RFS1 | FMR2_RFS0));
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_TT0);
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) &
+ ~(XSW_XTM|XSW_XY0|XSW_XY1|XSW_XY2|XSW_XY3|XSW_XY4));
+ cpc_writeb(falcbase + F_REG(TSWM, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) |
+ (FMR2_RTM | FMR2_DAIS));
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_AXRA);
+ cpc_writeb(falcbase + F_REG(FMR1, ch),
+ cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_AFR);
+ pfalc->sync = 1;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) |
+ (CPLD_REG2_FALC_LED2 << (2 * ch)));
+ break;
+ }
+
+ /* No signaling */
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_CASEN);
+ cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
+
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
+ cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
+
+ /* Transmit Clock-Slot Offset */
+ cpc_writeb(falcbase + F_REG(XC0, ch),
+ cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
+ /* Transmit Time-slot Offset */
+ cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
+ /* Receive Clock-Slot offset */
+ cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
+ /* Receive Time-slot offset */
+ cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
+
+ /* LOS Detection after 176 consecutive 0s */
+ cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
+ /* LOS Recovery after 22 ones in the time window of PCD */
+ cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
+
+ cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
+
+ falc_close_all_timeslots(card, ch);
+}
+
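+/*
+ * falc_init_hdlc(): for unframed links the MODE register is simply
+ * cleared (transparent data transfer); framed links get HDLC mode
+ * (MODE_HRAC | MODE_MDS2) with the receive address registers RAH1/2 and
+ * RAL1/2 set to 0xff.  Afterwards the Tx/Rx paths are reset and the FALC
+ * interrupt sources enabled.
+ */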
+void falc_init_hdlc(pc300_t * card, int ch)
+{
+ void __iomem *falcbase = card->hw.falcbase;
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+
+ /* Enable transparent data transfer */
+ if (conf->fr_mode == PC300_FR_UNFRAMED) {
+ cpc_writeb(falcbase + F_REG(MODE, ch), 0);
+ } else {
+ cpc_writeb(falcbase + F_REG(MODE, ch),
+ cpc_readb(falcbase + F_REG(MODE, ch)) |
+ (MODE_HRAC | MODE_MDS2));
+ cpc_writeb(falcbase + F_REG(RAH2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RAH1, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RAL2, ch), 0xff);
+ cpc_writeb(falcbase + F_REG(RAL1, ch), 0xff);
+ }
+
+ /* Tx/Rx reset */
+ falc_issue_cmd(card, ch, CMDR_RRES | CMDR_XRES | CMDR_SRES);
+
+ /* Enable interrupt sources */
+ falc_intr_enable(card, ch);
+}
+
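+/*
+ * te_config() is the top-level (re)configuration routine for a TE
+ * channel: it picks the T1 or E1 parameters (including full_bandwidth
+ * when tslot_bitmap selects every slot), pulses the FALC reset line
+ * through the CPLD, runs falc_init_t1()/falc_init_e1() and
+ * falc_init_hdlc(), programs the receiver sensitivity (LIM0_EQON) and
+ * finally flushes any pending FALC interrupts by reading FISR0..FISR3.
+ */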
+void te_config(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar dummy;
+ unsigned long flags;
+
+ memset(pfalc, 0, sizeof(falc_t));
+ switch (conf->media) {
+ case IF_IFACE_T1:
+ pfalc->num_channels = NUM_OF_T1_CHANNELS;
+ pfalc->offset = 1;
+ break;
+ case IF_IFACE_E1:
+ pfalc->num_channels = NUM_OF_E1_CHANNELS;
+ pfalc->offset = 0;
+ break;
+ }
+ if (conf->tslot_bitmap == 0xffffffffUL)
+ pfalc->full_bandwidth = 1;
+ else
+ pfalc->full_bandwidth = 0;
+
+ CPC_LOCK(card, flags);
+ /* Reset the FALC chip */
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+ (CPLD_REG1_FALC_RESET << (2 * ch)));
+ udelay(10000);
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+ ~(CPLD_REG1_FALC_RESET << (2 * ch)));
+
+ if (conf->media == IF_IFACE_T1) {
+ falc_init_t1(card, ch);
+ } else {
+ falc_init_e1(card, ch);
+ }
+ falc_init_hdlc(card, ch);
+ if (conf->rx_sens == PC300_RX_SENS_SH) {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_EQON);
+ } else {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_EQON);
+ }
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+ ((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK) << (2 * ch)));
+
+ /* Clear all interrupt registers */
+ dummy = cpc_readb(falcbase + F_REG(FISR0, ch)) +
+ cpc_readb(falcbase + F_REG(FISR1, ch)) +
+ cpc_readb(falcbase + F_REG(FISR2, ch)) +
+ cpc_readb(falcbase + F_REG(FISR3, ch));
+ CPC_UNLOCK(card, flags);
+}
+
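+/*
+ * falc_check_status() translates the FRS0 status bits into the driver's
+ * alarm bookkeeping: LOS -> red alarm, AIS -> blue alarm, LFA/LMFA ->
+ * loss of (multi)frame alignment, RRA -> yellow (remote) alarm.  Red,
+ * blue, LFA or LMFA drops sync, bumps line_off and turns the sync LED
+ * off; once those clear, sync is regained and, provided no yellow alarm
+ * is pending, falc_enable_comm() reactivates the channel (on T1 the PDEN
+ * interrupt is re-enabled as well).
+ */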
+void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ /* Verify LOS */
+ if (frs0 & FRS0_LOS) {
+ if (!pfalc->red_alarm) {
+ pfalc->red_alarm = 1;
+ pfalc->los++;
+ if (!pfalc->blue_alarm) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere
+ * with other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch))
+ | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ }
+ }
+ } else {
+ if (pfalc->red_alarm) {
+ pfalc->red_alarm = 0;
+ pfalc->losr++;
+ }
+ }
+
+ if (conf->fr_mode != PC300_FR_UNFRAMED) {
+ /* Verify AIS alarm */
+ if (frs0 & FRS0_AIS) {
+ if (!pfalc->blue_alarm) {
+ pfalc->blue_alarm = 1;
+ pfalc->ais++;
+ // EVENT_AIS
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere with other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_AIS
+ }
+ } else {
+ pfalc->blue_alarm = 0;
+ }
+
+ /* Verify LFA */
+ if (frs0 & FRS0_LFA) {
+ if (!pfalc->loss_fa) {
+ pfalc->loss_fa = 1;
+ pfalc->lfa++;
+ if (!pfalc->blue_alarm && !pfalc->red_alarm) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise
+ * interfere with other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch))
+ | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ }
+ }
+ } else {
+ if (pfalc->loss_fa) {
+ pfalc->loss_fa = 0;
+ pfalc->farec++;
+ }
+ }
+
+ /* Verify LMFA */
+ if (pfalc->multiframe_mode && (frs0 & FRS0_LMFA)) {
+ /* D4 or CRC4 frame mode */
+ if (!pfalc->loss_mfa) {
+ pfalc->loss_mfa = 1;
+ pfalc->lmfa++;
+ if (!pfalc->blue_alarm && !pfalc->red_alarm &&
+ !pfalc->loss_fa) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise
+ * interfere with other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch))
+ | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ }
+ }
+ } else {
+ pfalc->loss_mfa = 0;
+ }
+
+ /* Verify Remote Alarm */
+ if (frs0 & FRS0_RRA) {
+ if (!pfalc->yellow_alarm) {
+ pfalc->yellow_alarm = 1;
+ pfalc->rai++;
+ if (pfalc->sync) {
+ // EVENT_RAI
+ falc_disable_comm(card, ch);
+ // EVENT_RAI
+ }
+ }
+ } else {
+ pfalc->yellow_alarm = 0;
+ }
+ } /* if !PC300_UNFRAMED */
+
+ if (pfalc->red_alarm || pfalc->loss_fa ||
+ pfalc->loss_mfa || pfalc->blue_alarm) {
+ if (pfalc->sync) {
+ pfalc->sync = 0;
+ chan->d.line_off++;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+ }
+ } else {
+ if (!pfalc->sync) {
+ pfalc->sync = 1;
+ chan->d.line_on++;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) |
+ (CPLD_REG2_FALC_LED2 << (2 * ch)));
+ }
+ }
+
+ if (pfalc->sync && !pfalc->yellow_alarm) {
+ if (!pfalc->active) {
+ // EVENT_FALC_NORMAL
+ if (pfalc->loop_active) {
+ return;
+ }
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) & ~IMR0_PDEN);
+ }
+ falc_enable_comm(card, ch);
+ // EVENT_FALC_NORMAL
+ pfalc->active = 1;
+ }
+ } else {
+ if (pfalc->active) {
+ pfalc->active = 0;
+ }
+ }
+}
+
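+/*
+ * falc_update_stats() accumulates the framer's 16-bit error counters
+ * (FEC, CVC, CEC, EBC); each one is assembled from its low/high byte
+ * pair, e.g. fec += FECL | (FECH << 8).  When PRBS monitoring is active
+ * (LCR1_EPRM set) the bit error counter BEC is read as well and
+ * pfalc->prbs is updated from the LLBAD status bits.
+ */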
+void falc_update_stats(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucshort counter;
+
+ counter = cpc_readb(falcbase + F_REG(FECL, ch));
+ counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
+ pfalc->fec += counter;
+
+ counter = cpc_readb(falcbase + F_REG(CVCL, ch));
+ counter |= cpc_readb(falcbase + F_REG(CVCH, ch)) << 8;
+ pfalc->cvc += counter;
+
+ counter = cpc_readb(falcbase + F_REG(CECL, ch));
+ counter |= cpc_readb(falcbase + F_REG(CECH, ch)) << 8;
+ pfalc->cec += counter;
+
+ counter = cpc_readb(falcbase + F_REG(EBCL, ch));
+ counter |= cpc_readb(falcbase + F_REG(EBCH, ch)) << 8;
+ pfalc->ebc += counter;
+
+ if (cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) {
+ mdelay(10);
+ counter = cpc_readb(falcbase + F_REG(BECL, ch));
+ counter |= cpc_readb(falcbase + F_REG(BECH, ch)) << 8;
+ pfalc->bec += counter;
+
+ if (((conf->media == IF_IFACE_T1) &&
+ (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) &&
+ (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN)))
+ ||
+ ((conf->media == IF_IFACE_E1) &&
+ (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) {
+ pfalc->prbs = 2;
+ } else {
+ pfalc->prbs = 1;
+ }
+ }
+}
+
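+/*
+ * The loopback helpers below track their state in pfalc->loop_active:
+ * 0 means no loop, 1 a remote or local loop, 2 a payload loop.  The
+ * value 2 matters to falc_disable_comm(), which keeps the timeslots
+ * open while a payload loop is running.
+ */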
+/*----------------------------------------------------------------------------
+ * falc_remote_loop
+ *----------------------------------------------------------------------------
+ * Description: In the remote loopback mode the clock and data recovered
+ * from the line inputs RL1/2 or RDIP/RDIN are routed back
+ * to the line outputs XL1/2 or XDOP/XDON via the analog
+ * transmitter. As in normal mode they are processed by
+ * the synchronizer and then sent to the system interface.
+ *----------------------------------------------------------------------------
+ */
+void falc_remote_loop(pc300_t * card, int ch, int loop_on)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (loop_on) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere with
+ * other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RL);
+ pfalc->loop_active = 1;
+ } else {
+ cpc_writeb(falcbase + F_REG(LIM1, ch),
+ cpc_readb(falcbase + F_REG(LIM1, ch)) & ~LIM1_RL);
+ pfalc->sync = 0;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+ pfalc->active = 0;
+ falc_issue_cmd(card, ch, CMDR_XRES);
+ pfalc->loop_active = 0;
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_local_loop
+ *----------------------------------------------------------------------------
+ * Description: The local loopback mode disconnects the receive lines
+ * RL1/RL2 or RDIP/RDIN from the receiver. Instead of the signals
+ * coming from the line, the data provided by the system interface
+ * are routed through the analog receiver back to the system
+ * interface. The unipolar bit stream is still transmitted
+ * undisturbed on the line. Receiver and transmitter coding must
+ * be identical.
+ *----------------------------------------------------------------------------
+ */
+void falc_local_loop(pc300_t * card, int ch, int loop_on)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (loop_on) {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_LL);
+ pfalc->loop_active = 1;
+ } else {
+ cpc_writeb(falcbase + F_REG(LIM0, ch),
+ cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_LL);
+ pfalc->loop_active = 0;
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_payload_loop
+ *----------------------------------------------------------------------------
+ * Description: This routine enables/disables the payload loopback.
+ * When the payload loop is activated, the received 192 bits
+ * of payload data will be looped back to the transmit
+ * direction. The framing bits, CRC6 and DL bits are not
+ * looped. They are originated by the FALC-LH transmitter.
+ *----------------------------------------------------------------------------
+ */
+void falc_payload_loop(pc300_t * card, int ch, int loop_on)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (loop_on) {
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere with
+ * other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_PLB);
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR4, ch),
+ cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_TM);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) | XSP_TT0);
+ }
+ falc_open_all_timeslots(card, ch);
+ pfalc->loop_active = 2;
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR2, ch),
+ cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_PLB);
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR4, ch),
+ cpc_readb(falcbase + F_REG(FMR4, ch)) & ~FMR4_TM);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) & ~XSP_TT0);
+ }
+ pfalc->sync = 0;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+ pfalc->active = 0;
+ falc_issue_cmd(card, ch, CMDR_XRES);
+ pfalc->loop_active = 0;
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * turn_off_xlu
+ *----------------------------------------------------------------------------
+ * Description: Turns the XLU bit off in the proper register (FMR5 on T1, FMR3 on E1)
+ *----------------------------------------------------------------------------
+ */
+void turn_off_xlu(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLU);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR3, ch),
+ cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLU);
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * turn_off_xld
+ *----------------------------------------------------------------------------
+ * Description: Turns the XLD bit off in the proper register (FMR5 on T1, FMR3 on E1)
+ *----------------------------------------------------------------------------
+ */
+void turn_off_xld(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLD);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR3, ch),
+ cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLD);
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_generate_loop_up_code
+ *----------------------------------------------------------------------------
+ * Description: This routine writes the proper FALC chip register in order
+ * to generate a LOOP activation code over a T1/E1 line.
+ *----------------------------------------------------------------------------
+ */
+void falc_generate_loop_up_code(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLU);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR3, ch),
+ cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLU);
+ }
+ // EVENT_FALC_ABNORMAL
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable this interrupt as it may otherwise interfere with
+ * other working boards. */
+ cpc_writeb(falcbase + F_REG(IMR0, ch),
+ cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+ }
+ falc_disable_comm(card, ch);
+ // EVENT_FALC_ABNORMAL
+ pfalc->loop_gen = 1;
+}
+
+/*----------------------------------------------------------------------------
+ * falc_generate_loop_down_code
+ *----------------------------------------------------------------------------
+ * Description: This routine writes the proper FALC chip register in order
+ * to generate a LOOP deactivation code over a T1/E1 line.
+ *----------------------------------------------------------------------------
+ */
+void falc_generate_loop_down_code(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (conf->media == IF_IFACE_T1) {
+ cpc_writeb(falcbase + F_REG(FMR5, ch),
+ cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLD);
+ } else {
+ cpc_writeb(falcbase + F_REG(FMR3, ch),
+ cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLD);
+ }
+ pfalc->sync = 0;
+ cpc_writeb(falcbase + card->hw.cpld_reg2,
+ cpc_readb(falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+ pfalc->active = 0;
+//? falc_issue_cmd(card, ch, CMDR_XRES);
+ pfalc->loop_gen = 0;
+}
+
+/*----------------------------------------------------------------------------
+ * falc_pattern_test
+ *----------------------------------------------------------------------------
+ * Description: This routine generates a test pattern and checks
+ * it on the receive side.
+ *----------------------------------------------------------------------------
+ */
+void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (activate) {
+ pfalc->prbs = 1;
+ pfalc->bec = 0;
+ if (conf->media == IF_IFACE_T1) {
+ /* Disable local loop activation/deactivation detect */
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) | IMR3_LLBSC);
+ } else {
+ /* Disable local loop activation/deactivation detect */
+ cpc_writeb(falcbase + F_REG(IMR1, ch),
+ cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_LLBSC);
+ }
+ /* Activates generation and monitoring of PRBS
+ * (Pseudo Random Bit Sequence) */
+ cpc_writeb(falcbase + F_REG(LCR1, ch),
+ cpc_readb(falcbase + F_REG(LCR1, ch)) | LCR1_EPRM | LCR1_XPRBS);
+ } else {
+ pfalc->prbs = 0;
+ /* Deactivates generation and monitoring of PRBS
+ * (Pseudo Random Bit Sequence) */
+ cpc_writeb(falcbase + F_REG(LCR1, ch),
+ cpc_readb(falcbase+F_REG(LCR1,ch)) & ~(LCR1_EPRM | LCR1_XPRBS));
+ if (conf->media == IF_IFACE_T1) {
+ /* Enable local loop activation/deactivation detect */
+ cpc_writeb(falcbase + F_REG(IMR3, ch),
+ cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
+ } else {
+ /* Enable local loop activation/deactivation detect */
+ cpc_writeb(falcbase + F_REG(IMR1, ch),
+ cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_LLBSC);
+ }
+ }
+}
+
+/*----------------------------------------------------------------------------
+ * falc_pattern_test_error
+ *----------------------------------------------------------------------------
+ * Description: This routine returns the bit error counter value
+ *----------------------------------------------------------------------------
+ */
+ucshort falc_pattern_test_error(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+
+ return (pfalc->bec);
+}
+
+/**********************************/
+/*** Net Interface Routines ***/
+/**********************************/
+
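+/*
+ * cpc_trace() clones a frame into a new skb prefixed with a 10-byte
+ * header of the form "name[R]: " or "name[T]: " (only the first 5
+ * characters of the interface name are copied) and hands it to the
+ * stack with the private ETH_P_CUST protocol, so traced traffic can be
+ * told apart from regular frames.
+ */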
+static void
+cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
+{
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(10 + skb_main->len)) == NULL) {
+ printk("%s: out of memory\n", dev->name);
+ return;
+ }
+ skb_put(skb, 10 + skb_main->len);
+
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_CUST);
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+ skb->len = 10 + skb_main->len;
+
+ memcpy(skb->data, dev->name, 5);
+ skb->data[5] = '[';
+ skb->data[6] = rx_tx;
+ skb->data[7] = ']';
+ skb->data[8] = ':';
+ skb->data[9] = ' ';
+ memcpy(&skb->data[10], skb_main->data, skb_main->len);
+
+ netif_rx(skb);
+}
+
+void cpc_tx_timeout(struct net_device *dev)
+{
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ int ch = chan->channel;
+ unsigned long flags;
+ ucchar ilar;
+
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+ CPC_LOCK(card, flags);
+ if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) {
+ printk("%s: ILAR=0x%x\n", dev->name, ilar);
+ cpc_writeb(card->hw.scabase + ILAR, ilar);
+ cpc_writeb(card->hw.scabase + DMER, 0x80);
+ }
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ dev->trans_start = jiffies;
+ CPC_UNLOCK(card, flags);
+ netif_wake_queue(dev);
+}
+
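+/*
+ * cpc_queue_xmit() implements the transmit path: frames are dropped in
+ * monitor mode or when DCD is down, otherwise copied into the Tx DMA
+ * ring by dma_buf_write().  The queue is stopped when at most one free
+ * descriptor remains, then the SCA Tx DMA is kicked (EDAL, CMD_TX_ENA,
+ * DSR_DE) and, on TE boards, the Tx LED is switched on.
+ */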
+int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ int ch = chan->channel;
+ unsigned long flags;
+#ifdef PC300_DEBUG_TX
+ int i;
+#endif
+
+ if (chan->conf.monitor) {
+ /* In monitor mode no Tx is done: ignore packet */
+ dev_kfree_skb(skb);
+ return 0;
+ } else if (!netif_carrier_ok(dev)) {
+ /* DCD must be OFF: drop packet */
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ stats->tx_carrier_errors++;
+ return 0;
+ } else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) {
+ printk("%s: DCD is OFF. Going administrative down.\n", dev->name);
+ stats->tx_errors++;
+ stats->tx_carrier_errors++;
+ dev_kfree_skb(skb);
+ netif_carrier_off(dev);
+ CPC_LOCK(card, flags);
+ cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR);
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ CPC_UNLOCK(card, flags);
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ /* Write buffer to DMA buffers */
+ if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) {
+// printk("%s: write error. Dropping TX packet.\n", dev->name);
+ netif_stop_queue(dev);
+ dev_kfree_skb(skb);
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ return 0;
+ }
+#ifdef PC300_DEBUG_TX
+ printk("%s T:", dev->name);
+ for (i = 0; i < skb->len; i++)
+ printk(" %02x", *(skb->data + i));
+ printk("\n");
+#endif
+
+ if (d->trace_on) {
+ cpc_trace(dev, skb, 'T');
+ }
+ dev->trans_start = jiffies;
+
+ /* Start transmission */
+ CPC_LOCK(card, flags);
+	/* check whether more than one free Tx descriptor remains */
+ if (card->chan[ch].nfree_tx_bd <= 1) {
+		/* not enough free descriptors: stop the queue */
+ netif_stop_queue(dev);
+ }
+ cpc_writel(card->hw.scabase + DTX_REG(EDAL, ch),
+ TX_BD_ADDR(ch, chan->tx_next_bd));
+ cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA);
+ cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE);
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ CPC_UNLOCK(card, flags);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+void cpc_net_rx(struct net_device *dev)
+{
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ int ch = chan->channel;
+#ifdef PC300_DEBUG_RX
+ int i;
+#endif
+ int rxb;
+ struct sk_buff *skb;
+
+ while (1) {
+ if ((rxb = dma_get_rx_frame_size(card, ch)) == -1)
+ return;
+
+ if (!netif_carrier_ok(dev)) {
+ /* DCD must be OFF: drop packet */
+ printk("%s : DCD is OFF - drop %d rx bytes\n", dev->name, rxb);
+ skb = NULL;
+ } else {
+ if (rxb > (dev->mtu + 40)) { /* add headers */
+ printk("%s : MTU exceeded %d\n", dev->name, rxb);
+ skb = NULL;
+ } else {
+ skb = dev_alloc_skb(rxb);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze!!\n", dev->name);
+ return;
+ }
+ skb->dev = dev;
+ }
+ }
+
+ if (((rxb = dma_buf_read(card, ch, skb)) <= 0) || (skb == NULL)) {
+#ifdef PC300_DEBUG_RX
+ printk("%s: rxb = %x\n", dev->name, rxb);
+#endif
+ if ((skb == NULL) && (rxb > 0)) {
+ /* rxb > dev->mtu */
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+ continue;
+ }
+
+ if (rxb < 0) { /* Invalid frame */
+ rxb = -rxb;
+ if (rxb & DST_OVR) {
+ stats->rx_errors++;
+ stats->rx_fifo_errors++;
+ }
+ if (rxb & DST_CRC) {
+ stats->rx_errors++;
+ stats->rx_crc_errors++;
+ }
+ if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) {
+ stats->rx_errors++;
+ stats->rx_frame_errors++;
+ }
+ }
+ if (skb) {
+ dev_kfree_skb_irq(skb);
+ }
+ continue;
+ }
+
+ stats->rx_bytes += rxb;
+
+#ifdef PC300_DEBUG_RX
+ printk("%s R:", dev->name);
+ for (i = 0; i < skb->len; i++)
+ printk(" %02x", *(skb->data + i));
+ printk("\n");
+#endif
+ if (d->trace_on) {
+ cpc_trace(dev, skb, 'R');
+ }
+ stats->rx_packets++;
+ skb->protocol = hdlc_type_trans(skb, dev);
+ netif_rx(skb);
+ }
+}
+
+/************************************/
+/*** PC300 Interrupt Routines ***/
+/************************************/
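+/*
+ * sca_tx_intr() reclaims transmitted descriptors: it walks the Tx ring
+ * from tx_first_bd up to the SCA's current descriptor address (CDAL),
+ * accounting tx_packets/tx_bytes and returning each descriptor to the
+ * free count, then wakes the netif queue (or, for MLPPP, polls the TTY
+ * layer).
+ */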
+static void sca_tx_intr(pc300dev_t *dev)
+{
+ pc300ch_t *chan = (pc300ch_t *)dev->chan;
+ pc300_t *card = (pc300_t *)chan->card;
+ int ch = chan->channel;
+ volatile pcsca_bd_t __iomem * ptdescr;
+ struct net_device_stats *stats = hdlc_stats(dev->dev);
+
+ /* Clean up descriptors from previous transmission */
+ ptdescr = (card->hw.rambase +
+ TX_BD_ADDR(ch,chan->tx_first_bd));
+ while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) !=
+ TX_BD_ADDR(ch,chan->tx_first_bd)) &&
+ (cpc_readb(&ptdescr->status) & DST_OSB)) {
+ stats->tx_packets++;
+ stats->tx_bytes += cpc_readw(&ptdescr->len);
+ cpc_writeb(&ptdescr->status, DST_OSB);
+ cpc_writew(&ptdescr->len, 0);
+ chan->nfree_tx_bd++;
+ chan->tx_first_bd = (chan->tx_first_bd + 1) & (N_DMA_TX_BUF - 1);
+ ptdescr = (card->hw.rambase + TX_BD_ADDR(ch,chan->tx_first_bd));
+ }
+
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto == PC300_PROTO_MLPPP) {
+ cpc_tty_trigger_poll(dev);
+ } else {
+#endif
+ /* Tell the upper layer we are ready to transmit more packets */
+ netif_wake_queue(dev->dev);
+#ifdef CONFIG_PC300_MLPPP
+ }
+#endif
+}
+
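+/*
+ * sca_intr() services the HD64572 (SCA): it loops on ISR0 and, per
+ * channel, handles the Rx DMA interrupts (buffer overflow and
+ * end-of-message), the Tx DMA interrupts (underrun and end-of-message)
+ * and the MSCI DCD-change interrupts that take the carrier up or down.
+ * The loop is bounded to 10 passes so a stuck board cannot monopolize
+ * the CPU.
+ */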
+static void sca_intr(pc300_t * card)
+{
+ void __iomem *scabase = card->hw.scabase;
+ volatile uclong status;
+ int ch;
+ int intr_count = 0;
+ unsigned char dsr_rx;
+
+ while ((status = cpc_readl(scabase + ISR0)) != 0) {
+ for (ch = 0; ch < card->hw.nchan; ch++) {
+ pc300ch_t *chan = &card->chan[ch];
+ pc300dev_t *d = &chan->d;
+ struct net_device *dev = d->dev;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ spin_lock(&card->card_lock);
+
+ /**** Reception ****/
+ if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
+ ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch));
+
+ /* Clear RX interrupts */
+ cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
+
+#ifdef PC300_DEBUG_INTR
+ printk ("sca_intr: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
+ ch, status, drx_stat);
+#endif
+ if (status & IR0_DRX(IR0_DMIA, ch)) {
+ if (drx_stat & DSR_BOF) {
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto == PC300_PROTO_MLPPP) {
+ /* verify if driver is TTY */
+ if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+ rx_dma_stop(card, ch);
+ }
+ cpc_tty_receive(d);
+ rx_dma_start(card, ch);
+ } else
+#endif
+ {
+ if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+ rx_dma_stop(card, ch);
+ }
+ cpc_net_rx(dev);
+ /* Discard invalid frames */
+ hdlc->stats.rx_errors++;
+ hdlc->stats.rx_over_errors++;
+ chan->rx_first_bd = 0;
+ chan->rx_last_bd = N_DMA_RX_BUF - 1;
+ rx_dma_start(card, ch);
+ }
+ }
+ }
+ if (status & IR0_DRX(IR0_DMIB, ch)) {
+ if (drx_stat & DSR_EOM) {
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase +
+ card->hw.cpld_reg2,
+ cpc_readb (card->hw.falcbase +
+ card->hw.cpld_reg2) |
+ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto == PC300_PROTO_MLPPP) {
+ /* verify if driver is TTY */
+ cpc_tty_receive(d);
+ } else {
+ cpc_net_rx(dev);
+ }
+#else
+ cpc_net_rx(dev);
+#endif
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase +
+ card->hw.cpld_reg2,
+ cpc_readb (card->hw.falcbase +
+ card->hw.cpld_reg2) &
+ ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ }
+ }
+ if (!(dsr_rx = cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+#ifdef PC300_DEBUG_INTR
+ printk("%s: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x, dsr2=0x%02x)\n",
+ dev->name, ch, status, drx_stat, dsr_rx);
+#endif
+ cpc_writeb(scabase + DSR_RX(ch), (dsr_rx | DSR_DE) & 0xfe);
+ }
+ }
+
+ /**** Transmission ****/
+ if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
+ ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch));
+
+ /* Clear TX interrupts */
+ cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
+
+#ifdef PC300_DEBUG_INTR
+ printk ("sca_intr: TX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
+ ch, status, dtx_stat);
+#endif
+ if (status & IR0_DTX(IR0_EFT, ch)) {
+ if (dtx_stat & DSR_UDRF) {
+ if (cpc_readb (scabase + M_REG(TBN, ch)) != 0) {
+ cpc_writeb(scabase + M_REG(CMD,ch), CMD_TX_BUF_CLR);
+ }
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb (card->hw.falcbase +
+ card->hw.cpld_reg2) &
+ ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ hdlc->stats.tx_errors++;
+ hdlc->stats.tx_fifo_errors++;
+ sca_tx_intr(d);
+ }
+ }
+ if (status & IR0_DTX(IR0_DMIA, ch)) {
+ if (dtx_stat & DSR_BOF) {
+ }
+ }
+ if (status & IR0_DTX(IR0_DMIB, ch)) {
+ if (dtx_stat & DSR_EOM) {
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb (card->hw.falcbase +
+ card->hw.cpld_reg2) &
+ ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ sca_tx_intr(d);
+ }
+ }
+ }
+
+ /**** MSCI ****/
+ if (status & IR0_M(IR0_RXINTA, ch)) {
+ ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch));
+
+ /* Clear MSCI interrupts */
+ cpc_writeb(scabase + M_REG(ST1, ch), st1);
+
+#ifdef PC300_DEBUG_INTR
+ printk("sca_intr: MSCI intr chan[%d] (st=0x%08lx, st1=0x%02x)\n",
+ ch, status, st1);
+#endif
+ if (st1 & ST1_CDCD) { /* DCD changed */
+ if (cpc_readb(scabase + M_REG(ST3, ch)) & ST3_DCD) {
+ printk ("%s: DCD is OFF. Going administrative down.\n",
+ dev->name);
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto != PC300_PROTO_MLPPP) {
+ netif_carrier_off(dev);
+ }
+#else
+ netif_carrier_off(dev);
+
+#endif
+ card->chan[ch].d.line_off++;
+ } else { /* DCD = 1 */
+ printk ("%s: DCD is ON. Going administrative up.\n",
+ dev->name);
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto != PC300_PROTO_MLPPP)
+ /* verify if driver is not TTY */
+#endif
+ netif_carrier_on(dev);
+ card->chan[ch].d.line_on++;
+ }
+ }
+ }
+ spin_unlock(&card->card_lock);
+ }
+ if (++intr_count == 10)
+			/* Too much work on this board. Force exit */
+ break;
+ }
+}
+
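+/*
+ * falc_t1_loop_detection()/falc_e1_loop_detection() watch FRS1 (T1) or
+ * RSP (E1) for line loopback activation/deactivation codes coming from
+ * the remote end and switch the remote loop accordingly, but only when
+ * this board is neither sending PRBS (LCR1_XPRBS) nor generating loop
+ * codes itself (loop_gen).
+ */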
+static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
+ !pfalc->loop_gen) {
+ if (frs1 & FRS1_LLBDD) {
+ // A Line Loop Back Deactivation signal detected
+ if (pfalc->loop_active) {
+ falc_remote_loop(card, ch, 0);
+ }
+ } else {
+ if ((frs1 & FRS1_LLBAD) &&
+ ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
+ // A Line Loop Back Activation signal detected
+ if (!pfalc->loop_active) {
+ falc_remote_loop(card, ch, 1);
+ }
+ }
+ }
+ }
+}
+
+static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+
+ if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
+ !pfalc->loop_gen) {
+ if (rsp & RSP_LLBDD) {
+ // A Line Loop Back Deactivation signal detected
+ if (pfalc->loop_active) {
+ falc_remote_loop(card, ch, 0);
+ }
+ } else {
+ if ((rsp & RSP_LLBAD) &&
+ ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
+ // A Line Loop Back Activation signal detected
+ if (!pfalc->loop_active) {
+ falc_remote_loop(card, ch, 1);
+ }
+ }
+ }
+ }
+}
+
+static void falc_t1_intr(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar isr0, isr3, gis;
+ ucchar dummy;
+
+ while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
+ if (gis & GIS_ISR0) {
+ isr0 = cpc_readb(falcbase + F_REG(FISR0, ch));
+ if (isr0 & FISR0_PDEN) {
+ /* Read the bit to clear the situation */
+ if (cpc_readb(falcbase + F_REG(FRS1, ch)) &
+ FRS1_PDEN) {
+ pfalc->pden++;
+ }
+ }
+ }
+
+ if (gis & GIS_ISR1) {
+ dummy = cpc_readb(falcbase + F_REG(FISR1, ch));
+ }
+
+ if (gis & GIS_ISR2) {
+ dummy = cpc_readb(falcbase + F_REG(FISR2, ch));
+ }
+
+ if (gis & GIS_ISR3) {
+ isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
+ if (isr3 & FISR3_SEC) {
+ pfalc->sec++;
+ falc_update_stats(card, ch);
+ falc_check_status(card, ch,
+ cpc_readb(falcbase + F_REG(FRS0, ch)));
+ }
+ if (isr3 & FISR3_ES) {
+ pfalc->es++;
+ }
+ if (isr3 & FISR3_LLBSC) {
+ falc_t1_loop_detection(card, ch,
+ cpc_readb(falcbase + F_REG(FRS1, ch)));
+ }
+ }
+ }
+}
+
+static void falc_e1_intr(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ void __iomem *falcbase = card->hw.falcbase;
+ ucchar isr1, isr2, isr3, gis, rsp;
+ ucchar dummy;
+
+ while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
+ rsp = cpc_readb(falcbase + F_REG(RSP, ch));
+
+ if (gis & GIS_ISR0) {
+ dummy = cpc_readb(falcbase + F_REG(FISR0, ch));
+ }
+ if (gis & GIS_ISR1) {
+ isr1 = cpc_readb(falcbase + F_REG(FISR1, ch));
+ if (isr1 & FISR1_XMB) {
+ if ((pfalc->xmb_cause & 2)
+ && pfalc->multiframe_mode) {
+ if (cpc_readb (falcbase + F_REG(FRS0, ch)) &
+ (FRS0_LOS | FRS0_AIS | FRS0_LFA)) {
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch))
+ & ~XSP_AXS);
+ } else {
+ cpc_writeb(falcbase + F_REG(XSP, ch),
+ cpc_readb(falcbase + F_REG(XSP, ch))
+ | XSP_AXS);
+ }
+ }
+ pfalc->xmb_cause = 0;
+ cpc_writeb(falcbase + F_REG(IMR1, ch),
+ cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_XMB);
+ }
+ if (isr1 & FISR1_LLBSC) {
+ falc_e1_loop_detection(card, ch, rsp);
+ }
+ }
+ if (gis & GIS_ISR2) {
+ isr2 = cpc_readb(falcbase + F_REG(FISR2, ch));
+ if (isr2 & FISR2_T400MS) {
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XRA);
+ }
+ if (isr2 & FISR2_MFAR) {
+ cpc_writeb(falcbase + F_REG(XSW, ch),
+ cpc_readb(falcbase + F_REG(XSW, ch)) & ~XSW_XRA);
+ }
+ if (isr2 & (FISR2_FAR | FISR2_LFA | FISR2_AIS | FISR2_LOS)) {
+ pfalc->xmb_cause |= 2;
+ cpc_writeb(falcbase + F_REG(IMR1, ch),
+ cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_XMB);
+ }
+ }
+ if (gis & GIS_ISR3) {
+ isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
+ if (isr3 & FISR3_SEC) {
+ pfalc->sec++;
+ falc_update_stats(card, ch);
+ falc_check_status(card, ch,
+ cpc_readb(falcbase + F_REG(FRS0, ch)));
+ }
+ if (isr3 & FISR3_ES) {
+ pfalc->es++;
+ }
+ }
+ }
+}
+
+static void falc_intr(pc300_t * card)
+{
+ int ch;
+
+ for (ch = 0; ch < card->hw.nchan; ch++) {
+ pc300ch_t *chan = &card->chan[ch];
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+
+ if (conf->media == IF_IFACE_T1) {
+ falc_t1_intr(card, ch);
+ } else {
+ falc_e1_intr(card, ch);
+ }
+ }
+}
+
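+/*
+ * cpc_intr() is the shared interrupt handler.  RSV/X21 boards go
+ * straight to the SCA handler; TE boards poll the PLX9050 interrupt
+ * status and dispatch LINT1 to sca_intr() and LINT2 to falc_intr()
+ * until both sources are quiet.
+ */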
+static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ pc300_t *card;
+ volatile ucchar plx_status;
+
+ if ((card = (pc300_t *) dev_id) == 0) {
+#ifdef PC300_DEBUG_INTR
+ printk("cpc_intr: spurious intr %d\n", irq);
+#endif
+ return IRQ_NONE; /* spurious intr */
+ }
+
+ if (card->hw.rambase == 0) {
+#ifdef PC300_DEBUG_INTR
+ printk("cpc_intr: spurious intr2 %d\n", irq);
+#endif
+ return IRQ_NONE; /* spurious intr */
+ }
+
+ switch (card->hw.type) {
+ case PC300_RSV:
+ case PC300_X21:
+ sca_intr(card);
+ break;
+
+ case PC300_TE:
+ while ( (plx_status = (cpc_readb(card->hw.plxbase + card->hw.intctl_reg) &
+ (PLX_9050_LINT1_STATUS | PLX_9050_LINT2_STATUS))) != 0) {
+ if (plx_status & PLX_9050_LINT1_STATUS) { /* SCA Interrupt */
+ sca_intr(card);
+ }
+ if (plx_status & PLX_9050_LINT2_STATUS) { /* FALC Interrupt */
+ falc_intr(card);
+ }
+ }
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
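+/*
+ * cpc_sca_status() and cpc_falc_status() are diagnostic dumps used by
+ * the SIOCGPC300STATUS and SIOCGPC300FALCSTATUS ioctls below; they only
+ * printk register and counter snapshots and, if an ILAR condition is
+ * latched, clear it and rewrite DMER.
+ */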
+void cpc_sca_status(pc300_t * card, int ch)
+{
+ ucchar ilar;
+ void __iomem *scabase = card->hw.scabase;
+ unsigned long flags;
+
+ tx_dma_buf_check(card, ch);
+ rx_dma_buf_check(card, ch);
+ ilar = cpc_readb(scabase + ILAR);
+ printk ("ILAR=0x%02x, WCRL=0x%02x, PCR=0x%02x, BTCR=0x%02x, BOLR=0x%02x\n",
+ ilar, cpc_readb(scabase + WCRL), cpc_readb(scabase + PCR),
+ cpc_readb(scabase + BTCR), cpc_readb(scabase + BOLR));
+ printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
+ cpc_readl(scabase + DTX_REG(CDAL, ch)),
+ cpc_readl(scabase + DTX_REG(EDAL, ch)));
+ printk("RX_CDA=0x%08x, RX_EDA=0x%08x, BFL=0x%04x\n",
+ cpc_readl(scabase + DRX_REG(CDAL, ch)),
+ cpc_readl(scabase + DRX_REG(EDAL, ch)),
+ cpc_readw(scabase + DRX_REG(BFLL, ch)));
+ printk("DMER=0x%02x, DSR_TX=0x%02x, DSR_RX=0x%02x\n",
+ cpc_readb(scabase + DMER), cpc_readb(scabase + DSR_TX(ch)),
+ cpc_readb(scabase + DSR_RX(ch)));
+ printk("DMR_TX=0x%02x, DMR_RX=0x%02x, DIR_TX=0x%02x, DIR_RX=0x%02x\n",
+ cpc_readb(scabase + DMR_TX(ch)), cpc_readb(scabase + DMR_RX(ch)),
+ cpc_readb(scabase + DIR_TX(ch)),
+ cpc_readb(scabase + DIR_RX(ch)));
+ printk("DCR_TX=0x%02x, DCR_RX=0x%02x, FCT_TX=0x%02x, FCT_RX=0x%02x\n",
+ cpc_readb(scabase + DCR_TX(ch)), cpc_readb(scabase + DCR_RX(ch)),
+ cpc_readb(scabase + FCT_TX(ch)),
+ cpc_readb(scabase + FCT_RX(ch)));
+ printk("MD0=0x%02x, MD1=0x%02x, MD2=0x%02x, MD3=0x%02x, IDL=0x%02x\n",
+ cpc_readb(scabase + M_REG(MD0, ch)),
+ cpc_readb(scabase + M_REG(MD1, ch)),
+ cpc_readb(scabase + M_REG(MD2, ch)),
+ cpc_readb(scabase + M_REG(MD3, ch)),
+ cpc_readb(scabase + M_REG(IDL, ch)));
+ printk("CMD=0x%02x, SA0=0x%02x, SA1=0x%02x, TFN=0x%02x, CTL=0x%02x\n",
+ cpc_readb(scabase + M_REG(CMD, ch)),
+ cpc_readb(scabase + M_REG(SA0, ch)),
+ cpc_readb(scabase + M_REG(SA1, ch)),
+ cpc_readb(scabase + M_REG(TFN, ch)),
+ cpc_readb(scabase + M_REG(CTL, ch)));
+ printk("ST0=0x%02x, ST1=0x%02x, ST2=0x%02x, ST3=0x%02x, ST4=0x%02x\n",
+ cpc_readb(scabase + M_REG(ST0, ch)),
+ cpc_readb(scabase + M_REG(ST1, ch)),
+ cpc_readb(scabase + M_REG(ST2, ch)),
+ cpc_readb(scabase + M_REG(ST3, ch)),
+ cpc_readb(scabase + M_REG(ST4, ch)));
+ printk ("CST0=0x%02x, CST1=0x%02x, CST2=0x%02x, CST3=0x%02x, FST=0x%02x\n",
+ cpc_readb(scabase + M_REG(CST0, ch)),
+ cpc_readb(scabase + M_REG(CST1, ch)),
+ cpc_readb(scabase + M_REG(CST2, ch)),
+ cpc_readb(scabase + M_REG(CST3, ch)),
+ cpc_readb(scabase + M_REG(FST, ch)));
+ printk("TRC0=0x%02x, TRC1=0x%02x, RRC=0x%02x, TBN=0x%02x, RBN=0x%02x\n",
+ cpc_readb(scabase + M_REG(TRC0, ch)),
+ cpc_readb(scabase + M_REG(TRC1, ch)),
+ cpc_readb(scabase + M_REG(RRC, ch)),
+ cpc_readb(scabase + M_REG(TBN, ch)),
+ cpc_readb(scabase + M_REG(RBN, ch)));
+ printk("TFS=0x%02x, TNR0=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
+ cpc_readb(scabase + M_REG(TFS, ch)),
+ cpc_readb(scabase + M_REG(TNR0, ch)),
+ cpc_readb(scabase + M_REG(TNR1, ch)),
+ cpc_readb(scabase + M_REG(RNR, ch)));
+ printk("TCR=0x%02x, RCR=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
+ cpc_readb(scabase + M_REG(TCR, ch)),
+ cpc_readb(scabase + M_REG(RCR, ch)),
+ cpc_readb(scabase + M_REG(TNR1, ch)),
+ cpc_readb(scabase + M_REG(RNR, ch)));
+ printk("TXS=0x%02x, RXS=0x%02x, EXS=0x%02x, TMCT=0x%02x, TMCR=0x%02x\n",
+ cpc_readb(scabase + M_REG(TXS, ch)),
+ cpc_readb(scabase + M_REG(RXS, ch)),
+ cpc_readb(scabase + M_REG(EXS, ch)),
+ cpc_readb(scabase + M_REG(TMCT, ch)),
+ cpc_readb(scabase + M_REG(TMCR, ch)));
+ printk("IE0=0x%02x, IE1=0x%02x, IE2=0x%02x, IE4=0x%02x, FIE=0x%02x\n",
+ cpc_readb(scabase + M_REG(IE0, ch)),
+ cpc_readb(scabase + M_REG(IE1, ch)),
+ cpc_readb(scabase + M_REG(IE2, ch)),
+ cpc_readb(scabase + M_REG(IE4, ch)),
+ cpc_readb(scabase + M_REG(FIE, ch)));
+ printk("IER0=0x%08x\n", cpc_readl(scabase + IER0));
+
+ if (ilar != 0) {
+ CPC_LOCK(card, flags);
+ cpc_writeb(scabase + ILAR, ilar);
+ cpc_writeb(scabase + DMER, 0x80);
+ CPC_UNLOCK(card, flags);
+ }
+}
+
+void cpc_falc_status(pc300_t * card, int ch)
+{
+ pc300ch_t *chan = &card->chan[ch];
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ unsigned long flags;
+
+ CPC_LOCK(card, flags);
+ printk("CH%d: %s %s %d channels\n",
+ ch, (pfalc->sync ? "SYNC" : ""), (pfalc->active ? "ACTIVE" : ""),
+ pfalc->num_channels);
+
+ printk(" pden=%d, los=%d, losr=%d, lfa=%d, farec=%d\n",
+ pfalc->pden, pfalc->los, pfalc->losr, pfalc->lfa, pfalc->farec);
+ printk(" lmfa=%d, ais=%d, sec=%d, es=%d, rai=%d\n",
+ pfalc->lmfa, pfalc->ais, pfalc->sec, pfalc->es, pfalc->rai);
+ printk(" bec=%d, fec=%d, cvc=%d, cec=%d, ebc=%d\n",
+ pfalc->bec, pfalc->fec, pfalc->cvc, pfalc->cec, pfalc->ebc);
+
+ printk("\n");
+ printk(" STATUS: %s %s %s %s %s %s\n",
+ (pfalc->red_alarm ? "RED" : ""),
+ (pfalc->blue_alarm ? "BLU" : ""),
+ (pfalc->yellow_alarm ? "YEL" : ""),
+ (pfalc->loss_fa ? "LFA" : ""),
+ (pfalc->loss_mfa ? "LMF" : ""), (pfalc->prbs ? "PRB" : ""));
+ CPC_UNLOCK(card, flags);
+}
+
+int cpc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
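+/*
+ * cpc_ioctl() implements the driver private SIOC[GS]PC300* ioctls
+ * (configuration, status/statistics dumps, loopback control, pattern
+ * test) plus the generic SIOCWANDEV interface settings.  All of them
+ * require CAP_NET_ADMIN, and the private ones pass their payload via
+ * ifr_data.  A minimal userspace sketch (socket fd and error handling
+ * omitted, the interface name is only an example):
+ *
+ *	struct ifreq ifr;
+ *	pc300stats_t stats;
+ *
+ *	strcpy(ifr.ifr_name, "hdlc0");
+ *	ifr.ifr_data = (void *) &stats;
+ *	ioctl(fd, SIOCGPC300UTILSTATS, &ifr);
+ */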
+int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ pc300conf_t conf_aux;
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ int ch = chan->channel;
+ void __user *arg = ifr->ifr_data;
+ struct if_settings *settings = &ifr->ifr_settings;
+ void __iomem *scabase = card->hw.scabase;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case SIOCGPC300CONF:
+#ifdef CONFIG_PC300_MLPPP
+ if (conf->proto != PC300_PROTO_MLPPP) {
+ conf->proto = hdlc->proto.id;
+ }
+#else
+ conf->proto = hdlc->proto.id;
+#endif
+ memcpy(&conf_aux.conf, conf, sizeof(pc300chconf_t));
+ memcpy(&conf_aux.hw, &card->hw, sizeof(pc300hw_t));
+ if (!arg ||
+ copy_to_user(arg, &conf_aux, sizeof(pc300conf_t)))
+ return -EINVAL;
+ return 0;
+ case SIOCSPC300CONF:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!arg ||
+ copy_from_user(&conf_aux.conf, arg, sizeof(pc300chconf_t)))
+ return -EINVAL;
+ if (card->hw.cpld_id < 0x02 &&
+ conf_aux.conf.fr_mode == PC300_FR_UNFRAMED) {
+ /* CPLD_ID < 0x02 doesn't support Unframed E1 */
+ return -EINVAL;
+ }
+#ifdef CONFIG_PC300_MLPPP
+ if (conf_aux.conf.proto == PC300_PROTO_MLPPP) {
+ if (conf->proto != PC300_PROTO_MLPPP) {
+ memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+ cpc_tty_init(d); /* init TTY driver */
+ }
+ } else {
+ if (conf_aux.conf.proto == 0xffff) {
+ if (conf->proto == PC300_PROTO_MLPPP){
+ /* ifdown interface */
+ cpc_close(dev);
+ }
+ } else {
+ memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+ hdlc->proto.id = conf->proto;
+ }
+ }
+#else
+ memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+ hdlc->proto.id = conf->proto;
+#endif
+ return 0;
+ case SIOCGPC300STATUS:
+ cpc_sca_status(card, ch);
+ return 0;
+ case SIOCGPC300FALCSTATUS:
+ cpc_falc_status(card, ch);
+ return 0;
+
+ case SIOCGPC300UTILSTATS:
+ {
+ if (!arg) { /* clear statistics */
+ memset(&hdlc->stats, 0, sizeof(struct net_device_stats));
+ if (card->hw.type == PC300_TE) {
+ memset(&chan->falc, 0, sizeof(falc_t));
+ }
+ } else {
+ pc300stats_t pc300stats;
+
+ memset(&pc300stats, 0, sizeof(pc300stats_t));
+ pc300stats.hw_type = card->hw.type;
+ pc300stats.line_on = card->chan[ch].d.line_on;
+ pc300stats.line_off = card->chan[ch].d.line_off;
+ memcpy(&pc300stats.gen_stats, &hdlc->stats,
+ sizeof(struct net_device_stats));
+ if (card->hw.type == PC300_TE)
+ memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t));
+ if (copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)))
+ return -EFAULT;
+ }
+ return 0;
+ }
+
+ case SIOCGPC300UTILSTATUS:
+ {
+ struct pc300status pc300status;
+
+ pc300status.hw_type = card->hw.type;
+ if (card->hw.type == PC300_TE) {
+ pc300status.te_status.sync = chan->falc.sync;
+ pc300status.te_status.red_alarm = chan->falc.red_alarm;
+ pc300status.te_status.blue_alarm = chan->falc.blue_alarm;
+ pc300status.te_status.loss_fa = chan->falc.loss_fa;
+ pc300status.te_status.yellow_alarm =chan->falc.yellow_alarm;
+ pc300status.te_status.loss_mfa = chan->falc.loss_mfa;
+ pc300status.te_status.prbs = chan->falc.prbs;
+ } else {
+ pc300status.gen_status.dcd =
+ !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_DCD);
+ pc300status.gen_status.cts =
+ !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_CTS);
+ pc300status.gen_status.rts =
+ !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_RTS);
+ pc300status.gen_status.dtr =
+ !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR);
+ /* There is no DSR in HD64572 */
+ }
+ if (!arg
+ || copy_to_user(arg, &pc300status, sizeof(pc300status_t)))
+ return -EINVAL;
+ return 0;
+ }
+
+ case SIOCSPC300TRACE:
+ /* Sets/resets a trace_flag for the respective device */
+ if (!arg || copy_from_user(&d->trace_on, arg,sizeof(unsigned char)))
+ return -EINVAL;
+ return 0;
+
+ case SIOCSPC300LOOPBACK:
+ {
+ struct pc300loopback pc300loop;
+
+ /* TE boards only */
+ if (card->hw.type != PC300_TE)
+ return -EINVAL;
+
+ if (!arg ||
+ copy_from_user(&pc300loop, arg, sizeof(pc300loopback_t)))
+ return -EINVAL;
+ switch (pc300loop.loop_type) {
+ case PC300LOCLOOP: /* Turn the local loop on/off */
+ falc_local_loop(card, ch, pc300loop.loop_on);
+ return 0;
+
+ case PC300REMLOOP: /* Turn the remote loop on/off */
+ falc_remote_loop(card, ch, pc300loop.loop_on);
+ return 0;
+
+ case PC300PAYLOADLOOP: /* Turn the payload loop on/off */
+ falc_payload_loop(card, ch, pc300loop.loop_on);
+ return 0;
+
+ case PC300GENLOOPUP: /* Generate loop UP */
+ if (pc300loop.loop_on) {
+ falc_generate_loop_up_code (card, ch);
+ } else {
+ turn_off_xlu(card, ch);
+ }
+ return 0;
+
+ case PC300GENLOOPDOWN: /* Generate loop DOWN */
+ if (pc300loop.loop_on) {
+ falc_generate_loop_down_code (card, ch);
+ } else {
+ turn_off_xld(card, ch);
+ }
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ case SIOCSPC300PATTERNTEST:
+ /* Turn the pattern test on/off and report the error counter */
+ {
+ struct pc300patterntst pc300patrntst;
+
+ /* TE boards only */
+ if (card->hw.type != PC300_TE)
+ return -EINVAL;
+
+ if (card->hw.cpld_id < 0x02) {
+ /* CPLD_ID < 0x02 doesn't support pattern test */
+ return -EINVAL;
+ }
+
+ if (!arg ||
+ copy_from_user(&pc300patrntst,arg,sizeof(pc300patterntst_t)))
+ return -EINVAL;
+ if (pc300patrntst.patrntst_on == 2) {
+ if (chan->falc.prbs == 0) {
+ falc_pattern_test(card, ch, 1);
+ }
+ pc300patrntst.num_errors =
+ falc_pattern_test_error(card, ch);
+ if (!arg
+ || copy_to_user(arg, &pc300patrntst,
+ sizeof (pc300patterntst_t)))
+ return -EINVAL;
+ } else {
+ falc_pattern_test(card, ch, pc300patrntst.patrntst_on);
+ }
+ return 0;
+ }
+
+ case SIOCWANDEV:
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ {
+ const size_t size = sizeof(sync_serial_settings);
+ ifr->ifr_settings.type = conf->media;
+ if (ifr->ifr_settings.size < size) {
+ /* data size wanted */
+ ifr->ifr_settings.size = size;
+ return -ENOBUFS;
+ }
+
+ if (copy_to_user(settings->ifs_ifsu.sync,
+ &conf->phys_settings, size)) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+
+ case IF_IFACE_V35:
+ case IF_IFACE_V24:
+ case IF_IFACE_X21:
+ {
+ const size_t size = sizeof(sync_serial_settings);
+
+ if (!capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+ /* incorrect data len? */
+ if (ifr->ifr_settings.size != size) {
+ return -ENOBUFS;
+ }
+
+ if (copy_from_user(&conf->phys_settings,
+ settings->ifs_ifsu.sync, size)) {
+ return -EFAULT;
+ }
+
+ if (conf->phys_settings.loopback) {
+ cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
+ cpc_readb(card->hw.scabase + M_REG(MD2, ch)) |
+ MD2_LOOP_MIR);
+ }
+ conf->media = ifr->ifr_settings.type;
+ return 0;
+ }
+
+ case IF_IFACE_T1:
+ case IF_IFACE_E1:
+ {
+ const size_t te_size = sizeof(te1_settings);
+ const size_t size = sizeof(sync_serial_settings);
+
+ if (!capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+
+ /* incorrect data len? */
+ if (ifr->ifr_settings.size != te_size) {
+ return -ENOBUFS;
+ }
+
+ if (copy_from_user(&conf->phys_settings,
+ settings->ifs_ifsu.te1, size)) {
+ return -EFAULT;
+ }
+ /* Ignoring HDLC slot_map for a while */
+
+ if (conf->phys_settings.loopback) {
+ cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
+ cpc_readb(card->hw.scabase + M_REG(MD2, ch)) |
+ MD2_LOOP_MIR);
+ }
+ conf->media = ifr->ifr_settings.type;
+ return 0;
+ }
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+static struct net_device_stats *cpc_get_stats(struct net_device *dev)
+{
+ return hdlc_stats(dev);
+}
+
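+/*
+ * Baud-rate generator helper: searches for a (br, tc) pair such that
+ * rate ~= clock / 2^br / tc, with tc limited to 8 bits. The smallest
+ * power-of-two divisor (2^br) for which tc fits is chosen; br is
+ * returned through *br_io and tc as the return value (or -1 if no pair
+ * gets within +/- 1% of the requested rate).
+ */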
+static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
+{
+ int br, tc;
+ int br_pwr, error;
+
+ if (rate == 0)
+ return (0);
+
+ for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
+ if ((tc = clock / br_pwr / rate) <= 0xff) {
+ *br_io = br;
+ break;
+ }
+ }
+
+ if (tc <= 0xff) {
+ /* Compare the achieved rate (clock / 2^br / tc) with the
+ * requested one; errors bigger than +/- 1% won't be tolerated */
+ error = (int)(clock / br_pwr / tc) - (int)rate;
+ if (error < -(int)(rate / 100) || error > (int)(rate / 100))
+ return (-1);
+ else
+ return (tc);
+ } else {
+ return (-1);
+ }
+}
+
+int ch_config(pc300dev_t * d)
+{
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+ pc300_t *card = (pc300_t *) chan->card;
+ void __iomem *scabase = card->hw.scabase;
+ void __iomem *plxbase = card->hw.plxbase;
+ int ch = chan->channel;
+ uclong clkrate = chan->conf.phys_settings.clock_rate;
+ uclong clktype = chan->conf.phys_settings.clock_type;
+ ucshort encoding = chan->conf.proto_settings.encoding;
+ ucshort parity = chan->conf.proto_settings.parity;
+ int tmc, br;
+ ucchar md0, md2;
+
+ /* Reset the channel */
+ cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
+
+ /* Configure the SCA registers */
+ switch (parity) {
+ case PARITY_NONE:
+ md0 = MD0_BIT_SYNC;
+ break;
+ case PARITY_CRC16_PR0:
+ md0 = MD0_CRC16_0|MD0_CRCC0|MD0_BIT_SYNC;
+ break;
+ case PARITY_CRC16_PR1:
+ md0 = MD0_CRC16_1|MD0_CRCC0|MD0_BIT_SYNC;
+ break;
+ case PARITY_CRC32_PR1_CCITT:
+ md0 = MD0_CRC32|MD0_CRCC0|MD0_BIT_SYNC;
+ break;
+ case PARITY_CRC16_PR1_CCITT:
+ default:
+ md0 = MD0_CRC_CCITT|MD0_CRCC0|MD0_BIT_SYNC;
+ break;
+ }
+ switch (encoding) {
+ case ENCODING_NRZI:
+ md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZI;
+ break;
+ case ENCODING_FM_MARK: /* FM1 */
+ md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM1;
+ break;
+ case ENCODING_FM_SPACE: /* FM0 */
+ md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM0;
+ break;
+ case ENCODING_MANCHESTER: /* It's not working... */
+ md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_MANCH;
+ break;
+ case ENCODING_NRZ:
+ default:
+ md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZ;
+ break;
+ }
+ cpc_writeb(scabase + M_REG(MD0, ch), md0);
+ cpc_writeb(scabase + M_REG(MD1, ch), 0);
+ cpc_writeb(scabase + M_REG(MD2, ch), md2);
+ cpc_writeb(scabase + M_REG(IDL, ch), 0x7e);
+ cpc_writeb(scabase + M_REG(CTL, ch), CTL_URSKP | CTL_IDLC);
+
+ /* Configure HW media */
+ switch (card->hw.type) {
+ case PC300_RSV:
+ if (conf->media == IF_IFACE_V35) {
+ cpc_writel((plxbase + card->hw.gpioc_reg),
+ cpc_readl(plxbase + card->hw.gpioc_reg) | PC300_CHMEDIA_MASK(ch));
+ } else {
+ cpc_writel((plxbase + card->hw.gpioc_reg),
+ cpc_readl(plxbase + card->hw.gpioc_reg) & ~PC300_CHMEDIA_MASK(ch));
+ }
+ break;
+
+ case PC300_X21:
+ break;
+
+ case PC300_TE:
+ te_config(card, ch);
+ break;
+ }
+
+ switch (card->hw.type) {
+ case PC300_RSV:
+ case PC300_X21:
+ if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) {
+ /* Calculate the clkrate parameters */
+ tmc = clock_rate_calc(clkrate, card->hw.clock, &br);
+ cpc_writeb(scabase + M_REG(TMCT, ch), tmc);
+ cpc_writeb(scabase + M_REG(TXS, ch),
+ (TXS_DTRXC | TXS_IBRG | br));
+ if (clktype == CLOCK_INT) {
+ cpc_writeb(scabase + M_REG(TMCR, ch), tmc);
+ cpc_writeb(scabase + M_REG(RXS, ch),
+ (RXS_IBRG | br));
+ } else {
+ cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+ cpc_writeb(scabase + M_REG(RXS, ch), 0);
+ }
+ if (card->hw.type == PC300_X21) {
+ cpc_writeb(scabase + M_REG(GPO, ch), 1);
+ cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
+ } else {
+ cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
+ }
+ } else {
+ cpc_writeb(scabase + M_REG(TMCT, ch), 1);
+ if (clktype == CLOCK_EXT) {
+ cpc_writeb(scabase + M_REG(TXS, ch),
+ TXS_DTRXC);
+ } else {
+ cpc_writeb(scabase + M_REG(TXS, ch),
+ TXS_DTRXC|TXS_RCLK);
+ }
+ cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+ cpc_writeb(scabase + M_REG(RXS, ch), 0);
+ if (card->hw.type == PC300_X21) {
+ cpc_writeb(scabase + M_REG(GPO, ch), 0);
+ cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
+ } else {
+ cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
+ }
+ }
+ break;
+
+ case PC300_TE:
+ /* SCA always receives clock from the FALC chip */
+ cpc_writeb(scabase + M_REG(TMCT, ch), 1);
+ cpc_writeb(scabase + M_REG(TXS, ch), 0);
+ cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+ cpc_writeb(scabase + M_REG(RXS, ch), 0);
+ cpc_writeb(scabase + M_REG(EXS, ch), 0);
+ break;
+ }
+
+ /* Enable Interrupts */
+ cpc_writel(scabase + IER0,
+ cpc_readl(scabase + IER0) |
+ IR0_M(IR0_RXINTA, ch) |
+ IR0_DRX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch) |
+ IR0_DTX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch));
+ cpc_writeb(scabase + M_REG(IE0, ch),
+ cpc_readl(scabase + M_REG(IE0, ch)) | IE0_RXINTA);
+ cpc_writeb(scabase + M_REG(IE1, ch),
+ cpc_readl(scabase + M_REG(IE1, ch)) | IE1_CDCD);
+
+ return 0;
+}
+
+int rx_config(pc300dev_t * d)
+{
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ void __iomem *scabase = card->hw.scabase;
+ int ch = chan->channel;
+
+ cpc_writeb(scabase + DSR_RX(ch), 0);
+
+ /* General RX settings */
+ cpc_writeb(scabase + M_REG(RRC, ch), 0);
+ cpc_writeb(scabase + M_REG(RNR, ch), 16);
+
+ /* Enable reception */
+ cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_CRC_INIT);
+ cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_ENA);
+
+ /* Initialize DMA stuff */
+ chan->rx_first_bd = 0;
+ chan->rx_last_bd = N_DMA_RX_BUF - 1;
+ rx_dma_buf_init(card, ch);
+ cpc_writeb(scabase + DCR_RX(ch), DCR_FCT_CLR);
+ cpc_writeb(scabase + DMR_RX(ch), (DMR_TMOD | DMR_NF));
+ cpc_writeb(scabase + DIR_RX(ch), (DIR_EOM | DIR_BOF));
+
+ /* Start DMA */
+ rx_dma_start(card, ch);
+
+ return 0;
+}
+
+int tx_config(pc300dev_t * d)
+{
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ void __iomem *scabase = card->hw.scabase;
+ int ch = chan->channel;
+
+ cpc_writeb(scabase + DSR_TX(ch), 0);
+
+ /* General TX settings */
+ cpc_writeb(scabase + M_REG(TRC0, ch), 0);
+ cpc_writeb(scabase + M_REG(TFS, ch), 32);
+ cpc_writeb(scabase + M_REG(TNR0, ch), 20);
+ cpc_writeb(scabase + M_REG(TNR1, ch), 48);
+ cpc_writeb(scabase + M_REG(TCR, ch), 8);
+
+ /* Enable transmission */
+ cpc_writeb(scabase + M_REG(CMD, ch), CMD_TX_CRC_INIT);
+
+ /* Initialize DMA stuff */
+ chan->tx_first_bd = 0;
+ chan->tx_next_bd = 0;
+ tx_dma_buf_init(card, ch);
+ cpc_writeb(scabase + DCR_TX(ch), DCR_FCT_CLR);
+ cpc_writeb(scabase + DMR_TX(ch), (DMR_TMOD | DMR_NF));
+ cpc_writeb(scabase + DIR_TX(ch), (DIR_EOM | DIR_BOF | DIR_UDRF));
+ cpc_writel(scabase + DTX_REG(CDAL, ch), TX_BD_ADDR(ch, chan->tx_first_bd));
+ cpc_writel(scabase + DTX_REG(EDAL, ch), TX_BD_ADDR(ch, chan->tx_next_bd));
+
+ return 0;
+}
+
+static int cpc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ pc300dev_t *d = (pc300dev_t *)dev->priv;
+ pc300ch_t *chan = (pc300ch_t *)d->chan;
+ pc300_t *card = (pc300_t *)chan->card;
+ pc300chconf_t *conf = (pc300chconf_t *)&chan->conf;
+
+ if (card->hw.type == PC300_TE) {
+ if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) {
+ return -EINVAL;
+ }
+ } else {
+ if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK && encoding != ENCODING_FM_SPACE) {
+ /* Driver doesn't support ENCODING_MANCHESTER yet */
+ return -EINVAL;
+ }
+ }
+
+ if (parity != PARITY_NONE && parity != PARITY_CRC16_PR0 &&
+ parity != PARITY_CRC16_PR1 && parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT) {
+ return -EINVAL;
+ }
+
+ conf->proto_settings.encoding = encoding;
+ conf->proto_settings.parity = parity;
+ return 0;
+}
+
+void cpc_opench(pc300dev_t * d)
+{
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ int ch = chan->channel;
+ void __iomem *scabase = card->hw.scabase;
+
+ ch_config(d);
+
+ rx_config(d);
+
+ tx_config(d);
+
+ /* Assert RTS and DTR */
+ cpc_writeb(scabase + M_REG(CTL, ch),
+ cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
+}
+
+void cpc_closech(pc300dev_t * d)
+{
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ falc_t *pfalc = (falc_t *) & chan->falc;
+ int ch = chan->channel;
+
+ cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_CH_RST);
+ rx_dma_stop(card, ch);
+ tx_dma_stop(card, ch);
+
+ if (card->hw.type == PC300_TE) {
+ memset(pfalc, 0, sizeof(falc_t));
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+ ~((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK |
+ CPLD_REG2_FALC_LED2) << (2 * ch)));
+ /* Reset the FALC chip */
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+ (CPLD_REG1_FALC_RESET << (2 * ch)));
+ udelay(10000);
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+ ~(CPLD_REG1_FALC_RESET << (2 * ch)));
+ }
+}
+
+int cpc_open(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ struct ifreq ifr;
+ int result;
+
+#ifdef PC300_DEBUG_OTHER
+ printk("pc300: cpc_open");
+#endif
+
+ if (hdlc->proto.id == IF_PROTO_PPP) {
+ d->if_ptr = &hdlc->state.ppp.pppdev;
+ }
+
+ result = hdlc_open(dev);
+ if (hdlc->proto.id == IF_PROTO_PPP) {
+ dev->priv = d;
+ }
+ if (result) {
+ return result;
+ }
+
+ sprintf(ifr.ifr_name, "%s", dev->name);
+ cpc_opench(d);
+ netif_start_queue(dev);
+ return 0;
+}
+
+int cpc_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pc300dev_t *d = (pc300dev_t *) dev->priv;
+ pc300ch_t *chan = (pc300ch_t *) d->chan;
+ pc300_t *card = (pc300_t *) chan->card;
+ unsigned long flags;
+
+#ifdef PC300_DEBUG_OTHER
+ printk("pc300: cpc_close");
+#endif
+
+ netif_stop_queue(dev);
+
+ CPC_LOCK(card, flags);
+ cpc_closech(d);
+ CPC_UNLOCK(card, flags);
+
+ hdlc_close(dev);
+ if (hdlc->proto.id == IF_PROTO_PPP) {
+ d->if_ptr = NULL;
+ }
+#ifdef CONFIG_PC300_MLPPP
+ if (chan->conf.proto == PC300_PROTO_MLPPP) {
+ cpc_tty_unregister_service(d);
+ chan->conf.proto = 0xffff;
+ }
+#endif
+
+ return 0;
+}
+
+static uclong detect_ram(pc300_t * card)
+{
+ uclong i;
+ ucchar data;
+ void __iomem *rambase = card->hw.rambase;
+
+ card->hw.ramsize = PC300_RAMSIZE;
+ /* Let's find out how much RAM is present on this board */
+ for (i = 0; i < card->hw.ramsize; i++) {
+ data = (ucchar) (i & 0xff);
+ cpc_writeb(rambase + i, data);
+ if (cpc_readb(rambase + i) != data) {
+ break;
+ }
+ }
+ return (i);
+}
+
+static void plx_init(pc300_t * card)
+{
+ struct RUNTIME_9050 __iomem *plx_ctl = card->hw.plxbase;
+
+ /* Reset PLX */
+ cpc_writel(&plx_ctl->init_ctrl,
+ cpc_readl(&plx_ctl->init_ctrl) | 0x40000000);
+ udelay(10000L);
+ cpc_writel(&plx_ctl->init_ctrl,
+ cpc_readl(&plx_ctl->init_ctrl) & ~0x40000000);
+
+ /* Reload Config. Registers from EEPROM */
+ cpc_writel(&plx_ctl->init_ctrl,
+ cpc_readl(&plx_ctl->init_ctrl) | 0x20000000);
+ udelay(10000L);
+ cpc_writel(&plx_ctl->init_ctrl,
+ cpc_readl(&plx_ctl->init_ctrl) & ~0x20000000);
+
+}
+
+static inline void show_version(void)
+{
+ char *rcsvers, *rcsdate, *tmp;
+
+ rcsvers = strchr(rcsid, ' ');
+ rcsvers++;
+ tmp = strchr(rcsvers, ' ');
+ *tmp++ = '\0';
+ rcsdate = strchr(tmp, ' ');
+ rcsdate++;
+ tmp = strrchr(rcsdate, ' ');
+ *tmp = '\0';
+ printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n",
+ rcsvers, rcsdate, __DATE__, __TIME__);
+} /* show_version */
+
+static void cpc_init_card(pc300_t * card)
+{
+ int i, devcount = 0;
+ static int board_nbr = 1;
+
+ /* Enable interrupts on the PCI bridge */
+ plx_init(card);
+ cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
+ cpc_readw(card->hw.plxbase + card->hw.intctl_reg) | 0x0040);
+
+#ifdef USE_PCI_CLOCK
+ /* Set board clock to PCI clock */
+ cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
+ cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) | 0x00000004UL);
+ card->hw.clock = PC300_PCI_CLOCK;
+#else
+ /* Set board clock to internal oscillator clock */
+ cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
+ cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & ~0x00000004UL);
+ card->hw.clock = PC300_OSC_CLOCK;
+#endif
+
+ /* Detect actual on-board RAM size */
+ card->hw.ramsize = detect_ram(card);
+
+ /* Set Global SCA-II registers */
+ cpc_writeb(card->hw.scabase + PCR, PCR_PR2);
+ cpc_writeb(card->hw.scabase + BTCR, 0x10);
+ cpc_writeb(card->hw.scabase + WCRL, 0);
+ cpc_writeb(card->hw.scabase + DMER, 0x80);
+
+ if (card->hw.type == PC300_TE) {
+ ucchar reg1;
+
+ /* Check CPLD version */
+ reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
+ cpc_writeb(card->hw.falcbase + CPLD_REG1, (reg1 + 0x5a));
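+ /* Scribble on CPLD_REG1 and read it back: if the write did not
+ * stick, this is the newer CPLD, whose control registers live at
+ * CPLD_V2_REG1/2 and which exposes an ID register. Otherwise
+ * restore the original value and use the legacy register layout. */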
+ if (cpc_readb(card->hw.falcbase + CPLD_REG1) == reg1) {
+ /* New CPLD */
+ card->hw.cpld_id = cpc_readb(card->hw.falcbase + CPLD_ID_REG);
+ card->hw.cpld_reg1 = CPLD_V2_REG1;
+ card->hw.cpld_reg2 = CPLD_V2_REG2;
+ } else {
+ /* old CPLD */
+ card->hw.cpld_id = 0;
+ card->hw.cpld_reg1 = CPLD_REG1;
+ card->hw.cpld_reg2 = CPLD_REG2;
+ cpc_writeb(card->hw.falcbase + CPLD_REG1, reg1);
+ }
+
+ /* Enable the board's global clock */
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+ CPLD_REG1_GLOBAL_CLK);
+
+ }
+
+ for (i = 0; i < card->hw.nchan; i++) {
+ pc300ch_t *chan = &card->chan[i];
+ pc300dev_t *d = &chan->d;
+ hdlc_device *hdlc;
+ struct net_device *dev;
+
+ chan->card = card;
+ chan->channel = i;
+ chan->conf.phys_settings.clock_rate = 0;
+ chan->conf.phys_settings.clock_type = CLOCK_EXT;
+ chan->conf.proto_settings.encoding = ENCODING_NRZ;
+ chan->conf.proto_settings.parity = PARITY_CRC16_PR1_CCITT;
+ switch (card->hw.type) {
+ case PC300_TE:
+ chan->conf.media = IF_IFACE_T1;
+ chan->conf.lcode = PC300_LC_B8ZS;
+ chan->conf.fr_mode = PC300_FR_ESF;
+ chan->conf.lbo = PC300_LBO_0_DB;
+ chan->conf.rx_sens = PC300_RX_SENS_SH;
+ chan->conf.tslot_bitmap = 0xffffffffUL;
+ break;
+
+ case PC300_X21:
+ chan->conf.media = IF_IFACE_X21;
+ break;
+
+ case PC300_RSV:
+ default:
+ chan->conf.media = IF_IFACE_V35;
+ break;
+ }
+ chan->conf.proto = IF_PROTO_PPP;
+ chan->tx_first_bd = 0;
+ chan->tx_next_bd = 0;
+ chan->rx_first_bd = 0;
+ chan->rx_last_bd = N_DMA_RX_BUF - 1;
+ chan->nfree_tx_bd = N_DMA_TX_BUF;
+
+ d->chan = chan;
+ d->tx_skb = NULL;
+ d->trace_on = 0;
+ d->line_on = 0;
+ d->line_off = 0;
+
+ dev = alloc_hdlcdev(NULL);
+ if (dev == NULL)
+ continue;
+
+ hdlc = dev_to_hdlc(dev);
+ hdlc->xmit = cpc_queue_xmit;
+ hdlc->attach = cpc_attach;
+ d->dev = dev;
+ dev->mem_start = card->hw.ramphys;
+ dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1;
+ dev->irq = card->hw.irq;
+ dev->init = NULL;
+ dev->tx_queue_len = PC300_TX_QUEUE_LEN;
+ dev->mtu = PC300_DEF_MTU;
+
+ dev->open = cpc_open;
+ dev->stop = cpc_close;
+ dev->tx_timeout = cpc_tx_timeout;
+ dev->watchdog_timeo = PC300_TX_TIMEOUT;
+ dev->get_stats = cpc_get_stats;
+ dev->set_multicast_list = NULL;
+ dev->set_mac_address = NULL;
+ dev->change_mtu = cpc_change_mtu;
+ dev->do_ioctl = cpc_ioctl;
+
+ if (register_hdlc_device(dev) == 0) {
+ dev->priv = d; /* We need 'priv', hdlc doesn't */
+ printk("%s: Cyclades-PC300/", dev->name);
+ switch (card->hw.type) {
+ case PC300_TE:
+ if (card->hw.bus == PC300_PMC) {
+ printk("TE-M");
+ } else {
+ printk("TE ");
+ }
+ break;
+
+ case PC300_X21:
+ printk("X21 ");
+ break;
+
+ case PC300_RSV:
+ default:
+ printk("RSV ");
+ break;
+ }
+ printk (" #%d, %dKB of RAM at 0x%08x, IRQ%d, channel %d.\n",
+ board_nbr, card->hw.ramsize / 1024,
+ card->hw.ramphys, card->hw.irq, i + 1);
+ devcount++;
+ } else {
+ printk ("Dev%d on card(0x%08x): unable to allocate i/f name.\n",
+ i + 1, card->hw.ramphys);
+ free_netdev(dev);
+ continue;
+ }
+ }
+ spin_lock_init(&card->card_lock);
+
+ board_nbr++;
+}
+
+static int __devinit
+cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int first_time = 1;
+ ucchar cpc_rev_id;
+ int err = 0, eeprom_outdated = 0;
+ ucshort device_id;
+ pc300_t *card;
+
+ if (first_time) {
+ first_time = 0;
+ show_version();
+#ifdef CONFIG_PC300_MLPPP
+ cpc_tty_reset_var();
+#endif
+ }
+
+ card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL);
+ if (card == NULL) {
+ printk("PC300 found at RAM 0x%08lx, "
+ "but could not allocate card structure.\n",
+ pci_resource_start(pdev, 3));
+ return -ENOMEM;
+ }
+ memset(card, 0, sizeof(pc300_t));
+
+ /* read PCI configuration area */
+ device_id = ent->device;
+ card->hw.irq = pdev->irq;
+ card->hw.iophys = pci_resource_start(pdev, 1);
+ card->hw.iosize = pci_resource_len(pdev, 1);
+ card->hw.scaphys = pci_resource_start(pdev, 2);
+ card->hw.scasize = pci_resource_len(pdev, 2);
+ card->hw.ramphys = pci_resource_start(pdev, 3);
+ card->hw.alloc_ramsize = pci_resource_len(pdev, 3);
+ card->hw.falcphys = pci_resource_start(pdev, 4);
+ card->hw.falcsize = pci_resource_len(pdev, 4);
+ card->hw.plxphys = pci_resource_start(pdev, 5);
+ card->hw.plxsize = pci_resource_len(pdev, 5);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id);
+
+ switch (device_id) {
+ case PCI_DEVICE_ID_PC300_RX_1:
+ case PCI_DEVICE_ID_PC300_TE_1:
+ case PCI_DEVICE_ID_PC300_TE_M_1:
+ card->hw.nchan = 1;
+ break;
+
+ case PCI_DEVICE_ID_PC300_RX_2:
+ case PCI_DEVICE_ID_PC300_TE_2:
+ case PCI_DEVICE_ID_PC300_TE_M_2:
+ default:
+ card->hw.nchan = PC300_MAXCHAN;
+ break;
+ }
+#ifdef PC300_DEBUG_PCI
+ printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn);
+ printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq);
+ printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx "
+ "ctladdr=0x%08lx falcaddr=0x%08lx\n",
+ card->hw.ramphys, card->hw.plxphys, card->hw.scaphys,
+ card->hw.falcphys);
+#endif
+ /* Although we don't use this I/O region, we should
+ * request it from the kernel anyway, to avoid problems
+ * with other drivers accessing it. */
+ if (!request_region(card->hw.iophys, card->hw.iosize, "PLX Registers")) {
+ /* In case we can't allocate it, warn user */
+ printk("WARNING: couldn't allocate I/O region for PC300 board "
+ "at 0x%08x!\n", card->hw.ramphys);
+ }
+
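+ /* Newer EEPROMs report the PLX register block through BAR5; mirror
+ * that address into BAR0. If BAR5 is empty the EEPROM is outdated,
+ * so fall back to the address in BAR0 (a warning is printed at the
+ * end of the probe). */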
+ if (card->hw.plxphys) {
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, card->hw.plxphys);
+ } else {
+ eeprom_outdated = 1;
+ card->hw.plxphys = pci_resource_start(pdev, 0);
+ card->hw.plxsize = pci_resource_len(pdev, 0);
+ }
+
+ if (!request_mem_region(card->hw.plxphys, card->hw.plxsize,
+ "PLX Registers")) {
+ printk("PC300 found at RAM 0x%08x, "
+ "but could not allocate PLX mem region.\n",
+ card->hw.ramphys);
+ err = -ENODEV;
+ goto err_release_io;
+ }
+ if (!request_mem_region(card->hw.ramphys, card->hw.alloc_ramsize,
+ "On-board RAM")) {
+ printk("PC300 found at RAM 0x%08x, "
+ "but could not allocate RAM mem region.\n",
+ card->hw.ramphys);
+ err = -ENODEV;
+ goto err_release_plx;
+ }
+ if (!request_mem_region(card->hw.scaphys, card->hw.scasize,
+ "SCA-II Registers")) {
+ printk("PC300 found at RAM 0x%08x, "
+ "but could not allocate SCA mem region.\n",
+ card->hw.ramphys);
+ err = -ENODEV;
+ goto err_release_ram;
+ }
+
+ if ((err = pci_enable_device(pdev)) != 0)
+ goto err_release_sca;
+
+ card->hw.plxbase = ioremap(card->hw.plxphys, card->hw.plxsize);
+ card->hw.rambase = ioremap(card->hw.ramphys, card->hw.alloc_ramsize);
+ card->hw.scabase = ioremap(card->hw.scaphys, card->hw.scasize);
+ switch (device_id) {
+ case PCI_DEVICE_ID_PC300_TE_1:
+ case PCI_DEVICE_ID_PC300_TE_2:
+ case PCI_DEVICE_ID_PC300_TE_M_1:
+ case PCI_DEVICE_ID_PC300_TE_M_2:
+ request_mem_region(card->hw.falcphys, card->hw.falcsize,
+ "FALC Registers");
+ card->hw.falcbase = ioremap(card->hw.falcphys, card->hw.falcsize);
+ break;
+
+ case PCI_DEVICE_ID_PC300_RX_1:
+ case PCI_DEVICE_ID_PC300_RX_2:
+ default:
+ card->hw.falcbase = NULL;
+ break;
+ }
+
+#ifdef PC300_DEBUG_PCI
+ printk("cpc: relocate ramaddr=0x%08lx plxaddr=0x%08lx "
+ "ctladdr=0x%08lx falcaddr=0x%08lx\n",
+ card->hw.rambase, card->hw.plxbase, card->hw.scabase,
+ card->hw.falcbase);
+#endif
+
+ /* Set PCI drv pointer to the card structure */
+ pci_set_drvdata(pdev, card);
+
+ /* Set board type */
+ switch (device_id) {
+ case PCI_DEVICE_ID_PC300_TE_1:
+ case PCI_DEVICE_ID_PC300_TE_2:
+ case PCI_DEVICE_ID_PC300_TE_M_1:
+ case PCI_DEVICE_ID_PC300_TE_M_2:
+ card->hw.type = PC300_TE;
+
+ if ((device_id == PCI_DEVICE_ID_PC300_TE_M_1) ||
+ (device_id == PCI_DEVICE_ID_PC300_TE_M_2)) {
+ card->hw.bus = PC300_PMC;
+ /* Set PLX register offsets */
+ card->hw.gpioc_reg = 0x54;
+ card->hw.intctl_reg = 0x4c;
+ } else {
+ card->hw.bus = PC300_PCI;
+ /* Set PLX register offsets */
+ card->hw.gpioc_reg = 0x50;
+ card->hw.intctl_reg = 0x4c;
+ }
+ break;
+
+ case PCI_DEVICE_ID_PC300_RX_1:
+ case PCI_DEVICE_ID_PC300_RX_2:
+ default:
+ card->hw.bus = PC300_PCI;
+ /* Set PLX register offsets */
+ card->hw.gpioc_reg = 0x50;
+ card->hw.intctl_reg = 0x4c;
+
+ if ((cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & PC300_CTYPE_MASK)) {
+ card->hw.type = PC300_X21;
+ } else {
+ card->hw.type = PC300_RSV;
+ }
+ break;
+ }
+
+ /* Allocate IRQ */
+ if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) {
+ printk ("PC300 found at RAM 0x%08x, but could not allocate IRQ%d.\n",
+ card->hw.ramphys, card->hw.irq);
+ goto err_io_unmap;
+ }
+
+ cpc_init_card(card);
+
+ if (eeprom_outdated)
+ printk("WARNING: PC300 with outdated EEPROM.\n");
+ return 0;
+
+err_io_unmap:
+ iounmap(card->hw.plxbase);
+ iounmap(card->hw.scabase);
+ iounmap(card->hw.rambase);
+ if (card->hw.type == PC300_TE) {
+ iounmap(card->hw.falcbase);
+ release_mem_region(card->hw.falcphys, card->hw.falcsize);
+ }
+err_release_sca:
+ release_mem_region(card->hw.scaphys, card->hw.scasize);
+err_release_ram:
+ release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
+err_release_plx:
+ release_mem_region(card->hw.plxphys, card->hw.plxsize);
+err_release_io:
+ release_region(card->hw.iophys, card->hw.iosize);
+ kfree(card);
+ return -ENODEV;
+}
+
+static void __devexit cpc_remove_one(struct pci_dev *pdev)
+{
+ pc300_t *card = pci_get_drvdata(pdev);
+
+ if (card->hw.rambase != 0) {
+ int i;
+
+ /* Disable interrupts on the PCI bridge */
+ cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
+ cpc_readw(card->hw.plxbase + card->hw.intctl_reg) & ~(0x0040));
+
+ for (i = 0; i < card->hw.nchan; i++) {
+ unregister_hdlc_device(card->chan[i].d.dev);
+ }
+ iounmap(card->hw.plxbase);
+ iounmap(card->hw.scabase);
+ iounmap(card->hw.rambase);
+ release_mem_region(card->hw.plxphys, card->hw.plxsize);
+ release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
+ release_mem_region(card->hw.scaphys, card->hw.scasize);
+ release_region(card->hw.iophys, card->hw.iosize);
+ if (card->hw.type == PC300_TE) {
+ iounmap(card->hw.falcbase);
+ release_mem_region(card->hw.falcphys, card->hw.falcsize);
+ }
+ for (i = 0; i < card->hw.nchan; i++)
+ if (card->chan[i].d.dev)
+ free_netdev(card->chan[i].d.dev);
+ if (card->hw.irq)
+ free_irq(card->hw.irq, card);
+ kfree(card);
+ }
+}
+
+static struct pci_driver cpc_driver = {
+ .name = "pc300",
+ .id_table = cpc_pci_dev_id,
+ .probe = cpc_init_one,
+ .remove = __devexit_p(cpc_remove_one),
+};
+
+static int __init cpc_init(void)
+{
+ return pci_module_init(&cpc_driver);
+}
+
+static void __exit cpc_cleanup_module(void)
+{
+ pci_unregister_driver(&cpc_driver);
+}
+
+module_init(cpc_init);
+module_exit(cpc_cleanup_module);
+
+MODULE_DESCRIPTION("Cyclades-PC300 cards driver");
+MODULE_AUTHOR( "Author: Ivan Passos <ivan@cyclades.com>\r\n"
+ "Maintainer: PC300 Maintainer <pc300@cyclades.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
new file mode 100644
index 000000000000..29f84ad08730
--- /dev/null
+++ b/drivers/net/wan/pc300_tty.c
@@ -0,0 +1,1095 @@
+/*
+ * pc300_tty.c Cyclades-PC300(tm) TTY Driver.
+ *
+ * Author: Regina Kodato <reginak@cyclades.com>
+ *
+ * Copyright: (c) 1999-2002 Cyclades Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Log: pc300_tty.c,v $
+ * Revision 3.7 2002/03/07 14:17:09 henrique
+ * License data fixed
+ *
+ * Revision 3.6 2001/12/10 12:29:42 regina
+ * Fix the MLPPP bug
+ *
+ * Revision 3.5 2001/10/31 11:20:05 regina
+ * automatic pppd starts
+ *
+ * Revision 3.4 2001/08/06 12:01:51 regina
+ * problem in DSR_DE bit
+ *
+ * Revision 3.3 2001/07/26 22:58:41 regina
+ * update EDA value
+ *
+ * Revision 3.2 2001/07/12 13:11:20 regina
+ * bug fix - DCD-OFF in pc300 tty driver
+ *
+ * DMA transmission bug fix
+ *
+ * Revision 3.1 2001/06/22 13:13:02 regina
+ * MLPPP implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+/* TTY includes */
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "pc300.h"
+
+/* defines and macros */
+/* TTY Global definitions */
+#define CPC_TTY_NPORTS 8 /* maximum number of the sync tty connections */
+#define CPC_TTY_MAJOR CYCLADES_MAJOR
+#define CPC_TTY_MINOR_START 240 /* minor of the first PC300 interface */
+
+#define CPC_TTY_MAX_MTU 2000
+
+/* tty interface state */
+#define CPC_TTY_ST_IDLE 0
+#define CPC_TTY_ST_INIT 1 /* configured with MLPPP and up */
+#define CPC_TTY_ST_OPEN 2 /* opened by application */
+
+#define CPC_TTY_LOCK(card,flags)\
+ do {\
+ spin_lock_irqsave(&card->card_lock, flags); \
+ } while (0)
+
+#define CPC_TTY_UNLOCK(card,flags) \
+ do {\
+ spin_unlock_irqrestore(&card->card_lock, flags); \
+ } while (0)
+
+//#define CPC_TTY_DBG(format,a...) printk(format,##a)
+#define CPC_TTY_DBG(format,a...)
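+/* CPC_TTY_DBG() compiles to nothing by default; uncomment the printk()
+ * variant above to get verbose tracing from this driver. */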
+
+/* data structures */
+typedef struct _st_cpc_rx_buf {
+ struct _st_cpc_rx_buf *next;
+ int size;
+ unsigned char data[1];
+} st_cpc_rx_buf;
+
+struct st_cpc_rx_list {
+ st_cpc_rx_buf *first;
+ st_cpc_rx_buf *last;
+};
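+/*
+ * Received frames are queued per port on a singly-linked list of
+ * st_cpc_rx_buf nodes (the payload lives in the trailing data[] array);
+ * the list is drained by the RX work routine, which hands each frame to
+ * the tty line discipline.
+ */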
+
+typedef struct _st_cpc_tty_area {
+ int state; /* state of the TTY interface */
+ int num_open;
+ unsigned int tty_minor; /* minor number of this interface */
+ volatile struct st_cpc_rx_list buf_rx; /* ptr. to reception buffer */
+ unsigned char* buf_tx; /* ptr. to transmission buffer */
+ pc300dev_t* pc300dev; /* ptr. to info struct in PC300 driver */
+ unsigned char name[20]; /* interf. name + "-tty" */
+ struct tty_struct *tty;
+ struct work_struct tty_tx_work; /* tx work - tx interrupt */
+ struct work_struct tty_rx_work; /* rx work - rx interrupt */
+ } st_cpc_tty_area;
+
+/* TTY data structures */
+static struct tty_driver serial_drv;
+
+/* local variables */
+st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
+
+int cpc_tty_cnt = 0; /* number of interfaces configured with MLPPP */
+int cpc_tty_unreg_flag = 0;
+
+/* TTY functions prototype */
+static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
+static void cpc_tty_close(struct tty_struct *tty, struct file *flip);
+static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static int cpc_tty_write_room(struct tty_struct *tty);
+static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
+static void cpc_tty_flush_buffer(struct tty_struct *tty);
+static void cpc_tty_hangup(struct tty_struct *tty);
+static void cpc_tty_rx_work(void *data);
+static void cpc_tty_tx_work(void *data);
+static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
+static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
+static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
+static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
+
+int pc300_tiocmset(struct tty_struct *, struct file *,
+ unsigned int, unsigned int);
+int pc300_tiocmget(struct tty_struct *, struct file *);
+
+/* functions called by PC300 driver */
+void cpc_tty_init(pc300dev_t *dev);
+void cpc_tty_unregister_service(pc300dev_t *pc300dev);
+void cpc_tty_receive(pc300dev_t *pc300dev);
+void cpc_tty_trigger_poll(pc300dev_t *pc300dev);
+void cpc_tty_reset_var(void);
+
+/*
+ * PC300 TTY clear "signal"
+ */
+static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char signal)
+{
+ pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
+ pc300_t *card = (pc300_t *) pc300chan->card;
+ int ch = pc300chan->channel;
+ unsigned long flags;
+
+ CPC_TTY_DBG("%s-tty: Clear signal %x\n",
+ pc300dev->dev->name, signal);
+ CPC_TTY_LOCK(card, flags);
+ cpc_writeb(card->hw.scabase + M_REG(CTL,ch),
+ cpc_readb(card->hw.scabase+M_REG(CTL,ch))& signal);
+ CPC_TTY_UNLOCK(card,flags);
+}
+
+/*
+ * PC300 TTY set "signal" to ON
+ */
+static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
+{
+ pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
+ pc300_t *card = (pc300_t *) pc300chan->card;
+ int ch = pc300chan->channel;
+ unsigned long flags;
+
+ CPC_TTY_DBG("%s-tty: Set signal %x\n",
+ pc300dev->dev->name, signal);
+ CPC_TTY_LOCK(card, flags);
+ cpc_writeb(card->hw.scabase + M_REG(CTL,ch),
+ cpc_readb(card->hw.scabase+M_REG(CTL,ch))& ~signal);
+ CPC_TTY_UNLOCK(card,flags);
+}
+
+/*
+ * PC300 TTY initialization routine
+ *
+ * This routine is called by the PC300 driver during board configuration
+ * (ioctl=SIOCSPC300CONF). At this point the adapter is completely
+ * initialized.
+ * o register the TTY driver (on the first MLPPP interface)
+ * o init the cpc_tty_area struct
+ */
+void cpc_tty_init(pc300dev_t *pc300dev)
+{
+ unsigned long port;
+ int aux;
+ st_cpc_tty_area * cpc_tty;
+
+ /* hdlcX - X=interface number */
+ port = pc300dev->dev->name[4] - '0';
+ if (port >= CPC_TTY_NPORTS) {
+ printk("%s-tty: invalid interface selected (0-%i): %li\n",
+ pc300dev->dev->name,
+ CPC_TTY_NPORTS-1,port);
+ return;
+ }
+
+ if (cpc_tty_cnt == 0) { /* first TTY connection -> register driver */
+ CPC_TTY_DBG("%s-tty: driver init, major:%i, minor range:%i-%i\n",
+ pc300dev->dev->name,
+ CPC_TTY_MAJOR, CPC_TTY_MINOR_START,
+ CPC_TTY_MINOR_START+CPC_TTY_NPORTS);
+ /* initialize tty driver struct */
+ memset(&serial_drv,0,sizeof(struct tty_driver));
+ serial_drv.magic = TTY_DRIVER_MAGIC;
+ serial_drv.owner = THIS_MODULE;
+ serial_drv.driver_name = "pc300_tty";
+ serial_drv.name = "ttyCP";
+ serial_drv.major = CPC_TTY_MAJOR;
+ serial_drv.minor_start = CPC_TTY_MINOR_START;
+ serial_drv.num = CPC_TTY_NPORTS;
+ serial_drv.type = TTY_DRIVER_TYPE_SERIAL;
+ serial_drv.subtype = SERIAL_TYPE_NORMAL;
+
+ serial_drv.init_termios = tty_std_termios;
+ serial_drv.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
+ serial_drv.flags = TTY_DRIVER_REAL_RAW;
+
+ /* interface routines from the upper tty layer to the tty driver */
+ serial_drv.open = cpc_tty_open;
+ serial_drv.close = cpc_tty_close;
+ serial_drv.write = cpc_tty_write;
+ serial_drv.write_room = cpc_tty_write_room;
+ serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer;
+ serial_drv.tiocmset = pc300_tiocmset;
+ serial_drv.tiocmget = pc300_tiocmget;
+ serial_drv.flush_buffer = cpc_tty_flush_buffer;
+ serial_drv.hangup = cpc_tty_hangup;
+
+ /* register the TTY driver */
+ if (tty_register_driver(&serial_drv)) {
+ printk("%s-tty: Failed to register serial driver!\n",
+ pc300dev->dev->name);
+ return;
+ }
+
+ memset((void *)cpc_tty_area, 0,
+ sizeof(st_cpc_tty_area) * CPC_TTY_NPORTS);
+ }
+
+ cpc_tty = &cpc_tty_area[port];
+
+ if (cpc_tty->state != CPC_TTY_ST_IDLE) {
+ CPC_TTY_DBG("%s-tty: TTY port %i, already in use.\n",
+ pc300dev->dev->name, port);
+ return;
+ }
+
+ cpc_tty_cnt++;
+ cpc_tty->state = CPC_TTY_ST_INIT;
+ cpc_tty->num_open= 0;
+ cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
+ cpc_tty->pc300dev = pc300dev;
+
+ INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
+ INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+
+ cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
+
+ pc300dev->cpc_tty = (void *)cpc_tty;
+
+ aux = strlen(pc300dev->dev->name);
+ memcpy(cpc_tty->name, pc300dev->dev->name, aux);
+ memcpy(&cpc_tty->name[aux], "-tty", 5);
+
+ cpc_open(pc300dev->dev);
+ cpc_tty_signal_off(pc300dev, CTL_DTR);
+
+ CPC_TTY_DBG("%s: Initializing TTY Sync Driver, tty major#%d minor#%i\n",
+ cpc_tty->name,CPC_TTY_MAJOR,cpc_tty->tty_minor);
+ return;
+}
+
+/*
+ * PC300 TTY OPEN routine
+ *
+ * This routine is called by the tty driver to open the interface
+ * o verify minor
+ * o allocate the Rx and Tx buffers
+ */
+static int cpc_tty_open(struct tty_struct *tty, struct file *flip)
+{
+ int port ;
+ st_cpc_tty_area *cpc_tty;
+
+ if (!tty) {
+ return -ENODEV;
+ }
+
+ port = tty->index;
+
+ if ((port < 0) || (port >= CPC_TTY_NPORTS)){
+ CPC_TTY_DBG("pc300_tty: open invalid port %d\n", port);
+ return -ENODEV;
+ }
+
+ cpc_tty = &cpc_tty_area[port];
+
+ if (cpc_tty->state == CPC_TTY_ST_IDLE){
+ CPC_TTY_DBG("%s: open - invalid interface, port=%d\n",
+ cpc_tty->name, tty->index);
+ return -ENODEV;
+ }
+
+ if (cpc_tty->num_open == 0) { /* first open of this tty */
+ if (!cpc_tty_area[port].buf_tx){
+ cpc_tty_area[port].buf_tx = kmalloc(CPC_TTY_MAX_MTU,GFP_KERNEL);
+ if (cpc_tty_area[port].buf_tx == 0){
+ CPC_TTY_DBG("%s: error in memory allocation\n",cpc_tty->name);
+ return -ENOMEM;
+ }
+ }
+
+ if (cpc_tty_area[port].buf_rx.first) {
+ unsigned char * aux;
+ while (cpc_tty_area[port].buf_rx.first) {
+ aux = (unsigned char *)cpc_tty_area[port].buf_rx.first;
+ cpc_tty_area[port].buf_rx.first = cpc_tty_area[port].buf_rx.first->next;
+ kfree(aux);
+ }
+ cpc_tty_area[port].buf_rx.first = NULL;
+ cpc_tty_area[port].buf_rx.last = NULL;
+ }
+
+ cpc_tty_area[port].state = CPC_TTY_ST_OPEN;
+ cpc_tty_area[port].tty = tty;
+ tty->driver_data = &cpc_tty_area[port];
+
+ cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
+ }
+
+ cpc_tty->num_open++;
+
+ CPC_TTY_DBG("%s: opening TTY driver\n", cpc_tty->name);
+
+ /* notify the PC300 driver */
+ return 0;
+}
+
+/*
+ * PC300 TTY CLOSE routine
+ *
+ * This routine is called by the tty driver to close the interface
+ * o call close channel in PC300 driver (cpc_closech)
+ * o free Rx and Tx buffers
+ */
+
+static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
+{
+ st_cpc_tty_area *cpc_tty;
+ unsigned long flags;
+ int res;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY in close\n");
+ return;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty)|| (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+ return;
+ }
+
+ if (!cpc_tty->num_open) {
+ CPC_TTY_DBG("%s: TTY is closed\n",cpc_tty->name);
+ return;
+ }
+
+ if (--cpc_tty->num_open > 0) {
+ CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
+ return;
+ }
+
+ cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+
+ CPC_TTY_LOCK(cpc_tty->pc300dev->chan->card, flags); /* lock irq */
+ cpc_tty->tty = NULL;
+ cpc_tty->state = CPC_TTY_ST_INIT;
+ CPC_TTY_UNLOCK(cpc_tty->pc300dev->chan->card, flags); /* unlock irq */
+
+ if (cpc_tty->buf_rx.first) {
+ unsigned char * aux;
+ while (cpc_tty->buf_rx.first) {
+ aux = (unsigned char *)cpc_tty->buf_rx.first;
+ cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
+ kfree(aux);
+ }
+ cpc_tty->buf_rx.first = NULL;
+ cpc_tty->buf_rx.last = NULL;
+ }
+
+ if (cpc_tty->buf_tx) {
+ kfree(cpc_tty->buf_tx);
+ cpc_tty->buf_tx = NULL;
+ }
+
+ CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
+
+ if (!serial_drv.refcount && cpc_tty_unreg_flag) {
+ cpc_tty_unreg_flag = 0;
+ CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+ if ((res=tty_unregister_driver(&serial_drv))) {
+ CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+ cpc_tty->name,res);
+ }
+ }
+ return;
+}
+
+/*
+ * PC300 TTY WRITE routine
+ *
+ * This routine is called by the tty driver to write a series of characters
+ * to the tty device. The characters may come from user or kernel space.
+ * o verify the DCD signal
+ * o send characters to board and start the transmission
+ */
+static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ st_cpc_tty_area *cpc_tty;
+ pc300ch_t *pc300chan;
+ pc300_t *card;
+ int ch;
+ unsigned long flags;
+ struct net_device_stats *stats;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY in write\n");
+ return -ENODEV;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n", cpc_tty->name);
+ return -ENODEV;
+ }
+
+ if (count > CPC_TTY_MAX_MTU) {
+ CPC_TTY_DBG("%s: count is invalid\n",cpc_tty->name);
+ return -EINVAL; /* frame too big */
+ }
+
+ CPC_TTY_DBG("%s: cpc_tty_write data len=%i\n",cpc_tty->name,count);
+
+ pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan;
+ stats = hdlc_stats(((pc300dev_t*)cpc_tty->pc300dev)->dev);
+ card = (pc300_t *) pc300chan->card;
+ ch = pc300chan->channel;
+
+ /* verify DCD signal*/
+ if (cpc_readb(card->hw.scabase + M_REG(ST3,ch)) & ST3_DCD) {
+ /* DCD is OFF */
+ CPC_TTY_DBG("%s : DCD is OFF\n", cpc_tty->name);
+ stats->tx_errors++;
+ stats->tx_carrier_errors++;
+ CPC_TTY_LOCK(card, flags);
+ cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR);
+
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+ ~(CPLD_REG2_FALC_LED1 << (2 *ch)));
+ }
+
+ CPC_TTY_UNLOCK(card, flags);
+
+ return -EINVAL;
+ }
+
+ if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*)buf, count)) {
+ /* failed to send */
+ CPC_TTY_DBG("%s: transmission error\n", cpc_tty->name);
+ return 0;
+ }
+ return count;
+}
+
+/*
+ * PC300 TTY Write Room routine
+ *
+ * This routine returns the number of characters the tty driver will accept
+ * for queuing to be written.
+ * o return MTU
+ */
+static int cpc_tty_write_room(struct tty_struct *tty)
+{
+ st_cpc_tty_area *cpc_tty;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY to write room\n");
+ return -ENODEV;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+ return -ENODEV;
+ }
+
+ CPC_TTY_DBG("%s: write room\n",cpc_tty->name);
+
+ return CPC_TTY_MAX_MTU;
+}
+
+/*
+ * PC300 TTY chars in buffer routine
+ *
+ * This routine returns the number of characters in the transmission buffer
+ * o always returns 0, since data is handed straight to the card
+ */
+static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ st_cpc_tty_area *cpc_tty;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
+ return -ENODEV;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+ return -ENODEV;
+ }
+
+ return(0);
+}
+
+int pc300_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ st_cpc_tty_area *cpc_tty;
+
+ CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear);
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY in tiocmset\n");
+ return -ENODEV;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if (set & TIOCM_RTS)
+ cpc_tty_signal_on(cpc_tty->pc300dev, CTL_RTS);
+ if (set & TIOCM_DTR)
+ cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
+
+ if (clear & TIOCM_RTS)
+ cpc_tty_signal_off(cpc_tty->pc300dev, CTL_RTS);
+ if (clear & TIOCM_DTR)
+ cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+
+ return 0;
+}
+
+int pc300_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ unsigned int result;
+ unsigned char status;
+ unsigned long flags;
+ st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+ pc300dev_t *pc300dev = cpc_tty->pc300dev;
+ pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
+ pc300_t *card = (pc300_t *) pc300chan->card;
+ int ch = pc300chan->channel;
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ CPC_TTY_DBG("%s-tty: tiocmget\n",
+ ((struct net_device*)(pc300dev->hdlc))->name);
+
+ CPC_TTY_LOCK(card, flags);
+ status = cpc_readb(card->hw.scabase+M_REG(CTL,ch));
+ CPC_TTY_UNLOCK(card,flags);
+
+ result = ((status & CTL_DTR) ? TIOCM_DTR : 0) |
+ ((status & CTL_RTS) ? TIOCM_RTS : 0);
+
+ return result;
+}
+
+/*
+ * PC300 TTY Flush Buffer routine
+ *
+ * This routine resets the transmission buffer
+ */
+static void cpc_tty_flush_buffer(struct tty_struct *tty)
+{
+ st_cpc_tty_area *cpc_tty;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY to flush buffer\n");
+ return;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+ return;
+ }
+
+ CPC_TTY_DBG("%s: call wake_up_interruptible\n",cpc_tty->name);
+
+ tty_wakeup(tty);
+ return;
+}
+
+/*
+ * PC300 TTY Hangup routine
+ *
+ * This routine is called by the tty driver to hangup the interface
+ * o clear DTR signal
+ */
+
+static void cpc_tty_hangup(struct tty_struct *tty)
+{
+ st_cpc_tty_area *cpc_tty;
+ int res;
+
+ if (!tty || !tty->driver_data ) {
+ CPC_TTY_DBG("hdlcX-tty: no TTY to hangup\n");
+ return ;
+ }
+
+ cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+ if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+ CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+ return ;
+ }
+ if (!serial_drv.refcount && cpc_tty_unreg_flag) {
+ cpc_tty_unreg_flag = 0;
+ CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+ if ((res=tty_unregister_driver(&serial_drv))) {
+ CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+ cpc_tty->name,res);
+ }
+ }
+ cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+}
+
+/*
+ * PC300 TTY RX work routine
+ * This routine handles the deferred RX work
+ * o walk the receive buffer list of each port
+ * o hand each buffered frame to the line discipline
+ * o free the buffer
+ */
+static void cpc_tty_rx_work(void * data)
+{
+ unsigned long port;
+ int i, j;
+ st_cpc_tty_area *cpc_tty;
+ volatile st_cpc_rx_buf * buf;
+ char flags=0,flg_rx=1;
+ struct tty_ldisc *ld;
+
+ if (cpc_tty_cnt == 0) return;
+
+
+ for (i=0; (i < 4) && flg_rx ; i++) {
+ flg_rx = 0;
+ port = (unsigned long)data;
+ for (j=0; j < CPC_TTY_NPORTS; j++) {
+ cpc_tty = &cpc_tty_area[port];
+
+ if ((buf=cpc_tty->buf_rx.first) != 0) {
+ if(cpc_tty->tty) {
+ ld = tty_ldisc_ref(cpc_tty->tty);
+ if(ld) {
+ if (ld->receive_buf) {
+ CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
+ ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
+ }
+ tty_ldisc_deref(ld);
+ }
+ }
+ cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
+ kfree((unsigned char *)buf);
+ buf = cpc_tty->buf_rx.first;
+ flg_rx = 1;
+ }
+ if (++port == CPC_TTY_NPORTS) port = 0;
+ }
+ }
+}
+
+/*
+ * PC300 TTY discard frame routine
+ *
+ * This routine discards a bad or oversized frame: it releases the DMA
+ * descriptors used by the frame, advancing rx_first_bd up to and
+ * including the descriptor marked end-of-message (DST_EOM).
+ */
+static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
+{
+ volatile pcsca_bd_t __iomem * ptdescr;
+ volatile unsigned char status;
+ pc300_t *card = (pc300_t *)pc300chan->card;
+ int ch = pc300chan->channel;
+
+ /* dma buf read */
+ ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
+ RX_BD_ADDR(ch, pc300chan->rx_first_bd));
+ while (pc300chan->rx_first_bd != pc300chan->rx_last_bd) {
+ status = cpc_readb(&ptdescr->status);
+ cpc_writeb(&ptdescr->status, 0);
+ cpc_writeb(&ptdescr->len, 0);
+ pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) &
+ (N_DMA_RX_BUF - 1);
+ if (status & DST_EOM) {
+ break; /* end of message */
+ }
+ ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + cpc_readl(&ptdescr->next));
+ }
+}
+
+void cpc_tty_receive(pc300dev_t *pc300dev)
+{
+ st_cpc_tty_area *cpc_tty;
+ pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
+ pc300_t *card = (pc300_t *)pc300chan->card;
+ int ch = pc300chan->channel;
+ volatile pcsca_bd_t __iomem * ptdescr;
+ struct net_device_stats *stats = hdlc_stats(pc300dev->dev);
+ int rx_len, rx_aux;
+ volatile unsigned char status;
+ unsigned short first_bd = pc300chan->rx_first_bd;
+ st_cpc_rx_buf *new=NULL;
+ unsigned char dsr_rx;
+
+ if (pc300dev->cpc_tty == NULL) {
+ return;
+ }
+
+ dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
+
+ cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
+
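+ /* Each frame is handled in two passes over its DMA descriptors: the
+ * first pass only adds up the descriptor lengths so a buffer of the
+ * right size can be allocated, the second pass copies the data and
+ * releases the descriptors. */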
+ while (1) {
+ rx_len = 0;
+ ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd));
+ while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+ rx_len += cpc_readw(&ptdescr->len);
+ first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
+ if (status & DST_EOM) {
+ break;
+ }
+ ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
+ }
+
+ if (!rx_len) {
+ if (dsr_rx & DSR_BOF) {
+ /* update EDA */
+ cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
+ RX_BD_ADDR(ch, pc300chan->rx_last_bd));
+ }
+ if (new) {
+ kfree(new);
+ new = NULL;
+ }
+ return;
+ }
+
+ if (rx_len > CPC_TTY_MAX_MTU) {
+ /* Free RX descriptors */
+ CPC_TTY_DBG("%s: frame size is invalid.\n",cpc_tty->name);
+ stats->rx_errors++;
+ stats->rx_frame_errors++;
+ cpc_tty_rx_disc_frame(pc300chan);
+ continue;
+ }
+
+ new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
+ if (new == 0) {
+ cpc_tty_rx_disc_frame(pc300chan);
+ continue;
+ }
+
+ /* dma buf read */
+ ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
+ RX_BD_ADDR(ch, pc300chan->rx_first_bd));
+
+ rx_len = 0; /* counter frame size */
+
+ while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+ rx_aux = cpc_readw(&ptdescr->len);
+ if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT))
+ || (rx_aux > BD_DEF_LEN)) {
+ CPC_TTY_DBG("%s: reception error\n", cpc_tty->name);
+ stats->rx_errors++;
+ if (status & DST_OVR) {
+ stats->rx_fifo_errors++;
+ }
+ if (status & DST_CRC) {
+ stats->rx_crc_errors++;
+ }
+ if ((status & (DST_RBIT | DST_SHRT | DST_ABT)) ||
+ (rx_aux > BD_DEF_LEN)) {
+ stats->rx_frame_errors++;
+ }
+ /* discard remaining descriptors used by the bad frame */
+ CPC_TTY_DBG("%s: reception error - discard descriptors",
+ cpc_tty->name);
+ cpc_tty_rx_disc_frame(pc300chan);
+ rx_len = 0;
+ kfree(new);
+ new = NULL;
+ break; /* read next frame - while(1) */
+ }
+
+ if (cpc_tty->state != CPC_TTY_ST_OPEN) {
+ /* Free RX descriptors */
+ cpc_tty_rx_disc_frame(pc300chan);
+ stats->rx_dropped++;
+ rx_len = 0;
+ kfree(new);
+ new = NULL;
+ break; /* read next frame - while(1) */
+ }
+
+ /* read the segment of the frame */
+ if (rx_aux != 0) {
+ memcpy_fromio((new->data + rx_len),
+ (void __iomem *)(card->hw.rambase +
+ cpc_readl(&ptdescr->ptbuf)), rx_aux);
+ rx_len += rx_aux;
+ }
+ cpc_writeb(&ptdescr->status,0);
+ cpc_writeb(&ptdescr->len, 0);
+ pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) &
+ (N_DMA_RX_BUF -1);
+ if (status & DST_EOM)break;
+
+ ptdescr = (pcsca_bd_t __iomem *) (card->hw.rambase +
+ cpc_readl(&ptdescr->next));
+ }
+ /* update pointer */
+ pc300chan->rx_last_bd = (pc300chan->rx_first_bd - 1) &
+ (N_DMA_RX_BUF - 1) ;
+ if (!(dsr_rx & DSR_BOF)) {
+ /* update EDA */
+ cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
+ RX_BD_ADDR(ch, pc300chan->rx_last_bd));
+ }
+ if (rx_len != 0) {
+ stats->rx_bytes += rx_len;
+
+ if (pc300dev->trace_on) {
+ cpc_tty_trace(pc300dev, new->data,rx_len, 'R');
+ }
+ new->size = rx_len;
+ new->next = NULL;
+ if (cpc_tty->buf_rx.first == 0) {
+ cpc_tty->buf_rx.first = new;
+ cpc_tty->buf_rx.last = new;
+ } else {
+ cpc_tty->buf_rx.last->next = new;
+ cpc_tty->buf_rx.last = new;
+ }
+ schedule_work(&(cpc_tty->tty_rx_work));
+ stats->rx_packets++;
+ }
+ }
+}
+
+/*
+ * PC300 TTY TX work routine
+ *
+ * This routine handles the deferred TX work.
+ * o if the port is open, wake up the tty layer (tty_wakeup)
+ */
+static void cpc_tty_tx_work(void *data)
+{
+ st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data;
+ struct tty_struct *tty;
+
+ CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
+
+ if ((tty = cpc_tty->tty) == 0) {
+ CPC_TTY_DBG("%s: the interface is not opened\n",cpc_tty->name);
+ return;
+ }
+ tty_wakeup(tty);
+}
+
+/*
+ * PC300 TTY send to card routine
+ *
+ * This routine sends data to the card.
+ * o check for free TX descriptors
+ * o write the data to the DMA buffers
+ * o start the transmission
+ */
+static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len)
+{
+ pc300ch_t *chan = (pc300ch_t *)dev->chan;
+ pc300_t *card = (pc300_t *)chan->card;
+ int ch = chan->channel;
+ struct net_device_stats *stats = hdlc_stats(dev->dev);
+ unsigned long flags;
+ volatile pcsca_bd_t __iomem *ptdescr;
+ int i, nchar;
+ int tosend = len;
+ int nbuf = ((len - 1)/BD_DEF_LEN) + 1;
+ unsigned char *pdata=buf;
+
+ CPC_TTY_DBG("%s: cpc_tty_send_to_card len=%i",
+ ((st_cpc_tty_area *)dev->cpc_tty)->name, len);
+
+ if (nbuf >= card->chan[ch].nfree_tx_bd) {
+ return 1;
+ }
+
+ /* write buffer to DMA buffers */
+ CPC_TTY_DBG("%s: call dma_buf_write\n",
+ ((st_cpc_tty_area *)dev->cpc_tty)->name);
+ for (i = 0 ; i < nbuf ; i++) {
+ ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
+ TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
+ nchar = (BD_DEF_LEN > tosend) ? tosend : BD_DEF_LEN;
+ if (cpc_readb(&ptdescr->status) & DST_OSB) {
+ memcpy_toio((void __iomem *)(card->hw.rambase +
+ cpc_readl(&ptdescr->ptbuf)),
+ &pdata[len - tosend],
+ nchar);
+ card->chan[ch].nfree_tx_bd--;
+ if ((i + 1) == nbuf) {
+ /* This must be the last BD to be used */
+ cpc_writeb(&ptdescr->status, DST_EOM);
+ } else {
+ cpc_writeb(&ptdescr->status, 0);
+ }
+ cpc_writew(&ptdescr->len, nchar);
+ } else {
+ CPC_TTY_DBG("%s: error in dma_buf_write\n",
+ ((st_cpc_tty_area *)dev->cpc_tty)->name);
+ stats->tx_dropped++;
+ return 1;
+ }
+ tosend -= nchar;
+ card->chan[ch].tx_next_bd =
+ (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
+ }
+
+ if (dev->trace_on) {
+ cpc_tty_trace(dev, buf, len,'T');
+ }
+
+ /* start transmission */
+ CPC_TTY_DBG("%s: start transmission\n",
+ ((st_cpc_tty_area *)dev->cpc_tty)->name);
+
+ CPC_TTY_LOCK(card, flags);
+ cpc_writeb(card->hw.scabase + DTX_REG(EDAL, ch),
+ TX_BD_ADDR(ch, chan->tx_next_bd));
+ cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA);
+ cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE);
+
+ if (card->hw.type == PC300_TE) {
+ cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+ cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+ }
+ CPC_TTY_UNLOCK(card, flags);
+ return 0;
+}
+
+/*
+ * PC300 TTY trace routine
+ *
+ * This routine passes a copy of the transmitted/received data up to the
+ * network layer so it can be captured by a sniffer.
+ * o build an skb tagged with the interface name and the direction (R/T)
+ * o copy the traced data into it and hand it to netif_rx()
+ */
+
+static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx)
+{
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(10 + len)) == NULL) {
+ /* out of memory */
+ CPC_TTY_DBG("%s: tty_trace - out of memory\n", dev->dev->name);
+ return;
+ }
+
+ skb_put (skb, 10 + len);
+ skb->dev = dev->dev;
+ skb->protocol = htons(ETH_P_CUST);
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+ skb->len = 10 + len;
+
+ memcpy(skb->data,dev->dev->name,5);
+ skb->data[5] = '[';
+ skb->data[6] = rxtx;
+ skb->data[7] = ']';
+ skb->data[8] = ':';
+ skb->data[9] = ' ';
+ memcpy(&skb->data[10], buf, len);
+ netif_rx(skb);
+}
+
+/*
+ * PC300 TTY unregister service routine
+ *
+ * This routine unregisters one interface.
+ */
+void cpc_tty_unregister_service(pc300dev_t *pc300dev)
+{
+ st_cpc_tty_area *cpc_tty;
+ ulong flags;
+ int res;
+
+ if ((cpc_tty= (st_cpc_tty_area *) pc300dev->cpc_tty) == 0) {
+ CPC_TTY_DBG("%s: interface is not TTY\n", pc300dev->dev->name);
+ return;
+ }
+ CPC_TTY_DBG("%s: cpc_tty_unregister_service", cpc_tty->name);
+
+ if (cpc_tty->pc300dev != pc300dev) {
+ CPC_TTY_DBG("%s: invalid tty ptr=%s\n",
+ pc300dev->dev->name, cpc_tty->name);
+ return;
+ }
+
+ if (--cpc_tty_cnt == 0) {
+ if (serial_drv.refcount) {
+ CPC_TTY_DBG("%s: unregister is not possible, refcount=%d",
+ cpc_tty->name, serial_drv.refcount);
+ cpc_tty_cnt++;
+ cpc_tty_unreg_flag = 1;
+ return;
+ } else {
+ CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+ if ((res=tty_unregister_driver(&serial_drv))) {
+ CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+ cpc_tty->name,res);
+ }
+ }
+ }
+ CPC_TTY_LOCK(pc300dev->chan->card,flags);
+ cpc_tty->tty = NULL;
+ CPC_TTY_UNLOCK(pc300dev->chan->card, flags);
+ cpc_tty->tty_minor = 0;
+ cpc_tty->state = CPC_TTY_ST_IDLE;
+}
+
+/*
+ * PC300 TTY trigger poll routine
+ * This routine is called by the PC300 driver on a TX interrupt to
+ * schedule the deferred TX work.
+ */
+void cpc_tty_trigger_poll(pc300dev_t *pc300dev)
+{
+ st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
+ if (!cpc_tty) {
+ return;
+ }
+ schedule_work(&(cpc_tty->tty_tx_work));
+}
+
+/*
+ * PC300 TTY reset var routine
+ * This routine is called by the pc300 driver to initialize the TTY area.
+ */
+
+void cpc_tty_reset_var(void)
+{
+ int i ;
+
+ CPC_TTY_DBG("hdlcX-tty: reset variables\n");
+ /* reset the tty_driver structure - serial_drv */
+ memset(&serial_drv, 0, sizeof(struct tty_driver));
+ for (i=0; i < CPC_TTY_NPORTS; i++){
+ memset(&cpc_tty_area[i],0, sizeof(st_cpc_tty_area));
+ }
+}
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
new file mode 100644
index 000000000000..8dea07b47999
--- /dev/null
+++ b/drivers/net/wan/pci200syn.c
@@ -0,0 +1,488 @@
+/*
+ * Goramo PCI200SYN synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see http://hq.pm.waw.pl/hdlc/
+ *
+ * Sources of information:
+ * Hitachi HD64572 SCA-II User's Manual
+ * PLX Technology Inc. PCI9052 Data Book
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/pci.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+
+#include "hd64572.h"
+
+static const char* version = "Goramo PCI200SYN driver version: 1.16";
+static const char* devname = "PCI200SYN";
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128 bytes) */
+#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1 KB) */
+#define ALL_PAGES_ALWAYS_MAPPED
+#define NEED_DETECT_RAM
+#define NEED_SCA_MSCI_INTR
+#define MAX_TX_BUFFERS 10
+
+static int pci_clock_freq = 33000000;
+#define CLOCK_BASE pci_clock_freq
+
+#define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */
+#define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */
+
+
+/*
+ * PLX PCI9052 local configuration and shared runtime registers.
+ * This structure can be used to access 9052 registers (memory mapped).
+ */
+typedef struct {
+ u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
+ u32 loc_rom_range; /* 10h : Local ROM Range */
+ u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
+ u32 loc_rom_base; /* 24h : Local ROM Base */
+ u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
+ u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
+ u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
+ u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
+ u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
+}plx9052;
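+
+/*
+ * Usage sketch (this mirrors pci200_pci_init_one() below rather than
+ * adding any new interface): the structure is simply overlaid on the
+ * ioremap()ed BAR0 window and accessed through readl()/writel(), e.g.
+ *
+ *	u32 __iomem *p = &card->plxbase->init_ctrl;
+ *	writel(readl(p) | 0x40000000, p);	// assert the PLX soft reset
+ */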
+
+
+
+typedef struct port_s {
+ struct net_device *dev;
+ struct card_s *card;
+ spinlock_t lock; /* TX lock */
+ sync_serial_settings settings;
+ int rxpart; /* partial frame received, next frame invalid*/
+ unsigned short encoding;
+ unsigned short parity;
+ u16 rxin; /* rx ring buffer 'in' pointer */
+ u16 txin; /* tx ring buffer 'in' and 'last' pointers */
+ u16 txlast;
+ u8 rxs, txs, tmc; /* SCA registers */
+ u8 phy_node; /* physical port # - 0 or 1 */
+}port_t;
+
+
+
+typedef struct card_s {
+ u8 __iomem *rambase; /* buffer memory base (virtual) */
+ u8 __iomem *scabase; /* SCA memory base (virtual) */
+ plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
+ u16 rx_ring_buffers; /* number of buffers in a ring */
+ u16 tx_ring_buffers;
+ u16 buff_offset; /* offset of first buffer of first channel */
+ u8 irq; /* interrupt request level */
+
+ port_t ports[2];
+}card_t;
+
+
+#define sca_in(reg, card) readb(card->scabase + (reg))
+#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
+#define sca_inw(reg, card) readw(card->scabase + (reg))
+#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
+#define sca_inl(reg, card) readl(card->scabase + (reg))
+#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
+
+#define port_to_card(port) (port->card)
+#define log_node(port) (port->phy_node)
+#define phy_node(port) (port->phy_node)
+#define winbase(card) (card->rambase)
+#define get_port(card, port) (&card->ports[port])
+#define sca_flush(card) (sca_in(IER0, card));
+
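+/*
+ * Replacement for memcpy_toio() used by the shared hd6457x.c code
+ * included below: long copies into card RAM are split into chunks of at
+ * most 256 bytes, with a dummy readb() after each chunk to flush the
+ * posted PCI writes before the next chunk starts.
+ */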
+static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
+{
+ int len;
+ do {
+ len = length > 256 ? 256 : length;
+ memcpy_toio(dest, src, len);
+ dest += len;
+ src += len;
+ length -= len;
+ readb(dest);
+ } while (len);
+}
+
+#undef memcpy_toio
+#define memcpy_toio new_memcpy_toio
+
+#include "hd6457x.c"
+
+
+static void pci200_set_iface(port_t *port)
+{
+ card_t *card = port->card;
+ u16 msci = get_msci(port);
+ u8 rxs = port->rxs & CLK_BRG_MASK;
+ u8 txs = port->txs & CLK_BRG_MASK;
+
+ sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
+ port_to_card(port));
+ switch(port->settings.clock_type) {
+ case CLOCK_INT:
+ rxs |= CLK_BRG; /* BRG output */
+ txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+ break;
+
+ case CLOCK_TXINT:
+ rxs |= CLK_LINE; /* RXC input */
+ txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
+ break;
+
+ case CLOCK_TXFROMRX:
+ rxs |= CLK_LINE; /* RXC input */
+ txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+ break;
+
+ default: /* EXTernal clock */
+ rxs |= CLK_LINE; /* RXC input */
+ txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
+ break;
+ }
+
+ port->rxs = rxs;
+ port->txs = txs;
+ sca_out(rxs, msci + RXS, card);
+ sca_out(txs, msci + TXS, card);
+ sca_set_port(port);
+}
+
+
+
+static int pci200_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+
+ int result = hdlc_open(dev);
+ if (result)
+ return result;
+
+ sca_open(dev);
+ pci200_set_iface(port);
+ sca_flush(port_to_card(port));
+ return 0;
+}
+
+
+
+static int pci200_close(struct net_device *dev)
+{
+ sca_close(dev);
+ sca_flush(port_to_card(dev_to_port(dev)));
+ hdlc_close(dev);
+ return 0;
+}
+
+
+
+static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+ if (cmd == SIOCDEVPRIVATE) {
+ sca_dump_rings(dev);
+ return 0;
+ }
+#endif
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_V35;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(line, &port->settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_V35:
+ case IF_IFACE_SYNC_SERIAL:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ if (new_line.clock_type != CLOCK_EXT &&
+ new_line.clock_type != CLOCK_TXFROMRX &&
+ new_line.clock_type != CLOCK_INT &&
+ new_line.clock_type != CLOCK_TXINT)
+ return -EINVAL; /* No such clock setting */
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ memcpy(&port->settings, &new_line, size); /* Update settings */
+ pci200_set_iface(port);
+ sca_flush(port_to_card(port));
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+
+
+static void pci200_pci_remove_one(struct pci_dev *pdev)
+{
+ int i;
+ card_t *card = pci_get_drvdata(pdev);
+
+ for(i = 0; i < 2; i++)
+ if (card->ports[i].card) {
+ struct net_device *dev = port_to_dev(&card->ports[i]);
+ unregister_hdlc_device(dev);
+ }
+
+ if (card->irq)
+ free_irq(card->irq, card);
+
+ if (card->rambase)
+ iounmap(card->rambase);
+ if (card->scabase)
+ iounmap(card->scabase);
+ if (card->plxbase)
+ iounmap(card->plxbase);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ if (card->ports[0].dev)
+ free_netdev(card->ports[0].dev);
+ if (card->ports[1].dev)
+ free_netdev(card->ports[1].dev);
+ kfree(card);
+}
+
+
+
+static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ card_t *card;
+ u8 rev_id;
+ u32 __iomem *p;
+ int i;
+ u32 ramsize;
+ u32 ramphys; /* buffer memory base */
+ u32 scaphys; /* SCA memory base */
+ u32 plxphys; /* PLX registers memory base */
+
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(KERN_INFO "%s\n", version);
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i)
+ return i;
+
+ i = pci_request_regions(pdev, "PCI200SYN");
+ if (i) {
+ pci_disable_device(pdev);
+ return i;
+ }
+
+ card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ if (card == NULL) {
+ printk(KERN_ERR "pci200syn: unable to allocate memory\n");
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ return -ENOBUFS;
+ }
+ memset(card, 0, sizeof(card_t));
+ pci_set_drvdata(pdev, card);
+ card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
+ card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
+ if (!card->ports[0].dev || !card->ports[1].dev) {
+ printk(KERN_ERR "pci200syn: unable to allocate memory\n");
+ pci200_pci_remove_one(pdev);
+ return -ENOMEM;
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+ if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
+ pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
+ pci_resource_len(pdev, 3) < 16384) {
+ printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n");
+ pci200_pci_remove_one(pdev);
+ return -EFAULT;
+ }
+
+ plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+ card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
+
+ scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+ card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
+
+ ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
+ card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
+
+ if (card->plxbase == NULL ||
+ card->scabase == NULL ||
+ card->rambase == NULL) {
+ printk(KERN_ERR "pci200syn: ioremap() failed\n");
+ pci200_pci_remove_one(pdev);
+ return -EFAULT;
+ }
+
+ /* Reset PLX */
+ p = &card->plxbase->init_ctrl;
+ writel(readl(p) | 0x40000000, p);
+ readl(p); /* Flush the write - do not use sca_flush */
+ udelay(1);
+
+ writel(readl(p) & ~0x40000000, p);
+ readl(p); /* Flush the write - do not use sca_flush */
+ udelay(1);
+
+ ramsize = sca_detect_ram(card, card->rambase,
+ pci_resource_len(pdev, 3));
+
+ /* number of TX + RX buffers for one port - this is dual port card */
+ i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
+ card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
+ card->rx_ring_buffers = i - card->tx_ring_buffers;
+
+ card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
+ card->rx_ring_buffers);
+
+ printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +"
+ " %u RX packets rings\n", ramsize / 1024, ramphys,
+ pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+
+ if (card->tx_ring_buffers < 1) {
+ printk(KERN_ERR "pci200syn: RAM test failed\n");
+ pci200_pci_remove_one(pdev);
+ return -EFAULT;
+ }
+
+ /* Enable interrupts on the PCI bridge */
+ p = &card->plxbase->intr_ctrl_stat;
+ writew(readw(p) | 0x0040, p);
+
+ /* Allocate IRQ */
+ if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
+ printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
+ pdev->irq);
+ pci200_pci_remove_one(pdev);
+ return -EBUSY;
+ }
+ card->irq = pdev->irq;
+
+ sca_init(card, 0);
+
+ for(i = 0; i < 2; i++) {
+ port_t *port = &card->ports[i];
+ struct net_device *dev = port_to_dev(port);
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ port->phy_node = i;
+
+ spin_lock_init(&port->lock);
+ SET_MODULE_OWNER(dev);
+ dev->irq = card->irq;
+ dev->mem_start = ramphys;
+ dev->mem_end = ramphys + ramsize - 1;
+ dev->tx_queue_len = 50;
+ dev->do_ioctl = pci200_ioctl;
+ dev->open = pci200_open;
+ dev->stop = pci200_close;
+ hdlc->attach = sca_attach;
+ hdlc->xmit = sca_xmit;
+ port->settings.clock_type = CLOCK_EXT;
+ port->card = card;
+ if(register_hdlc_device(dev)) {
+ printk(KERN_ERR "pci200syn: unable to register hdlc "
+ "device\n");
+ port->card = NULL;
+ pci200_pci_remove_one(pdev);
+ return -ENOBUFS;
+ }
+ sca_init_sync_port(port); /* Set up SCA memory */
+
+ printk(KERN_INFO "%s: PCI200SYN node %d\n",
+ dev->name, port->phy_node);
+ }
+
+ sca_flush(card);
+ return 0;
+}
+
+
+
+static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
+
+static struct pci_driver pci200_pci_driver = {
+ .name = "PCI200SYN",
+ .id_table = pci200_pci_tbl,
+ .probe = pci200_pci_init_one,
+ .remove = pci200_pci_remove_one,
+};
+
+
+static int __init pci200_init_module(void)
+{
+#ifdef MODULE
+ printk(KERN_INFO "%s\n", version);
+#endif
+ if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
+ printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
+ return -EINVAL;
+ }
+ return pci_module_init(&pci200_pci_driver);
+}
+
+
+
+static void __exit pci200_cleanup_module(void)
+{
+ pci_unregister_driver(&pci200_pci_driver);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
+module_param(pci_clock_freq, int, 0444);
+MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
+module_init(pci200_init_module);
+module_exit(pci200_cleanup_module);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
new file mode 100644
index 000000000000..db2c798ba89e
--- /dev/null
+++ b/drivers/net/wan/sbni.c
@@ -0,0 +1,1735 @@
+/* sbni.c: Granch SBNI12 leased line adapters driver for linux
+ *
+ * Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
+ *
+ * Previous versions were written by Yaroslav Polyakov,
+ * Alexey Zverev and Max Khon.
+ *
+ * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
+ * double-channel, PCI and ISA modifications.
+ * More info and useful utilities to work with SBNI12 cards you can find
+ * at http://www.granch.com (English) or http://www.granch.ru (Russian)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License.
+ *
+ *
+ * 5.0.1 Jun 22 2001
+ * - Fixed bug in probe
+ * 5.0.0 Jun 06 2001
+ * - Driver was completely redesigned by Denis I.Timofeev,
+ * - now PCI/Dual, ISA/Dual (with single interrupt line) models are
+ * - supported
+ * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
+ * - PCI cards support
+ * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
+ * - Completely rebuilt all the packet storage system
+ * - to work in Ethernet-like style.
+ * 3.1.1 just fixed some bugs (5 aug 1999)
+ * 3.1.0 added balancing feature (26 apr 1999)
+ * 3.0.1 just fixed some bugs (14 apr 1999).
+ * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
+ * - added pre-calculation for CRC, fixed bug with "len-2" frames,
+ * - removed outbound fragmentation (MTU=1000), written CRC-calculation
+ * - on asm, added work with hard_headers and now we have our own cache
+ * - for them, optionally supported word-interchange on some chipsets,
+ *
+ * Known problem: this driver wasn't tested on multiprocessor machine.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "sbni.h"
+
+/* device private data */
+
+struct net_local {
+ struct net_device_stats stats;
+ struct timer_list watchdog;
+
+ spinlock_t lock;
+ struct sk_buff *rx_buf_p; /* receive buffer ptr */
+ struct sk_buff *tx_buf_p; /* transmit buffer ptr */
+
+ unsigned int framelen; /* current frame length */
+ unsigned int maxframe; /* maximum valid frame length */
+ unsigned int state;
+ unsigned int inppos, outpos; /* positions in rx/tx buffers */
+
+ /* number of the frame being transmitted - counts down from the frame count to 1 */
+ unsigned int tx_frameno;
+
+ /* expected number of next receiving frame */
+ unsigned int wait_frameno;
+
+ /* count of failed attempts to send a frame - 32 attempts are made
+ before giving up, while the receiver on the other end of the wire tunes in */
+ unsigned int trans_errors;
+
+ /* idle time; send pong when limit exceeded */
+ unsigned int timer_ticks;
+
+ /* fields used for receive level autoselection */
+ int delta_rxl;
+ unsigned int cur_rxl_index, timeout_rxl;
+ unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
+
+ struct sbni_csr1 csr1; /* current value of CSR1 */
+ struct sbni_in_stats in_stats; /* internal statistics */
+
+ struct net_device *second; /* for ISA/dual cards */
+
+#ifdef CONFIG_SBNI_MULTILINE
+ struct net_device *master;
+ struct net_device *link;
+#endif
+};
+
+
+static int sbni_card_probe( unsigned long );
+static int sbni_pci_probe( struct net_device * );
+static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
+static int sbni_open( struct net_device * );
+static int sbni_close( struct net_device * );
+static int sbni_start_xmit( struct sk_buff *, struct net_device * );
+static int sbni_ioctl( struct net_device *, struct ifreq *, int );
+static struct net_device_stats *sbni_get_stats( struct net_device * );
+static void set_multicast_list( struct net_device * );
+
+static irqreturn_t sbni_interrupt( int, void *, struct pt_regs * );
+static void handle_channel( struct net_device * );
+static int recv_frame( struct net_device * );
+static void send_frame( struct net_device * );
+static int upload_data( struct net_device *,
+ unsigned, unsigned, unsigned, u32 );
+static void download_data( struct net_device *, u32 * );
+static void sbni_watchdog( unsigned long );
+static void interpret_ack( struct net_device *, unsigned );
+static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
+static void indicate_pkt( struct net_device * );
+static void card_start( struct net_device * );
+static void prepare_to_send( struct sk_buff *, struct net_device * );
+static void drop_xmit_queue( struct net_device * );
+static void send_frame_header( struct net_device *, u32 * );
+static int skip_tail( unsigned int, unsigned int, u32 );
+static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
+static void change_level( struct net_device * );
+static void timeout_change_level( struct net_device * );
+static u32 calc_crc32( u32, u8 *, u32 );
+static struct sk_buff * get_rx_buf( struct net_device * );
+static int sbni_init( struct net_device * );
+
+#ifdef CONFIG_SBNI_MULTILINE
+static int enslave( struct net_device *, struct net_device * );
+static int emancipate( struct net_device * );
+#endif
+
+#ifdef __i386__
+#define ASM_CRC 1
+#endif
+
+static const char version[] =
+ "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
+
+static int skip_pci_probe __initdata = 0;
+static int scandone __initdata = 0;
+static int num __initdata = 0;
+
+static unsigned char rxl_tab[];
+static u32 crc32tab[];
+
+/* A list of all installed devices, for removing the driver module. */
+static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
+
+/* Lists of device's parameters */
+static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
+ { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
+static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
+static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
+static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
+ { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
+static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
+
+#ifndef MODULE
+typedef u32 iarr[];
+static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+#endif
+
+/* A zero-terminated list of I/O addresses to be probed on ISA bus */
+static unsigned int netcard_portlist[ ] __initdata = {
+ 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
+ 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
+ 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
+ 0 };
+
+
+/*
+ * Look for an SBNI card at the address stored in dev->base_addr, if nonzero.
+ * Otherwise, look through the PCI bus; if no PCI card is found, scan the ISA bus.
+ */
+
+static inline int __init
+sbni_isa_probe( struct net_device *dev )
+{
+ if( dev->base_addr > 0x1ff
+ && request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name )
+ && sbni_probe1( dev, dev->base_addr, dev->irq ) )
+
+ return 0;
+ else {
+ printk( KERN_ERR "sbni: base address 0x%lx is busy, or adapter "
+ "is malfunctional!\n", dev->base_addr );
+ return -ENODEV;
+ }
+}
+
+static void __init sbni_devsetup(struct net_device *dev)
+{
+ ether_setup( dev );
+ dev->open = &sbni_open;
+ dev->stop = &sbni_close;
+ dev->hard_start_xmit = &sbni_start_xmit;
+ dev->get_stats = &sbni_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &sbni_ioctl;
+
+ SET_MODULE_OWNER( dev );
+}
+
+int __init sbni_probe(int unit)
+{
+ struct net_device *dev;
+ static unsigned version_printed __initdata = 0;
+ int err;
+
+ dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
+ if (!dev)
+ return -ENOMEM;
+
+ sprintf(dev->name, "sbni%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = sbni_init(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ release_region( dev->base_addr, SBNI_IO_EXTENT );
+ free_netdev(dev);
+ return err;
+ }
+ if( version_printed++ == 0 )
+ printk( KERN_INFO "%s", version );
+ return 0;
+}
+
+static int __init sbni_init(struct net_device *dev)
+{
+ int i;
+ if( dev->base_addr )
+ return sbni_isa_probe( dev );
+ /* otherwise we have to search for our adapter */
+
+ if( io[ num ] != -1 )
+ dev->base_addr = io[ num ],
+ dev->irq = irq[ num ];
+ else if( scandone || io[ 0 ] != -1 )
+ return -ENODEV;
+
+ /* if io[ num ] contains non-zero address, then that is on ISA bus */
+ if( dev->base_addr )
+ return sbni_isa_probe( dev );
+
+ /* ...otherwise - scan PCI first */
+ if( !skip_pci_probe && !sbni_pci_probe( dev ) )
+ return 0;
+
+ if( io[ num ] == -1 ) {
+ /* Auto-scan stops when the first ISA card is found */
+ scandone = 1;
+ if( num > 0 )
+ return -ENODEV;
+ }
+
+ for( i = 0; netcard_portlist[ i ]; ++i ) {
+ int ioaddr = netcard_portlist[ i ];
+ if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name )
+ && sbni_probe1( dev, ioaddr, 0 ))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+
+int __init
+sbni_pci_probe( struct net_device *dev )
+{
+ struct pci_dev *pdev = NULL;
+
+ while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
+ != NULL ) {
+ int pci_irq_line;
+ unsigned long pci_ioaddr;
+ u16 subsys;
+
+ if( pdev->vendor != SBNI_PCI_VENDOR
+ && pdev->device != SBNI_PCI_DEVICE )
+ continue;
+
+ pci_ioaddr = pci_resource_start( pdev, 0 );
+ pci_irq_line = pdev->irq;
+
+ /* Skip cards already found in previous calls */
+ if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
+ pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
+
+ if (subsys != 2)
+ continue;
+
+ /* Dual adapter is present */
+ if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
+ dev->name ) )
+ continue;
+ }
+
+ if( pci_irq_line <= 0 || pci_irq_line >= NR_IRQS )
+ printk( KERN_WARNING " WARNING: The PCI BIOS assigned "
+ "this PCI card to IRQ %d, which is unlikely "
+ "to work!.\n"
+ KERN_WARNING " You should use the PCI BIOS "
+ "setup to assign a valid IRQ line.\n",
+ pci_irq_line );
+
+ /* avoid enabling the device twice for dual adapters */
+ if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
+ release_region( pci_ioaddr, SBNI_IO_EXTENT );
+ pci_dev_put( pdev );
+ return -EIO;
+ }
+ if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ /* not the best thing to do, but this is all messed up
+ for hotplug systems anyway... */
+ pci_dev_put( pdev );
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+
+static struct net_device * __init
+sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
+{
+ struct net_local *nl;
+
+ if( sbni_card_probe( ioaddr ) ) {
+ release_region( ioaddr, SBNI_IO_EXTENT );
+ return NULL;
+ }
+
+ outb( 0, ioaddr + CSR0 );
+
+ if( irq < 2 ) {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ outb( EN_INT | TR_REQ, ioaddr + CSR0 );
+ outb( PR_RES, ioaddr + CSR1 );
+ mdelay(50);
+ irq = probe_irq_off(irq_mask);
+ outb( 0, ioaddr + CSR0 );
+
+ if( !irq ) {
+ printk( KERN_ERR "%s: can't detect device irq!\n",
+ dev->name );
+ release_region( ioaddr, SBNI_IO_EXTENT );
+ return NULL;
+ }
+ } else if( irq == 2 )
+ irq = 9;
+
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in sbni-specific dev fields. */
+ nl = dev->priv;
+ if( !nl ) {
+ printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
+ release_region( ioaddr, SBNI_IO_EXTENT );
+ return NULL;
+ }
+
+ dev->priv = nl;
+ memset( nl, 0, sizeof(struct net_local) );
+ spin_lock_init( &nl->lock );
+
+ /* store MAC address (generate if that isn't known) */
+ *(u16 *)dev->dev_addr = htons( 0x00ff );
+ *(u32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
+ ( (mac[num] ? mac[num] : (u32)((long)dev->priv)) & 0x00ffffff) );
+
+ /* store link settings (speed, receive level ) */
+ nl->maxframe = DEFAULT_FRAME_LEN;
+ nl->csr1.rate = baud[ num ];
+
+ if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
+ /* autotune rxl */
+ nl->cur_rxl_index = DEF_RXL,
+ nl->delta_rxl = DEF_RXL_DELTA;
+ else
+ nl->delta_rxl = 0;
+ nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
+ if( inb( ioaddr + CSR0 ) & 0x01 )
+ nl->state |= FL_SLOW_MODE;
+
+ printk( KERN_NOTICE "%s: ioaddr %#lx, irq %d, "
+ "MAC: 00:ff:01:%02x:%02x:%02x\n",
+ dev->name, dev->base_addr, dev->irq,
+ ((u8 *) dev->dev_addr) [3],
+ ((u8 *) dev->dev_addr) [4],
+ ((u8 *) dev->dev_addr) [5] );
+
+ printk( KERN_NOTICE "%s: speed %d, receive level ", dev->name,
+ ( (nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
+ / (1 << nl->csr1.rate) );
+
+ if( nl->delta_rxl == 0 )
+ printk( "0x%x (fixed)\n", nl->cur_rxl_index );
+ else
+ printk( "(auto)\n");
+
+#ifdef CONFIG_SBNI_MULTILINE
+ nl->master = dev;
+ nl->link = NULL;
+#endif
+
+ sbni_cards[ num++ ] = dev;
+ return dev;
+}
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+static int
+sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct net_device *p;
+
+ netif_stop_queue( dev );
+
+ /* Looking for idle device in the list */
+ for( p = dev; p; ) {
+ struct net_local *nl = (struct net_local *) p->priv;
+ spin_lock( &nl->lock );
+ if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
+ p = nl->link;
+ spin_unlock( &nl->lock );
+ } else {
+ /* Idle dev is found */
+ prepare_to_send( skb, p );
+ spin_unlock( &nl->lock );
+ netif_start_queue( dev );
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+#else /* CONFIG_SBNI_MULTILINE */
+
+static int
+sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ netif_stop_queue( dev );
+ spin_lock( &nl->lock );
+
+ prepare_to_send( skb, dev );
+
+ spin_unlock( &nl->lock );
+ return 0;
+}
+
+#endif /* CONFIG_SBNI_MULTILINE */
+
+/* -------------------------------------------------------------------------- */
+
+/* interrupt handler */
+
+/*
+ * SBNI12D-10 and -11/ISA boards in "common interrupt" mode cannot be
+ * treated as two independent single-channel devices.  Each channel looks
+ * like an Ethernet interface, but the interrupt handler must be shared.
+ * Only the first channel's ("master") driver registers the handler; its
+ * struct net_local holds a pointer to the "slave" channel's net_device,
+ * and the handler services that channel's interrupts as well.
+ * The dev of every successfully attached ISA SBNI board is linked into a
+ * list.  When the next board is initialized, it scans this list; if it
+ * finds a dev with the same irq and an ioaddr that differs by 4, it
+ * assumes that board to be its "master".
+ */
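+
+/*
+ * A minimal sketch of that pairing test (the actual code lives in
+ * sbni_open() below):
+ *
+ *	if ((*p)->irq == dev->irq &&
+ *	    ((*p)->base_addr == dev->base_addr + 4 ||
+ *	     (*p)->base_addr == dev->base_addr - 4))
+ *		// *p is the "master"; dev becomes its second channel
+ */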
+
+static irqreturn_t
+sbni_interrupt( int irq, void *dev_id, struct pt_regs *regs )
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct net_local *nl = (struct net_local *) dev->priv;
+ int repeat;
+
+ spin_lock( &nl->lock );
+ if( nl->second )
+ spin_lock( &((struct net_local *) nl->second->priv)->lock );
+
+ do {
+ repeat = 0;
+ if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
+ handle_channel( dev ),
+ repeat = 1;
+ if( nl->second && /* second channel present */
+ (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
+ handle_channel( nl->second ),
+ repeat = 1;
+ } while( repeat );
+
+ if( nl->second )
+ spin_unlock( &((struct net_local *)nl->second->priv)->lock );
+ spin_unlock( &nl->lock );
+ return IRQ_HANDLED;
+}
+
+
+static void
+handle_channel( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+ int req_ans;
+ unsigned char csr0;
+
+#ifdef CONFIG_SBNI_MULTILINE
+ /* Lock the master device because we are going to change its local data */
+ if( nl->state & FL_SLAVE )
+ spin_lock( &((struct net_local *) nl->master->priv)->lock );
+#endif
+
+ outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
+
+ nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
+ for(;;) {
+ csr0 = inb( ioaddr + CSR0 );
+ if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
+ break;
+
+ req_ans = !(nl->state & FL_PREV_OK);
+
+ if( csr0 & RC_RDY )
+ req_ans = recv_frame( dev );
+
+ /*
+ * TR_RDY is always set here because we own the marker,
+ * and we set TR_REQ when we disabled interrupts
+ */
+ csr0 = inb( ioaddr + CSR0 );
+ if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
+ printk( KERN_ERR "%s: internal error!\n", dev->name );
+
+ /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
+ if( req_ans || nl->tx_frameno != 0 )
+ send_frame( dev );
+ else
+ /* send marker without any data */
+ outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
+ }
+
+ outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
+
+#ifdef CONFIG_SBNI_MULTILINE
+ if( nl->state & FL_SLAVE )
+ spin_unlock( &((struct net_local *) nl->master->priv)->lock );
+#endif
+}
+
+
+/*
+ * The routine returns 1 if the received frame needs to be acknowledged.
+ * An empty frame received without errors won't be acknowledged.
+ */
+
+static int
+recv_frame( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+ u32 crc = CRC32_INITIAL;
+
+ unsigned framelen, frameno, ack;
+ unsigned is_first, frame_ok;
+
+ if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
+ frame_ok = framelen > 4
+ ? upload_data( dev, framelen, frameno, is_first, crc )
+ : skip_tail( ioaddr, framelen, crc );
+ if( frame_ok )
+ interpret_ack( dev, ack );
+ } else
+ frame_ok = 0;
+
+ outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
+ if( frame_ok ) {
+ nl->state |= FL_PREV_OK;
+ if( framelen > 4 )
+ nl->in_stats.all_rx_number++;
+ } else
+ nl->state &= ~FL_PREV_OK,
+ change_level( dev ),
+ nl->in_stats.all_rx_number++,
+ nl->in_stats.bad_rx_number++;
+
+ return !frame_ok || framelen > 4;
+}
+
+
+static void
+send_frame( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ u32 crc = CRC32_INITIAL;
+
+ if( nl->state & FL_NEED_RESEND ) {
+
+ /* if the frame was sent but not ACK'ed - resend it */
+ if( nl->trans_errors ) {
+ --nl->trans_errors;
+ if( nl->framelen != 0 )
+ nl->in_stats.resend_tx_number++;
+ } else {
+ /* could not transmit after many attempts */
+#ifdef CONFIG_SBNI_MULTILINE
+ if( (nl->state & FL_SLAVE) || nl->link )
+#endif
+ nl->state |= FL_LINE_DOWN;
+ drop_xmit_queue( dev );
+ goto do_send;
+ }
+ } else
+ nl->trans_errors = TR_ERROR_COUNT;
+
+ send_frame_header( dev, &crc );
+ nl->state |= FL_NEED_RESEND;
+ /*
+ * FL_NEED_RESEND will be cleared after the ACK, but if an empty
+ * frame was sent it is cleared in prepare_to_send() for the next frame
+ */
+
+
+ if( nl->framelen ) {
+ download_data( dev, &crc );
+ nl->in_stats.all_tx_number++;
+ nl->state |= FL_WAIT_ACK;
+ }
+
+ outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
+
+do_send:
+ outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
+
+ if( nl->tx_frameno )
+ /* next frame exists - we request card to send it */
+ outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
+ dev->base_addr + CSR0 );
+}
+
+
+/*
+ * Write the frame data into adapter's buffer memory, and calculate CRC.
+ * Do padding if necessary.
+ */
+
+static void
+download_data( struct net_device *dev, u32 *crc_p )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct sk_buff *skb = nl->tx_buf_p;
+
+ unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
+
+ outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
+ *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
+
+ /* if the packet is too short, write some more bytes to pad it */
+ for( len = nl->framelen - len; len--; )
+ outb( 0, dev->base_addr + DAT ),
+ *crc_p = CRC32( 0, *crc_p );
+}
+
+
+static int
+upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
+ unsigned is_first, u32 crc )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ int frame_ok;
+
+ if( is_first )
+ nl->wait_frameno = frameno,
+ nl->inppos = 0;
+
+ if( nl->wait_frameno == frameno ) {
+
+ if( nl->inppos + framelen <= ETHER_MAX_LEN )
+ frame_ok = append_frame_to_pkt( dev, framelen, crc );
+
+ /*
+ * if the CRC is right but framelen is incorrect, a transmitter
+ * error occurred... drop the entire packet
+ */
+ else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
+ != 0 )
+ nl->wait_frameno = 0,
+ nl->inppos = 0,
+#ifdef CONFIG_SBNI_MULTILINE
+ ((struct net_local *) nl->master->priv)
+ ->stats.rx_errors++,
+ ((struct net_local *) nl->master->priv)
+ ->stats.rx_missed_errors++;
+#else
+ nl->stats.rx_errors++,
+ nl->stats.rx_missed_errors++;
+#endif
+ /* now skip all frames until is_first != 0 */
+ } else
+ frame_ok = skip_tail( dev->base_addr, framelen, crc );
+
+ if( is_first && !frame_ok )
+ /*
+ * The frame is broken, but we have already stored
+ * is_first... Drop the entire packet.
+ */
+ nl->wait_frameno = 0,
+#ifdef CONFIG_SBNI_MULTILINE
+ ((struct net_local *) nl->master->priv)->stats.rx_errors++,
+ ((struct net_local *) nl->master->priv)->stats.rx_crc_errors++;
+#else
+ nl->stats.rx_errors++,
+ nl->stats.rx_crc_errors++;
+#endif
+
+ return frame_ok;
+}
+
+
+static __inline void
+send_complete( struct net_local *nl )
+{
+#ifdef CONFIG_SBNI_MULTILINE
+ ((struct net_local *) nl->master->priv)->stats.tx_packets++;
+ ((struct net_local *) nl->master->priv)->stats.tx_bytes
+ += nl->tx_buf_p->len;
+#else
+ nl->stats.tx_packets++;
+ nl->stats.tx_bytes += nl->tx_buf_p->len;
+#endif
+ dev_kfree_skb_irq( nl->tx_buf_p );
+
+ nl->tx_buf_p = NULL;
+
+ nl->outpos = 0;
+ nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+ nl->framelen = 0;
+}
+
+
+static void
+interpret_ack( struct net_device *dev, unsigned ack )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ if( ack == FRAME_SENT_OK ) {
+ nl->state &= ~FL_NEED_RESEND;
+
+ if( nl->state & FL_WAIT_ACK ) {
+ nl->outpos += nl->framelen;
+
+ if( --nl->tx_frameno )
+ nl->framelen = min_t(unsigned int,
+ nl->maxframe,
+ nl->tx_buf_p->len - nl->outpos);
+ else
+ send_complete( nl ),
+#ifdef CONFIG_SBNI_MULTILINE
+ netif_wake_queue( nl->master );
+#else
+ netif_wake_queue( dev );
+#endif
+ }
+ }
+
+ nl->state &= ~FL_WAIT_ACK;
+}
+
+
+/*
+ * Glue the received frame to the previous fragments of the packet.
+ * Indicate the packet when its last frame has been accepted.
+ */
+
+static int
+append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ u8 *p;
+
+ if( nl->inppos + framelen > ETHER_MAX_LEN )
+ return 0;
+
+ if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
+ return 0;
+
+ p = nl->rx_buf_p->data + nl->inppos;
+ insb( dev->base_addr + DAT, p, framelen );
+ if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
+ return 0;
+
+ nl->inppos += framelen - 4;
+ if( --nl->wait_frameno == 0 ) /* last frame received */
+ indicate_pkt( dev );
+
+ return 1;
+}
+
+
+/*
+ * Prepare to start output on the adapter.
+ * The transmitter will actually be activated when the marker is accepted.
+ */
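+/*
+ * Worked example (with the default maxframe of 1012 bytes from sbni.h):
+ * a 1500-byte skb gives tx_frameno = (1500 + 1011) / 1012 = 2 and an
+ * initial framelen of 1012; the remaining 488 bytes go out as the second
+ * frame once the first one has been ACK'ed (see interpret_ack() above).
+ */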
+
+static void
+prepare_to_send( struct sk_buff *skb, struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ unsigned int len;
+
+ /* nl->tx_buf_p == NULL here! */
+ if( nl->tx_buf_p )
+ printk( KERN_ERR "%s: memory leak!\n", dev->name );
+
+ nl->outpos = 0;
+ nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+
+ len = skb->len;
+ if( len < SBNI_MIN_LEN )
+ len = SBNI_MIN_LEN;
+
+ nl->tx_buf_p = skb;
+ nl->tx_frameno = (len + nl->maxframe - 1) / nl->maxframe;
+ nl->framelen = len < nl->maxframe ? len : nl->maxframe;
+
+ outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
+#ifdef CONFIG_SBNI_MULTILINE
+ nl->master->trans_start = jiffies;
+#else
+ dev->trans_start = jiffies;
+#endif
+}
+
+
+static void
+drop_xmit_queue( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ if( nl->tx_buf_p )
+ dev_kfree_skb_any( nl->tx_buf_p ),
+ nl->tx_buf_p = NULL,
+#ifdef CONFIG_SBNI_MULTILINE
+ ((struct net_local *) nl->master->priv)
+ ->stats.tx_errors++,
+ ((struct net_local *) nl->master->priv)
+ ->stats.tx_carrier_errors++;
+#else
+ nl->stats.tx_errors++,
+ nl->stats.tx_carrier_errors++;
+#endif
+
+ nl->tx_frameno = 0;
+ nl->framelen = 0;
+ nl->outpos = 0;
+ nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+#ifdef CONFIG_SBNI_MULTILINE
+ netif_start_queue( nl->master );
+ nl->master->trans_start = jiffies;
+#else
+ netif_start_queue( dev );
+ dev->trans_start = jiffies;
+#endif
+}
+
+
+static void
+send_frame_header( struct net_device *dev, u32 *crc_p )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ u32 crc = *crc_p;
+ u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
+ u8 value;
+
+ if( nl->state & FL_NEED_RESEND )
+ len_field |= FRAME_RETRY; /* non-first attempt... */
+
+ if( nl->outpos == 0 )
+ len_field |= FRAME_FIRST;
+
+ len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
+ outb( SBNI_SIG, dev->base_addr + DAT );
+
+ value = (u8) len_field;
+ outb( value, dev->base_addr + DAT );
+ crc = CRC32( value, crc );
+ value = (u8) (len_field >> 8);
+ outb( value, dev->base_addr + DAT );
+ crc = CRC32( value, crc );
+
+ outb( nl->tx_frameno, dev->base_addr + DAT );
+ crc = CRC32( nl->tx_frameno, crc );
+ outb( 0, dev->base_addr + DAT );
+ crc = CRC32( 0, crc );
+ *crc_p = crc;
+}
+
+
+/*
+ * If the frame tail is not needed (incorrect frame number or received
+ * twice), it is not stored, but the CRC is still calculated.
+ */
+
+static int
+skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
+{
+ while( tail_len-- )
+ crc = CRC32( inb( ioaddr + DAT ), crc );
+
+ return crc == CRC32_REMAINDER;
+}
+
+
+/*
+ * Preliminary check that the frame header is correct; calculates its CRC
+ * and splits it into simple fields
+ */
+
+static int
+check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
+ u32 *is_first, u32 *crc_p )
+{
+ u32 crc = *crc_p;
+ u8 value;
+
+ if( inb( ioaddr + DAT ) != SBNI_SIG )
+ return 0;
+
+ value = inb( ioaddr + DAT );
+ *framelen = (u32)value;
+ crc = CRC32( value, crc );
+ value = inb( ioaddr + DAT );
+ *framelen |= ((u32)value) << 8;
+ crc = CRC32( value, crc );
+
+ *ack = *framelen & FRAME_ACK_MASK;
+ *is_first = (*framelen & FRAME_FIRST) != 0;
+
+ if( (*framelen &= FRAME_LEN_MASK) < 6
+ || *framelen > SBNI_MAX_FRAME - 3 )
+ return 0;
+
+ value = inb( ioaddr + DAT );
+ *frameno = (u32)value;
+ crc = CRC32( value, crc );
+
+ crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
+ *framelen -= 2;
+
+ *crc_p = crc;
+ return 1;
+}
+
+
+static struct sk_buff *
+get_rx_buf( struct net_device *dev )
+{
+ /* +2 is to compensate for the alignment fixup below */
+ struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
+ if( !skb )
+ return NULL;
+
+#ifdef CONFIG_SBNI_MULTILINE
+ skb->dev = ((struct net_local *) dev->priv)->master;
+#else
+ skb->dev = dev;
+#endif
+ skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
+ return skb;
+}
+
+
+static void
+indicate_pkt( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct sk_buff *skb = nl->rx_buf_p;
+
+ skb_put( skb, nl->inppos );
+
+#ifdef CONFIG_SBNI_MULTILINE
+ skb->protocol = eth_type_trans( skb, nl->master );
+ netif_rx( skb );
+ dev->last_rx = jiffies;
+ ++((struct net_local *) nl->master->priv)->stats.rx_packets;
+ ((struct net_local *) nl->master->priv)->stats.rx_bytes += nl->inppos;
+#else
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->last_rx = jiffies;
+ ++nl->stats.rx_packets;
+ nl->stats.rx_bytes += nl->inppos;
+#endif
+ nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
+}
+
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * The routine periodically checks wire activity and regenerates the
+ * marker if the link has been inactive for a long time.
+ */
+
+static void
+sbni_watchdog( unsigned long arg )
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct timer_list *w = &nl->watchdog;
+ unsigned long flags;
+ unsigned char csr0;
+
+ spin_lock_irqsave( &nl->lock, flags );
+
+ csr0 = inb( dev->base_addr + CSR0 );
+ if( csr0 & RC_CHK ) {
+
+ if( nl->timer_ticks ) {
+ if( csr0 & (RC_RDY | BU_EMP) )
+ /* receiving not active */
+ nl->timer_ticks--;
+ } else {
+ nl->in_stats.timeout_number++;
+ if( nl->delta_rxl )
+ timeout_change_level( dev );
+
+ outb( *(u_char *)&nl->csr1 | PR_RES,
+ dev->base_addr + CSR1 );
+ csr0 = inb( dev->base_addr + CSR0 );
+ }
+ } else
+ nl->state &= ~FL_LINE_DOWN;
+
+ outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
+
+ init_timer( w );
+ w->expires = jiffies + SBNI_TIMEOUT;
+ w->data = arg;
+ w->function = sbni_watchdog;
+ add_timer( w );
+
+ spin_unlock_irqrestore( &nl->lock, flags );
+}
+
+
+static unsigned char rxl_tab[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
+ 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
+};
+
+#define SIZE_OF_TIMEOUT_RXL_TAB 4
+static unsigned char timeout_rxl_tab[] = {
+ 0x03, 0x05, 0x08, 0x0b
+};
+
+/* -------------------------------------------------------------------------- */
+
+static void
+card_start( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
+ nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+ nl->state |= FL_PREV_OK;
+
+ nl->inppos = nl->outpos = 0;
+ nl->wait_frameno = 0;
+ nl->tx_frameno = 0;
+ nl->framelen = 0;
+
+ outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
+ outb( EN_INT, dev->base_addr + CSR0 );
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Receive level auto-selection */
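+/*
+ * Strategy implemented by change_level() below: walk the 16-entry
+ * rxl_tab in delta_rxl steps, reverse direction whenever the last
+ * interval saw fewer good frames than the previous one
+ * (cur_rxl_rcvd < prev_rxl_rcvd), and force the direction inwards at
+ * the ends of the table.  A fixed receive level (delta_rxl == 0)
+ * disables the mechanism entirely.
+ */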
+
+static void
+change_level( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
+ return;
+
+ if( nl->cur_rxl_index == 0 )
+ nl->delta_rxl = 1;
+ else if( nl->cur_rxl_index == 15 )
+ nl->delta_rxl = -1;
+ else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
+ nl->delta_rxl = -nl->delta_rxl;
+
+ nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
+ inb( dev->base_addr + CSR0 ); /* needs for PCI cards */
+ outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
+
+ nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
+ nl->cur_rxl_rcvd = 0;
+}
+
+
+static void
+timeout_change_level( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
+ if( ++nl->timeout_rxl >= 4 )
+ nl->timeout_rxl = 0;
+
+ nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
+ inb( dev->base_addr + CSR0 );
+ outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
+
+ nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
+ nl->cur_rxl_rcvd = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Open/initialize the board.
+ */
+
+static int
+sbni_open( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct timer_list *w = &nl->watchdog;
+
+ /*
+ * For dual ISA adapters in "common irq" mode, we have to determine
+ * whether the primary or the secondary channel is being initialized,
+ * and install the irq handler only in the first case.
+ */
+ if( dev->base_addr < 0x400 ) { /* ISA only */
+ struct net_device **p = sbni_cards;
+ for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
+ if( (*p)->irq == dev->irq
+ && ((*p)->base_addr == dev->base_addr + 4
+ || (*p)->base_addr == dev->base_addr - 4)
+ && (*p)->flags & IFF_UP ) {
+
+ ((struct net_local *) ((*p)->priv))
+ ->second = dev;
+ printk( KERN_NOTICE "%s: using shared irq "
+ "with %s\n", dev->name, (*p)->name );
+ nl->state |= FL_SECONDARY;
+ goto handler_attached;
+ }
+ }
+
+ if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ, dev->name, dev) ) {
+ printk( KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq );
+ return -EAGAIN;
+ }
+
+handler_attached:
+
+ spin_lock( &nl->lock );
+ memset( &nl->stats, 0, sizeof(struct net_device_stats) );
+ memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
+
+ card_start( dev );
+
+ netif_start_queue( dev );
+
+ /* set timer watchdog */
+ init_timer( w );
+ w->expires = jiffies + SBNI_TIMEOUT;
+ w->data = (unsigned long) dev;
+ w->function = sbni_watchdog;
+ add_timer( w );
+
+ spin_unlock( &nl->lock );
+ return 0;
+}
+
+
+static int
+sbni_close( struct net_device *dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+
+ if( nl->second && nl->second->flags & IFF_UP ) {
+ printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
+ nl->second->name );
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_SBNI_MULTILINE
+ if( nl->state & FL_SLAVE )
+ emancipate( dev );
+ else
+ while( nl->link ) /* it's master device! */
+ emancipate( nl->link );
+#endif
+
+ spin_lock( &nl->lock );
+
+ nl->second = NULL;
+ drop_xmit_queue( dev );
+ netif_stop_queue( dev );
+
+ del_timer( &nl->watchdog );
+
+ outb( 0, dev->base_addr + CSR0 );
+
+ if( !(nl->state & FL_SECONDARY) )
+ free_irq( dev->irq, dev );
+ nl->state &= FL_SECONDARY;
+
+ spin_unlock( &nl->lock );
+ return 0;
+}
+
+
+/*
+ Valid combinations in CSR0 (for probing):
+
+ VALID_DECODER 0000,0011,1011,1010
+
+ ; 0 ; -
+ TR_REQ ; 1 ; +
+ TR_RDY ; 2 ; -
+ TR_RDY TR_REQ ; 3 ; +
+ BU_EMP ; 4 ; +
+ BU_EMP TR_REQ ; 5 ; +
+ BU_EMP TR_RDY ; 6 ; -
+ BU_EMP TR_RDY TR_REQ ; 7 ; +
+ RC_RDY ; 8 ; +
+ RC_RDY TR_REQ ; 9 ; +
+ RC_RDY TR_RDY ; 10 ; -
+ RC_RDY TR_RDY TR_REQ ; 11 ; -
+ RC_RDY BU_EMP ; 12 ; -
+ RC_RDY BU_EMP TR_REQ ; 13 ; -
+ RC_RDY BU_EMP TR_RDY ; 14 ; -
+ RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
+*/
+
+#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
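+
+/*
+ * How the mask maps to the table: bit n of VALID_DECODER is set exactly
+ * for the rows marked '+' above, i.e. rows 1, 3, 4, 5, 7, 8 and 9.
+ * sbni_card_probe() below folds BU_EMP into the EN_INT bit position and
+ * then tests
+ *
+ *	VALID_DECODER & (1 << (csr0 >> 4))
+ *
+ * so, for example, a high nibble of 9 (RC_RDY | TR_REQ) is accepted
+ * while 10 (RC_RDY | TR_RDY) is rejected.
+ */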
+
+
+static int
+sbni_card_probe( unsigned long ioaddr )
+{
+ unsigned char csr0;
+
+ csr0 = inb( ioaddr + CSR0 );
+ if( csr0 != 0xff && csr0 != 0x00 ) {
+ csr0 &= ~EN_INT;
+ if( csr0 & BU_EMP )
+ csr0 |= EN_INT;
+
+ if( VALID_DECODER & (1 << (csr0 >> 4)) )
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int
+sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct sbni_flags flags;
+ int error = 0;
+
+#ifdef CONFIG_SBNI_MULTILINE
+ struct net_device *slave_dev;
+ char slave_name[ 8 ];
+#endif
+
+ switch( cmd ) {
+ case SIOCDEVGETINSTATS :
+ if (copy_to_user( ifr->ifr_data, &nl->in_stats,
+ sizeof(struct sbni_in_stats) ))
+ error = -EFAULT;
+ break;
+
+ case SIOCDEVRESINSTATS :
+ if( current->euid != 0 ) /* root only */
+ return -EPERM;
+ memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
+ break;
+
+ case SIOCDEVGHWSTATE :
+ flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
+ flags.rate = nl->csr1.rate;
+ flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
+ flags.rxl = nl->cur_rxl_index;
+ flags.fixed_rxl = nl->delta_rxl == 0;
+
+ if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
+ error = -EFAULT;
+ break;
+
+ case SIOCDEVSHWSTATE :
+ if( current->euid != 0 ) /* root only */
+ return -EPERM;
+
+ spin_lock( &nl->lock );
+ flags = *(struct sbni_flags*) &ifr->ifr_ifru;
+ if( flags.fixed_rxl )
+ nl->delta_rxl = 0,
+ nl->cur_rxl_index = flags.rxl;
+ else
+ nl->delta_rxl = DEF_RXL_DELTA,
+ nl->cur_rxl_index = DEF_RXL;
+
+ nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
+ nl->csr1.rate = flags.rate;
+ outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
+ spin_unlock( &nl->lock );
+ break;
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+ case SIOCDEVENSLAVE :
+ if( current->euid != 0 ) /* root only */
+ return -EPERM;
+
+ if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
+ return -EFAULT;
+ slave_dev = dev_get_by_name( slave_name );
+ if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
+ printk( KERN_ERR "%s: trying to enslave non-active "
+ "device %s\n", dev->name, slave_name );
+ return -EPERM;
+ }
+
+ return enslave( dev, slave_dev );
+
+ case SIOCDEVEMANSIPATE :
+ if( current->euid != 0 ) /* root only */
+ return -EPERM;
+
+ return emancipate( dev );
+
+#endif /* CONFIG_SBNI_MULTILINE */
+
+ default :
+ return -EOPNOTSUPP;
+ }
+
+ return error;
+}
+
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+static int
+enslave( struct net_device *dev, struct net_device *slave_dev )
+{
+ struct net_local *nl = (struct net_local *) dev->priv;
+ struct net_local *snl = (struct net_local *) slave_dev->priv;
+
+ if( nl->state & FL_SLAVE ) /* This isn't master or free device */
+ return -EBUSY;
+
+ if( snl->state & FL_SLAVE ) /* That was already enslaved */
+ return -EBUSY;
+
+ spin_lock( &nl->lock );
+ spin_lock( &snl->lock );
+
+ /* append to list */
+ snl->link = nl->link;
+ nl->link = slave_dev;
+ snl->master = dev;
+ snl->state |= FL_SLAVE;
+
+ /* Summary statistics of MultiLine operation will be stored
+ in master's counters */
+ memset( &snl->stats, 0, sizeof(struct net_device_stats) );
+ netif_stop_queue( slave_dev );
+ netif_wake_queue( dev ); /* Now we are able to transmit */
+
+ spin_unlock( &snl->lock );
+ spin_unlock( &nl->lock );
+ printk( KERN_NOTICE "%s: slave device (%s) attached.\n",
+ dev->name, slave_dev->name );
+ return 0;
+}
+
+
+static int
+emancipate( struct net_device *dev )
+{
+ struct net_local *snl = (struct net_local *) dev->priv;
+ struct net_device *p = snl->master;
+ struct net_local *nl = (struct net_local *) p->priv;
+
+ if( !(snl->state & FL_SLAVE) )
+ return -EINVAL;
+
+ spin_lock( &nl->lock );
+ spin_lock( &snl->lock );
+ drop_xmit_queue( dev );
+
+ /* exclude from list */
+ for(;;) { /* must be in list */
+ struct net_local *t = (struct net_local *) p->priv;
+ if( t->link == dev ) {
+ t->link = snl->link;
+ break;
+ }
+ p = t->link;
+ }
+
+ snl->link = NULL;
+ snl->master = dev;
+ snl->state &= ~FL_SLAVE;
+
+ netif_start_queue( dev );
+
+ spin_unlock( &snl->lock );
+ spin_unlock( &nl->lock );
+
+ dev_put( dev );
+ return 0;
+}
+
+#endif
+
+
+static struct net_device_stats *
+sbni_get_stats( struct net_device *dev )
+{
+ return &((struct net_local *) dev->priv)->stats;
+}
+
+
+static void
+set_multicast_list( struct net_device *dev )
+{
+ return; /* sbni always operates in promiscuous mode */
+}
+
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(baud, int, NULL, 0);
+module_param_array(rxl, int, NULL, 0);
+module_param_array(mac, int, NULL, 0);
+module_param(skip_pci_probe, bool, 0);
+
+MODULE_LICENSE("GPL");
+
+
+int
+init_module( void )
+{
+ struct net_device *dev;
+ int err;
+
+ while( num < SBNI_MAX_NUM_CARDS ) {
+ dev = alloc_netdev(sizeof(struct net_local),
+ "sbni%d", sbni_devsetup);
+ if( !dev)
+ break;
+
+ sprintf( dev->name, "sbni%d", num );
+
+ err = sbni_init(dev);
+ if (err) {
+ free_netdev(dev);
+ break;
+ }
+
+ if( register_netdev( dev ) ) {
+ release_region( dev->base_addr, SBNI_IO_EXTENT );
+ free_netdev( dev );
+ break;
+ }
+ }
+
+ return *sbni_cards ? 0 : -ENODEV;
+}
+
+void
+cleanup_module( void )
+{
+ struct net_device *dev;
+ int num;
+
+ for( num = 0; num < SBNI_MAX_NUM_CARDS; ++num )
+ if( (dev = sbni_cards[ num ]) != NULL ) {
+ unregister_netdev( dev );
+ release_region( dev->base_addr, SBNI_IO_EXTENT );
+ free_netdev( dev );
+ }
+}
+
+#else /* MODULE */
+
+static int __init
+sbni_setup( char *p )
+{
+ int n, parm;
+
+ if( *p++ != '(' )
+ goto bad_param;
+
+ for( n = 0, parm = 0; *p && n < 8; ) {
+ (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
+ if( !*p || *p == ')' )
+ return 1;
+ if( *p == ';' )
+ ++p, ++n, parm = 0;
+ else if( *p++ != ',' )
+ break;
+ else
+ if( ++parm >= 5 )
+ break;
+ }
+bad_param:
+ printk( KERN_ERR "Error in sbni kernel parameter!\n" );
+ return 0;
+}
+
+__setup( "sbni=", sbni_setup );
+
+#endif /* MODULE */
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef ASM_CRC
+
+static u32
+calc_crc32( u32 crc, u8 *p, u32 len )
+{
+ register u32 _crc;
+ _crc = crc;
+
+ __asm__ __volatile__ (
+ "xorl %%ebx, %%ebx\n"
+ "movl %2, %%esi\n"
+ "movl %3, %%ecx\n"
+ "movl $crc32tab, %%edi\n"
+ "shrl $2, %%ecx\n"
+ "jz 1f\n"
+
+ ".align 4\n"
+ "0:\n"
+ "movb %%al, %%bl\n"
+ "movl (%%esi), %%edx\n"
+ "shrl $8, %%eax\n"
+ "xorb %%dl, %%bl\n"
+ "shrl $8, %%edx\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb %%dl, %%bl\n"
+ "shrl $8, %%edx\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb %%dl, %%bl\n"
+ "movb %%dh, %%dl\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb %%dl, %%bl\n"
+ "addl $4, %%esi\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "decl %%ecx\n"
+ "jnz 0b\n"
+
+ "1:\n"
+ "movl %3, %%ecx\n"
+ "andl $3, %%ecx\n"
+ "jz 2f\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb (%%esi), %%bl\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "decl %%ecx\n"
+ "jz 2f\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb 1(%%esi), %%bl\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+
+ "decl %%ecx\n"
+ "jz 2f\n"
+
+ "movb %%al, %%bl\n"
+ "shrl $8, %%eax\n"
+ "xorb 2(%%esi), %%bl\n"
+ "xorl (%%edi,%%ebx,4), %%eax\n"
+ "2:\n"
+ : "=a" (_crc)
+ : "0" (_crc), "g" (p), "g" (len)
+ : "bx", "cx", "dx", "si", "di"
+ );
+
+ return _crc;
+}
+
+#else /* ASM_CRC */
+
+static u32
+calc_crc32( u32 crc, u8 *p, u32 len )
+{
+ while( len-- )
+ crc = CRC32( *p++, crc );
+
+ return crc;
+}
+
+#endif /* ASM_CRC */
+
+
+static u32 crc32tab[] __attribute__ ((aligned(8))) = {
+ 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
+ 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
+ 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
+ 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
+ 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
+ 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
+ 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
+ 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
+ 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
+ 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
+ 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
+ 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
+ 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
+ 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
+ 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
+ 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
+ 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
+ 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
+ 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
+ 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
+ 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
+ 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
+ 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
+ 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
+ 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
+ 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
+ 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
+ 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
+ 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
+ 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
+ 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
+ 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
+ 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
+ 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
+ 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
+ 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
+ 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
+ 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
+ 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
+ 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
+ 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
+ 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
+ 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
+ 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
+ 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
+ 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
+ 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
+ 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
+ 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
+ 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
+ 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
+ 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
+ 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
+ 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
+ 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
+ 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
+ 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
+ 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
+ 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
+ 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
+ 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
+ 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
+ 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
+ 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
+};
+
diff --git a/drivers/net/wan/sbni.h b/drivers/net/wan/sbni.h
new file mode 100644
index 000000000000..27715e70f28b
--- /dev/null
+++ b/drivers/net/wan/sbni.h
@@ -0,0 +1,141 @@
+/* sbni.h: definitions for a Granch SBNI12 driver, version 5.0.0
+ * Written 2001 Denis I. Timofeev (timofeev@granch.ru)
+ * This file is distributed under the GNU GPL
+ */
+
+#ifndef SBNI_H
+#define SBNI_H
+
+#ifdef SBNI_DEBUG
+#define DP( A ) A
+#else
+#define DP( A )
+#endif
+
+
+/* We don't have an official vendor id yet... */
+#define SBNI_PCI_VENDOR 0x55
+#define SBNI_PCI_DEVICE 0x9f
+
+#define ISA_MODE 0x00
+#define PCI_MODE 0x01
+
+#define SBNI_IO_EXTENT 4
+
+enum sbni_reg {
+ CSR0 = 0,
+ CSR1 = 1,
+ DAT = 2
+};
+
+/* CSR0 mapping */
+enum {
+ BU_EMP = 0x02,
+ RC_CHK = 0x04,
+ CT_ZER = 0x08,
+ TR_REQ = 0x10,
+ TR_RDY = 0x20,
+ EN_INT = 0x40,
+ RC_RDY = 0x80
+};
+
+
+/* CSR1 mapping */
+#define PR_RES 0x80
+
+struct sbni_csr1 {
+ unsigned rxl : 5;
+ unsigned rate : 2;
+ unsigned : 1;
+};
+
+/* fields in frame header */
+#define FRAME_ACK_MASK (unsigned short)0x7000
+#define FRAME_LEN_MASK (unsigned short)0x03FF
+#define FRAME_FIRST (unsigned short)0x8000
+#define FRAME_RETRY (unsigned short)0x0800
+
+#define FRAME_SENT_BAD (unsigned short)0x4000
+#define FRAME_SENT_OK (unsigned short)0x3000
+
+
+/* state flags */
+enum {
+ FL_WAIT_ACK = 0x01,
+ FL_NEED_RESEND = 0x02,
+ FL_PREV_OK = 0x04,
+ FL_SLOW_MODE = 0x08,
+ FL_SECONDARY = 0x10,
+#ifdef CONFIG_SBNI_MULTILINE
+ FL_SLAVE = 0x20,
+#endif
+ FL_LINE_DOWN = 0x40
+};
+
+
+enum {
+ DEFAULT_IOBASEADDR = 0x210,
+ DEFAULT_INTERRUPTNUMBER = 5,
+ DEFAULT_RATE = 0,
+ DEFAULT_FRAME_LEN = 1012
+};
+
+#define DEF_RXL_DELTA -1
+#define DEF_RXL 0xf
+
+#define SBNI_SIG 0x5a
+
+#define SBNI_MIN_LEN 60 /* Shortest Ethernet frame without FCS */
+#define SBNI_MAX_FRAME 1023
+#define ETHER_MAX_LEN 1518
+
+#define SBNI_TIMEOUT (HZ/10)
+
+#define TR_ERROR_COUNT 32
+#define CHANGE_LEVEL_START_TICKS 4
+
+#define SBNI_MAX_NUM_CARDS 16
+
+/* internal SBNI-specific statistics */
+struct sbni_in_stats {
+ u32 all_rx_number;
+ u32 bad_rx_number;
+ u32 timeout_number;
+ u32 all_tx_number;
+ u32 resend_tx_number;
+};
+
+/* SBNI ioctl params */
+#define SIOCDEVGETINSTATS SIOCDEVPRIVATE
+#define SIOCDEVRESINSTATS SIOCDEVPRIVATE+1
+#define SIOCDEVGHWSTATE SIOCDEVPRIVATE+2
+#define SIOCDEVSHWSTATE SIOCDEVPRIVATE+3
+#define SIOCDEVENSLAVE SIOCDEVPRIVATE+4
+#define SIOCDEVEMANSIPATE SIOCDEVPRIVATE+5
+
+
+/* data packet for SIOCDEVGHWSTATE/SIOCDEVSHWSTATE ioctl requests */
+struct sbni_flags {
+ u32 rxl : 4;
+ u32 rate : 2;
+ u32 fixed_rxl : 1;
+ u32 slow_mode : 1;
+ u32 mac_addr : 24;
+};
+
+/*
+ * CRC-32 stuff
+ */
+#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF))
+ /* CRC generator 0xEDB88320 */
+ /* CRC remainder 0x2144DF1C */
+ /* CRC initial value 0x00000000 */
+#define CRC32_REMAINDER 0x2144DF1C
+#define CRC32_INITIAL 0x00000000
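+
+/*
+ * Illustrative sketch only, not part of the original driver: given the
+ * remainder/initial-value constants above, a receive path could check a
+ * frame by running the CRC over the data plus the appended 32-bit CRC
+ * and comparing the result against CRC32_REMAINDER.  calc_crc32() is the
+ * helper from sbni.c; the name sbni_crc_ok() is made up for this example.
+ */
+#if 0
+static inline int
+sbni_crc_ok( u8 *frame, u32 len_with_crc )
+{
+	return  calc_crc32( CRC32_INITIAL, frame, len_with_crc )
+			== CRC32_REMAINDER;
+}
+#endif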
+
+#ifndef __initdata
+#define __initdata
+#endif
+
+#endif
+
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
new file mode 100644
index 000000000000..3ac9a45b20fa
--- /dev/null
+++ b/drivers/net/wan/sdla.c
@@ -0,0 +1,1676 @@
+/*
+ * SDLA An implementation of a driver for the Sangoma S502/S508 series
+ * multi-protocol PC interface card. Initial offering is with
+ *			the DLCI driver, providing Frame Relay support for Linux.
+ *
+ * Global definitions for the Frame relay interface.
+ *
+ * Version: @(#)sdla.c 0.30 12 Sep 1996
+ *
+ * Credits: Sangoma Technologies, for the use of 2 cards for an extended
+ * period of time.
+ * David Mandelstam <dm@sangoma.com> for getting me started on
+ * this project, and incentive to complete it.
+ * Gene Kozen <74604.152@compuserve.com> for providing me with
+ * important information about the cards.
+ *
+ * Author: Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ * 0.15 Mike McLagan Improved error handling, packet dropping
+ * 0.20 Mike McLagan New transmit/receive flags for config
+ * If in FR mode, don't accept packets from
+ * non DLCI devices.
+ * 0.25 Mike McLagan Fixed problem with rejecting packets
+ * from non DLCI devices.
+ * 0.30 Mike McLagan Fixed kernel panic when used with modified
+ * ifconfig
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h> /* for CONFIG_DLCI_MAX */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_frad.h>
+#include <linux/sdla.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
+
+static unsigned int valid_port[] __initdata = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
+
+static unsigned int valid_mem[] __initdata = {
+ 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
+ 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
+ 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
+ 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
+ 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000};
+
+static DEFINE_SPINLOCK(sdla_lock);
+
+/*********************************************************
+ *
+ * these are the core routines that access the card itself
+ *
+ *********************************************************/
+
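+/*
+ * Only an 8 KB page of the card's on-board memory is visible at
+ * dev->mem_start at any one time.  SDLA_WINDOW selects the page
+ * (addr >> 13) via the Z80 window register; the low address bits
+ * (addr & SDLA_ADDR_MASK) give the offset within the mapped page.
+ */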
+#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW)
+
+static void __sdla_read(struct net_device *dev, int addr, void *buf, short len)
+{
+ char *temp;
+ const void *base;
+ int offset, bytes;
+
+ temp = buf;
+ while(len)
+ {
+ offset = addr & SDLA_ADDR_MASK;
+ bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
+ base = (const void *) (dev->mem_start + offset);
+
+ SDLA_WINDOW(dev, addr);
+ memcpy(temp, base, bytes);
+
+ addr += bytes;
+ temp += bytes;
+ len -= bytes;
+ }
+}
+
+static void sdla_read(struct net_device *dev, int addr, void *buf, short len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&sdla_lock, flags);
+ __sdla_read(dev, addr, buf, len);
+ spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+static void __sdla_write(struct net_device *dev, int addr,
+ const void *buf, short len)
+{
+ const char *temp;
+ void *base;
+ int offset, bytes;
+
+ temp = buf;
+ while(len)
+ {
+ offset = addr & SDLA_ADDR_MASK;
+ bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
+ base = (void *) (dev->mem_start + offset);
+
+ SDLA_WINDOW(dev, addr);
+ memcpy(base, temp, bytes);
+
+ addr += bytes;
+ temp += bytes;
+ len -= bytes;
+ }
+}
+
+static void sdla_write(struct net_device *dev, int addr,
+ const void *buf, short len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdla_lock, flags);
+ __sdla_write(dev, addr, buf, len);
+ spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+
+static void sdla_clear(struct net_device *dev)
+{
+ unsigned long flags;
+ char *base;
+ int len, addr, bytes;
+
+ len = 65536;
+ addr = 0;
+ bytes = SDLA_WINDOW_SIZE;
+ base = (void *) dev->mem_start;
+
+ spin_lock_irqsave(&sdla_lock, flags);
+ while(len)
+ {
+ SDLA_WINDOW(dev, addr);
+ memset(base, 0, bytes);
+
+ addr += bytes;
+ len -= bytes;
+ }
+ spin_unlock_irqrestore(&sdla_lock, flags);
+
+}
+
+static char sdla_byte(struct net_device *dev, int addr)
+{
+ unsigned long flags;
+ char byte, *temp;
+
+ temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK));
+
+ spin_lock_irqsave(&sdla_lock, flags);
+ SDLA_WINDOW(dev, addr);
+ byte = *temp;
+ spin_unlock_irqrestore(&sdla_lock, flags);
+
+ return(byte);
+}
+
+void sdla_stop(struct net_device *dev)
+{
+ struct frad_local *flp;
+
+ flp = dev->priv;
+ switch(flp->type)
+ {
+ case SDLA_S502A:
+ outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state = SDLA_HALT;
+ break;
+ case SDLA_S502E:
+ outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL);
+ outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state = SDLA_S502E_ENABLE;
+ break;
+ case SDLA_S507:
+ flp->state &= ~SDLA_CPUEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+ case SDLA_S508:
+ flp->state &= ~SDLA_CPUEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+ }
+}
+
+void sdla_start(struct net_device *dev)
+{
+ struct frad_local *flp;
+
+ flp = dev->priv;
+ switch(flp->type)
+ {
+ case SDLA_S502A:
+ outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL);
+ outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state = SDLA_S502A_START;
+ break;
+ case SDLA_S502E:
+ outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL);
+ outb(0x00, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state = 0;
+ break;
+ case SDLA_S507:
+ flp->state |= SDLA_CPUEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+ case SDLA_S508:
+ flp->state |= SDLA_CPUEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+ }
+}
+
+/****************************************************
+ *
+ * This is used for the S502A/E cards to determine
+ * the speed of the onboard CPU.  Calibration is
+ * necessary for the Frame Relay code uploaded
+ * later.  Incorrect results cause timing problems
+ * with link checks & status messages.
+ *
+ ***************************************************/
+
+int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
+{
+ unsigned long start, done, now;
+ char resp, *temp;
+
+ start = now = jiffies;
+ done = jiffies + jiffs;
+
+ temp = (void *)dev->mem_start;
+ temp += z80_addr & SDLA_ADDR_MASK;
+
+ resp = ~resp1;
+ while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2)))
+ {
+ if (jiffies != now)
+ {
+ SDLA_WINDOW(dev, z80_addr);
+ now = jiffies;
+ resp = *temp;
+ }
+ }
+ return(time_before(jiffies, done) ? jiffies - start : -1);
+}
+
+/* constants for Z80 CPU speed */
+#define Z80_READY '1' /* Z80 is ready to begin */
+#define LOADER_READY '2' /* driver is ready to begin */
+#define Z80_SCC_OK '3' /* SCC is on board */
+#define Z80_SCC_BAD '4' /* SCC was not found */
+
+static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
+{
+ int jiffs;
+ char data;
+
+ sdla_start(dev);
+ if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
+ return(-EIO);
+
+ data = LOADER_READY;
+ sdla_write(dev, 0, &data, 1);
+
+ if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
+ return(-EIO);
+
+ sdla_stop(dev);
+ sdla_read(dev, 0, &data, 1);
+
+ if (data == Z80_SCC_BAD)
+ {
+ printk("%s: SCC bad\n", dev->name);
+ return(-EIO);
+ }
+
+ if (data != Z80_SCC_OK)
+ return(-EINVAL);
+
+ if (jiffs < 165)
+ ifr->ifr_mtu = SDLA_CPU_16M;
+ else if (jiffs < 220)
+ ifr->ifr_mtu = SDLA_CPU_10M;
+ else if (jiffs < 258)
+ ifr->ifr_mtu = SDLA_CPU_8M;
+ else if (jiffs < 357)
+ ifr->ifr_mtu = SDLA_CPU_7M;
+ else if (jiffs < 467)
+ ifr->ifr_mtu = SDLA_CPU_5M;
+ else
+ ifr->ifr_mtu = SDLA_CPU_3M;
+
+ return(0);
+}
+
+/************************************************
+ *
+ * Direct interaction with the Frame Relay code
+ * starts here.
+ *
+ ************************************************/
+
+struct _dlci_stat
+{
+ short dlci __attribute__((packed));
+ char flags __attribute__((packed));
+};
+
+struct _frad_stat
+{
+ char flags;
+ struct _dlci_stat dlcis[SDLA_MAX_DLCI];
+};
+
+static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data)
+{
+ struct _dlci_stat *pstatus;
+ short *pdlci;
+ int i;
+ char *state, line[30];
+
+ switch (ret)
+ {
+ case SDLA_RET_MODEM:
+ state = data;
+ if (*state & SDLA_MODEM_DCD_LOW)
+ printk(KERN_INFO "%s: Modem DCD unexpectedly low!\n", dev->name);
+ if (*state & SDLA_MODEM_CTS_LOW)
+ printk(KERN_INFO "%s: Modem CTS unexpectedly low!\n", dev->name);
+ /* I should probably do something about this! */
+ break;
+
+ case SDLA_RET_CHANNEL_OFF:
+ printk(KERN_INFO "%s: Channel became inoperative!\n", dev->name);
+ /* same here */
+ break;
+
+ case SDLA_RET_CHANNEL_ON:
+ printk(KERN_INFO "%s: Channel became operative!\n", dev->name);
+ /* same here */
+ break;
+
+ case SDLA_RET_DLCI_STATUS:
+ printk(KERN_INFO "%s: Status change reported by Access Node.\n", dev->name);
+ len /= sizeof(struct _dlci_stat);
+ for(pstatus = data, i=0;i < len;i++,pstatus++)
+ {
+ if (pstatus->flags & SDLA_DLCI_NEW)
+ state = "new";
+ else if (pstatus->flags & SDLA_DLCI_DELETED)
+ state = "deleted";
+ else if (pstatus->flags & SDLA_DLCI_ACTIVE)
+ state = "active";
+ else
+ {
+ sprintf(line, "unknown status: %02X", pstatus->flags);
+ state = line;
+ }
+ printk(KERN_INFO "%s: DLCI %i: %s.\n", dev->name, pstatus->dlci, state);
+ /* same here */
+ }
+ break;
+
+ case SDLA_RET_DLCI_UNKNOWN:
+ printk(KERN_INFO "%s: Received unknown DLCIs:", dev->name);
+ len /= sizeof(short);
+ for(pdlci = data,i=0;i < len;i++,pdlci++)
+ printk(" %i", *pdlci);
+ printk("\n");
+ break;
+
+ case SDLA_RET_TIMEOUT:
+ printk(KERN_ERR "%s: Command timed out!\n", dev->name);
+ break;
+
+ case SDLA_RET_BUF_OVERSIZE:
+ printk(KERN_INFO "%s: Bc/CIR overflow, acceptable size is %i\n", dev->name, len);
+ break;
+
+ case SDLA_RET_BUF_TOO_BIG:
+ printk(KERN_INFO "%s: Buffer size over specified max of %i\n", dev->name, len);
+ break;
+
+ case SDLA_RET_CHANNEL_INACTIVE:
+ case SDLA_RET_DLCI_INACTIVE:
+ case SDLA_RET_CIR_OVERFLOW:
+ case SDLA_RET_NO_BUFS:
+ if (cmd == SDLA_INFORMATION_WRITE)
+ break;
+
+ default:
+ printk(KERN_DEBUG "%s: Cmd 0x%2.2X generated return code 0x%2.2X\n", dev->name, cmd, ret);
+ /* Further processing could be done here */
+ break;
+ }
+}
+
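+/*
+ * Shared-memory mailbox handshake: sdla_cmd() writes the command, DLCI,
+ * flags and any input data into the card's command buffer, sets opp_flag
+ * to hand the buffer to the on-board CPU, then polls opp_flag for up to
+ * one second until the card clears it, at which point the return code
+ * and any output data are read back.
+ */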
+static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
+ void *inbuf, short inlen, void *outbuf, short *outlen)
+{
+ static struct _frad_stat status;
+ struct frad_local *flp;
+ struct sdla_cmd *cmd_buf;
+ unsigned long pflags;
+ unsigned long jiffs;
+ int ret, waiting, len;
+ long window;
+
+ flp = dev->priv;
+ window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
+ cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
+ ret = 0;
+ len = 0;
+ jiffs = jiffies + HZ; /* 1 second is plenty */
+
+ spin_lock_irqsave(&sdla_lock, pflags);
+ SDLA_WINDOW(dev, window);
+ cmd_buf->cmd = cmd;
+ cmd_buf->dlci = dlci;
+ cmd_buf->flags = flags;
+
+ if (inbuf)
+ memcpy(cmd_buf->data, inbuf, inlen);
+
+ cmd_buf->length = inlen;
+
+ cmd_buf->opp_flag = 1;
+ spin_unlock_irqrestore(&sdla_lock, pflags);
+
+ waiting = 1;
+ len = 0;
+ while (waiting && time_before_eq(jiffies, jiffs))
+ {
+ if (waiting++ % 3)
+ {
+ spin_lock_irqsave(&sdla_lock, pflags);
+ SDLA_WINDOW(dev, window);
+ waiting = ((volatile int)(cmd_buf->opp_flag));
+ spin_unlock_irqrestore(&sdla_lock, pflags);
+ }
+ }
+
+ if (!waiting)
+ {
+
+ spin_lock_irqsave(&sdla_lock, pflags);
+ SDLA_WINDOW(dev, window);
+ ret = cmd_buf->retval;
+ len = cmd_buf->length;
+ if (outbuf && outlen)
+ {
+ *outlen = *outlen >= len ? len : *outlen;
+
+ if (*outlen)
+ memcpy(outbuf, cmd_buf->data, *outlen);
+ }
+
+ /* This is a local copy that's used for error handling */
+ if (ret)
+ memcpy(&status, cmd_buf->data, len > sizeof(status) ? sizeof(status) : len);
+
+ spin_unlock_irqrestore(&sdla_lock, pflags);
+ }
+ else
+ ret = SDLA_RET_TIMEOUT;
+
+ if (ret != SDLA_RET_OK)
+ sdla_errors(dev, cmd, dlci, ret, len, &status);
+
+ return(ret);
+}
+
+/***********************************************
+ *
+ * these functions are called by the DLCI driver
+ *
+ ***********************************************/
+
+static int sdla_reconfig(struct net_device *dev);
+
+int sdla_activate(struct net_device *slave, struct net_device *master)
+{
+ struct frad_local *flp;
+ int i;
+
+ flp = slave->priv;
+
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->master[i] == master)
+ break;
+
+ if (i == CONFIG_DLCI_MAX)
+ return(-ENODEV);
+
+ flp->dlci[i] = abs(flp->dlci[i]);
+
+ if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
+ sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
+
+ return(0);
+}
+
+int sdla_deactivate(struct net_device *slave, struct net_device *master)
+{
+ struct frad_local *flp;
+ int i;
+
+ flp = slave->priv;
+
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->master[i] == master)
+ break;
+
+ if (i == CONFIG_DLCI_MAX)
+ return(-ENODEV);
+
+ flp->dlci[i] = -abs(flp->dlci[i]);
+
+ if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
+ sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
+
+ return(0);
+}
+
+int sdla_assoc(struct net_device *slave, struct net_device *master)
+{
+ struct frad_local *flp;
+ int i;
+
+ if (master->type != ARPHRD_DLCI)
+ return(-EINVAL);
+
+ flp = slave->priv;
+
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ {
+ if (!flp->master[i])
+ break;
+ if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
+ return(-EADDRINUSE);
+ }
+
+ if (i == CONFIG_DLCI_MAX)
+ return(-EMLINK); /* #### Alan: Comments on this ?? */
+
+
+ flp->master[i] = master;
+ flp->dlci[i] = -*(short *)(master->dev_addr);
+ master->mtu = slave->mtu;
+
+ if (netif_running(slave)) {
+ if (flp->config.station == FRAD_STATION_CPE)
+ sdla_reconfig(slave);
+ else
+ sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
+ }
+
+ return(0);
+}
+
+int sdla_deassoc(struct net_device *slave, struct net_device *master)
+{
+ struct frad_local *flp;
+ int i;
+
+ flp = slave->priv;
+
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->master[i] == master)
+ break;
+
+ if (i == CONFIG_DLCI_MAX)
+ return(-ENODEV);
+
+ flp->master[i] = NULL;
+ flp->dlci[i] = 0;
+
+
+ if (netif_running(slave)) {
+ if (flp->config.station == FRAD_STATION_CPE)
+ sdla_reconfig(slave);
+ else
+ sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
+ }
+
+ return(0);
+}
+
+int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
+{
+ struct frad_local *flp;
+ struct dlci_local *dlp;
+ int i;
+ short len, ret;
+
+ flp = slave->priv;
+
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->master[i] == master)
+ break;
+
+ if (i == CONFIG_DLCI_MAX)
+ return(-ENODEV);
+
+ dlp = master->priv;
+
+ ret = SDLA_RET_OK;
+ len = sizeof(struct dlci_conf);
+ if (netif_running(slave)) {
+ if (get)
+ ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
+ NULL, 0, &dlp->config, &len);
+ else
+ ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
+ &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
+ }
+
+ return(ret == SDLA_RET_OK ? 0 : -EIO);
+}
+
+/**************************
+ *
+ * now for the Linux driver
+ *
+ **************************/
+
+/* NOTE: the DLCI driver deals with freeing the SKB!! */
+static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct frad_local *flp;
+ int ret, addr, accept, i;
+ short size;
+ unsigned long flags;
+ struct buf_entry *pbuf;
+
+ flp = dev->priv;
+ ret = 0;
+ accept = 1;
+
+ netif_stop_queue(dev);
+
+ /*
+	 * stupid GateD insists on setting up the multicast router through us,
+	 * and we're ill-equipped to handle a non-Frame-Relay packet at this
+	 * time!
+ */
+
+ accept = 1;
+ switch (dev->type)
+ {
+ case ARPHRD_FRAD:
+ if (skb->dev->type != ARPHRD_DLCI)
+ {
+ printk(KERN_WARNING "%s: Non DLCI device, type %i, tried to send on FRAD module.\n", dev->name, skb->dev->type);
+ accept = 0;
+ }
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown firmware type 0x%4.4X\n", dev->name, dev->type);
+ accept = 0;
+ break;
+ }
+ if (accept)
+ {
+		/* this is Frame Relay specific, but until there's a PPP module, it's the default */
+ switch (flp->type)
+ {
+ case SDLA_S502A:
+ case SDLA_S502E:
+ ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
+ break;
+ case SDLA_S508:
+ size = sizeof(addr);
+ ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
+ if (ret == SDLA_RET_OK)
+ {
+
+ spin_lock_irqsave(&sdla_lock, flags);
+ SDLA_WINDOW(dev, addr);
+ pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+ __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
+ SDLA_WINDOW(dev, addr);
+ pbuf->opp_flag = 1;
+ spin_unlock_irqrestore(&sdla_lock, flags);
+ }
+ break;
+ }
+ switch (ret)
+ {
+ case SDLA_RET_OK:
+ flp->stats.tx_packets++;
+ ret = DLCI_RET_OK;
+ break;
+
+ case SDLA_RET_CIR_OVERFLOW:
+ case SDLA_RET_BUF_OVERSIZE:
+ case SDLA_RET_NO_BUFS:
+ flp->stats.tx_dropped++;
+ ret = DLCI_RET_DROP;
+ break;
+
+ default:
+ flp->stats.tx_errors++;
+ ret = DLCI_RET_ERR;
+ break;
+ }
+ }
+ netif_wake_queue(dev);
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ {
+ if(flp->master[i]!=NULL)
+ netif_wake_queue(flp->master[i]);
+ }
+ return(ret);
+}
+
+static void sdla_receive(struct net_device *dev)
+{
+ struct net_device *master;
+ struct frad_local *flp;
+ struct dlci_local *dlp;
+ struct sk_buff *skb;
+
+ struct sdla_cmd *cmd;
+ struct buf_info *pbufi;
+ struct buf_entry *pbuf;
+
+ unsigned long flags;
+ int i=0, received, success, addr, buf_base, buf_top;
+ short dlci, len, len2, split;
+
+ flp = dev->priv;
+ success = 1;
+ received = addr = buf_top = buf_base = 0;
+ len = dlci = 0;
+ skb = NULL;
+ master = NULL;
+ cmd = NULL;
+ pbufi = NULL;
+ pbuf = NULL;
+
+ spin_lock_irqsave(&sdla_lock, flags);
+
+ switch (flp->type)
+ {
+ case SDLA_S502A:
+ case SDLA_S502E:
+ cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK));
+ SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
+ success = cmd->opp_flag;
+ if (!success)
+ break;
+
+ dlci = cmd->dlci;
+ len = cmd->length;
+ break;
+
+ case SDLA_S508:
+ pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK));
+ SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
+ pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK));
+ success = pbuf->opp_flag;
+ if (!success)
+ break;
+
+ buf_top = pbufi->buf_top;
+ buf_base = pbufi->buf_base;
+ dlci = pbuf->dlci;
+ len = pbuf->length;
+ addr = pbuf->buf_addr;
+ break;
+ }
+
+ /* common code, find the DLCI and get the SKB */
+ if (success)
+ {
+ for (i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i] == dlci)
+ break;
+
+ if (i == CONFIG_DLCI_MAX)
+ {
+			printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.\n", dev->name, dlci);
+ flp->stats.rx_errors++;
+ success = 0;
+ }
+ }
+
+ if (success)
+ {
+ master = flp->master[i];
+ skb = dev_alloc_skb(len + sizeof(struct frhdr));
+ if (skb == NULL)
+ {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ flp->stats.rx_dropped++;
+ success = 0;
+ }
+ else
+ skb_reserve(skb, sizeof(struct frhdr));
+ }
+
+ /* pick up the data */
+ switch (flp->type)
+ {
+ case SDLA_S502A:
+ case SDLA_S502E:
+ if (success)
+ __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
+
+ SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
+ cmd->opp_flag = 0;
+ break;
+
+ case SDLA_S508:
+ if (success)
+ {
+ /* is this buffer split off the end of the internal ring buffer */
+ split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0;
+ len2 = len - split;
+
+ __sdla_read(dev, addr, skb_put(skb, len2), len2);
+ if (split)
+ __sdla_read(dev, buf_base, skb_put(skb, split), split);
+ }
+
+ /* increment the buffer we're looking at */
+ SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
+ flp->buffer = (flp->buffer + 1) % pbufi->rse_num;
+ pbuf->opp_flag = 0;
+ break;
+ }
+
+ if (success)
+ {
+ flp->stats.rx_packets++;
+ dlp = master->priv;
+ (*dlp->receive)(skb, master);
+ }
+
+ spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+static irqreturn_t sdla_isr(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev;
+ struct frad_local *flp;
+ char byte;
+
+ dev = dev_id;
+
+ if (dev == NULL)
+ {
+ printk(KERN_WARNING "sdla_isr(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ flp = dev->priv;
+
+ if (!flp->initialized)
+ {
+ printk(KERN_WARNING "%s: irq %d for uninitialized device.\n", dev->name, irq);
+ return IRQ_NONE;
+ }
+
+ byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE);
+ switch (byte)
+ {
+ case SDLA_INTR_RX:
+ sdla_receive(dev);
+ break;
+
+ /* the command will get an error return, which is processed above */
+ case SDLA_INTR_MODEM:
+ case SDLA_INTR_STATUS:
+ sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL);
+ break;
+
+ case SDLA_INTR_TX:
+ case SDLA_INTR_COMPLETE:
+ case SDLA_INTR_TIMER:
+ printk(KERN_WARNING "%s: invalid irq flag 0x%02X.\n", dev->name, byte);
+ break;
+ }
+
+ /* the S502E requires a manual acknowledgement of the interrupt */
+ if (flp->type == SDLA_S502E)
+ {
+ flp->state &= ~SDLA_S502E_INTACK;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state |= SDLA_S502E_INTACK;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ }
+
+ /* this clears the byte, informing the Z80 we're done */
+ byte = 0;
+ sdla_write(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
+ return IRQ_HANDLED;
+}
+
+static void sdla_poll(unsigned long device)
+{
+ struct net_device *dev;
+ struct frad_local *flp;
+
+ dev = (struct net_device *) device;
+ flp = dev->priv;
+
+ if (sdla_byte(dev, SDLA_502_RCV_BUF))
+ sdla_receive(dev);
+
+ flp->timer.expires = 1;
+ add_timer(&flp->timer);
+}
+
+static int sdla_close(struct net_device *dev)
+{
+ struct frad_local *flp;
+ struct intr_info intr;
+ int len, i;
+ short dlcis[CONFIG_DLCI_MAX];
+
+ flp = dev->priv;
+
+ len = 0;
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i])
+ dlcis[len++] = abs(flp->dlci[i]);
+ len *= 2;
+
+ if (flp->config.station == FRAD_STATION_NODE)
+ {
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i] > 0)
+ sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL);
+ sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL);
+ }
+
+ memset(&intr, 0, sizeof(intr));
+	/* shut the reception down */
+ switch(flp->type)
+ {
+ case SDLA_S502A:
+ del_timer(&flp->timer);
+ break;
+
+ case SDLA_S502E:
+ sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
+ flp->state &= ~SDLA_S502E_INTACK;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+
+ case SDLA_S507:
+ break;
+
+ case SDLA_S508:
+ sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
+ flp->state &= ~SDLA_S508_INTEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ break;
+ }
+
+ sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+ netif_stop_queue(dev);
+
+ return(0);
+}
+
+struct conf_data {
+ struct frad_conf config;
+ short dlci[CONFIG_DLCI_MAX];
+};
+
+static int sdla_open(struct net_device *dev)
+{
+ struct frad_local *flp;
+ struct dlci_local *dlp;
+ struct conf_data data;
+ struct intr_info intr;
+ int len, i;
+ char byte;
+
+ flp = dev->priv;
+
+ if (!flp->initialized)
+ return(-EPERM);
+
+ if (!flp->configured)
+ return(-EPERM);
+
+ /* time to send in the configuration */
+ len = 0;
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i])
+ data.dlci[len++] = abs(flp->dlci[i]);
+ len *= 2;
+
+ memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
+ len += sizeof(struct frad_conf);
+
+ sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+ sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
+
+ if (flp->type == SDLA_S508)
+ flp->buffer = 0;
+
+ sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+ /* let's start up the reception */
+ memset(&intr, 0, sizeof(intr));
+ switch(flp->type)
+ {
+ case SDLA_S502A:
+ flp->timer.expires = 1;
+ add_timer(&flp->timer);
+ break;
+
+ case SDLA_S502E:
+ flp->state |= SDLA_S502E_ENABLE;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ flp->state |= SDLA_S502E_INTACK;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ byte = 0;
+ sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
+ intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
+ sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
+ break;
+
+ case SDLA_S507:
+ break;
+
+ case SDLA_S508:
+ flp->state |= SDLA_S508_INTEN;
+ outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+ byte = 0;
+ sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte));
+ intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
+ intr.irq = dev->irq;
+ sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
+ break;
+ }
+
+ if (flp->config.station == FRAD_STATION_CPE)
+ {
+ byte = SDLA_ICS_STATUS_ENQ;
+ sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL);
+ }
+ else
+ {
+ sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL);
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i] > 0)
+ sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL);
+ }
+
+ /* configure any specific DLCI settings */
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i])
+ {
+ dlp = flp->master[i]->priv;
+ if (dlp->configured)
+ sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
+ }
+
+ netif_start_queue(dev);
+
+ return(0);
+}
+
+static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
+{
+ struct frad_local *flp;
+ struct conf_data data;
+ int i;
+ short size;
+
+ if (dev->type == 0xFFFF)
+ return(-EUNATCH);
+
+ flp = dev->priv;
+
+ if (!get)
+ {
+ if (netif_running(dev))
+ return(-EBUSY);
+
+ if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
+ return -EFAULT;
+
+ if (data.config.station & ~FRAD_STATION_NODE)
+ return(-EINVAL);
+
+ if (data.config.flags & ~FRAD_VALID_FLAGS)
+ return(-EINVAL);
+
+ if ((data.config.kbaud < 0) ||
+ ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
+ return(-EINVAL);
+
+ if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
+ return(-EINVAL);
+
+ if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
+ return(-EINVAL);
+
+ if ((data.config.T391 < 5) || (data.config.T391 > 30))
+ return(-EINVAL);
+
+ if ((data.config.T392 < 5) || (data.config.T392 > 30))
+ return(-EINVAL);
+
+ if ((data.config.N391 < 1) || (data.config.N391 > 255))
+ return(-EINVAL);
+
+ if ((data.config.N392 < 1) || (data.config.N392 > 10))
+ return(-EINVAL);
+
+ if ((data.config.N393 < 1) || (data.config.N393 > 10))
+ return(-EINVAL);
+
+ memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
+ flp->config.flags |= SDLA_DIRECT_RECV;
+
+ if (flp->type == SDLA_S508)
+ flp->config.flags |= SDLA_TX70_RX30;
+
+ if (dev->mtu != flp->config.mtu)
+ {
+ /* this is required to change the MTU */
+ dev->mtu = flp->config.mtu;
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->master[i])
+ flp->master[i]->mtu = flp->config.mtu;
+ }
+
+ flp->config.mtu += sizeof(struct frhdr);
+
+ /* off to the races! */
+ if (!flp->configured)
+ sdla_start(dev);
+
+ flp->configured = 1;
+ }
+ else
+ {
+ /* no sense reading if the CPU isn't started */
+ if (netif_running(dev))
+ {
+ size = sizeof(data);
+ if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
+ return(-EIO);
+ }
+ else
+ if (flp->configured)
+ memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
+ else
+ memset(&data.config, 0, sizeof(struct frad_conf));
+
+ memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
+ data.config.flags &= FRAD_VALID_FLAGS;
+ data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu;
+ return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
+ }
+
+ return(0);
+}
+
+static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
+{
+ struct sdla_mem mem;
+ char *temp;
+
+ if(copy_from_user(&mem, info, sizeof(mem)))
+ return -EFAULT;
+
+ if (read)
+ {
+ temp = kmalloc(mem.len, GFP_KERNEL);
+ if (!temp)
+ return(-ENOMEM);
+ memset(temp, 0, mem.len);
+ sdla_read(dev, mem.addr, temp, mem.len);
+ if(copy_to_user(mem.data, temp, mem.len))
+ {
+ kfree(temp);
+ return -EFAULT;
+ }
+ kfree(temp);
+ }
+ else
+ {
+ temp = kmalloc(mem.len, GFP_KERNEL);
+ if (!temp)
+ return(-ENOMEM);
+ if(copy_from_user(temp, mem.data, mem.len))
+ {
+ kfree(temp);
+ return -EFAULT;
+ }
+ sdla_write(dev, mem.addr, temp, mem.len);
+ kfree(temp);
+ }
+ return(0);
+}
+
+static int sdla_reconfig(struct net_device *dev)
+{
+ struct frad_local *flp;
+ struct conf_data data;
+ int i, len;
+
+ flp = dev->priv;
+
+ len = 0;
+ for(i=0;i<CONFIG_DLCI_MAX;i++)
+ if (flp->dlci[i])
+ data.dlci[len++] = flp->dlci[i];
+ len *= 2;
+
+ memcpy(&data, &flp->config, sizeof(struct frad_conf));
+ len += sizeof(struct frad_conf);
+
+ sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+ sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
+ sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+ return(0);
+}
+
+static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct frad_local *flp;
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ flp = dev->priv;
+
+ if (!flp->initialized)
+ return(-EINVAL);
+
+ switch (cmd)
+ {
+ case FRAD_GET_CONF:
+ case FRAD_SET_CONF:
+ return(sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF));
+
+ case SDLA_IDENTIFY:
+ ifr->ifr_flags = flp->type;
+ break;
+
+ case SDLA_CPUSPEED:
+ return(sdla_cpuspeed(dev, ifr));
+
+/* ==========================================================
+NOTE: This is rather a useless action right now, as the
+ current driver does not support protocols other than
+ FR. However, Sangoma has modules for a number of
+ other protocols in the works.
+============================================================*/
+ case SDLA_PROTOCOL:
+ if (flp->configured)
+ return(-EALREADY);
+
+ switch (ifr->ifr_flags)
+ {
+ case ARPHRD_FRAD:
+ dev->type = ifr->ifr_flags;
+ break;
+ default:
+ return(-ENOPROTOOPT);
+ }
+ break;
+
+ case SDLA_CLEARMEM:
+ sdla_clear(dev);
+ break;
+
+ case SDLA_WRITEMEM:
+ case SDLA_READMEM:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return(sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM));
+
+ case SDLA_START:
+ sdla_start(dev);
+ break;
+
+ case SDLA_STOP:
+ sdla_stop(dev);
+ break;
+
+ default:
+ return(-EOPNOTSUPP);
+ }
+ return(0);
+}
+
+int sdla_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct frad_local *flp;
+
+ flp = dev->priv;
+
+ if (netif_running(dev))
+ return(-EBUSY);
+
+ /* for now, you can't change the MTU! */
+ return(-EOPNOTSUPP);
+}
+
+int sdla_set_config(struct net_device *dev, struct ifmap *map)
+{
+ struct frad_local *flp;
+ int i;
+ char byte;
+ unsigned base;
+ int err = -EINVAL;
+
+ flp = dev->priv;
+
+ if (flp->initialized)
+ return(-EINVAL);
+
+ for(i=0;i < sizeof(valid_port) / sizeof (int) ; i++)
+ if (valid_port[i] == map->base_addr)
+ break;
+
+ if (i == sizeof(valid_port) / sizeof(int))
+ return(-EINVAL);
+
+ if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
+ printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
+ return(-EINVAL);
+ }
+ base = map->base_addr;
+
+ /* test for card types, S502A, S502E, S507, S508 */
+ /* these tests shut down the card completely, so clear the state */
+ flp->type = SDLA_UNKNOWN;
+ flp->state = 0;
+
+ for(i=1;i<SDLA_IO_EXTENTS;i++)
+ if (inb(base + i) != 0xFF)
+ break;
+
+ if (i == SDLA_IO_EXTENTS) {
+ outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL);
+ if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) {
+ outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL);
+ if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) {
+ outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+ flp->type = SDLA_S502E;
+ goto got_type;
+ }
+ }
+ }
+
+ for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++)
+ if (inb(base + i) != byte)
+ break;
+
+ if (i == SDLA_IO_EXTENTS) {
+ outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+ if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) {
+ outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL);
+ if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) {
+ outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+ flp->type = SDLA_S507;
+ goto got_type;
+ }
+ }
+ }
+
+ outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+ if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) {
+ outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL);
+ if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) {
+ outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+ flp->type = SDLA_S508;
+ goto got_type;
+ }
+ }
+
+ outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL);
+ if (inb(base + SDLA_S502_STS) == 0x40) {
+ outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
+ if (inb(base + SDLA_S502_STS) == 0x40) {
+ outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL);
+ if (inb(base + SDLA_S502_STS) == 0x44) {
+ outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
+ flp->type = SDLA_S502A;
+ goto got_type;
+ }
+ }
+ }
+
+ printk(KERN_NOTICE "%s: Unknown card type\n", dev->name);
+ err = -ENODEV;
+ goto fail;
+
+got_type:
+ switch(base) {
+ case 0x270:
+ case 0x280:
+ case 0x380:
+ case 0x390:
+ if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
+ goto fail;
+ }
+
+ switch (map->irq) {
+ case 2:
+ if (flp->type != SDLA_S502E)
+ goto fail;
+ break;
+
+ case 10:
+ case 11:
+ case 12:
+ case 15:
+ case 4:
+ if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
+ goto fail;
+ break;
+ case 3:
+ case 5:
+ case 7:
+ if (flp->type == SDLA_S502A)
+ goto fail;
+ break;
+
+ default:
+ goto fail;
+ }
+
+ err = -EAGAIN;
+ if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev))
+ goto fail;
+
+ if (flp->type == SDLA_S507) {
+ switch(dev->irq) {
+ case 3:
+ flp->state = SDLA_S507_IRQ3;
+ break;
+ case 4:
+ flp->state = SDLA_S507_IRQ4;
+ break;
+ case 5:
+ flp->state = SDLA_S507_IRQ5;
+ break;
+ case 7:
+ flp->state = SDLA_S507_IRQ7;
+ break;
+ case 10:
+ flp->state = SDLA_S507_IRQ10;
+ break;
+ case 11:
+ flp->state = SDLA_S507_IRQ11;
+ break;
+ case 12:
+ flp->state = SDLA_S507_IRQ12;
+ break;
+ case 15:
+ flp->state = SDLA_S507_IRQ15;
+ break;
+ }
+ }
+
+ for(i=0;i < sizeof(valid_mem) / sizeof (int) ; i++)
+ if (valid_mem[i] == map->mem_start)
+ break;
+
+ err = -EINVAL;
+ if (i == sizeof(valid_mem) / sizeof(int))
+ goto fail2;
+
+ if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
+ goto fail2;
+
+ if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B)
+ goto fail2;
+
+ if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D)
+ goto fail2;
+
+ byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0;
+ byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0));
+ switch(flp->type) {
+ case SDLA_S502A:
+ case SDLA_S502E:
+ switch (map->mem_start >> 16) {
+ case 0x0A:
+ byte |= SDLA_S502_SEG_A;
+ break;
+ case 0x0C:
+ byte |= SDLA_S502_SEG_C;
+ break;
+ case 0x0D:
+ byte |= SDLA_S502_SEG_D;
+ break;
+ case 0x0E:
+ byte |= SDLA_S502_SEG_E;
+ break;
+ }
+ break;
+ case SDLA_S507:
+ switch (map->mem_start >> 16) {
+ case 0x0A:
+ byte |= SDLA_S507_SEG_A;
+ break;
+ case 0x0B:
+ byte |= SDLA_S507_SEG_B;
+ break;
+ case 0x0C:
+ byte |= SDLA_S507_SEG_C;
+ break;
+ case 0x0E:
+ byte |= SDLA_S507_SEG_E;
+ break;
+ }
+ break;
+ case SDLA_S508:
+ switch (map->mem_start >> 16) {
+ case 0x0A:
+ byte |= SDLA_S508_SEG_A;
+ break;
+ case 0x0C:
+ byte |= SDLA_S508_SEG_C;
+ break;
+ case 0x0D:
+ byte |= SDLA_S508_SEG_D;
+ break;
+ case 0x0E:
+ byte |= SDLA_S508_SEG_E;
+ break;
+ }
+ break;
+ }
+
+ /* set the memory bits, and enable access */
+ outb(byte, base + SDLA_REG_PC_WINDOW);
+
+ switch(flp->type)
+ {
+ case SDLA_S502E:
+ flp->state = SDLA_S502E_ENABLE;
+ break;
+ case SDLA_S507:
+ flp->state |= SDLA_MEMEN;
+ break;
+ case SDLA_S508:
+ flp->state = SDLA_MEMEN;
+ break;
+ }
+ outb(flp->state, base + SDLA_REG_CONTROL);
+
+ dev->irq = map->irq;
+ dev->base_addr = base;
+ dev->mem_start = map->mem_start;
+ dev->mem_end = dev->mem_start + 0x2000;
+ flp->initialized = 1;
+ return 0;
+
+fail2:
+ free_irq(map->irq, dev);
+fail:
+ release_region(base, SDLA_IO_EXTENTS);
+ return err;
+}
+
+static struct net_device_stats *sdla_stats(struct net_device *dev)
+{
+ struct frad_local *flp;
+ flp = dev->priv;
+
+ return(&flp->stats);
+}
+
+static void setup_sdla(struct net_device *dev)
+{
+ struct frad_local *flp = dev->priv;
+
+ netdev_boot_setup_check(dev);
+
+ SET_MODULE_OWNER(dev);
+ dev->flags = 0;
+ dev->type = 0xFFFF;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = SDLA_MAX_MTU;
+
+ dev->open = sdla_open;
+ dev->stop = sdla_close;
+ dev->do_ioctl = sdla_ioctl;
+ dev->set_config = sdla_set_config;
+ dev->get_stats = sdla_stats;
+ dev->hard_start_xmit = sdla_transmit;
+ dev->change_mtu = sdla_change_mtu;
+
+ flp->activate = sdla_activate;
+ flp->deactivate = sdla_deactivate;
+ flp->assoc = sdla_assoc;
+ flp->deassoc = sdla_deassoc;
+ flp->dlci_conf = sdla_dlci_conf;
+
+ init_timer(&flp->timer);
+ flp->timer.expires = 1;
+ flp->timer.data = (unsigned long) dev;
+ flp->timer.function = sdla_poll;
+}
+
+static struct net_device *sdla;
+
+static int __init init_sdla(void)
+{
+ int err;
+
+ printk("%s.\n", version);
+
+ sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla);
+ if (!sdla)
+ return -ENOMEM;
+
+ err = register_netdev(sdla);
+ if (err)
+ free_netdev(sdla);
+
+ return err;
+}
+
+static void __exit exit_sdla(void)
+{
+ struct frad_local *flp = sdla->priv;
+
+ unregister_netdev(sdla);
+ if (flp->initialized) {
+ free_irq(sdla->irq, sdla);
+ release_region(sdla->base_addr, SDLA_IO_EXTENTS);
+ }
+ del_timer_sync(&flp->timer);
+ free_netdev(sdla);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_sdla);
+module_exit(exit_sdla);
diff --git a/drivers/net/wan/sdla_chdlc.c b/drivers/net/wan/sdla_chdlc.c
new file mode 100644
index 000000000000..afbe0024e3e1
--- /dev/null
+++ b/drivers/net/wan/sdla_chdlc.c
@@ -0,0 +1,4433 @@
+/*****************************************************************************
+* sdla_chdlc.c WANPIPE(tm) Multiprotocol WAN Link Driver. Cisco HDLC module.
+*
+* Authors: Nenad Corbic <ncorbic@sangoma.com>
+* Gideon Hack
+*
+* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Feb 28, 2001 Nenad Corbic Updated if_tx_timeout() routine for
+* 2.4.X kernels.
+* Jan 25, 2001 Nenad Corbic Added a TTY Sync serial driver over the
+* HDLC streaming protocol
+* Added a TTY Async serial driver over the
+* Async protocol.
+* Dec 15, 2000 Nenad Corbic Updated for 2.4.X Kernel support
+* Nov 13, 2000 Nenad Corbic Added true interface type encoding option.
+* 				 Tcpdump doesn't support CHDLC interface
+* 				 types; to fix this, the "true type" option
+* 				 will set the interface type to RAW IP mode.
+* Nov 07, 2000 Nenad Corbic Added security features for UDP debugging:
+* Deny all and specify allowed requests.
+* Jun 20, 2000 Nenad Corbic Fixed the API IP ERROR bug. Caused by the
+* latest update.
+* May 09, 2000 Nenad Corbic Option to bring down an interface
+* upon disconnect.
+* Mar 23, 2000 Nenad Corbic Improved task queue, bh handling.
+* Mar 16, 2000 Nenad Corbic Fixed the SLARP Dynamic IP addressing.
+* Mar 06, 2000 Nenad Corbic Bug Fix: corrupted mbox recovery.
+* Feb 10, 2000 Gideon Hack Added ASYNC support.
+* Feb 09, 2000 Nenad Corbic Fixed two shutdown bugs in update() and
+* if_stats() functions.
+* Jan 24, 2000  Nenad Corbic    Fixed a startup wanpipe state race
+*    			        condition between if_open and isr.
+* Jan 10, 2000 Nenad Corbic Added new socket API support.
+* Dec 15, 1999  Nenad Corbic    Fixed up header files for 2.0.X kernels
+* Nov 20, 1999 Nenad Corbic Fixed zero length API bug.
+* Sep 30, 1999 Nenad Corbic Fixed dynamic IP and route setup.
+* Sep 23, 1999 Nenad Corbic Added SMP support, fixed tracing
+* Sep 13, 1999 Nenad Corbic Split up Port 0 and 1 into separate devices.
+* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
+* Oct 30, 1998 Jaspreet Singh Added Support for CHDLC API (HDLC STREAMING).
+* Oct 28, 1998 Jaspreet Singh Added Support for Dual Port CHDLC.
+* Aug 07, 1998 David Fong Initial version.
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/if_arp.h> /* ARPHRD_* defines */
+
+
+#include <asm/uaccess.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+
+#include <linux/in.h> /* sockaddr_in */
+#include <linux/inet.h>
+#include <linux/if.h>
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <linux/sdlapci.h>
+#include <asm/io.h>
+
+#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
+#include <linux/sdla_asy.h> /* CHDLC (async) API definitions */
+
+#include <linux/if_wanpipe_common.h> /* Socket Driver common area */
+#include <linux/if_wanpipe.h>
+
+/* TTY Includes */
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+
+
+/****** Defines & Macros ****************************************************/
+
+/* reasons for enabling the timer interrupt on the adapter */
+#define TMR_INT_ENABLED_UDP 0x01
+#define TMR_INT_ENABLED_UPDATE 0x02
+#define TMR_INT_ENABLED_CONFIG 0x10
+
+#define MAX_IP_ERRORS 10
+
+#define TTY_CHDLC_MAX_MTU 2000
+#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
+#define CHDLC_HDR_LEN 1
+
+#define CHDLC_API 0x01
+
+#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
+#define MAX_BH_BUFF 10
+
+//#define PRINT_DEBUG
+#ifdef PRINT_DEBUG
+#define dbg_printk(format, a...) printk(format, ## a)
+#else
+#define dbg_printk(format, a...)
+#endif
+
+/******Data Structures*****************************************************/
+
+/* This structure is placed in the private data area of the device structure.
+ * The card structure used to occupy the private area, but now the following
+ * structure incorporates the card structure along with CHDLC-specific data.
+ */
+
+typedef struct chdlc_private_area
+{
+ wanpipe_common_t common;
+ sdla_t *card;
+ int TracingEnabled; /* For enabling Tracing */
+ unsigned long curr_trace_addr; /* Used for Tracing */
+ unsigned long start_trace_addr;
+ unsigned long end_trace_addr;
+ unsigned long base_addr_trace_buffer;
+ unsigned long end_addr_trace_buffer;
+ unsigned short number_trace_elements;
+ unsigned available_buffer_space;
+ unsigned long router_start_time;
+ unsigned char route_status;
+ unsigned char route_removed;
+ unsigned long tick_counter; /* For 5s timeout counter */
+ unsigned long router_up_time;
+ u32 IP_address; /* IP addressing */
+ u32 IP_netmask;
+ u32 ip_local;
+ u32 ip_remote;
+ u32 ip_local_tmp;
+ u32 ip_remote_tmp;
+ u8 ip_error;
+ u8 config_chdlc;
+ u8 config_chdlc_timeout;
+	unsigned char mc;			/* Multicast support on/off */
+ unsigned short udp_pkt_lgth; /* udp packet processing */
+ char udp_pkt_src;
+ char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
+ unsigned short timer_int_enabled;
+ char update_comms_stats; /* updating comms stats */
+
+ bh_data_t *bh_head; /* Circular buffer for chdlc_bh */
+ unsigned long tq_working;
+ volatile int bh_write;
+ volatile int bh_read;
+ atomic_t bh_buff_used;
+
+ unsigned char interface_down;
+
+ /* Polling work queue entry. Each interface
+ * has its own work queue entry, which is used
+ * to defer events from the interrupt */
+ struct work_struct poll_work;
+ struct timer_list poll_delay_timer;
+
+ u8 gateway;
+ u8 true_if_encoding;
+ //FIXME: add driver stats as per frame relay!
+
+} chdlc_private_area_t;
+
+/* Route Status options */
+#define NO_ROUTE 0x00
+#define ADD_ROUTE 0x01
+#define ROUTE_ADDED 0x02
+#define REMOVE_ROUTE 0x03
+
+
+/* variable for keeping track of enabling/disabling FT1 monitor status */
+static int rCount = 0;
+
+/* variable for tracking how many interfaces to open for WANPIPE on the
+ two ports */
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+/****** Function Prototypes *************************************************/
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int update(struct wan_device* wandev);
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf);
+
+/* Network device interface */
+static int if_init(struct net_device* dev);
+static int if_open(struct net_device* dev);
+static int if_close(struct net_device* dev);
+static int if_header(struct sk_buff* skb, struct net_device* dev,
+ unsigned short type, void* daddr, void* saddr,
+ unsigned len);
+
+static int if_rebuild_hdr (struct sk_buff *skb);
+static struct net_device_stats* if_stats(struct net_device* dev);
+
+static int if_send(struct sk_buff* skb, struct net_device* dev);
+
+/* CHDLC Firmware interface functions */
+static int chdlc_configure (sdla_t* card, void* data);
+static int chdlc_comm_enable (sdla_t* card);
+static int chdlc_read_version (sdla_t* card, char* str);
+static int chdlc_set_intr_mode (sdla_t* card, unsigned mode);
+static int chdlc_send (sdla_t* card, void* data, unsigned len);
+static int chdlc_read_comm_err_stats (sdla_t* card);
+static int chdlc_read_op_stats (sdla_t* card);
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
+
+
+static int chdlc_disable_comm_shutdown (sdla_t *card);
+static void if_tx_timeout(struct net_device *dev);
+
+/* Miscellaneous CHDLC Functions */
+static int set_chdlc_config (sdla_t* card);
+static void init_chdlc_tx_rx_buff( sdla_t* card);
+static int process_chdlc_exception(sdla_t *card);
+static int process_global_exception(sdla_t *card);
+static int update_comms_stats(sdla_t* card,
+ chdlc_private_area_t* chdlc_priv_area);
+static int configure_ip (sdla_t* card);
+static int unconfigure_ip (sdla_t* card);
+static void process_route(sdla_t *card);
+static void port_set_state (sdla_t *card, int);
+static int config_chdlc (sdla_t *card);
+static void disable_comm (sdla_t *card);
+
+static void trigger_chdlc_poll(struct net_device *dev);
+static void chdlc_poll(struct net_device *dev);
+static void chdlc_poll_delay (unsigned long dev_ptr);
+
+
+/* Miscellaneous asynchronous interface Functions */
+static int set_asy_config (sdla_t* card);
+static int asy_comm_enable (sdla_t* card);
+
+/* Interrupt handlers */
+static void wpc_isr (sdla_t* card);
+static void rx_intr (sdla_t* card);
+static void timer_intr(sdla_t *);
+
+/* Bottom half handlers */
+static void chdlc_work(struct net_device *dev);
+static int chdlc_work_cleanup(struct net_device *dev);
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
+
+/* Miscellaneous functions */
+static int chk_bcast_mcast_addr(sdla_t* card, struct net_device* dev,
+ struct sk_buff *skb);
+static int reply_udp( unsigned char *data, unsigned int mbox_len );
+static int intr_test( sdla_t* card);
+static int udp_pkt_type( struct sk_buff *skb , sdla_t* card);
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area);
+static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area);
+static unsigned short calc_checksum (char *, int);
+static void s508_lock (sdla_t *card, unsigned long *smp_flags);
+static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
+
+
+static int Intr_test_counter;
+
+/* TTY Global Definitions */
+
+#define NR_PORTS 4
+#define WAN_TTY_MAJOR 226
+#define WAN_TTY_MINOR 0
+
+#define WAN_CARD(port) (tty_card_map[port])
+#define MIN_PORT 0
+#define MAX_PORT NR_PORTS-1
+
+#define CRC_LENGTH 2
+
+static int wanpipe_tty_init(sdla_t *card);
+static void wanpipe_tty_receive(sdla_t *, unsigned, unsigned int);
+static void wanpipe_tty_trigger_poll(sdla_t *card);
+
+static struct tty_driver serial_driver;
+static int tty_init_cnt=0;
+
+static struct serial_state rs_table[NR_PORTS];
+
+static char tty_driver_mode=WANOPT_TTY_SYNC;
+
+static char *opt_decode[] = {"NONE","CRTSCTS","XONXOFF-RX",
+ "CRTSCTS XONXOFF-RX","XONXOFF-TX",
+ "CRTSCTS XONXOFF-TX","CRTSCTS XONXOFF"};
+static char *p_decode[] = {"NONE","ODD","EVEN"};
+
+static void* tty_card_map[NR_PORTS] = {NULL,NULL,NULL,NULL};
+
+
+/****** Public Functions ****************************************************/
+
+/*============================================================================
+ * Cisco HDLC protocol initialization routine.
+ *
+ * This routine is called by the main WANPIPE module during setup. At this
+ * point the adapter is completely initialized and the firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+int wpc_init (sdla_t* card, wandev_conf_t* conf)
+{
+ unsigned char port_num;
+ int err;
+ unsigned long max_permitted_baud = 0;
+ SHARED_MEMORY_INFO_STRUCT *flags;
+
+ union
+ {
+ char str[80];
+ } u;
+ volatile CHDLC_MAILBOX_STRUCT* mb;
+ CHDLC_MAILBOX_STRUCT* mb1;
+ unsigned long timeout;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_CHDLC) {
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Find out which Port to use */
+ if ((conf->comm_port == WANOPT_PRI) || (conf->comm_port == WANOPT_SEC)){
+ if (card->next){
+
+ if (conf->comm_port != card->next->u.c.comm_port){
+ card->u.c.comm_port = conf->comm_port;
+ }else{
+ printk(KERN_INFO "%s: ERROR - %s port used!\n",
+ card->wandev.name, PORT(conf->comm_port));
+ return -EINVAL;
+ }
+ }else{
+ card->u.c.comm_port = conf->comm_port;
+ }
+ }else{
+ printk(KERN_INFO "%s: ERROR - Invalid Port Selected!\n",
+ card->wandev.name);
+ return -EINVAL;
+ }
+
+
+ /* Initialize protocol-specific fields */
+ if(card->hw.type != SDLA_S514){
+
+ if (card->u.c.comm_port == WANOPT_PRI){
+ card->mbox = (void *) card->hw.dpmbase;
+ }else{
+ card->mbox = (void *) card->hw.dpmbase +
+ SEC_BASE_ADDR_MB_STRUCT - PRI_BASE_ADDR_MB_STRUCT;
+ }
+ }else{
+ /* for a S514 adapter, set a pointer to the actual mailbox in the */
+ /* allocated virtual memory area */
+ if (card->u.c.comm_port == WANOPT_PRI){
+ card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
+ }else{
+ card->mbox = (void *) card->hw.dpmbase + SEC_BASE_ADDR_MB_STRUCT;
+ }
+ }
+
+ mb = mb1 = card->mbox;
+
+ if (!card->configured){
+
+ /* The board will place an 'I' in the return code to indicate that it is
+ ready to accept commands. We expect this to be completed in less
+ than 1 second. */
+
+ timeout = jiffies;
+ while (mb->return_code != 'I') /* Wait 1s for board to initialize */
+ if ((jiffies - timeout) > 1*HZ) break;
+
+ if (mb->return_code != 'I') {
+ printk(KERN_INFO
+ "%s: Initialization not completed by adapter\n",
+ card->devname);
+			printk(KERN_INFO "Please contact your Sangoma representative.\n");
+ return -EIO;
+ }
+ }
+
+ /* Read firmware version. Note that when adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+
+ if (chdlc_read_version(card, u.str))
+ return -EIO;
+
+ printk(KERN_INFO "%s: Running Cisco HDLC firmware v%s\n",
+ card->devname, u.str);
+
+ card->isr = &wpc_isr;
+ card->poll = NULL;
+ card->exec = NULL;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = NULL;
+ card->wandev.udp_port = conf->udp_port;
+ card->disable_comm = &disable_comm;
+ card->wandev.new_if_cnt = 0;
+
+ /* reset the number of times the 'update()' proc has been called */
+ card->u.c.update_call_count = 0;
+
+ card->wandev.ttl = conf->ttl;
+ card->wandev.interface = conf->interface;
+
+ if ((card->u.c.comm_port == WANOPT_SEC && conf->interface == WANOPT_V35)&&
+ card->hw.type != SDLA_S514){
+ printk(KERN_INFO "%s: ERROR - V35 Interface not supported on S508 %s port \n",
+ card->devname, PORT(card->u.c.comm_port));
+ return -EIO;
+ }
+
+ card->wandev.clocking = conf->clocking;
+
+ port_num = card->u.c.comm_port;
+
+ /* in API mode, we can configure for "receive only" buffering */
+ if(card->hw.type == SDLA_S514) {
+ card->u.c.receive_only = conf->receive_only;
+ if(conf->receive_only) {
+ printk(KERN_INFO
+ "%s: Configured for 'receive only' mode\n",
+ card->devname);
+ }
+ }
+
+ /* Setup Port Bps */
+
+ if(card->wandev.clocking) {
+ if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
+ /* For Primary Port 0 */
+ max_permitted_baud =
+ (card->hw.type == SDLA_S514) ?
+ PRI_MAX_BAUD_RATE_S514 :
+ PRI_MAX_BAUD_RATE_S508;
+
+ }else if(port_num == WANOPT_SEC) {
+ /* For Secondary Port 1 */
+ max_permitted_baud =
+ (card->hw.type == SDLA_S514) ?
+ SEC_MAX_BAUD_RATE_S514 :
+ SEC_MAX_BAUD_RATE_S508;
+ }
+
+ if(conf->bps > max_permitted_baud) {
+ conf->bps = max_permitted_baud;
+ printk(KERN_INFO "%s: Baud too high!\n",
+ card->wandev.name);
+ printk(KERN_INFO "%s: Baud rate set to %lu bps\n",
+ card->wandev.name, max_permitted_baud);
+ }
+ card->wandev.bps = conf->bps;
+ }else{
+ card->wandev.bps = 0;
+ }
+
+ /* Setup the Port MTU */
+ if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
+
+ /* For Primary Port 0 */
+ card->wandev.mtu =
+ (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
+ min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
+ CHDLC_DFLT_DATA_LEN;
+ } else if(port_num == WANOPT_SEC) {
+ /* For Secondary Port 1 */
+ card->wandev.mtu =
+ (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
+ min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
+ CHDLC_DFLT_DATA_LEN;
+ }
+
+ /* Set up the interrupt status area */
+ /* Read the CHDLC Configuration and obtain:
+	 *	Ptr to the shared memory info struct
+ * Use this pointer to calculate the value of card->u.c.flags !
+ */
+ mb1->buffer_length = 0;
+ mb1->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
+ if(err != COMMAND_OK) {
+ if(card->hw.type != SDLA_S514)
+ enable_irq(card->hw.irq);
+
+ chdlc_error(card, err, mb1);
+ return -EIO;
+ }
+
+ if(card->hw.type == SDLA_S514){
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct));
+ }else{
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
+ }
+
+ flags = card->u.c.flags;
+
+	/* This is for the ports' link state */
+ card->wandev.state = WAN_DUALPORT;
+ card->u.c.state = WAN_DISCONNECTED;
+
+
+ if (!card->wandev.piggyback){
+ int err;
+
+ /* Perform interrupt testing */
+ err = intr_test(card);
+
+ if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
+ printk(KERN_INFO "%s: Interrupt test failed (%i)\n",
+ card->devname, Intr_test_counter);
+ printk(KERN_INFO "%s: Please choose another interrupt\n",
+ card->devname);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "%s: Interrupt test passed (%i)\n",
+ card->devname, Intr_test_counter);
+ card->configured = 1;
+ }
+
+ if ((card->tty_opt=conf->tty) == WANOPT_YES){
+ int err;
+ card->tty_minor = conf->tty_minor;
+
+ /* On ASYNC connections internal clocking
+ * is mandatory */
+ if ((card->u.c.async_mode = conf->tty_mode)){
+ card->wandev.clocking = 1;
+ }
+ err=wanpipe_tty_init(card);
+ if (err){
+ return err;
+ }
+ }else{
+
+
+ if (chdlc_set_intr_mode(card, APP_INT_ON_TIMER)){
+ printk (KERN_INFO "%s: "
+ "Failed to set interrupt triggers!\n",
+ card->devname);
+ return -EIO;
+ }
+
+ /* Mask the Timer interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TIMER;
+ }
+
+ /* If we are using CHDLC in backup mode, this flag will
+ * indicate not to look for IP addresses in config_chdlc()*/
+ card->u.c.backup = conf->backup;
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/******* WAN Device Driver Entry Points *************************************/
+
+/*============================================================================
+ * Update device status & statistics
+ * This procedure is called when updating the PROC file system and returns
+ * various communications statistics. These statistics are accumulated from 3
+ * different locations:
+ * 1) The 'if_stats' recorded for the device.
+ * 2) Communication error statistics on the adapter.
+ * 3) CHDLC operational statistics on the adapter.
+ * The board level statistics are read during a timer interrupt. Note that we
+ * read the error and operational statistics during consecutive timer ticks so
+ * as to minimize the time that we are inside the interrupt handler.
+ *
+ */
+static int update(struct wan_device* wandev)
+{
+ sdla_t* card = wandev->private;
+ struct net_device* dev;
+ volatile chdlc_private_area_t* chdlc_priv_area;
+ SHARED_MEMORY_INFO_STRUCT *flags;
+ unsigned long timeout;
+
+ /* sanity checks */
+ if((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+
+ if(wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ /* more sanity checks */
+ if(!card->u.c.flags)
+ return -ENODEV;
+
+ if(test_bit(PERI_CRIT, (void*)&card->wandev.critical))
+ return -EAGAIN;
+
+ if((dev=card->wandev.dev) == NULL)
+ return -ENODEV;
+
+ if((chdlc_priv_area=dev->priv) == NULL)
+ return -ENODEV;
+
+ flags = card->u.c.flags;
+ if(chdlc_priv_area->update_comms_stats){
+ return -EAGAIN;
+ }
+
+ /* we will need 2 timer interrupts to complete the */
+ /* reading of the statistics */
+ chdlc_priv_area->update_comms_stats = 2;
+ flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
+ chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE;
+
+ /* wait a maximum of 1 second for the statistics to be updated */
+ timeout = jiffies;
+ for(;;) {
+ if(chdlc_priv_area->update_comms_stats == 0)
+ break;
+ if ((jiffies - timeout) > (1 * HZ)){
+ chdlc_priv_area->update_comms_stats = 0;
+ chdlc_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+
+/*============================================================================
+ * Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created)
+ */
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf)
+{
+ sdla_t* card = wandev->private;
+ chdlc_private_area_t* chdlc_priv_area;
+
+
+ printk(KERN_INFO "%s: Configuring Interface: %s\n",
+ card->devname, conf->name);
+
+ if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
+ printk(KERN_INFO "%s: Invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+ /* allocate and initialize private data */
+ chdlc_priv_area = kmalloc(sizeof(chdlc_private_area_t), GFP_KERNEL);
+
+ if(chdlc_priv_area == NULL)
+ return -ENOMEM;
+
+ memset(chdlc_priv_area, 0, sizeof(chdlc_private_area_t));
+
+ chdlc_priv_area->card = card;
+ chdlc_priv_area->common.sk = NULL;
+ chdlc_priv_area->common.func = NULL;
+
+ /* initialize data */
+ strcpy(card->u.c.if_name, conf->name);
+
+ if(card->wandev.new_if_cnt > 0) {
+ kfree(chdlc_priv_area);
+ return -EEXIST;
+ }
+
+ card->wandev.new_if_cnt++;
+
+ chdlc_priv_area->TracingEnabled = 0;
+ chdlc_priv_area->route_status = NO_ROUTE;
+ chdlc_priv_area->route_removed = 0;
+
+ card->u.c.async_mode = conf->async_mode;
+
+ /* setup for asynchronous mode */
+ if(conf->async_mode) {
+ printk(KERN_INFO "%s: Configuring for asynchronous mode\n",
+ wandev->name);
+
+ if(card->u.c.comm_port == WANOPT_PRI) {
+ printk(KERN_INFO
+				"%s: Asynchronous mode on secondary port only\n",
+ wandev->name);
+ kfree(chdlc_priv_area);
+ return -EINVAL;
+ }
+
+ if(strcmp(conf->usedby, "WANPIPE") == 0) {
+ printk(KERN_INFO
+				"%s: Running in WANPIPE Async Mode\n", wandev->name);
+ card->u.c.usedby = WANPIPE;
+ }else{
+ card->u.c.usedby = API;
+ }
+
+ if(!card->wandev.clocking) {
+ printk(KERN_INFO
+ "%s: Asynch. clocking must be 'Internal'\n",
+ wandev->name);
+ kfree(chdlc_priv_area);
+ return -EINVAL;
+ }
+
+ if((card->wandev.bps < MIN_ASY_BAUD_RATE) ||
+ (card->wandev.bps > MAX_ASY_BAUD_RATE)) {
+ printk(KERN_INFO "%s: Selected baud rate is invalid.\n",
+ wandev->name);
+ printk(KERN_INFO "Must be between %u and %u bps.\n",
+ MIN_ASY_BAUD_RATE, MAX_ASY_BAUD_RATE);
+ kfree(chdlc_priv_area);
+ return -EINVAL;
+ }
+
+ card->u.c.api_options = 0;
+ if (conf->asy_data_trans == WANOPT_YES) {
+ card->u.c.api_options |= ASY_RX_DATA_TRANSPARENT;
+ }
+
+ card->u.c.protocol_options = 0;
+ if (conf->rts_hs_for_receive == WANOPT_YES) {
+ card->u.c.protocol_options |= ASY_RTS_HS_FOR_RX;
+ }
+ if (conf->xon_xoff_hs_for_receive == WANOPT_YES) {
+ card->u.c.protocol_options |= ASY_XON_XOFF_HS_FOR_RX;
+ }
+ if (conf->xon_xoff_hs_for_transmit == WANOPT_YES) {
+ card->u.c.protocol_options |= ASY_XON_XOFF_HS_FOR_TX;
+ }
+ if (conf->dcd_hs_for_transmit == WANOPT_YES) {
+ card->u.c.protocol_options |= ASY_DCD_HS_FOR_TX;
+ }
+ if (conf->cts_hs_for_transmit == WANOPT_YES) {
+ card->u.c.protocol_options |= ASY_CTS_HS_FOR_TX;
+ }
+
+ card->u.c.tx_bits_per_char = conf->tx_bits_per_char;
+ card->u.c.rx_bits_per_char = conf->rx_bits_per_char;
+ card->u.c.stop_bits = conf->stop_bits;
+ card->u.c.parity = conf->parity;
+ card->u.c.break_timer = conf->break_timer;
+ card->u.c.inter_char_timer = conf->inter_char_timer;
+ card->u.c.rx_complete_length = conf->rx_complete_length;
+ card->u.c.xon_char = conf->xon_char;
+
+ } else { /* setup for synchronous mode */
+
+ card->u.c.protocol_options = 0;
+ if (conf->ignore_dcd == WANOPT_YES){
+ card->u.c.protocol_options |= IGNORE_DCD_FOR_LINK_STAT;
+ }
+ if (conf->ignore_cts == WANOPT_YES){
+ card->u.c.protocol_options |= IGNORE_CTS_FOR_LINK_STAT;
+ }
+
+ if (conf->ignore_keepalive == WANOPT_YES) {
+ card->u.c.protocol_options |=
+ IGNORE_KPALV_FOR_LINK_STAT;
+ card->u.c.kpalv_tx = MIN_Tx_KPALV_TIMER;
+ card->u.c.kpalv_rx = MIN_Rx_KPALV_TIMER;
+ card->u.c.kpalv_err = MIN_KPALV_ERR_TOL;
+
+ } else { /* Do not ignore keepalives */
+ card->u.c.kpalv_tx =
+ ((conf->keepalive_tx_tmr - MIN_Tx_KPALV_TIMER)
+ >= 0) ?
+ min_t(unsigned int, conf->keepalive_tx_tmr,MAX_Tx_KPALV_TIMER) :
+ DEFAULT_Tx_KPALV_TIMER;
+
+ card->u.c.kpalv_rx =
+ ((conf->keepalive_rx_tmr - MIN_Rx_KPALV_TIMER)
+ >= 0) ?
+ min_t(unsigned int, conf->keepalive_rx_tmr,MAX_Rx_KPALV_TIMER) :
+ DEFAULT_Rx_KPALV_TIMER;
+
+ card->u.c.kpalv_err =
+ ((conf->keepalive_err_margin-MIN_KPALV_ERR_TOL)
+ >= 0) ?
+ min_t(unsigned int, conf->keepalive_err_margin,
+ MAX_KPALV_ERR_TOL) :
+ DEFAULT_KPALV_ERR_TOL;
+ }
+
+ /* Setup slarp timer to control delay between slarps */
+ card->u.c.slarp_timer =
+ ((conf->slarp_timer - MIN_SLARP_REQ_TIMER) >= 0) ?
+ min_t(unsigned int, conf->slarp_timer, MAX_SLARP_REQ_TIMER) :
+ DEFAULT_SLARP_REQ_TIMER;
+
+ if (conf->hdlc_streaming == WANOPT_YES) {
+ printk(KERN_INFO "%s: Enabling HDLC STREAMING Mode\n",
+ wandev->name);
+ card->u.c.protocol_options = HDLC_STREAMING_MODE;
+ }
+
+ if ((chdlc_priv_area->true_if_encoding = conf->true_if_encoding) == WANOPT_YES){
+ printk(KERN_INFO
+				"%s: Enabling true interface type encoding.\n",
+ card->devname);
+ }
+
+ /* Setup wanpipe as a router (WANPIPE) or as an API */
+ if( strcmp(conf->usedby, "WANPIPE") == 0) {
+
+ printk(KERN_INFO "%s: Running in WANPIPE mode!\n",
+ wandev->name);
+ card->u.c.usedby = WANPIPE;
+
+ /* Option to bring down the interface when
+ * the link goes down */
+ if (conf->if_down){
+ set_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down);
+ printk(KERN_INFO
+ "%s: Dynamic interface configuration enabled\n",
+ card->devname);
+ }
+
+ } else if( strcmp(conf->usedby, "API") == 0) {
+ card->u.c.usedby = API;
+ printk(KERN_INFO "%s: Running in API mode !\n",
+ wandev->name);
+ }
+ }
+
+ /* Tells us that if this interface is a
+ * gateway or not */
+ if ((chdlc_priv_area->gateway = conf->gateway) == WANOPT_YES){
+ printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
+ card->devname,card->u.c.if_name);
+ }
+
+ /* Get Multicast Information */
+ chdlc_priv_area->mc = conf->mc;
+
+ /* prepare network device data space for registration */
+ strcpy(dev->name,card->u.c.if_name);
+
+ dev->init = &if_init;
+ dev->priv = chdlc_priv_area;
+
+ /* Initialize the polling work routine */
+ INIT_WORK(&chdlc_priv_area->poll_work, (void*)(void*)chdlc_poll, dev);
+
+ /* Initialize the polling delay timer */
+ init_timer(&chdlc_priv_area->poll_delay_timer);
+ chdlc_priv_area->poll_delay_timer.data = (unsigned long)dev;
+ chdlc_priv_area->poll_delay_timer.function = chdlc_poll_delay;
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+
+/****** Network Device Interface ********************************************/
+
+/*============================================================================
+ * Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration.
+ */
+static int if_init(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+ struct wan_device* wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_header = &if_header;
+ dev->rebuild_header = &if_rebuild_hdr;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+ dev->tx_timeout = &if_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* Initialize media-specific parameters */
+ dev->flags |= IFF_POINTOPOINT;
+ dev->flags |= IFF_NOARP;
+
+	/* Enable Multicasting if the user selected it */
+ if (chdlc_priv_area->mc == WANOPT_YES){
+ dev->flags |= IFF_MULTICAST;
+ }
+
+ if (chdlc_priv_area->true_if_encoding){
+		dev->type = ARPHRD_HDLC; /* This breaks tcpdump */
+ }else{
+ dev->type = ARPHRD_PPP;
+ }
+
+ dev->mtu = card->wandev.mtu;
+ /* for API usage, add the API header size to the requested MTU size */
+ if(card->u.c.usedby == API) {
+ dev->mtu += sizeof(api_tx_hdr_t);
+ }
+
+ dev->hard_header_len = CHDLC_HDR_LEN;
+
+ /* Initialize hardware parameters */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = wandev->maddr;
+ dev->mem_end = wandev->maddr + wandev->msize - 1;
+
+	/* Set the transmit buffer queue length.
+	 * If it is too low, packets will not be
+	 * retransmitted by the stack.
+	 */
+ dev->tx_queue_len = 100;
+ SET_MODULE_OWNER(dev);
+
+ return 0;
+}
+
+/*============================================================================
+ * Open network interface.
+ * o enable communications and interrupts.
+ * o prevent module from unloading by incrementing use count
+ *
+ * Return 0 if O.k. or errno.
+ */
+static int if_open(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+ struct timeval tv;
+ int err = 0;
+
+ /* Only one open per interface is allowed */
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Initialize the work queue entry */
+ chdlc_priv_area->tq_working=0;
+
+ INIT_WORK(&chdlc_priv_area->common.wanpipe_work,
+ (void *)(void *)chdlc_work, dev);
+
+ /* Allocate and initialize BH circular buffer */
+	/* Add 1 to MAX_BH_BUFF so we don't have to test against (MAX_BH_BUFF-1) */
+	chdlc_priv_area->bh_head = kmalloc((sizeof(bh_data_t)*(MAX_BH_BUFF+1)),GFP_ATOMIC);
+	if (chdlc_priv_area->bh_head == NULL)
+		return -ENOMEM;
+	memset(chdlc_priv_area->bh_head,0,(sizeof(bh_data_t)*(MAX_BH_BUFF+1)));
+ atomic_set(&chdlc_priv_area->bh_buff_used, 0);
+
+ do_gettimeofday(&tv);
+ chdlc_priv_area->router_start_time = tv.tv_sec;
+
+ netif_start_queue(dev);
+
+ wanpipe_open(card);
+
+ /* TTY is configured during wanpipe_set_termios
+ * call, not here */
+ if (card->tty_opt)
+ return err;
+
+ set_bit(0,&chdlc_priv_area->config_chdlc);
+ chdlc_priv_area->config_chdlc_timeout=jiffies;
+
+	/* Start the CHDLC configuration after a 1 second delay.
+	 * This gives the interface initialization time
+	 * to finish its configuration */
+ mod_timer(&chdlc_priv_area->poll_delay_timer, jiffies + HZ);
+ return err;
+}
+
+/*============================================================================
+ * Close network interface.
+ * o if this is the last close, then disable communications and interrupts.
+ * o reset flags.
+ */
+static int if_close(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+
+ if (chdlc_priv_area->bh_head){
+ int i;
+ struct sk_buff *skb;
+
+ for (i=0; i<(MAX_BH_BUFF+1); i++){
+ skb = ((bh_data_t *)&chdlc_priv_area->bh_head[i])->skb;
+ if (skb != NULL){
+ dev_kfree_skb_any(skb);
+ }
+ }
+ kfree(chdlc_priv_area->bh_head);
+ chdlc_priv_area->bh_head=NULL;
+ }
+
+ netif_stop_queue(dev);
+ wanpipe_close(card);
+ del_timer(&chdlc_priv_area->poll_delay_timer);
+ return 0;
+}
+
+static void disable_comm (sdla_t *card)
+{
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+
+ if (card->u.c.comm_enabled){
+ chdlc_disable_comm_shutdown (card);
+ }else{
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ }
+
+ if (!tty_init_cnt)
+ return;
+
+ if (card->tty_opt){
+ struct serial_state * state;
+ if (!(--tty_init_cnt)){
+ int e1;
+ serial_driver.refcount=0;
+
+ if ((e1 = tty_unregister_driver(&serial_driver)))
+ printk("SERIAL: failed to unregister serial driver (%d)\n",
+ e1);
+ printk(KERN_INFO "%s: Unregistering TTY Driver, Major %i\n",
+ card->devname,WAN_TTY_MAJOR);
+ }
+ card->tty=NULL;
+ tty_card_map[card->tty_minor]=NULL;
+ state = &rs_table[card->tty_minor];
+ memset(state, 0, sizeof(*state));
+ }
+ return;
+}
+
+
+/*============================================================================
+ * Build media header.
+ *
+ * The trick here is to put packet type (Ethertype) into 'protocol' field of
+ * the socket buffer, so that we don't forget it. If packet type is not
+ * supported, set skb->protocol to 0 and discard packet later.
+ *
+ * Return: media header length.
+ */
+static int if_header(struct sk_buff* skb, struct net_device* dev,
+ unsigned short type, void* daddr, void* saddr,
+ unsigned len)
+{
+ skb->protocol = htons(type);
+
+ return CHDLC_HDR_LEN;
+}
+
+
+/*============================================================================
+ * Handle transmit timeout event from netif watchdog
+ */
+static void if_tx_timeout(struct net_device *dev)
+{
+ chdlc_private_area_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+	/* If our device stays busy for at least 5 seconds then we will
+	 * kick start it by waking up its transmit queue. We expect
+	 * that our device never stays busy for more than 5 seconds, so
+	 * this is only used as a last resort.
+	 */
+
+ ++card->wandev.stats.collisions;
+
+ printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
+ netif_wake_queue (dev);
+}
+
+
+
+/*============================================================================
+ * Re-build media header.
+ *
+ * Return: 1 physical address resolved.
+ * 0 physical address not resolved
+ */
+static int if_rebuild_hdr (struct sk_buff *skb)
+{
+ return 1;
+}
+
+
+/*============================================================================
+ * Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission) to block a timer-based
+ * transmit from overlapping.
+ * o check link state. If link is not up, then drop the packet.
+ * o execute adapter send command.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ * protocol stack and can be used for flow control with protocol layer.
+ */
+static int if_send(struct sk_buff* skb, struct net_device* dev)
+{
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+ sdla_t *card = chdlc_priv_area->card;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
+ int udp_type = 0;
+ unsigned long smp_flags;
+ int err=0;
+
+ netif_stop_queue(dev);
+
+ if (skb == NULL){
+		/* If we get here, some higher layer thinks we've missed a
+		 * tx-done interrupt.
+ */
+ printk(KERN_INFO "%s: interface %s got kicked!\n",
+ card->devname, dev->name);
+
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ if (ntohs(skb->protocol) != htons(PVC_PROT)){
+
+ /* check the udp packet type */
+
+ udp_type = udp_pkt_type(skb, card);
+
+ if (udp_type == UDP_CPIPE_TYPE){
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
+ chdlc_priv_area)){
+ chdlc_int->interrupt_permission |=
+ APP_INT_ON_TIMER;
+ }
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ /* check to see if the source IP address is a broadcast or */
+ /* multicast IP address */
+ if(chk_bcast_mcast_addr(card, dev, skb)){
+ ++card->wandev.stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ netif_start_queue(dev);
+ return 0;
+ }
+ }
+
+ /* Lock the 508 Card: SMP is supported */
+ if(card->hw.type != SDLA_S514){
+ s508_lock(card,&smp_flags);
+ }
+
+ if(test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+
+ printk(KERN_INFO "%s: Critical in if_send: %lx\n",
+ card->wandev.name,card->wandev.critical);
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+ goto if_send_exit_crit;
+ }
+
+ if(card->u.c.state != WAN_CONNECTED){
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+
+ }else if(!skb->protocol){
+ ++card->wandev.stats.tx_errors;
+ netif_start_queue(dev);
+
+ }else {
+ void* data = skb->data;
+ unsigned len = skb->len;
+ unsigned char attr;
+
+ /* If it's an API packet pull off the API
+ * header. Also check that the packet size
+ * is larger than the API header
+ */
+ if (card->u.c.usedby == API){
+ api_tx_hdr_t* api_tx_hdr;
+
+ /* discard the frame if we are configured for */
+ /* 'receive only' mode or if there is no data */
+ if (card->u.c.receive_only ||
+ (len <= sizeof(api_tx_hdr_t))) {
+
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+ goto if_send_exit_crit;
+ }
+
+ api_tx_hdr = (api_tx_hdr_t *)data;
+ attr = api_tx_hdr->attr;
+ data += sizeof(api_tx_hdr_t);
+ len -= sizeof(api_tx_hdr_t);
+ }
+
+ if(chdlc_send(card, data, len)) {
+ netif_stop_queue(dev);
+ }else{
+ ++card->wandev.stats.tx_packets;
+ card->wandev.stats.tx_bytes += len;
+
+ netif_start_queue(dev);
+
+ dev->trans_start = jiffies;
+ }
+ }
+
+if_send_exit_crit:
+
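+	/* If the queue is still running the skb was consumed (sent or
+	 * dropped), so free it; otherwise leave it for the stack to
+	 * retransmit and arm the transmit-complete interrupt. */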
+ if (!(err=netif_queue_stopped(dev))) {
+ dev_kfree_skb_any(skb);
+ }else{
+ chdlc_priv_area->tick_counter = jiffies;
+ chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
+ }
+
+ clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
+ if(card->hw.type != SDLA_S514){
+ s508_unlock(card,&smp_flags);
+ }
+
+ return err;
+}
+
+
+/*============================================================================
+ * Check to see if the packet to be transmitted contains a broadcast or
+ * multicast source IP address.
+ */
+
+static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
+ struct sk_buff *skb)
+{
+ u32 src_ip_addr;
+ u32 broadcast_ip_addr = 0;
+ struct in_device *in_dev;
+
+ /* read the IP source address from the outgoing packet */
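+	/* (offset 12 is the source-address field of the IP header) */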
+ src_ip_addr = *(u32 *)(skb->data + 12);
+
+ /* read the IP broadcast address for the device */
+ in_dev = dev->ip_ptr;
+ if(in_dev != NULL) {
+ struct in_ifaddr *ifa= in_dev->ifa_list;
+ if(ifa != NULL)
+ broadcast_ip_addr = ifa->ifa_broadcast;
+ else
+ return 0;
+ }
+
+ /* check if the IP Source Address is a Broadcast address */
+ if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
+ printk(KERN_INFO "%s: Broadcast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ /* check if the IP Source Address is a Multicast address */
+ if((ntohl(src_ip_addr) >= 0xE0000001) &&
+ (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
+ printk(KERN_INFO "%s: Multicast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/*============================================================================
+ * Reply to UDP Management system.
+ * Return length of reply.
+ */
+static int reply_udp( unsigned char *data, unsigned int mbox_len )
+{
+
+ unsigned short len, udp_length, temp, ip_length;
+ unsigned long ip_temp;
+ int even_bound = 0;
+ chdlc_udp_pkt_t *c_udp_pkt = (chdlc_udp_pkt_t *)data;
+
+ /* Set length of packet */
+ len = sizeof(ip_pkt_t)+
+ sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ sizeof(trace_info_t)+
+ mbox_len;
+
+ /* fill in UDP reply */
+ c_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
+
+ /* fill in UDP length */
+ udp_length = sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ sizeof(trace_info_t)+
+ mbox_len;
+
+ /* put it on an even boundary */
+ if ( udp_length & 0x0001 ) {
+ udp_length += 1;
+ len += 1;
+ even_bound = 1;
+ }
+
+ temp = (udp_length<<8)|(udp_length>>8);
+ c_udp_pkt->udp_pkt.udp_length = temp;
+
+ /* swap UDP ports */
+ temp = c_udp_pkt->udp_pkt.udp_src_port;
+ c_udp_pkt->udp_pkt.udp_src_port =
+ c_udp_pkt->udp_pkt.udp_dst_port;
+ c_udp_pkt->udp_pkt.udp_dst_port = temp;
+
+ /* add UDP pseudo header */
+ temp = 0x1100;
+ *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound)) = temp;
+ temp = (udp_length<<8)|(udp_length>>8);
+ *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound+2)) = temp;
+
+
+ /* calculate UDP checksum */
+ c_udp_pkt->udp_pkt.udp_checksum = 0;
+ c_udp_pkt->udp_pkt.udp_checksum = calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
+
+ /* fill in IP length */
+ ip_length = len;
+ temp = (ip_length<<8)|(ip_length>>8);
+ c_udp_pkt->ip_pkt.total_length = temp;
+
+ /* swap IP addresses */
+ ip_temp = c_udp_pkt->ip_pkt.ip_src_address;
+ c_udp_pkt->ip_pkt.ip_src_address = c_udp_pkt->ip_pkt.ip_dst_address;
+ c_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
+
+ /* fill in IP checksum */
+ c_udp_pkt->ip_pkt.hdr_checksum = 0;
+ c_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
+
+ return len;
+
+} /* reply_udp */
+
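+/*============================================================================
+ * Ones'-complement checksum over 'len' bytes, used for the IP header and
+ * UDP checksum of the management reply above (len is assumed to be even).
+ */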
+unsigned short calc_checksum (char *data, int len)
+{
+ unsigned short temp;
+ unsigned long sum=0;
+ int i;
+
+ for( i = 0; i <len; i+=2 ) {
+ memcpy(&temp,&data[i],2);
+ sum += (unsigned long)temp;
+ }
+
+ while (sum >> 16 ) {
+ sum = (sum & 0xffffUL) + (sum >> 16);
+ }
+
+ temp = (unsigned short)sum;
+ temp = ~temp;
+
+ if( temp == 0 )
+ temp = 0xffff;
+
+ return temp;
+}
+
+
+/*============================================================================
+ * Get ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats.
+ */
+static struct net_device_stats* if_stats(struct net_device* dev)
+{
+ sdla_t *my_card;
+ chdlc_private_area_t* chdlc_priv_area;
+
+ if ((chdlc_priv_area=dev->priv) == NULL)
+ return NULL;
+
+ my_card = chdlc_priv_area->card;
+ return &my_card->wandev.stats;
+}
+
+
+/****** Cisco HDLC Firmware Interface Functions *******************************/
+
+/*============================================================================
+ * Read firmware code version.
+ * Put code version as ASCII string in str.
+ */
+static int chdlc_read_version (sdla_t* card, char* str)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int len;
+ char err;
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ }
+ else if (str) { /* is not null */
+ len = mb->buffer_length;
+ memcpy(str, mb->data, len);
+ str[len] = '\0';
+ }
+ return (err);
+}
+
+/*-----------------------------------------------------------------------------
+ * Configure CHDLC firmware.
+ */
+static int chdlc_configure (sdla_t* card, void* data)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
+ int data_length = sizeof(CHDLC_CONFIGURATION_STRUCT);
+
+ mailbox->buffer_length = data_length;
+ memcpy(mailbox->data, data, data_length);
+ mailbox->command = SET_CHDLC_CONFIGURATION;
+ err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
+
+ if (err != COMMAND_OK) chdlc_error (card, err, mailbox);
+
+ return err;
+}
+
+
+/*============================================================================
+ * Set interrupt mode -- HDLC Version.
+ */
+
+static int chdlc_set_intr_mode (sdla_t* card, unsigned mode)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_INT_TRIGGERS_STRUCT* int_data =
+ (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
+ int err;
+
+ int_data->CHDLC_interrupt_triggers = mode;
+ int_data->IRQ = card->hw.irq;
+ int_data->interrupt_timer = 1;
+
+ mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
+ mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error (card, err, mb);
+ return err;
+}
+
+
+/*===========================================================
+ * chdlc_disable_comm_shutdown
+ *
+ * Shutdown() disables the communications. We must
+ * have a separate function, because we must not
+ * call the chdlc_error() handler since the private
+ * area has already been replaced */
+
+static int chdlc_disable_comm_shutdown (sdla_t *card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_INT_TRIGGERS_STRUCT* int_data =
+ (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
+ int err;
+
+	/* Disable Interrupts */
+ int_data->CHDLC_interrupt_triggers = 0;
+ int_data->IRQ = card->hw.irq;
+ int_data->interrupt_timer = 1;
+
+ mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
+ mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ /* Disable Communications */
+
+ if (card->u.c.async_mode) {
+ mb->command = DISABLE_ASY_COMMUNICATIONS;
+ }else{
+ mb->command = DISABLE_CHDLC_COMMUNICATIONS;
+ }
+
+ mb->buffer_length = 0;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ card->u.c.comm_enabled = 0;
+
+ return 0;
+}
+
+/*============================================================================
+ * Enable communications.
+ */
+
+static int chdlc_comm_enable (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = ENABLE_CHDLC_COMMUNICATIONS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card, err, mb);
+ else
+ card->u.c.comm_enabled = 1;
+
+ return err;
+}
+
+/*============================================================================
+ * Read communication error statistics.
+ */
+static int chdlc_read_comm_err_stats (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = READ_COMMS_ERROR_STATS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card,err,mb);
+ return err;
+}
+
+
+/*============================================================================
+ * Read CHDLC operational statistics.
+ */
+static int chdlc_read_op_stats (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_OPERATIONAL_STATS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card,err,mb);
+ return err;
+}
+
+
+/*============================================================================
+ * Update communications error and general packet statistics.
+ */
+static int update_comms_stats(sdla_t* card,
+ chdlc_private_area_t* chdlc_priv_area)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ COMMS_ERROR_STATS_STRUCT* err_stats;
+ CHDLC_OPERATIONAL_STATS_STRUCT *op_stats;
+
+ /* on the first timer interrupt, read the comms error statistics */
+ if(chdlc_priv_area->update_comms_stats == 2) {
+ if(chdlc_read_comm_err_stats(card))
+ return 1;
+ err_stats = (COMMS_ERROR_STATS_STRUCT *)mb->data;
+ card->wandev.stats.rx_over_errors =
+ err_stats->Rx_overrun_err_count;
+ card->wandev.stats.rx_crc_errors =
+ err_stats->CRC_err_count;
+ card->wandev.stats.rx_frame_errors =
+ err_stats->Rx_abort_count;
+ card->wandev.stats.rx_fifo_errors =
+ err_stats->Rx_dis_pri_bfrs_full_count;
+ card->wandev.stats.rx_missed_errors =
+ card->wandev.stats.rx_fifo_errors;
+ card->wandev.stats.tx_aborted_errors =
+ err_stats->sec_Tx_abort_count;
+ }
+
+ /* on the second timer interrupt, read the operational statistics */
+ else {
+ if(chdlc_read_op_stats(card))
+ return 1;
+ op_stats = (CHDLC_OPERATIONAL_STATS_STRUCT *)mb->data;
+ card->wandev.stats.rx_length_errors =
+ (op_stats->Rx_Data_discard_short_count +
+ op_stats->Rx_Data_discard_long_count);
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Send packet.
+ * Return: 0 - o.k.
+ * 1 - no transmit buffers available
+ */
+static int chdlc_send (sdla_t* card, void* data, unsigned len)
+{
+ CHDLC_DATA_TX_STATUS_EL_STRUCT *txbuf = card->u.c.txbuf;
+
+ if (txbuf->opp_flag)
+ return 1;
+
+ sdla_poke(&card->hw, txbuf->ptr_data_bfr, data, len);
+
+ txbuf->frame_length = len;
+ txbuf->opp_flag = 1; /* start transmission */
+
+ /* Update transmit buffer control fields */
+ card->u.c.txbuf = ++txbuf;
+
+ if ((void*)txbuf > card->u.c.txbuf_last)
+ card->u.c.txbuf = card->u.c.txbuf_base;
+
+ return 0;
+}
+
+/****** Firmware Error Handler **********************************************/
+
+/*============================================================================
+ * Firmware error handler.
+ * This routine is called whenever firmware command returns non-zero
+ * return code.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
+{
+ unsigned cmd = mb->command;
+
+ switch (err) {
+
+ case CMD_TIMEOUT:
+ printk(KERN_INFO "%s: command 0x%02X timed out!\n",
+ card->devname, cmd);
+ break;
+
+ case S514_BOTH_PORTS_SAME_CLK_MODE:
+ if(cmd == SET_CHDLC_CONFIGURATION) {
+ printk(KERN_INFO
+ "%s: Configure both ports for the same clock source\n",
+ card->devname);
+ break;
+ }
+
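+	/* for any other command, fall through to the generic error report */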
+ default:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
+ card->devname, cmd, err);
+ }
+
+ return 0;
+}
+
+
+/********** Bottom Half Handlers ********************************************/
+
+/* NOTE: There is no API/BH support for kernels lower than 2.2.x.
+ *	 DO NOT INSERT ANY CODE HERE; NOTE THE
+ *	 PREPROCESSOR STATEMENT ABOVE, UNLESS YOU KNOW WHAT YOU ARE
+ *	 DOING */
+
+static void chdlc_work(struct net_device * dev)
+{
+ chdlc_private_area_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+ struct sk_buff *skb;
+
+ if (atomic_read(&chan->bh_buff_used) == 0){
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+
+ while (atomic_read(&chan->bh_buff_used)){
+
+ skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
+
+ if (skb != NULL){
+
+ if (chan->common.sk == NULL || chan->common.func == NULL){
+ ++card->wandev.stats.rx_dropped;
+ dev_kfree_skb_any(skb);
+ chdlc_work_cleanup(dev);
+ continue;
+ }
+
+ if (chan->common.func(skb,dev,chan->common.sk) != 0){
+				/* Socket is full, cannot send; queue us
+				 * for another try */
+ atomic_set(&chan->common.receive_block,1);
+ return;
+ }else{
+ chdlc_work_cleanup(dev);
+ }
+ }else{
+ chdlc_work_cleanup(dev);
+ }
+ }
+ clear_bit(0, &chan->tq_working);
+
+ return;
+}
+
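+/*============================================================================
+ * Release the current BH ring entry and advance the read index.
+ */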
+static int chdlc_work_cleanup(struct net_device *dev)
+{
+ chdlc_private_area_t* chan = dev->priv;
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
+
+ if (chan->bh_read == MAX_BH_BUFF){
+ chan->bh_read=0;
+ }else{
+ ++chan->bh_read;
+ }
+
+ atomic_dec(&chan->bh_buff_used);
+ return 0;
+}
+
+
+
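+/*============================================================================
+ * Queue a received socket buffer on the BH circular buffer.
+ * Drops the frame and returns 1 if the ring is full, otherwise returns 0.
+ */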
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
+{
+ /* Check for full */
+ chdlc_private_area_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
+ ++card->wandev.stats.rx_dropped;
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
+
+ if (chan->bh_write == MAX_BH_BUFF){
+ chan->bh_write=0;
+ }else{
+ ++chan->bh_write;
+ }
+
+ atomic_inc(&chan->bh_buff_used);
+
+ return 0;
+}
+
+/* END OF API BH Support */
+
+
+/****** Interrupt Handlers **************************************************/
+
+/*============================================================================
+ * Cisco HDLC interrupt service routine.
+ */
+static void wpc_isr (sdla_t* card)
+{
+ struct net_device* dev;
+ SHARED_MEMORY_INFO_STRUCT* flags = NULL;
+ int i;
+ sdla_t *my_card;
+
+
+ /* Check for which port the interrupt has been generated
+ * Since Secondary Port is piggybacking on the Primary
+ * the check must be done here.
+ */
+
+ flags = card->u.c.flags;
+ if (!flags->interrupt_info_struct.interrupt_type){
+ /* Check for a second port (piggybacking) */
+ if ((my_card = card->next)){
+ flags = my_card->u.c.flags;
+ if (flags->interrupt_info_struct.interrupt_type){
+ card = my_card;
+ card->isr(card);
+ return;
+ }
+ }
+ }
+
+ flags = card->u.c.flags;
+ card->in_isr = 1;
+ dev = card->wandev.dev;
+
+ /* If we get an interrupt with no network device, stop the interrupts
+ * and issue an error */
+ if (!card->tty_opt && !dev &&
+ flags->interrupt_info_struct.interrupt_type !=
+ COMMAND_COMPLETE_APP_INT_PEND){
+
+ goto isr_done;
+ }
+
+	/* If critical due to peripheral operations,
+	 * i.e. update() or getstats(), then reset the interrupt and
+	 * wait for the board to retrigger.
+	 */
+ if(test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
+ printk(KERN_INFO "ISR CRIT TO PERI\n");
+ goto isr_done;
+ }
+
+ /* On a 508 Card, if critical due to if_send
+ * Major Error !!! */
+ if(card->hw.type != SDLA_S514) {
+ if(test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+ printk(KERN_INFO "%s: Critical while in ISR: %lx\n",
+ card->devname, card->wandev.critical);
+ card->in_isr = 0;
+ flags->interrupt_info_struct.interrupt_type = 0;
+ return;
+ }
+ }
+
+ switch(flags->interrupt_info_struct.interrupt_type) {
+
+ case RX_APP_INT_PEND: /* 0x01: receive interrupt */
+ rx_intr(card);
+ break;
+
+ case TX_APP_INT_PEND: /* 0x02: transmit interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TX_FRAME;
+
+ if (card->tty_opt){
+ wanpipe_tty_trigger_poll(card);
+ break;
+ }
+
+ if (dev && netif_queue_stopped(dev)){
+ if (card->u.c.usedby == API){
+ netif_start_queue(dev);
+ wakeup_sk_bh(dev);
+ }else{
+ netif_wake_queue(dev);
+ }
+ }
+ break;
+
+ case COMMAND_COMPLETE_APP_INT_PEND:/* 0x04: cmd cplt */
+ ++ Intr_test_counter;
+ break;
+
+ case CHDLC_EXCEP_COND_APP_INT_PEND: /* 0x20 */
+ process_chdlc_exception(card);
+ break;
+
+ case GLOBAL_EXCEP_COND_APP_INT_PEND:
+ process_global_exception(card);
+ break;
+
+ case TIMER_APP_INT_PEND:
+ timer_intr(card);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
+ card->devname,
+ flags->interrupt_info_struct.interrupt_type);
+ printk(KERN_INFO "Code name: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codename[i]);
+ printk(KERN_INFO "\nCode version: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codeversion[i]);
+ printk(KERN_INFO "\n");
+ break;
+ }
+
+isr_done:
+
+ card->in_isr = 0;
+ flags->interrupt_info_struct.interrupt_type = 0;
+ return;
+}
+
+/*============================================================================
+ * Receive interrupt handler.
+ */
+static void rx_intr (sdla_t* card)
+{
+ struct net_device *dev;
+ chdlc_private_area_t *chdlc_priv_area;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ CHDLC_DATA_RX_STATUS_EL_STRUCT *rxbuf = card->u.c.rxmb;
+ struct sk_buff *skb;
+ unsigned len;
+ unsigned addr = rxbuf->ptr_data_bfr;
+ void *buf;
+ int i,udp_type;
+
+ if (rxbuf->opp_flag != 0x01) {
+ printk(KERN_INFO
+ "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
+ card->devname, (unsigned)rxbuf, rxbuf->opp_flag);
+ printk(KERN_INFO "Code name: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codename[i]);
+ printk(KERN_INFO "\nCode version: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codeversion[i]);
+ printk(KERN_INFO "\n");
+
+
+ /* Bug Fix: Mar 6 2000
+		 * If we get a corrupted mailbox, it means that the driver
+ * is out of sync with the firmware. There is no recovery.
+ * If we don't turn off all interrupts for this card
+ * the machine will crash.
+ */
+ printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
+ printk(KERN_INFO "Please contact Sangoma Technologies !\n");
+ chdlc_set_intr_mode(card,0);
+ return;
+ }
+
+ len = rxbuf->frame_length;
+
+ if (card->tty_opt){
+
+ if (rxbuf->error_flag){
+ goto rx_exit;
+ }
+
+ if (len <= CRC_LENGTH){
+ goto rx_exit;
+ }
+
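+		/* In sync mode the received frame includes a trailing CRC;
+		 * strip it before handing the data to the TTY layer. */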
+ if (!card->u.c.async_mode){
+ len -= CRC_LENGTH;
+ }
+
+ wanpipe_tty_receive(card,addr,len);
+ goto rx_exit;
+ }
+
+ dev = card->wandev.dev;
+
+ if (!dev){
+ goto rx_exit;
+ }
+
+ if (!netif_running(dev))
+ goto rx_exit;
+
+ chdlc_priv_area = dev->priv;
+
+
+ /* Allocate socket buffer */
+ skb = dev_alloc_skb(len);
+
+ if (skb == NULL) {
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ ++card->wandev.stats.rx_dropped;
+ goto rx_exit;
+ }
+
+ /* Copy data to the socket buffer */
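+	/* If the frame wraps past the top of the on-board receive buffer,
+	 * copy the part up to rx_top first and continue from rx_base. */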
+ if((addr + len) > card->u.c.rx_top + 1) {
+ unsigned tmp = card->u.c.rx_top - addr + 1;
+ buf = skb_put(skb, tmp);
+ sdla_peek(&card->hw, addr, buf, tmp);
+ addr = card->u.c.rx_base;
+ len -= tmp;
+ }
+
+ buf = skb_put(skb, len);
+ sdla_peek(&card->hw, addr, buf, len);
+
+ skb->protocol = htons(ETH_P_IP);
+
+ card->wandev.stats.rx_packets ++;
+ card->wandev.stats.rx_bytes += skb->len;
+ udp_type = udp_pkt_type( skb, card );
+
+ if(udp_type == UDP_CPIPE_TYPE) {
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK,
+ card, skb, dev, chdlc_priv_area)) {
+ flags->interrupt_info_struct.
+ interrupt_permission |=
+ APP_INT_ON_TIMER;
+ }
+ } else if(card->u.c.usedby == API) {
+
+ api_rx_hdr_t* api_rx_hdr;
+ skb_push(skb, sizeof(api_rx_hdr_t));
+ api_rx_hdr = (api_rx_hdr_t*)&skb->data[0x00];
+ api_rx_hdr->error_flag = rxbuf->error_flag;
+ api_rx_hdr->time_stamp = rxbuf->time_stamp;
+
+ skb->protocol = htons(PVC_PROT);
+ skb->mac.raw = skb->data;
+ skb->dev = dev;
+ skb->pkt_type = WAN_PACKET_DATA;
+
+ bh_enqueue(dev, skb);
+
+ if (!test_and_set_bit(0,&chdlc_priv_area->tq_working))
+ wanpipe_queue_work(&chdlc_priv_area->common.wanpipe_work);
+ }else{
+ /* FIXME: we should check to see if the received packet is a
+ multicast packet so that we can increment the multicast
+ statistic
+ ++ chdlc_priv_area->if_stats.multicast;
+ */
+ /* Pass it up the protocol stack */
+
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+
+rx_exit:
+ /* Release buffer element and calculate a pointer to the next one */
+ rxbuf->opp_flag = 0x00;
+ card->u.c.rxmb = ++ rxbuf;
+ if((void*)rxbuf > card->u.c.rxbuf_last){
+ card->u.c.rxmb = card->u.c.rxbuf_base;
+ }
+}
+
+/*============================================================================
+ * Timer interrupt handler.
+ * The timer interrupt is used for two purposes:
+ * 1) Processing udp calls from 'cpipemon'.
+ * 2) Reading board-level statistics for updating the proc file system.
+ */
+void timer_intr(sdla_t *card)
+{
+ struct net_device* dev;
+ chdlc_private_area_t* chdlc_priv_area = NULL;
+ SHARED_MEMORY_INFO_STRUCT* flags = NULL;
+
+ if ((dev = card->wandev.dev)==NULL){
+ flags = card->u.c.flags;
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TIMER;
+ return;
+ }
+
+ chdlc_priv_area = dev->priv;
+
+ if (chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG) {
+ if (!config_chdlc(card)){
+ chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
+ }
+ }
+
+ /* process a udp call if pending */
+ if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP) {
+ process_udp_mgmt_pkt(card, dev,
+ chdlc_priv_area);
+ chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
+ }
+
+ /* read the communications statistics if required */
+ if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
+ update_comms_stats(card, chdlc_priv_area);
+ if(!(-- chdlc_priv_area->update_comms_stats)) {
+ chdlc_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ }
+ }
+
+ /* only disable the timer interrupt if there are no udp or statistic */
+ /* updates pending */
+ if(!chdlc_priv_area->timer_int_enabled) {
+ flags = card->u.c.flags;
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TIMER;
+ }
+}
+
+/*------------------------------------------------------------------------------
+ Miscellaneous Functions
+ - set_chdlc_config() used to set configuration options on the board
+------------------------------------------------------------------------------*/
+
+static int set_chdlc_config(sdla_t* card)
+{
+ CHDLC_CONFIGURATION_STRUCT cfg;
+
+ memset(&cfg, 0, sizeof(CHDLC_CONFIGURATION_STRUCT));
+
+ if(card->wandev.clocking){
+ cfg.baud_rate = card->wandev.bps;
+ }
+
+ cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
+ INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
+
+ cfg.modem_config_options = 0;
+ cfg.modem_status_timer = 100;
+
+ cfg.CHDLC_protocol_options = card->u.c.protocol_options;
+
+ if (card->tty_opt){
+ cfg.CHDLC_API_options = DISCARD_RX_ERROR_FRAMES;
+ }
+
+ cfg.percent_data_buffer_for_Tx = (card->u.c.receive_only) ? 0 : 50;
+ cfg.CHDLC_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
+ CHDLC_RX_DATA_BYTE_COUNT_STAT);
+
+ if (card->tty_opt){
+ card->wandev.mtu = TTY_CHDLC_MAX_MTU;
+ }
+ cfg.max_CHDLC_data_field_length = card->wandev.mtu;
+ cfg.transmit_keepalive_timer = card->u.c.kpalv_tx;
+ cfg.receive_keepalive_timer = card->u.c.kpalv_rx;
+ cfg.keepalive_error_tolerance = card->u.c.kpalv_err;
+ cfg.SLARP_request_timer = card->u.c.slarp_timer;
+
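+	/* With a non-zero SLARP timer the addresses are discovered via
+	 * SLARP, so no static IP is programmed; otherwise hand the locally
+	 * configured address and netmask down to the firmware. */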
+ if (cfg.SLARP_request_timer) {
+ cfg.IP_address = 0;
+ cfg.IP_netmask = 0;
+
+ }else if (card->wandev.dev){
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+
+ struct in_device *in_dev = dev->ip_ptr;
+
+ if(in_dev != NULL) {
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ if (ifa != NULL ) {
+ cfg.IP_address = ntohl(ifa->ifa_local);
+ cfg.IP_netmask = ntohl(ifa->ifa_mask);
+ chdlc_priv_area->IP_address = ntohl(ifa->ifa_local);
+ chdlc_priv_area->IP_netmask = ntohl(ifa->ifa_mask);
+ }
+ }
+
+ /* FIXME: We must re-think this message in next release
+ if((cfg.IP_address & 0x000000FF) > 2) {
+ printk(KERN_WARNING "\n");
+ printk(KERN_WARNING " WARNING:%s configured with an\n",
+ card->devname);
+ printk(KERN_WARNING " invalid local IP address.\n");
+ printk(KERN_WARNING " Slarp pragmatics will fail.\n");
+ printk(KERN_WARNING " IP address should be of the\n");
+ printk(KERN_WARNING " format A.B.C.1 or A.B.C.2.\n");
+ }
+ */
+ }
+
+ return chdlc_configure(card, &cfg);
+}
+
+
+/*-----------------------------------------------------------------------------
+ set_asy_config() used to set asynchronous configuration options on the board
+------------------------------------------------------------------------------*/
+
+static int set_asy_config(sdla_t* card)
+{
+
+ ASY_CONFIGURATION_STRUCT cfg;
+ CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
+ int err;
+
+ memset(&cfg, 0, sizeof(ASY_CONFIGURATION_STRUCT));
+
+ if(card->wandev.clocking)
+ cfg.baud_rate = card->wandev.bps;
+
+ cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
+ INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
+
+ cfg.modem_config_options = 0;
+ cfg.asy_API_options = card->u.c.api_options;
+ cfg.asy_protocol_options = card->u.c.protocol_options;
+ cfg.Tx_bits_per_char = card->u.c.tx_bits_per_char;
+ cfg.Rx_bits_per_char = card->u.c.rx_bits_per_char;
+ cfg.stop_bits = card->u.c.stop_bits;
+ cfg.parity = card->u.c.parity;
+ cfg.break_timer = card->u.c.break_timer;
+ cfg.asy_Rx_inter_char_timer = card->u.c.inter_char_timer;
+ cfg.asy_Rx_complete_length = card->u.c.rx_complete_length;
+ cfg.XON_char = card->u.c.xon_char;
+ cfg.XOFF_char = card->u.c.xoff_char;
+ cfg.asy_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
+ CHDLC_RX_DATA_BYTE_COUNT_STAT);
+
+ mailbox->buffer_length = sizeof(ASY_CONFIGURATION_STRUCT);
+ memcpy(mailbox->data, &cfg, mailbox->buffer_length);
+ mailbox->command = SET_ASY_CONFIGURATION;
+ err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error (card, err, mailbox);
+ return err;
+}
+
+/*============================================================================
+ * Enable asynchronous communications.
+ */
+
+static int asy_comm_enable (sdla_t* card)
+{
+
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = ENABLE_ASY_COMMUNICATIONS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK && card->wandev.dev)
+ chdlc_error(card, err, mb);
+
+ if (!err)
+ card->u.c.comm_enabled = 1;
+
+ return err;
+}
+
+/*============================================================================
+ * Process global exception condition
+ */
+static int process_global_exception(sdla_t *card)
+{
+ CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
+ int err;
+
+ mbox->buffer_length = 0;
+ mbox->command = READ_GLOBAL_EXCEPTION_CONDITION;
+ err = sdla_exec(mbox) ? mbox->return_code : CMD_TIMEOUT;
+
+ if(err != CMD_TIMEOUT ){
+
+ switch(mbox->return_code) {
+
+ case EXCEP_MODEM_STATUS_CHANGE:
+
+ printk(KERN_INFO "%s: Modem status change\n",
+ card->devname);
+
+ switch(mbox->data[0] & (DCD_HIGH | CTS_HIGH)) {
+ case (DCD_HIGH):
+ printk(KERN_INFO "%s: DCD high, CTS low\n",card->devname);
+ break;
+ case (CTS_HIGH):
+ printk(KERN_INFO "%s: DCD low, CTS high\n",card->devname);
+ break;
+ case ((DCD_HIGH | CTS_HIGH)):
+ printk(KERN_INFO "%s: DCD high, CTS high\n",card->devname);
+ break;
+ default:
+ printk(KERN_INFO "%s: DCD low, CTS low\n",card->devname);
+ break;
+ }
+ break;
+
+ case EXCEP_TRC_DISABLED:
+ printk(KERN_INFO "%s: Line trace disabled\n",
+ card->devname);
+ break;
+
+ case EXCEP_IRQ_TIMEOUT:
+ printk(KERN_INFO "%s: IRQ timeout occurred\n",
+ card->devname);
+ break;
+
+ case 0x17:
+ if (card->tty_opt){
+ if (card->tty && card->tty_open){
+ printk(KERN_INFO
+ "%s: Modem Hangup Exception: Hanging Up!\n",
+ card->devname);
+ tty_hangup(card->tty);
+ }
+ break;
+ }
+
+		/* If TTY is not used just drop through */
+
+ default:
+ printk(KERN_INFO "%s: Global exception %x\n",
+ card->devname, mbox->return_code);
+ break;
+ }
+ }
+ return 0;
+}
+
+
+/*============================================================================
+ * Process chdlc exception condition
+ */
+static int process_chdlc_exception(sdla_t *card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int err;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_EXCEPTION_CONDITION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if(err != CMD_TIMEOUT) {
+
+ switch (err) {
+
+ case EXCEP_LINK_ACTIVE:
+ port_set_state(card, WAN_CONNECTED);
+ trigger_chdlc_poll(card->wandev.dev);
+ break;
+
+ case EXCEP_LINK_INACTIVE_MODEM:
+ port_set_state(card, WAN_DISCONNECTED);
+ unconfigure_ip(card);
+ trigger_chdlc_poll(card->wandev.dev);
+ break;
+
+ case EXCEP_LINK_INACTIVE_KPALV:
+ port_set_state(card, WAN_DISCONNECTED);
+ printk(KERN_INFO "%s: Keepalive timer expired.\n",
+ card->devname);
+ unconfigure_ip(card);
+ trigger_chdlc_poll(card->wandev.dev);
+ break;
+
+ case EXCEP_IP_ADDRESS_DISCOVERED:
+ if (configure_ip(card))
+ return -1;
+ break;
+
+ case EXCEP_LOOPBACK_CONDITION:
+ printk(KERN_INFO "%s: Loopback Condition Detected.\n",
+ card->devname);
+ break;
+
+ case NO_CHDLC_EXCEP_COND_TO_REPORT:
+ printk(KERN_INFO "%s: No exceptions reported.\n",
+ card->devname);
+ break;
+ }
+
+ }
+ return 0;
+}
+
+
+/*============================================================================
+ * Configure IP from SLARP negotiation
+ * This adds dynamic routes when SLARP has provided valid addresses
+ */
+
+static int configure_ip (sdla_t* card)
+{
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area;
+ char err;
+
+ if (!dev)
+ return 0;
+
+ chdlc_priv_area = dev->priv;
+
+
+ /* set to discover */
+ if(card->u.c.slarp_timer != 0x00) {
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_CONFIGURATION_STRUCT *cfg;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ return -1;
+ }
+
+ cfg = (CHDLC_CONFIGURATION_STRUCT *)mb->data;
+ chdlc_priv_area->IP_address = cfg->IP_address;
+ chdlc_priv_area->IP_netmask = cfg->IP_netmask;
+
+ /* Set flag to add route */
+ chdlc_priv_area->route_status = ADD_ROUTE;
+
+ /* The idea here is to add the route in the poll routine.
+ This way, we aren't in interrupt context when adding routes */
+ trigger_chdlc_poll(dev);
+ }
+
+ return 0;
+}
+
+
+/*============================================================================
+ * Un-Configure IP negotiated by SLARP
+ * This removes dynamic routes when the link becomes inactive.
+ */
+
+static int unconfigure_ip (sdla_t* card)
+{
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area;
+
+ if (!dev)
+ return 0;
+
+ chdlc_priv_area= dev->priv;
+
+ if (chdlc_priv_area->route_status == ROUTE_ADDED) {
+
+ /* Note: If this function is called, the
+ * port state has been DISCONNECTED. This state
+ * change will trigger a poll_disconnected
+ * function, that will check for this condition.
+ */
+ chdlc_priv_area->route_status = REMOVE_ROUTE;
+
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Routine to add/remove routes
+ * Called like a polling routine when Routes are flagged to be added/removed.
+ */
+
+static void process_route (sdla_t *card)
+{
+ struct net_device *dev = card->wandev.dev;
+ unsigned char port_num;
+ chdlc_private_area_t *chdlc_priv_area = NULL;
+ u32 local_IP_addr = 0;
+ u32 remote_IP_addr = 0;
+ u32 IP_netmask, IP_addr;
+ int err = 0;
+ struct in_device *in_dev;
+ mm_segment_t fs;
+ struct ifreq if_info;
+ struct sockaddr_in *if_data1, *if_data2;
+
+ chdlc_priv_area = dev->priv;
+ port_num = card->u.c.comm_port;
+
+ /* Bug Fix Mar 16 2000
+ * AND the IP address to the Mask before checking
+ * the last two bits. */
+
+ if((chdlc_priv_area->route_status == ADD_ROUTE) &&
+ ((chdlc_priv_area->IP_address & ~chdlc_priv_area->IP_netmask) > 2)) {
+
+ printk(KERN_INFO "%s: Dynamic route failure.\n",card->devname);
+
+ if(card->u.c.slarp_timer) {
+ u32 addr_net = htonl(chdlc_priv_area->IP_address);
+
+ printk(KERN_INFO "%s: Bad IP address %u.%u.%u.%u received\n",
+ card->devname,
+ NIPQUAD(addr_net));
+ printk(KERN_INFO "%s: from remote station.\n",
+ card->devname);
+
+ }else{
+ u32 addr_net = htonl(chdlc_priv_area->IP_address);
+
+ printk(KERN_INFO "%s: Bad IP address %u.%u.%u.%u issued\n",
+ card->devname,
+ NIPQUAD(addr_net));
+ printk(KERN_INFO "%s: to remote station. Local\n",
+ card->devname);
+ printk(KERN_INFO "%s: IP address must be A.B.C.1\n",
+ card->devname);
+ printk(KERN_INFO "%s: or A.B.C.2.\n",card->devname);
+ }
+
+ /* remove the route due to the IP address error condition */
+ chdlc_priv_area->route_status = REMOVE_ROUTE;
+ err = 1;
+ }
+
+ /* If we are removing a route with bad IP addressing, then use the */
+ /* locally configured IP addresses */
+ if((chdlc_priv_area->route_status == REMOVE_ROUTE) && err) {
+
+ /* do not remove a bad route that has already been removed */
+ if(chdlc_priv_area->route_removed) {
+ return;
+ }
+
+ in_dev = dev->ip_ptr;
+
+ if(in_dev != NULL) {
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+ if (ifa != NULL ) {
+ local_IP_addr = ifa->ifa_local;
+ IP_netmask = ifa->ifa_mask;
+ }
+ }
+ }else{
+ /* According to Cisco HDLC, if the point-to-point address is
+ A.B.C.1, then we are the opposite (A.B.C.2), and vice-versa.
+ */
+ IP_netmask = ntohl(chdlc_priv_area->IP_netmask);
+ remote_IP_addr = ntohl(chdlc_priv_area->IP_address);
+
+
+ /* If Netmask is 255.255.255.255 the local address
+ * calculation will fail. Default it back to 255.255.255.0 */
+ if (IP_netmask == 0xffffffff)
+ IP_netmask &= 0x00ffffff;
+
+ /* Bug Fix Mar 16 2000
+ * AND the Remote IP address with IP netmask, instead
+ * of static netmask of 255.255.255.0 */
+ local_IP_addr = (remote_IP_addr & IP_netmask) +
+ (~remote_IP_addr & ntohl(0x0003));
+
+ if(!card->u.c.slarp_timer) {
+ IP_addr = local_IP_addr;
+ local_IP_addr = remote_IP_addr;
+ remote_IP_addr = IP_addr;
+ }
+ }
+
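+ /* devinet_ioctl() expects user-space pointers, so temporarily widen
+ * the address limit to the kernel data segment for the calls below */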
+ fs = get_fs(); /* Save file system */
+ set_fs(get_ds()); /* Get user space block */
+
+ /* Setup a structure for adding/removing routes */
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+
+ switch (chdlc_priv_area->route_status) {
+
+ case ADD_ROUTE:
+
+ if(!card->u.c.slarp_timer) {
+ if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data2->sin_addr.s_addr = remote_IP_addr;
+ if_data2->sin_family = AF_INET;
+ err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
+ } else {
+ if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
+ if_data1->sin_addr.s_addr = local_IP_addr;
+ if_data1->sin_family = AF_INET;
+ if(!(err = devinet_ioctl(SIOCSIFADDR, &if_info))){
+ if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data2->sin_addr.s_addr = remote_IP_addr;
+ if_data2->sin_family = AF_INET;
+ err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
+ }
+ }
+
+ if(err) {
+ printk(KERN_INFO "%s: Add route %u.%u.%u.%u failed (%d)\n",
+ card->devname, NIPQUAD(remote_IP_addr), err);
+ } else {
+ ((chdlc_private_area_t *)dev->priv)->route_status = ROUTE_ADDED;
+ printk(KERN_INFO "%s: Dynamic route added.\n",
+ card->devname);
+ printk(KERN_INFO "%s: Local IP addr : %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(local_IP_addr));
+ printk(KERN_INFO "%s: Remote IP addr: %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(remote_IP_addr));
+ chdlc_priv_area->route_removed = 0;
+ }
+ break;
+
+
+ case REMOVE_ROUTE:
+
+ /* Change the local ip address of the interface to 0.
+ * This will also delete the destination route.
+ */
+ if(!card->u.c.slarp_timer) {
+ if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data2->sin_addr.s_addr = 0;
+ if_data2->sin_family = AF_INET;
+ err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
+ } else {
+ if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
+ if_data1->sin_addr.s_addr = 0;
+ if_data1->sin_family = AF_INET;
+ err = devinet_ioctl(SIOCSIFADDR,&if_info);
+
+ }
+ if(err) {
+ printk(KERN_INFO
+ "%s: Remove route %u.%u.%u.%u failed, (err %d)\n",
+ card->devname, NIPQUAD(remote_IP_addr),
+ err);
+ } else {
+ ((chdlc_private_area_t *)dev->priv)->route_status =
+ NO_ROUTE;
+ printk(KERN_INFO "%s: Dynamic route removed: %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(local_IP_addr));
+ chdlc_priv_area->route_removed = 1;
+ }
+ break;
+ }
+
+ set_fs(fs); /* Restore file system */
+
+}
+
+
+/*=============================================================================
+ * Store a UDP management packet for later processing.
+ */
+
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area)
+{
+ int udp_pkt_stored = 0;
+
+ if(!chdlc_priv_area->udp_pkt_lgth &&
+ (skb->len <= MAX_LGTH_UDP_MGNT_PKT)) {
+ chdlc_priv_area->udp_pkt_lgth = skb->len;
+ chdlc_priv_area->udp_pkt_src = udp_pkt_src;
+ memcpy(chdlc_priv_area->udp_pkt_data, skb->data, skb->len);
+ chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UDP;
+ udp_pkt_stored = 1;
+ }
+
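+ /* The skb is always consumed here; the stored copy (if any) is
+ * processed later from the timer interrupt routine */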
+ dev_kfree_skb_any(skb);
+
+ return(udp_pkt_stored);
+}
+
+
+/*=============================================================================
+ * Process UDP management packet.
+ */
+
+static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area )
+{
+ unsigned char *buf;
+ unsigned int frames, len;
+ struct sk_buff *new_skb;
+ unsigned short buffer_length, real_len;
+ unsigned long data_ptr;
+ unsigned data_length;
+ int udp_mgmt_req_valid = 1;
+ CHDLC_MAILBOX_STRUCT *mb = card->mbox;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ chdlc_udp_pkt_t *chdlc_udp_pkt;
+ struct timeval tv;
+ int err;
+ char ut_char;
+
+ chdlc_udp_pkt = (chdlc_udp_pkt_t *) chdlc_priv_area->udp_pkt_data;
+
+ if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK){
+
+ /* Only these commands are supported for remote debugging;
+ * all others are rejected. */
+ switch(chdlc_udp_pkt->cblock.command) {
+
+ case READ_GLOBAL_STATISTICS:
+ case READ_MODEM_STATUS:
+ case READ_CHDLC_LINK_STATUS:
+ case CPIPE_ROUTER_UP_TIME:
+ case READ_COMMS_ERROR_STATS:
+ case READ_CHDLC_OPERATIONAL_STATS:
+
+ /* These two commands are executed for
+ * each request */
+ case READ_CHDLC_CONFIGURATION:
+ case READ_CHDLC_CODE_VERSION:
+ udp_mgmt_req_valid = 1;
+ break;
+ default:
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+ }
+
+ if(!udp_mgmt_req_valid) {
+
+ /* set length to 0 */
+ chdlc_udp_pkt->cblock.buffer_length = 0;
+
+ /* set return code */
+ chdlc_udp_pkt->cblock.return_code = 0xCD;
+
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Warning, Illegal UDP command attempted from network: %x\n",
+ card->devname,chdlc_udp_pkt->cblock.command);
+ }
+
+ } else {
+ unsigned long trace_status_cfg_addr = 0;
+ TRACE_STATUS_EL_CFG_STRUCT trace_cfg_struct;
+ TRACE_STATUS_ELEMENT_STRUCT trace_element_struct;
+
+ switch(chdlc_udp_pkt->cblock.command) {
+
+ case CPIPE_ENABLE_TRACING:
+ if (!chdlc_priv_area->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+
+ mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
+ mb->command = SET_TRACE_CONFIGURATION;
+
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_config = TRACE_ACTIVE;
+ /* Trace delay mode is not used because it slows
+ down transfer and results in a standoff situation
+ when there is a lot of data */
+
+ /* Configure the Trace based on user inputs */
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->trace_config |=
+ chdlc_udp_pkt->data[0];
+
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_deactivation_timer = 4000;
+
+
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ card->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = err;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ /* Get the base address of the trace element list */
+ mb->buffer_length = 0;
+ mb->command = READ_TRACE_CONFIGURATION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if (err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ chdlc_priv_area->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = err;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ trace_status_cfg_addr =((LINE_TRACE_CONFIG_STRUCT *)
+ mb->data) -> ptr_trace_stat_el_cfg_struct;
+
+ sdla_peek(&card->hw, trace_status_cfg_addr,
+ &trace_cfg_struct, sizeof(trace_cfg_struct));
+
+ chdlc_priv_area->start_trace_addr = trace_cfg_struct.
+ base_addr_trace_status_elements;
+
+ chdlc_priv_area->number_trace_elements =
+ trace_cfg_struct.number_trace_status_elements;
+
+ chdlc_priv_area->end_trace_addr = (unsigned long)
+ ((TRACE_STATUS_ELEMENT_STRUCT *)
+ chdlc_priv_area->start_trace_addr +
+ (chdlc_priv_area->number_trace_elements - 1));
+
+ chdlc_priv_area->base_addr_trace_buffer =
+ trace_cfg_struct.base_addr_trace_buffer;
+
+ chdlc_priv_area->end_addr_trace_buffer =
+ trace_cfg_struct.end_addr_trace_buffer;
+
+ chdlc_priv_area->curr_trace_addr =
+ trace_cfg_struct.next_trace_element_to_use;
+
+ chdlc_priv_area->available_buffer_space = 2000 -
+ sizeof(ip_pkt_t) -
+ sizeof(udp_pkt_t) -
+ sizeof(wp_mgmt_t) -
+ sizeof(cblock_t) -
+ sizeof(trace_info_t);
+ }
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ mb->buffer_length = 0;
+ chdlc_priv_area->TracingEnabled = 1;
+ break;
+
+
+ case CPIPE_DISABLE_TRACING:
+ if (chdlc_priv_area->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+ mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
+ mb->command = SET_TRACE_CONFIGURATION;
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_config = TRACE_INACTIVE;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ }
+
+ chdlc_priv_area->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ mb->buffer_length = 0;
+ break;
+
+
+ case CPIPE_GET_TRACE_INFO:
+
+ if (!chdlc_priv_area->TracingEnabled) {
+ chdlc_udp_pkt->cblock.return_code = 1;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ chdlc_udp_pkt->trace_info.ismoredata = 0x00;
+ buffer_length = 0; /* offset of packet already occupied */
+
+ for (frames=0; frames < chdlc_priv_area->number_trace_elements; frames++){
+
+ trace_pkt_t *trace_pkt = (trace_pkt_t *)
+ &chdlc_udp_pkt->data[buffer_length];
+
+ sdla_peek(&card->hw, chdlc_priv_area->curr_trace_addr,
+ (unsigned char *)&trace_element_struct,
+ sizeof(TRACE_STATUS_ELEMENT_STRUCT));
+
+ if (trace_element_struct.opp_flag == 0x00) {
+ break;
+ }
+
+ /* get pointer to real data */
+ data_ptr = trace_element_struct.ptr_data_bfr;
+
+ /* See if there is actual data on the trace buffer */
+ if (data_ptr){
+ data_length = trace_element_struct.trace_length;
+ }else{
+ data_length = 0;
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ }
+
+ if( (chdlc_priv_area->available_buffer_space - buffer_length)
+ < ( sizeof(trace_pkt_t) + data_length) ) {
+
+ /* indicate there are more frames on board & exit */
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ break;
+ }
+
+ trace_pkt->status = trace_element_struct.trace_type;
+
+ trace_pkt->time_stamp =
+ trace_element_struct.trace_time_stamp;
+
+ trace_pkt->real_length =
+ trace_element_struct.trace_length;
+
+ /* see if we can fit the frame into the user buffer */
+ real_len = trace_pkt->real_length;
+
+ if (data_ptr == 0) {
+ trace_pkt->data_avail = 0x00;
+ } else {
+ unsigned tmp = 0;
+
+ /* get the data from circular buffer
+ must check for end of buffer */
+ trace_pkt->data_avail = 0x01;
+
+ if ((data_ptr + real_len) >
+ chdlc_priv_area->end_addr_trace_buffer + 1){
+
+ tmp = chdlc_priv_area->end_addr_trace_buffer - data_ptr + 1;
+ sdla_peek(&card->hw, data_ptr,
+ trace_pkt->data,tmp);
+ data_ptr = chdlc_priv_area->base_addr_trace_buffer;
+ }
+
+ sdla_peek(&card->hw, data_ptr,
+ &trace_pkt->data[tmp], real_len - tmp);
+ }
+
+ /* zero the opp flag to show we got the frame */
+ ut_char = 0x00;
+ sdla_poke(&card->hw, chdlc_priv_area->curr_trace_addr, &ut_char, 1);
+
+ /* now move onto the next frame */
+ chdlc_priv_area->curr_trace_addr += sizeof(TRACE_STATUS_ELEMENT_STRUCT);
+
+ /* check if we went over the last address */
+ if ( chdlc_priv_area->curr_trace_addr > chdlc_priv_area->end_trace_addr ) {
+ chdlc_priv_area->curr_trace_addr = chdlc_priv_area->start_trace_addr;
+ }
+
+ if(trace_pkt->data_avail == 0x01) {
+ buffer_length += real_len - 1;
+ }
+
+ /* for the header */
+ buffer_length += sizeof(trace_pkt_t);
+
+ } /* For Loop */
+
+ if (frames == chdlc_priv_area->number_trace_elements){
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ }
+ chdlc_udp_pkt->trace_info.num_frames = frames;
+
+ mb->buffer_length = buffer_length;
+ chdlc_udp_pkt->cblock.buffer_length = buffer_length;
+
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+
+ break;
+
+
+ case CPIPE_FT1_READ_STATUS:
+ ((unsigned char *)chdlc_udp_pkt->data )[0] =
+ flags->FT1_info_struct.parallel_port_A_input;
+
+ ((unsigned char *)chdlc_udp_pkt->data )[1] =
+ flags->FT1_info_struct.parallel_port_B_input;
+
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ chdlc_udp_pkt->cblock.buffer_length = 2;
+ mb->buffer_length = 2;
+ break;
+
+ case CPIPE_ROUTER_UP_TIME:
+ do_gettimeofday( &tv );
+ chdlc_priv_area->router_up_time = tv.tv_sec -
+ chdlc_priv_area->router_start_time;
+ *(unsigned long *)&chdlc_udp_pkt->data =
+ chdlc_priv_area->router_up_time;
+ mb->buffer_length = sizeof(unsigned long);
+ chdlc_udp_pkt->cblock.buffer_length = sizeof(unsigned long);
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ break;
+
+ case FT1_MONITOR_STATUS_CTRL:
+ /* Enable FT1 MONITOR STATUS */
+ if ((chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_STATUS) ||
+ (chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_OP_STATS)) {
+
+ if( rCount++ != 0 ) {
+ chdlc_udp_pkt->cblock.
+ return_code = COMMAND_OK;
+ mb->buffer_length = 1;
+ break;
+ }
+ }
+
+ /* Disable FT1 MONITOR STATUS */
+ if( chdlc_udp_pkt->data[0] == 0) {
+
+ if( --rCount != 0) {
+ chdlc_udp_pkt->cblock.
+ return_code = COMMAND_OK;
+ mb->buffer_length = 1;
+ break;
+ }
+ }
+ goto dflt_1;
+
+ default:
+dflt_1:
+ /* it's a board command */
+ mb->command = chdlc_udp_pkt->cblock.command;
+ mb->buffer_length = chdlc_udp_pkt->cblock.buffer_length;
+ if (mb->buffer_length) {
+ memcpy(&mb->data, (unsigned char *) chdlc_udp_pkt->
+ data, mb->buffer_length);
+ }
+ /* run the command on the board */
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK) {
+ break;
+ }
+
+ /* copy the result back to our buffer */
+ memcpy(&chdlc_udp_pkt->cblock, mb, sizeof(cblock_t));
+
+ if (mb->buffer_length) {
+ memcpy(&chdlc_udp_pkt->data, &mb->data,
+ mb->buffer_length);
+ }
+
+ } /* end of switch */
+ } /* end of else */
+
+ /* Fill UDP TTL */
+ chdlc_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
+
+ len = reply_udp(chdlc_priv_area->udp_pkt_data, mb->buffer_length);
+
+
+ if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK){
+
+ /* Must check if we interrupted the if_send() routine. The
+ * tx buffers might be in use. If so, drop the packet */
+ if (!test_bit(SEND_CRIT,&card->wandev.critical)) {
+
+ if(!chdlc_send(card, chdlc_priv_area->udp_pkt_data, len)) {
+ ++ card->wandev.stats.tx_packets;
+ card->wandev.stats.tx_bytes += len;
+ }
+ }
+ } else {
+
+ /* Pass it up the stack
+ Allocate socket buffer */
+ if ((new_skb = dev_alloc_skb(len)) != NULL) {
+ /* copy data into new_skb */
+
+ buf = skb_put(new_skb, len);
+ memcpy(buf, chdlc_priv_area->udp_pkt_data, len);
+
+ /* Decapsulate pkt and pass it up the protocol stack */
+ new_skb->protocol = htons(ETH_P_IP);
+ new_skb->dev = dev;
+ new_skb->mac.raw = new_skb->data;
+
+ netif_rx(new_skb);
+ dev->last_rx = jiffies;
+ } else {
+
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ }
+ }
+
+ chdlc_priv_area->udp_pkt_lgth = 0;
+
+ return 0;
+}
+
+/*============================================================================
+ * Initialize Receive and Transmit Buffers.
+ */
+
+static void init_chdlc_tx_rx_buff( sdla_t* card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_TX_STATUS_EL_CFG_STRUCT *tx_config;
+ CHDLC_RX_STATUS_EL_CFG_STRUCT *rx_config;
+ char err;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ if (card->wandev.dev){
+ chdlc_error(card,err,mb);
+ }
+ return;
+ }
+
+ if(card->hw.type == SDLA_S514) {
+ tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Tx_stat_el_cfg_struct));
+ rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Rx_stat_el_cfg_struct));
+
+ /* Set up head and tail pointers for the buffers */
+ card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
+ tx_config->base_addr_Tx_status_elements);
+ card->u.c.txbuf_last =
+ (CHDLC_DATA_TX_STATUS_EL_STRUCT *)
+ card->u.c.txbuf_base +
+ (tx_config->number_Tx_status_elements - 1);
+
+ card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
+ rx_config->base_addr_Rx_status_elements);
+ card->u.c.rxbuf_last =
+ (CHDLC_DATA_RX_STATUS_EL_STRUCT *)
+ card->u.c.rxbuf_base +
+ (rx_config->number_Rx_status_elements - 1);
+
+ /* Set up next pointer to be used */
+ card->u.c.txbuf = (void *)(card->hw.dpmbase +
+ tx_config->next_Tx_status_element_to_use);
+ card->u.c.rxmb = (void *)(card->hw.dpmbase +
+ rx_config->next_Rx_status_element_to_use);
+ }
+ else {
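+ /* S508: dual-port memory is accessed through a window, so all
+ * on-board offsets are reduced modulo SDLA_WINDOWSIZE */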
+ tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Tx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
+
+ rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Rx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
+
+ /* Set up head and tail pointers for the buffers */
+ card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
+ (tx_config->base_addr_Tx_status_elements % SDLA_WINDOWSIZE));
+ card->u.c.txbuf_last =
+ (CHDLC_DATA_TX_STATUS_EL_STRUCT *)card->u.c.txbuf_base
+ + (tx_config->number_Tx_status_elements - 1);
+ card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
+ (rx_config->base_addr_Rx_status_elements % SDLA_WINDOWSIZE));
+ card->u.c.rxbuf_last =
+ (CHDLC_DATA_RX_STATUS_EL_STRUCT *)card->u.c.rxbuf_base
+ + (rx_config->number_Rx_status_elements - 1);
+
+ /* Set up next pointer to be used */
+ card->u.c.txbuf = (void *)(card->hw.dpmbase +
+ (tx_config->next_Tx_status_element_to_use % SDLA_WINDOWSIZE));
+ card->u.c.rxmb = (void *)(card->hw.dpmbase +
+ (rx_config->next_Rx_status_element_to_use % SDLA_WINDOWSIZE));
+ }
+
+ /* Setup Actual Buffer Start and end addresses */
+ card->u.c.rx_base = rx_config->base_addr_Rx_buffer;
+ card->u.c.rx_top = rx_config->end_addr_Rx_buffer;
+
+}
+
+/*=============================================================================
+ * Perform Interrupt Test by running the READ_CHDLC_CODE_VERSION command
+ * MAX_INTR_TEST_COUNTER times.
+ */
+static int intr_test( sdla_t* card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int err,i;
+
+ Intr_test_counter = 0;
+
+ err = chdlc_set_intr_mode(card, APP_INT_ON_COMMAND_COMPLETE);
+
+ if (err == CMD_OK) {
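+ /* Each completed command should raise a command-complete interrupt;
+ * the interrupt handler is expected to count them in Intr_test_counter,
+ * which is later compared against MAX_INTR_TEST_COUNTER */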
+ for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != CMD_OK)
+ chdlc_error(card, err, mb);
+ }
+ }
+ else {
+ return err;
+ }
+
+ err = chdlc_set_intr_mode(card, 0);
+
+ if (err != CMD_OK)
+ return err;
+
+ return 0;
+}
+
+/*==============================================================================
+ * Determine what type of UDP call it is. CPIPEAB ?
+ */
+static int udp_pkt_type(struct sk_buff *skb, sdla_t* card)
+{
+ chdlc_udp_pkt_t *chdlc_udp_pkt = (chdlc_udp_pkt_t *)skb->data;
+
+#ifdef _WAN_UDP_DEBUG
+ printk(KERN_INFO "SIG %s = %s\n\
+ UPP %x = %x\n\
+ PRT %x = %x\n\
+ REQ %i = %i\n\
+ 36 th = %x 37th = %x\n",
+ chdlc_udp_pkt->wp_mgmt.signature,
+ UDPMGMT_SIGNATURE,
+ chdlc_udp_pkt->udp_pkt.udp_dst_port,
+ ntohs(card->wandev.udp_port),
+ chdlc_udp_pkt->ip_pkt.protocol,
+ UDPMGMT_UDP_PROTOCOL,
+ chdlc_udp_pkt->wp_mgmt.request_reply,
+ UDPMGMT_REQUEST,
+ skb->data[36], skb->data[37]);
+#endif
+
+ if (!strncmp(chdlc_udp_pkt->wp_mgmt.signature,UDPMGMT_SIGNATURE,8) &&
+ (chdlc_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
+ (chdlc_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
+ (chdlc_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
+
+ return UDP_CPIPE_TYPE;
+
+ }else{
+ return UDP_INVALID_TYPE;
+ }
+}
+
+/*============================================================================
+ * Set PORT state.
+ */
+static void port_set_state (sdla_t *card, int state)
+{
+ if (card->u.c.state != state)
+ {
+ switch (state)
+ {
+ case WAN_CONNECTED:
+ printk (KERN_INFO "%s: Link connected!\n",
+ card->devname);
+ break;
+
+ case WAN_CONNECTING:
+ printk (KERN_INFO "%s: Link connecting...\n",
+ card->devname);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO "%s: Link disconnected!\n",
+ card->devname);
+ break;
+ }
+
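+ /* Propagate the new state to the WAN device and to the
+ * attached interface's private area (if any) */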
+ card->wandev.state = card->u.c.state = state;
+ if (card->wandev.dev){
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+ chdlc_priv_area->common.state = state;
+ }
+ }
+}
+
+/*===========================================================================
+ * config_chdlc
+ *
+ * Configure the chdlc protocol and enable communications.
+ *
+ * The if_open() function binds this function to the poll routine.
+ * Therefore, this function will run every time the chdlc interface
+ * is brought up. We cannot run this function from the if_open
+ * because if_open does not have access to the remote IP address.
+ *
+ * If the communications are not enabled, proceed to configure
+ * the card and enable communications.
+ *
+ * If the communications are enabled, it means that the interface
+ * was shut down by either the user or the driver. In this case, we
+ * have to check that the IP addresses have not changed. If
+ * the IP addresses have changed, we have to reconfigure the firmware
+ * and update the changed IP addresses. Otherwise, just exit.
+ *
+ */
+
+static int config_chdlc (sdla_t *card)
+{
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+
+ if (card->u.c.comm_enabled){
+
+ /* Jun 20. 2000: NC
+ * IP addresses are not used in the API mode */
+
+ if ((chdlc_priv_area->ip_local_tmp != chdlc_priv_area->ip_local ||
+ chdlc_priv_area->ip_remote_tmp != chdlc_priv_area->ip_remote) &&
+ card->u.c.usedby == WANPIPE) {
+
+ /* The IP addresses have changed, so we must
+ * stop the communications and reconfigure
+ * the card. Reason: the firmware must know
+ * the local and remote IP addresses. */
+ disable_comm(card);
+ port_set_state(card, WAN_DISCONNECTED);
+ printk(KERN_INFO
+ "%s: IP addresses changed!\n",
+ card->devname);
+ printk(KERN_INFO
+ "%s: Restarting communications ...\n",
+ card->devname);
+ }else{
+ /* IP addresses are the same and the link is up,
+ * we don't have to do anything here. Therefore, exit */
+ return 0;
+ }
+ }
+
+ chdlc_priv_area->ip_local = chdlc_priv_area->ip_local_tmp;
+ chdlc_priv_area->ip_remote = chdlc_priv_area->ip_remote_tmp;
+
+
+ /* Setup the Board for asynchronous mode */
+ if (card->u.c.async_mode){
+
+ if (set_asy_config(card)) {
+ printk (KERN_INFO "%s: Failed CHDLC Async configuration!\n",
+ card->devname);
+ return 0;
+ }
+ }else{
+ /* Setup the Board for CHDLC */
+ if (set_chdlc_config(card)) {
+ printk (KERN_INFO "%s: Failed CHDLC configuration!\n",
+ card->devname);
+ return 0;
+ }
+ }
+
+ /* Set interrupt mode and mask */
+ if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
+ APP_INT_ON_GLOBAL_EXCEP_COND |
+ APP_INT_ON_TX_FRAME |
+ APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
+ printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
+ card->devname);
+ return 0;
+ }
+
+
+ /* Mask the Transmit and Timer interrupt */
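+ /* (both are re-enabled on demand elsewhere in the driver) */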
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
+
+ /* In TTY mode, receive interrupt will be enabled during
+ * wanpipe_tty_open() operation */
+ if (card->tty_opt){
+ flags->interrupt_info_struct.interrupt_permission &= ~APP_INT_ON_RX_FRAME;
+ }
+
+ /* Enable communications */
+ if (card->u.c.async_mode){
+ if (asy_comm_enable(card) != 0) {
+ printk(KERN_INFO "%s: Failed to enable async commnunication!\n",
+ card->devname);
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ card->u.c.comm_enabled=0;
+ chdlc_set_intr_mode(card,0);
+ return 0;
+ }
+ }else{
+ if (chdlc_comm_enable(card) != 0) {
+ printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
+ card->devname);
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ card->u.c.comm_enabled=0;
+ chdlc_set_intr_mode(card,0);
+ return 0;
+ }
+ }
+
+ /* Initialize Rx/Tx buffer control fields */
+ init_chdlc_tx_rx_buff(card);
+ port_set_state(card, WAN_CONNECTING);
+ return 0;
+}
+
+
+/*============================================================
+ * chdlc_poll
+ *
+ * Rationale:
+ * We cannot manipulate the routing tables or
+ * IP addresses within the interrupt handler. Therefore
+ * we must perform such actions outside the interrupt,
+ * at a later time.
+ *
+ * Description:
+ * CHDLC polling routine, responsible for
+ * shutting down interfaces upon disconnect
+ * and adding/removing routes.
+ *
+ * Usage:
+ * This function is executed for each CHDLC
+ * interface through a tq_schedule bottom half.
+ *
+ * The trigger_chdlc_poll() function is used to kick
+ * the chdlc_poll() routine.
+ */
+
+static void chdlc_poll(struct net_device *dev)
+{
+ chdlc_private_area_t *chdlc_priv_area;
+ sdla_t *card;
+ u8 check_gateway=0;
+ SHARED_MEMORY_INFO_STRUCT* flags;
+
+
+ if (!dev || (chdlc_priv_area=dev->priv) == NULL)
+ return;
+
+ card = chdlc_priv_area->card;
+ flags = card->u.c.flags;
+
+ /* (Re)Configuration is in progress; stop what we are
+ * doing and get out */
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ return;
+ }
+
+ /* if_open() function has triggered the polling routine
+ * to determine the configured IP addresses. Once the
+ * addresses are found, trigger the chdlc configuration */
+ if (test_bit(0,&chdlc_priv_area->config_chdlc)){
+
+ chdlc_priv_area->ip_local_tmp = get_ip_address(dev,WAN_LOCAL_IP);
+ chdlc_priv_area->ip_remote_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
+
+ /* Jun 20. 2000 Bug Fix
+ * Only perform this check in WANPIPE mode, since
+ * IP addresses are not used in the API mode. */
+
+ if (chdlc_priv_area->ip_local_tmp == chdlc_priv_area->ip_remote_tmp &&
+ card->u.c.slarp_timer == 0x00 &&
+ !card->u.c.backup &&
+ card->u.c.usedby == WANPIPE){
+
+ if (++chdlc_priv_area->ip_error > MAX_IP_ERRORS){
+ printk(KERN_INFO "\n%s: --- WARNING ---\n",
+ card->devname);
+ printk(KERN_INFO
+ "%s: The local IP address is the same as the\n",
+ card->devname);
+ printk(KERN_INFO
+ "%s: Point-to-Point IP address.\n",
+ card->devname);
+ printk(KERN_INFO "%s: --- WARNING ---\n\n",
+ card->devname);
+ }else{
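+ /* Addresses are not ready yet; re-arm the delay timer and
+ * poll again in about a second */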
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ chdlc_priv_area->poll_delay_timer.expires = jiffies+HZ;
+ add_timer(&chdlc_priv_area->poll_delay_timer);
+ return;
+ }
+ }
+
+ clear_bit(0,&chdlc_priv_area->config_chdlc);
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+
+ chdlc_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
+ flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
+ return;
+ }
+ /* Dynamic interface implementation, as well as dynamic
+ * routing. */
+
+ switch (card->u.c.state){
+
+ case WAN_DISCONNECTED:
+
+ /* If dynamic interface configuration is on and the interface
+ * is up, then bring down the network interface */
+
+ if (test_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down) &&
+ !test_bit(DEV_DOWN, &chdlc_priv_area->interface_down) &&
+ card->wandev.dev->flags & IFF_UP){
+
+ printk(KERN_INFO "%s: Interface %s down.\n",
+ card->devname,card->wandev.dev->name);
+ change_dev_flags(card->wandev.dev,(card->wandev.dev->flags&~IFF_UP));
+ set_bit(DEV_DOWN,&chdlc_priv_area->interface_down);
+ chdlc_priv_area->route_status = NO_ROUTE;
+
+ }else{
+ /* We need to check if the local IP address is
+ * zero. If it is, we shouldn't try to remove it.
+ */
+
+ if (card->wandev.dev->flags & IFF_UP &&
+ get_ip_address(card->wandev.dev,WAN_LOCAL_IP) &&
+ chdlc_priv_area->route_status != NO_ROUTE &&
+ card->u.c.slarp_timer){
+
+ process_route(card);
+ }
+ }
+ break;
+
+ case WAN_CONNECTED:
+
+ /* On an SMP machine this code can execute before the interface
+ * comes up. In this case, we must make sure that we do not
+ * try to bring up the interface before dev_open() has finished */
+
+
+ /* DEV_DOWN will be set only when we bring down the interface
+ * for the very first time. This way we know that it was us
+ * that brought the interface down */
+
+ if (test_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down) &&
+ test_bit(DEV_DOWN, &chdlc_priv_area->interface_down) &&
+ !(card->wandev.dev->flags & IFF_UP)){
+
+ printk(KERN_INFO "%s: Interface %s up.\n",
+ card->devname,card->wandev.dev->name);
+ change_dev_flags(card->wandev.dev,(card->wandev.dev->flags|IFF_UP));
+ clear_bit(DEV_DOWN,&chdlc_priv_area->interface_down);
+ check_gateway=1;
+ }
+
+ if (chdlc_priv_area->route_status == ADD_ROUTE &&
+ card->u.c.slarp_timer){
+
+ process_route(card);
+ check_gateway=1;
+ }
+
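+ /* If this interface is configured as a gateway, re-apply the
+ * gateway route now that the link and interface are up */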
+ if (chdlc_priv_area->gateway && check_gateway)
+ add_gateway(card,dev);
+
+ break;
+ }
+
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+}
+
+/*============================================================
+ * trigger_chdlc_poll
+ *
+ * Description:
+ * Add a chdlc_poll() work entry into the keventd work queue
+ * for a specific interface. This will kick
+ * the chdlc_poll() routine at a later time.
+ *
+ * Usage:
+ * Interrupt handlers use this to defer a task to
+ * the polling routine.
+ *
+ */
+static void trigger_chdlc_poll(struct net_device *dev)
+{
+ chdlc_private_area_t *chdlc_priv_area;
+ sdla_t *card;
+
+ if (!dev)
+ return;
+
+ if ((chdlc_priv_area = dev->priv)==NULL)
+ return;
+
+ card = chdlc_priv_area->card;
+
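+ /* Allow only one outstanding poll per card; POLL_CRIT is
+ * cleared by chdlc_poll() once it has run */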
+ if (test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
+ return;
+ }
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ return;
+ }
+ schedule_work(&chdlc_priv_area->poll_work);
+}
+
+
+static void chdlc_poll_delay (unsigned long dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ trigger_chdlc_poll(dev);
+}
+
+
+void s508_lock (sdla_t *card, unsigned long *smp_flags)
+{
+ spin_lock_irqsave(&card->wandev.lock, *smp_flags);
+ if (card->next){
+ spin_lock(&card->next->wandev.lock);
+ }
+}
+
+void s508_unlock (sdla_t *card, unsigned long *smp_flags)
+{
+ if (card->next){
+ spin_unlock(&card->next->wandev.lock);
+ }
+ spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
+}
+
+//*********** TTY SECTION ****************
+
+static void wanpipe_tty_trigger_tx_irq(sdla_t *card)
+{
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
+ chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
+}
+
+static void wanpipe_tty_trigger_poll(sdla_t *card)
+{
+ schedule_work(&card->tty_work);
+}
+
+static void tty_poll_work (void* data)
+{
+ sdla_t *card = (sdla_t*)data;
+ struct tty_struct *tty;
+
+ if ((tty=card->tty)==NULL)
+ return;
+
+ tty_wakeup(tty);
+#if defined(SERIAL_HAVE_POLL_WAIT)
+ wake_up_interruptible(&tty->poll_wait);
+#endif
+ return;
+}
+
+static void wanpipe_tty_close(struct tty_struct *tty, struct file * filp)
+{
+ sdla_t *card;
+ unsigned long smp_flags;
+
+ if (!tty || !tty->driver_data){
+ return;
+ }
+
+ card = (sdla_t*)tty->driver_data;
+
+ if (!card)
+ return;
+
+ printk(KERN_INFO "%s: Closing TTY Driver!\n",
+ card->devname);
+
+ /* Sanity Check */
+ if (!card->tty_open)
+ return;
+
+ wanpipe_close(card);
+ if (--card->tty_open == 0){
+
+ lock_adapter_irq(&card->wandev.lock,&smp_flags);
+ card->tty=NULL;
+ chdlc_disable_comm_shutdown(card);
+ unlock_adapter_irq(&card->wandev.lock,&smp_flags);
+
+ if (card->tty_buf){
+ kfree(card->tty_buf);
+ card->tty_buf=NULL;
+ }
+
+ if (card->tty_rx){
+ kfree(card->tty_rx);
+ card->tty_rx=NULL;
+ }
+ }
+ return;
+}
+static int wanpipe_tty_open(struct tty_struct *tty, struct file * filp)
+{
+ unsigned long smp_flags;
+ sdla_t *card;
+
+ if (!tty){
+ return -ENODEV;
+ }
+
+ if (!tty->driver_data){
+ int port;
+ port = tty->index;
+ if ((port < 0) || (port >= NR_PORTS))
+ return -ENODEV;
+
+ tty->driver_data = WAN_CARD(port);
+ if (!tty->driver_data)
+ return -ENODEV;
+ }
+
+ card = (sdla_t*)tty->driver_data;
+
+ if (!card)
+ return -ENODEV;
+
+ printk(KERN_INFO "%s: Opening TTY Driver!\n",
+ card->devname);
+
+ if (card->tty_open == 0){
+ lock_adapter_irq(&card->wandev.lock,&smp_flags);
+ card->tty=tty;
+ unlock_adapter_irq(&card->wandev.lock,&smp_flags);
+
+ if (!card->tty_buf){
+ card->tty_buf = kmalloc(TTY_CHDLC_MAX_MTU, GFP_KERNEL);
+ if (!card->tty_buf){
+ card->tty_buf=NULL;
+ card->tty=NULL;
+ return -ENOMEM;
+ }
+ }
+
+ if (!card->tty_rx){
+ card->tty_rx = kmalloc(TTY_CHDLC_MAX_MTU, GFP_KERNEL);
+ if (!card->tty_rx){
+ /* Free the buffer above */
+ kfree(card->tty_buf);
+ card->tty_buf=NULL;
+ card->tty=NULL;
+ return -ENOMEM;
+ }
+ }
+ }
+
+ ++card->tty_open;
+ wanpipe_open(card);
+ return 0;
+}
+
+static int wanpipe_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ unsigned long smp_flags=0;
+ sdla_t *card=NULL;
+
+ if (!tty){
+ dbg_printk(KERN_INFO "NO TTY in Write\n");
+ return -ENODEV;
+ }
+
+ card = (sdla_t *)tty->driver_data;
+
+ if (!card){
+ dbg_printk(KERN_INFO "No Card in TTY Write\n");
+ return -ENODEV;
+ }
+
+ if (count > card->wandev.mtu){
+ dbg_printk(KERN_INFO "Frame too big in Write %i Max: %i\n",
+ count,card->wandev.mtu);
+ return -EINVAL;
+ }
+
+ if (card->wandev.state != WAN_CONNECTED){
+ dbg_printk(KERN_INFO "Card not connected in TTY Write\n");
+ return -EINVAL;
+ }
+
+ /* Lock the 508 Card: SMP is supported */
+ if(card->hw.type != SDLA_S514){
+ s508_lock(card,&smp_flags);
+ }
+
+ if (test_and_set_bit(SEND_CRIT,(void*)&card->wandev.critical)){
+ printk(KERN_INFO "%s: Critical in TTY Write\n",
+ card->devname);
+
+ /* Lock the 508 Card: SMP is supported */
+ if(card->hw.type != SDLA_S514)
+ s508_unlock(card,&smp_flags);
+
+ return -EINVAL;
+ }
+
+ if (chdlc_send(card,(void*)buf,count)){
+ dbg_printk(KERN_INFO "%s: Failed to send, retry later: kernel!\n",
+ card->devname);
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+
+ wanpipe_tty_trigger_tx_irq(card);
+
+ if(card->hw.type != SDLA_S514)
+ s508_unlock(card,&smp_flags);
+ return 0;
+ }
+ dbg_printk(KERN_INFO "%s: Packet sent OK: %i\n",card->devname,count);
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+
+ if(card->hw.type != SDLA_S514)
+ s508_unlock(card,&smp_flags);
+
+ return count;
+}
+
+static void wanpipe_tty_receive(sdla_t *card, unsigned addr, unsigned int len)
+{
+ unsigned offset=0;
+ unsigned olen=len;
+ char fp=0;
+ struct tty_struct *tty;
+ int i;
+ struct tty_ldisc *ld;
+
+ if (!card->tty_open){
+ dbg_printk(KERN_INFO "%s: TTY not open during receive\n",
+ card->devname);
+ return;
+ }
+
+ if ((tty=card->tty) == NULL){
+ dbg_printk(KERN_INFO "%s: No TTY on receive\n",
+ card->devname);
+ return;
+ }
+
+ if (!tty->driver_data){
+ dbg_printk(KERN_INFO "%s: No Driver Data, or Flip on receive\n",
+ card->devname);
+ return;
+ }
+
+
+ if (card->u.c.async_mode){
+ if ((tty->flip.count+len) >= TTY_FLIPBUF_SIZE){
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Received packet size too big: %i bytes, Max: %i!\n",
+ card->devname,len,TTY_FLIPBUF_SIZE);
+ }
+ return;
+ }
+
+
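+ /* The received data may wrap around the top of the on-board
+ * receive buffer; copy the tail piece first, then continue
+ * from the buffer base */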
+ if((addr + len) > card->u.c.rx_top + 1) {
+ offset = card->u.c.rx_top - addr + 1;
+
+ sdla_peek(&card->hw, addr, tty->flip.char_buf_ptr, offset);
+
+ addr = card->u.c.rx_base;
+ len -= offset;
+
+ tty->flip.char_buf_ptr+=offset;
+ tty->flip.count+=offset;
+ for (i=0;i<offset;i++){
+ *tty->flip.flag_buf_ptr = 0;
+ tty->flip.flag_buf_ptr++;
+ }
+ }
+
+ sdla_peek(&card->hw, addr, tty->flip.char_buf_ptr, len);
+
+ tty->flip.char_buf_ptr+=len;
+ card->tty->flip.count+=len;
+ for (i=0;i<len;i++){
+ *tty->flip.flag_buf_ptr = 0;
+ tty->flip.flag_buf_ptr++;
+ }
+
+ tty->low_latency=1;
+ tty_flip_buffer_push(tty);
+ }else{
+ if (!card->tty_rx){
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Receive sync buffer not available!\n",
+ card->devname);
+ }
+ return;
+ }
+
+ if (len > TTY_CHDLC_MAX_MTU){
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Received packet size too big: %i bytes, Max: %i!\n",
+ card->devname,len,TTY_FLIPBUF_SIZE);
+ }
+ return;
+ }
+
+
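+ /* Handle a possible wrap around the top of the on-board
+ * receive buffer, as in the async path above */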
+ if((addr + len) > card->u.c.rx_top + 1) {
+ offset = card->u.c.rx_top - addr + 1;
+
+ sdla_peek(&card->hw, addr, card->tty_rx, offset);
+
+ addr = card->u.c.rx_base;
+ len -= offset;
+ }
+ sdla_peek(&card->hw, addr, card->tty_rx+offset, len);
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->receive_buf)
+ ld->receive_buf(tty,card->tty_rx,&fp,olen);
+ tty_ldisc_deref(ld);
+ }else{
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: NO TTY Sync line discipline!\n",
+ card->devname);
+ }
+ }
+ }
+
+ dbg_printk(KERN_INFO "%s: Received Data %i\n",card->devname,olen);
+ return;
+}
+
+#if 0
+static int wanpipe_tty_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif
+
+static void wanpipe_tty_stop(struct tty_struct *tty)
+{
+ return;
+}
+
+static void wanpipe_tty_start(struct tty_struct *tty)
+{
+ return;
+}
+
+static int config_tty (sdla_t *card)
+{
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+
+ /* Setup the Board for asynchronous mode */
+ if (card->u.c.async_mode){
+
+ if (set_asy_config(card)) {
+ printk (KERN_INFO "%s: Failed CHDLC Async configuration!\n",
+ card->devname);
+ return -EINVAL;
+ }
+ }else{
+ /* Setup the Board for CHDLC */
+ if (set_chdlc_config(card)) {
+ printk (KERN_INFO "%s: Failed CHDLC configuration!\n",
+ card->devname);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mode and mask */
+ if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
+ APP_INT_ON_GLOBAL_EXCEP_COND |
+ APP_INT_ON_TX_FRAME |
+ APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
+ printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+
+ /* Mask the Transmit and Timer interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
+
+
+ /* Enable communications */
+ if (card->u.c.async_mode){
+ if (asy_comm_enable(card) != 0) {
+ printk(KERN_INFO "%s: Failed to enable async commnunication!\n",
+ card->devname);
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ card->u.c.comm_enabled=0;
+ chdlc_set_intr_mode(card,0);
+ return -EINVAL;
+ }
+ }else{
+ if (chdlc_comm_enable(card) != 0) {
+ printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
+ card->devname);
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ card->u.c.comm_enabled=0;
+ chdlc_set_intr_mode(card,0);
+ return -EINVAL;
+ }
+ }
+
+ /* Initialize Rx/Tx buffer control fields */
+ init_chdlc_tx_rx_buff(card);
+ port_set_state(card, WAN_CONNECTING);
+ return 0;
+}
+
+
+static int change_speed(sdla_t *card, struct tty_struct *tty,
+ struct termios *old_termios)
+{
+ int baud, ret=0;
+ unsigned cflag;
+ int dbits,sbits,parity,handshaking;
+
+ cflag = tty->termios->c_cflag;
+
+ /* There is always one stop bit */
+ sbits=WANOPT_ONE;
+
+ /* Parity is defaulted to NONE */
+ parity = WANOPT_NONE;
+
+ handshaking=0;
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5: dbits = 5; break;
+ case CS6: dbits = 6; break;
+ case CS7: dbits = 7; break;
+ case CS8: dbits = 8; break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default: dbits = 8; break;
+ }
+
+ /* A second stop bit may be requested; two is the
+ * maximum supported */
+ if (cflag & CSTOPB) {
+ sbits = WANOPT_TWO;
+ }
+ if (cflag & PARENB) {
+ parity = WANOPT_EVEN;
+ }
+ if (cflag & PARODD){
+ parity = WANOPT_ODD;
+ }
+
+ /* Determine divisor based on baud rate */
+ baud = tty_get_baud_rate(tty);
+
+ if (!baud)
+ baud = 9600; /* B0 transition handled in rs_set_termios */
+
+ if (cflag & CRTSCTS) {
+ handshaking|=ASY_RTS_HS_FOR_RX;
+ }
+
+ if (I_IGNPAR(tty))
+ parity = WANOPT_NONE;
+
+ if (I_IXOFF(tty)){
+ handshaking|=ASY_XON_XOFF_HS_FOR_RX;
+ handshaking|=ASY_XON_XOFF_HS_FOR_TX;
+ }
+
+ if (I_IXON(tty)){
+ handshaking|=ASY_XON_XOFF_HS_FOR_RX;
+ handshaking|=ASY_XON_XOFF_HS_FOR_TX;
+ }
+
+ if (card->u.c.async_mode){
+ if (card->wandev.bps != baud)
+ ret=1;
+ card->wandev.bps = baud;
+ }
+
+ if (card->u.c.async_mode){
+ if (card->u.c.protocol_options != handshaking)
+ ret=1;
+ card->u.c.protocol_options = handshaking;
+
+ if (card->u.c.tx_bits_per_char != dbits)
+ ret=1;
+ card->u.c.tx_bits_per_char = dbits;
+
+ if (card->u.c.rx_bits_per_char != dbits)
+ ret=1;
+ card->u.c.rx_bits_per_char = dbits;
+
+ if (card->u.c.stop_bits != sbits)
+ ret=1;
+ card->u.c.stop_bits = sbits;
+
+ if (card->u.c.parity != parity)
+ ret=1;
+ card->u.c.parity = parity;
+
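+ /* The remaining async parameters use fixed driver defaults */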
+ card->u.c.break_timer = 50;
+ card->u.c.inter_char_timer = 10;
+ card->u.c.rx_complete_length = 100;
+ card->u.c.xon_char = 0xFE;
+ }else{
+ card->u.c.protocol_options = HDLC_STREAMING_MODE;
+ }
+
+ return ret;
+}
+
+
+static void wanpipe_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
+{
+ sdla_t *card;
+ int err=1;
+
+ if (!tty){
+ return;
+ }
+
+ card = (sdla_t *)tty->driver_data;
+
+ if (!card)
+ return;
+
+ if (change_speed(card, tty, old_termios) || !card->u.c.comm_enabled){
+ unsigned long smp_flags;
+
+ if (card->u.c.comm_enabled){
+ lock_adapter_irq(&card->wandev.lock,&smp_flags);
+ chdlc_disable_comm_shutdown(card);
+ unlock_adapter_irq(&card->wandev.lock,&smp_flags);
+ }
+ lock_adapter_irq(&card->wandev.lock,&smp_flags);
+ err = config_tty(card);
+ unlock_adapter_irq(&card->wandev.lock,&smp_flags);
+ if (card->u.c.async_mode){
+ printk(KERN_INFO "%s: TTY Async Configuration:\n"
+ " Baud =%i\n"
+ " Handshaking =%s\n"
+ " Tx Dbits =%i\n"
+ " Rx Dbits =%i\n"
+ " Parity =%s\n"
+ " Stop Bits =%i\n",
+ card->devname,
+ card->wandev.bps,
+ opt_decode[card->u.c.protocol_options],
+ card->u.c.tx_bits_per_char,
+ card->u.c.rx_bits_per_char,
+ p_decode[card->u.c.parity] ,
+ card->u.c.stop_bits);
+ }else{
+ printk(KERN_INFO "%s: TTY Sync Configuration:\n"
+ " Baud =%i\n"
+ " Protocol =HDLC_STREAMING\n",
+ card->devname,card->wandev.bps);
+ }
+ if (!err){
+ port_set_state(card,WAN_CONNECTED);
+ }else{
+ port_set_state(card,WAN_DISCONNECTED);
+ }
+ }
+ return;
+}
+
+static void wanpipe_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ sdla_t *card;
+ unsigned long smp_flags=0;
+
+ if (!tty){
+ return;
+ }
+
+ card = (sdla_t *)tty->driver_data;
+
+ if (!card)
+ return;
+
+ if (card->wandev.state != WAN_CONNECTED)
+ return;
+
+ if(card->hw.type != SDLA_S514)
+ s508_lock(card,&smp_flags);
+
+ if (test_and_set_bit(SEND_CRIT,(void*)&card->wandev.critical)){
+
+ wanpipe_tty_trigger_tx_irq(card);
+
+ if(card->hw.type != SDLA_S514)
+ s508_unlock(card,&smp_flags);
+ return;
+ }
+
+ if (chdlc_send(card,(void*)&ch,1)){
+ wanpipe_tty_trigger_tx_irq(card);
+ dbg_printk("%s: Failed to TX char!\n",card->devname);
+ }
+
+ dbg_printk("%s: Char TX OK\n",card->devname);
+
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+
+ if(card->hw.type != SDLA_S514)
+ s508_unlock(card,&smp_flags);
+
+ return;
+}
+
+static void wanpipe_tty_flush_chars(struct tty_struct *tty)
+{
+ return;
+}
+
+static void wanpipe_tty_flush_buffer(struct tty_struct *tty)
+{
+ if (!tty)
+ return;
+
+#if defined(SERIAL_HAVE_POLL_WAIT)
+ wake_up_interruptible(&tty->poll_wait);
+#endif
+ tty_wakeup(tty);
+ return;
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ */
+static void wanpipe_tty_send_xchar(struct tty_struct *tty, char ch)
+{
+ return;
+}
+
+
+static int wanpipe_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+
+static int wanpipe_tty_write_room(struct tty_struct *tty)
+{
+ sdla_t *card;
+
+ printk(KERN_INFO "TTY Write Room\n");
+
+ if (!tty){
+ return 0;
+ }
+
+ card = (sdla_t *)tty->driver_data;
+ if (!card)
+ return 0;
+
+ if (card->wandev.state != WAN_CONNECTED)
+ return 0;
+
+ return SEC_MAX_NO_DATA_BYTES_IN_FRAME;
+}
+
+
+static int set_modem_status(sdla_t *card, unsigned char data)
+{
+ CHDLC_MAILBOX_STRUCT *mb = card->mbox;
+ int err;
+
+ mb->buffer_length=1;
+ mb->command=SET_MODEM_STATUS;
+ mb->data[0]=data;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error (card, err, mb);
+
+ return err;
+}
+
+static void wanpipe_tty_hangup(struct tty_struct *tty)
+{
+ sdla_t *card;
+ unsigned long smp_flags;
+
+ printk(KERN_INFO "TTY Hangup!\n");
+
+ if (!tty){
+ return;
+ }
+
+ card = (sdla_t *)tty->driver_data;
+ if (!card)
+ return;
+
+ lock_adapter_irq(&card->wandev.lock,&smp_flags);
+ set_modem_status(card,0);
+ unlock_adapter_irq(&card->wandev.lock,&smp_flags);
+ return;
+}
+
+static void wanpipe_tty_break(struct tty_struct *tty, int break_state)
+{
+ return;
+}
+
+static void wanpipe_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ return;
+}
+
+static void wanpipe_tty_throttle(struct tty_struct * tty)
+{
+ return;
+}
+
+static void wanpipe_tty_unthrottle(struct tty_struct * tty)
+{
+ return;
+}
+
+int wanpipe_tty_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ return 0;
+}
+
+/*
+ * The serial driver boot-time initialization code!
+ */
+int wanpipe_tty_init(sdla_t *card)
+{
+ struct serial_state * state;
+
+ /* Initialize the tty_driver structure */
+
+ if (card->tty_minor < 0 || card->tty_minor > NR_PORTS){
+ printk(KERN_INFO "%s: Illegal Minor TTY number (0-4): %i\n",
+ card->devname,card->tty_minor);
+ return -EINVAL;
+ }
+
+ if (WAN_CARD(card->tty_minor)){
+ printk(KERN_INFO "%s: TTY Minor %i, already in use\n",
+ card->devname,card->tty_minor);
+ return -EBUSY;
+ }
+
+ if (tty_init_cnt==0){
+
+ printk(KERN_INFO "%s: TTY %s Driver Init: Major %i, Minor Range %i-%i\n",
+ card->devname,
+ card->u.c.async_mode ? "ASYNC" : "SYNC",
+ WAN_TTY_MAJOR,MIN_PORT,MAX_PORT);
+
+ tty_driver_mode = card->u.c.async_mode;
+
+ memset(&serial_driver, 0, sizeof(struct tty_driver));
+ serial_driver.magic = TTY_DRIVER_MAGIC;
+ serial_driver.owner = THIS_MODULE;
+ serial_driver.driver_name = "wanpipe_tty";
+ serial_driver.name = "ttyW";
+ serial_driver.major = WAN_TTY_MAJOR;
+ serial_driver.minor_start = WAN_TTY_MINOR;
+ serial_driver.num = NR_PORTS;
+ serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver.subtype = SERIAL_TYPE_NORMAL;
+
+ serial_driver.init_termios = tty_std_termios;
+ serial_driver.init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver.flags = TTY_DRIVER_REAL_RAW;
+
+ serial_driver.refcount = 1; /* !@!@^#^&!! */
+
+ serial_driver.open = wanpipe_tty_open;
+ serial_driver.close = wanpipe_tty_close;
+ serial_driver.write = wanpipe_tty_write;
+
+ serial_driver.put_char = wanpipe_tty_put_char;
+ serial_driver.flush_chars = wanpipe_tty_flush_chars;
+ serial_driver.write_room = wanpipe_tty_write_room;
+ serial_driver.chars_in_buffer = wanpipe_tty_chars_in_buffer;
+ serial_driver.flush_buffer = wanpipe_tty_flush_buffer;
+ //serial_driver.ioctl = wanpipe_tty_ioctl;
+ serial_driver.throttle = wanpipe_tty_throttle;
+ serial_driver.unthrottle = wanpipe_tty_unthrottle;
+ serial_driver.send_xchar = wanpipe_tty_send_xchar;
+ serial_driver.set_termios = wanpipe_tty_set_termios;
+ serial_driver.stop = wanpipe_tty_stop;
+ serial_driver.start = wanpipe_tty_start;
+ serial_driver.hangup = wanpipe_tty_hangup;
+ serial_driver.break_ctl = wanpipe_tty_break;
+ serial_driver.wait_until_sent = wanpipe_tty_wait_until_sent;
+ serial_driver.read_proc = wanpipe_tty_read_proc;
+
+ if (tty_register_driver(&serial_driver)){
+ printk(KERN_INFO "%s: Failed to register serial driver!\n",
+ card->devname);
+ }
+ }
+
+
+ /* Subsequent ports must conform to the initial configuration */
+ if (tty_driver_mode != card->u.c.async_mode){
+ printk(KERN_INFO "%s: Error: TTY Driver operation mode mismatch!\n",
+ card->devname);
+ printk(KERN_INFO "%s: The TTY driver is configured for %s!\n",
+ card->devname, tty_driver_mode ? "ASYNC" : "SYNC");
+ return -EINVAL;
+ }
+
+ tty_init_cnt++;
+
+ printk(KERN_INFO "%s: Initializing TTY %s Driver Minor %i\n",
+ card->devname,
+ tty_driver_mode ? "ASYNC" : "SYNC",
+ card->tty_minor);
+
+ tty_card_map[card->tty_minor] = card;
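+ /* Fill in the per-port serial state entry with default values */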
+ state = &rs_table[card->tty_minor];
+
+ state->magic = SSTATE_MAGIC;
+ state->line = 0;
+ state->type = PORT_UNKNOWN;
+ state->custom_divisor = 0;
+ state->close_delay = 5*HZ/10;
+ state->closing_wait = 30*HZ;
+ state->icount.cts = state->icount.dsr =
+ state->icount.rng = state->icount.dcd = 0;
+ state->icount.rx = state->icount.tx = 0;
+ state->icount.frame = state->icount.parity = 0;
+ state->icount.overrun = state->icount.brk = 0;
+ state->irq = card->wandev.irq;
+
+ INIT_WORK(&card->tty_work, tty_poll_work, (void*)card);
+ return 0;
+}
+
+
+MODULE_LICENSE("GPL");
+
+/****** End ****************************************************************/
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
new file mode 100644
index 000000000000..2efccb0554c0
--- /dev/null
+++ b/drivers/net/wan/sdla_fr.c
@@ -0,0 +1,5068 @@
+/*****************************************************************************
+* sdla_fr.c WANPIPE(tm) Multiprotocol WAN Link Driver. Frame relay module.
+*
+* Author(s): Nenad Corbic <ncorbic@sangoma.com>
+* Gideon Hack
+*
+* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Nov 23, 2000 Nenad Corbic o Added support for 2.4.X kernels
+* Nov 15, 2000 David Rokavarg
+* Nenad Corbic o Added frame relay bridging support.
+* Original code from Mark Wells and Kristian Hoffmann has
+* been integrated into the frame relay driver.
+* Nov 13, 2000 Nenad Corbic o Added true interface type encoding option.
+* Tcpdump doesn't support Frame Relay interface
+* types; to fix this, the true type option will set
+* the interface type to RAW IP mode.
+* Nov 07, 2000 Nenad Corbic o Added security features for UDP debugging:
+* Deny all and specify allowed requests.
+* Nov 06, 2000 Nenad Corbic o Wanpipe interfaces conform to raw packet interfaces.
+* Moved the if_header into the if_send() routine.
+* The if_header() was breaking the libpcap
+* support. i.e. support for tcpdump, ethereal ...
+* Oct 12. 2000 Nenad Corbic o Added error message in fr_configure
+* Jul 31, 2000 Nenad Corbic o Fixed the Router UP Time.
+* Apr 28, 2000 Nenad Corbic o Added the option to shutdown an interface
+* when the channel gets disconnected.
+* Apr 28, 2000 Nenad Corbic o Added M.Grants patch: disallow duplicate
+* interface setups.
+* Apr 25, 2000 Nenad Corbic o Added M.Grants patch: dynamically add/remove
+* new dlcis/interfaces.
+* Mar 23, 2000 Nenad Corbic o Improved task queue, bh handling.
+* Mar 16, 2000 Nenad Corbic o Added Inverse ARP support
+* Mar 13, 2000 Nenad Corbic o Added new socket API support.
+* Mar 06, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
+* Feb 24, 2000 Nenad Corbic o Fixed up FT1 UDP debugging problem.
+* Dev 15, 1999 Nenad Corbic o Fixed up header files for 2.0.X kernels
+*
+* Nov 08, 1999 Nenad Corbic o Combined all debug UDP calls into one function
+* o Removed the ARP support. This has to be done
+* in the next version.
+* o Only a Node can implement NO signalling.
+* Initialize DLCI during if_open() if NO
+* signalling.
+* o Took out IPX support, implement in next
+* version
+* Sep 29, 1999 Nenad Corbic o Added SMP support and changed the update
+* function to use timer interrupt.
+* o Fixed the CIR bug: Set the value of BC
+* to CIR when the CIR is enabled.
+* o Updated comments, statistics and tracing.
+* Jun 02, 1999 Gideon Hack o Updated for S514 support.
+* Sep 18, 1998 Jaspreet Singh o Updated for 2.2.X kernels.
+* Jul 31, 1998 Jaspreet Singh o Removed wpf_poll routine. The channel/DLCI
+* status is received through an event interrupt.
+* Jul 08, 1998 David Fong o Added inverse ARP support.
+* Mar 26, 1997 Jaspreet Singh o Returning return codes for failed UDP cmds.
+* Jan 28, 1997 Jaspreet Singh o Improved handling of inactive DLCIs.
+* Dec 30, 1997 Jaspreet Singh o Replaced dev_tint() with mark_bh(NET_BH)
+* Dec 16, 1997 Jaspreet Singh o Implemented Multiple IPX support.
+* Nov 26, 1997 Jaspreet Singh o Improved load sharing with multiple boards
+* o Added Cli() to protect enabling of interrupts
+* while polling is called.
+* Nov 24, 1997 Jaspreet Singh o Added counters to avoid enabling of interrupts
+* when they have been disabled by another
+* interface or routine (eg. wpf_poll).
+* Nov 06, 1997 Jaspreet Singh o Added INTR_TEST_MODE to avoid polling
+* routine disable interrupts during interrupt
+* testing.
+* Oct 20, 1997 Jaspreet Singh o Added hooks in for Router UP time.
+* Oct 16, 1997 Jaspreet Singh o The critical flag is used to maintain flow
+* control by avoiding RACE conditions. The
+* cli() and restore_flags() are taken out.
+* The fr_channel structure is appended for
+* Driver Statistics.
+* Oct 15, 1997 Farhan Thawar o updated if_send() and receive for IPX
+* Aug 29, 1997 Farhan Thawar o Removed most of the cli() and sti()
+* o Abstracted the UDP management stuff
+* o Now use tbusy and critical more intelligently
+* Jul 21, 1997 Jaspreet Singh o Can configure T391, T392, N391, N392 & N393
+* through router.conf.
+* o Protected calls to sdla_peek() by adding
+* save_flags(), cli() and restore_flags().
+* o Added error message for Inactive DLCIs in
+* fr_event() and update_chan_state().
+* o Fixed freeing up of buffers using kfree()
+* when packets are received.
+* Jul 07, 1997 Jaspreet Singh o Added configurable TTL for UDP packets
+* o Added ability to discard multicast and
+* broadcast source addressed packets
+* Jun 27, 1997 Jaspreet Singh o Added FT1 monitor capabilities
+* New case (0x44) statement in if_send routine
+* Added a global variable rCount to keep track
+* of FT1 status enabled on the board.
+* May 29, 1997 Jaspreet Singh o Fixed major Flow Control Problem
+* With multiple boards a problem was seen where
+* the second board always stopped transmitting
+* packet after running for a while. The code
+* got into a stage where the interrupts were
+* disabled and dev->tbusy was set to 1.
+* This caused the If_send() routine to get into
+* the if clause for set_bit(0,dev->tbusy)
+* forever.
+* The code got into this stage due to an
+* interrupt occurring within the if clause for
+* set_bit(0,dev->tbusy). Since an interrupt
+* disables further transmit interrupts and
+* makes dev->tbusy = 0, this effect was undone
+* by making dev->tbusy = 1 in the if clause.
+* The Fix checks to see if Transmit interrupts
+* are disabled then do not make dev->tbusy = 1
+* Introduced a global variable: int_occur and
+* added tx_int_enabled in the wan_device
+* structure.
+* May 21, 1997 Jaspreet Singh o Fixed UDP Management for multiple
+* boards.
+*
+* Apr 25, 1997 Farhan Thawar o added UDP Management stuff
+* o fixed bug in if_send() and tx_intr() to
+* sleep and wakeup all devices
+* Mar 11, 1997 Farhan Thawar Version 3.1.1
+* o fixed (+1) bug in fr508_rx_intr()
+* o changed if_send() to return 0 if
+* wandev.critical() is true
+* o free socket buffer in if_send() if
+* returning 0
+* o added tx_intr() routine
+* Jan 30, 1997 Gene Kozin Version 3.1.0
+* o implemented exec() entry point
+* o fixed a bug causing driver configured as
+* a FR switch to be stuck in WAN_
+* mode
+* Jan 02, 1997 Gene Kozin Initial version.
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/workqueue.h>
+#include <linux/if_arp.h> /* ARPHRD_* defines */
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <asm/io.h> /* for inb(), outb(), etc. */
+#include <linux/time.h> /* for do_gettimeofday */
+#include <linux/in.h> /* sockaddr_in */
+#include <asm/errno.h>
+
+#include <linux/ip.h>
+#include <linux/if.h>
+
+#include <linux/if_wanpipe_common.h> /* Wanpipe Socket */
+#include <linux/if_wanpipe.h>
+
+#include <linux/sdla_fr.h> /* frame relay firmware API definitions */
+
+#include <asm/uaccess.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+
+#include <net/route.h> /* Dynamic Route Creation */
+#include <linux/etherdevice.h> /* eth_type_trans() used for bridging */
+#include <linux/random.h>
+
+/****** Defines & Macros ****************************************************/
+
+#define MAX_CMD_RETRY 10 /* max number of firmware retries */
+
+#define FR_HEADER_LEN 8 /* max encapsulation header size */
+#define FR_CHANNEL_MTU 1500 /* unfragmented logical channel MTU */
+
+/* Q.922 frame types */
+#define Q922_UI 0x03 /* Unnumbered Info frame */
+#define Q922_XID 0xAF
+
+/* DLCI configured or not */
+#define DLCI_NOT_CONFIGURED 0x00
+#define DLCI_CONFIG_PENDING 0x01
+#define DLCI_CONFIGURED 0x02
+
+/* CIR enabled or not */
+#define CIR_ENABLED 0x00
+#define CIR_DISABLED 0x01
+
+#define FRAME_RELAY_API 1
+#define MAX_BH_BUFF 10
+
+/* For handle_IPXWAN() */
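+/* Converts a 4-bit value (0-15) to its ASCII hex digit. */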
+#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
+
+/****** Data Structures *****************************************************/
+
+/* This is an extension of the 'struct device' we create for each network
+ * interface to keep the rest of the channel-specific data.
+ */
+typedef struct fr_channel
+{
+ wanpipe_common_t common;
+ char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
+ unsigned dlci_configured ; /* check whether configured or not */
+ unsigned cir_status; /* check whether CIR enabled or not */
+ unsigned dlci; /* logical channel number */
+ unsigned cir; /* committed information rate */
+ unsigned bc; /* committed burst size */
+ unsigned be; /* excess burst size */
+ unsigned mc; /* multicast support on or off */
+ unsigned tx_int_status; /* Transmit Interrupt Status */
+ unsigned short pkt_length; /* Packet Length */
+ unsigned long router_start_time;/* Router start time in seconds */
+ unsigned long tick_counter; /* counter for transmit time out */
+ char dev_pending_devtint; /* interface pending dev_tint() */
+ void *dlci_int_interface; /* pointer to the DLCI Interface */
+ unsigned long IB_addr; /* physical address of Interface Byte */
+ unsigned long state_tick; /* time of the last state change */
+ unsigned char enable_IPX; /* Enable/Disable the use of IPX */
+ unsigned long network_number; /* Internal Network Number for IPX*/
+ sdla_t *card; /* -> owner */
+ unsigned route_flag; /* Add/Rem dest addr in route tables */
+ unsigned inarp; /* Inverse Arp Request status */
+ long inarp_ready; /* Ready to send requests */
+ int inarp_interval; /* Time between InArp Requests */
+ unsigned long inarp_tick; /* InArp jiffies tick counter */
+ long interface_down; /* Bring interface down on disconnect */
+ struct net_device_stats ifstats; /* interface statistics */
+ if_send_stat_t drvstats_if_send;
+ rx_intr_stat_t drvstats_rx_intr;
+ pipe_mgmt_stat_t drvstats_gen;
+ unsigned long router_up_time;
+
+ unsigned short transmit_length;
+ struct sk_buff *delay_skb;
+
+ bh_data_t *bh_head; /* Circular buffer for chdlc_bh */
+ unsigned long tq_working;
+ volatile int bh_write;
+ volatile int bh_read;
+ atomic_t bh_buff_used;
+
+ /* Polling task queue. Each interface
+ * has its own task queue, which is used
+ * to defer events from the interrupt */
+ struct work_struct fr_poll_work;
+ struct timer_list fr_arp_timer;
+
+ u32 ip_local;
+ u32 ip_remote;
+ long config_dlci;
+ long unconfig_dlci;
+
+ /* Whether this interface should be setup as a gateway.
+ * Used by dynamic route setup code */
+ u8 gateway;
+
+ /* True interface type */
+ u8 true_if_encoding;
+ u8 fr_header[FR_HEADER_LEN];
+ char fr_header_len;
+
+} fr_channel_t;
+
+/* Route Flag options */
+#define NO_ROUTE 0x00
+#define ADD_ROUTE 0x01
+#define ROUTE_ADDED 0x02
+#define REMOVE_ROUTE 0x03
+#define ARP_REQ 0x04
+
+/* inarp options */
+#define INARP_NONE 0x00
+#define INARP_REQUEST 0x01
+#define INARP_CONFIGURED 0x02
+
+/* reasons for enabling the timer interrupt on the adapter */
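+/* These are OR-able bits in card->u.f.timer_int_enabled; timer_intr()
+ * services each pending request and then clears its bit. */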
+#define TMR_INT_ENABLED_UDP 0x01
+#define TMR_INT_ENABLED_UPDATE 0x02
+#define TMR_INT_ENABLED_ARP 0x04
+#define TMR_INT_ENABLED_UPDATE_STATE 0x08
+#define TMR_INT_ENABLED_CONFIG 0x10
+#define TMR_INT_ENABLED_UNCONFIG 0x20
+
+
+typedef struct dlci_status
+{
+ unsigned short dlci PACKED;
+ unsigned char state PACKED;
+} dlci_status_t;
+
+typedef struct dlci_IB_mapping
+{
+ unsigned short dlci PACKED;
+ unsigned long addr_value PACKED;
+} dlci_IB_mapping_t;
+
+/* This structure is used for DLCI list Tx interrupt mode. It is used to
+ enable the interrupt bit and set the packet length for transmission
+ */
+typedef struct fr_dlci_interface
+{
+ unsigned char gen_interrupt PACKED;
+ unsigned short packet_length PACKED;
+ unsigned char reserved PACKED;
+} fr_dlci_interface_t;
+
+/* variable for keeping track of enabling/disabling FT1 monitor status */
+static int rCount = 0;
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+/* variable for keeping track of number of interrupts generated during
+ * interrupt test routine
+ */
+static int Intr_test_counter;
+
+/****** Function Prototypes *************************************************/
+
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int update(struct wan_device *wandev);
+static int new_if(struct wan_device *wandev, struct net_device *dev,
+ wanif_conf_t *conf);
+static int del_if(struct wan_device *wandev, struct net_device *dev);
+static void disable_comm (sdla_t *card);
+
+/* WANPIPE-specific entry points */
+static int wpf_exec(struct sdla *card, void *u_cmd, void *u_data);
+
+/* Network device interface */
+static int if_init(struct net_device *dev);
+static int if_open(struct net_device *dev);
+static int if_close(struct net_device *dev);
+
+static void if_tx_timeout(struct net_device *dev);
+
+static int if_rebuild_hdr (struct sk_buff *skb);
+
+static int if_send(struct sk_buff *skb, struct net_device *dev);
+static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
+ struct sk_buff *skb);
+static struct net_device_stats *if_stats(struct net_device *dev);
+
+/* Interrupt handlers */
+static void fr_isr(sdla_t *card);
+static void rx_intr(sdla_t *card);
+static void tx_intr(sdla_t *card);
+static void timer_intr(sdla_t *card);
+static void spur_intr(sdla_t *card);
+
+/* Frame relay firmware interface functions */
+static int fr_read_version(sdla_t *card, char *str);
+static int fr_configure(sdla_t *card, fr_conf_t *conf);
+static int fr_dlci_configure(sdla_t *card, fr_dlc_conf_t *conf, unsigned dlci);
+static int fr_init_dlci (sdla_t *card, fr_channel_t *chan);
+static int fr_set_intr_mode (sdla_t *card, unsigned mode, unsigned mtu, unsigned short timeout);
+static int fr_comm_enable(sdla_t *card);
+static void fr_comm_disable(sdla_t *card);
+static int fr_get_err_stats(sdla_t *card);
+static int fr_get_stats(sdla_t *card);
+static int fr_add_dlci(sdla_t *card, int dlci);
+static int fr_activate_dlci(sdla_t *card, int dlci);
+static int fr_delete_dlci (sdla_t* card, int dlci);
+static int fr_issue_isf(sdla_t *card, int isf);
+static int fr_send(sdla_t *card, int dlci, unsigned char attr, int len,
+ void *buf);
+static int fr_send_data_header(sdla_t *card, int dlci, unsigned char attr, int len,
+ void *buf,unsigned char hdr_len);
+static unsigned int fr_send_hdr(sdla_t *card, int dlci, unsigned int offset);
+
+static int check_dlci_config (sdla_t *card, fr_channel_t *chan);
+static void initialize_rx_tx_buffers (sdla_t *card);
+
+
+/* Firmware asynchronous event handlers */
+static int fr_event(sdla_t *card, int event, fr_mbox_t *mbox);
+static int fr_modem_failure(sdla_t *card, fr_mbox_t *mbox);
+static int fr_dlci_change(sdla_t *card, fr_mbox_t *mbox);
+
+/* Miscellaneous functions */
+static int update_chan_state(struct net_device *dev);
+static void set_chan_state(struct net_device *dev, int state);
+static struct net_device *find_channel(sdla_t *card, unsigned dlci);
+static int is_tx_ready(sdla_t *card, fr_channel_t *chan);
+static unsigned int dec_to_uint(unsigned char *str, int len);
+static int reply_udp( unsigned char *data, unsigned int mbox_len );
+
+static int intr_test( sdla_t* card );
+static void init_chan_statistics( fr_channel_t* chan );
+static void init_global_statistics( sdla_t* card );
+static void read_DLCI_IB_mapping( sdla_t* card, fr_channel_t* chan );
+static int setup_for_delayed_transmit(struct net_device* dev,
+ struct sk_buff *skb);
+
+struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev);
+static int check_tx_status(sdla_t *card, struct net_device *dev);
+
+/* Frame Relay Socket API */
+static void trigger_fr_bh (fr_channel_t *);
+static void fr_bh(struct net_device *dev);
+static int fr_bh_cleanup(struct net_device *dev);
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
+
+static void trigger_fr_poll(struct net_device *dev);
+static void fr_poll(struct net_device *dev);
+//static void add_gateway(struct net_device *dev);
+
+static void trigger_unconfig_fr(struct net_device *dev);
+static void unconfig_fr (sdla_t *);
+
+static void trigger_config_fr (sdla_t *);
+static void config_fr (sdla_t *);
+
+
+/* Inverse ARP and Dynamic routing functions */
+int process_ARP(arphdr_1490_t *ArpPacket, sdla_t *card, struct net_device *dev);
+int is_arp(void *buf);
+int send_inarp_request(sdla_t *card, struct net_device *dev);
+
+static void trigger_fr_arp(struct net_device *dev);
+static void fr_arp (unsigned long data);
+
+
+/* Udp management functions */
+static int process_udp_mgmt_pkt(sdla_t *card);
+static int udp_pkt_type( struct sk_buff *skb, sdla_t *card );
+static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, int dlci);
+
+/* IPX functions */
+static void switch_net_numbers(unsigned char *sendpacket,
+ unsigned long network_number, unsigned char incoming);
+
+static int handle_IPXWAN(unsigned char *sendpacket, char *devname,
+ unsigned char enable_IPX, unsigned long network_number);
+
+/* Lock Functions: SMP supported */
+void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
+void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
+
+unsigned short calc_checksum (char *, int);
+static int setup_fr_header(struct sk_buff** skb,
+ struct net_device* dev, char op_mode);
+
+
+/****** Public Functions ****************************************************/
+
+/*============================================================================
+ * Frame relay protocol initialization routine.
+ *
+ * This routine is called by the main WANPIPE module during setup. At this
+ * point the adapter is completely initialized and the firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+int wpf_init(sdla_t *card, wandev_conf_t *conf)
+{
+
+ int err;
+ fr508_flags_t* flags;
+
+ union
+ {
+ char str[80];
+ fr_conf_t cfg;
+ } u;
+
+ fr_buf_info_t* buf_info;
+ int i;
+
+
+ printk(KERN_INFO "\n");
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_FR) {
+
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+
+ }
+
+ /* Initialize protocol-specific fields of adapter data space */
+ switch (card->hw.fwid) {
+
+ case SFID_FR508:
+ card->mbox = (void*)(card->hw.dpmbase +
+ FR508_MBOX_OFFS);
+ card->flags = (void*)(card->hw.dpmbase +
+ FR508_FLAG_OFFS);
+ if(card->hw.type == SDLA_S514) {
+ card->mbox += FR_MB_VECTOR;
+ card->flags += FR_MB_VECTOR;
+ }
+ card->isr = &fr_isr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ flags = card->flags;
+
+ /* Read firmware version. Note that when the adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+
+ if (fr_read_version(card, NULL) || fr_read_version(card, u.str))
+ return -EIO;
+
+ printk(KERN_INFO "%s: running frame relay firmware v%s\n",
+ card->devname, u.str);
+
+ /* Adjust configuration */
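+ /* Account for the FR encapsulation header, then clamp the MTU to the
+ * range the firmware accepts; values below the minimum fall back to
+ * FR_CHANNEL_MTU + FR_HEADER_LEN. */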
+ conf->mtu += FR_HEADER_LEN;
+ conf->mtu = (conf->mtu >= MIN_LGTH_FR_DATA_CFG) ?
+ min_t(unsigned int, conf->mtu, FR_MAX_NO_DATA_BYTES_IN_FRAME) :
+ FR_CHANNEL_MTU + FR_HEADER_LEN;
+
+ conf->bps = min_t(unsigned int, conf->bps, 2048000);
+
+ /* Initialize the configuration structure sent to the board to zero */
+ memset(&u.cfg, 0, sizeof(u.cfg));
+
+ memset(card->u.f.dlci_to_dev_map, 0, sizeof(card->u.f.dlci_to_dev_map));
+
+ /* Configure adapter firmware */
+
+ u.cfg.mtu = conf->mtu;
+ u.cfg.kbps = conf->bps / 1000;
+
+ u.cfg.cir_fwd = u.cfg.cir_bwd = 16;
+ u.cfg.bc_fwd = u.cfg.bc_bwd = 16;
+
+ u.cfg.options = 0x0000;
+ printk(KERN_INFO "%s: Global CIR enabled by Default\n", card->devname);
+
+ switch (conf->u.fr.signalling) {
+
+ case WANOPT_FR_ANSI:
+ u.cfg.options = 0x0000;
+ break;
+
+ case WANOPT_FR_Q933:
+ u.cfg.options |= 0x0200;
+ break;
+
+ case WANOPT_FR_LMI:
+ u.cfg.options |= 0x0400;
+ break;
+
+ case WANOPT_NO:
+ u.cfg.options |= 0x0800;
+ break;
+ default:
+ printk(KERN_INFO "%s: Illegal Signalling option\n",
+ card->wandev.name);
+ return -EINVAL;
+ }
+
+
+ card->wandev.signalling = conf->u.fr.signalling;
+
+ if (conf->station == WANOPT_CPE) {
+
+
+ if (conf->u.fr.signalling == WANOPT_NO){
+ printk(KERN_INFO
+ "%s: ERROR - For NO signalling, station must be set to Node!",
+ card->devname);
+ return -EINVAL;
+ }
+
+ u.cfg.station = 0;
+ u.cfg.options |= 0x8000; /* auto config DLCI */
+ card->u.f.dlci_num = 0;
+
+ } else {
+
+ u.cfg.station = 1; /* switch emulation mode */
+
+ /* For switch emulation we have to create a list of dlci(s)
+ * that will be sent to the global SET_DLCI_CONFIGURATION
+ * command in the fr_configure() routine.
+ */
+
+ card->u.f.dlci_num = min_t(unsigned int, max_t(unsigned int, conf->u.fr.dlci_num, 1), 100);
+
+ for ( i = 0; i < card->u.f.dlci_num; i++) {
+
+ card->u.f.node_dlci[i] = (unsigned short)
+ conf->u.fr.dlci[i] ? conf->u.fr.dlci[i] : 16;
+
+ }
+ }
+
+ if (conf->clocking == WANOPT_INTERNAL)
+ u.cfg.port |= 0x0001;
+
+ if (conf->interface == WANOPT_RS232)
+ u.cfg.port |= 0x0002;
+
+ if (conf->u.fr.t391)
+ u.cfg.t391 = min_t(unsigned int, conf->u.fr.t391, 30);
+ else
+ u.cfg.t391 = 5;
+
+ if (conf->u.fr.t392)
+ u.cfg.t392 = min_t(unsigned int, conf->u.fr.t392, 30);
+ else
+ u.cfg.t392 = 15;
+
+ if (conf->u.fr.n391)
+ u.cfg.n391 = min_t(unsigned int, conf->u.fr.n391, 255);
+ else
+ u.cfg.n391 = 2;
+
+ if (conf->u.fr.n392)
+ u.cfg.n392 = min_t(unsigned int, conf->u.fr.n392, 10);
+ else
+ u.cfg.n392 = 3;
+
+ if (conf->u.fr.n393)
+ u.cfg.n393 = min_t(unsigned int, conf->u.fr.n393, 10);
+ else
+ u.cfg.n393 = 4;
+
+ if (fr_configure(card, &u.cfg))
+ return -EIO;
+
+ if (card->hw.type == SDLA_S514) {
+
+ buf_info = (void*)(card->hw.dpmbase + FR_MB_VECTOR +
+ FR508_RXBC_OFFS);
+
+ card->rxmb = (void*)(buf_info->rse_next + card->hw.dpmbase);
+
+ card->u.f.rxmb_base =
+ (void*)(buf_info->rse_base + card->hw.dpmbase);
+
+ card->u.f.rxmb_last =
+ (void*)(buf_info->rse_base +
+ (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) +
+ card->hw.dpmbase);
+ }else{
+ buf_info = (void*)(card->hw.dpmbase + FR508_RXBC_OFFS);
+
+ card->rxmb = (void*)(buf_info->rse_next -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ card->u.f.rxmb_base =
+ (void*)(buf_info->rse_base -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ card->u.f.rxmb_last =
+ (void*)(buf_info->rse_base +
+ (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) -
+ FR_MB_VECTOR + card->hw.dpmbase);
+ }
+
+ card->u.f.rx_base = buf_info->buf_base;
+ card->u.f.rx_top = buf_info->buf_top;
+
+ card->u.f.tx_interrupts_pending = 0;
+
+ card->wandev.mtu = conf->mtu;
+ card->wandev.bps = conf->bps;
+ card->wandev.interface = conf->interface;
+ card->wandev.clocking = conf->clocking;
+ card->wandev.station = conf->station;
+ card->poll = NULL;
+ card->exec = &wpf_exec;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = &del_if;
+ card->wandev.state = WAN_DISCONNECTED;
+ card->wandev.ttl = conf->ttl;
+ card->wandev.udp_port = conf->udp_port;
+ card->disable_comm = &disable_comm;
+ card->u.f.arp_dev = NULL;
+
+ /* Initialize global statistics for a card */
+ init_global_statistics( card );
+
+ card->TracingEnabled = 0;
+
+ /* Interrupt Test */
+ Intr_test_counter = 0;
+ card->intr_mode = INTR_TEST_MODE;
+ err = intr_test( card );
+
+ printk(KERN_INFO "%s: End of Interrupt Test rc=0x%x count=%i\n",
+ card->devname,err,Intr_test_counter);
+
+ if (err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
+ printk(KERN_ERR "%s: Interrupt Test Failed, Counter: %i\n",
+ card->devname, Intr_test_counter);
+ printk(KERN_ERR "Please choose another interrupt\n");
+ err = -EIO;
+ return err;
+ }
+
+ printk(KERN_INFO "%s: Interrupt Test Passed, Counter: %i\n",
+ card->devname, Intr_test_counter);
+
+
+ /* Apr 28 2000. Nenad Corbic
+ * Enable communications here, not in if_open or new_if, since
+ * interfaces come down when the link is disconnected.
+ */
+
+ /* If you enable comms and then set ints, you get a Tx int as you
+ * perform the SET_INT_TRIGGERS command. So, we only set int
+ * triggers and then adjust the interrupt mask (to disable Tx ints)
+ * before enabling comms.
+ */
+ if (fr_set_intr_mode(card, (FR_INTR_RXRDY | FR_INTR_TXRDY |
+ FR_INTR_DLC | FR_INTR_TIMER | FR_INTR_TX_MULT_DLCIs) ,
+ card->wandev.mtu, 0)) {
+ return -EIO;
+ }
+
+ flags->imask &= ~(FR_INTR_TXRDY | FR_INTR_TIMER);
+
+ if (fr_comm_enable(card)) {
+ return -EIO;
+ }
+ wanpipe_set_state(card, WAN_CONNECTED);
+ spin_lock_init(&card->u.f.if_send_lock);
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/******* WAN Device Driver Entry Points *************************************/
+
+/*============================================================================
+ * Update device status & statistics.
+ */
+static int update(struct wan_device* wandev)
+{
+ volatile sdla_t* card;
+ unsigned long timeout;
+ fr508_flags_t* flags;
+
+ /* sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ card = wandev->private;
+ flags = card->flags;
+
+
+ card->u.f.update_comms_stats = 1;
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
+ flags->imask |= FR_INTR_TIMER;
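+ /* Busy-wait up to one second (HZ jiffies) for the timer interrupt to
+ * collect the statistics and clear update_comms_stats. */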
+ timeout = jiffies;
+ for(;;) {
+ if(card->u.f.update_comms_stats == 0)
+ break;
+ if ((jiffies - timeout) > (1 * HZ)){
+ card->u.f.update_comms_stats = 0;
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created)
+ */
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf)
+{
+ sdla_t* card = wandev->private;
+ fr_channel_t* chan;
+ int dlci = 0;
+ int err = 0;
+
+
+ if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
+
+ printk(KERN_INFO "%s: Invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+ /* allocate and initialize private data */
+ chan = kmalloc(sizeof(fr_channel_t), GFP_KERNEL);
+
+ if (chan == NULL)
+ return -ENOMEM;
+
+ memset(chan, 0, sizeof(fr_channel_t));
+ strcpy(chan->name, conf->name);
+ chan->card = card;
+
+ /* verify media address */
+ if (is_digit(conf->addr[0])) {
+
+ dlci = dec_to_uint(conf->addr, 0);
+
+ if (dlci && (dlci <= HIGHEST_VALID_DLCI)) {
+
+ chan->dlci = dlci;
+
+ } else {
+
+ printk(KERN_ERR
+ "%s: Invalid DLCI %u on interface %s!\n",
+ wandev->name, dlci, chan->name);
+ err = -EINVAL;
+ }
+
+ } else {
+ printk(KERN_ERR
+ "%s: Invalid media address on interface %s!\n",
+ wandev->name, chan->name);
+ err = -EINVAL;
+ }
+
+ if ((chan->true_if_encoding = conf->true_if_encoding) == WANOPT_YES){
+ printk(KERN_INFO
+ "%s: Enabling, true interface type encoding.\n",
+ card->devname);
+ }
+
+
+
+ /* Setup wanpipe as a router (WANPIPE) even if it is
+ * a bridged DLCI, or as an API
+ */
+ if (strcmp(conf->usedby, "WANPIPE") == 0 ||
+ strcmp(conf->usedby, "BRIDGE") == 0 ||
+ strcmp(conf->usedby, "BRIDGE_N") == 0){
+
+ if(strcmp(conf->usedby, "WANPIPE") == 0){
+ chan->common.usedby = WANPIPE;
+
+ printk(KERN_INFO "%s: Running in WANPIPE mode.\n",
+ card->devname);
+
+ }else if(strcmp(conf->usedby, "BRIDGE") == 0){
+
+ chan->common.usedby = BRIDGE;
+
+ printk(KERN_INFO "%s: Running in WANPIPE (BRIDGE) mode.\n",
+ card->devname);
+ }else if( strcmp(conf->usedby, "BRIDGE_N") == 0 ){
+
+ chan->common.usedby = BRIDGE_NODE;
+
+ printk(KERN_INFO "%s: Running in WANPIPE (BRIDGE_NODE) mode.\n",
+ card->devname);
+ }
+
+ if (!err){
+ /* Dynamic interface configuration option.
+ * On disconnect, if the option is selected,
+ * the interface will be brought down */
+ if (conf->if_down == WANOPT_YES){
+ set_bit(DYN_OPT_ON,&chan->interface_down);
+ printk(KERN_INFO
+ "%s: Dynamic interface configuration enabled.\n",
+ card->devname);
+ }
+ }
+
+ } else if(strcmp(conf->usedby, "API") == 0){
+
+ chan->common.usedby = API;
+ printk(KERN_INFO "%s: Running in API mode.\n",
+ wandev->name);
+ }
+
+ if (err) {
+
+ kfree(chan);
+ return err;
+ }
+
+ /* place cir,be,bc and other channel specific information into the
+ * chan structure
+ */
+ if (conf->cir) {
+
+ chan->cir = max_t(unsigned int, 1,
+ min_t(unsigned int, conf->cir, 512));
+ chan->cir_status = CIR_ENABLED;
+
+
+ /* If CIR is enabled, force BC to equal CIR
+ * this solves number of potential problems if CIR is
+ * set and BC is not
+ */
+ chan->bc = chan->cir;
+
+ if (conf->be){
+ chan->be = max_t(unsigned int,
+ 0, min_t(unsigned int, conf->be, 511));
+ }else{
+ conf->be = 0;
+ }
+
+ printk (KERN_INFO "%s: CIR enabled for DLCI %i \n",
+ wandev->name,chan->dlci);
+ printk (KERN_INFO "%s: CIR = %i ; BC = %i ; BE = %i\n",
+ wandev->name,chan->cir,chan->bc,chan->be);
+
+
+ }else{
+ chan->cir_status = CIR_DISABLED;
+ printk (KERN_INFO "%s: CIR disabled for DLCI %i\n",
+ wandev->name,chan->dlci);
+ }
+
+ chan->mc = conf->mc;
+
+ if (conf->inarp == WANOPT_YES){
+ printk(KERN_INFO "%s: Inverse ARP Support Enabled\n",card->devname);
+ chan->inarp = conf->inarp ? INARP_REQUEST : INARP_NONE;
+ chan->inarp_interval = conf->inarp_interval ? conf->inarp_interval : 10;
+ }else{
+ printk(KERN_INFO "%s: Inverse ARP Support Disabled\n",card->devname);
+ chan->inarp = INARP_NONE;
+ chan->inarp_interval = 10;
+ }
+
+
+ chan->dlci_configured = DLCI_NOT_CONFIGURED;
+
+
+ /*FIXME: IPX disabled in this WANPIPE version */
+ if (conf->enable_IPX == WANOPT_YES){
+ printk(KERN_INFO "%s: ERROR - This version of WANPIPE doesn't support IPX\n",
+ card->devname);
+ kfree(chan);
+ return -EINVAL;
+ }else{
+ chan->enable_IPX = WANOPT_NO;
+ }
+
+ if (conf->network_number){
+ chan->network_number = conf->network_number;
+ }else{
+ chan->network_number = 0xDEADBEEF;
+ }
+
+ chan->route_flag = NO_ROUTE;
+
+ init_chan_statistics(chan);
+
+ chan->transmit_length = 0;
+
+ /* prepare network device data space for registration */
+ strcpy(dev->name,chan->name);
+
+ dev->init = &if_init;
+ dev->priv = chan;
+
+ /* Initialize FR Polling Task Queue
+ * We need a poll routine for each network
+ * interface.
+ */
+ INIT_WORK(&chan->fr_poll_work, (void *)fr_poll, dev);
+
+ init_timer(&chan->fr_arp_timer);
+ chan->fr_arp_timer.data=(unsigned long)dev;
+ chan->fr_arp_timer.function = fr_arp;
+
+ wandev->new_if_cnt++;
+
+ /* Tells us whether this interface is a
+ * gateway or not */
+ if ((chan->gateway = conf->gateway) == WANOPT_YES){
+ printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
+ card->devname,dev->name);
+ }
+
+ /* M. Grant Patch Apr 28 2000
+ * Disallow duplicate dlci configurations. */
+ if (card->u.f.dlci_to_dev_map[chan->dlci] != NULL) {
+ kfree(chan);
+ return -EBUSY;
+ }
+
+ /* Configure this dlci later, when
+ * the interface comes up, i.e. when if_open()
+ * executes */
+ set_bit(0,&chan->config_dlci);
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/*============================================================================
+ * Delete logical channel.
+ */
+static int del_if(struct wan_device* wandev, struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ unsigned long smp_flags=0;
+
+ /* This interface is dead, make sure the
+ * ARP timer is stopped */
+ del_timer(&chan->fr_arp_timer);
+
+ /* If we are a NODE, we must unconfigure this DLCI
+ * Trigger an unconfigure command that will
+ * be executed in timer interrupt. We must wait
+ * for the command to complete. */
+ trigger_unconfig_fr(dev);
+
+ lock_adapter_irq(&wandev->lock, &smp_flags);
+ wandev->new_if_cnt--;
+ unlock_adapter_irq(&wandev->lock, &smp_flags);
+
+ return 0;
+}
+
+
+/*=====================================================================
+ * disable_comm
+ *
+ * Description:
+ * Disable communications.
+ * This code runs in shutdown (sdlamain.c)
+ * under critical flag. Therefore it is not
+ * necessary to set a critical flag here
+ *
+ * Usage:
+ *	Communications are disabled only on a card
+ * shutdown.
+ */
+
+static void disable_comm (sdla_t *card)
+{
+ printk(KERN_INFO "%s: Disabling Communications!\n",
+ card->devname);
+ fr_comm_disable(card);
+}
+
+/****** WANPIPE-specific entry points ***************************************/
+
+/*============================================================================
+ * Execute adapter interface command.
+ */
+static int wpf_exec (struct sdla* card, void* u_cmd, void* u_data)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err, len;
+ fr_cmd_t cmd;
+
+ if(copy_from_user((void*)&cmd, u_cmd, sizeof(cmd)))
+ return -EFAULT;
+
+ /* execute command */
+ do
+ {
+ memcpy(&mbox->cmd, &cmd, sizeof(cmd));
+
+ if (cmd.length){
+ if( copy_from_user((void*)&mbox->data, u_data, cmd.length))
+ return -EFAULT;
+ }
+
+ if (sdla_exec(mbox))
+ err = mbox->cmd.result;
+ else
+ return -EIO;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ /* return result */
+ if (copy_to_user(u_cmd, (void*)&mbox->cmd, sizeof(fr_cmd_t)))
+ return -EFAULT;
+
+ len = mbox->cmd.length;
+
+ if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len))
+ return -EFAULT;
+ return 0;
+}
+
+/****** Network Device Interface ********************************************/
+
+/*============================================================================
+ * Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration.
+ */
+static int if_init(struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ struct wan_device* wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_header = NULL;
+ dev->rebuild_header = &if_rebuild_hdr;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+ dev->tx_timeout = &if_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ if (chan->common.usedby == WANPIPE || chan->common.usedby == API){
+
+ /* Initialize media-specific parameters */
+ if (chan->true_if_encoding){
+ dev->type = ARPHRD_DLCI; /* This breaks tcpdump */
+ }else{
+ dev->type = ARPHRD_PPP; /* ARP h/w type */
+ }
+
+ dev->flags |= IFF_POINTOPOINT;
+ dev->flags |= IFF_NOARP;
+
+ /* Enable Multicast addressing */
+ if (chan->mc == WANOPT_YES){
+ dev->flags |= IFF_MULTICAST;
+ }
+
+ dev->mtu = wandev->mtu - FR_HEADER_LEN;
+ /* For an API, the maximum number of bytes that the stack will pass
+ to the driver is (dev->mtu + dev->hard_header_len). So, adjust the
+ mtu so that a frame of maximum size can be transmitted by the API.
+ */
+ if(chan->common.usedby == API) {
+ dev->mtu += (sizeof(api_tx_hdr_t) - FR_HEADER_LEN);
+ }
+
+ dev->hard_header_len = FR_HEADER_LEN;/* media header length */
+ dev->addr_len = 2; /* hardware address length */
+ *(unsigned short*)dev->dev_addr = htons(chan->dlci);
+
+ /* Set transmit buffer queue length */
+ dev->tx_queue_len = 100;
+
+ }else{
+
+ /* Setup the interface for Bridging */
+ int hw_addr=0;
+ ether_setup(dev);
+
+ /* Use a random number to generate the MAC address */
+ memcpy(dev->dev_addr, "\xFE\xFC\x00\x00\x00\x00", 6);
+ get_random_bytes(&hw_addr, sizeof(hw_addr));
+ *(int *)(dev->dev_addr + 2) += hw_addr;
+ }
+
+ /* Initialize hardware parameters (just for reference) */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = wandev->maddr;
+ dev->mem_end = wandev->maddr + wandev->msize - 1;
+ SET_MODULE_OWNER(dev);
+
+ return 0;
+}
+
+/*============================================================================
+ * Open network interface.
+ * o if this is the first open, then enable communications and interrupts.
+ * o prevent module from unloading by incrementing use count
+ *
+ * Return 0 if O.k. or errno.
+ */
+static int if_open(struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ int err = 0;
+ struct timeval tv;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Initialize the task queue */
+ chan->tq_working=0;
+
+ INIT_WORK(&chan->common.wanpipe_work, (void *)fr_bh, dev);
+
+ /* Allocate and initialize BH circular buffer */
+ chan->bh_head = kmalloc((sizeof(bh_data_t)*MAX_BH_BUFF),GFP_ATOMIC);
+ if (chan->bh_head == NULL)
+ return -ENOMEM;
+ memset(chan->bh_head,0,(sizeof(bh_data_t)*MAX_BH_BUFF));
+ atomic_set(&chan->bh_buff_used, 0);
+
+ netif_start_queue(dev);
+
+ wanpipe_open(card);
+ do_gettimeofday( &tv );
+ chan->router_start_time = tv.tv_sec;
+
+ if (test_bit(0,&chan->config_dlci)){
+ trigger_config_fr (card);
+ }else if (chan->inarp == INARP_REQUEST){
+ trigger_fr_arp(dev);
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Close network interface.
+ * o if this is the last open, then disable communications and interrupts.
+ * o reset flags.
+ */
+static int if_close(struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+
+ if (chan->inarp == INARP_CONFIGURED) {
+ chan->inarp = INARP_REQUEST;
+ }
+
+ netif_stop_queue(dev);
+ wanpipe_close(card);
+
+ return 0;
+}
+
+/*============================================================================
+ * Re-build media header.
+ *
+ * Return: 1 physical address resolved.
+ * 0 physical address not resolved
+ */
+static int if_rebuild_hdr (struct sk_buff* skb)
+{
+ struct net_device *dev = skb->dev;
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+
+ printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
+ card->devname, dev->name);
+ return 1;
+}
+
+/*============================================================================
+ * Handle transmit timeout event from netif watchdog
+ */
+static void if_tx_timeout(struct net_device *dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ /* If our device stays busy for at least 5 seconds then we will
+ * kick start the device by making dev->tbusy = 0. We expect
+ * that our device never stays busy more than 5 seconds. So this
+ * is only used as a last resort.
+ */
+
+ chan->drvstats_if_send.if_send_tbusy++;
+ ++chan->ifstats.collisions;
+
+ printk (KERN_INFO "%s: Transmit timed out on %s\n",
+ card->devname, dev->name);
+ chan->drvstats_if_send.if_send_tbusy_timeout++;
+ netif_wake_queue (dev);
+
+}
+
+
+/*============================================================================
+ * Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission) to block a timer-based
+ * transmit from overlapping.
+ * o set critical flag when accessing board.
+ * o check link state. If link is not up, then drop the packet.
+ * o check channel status. If it's down then initiate a call.
+ * o pass a packet to corresponding WAN device.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ *
+ * 2. Using netif_start_queue() and netif_stop_queue()
+ * will inhibit further transmit requests from the protocol stack
+ * and can be used for flow control with protocol layer.
+ */
+static int if_send(struct sk_buff* skb, struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ int err;
+ unsigned char *sendpacket;
+ fr508_flags_t* adptr_flags = card->flags;
+ int udp_type;
+ long delay_tx_queued = 0;
+ unsigned long smp_flags=0;
+ unsigned char attr = 0;
+
+ chan->drvstats_if_send.if_send_entry++;
+
+ netif_stop_queue(dev);
+
+ if (skb == NULL) {
+ /* if we get here, some higher layer thinks we've missed a
+ * tx-done interrupt.
+ */
+ printk(KERN_INFO "%s: interface %s got kicked!\n",
+ card->devname, dev->name);
+ chan->drvstats_if_send.if_send_skb_null ++;
+
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ /* If a peripheral task is running just drop packets */
+ if (test_bit(PERI_CRIT, &card->wandev.critical)){
+
+ printk(KERN_INFO "%s: Critical in if_send(): Peripheral running!\n",
+ card->devname);
+
+ dev_kfree_skb_any(skb);
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ /* We must set the 'tbusy' flag if we already have a packet queued for
+ transmission in the transmit interrupt handler. However, we must
+ ensure that the transmit interrupt does not reset the 'tbusy' flag
+ just before we set it, as this will result in a "transmit timeout".
+ */
+ set_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
+ if(chan->transmit_length) {
+ netif_stop_queue(dev);
+ chan->tick_counter = jiffies;
+ clear_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
+ return 1;
+ }
+ clear_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
+
+ /* Move the if_header() code to here. By inserting the frame
+ * relay header in if_header() we would break
+ * tcpdump and other packet sniffers */
+ chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
+ if (chan->fr_header_len < 0 ){
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+
+ dev_kfree_skb_any(skb);
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ sendpacket = skb->data;
+
+ udp_type = udp_pkt_type(skb, card);
+
+ if(udp_type != UDP_INVALID_TYPE) {
+ if(store_udp_mgmt_pkt(udp_type, UDP_PKT_FRM_STACK, card, skb,
+ chan->dlci)) {
+ adptr_flags->imask |= FR_INTR_TIMER;
+ if (udp_type == UDP_FPIPE_TYPE){
+ chan->drvstats_if_send.
+ if_send_PIPE_request ++;
+ }
+ }
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ //FIXME: can we do better than sendpacket[2]?
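+ /* 0x45 is the version/IHL byte of a typical IPv4 header. The check
+ * assumes the RFC 1490 routed-IP encapsulation (0x03 UI, 0xCC NLPID)
+ * prepended above, which places the IP header at offset 2. */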
+ if ((chan->common.usedby == WANPIPE) && (sendpacket[2] == 0x45)) {
+
+ /* check to see if the source IP address is a broadcast or */
+ /* multicast IP address */
+ if(chk_bcast_mcast_addr(card, dev, skb)){
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ netif_start_queue(dev);
+ return 0;
+ }
+ }
+
+
+ /* Lock the S514/S508 card: SMP Supported */
+ s508_s514_lock(card,&smp_flags);
+
+ if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+
+ chan->drvstats_if_send.if_send_critical_non_ISR ++;
+ chan->ifstats.tx_dropped ++;
+ printk(KERN_INFO "%s Critical in IF_SEND: if_send() already running!\n",
+ card->devname);
+ goto if_send_start_and_exit;
+ }
+
+ /* API packet check: minimum packet size must be greater than
+ * the 16-byte API header */
+ if((chan->common.usedby == API) && (skb->len <= sizeof(api_tx_hdr_t))) {
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+
+
+ goto if_send_start_and_exit;
+
+ }else{
+ /* During API transmission, get rid of the API header */
+ if (chan->common.usedby == API) {
+ api_tx_hdr_t* api_tx_hdr;
+ api_tx_hdr = (api_tx_hdr_t*)&skb->data[0x00];
+ attr = api_tx_hdr->attr;
+ skb_pull(skb,sizeof(api_tx_hdr_t));
+ }
+ }
+
+ if (card->wandev.state != WAN_CONNECTED) {
+ chan->drvstats_if_send.if_send_wan_disconnected ++;
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+
+ } else if (chan->common.state != WAN_CONNECTED) {
+ chan->drvstats_if_send.if_send_dlci_disconnected ++;
+
+ /* Update the DLCI state in timer interrupt */
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UPDATE_STATE;
+ adptr_flags->imask |= FR_INTR_TIMER;
+
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+
+ } else if (!is_tx_ready(card, chan)) {
+ /* No tx buffers available, store for delayed transmit */
+ if (!setup_for_delayed_transmit(dev, skb)){
+ set_bit(1,&delay_tx_queued);
+ }
+ chan->drvstats_if_send.if_send_no_bfrs++;
+
+ } else if (!skb->protocol) {
+ /* No protocols drop packet */
+ chan->drvstats_if_send.if_send_protocol_error ++;
+ ++card->wandev.stats.tx_errors;
+
+ } else if (test_bit(ARP_CRIT,&card->wandev.critical)){
+ /* We are trying to send an ARP Packet, block IP data until
+ * ARP is sent */
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+
+ } else {
+ //FIXME: IPX is not implemented in this version of Frame Relay ?
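+ /* This byte pattern matches RFC 1490 SNAP encapsulation carrying
+ * Ethertype 0x8137 (IPX): pad 0x00, NLPID 0x80, OUI, then Ethertype. */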
+ if((chan->common.usedby == WANPIPE) &&
+ sendpacket[1] == 0x00 &&
+ sendpacket[2] == 0x80 &&
+ sendpacket[6] == 0x81 &&
+ sendpacket[7] == 0x37) {
+
+ if( chan->enable_IPX ) {
+ switch_net_numbers(sendpacket,
+ chan->network_number, 0);
+ } else {
+ //FIXME: Take this out when IPX is fixed
+ printk(KERN_INFO
+ "%s: WARNING: Unsupported IPX data in send, packet dropped\n",
+ card->devname);
+ }
+
+ }else{
+ err = fr_send_data_header(card, chan->dlci, attr, skb->len, skb->data, chan->fr_header_len);
+ if (err) {
+ switch(err) {
+ case FRRES_CIR_OVERFLOW:
+ case FRRES_BUFFER_OVERFLOW:
+ if (!setup_for_delayed_transmit(dev, skb)){
+ set_bit(1,&delay_tx_queued);
+ }
+ chan->drvstats_if_send.
+ if_send_adptr_bfrs_full ++;
+ break;
+
+ case FRRES_TOO_LONG:
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Error: Frame too long, transmission failed %i\n",
+ card->devname, (unsigned int)skb->len);
+ }
+ /* Drop down to default */
+ default:
+ chan->drvstats_if_send.
+ if_send_dlci_disconnected ++;
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ break;
+ }
+ } else {
+ chan->drvstats_if_send.
+ if_send_bfr_passed_to_adptr++;
+ ++chan->ifstats.tx_packets;
+ ++card->wandev.stats.tx_packets;
+
+ chan->ifstats.tx_bytes += skb->len;
+ card->wandev.stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ }
+ }
+ }
+
+if_send_start_and_exit:
+
+ netif_start_queue(dev);
+
+ /* If we queued the packet for transmission, we must not
+ * deallocate it. The packet is unlinked from the IP stack,
+ * not copied. Therefore, we must keep the original packet */
+ if (!test_bit(1,&delay_tx_queued)) {
+ dev_kfree_skb_any(skb);
+ }else{
+ adptr_flags->imask |= FR_INTR_TXRDY;
+ card->u.f.tx_interrupts_pending ++;
+ }
+
+ clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
+
+ s508_s514_unlock(card,&smp_flags);
+
+ return 0;
+}
+
+
+
+/*============================================================================
+ * Setup so that a frame can be transmitted on the occurrence of a transmit
+ * interrupt.
+ */
+static int setup_for_delayed_transmit(struct net_device* dev,
+ struct sk_buff *skb)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ fr_dlci_interface_t* dlci_interface;
+ int len = skb->len;
+
+ /* Check that the dlci is properly configured,
+ * before using tx interrupt */
+ if (!chan->dlci_int_interface){
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: ERROR on DLCI %i: Not configured properly !\n",
+ card->devname, chan->dlci);
+ printk(KERN_INFO "%s: Please contact Sangoma Technologies\n",
+ card->devname);
+ }
+ return 1;
+ }
+
+ dlci_interface = chan->dlci_int_interface;
+
+ if(chan->transmit_length) {
+ printk(KERN_INFO "%s: Big mess in setup_for_del...\n",
+ card->devname);
+ return 1;
+ }
+
+ if(len > FR_MAX_NO_DATA_BYTES_IN_FRAME) {
+ //FIXME: increment some statistic
+ return 1;
+ }
+
+ skb_unlink(skb);
+
+ chan->transmit_length = len;
+ chan->delay_skb = skb;
+
+ dlci_interface->gen_interrupt |= FR_INTR_TXRDY;
+ dlci_interface->packet_length = len;
+
+ /* Turn on TX interrupt at the end of if_send */
+ return 0;
+}
+
+
+/*============================================================================
+ * Check to see if the packet to be transmitted contains a broadcast or
+ * multicast source IP address.
+ * Return 0 if not broadcast/multicast address, otherwise return 1.
+ */
+
+static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
+ struct sk_buff *skb)
+{
+ u32 src_ip_addr;
+ u32 broadcast_ip_addr = 0;
+ struct in_device *in_dev;
+ fr_channel_t* chan = dev->priv;
+
+ /* read the IP source address from the outgoing packet */
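+ /* Offset 14 assumes the 2-byte routed-IP encapsulation followed by the
+ * IP header, whose source address field sits at IP offset 12. */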
+ src_ip_addr = *(u32 *)(skb->data + 14);
+
+ /* read the IP broadcast address for the device */
+ in_dev = dev->ip_ptr;
+ if(in_dev != NULL) {
+ struct in_ifaddr *ifa= in_dev->ifa_list;
+ if(ifa != NULL)
+ broadcast_ip_addr = ifa->ifa_broadcast;
+ else
+ return 0;
+ }
+
+ /* check if the IP Source Address is a Broadcast address */
+ if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
+ printk(KERN_INFO
+ "%s: Broadcast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ /* check if the IP Source Address is a Multicast address */
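+ /* i.e. anything from 224.0.0.1 up to 255.255.255.254, the class D/E
+ * range with the endpoints excluded */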
+ if((chan->mc == WANOPT_NO) && (ntohl(src_ip_addr) >= 0xE0000001) &&
+ (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
+ printk(KERN_INFO
+ "%s: Multicast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Reply to UDP Management system.
+ * Return the total length of the reply packet.
+ */
+static int reply_udp( unsigned char *data, unsigned int mbox_len )
+{
+ unsigned short len, udp_length, temp, ip_length;
+ unsigned long ip_temp;
+ int even_bound = 0;
+
+
+ fr_udp_pkt_t *fr_udp_pkt = (fr_udp_pkt_t *)data;
+
+ /* Set length of packet */
+ len = //sizeof(fr_encap_hdr_t)+
+ sizeof(ip_pkt_t)+
+ sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+
+ /* fill in UDP reply */
+ fr_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
+
+ /* fill in UDP length */
+ udp_length = sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+
+ /* put it on an even boundary */
+ if ( udp_length & 0x0001 ) {
+ udp_length += 1;
+ len += 1;
+ even_bound = 1;
+ }
+
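+ /* Byte-swap the length by hand into network byte order (the S508/S514
+ * host is assumed to be little-endian). */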
+ temp = (udp_length<<8)|(udp_length>>8);
+ fr_udp_pkt->udp_pkt.udp_length = temp;
+
+ /* swap UDP ports */
+ temp = fr_udp_pkt->udp_pkt.udp_src_port;
+ fr_udp_pkt->udp_pkt.udp_src_port =
+ fr_udp_pkt->udp_pkt.udp_dst_port;
+ fr_udp_pkt->udp_pkt.udp_dst_port = temp;
+
+
+
+ /* add UDP pseudo header */
+ temp = 0x1100;
+ *((unsigned short *)
+ (fr_udp_pkt->data+mbox_len+even_bound)) = temp;
+ temp = (udp_length<<8)|(udp_length>>8);
+ *((unsigned short *)
+ (fr_udp_pkt->data+mbox_len+even_bound+2)) = temp;
+
+ /* calculate UDP checksum */
+ fr_udp_pkt->udp_pkt.udp_checksum = 0;
+
+ fr_udp_pkt->udp_pkt.udp_checksum =
+ calc_checksum(&data[UDP_OFFSET/*+sizeof(fr_encap_hdr_t)*/],
+ udp_length+UDP_OFFSET);
+
+ /* fill in IP length */
+ ip_length = udp_length + sizeof(ip_pkt_t);
+ temp = (ip_length<<8)|(ip_length>>8);
+ fr_udp_pkt->ip_pkt.total_length = temp;
+
+ /* swap IP addresses */
+ ip_temp = fr_udp_pkt->ip_pkt.ip_src_address;
+ fr_udp_pkt->ip_pkt.ip_src_address =
+ fr_udp_pkt->ip_pkt.ip_dst_address;
+ fr_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
+
+
+ /* fill in IP checksum */
+ fr_udp_pkt->ip_pkt.hdr_checksum = 0;
+ fr_udp_pkt->ip_pkt.hdr_checksum =
+ calc_checksum(&data[/*sizeof(fr_encap_hdr_t)*/0],
+ sizeof(ip_pkt_t));
+
+ return len;
+} /* reply_udp */
+
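+/*============================================================================
+ * Compute a 16-bit one's-complement checksum (RFC 1071 style) over 'len'
+ * bytes, returning 0xFFFF in place of zero as the UDP convention requires.
+ */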
+unsigned short calc_checksum (char *data, int len)
+{
+ unsigned short temp;
+ unsigned long sum=0;
+ int i;
+
+ for( i = 0; i <len; i+=2 ) {
+ memcpy(&temp,&data[i],2);
+ sum += (unsigned long)temp;
+ }
+
+ while (sum >> 16 ) {
+ sum = (sum & 0xffffUL) + (sum >> 16);
+ }
+
+ temp = (unsigned short)sum;
+ temp = ~temp;
+
+ if( temp == 0 )
+ temp = 0xffff;
+
+ return temp;
+}
+
+/*
+ If incoming is 0 (outgoing)- if the net numbers is ours make it 0
+ if incoming is 1 - if the net number is 0 make it ours
+
+*/
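+/* The network-number fields patched below sit at byte offsets 14-17
+ * (destination) and 26-29 (source): IPX header offsets 6 and 18 plus the
+ * 8-byte frame relay/SNAP encapsulation header. */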
+static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
+{
+ unsigned long pnetwork_number;
+
+ pnetwork_number = (unsigned long)((sendpacket[14] << 24) +
+ (sendpacket[15] << 16) + (sendpacket[16] << 8) +
+ sendpacket[17]);
+
+ if (!incoming) {
+ /* If the destination network number is ours, make it 0 */
+ if( pnetwork_number == network_number) {
+ sendpacket[14] = sendpacket[15] = sendpacket[16] =
+ sendpacket[17] = 0x00;
+ }
+ } else {
+ /* If the incoming network is 0, make it ours */
+ if( pnetwork_number == 0) {
+ sendpacket[14] = (unsigned char)(network_number >> 24);
+ sendpacket[15] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[16] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[17] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+
+
+ pnetwork_number = (unsigned long)((sendpacket[26] << 24) +
+ (sendpacket[27] << 16) + (sendpacket[28] << 8) +
+ sendpacket[29]);
+
+ if( !incoming ) {
+ /* If the source network is ours, make it 0 */
+ if( pnetwork_number == network_number) {
+ sendpacket[26] = sendpacket[27] = sendpacket[28] =
+ sendpacket[29] = 0x00;
+ }
+ } else {
+ /* If the source network is 0, make it ours */
+ if( pnetwork_number == 0 ) {
+ sendpacket[26] = (unsigned char)(network_number >> 24);
+ sendpacket[27] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[28] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[29] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+} /* switch_net_numbers */
+
+/*============================================================================
+ * Get ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats.
+ */
+static struct net_device_stats *if_stats(struct net_device *dev)
+{
+ fr_channel_t* chan = dev->priv;
+
+ if(chan == NULL)
+ return NULL;
+
+ return &chan->ifstats;
+}
+
+/****** Interrupt Handlers **************************************************/
+
+/*============================================================================
+ * fr_isr: S508 frame relay interrupt service routine.
+ *
+ * Description:
+ * Frame relay main interrupt service routine. This
+ * function checks the interrupt type and takes
+ * the appropriate action.
+ */
+static void fr_isr (sdla_t* card)
+{
+ fr508_flags_t* flags = card->flags;
+ char *ptr = &flags->iflag;
+ int i,err;
+ fr_mbox_t* mbox = card->mbox;
+
+ /* This flag prevents nesting of interrupts. See sdla_isr() routine
+ * in sdlamain.c. */
+ card->in_isr = 1;
+
+ ++card->statistics.isr_entry;
+
+
+ /* All peripheral (configuration, re-configuration) events
+ * take precedence over the ISR. Thus, retrigger */
+ if (test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
+ ++card->statistics.isr_already_critical;
+ goto fr_isr_exit;
+ }
+
+ if(card->hw.type != SDLA_S514) {
+ if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+ printk(KERN_INFO "%s: Critical while in ISR: If Send Running!\n",
+ card->devname);
+ ++card->statistics.isr_already_critical;
+ goto fr_isr_exit;
+ }
+ }
+
+ switch (flags->iflag) {
+
+ case FR_INTR_RXRDY: /* receive interrupt */
+ ++card->statistics.isr_rx;
+ rx_intr(card);
+ break;
+
+
+ case FR_INTR_TXRDY: /* transmit interrupt */
+ ++ card->statistics.isr_tx;
+ tx_intr(card);
+ break;
+
+ case FR_INTR_READY:
+ Intr_test_counter++;
+ ++card->statistics.isr_intr_test;
+ break;
+
+ case FR_INTR_DLC: /* Event interrupt occurred */
+ mbox->cmd.command = FR_READ_STATUS;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err)
+ fr_event(card, err, mbox);
+ break;
+
+ case FR_INTR_TIMER: /* Timer interrupt */
+ timer_intr(card);
+ break;
+
+ default:
+ ++card->statistics.isr_spurious;
+ spur_intr(card);
+ printk(KERN_INFO "%s: Interrupt Type 0x%02X!\n",
+ card->devname, flags->iflag);
+
+ printk(KERN_INFO "%s: ID Bytes = ",card->devname);
+ for(i = 0; i < 8; i ++)
+ printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
+ printk(KERN_INFO "\n");
+
+ break;
+ }
+
+fr_isr_exit:
+
+ card->in_isr = 0;
+ flags->iflag = 0;
+ return;
+}
+
+
+
+/*===========================================================
+ * rx_intr Receive interrupt handler.
+ *
+ * Description
+ * Upon receiving an interrupt:
+ * 1. Check that the firmware is in sync with
+ * the driver.
+ * 2. Find an appropriate network interface
+ * based on the received dlci number.
+ * 3. Check that the network interface exists
+ * and that it is set up properly.
+ * 4. Copy the data into an skb buffer.
+ * 5. Check the packet type and take
+ * appropriate action: UDP, API, ARP or Data.
+ */
+
+static void rx_intr (sdla_t* card)
+{
+ fr_rx_buf_ctl_t* frbuf = card->rxmb;
+ fr508_flags_t* flags = card->flags;
+ fr_channel_t* chan;
+ char *ptr = &flags->iflag;
+ struct sk_buff* skb;
+ struct net_device* dev;
+ void* buf;
+ unsigned dlci, len, offs, len_incl_hdr;
+ int i, udp_type;
+
+
+ /* Check that firmware buffers are in sync */
+ if (frbuf->flag != 0x01) {
+
+ printk(KERN_INFO
+ "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
+ card->devname, (unsigned)frbuf, frbuf->flag);
+
+ printk(KERN_INFO "%s: ID Bytes = ",card->devname);
+ for(i = 0; i < 8; i ++)
+ printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
+ printk(KERN_INFO "\n");
+
+ ++card->statistics.rx_intr_corrupt_rx_bfr;
+
+ /* Bug Fix: Mar 6 2000
+ * If we get a corrupted mailbox, it means that the driver
+ * is out of sync with the firmware. There is no recovery.
+ * If we don't turn off all interrupts for this card
+ * the machine will crash.
+ */
+ printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
+ printk(KERN_INFO "Please contact Sangoma Technologies !\n");
+ fr_set_intr_mode(card, 0, 0, 0);
+ return;
+ }
+
+ len = frbuf->length;
+ dlci = frbuf->dlci;
+ offs = frbuf->offset;
+
+ /* Find the network interface for this packet */
+ dev = find_channel(card, dlci);
+
+
+ /* Check that the network interface is active and
+ * properly setup */
+ if (dev == NULL) {
+ if( net_ratelimit()) {
+ printk(KERN_INFO "%s: received data on unconfigured DLCI %d!\n",
+ card->devname, dlci);
+ }
+ ++card->statistics.rx_intr_on_orphaned_DLCI;
+ ++card->wandev.stats.rx_dropped;
+ goto rx_done;
+ }
+
+ if ((chan = dev->priv) == NULL){
+ if( net_ratelimit()) {
+ printk(KERN_INFO "%s: received data on unconfigured DLCI %d!\n",
+ card->devname, dlci);
+ }
+ ++card->statistics.rx_intr_on_orphaned_DLCI;
+ ++card->wandev.stats.rx_dropped;
+ goto rx_done;
+ }
+
+ skb = dev_alloc_skb(len);
+
+ if (!netif_running(dev) || (skb == NULL)){
+
+ ++chan->ifstats.rx_dropped;
+
+ if(skb == NULL) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO
+ "%s: no socket buffers available!\n",
+ card->devname);
+ }
+ chan->drvstats_rx_intr.rx_intr_no_socket ++;
+ }
+
+ if (!netif_running(dev)){
+ chan->drvstats_rx_intr.
+ rx_intr_dev_not_started ++;
+ if (skb){
+ dev_kfree_skb_any(skb);
+ }
+ }
+ goto rx_done;
+ }
+
+ /* Copy data from the board into the socket buffer */
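+ /* The on-board receive buffer is circular: a frame that wraps past
+ * rx_top is copied in two pieces. */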
+ if ((offs + len) > card->u.f.rx_top + 1) {
+ unsigned tmp = card->u.f.rx_top - offs + 1;
+
+ buf = skb_put(skb, tmp);
+ sdla_peek(&card->hw, offs, buf, tmp);
+ offs = card->u.f.rx_base;
+ len -= tmp;
+ }
+
+ buf = skb_put(skb, len);
+ sdla_peek(&card->hw, offs, buf, len);
+
+
+ /* We got the packet from the board.
+ * Check the packet type and take appropriate action */
+
+ udp_type = udp_pkt_type( skb, card );
+
+ if(udp_type != UDP_INVALID_TYPE) {
+
+ /* UDP Debug packet received, store the
+ * packet and handle it in timer interrupt */
+
+ skb_pull(skb, 1);
+ if (wanrouter_type_trans(skb, dev)){
+ if(store_udp_mgmt_pkt(udp_type,UDP_PKT_FRM_NETWORK,card,skb,dlci)){
+
+ flags->imask |= FR_INTR_TIMER;
+
+ if (udp_type == UDP_FPIPE_TYPE){
+ ++chan->drvstats_rx_intr.rx_intr_PIPE_request;
+ }
+ }
+ }
+
+ }else if (chan->common.usedby == API) {
+
+ /* We are in API mode.
+ * Add an API header to the RAW packet
+ * and queue it into a circular buffer.
+ * Then kick the fr_bh() bottom half handler */
+
+ api_rx_hdr_t* api_rx_hdr;
+ chan->drvstats_rx_intr.rx_intr_bfr_passed_to_stack ++;
+ chan->ifstats.rx_packets ++;
+ card->wandev.stats.rx_packets ++;
+
+ chan->ifstats.rx_bytes += skb->len;
+ card->wandev.stats.rx_bytes += skb->len;
+
+ skb_push(skb, sizeof(api_rx_hdr_t));
+ api_rx_hdr = (api_rx_hdr_t*)&skb->data[0x00];
+ api_rx_hdr->attr = frbuf->attr;
+ api_rx_hdr->time_stamp = frbuf->tmstamp;
+
+ skb->protocol = htons(ETH_P_IP);
+ skb->mac.raw = skb->data;
+ skb->dev = dev;
+ skb->pkt_type = WAN_PACKET_DATA;
+
+ bh_enqueue(dev, skb);
+
+ trigger_fr_bh(chan);
+
+ }else if (handle_IPXWAN(skb->data,chan->name,chan->enable_IPX, chan->network_number)){
+
+ //FIXME: Frame Relay IPX is not supported, Yet !
+ //if (chan->enable_IPX) {
+ // fr_send(card, dlci, 0, skb->len,skb->data);
+ //}
+ dev_kfree_skb_any(skb);
+
+ } else if (is_arp(skb->data)) {
+
+ /* ARP support enabled Mar 16 2000
+ * Process incoming ARP reply/request, setup
+ * dynamic routes. */
+
+ if (process_ARP((arphdr_1490_t *)skb->data, card, dev)) {
+ if (net_ratelimit()){
+ printk (KERN_INFO
+ "%s: Error processing ARP Packet.\n",
+ card->devname);
+ }
+ }
+ dev_kfree_skb_any(skb);
+
+ } else if (skb->data[0] != 0x03) {
+
+ if (net_ratelimit()) {
+ printk(KERN_INFO "%s: Non IETF packet discarded.\n",
+ card->devname);
+ }
+ dev_kfree_skb_any(skb);
+
+ } else {
+
+ len_incl_hdr = skb->len;
+ /* Decapsulate packet and pass it up the
+ protocol stack */
+ skb->dev = dev;
+
+ if (chan->common.usedby == BRIDGE || chan->common.usedby == BRIDGE_NODE){
+
+ /* Make sure it's an Ethernet frame, otherwise drop it */
+ if (!memcmp(skb->data, "\x03\x00\x80\x00\x80\xC2\x00\x07", 8)) {
+ skb_pull(skb, 8);
+ skb->protocol=eth_type_trans(skb,dev);
+ }else{
+ ++chan->drvstats_rx_intr.rx_intr_bfr_not_passed_to_stack;
+ ++chan->ifstats.rx_errors;
+ ++card->wandev.stats.rx_errors;
+ goto rx_done;
+ }
+ }else{
+
+ /* remove hardware header */
+ buf = skb_pull(skb, 1);
+
+ if (!wanrouter_type_trans(skb, dev)) {
+
+ /* can't decapsulate packet */
+ dev_kfree_skb_any(skb);
+
+ ++chan->drvstats_rx_intr.rx_intr_bfr_not_passed_to_stack;
+ ++chan->ifstats.rx_errors;
+ ++card->wandev.stats.rx_errors;
+ goto rx_done;
+ }
+ skb->mac.raw = skb->data;
+ }
+
+
+ /* Send a packet up the IP stack */
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ ++chan->drvstats_rx_intr.rx_intr_bfr_passed_to_stack;
+ ++chan->ifstats.rx_packets;
+ ++card->wandev.stats.rx_packets;
+
+ chan->ifstats.rx_bytes += len_incl_hdr;
+ card->wandev.stats.rx_bytes += len_incl_hdr;
+ }
+
+rx_done:
+
+ /* Release buffer element and calculate a pointer to the next one */
+ frbuf->flag = 0;
+ card->rxmb = ++frbuf;
+ if ((void*)frbuf > card->u.f.rxmb_last)
+ card->rxmb = card->u.f.rxmb_base;
+
+}
+
+/*==================================================================
+ * tx_intr: Transmit interrupt handler.
+ *
+ * Rationale:
+ * If the board is busy transmitting, if_send() will
+ * buffer a single packet and turn on
+ * the tx interrupt. Tx interrupt will be called
+ * by the board, once the firmware can send more
+ * data. Thus, no polling is required.
+ *
+ * Description:
+ * Tx interrupt is called for each
+ * configured dlci channel. Thus:
+ * 1. Obtain the network interface based on the
+ * dlci number.
+ * 2. Check that the network interface is up and
+ * properly set up.
+ * 3. Check for a buffered packet.
+ * 4. Transmit the packet.
+ * 5. If we are in WANPIPE mode, mark the
+ * NET_BH handler.
+ * 6. If we are in API mode, kick
+ * the AF_WANPIPE socket for more data.
+ *
+ */
+static void tx_intr(sdla_t *card)
+{
+ fr508_flags_t* flags = card->flags;
+ fr_tx_buf_ctl_t* bctl;
+ struct net_device* dev;
+ fr_channel_t* chan;
+
+ if(card->hw.type == SDLA_S514){
+ bctl = (void*)(flags->tse_offs + card->hw.dpmbase);
+ }else{
+ bctl = (void*)(flags->tse_offs - FR_MB_VECTOR +
+ card->hw.dpmbase);
+ }
+
+ /* Find the structure and make it unbusy */
+ dev = find_channel(card, flags->dlci);
+ if (dev == NULL){
+ printk(KERN_INFO "NO DEV IN TX Interrupt\n");
+ goto end_of_tx_intr;
+ }
+
+ if ((chan = dev->priv) == NULL){
+ printk(KERN_INFO "NO CHAN IN TX Interrupt\n");
+ goto end_of_tx_intr;
+ }
+
+ if(!chan->transmit_length || !chan->delay_skb) {
+ printk(KERN_INFO "%s: tx int error - transmit length zero\n",
+ card->wandev.name);
+ goto end_of_tx_intr;
+ }
+
+ /* If the 'if_send()' procedure is currently checking the 'tbusy'
+ status, then we cannot transmit. Instead, we configure the microcode
+ so as to re-issue this transmit interrupt at a later stage.
+ */
+ if (test_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical)) {
+
+ fr_dlci_interface_t* dlci_interface = chan->dlci_int_interface;
+ bctl->flag = 0xA0;
+ dlci_interface->gen_interrupt |= FR_INTR_TXRDY;
+ return;
+
+ }else{
+ bctl->dlci = flags->dlci;
+ bctl->length = chan->transmit_length+chan->fr_header_len;
+ sdla_poke(&card->hw,
+ fr_send_hdr(card,bctl->dlci,bctl->offset),
+ chan->delay_skb->data,
+ chan->delay_skb->len);
+ bctl->flag = 0xC0;
+
+ ++chan->ifstats.tx_packets;
+ ++card->wandev.stats.tx_packets;
+ chan->ifstats.tx_bytes += chan->transmit_length;
+ card->wandev.stats.tx_bytes += chan->transmit_length;
+
+		/* Free the sk_buff that we used for the delayed
+		 * transmission; otherwise, the socket will
+		 * run out of memory */
+ dev_kfree_skb_any(chan->delay_skb);
+
+ chan->delay_skb = NULL;
+ chan->transmit_length = 0;
+
+ dev->trans_start = jiffies;
+
+ if (netif_queue_stopped(dev)){
+			/* If using API, then wake up the socket BH handler */
+ if (chan->common.usedby == API){
+ netif_start_queue(dev);
+ wakeup_sk_bh(dev);
+ }else{
+ netif_wake_queue(dev);
+ }
+ }
+ }
+
+end_of_tx_intr:
+
+ /* if any other interfaces have transmit interrupts pending,
+ * do not disable the global transmit interrupt */
+ if(!(-- card->u.f.tx_interrupts_pending))
+ flags->imask &= ~FR_INTR_TXRDY;
+
+
+}
+
+
+/*============================================================================
+ * timer_intr: Timer interrupt handler.
+ *
+ * Rationale:
+ *	All commands must be executed within the timer
+ * interrupt since no two commands should execute
+ * at the same time.
+ *
+ * Description:
+ * The timer interrupt is used to:
+ *    	1. Process UDP calls from 'fpipemon'.
+ *    	2. Process update calls from the /proc file system.
+ *     	3. Read board-level statistics for
+ *         updating the /proc file system.
+ *      4. Send inverse ARP request packets.
+ *	5. Configure a dlci/channel.
+ *	6. Unconfigure a dlci/channel (Node only).
+ */
+
+static void timer_intr(sdla_t *card)
+{
+ fr508_flags_t* flags = card->flags;
+
+	/* UDP Debugging: fpipemon call */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UDP) {
+ if(card->u.f.udp_type == UDP_FPIPE_TYPE) {
+ if(process_udp_mgmt_pkt(card)) {
+ card->u.f.timer_int_enabled &=
+ ~TMR_INT_ENABLED_UDP;
+ }
+ }
+ }
+
+ /* /proc update call : triggered from update() */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
+ fr_get_err_stats(card);
+ fr_get_stats(card);
+ card->u.f.update_comms_stats = 0;
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
+ }
+
+	/* Update the channel state call. This call is
+         * triggered by the if_send() function */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UPDATE_STATE){
+ struct net_device *dev;
+ if (card->wandev.state == WAN_CONNECTED){
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)){
+ fr_channel_t *chan = dev->priv;
+ if (chan->common.state != WAN_CONNECTED){
+ update_chan_state(dev);
+ }
+ }
+ }
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE_STATE;
+ }
+
+ /* configure a dlci/channel */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_CONFIG){
+ config_fr(card);
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
+ }
+
+ /* unconfigure a dlci/channel */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG){
+ unconfig_fr(card);
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG;
+ }
+
+
+ /* Transmit ARP packets */
+ if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_ARP){
+ int i=0;
+ struct net_device *dev;
+
+ if (card->u.f.arp_dev == NULL)
+ card->u.f.arp_dev = card->wandev.dev;
+
+ dev = card->u.f.arp_dev;
+
+ for (;;){
+
+ fr_channel_t *chan = dev->priv;
+
+ /* If the interface is brought down cancel sending In-ARPs */
+ if (!(dev->flags&IFF_UP)){
+ clear_bit(0,&chan->inarp_ready);
+ }
+
+ if (test_bit(0,&chan->inarp_ready)){
+
+ if (check_tx_status(card,dev)){
+ set_bit(ARP_CRIT,&card->wandev.critical);
+ break;
+ }
+
+ if (!send_inarp_request(card,dev)){
+ trigger_fr_arp(dev);
+ chan->inarp_tick = jiffies;
+ }
+
+ clear_bit(0,&chan->inarp_ready);
+ dev = move_dev_to_next(card,dev);
+ break;
+ }
+ dev = move_dev_to_next(card,dev);
+
+ if (++i == card->wandev.new_if_cnt){
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_ARP;
+ break;
+ }
+ }
+ card->u.f.arp_dev = dev;
+ }
+
+ if(!card->u.f.timer_int_enabled)
+ flags->imask &= ~FR_INTR_TIMER;
+}
+
+
+/*============================================================================
+ * spur_intr: Spurious interrupt handler.
+ *
+ * Description:
+ *	We don't recognize this interrupt.
+ * Print a warning.
+ */
+
+static void spur_intr (sdla_t* card)
+{
+ if (net_ratelimit()){
+ printk(KERN_INFO "%s: spurious interrupt!\n", card->devname);
+ }
+}
+
+
+//FIXME: Fix the IPX in next version
+/*===========================================================================
+ * Return 0 for a non-IPXWAN packet,
+ *	  1 for an IPXWAN packet or if IPX is not enabled.
+ *  FIXME: Use an IPX structure here, not raw offsets
+ */
+static int handle_IPXWAN(unsigned char *sendpacket,
+ char *devname, unsigned char enable_IPX,
+ unsigned long network_number)
+{
+ int i;
+
+ if( sendpacket[1] == 0x00 && sendpacket[2] == 0x80 &&
+ sendpacket[6] == 0x81 && sendpacket[7] == 0x37) {
+
+ /* It's an IPX packet */
+ if (!enable_IPX){
+ /* Return 1 so we don't pass it up the stack. */
+ //FIXME: Take this out when IPX is fixed
+ if (net_ratelimit()){
+ printk (KERN_INFO
+ "%s: WARNING: Unsupported IPX packet received and dropped\n",
+ devname);
+ }
+ return 1;
+ }
+ } else {
+ /* It's not IPX so return and pass it up the stack. */
+ return 0;
+ }
+
+ if( sendpacket[24] == 0x90 && sendpacket[25] == 0x04){
+ /* It's IPXWAN */
+
+ if( sendpacket[10] == 0x02 && sendpacket[42] == 0x00){
+
+ /* It's a timer request packet */
+ printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",
+ devname);
+
+ /* Go through the routing options and answer no to every
+ * option except Unnumbered RIP/SAP
+ */
+ for(i = 49; sendpacket[i] == 0x00; i += 5){
+ /* 0x02 is the option for Unnumbered RIP/SAP */
+ if( sendpacket[i + 4] != 0x02){
+ sendpacket[i + 1] = 0;
+ }
+ }
+
+ /* Skip over the extended Node ID option */
+ if( sendpacket[i] == 0x04 ){
+ i += 8;
+ }
+
+			/* We also want to turn off all header compression
+			 * options.
+			 */
+ for(; sendpacket[i] == 0x80 ;){
+ sendpacket[i + 1] = 0;
+ i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
+ }
+
+ /* Set the packet type to timer response */
+ sendpacket[42] = 0x01;
+
+ printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",
+ devname);
+
+ } else if( sendpacket[42] == 0x02 ){
+
+ /* This is an information request packet */
+ printk(KERN_INFO
+ "%s: Received IPXWAN Information Request packet\n",
+ devname);
+
+ /* Set the packet type to information response */
+ sendpacket[42] = 0x03;
+
+ /* Set the router name */
+ sendpacket[59] = 'F';
+ sendpacket[60] = 'P';
+ sendpacket[61] = 'I';
+ sendpacket[62] = 'P';
+ sendpacket[63] = 'E';
+ sendpacket[64] = '-';
+ sendpacket[65] = CVHexToAscii(network_number >> 28);
+ sendpacket[66] = CVHexToAscii((network_number & 0x0F000000)>> 24);
+ sendpacket[67] = CVHexToAscii((network_number & 0x00F00000)>> 20);
+ sendpacket[68] = CVHexToAscii((network_number & 0x000F0000)>> 16);
+ sendpacket[69] = CVHexToAscii((network_number & 0x0000F000)>> 12);
+ sendpacket[70] = CVHexToAscii((network_number & 0x00000F00)>> 8);
+ sendpacket[71] = CVHexToAscii((network_number & 0x000000F0)>> 4);
+ sendpacket[72] = CVHexToAscii(network_number & 0x0000000F);
+ for(i = 73; i < 107; i+= 1)
+ {
+ sendpacket[i] = 0;
+ }
+
+ printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",
+ devname);
+ } else {
+
+ printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
+ return 0;
+ }
+
+ /* Set the WNodeID to our network address */
+ sendpacket[43] = (unsigned char)(network_number >> 24);
+ sendpacket[44] = (unsigned char)((network_number & 0x00FF0000) >> 16);
+ sendpacket[45] = (unsigned char)((network_number & 0x0000FF00) >> 8);
+ sendpacket[46] = (unsigned char)(network_number & 0x000000FF);
+
+ return 1;
+ }
+
+	/* If we get here, it's an IPX data packet, so it'll get passed up the
+	 * stack.
+	 * Switch the network numbers.
+ */
+ switch_net_numbers(sendpacket, network_number ,1);
+ return 0;
+}
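+
+/* For reference: the raw offsets tested above appear to correspond to the
+ * RFC 1490 SNAP encapsulation of IPX (byte 1 pad 0x00, byte 2 NLPID 0x80,
+ * bytes 6-7 the IPX Ethertype 0x81 0x37), with byte 42 holding the IPXWAN
+ * packet type; this is the raw-offset layout the FIXME above suggests
+ * replacing with a proper IPX header structure.
+ */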
+/*============================================================================
+ * process_route
+ *
+ * Rationale:
+ * If the interface goes down, or we receive an ARP request,
+ *      we have to change the network interface IP addresses.
+ * This cannot be done within the interrupt.
+ *
+ * Description:
+ *
+ * This routine is called as a polling routine to dynamically
+ * add/delete routes negotiated by inverse ARP. It is in this
+ * "task" because we don't want routes to be added while in
+ * interrupt context.
+ *
+ * Usage:
+ *	This function is called by the fr_poll() polling routine.
+ */
+
+static void process_route(struct net_device *dev)
+{
+ fr_channel_t *chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ struct ifreq if_info;
+ struct sockaddr_in *if_data;
+ mm_segment_t fs = get_fs();
+ u32 ip_tmp;
+ int err;
+
+
+ switch(chan->route_flag){
+
+ case ADD_ROUTE:
+
+ /* Set remote addresses */
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+
+ set_fs(get_ds()); /* get user space block */
+
+ if_data = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data->sin_addr.s_addr = chan->ip_remote;
+ if_data->sin_family = AF_INET;
+ err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
+
+ set_fs(fs); /* restore old block */
+
+ if (err) {
+ printk(KERN_INFO
+ "%s: Route Add failed. Error: %d\n",
+ card->devname,err);
+ printk(KERN_INFO "%s: Address: %u.%u.%u.%u\n",
+ chan->name, NIPQUAD(chan->ip_remote));
+
+ }else {
+ printk(KERN_INFO "%s: Route Added Successfully: %u.%u.%u.%u\n",
+ card->devname,NIPQUAD(chan->ip_remote));
+ chan->route_flag = ROUTE_ADDED;
+ }
+ break;
+
+ case REMOVE_ROUTE:
+
+ /* Set remote addresses */
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+
+ ip_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
+
+ set_fs(get_ds()); /* get user space block */
+
+ if_data = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data->sin_addr.s_addr = 0;
+ if_data->sin_family = AF_INET;
+ err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
+
+ set_fs(fs);
+
+ if (err) {
+ printk(KERN_INFO
+ "%s: Deleting of route failed. Error: %d\n",
+ card->devname,err);
+ printk(KERN_INFO "%s: Address: %u.%u.%u.%u\n",
+ dev->name,NIPQUAD(chan->ip_remote) );
+
+ } else {
+			printk(KERN_INFO "%s: Route Removed Successfully: %u.%u.%u.%u\n",
+ card->devname,NIPQUAD(ip_tmp));
+ chan->route_flag = NO_ROUTE;
+ }
+ break;
+
+ } /* Case Statement */
+
+}
+
+
+
+/****** Frame Relay Firmware-Specific Functions *****************************/
+
+/*============================================================================
+ * Read firmware code version.
+ * o fill string str with firmware version info.
+ */
+static int fr_read_version (sdla_t* card, char* str)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->cmd.command = FR_READ_CODE_VERSION;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err && str) {
+ int len = mbox->cmd.length;
+ memcpy(str, mbox->data, len);
+ str[len] = '\0';
+ }
+ return err;
+}
+
+/*============================================================================
+ * Set global configuration.
+ */
+static int fr_configure (sdla_t* card, fr_conf_t *conf)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int dlci_num = card->u.f.dlci_num;
+ int err, i;
+
+ do
+ {
+ memcpy(mbox->data, conf, sizeof(fr_conf_t));
+
+ if (dlci_num) for (i = 0; i < dlci_num; ++i)
+ ((fr_conf_t*)mbox->data)->dlci[i] =
+ card->u.f.node_dlci[i];
+
+ mbox->cmd.command = FR_SET_CONFIG;
+ mbox->cmd.length =
+ sizeof(fr_conf_t) + dlci_num * sizeof(short);
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ /*NC Oct 12 2000 */
+ if (err != CMD_OK){
+ printk(KERN_ERR "%s: Frame Relay Configuration Failed: rc=0x%x\n",
+ card->devname,err);
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Set DLCI configuration.
+ */
+static int fr_dlci_configure (sdla_t* card, fr_dlc_conf_t *conf, unsigned dlci)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memcpy(mbox->data, conf, sizeof(fr_dlc_conf_t));
+ mbox->cmd.dlci = (unsigned short) dlci;
+ mbox->cmd.command = FR_SET_CONFIG;
+ mbox->cmd.length = sizeof(fr_dlc_conf_t);
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry--);
+
+ return err;
+}
+/*============================================================================
+ * Set interrupt mode.
+ */
+static int fr_set_intr_mode (sdla_t* card, unsigned mode, unsigned mtu,
+ unsigned short timeout)
+{
+ fr_mbox_t* mbox = card->mbox;
+ fr508_intr_ctl_t* ictl = (void*)mbox->data;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(ictl, 0, sizeof(fr508_intr_ctl_t));
+ ictl->mode = mode;
+ ictl->tx_len = mtu;
+ ictl->irq = card->hw.irq;
+
+ /* indicate timeout on timer */
+ if (mode & 0x20) ictl->timeout = timeout;
+
+ mbox->cmd.length = sizeof(fr508_intr_ctl_t);
+ mbox->cmd.command = FR_SET_INTR_MODE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+/*============================================================================
+ * Enable communications.
+ */
+static int fr_comm_enable (sdla_t* card)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->cmd.command = FR_COMM_ENABLE;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+/*============================================================================
+ * fr_comm_disable
+ *
+ * Warning: This function is called by the shutdown() procedure. It is void
+ *          since dev->priv has already been deallocated and no
+ *          error checking is possible using the fr_event() function.
+ */
+static void fr_comm_disable (sdla_t* card)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do {
+ mbox->cmd.command = FR_SET_MODEM_STATUS;
+ mbox->cmd.length = 1;
+ mbox->data[0] = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry--);
+
+ retry = MAX_CMD_RETRY;
+
+ do
+ {
+ mbox->cmd.command = FR_COMM_DISABLE;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry--);
+
+ return;
+}
+
+
+
+/*============================================================================
+ * Get communications error statistics.
+ */
+static int fr_get_err_stats (sdla_t* card)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+
+ do
+ {
+ mbox->cmd.command = FR_READ_ERROR_STATS;
+ mbox->cmd.length = 0;
+ mbox->cmd.dlci = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err) {
+ fr_comm_stat_t* stats = (void*)mbox->data;
+ card->wandev.stats.rx_over_errors = stats->rx_overruns;
+ card->wandev.stats.rx_crc_errors = stats->rx_bad_crc;
+ card->wandev.stats.rx_missed_errors = stats->rx_aborts;
+ card->wandev.stats.rx_length_errors = stats->rx_too_long;
+ card->wandev.stats.tx_aborted_errors = stats->tx_aborts;
+
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Get statistics.
+ */
+static int fr_get_stats (sdla_t* card)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+
+ do
+ {
+ mbox->cmd.command = FR_READ_STATISTICS;
+ mbox->cmd.length = 0;
+ mbox->cmd.dlci = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err) {
+ fr_link_stat_t* stats = (void*)mbox->data;
+ card->wandev.stats.rx_frame_errors = stats->rx_bad_format;
+ card->wandev.stats.rx_dropped =
+ stats->rx_dropped + stats->rx_dropped2;
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Add DLCI(s) (Access Node only!).
+ * This routine will perform the ADD_DLCIs command for the specified DLCI.
+ */
+static int fr_add_dlci (sdla_t* card, int dlci)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ unsigned short* dlci_list = (void*)mbox->data;
+
+ mbox->cmd.length = sizeof(short);
+ dlci_list[0] = dlci;
+ mbox->cmd.command = FR_ADD_DLCI;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+/*============================================================================
+ * Activate DLCI(s) (Access Node only!).
+ * This routine will perform the ACTIVATE_DLCIs command with a DLCI number.
+ */
+static int fr_activate_dlci (sdla_t* card, int dlci)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ unsigned short* dlci_list = (void*)mbox->data;
+
+ mbox->cmd.length = sizeof(short);
+ dlci_list[0] = dlci;
+ mbox->cmd.command = FR_ACTIVATE_DLCI;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+/*============================================================================
+ * Delete DLCI(s) (Access Node only!).
+ * This routine will perform the DELETE_DLCIs command with a DLCI number.
+ */
+static int fr_delete_dlci (sdla_t* card, int dlci)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ unsigned short* dlci_list = (void*)mbox->data;
+
+ mbox->cmd.length = sizeof(short);
+ dlci_list[0] = dlci;
+ mbox->cmd.command = FR_DELETE_DLCI;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+
+
+/*============================================================================
+ * Issue in-channel signalling frame.
+ */
+static int fr_issue_isf (sdla_t* card, int isf)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->data[0] = isf;
+ mbox->cmd.length = 1;
+ mbox->cmd.command = FR_ISSUE_IS_FRAME;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ return err;
+}
+
+
+static unsigned int fr_send_hdr (sdla_t*card, int dlci, unsigned int offset)
+{
+ struct net_device *dev = find_channel(card,dlci);
+ fr_channel_t *chan;
+
+ if (!dev || !(chan=dev->priv))
+ return offset;
+
+ if (chan->fr_header_len){
+ sdla_poke(&card->hw, offset, chan->fr_header, chan->fr_header_len);
+ }
+
+ return offset+chan->fr_header_len;
+}
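+
+/* Note: fr_send_hdr() writes the optional per-channel frame relay header
+ * (chan->fr_header, chan->fr_header_len bytes) at 'offset' in the adapter's
+ * shared memory and returns the offset at which the payload should follow.
+ * For example, the UDP management reply path below sets the header to
+ * { Q922_UI, NLPID_IP } before calling fr_send_data_header().
+ */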
+
+/*============================================================================
+ * Send a frame on a selected DLCI.
+ */
+static int fr_send_data_header (sdla_t* card, int dlci, unsigned char attr, int len,
+ void *buf, unsigned char hdr_len)
+{
+ fr_mbox_t* mbox = card->mbox + 0x800;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->cmd.dlci = dlci;
+ mbox->cmd.attr = attr;
+ mbox->cmd.length = len+hdr_len;
+ mbox->cmd.command = FR_WRITE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err) {
+ fr_tx_buf_ctl_t* frbuf;
+
+ if(card->hw.type == SDLA_S514)
+ frbuf = (void*)(*(unsigned long*)mbox->data +
+ card->hw.dpmbase);
+ else
+ frbuf = (void*)(*(unsigned long*)mbox->data -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ sdla_poke(&card->hw, fr_send_hdr(card,dlci,frbuf->offset), buf, len);
+ frbuf->flag = 0x01;
+ }
+
+ return err;
+}
+
+static int fr_send (sdla_t* card, int dlci, unsigned char attr, int len,
+ void *buf)
+{
+ fr_mbox_t* mbox = card->mbox + 0x800;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->cmd.dlci = dlci;
+ mbox->cmd.attr = attr;
+ mbox->cmd.length = len;
+ mbox->cmd.command = FR_WRITE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err) {
+ fr_tx_buf_ctl_t* frbuf;
+
+ if(card->hw.type == SDLA_S514)
+ frbuf = (void*)(*(unsigned long*)mbox->data +
+ card->hw.dpmbase);
+ else
+ frbuf = (void*)(*(unsigned long*)mbox->data -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ sdla_poke(&card->hw, frbuf->offset, buf, len);
+ frbuf->flag = 0x01;
+ }
+
+ return err;
+}
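+
+/* fr_send_data_header() and fr_send() differ only in where the payload is
+ * written: the former places it after the per-channel header returned by
+ * fr_send_hdr() and adds hdr_len to the command length, while the latter
+ * writes it directly at frbuf->offset with no extra header.
+ */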
+
+
+/****** Firmware Asynchronous Event Handlers ********************************/
+
+/*============================================================================
+ * Main asynchronous event/error handler.
+ *	This routine is called whenever a firmware command returns a non-zero
+ *	return code.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int fr_event (sdla_t *card, int event, fr_mbox_t* mbox)
+{
+ fr508_flags_t* flags = card->flags;
+ char *ptr = &flags->iflag;
+ int i;
+
+ switch (event) {
+
+ case FRRES_MODEM_FAILURE:
+ return fr_modem_failure(card, mbox);
+
+ case FRRES_CHANNEL_DOWN: {
+ struct net_device *dev;
+
+ /* Remove all routes from associated DLCI's */
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)) {
+ fr_channel_t *chan = dev->priv;
+ if (chan->route_flag == ROUTE_ADDED) {
+ chan->route_flag = REMOVE_ROUTE;
+ }
+
+ if (chan->inarp == INARP_CONFIGURED) {
+ chan->inarp = INARP_REQUEST;
+ }
+
+			/* If the link becomes disconnected, then
+ * all channels will be disconnected
+ * as well.
+ */
+ set_chan_state(dev,WAN_DISCONNECTED);
+ }
+
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+ return 1;
+ }
+
+ case FRRES_CHANNEL_UP: {
+ struct net_device *dev;
+
+ /* FIXME: Only startup devices that are on the list */
+
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)) {
+
+ set_chan_state(dev,WAN_CONNECTED);
+ }
+
+ wanpipe_set_state(card, WAN_CONNECTED);
+ return 1;
+ }
+
+ case FRRES_DLCI_CHANGE:
+ return fr_dlci_change(card, mbox);
+
+ case FRRES_DLCI_MISMATCH:
+ printk(KERN_INFO "%s: DLCI list mismatch!\n",
+ card->devname);
+ return 1;
+
+ case CMD_TIMEOUT:
+ printk(KERN_ERR "%s: command 0x%02X timed out!\n",
+ card->devname, mbox->cmd.command);
+ printk(KERN_INFO "%s: ID Bytes = ",card->devname);
+ for(i = 0; i < 8; i ++)
+ printk(KERN_INFO "0x%02X ", *(ptr + 0x18 + i));
+ printk(KERN_INFO "\n");
+
+ break;
+
+ case FRRES_DLCI_INACTIVE:
+ break;
+
+ case FRRES_CIR_OVERFLOW:
+ break;
+
+ case FRRES_BUFFER_OVERFLOW:
+ break;
+
+ default:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n"
+ , card->devname, mbox->cmd.command, event);
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Handle modem error.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int fr_modem_failure (sdla_t *card, fr_mbox_t* mbox)
+{
+ printk(KERN_INFO "%s: physical link down! (modem error 0x%02X)\n",
+ card->devname, mbox->data[0]);
+
+ switch (mbox->cmd.command){
+ case FR_WRITE:
+
+ case FR_READ:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*============================================================================
+ * Handle DLCI status change.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int fr_dlci_change (sdla_t *card, fr_mbox_t* mbox)
+{
+ dlci_status_t* status = (void*)mbox->data;
+ int cnt = mbox->cmd.length / sizeof(dlci_status_t);
+ fr_channel_t *chan;
+ struct net_device* dev2;
+
+
+ for (; cnt; --cnt, ++status) {
+
+ unsigned short dlci= status->dlci;
+ struct net_device* dev = find_channel(card, dlci);
+
+ if (dev == NULL){
+ printk(KERN_INFO
+ "%s: CPE contains unconfigured DLCI= %d\n",
+ card->devname, dlci);
+
+ printk(KERN_INFO
+ "%s: unconfigured DLCI %d reported by network\n"
+ , card->devname, dlci);
+
+ }else{
+ if (status->state == FR_LINK_INOPER) {
+ printk(KERN_INFO
+ "%s: DLCI %u is inactive!\n",
+ card->devname, dlci);
+
+ if (dev && netif_running(dev))
+ set_chan_state(dev, WAN_DISCONNECTED);
+ }
+
+ if (status->state & FR_DLCI_DELETED) {
+
+ printk(KERN_INFO
+ "%s: DLCI %u has been deleted!\n",
+ card->devname, dlci);
+
+ if (dev && netif_running(dev)){
+
+ fr_channel_t *chan = dev->priv;
+
+ if (chan->route_flag == ROUTE_ADDED) {
+ chan->route_flag = REMOVE_ROUTE;
+ /* The state change will trigger
+ * the fr polling routine */
+ }
+
+ if (chan->inarp == INARP_CONFIGURED) {
+ chan->inarp = INARP_REQUEST;
+ }
+
+ set_chan_state(dev, WAN_DISCONNECTED);
+ }
+
+ } else if (status->state & FR_DLCI_ACTIVE) {
+
+ chan = dev->priv;
+
+ /* This flag is used for configuring specific
+ DLCI(s) when they become active.
+ */
+ chan->dlci_configured = DLCI_CONFIG_PENDING;
+
+ set_chan_state(dev, WAN_CONNECTED);
+
+ }
+ }
+ }
+
+ for (dev2 = card->wandev.dev; dev2;
+ dev2 = *((struct net_device **)dev2->priv)){
+
+ chan = dev2->priv;
+
+ if (chan->dlci_configured == DLCI_CONFIG_PENDING) {
+ if (fr_init_dlci(card, chan)){
+ return 1;
+ }
+ }
+
+ }
+ return 1;
+}
+
+
+static int fr_init_dlci (sdla_t *card, fr_channel_t *chan)
+{
+ fr_dlc_conf_t cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ if ( chan->cir_status == CIR_DISABLED) {
+
+ cfg.cir_fwd = cfg.cir_bwd = 16;
+ cfg.bc_fwd = cfg.bc_bwd = 16;
+ cfg.conf_flags = 0x0001;
+
+ }else if (chan->cir_status == CIR_ENABLED) {
+
+ cfg.cir_fwd = cfg.cir_bwd = chan->cir;
+ cfg.bc_fwd = cfg.bc_bwd = chan->bc;
+ cfg.be_fwd = cfg.be_bwd = chan->be;
+ cfg.conf_flags = 0x0000;
+ }
+
+ if (fr_dlci_configure( card, &cfg , chan->dlci)){
+ printk(KERN_INFO
+ "%s: DLCI Configure failed for %d\n",
+ card->devname, chan->dlci);
+ return 1;
+ }
+
+ chan->dlci_configured = DLCI_CONFIGURED;
+
+ /* Read the interface byte mapping into the channel
+ * structure.
+ */
+ read_DLCI_IB_mapping( card, chan );
+
+ return 0;
+}
+/******* Miscellaneous ******************************************************/
+
+/*============================================================================
+ * Update channel state.
+ */
+static int update_chan_state(struct net_device* dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ mbox->cmd.command = FR_LIST_ACTIVE_DLCI;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if (!err) {
+
+ unsigned short* list = (void*)mbox->data;
+ int cnt = mbox->cmd.length / sizeof(short);
+
+ err=1;
+
+ for (; cnt; --cnt, ++list) {
+
+ if (*list == chan->dlci) {
+ set_chan_state(dev, WAN_CONNECTED);
+
+
+ /* May 23 2000. NC
+ * When a dlci is added or restarted,
+ * the dlci_int_interface pointer must
+ * be reinitialized. */
+ if (!chan->dlci_int_interface){
+ err=fr_init_dlci (card,chan);
+ }
+ break;
+ }
+ }
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Set channel state.
+ */
+static void set_chan_state(struct net_device* dev, int state)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+
+ if (chan->common.state != state) {
+
+ switch (state) {
+
+ case WAN_CONNECTED:
+ printk(KERN_INFO
+ "%s: Interface %s: DLCI %d connected\n",
+ card->devname, dev->name, chan->dlci);
+
+			/* If the interface was previously down,
+ * bring it up, since the channel is active */
+
+ trigger_fr_poll (dev);
+ trigger_fr_arp (dev);
+ break;
+
+ case WAN_CONNECTING:
+ printk(KERN_INFO
+ "%s: Interface %s: DLCI %d connecting\n",
+ card->devname, dev->name, chan->dlci);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO
+ "%s: Interface %s: DLCI %d disconnected!\n",
+ card->devname, dev->name, chan->dlci);
+
+ /* If the interface is up, bring it down,
+ * since the channel is now disconnected */
+ trigger_fr_poll (dev);
+ break;
+ }
+
+ chan->common.state = state;
+ }
+
+ chan->state_tick = jiffies;
+}
+
+/*============================================================================
+ * Find network device by its channel number.
+ *
+ * We need this critical flag because we change
+ * the dlci_to_dev_map outside the interrupt.
+ *
+ * NOTE: the del_if() function updates this array; it uses
+ *       spin locks to avoid corruption.
+ */
+static struct net_device* find_channel(sdla_t* card, unsigned dlci)
+{
+ if(dlci > HIGHEST_VALID_DLCI)
+ return NULL;
+
+ return(card->u.f.dlci_to_dev_map[dlci]);
+}
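+
+/* A minimal lookup sketch (assuming the DLCI has been configured): the
+ * interrupt handlers use this to map a DLCI reported by the firmware back to
+ * its interface and private data, e.g.:
+ *
+ *	struct net_device *dev = find_channel(card, dlci);
+ *	fr_channel_t *chan = dev ? dev->priv : NULL;
+ *
+ *	if (chan == NULL)
+ *		return;		// DLCI not configured on this card
+ */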
+
+/*============================================================================
+ * Check to see if a frame can be sent. If no transmit buffers available,
+ * enable transmit interrupts.
+ *
+ * Return: 1 - Tx buffer(s) available
+ * 0 - no buffers available
+ */
+static int is_tx_ready (sdla_t* card, fr_channel_t* chan)
+{
+ unsigned char sb;
+
+ if(card->hw.type == SDLA_S514)
+ return 1;
+
+ sb = inb(card->hw.port);
+ if (sb & 0x02)
+ return 1;
+
+ return 0;
+}
+
+/*============================================================================
+ * Convert decimal string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted.
+ */
+static unsigned int dec_to_uint (unsigned char* str, int len)
+{
+ unsigned val;
+
+ if (!len)
+ len = strlen(str);
+
+ for (val = 0; len && is_digit(*str); ++str, --len)
+ val = (val * 10) + (*str - (unsigned)'0');
+
+ return val;
+}
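+
+/* Example: dec_to_uint("123", 0) returns 123, while dec_to_uint("123", 2)
+ * converts only the first two characters and returns 12.
+ */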
+
+
+
+/*=============================================================================
+ * Store a UDP management packet for later processing.
+ */
+
+static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, int dlci)
+{
+ int udp_pkt_stored = 0;
+
+ struct net_device *dev = find_channel(card, dlci);
+ fr_channel_t *chan;
+
+ if (!dev || !(chan=dev->priv))
+ return 1;
+
+ if(!card->u.f.udp_pkt_lgth && (skb->len <= MAX_LGTH_UDP_MGNT_PKT)){
+ card->u.f.udp_pkt_lgth = skb->len + chan->fr_header_len;
+ card->u.f.udp_type = udp_type;
+ card->u.f.udp_pkt_src = udp_pkt_src;
+ card->u.f.udp_dlci = dlci;
+ memcpy(card->u.f.udp_pkt_data, skb->data, skb->len);
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UDP;
+ udp_pkt_stored = 1;
+
+ }else{
+ printk(KERN_INFO "ERROR: UDP packet not stored for DLCI %d\n",
+ dlci);
+ }
+
+	/* Whether or not the packet was stored (its contents were copied
+	 * above if it was), the skb is no longer needed; free it regardless
+	 * of its source */
+	dev_kfree_skb_any(skb);
+
+ return(udp_pkt_stored);
+}
+
+
+/*==============================================================================
+ * Process UDP call of type FPIPE8ND
+ */
+static int process_udp_mgmt_pkt(sdla_t* card)
+{
+
+ int c_retry = MAX_CMD_RETRY;
+ unsigned char *buf;
+ unsigned char frames;
+ unsigned int len;
+ unsigned short buffer_length;
+ struct sk_buff *new_skb;
+ fr_mbox_t* mbox = card->mbox;
+ int err;
+ struct timeval tv;
+ int udp_mgmt_req_valid = 1;
+ struct net_device* dev;
+ fr_channel_t* chan;
+ fr_udp_pkt_t *fr_udp_pkt;
+ unsigned short num_trc_els;
+ fr_trc_el_t* ptr_trc_el;
+ fr_trc_el_t trc_el;
+ fpipemon_trc_t* fpipemon_trc;
+
+ char udp_pkt_src = card->u.f.udp_pkt_src;
+ int dlci = card->u.f.udp_dlci;
+
+ /* Find network interface for this packet */
+ dev = find_channel(card, dlci);
+ if (!dev){
+ card->u.f.udp_pkt_lgth = 0;
+ return 1;
+ }
+ if ((chan = dev->priv) == NULL){
+ card->u.f.udp_pkt_lgth = 0;
+ return 1;
+ }
+
+ /* If the UDP packet is from the network, we are going to have to
+ transmit a response. Before doing so, we must check to see that
+ we are not currently transmitting a frame (in 'if_send()') and
+ that we are not already in a 'delayed transmit' state.
+ */
+ if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+ if (check_tx_status(card,dev)){
+ card->u.f.udp_pkt_lgth = 0;
+ return 1;
+ }
+ }
+
+ fr_udp_pkt = (fr_udp_pkt_t *)card->u.f.udp_pkt_data;
+
+ if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ switch(fr_udp_pkt->cblock.command) {
+
+ case FR_READ_MODEM_STATUS:
+ case FR_READ_STATUS:
+ case FPIPE_ROUTER_UP_TIME:
+ case FR_READ_ERROR_STATS:
+ case FPIPE_DRIVER_STAT_GEN:
+ case FR_READ_STATISTICS:
+ case FR_READ_ADD_DLC_STATS:
+ case FR_READ_CONFIG:
+ case FR_READ_CODE_VERSION:
+ udp_mgmt_req_valid = 1;
+ break;
+ default:
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+ }
+
+ if(!udp_mgmt_req_valid) {
+ /* set length to 0 */
+ fr_udp_pkt->cblock.length = 0;
+ /* set return code */
+ fr_udp_pkt->cblock.result = 0xCD;
+
+ chan->drvstats_gen.UDP_PIPE_mgmt_direction_err ++;
+
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Warning, Illegal UDP command attempted from network: %x\n",
+ card->devname,fr_udp_pkt->cblock.command);
+ }
+
+ } else {
+
+ switch(fr_udp_pkt->cblock.command) {
+
+ case FPIPE_ENABLE_TRACING:
+ if(!card->TracingEnabled) {
+ do {
+ mbox->cmd.command = FR_SET_TRACE_CONFIG;
+ mbox->cmd.length = 1;
+ mbox->cmd.dlci = 0x00;
+ mbox->data[0] = fr_udp_pkt->data[0] |
+ RESET_TRC;
+ err = sdla_exec(mbox) ?
+ mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && c_retry-- && fr_event(card, err,
+ mbox));
+
+ if(err) {
+ card->TracingEnabled = 0;
+ /* set the return code */
+ fr_udp_pkt->cblock.result =
+ mbox->cmd.result;
+ mbox->cmd.length = 0;
+ break;
+ }
+
+ sdla_peek(&card->hw, NO_TRC_ELEMENTS_OFF,
+ &num_trc_els, 2);
+ sdla_peek(&card->hw, BASE_TRC_ELEMENTS_OFF,
+ &card->u.f.trc_el_base, 4);
+ card->u.f.curr_trc_el = card->u.f.trc_el_base;
+ card->u.f.trc_el_last = card->u.f.curr_trc_el +
+ ((num_trc_els - 1) *
+ sizeof(fr_trc_el_t));
+
+ /* Calculate the maximum trace data area in */
+ /* the UDP packet */
+ card->u.f.trc_bfr_space=(MAX_LGTH_UDP_MGNT_PKT -
+ //sizeof(fr_encap_hdr_t) -
+ sizeof(ip_pkt_t) -
+ sizeof(udp_pkt_t) -
+ sizeof(wp_mgmt_t) -
+ sizeof(cblock_t));
+
+ /* set return code */
+ fr_udp_pkt->cblock.result = 0;
+
+ } else {
+ /* set return code to line trace already
+ enabled */
+ fr_udp_pkt->cblock.result = 1;
+ }
+
+ mbox->cmd.length = 0;
+ card->TracingEnabled = 1;
+ break;
+
+
+ case FPIPE_DISABLE_TRACING:
+ if(card->TracingEnabled) {
+
+ do {
+ mbox->cmd.command = FR_SET_TRACE_CONFIG;
+ mbox->cmd.length = 1;
+ mbox->cmd.dlci = 0x00;
+ mbox->data[0] = ~ACTIVATE_TRC;
+ err = sdla_exec(mbox) ?
+ mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && c_retry-- && fr_event(card, err, mbox));
+ }
+
+ /* set return code */
+ fr_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 0;
+ card->TracingEnabled = 0;
+ break;
+
+ case FPIPE_GET_TRACE_INFO:
+
+ /* Line trace cannot be performed on the 502 */
+ if(!card->TracingEnabled) {
+ /* set return code */
+ fr_udp_pkt->cblock.result = 1;
+ mbox->cmd.length = 0;
+ break;
+ }
+
+ ptr_trc_el = (void *)card->u.f.curr_trc_el;
+
+ buffer_length = 0;
+ fr_udp_pkt->data[0x00] = 0x00;
+
+ for(frames = 0; frames < MAX_FRMS_TRACED; frames ++) {
+
+ sdla_peek(&card->hw, (unsigned long)ptr_trc_el,
+ (void *)&trc_el.flag,
+ sizeof(fr_trc_el_t));
+ if(trc_el.flag == 0x00) {
+ break;
+ }
+ if((card->u.f.trc_bfr_space - buffer_length)
+ < sizeof(fpipemon_trc_hdr_t)) {
+ fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
+ break;
+ }
+
+ fpipemon_trc =
+ (fpipemon_trc_t *)&fr_udp_pkt->data[buffer_length];
+ fpipemon_trc->fpipemon_trc_hdr.status =
+ trc_el.attr;
+ fpipemon_trc->fpipemon_trc_hdr.tmstamp =
+ trc_el.tmstamp;
+ fpipemon_trc->fpipemon_trc_hdr.length =
+ trc_el.length;
+
+ if(!trc_el.offset || !trc_el.length) {
+
+ fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x00;
+
+ }else if((trc_el.length + sizeof(fpipemon_trc_hdr_t) + 1) >
+ (card->u.f.trc_bfr_space - buffer_length)){
+
+ fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x00;
+ fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
+
+ }else {
+ fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x01;
+ sdla_peek(&card->hw, trc_el.offset,
+ fpipemon_trc->data,
+ trc_el.length);
+ }
+
+ trc_el.flag = 0x00;
+ sdla_poke(&card->hw, (unsigned long)ptr_trc_el,
+ &trc_el.flag, 1);
+
+ ptr_trc_el ++;
+ if((void *)ptr_trc_el > card->u.f.trc_el_last)
+ ptr_trc_el = (void*)card->u.f.trc_el_base;
+
+ buffer_length += sizeof(fpipemon_trc_hdr_t);
+ if(fpipemon_trc->fpipemon_trc_hdr.data_passed) {
+ buffer_length += trc_el.length;
+ }
+
+ if(fr_udp_pkt->data[0x00] & MORE_TRC_DATA) {
+ break;
+ }
+ }
+
+ if(frames == MAX_FRMS_TRACED) {
+ fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
+ }
+
+ card->u.f.curr_trc_el = (void *)ptr_trc_el;
+
+ /* set the total number of frames passed */
+ fr_udp_pkt->data[0x00] |=
+ ((frames << 1) & (MAX_FRMS_TRACED << 1));
+
+ /* set the data length and return code */
+ fr_udp_pkt->cblock.length = mbox->cmd.length = buffer_length;
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+ case FPIPE_FT1_READ_STATUS:
+ sdla_peek(&card->hw, 0xF020,
+ &fr_udp_pkt->data[0x00] , 2);
+ fr_udp_pkt->cblock.length = mbox->cmd.length = 2;
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+ case FPIPE_FLUSH_DRIVER_STATS:
+ init_chan_statistics(chan);
+ init_global_statistics(card);
+ mbox->cmd.length = 0;
+ break;
+
+ case FPIPE_ROUTER_UP_TIME:
+ do_gettimeofday(&tv);
+ chan->router_up_time = tv.tv_sec -
+ chan->router_start_time;
+ *(unsigned long *)&fr_udp_pkt->data =
+ chan->router_up_time;
+ mbox->cmd.length = fr_udp_pkt->cblock.length = 4;
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+ case FPIPE_DRIVER_STAT_IFSEND:
+ memcpy(fr_udp_pkt->data,
+ &chan->drvstats_if_send.if_send_entry,
+ sizeof(if_send_stat_t));
+ mbox->cmd.length = fr_udp_pkt->cblock.length =sizeof(if_send_stat_t);
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+ case FPIPE_DRIVER_STAT_INTR:
+
+ memcpy(fr_udp_pkt->data,
+ &card->statistics.isr_entry,
+ sizeof(global_stats_t));
+
+ memcpy(&fr_udp_pkt->data[sizeof(global_stats_t)],
+ &chan->drvstats_rx_intr.rx_intr_no_socket,
+ sizeof(rx_intr_stat_t));
+
+ mbox->cmd.length = fr_udp_pkt->cblock.length =
+ sizeof(global_stats_t) +
+ sizeof(rx_intr_stat_t);
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+ case FPIPE_DRIVER_STAT_GEN:
+ memcpy(fr_udp_pkt->data,
+ &chan->drvstats_gen.UDP_PIPE_mgmt_kmalloc_err,
+ sizeof(pipe_mgmt_stat_t));
+
+ memcpy(&fr_udp_pkt->data[sizeof(pipe_mgmt_stat_t)],
+ &card->statistics, sizeof(global_stats_t));
+
+			mbox->cmd.length = fr_udp_pkt->cblock.length =
+					sizeof(pipe_mgmt_stat_t) +
+					sizeof(global_stats_t);
+ fr_udp_pkt->cblock.result = 0;
+ break;
+
+
+ case FR_FT1_STATUS_CTRL:
+ if(fr_udp_pkt->data[0] == 1) {
+ if(rCount++ != 0 ){
+ fr_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 1;
+ break;
+ }
+ }
+
+ /* Disable FT1 MONITOR STATUS */
+ if(fr_udp_pkt->data[0] == 0) {
+ if( --rCount != 0) {
+ fr_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 1;
+ break;
+ }
+ }
+ goto udp_mgmt_dflt;
+
+
+ default:
+udp_mgmt_dflt:
+ do {
+ memcpy(&mbox->cmd,
+ &fr_udp_pkt->cblock.command,
+ sizeof(fr_cmd_t));
+ if(mbox->cmd.length) {
+ memcpy(&mbox->data,
+ (char *)fr_udp_pkt->data,
+ mbox->cmd.length);
+ }
+
+ err = sdla_exec(mbox) ? mbox->cmd.result :
+ CMD_TIMEOUT;
+ } while (err && c_retry-- && fr_event(card, err, mbox));
+
+ if(!err)
+ chan->drvstats_gen.
+ UDP_PIPE_mgmt_adptr_cmnd_OK ++;
+ else
+ chan->drvstats_gen.
+ UDP_PIPE_mgmt_adptr_cmnd_timeout ++;
+
+ /* copy the result back to our buffer */
+ memcpy(&fr_udp_pkt->cblock.command,
+ &mbox->cmd, sizeof(fr_cmd_t));
+
+ if(mbox->cmd.length) {
+ memcpy(&fr_udp_pkt->data,
+ &mbox->data, mbox->cmd.length);
+ }
+ }
+ }
+
+ /* Fill UDP TTL */
+ fr_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
+ len = reply_udp(card->u.f.udp_pkt_data, mbox->cmd.length);
+
+ if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ chan->fr_header_len=2;
+ chan->fr_header[0]=Q922_UI;
+ chan->fr_header[1]=NLPID_IP;
+
+ err = fr_send_data_header(card, dlci, 0, len,
+ card->u.f.udp_pkt_data,chan->fr_header_len);
+ if (err){
+ chan->drvstats_gen.UDP_PIPE_mgmt_adptr_send_passed ++;
+ }else{
+ chan->drvstats_gen.UDP_PIPE_mgmt_adptr_send_failed ++;
+ }
+
+ } else {
+ /* Allocate socket buffer */
+ if((new_skb = dev_alloc_skb(len)) != NULL) {
+
+ /* copy data into new_skb */
+ buf = skb_put(new_skb, len);
+ memcpy(buf, card->u.f.udp_pkt_data, len);
+
+ chan->drvstats_gen.
+ UDP_PIPE_mgmt_passed_to_stack ++;
+ new_skb->dev = dev;
+ new_skb->protocol = htons(ETH_P_IP);
+ new_skb->mac.raw = new_skb->data;
+ netif_rx(new_skb);
+
+ } else {
+ chan->drvstats_gen.UDP_PIPE_mgmt_no_socket ++;
+ printk(KERN_INFO
+ "%s: UDP mgmt cmnd, no socket buffers available!\n",
+ card->devname);
+ }
+ }
+
+ card->u.f.udp_pkt_lgth = 0;
+
+ return 1;
+}
+
+/*==============================================================================
+ * Send Inverse ARP Request
+ */
+
+int send_inarp_request(sdla_t *card, struct net_device *dev)
+{
+ int err=0;
+
+ arphdr_1490_t *ArpPacket;
+ arphdr_fr_t *arphdr;
+ fr_channel_t *chan = dev->priv;
+ struct in_device *in_dev;
+
+ in_dev = dev->ip_ptr;
+
+ if(in_dev != NULL ) {
+
+		ArpPacket = kmalloc(sizeof(arphdr_1490_t) + sizeof(arphdr_fr_t), GFP_ATOMIC);
+		if (ArpPacket == NULL)
+			return 1;
+		/* SNAP Header indicating ARP */
+ ArpPacket->control = 0x03;
+ ArpPacket->pad = 0x00;
+ ArpPacket->NLPID = 0x80;
+ ArpPacket->OUI[0] = 0;
+ ArpPacket->OUI[1] = 0;
+ ArpPacket->OUI[2] = 0;
+ ArpPacket->PID = 0x0608;
+
+ arphdr = (arphdr_fr_t *)(ArpPacket + 1); // Go to ARP Packet
+
+ /* InARP request */
+ arphdr->ar_hrd = 0x0F00; /* Frame Relay HW type */
+ arphdr->ar_pro = 0x0008; /* IP Protocol */
+ arphdr->ar_hln = 2; /* HW addr length */
+ arphdr->ar_pln = 4; /* IP addr length */
+ arphdr->ar_op = htons(0x08); /* InARP Request */
+ arphdr->ar_sha = 0; /* src HW DLCI - Doesn't matter */
+ if(in_dev->ifa_list != NULL)
+			arphdr->ar_sip = in_dev->ifa_list->ifa_local; /* Local Address */
+		else
+ arphdr->ar_sip = 0;
+ arphdr->ar_tha = 0; /* dst HW DLCI - Doesn't matter */
+ arphdr->ar_tip = 0; /* Remote Address -- what we want */
+
+ err = fr_send(card, chan->dlci, 0, sizeof(arphdr_1490_t) + sizeof(arphdr_fr_t),
+ (void *)ArpPacket);
+
+ if (!err){
+ printk(KERN_INFO "\n%s: Sending InARP request on DLCI %d.\n",
+ card->devname, chan->dlci);
+ clear_bit(ARP_CRIT,&card->wandev.critical);
+ }
+
+ kfree(ArpPacket);
+ }else{
+ printk(KERN_INFO "%s: INARP ERROR: %s doesn't have a local IP address!\n",
+ card->devname,dev->name);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/*==============================================================================
+ * Check packet for ARP Type
+ */
+
+int is_arp(void *buf)
+{
+ arphdr_1490_t *arphdr = (arphdr_1490_t *)buf;
+
+ if (arphdr->pad == 0x00 &&
+ arphdr->NLPID == 0x80 &&
+ arphdr->PID == 0x0608)
+ return 1;
+ else return 0;
+}
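+
+/* The PID value 0x0608 tested above is the ARP Ethertype 0x0806 as read into
+ * a 16-bit field on a little-endian host; together with the 0x80 NLPID this
+ * appears to identify an RFC 1490 SNAP-encapsulated ARP/InARP frame.
+ */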
+
+/*==============================================================================
+ * Process ARP Packet Type
+ */
+
+int process_ARP(arphdr_1490_t *ArpPacket, sdla_t *card, struct net_device* dev)
+{
+
+
+ arphdr_fr_t *arphdr = (arphdr_fr_t *)(ArpPacket + 1); /* Skip header */
+ fr_rx_buf_ctl_t* frbuf = card->rxmb;
+ struct in_device *in_dev;
+ fr_channel_t *chan = dev->priv;
+
+ /* Before we transmit ARP packet, we must check
+ * to see that we are not currently transmitting a
+ * frame (in 'if_send()') and that we are not
+ * already in a 'delayed transmit' state. */
+ if (check_tx_status(card,dev)){
+ if (net_ratelimit()){
+			printk(KERN_INFO "%s: Disabling communication to process ARP\n",
+ card->devname);
+ }
+ set_bit(ARP_CRIT,&card->wandev.critical);
+ return 0;
+ }
+
+ in_dev = dev->ip_ptr;
+
+ /* Check that IP addresses exist for our network address */
+ if (in_dev == NULL || in_dev->ifa_list == NULL)
+ return -1;
+
+ switch (ntohs(arphdr->ar_op)) {
+
+ case 0x08: // Inverse ARP request -- Send Reply, add route.
+
+ /* Check for valid Address */
+ printk(KERN_INFO "%s: Recvd PtP addr -InArp Req: %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(arphdr->ar_sip));
+
+
+ /* Check that the network address is the same as ours, only
+		 * if the network mask is not 255.255.255.255.  Otherwise
+ * this check would not make sense */
+
+ if (in_dev->ifa_list->ifa_mask != 0xFFFFFFFF &&
+ (in_dev->ifa_list->ifa_mask & arphdr->ar_sip) !=
+ (in_dev->ifa_list->ifa_mask & in_dev->ifa_list->ifa_local)){
+ printk(KERN_INFO
+ "%s: Invalid PtP address. %u.%u.%u.%u InARP ignored.\n",
+ card->devname,NIPQUAD(arphdr->ar_sip));
+
+ printk(KERN_INFO "%s: mask %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(in_dev->ifa_list->ifa_mask));
+ printk(KERN_INFO "%s: local %u.%u.%u.%u\n",
+ card->devname,NIPQUAD(in_dev->ifa_list->ifa_local));
+ return -1;
+ }
+
+ if (in_dev->ifa_list->ifa_local == arphdr->ar_sip){
+ printk(KERN_INFO
+ "%s: Local addr = PtP addr. InARP ignored.\n",
+ card->devname);
+ return -1;
+ }
+
+ arphdr->ar_op = htons(0x09); /* InARP Reply */
+
+ /* Set addresses */
+ arphdr->ar_tip = arphdr->ar_sip;
+ arphdr->ar_sip = in_dev->ifa_list->ifa_local;
+
+ chan->ip_local = in_dev->ifa_list->ifa_local;
+ chan->ip_remote = arphdr->ar_sip;
+
+ fr_send(card, frbuf->dlci, 0, frbuf->length, (void *)ArpPacket);
+
+ if (test_bit(ARP_CRIT,&card->wandev.critical)){
+ if (net_ratelimit()){
+ printk(KERN_INFO "%s: ARP Processed Enabling Communication!\n",
+ card->devname);
+ }
+ }
+ clear_bit(ARP_CRIT,&card->wandev.critical);
+
+ chan->ip_local = in_dev->ifa_list->ifa_local;
+ chan->ip_remote = arphdr->ar_sip;
+
+ /* Add Route Flag */
+ /* The route will be added in the polling routine so
+		   that it is not done in interrupt context. */
+
+ chan->route_flag = ADD_ROUTE;
+ trigger_fr_poll (dev);
+
+ break;
+
+ case 0x09: // Inverse ARP reply
+
+ /* Check for valid Address */
+ printk(KERN_INFO "%s: Recvd PtP addr %u.%u.%u.%u -InArp Reply\n",
+ card->devname, NIPQUAD(arphdr->ar_sip));
+
+
+ /* Compare network addresses, only if network mask
+ * is not 255.255.255.255 It would not make sense
+ * to perform this test if the mask was all 1's */
+
+ if (in_dev->ifa_list->ifa_mask != 0xffffffff &&
+ (in_dev->ifa_list->ifa_mask & arphdr->ar_sip) !=
+ (in_dev->ifa_list->ifa_mask & in_dev->ifa_list->ifa_local)) {
+
+ printk(KERN_INFO "%s: Invalid PtP address. InARP ignored.\n",
+ card->devname);
+ return -1;
+ }
+
+ /* Make sure that the received IP address is not
+ * the same as our own local address */
+ if (in_dev->ifa_list->ifa_local == arphdr->ar_sip) {
+ printk(KERN_INFO "%s: Local addr = PtP addr. InARP ignored.\n",
+ card->devname);
+ return -1;
+ }
+
+ chan->ip_local = in_dev->ifa_list->ifa_local;
+ chan->ip_remote = arphdr->ar_sip;
+
+ /* Add Route Flag */
+ /* The route will be added in the polling routine so
+		   that it is not done in interrupt context. */
+
+ chan->route_flag = ADD_ROUTE;
+ chan->inarp = INARP_CONFIGURED;
+ trigger_fr_poll(dev);
+
+ break;
+ default:
+ break; // ARP's and RARP's -- Shouldn't happen.
+ }
+
+ return 0;
+}
+
+
+/*============================================================
+ * trigger_fr_arp
+ *
+ * Description:
+ * 	Add an fr_arp() task into an ARP
+ *      timer handler for a specific dlci/interface.
+ *      This will kick the fr_arp() routine
+ *      after the specified time interval.
+ *
+ * Usage:
+ * This timer is used to send ARP requests at
+ * certain time intervals.
+ * Called by an interrupt to request an action
+ * at a later date.
+ */
+
+static void trigger_fr_arp(struct net_device *dev)
+{
+ fr_channel_t* chan = dev->priv;
+
+ mod_timer(&chan->fr_arp_timer, jiffies + chan->inarp_interval * HZ);
+ return;
+}
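+
+/* inarp_interval is scaled by HZ here, so it is evidently expressed in
+ * seconds; the timer fires fr_arp() roughly inarp_interval seconds from now.
+ */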
+
+
+
+/*==============================================================================
+ * ARP Request Action
+ *
+ * This function is called by the timer interrupt to send an ARP request
+ * to the remote end.
+ */
+
+static void fr_arp (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ fr_channel_t *chan = dev->priv;
+ volatile sdla_t *card = chan->card;
+ fr508_flags_t* flags = card->flags;
+
+	/* Send ARP packets for all devices until the
+         * ARP state changes to CONFIGURED */
+
+ if (chan->inarp == INARP_REQUEST &&
+ chan->common.state == WAN_CONNECTED &&
+ card->wandev.state == WAN_CONNECTED){
+ set_bit(0,&chan->inarp_ready);
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_ARP;
+ flags->imask |= FR_INTR_TIMER;
+ }
+
+ return;
+}
+
+
+/*==============================================================================
+ * Perform the Interrupt Test by running the READ_CODE_VERSION command
+ * MAX_INTR_TEST_COUNTER times.
+ */
+static int intr_test( sdla_t* card )
+{
+ fr_mbox_t* mb = card->mbox;
+ int err,i;
+
+ err = fr_set_intr_mode(card, FR_INTR_READY, card->wandev.mtu, 0 );
+
+ if (err == CMD_OK) {
+
+ for ( i = 0; i < MAX_INTR_TEST_COUNTER; i++ ) {
+ /* Run command READ_CODE_VERSION */
+ mb->cmd.length = 0;
+ mb->cmd.command = FR_READ_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+ if (err != CMD_OK)
+ fr_event(card, err, mb);
+ }
+
+ } else {
+ return err;
+ }
+
+ err = fr_set_intr_mode( card, 0, card->wandev.mtu, 0 );
+
+ if( err != CMD_OK )
+ return err;
+
+ return 0;
+}
+
+/*==============================================================================
+ * Determine what type of UDP call it is. FPIPE8ND ?
+ */
+static int udp_pkt_type( struct sk_buff *skb, sdla_t* card )
+{
+ fr_udp_pkt_t *fr_udp_pkt = (fr_udp_pkt_t *)skb->data;
+
+ /* Quick HACK */
+
+
+ if((fr_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
+ (fr_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45) &&
+ (fr_udp_pkt->udp_pkt.udp_dst_port ==
+ ntohs(card->wandev.udp_port)) &&
+ (fr_udp_pkt->wp_mgmt.request_reply ==
+ UDPMGMT_REQUEST)) {
+ if(!strncmp(fr_udp_pkt->wp_mgmt.signature,
+ UDPMGMT_FPIPE_SIGNATURE, 8)){
+ return UDP_FPIPE_TYPE;
+ }
+ }
+ return UDP_INVALID_TYPE;
+}
+
+
+/*==============================================================================
+ * Initializes the Statistics values in the fr_channel structure.
+ */
+void init_chan_statistics( fr_channel_t* chan)
+{
+ memset(&chan->drvstats_if_send.if_send_entry, 0,
+ sizeof(if_send_stat_t));
+ memset(&chan->drvstats_rx_intr.rx_intr_no_socket, 0,
+ sizeof(rx_intr_stat_t));
+ memset(&chan->drvstats_gen.UDP_PIPE_mgmt_kmalloc_err, 0,
+ sizeof(pipe_mgmt_stat_t));
+}
+
+/*==============================================================================
+ * Initializes the Statistics values in the Sdla_t structure.
+ */
+void init_global_statistics( sdla_t* card )
+{
+	/* Initialize global statistics for a card */
+ memset(&card->statistics.isr_entry, 0, sizeof(global_stats_t));
+}
+
+static void read_DLCI_IB_mapping( sdla_t* card, fr_channel_t* chan )
+{
+ fr_mbox_t* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ dlci_IB_mapping_t* result;
+ int err, counter, found;
+
+ do {
+ mbox->cmd.command = FR_READ_DLCI_IB_MAPPING;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && fr_event(card, err, mbox));
+
+ if( mbox->cmd.result != 0){
+ printk(KERN_INFO "%s: Read DLCI IB Mapping failed\n",
+ chan->name);
+ }
+
+ counter = mbox->cmd.length / sizeof(dlci_IB_mapping_t);
+ result = (void *)mbox->data;
+
+ found = 0;
+ for (; counter; --counter, ++result) {
+ if ( result->dlci == chan->dlci ) {
+ chan->IB_addr = result->addr_value;
+ if(card->hw.type == SDLA_S514){
+ chan->dlci_int_interface =
+ (void*)(card->hw.dpmbase +
+ chan->IB_addr);
+ }else{
+ chan->dlci_int_interface =
+ (void*)(card->hw.dpmbase +
+ (chan->IB_addr & 0x00001FFF));
+
+ }
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ printk( KERN_INFO "%s: DLCI %d not found by IB MAPPING cmd\n",
+ card->devname, chan->dlci);
+}
+
+
+
+void s508_s514_lock(sdla_t *card, unsigned long *smp_flags)
+{
+ if (card->hw.type != SDLA_S514){
+
+ spin_lock_irqsave(&card->wandev.lock, *smp_flags);
+ }else{
+ spin_lock(&card->u.f.if_send_lock);
+ }
+ return;
+}
+
+
+void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags)
+{
+ if (card->hw.type != SDLA_S514){
+
+ spin_unlock_irqrestore (&card->wandev.lock, *smp_flags);
+ }else{
+ spin_unlock(&card->u.f.if_send_lock);
+ }
+ return;
+}
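+
+/* A minimal usage sketch: callers bracket a critical section with this pair,
+ * passing the same flags variable to both helpers:
+ *
+ *	unsigned long smp_flags;
+ *
+ *	s508_s514_lock(card, &smp_flags);
+ *	... touch shared card/channel state ...
+ *	s508_s514_unlock(card, &smp_flags);
+ *
+ * On S508 cards this takes the wandev spinlock with interrupts disabled; on
+ * S514 cards it takes the if_send_lock spinlock.
+ */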
+
+
+
+/*----------------------------------------------------------------------
+ RECEIVE INTERRUPT: BOTTOM HALF HANDLERS
+ ----------------------------------------------------------------------*/
+
+
+/*========================================================
+ * bh_enqueue
+ *
+ * Description:
+ * Insert a received packet into a circular
+ * rx queue. This packet will be picked up
+ * by fr_bh() and sent up the stack to the
+ * user.
+ *
+ * Usage:
+ * This function is called by rx interrupt,
+ * in API mode.
+ *
+ */
+
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
+{
+ /* Check for full */
+ fr_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+
+ if (atomic_read(&chan->bh_buff_used) == MAX_BH_BUFF){
+ ++card->wandev.stats.rx_dropped;
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
+
+ if (chan->bh_write == (MAX_BH_BUFF-1)){
+ chan->bh_write=0;
+ }else{
+ ++chan->bh_write;
+ }
+
+ atomic_inc(&chan->bh_buff_used);
+
+ return 0;
+}
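+
+/* Ring bookkeeping sketch: bh_enqueue() advances bh_write (wrapping at
+ * MAX_BH_BUFF) and increments bh_buff_used; fr_bh_cleanup() below advances
+ * bh_read and decrements bh_buff_used once the skb has been handed to the
+ * API socket, so the two indices chase each other around the circular buffer.
+ */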
+
+
+/*========================================================
+ * trigger_fr_bh
+ *
+ * Description:
+ * Kick the fr_bh() handler
+ *
+ * Usage:
+ * rx interrupt calls this function during
+ * the API mode.
+ */
+
+static void trigger_fr_bh (fr_channel_t *chan)
+{
+ if (!test_and_set_bit(0,&chan->tq_working)){
+ wanpipe_queue_work(&chan->common.wanpipe_work);
+ }
+}
+
+
+/*========================================================
+ * fr_bh
+ *
+ * Description:
+ * Frame relay receive BH handler.
+ * Dequeue data from the BH circular
+ * buffer and pass it up the API sock.
+ *
+ * Rationale:
+ *	This function is used to offload the
+ * rx_interrupt during API operation mode.
+ * The fr_bh() function executes for each
+ * dlci/interface.
+ *
+ * Once receive interrupt copies data from the
+ * card into an skb buffer, the skb buffer
+ * is appended to a circular BH buffer.
+ * Then the interrupt kicks fr_bh() to finish the
+ * job at a later time (not within the interrupt).
+ *
+ * Usage:
+ * Interrupts use this to defer a task to
+ * a polling routine.
+ *
+ */
+
+static void fr_bh(struct net_device * dev)
+{
+ fr_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+ struct sk_buff *skb;
+
+ if (atomic_read(&chan->bh_buff_used) == 0){
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+
+ while (atomic_read(&chan->bh_buff_used)){
+
+ if (chan->common.sk == NULL || chan->common.func == NULL){
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+
+ skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
+
+ if (skb != NULL){
+
+ if (chan->common.sk == NULL || chan->common.func == NULL){
+ ++card->wandev.stats.rx_dropped;
+ ++chan->ifstats.rx_dropped;
+ dev_kfree_skb_any(skb);
+ fr_bh_cleanup(dev);
+ continue;
+ }
+
+ if (chan->common.func(skb,dev,chan->common.sk) != 0){
+				/* Socket full, cannot send; queue us for
+                                 * another try */
+ atomic_set(&chan->common.receive_block,1);
+ return;
+ }else{
+ fr_bh_cleanup(dev);
+ }
+ }else{
+ fr_bh_cleanup(dev);
+ }
+ }
+ clear_bit(0, &chan->tq_working);
+
+ return;
+}
+
+static int fr_bh_cleanup(struct net_device *dev)
+{
+ fr_channel_t* chan = dev->priv;
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
+
+ if (chan->bh_read == (MAX_BH_BUFF-1)){
+ chan->bh_read=0;
+ }else{
+ ++chan->bh_read;
+ }
+
+ atomic_dec(&chan->bh_buff_used);
+ return 0;
+}
+
+
+/*----------------------------------------------------------------------
+ POLL BH HANDLERS AND KICK ROUTINES
+ ----------------------------------------------------------------------*/
+
+/*============================================================
+ * trigger_fr_poll
+ *
+ * Description:
+ * 	Schedule the fr_poll() work item
+ *      for a specific dlci/interface.  This will kick
+ *      the fr_poll() routine at a later time. 
+ *
+ * Usage:
+ * 	Interrupts use this to defer a task to 
+ * a polling routine.
+ *
+ */
+static void trigger_fr_poll(struct net_device *dev)
+{
+ fr_channel_t* chan = dev->priv;
+ schedule_work(&chan->fr_poll_work);
+ return;
+}
+
+
+/*============================================================
+ * fr_poll
+ *
+ * Rationale:
+ *	We cannot manipulate the routing tables or
+ *	IP addresses within the interrupt. Therefore
+ *	we must perform such actions outside an interrupt
+ * at a later time.
+ *
+ * Description:
+ * Frame relay polling routine, responsible for
+ * shutting down interfaces upon disconnect
+ * and adding/removing routes.
+ *
+ * Usage:
+ * This function is executed for each frame relay
+ * dlci/interface through a tq_schedule bottom half.
+ *
+ * trigger_fr_poll() function is used to kick
+ * the fr_poll routine.
+ */
+
+static void fr_poll(struct net_device *dev)
+{
+
+ fr_channel_t* chan;
+ sdla_t *card;
+ u8 check_gateway=0;
+
+ if (!dev || (chan = dev->priv) == NULL)
+ return;
+
+ card = chan->card;
+
+	/* (Re)Configuration is in progress, stop what you are 
+ * doing and get out */
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ return;
+ }
+
+ switch (chan->common.state){
+
+ case WAN_DISCONNECTED:
+
+ if (test_bit(DYN_OPT_ON,&chan->interface_down) &&
+ !test_bit(DEV_DOWN, &chan->interface_down) &&
+ dev->flags&IFF_UP){
+
+ printk(KERN_INFO "%s: Interface %s is Down.\n",
+ card->devname,dev->name);
+ change_dev_flags(dev,dev->flags&~IFF_UP);
+ set_bit(DEV_DOWN, &chan->interface_down);
+ chan->route_flag = NO_ROUTE;
+
+ }else{
+ if (chan->inarp != INARP_NONE)
+ process_route(dev);
+ }
+ break;
+
+ case WAN_CONNECTED:
+
+ if (test_bit(DYN_OPT_ON,&chan->interface_down) &&
+ test_bit(DEV_DOWN, &chan->interface_down) &&
+ !(dev->flags&IFF_UP)){
+
+ printk(KERN_INFO "%s: Interface %s is Up.\n",
+ card->devname,dev->name);
+
+ change_dev_flags(dev,dev->flags|IFF_UP);
+ clear_bit(DEV_DOWN, &chan->interface_down);
+ check_gateway=1;
+ }
+
+ if (chan->inarp != INARP_NONE){
+ process_route(dev);
+ check_gateway=1;
+ }
+
+ if (chan->gateway && check_gateway)
+ add_gateway(card,dev);
+
+ break;
+
+ }
+
+ return;
+}
+
+/*==============================================================
+ * check_tx_status
+ *
+ * Rationale:
+ * 	We cannot transmit from an interrupt while
+ *	if_send() is transmitting data. Therefore,
+ *	we must check whether the tx buffers are
+ *	being used before we transmit from an
+ *	interrupt.
+ *
+ * Description:
+ * Checks whether it's safe to use the transmit
+ * buffers.
+ *
+ * Usage:
+ * ARP and UDP handling routines use this function
+ *	because they need to transmit data during
+ * an interrupt.
+ */
+
+static int check_tx_status(sdla_t *card, struct net_device *dev)
+{
+
+ if (card->hw.type == SDLA_S514){
+ if (test_bit(SEND_CRIT, (void*)&card->wandev.critical) ||
+ test_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical)) {
+ return 1;
+ }
+ }
+
+ if (netif_queue_stopped(dev) || (card->u.f.tx_interrupts_pending))
+ return 1;
+
+ return 0;
+}
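+
+/* Illustrative sketch (not compiled): how an interrupt-context sender such
+ * as an ARP or UDP handler is expected to consult check_tx_status() before
+ * touching the transmit buffers.  send_from_irq_example() and
+ * fr_send_data_example() are hypothetical names, not driver functions.
+ */
+#if 0
+static void send_from_irq_example(sdla_t *card, struct net_device *dev,
+				  void *data, int len)
+{
+	if (check_tx_status(card, dev)) {
+		/* if_send() owns the tx buffers right now: do not touch
+		 * them from the interrupt, retry on a later interrupt */
+		return;
+	}
+	fr_send_data_example(card, dev, data, len);
+}
+#endif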
+
+/*===============================================================
+ * move_dev_to_next
+ *
+ * Description:
+ *	Move the dev pointer to the next location in the
+ *	linked list.  If we are at the end of the
+ *	list, start again from the beginning.
+ *
+ * Usage:
+ * Timer interrupt uses this function to efficiently
+ * step through the devices that need to send ARP data.
+ *
+ */
+
+struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev)
+{
+ if (card->wandev.new_if_cnt != 1){
+ if (!*((struct net_device **)dev->priv))
+ return card->wandev.dev;
+ else
+ return *((struct net_device **)dev->priv);
+ }
+ return dev;
+}
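+
+/* Illustrative sketch (not compiled): round-robin use of move_dev_to_next()
+ * from a timer interrupt.  The cur_dev cursor and dev_wants_arp_example()
+ * are assumptions made for the example; only move_dev_to_next() is real.
+ */
+#if 0
+static void timer_round_robin_example(sdla_t *card, struct net_device *cur_dev)
+{
+	struct net_device *start = cur_dev;
+
+	do {
+		if (dev_wants_arp_example(cur_dev)) {
+			/* service this dlci/interface, then remember where
+			 * we stopped so the next timer tick continues here */
+			break;
+		}
+		cur_dev = move_dev_to_next(card, cur_dev);
+	} while (cur_dev != start);
+}
+#endif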
+
+/*==============================================================
+ * trigger_config_fr
+ *
+ * Rationale:
+ *	All commands must be performed inside an
+ *	interrupt.
+ *
+ * Description:
+ * 	Kick the config_fr() routine through the
+ * timer interrupt.
+ */
+
+
+static void trigger_config_fr (sdla_t *card)
+{
+ fr508_flags_t* flags = card->flags;
+
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
+ flags->imask |= FR_INTR_TIMER;
+}
+
+
+/*==============================================================
+ * config_fr
+ *
+ * Rationale:
+ *	All commands must be performed inside an
+ *	interrupt.
+ *
+ * Description:
+ * Configure a DLCI. This function is executed
+ * by a timer_interrupt. The if_open() function
+ * triggers it.
+ *
+ * Usage:
+ * new_if() collects all data necessary to
+ * configure the DLCI. It sets the chan->dlci_ready
+ * bit. When the if_open() function is executed
+ *	it checks this bit, and if it is set it triggers
+ * the timer interrupt to execute the config_fr()
+ * function.
+ */
+
+static void config_fr (sdla_t *card)
+{
+ struct net_device *dev;
+ fr_channel_t *chan;
+
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)) {
+
+ if ((chan=dev->priv) == NULL)
+ continue;
+
+ if (!test_bit(0,&chan->config_dlci))
+ continue;
+
+ clear_bit(0,&chan->config_dlci);
+
+		/* If signalling is set to NO, then set up the
+		 * DLCI addresses right away.  We don't have to wait
+		 * for the link to connect.
+		 */
+ if (card->wandev.signalling == WANOPT_NO){
+ printk(KERN_INFO "%s: Signalling set to NO: Mapping DLCI's\n",
+ card->wandev.name);
+ if (fr_init_dlci(card,chan)){
+ printk(KERN_INFO "%s: ERROR: Failed to configure DLCI %i !\n",
+ card->devname, chan->dlci);
+ return;
+ }
+ }
+
+ if (card->wandev.station == WANOPT_CPE) {
+
+ update_chan_state(dev);
+
+ /* CPE: issue full status enquiry */
+ fr_issue_isf(card, FR_ISF_FSE);
+
+ } else {
+ /* FR switch: activate DLCI(s) */
+
+ /* For Switch emulation we have to ADD and ACTIVATE
+ * the DLCI(s) that were configured with the SET_DLCI_
+ * CONFIGURATION command. Add and Activate will fail if
+ * DLCI specified is not included in the list.
+ *
+			 * Also, if_open() is called once for each interface,
+			 * but it does not get in here for every interface. So
+			 * we have to pass the entire list of DLCI(s) to the
+			 * add/activate routines.
+ */
+
+ if (!check_dlci_config (card, chan)){
+ fr_add_dlci(card, chan->dlci);
+ fr_activate_dlci(card, chan->dlci);
+ }
+ }
+
+ card->u.f.dlci_to_dev_map[chan->dlci] = dev;
+ }
+ return;
+}
+
+
+/*==============================================================
+ * trigger_unconfig_fr
+ *
+ * Rationale:
+ * All commands must be executed during an interrupt.
+ *
+ * Description:
+ *	Trigger the unconfig_fr() function through
+ * the timer interrupt.
+ *
+ */
+
+static void trigger_unconfig_fr(struct net_device *dev)
+{
+ fr_channel_t *chan = dev->priv;
+ volatile sdla_t *card = chan->card;
+ u32 timeout;
+ fr508_flags_t* flags = card->flags;
+ int reset_critical=0;
+
+ if (test_bit(PERI_CRIT,(void*)&card->wandev.critical)){
+ clear_bit(PERI_CRIT,(void*)&card->wandev.critical);
+ reset_critical=1;
+ }
+
+	/* run the unconfig_fr() routine
+	 * through the timer interrupt */
+ set_bit(0,(void*)&chan->unconfig_dlci);
+ card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UNCONFIG;
+ flags->imask |= FR_INTR_TIMER;
+
+ /* Wait for the command to complete */
+ timeout = jiffies;
+ for(;;) {
+
+ if(!(card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG))
+ break;
+
+ if ((jiffies - timeout) > (1 * HZ)){
+ card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG;
+ printk(KERN_INFO "%s: Failed to delete DLCI %i\n",
+ card->devname,chan->dlci);
+ break;
+ }
+ }
+
+ if (reset_critical){
+ set_bit(PERI_CRIT,(void*)&card->wandev.critical);
+ }
+}
+
+/*==============================================================
+ * unconfig_fr
+ *
+ * Rationale:
+ * All commands must be executed during an interrupt.
+ *
+ * Description:
+ *	Remove the DLCI from the firmware.
+ *	This function is used in NODE shutdown.
+ */
+
+static void unconfig_fr (sdla_t *card)
+{
+ struct net_device *dev;
+ fr_channel_t *chan;
+
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)){
+
+ if ((chan=dev->priv) == NULL)
+ continue;
+
+ if (!test_bit(0,&chan->unconfig_dlci))
+ continue;
+
+ clear_bit(0,&chan->unconfig_dlci);
+
+ if (card->wandev.station == WANOPT_NODE){
+ printk(KERN_INFO "%s: Unconfiguring DLCI %i\n",
+ card->devname,chan->dlci);
+ fr_delete_dlci(card,chan->dlci);
+ }
+ card->u.f.dlci_to_dev_map[chan->dlci] = NULL;
+ }
+}
+
+static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
+ char op_mode)
+{
+ struct sk_buff *skb = *skb_orig;
+ fr_channel_t *chan=dev->priv;
+
+ if (op_mode == WANPIPE){
+
+ chan->fr_header[0]=Q922_UI;
+
+ switch (htons(skb->protocol)){
+
+ case ETH_P_IP:
+ chan->fr_header[1]=NLPID_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 2;
+ }
+
+	/* If we are in bridging mode, we must apply
+	 * a bridged Ethernet encapsulation header */
+ if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
+
+
+ /* Encapsulate the packet as a bridged Ethernet frame. */
+#ifdef DEBUG
+ printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
+ dev->name);
+#endif
+
+ chan->fr_header[0] = 0x03;
+ chan->fr_header[1] = 0x00;
+ chan->fr_header[2] = 0x80;
+ chan->fr_header[3] = 0x00;
+ chan->fr_header[4] = 0x80;
+ chan->fr_header[5] = 0xC2;
+ chan->fr_header[6] = 0x00;
+ chan->fr_header[7] = 0x07;
+
+ /* Yuck. */
+ skb->protocol = ETH_P_802_3;
+ return 8;
+
+ }
+
+ return 0;
+}
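+
+/* Illustrative sketch (not compiled): how a transmit path can consume the
+ * header that setup_fr_header() builds in chan->fr_header.  The copy into a
+ * caller-supplied scratch buffer is an assumption for the example; the real
+ * if_send() path is not shown in this hunk.
+ */
+#if 0
+static int build_tx_frame_example(struct net_device *dev, struct sk_buff *skb,
+				  unsigned char *buf, char op_mode)
+{
+	fr_channel_t *chan = dev->priv;
+	int hdr_len = setup_fr_header(&skb, dev, op_mode);
+
+	if (hdr_len < 0)
+		return hdr_len;			/* unsupported protocol */
+
+	memcpy(buf, chan->fr_header, hdr_len);	/* Q.922/NLPID or bridge hdr */
+	memcpy(buf + hdr_len, skb->data, skb->len);
+	return hdr_len + skb->len;
+}
+#endif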
+
+
+static int check_dlci_config (sdla_t *card, fr_channel_t *chan)
+{
+ fr_mbox_t* mbox = card->mbox;
+ int err=0;
+ fr_conf_t *conf=NULL;
+ unsigned short dlci_num = chan->dlci;
+ int dlci_offset=0;
+ struct net_device *dev = NULL;
+
+ mbox->cmd.command = FR_READ_CONFIG;
+ mbox->cmd.length = 0;
+ mbox->cmd.dlci = dlci_num;
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err == CMD_OK){
+ return 0;
+ }
+
+ for (dev = card->wandev.dev; dev;
+ dev=*((struct net_device **)dev->priv))
+ set_chan_state(dev,WAN_DISCONNECTED);
+
+ printk(KERN_INFO "DLCI %i Not configured, configuring\n",dlci_num);
+
+ mbox->cmd.command = FR_COMM_DISABLE;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err != CMD_OK){
+ fr_event(card, err, mbox);
+ return 2;
+ }
+
+ printk(KERN_INFO "Disabled Communications \n");
+
+ mbox->cmd.command = FR_READ_CONFIG;
+ mbox->cmd.length = 0;
+ mbox->cmd.dlci = 0;
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK){
+ fr_event(card, err, mbox);
+ return 2;
+ }
+
+ conf = (fr_conf_t *)mbox->data;
+
+ dlci_offset=0;
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)) {
+ fr_channel_t *chan_tmp = dev->priv;
+ conf->dlci[dlci_offset] = chan_tmp->dlci;
+ dlci_offset++;
+ }
+
+ printk(KERN_INFO "Got Fr configuration Buffer Length is %x Dlci %i Dlci Off %i\n",
+ mbox->cmd.length,
+ mbox->cmd.length > 0x20 ? conf->dlci[0] : -1,
+ dlci_offset );
+
+ mbox->cmd.length = 0x20 + dlci_offset*2;
+
+ mbox->cmd.command = FR_SET_CONFIG;
+ mbox->cmd.dlci = 0;
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK){
+ fr_event(card, err, mbox);
+ return 2;
+ }
+
+ initialize_rx_tx_buffers (card);
+
+
+	printk(KERN_INFO "Configuration succeeded for new DLCI %i\n",dlci_num);
+
+ if (fr_comm_enable (card)){
+ return 2;
+ }
+
+ printk(KERN_INFO "Enabling Communications \n");
+
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)) {
+ fr_channel_t *chan_tmp = dev->priv;
+ fr_init_dlci(card,chan_tmp);
+ fr_add_dlci(card, chan_tmp->dlci);
+ fr_activate_dlci(card, chan_tmp->dlci);
+ }
+
+	printk(KERN_INFO "END OF CONFIGURATION %i\n",dlci_num);
+
+ return 1;
+}
+
+static void initialize_rx_tx_buffers (sdla_t *card)
+{
+ fr_buf_info_t* buf_info;
+
+ if (card->hw.type == SDLA_S514) {
+
+ buf_info = (void*)(card->hw.dpmbase + FR_MB_VECTOR +
+ FR508_RXBC_OFFS);
+
+ card->rxmb = (void*)(buf_info->rse_next + card->hw.dpmbase);
+
+ card->u.f.rxmb_base =
+ (void*)(buf_info->rse_base + card->hw.dpmbase);
+
+ card->u.f.rxmb_last =
+ (void*)(buf_info->rse_base +
+ (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) +
+ card->hw.dpmbase);
+ }else{
+ buf_info = (void*)(card->hw.dpmbase + FR508_RXBC_OFFS);
+
+ card->rxmb = (void*)(buf_info->rse_next -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ card->u.f.rxmb_base =
+ (void*)(buf_info->rse_base -
+ FR_MB_VECTOR + card->hw.dpmbase);
+
+ card->u.f.rxmb_last =
+ (void*)(buf_info->rse_base +
+ (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) -
+ FR_MB_VECTOR + card->hw.dpmbase);
+ }
+
+ card->u.f.rx_base = buf_info->buf_base;
+ card->u.f.rx_top = buf_info->buf_top;
+
+ card->u.f.tx_interrupts_pending = 0;
+
+ return;
+}
+
+
+
+MODULE_LICENSE("GPL");
+
+/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdla_ft1.c b/drivers/net/wan/sdla_ft1.c
new file mode 100644
index 000000000000..5e3124856eb0
--- /dev/null
+++ b/drivers/net/wan/sdla_ft1.c
@@ -0,0 +1,344 @@
+/*****************************************************************************
+* sdla_ft1.c	WANPIPE(tm) Multiprotocol WAN Link Driver. FT1 Configuration module.
+*
+* Authors: Nenad Corbic <ncorbic@sangoma.com>
+* Gideon Hack
+*
+* Copyright: (c) 1995-1999 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Sep 30, 1999 Nenad Corbic Fixed dynamic IP and route setup.
+* Sep 23, 1999 Nenad Corbic Added SMP support, fixed tracing
+* Sep 13, 1999 Nenad Corbic Split up Port 0 and 1 into separate devices.
+* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
+* Oct 30, 1998 Jaspreet Singh Added Support for CHDLC API (HDLC STREAMING).
+* Oct 28, 1998 Jaspreet Singh Added Support for Dual Port CHDLC.
+* Aug 07, 1998 David Fong Initial version.
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/if_arp.h> /* ARPHRD_* defines */
+
+#include <linux/inetdevice.h>
+#include <asm/uaccess.h>
+
+#include <linux/in.h> /* sockaddr_in */
+#include <linux/inet.h>
+#include <linux/if.h>
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <linux/sdlapci.h>
+#include <asm/io.h>
+
+#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
+
+/****** Defines & Macros ****************************************************/
+
+/* reasons for enabling the timer interrupt on the adapter */
+#define TMR_INT_ENABLED_UDP 0x0001
+#define TMR_INT_ENABLED_UPDATE 0x0002
+
+#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
+#define CHDLC_HDR_LEN 1
+
+#define IFF_POINTTOPOINT 0x10
+
+#define WANPIPE 0x00
+#define API 0x01
+#define CHDLC_API 0x01
+
+#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
+
+
+/******Data Structures*****************************************************/
+
+/* This structure is placed in the private data area of the device structure.
+ * The card structure used to occupy the private area, but now the following
+ * structure incorporates the card pointer along with CHDLC-specific data.
+ */
+
+typedef struct chdlc_private_area
+{
+ struct net_device *slave;
+ sdla_t *card;
+ int TracingEnabled; /* For enabling Tracing */
+ unsigned long curr_trace_addr; /* Used for Tracing */
+ unsigned long start_trace_addr;
+ unsigned long end_trace_addr;
+ unsigned long base_addr_trace_buffer;
+ unsigned long end_addr_trace_buffer;
+ unsigned short number_trace_elements;
+ unsigned available_buffer_space;
+ unsigned long router_start_time;
+ unsigned char route_status;
+ unsigned char route_removed;
+ unsigned long tick_counter; /* For 5s timeout counter */
+ unsigned long router_up_time;
+ u32 IP_address; /* IP addressing */
+ u32 IP_netmask;
+	unsigned char mc;		/* Multicast support on/off */
+ unsigned short udp_pkt_lgth; /* udp packet processing */
+ char udp_pkt_src;
+ char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
+ unsigned short timer_int_enabled;
+ char update_comms_stats; /* updating comms stats */
+ //FIXME: add driver stats as per frame relay!
+
+} chdlc_private_area_t;
+
+/* Route Status options */
+#define NO_ROUTE 0x00
+#define ADD_ROUTE 0x01
+#define ROUTE_ADDED 0x02
+#define REMOVE_ROUTE 0x03
+
+
+/****** Function Prototypes *************************************************/
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int wpft1_exec (struct sdla *card, void *u_cmd, void *u_data);
+static int chdlc_read_version (sdla_t* card, char* str);
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
+
+/****** Public Functions ****************************************************/
+
+/*============================================================================
+ * Cisco HDLC protocol initialization routine.
+ *
+ * This routine is called by the main WANPIPE module during setup. At this
+ * point adapter is completely initialized and firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+int wpft1_init (sdla_t* card, wandev_conf_t* conf)
+{
+ unsigned char port_num;
+ int err;
+
+ union
+ {
+ char str[80];
+ } u;
+ volatile CHDLC_MAILBOX_STRUCT* mb;
+ CHDLC_MAILBOX_STRUCT* mb1;
+ unsigned long timeout;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_CHDLC) {
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Use primary port */
+ card->u.c.comm_port = 0;
+
+
+ /* Initialize protocol-specific fields */
+ if(card->hw.type != SDLA_S514){
+ card->mbox = (void *) card->hw.dpmbase;
+ }else{
+ card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
+ }
+
+ mb = mb1 = card->mbox;
+
+ if (!card->configured){
+
+ /* The board will place an 'I' in the return code to indicate that it is
+ ready to accept commands. We expect this to be completed in less
+ than 1 second. */
+
+ timeout = jiffies;
+ while (mb->return_code != 'I') /* Wait 1s for board to initialize */
+ if ((jiffies - timeout) > 1*HZ) break;
+
+ if (mb->return_code != 'I') {
+ printk(KERN_INFO
+ "%s: Initialization not completed by adapter\n",
+ card->devname);
+ printk(KERN_INFO "Please contact Sangoma representative.\n");
+ return -EIO;
+ }
+ }
+
+ /* Read firmware version. Note that when adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+
+ if (chdlc_read_version(card, u.str))
+ return -EIO;
+
+ printk(KERN_INFO "%s: Running FT1 Configuration firmware v%s\n",
+ card->devname, u.str);
+
+ card->isr = NULL;
+ card->poll = NULL;
+ card->exec = &wpft1_exec;
+ card->wandev.update = NULL;
+ card->wandev.new_if = NULL;
+ card->wandev.del_if = NULL;
+ card->wandev.state = WAN_DUALPORT;
+ card->wandev.udp_port = conf->udp_port;
+
+ card->wandev.new_if_cnt = 0;
+
+ /* This is for the ports link state */
+ card->u.c.state = WAN_DISCONNECTED;
+
+ /* reset the number of times the 'update()' proc has been called */
+ card->u.c.update_call_count = 0;
+
+ card->wandev.ttl = 0x7F;
+ card->wandev.interface = 0;
+
+ card->wandev.clocking = 0;
+
+ port_num = card->u.c.comm_port;
+
+ /* Setup Port Bps */
+
+ card->wandev.bps = 0;
+
+ card->wandev.mtu = MIN_LGTH_CHDLC_DATA_CFG;
+
+ /* Set up the interrupt status area */
+ /* Read the CHDLC Configuration and obtain:
+	 *	Ptr to shared memory info struct
+ * Use this pointer to calculate the value of card->u.c.flags !
+ */
+ mb1->buffer_length = 0;
+ mb1->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
+ if(err != COMMAND_OK) {
+ chdlc_error(card, err, mb1);
+ return -EIO;
+ }
+
+ if(card->hw.type == SDLA_S514){
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct));
+ }else{
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
+ }
+
+ card->wandev.state = WAN_FT1_READY;
+ printk(KERN_INFO "%s: FT1 Config Ready !\n",card->devname);
+
+ return 0;
+}
+
+static int wpft1_exec(sdla_t *card, void *u_cmd, void *u_data)
+{
+ CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
+ int len;
+
+ if (copy_from_user((void*)&mbox->command, u_cmd, sizeof(ft1_exec_cmd_t))){
+ return -EFAULT;
+ }
+
+ len = mbox->buffer_length;
+
+ if (len) {
+ if( copy_from_user((void*)&mbox->data, u_data, len)){
+ return -EFAULT;
+ }
+ }
+
+ /* execute command */
+ if (!sdla_exec(mbox)){
+ return -EIO;
+ }
+
+ /* return result */
+ if( copy_to_user(u_cmd, (void*)&mbox->command, sizeof(ft1_exec_cmd_t))){
+ return -EFAULT;
+ }
+
+ len = mbox->buffer_length;
+
+ if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len)){
+ return -EFAULT;
+ }
+
+ return 0;
+
+}
+
+/*============================================================================
+ * Read firmware code version.
+ * Put code version as ASCII string in str.
+ */
+static int chdlc_read_version (sdla_t* card, char* str)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int len;
+ char err;
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ }
+ else if (str) { /* is not null */
+ len = mb->buffer_length;
+ memcpy(str, mb->data, len);
+ str[len] = '\0';
+ }
+ return (err);
+}
+
+/*============================================================================
+ * Firmware error handler.
+ * This routine is called whenever firmware command returns non-zero
+ * return code.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
+{
+ unsigned cmd = mb->command;
+
+ switch (err) {
+
+ case CMD_TIMEOUT:
+ printk(KERN_ERR "%s: command 0x%02X timed out!\n",
+ card->devname, cmd);
+ break;
+
+ case S514_BOTH_PORTS_SAME_CLK_MODE:
+ if(cmd == SET_CHDLC_CONFIGURATION) {
+ printk(KERN_INFO
+ "%s: Configure both ports for the same clock source\n",
+ card->devname);
+ break;
+ }
+
+ default:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
+ card->devname, cmd, err);
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/sdla_ppp.c b/drivers/net/wan/sdla_ppp.c
new file mode 100644
index 000000000000..1761cb68ab48
--- /dev/null
+++ b/drivers/net/wan/sdla_ppp.c
@@ -0,0 +1,3429 @@
+/*****************************************************************************
+* sdla_ppp.c WANPIPE(tm) Multiprotocol WAN Link Driver. PPP module.
+*
+* Author: Nenad Corbic <ncorbic@sangoma.com>
+*
+* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Feb 28, 2001 Nenad Corbic o Updated if_tx_timeout() routine for
+* 2.4.X kernels.
+* Nov 29, 2000 Nenad Corbic o Added the 2.4.x kernel support:
+* get_ip_address() function has moved
+* into the ppp_poll() routine. It cannot
+* be called from an interrupt.
+* Nov 07, 2000 Nenad Corbic o Added security features for UDP debugging:
+* Deny all and specify allowed requests.
+* May 02, 2000 Nenad Corbic o Added the dynamic interface shutdown
+* option. When the link goes down, the
+* network interface IFF_UP flag is reset.
+* Mar 06, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
+* Feb 25, 2000 Nenad Corbic o Fixed the FT1 UDP debugger problem.
+* Feb 09, 2000  Nenad Corbic	o Shutdown bug fix. update() was called
+* with NULL dev pointer: no check.
+* Jan 24, 2000 Nenad Corbic o Disabled use of CMD complete inter.
+* Dec 15, 1999  Nenad Corbic	o Fixed up header files for 2.0.X kernels
+* Oct 25, 1999 Nenad Corbic o Support for 2.0.X kernels
+* Moved dynamic route processing into
+* a polling routine.
+* Oct 07, 1999 Nenad Corbic o Support for S514 PCI card.
+*					Gideon Hack 	o UDP and Updates executed using timer interrupt
+* Sep 10, 1999 Nenad Corbic o Fixed up the /proc statistics
+* Jul 20, 1999 Nenad Corbic o Remove the polling routines and use
+* interrupts instead.
+* Sep 17, 1998 Jaspreet Singh o Updates for 2.2.X Kernels.
+* Aug 13, 1998 Jaspreet Singh o Improved Line Tracing.
+* Jun 22, 1998 David Fong o Added remote IP address assignment
+* Mar 15, 1998 Alan Cox o 2.1.8x basic port.
+* Apr 16, 1998 Jaspreet Singh o using htons() for the IPX protocol.
+* Dec 09, 1997 Jaspreet Singh o Added PAP and CHAP.
+* o Implemented new routines like
+* ppp_set_inbnd_auth(), ppp_set_outbnd_auth(),
+* tokenize() and strstrip().
+* Nov 27, 1997 Jaspreet Singh o Added protection against enabling of irqs
+* while they have been disabled.
+* Nov 24, 1997 Jaspreet Singh o Fixed another RACE condition caused by
+* disabling and enabling of irqs.
+* o Added new counters for stats on disable/enable
+* IRQs.
+* Nov 10, 1997 Jaspreet Singh o Initialized 'skb->mac.raw' to 'skb->data'
+* before every netif_rx().
+* o Free up the device structure in del_if().
+* Nov 07, 1997 Jaspreet Singh o Changed the delay to zero for Line tracing
+* command.
+* Oct 20, 1997 Jaspreet Singh o Added hooks in for Router UP time.
+* Oct 16, 1997 Jaspreet Singh o The critical flag is used to maintain flow
+* control by avoiding RACE conditions. The
+* cli() and restore_flags() are taken out.
+* A new structure, "ppp_private_area", is added
+* to provide Driver Statistics.
+* Jul 21, 1997 Jaspreet Singh o Protected calls to sdla_peek() by adding
+* save_flags(), cli() and restore_flags().
+* Jul 07, 1997 Jaspreet Singh o Added configurable TTL for UDP packets
+*				o Added ability to discard multicast and
+*				  broadcast source addressed packets.
+* Jun 27, 1997 Jaspreet Singh o Added FT1 monitor capabilities
+* New case (0x25) statement in if_send routine.
+* Added a global variable rCount to keep track
+* of FT1 status enabled on the board.
+* May 22, 1997 Jaspreet Singh o Added change in the PPP_SET_CONFIG command for
+* 508 card to reflect changes in the new
+*				ppp508.sfm for supporting continuous transmission
+* of Configure-Request packets without receiving a
+* reply
+* OR-ed 0x300 to conf_flags
+* o Changed connect_tmout from 900 to 0
+* May 21, 1997 Jaspreet Singh o Fixed UDP Management for multiple boards
+* Apr 25, 1997 Farhan Thawar o added UDP Management stuff
+* Mar 11, 1997 Farhan Thawar Version 3.1.1
+* o fixed (+1) bug in rx_intr()
+* o changed if_send() to return 0 if
+* wandev.critical() is true
+* o free socket buffer in if_send() if
+* returning 0
+* Jan 15, 1997 Gene Kozin Version 3.1.0
+* o implemented exec() entry point
+* Jan 06, 1997 Gene Kozin Initial version.
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/if_arp.h> /* ARPHRD_* defines */
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <linux/in.h> /* sockaddr_in */
+
+
+#include <asm/uaccess.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+
+#include <linux/if.h>
+#include <linux/sdla_ppp.h> /* PPP firmware API definitions */
+#include <linux/sdlasfm.h> /* S514 Type Definition */
+/****** Defines & Macros ****************************************************/
+
+#define PPP_DFLT_MTU 1500 /* default MTU */
+#define PPP_MAX_MTU 4000 /* maximum MTU */
+#define PPP_HDR_LEN 1
+
+#define MAX_IP_ERRORS 100
+
+#define CONNECT_TIMEOUT (90*HZ) /* link connection timeout */
+#define HOLD_DOWN_TIME (5*HZ) /* link hold down time : Changed from 30 to 5 */
+
+/* For handle_IPXWAN() */
+#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
+
+/* Macro for enabling/disabling debugging comments */
+//#define NEX_DEBUG
+#ifdef NEX_DEBUG
+#define NEX_PRINTK(format, a...) printk(format, ## a)
+#else
+#define NEX_PRINTK(format, a...)
+#endif /* NEX_DEBUG */
+
+#define DCD(a) ( a & 0x08 ? "HIGH" : "LOW" )
+#define CTS(a) ( a & 0x20 ? "HIGH" : "LOW" )
+#define LCP(a) ( a == 0x09 ? "OPEN" : "CLOSED" )
+#define IP(a) ( a == 0x09 ? "ENABLED" : "DISABLED" )
+
+#define TMR_INT_ENABLED_UPDATE 0x01
+#define TMR_INT_ENABLED_PPP_EVENT 0x02
+#define TMR_INT_ENABLED_UDP 0x04
+#define TMR_INT_ENABLED_CONFIG 0x20
+
+/* Set Configuraton Command Definitions */
+#define PERCENT_TX_BUFF 60
+#define TIME_BETWEEN_CONF_REQ 30
+#define TIME_BETWEEN_PAP_CHAP_REQ 30
+#define WAIT_PAP_CHAP_WITHOUT_REPLY 300
+#define WAIT_AFTER_DCD_CTS_LOW 5
+#define TIME_DCD_CTS_LOW_AFTER_LNK_DOWN 10
+#define WAIT_DCD_HIGH_AFTER_ENABLE_COMM 900
+#define MAX_CONF_REQ_WITHOUT_REPLY 10
+#define MAX_TERM_REQ_WITHOUT_REPLY 2
+#define NUM_CONF_NAK_WITHOUT_REPLY 5
+#define NUM_AUTH_REQ_WITHOUT_REPLY 10
+
+#define END_OFFSET 0x1F0
+
+
+/******Data Structures*****************************************************/
+
+/* This structure is placed in the private data area of the device structure.
+ * The card structure used to occupy the private area, but now the following
+ * structure incorporates the card pointer along with PPP-specific data.
+ */
+
+typedef struct ppp_private_area
+{
+ struct net_device *slave;
+ sdla_t* card;
+ unsigned long router_start_time; /*router start time in sec */
+ unsigned long tick_counter; /*used for 5 second counter*/
+ unsigned mc; /*multicast support on or off*/
+ unsigned char enable_IPX;
+ unsigned long network_number;
+ unsigned char pap;
+ unsigned char chap;
+ unsigned char sysname[31]; /* system name for in-bnd auth*/
+ unsigned char userid[511]; /* list of user ids */
+ unsigned char passwd[511]; /* list of passwords */
+ unsigned protocol; /* SKB Protocol */
+ u32 ip_local; /* Local IP Address */
+ u32 ip_remote; /* remote IP Address */
+
+ u32 ip_local_tmp;
+ u32 ip_remote_tmp;
+
+ unsigned char timer_int_enabled; /* Who enabled the timer inter*/
+ unsigned char update_comms_stats; /* Used by update function */
+ unsigned long curr_trace_addr; /* Trace information */
+ unsigned long start_trace_addr;
+ unsigned long end_trace_addr;
+
+	unsigned char interface_down;	/* Bring down interface when channel
+ goes down */
+ unsigned long config_wait_timeout; /* After if_open() if in dynamic if mode,
+ wait a few seconds before configuring */
+
+ unsigned short udp_pkt_lgth;
+ char udp_pkt_src;
+ char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
+
+ /* PPP specific statistics */
+
+ if_send_stat_t if_send_stat;
+ rx_intr_stat_t rx_intr_stat;
+ pipe_mgmt_stat_t pipe_mgmt_stat;
+
+ unsigned long router_up_time;
+
+ /* Polling work queue entry. Each interface
+ * has its own work queue entry, which is used
+ * to defer events from the interrupt */
+ struct work_struct poll_work;
+ struct timer_list poll_delay_timer;
+
+ u8 gateway;
+ u8 config_ppp;
+ u8 ip_error;
+
+}ppp_private_area_t;
+
+/* variable for keeping track of enabling/disabling FT1 monitor status */
+static int rCount = 0;
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+/****** Function Prototypes *************************************************/
+
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int update(struct wan_device *wandev);
+static int new_if(struct wan_device *wandev, struct net_device *dev,
+ wanif_conf_t *conf);
+static int del_if(struct wan_device *wandev, struct net_device *dev);
+
+/* WANPIPE-specific entry points */
+static int wpp_exec (struct sdla *card, void *u_cmd, void *u_data);
+
+/* Network device interface */
+static int if_init(struct net_device *dev);
+static int if_open(struct net_device *dev);
+static int if_close(struct net_device *dev);
+static int if_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ void *daddr, void *saddr, unsigned len);
+
+static void if_tx_timeout(struct net_device *dev);
+
+static int if_rebuild_hdr(struct sk_buff *skb);
+static struct net_device_stats *if_stats(struct net_device *dev);
+static int if_send(struct sk_buff *skb, struct net_device *dev);
+
+
+/* PPP firmware interface functions */
+static int ppp_read_version(sdla_t *card, char *str);
+static int ppp_set_outbnd_auth(sdla_t *card, ppp_private_area_t *ppp_priv_area);
+static int ppp_set_inbnd_auth(sdla_t *card, ppp_private_area_t *ppp_priv_area);
+static int ppp_configure(sdla_t *card, void *data);
+static int ppp_set_intr_mode(sdla_t *card, unsigned char mode);
+static int ppp_comm_enable(sdla_t *card);
+static int ppp_comm_disable(sdla_t *card);
+static int ppp_comm_disable_shutdown(sdla_t *card);
+static int ppp_get_err_stats(sdla_t *card);
+static int ppp_send(sdla_t *card, void *data, unsigned len, unsigned proto);
+static int ppp_error(sdla_t *card, int err, ppp_mbox_t *mb);
+
+static void wpp_isr(sdla_t *card);
+static void rx_intr(sdla_t *card);
+static void event_intr(sdla_t *card);
+static void timer_intr(sdla_t *card);
+
+/* Background polling routines */
+static void process_route(sdla_t *card);
+static void retrigger_comm(sdla_t *card);
+
+/* Miscellaneous functions */
+static int read_info( sdla_t *card );
+static int read_connection_info (sdla_t *card);
+static void remove_route( sdla_t *card );
+static int config508(struct net_device *dev, sdla_t *card);
+static void show_disc_cause(sdla_t * card, unsigned cause);
+static int reply_udp( unsigned char *data, unsigned int mbox_len );
+static void process_udp_mgmt_pkt(sdla_t *card, struct net_device *dev,
+ ppp_private_area_t *ppp_priv_area);
+static void init_ppp_tx_rx_buff( sdla_t *card );
+static int intr_test( sdla_t *card );
+static int udp_pkt_type( struct sk_buff *skb , sdla_t *card);
+static void init_ppp_priv_struct( ppp_private_area_t *ppp_priv_area);
+static void init_global_statistics( sdla_t *card );
+static int tokenize(char *str, char **tokens);
+static char* strstrip(char *str, char *s);
+static int chk_bcast_mcast_addr(sdla_t* card, struct net_device* dev,
+ struct sk_buff *skb);
+
+static int config_ppp (sdla_t *);
+static void ppp_poll(struct net_device *dev);
+static void trigger_ppp_poll(struct net_device *dev);
+static void ppp_poll_delay (unsigned long dev_ptr);
+
+
+static int Read_connection_info;
+static int Intr_test_counter;
+static unsigned short available_buffer_space;
+
+
+/* IPX functions */
+static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number,
+ unsigned char incoming);
+static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_PX,
+ unsigned long network_number, unsigned short proto);
+
+/* Lock Functions */
+static void s508_lock (sdla_t *card, unsigned long *smp_flags);
+static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
+
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ ppp_private_area_t* ppp_priv_area );
+static unsigned short calc_checksum (char *data, int len);
+static void disable_comm (sdla_t *card);
+static int detect_and_fix_tx_bug (sdla_t *card);
+
+/****** Public Functions ****************************************************/
+
+/*============================================================================
+ * PPP protocol initialization routine.
+ *
+ * This routine is called by the main WANPIPE module during setup. At this
+ * point adapter is completely initialized and firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+int wpp_init(sdla_t *card, wandev_conf_t *conf)
+{
+ ppp_flags_t *flags;
+ union
+ {
+ char str[80];
+ } u;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_PPP) {
+
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+
+ }
+
+ /* Initialize miscellaneous pointers to structures on the adapter */
+ switch (card->hw.type) {
+
+ case SDLA_S508:
+ card->mbox =(void*)(card->hw.dpmbase + PPP508_MB_OFFS);
+ card->flags=(void*)(card->hw.dpmbase + PPP508_FLG_OFFS);
+ break;
+
+ case SDLA_S514:
+ card->mbox =(void*)(card->hw.dpmbase + PPP514_MB_OFFS);
+ card->flags=(void*)(card->hw.dpmbase + PPP514_FLG_OFFS);
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+ flags = card->flags;
+
+ /* Read firmware version. Note that when adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+ if (ppp_read_version(card, NULL) || ppp_read_version(card, u.str))
+ return -EIO;
+
+ printk(KERN_INFO "%s: running PPP firmware v%s\n",card->devname, u.str);
+ /* Adjust configuration and set defaults */
+ card->wandev.mtu = (conf->mtu) ?
+ min_t(unsigned int, conf->mtu, PPP_MAX_MTU) : PPP_DFLT_MTU;
+
+ card->wandev.bps = conf->bps;
+ card->wandev.interface = conf->interface;
+ card->wandev.clocking = conf->clocking;
+ card->wandev.station = conf->station;
+ card->isr = &wpp_isr;
+ card->poll = NULL;
+ card->exec = &wpp_exec;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = &del_if;
+ card->wandev.udp_port = conf->udp_port;
+ card->wandev.ttl = conf->ttl;
+ card->wandev.state = WAN_DISCONNECTED;
+ card->disable_comm = &disable_comm;
+ card->irq_dis_if_send_count = 0;
+ card->irq_dis_poll_count = 0;
+ card->u.p.authenticator = conf->u.ppp.authenticator;
+ card->u.p.ip_mode = conf->u.ppp.ip_mode ?
+ conf->u.ppp.ip_mode : WANOPT_PPP_STATIC;
+ card->TracingEnabled = 0;
+ Read_connection_info = 1;
+
+ /* initialize global statistics */
+ init_global_statistics( card );
+
+
+
+ if (!card->configured){
+ int err;
+
+ Intr_test_counter = 0;
+ err = intr_test(card);
+
+ if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
+ printk("%s: Interrupt Test Failed, Counter: %i\n",
+ card->devname, Intr_test_counter);
+ printk( "%s: Please choose another interrupt\n",card->devname);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "%s: Interrupt Test Passed, Counter: %i\n",
+ card->devname, Intr_test_counter);
+ card->configured = 1;
+ }
+
+ ppp_set_intr_mode(card, PPP_INTR_TIMER);
+
+ /* Turn off the transmit and timer interrupt */
+ flags->imask &= ~PPP_INTR_TIMER;
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/******* WAN Device Driver Entry Points *************************************/
+
+/*============================================================================
+ * Update device status & statistics.
+ */
+static int update(struct wan_device *wandev)
+{
+ sdla_t* card = wandev->private;
+ struct net_device* dev;
+ volatile ppp_private_area_t *ppp_priv_area;
+ ppp_flags_t *flags = card->flags;
+ unsigned long timeout;
+
+ /* sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ /* Shutdown bug fix. This function can be
+ * called with NULL dev pointer during
+ * shutdown
+ */
+ if ((dev=card->wandev.dev) == NULL){
+ return -ENODEV;
+ }
+
+ if ((ppp_priv_area=dev->priv) == NULL){
+ return -ENODEV;
+ }
+
+ ppp_priv_area->update_comms_stats = 2;
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
+ flags->imask |= PPP_INTR_TIMER;
+
+ /* wait a maximum of 1 second for the statistics to be updated */
+ timeout = jiffies;
+ for(;;) {
+ if(ppp_priv_area->update_comms_stats == 0){
+ break;
+ }
+ if ((jiffies - timeout) > (1 * HZ)){
+ ppp_priv_area->update_comms_stats = 0;
+ ppp_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created)
+ */
+static int new_if(struct wan_device *wandev, struct net_device *dev,
+ wanif_conf_t *conf)
+{
+ sdla_t *card = wandev->private;
+ ppp_private_area_t *ppp_priv_area;
+
+ if (wandev->ndev)
+ return -EEXIST;
+
+
+ printk(KERN_INFO "%s: Configuring Interface: %s\n",
+ card->devname, conf->name);
+
+ if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
+
+ printk(KERN_INFO "%s: Invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+
+ }
+
+ /* allocate and initialize private data */
+ ppp_priv_area = kmalloc(sizeof(ppp_private_area_t), GFP_KERNEL);
+
+ if( ppp_priv_area == NULL )
+ return -ENOMEM;
+
+ memset(ppp_priv_area, 0, sizeof(ppp_private_area_t));
+
+ ppp_priv_area->card = card;
+
+ /* initialize data */
+ strcpy(card->u.p.if_name, conf->name);
+
+ /* initialize data in ppp_private_area structure */
+
+ init_ppp_priv_struct( ppp_priv_area );
+
+ ppp_priv_area->mc = conf->mc;
+ ppp_priv_area->pap = conf->pap;
+ ppp_priv_area->chap = conf->chap;
+
+ /* Option to bring down the interface when
+ * the link goes down */
+ if (conf->if_down){
+ set_bit(DYN_OPT_ON,&ppp_priv_area->interface_down);
+ printk("%s: Dynamic interface configuration enabled\n",
+ card->devname);
+ }
+
+ /* If no user ids are specified */
+ if(!strlen(conf->userid) && (ppp_priv_area->pap||ppp_priv_area->chap)){
+ kfree(ppp_priv_area);
+ return -EINVAL;
+ }
+
+ /* If no passwords are specified */
+ if(!strlen(conf->passwd) && (ppp_priv_area->pap||ppp_priv_area->chap)){
+ kfree(ppp_priv_area);
+ return -EINVAL;
+ }
+
+ if(strlen(conf->sysname) > 31){
+ kfree(ppp_priv_area);
+ return -EINVAL;
+ }
+
+ /* If no system name is specified */
+ if(!strlen(conf->sysname) && (card->u.p.authenticator)){
+ kfree(ppp_priv_area);
+ return -EINVAL;
+ }
+
+ /* copy the data into the ppp private structure */
+ memcpy(ppp_priv_area->userid, conf->userid, strlen(conf->userid));
+ memcpy(ppp_priv_area->passwd, conf->passwd, strlen(conf->passwd));
+ memcpy(ppp_priv_area->sysname, conf->sysname, strlen(conf->sysname));
+
+
+ ppp_priv_area->enable_IPX = conf->enable_IPX;
+ if (conf->network_number){
+ ppp_priv_area->network_number = conf->network_number;
+ }else{
+ ppp_priv_area->network_number = 0xDEADBEEF;
+ }
+
+ /* Tells us that if this interface is a
+ * gateway or not */
+ if ((ppp_priv_area->gateway = conf->gateway) == WANOPT_YES){
+ printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
+ card->devname,card->u.p.if_name);
+ }
+
+ /* prepare network device data space for registration */
+ strcpy(dev->name,card->u.p.if_name);
+
+ dev->init = &if_init;
+ dev->priv = ppp_priv_area;
+ dev->mtu = min_t(unsigned int, dev->mtu, card->wandev.mtu);
+
+ /* Initialize the polling work routine */
+ INIT_WORK(&ppp_priv_area->poll_work, (void*)(void*)ppp_poll, dev);
+
+ /* Initialize the polling delay timer */
+ init_timer(&ppp_priv_area->poll_delay_timer);
+ ppp_priv_area->poll_delay_timer.data = (unsigned long)dev;
+ ppp_priv_area->poll_delay_timer.function = ppp_poll_delay;
+
+
+ /* Since we start with dummy IP addresses we can say
+ * that route exists */
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/*============================================================================
+ * Delete logical channel.
+ */
+static int del_if(struct wan_device *wandev, struct net_device *dev)
+{
+ return 0;
+}
+
+static void disable_comm (sdla_t *card)
+{
+ ppp_comm_disable_shutdown(card);
+ return;
+}
+
+/****** WANPIPE-specific entry points ***************************************/
+
+/*============================================================================
+ * Execute adapter interface command.
+ */
+
+//FIXME: Why do we need this ????
+static int wpp_exec(struct sdla *card, void *u_cmd, void *u_data)
+{
+ ppp_mbox_t *mbox = card->mbox;
+ int len;
+
+ if (copy_from_user((void*)&mbox->cmd, u_cmd, sizeof(ppp_cmd_t)))
+ return -EFAULT;
+
+ len = mbox->cmd.length;
+
+ if (len) {
+
+ if( copy_from_user((void*)&mbox->data, u_data, len))
+ return -EFAULT;
+
+ }
+
+ /* execute command */
+ if (!sdla_exec(mbox))
+ return -EIO;
+
+ /* return result */
+ if( copy_to_user(u_cmd, (void*)&mbox->cmd, sizeof(ppp_cmd_t)))
+ return -EFAULT;
+ len = mbox->cmd.length;
+
+ if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/****** Network Device Interface ********************************************/
+
+/*============================================================================
+ * Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration.
+ */
+static int if_init(struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t *card = ppp_priv_area->card;
+ struct wan_device *wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_header = &if_header;
+ dev->rebuild_header = &if_rebuild_hdr;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+ dev->tx_timeout = &if_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* Initialize media-specific parameters */
+ dev->type = ARPHRD_PPP; /* ARP h/w type */
+ dev->flags |= IFF_POINTOPOINT;
+ dev->flags |= IFF_NOARP;
+
+	/* Enable Multicasting if specified by user */
+ if (ppp_priv_area->mc == WANOPT_YES){
+ dev->flags |= IFF_MULTICAST;
+ }
+
+ dev->mtu = wandev->mtu;
+ dev->hard_header_len = PPP_HDR_LEN; /* media header length */
+
+ /* Initialize hardware parameters (just for reference) */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = wandev->maddr;
+ dev->mem_end = wandev->maddr + wandev->msize - 1;
+
+ /* Set transmit buffer queue length */
+ dev->tx_queue_len = 100;
+ SET_MODULE_OWNER(dev);
+
+ return 0;
+}
+
+/*============================================================================
+ * Open network interface.
+ * o enable communications and interrupts.
+ * o prevent module from unloading by incrementing use count
+ *
+ * Return 0 if O.k. or errno.
+ */
+static int if_open(struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t *card = ppp_priv_area->card;
+ struct timeval tv;
+ //unsigned long smp_flags;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ wanpipe_open(card);
+
+ netif_start_queue(dev);
+
+ do_gettimeofday( &tv );
+ ppp_priv_area->router_start_time = tv.tv_sec;
+
+ /* We cannot configure the card here because we don't
+ * have access to the interface IP addresses.
+	 * Once the interface initialization is complete, we will be
+ * able to access the IP addresses. Therefore,
+ * configure the ppp link in the poll routine */
+ set_bit(0,&ppp_priv_area->config_ppp);
+ ppp_priv_area->config_wait_timeout=jiffies;
+
+ /* Start the PPP configuration after 1sec delay.
+	 * This will give the interface initialization time
+ * to finish its configuration */
+ mod_timer(&ppp_priv_area->poll_delay_timer, jiffies + HZ);
+ return 0;
+}
+
+/*============================================================================
+ * Close network interface.
+ * o if this is the last open, then disable communications and interrupts.
+ * o reset flags.
+ */
+static int if_close(struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t *card = ppp_priv_area->card;
+
+ netif_stop_queue(dev);
+ wanpipe_close(card);
+
+ del_timer (&ppp_priv_area->poll_delay_timer);
+ return 0;
+}
+
+/*============================================================================
+ * Build media header.
+ *
+ * The trick here is to put packet type (Ethertype) into 'protocol' field of
+ * the socket buffer, so that we don't forget it. If packet type is not
+ * supported, set skb->protocol to 0 and discard packet later.
+ *
+ * Return: media header length.
+ */
+static int if_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+ switch (type)
+ {
+ case ETH_P_IP:
+ case ETH_P_IPX:
+ skb->protocol = htons(type);
+ break;
+
+ default:
+ skb->protocol = 0;
+ }
+
+ return PPP_HDR_LEN;
+}
+
+/*============================================================================
+ * Re-build media header.
+ *
+ * Return: 1 physical address resolved.
+ * 0 physical address not resolved
+ */
+static int if_rebuild_hdr (struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t *card = ppp_priv_area->card;
+
+ printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
+ card->devname, dev->name);
+ return 1;
+}
+
+/*============================================================================
+ * Handle transmit timeout event from netif watchdog
+ */
+static void if_tx_timeout(struct net_device *dev)
+{
+ ppp_private_area_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ /* If our device stays busy for at least 5 seconds then we will
+ * kick start the device by making dev->tbusy = 0. We expect
+ * that our device never stays busy more than 5 seconds. So this
+ * is only used as a last resort.
+ */
+
+ ++ chan->if_send_stat.if_send_tbusy;
+ ++card->wandev.stats.collisions;
+
+ printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
+ ++chan->if_send_stat.if_send_tbusy_timeout;
+ netif_wake_queue (dev);
+}
+
+
+
+/*============================================================================
+ * Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission) to block a timer-based
+ * transmit from overlapping.
+ * o check link state. If link is not up, then drop the packet.
+ * o execute adapter send command.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ * protocol stack and can be used for flow control with protocol layer.
+ */
+static int if_send (struct sk_buff *skb, struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t *card = ppp_priv_area->card;
+ unsigned char *sendpacket;
+ unsigned long smp_flags;
+ ppp_flags_t *flags = card->flags;
+ int udp_type;
+ int err=0;
+
+ ++ppp_priv_area->if_send_stat.if_send_entry;
+
+ netif_stop_queue(dev);
+
+ if (skb == NULL) {
+
+		/* If we get here, some higher layer thinks we've missed a
+		 * tx-done interrupt.
+ */
+ printk(KERN_INFO "%s: interface %s got kicked!\n",
+ card->devname, dev->name);
+
+ ++ppp_priv_area->if_send_stat.if_send_skb_null;
+
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ sendpacket = skb->data;
+
+ udp_type = udp_pkt_type( skb, card );
+
+
+ if (udp_type == UDP_PTPIPE_TYPE){
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
+ ppp_priv_area)){
+ flags->imask |= PPP_INTR_TIMER;
+ }
+ ++ppp_priv_area->if_send_stat.if_send_PIPE_request;
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ /* Check for broadcast and multicast addresses
+ * If found, drop (deallocate) a packet and return.
+ */
+ if(chk_bcast_mcast_addr(card, dev, skb)){
+ ++card->wandev.stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ netif_start_queue(dev);
+ return 0;
+ }
+
+
+ if(card->hw.type != SDLA_S514){
+ s508_lock(card,&smp_flags);
+ }
+
+ if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+
+ printk(KERN_INFO "%s: Critical in if_send: %lx\n",
+ card->wandev.name,card->wandev.critical);
+
+ ++card->wandev.stats.tx_dropped;
+ ++ppp_priv_area->if_send_stat.if_send_critical_non_ISR;
+ netif_start_queue(dev);
+ goto if_send_exit_crit;
+ }
+
+ if (card->wandev.state != WAN_CONNECTED) {
+
+ ++ppp_priv_area->if_send_stat.if_send_wan_disconnected;
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+
+ } else if (!skb->protocol) {
+ ++ppp_priv_area->if_send_stat.if_send_protocol_error;
+ ++card->wandev.stats.tx_errors;
+ netif_start_queue(dev);
+
+ } else {
+
+ /*If it's IPX change the network numbers to 0 if they're ours.*/
+ if( skb->protocol == htons(ETH_P_IPX) ) {
+ if(ppp_priv_area->enable_IPX) {
+ switch_net_numbers( skb->data,
+ ppp_priv_area->network_number, 0);
+ } else {
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+ goto if_send_exit_crit;
+ }
+ }
+
+ if (ppp_send(card, skb->data, skb->len, skb->protocol)) {
+ netif_stop_queue(dev);
+ ++ppp_priv_area->if_send_stat.if_send_adptr_bfrs_full;
+ ++ppp_priv_area->if_send_stat.if_send_tx_int_enabled;
+ } else {
+ ++ppp_priv_area->if_send_stat.if_send_bfr_passed_to_adptr;
+ ++card->wandev.stats.tx_packets;
+ card->wandev.stats.tx_bytes += skb->len;
+ netif_start_queue(dev);
+ dev->trans_start = jiffies;
+ }
+ }
+
+if_send_exit_crit:
+
+ if (!(err=netif_queue_stopped(dev))){
+ dev_kfree_skb_any(skb);
+ }else{
+ ppp_priv_area->tick_counter = jiffies;
+ flags->imask |= PPP_INTR_TXRDY; /* unmask Tx interrupts */
+ }
+
+ clear_bit(SEND_CRIT,&card->wandev.critical);
+ if(card->hw.type != SDLA_S514){
+ s508_unlock(card,&smp_flags);
+ }
+
+ return err;
+}
+
+
+/*=============================================================================
+ * Store a UDP management packet for later processing.
+ */
+
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ ppp_private_area_t* ppp_priv_area )
+{
+ int udp_pkt_stored = 0;
+
+ if(!ppp_priv_area->udp_pkt_lgth && (skb->len<=MAX_LGTH_UDP_MGNT_PKT)){
+ ppp_priv_area->udp_pkt_lgth = skb->len;
+ ppp_priv_area->udp_pkt_src = udp_pkt_src;
+ memcpy(ppp_priv_area->udp_pkt_data, skb->data, skb->len);
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_UDP;
+ ppp_priv_area->protocol = skb->protocol;
+ udp_pkt_stored = 1;
+ }else{
+ if (skb->len > MAX_LGTH_UDP_MGNT_PKT){
+ printk(KERN_INFO "%s: PIPEMON UDP request too long : %i\n",
+ card->devname, skb->len);
+ }else{
+			printk(KERN_INFO "%s: PIPEMON UDP request already pending\n",
+ card->devname);
+ }
+ ppp_priv_area->udp_pkt_lgth = 0;
+ }
+
+	/* The original skb is always freed here, whether it came from the
+	 * stack or from an interrupt */
+	dev_kfree_skb_any(skb);
+
+ return(udp_pkt_stored);
+}
+
+
+
+/*============================================================================
+ * Reply to UDP Management system.
+ * Return length of reply.
+ */
+static int reply_udp( unsigned char *data, unsigned int mbox_len )
+{
+ unsigned short len, udp_length, temp, ip_length;
+ unsigned long ip_temp;
+ int even_bound = 0;
+ ppp_udp_pkt_t *p_udp_pkt = (ppp_udp_pkt_t *)data;
+
+ /* Set length of packet */
+ len = sizeof(ip_pkt_t)+
+ sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+ /* fill in UDP reply */
+ p_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
+
+ /* fill in UDP length */
+ udp_length = sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+
+ /* put it on an even boundary */
+ if ( udp_length & 0x0001 ) {
+ udp_length += 1;
+ len += 1;
+ even_bound=1;
+ }
+
+ temp = (udp_length<<8)|(udp_length>>8);
+ p_udp_pkt->udp_pkt.udp_length = temp;
+
+
+ /* swap UDP ports */
+ temp = p_udp_pkt->udp_pkt.udp_src_port;
+ p_udp_pkt->udp_pkt.udp_src_port =
+ p_udp_pkt->udp_pkt.udp_dst_port;
+ p_udp_pkt->udp_pkt.udp_dst_port = temp;
+
+
+ /* add UDP pseudo header */
+ temp = 0x1100;
+ *((unsigned short *)(p_udp_pkt->data+mbox_len+even_bound)) = temp;
+ temp = (udp_length<<8)|(udp_length>>8);
+ *((unsigned short *)(p_udp_pkt->data+mbox_len+even_bound+2)) = temp;
+
+ /* calculate UDP checksum */
+ p_udp_pkt->udp_pkt.udp_checksum = 0;
+ p_udp_pkt->udp_pkt.udp_checksum =
+ calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
+
+ /* fill in IP length */
+ ip_length = udp_length + sizeof(ip_pkt_t);
+ temp = (ip_length<<8)|(ip_length>>8);
+ p_udp_pkt->ip_pkt.total_length = temp;
+
+ /* swap IP addresses */
+ ip_temp = p_udp_pkt->ip_pkt.ip_src_address;
+ p_udp_pkt->ip_pkt.ip_src_address = p_udp_pkt->ip_pkt.ip_dst_address;
+ p_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
+
+ /* fill in IP checksum */
+ p_udp_pkt->ip_pkt.hdr_checksum = 0;
+ p_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
+
+ return len;
+
+} /* reply_udp */
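+
+/* Illustrative note (not compiled): the (x<<8)|(x>>8) pattern used in
+ * reply_udp() above is a 16-bit byte swap, i.e. the equivalent of
+ * htons()/ntohs() on the little-endian x86 hosts these cards were
+ * typically installed in.
+ */
+#if 0
+static unsigned short swap16_example(unsigned short x)
+{
+	return (x << 8) | (x >> 8);	/* same as htons(x) on little-endian */
+}
+#endif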
+
+unsigned short calc_checksum (char *data, int len)
+{
+ unsigned short temp;
+ unsigned long sum=0;
+ int i;
+
+ for( i = 0; i <len; i+=2 ) {
+ memcpy(&temp,&data[i],2);
+ sum += (unsigned long)temp;
+ }
+
+ while (sum >> 16 ) {
+ sum = (sum & 0xffffUL) + (sum >> 16);
+ }
+
+ temp = (unsigned short)sum;
+ temp = ~temp;
+
+ if( temp == 0 )
+ temp = 0xffff;
+
+ return temp;
+}
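+
+/* Illustrative sketch (not compiled): verifying a checksum produced by
+ * calc_checksum() above, using the same convention reply_udp() uses when
+ * generating one (checksum field zeroed before the sum is taken).  The
+ * ip_pkt_t field name follows the reply_udp() code above.
+ */
+#if 0
+static int ip_checksum_ok_example(ip_pkt_t *ip)
+{
+	unsigned short stored = ip->hdr_checksum;
+	unsigned short computed;
+
+	ip->hdr_checksum = 0;
+	computed = calc_checksum((char *)ip, sizeof(ip_pkt_t));
+	ip->hdr_checksum = stored;
+
+	return computed == stored;
+}
+#endif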
+
+/*
+   If incoming is 0 (outgoing): if the net number is ours, make it 0.
+   If incoming is 1 (incoming): if the net number is 0, make it ours.
+*/
+static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
+{
+ unsigned long pnetwork_number;
+
+ pnetwork_number = (unsigned long)((sendpacket[6] << 24) +
+ (sendpacket[7] << 16) + (sendpacket[8] << 8) +
+ sendpacket[9]);
+
+ if (!incoming) {
+ //If the destination network number is ours, make it 0
+ if( pnetwork_number == network_number) {
+ sendpacket[6] = sendpacket[7] = sendpacket[8] =
+ sendpacket[9] = 0x00;
+ }
+ } else {
+ //If the incoming network is 0, make it ours
+ if( pnetwork_number == 0) {
+ sendpacket[6] = (unsigned char)(network_number >> 24);
+ sendpacket[7] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[8] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[9] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+
+
+ pnetwork_number = (unsigned long)((sendpacket[18] << 24) +
+ (sendpacket[19] << 16) + (sendpacket[20] << 8) +
+ sendpacket[21]);
+
+ if( !incoming ) {
+ //If the source network is ours, make it 0
+ if( pnetwork_number == network_number) {
+ sendpacket[18] = sendpacket[19] = sendpacket[20] =
+ sendpacket[21] = 0x00;
+ }
+ } else {
+ //If the source network is 0, make it ours
+ if( pnetwork_number == 0 ) {
+ sendpacket[18] = (unsigned char)(network_number >> 24);
+ sendpacket[19] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[20] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[21] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+} /* switch_net_numbers */
+
+/*============================================================================
+ * Get ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats.
+ */
+static struct net_device_stats *if_stats(struct net_device *dev)
+{
+
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ sdla_t* card;
+
+ if( ppp_priv_area == NULL )
+ return NULL;
+
+ card = ppp_priv_area->card;
+ return &card->wandev.stats;
+}
+
+/****** PPP Firmware Interface Functions ************************************/
+
+/*============================================================================
+ * Read firmware code version.
+ * Put code version as ASCII string in str.
+ */
+static int ppp_read_version(sdla_t *card, char *str)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.command = PPP_READ_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+
+ ppp_error(card, err, mb);
+
+ else if (str) {
+
+ int len = mb->cmd.length;
+
+ memcpy(str, mb->data, len);
+ str[len] = '\0';
+
+ }
+
+ return err;
+}
+/*===========================================================================
+ * Set Out-Bound Authentication.
+*/
+static int ppp_set_outbnd_auth (sdla_t *card, ppp_private_area_t *ppp_priv_area)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ memset(&mb->data, 0, (strlen(ppp_priv_area->userid) +
+ strlen(ppp_priv_area->passwd) + 2 ) );
+ memcpy(mb->data, ppp_priv_area->userid, strlen(ppp_priv_area->userid));
+ memcpy((mb->data + strlen(ppp_priv_area->userid) + 1),
+ ppp_priv_area->passwd, strlen(ppp_priv_area->passwd));
+
+ mb->cmd.length = strlen(ppp_priv_area->userid) +
+ strlen(ppp_priv_area->passwd) + 2 ;
+
+ mb->cmd.command = PPP_SET_OUTBOUND_AUTH;
+
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+
+ return err;
+}
+
+/*===========================================================================
+ * Set In-Bound Authentication.
+*/
+static int ppp_set_inbnd_auth (sdla_t *card, ppp_private_area_t *ppp_priv_area)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err, i;
+ char* user_tokens[32];
+ char* pass_tokens[32];
+ int userids, passwds;
+ int add_ptr;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ memset(&mb->data, 0, 1008);
+ memcpy(mb->data, ppp_priv_area->sysname,
+ strlen(ppp_priv_area->sysname));
+
+ /* Parse the userid string and the password string and build a string
+ to copy it to the data area of the command structure. The string
+ will look like "SYS_NAME<NULL>USER1<NULL>PASS1<NULL>USER2<NULL>PASS2
+ ....<NULL> "
+ */
+ userids = tokenize( ppp_priv_area->userid, user_tokens);
+ passwds = tokenize( ppp_priv_area->passwd, pass_tokens);
+
+ if (userids != passwds){
+ printk(KERN_INFO "%s: Number of passwords does not equal the number of user ids\n", card->devname);
+ return 1;
+ }
+
+ add_ptr = strlen(ppp_priv_area->sysname) + 1;
+ for (i=0; i<userids; i++){
+ memcpy((mb->data + add_ptr), user_tokens[i],
+ strlen(user_tokens[i]));
+ memcpy((mb->data + add_ptr + strlen(user_tokens[i]) + 1),
+ pass_tokens[i], strlen(pass_tokens[i]));
+ add_ptr = add_ptr + strlen(user_tokens[i]) + 1 +
+ strlen(pass_tokens[i]) + 1;
+ }
+
+ mb->cmd.length = add_ptr + 1;
+ mb->cmd.command = PPP_SET_INBOUND_AUTH;
+
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+
+ return err;
+}
+
+
+/*============================================================================
+ * Tokenize string.
+ * Parse a string of the following syntax:
+ *	<arg1>/<arg2>/...
+ * and fill the array of tokens with pointers to the string elements.
+ *
+ */
+static int tokenize (char *str, char **tokens)
+{
+ int cnt = 0;
+
+ tokens[0] = strsep(&str, "/");
+ while (tokens[cnt] && (cnt < 32 - 1))
+ {
+ tokens[cnt] = strstrip(tokens[cnt], " \t");
+ tokens[++cnt] = strsep(&str, "/");
+ }
+ return cnt;
+}
+
+/*============================================================================
+ * Strip leading and trailing characters listed in 's' (e.g. spaces and tabs)
+ * off the string 'str'.
+ */
+static char* strstrip (char *str, char* s)
+{
+	char *eos = str + strlen(str);		/* -> end of string */
+
+	/* strip leading characters */
+	while (*str && strchr(s, *str))
+		++str;
+
+	/* strip trailing characters */
+	while ((eos > str) && strchr(s, *(eos - 1)))
+		--eos;
+
+	*eos = '\0';
+	return str;
+}
+/*============================================================================
+ * Configure PPP firmware.
+ */
+static int ppp_configure(sdla_t *card, void *data)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int data_len = sizeof(ppp508_conf_t);
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ memcpy(mb->data, data, data_len);
+ mb->cmd.length = data_len;
+ mb->cmd.command = PPP_SET_CONFIG;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+
+ return err;
+}
+
+/*============================================================================
+ * Set interrupt mode.
+ */
+static int ppp_set_intr_mode(sdla_t *card, unsigned char mode)
+{
+ ppp_mbox_t *mb = card->mbox;
+ ppp_intr_info_t *ppp_intr_data = (ppp_intr_info_t *) &mb->data[0];
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ ppp_intr_data->i_enable = mode;
+
+ ppp_intr_data->irq = card->hw.irq;
+ mb->cmd.length = 2;
+
+	/* If the timer has been enabled, set the timer delay to 1sec */
+	if (mode & 0x80){
+		ppp_intr_data->timer_len = 250;
+		mb->cmd.length = 4;
+ }
+
+ mb->cmd.command = PPP_SET_INTR_FLAGS;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+
+
+ return err;
+}
+
+/*============================================================================
+ * Enable communications.
+ */
+static int ppp_comm_enable(sdla_t *card)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.command = PPP_COMM_ENABLE;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+ else
+ card->u.p.comm_enabled = 1;
+
+ return err;
+}
+
+/*============================================================================
+ * Disable communications.
+ */
+static int ppp_comm_disable(sdla_t *card)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.command = PPP_COMM_DISABLE;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+ else
+ card->u.p.comm_enabled = 0;
+
+ return err;
+}
+
+static int ppp_comm_disable_shutdown(sdla_t *card)
+{
+ ppp_mbox_t *mb = card->mbox;
+ ppp_intr_info_t *ppp_intr_data;
+ int err;
+
+ if (!mb){
+ return 1;
+ }
+
+ ppp_intr_data = (ppp_intr_info_t *) &mb->data[0];
+
+ /* Disable all interrupts */
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ ppp_intr_data->i_enable = 0;
+
+ ppp_intr_data->irq = card->hw.irq;
+ mb->cmd.length = 2;
+
+ mb->cmd.command = PPP_SET_INTR_FLAGS;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+	/* Disable communications */
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.command = PPP_COMM_DISABLE;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ card->u.p.comm_enabled = 0;
+
+ return 0;
+}
+
+
+
+/*============================================================================
+ * Get communications error statistics.
+ */
+static int ppp_get_err_stats(sdla_t *card)
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.command = PPP_READ_ERROR_STATS;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err == CMD_OK) {
+
+ ppp_err_stats_t* stats = (void*)mb->data;
+ card->wandev.stats.rx_over_errors = stats->rx_overrun;
+ card->wandev.stats.rx_crc_errors = stats->rx_bad_crc;
+ card->wandev.stats.rx_missed_errors = stats->rx_abort;
+ card->wandev.stats.rx_length_errors = stats->rx_lost;
+ card->wandev.stats.tx_aborted_errors = stats->tx_abort;
+
+ } else
+ ppp_error(card, err, mb);
+
+ return err;
+}
+
+/*============================================================================
+ * Send packet.
+ * Return: 0 - o.k.
+ * 1 - no transmit buffers available
+ */
+static int ppp_send (sdla_t *card, void *data, unsigned len, unsigned proto)
+{
+ ppp_buf_ctl_t *txbuf = card->u.p.txbuf;
+
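+	/* A non-zero flag means this descriptor has not yet been released
+	 * by the firmware, i.e. no free transmit buffer is available. */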
+ if (txbuf->flag)
+ return 1;
+
+ sdla_poke(&card->hw, txbuf->buf.ptr, data, len);
+
+ txbuf->length = len; /* frame length */
+
+ if (proto == htons(ETH_P_IPX))
+ txbuf->proto = 0x01; /* protocol ID */
+ else
+ txbuf->proto = 0x00; /* protocol ID */
+
+ txbuf->flag = 1; /* start transmission */
+
+ /* Update transmit buffer control fields */
+ card->u.p.txbuf = ++txbuf;
+
+ if ((void*)txbuf > card->u.p.txbuf_last)
+ card->u.p.txbuf = card->u.p.txbuf_base;
+
+ return 0;
+}
+
+/****** Firmware Error Handler **********************************************/
+
+/*============================================================================
+ * Firmware error handler.
+ * This routine is called whenever firmware command returns non-zero
+ * return code.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int ppp_error(sdla_t *card, int err, ppp_mbox_t *mb)
+{
+ unsigned cmd = mb->cmd.command;
+
+ switch (err) {
+
+ case CMD_TIMEOUT:
+ printk(KERN_ERR "%s: command 0x%02X timed out!\n",
+ card->devname, cmd);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n"
+ , card->devname, cmd, err);
+ }
+
+ return 0;
+}
+
+/****** Interrupt Handlers **************************************************/
+
+/*============================================================================
+ * PPP interrupt service routine.
+ */
+static void wpp_isr (sdla_t *card)
+{
+ ppp_flags_t *flags = card->flags;
+ char *ptr = &flags->iflag;
+ struct net_device *dev = card->wandev.dev;
+ int i;
+
+ card->in_isr = 1;
+ ++card->statistics.isr_entry;
+
+ if (!dev && flags->iflag != PPP_INTR_CMD){
+ card->in_isr = 0;
+ flags->iflag = 0;
+ return;
+ }
+
+ if (test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
+ card->in_isr = 0;
+ flags->iflag = 0;
+ return;
+ }
+
+
+ if(card->hw.type != SDLA_S514){
+ if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
+ ++card->statistics.isr_already_critical;
+ printk (KERN_INFO "%s: Critical while in ISR!\n",
+ card->devname);
+ card->in_isr = 0;
+ flags->iflag = 0;
+ return;
+ }
+ }
+
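+	/* flags->iflag holds the interrupt cause reported by the firmware
+	 * for this invocation; it is cleared on every exit path so the
+	 * adapter can post the next event. */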
+ switch (flags->iflag) {
+
+ case PPP_INTR_RXRDY: /* receive interrupt 0x01 (bit 0)*/
+ ++card->statistics.isr_rx;
+ rx_intr(card);
+ break;
+
+ case PPP_INTR_TXRDY: /* transmit interrupt 0x02 (bit 1)*/
+ ++card->statistics.isr_tx;
+ flags->imask &= ~PPP_INTR_TXRDY;
+ netif_wake_queue(dev);
+ break;
+
+ case PPP_INTR_CMD: /* interface command completed */
+ ++Intr_test_counter;
+ ++card->statistics.isr_intr_test;
+ break;
+
+ case PPP_INTR_MODEM: /* modem status change (DCD, CTS) 0x04 (bit 2)*/
+ case PPP_INTR_DISC: /* Data link disconnected 0x10 (bit 4)*/
+ case PPP_INTR_OPEN: /* Data link open 0x20 (bit 5)*/
+ case PPP_INTR_DROP_DTR: /* DTR drop timeout expired 0x40 bit 6 */
+ event_intr(card);
+ break;
+
+ case PPP_INTR_TIMER:
+ timer_intr(card);
+ break;
+
+ default: /* unexpected interrupt */
+ ++card->statistics.isr_spurious;
+ printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
+ card->devname, flags->iflag);
+ printk(KERN_INFO "%s: ID Bytes = ",card->devname);
+ for(i = 0; i < 8; i ++)
+ printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
+ printk(KERN_INFO "\n");
+ }
+
+ card->in_isr = 0;
+ flags->iflag = 0;
+ return;
+}
+
+/*============================================================================
+ * Receive interrupt handler.
+ */
+static void rx_intr(sdla_t *card)
+{
+ ppp_buf_ctl_t *rxbuf = card->rxmb;
+ struct net_device *dev = card->wandev.dev;
+ ppp_private_area_t *ppp_priv_area;
+ struct sk_buff *skb;
+ unsigned len;
+ void *buf;
+ int i;
+ ppp_flags_t *flags = card->flags;
+ char *ptr = &flags->iflag;
+ int udp_type;
+
+
+ if (rxbuf->flag != 0x01) {
+
+ printk(KERN_INFO
+ "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
+ card->devname, (unsigned)rxbuf, rxbuf->flag);
+
+ printk(KERN_INFO "%s: ID Bytes = ",card->devname);
+
+ for(i = 0; i < 8; i ++)
+ printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
+ printk(KERN_INFO "\n");
+
+ ++card->statistics.rx_intr_corrupt_rx_bfr;
+
+
+ /* Bug Fix: Mar 6 2000
+ * If we get a corrupted mailbox, it means that driver
+ * is out of sync with the firmware. There is no recovery.
+ * If we don't turn off all interrupts for this card
+ * the machine will crash.
+ */
+ printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
+ printk(KERN_INFO "Please contact Sangoma Technologies !\n");
+ ppp_set_intr_mode(card,0);
+ return;
+ }
+
+ if (dev && netif_running(dev) && dev->priv){
+
+ len = rxbuf->length;
+ ppp_priv_area = dev->priv;
+
+ /* Allocate socket buffer */
+ skb = dev_alloc_skb(len);
+
+ if (skb != NULL) {
+
+ /* Copy data to the socket buffer */
+ unsigned addr = rxbuf->buf.ptr;
+
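+			/* The received frame may wrap past the top of the
+			 * on-board receive buffer area: copy the piece up
+			 * to rx_top first, then continue from rx_base. */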
+ if ((addr + len) > card->u.p.rx_top + 1) {
+
+ unsigned tmp = card->u.p.rx_top - addr + 1;
+ buf = skb_put(skb, tmp);
+ sdla_peek(&card->hw, addr, buf, tmp);
+ addr = card->u.p.rx_base;
+ len -= tmp;
+ }
+ buf = skb_put(skb, len);
+ sdla_peek(&card->hw, addr, buf, len);
+
+ /* Decapsulate packet */
+ switch (rxbuf->proto) {
+
+ case 0x00:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+
+ case 0x01:
+ skb->protocol = htons(ETH_P_IPX);
+ break;
+ }
+
+ udp_type = udp_pkt_type( skb, card );
+
+ if (udp_type == UDP_PTPIPE_TYPE){
+
+ /* Handle a UDP Request in Timer Interrupt */
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK, card, skb, dev,
+ ppp_priv_area)){
+ flags->imask |= PPP_INTR_TIMER;
+ }
+ ++ppp_priv_area->rx_intr_stat.rx_intr_PIPE_request;
+
+
+ } else if (handle_IPXWAN(skb->data,card->devname,
+ ppp_priv_area->enable_IPX,
+ ppp_priv_area->network_number,
+ skb->protocol)) {
+
+ /* Handle an IPXWAN packet */
+ if( ppp_priv_area->enable_IPX) {
+
+ /* Make sure we are not already sending */
+ if (!test_bit(SEND_CRIT, &card->wandev.critical)){
+ ppp_send(card, skb->data, skb->len, htons(ETH_P_IPX));
+ }
+ dev_kfree_skb_any(skb);
+
+ } else {
+ ++card->wandev.stats.rx_dropped;
+ }
+ } else {
+ /* Pass data up the protocol stack */
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+
+ ++card->wandev.stats.rx_packets;
+ card->wandev.stats.rx_bytes += skb->len;
+ ++ppp_priv_area->rx_intr_stat.rx_intr_bfr_passed_to_stack;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+
+ } else {
+
+ if (net_ratelimit()){
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ }
+ ++card->wandev.stats.rx_dropped;
+ ++ppp_priv_area->rx_intr_stat.rx_intr_no_socket;
+ }
+
+ } else {
+ ++card->statistics.rx_intr_dev_not_started;
+ }
+
+ /* Release buffer element and calculate a pointer to the next one */
+ rxbuf->flag = 0x00;
+ card->rxmb = ++rxbuf;
+ if ((void*)rxbuf > card->u.p.rxbuf_last)
+ card->rxmb = card->u.p.rxbuf_base;
+}
+
+
+void event_intr (sdla_t *card)
+{
+
+ struct net_device* dev = card->wandev.dev;
+ ppp_private_area_t* ppp_priv_area = dev->priv;
+ volatile ppp_flags_t *flags = card->flags;
+
+ switch (flags->iflag){
+
+ case PPP_INTR_MODEM: /* modem status change (DCD, CTS) 0x04 (bit 2)*/
+
+ if (net_ratelimit()){
+ printk (KERN_INFO "%s: Modem status: DCD=%s CTS=%s\n",
+ card->devname, DCD(flags->mstatus), CTS(flags->mstatus));
+ }
+ break;
+
+ case PPP_INTR_DISC: /* Data link disconnected 0x10 (bit 4)*/
+
+ NEX_PRINTK (KERN_INFO "Data link disconnected intr Cause %X\n",
+ flags->disc_cause);
+
+ if (flags->disc_cause &
+ (PPP_LOCAL_TERMINATION | PPP_DCD_CTS_DROP |
+ PPP_REMOTE_TERMINATION)) {
+
+ if (card->u.p.ip_mode == WANOPT_PPP_PEER) {
+ set_bit(0,&Read_connection_info);
+ }
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+
+ show_disc_cause(card, flags->disc_cause);
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
+ flags->imask |= PPP_INTR_TIMER;
+ trigger_ppp_poll(dev);
+ }
+ break;
+
+ case PPP_INTR_OPEN: /* Data link open 0x20 (bit 5)*/
+
+ NEX_PRINTK (KERN_INFO "%s: PPP Link Open, LCP=%s IP=%s\n",
+ card->devname,LCP(flags->lcp_state),
+ IP(flags->ip_state));
+
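+		/* 0x09 appears to be the 'opened' state of the LCP and NCP
+		 * state machines (process_route() treats ip_state == 0x09
+		 * as IPCP opened). */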
+ if (flags->lcp_state == 0x09 &&
+ (flags->ip_state == 0x09 || flags->ipx_state == 0x09)){
+
+			/* Initialize the polling timer and set the state
+			 * to WAN_CONNECTED */
+
+
+			/* BUG FIX: When the protocol restarts, during heavy
+			 * traffic, board tx buffers and driver tx buffers
+			 * can go out of sync. This checks for that condition
+			 * and, if the tx buffers are out of sync, the
+			 * protocols are restarted.
+			 * I don't know why the board tx buffer goes out
+			 * of sync. It could be that a packet is transmitted
+			 * while the link is down, but that is not
+			 * possible. The other possibility is that the
+			 * firmware doesn't reinitialize properly.
+			 * FIXME: A better fix should be found.
+			 */
+ if (detect_and_fix_tx_bug(card)){
+
+ ppp_comm_disable(card);
+
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+
+ ppp_priv_area->timer_int_enabled |=
+ TMR_INT_ENABLED_PPP_EVENT;
+ flags->imask |= PPP_INTR_TIMER;
+ break;
+ }
+
+ card->state_tick = jiffies;
+ wanpipe_set_state(card, WAN_CONNECTED);
+
+ NEX_PRINTK(KERN_INFO "CON: L Tx: %lx B Tx: %lx || L Rx %lx B Rx %lx\n",
+ (unsigned long)card->u.p.txbuf, *card->u.p.txbuf_next,
+ (unsigned long)card->rxmb, *card->u.p.rxbuf_next);
+
+ /* Tell timer interrupt that PPP event occurred */
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
+ flags->imask |= PPP_INTR_TIMER;
+
+ /* If we are in PEER mode, we must first obtain the
+ * IP information and then go into the poll routine */
+ if (card->u.p.ip_mode != WANOPT_PPP_PEER){
+ trigger_ppp_poll(dev);
+ }
+ }
+ break;
+
+ case PPP_INTR_DROP_DTR: /* DTR drop timeout expired 0x40 bit 6 */
+
+ NEX_PRINTK(KERN_INFO "DTR Drop Timeout Interrrupt \n");
+
+ if (card->u.p.ip_mode == WANOPT_PPP_PEER) {
+ set_bit(0,&Read_connection_info);
+ }
+
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+
+ show_disc_cause(card, flags->disc_cause);
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
+ flags->imask |= PPP_INTR_TIMER;
+ trigger_ppp_poll(dev);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Error, Invalid PPP Event\n",card->devname);
+ }
+}
+
+
+
+/* TIMER INTERRUPT */
+
+void timer_intr (sdla_t *card)
+{
+
+ struct net_device* dev = card->wandev.dev;
+ ppp_private_area_t* ppp_priv_area = dev->priv;
+ ppp_flags_t *flags = card->flags;
+
+
+ if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG){
+ if (!config_ppp(card)){
+ ppp_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_CONFIG;
+ }
+ }
+
+ /* Update statistics */
+ if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE){
+ ppp_get_err_stats(card);
+ if(!(--ppp_priv_area->update_comms_stats)){
+ ppp_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ }
+ }
+
+ /* PPIPEMON UDP request */
+
+ if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP){
+ process_udp_mgmt_pkt(card,dev, ppp_priv_area);
+ ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
+ }
+
+ /* PPP Event */
+ if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_PPP_EVENT){
+
+ if (card->wandev.state == WAN_DISCONNECTED){
+ retrigger_comm(card);
+ }
+
+		/* If the state is CONNECTING, it means that communications were
+		 * enabled. When the remote side enables its communication we
+		 * should get an interrupt PPP_INTR_OPEN, thus turn off polling
+		 */
+
+ else if (card->wandev.state == WAN_CONNECTING){
+ /* Turn off the timer interrupt */
+ ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_PPP_EVENT;
+ }
+
+ /* If state is connected and we are in PEER mode
+ * poll for an IP address which will be provided by remote end.
+ */
+ else if ((card->wandev.state == WAN_CONNECTED &&
+ card->u.p.ip_mode == WANOPT_PPP_PEER) &&
+ test_bit(0,&Read_connection_info)){
+
+ card->state_tick = jiffies;
+ if (read_connection_info (card)){
+ printk(KERN_INFO "%s: Failed to read PEER IP Addresses\n",
+ card->devname);
+ }else{
+ clear_bit(0,&Read_connection_info);
+ set_bit(1,&Read_connection_info);
+ trigger_ppp_poll(dev);
+ }
+ }else{
+			//FIXME: Put the comment back in
+ ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_PPP_EVENT;
+ }
+
+ }/* End of PPP_EVENT */
+
+
+ /* Only disable the timer interrupt if there are no udp, statistic */
+ /* updates or events pending */
+ if(!ppp_priv_area->timer_int_enabled) {
+ flags->imask &= ~PPP_INTR_TIMER;
+ }
+}
+
+
+static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_IPX, unsigned long network_number, unsigned short proto)
+{
+ int i;
+
+ if( proto == htons(ETH_P_IPX) ) {
+ //It's an IPX packet
+ if(!enable_IPX) {
+ //Return 1 so we don't pass it up the stack.
+ return 1;
+ }
+ } else {
+ //It's not IPX so pass it up the stack.
+ return 0;
+ }
+
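+	/* Bytes 16-17 of the IPX header hold the destination socket;
+	 * 0x9004 is the IPXWAN negotiation socket. */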
+ if( sendpacket[16] == 0x90 &&
+ sendpacket[17] == 0x04)
+ {
+ //It's IPXWAN
+
+ if( sendpacket[2] == 0x02 &&
+ sendpacket[34] == 0x00)
+ {
+ //It's a timer request packet
+ printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",devname);
+
+ //Go through the routing options and answer no to every
+ //option except Unnumbered RIP/SAP
+ for(i = 41; sendpacket[i] == 0x00; i += 5)
+ {
+ //0x02 is the option for Unnumbered RIP/SAP
+ if( sendpacket[i + 4] != 0x02)
+ {
+ sendpacket[i + 1] = 0;
+ }
+ }
+
+ //Skip over the extended Node ID option
+ if( sendpacket[i] == 0x04 )
+ {
+ i += 8;
+ }
+
+ //We also want to turn off all header compression opt.
+ for(; sendpacket[i] == 0x80 ;)
+ {
+ sendpacket[i + 1] = 0;
+ i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
+ }
+
+ //Set the packet type to timer response
+ sendpacket[34] = 0x01;
+
+ printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",devname);
+ }
+ else if( sendpacket[34] == 0x02 )
+ {
+ //This is an information request packet
+ printk(KERN_INFO "%s: Received IPXWAN Information Request packet\n",devname);
+
+ //Set the packet type to information response
+ sendpacket[34] = 0x03;
+
+ //Set the router name
+ sendpacket[51] = 'P';
+ sendpacket[52] = 'T';
+ sendpacket[53] = 'P';
+ sendpacket[54] = 'I';
+ sendpacket[55] = 'P';
+ sendpacket[56] = 'E';
+ sendpacket[57] = '-';
+ sendpacket[58] = CVHexToAscii(network_number >> 28);
+ sendpacket[59] = CVHexToAscii((network_number & 0x0F000000)>> 24);
+ sendpacket[60] = CVHexToAscii((network_number & 0x00F00000)>> 20);
+ sendpacket[61] = CVHexToAscii((network_number & 0x000F0000)>> 16);
+ sendpacket[62] = CVHexToAscii((network_number & 0x0000F000)>> 12);
+ sendpacket[63] = CVHexToAscii((network_number & 0x00000F00)>> 8);
+ sendpacket[64] = CVHexToAscii((network_number & 0x000000F0)>> 4);
+ sendpacket[65] = CVHexToAscii(network_number & 0x0000000F);
+ for(i = 66; i < 99; i+= 1)
+ {
+ sendpacket[i] = 0;
+ }
+
+ printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",devname);
+ }
+ else
+ {
+ printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
+ return 0;
+ }
+
+ //Set the WNodeID to our network address
+ sendpacket[35] = (unsigned char)(network_number >> 24);
+ sendpacket[36] = (unsigned char)((network_number & 0x00FF0000) >> 16);
+ sendpacket[37] = (unsigned char)((network_number & 0x0000FF00) >> 8);
+ sendpacket[38] = (unsigned char)(network_number & 0x000000FF);
+
+ return 1;
+ } else {
+ //If we get here it's an IPX-data packet, so it'll get passed up the stack.
+
+ //switch the network numbers
+ switch_net_numbers(sendpacket, network_number, 1);
+ return 0;
+ }
+}
+
+/****** Background Polling Routines ****************************************/
+
+/* All polling functions are invoked by the TIMER interrupt in the wpp_isr
+ * routine.
+ */
+
+/*============================================================================
+ * Monitor active link phase.
+ */
+static void process_route (sdla_t *card)
+{
+ ppp_flags_t *flags = card->flags;
+ struct net_device *dev = card->wandev.dev;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+
+ if ((card->u.p.ip_mode == WANOPT_PPP_PEER) &&
+ (flags->ip_state == 0x09)){
+
+ /* We get ip_local from the firmware in PEER mode.
+ * Therefore, if ip_local is 0, we failed to obtain
+ * the remote IP address. */
+ if (ppp_priv_area->ip_local == 0)
+ return;
+
+ printk(KERN_INFO "%s: IPCP State Opened.\n", card->devname);
+ if (read_info( card )) {
+ printk(KERN_INFO
+ "%s: An error occurred in IP assignment.\n",
+ card->devname);
+ } else {
+ struct in_device *in_dev = dev->ip_ptr;
+ if (in_dev != NULL ) {
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ printk(KERN_INFO "%s: Assigned Lcl. Addr: %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(ifa->ifa_local));
+ printk(KERN_INFO "%s: Assigned Rmt. Addr: %u.%u.%u.%u\n",
+ card->devname, NIPQUAD(ifa->ifa_address));
+ }else{
+ printk(KERN_INFO
+ "%s: Error: Failed to add a route for PPP interface %s\n",
+ card->devname,dev->name);
+ }
+ }
+ }
+}
+
+/*============================================================================
+ * Monitor physical link disconnected phase.
+ * o if interface is up and the hold-down timeout has expired, then retry
+ * connection.
+ */
+static void retrigger_comm(sdla_t *card)
+{
+ struct net_device *dev = card->wandev.dev;
+
+ if (dev && ((jiffies - card->state_tick) > HOLD_DOWN_TIME)) {
+
+ wanpipe_set_state(card, WAN_CONNECTING);
+
+ if(ppp_comm_enable(card) == CMD_OK){
+ init_ppp_tx_rx_buff( card );
+ }
+ }
+}
+
+/****** Miscellaneous Functions *********************************************/
+
+/*============================================================================
+ * Configure S508 adapter.
+ */
+static int config508(struct net_device *dev, sdla_t *card)
+{
+ ppp508_conf_t cfg;
+ struct in_device *in_dev = dev->ip_ptr;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+
+ /* Prepare PPP configuration structure */
+ memset(&cfg, 0, sizeof(ppp508_conf_t));
+
+ if (card->wandev.clocking)
+ cfg.line_speed = card->wandev.bps;
+
+ if (card->wandev.interface == WANOPT_RS232)
+ cfg.conf_flags |= INTERFACE_LEVEL_RS232;
+
+
+ cfg.conf_flags |= DONT_TERMINATE_LNK_MAX_CONFIG; /*send Configure-Request packets forever*/
+ cfg.txbuf_percent = PERCENT_TX_BUFF; /* % of Tx bufs */
+ cfg.mtu_local = card->wandev.mtu;
+ cfg.mtu_remote = card->wandev.mtu; /* Default */
+ cfg.restart_tmr = TIME_BETWEEN_CONF_REQ; /* 30 = 3sec */
+ cfg.auth_rsrt_tmr = TIME_BETWEEN_PAP_CHAP_REQ; /* 30 = 3sec */
+ cfg.auth_wait_tmr = WAIT_PAP_CHAP_WITHOUT_REPLY; /* 300 = 30s */
+ cfg.mdm_fail_tmr = WAIT_AFTER_DCD_CTS_LOW; /* 5 = 0.5s */
+ cfg.dtr_drop_tmr = TIME_DCD_CTS_LOW_AFTER_LNK_DOWN; /* 10 = 1s */
+ cfg.connect_tmout = WAIT_DCD_HIGH_AFTER_ENABLE_COMM; /* 900 = 90s */
+ cfg.conf_retry = MAX_CONF_REQ_WITHOUT_REPLY; /* 10 = 1s */
+ cfg.term_retry = MAX_TERM_REQ_WITHOUT_REPLY; /* 2 times */
+ cfg.fail_retry = NUM_CONF_NAK_WITHOUT_REPLY; /* 5 times */
+ cfg.auth_retry = NUM_AUTH_REQ_WITHOUT_REPLY; /* 10 times */
+
+
+ if( !card->u.p.authenticator ) {
+ printk(KERN_INFO "%s: Device is not configured as an authenticator\n",
+ card->devname);
+ cfg.auth_options = NO_AUTHENTICATION;
+ }else{
+ printk(KERN_INFO "%s: Device is configured as an authenticator\n",
+ card->devname);
+ cfg.auth_options = INBOUND_AUTH;
+ }
+
+ if( ppp_priv_area->pap == WANOPT_YES){
+ cfg.auth_options |=PAP_AUTH;
+ printk(KERN_INFO "%s: Pap enabled\n", card->devname);
+ }
+ if( ppp_priv_area->chap == WANOPT_YES){
+ cfg.auth_options |= CHAP_AUTH;
+ printk(KERN_INFO "%s: Chap enabled\n", card->devname);
+ }
+
+
+ if (ppp_priv_area->enable_IPX == WANOPT_YES){
+ printk(KERN_INFO "%s: Enabling IPX Protocol\n",card->devname);
+ cfg.ipx_options = ENABLE_IPX | ROUTING_PROT_DEFAULT;
+ }else{
+ cfg.ipx_options = DISABLE_IPX;
+ }
+
+ switch (card->u.p.ip_mode) {
+
+ case WANOPT_PPP_STATIC:
+
+ printk(KERN_INFO "%s: PPP IP Mode: STATIC\n",card->devname);
+ cfg.ip_options = L_AND_R_IP_NO_ASSIG |
+ ENABLE_IP;
+ cfg.ip_local = in_dev->ifa_list->ifa_local;
+ cfg.ip_remote = in_dev->ifa_list->ifa_address;
+ /* Debugging code used to check that IP addresses
+ * obtained from the kernel are correct */
+
+ NEX_PRINTK(KERN_INFO "Local %u.%u.%u.%u Remote %u.%u.%u.%u Name %s\n",
+ NIPQUAD(ip_local),NIPQUAD(ip_remote), dev->name);
+ break;
+
+ case WANOPT_PPP_HOST:
+
+ printk(KERN_INFO "%s: PPP IP Mode: HOST\n",card->devname);
+ cfg.ip_options = L_IP_LOCAL_ASSIG |
+ R_IP_LOCAL_ASSIG |
+ ENABLE_IP;
+ cfg.ip_local = in_dev->ifa_list->ifa_local;
+ cfg.ip_remote = in_dev->ifa_list->ifa_address;
+ /* Debugging code used to check that IP addresses
+ * obtained from the kernel are correct */
+ NEX_PRINTK (KERN_INFO "Local %u.%u.%u.%u Remote %u.%u.%u.%u Name %s\n",
+ NIPQUAD(ip_local),NIPQUAD(ip_remote), dev->name);
+
+ break;
+
+ case WANOPT_PPP_PEER:
+
+ printk(KERN_INFO "%s: PPP IP Mode: PEER\n",card->devname);
+ cfg.ip_options = L_IP_REMOTE_ASSIG |
+ R_IP_REMOTE_ASSIG |
+ ENABLE_IP;
+ cfg.ip_local = 0x00;
+ cfg.ip_remote = 0x00;
+ break;
+
+ default:
+ printk(KERN_INFO "%s: ERROR: Unsupported PPP Mode Selected\n",
+ card->devname);
+ printk(KERN_INFO "%s: PPP IP Modes: STATIC, PEER or HOST\n",
+ card->devname);
+ return 1;
+ }
+
+ return ppp_configure(card, &cfg);
+}
+
+/*============================================================================
+ * Show disconnection cause.
+ */
+static void show_disc_cause(sdla_t *card, unsigned cause)
+{
+ if (cause & 0x0802)
+
+ printk(KERN_INFO "%s: link terminated by peer\n",
+ card->devname);
+
+ else if (cause & 0x0004)
+
+ printk(KERN_INFO "%s: link terminated by user\n",
+ card->devname);
+
+ else if (cause & 0x0008)
+
+ printk(KERN_INFO "%s: authentication failed\n", card->devname);
+
+ else if (cause & 0x0010)
+
+ printk(KERN_INFO
+ "%s: authentication protocol negotiation failed\n",
+ card->devname);
+
+ else if (cause & 0x0020)
+
+ printk(KERN_INFO
+ "%s: peer's request for authentication rejected\n",
+ card->devname);
+
+ else if (cause & 0x0040)
+
+ printk(KERN_INFO "%s: MRU option rejected by peer\n",
+ card->devname);
+
+ else if (cause & 0x0080)
+
+ printk(KERN_INFO "%s: peer's MRU was too small\n",
+ card->devname);
+
+ else if (cause & 0x0100)
+
+ printk(KERN_INFO "%s: failed to negotiate peer's LCP options\n",
+ card->devname);
+
+ else if (cause & 0x0200)
+
+ printk(KERN_INFO "%s: failed to negotiate peer's IPCP options\n"
+ , card->devname);
+
+ else if (cause & 0x0400)
+
+ printk(KERN_INFO
+ "%s: failed to negotiate peer's IPXCP options\n",
+ card->devname);
+}
+
+/*=============================================================================
+ * Process UDP call of type PTPIPEAB.
+ */
+static void process_udp_mgmt_pkt(sdla_t *card, struct net_device *dev,
+ ppp_private_area_t *ppp_priv_area )
+{
+ unsigned char buf2[5];
+ unsigned char *buf;
+ unsigned int frames, len;
+ struct sk_buff *new_skb;
+ unsigned short data_length, buffer_length, real_len;
+ unsigned long data_ptr;
+ int udp_mgmt_req_valid = 1;
+ ppp_mbox_t *mbox = card->mbox;
+ struct timeval tv;
+ int err;
+ ppp_udp_pkt_t *ppp_udp_pkt = (ppp_udp_pkt_t*)&ppp_priv_area->udp_pkt_data;
+
+ memcpy(&buf2, &card->wandev.udp_port, 2 );
+
+
+ if(ppp_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ switch(ppp_udp_pkt->cblock.command) {
+
+ case PPIPE_GET_IBA_DATA:
+ case PPP_READ_CONFIG:
+ case PPP_GET_CONNECTION_INFO:
+ case PPIPE_ROUTER_UP_TIME:
+ case PPP_READ_STATISTICS:
+ case PPP_READ_ERROR_STATS:
+ case PPP_READ_PACKET_STATS:
+ case PPP_READ_LCP_STATS:
+ case PPP_READ_IPCP_STATS:
+ case PPP_READ_IPXCP_STATS:
+ case PPP_READ_PAP_STATS:
+ case PPP_READ_CHAP_STATS:
+ case PPP_READ_CODE_VERSION:
+ udp_mgmt_req_valid = 1;
+ break;
+
+ default:
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+ }
+
+ if(!udp_mgmt_req_valid) {
+
+ /* set length to 0 */
+ ppp_udp_pkt->cblock.length = 0x00;
+
+ /* set return code */
+ ppp_udp_pkt->cblock.result = 0xCD;
+ ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_direction_err;
+
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Warning, Illegal UDP command attempted from network: %x\n",
+ card->devname,ppp_udp_pkt->cblock.command);
+ }
+ } else {
+ /* Initialize the trace element */
+ trace_element_t trace_element;
+
+ switch (ppp_udp_pkt->cblock.command){
+
+ /* PPIPE_ENABLE_TRACING */
+ case PPIPE_ENABLE_TRACING:
+ if (!card->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+ mbox->cmd.command = PPP_DATALINE_MONITOR;
+ mbox->cmd.length = 0x01;
+ mbox->data[0] = ppp_udp_pkt->data[0];
+ err = sdla_exec(mbox) ?
+ mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK) {
+
+ ppp_error(card, err, mbox);
+ card->TracingEnabled = 0;
+
+ /* set the return code */
+
+ ppp_udp_pkt->cblock.result = mbox->cmd.result;
+ mbox->cmd.length = 0;
+ break;
+ }
+
+ sdla_peek(&card->hw, 0xC000, &buf2, 2);
+
+ ppp_priv_area->curr_trace_addr = 0;
+ memcpy(&ppp_priv_area->curr_trace_addr, &buf2, 2);
+ ppp_priv_area->start_trace_addr =
+ ppp_priv_area->curr_trace_addr;
+ ppp_priv_area->end_trace_addr =
+ ppp_priv_area->start_trace_addr + END_OFFSET;
+
+ /* MAX_SEND_BUFFER_SIZE - 28 (IP header)
+ - 32 (ppipemon CBLOCK) */
+ available_buffer_space = MAX_LGTH_UDP_MGNT_PKT -
+ sizeof(ip_pkt_t)-
+ sizeof(udp_pkt_t)-
+ sizeof(wp_mgmt_t)-
+ sizeof(cblock_t);
+ }
+ ppp_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 0;
+ card->TracingEnabled = 1;
+ break;
+
+ /* PPIPE_DISABLE_TRACING */
+ case PPIPE_DISABLE_TRACING:
+
+ if(card->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+ mbox->cmd.command = 0x33;
+ mbox->cmd.length = 1;
+ mbox->data[0] = 0x00;
+ err = sdla_exec(mbox) ?
+ mbox->cmd.result : CMD_TIMEOUT;
+
+ }
+
+ /*set return code*/
+ ppp_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 0;
+ card->TracingEnabled = 0;
+ break;
+
+ /* PPIPE_GET_TRACE_INFO */
+ case PPIPE_GET_TRACE_INFO:
+
+ if(!card->TracingEnabled) {
+ /* set return code */
+ ppp_udp_pkt->cblock.result = 1;
+ mbox->cmd.length = 0;
+ }
+
+ buffer_length = 0;
+
+			/* frames < 62, where 62 is the number of trace
+			   information elements. There are 496 bytes of
+			   space in total and each trace information
+			   element is 8 bytes.
+			 */
+ for ( frames=0; frames<62; frames++) {
+
+ trace_pkt_t *trace_pkt = (trace_pkt_t *)
+ &ppp_udp_pkt->data[buffer_length];
+
+ /* Read the whole trace packet */
+ sdla_peek(&card->hw, ppp_priv_area->curr_trace_addr,
+ &trace_element, sizeof(trace_element_t));
+
+ /* no data on board so exit */
+ if( trace_element.opp_flag == 0x00 )
+ break;
+
+ data_ptr = trace_element.trace_data_ptr;
+
+ /* See if there is actual data on the trace buffer */
+ if (data_ptr){
+ data_length = trace_element.trace_length;
+ }else{
+ data_length = 0;
+ ppp_udp_pkt->data[0] |= 0x02;
+ }
+
+ //FIXME: Do we need this check
+ if ((available_buffer_space - buffer_length)
+ < (sizeof(trace_element_t)+1)){
+
+ /*indicate we have more frames
+ * on board and exit
+ */
+ ppp_udp_pkt->data[0] |= 0x02;
+ break;
+ }
+
+ trace_pkt->status = trace_element.trace_type;
+ trace_pkt->time_stamp = trace_element.trace_time_stamp;
+ trace_pkt->real_length = trace_element.trace_length;
+
+ real_len = trace_element.trace_length;
+
+ if(data_ptr == 0){
+ trace_pkt->data_avail = 0x00;
+ }else{
+ /* we can take it next time */
+ if ((available_buffer_space - buffer_length)<
+ (real_len + sizeof(trace_pkt_t))){
+
+ ppp_udp_pkt->data[0] |= 0x02;
+ break;
+ }
+ trace_pkt->data_avail = 0x01;
+
+ /* get the data */
+ sdla_peek(&card->hw, data_ptr,
+ &trace_pkt->data,
+ real_len);
+ }
+ /* zero the opp flag to
+ show we got the frame */
+ buf2[0] = 0x00;
+ sdla_poke(&card->hw, ppp_priv_area->curr_trace_addr,
+ &buf2, 1);
+
+ /* now move onto the next
+ frame */
+ ppp_priv_area->curr_trace_addr += 8;
+
+ /* check if we passed the last address */
+ if ( ppp_priv_area->curr_trace_addr >=
+ ppp_priv_area->end_trace_addr){
+
+ ppp_priv_area->curr_trace_addr =
+ ppp_priv_area->start_trace_addr;
+ }
+
+				/* update the buffer length and make sure it is even */
+
+ if ( trace_pkt->data_avail == 0x01 ) {
+ buffer_length += real_len - 1;
+ }
+
+ /* for the header */
+ buffer_length += 8;
+
+ if( buffer_length & 0x0001 )
+ buffer_length += 1;
+ }
+
+ /* ok now set the total number of frames passed
+ in the high 5 bits */
+ ppp_udp_pkt->data[0] |= (frames << 2);
+
+ /* set the data length */
+ mbox->cmd.length = buffer_length;
+ ppp_udp_pkt->cblock.length = buffer_length;
+
+ /* set return code */
+ ppp_udp_pkt->cblock.result = 0;
+ break;
+
+ /* PPIPE_GET_IBA_DATA */
+ case PPIPE_GET_IBA_DATA:
+
+ mbox->cmd.length = 0x09;
+
+ sdla_peek(&card->hw, 0xF003, &ppp_udp_pkt->data,
+ mbox->cmd.length);
+
+ /* set the length of the data */
+ ppp_udp_pkt->cblock.length = 0x09;
+
+			/* set return code */
+			ppp_udp_pkt->cblock.result = 0;
+ break;
+
+ /* PPIPE_FT1_READ_STATUS */
+ case PPIPE_FT1_READ_STATUS:
+ sdla_peek(&card->hw, 0xF020, &ppp_udp_pkt->data[0], 2);
+ ppp_udp_pkt->cblock.length = mbox->cmd.length = 2;
+ ppp_udp_pkt->cblock.result = 0;
+ break;
+
+ case PPIPE_FLUSH_DRIVER_STATS:
+ init_ppp_priv_struct( ppp_priv_area );
+ init_global_statistics( card );
+ mbox->cmd.length = 0;
+ ppp_udp_pkt->cblock.result = 0;
+ break;
+
+
+ case PPIPE_ROUTER_UP_TIME:
+
+ do_gettimeofday( &tv );
+ ppp_priv_area->router_up_time = tv.tv_sec -
+ ppp_priv_area->router_start_time;
+ *(unsigned long *)&ppp_udp_pkt->data = ppp_priv_area->router_up_time;
+ mbox->cmd.length = 4;
+ ppp_udp_pkt->cblock.result = 0;
+ break;
+
+ /* PPIPE_DRIVER_STATISTICS */
+ case PPIPE_DRIVER_STAT_IFSEND:
+ memcpy(&ppp_udp_pkt->data, &ppp_priv_area->if_send_stat,
+ sizeof(if_send_stat_t));
+
+
+ ppp_udp_pkt->cblock.result = 0;
+ ppp_udp_pkt->cblock.length = sizeof(if_send_stat_t);
+ mbox->cmd.length = sizeof(if_send_stat_t);
+ break;
+
+ case PPIPE_DRIVER_STAT_INTR:
+ memcpy(&ppp_udp_pkt->data, &card->statistics,
+ sizeof(global_stats_t));
+
+ memcpy(&ppp_udp_pkt->data+sizeof(global_stats_t),
+ &ppp_priv_area->rx_intr_stat,
+ sizeof(rx_intr_stat_t));
+
+ ppp_udp_pkt->cblock.result = 0;
+ ppp_udp_pkt->cblock.length = sizeof(global_stats_t)+
+ sizeof(rx_intr_stat_t);
+ mbox->cmd.length = ppp_udp_pkt->cblock.length;
+ break;
+
+ case PPIPE_DRIVER_STAT_GEN:
+ memcpy( &ppp_udp_pkt->data,
+ &ppp_priv_area->pipe_mgmt_stat,
+ sizeof(pipe_mgmt_stat_t));
+
+ memcpy(&ppp_udp_pkt->data+sizeof(pipe_mgmt_stat_t),
+ &card->statistics, sizeof(global_stats_t));
+
+			ppp_udp_pkt->cblock.result = 0;
+			ppp_udp_pkt->cblock.length = sizeof(pipe_mgmt_stat_t)+
+							sizeof(global_stats_t);
+			mbox->cmd.length = ppp_udp_pkt->cblock.length;
+ break;
+
+
+ /* FT1 MONITOR STATUS */
+ case FT1_MONITOR_STATUS_CTRL:
+
+ /* Enable FT1 MONITOR STATUS */
+ if( ppp_udp_pkt->data[0] == 1) {
+
+ if( rCount++ != 0 ) {
+ ppp_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 1;
+ break;
+ }
+ }
+
+ /* Disable FT1 MONITOR STATUS */
+ if( ppp_udp_pkt->data[0] == 0) {
+
+ if( --rCount != 0) {
+ ppp_udp_pkt->cblock.result = 0;
+ mbox->cmd.length = 1;
+ break;
+ }
+ }
+ goto udp_dflt_cmd;
+
+		/* WARNING: FIXME: This should be fixed.
+		 * The FT1 Status Ctrl case does not end with a break
+		 * statement (it jumps into the default case), so no code
+		 * must be inserted here, between it and the default label */
+
+ default:
+udp_dflt_cmd:
+
+ /* it's a board command */
+ mbox->cmd.command = ppp_udp_pkt->cblock.command;
+ mbox->cmd.length = ppp_udp_pkt->cblock.length;
+
+ if(mbox->cmd.length) {
+ memcpy(&mbox->data,(unsigned char *)ppp_udp_pkt->data,
+ mbox->cmd.length);
+ }
+
+ /* run the command on the board */
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK) {
+
+ ppp_error(card, err, mbox);
+ ++ppp_priv_area->pipe_mgmt_stat.
+ UDP_PIPE_mgmt_adptr_cmnd_timeout;
+ break;
+ }
+
+ ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_OK;
+
+ /* copy the result back to our buffer */
+ memcpy(&ppp_udp_pkt->cblock,mbox, sizeof(cblock_t));
+
+ if(mbox->cmd.length) {
+ memcpy(&ppp_udp_pkt->data,&mbox->data,mbox->cmd.length);
+ }
+
+ } /* end of switch */
+ } /* end of else */
+
+ /* Fill UDP TTL */
+ ppp_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
+ len = reply_udp(ppp_priv_area->udp_pkt_data, mbox->cmd.length);
+
+ if (ppp_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ /* Make sure we are not already sending */
+ if (!test_bit(SEND_CRIT,&card->wandev.critical)){
+ ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_adptr;
+ ppp_send(card,ppp_priv_area->udp_pkt_data,len,ppp_priv_area->protocol);
+ }
+
+ } else {
+
+ /* Pass it up the stack
+ Allocate socket buffer */
+ if ((new_skb = dev_alloc_skb(len)) != NULL) {
+
+ /* copy data into new_skb */
+
+ buf = skb_put(new_skb, len);
+ memcpy(buf,ppp_priv_area->udp_pkt_data, len);
+
+ ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_stack;
+
+ /* Decapsulate packet and pass it up the protocol
+ stack */
+ new_skb->protocol = htons(ETH_P_IP);
+ new_skb->dev = dev;
+ new_skb->mac.raw = new_skb->data;
+ netif_rx(new_skb);
+ dev->last_rx = jiffies;
+
+ } else {
+
+ ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_no_socket;
+ printk(KERN_INFO "no socket buffers available!\n");
+ }
+ }
+
+ ppp_priv_area->udp_pkt_lgth = 0;
+
+ return;
+}
+
+/*=============================================================================
+ * Initialize the ppp_private_area structure.
+ */
+static void init_ppp_priv_struct( ppp_private_area_t *ppp_priv_area )
+{
+
+ memset(&ppp_priv_area->if_send_stat, 0, sizeof(if_send_stat_t));
+ memset(&ppp_priv_area->rx_intr_stat, 0, sizeof(rx_intr_stat_t));
+ memset(&ppp_priv_area->pipe_mgmt_stat, 0, sizeof(pipe_mgmt_stat_t));
+}
+
+/*============================================================================
+ * Initialize Global Statistics
+ */
+static void init_global_statistics( sdla_t *card )
+{
+ memset(&card->statistics, 0, sizeof(global_stats_t));
+}
+
+/*============================================================================
+ * Initialize Receive and Transmit Buffers.
+ */
+static void init_ppp_tx_rx_buff( sdla_t *card )
+{
+ ppp508_buf_info_t* info;
+
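+	/* Read the Tx/Rx buffer descriptor information block maintained by
+	 * the firmware in dual-port memory. On the S514 the pointers in it
+	 * are used as offsets from the mapped base directly; on the S508
+	 * they are offsets from PPP508_MB_VECT and are rebased below. */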
+ if (card->hw.type == SDLA_S514) {
+
+ info = (void*)(card->hw.dpmbase + PPP514_BUF_OFFS);
+
+ card->u.p.txbuf_base = (void*)(card->hw.dpmbase +
+ info->txb_ptr);
+
+ card->u.p.txbuf_last = (ppp_buf_ctl_t*)card->u.p.txbuf_base +
+ (info->txb_num - 1);
+
+ card->u.p.rxbuf_base = (void*)(card->hw.dpmbase +
+ info->rxb_ptr);
+
+ card->u.p.rxbuf_last = (ppp_buf_ctl_t*)card->u.p.rxbuf_base +
+ (info->rxb_num - 1);
+
+ } else {
+
+ info = (void*)(card->hw.dpmbase + PPP508_BUF_OFFS);
+
+ card->u.p.txbuf_base = (void*)(card->hw.dpmbase +
+ (info->txb_ptr - PPP508_MB_VECT));
+
+ card->u.p.txbuf_last = (ppp_buf_ctl_t*)card->u.p.txbuf_base +
+ (info->txb_num - 1);
+
+ card->u.p.rxbuf_base = (void*)(card->hw.dpmbase +
+ (info->rxb_ptr - PPP508_MB_VECT));
+
+ card->u.p.rxbuf_last = (ppp_buf_ctl_t*)card->u.p.rxbuf_base +
+ (info->rxb_num - 1);
+ }
+
+ card->u.p.txbuf_next = (unsigned long*)&info->txb_nxt;
+ card->u.p.rxbuf_next = (unsigned long*)&info->rxb1_ptr;
+
+ card->u.p.rx_base = info->rxb_base;
+ card->u.p.rx_top = info->rxb_end;
+
+ card->u.p.txbuf = card->u.p.txbuf_base;
+ card->rxmb = card->u.p.rxbuf_base;
+
+}
+
+/*=============================================================================
+ * Read Connection Information (i.e. for remote IP address assignment).
+ * Called when the ppp interface is connected.
+ */
+static int read_info( sdla_t *card )
+{
+ struct net_device *dev = card->wandev.dev;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ int err;
+
+ struct ifreq if_info;
+ struct sockaddr_in *if_data1, *if_data2;
+ mm_segment_t fs;
+
+ /* Set Local and remote addresses */
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+
+
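+	/* devinet_ioctl() expects a user-space pointer; temporarily widening
+	 * the address limit with set_fs(get_ds()) lets it operate on this
+	 * kernel-resident ifreq (a common idiom in drivers of this era). */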
+ fs = get_fs();
+ set_fs(get_ds()); /* get user space block */
+
+ /* Change the local and remote ip address of the interface.
+ * This will also add in the destination route.
+ */
+ if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
+ if_data1->sin_addr.s_addr = ppp_priv_area->ip_local;
+ if_data1->sin_family = AF_INET;
+ err = devinet_ioctl( SIOCSIFADDR, &if_info );
+ if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
+ if_data2->sin_addr.s_addr = ppp_priv_area->ip_remote;
+ if_data2->sin_family = AF_INET;
+ err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
+
+ set_fs(fs); /* restore old block */
+
+ if (err) {
+ printk (KERN_INFO "%s: Adding of route failed: %i\n",
+ card->devname,err);
+ printk (KERN_INFO "%s: Local : %u.%u.%u.%u\n",
+ card->devname,NIPQUAD(ppp_priv_area->ip_local));
+ printk (KERN_INFO "%s: Remote: %u.%u.%u.%u\n",
+ card->devname,NIPQUAD(ppp_priv_area->ip_remote));
+ }
+ return err;
+}
+
+/*=============================================================================
+ * Remove Dynamic Route.
+ * Called when the ppp interface is disconnected.
+ */
+
+static void remove_route( sdla_t *card )
+{
+
+ struct net_device *dev = card->wandev.dev;
+ long ip_addr;
+ int err;
+
+ mm_segment_t fs;
+ struct ifreq if_info;
+ struct sockaddr_in *if_data1;
+ struct in_device *in_dev = dev->ip_ptr;
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ ip_addr = ifa->ifa_local;
+
+ /* Set Local and remote addresses */
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+
+ fs = get_fs();
+ set_fs(get_ds()); /* get user space block */
+
+ /* Change the local ip address of the interface to 0.
+ * This will also delete the destination route.
+ */
+ if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
+ if_data1->sin_addr.s_addr = 0;
+ if_data1->sin_family = AF_INET;
+ err = devinet_ioctl( SIOCSIFADDR, &if_info );
+
+ set_fs(fs); /* restore old block */
+
+
+ if (err) {
+ printk (KERN_INFO "%s: Deleting dynamic route failed %d!\n",
+ card->devname, err);
+ return;
+ }else{
+ printk (KERN_INFO "%s: PPP Deleting dynamic route %u.%u.%u.%u successfuly\n",
+ card->devname, NIPQUAD(ip_addr));
+ }
+ return;
+}
+
+/*=============================================================================
+ * Perform the Interrupt Test by running the READ_CODE_VERSION command
+ * MAX_INTR_TEST_COUNTER times.
+ */
+static int intr_test( sdla_t *card )
+{
+ ppp_mbox_t *mb = card->mbox;
+ int err,i;
+
+ err = ppp_set_intr_mode( card, 0x08 );
+
+ if (err == CMD_OK) {
+
+ for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
+ /* Run command READ_CODE_VERSION */
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.length = 0;
+ mb->cmd.command = PPP_READ_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+ if (err != CMD_OK)
+ ppp_error(card, err, mb);
+ }
+ }
+ else return err;
+
+ err = ppp_set_intr_mode( card, 0 );
+ if (err != CMD_OK)
+ return err;
+
+ return 0;
+}
+
+/*==============================================================================
+ * Determine what type of UDP call it is. DRVSTATS or PTPIPEAB ?
+ */
+static int udp_pkt_type( struct sk_buff *skb, sdla_t *card )
+{
+ unsigned char *sendpacket;
+ unsigned char buf2[5];
+ ppp_udp_pkt_t *ppp_udp_pkt = (ppp_udp_pkt_t *)skb->data;
+
+ sendpacket = skb->data;
+ memcpy(&buf2, &card->wandev.udp_port, 2);
+
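+	/* The raw packet is examined at fixed offsets: byte 9 is the IP
+	 * protocol (0x11 = UDP), bytes 22-23 the UDP destination port
+	 * (compared against the configured monitor port) and bytes 28-35
+	 * an 8-byte ASCII signature ("PTPIPEAB" or "DRVSTATS"). */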
+ if( ppp_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45 && /* IP packet */
+ sendpacket[9] == 0x11 && /* UDP packet */
+ sendpacket[22] == buf2[1] && /* UDP Port */
+ sendpacket[23] == buf2[0] &&
+ sendpacket[36] == 0x01 ) {
+
+ if ( sendpacket[28] == 0x50 && /* PTPIPEAB: Signature */
+ sendpacket[29] == 0x54 &&
+ sendpacket[30] == 0x50 &&
+ sendpacket[31] == 0x49 &&
+ sendpacket[32] == 0x50 &&
+ sendpacket[33] == 0x45 &&
+ sendpacket[34] == 0x41 &&
+ sendpacket[35] == 0x42 ){
+
+ return UDP_PTPIPE_TYPE;
+
+ } else if(sendpacket[28] == 0x44 && /* DRVSTATS: Signature */
+ sendpacket[29] == 0x52 &&
+ sendpacket[30] == 0x56 &&
+ sendpacket[31] == 0x53 &&
+ sendpacket[32] == 0x54 &&
+ sendpacket[33] == 0x41 &&
+ sendpacket[34] == 0x54 &&
+ sendpacket[35] == 0x53 ){
+
+ return UDP_DRVSTATS_TYPE;
+
+ } else
+ return UDP_INVALID_TYPE;
+
+ } else
+ return UDP_INVALID_TYPE;
+
+}
+
+/*============================================================================
+ * Check to see if the packet to be transmitted contains a broadcast or
+ * multicast source IP address.
+ */
+
+static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
+ struct sk_buff *skb)
+{
+ u32 src_ip_addr;
+ u32 broadcast_ip_addr = 0;
+ struct in_device *in_dev;
+
+ /* read the IP source address from the outgoing packet */
+ src_ip_addr = *(u32 *)(skb->data + 12);
+
+ /* read the IP broadcast address for the device */
+ in_dev = dev->ip_ptr;
+ if(in_dev != NULL) {
+ struct in_ifaddr *ifa= in_dev->ifa_list;
+ if(ifa != NULL)
+ broadcast_ip_addr = ifa->ifa_broadcast;
+ else
+ return 0;
+ }
+
+ /* check if the IP Source Address is a Broadcast address */
+ if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
+ printk(KERN_INFO "%s: Broadcast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ /* check if the IP Source Address is a Multicast address */
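+	/* 0xE0000001 - 0xFFFFFFFE is 224.0.0.1 through 255.255.255.254,
+	 * i.e. the class D (multicast) and class E (reserved) ranges */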
+ if((ntohl(src_ip_addr) >= 0xE0000001) &&
+ (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
+ printk(KERN_INFO "%s: Multicast Source Address silently discarded\n",
+ card->devname);
+ return 1;
+ }
+
+ return 0;
+}
+
+void s508_lock (sdla_t *card, unsigned long *smp_flags)
+{
+ spin_lock_irqsave(&card->wandev.lock, *smp_flags);
+}
+
+void s508_unlock (sdla_t *card, unsigned long *smp_flags)
+{
+ spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
+}
+
+static int read_connection_info (sdla_t *card)
+{
+ ppp_mbox_t *mb = card->mbox;
+ struct net_device *dev = card->wandev.dev;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+ ppp508_connect_info_t *ppp508_connect_info;
+ int err;
+
+ memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
+ mb->cmd.length = 0;
+ mb->cmd.command = PPP_GET_CONNECTION_INFO;
+ err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK) {
+ ppp_error(card, err, mb);
+ ppp_priv_area->ip_remote = 0;
+ ppp_priv_area->ip_local = 0;
+ }
+ else {
+ ppp508_connect_info = (ppp508_connect_info_t *)mb->data;
+ ppp_priv_area->ip_remote = ppp508_connect_info->ip_remote;
+ ppp_priv_area->ip_local = ppp508_connect_info->ip_local;
+
+ NEX_PRINTK(KERN_INFO "READ CONNECTION GOT IP ADDRESS %x, %x\n",
+ ppp_priv_area->ip_remote,
+ ppp_priv_area->ip_local);
+ }
+
+ return err;
+}
+
+/*===============================================================================
+ * config_ppp
+ *
+ * Configure the ppp protocol and enable communications.
+ *
+ * The if_open function binds this function to the poll routine.
+ * Therefore, this function will run every time the ppp interface
+ * is brought up.
+ *
+ * If the communications are not enabled, proceed to configure
+ * the card and enable communications.
+ *
+ * If the communications are enabled, it means that the interface
+ * was shut down by either the user or the driver. In this case, we
+ * have to check that the IP addresses have not changed. If
+ * the IP addresses changed, we have to reconfigure the firmware
+ * and update the changed IP addresses. Otherwise, just exit.
+ */
+static int config_ppp (sdla_t *card)
+{
+
+ struct net_device *dev = card->wandev.dev;
+ ppp_flags_t *flags = card->flags;
+ ppp_private_area_t *ppp_priv_area = dev->priv;
+
+ if (card->u.p.comm_enabled){
+
+ if (ppp_priv_area->ip_local_tmp != ppp_priv_area->ip_local ||
+ ppp_priv_area->ip_remote_tmp != ppp_priv_area->ip_remote){
+
+			/* The IP addresses have changed, we must
+ * stop the communications and reconfigure
+ * the card. Reason: the firmware must know
+ * the local and remote IP addresses. */
+ disable_comm(card);
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+ printk(KERN_INFO
+ "%s: IP addresses changed!\n",
+ card->devname);
+ printk(KERN_INFO "%s: Restarting communications ...\n",
+ card->devname);
+ }else{
+ /* IP addresses are the same and the link is up,
+ * we don't have to do anything here. Therefore, exit */
+ return 0;
+ }
+ }
+
+	/* Record the new IP addresses */
+ ppp_priv_area->ip_local = ppp_priv_area->ip_local_tmp;
+ ppp_priv_area->ip_remote = ppp_priv_area->ip_remote_tmp;
+
+ if (config508(dev, card)){
+ printk(KERN_INFO "%s: Failed to configure PPP device\n",
+ card->devname);
+ return 0;
+ }
+
+ if (ppp_set_intr_mode(card, PPP_INTR_RXRDY|
+ PPP_INTR_TXRDY|
+ PPP_INTR_MODEM|
+ PPP_INTR_DISC |
+ PPP_INTR_OPEN |
+ PPP_INTR_DROP_DTR |
+ PPP_INTR_TIMER)) {
+
+ printk(KERN_INFO "%s: Failed to configure board interrupts !\n",
+ card->devname);
+ return 0;
+ }
+
+ /* Turn off the transmit and timer interrupt */
+ flags->imask &= ~(PPP_INTR_TXRDY | PPP_INTR_TIMER) ;
+
+
+ /* If you are not the authenticator and any one of the protocol is
+ * enabled then we call the set_out_bound_authentication.
+ */
+ if ( !card->u.p.authenticator && (ppp_priv_area->pap || ppp_priv_area->chap)) {
+ if ( ppp_set_outbnd_auth(card, ppp_priv_area) ){
+ printk(KERN_INFO "%s: Outbound authentication failed !\n",
+ card->devname);
+ return 0;
+ }
+ }
+
+ /* If you are the authenticator and any one of the protocol is enabled
+ * then we call the set_in_bound_authentication.
+ */
+ if (card->u.p.authenticator && (ppp_priv_area->pap || ppp_priv_area->chap)){
+ if (ppp_set_inbnd_auth(card, ppp_priv_area)){
+ printk(KERN_INFO "%s: Inbound authentication failed !\n",
+ card->devname);
+ return 0;
+ }
+ }
+
+ /* If we fail to enable communications here it's OK,
+	 * since the DTR timer will cause a disconnect, which
+ * will retrigger communication in timer_intr() */
+ if (ppp_comm_enable(card) == CMD_OK) {
+ wanpipe_set_state(card, WAN_CONNECTING);
+ init_ppp_tx_rx_buff(card);
+ }
+
+ return 0;
+}
+
+/*============================================================
+ * ppp_poll
+ *
+ * Rationale:
+ *	We cannot manipulate the routing tables or
+ *	IP addresses within an interrupt. Therefore
+ *	we must perform such actions outside the interrupt,
+ *	at a later time.
+ *
+ * Description:
+ * PPP polling routine, responsible for
+ * shutting down interfaces upon disconnect
+ * and adding/removing routes.
+ *
+ * Usage:
+ * This function is executed for each ppp
+ *	interface through the kernel work queue.
+ *
+ * trigger_ppp_poll() function is used to kick
+ * the ppp_poll routine.
+ */
+static void ppp_poll(struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area;
+ sdla_t *card;
+ u8 check_gateway=0;
+ ppp_flags_t *flags;
+
+ if (!dev || (ppp_priv_area = dev->priv) == NULL)
+ return;
+
+ card = ppp_priv_area->card;
+ flags = card->flags;
+
+ /* Shutdown is in progress, stop what you are
+ * doing and get out */
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ return;
+ }
+
+ /* if_open() function has triggered the polling routine
+ * to determine the configured IP addresses. Once the
+	 * addresses are found, trigger the ppp configuration */
+ if (test_bit(0,&ppp_priv_area->config_ppp)){
+
+ ppp_priv_area->ip_local_tmp = get_ip_address(dev,WAN_LOCAL_IP);
+ ppp_priv_area->ip_remote_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
+
+ if (ppp_priv_area->ip_local_tmp == ppp_priv_area->ip_remote_tmp &&
+ card->u.p.ip_mode == WANOPT_PPP_HOST){
+
+ if (++ppp_priv_area->ip_error > MAX_IP_ERRORS){
+ printk(KERN_INFO "\n%s: --- WARNING ---\n",
+ card->devname);
+ printk(KERN_INFO "%s: The local IP address is the same as the\n",
+ card->devname);
+ printk(KERN_INFO "%s: Point-to-Point IP address.\n",
+ card->devname);
+ printk(KERN_INFO "%s: --- WARNING ---\n\n",
+ card->devname);
+ }else{
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ ppp_priv_area->poll_delay_timer.expires = jiffies+HZ;
+ add_timer(&ppp_priv_area->poll_delay_timer);
+ return;
+ }
+ }
+
+ ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
+ flags->imask |= PPP_INTR_TIMER;
+ ppp_priv_area->ip_error=0;
+
+ clear_bit(0,&ppp_priv_area->config_ppp);
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ return;
+ }
+
+ /* Dynamic interface implementation, as well as dynamic
+ * routing. */
+
+ switch (card->wandev.state) {
+
+ case WAN_DISCONNECTED:
+
+ /* If the dynamic interface configuration is on, and interface
+		 * is up, then bring down the network interface */
+
+ if (test_bit(DYN_OPT_ON,&ppp_priv_area->interface_down) &&
+ !test_bit(DEV_DOWN,&ppp_priv_area->interface_down) &&
+ card->wandev.dev->flags & IFF_UP){
+
+ printk(KERN_INFO "%s: Interface %s down.\n",
+ card->devname,card->wandev.dev->name);
+ change_dev_flags(card->wandev.dev,
+ (card->wandev.dev->flags&~IFF_UP));
+ set_bit(DEV_DOWN,&ppp_priv_area->interface_down);
+ }else{
+ /* We need to check if the local IP address is
+ * zero. If it is, we shouldn't try to remove it.
+ * For some reason the kernel crashes badly if
+ * we try to remove the route twice */
+
+ if (card->wandev.dev->flags & IFF_UP &&
+ get_ip_address(card->wandev.dev,WAN_LOCAL_IP) &&
+ card->u.p.ip_mode == WANOPT_PPP_PEER){
+
+ remove_route(card);
+ }
+ }
+ break;
+
+ case WAN_CONNECTED:
+
+ /* On an SMP machine this code can execute before the interface
+ * comes up. In this case, we must make sure that we do not
+ * try to bring up the interface before dev_open() is finished */
+
+
+ /* DEV_DOWN will be set only when we bring down the interface
+ * for the very first time. This way we know that it was us
+ * that brought the interface down */
+
+ if (test_bit(DYN_OPT_ON,&ppp_priv_area->interface_down) &&
+ test_bit(DEV_DOWN, &ppp_priv_area->interface_down) &&
+ !(card->wandev.dev->flags & IFF_UP)){
+
+ printk(KERN_INFO "%s: Interface %s up.\n",
+ card->devname,card->wandev.dev->name);
+
+ change_dev_flags(card->wandev.dev,(card->wandev.dev->flags|IFF_UP));
+ clear_bit(DEV_DOWN,&ppp_priv_area->interface_down);
+ check_gateway=1;
+ }
+
+ if ((card->u.p.ip_mode == WANOPT_PPP_PEER) &&
+ test_bit(1,&Read_connection_info)) {
+
+ process_route(card);
+ clear_bit(1,&Read_connection_info);
+ check_gateway=1;
+ }
+
+ if (ppp_priv_area->gateway && check_gateway)
+ add_gateway(card,dev);
+
+ break;
+ }
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ return;
+}
+
+/*============================================================
+ * trigger_ppp_poll
+ *
+ * Description:
+ * Queue ppp_poll() work for a specific
+ * interface via schedule_work(). This will kick
+ * the ppp_poll() routine at a later time.
+ *
+ * Usage:
+ * Interrupts use this to defer a task to
+ * the polling routine.
+ *
+ */
+
+static void trigger_ppp_poll(struct net_device *dev)
+{
+ ppp_private_area_t *ppp_priv_area;
+ if ((ppp_priv_area=dev->priv) != NULL){
+
+ sdla_t *card = ppp_priv_area->card;
+
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ return;
+ }
+
+ if (test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
+ return;
+ }
+
+ schedule_work(&ppp_priv_area->poll_work);
+ }
+ return;
+}
+
+static void ppp_poll_delay (unsigned long dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ trigger_ppp_poll(dev);
+}
+
+/*============================================================
+ * detect_and_fix_tx_bug
+ *
+ * Description:
+ * On connect, if the board tx buffer ptr is not the same
+ * as the driver tx buffer ptr, we have found a firmware bug.
+ * Report the bug to the layer above. To fix the
+ * error, restart communications.
+ *
+ * Usage:
+ *
+ */
+
+static int detect_and_fix_tx_bug (sdla_t *card)
+{
+ if (((unsigned long)card->u.p.txbuf_base&0xFFF) != ((*card->u.p.txbuf_next)&0xFFF)){
+ NEX_PRINTK(KERN_INFO "Major Error, Fix the bug\n");
+ return 1;
+ }
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+
+/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdla_x25.c b/drivers/net/wan/sdla_x25.c
new file mode 100644
index 000000000000..3a93d2fd4fbf
--- /dev/null
+++ b/drivers/net/wan/sdla_x25.c
@@ -0,0 +1,5496 @@
+/*****************************************************************************
+* sdla_x25.c WANPIPE(tm) Multiprotocol WAN Link Driver. X.25 module.
+*
+* Author: Nenad Corbic <ncorbic@sangoma.com>
+*
+* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Apr 03, 2001 Nenad Corbic o Fixed the rx_skb=NULL bug in x25 in rx_intr().
+* Dec 26, 2000 Nenad Corbic o Added a new polling routine, that uses
+* a kernel timer (more efficient).
+* Dec 25, 2000 Nenad Corbic o Updated for 2.4.X kernel
+* Jul 26, 2000 Nenad Corbic o Increased the local packet buffering
+* for API to 4096+header_size.
+* Jul 17, 2000 Nenad Corbic o Fixed the x25 startup bug. Enable
+* communications only after all interfaces
+* come up. HIGH SVC/PVC is used to calculate
+* the number of channels.
+* Enable protocol only after all interfaces
+* are enabled.
+* Jul 10, 2000 Nenad Corbic o Fixed the M_BIT bug.
+* Apr 25, 2000 Nenad Corbic o Pass Modem messages to the API.
+* Disable idle timeout in X25 API.
+* Apr 14, 2000 Nenad Corbic o Fixed: Large LCN number support.
+* Maximum LCN number is 4095.
+* Maximum number of X25 channels is 255.
+* Apr 06, 2000 Nenad Corbic o Added SMP Support.
+* Mar 29, 2000 Nenad Corbic o Added support for S514 PCI Card
+* Mar 23, 2000 Nenad Corbic o Improved task queue, BH handling.
+* Mar 14, 2000 Nenad Corbic o Updated Protocol Violation handling
+* routines. Bug Fix.
+* Mar 10, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
+* Mar 09, 2000 Nenad Corbic o Fixed the auto HDLC bug.
+* Mar 08, 2000 Nenad Corbic o Fixed LAPB HDLC startup problems.
+* Application must bring the link up
+* before tx/rx, and bring the
+* link down on close().
+* Mar 06, 2000 Nenad Corbic o Added an option for logging call setup
+* information.
+* Feb 29, 2000 Nenad Corbic o Added support for LAPB HDLC API
+* Feb 25, 2000 Nenad Corbic o Fixed the modem failure handling.
+* No Modem OOB message will be passed
+* to the user.
+* Feb 21, 2000 Nenad Corbic o Added Xpipemon Debug Support
+* Dec 30, 1999 Nenad Corbic o Socket based X25API
+* Sep 17, 1998 Jaspreet Singh o Updates for 2.2.X kernel
+* Mar 15, 1998 Alan Cox o 2.1.x porting
+* Dec 19, 1997 Jaspreet Singh o Added multi-channel IPX support
+* Nov 27, 1997 Jaspreet Singh o Added protection against enabling of irqs
+* when they are disabled.
+* Nov 17, 1997 Farhan Thawar o Added IPX support
+* o Changed if_send() to now buffer packets when
+* the board is busy
+* o Removed queueing of packets via the polling
+*                                            routine
+* o Changed if_send() critical flags to properly
+* handle race conditions
+* Nov 06, 1997 Farhan Thawar o Added support for SVC timeouts
+* o Changed PVC encapsulation to ETH_P_IP
+* Jul 21, 1997 Jaspreet Singh o Fixed freeing up of buffers using kfree()
+* when packets are received.
+* Mar 11, 1997 Farhan Thawar Version 3.1.1
+* o added support for V35
+* o changed if_send() to return 0 if
+* wandev.critical() is true
+* o free socket buffer in if_send() if
+* returning 0
+* o added support for single '@' address to
+* accept all incoming calls
+* o fixed bug in set_chan_state() to disconnect
+* Jan 15, 1997 Gene Kozin Version 3.1.0
+* o implemented exec() entry point
+* Jan 07, 1997 Gene Kozin Initial version.
+*****************************************************************************/
+
+/*======================================================
+ * Includes
+ *=====================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/ctype.h>
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/workqueue.h>
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <asm/atomic.h>
+#include <linux/delay.h> /* Experimental delay */
+
+#include <asm/uaccess.h>
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/sdla_x25.h> /* X.25 firmware API definitions */
+#include <linux/if_wanpipe_common.h>
+#include <linux/if_wanpipe.h>
+
+
+/*======================================================
+ * Defines & Macros
+ *=====================================================*/
+
+
+#define CMD_OK 0 /* normal firmware return code */
+#define CMD_TIMEOUT 0xFF /* firmware command timed out */
+#define MAX_CMD_RETRY 10 /* max number of firmware retries */
+
+#define X25_CHAN_MTU 4096 /* unfragmented logical channel MTU */
+#define X25_HRDHDR_SZ 7 /* max encapsulation header size */
+#define X25_CONCT_TMOUT (90*HZ) /* link connection timeout */
+#define X25_RECON_TMOUT (10*HZ) /* link reconnection timeout */
+#define CONNECT_TIMEOUT (90*HZ) /* link connection timeout */
+#define HOLD_DOWN_TIME (30*HZ) /* link hold down time */
+#define MAX_BH_BUFF 10
+#define M_BIT 0x01
+
+//#define PRINT_DEBUG 1
+#ifdef PRINT_DEBUG
+#define DBG_PRINTK(format, a...) printk(format, ## a)
+#else
+#define DBG_PRINTK(format, a...)
+#endif
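+/* Usage (illustrative): DBG_PRINTK(KERN_INFO "%s: debug message\n", card->devname);
+ * compiles away to nothing unless PRINT_DEBUG is defined above. */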
+
+#define TMR_INT_ENABLED_POLL_ACTIVE 0x01
+#define TMR_INT_ENABLED_POLL_CONNECT_ON 0x02
+#define TMR_INT_ENABLED_POLL_CONNECT_OFF 0x04
+#define TMR_INT_ENABLED_POLL_DISCONNECT 0x08
+#define TMR_INT_ENABLED_CMD_EXEC 0x10
+#define TMR_INT_ENABLED_UPDATE 0x20
+#define TMR_INT_ENABLED_UDP_PKT 0x40
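+
+/* Typical use of the TMR_INT_ENABLED_* bits, as seen later in this file
+ * (illustrative sketch): the requester sets a bit and unmasks the timer
+ * interrupt,
+ *
+ * card->u.x.timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
+ * status->imask |= INTR_ON_TIMER;
+ *
+ * and timer_intr() later services and clears the bit. */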
+
+#define MAX_X25_ADDR_SIZE 16
+#define MAX_X25_DATA_SIZE 129
+#define MAX_X25_FACL_SIZE 110
+
+#define TRY_CMD_AGAIN 2
+#define DELAY_RESULT 1
+#define RETURN_RESULT 0
+
+#define DCD(x) (x & 0x03 ? "HIGH" : "LOW")
+#define CTS(x) (x & 0x05 ? "HIGH" : "LOW")
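+/* Illustrative use of the modem-status macros (sketch; modem_status is a
+ * hypothetical variable holding the raw modem status byte from the board):
+ *
+ * printk(KERN_INFO "%s: DCD %s CTS %s\n",
+ * card->devname, DCD(modem_status), CTS(modem_status));
+ */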
+
+
+/* If defined, the driver will not write log
+ * messages about modem status changes. */
+#define MODEM_NOT_LOG 1
+
+/*====================================================
+ * For IPXWAN
+ *===================================================*/
+
+#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
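+/* CVHexToAscii() maps a 4-bit nibble to its ASCII hex digit,
+ * e.g. CVHexToAscii(0x3) == '3' and CVHexToAscii(0xB) == 'B'. */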
+
+
+/*====================================================
+ * MEMORY DEBUGGING FUNCTION
+ *====================================================
+
+#define KMEM_SAFETYZONE 8
+
+static void * dbg_kmalloc(unsigned int size, int prio, int line) {
+ int i = 0;
+ void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
+ char * c1 = v;
+ c1 += sizeof(unsigned int);
+ *((unsigned int *)v) = size;
+
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
+ c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
+ c1 += 8;
+ }
+ c1 += size;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
+ c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
+ c1 += 8;
+ }
+ v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
+ printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v);
+ return v;
+}
+static void dbg_kfree(void * v, int line) {
+ unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
+ unsigned int size = *sp;
+ char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
+ int i = 0;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
+ || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
+ printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
+ printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
+ c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
+ }
+ c1 += 8;
+ }
+ c1 += size;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
+ || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
+ ) {
+ printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
+ printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
+ c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
+ }
+ c1 += 8;
+ }
+ printk(KERN_INFO "line %d kfree(%p)\n",line,v);
+ v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
+ kfree(v);
+}
+
+#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
+#define kfree(x) dbg_kfree(x,__LINE__)
+
+==============================================================*/
+
+
+
+/*===============================================
+ * Data Structures
+ *===============================================*/
+
+
+/*========================================================
+ * Name: x25_channel
+ *
+ * Purpose: To hold private information for each
+ * logical channel.
+ *
+ * Rationale: Per-channel debugging is possible if each
+ * channel has its own private area.
+ *
+ * Assumptions:
+ *
+ * Description: This is an extension of the struct net_device
+ * we create for each network interface to keep
+ * the rest of X.25 channel-specific data.
+ *
+ * Construct: Typedef
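+ *
+ * Note: each interface's dev->priv points at its
+ * x25_channel_t; the binding is done in new_if().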
+ */
+typedef struct x25_channel
+{
+ wanpipe_common_t common; /* common area for x25api and socket */
+ char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
+ char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
+ unsigned tx_pkt_size;
+ unsigned short protocol; /* ethertype, 0 - multiplexed */
+ char drop_sequence; /* mark sequence for dropping */
+ unsigned long state_tick; /* time of the last state change */
+ unsigned idle_timeout; /* sec, before disconnecting */
+ unsigned long i_timeout_sofar; /* # of sec's we've been idle */
+ unsigned hold_timeout; /* sec, before re-connecting */
+ unsigned long tick_counter; /* counter for transmit time out */
+ char devtint; /* Whether we should dev_tint() */
+ struct sk_buff* rx_skb; /* receive socket buffer */
+ struct sk_buff* tx_skb; /* transmit socket buffer */
+
+ bh_data_t *bh_head; /* Circular buffer for x25api_bh */
+ unsigned long tq_working;
+ volatile int bh_write;
+ volatile int bh_read;
+ atomic_t bh_buff_used;
+
+ sdla_t* card; /* -> owner */
+ struct net_device *dev; /* -> bound device */
+
+ int ch_idx;
+ unsigned char enable_IPX;
+ unsigned long network_number;
+ struct net_device_stats ifstats; /* interface statistics */
+ unsigned short transmit_length;
+ unsigned short tx_offset;
+ char transmit_buffer[X25_CHAN_MTU+sizeof(x25api_hdr_t)];
+
+ if_send_stat_t if_send_stat;
+ rx_intr_stat_t rx_intr_stat;
+ pipe_mgmt_stat_t pipe_mgmt_stat;
+
+ unsigned long router_start_time; /* Router start time in seconds */
+ unsigned long router_up_time;
+
+} x25_channel_t;
+
+/* FIXME Take this out */
+
+#ifdef NEX_OLD_CALL_INFO
+typedef struct x25_call_info
+{
+ char dest[17]; PACKED;/* ASCIIZ destination address */
+ char src[17]; PACKED;/* ASCIIZ source address */
+ char nuser; PACKED;/* number of user data bytes */
+ unsigned char user[127]; PACKED;/* user data */
+ char nfacil; PACKED;/* number of facilities */
+ struct
+ {
+ unsigned char code; PACKED;
+ unsigned char parm; PACKED;
+ } facil[64]; /* facilities */
+} x25_call_info_t;
+#else
+typedef struct x25_call_info
+{
+ char dest[MAX_X25_ADDR_SIZE] PACKED;/* ASCIIZ destination address */
+ char src[MAX_X25_ADDR_SIZE] PACKED;/* ASCIIZ source address */
+ unsigned char nuser PACKED;
+ unsigned char user[MAX_X25_DATA_SIZE] PACKED;/* user data */
+ unsigned char nfacil PACKED;
+ unsigned char facil[MAX_X25_FACL_SIZE] PACKED;
+ unsigned short lcn PACKED;
+} x25_call_info_t;
+#endif
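+
+/* Note: an x25_call_info_t is filled in from the raw call packet by
+ * parse_call_info(), whose prototype appears below. */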
+
+
+
+/*===============================================
+ * Private Function Prototypes
+ *==============================================*/
+
+
+/*=================================================
+ * WAN link driver entry points. These are
+ * called by the WAN router module.
+ */
+static int update(struct wan_device* wandev);
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf);
+static int del_if(struct wan_device* wandev, struct net_device* dev);
+static void disable_comm (sdla_t* card);
+static void disable_comm_shutdown(sdla_t *card);
+
+
+
+/*=================================================
+ * WANPIPE-specific entry points
+ */
+static int wpx_exec (struct sdla* card, void* u_cmd, void* u_data);
+static void x25api_bh(struct net_device *dev);
+static int x25api_bh_cleanup(struct net_device *dev);
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
+
+
+/*=================================================
+ * Network device interface
+ */
+static int if_init(struct net_device* dev);
+static int if_open(struct net_device* dev);
+static int if_close(struct net_device* dev);
+static int if_header(struct sk_buff* skb, struct net_device* dev,
+ unsigned short type, void* daddr, void* saddr, unsigned len);
+static int if_rebuild_hdr (struct sk_buff* skb);
+static int if_send(struct sk_buff* skb, struct net_device* dev);
+static struct net_device_stats *if_stats(struct net_device* dev);
+
+static void if_tx_timeout(struct net_device *dev);
+
+/*=================================================
+ * Interrupt handlers
+ */
+static void wpx_isr (sdla_t *);
+static void rx_intr (sdla_t *);
+static void tx_intr (sdla_t *);
+static void status_intr (sdla_t *);
+static void event_intr (sdla_t *);
+static void spur_intr (sdla_t *);
+static void timer_intr (sdla_t *);
+
+static int tx_intr_send(sdla_t *card, struct net_device *dev);
+static struct net_device *move_dev_to_next(sdla_t *card,
+ struct net_device *dev);
+
+/*=================================================
+ * Background polling routines
+ */
+static void wpx_poll (sdla_t* card);
+static void poll_disconnected (sdla_t* card);
+static void poll_connecting (sdla_t* card);
+static void poll_active (sdla_t* card);
+static void trigger_x25_poll(sdla_t *card);
+static void x25_timer_routine(unsigned long data);
+
+
+
+/*=================================================
+ * X.25 firmware interface functions
+ */
+static int x25_get_version (sdla_t* card, char* str);
+static int x25_configure (sdla_t* card, TX25Config* conf);
+static int hdlc_configure (sdla_t* card, TX25Config* conf);
+static int set_hdlc_level (sdla_t* card);
+static int x25_get_err_stats (sdla_t* card);
+static int x25_get_stats (sdla_t* card);
+static int x25_set_intr_mode (sdla_t* card, int mode);
+static int x25_close_hdlc (sdla_t* card);
+static int x25_open_hdlc (sdla_t* card);
+static int x25_setup_hdlc (sdla_t* card);
+static int x25_set_dtr (sdla_t* card, int dtr);
+static int x25_get_chan_conf (sdla_t* card, x25_channel_t* chan);
+static int x25_place_call (sdla_t* card, x25_channel_t* chan);
+static int x25_accept_call (sdla_t* card, int lcn, int qdm);
+static int x25_clear_call (sdla_t* card, int lcn, int cause, int diagn);
+static int x25_send (sdla_t* card, int lcn, int qdm, int len, void* buf);
+static int x25_fetch_events (sdla_t* card);
+static int x25_error (sdla_t* card, int err, int cmd, int lcn);
+
+/*=================================================
+ * X.25 asynchronous event handlers
+ */
+static int incoming_call (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
+static int call_accepted (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
+static int call_cleared (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
+static int timeout_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
+static int restart_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
+
+
+/*=================================================
+ * Miscellaneous functions
+ */
+static int connect (sdla_t* card);
+static int disconnect (sdla_t* card);
+static struct net_device* get_dev_by_lcn(struct wan_device* wandev,
+ unsigned lcn);
+static int chan_connect(struct net_device* dev);
+static int chan_disc(struct net_device* dev);
+static void set_chan_state(struct net_device* dev, int state);
+static int chan_send(struct net_device *dev, void* buff, unsigned data_len,
+ unsigned char tx_intr);
+static unsigned char bps_to_speed_code (unsigned long bps);
+static unsigned int dec_to_uint (unsigned char* str, int len);
+static unsigned int hex_to_uint (unsigned char*, int);
+static void parse_call_info (unsigned char*, x25_call_info_t*);
+static struct net_device *find_channel(sdla_t *card, unsigned lcn);
+static void bind_lcn_to_dev(sdla_t *card, struct net_device *dev, unsigned lcn);
+static void setup_for_delayed_transmit(struct net_device *dev,
+ void *buf, unsigned len);
+
+
+/*=================================================
+ * X25 API Functions
+ */
+static int wanpipe_pull_data_in_skb(sdla_t *card, struct net_device *dev,
+ struct sk_buff **);
+static void timer_intr_exec(sdla_t *, unsigned char);
+static int execute_delayed_cmd(sdla_t *card, struct net_device *dev,
+ mbox_cmd_t *usr_cmd, char bad_cmd);
+static int api_incoming_call (sdla_t*, TX25Mbox *, int);
+static int alloc_and_init_skb_buf (sdla_t *,struct sk_buff **, int);
+static void send_delayed_cmd_result(sdla_t *card, struct net_device *dev,
+ TX25Mbox* mbox);
+static int clear_confirm_event (sdla_t *, TX25Mbox*);
+static void send_oob_msg (sdla_t *card, struct net_device *dev, TX25Mbox *mbox);
+static int timer_intr_cmd_exec(sdla_t *card);
+static void api_oob_event (sdla_t *card,TX25Mbox *mbox);
+static int check_bad_command(sdla_t *card, struct net_device *dev);
+static int channel_disconnect(sdla_t* card, struct net_device *dev);
+static void hdlc_link_down (sdla_t*);
+
+/*=================================================
+ * XPIPEMON Functions
+ */
+static int process_udp_mgmt_pkt(sdla_t *);
+static int udp_pkt_type( struct sk_buff *, sdla_t*);
+static int reply_udp( unsigned char *, unsigned int);
+static void init_x25_channel_struct( x25_channel_t *);
+static void init_global_statistics( sdla_t *);
+static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t *card,
+ struct net_device *dev,
+ struct sk_buff *skb, int lcn);
+static unsigned short calc_checksum (char *, int);
+
+
+
+/*=================================================
+ * IPX functions
+ */
+static void switch_net_numbers(unsigned char *, unsigned long, unsigned char);
+static int handle_IPXWAN(unsigned char *, char *, unsigned char ,
+ unsigned long , unsigned short );
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+static void S508_S514_lock(sdla_t *, unsigned long *);
+static void S508_S514_unlock(sdla_t *, unsigned long *);
+
+
+/*=================================================
+ * Global Variables
+ *=================================================*/
+
+
+
+/*=================================================
+ * Public Functions
+ *=================================================*/
+
+
+
+
+/*===================================================================
+ * wpx_init: X.25 Protocol Initialization routine.
+ *
+ * Purpose: To initialize the protocol/firmware.
+ *
+ * Rationale: This function is called by the setup() function, in
+ * sdlamain.c, to dynamically set up the X.25 protocol.
+ * This is the first protocol-specific function, and it
+ * executes once on startup.
+ *
+ * Description: This procedure initializes the x25 firmware and
+ * sets up the mailbox, transmit and receive buffer
+ * pointers. It also initializes all debugging structures
+ * and sets up the X25 environment.
+ *
+ * Sets up hardware options defined by user in [wanpipe#]
+ * section of wanpipe#.conf configuration file.
+ *
+ * At this point adapter is completely initialized
+ * and X.25 firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the
+ * adapter data space.
+ *
+ * Called by: setup() function in sdlamain.c
+ *
+ * Assumptions: None
+ *
+ * Warnings: None
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+
+int wpx_init (sdla_t* card, wandev_conf_t* conf)
+{
+ union{
+ char str[80];
+ TX25Config cfg;
+ } u;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_X25){
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Initialize protocol-specific fields */
+ card->mbox = (void*)(card->hw.dpmbase + X25_MBOX_OFFS);
+ card->rxmb = (void*)(card->hw.dpmbase + X25_RXMBOX_OFFS);
+ card->flags = (void*)(card->hw.dpmbase + X25_STATUS_OFFS);
+
+ /* Initialize for S514 Card */
+ if(card->hw.type == SDLA_S514) {
+ card->mbox += X25_MB_VECTOR;
+ card->flags += X25_MB_VECTOR;
+ card->rxmb += X25_MB_VECTOR;
+ }
+
+
+ /* Read firmware version. Note that when adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+ if (x25_get_version(card, NULL) || x25_get_version(card, u.str))
+ return -EIO;
+
+
+ /* X25 firmware can run either in X25 or LAPB HDLC mode.
+ * Check the user-defined option and configure accordingly */
+ if (conf->u.x25.LAPB_hdlc_only == WANOPT_YES){
+ if (set_hdlc_level(card) != CMD_OK){
+ return -EIO;
+ }else{
+ printk(KERN_INFO "%s: running LAP_B HDLC firmware v%s\n",
+ card->devname, u.str);
+ }
+ card->u.x.LAPB_hdlc = 1;
+ }else{
+ printk(KERN_INFO "%s: running X.25 firmware v%s\n",
+ card->devname, u.str);
+ card->u.x.LAPB_hdlc = 0;
+ }
+
+ /* Configure adapter. Here we set reasonable defaults, then parse
+ * device configuration structure and set configuration options.
+ * Most configuration options are verified and corrected (if
+ * necessary) since we can't rely on the adapter to do so.
+ */
+ memset(&u.cfg, 0, sizeof(u.cfg));
+ u.cfg.t1 = 3;
+ u.cfg.n2 = 10;
+ u.cfg.autoHdlc = 1; /* automatic HDLC connection */
+ u.cfg.hdlcWindow = 7;
+ u.cfg.pktWindow = 2;
+ u.cfg.station = 1; /* DTE */
+ u.cfg.options = 0x0090; /* disable D-bit pragmatics */
+ u.cfg.ccittCompat = 1988;
+ u.cfg.t10t20 = 30;
+ u.cfg.t11t21 = 30;
+ u.cfg.t12t22 = 30;
+ u.cfg.t13t23 = 30;
+ u.cfg.t16t26 = 30;
+ u.cfg.t28 = 30;
+ u.cfg.r10r20 = 5;
+ u.cfg.r12r22 = 5;
+ u.cfg.r13r23 = 5;
+ u.cfg.responseOpt = 1; /* RR's after every packet */
+
+ if (card->u.x.LAPB_hdlc){
+ u.cfg.hdlcMTU = 1027;
+ }
+
+ if (conf->u.x25.x25_conf_opt){
+ u.cfg.options = conf->u.x25.x25_conf_opt;
+ }
+
+ if (conf->clocking != WANOPT_EXTERNAL)
+ u.cfg.baudRate = bps_to_speed_code(conf->bps);
+
+ if (conf->station != WANOPT_DTE){
+ u.cfg.station = 0; /* DCE mode */
+ }
+
+ if (conf->interface != WANOPT_RS232 ){
+ u.cfg.hdlcOptions |= 0x80; /* V35 mode */
+ }
+
+ /* adjust MTU */
+ if (!conf->mtu || (conf->mtu >= 1024))
+ card->wandev.mtu = 1024;
+ else if (conf->mtu >= 512)
+ card->wandev.mtu = 512;
+ else if (conf->mtu >= 256)
+ card->wandev.mtu = 256;
+ else if (conf->mtu >= 128)
+ card->wandev.mtu = 128;
+ else
+ card->wandev.mtu = 64;
+
+ u.cfg.defPktSize = u.cfg.pktMTU = card->wandev.mtu;
+
+ if (conf->u.x25.hi_pvc){
+ card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, MAX_LCN_NUM);
+ card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+ }
+
+ if (conf->u.x25.hi_svc){
+ card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, MAX_LCN_NUM);
+ card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
+ }
+
+ /* Figure out the total number of channels to configure */
+ card->u.x.num_of_ch = 0;
+ if (card->u.x.hi_svc != 0){
+ card->u.x.num_of_ch = (card->u.x.hi_svc - card->u.x.lo_svc) + 1;
+ }
+ if (card->u.x.hi_pvc != 0){
+ card->u.x.num_of_ch += (card->u.x.hi_pvc - card->u.x.lo_pvc) + 1;
+ }
+
+ if (card->u.x.num_of_ch == 0){
+ printk(KERN_INFO "%s: ERROR, Minimum number of PVC/SVC channels is 1 !\n"
+ "%s: Please set the Lowest/Highest PVC/SVC values !\n",
+ card->devname,card->devname);
+ return -ECHRNG;
+ }
+
+ u.cfg.loPVC = card->u.x.lo_pvc;
+ u.cfg.hiPVC = card->u.x.hi_pvc;
+ u.cfg.loTwoWaySVC = card->u.x.lo_svc;
+ u.cfg.hiTwoWaySVC = card->u.x.hi_svc;
+
+ if (conf->u.x25.hdlc_window)
+ u.cfg.hdlcWindow = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
+ if (conf->u.x25.pkt_window)
+ u.cfg.pktWindow = min_t(unsigned int, conf->u.x25.pkt_window, 7);
+
+ if (conf->u.x25.t1)
+ u.cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
+ if (conf->u.x25.t2)
+ u.cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 29);
+ if (conf->u.x25.t4)
+ u.cfg.t4 = min_t(unsigned int, conf->u.x25.t4, 240);
+ if (conf->u.x25.n2)
+ u.cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
+
+ if (conf->u.x25.t10_t20)
+ u.cfg.t10t20 = min_t(unsigned int, conf->u.x25.t10_t20,255);
+ if (conf->u.x25.t11_t21)
+ u.cfg.t11t21 = min_t(unsigned int, conf->u.x25.t11_t21,255);
+ if (conf->u.x25.t12_t22)
+ u.cfg.t12t22 = min_t(unsigned int, conf->u.x25.t12_t22,255);
+ if (conf->u.x25.t13_t23)
+ u.cfg.t13t23 = min_t(unsigned int, conf->u.x25.t13_t23,255);
+ if (conf->u.x25.t16_t26)
+ u.cfg.t16t26 = min_t(unsigned int, conf->u.x25.t16_t26, 255);
+ if (conf->u.x25.t28)
+ u.cfg.t28 = min_t(unsigned int, conf->u.x25.t28, 255);
+
+ if (conf->u.x25.r10_r20)
+ u.cfg.r10r20 = min_t(unsigned int, conf->u.x25.r10_r20,250);
+ if (conf->u.x25.r12_r22)
+ u.cfg.r12r22 = min_t(unsigned int, conf->u.x25.r12_r22,250);
+ if (conf->u.x25.r13_r23)
+ u.cfg.r13r23 = min_t(unsigned int, conf->u.x25.r13_r23,250);
+
+
+ if (conf->u.x25.ccitt_compat)
+ u.cfg.ccittCompat = conf->u.x25.ccitt_compat;
+
+ /* initialize adapter */
+ if (card->u.x.LAPB_hdlc){
+ if (hdlc_configure(card, &u.cfg) != CMD_OK)
+ return -EIO;
+ }else{
+ if (x25_configure(card, &u.cfg) != CMD_OK)
+ return -EIO;
+ }
+
+ if ((x25_close_hdlc(card) != CMD_OK) || /* close HDLC link */
+ (x25_set_dtr(card, 0) != CMD_OK)) /* drop DTR */
+ return -EIO;
+
+ /* Initialize protocol-specific fields of adapter data space */
+ card->wandev.bps = conf->bps;
+ card->wandev.interface = conf->interface;
+ card->wandev.clocking = conf->clocking;
+ card->wandev.station = conf->station;
+ card->isr = &wpx_isr;
+ card->poll = NULL; //&wpx_poll;
+ card->disable_comm = &disable_comm;
+ card->exec = &wpx_exec;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = &del_if;
+
+ /* WARNING: This function cannot exit with an error
+ * after the change of state */
+ card->wandev.state = WAN_DISCONNECTED;
+
+ card->wandev.enable_tx_int = 0;
+ card->irq_dis_if_send_count = 0;
+ card->irq_dis_poll_count = 0;
+ card->u.x.tx_dev = NULL;
+ card->u.x.no_dev = 0;
+
+
+ /* Configure for S514 PCI Card */
+ if (card->hw.type == SDLA_S514) {
+ card->u.x.hdlc_buf_status =
+ (volatile unsigned char *)
+ (card->hw.dpmbase + X25_MB_VECTOR+ X25_MISC_HDLC_BITS);
+ }else{
+ card->u.x.hdlc_buf_status =
+ (volatile unsigned char *)(card->hw.dpmbase + X25_MISC_HDLC_BITS);
+ }
+
+ card->u.x.poll_device=NULL;
+ card->wandev.udp_port = conf->udp_port;
+
+ /* Enable or disable call setup logging */
+ if (conf->u.x25.logging == WANOPT_YES){
+ printk(KERN_INFO "%s: Enabling Call Logging.\n",
+ card->devname);
+ card->u.x.logging = 1;
+ }else{
+ card->u.x.logging = 0;
+ }
+
+ /* Enable or disable modem status reporting */
+ if (conf->u.x25.oob_on_modem == WANOPT_YES){
+ printk(KERN_INFO "%s: Enabling OOB on Modem change.\n",
+ card->devname);
+ card->u.x.oob_on_modem = 1;
+ }else{
+ card->u.x.oob_on_modem = 0;
+ }
+
+ init_global_statistics(card);
+
+ INIT_WORK(&card->u.x.x25_poll_work, (void *)wpx_poll, card);
+
+ init_timer(&card->u.x.x25_timer);
+ card->u.x.x25_timer.data = (unsigned long)card;
+ card->u.x.x25_timer.function = x25_timer_routine;
+
+ return 0;
+}
+
+/*=========================================================
+ * WAN Device Driver Entry Points
+ *========================================================*/
+
+/*============================================================
+ * Name: update(), Update device status & statistics.
+ *
+ * Purpose: To provide debugging and statistical
+ * information to the /proc file system.
+ * /proc/net/wanrouter/wanpipe#
+ *
+ * Rationale: The /proc file system is used to collect
+ * information about the kernel and drivers.
+ * Using the /proc file system the user
+ * can see exactly what the Sangoma drivers are
+ * doing, and what state they are in.
+ *
+ * Description: Collect all driver statistical information
+ * and pass it to the top layer.
+ *
+ * Since we have to execute a debugging command
+ * to obtain firmware statistics, we trigger an
+ * UPDATE function within the timer interrupt.
+ * We wait until the timer update is complete.
+ * Once complete return the appropriate return
+ * code to indicate that the update was successful.
+ *
+ * Called by: device_stat() in wanmain.c
+ *
+ * Assumptions:
+ *
+ * Warnings: This function will degrade the performance
+ * of the router, since it uses the mailbox.
+ *
+ * Return: 0 OK
+ * <0 Failed (or busy).
+ */
+
+static int update(struct wan_device* wandev)
+{
+ volatile sdla_t* card;
+ TX25Status* status;
+ unsigned long timeout;
+
+ /* sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ if (test_bit(SEND_CRIT, (void*)&wandev->critical))
+ return -EAGAIN;
+
+ if (!wandev->dev)
+ return -ENODEV;
+
+ card = wandev->private;
+ status = card->flags;
+
+ card->u.x.timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
+ status->imask |= INTR_ON_TIMER;
+ timeout = jiffies;
+
+ for (;;){
+ if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE)){
+ break;
+ }
+ if ((jiffies-timeout) > 1*HZ){
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
+ return -EAGAIN;
+ }
+ }
+ return 0;
+}
+
+
+/*===================================================================
+ * Name: new_if
+ *
+ * Purpose: To allocate and initialize resources for a
+ * new logical channel.
+ *
+ * Rationale: A new channel can be added dynamically via
+ * ioctl call.
+ *
+ * Description: Allocate a private channel structure, x25_channel_t.
+ * Parse the user interface options from wanpipe#.conf
+ * configuration file.
+ * Bind the private area to the network device private
+ * area pointer (dev->priv).
+ * Prepare the network device structure for registration.
+ *
+ * Called by: ROUTER_IFNEW Ioctl call, from wanrouter_ioctl()
+ * (wanmain.c)
+ *
+ * Assumptions: None
+ *
+ * Warnings: None
+ *
+ * Return: 0 Ok
+ * <0 Failed (channel will not be created)
+ */
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf)
+{
+ sdla_t* card = wandev->private;
+ x25_channel_t* chan;
+ int err = 0;
+
+ if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)){
+ printk(KERN_INFO "%s: invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+ if(card->wandev.new_if_cnt++ > 0 && card->u.x.LAPB_hdlc) {
+ printk(KERN_INFO "%s: Error: Running LAPB HDLC Mode !\n",
+ card->devname);
+ printk(KERN_INFO
+ "%s: Maximum number of network interfaces must be one !\n",
+ card->devname);
+ return -EEXIST;
+ }
+
+ /* allocate and initialize private data */
+ chan = kmalloc(sizeof(x25_channel_t), GFP_ATOMIC);
+ if (chan == NULL){
+ return -ENOMEM;
+ }
+
+ memset(chan, 0, sizeof(x25_channel_t));
+
+ /* Bug Fix: Seg Err on PVC startup.
+ * It must be set here since bind_lcn_to_dev() expects
+ * it below */
+ dev->priv = chan;
+
+ strcpy(chan->name, conf->name);
+ chan->card = card;
+ chan->dev = dev;
+ chan->common.sk = NULL;
+ chan->common.func = NULL;
+ chan->common.rw_bind = 0;
+ chan->tx_skb = chan->rx_skb = NULL;
+
+ /* verify media address */
+ if (conf->addr[0] == '@'){ /* SVC */
+ chan->common.svc = 1;
+ strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
+
+ /* Set channel timeouts (default if not specified) */
+ chan->idle_timeout = (conf->idle_timeout) ?
+ conf->idle_timeout : 90;
+ chan->hold_timeout = (conf->hold_timeout) ?
+ conf->hold_timeout : 10;
+
+ }else if (is_digit(conf->addr[0])){ /* PVC */
+ int lcn = dec_to_uint(conf->addr, 0);
+
+ if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){
+ bind_lcn_to_dev (card, dev, lcn);
+ }else{
+ printk(KERN_ERR
+ "%s: PVC %u is out of range on interface %s!\n",
+ wandev->name, lcn, chan->name);
+ err = -EINVAL;
+ }
+ }else{
+ printk(KERN_ERR
+ "%s: invalid media address on interface %s!\n",
+ wandev->name, chan->name);
+ err = -EINVAL;
+ }
+
+ if(strcmp(conf->usedby, "WANPIPE") == 0){
+ printk(KERN_INFO "%s: Running in WANPIPE mode %s\n",
+ wandev->name, chan->name);
+ chan->common.usedby = WANPIPE;
+ chan->protocol = htons(ETH_P_IP);
+
+ }else if(strcmp(conf->usedby, "API") == 0){
+ chan->common.usedby = API;
+ printk(KERN_INFO "%s: Running in API mode %s\n",
+ wandev->name, chan->name);
+ chan->protocol = htons(X25_PROT);
+ }
+
+
+ if (err){
+ kfree(chan);
+ dev->priv = NULL;
+ return err;
+ }
+
+ chan->enable_IPX = conf->enable_IPX;
+
+ if (chan->enable_IPX)
+ chan->protocol = htons(ETH_P_IPX);
+
+ if (conf->network_number)
+ chan->network_number = conf->network_number;
+ else
+ chan->network_number = 0xDEADBEEF;
+
+ /* prepare network device data space for registration */
+ strcpy(dev->name,chan->name);
+
+ dev->init = &if_init;
+
+ init_x25_channel_struct(chan);
+
+ return 0;
+}
+
+/*===================================================================
+ * Name: del_if(), Remove a logical channel.
+ *
+ * Purpose: To dynamically remove a logical channel.
+ *
+ * Rationale: Each logical channel should be dynamically
+ * removable. This function is called by an
+ * IOCTL_IFDEL ioctl call or shutdown().
+ *
+ * Description: Do nothing.
+ *
+ * Called by: IOCTL_IFDEL : wanrouter_ioctl() from wanmain.c
+ * shutdown() from sdlamain.c
+ *
+ * Assumptions:
+ *
+ * Warnings:
+ *
+ * Return: 0 Ok. Void function.
+ */
+
+//FIXME Del IF Should be taken out now.
+
+static int del_if(struct wan_device* wandev, struct net_device* dev)
+{
+ return 0;
+}
+
+
+/*============================================================
+ * Name: wpx_exec
+ *
+ * Description: Execute adapter interface command.
+ * This option is currently disabled.
+ *===========================================================*/
+
+static int wpx_exec (struct sdla* card, void* u_cmd, void* u_data)
+{
+ return 0;
+}
+
+/*============================================================
+ * Name: disable_comm
+ *
+ * Description: Disable communications during shutdown.
+ * Don't check the return code because there is
+ * nothing we can do about it.
+ *
+ * Warning: Dev and private areas are gone at this point.
+ *===========================================================*/
+
+static void disable_comm(sdla_t* card)
+{
+ disable_comm_shutdown(card);
+ del_timer(&card->u.x.x25_timer);
+ return;
+}
+
+
+/*============================================================
+ * Network Device Interface
+ *===========================================================*/
+
+/*===================================================================
+ * Name: if_init(), Network Interface Initialization
+ *
+ * Purpose: To initialize a network interface device structure.
+ *
+ * Rationale: During network interface startup, if_init()
+ * is called by the kernel to initialize the
+ * network device structure. Thus a driver
+ * can customize a network device.
+ *
+ * Description: Initialize the network device callback
+ * routines. This is where we tell the kernel
+ * which function to use when it wants to send
+ * via our interface.
+ * Furthermore, we initialize the device flags,
+ * MTU and physical address of the board.
+ *
+ * Called by: Kernel (/usr/src/linux/net/core/dev.c)
+ * (dev->init())
+ *
+ * Assumptions: None
+ *
+ * Warnings: None
+ *
+ * Return: 0 Ok : Void function.
+ */
+static int if_init(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ struct wan_device* wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_header = &if_header;
+ dev->rebuild_header = &if_rebuild_hdr;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+ dev->tx_timeout = &if_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* Initialize media-specific parameters */
+ dev->type = ARPHRD_PPP; /* ARP h/w type */
+ dev->flags |= IFF_POINTOPOINT;
+ dev->flags |= IFF_NOARP;
+
+ if (chan->common.usedby == API){
+ dev->mtu = X25_CHAN_MTU+sizeof(x25api_hdr_t);
+ }else{
+ dev->mtu = card->wandev.mtu;
+ }
+
+ dev->hard_header_len = X25_HRDHDR_SZ; /* media header length */
+ dev->addr_len = 2; /* hardware address length */
+
+ if (!chan->common.svc){
+ *(unsigned short*)dev->dev_addr = htons(chan->common.lcn);
+ }
+
+ /* Initialize hardware parameters (just for reference) */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = (unsigned long)wandev->maddr;
+ dev->mem_end = wandev->maddr + wandev->msize - 1;
+
+ /* Set transmit buffer queue length */
+ dev->tx_queue_len = 100;
+ SET_MODULE_OWNER(dev);
+
+ /* FIXME Why are we doing this */
+ set_chan_state(dev, WAN_DISCONNECTED);
+ return 0;
+}
+
+
+/*===================================================================
+ * Name: if_open(), Open/Bring up the Network Interface
+ *
+ * Purpose: To bring up a network interface.
+ *
+ * Rationale:
+ *
+ * Description: Open network interface.
+ * o prevent module from unloading by incrementing use count
+ * o if link is disconnected then initiate connection
+ *
+ * Called by: Kernel (/usr/src/linux/net/core/dev.c)
+ * (dev->open())
+ *
+ * Assumptions: None
+ *
+ * Warnings: None
+ *
+ * Return: 0 Ok
+ * <0 Failure: Interface will not come up.
+ */
+
+static int if_open(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ struct timeval tv;
+ unsigned long smp_flags;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ chan->tq_working = 0;
+
+ /* Initialize the workqueue */
+ INIT_WORK(&chan->common.wanpipe_work, (void *)x25api_bh, dev);
+
+ /* Allocate and initialize BH circular buffer */
+ /* Add 1 to MAX_BH_BUFF so we don't have to test with (MAX_BH_BUFF-1) */
+ chan->bh_head = kmalloc((sizeof(bh_data_t)*(MAX_BH_BUFF+1)),GFP_ATOMIC);
+
+ if (chan->bh_head == NULL){
+ printk(KERN_INFO "%s: ERROR, failed to allocate memory ! BH_BUFFERS !\n",
+ card->devname);
+
+ return -ENOBUFS;
+ }
+ memset(chan->bh_head,0,(sizeof(bh_data_t)*(MAX_BH_BUFF+1)));
+ atomic_set(&chan->bh_buff_used, 0);
+
+ /* Increment the number of interfaces */
+ ++card->u.x.no_dev;
+
+ wanpipe_open(card);
+
+ /* LAPB protocol only uses one interface, thus
+ * start the protocol after it comes up. */
+ if (card->u.x.LAPB_hdlc){
+ if (card->open_cnt == 1){
+ TX25Status* status = card->flags;
+ S508_S514_lock(card, &smp_flags);
+ x25_set_intr_mode(card, INTR_ON_TIMER);
+ status->imask &= ~INTR_ON_TIMER;
+ S508_S514_unlock(card, &smp_flags);
+ }
+ }else{
+ /* X25 can have multiple interfaces thus, start the
+ * protocol once all interfaces are up */
+
+ //FIXME: There is a bug here. If interface is
+ //brought down and up, it will try to enable comm.
+ if (card->open_cnt == card->u.x.num_of_ch){
+
+ S508_S514_lock(card, &smp_flags);
+ connect(card);
+ S508_S514_unlock(card, &smp_flags);
+
+ mod_timer(&card->u.x.x25_timer, jiffies + HZ);
+ }
+ }
+ /* Device is not up until we are in the connected state */
+ do_gettimeofday( &tv );
+ chan->router_start_time = tv.tv_sec;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*===================================================================
+ * Name: if_close(), Close/Bring down the Network Interface
+ *
+ * Purpose: To bring down a network interface.
+ *
+ * Rationale:
+ *
+ * Description: Close network interface.
+ * o decrement module use count
+ *
+ * Called by: Kernel (/usr/src/linux/net/core/dev.c)
+ * (dev->close())
+ * ifconfig <name> down: will trigger the kernel
+ * which will call this function.
+ *
+ * Assumptions: None
+ *
+ * Warnings: None
+ *
+ * Return: 0 Ok
+ * <0 Failure: Interface will not exit properly.
+ */
+static int if_close(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ unsigned long smp_flags;
+
+ netif_stop_queue(dev);
+
+ if ((chan->common.state == WAN_CONNECTED) ||
+ (chan->common.state == WAN_CONNECTING)){
+ S508_S514_lock(card, &smp_flags);
+ chan_disc(dev);
+ S508_S514_unlock(card, &smp_flags);
+ }
+
+ wanpipe_close(card);
+
+ S508_S514_lock(card, &smp_flags);
+ if (chan->bh_head){
+ int i;
+ struct sk_buff *skb;
+
+ for (i=0; i<(MAX_BH_BUFF+1); i++){
+ skb = ((bh_data_t *)&chan->bh_head[i])->skb;
+ if (skb != NULL){
+ dev_kfree_skb_any(skb);
+ }
+ }
+ kfree(chan->bh_head);
+ chan->bh_head=NULL;
+ }
+ S508_S514_unlock(card, &smp_flags);
+
+ /* If this is the last close, disconnect physical link */
+ if (!card->open_cnt){
+ S508_S514_lock(card, &smp_flags);
+ disconnect(card);
+ x25_set_intr_mode(card, 0);
+ S508_S514_unlock(card, &smp_flags);
+ }
+
+ /* Decrement the number of interfaces */
+ --card->u.x.no_dev;
+ return 0;
+}
+
+/*======================================================================
+ * Build media header.
+ * o encapsulate packet according to encapsulation type.
+ *
+ * The trick here is to put packet type (Ethertype) into 'protocol'
+ * field of the socket buffer, so that we don't forget it.
+ * If encapsulation fails, set skb->protocol to 0 and discard
+ * packet later.
+ *
+ * Return: media header length.
+ *======================================================================*/
+
+static int if_header(struct sk_buff* skb, struct net_device* dev,
+ unsigned short type, void* daddr, void* saddr,
+ unsigned len)
+{
+ x25_channel_t* chan = dev->priv;
+ int hdr_len = dev->hard_header_len;
+
+ skb->protocol = htons(type);
+ if (!chan->protocol){
+ hdr_len = wanrouter_encapsulate(skb, dev, type);
+ if (hdr_len < 0){
+ hdr_len = 0;
+ skb->protocol = htons(0);
+ }
+ }
+ return hdr_len;
+}
+
+/*===============================================================
+ * Re-build media header.
+ *
+ * Return: 1 physical address resolved.
+ * 0 physical address not resolved
+ *==============================================================*/
+
+static int if_rebuild_hdr (struct sk_buff* skb)
+{
+ struct net_device *dev = skb->dev;
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+
+ printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
+ card->devname, dev->name);
+ return 1;
+}
+
+
+/*============================================================================
+ * Handle transmit timeout event from netif watchdog
+ */
+static void if_tx_timeout(struct net_device *dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ /* If our device stays busy for at least 5 seconds then we will
+ * kick start the device by making dev->tbusy = 0. We expect
+ * that our device never stays busy more than 5 seconds. So this
+ * is only used as a last resort.
+ */
+
+ ++chan->if_send_stat.if_send_tbusy_timeout;
+ printk (KERN_INFO "%s: Transmit timed out on %s\n",
+ card->devname, dev->name);
+ netif_wake_queue (dev);
+}
+
+
+/*=========================================================================
+ * Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission).
+ * o check link state. If link is not up, then drop the packet.
+ * o check channel status. If it's down then initiate a call.
+ * o pass a packet to corresponding WAN device.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ * protocol stack and can be used for flow control with protocol layer.
+ *
+ *========================================================================*/
+
+static int if_send(struct sk_buff* skb, struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ TX25Status* status = card->flags;
+ int udp_type;
+ unsigned long smp_flags=0;
+
+ ++chan->if_send_stat.if_send_entry;
+
+ netif_stop_queue(dev);
+
+ /* No need to check frame length, since socket code
+ * will perform the check for us */
+
+ chan->tick_counter = jiffies;
+
+ /* Critical region starts here */
+ S508_S514_lock(card, &smp_flags);
+
+ if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)){
+ printk(KERN_INFO "Hit critical in if_send()! %lx\n",card->wandev.critical);
+ goto if_send_crit_exit;
+ }
+
+ udp_type = udp_pkt_type(skb, card);
+
+ if(udp_type != UDP_INVALID_TYPE) {
+
+ if(store_udp_mgmt_pkt(udp_type, UDP_PKT_FRM_STACK, card, dev, skb,
+ chan->common.lcn)) {
+
+ status->imask |= INTR_ON_TIMER;
+ if (udp_type == UDP_XPIPE_TYPE){
+ chan->if_send_stat.if_send_PIPE_request++;
+ }
+ }
+ netif_start_queue(dev);
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+ S508_S514_unlock(card, &smp_flags);
+ return 0;
+ }
+
+ if (chan->transmit_length){
+ //FIXME: This check doesn't make sense any more
+ if (chan->common.state != WAN_CONNECTED){
+ chan->transmit_length=0;
+ atomic_set(&chan->common.driver_busy,0);
+ }else{
+ netif_stop_queue(dev);
+ ++card->u.x.tx_interrupts_pending;
+ status->imask |= INTR_ON_TX_FRAME;
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+ S508_S514_unlock(card, &smp_flags);
+ return 1;
+ }
+ }
+
+ if (card->wandev.state != WAN_CONNECTED){
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ ++chan->if_send_stat.if_send_wan_disconnected;
+
+ }else if ( chan->protocol && (chan->protocol != skb->protocol)){
+ printk(KERN_INFO
+ "%s: unsupported Ethertype 0x%04X on interface %s!\n",
+ chan->name, htons(skb->protocol), dev->name);
+
+ printk(KERN_INFO "PROTO %Xn", htons(chan->protocol));
+ ++chan->ifstats.tx_errors;
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ ++chan->if_send_stat.if_send_protocol_error;
+
+ }else switch (chan->common.state){
+
+ case WAN_DISCONNECTED:
+ /* Try to establish a connection. If it succeeded, start
+ * transmission, else drop the packet.
+ */
+ if (chan->common.usedby == API){
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ break;
+ }else{
+ if (chan_connect(dev) != 0){
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ break;
+ }
+ }
+ /* fall through */
+
+ case WAN_CONNECTED:
+ if( skb->protocol == htons(ETH_P_IPX)) {
+ if(chan->enable_IPX) {
+ switch_net_numbers( skb->data,
+ chan->network_number, 0);
+ } else {
+ ++card->wandev.stats.tx_dropped;
+ ++chan->ifstats.tx_dropped;
+ ++chan->if_send_stat.if_send_protocol_error;
+ goto if_send_crit_exit;
+ }
+ }
+ /* We never drop here; if we cannot send now, copy
+ * the packet into a transmit buffer
+ */
+ chan_send(dev, skb->data, skb->len, 0);
+ break;
+
+ default:
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ break;
+ }
+
+
+if_send_crit_exit:
+
+ dev_kfree_skb_any(skb);
+
+ netif_start_queue(dev);
+ clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
+ S508_S514_unlock(card, &smp_flags);
+ return 0;
+}
+
+/*============================================================================
+ * Setup so that a frame can be transmitted on the occurrence of a transmit
+ * interrupt.
+ *===========================================================================*/
+
+static void setup_for_delayed_transmit(struct net_device* dev, void* buf,
+ unsigned len)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ TX25Status* status = card->flags;
+
+ ++chan->if_send_stat.if_send_adptr_bfrs_full;
+
+ if(chan->transmit_length) {
+ printk(KERN_INFO "%s: Error, transmit length set in delayed transmit!\n",
+ card->devname);
+ return;
+ }
+
+ if (chan->common.usedby == API){
+ if (len > X25_CHAN_MTU+sizeof(x25api_hdr_t)) {
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ printk(KERN_INFO "%s: Length is too big for delayed transmit\n",
+ card->devname);
+ return;
+ }
+ }else{
+ if (len > X25_MAX_DATA) {
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ printk(KERN_INFO "%s: Length is too big for delayed transmit\n",
+ card->devname);
+ return;
+ }
+ }
+
+ chan->transmit_length = len;
+ atomic_set(&chan->common.driver_busy,1);
+ memcpy(chan->transmit_buffer, buf, len);
+
+ ++chan->if_send_stat.if_send_tx_int_enabled;
+
+ /* Enable Transmit Interrupt */
+ ++card->u.x.tx_interrupts_pending;
+ status->imask |= INTR_ON_TX_FRAME;
+}
+
+
+/*===============================================================
+ * net_device_stats
+ *
+ * Get ethernet-style interface statistics.
+ * Return a pointer to struct enet_statistics.
+ *
+ *==============================================================*/
+static struct net_device_stats *if_stats(struct net_device* dev)
+{
+ x25_channel_t *chan = dev->priv;
+
+ if(chan == NULL)
+ return NULL;
+
+ return &chan->ifstats;
+}
+
+
+/*
+ * Interrupt Handlers
+ */
+
+/*
+ * X.25 Interrupt Service Routine.
+ */
+
+static void wpx_isr (sdla_t* card)
+{
+ TX25Status* status = card->flags;
+
+ card->in_isr = 1;
+ ++card->statistics.isr_entry;
+
+ if (test_bit(PERI_CRIT,(void*)&card->wandev.critical)){
+ card->in_isr=0;
+ status->iflags = 0;
+ return;
+ }
+
+ if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)){
+
+ printk(KERN_INFO "%s: wpx_isr: wandev.critical set to 0x%02lx, int type = 0x%02x\n",
+ card->devname, card->wandev.critical, status->iflags);
+ card->in_isr = 0;
+ status->iflags = 0;
+ return;
+ }
+
+ /* For all interrupts set the critical flag to CRITICAL_RX_INTR.
+ * If the if_send routine is called with this flag set it will set
+ * the enable transmit flag to 1. (for a delayed interrupt)
+ */
+ switch (status->iflags){
+
+ case RX_INTR_PENDING: /* receive interrupt */
+ rx_intr(card);
+ break;
+
+ case TX_INTR_PENDING: /* transmit interrupt */
+ tx_intr(card);
+ break;
+
+ case MODEM_INTR_PENDING: /* modem status interrupt */
+ status_intr(card);
+ break;
+
+ case X25_ASY_TRANS_INTR_PENDING: /* network event interrupt */
+ event_intr(card);
+ break;
+
+ case TIMER_INTR_PENDING:
+ timer_intr(card);
+ break;
+
+ default: /* unwanted interrupt */
+ spur_intr(card);
+ }
+
+ card->in_isr = 0;
+ status->iflags = 0; /* clear interrupt condition */
+}
+
+/*
+ * Receive interrupt handler.
+ * This routine handles fragmented IP packets using M-bit according to the
+ * RFC1356.
+ * o map logical channel number to network interface.
+ * o allocate socket buffer or append received packet to the existing one.
+ * o if M-bit is reset (i.e. it's the last packet in a sequence) then
+ * decapsulate packet and pass socket buffer to the protocol stack.
+ *
+ * Notes:
+ * 1. When allocating a socket buffer, if M-bit is set then more data is
+ * coming and we have to allocate buffer for the maximum IP packet size
+ * expected on this channel.
+ * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
+ * socket buffers available) the whole packet sequence must be discarded.
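+ *
+ * Illustrative reassembly flow (sketch of the logic implemented below):
+ * M-bit set : append the fragment to chan->rx_skb and wait for more data
+ * M-bit clear : append the fragment, decapsulate, pass the skb to
+ * netif_rx() and reset chan->rx_skb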
+ */
+
+static void rx_intr (sdla_t* card)
+{
+ TX25Mbox* rxmb = card->rxmb;
+ unsigned lcn = rxmb->cmd.lcn;
+ struct net_device* dev = find_channel(card,lcn);
+ x25_channel_t* chan;
+ struct sk_buff* skb=NULL;
+
+ if (dev == NULL){
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
+ card->devname, lcn);
+ return;
+ }
+
+ chan = dev->priv;
+ chan->i_timeout_sofar = jiffies;
+
+
+ /* Copy the data from the board, into an
+ * skb buffer
+ */
+ if (wanpipe_pull_data_in_skb(card,dev,&skb)){
+ ++chan->ifstats.rx_dropped;
+ ++card->wandev.stats.rx_dropped;
+ ++chan->rx_intr_stat.rx_intr_no_socket;
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+ return;
+ }
+
+ dev->last_rx = jiffies; /* timestamp */
+
+
+ /* ------------ API ----------------*/
+
+ if (chan->common.usedby == API){
+
+ if (bh_enqueue(dev, skb)){
+ ++chan->ifstats.rx_dropped;
+ ++card->wandev.stats.rx_dropped;
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ++chan->ifstats.rx_packets;
+ chan->ifstats.rx_bytes += skb->len;
+
+
+ chan->rx_skb = NULL;
+ if (!test_and_set_bit(0, &chan->tq_working)){
+ wanpipe_queue_work(&chan->common.wanpipe_work);
+ }
+ return;
+ }
+
+
+ /* ------------- WANPIPE -------------------*/
+
+ /* set rx_skb to NULL so we won't access it later once the kernel owns it */
+ chan->rx_skb=NULL;
+
+ /* Decapsulate packet, if necessary */
+ if (!skb->protocol && !wanrouter_type_trans(skb, dev)){
+ /* can't decapsulate packet */
+ dev_kfree_skb_any(skb);
+ ++chan->ifstats.rx_errors;
+ ++chan->ifstats.rx_dropped;
+ ++card->wandev.stats.rx_dropped;
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+
+ }else{
+ if( handle_IPXWAN(skb->data, chan->name,
+ chan->enable_IPX, chan->network_number,
+ skb->protocol)){
+
+ if( chan->enable_IPX ){
+ if(chan_send(dev, skb->data, skb->len,0)){
+ chan->tx_skb = skb;
+ }else{
+ dev_kfree_skb_any(skb);
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+ }
+ }else{
+ /* increment IPX packet dropped statistic */
+ ++chan->ifstats.rx_dropped;
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+ }
+ }else{
+ skb->mac.raw = skb->data;
+ chan->ifstats.rx_bytes += skb->len;
+ ++chan->ifstats.rx_packets;
+ ++chan->rx_intr_stat.rx_intr_bfr_passed_to_stack;
+ netif_rx(skb);
+ }
+ }
+
+ return;
+}
+
+
+static int wanpipe_pull_data_in_skb(sdla_t *card, struct net_device *dev,
+ struct sk_buff **skb)
+{
+ void *bufptr;
+ TX25Mbox* rxmb = card->rxmb;
+ unsigned len = rxmb->cmd.length; /* packet length */
+ unsigned qdm = rxmb->cmd.qdm; /* Q,D and M bits */
+ x25_channel_t *chan = dev->priv;
+ struct sk_buff *new_skb = *skb;
+
+ if (chan->common.usedby == WANPIPE){
+ if (chan->drop_sequence){
+ if (!(qdm & 0x01)){
+ chan->drop_sequence = 0;
+ }
+ return 1;
+ }
+ new_skb = chan->rx_skb;
+ }else{
+ /* Add on the API header to the received
+ * data
+ */
+ len += sizeof(x25api_hdr_t);
+ }
+
+ if (new_skb == NULL){
+ int bufsize;
+
+ if (chan->common.usedby == WANPIPE){
+ bufsize = (qdm & 0x01) ? dev->mtu : len;
+ }else{
+ bufsize = len;
+ }
+
+ /* Allocate new socket buffer */
+ new_skb = dev_alloc_skb(bufsize + dev->hard_header_len);
+ if (new_skb == NULL){
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ chan->drop_sequence = 1; /* set flag */
+ ++chan->ifstats.rx_dropped;
+ return 1;
+ }
+ }
+
+ if (skb_tailroom(new_skb) < len){
+ /* No room for the packet. Call off the whole thing! */
+ dev_kfree_skb_any(new_skb);
+ if (chan->common.usedby == WANPIPE){
+ chan->rx_skb = NULL;
+ if (qdm & 0x01){
+ chan->drop_sequence = 1;
+ }
+ }
+
+ printk(KERN_INFO "%s: unexpectedly long packet sequence "
+ "on interface %s!\n", card->devname, dev->name);
+ ++chan->ifstats.rx_length_errors;
+ return 1;
+ }
+
+ bufptr = skb_put(new_skb,len);
+
+
+ if (chan->common.usedby == API){
+ /* Fill in the x25api header
+ */
+ x25api_t * api_data = (x25api_t*)bufptr;
+ api_data->hdr.qdm = rxmb->cmd.qdm;
+ api_data->hdr.cause = rxmb->cmd.cause;
+ api_data->hdr.diagn = rxmb->cmd.diagn;
+ api_data->hdr.length = rxmb->cmd.length;
+ memcpy(api_data->data, rxmb->data, rxmb->cmd.length);
+ }else{
+ memcpy(bufptr, rxmb->data, len);
+ }
+
+ new_skb->dev = dev;
+
+ if (chan->common.usedby == API){
+ new_skb->mac.raw = new_skb->data;
+ new_skb->protocol = htons(X25_PROT);
+ new_skb->pkt_type = WAN_PACKET_DATA;
+ }else{
+ new_skb->protocol = chan->protocol;
+ chan->rx_skb = new_skb;
+ }
+
+	/* If the qdm bit is set, more data is coming;
+	 * thus, exit and wait for more data before
+	 * sending the packet up. (Used by router only)
+	 */
+ if ((qdm & 0x01) && (chan->common.usedby == WANPIPE))
+ return 1;
+
+ *skb = new_skb;
+
+ return 0;
+}
+
+/*===============================================================
+ * tx_intr
+ *
+ * Transmit interrupt handler.
+ * For each dev, check that there is something to send.
+ * If data available, transmit.
+ *
+ *===============================================================*/
+
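+/* Note: card->u.x.tx_dev remembers where the previous scan left off, so
+ * transmission is attempted in round-robin fashion across all interfaces
+ * on successive TX interrupts.
+ */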
+static void tx_intr (sdla_t* card)
+{
+ struct net_device *dev;
+ TX25Status* status = card->flags;
+ unsigned char more_to_tx=0;
+ x25_channel_t *chan=NULL;
+ int i=0;
+
+ if (card->u.x.tx_dev == NULL){
+ card->u.x.tx_dev = card->wandev.dev;
+ }
+
+ dev = card->u.x.tx_dev;
+
+ for (;;){
+
+ chan = dev->priv;
+ if (chan->transmit_length){
+ /* Device was set to transmit, check if the TX
+ * buffers are available
+ */
+ if (chan->common.state != WAN_CONNECTED){
+ chan->transmit_length = 0;
+ atomic_set(&chan->common.driver_busy,0);
+ chan->tx_offset=0;
+ if (netif_queue_stopped(dev)){
+ if (chan->common.usedby == API){
+ netif_start_queue(dev);
+ wakeup_sk_bh(dev);
+ }else{
+ netif_wake_queue(dev);
+ }
+ }
+ dev = move_dev_to_next(card,dev);
+ break;
+ }
+
+ if ((status->cflags[chan->ch_idx] & 0x40 || card->u.x.LAPB_hdlc) &&
+ (*card->u.x.hdlc_buf_status & 0x40) ){
+ /* Tx buffer available, we can send */
+
+ if (tx_intr_send(card, dev)){
+ more_to_tx=1;
+ }
+
+ /* If more than one interface present, move the
+ * device pointer to the next interface, so on the
+ * next TX interrupt we will try sending from it.
+ */
+ dev = move_dev_to_next(card,dev);
+ break;
+ }else{
+ /* Tx buffers not available, but device set
+ * the TX interrupt. Set more_to_tx and try
+ * to transmit for other devices.
+ */
+ more_to_tx=1;
+ dev = move_dev_to_next(card,dev);
+ }
+
+ }else{
+ /* This device was not set to transmit,
+ * go to next
+ */
+ dev = move_dev_to_next(card,dev);
+ }
+
+ if (++i == card->u.x.no_dev){
+ if (!more_to_tx){
+ DBG_PRINTK(KERN_INFO "%s: Nothing to Send in TX INTR\n",
+ card->devname);
+ }
+ break;
+ }
+
+ } //End of FOR
+
+ card->u.x.tx_dev = dev;
+
+ if (!more_to_tx){
+ /* if any other interfaces have transmit interrupts pending, */
+ /* do not disable the global transmit interrupt */
+ if (!(--card->u.x.tx_interrupts_pending)){
+ status->imask &= ~INTR_ON_TX_FRAME;
+ }
+ }
+ return;
+}
+
+/*===============================================================
+ * move_dev_to_next
+ * Return the next device in the interface list; wrap around to the
+ * head of the list (card->wandev.dev) when the end is reached.
+ *===============================================================*/
+
+
+struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev)
+{
+ if (card->u.x.no_dev != 1){
+ if (!*((struct net_device **)dev->priv))
+ return card->wandev.dev;
+ else
+ return *((struct net_device **)dev->priv);
+ }
+ return dev;
+}
+
+/*===============================================================
+ * tx_intr_send
+ * Send the data buffered for this channel from the TX interrupt.
+ * Return non-zero if the packet was split up and more TX interrupts
+ * are needed to send the remainder.
+ *===============================================================*/
+
+static int tx_intr_send(sdla_t *card, struct net_device *dev)
+{
+ x25_channel_t* chan = dev->priv;
+
+ if (chan_send (dev,chan->transmit_buffer,chan->transmit_length,1)){
+
+ /* Packet was split up due to its size, do not disable
+ * tx_intr
+ */
+ return 1;
+ }
+
+ chan->transmit_length=0;
+ atomic_set(&chan->common.driver_busy,0);
+ chan->tx_offset=0;
+
+	/* If we are in API mode, wake up the
+	 * sock BH handler, not the NET_BH */
+ if (netif_queue_stopped(dev)){
+ if (chan->common.usedby == API){
+ netif_start_queue(dev);
+ wakeup_sk_bh(dev);
+ }else{
+ netif_wake_queue(dev);
+ }
+ }
+ return 0;
+}
+
+
+/*===============================================================
+ * timer_intr
+ *
+ * Timer interrupt handler.
+ * Check who called the timer interrupt and perform
+ * action accordingly.
+ *
+ *===============================================================*/
+
+static void timer_intr (sdla_t *card)
+{
+ TX25Status* status = card->flags;
+
+ if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC){
+
+ if (timer_intr_cmd_exec(card) == 0){
+ card->u.x.timer_int_enabled &=
+ ~TMR_INT_ENABLED_CMD_EXEC;
+ }
+
+ }else if(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UDP_PKT) {
+
+ if ((*card->u.x.hdlc_buf_status & 0x40) &&
+ card->u.x.udp_type == UDP_XPIPE_TYPE){
+
+ if(process_udp_mgmt_pkt(card)) {
+ card->u.x.timer_int_enabled &=
+ ~TMR_INT_ENABLED_UDP_PKT;
+ }
+ }
+
+ }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_ACTIVE) {
+
+ struct net_device *dev = card->u.x.poll_device;
+ x25_channel_t *chan = NULL;
+
+ if (!dev){
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_ACTIVE;
+ return;
+ }
+ chan = dev->priv;
+
+ printk(KERN_INFO
+ "%s: Closing down Idle link %s on LCN %d\n",
+ card->devname,chan->name,chan->common.lcn);
+ chan->i_timeout_sofar = jiffies;
+ chan_disc(dev);
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_ACTIVE;
+ card->u.x.poll_device=NULL;
+
+ }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_CONNECT_ON) {
+
+ wanpipe_set_state(card, WAN_CONNECTED);
+ if (card->u.x.LAPB_hdlc){
+ struct net_device *dev = card->wandev.dev;
+ set_chan_state(dev,WAN_CONNECTED);
+ send_delayed_cmd_result(card,dev,card->mbox);
+ }
+
+ /* 0x8F enable all interrupts */
+ x25_set_intr_mode(card, INTR_ON_RX_FRAME|
+ INTR_ON_TX_FRAME|
+ INTR_ON_MODEM_STATUS_CHANGE|
+ //INTR_ON_COMMAND_COMPLETE|
+ X25_ASY_TRANS_INTR_PENDING |
+ INTR_ON_TIMER |
+ DIRECT_RX_INTR_USAGE
+ );
+
+ status->imask &= ~INTR_ON_TX_FRAME; /* mask Tx interrupts */
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_CONNECT_ON;
+
+ }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_CONNECT_OFF) {
+
+ //printk(KERN_INFO "Poll connect, Turning OFF\n");
+ disconnect(card);
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_CONNECT_OFF;
+
+ }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_DISCONNECT) {
+
+ //printk(KERN_INFO "POll disconnect, trying to connect\n");
+ connect(card);
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_DISCONNECT;
+
+ }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE){
+
+ if (*card->u.x.hdlc_buf_status & 0x40){
+ x25_get_err_stats(card);
+ x25_get_stats(card);
+ card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
+ }
+ }
+
+ if(!card->u.x.timer_int_enabled){
+ //printk(KERN_INFO "Turning Timer Off \n");
+ status->imask &= ~INTR_ON_TIMER;
+ }
+}
+
+/*====================================================================
+ * Modem status interrupt handler.
+ *===================================================================*/
+static void status_intr (sdla_t* card)
+{
+
+ /* Added to avoid Modem status message flooding */
+ static TX25ModemStatus last_stat;
+
+ TX25Mbox* mbox = card->mbox;
+ TX25ModemStatus *modem_status;
+ struct net_device *dev;
+ x25_channel_t *chan;
+ int err;
+
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_READ_MODEM_STATUS;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err){
+ x25_error(card, err, X25_READ_MODEM_STATUS, 0);
+ }else{
+
+ modem_status = (TX25ModemStatus*)mbox->data;
+
+		/* Check if the last status was the same;
+		 * if it was, do NOT print the message again */
+
+ if (last_stat.status != modem_status->status){
+
+ printk(KERN_INFO "%s: Modem Status Change: DCD=%s, CTS=%s\n",
+ card->devname,DCD(modem_status->status),CTS(modem_status->status));
+
+ last_stat.status = modem_status->status;
+
+ if (card->u.x.oob_on_modem){
+
+ mbox->cmd.pktType = mbox->cmd.command;
+ mbox->cmd.result = 0x08;
+
+			/* Send an OOB message to all connected sockets */
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device**)dev->priv)) {
+ chan=dev->priv;
+ if (chan->common.usedby == API){
+ send_oob_msg(card,dev,mbox);
+ }
+ }
+
+ /* The modem OOB message will probably kill the
+			 * link. If we don't clear the flag here,
+ * a deadlock could occur */
+ if (atomic_read(&card->u.x.command_busy)){
+ atomic_set(&card->u.x.command_busy,0);
+ }
+ }
+ }
+ }
+
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_HDLC_LINK_STATUS;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err){
+ x25_error(card, err, X25_HDLC_LINK_STATUS, 0);
+ }
+
+}
+
+/*====================================================================
+ * Network event interrupt handler.
+ *===================================================================*/
+static void event_intr (sdla_t* card)
+{
+ x25_fetch_events(card);
+}
+
+/*====================================================================
+ * Spurious interrupt handler.
+ * o print a warning
+ *====================================================================*/
+
+static void spur_intr (sdla_t* card)
+{
+ printk(KERN_INFO "%s: spurious interrupt!\n", card->devname);
+}
+
+
+/*
+ * Background Polling Routines
+ */
+
+/*====================================================================
+ * Main polling routine.
+ * This routine is repeatedly called by the WANPIPE 'thread' to allow for
+ * time-dependent housekeeping work.
+ *
+ * Notes:
+ * 1. This routine may be called in interrupt context with all interrupts
+ * enabled. Beware!
+ *====================================================================*/
+
+static void wpx_poll (sdla_t *card)
+{
+ if (!card->wandev.dev){
+ goto wpx_poll_exit;
+ }
+
+ if (card->open_cnt != card->u.x.num_of_ch){
+ goto wpx_poll_exit;
+ }
+
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ goto wpx_poll_exit;
+ }
+
+ if (test_bit(SEND_CRIT,&card->wandev.critical)){
+ goto wpx_poll_exit;
+ }
+
+ switch(card->wandev.state){
+ case WAN_CONNECTED:
+ poll_active(card);
+ break;
+
+ case WAN_CONNECTING:
+ poll_connecting(card);
+ break;
+
+ case WAN_DISCONNECTED:
+ poll_disconnected(card);
+ break;
+ }
+
+wpx_poll_exit:
+ clear_bit(POLL_CRIT,&card->wandev.critical);
+ return;
+}
+
+static void trigger_x25_poll(sdla_t *card)
+{
+ schedule_work(&card->u.x.x25_poll_work);
+}
+
+/*====================================================================
+ * Handle physical link establishment phase.
+ * o if connection timed out, disconnect the link.
+ *===================================================================*/
+
+static void poll_connecting (sdla_t* card)
+{
+ volatile TX25Status* status = card->flags;
+
+ if (status->gflags & X25_HDLC_ABM){
+
+ timer_intr_exec (card, TMR_INT_ENABLED_POLL_CONNECT_ON);
+
+ }else if ((jiffies - card->state_tick) > CONNECT_TIMEOUT){
+
+ timer_intr_exec (card, TMR_INT_ENABLED_POLL_CONNECT_OFF);
+
+ }
+}
+
+/*====================================================================
+ * Handle physical link disconnected phase.
+ * o if hold-down timeout has expired and there are open interfaces,
+ * connect link.
+ *===================================================================*/
+
+static void poll_disconnected (sdla_t* card)
+{
+ struct net_device *dev;
+ x25_channel_t *chan;
+ TX25Status* status = card->flags;
+
+ if (!card->u.x.LAPB_hdlc && card->open_cnt &&
+ ((jiffies - card->state_tick) > HOLD_DOWN_TIME)){
+ timer_intr_exec(card, TMR_INT_ENABLED_POLL_DISCONNECT);
+ }
+
+
+ if ((dev=card->wandev.dev) == NULL)
+ return;
+
+ if ((chan=dev->priv) == NULL)
+ return;
+
+ if (chan->common.usedby == API &&
+ atomic_read(&chan->common.command) &&
+ card->u.x.LAPB_hdlc){
+
+ if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
+ card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
+
+ if (!(status->imask & INTR_ON_TIMER))
+ status->imask |= INTR_ON_TIMER;
+ }
+
+}
+
+/*====================================================================
+ * Handle active link phase.
+ * o fetch X.25 asynchronous events.
+ * o kick off transmission on all interfaces.
+ *===================================================================*/
+
+static void poll_active (sdla_t* card)
+{
+ struct net_device* dev;
+ TX25Status* status = card->flags;
+
+ for (dev = card->wandev.dev; dev;
+ dev = *((struct net_device **)dev->priv)){
+ x25_channel_t* chan = dev->priv;
+
+ /* If SVC has been idle long enough, close virtual circuit */
+ if ( chan->common.svc &&
+ chan->common.state == WAN_CONNECTED &&
+ chan->common.usedby == WANPIPE ){
+
+ if( (jiffies - chan->i_timeout_sofar) / HZ > chan->idle_timeout ){
+ /* Close svc */
+ card->u.x.poll_device=dev;
+ timer_intr_exec (card, TMR_INT_ENABLED_POLL_ACTIVE);
+ }
+ }
+
+#ifdef PRINT_DEBUG
+ chan->ifstats.tx_compressed = atomic_read(&chan->common.command);
+ chan->ifstats.tx_errors = chan->common.state;
+ chan->ifstats.rx_fifo_errors = atomic_read(&card->u.x.command_busy);
+ ++chan->ifstats.tx_bytes;
+
+ chan->ifstats.rx_fifo_errors=atomic_read(&chan->common.disconnect);
+ chan->ifstats.multicast=atomic_read(&chan->bh_buff_used);
+ chan->ifstats.rx_length_errors=*card->u.x.hdlc_buf_status;
+#endif
+
+ if (chan->common.usedby == API &&
+ atomic_read(&chan->common.command) &&
+ !card->u.x.LAPB_hdlc){
+
+ if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
+ card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
+
+ if (!(status->imask & INTR_ON_TIMER))
+ status->imask |= INTR_ON_TIMER;
+ }
+
+ if ((chan->common.usedby == API) &&
+ atomic_read(&chan->common.disconnect)){
+
+ if (chan->common.state == WAN_DISCONNECTED){
+ atomic_set(&chan->common.disconnect,0);
+ return;
+ }
+
+ atomic_set(&chan->common.command,X25_CLEAR_CALL);
+ if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
+ card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
+
+ if (!(status->imask & INTR_ON_TIMER))
+ status->imask |= INTR_ON_TIMER;
+ }
+ }
+}
+
+static void timer_intr_exec(sdla_t *card, unsigned char TYPE)
+{
+ TX25Status* status = card->flags;
+ card->u.x.timer_int_enabled |= TYPE;
+ if (!(status->imask & INTR_ON_TIMER))
+ status->imask |= INTR_ON_TIMER;
+}
+
+
+/*====================================================================
+ * SDLA Firmware-Specific Functions
+ *
+ * Almost all X.25 commands can unexpectedly fail due to so-called 'X.25
+ * asynchronous events' such as restart, interrupt, incoming call request,
+ * call clear request, etc. They can't be ignored and have to be dealt with
+ * immediately. To tackle this problem we execute each interface
+ * command in a loop until a good return code is received or the maximum
+ * number of retries is reached. Each time an interface command returns a
+ * non-zero return code, the asynchronous event/error handler x25_error()
+ * is called.
+ *====================================================================*/
+
+/*====================================================================
+ * Read X.25 firmware version.
+ * Put code version as ASCII string in str.
+ *===================================================================*/
+
+static int x25_get_version (sdla_t* card, char* str)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_READ_CODE_VERSION;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- &&
+ x25_error(card, err, X25_READ_CODE_VERSION, 0));
+
+ if (!err && str)
+ {
+ int len = mbox->cmd.length;
+
+ memcpy(str, mbox->data, len);
+ str[len] = '\0';
+ }
+ return err;
+}
+
+/*====================================================================
+ * Configure adapter.
+ *===================================================================*/
+
+static int x25_configure (sdla_t* card, TX25Config* conf)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do{
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ memcpy(mbox->data, (void*)conf, sizeof(TX25Config));
+ mbox->cmd.length = sizeof(TX25Config);
+ mbox->cmd.command = X25_SET_CONFIGURATION;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_SET_CONFIGURATION, 0));
+ return err;
+}
+
+/*====================================================================
+ * Configure adapter for HDLC only.
+ *===================================================================*/
+
+static int hdlc_configure (sdla_t* card, TX25Config* conf)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do{
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ memcpy(mbox->data, (void*)conf, sizeof(TX25Config));
+ mbox->cmd.length = sizeof(TX25Config);
+ mbox->cmd.command = X25_HDLC_SET_CONFIG;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_SET_CONFIGURATION, 0));
+
+ return err;
+}
+
+static int set_hdlc_level (sdla_t* card)
+{
+
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do{
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = SET_PROTOCOL_LEVEL;
+ mbox->cmd.length = 1;
+ mbox->data[0] = HDLC_LEVEL; //| DO_HDLC_LEVEL_ERROR_CHECKING;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, SET_PROTOCOL_LEVEL, 0));
+
+ return err;
+}
+
+
+
+/*====================================================================
+ * Get communications error statistics.
+ *====================================================================*/
+
+static int x25_get_err_stats (sdla_t* card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_HDLC_READ_COMM_ERR;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_HDLC_READ_COMM_ERR, 0));
+
+ if (!err)
+ {
+ THdlcCommErr* stats = (void*)mbox->data;
+
+ card->wandev.stats.rx_over_errors = stats->rxOverrun;
+ card->wandev.stats.rx_crc_errors = stats->rxBadCrc;
+ card->wandev.stats.rx_missed_errors = stats->rxAborted;
+ card->wandev.stats.tx_aborted_errors = stats->txAborted;
+ }
+ return err;
+}
+
+/*====================================================================
+ * Get protocol statistics.
+ *===================================================================*/
+
+static int x25_get_stats (sdla_t* card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_READ_STATISTICS;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_READ_STATISTICS, 0)) ;
+
+ if (!err)
+ {
+ TX25Stats* stats = (void*)mbox->data;
+
+ card->wandev.stats.rx_packets = stats->rxData;
+ card->wandev.stats.tx_packets = stats->txData;
+ }
+ return err;
+}
+
+/*====================================================================
+ * Close HDLC link.
+ *===================================================================*/
+
+static int x25_close_hdlc (sdla_t* card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_HDLC_LINK_CLOSE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_CLOSE, 0));
+
+ return err;
+}
+
+
+/*====================================================================
+ * Open HDLC link.
+ *===================================================================*/
+
+static int x25_open_hdlc (sdla_t* card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_HDLC_LINK_OPEN;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_OPEN, 0));
+
+ return err;
+}
+
+/*=====================================================================
+ * Setup HDLC link.
+ *====================================================================*/
+static int x25_setup_hdlc (sdla_t* card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_HDLC_LINK_SETUP;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_SETUP, 0));
+
+ return err;
+}
+
+/*====================================================================
+ * Set (raise/drop) DTR.
+ *===================================================================*/
+
+static int x25_set_dtr (sdla_t* card, int dtr)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->data[0] = 0;
+ mbox->data[2] = 0;
+ mbox->data[1] = dtr ? 0x02 : 0x01;
+ mbox->cmd.length = 3;
+ mbox->cmd.command = X25_SET_GLOBAL_VARS;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_SET_GLOBAL_VARS, 0));
+
+ return err;
+}
+
+/*====================================================================
+ * Set interrupt mode.
+ *===================================================================*/
+
+static int x25_set_intr_mode (sdla_t* card, int mode)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->data[0] = mode;
+ if (card->hw.fwid == SFID_X25_508){
+ mbox->data[1] = card->hw.irq;
+ mbox->data[2] = 2;
+ mbox->cmd.length = 3;
+ }else {
+ mbox->cmd.length = 1;
+ }
+ mbox->cmd.command = X25_SET_INTERRUPT_MODE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_SET_INTERRUPT_MODE, 0));
+
+ return err;
+}
+
+/*====================================================================
+ * Read X.25 channel configuration.
+ *===================================================================*/
+
+static int x25_get_chan_conf (sdla_t* card, x25_channel_t* chan)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int lcn = chan->common.lcn;
+ int err;
+
+ do{
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.lcn = lcn;
+ mbox->cmd.command = X25_READ_CHANNEL_CONFIG;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_READ_CHANNEL_CONFIG, lcn));
+
+ if (!err)
+ {
+ TX25Status* status = card->flags;
+
+ /* calculate an offset into the array of status bytes */
+ if (card->u.x.hi_svc <= X25_MAX_CHAN){
+
+ chan->ch_idx = lcn - 1;
+
+ }else{
+ int offset;
+
+ /* FIX: Apr 14 2000 : Nenad Corbic
+ * The data field was being compared to 0x1F using
+ * '&&' instead of '&'.
+ * This caused X25API to fail for LCNs greater than 255.
+ */
+ switch (mbox->data[0] & 0x1F)
+ {
+ case 0x01:
+ offset = status->pvc_map; break;
+ case 0x03:
+ offset = status->icc_map; break;
+ case 0x07:
+ offset = status->twc_map; break;
+ case 0x0B:
+ offset = status->ogc_map; break;
+ default:
+ offset = 0;
+ }
+ chan->ch_idx = lcn - 1 - offset;
+ }
+
+ /* get actual transmit packet size on this channel */
+ switch(mbox->data[1] & 0x38)
+ {
+ case 0x00:
+ chan->tx_pkt_size = 16;
+ break;
+ case 0x08:
+ chan->tx_pkt_size = 32;
+ break;
+ case 0x10:
+ chan->tx_pkt_size = 64;
+ break;
+ case 0x18:
+ chan->tx_pkt_size = 128;
+ break;
+ case 0x20:
+ chan->tx_pkt_size = 256;
+ break;
+ case 0x28:
+ chan->tx_pkt_size = 512;
+ break;
+ case 0x30:
+ chan->tx_pkt_size = 1024;
+ break;
+ }
+ if (card->u.x.logging)
+ printk(KERN_INFO "%s: X.25 packet size on LCN %d is %d.\n",
+ card->devname, lcn, chan->tx_pkt_size);
+ }
+ return err;
+}
+
+/*====================================================================
+ * Place X.25 call.
+ *====================================================================*/
+
+static int x25_place_call (sdla_t* card, x25_channel_t* chan)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+ char str[64];
+
+
+ if (chan->protocol == htons(ETH_P_IP)){
+ sprintf(str, "-d%s -uCC", chan->addr);
+
+ }else if (chan->protocol == htons(ETH_P_IPX)){
+ sprintf(str, "-d%s -u800000008137", chan->addr);
+
+ }
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ strcpy(mbox->data, str);
+ mbox->cmd.length = strlen(str);
+ mbox->cmd.command = X25_PLACE_CALL;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_PLACE_CALL, 0));
+
+ if (!err){
+ bind_lcn_to_dev (card, chan->dev, mbox->cmd.lcn);
+ }
+ return err;
+}
+
+/*====================================================================
+ * Accept X.25 call.
+ *====================================================================*/
+
+static int x25_accept_call (sdla_t* card, int lcn, int qdm)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.lcn = lcn;
+ mbox->cmd.qdm = qdm;
+ mbox->cmd.command = X25_ACCEPT_CALL;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_ACCEPT_CALL, lcn));
+
+ return err;
+}
+
+/*====================================================================
+ * Clear X.25 call.
+ *====================================================================*/
+
+static int x25_clear_call (sdla_t* card, int lcn, int cause, int diagn)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.lcn = lcn;
+ mbox->cmd.cause = cause;
+ mbox->cmd.diagn = diagn;
+ mbox->cmd.command = X25_CLEAR_CALL;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, X25_CLEAR_CALL, lcn));
+
+ return err;
+}
+
+/*====================================================================
+ * Send X.25 data packet.
+ *====================================================================*/
+
+static int x25_send (sdla_t* card, int lcn, int qdm, int len, void* buf)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = MAX_CMD_RETRY;
+ int err;
+ unsigned char cmd;
+
+ if (card->u.x.LAPB_hdlc)
+ cmd = X25_HDLC_WRITE;
+ else
+ cmd = X25_WRITE;
+
+ do
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ memcpy(mbox->data, buf, len);
+ mbox->cmd.length = len;
+ mbox->cmd.lcn = lcn;
+
+ if (card->u.x.LAPB_hdlc){
+ mbox->cmd.pf = qdm;
+ }else{
+ mbox->cmd.qdm = qdm;
+ }
+
+ mbox->cmd.command = cmd;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && retry-- && x25_error(card, err, cmd , lcn));
+
+
+	/* If buffers are busy, the return code for LAPB HDLC is
+	 * 1. The code above looks for a return code of
+	 * X25RES_NOT_READY when busy. */
+
+ if (card->u.x.LAPB_hdlc && err == 1){
+ err = X25RES_NOT_READY;
+ }
+
+ return err;
+}
+
+/*====================================================================
+ * Fetch X.25 asynchronous events.
+ *===================================================================*/
+
+static int x25_fetch_events (sdla_t* card)
+{
+ TX25Status* status = card->flags;
+ TX25Mbox* mbox = card->mbox;
+ int err = 0;
+
+ if (status->gflags & 0x20)
+ {
+ memset(&mbox->cmd, 0, sizeof(TX25Cmd));
+ mbox->cmd.command = X25_IS_DATA_AVAILABLE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err) x25_error(card, err, X25_IS_DATA_AVAILABLE, 0);
+ }
+ return err;
+}
+
+/*====================================================================
+ * X.25 asynchronous event/error handler.
+ * This routine is called each time an interface command returns a
+ * non-zero return code, to handle X.25 asynchronous events and
+ * common errors. Return non-zero to repeat the command or zero to
+ * cancel it.
+ *
+ * Notes:
+ * 1. This function may be called recursively, as handling some of the
+ * asynchronous events (e.g. call request) requires execution of the
+ * interface command(s) that, in turn, may also return asynchronous
+ * events. To avoid re-entrancy problems we copy mailbox to dynamically
+ * allocated memory before processing events.
+ *====================================================================*/
+
+static int x25_error (sdla_t* card, int err, int cmd, int lcn)
+{
+ int retry = 1;
+ unsigned dlen = ((TX25Mbox*)card->mbox)->cmd.length;
+ TX25Mbox* mb;
+
+ mb = kmalloc(sizeof(TX25Mbox) + dlen, GFP_ATOMIC);
+ if (mb == NULL)
+ {
+ printk(KERN_ERR "%s: x25_error() out of memory!\n",
+ card->devname);
+ return 0;
+ }
+ memcpy(mb, card->mbox, sizeof(TX25Mbox) + dlen);
+ switch (err){
+
+ case X25RES_ASYNC_PACKET: /* X.25 asynchronous packet was received */
+
+ mb->data[dlen] = '\0';
+
+ switch (mb->cmd.pktType & 0x7F){
+
+ case ASE_CALL_RQST: /* incoming call */
+ retry = incoming_call(card, cmd, lcn, mb);
+ break;
+
+ case ASE_CALL_ACCEPTED: /* connected */
+ retry = call_accepted(card, cmd, lcn, mb);
+ break;
+
+ case ASE_CLEAR_RQST: /* call clear request */
+ retry = call_cleared(card, cmd, lcn, mb);
+ break;
+
+ case ASE_RESET_RQST: /* reset request */
+ printk(KERN_INFO "%s: X.25 reset request on LCN %d! "
+ "Cause:0x%02X Diagn:0x%02X\n",
+ card->devname, mb->cmd.lcn, mb->cmd.cause,
+ mb->cmd.diagn);
+ api_oob_event (card,mb);
+ break;
+
+ case ASE_RESTART_RQST: /* restart request */
+ retry = restart_event(card, cmd, lcn, mb);
+ break;
+
+ case ASE_CLEAR_CONFRM:
+ if (clear_confirm_event (card,mb))
+ break;
+
+ /* I use the goto statement here so if
+ * somebody inserts code between the
+ * case and default, we will not have
+ * ghost problems */
+
+ goto dflt_1;
+
+ default:
+dflt_1:
+ printk(KERN_INFO "%s: X.25 event 0x%02X on LCN %d! "
+ "Cause:0x%02X Diagn:0x%02X\n",
+ card->devname, mb->cmd.pktType,
+ mb->cmd.lcn, mb->cmd.cause, mb->cmd.diagn);
+ }
+ break;
+
+ case X25RES_PROTO_VIOLATION: /* X.25 protocol violation indication */
+
+ /* Bug Fix: Mar 14 2000
+ * The Protocol violation error conditions were
+ * not handled previously */
+
+ switch (mb->cmd.pktType & 0x7F){
+
+ case PVE_CLEAR_RQST: /* Clear request */
+ retry = call_cleared(card, cmd, lcn, mb);
+ break;
+
+ case PVE_RESET_RQST: /* Reset request */
+ printk(KERN_INFO "%s: X.25 reset request on LCN %d! "
+ "Cause:0x%02X Diagn:0x%02X\n",
+ card->devname, mb->cmd.lcn, mb->cmd.cause,
+ mb->cmd.diagn);
+ api_oob_event (card,mb);
+ break;
+
+ case PVE_RESTART_RQST: /* Restart request */
+ retry = restart_event(card, cmd, lcn, mb);
+ break;
+
+ default :
+ printk(KERN_INFO
+ "%s: X.25 protocol violation on LCN %d! "
+ "Packet:0x%02X Cause:0x%02X Diagn:0x%02X\n",
+ card->devname, mb->cmd.lcn,
+ mb->cmd.pktType & 0x7F, mb->cmd.cause, mb->cmd.diagn);
+ api_oob_event(card,mb);
+ }
+ break;
+
+ case 0x42: /* X.25 timeout */
+ retry = timeout_event(card, cmd, lcn, mb);
+ break;
+
+ case 0x43: /* X.25 retry limit exceeded */
+ printk(KERN_INFO
+ "%s: exceeded X.25 retry limit on LCN %d! "
+ "Packet:0x%02X Diagn:0x%02X\n", card->devname,
+ mb->cmd.lcn, mb->cmd.pktType, mb->cmd.diagn)
+ ;
+ break;
+
+ case 0x08: /* modem failure */
+#ifndef MODEM_NOT_LOG
+ printk(KERN_INFO "%s: modem failure!\n", card->devname);
+#endif /* MODEM_NOT_LOG */
+ api_oob_event(card,mb);
+ break;
+
+ case 0x09: /* N2 retry limit */
+ printk(KERN_INFO "%s: exceeded HDLC retry limit!\n",
+ card->devname);
+ api_oob_event(card,mb);
+ break;
+
+ case 0x06: /* unnumbered frame was received while in ABM */
+ printk(KERN_INFO "%s: received Unnumbered frame 0x%02X!\n",
+ card->devname, mb->data[0]);
+ api_oob_event(card,mb);
+ break;
+
+ case CMD_TIMEOUT:
+ printk(KERN_ERR "%s: command 0x%02X timed out!\n",
+ card->devname, cmd)
+ ;
+ retry = 0; /* abort command */
+ break;
+
+ case X25RES_NOT_READY:
+ retry = 1;
+ break;
+
+ case 0x01:
+ if (card->u.x.LAPB_hdlc)
+ break;
+
+ if (mb->cmd.command == 0x16)
+ break;
+ /* I use the goto statement here so if
+ * somebody inserts code between the
+ * case and default, we will not have
+ * ghost problems */
+ goto dflt_2;
+
+ default:
+dflt_2:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X! Lcn %i\n",
+ card->devname, cmd, err, mb->cmd.lcn)
+ ;
+ retry = 0; /* abort command */
+ }
+ kfree(mb);
+ return retry;
+}
+
+/*====================================================================
+ * X.25 Asynchronous Event Handlers
+ * These functions are called by the x25_error() and should return 0, if
+ * the command resulting in the asynchronous event must be aborted.
+ *====================================================================*/
+
+
+
+/*====================================================================
+ * Handle X.25 incoming call request.
+ * RFC 1356 establishes the following rules:
+ * 1. The first octet in the Call User Data (CUD) field of the call
+ *    request packet contains an NLPID identifying the protocol
+ *    encapsulation.
+ * 2. Calls MUST NOT be accepted unless the router supports the requested
+ *    protocol encapsulation.
+ * 3. A diagnostic code 249 defined by ISO/IEC 8208 may be used
+ *    when clearing a call because protocol encapsulation is not
+ *    supported.
+ * 4. If an incoming call is received while a call request is
+ *    pending (i.e. call collision has occurred), the incoming call
+ *    shall be rejected and the call request shall be retried.
+ *====================================================================*/
+
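+/* The NLPID values checked below are presumably the same ones sent as the
+ * first Call User Data byte when placing outgoing calls (see
+ * x25_place_call()): 0xCC for IP and 0x80 (SNAP) for IPX.
+ */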
+static int incoming_call (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
+{
+ struct wan_device* wandev = &card->wandev;
+ int new_lcn = mb->cmd.lcn;
+ struct net_device* dev = get_dev_by_lcn(wandev, new_lcn);
+ x25_channel_t* chan = NULL;
+ int accept = 0; /* set to '1' if o.k. to accept call */
+ unsigned int user_data;
+ x25_call_info_t* info;
+
+ /* Make sure there is no call collision */
+ if (dev != NULL)
+ {
+ printk(KERN_INFO
+ "%s: X.25 incoming call collision on LCN %d!\n",
+ card->devname, new_lcn);
+
+ x25_clear_call(card, new_lcn, 0, 0);
+ return 1;
+ }
+
+ /* Make sure D bit is not set in call request */
+//FIXME: THIS IS NOT TRUE !!!! TAKE IT OUT
+// if (mb->cmd.qdm & 0x02)
+// {
+// printk(KERN_INFO
+// "%s: X.25 incoming call on LCN %d with D-bit set!\n",
+// card->devname, new_lcn);
+//
+// x25_clear_call(card, new_lcn, 0, 0);
+// return 1;
+// }
+
+ /* Parse call request data */
+ info = kmalloc(sizeof(x25_call_info_t), GFP_ATOMIC);
+ if (info == NULL)
+ {
+ printk(KERN_ERR
+ "%s: not enough memory to parse X.25 incoming call "
+ "on LCN %d!\n", card->devname, new_lcn);
+ x25_clear_call(card, new_lcn, 0, 0);
+ return 1;
+ }
+
+ parse_call_info(mb->data, info);
+
+ if (card->u.x.logging)
+ printk(KERN_INFO "\n%s: X.25 incoming call on LCN %d!\n",
+ card->devname, new_lcn);
+
+	/* Convert the first two ASCII characters into an
+	 * integer. Used to check the incoming protocol.
+	 */
+ user_data = hex_to_uint(info->user,2);
+
+ /* Find available channel */
+ for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv)) {
+ chan = dev->priv;
+
+ if (chan->common.usedby == API)
+ continue;
+
+ if (!chan->common.svc || (chan->common.state != WAN_DISCONNECTED))
+ continue;
+
+ if (user_data == NLPID_IP && chan->protocol != htons(ETH_P_IP)){
+ printk(KERN_INFO "IP packet but configured for IPX : %x, %x\n",
+ htons(chan->protocol), info->user[0]);
+ continue;
+ }
+
+ if (user_data == NLPID_SNAP && chan->protocol != htons(ETH_P_IPX)){
+ printk(KERN_INFO "IPX packet but configured for IP: %x\n",
+ htons(chan->protocol));
+ continue;
+ }
+ if (strcmp(info->src, chan->addr) == 0)
+ break;
+
+		/* If the configured address is empty (just an '@' was
+		 * specified), accept all incoming calls */
+ if (strcmp(chan->addr, "") == 0)
+ break;
+ }
+
+ if (dev == NULL){
+
+		/* If the call is not for any WANPIPE interface,
+		 * check to see if there is an API listening queue
+		 * waiting for data. If there is, send the packet
+		 * up the stack.
+		 */
+ if (card->sk != NULL && card->func != NULL){
+ if (api_incoming_call(card,mb,new_lcn)){
+ x25_clear_call(card, new_lcn, 0, 0);
+ }
+ accept = 0;
+ }else{
+ printk(KERN_INFO "%s: no channels available!\n",
+ card->devname);
+
+ x25_clear_call(card, new_lcn, 0, 0);
+ }
+
+ }else if (info->nuser == 0){
+
+ printk(KERN_INFO
+ "%s: no user data in incoming call on LCN %d!\n",
+ card->devname, new_lcn)
+ ;
+ x25_clear_call(card, new_lcn, 0, 0);
+
+ }else switch (info->user[0]){
+
+ case 0: /* multiplexed */
+ chan->protocol = htons(0);
+ accept = 1;
+ break;
+
+ case NLPID_IP: /* IP datagrams */
+ accept = 1;
+ break;
+
+ case NLPID_SNAP: /* IPX datagrams */
+ accept = 1;
+ break;
+
+ default:
+ printk(KERN_INFO
+ "%s: unsupported NLPID 0x%02X in incoming call "
+ "on LCN %d!\n", card->devname, info->user[0], new_lcn);
+ x25_clear_call(card, new_lcn, 0, 249);
+ }
+
+ if (accept && (x25_accept_call(card, new_lcn, 0) == CMD_OK)){
+
+ bind_lcn_to_dev (card, chan->dev, new_lcn);
+
+ if (x25_get_chan_conf(card, chan) == CMD_OK)
+ set_chan_state(dev, WAN_CONNECTED);
+ else
+ x25_clear_call(card, new_lcn, 0, 0);
+ }
+ kfree(info);
+ return 1;
+}
+
+/*====================================================================
+ * Handle accepted call.
+ *====================================================================*/
+
+static int call_accepted (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
+{
+ unsigned new_lcn = mb->cmd.lcn;
+ struct net_device* dev = find_channel(card, new_lcn);
+ x25_channel_t* chan;
+
+ if (dev == NULL){
+ printk(KERN_INFO
+ "%s: clearing orphaned connection on LCN %d!\n",
+ card->devname, new_lcn);
+ x25_clear_call(card, new_lcn, 0, 0);
+ return 1;
+ }
+
+ if (card->u.x.logging)
+ printk(KERN_INFO "%s: X.25 call accepted on Dev %s and LCN %d!\n",
+ card->devname, dev->name, new_lcn);
+
+ /* Get channel configuration and notify router */
+ chan = dev->priv;
+ if (x25_get_chan_conf(card, chan) != CMD_OK)
+ {
+ x25_clear_call(card, new_lcn, 0, 0);
+ return 1;
+ }
+
+ set_chan_state(dev, WAN_CONNECTED);
+
+ if (chan->common.usedby == API){
+ send_delayed_cmd_result(card,dev,mb);
+ bind_lcn_to_dev (card, dev, new_lcn);
+ }
+
+ return 1;
+}
+
+/*====================================================================
+ * Handle cleared call.
+ *====================================================================*/
+
+static int call_cleared (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
+{
+ unsigned new_lcn = mb->cmd.lcn;
+ struct net_device* dev = find_channel(card, new_lcn);
+ x25_channel_t *chan;
+ unsigned char old_state;
+
+ if (card->u.x.logging){
+ printk(KERN_INFO "%s: X.25 clear request on LCN %d! Cause:0x%02X "
+ "Diagn:0x%02X\n",
+ card->devname, new_lcn, mb->cmd.cause, mb->cmd.diagn);
+ }
+
+ if (dev == NULL){
+ printk(KERN_INFO "%s: X.25 clear request : No device for clear\n",
+ card->devname);
+ return 1;
+ }
+
+ chan=dev->priv;
+
+ old_state = chan->common.state;
+
+ set_chan_state(dev, WAN_DISCONNECTED);
+
+ if (chan->common.usedby == API){
+
+ switch (old_state){
+
+ case WAN_CONNECTING:
+ send_delayed_cmd_result(card,dev,mb);
+ break;
+ case WAN_CONNECTED:
+ send_oob_msg(card,dev,mb);
+ break;
+ }
+ }
+
+ return ((cmd == X25_WRITE) && (lcn == new_lcn)) ? 0 : 1;
+}
+
+/*====================================================================
+ * Handle X.25 restart event.
+ *====================================================================*/
+
+static int restart_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
+{
+ struct wan_device* wandev = &card->wandev;
+ struct net_device* dev;
+ x25_channel_t *chan;
+ unsigned char old_state;
+
+ printk(KERN_INFO
+ "%s: X.25 restart request! Cause:0x%02X Diagn:0x%02X\n",
+ card->devname, mb->cmd.cause, mb->cmd.diagn);
+
+ /* down all logical channels */
+ for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv)) {
+ chan=dev->priv;
+ old_state = chan->common.state;
+
+ set_chan_state(dev, WAN_DISCONNECTED);
+
+ if (chan->common.usedby == API){
+ switch (old_state){
+
+ case WAN_CONNECTING:
+ send_delayed_cmd_result(card,dev,mb);
+ break;
+ case WAN_CONNECTED:
+ send_oob_msg(card,dev,mb);
+ break;
+ }
+ }
+ }
+ return (cmd == X25_WRITE) ? 0 : 1;
+}
+
+/*====================================================================
+ * Handle timeout event.
+ *====================================================================*/
+
+static int timeout_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
+{
+ unsigned new_lcn = mb->cmd.lcn;
+
+ if (mb->cmd.pktType == 0x05) /* call request time out */
+ {
+ struct net_device* dev = find_channel(card,new_lcn);
+
+		printk(KERN_INFO "%s: X.25 call timed out on LCN %d!\n",
+ card->devname, new_lcn);
+
+ if (dev){
+ x25_channel_t *chan = dev->priv;
+ set_chan_state(dev, WAN_DISCONNECTED);
+
+ if (chan->common.usedby == API){
+ send_delayed_cmd_result(card,dev,card->mbox);
+ }
+ }
+ }else{
+ printk(KERN_INFO "%s: X.25 packet 0x%02X timeout on LCN %d!\n",
+ card->devname, mb->cmd.pktType, new_lcn);
+ }
+ return 1;
+}
+
+/*
+ * Miscellaneous
+ */
+
+/*====================================================================
+ * Establish physical connection.
+ * o open HDLC and raise DTR
+ *
+ * Return: 0 connection established
+ * 1 connection is in progress
+ * <0 error
+ *===================================================================*/
+
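+/* Note: on success this function only reports "connection in progress";
+ * the transition to WAN_CONNECTED happens later via poll_connecting() and
+ * the TMR_INT_ENABLED_POLL_CONNECT_ON timer-interrupt path above.
+ */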
+static int connect (sdla_t* card)
+{
+ TX25Status* status = card->flags;
+
+ if (x25_open_hdlc(card) || x25_setup_hdlc(card))
+ return -EIO;
+
+ wanpipe_set_state(card, WAN_CONNECTING);
+
+ x25_set_intr_mode(card, INTR_ON_TIMER);
+ status->imask &= ~INTR_ON_TIMER;
+
+ return 1;
+}
+
+/*
+ * Tear down physical connection.
+ * o close HDLC link
+ * o drop DTR
+ *
+ * Return: 0
+ * <0 error
+ */
+
+static int disconnect (sdla_t* card)
+{
+ wanpipe_set_state(card, WAN_DISCONNECTED);
+	x25_set_intr_mode(card, INTR_ON_TIMER);	/* disable all interrupts except timer */
+ x25_close_hdlc(card); /* close HDLC link */
+ x25_set_dtr(card, 0); /* drop DTR */
+ return 0;
+}
+
+/*
+ * Find network device by its channel number.
+ */
+
+static struct net_device* get_dev_by_lcn(struct wan_device* wandev,
+ unsigned lcn)
+{
+ struct net_device* dev;
+
+ for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv))
+ if (((x25_channel_t*)dev->priv)->common.lcn == lcn)
+ break;
+ return dev;
+}
+
+/*
+ * Initiate connection on the logical channel.
+ * o for PVC we just get channel configuration
+ * o for SVCs place an X.25 call
+ *
+ * Return: 0 connected
+ * >0 connection in progress
+ * <0 failure
+ */
+
+static int chan_connect(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+
+ if (chan->common.svc && chan->common.usedby == WANPIPE){
+ if (!chan->addr[0]){
+ printk(KERN_INFO "%s: No Destination Address\n",
+ card->devname);
+ return -EINVAL; /* no destination address */
+ }
+ printk(KERN_INFO "%s: placing X.25 call to %s ...\n",
+ card->devname, chan->addr);
+
+ if (x25_place_call(card, chan) != CMD_OK)
+ return -EIO;
+
+ set_chan_state(dev, WAN_CONNECTING);
+ return 1;
+ }else{
+ if (x25_get_chan_conf(card, chan) != CMD_OK)
+ return -EIO;
+
+ set_chan_state(dev, WAN_CONNECTED);
+ }
+ return 0;
+}
+
+/*
+ * Disconnect logical channel.
+ * o if SVC then clear X.25 call
+ */
+
+static int chan_disc(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+
+ if (chan->common.svc){
+ x25_clear_call(chan->card, chan->common.lcn, 0, 0);
+
+ /* For API we disconnect on clear
+ * confirmation.
+ */
+ if (chan->common.usedby == API)
+ return 0;
+ }
+
+ set_chan_state(dev, WAN_DISCONNECTED);
+
+ return 0;
+}
+
+/*
+ * Set logical channel state.
+ */
+
+static void set_chan_state(struct net_device* dev, int state)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if (chan->common.state != state)
+ {
+ switch (state)
+ {
+ case WAN_CONNECTED:
+ if (card->u.x.logging){
+ printk (KERN_INFO
+ "%s: interface %s connected, lcn %i !\n",
+ card->devname, dev->name,chan->common.lcn);
+ }
+ *(unsigned short*)dev->dev_addr = htons(chan->common.lcn);
+ chan->i_timeout_sofar = jiffies;
+
+ /* LAPB is PVC Based */
+ if (card->u.x.LAPB_hdlc)
+ chan->common.svc=0;
+ break;
+
+ case WAN_CONNECTING:
+ if (card->u.x.logging){
+ printk (KERN_INFO
+ "%s: interface %s connecting, lcn %i ...\n",
+ card->devname, dev->name, chan->common.lcn);
+ }
+ break;
+
+ case WAN_DISCONNECTED:
+ if (card->u.x.logging){
+ printk (KERN_INFO
+ "%s: interface %s disconnected, lcn %i !\n",
+ card->devname, dev->name,chan->common.lcn);
+ }
+ atomic_set(&chan->common.disconnect,0);
+
+ if (chan->common.svc) {
+ *(unsigned short*)dev->dev_addr = 0;
+ card->u.x.svc_to_dev_map[(chan->common.lcn%X25_MAX_CHAN)]=NULL;
+ chan->common.lcn = 0;
+ }
+
+ if (chan->transmit_length){
+ chan->transmit_length=0;
+ atomic_set(&chan->common.driver_busy,0);
+ chan->tx_offset=0;
+ if (netif_queue_stopped(dev)){
+ netif_wake_queue(dev);
+ }
+ }
+ atomic_set(&chan->common.command,0);
+ break;
+
+ case WAN_DISCONNECTING:
+ if (card->u.x.logging){
+ printk (KERN_INFO
+ "\n%s: interface %s disconnecting, lcn %i ...\n",
+ card->devname, dev->name,chan->common.lcn);
+ }
+ atomic_set(&chan->common.disconnect,0);
+ break;
+ }
+ chan->common.state = state;
+ }
+ chan->state_tick = jiffies;
+ restore_flags(flags);
+}
+
+/*
+ * Send packet on a logical channel.
+ * When this function is called, tx_skb field of the channel data
+ * space points to the transmit socket buffer. When transmission
+ * is complete, release socket buffer and reset 'tbusy' flag.
+ *
+ * Return: 0 - transmission complete
+ * 1 - busy
+ *
+ * Notes:
+ * 1. If packet length is greater than MTU for this channel, we'll fragment
+ *    the packet into a 'complete sequence' using the M-bit.
+ * 2. When transmission is complete, an event notification should be issued
+ * to the router.
+ */
+
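+/* A note on the 0x40 tests below: bit 0x40 in status->cflags[ch_idx] and in
+ * *card->u.x.hdlc_buf_status appears to be the firmware's "Tx buffer
+ * available" indication (the same test is used in tx_intr()).
+ */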
+static int chan_send(struct net_device* dev, void* buff, unsigned data_len,
+ unsigned char tx_intr)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ TX25Status* status = card->flags;
+ unsigned len=0, qdm=0, res=0, orig_len = 0;
+ void *data;
+
+ /* Check to see if channel is ready */
+ if ((!(status->cflags[chan->ch_idx] & 0x40) && !card->u.x.LAPB_hdlc) ||
+ !(*card->u.x.hdlc_buf_status & 0x40)){
+
+ if (!tx_intr){
+ setup_for_delayed_transmit (dev, buff, data_len);
+ return 0;
+ }else{
+ /* By returning 0 to tx_intr the packet will be dropped */
+ ++card->wandev.stats.tx_dropped;
+ ++chan->ifstats.tx_dropped;
+ printk(KERN_INFO "%s: ERROR, Tx intr could not send, dropping %s:\n",
+ card->devname,dev->name);
+ ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
+ return 0;
+ }
+ }
+
+ if (chan->common.usedby == API){
+ /* Remove the API Header */
+ x25api_hdr_t *api_data = (x25api_hdr_t *)buff;
+
+		/* Set the qdm bits from the packet header.
+		 * The user has the option to set the qdm bits.
+		 */
+ qdm = api_data->qdm;
+
+ orig_len = len = data_len - sizeof(x25api_hdr_t);
+ data = (unsigned char*)buff + sizeof(x25api_hdr_t);
+ }else{
+ data = buff;
+ orig_len = len = data_len;
+ }
+
+ if (tx_intr){
+		/* We are in tx_intr; subtract the tx_offset from
+		 * the total length. The tx_offset part of the
+		 * data has already been sent. Also, move the
+		 * data pointer to the proper offset location.
+ */
+ len -= chan->tx_offset;
+ data = (unsigned char*)data + chan->tx_offset;
+ }
+
+ /* Check if the packet length is greater than MTU
+ * If YES: Cut the len to MTU and set the M bit
+ */
+ if (len > chan->tx_pkt_size && !card->u.x.LAPB_hdlc){
+ len = chan->tx_pkt_size;
+ qdm |= M_BIT;
+ }
+
+
+	/* Pass only the first three bits of the qdm byte to the send
+	 * routine, in case the user sets any other bit, which might
+	 * cause errors.
+ */
+
+ switch(x25_send(card, chan->common.lcn, (qdm&0x07), len, data)){
+ case 0x00: /* success */
+ chan->i_timeout_sofar = jiffies;
+
+ dev->trans_start=jiffies;
+
+ if ((qdm & M_BIT) && !card->u.x.LAPB_hdlc){
+ if (!tx_intr){
+ /* The M bit was set, which means that part of the
+ * packet has been sent. Copy the packet into a buffer
+				 * and set the offset to len, so on the next tx_intr
+ * the packet will be sent using the below offset.
+ */
+ chan->tx_offset += len;
+
+ ++chan->ifstats.tx_packets;
+ chan->ifstats.tx_bytes += len;
+
+ if (chan->tx_offset < orig_len){
+ setup_for_delayed_transmit (dev, buff, data_len);
+ }
+ res=0;
+ }else{
+				/* We are already in tx_intr, thus the data is already
+				 * in the buffer. Update the offset and wait for the
+				 * next tx_intr. We add on to the offset, since the data
+				 * can be X number of times larger than the max data size.
+ */
+ ++chan->ifstats.tx_packets;
+ chan->ifstats.tx_bytes += len;
+
+ ++chan->if_send_stat.if_send_bfr_passed_to_adptr;
+ chan->tx_offset += len;
+
+ /* The user can set the qdm bit as well.
+ * If the entire packet was sent and qdm is still
+				 * set, then it's the user who has set the M bit. In that
+				 * case, indicate that the packet was sent by returning
+				 * 0 and wait for a new packet. Otherwise, wait for the next
+				 * tx interrupt to send the rest of the packet */
+
+ if (chan->tx_offset < orig_len){
+ res=1;
+ }else{
+ res=0;
+ }
+ }
+ }else{
+ ++chan->ifstats.tx_packets;
+ chan->ifstats.tx_bytes += len;
+ ++chan->if_send_stat.if_send_bfr_passed_to_adptr;
+ res=0;
+ }
+ break;
+
+ case 0x33: /* Tx busy */
+ if (tx_intr){
+ printk(KERN_INFO "%s: Tx_intr: Big Error dropping packet %s\n",
+ card->devname,dev->name);
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
+ res=0;
+ }else{
+ DBG_PRINTK(KERN_INFO
+				"%s: Send: Big Error should have tx: storing %s\n",
+ card->devname,dev->name);
+ setup_for_delayed_transmit (dev, buff, data_len);
+ res=1;
+ }
+ break;
+
+ default: /* failure */
+ ++chan->ifstats.tx_errors;
+ if (tx_intr){
+ printk(KERN_INFO "%s: Tx_intr: Failure to send, dropping %s\n",
+ card->devname,dev->name);
+ ++chan->ifstats.tx_dropped;
+ ++card->wandev.stats.tx_dropped;
+ ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
+ res=0;
+ }else{
+ DBG_PRINTK(KERN_INFO "%s: Send: Failure to send !!!, storing %s\n",
+ card->devname,dev->name);
+ setup_for_delayed_transmit (dev, buff, data_len);
+ res=1;
+ }
+ break;
+ }
+ return res;
+}
+
+
+/*
+ * Parse X.25 call request data and fill x25_call_info_t structure.
+ */
+
+static void parse_call_info (unsigned char* str, x25_call_info_t* info)
+{
+ memset(info, 0, sizeof(x25_call_info_t));
+ for (; *str; ++str)
+ {
+ int i;
+ unsigned char ch;
+
+ if (*str == '-') switch (str[1]) {
+
+ /* Take minus 2 off the maximum size so that
+		 * the last byte is 0. This way we can use string
+		 * manipulation functions on call information.
+ */
+
+ case 'd': /* destination address */
+ for (i = 0; i < (MAX_X25_ADDR_SIZE-2); ++i){
+ ch = str[2+i];
+ if (isspace(ch)) break;
+ info->dest[i] = ch;
+ }
+ break;
+
+ case 's': /* source address */
+ for (i = 0; i < (MAX_X25_ADDR_SIZE-2); ++i){
+ ch = str[2+i];
+ if (isspace(ch)) break;
+ info->src[i] = ch;
+ }
+ break;
+
+ case 'u': /* user data */
+ for (i = 0; i < (MAX_X25_DATA_SIZE-2); ++i){
+ ch = str[2+i];
+ if (isspace(ch)) break;
+ info->user[i] = ch;
+ }
+ info->nuser = i;
+ break;
+
+ case 'f': /* facilities */
+ for (i = 0; i < (MAX_X25_FACL_SIZE-2); ++i){
+ ch = str[2+i];
+ if (isspace(ch)) break;
+ info->facil[i] = ch;
+ }
+ info->nfacil = i;
+ break;
+ }
+ }
+}
+
+/*
+ * Convert line speed in bps to a number used by S502 code.
+ */
+
+static unsigned char bps_to_speed_code (unsigned long bps)
+{
+ unsigned char number;
+
+ if (bps <= 1200) number = 0x01;
+ else if (bps <= 2400) number = 0x02;
+ else if (bps <= 4800) number = 0x03;
+ else if (bps <= 9600) number = 0x04;
+ else if (bps <= 19200) number = 0x05;
+ else if (bps <= 38400) number = 0x06;
+ else if (bps <= 45000) number = 0x07;
+ else if (bps <= 56000) number = 0x08;
+ else if (bps <= 64000) number = 0x09;
+ else if (bps <= 74000) number = 0x0A;
+ else if (bps <= 112000) number = 0x0B;
+ else if (bps <= 128000) number = 0x0C;
+ else number = 0x0D;
+
+ return number;
+}
+
+/*
+ * Convert decimal string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted.
+ */
+
+static unsigned int dec_to_uint (unsigned char* str, int len)
+{
+ unsigned val;
+
+ if (!len)
+ len = strlen(str);
+
+ for (val = 0; len && is_digit(*str); ++str, --len)
+ val = (val * 10) + (*str - (unsigned)'0');
+
+ return val;
+}
+
+/*
+ * Convert hex string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted.
+ */
+
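+/* Note: the (ch & 0xDF) below clears ASCII bit 5, folding lower-case hex
+ * digits to upper case before the 'A'..'F' conversion.
+ */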
+static unsigned int hex_to_uint (unsigned char* str, int len)
+{
+ unsigned val, ch;
+
+ if (!len)
+ len = strlen(str);
+
+ for (val = 0; len; ++str, --len)
+ {
+ ch = *str;
+ if (is_digit(ch))
+ val = (val << 4) + (ch - (unsigned)'0');
+ else if (is_hex_digit(ch))
+ val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10);
+ else break;
+ }
+ return val;
+}
+
+
+static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_IPX, unsigned long network_number, unsigned short proto)
+{
+ int i;
+
+ if( proto == ETH_P_IPX) {
+ /* It's an IPX packet */
+ if(!enable_IPX) {
+ /* Return 1 so we don't pass it up the stack. */
+ return 1;
+ }
+ } else {
+ /* It's not IPX so pass it up the stack.*/
+ return 0;
+ }
+
+ if( sendpacket[16] == 0x90 &&
+ sendpacket[17] == 0x04)
+ {
+ /* It's IPXWAN */
+
+ if( sendpacket[2] == 0x02 &&
+ sendpacket[34] == 0x00)
+ {
+ /* It's a timer request packet */
+ printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",devname);
+
+ /* Go through the routing options and answer no to every
+ * option except Unnumbered RIP/SAP
+ */
+ for(i = 41; sendpacket[i] == 0x00; i += 5)
+ {
+ /* 0x02 is the option for Unnumbered RIP/SAP */
+ if( sendpacket[i + 4] != 0x02)
+ {
+ sendpacket[i + 1] = 0;
+ }
+ }
+
+ /* Skip over the extended Node ID option */
+ if( sendpacket[i] == 0x04 )
+ {
+ i += 8;
+ }
+
+ /* We also want to turn off all header compression opt. */
+ for(; sendpacket[i] == 0x80 ;)
+ {
+ sendpacket[i + 1] = 0;
+ i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
+ }
+
+ /* Set the packet type to timer response */
+ sendpacket[34] = 0x01;
+
+ printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",devname);
+ }
+ else if( sendpacket[34] == 0x02 )
+ {
+ /* This is an information request packet */
+ printk(KERN_INFO "%s: Received IPXWAN Information Request packet\n",devname);
+
+ /* Set the packet type to information response */
+ sendpacket[34] = 0x03;
+
+ /* Set the router name */
+ sendpacket[51] = 'X';
+ sendpacket[52] = 'T';
+ sendpacket[53] = 'P';
+ sendpacket[54] = 'I';
+ sendpacket[55] = 'P';
+ sendpacket[56] = 'E';
+ sendpacket[57] = '-';
+ sendpacket[58] = CVHexToAscii(network_number >> 28);
+ sendpacket[59] = CVHexToAscii((network_number & 0x0F000000)>> 24);
+ sendpacket[60] = CVHexToAscii((network_number & 0x00F00000)>> 20);
+ sendpacket[61] = CVHexToAscii((network_number & 0x000F0000)>> 16);
+ sendpacket[62] = CVHexToAscii((network_number & 0x0000F000)>> 12);
+ sendpacket[63] = CVHexToAscii((network_number & 0x00000F00)>> 8);
+ sendpacket[64] = CVHexToAscii((network_number & 0x000000F0)>> 4);
+ sendpacket[65] = CVHexToAscii(network_number & 0x0000000F);
+ for(i = 66; i < 99; i+= 1)
+ {
+ sendpacket[i] = 0;
+ }
+
+ printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",devname);
+ }
+ else
+ {
+ printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
+ return 0;
+ }
+
+ /* Set the WNodeID to our network address */
+ sendpacket[35] = (unsigned char)(network_number >> 24);
+ sendpacket[36] = (unsigned char)((network_number & 0x00FF0000) >> 16);
+ sendpacket[37] = (unsigned char)((network_number & 0x0000FF00) >> 8);
+ sendpacket[38] = (unsigned char)(network_number & 0x000000FF);
+
+ return 1;
+ } else {
+		/* If we get here it's an IPX data packet, so it'll get
+		 * passed up the stack.
+		 */
+ /* switch the network numbers */
+ switch_net_numbers(sendpacket, network_number, 1);
+ return 0;
+ }
+}
+
+/*
+ * If incoming is 0 (outgoing): if the net number is ours, make it 0.
+ * If incoming is 1: if the net number is 0, make it ours.
+ */
+
+static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
+{
+ unsigned long pnetwork_number;
+
+ pnetwork_number = (unsigned long)((sendpacket[6] << 24) +
+ (sendpacket[7] << 16) + (sendpacket[8] << 8) +
+ sendpacket[9]);
+
+
+ if (!incoming) {
+ /*If the destination network number is ours, make it 0 */
+ if( pnetwork_number == network_number) {
+ sendpacket[6] = sendpacket[7] = sendpacket[8] =
+ sendpacket[9] = 0x00;
+ }
+ } else {
+ /* If the incoming network is 0, make it ours */
+ if( pnetwork_number == 0) {
+ sendpacket[6] = (unsigned char)(network_number >> 24);
+ sendpacket[7] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[8] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[9] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+
+
+ pnetwork_number = (unsigned long)((sendpacket[18] << 24) +
+ (sendpacket[19] << 16) + (sendpacket[20] << 8) +
+ sendpacket[21]);
+
+
+ if( !incoming ) {
+ /* If the source network is ours, make it 0 */
+ if( pnetwork_number == network_number) {
+ sendpacket[18] = sendpacket[19] = sendpacket[20] =
+ sendpacket[21] = 0x00;
+ }
+ } else {
+ /* If the source network is 0, make it ours */
+ if( pnetwork_number == 0 ) {
+ sendpacket[18] = (unsigned char)(network_number >> 24);
+ sendpacket[19] = (unsigned char)((network_number &
+ 0x00FF0000) >> 16);
+ sendpacket[20] = (unsigned char)((network_number &
+ 0x0000FF00) >> 8);
+ sendpacket[21] = (unsigned char)(network_number &
+ 0x000000FF);
+ }
+ }
+} /* switch_net_numbers */
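+
+/*
+ * Worked example (illustrative only): with network_number = 0xC0010203,
+ * an outgoing packet (incoming == 0) whose destination network bytes
+ * sendpacket[6..9] read C0 01 02 03 has them rewritten to 00 00 00 00,
+ * while an incoming packet (incoming == 1) whose source network bytes
+ * sendpacket[18..21] read 00 00 00 00 has them rewritten to C0 01 02 03.
+ * Any other network number is left untouched.
+ */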
+
+
+
+
+/********************* X25API SPECIFIC FUNCTIONS ****************/
+
+
+/*===============================================================
+ * find_channel
+ *
+ *	Manages the lcn to device map. It increases performance
+ *	because it eliminates the need to search through the linked
+ *	list for a device which is bound to a specific lcn.
+ *
+ *===============================================================*/
+
+
+struct net_device *find_channel(sdla_t *card, unsigned lcn)
+{
+ if (card->u.x.LAPB_hdlc){
+
+ return card->wandev.dev;
+
+ }else{
+		/* We don't know whether the incoming lcn
+		 * is a PVC or an SVC channel. But we do know that
+		 * the lcn cannot be for both the PVC and the SVC
+		 * channel.
+		 *
+		 * If the lcn number is greater than or equal to 255,
+		 * take the modulo 255 of that number. We only have
+		 * 255 locations, thus higher numbers must be mapped
+		 * to a number between 0 and 254.
+		 *
+		 * We must separate pvc's and svc's since the two don't
+		 * have to be contiguous. Meaning pvc's can start
+		 * from 1 to 10 and svc's can start from 256 to 266.
+		 * But 256%255 is 1, i.e. CONFLICT.
+		 */
+
+
+		/* Highest LCN number must be less than or equal to 4096 */
+ if ((lcn <= MAX_LCN_NUM) && (lcn > 0)){
+
+ if (lcn < X25_MAX_CHAN){
+ if (card->u.x.svc_to_dev_map[lcn])
+ return card->u.x.svc_to_dev_map[lcn];
+
+ if (card->u.x.pvc_to_dev_map[lcn])
+ return card->u.x.pvc_to_dev_map[lcn];
+
+ }else{
+ int new_lcn = lcn%X25_MAX_CHAN;
+ if (card->u.x.svc_to_dev_map[new_lcn])
+ return card->u.x.svc_to_dev_map[new_lcn];
+
+ if (card->u.x.pvc_to_dev_map[new_lcn])
+ return card->u.x.pvc_to_dev_map[new_lcn];
+ }
+ }
+ return NULL;
+ }
+}
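+
+/*
+ * Illustrative note (derived from the comment above, not original text):
+ * a single modulo table would not work here.  For example, a PVC on
+ * lcn 1 and an SVC on lcn 256 would both hash to slot 1, since
+ * 256 % X25_MAX_CHAN == 1.  That is why svc_to_dev_map[] and
+ * pvc_to_dev_map[] are kept as two separate tables and searched in turn.
+ */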
+
+void bind_lcn_to_dev(sdla_t *card, struct net_device *dev, unsigned lcn)
+{
+ x25_channel_t *chan = dev->priv;
+
+ /* Modulo the lcn number by X25_MAX_CHAN (255)
+ * because the lcn number can be greater than 255
+ *
+ * We need to split svc and pvc since they don't have
+	 * to be contiguous.
+ */
+
+ if (chan->common.svc){
+ card->u.x.svc_to_dev_map[(lcn % X25_MAX_CHAN)] = dev;
+ }else{
+ card->u.x.pvc_to_dev_map[(lcn % X25_MAX_CHAN)] = dev;
+ }
+ chan->common.lcn = lcn;
+}
+
+
+
+/*===============================================================
+ * x25api_bh
+ *
+ *
+ *==============================================================*/
+
+static void x25api_bh(struct net_device* dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t* card = chan->card;
+ struct sk_buff *skb;
+
+ if (atomic_read(&chan->bh_buff_used) == 0){
+ printk(KERN_INFO "%s: BH Buffer Empty in BH\n",
+ card->devname);
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+
+ while (atomic_read(&chan->bh_buff_used)){
+
+ /* If the sock is in the process of unlinking the
+ * driver from the socket, we must get out.
+		 * This never happens but is a sanity check. */
+ if (test_bit(0,&chan->common.common_critical)){
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+
+ /* If LAPB HDLC, do not drop packets if socket is
+ * not connected. Let the buffer fill up and
+ * turn off rx interrupt */
+ if (card->u.x.LAPB_hdlc){
+ if (chan->common.sk == NULL || chan->common.func == NULL){
+ clear_bit(0, &chan->tq_working);
+ return;
+ }
+ }
+
+ skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
+
+ if (skb == NULL){
+ printk(KERN_INFO "%s: BH Skb empty for read %i\n",
+ card->devname,chan->bh_read);
+ }else{
+
+ if (chan->common.sk == NULL || chan->common.func == NULL){
+ printk(KERN_INFO "%s: BH: Socket disconnected, dropping\n",
+ card->devname);
+ dev_kfree_skb_any(skb);
+ x25api_bh_cleanup(dev);
+ ++chan->ifstats.rx_dropped;
+ ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
+ continue;
+ }
+
+
+ if (chan->common.func(skb,dev,chan->common.sk) != 0){
+				/* Sock full, cannot send; queue us for another
+ * try
+ */
+ printk(KERN_INFO "%s: BH: !!! Packet failed to send !!!!! \n",
+ card->devname);
+ atomic_set(&chan->common.receive_block,1);
+ return;
+ }else{
+ x25api_bh_cleanup(dev);
+ ++chan->rx_intr_stat.rx_intr_bfr_passed_to_stack;
+ }
+ }
+ }
+ clear_bit(0, &chan->tq_working);
+
+ return;
+}
+
+/*===============================================================
+ * x25api_bh_cleanup
+ *
+ *
+ *==============================================================*/
+
+static int x25api_bh_cleanup(struct net_device *dev)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+ TX25Status* status = card->flags;
+
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
+
+ if (chan->bh_read == MAX_BH_BUFF){
+ chan->bh_read=0;
+ }else{
+ ++chan->bh_read;
+ }
+
+ /* If the Receive interrupt was off, it means
+ * that we filled up our circular buffer. Check
+ * that we have space in the buffer. If so
+ * turn the RX interrupt back on.
+ */
+ if (!(status->imask & INTR_ON_RX_FRAME)){
+ if (atomic_read(&chan->bh_buff_used) < (MAX_BH_BUFF+1)){
+ printk(KERN_INFO "%s: BH: Turning on the interrupt\n",
+ card->devname);
+ status->imask |= INTR_ON_RX_FRAME;
+ }
+ }
+
+ atomic_dec(&chan->bh_buff_used);
+ return 0;
+}
+
+
+/*===============================================================
+ * bh_enqueue
+ *
+ *
+ *==============================================================*/
+
+static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
+{
+ x25_channel_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+ TX25Status* status = card->flags;
+
+ if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
+ printk(KERN_INFO "%s: Bottom half buffer FULL\n",
+ card->devname);
+ return 1;
+ }
+
+ ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
+
+ if (chan->bh_write == MAX_BH_BUFF){
+ chan->bh_write=0;
+ }else{
+ ++chan->bh_write;
+ }
+
+ atomic_inc(&chan->bh_buff_used);
+
+ if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
+ printk(KERN_INFO "%s: Buffer is now full, Turning off RX Intr\n",
+ card->devname);
+ status->imask &= ~INTR_ON_RX_FRAME;
+ }
+
+ return 0;
+}
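+
+/*
+ * Illustrative summary (derived from the code above): bh_enqueue() and
+ * x25api_bh_cleanup() implement a circular buffer of MAX_BH_BUFF+1 skb
+ * slots.  bh_write advances on enqueue, bh_read advances on cleanup and
+ * bh_buff_used counts the slots in use:
+ *
+ *	empty:	bh_buff_used == 0
+ *	full:	bh_buff_used == MAX_BH_BUFF + 1
+ *
+ * When the buffer becomes full, INTR_ON_RX_FRAME is masked so the board
+ * stops raising receive interrupts; the cleanup path unmasks it again
+ * once a slot has been freed.
+ */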
+
+
+/*===============================================================
+ * timer_intr_cmd_exec
+ *
+ * Called by timer interrupt to execute a command
+ *===============================================================*/
+
+static int timer_intr_cmd_exec (sdla_t* card)
+{
+ struct net_device *dev;
+ unsigned char more_to_exec=0;
+ volatile x25_channel_t *chan=NULL;
+ int i=0,bad_cmd=0,err=0;
+
+ if (card->u.x.cmd_dev == NULL){
+ card->u.x.cmd_dev = card->wandev.dev;
+ }
+
+ dev = card->u.x.cmd_dev;
+
+ for (;;){
+
+ chan = dev->priv;
+
+ if (atomic_read(&chan->common.command)){
+
+ bad_cmd = check_bad_command(card,dev);
+
+ if ((!chan->common.mbox || atomic_read(&chan->common.disconnect)) &&
+ !bad_cmd){
+
+ /* Socket has died or exited, We must bring the
+ * channel down before anybody else tries to
+ * use it */
+ err = channel_disconnect(card,dev);
+ }else{
+ err = execute_delayed_cmd(card, dev,
+ (mbox_cmd_t*)chan->common.mbox,
+ bad_cmd);
+ }
+
+ switch (err){
+
+ case RETURN_RESULT:
+
+ /* Return the result to the socket without
+ * delay. NO_WAIT Command */
+ atomic_set(&chan->common.command,0);
+ if (atomic_read(&card->u.x.command_busy))
+ atomic_set(&card->u.x.command_busy,0);
+
+ send_delayed_cmd_result(card,dev,card->mbox);
+
+ more_to_exec=0;
+ break;
+ case DELAY_RESULT:
+
+ /* Wait for the remote to respond, before
+ * sending the result up to the socket.
+ * WAIT command */
+ if (atomic_read(&card->u.x.command_busy))
+ atomic_set(&card->u.x.command_busy,0);
+
+ atomic_set(&chan->common.command,0);
+ more_to_exec=0;
+ break;
+ default:
+
+ /* If command could not be executed for
+				 * some reason (e.g. return code 0x33, busy),
+				 * set the more_to_exec bit which will
+				 * indicate that this command must be executed
+				 * again during the next timer interrupt
+ */
+ more_to_exec=1;
+ if (atomic_read(&card->u.x.command_busy) == 0)
+ atomic_set(&card->u.x.command_busy,1);
+ break;
+ }
+
+ bad_cmd=0;
+
+			/* If more_to_exec is set (e.g. there are no hdlc
+			 * buffers), wait for the next pass and try the
+			 * same command again. Otherwise, start searching
+			 * from the next device on the next pass.
+ */
+ if (!more_to_exec){
+ dev = move_dev_to_next(card,dev);
+ }
+ break;
+ }else{
+ /* This device has nothing to execute,
+ * go to next.
+ */
+ if (atomic_read(&card->u.x.command_busy))
+ atomic_set(&card->u.x.command_busy,0);
+ dev = move_dev_to_next(card,dev);
+ }
+
+ if (++i == card->u.x.no_dev){
+ if (!more_to_exec){
+ DBG_PRINTK(KERN_INFO "%s: Nothing to execute in Timer\n",
+ card->devname);
+ if (atomic_read(&card->u.x.command_busy)){
+ atomic_set(&card->u.x.command_busy,0);
+ }
+ }
+ break;
+ }
+
+ } //End of FOR
+
+ card->u.x.cmd_dev = dev;
+
+ if (more_to_exec){
+ /* If more commands are pending, do not turn off timer
+ * interrupt */
+ return 1;
+ }else{
+ /* No more commands, turn off timer interrupt */
+ return 0;
+ }
+}
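+
+/*
+ * Note on the return value (derived from the code above, not original
+ * documentation): timer_intr_cmd_exec() returns 1 when at least one
+ * delayed command could not complete (e.g. the board reported busy) and
+ * must be retried on the next timer interrupt, and 0 when there is
+ * nothing left to execute, in which case the timer interrupt can be
+ * turned off by the caller.
+ */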
+
+/*===============================================================
+ * execute_delayed_cmd
+ *
+ * Execute an API command which was passed down from the
+ * sock. Sock is very limited in which commands it can
+ * execute. Wait and No Wait commands are supported.
+ *	Place Call, Clear Call and Reset are wait commands, whereas
+ *	Accept Call is a no-wait command.
+ *
+ *===============================================================*/
+
+static int execute_delayed_cmd(sdla_t* card, struct net_device *dev,
+ mbox_cmd_t *usr_cmd, char bad_cmd)
+{
+ TX25Mbox* mbox = card->mbox;
+ int err;
+ x25_channel_t *chan = dev->priv;
+ int delay=RETURN_RESULT;
+
+ if (!(*card->u.x.hdlc_buf_status & 0x40) && !bad_cmd){
+ return TRY_CMD_AGAIN;
+ }
+
+ /* This way a command is guaranteed to be executed for
+ * a specific lcn, the network interface is bound to. */
+ usr_cmd->cmd.lcn = chan->common.lcn;
+
+
+ /* If channel is pvc, instead of place call
+ * run x25_channel configuration. If running LAPB HDLC
+ * enable communications.
+ */
+ if ((!chan->common.svc) && (usr_cmd->cmd.command == X25_PLACE_CALL)){
+
+ if (card->u.x.LAPB_hdlc){
+ DBG_PRINTK(KERN_INFO "LAPB: Connecting\n");
+ connect(card);
+ set_chan_state(dev,WAN_CONNECTING);
+ return DELAY_RESULT;
+ }else{
+ DBG_PRINTK(KERN_INFO "%s: PVC is CONNECTING\n",card->devname);
+ if (x25_get_chan_conf(card, chan) == CMD_OK){
+ set_chan_state(dev, WAN_CONNECTED);
+ }else{
+ set_chan_state(dev, WAN_DISCONNECTED);
+ }
+ return RETURN_RESULT;
+ }
+ }
+
+ /* Copy the socket mbox command onto the board */
+
+ memcpy(&mbox->cmd, &usr_cmd->cmd, sizeof(TX25Cmd));
+ if (usr_cmd->cmd.length){
+ memcpy(mbox->data, usr_cmd->data, usr_cmd->cmd.length);
+ }
+
+ /* Check if command is bad. We need to copy the cmd into
+	 * the buffer regardless since we return the mbox to
+ * the user */
+ if (bad_cmd){
+ mbox->cmd.result=0x01;
+ return RETURN_RESULT;
+ }
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ if (err != CMD_OK && err != X25RES_NOT_READY)
+ x25_error(card, err, usr_cmd->cmd.command, usr_cmd->cmd.lcn);
+
+ if (mbox->cmd.result == X25RES_NOT_READY){
+ return TRY_CMD_AGAIN;
+ }
+
+ switch (mbox->cmd.command){
+
+ case X25_PLACE_CALL:
+
+ switch (mbox->cmd.result){
+
+ case CMD_OK:
+
+ /* Check if Place call is a wait command or a
+ * no wait command */
+ if (atomic_read(&chan->common.command) & 0x80)
+ delay=RETURN_RESULT;
+ else
+ delay=DELAY_RESULT;
+
+
+ DBG_PRINTK(KERN_INFO "\n%s: PLACE CALL Binding dev %s to lcn %i\n",
+ card->devname,dev->name, mbox->cmd.lcn);
+
+ bind_lcn_to_dev (card, dev, mbox->cmd.lcn);
+ set_chan_state(dev, WAN_CONNECTING);
+ break;
+
+
+ default:
+ delay=RETURN_RESULT;
+ set_chan_state(dev, WAN_DISCONNECTED);
+ break;
+ }
+ break;
+
+ case X25_ACCEPT_CALL:
+
+ switch (mbox->cmd.result){
+
+ case CMD_OK:
+
+ DBG_PRINTK(KERN_INFO "\n%s: ACCEPT Binding dev %s to lcn %i\n",
+ card->devname,dev->name,mbox->cmd.lcn);
+
+ bind_lcn_to_dev (card, dev, mbox->cmd.lcn);
+
+ if (x25_get_chan_conf(card, chan) == CMD_OK){
+
+ set_chan_state(dev, WAN_CONNECTED);
+ delay=RETURN_RESULT;
+
+ }else{
+ if (x25_clear_call(card, usr_cmd->cmd.lcn, 0, 0) == CMD_OK){
+ /* if clear is successful, wait for clear confirm
+ */
+ delay=DELAY_RESULT;
+ }else{
+					/* Do not change the state here. If we fail
+					 * the accept, the return code is sent up
+					 * the stack, which will either retry
+					 * or clear the call
+ */
+ DBG_PRINTK(KERN_INFO
+						"%s: ACCEPT: STATE MAY BE CORRUPTED 2 !!!!!\n",
+ card->devname);
+ delay=RETURN_RESULT;
+ }
+ }
+ break;
+
+
+ case X25RES_ASYNC_PACKET:
+ delay=TRY_CMD_AGAIN;
+ break;
+
+ default:
+ DBG_PRINTK(KERN_INFO "%s: ACCEPT FAILED\n",card->devname);
+ if (x25_clear_call(card, usr_cmd->cmd.lcn, 0, 0) == CMD_OK){
+ delay=DELAY_RESULT;
+ }else{
+				/* Do not change the state here. If we fail the accept,
+				 * the return code is sent up the stack, which will
+				 * either retry or clear the call */
+ DBG_PRINTK(KERN_INFO
+ "%s: ACCEPT: STATE MAY BE CORRUPTED 1 !!!!!\n",
+ card->devname);
+ delay=RETURN_RESULT;
+ }
+ }
+ break;
+
+ case X25_CLEAR_CALL:
+
+ switch (mbox->cmd.result){
+
+ case CMD_OK:
+ DBG_PRINTK(KERN_INFO
+ "CALL CLEAR OK: Dev %s Mbox Lcn %i Chan Lcn %i\n",
+ dev->name,mbox->cmd.lcn,chan->common.lcn);
+ set_chan_state(dev, WAN_DISCONNECTING);
+ delay = DELAY_RESULT;
+ break;
+
+ case X25RES_CHANNEL_IN_USE:
+ case X25RES_ASYNC_PACKET:
+ delay = TRY_CMD_AGAIN;
+ break;
+
+ case X25RES_LINK_NOT_IN_ABM:
+ case X25RES_INVAL_LCN:
+ case X25RES_INVAL_STATE:
+ set_chan_state(dev, WAN_DISCONNECTED);
+ delay = RETURN_RESULT;
+ break;
+
+ default:
+ /* If command did not execute because of user
+ * fault, do not change the state. This will
+ * signal the socket that clear command failed.
+ * User can retry or close the socket.
+ * When socket gets killed, it will set the
+ * chan->disconnect which will signal
+ * driver to clear the call */
+ printk(KERN_INFO "%s: Clear Command Failed, Rc %x\n",
+				card->devname,mbox->cmd.result);
+ delay = RETURN_RESULT;
+ }
+ break;
+ }
+
+ return delay;
+}
+
+/*===============================================================
+ * api_incoming_call
+ *
+ * Pass an incoming call request up the listening
+ * sock. If the API sock is not listening reject the
+ * call.
+ *
+ *===============================================================*/
+
+static int api_incoming_call (sdla_t* card, TX25Mbox *mbox, int lcn)
+{
+ struct sk_buff *skb;
+ int len = sizeof(TX25Cmd)+mbox->cmd.length;
+
+ if (alloc_and_init_skb_buf(card, &skb, len)){
+ printk(KERN_INFO "%s: API incoming call, no memory\n",card->devname);
+ return 1;
+ }
+
+ memcpy(skb_put(skb,len),&mbox->cmd,len);
+
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(X25_PROT);
+ skb->pkt_type = WAN_PACKET_ASYNC;
+
+ if (card->func(skb,card->sk) < 0){
+ printk(KERN_INFO "%s: MAJOR ERROR: Failed to send up place call \n",card->devname);
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*===============================================================
+ * send_delayed_cmd_result
+ *
+ *	Wait commands like PLACE CALL or CLEAR CALL must wait
+ * until the result arrives. This function passes
+ * the result to a waiting sock.
+ *
+ *===============================================================*/
+static void send_delayed_cmd_result(sdla_t *card, struct net_device *dev,
+ TX25Mbox* mbox)
+{
+ x25_channel_t *chan = dev->priv;
+ mbox_cmd_t *usr_cmd = (mbox_cmd_t *)chan->common.mbox;
+ struct sk_buff *skb;
+ int len=sizeof(unsigned char);
+
+ atomic_set(&chan->common.command,0);
+
+ /* If the sock is in the process of unlinking the
+ * driver from the socket, we must get out.
+	 * This never happens but is a sanity check. */
+ if (test_bit(0,&chan->common.common_critical)){
+ return;
+ }
+
+ if (!usr_cmd || !chan->common.sk || !chan->common.func){
+		DBG_PRINTK(KERN_INFO "Delay result: Sock not bound sk: %u, func: %u, mbox: %u\n",
+ (unsigned int)chan->common.sk,
+ (unsigned int)chan->common.func,
+ (unsigned int)usr_cmd);
+ return;
+ }
+
+ memcpy(&usr_cmd->cmd, &mbox->cmd, sizeof(TX25Cmd));
+ if (mbox->cmd.length > 0){
+ memcpy(usr_cmd->data, mbox->data, mbox->cmd.length);
+ }
+
+ if (alloc_and_init_skb_buf(card,&skb,len)){
+ printk(KERN_INFO "Delay result: No sock buffers\n");
+ return;
+ }
+
+ memcpy(skb_put(skb,len),&mbox->cmd.command,len);
+
+ skb->mac.raw = skb->data;
+ skb->pkt_type = WAN_PACKET_CMD;
+
+ chan->common.func(skb,dev,chan->common.sk);
+}
+
+/*===============================================================
+ * clear_confirm_event
+ *
+ * Pass the clear confirmation event up the sock. The
+ * API will disconnect only after the clear confirmation
+ * has been received.
+ *
+ * Depending on the state, clear confirmation could
+ * be an OOB event, or a result of an API command.
+ *===============================================================*/
+
+static int clear_confirm_event (sdla_t *card, TX25Mbox* mb)
+{
+ struct net_device *dev;
+ x25_channel_t *chan;
+ unsigned char old_state;
+
+ dev = find_channel(card,mb->cmd.lcn);
+ if (!dev){
+ DBG_PRINTK(KERN_INFO "%s: *** GOT CLEAR BUT NO DEV %i\n",
+ card->devname,mb->cmd.lcn);
+ return 0;
+ }
+
+ chan=dev->priv;
+ DBG_PRINTK(KERN_INFO "%s: GOT CLEAR CONFIRM %s: Mbox lcn %i Chan lcn %i\n",
+ card->devname, dev->name, mb->cmd.lcn, chan->common.lcn);
+
+ /* If not API fall through to default.
+ * If API, send the result to a waiting
+ * socket.
+ */
+
+ old_state = chan->common.state;
+ set_chan_state(dev, WAN_DISCONNECTED);
+
+ if (chan->common.usedby == API){
+ switch (old_state) {
+
+ case WAN_DISCONNECTING:
+ case WAN_CONNECTING:
+ send_delayed_cmd_result(card,dev,mb);
+ break;
+ case WAN_CONNECTED:
+ send_oob_msg(card,dev,mb);
+ break;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+/*===============================================================
+ * send_oob_msg
+ *
+ * Construct an NEM Message and pass it up the connected
+ *	sock. If the sock is not bound, discard the NEM.
+ *
+ *===============================================================*/
+
+static void send_oob_msg(sdla_t *card, struct net_device *dev, TX25Mbox *mbox)
+{
+ x25_channel_t *chan = dev->priv;
+ mbox_cmd_t *usr_cmd = (mbox_cmd_t *)chan->common.mbox;
+ struct sk_buff *skb;
+ int len=sizeof(x25api_hdr_t)+mbox->cmd.length;
+ x25api_t *api_hdr;
+
+ /* If the sock is in the process of unlinking the
+ * driver from the socket, we must get out.
+	 * This never happens but is a sanity check. */
+ if (test_bit(0,&chan->common.common_critical)){
+ return;
+ }
+
+ if (!usr_cmd || !chan->common.sk || !chan->common.func){
+		DBG_PRINTK(KERN_INFO "OOB MSG: Sock not bound\n");
+ return;
+ }
+
+ memcpy(&usr_cmd->cmd, &mbox->cmd, sizeof(TX25Cmd));
+ if (mbox->cmd.length > 0){
+ memcpy(usr_cmd->data, mbox->data, mbox->cmd.length);
+ }
+
+ if (alloc_and_init_skb_buf(card,&skb,len)){
+ printk(KERN_INFO "%s: OOB MSG: No sock buffers\n",card->devname);
+ return;
+ }
+
+ api_hdr = (x25api_t*)skb_put(skb,len);
+ api_hdr->hdr.pktType = mbox->cmd.pktType & 0x7F;
+ api_hdr->hdr.qdm = mbox->cmd.qdm;
+ api_hdr->hdr.cause = mbox->cmd.cause;
+ api_hdr->hdr.diagn = mbox->cmd.diagn;
+ api_hdr->hdr.length = mbox->cmd.length;
+ api_hdr->hdr.result = mbox->cmd.result;
+ api_hdr->hdr.lcn = mbox->cmd.lcn;
+
+ if (mbox->cmd.length > 0){
+ memcpy(api_hdr->data,mbox->data,mbox->cmd.length);
+ }
+
+ skb->mac.raw = skb->data;
+ skb->pkt_type = WAN_PACKET_ERR;
+
+ if (chan->common.func(skb,dev,chan->common.sk) < 0){
+ if (bh_enqueue(dev,skb)){
+ printk(KERN_INFO "%s: Dropping OOB MSG\n",card->devname);
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ DBG_PRINTK(KERN_INFO "%s: OOB MSG OK, %s, lcn %i\n",
+ card->devname, dev->name, mbox->cmd.lcn);
+}
+
+/*===============================================================
+ * alloc_and_init_skb_buf
+ *
+ * Allocate and initialize an skb buffer.
+ *
+ *===============================================================*/
+
+static int alloc_and_init_skb_buf (sdla_t *card, struct sk_buff **skb, int len)
+{
+ struct sk_buff *new_skb = *skb;
+
+ new_skb = dev_alloc_skb(len + X25_HRDHDR_SZ);
+ if (new_skb == NULL){
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ return 1;
+ }
+
+ if (skb_tailroom(new_skb) < len){
+ /* No room for the packet. Call off the whole thing! */
+ dev_kfree_skb_any(new_skb);
+ printk(KERN_INFO "%s: Listen: unexpectedly long packet sequence\n"
+ ,card->devname);
+ *skb = NULL;
+ return 1;
+ }
+
+ *skb = new_skb;
+ return 0;
+
+}
+
+/*===============================================================
+ * api_oob_event
+ *
+ * Send an OOB event up to the sock
+ *
+ *===============================================================*/
+
+static void api_oob_event (sdla_t *card,TX25Mbox *mbox)
+{
+ struct net_device *dev = find_channel(card, mbox->cmd.lcn);
+ x25_channel_t *chan;
+
+ if (!dev)
+ return;
+
+ chan=dev->priv;
+
+ if (chan->common.usedby == API)
+ send_oob_msg(card,dev,mbox);
+
+}
+
+
+
+
+static int channel_disconnect(sdla_t* card, struct net_device *dev)
+{
+
+ int err;
+ x25_channel_t *chan = dev->priv;
+
+ DBG_PRINTK(KERN_INFO "%s: TIMER: %s, Device down disconnecting\n",
+ card->devname,dev->name);
+
+ if (chan->common.svc){
+ err = x25_clear_call(card,chan->common.lcn,0,0);
+ }else{
+ /* If channel is PVC or LAPB HDLC, there is no call
+ * to be cleared, thus drop down to the default
+ * area
+ */
+ err = 1;
+ }
+
+ switch (err){
+
+ case X25RES_CHANNEL_IN_USE:
+ case X25RES_NOT_READY:
+ err = TRY_CMD_AGAIN;
+ break;
+ case CMD_OK:
+ DBG_PRINTK(KERN_INFO "CALL CLEAR OK: Dev %s Chan Lcn %i\n",
+ dev->name,chan->common.lcn);
+
+ set_chan_state(dev,WAN_DISCONNECTING);
+ atomic_set(&chan->common.command,0);
+ err = DELAY_RESULT;
+ break;
+ default:
+ /* If LAPB HDLC protocol, bring the whole link down
+ * once the application terminates
+ */
+
+ set_chan_state(dev,WAN_DISCONNECTED);
+
+ if (card->u.x.LAPB_hdlc){
+ DBG_PRINTK(KERN_INFO "LAPB: Disconnecting Link\n");
+ hdlc_link_down (card);
+ }
+ atomic_set(&chan->common.command,0);
+ err = RETURN_RESULT;
+ break;
+ }
+
+ return err;
+}
+
+static void hdlc_link_down (sdla_t *card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int retry = 5;
+ int err=0;
+
+ do {
+ memset(mbox,0,sizeof(TX25Mbox));
+ mbox->cmd.command = X25_HDLC_LINK_DISC;
+ mbox->cmd.length = 1;
+ mbox->data[0]=0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+
+ } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_DISC, 0));
+
+ if (err)
+ printk(KERN_INFO "%s: Hdlc Link Down Failed %x\n",card->devname,err);
+
+ disconnect (card);
+
+}
+
+static int check_bad_command(sdla_t* card, struct net_device *dev)
+{
+ x25_channel_t *chan = dev->priv;
+ int bad_cmd = 0;
+
+ switch (atomic_read(&chan->common.command)&0x7F){
+
+ case X25_PLACE_CALL:
+ if (chan->common.state != WAN_DISCONNECTED)
+ bad_cmd=1;
+ break;
+ case X25_CLEAR_CALL:
+ if (chan->common.state == WAN_DISCONNECTED)
+ bad_cmd=1;
+ break;
+ case X25_ACCEPT_CALL:
+ if (chan->common.state != WAN_CONNECTING)
+ bad_cmd=1;
+ break;
+ case X25_RESET:
+ if (chan->common.state != WAN_CONNECTED)
+ bad_cmd=1;
+ break;
+ default:
+ bad_cmd=1;
+ break;
+ }
+
+ if (bad_cmd){
+ printk(KERN_INFO "%s: Invalid State, BAD Command %x, dev %s, lcn %i, st %i\n",
+ card->devname,atomic_read(&chan->common.command),dev->name,
+ chan->common.lcn, chan->common.state);
+ }
+
+ return bad_cmd;
+}
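+
+/*
+ * Summary of the checks above (illustrative, derived from the switch):
+ *
+ *	Command		Required channel state
+ *	-------		----------------------
+ *	PLACE CALL	WAN_DISCONNECTED
+ *	CLEAR CALL	anything but WAN_DISCONNECTED
+ *	ACCEPT CALL	WAN_CONNECTING
+ *	RESET		WAN_CONNECTED
+ *
+ * Any other command is rejected as bad.
+ */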
+
+
+
+/*************************** XPIPEMON FUNCTIONS **************************/
+
+/*==============================================================================
+ * Process UDP call of type XPIPE
+ */
+
+static int process_udp_mgmt_pkt(sdla_t *card)
+{
+ int c_retry = MAX_CMD_RETRY;
+ unsigned int len;
+ struct sk_buff *new_skb;
+ TX25Mbox *mbox = card->mbox;
+ int err;
+ int udp_mgmt_req_valid = 1;
+ struct net_device *dev;
+ x25_channel_t *chan;
+ unsigned short lcn;
+ struct timeval tv;
+
+
+ x25_udp_pkt_t *x25_udp_pkt;
+ x25_udp_pkt = (x25_udp_pkt_t *)card->u.x.udp_pkt_data;
+
+ dev = card->u.x.udp_dev;
+ chan = dev->priv;
+ lcn = chan->common.lcn;
+
+ switch(x25_udp_pkt->cblock.command) {
+
+ /* XPIPE_ENABLE_TRACE */
+ case XPIPE_ENABLE_TRACING:
+
+ /* XPIPE_GET_TRACE_INFO */
+ case XPIPE_GET_TRACE_INFO:
+
+ /* SET FT1 MODE */
+ case XPIPE_SET_FT1_MODE:
+
+ if(card->u.x.udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_direction_err;
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+
+ /* XPIPE_FT1_READ_STATUS */
+ case XPIPE_FT1_READ_STATUS:
+
+ /* FT1 MONITOR STATUS */
+ case XPIPE_FT1_STATUS_CTRL:
+ if(card->hw.fwid != SFID_X25_508) {
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_type_err;
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if(!udp_mgmt_req_valid) {
+ /* set length to 0 */
+ x25_udp_pkt->cblock.length = 0;
+ /* set return code */
+ x25_udp_pkt->cblock.result = (card->hw.fwid != SFID_X25_508) ? 0x1F : 0xCD;
+
+ } else {
+
+ switch (x25_udp_pkt->cblock.command) {
+
+
+ case XPIPE_FLUSH_DRIVER_STATS:
+ init_x25_channel_struct(chan);
+ init_global_statistics(card);
+ mbox->cmd.length = 0;
+ break;
+
+
+ case XPIPE_DRIVER_STAT_IFSEND:
+ memcpy(x25_udp_pkt->data, &chan->if_send_stat, sizeof(if_send_stat_t));
+ mbox->cmd.length = sizeof(if_send_stat_t);
+ x25_udp_pkt->cblock.length = mbox->cmd.length;
+ break;
+
+ case XPIPE_DRIVER_STAT_INTR:
+ memcpy(&x25_udp_pkt->data[0], &card->statistics, sizeof(global_stats_t));
+ memcpy(&x25_udp_pkt->data[sizeof(global_stats_t)],
+ &chan->rx_intr_stat, sizeof(rx_intr_stat_t));
+
+ mbox->cmd.length = sizeof(global_stats_t) +
+ sizeof(rx_intr_stat_t);
+ x25_udp_pkt->cblock.length = mbox->cmd.length;
+ break;
+
+ case XPIPE_DRIVER_STAT_GEN:
+ memcpy(x25_udp_pkt->data,
+ &chan->pipe_mgmt_stat.UDP_PIPE_mgmt_kmalloc_err,
+ sizeof(pipe_mgmt_stat_t));
+
+ memcpy(&x25_udp_pkt->data[sizeof(pipe_mgmt_stat_t)],
+ &card->statistics, sizeof(global_stats_t));
+
+ x25_udp_pkt->cblock.result = 0;
+			x25_udp_pkt->cblock.length = sizeof(pipe_mgmt_stat_t)+
+						     sizeof(global_stats_t);
+ mbox->cmd.length = x25_udp_pkt->cblock.length;
+ break;
+
+ case XPIPE_ROUTER_UP_TIME:
+ do_gettimeofday(&tv);
+ chan->router_up_time = tv.tv_sec - chan->router_start_time;
+ *(unsigned long *)&x25_udp_pkt->data = chan->router_up_time;
+ x25_udp_pkt->cblock.length = mbox->cmd.length = 4;
+ x25_udp_pkt->cblock.result = 0;
+ break;
+
+ default :
+
+ do {
+ memcpy(&mbox->cmd, &x25_udp_pkt->cblock.command, sizeof(TX25Cmd));
+ if(mbox->cmd.length){
+ memcpy(&mbox->data,
+ (char *)x25_udp_pkt->data,
+ mbox->cmd.length);
+ }
+
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ } while (err && c_retry-- && x25_error(card, err, mbox->cmd.command, 0));
+
+
+ if ( err == CMD_OK ||
+ (err == 1 &&
+ (mbox->cmd.command == 0x06 ||
+ mbox->cmd.command == 0x16) ) ){
+
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_OK;
+ } else {
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_timeout;
+ }
+
+ /* copy the result back to our buffer */
+ memcpy(&x25_udp_pkt->cblock.command, &mbox->cmd, sizeof(TX25Cmd));
+
+ if(mbox->cmd.length) {
+ memcpy(&x25_udp_pkt->data, &mbox->data, mbox->cmd.length);
+ }
+ break;
+
+ } //switch
+
+ }
+
+ /* Fill UDP TTL */
+
+ x25_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
+ len = reply_udp(card->u.x.udp_pkt_data, mbox->cmd.length);
+
+
+ if(card->u.x.udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ err = x25_send(card, lcn, 0, len, card->u.x.udp_pkt_data);
+ if (!err)
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_send_passed;
+ else
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_send_failed;
+
+ } else {
+
+ /* Allocate socket buffer */
+ if((new_skb = dev_alloc_skb(len)) != NULL) {
+ void *buf;
+
+ /* copy data into new_skb */
+ buf = skb_put(new_skb, len);
+ memcpy(buf, card->u.x.udp_pkt_data, len);
+
+ /* Decapsulate packet and pass it up the protocol
+ stack */
+ new_skb->dev = dev;
+
+ if (chan->common.usedby == API)
+ new_skb->protocol = htons(X25_PROT);
+ else
+ new_skb->protocol = htons(ETH_P_IP);
+
+ new_skb->mac.raw = new_skb->data;
+
+ netif_rx(new_skb);
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_stack;
+
+ } else {
+ ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_no_socket;
+ printk(KERN_INFO
+ "%s: UDP mgmt cmnd, no socket buffers available!\n",
+ card->devname);
+ }
+ }
+
+ card->u.x.udp_pkt_lgth = 0;
+
+ return 1;
+}
+
+
+/*==============================================================================
+ * Determine what type of UDP call it is. DRVSTATS or XPIPE8ND ?
+ */
+static int udp_pkt_type( struct sk_buff *skb, sdla_t* card )
+{
+ x25_udp_pkt_t *x25_udp_pkt = (x25_udp_pkt_t *)skb->data;
+
+ if((x25_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
+ (x25_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45) &&
+ (x25_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
+ (x25_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
+
+ if(!strncmp(x25_udp_pkt->wp_mgmt.signature,
+ UDPMGMT_XPIPE_SIGNATURE, 8)){
+ return UDP_XPIPE_TYPE;
+ }else{
+ printk(KERN_INFO "%s: UDP Packet, Failed Signature !\n",
+ card->devname);
+ }
+ }
+
+ return UDP_INVALID_TYPE;
+}
+
+
+/*============================================================================
+ * Reply to UDP Management system.
+ * Return the total length of the reply packet.
+ */
+static int reply_udp( unsigned char *data, unsigned int mbox_len )
+{
+ unsigned short len, udp_length, temp, ip_length;
+ unsigned long ip_temp;
+ int even_bound = 0;
+
+
+ x25_udp_pkt_t *x25_udp_pkt = (x25_udp_pkt_t *)data;
+
+ /* Set length of packet */
+ len = sizeof(ip_pkt_t)+
+ sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+
+ /* fill in UDP reply */
+ x25_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
+
+ /* fill in UDP length */
+ udp_length = sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ mbox_len;
+
+
+ /* put it on an even boundary */
+ if ( udp_length & 0x0001 ) {
+ udp_length += 1;
+ len += 1;
+ even_bound = 1;
+ }
+
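+
+	/* Note: "(x << 8) | (x >> 8)" is a manual 16-bit byte swap, i.e.
+	 * the equivalent of htons() on a little-endian host, so the UDP
+	 * length field below is stored in network byte order. */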
+ temp = (udp_length<<8)|(udp_length>>8);
+ x25_udp_pkt->udp_pkt.udp_length = temp;
+
+ /* swap UDP ports */
+ temp = x25_udp_pkt->udp_pkt.udp_src_port;
+ x25_udp_pkt->udp_pkt.udp_src_port =
+ x25_udp_pkt->udp_pkt.udp_dst_port;
+ x25_udp_pkt->udp_pkt.udp_dst_port = temp;
+
+
+
+ /* add UDP pseudo header */
+ temp = 0x1100;
+ *((unsigned short *)
+ (x25_udp_pkt->data+mbox_len+even_bound)) = temp;
+ temp = (udp_length<<8)|(udp_length>>8);
+ *((unsigned short *)
+ (x25_udp_pkt->data+mbox_len+even_bound+2)) = temp;
+
+ /* calculate UDP checksum */
+ x25_udp_pkt->udp_pkt.udp_checksum = 0;
+
+ x25_udp_pkt->udp_pkt.udp_checksum =
+ calc_checksum(&data[UDP_OFFSET], udp_length+UDP_OFFSET);
+
+ /* fill in IP length */
+ ip_length = len;
+ temp = (ip_length<<8)|(ip_length>>8);
+ x25_udp_pkt->ip_pkt.total_length = temp;
+
+ /* swap IP addresses */
+ ip_temp = x25_udp_pkt->ip_pkt.ip_src_address;
+ x25_udp_pkt->ip_pkt.ip_src_address =
+ x25_udp_pkt->ip_pkt.ip_dst_address;
+ x25_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
+
+
+ /* fill in IP checksum */
+ x25_udp_pkt->ip_pkt.hdr_checksum = 0;
+ x25_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data, sizeof(ip_pkt_t));
+
+ return len;
+} /* reply_udp */
+
+unsigned short calc_checksum (char *data, int len)
+{
+ unsigned short temp;
+ unsigned long sum=0;
+ int i;
+
+ for( i = 0; i <len; i+=2 ) {
+ memcpy(&temp,&data[i],2);
+ sum += (unsigned long)temp;
+ }
+
+ while (sum >> 16 ) {
+ sum = (sum & 0xffffUL) + (sum >> 16);
+ }
+
+ temp = (unsigned short)sum;
+ temp = ~temp;
+
+ if( temp == 0 )
+ temp = 0xffff;
+
+ return temp;
+}
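+
+/*
+ * Worked example (illustrative only): calc_checksum() sums the buffer as
+ * 16-bit words, folds the carries back in and returns the one's
+ * complement.  For the two words 0xFFFF and 0x0002 the sum is 0x10001;
+ * folding gives 0x0001 + 0x0001 = 0x0002 and the complement 0xFFFD is
+ * returned.  A computed value of zero is mapped to 0xFFFF, as required
+ * for the UDP checksum field.
+ */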
+
+/*=============================================================================
+ * Store a UDP management packet for later processing.
+ */
+
+static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
+ struct net_device *dev, struct sk_buff *skb,
+ int lcn)
+{
+ int udp_pkt_stored = 0;
+
+ if(!card->u.x.udp_pkt_lgth && (skb->len <= MAX_LGTH_UDP_MGNT_PKT)){
+ card->u.x.udp_pkt_lgth = skb->len;
+ card->u.x.udp_type = udp_type;
+ card->u.x.udp_pkt_src = udp_pkt_src;
+ card->u.x.udp_lcn = lcn;
+ card->u.x.udp_dev = dev;
+ memcpy(card->u.x.udp_pkt_data, skb->data, skb->len);
+ card->u.x.timer_int_enabled |= TMR_INT_ENABLED_UDP_PKT;
+ udp_pkt_stored = 1;
+
+ }else{
+ printk(KERN_INFO "%s: ERROR: UDP packet not stored for LCN %d\n",
+ card->devname,lcn);
+ }
+
+	/* The packet data has been copied above, so free the skb
+	 * regardless of whether it came from the stack or the network. */
+	dev_kfree_skb_any(skb);
+
+ return(udp_pkt_stored);
+}
+
+
+
+/*=============================================================================
+ * Initialize the x25_channel_t structure.
+ */
+static void init_x25_channel_struct( x25_channel_t *chan )
+{
+ memset(&chan->if_send_stat.if_send_entry,0,sizeof(if_send_stat_t));
+ memset(&chan->rx_intr_stat.rx_intr_no_socket,0,sizeof(rx_intr_stat_t));
+ memset(&chan->pipe_mgmt_stat.UDP_PIPE_mgmt_kmalloc_err,0,sizeof(pipe_mgmt_stat_t));
+}
+
+/*============================================================================
+ * Initialize Global Statistics
+ */
+static void init_global_statistics( sdla_t *card )
+{
+ memset(&card->statistics.isr_entry,0,sizeof(global_stats_t));
+}
+
+
+/*===============================================================
+ * SMP Support
+ * ==============================================================*/
+
+static void S508_S514_lock(sdla_t *card, unsigned long *smp_flags)
+{
+ spin_lock_irqsave(&card->wandev.lock, *smp_flags);
+}
+static void S508_S514_unlock(sdla_t *card, unsigned long *smp_flags)
+{
+ spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
+}
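+
+/*
+ * Minimal usage sketch (illustrative only, assuming a caller that already
+ * holds an sdla_t pointer): the wrappers above simply guard shared card
+ * state with the per-card wandev spinlock.
+ *
+ *	unsigned long smp_flags;
+ *
+ *	S508_S514_lock(card, &smp_flags);
+ *	... touch shared card/board state ...
+ *	S508_S514_unlock(card, &smp_flags);
+ */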
+
+/*===============================================================
+ * x25_timer_routine
+ *
+ *	A more efficient polling routine.  Every half a second
+ *	queue a polling task. We want to do the polling in a
+ *	task and not in the timer, because the timer runs in
+ *	interrupt time.
+ *
+ *	FIXME: Polling should be rethought.
+ *==============================================================*/
+
+static void x25_timer_routine(unsigned long data)
+{
+ sdla_t *card = (sdla_t*)data;
+
+ if (!card->wandev.dev){
+ printk(KERN_INFO "%s: Stopping the X25 Poll Timer: No Dev.\n",
+ card->devname);
+ return;
+ }
+
+ if (card->open_cnt != card->u.x.num_of_ch){
+ printk(KERN_INFO "%s: Stopping the X25 Poll Timer: Interface down.\n",
+ card->devname);
+ return;
+ }
+
+ if (test_bit(PERI_CRIT,&card->wandev.critical)){
+ printk(KERN_INFO "%s: Stopping the X25 Poll Timer: Shutting down.\n",
+ card->devname);
+ return;
+ }
+
+ if (!test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
+ trigger_x25_poll(card);
+ }
+
+ card->u.x.x25_timer.expires=jiffies+(HZ>>1);
+ add_timer(&card->u.x.x25_timer);
+ return;
+}
+
+void disable_comm_shutdown(sdla_t *card)
+{
+ TX25Mbox* mbox = card->mbox;
+ int err;
+
+	/* Turn off interrupts */
+ mbox->data[0] = 0;
+ if (card->hw.fwid == SFID_X25_508){
+ mbox->data[1] = card->hw.irq;
+ mbox->data[2] = 2;
+ mbox->cmd.length = 3;
+ }else {
+ mbox->cmd.length = 1;
+ }
+ mbox->cmd.command = X25_SET_INTERRUPT_MODE;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err)
+		printk(KERN_INFO "INTERRUPT OFF FAILED %x\n",err);
+
+ /* Bring down HDLC */
+ mbox->cmd.command = X25_HDLC_LINK_CLOSE;
+ mbox->cmd.length = 0;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err)
+		printk(KERN_INFO "LINK CLOSE FAILED %x\n",err);
+
+
+	/* Bring down DTR */
+ mbox->data[0] = 0;
+ mbox->data[2] = 0;
+ mbox->data[1] = 0x01;
+ mbox->cmd.length = 3;
+ mbox->cmd.command = X25_SET_GLOBAL_VARS;
+ err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
+ if (err)
+ printk(KERN_INFO "DTR DOWN FAILED %x\n",err);
+
+}
+
+MODULE_LICENSE("GPL");
+
+/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c
new file mode 100644
index 000000000000..c8bc6da57a41
--- /dev/null
+++ b/drivers/net/wan/sdladrv.c
@@ -0,0 +1,2318 @@
+/*****************************************************************************
+* sdladrv.c SDLA Support Module. Main module.
+*
+* This module is a library of common hardware-specific functions
+* used by all Sangoma drivers.
+*
+* Author: Gideon Hack
+*
+* Copyright: (c) 1995-2000 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Mar 20, 2001  Nenad Corbic	 Added the auto_pci_cfg field, to support
+* the PCISLOT #0.
+* Apr 04, 2000 Nenad Corbic Fixed the auto memory detection code.
+* The memory test at address 0xC8000.
+* Mar 09, 2000 Nenad Corbic Added Gideon's Bug Fix: clear pci
+* interrupt flags on initial load.
+* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
+* Updates for Linux 2.2.X kernels.
+* Sep 17, 1998 Jaspreet Singh Updates for linux 2.2.X kernels
+* Dec 20, 1996 Gene Kozin Version 3.0.0. Complete overhaul.
+* Jul 12, 1996 Gene Kozin Changes for Linux 2.0 compatibility.
+* Jun 12, 1996 Gene Kozin Added support for S503 card.
+* Apr 30, 1996 Gene Kozin SDLA hardware interrupt is acknowledged before
+*				calling protocol-specific ISR.
+* Register I/O ports with Linux kernel.
+* Miscellaneous bug fixes.
+* Dec 20, 1995 Gene Kozin Fixed a bug in interrupt routine.
+* Oct 14, 1995 Gene Kozin Initial version.
+*****************************************************************************/
+
+/*****************************************************************************
+ * Notes:
+ * ------
+ * 1. This code is meant to be system-independent (as much as possible).  To
+ *    achieve this, various macros are used to hide system-specific interfaces.
+ * To compile this code, one of the following constants must be defined:
+ *
+ * Platform Define
+ * -------- ------
+ * Linux _LINUX_
+ * SCO Unix _SCO_UNIX_
+ *
+ * 2. Supported adapter types:
+ *
+ * S502A
+ * ES502A (S502E)
+ * S503
+ * S507
+ * S508 (S509)
+ *
+ * 3. S502A Notes:
+ *
+ * There is no separate DPM window enable/disable control in S502A. It
+ *	 opens immediately after a window number is written to the HMCR
+ * register. To close the window, HMCR has to be written a value
+ * ????1111b (e.g. 0x0F or 0xFF).
+ *
+ * S502A DPM window cannot be located at offset E000 (e.g. 0xAE000).
+ *
+ * There should be a delay of ??? before reading back S502A status
+ * register.
+ *
+ * 4. S502E Notes:
+ *
+ * S502E has a h/w bug: although default IRQ line state is HIGH, enabling
+ * interrupts by setting bit 1 of the control register (BASE) to '1'
+ * causes it to go LOW! Therefore, disabling interrupts by setting that
+ * bit to '0' causes low-to-high transition on IRQ line (ghosty
+ * interrupt). The same occurs when disabling CPU by resetting bit 0 of
+ * CPU control register (BASE+3) - see the next note.
+ *
+ * S502E CPU and DPM control is limited:
+ *
+ *	o CPU cannot be stopped independently. Resetting bit 0 of the CPU
+ * control register (BASE+3) shuts the board down entirely, including
+ * DPM;
+ *
+ *	o DPM access cannot be controlled dynamically. Once the CPU is started,
+ * bit 1 of the control register (BASE) is used to enable/disable IRQ,
+ * so that access to shared memory cannot be disabled while CPU is
+ * running.
+ ****************************************************************************/
+
+#define _LINUX_
+
+#if defined(_LINUX_) /****** Linux *******************************/
+
+#include <linux/config.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/module.h> /* support for loadable modules */
+#include <linux/jiffies.h> /* for jiffies, HZ, etc. */
+#include <linux/sdladrv.h> /* API definitions */
+#include <linux/sdlasfm.h> /* SDLA firmware module definitions */
+#include <linux/sdlapci.h> /* SDLA PCI hardware definitions */
+#include <linux/pci.h> /* PCI defines and function prototypes */
+#include <asm/io.h> /* for inb(), outb(), etc. */
+
+#define _INB(port) (inb(port))
+#define _OUTB(port, byte) (outb((byte),(port)))
+#define SYSTEM_TICK jiffies
+
+#include <linux/init.h>
+
+
+#elif defined(_SCO_UNIX_) /****** SCO Unix ****************************/
+
+#if !defined(INKERNEL)
+#error This code MUST be compiled in kernel mode!
+#endif
+#include <sys/sdladrv.h> /* API definitions */
+#include <sys/sdlasfm.h> /* SDLA firmware module definitions */
+#include <sys/inline.h> /* for inb(), outb(), etc. */
+#define _INB(port) (inb(port))
+#define _OUTB(port, byte) (outb((port),(byte)))
+#define SYSTEM_TICK lbolt
+
+#else
+#error Unknown system type!
+#endif
+
+#define MOD_VERSION 3
+#define MOD_RELEASE 0
+
+#define SDLA_IODELAY 100 /* I/O Rd/Wr delay, 10 works for 486DX2-66 */
+#define EXEC_DELAY 20 /* shared memory access delay, mks */
+#define EXEC_TIMEOUT (HZ*2) /* command timeout, in ticks */
+
+/* I/O port address range */
+#define S502A_IORANGE 3
+#define S502E_IORANGE 4
+#define S503_IORANGE 3
+#define S507_IORANGE 4
+#define S508_IORANGE 4
+
+/* Maximum amount of memory */
+#define S502_MAXMEM 0x10000L
+#define S503_MAXMEM 0x10000L
+#define S507_MAXMEM 0x40000L
+#define S508_MAXMEM 0x40000L
+
+/* Minimum amount of memory */
+#define S502_MINMEM 0x8000L
+#define S503_MINMEM 0x8000L
+#define S507_MINMEM 0x20000L
+#define S508_MINMEM 0x20000L
+#define NO_PORT -1
+
+
+
+
+
+/****** Function Prototypes *************************************************/
+
+/* Hardware-specific functions */
+static int sdla_detect (sdlahw_t* hw);
+static int sdla_autodpm (sdlahw_t* hw);
+static int sdla_setdpm (sdlahw_t* hw);
+static int sdla_load (sdlahw_t* hw, sfm_t* sfm, unsigned len);
+static int sdla_init (sdlahw_t* hw);
+static unsigned long sdla_memtest (sdlahw_t* hw);
+static int sdla_bootcfg (sdlahw_t* hw, sfm_info_t* sfminfo);
+static unsigned char make_config_byte (sdlahw_t* hw);
+static int sdla_start (sdlahw_t* hw, unsigned addr);
+
+static int init_s502a (sdlahw_t* hw);
+static int init_s502e (sdlahw_t* hw);
+static int init_s503 (sdlahw_t* hw);
+static int init_s507 (sdlahw_t* hw);
+static int init_s508 (sdlahw_t* hw);
+
+static int detect_s502a (int port);
+static int detect_s502e (int port);
+static int detect_s503 (int port);
+static int detect_s507 (int port);
+static int detect_s508 (int port);
+static int detect_s514 (sdlahw_t* hw);
+static int find_s514_adapter(sdlahw_t* hw, char find_first_S514_card);
+
+/* Miscellaneous functions */
+static void peek_by_4 (unsigned long src, void* buf, unsigned len);
+static void poke_by_4 (unsigned long dest, void* buf, unsigned len);
+static int calibrate_delay (int mks);
+static int get_option_index (unsigned* optlist, unsigned optval);
+static unsigned check_memregion (void* ptr, unsigned len);
+static unsigned test_memregion (void* ptr, unsigned len);
+static unsigned short checksum (unsigned char* buf, unsigned len);
+static int init_pci_slot(sdlahw_t *);
+
+static int pci_probe(sdlahw_t *hw);
+
+/****** Global Data **********************************************************
+ * Note: All data must be explicitly initialized!!!
+ */
+
+static struct pci_device_id sdladrv_pci_tbl[] = {
+ { V3_VENDOR_ID, V3_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, sdladrv_pci_tbl);
+
+MODULE_LICENSE("GPL");
+
+/* private data */
+static char modname[] = "sdladrv";
+static char fullname[] = "SDLA Support Module";
+static char copyright[] = "(c) 1995-1999 Sangoma Technologies Inc.";
+static unsigned exec_idle;
+
+/* Hardware configuration options.
+ * These are arrays of configuration options used by verification routines.
+ * The first element of each array is its size (i.e. number of options).
+ */
+static unsigned s502_port_options[] =
+ { 4, 0x250, 0x300, 0x350, 0x360 }
+;
+static unsigned s503_port_options[] =
+ { 8, 0x250, 0x254, 0x300, 0x304, 0x350, 0x354, 0x360, 0x364 }
+;
+static unsigned s508_port_options[] =
+ { 8, 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390 }
+;
+
+static unsigned s502a_irq_options[] = { 0 };
+static unsigned s502e_irq_options[] = { 4, 2, 3, 5, 7 };
+static unsigned s503_irq_options[] = { 5, 2, 3, 4, 5, 7 };
+static unsigned s508_irq_options[] = { 8, 3, 4, 5, 7, 10, 11, 12, 15 };
+
+static unsigned s502a_dpmbase_options[] =
+{
+ 28,
+ 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000,
+ 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000,
+ 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000,
+ 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000,
+};
+static unsigned s507_dpmbase_options[] =
+{
+ 32,
+ 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
+ 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
+ 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
+ 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000,
+};
+static unsigned s508_dpmbase_options[] = /* incl. S502E and S503 */
+{
+ 32,
+ 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
+ 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
+ 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
+ 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000,
+};
+
+/*
+static unsigned s502_dpmsize_options[] = { 2, 0x2000, 0x10000 };
+static unsigned s507_dpmsize_options[] = { 2, 0x2000, 0x4000 };
+static unsigned s508_dpmsize_options[] = { 1, 0x2000 };
+*/
+
+static unsigned s502a_pclk_options[] = { 2, 3600, 7200 };
+static unsigned s502e_pclk_options[] = { 5, 3600, 5000, 7200, 8000, 10000 };
+static unsigned s503_pclk_options[] = { 3, 7200, 8000, 10000 };
+static unsigned s507_pclk_options[] = { 1, 12288 };
+static unsigned s508_pclk_options[] = { 1, 16000 };
+
+/* Host memory control register masks */
+static unsigned char s502a_hmcr[] =
+{
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, /* A0000 - AC000 */
+ 0x20, 0x22, 0x24, 0x26, 0x28, 0x2A, 0x2C, /* C0000 - CC000 */
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, /* D0000 - DC000 */
+ 0x30, 0x32, 0x34, 0x36, 0x38, 0x3A, 0x3C, /* E0000 - EC000 */
+};
+static unsigned char s502e_hmcr[] =
+{
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E, /* A0000 - AE000 */
+ 0x20, 0x22, 0x24, 0x26, 0x28, 0x2A, 0x2C, 0x2E, /* C0000 - CE000 */
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, /* D0000 - DE000 */
+ 0x30, 0x32, 0x34, 0x36, 0x38, 0x3A, 0x3C, 0x3E, /* E0000 - EE000 */
+};
+static unsigned char s507_hmcr[] =
+{
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, /* A0000 - AE000 */
+ 0x40, 0x42, 0x44, 0x46, 0x48, 0x4A, 0x4C, 0x4E, /* B0000 - BE000 */
+ 0x80, 0x82, 0x84, 0x86, 0x88, 0x8A, 0x8C, 0x8E, /* C0000 - CE000 */
+ 0xC0, 0xC2, 0xC4, 0xC6, 0xC8, 0xCA, 0xCC, 0xCE, /* E0000 - EE000 */
+};
+static unsigned char s508_hmcr[] =
+{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* A0000 - AE000 */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* C0000 - CE000 */
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /* D0000 - DE000 */
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, /* E0000 - EE000 */
+};
+
+static unsigned char s507_irqmask[] =
+{
+ 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xE0
+};
+
+static int pci_slot_ar[MAX_S514_CARDS];
+
+/******* Kernel Loadable Module Entry Points ********************************/
+
+/*============================================================================
+ * Module 'insert' entry point.
+ * o print announcement
+ * o initialize static data
+ * o calibrate SDLA shared memory access delay.
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process
+ */
+
+static int __init sdladrv_init(void)
+{
+ int i=0;
+
+ printk(KERN_INFO "%s v%u.%u %s\n",
+ fullname, MOD_VERSION, MOD_RELEASE, copyright);
+ exec_idle = calibrate_delay(EXEC_DELAY);
+#ifdef WANDEBUG
+ printk(KERN_DEBUG "%s: exec_idle = %d\n", modname, exec_idle);
+#endif
+
+ /* Initialize the PCI Card array, which
+ * will store flags, used to mark
+ * card initialization state */
+ for (i=0; i<MAX_S514_CARDS; i++)
+ pci_slot_ar[i] = 0xFF;
+
+ return 0;
+}
+
+/*============================================================================
+ * Module 'remove' entry point.
+ * o release all remaining system resources
+ */
+static void __exit sdladrv_cleanup(void)
+{
+}
+
+module_init(sdladrv_init);
+module_exit(sdladrv_cleanup);
+
+/******* Kernel APIs ********************************************************/
+
+/*============================================================================
+ * Set up adapter.
+ * o detect adapter type
+ * o verify hardware configuration options
+ * o check for hardware conflicts
+ * o set up adapter shared memory
+ * o test adapter memory
+ * o load firmware
+ * Return: 0 ok.
+ * < 0 error
+ */
+
+EXPORT_SYMBOL(sdla_setup);
+
+int sdla_setup (sdlahw_t* hw, void* sfm, unsigned len)
+{
+ unsigned* irq_opt = NULL; /* IRQ options */
+ unsigned* dpmbase_opt = NULL; /* DPM window base options */
+ unsigned* pclk_opt = NULL; /* CPU clock rate options */
+ int err=0;
+
+ if (sdla_detect(hw)) {
+ if(hw->type != SDLA_S514)
+ printk(KERN_INFO "%s: no SDLA card found at port 0x%X\n",
+ modname, hw->port);
+ return -EINVAL;
+ }
+
+ if(hw->type != SDLA_S514) {
+ printk(KERN_INFO "%s: found S%04u card at port 0x%X.\n",
+ modname, hw->type, hw->port);
+
+ hw->dpmsize = SDLA_WINDOWSIZE;
+ switch (hw->type) {
+ case SDLA_S502A:
+ hw->io_range = S502A_IORANGE;
+ irq_opt = s502a_irq_options;
+ dpmbase_opt = s502a_dpmbase_options;
+ pclk_opt = s502a_pclk_options;
+ break;
+
+ case SDLA_S502E:
+ hw->io_range = S502E_IORANGE;
+ irq_opt = s502e_irq_options;
+ dpmbase_opt = s508_dpmbase_options;
+ pclk_opt = s502e_pclk_options;
+ break;
+
+ case SDLA_S503:
+ hw->io_range = S503_IORANGE;
+ irq_opt = s503_irq_options;
+ dpmbase_opt = s508_dpmbase_options;
+ pclk_opt = s503_pclk_options;
+ break;
+
+ case SDLA_S507:
+ hw->io_range = S507_IORANGE;
+ irq_opt = s508_irq_options;
+ dpmbase_opt = s507_dpmbase_options;
+ pclk_opt = s507_pclk_options;
+ break;
+
+ case SDLA_S508:
+ hw->io_range = S508_IORANGE;
+ irq_opt = s508_irq_options;
+ dpmbase_opt = s508_dpmbase_options;
+ pclk_opt = s508_pclk_options;
+ break;
+ }
+
+ /* Verify IRQ configuration options */
+ if (!get_option_index(irq_opt, hw->irq)) {
+ printk(KERN_INFO "%s: IRQ %d is invalid!\n",
+ modname, hw->irq);
+ return -EINVAL;
+ }
+
+ /* Verify CPU clock rate configuration options */
+ if (hw->pclk == 0)
+ hw->pclk = pclk_opt[1]; /* use default */
+
+ else if (!get_option_index(pclk_opt, hw->pclk)) {
+ printk(KERN_INFO "%s: CPU clock %u is invalid!\n",
+ modname, hw->pclk);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: assuming CPU clock rate of %u kHz.\n",
+ modname, hw->pclk);
+
+ /* Setup adapter dual-port memory window and test memory */
+ if (hw->dpmbase == 0) {
+ err = sdla_autodpm(hw);
+ if (err) {
+ printk(KERN_INFO
+ "%s: can't find available memory region!\n",
+ modname);
+ return err;
+ }
+ }
+ else if (!get_option_index(dpmbase_opt,
+ virt_to_phys(hw->dpmbase))) {
+ printk(KERN_INFO
+ "%s: memory address 0x%lX is invalid!\n",
+ modname, virt_to_phys(hw->dpmbase));
+ return -EINVAL;
+ }
+ else if (sdla_setdpm(hw)) {
+ printk(KERN_INFO
+ "%s: 8K memory region at 0x%lX is not available!\n",
+ modname, virt_to_phys(hw->dpmbase));
+ return -EINVAL;
+ }
+ printk(KERN_INFO
+ "%s: dual-port memory window is set at 0x%lX.\n",
+ modname, virt_to_phys(hw->dpmbase));
+
+
+		/* If we find memory in the 0xE**** memory region,
+		 * warn the user to disable the SHADOW RAM,
+		 * since memory corruption can occur if SHADOW RAM is
+		 * enabled. This can cause random crashes! */
+ if (virt_to_phys(hw->dpmbase) >= 0xE0000){
+ printk(KERN_WARNING "\n%s: !!!!!!!! WARNING !!!!!!!!\n",modname);
+ printk(KERN_WARNING "%s: WANPIPE is using 0x%lX memory region !!!\n",
+ modname, virt_to_phys(hw->dpmbase));
+ printk(KERN_WARNING " Please disable the SHADOW RAM, otherwise\n");
+ printk(KERN_WARNING " your system might crash randomly from time to time !\n");
+ printk(KERN_WARNING "%s: !!!!!!!! WARNING !!!!!!!!\n\n",modname);
+ }
+ }
+
+ else {
+ hw->memory = test_memregion((void*)hw->dpmbase,
+ MAX_SIZEOF_S514_MEMORY);
+ if(hw->memory < (256 * 1024)) {
+ printk(KERN_INFO
+ "%s: error in testing S514 memory (0x%lX)\n",
+ modname, hw->memory);
+ sdla_down(hw);
+ return -EINVAL;
+ }
+ }
+
+ printk(KERN_INFO "%s: found %luK bytes of on-board memory\n",
+ modname, hw->memory / 1024);
+
+ /* Load firmware. If loader fails then shut down adapter */
+ err = sdla_load(hw, sfm, len);
+ if (err) sdla_down(hw); /* shutdown adapter */
+
+ return err;
+}
+
+/*============================================================================
+ * Shut down SDLA: disable shared memory access and interrupts, stop CPU, etc.
+ */
+
+EXPORT_SYMBOL(sdla_down);
+
+int sdla_down (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int i;
+ unsigned char CPU_no;
+ u32 int_config, int_status;
+
+ if(!port && (hw->type != SDLA_S514))
+ return -EFAULT;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ _OUTB(port, 0x08); /* halt CPU */
+ _OUTB(port, 0x08);
+ _OUTB(port, 0x08);
+ hw->regs[0] = 0x08;
+ _OUTB(port + 1, 0xFF); /* close memory window */
+ hw->regs[1] = 0xFF;
+ break;
+
+ case SDLA_S502E:
+ _OUTB(port + 3, 0); /* stop CPU */
+ _OUTB(port, 0); /* reset board */
+		for (i = 0; i < S502E_IORANGE; ++i)
+			hw->regs[i] = 0;
+ break;
+
+ case SDLA_S503:
+ case SDLA_S507:
+ case SDLA_S508:
+ _OUTB(port, 0); /* reset board logic */
+ hw->regs[0] = 0;
+ break;
+
+ case SDLA_S514:
+ /* halt the adapter */
+ *(char *)hw->vector = S514_CPU_HALT;
+ CPU_no = hw->S514_cpu_no[0];
+
+ /* disable the PCI IRQ and disable memory access */
+ pci_read_config_dword(hw->pci_dev, PCI_INT_CONFIG, &int_config);
+ int_config &= (CPU_no == S514_CPU_A) ? ~PCI_DISABLE_IRQ_CPU_A : ~PCI_DISABLE_IRQ_CPU_B;
+ pci_write_config_dword(hw->pci_dev, PCI_INT_CONFIG, int_config);
+ read_S514_int_stat(hw, &int_status);
+ S514_intack(hw, int_status);
+ if(CPU_no == S514_CPU_A)
+ pci_write_config_dword(hw->pci_dev, PCI_MAP0_DWORD,
+ PCI_CPU_A_MEM_DISABLE);
+ else
+ pci_write_config_dword(hw->pci_dev, PCI_MAP1_DWORD,
+ PCI_CPU_B_MEM_DISABLE);
+
+ /* free up the allocated virtual memory */
+ iounmap((void *)hw->dpmbase);
+ iounmap((void *)hw->vector);
+ break;
+
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Map shared memory window into SDLA address space.
+ */
+
+EXPORT_SYMBOL(sdla_mapmem);
+
+int sdla_mapmem (sdlahw_t* hw, unsigned long addr)
+{
+ unsigned port = hw->port;
+ register int tmp;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ case SDLA_S502E:
+ if (addr < S502_MAXMEM) { /* verify parameter */
+ tmp = addr >> 13; /* convert to register mask */
+ _OUTB(port + 2, tmp);
+ hw->regs[2] = tmp;
+ }
+ else return -EINVAL;
+ break;
+
+ case SDLA_S503:
+ if (addr < S503_MAXMEM) { /* verify parameter */
+ tmp = (hw->regs[0] & 0x8F) | ((addr >> 9) & 0x70);
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp;
+ }
+ else return -EINVAL;
+ break;
+
+ case SDLA_S507:
+ if (addr < S507_MAXMEM) {
+ if (!(_INB(port) & 0x02))
+ return -EIO;
+ tmp = addr >> 13; /* convert to register mask */
+ _OUTB(port + 2, tmp);
+ hw->regs[2] = tmp;
+ }
+ else return -EINVAL;
+ break;
+
+ case SDLA_S508:
+ if (addr < S508_MAXMEM) {
+ tmp = addr >> 13; /* convert to register mask */
+ _OUTB(port + 2, tmp);
+ hw->regs[2] = tmp;
+ }
+ else return -EINVAL;
+ break;
+
+ case SDLA_S514:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+ hw->vector = addr & 0xFFFFE000L;
+ return 0;
+}
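+
+/* Worked example (illustrative only, not part of the driver): for the S508
+ * case above, mapping adapter address 0xC100 gives
+ *
+ *	tmp        = 0xC100 >> 13;		(register value 6)
+ *	hw->vector = 0xC100 & 0xFFFFE000L;	(window vector 0xC000)
+ *
+ * so the requested byte appears at offset 0x100 within the 8K dual-port
+ * memory window.
+ */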
+
+/*============================================================================
+ * Enable interrupt generation.
+ */
+
+EXPORT_SYMBOL(sdla_inten);
+
+int sdla_inten (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ switch (hw->type) {
+ case SDLA_S502E:
+		/* Note that interrupt control operations on S502E are allowed
+ * only if CPU is enabled (bit 0 of status register is set).
+ */
+ if (_INB(port) & 0x01) {
+ _OUTB(port, 0x02); /* bit1 = 1, bit2 = 0 */
+ _OUTB(port, 0x06); /* bit1 = 1, bit2 = 1 */
+ hw->regs[0] = 0x06;
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S503:
+ tmp = hw->regs[0] | 0x04;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (!(_INB(port) & 0x02)) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S508:
+ tmp = hw->regs[0] | 0x10;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (!(_INB(port + 1) & 0x10)) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S502A:
+ case SDLA_S507:
+ break;
+
+ case SDLA_S514:
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Disable interrupt generation.
+ */
+
+EXPORT_SYMBOL(sdla_intde);
+
+int sdla_intde (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ switch (hw->type) {
+ case SDLA_S502E:
+ /* Notes:
+ * 1) interrupt control operations are allowed only if CPU is
+ * enabled (bit 0 of status register is set).
+ * 2) disabling interrupts using bit 1 of control register
+	 * causes the IRQ line to go high; therefore we use
+	 * 0x04 instead: lower it to inhibit interrupts to the PC.
+ */
+ if (_INB(port) & 0x01) {
+ _OUTB(port, hw->regs[0] & ~0x04);
+ hw->regs[0] &= ~0x04;
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S503:
+ tmp = hw->regs[0] & ~0x04;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) & 0x02) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S508:
+ tmp = hw->regs[0] & ~0x10;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) & 0x10) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S502A:
+ case SDLA_S507:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Acknowledge SDLA hardware interrupt.
+ */
+
+EXPORT_SYMBOL(sdla_intack);
+
+int sdla_intack (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp;
+
+ switch (hw->type) {
+ case SDLA_S502E:
+		/* To acknowledge a hardware interrupt we have to toggle
+		 * bit 2 (0x04) of the control register: \_/
+		 * Note that interrupt control operations on S502E are allowed
+		 * only if CPU is enabled (bit 0 of status register is set).
+ */
+ if (_INB(port) & 0x01) {
+ tmp = hw->regs[0] & ~0x04;
+ _OUTB(port, tmp);
+ tmp |= 0x04;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp;
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S503:
+ if (_INB(port) & 0x04) {
+ tmp = hw->regs[0] & ~0x08;
+ _OUTB(port, tmp);
+ tmp |= 0x08;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp;
+ }
+ break;
+
+ case SDLA_S502A:
+ case SDLA_S507:
+ case SDLA_S508:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+/*============================================================================
+ * Acknowledge S514 hardware interrupt.
+ */
+
+EXPORT_SYMBOL(S514_intack);
+
+void S514_intack (sdlahw_t* hw, u32 int_status)
+{
+ pci_write_config_dword(hw->pci_dev, PCI_INT_STATUS, int_status);
+}
+
+
+/*============================================================================
+ * Read the S514 hardware interrupt status.
+ */
+
+EXPORT_SYMBOL(read_S514_int_stat);
+
+void read_S514_int_stat (sdlahw_t* hw, u32* int_status)
+{
+ pci_read_config_dword(hw->pci_dev, PCI_INT_STATUS, int_status);
+}
+
+
+/*============================================================================
+ * Generate an interrupt to adapter's CPU.
+ */
+
+EXPORT_SYMBOL(sdla_intr);
+
+int sdla_intr (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ if (!(_INB(port) & 0x40)) {
+ _OUTB(port, 0x10); /* issue NMI to CPU */
+ hw->regs[0] = 0x10;
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S507:
+ if ((_INB(port) & 0x06) == 0x06) {
+ _OUTB(port + 3, 0);
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S508:
+ if (_INB(port + 1) & 0x02) {
+ _OUTB(port, 0x08);
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S502E:
+ case SDLA_S503:
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Execute Adapter Command.
+ * o Set exec flag.
+ * o Busy-wait until flag is reset.
+ * o Return number of loops made, or 0 if command timed out.
+ */
+
+EXPORT_SYMBOL(sdla_exec);
+
+int sdla_exec (void* opflag)
+{
+ volatile unsigned char* flag = opflag;
+ unsigned long tstop;
+ int nloops;
+
+ if(readb(flag) != 0x00) {
+ printk(KERN_INFO
+ "WANPIPE: opp flag set on entry to sdla_exec\n");
+ return 0;
+ }
+
+ writeb(0x01, flag);
+
+ tstop = SYSTEM_TICK + EXEC_TIMEOUT;
+
+ for (nloops = 1; (readb(flag) == 0x01); ++ nloops) {
+ unsigned delay = exec_idle;
+ while (-- delay); /* delay */
+ if (SYSTEM_TICK > tstop) return 0; /* time is up! */
+ }
+ return nloops;
+}
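+
+/* Usage sketch (illustrative only; the mailbox layout shown here is
+ * hypothetical): a protocol module typically writes a command into the
+ * shared-memory mailbox and then kicks the adapter via the opp flag, e.g.
+ *
+ *	writeb(cmd, &mbox[0x01]);		(command byte)
+ *	if (!sdla_exec(&mbox[0x00]))		(opp flag at offset 0)
+ *		printk(KERN_INFO "wanpipe: command timed out\n");
+ *
+ * sdla_exec() sets the flag itself and returns the number of polling loops
+ * made, or 0 on timeout.
+ */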
+
+/*============================================================================
+ * Read absolute adapter memory.
+ * Transfer data from adapter's memory to data buffer.
+ *
+ * Note:
+ * Care should be taken when crossing dual-port memory window boundary.
+ * This function is not atomic, so caller must disable interrupt if
+ * interrupt routines are accessing adapter shared memory.
+ */
+
+EXPORT_SYMBOL(sdla_peek);
+
+int sdla_peek (sdlahw_t* hw, unsigned long addr, void* buf, unsigned len)
+{
+
+ if (addr + len > hw->memory) /* verify arguments */
+ return -EINVAL;
+
+ if(hw->type == SDLA_S514) { /* copy data for the S514 adapter */
+ peek_by_4 ((unsigned long)hw->dpmbase + addr, buf, len);
+ return 0;
+ }
+
+ else { /* copy data for the S508 adapter */
+ unsigned long oldvec = hw->vector;
+ unsigned winsize = hw->dpmsize;
+ unsigned curpos, curlen; /* current offset and block size */
+ unsigned long curvec; /* current DPM window vector */
+ int err = 0;
+
+ while (len && !err) {
+ curpos = addr % winsize; /* current window offset */
+ curvec = addr - curpos; /* current window vector */
+ curlen = (len > (winsize - curpos)) ?
+ (winsize - curpos) : len;
+ /* Relocate window and copy block of data */
+ err = sdla_mapmem(hw, curvec);
+ peek_by_4 ((unsigned long)hw->dpmbase + curpos, buf,
+ curlen);
+ addr += curlen;
+ buf = (char*)buf + curlen;
+ len -= curlen;
+ }
+
+ /* Restore DPM window position */
+ sdla_mapmem(hw, oldvec);
+ return err;
+ }
+}
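+
+/* Worked example (illustrative): with an 8K window (winsize == 0x2000), a
+ * read of 0x200 bytes starting at addr == 0x1F00 is split by the loop above
+ * into two passes:
+ *
+ *	pass 1: curvec = 0x0000, curpos = 0x1F00, curlen = 0x100
+ *	pass 2: curvec = 0x2000, curpos = 0x0000, curlen = 0x100
+ *
+ * which is why the original window vector is saved and restored around the
+ * copy.
+ */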
+
+
+/*============================================================================
+ * Read data from adapter's memory to a data buffer in 4-byte chunks.
+ * Note that we ensure that the SDLA memory address is on a 4-byte boundary
+ * before we begin moving the data in 4-byte chunks.
+*/
+
+static void peek_by_4 (unsigned long src, void* buf, unsigned len)
+{
+
+ /* byte copy data until we get to a 4-byte boundary */
+ while (len && (src & 0x03)) {
+ *(char *)buf ++ = readb(src ++);
+ len --;
+ }
+
+ /* copy data in 4-byte chunks */
+ while (len >= 4) {
+ *(unsigned long *)buf = readl(src);
+ buf += 4;
+ src += 4;
+ len -= 4;
+ }
+
+ /* byte copy any remaining data */
+ while (len) {
+ *(char *)buf ++ = readb(src ++);
+ len --;
+ }
+}
+
+
+/*============================================================================
+ * Write Absolute Adapter Memory.
+ * Transfer data from data buffer to adapter's memory.
+ *
+ * Note:
+ * Care should be taken when crossing dual-port memory window boundary.
+ * This function is not atomic, so caller must disable interrupt if
+ * interrupt routines are accessing adapter shared memory.
+ */
+
+EXPORT_SYMBOL(sdla_poke);
+
+int sdla_poke (sdlahw_t* hw, unsigned long addr, void* buf, unsigned len)
+{
+
+ if (addr + len > hw->memory) /* verify arguments */
+ return -EINVAL;
+
+ if(hw->type == SDLA_S514) { /* copy data for the S514 adapter */
+ poke_by_4 ((unsigned long)hw->dpmbase + addr, buf, len);
+ return 0;
+ }
+
+ else { /* copy data for the S508 adapter */
+ unsigned long oldvec = hw->vector;
+ unsigned winsize = hw->dpmsize;
+ unsigned curpos, curlen; /* current offset and block size */
+ unsigned long curvec; /* current DPM window vector */
+ int err = 0;
+
+ while (len && !err) {
+ curpos = addr % winsize; /* current window offset */
+ curvec = addr - curpos; /* current window vector */
+ curlen = (len > (winsize - curpos)) ?
+ (winsize - curpos) : len;
+ /* Relocate window and copy block of data */
+ sdla_mapmem(hw, curvec);
+ poke_by_4 ((unsigned long)hw->dpmbase + curpos, buf,
+ curlen);
+ addr += curlen;
+ buf = (char*)buf + curlen;
+ len -= curlen;
+ }
+
+ /* Restore DPM window position */
+ sdla_mapmem(hw, oldvec);
+ return err;
+ }
+}
+
+
+/*============================================================================
+ * Write from a data buffer to adapter's memory in 4-byte chunks.
+ * Note that we ensure that the SDLA memory address is on a 4-byte boundary
+ * before we begin moving the data in 4-byte chunks.
+*/
+
+static void poke_by_4 (unsigned long dest, void* buf, unsigned len)
+{
+
+ /* byte copy data until we get to a 4-byte boundary */
+ while (len && (dest & 0x03)) {
+ writeb (*(char *)buf ++, dest ++);
+ len --;
+ }
+
+ /* copy data in 4-byte chunks */
+ while (len >= 4) {
+ writel (*(unsigned long *)buf, dest);
+ dest += 4;
+ buf += 4;
+ len -= 4;
+ }
+
+ /* byte copy any remaining data */
+ while (len) {
+ writeb (*(char *)buf ++ , dest ++);
+ len --;
+ }
+}
+
+
+
+/****** Hardware-Specific Functions *****************************************/
+
+/*============================================================================
+ * Detect adapter type.
+ * o if adapter type is specified then call detection routine for that adapter
+ *   type. Otherwise call detection routines for every adapter type until
+ * adapter is detected.
+ *
+ * Notes:
+ * 1) Detection tests are destructive! Adapter will be left in shutdown state
+ * after the test.
+ */
+static int sdla_detect (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int err = 0;
+
+ if (!port && (hw->type != SDLA_S514))
+ return -EFAULT;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ if (!detect_s502a(port)) err = -ENODEV;
+ break;
+
+ case SDLA_S502E:
+ if (!detect_s502e(port)) err = -ENODEV;
+ break;
+
+ case SDLA_S503:
+ if (!detect_s503(port)) err = -ENODEV;
+ break;
+
+ case SDLA_S507:
+ if (!detect_s507(port)) err = -ENODEV;
+ break;
+
+ case SDLA_S508:
+ if (!detect_s508(port)) err = -ENODEV;
+ break;
+
+ case SDLA_S514:
+ if (!detect_s514(hw)) err = -ENODEV;
+ break;
+
+ default:
+ if (detect_s502a(port))
+ hw->type = SDLA_S502A;
+ else if (detect_s502e(port))
+ hw->type = SDLA_S502E;
+ else if (detect_s503(port))
+ hw->type = SDLA_S503;
+ else if (detect_s507(port))
+ hw->type = SDLA_S507;
+ else if (detect_s508(port))
+ hw->type = SDLA_S508;
+ else err = -ENODEV;
+ }
+ return err;
+}
+
+/*============================================================================
+ * Autoselect memory region.
+ * o try all available DPM address options from the top down until success.
+ */
+static int sdla_autodpm (sdlahw_t* hw)
+{
+ int i, err = -EINVAL;
+ unsigned* opt;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ opt = s502a_dpmbase_options;
+ break;
+
+ case SDLA_S502E:
+ case SDLA_S503:
+ case SDLA_S508:
+ opt = s508_dpmbase_options;
+ break;
+
+ case SDLA_S507:
+ opt = s507_dpmbase_options;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Start testing from 8th position, address
+ * 0xC8000 from the 508 address table.
+ * We don't want to test A**** addresses, since
+ * they are usually used for Video */
+ for (i = 8; i <= opt[0] && err; i++) {
+ hw->dpmbase = phys_to_virt(opt[i]);
+ err = sdla_setdpm(hw);
+ }
+ return err;
+}
+
+/*============================================================================
+ * Set up adapter dual-port memory window.
+ * o shut down adapter
+ * o make sure that no physical memory exists in this region, i.e. the entire
+ * region reads 0xFF and is not writable when adapter is shut down.
+ * o initialize adapter hardware
+ * o make sure that region is usable with SDLA card, i.e. we can write to it
+ * when adapter is configured.
+ */
+static int sdla_setdpm (sdlahw_t* hw)
+{
+ int err;
+
+ /* Shut down card and verify memory region */
+ sdla_down(hw);
+ if (check_memregion(hw->dpmbase, hw->dpmsize))
+ return -EINVAL;
+
+ /* Initialize adapter and test on-board memory segment by segment.
+ * If memory size appears to be less than shared memory window size,
+ * assume that memory region is unusable.
+ */
+ err = sdla_init(hw);
+ if (err) return err;
+
+ if (sdla_memtest(hw) < hw->dpmsize) { /* less than window size */
+ sdla_down(hw);
+ return -EIO;
+ }
+ sdla_mapmem(hw, 0L); /* set window vector at bottom */
+ return 0;
+}
+
+/*============================================================================
+ * Load adapter from the memory image of the SDLA firmware module.
+ * o verify firmware integrity and compatibility
+ * o start adapter up
+ */
+static int sdla_load (sdlahw_t* hw, sfm_t* sfm, unsigned len)
+{
+
+ int i;
+
+ /* Verify firmware signature */
+ if (strcmp(sfm->signature, SFM_SIGNATURE)) {
+ printk(KERN_INFO "%s: not SDLA firmware!\n",
+ modname);
+ return -EINVAL;
+ }
+
+ /* Verify firmware module format version */
+ if (sfm->version != SFM_VERSION) {
+ printk(KERN_INFO
+ "%s: firmware format %u rejected! Expecting %u.\n",
+ modname, sfm->version, SFM_VERSION);
+ return -EINVAL;
+ }
+
+ /* Verify firmware module length and checksum */
+ if ((len - offsetof(sfm_t, image) != sfm->info.codesize) ||
+ (checksum((void*)&sfm->info,
+ sizeof(sfm_info_t) + sfm->info.codesize) != sfm->checksum)) {
+ printk(KERN_INFO "%s: firmware corrupted!\n", modname);
+ return -EINVAL;
+ }
+
+ /* Announce */
+ printk(KERN_INFO "%s: loading %s (ID=%u)...\n", modname,
+ (sfm->descr[0] != '\0') ? sfm->descr : "unknown firmware",
+ sfm->info.codeid);
+
+ if(hw->type == SDLA_S514)
+ printk(KERN_INFO "%s: loading S514 adapter, CPU %c\n",
+ modname, hw->S514_cpu_no[0]);
+
+ /* Scan through the list of compatible adapters and make sure our
+ * adapter type is listed.
+ */
+ for (i = 0;
+ (i < SFM_MAX_SDLA) && (sfm->info.adapter[i] != hw->type);
+ ++i);
+
+ if (i == SFM_MAX_SDLA) {
+ printk(KERN_INFO "%s: firmware is not compatible with S%u!\n",
+ modname, hw->type);
+ return -EINVAL;
+ }
+
+
+ /* Make sure there is enough on-board memory */
+ if (hw->memory < sfm->info.memsize) {
+ printk(KERN_INFO
+ "%s: firmware needs %lu bytes of on-board memory!\n",
+ modname, sfm->info.memsize);
+ return -EINVAL;
+ }
+
+ /* Move code onto adapter */
+ if (sdla_poke(hw, sfm->info.codeoffs, sfm->image, sfm->info.codesize)) {
+ printk(KERN_INFO "%s: failed to load code segment!\n",
+ modname);
+ return -EIO;
+ }
+
+ /* Prepare boot-time configuration data and kick-off CPU */
+ sdla_bootcfg(hw, &sfm->info);
+ if (sdla_start(hw, sfm->info.startoffs)) {
+ printk(KERN_INFO "%s: Damn... Adapter won't start!\n",
+ modname);
+ return -EIO;
+ }
+
+ /* position DPM window over the mailbox and enable interrupts */
+ if (sdla_mapmem(hw, sfm->info.winoffs) || sdla_inten(hw)) {
+ printk(KERN_INFO "%s: adapter hardware failure!\n",
+ modname);
+ return -EIO;
+ }
+ hw->fwid = sfm->info.codeid; /* set firmware ID */
+ return 0;
+}
+
+/*============================================================================
+ * Initialize SDLA hardware: setup memory window, IRQ, etc.
+ */
+static int sdla_init (sdlahw_t* hw)
+{
+ int i;
+
+ for (i = 0; i < SDLA_MAXIORANGE; ++i)
+ hw->regs[i] = 0;
+
+ switch (hw->type) {
+ case SDLA_S502A: return init_s502a(hw);
+ case SDLA_S502E: return init_s502e(hw);
+ case SDLA_S503: return init_s503(hw);
+ case SDLA_S507: return init_s507(hw);
+ case SDLA_S508: return init_s508(hw);
+ }
+ return -EINVAL;
+}
+
+/*============================================================================
+ * Test adapter on-board memory.
+ * o slide DPM window from the bottom up and test adapter memory segment by
+ * segment.
+ * Return adapter memory size.
+ */
+static unsigned long sdla_memtest (sdlahw_t* hw)
+{
+ unsigned long memsize;
+ unsigned winsize;
+
+ for (memsize = 0, winsize = hw->dpmsize;
+ !sdla_mapmem(hw, memsize) &&
+ (test_memregion(hw->dpmbase, winsize) == winsize)
+ ;
+ memsize += winsize)
+ ;
+ hw->memory = memsize;
+ return memsize;
+}
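+
+/* Example (illustrative): with an 8K window and 128K of good on-board
+ * memory, the loop above relocates the window 16 times (vectors 0x0000,
+ * 0x2000, ..., 0x1E000), the 17th test fails, and 0x20000 (128K) is
+ * returned and stored in hw->memory.
+ */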
+
+/*============================================================================
+ * Prepare boot-time firmware configuration data.
+ * o position DPM window
+ * o initialize configuration data area
+ */
+static int sdla_bootcfg (sdlahw_t* hw, sfm_info_t* sfminfo)
+{
+ unsigned char* data;
+
+ if (!sfminfo->datasize) return 0; /* nothing to do */
+
+ if (sdla_mapmem(hw, sfminfo->dataoffs) != 0)
+ return -EIO;
+
+ if(hw->type == SDLA_S514)
+ data = (void*)(hw->dpmbase + sfminfo->dataoffs);
+ else
+ data = (void*)((u8 *)hw->dpmbase +
+ (sfminfo->dataoffs - hw->vector));
+
+ memset_io (data, 0, sfminfo->datasize);
+
+ writeb (make_config_byte(hw), &data[0x00]);
+
+ switch (sfminfo->codeid) {
+ case SFID_X25_502:
+ case SFID_X25_508:
+ writeb (3, &data[0x01]); /* T1 timer */
+ writeb (10, &data[0x03]); /* N2 */
+ writeb (7, &data[0x06]); /* HDLC window size */
+ writeb (1, &data[0x0B]); /* DTE */
+ writeb (2, &data[0x0C]); /* X.25 packet window size */
+ writew (128, &data[0x0D]); /* default X.25 data size */
+ writew (128, &data[0x0F]); /* maximum X.25 data size */
+ break;
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Prepare configuration byte identifying adapter type and CPU clock rate.
+ */
+static unsigned char make_config_byte (sdlahw_t* hw)
+{
+ unsigned char byte = 0;
+
+ switch (hw->pclk) {
+ case 5000: byte = 0x01; break;
+ case 7200: byte = 0x02; break;
+ case 8000: byte = 0x03; break;
+ case 10000: byte = 0x04; break;
+ case 16000: byte = 0x05; break;
+ }
+
+ switch (hw->type) {
+ case SDLA_S502E: byte |= 0x80; break;
+ case SDLA_S503: byte |= 0x40; break;
+ }
+ return byte;
+}
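+
+/* Example (illustrative): an S503 adapter with an 8 MHz CPU clock
+ * (hw->pclk == 8000) yields a configuration byte of 0x03 | 0x40 == 0x43.
+ */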
+
+/*============================================================================
+ * Start adapter's CPU.
+ * o calculate a pointer to adapter's cold boot entry point
+ * o position DPM window
+ * o place boot instruction (jp addr) at cold boot entry point
+ * o start CPU
+ */
+static int sdla_start (sdlahw_t* hw, unsigned addr)
+{
+ unsigned port = hw->port;
+ unsigned char *bootp;
+ int err, tmp, i;
+
+ if (!port && (hw->type != SDLA_S514)) return -EFAULT;
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ bootp = hw->dpmbase;
+ bootp += 0x66;
+ break;
+
+ case SDLA_S502E:
+ case SDLA_S503:
+ case SDLA_S507:
+ case SDLA_S508:
+ case SDLA_S514:
+ bootp = hw->dpmbase;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = sdla_mapmem(hw, 0);
+ if (err) return err;
+
+ writeb (0xC3, bootp); /* Z80: 'jp' opcode */
+ bootp ++;
+ writew (addr, bootp);
+
+ switch (hw->type) {
+ case SDLA_S502A:
+ _OUTB(port, 0x10); /* issue NMI to CPU */
+ hw->regs[0] = 0x10;
+ break;
+
+ case SDLA_S502E:
+ _OUTB(port + 3, 0x01); /* start CPU */
+ hw->regs[3] = 0x01;
+ for (i = 0; i < SDLA_IODELAY; ++i);
+ if (_INB(port) & 0x01) { /* verify */
+ /*
+ * Enabling CPU changes functionality of the
+ * control register, so we have to reset its
+ * mirror.
+ */
+ _OUTB(port, 0); /* disable interrupts */
+ hw->regs[0] = 0;
+ }
+ else return -EIO;
+ break;
+
+ case SDLA_S503:
+ tmp = hw->regs[0] | 0x09; /* set bits 0 and 3 */
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i);
+ if (!(_INB(port) & 0x01)) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S507:
+ tmp = hw->regs[0] | 0x02;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i);
+ if (!(_INB(port) & 0x04)) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S508:
+ tmp = hw->regs[0] | 0x02;
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i);
+ if (!(_INB(port + 1) & 0x02)) /* verify */
+ return -EIO;
+ break;
+
+ case SDLA_S514:
+ writeb (S514_CPU_START, hw->vector);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Initialize S502A adapter.
+ */
+static int init_s502a (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ if (!detect_s502a(port))
+ return -ENODEV;
+
+ hw->regs[0] = 0x08;
+ hw->regs[1] = 0xFF;
+
+ /* Verify configuration options */
+ i = get_option_index(s502a_dpmbase_options, virt_to_phys(hw->dpmbase));
+ if (i == 0)
+ return -EINVAL;
+
+ tmp = s502a_hmcr[i - 1];
+ switch (hw->dpmsize) {
+ case 0x2000:
+ tmp |= 0x01;
+ break;
+
+ case 0x10000L:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Setup dual-port memory window (this also enables memory access) */
+ _OUTB(port + 1, tmp);
+ hw->regs[1] = tmp;
+ hw->regs[0] = 0x08;
+ return 0;
+}
+
+/*============================================================================
+ * Initialize S502E adapter.
+ */
+static int init_s502e (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ if (!detect_s502e(port))
+ return -ENODEV;
+
+ /* Verify configuration options */
+ i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
+ if (i == 0)
+ return -EINVAL;
+
+ tmp = s502e_hmcr[i - 1];
+ switch (hw->dpmsize) {
+ case 0x2000:
+ tmp |= 0x01;
+ break;
+
+ case 0x10000L:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Setup dual-port memory window */
+ _OUTB(port + 1, tmp);
+ hw->regs[1] = tmp;
+
+ /* Enable memory access */
+ _OUTB(port, 0x02);
+ hw->regs[0] = 0x02;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ return (_INB(port) & 0x02) ? 0 : -EIO;
+}
+
+/*============================================================================
+ * Initialize S503 adapter.
+ */
+static int init_s503 (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ if (!detect_s503(port))
+ return -ENODEV;
+
+ /* Verify configuration options */
+ i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
+ if (i == 0)
+ return -EINVAL;
+
+ tmp = s502e_hmcr[i - 1];
+ switch (hw->dpmsize) {
+ case 0x2000:
+ tmp |= 0x01;
+ break;
+
+ case 0x10000L:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Setup dual-port memory window */
+ _OUTB(port + 1, tmp);
+ hw->regs[1] = tmp;
+
+ /* Enable memory access */
+ _OUTB(port, 0x02);
+ hw->regs[0] = 0x02; /* update mirror */
+ return 0;
+}
+
+/*============================================================================
+ * Initialize S507 adapter.
+ */
+static int init_s507 (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ if (!detect_s507(port))
+ return -ENODEV;
+
+ /* Verify configuration options */
+ i = get_option_index(s507_dpmbase_options, virt_to_phys(hw->dpmbase));
+ if (i == 0)
+ return -EINVAL;
+
+ tmp = s507_hmcr[i - 1];
+ switch (hw->dpmsize) {
+ case 0x2000:
+ tmp |= 0x01;
+ break;
+
+ case 0x10000L:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable adapter's logic */
+ _OUTB(port, 0x01);
+ hw->regs[0] = 0x01;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (!(_INB(port) & 0x20))
+ return -EIO;
+
+ /* Setup dual-port memory window */
+ _OUTB(port + 1, tmp);
+ hw->regs[1] = tmp;
+
+ /* Enable memory access */
+ tmp = hw->regs[0] | 0x04;
+ if (hw->irq) {
+ i = get_option_index(s508_irq_options, hw->irq);
+ if (i) tmp |= s507_irqmask[i - 1];
+ }
+ _OUTB(port, tmp);
+ hw->regs[0] = tmp; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ return (_INB(port) & 0x08) ? 0 : -EIO;
+}
+
+/*============================================================================
+ * Initialize S508 adapter.
+ */
+static int init_s508 (sdlahw_t* hw)
+{
+ unsigned port = hw->port;
+ int tmp, i;
+
+ if (!detect_s508(port))
+ return -ENODEV;
+
+ /* Verify configuration options */
+ i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
+ if (i == 0)
+ return -EINVAL;
+
+ /* Setup memory configuration */
+ tmp = s508_hmcr[i - 1];
+ _OUTB(port + 1, tmp);
+ hw->regs[1] = tmp;
+
+ /* Enable memory access */
+ _OUTB(port, 0x04);
+ hw->regs[0] = 0x04; /* update mirror */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ return (_INB(port + 1) & 0x04) ? 0 : -EIO;
+}
+
+/*============================================================================
+ * Detect S502A adapter.
+ * Following tests are used to detect S502A adapter:
+ * 1. All registers other than status (BASE) should read 0xFF
+ * 2. After writing 00001000b to control register, status register should
+ * read 01000000b.
+ * 3. After writing 0 to control register, status register should still
+ * read 01000000b.
+ * 4. After writing 00000100b to control register, status register should
+ * read 01000100b.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s502a (int port)
+{
+ int i, j;
+
+ if (!get_option_index(s502_port_options, port))
+ return 0;
+
+ for (j = 1; j < SDLA_MAXIORANGE; ++j) {
+ if (_INB(port + j) != 0xFF)
+ return 0;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ }
+
+ _OUTB(port, 0x08); /* halt CPU */
+ _OUTB(port, 0x08);
+ _OUTB(port, 0x08);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0x40)
+ return 0;
+ _OUTB(port, 0x00);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0x40)
+ return 0;
+ _OUTB(port, 0x04);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0x44)
+ return 0;
+
+ /* Reset adapter */
+ _OUTB(port, 0x08);
+ _OUTB(port, 0x08);
+ _OUTB(port, 0x08);
+ _OUTB(port + 1, 0xFF);
+ return 1;
+}
+
+/*============================================================================
+ * Detect S502E adapter.
+ * Following tests are used to verify adapter presence:
+ * 1. All registers other than status (BASE) should read 0xFF.
+ * 2. After writing 0 to CPU control register (BASE+3), status register
+ * (BASE) should read 11111000b.
+ * 3. After writing 00000100b to port BASE (set bit 2), status register
+ * (BASE) should read 11111100b.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s502e (int port)
+{
+ int i, j;
+
+ if (!get_option_index(s502_port_options, port))
+ return 0;
+ for (j = 1; j < SDLA_MAXIORANGE; ++j) {
+ if (_INB(port + j) != 0xFF)
+ return 0;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ }
+
+ _OUTB(port + 3, 0); /* CPU control reg. */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0xF8) /* read status */
+ return 0;
+ _OUTB(port, 0x04); /* set bit 2 */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0xFC) /* verify */
+ return 0;
+
+ /* Reset adapter */
+ _OUTB(port, 0);
+ return 1;
+}
+
+/*============================================================================
+ * Detect s503 adapter.
+ * Following tests are used to verify adapter presence:
+ * 1. All registers other than status (BASE) should read 0xFF.
+ * 2. After writing 0 to control register (BASE), status register (BASE)
+ * should read 11110000b.
+ * 3. After writing 00000100b (set bit 2) to control register (BASE),
+ * status register should read 11110010b.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s503 (int port)
+{
+ int i, j;
+
+ if (!get_option_index(s503_port_options, port))
+ return 0;
+ for (j = 1; j < SDLA_MAXIORANGE; ++j) {
+ if (_INB(port + j) != 0xFF)
+ return 0;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ }
+
+ _OUTB(port, 0); /* reset control reg.*/
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0xF0) /* read status */
+ return 0;
+ _OUTB(port, 0x04); /* set bit 2 */
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if (_INB(port) != 0xF2) /* verify */
+ return 0;
+
+ /* Reset adapter */
+ _OUTB(port, 0);
+ return 1;
+}
+
+/*============================================================================
+ * Detect s507 adapter.
+ * Following tests are used to detect s507 adapter:
+ * 1. All ports should read the same value.
+ * 2. After writing 0x00 to control register, status register should read
+ * ?011000?b.
+ * 3. After writing 0x01 to control register, status register should read
+ * ?011001?b.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s507 (int port)
+{
+ int tmp, i, j;
+
+ if (!get_option_index(s508_port_options, port))
+ return 0;
+ tmp = _INB(port);
+ for (j = 1; j < S507_IORANGE; ++j) {
+ if (_INB(port + j) != tmp)
+ return 0;
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ }
+
+ _OUTB(port, 0x00);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if ((_INB(port) & 0x7E) != 0x30)
+ return 0;
+ _OUTB(port, 0x01);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if ((_INB(port) & 0x7E) != 0x32)
+ return 0;
+
+ /* Reset adapter */
+ _OUTB(port, 0x00);
+ return 1;
+}
+
+/*============================================================================
+ * Detect s508 adapter.
+ * Following tests are used to detect s508 adapter:
+ * 1. After writing 0x00 to control register, status register should read
+ * ??000000b.
+ * 2. After writing 0x10 to control register, status register should read
+ * ??010000b
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s508 (int port)
+{
+ int i;
+
+ if (!get_option_index(s508_port_options, port))
+ return 0;
+ _OUTB(port, 0x00);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if ((_INB(port + 1) & 0x3F) != 0x00)
+ return 0;
+ _OUTB(port, 0x10);
+ for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
+ if ((_INB(port + 1) & 0x3F) != 0x10)
+ return 0;
+
+ /* Reset adapter */
+ _OUTB(port, 0x00);
+ return 1;
+}
+
+/*============================================================================
+ * Detect s514 PCI adapter.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test.
+ */
+static int detect_s514 (sdlahw_t* hw)
+{
+ unsigned char CPU_no, slot_no, auto_slot_cfg;
+ int number_S514_cards = 0;
+ u32 S514_mem_base_addr = 0;
+ u32 ut_u32;
+ struct pci_dev *pci_dev;
+
+
+#ifndef CONFIG_PCI
+ printk(KERN_INFO "%s: Linux not compiled for PCI usage!\n", modname);
+ return 0;
+#endif
+
+ /*
+ The 'setup()' procedure in 'sdlamain.c' passes the CPU number and the
+ slot number defined in 'router.conf' via the 'port' definition.
+ */
+ CPU_no = hw->S514_cpu_no[0];
+ slot_no = hw->S514_slot_no;
+ auto_slot_cfg = hw->auto_pci_cfg;
+
+ if (auto_slot_cfg){
+ printk(KERN_INFO "%s: srch... S514 card, CPU %c, Slot=Auto\n",
+ modname, CPU_no);
+
+ }else{
+ printk(KERN_INFO "%s: srch... S514 card, CPU %c, Slot #%d\n",
+ modname, CPU_no, slot_no);
+ }
+
+ /* check to see that CPU A or B has been selected in 'router.conf' */
+ switch(CPU_no) {
+ case S514_CPU_A:
+ case S514_CPU_B:
+ break;
+
+ default:
+ printk(KERN_INFO "%s: S514 CPU definition invalid.\n",
+ modname);
+ printk(KERN_INFO "Must be 'A' or 'B'\n");
+ return 0;
+ }
+
+ number_S514_cards = find_s514_adapter(hw, 0);
+ if(!number_S514_cards)
+ return 0;
+
+ /* we are using a single S514 adapter with a slot of 0 so re-read the */
+ /* location of this adapter */
+ if((number_S514_cards == 1) && auto_slot_cfg) {
+ number_S514_cards = find_s514_adapter(hw, 1);
+ if(!number_S514_cards) {
+ printk(KERN_INFO "%s: Error finding PCI card\n",
+ modname);
+ return 0;
+ }
+ }
+
+ pci_dev = hw->pci_dev;
+ /* read the physical memory base address */
+ S514_mem_base_addr = (CPU_no == S514_CPU_A) ?
+ (pci_dev->resource[1].start) :
+ (pci_dev->resource[2].start);
+
+ printk(KERN_INFO "%s: S514 PCI memory at 0x%X\n",
+ modname, S514_mem_base_addr);
+ if(!S514_mem_base_addr) {
+ if(CPU_no == S514_CPU_B)
+ printk(KERN_INFO "%s: CPU #B not present on the card\n", modname);
+ else
+ printk(KERN_INFO "%s: No PCI memory allocated to card\n", modname);
+ return 0;
+ }
+
+ /* enable the PCI memory */
+ pci_read_config_dword(pci_dev,
+ (CPU_no == S514_CPU_A) ? PCI_MAP0_DWORD : PCI_MAP1_DWORD,
+ &ut_u32);
+ pci_write_config_dword(pci_dev,
+ (CPU_no == S514_CPU_A) ? PCI_MAP0_DWORD : PCI_MAP1_DWORD,
+ (ut_u32 | PCI_MEMORY_ENABLE));
+
+ /* check the IRQ allocated and enable IRQ usage */
+ if(!(hw->irq = pci_dev->irq)) {
+ printk(KERN_INFO "%s: IRQ not allocated to S514 adapter\n",
+ modname);
+ return 0;
+ }
+
+ /* BUG FIX : Mar 6 2000
+	 * On an initial loading of the card, we must check
+ * and clear PCI interrupt bits, due to a reset
+ * problem on some other boards. i.e. An interrupt
+ * might be pending, even after system bootup,
+ * in which case, when starting wanrouter the machine
+ * would crash.
+ */
+ if (init_pci_slot(hw))
+ return 0;
+
+ pci_read_config_dword(pci_dev, PCI_INT_CONFIG, &ut_u32);
+ ut_u32 |= (CPU_no == S514_CPU_A) ?
+ PCI_ENABLE_IRQ_CPU_A : PCI_ENABLE_IRQ_CPU_B;
+ pci_write_config_dword(pci_dev, PCI_INT_CONFIG, ut_u32);
+
+ printk(KERN_INFO "%s: IRQ %d allocated to the S514 card\n",
+ modname, hw->irq);
+
+ /* map the physical PCI memory to virtual memory */
+	hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr,
+		(unsigned long)MAX_SIZEOF_S514_MEMORY);
+ /* map the physical control register memory to virtual memory */
+ hw->vector = (unsigned long)ioremap(
+ (unsigned long)(S514_mem_base_addr + S514_CTRL_REG_BYTE),
+ (unsigned long)16);
+
+ if(!hw->dpmbase || !hw->vector) {
+ printk(KERN_INFO "%s: PCI virtual memory allocation failed\n",
+ modname);
+ return 0;
+ }
+
+ /* halt the adapter */
+ writeb (S514_CPU_HALT, hw->vector);
+
+ return 1;
+}
+
+/*============================================================================
+ * Find the S514 PCI adapter in the PCI bus.
+ * Return the number of S514 adapters found (0 if no adapter found).
+ */
+static int find_s514_adapter(sdlahw_t* hw, char find_first_S514_card)
+{
+ unsigned char slot_no;
+ int number_S514_cards = 0;
+ char S514_found_in_slot = 0;
+ u16 PCI_subsys_vendor;
+
+ struct pci_dev *pci_dev = NULL;
+
+ slot_no = hw->S514_slot_no;
+
+ while ((pci_dev = pci_find_device(V3_VENDOR_ID, V3_DEVICE_ID, pci_dev))
+ != NULL) {
+
+ pci_read_config_word(pci_dev, PCI_SUBSYS_VENDOR_WORD,
+ &PCI_subsys_vendor);
+
+ if(PCI_subsys_vendor != SANGOMA_SUBSYS_VENDOR)
+ continue;
+
+ hw->pci_dev = pci_dev;
+
+ if(find_first_S514_card)
+ return(1);
+
+ number_S514_cards ++;
+
+ printk(KERN_INFO
+ "%s: S514 card found, slot #%d (devfn 0x%X)\n",
+ modname, ((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
+ pci_dev->devfn);
+
+ if (hw->auto_pci_cfg){
+ hw->S514_slot_no = ((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK);
+ slot_no = hw->S514_slot_no;
+
+ }else if (((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK) == slot_no){
+ S514_found_in_slot = 1;
+ break;
+ }
+ }
+
+ /* if no S514 adapter has been found, then exit */
+ if (!number_S514_cards) {
+ printk(KERN_INFO "%s: Error, no S514 adapters found\n", modname);
+ return 0;
+ }
+	/* if more than one S514 card has been found, then the user must have */
+	/* defined a slot number so that the correct adapter is used */
+ else if ((number_S514_cards > 1) && hw->auto_pci_cfg) {
+ printk(KERN_INFO "%s: Error, PCI Slot autodetect Failed! \n"
+ "%s: More than one S514 adapter found.\n"
+ "%s: Disable the Autodetect feature and supply\n"
+ "%s: the PCISLOT numbers for each card.\n",
+ modname,modname,modname,modname);
+ return 0;
+ }
+ /* if the user has specified a slot number and the S514 adapter has */
+ /* not been found in that slot, then exit */
+ else if (!hw->auto_pci_cfg && !S514_found_in_slot) {
+ printk(KERN_INFO
+ "%s: Error, S514 card not found in specified slot #%d\n",
+ modname, slot_no);
+ return 0;
+ }
+
+ return (number_S514_cards);
+}
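+
+/* Note (illustrative): pci_dev->devfn packs the PCI slot number in bits 7..3
+ * and the function number in bits 2..0, so ((devfn >> 3) & PCI_DEV_SLOT_MASK)
+ * above is equivalent to the standard PCI_SLOT() macro; e.g. devfn 0x68
+ * corresponds to slot 13, function 0.
+ */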
+
+
+
+/******* Miscellaneous ******************************************************/
+
+/*============================================================================
+ * Calibrate SDLA memory access delay.
+ * Count number of idle loops made within 1 second and then calculate the
+ * number of loops that should be made to achieve the desired delay.
+ */
+static int calibrate_delay (int mks)
+{
+ unsigned int delay;
+ unsigned long stop;
+
+ for (delay = 0, stop = SYSTEM_TICK + HZ; SYSTEM_TICK < stop; ++delay);
+ return (delay/(1000000L/mks) + 1);
+}
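+
+/* Example (illustrative): if the loop above counts 5,000,000 iterations in
+ * one second and a 2 microsecond delay is requested (mks == 2), the function
+ * returns 5000000 / (1000000 / 2) + 1 == 11 loops.
+ */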
+
+/*============================================================================
+ * Get option's index into the options list.
+ * Return option's index (1 .. N) or zero if option is invalid.
+ */
+static int get_option_index (unsigned* optlist, unsigned optval)
+{
+ int i;
+
+ for (i = 1; i <= optlist[0]; ++i)
+ if ( optlist[i] == optval)
+ return i;
+ return 0;
+}
+
+/*============================================================================
+ * Check memory region to see if it's available.
+ * Return: 0 ok.
+ */
+static unsigned check_memregion (void* ptr, unsigned len)
+{
+ volatile unsigned char* p = ptr;
+
+ for (; len && (readb (p) == 0xFF); --len, ++p) {
+ writeb (0, p); /* attempt to write 0 */
+ if (readb(p) != 0xFF) { /* still has to read 0xFF */
+ writeb (0xFF, p);/* restore original value */
+ break; /* not good */
+ }
+ }
+
+ return len;
+}
+
+/*============================================================================
+ * Test memory region.
+ * Return: size of the region that passed the test.
+ * Note: Region size must be multiple of 2 !
+ */
+static unsigned test_memregion (void* ptr, unsigned len)
+{
+ volatile unsigned short* w_ptr;
+ unsigned len_w = len >> 1; /* region len in words */
+ unsigned i;
+
+ for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
+ writew (0xAA55, w_ptr);
+
+ for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
+ if (readw (w_ptr) != 0xAA55) {
+ len_w = i;
+ break;
+ }
+
+ for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
+ writew (0x55AA, w_ptr);
+
+ for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
+ if (readw(w_ptr) != 0x55AA) {
+ len_w = i;
+ break;
+ }
+
+ for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
+ writew (0, w_ptr);
+
+ return len_w << 1;
+}
+
+/*============================================================================
+ * Calculate 16-bit CRC using CCITT polynomial.
+ */
+static unsigned short checksum (unsigned char* buf, unsigned len)
+{
+ unsigned short crc = 0;
+ unsigned mask, flag;
+
+ for (; len; --len, ++buf) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ flag = (crc & 0x8000);
+ crc <<= 1;
+ crc |= ((*buf & mask) ? 1 : 0);
+ if (flag) crc ^= 0x1021;
+ }
+ }
+ return crc;
+}
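+
+/* Usage sketch (illustrative): this is the same CRC that sdla_load() checks
+ * against the firmware header, e.g.
+ *
+ *	if (checksum((void *)&sfm->info,
+ *	    sizeof(sfm_info_t) + sfm->info.codesize) != sfm->checksum)
+ *		return -EINVAL;		(corrupted image)
+ */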
+
+static int init_pci_slot(sdlahw_t *hw)
+{
+
+ u32 int_status;
+ int volatile found=0;
+ int i=0;
+
+ /* Check if this is a very first load for a specific
+	 * pci card. If it is, clear the interrupt bits, and
+ * set the flag indicating that this card was initialized.
+ */
+
+ for (i=0; (i<MAX_S514_CARDS) && !found; i++){
+ if (pci_slot_ar[i] == hw->S514_slot_no){
+ found=1;
+ break;
+ }
+ if (pci_slot_ar[i] == 0xFF){
+ break;
+ }
+ }
+
+ if (!found){
+ read_S514_int_stat(hw,&int_status);
+ S514_intack(hw,int_status);
+ if (i == MAX_S514_CARDS){
+ printk(KERN_INFO "%s: Critical Error !!!\n",modname);
+ printk(KERN_INFO
+ "%s: Number of Sangoma PCI cards exceeded maximum limit.\n",
+ modname);
+ printk(KERN_INFO "Please contact Sangoma Technologies\n");
+ return 1;
+ }
+ pci_slot_ar[i] = hw->S514_slot_no;
+ }
+ return 0;
+}
+
+static int pci_probe(sdlahw_t *hw)
+{
+
+ unsigned char slot_no;
+ int number_S514_cards = 0;
+ u16 PCI_subsys_vendor;
+ u16 PCI_card_type;
+
+ struct pci_dev *pci_dev = NULL;
+ struct pci_bus *bus = NULL;
+
+ slot_no = 0;
+
+ while ((pci_dev = pci_find_device(V3_VENDOR_ID, V3_DEVICE_ID, pci_dev))
+ != NULL) {
+
+ pci_read_config_word(pci_dev, PCI_SUBSYS_VENDOR_WORD,
+ &PCI_subsys_vendor);
+
+ if(PCI_subsys_vendor != SANGOMA_SUBSYS_VENDOR)
+ continue;
+
+ pci_read_config_word(pci_dev, PCI_CARD_TYPE,
+ &PCI_card_type);
+
+ bus = pci_dev->bus;
+
+ /* A dual cpu card can support up to 4 physical connections,
+ * where a single cpu card can support up to 2 physical
+ * connections. The FT1 card can only support a single
+ * connection, however we cannot distinguish between a Single
+ * CPU card and an FT1 card. */
+ if (PCI_card_type == S514_DUAL_CPU){
+ number_S514_cards += 4;
+ printk(KERN_INFO
+ "wanpipe: S514-PCI card found, cpu(s) 2, bus #%d, slot #%d, irq #%d\n",
+ bus->number,((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
+ pci_dev->irq);
+ }else{
+ number_S514_cards += 2;
+ printk(KERN_INFO
+ "wanpipe: S514-PCI card found, cpu(s) 1, bus #%d, slot #%d, irq #%d\n",
+ bus->number,((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
+ pci_dev->irq);
+ }
+ }
+
+ return number_S514_cards;
+
+}
+
+
+
+EXPORT_SYMBOL(wanpipe_hw_probe);
+
+unsigned wanpipe_hw_probe(void)
+{
+ sdlahw_t hw;
+ unsigned* opt = s508_port_options;
+ unsigned cardno=0;
+ int i;
+
+ memset(&hw, 0, sizeof(hw));
+
+ for (i = 1; i <= opt[0]; i++) {
+ if (detect_s508(opt[i])){
+ /* S508 card can support up to two physical links */
+ cardno+=2;
+ printk(KERN_INFO "wanpipe: S508-ISA card found, port 0x%x\n",opt[i]);
+ }
+ }
+
+ #ifdef CONFIG_PCI
+ hw.S514_slot_no = 0;
+ cardno += pci_probe(&hw);
+ #else
+ printk(KERN_INFO "wanpipe: Warning, Kernel not compiled for PCI support!\n");
+ printk(KERN_INFO "wanpipe: PCI Hardware Probe Failed!\n");
+ #endif
+
+ return cardno;
+}
+
+/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
new file mode 100644
index 000000000000..74e151acef3e
--- /dev/null
+++ b/drivers/net/wan/sdlamain.c
@@ -0,0 +1,1341 @@
+/****************************************************************************
+* sdlamain.c WANPIPE(tm) Multiprotocol WAN Link Driver. Main module.
+*
+* Author: Nenad Corbic <ncorbic@sangoma.com>
+* Gideon Hack
+*
+* Copyright: (c) 1995-2000 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Dec 22, 2000 Nenad Corbic Updated for 2.4.X kernels.
+* Removed the polling routine.
+* Nov 13, 2000 Nenad Corbic Added hw probing on module load and dynamic
+* device allocation.
+* Nov 7, 2000 Nenad Corbic Fixed the Multi-Port PPP for kernels
+* 2.2.16 and above.
+* Aug 2, 2000 Nenad Corbic Block the Multi-Port PPP from running on
+* kernels 2.2.16 or greater. The SyncPPP
+* has changed.
+* Jul 25, 2000  Nenad Corbic	Updated the Piggyback support for Multi-Port PPP.
+* Jul 13, 2000 Nenad Corbic Added Multi-PPP support.
+* Feb 02, 2000 Nenad Corbic Fixed up piggyback probing and selection.
+* Sep 23, 1999 Nenad Corbic Added support for SMP
+* Sep 13, 1999 Nenad Corbic Each port is treated as a separate device.
+* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
+* Updates for Linux 2.2.X kernels.
+* Sep 17, 1998 Jaspreet Singh Updated for 2.1.121+ kernel
+* Nov 28, 1997 Jaspreet Singh Changed DRV_RELEASE to 1
+* Nov 10, 1997 Jaspreet Singh Changed sti() to restore_flags();
+* Nov 06, 1997 Jaspreet Singh Changed DRV_VERSION to 4 and DRV_RELEASE to 0
+* Oct 20, 1997 Jaspreet Singh Modified sdla_isr routine so that card->in_isr
+* assignments are taken out and placed in the
+* sdla_ppp.c, sdla_fr.c and sdla_x25.c isr
+* routines. Took out 'wandev->tx_int_enabled' and
+* replaced it with 'wandev->enable_tx_int'.
+* May 29, 1997 Jaspreet Singh Flow Control Problem
+* added "wandev->tx_int_enabled=1" line in the
+* init module. This line initializes the flag for
+* preventing Interrupt disabled with device set to
+* busy
+* Jan 15, 1997 Gene Kozin Version 3.1.0
+* o added UDP management stuff
+* Jan 02, 1997 Gene Kozin Initial version.
+*****************************************************************************/
+
+#include <linux/config.h> /* OS configuration options */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/init.h>
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/module.h> /* support for loadable modules */
+#include <linux/ioport.h> /* request_region(), release_region() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+
+#include <linux/in.h>
+#include <asm/io.h> /* phys_to_virt() */
+#include <linux/pci.h>
+#include <linux/sdlapci.h>
+#include <linux/if_wanpipe_common.h>
+
+#include <asm/uaccess.h> /* kernel <-> user copy */
+#include <linux/inetdevice.h>
+
+#include <linux/ip.h>
+#include <net/route.h>
+
+#define KMEM_SAFETYZONE 8
+
+
+#ifndef CONFIG_WANPIPE_FR
+ #define wpf_init(a,b) (-EPROTONOSUPPORT)
+#endif
+
+#ifndef CONFIG_WANPIPE_CHDLC
+ #define wpc_init(a,b) (-EPROTONOSUPPORT)
+#endif
+
+#ifndef CONFIG_WANPIPE_X25
+ #define wpx_init(a,b) (-EPROTONOSUPPORT)
+#endif
+
+#ifndef CONFIG_WANPIPE_PPP
+ #define wpp_init(a,b) (-EPROTONOSUPPORT)
+#endif
+
+#ifndef CONFIG_WANPIPE_MULTPPP
+ #define wsppp_init(a,b) (-EPROTONOSUPPORT)
+#endif
+
+
+/***********FOR DEBUGGING PURPOSES*********************************************
+static void * dbg_kmalloc(unsigned int size, int prio, int line) {
+ int i = 0;
+ void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
+ char * c1 = v;
+ c1 += sizeof(unsigned int);
+ *((unsigned int *)v) = size;
+
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
+ c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
+ c1 += 8;
+ }
+ c1 += size;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
+ c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
+ c1 += 8;
+ }
+ v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
+ printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v);
+ return v;
+}
+static void dbg_kfree(void * v, int line) {
+ unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
+ unsigned int size = *sp;
+ char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
+ int i = 0;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
+ || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
+ printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
+ printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
+ c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
+ }
+ c1 += 8;
+ }
+ c1 += size;
+ for (i = 0; i < KMEM_SAFETYZONE; i++) {
+ if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
+ || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
+ ) {
+ printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
+ printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
+ c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
+ }
+ c1 += 8;
+ }
+ printk(KERN_INFO "line %d kfree(%p)\n",line,v);
+ v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
+ kfree(v);
+}
+
+#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
+#define kfree(x) dbg_kfree(x,__LINE__)
+******************************************************************************/
+
+
+
+/****** Defines & Macros ****************************************************/
+
+#ifdef _DEBUG_
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#define DRV_VERSION 5 /* version number */
+#define DRV_RELEASE 0 /* release (minor version) number */
+#define MAX_CARDS 16 /* max number of adapters */
+
+#ifndef CONFIG_WANPIPE_CARDS /* configurable option */
+#define CONFIG_WANPIPE_CARDS 1
+#endif
+
+#define CMD_OK 0 /* normal firmware return code */
+#define CMD_TIMEOUT 0xFF /* firmware command timed out */
+#define MAX_CMD_RETRY 10 /* max number of firmware retries */
+/****** Function Prototypes *************************************************/
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+/* WAN link driver entry points */
+static int setup(struct wan_device* wandev, wandev_conf_t* conf);
+static int shutdown(struct wan_device* wandev);
+static int ioctl(struct wan_device* wandev, unsigned cmd, unsigned long arg);
+
+/* IOCTL handlers */
+static int ioctl_dump (sdla_t* card, sdla_dump_t* u_dump);
+static int ioctl_exec (sdla_t* card, sdla_exec_t* u_exec, int);
+
+/* Miscellaneous functions */
+STATIC irqreturn_t sdla_isr (int irq, void* dev_id, struct pt_regs *regs);
+static void release_hw (sdla_t *card);
+
+static int check_s508_conflicts (sdla_t* card,wandev_conf_t* conf, int*);
+static int check_s514_conflicts (sdla_t* card,wandev_conf_t* conf, int*);
+
+
+/****** Global Data **********************************************************
+ * Note: All data must be explicitly initialized!!!
+ */
+
+/* private data */
+static char drvname[] = "wanpipe";
+static char fullname[] = "WANPIPE(tm) Multiprotocol Driver";
+static char copyright[] = "(c) 1995-2000 Sangoma Technologies Inc.";
+static int ncards;
+static sdla_t* card_array; /* adapter data space */
+
+/* Wanpipe's own workqueue, used for all APIs.
+ * All protocol specific tasks will be inserted
+ * into the "wanpipe_wq" workqueue.
+ *
+ * The kernel workqueue mechanism will execute
+ * all pending tasks in the "wanpipe_wq" workqueue.
+ */
+
+struct workqueue_struct *wanpipe_wq;
+DECLARE_WORK(wanpipe_work, NULL, NULL);
+
+static int wanpipe_bh_critical;
+
+/******* Kernel Loadable Module Entry Points ********************************/
+
+/*============================================================================
+ * Module 'insert' entry point.
+ * o print announcement
+ * o allocate adapter data space
+ * o initialize static data
+ * o register all cards with WAN router
+ * o calibrate SDLA shared memory access delay.
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process
+ */
+
+static int __init wanpipe_init(void)
+{
+ int cnt, err = 0;
+
+ printk(KERN_INFO "%s v%u.%u %s\n",
+ fullname, DRV_VERSION, DRV_RELEASE, copyright);
+
+ wanpipe_wq = create_workqueue("wanpipe_wq");
+ if (!wanpipe_wq)
+ return -ENOMEM;
+
+ /* Probe for wanpipe cards and return the number found */
+ printk(KERN_INFO "wanpipe: Probing for WANPIPE hardware.\n");
+ ncards = wanpipe_hw_probe();
+ if (ncards){
+ printk(KERN_INFO "wanpipe: Allocating maximum %i devices: wanpipe%i - wanpipe%i.\n",ncards,1,ncards);
+ }else{
+ printk(KERN_INFO "wanpipe: No S514/S508 cards found, unloading modules!\n");
+ destroy_workqueue(wanpipe_wq);
+ return -ENODEV;
+ }
+
+ /* Verify number of cards and allocate adapter data space */
+ card_array = kmalloc(sizeof(sdla_t) * ncards, GFP_KERNEL);
+ if (card_array == NULL) {
+ destroy_workqueue(wanpipe_wq);
+ return -ENOMEM;
+ }
+
+ memset(card_array, 0, sizeof(sdla_t) * ncards);
+
+ /* Register adapters with WAN router */
+ for (cnt = 0; cnt < ncards; ++ cnt) {
+ sdla_t* card = &card_array[cnt];
+ struct wan_device* wandev = &card->wandev;
+
+ card->next = NULL;
+ sprintf(card->devname, "%s%d", drvname, cnt + 1);
+ wandev->magic = ROUTER_MAGIC;
+ wandev->name = card->devname;
+ wandev->private = card;
+ wandev->enable_tx_int = 0;
+ wandev->setup = &setup;
+ wandev->shutdown = &shutdown;
+ wandev->ioctl = &ioctl;
+ err = register_wan_device(wandev);
+ if (err) {
+ printk(KERN_INFO
+ "%s: %s registration failed with error %d!\n",
+ drvname, card->devname, err);
+ break;
+ }
+ }
+ if (cnt){
+ ncards = cnt; /* adjust actual number of cards */
+ }else {
+ kfree(card_array);
+ destroy_workqueue(wanpipe_wq);
+ printk(KERN_INFO "IN Init Module: NO Cards registered\n");
+ err = -ENODEV;
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Module 'remove' entry point.
+ * o unregister all adapters from the WAN router
+ * o release all remaining system resources
+ */
+static void __exit wanpipe_cleanup(void)
+{
+ int i;
+
+ if (!ncards)
+ return;
+
+ for (i = 0; i < ncards; ++i) {
+ sdla_t* card = &card_array[i];
+ unregister_wan_device(card->devname);
+ }
+ destroy_workqueue(wanpipe_wq);
+ kfree(card_array);
+
+ printk(KERN_INFO "\nwanpipe: WANPIPE Modules Unloaded.\n");
+}
+
+module_init(wanpipe_init);
+module_exit(wanpipe_cleanup);
+
+/******* WAN Device Driver Entry Points *************************************/
+
+/*============================================================================
+ * Setup/configure WAN link driver.
+ * o check adapter state
+ * o make sure firmware is present in configuration
+ * o make sure I/O port and IRQ are specified
+ * o make sure I/O region is available
+ * o allocate interrupt vector
+ * o setup SDLA hardware
+ * o call appropriate routine to perform protocol-specific initialization
+ * o mark I/O region as used
+ * o if this is the first active card, then schedule background task
+ *
+ * This function is called when router handles ROUTER_SETUP IOCTL. The
+ * configuration structure is in kernel memory (including extended data, if
+ * any).
+ */
+
+static int setup(struct wan_device* wandev, wandev_conf_t* conf)
+{
+ sdla_t* card;
+ int err = 0;
+ int irq=0;
+
+ /* Sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL) || (conf == NULL)){
+		printk(KERN_INFO
+		       "%s: Failed sdlamain setup: invalid wandev, card or conf pointer!\n",
+		       drvname);
+ return -EFAULT;
+ }
+
+ printk(KERN_INFO "%s: Starting WAN Setup\n", wandev->name);
+
+ card = wandev->private;
+ if (wandev->state != WAN_UNCONFIGURED){
+ printk(KERN_INFO "%s: failed sdlamain setup, busy!\n",
+ wandev->name);
+ return -EBUSY; /* already configured */
+ }
+
+ printk(KERN_INFO "\nProcessing WAN device %s...\n", wandev->name);
+
+ /* Initialize the counters for each wandev
+ * Used for counting number of times new_if and
+ * del_if get called.
+ */
+ wandev->del_if_cnt = 0;
+ wandev->new_if_cnt = 0;
+ wandev->config_id = conf->config_id;
+
+ if (!conf->data_size || (conf->data == NULL)) {
+ printk(KERN_INFO
+ "%s: firmware not found in configuration data!\n",
+ wandev->name);
+ return -EINVAL;
+ }
+
+	/* Check for resource conflicts and set up the
+	 * card for piggybacking if necessary */
+ if(!conf->S514_CPU_no[0]) {
+ if ((err=check_s508_conflicts(card,conf,&irq)) != 0){
+ return err;
+ }
+ }else {
+ if ((err=check_s514_conflicts(card,conf,&irq)) != 0){
+ return err;
+ }
+ }
+
+ /* If the current card has already been configured
+ * or it's a piggyback card, do not try to allocate
+ * resources.
+ */
+ if (!card->wandev.piggyback && !card->configured){
+
+ /* Configure hardware, load firmware, etc. */
+ memset(&card->hw, 0, sizeof(sdlahw_t));
+
+ /* for an S514 adapter, pass the CPU number and the slot number read */
+ /* from 'router.conf' to the 'sdla_setup()' function via the 'port' */
+ /* parameter */
+ if (conf->S514_CPU_no[0]){
+
+ card->hw.S514_cpu_no[0] = conf->S514_CPU_no[0];
+ card->hw.S514_slot_no = conf->PCI_slot_no;
+ card->hw.auto_pci_cfg = conf->auto_pci_cfg;
+
+ if (card->hw.auto_pci_cfg == WANOPT_YES){
+ printk(KERN_INFO "%s: Setting CPU to %c and Slot to Auto\n",
+ card->devname, card->hw.S514_cpu_no[0]);
+ }else{
+ printk(KERN_INFO "%s: Setting CPU to %c and Slot to %i\n",
+ card->devname, card->hw.S514_cpu_no[0], card->hw.S514_slot_no);
+ }
+
+ }else{
+ /* 508 Card io port and irq initialization */
+ card->hw.port = conf->ioport;
+ card->hw.irq = (conf->irq == 9) ? 2 : conf->irq;
+ }
+
+
+ /* Compute the virtual address of the card in kernel space */
+ if(conf->maddr){
+ card->hw.dpmbase = phys_to_virt(conf->maddr);
+ }else{
+ card->hw.dpmbase = (void *)conf->maddr;
+ }
+
+ card->hw.dpmsize = SDLA_WINDOWSIZE;
+
+ /* set the adapter type if using an S514 adapter */
+ card->hw.type = (conf->S514_CPU_no[0]) ? SDLA_S514 : conf->hw_opt[0];
+ card->hw.pclk = conf->hw_opt[1];
+
+ err = sdla_setup(&card->hw, conf->data, conf->data_size);
+ if (err){
+ printk(KERN_INFO "%s: Hardware setup Failed %i\n",
+ card->devname,err);
+ return err;
+ }
+
+ if(card->hw.type != SDLA_S514)
+ irq = (conf->irq == 2) ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
+ else
+ irq = card->hw.irq;
+
+ /* request an interrupt vector - note that interrupts may be shared */
+ /* when using the S514 PCI adapter */
+
+ if(request_irq(irq, sdla_isr,
+ (card->hw.type == SDLA_S514) ? SA_SHIRQ : 0,
+ wandev->name, card)){
+
+ printk(KERN_INFO "%s: Can't reserve IRQ %d!\n", wandev->name, irq);
+ return -EINVAL;
+ }
+
+ }else{
+ printk(KERN_INFO "%s: Card Configured %lu or Piggybacking %i!\n",
+ wandev->name,card->configured,card->wandev.piggyback);
+ }
+
+
+ if (!card->configured){
+
+ /* Initialize the Spin lock */
+ printk(KERN_INFO "%s: Initializing for SMP\n",wandev->name);
+
+ /* The piggyback spin lock has already been initialized
+ * in check_s514/s508_conflicts() */
+ if (!card->wandev.piggyback){
+ spin_lock_init(&card->wandev.lock);
+ }
+
+ /* Initialize WAN device data space */
+ wandev->irq = irq;
+ wandev->dma = 0;
+ if(card->hw.type != SDLA_S514){
+ wandev->ioport = card->hw.port;
+ }else{
+ wandev->S514_cpu_no[0] = card->hw.S514_cpu_no[0];
+ wandev->S514_slot_no = card->hw.S514_slot_no;
+ }
+ wandev->maddr = (unsigned long)card->hw.dpmbase;
+ wandev->msize = card->hw.dpmsize;
+ wandev->hw_opt[0] = card->hw.type;
+ wandev->hw_opt[1] = card->hw.pclk;
+ wandev->hw_opt[2] = card->hw.memory;
+ wandev->hw_opt[3] = card->hw.fwid;
+ }
+
+ /* Protocol-specific initialization */
+ switch (card->hw.fwid) {
+
+ case SFID_X25_502:
+ case SFID_X25_508:
+ printk(KERN_INFO "%s: Starting X.25 Protocol Init.\n",
+ card->devname);
+ err = wpx_init(card, conf);
+ break;
+ case SFID_FR502:
+ case SFID_FR508:
+ printk(KERN_INFO "%s: Starting Frame Relay Protocol Init.\n",
+ card->devname);
+ err = wpf_init(card, conf);
+ break;
+ case SFID_PPP502:
+ case SFID_PPP508:
+ printk(KERN_INFO "%s: Starting PPP Protocol Init.\n",
+ card->devname);
+ err = wpp_init(card, conf);
+ break;
+
+ case SFID_CHDLC508:
+ case SFID_CHDLC514:
+ if (conf->ft1){
+ printk(KERN_INFO "%s: Starting FT1 CSU/DSU Config Driver.\n",
+ card->devname);
+ err = wpft1_init(card, conf);
+ break;
+
+ }else if (conf->config_id == WANCONFIG_MPPP){
+ printk(KERN_INFO "%s: Starting Multi-Port PPP Protocol Init.\n",
+ card->devname);
+ err = wsppp_init(card,conf);
+ break;
+
+ }else{
+ printk(KERN_INFO "%s: Starting CHDLC Protocol Init.\n",
+ card->devname);
+ err = wpc_init(card, conf);
+ break;
+ }
+ default:
+ printk(KERN_INFO "%s: Error, Firmware is not supported %X %X!\n",
+ wandev->name,card->hw.fwid,SFID_CHDLC508);
+ err = -EPROTONOSUPPORT;
+ }
+
+ if (err != 0){
+ if (err == -EPROTONOSUPPORT){
+ printk(KERN_INFO
+ "%s: Error, Protocol selected has not been compiled!\n",
+ card->devname);
+ printk(KERN_INFO
+ "%s: Re-configure the kernel and re-build the modules!\n",
+ card->devname);
+ }
+
+ release_hw(card);
+ wandev->state = WAN_UNCONFIGURED;
+ return err;
+ }
+
+
+ /* Reserve I/O region and schedule background task */
+ if(card->hw.type != SDLA_S514 && !card->wandev.piggyback)
+ if (!request_region(card->hw.port, card->hw.io_range,
+ wandev->name)) {
+ printk(KERN_WARNING "port 0x%04x busy\n", card->hw.port);
+ release_hw(card);
+ wandev->state = WAN_UNCONFIGURED;
+ return -EBUSY;
+ }
+
+ /* Only use the polling routine for the X25 protocol */
+
+ card->wandev.critical=0;
+ return 0;
+}
+
+/*==================================================================
+ * check_s508_conflicts
+ *
+ * For an S508 adapter, check whether we are loading an adapter on the
+ * same I/O port as a previously loaded S508 card, which would be a
+ * configuration conflict.
+ */
+
+static int check_s508_conflicts (sdla_t* card,wandev_conf_t* conf, int *irq)
+{
+ unsigned long smp_flags;
+ int i;
+
+ if (conf->ioport <= 0) {
+ printk(KERN_INFO
+ "%s: can't configure without I/O port address!\n",
+ card->wandev.name);
+ return -EINVAL;
+ }
+
+ if (conf->irq <= 0) {
+ printk(KERN_INFO "%s: can't configure without IRQ!\n",
+ card->wandev.name);
+ return -EINVAL;
+ }
+
+ if (test_bit(0,&card->configured))
+ return 0;
+
+
+ /* Check for already loaded card with the same IO port and IRQ
+ * If found, copy its hardware configuration and use its
+ * resources (i.e. piggybacking)
+ */
+
+ for (i = 0; i < ncards; i++) {
+ sdla_t *nxt_card = &card_array[i];
+
+ /* Skip the current card ptr */
+ if (nxt_card == card)
+ continue;
+
+
+ /* Find a card that is already configured with the
+ * same IO Port */
+ if ((nxt_card->hw.type == SDLA_S508) &&
+ (nxt_card->hw.port == conf->ioport) &&
+ (nxt_card->next == NULL)){
+
+ /* We found a card that has the same configuration
+ * as this one. This means we must set this card up in
+ * piggybacking mode. However, only the CHDLC and MPPP
+ * protocols support this setup */
+
+ if ((conf->config_id == WANCONFIG_CHDLC ||
+ conf->config_id == WANCONFIG_MPPP) &&
+ (nxt_card->wandev.config_id == WANCONFIG_CHDLC ||
+ nxt_card->wandev.config_id == WANCONFIG_MPPP)){
+
+ *irq = nxt_card->hw.irq;
+ memcpy(&card->hw, &nxt_card->hw, sizeof(sdlahw_t));
+
+ /* The master could already be running, we must
+ * set this as a critical area */
+ lock_adapter_irq(&nxt_card->wandev.lock, &smp_flags);
+
+ nxt_card->next = card;
+ card->next = nxt_card;
+
+ card->wandev.piggyback = WANOPT_YES;
+
+ /* We must initialise the piggyback spin lock here
+ * since the isr will try to lock card->next if it
+ * exists */
+ spin_lock_init(&card->wandev.lock);
+
+ unlock_adapter_irq(&nxt_card->wandev.lock, &smp_flags);
+ break;
+ }else{
+ /* Trying to run piggybacking with the wrong protocol */
+ printk(KERN_INFO "%s: ERROR: Resource busy, ioport: 0x%x\n"
+ "%s: This protocol doesn't support\n"
+ "%s: multi-port operation!\n",
+ card->devname,nxt_card->hw.port,
+ card->devname,card->devname);
+ return -EEXIST;
+ }
+ }
+ }
+
+
+ /* Make sure I/O port region is available only if we are the
+ * master device. If we are running in piggybacking mode,
+ * we will use the resources of the master card. */
+ if (!card->wandev.piggyback) {
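+ /* This request is only an availability probe: the region is
+ * released again immediately, and setup() reserves it for real
+ * once the card has been configured. */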
+ struct resource *rr =
+ request_region(conf->ioport, SDLA_MAXIORANGE, "sdlamain");
+ release_region(conf->ioport, SDLA_MAXIORANGE);
+
+ if (!rr) {
+ printk(KERN_INFO
+ "%s: I/O region 0x%X - 0x%X is in use!\n",
+ card->wandev.name, conf->ioport,
+ conf->ioport + SDLA_MAXIORANGE - 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*==================================================================
+ * check_s514_conflicts
+ *
+ * For an S514 adapter, check whether we are loading an adapter in the
+ * same PCI slot as a previously loaded S514 card, which would be a
+ * configuration conflict.
+ */
+
+
+static int check_s514_conflicts(sdla_t* card,wandev_conf_t* conf, int *irq)
+{
+ unsigned long smp_flags;
+ int i;
+
+ if (test_bit(0,&card->configured))
+ return 0;
+
+
+ /* Check for an already loaded card in the same PCI slot and CPU.
+ * If found, copy its hardware configuration and use its
+ * resources (i.e. piggybacking)
+ */
+
+ for (i = 0; i < ncards; i ++) {
+
+ sdla_t* nxt_card = &card_array[i];
+ if(nxt_card == card)
+ continue;
+
+ if((nxt_card->hw.type == SDLA_S514) &&
+ (nxt_card->hw.S514_slot_no == conf->PCI_slot_no) &&
+ (nxt_card->hw.S514_cpu_no[0] == conf->S514_CPU_no[0])&&
+ (nxt_card->next == NULL)){
+
+
+ if ((conf->config_id == WANCONFIG_CHDLC ||
+ conf->config_id == WANCONFIG_MPPP) &&
+ (nxt_card->wandev.config_id == WANCONFIG_CHDLC ||
+ nxt_card->wandev.config_id == WANCONFIG_MPPP)){
+
+ *irq = nxt_card->hw.irq;
+ memcpy(&card->hw, &nxt_card->hw, sizeof(sdlahw_t));
+
+ /* The master could already be running, we must
+ * set this as a critical area */
+ lock_adapter_irq(&nxt_card->wandev.lock,&smp_flags);
+ nxt_card->next = card;
+ card->next = nxt_card;
+
+ card->wandev.piggyback = WANOPT_YES;
+
+ /* We must initialise the piggyback spin lock here
+ * since the isr will try to lock card->next if it
+ * exists */
+ spin_lock_init(&card->wandev.lock);
+
+ unlock_adapter_irq(&nxt_card->wandev.lock,&smp_flags);
+
+ }else{
+ /* Trying to run piggybacking with the wrong protocol */
+ printk(KERN_INFO "%s: ERROR: Resource busy: CPU %c PCISLOT %i\n"
+ "%s: This protocol doesn't support\n"
+ "%s: multi-port operation!\n",
+ card->devname,
+ conf->S514_CPU_no[0],conf->PCI_slot_no,
+ card->devname,card->devname);
+ return -EEXIST;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+
+/*============================================================================
+ * Shut down WAN link driver.
+ * o shut down adapter hardware
+ * o release system resources.
+ *
+ * This function is called by the router when device is being unregistered or
+ * when it handles ROUTER_DOWN IOCTL.
+ */
+static int shutdown(struct wan_device* wandev)
+{
+ sdla_t *card;
+ int err=0;
+
+ /* sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL)){
+ return -EFAULT;
+ }
+
+ if (wandev->state == WAN_UNCONFIGURED){
+ return 0;
+ }
+
+ card = wandev->private;
+
+ if (card->tty_opt){
+ if (card->tty_open){
+ printk(KERN_INFO
+ "%s: Shutdown Failed: TTY is still open\n",
+ card->devname);
+ return -EBUSY;
+ }
+ }
+
+ wandev->state = WAN_UNCONFIGURED;
+
+ set_bit(PERI_CRIT,(void*)&wandev->critical);
+
+ /* In case of piggybacking, make sure that
+ * we never try to shut down both devices at the same
+ * time, because they depend on one another */
+
+ if (card->disable_comm){
+ card->disable_comm(card);
+ }
+
+ /* Release Resources */
+ release_hw(card);
+
+ /* only free the allocated I/O range if not an S514 adapter */
+ if (wandev->hw_opt[0] != SDLA_S514 && !card->configured){
+ release_region(card->hw.port, card->hw.io_range);
+ }
+
+ if (!card->configured){
+ memset(&card->hw, 0, sizeof(sdlahw_t));
+ if (card->next){
+ memset(&card->next->hw, 0, sizeof(sdlahw_t));
+ }
+ }
+
+
+ clear_bit(PERI_CRIT,(void*)&wandev->critical);
+ return err;
+}
+
+static void release_hw (sdla_t *card)
+{
+ sdla_t *nxt_card;
+
+
+ /* Check if next device exists */
+ if (card->next){
+ nxt_card = card->next;
+ /* If next device is down then release resources */
+ if (nxt_card->wandev.state == WAN_UNCONFIGURED){
+ if (card->wandev.piggyback){
+ /* If this device is the piggyback device, use
+ * the master device's information
+ */
+ printk(KERN_INFO "%s: Piggyback shutting down\n",card->devname);
+ sdla_down(&card->next->hw);
+ free_irq(card->wandev.irq, card->next);
+ card->configured = 0;
+ card->next->configured = 0;
+ card->wandev.piggyback = 0;
+ }else{
+ /* Master device shutting down */
+ printk(KERN_INFO "%s: Master shutting down\n",card->devname);
+ sdla_down(&card->hw);
+ free_irq(card->wandev.irq, card);
+ card->configured = 0;
+ card->next->configured = 0;
+ }
+ }else{
+ printk(KERN_INFO "%s: Device still running %i\n",
+ nxt_card->devname,nxt_card->wandev.state);
+
+ card->configured = 1;
+ }
+ }else{
+ printk(KERN_INFO "%s: Master shutting down\n",card->devname);
+ sdla_down(&card->hw);
+ free_irq(card->wandev.irq, card);
+ card->configured = 0;
+ }
+ return;
+}
+
+
+/*============================================================================
+ * Driver I/O control.
+ * o verify arguments
+ * o perform requested action
+ *
+ * This function is called when router handles one of the reserved user
+ * IOCTLs. Note that 'arg' still points to user address space.
+ */
+static int ioctl(struct wan_device* wandev, unsigned cmd, unsigned long arg)
+{
+ sdla_t* card;
+ int err;
+
+ /* sanity checks */
+ if ((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ card = wandev->private;
+
+ if(card->hw.type != SDLA_S514){
+ disable_irq(card->hw.irq);
+ }
+
+ if (test_bit(SEND_CRIT, (void*)&wandev->critical)) {
+ return -EAGAIN;
+ }
+
+ switch (cmd) {
+ case WANPIPE_DUMP:
+ err = ioctl_dump(wandev->private, (void*)arg);
+ break;
+
+ case WANPIPE_EXEC:
+ err = ioctl_exec(wandev->private, (void*)arg, cmd);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/****** Driver IOCTL Handlers ***********************************************/
+
+/*============================================================================
+ * Dump adapter memory to user buffer.
+ * o verify request structure
+ * o copy request structure to kernel data space
+ * o verify length/offset
+ * o verify user buffer
+ * o copy adapter memory image to user buffer
+ *
+ * Note: when dumping memory, this routine switches the current dual-port
+ * memory vector, so care must be taken to avoid race conditions.
+ */
+static int ioctl_dump (sdla_t* card, sdla_dump_t* u_dump)
+{
+ sdla_dump_t dump;
+ unsigned winsize;
+ unsigned long oldvec; /* DPM window vector */
+ unsigned long smp_flags;
+ int err = 0;
+
+ if(copy_from_user((void*)&dump, (void*)u_dump, sizeof(sdla_dump_t)))
+ return -EFAULT;
+
+ if ((dump.magic != WANPIPE_MAGIC) ||
+ (dump.offset + dump.length > card->hw.memory))
+ return -EINVAL;
+
+ winsize = card->hw.dpmsize;
+
+ if(card->hw.type != SDLA_S514) {
+
+ lock_adapter_irq(&card->wandev.lock, &smp_flags);
+
+ oldvec = card->hw.vector;
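+ /* The S508 exposes its dual-port memory through a sliding
+ * window, so the dump is copied out one window-sized chunk at
+ * a time, remapping the window for each chunk. */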
+ while (dump.length) {
+ /* current offset */
+ unsigned pos = dump.offset % winsize;
+ /* current vector */
+ unsigned long vec = dump.offset - pos;
+ unsigned len = (dump.length > (winsize - pos)) ?
+ (winsize - pos) : dump.length;
+ /* relocate window */
+ if (sdla_mapmem(&card->hw, vec) != 0) {
+ err = -EIO;
+ break;
+ }
+
+ if(copy_to_user((void *)dump.ptr,
+ (u8 *)card->hw.dpmbase + pos, len)){
+
+ unlock_adapter_irq(&card->wandev.lock, &smp_flags);
+ return -EFAULT;
+ }
+
+ dump.length -= len;
+ dump.offset += len;
+ dump.ptr = (char*)dump.ptr + len;
+ }
+
+ sdla_mapmem(&card->hw, oldvec);/* restore DPM window position */
+ unlock_adapter_irq(&card->wandev.lock, &smp_flags);
+
+ }else {
+
+ if(copy_to_user((void *)dump.ptr,
+ (u8 *)card->hw.dpmbase + dump.offset, dump.length)){
+ return -EFAULT;
+ }
+ }
+
+ return err;
+}
+
+/*============================================================================
+ * Execute adapter firmware command.
+ * o verify request structure
+ * o copy request structure to kernel data space
+ * o call protocol-specific 'exec' function
+ */
+static int ioctl_exec (sdla_t* card, sdla_exec_t* u_exec, int cmd)
+{
+ sdla_exec_t exec;
+ int err=0;
+
+ if (card->exec == NULL && cmd == WANPIPE_EXEC){
+ return -ENODEV;
+ }
+
+ if(copy_from_user((void*)&exec, (void*)u_exec, sizeof(sdla_exec_t)))
+ return -EFAULT;
+
+ if ((exec.magic != WANPIPE_MAGIC) || (exec.cmd == NULL))
+ return -EINVAL;
+
+ switch (cmd) {
+ case WANPIPE_EXEC:
+ err = card->exec(card, exec.cmd, exec.data);
+ break;
+ }
+ return err;
+}
+
+/******* Miscellaneous ******************************************************/
+
+/*============================================================================
+ * SDLA Interrupt Service Routine.
+ * o acknowledge SDLA hardware interrupt.
+ * o call protocol-specific interrupt service routine, if any.
+ */
+STATIC irqreturn_t sdla_isr (int irq, void* dev_id, struct pt_regs *regs)
+{
+#define card ((sdla_t*)dev_id)
+
+ if(card->hw.type == SDLA_S514) { /* handle interrupt on S514 */
+ u32 int_status;
+ unsigned char CPU_no = card->hw.S514_cpu_no[0];
+ unsigned char card_found_for_IRQ;
+ u8 IRQ_count = 0;
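+ /* Both CPUs of an S514 share one PCI interrupt line, so loop
+ * here and service at most one interrupt per CPU before
+ * returning (see the IRQ_count check below). */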
+
+ for(;;) {
+
+ read_S514_int_stat(&card->hw, &int_status);
+
+ /* check if the interrupt is for this device */
+ if(!((unsigned char)int_status &
+ (IRQ_CPU_A | IRQ_CPU_B)))
+ return IRQ_HANDLED;
+
+ /* if the IRQ is for both CPUs on the same adapter, */
+ /* then alter the interrupt status so as to handle */
+ /* one CPU at a time */
+ if(((unsigned char)int_status & (IRQ_CPU_A | IRQ_CPU_B))
+ == (IRQ_CPU_A | IRQ_CPU_B)) {
+ int_status &= (CPU_no == S514_CPU_A) ?
+ ~IRQ_CPU_B : ~IRQ_CPU_A;
+ }
+
+ card_found_for_IRQ = 0;
+
+ /* check to see that the CPU number for this device */
+ /* corresponds to the interrupt status read */
+ switch (CPU_no) {
+ case S514_CPU_A:
+ if((unsigned char)int_status &
+ IRQ_CPU_A)
+ card_found_for_IRQ = 1;
+ break;
+
+ case S514_CPU_B:
+ if((unsigned char)int_status &
+ IRQ_CPU_B)
+ card_found_for_IRQ = 1;
+ break;
+ }
+
+ /* exit if the interrupt is for another CPU on the */
+ /* same IRQ */
+ if(!card_found_for_IRQ)
+ return IRQ_HANDLED;
+
+ if (!card ||
+ (card->wandev.state == WAN_UNCONFIGURED && !card->configured)){
+ printk(KERN_INFO
+ "Received IRQ %d for CPU #%c\n",
+ irq, CPU_no);
+ printk(KERN_INFO
+ "IRQ for unconfigured adapter\n");
+ S514_intack(&card->hw, int_status);
+ return IRQ_HANDLED;
+ }
+
+ if (card->in_isr) {
+ printk(KERN_INFO
+ "%s: interrupt re-entrancy on IRQ %d\n",
+ card->devname, card->wandev.irq);
+ S514_intack(&card->hw, int_status);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&card->wandev.lock);
+ if (card->next){
+ spin_lock(&card->next->wandev.lock);
+ }
+
+ S514_intack(&card->hw, int_status);
+ if (card->isr)
+ card->isr(card);
+
+ if (card->next){
+ spin_unlock(&card->next->wandev.lock);
+ }
+ spin_unlock(&card->wandev.lock);
+
+ /* handle a maximum of two interrupts (one for each */
+ /* CPU on the adapter) before returning */
+ if((++ IRQ_count) == 2)
+ return IRQ_HANDLED;
+ }
+ }
+
+ else { /* handle interrupt on S508 adapter */
+
+ if (!card || ((card->wandev.state == WAN_UNCONFIGURED) && !card->configured))
+ return IRQ_HANDLED;
+
+ if (card->in_isr) {
+ printk(KERN_INFO
+ "%s: interrupt re-entrancy on IRQ %d!\n",
+ card->devname, card->wandev.irq);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&card->wandev.lock);
+ if (card->next){
+ spin_lock(&card->next->wandev.lock);
+ }
+
+ sdla_intack(&card->hw);
+ if (card->isr)
+ card->isr(card);
+
+ if (card->next){
+ spin_unlock(&card->next->wandev.lock);
+ }
+ spin_unlock(&card->wandev.lock);
+
+ }
+ return IRQ_HANDLED;
+#undef card
+}
+
+/*============================================================================
+ * This routine is called by the protocol-specific modules when a network
+ * interface is being opened. The only reason we need this is that we
+ * have to call MOD_INC_USE_COUNT, but cannot include 'module.h', where it
+ * is defined, more than once in the same kernel module.
+ */
+void wanpipe_open (sdla_t* card)
+{
+ ++card->open_cnt;
+}
+
+/*============================================================================
+ * This routine is called by the protocol-specific modules when a network
+ * interface is being closed. The only reason we need this is that we
+ * have to call MOD_DEC_USE_COUNT, but cannot include 'module.h', where it
+ * is defined, more than once in the same kernel module.
+ */
+void wanpipe_close (sdla_t* card)
+{
+ --card->open_cnt;
+}
+
+/*============================================================================
+ * Set WAN device state.
+ */
+void wanpipe_set_state (sdla_t* card, int state)
+{
+ if (card->wandev.state != state) {
+ switch (state) {
+ case WAN_CONNECTED:
+ printk (KERN_INFO "%s: link connected!\n",
+ card->devname);
+ break;
+
+ case WAN_CONNECTING:
+ printk (KERN_INFO "%s: link connecting...\n",
+ card->devname);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO "%s: link disconnected!\n",
+ card->devname);
+ break;
+ }
+ card->wandev.state = state;
+ }
+ card->state_tick = jiffies;
+}
+
+sdla_t * wanpipe_find_card (char *name)
+{
+ int cnt;
+ for (cnt = 0; cnt < ncards; ++ cnt) {
+ sdla_t* card = &card_array[cnt];
+ if (!strcmp(card->devname,name))
+ return card;
+ }
+ return NULL;
+}
+
+sdla_t * wanpipe_find_card_num (int num)
+{
+ if (num < 1 || num > ncards)
+ return NULL;
+ num--;
+ return &card_array[num];
+}
+
+/*
+ * @work_pointer: work_struct to be queued; the caller must
+ * already have initialized it with PREPARE_WORK() or
+ * INIT_WORK().
+ */
+void wanpipe_queue_work (struct work_struct *work_pointer)
+{
+ if (test_and_set_bit(1, (void*)&wanpipe_bh_critical))
+ printk(KERN_INFO "CRITICAL IN QUEUING WORK\n");
+
+ queue_work(wanpipe_wq, work_pointer);
+ clear_bit(1,(void*)&wanpipe_bh_critical);
+}
+
+void wakeup_sk_bh(struct net_device *dev)
+{
+ wanpipe_common_t *chan = dev->priv;
+
+ if (test_bit(0,&chan->common_critical))
+ return;
+
+ if (chan->sk && chan->tx_timer){
+ chan->tx_timer->expires=jiffies+1;
+ add_timer(chan->tx_timer);
+ }
+}
+
+int change_dev_flags(struct net_device *dev, unsigned flags)
+{
+ struct ifreq if_info;
+ mm_segment_t fs = get_fs();
+ int err;
+
+ memset(&if_info, 0, sizeof(if_info));
+ strcpy(if_info.ifr_name, dev->name);
+ if_info.ifr_flags = flags;
+
+ set_fs(get_ds()); /* allow a kernel-space buffer where a user pointer is expected */
+ err = devinet_ioctl(SIOCSIFFLAGS, &if_info);
+ set_fs(fs);
+
+ return err;
+}
+
+unsigned long get_ip_address(struct net_device *dev, int option)
+{
+
+ struct in_ifaddr *ifaddr;
+ struct in_device *in_dev;
+
+ if ((in_dev = __in_dev_get(dev)) == NULL){
+ return 0;
+ }
+
+ if ((ifaddr = in_dev->ifa_list)== NULL ){
+ return 0;
+ }
+
+ switch (option){
+
+ case WAN_LOCAL_IP:
+ return ifaddr->ifa_local;
+ break;
+
+ case WAN_POINTOPOINT_IP:
+ return ifaddr->ifa_address;
+ break;
+
+ case WAN_NETMASK_IP:
+ return ifaddr->ifa_mask;
+ break;
+
+ case WAN_BROADCAST_IP:
+ return ifaddr->ifa_broadcast;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+void add_gateway(sdla_t *card, struct net_device *dev)
+{
+ mm_segment_t oldfs;
+ struct rtentry route;
+ int res;
+
+ memset((char*)&route,0,sizeof(struct rtentry));
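+ /* Build a default route (destination 0.0.0.0, netmask 0.0.0.0)
+ * bound to this interface */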
+
+ ((struct sockaddr_in *)
+ &(route.rt_dst))->sin_addr.s_addr = 0;
+ ((struct sockaddr_in *)
+ &(route.rt_dst))->sin_family = AF_INET;
+
+ ((struct sockaddr_in *)
+ &(route.rt_genmask))->sin_addr.s_addr = 0;
+ ((struct sockaddr_in *)
+ &(route.rt_genmask)) ->sin_family = AF_INET;
+
+
+ route.rt_flags = 0;
+ route.rt_dev = dev->name;
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ res = ip_rt_ioctl(SIOCADDRT,&route);
+ set_fs(oldfs);
+
+ if (res == 0){
+ printk(KERN_INFO "%s: Gateway added for %s\n",
+ card->devname,dev->name);
+ }
+
+ return;
+}
+
+MODULE_LICENSE("GPL");
+
+/****** End *********************************************************/
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
new file mode 100644
index 000000000000..5380ddfcd7d5
--- /dev/null
+++ b/drivers/net/wan/sealevel.c
@@ -0,0 +1,469 @@
+/*
+ * Sealevel Systems 4021 driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * (c) Copyright 1999, 2001 Alan Cox
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <net/syncppp.h>
+#include "z85230.h"
+
+
+struct slvl_device
+{
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ struct z8530_channel *chan;
+ struct ppp_device pppdev;
+ int channel;
+};
+
+
+struct slvl_board
+{
+ struct slvl_device *dev[2];
+ struct z8530_dev board;
+ int iobase;
+};
+
+/*
+ * Network driver support routines
+ */
+
+/*
+ * Frame receive. Simple for our card as we do sync ppp and there
+ * is no funny garbage involved
+ */
+
+static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
+{
+ /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
+ skb_trim(skb, skb->len-2);
+ skb->protocol=htons(ETH_P_WAN_PPP);
+ skb->mac.raw=skb->data;
+ skb->dev=c->netdevice;
+ /*
+ * Send it to the PPP layer. We don't have time to process
+ * it right now.
+ */
+ netif_rx(skb);
+ c->netdevice->last_rx = jiffies;
+}
+
+/*
+ * We've been placed in the UP state
+ */
+
+static int sealevel_open(struct net_device *d)
+{
+ struct slvl_device *slvl=d->priv;
+ int err = -1;
+ int unit = slvl->channel;
+
+ /*
+ * Link layer up.
+ */
+
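+ /* Channel 0 is opened with DMA support, channel 1 in PIO mode */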
+ switch(unit)
+ {
+ case 0:
+ err=z8530_sync_dma_open(d, slvl->chan);
+ break;
+ case 1:
+ err=z8530_sync_open(d, slvl->chan);
+ break;
+ }
+
+ if(err)
+ return err;
+ /*
+ * Begin PPP
+ */
+ err=sppp_open(d);
+ if(err)
+ {
+ switch(unit)
+ {
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
+ }
+ return err;
+ }
+
+ slvl->chan->rx_function=sealevel_input;
+
+ /*
+ * Go go go
+ */
+ netif_start_queue(d);
+ return 0;
+}
+
+static int sealevel_close(struct net_device *d)
+{
+ struct slvl_device *slvl=d->priv;
+ int unit = slvl->channel;
+
+ /*
+ * Discard new frames
+ */
+
+ slvl->chan->rx_function=z8530_null_rx;
+
+ /*
+ * PPP off
+ */
+ sppp_close(d);
+ /*
+ * Link layer down
+ */
+
+ netif_stop_queue(d);
+
+ switch(unit)
+ {
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
+ }
+ return 0;
+}
+
+static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
+{
+ /* struct slvl_device *slvl=d->priv;
+ z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
+ return sppp_do_ioctl(d, ifr,cmd);
+}
+
+static struct net_device_stats *sealevel_get_stats(struct net_device *d)
+{
+ struct slvl_device *slvl=d->priv;
+ if(slvl)
+ return z8530_get_stats(slvl->chan);
+ else
+ return NULL;
+}
+
+/*
+ * Passed PPP frames, fire them downwind.
+ */
+
+static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
+{
+ struct slvl_device *slvl=d->priv;
+ return z8530_queue_xmit(slvl->chan, skb);
+}
+
+static int sealevel_neigh_setup(struct neighbour *n)
+{
+ if (n->nud_state == NUD_NONE) {
+ n->ops = &arp_broken_ops;
+ n->output = n->ops->output;
+ }
+ return 0;
+}
+
+static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+{
+ if (p->tbl->family == AF_INET) {
+ p->neigh_setup = sealevel_neigh_setup;
+ p->ucast_probes = 0;
+ p->mcast_probes = 0;
+ }
+ return 0;
+}
+
+static int sealevel_attach(struct net_device *dev)
+{
+ struct slvl_device *sv = dev->priv;
+ sppp_attach(&sv->pppdev);
+ return 0;
+}
+
+static void sealevel_detach(struct net_device *dev)
+{
+ sppp_detach(dev);
+}
+
+static void slvl_setup(struct net_device *d)
+{
+ d->open = sealevel_open;
+ d->stop = sealevel_close;
+ d->init = sealevel_attach;
+ d->uninit = sealevel_detach;
+ d->hard_start_xmit = sealevel_queue_xmit;
+ d->get_stats = sealevel_get_stats;
+ d->set_multicast_list = NULL;
+ d->do_ioctl = sealevel_ioctl;
+ d->neigh_setup = sealevel_neigh_setup_dev;
+ d->set_mac_address = NULL;
+
+}
+
+static inline struct slvl_device *slvl_alloc(int iobase, int irq)
+{
+ struct net_device *d;
+ struct slvl_device *sv;
+
+ d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
+ slvl_setup);
+
+ if (!d)
+ return NULL;
+
+ sv = d->priv;
+ sv->if_ptr = &sv->pppdev;
+ sv->pppdev.dev = d;
+ d->base_addr = iobase;
+ d->irq = irq;
+
+ return sv;
+}
+
+
+/*
+ * Allocate and set up a Sealevel board.
+ */
+
+static __init struct slvl_board *slvl_init(int iobase, int irq,
+ int txdma, int rxdma, int slow)
+{
+ struct z8530_dev *dev;
+ struct slvl_board *b;
+
+ /*
+ * Get the needed I/O space
+ */
+
+ if(!request_region(iobase, 8, "Sealevel 4021"))
+ {
+ printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase);
+ return NULL;
+ }
+
+ b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL);
+ if(!b)
+ goto fail3;
+
+ memset(b, 0, sizeof(*b));
+ if (!(b->dev[0]= slvl_alloc(iobase, irq)))
+ goto fail2;
+
+ b->dev[0]->chan = &b->board.chanA;
+ b->dev[0]->channel = 0;
+
+ if (!(b->dev[1] = slvl_alloc(iobase, irq)))
+ goto fail1_0;
+
+ b->dev[1]->chan = &b->board.chanB;
+ b->dev[1]->channel = 1;
+
+ dev = &b->board;
+
+ /*
+ * Stuff in the I/O addressing
+ */
+
+ dev->active = 0;
+
+ b->iobase = iobase;
+
+ /*
+ * Select 8530 delays for the old board
+ */
+
+ if(slow)
+ iobase |= Z8530_PORT_SLEEP;
+
+ dev->chanA.ctrlio=iobase+1;
+ dev->chanA.dataio=iobase;
+ dev->chanB.ctrlio=iobase+3;
+ dev->chanB.dataio=iobase+2;
+
+ dev->chanA.irqs=&z8530_nop;
+ dev->chanB.irqs=&z8530_nop;
+
+ /*
+ * Assert DTR, enable DMA
+ */
+
+ outb(3|(1<<7), b->iobase+4);
+
+
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "SeaLevel", dev)<0)
+ {
+ printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
+ goto fail1_1;
+ }
+
+ dev->irq=irq;
+ dev->chanA.private=&b->dev[0];
+ dev->chanB.private=&b->dev[1];
+ dev->chanA.netdevice=b->dev[0]->pppdev.dev;
+ dev->chanB.netdevice=b->dev[1]->pppdev.dev;
+ dev->chanA.dev=dev;
+ dev->chanB.dev=dev;
+
+ dev->chanA.txdma=3;
+ dev->chanA.rxdma=1;
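+ /* Note: the channel A DMA channels are hard-coded above; the
+ * txdma/rxdma arguments passed to slvl_init() are not used
+ * here. */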
+ if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0)
+ goto fail;
+
+ if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0)
+ goto dmafail;
+
+ disable_irq(irq);
+
+ /*
+ * Begin normal initialisation
+ */
+
+ if(z8530_init(dev)!=0)
+ {
+ printk(KERN_ERR "Z8530 series device not found.\n");
+ enable_irq(irq);
+ goto dmafail2;
+ }
+ if(dev->type==Z85C30)
+ {
+ z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
+ z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
+ }
+ else
+ {
+ z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
+ z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
+ }
+
+ /*
+ * Now we can take the IRQ
+ */
+
+ enable_irq(irq);
+
+ if (register_netdev(b->dev[0]->pppdev.dev))
+ goto dmafail2;
+
+ if (register_netdev(b->dev[1]->pppdev.dev))
+ goto fail_unit;
+
+ z8530_describe(dev, "I/O", iobase);
+ dev->active=1;
+ return b;
+
+fail_unit:
+ unregister_netdev(b->dev[0]->pppdev.dev);
+
+dmafail2:
+ free_dma(dev->chanA.rxdma);
+dmafail:
+ free_dma(dev->chanA.txdma);
+fail:
+ free_irq(irq, dev);
+fail1_1:
+ free_netdev(b->dev[1]->pppdev.dev);
+fail1_0:
+ free_netdev(b->dev[0]->pppdev.dev);
+fail2:
+ kfree(b);
+fail3:
+ release_region(iobase,8);
+ return NULL;
+}
+
+static void __exit slvl_shutdown(struct slvl_board *b)
+{
+ int u;
+
+ z8530_shutdown(&b->board);
+
+ for(u=0; u<2; u++)
+ {
+ struct net_device *d = b->dev[u]->pppdev.dev;
+ unregister_netdev(d);
+ free_netdev(d);
+ }
+
+ free_irq(b->board.irq, &b->board);
+ free_dma(b->board.chanA.rxdma);
+ free_dma(b->board.chanA.txdma);
+ /* DMA off on the card, drop DTR */
+ outb(0, b->iobase);
+ release_region(b->iobase, 8);
+ kfree(b);
+}
+
+
+static int io=0x238;
+static int txdma=1;
+static int rxdma=3;
+static int irq=5;
+static int slow=0;
+
+module_param(io, int, 0);
+MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
+module_param(txdma, int, 0);
+MODULE_PARM_DESC(txdma, "Transmit DMA channel");
+module_param(rxdma, int, 0);
+MODULE_PARM_DESC(rxdma, "Receive DMA channel");
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
+module_param(slow, bool, 0);
+MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
+
+static struct slvl_board *slvl_unit;
+
+static int __init slvl_init_module(void)
+{
+#ifdef MODULE
+ printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
+ printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
+#endif
+ slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
+
+ return slvl_unit ? 0 : -ENODEV;
+}
+
+static void __exit slvl_cleanup_module(void)
+{
+ if(slvl_unit)
+ slvl_shutdown(slvl_unit);
+}
+
+module_init(slvl_init_module);
+module_exit(slvl_cleanup_module);
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
new file mode 100644
index 000000000000..84b65c60c799
--- /dev/null
+++ b/drivers/net/wan/syncppp.c
@@ -0,0 +1,1488 @@
+/*
+ * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
+ * as well as a CISCO HDLC implementation. See the copyright
+ * message below for the original source.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the license, or (at your option) any later version.
+ *
+ * Note, however, that this code is also used in a different form by FreeBSD.
+ * Therefore when making any non OS specific change please consider
+ * contributing it back to the original author under the terms
+ * below in addition.
+ * -- Alan
+ *
+ * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ */
+
+/*
+ * Synchronous PPP/Cisco link level subroutines.
+ * Keepalive protocol implemented in both Cisco and PPP modes.
+ *
+ * Copyright (C) 1994 Cronyx Ltd.
+ * Author: Serge Vakulenko, <vak@zebub.msk.su>
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organisations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
+ *
+ * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
+ */
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/random.h>
+#include <linux/pkt_sched.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+#include <net/syncppp.h>
+
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define MAXALIVECNT 6 /* max. alive packets */
+
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_IP 0x0021 /* Internet Protocol */
+#define PPP_ISO 0x0023 /* ISO OSI Protocol */
+#define PPP_XNS 0x0025 /* Xerox NS Protocol */
+#define PPP_IPX 0x002b /* Novell IPX Protocol */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
+
+#define LCP_CONF_REQ 1 /* PPP LCP configure request */
+#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
+#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
+#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
+#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
+#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
+#define LCP_CODE_REJ 7 /* PPP LCP code reject */
+#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
+#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
+#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
+#define LCP_DISC_REQ 11 /* PPP LCP discard request */
+
+#define LCP_OPT_MRU 1 /* maximum receive unit */
+#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
+#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
+#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
+#define LCP_OPT_MAGIC 5 /* magic number */
+#define LCP_OPT_RESERVED 6 /* reserved */
+#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
+#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
+
+#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
+#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
+#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
+#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
+#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
+#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
+#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
+
+#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
+#define CISCO_UNICAST 0x0f /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+struct ppp_header {
+ u8 address;
+ u8 control;
+ u16 protocol;
+};
+#define PPP_HEADER_LEN sizeof (struct ppp_header)
+
+struct lcp_header {
+ u8 type;
+ u8 ident;
+ u16 len;
+};
+#define LCP_HEADER_LEN sizeof (struct lcp_header)
+
+struct cisco_packet {
+ u32 type;
+ u32 par1;
+ u32 par2;
+ u16 rel;
+ u16 time0;
+ u16 time1;
+};
+#define CISCO_PACKET_LEN 18
+#define CISCO_BIG_PACKET_LEN 20
+
+static struct sppp *spppq;
+static struct timer_list sppp_keepalive_timer;
+static DEFINE_SPINLOCK(spppq_lock);
+
+/* global xmit queue for sending packets while spinlock is held */
+static struct sk_buff_head tx_queue;
+
+static void sppp_keepalive (unsigned long dummy);
+static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
+ u8 ident, u16 len, void *data);
+static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2);
+static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_lcp_open (struct sppp *sp);
+static void sppp_ipcp_open (struct sppp *sp);
+static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
+ int len, u32 *magic);
+static void sppp_cp_timeout (unsigned long arg);
+static char *sppp_lcp_type_name (u8 type);
+static char *sppp_ipcp_type_name (u8 type);
+static void sppp_print_bytes (u8 *p, u16 len);
+
+static int debug;
+
+/* Flush global outgoing packet queue to dev_queue_xmit().
+ *
+ * dev_queue_xmit() must be called with interrupts enabled
+ * which means it can't be called with spinlocks held.
+ * If a packet needs to be sent while a spinlock is held,
+ * then put the packet into tx_queue, and call sppp_flush_xmit()
+ * after spinlock is released.
+ */
+static void sppp_flush_xmit(void)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&tx_queue)) != NULL)
+ dev_queue_xmit(skb);
+}
+
+/*
+ * Interface down stub
+ */
+
+static void if_down(struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+
+ sp->pp_link_state=SPPP_LINK_DOWN;
+}
+
+/*
+ * Timeout routine activations.
+ */
+
+static void sppp_set_timeout(struct sppp *p,int s)
+{
+ if (! (p->pp_flags & PP_TIMO))
+ {
+ init_timer(&p->pp_timer);
+ p->pp_timer.function=sppp_cp_timeout;
+ p->pp_timer.expires=jiffies+s*HZ;
+ p->pp_timer.data=(unsigned long)p;
+ p->pp_flags |= PP_TIMO;
+ add_timer(&p->pp_timer);
+ }
+}
+
+static void sppp_clear_timeout(struct sppp *p)
+{
+ if (p->pp_flags & PP_TIMO)
+ {
+ del_timer(&p->pp_timer);
+ p->pp_flags &= ~PP_TIMO;
+ }
+}
+
+/**
+ * sppp_input - receive and process a WAN PPP frame
+ * @skb: The buffer to process
+ * @dev: The device it arrived on
+ *
+ * This can be called directly by cards that do not have
+ * timing constraints but is normally called from the network layer
+ * after interrupt servicing to process frames queued via netif_rx().
+ *
+ * We process the options in the card. If the frame is destined for
+ * the protocol stacks then it is requeued for the upper level
+ * protocol. If it is a control frame it is processed and discarded
+ * here.
+ */
+
+void sppp_input (struct net_device *dev, struct sk_buff *skb)
+{
+ struct ppp_header *h;
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ skb->dev=dev;
+ skb->mac.raw=skb->data;
+
+ if (dev->flags & IFF_RUNNING)
+ {
+ /* Count received bytes, add FCS and one flag */
+ sp->ibytes+= skb->len + 3;
+ sp->ipkts++;
+ }
+
+ if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
+ /* Too small packet, drop it. */
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
+ dev->name, skb->len);
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Get PPP header. */
+ h = (struct ppp_header *)skb->data;
+ skb_pull(skb,sizeof(struct ppp_header));
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ switch (h->address) {
+ default: /* Invalid PPP packet. */
+ goto invalid;
+ case PPP_ALLSTATIONS:
+ if (h->control != PPP_UI)
+ goto invalid;
+ if (sp->pp_flags & PP_CISCO) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ if (sp->lcp.state == LCP_STATE_OPENED)
+ sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
+ ++sp->pp_seq, skb->len + 2,
+ &h->protocol);
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ case PPP_LCP:
+ sppp_lcp_input (sp, skb);
+ goto drop;
+ case PPP_IPCP:
+ if (sp->lcp.state == LCP_STATE_OPENED)
+ sppp_ipcp_input (sp, skb);
+ else
+ printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
+ goto drop;
+ case PPP_IP:
+ if (sp->ipcp.state == IPCP_STATE_OPENED) {
+ if(sp->pp_flags&PP_DEBUG)
+ printk(KERN_DEBUG "Yow an IP frame.\n");
+ skb->protocol=htons(ETH_P_IP);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+ }
+ break;
+#ifdef IPX
+ case PPP_IPX:
+ /* IPX IPXCP not implemented yet */
+ if (sp->lcp.state == LCP_STATE_OPENED) {
+ skb->protocol=htons(ETH_P_IPX);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+ }
+ break;
+#endif
+ }
+ break;
+ case CISCO_MULTICAST:
+ case CISCO_UNICAST:
+ /* Don't check the control field here (RFC 1547). */
+ if (! (sp->pp_flags & PP_CISCO)) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ goto invalid;
+ case CISCO_KEEPALIVE:
+ sppp_cisco_input (sp, skb);
+ goto drop;
+#ifdef CONFIG_INET
+ case ETH_P_IP:
+ skb->protocol=htons(ETH_P_IP);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+#endif
+#ifdef CONFIG_IPX
+ case ETH_P_IPX:
+ skb->protocol=htons(ETH_P_IPX);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+#endif
+ }
+ break;
+ }
+ goto drop;
+
+invalid:
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
+ dev->name, h->address, h->control, ntohs (h->protocol));
+drop:
+ kfree_skb(skb);
+done:
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+ return;
+}
+
+EXPORT_SYMBOL(sppp_input);
+
+/*
+ * Handle transmit packets.
+ */
+
+static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type,
+ void *daddr, void *saddr, unsigned int len)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ struct ppp_header *h;
+ skb_push(skb,sizeof(struct ppp_header));
+ h=(struct ppp_header *)skb->data;
+ if(sp->pp_flags&PP_CISCO)
+ {
+ h->address = CISCO_UNICAST;
+ h->control = 0;
+ }
+ else
+ {
+ h->address = PPP_ALLSTATIONS;
+ h->control = PPP_UI;
+ }
+ if(sp->pp_flags & PP_CISCO)
+ {
+ h->protocol = htons(type);
+ }
+ else switch(type)
+ {
+ case ETH_P_IP:
+ h->protocol = htons(PPP_IP);
+ break;
+ case ETH_P_IPX:
+ h->protocol = htons(PPP_IPX);
+ break;
+ }
+ return sizeof(struct ppp_header);
+}
+
+static int sppp_rebuild_header(struct sk_buff *skb)
+{
+ return 0;
+}
+
+/*
+ * Send keepalive packets every 10 seconds.
+ */
+
+static void sppp_keepalive (unsigned long dummy)
+{
+ struct sppp *sp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&spppq_lock, flags);
+
+ for (sp=spppq; sp; sp=sp->pp_next)
+ {
+ struct net_device *dev = sp->pp_if;
+
+ /* Keepalive mode disabled or channel down? */
+ if (! (sp->pp_flags & PP_KEEPALIVE) ||
+ ! (dev->flags & IFF_UP))
+ continue;
+
+ spin_lock(&sp->lock);
+
+ /* No keepalive in PPP mode if LCP not opened yet. */
+ if (! (sp->pp_flags & PP_CISCO) &&
+ sp->lcp.state != LCP_STATE_OPENED) {
+ spin_unlock(&sp->lock);
+ continue;
+ }
+
+ if (sp->pp_alivecnt == MAXALIVECNT) {
+ /* No keepalive packets received. Stop the interface. */
+ printk (KERN_WARNING "%s: protocol down\n", dev->name);
+ if_down (dev);
+ if (! (sp->pp_flags & PP_CISCO)) {
+ /* Shut down the PPP link. */
+ sp->lcp.magic = jiffies;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ /* Initiate negotiation. */
+ sppp_lcp_open (sp);
+ }
+ }
+ if (sp->pp_alivecnt <= MAXALIVECNT)
+ ++sp->pp_alivecnt;
+ if (sp->pp_flags & PP_CISCO)
+ sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
+ sp->pp_rseq);
+ else if (sp->lcp.state == LCP_STATE_OPENED) {
+ long nmagic = htonl (sp->lcp.magic);
+ sp->lcp.echoid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
+ sp->lcp.echoid, 4, &nmagic);
+ }
+
+ spin_unlock(&sp->lock);
+ }
+ spin_unlock_irqrestore(&spppq_lock, flags);
+ sppp_flush_xmit();
+ sppp_keepalive_timer.expires=jiffies+10*HZ;
+ add_timer(&sppp_keepalive_timer);
+}
+
+/*
+ * Handle incoming PPP Link Control Protocol packets.
+ */
+
+static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct lcp_header *h;
+ struct net_device *dev = sp->pp_if;
+ int len = skb->len;
+ u8 *p, opt[6];
+ u32 rmagic;
+
+ if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ h = (struct lcp_header *)skb->data;
+ skb_pull(skb,sizeof(struct lcp_header));
+
+ if (sp->pp_flags & PP_DEBUG)
+ {
+ char state = '?';
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED: state = 'C'; break;
+ case LCP_STATE_ACK_RCVD: state = 'R'; break;
+ case LCP_STATE_ACK_SENT: state = 'S'; break;
+ case LCP_STATE_OPENED: state = 'O'; break;
+ }
+ printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
+ dev->name, state, len,
+ sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
+ if (len > 4)
+ sppp_print_bytes ((u8*) (h+1), len-4);
+ printk (">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
+ skb->len, h);
+ break;
+ case LCP_CONF_REQ:
+ if (len < 4) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
+ goto badreq;
+ if (rmagic == sp->lcp.magic) {
+ /* Local and remote magics equal -- loopback? */
+ if (sp->pp_loopcnt >= MAXALIVECNT*5) {
+ printk (KERN_WARNING "%s: loopback\n",
+ dev->name);
+ sp->pp_loopcnt = 0;
+ if (dev->flags & IFF_UP) {
+ if_down (dev);
+ }
+ } else if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: conf req: magic glitch\n",
+ dev->name);
+ ++sp->pp_loopcnt;
+
+ /* MUST send Conf-Nack packet. */
+ rmagic = ~sp->lcp.magic;
+ opt[0] = LCP_OPT_MAGIC;
+ opt[1] = sizeof (opt);
+ opt[2] = rmagic >> 24;
+ opt[3] = rmagic >> 16;
+ opt[4] = rmagic >> 8;
+ opt[5] = rmagic;
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
+ h->ident, sizeof (opt), &opt);
+badreq:
+ switch (sp->lcp.state) {
+ case LCP_STATE_OPENED:
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ /* fall through... */
+ case LCP_STATE_ACK_SENT:
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ break;
+ }
+ /* Send Configure-Ack packet. */
+ sp->pp_loopcnt = 0;
+ if (sp->lcp.state != LCP_STATE_OPENED) {
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
+ h->ident, len-4, h+1);
+ }
+ /* Change the state. */
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ sp->lcp.state = LCP_STATE_ACK_SENT;
+ break;
+ case LCP_STATE_ACK_RCVD:
+ sp->lcp.state = LCP_STATE_OPENED;
+ sppp_ipcp_open (sp);
+ break;
+ case LCP_STATE_OPENED:
+ /* Remote magic changed -- close session. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ /* Send ACK after our REQ in an attempt to break the loop */
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
+ h->ident, len-4, h+1);
+ sp->lcp.state = LCP_STATE_ACK_SENT;
+ break;
+ }
+ break;
+ case LCP_CONF_ACK:
+ if (h->ident != sp->lcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ if ((sp->pp_link_state != SPPP_LINK_UP) &&
+ (dev->flags & IFF_UP)) {
+ /* Coming out of loopback mode. */
+ sp->pp_link_state=SPPP_LINK_UP;
+ printk (KERN_INFO "%s: protocol up\n", dev->name);
+ }
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ sp->lcp.state = LCP_STATE_ACK_RCVD;
+ sppp_set_timeout (sp, 5);
+ break;
+ case LCP_STATE_ACK_SENT:
+ sp->lcp.state = LCP_STATE_OPENED;
+ sppp_ipcp_open (sp);
+ break;
+ }
+ break;
+ case LCP_CONF_NAK:
+ if (h->ident != sp->lcp.confid)
+ break;
+ p = (u8*) (h+1);
+ if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
+ rmagic = (u32)p[2] << 24 |
+ (u32)p[3] << 16 | p[4] << 8 | p[5];
+ if (rmagic == ~sp->lcp.magic) {
+ int newmagic;
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
+ dev->name);
+ get_random_bytes(&newmagic, sizeof(newmagic));
+ sp->lcp.magic += newmagic;
+ } else
+ sp->lcp.magic = rmagic;
+ }
+ if (sp->lcp.state != LCP_STATE_ACK_SENT) {
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ /* The link will be renegotiated after timeout,
+ * to avoid endless req-nack loop. */
+ sppp_clear_timeout (sp);
+ sppp_set_timeout (sp, 2);
+ break;
+ case LCP_CONF_REJ:
+ if (h->ident != sp->lcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ if (sp->lcp.state != LCP_STATE_ACK_SENT) {
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ break;
+ case LCP_TERM_REQ:
+ sppp_clear_timeout (sp);
+ /* Send Terminate-Ack packet. */
+ sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_TERM_ACK:
+ case LCP_CODE_REJ:
+ case LCP_PROTO_REJ:
+ /* Ignore for now. */
+ break;
+ case LCP_DISC_REQ:
+ /* Discard the packet. */
+ break;
+ case LCP_ECHO_REQ:
+ if (sp->lcp.state != LCP_STATE_OPENED)
+ break;
+ if (len < 8) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (ntohl (*(long*)(h+1)) == sp->lcp.magic) {
+ /* Line loopback mode detected. */
+ printk (KERN_WARNING "%s: loopback\n", dev->name);
+ if_down (dev);
+
+ /* Shut down the PPP link. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ /* Initiate negotiation. */
+ sppp_lcp_open (sp);
+ break;
+ }
+ *(long*)(h+1) = htonl (sp->lcp.magic);
+ sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
+ break;
+ case LCP_ECHO_REPLY:
+ if (h->ident != sp->lcp.echoid)
+ break;
+ if (len < 8) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (ntohl (*(long*)(h+1)) != sp->lcp.magic)
+ sp->pp_alivecnt = 0;
+ break;
+ }
+}
+
+/*
+ * Handle incoming Cisco keepalive protocol packets.
+ */
+
+static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct cisco_packet *h;
+ struct net_device *dev = sp->pp_if;
+
+ if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
+ || (skb->len != CISCO_PACKET_LEN
+ && skb->len != CISCO_BIG_PACKET_LEN)) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
+ dev->name, skb->len);
+ return;
+ }
+ h = (struct cisco_packet *)skb->data;
+ skb_pull(skb, sizeof(struct cisco_packet));
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
+ dev->name, skb->len,
+ ntohl (h->type), h->par1, h->par2, h->rel,
+ h->time0, h->time1);
+ switch (ntohl (h->type)) {
+ default:
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
+ dev->name, ntohl (h->type));
+ break;
+ case CISCO_ADDR_REPLY:
+ /* Reply on address request, ignore */
+ break;
+ case CISCO_KEEPALIVE_REQ:
+ sp->pp_alivecnt = 0;
+ sp->pp_rseq = ntohl (h->par1);
+ if (sp->pp_seq == sp->pp_rseq) {
+ /* Local and remote sequence numbers are equal.
+ * Probably, the line is in loopback mode. */
+ int newseq;
+ if (sp->pp_loopcnt >= MAXALIVECNT) {
+ printk (KERN_WARNING "%s: loopback\n",
+ dev->name);
+ sp->pp_loopcnt = 0;
+ if (dev->flags & IFF_UP) {
+ if_down (dev);
+ }
+ }
+ ++sp->pp_loopcnt;
+
+ /* Generate new local sequence number */
+ get_random_bytes(&newseq, sizeof(newseq));
+ sp->pp_seq ^= newseq;
+ break;
+ }
+ sp->pp_loopcnt = 0;
+ if (sp->pp_link_state==SPPP_LINK_DOWN &&
+ (dev->flags & IFF_UP)) {
+ sp->pp_link_state=SPPP_LINK_UP;
+ printk (KERN_INFO "%s: protocol up\n", dev->name);
+ }
+ break;
+ case CISCO_ADDR_REQ:
+ /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
+ {
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
+#ifdef CONFIG_INET
+ rcu_read_lock();
+ if ((in_dev = __in_dev_get(dev)) != NULL)
+ {
+ for (ifa=in_dev->ifa_list; ifa != NULL;
+ ifa=ifa->ifa_next) {
+ if (strcmp(dev->name, ifa->ifa_label) == 0)
+ {
+ addr = ifa->ifa_local;
+ mask = ifa->ifa_mask;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+#endif
+ /* I hope both addr and mask are in the net order */
+ sppp_cisco_send (sp, CISCO_ADDR_REPLY, addr, mask);
+ break;
+ }
+ }
+}
+
+
+/*
+ * Send PPP LCP packet.
+ */
+
+static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
+ u8 ident, u16 len, void *data)
+{
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct sk_buff *skb;
+ struct net_device *dev = sp->pp_if;
+
+ skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
+ GFP_ATOMIC);
+ if (skb==NULL)
+ return;
+
+ skb_reserve(skb,dev->hard_header_len);
+
+ h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons (proto); /* Link Control Protocol */
+
+ lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
+ lh->type = type;
+ lh->ident = ident;
+ lh->len = htons (LCP_HEADER_LEN + len);
+
+ if (len)
+ memcpy(skb_put(skb,len),data, len);
+
+ if (sp->pp_flags & PP_DEBUG) {
+ printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
+ dev->name,
+ proto==PPP_LCP ? "lcp" : "ipcp",
+ proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
+ sppp_ipcp_type_name (lh->type), lh->ident,
+ ntohs (lh->len));
+ if (len)
+ sppp_print_bytes ((u8*) (lh+1), len);
+ printk (">\n");
+ }
+ sp->obytes += skb->len;
+ /* Control is high priority so it doesn't get queued behind data */
+ skb->priority=TC_PRIO_CONTROL;
+ skb->dev = dev;
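+ /* Queue rather than transmit directly: callers may hold sp->lock,
+ * and dev_queue_xmit() needs interrupts enabled, so the frame is
+ * sent later via sppp_flush_xmit() */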
+ skb_queue_tail(&tx_queue, skb);
+}
+
+/*
+ * Send Cisco keepalive packet.
+ */
+
+static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2)
+{
+ struct ppp_header *h;
+ struct cisco_packet *ch;
+ struct sk_buff *skb;
+ struct net_device *dev = sp->pp_if;
+ u32 t = jiffies * 1000/HZ;
+
+ skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
+ GFP_ATOMIC);
+
+ if(skb==NULL)
+ return;
+
+ skb_reserve(skb, dev->hard_header_len);
+ h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
+ h->address = CISCO_MULTICAST;
+ h->control = 0;
+ h->protocol = htons (CISCO_KEEPALIVE);
+
+ ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
+ ch->type = htonl (type);
+ ch->par1 = htonl (par1);
+ ch->par2 = htonl (par2);
+ ch->rel = -1;
+ ch->time0 = htons ((u16) (t >> 16));
+ ch->time1 = htons ((u16) t);
+
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
+ dev->name, ntohl (ch->type), ch->par1,
+ ch->par2, ch->rel, ch->time0, ch->time1);
+ sp->obytes += skb->len;
+ skb->priority=TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb_queue_tail(&tx_queue, skb);
+}
+
+/**
+ * sppp_close - close down a synchronous PPP or Cisco HDLC link
+ * @dev: The network device to drop the link of
+ *
+ * This drops the logical interface to the channel. It is not
+ * done politely as we assume we will also be dropping DTR. Any
+ * timeouts are killed.
+ */
+
+int sppp_close (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ sp->pp_link_state = SPPP_LINK_DOWN;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_close);
+
+/**
+ * sppp_open - open a synchronous PPP or Cisco HDLC link
+ * @dev: Network device to activate
+ *
+ * Close down any existing synchronous session and commence
+ * from scratch. In the PPP case this means negotiating LCP/IPCP
+ * and friends, while for Cisco HDLC we simply need to start sending
+ * keepalives.
+ */
+
+int sppp_open (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ sppp_close(dev);
+
+ spin_lock_irqsave(&sp->lock, flags);
+ if (!(sp->pp_flags & PP_CISCO)) {
+ sppp_lcp_open (sp);
+ }
+ sp->pp_link_state = SPPP_LINK_DOWN;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_open);
+
+/**
+ * sppp_reopen - notify of physical link loss
+ * @dev: Device that lost the link
+ *
+ * This function informs the synchronous protocol code that
+ * the underlying link died (for example a carrier drop on X.21)
+ *
+ * We increment the magic numbers to ensure that if the other end
+ * failed to notice, we will correctly start a new session. Due to
+ * the nature of telco circuits it is possible to lose carrier on
+ * one end only.
+ *
+ * Having done this we go back to negotiating. This function may
+ * be called from an interrupt context.
+ */
+
+int sppp_reopen (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ sppp_close(dev);
+
+ spin_lock_irqsave(&sp->lock, flags);
+ if (!(sp->pp_flags & PP_CISCO))
+ {
+ sp->lcp.magic = jiffies;
+ ++sp->pp_seq;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Give it a moment for the line to settle then go */
+ sppp_set_timeout (sp, 1);
+ }
+ sp->pp_link_state=SPPP_LINK_DOWN;
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_reopen);
+
+/**
+ * sppp_change_mtu - Change the link MTU
+ * @dev: Device to change MTU on
+ * @new_mtu: New MTU
+ *
+ * Change the MTU on the link. This can only be called with
+ * the link down. It returns an error if the link is up or
+ * the mtu is out of range.
+ */
+
+int sppp_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
+ return -EINVAL;
+ dev->mtu=new_mtu;
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_change_mtu);
+
+/**
+ * sppp_do_ioctl - Ioctl handler for ppp/hdlc
+ * @dev: Device subject to ioctl
+ * @ifr: Interface request block from the user
+ * @cmd: Command that is being issued
+ *
+ * This function handles the ioctls that may be issued by the user
+ * to control the settings of a PPP/HDLC link. It does both busy
+ * and security checks. This function is intended to be wrapped by
+ * callers who wish to add additional ioctl calls of their own.
+ */
+
+int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+
+ if(dev->flags&IFF_UP)
+ return -EBUSY;
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(cmd)
+ {
+ case SPPPIOCCISCO:
+ sp->pp_flags|=PP_CISCO;
+ dev->type = ARPHRD_HDLC;
+ break;
+ case SPPPIOCPPP:
+ sp->pp_flags&=~PP_CISCO;
+ dev->type = ARPHRD_PPP;
+ break;
+ case SPPPIOCDEBUG:
+ sp->pp_flags&=~PP_DEBUG;
+ if(ifr->ifr_flags)
+ sp->pp_flags|=PP_DEBUG;
+ break;
+ case SPPPIOCGFLAGS:
+ if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
+ return -EFAULT;
+ break;
+ case SPPPIOCSFLAGS:
+ if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_do_ioctl);
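+
+/*
+ * Illustrative sketch only (not part of this driver): a caller that
+ * wraps sppp_do_ioctl() to add ioctls of its own would typically fall
+ * back to it for anything it does not recognise. MYDRV_PRIVATE_IOCTL
+ * and mydrv_private() are hypothetical names used only for the example.
+ *
+ *	static int mydrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ *	{
+ *		switch (cmd) {
+ *		case MYDRV_PRIVATE_IOCTL:
+ *			return mydrv_private(dev, ifr);
+ *		default:
+ *			return sppp_do_ioctl(dev, ifr, cmd);
+ *		}
+ *	}
+ */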
+
+/**
+ * sppp_attach - attach synchronous PPP/HDLC to a device
+ * @pd: PPP device to initialise
+ *
+ * This initialises the PPP/HDLC support on an interface. At the
+ * time of calling the dev element must point to the network device
+ * that this interface is attached to. The interface should not yet
+ * be registered.
+ */
+
+void sppp_attach(struct ppp_device *pd)
+{
+ struct net_device *dev = pd->dev;
+ struct sppp *sp = &pd->sppp;
+ unsigned long flags;
+
+ /* Make sure embedding is safe for sppp_of */
+ BUG_ON(sppp_of(dev) != sp);
+
+ spin_lock_irqsave(&spppq_lock, flags);
+ /* Initialize keepalive handler. */
+ if (! spppq)
+ {
+ init_timer(&sppp_keepalive_timer);
+ sppp_keepalive_timer.expires=jiffies+10*HZ;
+ sppp_keepalive_timer.function=sppp_keepalive;
+ add_timer(&sppp_keepalive_timer);
+ }
+ /* Insert new entry into the keepalive list. */
+ sp->pp_next = spppq;
+ spppq = sp;
+ spin_unlock_irqrestore(&spppq_lock, flags);
+
+ sp->pp_loopcnt = 0;
+ sp->pp_alivecnt = 0;
+ sp->pp_seq = 0;
+ sp->pp_rseq = 0;
+ sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
+ sp->lcp.magic = 0;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sp->pp_if = dev;
+ spin_lock_init(&sp->lock);
+
+ /*
+ * Device specific setup. All but interrupt handler and
+ * hard_start_xmit.
+ */
+
+ dev->hard_header = sppp_hard_header;
+ dev->rebuild_header = sppp_rebuild_header;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_HDLC;
+ dev->addr_len = 0;
+ dev->hard_header_len = sizeof(struct ppp_header);
+ dev->mtu = PPP_MTU;
+ /*
+ * These four may be overridden by the driver, but any override MUST
+ * still call the corresponding sppp_ functions.
+ */
+ dev->do_ioctl = sppp_do_ioctl;
+#if 0
+ dev->get_stats = NULL; /* Let the driver override these */
+ dev->open = sppp_open;
+ dev->stop = sppp_close;
+#endif
+ dev->change_mtu = sppp_change_mtu;
+ dev->hard_header_cache = NULL;
+ dev->header_cache_update = NULL;
+ dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
+}
+
+EXPORT_SYMBOL(sppp_attach);
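+
+/*
+ * Minimal usage sketch (illustrative only, not part of this driver):
+ * a WAN driver embeds struct sppp via struct ppp_device, calls
+ * sppp_attach() before registering the device, wraps sppp_open() and
+ * sppp_close() from its own open/stop handlers, and calls
+ * sppp_detach() before the device is freed. The mydrv_ names are
+ * hypothetical; see new_if()/if_open()/if_close()/del_if() in
+ * drivers/net/wan/wanpipe_multppp.c further below for a real caller.
+ *
+ *	static int mydrv_open(struct net_device *dev)
+ *	{
+ *		int err = sppp_open(dev);
+ *		if (err)
+ *			return err;
+ *		netif_start_queue(dev);
+ *		return 0;
+ *	}
+ *
+ *	static int mydrv_stop(struct net_device *dev)
+ *	{
+ *		sppp_close(dev);
+ *		netif_stop_queue(dev);
+ *		return 0;
+ *	}
+ */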
+
+/**
+ * sppp_detach - release PPP resources from a device
+ * @dev: Network device to release
+ *
+ * Stop and free up any PPP/HDLC resources used by this
+ * interface. This must be called before the device is
+ * freed.
+ */
+
+void sppp_detach (struct net_device *dev)
+{
+ struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&spppq_lock, flags);
+ /* Remove the entry from the keepalive list. */
+ for (q = &spppq; (p = *q); q = &p->pp_next)
+ if (p == sp) {
+ *q = p->pp_next;
+ break;
+ }
+
+ /* Stop keepalive handler. */
+ if (! spppq)
+ del_timer(&sppp_keepalive_timer);
+ sppp_clear_timeout (sp);
+ spin_unlock_irqrestore(&spppq_lock, flags);
+}
+
+EXPORT_SYMBOL(sppp_detach);
+
+/*
+ * Analyze the LCP Configure-Request options list
+ * for the presence of unknown options.
+ * If the request contains unknown options, build and send a
+ * Configure-Reject packet containing only the unknown options.
+ */
+static int
+sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
+ int len, u32 *magic)
+{
+ u8 *buf, *r, *p;
+ int rlen;
+
+ len -= 4;
+ buf = r = kmalloc (len, GFP_ATOMIC);
+ if (! buf)
+ return (0);
+
+ p = (void*) (h+1);
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- extract. */
+ if (len >= 6 && p[1] == 6) {
+ *magic = (u32)p[2] << 24 |
+ (u32)p[3] << 16 | p[4] << 8 | p[5];
+ continue;
+ }
+ break;
+ case LCP_OPT_ASYNC_MAP:
+ /* Async control character map -- check to be zero. */
+ if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
+ ! p[4] && ! p[5])
+ continue;
+ break;
+ case LCP_OPT_MRU:
+ /* Maximum receive unit -- always OK. */
+ continue;
+ default:
+ /* Others not supported. */
+ break;
+ }
+ /* Add the option to rejected list. */
+ memcpy(r, p, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen)
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
+ kfree(buf);
+ return (rlen == 0);
+}
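+
+/*
+ * For reference: each LCP option handled above is a <type, length,
+ * data> triple whose length octet covers the whole option, which is
+ * why the loop advances by p[1]. For example, the Magic-Number option
+ * built by sppp_lcp_open() below is six octets: LCP_OPT_MAGIC, a
+ * length of 6, then the 32-bit magic in network byte order.
+ */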
+
+static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct lcp_header *h;
+ struct net_device *dev = sp->pp_if;
+ int len = skb->len;
+
+ if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ h = (struct lcp_header *)skb->data;
+ skb_pull(skb,sizeof(struct lcp_header));
+ if (sp->pp_flags & PP_DEBUG) {
+ printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
+ dev->name, len,
+ sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
+ if (len > 4)
+ sppp_print_bytes ((u8*) (h+1), len-4);
+ printk (">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
+ break;
+ case IPCP_CONF_REQ:
+ if (len < 4) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ if (len > 4) {
+ sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
+ len-4, h+1);
+
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_OPENED:
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ /* fall through... */
+ case IPCP_STATE_ACK_SENT:
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ } else {
+ /* Send Configure-Ack packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
+ 0, NULL);
+ /* Change the state. */
+ if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
+ sp->ipcp.state = IPCP_STATE_OPENED;
+ else
+ sp->ipcp.state = IPCP_STATE_ACK_SENT;
+ }
+ break;
+ case IPCP_CONF_ACK:
+ if (h->ident != sp->ipcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_CLOSED:
+ sp->ipcp.state = IPCP_STATE_ACK_RCVD;
+ sppp_set_timeout (sp, 5);
+ break;
+ case IPCP_STATE_ACK_SENT:
+ sp->ipcp.state = IPCP_STATE_OPENED;
+ break;
+ }
+ break;
+ case IPCP_CONF_NAK:
+ case IPCP_CONF_REJ:
+ if (h->ident != sp->ipcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ break;
+ case IPCP_TERM_REQ:
+ /* Send Terminate-Ack packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_TERM_ACK:
+ /* Ignore for now. */
+ case IPCP_CODE_REJ:
+ /* Ignore for now. */
+ break;
+ }
+}
+
+static void sppp_lcp_open (struct sppp *sp)
+{
+ char opt[6];
+
+ if (! sp->lcp.magic)
+ sp->lcp.magic = jiffies;
+ opt[0] = LCP_OPT_MAGIC;
+ opt[1] = sizeof (opt);
+ opt[2] = sp->lcp.magic >> 24;
+ opt[3] = sp->lcp.magic >> 16;
+ opt[4] = sp->lcp.magic >> 8;
+ opt[5] = sp->lcp.magic;
+ sp->lcp.confid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
+ sizeof (opt), &opt);
+ sppp_set_timeout (sp, 2);
+}
+
+static void sppp_ipcp_open (struct sppp *sp)
+{
+ sp->ipcp.confid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
+ sppp_set_timeout (sp, 2);
+}
+
+/*
+ * Process PPP control protocol timeouts.
+ */
+
+static void sppp_cp_timeout (unsigned long arg)
+{
+ struct sppp *sp = (struct sppp*) arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ sp->pp_flags &= ~PP_TIMO;
+ if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return;
+ }
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ /* No ACK for Configure-Request, retry. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_STATE_ACK_RCVD:
+ /* ACK got, but no Configure-Request for peer, retry. */
+ sppp_lcp_open (sp);
+ sp->lcp.state = LCP_STATE_CLOSED;
+ break;
+ case LCP_STATE_ACK_SENT:
+ /* ACK sent but no ACK for Configure-Request, retry. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_STATE_OPENED:
+ /* LCP is already OK, try IPCP. */
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_CLOSED:
+ /* No ACK for Configure-Request, retry. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_STATE_ACK_RCVD:
+ /* ACK got, but no Configure-Request for peer, retry. */
+ sppp_ipcp_open (sp);
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ break;
+ case IPCP_STATE_ACK_SENT:
+ /* ACK sent but no ACK for Configure-Request, retry. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_STATE_OPENED:
+ /* IPCP is OK. */
+ break;
+ }
+ break;
+ }
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+}
+
+static char *sppp_lcp_type_name (u8 type)
+{
+ static char buf [8];
+ switch (type) {
+ case LCP_CONF_REQ: return ("conf-req");
+ case LCP_CONF_ACK: return ("conf-ack");
+ case LCP_CONF_NAK: return ("conf-nack");
+ case LCP_CONF_REJ: return ("conf-rej");
+ case LCP_TERM_REQ: return ("term-req");
+ case LCP_TERM_ACK: return ("term-ack");
+ case LCP_CODE_REJ: return ("code-rej");
+ case LCP_PROTO_REJ: return ("proto-rej");
+ case LCP_ECHO_REQ: return ("echo-req");
+ case LCP_ECHO_REPLY: return ("echo-reply");
+ case LCP_DISC_REQ: return ("discard-req");
+ }
+ sprintf (buf, "%xh", type);
+ return (buf);
+}
+
+static char *sppp_ipcp_type_name (u8 type)
+{
+ static char buf [8];
+ switch (type) {
+ case IPCP_CONF_REQ: return ("conf-req");
+ case IPCP_CONF_ACK: return ("conf-ack");
+ case IPCP_CONF_NAK: return ("conf-nack");
+ case IPCP_CONF_REJ: return ("conf-rej");
+ case IPCP_TERM_REQ: return ("term-req");
+ case IPCP_TERM_ACK: return ("term-ack");
+ case IPCP_CODE_REJ: return ("code-rej");
+ }
+ sprintf (buf, "%xh", type);
+ return (buf);
+}
+
+static void sppp_print_bytes (u_char *p, u16 len)
+{
+ printk (" %x", *p++);
+ while (--len > 0)
+ printk ("-%x", *p++);
+}
+
+/**
+ * sppp_rcv - receive and process a WAN PPP frame
+ * @skb: The buffer to process
+ * @dev: The device it arrived on
+ * @p: Unused
+ *
+ * Protocol glue. This drives the deferred processing mode the poorer
+ * cards use. This can be called directly by cards that do not have
+ * timing constraints but is normally called from the network layer
+ * after interrupt servicing to process frames queued via netif_rx.
+ */
+
+static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p)
+{
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+ sppp_input(dev,skb);
+ return 0;
+}
+
+struct packet_type sppp_packet_type = {
+ .type = __constant_htons(ETH_P_WAN_PPP),
+ .func = sppp_rcv,
+};
+
+static char banner[] __initdata =
+ KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
+ KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
+ "Jan \"Yenya\" Kasprzak.\n";
+
+static int __init sync_ppp_init(void)
+{
+ if(debug)
+ debug=PP_DEBUG;
+ printk(banner);
+ skb_queue_head_init(&tx_queue);
+ dev_add_pack(&sppp_packet_type);
+ return 0;
+}
+
+
+static void __exit sync_ppp_cleanup(void)
+{
+ dev_remove_pack(&sppp_packet_type);
+}
+
+module_init(sync_ppp_init);
+module_exit(sync_ppp_cleanup);
+module_param(debug, int, 0);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/wan/wanpipe_multppp.c b/drivers/net/wan/wanpipe_multppp.c
new file mode 100644
index 000000000000..6aa6987d96cb
--- /dev/null
+++ b/drivers/net/wan/wanpipe_multppp.c
@@ -0,0 +1,2357 @@
+/*****************************************************************************
+* wanpipe_multppp.c Multi-Port PPP driver module.
+*
+* Authors: Nenad Corbic <ncorbic@sangoma.com>
+*
+* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Dec 15 2000 Updated for 2.4.X kernel
+* Nov 15 2000 Fixed the SyncPPP support for kernels 2.2.16 and higher.
+* The pppstruct has changed.
+* Jul 13 2000 Using the kernel Syncppp module on top of RAW Wanpipe CHDLC
+* module.
+*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/slab.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/if_arp.h> /* ARPHRD_* defines */
+
+#include <linux/in.h> /* sockaddr_in */
+#include <linux/inet.h>
+#include <linux/if.h>
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <linux/sdlapci.h>
+#include <asm/io.h>
+
+#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
+#include <linux/sdla_asy.h> /* CHDLC (async) API definitions */
+
+#include <linux/if_wanpipe_common.h> /* Socket Driver common area */
+#include <linux/if_wanpipe.h>
+
+
+#include <linux/inetdevice.h>
+#include <asm/uaccess.h>
+
+#include <net/syncppp.h>
+
+
+/****** Defines & Macros ****************************************************/
+
+#ifdef _DEBUG_
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+/* reasons for enabling the timer interrupt on the adapter */
+#define TMR_INT_ENABLED_UDP 0x01
+#define TMR_INT_ENABLED_UPDATE 0x02
+#define TMR_INT_ENABLED_CONFIG 0x04
+
+#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
+#define CHDLC_HDR_LEN 1
+
+#define IFF_POINTTOPOINT 0x10
+
+#define CHDLC_API 0x01
+
+#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
+#define MAX_BH_BUFF 10
+
+#define CRC_LENGTH 2
+#define PPP_HEADER_LEN 4
+
+/******Data Structures*****************************************************/
+
+/* This structure is placed in the private data area of the device structure.
+ * The card structure used to occupy the private area, but now the following
+ * structure incorporates the card pointer along with CHDLC-specific data.
+ */
+
+typedef struct chdlc_private_area
+{
+ void *if_ptr; /* General Pointer used by SPPP */
+ wanpipe_common_t common;
+ sdla_t *card;
+ int TracingEnabled; /* For enabling Tracing */
+ unsigned long curr_trace_addr; /* Used for Tracing */
+ unsigned long start_trace_addr;
+ unsigned long end_trace_addr;
+ unsigned long base_addr_trace_buffer;
+ unsigned long end_addr_trace_buffer;
+ unsigned short number_trace_elements;
+ unsigned available_buffer_space;
+ unsigned long router_start_time;
+ unsigned char route_status;
+ unsigned char route_removed;
+ unsigned long tick_counter; /* For 5s timeout counter */
+ unsigned long router_up_time;
+ u32 IP_address; /* IP addressing */
+ u32 IP_netmask;
+ unsigned char mc; /* Multicast support on/off */
+ unsigned short udp_pkt_lgth; /* udp packet processing */
+ char udp_pkt_src;
+ char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
+ unsigned short timer_int_enabled;
+ char update_comms_stats; /* updating comms stats */
+
+ //FIXME: add driver stats as per frame relay!
+
+} chdlc_private_area_t;
+
+/* Route Status options */
+#define NO_ROUTE 0x00
+#define ADD_ROUTE 0x01
+#define ROUTE_ADDED 0x02
+#define REMOVE_ROUTE 0x03
+
+
+/* variable for keeping track of enabling/disabling FT1 monitor status */
+static int rCount = 0;
+
+/* variable for tracking how many interfaces to open for WANPIPE on the
+ two ports */
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+/****** Function Prototypes *************************************************/
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int update(struct wan_device* wandev);
+static int new_if(struct wan_device* wandev, struct net_device* dev,
+ wanif_conf_t* conf);
+static int del_if(struct wan_device* wandev, struct net_device* dev);
+
+/* Network device interface */
+static int if_init(struct net_device* dev);
+static int if_open(struct net_device* dev);
+static int if_close(struct net_device* dev);
+static int if_send(struct sk_buff* skb, struct net_device* dev);
+static struct net_device_stats* if_stats(struct net_device* dev);
+
+static void if_tx_timeout(struct net_device *dev);
+
+/* CHDLC Firmware interface functions */
+static int chdlc_configure (sdla_t* card, void* data);
+static int chdlc_comm_enable (sdla_t* card);
+static int chdlc_comm_disable (sdla_t* card);
+static int chdlc_read_version (sdla_t* card, char* str);
+static int chdlc_set_intr_mode (sdla_t* card, unsigned mode);
+static int chdlc_send (sdla_t* card, void* data, unsigned len);
+static int chdlc_read_comm_err_stats (sdla_t* card);
+static int chdlc_read_op_stats (sdla_t* card);
+static int config_chdlc (sdla_t *card);
+
+
+/* Miscellaneous CHDLC Functions */
+static int set_chdlc_config (sdla_t* card);
+static void init_chdlc_tx_rx_buff(sdla_t* card, struct net_device *dev);
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
+static int process_chdlc_exception(sdla_t *card);
+static int process_global_exception(sdla_t *card);
+static int update_comms_stats(sdla_t* card,
+ chdlc_private_area_t* chdlc_priv_area);
+static void port_set_state (sdla_t *card, int);
+
+/* Interrupt handlers */
+static void wsppp_isr (sdla_t* card);
+static void rx_intr (sdla_t* card);
+static void timer_intr(sdla_t *);
+
+/* Miscellaneous functions */
+static int reply_udp( unsigned char *data, unsigned int mbox_len );
+static int intr_test( sdla_t* card);
+static int udp_pkt_type( struct sk_buff *skb , sdla_t* card);
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area);
+static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area);
+static unsigned short calc_checksum (char *, int);
+static void s508_lock (sdla_t *card, unsigned long *smp_flags);
+static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
+static void send_ppp_term_request(struct net_device *dev);
+
+
+static int Intr_test_counter;
+/****** Public Functions ****************************************************/
+
+/*============================================================================
+ * Cisco HDLC protocol initialization routine.
+ *
+ * This routine is called by the main WANPIPE module during setup. At this
+ * point adapter is completely initialized and firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure.
+ */
+int wsppp_init (sdla_t* card, wandev_conf_t* conf)
+{
+ unsigned char port_num;
+ int err;
+ unsigned long max_permitted_baud = 0;
+ SHARED_MEMORY_INFO_STRUCT *flags;
+
+ union
+ {
+ char str[80];
+ } u;
+ volatile CHDLC_MAILBOX_STRUCT* mb;
+ CHDLC_MAILBOX_STRUCT* mb1;
+ unsigned long timeout;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_MPPP) {
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Find out which Port to use */
+ if ((conf->comm_port == WANOPT_PRI) || (conf->comm_port == WANOPT_SEC)){
+ if (card->next){
+
+ if (conf->comm_port != card->next->u.c.comm_port){
+ card->u.c.comm_port = conf->comm_port;
+ }else{
+ printk(KERN_ERR "%s: ERROR - %s port used!\n",
+ card->wandev.name, PORT(conf->comm_port));
+ return -EINVAL;
+ }
+ }else{
+ card->u.c.comm_port = conf->comm_port;
+ }
+ }else{
+ printk(KERN_ERR "%s: ERROR - Invalid Port Selected!\n",
+ card->wandev.name);
+ return -EINVAL;
+ }
+
+
+ /* Initialize protocol-specific fields */
+ if(card->hw.type != SDLA_S514){
+
+ if (card->u.c.comm_port == WANOPT_PRI){
+ card->mbox = (void *) card->hw.dpmbase;
+ }else{
+ card->mbox = (void *) card->hw.dpmbase +
+ SEC_BASE_ADDR_MB_STRUCT - PRI_BASE_ADDR_MB_STRUCT;
+ }
+ }else{
+ /* for a S514 adapter, set a pointer to the actual mailbox in the */
+ /* allocated virtual memory area */
+ if (card->u.c.comm_port == WANOPT_PRI){
+ card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
+ }else{
+ card->mbox = (void *) card->hw.dpmbase + SEC_BASE_ADDR_MB_STRUCT;
+ }
+ }
+
+ mb = mb1 = card->mbox;
+
+ if (!card->configured){
+
+ /* The board will place an 'I' in the return code to indicate that it is
+ ready to accept commands. We expect this to be completed in less
+ than 1 second. */
+
+ timeout = jiffies;
+ while (mb->return_code != 'I') /* Wait 1s for board to initialize */
+ if ((jiffies - timeout) > 1*HZ) break;
+
+ if (mb->return_code != 'I') {
+ printk(KERN_INFO
+ "%s: Initialization not completed by adapter\n",
+ card->devname);
+ printk(KERN_INFO "Please contact Sangoma representative.\n");
+ return -EIO;
+ }
+ }
+
+ /* Read firmware version. Note that when adapter initializes, it
+ * clears the mailbox, so it may appear that the first command was
+ * executed successfully when in fact it was merely erased. To work
+ * around this, we execute the first command twice.
+ */
+
+ if (chdlc_read_version(card, u.str))
+ return -EIO;
+
+ printk(KERN_INFO "%s: Running Raw CHDLC firmware v%s\n"
+ "%s: for Multi-Port PPP protocol.\n",
+ card->devname,u.str,card->devname);
+
+ card->isr = &wsppp_isr;
+ card->poll = NULL;
+ card->exec = NULL;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = &del_if;
+ card->wandev.udp_port = conf->udp_port;
+
+ card->wandev.new_if_cnt = 0;
+
+ /* reset the number of times the 'update()' proc has been called */
+ card->u.c.update_call_count = 0;
+
+ card->wandev.ttl = conf->ttl;
+ card->wandev.interface = conf->interface;
+
+ if ((card->u.c.comm_port == WANOPT_SEC && conf->interface == WANOPT_V35)&&
+ card->hw.type != SDLA_S514){
+ printk(KERN_INFO "%s: ERROR - V35 Interface not supported on S508 %s port \n",
+ card->devname, PORT(card->u.c.comm_port));
+ return -EIO;
+ }
+
+
+ card->wandev.clocking = conf->clocking;
+
+ port_num = card->u.c.comm_port;
+
+ /* Setup Port Bps */
+
+ if(card->wandev.clocking) {
+ if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
+ /* For Primary Port 0 */
+ max_permitted_baud =
+ (card->hw.type == SDLA_S514) ?
+ PRI_MAX_BAUD_RATE_S514 :
+ PRI_MAX_BAUD_RATE_S508;
+ }
+ else if(port_num == WANOPT_SEC) {
+ /* For Secondary Port 1 */
+ max_permitted_baud =
+ (card->hw.type == SDLA_S514) ?
+ SEC_MAX_BAUD_RATE_S514 :
+ SEC_MAX_BAUD_RATE_S508;
+ }
+
+ if(conf->bps > max_permitted_baud) {
+ conf->bps = max_permitted_baud;
+ printk(KERN_INFO "%s: Baud too high!\n",
+ card->wandev.name);
+ printk(KERN_INFO "%s: Baud rate set to %lu bps\n",
+ card->wandev.name, max_permitted_baud);
+ }
+
+ card->wandev.bps = conf->bps;
+ }else{
+ card->wandev.bps = 0;
+ }
+
+ /* Setup the Port MTU */
+ if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
+
+ /* For Primary Port 0 */
+ card->wandev.mtu =
+ (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
+ min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
+ CHDLC_DFLT_DATA_LEN;
+ } else if(port_num == WANOPT_SEC) {
+ /* For Secondary Port 1 */
+ card->wandev.mtu =
+ (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
+ min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
+ CHDLC_DFLT_DATA_LEN;
+ }
+
+ /* Add on a PPP Header */
+ card->wandev.mtu += PPP_HEADER_LEN;
+
+ /* Set up the interrupt status area */
+ /* Read the CHDLC Configuration and obtain:
+ * Ptr to shared memory info struct
+ * Use this pointer to calculate the value of card->u.c.flags !
+ */
+ mb1->buffer_length = 0;
+ mb1->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
+ if(err != COMMAND_OK) {
+ clear_bit(1, (void*)&card->wandev.critical);
+
+ if(card->hw.type != SDLA_S514)
+ enable_irq(card->hw.irq);
+
+ chdlc_error(card, err, mb1);
+ return -EIO;
+ }
+
+ if(card->hw.type == SDLA_S514){
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct));
+ }else{
+ card->u.c.flags = (void *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
+ ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
+ }
+
+ flags = card->u.c.flags;
+
+ /* This is for the ports link state */
+ card->wandev.state = WAN_DUALPORT;
+ card->u.c.state = WAN_DISCONNECTED;
+
+
+ if (!card->wandev.piggyback){
+ err = intr_test(card);
+
+ if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
+ printk(KERN_ERR "%s: Interrupt test failed (%i)\n",
+ card->devname, Intr_test_counter);
+ printk(KERN_ERR "%s: Please choose another interrupt\n",
+ card->devname);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "%s: Interrupt test passed (%i)\n",
+ card->devname, Intr_test_counter);
+ }
+
+
+ if (chdlc_set_intr_mode(card, APP_INT_ON_TIMER)){
+ printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
+ card->devname);
+ return -EIO;
+ }
+
+ /* Mask the Timer interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TIMER;
+
+ printk(KERN_INFO "\n");
+
+ return 0;
+}
+
+/******* WAN Device Driver Entry Points *************************************/
+
+/*============================================================================
+ * Update device status & statistics
+ * This procedure is called when updating the PROC file system and returns
+ * various communications statistics. These statistics are accumulated from 3
+ * different locations:
+ * 1) The 'if_stats' recorded for the device.
+ * 2) Communication error statistics on the adapter.
+ * 3) CHDLC operational statistics on the adapter.
+ * The board level statistics are read during a timer interrupt. Note that we
+ * read the error and operational statistics during consecutive timer ticks so
+ * as to minimize the time that we are inside the interrupt handler.
+ *
+ */
+static int update(struct wan_device* wandev)
+{
+ sdla_t* card = wandev->private;
+ struct net_device* dev;
+ volatile chdlc_private_area_t* chdlc_priv_area;
+ SHARED_MEMORY_INFO_STRUCT *flags;
+ unsigned long timeout;
+
+ /* sanity checks */
+ if((wandev == NULL) || (wandev->private == NULL))
+ return -EFAULT;
+
+ if(wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ /* more sanity checks */
+ if(!card->u.c.flags)
+ return -ENODEV;
+
+ if((dev=card->wandev.dev) == NULL)
+ return -ENODEV;
+
+ if((chdlc_priv_area=dev->priv) == NULL)
+ return -ENODEV;
+
+ flags = card->u.c.flags;
+
+ if(chdlc_priv_area->update_comms_stats){
+ return -EAGAIN;
+ }
+
+ /* we will need 2 timer interrupts to complete the */
+ /* reading of the statistics */
+ chdlc_priv_area->update_comms_stats = 2;
+ flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
+ chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE;
+
+ /* wait a maximum of 1 second for the statistics to be updated */
+ timeout = jiffies;
+ for(;;) {
+ if(chdlc_priv_area->update_comms_stats == 0)
+ break;
+ if ((jiffies - timeout) > (1 * HZ)){
+ chdlc_priv_area->update_comms_stats = 0;
+ chdlc_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+
+/*============================================================================
+ * Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created)
+ */
+static int new_if(struct wan_device* wandev, struct net_device* pdev,
+ wanif_conf_t* conf)
+{
+
+ struct ppp_device *pppdev = (struct ppp_device *)pdev;
+ struct net_device *dev = NULL;
+ struct sppp *sp;
+ sdla_t* card = wandev->private;
+ chdlc_private_area_t* chdlc_priv_area;
+
+ if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
+ printk(KERN_INFO "%s: invalid interface name!\n",
+ card->devname);
+ return -EINVAL;
+ }
+
+ /* allocate and initialize private data */
+ chdlc_priv_area = kmalloc(sizeof(chdlc_private_area_t), GFP_KERNEL);
+
+ if(chdlc_priv_area == NULL)
+ return -ENOMEM;
+
+ memset(chdlc_priv_area, 0, sizeof(chdlc_private_area_t));
+
+ chdlc_priv_area->card = card;
+
+ /* initialize data */
+ strcpy(card->u.c.if_name, conf->name);
+
+ if(card->wandev.new_if_cnt > 0) {
+ kfree(chdlc_priv_area);
+ return -EEXIST;
+ }
+
+ card->wandev.new_if_cnt++;
+
+ chdlc_priv_area->TracingEnabled = 0;
+
+ //We don't need this any more
+ chdlc_priv_area->route_status = NO_ROUTE;
+ chdlc_priv_area->route_removed = 0;
+
+ printk(KERN_INFO "%s: Firmware running in HDLC STREAMING Mode\n",
+ wandev->name);
+
+ /* Setup wanpipe as a router (WANPIPE) or as an API */
+ if( strcmp(conf->usedby, "WANPIPE") == 0) {
+ printk(KERN_INFO "%s: Driver running in WANPIPE mode!\n",
+ wandev->name);
+ card->u.c.usedby = WANPIPE;
+ } else {
+ printk(KERN_INFO
+ "%s: API Mode is not supported for SyncPPP!\n",
+ wandev->name);
+ kfree(chdlc_priv_area);
+ return -EINVAL;
+ }
+
+ /* Get Multicast Information */
+ chdlc_priv_area->mc = conf->mc;
+
+
+ chdlc_priv_area->if_ptr = pppdev;
+
+ /* Attach PPP protocol layer to pppdev.
+ * sppp_attach() will initialize the dev structure and set up
+ * the PPP layer protocols. All we have to do is to bind in the
+ * if_open(), if_close(), if_send() and get_stats() functions.
+ */
+ sppp_attach(pppdev);
+ dev = pppdev->dev;
+ sp = &pppdev->sppp;
+
+ /* Prepare the network device data space for registration.
+ * dev is only valid after sppp_attach() has been called.
+ */
+ strcpy(dev->name, card->u.c.if_name);
+
+ /* Enable PPP Debugging */
+ // FIXME Fix this up somehow
+ //sp->pp_flags |= PP_DEBUG;
+ sp->pp_flags &= ~PP_CISCO;
+
+ dev->init = &if_init;
+ dev->priv = chdlc_priv_area;
+
+ return 0;
+}
+
+
+
+
+/*============================================================================
+ * Delete logical channel.
+ */
+static int del_if(struct wan_device* wandev, struct net_device* dev)
+{
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+ sdla_t *card = chdlc_priv_area->card;
+ unsigned long smp_lock;
+
+ /* Detach the PPP layer */
+ printk(KERN_INFO "%s: Detaching SyncPPP Module from %s\n",
+ wandev->name,dev->name);
+
+ lock_adapter_irq(&wandev->lock,&smp_lock);
+
+ sppp_detach(dev);
+ chdlc_priv_area->if_ptr=NULL;
+
+ chdlc_set_intr_mode(card, 0);
+ if (card->u.c.comm_enabled)
+ chdlc_comm_disable(card);
+ unlock_adapter_irq(&wandev->lock,&smp_lock);
+
+ port_set_state(card, WAN_DISCONNECTED);
+
+ return 0;
+}
+
+
+/****** Network Device Interface ********************************************/
+
+/*============================================================================
+ * Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration.
+ */
+static int if_init(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+ struct wan_device* wandev = &card->wandev;
+
+ /* NOTE: Most of the dev initialization was
+ * done in sppp_attach(), called by new_if()
+ * function. All we have to do here is
+ * to link four major routines below.
+ */
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+ dev->tx_timeout = &if_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+
+ /* Initialize hardware parameters */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = wandev->maddr;
+ dev->mem_end = wandev->maddr + wandev->msize - 1;
+
+ /* Set transmit buffer queue length
+ * If we overfill this queue the packets will
+ * be dropped by the kernel.
+ * sppp_attach() sets this to 10, but
+ * 100 will give us more room at low speeds.
+ */
+ dev->tx_queue_len = 100;
+
+ return 0;
+}
+
+
+/*============================================================================
+ * Handle transmit timeout event from netif watchdog
+ */
+static void if_tx_timeout(struct net_device *dev)
+{
+ chdlc_private_area_t* chan = dev->priv;
+ sdla_t *card = chan->card;
+
+ /* If our device stays busy for at least 5 seconds then we will
+ * kick-start it by waking the transmit queue. We expect that our
+ * device never stays busy for more than 5 seconds, so this is
+ * only used as a last resort.
+ */
+
+ ++card->wandev.stats.collisions;
+
+ printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
+ netif_wake_queue (dev);
+}
+
+
+/*============================================================================
+ * Open network interface.
+ * o enable communications and interrupts.
+ * o prevent module from unloading by incrementing use count
+ *
+ * Return 0 if O.k. or errno.
+ */
+static int if_open(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+ struct timeval tv;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+
+ /* Only one open per interface is allowed */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Start PPP Layer */
+ if (sppp_open(dev)){
+ return -EIO;
+ }
+
+ do_gettimeofday(&tv);
+ chdlc_priv_area->router_start_time = tv.tv_sec;
+
+ netif_start_queue(dev);
+
+ wanpipe_open(card);
+
+ chdlc_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
+ flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
+ return 0;
+}
+
+/*============================================================================
+ * Close network interface.
+ * o if this is the last close, then disable communications and interrupts.
+ * o reset flags.
+ */
+static int if_close(struct net_device* dev)
+{
+ chdlc_private_area_t* chdlc_priv_area = dev->priv;
+ sdla_t* card = chdlc_priv_area->card;
+
+ /* Stop the PPP Layer */
+ sppp_close(dev);
+ netif_stop_queue(dev);
+
+ wanpipe_close(card);
+
+ return 0;
+}
+
+/*============================================================================
+ * Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission) to block a timer-based
+ * transmit from overlapping.
+ * o check link state. If link is not up, then drop the packet.
+ * o execute adapter send command.
+ * o free socket buffer
+ *
+ * Return: 0 complete (socket buffer must be freed)
+ * non-0 packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ * bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ * protocol stack and can be used for flow control with protocol layer.
+ */
+static int if_send(struct sk_buff* skb, struct net_device* dev)
+{
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+ sdla_t *card = chdlc_priv_area->card;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
+ int udp_type = 0;
+ unsigned long smp_flags;
+ int err=0;
+
+ netif_stop_queue(dev);
+
+
+ if (skb == NULL){
+ /* If we get here, some higher layer thinks we've missed a
+ * tx-done interrupt.
+ */
+ printk(KERN_INFO "%s: Received NULL skb buffer! interface %s got kicked!\n",
+ card->devname, dev->name);
+
+ netif_wake_queue(dev);
+ return 0;
+ }
+
+ if (ntohs(skb->protocol) != htons(PVC_PROT)){
+ /* check the udp packet type */
+
+ udp_type = udp_pkt_type(skb, card);
+ if (udp_type == UDP_CPIPE_TYPE){
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
+ chdlc_priv_area)){
+ chdlc_int->interrupt_permission |=
+ APP_INT_ON_TIMER;
+ }
+ netif_start_queue(dev);
+ return 0;
+ }
+ }
+
+ /* Lock the 508 Card: SMP is supported */
+ if(card->hw.type != SDLA_S514){
+ s508_lock(card,&smp_flags);
+ }
+
+ if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)){
+
+ printk(KERN_INFO "%s: Critical in if_send: %lx\n",
+ card->wandev.name,card->wandev.critical);
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+ goto if_send_crit_exit;
+ }
+
+ if (card->wandev.state != WAN_CONNECTED){
+ ++card->wandev.stats.tx_dropped;
+ netif_start_queue(dev);
+ goto if_send_crit_exit;
+ }
+
+ if (chdlc_send(card, skb->data, skb->len)){
+ netif_stop_queue(dev);
+
+ }else{
+ ++card->wandev.stats.tx_packets;
+ card->wandev.stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+ }
+
+if_send_crit_exit:
+ if (!(err=netif_queue_stopped(dev))){
+ dev_kfree_skb_any(skb);
+ }else{
+ chdlc_priv_area->tick_counter = jiffies;
+ chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
+ }
+
+ clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
+ if(card->hw.type != SDLA_S514){
+ s508_unlock(card,&smp_flags);
+ }
+
+ return err;
+}
+
+
+/*============================================================================
+ * Reply to UDP Management system.
+ * Return length of reply.
+ */
+static int reply_udp( unsigned char *data, unsigned int mbox_len )
+{
+
+ unsigned short len, udp_length, temp, ip_length;
+ unsigned long ip_temp;
+ int even_bound = 0;
+ chdlc_udp_pkt_t *c_udp_pkt = (chdlc_udp_pkt_t *)data;
+
+ /* Set length of packet */
+ len = sizeof(ip_pkt_t)+
+ sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ sizeof(trace_info_t)+
+ mbox_len;
+
+ /* fill in UDP reply */
+ c_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
+
+ /* fill in UDP length */
+ udp_length = sizeof(udp_pkt_t)+
+ sizeof(wp_mgmt_t)+
+ sizeof(cblock_t)+
+ sizeof(trace_info_t)+
+ mbox_len;
+
+ /* put it on an even boundary */
+ if ( udp_length & 0x0001 ) {
+ udp_length += 1;
+ len += 1;
+ even_bound = 1;
+ }
+
+ temp = (udp_length<<8)|(udp_length>>8);
+ c_udp_pkt->udp_pkt.udp_length = temp;
+
+ /* swap UDP ports */
+ temp = c_udp_pkt->udp_pkt.udp_src_port;
+ c_udp_pkt->udp_pkt.udp_src_port =
+ c_udp_pkt->udp_pkt.udp_dst_port;
+ c_udp_pkt->udp_pkt.udp_dst_port = temp;
+
+ /* add UDP pseudo header */
+ temp = 0x1100;
+ *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound)) = temp;
+ temp = (udp_length<<8)|(udp_length>>8);
+ *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound+2)) = temp;
+
+
+ /* calculate UDP checksum */
+ c_udp_pkt->udp_pkt.udp_checksum = 0;
+ c_udp_pkt->udp_pkt.udp_checksum = calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
+
+ /* fill in IP length */
+ ip_length = len;
+ temp = (ip_length<<8)|(ip_length>>8);
+ c_udp_pkt->ip_pkt.total_length = temp;
+
+ /* swap IP addresses */
+ ip_temp = c_udp_pkt->ip_pkt.ip_src_address;
+ c_udp_pkt->ip_pkt.ip_src_address = c_udp_pkt->ip_pkt.ip_dst_address;
+ c_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
+
+ /* fill in IP checksum */
+ c_udp_pkt->ip_pkt.hdr_checksum = 0;
+ c_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
+
+ return len;
+
+} /* reply_udp */
+
+unsigned short calc_checksum (char *data, int len)
+{
+ unsigned short temp;
+ unsigned long sum=0;
+ int i;
+
+ for( i = 0; i <len; i+=2 ) {
+ memcpy(&temp,&data[i],2);
+ sum += (unsigned long)temp;
+ }
+
+ while (sum >> 16 ) {
+ sum = (sum & 0xffffUL) + (sum >> 16);
+ }
+
+ temp = (unsigned short)sum;
+ temp = ~temp;
+
+ if( temp == 0 )
+ temp = 0xffff;
+
+ return temp;
+}
+
+
+/*============================================================================
+ * Get ethernet-style interface statistics.
+ * Return a pointer to struct enet_statistics.
+ */
+static struct net_device_stats* if_stats(struct net_device* dev)
+{
+ sdla_t *my_card;
+ chdlc_private_area_t* chdlc_priv_area;
+
+ /* Shutdown bug fix. In del_if() we kill the
+ * dev->priv pointer. This function gets
+ * called after del_if(), so check whether
+ * the pointer has been deleted. */
+ if ((chdlc_priv_area=dev->priv) == NULL)
+ return NULL;
+
+ my_card = chdlc_priv_area->card;
+ return &my_card->wandev.stats;
+}
+
+
+/****** Cisco HDLC Firmware Interface Functions *******************************/
+
+/*============================================================================
+ * Read firmware code version.
+ * Put code version as ASCII string in str.
+ */
+static int chdlc_read_version (sdla_t* card, char* str)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int len;
+ char err;
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ }
+ else if (str) { /* is not null */
+ len = mb->buffer_length;
+ memcpy(str, mb->data, len);
+ str[len] = '\0';
+ }
+ return (err);
+}
+
+/*-----------------------------------------------------------------------------
+ * Configure CHDLC firmware.
+ */
+static int chdlc_configure (sdla_t* card, void* data)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
+ int data_length = sizeof(CHDLC_CONFIGURATION_STRUCT);
+
+ mailbox->buffer_length = data_length;
+ memcpy(mailbox->data, data, data_length);
+ mailbox->command = SET_CHDLC_CONFIGURATION;
+ err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
+
+ if (err != COMMAND_OK) chdlc_error (card, err, mailbox);
+
+ return err;
+}
+
+
+/*============================================================================
+ * Set interrupt mode -- HDLC Version.
+ */
+
+static int chdlc_set_intr_mode (sdla_t* card, unsigned mode)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_INT_TRIGGERS_STRUCT* int_data =
+ (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
+ int err;
+
+ int_data->CHDLC_interrupt_triggers = mode;
+ int_data->IRQ = card->hw.irq;
+ int_data->interrupt_timer = 1;
+
+ mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
+ mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error (card, err, mb);
+ return err;
+}
+
+
+/*============================================================================
+ * Enable communications.
+ */
+
+static int chdlc_comm_enable (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = ENABLE_CHDLC_COMMUNICATIONS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card, err, mb);
+ else
+ card->u.c.comm_enabled=1;
+
+ return err;
+}
+
+/*============================================================================
+ * Disable communications and Drop the Modem lines (DCD and RTS).
+ */
+static int chdlc_comm_disable (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = DISABLE_CHDLC_COMMUNICATIONS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card,err,mb);
+
+ return err;
+}
+
+/*============================================================================
+ * Read communication error statistics.
+ */
+static int chdlc_read_comm_err_stats (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = READ_COMMS_ERROR_STATS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card,err,mb);
+ return err;
+}
+
+
+/*============================================================================
+ * Read CHDLC operational statistics.
+ */
+static int chdlc_read_op_stats (sdla_t* card)
+{
+ int err;
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_OPERATIONAL_STATS;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK)
+ chdlc_error(card,err,mb);
+ return err;
+}
+
+
+/*============================================================================
+ * Update communications error and general packet statistics.
+ */
+static int update_comms_stats(sdla_t* card,
+ chdlc_private_area_t* chdlc_priv_area)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ COMMS_ERROR_STATS_STRUCT* err_stats;
+ CHDLC_OPERATIONAL_STATS_STRUCT *op_stats;
+
+ /* on the first timer interrupt, read the comms error statistics */
+ if(chdlc_priv_area->update_comms_stats == 2) {
+ if(chdlc_read_comm_err_stats(card))
+ return 1;
+ err_stats = (COMMS_ERROR_STATS_STRUCT *)mb->data;
+ card->wandev.stats.rx_over_errors =
+ err_stats->Rx_overrun_err_count;
+ card->wandev.stats.rx_crc_errors =
+ err_stats->CRC_err_count;
+ card->wandev.stats.rx_frame_errors =
+ err_stats->Rx_abort_count;
+ card->wandev.stats.rx_fifo_errors =
+ err_stats->Rx_dis_pri_bfrs_full_count;
+ card->wandev.stats.rx_missed_errors =
+ card->wandev.stats.rx_fifo_errors;
+ card->wandev.stats.tx_aborted_errors =
+ err_stats->sec_Tx_abort_count;
+ }
+
+ /* on the second timer interrupt, read the operational statistics */
+ else {
+ if(chdlc_read_op_stats(card))
+ return 1;
+ op_stats = (CHDLC_OPERATIONAL_STATS_STRUCT *)mb->data;
+ card->wandev.stats.rx_length_errors =
+ (op_stats->Rx_Data_discard_short_count +
+ op_stats->Rx_Data_discard_long_count);
+ }
+
+ return 0;
+}
+
+/*============================================================================
+ * Send packet.
+ * Return: 0 - o.k.
+ * 1 - no transmit buffers available
+ */
+static int chdlc_send (sdla_t* card, void* data, unsigned len)
+{
+ CHDLC_DATA_TX_STATUS_EL_STRUCT *txbuf = card->u.c.txbuf;
+
+ if (txbuf->opp_flag)
+ return 1;
+
+ sdla_poke(&card->hw, txbuf->ptr_data_bfr, data, len);
+
+ txbuf->frame_length = len;
+ txbuf->opp_flag = 1; /* start transmission */
+
+ /* Update transmit buffer control fields */
+ card->u.c.txbuf = ++txbuf;
+
+ if ((void*)txbuf > card->u.c.txbuf_last)
+ card->u.c.txbuf = card->u.c.txbuf_base;
+
+ return 0;
+}
+
+/****** Firmware Error Handler **********************************************/
+
+/*============================================================================
+ * Firmware error handler.
+ * This routine is called whenever firmware command returns non-zero
+ * return code.
+ *
+ * Return zero if previous command has to be cancelled.
+ */
+static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
+{
+ unsigned cmd = mb->command;
+
+ switch (err) {
+
+ case CMD_TIMEOUT:
+ printk(KERN_ERR "%s: command 0x%02X timed out!\n",
+ card->devname, cmd);
+ break;
+
+ case S514_BOTH_PORTS_SAME_CLK_MODE:
+ if(cmd == SET_CHDLC_CONFIGURATION) {
+ printk(KERN_INFO
+ "%s: Configure both ports for the same clock source\n",
+ card->devname);
+ break;
+ }
+
+ default:
+ printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
+ card->devname, cmd, err);
+ }
+
+ return 0;
+}
+
+/****** Interrupt Handlers **************************************************/
+
+/*============================================================================
+ * Cisco HDLC interrupt service routine.
+ */
+STATIC void wsppp_isr (sdla_t* card)
+{
+ struct net_device* dev;
+ SHARED_MEMORY_INFO_STRUCT* flags = NULL;
+ int i;
+ sdla_t *my_card;
+
+
+ /* Check for which port the interrupt has been generated.
+ * Since the Secondary Port is piggybacking on the Primary,
+ * the check must be done here.
+ */
+
+ flags = card->u.c.flags;
+ if (!flags->interrupt_info_struct.interrupt_type){
+ /* Check for a second port (piggybacking) */
+ if((my_card = card->next)){
+ flags = my_card->u.c.flags;
+ if (flags->interrupt_info_struct.interrupt_type){
+ card = my_card;
+ card->isr(card);
+ return;
+ }
+ }
+ }
+
+ dev = card->wandev.dev;
+ card->in_isr = 1;
+ flags = card->u.c.flags;
+
+ /* If we get an interrupt with no network device, stop the interrupts
+ * and issue an error */
+ if ((!dev || !dev->priv) && flags->interrupt_info_struct.interrupt_type !=
+ COMMAND_COMPLETE_APP_INT_PEND){
+ goto isr_done;
+ }
+
+
+ /* if critical due to peripheral operations
+ * ie. update() or getstats() then reset the interrupt and
+ * wait for the board to retrigger.
+ */
+ if(test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
+ flags->interrupt_info_struct.
+ interrupt_type = 0;
+ goto isr_done;
+ }
+
+
+ /* On a 508 Card, if critical due to if_send
+ * Major Error !!!
+ */
+ if(card->hw.type != SDLA_S514) {
+ if(test_bit(0, (void*)&card->wandev.critical)) {
+ printk(KERN_INFO "%s: Critical while in ISR: %lx\n",
+ card->devname, card->wandev.critical);
+ goto isr_done;
+ }
+ }
+
+ switch(flags->interrupt_info_struct.interrupt_type) {
+
+ case RX_APP_INT_PEND: /* 0x01: receive interrupt */
+ rx_intr(card);
+ break;
+
+ case TX_APP_INT_PEND: /* 0x02: transmit interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TX_FRAME;
+
+ netif_wake_queue(dev);
+ break;
+
+ case COMMAND_COMPLETE_APP_INT_PEND:/* 0x04: cmd cplt */
+ ++ Intr_test_counter;
+ break;
+
+ case CHDLC_EXCEP_COND_APP_INT_PEND: /* 0x20 */
+ process_chdlc_exception(card);
+ break;
+
+ case GLOBAL_EXCEP_COND_APP_INT_PEND:
+ process_global_exception(card);
+ break;
+
+ case TIMER_APP_INT_PEND:
+ timer_intr(card);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
+ card->devname,
+ flags->interrupt_info_struct.interrupt_type);
+ printk(KERN_INFO "Code name: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codename[i]);
+ printk(KERN_INFO "\nCode version: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codeversion[i]);
+ printk(KERN_INFO "\n");
+ break;
+ }
+
+isr_done:
+ card->in_isr = 0;
+ flags->interrupt_info_struct.interrupt_type = 0;
+}
+
+/*============================================================================
+ * Receive interrupt handler.
+ */
+static void rx_intr (sdla_t* card)
+{
+ struct net_device *dev;
+ chdlc_private_area_t *chdlc_priv_area;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ CHDLC_DATA_RX_STATUS_EL_STRUCT *rxbuf = card->u.c.rxmb;
+ struct sk_buff *skb;
+ unsigned len;
+ unsigned addr = rxbuf->ptr_data_bfr;
+ void *buf;
+ int i,udp_type;
+
+ if (rxbuf->opp_flag != 0x01) {
+ printk(KERN_INFO
+ "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
+ card->devname, (unsigned)rxbuf, rxbuf->opp_flag);
+ printk(KERN_INFO "Code name: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codename[i]);
+ printk(KERN_INFO "\nCode version: ");
+ for(i = 0; i < 4; i ++)
+ printk(KERN_INFO "%c",
+ flags->global_info_struct.codeversion[i]);
+ printk(KERN_INFO "\n");
+
+
+ /* Bug Fix: Mar 6 2000
+ * If we get a corrupted mailbox, it means that the driver
+ * is out of sync with the firmware. There is no recovery.
+ * If we don't turn off all interrupts for this card
+ * the machine will crash.
+ */
+ printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
+ printk(KERN_INFO "Please contact Sangoma Technologies !\n");
+ chdlc_set_intr_mode(card,0);
+ return;
+ }
+
+ dev = card->wandev.dev;
+
+ if (!dev){
+ goto rx_exit;
+ }
+
+ if (!netif_running(dev)){
+ goto rx_exit;
+ }
+
+ chdlc_priv_area = dev->priv;
+
+ if (rxbuf->error_flag){
+ goto rx_exit;
+ }
+ /* Take off two CRC bytes */
+
+ if (rxbuf->frame_length < 7 || rxbuf->frame_length > 1506 ){
+ goto rx_exit;
+ }
+
+ len = rxbuf->frame_length - CRC_LENGTH;
+
+ /* Allocate socket buffer */
+ skb = dev_alloc_skb(len);
+
+ if (skb == NULL) {
+ if (net_ratelimit()){
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ }
+ ++card->wandev.stats.rx_dropped;
+ goto rx_exit;
+ }
+
+ /* Copy data to the socket buffer */
+ if((addr + len) > card->u.c.rx_top + 1) {
+ unsigned tmp = card->u.c.rx_top - addr + 1;
+ buf = skb_put(skb, tmp);
+ sdla_peek(&card->hw, addr, buf, tmp);
+ addr = card->u.c.rx_base;
+ len -= tmp;
+ }
+
+ buf = skb_put(skb, len);
+ sdla_peek(&card->hw, addr, buf, len);
+
+ skb->protocol = htons(ETH_P_WAN_PPP);
+
+ card->wandev.stats.rx_packets ++;
+ card->wandev.stats.rx_bytes += skb->len;
+ udp_type = udp_pkt_type( skb, card );
+
+ if(udp_type == UDP_CPIPE_TYPE) {
+ if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK,
+ card, skb, dev, chdlc_priv_area)) {
+ flags->interrupt_info_struct.
+ interrupt_permission |=
+ APP_INT_ON_TIMER;
+ }
+ }else{
+ /* Pass it up the protocol stack */
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+
+rx_exit:
+ /* Release buffer element and calculate a pointer to the next one */
+ rxbuf->opp_flag = 0x00;
+ card->u.c.rxmb = ++ rxbuf;
+ if((void*)rxbuf > card->u.c.rxbuf_last){
+ card->u.c.rxmb = card->u.c.rxbuf_base;
+ }
+}
+
+/*============================================================================
+ * Timer interrupt handler.
+ * The timer interrupt is used for two purposes:
+ * 1) Processing udp calls from 'cpipemon'.
+ * 2) Reading board-level statistics for updating the proc file system.
+ */
+void timer_intr(sdla_t *card)
+{
+ struct net_device* dev;
+ chdlc_private_area_t* chdlc_priv_area = NULL;
+ SHARED_MEMORY_INFO_STRUCT* flags = NULL;
+
+ dev = card->wandev.dev;
+ chdlc_priv_area = dev->priv;
+
+ if (chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG) {
+ if (!config_chdlc(card)){
+ chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
+ }
+ }
+
+ /* process a udp call if pending */
+ if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP) {
+ process_udp_mgmt_pkt(card, dev,
+ chdlc_priv_area);
+ chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
+ }
+
+
+ /* read the communications statistics if required */
+ if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
+ update_comms_stats(card, chdlc_priv_area);
+ if(!(-- chdlc_priv_area->update_comms_stats)) {
+ chdlc_priv_area->timer_int_enabled &=
+ ~TMR_INT_ENABLED_UPDATE;
+ }
+ }
+
+	/* only disable the timer interrupt if there are no udp or
+	 * statistics updates pending */
+ if(!chdlc_priv_area->timer_int_enabled) {
+ flags = card->u.c.flags;
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~APP_INT_ON_TIMER;
+ }
+}
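+
+/* Illustrative sketch only (not part of the original driver): other paths in
+ * this file hand work to timer_intr() by setting a TMR_INT_ENABLED_* bit in
+ * the private area and enabling the timer interrupt, roughly:
+ *
+ *	chdlc_priv_area->timer_int_enabled |= TMR_INT_ENABLED_UDP;
+ *	flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
+ *
+ * timer_intr() clears each bit as the corresponding work completes and masks
+ * APP_INT_ON_TIMER again once no bits remain set.
+ */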
+
+/*------------------------------------------------------------------------------
+ Miscellaneous Functions
+ - set_chdlc_config() used to set configuration options on the board
+------------------------------------------------------------------------------*/
+
+static int set_chdlc_config(sdla_t* card)
+{
+
+ CHDLC_CONFIGURATION_STRUCT cfg;
+
+ memset(&cfg, 0, sizeof(CHDLC_CONFIGURATION_STRUCT));
+
+ if(card->wandev.clocking)
+ cfg.baud_rate = card->wandev.bps;
+
+ cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
+ INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
+
+ cfg.modem_config_options = 0;
+	/* API options */
+ cfg.CHDLC_API_options = DISCARD_RX_ERROR_FRAMES;
+ cfg.modem_status_timer = 100;
+ cfg.CHDLC_protocol_options = HDLC_STREAMING_MODE;
+ cfg.percent_data_buffer_for_Tx = 50;
+ cfg.CHDLC_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
+ CHDLC_RX_DATA_BYTE_COUNT_STAT);
+ cfg.max_CHDLC_data_field_length = card->wandev.mtu;
+
+ cfg.transmit_keepalive_timer = 0;
+ cfg.receive_keepalive_timer = 0;
+ cfg.keepalive_error_tolerance = 0;
+ cfg.SLARP_request_timer = 0;
+
+ cfg.IP_address = 0;
+ cfg.IP_netmask = 0;
+
+ return chdlc_configure(card, &cfg);
+}
+
+/*============================================================================
+ * Process global exception condition
+ */
+static int process_global_exception(sdla_t *card)
+{
+ CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
+ int err;
+
+ mbox->buffer_length = 0;
+ mbox->command = READ_GLOBAL_EXCEPTION_CONDITION;
+ err = sdla_exec(mbox) ? mbox->return_code : CMD_TIMEOUT;
+
+ if(err != CMD_TIMEOUT ){
+
+ switch(mbox->return_code) {
+
+ case EXCEP_MODEM_STATUS_CHANGE:
+
+ printk(KERN_INFO "%s: Modem status change\n",
+ card->devname);
+
+ switch(mbox->data[0] & (DCD_HIGH | CTS_HIGH)) {
+ case (DCD_HIGH):
+ printk(KERN_INFO "%s: DCD high, CTS low\n",card->devname);
+ break;
+ case (CTS_HIGH):
+ printk(KERN_INFO "%s: DCD low, CTS high\n",card->devname);
+ break;
+ case ((DCD_HIGH | CTS_HIGH)):
+ printk(KERN_INFO "%s: DCD high, CTS high\n",card->devname);
+ break;
+ default:
+ printk(KERN_INFO "%s: DCD low, CTS low\n",card->devname);
+ break;
+ }
+
+			if (!(mbox->data[0] & DCD_HIGH) || !(mbox->data[0] & CTS_HIGH)){
+ //printk(KERN_INFO "Sending TERM Request Manually !\n");
+ send_ppp_term_request(card->wandev.dev);
+ }
+ break;
+
+ case EXCEP_TRC_DISABLED:
+ printk(KERN_INFO "%s: Line trace disabled\n",
+ card->devname);
+ break;
+
+ case EXCEP_IRQ_TIMEOUT:
+ printk(KERN_INFO "%s: IRQ timeout occurred\n",
+ card->devname);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Global exception %x\n",
+ card->devname, mbox->return_code);
+ break;
+ }
+ }
+ return 0;
+}
+
+
+/*============================================================================
+ * Process chdlc exception condition
+ */
+static int process_chdlc_exception(sdla_t *card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int err;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_EXCEPTION_CONDITION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if(err != CMD_TIMEOUT) {
+
+ switch (err) {
+
+ case EXCEP_LINK_ACTIVE:
+ port_set_state(card, WAN_CONNECTED);
+ break;
+
+ case EXCEP_LINK_INACTIVE_MODEM:
+ port_set_state(card, WAN_DISCONNECTED);
+ break;
+
+ case EXCEP_LOOPBACK_CONDITION:
+ printk(KERN_INFO "%s: Loopback Condition Detected.\n",
+ card->devname);
+ break;
+
+ case NO_CHDLC_EXCEP_COND_TO_REPORT:
+ printk(KERN_INFO "%s: No exceptions reported.\n",
+ card->devname);
+ break;
+ default:
+ printk(KERN_INFO "%s: Exception Condition %x!\n",
+ card->devname,err);
+ break;
+ }
+
+ }
+ return 0;
+}
+
+
+/*=============================================================================
+ * Store a UDP management packet for later processing.
+ */
+
+static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
+ struct sk_buff *skb, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area )
+{
+ int udp_pkt_stored = 0;
+
+ if(!chdlc_priv_area->udp_pkt_lgth &&
+ (skb->len <= MAX_LGTH_UDP_MGNT_PKT)) {
+ chdlc_priv_area->udp_pkt_lgth = skb->len;
+ chdlc_priv_area->udp_pkt_src = udp_pkt_src;
+ memcpy(chdlc_priv_area->udp_pkt_data, skb->data, skb->len);
+ chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UDP;
+ udp_pkt_stored = 1;
+ }
+
+	/* the packet data has been copied (or dropped) - free the skb */
+	dev_kfree_skb_any(skb);
+
+ return(udp_pkt_stored);
+}
+
+
+/*=============================================================================
+ * Process UDP management packet.
+ */
+
+static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
+ chdlc_private_area_t* chdlc_priv_area )
+{
+ unsigned char *buf;
+ unsigned int frames, len;
+ struct sk_buff *new_skb;
+ unsigned short buffer_length, real_len;
+ unsigned long data_ptr;
+ unsigned data_length;
+ int udp_mgmt_req_valid = 1;
+ CHDLC_MAILBOX_STRUCT *mb = card->mbox;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+ chdlc_udp_pkt_t *chdlc_udp_pkt;
+ struct timeval tv;
+ int err;
+ char ut_char;
+
+ chdlc_udp_pkt = (chdlc_udp_pkt_t *) chdlc_priv_area->udp_pkt_data;
+
+ if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+
+ switch(chdlc_udp_pkt->cblock.command) {
+ case READ_GLOBAL_STATISTICS:
+ case READ_MODEM_STATUS:
+ case READ_CHDLC_LINK_STATUS:
+ case CPIPE_ROUTER_UP_TIME:
+ case READ_COMMS_ERROR_STATS:
+ case READ_CHDLC_OPERATIONAL_STATS:
+
+ /* These two commands are executed for
+ * each request */
+ case READ_CHDLC_CONFIGURATION:
+ case READ_CHDLC_CODE_VERSION:
+ udp_mgmt_req_valid = 1;
+ break;
+ default:
+ udp_mgmt_req_valid = 0;
+ break;
+ }
+ }
+
+ if(!udp_mgmt_req_valid) {
+
+ /* set length to 0 */
+ chdlc_udp_pkt->cblock.buffer_length = 0;
+
+ /* set return code */
+ chdlc_udp_pkt->cblock.return_code = 0xCD;
+
+ if (net_ratelimit()){
+ printk(KERN_INFO
+ "%s: Warning, Illegal UDP command attempted from network: %x\n",
+ card->devname,chdlc_udp_pkt->cblock.command);
+ }
+
+ } else {
+ unsigned long trace_status_cfg_addr = 0;
+ TRACE_STATUS_EL_CFG_STRUCT trace_cfg_struct;
+ TRACE_STATUS_ELEMENT_STRUCT trace_element_struct;
+
+ switch(chdlc_udp_pkt->cblock.command) {
+
+ case CPIPE_ENABLE_TRACING:
+ if (!chdlc_priv_area->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+
+ mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
+ mb->command = SET_TRACE_CONFIGURATION;
+
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_config = TRACE_ACTIVE;
+ /* Trace delay mode is not used because it slows
+ down transfer and results in a standoff situation
+ when there is a lot of data */
+
+ /* Configure the Trace based on user inputs */
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->trace_config |=
+ chdlc_udp_pkt->data[0];
+
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_deactivation_timer = 4000;
+
+
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ card->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = err;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ /* Get the base address of the trace element list */
+ mb->buffer_length = 0;
+ mb->command = READ_TRACE_CONFIGURATION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if (err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ chdlc_priv_area->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = err;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ trace_status_cfg_addr =((LINE_TRACE_CONFIG_STRUCT *)
+ mb->data) -> ptr_trace_stat_el_cfg_struct;
+
+ sdla_peek(&card->hw, trace_status_cfg_addr,
+ &trace_cfg_struct, sizeof(trace_cfg_struct));
+
+ chdlc_priv_area->start_trace_addr = trace_cfg_struct.
+ base_addr_trace_status_elements;
+
+ chdlc_priv_area->number_trace_elements =
+ trace_cfg_struct.number_trace_status_elements;
+
+ chdlc_priv_area->end_trace_addr = (unsigned long)
+ ((TRACE_STATUS_ELEMENT_STRUCT *)
+ chdlc_priv_area->start_trace_addr +
+ (chdlc_priv_area->number_trace_elements - 1));
+
+ chdlc_priv_area->base_addr_trace_buffer =
+ trace_cfg_struct.base_addr_trace_buffer;
+
+ chdlc_priv_area->end_addr_trace_buffer =
+ trace_cfg_struct.end_addr_trace_buffer;
+
+ chdlc_priv_area->curr_trace_addr =
+ trace_cfg_struct.next_trace_element_to_use;
+
+ chdlc_priv_area->available_buffer_space = 2000 -
+ sizeof(ip_pkt_t) -
+ sizeof(udp_pkt_t) -
+ sizeof(wp_mgmt_t) -
+ sizeof(cblock_t) -
+ sizeof(trace_info_t);
+ }
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ mb->buffer_length = 0;
+ chdlc_priv_area->TracingEnabled = 1;
+ break;
+
+
+ case CPIPE_DISABLE_TRACING:
+ if (chdlc_priv_area->TracingEnabled) {
+
+ /* OPERATE_DATALINE_MONITOR */
+ mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
+ mb->command = SET_TRACE_CONFIGURATION;
+ ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
+ trace_config = TRACE_INACTIVE;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ }
+
+ chdlc_priv_area->TracingEnabled = 0;
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ mb->buffer_length = 0;
+ break;
+
+
+ case CPIPE_GET_TRACE_INFO:
+
+ if (!chdlc_priv_area->TracingEnabled) {
+ chdlc_udp_pkt->cblock.return_code = 1;
+ mb->buffer_length = 0;
+ break;
+ }
+
+ chdlc_udp_pkt->trace_info.ismoredata = 0x00;
+ buffer_length = 0; /* offset of packet already occupied */
+
+ for (frames=0; frames < chdlc_priv_area->number_trace_elements; frames++){
+
+ trace_pkt_t *trace_pkt = (trace_pkt_t *)
+ &chdlc_udp_pkt->data[buffer_length];
+
+ sdla_peek(&card->hw, chdlc_priv_area->curr_trace_addr,
+ (unsigned char *)&trace_element_struct,
+ sizeof(TRACE_STATUS_ELEMENT_STRUCT));
+
+ if (trace_element_struct.opp_flag == 0x00) {
+ break;
+ }
+
+ /* get pointer to real data */
+ data_ptr = trace_element_struct.ptr_data_bfr;
+
+ /* See if there is actual data on the trace buffer */
+ if (data_ptr){
+ data_length = trace_element_struct.trace_length;
+ }else{
+ data_length = 0;
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ }
+
+ if( (chdlc_priv_area->available_buffer_space - buffer_length)
+ < ( sizeof(trace_pkt_t) + data_length) ) {
+
+ /* indicate there are more frames on board & exit */
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ break;
+ }
+
+ trace_pkt->status = trace_element_struct.trace_type;
+
+ trace_pkt->time_stamp =
+ trace_element_struct.trace_time_stamp;
+
+ trace_pkt->real_length =
+ trace_element_struct.trace_length;
+
+ /* see if we can fit the frame into the user buffer */
+ real_len = trace_pkt->real_length;
+
+ if (data_ptr == 0) {
+ trace_pkt->data_avail = 0x00;
+ } else {
+ unsigned tmp = 0;
+
+ /* get the data from circular buffer
+ must check for end of buffer */
+ trace_pkt->data_avail = 0x01;
+
+ if ((data_ptr + real_len) >
+ chdlc_priv_area->end_addr_trace_buffer + 1){
+
+ tmp = chdlc_priv_area->end_addr_trace_buffer - data_ptr + 1;
+ sdla_peek(&card->hw, data_ptr,
+ trace_pkt->data,tmp);
+ data_ptr = chdlc_priv_area->base_addr_trace_buffer;
+ }
+
+ sdla_peek(&card->hw, data_ptr,
+ &trace_pkt->data[tmp], real_len - tmp);
+ }
+
+ /* zero the opp flag to show we got the frame */
+ ut_char = 0x00;
+ sdla_poke(&card->hw, chdlc_priv_area->curr_trace_addr, &ut_char, 1);
+
+ /* now move onto the next frame */
+ chdlc_priv_area->curr_trace_addr += sizeof(TRACE_STATUS_ELEMENT_STRUCT);
+
+ /* check if we went over the last address */
+ if ( chdlc_priv_area->curr_trace_addr > chdlc_priv_area->end_trace_addr ) {
+ chdlc_priv_area->curr_trace_addr = chdlc_priv_area->start_trace_addr;
+ }
+
+ if(trace_pkt->data_avail == 0x01) {
+ buffer_length += real_len - 1;
+ }
+
+ /* for the header */
+ buffer_length += sizeof(trace_pkt_t);
+
+ } /* For Loop */
+
+ if (frames == chdlc_priv_area->number_trace_elements){
+ chdlc_udp_pkt->trace_info.ismoredata = 0x01;
+ }
+ chdlc_udp_pkt->trace_info.num_frames = frames;
+
+ mb->buffer_length = buffer_length;
+ chdlc_udp_pkt->cblock.buffer_length = buffer_length;
+
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+
+ break;
+
+
+ case CPIPE_FT1_READ_STATUS:
+ ((unsigned char *)chdlc_udp_pkt->data )[0] =
+ flags->FT1_info_struct.parallel_port_A_input;
+
+ ((unsigned char *)chdlc_udp_pkt->data )[1] =
+ flags->FT1_info_struct.parallel_port_B_input;
+
+ chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
+ mb->buffer_length = 2;
+ break;
+
+ case CPIPE_ROUTER_UP_TIME:
+ do_gettimeofday( &tv );
+ chdlc_priv_area->router_up_time = tv.tv_sec -
+ chdlc_priv_area->router_start_time;
+ *(unsigned long *)&chdlc_udp_pkt->data =
+ chdlc_priv_area->router_up_time;
+ mb->buffer_length = sizeof(unsigned long);
+ break;
+
+ case FT1_MONITOR_STATUS_CTRL:
+ /* Enable FT1 MONITOR STATUS */
+ if ((chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_STATUS) ||
+ (chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_OP_STATS)) {
+
+ if( rCount++ != 0 ) {
+ chdlc_udp_pkt->cblock.
+ return_code = COMMAND_OK;
+ mb->buffer_length = 1;
+ break;
+ }
+ }
+
+ /* Disable FT1 MONITOR STATUS */
+ if( chdlc_udp_pkt->data[0] == 0) {
+
+ if( --rCount != 0) {
+ chdlc_udp_pkt->cblock.
+ return_code = COMMAND_OK;
+ mb->buffer_length = 1;
+ break;
+ }
+ }
+
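+		/* fall through: the first enable and the last disable must
+		 * also be passed on to the board as a regular command below */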
+ default:
+ /* it's a board command */
+ mb->command = chdlc_udp_pkt->cblock.command;
+ mb->buffer_length = chdlc_udp_pkt->cblock.buffer_length;
+ if (mb->buffer_length) {
+ memcpy(&mb->data, (unsigned char *) chdlc_udp_pkt->
+ data, mb->buffer_length);
+ }
+ /* run the command on the board */
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ if (err != COMMAND_OK) {
+ break;
+ }
+
+ /* copy the result back to our buffer */
+ memcpy(&chdlc_udp_pkt->cblock, mb, sizeof(cblock_t));
+
+ if (mb->buffer_length) {
+ memcpy(&chdlc_udp_pkt->data, &mb->data,
+ mb->buffer_length);
+ }
+
+ } /* end of switch */
+ } /* end of else */
+
+ /* Fill UDP TTL */
+ chdlc_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
+
+ len = reply_udp(chdlc_priv_area->udp_pkt_data, mb->buffer_length);
+
+ if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
+ if(!chdlc_send(card, chdlc_priv_area->udp_pkt_data, len)) {
+ ++ card->wandev.stats.tx_packets;
+ card->wandev.stats.tx_bytes += len;
+ }
+ } else {
+
+ /* Pass it up the stack
+ Allocate socket buffer */
+ if ((new_skb = dev_alloc_skb(len)) != NULL) {
+ /* copy data into new_skb */
+
+ buf = skb_put(new_skb, len);
+ memcpy(buf, chdlc_priv_area->udp_pkt_data, len);
+
+ /* Decapsulate pkt and pass it up the protocol stack */
+ new_skb->protocol = htons(ETH_P_IP);
+ new_skb->dev = dev;
+ new_skb->mac.raw = new_skb->data;
+
+ netif_rx(new_skb);
+ dev->last_rx = jiffies;
+ } else {
+
+ printk(KERN_INFO "%s: no socket buffers available!\n",
+ card->devname);
+ }
+ }
+
+ chdlc_priv_area->udp_pkt_lgth = 0;
+
+ return 0;
+}
+
+/*============================================================================
+ * Initialize Receive and Transmit Buffers.
+ */
+
+static void init_chdlc_tx_rx_buff(sdla_t* card, struct net_device *dev)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ CHDLC_TX_STATUS_EL_CFG_STRUCT *tx_config;
+ CHDLC_RX_STATUS_EL_CFG_STRUCT *rx_config;
+ char err;
+
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CONFIGURATION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+
+ if(err != COMMAND_OK) {
+ chdlc_error(card,err,mb);
+ return;
+ }
+
+ if(card->hw.type == SDLA_S514) {
+ tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Tx_stat_el_cfg_struct));
+ rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Rx_stat_el_cfg_struct));
+
+ /* Setup Head and Tails for buffers */
+ card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
+ tx_config->base_addr_Tx_status_elements);
+ card->u.c.txbuf_last =
+ (CHDLC_DATA_TX_STATUS_EL_STRUCT *)
+ card->u.c.txbuf_base +
+ (tx_config->number_Tx_status_elements - 1);
+
+ card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
+ rx_config->base_addr_Rx_status_elements);
+ card->u.c.rxbuf_last =
+ (CHDLC_DATA_RX_STATUS_EL_STRUCT *)
+ card->u.c.rxbuf_base +
+ (rx_config->number_Rx_status_elements - 1);
+
+ /* Set up next pointer to be used */
+ card->u.c.txbuf = (void *)(card->hw.dpmbase +
+ tx_config->next_Tx_status_element_to_use);
+ card->u.c.rxmb = (void *)(card->hw.dpmbase +
+ rx_config->next_Rx_status_element_to_use);
+ }
+ else {
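+		/* S508: the shared memory is only visible through a small
+		 * window, so the firmware's absolute offsets are reduced
+		 * modulo SDLA_WINDOWSIZE before being added to dpmbase. */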
+ tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Tx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
+
+ rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
+ (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
+ ptr_CHDLC_Rx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
+
+ /* Setup Head and Tails for buffers */
+ card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
+ (tx_config->base_addr_Tx_status_elements % SDLA_WINDOWSIZE));
+ card->u.c.txbuf_last =
+ (CHDLC_DATA_TX_STATUS_EL_STRUCT *)card->u.c.txbuf_base
+ + (tx_config->number_Tx_status_elements - 1);
+ card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
+ (rx_config->base_addr_Rx_status_elements % SDLA_WINDOWSIZE));
+ card->u.c.rxbuf_last =
+ (CHDLC_DATA_RX_STATUS_EL_STRUCT *)card->u.c.rxbuf_base
+ + (rx_config->number_Rx_status_elements - 1);
+
+ /* Set up next pointer to be used */
+ card->u.c.txbuf = (void *)(card->hw.dpmbase +
+ (tx_config->next_Tx_status_element_to_use % SDLA_WINDOWSIZE));
+ card->u.c.rxmb = (void *)(card->hw.dpmbase +
+ (rx_config->next_Rx_status_element_to_use % SDLA_WINDOWSIZE));
+ }
+
+ /* Setup Actual Buffer Start and end addresses */
+ card->u.c.rx_base = rx_config->base_addr_Rx_buffer;
+ card->u.c.rx_top = rx_config->end_addr_Rx_buffer;
+
+}
+
+/*=============================================================================
+ * Perform Interrupt Test by running the READ_CHDLC_CODE_VERSION command
+ * MAX_INTR_TEST_COUNTER times.
+ */
+static int intr_test( sdla_t* card)
+{
+ CHDLC_MAILBOX_STRUCT* mb = card->mbox;
+ int err,i;
+
+ Intr_test_counter = 0;
+
+ /* The critical flag is unset because during initialization (if_open)
+ * we want the interrupts to be enabled so that when the wpc_isr is
+ * called it does not exit due to critical flag set.
+ */
+
+ err = chdlc_set_intr_mode(card, APP_INT_ON_COMMAND_COMPLETE);
+
+ if (err == CMD_OK) {
+ for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
+ mb->buffer_length = 0;
+ mb->command = READ_CHDLC_CODE_VERSION;
+ err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
+ }
+ }
+ else {
+ return err;
+ }
+
+ err = chdlc_set_intr_mode(card, 0);
+
+ if (err != CMD_OK)
+ return err;
+
+ return 0;
+}
+
+/*==============================================================================
+ * Determine what type of UDP call it is. Is it a cpipemon (CPIPEAB) packet?
+ */
+static int udp_pkt_type(struct sk_buff *skb, sdla_t* card)
+{
+ chdlc_udp_pkt_t *chdlc_udp_pkt = (chdlc_udp_pkt_t *)skb->data;
+
+ if (!strncmp(chdlc_udp_pkt->wp_mgmt.signature,UDPMGMT_SIGNATURE,8) &&
+ (chdlc_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
+ (chdlc_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
+ (chdlc_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
+ return UDP_CPIPE_TYPE;
+ }
+ else return UDP_INVALID_TYPE;
+}
+
+/*============================================================================
+ * Set PORT state.
+ */
+static void port_set_state (sdla_t *card, int state)
+{
+ struct net_device *dev = card->wandev.dev;
+ chdlc_private_area_t *chdlc_priv_area = dev->priv;
+
+ if (card->u.c.state != state)
+ {
+ switch (state)
+ {
+ case WAN_CONNECTED:
+ printk (KERN_INFO "%s: HDLC link connected!\n",
+ card->devname);
+ break;
+
+ case WAN_CONNECTING:
+ printk (KERN_INFO "%s: HDLC link connecting...\n",
+ card->devname);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO "%s: HDLC link disconnected!\n",
+ card->devname);
+ break;
+ }
+
+ card->wandev.state = card->u.c.state = state;
+ chdlc_priv_area->common.state = state;
+ }
+}
+
+void s508_lock (sdla_t *card, unsigned long *smp_flags)
+{
+ spin_lock_irqsave(&card->wandev.lock, *smp_flags);
+ if (card->next){
+ /* It is ok to use spin_lock here, since we
+ * already turned off interrupts */
+ spin_lock(&card->next->wandev.lock);
+ }
+}
+
+void s508_unlock (sdla_t *card, unsigned long *smp_flags)
+{
+ if (card->next){
+ spin_unlock(&card->next->wandev.lock);
+ }
+ spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
+}
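+
+/* Illustrative usage sketch (not in the original driver): the two helpers
+ * above are meant to bracket any code that touches the board while local
+ * interrupts are disabled and the chained card (card->next), if present, is
+ * also locked out, e.g.:
+ *
+ *	unsigned long smp_flags;
+ *
+ *	s508_lock(card, &smp_flags);
+ *	... issue mailbox commands / touch shared memory ...
+ *	s508_unlock(card, &smp_flags);
+ */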
+
+
+
+/*===========================================================================
+ * config_chdlc
+ *
+ * Configure the chdlc protocol and enable communications.
+ *
+ * The if_open() function binds this function to the poll routine.
+ * Therefore, this function will run every time the chdlc interface
+ * is brought up. We cannot run this function from the if_open
+ * because if_open does not have access to the remote IP address.
+ *
+ * If the communications are not enabled, proceed to configure
+ * the card and enable communications.
+ *
+ * If the communications are enabled, it means that the interface
+ *	was shut down by either the user or the driver. In this case, we
+ * have to check that the IP addresses have not changed. If
+ * the IP addresses have changed, we have to reconfigure the firmware
+ * and update the changed IP addresses. Otherwise, just exit.
+ *
+ */
+
+static int config_chdlc (sdla_t *card)
+{
+ struct net_device *dev = card->wandev.dev;
+ SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
+
+ if (card->u.c.comm_enabled){
+ chdlc_comm_disable(card);
+ port_set_state(card, WAN_DISCONNECTED);
+ }
+
+ if (set_chdlc_config(card)) {
+ printk(KERN_INFO "%s: CHDLC Configuration Failed!\n",
+ card->devname);
+ return 0;
+ }
+ init_chdlc_tx_rx_buff(card, dev);
+
+ /* Set interrupt mode and mask */
+ if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
+ APP_INT_ON_GLOBAL_EXCEP_COND |
+ APP_INT_ON_TX_FRAME |
+ APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
+ printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
+ card->devname);
+ return 0;
+ }
+
+
+ /* Mask the Transmit and Timer interrupt */
+ flags->interrupt_info_struct.interrupt_permission &=
+ ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
+
+
+ if (chdlc_comm_enable(card) != 0) {
+ printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
+ card->devname);
+ flags->interrupt_info_struct.interrupt_permission = 0;
+ card->u.c.comm_enabled=0;
+ chdlc_set_intr_mode(card,0);
+ return 0;
+ }
+
+	/* communications are enabled - wait for the link to come up */
+ port_set_state(card, WAN_CONNECTING);
+ return 0;
+}
+
+
+static void send_ppp_term_request(struct net_device *dev)
+{
+ struct sk_buff *new_skb;
+ unsigned char *buf;
+
+ if ((new_skb = dev_alloc_skb(8)) != NULL) {
+ /* copy data into new_skb */
+
+		buf = skb_put(new_skb, 8);
+		/* hand-crafted PPP LCP Terminate-Request frame */
+		memcpy(buf, "\xFF\x03\xC0\x21\x05\x98\x00\x07", 8);
+
+ /* Decapsulate pkt and pass it up the protocol stack */
+ new_skb->protocol = htons(ETH_P_WAN_PPP);
+ new_skb->dev = dev;
+ new_skb->mac.raw = new_skb->data;
+
+ netif_rx(new_skb);
+ dev->last_rx = jiffies;
+ }
+}
+
+
+MODULE_LICENSE("GPL");
+
+/****** End ****************************************************************/
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
new file mode 100644
index 000000000000..1e7b47704ad9
--- /dev/null
+++ b/drivers/net/wan/wanxl.c
@@ -0,0 +1,839 @@
+/*
+ * wanXL serial card driver for Linux
+ * host part
+ *
+ * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Status:
+ * - Only DTE (external clock) support with NRZ and NRZI encodings
+ * - wanXL100 will require minor driver modifications, no access to hw
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+
+#include "wanxl.h"
+
+static const char* version = "wanXL serial card driver version: 0.48";
+
+#define PLX_CTL_RESET 0x40000000 /* adapter reset */
+
+#undef DEBUG_PKT
+#undef DEBUG_PCI
+
+/* MAILBOX #1 - PUTS COMMANDS */
+#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
+#ifdef __LITTLE_ENDIAN
+#define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */
+#else
+#define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */
+#endif
+
+/* MAILBOX #2 - DRAM SIZE */
+#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
+
+
+typedef struct {
+ struct net_device *dev;
+ struct card_t *card;
+ spinlock_t lock; /* for wanxl_xmit */
+ int node; /* physical port #0 - 3 */
+ unsigned int clock_type;
+ int tx_in, tx_out;
+ struct sk_buff *tx_skbs[TX_BUFFERS];
+}port_t;
+
+
+typedef struct {
+ desc_t rx_descs[RX_QUEUE_LENGTH];
+ port_status_t port_status[4];
+}card_status_t;
+
+
+typedef struct card_t {
+ int n_ports; /* 1, 2 or 4 ports */
+ u8 irq;
+
+ u8 __iomem *plx; /* PLX PCI9060 virtual base address */
+ struct pci_dev *pdev; /* for pci_name(pdev) */
+ int rx_in;
+ struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
+ card_status_t *status; /* shared between host and card */
+ dma_addr_t status_address;
+ port_t ports[0]; /* 1 - 4 port_t structures follow */
+}card_t;
+
+
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+ return (port_t *)dev_to_hdlc(dev)->priv;
+}
+
+
+static inline port_status_t* get_status(port_t *port)
+{
+ return &port->card->status->port_status[port->node];
+}
+
+
+#ifdef DEBUG_PCI
+static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
+ size_t size, int direction)
+{
+ dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
+ if (addr + size > 0x100000000LL)
+ printk(KERN_CRIT "wanXL %s: pci_map_single() returned memory"
+ " at 0x%LX!\n", pci_name(pdev),
+ (unsigned long long)addr);
+ return addr;
+}
+
+#undef pci_map_single
+#define pci_map_single pci_map_single_debug
+#endif
+
+
+/* Cable and/or personality module change interrupt service */
+static inline void wanxl_cable_intr(port_t *port)
+{
+ u32 value = get_status(port)->cable;
+ int valid = 1;
+ const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
+
+ switch(value & 0x7) {
+ case STATUS_CABLE_V35: cable = "V.35"; break;
+ case STATUS_CABLE_X21: cable = "X.21"; break;
+ case STATUS_CABLE_V24: cable = "V.24"; break;
+ case STATUS_CABLE_EIA530: cable = "EIA530"; break;
+ case STATUS_CABLE_NONE: cable = "no"; break;
+ default: cable = "invalid";
+ }
+
+ switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
+ case STATUS_CABLE_V35: pm = "V.35"; break;
+ case STATUS_CABLE_X21: pm = "X.21"; break;
+ case STATUS_CABLE_V24: pm = "V.24"; break;
+ case STATUS_CABLE_EIA530: pm = "EIA530"; break;
+ case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
+ default: pm = "invalid personality"; valid = 0;
+ }
+
+ if (valid) {
+ if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
+ dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
+ ", DSR off";
+ dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
+ ", carrier off";
+ }
+ dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
+ }
+ printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
+ port->dev->name, pm, dte, cable, dsr, dcd);
+
+ hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev);
+}
+
+
+
+/* Transmit complete interrupt service */
+static inline void wanxl_tx_intr(port_t *port)
+{
+ struct net_device *dev = port->dev;
+ struct net_device_stats *stats = hdlc_stats(dev);
+ while (1) {
+ desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
+ struct sk_buff *skb = port->tx_skbs[port->tx_in];
+
+ switch (desc->stat) {
+ case PACKET_FULL:
+ case PACKET_EMPTY:
+ netif_wake_queue(dev);
+ return;
+
+ case PACKET_UNDERRUN:
+ stats->tx_errors++;
+ stats->tx_fifo_errors++;
+ break;
+
+ default:
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ }
+ desc->stat = PACKET_EMPTY; /* Free descriptor */
+ pci_unmap_single(port->card->pdev, desc->address, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
+ }
+}
+
+
+
+/* Receive complete interrupt service */
+static inline void wanxl_rx_intr(card_t *card)
+{
+ desc_t *desc;
+ while (desc = &card->status->rx_descs[card->rx_in],
+ desc->stat != PACKET_EMPTY) {
+ if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
+ printk(KERN_CRIT "wanXL %s: received packet for"
+ " nonexistent port\n", pci_name(card->pdev));
+ else {
+ struct sk_buff *skb = card->rx_skbs[card->rx_in];
+ port_t *port = &card->ports[desc->stat &
+ PACKET_PORT_MASK];
+ struct net_device *dev = port->dev;
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ if (!skb)
+ stats->rx_dropped++;
+ else {
+ pci_unmap_single(card->pdev, desc->address,
+ BUFFER_LENGTH,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, desc->length);
+
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s RX(%i):", dev->name,
+ skb->len);
+ debug_frame(skb);
+#endif
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+ skb->protocol = hdlc_type_trans(skb, dev);
+ netif_rx(skb);
+ skb = NULL;
+ }
+
+ if (!skb) {
+ skb = dev_alloc_skb(BUFFER_LENGTH);
+ desc->address = skb ?
+ pci_map_single(card->pdev, skb->data,
+ BUFFER_LENGTH,
+ PCI_DMA_FROMDEVICE) : 0;
+ card->rx_skbs[card->rx_in] = skb;
+ }
+ }
+ desc->stat = PACKET_EMPTY; /* Free descriptor */
+ card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
+ }
+}
+
+
+
+static irqreturn_t wanxl_intr(int irq, void* dev_id, struct pt_regs *regs)
+{
+ card_t *card = dev_id;
+ int i;
+ u32 stat;
+ int handled = 0;
+
+
+ while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
+ handled = 1;
+ writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
+
+ for (i = 0; i < card->n_ports; i++) {
+ if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
+ wanxl_tx_intr(&card->ports[i]);
+ if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
+ wanxl_cable_intr(&card->ports[i]);
+ }
+ if (stat & (1 << DOORBELL_FROM_CARD_RX))
+ wanxl_rx_intr(card);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+
+
+static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ desc_t *desc;
+
+ spin_lock(&port->lock);
+
+ desc = &get_status(port)->tx_descs[port->tx_out];
+ if (desc->stat != PACKET_EMPTY) {
+ /* should never happen - previous xmit should stop queue */
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+ netif_stop_queue(dev);
+		spin_unlock(&port->lock);
+ return 1; /* request packet to be queued */
+ }
+
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+ debug_frame(skb);
+#endif
+
+ port->tx_skbs[port->tx_out] = skb;
+ desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ desc->length = skb->len;
+ desc->stat = PACKET_FULL;
+ writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
+ port->card->plx + PLX_DOORBELL_TO_CARD);
+ dev->trans_start = jiffies;
+
+ port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
+
+ if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
+ netif_stop_queue(dev);
+#ifdef DEBUG_PKT
+ printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+ }
+
+ spin_unlock(&port->lock);
+ return 0;
+}
+
+
+
+static int wanxl_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ port_t *port = dev_to_port(dev);
+
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT &&
+ parity != PARITY_CRC32_PR0_CCITT &&
+ parity != PARITY_CRC16_PR0_CCITT)
+ return -EINVAL;
+
+ get_status(port)->encoding = encoding;
+ get_status(port)->parity = parity;
+ return 0;
+}
+
+
+
+static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings line;
+ port_t *port = dev_to_port(dev);
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ line.clock_type = get_status(port)->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
+ size))
+ return -EFAULT;
+
+ if (line.clock_type != CLOCK_EXT &&
+ line.clock_type != CLOCK_TXFROMRX)
+ return -EINVAL; /* No such clock setting */
+
+ if (line.loopback != 0)
+ return -EINVAL;
+
+ get_status(port)->clocking = line.clock_type;
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+
+
+static int wanxl_open(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
+ unsigned long timeout;
+ int i;
+
+ if (get_status(port)->open) {
+ printk(KERN_ERR "%s: port already open\n", dev->name);
+ return -EIO;
+ }
+ if ((i = hdlc_open(dev)) != 0)
+ return i;
+
+ port->tx_in = port->tx_out = 0;
+ for (i = 0; i < TX_BUFFERS; i++)
+ get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
+ /* signal the card */
+ writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
+
+ timeout = jiffies + HZ;
+ do
+ if (get_status(port)->open) {
+ netif_start_queue(dev);
+ return 0;
+ }
+ while (time_after(timeout, jiffies));
+
+ printk(KERN_ERR "%s: unable to open port\n", dev->name);
+	/* ask the card to close the port, should it still be alive */
+ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
+ return -EFAULT;
+}
+
+
+
+static int wanxl_close(struct net_device *dev)
+{
+ port_t *port = dev_to_port(dev);
+ unsigned long timeout;
+ int i;
+
+ hdlc_close(dev);
+ /* signal the card */
+ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
+ port->card->plx + PLX_DOORBELL_TO_CARD);
+
+ timeout = jiffies + HZ;
+ do
+ if (!get_status(port)->open)
+ break;
+ while (time_after(timeout, jiffies));
+
+ if (get_status(port)->open)
+ printk(KERN_ERR "%s: unable to close port\n", dev->name);
+
+ netif_stop_queue(dev);
+
+ for (i = 0; i < TX_BUFFERS; i++) {
+ desc_t *desc = &get_status(port)->tx_descs[i];
+
+ if (desc->stat != PACKET_EMPTY) {
+ desc->stat = PACKET_EMPTY;
+ pci_unmap_single(port->card->pdev, desc->address,
+ port->tx_skbs[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(port->tx_skbs[i]);
+ }
+ }
+ return 0;
+}
+
+
+
+static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
+{
+ struct net_device_stats *stats = hdlc_stats(dev);
+ port_t *port = dev_to_port(dev);
+
+ stats->rx_over_errors = get_status(port)->rx_overruns;
+ stats->rx_frame_errors = get_status(port)->rx_frame_errors;
+ stats->rx_errors = stats->rx_over_errors + stats->rx_frame_errors;
+ return stats;
+}
+
+
+
+static int wanxl_puts_command(card_t *card, u32 cmd)
+{
+ unsigned long timeout = jiffies + 5 * HZ;
+
+ writel(cmd, card->plx + PLX_MAILBOX_1);
+ do {
+ if (readl(card->plx + PLX_MAILBOX_1) == 0)
+ return 0;
+
+ schedule();
+ }while (time_after(timeout, jiffies));
+
+ return -1;
+}
+
+
+
+static void wanxl_reset(card_t *card)
+{
+ u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
+
+ writel(0x80, card->plx + PLX_MAILBOX_0);
+ writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
+ readl(card->plx + PLX_CONTROL); /* wait for posted write */
+ udelay(1);
+ writel(old_value, card->plx + PLX_CONTROL);
+ readl(card->plx + PLX_CONTROL); /* wait for posted write */
+}
+
+
+
+static void wanxl_pci_remove_one(struct pci_dev *pdev)
+{
+ card_t *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->n_ports; i++) {
+ unregister_hdlc_device(card->ports[i].dev);
+ free_netdev(card->ports[i].dev);
+ }
+
+ /* unregister and free all host resources */
+ if (card->irq)
+ free_irq(card->irq, card);
+
+ wanxl_reset(card);
+
+ for (i = 0; i < RX_QUEUE_LENGTH; i++)
+ if (card->rx_skbs[i]) {
+ pci_unmap_single(card->pdev,
+ card->status->rx_descs[i].address,
+ BUFFER_LENGTH, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(card->rx_skbs[i]);
+ }
+
+ if (card->plx)
+ iounmap(card->plx);
+
+ if (card->status)
+ pci_free_consistent(pdev, sizeof(card_status_t),
+ card->status, card->status_address);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ kfree(card);
+}
+
+
+#include "wanxlfw.inc"
+
+static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ card_t *card;
+ u32 ramsize, stat;
+ unsigned long timeout;
+ u32 plx_phy; /* PLX PCI base address */
+ u32 mem_phy; /* memory PCI base addr */
+ u8 __iomem *mem; /* memory virtual base addr */
+ int i, ports, alloc_size;
+
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version) {
+ printed_version++;
+ printk(KERN_INFO "%s\n", version);
+ }
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i)
+ return i;
+
+	/* The QUICC can only access the first 256 MB of host RAM directly,
+	   but the PLX9060 DMA uses full 32-bit addressing for the actual
+	   packet data transfers */
+
+ /* FIXME when PCI/DMA subsystems are fixed.
+ We set both dma_mask and consistent_dma_mask to 28 bits
+ and pray pci_alloc_consistent() will use this info. It should
+ work on most platforms */
+ if (pci_set_consistent_dma_mask(pdev, 0x0FFFFFFF) ||
+ pci_set_dma_mask(pdev, 0x0FFFFFFF)) {
+ printk(KERN_ERR "wanXL: No usable DMA configuration\n");
+ return -EIO;
+ }
+
+ i = pci_request_regions(pdev, "wanXL");
+ if (i) {
+ pci_disable_device(pdev);
+ return i;
+ }
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
+ case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
+ default: ports = 4;
+ }
+
+ alloc_size = sizeof(card_t) + ports * sizeof(port_t);
+ card = kmalloc(alloc_size, GFP_KERNEL);
+ if (card == NULL) {
+ printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
+ pci_name(pdev));
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ return -ENOBUFS;
+ }
+ memset(card, 0, alloc_size);
+
+ pci_set_drvdata(pdev, card);
+ card->pdev = pdev;
+
+ card->status = pci_alloc_consistent(pdev, sizeof(card_status_t),
+ &card->status_address);
+ if (card->status == NULL) {
+ wanxl_pci_remove_one(pdev);
+ return -ENOBUFS;
+ }
+
+#ifdef DEBUG_PCI
+ printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
+ " at 0x%LX\n", pci_name(pdev),
+ (unsigned long long)card->status_address);
+#endif
+
+ /* FIXME when PCI/DMA subsystems are fixed.
+ We set both dma_mask and consistent_dma_mask back to 32 bits
+ to indicate the card can do 32-bit DMA addressing */
+ if (pci_set_consistent_dma_mask(pdev, 0xFFFFFFFF) ||
+ pci_set_dma_mask(pdev, 0xFFFFFFFF)) {
+ printk(KERN_ERR "wanXL: No usable DMA configuration\n");
+ wanxl_pci_remove_one(pdev);
+ return -EIO;
+ }
+
+ /* set up PLX mapping */
+ plx_phy = pci_resource_start(pdev, 0);
+ card->plx = ioremap_nocache(plx_phy, 0x70);
+
+#if RESET_WHILE_LOADING
+ wanxl_reset(card);
+#endif
+
+ timeout = jiffies + 20 * HZ;
+ while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
+ if (time_before(timeout, jiffies)) {
+ printk(KERN_WARNING "wanXL %s: timeout waiting for"
+ " PUTS to complete\n", pci_name(pdev));
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+ switch(stat & 0xC0) {
+ case 0x00: /* hmm - PUTS completed with non-zero code? */
+ case 0x80: /* PUTS still testing the hardware */
+ break;
+
+ default:
+ printk(KERN_WARNING "wanXL %s: PUTS test 0x%X"
+ " failed\n", pci_name(pdev), stat & 0x30);
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+ schedule();
+ }
+
+ /* get on-board memory size (PUTS detects no more than 4 MB) */
+ ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
+
+ /* set up on-board RAM mapping */
+ mem_phy = pci_resource_start(pdev, 2);
+
+
+ /* sanity check the board's reported memory size */
+ if (ramsize < BUFFERS_ADDR +
+ (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
+		printk(KERN_WARNING "wanXL %s: not enough on-board RAM"
+ " (%u bytes detected, %u bytes required)\n",
+ pci_name(pdev), ramsize, BUFFERS_ADDR +
+ (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+ if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
+ printk(KERN_WARNING "wanXL %s: unable to Set Byte Swap"
+ " Mode\n", pci_name(pdev));
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+ struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
+ card->rx_skbs[i] = skb;
+ if (skb)
+ card->status->rx_descs[i].address =
+ pci_map_single(card->pdev, skb->data,
+ BUFFER_LENGTH,
+ PCI_DMA_FROMDEVICE);
+ }
+
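+	/* Download the m68k firmware: htonl() puts each 32-bit word into the
+	 * card's big-endian byte order, then the PCI addresses of the
+	 * per-port status blocks and of the shared card_status_t are patched
+	 * into the image header (ch_status_addr / rx_descs_addr in
+	 * wanxlfw.S). */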
+ mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+ for (i = 0; i < sizeof(firmware); i += 4)
+ writel(htonl(*(u32*)(firmware + i)), mem + PDM_OFFSET + i);
+
+ for (i = 0; i < ports; i++)
+ writel(card->status_address +
+ (void *)&card->status->port_status[i] -
+ (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
+ writel(card->status_address, mem + PDM_OFFSET + 20);
+ writel(PDM_OFFSET, mem);
+ iounmap(mem);
+
+ writel(0, card->plx + PLX_MAILBOX_5);
+
+ if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
+ printk(KERN_WARNING "wanXL %s: unable to Abort and Jump\n",
+ pci_name(pdev));
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+ stat = 0;
+ timeout = jiffies + 5 * HZ;
+ do {
+ if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
+ break;
+ schedule();
+ }while (time_after(timeout, jiffies));
+
+ if (!stat) {
+		printk(KERN_WARNING "wanXL %s: timeout while initializing card "
+		       "firmware\n", pci_name(pdev));
+ wanxl_pci_remove_one(pdev);
+ return -ENODEV;
+ }
+
+#if DETECT_RAM
+ ramsize = stat;
+#endif
+
+ printk(KERN_INFO "wanXL %s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
+ pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
+
+ /* Allocate IRQ */
+ if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ, "wanXL", card)) {
+ printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
+ pci_name(pdev), pdev->irq);
+ wanxl_pci_remove_one(pdev);
+ return -EBUSY;
+ }
+ card->irq = pdev->irq;
+
+ for (i = 0; i < ports; i++) {
+ hdlc_device *hdlc;
+ port_t *port = &card->ports[i];
+ struct net_device *dev = alloc_hdlcdev(port);
+ if (!dev) {
+ printk(KERN_ERR "wanXL %s: unable to allocate"
+ " memory\n", pci_name(pdev));
+ wanxl_pci_remove_one(pdev);
+ return -ENOMEM;
+ }
+
+ port->dev = dev;
+ hdlc = dev_to_hdlc(dev);
+ spin_lock_init(&port->lock);
+ SET_MODULE_OWNER(dev);
+ dev->tx_queue_len = 50;
+ dev->do_ioctl = wanxl_ioctl;
+ dev->open = wanxl_open;
+ dev->stop = wanxl_close;
+ hdlc->attach = wanxl_attach;
+ hdlc->xmit = wanxl_xmit;
+ dev->get_stats = wanxl_get_stats;
+ port->card = card;
+ port->node = i;
+ get_status(port)->clocking = CLOCK_EXT;
+ if (register_hdlc_device(dev)) {
+ printk(KERN_ERR "wanXL %s: unable to register hdlc"
+ " device\n", pci_name(pdev));
+ free_netdev(dev);
+ wanxl_pci_remove_one(pdev);
+ return -ENOBUFS;
+ }
+ card->n_ports++;
+ }
+
+ printk(KERN_INFO "wanXL %s: port", pci_name(pdev));
+ for (i = 0; i < ports; i++)
+ printk("%s #%i: %s", i ? "," : "", i,
+ card->ports[i].dev->name);
+ printk("\n");
+
+ for (i = 0; i < ports; i++)
+ wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
+
+ return 0;
+}
+
+static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { 0, }
+};
+
+
+static struct pci_driver wanxl_pci_driver = {
+ .name = "wanXL",
+ .id_table = wanxl_pci_tbl,
+ .probe = wanxl_pci_init_one,
+ .remove = wanxl_pci_remove_one,
+};
+
+
+static int __init wanxl_init_module(void)
+{
+#ifdef MODULE
+ printk(KERN_INFO "%s\n", version);
+#endif
+ return pci_module_init(&wanxl_pci_driver);
+}
+
+static void __exit wanxl_cleanup_module(void)
+{
+ pci_unregister_driver(&wanxl_pci_driver);
+}
+
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
+
+module_init(wanxl_init_module);
+module_exit(wanxl_cleanup_module);
diff --git a/drivers/net/wan/wanxl.h b/drivers/net/wan/wanxl.h
new file mode 100644
index 000000000000..3f86558f8a6b
--- /dev/null
+++ b/drivers/net/wan/wanxl.h
@@ -0,0 +1,152 @@
+/*
+ * wanXL serial card driver for Linux
+ * definitions common to host driver and card firmware
+ *
+ * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#define RESET_WHILE_LOADING 0
+
+/* you must rebuild the firmware if any of the following is changed */
+#define DETECT_RAM 0 /* needed for > 4MB RAM, 16 MB maximum */
+#define QUICC_MEMCPY_USES_PLX 1 /* must be used if the host has > 256 MB RAM */
+
+
+#define STATUS_CABLE_V35 2
+#define STATUS_CABLE_X21 3
+#define STATUS_CABLE_V24 4
+#define STATUS_CABLE_EIA530 5
+#define STATUS_CABLE_INVALID 6
+#define STATUS_CABLE_NONE 7
+
+#define STATUS_CABLE_DCE 0x8000
+#define STATUS_CABLE_DSR 0x0010
+#define STATUS_CABLE_DCD 0x0008
+#define STATUS_CABLE_PM_SHIFT 5
+
+#define PDM_OFFSET 0x1000
+
+#define TX_BUFFERS 10 /* per port */
+#define RX_BUFFERS 30
+#define RX_QUEUE_LENGTH 40 /* card->host queue length - per card */
+
+#define PACKET_EMPTY 0x00
+#define PACKET_FULL 0x10
+#define PACKET_SENT 0x20 /* TX only */
+#define PACKET_UNDERRUN 0x30 /* TX only */
+#define PACKET_PORT_MASK 0x03 /* RX only */
+
+/* bit numbers in PLX9060 doorbell registers */
+#define DOORBELL_FROM_CARD_TX_0 0 /* packet sent by the card */
+#define DOORBELL_FROM_CARD_TX_1 1
+#define DOORBELL_FROM_CARD_TX_2 2
+#define DOORBELL_FROM_CARD_TX_3 3
+#define DOORBELL_FROM_CARD_RX 4
+#define DOORBELL_FROM_CARD_CABLE_0 5 /* cable/PM/etc. changed */
+#define DOORBELL_FROM_CARD_CABLE_1 6
+#define DOORBELL_FROM_CARD_CABLE_2 7
+#define DOORBELL_FROM_CARD_CABLE_3 8
+
+#define DOORBELL_TO_CARD_OPEN_0 0
+#define DOORBELL_TO_CARD_OPEN_1 1
+#define DOORBELL_TO_CARD_OPEN_2 2
+#define DOORBELL_TO_CARD_OPEN_3 3
+#define DOORBELL_TO_CARD_CLOSE_0 4
+#define DOORBELL_TO_CARD_CLOSE_1 5
+#define DOORBELL_TO_CARD_CLOSE_2 6
+#define DOORBELL_TO_CARD_CLOSE_3 7
+#define DOORBELL_TO_CARD_TX_0 8 /* outbound packet queued */
+#define DOORBELL_TO_CARD_TX_1 9
+#define DOORBELL_TO_CARD_TX_2 10
+#define DOORBELL_TO_CARD_TX_3 11
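+/* The host rings a doorbell by writing the corresponding bit to
+   PLX_DOORBELL_TO_CARD, e.g. a TX request for port #2 sets
+   1 << (DOORBELL_TO_CARD_TX_0 + 2), i.e. bit 10. */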
+
+/* firmware-only status bits, starting from last DOORBELL_TO_CARD + 1 */
+#define TASK_SCC_0 12
+#define TASK_SCC_1 13
+#define TASK_SCC_2 14
+#define TASK_SCC_3 15
+
+#define ALIGN32(x) (((x) + 3) & 0xFFFFFFFC)
+#define BUFFER_LENGTH ALIGN32(HDLC_MAX_MRU + 4) /* 4 bytes for 32-bit CRC */
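+/* ALIGN32() rounds up to the next multiple of 4, e.g. ALIGN32(1501) = 1504,
+   so each packet buffer ends on a 32-bit boundary. */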
+
+/* Address of TX and RX buffers in 68360 address space */
+#define BUFFERS_ADDR 0x4000 /* 16 KB */
+
+#ifndef __ASSEMBLER__
+#define PLX_OFFSET 0
+#else
+#define PLX_OFFSET PLX + 0x80
+#endif
+
+#define PLX_MAILBOX_0 (PLX_OFFSET + 0x40)
+#define PLX_MAILBOX_1 (PLX_OFFSET + 0x44)
+#define PLX_MAILBOX_2 (PLX_OFFSET + 0x48)
+#define PLX_MAILBOX_3 (PLX_OFFSET + 0x4C)
+#define PLX_MAILBOX_4 (PLX_OFFSET + 0x50)
+#define PLX_MAILBOX_5 (PLX_OFFSET + 0x54)
+#define PLX_MAILBOX_6 (PLX_OFFSET + 0x58)
+#define PLX_MAILBOX_7 (PLX_OFFSET + 0x5C)
+#define PLX_DOORBELL_TO_CARD (PLX_OFFSET + 0x60)
+#define PLX_DOORBELL_FROM_CARD (PLX_OFFSET + 0x64)
+#define PLX_INTERRUPT_CS (PLX_OFFSET + 0x68)
+#define PLX_CONTROL (PLX_OFFSET + 0x6C)
+
+#ifdef __ASSEMBLER__
+#define PLX_DMA_0_MODE (PLX + 0x100)
+#define PLX_DMA_0_PCI (PLX + 0x104)
+#define PLX_DMA_0_LOCAL (PLX + 0x108)
+#define PLX_DMA_0_LENGTH (PLX + 0x10C)
+#define PLX_DMA_0_DESC (PLX + 0x110)
+#define PLX_DMA_1_MODE (PLX + 0x114)
+#define PLX_DMA_1_PCI (PLX + 0x118)
+#define PLX_DMA_1_LOCAL (PLX + 0x11C)
+#define PLX_DMA_1_LENGTH (PLX + 0x120)
+#define PLX_DMA_1_DESC (PLX + 0x124)
+#define PLX_DMA_CMD_STS (PLX + 0x128)
+#define PLX_DMA_ARBITR_0 (PLX + 0x12C)
+#define PLX_DMA_ARBITR_1 (PLX + 0x130)
+#endif
+
+#define DESC_LENGTH 12
+
+/* offsets from start of status_t */
+/* card to host */
+#define STATUS_OPEN 0
+#define STATUS_CABLE (STATUS_OPEN + 4)
+#define STATUS_RX_OVERRUNS (STATUS_CABLE + 4)
+#define STATUS_RX_FRAME_ERRORS (STATUS_RX_OVERRUNS + 4)
+
+/* host to card */
+#define STATUS_PARITY (STATUS_RX_FRAME_ERRORS + 4)
+#define STATUS_ENCODING (STATUS_PARITY + 4)
+#define STATUS_CLOCKING (STATUS_ENCODING + 4)
+#define STATUS_TX_DESCS (STATUS_CLOCKING + 4)
+
+#ifndef __ASSEMBLER__
+
+typedef struct {
+ volatile u32 stat;
+ u32 address; /* PCI address */
+ volatile u32 length;
+}desc_t;
+
+
+typedef struct {
+// Card to host
+ volatile u32 open;
+ volatile u32 cable;
+ volatile u32 rx_overruns;
+ volatile u32 rx_frame_errors;
+
+// Host to card
+ u32 parity;
+ u32 encoding;
+ u32 clocking;
+ desc_t tx_descs[TX_BUFFERS];
+}port_status_t;
+
+#endif /* __ASSEMBLER__ */
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
new file mode 100644
index 000000000000..73aae2bf2f1c
--- /dev/null
+++ b/drivers/net/wan/wanxlfw.S
@@ -0,0 +1,895 @@
+.psize 0
+/*
+ wanXL serial card driver for Linux
+ card firmware part
+
+ Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of version 2 of the GNU General Public License
+ as published by the Free Software Foundation.
+
+
+
+
+ DPRAM BDs:
+ 0x000 - 0x050 TX#0 0x050 - 0x140 RX#0
+ 0x140 - 0x190 TX#1 0x190 - 0x280 RX#1
+ 0x280 - 0x2D0 TX#2 0x2D0 - 0x3C0 RX#2
+ 0x3C0 - 0x410 TX#3 0x410 - 0x500 RX#3
+
+
+ 000 5FF 1536 Bytes Dual-Port RAM User Data / BDs
+ 600 6FF 256 Bytes Dual-Port RAM User Data / BDs
+ 700 7FF 256 Bytes Dual-Port RAM User Data / BDs
+ C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1
+ D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2
+ E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3
+ F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4
+
+ local interrupts level
+ NMI 7
+ PIT timer, CPM (RX/TX complete) 4
+ PCI9060 DMA and PCI doorbells 3
+ Cable - not used 1
+*/
+
+#include <linux/hdlc.h>
+#include "wanxl.h"
+
+/* memory addresses and offsets */
+
+MAX_RAM_SIZE = 16 * 1024 * 1024 // max RAM supported by hardware
+
+PCI9060_VECTOR = 0x0000006C
+CPM_IRQ_BASE = 0x40
+ERROR_VECTOR = CPM_IRQ_BASE * 4
+SCC1_VECTOR = (CPM_IRQ_BASE + 0x1E) * 4
+SCC2_VECTOR = (CPM_IRQ_BASE + 0x1D) * 4
+SCC3_VECTOR = (CPM_IRQ_BASE + 0x1C) * 4
+SCC4_VECTOR = (CPM_IRQ_BASE + 0x1B) * 4
+CPM_IRQ_LEVEL = 4
+TIMER_IRQ = 128
+TIMER_IRQ_LEVEL = 4
+PITR_CONST = 0x100 + 16 // 1 Hz timer
+
+MBAR = 0x0003FF00
+
+VALUE_WINDOW = 0x40000000
+ORDER_WINDOW = 0xC0000000
+
+PLX = 0xFFF90000
+
+CSRA = 0xFFFB0000
+CSRB = 0xFFFB0002
+CSRC = 0xFFFB0004
+CSRD = 0xFFFB0006
+STATUS_CABLE_LL = 0x2000
+STATUS_CABLE_DTR = 0x1000
+
+DPRBASE = 0xFFFC0000
+
+SCC1_BASE = DPRBASE + 0xC00
+MISC_BASE = DPRBASE + 0xCB0
+SCC2_BASE = DPRBASE + 0xD00
+SCC3_BASE = DPRBASE + 0xE00
+SCC4_BASE = DPRBASE + 0xF00
+
+// offsets from SCCx_BASE
+// the SCCx_BASE values are offsets from DPRBASE and must be divisible by 8
+SCC_RBASE = 0 // 16-bit RxBD base address
+SCC_TBASE = 2 // 16-bit TxBD base address
+SCC_RFCR = 4 // 8-bit Rx function code
+SCC_TFCR = 5 // 8-bit Tx function code
+SCC_MRBLR = 6 // 16-bit maximum Rx buffer length
+SCC_C_MASK = 0x34 // 32-bit CRC constant
+SCC_C_PRES = 0x38 // 32-bit CRC preset
+SCC_MFLR = 0x46 // 16-bit max Rx frame length (without flags)
+
+REGBASE = DPRBASE + 0x1000
+PICR = REGBASE + 0x026 // 16-bit periodic irq control
+PITR = REGBASE + 0x02A // 16-bit periodic irq timing
+OR1 = REGBASE + 0x064 // 32-bit RAM bank #1 options
+CICR = REGBASE + 0x540 // 32(24)-bit CP interrupt config
+CIMR = REGBASE + 0x548 // 32-bit CP interrupt mask
+CISR = REGBASE + 0x54C // 32-bit CP interrupts in-service
+PADIR = REGBASE + 0x550 // 16-bit PortA data direction bitmap
+PAPAR = REGBASE + 0x552 // 16-bit PortA pin assignment bitmap
+PAODR = REGBASE + 0x554 // 16-bit PortA open drain bitmap
+PADAT = REGBASE + 0x556 // 16-bit PortA data register
+
+PCDIR = REGBASE + 0x560 // 16-bit PortC data direction bitmap
+PCPAR = REGBASE + 0x562 // 16-bit PortC pin assignment bitmap
+PCSO = REGBASE + 0x564 // 16-bit PortC special options
+PCDAT = REGBASE + 0x566 // 16-bit PortC data register
+PCINT = REGBASE + 0x568 // 16-bit PortC interrupt control
+CR = REGBASE + 0x5C0 // 16-bit Command register
+
+SCC1_REGS = REGBASE + 0x600
+SCC2_REGS = REGBASE + 0x620
+SCC3_REGS = REGBASE + 0x640
+SCC4_REGS = REGBASE + 0x660
+SICR = REGBASE + 0x6EC // 32-bit SI clock route
+
+// offset from SCCx_REGS
+SCC_GSMR_L = 0x00 // 32 bits
+SCC_GSMR_H = 0x04 // 32 bits
+SCC_PSMR = 0x08 // 16 bits
+SCC_TODR = 0x0C // 16 bits
+SCC_DSR = 0x0E // 16 bits
+SCC_SCCE = 0x10 // 16 bits
+SCC_SCCM = 0x14 // 16 bits
+SCC_SCCS = 0x17 // 8 bits
+
+#if QUICC_MEMCPY_USES_PLX
+ .macro memcpy_from_pci src, dest, len // len must be < 8 MB
+ addl #3, \len
+ andl #0xFFFFFFFC, \len // always copy n * 4 bytes
+ movel \src, PLX_DMA_0_PCI
+ movel \dest, PLX_DMA_0_LOCAL
+ movel \len, PLX_DMA_0_LENGTH
+ movel #0x0103, PLX_DMA_CMD_STS // start channel 0 transfer
+ bsr memcpy_from_pci_run
+ .endm
+
+ .macro memcpy_to_pci src, dest, len
+ addl #3, \len
+ andl #0xFFFFFFFC, \len // always copy n * 4 bytes
+ movel \src, PLX_DMA_1_LOCAL
+ movel \dest, PLX_DMA_1_PCI
+ movel \len, PLX_DMA_1_LENGTH
+ movel #0x0301, PLX_DMA_CMD_STS // start channel 1 transfer
+ bsr memcpy_to_pci_run
+ .endm
+
+#else
+
+ .macro memcpy src, dest, len // len must be < 65536 bytes
+ movel %d7, -(%sp) // src and dest must be < 256 MB
+ movel \len, %d7 // bits 0 and 1
+ lsrl #2, \len
+ andl \len, \len
+ beq 99f // only 0 - 3 bytes
+ subl #1, \len // for dbf
+98: movel (\src)+, (\dest)+
+ dbfw \len, 98b
+99: movel %d7, \len
+ btstl #1, \len
+ beq 99f
+ movew (\src)+, (\dest)+
+99: btstl #0, \len
+ beq 99f
+ moveb (\src)+, (\dest)+
+99:
+ movel (%sp)+, %d7
+ .endm
+
+ .macro memcpy_from_pci src, dest, len
+ addl #VALUE_WINDOW, \src
+ memcpy \src, \dest, \len
+ .endm
+
+ .macro memcpy_to_pci src, dest, len
+ addl #VALUE_WINDOW, \dest
+ memcpy \src, \dest, \len
+ .endm
+#endif
+
+
+ .macro wait_for_command
+99: btstl #0, CR
+ bne 99b
+ .endm
+
+
+
+
+/****************************** card initialization *******************/
+ .text
+ .global _start
+_start: bra init
+
+ .org _start + 4
+ch_status_addr: .long 0, 0, 0, 0
+rx_descs_addr: .long 0
+
+init:
+#if DETECT_RAM
+ movel OR1, %d0
+ andl #0xF00007FF, %d0 // mask AMxx bits
+ orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size
+ movel %d0, OR1
+#endif
+
+ addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data
+ clrl %d0 // D0 = 4 * port
+init_1: tstl ch_status_addr(%d0)
+ beq init_2
+ addl #VALUE_WINDOW, ch_status_addr(%d0)
+init_2: addl #4, %d0
+ cmpl #4 * 4, %d0
+ bne init_1
+
+ movel #pci9060_interrupt, PCI9060_VECTOR
+ movel #error_interrupt, ERROR_VECTOR
+ movel #port_interrupt_1, SCC1_VECTOR
+ movel #port_interrupt_2, SCC2_VECTOR
+ movel #port_interrupt_3, SCC3_VECTOR
+ movel #port_interrupt_4, SCC4_VECTOR
+ movel #timer_interrupt, TIMER_IRQ * 4
+
+ movel #0x78000000, CIMR // only SCCx IRQs from CPM
+ movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR // interrupt from PIT
+ movew #PITR_CONST, PITR
+
+ // SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79
+ movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR
+ movel #0x543, PLX_DMA_0_MODE // 32-bit, Ready, Burst, IRQ
+ movel #0x543, PLX_DMA_1_MODE
+ movel #0x0, PLX_DMA_0_DESC // from PCI to local
+ movel #0x8, PLX_DMA_1_DESC // from local to PCI
+ movel #0x101, PLX_DMA_CMD_STS // enable both DMA channels
+ // enable local IRQ, DMA, doorbells and PCI IRQ
+ orl #0x000F0300, PLX_INTERRUPT_CS
+
+#if DETECT_RAM
+ bsr ram_test
+#else
+ movel #1, PLX_MAILBOX_5 // non-zero value = init complete
+#endif
+ bsr check_csr
+
+ movew #0xFFFF, PAPAR // all pins are clocks/data
+ clrw PADIR // first function
+ clrw PCSO // CD and CTS always active
+
+
+/****************************** main loop *****************************/
+
+main: movel channel_stats, %d7 // D7 = doorbell + irq status
+ clrl channel_stats
+
+ tstl %d7
+ bne main_1
+ // nothing to do - wait for next event
+ stop #0x2200 // supervisor + IRQ level 2
+ movew #0x2700, %sr // disable IRQs again
+ bra main
+
+main_1: clrl %d0 // D0 = 4 * port
+ clrl %d6 // D6 = doorbell to host value
+
+main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7
+ beq main_op
+ bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set
+ bsr close_port
+main_op:
+ btstl #DOORBELL_TO_CARD_OPEN_0, %d7
+ beq main_cl
+ bsr open_port
+main_cl:
+ btstl #DOORBELL_TO_CARD_TX_0, %d7
+ beq main_txend
+ bsr tx
+main_txend:
+ btstl #TASK_SCC_0, %d7
+ beq main_next
+ bsr tx_end
+ bsr rx
+
+main_next:
+ lsrl #1, %d7 // port status for next port
+ addl #4, %d0 // D0 = 4 * next port
+ cmpl #4 * 4, %d0
+ bne main_l
+ movel %d6, PLX_DOORBELL_FROM_CARD // signal the host
+ bra main
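[Editorial sketch] The dispatch done by the main loop above, restated in C: the per-port doorbell/IRQ bits are assumed to sit one bit apart per port (which is why the status word is shifted right once per iteration). All names, bit numbers and helpers below are placeholders, not firmware definitions.

	#include <stdint.h>

	/* Placeholder bit numbers for port 0; port n is assumed to use bit + n. */
	#define DOORBELL_TO_CARD_CLOSE_0	0	/* assumed value */
	#define DOORBELL_TO_CARD_OPEN_0		4	/* assumed value */
	#define DOORBELL_TO_CARD_TX_0		8	/* assumed value */
	#define TASK_SCC_0			12	/* assumed value */

	extern void close_port(int port, uint32_t *bell);	/* stubs standing in */
	extern void open_port(int port, uint32_t *bell);	/* for the firmware  */
	extern void tx(int port, uint32_t *bell);		/* routines           */
	extern void tx_end(int port, uint32_t *bell);
	extern void rx(int port, uint32_t *bell);
	extern void ring_host_doorbell(uint32_t bell);	/* PLX_DOORBELL_FROM_CARD */

	static void dispatch_events(uint32_t events)
	{
		uint32_t bell_to_host = 0;
		int port;

		for (port = 0; port < 4; port++, events >>= 1) {
			if (events & (1u << DOORBELL_TO_CARD_CLOSE_0)) {
				/* a pending OPEN is dropped when CLOSE is also set */
				events &= ~(1u << DOORBELL_TO_CARD_OPEN_0);
				close_port(port, &bell_to_host);
			}
			if (events & (1u << DOORBELL_TO_CARD_OPEN_0))
				open_port(port, &bell_to_host);
			if (events & (1u << DOORBELL_TO_CARD_TX_0))
				tx(port, &bell_to_host);
			if (events & (1u << TASK_SCC_0)) {
				tx_end(port, &bell_to_host);
				rx(port, &bell_to_host);
			}
		}
		ring_host_doorbell(bell_to_host);	/* signal the host */
	}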
+
+
+/****************************** open port *****************************/
+
+open_port: // D0 = 4 * port, D6 = doorbell to host
+ movel ch_status_addr(%d0), %a0 // A0 = port status address
+ tstl STATUS_OPEN(%a0)
+ bne open_port_ret // port already open
+ movel #1, STATUS_OPEN(%a0) // confirm the port is open
+// setup BDs
+ clrl tx_in(%d0)
+ clrl tx_out(%d0)
+ clrl tx_count(%d0)
+ clrl rx_in(%d0)
+
+ movel SICR, %d1 // D1 = clock settings in SICR
+ andl clocking_mask(%d0), %d1
+ cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0)
+ bne open_port_clock_ext
+ orl clocking_txfromrx(%d0), %d1
+ bra open_port_set_clock
+
+open_port_clock_ext:
+ orl clocking_ext(%d0), %d1
+open_port_set_clock:
+ movel %d1, SICR // update clock settings in SICR
+
+ orw #STATUS_CABLE_DTR, csr_output(%d0) // DTR on
+ bsr check_csr // call with disabled timer interrupt
+
+// Setup TX descriptors
+ movel first_buffer(%d0), %d1 // D1 = starting buffer address
+ movel tx_first_bd(%d0), %a1 // A1 = starting TX BD address
+ movel #TX_BUFFERS - 2, %d2 // D2 = dbf counter for the first TX_BUFFERS - 1 BDs
+ movel #0x18000000, %d3 // D3 = initial TX BD flags: Int + Last
+ cmpl #PARITY_NONE, STATUS_PARITY(%a0)
+ beq open_port_tx_loop
+ bsetl #26, %d3 // TX BD flag: Transmit CRC
+open_port_tx_loop:
+ movel %d3, (%a1)+ // TX flags + length
+ movel %d1, (%a1)+ // buffer address
+ addl #BUFFER_LENGTH, %d1
+ dbfw %d2, open_port_tx_loop
+
+ bsetl #29, %d3 // TX BD flag: Wrap (last BD)
+ movel %d3, (%a1)+ // Final TX flags + length
+ movel %d1, (%a1)+ // buffer address
+
+// Setup RX descriptors // A1 = starting RX BD address
+ movel #RX_BUFFERS - 2, %d2 // D2 = dbf counter for the first RX_BUFFERS - 1 BDs
+open_port_rx_loop:
+ movel #0x90000000, (%a1)+ // RX flags + length
+ movel %d1, (%a1)+ // buffer address
+ addl #BUFFER_LENGTH, %d1
+ dbfw %d2, open_port_rx_loop
+
+ movel #0xB0000000, (%a1)+ // Final RX flags + length
+ movel %d1, (%a1)+ // buffer address
+
+// Setup port parameters
+ movel scc_base_addr(%d0), %a1 // A1 = SCC_BASE address
+ movel scc_reg_addr(%d0), %a2 // A2 = SCC_REGS address
+
+ movel #0xFFFF, SCC_SCCE(%a2) // clear status bits
+ movel #0x0000, SCC_SCCM(%a2) // interrupt mask
+
+ movel tx_first_bd(%d0), %d1
+ movew %d1, SCC_TBASE(%a1) // D1 = offset of first TxBD
+ addl #TX_BUFFERS * 8, %d1
+ movew %d1, SCC_RBASE(%a1) // D1 = offset of first RxBD
+ moveb #0x8, SCC_RFCR(%a1) // Intel mode, 1000
+ moveb #0x8, SCC_TFCR(%a1)
+
+// Parity settings
+ cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0)
+ bne open_port_parity_1
+ clrw SCC_PSMR(%a2) // CRC16-CCITT
+ movel #0xF0B8, SCC_C_MASK(%a1)
+ movel #0xFFFF, SCC_C_PRES(%a1)
+ movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
+ movew #2, parity_bytes(%d0)
+ bra open_port_2
+
+open_port_parity_1:
+ cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0)
+ bne open_port_parity_2
+ movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT
+ movel #0xDEBB20E3, SCC_C_MASK(%a1)
+ movel #0xFFFFFFFF, SCC_C_PRES(%a1)
+ movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
+ movew #4, parity_bytes(%d0)
+ bra open_port_2
+
+open_port_parity_2:
+ cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0)
+ bne open_port_parity_3
+ clrw SCC_PSMR(%a2) // CRC16-CCITT preset 0
+ movel #0xF0B8, SCC_C_MASK(%a1)
+ clrl SCC_C_PRES(%a1)
+ movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
+ movew #2, parity_bytes(%d0)
+ bra open_port_2
+
+open_port_parity_3:
+ cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0)
+ bne open_port_parity_4
+ movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT preset 0
+ movel #0xDEBB20E3, SCC_C_MASK(%a1)
+ clrl SCC_C_PRES(%a1)
+ movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
+ movew #4, parity_bytes(%d0)
+ bra open_port_2
+
+open_port_parity_4:
+ clrw SCC_PSMR(%a2) // no parity
+ movel #0xF0B8, SCC_C_MASK(%a1)
+ movel #0xFFFF, SCC_C_PRES(%a1)
+ movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC
+ clrw parity_bytes(%d0)
+
+open_port_2:
+ movel #0x00000003, SCC_GSMR_H(%a2) // RTSM
+ cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0)
+ bne open_port_nrz
+ movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1
+ bra open_port_3
+
+open_port_nrz:
+ movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0
+open_port_3:
+ movew #BUFFER_LENGTH, SCC_MRBLR(%a1)
+ movel %d0, %d1
+ lsll #4, %d1 // D1 bits 7 and 6 = port
+ orl #1, %d1
+ movew %d1, CR // Init SCC RX and TX params
+ wait_for_command
+
+ // TCI Tend ENR ENT
+ movew #0x001F, SCC_SCCM(%a2) // TXE RXF BSY TXB RXB interrupts
+ orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC
+open_port_ret:
+ rts
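[Editorial sketch] The eight-byte strides used in the BD setup above correspond to the QUICC SCC buffer-descriptor layout; as a sketch (field names are mine, the sizes follow from the code: length at offset 2, buffer address at offset 4):

	#include <stdint.h>

	/* Assumed layout of one SCC buffer descriptor ("BD is 8-bytes long"). */
	struct quicc_bd {
		uint16_t status;	/* bit 15 = Ready; Wrap/Int/Last/TC below it */
		uint16_t length;	/* frame or buffer length in bytes */
		uint32_t buffer;	/* buffer address */
	};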
+
+
+/****************************** close port ****************************/
+
+close_port: // D0 = 4 * port, D6 = doorbell to host
+ movel scc_reg_addr(%d0), %a0 // A0 = SCC_REGS address
+ clrw SCC_SCCM(%a0) // no SCC interrupts
+ andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR
+
+ andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off
+ bsr check_csr // call with disabled timer interrupt
+
+ movel ch_status_addr(%d0), %d1
+ clrl STATUS_OPEN(%d1) // confirm the port is closed
+ rts
+
+
+/****************************** transmit packet ***********************/
+// queue packets for transmission
+tx: // D0 = 4 * port, D6 = doorbell to host
+ cmpl #TX_BUFFERS, tx_count(%d0)
+ beq tx_ret // all BDs (descs) in use
+
+ movel tx_out(%d0), %d1
+ movel %d1, %d2 // D1 = D2 = tx_out BD# = desc#
+ mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
+ addl ch_status_addr(%d0), %d2
+ addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
+ cmpl #PACKET_FULL, (%d2) // desc status
+ bne tx_ret
+
+// queue it
+ movel 4(%d2), %a0 // PCI address
+ lsll #3, %d1 // BD is 8-bytes long
+ addl tx_first_bd(%d0), %d1 // D1 = current tx_out BD addr
+
+ movel 4(%d1), %a1 // A1 = dest address
+ movel 8(%d2), %d2 // D2 = length
+ movew %d2, 2(%d1) // length into BD
+ memcpy_from_pci %a0, %a1, %d2
+ bsetl #31, (%d1) // CP go ahead
+
+// update tx_out and tx_count
+ movel tx_out(%d0), %d1
+ addl #1, %d1
+ cmpl #TX_BUFFERS, %d1
+ bne tx_1
+ clrl %d1
+tx_1: movel %d1, tx_out(%d0)
+
+ addl #1, tx_count(%d0)
+ bra tx
+
+tx_ret: rts
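[Editorial sketch] The rings above (tx_in/tx_out/rx_in and the host-side rx_out) all advance their index with the same wrap-around step that the assembly spells out as add/compare/clear; in C it is simply (names illustrative):

	/* Circular-buffer index advance, as used for tx_out above. */
	static unsigned int ring_advance(unsigned int index, unsigned int ring_size)
	{
		return (index + 1 == ring_size) ? 0 : index + 1;
	}

For example, the tx_out update would read tx_out[port] = ring_advance(tx_out[port], TX_BUFFERS).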
+
+
+/****************************** packet received ***********************/
+
+// Service receive buffers // D0 = 4 * port, D6 = doorbell to host
+rx: movel rx_in(%d0), %d1 // D1 = rx_in BD#
+ lsll #3, %d1 // BD is 8-bytes long
+ addl rx_first_bd(%d0), %d1 // D1 = current rx_in BD address
+ movew (%d1), %d2 // D2 = RX BD flags
+ btstl #15, %d2
+ bne rx_ret // BD still empty
+
+ btstl #1, %d2
+ bne rx_overrun
+
+ tstw parity_bytes(%d0)
+ bne rx_parity
+ bclrl #2, %d2 // do not test for CRC errors
+rx_parity:
+ andw #0x0CBC, %d2 // mask status bits
+ cmpw #0x0C00, %d2 // correct frame
+ bne rx_bad_frame
+ clrl %d3
+ movew 2(%d1), %d3
+ subw parity_bytes(%d0), %d3 // D3 = packet length
+ cmpw #HDLC_MAX_MRU, %d3
+ bgt rx_bad_frame
+
+rx_good_frame:
+ movel rx_out, %d2
+ mulul #DESC_LENGTH, %d2
+ addl rx_descs_addr, %d2 // D2 = RX desc address
+ cmpl #PACKET_EMPTY, (%d2) // desc stat
+ bne rx_overrun
+
+ movel %d3, 8(%d2)
+ movel 4(%d1), %a0 // A0 = source address
+ movel 4(%d2), %a1
+ tstl %a1
+ beq rx_ignore_data
+ memcpy_to_pci %a0, %a1, %d3
+rx_ignore_data:
+ movel packet_full(%d0), (%d2) // update desc stat
+
+// update D6 and rx_out
+ bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed
+ movel rx_out, %d2
+ addl #1, %d2
+ cmpl #RX_QUEUE_LENGTH, %d2
+ bne rx_1
+ clrl %d2
+rx_1: movel %d2, rx_out
+
+rx_free_bd:
+ andw #0xF000, (%d1) // clear CM and error bits
+ bsetl #31, (%d1) // free BD
+// update rx_in
+ movel rx_in(%d0), %d1
+ addl #1, %d1
+ cmpl #RX_BUFFERS, %d1
+ bne rx_2
+ clrl %d1
+rx_2: movel %d1, rx_in(%d0)
+ bra rx
+
+rx_overrun:
+ movel ch_status_addr(%d0), %d2
+ addl #1, STATUS_RX_OVERRUNS(%d2)
+ bra rx_free_bd
+
+rx_bad_frame:
+ movel ch_status_addr(%d0), %d2
+ addl #1, STATUS_RX_FRAME_ERRORS(%d2)
+ bra rx_free_bd
+
+rx_ret: rts
+
+
+/****************************** packet transmitted ********************/
+
+// Service transmit buffers // D0 = 4 * port, D6 = doorbell to host
+tx_end: tstl tx_count(%d0)
+ beq tx_end_ret // TX buffers already empty
+
+ movel tx_in(%d0), %d1
+ movel %d1, %d2 // D1 = D2 = tx_in BD# = desc#
+ lsll #3, %d1 // BD is 8-bytes long
+ addl tx_first_bd(%d0), %d1 // D1 = current tx_in BD address
+ movew (%d1), %d3 // D3 = TX BD flags
+ btstl #15, %d3
+ bne tx_end_ret // BD still being transmitted
+
+// update D6, tx_in and tx_count
+ orl bell_tx(%d0), %d6 // signal host that TX desc freed
+ subl #1, tx_count(%d0)
+ movel tx_in(%d0), %d1
+ addl #1, %d1
+ cmpl #TX_BUFFERS, %d1
+ bne tx_end_1
+ clrl %d1
+tx_end_1:
+ movel %d1, tx_in(%d0)
+
+// free host's descriptor
+ mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
+ addl ch_status_addr(%d0), %d2
+ addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
+ btstl #1, %d3
+ bne tx_end_underrun
+ movel #PACKET_SENT, (%d2)
+ bra tx_end
+
+tx_end_underrun:
+ movel #PACKET_UNDERRUN, (%d2)
+ bra tx_end
+
+tx_end_ret: rts
+
+
+/****************************** PLX PCI9060 DMA memcpy ****************/
+
+#if QUICC_MEMCPY_USES_PLX
+// called with interrupts disabled
+memcpy_from_pci_run:
+ movel %d0, -(%sp)
+ movew %sr, -(%sp)
+memcpy_1:
+ movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
+ btstl #4, %d0 // transfer done?
+ bne memcpy_end
+ stop #0x2200 // enable PCI9060 interrupts
+ movew #0x2700, %sr // disable interrupts again
+ bra memcpy_1
+
+memcpy_to_pci_run:
+ movel %d0, -(%sp)
+ movew %sr, -(%sp)
+memcpy_2:
+ movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
+ btstl #12, %d0 // transfer done?
+ bne memcpy_end
+ stop #0x2200 // enable PCI9060 interrupts
+ movew #0x2700, %sr // disable interrupts again
+ bra memcpy_2
+
+memcpy_end:
+ movew (%sp)+, %sr
+ movel (%sp)+, %d0
+ rts
+#endif
+
+
+
+
+
+
+/****************************** PLX PCI9060 interrupt *****************/
+
+pci9060_interrupt:
+ movel %d0, -(%sp)
+
+ movel PLX_DOORBELL_TO_CARD, %d0
+ movel %d0, PLX_DOORBELL_TO_CARD // confirm all requests
+ orl %d0, channel_stats
+
+ movel #0x0909, PLX_DMA_CMD_STS // clear DMA ch #0 and #1 interrupts
+
+ movel (%sp)+, %d0
+ rte
+
+/****************************** SCC interrupts ************************/
+
+port_interrupt_1:
+ orl #0, SCC1_REGS + SCC_SCCE; // confirm SCC events
+ orl #1 << TASK_SCC_0, channel_stats
+ movel #0x40000000, CISR
+ rte
+
+port_interrupt_2:
+ orl #0, SCC2_REGS + SCC_SCCE; // confirm SCC events
+ orl #1 << TASK_SCC_1, channel_stats
+ movel #0x20000000, CISR
+ rte
+
+port_interrupt_3:
+ orl #0, SCC3_REGS + SCC_SCCE; // confirm SCC events
+ orl #1 << TASK_SCC_2, channel_stats
+ movel #0x10000000, CISR
+ rte
+
+port_interrupt_4:
+ orl #0, SCC4_REGS + SCC_SCCE; // confirm SCC events
+ orl #1 << TASK_SCC_3, channel_stats
+ movel #0x08000000, CISR
+ rte
+
+error_interrupt:
+ rte
+
+
+/****************************** cable and PM routine ******************/
+// modified registers: none
+check_csr:
+ movel %d0, -(%sp)
+ movel %d1, -(%sp)
+ movel %d2, -(%sp)
+ movel %a0, -(%sp)
+ movel %a1, -(%sp)
+
+ clrl %d0 // D0 = 4 * port
+ movel #CSRA, %a0 // A0 = CSR address
+
+check_csr_loop:
+ movew (%a0), %d1 // D1 = CSR input bits
+ andl #0xE7, %d1 // PM and cable sense bits (no DCE bit)
+ cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+ bne check_csr_1
+ movew #0x0E08, %d1
+ bra check_csr_valid
+
+check_csr_1:
+ cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+ bne check_csr_2
+ movew #0x0408, %d1
+ bra check_csr_valid
+
+check_csr_2:
+ cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+ bne check_csr_3
+ movew #0x0208, %d1
+ bra check_csr_valid
+
+check_csr_3:
+ cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+ bne check_csr_disable
+ movew #0x0D08, %d1
+ bra check_csr_valid
+
+check_csr_disable:
+ movew #0x0008, %d1 // D1 = disable everything
+ movew #0x80E7, %d2 // D2 = input mask: ignore DSR
+ bra check_csr_write
+
+check_csr_valid: // D1 = mode and IRQ bits
+ movew csr_output(%d0), %d2
+ andw #0x3000, %d2 // D2 = requested LL and DTR bits
+ orw %d2, %d1 // D1 = all requested output bits
+ movew #0x80FF, %d2 // D2 = input mask: include DSR
+
+check_csr_write:
+ cmpw old_csr_output(%d0), %d1
+ beq check_csr_input
+ movew %d1, old_csr_output(%d0)
+ movew %d1, (%a0) // Write CSR output bits
+
+check_csr_input:
+ movew (PCDAT), %d1
+ andw dcd_mask(%d0), %d1
+ beq check_csr_dcd_on // DCD and CTS signals are negated
+ movew (%a0), %d1 // D1 = CSR input bits
+ andw #~STATUS_CABLE_DCD, %d1 // DCD off
+ bra check_csr_previous
+
+check_csr_dcd_on:
+ movew (%a0), %d1 // D1 = CSR input bits
+ orw #STATUS_CABLE_DCD, %d1 // DCD on
+check_csr_previous:
+ andw %d2, %d1 // input mask
+ movel ch_status_addr(%d0), %a1
+ cmpl STATUS_CABLE(%a1), %d1 // check for change
+ beq check_csr_next
+ movel %d1, STATUS_CABLE(%a1) // update status
+ movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD // signal the host
+
+check_csr_next:
+ addl #2, %a0 // next CSR register
+ addl #4, %d0 // D0 = 4 * next port
+ cmpl #4 * 4, %d0
+ bne check_csr_loop
+
+ movel (%sp)+, %a1
+ movel (%sp)+, %a0
+ movel (%sp)+, %d2
+ movel (%sp)+, %d1
+ movel (%sp)+, %d0
+ rts
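[Editorial sketch] The cable-detection chain in check_csr amounts to a small lookup from the sensed cable/PM code to a CSR output word. A table-driven restatement (the STATUS_CABLE_* macros are assumed to come from the card's shared wanXL header and are not defined here; the hexadecimal output words are the ones written above):

	#include <stdint.h>

	/* Cable/PM code -> CSR mode and IRQ bits, mirroring check_csr (sketch). */
	static const struct {
		uint16_t cable_code;
		uint16_t csr_output;
	} cable_map[] = {
		{ STATUS_CABLE_V35    * (1 + (1 << STATUS_CABLE_PM_SHIFT)), 0x0E08 },
		{ STATUS_CABLE_X21    * (1 + (1 << STATUS_CABLE_PM_SHIFT)), 0x0408 },
		{ STATUS_CABLE_V24    * (1 + (1 << STATUS_CABLE_PM_SHIFT)), 0x0208 },
		{ STATUS_CABLE_EIA530 * (1 + (1 << STATUS_CABLE_PM_SHIFT)), 0x0D08 },
		/* no match: 0x0008 - everything disabled and DSR masked out */
	};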
+
+
+/****************************** timer interrupt ***********************/
+
+timer_interrupt:
+ bsr check_csr
+ rte
+
+
+/****************************** RAM sizing and test *******************/
+#if DETECT_RAM
+ram_test:
+ movel #0x12345678, %d1 // D1 = test value
+ movel %d1, (128 * 1024 - 4)
+ movel #128 * 1024, %d0 // D0 = RAM size tested
+ram_test_size:
+ cmpl #MAX_RAM_SIZE, %d0
+ beq ram_test_size_found
+ movel %d0, %a0
+ addl #128 * 1024 - 4, %a0
+ cmpl (%a0), %d1
+ beq ram_test_size_check
+ram_test_next_size:
+ lsll #1, %d0
+ bra ram_test_size
+
+ram_test_size_check:
+ eorl #0xFFFFFFFF, %d1
+ movel %d1, (128 * 1024 - 4)
+ cmpl (%a0), %d1
+ bne ram_test_next_size
+
+ram_test_size_found: // D0 = RAM size
+ movel %d0, %a0 // A0 = fill ptr
+ subl #firmware_end + 4, %d0
+ lsrl #2, %d0
+ movel %d0, %d1 // D1 = DBf counter
+ram_test_fill:
+ movel %a0, -(%a0)
+ dbfw %d1, ram_test_fill
+ subl #0x10000, %d1
+ cmpl #0xFFFFFFFF, %d1
+ bne ram_test_fill
+
+ram_test_loop: // D0 = DBf counter
+ cmpl (%a0)+, %a0
+ dbnew %d0, ram_test_loop
+ bne ram_test_found_bad
+ subl #0x10000, %d0
+ cmpl #0xFFFFFFFF, %d0
+ bne ram_test_loop
+ bra ram_test_all_ok
+
+ram_test_found_bad:
+ subl #4, %a0
+ram_test_all_ok:
+ movel %a0, PLX_MAILBOX_5
+ rts
+#endif
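[Editorial sketch] The sizing half of ram_test plants a marker just below 128 KB and then looks for it aliasing back at successively doubled sizes, flipping the marker once to rule out a coincidence. Roughly, in C (MAX_RAM_SIZE is given a placeholder value here and the raw pointer casts are for illustration only):

	#include <stdint.h>

	#define MAX_RAM_SIZE	(16 * 1024 * 1024)	/* placeholder value */

	static uint32_t detect_ram_size(void)
	{
		volatile uint32_t *probe = (volatile uint32_t *)(128 * 1024 - 4);
		uint32_t size = 128 * 1024;
		uint32_t marker = 0x12345678;

		*probe = marker;
		while (size < MAX_RAM_SIZE) {
			volatile uint32_t *mirror =
				(volatile uint32_t *)(size + 128 * 1024 - 4);

			if (*mirror == marker) {
				/* could be real RAM holding the same value -
				 * flip the marker and re-check before believing it */
				marker ^= 0xFFFFFFFF;
				*probe = marker;
				if (*mirror == marker)
					break;		/* address space wraps here */
			}
			size <<= 1;
		}
		return size;
	}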
+
+
+/****************************** constants *****************************/
+
+scc_reg_addr:
+ .long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS
+scc_base_addr:
+ .long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE
+
+tx_first_bd:
+ .long DPRBASE
+ .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8
+ .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
+ .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
+
+rx_first_bd:
+ .long DPRBASE + TX_BUFFERS * 8
+ .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8
+ .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
+ .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
+
+first_buffer:
+ .long BUFFERS_ADDR
+ .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH
+ .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2
+ .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3
+
+bell_tx:
+ .long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1
+ .long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3
+
+bell_cable:
+ .long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1
+ .long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3
+
+packet_full:
+ .long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3
+
+clocking_ext:
+ .long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000
+clocking_txfromrx:
+ .long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000
+clocking_mask:
+ .long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+dcd_mask:
+ .word 0x020, 0, 0x080, 0, 0x200, 0, 0x800
+
+ .ascii "wanXL firmware\n"
+ .asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n"
+
+
+/****************************** variables *****************************/
+
+ .align 4
+channel_stats: .long 0
+
+tx_in: .long 0, 0, 0, 0 // transmitted
+tx_out: .long 0, 0, 0, 0 // received from host for transmission
+tx_count: .long 0, 0, 0, 0 // currently in transmit queue
+
+rx_in: .long 0, 0, 0, 0 // received from port
+rx_out: .long 0 // transmitted to host
+parity_bytes: .word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used
+
+csr_output: .word 0
+old_csr_output: .word 0, 0, 0, 0, 0, 0, 0
+ .align 4
+firmware_end: // must be dword-aligned
diff --git a/drivers/net/wan/wanxlfw.inc_shipped b/drivers/net/wan/wanxlfw.inc_shipped
new file mode 100644
index 000000000000..73da688f943b
--- /dev/null
+++ b/drivers/net/wan/wanxlfw.inc_shipped
@@ -0,0 +1,158 @@
+static u8 firmware[]={
+0x60,0x00,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0xB9,0x40,0x00,0x00,0x00,0x00,0x00,
+0x10,0x14,0x42,0x80,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x67,0x00,0x00,0x0E,
+0x06,0xB0,0x40,0x00,0x00,0x00,0x09,0xB0,0x00,0x00,0x10,0x04,0x58,0x80,0x0C,0x80,
+0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xDE,0x21,0xFC,0x00,0x00,0x16,0xBC,0x00,0x6C,
+0x21,0xFC,0x00,0x00,0x17,0x5E,0x01,0x00,0x21,0xFC,0x00,0x00,0x16,0xDE,0x01,0x78,
+0x21,0xFC,0x00,0x00,0x16,0xFE,0x01,0x74,0x21,0xFC,0x00,0x00,0x17,0x1E,0x01,0x70,
+0x21,0xFC,0x00,0x00,0x17,0x3E,0x01,0x6C,0x21,0xFC,0x00,0x00,0x18,0x4C,0x02,0x00,
+0x23,0xFC,0x78,0x00,0x00,0x00,0xFF,0xFC,0x15,0x48,0x33,0xFC,0x04,0x80,0xFF,0xFC,
+0x10,0x26,0x33,0xFC,0x01,0x10,0xFF,0xFC,0x10,0x2A,0x23,0xFC,0x00,0xD4,0x9F,0x40,
+0xFF,0xFC,0x15,0x40,0x23,0xFC,0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x00,0x23,0xFC,
+0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x14,0x23,0xFC,0x00,0x00,0x00,0x00,0xFF,0xF9,
+0x01,0x10,0x23,0xFC,0x00,0x00,0x00,0x08,0xFF,0xF9,0x01,0x24,0x23,0xFC,0x00,0x00,
+0x01,0x01,0xFF,0xF9,0x01,0x28,0x00,0xB9,0x00,0x0F,0x03,0x00,0xFF,0xF9,0x00,0xE8,
+0x23,0xFC,0x00,0x00,0x00,0x01,0xFF,0xF9,0x00,0xD4,0x61,0x00,0x06,0x74,0x33,0xFC,
+0xFF,0xFF,0xFF,0xFC,0x15,0x52,0x42,0x79,0xFF,0xFC,0x15,0x50,0x42,0x79,0xFF,0xFC,
+0x15,0x64,0x2E,0x3A,0x08,0x50,0x42,0xB9,0x00,0x00,0x19,0x54,0x4A,0x87,0x66,0x00,
+0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE6,0x42,0x80,
+0x42,0x86,0x08,0x07,0x00,0x04,0x67,0x00,0x00,0x0A,0x08,0x87,0x00,0x00,0x61,0x00,
+0x02,0xA0,0x08,0x07,0x00,0x00,0x67,0x00,0x00,0x06,0x61,0x00,0x00,0x36,0x08,0x07,
+0x00,0x08,0x67,0x00,0x00,0x06,0x61,0x00,0x02,0xB8,0x08,0x07,0x00,0x0C,0x67,0x00,
+0x00,0x0A,0x61,0x00,0x04,0x94,0x61,0x00,0x03,0x60,0xE2,0x8F,0x58,0x80,0x0C,0x80,
+0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xBC,0x23,0xC6,0xFF,0xF9,0x00,0xE4,0x60,0x00,
+0xFF,0x92,0x20,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0x4A,0xA8,0x00,0x00,0x66,0x00,
+0x02,0x4E,0x21,0x7C,0x00,0x00,0x00,0x01,0x00,0x00,0x42,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x58,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x68,0x42,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x78,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x88,0x22,0x39,0xFF,0xFC,0x16,0xEC,
+0xC2,0xB0,0x09,0xB0,0x00,0x00,0x18,0xF2,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x18,
+0x66,0x00,0x00,0x0E,0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xE2,0x60,0x00,0x00,0x0A,
+0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xD2,0x23,0xC1,0xFF,0xFC,0x16,0xEC,0x00,0x70,
+0x10,0x00,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,0x05,0x76,0x22,0x30,0x09,0xB0,
+0x00,0x00,0x18,0x92,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x72,0x74,0x08,0x26,0x3C,
+0x18,0x00,0x00,0x00,0x0C,0xA8,0x00,0x00,0x00,0x01,0x00,0x10,0x67,0x00,0x00,0x06,
+0x08,0xC3,0x00,0x1A,0x22,0xC3,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,
+0xFF,0xF4,0x08,0xC3,0x00,0x1D,0x22,0xC3,0x22,0xC1,0x74,0x1C,0x22,0xFC,0x90,0x00,
+0x00,0x00,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,0xFF,0xF0,0x22,0xFC,
+0xB0,0x00,0x00,0x00,0x22,0xC1,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x62,0x24,0x70,
+0x09,0xB0,0x00,0x00,0x18,0x52,0x25,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x10,0x25,0x7C,
+0x00,0x00,0x00,0x00,0x00,0x14,0x22,0x30,0x09,0xB0,0x00,0x00,0x18,0x72,0x33,0x41,
+0x00,0x02,0x06,0x81,0x00,0x00,0x00,0x50,0x33,0x41,0x00,0x00,0x13,0x7C,0x00,0x08,
+0x00,0x04,0x13,0x7C,0x00,0x08,0x00,0x05,0x0C,0xA8,0x00,0x00,0x00,0x05,0x00,0x10,
+0x66,0x00,0x00,0x2A,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
+0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xFA,0x00,0x46,0x31,0xBC,
+0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,0x00,0xBC,0x0C,0xA8,0x00,0x00,
+0x00,0x07,0x00,0x10,0x66,0x00,0x00,0x2C,0x35,0x7C,0x08,0x00,0x00,0x08,0x23,0x7C,
+0xDE,0xBB,0x20,0xE3,0x00,0x34,0x23,0x7C,0xFF,0xFF,0xFF,0xFF,0x00,0x38,0x33,0x7C,
+0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
+0x00,0x86,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x10,0x66,0x00,0x00,0x26,0x42,0x6A,
+0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,0x42,0xA9,0x00,0x38,0x33,0x7C,
+0x05,0xFA,0x00,0x46,0x31,0xBC,0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
+0x00,0x56,0x0C,0xA8,0x00,0x00,0x00,0x06,0x00,0x10,0x66,0x00,0x00,0x28,0x35,0x7C,
+0x08,0x00,0x00,0x08,0x23,0x7C,0xDE,0xBB,0x20,0xE3,0x00,0x34,0x42,0xA9,0x00,0x38,
+0x33,0x7C,0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,
+0x60,0x00,0x00,0x24,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
+0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xF8,0x00,0x46,0x42,0x70,
+0x09,0xB0,0x00,0x00,0x19,0x9C,0x25,0x7C,0x00,0x00,0x00,0x03,0x00,0x04,0x0C,0xA8,
+0x00,0x00,0x00,0x02,0x00,0x14,0x66,0x00,0x00,0x0E,0x25,0x7C,0x10,0x04,0x09,0x00,
+0x00,0x00,0x60,0x00,0x00,0x0A,0x25,0x7C,0x10,0x04,0x00,0x00,0x00,0x00,0x33,0x7C,
+0x05,0xFC,0x00,0x06,0x22,0x00,0xE9,0x89,0x00,0x81,0x00,0x00,0x00,0x01,0x33,0xC1,
+0xFF,0xFC,0x15,0xC0,0x08,0x39,0x00,0x00,0xFF,0xFC,0x15,0xC0,0x66,0x00,0xFF,0xF6,
+0x35,0x7C,0x00,0x1F,0x00,0x14,0x00,0xAA,0x00,0x00,0x00,0x30,0x00,0x00,0x4E,0x75,
+0x20,0x70,0x09,0xB0,0x00,0x00,0x18,0x52,0x42,0x68,0x00,0x14,0x02,0xA8,0xFF,0xFF,
+0xFF,0xCF,0x00,0x00,0x02,0x70,0xEF,0xFF,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,
+0x03,0x70,0x22,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x42,0xB0,0x19,0x90,0x4E,0x75,
+0x0C,0xB0,0x00,0x00,0x00,0x0A,0x09,0xB0,0x00,0x00,0x19,0x78,0x67,0x00,0x00,0xA8,
+0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x68,0x24,0x01,0x4C,0x3C,0x20,0x00,0x00,0x00,
+0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,
+0x0C,0xB0,0x00,0x00,0x00,0x10,0x29,0x90,0x66,0x00,0x00,0x7C,0x20,0x70,0x29,0xA0,
+0x00,0x04,0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x22,0x70,0x19,0xA0,
+0x00,0x04,0x24,0x30,0x29,0xA0,0x00,0x08,0x31,0x82,0x19,0xA0,0x00,0x02,0x56,0x82,
+0x02,0x82,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,0xFF,0xF9,0x01,0x04,0x23,0xC9,0xFF,0xF9,
+0x01,0x08,0x23,0xC2,0xFF,0xF9,0x01,0x0C,0x23,0xFC,0x00,0x00,0x01,0x03,0xFF,0xF9,
+0x01,0x28,0x61,0x00,0x01,0xF6,0x08,0xF0,0x00,0x1F,0x19,0x90,0x22,0x30,0x09,0xB0,
+0x00,0x00,0x19,0x68,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,
+0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x68,0x52,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x78,0x60,0x00,0xFF,0x4C,0x4E,0x75,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,
+0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x82,0x34,0x30,0x19,0x90,0x08,0x02,
+0x00,0x0F,0x66,0x00,0x01,0x12,0x08,0x02,0x00,0x01,0x66,0x00,0x00,0xE6,0x4A,0x70,
+0x09,0xB0,0x00,0x00,0x19,0x9C,0x66,0x00,0x00,0x06,0x08,0x82,0x00,0x02,0x02,0x42,
+0x0C,0xBC,0x0C,0x42,0x0C,0x00,0x66,0x00,0x00,0xDC,0x42,0x83,0x36,0x30,0x19,0xA0,
+0x00,0x02,0x96,0x70,0x09,0xB0,0x00,0x00,0x19,0x9C,0x0C,0x43,0x05,0xF8,0x6E,0x00,
+0x00,0xC4,0x24,0x3A,0x04,0x84,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xBA,
+0xFA,0xF4,0x0C,0xB0,0x00,0x00,0x00,0x00,0x29,0x90,0x66,0x00,0x00,0x96,0x21,0x83,
+0x29,0xA0,0x00,0x08,0x20,0x70,0x19,0xA0,0x00,0x04,0x22,0x70,0x29,0xA0,0x00,0x04,
+0x4A,0x89,0x67,0x00,0x00,0x2A,0x56,0x83,0x02,0x83,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,
+0xFF,0xF9,0x01,0x1C,0x23,0xC9,0xFF,0xF9,0x01,0x18,0x23,0xC3,0xFF,0xF9,0x01,0x20,
+0x23,0xFC,0x00,0x00,0x03,0x01,0xFF,0xF9,0x01,0x28,0x61,0x00,0x01,0x2C,0x21,0xB0,
+0x09,0xB0,0x00,0x00,0x18,0xC2,0x29,0x90,0x08,0xC6,0x00,0x04,0x24,0x3A,0x04,0x1A,
+0x52,0x82,0x0C,0x82,0x00,0x00,0x00,0x28,0x66,0x00,0x00,0x04,0x42,0x82,0x23,0xC2,
+0x00,0x00,0x19,0x98,0x02,0x70,0xF0,0x00,0x19,0x90,0x08,0xF0,0x00,0x1F,0x19,0x90,
+0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x1E,
+0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x88,0x60,0x00,
+0xFE,0xF8,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,0x00,0x08,
+0x60,0x00,0xFF,0xC2,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,
+0x00,0x0C,0x60,0x00,0xFF,0xB0,0x4E,0x75,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x19,0x78,
+0x67,0x00,0x00,0x86,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x24,0x01,0xE7,0x89,
+0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x36,0x30,0x19,0x90,0x08,0x03,0x00,0x0F,
+0x66,0x00,0x00,0x66,0x8C,0xB0,0x09,0xB0,0x00,0x00,0x18,0xA2,0x53,0xB0,0x09,0xB0,
+0x00,0x00,0x19,0x78,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x52,0x81,0x0C,0x81,
+0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,
+0x19,0x58,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,
+0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,0x08,0x03,0x00,0x01,0x66,0x00,0x00,0x0E,
+0x21,0xBC,0x00,0x00,0x00,0x20,0x29,0x90,0x60,0x00,0xFF,0x7E,0x21,0xBC,0x00,0x00,
+0x00,0x30,0x29,0x90,0x60,0x00,0xFF,0x72,0x4E,0x75,0x2F,0x00,0x40,0xE7,0x20,0x39,
+0xFF,0xF9,0x01,0x28,0x08,0x00,0x00,0x04,0x66,0x00,0x00,0x2C,0x4E,0x72,0x22,0x00,
+0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE8,0x2F,0x00,0x40,0xE7,0x20,0x39,0xFF,0xF9,
+0x01,0x28,0x08,0x00,0x00,0x0C,0x66,0x00,0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,
+0x27,0x00,0x60,0x00,0xFF,0xE8,0x46,0xDF,0x20,0x1F,0x4E,0x75,0x2F,0x00,0x20,0x39,
+0xFF,0xF9,0x00,0xE0,0x23,0xC0,0xFF,0xF9,0x00,0xE0,0x81,0xB9,0x00,0x00,0x19,0x54,
+0x23,0xFC,0x00,0x00,0x09,0x09,0xFF,0xF9,0x01,0x28,0x20,0x1F,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x10,0x00,0xB9,0x00,0x00,0x10,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x40,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x30,0x00,0xB9,0x00,0x00,0x20,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x20,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x50,0x00,0xB9,0x00,0x00,0x40,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x10,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x70,0x00,0xB9,0x00,0x00,0x80,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x08,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x4E,0x73,
+0x2F,0x00,0x2F,0x01,0x2F,0x02,0x2F,0x08,0x2F,0x09,0x42,0x80,0x20,0x7C,0xFF,0xFB,
+0x00,0x00,0x32,0x10,0x02,0x81,0x00,0x00,0x00,0xE7,0x0C,0x41,0x00,0x42,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x0E,0x08,0x60,0x00,0x00,0x3E,0x0C,0x41,0x00,0x63,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x04,0x08,0x60,0x00,0x00,0x2E,0x0C,0x41,0x00,0x84,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x02,0x08,0x60,0x00,0x00,0x1E,0x0C,0x41,0x00,0xA5,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x0D,0x08,0x60,0x00,0x00,0x0E,0x32,0x3C,0x00,0x08,0x34,0x3C,
+0x80,0xE7,0x60,0x00,0x00,0x14,0x34,0x30,0x09,0xB0,0x00,0x00,0x19,0xAA,0x02,0x42,
+0x30,0x00,0x82,0x42,0x34,0x3C,0x80,0xFF,0xB2,0x70,0x09,0xB0,0x00,0x00,0x19,0xAC,
+0x67,0x00,0x00,0x0C,0x31,0x81,0x09,0xB0,0x00,0x00,0x19,0xAC,0x30,0x81,0x32,0x39,
+0xFF,0xFC,0x15,0x66,0xC2,0x70,0x09,0xB0,0x00,0x00,0x19,0x02,0x67,0x00,0x00,0x0C,
+0x32,0x10,0x02,0x41,0xFF,0xF7,0x60,0x00,0x00,0x08,0x32,0x10,0x00,0x41,0x00,0x08,
+0xC2,0x42,0x22,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0xB2,0xA9,0x00,0x04,0x67,0x00,
+0x00,0x12,0x23,0x41,0x00,0x04,0x23,0xF0,0x09,0xB0,0x00,0x00,0x18,0xB2,0xFF,0xF9,
+0x00,0xE4,0x54,0x88,0x58,0x80,0x0C,0x80,0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0x34,
+0x22,0x5F,0x20,0x5F,0x24,0x1F,0x22,0x1F,0x20,0x1F,0x4E,0x75,0x61,0x00,0xFF,0x12,
+0x4E,0x73,0xFF,0xFC,0x16,0x00,0xFF,0xFC,0x16,0x20,0xFF,0xFC,0x16,0x40,0xFF,0xFC,
+0x16,0x60,0xFF,0xFC,0x0C,0x00,0xFF,0xFC,0x0D,0x00,0xFF,0xFC,0x0E,0x00,0xFF,0xFC,
+0x0F,0x00,0xFF,0xFC,0x00,0x00,0xFF,0xFC,0x01,0x40,0xFF,0xFC,0x02,0x80,0xFF,0xFC,
+0x03,0xC0,0xFF,0xFC,0x00,0x50,0xFF,0xFC,0x01,0x90,0xFF,0xFC,0x02,0xD0,0xFF,0xFC,
+0x04,0x10,0x00,0x00,0x40,0x00,0x00,0x01,0x2F,0x60,0x00,0x02,0x1E,0xC0,0x00,0x03,
+0x0E,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,
+0x00,0x08,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x80,0x00,0x00,
+0x01,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x12,0x00,0x00,
+0x00,0x13,0x00,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,
+0x00,0x00,0x00,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,
+0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,
+0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x80,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,
+0x77,0x61,0x6E,0x58,0x4C,0x20,0x66,0x69,0x72,0x6D,0x77,0x61,0x72,0x65,0x0A,0x43,
+0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x43,0x29,0x20,0x32,0x30,0x30,
+0x33,0x20,0x4B,0x72,0x7A,0x79,0x73,0x7A,0x74,0x6F,0x66,0x20,0x48,0x61,0x6C,0x61,
+0x73,0x61,0x20,0x3C,0x6B,0x68,0x63,0x40,0x70,0x6D,0x2E,0x77,0x61,0x77,0x2E,0x70,
+0x6C,0x3E,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+};
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
new file mode 100644
index 000000000000..8c5cfcb55826
--- /dev/null
+++ b/drivers/net/wan/x25_asy.c
@@ -0,0 +1,844 @@
+/*
+ * Things to sort out:
+ *
+ * o tbusy handling
+ * o allow users to set the parameters
+ * o sync/async switching ?
+ *
+ * Note: This does _not_ implement CCITT X.25 asynchronous framing
+ * recommendations. It's primarily for testing purposes. If you wanted
+ * to do CCITT then in theory all you need is to nick the HDLC async
+ * checksum routines from ppp.c
+ * Changes:
+ *
+ * 2000-10-29 Henner Eisen lapb_data_indication() return status.
+ */
+
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/x25.h>
+#include <linux/lapb.h>
+#include <linux/init.h>
+#include "x25_asy.h"
+
+#include <net/x25device.h>
+
+static struct net_device **x25_asy_devs;
+static int x25_asy_maxdev = SL_NRUNIT;
+
+module_param(x25_asy_maxdev, int, 0);
+MODULE_LICENSE("GPL");
+
+static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
+static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
+static void x25_asy_setup(struct net_device *dev);
+
+/* Find a free X.25 channel, and link in this `tty' line. */
+static struct x25_asy *x25_asy_alloc(void)
+{
+ struct net_device *dev = NULL;
+ struct x25_asy *sl;
+ int i;
+
+ if (x25_asy_devs == NULL)
+ return NULL; /* Master array missing ! */
+
+ for (i = 0; i < x25_asy_maxdev; i++) {
+ dev = x25_asy_devs[i];
+
+ /* Not allocated ? */
+ if (dev == NULL)
+ break;
+
+ sl = dev->priv;
+ /* Not in use ? */
+ if (!test_and_set_bit(SLF_INUSE, &sl->flags))
+ return sl;
+ }
+
+
+ /* Sorry, too many, all slots in use */
+ if (i >= x25_asy_maxdev)
+ return NULL;
+
+ /* If no channels are available, allocate one */
+ if (!dev) {
+ char name[IFNAMSIZ];
+ sprintf(name, "x25asy%d", i);
+
+ dev = alloc_netdev(sizeof(struct x25_asy),
+ name, x25_asy_setup);
+ if (!dev)
+ return NULL;
+
+ /* Initialize channel control data */
+ sl = dev->priv;
+ dev->base_addr = i;
+
+ /* register device so that it can be ifconfig'ed */
+ if (register_netdev(dev) == 0) {
+ /* (Re-)Set the INUSE bit. Very Important! */
+ set_bit(SLF_INUSE, &sl->flags);
+ x25_asy_devs[i] = dev;
+ return sl;
+ } else {
+ printk("x25_asy_alloc() - register_netdev() failure.\n");
+ free_netdev(dev);
+ }
+ }
+ return NULL;
+}
+
+
+/* Free an X.25 channel. */
+static void x25_asy_free(struct x25_asy *sl)
+{
+ /* Free all X.25 frame buffers. */
+ if (sl->rbuff) {
+ kfree(sl->rbuff);
+ }
+ sl->rbuff = NULL;
+ if (sl->xbuff) {
+ kfree(sl->xbuff);
+ }
+ sl->xbuff = NULL;
+
+ if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {
+ printk("%s: x25_asy_free for already free unit.\n", sl->dev->name);
+ }
+}
+
+static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
+{
+ struct x25_asy *sl = dev->priv;
+ unsigned char *xbuff, *rbuff;
+ int len = 2* newmtu;
+
+ xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
+ rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
+
+ if (xbuff == NULL || rbuff == NULL)
+ {
+ printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
+ dev->name);
+ if (xbuff != NULL)
+ kfree(xbuff);
+ if (rbuff != NULL)
+ kfree(rbuff);
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&sl->lock);
+ xbuff = xchg(&sl->xbuff, xbuff);
+ if (sl->xleft) {
+ if (sl->xleft <= len) {
+ memcpy(sl->xbuff, sl->xhead, sl->xleft);
+ } else {
+ sl->xleft = 0;
+ sl->stats.tx_dropped++;
+ }
+ }
+ sl->xhead = sl->xbuff;
+
+ rbuff = xchg(&sl->rbuff, rbuff);
+ if (sl->rcount) {
+ if (sl->rcount <= len) {
+ memcpy(sl->rbuff, rbuff, sl->rcount);
+ } else {
+ sl->rcount = 0;
+ sl->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+
+ dev->mtu = newmtu;
+ sl->buffsize = len;
+
+ spin_unlock_bh(&sl->lock);
+
+ if (xbuff != NULL)
+ kfree(xbuff);
+ if (rbuff != NULL)
+ kfree(rbuff);
+ return 0;
+}
+
+
+/* Set the "sending" flag. This must be atomic, hence the ASM. */
+
+static inline void x25_asy_lock(struct x25_asy *sl)
+{
+ netif_stop_queue(sl->dev);
+}
+
+
+/* Clear the "sending" flag. This must be atomic, hence the ASM. */
+
+static inline void x25_asy_unlock(struct x25_asy *sl)
+{
+ netif_wake_queue(sl->dev);
+}
+
+/* Send one completely decapsulated IP datagram to the IP layer. */
+
+static void x25_asy_bump(struct x25_asy *sl)
+{
+ struct sk_buff *skb;
+ int count;
+ int err;
+
+ count = sl->rcount;
+ sl->stats.rx_bytes+=count;
+
+ skb = dev_alloc_skb(count+1);
+ if (skb == NULL)
+ {
+ printk("%s: memory squeeze, dropping packet.\n", sl->dev->name);
+ sl->stats.rx_dropped++;
+ return;
+ }
+ skb_push(skb,1); /* LAPB internal control */
+ memcpy(skb_put(skb,count), sl->rbuff, count);
+ skb->protocol = x25_type_trans(skb, sl->dev);
+ if((err=lapb_data_received(skb->dev, skb))!=LAPB_OK)
+ {
+ kfree_skb(skb);
+ printk(KERN_DEBUG "x25_asy: data received err - %d\n",err);
+ }
+ else
+ {
+ netif_rx(skb);
+ sl->dev->last_rx = jiffies;
+ sl->stats.rx_packets++;
+ }
+}
+
+/* Encapsulate one IP datagram and stuff into a TTY queue. */
+static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
+{
+ unsigned char *p;
+ int actual, count, mtu = sl->dev->mtu;
+
+ if (len > mtu)
+ { /* Sigh, shouldn't occur BUT ... */
+ len = mtu;
+ printk ("%s: truncating oversized transmit packet!\n", sl->dev->name);
+ sl->stats.tx_dropped++;
+ x25_asy_unlock(sl);
+ return;
+ }
+
+ p = icp;
+ count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a small amount of data,
+ * the transfer may be completed inside the driver.write()
+ * routine, because it runs with interrupts enabled.
+ * In that case we would *never* get the WRITE_WAKEUP event
+ * if we did not request it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ actual = sl->tty->driver->write(sl->tty, sl->xbuff, count);
+ sl->xleft = count - actual;
+ sl->xhead = sl->xbuff + actual;
+ /* VSV */
+ clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
+}
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void x25_asy_write_wakeup(struct tty_struct *tty)
+{
+ int actual;
+ struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
+ return;
+
+ if (sl->xleft <= 0)
+ {
+ /* Now the serial buffer is almost free and we can start
+ * transmission of another packet */
+ sl->stats.tx_packets++;
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ x25_asy_unlock(sl);
+ return;
+ }
+
+ actual = tty->driver->write(tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+}
+
+static void x25_asy_timeout(struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+
+ spin_lock(&sl->lock);
+ if (netif_queue_stopped(dev)) {
+ /* Maybe we should check the transmitter timeout here?
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+ "bad line quality" : "driver error");
+ sl->xleft = 0;
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ x25_asy_unlock(sl);
+ }
+ spin_unlock(&sl->lock);
+}
+
+/* Encapsulate an IP datagram and kick it into a TTY queue. */
+
+static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+ int err;
+
+ if (!netif_running(sl->dev)) {
+ printk("%s: xmit call when iface is down\n", dev->name);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ switch(skb->data[0])
+ {
+ case 0x00:break;
+ case 0x01: /* Connection request .. do nothing */
+ if((err=lapb_connect_request(dev))!=LAPB_OK)
+ printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
+ kfree_skb(skb);
+ return 0;
+ case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+ if((err=lapb_disconnect_request(dev))!=LAPB_OK)
+ printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
+ default:
+ kfree_skb(skb);
+ return 0;
+ }
+ skb_pull(skb,1); /* Remove control byte */
+ /*
+ * If we are busy already- too bad. We ought to be able
+ * to queue things at this point, to allow for a little
+ * frame buffer. Oh well...
+ * -----------------------------------------------------
+ * I hate queues in the X.25 driver. Maybe it's efficient,
+ * but for me latency is more important. ;)
+ * So, no queues !
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+
+ if((err=lapb_data_request(dev,skb))!=LAPB_OK)
+ {
+ printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
+ kfree_skb(skb);
+ return 0;
+ }
+ return 0;
+}
+
+
+/*
+ * LAPB interface boilerplate
+ */
+
+/*
+ * Called when I frame data arrives. We did the work above - throw it
+ * at the net layer.
+ */
+
+static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+ skb->dev->last_rx = jiffies;
+ return netif_rx(skb);
+}
+
+/*
+ * Data has emerged from the LAPB protocol machine. We don't handle
+ * busy cases too well. It's tricky to see how to do this nicely -
+ * perhaps lapb should allow us to bounce this ?
+ */
+
+static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
+{
+ struct x25_asy *sl=dev->priv;
+
+ spin_lock(&sl->lock);
+ if (netif_queue_stopped(sl->dev) || sl->tty == NULL)
+ {
+ spin_unlock(&sl->lock);
+ printk(KERN_ERR "x25_asy: tbusy drop\n");
+ kfree_skb(skb);
+ return;
+ }
+ /* We were not busy, so we are now... :-) */
+ if (skb != NULL)
+ {
+ x25_asy_lock(sl);
+ sl->stats.tx_bytes+=skb->len;
+ x25_asy_encaps(sl, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ }
+ spin_unlock(&sl->lock);
+}
+
+/*
+ * LAPB connection establish/down information.
+ */
+
+static void x25_asy_connected(struct net_device *dev, int reason)
+{
+ struct x25_asy *sl = dev->priv;
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ if ((skb = dev_alloc_skb(1)) == NULL) {
+ printk(KERN_ERR "lapbeth: out of memory\n");
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = 0x01;
+
+ skb->protocol = x25_type_trans(skb, sl->dev);
+ netif_rx(skb);
+ sl->dev->last_rx = jiffies;
+}
+
+static void x25_asy_disconnected(struct net_device *dev, int reason)
+{
+ struct x25_asy *sl = dev->priv;
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ if ((skb = dev_alloc_skb(1)) == NULL) {
+ printk(KERN_ERR "x25_asy: out of memory\n");
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = 0x02;
+
+ skb->protocol = x25_type_trans(skb, sl->dev);
+ netif_rx(skb);
+ sl->dev->last_rx = jiffies;
+}
+
+static struct lapb_register_struct x25_asy_callbacks = {
+ .connect_confirmation = x25_asy_connected,
+ .connect_indication = x25_asy_connected,
+ .disconnect_confirmation = x25_asy_disconnected,
+ .disconnect_indication = x25_asy_disconnected,
+ .data_indication = x25_asy_data_indication,
+ .data_transmit = x25_asy_data_transmit,
+
+};
+
+
+/* Open the low-level part of the X.25 channel. Easy! */
+static int x25_asy_open(struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+ unsigned long len;
+ int err;
+
+ if (sl->tty == NULL)
+ return -ENODEV;
+
+ /*
+ * Allocate the X.25 frame buffers:
+ *
+ * rbuff Receive buffer.
+ * xbuff Transmit buffer.
+ */
+
+ len = dev->mtu * 2;
+
+ sl->rbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
+ if (sl->rbuff == NULL) {
+ goto norbuff;
+ }
+ sl->xbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
+ if (sl->xbuff == NULL) {
+ goto noxbuff;
+ }
+
+ sl->buffsize = len;
+ sl->rcount = 0;
+ sl->xleft = 0;
+ sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */
+
+ netif_start_queue(dev);
+
+ /*
+ * Now attach LAPB
+ */
+ if((err=lapb_register(dev, &x25_asy_callbacks))==LAPB_OK)
+ return 0;
+
+ /* Cleanup */
+ kfree(sl->xbuff);
+noxbuff:
+ kfree(sl->rbuff);
+norbuff:
+ return -ENOMEM;
+}
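[Editorial sketch] The len + 4 sizing above leaves headroom for the worst case of x25_asy_esc() further down: every payload byte may expand to an escape pair, plus the leading and trailing END flags, so an mtu-sized packet can produce at most 2 * mtu + 2 escaped bytes. As a one-line sanity sketch:

	/* Worst-case escaped frame size for an mtu-byte payload (sketch). */
	static int x25_asy_worst_case(int mtu)
	{
		return 2 * mtu + 2;	/* fits in the 2 * mtu + 4 buffer allocated above */
	}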
+
+
+/* Close the low-level part of the X.25 channel. Easy! */
+static int x25_asy_close(struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+ int err;
+
+ spin_lock(&sl->lock);
+ if (sl->tty)
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ if((err=lapb_unregister(dev))!=LAPB_OK)
+ printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err);
+ spin_unlock(&sl->lock);
+ return 0;
+}
+
+static int x25_asy_receive_room(struct tty_struct *tty)
+{
+ return 65536; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of X.25 data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing.
+ */
+
+static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+{
+ struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+
+ if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
+ return;
+
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags)) {
+ sl->stats.rx_errors++;
+ }
+ cp++;
+ continue;
+ }
+ x25_asy_unesc(sl, *cp++);
+ }
+}
+
+/*
+ * Open the high-level part of the X.25 channel.
+ * This function is called by the TTY module when the
+ * X.25 line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free X.25 channel...
+ */
+
+static int x25_asy_open_tty(struct tty_struct *tty)
+{
+ struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+ int err;
+
+ /* First make sure we're not already connected. */
+ if (sl && sl->magic == X25_ASY_MAGIC) {
+ return -EEXIST;
+ }
+
+ /* OK. Find a free X.25 channel to use. */
+ if ((sl = x25_asy_alloc()) == NULL) {
+ return -ENFILE;
+ }
+
+ sl->tty = tty;
+ tty->disc_data = sl;
+ if (tty->driver->flush_buffer) {
+ tty->driver->flush_buffer(tty);
+ }
+ if (tty->ldisc.flush_buffer) {
+ tty->ldisc.flush_buffer(tty);
+ }
+
+ /* Restore default settings */
+ sl->dev->type = ARPHRD_X25;
+
+ /* Perform the low-level X.25 async init */
+ if ((err = x25_asy_open(sl->dev)))
+ return err;
+
+ /* Done. We have linked the TTY line to a channel. */
+ return sl->dev->base_addr;
+}
+
+
+/*
+ * Close down an X.25 channel.
+ * This means flushing out any pending queues, and then restoring the
+ * TTY line discipline to what it was before it got hooked to X.25
+ * (which usually is TTY again).
+ */
+static void x25_asy_close_tty(struct tty_struct *tty)
+{
+ struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != X25_ASY_MAGIC)
+ return;
+
+ if (sl->dev->flags & IFF_UP)
+ {
+ (void) dev_close(sl->dev);
+ }
+
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ x25_asy_free(sl);
+}
+
+
+static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+
+ return &sl->stats;
+}
+
+
+ /************************************************************************
+ * STANDARD X.25 ENCAPSULATION *
+ ************************************************************************/
+
+int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c;
+
+ /*
+ * Send an initial END character to flush out any
+ * data that may have accumulated in the receiver
+ * due to line noise.
+ */
+
+ *ptr++ = X25_END; /* Send 01111110 bit seq */
+
+ /*
+ * For each byte in the packet, send the appropriate
+ * character sequence, according to the X.25 protocol.
+ */
+
+ while (len-- > 0)
+ {
+ switch(c = *s++)
+ {
+ case X25_END:
+ *ptr++ = X25_ESC;
+ *ptr++ = X25_ESCAPE(X25_END);
+ break;
+ case X25_ESC:
+ *ptr++ = X25_ESC;
+ *ptr++ = X25_ESCAPE(X25_ESC);
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
+ }
+ *ptr++ = X25_END;
+ return (ptr - d);
+}
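[Editorial sketch] As a worked example of the escaping above: each END (0x7E) or ESC (0x7D) byte in the payload becomes ESC followed by the byte XORed with 0x20, and the whole frame is bracketed by END flags. The stand-alone snippet below only illustrates the transformation; it is not driver code.

	#include <stdio.h>

	#define X25_END		0x7E
	#define X25_ESC		0x7D
	#define X25_ESCAPE(x)	((x) ^ 0x20)

	int main(void)
	{
		/* payload containing both special bytes */
		unsigned char in[] = { 0x41, 0x7E, 0x7D, 0x42 };
		unsigned char out[2 + 2 * sizeof(in)];
		unsigned char *p = out;
		unsigned int i;

		*p++ = X25_END;				/* leading flag */
		for (i = 0; i < sizeof(in); i++) {
			if (in[i] == X25_END || in[i] == X25_ESC) {
				*p++ = X25_ESC;
				*p++ = X25_ESCAPE(in[i]);
			} else {
				*p++ = in[i];
			}
		}
		*p++ = X25_END;				/* trailing flag */

		for (i = 0; i < (unsigned int)(p - out); i++)
			printf("%02X ", (unsigned int)out[i]);
		printf("\n");	/* prints: 7E 41 7D 5E 7D 5D 42 7E */
		return 0;
	}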
+
+static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+{
+
+ switch(s)
+ {
+ case X25_END:
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2))
+ {
+ x25_asy_bump(sl);
+ }
+ clear_bit(SLF_ESCAPE, &sl->flags);
+ sl->rcount = 0;
+ return;
+
+ case X25_ESC:
+ set_bit(SLF_ESCAPE, &sl->flags);
+ return;
+
+ case X25_ESCAPE(X25_ESC):
+ case X25_ESCAPE(X25_END):
+ if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
+ s = X25_UNESCAPE(s);
+ break;
+ }
+ if (!test_bit(SLF_ERROR, &sl->flags))
+ {
+ if (sl->rcount < sl->buffsize)
+ {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+ sl->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+}
+
+
+/* Perform I/O control on an active X.25 channel. */
+static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != X25_ASY_MAGIC)
+ return -EINVAL;
+
+ switch(cmd) {
+ case SIOCGIFNAME:
+ if (copy_to_user((void __user *)arg, sl->dev->name,
+ strlen(sl->dev->name) + 1))
+ return -EFAULT;
+ return 0;
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+ /* Allow stty to read, but not set, the serial port */
+ case TCGETS:
+ case TCGETA:
+ return n_tty_ioctl(tty, file, cmd, arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int x25_asy_open_dev(struct net_device *dev)
+{
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+ if(sl->tty==NULL)
+ return -ENODEV;
+ return 0;
+}
+
+/* Initialise the X.25 driver. Called by the device init code */
+static void x25_asy_setup(struct net_device *dev)
+{
+ struct x25_asy *sl = dev->priv;
+
+ sl->magic = X25_ASY_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
+ set_bit(SLF_INUSE, &sl->flags);
+
+ /*
+ * Finish setting up the DEVICE info.
+ */
+
+ dev->mtu = SL_MTU;
+ dev->hard_start_xmit = x25_asy_xmit;
+ dev->tx_timeout = x25_asy_timeout;
+ dev->watchdog_timeo = HZ*20;
+ dev->open = x25_asy_open_dev;
+ dev->stop = x25_asy_close;
+ dev->get_stats = x25_asy_get_stats;
+ dev->change_mtu = x25_asy_change_mtu;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_X25;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+}
+
+static struct tty_ldisc x25_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "X.25",
+ .open = x25_asy_open_tty,
+ .close = x25_asy_close_tty,
+ .ioctl = x25_asy_ioctl,
+ .receive_buf = x25_asy_receive_buf,
+ .receive_room = x25_asy_receive_room,
+ .write_wakeup = x25_asy_write_wakeup,
+};
+
+static int __init init_x25_asy(void)
+{
+ if (x25_asy_maxdev < 4)
+ x25_asy_maxdev = 4; /* Sanity */
+
+ printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
+ "(dynamic channels, max=%d).\n", x25_asy_maxdev );
+
+ x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev,
+ GFP_KERNEL);
+ if (!x25_asy_devs) {
+ printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
+ "array! Uaargh! (-> No X.25 available)\n");
+ return -ENOMEM;
+ }
+ memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev);
+
+ return tty_register_ldisc(N_X25, &x25_ldisc);
+}
+
+
+static void __exit exit_x25_asy(void)
+{
+ struct net_device *dev;
+ int i;
+
+ for (i = 0; i < x25_asy_maxdev; i++) {
+ dev = x25_asy_devs[i];
+ if (dev) {
+ struct x25_asy *sl = dev->priv;
+
+ spin_lock_bh(&sl->lock);
+ if (sl->tty)
+ tty_hangup(sl->tty);
+
+ spin_unlock_bh(&sl->lock);
+ /*
+ * VSV = if dev->start==0, then device
+ * unregistered while close proc.
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+
+ kfree(x25_asy_devs);
+ tty_register_ldisc(N_X25, NULL);
+}
+
+module_init(init_x25_asy);
+module_exit(exit_x25_asy);
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
new file mode 100644
index 000000000000..41770200ceb6
--- /dev/null
+++ b/drivers/net/wan/x25_asy.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_X25_ASY_H
+#define _LINUX_X25_ASY_H
+
+/* X.25 asy configuration. */
+#define SL_NRUNIT 256 /* MAX number of X.25 channels;
+ This can be overridden with
+ insmod -ox25_asy_maxdev=nnn */
+#define SL_MTU 256
+
+/* X25 async protocol characters. */
+#define X25_END 0x7E /* indicates end of frame */
+#define X25_ESC 0x7D /* indicates byte stuffing */
+#define X25_ESCAPE(x) ((x)^0x20)
+#define X25_UNESCAPE(x) ((x)^0x20)
+
+
+struct x25_asy {
+ int magic;
+
+ /* Various fields. */
+ spinlock_t lock;
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char *rbuff; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char *xbuff; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next byte to XMIT */
+ int xleft; /* bytes left in XMIT queue */
+
+ /* X.25 interface statistics. */
+ struct net_device_stats stats;
+
+ int buffsize; /* Max buffers sizes */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_INUSE 0 /* Channel in use */
+#define SLF_ESCAPE 1 /* ESC received */
+#define SLF_ERROR 2 /* Parity, etc. error */
+#define SLF_OUTWAIT 4 /* Waiting for output */
+};
+
+
+
+#define X25_ASY_MAGIC 0x5303
+
+extern int x25_asy_init(struct net_device *dev);
+
+#endif /* _LINUX_X25_ASY_H */
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
new file mode 100644
index 000000000000..caa48f12fd0f
--- /dev/null
+++ b/drivers/net/wan/z85230.c
@@ -0,0 +1,1851 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * (c) Copyright 2000, 2001 Red Hat Inc
+ *
+ * Development of this driver was funded by Equiinet Ltd
+ * http://www.equiinet.com
+ *
+ * ChangeLog:
+ *
+ * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
+ * unification of all the Z85x30 asynchronous drivers for real.
+ *
+ * DMA now uses get_free_page as kmalloc buffers may span a 64K
+ * boundary.
+ *
+ * Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
+ *
+ * Performance
+ *
+ * Z85230:
+ * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
+ * X.25 is not unrealistic on all machines. DMA mode can in theory
+ * handle T1/E1 quite nicely. In practice the limit seems to be about
+ * 512Kbit->1Mbit depending on motherboard.
+ *
+ * Z85C30:
+ * 64K will take DMA, 9600 baud X.25 should be ok.
+ *
+ * Z8530:
+ * Synchronous mode without DMA is unlikely to pass about 2400 baud.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#define RT_LOCK
+#define RT_UNLOCK
+#include <linux/spinlock.h>
+
+#include <net/syncppp.h>
+#include "z85230.h"
+
+
+/**
+ * z8530_read_port - Architecture specific interface function
+ * @p: port to read
+ *
+ * Provided port access methods. The Comtrol SV11 requires no delays
+ * between accesses and uses PC I/O. Some drivers may need a 5uS delay
+ *
+ * In the longer term this should become an architecture specific
+ * section so that this can become a generic driver interface for all
+ * platforms. For now we only handle PC I/O ports with or without the
+ * dread 5uS sanity delay.
+ *
+ * The caller must hold sufficient locks to avoid violating the horrible
+ * 5uS delay rule.
+ */
+
+static inline int z8530_read_port(unsigned long p)
+{
+ u8 r=inb(Z8530_PORT_OF(p));
+ if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
+ udelay(5);
+ return r;
+}
+
+/**
+ * z8530_write_port - Architecture specific interface function
+ * @p: port to write
+ * @d: value to write
+ *
+ * Write a value to a port with delays if need be. Note that the
+ * caller must hold locks to avoid read/writes from other contexts
+ * violating the 5uS rule
+ *
+ * In the longer term this should become an architecture specific
+ * section so that this can become a generic driver interface for all
+ * platforms. For now we only handle PC I/O ports with or without the
+ * dread 5uS sanity delay.
+ */
+
+
+static inline void z8530_write_port(unsigned long p, u8 d)
+{
+ outb(d,Z8530_PORT_OF(p));
+ if(p&Z8530_PORT_SLEEP)
+ udelay(5);
+}
+
+
+
+static void z8530_rx_done(struct z8530_channel *c);
+static void z8530_tx_done(struct z8530_channel *c);
+
+
+/**
+ * read_zsreg - Read a register from a Z85230
+ * @c: Z8530 channel to read from (2 per chip)
+ * @reg: Register to read
+ * FIXME: Use a spinlock.
+ *
+ * Most of the Z8530 registers are indexed off the control registers.
+ * A read is done by writing to the control register and reading the
+ * register back. The caller must hold the lock
+ */
+
+static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
+{
+ if(reg)
+ z8530_write_port(c->ctrlio, reg);
+ return z8530_read_port(c->ctrlio);
+}
+
+/**
+ * read_zsdata - Read the data port of a Z8530 channel
+ * @c: The Z8530 channel to read the data port from
+ *
+ * The data port provides fast access to some things. We still
+ * have all the 5uS delays to worry about.
+ */
+
+static inline u8 read_zsdata(struct z8530_channel *c)
+{
+ u8 r;
+ r=z8530_read_port(c->dataio);
+ return r;
+}
+
+/**
+ * write_zsreg - Write to a Z8530 channel register
+ * @c: The Z8530 channel
+ * @reg: Register number
+ * @val: Value to write
+ *
+ * Write a value to an indexed register. The caller must hold the lock
+ * to honour the irritating delay rules. We know about register 0
+ * being fast to access.
+ *
+ * Assumes c->lock is held.
+ */
+static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
+{
+ if(reg)
+ z8530_write_port(c->ctrlio, reg);
+ z8530_write_port(c->ctrlio, val);
+
+}
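[Editorial sketch] Since most Z8530 write registers cannot be read back, this driver keeps a shadow copy in c->regs[] and pushes updates through write_zsreg(); a minimal usage sketch within this driver (the chosen register bit is purely illustrative, and the caller is assumed to hold the channel lock):

	/* Sketch: set a bit in WR5 via the shadow copy, as z8530_rtsdtr()
	 * below does for RTS/DTR (illustrative only). */
	static void z8530_set_wr5_bit(struct z8530_channel *c, u8 bit)
	{
		c->regs[5] |= bit;		/* update the shadow copy */
		write_zsreg(c, R5, c->regs[5]);	/* push it to the chip */
	}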
+
+/**
+ * write_zsctrl - Write to a Z8530 control register
+ * @c: The Z8530 channel
+ * @val: Value to write
+ *
+ * Write directly to the control register on the Z8530
+ */
+
+static inline void write_zsctrl(struct z8530_channel *c, u8 val)
+{
+ z8530_write_port(c->ctrlio, val);
+}
+
+/**
+ * write_zsdata - Write to a Z8530 data register
+ * @c: The Z8530 channel
+ * @val: Value to write
+ *
+ * Write directly to the data register on the Z8530
+ */
+
+
+static inline void write_zsdata(struct z8530_channel *c, u8 val)
+{
+ z8530_write_port(c->dataio, val);
+}
+
+/*
+ * Register loading parameters for a dead port
+ */
+
+u8 z8530_dead_port[]=
+{
+ 255
+};
+
+EXPORT_SYMBOL(z8530_dead_port);
+
+/*
+ * Register loading parameters for currently supported circuit types
+ */
+
+
+/*
+ * Data clocked by telco end. This is the correct data for the UK
+ * "kilostream" service, and most other similar services.
+ */
+
+u8 z8530_hdlc_kilostream[]=
+{
+ 4, SYNC_ENAB|SDLC|X1CLK,
+ 2, 0, /* No vector */
+ 1, 0,
+ 3, ENT_HM|RxCRC_ENAB|Rx8,
+ 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 9, 0, /* Disable interrupts */
+ 6, 0xFF,
+ 7, FLAG,
+ 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
+ 11, TCTRxCP,
+ 14, DISDPLL,
+ 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
+ 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
+ 9, NV|MIE|NORESET,
+ 255
+};
+
+EXPORT_SYMBOL(z8530_hdlc_kilostream);
+
+/*
+ * As above but for enhanced chips.
+ */
+
+u8 z8530_hdlc_kilostream_85230[]=
+{
+ 4, SYNC_ENAB|SDLC|X1CLK,
+ 2, 0, /* No vector */
+ 1, 0,
+ 3, ENT_HM|RxCRC_ENAB|Rx8,
+ 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 9, 0, /* Disable interrupts */
+ 6, 0xFF,
+ 7, FLAG,
+ 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
+ 11, TCTRxCP,
+ 14, DISDPLL,
+ 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
+ 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
+ 9, NV|MIE|NORESET,
+ 23, 3, /* Extended mode AUTO TX and EOM*/
+
+ 255
+};
+
+EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
+
+/**
+ * z8530_flush_fifo - Flush on chip RX FIFO
+ * @c: Channel to flush
+ *
+ * Flush the receive FIFO. There is no specific option for this, we
+ * blindly read bytes and discard them. Reading when there is no data
+ * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
+ *
+ * All locking is handled for the caller. On return data may still be
+ * present if it arrived during the flush.
+ */
+
+static void z8530_flush_fifo(struct z8530_channel *c)
+{
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ if(c->dev->type==Z85230)
+ {
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ read_zsreg(c, R1);
+ }
+}
+
+/**
+ * z8530_rtsdtr - Control the outgoing DTR/RTS line
+ * @c: The Z8530 channel to control
+ * @set: 1 to set, 0 to clear
+ *
+ * Sets or clears DTR/RTS on the requested line. All locking is handled
+ * by the caller. For now we assume all boards use the actual RTS/DTR
+ * on the chip. Apparently one or two don't. We'll scream about them
+ * later.
+ */
+
+static void z8530_rtsdtr(struct z8530_channel *c, int set)
+{
+ if (set)
+ c->regs[5] |= (RTS | DTR);
+ else
+ c->regs[5] &= ~(RTS | DTR);
+ write_zsreg(c, R5, c->regs[5]);
+}
+
+/**
+ * z8530_rx - Handle a PIO receive event
+ * @c: Z8530 channel to process
+ *
+ * Receive handler for receiving in PIO mode. This is much like the
+ * async one but not quite the same or as complex.
+ *
+ * Note: It's intended that this handler can easily be separated from
+ * the main code to run realtime. That'll be needed for some machines
+ * (eg to ever clock 64kbits on a sparc ;)).
+ *
+ * The RT_LOCK macros don't do anything now. Keep the code covered
+ * by them as short as possible in all circumstances - clocks cost
+ * baud. The interrupt handler is assumed to be atomic w.r.t.
+ * other code - this is true in the RT case too.
+ *
+ * We only cover the sync cases for this. If you want 2Mbit async
+ * do it yourself but consider medical assistance first. This non DMA
+ * synchronous mode is portable code. The DMA mode assumes PCI-like
+ * ISA DMA.
+ *
+ * Called with the device lock held
+ */
+
+static void z8530_rx(struct z8530_channel *c)
+{
+ u8 ch,stat;
+ spin_lock(c->lock);
+
+ while(1)
+ {
+ /* FIFO empty ? */
+ if(!(read_zsreg(c, R0)&1))
+ break;
+ ch=read_zsdata(c);
+ stat=read_zsreg(c, R1);
+
+ /*
+ * Overrun ?
+ */
+ if(c->count < c->max)
+ {
+ *c->dptr++=ch;
+ c->count++;
+ }
+
+ if(stat&END_FR)
+ {
+
+ /*
+ * Error ?
+ */
+ if(stat&(Rx_OVR|CRC_ERR))
+ {
+ /* Rewind the buffer and return */
+ if(c->skb)
+ c->dptr=c->skb->data;
+ c->count=0;
+ if(stat&Rx_OVR)
+ {
+ printk(KERN_WARNING "%s: overrun\n", c->dev->name);
+ c->rx_overrun++;
+ }
+ if(stat&CRC_ERR)
+ {
+ c->rx_crc_err++;
+ /* printk("crc error\n"); */
+ }
+ /* Shove the frame upstream */
+ }
+ else
+ {
+ /*
+ * Drop the lock for RX processing, or
+ * there are deadlocks
+ */
+ z8530_rx_done(c);
+ write_zsctrl(c, RES_Rx_CRC);
+ }
+ }
+ }
+ /*
+ * Clear irq
+ */
+ write_zsctrl(c, ERR_RES);
+ write_zsctrl(c, RES_H_IUS);
+ spin_unlock(c->lock);
+}
+
+
+/**
+ * z8530_tx - Handle a PIO transmit event
+ * @c: Z8530 channel to process
+ *
+ * Z8530 transmit interrupt handler for the PIO mode. The basic
+ * idea is to attempt to keep the FIFO fed. We fill as many bytes
+ * in as possible; it's quite possible that we won't keep up with the
+ * data rate otherwise.
+ */
+
+static void z8530_tx(struct z8530_channel *c)
+{
+ spin_lock(c->lock);
+ while(c->txcount) {
+ /* FIFO full ? */
+ if(!(read_zsreg(c, R0)&4))
+ break;
+ c->txcount--;
+ /*
+ * Shovel out the byte
+ */
+ write_zsreg(c, R8, *c->tx_ptr++);
+ write_zsctrl(c, RES_H_IUS);
+ /* We are about to underflow */
+ if(c->txcount==0)
+ {
+ write_zsctrl(c, RES_EOM_L);
+ write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ }
+ }
+
+
+ /*
+ * End of frame TX - fire another one
+ */
+
+ write_zsctrl(c, RES_Tx_P);
+
+ z8530_tx_done(c);
+ write_zsctrl(c, RES_H_IUS);
+ spin_unlock(c->lock);
+}
+
+/**
+ * z8530_status - Handle a PIO status exception
+ * @chan: Z8530 channel to process
+ *
+ * A status event occurred in PIO synchronous mode. There are several
+ * reasons the chip will bother us here. A transmit underrun means we
+ * failed to feed the chip fast enough and just broke a packet. A DCD
+ * change is a line up or down. We communicate that back to the protocol
+ * layer for synchronous PPP to renegotiate.
+ */
+
+static void z8530_status(struct z8530_channel *chan)
+{
+ u8 status, altered;
+
+ spin_lock(chan->lock);
+ status=read_zsreg(chan, R0);
+ altered=chan->status^status;
+
+ chan->status=status;
+
+ if(status&TxEOM)
+ {
+/* printk("%s: Tx underrun.\n", chan->dev->name); */
+ chan->stats.tx_fifo_errors++;
+ write_zsctrl(chan, ERR_RES);
+ z8530_tx_done(chan);
+ }
+
+ if(altered&chan->dcdcheck)
+ {
+ if(status&chan->dcdcheck)
+ {
+ printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
+ write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
+ if(chan->netdevice &&
+ ((chan->netdevice->type == ARPHRD_HDLC) ||
+ (chan->netdevice->type == ARPHRD_PPP)))
+ sppp_reopen(chan->netdevice);
+ }
+ else
+ {
+ printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
+ write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
+ z8530_flush_fifo(chan);
+ }
+
+ }
+ write_zsctrl(chan, RES_EXT_INT);
+ write_zsctrl(chan, RES_H_IUS);
+ spin_unlock(chan->lock);
+}
+
+struct z8530_irqhandler z8530_sync=
+{
+ z8530_rx,
+ z8530_tx,
+ z8530_status
+};
+
+EXPORT_SYMBOL(z8530_sync);
+
+/**
+ * z8530_dma_rx - Handle a DMA RX event
+ * @chan: Channel to handle
+ *
+ * Non bus mastering DMA interfaces for the Z8x30 devices. This
+ * is really pretty PC specific. The DMA mode means that most receive
+ * events are handled by the DMA hardware. We get a kick here only if
+ * a frame ended.
+ */
+
+static void z8530_dma_rx(struct z8530_channel *chan)
+{
+ spin_lock(chan->lock);
+ if(chan->rxdma_on)
+ {
+ /* Special condition check only */
+ u8 status;
+
+ read_zsreg(chan, R7);
+ read_zsreg(chan, R6);
+
+ status=read_zsreg(chan, R1);
+
+ if(status&END_FR)
+ {
+ z8530_rx_done(chan); /* Fire up the next one */
+ }
+ write_zsctrl(chan, ERR_RES);
+ write_zsctrl(chan, RES_H_IUS);
+ }
+ else
+ {
+ /* DMA is off right now, drain the slow way */
+ z8530_rx(chan);
+ }
+ spin_unlock(chan->lock);
+}
+
+/**
+ * z8530_dma_tx - Handle a DMA TX event
+ * @chan: The Z8530 channel to handle
+ *
+ * We have received an interrupt while doing DMA transmissions. It
+ * shouldn't happen. Scream loudly if it does.
+ */
+
+static void z8530_dma_tx(struct z8530_channel *chan)
+{
+	spin_lock(chan->lock);
+	if(!chan->dma_tx)
+	{
+		printk(KERN_WARNING "Hey who turned the DMA off?\n");
+		z8530_tx(chan);
+		spin_unlock(chan->lock);
+		return;
+	}
+	/* This shouldn't occur in DMA mode */
+	printk(KERN_ERR "DMA tx - bogus event!\n");
+	z8530_tx(chan);
+	spin_unlock(chan->lock);
+}
+
+/**
+ * z8530_dma_status - Handle a DMA status exception
+ * @chan: Z8530 channel to process
+ *
+ * A status event occurred on the Z8530. We receive these for two reasons
+ * when in DMA mode. Firstly if we finished a packet transfer we get one
+ * and kick the next packet out. Secondly we may see a DCD change and
+ * have to poke the protocol layer.
+ *
+ */
+
+static void z8530_dma_status(struct z8530_channel *chan)
+{
+ u8 status, altered;
+
+ status=read_zsreg(chan, R0);
+ altered=chan->status^status;
+
+ chan->status=status;
+
+
+ if(chan->dma_tx)
+ {
+ if(status&TxEOM)
+ {
+ unsigned long flags;
+
+ flags=claim_dma_lock();
+ disable_dma(chan->txdma);
+ clear_dma_ff(chan->txdma);
+ chan->txdma_on=0;
+ release_dma_lock(flags);
+ z8530_tx_done(chan);
+ }
+ }
+
+ spin_lock(chan->lock);
+ if(altered&chan->dcdcheck)
+ {
+ if(status&chan->dcdcheck)
+ {
+ printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
+ write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
+ if(chan->netdevice &&
+ ((chan->netdevice->type == ARPHRD_HDLC) ||
+ (chan->netdevice->type == ARPHRD_PPP)))
+ sppp_reopen(chan->netdevice);
+ }
+ else
+ {
+			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
+ write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
+ z8530_flush_fifo(chan);
+ }
+ }
+
+ write_zsctrl(chan, RES_EXT_INT);
+ write_zsctrl(chan, RES_H_IUS);
+ spin_unlock(chan->lock);
+}
+
+struct z8530_irqhandler z8530_dma_sync=
+{
+ z8530_dma_rx,
+ z8530_dma_tx,
+ z8530_dma_status
+};
+
+EXPORT_SYMBOL(z8530_dma_sync);
+
+struct z8530_irqhandler z8530_txdma_sync=
+{
+ z8530_rx,
+ z8530_dma_tx,
+ z8530_dma_status
+};
+
+EXPORT_SYMBOL(z8530_txdma_sync);
+
+/**
+ * z8530_rx_clear - Handle RX events from a stopped chip
+ * @c: Z8530 channel to shut up
+ *
+ * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
+ * For machines with PCI Z85x30 cards, or level triggered interrupts
+ * (eg the MacII) we must clear the interrupt cause or die.
+ */
+
+
+static void z8530_rx_clear(struct z8530_channel *c)
+{
+ /*
+ * Data and status bytes
+ */
+ u8 stat;
+
+ read_zsdata(c);
+ stat=read_zsreg(c, R1);
+
+ if(stat&END_FR)
+ write_zsctrl(c, RES_Rx_CRC);
+ /*
+ * Clear irq
+ */
+ write_zsctrl(c, ERR_RES);
+ write_zsctrl(c, RES_H_IUS);
+}
+
+/**
+ * z8530_tx_clear - Handle TX events from a stopped chip
+ * @c: Z8530 channel to shut up
+ *
+ * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
+ * For machines with PCI Z85x30 cards, or level triggered interrupts
+ * (eg the MacII) we must clear the interrupt cause or die.
+ */
+
+static void z8530_tx_clear(struct z8530_channel *c)
+{
+ write_zsctrl(c, RES_Tx_P);
+ write_zsctrl(c, RES_H_IUS);
+}
+
+/**
+ * z8530_status_clear - Handle status events from a stopped chip
+ * @chan: Z8530 channel to shut up
+ *
+ * Status interrupt vectors for a Z8530 that is in 'parked' mode.
+ * For machines with PCI Z85x30 cards, or level triggered interrupts
+ * (eg the MacII) we must clear the interrupt cause or die.
+ */
+
+static void z8530_status_clear(struct z8530_channel *chan)
+{
+ u8 status=read_zsreg(chan, R0);
+ if(status&TxEOM)
+ write_zsctrl(chan, ERR_RES);
+ write_zsctrl(chan, RES_EXT_INT);
+ write_zsctrl(chan, RES_H_IUS);
+}
+
+struct z8530_irqhandler z8530_nop=
+{
+ z8530_rx_clear,
+ z8530_tx_clear,
+ z8530_status_clear
+};
+
+
+EXPORT_SYMBOL(z8530_nop);
+
+/**
+ * z8530_interrupt - Handle an interrupt from a Z8530
+ * @irq: Interrupt number
+ * @dev_id: The Z8530 device that is interrupting.
+ * @regs: unused
+ *
+ * A Z85[2]30 device has stuck its hand in the air for attention.
+ * We scan both the channels on the chip for events and then call
+ * the channel specific call backs for each channel that has events.
+ * We have to use callback functions because the two channels can be
+ * in different modes.
+ *
+ * Locking is done for the handlers. Note that locking is done
+ * at the chip level (the 5uS delay issue is per chip not per
+ * channel). c->lock for both channels points to dev->lock
+ */
+
+irqreturn_t z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct z8530_dev *dev=dev_id;
+ u8 intr;
+ static volatile int locker=0;
+ int work=0;
+ struct z8530_irqhandler *irqs;
+
+ if(locker)
+ {
+ printk(KERN_ERR "IRQ re-enter\n");
+ return IRQ_NONE;
+ }
+ locker=1;
+
+ spin_lock(&dev->lock);
+
+ while(++work<5000)
+ {
+
+ intr = read_zsreg(&dev->chanA, R3);
+ if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
+ break;
+
+ /* This holds the IRQ status. On the 8530 you must read it from chan
+ A even though it applies to the whole chip */
+
+ /* Now walk the chip and see what it is wanting - it may be
+ an IRQ for someone else remember */
+
+ irqs=dev->chanA.irqs;
+
+ if(intr & (CHARxIP|CHATxIP|CHAEXT))
+ {
+ if(intr&CHARxIP)
+ irqs->rx(&dev->chanA);
+ if(intr&CHATxIP)
+ irqs->tx(&dev->chanA);
+ if(intr&CHAEXT)
+ irqs->status(&dev->chanA);
+ }
+
+ irqs=dev->chanB.irqs;
+
+ if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
+ {
+ if(intr&CHBRxIP)
+ irqs->rx(&dev->chanB);
+ if(intr&CHBTxIP)
+ irqs->tx(&dev->chanB);
+ if(intr&CHBEXT)
+ irqs->status(&dev->chanB);
+ }
+ }
+ spin_unlock(&dev->lock);
+ if(work==5000)
+ printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
+ /* Ok all done */
+ locker=0;
+ return IRQ_HANDLED;
+}
+
+EXPORT_SYMBOL(z8530_interrupt);
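+
+/*
+ * Illustrative sketch only, not part of this driver: a board driver
+ * normally hooks this handler with a shared interrupt before bringing
+ * the chip up. The name string here is invented.
+ */
+#if 0
+static int example_hook_irq(struct z8530_dev *dev)
+{
+	/* dev is passed back as dev_id so the handler can find the chip */
+	return request_irq(dev->irq, z8530_interrupt, SA_SHIRQ,
+			   "example z85230", dev);
+}
+#endif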
+
+static char reg_init[16]=
+{
+ 0,0,0,0,
+ 0,0,0,0,
+ 0,0,0,0,
+ 0x55,0,0,0
+};
+
+
+/**
+ * z8530_sync_open - Open a Z8530 channel for PIO
+ * @dev: The network interface we are using
+ * @c: The Z8530 channel to open in synchronous PIO mode
+ *
+ * Switch a Z8530 into synchronous mode without DMA assist. We
+ * raise the RTS/DTR and commence network operation.
+ */
+
+int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(c->lock, flags);
+
+ c->sync = 1;
+ c->mtu = dev->mtu+64;
+ c->count = 0;
+ c->skb = NULL;
+ c->skb2 = NULL;
+ c->irqs = &z8530_sync;
+
+ /* This loads the double buffer up */
+ z8530_rx_done(c); /* Load the frame ring */
+ z8530_rx_done(c); /* Load the backup frame */
+ z8530_rtsdtr(c,1);
+ c->dma_tx = 0;
+ c->regs[R1]|=TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+ spin_unlock_irqrestore(c->lock, flags);
+ return 0;
+}
+
+
+EXPORT_SYMBOL(z8530_sync_open);
+
+/**
+ * z8530_sync_close - Close a PIO Z8530 channel
+ * @dev: Network device to close
+ * @c: Z8530 channel to disassociate and move to idle
+ *
+ * Close down a Z8530 interface and switch its interrupt handlers
+ * to discard future events.
+ */
+
+int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
+{
+ u8 chk;
+ unsigned long flags;
+
+ spin_lock_irqsave(c->lock, flags);
+ c->irqs = &z8530_nop;
+ c->max = 0;
+ c->sync = 0;
+
+ chk=read_zsreg(c,R0);
+ write_zsreg(c, R3, c->regs[R3]);
+ z8530_rtsdtr(c,0);
+
+ spin_unlock_irqrestore(c->lock, flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_close);
+
+/**
+ * z8530_sync_dma_open - Open a Z8530 for DMA I/O
+ * @dev: The network device to attach
+ * @c: The Z8530 channel to configure in sync DMA mode.
+ *
+ * Set up a Z85x30 device for synchronous DMA in both directions. Two
+ * ISA DMA channels must be available for this to work. We assume ISA
+ * DMA driven I/O and PC limits on access.
+ */
+
+int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
+{
+ unsigned long cflags, dflags;
+
+ c->sync = 1;
+ c->mtu = dev->mtu+64;
+ c->count = 0;
+ c->skb = NULL;
+ c->skb2 = NULL;
+ /*
+ * Load the DMA interfaces up
+ */
+ c->rxdma_on = 0;
+ c->txdma_on = 0;
+
+ /*
+ * Allocate the DMA flip buffers. Limit by page size.
+ * Everyone runs 1500 mtu or less on wan links so this
+ * should be fine.
+ */
+
+ if(c->mtu > PAGE_SIZE/2)
+ return -EMSGSIZE;
+
+ c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if(c->rx_buf[0]==NULL)
+ return -ENOBUFS;
+ c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
+
+ c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if(c->tx_dma_buf[0]==NULL)
+ {
+ free_page((unsigned long)c->rx_buf[0]);
+ c->rx_buf[0]=NULL;
+ return -ENOBUFS;
+ }
+ c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
+
+ c->tx_dma_used=0;
+ c->dma_tx = 1;
+ c->dma_num=0;
+ c->dma_ready=1;
+
+ /*
+ * Enable DMA control mode
+ */
+
+ spin_lock_irqsave(c->lock, cflags);
+
+ /*
+ * TX DMA via DIR/REQ
+ */
+
+ c->regs[R14]|= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1]&= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /*
+ * RX DMA via W/Req
+ */
+
+ c->regs[R1]|= WT_FN_RDYFN;
+ c->regs[R1]|= WT_RDY_RT;
+ c->regs[R1]|= INT_ERR_Rx;
+ c->regs[R1]&= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1]|= WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /*
+ * DMA interrupts
+ */
+
+ /*
+ * Set up the DMA configuration
+ */
+
+ dflags=claim_dma_lock();
+
+ disable_dma(c->rxdma);
+ clear_dma_ff(c->rxdma);
+ set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+ set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
+ set_dma_count(c->rxdma, c->mtu);
+ enable_dma(c->rxdma);
+
+ disable_dma(c->txdma);
+ clear_dma_ff(c->txdma);
+ set_dma_mode(c->txdma, DMA_MODE_WRITE);
+ disable_dma(c->txdma);
+
+ release_dma_lock(dflags);
+
+ /*
+ * Select the DMA interrupt handlers
+ */
+
+ c->rxdma_on = 1;
+ c->txdma_on = 1;
+ c->tx_dma_used = 1;
+
+ c->irqs = &z8530_dma_sync;
+ z8530_rtsdtr(c,1);
+ write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+ spin_unlock_irqrestore(c->lock, cflags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_dma_open);
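+
+/*
+ * Illustrative sketch only, not part of this driver: before calling
+ * z8530_sync_dma_open() the board driver must own two ISA DMA channels
+ * and record them in the channel structure. The channel numbers and
+ * name strings are invented for the example.
+ */
+#if 0
+static int example_claim_dma(struct z8530_channel *c, int rxdma, int txdma)
+{
+	if (request_dma(rxdma, "example rx"))
+		return -EBUSY;
+	if (request_dma(txdma, "example tx")) {
+		free_dma(rxdma);
+		return -EBUSY;
+	}
+	c->rxdma = rxdma;	/* used by the DMA open/close paths */
+	c->txdma = txdma;
+	return 0;
+}
+#endif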
+
+/**
+ * z8530_sync_dma_close - Close down DMA I/O
+ * @dev: Network device to detach
+ * @c: Z8530 channel to move into discard mode
+ *
+ * Shut down a DMA mode synchronous interface. Halt the DMA, and
+ * free the buffers.
+ */
+
+int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
+{
+ u8 chk;
+ unsigned long flags;
+
+ c->irqs = &z8530_nop;
+ c->max = 0;
+ c->sync = 0;
+
+ /*
+ * Disable the PC DMA channels
+ */
+
+ flags=claim_dma_lock();
+ disable_dma(c->rxdma);
+ clear_dma_ff(c->rxdma);
+
+ c->rxdma_on = 0;
+
+ disable_dma(c->txdma);
+ clear_dma_ff(c->txdma);
+ release_dma_lock(flags);
+
+ c->txdma_on = 0;
+ c->tx_dma_used = 0;
+
+ spin_lock_irqsave(c->lock, flags);
+
+ /*
+ * Disable DMA control mode
+ */
+
+ c->regs[R1]&= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
+ c->regs[R1]|= INT_ALL_Rx;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R14]&= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if(c->rx_buf[0])
+ {
+ free_page((unsigned long)c->rx_buf[0]);
+ c->rx_buf[0]=NULL;
+ }
+ if(c->tx_dma_buf[0])
+ {
+ free_page((unsigned long)c->tx_dma_buf[0]);
+ c->tx_dma_buf[0]=NULL;
+ }
+ chk=read_zsreg(c,R0);
+ write_zsreg(c, R3, c->regs[R3]);
+ z8530_rtsdtr(c,0);
+
+ spin_unlock_irqrestore(c->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_dma_close);
+
+/**
+ * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
+ * @dev: The network device to attach
+ * @c: The Z8530 channel to configure in sync DMA mode.
+ *
+ * Set up a Z85x30 device for synchronous DMA transmission. One
+ * ISA DMA channel must be available for this to work. The receive
+ * side is run in PIO mode, but then it has the bigger FIFO.
+ */
+
+int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
+{
+ unsigned long cflags, dflags;
+
+ printk("Opening sync interface for TX-DMA\n");
+ c->sync = 1;
+ c->mtu = dev->mtu+64;
+ c->count = 0;
+ c->skb = NULL;
+ c->skb2 = NULL;
+
+ /*
+ * Allocate the DMA flip buffers. Limit by page size.
+ * Everyone runs 1500 mtu or less on wan links so this
+ * should be fine.
+ */
+
+ if(c->mtu > PAGE_SIZE/2)
+ return -EMSGSIZE;
+
+ c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if(c->tx_dma_buf[0]==NULL)
+ return -ENOBUFS;
+
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
+
+
+ spin_lock_irqsave(c->lock, cflags);
+
+ /*
+ * Load the PIO receive ring
+ */
+
+ z8530_rx_done(c);
+ z8530_rx_done(c);
+
+ /*
+ * Load the DMA interfaces up
+ */
+
+ c->rxdma_on = 0;
+ c->txdma_on = 0;
+
+ c->tx_dma_used=0;
+ c->dma_num=0;
+ c->dma_ready=1;
+ c->dma_tx = 1;
+
+ /*
+ * Enable DMA control mode
+ */
+
+ /*
+ * TX DMA via DIR/REQ
+ */
+ c->regs[R14]|= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1]&= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /*
+ * Set up the DMA configuration
+ */
+
+ dflags = claim_dma_lock();
+
+ disable_dma(c->txdma);
+ clear_dma_ff(c->txdma);
+ set_dma_mode(c->txdma, DMA_MODE_WRITE);
+ disable_dma(c->txdma);
+
+ release_dma_lock(dflags);
+
+ /*
+ * Select the DMA interrupt handlers
+ */
+
+ c->rxdma_on = 0;
+ c->txdma_on = 1;
+ c->tx_dma_used = 1;
+
+ c->irqs = &z8530_txdma_sync;
+ z8530_rtsdtr(c,1);
+ write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ spin_unlock_irqrestore(c->lock, cflags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_txdma_open);
+
+/**
+ * z8530_sync_txdma_close - Close down a TX driven DMA channel
+ * @dev: Network device to detach
+ * @c: Z8530 channel to move into discard mode
+ *
+ * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
+ * and free the buffers.
+ */
+
+int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
+{
+ unsigned long dflags, cflags;
+ u8 chk;
+
+
+ spin_lock_irqsave(c->lock, cflags);
+
+ c->irqs = &z8530_nop;
+ c->max = 0;
+ c->sync = 0;
+
+ /*
+ * Disable the PC DMA channels
+ */
+
+ dflags = claim_dma_lock();
+
+ disable_dma(c->txdma);
+ clear_dma_ff(c->txdma);
+ c->txdma_on = 0;
+ c->tx_dma_used = 0;
+
+ release_dma_lock(dflags);
+
+ /*
+ * Disable DMA control mode
+ */
+
+ c->regs[R1]&= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
+ c->regs[R1]|= INT_ALL_Rx;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R14]&= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if(c->tx_dma_buf[0])
+ {
+ free_page((unsigned long)c->tx_dma_buf[0]);
+ c->tx_dma_buf[0]=NULL;
+ }
+ chk=read_zsreg(c,R0);
+ write_zsreg(c, R3, c->regs[R3]);
+ z8530_rtsdtr(c,0);
+
+ spin_unlock_irqrestore(c->lock, cflags);
+ return 0;
+}
+
+
+EXPORT_SYMBOL(z8530_sync_txdma_close);
+
+
+/*
+ * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
+ * it exists...
+ */
+
+static char *z8530_type_name[]={
+ "Z8530",
+ "Z85C30",
+ "Z85230"
+};
+
+/**
+ * z8530_describe - Uniformly describe a Z8530 port
+ * @dev: Z8530 device to describe
+ * @mapping: string holding mapping type (eg "I/O" or "Mem")
+ * @io: the port value in question
+ *
+ * Describe a Z8530 in a standard format. We must pass the I/O as
+ * the port offset isn't predictable. The main reason for this function
+ * is to try and get a common format of report.
+ */
+
+void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
+{
+ printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
+ dev->name,
+ z8530_type_name[dev->type],
+ mapping,
+ Z8530_PORT_OF(io),
+ dev->irq);
+}
+
+EXPORT_SYMBOL(z8530_describe);
+
+/*
+ * Locked operation part of the z8530 init code
+ */
+
+static inline int do_z8530_init(struct z8530_dev *dev)
+{
+ /* NOP the interrupt handlers first - we might get a
+ floating IRQ transition when we reset the chip */
+ dev->chanA.irqs=&z8530_nop;
+ dev->chanB.irqs=&z8530_nop;
+ dev->chanA.dcdcheck=DCD;
+ dev->chanB.dcdcheck=DCD;
+
+ /* Reset the chip */
+ write_zsreg(&dev->chanA, R9, 0xC0);
+ udelay(200);
+	/* Now check it's valid */
+ write_zsreg(&dev->chanA, R12, 0xAA);
+ if(read_zsreg(&dev->chanA, R12)!=0xAA)
+ return -ENODEV;
+ write_zsreg(&dev->chanA, R12, 0x55);
+ if(read_zsreg(&dev->chanA, R12)!=0x55)
+ return -ENODEV;
+
+ dev->type=Z8530;
+
+ /*
+ * See the application note.
+ */
+
+ write_zsreg(&dev->chanA, R15, 0x01);
+
+ /*
+ * If we can set the low bit of R15 then
+ * the chip is enhanced.
+ */
+
+ if(read_zsreg(&dev->chanA, R15)==0x01)
+ {
+ /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
+ /* Put a char in the fifo */
+ write_zsreg(&dev->chanA, R8, 0);
+ if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
+ dev->type = Z85230; /* Has a FIFO */
+ else
+ dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
+ }
+
+ /*
+ * The code assumes R7' and friends are
+ * off. Use write_zsext() for these and keep
+ * this bit clear.
+ */
+
+ write_zsreg(&dev->chanA, R15, 0);
+
+ /*
+ * At this point it looks like the chip is behaving
+ */
+
+ memcpy(dev->chanA.regs, reg_init, 16);
+ memcpy(dev->chanB.regs, reg_init ,16);
+
+ return 0;
+}
+
+/**
+ * z8530_init - Initialise a Z8530 device
+ * @dev: Z8530 device to initialise.
+ *
+ * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
+ * is present, identify the type and then program it to hopefully
+ * keep quiet and behave. This matters a lot, a Z8530 in the wrong
+ * state will sometimes get into stupid modes generating 10kHz
+ * interrupt streams and the like.
+ *
+ * We set the interrupt handler up to discard any events, in case
+ * we get them during reset or setup.
+ *
+ * Return 0 for success, or a negative value indicating the problem
+ * in errno form.
+ */
+
+int z8530_init(struct z8530_dev *dev)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Set up the chip level lock */
+ spin_lock_init(&dev->lock);
+ dev->chanA.lock = &dev->lock;
+ dev->chanB.lock = &dev->lock;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = do_z8530_init(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
+}
+
+
+EXPORT_SYMBOL(z8530_init);
+
+/**
+ * z8530_shutdown - Shutdown a Z8530 device
+ * @dev: The Z8530 chip to shutdown
+ *
+ * We set the interrupt handlers to silence any interrupts. We then
+ * reset the chip and wait 100uS to be sure the reset completed. Just
+ * in case the caller then tries to do stuff.
+ *
+ * This is called without the lock held
+ */
+
+int z8530_shutdown(struct z8530_dev *dev)
+{
+ unsigned long flags;
+ /* Reset the chip */
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->chanA.irqs=&z8530_nop;
+ dev->chanB.irqs=&z8530_nop;
+ write_zsreg(&dev->chanA, R9, 0xC0);
+ /* We must lock the udelay, the chip is offlimits here */
+ udelay(100);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_shutdown);
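+
+/*
+ * Illustrative sketch only, not part of this driver: teardown mirrors
+ * bring-up - close the channel, quiesce the chip, then release the IRQ.
+ * The function itself is invented for the example.
+ */
+#if 0
+static void example_tear_down(struct z8530_dev *board, struct net_device *netdev)
+{
+	z8530_sync_close(netdev, &board->chanA);
+	z8530_shutdown(board);
+	free_irq(board->irq, board);
+}
+#endif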
+
+/**
+ * z8530_channel_load - Load channel data
+ * @c: Z8530 channel to configure
+ * @rtable: table of register, value pairs
+ * FIXME: ioctl to allow user uploaded tables
+ *
+ * Load a Z8530 channel up from the system data. We use +16 to
+ * indicate the "prime" registers. The value 255 terminates the
+ * table.
+ */
+
+int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(c->lock, flags);
+
+ while(*rtable!=255)
+ {
+ int reg=*rtable++;
+ if(reg>0x0F)
+ write_zsreg(c, R15, c->regs[15]|1);
+ write_zsreg(c, reg&0x0F, *rtable);
+ if(reg>0x0F)
+ write_zsreg(c, R15, c->regs[15]&~1);
+ c->regs[reg]=*rtable++;
+ }
+ c->rx_function=z8530_null_rx;
+ c->skb=NULL;
+ c->tx_skb=NULL;
+ c->tx_next_skb=NULL;
+ c->mtu=1500;
+ c->max=0;
+ c->count=0;
+ c->status=read_zsreg(c, R0);
+ c->sync=1;
+ write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+ spin_unlock_irqrestore(c->lock, flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_channel_load);
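+
+/*
+ * Illustrative sketch only, not part of this driver: the usual bring-up
+ * order in a board driver is init, load a register table into each
+ * channel, then open the channel in the chosen mode. The function and
+ * the netdevice wiring are invented for the example.
+ */
+#if 0
+static int example_bring_up(struct z8530_dev *board, struct net_device *netdev)
+{
+	int err = z8530_init(board);
+	if (err)
+		return err;
+	/* Telco clocked HDLC settings for channel A */
+	z8530_channel_load(&board->chanA, z8530_hdlc_kilostream);
+	/* Park the unused channel */
+	z8530_channel_load(&board->chanB, z8530_dead_port);
+	board->chanA.netdevice = netdev;
+	return z8530_sync_open(netdev, &board->chanA);
+}
+#endif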
+
+
+/**
+ * z8530_tx_begin - Begin packet transmission
+ * @c: The Z8530 channel to kick
+ *
+ * This is the speed sensitive side of transmission. If we are called
+ * and no buffer is being transmitted we commence the next buffer. If
+ * nothing is queued we idle the sync.
+ *
+ * Note: We are handling this code path in the interrupt path, keep it
+ * fast or bad things will happen.
+ *
+ * Called with the lock held.
+ */
+
+static void z8530_tx_begin(struct z8530_channel *c)
+{
+ unsigned long flags;
+ if(c->tx_skb)
+ return;
+
+ c->tx_skb=c->tx_next_skb;
+ c->tx_next_skb=NULL;
+ c->tx_ptr=c->tx_next_ptr;
+
+ if(c->tx_skb==NULL)
+ {
+ /* Idle on */
+ if(c->dma_tx)
+ {
+ flags=claim_dma_lock();
+ disable_dma(c->txdma);
+ /*
+ * Check if we crapped out.
+ */
+ if(get_dma_residue(c->txdma))
+ {
+ c->stats.tx_dropped++;
+ c->stats.tx_fifo_errors++;
+ }
+ release_dma_lock(flags);
+ }
+ c->txcount=0;
+ }
+ else
+ {
+ c->txcount=c->tx_skb->len;
+
+
+ if(c->dma_tx)
+ {
+ /*
+ * FIXME. DMA is broken for the original 8530,
+ * on the older parts we need to set a flag and
+ * wait for a further TX interrupt to fire this
+ * stage off
+ */
+
+ flags=claim_dma_lock();
+ disable_dma(c->txdma);
+
+ /*
+ * These two are needed by the 8530/85C30
+ * and must be issued when idling.
+ */
+
+ if(c->dev->type!=Z85230)
+ {
+ write_zsctrl(c, RES_Tx_CRC);
+ write_zsctrl(c, RES_EOM_L);
+ }
+ write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ clear_dma_ff(c->txdma);
+ set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
+ set_dma_count(c->txdma, c->txcount);
+ enable_dma(c->txdma);
+ release_dma_lock(flags);
+ write_zsctrl(c, RES_EOM_L);
+ write_zsreg(c, R5, c->regs[R5]|TxENAB);
+ }
+ else
+ {
+
+ /* ABUNDER off */
+ write_zsreg(c, R10, c->regs[10]);
+ write_zsctrl(c, RES_Tx_CRC);
+
+ while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
+ {
+ write_zsreg(c, R8, *c->tx_ptr++);
+ c->txcount--;
+ }
+
+ }
+ }
+ /*
+ * Since we emptied tx_skb we can ask for more
+ */
+ netif_wake_queue(c->netdevice);
+}
+
+/**
+ * z8530_tx_done - TX complete callback
+ * @c: The channel that completed a transmit.
+ *
+ * This is called when we complete a packet send. We wake the queue,
+ * start the next packet going and then free the buffer of the existing
+ * packet. This code is fairly timing sensitive.
+ *
+ * Called with the register lock held.
+ */
+
+static void z8530_tx_done(struct z8530_channel *c)
+{
+ struct sk_buff *skb;
+
+ /* Actually this can happen.*/
+ if(c->tx_skb==NULL)
+ return;
+
+ skb=c->tx_skb;
+ c->tx_skb=NULL;
+ z8530_tx_begin(c);
+ c->stats.tx_packets++;
+ c->stats.tx_bytes+=skb->len;
+ dev_kfree_skb_irq(skb);
+}
+
+/**
+ * z8530_null_rx - Discard a packet
+ * @c: The channel the packet arrived on
+ * @skb: The buffer
+ *
+ * We point the receive handler at this function when idle. Instead
+ * of syncppp processing the frames we get to throw them away.
+ */
+
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+EXPORT_SYMBOL(z8530_null_rx);
+
+/**
+ * z8530_rx_done - Receive completion callback
+ * @c: The channel that completed a receive
+ *
+ * A new packet is complete. Our goal here is to get back into receive
+ * mode as fast as possible. On the Z85230 we could change to using
+ * ESCC mode, but on the older chips we have no choice. We flip to the
+ * new buffer immediately in DMA mode so that the DMA of the next
+ * frame can occur while we are copying the previous buffer to an sk_buff
+ *
+ * Called with the lock held
+ */
+
+static void z8530_rx_done(struct z8530_channel *c)
+{
+ struct sk_buff *skb;
+ int ct;
+
+ /*
+ * Is our receive engine in DMA mode
+ */
+
+ if(c->rxdma_on)
+ {
+ /*
+ * Save the ready state and the buffer currently
+ * being used as the DMA target
+ */
+
+ int ready=c->dma_ready;
+ unsigned char *rxb=c->rx_buf[c->dma_num];
+ unsigned long flags;
+
+ /*
+		 * Complete this DMA. Necessary to find the length
+ */
+
+ flags=claim_dma_lock();
+
+ disable_dma(c->rxdma);
+ clear_dma_ff(c->rxdma);
+ c->rxdma_on=0;
+ ct=c->mtu-get_dma_residue(c->rxdma);
+ if(ct<0)
+ ct=2; /* Shit happens.. */
+ c->dma_ready=0;
+
+ /*
+ * Normal case: the other slot is free, start the next DMA
+ * into it immediately.
+ */
+
+ if(ready)
+ {
+ c->dma_num^=1;
+ set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+ set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
+ set_dma_count(c->rxdma, c->mtu);
+ c->rxdma_on = 1;
+ enable_dma(c->rxdma);
+ /* Stop any frames that we missed the head of
+ from passing */
+ write_zsreg(c, R0, RES_Rx_CRC);
+ }
+ else
+			/* Can't occur as we don't re-enable the DMA irq until
+ after the flip is done */
+ printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);
+
+ release_dma_lock(flags);
+
+ /*
+ * Shove the old buffer into an sk_buff. We can't DMA
+ * directly into one on a PC - it might be above the 16Mb
+ * boundary. Optimisation - we could check to see if we
+ * can avoid the copy. Optimisation 2 - make the memcpy
+ * a copychecksum.
+ */
+
+ skb=dev_alloc_skb(ct);
+ if(skb==NULL)
+ {
+ c->stats.rx_dropped++;
+ printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
+ }
+ else
+ {
+ skb_put(skb, ct);
+ memcpy(skb->data, rxb, ct);
+ c->stats.rx_packets++;
+ c->stats.rx_bytes+=ct;
+ }
+ c->dma_ready=1;
+ }
+ else
+ {
+ RT_LOCK;
+ skb=c->skb;
+
+ /*
+ * The game we play for non DMA is similar. We want to
+ * get the controller set up for the next packet as fast
+ * as possible. We potentially only have one byte + the
+ * fifo length for this. Thus we want to flip to the new
+ * buffer and then mess around copying and allocating
+ * things. For the current case it doesn't matter but
+		 * if you build a system where the sync irq isn't blocked
+ * by the kernel IRQ disable then you need only block the
+ * sync IRQ for the RT_LOCK area.
+ *
+ */
+ ct=c->count;
+
+ c->skb = c->skb2;
+ c->count = 0;
+ c->max = c->mtu;
+ if(c->skb)
+ {
+ c->dptr = c->skb->data;
+ c->max = c->mtu;
+ }
+ else
+ {
+ c->count= 0;
+ c->max = 0;
+ }
+ RT_UNLOCK;
+
+ c->skb2 = dev_alloc_skb(c->mtu);
+ if(c->skb2==NULL)
+ printk(KERN_WARNING "%s: memory squeeze.\n",
+ c->netdevice->name);
+ else
+ {
+ skb_put(c->skb2,c->mtu);
+ }
+ c->stats.rx_packets++;
+ c->stats.rx_bytes+=ct;
+
+ }
+ /*
+ * If we received a frame we must now process it.
+ */
+ if(skb)
+ {
+ skb_trim(skb, ct);
+ c->rx_function(c,skb);
+ }
+ else
+ {
+ c->stats.rx_dropped++;
+ printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
+ }
+}
+
+/**
+ * spans_boundary - Check a packet can be ISA DMA'd
+ * @skb: The buffer to check
+ *
+ * Returns true if the buffer crosses a DMA boundary on a PC. The poor
+ * thing can only DMA within a 64K block not across the edges of it.
+ */
+
+static inline int spans_boundary(struct sk_buff *skb)
+{
+ unsigned long a=(unsigned long)skb->data;
+ a^=(a+skb->len);
+ if(a&0x00010000) /* If the 64K bit is different.. */
+ return 1;
+ return 0;
+}
+
+/**
+ * z8530_queue_xmit - Queue a packet
+ * @c: The channel to use
+ * @skb: The packet to kick down the channel
+ *
+ * Queue a packet for transmission. Because the Z85230 has rather
+ * hard to hit per-packet interrupt latencies, even in DMA mode we
+ * do the flip to the DMA buffer here if needed, not in the IRQ
+ * handler.
+ *
+ * Called from the network code. The lock is not held at this
+ * point.
+ */
+
+int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ netif_stop_queue(c->netdevice);
+ if(c->tx_next_skb)
+ {
+ return 1;
+ }
+
+ /* PC SPECIFIC - DMA limits */
+
+ /*
+	 * If we will DMA the transmit and it's gone over the ISA bus
+ * limit, then copy to the flip buffer
+ */
+
+ if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
+ {
+ /*
+ * Send the flip buffer, and flip the flippy bit.
+ * We don't care which is used when just so long as
+ * we never use the same buffer twice in a row. Since
+ * only one buffer can be going out at a time the other
+ * has to be safe.
+ */
+ c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
+ c->tx_dma_used^=1; /* Flip temp buffer */
+ memcpy(c->tx_next_ptr, skb->data, skb->len);
+ }
+ else
+ c->tx_next_ptr=skb->data;
+ RT_LOCK;
+ c->tx_next_skb=skb;
+ RT_UNLOCK;
+
+ spin_lock_irqsave(c->lock, flags);
+ z8530_tx_begin(c);
+ spin_unlock_irqrestore(c->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(z8530_queue_xmit);
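+
+/*
+ * Illustrative sketch only, not part of this driver: a board driver's
+ * hard_start_xmit hook is typically a thin wrapper around this call,
+ * returning the non-zero "busy" value back to the network stack. The
+ * use of dev->priv for the channel pointer is an assumption.
+ */
+#if 0
+static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct z8530_channel *c = dev->priv;
+
+	return z8530_queue_xmit(c, skb);
+}
+#endif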
+
+/**
+ * z8530_get_stats - Get network statistics
+ * @c: The channel to use
+ *
+ * Get the statistics block. We keep the statistics in software as
+ * the chip doesn't do it for us.
+ *
+ * Locking is ignored here - we could lock for a copy but it's
+ * not likely to be that big an issue
+ */
+
+struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
+{
+ return &c->stats;
+}
+
+EXPORT_SYMBOL(z8530_get_stats);
+
+/*
+ * Module support
+ */
+static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
+
+static int __init z85230_init_driver(void)
+{
+ printk(banner);
+ return 0;
+}
+module_init(z85230_init_driver);
+
+static void __exit z85230_cleanup_driver(void)
+{
+}
+module_exit(z85230_cleanup_driver);
+
+MODULE_AUTHOR("Red Hat Inc.");
+MODULE_DESCRIPTION("Z85x30 synchronous driver core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
new file mode 100644
index 000000000000..77e53208045f
--- /dev/null
+++ b/drivers/net/wan/z85230.h
@@ -0,0 +1,449 @@
+/*
+ * Description of Z8530 Z85C30 and Z85230 communications chips
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Alan Cox <alan@redhat.com>
+ */
+
+#ifndef _Z8530_H
+#define _Z8530_H
+
+#include <linux/tty.h>
+#include <linux/interrupt.h>
+
+/* Conversion routines to/from brg time constants from/to bits
+ * per second.
+ */
+#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
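+
+/* Worked example (assuming a 4.9152MHz PCLK drives the generator):
+ * BPS_TO_BRG(9600, 4915200) evaluates to a time constant of 254, and
+ * BRG_TO_BPS(254, 4915200) recovers 9600 bits per second.
+ */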
+
+/* The Zilog register set */
+
+#define FLAG 0x7e
+
+/* Write Register 0 */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define RPRIME 16 /* Indicate a prime register access on 230 */
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
+#define INT_ERR_Rx 0x18 /* Int on error only */
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define RxENABLE 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+
+/* Write Register 4 */
+
+#define PAR_ENA 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xC0 /* x64 clock mode */
+
+/* Write Register 5 */
+
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define BRENABL 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
+#define ZCIE 2 /* Zero count IE */
+#define FIFOE 4 /* Z85230 only */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC_HUNT 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define CRC_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10 (misc status bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+
+/*
+ * Interrupt handling functions for this SCC
+ */
+
+struct z8530_channel;
+
+struct z8530_irqhandler
+{
+ void (*rx)(struct z8530_channel *);
+ void (*tx)(struct z8530_channel *);
+ void (*status)(struct z8530_channel *);
+};
+
+/*
+ * A channel of the Z8530
+ */
+
+struct z8530_channel
+{
+ struct z8530_irqhandler *irqs; /* IRQ handlers */
+ /*
+ * Synchronous
+ */
+	u16		count;		/* Bytes received */
+ u16 max; /* Most we can receive this frame */
+ u16 mtu; /* MTU of the device */
+ u8 *dptr; /* Pointer into rx buffer */
+ struct sk_buff *skb; /* Buffer dptr points into */
+ struct sk_buff *skb2; /* Pending buffer */
+ u8 status; /* Current DCD */
+ u8 dcdcheck; /* which bit to check for line */
+ u8 sync; /* Set if in sync mode */
+
+ u8 regs[32]; /* Register map for the chip */
+ u8 pendregs[32]; /* Pending register values */
+
+ struct sk_buff *tx_skb; /* Buffer being transmitted */
+ struct sk_buff *tx_next_skb; /* Next transmit buffer */
+ u8 *tx_ptr; /* Byte pointer into the buffer */
+ u8 *tx_next_ptr; /* Next pointer to use */
+ u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
+ u8 tx_dma_used; /* Flip buffer usage toggler */
+ u16 txcount; /* Count of bytes to transmit */
+
+ void (*rx_function)(struct z8530_channel *, struct sk_buff *);
+
+ /*
+ * Sync DMA
+ */
+
+ u8 rxdma; /* DMA channels */
+ u8 txdma;
+ u8 rxdma_on; /* DMA active if flag set */
+ u8 txdma_on;
+ u8 dma_num; /* Buffer we are DMAing into */
+ u8 dma_ready; /* Is the other buffer free */
+ u8 dma_tx; /* TX is to use DMA */
+ u8 *rx_buf[2]; /* The flip buffers */
+
+ /*
+ * System
+ */
+
+ struct z8530_dev *dev; /* Z85230 chip instance we are from */
+ unsigned long ctrlio; /* I/O ports */
+ unsigned long dataio;
+
+ /*
+ * For PC we encode this way.
+ */
+#define Z8530_PORT_SLEEP 0x80000000
+#define Z8530_PORT_OF(x) ((x)&0xFFFF)
+
+ u32 rx_overrun; /* Overruns - not done yet */
+ u32 rx_crc_err;
+
+ /*
+ * Bound device pointers
+ */
+
+ void *private; /* For our owner */
+ struct net_device *netdevice; /* Network layer device */
+ struct net_device_stats stats; /* Network layer statistics */
+
+ /*
+ * Async features
+ */
+
+ struct tty_struct *tty; /* Attached terminal */
+ int line; /* Minor number */
+ wait_queue_head_t open_wait; /* Tasks waiting to open */
+ wait_queue_head_t close_wait; /* and for close to end */
+ unsigned long event; /* Pending events */
+ int fdcount; /* # of fd on device */
+ int blocked_open; /* # of blocked opens */
+	int			x_char;		/* XON/XOFF char */
+ unsigned char *xmit_buf; /* Transmit pointer */
+ int xmit_head; /* Transmit ring */
+ int xmit_tail;
+ int xmit_cnt;
+ int flags;
+ int timeout;
+ int xmit_fifo_size; /* Transmit FIFO info */
+
+ int close_delay; /* Do we wait for drain on close ? */
+ unsigned short closing_wait;
+
+ /* We need to know the current clock divisor
+ * to read the bps rate the chip has currently
+ * loaded.
+ */
+
+ unsigned char clk_divisor; /* May be 1, 16, 32, or 64 */
+ int zs_baud;
+
+ int magic;
+ int baud_base; /* Baud parameters */
+ int custom_divisor;
+
+
+ unsigned char tx_active; /* character is being xmitted */
+ unsigned char tx_stopped; /* output is suspended */
+
+	spinlock_t		*lock;		/* Device lock */
+};
+
+/*
+ * Each Z853x0 device.
+ */
+
+struct z8530_dev
+{
+ char *name; /* Device instance name */
+ struct z8530_channel chanA; /* SCC channel A */
+ struct z8530_channel chanB; /* SCC channel B */
+ int type;
+#define Z8530 0 /* NMOS dinosaur */
+#define Z85C30 1 /* CMOS - better */
+#define Z85230 2 /* CMOS with real FIFO */
+ int irq; /* Interrupt for the device */
+ int active; /* Soft interrupt enable - the Mac doesn't
+ always have a hard disable on its 8530s... */
+ spinlock_t lock;
+};
+
+
+/*
+ * Functions
+ */
+
+extern u8 z8530_dead_port[];
+extern u8 z8530_hdlc_kilostream_85230[];
+extern u8 z8530_hdlc_kilostream[];
+extern irqreturn_t z8530_interrupt(int, void *, struct pt_regs *);
+extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+extern int z8530_init(struct z8530_dev *);
+extern int z8530_shutdown(struct z8530_dev *);
+extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+extern int z8530_channel_load(struct z8530_channel *, u8 *);
+extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
+extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+
+
+/*
+ * Standard interrupt vector sets
+ */
+
+extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
+
+/*
+ * Asynchronous Interfacing
+ */
+
+#define SERIAL_MAGIC 0x5301
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+
+#define SERIAL_XMIT_SIZE 4096
+#define WAKEUP_CHARS 256
+
+/*
+ * Events are used to schedule things to happen at timer-interrupt
+ * time, instead of at rs interrupt time.
+ */
+#define RS_EVENT_WRITE_WAKEUP 0
+
+/* Internal flags used only by kernel/chr_drv/serial.c */
+#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
+#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
+#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
+#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
+#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
+#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
+#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
+
+#endif /* !(_Z8530_H) */
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
new file mode 100644
index 000000000000..1f05d9bd05e4
--- /dev/null
+++ b/drivers/net/wd.c
@@ -0,0 +1,559 @@
+/* wd.c: A WD80x3 ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a driver for WD8003 and WD8013 "compatible" ethercards.
+
+ Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users, support
+ for non-standard memory sizes.
+
+
+*/
+
+static const char version[] =
+ "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "wd"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int wd_portlist[] __initdata =
+{0x300, 0x280, 0x380, 0x240, 0};
+
+static int wd_probe1(struct net_device *dev, int ioaddr);
+
+static int wd_open(struct net_device *dev);
+static void wd_reset_8390(struct net_device *dev);
+static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void wd_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void wd_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static int wd_close(struct net_device *dev);
+
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* Offset to ASIC command register. */
+#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
+#define WD_MEMENB 0x40 /* Enable the shared memory. */
+#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
+#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
+#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
+#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
+#define WD_IO_EXTENT 32
+
+
+/* Probe for the WD8003 and WD8013. These cards have the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+   following. A Soundblaster can have the same checksum as a WD ethercard,
+ so we have an extra exclusionary check for it.
+
+ The wd_probe1() routine initializes the card and fills the
+ station address field. */
+
+static int __init do_wd_probe(struct net_device *dev)
+{
+ int i;
+ struct resource *r;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+ int mem_end = dev->mem_end;
+
+ SET_MODULE_OWNER(dev);
+
+ if (base_addr > 0x1ff) { /* Check a user specified location. */
+ r = request_region(base_addr, WD_IO_EXTENT, "wd-probe");
+ if ( r == NULL)
+ return -EBUSY;
+ i = wd_probe1(dev, base_addr);
+ if (i != 0)
+ release_region(base_addr, WD_IO_EXTENT);
+ else
+ r->name = dev->name;
+ return i;
+ }
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; wd_portlist[i]; i++) {
+ int ioaddr = wd_portlist[i];
+ r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe");
+ if (r == NULL)
+ continue;
+ if (wd_probe1(dev, ioaddr) == 0) {
+ r->name = dev->name;
+ return 0;
+ }
+ release_region(ioaddr, WD_IO_EXTENT);
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ dev->mem_end = mem_end;
+ }
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+#ifndef MODULE
+struct net_device * __init wd_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_wd_probe(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init wd_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ int ancient = 0; /* An old card without config registers. */
+ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ const char *model_name;
+ static unsigned version_printed;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */
+ || inb(ioaddr + 9) == 0xff
+ || (checksum & 0xff) != 0xFF)
+ return -ENODEV;
+
+ /* Check for semi-valid mem_start/end values if supplied. */
+ if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
+ printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ dev->mem_start = 0;
+ dev->mem_end = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: WD80x3 at %#3x,", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* The following PureData probe code was contributed by
+ Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
+ configuration differently from others so we have to check for them.
+ This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
+ */
+ if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
+ unsigned char reg5 = inb(ioaddr+5);
+
+ switch (inb(ioaddr+2)) {
+ case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
+ case 0x05: word16 = 0; model_name = "PDUC8023"; break;
+ case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
+ /* Either 0x01 (dumb) or they've released a new version. */
+ default: word16 = 0; model_name = "PDI8023"; break;
+ }
+ dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
+ dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else { /* End of PureData probe */
+ /* This method of checking for a 16-bit board is borrowed from the
+ we.c driver. A simpler method is just to look in ASIC reg. 0x03.
+ I'm comparing the two methods in alpha testing to make certain they
+ return the same result. */
+ /* Check for the old 8 bit board - it has register 0/8 aliasing.
+ Do NOT check i>=6 here -- it hangs the old 8003 boards! */
+ for (i = 0; i < 6; i++)
+ if (inb(ioaddr+i) != inb(ioaddr+8+i))
+ break;
+ if (i >= 6) {
+ ancient = 1;
+ model_name = "WD8003-old";
+ word16 = 0;
+ } else {
+ int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */
+ outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */
+ if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */
+ && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */
+ int asic_reg5 = inb(ioaddr+WD_CMDREG5);
+ /* Magic to set ASIC to word-wide mode. */
+ outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
+ outb(tmp, ioaddr+1);
+ model_name = "WD8013";
+ word16 = 1; /* We have a 16bit board here! */
+ } else {
+ model_name = "WD8003";
+ word16 = 0;
+ }
+ outb(tmp, ioaddr+1); /* Restore original reg1 value. */
+ }
+#ifndef final_version
+ if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
+ printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+#endif
+ }
+
+#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
+ /* Allow a compile-time override. */
+ dev->mem_start = WD_SHMEM;
+#else
+ if (dev->mem_start == 0) {
+ /* Sanity and old 8003 check */
+ int reg0 = inb(ioaddr);
+ if (reg0 == 0xff || reg0 == 0) {
+ /* Future plan: this could check a few likely locations first. */
+ dev->mem_start = 0xd0000;
+ printk(" assigning address %#lx", dev->mem_start);
+ } else {
+ int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
+ /* Some boards don't have register 5 -- it returns 0xff. */
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
+ }
+ }
+#endif
+
+ /* The 8390 isn't at the base address -- the ASIC regs are there! */
+ dev->base_addr = ioaddr+WD_NIC_OFFSET;
+
+ if (dev->irq < 2) {
+ int irqmap[] = {9,3,5,7,10,11,15,4};
+ int reg1 = inb(ioaddr+1);
+ int reg4 = inb(ioaddr+4);
+ if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
+ short nic_addr = ioaddr+WD_NIC_OFFSET;
+ unsigned long irq_mask;
+
+ /* We have an old-style ethercard that doesn't report its IRQ
+ line. Do autoirq to find the IRQ line. Note that this IS NOT
+ a reliable way to trigger an interrupt. */
+ outb_p(E8390_NODMA + E8390_STOP, nic_addr);
+ outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */
+
+ irq_mask = probe_irq_on();
+ outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */
+ outb_p(0x00, nic_addr + EN0_RCNTLO);
+ outb_p(0x00, nic_addr + EN0_RCNTHI);
+ outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+
+ outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
+
+ if (ei_debug > 2)
+ printk(" autoirq is %d", dev->irq);
+ if (dev->irq < 2)
+ dev->irq = word16 ? 10 : 5;
+ } else
+ dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (i) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return i;
+ }
+
+ /* OK, we are certain this is going to work. Set up the device. */
+ ei_status.name = model_name;
+ ei_status.word16 = word16;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+
+ /* Don't map in the shared memory until the board is actually opened. */
+
+ /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */
+ if (dev->mem_end != 0) {
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.priv = dev->mem_end - dev->mem_start;
+ } else {
+ ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
+ ei_status.priv = (ei_status.stop_page - WD_START_PG)*256;
+ }
+
+ ei_status.mem = ioremap(dev->mem_start, ei_status.priv);
+ if (!ei_status.mem) {
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+
+ printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = &wd_reset_8390;
+ ei_status.block_input = &wd_block_input;
+ ei_status.block_output = &wd_block_output;
+ ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ dev->open = &wd_open;
+ dev->stop = &wd_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+ NS8390_init(dev, 0);
+
+#if 1
+ /* Enable interrupt generation on softconfig cards -- M.U */
+ /* .. but possibly potentially unsafe - Donald */
+ if (inb(ioaddr+14) & 0x20)
+ outb(inb(ioaddr+4)|0x80, ioaddr+4);
+#endif
+
+ return 0;
+}
+
+static int
+wd_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ /* Map in the shared memory. Always set register 0 last to remain
+ compatible with very old boards. */
+ ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
+ ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;
+
+ if (ei_status.word16)
+ outb(ei_status.reg5, ioaddr+WD_CMDREG5);
+ outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
+
+ ei_open(dev);
+ return 0;
+}
+
+static void
+wd_reset_8390(struct net_device *dev)
+{
+ int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ outb(WD_RESET, wd_cmd_port);
+ if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+ outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
+ if (ei_status.word16)
+ outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - WD_START_PG)<<8);
+
+ /* We'll always get a 4 byte header read followed by a packet read, so
+ we enable 16 bit mode before the header, and disable after the body. */
+ if (ei_status.word16)
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+
+#ifdef __BIG_ENDIAN
+ /* Officially this is what we are doing, but the readl() below is faster; */
+ /* unfortunately readl() isn't endian-aware for this struct, so copy and swap. */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, and trivial
+ on the Western Digital card where there is no choice of how to do it.
+ The only complications are that the ring buffer wraps and that we need to
+ switch between 8- and 16-bit modes. */
+
+static void
+wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long offset = ring_offset - (WD_START_PG<<8);
+ void __iomem *xfer_start = ei_status.mem + offset;
+
+ if (offset + count > ei_status.priv) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.priv - offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ /* Turn off 16 bit access so that reboot works. ISA brain-damage */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+}
+
+static void
+wd_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ void __iomem *shmem = ei_status.mem + ((start_page - WD_START_PG)<<8);
+
+
+ if (ei_status.word16) {
+ /* Turn on and off 16 bit access so that reboot works. */
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ memcpy_toio(shmem, buf, count);
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ } else
+ memcpy_toio(shmem, buf, count);
+}
+
+
+static int
+wd_close(struct net_device *dev)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+
+ /* Change from 16-bit to 8-bit shared memory so reboot works. */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );
+
+ /* And disable the shared memory. */
+ outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_WD_CARDS 4 /* Max number of wd cards per module */
+static struct net_device *dev_wd[MAX_WD_CARDS];
+static int io[MAX_WD_CARDS];
+static int irq[MAX_WD_CARDS];
+static int mem[MAX_WD_CARDS];
+static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param_array(mem_end, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
+MODULE_PARM_DESC(mem, "memory base address(es) (ignored for PureData boards)");
+MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013; SMC Elite, Elite16 ethernet driver");
+MODULE_LICENSE("GPL");
+
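+/* Illustrative usage only -- the I/O, IRQ and memory values below are
+ made-up examples, not recommended defaults. The parameters above are
+ given at load time, e.g. for two jumpered cards:
+
+   modprobe wd io=0x280,0x300 irq=10,11 mem=0xd0000,0xd4000
+
+ Leaving io= unset falls back to the single autoprobe performed by
+ init_module() below. */
+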
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = mem_end[this_dev];
+ if (do_wd_probe(dev) == 0) {
+ if (register_netdev(dev) == 0) {
+ dev_wd[found++] = dev;
+ continue;
+ }
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct net_device *dev = dev_wd[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
new file mode 100644
index 000000000000..0aaa12c0c098
--- /dev/null
+++ b/drivers/net/wireless/Kconfig
@@ -0,0 +1,365 @@
+#
+# Wireless LAN device configuration
+#
+
+menu "Wireless LAN (non-hamradio)"
+ depends on NETDEVICES
+
+config NET_RADIO
+ bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions"
+ ---help---
+ Support for wireless LANs and everything having to do with radio,
+ but not with amateur radio or FM broadcasting.
+
+ Saying Y here also enables the Wireless Extensions (creates
+ /proc/net/wireless and enables iwconfig access). The Wireless
+ Extension is a generic API allowing a driver to expose to the user
+ space configuration and statistics specific to common Wireless LANs.
+ The beauty of it is that a single set of tools can support all the
+ variations of Wireless LANs, regardless of their type (as long as
+ the driver supports Wireless Extension). Another advantage is that
+ these parameters may be changed on the fly without restarting the
+ driver (or Linux). If you wish to use Wireless Extensions with
+ wireless PCMCIA (PC-) cards, you need to say Y here; you can fetch
+ the tools from
+ <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+
+ Some user-level drivers for scarab devices which don't require
+ special kernel support are available from
+ <ftp://shadow.cabi.net/pub/Linux/>.
+
+# Note: the cards are obsolete (can't buy them anymore), but the drivers
+# are not, as people are still using them...
+comment "Obsolete Wireless cards support (pre-802.11)"
+ depends on NET_RADIO && (INET || ISA || PCMCIA)
+
+config STRIP
+ tristate "STRIP (Metricom starmode radio IP)"
+ depends on NET_RADIO && INET
+ ---help---
+ Say Y if you have a Metricom radio and intend to use Starmode Radio
+ IP. STRIP is a radio protocol developed for the MosquitoNet project
+ (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
+ traffic using Metricom radios. Metricom radios are small, battery
+ powered, 100kbit/sec packet radio transceivers, about the size and
+ weight of a cellular telephone. (You may also have heard them called
+ "Metricom modems" but we avoid the term "modem" because it misleads
+ many people into thinking that you can plug a Metricom modem into a
+ phone line and use it as a modem.)
+
+ You can use STRIP on any Linux machine with a serial port, although
+ it is obviously most useful for people with laptop computers. If you
+ think you might get a Metricom radio in the future, there is no harm
+ in saying Y to STRIP now, except that it makes the kernel a bit
+ bigger.
+
+ To compile this as a module, choose M here: the module will be
+ called strip.
+
+config ARLAN
+ tristate "Aironet Arlan 655 & IC2200 DS support"
+ depends on NET_RADIO && ISA && !64BIT
+ ---help---
+ Aironet makes Arlan, a class of wireless LAN adapters. These use the
+ www.Telxon.com chip, which is also used on several similar cards.
+ This driver is tested on the 655 and IC2200 series cards. Look at
+ <http://www.ylenurme.ee/~elmer/655/> for the latest information.
+
+ The driver is built as two modules, arlan and arlan-proc. The latter
+ is the /proc interface and is not needed most of the time.
+
+ On some computers the card ends up in an invalid state after some
+ time. Use a ping-reset script to clear it.
+
+config WAVELAN
+ tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
+ depends on NET_RADIO && ISA
+ ---help---
+ The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
+ a Radio LAN (wireless Ethernet-like Local Area Network) using the
+ radio frequencies 900 MHz and 2.4 GHz.
+
+ This driver supports the ISA version of the WaveLAN card. A separate
+ driver for the PCMCIA (PC-card) hardware is available in David
+ Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+ for location).
+
+ If you want to use an ISA WaveLAN card under Linux, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Some more specific
+ information is contained in
+ <file:Documentation/networking/wavelan.txt> and in the source code
+ <file:drivers/net/wavelan.p.h>.
+
+ You will also need the wireless tools package available from
+ <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+ Please read the man pages contained therein.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wavelan.
+
+config PCMCIA_WAVELAN
+ tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
+ depends on NET_RADIO && PCMCIA
+ help
+ Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
+ (PC-card) wireless Ethernet networking card to your computer. This
+ driver is for the non-IEEE-802.11 Wavelan cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wavelan_cs. If unsure, say N.
+
+config PCMCIA_NETWAVE
+ tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
+ depends on NET_RADIO && PCMCIA
+ help
+ Say Y here if you intend to attach this type of PCMCIA (PC-card)
+ wireless Ethernet networking card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netwave_cs. If unsure, say N.
+
+comment "Wireless 802.11 Frequency Hopping cards support"
+ depends on NET_RADIO && PCMCIA
+
+config PCMCIA_RAYCS
+ tristate "Aviator/Raytheon 2.4GHz wireless support"
+ depends on NET_RADIO && PCMCIA
+ ---help---
+ Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
+ (PC-card) wireless Ethernet networking card to your computer.
+ Please read the file <file:Documentation/networking/ray_cs.txt> for
+ details.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ray_cs. If unsure, say N.
+
+comment "Wireless 802.11b ISA/PCI cards support"
+ depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
+
+config AIRO
+ tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
+ depends on NET_RADIO && ISA && (PCI || BROKEN)
+ ---help---
+ This is the standard Linux driver to support Cisco/Aironet ISA and
+ PCI 802.11 wireless cards.
+ It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
+ - with or without encryption) as well as cards before the Cisco
+ acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
+
+ This driver supports both the standard Linux Wireless Extensions
+ and the Cisco proprietary API, so both the Linux Wireless Tools and the
+ Cisco Linux utilities can be used to configure the card.
+
+ The driver can be compiled as a module and will be named "airo".
+
+config HERMES
+ tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
+ depends on NET_RADIO && (PPC_PMAC || PCI || PCMCIA)
+ ---help---
+ A driver for 802.11b wireless cards based on the "Hermes" or
+ Intersil HFA384x (Prism 2) MAC controller. This includes the vast
+ majority of the PCMCIA 802.11b cards (which are nearly all rebadges)
+ - except for the Cisco/Aironet cards. Cards supported include the
+ Apple Airport (not a PCMCIA card), WavelanIEEE/Orinoco,
+ Cabletron/EnteraSys Roamabout, ELSA AirLancer, MELCO Buffalo, Avaya,
+ IBM High Rate Wireless, Farallon SkyLINE, Samsung MagicLAN, Netgear
+ MA401, LinkSys WPC-11, D-Link DWL-650, 3Com AirConnect, Intel
+ PRO/Wireless, and Symbol Spectrum24 High Rate amongst others.
+
+ This option includes the guts of the driver, but in order to
+ actually use a card you will also need to enable support for PCMCIA
+ Hermes cards, PLX9052 based PCI adaptors or the Apple Airport below.
+
+ You will also very likely need the Wireless Tools in order to
+ configure your card and so that /etc/pcmcia/wireless.opts works:
+ <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
+
+config APPLE_AIRPORT
+ tristate "Apple Airport support (built-in)"
+ depends on PPC_PMAC && HERMES
+ help
+ Say Y here to support the Airport 802.11b wireless Ethernet hardware
+ built into the Macintosh iBook and other recent PowerPC-based
+ Macintosh machines. This is essentially a Lucent Orinoco card with
+ a non-standard interface.
+
+config PLX_HERMES
+ tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.) (EXPERIMENTAL)"
+ depends on PCI && HERMES && EXPERIMENTAL
+ help
+ Enable support for PCMCIA cards supported by the "Hermes" (aka
+ orinoco) driver when used in PLX9052 based PCI adaptors. These
+ adaptors are not a full PCMCIA controller but act as a more limited
+ PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
+ 802.11b PCMCIA cards can be used in desktop machines. The Netgear
+ MA301 is such an adaptor.
+
+ Support for these adaptors is so far still incomplete and buggy.
+ You have been warned.
+
+config TMD_HERMES
+ tristate "Hermes in TMD7160 based PCI adaptor support (EXPERIMENTAL)"
+ depends on PCI && HERMES && EXPERIMENTAL
+ help
+ Enable support for PCMCIA cards supported by the "Hermes" (aka
+ orinoco) driver when used in TMD7160 based PCI adaptors. These
+ adaptors are not a full PCMCIA controller but act as a more limited
+ PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
+ 802.11b PCMCIA cards can be used in desktop machines.
+
+ Support for these adaptors is so far still incomplete and buggy.
+ You have been warned.
+
+config PCI_HERMES
+ tristate "Prism 2.5 PCI 802.11b adaptor support (EXPERIMENTAL)"
+ depends on PCI && HERMES && EXPERIMENTAL
+ help
+ Enable support for PCI and mini-PCI 802.11b wireless NICs based on
+ the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
+ PCMCIA cards bundled with PCI<->PCMCIA adaptors which are also
+ common. Some of the built-in wireless adaptors in laptops are of
+ this variety.
+
+config ATMEL
+ tristate "Atmel at76c50x chipset 802.11b support"
+ depends on NET_RADIO && EXPERIMENTAL
+ select FW_LOADER
+ select CRC32
+ ---help---
+ A driver for 802.11b wireless cards based on the Atmel fast-vnet
+ chips. This driver supports standard Linux wireless extensions.
+
+ Many cards based on this chipset do not have flash memory
+ and need their firmware loaded at start-up. If yours is
+ one of these, you will need to provide a firmware image
+ to be loaded into the card by the driver. The Atmel
+ firmware package can be downloaded from
+ <http://www.thekelleys.org.uk/atmel>
+
+config PCI_ATMEL
+ tristate "Atmel at76c506 PCI cards"
+ depends on ATMEL && PCI
+ ---help---
+ Enable support for PCI and mini-PCI cards containing the
+ Atmel at76c506 chip.
+
+# If Pcmcia is compiled in, offer Pcmcia cards...
+comment "Wireless 802.11b Pcmcia/Cardbus cards support"
+ depends on NET_RADIO && PCMCIA
+
+config PCMCIA_HERMES
+ tristate "Hermes PCMCIA card support"
+ depends on NET_RADIO && PCMCIA && HERMES
+ ---help---
+ A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
+ as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
+ EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and
+ others). It should also be usable on various Prism II based cards
+ such as the Linksys, D-Link and Farallon Skyline. It should also
+ work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN.
+
+ To use your PC-cards, you will need supporting software from David
+ Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+ for location). You also want to check out the PCMCIA-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ You will also very likely need the Wireless Tools in order to
+ configure your card and so that /etc/pcmcia/wireless.opts works:
+ <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+
+config AIRO_CS
+ tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
+ depends on NET_RADIO && PCMCIA
+ ---help---
+ This is the standard Linux driver to support Cisco/Aironet PCMCIA
+ 802.11 wireless cards. This driver is the same as the Aironet
+ driver part of the Linux Pcmcia package.
+ It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
+ - with or without encryption) as well as cards before the Cisco
+ acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
+ supports OEMs of Cisco such as the DELL TrueMobile 4800 and Xircom
+ 802.11b cards.
+
+ This driver supports both the standard Linux Wireless Extensions
+ and the Cisco proprietary API, so both the Linux Wireless Tools and the
+ Cisco Linux utilities can be used to configure the card.
+
+ To use your PC-cards, you will need supporting software from David
+ Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+ for location). You also want to check out the PCMCIA-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+config PCMCIA_ATMEL
+ tristate "Atmel at76c502/at76c504 PCMCIA cards"
+ depends on NET_RADIO && ATMEL && PCMCIA
+ select FW_LOADER
+ select CRC32
+ ---help---
+ Enable support for PCMCIA cards containing the
+ Atmel at76c502 and at76c504 chips.
+
+config PCMCIA_WL3501
+ tristate "Planet WL3501 PCMCIA cards"
+ depends on NET_RADIO && EXPERIMENTAL && PCMCIA
+ ---help---
+ A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
+ It has basic support for Linux wireless extensions and initial
+ micro support for ethtool.
+
+comment "Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support"
+ depends on NET_RADIO && PCI
+config PRISM54
+ tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus'
+ depends on PCI && NET_RADIO && EXPERIMENTAL
+ select FW_LOADER
+ ---help---
+ Enable PCI and Cardbus support for the following chipset based cards:
+
+ ISL3880 - Prism GT 802.11 b/g
+ ISL3877 - Prism Indigo 802.11 a
+ ISL3890 - Prism Duette 802.11 a/b/g
+
+ For a complete list of supported cards visit <http://prism54.org>.
+ Here is the latest confirmed list of supported cards:
+
+ 3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72
+ Allnet ALL0271 PCI Card
+ Compex WL54G Cardbus Card
+ Corega CG-WLCB54GT Cardbus Card
+ D-Link Air Plus Xtreme G A1 Cardbus Card aka DWL-g650
+ I-O Data WN-G54/CB Cardbus Card
+ Kobishi XG-300 aka Z-Com Cardbus Card
+ Netgear WG511 Cardbus Card
+ Ovislink WL-5400PCI PCI Card
+ Peabird WLG-PCI PCI Card
+ Sitecom WL-100i Cardbus Card
+ Sitecom WL-110i PCI Card
+ SMC2802W - EZ Connect g 2.4GHz 54 Mbps Wireless PCI Card
+ SMC2835W - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
+ SMC2835W-V2 - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
+ Z-Com XG-900 PCI Card
+ Zyxel G-100 Cardbus Card
+
+ If you enable this you will need a firmware file as well.
+ You will need to copy this to /usr/lib/hotplug/firmware/isl3890.
+ You can get this non-GPL'd firmware file from the Prism54 project page:
+ <http://prism54.org>
+ You will also need the /etc/hotplug/firmware.agent script from
+ a current hotplug package.
+
+ Note: You need a motherboard with DMA support to use any of these cards.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/modules.txt>. The module
+ will be called prism54.ko.
+
+# yes, this works even when no drivers are selected
+config NET_WIRELESS
+ bool
+ depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
+ default y
+
+endmenu
+
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
new file mode 100644
index 000000000000..2b87841322cc
--- /dev/null
+++ b/drivers/net/wireless/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the Linux Wireless network device drivers.
+#
+
+obj-$(CONFIG_STRIP) += strip.o
+obj-$(CONFIG_ARLAN) += arlan.o
+
+arlan-objs := arlan-main.o arlan-proc.o
+
+# Obsolete cards
+obj-$(CONFIG_WAVELAN) += wavelan.o
+obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
+obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
+
+obj-$(CONFIG_HERMES) += orinoco.o hermes.o
+obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
+obj-$(CONFIG_APPLE_AIRPORT) += airport.o
+obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
+obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
+obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
+
+obj-$(CONFIG_AIRO) += airo.o
+obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
+
+obj-$(CONFIG_ATMEL) += atmel.o
+obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o
+obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
+
+obj-$(CONFIG_PRISM54) += prism54/
+
+# 16-bit wireless PCMCIA client drivers
+obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
+obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
diff --git a/drivers/net/wireless/README b/drivers/net/wireless/README
new file mode 100644
index 000000000000..0c274bf6d45e
--- /dev/null
+++ b/drivers/net/wireless/README
@@ -0,0 +1,25 @@
+ README
+ ------
+
+ This directory is mostly for Wireless LAN drivers, in their
+various incarnations (ISA, PCI, Pcmcia...).
+ This separate directory is needed because a lot of drivers work
+on different buses (typically PCI + Pcmcia) and share 95% of the
+code. This allows the code and the config options to be in one single
+place instead of scattered all over the driver tree, which is never
+100% satisfactory.
+
+ Note: if you want more info on the topic of Wireless LANs,
+you are kindly invited to have a look at the Wireless Howto:
+ http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/
+ Some Wireless LAN drivers, like orinoco_cs, require the use of
+Wireless Tools to be configured:
+ http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html
+
+ Special notes for distribution maintainers:
+ 1) wvlan_cs will be discontinued soon in favor of orinoco_cs
+ 2) Please add Wireless Tools support in your scripts
+
+ Have fun...
+
+ Jean
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
new file mode 100644
index 000000000000..2899144f2153
--- /dev/null
+++ b/drivers/net/wireless/airo.c
@@ -0,0 +1,7667 @@
+/*======================================================================
+
+ Aironet driver for 4500 and 4800 series cards
+
+ This code is released under both the GPL version 2 and BSD licenses.
+ Either license may be used. The respective licenses are found at
+ the end of this file.
+
+ This code was developed by Benjamin Reed <breed@users.sourceforge.net>
+ including portions of which come from the Aironet PC4500
+ Developer's Reference Manual and used with permission. Copyright
+ (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
+ code in the Developer's manual was granted for this driver by
+ Aironet. Major code contributions were received from Javier Achirica
+ <achirica@users.sourceforge.net> and Jean Tourrilhes <jt@hpl.hp.com>.
+ Code was also integrated from the Cisco Aironet driver for Linux.
+ Support for MPI350 cards was added by Fabrice Bellet
+ <fabrice@bellet.info>.
+
+======================================================================*/
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_PCI
+static struct pci_device_id card_ids[] = {
+ { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
+ { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x0350, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x5000, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0xa504, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, card_ids);
+
+static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
+static void airo_pci_remove(struct pci_dev *);
+static int airo_pci_suspend(struct pci_dev *pdev, u32 state);
+static int airo_pci_resume(struct pci_dev *pdev);
+
+static struct pci_driver airo_driver = {
+ .name = "airo",
+ .id_table = card_ids,
+ .probe = airo_pci_probe,
+ .remove = __devexit_p(airo_pci_remove),
+ .suspend = airo_pci_suspend,
+ .resume = airo_pci_resume,
+};
+#endif /* CONFIG_PCI */
+
+/* Include Wireless Extension definition and check version - Jean II */
+#include <linux/wireless.h>
+#define WIRELESS_SPY // enable iwspy support
+#include <net/iw_handler.h> // New driver API
+
+#define CISCO_EXT // enable Cisco extensions
+#ifdef CISCO_EXT
+#include <linux/delay.h>
+#endif
+
+/* Support Cisco MIC feature */
+#define MICSUPPORT
+
+#if defined(MICSUPPORT) && !defined(CONFIG_CRYPTO)
+#warning MIC support requires Crypto API
+#undef MICSUPPORT
+#endif
+
+/* Hack to do some power saving */
+#define POWER_ON_DOWN
+
+/* As you can see this list is HUGE!
+ I really don't know what a lot of these counts are about, but they
+ are all here for completeness. If the IGNLABEL macro is put in
+ front of the label, that statistic will not be included in the list
+ of statistics in the /proc filesystem. */
+
+#define IGNLABEL(comment) NULL
+static char *statsLabels[] = {
+ "RxOverrun",
+ IGNLABEL("RxPlcpCrcErr"),
+ IGNLABEL("RxPlcpFormatErr"),
+ IGNLABEL("RxPlcpLengthErr"),
+ "RxMacCrcErr",
+ "RxMacCrcOk",
+ "RxWepErr",
+ "RxWepOk",
+ "RetryLong",
+ "RetryShort",
+ "MaxRetries",
+ "NoAck",
+ "NoCts",
+ "RxAck",
+ "RxCts",
+ "TxAck",
+ "TxRts",
+ "TxCts",
+ "TxMc",
+ "TxBc",
+ "TxUcFrags",
+ "TxUcPackets",
+ "TxBeacon",
+ "RxBeacon",
+ "TxSinColl",
+ "TxMulColl",
+ "DefersNo",
+ "DefersProt",
+ "DefersEngy",
+ "DupFram",
+ "RxFragDisc",
+ "TxAged",
+ "RxAged",
+ "LostSync-MaxRetry",
+ "LostSync-MissedBeacons",
+ "LostSync-ArlExceeded",
+ "LostSync-Deauth",
+ "LostSync-Disassoced",
+ "LostSync-TsfTiming",
+ "HostTxMc",
+ "HostTxBc",
+ "HostTxUc",
+ "HostTxFail",
+ "HostRxMc",
+ "HostRxBc",
+ "HostRxUc",
+ "HostRxDiscard",
+ IGNLABEL("HmacTxMc"),
+ IGNLABEL("HmacTxBc"),
+ IGNLABEL("HmacTxUc"),
+ IGNLABEL("HmacTxFail"),
+ IGNLABEL("HmacRxMc"),
+ IGNLABEL("HmacRxBc"),
+ IGNLABEL("HmacRxUc"),
+ IGNLABEL("HmacRxDiscard"),
+ IGNLABEL("HmacRxAccepted"),
+ "SsidMismatch",
+ "ApMismatch",
+ "RatesMismatch",
+ "AuthReject",
+ "AuthTimeout",
+ "AssocReject",
+ "AssocTimeout",
+ IGNLABEL("ReasonOutsideTable"),
+ IGNLABEL("ReasonStatus1"),
+ IGNLABEL("ReasonStatus2"),
+ IGNLABEL("ReasonStatus3"),
+ IGNLABEL("ReasonStatus4"),
+ IGNLABEL("ReasonStatus5"),
+ IGNLABEL("ReasonStatus6"),
+ IGNLABEL("ReasonStatus7"),
+ IGNLABEL("ReasonStatus8"),
+ IGNLABEL("ReasonStatus9"),
+ IGNLABEL("ReasonStatus10"),
+ IGNLABEL("ReasonStatus11"),
+ IGNLABEL("ReasonStatus12"),
+ IGNLABEL("ReasonStatus13"),
+ IGNLABEL("ReasonStatus14"),
+ IGNLABEL("ReasonStatus15"),
+ IGNLABEL("ReasonStatus16"),
+ IGNLABEL("ReasonStatus17"),
+ IGNLABEL("ReasonStatus18"),
+ IGNLABEL("ReasonStatus19"),
+ "RxMan",
+ "TxMan",
+ "RxRefresh",
+ "TxRefresh",
+ "RxPoll",
+ "TxPoll",
+ "HostRetries",
+ "LostSync-HostReq",
+ "HostTxBytes",
+ "HostRxBytes",
+ "ElapsedUsec",
+ "ElapsedSec",
+ "LostSyncBetterAP",
+ "PrivacyMismatch",
+ "Jammed",
+ "DiscRxNotWepped",
+ "PhyEleMismatch",
+ (char*)-1 };
+#ifndef RUN_AT
+#define RUN_AT(x) (jiffies+(x))
+#endif
+
+
+/* These variables are for insmod, since it seems that the rates
+ can only be set in setup_card. Rates should be a comma separated
+ (no spaces) list of rates (up to 8). */
+
+static int rates[8];
+static int basic_rate;
+static char *ssids[3];
+
+static int io[4];
+static int irq[4];
+
+static
+int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at.
+ 0 means no limit. For old cards this was 4 */
+
+static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */
+static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read
+ the bap, needed on some older cards and buses. */
+static int adhoc;
+
+static int probe = 1;
+
+static int proc_uid /* = 0 */;
+
+static int proc_gid /* = 0 */;
+
+static int airo_perm = 0555;
+
+static int proc_perm = 0644;
+
+MODULE_AUTHOR("Benjamin Reed");
+MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \
+ cards. Direct support for ISA/PCI/MPI cards and support \
+ for PCMCIA when used with airo_cs.");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350");
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(basic_rate, int, 0);
+module_param_array(rates, int, NULL, 0);
+module_param_array(ssids, charp, NULL, 0);
+module_param(auto_wep, int, 0);
+MODULE_PARM_DESC(auto_wep, "If non-zero, the driver will keep looping through \
+the authentication options until an association is made. The value of \
+auto_wep is the number of WEP keys to check. A value of 2 will try using \
+the key at index 0 and index 1.");
+module_param(aux_bap, int, 0);
+MODULE_PARM_DESC(aux_bap, "If non-zero, the driver will switch into a mode \
+that seems to work better for older cards with some older buses. Before \
+switching it checks that the switch is needed.");
+module_param(maxencrypt, int, 0);
+MODULE_PARM_DESC(maxencrypt, "The maximum speed that the card can do \
+encryption. Units are in 512 kbit/s. Zero (default) means there is no limit. \
+Older cards used to be limited to 2 Mbit/s (a value of 4).");
+module_param(adhoc, int, 0);
+MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode.");
+module_param(probe, int, 0);
+MODULE_PARM_DESC(probe, "If zero, the driver won't start the card.");
+
+module_param(proc_uid, int, 0);
+MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to.");
+module_param(proc_gid, int, 0);
+MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to.");
+module_param(airo_perm, int, 0);
+MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet.");
+module_param(proc_perm, int, 0);
+MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc");
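+
+/* Illustrative usage only -- the SSIDs and rate values below are
+ hypothetical. The parameters above are passed at module load time, e.g.
+
+   modprobe airo ssids=home,work rates=2,4,11,22 adhoc=1
+
+ rates= takes up to 8 comma-separated values with no spaces, as noted
+ above where the insmod-only variables are declared. */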
+
+/* This is a kind of sloppy hack to get this information to OUT4500 and
+ IN4500. I would be extremely interested in the situation where this
+ doesn't work though!!! */
+static int do8bitIO = 0;
+
+/* Return codes */
+#define SUCCESS 0
+#define ERROR -1
+#define NO_PACKET -2
+
+/* Commands */
+#define NOP2 0x0000
+#define MAC_ENABLE 0x0001
+#define MAC_DISABLE 0x0002
+#define CMD_LOSE_SYNC 0x0003 /* Not sure what this does... */
+#define CMD_SOFTRESET 0x0004
+#define HOSTSLEEP 0x0005
+#define CMD_MAGIC_PKT 0x0006
+#define CMD_SETWAKEMASK 0x0007
+#define CMD_READCFG 0x0008
+#define CMD_SETMODE 0x0009
+#define CMD_ALLOCATETX 0x000a
+#define CMD_TRANSMIT 0x000b
+#define CMD_DEALLOCATETX 0x000c
+#define NOP 0x0010
+#define CMD_WORKAROUND 0x0011
+#define CMD_ALLOCATEAUX 0x0020
+#define CMD_ACCESS 0x0021
+#define CMD_PCIBAP 0x0022
+#define CMD_PCIAUX 0x0023
+#define CMD_ALLOCBUF 0x0028
+#define CMD_GETTLV 0x0029
+#define CMD_PUTTLV 0x002a
+#define CMD_DELTLV 0x002b
+#define CMD_FINDNEXTTLV 0x002c
+#define CMD_PSPNODES 0x0030
+#define CMD_SETCW 0x0031
+#define CMD_SETPCF 0x0032
+#define CMD_SETPHYREG 0x003e
+#define CMD_TXTEST 0x003f
+#define MAC_ENABLETX 0x0101
+#define CMD_LISTBSS 0x0103
+#define CMD_SAVECFG 0x0108
+#define CMD_ENABLEAUX 0x0111
+#define CMD_WRITERID 0x0121
+#define CMD_USEPSPNODES 0x0130
+#define MAC_ENABLERX 0x0201
+
+/* Command errors */
+#define ERROR_QUALIF 0x00
+#define ERROR_ILLCMD 0x01
+#define ERROR_ILLFMT 0x02
+#define ERROR_INVFID 0x03
+#define ERROR_INVRID 0x04
+#define ERROR_LARGE 0x05
+#define ERROR_NDISABL 0x06
+#define ERROR_ALLOCBSY 0x07
+#define ERROR_NORD 0x0B
+#define ERROR_NOWR 0x0C
+#define ERROR_INVFIDTX 0x0D
+#define ERROR_TESTACT 0x0E
+#define ERROR_TAGNFND 0x12
+#define ERROR_DECODE 0x20
+#define ERROR_DESCUNAV 0x21
+#define ERROR_BADLEN 0x22
+#define ERROR_MODE 0x80
+#define ERROR_HOP 0x81
+#define ERROR_BINTER 0x82
+#define ERROR_RXMODE 0x83
+#define ERROR_MACADDR 0x84
+#define ERROR_RATES 0x85
+#define ERROR_ORDER 0x86
+#define ERROR_SCAN 0x87
+#define ERROR_AUTH 0x88
+#define ERROR_PSMODE 0x89
+#define ERROR_RTYPE 0x8A
+#define ERROR_DIVER 0x8B
+#define ERROR_SSID 0x8C
+#define ERROR_APLIST 0x8D
+#define ERROR_AUTOWAKE 0x8E
+#define ERROR_LEAP 0x8F
+
+/* Registers */
+#define COMMAND 0x00
+#define PARAM0 0x02
+#define PARAM1 0x04
+#define PARAM2 0x06
+#define STATUS 0x08
+#define RESP0 0x0a
+#define RESP1 0x0c
+#define RESP2 0x0e
+#define LINKSTAT 0x10
+#define SELECT0 0x18
+#define OFFSET0 0x1c
+#define RXFID 0x20
+#define TXALLOCFID 0x22
+#define TXCOMPLFID 0x24
+#define DATA0 0x36
+#define EVSTAT 0x30
+#define EVINTEN 0x32
+#define EVACK 0x34
+#define SWS0 0x28
+#define SWS1 0x2a
+#define SWS2 0x2c
+#define SWS3 0x2e
+#define AUXPAGE 0x3A
+#define AUXOFF 0x3C
+#define AUXDATA 0x3E
+
+#define FID_TX 1
+#define FID_RX 2
+/* Offset into aux memory for descriptors */
+#define AUX_OFFSET 0x800
+/* Size of allocated packets */
+#define PKTSIZE 1840
+#define RIDSIZE 2048
+/* Size of the transmit queue */
+#define MAXTXQ 64
+
+/* BAP selectors */
+#define BAP0 0 // Used for receiving packets
+#define BAP1 2 // Used for xmiting packets and working with RIDS
+
+/* Flags */
+#define COMMAND_BUSY 0x8000
+
+#define BAP_BUSY 0x8000
+#define BAP_ERR 0x4000
+#define BAP_DONE 0x2000
+
+#define PROMISC 0xffff
+#define NOPROMISC 0x0000
+
+#define EV_CMD 0x10
+#define EV_CLEARCOMMANDBUSY 0x4000
+#define EV_RX 0x01
+#define EV_TX 0x02
+#define EV_TXEXC 0x04
+#define EV_ALLOC 0x08
+#define EV_LINK 0x80
+#define EV_AWAKE 0x100
+#define EV_TXCPY 0x400
+#define EV_UNKNOWN 0x800
+#define EV_MIC 0x1000 /* Message Integrity Check Interrupt */
+#define EV_AWAKEN 0x2000
+#define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC)
+
+#ifdef CHECK_UNKNOWN_INTS
+#define IGNORE_INTS ( EV_CMD | EV_UNKNOWN)
+#else
+#define IGNORE_INTS (~STATUS_INTS)
+#endif
+
+/* RID TYPES */
+#define RID_RW 0x20
+
+/* The RIDs */
+#define RID_CAPABILITIES 0xFF00
+#define RID_APINFO 0xFF01
+#define RID_RADIOINFO 0xFF02
+#define RID_UNKNOWN3 0xFF03
+#define RID_RSSI 0xFF04
+#define RID_CONFIG 0xFF10
+#define RID_SSID 0xFF11
+#define RID_APLIST 0xFF12
+#define RID_DRVNAME 0xFF13
+#define RID_ETHERENCAP 0xFF14
+#define RID_WEP_TEMP 0xFF15
+#define RID_WEP_PERM 0xFF16
+#define RID_MODULATION 0xFF17
+#define RID_OPTIONS 0xFF18
+#define RID_ACTUALCONFIG 0xFF20 /*readonly*/
+#define RID_FACTORYCONFIG 0xFF21
+#define RID_UNKNOWN22 0xFF22
+#define RID_LEAPUSERNAME 0xFF23
+#define RID_LEAPPASSWORD 0xFF24
+#define RID_STATUS 0xFF50
+#define RID_BEACON_HST 0xFF51
+#define RID_BUSY_HST 0xFF52
+#define RID_RETRIES_HST 0xFF53
+#define RID_UNKNOWN54 0xFF54
+#define RID_UNKNOWN55 0xFF55
+#define RID_UNKNOWN56 0xFF56
+#define RID_MIC 0xFF57
+#define RID_STATS16 0xFF60
+#define RID_STATS16DELTA 0xFF61
+#define RID_STATS16DELTACLEAR 0xFF62
+#define RID_STATS 0xFF68
+#define RID_STATSDELTA 0xFF69
+#define RID_STATSDELTACLEAR 0xFF6A
+#define RID_ECHOTEST_RID 0xFF70
+#define RID_ECHOTEST_RESULTS 0xFF71
+#define RID_BSSLISTFIRST 0xFF72
+#define RID_BSSLISTNEXT 0xFF73
+
+typedef struct {
+ u16 cmd;
+ u16 parm0;
+ u16 parm1;
+ u16 parm2;
+} Cmd;
+
+typedef struct {
+ u16 status;
+ u16 rsp0;
+ u16 rsp1;
+ u16 rsp2;
+} Resp;
+
+/*
+ * Rids and endian-ness: The Rids will always be in cpu endian, since
+ * that is what all the patches from the big-endian folks end up doing,
+ * so all rid access should use the read/writeXXXRid routines.
+ */
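+
+/* Minimal sketch only: the wrapper name below is invented for
+ * illustration, not the driver's actual helper; it just shows the idea
+ * behind the read/writeXXXRid routines mentioned above -- do the
+ * little-endian-to-cpu conversion in one place:
+ *
+ *	static int readSsidRid_example(struct airo_info *ai, SsidRid *r)
+ *	{
+ *		int rc = PC4500_readrid(ai, RID_SSID, r, sizeof(*r), 1);
+ *		r->len = le16_to_cpu(r->len);	// on-card data is LE
+ *		return rc;
+ *	}
+ */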
+
+/* This is redundant for x86 archs, but it seems necessary for ARM */
+#pragma pack(1)
+
+/* This structure came from an email sent to me by an engineer at
+ Aironet for inclusion in this driver */
+typedef struct {
+ u16 len;
+ u16 kindex;
+ u8 mac[ETH_ALEN];
+ u16 klen;
+ u8 key[16];
+} WepKeyRid;
+
+/* These structures are from the Aironet's PC4500 Developers Manual */
+typedef struct {
+ u16 len;
+ u8 ssid[32];
+} Ssid;
+
+typedef struct {
+ u16 len;
+ Ssid ssids[3];
+} SsidRid;
+
+typedef struct {
+ u16 len;
+ u16 modulation;
+#define MOD_DEFAULT 0
+#define MOD_CCK 1
+#define MOD_MOK 2
+} ModulationRid;
+
+typedef struct {
+ u16 len; /* sizeof(ConfigRid) */
+ u16 opmode; /* operating mode */
+#define MODE_STA_IBSS 0
+#define MODE_STA_ESS 1
+#define MODE_AP 2
+#define MODE_AP_RPTR 3
+#define MODE_ETHERNET_HOST (0<<8) /* rx payloads converted */
+#define MODE_LLC_HOST (1<<8) /* rx payloads left as is */
+#define MODE_AIRONET_EXTEND (1<<9) /* enable Aironet extensions */
+#define MODE_AP_INTERFACE (1<<10) /* enable ap interface extensions */
+#define MODE_ANTENNA_ALIGN (1<<11) /* enable antenna alignment */
+#define MODE_ETHER_LLC (1<<12) /* enable ethernet LLC */
+#define MODE_LEAF_NODE (1<<13) /* enable leaf node bridge */
+#define MODE_CF_POLLABLE (1<<14) /* enable CF pollable */
+#define MODE_MIC (1<<15) /* enable MIC */
+ u16 rmode; /* receive mode */
+#define RXMODE_BC_MC_ADDR 0
+#define RXMODE_BC_ADDR 1 /* ignore multicasts */
+#define RXMODE_ADDR 2 /* ignore multicast and broadcast */
+#define RXMODE_RFMON 3 /* wireless monitor mode */
+#define RXMODE_RFMON_ANYBSS 4
+#define RXMODE_LANMON 5 /* lan style monitor -- data packets only */
+#define RXMODE_DISABLE_802_3_HEADER (1<<8) /* disables 802.3 header on rx */
+#define RXMODE_NORMALIZED_RSSI (1<<9) /* return normalized RSSI */
+ u16 fragThresh;
+ u16 rtsThres;
+ u8 macAddr[ETH_ALEN];
+ u8 rates[8];
+ u16 shortRetryLimit;
+ u16 longRetryLimit;
+ u16 txLifetime; /* in kusec */
+ u16 rxLifetime; /* in kusec */
+ u16 stationary;
+ u16 ordering;
+ u16 u16deviceType; /* for overriding device type */
+ u16 cfpRate;
+ u16 cfpDuration;
+ u16 _reserved1[3];
+ /*---------- Scanning/Associating ----------*/
+ u16 scanMode;
+#define SCANMODE_ACTIVE 0
+#define SCANMODE_PASSIVE 1
+#define SCANMODE_AIROSCAN 2
+ u16 probeDelay; /* in kusec */
+ u16 probeEnergyTimeout; /* in kusec */
+ u16 probeResponseTimeout;
+ u16 beaconListenTimeout;
+ u16 joinNetTimeout;
+ u16 authTimeout;
+ u16 authType;
+#define AUTH_OPEN 0x1
+#define AUTH_ENCRYPT 0x101
+#define AUTH_SHAREDKEY 0x102
+#define AUTH_ALLOW_UNENCRYPTED 0x200
+ u16 associationTimeout;
+ u16 specifiedApTimeout;
+ u16 offlineScanInterval;
+ u16 offlineScanDuration;
+ u16 linkLossDelay;
+ u16 maxBeaconLostTime;
+ u16 refreshInterval;
+#define DISABLE_REFRESH 0xFFFF
+ u16 _reserved1a[1];
+ /*---------- Power save operation ----------*/
+ u16 powerSaveMode;
+#define POWERSAVE_CAM 0
+#define POWERSAVE_PSP 1
+#define POWERSAVE_PSPCAM 2
+ u16 sleepForDtims;
+ u16 listenInterval;
+ u16 fastListenInterval;
+ u16 listenDecay;
+ u16 fastListenDelay;
+ u16 _reserved2[2];
+ /*---------- Ap/Ibss config items ----------*/
+ u16 beaconPeriod;
+ u16 atimDuration;
+ u16 hopPeriod;
+ u16 channelSet;
+ u16 channel;
+ u16 dtimPeriod;
+ u16 bridgeDistance;
+ u16 radioID;
+ /*---------- Radio configuration ----------*/
+ u16 radioType;
+#define RADIOTYPE_DEFAULT 0
+#define RADIOTYPE_802_11 1
+#define RADIOTYPE_LEGACY 2
+ u8 rxDiversity;
+ u8 txDiversity;
+ u16 txPower;
+#define TXPOWER_DEFAULT 0
+ u16 rssiThreshold;
+#define RSSI_DEFAULT 0
+ u16 modulation;
+#define PREAMBLE_AUTO 0
+#define PREAMBLE_LONG 1
+#define PREAMBLE_SHORT 2
+ u16 preamble;
+ u16 homeProduct;
+ u16 radioSpecific;
+ /*---------- Aironet Extensions ----------*/
+ u8 nodeName[16];
+ u16 arlThreshold;
+ u16 arlDecay;
+ u16 arlDelay;
+ u16 _reserved4[1];
+ /*---------- Aironet Extensions ----------*/
+ u8 magicAction;
+#define MAGIC_ACTION_STSCHG 1
+#define MAGIC_ACTION_RESUME 2
+#define MAGIC_IGNORE_MCAST (1<<8)
+#define MAGIC_IGNORE_BCAST (1<<9)
+#define MAGIC_SWITCH_TO_PSP (0<<10)
+#define MAGIC_STAY_IN_CAM (1<<10)
+ u8 magicControl;
+ u16 autoWake;
+} ConfigRid;
+
+typedef struct {
+ u16 len;
+ u8 mac[ETH_ALEN];
+ u16 mode;
+ u16 errorCode;
+ u16 sigQuality;
+ u16 SSIDlen;
+ char SSID[32];
+ char apName[16];
+ u8 bssid[4][ETH_ALEN];
+ u16 beaconPeriod;
+ u16 dimPeriod;
+ u16 atimDuration;
+ u16 hopPeriod;
+ u16 channelSet;
+ u16 channel;
+ u16 hopsToBackbone;
+ u16 apTotalLoad;
+ u16 generatedLoad;
+ u16 accumulatedArl;
+ u16 signalQuality;
+ u16 currentXmitRate;
+ u16 apDevExtensions;
+ u16 normalizedSignalStrength;
+ u16 shortPreamble;
+ u8 apIP[4];
+ u8 noisePercent; /* Noise percent in last second */
+ u8 noisedBm; /* Noise dBm in last second */
+ u8 noiseAvePercent; /* Noise percent in last minute */
+ u8 noiseAvedBm; /* Noise dBm in last minute */
+ u8 noiseMaxPercent; /* Highest noise percent in last minute */
+ u8 noiseMaxdBm; /* Highest noise dbm in last minute */
+ u16 load;
+ u8 carrier[4];
+ u16 assocStatus;
+#define STAT_NOPACKETS 0
+#define STAT_NOCARRIERSET 10
+#define STAT_GOTCARRIERSET 11
+#define STAT_WRONGSSID 20
+#define STAT_BADCHANNEL 25
+#define STAT_BADBITRATES 30
+#define STAT_BADPRIVACY 35
+#define STAT_APFOUND 40
+#define STAT_APREJECTED 50
+#define STAT_AUTHENTICATING 60
+#define STAT_DEAUTHENTICATED 61
+#define STAT_AUTHTIMEOUT 62
+#define STAT_ASSOCIATING 70
+#define STAT_DEASSOCIATED 71
+#define STAT_ASSOCTIMEOUT 72
+#define STAT_NOTAIROAP 73
+#define STAT_ASSOCIATED 80
+#define STAT_LEAPING 90
+#define STAT_LEAPFAILED 91
+#define STAT_LEAPTIMEDOUT 92
+#define STAT_LEAPCOMPLETE 93
+} StatusRid;
+
+typedef struct {
+ u16 len;
+ u16 spacer;
+ u32 vals[100];
+} StatsRid;
+
+
+typedef struct {
+ u16 len;
+ u8 ap[4][ETH_ALEN];
+} APListRid;
+
+typedef struct {
+ u16 len;
+ char oui[3];
+ char zero;
+ u16 prodNum;
+ char manName[32];
+ char prodName[16];
+ char prodVer[8];
+ char factoryAddr[ETH_ALEN];
+ char aironetAddr[ETH_ALEN];
+ u16 radioType;
+ u16 country;
+ char callid[ETH_ALEN];
+ char supportedRates[8];
+ char rxDiversity;
+ char txDiversity;
+ u16 txPowerLevels[8];
+ u16 hardVer;
+ u16 hardCap;
+ u16 tempRange;
+ u16 softVer;
+ u16 softSubVer;
+ u16 interfaceVer;
+ u16 softCap;
+ u16 bootBlockVer;
+ u16 requiredHard;
+ u16 extSoftCap;
+} CapabilityRid;
+
+typedef struct {
+ u16 len;
+ u16 index; /* First is 0 and 0xffff means end of list */
+#define RADIO_FH 1 /* Frequency hopping radio type */
+#define RADIO_DS 2 /* Direct sequence radio type */
+#define RADIO_TMA 4 /* Proprietary radio used in old cards (2500) */
+ u16 radioType;
+ u8 bssid[ETH_ALEN]; /* Mac address of the BSS */
+ u8 zero;
+ u8 ssidLen;
+ u8 ssid[32];
+ u16 rssi;
+#define CAP_ESS (1<<0)
+#define CAP_IBSS (1<<1)
+#define CAP_PRIVACY (1<<4)
+#define CAP_SHORTHDR (1<<5)
+ u16 cap;
+ u16 beaconInterval;
+ u8 rates[8]; /* Same as rates for config rid */
+ struct { /* For frequency hopping only */
+ u16 dwell;
+ u8 hopSet;
+ u8 hopPattern;
+ u8 hopIndex;
+ u8 fill;
+ } fh;
+ u16 dsChannel;
+ u16 atimWindow;
+} BSSListRid;
+
+typedef struct {
+ u8 rssipct;
+ u8 rssidBm;
+} tdsRssiEntry;
+
+typedef struct {
+ u16 len;
+ tdsRssiEntry x[256];
+} tdsRssiRid;
+
+typedef struct {
+ u16 len;
+ u16 state;
+ u16 multicastValid;
+ u8 multicast[16];
+ u16 unicastValid;
+ u8 unicast[16];
+} MICRid;
+
+typedef struct {
+ u16 typelen;
+
+ union {
+ u8 snap[8];
+ struct {
+ u8 dsap;
+ u8 ssap;
+ u8 control;
+ u8 orgcode[3];
+ u8 fieldtype[2];
+ } llc;
+ } u;
+ u32 mic;
+ u32 seq;
+} MICBuffer;
+
+typedef struct {
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+} etherHead;
+
+#pragma pack()
+
+#define TXCTL_TXOK (1<<1) /* report if tx is ok */
+#define TXCTL_TXEX (1<<2) /* report if tx fails */
+#define TXCTL_802_3 (0<<3) /* 802.3 packet */
+#define TXCTL_802_11 (1<<3) /* 802.11 mac packet */
+#define TXCTL_ETHERNET (0<<4) /* payload has ethertype */
+#define TXCTL_LLC (1<<4) /* payload is llc */
+#define TXCTL_RELEASE (0<<5) /* release after completion */
+#define TXCTL_NORELEASE (1<<5) /* on completion returns to host */
+
+#define BUSY_FID 0x10000
+
+#ifdef CISCO_EXT
+#define AIROMAGIC 0xa55a
+/* Warning: SIOCDEVPRIVATE may disappear during 2.5.X - Jean II */
+#ifdef SIOCIWFIRSTPRIV
+#ifdef SIOCDEVPRIVATE
+#define AIROOLDIOCTL SIOCDEVPRIVATE
+#define AIROOLDIDIFC AIROOLDIOCTL + 1
+#endif /* SIOCDEVPRIVATE */
+#else /* SIOCIWFIRSTPRIV */
+#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
+#endif /* SIOCIWFIRSTPRIV */
+/* This may be wrong. When using the new SIOCIWFIRSTPRIV range, we probably
+ * should use only "GET" ioctls (last bit set to 1). "SET" ioctls are root
+ * only and don't return the modified struct ifreq to the application which
+ * is usually a problem. - Jean II */
+#define AIROIOCTL SIOCIWFIRSTPRIV
+#define AIROIDIFC AIROIOCTL + 1
+
+/* Ioctl constants to be used in airo_ioctl.command */
+
+#define AIROGCAP 0 // Capability rid
+#define AIROGCFG 1 // USED A LOT
+#define AIROGSLIST 2 // System ID list
+#define AIROGVLIST 3 // List of specified AP's
+#define AIROGDRVNAM 4 // NOTUSED
+#define AIROGEHTENC 5 // NOTUSED
+#define AIROGWEPKTMP 6
+#define AIROGWEPKNV 7
+#define AIROGSTAT 8
+#define AIROGSTATSC32 9
+#define AIROGSTATSD32 10
+#define AIROGMICRID 11
+#define AIROGMICSTATS 12
+#define AIROGFLAGS 13
+#define AIROGID 14
+#define AIRORRID 15
+#define AIRORSWVERSION 17
+
+/* Leave a gap of 40 commands after AIROGSTATSD32 for future use */
+
+#define AIROPCAP AIROGSTATSD32 + 40
+#define AIROPVLIST AIROPCAP + 1
+#define AIROPSLIST AIROPVLIST + 1
+#define AIROPCFG AIROPSLIST + 1
+#define AIROPSIDS AIROPCFG + 1
+#define AIROPAPLIST AIROPSIDS + 1
+#define AIROPMACON AIROPAPLIST + 1 /* Enable mac */
+#define AIROPMACOFF AIROPMACON + 1 /* Disable mac */
+#define AIROPSTCLR AIROPMACOFF + 1
+#define AIROPWEPKEY AIROPSTCLR + 1
+#define AIROPWEPKEYNV AIROPWEPKEY + 1
+#define AIROPLEAPPWD AIROPWEPKEYNV + 1
+#define AIROPLEAPUSR AIROPLEAPPWD + 1
+
+/* Flash codes */
+
+#define AIROFLSHRST AIROPWEPKEYNV + 40
+#define AIROFLSHGCHR AIROFLSHRST + 1
+#define AIROFLSHSTFL AIROFLSHGCHR + 1
+#define AIROFLSHPCHR AIROFLSHSTFL + 1
+#define AIROFLPUTBUF AIROFLSHPCHR + 1
+#define AIRORESTART AIROFLPUTBUF + 1
+
+#define FLASHSIZE 32768
+#define AUXMEMSIZE (256 * 1024)
+
+typedef struct aironet_ioctl {
+ unsigned short command; // What to do
+ unsigned short len; // Len of data
+ unsigned short ridnum; // rid number
+ unsigned char __user *data; // d-data
+} aironet_ioctl;
+
+static char *swversion = "2.1";
+#endif /* CISCO_EXT */
+
+#define NUM_MODULES 2
+#define MIC_MSGLEN_MAX 2400
+#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX
+
+typedef struct {
+ u32 size; // size
+ u8 enabled; // MIC enabled or not
+ u32 rxSuccess; // successful packets received
+ u32 rxIncorrectMIC; // pkts dropped due to incorrect MIC comparison
+ u32 rxNotMICed; // pkts dropped due to not being MIC'd
+ u32 rxMICPlummed; // pkts dropped due to not having a MIC plumbed
+ u32 rxWrongSequence; // pkts dropped due to sequence number violation
+ u32 reserve[32];
+} mic_statistics;
+
+typedef struct {
+ u32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
+ u64 accum; // accumulated mic, reduced to u32 in final()
+ int position; // current position (byte offset) in message
+ union {
+ u8 d8[4];
+ u32 d32;
+ } part; // saves partial message word across update() calls
+} emmh32_context;
+
+typedef struct {
+ emmh32_context seed; // Context - the seed
+ u32 rx; // Received sequence number
+ u32 tx; // Tx sequence number
+ u32 window; // Start of window
+ u8 valid; // Flag to say if context is valid or not
+ u8 key[16];
+} miccntx;
+
+typedef struct {
+ miccntx mCtx; // Multicast context
+ miccntx uCtx; // Unicast context
+} mic_module;
+
+typedef struct {
+ unsigned int rid: 16;
+ unsigned int len: 15;
+ unsigned int valid: 1;
+ dma_addr_t host_addr;
+} Rid;
+
+typedef struct {
+ unsigned int offset: 15;
+ unsigned int eoc: 1;
+ unsigned int len: 15;
+ unsigned int valid: 1;
+ dma_addr_t host_addr;
+} TxFid;
+
+typedef struct {
+ unsigned int ctl: 15;
+ unsigned int rdy: 1;
+ unsigned int len: 15;
+ unsigned int valid: 1;
+ dma_addr_t host_addr;
+} RxFid;
+
+/*
+ * Host receive descriptor
+ */
+typedef struct {
+ unsigned char __iomem *card_ram_off; /* offset into card memory of the
+ desc */
+ RxFid rx_desc; /* card receive descriptor */
+ char *virtual_host_addr; /* virtual address of host receive
+ buffer */
+ int pending;
+} HostRxDesc;
+
+/*
+ * Host transmit descriptor
+ */
+typedef struct {
+ unsigned char __iomem *card_ram_off; /* offset into card memory of the
+ desc */
+ TxFid tx_desc; /* card transmit descriptor */
+ char *virtual_host_addr; /* virtual address of host receive
+ buffer */
+ int pending;
+} HostTxDesc;
+
+/*
+ * Host RID descriptor
+ */
+typedef struct {
+ unsigned char __iomem *card_ram_off; /* offset into card memory of the
+ descriptor */
+ Rid rid_desc; /* card RID descriptor */
+ char *virtual_host_addr; /* virtual address of host receive
+ buffer */
+} HostRidDesc;
+
+typedef struct {
+ u16 sw0;
+ u16 sw1;
+ u16 status;
+ u16 len;
+#define HOST_SET (1 << 0)
+#define HOST_INT_TX (1 << 1) /* Interrupt on successful TX */
+#define HOST_INT_TXERR (1 << 2) /* Interrupt on unsuccessful TX */
+#define HOST_LCC_PAYLOAD (1 << 4) /* LLC payload, 0 = Ethertype */
+#define HOST_DONT_RLSE (1 << 5) /* Don't release buffer when done */
+#define HOST_DONT_RETRY (1 << 6) /* Don't retry transmit */
+#define HOST_CLR_AID (1 << 7) /* clear AID failure */
+#define HOST_RTS (1 << 9) /* Force RTS use */
+#define HOST_SHORT (1 << 10) /* Do short preamble */
+ u16 ctl;
+ u16 aid;
+ u16 retries;
+ u16 fill;
+} TxCtlHdr;
+
+typedef struct {
+ u16 ctl;
+ u16 duration;
+ char addr1[6];
+ char addr2[6];
+ char addr3[6];
+ u16 seq;
+ char addr4[6];
+} WifiHdr;
+
+
+typedef struct {
+ TxCtlHdr ctlhdr;
+ u16 fill1;
+ u16 fill2;
+ WifiHdr wifihdr;
+ u16 gaplen;
+ u16 status;
+} WifiCtlHdr;
+
+WifiCtlHdr wifictlhdr8023 = {
+ .ctlhdr = {
+ .ctl = HOST_DONT_RLSE,
+ }
+};
+
+#ifdef WIRELESS_EXT
+// Frequency list (map channels to frequencies)
+static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+
+// A few details needed for WEP (Wired Equivalent Privacy)
+#define MAX_KEY_SIZE 13 // 104-bit key (so-called 128-bit WEP, incl. 24-bit IV)
+#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP
+typedef struct wep_key_t {
+ u16 len;
+ u8 key[16]; /* 40-bit and 104-bit keys */
+} wep_key_t;
+
+/* Backward compatibility */
+#ifndef IW_ENCODE_NOKEY
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
+#endif /* IW_ENCODE_NOKEY */
+
+/* List of Wireless Handlers (new API) */
+static const struct iw_handler_def airo_handler_def;
+#endif /* WIRELESS_EXT */
+
+static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
+
+struct airo_info;
+
+static int get_dec_u16( char *buffer, int *start, int limit );
+static void OUT4500( struct airo_info *, u16 register, u16 value );
+static unsigned short IN4500( struct airo_info *, u16 register );
+static u16 setup_card(struct airo_info*, u8 *mac, int lock);
+static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock );
+static void disable_MAC(struct airo_info *ai, int lock);
+static void enable_interrupts(struct airo_info*);
+static void disable_interrupts(struct airo_info*);
+static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp);
+static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
+static int aux_bap_read(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+static int fast_bap_read(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+static int bap_write(struct airo_info*, const u16 *pu16Src, int bytelen,
+ int whichbap);
+static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
+static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
+static int PC4500_writerid(struct airo_info*, u16 rid, const void
+ *pBuf, int len, int lock);
+static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
+ int len, int dummy );
+static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
+static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
+static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);
+
+static int mpi_send_packet (struct net_device *dev);
+static void mpi_unmap_card(struct pci_dev *pci);
+static void mpi_receive_802_3(struct airo_info *ai);
+static void mpi_receive_802_11(struct airo_info *ai);
+static int waitbusy (struct airo_info *ai);
+
+static irqreturn_t airo_interrupt( int irq, void* dev_id, struct pt_regs
+ *regs);
+static int airo_thread(void *data);
+static void timer_func( struct net_device *dev );
+static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef WIRELESS_EXT
+struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
+static void airo_read_wireless_stats (struct airo_info *local);
+#endif /* WIRELESS_EXT */
+#ifdef CISCO_EXT
+static int readrids(struct net_device *dev, aironet_ioctl *comp);
+static int writerids(struct net_device *dev, aironet_ioctl *comp);
+int flashcard(struct net_device *dev, aironet_ioctl *comp);
+#endif /* CISCO_EXT */
+#ifdef MICSUPPORT
+static void micinit(struct airo_info *ai);
+static int micsetup(struct airo_info *ai);
+static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
+static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
+
+#include <linux/crypto.h>
+#endif
+
+struct airo_info {
+ struct net_device_stats stats;
+ struct net_device *dev;
+ /* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we
+ use the high bit to mark whether it is in use. */
+#define MAX_FIDS 6
+#define MPI_MAX_FIDS 1
+ int fids[MAX_FIDS];
+ ConfigRid config;
+ char keyindex; // Used with auto wep
+ char defindex; // Used with auto wep
+ struct proc_dir_entry *proc_entry;
+ spinlock_t aux_lock;
+ unsigned long flags;
+#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
+#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
+#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
+#define FLAG_RADIO_MASK 0x03
+#define FLAG_ENABLED 2
+#define FLAG_ADHOC 3 /* Needed by MIC */
+#define FLAG_MIC_CAPABLE 4
+#define FLAG_UPDATE_MULTI 5
+#define FLAG_UPDATE_UNI 6
+#define FLAG_802_11 7
+#define FLAG_PENDING_XMIT 9
+#define FLAG_PENDING_XMIT11 10
+#define FLAG_MPI 11
+#define FLAG_REGISTERED 12
+#define FLAG_COMMIT 13
+#define FLAG_RESET 14
+#define FLAG_FLASHING 15
+#define JOB_MASK 0x1ff0000
+#define JOB_DIE 16
+#define JOB_XMIT 17
+#define JOB_XMIT11 18
+#define JOB_STATS 19
+#define JOB_PROMISC 20
+#define JOB_MIC 21
+#define JOB_EVENT 22
+#define JOB_AUTOWEP 23
+#define JOB_WSTATS 24
+ int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+ unsigned short *flash;
+ tdsRssiEntry *rssi;
+ struct task_struct *task;
+ struct semaphore sem;
+ pid_t thr_pid;
+ wait_queue_head_t thr_wait;
+ struct completion thr_exited;
+ unsigned long expires;
+ struct {
+ struct sk_buff *skb;
+ int fid;
+ } xmit, xmit11;
+ struct net_device *wifidev;
+#ifdef WIRELESS_EXT
+ struct iw_statistics wstats; // wireless stats
+ unsigned long scan_timestamp; /* Time started to scan */
+ struct iw_spy_data spy_data;
+ struct iw_public_data wireless_data;
+#endif /* WIRELESS_EXT */
+#ifdef MICSUPPORT
+ /* MIC stuff */
+ struct crypto_tfm *tfm;
+ mic_module mod[2];
+ mic_statistics micstats;
+#endif
+ HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
+ HostTxDesc txfids[MPI_MAX_FIDS];
+ HostRidDesc config_desc;
+ unsigned long ridbus; // phys addr of config_desc
+ struct sk_buff_head txq;// tx queue used by mpi350 code
+ struct pci_dev *pci;
+ unsigned char __iomem *pcimem;
+ unsigned char __iomem *pciaux;
+ unsigned char *shared;
+ dma_addr_t shared_dma;
+ int power;
+ SsidRid *SSID;
+ APListRid *APList;
+#define PCI_SHARED_LEN (2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE)
+ char proc_name[IFNAMSIZ];
+};
+
+static inline int bap_read(struct airo_info *ai, u16 *pu16Dst, int bytelen,
+ int whichbap) {
+ return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
+}
+
+static int setup_proc_entry( struct net_device *dev,
+ struct airo_info *apriv );
+static int takedown_proc_entry( struct net_device *dev,
+ struct airo_info *apriv );
+
+#ifdef MICSUPPORT
+/***********************************************************************
+ * MIC ROUTINES *
+ ***********************************************************************
+ */
+
+static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
+static void MoveWindow(miccntx *context, u32 micSeq);
+void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
+void emmh32_init(emmh32_context *context);
+void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
+void emmh32_final(emmh32_context *context, u8 digest[4]);
+
+/* micinit - Initialize mic seed */
+
+static void micinit(struct airo_info *ai)
+{
+ MICRid mic_rid;
+
+ clear_bit(JOB_MIC, &ai->flags);
+ PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
+ up(&ai->sem);
+
+ ai->micstats.enabled = (mic_rid.state & 0x00FF) ? 1 : 0;
+
+ if (ai->micstats.enabled) {
+ /* Key must be valid and different */
+ if (mic_rid.multicastValid && (!ai->mod[0].mCtx.valid ||
+ (memcmp (ai->mod[0].mCtx.key, mic_rid.multicast,
+ sizeof(ai->mod[0].mCtx.key)) != 0))) {
+ /* Age current mic Context */
+ memcpy(&ai->mod[1].mCtx,&ai->mod[0].mCtx,sizeof(miccntx));
+ /* Initialize new context */
+ memcpy(&ai->mod[0].mCtx.key,mic_rid.multicast,sizeof(mic_rid.multicast));
+ ai->mod[0].mCtx.window = 33; //Window always points to the middle
+ ai->mod[0].mCtx.rx = 0; //Rx Sequence numbers
+ ai->mod[0].mCtx.tx = 0; //Tx sequence numbers
+ ai->mod[0].mCtx.valid = 1; //Key is now valid
+
+ /* Give key to mic seed */
+ emmh32_setseed(&ai->mod[0].mCtx.seed,mic_rid.multicast,sizeof(mic_rid.multicast), ai->tfm);
+ }
+
+ /* Key must be valid and different */
+ if (mic_rid.unicastValid && (!ai->mod[0].uCtx.valid ||
+ (memcmp(ai->mod[0].uCtx.key, mic_rid.unicast,
+ sizeof(ai->mod[0].uCtx.key)) != 0))) {
+ /* Age current mic Context */
+ memcpy(&ai->mod[1].uCtx,&ai->mod[0].uCtx,sizeof(miccntx));
+ /* Initialize new context */
+ memcpy(&ai->mod[0].uCtx.key,mic_rid.unicast,sizeof(mic_rid.unicast));
+
+ ai->mod[0].uCtx.window = 33; //Window always points to the middle
+ ai->mod[0].uCtx.rx = 0; //Rx Sequence numbers
+ ai->mod[0].uCtx.tx = 0; //Tx sequence numbers
+ ai->mod[0].uCtx.valid = 1; //Key is now valid
+
+ //Give key to mic seed
+ emmh32_setseed(&ai->mod[0].uCtx.seed, mic_rid.unicast, sizeof(mic_rid.unicast), ai->tfm);
+ }
+ } else {
+ /* So next time we have a valid key and mic is enabled, we will update
+ * the sequence number if the key is the same as before.
+ */
+ ai->mod[0].uCtx.valid = 0;
+ ai->mod[0].mCtx.valid = 0;
+ }
+}
+
+/* micsetup - Get ready for business */
+
+static int micsetup(struct airo_info *ai) {
+ int i;
+
+ if (ai->tfm == NULL)
+ ai->tfm = crypto_alloc_tfm("aes", 0);
+
+ if (ai->tfm == NULL) {
+ printk(KERN_ERR "airo: failed to load transform for AES\n");
+ return ERROR;
+ }
+
+ for (i=0; i < NUM_MODULES; i++) {
+ memset(&ai->mod[i].mCtx,0,sizeof(miccntx));
+ memset(&ai->mod[i].uCtx,0,sizeof(miccntx));
+ }
+ return SUCCESS;
+}
+
+char micsnap[]= {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
+
+/*===========================================================================
+ * Description: Mic a packet
+ *
+ * Inputs: etherHead * pointer to an 802.3 frame
+ *
+ * Returns: SUCCESS if the packet was MIC'd, otherwise ERROR.
+ *          PacketTxLen will be updated with the MIC'd packet's size.
+ *
+ * Caveats: It is assumed that the frame buffer will already
+ *          be big enough to hold the largest MIC message possible.
+ *          (No memory allocation is done here).
+ *
+ * Author: sbraneky (10/15/01)
+ * Merciless hacks by rwilcher (1/14/02)
+ */
+
+static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, int payLen)
+{
+ miccntx *context;
+
+ // Determine correct context
+ // If not adhoc, always use unicast key
+
+ if (test_bit(FLAG_ADHOC, &ai->flags) && (frame->da[0] & 0x1))
+ context = &ai->mod[0].mCtx;
+ else
+ context = &ai->mod[0].uCtx;
+
+ if (!context->valid)
+ return ERROR;
+
+ mic->typelen = htons(payLen + 16); //Length of Mic'd packet
+
+ memcpy(&mic->u.snap, micsnap, sizeof(micsnap)); // Add Snap
+
+ // Add Tx sequence
+ mic->seq = htonl(context->tx);
+ context->tx += 2;
+
+ emmh32_init(&context->seed); // Mic the packet
+ emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
+ emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
+ emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
+ emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+ emmh32_final(&context->seed, (u8*)&mic->mic);
+
+ /* New Type/length ?????????? */
+ mic->typelen = 0; //Let NIC know it could be an oversized packet
+ return SUCCESS;
+}
+
+typedef enum {
+ NONE,
+ NOMIC,
+ NOMICPLUMMED,
+ SEQUENCE,
+ INCORRECTMIC,
+} mic_error;
+
+/*===========================================================================
+ * Description: Decapsulates a MIC'd packet and returns the 802.3 packet
+ * (removes the MIC stuff) if packet is a valid packet.
+ *
+ * Inputs: etherHead pointer to the 802.3 packet
+ *
+ * Returns: SUCCESS if the packet is valid and should be kept, ERROR if it should be dropped
+ *
+ * Author: sbraneky (10/15/01)
+ * Merciless hacks by rwilcher (1/14/02)
+ *---------------------------------------------------------------------------
+ */
+
+static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 payLen)
+{
+ int i;
+ u32 micSEQ;
+ miccntx *context;
+ u8 digest[4];
+ mic_error micError = NONE;
+
+ // Check if the packet is a Mic'd packet
+
+ if (!ai->micstats.enabled) {
+ //No Mic set or Mic OFF but we received a MIC'd packet.
+ if (memcmp ((u8*)eth + 14, micsnap, sizeof(micsnap)) == 0) {
+ ai->micstats.rxMICPlummed++;
+ return ERROR;
+ }
+ return SUCCESS;
+ }
+
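+	/* 802.1X EAPOL frames (Ethertype 0x888E) are never MIC'd; pass them up untouched */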
+ if (ntohs(mic->typelen) == 0x888E)
+ return SUCCESS;
+
+ if (memcmp (mic->u.snap, micsnap, sizeof(micsnap)) != 0) {
+ // Mic enabled but packet isn't Mic'd
+ ai->micstats.rxMICPlummed++;
+ return ERROR;
+ }
+
+ micSEQ = ntohl(mic->seq); //store SEQ as CPU order
+
+	//At this point we have a MIC'd packet and MIC is enabled
+ //Now do the mic error checking.
+
+ //Receive seq must be odd
+ if ( (micSEQ & 1) == 0 ) {
+ ai->micstats.rxWrongSequence++;
+ return ERROR;
+ }
+
+ for (i = 0; i < NUM_MODULES; i++) {
+ int mcast = eth->da[0] & 1;
+ //Determine proper context
+ context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;
+
+ //Make sure context is valid
+ if (!context->valid) {
+ if (i == 0)
+ micError = NOMICPLUMMED;
+ continue;
+ }
+ //DeMic it
+
+ if (!mic->typelen)
+ mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);
+
+ emmh32_init(&context->seed);
+ emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
+ emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
+ emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
+ emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
+ //Calculate MIC
+ emmh32_final(&context->seed, digest);
+
+ if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
+ //Invalid Mic
+ if (i == 0)
+ micError = INCORRECTMIC;
+ continue;
+ }
+
+ //Check Sequence number if mics pass
+ if (RxSeqValid(ai, context, mcast, micSEQ) == SUCCESS) {
+ ai->micstats.rxSuccess++;
+ return SUCCESS;
+ }
+ if (i == 0)
+ micError = SEQUENCE;
+ }
+
+ // Update statistics
+ switch (micError) {
+ case NOMICPLUMMED: ai->micstats.rxMICPlummed++; break;
+ case SEQUENCE: ai->micstats.rxWrongSequence++; break;
+ case INCORRECTMIC: ai->micstats.rxIncorrectMIC++; break;
+ case NONE: break;
+ case NOMIC: break;
+ }
+ return ERROR;
+}
+
+/*===========================================================================
+ * Description: Checks the Rx Seq number to make sure it is valid
+ * and hasn't already been received
+ *
+ * Inputs: miccntx - mic context to check seq against
+ * micSeq - the Mic seq number
+ *
+ * Returns: TRUE if valid otherwise FALSE.
+ *
+ * Author: sbraneky (10/15/01)
+ * Merciless hacks by rwilcher (1/14/02)
+ *---------------------------------------------------------------------------
+ */
+
+static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
+{
+ u32 seq,index;
+
+	//Allow for the AP being rebooted - if it has been, accept the new
+	//sequence number in place of the current one - it may go backwards
+
+ if (mcast) {
+ if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
+ clear_bit (FLAG_UPDATE_MULTI, &ai->flags);
+ context->window = (micSeq > 33) ? micSeq : 33;
+ context->rx = 0; // Reset rx
+ }
+ } else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
+ clear_bit (FLAG_UPDATE_UNI, &ai->flags);
+ context->window = (micSeq > 33) ? micSeq : 33; // Move window
+ context->rx = 0; // Reset rx
+ }
+
+ //Make sequence number relative to START of window
+ seq = micSeq - (context->window - 33);
+
+ //Too old of a SEQ number to check.
+ if ((s32)seq < 0)
+ return ERROR;
+
+ if ( seq > 64 ) {
+ //Window is infinite forward
+ MoveWindow(context,micSeq);
+ return SUCCESS;
+ }
+
+ // We are in the window. Now check the context rx bit to see if it was already sent
+ seq >>= 1; //divide by 2 because we only have odd numbers
+ index = 1 << seq; //Get an index number
+
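+	/* context->rx is a bitmap of the odd sequence numbers already accepted inside the 64-wide window */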
+ if (!(context->rx & index)) {
+ //micSEQ falls inside the window.
+		//Add sequence number to the list of received numbers.
+ context->rx |= index;
+
+ MoveWindow(context,micSeq);
+
+ return SUCCESS;
+ }
+ return ERROR;
+}
+
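+/* Slide the window so that micSeq becomes its new middle; each bit of
+ * context->rx covers one odd sequence number, hence the shift by one when
+ * dropping history that falls off the back of the window. */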
+static void MoveWindow(miccntx *context, u32 micSeq)
+{
+ u32 shift;
+
+ //Move window if seq greater than the middle of the window
+ if (micSeq > context->window) {
+ shift = (micSeq - context->window) >> 1;
+
+ //Shift out old
+ if (shift < 32)
+ context->rx >>= shift;
+ else
+ context->rx = 0;
+
+ context->window = micSeq; //Move window
+ }
+}
+
+/*==============================================*/
+/*========== EMMH ROUTINES ====================*/
+/*==============================================*/
+
+/* mic accumulate */
+#define MIC_ACCUM(val) \
+	do { context->accum += (u64)(val) * context->coeff[coeff_position++]; } while (0)
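+
+/* The MIC is an MMH-style universal hash: each 32-bit big-endian word of the
+ * message is multiplied by the next AES-derived coefficient and summed into a
+ * 64-bit accumulator, which emmh32_final() folds down to a 32-bit digest. */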
+
+static unsigned char aes_counter[16];
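+/* Only bytes 12..15 of aes_counter are ever written (the block counter); the
+ * rest stay zero, so coefficient generation is effectively AES in counter
+ * mode with an all-zero nonce prefix. */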
+
+/* expand the key to fill the MMH coefficient array */
+void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
+{
+ /* take the keying material, expand if necessary, truncate at 16-bytes */
+ /* run through AES counter mode to generate context->coeff[] */
+
+ int i,j;
+ u32 counter;
+ u8 *cipher, plain[16];
+ struct scatterlist sg[1];
+
+ crypto_cipher_setkey(tfm, pkey, 16);
+ counter = 0;
+ for (i = 0; i < (sizeof(context->coeff)/sizeof(context->coeff[0])); ) {
+ aes_counter[15] = (u8)(counter >> 0);
+ aes_counter[14] = (u8)(counter >> 8);
+ aes_counter[13] = (u8)(counter >> 16);
+ aes_counter[12] = (u8)(counter >> 24);
+ counter++;
+ memcpy (plain, aes_counter, 16);
+ sg[0].page = virt_to_page(plain);
+ sg[0].offset = ((long) plain & ~PAGE_MASK);
+ sg[0].length = 16;
+ crypto_cipher_encrypt(tfm, sg, sg, 16);
+ cipher = kmap(sg[0].page) + sg[0].offset;
+ for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
+ context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
+ j += 4;
+ }
+ }
+}
+
+/* prepare for calculation of a new mic */
+void emmh32_init(emmh32_context *context)
+{
+ /* prepare for new mic calculation */
+ context->accum = 0;
+ context->position = 0;
+}
+
+/* add some bytes to the mic calculation */
+void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
+{
+ int coeff_position, byte_position;
+
+ if (len == 0) return;
+
+ coeff_position = context->position >> 2;
+
+ /* deal with partial 32-bit word left over from last update */
+ byte_position = context->position & 3;
+ if (byte_position) {
+ /* have a partial word in part to deal with */
+ do {
+ if (len == 0) return;
+ context->part.d8[byte_position++] = *pOctets++;
+ context->position++;
+ len--;
+ } while (byte_position < 4);
+ MIC_ACCUM(htonl(context->part.d32));
+ }
+
+ /* deal with full 32-bit words */
+ while (len >= 4) {
+ MIC_ACCUM(htonl(*(u32 *)pOctets));
+ context->position += 4;
+ pOctets += 4;
+ len -= 4;
+ }
+
+ /* deal with partial 32-bit word that will be left over from this update */
+ byte_position = 0;
+ while (len > 0) {
+ context->part.d8[byte_position++] = *pOctets++;
+ context->position++;
+ len--;
+ }
+}
+
+/* mask used to zero empty bytes for final partial word */
+static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L };
+
+/* calculate the mic */
+void emmh32_final(emmh32_context *context, u8 digest[4])
+{
+ int coeff_position, byte_position;
+ u32 val;
+
+ u64 sum, utmp;
+ s64 stmp;
+
+ coeff_position = context->position >> 2;
+
+ /* deal with partial 32-bit word left over from last update */
+ byte_position = context->position & 3;
+ if (byte_position) {
+ /* have a partial word in part to deal with */
+ val = htonl(context->part.d32);
+ MIC_ACCUM(val & mask32[byte_position]); /* zero empty bytes */
+ }
+
+ /* reduce the accumulated u64 to a 32-bit MIC */
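+	/* The fold below is a reduction modulo p = 2^32 + 15 (the 0x10000000f
+	 * constant): since 2^32 == -15 (mod p), each high 32-bit word is
+	 * replaced by -15 times itself, applied twice, then corrected once. */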
+ sum = context->accum;
+ stmp = (sum & 0xffffffffLL) - ((sum >> 32) * 15);
+ utmp = (stmp & 0xffffffffLL) - ((stmp >> 32) * 15);
+ sum = utmp & 0xffffffffLL;
+ if (utmp > 0x10000000fLL)
+ sum -= 15;
+
+ val = (u32)sum;
+ digest[0] = (val>>24) & 0xFF;
+ digest[1] = (val>>16) & 0xFF;
+ digest[2] = (val>>8) & 0xFF;
+ digest[3] = val & 0xFF;
+}
+#endif
+
+static int readBSSListRid(struct airo_info *ai, int first,
+ BSSListRid *list) {
+ int rc;
+ Cmd cmd;
+ Resp rsp;
+
+ if (first == 1) {
+ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LISTBSS;
+ if (down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+ issuecommand(ai, &cmd, &rsp);
+ up(&ai->sem);
+ /* Let the command take effect */
+ ai->task = current;
+ ssleep(3);
+ ai->task = NULL;
+ }
+ rc = PC4500_readrid(ai, first ? RID_BSSLISTFIRST : RID_BSSLISTNEXT,
+ list, sizeof(*list), 1);
+
+ list->len = le16_to_cpu(list->len);
+ list->index = le16_to_cpu(list->index);
+ list->radioType = le16_to_cpu(list->radioType);
+ list->cap = le16_to_cpu(list->cap);
+ list->beaconInterval = le16_to_cpu(list->beaconInterval);
+ list->fh.dwell = le16_to_cpu(list->fh.dwell);
+ list->dsChannel = le16_to_cpu(list->dsChannel);
+ list->atimWindow = le16_to_cpu(list->atimWindow);
+ return rc;
+}
+
+static int readWepKeyRid(struct airo_info*ai, WepKeyRid *wkr, int temp, int lock) {
+ int rc = PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM,
+ wkr, sizeof(*wkr), lock);
+
+ wkr->len = le16_to_cpu(wkr->len);
+ wkr->kindex = le16_to_cpu(wkr->kindex);
+ wkr->klen = le16_to_cpu(wkr->klen);
+ return rc;
+}
+/* In the writeXXXRid routines we copy the rids so that we don't screwup
+ * the originals when we endian them... */
+static int writeWepKeyRid(struct airo_info*ai, WepKeyRid *pwkr, int perm, int lock) {
+ int rc;
+ WepKeyRid wkr = *pwkr;
+
+ wkr.len = cpu_to_le16(wkr.len);
+ wkr.kindex = cpu_to_le16(wkr.kindex);
+ wkr.klen = cpu_to_le16(wkr.klen);
+ rc = PC4500_writerid(ai, RID_WEP_TEMP, &wkr, sizeof(wkr), lock);
+ if (rc!=SUCCESS) printk(KERN_ERR "airo: WEP_TEMP set %x\n", rc);
+ if (perm) {
+ rc = PC4500_writerid(ai, RID_WEP_PERM, &wkr, sizeof(wkr), lock);
+ if (rc!=SUCCESS) {
+ printk(KERN_ERR "airo: WEP_PERM set %x\n", rc);
+ }
+ }
+ return rc;
+}
+
+static int readSsidRid(struct airo_info*ai, SsidRid *ssidr) {
+ int i;
+ int rc = PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr), 1);
+
+ ssidr->len = le16_to_cpu(ssidr->len);
+ for(i = 0; i < 3; i++) {
+ ssidr->ssids[i].len = le16_to_cpu(ssidr->ssids[i].len);
+ }
+ return rc;
+}
+static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr, int lock) {
+ int rc;
+ int i;
+ SsidRid ssidr = *pssidr;
+
+ ssidr.len = cpu_to_le16(ssidr.len);
+ for(i = 0; i < 3; i++) {
+ ssidr.ssids[i].len = cpu_to_le16(ssidr.ssids[i].len);
+ }
+ rc = PC4500_writerid(ai, RID_SSID, &ssidr, sizeof(ssidr), lock);
+ return rc;
+}
+static int readConfigRid(struct airo_info*ai, int lock) {
+ int rc;
+ u16 *s;
+ ConfigRid cfg;
+
+ if (ai->config.len)
+ return SUCCESS;
+
+ rc = PC4500_readrid(ai, RID_ACTUALCONFIG, &cfg, sizeof(cfg), lock);
+ if (rc != SUCCESS)
+ return rc;
+
+ for(s = &cfg.len; s <= &cfg.rtsThres; s++) *s = le16_to_cpu(*s);
+
+ for(s = &cfg.shortRetryLimit; s <= &cfg.radioType; s++)
+ *s = le16_to_cpu(*s);
+
+ for(s = &cfg.txPower; s <= &cfg.radioSpecific; s++)
+ *s = le16_to_cpu(*s);
+
+	for(s = &cfg.arlThreshold; s <= &cfg._reserved4[0]; s++)
+		*s = le16_to_cpu(*s);
+
+	for(s = &cfg.autoWake; s <= &cfg.autoWake; s++)
+		*s = le16_to_cpu(*s);
+
+ ai->config = cfg;
+ return SUCCESS;
+}
+static inline void checkThrottle(struct airo_info *ai) {
+ int i;
+/* Old hardware had a limit on encryption speed */
+ if (ai->config.authType != AUTH_OPEN && maxencrypt) {
+ for(i=0; i<8; i++) {
+ if (ai->config.rates[i] > maxencrypt) {
+ ai->config.rates[i] = 0;
+ }
+ }
+ }
+}
+static int writeConfigRid(struct airo_info*ai, int lock) {
+ u16 *s;
+ ConfigRid cfgr;
+
+ if (!test_bit (FLAG_COMMIT, &ai->flags))
+ return SUCCESS;
+
+ clear_bit (FLAG_COMMIT, &ai->flags);
+ clear_bit (FLAG_RESET, &ai->flags);
+ checkThrottle(ai);
+ cfgr = ai->config;
+
+ if ((cfgr.opmode & 0xFF) == MODE_STA_IBSS)
+ set_bit(FLAG_ADHOC, &ai->flags);
+ else
+ clear_bit(FLAG_ADHOC, &ai->flags);
+
+ for(s = &cfgr.len; s <= &cfgr.rtsThres; s++) *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.shortRetryLimit; s <= &cfgr.radioType; s++)
+ *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.txPower; s <= &cfgr.radioSpecific; s++)
+ *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.arlThreshold; s <= &cfgr._reserved4[0]; s++)
+ *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.autoWake; s <= &cfgr.autoWake; s++)
+ *s = cpu_to_le16(*s);
+
+ return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
+}
+static int readStatusRid(struct airo_info*ai, StatusRid *statr, int lock) {
+ int rc = PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock);
+ u16 *s;
+
+ statr->len = le16_to_cpu(statr->len);
+ for(s = &statr->mode; s <= &statr->SSIDlen; s++) *s = le16_to_cpu(*s);
+
+ for(s = &statr->beaconPeriod; s <= &statr->shortPreamble; s++)
+ *s = le16_to_cpu(*s);
+ statr->load = le16_to_cpu(statr->load);
+ statr->assocStatus = le16_to_cpu(statr->assocStatus);
+ return rc;
+}
+static int readAPListRid(struct airo_info*ai, APListRid *aplr) {
+ int rc = PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr), 1);
+ aplr->len = le16_to_cpu(aplr->len);
+ return rc;
+}
+static int writeAPListRid(struct airo_info*ai, APListRid *aplr, int lock) {
+ int rc;
+ aplr->len = cpu_to_le16(aplr->len);
+ rc = PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock);
+ return rc;
+}
+static int readCapabilityRid(struct airo_info*ai, CapabilityRid *capr, int lock) {
+ int rc = PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr), lock);
+ u16 *s;
+
+ capr->len = le16_to_cpu(capr->len);
+ capr->prodNum = le16_to_cpu(capr->prodNum);
+ capr->radioType = le16_to_cpu(capr->radioType);
+ capr->country = le16_to_cpu(capr->country);
+ for(s = &capr->txPowerLevels[0]; s <= &capr->requiredHard; s++)
+ *s = le16_to_cpu(*s);
+ return rc;
+}
+static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock) {
+ int rc = PC4500_readrid(ai, rid, sr, sizeof(*sr), lock);
+ u32 *i;
+
+ sr->len = le16_to_cpu(sr->len);
+ for(i = &sr->vals[0]; i <= &sr->vals[99]; i++) *i = le32_to_cpu(*i);
+ return rc;
+}
+
+static int airo_open(struct net_device *dev) {
+ struct airo_info *info = dev->priv;
+ Resp rsp;
+
+ if (test_bit(FLAG_FLASHING, &info->flags))
+ return -EIO;
+
+ /* Make sure the card is configured.
+ * Wireless Extensions may postpone config changes until the card
+	 * is open (to pipeline changes and speed up card setup). If
+	 * those changes are not yet committed, do it now - Jean II */
+ if (test_bit (FLAG_COMMIT, &info->flags)) {
+ disable_MAC(info, 1);
+ writeConfigRid(info, 1);
+ }
+
+ if (info->wifidev != dev) {
+ /* Power on the MAC controller (which may have been disabled) */
+ clear_bit(FLAG_RADIO_DOWN, &info->flags);
+ enable_interrupts(info);
+ }
+ enable_MAC(info, &rsp, 1);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
+ int npacks, pending;
+ unsigned long flags;
+ struct airo_info *ai = dev->priv;
+
+ if (!skb) {
+ printk(KERN_ERR "airo: %s: skb==NULL\n",__FUNCTION__);
+ return 0;
+ }
+ npacks = skb_queue_len (&ai->txq);
+
+ if (npacks >= MAXTXQ - 1) {
+ netif_stop_queue (dev);
+ if (npacks > MAXTXQ) {
+ ai->stats.tx_fifo_errors++;
+ return 1;
+ }
+ skb_queue_tail (&ai->txq, skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&ai->aux_lock, flags);
+ skb_queue_tail (&ai->txq, skb);
+ pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
+ spin_unlock_irqrestore(&ai->aux_lock,flags);
+ netif_wake_queue (dev);
+
+ if (pending == 0) {
+ set_bit(FLAG_PENDING_XMIT, &ai->flags);
+ mpi_send_packet (dev);
+ }
+ return 0;
+}
+
+/*
+ * @mpi_send_packet
+ *
+ * Attempt to transmit a packet. Can be called from the interrupt
+ * or transmit path. Returns the number of packets we tried to send.
+ */
+
+static int mpi_send_packet (struct net_device *dev)
+{
+ struct sk_buff *skb;
+ unsigned char *buffer;
+ s16 len, *payloadLen;
+ struct airo_info *ai = dev->priv;
+ u8 *sendbuf;
+
+ /* get a packet to send */
+
+ if ((skb = skb_dequeue(&ai->txq)) == 0) {
+ printk (KERN_ERR
+ "airo: %s: Dequeue'd zero in send_packet()\n",
+ __FUNCTION__);
+ return 0;
+ }
+
+ /* check min length*/
+ len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buffer = skb->data;
+
+ ai->txfids[0].tx_desc.offset = 0;
+ ai->txfids[0].tx_desc.valid = 1;
+ ai->txfids[0].tx_desc.eoc = 1;
+ ai->txfids[0].tx_desc.len =len+sizeof(WifiHdr);
+
+/*
+ * Magic: the card's firmware needs a length count (2 bytes) in the host buffer
+ * right after TXFID_HDR. The TXFID_HDR contains the status short, so payloadlen
+ * is immediately after it.
+ *                         ------------------------------------------------
+ * |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
+ * ------------------------------------------------
+ */
+
+ memcpy((char *)ai->txfids[0].virtual_host_addr,
+ (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
+
+ payloadLen = (s16 *)(ai->txfids[0].virtual_host_addr +
+ sizeof(wifictlhdr8023));
+ sendbuf = ai->txfids[0].virtual_host_addr +
+ sizeof(wifictlhdr8023) + 2 ;
+
+ /*
+	 * Firmware automatically puts the 802 header on, so
+	 * we don't need to account for it in the length
+ */
+#ifdef MICSUPPORT
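+	/* MIC everything except 802.1X EAPOL frames (Ethertype 0x888E) when MIC is enabled */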
+ if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
+ (ntohs(((u16 *)buffer)[6]) != 0x888E)) {
+ MICBuffer pMic;
+
+ if (encapsulate(ai, (etherHead *)buffer, &pMic, len - sizeof(etherHead)) != SUCCESS)
+ return ERROR;
+
+ *payloadLen = cpu_to_le16(len-sizeof(etherHead)+sizeof(pMic));
+ ai->txfids[0].tx_desc.len += sizeof(pMic);
+ /* copy data into airo dma buffer */
+ memcpy (sendbuf, buffer, sizeof(etherHead));
+ buffer += sizeof(etherHead);
+ sendbuf += sizeof(etherHead);
+ memcpy (sendbuf, &pMic, sizeof(pMic));
+ sendbuf += sizeof(pMic);
+ memcpy (sendbuf, buffer, len - sizeof(etherHead));
+ } else
+#endif
+ {
+ *payloadLen = cpu_to_le16(len - sizeof(etherHead));
+
+ dev->trans_start = jiffies;
+
+ /* copy data into airo dma buffer */
+ memcpy(sendbuf, buffer, len);
+ }
+
+ memcpy_toio(ai->txfids[0].card_ram_off,
+ &ai->txfids[0].tx_desc, sizeof(TxFid));
+
+ OUT4500(ai, EVACK, 8);
+
+ dev_kfree_skb_any(skb);
+ return 1;
+}
+
+static void get_tx_error(struct airo_info *ai, s32 fid)
+{
+ u16 status;
+
+ if (fid < 0)
+ status = ((WifiCtlHdr *)ai->txfids[0].virtual_host_addr)->ctlhdr.status;
+ else {
+ if (bap_setup(ai, ai->fids[fid] & 0xffff, 4, BAP0) != SUCCESS)
+ return;
+ bap_read(ai, &status, 2, BAP0);
+ }
+ if (le16_to_cpu(status) & 2) /* Too many retries */
+ ai->stats.tx_aborted_errors++;
+ if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
+ ai->stats.tx_heartbeat_errors++;
+ if (le16_to_cpu(status) & 8) /* Aid fail */
+ { }
+ if (le16_to_cpu(status) & 0x10) /* MAC disabled */
+ ai->stats.tx_carrier_errors++;
+ if (le16_to_cpu(status) & 0x20) /* Association lost */
+ { }
+ /* We produce a TXDROP event only for retry or lifetime
+	 * exceeded, because that's the only status that really means
+	 * that this particular node went away.
+	 * Other errors mean that *we* screwed up. - Jean II */
+ if ((le16_to_cpu(status) & 2) ||
+ (le16_to_cpu(status) & 4)) {
+ union iwreq_data wrqu;
+ char junk[0x18];
+
+ /* Faster to skip over useless data than to do
+ * another bap_setup(). We are at offset 0x6 and
+ * need to go to 0x18 and read 6 bytes - Jean II */
+ bap_read(ai, (u16 *) junk, 0x18, BAP0);
+
+ /* Copy 802.11 dest address.
+ * We use the 802.11 header because the frame may
+ * not be 802.3 or may be mangled...
+ * In Ad-Hoc mode, it will be the node address.
+ * In managed mode, it will be most likely the AP addr
+ * User space will figure out how to convert it to
+ * whatever it needs (IP address or else).
+ * - Jean II */
+ memcpy(wrqu.addr.sa_data, junk + 0x12, ETH_ALEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ /* Send event to user space */
+ wireless_send_event(ai->dev, IWEVTXDROP, &wrqu, NULL);
+ }
+}
+
+static void airo_end_xmit(struct net_device *dev) {
+ u16 status;
+ int i;
+ struct airo_info *priv = dev->priv;
+ struct sk_buff *skb = priv->xmit.skb;
+ int fid = priv->xmit.fid;
+ u32 *fids = priv->fids;
+
+ clear_bit(JOB_XMIT, &priv->flags);
+ clear_bit(FLAG_PENDING_XMIT, &priv->flags);
+ status = transmit_802_3_packet (priv, fids[fid], skb->data);
+ up(&priv->sem);
+
+ i = 0;
+ if ( status == SUCCESS ) {
+ dev->trans_start = jiffies;
+ for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
+ } else {
+ priv->fids[fid] &= 0xffff;
+ priv->stats.tx_window_errors++;
+ }
+ if (i < MAX_FIDS / 2)
+ netif_wake_queue(dev);
+ dev_kfree_skb(skb);
+}
+
+static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
+ s16 len;
+ int i, j;
+ struct airo_info *priv = dev->priv;
+ u32 *fids = priv->fids;
+
+ if ( skb == NULL ) {
+ printk( KERN_ERR "airo: skb == NULL!!!\n" );
+ return 0;
+ }
+
+ /* Find a vacant FID */
+ for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
+ for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ );
+
+ if ( j >= MAX_FIDS / 2 ) {
+ netif_stop_queue(dev);
+
+ if (i == MAX_FIDS / 2) {
+ priv->stats.tx_fifo_errors++;
+ return 1;
+ }
+ }
+ /* check min length*/
+ len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ /* Mark fid as used & save length for later */
+ fids[i] |= (len << 16);
+ priv->xmit.skb = skb;
+ priv->xmit.fid = i;
+ if (down_trylock(&priv->sem) != 0) {
+ set_bit(FLAG_PENDING_XMIT, &priv->flags);
+ netif_stop_queue(dev);
+ set_bit(JOB_XMIT, &priv->flags);
+ wake_up_interruptible(&priv->thr_wait);
+ } else
+ airo_end_xmit(dev);
+ return 0;
+}
+
+static void airo_end_xmit11(struct net_device *dev) {
+ u16 status;
+ int i;
+ struct airo_info *priv = dev->priv;
+ struct sk_buff *skb = priv->xmit11.skb;
+ int fid = priv->xmit11.fid;
+ u32 *fids = priv->fids;
+
+ clear_bit(JOB_XMIT11, &priv->flags);
+ clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
+ status = transmit_802_11_packet (priv, fids[fid], skb->data);
+ up(&priv->sem);
+
+ i = MAX_FIDS / 2;
+ if ( status == SUCCESS ) {
+ dev->trans_start = jiffies;
+ for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
+ } else {
+ priv->fids[fid] &= 0xffff;
+ priv->stats.tx_window_errors++;
+ }
+ if (i < MAX_FIDS)
+ netif_wake_queue(dev);
+ dev_kfree_skb(skb);
+}
+
+static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
+ s16 len;
+ int i, j;
+ struct airo_info *priv = dev->priv;
+ u32 *fids = priv->fids;
+
+ if (test_bit(FLAG_MPI, &priv->flags)) {
+ /* Not implemented yet for MPI350 */
+ netif_stop_queue(dev);
+ return -ENETDOWN;
+ }
+
+ if ( skb == NULL ) {
+ printk( KERN_ERR "airo: skb == NULL!!!\n" );
+ return 0;
+ }
+
+ /* Find a vacant FID */
+ for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
+ for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ );
+
+ if ( j >= MAX_FIDS ) {
+ netif_stop_queue(dev);
+
+ if (i == MAX_FIDS) {
+ priv->stats.tx_fifo_errors++;
+ return 1;
+ }
+ }
+ /* check min length*/
+ len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ /* Mark fid as used & save length for later */
+ fids[i] |= (len << 16);
+ priv->xmit11.skb = skb;
+ priv->xmit11.fid = i;
+ if (down_trylock(&priv->sem) != 0) {
+ set_bit(FLAG_PENDING_XMIT11, &priv->flags);
+ netif_stop_queue(dev);
+ set_bit(JOB_XMIT11, &priv->flags);
+ wake_up_interruptible(&priv->thr_wait);
+ } else
+ airo_end_xmit11(dev);
+ return 0;
+}
+
+static void airo_read_stats(struct airo_info *ai) {
+ StatsRid stats_rid;
+ u32 *vals = stats_rid.vals;
+
+ clear_bit(JOB_STATS, &ai->flags);
+ if (ai->power) {
+ up(&ai->sem);
+ return;
+ }
+ readStatsRid(ai, &stats_rid, RID_STATS, 0);
+ up(&ai->sem);
+
+ ai->stats.rx_packets = vals[43] + vals[44] + vals[45];
+ ai->stats.tx_packets = vals[39] + vals[40] + vals[41];
+ ai->stats.rx_bytes = vals[92];
+ ai->stats.tx_bytes = vals[91];
+ ai->stats.rx_errors = vals[0] + vals[2] + vals[3] + vals[4];
+ ai->stats.tx_errors = vals[42] + ai->stats.tx_fifo_errors;
+ ai->stats.multicast = vals[43];
+ ai->stats.collisions = vals[89];
+
+ /* detailed rx_errors: */
+ ai->stats.rx_length_errors = vals[3];
+ ai->stats.rx_crc_errors = vals[4];
+ ai->stats.rx_frame_errors = vals[2];
+ ai->stats.rx_fifo_errors = vals[0];
+}
+
+struct net_device_stats *airo_get_stats(struct net_device *dev)
+{
+ struct airo_info *local = dev->priv;
+
+ if (!test_bit(JOB_STATS, &local->flags)) {
+ /* Get stats out of the card if available */
+ if (down_trylock(&local->sem) != 0) {
+ set_bit(JOB_STATS, &local->flags);
+ wake_up_interruptible(&local->thr_wait);
+ } else
+ airo_read_stats(local);
+ }
+
+ return &local->stats;
+}
+
+static void airo_set_promisc(struct airo_info *ai) {
+ Cmd cmd;
+ Resp rsp;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_SETMODE;
+ clear_bit(JOB_PROMISC, &ai->flags);
+ cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
+ issuecommand(ai, &cmd, &rsp);
+ up(&ai->sem);
+}
+
+static void airo_set_multicast_list(struct net_device *dev) {
+ struct airo_info *ai = dev->priv;
+
+ if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
+ change_bit(FLAG_PROMISC, &ai->flags);
+ if (down_trylock(&ai->sem) != 0) {
+ set_bit(JOB_PROMISC, &ai->flags);
+ wake_up_interruptible(&ai->thr_wait);
+ } else
+ airo_set_promisc(ai);
+ }
+
+ if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ /* Turn on multicast. (Should be already setup...) */
+ }
+}
+
+static int airo_set_mac_address(struct net_device *dev, void *p)
+{
+ struct airo_info *ai = dev->priv;
+ struct sockaddr *addr = p;
+ Resp rsp;
+
+ readConfigRid(ai, 1);
+ memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
+ set_bit (FLAG_COMMIT, &ai->flags);
+ disable_MAC(ai, 1);
+ writeConfigRid (ai, 1);
+ enable_MAC(ai, &rsp, 1);
+ memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
+ if (ai->wifidev)
+ memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int airo_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 2400))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+static int airo_close(struct net_device *dev) {
+ struct airo_info *ai = dev->priv;
+
+ netif_stop_queue(dev);
+
+ if (ai->wifidev != dev) {
+#ifdef POWER_ON_DOWN
+ /* Shut power to the card. The idea is that the user can save
+ * power when he doesn't need the card with "ifconfig down".
+ * That's the method that is most friendly towards the network
+ * stack (i.e. the network stack won't try to broadcast
+		 * anything on the interface and routes are gone). Jean II */
+ set_bit(FLAG_RADIO_DOWN, &ai->flags);
+ disable_MAC(ai, 1);
+#endif
+ disable_interrupts( ai );
+ }
+ return 0;
+}
+
+static void del_airo_dev( struct net_device *dev );
+
+void stop_airo_card( struct net_device *dev, int freeres )
+{
+ struct airo_info *ai = dev->priv;
+
+ set_bit(FLAG_RADIO_DOWN, &ai->flags);
+ disable_MAC(ai, 1);
+ disable_interrupts(ai);
+ free_irq( dev->irq, dev );
+ takedown_proc_entry( dev, ai );
+ if (test_bit(FLAG_REGISTERED, &ai->flags)) {
+ unregister_netdev( dev );
+ if (ai->wifidev) {
+ unregister_netdev(ai->wifidev);
+ free_netdev(ai->wifidev);
+ ai->wifidev = NULL;
+ }
+ clear_bit(FLAG_REGISTERED, &ai->flags);
+ }
+ set_bit(JOB_DIE, &ai->flags);
+ kill_proc(ai->thr_pid, SIGTERM, 1);
+ wait_for_completion(&ai->thr_exited);
+
+ /*
+ * Clean out tx queue
+ */
+ if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) {
+ struct sk_buff *skb = NULL;
+ for (;(skb = skb_dequeue(&ai->txq));)
+ dev_kfree_skb(skb);
+ }
+
+	kfree(ai->flash);
+	kfree(ai->rssi);
+	kfree(ai->APList);
+	kfree(ai->SSID);
+ if (freeres) {
+ /* PCMCIA frees this stuff, so only for PCI and ISA */
+ release_region( dev->base_addr, 64 );
+ if (test_bit(FLAG_MPI, &ai->flags)) {
+ if (ai->pci)
+ mpi_unmap_card(ai->pci);
+ if (ai->pcimem)
+ iounmap(ai->pcimem);
+ if (ai->pciaux)
+ iounmap(ai->pciaux);
+ pci_free_consistent(ai->pci, PCI_SHARED_LEN,
+ ai->shared, ai->shared_dma);
+ }
+ }
+#ifdef MICSUPPORT
+ if (ai->tfm)
+ crypto_free_tfm(ai->tfm);
+#endif
+ del_airo_dev( dev );
+ free_netdev( dev );
+}
+
+EXPORT_SYMBOL(stop_airo_card);
+
+static int add_airo_dev( struct net_device *dev );
+
+int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->mac.raw + 10, ETH_ALEN);
+ return ETH_ALEN;
+}
+
+static void mpi_unmap_card(struct pci_dev *pci)
+{
+ unsigned long mem_start = pci_resource_start(pci, 1);
+ unsigned long mem_len = pci_resource_len(pci, 1);
+ unsigned long aux_start = pci_resource_start(pci, 2);
+ unsigned long aux_len = AUXMEMSIZE;
+
+ release_mem_region(aux_start, aux_len);
+ release_mem_region(mem_start, mem_len);
+}
+
+/*************************************************************
+ *  This routine assumes that descriptors have been set up.
+ *  Run at insmod time or after reset, when the descriptors
+ *  have been initialized. Returns 0 if all is well, nonzero
+ *  otherwise. Does not allocate memory but sets up the card
+ * using previously allocated descriptors.
+ */
+static int mpi_init_descriptors (struct airo_info *ai)
+{
+ Cmd cmd;
+ Resp rsp;
+ int i;
+ int rc = SUCCESS;
+
+ /* Alloc card RX descriptors */
+ netif_stop_queue(ai->dev);
+
+ memset(&rsp,0,sizeof(rsp));
+ memset(&cmd,0,sizeof(cmd));
+
+ cmd.cmd = CMD_ALLOCATEAUX;
+ cmd.parm0 = FID_RX;
+ cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
+ cmd.parm2 = MPI_MAX_FIDS;
+ rc=issuecommand(ai, &cmd, &rsp);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR "airo: Couldn't allocate RX FID\n");
+ return rc;
+ }
+
+ for (i=0; i<MPI_MAX_FIDS; i++) {
+ memcpy_toio(ai->rxfids[i].card_ram_off,
+ &ai->rxfids[i].rx_desc, sizeof(RxFid));
+ }
+
+ /* Alloc card TX descriptors */
+
+ memset(&rsp,0,sizeof(rsp));
+ memset(&cmd,0,sizeof(cmd));
+
+ cmd.cmd = CMD_ALLOCATEAUX;
+ cmd.parm0 = FID_TX;
+ cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
+ cmd.parm2 = MPI_MAX_FIDS;
+
+ for (i=0; i<MPI_MAX_FIDS; i++) {
+ ai->txfids[i].tx_desc.valid = 1;
+ memcpy_toio(ai->txfids[i].card_ram_off,
+ &ai->txfids[i].tx_desc, sizeof(TxFid));
+ }
+ ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
+
+ rc=issuecommand(ai, &cmd, &rsp);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR "airo: Couldn't allocate TX FID\n");
+ return rc;
+ }
+
+ /* Alloc card Rid descriptor */
+ memset(&rsp,0,sizeof(rsp));
+ memset(&cmd,0,sizeof(cmd));
+
+ cmd.cmd = CMD_ALLOCATEAUX;
+ cmd.parm0 = RID_RW;
+ cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
+ cmd.parm2 = 1; /* Magic number... */
+ rc=issuecommand(ai, &cmd, &rsp);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR "airo: Couldn't allocate RID\n");
+ return rc;
+ }
+
+ memcpy_toio(ai->config_desc.card_ram_off,
+ &ai->config_desc.rid_desc, sizeof(Rid));
+
+ return rc;
+}
+
+/*
+ * We are setting up three things here:
+ * 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid.
+ * 2) Map PCI memory for issuing commands.
+ * 3) Allocate memory (shared) to send and receive ethernet frames.
+ */
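+/*
+ * The shared DMA area (PCI_SHARED_LEN bytes) is carved up below as:
+ *   MPI_MAX_FIDS * PKTSIZE   RX packet buffers
+ *   MPI_MAX_FIDS * PKTSIZE   TX packet buffers
+ *   RIDSIZE                  RID transfer buffer
+ */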
+static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
+ const char *name)
+{
+ unsigned long mem_start, mem_len, aux_start, aux_len;
+ int rc = -1;
+ int i;
+ unsigned char *busaddroff,*vpackoff;
+ unsigned char __iomem *pciaddroff;
+
+ mem_start = pci_resource_start(pci, 1);
+ mem_len = pci_resource_len(pci, 1);
+ aux_start = pci_resource_start(pci, 2);
+ aux_len = AUXMEMSIZE;
+
+ if (!request_mem_region(mem_start, mem_len, name)) {
+ printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n",
+ (int)mem_start, (int)mem_len, name);
+ goto out;
+ }
+ if (!request_mem_region(aux_start, aux_len, name)) {
+ printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n",
+ (int)aux_start, (int)aux_len, name);
+ goto free_region1;
+ }
+
+ ai->pcimem = ioremap(mem_start, mem_len);
+ if (!ai->pcimem) {
+ printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n",
+ (int)mem_start, (int)mem_len, name);
+ goto free_region2;
+ }
+ ai->pciaux = ioremap(aux_start, aux_len);
+ if (!ai->pciaux) {
+ printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n",
+ (int)aux_start, (int)aux_len, name);
+ goto free_memmap;
+ }
+
+ /* Reserve PKTSIZE for each fid and 2K for the Rids */
+ ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
+ if (!ai->shared) {
+ printk(KERN_ERR "airo: Couldn't alloc_consistent %d\n",
+ PCI_SHARED_LEN);
+ goto free_auxmap;
+ }
+
+ /*
+ * Setup descriptor RX, TX, CONFIG
+ */
+ busaddroff = (unsigned char *)ai->shared_dma;
+ pciaddroff = ai->pciaux + AUX_OFFSET;
+ vpackoff = ai->shared;
+
+ /* RX descriptor setup */
+ for(i = 0; i < MPI_MAX_FIDS; i++) {
+ ai->rxfids[i].pending = 0;
+ ai->rxfids[i].card_ram_off = pciaddroff;
+ ai->rxfids[i].virtual_host_addr = vpackoff;
+ ai->rxfids[i].rx_desc.host_addr = (dma_addr_t) busaddroff;
+ ai->rxfids[i].rx_desc.valid = 1;
+ ai->rxfids[i].rx_desc.len = PKTSIZE;
+ ai->rxfids[i].rx_desc.rdy = 0;
+
+ pciaddroff += sizeof(RxFid);
+ busaddroff += PKTSIZE;
+ vpackoff += PKTSIZE;
+ }
+
+ /* TX descriptor setup */
+ for(i = 0; i < MPI_MAX_FIDS; i++) {
+ ai->txfids[i].card_ram_off = pciaddroff;
+ ai->txfids[i].virtual_host_addr = vpackoff;
+ ai->txfids[i].tx_desc.valid = 1;
+ ai->txfids[i].tx_desc.host_addr = (dma_addr_t) busaddroff;
+ memcpy(ai->txfids[i].virtual_host_addr,
+ &wifictlhdr8023, sizeof(wifictlhdr8023));
+
+ pciaddroff += sizeof(TxFid);
+ busaddroff += PKTSIZE;
+ vpackoff += PKTSIZE;
+ }
+ ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
+
+ /* Rid descriptor setup */
+ ai->config_desc.card_ram_off = pciaddroff;
+ ai->config_desc.virtual_host_addr = vpackoff;
+ ai->config_desc.rid_desc.host_addr = (dma_addr_t) busaddroff;
+ ai->ridbus = (dma_addr_t)busaddroff;
+ ai->config_desc.rid_desc.rid = 0;
+ ai->config_desc.rid_desc.len = RIDSIZE;
+ ai->config_desc.rid_desc.valid = 1;
+ pciaddroff += sizeof(Rid);
+ busaddroff += RIDSIZE;
+ vpackoff += RIDSIZE;
+
+ /* Tell card about descriptors */
+ if (mpi_init_descriptors (ai) != SUCCESS)
+ goto free_shared;
+
+ return 0;
+ free_shared:
+ pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ free_auxmap:
+ iounmap(ai->pciaux);
+ free_memmap:
+ iounmap(ai->pcimem);
+ free_region2:
+ release_mem_region(aux_start, aux_len);
+ free_region1:
+ release_mem_region(mem_start, mem_len);
+ out:
+ return rc;
+}
+
+static void wifi_setup(struct net_device *dev)
+{
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+ dev->hard_header_cache = NULL;
+ dev->header_cache_update= NULL;
+
+ dev->hard_header_parse = wll_header_parse;
+ dev->hard_start_xmit = &airo_start_xmit11;
+ dev->get_stats = &airo_get_stats;
+ dev->set_mac_address = &airo_set_mac_address;
+ dev->do_ioctl = &airo_ioctl;
+#ifdef WIRELESS_EXT
+ dev->wireless_handlers = &airo_handler_def;
+#endif /* WIRELESS_EXT */
+ dev->change_mtu = &airo_change_mtu;
+ dev->open = &airo_open;
+ dev->stop = &airo_close;
+
+ dev->type = ARPHRD_IEEE80211;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 2312;
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100;
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+}
+
+static struct net_device *init_wifidev(struct airo_info *ai,
+ struct net_device *ethdev)
+{
+ int err;
+ struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
+ if (!dev)
+ return NULL;
+ dev->priv = ethdev->priv;
+ dev->irq = ethdev->irq;
+ dev->base_addr = ethdev->base_addr;
+#ifdef WIRELESS_EXT
+ dev->wireless_data = ethdev->wireless_data;
+#endif /* WIRELESS_EXT */
+ memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
+ err = register_netdev(dev);
+ if (err<0) {
+ free_netdev(dev);
+ return NULL;
+ }
+ return dev;
+}
+
+int reset_card( struct net_device *dev , int lock) {
+ struct airo_info *ai = dev->priv;
+
+ if (lock && down_interruptible(&ai->sem))
+ return -1;
+ waitbusy (ai);
+ OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ msleep(200);
+ waitbusy (ai);
+ msleep(200);
+ if (lock)
+ up(&ai->sem);
+ return 0;
+}
+
+struct net_device *_init_airo_card( unsigned short irq, int port,
+ int is_pcmcia, struct pci_dev *pci,
+ struct device *dmdev )
+{
+ struct net_device *dev;
+ struct airo_info *ai;
+ int i, rc;
+
+ /* Create the network device object. */
+ dev = alloc_etherdev(sizeof(*ai));
+ if (!dev) {
+ printk(KERN_ERR "airo: Couldn't alloc_etherdev\n");
+ return NULL;
+ }
+ if (dev_alloc_name(dev, dev->name) < 0) {
+ printk(KERN_ERR "airo: Couldn't get name!\n");
+ goto err_out_free;
+ }
+
+ ai = dev->priv;
+ ai->wifidev = NULL;
+ ai->flags = 0;
+ if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
+ printk(KERN_DEBUG "airo: Found an MPI350 card\n");
+ set_bit(FLAG_MPI, &ai->flags);
+ }
+ ai->dev = dev;
+ spin_lock_init(&ai->aux_lock);
+ sema_init(&ai->sem, 1);
+ ai->config.len = 0;
+ ai->pci = pci;
+ init_waitqueue_head (&ai->thr_wait);
+ init_completion (&ai->thr_exited);
+ ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES);
+ if (ai->thr_pid < 0)
+ goto err_out_free;
+#ifdef MICSUPPORT
+ ai->tfm = NULL;
+#endif
+ rc = add_airo_dev( dev );
+ if (rc)
+ goto err_out_thr;
+
+ /* The Airo-specific entries in the device structure. */
+ if (test_bit(FLAG_MPI,&ai->flags)) {
+ skb_queue_head_init (&ai->txq);
+ dev->hard_start_xmit = &mpi_start_xmit;
+ } else
+ dev->hard_start_xmit = &airo_start_xmit;
+ dev->get_stats = &airo_get_stats;
+ dev->set_multicast_list = &airo_set_multicast_list;
+ dev->set_mac_address = &airo_set_mac_address;
+ dev->do_ioctl = &airo_ioctl;
+#ifdef WIRELESS_EXT
+ dev->wireless_handlers = &airo_handler_def;
+ ai->wireless_data.spy_data = &ai->spy_data;
+ dev->wireless_data = &ai->wireless_data;
+#endif /* WIRELESS_EXT */
+ dev->change_mtu = &airo_change_mtu;
+ dev->open = &airo_open;
+ dev->stop = &airo_close;
+ dev->irq = irq;
+ dev->base_addr = port;
+
+ SET_NETDEV_DEV(dev, dmdev);
+
+
+ if (test_bit(FLAG_MPI,&ai->flags))
+ reset_card (dev, 1);
+
+ rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev );
+ if (rc) {
+ printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc );
+ goto err_out_unlink;
+ }
+ if (!is_pcmcia) {
+ if (!request_region( dev->base_addr, 64, dev->name )) {
+ rc = -EBUSY;
+ printk(KERN_ERR "airo: Couldn't request region\n");
+ goto err_out_irq;
+ }
+ }
+
+ if (test_bit(FLAG_MPI,&ai->flags)) {
+ if (mpi_map_card(ai, pci, dev->name)) {
+ printk(KERN_ERR "airo: Could not map memory\n");
+ goto err_out_res;
+ }
+ }
+
+ if (probe) {
+ if ( setup_card( ai, dev->dev_addr, 1 ) != SUCCESS ) {
+ printk( KERN_ERR "airo: MAC could not be enabled\n" );
+ rc = -EIO;
+ goto err_out_map;
+ }
+ } else if (!test_bit(FLAG_MPI,&ai->flags)) {
+ ai->bap_read = fast_bap_read;
+ set_bit(FLAG_FLASHING, &ai->flags);
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "airo: Couldn't register_netdev\n");
+ goto err_out_map;
+ }
+ ai->wifidev = init_wifidev(ai, dev);
+
+ set_bit(FLAG_REGISTERED,&ai->flags);
+ printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n",
+ dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] );
+
+ /* Allocate the transmit buffers */
+ if (probe && !test_bit(FLAG_MPI,&ai->flags))
+ for( i = 0; i < MAX_FIDS; i++ )
+ ai->fids[i] = transmit_allocate(ai,2312,i>=MAX_FIDS/2);
+
+ setup_proc_entry( dev, dev->priv ); /* XXX check for failure */
+ netif_start_queue(dev);
+ SET_MODULE_OWNER(dev);
+ return dev;
+
+err_out_map:
+ if (test_bit(FLAG_MPI,&ai->flags) && pci) {
+ pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ iounmap(ai->pciaux);
+ iounmap(ai->pcimem);
+ mpi_unmap_card(ai->pci);
+ }
+err_out_res:
+ if (!is_pcmcia)
+ release_region( dev->base_addr, 64 );
+err_out_irq:
+ free_irq(dev->irq, dev);
+err_out_unlink:
+ del_airo_dev(dev);
+err_out_thr:
+ set_bit(JOB_DIE, &ai->flags);
+ kill_proc(ai->thr_pid, SIGTERM, 1);
+ wait_for_completion(&ai->thr_exited);
+err_out_free:
+ free_netdev(dev);
+ return NULL;
+}
+
+struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
+ struct device *dmdev)
+{
+ return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
+}
+
+EXPORT_SYMBOL(init_airo_card);
+
+static int waitbusy (struct airo_info *ai) {
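+	/* Poll for up to ~100ms (10000 * 10us) for COMMAND_BUSY to clear,
+	 * nudging the card with EV_CLEARCOMMANDBUSY every 200us */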
+ int delay = 0;
+	while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
+ udelay (10);
+ if ((++delay % 20) == 0)
+ OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+ }
+ return delay < 10000;
+}
+
+int reset_airo_card( struct net_device *dev )
+{
+ int i;
+ struct airo_info *ai = dev->priv;
+
+ if (reset_card (dev, 1))
+ return -1;
+
+ if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
+ printk( KERN_ERR "airo: MAC could not be enabled\n" );
+ return -1;
+ }
+ printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n", dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ /* Allocate the transmit buffers if needed */
+ if (!test_bit(FLAG_MPI,&ai->flags))
+ for( i = 0; i < MAX_FIDS; i++ )
+ ai->fids[i] = transmit_allocate (ai,2312,i>=MAX_FIDS/2);
+
+ enable_interrupts( ai );
+ netif_wake_queue(dev);
+ return 0;
+}
+
+EXPORT_SYMBOL(reset_airo_card);
+
+static void airo_send_event(struct net_device *dev) {
+ struct airo_info *ai = dev->priv;
+ union iwreq_data wrqu;
+ StatusRid status_rid;
+
+ clear_bit(JOB_EVENT, &ai->flags);
+ PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
+ up(&ai->sem);
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ memcpy(wrqu.ap_addr.sa_data, status_rid.bssid[0], ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+ /* Send event to user space */
+ wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static int airo_thread(void *data) {
+ struct net_device *dev = data;
+ struct airo_info *ai = dev->priv;
+ int locked;
+
+ daemonize("%s", dev->name);
+ allow_signal(SIGTERM);
+
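+	/* Worker loop: sleep until one of the JOB_* bits is set in ai->flags
+	 * (or the auto-WEP timer expires), take ai->sem, then dispatch the
+	 * job; each job handler is expected to release the semaphore itself. */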
+ while(1) {
+ if (signal_pending(current))
+ flush_signals(current);
+
+ /* make swsusp happy with our thread */
+ try_to_freeze(PF_FREEZE);
+
+ if (test_bit(JOB_DIE, &ai->flags))
+ break;
+
+ if (ai->flags & JOB_MASK) {
+ locked = down_interruptible(&ai->sem);
+ } else {
+ wait_queue_t wait;
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&ai->thr_wait, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (ai->flags & JOB_MASK)
+ break;
+ if (ai->expires) {
+ if (time_after_eq(jiffies,ai->expires)){
+ set_bit(JOB_AUTOWEP,&ai->flags);
+ break;
+ }
+ if (!signal_pending(current)) {
+ schedule_timeout(ai->expires - jiffies);
+ continue;
+ }
+ } else if (!signal_pending(current)) {
+ schedule();
+ continue;
+ }
+ break;
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&ai->thr_wait, &wait);
+ locked = 1;
+ }
+
+ if (locked)
+ continue;
+
+ if (test_bit(JOB_DIE, &ai->flags)) {
+ up(&ai->sem);
+ break;
+ }
+
+ if (ai->power || test_bit(FLAG_FLASHING, &ai->flags)) {
+ up(&ai->sem);
+ continue;
+ }
+
+ if (test_bit(JOB_XMIT, &ai->flags))
+ airo_end_xmit(dev);
+ else if (test_bit(JOB_XMIT11, &ai->flags))
+ airo_end_xmit11(dev);
+ else if (test_bit(JOB_STATS, &ai->flags))
+ airo_read_stats(ai);
+ else if (test_bit(JOB_WSTATS, &ai->flags))
+ airo_read_wireless_stats(ai);
+ else if (test_bit(JOB_PROMISC, &ai->flags))
+ airo_set_promisc(ai);
+#ifdef MICSUPPORT
+ else if (test_bit(JOB_MIC, &ai->flags))
+ micinit(ai);
+#endif
+ else if (test_bit(JOB_EVENT, &ai->flags))
+ airo_send_event(dev);
+ else if (test_bit(JOB_AUTOWEP, &ai->flags))
+ timer_func(dev);
+ }
+ complete_and_exit (&ai->thr_exited, 0);
+}
+
+static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) {
+ struct net_device *dev = (struct net_device *)dev_id;
+ u16 status;
+ u16 fid;
+ struct airo_info *apriv = dev->priv;
+ u16 savedInterrupts = 0;
+ int handled = 0;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ for (;;) {
+ status = IN4500( apriv, EVSTAT );
+ if ( !(status & STATUS_INTS) || status == 0xffff ) break;
+
+ handled = 1;
+
+ if ( status & EV_AWAKE ) {
+ OUT4500( apriv, EVACK, EV_AWAKE );
+ OUT4500( apriv, EVACK, EV_AWAKE );
+ }
+
+ if (!savedInterrupts) {
+ savedInterrupts = IN4500( apriv, EVINTEN );
+ OUT4500( apriv, EVINTEN, 0 );
+ }
+
+ if ( status & EV_MIC ) {
+ OUT4500( apriv, EVACK, EV_MIC );
+#ifdef MICSUPPORT
+ if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
+ set_bit(JOB_MIC, &apriv->flags);
+ wake_up_interruptible(&apriv->thr_wait);
+ }
+#endif
+ }
+ if ( status & EV_LINK ) {
+ union iwreq_data wrqu;
+ /* The link status has changed, if you want to put a
+ monitor hook in, do it here. (Remember that
+ interrupts are still disabled!)
+ */
+ u16 newStatus = IN4500(apriv, LINKSTAT);
+ OUT4500( apriv, EVACK, EV_LINK);
+ /* Here is what newStatus means: */
+#define NOBEACON 0x8000 /* Loss of sync - missed beacons */
+#define MAXRETRIES 0x8001 /* Loss of sync - max retries */
+#define MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
+#define FORCELOSS 0x8003 /* Loss of sync - host request */
+#define TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
+#define DEAUTH 0x8100 /* Deauthentication (low byte is reason code) */
+#define DISASS 0x8200 /* Disassociation (low byte is reason code) */
+#define ASSFAIL 0x8400 /* Association failure (low byte is reason
+ code) */
+#define AUTHFAIL 0x0300 /* Authentication failure (low byte is reason
+ code) */
+#define ASSOCIATED 0x0400 /* Associated */
+#define RC_RESERVED 0 /* Reserved return code */
+#define RC_NOREASON 1 /* Unspecified reason */
+#define RC_AUTHINV 2 /* Previous authentication invalid */
+#define RC_DEAUTH 3 /* Deauthenticated because sending station is
+ leaving */
+#define RC_NOACT 4 /* Disassociated due to inactivity */
+#define RC_MAXLOAD 5 /* Disassociated because AP is unable to handle
+ all currently associated stations */
+#define RC_BADCLASS2 6 /* Class 2 frame received from
+ non-Authenticated station */
+#define RC_BADCLASS3 7 /* Class 3 frame received from
+ non-Associated station */
+#define RC_STATLEAVE 8 /* Disassociated because sending station is
+ leaving BSS */
+#define RC_NOAUTH 9 /* Station requesting (Re)Association is not
+ Authenticated with the responding station */
+ if (newStatus != ASSOCIATED) {
+ if (auto_wep && !apriv->expires) {
+ apriv->expires = RUN_AT(3*HZ);
+ wake_up_interruptible(&apriv->thr_wait);
+ }
+ } else {
+ struct task_struct *task = apriv->task;
+ if (auto_wep)
+ apriv->expires = 0;
+ if (task)
+ wake_up_process (task);
+ set_bit(FLAG_UPDATE_UNI, &apriv->flags);
+ set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
+ }
+ /* Question: is ASSOCIATED the only status
+ * that is valid? We want to catch handover
+ * and reassociations as valid statuses
+ * - Jean II */
+ if(newStatus == ASSOCIATED) {
+ if (apriv->scan_timestamp) {
+ /* Send an empty event to user space.
+ * We don't send the received data on
+ * the event because it would require
+ * us to do complex transcoding, and
+ * we want to minimise the work done in
+ * the irq handler. Use a request to
+ * extract the data - Jean II */
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+ apriv->scan_timestamp = 0;
+ }
+ if (down_trylock(&apriv->sem) != 0) {
+ set_bit(JOB_EVENT, &apriv->flags);
+ wake_up_interruptible(&apriv->thr_wait);
+ } else
+ airo_send_event(dev);
+ } else {
+ memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+ /* Send event to user space */
+ wireless_send_event(dev, SIOCGIWAP, &wrqu,NULL);
+ }
+ }
+
+ /* Check to see if there is something to receive */
+ if ( status & EV_RX ) {
+ struct sk_buff *skb = NULL;
+ u16 fc, len, hdrlen = 0;
+#pragma pack(1)
+ struct {
+ u16 status, len;
+ u8 rssi[2];
+ u8 rate;
+ u8 freq;
+ u16 tmp[4];
+ } hdr;
+#pragma pack()
+ u16 gap;
+ u16 tmpbuf[4];
+ u16 *buffer;
+
+ if (test_bit(FLAG_MPI,&apriv->flags)) {
+ if (test_bit(FLAG_802_11, &apriv->flags))
+ mpi_receive_802_11(apriv);
+ else
+ mpi_receive_802_3(apriv);
+ OUT4500(apriv, EVACK, EV_RX);
+ goto exitrx;
+ }
+
+ fid = IN4500( apriv, RXFID );
+
+ /* Get the packet length */
+ if (test_bit(FLAG_802_11, &apriv->flags)) {
+ bap_setup (apriv, fid, 4, BAP0);
+ bap_read (apriv, (u16*)&hdr, sizeof(hdr), BAP0);
+ /* Bad CRC. Ignore packet */
+ if (le16_to_cpu(hdr.status) & 2)
+ hdr.len = 0;
+ if (apriv->wifidev == NULL)
+ hdr.len = 0;
+ } else {
+ bap_setup (apriv, fid, 0x36, BAP0);
+ bap_read (apriv, (u16*)&hdr.len, 2, BAP0);
+ }
+ len = le16_to_cpu(hdr.len);
+
+ if (len > 2312) {
+ printk( KERN_ERR "airo: Bad size %d\n", len );
+ goto badrx;
+ }
+ if (len == 0)
+ goto badrx;
+
+ if (test_bit(FLAG_802_11, &apriv->flags)) {
+ bap_read (apriv, (u16*)&fc, sizeof(fc), BAP0);
+ fc = le16_to_cpu(fc);
+ switch (fc & 0xc) {
+ case 4:
+ if ((fc & 0xe0) == 0xc0)
+ hdrlen = 10;
+ else
+ hdrlen = 16;
+ break;
+ case 8:
+ if ((fc&0x300)==0x300){
+ hdrlen = 30;
+ break;
+ }
+ default:
+ hdrlen = 24;
+ }
+ } else
+ hdrlen = ETH_ALEN * 2;
+
+ skb = dev_alloc_skb( len + hdrlen + 2 + 2 );
+ if ( !skb ) {
+ apriv->stats.rx_dropped++;
+ goto badrx;
+ }
+ skb_reserve(skb, 2); /* This way the IP header is aligned */
+ buffer = (u16*)skb_put (skb, len + hdrlen);
+ if (test_bit(FLAG_802_11, &apriv->flags)) {
+ buffer[0] = fc;
+ bap_read (apriv, buffer + 1, hdrlen - 2, BAP0);
+ if (hdrlen == 24)
+ bap_read (apriv, tmpbuf, 6, BAP0);
+
+ bap_read (apriv, &gap, sizeof(gap), BAP0);
+ gap = le16_to_cpu(gap);
+ if (gap) {
+ if (gap <= 8)
+ bap_read (apriv, tmpbuf, gap, BAP0);
+ else
+ printk(KERN_ERR "airo: gaplen too big. Problems will follow...\n");
+ }
+ bap_read (apriv, buffer + hdrlen/2, len, BAP0);
+ } else {
+#ifdef MICSUPPORT
+ MICBuffer micbuf;
+#endif
+ bap_read (apriv, buffer, ETH_ALEN*2, BAP0);
+#ifdef MICSUPPORT
+ if (apriv->micstats.enabled) {
+ bap_read (apriv,(u16*)&micbuf,sizeof(micbuf),BAP0);
+ if (ntohs(micbuf.typelen) > 0x05DC)
+ bap_setup (apriv, fid, 0x44, BAP0);
+ else {
+ if (len <= sizeof(micbuf))
+ goto badmic;
+
+ len -= sizeof(micbuf);
+ skb_trim (skb, len + hdrlen);
+ }
+ }
+#endif
+ bap_read(apriv,buffer+ETH_ALEN,len,BAP0);
+#ifdef MICSUPPORT
+ if (decapsulate(apriv,&micbuf,(etherHead*)buffer,len)) {
+badmic:
+ dev_kfree_skb_irq (skb);
+#else
+ if (0) {
+#endif
+badrx:
+ OUT4500( apriv, EVACK, EV_RX);
+ goto exitrx;
+ }
+ }
+#ifdef WIRELESS_SPY
+ if (apriv->spy_data.spy_number > 0) {
+ char *sa;
+ struct iw_quality wstats;
+ /* Prepare spy data : addr + qual */
+ if (!test_bit(FLAG_802_11, &apriv->flags)) {
+ sa = (char*)buffer + 6;
+ bap_setup (apriv, fid, 8, BAP0);
+ bap_read (apriv, (u16*)hdr.rssi, 2, BAP0);
+ } else
+ sa = (char*)buffer + 10;
+ wstats.qual = hdr.rssi[0];
+ if (apriv->rssi)
+ wstats.level = 0x100 - apriv->rssi[hdr.rssi[1]].rssidBm;
+ else
+ wstats.level = (hdr.rssi[1] + 321) / 2;
+ wstats.updated = 3;
+ /* Update spy records */
+ wireless_spy_update(dev, sa, &wstats);
+ }
+#endif /* WIRELESS_SPY */
+ OUT4500( apriv, EVACK, EV_RX);
+
+ if (test_bit(FLAG_802_11, &apriv->flags)) {
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->dev = apriv->wifidev;
+ skb->protocol = htons(ETH_P_802_2);
+ } else {
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb,dev);
+ }
+ skb->dev->last_rx = jiffies;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx( skb );
+ }
+exitrx:
+
+ /* Check to see if a packet has been transmitted */
+ if ( status & ( EV_TX|EV_TXCPY|EV_TXEXC ) ) {
+ int i;
+ int len = 0;
+ int index = -1;
+
+ if (test_bit(FLAG_MPI,&apriv->flags)) {
+ unsigned long flags;
+
+ if (status & EV_TXEXC)
+ get_tx_error(apriv, -1);
+ spin_lock_irqsave(&apriv->aux_lock, flags);
+ if (skb_queue_len (&apriv->txq)) {
+ spin_unlock_irqrestore(&apriv->aux_lock,flags);
+ mpi_send_packet (dev);
+ } else {
+ clear_bit(FLAG_PENDING_XMIT, &apriv->flags);
+ spin_unlock_irqrestore(&apriv->aux_lock,flags);
+ netif_wake_queue (dev);
+ }
+ OUT4500( apriv, EVACK,
+ status & (EV_TX|EV_TXCPY|EV_TXEXC));
+ goto exittx;
+ }
+
+ fid = IN4500(apriv, TXCOMPLFID);
+
+ for( i = 0; i < MAX_FIDS; i++ ) {
+ if ( ( apriv->fids[i] & 0xffff ) == fid ) {
+ len = apriv->fids[i] >> 16;
+ index = i;
+ }
+ }
+ if (index != -1) {
+ if (status & EV_TXEXC)
+ get_tx_error(apriv, index);
+ OUT4500( apriv, EVACK, status & (EV_TX | EV_TXEXC));
+ /* Set up to be used again */
+ apriv->fids[index] &= 0xffff;
+ if (index < MAX_FIDS / 2) {
+ if (!test_bit(FLAG_PENDING_XMIT, &apriv->flags))
+ netif_wake_queue(dev);
+ } else {
+ if (!test_bit(FLAG_PENDING_XMIT11, &apriv->flags))
+ netif_wake_queue(apriv->wifidev);
+ }
+ } else {
+ OUT4500( apriv, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
+ printk( KERN_ERR "airo: Unallocated FID was used to xmit\n" );
+ }
+ }
+exittx:
+ if ( status & ~STATUS_INTS & ~IGNORE_INTS )
+ printk( KERN_WARNING "airo: Got weird status %x\n",
+ status & ~STATUS_INTS & ~IGNORE_INTS );
+ }
+
+ if (savedInterrupts)
+ OUT4500( apriv, EVINTEN, savedInterrupts );
+
+ /* done.. */
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Routines to talk to the card
+ */
+
+/*
+ * This was originally written for the 4500, hence the name.
+ * NOTE: If used with 8-bit mode and SMP, bad things will happen!
+ * Why would someone do 8-bit I/O on an SMP machine?!?
+ */
+static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
+ if (test_bit(FLAG_MPI,&ai->flags))
+ reg <<= 1;
+ if ( !do8bitIO )
+ outw( val, ai->dev->base_addr + reg );
+ else {
+ outb( val & 0xff, ai->dev->base_addr + reg );
+ outb( val >> 8, ai->dev->base_addr + reg + 1 );
+ }
+}
+
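+/* Read a 16-bit register. MPI cards space their registers twice as
+ * far apart (hence the shift); in 8-bit I/O mode the two halves are
+ * read separately. */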
+static u16 IN4500( struct airo_info *ai, u16 reg ) {
+ unsigned short rc;
+
+ if (test_bit(FLAG_MPI,&ai->flags))
+ reg <<= 1;
+ if ( !do8bitIO )
+ rc = inw( ai->dev->base_addr + reg );
+ else {
+ rc = inb( ai->dev->base_addr + reg );
+ rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
+ }
+ return rc;
+}
+
+static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
+ int rc;
+ Cmd cmd;
+
+ /* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
+ * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
+ * Note : we could try to use !netif_running(dev) in enable_MAC()
+ * instead of this flag, but I don't trust it *within* the
+ * open/close functions, and testing both flags together is
+ * "cheaper" - Jean II */
+ if (ai->flags & FLAG_RADIO_MASK) return SUCCESS;
+
+ if (lock && down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+
+ if (!test_bit(FLAG_ENABLED, &ai->flags)) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = MAC_ENABLE;
+ rc = issuecommand(ai, &cmd, rsp);
+ if (rc == SUCCESS)
+ set_bit(FLAG_ENABLED, &ai->flags);
+ } else
+ rc = SUCCESS;
+
+ if (lock)
+ up(&ai->sem);
+
+ if (rc)
+ printk(KERN_ERR "%s: Cannot enable MAC, err=%d\n",
+ __FUNCTION__,rc);
+ return rc;
+}
+
+static void disable_MAC( struct airo_info *ai, int lock ) {
+ Cmd cmd;
+ Resp rsp;
+
+ if (lock && down_interruptible(&ai->sem))
+ return;
+
+ if (test_bit(FLAG_ENABLED, &ai->flags)) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = MAC_DISABLE; // disable in case already enabled
+ issuecommand(ai, &cmd, &rsp);
+ clear_bit(FLAG_ENABLED, &ai->flags);
+ }
+ if (lock)
+ up(&ai->sem);
+}
+
+static void enable_interrupts( struct airo_info *ai ) {
+ /* Enable the interrupts */
+ OUT4500( ai, EVINTEN, STATUS_INTS );
+}
+
+static void disable_interrupts( struct airo_info *ai ) {
+ OUT4500( ai, EVINTEN, 0 );
+}
+
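+/* Pull an 802.3 frame out of the MPI card's shared-memory RX
+ * descriptor, strip the MIC header if one is present, and hand the
+ * resulting skb to the network stack. */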
+static void mpi_receive_802_3(struct airo_info *ai)
+{
+ RxFid rxd;
+ int len = 0;
+ struct sk_buff *skb;
+ char *buffer;
+#ifdef MICSUPPORT
+ int off = 0;
+ MICBuffer micbuf;
+#endif
+
+ memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
+ /* Make sure we got something */
+ if (rxd.rdy && rxd.valid == 0) {
+ len = rxd.len + 12;
+ if (len < 12 || len > 2048)
+ goto badrx;
+
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ ai->stats.rx_dropped++;
+ goto badrx;
+ }
+ buffer = skb_put(skb,len);
+#ifdef MICSUPPORT
+ memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
+ if (ai->micstats.enabled) {
+ memcpy(&micbuf,
+ ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2,
+ sizeof(micbuf));
+ if (ntohs(micbuf.typelen) <= 0x05DC) {
+ if (len <= sizeof(micbuf) + ETH_ALEN * 2)
+ goto badmic;
+
+ off = sizeof(micbuf);
+ skb_trim (skb, len - off);
+ }
+ }
+ memcpy(buffer + ETH_ALEN * 2,
+ ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2 + off,
+ len - ETH_ALEN * 2 - off);
+ if (decapsulate (ai, &micbuf, (etherHead*)buffer, len - off - ETH_ALEN * 2)) {
+badmic:
+ dev_kfree_skb_irq (skb);
+ goto badrx;
+ }
+#else
+ memcpy(buffer, ai->rxfids[0].virtual_host_addr, len);
+#endif
+#ifdef WIRELESS_SPY
+ if (ai->spy_data.spy_number > 0) {
+ char *sa;
+ struct iw_quality wstats;
+ /* Prepare spy data : addr + qual */
+ sa = buffer + ETH_ALEN;
+ wstats.qual = 0; /* XXX Where do I get that info from ??? */
+ wstats.level = 0;
+ wstats.updated = 0;
+ /* Update spy records */
+ wireless_spy_update(ai->dev, sa, &wstats);
+ }
+#endif /* WIRELESS_SPY */
+
+ skb->dev = ai->dev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, ai->dev);
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ }
+badrx:
+ if (rxd.valid == 0) {
+ rxd.valid = 1;
+ rxd.rdy = 0;
+ rxd.len = PKTSIZE;
+ memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
+ }
+}
+
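+/* Pull a raw 802.11 frame out of the MPI RX descriptor and deliver it
+ * on the monitor (wifidev) interface. */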
+void mpi_receive_802_11 (struct airo_info *ai)
+{
+ RxFid rxd;
+ struct sk_buff *skb = NULL;
+ u16 fc, len, hdrlen = 0;
+#pragma pack(1)
+ struct {
+ u16 status, len;
+ u8 rssi[2];
+ u8 rate;
+ u8 freq;
+ u16 tmp[4];
+ } hdr;
+#pragma pack()
+ u16 gap;
+ u16 *buffer;
+ char *ptr = ai->rxfids[0].virtual_host_addr+4;
+
+ memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
+ memcpy ((char *)&hdr, ptr, sizeof(hdr));
+ ptr += sizeof(hdr);
+ /* Bad CRC. Ignore packet */
+ if (le16_to_cpu(hdr.status) & 2)
+ hdr.len = 0;
+ if (ai->wifidev == NULL)
+ hdr.len = 0;
+ len = le16_to_cpu(hdr.len);
+ if (len > 2312) {
+ printk( KERN_ERR "airo: Bad size %d\n", len );
+ goto badrx;
+ }
+ if (len == 0)
+ goto badrx;
+
+ memcpy ((char *)&fc, ptr, sizeof(fc));
+ fc = le16_to_cpu(fc);
+ switch (fc & 0xc) {
+ case 4:
+ if ((fc & 0xe0) == 0xc0)
+ hdrlen = 10;
+ else
+ hdrlen = 16;
+ break;
+ case 8:
+ if ((fc&0x300)==0x300){
+ hdrlen = 30;
+ break;
+ }
+ default:
+ hdrlen = 24;
+ }
+
+ skb = dev_alloc_skb( len + hdrlen + 2 );
+ if ( !skb ) {
+ ai->stats.rx_dropped++;
+ goto badrx;
+ }
+ buffer = (u16*)skb_put (skb, len + hdrlen);
+ memcpy ((char *)buffer, ptr, hdrlen);
+ ptr += hdrlen;
+ if (hdrlen == 24)
+ ptr += 6;
+ memcpy ((char *)&gap, ptr, sizeof(gap));
+ ptr += sizeof(gap);
+ gap = le16_to_cpu(gap);
+ if (gap) {
+ if (gap <= 8)
+ ptr += gap;
+ else
+ printk(KERN_ERR
+ "airo: gaplen too big. Problems will follow...\n");
+ }
+ memcpy ((char *)buffer + hdrlen, ptr, len);
+ ptr += len;
+#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
+ if (ai->spy_data.spy_number > 0) {
+ char *sa;
+ struct iw_quality wstats;
+ /* Prepare spy data : addr + qual */
+ sa = (char*)buffer + 10;
+ wstats.qual = hdr.rssi[0];
+ if (ai->rssi)
+ wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
+ else
+ wstats.level = (hdr.rssi[1] + 321) / 2;
+ wstats.updated = 3;
+ /* Update spy records */
+ wireless_spy_update(ai->dev, sa, &wstats);
+ }
+#endif /* IW_WIRELESS_SPY */
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->dev = ai->wifidev;
+ skb->protocol = htons(ETH_P_802_2);
+ skb->dev->last_rx = jiffies;
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx( skb );
+badrx:
+ if (rxd.valid == 0) {
+ rxd.valid = 1;
+ rxd.rdy = 0;
+ rxd.len = PKTSIZE;
+ memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
+ }
+}
+
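+/* Bring the card to an operational state: issue a NOP, pick the BAP
+ * read method, (re)load the configuration, SSID list and WEP keys,
+ * and finally enable the MAC. */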
+static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
+{
+ Cmd cmd;
+ Resp rsp;
+ int status;
+ int i;
+ SsidRid mySsid;
+ u16 lastindex;
+ WepKeyRid wkr;
+ int rc;
+
+ memset( &mySsid, 0, sizeof( mySsid ) );
+ if (ai->flash) {
+ kfree (ai->flash);
+ ai->flash = NULL;
+ }
+
+ /* The NOP is the first step in getting the card going */
+ cmd.cmd = NOP;
+ cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
+ if (lock && down_interruptible(&ai->sem))
+ return ERROR;
+ if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
+ if (lock)
+ up(&ai->sem);
+ return ERROR;
+ }
+ disable_MAC( ai, 0);
+
+ // Let's figure out if we need to use the AUX port
+ if (!test_bit(FLAG_MPI,&ai->flags)) {
+ cmd.cmd = CMD_ENABLEAUX;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
+ if (lock)
+ up(&ai->sem);
+ printk(KERN_ERR "airo: Error checking for AUX port\n");
+ return ERROR;
+ }
+ if (!aux_bap || rsp.status & 0xff00) {
+ ai->bap_read = fast_bap_read;
+ printk(KERN_DEBUG "airo: Doing fast bap_reads\n");
+ } else {
+ ai->bap_read = aux_bap_read;
+ printk(KERN_DEBUG "airo: Doing AUX bap_reads\n");
+ }
+ }
+ if (lock)
+ up(&ai->sem);
+ if (ai->config.len == 0) {
+ tdsRssiRid rssi_rid;
+ CapabilityRid cap_rid;
+
+ if (ai->APList) {
+ kfree(ai->APList);
+ ai->APList = NULL;
+ }
+ if (ai->SSID) {
+ kfree(ai->SSID);
+ ai->SSID = NULL;
+ }
+ // general configuration (read/modify/write)
+ status = readConfigRid(ai, lock);
+ if ( status != SUCCESS ) return ERROR;
+
+ status = readCapabilityRid(ai, &cap_rid, lock);
+ if ( status != SUCCESS ) return ERROR;
+
+ status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock);
+ if ( status == SUCCESS ) {
+ if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
+ memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512);
+ }
+ else {
+ if (ai->rssi) {
+ kfree(ai->rssi);
+ ai->rssi = NULL;
+ }
+ if (cap_rid.softCap & 8)
+ ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
+ else
+ printk(KERN_WARNING "airo: unknown received signal level scale\n");
+ }
+ ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
+ ai->config.authType = AUTH_OPEN;
+ ai->config.modulation = MOD_CCK;
+
+#ifdef MICSUPPORT
+ if ((cap_rid.len>=sizeof(cap_rid)) && (cap_rid.extSoftCap&1) &&
+ (micsetup(ai) == SUCCESS)) {
+ ai->config.opmode |= MODE_MIC;
+ set_bit(FLAG_MIC_CAPABLE, &ai->flags);
+ }
+#endif
+
+ /* Save off the MAC */
+ for( i = 0; i < ETH_ALEN; i++ ) {
+ mac[i] = ai->config.macAddr[i];
+ }
+
+ /* Check to see if there are any insmod configured
+ rates to add */
+ if ( rates[0] ) {
+ int i = 0;
+ memset(ai->config.rates,0,sizeof(ai->config.rates));
+ for( i = 0; i < 8 && rates[i]; i++ ) {
+ ai->config.rates[i] = rates[i];
+ }
+ }
+ if ( basic_rate > 0 ) {
+ int i;
+ for( i = 0; i < 8; i++ ) {
+ if ( ai->config.rates[i] == basic_rate ||
+ !ai->config.rates ) {
+ ai->config.rates[i] = basic_rate | 0x80;
+ break;
+ }
+ }
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+
+ /* Setup the SSIDs if present */
+ if ( ssids[0] ) {
+ int i;
+ for( i = 0; i < 3 && ssids[i]; i++ ) {
+ mySsid.ssids[i].len = strlen(ssids[i]);
+ if ( mySsid.ssids[i].len > 32 )
+ mySsid.ssids[i].len = 32;
+ memcpy(mySsid.ssids[i].ssid, ssids[i],
+ mySsid.ssids[i].len);
+ }
+ mySsid.len = sizeof(mySsid);
+ }
+
+ status = writeConfigRid(ai, lock);
+ if ( status != SUCCESS ) return ERROR;
+
+ /* Set up the SSID list */
+ if ( ssids[0] ) {
+ status = writeSsidRid(ai, &mySsid, lock);
+ if ( status != SUCCESS ) return ERROR;
+ }
+
+ status = enable_MAC(ai, &rsp, lock);
+ if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) {
+ printk( KERN_ERR "airo: Bad MAC enable reason = %x, rid = %x, offset = %d\n", rsp.rsp0, rsp.rsp1, rsp.rsp2 );
+ return ERROR;
+ }
+
+ /* Grab the initial wep key, we gotta save it for auto_wep */
+ rc = readWepKeyRid(ai, &wkr, 1, lock);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == 0xffff) {
+ ai->defindex = wkr.mac[0];
+ }
+ rc = readWepKeyRid(ai, &wkr, 0, lock);
+ } while(lastindex != wkr.kindex);
+
+ if (auto_wep) {
+ ai->expires = RUN_AT(3*HZ);
+ wake_up_interruptible(&ai->thr_wait);
+ }
+
+ return SUCCESS;
+}
+
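+/* Issue a command to the card and busy-wait (yielding occasionally)
+ * for the completion event; the response registers are copied into
+ * *pRsp before the event is acknowledged. */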
+static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
+ // I'm really paranoid about letting it run forever!
+ int max_tries = 600000;
+
+ if (IN4500(ai, EVSTAT) & EV_CMD)
+ OUT4500(ai, EVACK, EV_CMD);
+
+ OUT4500(ai, PARAM0, pCmd->parm0);
+ OUT4500(ai, PARAM1, pCmd->parm1);
+ OUT4500(ai, PARAM2, pCmd->parm2);
+ OUT4500(ai, COMMAND, pCmd->cmd);
+
+ while (max_tries-- && (IN4500(ai, EVSTAT) & EV_CMD) == 0) {
+ if ((IN4500(ai, COMMAND)) == pCmd->cmd)
+ // PC4500 didn't notice command, try again
+ OUT4500(ai, COMMAND, pCmd->cmd);
+ if (!in_atomic() && (max_tries & 255) == 0)
+ schedule();
+ }
+
+ if ( max_tries == -1 ) {
+ printk( KERN_ERR
+ "airo: Max tries exceeded when issueing command\n" );
+ if (IN4500(ai, COMMAND) & COMMAND_BUSY)
+ OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+ return ERROR;
+ }
+
+ // command completed
+ pRsp->status = IN4500(ai, STATUS);
+ pRsp->rsp0 = IN4500(ai, RESP0);
+ pRsp->rsp1 = IN4500(ai, RESP1);
+ pRsp->rsp2 = IN4500(ai, RESP2);
+ if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) {
+ printk (KERN_ERR "airo: cmd= %x\n", pCmd->cmd);
+ printk (KERN_ERR "airo: status= %x\n", pRsp->status);
+ printk (KERN_ERR "airo: Rsp0= %x\n", pRsp->rsp0);
+ printk (KERN_ERR "airo: Rsp1= %x\n", pRsp->rsp1);
+ printk (KERN_ERR "airo: Rsp2= %x\n", pRsp->rsp2);
+ }
+
+ // clear stuck command busy if necessary
+ if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
+ OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+ }
+ // acknowledge processing the status/response
+ OUT4500(ai, EVACK, EV_CMD);
+
+ return SUCCESS;
+}
+
+/* Sets up the bap to start exchanging data. whichbap should
+ * be one of the BAP0 or BAP1 defines. Locks should be held before
+ * calling! */
+static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
+{
+ int timeout = 50;
+ int max_tries = 3;
+
+ OUT4500(ai, SELECT0+whichbap, rid);
+ OUT4500(ai, OFFSET0+whichbap, offset);
+ while (1) {
+ int status = IN4500(ai, OFFSET0+whichbap);
+ if (status & BAP_BUSY) {
+ /* This isn't really a timeout, but it's kinda
+ close */
+ if (timeout--) {
+ continue;
+ }
+ } else if ( status & BAP_ERR ) {
+ /* invalid rid or offset */
+ printk( KERN_ERR "airo: BAP error %x %d\n",
+ status, whichbap );
+ return ERROR;
+ } else if (status & BAP_DONE) { // success
+ return SUCCESS;
+ }
+ if ( !(max_tries--) ) {
+ printk( KERN_ERR
+ "airo: BAP setup error too many retries\n" );
+ return ERROR;
+ }
+ // -- PC4500 missed it, try again
+ OUT4500(ai, SELECT0+whichbap, rid);
+ OUT4500(ai, OFFSET0+whichbap, offset);
+ timeout = 50;
+ }
+}
+
+/* Should only be called by aux_bap_read. This aux function and the
+ following use concepts not documented in the developer's guide. I
+ got them from a patch given to me by Aironet */
+static u16 aux_setup(struct airo_info *ai, u16 page,
+ u16 offset, u16 *len)
+{
+ u16 next;
+
+ OUT4500(ai, AUXPAGE, page);
+ OUT4500(ai, AUXOFF, 0);
+ next = IN4500(ai, AUXDATA);
+ *len = IN4500(ai, AUXDATA)&0xff;
+ if (offset != 4) OUT4500(ai, AUXOFF, offset);
+ return next;
+}
+
+/* requires call to bap_setup() first */
+static int aux_bap_read(struct airo_info *ai, u16 *pu16Dst,
+ int bytelen, int whichbap)
+{
+ u16 len;
+ u16 page;
+ u16 offset;
+ u16 next;
+ int words;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ai->aux_lock, flags);
+ page = IN4500(ai, SWS0+whichbap);
+ offset = IN4500(ai, SWS2+whichbap);
+ next = aux_setup(ai, page, offset, &len);
+ words = (bytelen+1)>>1;
+
+ for (i=0; i<words;) {
+ int count;
+ count = (len>>1) < (words-i) ? (len>>1) : (words-i);
+ if ( !do8bitIO )
+ insw( ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i,count );
+ else
+ insb( ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count << 1 );
+ i += count;
+ if (i<words) {
+ next = aux_setup(ai, next, 4, &len);
+ }
+ }
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
+ return SUCCESS;
+}
+
+
+/* requires call to bap_setup() first */
+static int fast_bap_read(struct airo_info *ai, u16 *pu16Dst,
+ int bytelen, int whichbap)
+{
+ bytelen = (bytelen + 1) & (~1); // round up to even value
+ if ( !do8bitIO )
+ insw( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1 );
+ else
+ insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
+ return SUCCESS;
+}
+
+/* requires call to bap_setup() first */
+static int bap_write(struct airo_info *ai, const u16 *pu16Src,
+ int bytelen, int whichbap)
+{
+ bytelen = (bytelen + 1) & (~1); // round up to even value
+ if ( !do8bitIO )
+ outsw( ai->dev->base_addr+DATA0+whichbap,
+ pu16Src, bytelen>>1 );
+ else
+ outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
+ return SUCCESS;
+}
+
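+/* Issue an ACCESS-type command for the given RID; returns 0 on
+ * success, or an error code built from the command and the response
+ * status otherwise. */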
+static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
+{
+ Cmd cmd; /* for issuing commands */
+ Resp rsp; /* response from commands */
+ u16 status;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = accmd;
+ cmd.parm0 = rid;
+ status = issuecommand(ai, &cmd, &rsp);
+ if (status != 0) return status;
+ if ( (rsp.status & 0x7F00) != 0) {
+ return (accmd << 8) + (rsp.rsp0 & 0xFF);
+ }
+ return 0;
+}
+
+/* Note that we are using BAP1, which is also used by transmit, so
+ * we must get a lock. */
+static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, int lock)
+{
+ u16 status;
+ int rc = SUCCESS;
+
+ if (lock) {
+ if (down_interruptible(&ai->sem))
+ return ERROR;
+ }
+ if (test_bit(FLAG_MPI,&ai->flags)) {
+ Cmd cmd;
+ Resp rsp;
+
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ ai->config_desc.rid_desc.valid = 1;
+ ai->config_desc.rid_desc.len = RIDSIZE;
+ ai->config_desc.rid_desc.rid = 0;
+ ai->config_desc.rid_desc.host_addr = ai->ridbus;
+
+ cmd.cmd = CMD_ACCESS;
+ cmd.parm0 = rid;
+
+ memcpy_toio(ai->config_desc.card_ram_off,
+ &ai->config_desc.rid_desc, sizeof(Rid));
+
+ rc = issuecommand(ai, &cmd, &rsp);
+
+ if (rsp.status & 0x7f00)
+ rc = rsp.rsp0;
+ if (!rc)
+ memcpy(pBuf, ai->config_desc.virtual_host_addr, len);
+ goto done;
+ } else {
+ if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS))!=SUCCESS) {
+ rc = status;
+ goto done;
+ }
+ if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
+ rc = ERROR;
+ goto done;
+ }
+ // read the rid length field
+ bap_read(ai, pBuf, 2, BAP1);
+ // length for remaining part of rid
+ len = min(len, (int)le16_to_cpu(*(u16*)pBuf)) - 2;
+
+ if ( len <= 2 ) {
+ printk( KERN_ERR
+ "airo: Rid %x has a length of %d which is too short\n",
+ (int)rid, (int)len );
+ rc = ERROR;
+ goto done;
+ }
+ // read remainder of the rid
+ rc = bap_read(ai, ((u16*)pBuf)+1, len, BAP1);
+ }
+done:
+ if (lock)
+ up(&ai->sem);
+ return rc;
+}
+
+/* Note that we are using BAP1, which is also used by transmit, so
+ * make sure this isn't called while a transmit is happening */
+static int PC4500_writerid(struct airo_info *ai, u16 rid,
+ const void *pBuf, int len, int lock)
+{
+ u16 status;
+ int rc = SUCCESS;
+
+ *(u16*)pBuf = cpu_to_le16((u16)len);
+
+ if (lock) {
+ if (down_interruptible(&ai->sem))
+ return ERROR;
+ }
+ if (test_bit(FLAG_MPI,&ai->flags)) {
+ Cmd cmd;
+ Resp rsp;
+
+ if (test_bit(FLAG_ENABLED, &ai->flags))
+ printk(KERN_ERR
+ "%s: MAC should be disabled (rid=%04x)\n",
+ __FUNCTION__, rid);
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+
+ ai->config_desc.rid_desc.valid = 1;
+ ai->config_desc.rid_desc.len = *((u16 *)pBuf);
+ ai->config_desc.rid_desc.rid = 0;
+
+ cmd.cmd = CMD_WRITERID;
+ cmd.parm0 = rid;
+
+ memcpy_toio(ai->config_desc.card_ram_off,
+ &ai->config_desc.rid_desc, sizeof(Rid));
+
+ if (len < 4 || len > 2047) {
+ printk(KERN_ERR "%s: len=%d\n",__FUNCTION__,len);
+ rc = -1;
+ } else {
+ memcpy((char *)ai->config_desc.virtual_host_addr,
+ pBuf, len);
+
+ rc = issuecommand(ai, &cmd, &rsp);
+ if ((rc & 0xff00) != 0) {
+ printk(KERN_ERR "%s: Write rid Error %d\n",
+ __FUNCTION__,rc);
+ printk(KERN_ERR "%s: Cmd=%04x\n",
+ __FUNCTION__,cmd.cmd);
+ }
+
+ if ((rsp.status & 0x7f00))
+ rc = rsp.rsp0;
+ }
+ } else {
+ // --- first access so that we can write the rid data
+ if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
+ rc = status;
+ goto done;
+ }
+ // --- now write the rid data
+ if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
+ rc = ERROR;
+ goto done;
+ }
+ bap_write(ai, pBuf, len, BAP1);
+ // ---now commit the rid data
+ rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS);
+ }
+done:
+ if (lock)
+ up(&ai->sem);
+ return rc;
+}
+
+/* Allocates a FID to be used for transmitting packets. We only use
+ one for now. */
+static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
+{
+ unsigned int loop = 3000;
+ Cmd cmd;
+ Resp rsp;
+ u16 txFid;
+ u16 txControl;
+
+ cmd.cmd = CMD_ALLOCATETX;
+ cmd.parm0 = lenPayload;
+ if (down_interruptible(&ai->sem))
+ return ERROR;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
+ txFid = ERROR;
+ goto done;
+ }
+ if ( (rsp.status & 0xFF00) != 0) {
+ txFid = ERROR;
+ goto done;
+ }
+ /* wait for the allocate event/indication
+ * It makes me kind of nervous that this can just sit here and spin,
+ * but in practice it only loops like four times. */
+ while (((IN4500(ai, EVSTAT) & EV_ALLOC) == 0) && --loop);
+ if (!loop) {
+ txFid = ERROR;
+ goto done;
+ }
+
+ // get the allocated fid and acknowledge
+ txFid = IN4500(ai, TXALLOCFID);
+ OUT4500(ai, EVACK, EV_ALLOC);
+
+ /* The CARD is pretty cool since it converts the ethernet packet
+ * into 802.11. Also note that we don't release the FID since we
+ * will be using the same one over and over again. */
+ /* We only have to setup the control once since we are not
+ * releasing the fid. */
+ if (raw)
+ txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_11
+ | TXCTL_ETHERNET | TXCTL_NORELEASE);
+ else
+ txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3
+ | TXCTL_ETHERNET | TXCTL_NORELEASE);
+ if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS)
+ txFid = ERROR;
+ else
+ bap_write(ai, &txControl, sizeof(txControl), BAP1);
+
+done:
+ up(&ai->sem);
+
+ return txFid;
+}
+
+/* In general BAP1 is dedicated to transmitting packets. However,
+ since we need a BAP when accessing RIDs, we also use BAP1 for that.
+ Make sure the BAP1 spinlock is held when this is called. */
+static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
+{
+ u16 payloadLen;
+ Cmd cmd;
+ Resp rsp;
+ int miclen = 0;
+ u16 txFid = len;
+ MICBuffer pMic;
+
+ len >>= 16;
+
+ if (len <= ETH_ALEN * 2) {
+ printk( KERN_WARNING "Short packet %d\n", len );
+ return ERROR;
+ }
+ len -= ETH_ALEN * 2;
+
+#ifdef MICSUPPORT
+ if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
+ (ntohs(((u16 *)pPacket)[6]) != 0x888E)) {
+ if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
+ return ERROR;
+ miclen = sizeof(pMic);
+ }
+#endif
+
+ // packet is destination[6], source[6], payload[len-12]
+ // write the payload length and dst/src/payload
+ if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
+ /* The hardware addresses aren't counted as part of the payload, so
+ * we have to subtract the 12 bytes for the addresses off */
+ payloadLen = cpu_to_le16(len + miclen);
+ bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, (const u16*)pPacket, sizeof(etherHead), BAP1);
+ if (miclen)
+ bap_write(ai, (const u16*)&pMic, miclen, BAP1);
+ bap_write(ai, (const u16*)(pPacket + sizeof(etherHead)), len, BAP1);
+ // issue the transmit command
+ memset( &cmd, 0, sizeof( cmd ) );
+ cmd.cmd = CMD_TRANSMIT;
+ cmd.parm0 = txFid;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
+ if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ return SUCCESS;
+}
+
+static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
+{
+ u16 fc, payloadLen;
+ Cmd cmd;
+ Resp rsp;
+ int hdrlen;
+ struct {
+ u8 addr4[ETH_ALEN];
+ u16 gaplen;
+ u8 gap[6];
+ } gap;
+ u16 txFid = len;
+ len >>= 16;
+ gap.gaplen = 6;
+
+ fc = le16_to_cpu(*(const u16*)pPacket);
+ switch (fc & 0xc) {
+ case 4:
+ if ((fc & 0xe0) == 0xc0)
+ hdrlen = 10;
+ else
+ hdrlen = 16;
+ break;
+ case 8:
+ if ((fc&0x300)==0x300){
+ hdrlen = 30;
+ break;
+ }
+ default:
+ hdrlen = 24;
+ }
+
+ if (len < hdrlen) {
+ printk( KERN_WARNING "Short packet %d\n", len );
+ return ERROR;
+ }
+
+ /* packet is 802.11 header + payload
+ * write the payload length and dst/src/payload */
+ if (bap_setup(ai, txFid, 6, BAP1) != SUCCESS) return ERROR;
+ /* The 802.11 header isn't counted as part of the payload, so
+ * we have to subtract the header bytes off */
+ payloadLen = cpu_to_le16(len-hdrlen);
+ bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
+ bap_write(ai, (const u16*)pPacket, hdrlen, BAP1);
+ bap_write(ai, hdrlen == 30 ?
+ (const u16*)&gap.gaplen : (const u16*)&gap, 38 - hdrlen, BAP1);
+
+ bap_write(ai, (const u16*)(pPacket + hdrlen), len - hdrlen, BAP1);
+ // issue the transmit command
+ memset( &cmd, 0, sizeof( cmd ) );
+ cmd.cmd = CMD_TRANSMIT;
+ cmd.parm0 = txFid;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
+ if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ return SUCCESS;
+}
+
+/*
+ * These are the proc_fs routines. They are a bit messier than I would
+ * like! Feel free to clean them up!
+ */
+
+static ssize_t proc_read( struct file *file,
+ char __user *buffer,
+ size_t len,
+ loff_t *offset);
+
+static ssize_t proc_write( struct file *file,
+ const char __user *buffer,
+ size_t len,
+ loff_t *offset );
+static int proc_close( struct inode *inode, struct file *file );
+
+static int proc_stats_open( struct inode *inode, struct file *file );
+static int proc_statsdelta_open( struct inode *inode, struct file *file );
+static int proc_status_open( struct inode *inode, struct file *file );
+static int proc_SSID_open( struct inode *inode, struct file *file );
+static int proc_APList_open( struct inode *inode, struct file *file );
+static int proc_BSSList_open( struct inode *inode, struct file *file );
+static int proc_config_open( struct inode *inode, struct file *file );
+static int proc_wepkey_open( struct inode *inode, struct file *file );
+
+static struct file_operations proc_statsdelta_ops = {
+ .read = proc_read,
+ .open = proc_statsdelta_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_stats_ops = {
+ .read = proc_read,
+ .open = proc_stats_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_status_ops = {
+ .read = proc_read,
+ .open = proc_status_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_SSID_ops = {
+ .read = proc_read,
+ .write = proc_write,
+ .open = proc_SSID_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_BSSList_ops = {
+ .read = proc_read,
+ .write = proc_write,
+ .open = proc_BSSList_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_APList_ops = {
+ .read = proc_read,
+ .write = proc_write,
+ .open = proc_APList_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_config_ops = {
+ .read = proc_read,
+ .write = proc_write,
+ .open = proc_config_open,
+ .release = proc_close
+};
+
+static struct file_operations proc_wepkey_ops = {
+ .read = proc_read,
+ .write = proc_write,
+ .open = proc_wepkey_open,
+ .release = proc_close
+};
+
+static struct proc_dir_entry *airo_entry;
+
+struct proc_data {
+ int release_buffer;
+ int readlen;
+ char *rbuffer;
+ int writelen;
+ int maxwritelen;
+ char *wbuffer;
+ void (*on_close) (struct inode *, struct file *);
+};
+
+#ifndef SETPROC_OPS
+#define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops)
+#endif
+
+static int setup_proc_entry( struct net_device *dev,
+ struct airo_info *apriv ) {
+ struct proc_dir_entry *entry;
+ /* First setup the device directory */
+ strcpy(apriv->proc_name,dev->name);
+ apriv->proc_entry = create_proc_entry(apriv->proc_name,
+ S_IFDIR|airo_perm,
+ airo_entry);
+ apriv->proc_entry->uid = proc_uid;
+ apriv->proc_entry->gid = proc_gid;
+ apriv->proc_entry->owner = THIS_MODULE;
+
+ /* Setup the StatsDelta */
+ entry = create_proc_entry("StatsDelta",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_statsdelta_ops);
+
+ /* Setup the Stats */
+ entry = create_proc_entry("Stats",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_stats_ops);
+
+ /* Setup the Status */
+ entry = create_proc_entry("Status",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_status_ops);
+
+ /* Setup the Config */
+ entry = create_proc_entry("Config",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_config_ops);
+
+ /* Setup the SSID */
+ entry = create_proc_entry("SSID",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_SSID_ops);
+
+ /* Setup the APList */
+ entry = create_proc_entry("APList",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_APList_ops);
+
+ /* Setup the BSSList */
+ entry = create_proc_entry("BSSList",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_BSSList_ops);
+
+ /* Setup the WepKey */
+ entry = create_proc_entry("WepKey",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->owner = THIS_MODULE;
+ SETPROC_OPS(entry, proc_wepkey_ops);
+
+ return 0;
+}
+
+static int takedown_proc_entry( struct net_device *dev,
+ struct airo_info *apriv ) {
+ if ( !apriv->proc_entry->namelen ) return 0;
+ remove_proc_entry("Stats",apriv->proc_entry);
+ remove_proc_entry("StatsDelta",apriv->proc_entry);
+ remove_proc_entry("Status",apriv->proc_entry);
+ remove_proc_entry("Config",apriv->proc_entry);
+ remove_proc_entry("SSID",apriv->proc_entry);
+ remove_proc_entry("APList",apriv->proc_entry);
+ remove_proc_entry("BSSList",apriv->proc_entry);
+ remove_proc_entry("WepKey",apriv->proc_entry);
+ remove_proc_entry(apriv->proc_name,airo_entry);
+ return 0;
+}
+
+/*
+ * What we want from the proc_fs is to be able to efficiently read
+ * and write the configuration. To do this, we want to read the
+ * configuration when the file is opened and write it when the file is
+ * closed. So basically we allocate a read buffer at open and fill it
+ * with data, and allocate a write buffer and read it at close.
+ */
+
+/*
+ * The read routine is generic; it relies on the preallocated rbuffer
+ * to supply the data.
+ */
+static ssize_t proc_read( struct file *file,
+ char __user *buffer,
+ size_t len,
+ loff_t *offset )
+{
+ loff_t pos = *offset;
+ struct proc_data *priv = (struct proc_data*)file->private_data;
+
+ if (!priv->rbuffer)
+ return -EINVAL;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= priv->readlen)
+ return 0;
+ if (len > priv->readlen - pos)
+ len = priv->readlen - pos;
+ if (copy_to_user(buffer, priv->rbuffer + pos, len))
+ return -EFAULT;
+ *offset = pos + len;
+ return len;
+}
+
+/*
+ * The write routine is generic; it fills in the preallocated wbuffer
+ * with the data supplied by the user.
+ */
+static ssize_t proc_write( struct file *file,
+ const char __user *buffer,
+ size_t len,
+ loff_t *offset )
+{
+ loff_t pos = *offset;
+ struct proc_data *priv = (struct proc_data*)file->private_data;
+
+ if (!priv->wbuffer)
+ return -EINVAL;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= priv->maxwritelen)
+ return 0;
+ if (len > priv->maxwritelen - pos)
+ len = priv->maxwritelen - pos;
+ if (copy_from_user(priv->wbuffer + pos, buffer, len))
+ return -EFAULT;
+ if ( pos + len > priv->writelen )
+ priv->writelen = pos + len;
+ *offset = pos + len;
+ return len;
+}
+
+static int proc_status_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *apriv = dev->priv;
+ CapabilityRid cap_rid;
+ StatusRid status_rid;
+ int i;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+
+ readStatusRid(apriv, &status_rid, 1);
+ readCapabilityRid(apriv, &cap_rid, 1);
+
+ i = sprintf(data->rbuffer, "Status: %s%s%s%s%s%s%s%s%s\n",
+ status_rid.mode & 1 ? "CFG ": "",
+ status_rid.mode & 2 ? "ACT ": "",
+ status_rid.mode & 0x10 ? "SYN ": "",
+ status_rid.mode & 0x20 ? "LNK ": "",
+ status_rid.mode & 0x40 ? "LEAP ": "",
+ status_rid.mode & 0x80 ? "PRIV ": "",
+ status_rid.mode & 0x100 ? "KEY ": "",
+ status_rid.mode & 0x200 ? "WEP ": "",
+ status_rid.mode & 0x8000 ? "ERR ": "");
+ sprintf( data->rbuffer+i, "Mode: %x\n"
+ "Signal Strength: %d\n"
+ "Signal Quality: %d\n"
+ "SSID: %-.*s\n"
+ "AP: %-.16s\n"
+ "Freq: %d\n"
+ "BitRate: %dmbs\n"
+ "Driver Version: %s\n"
+ "Device: %s\nManufacturer: %s\nFirmware Version: %s\n"
+ "Radio type: %x\nCountry: %x\nHardware Version: %x\n"
+ "Software Version: %x\nSoftware Subversion: %x\n"
+ "Boot block version: %x\n",
+ (int)status_rid.mode,
+ (int)status_rid.normalizedSignalStrength,
+ (int)status_rid.signalQuality,
+ (int)status_rid.SSIDlen,
+ status_rid.SSID,
+ status_rid.apName,
+ (int)status_rid.channel,
+ (int)status_rid.currentXmitRate/2,
+ version,
+ cap_rid.prodName,
+ cap_rid.manName,
+ cap_rid.prodVer,
+ cap_rid.radioType,
+ cap_rid.country,
+ cap_rid.hardVer,
+ (int)cap_rid.softVer,
+ (int)cap_rid.softSubVer,
+ (int)cap_rid.bootBlockVer );
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_stats_rid_open(struct inode*, struct file*, u16);
+static int proc_statsdelta_open( struct inode *inode,
+ struct file *file ) {
+ if (file->f_mode&FMODE_WRITE) {
+ return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
+ }
+ return proc_stats_rid_open(inode, file, RID_STATSDELTA);
+}
+
+static int proc_stats_open( struct inode *inode, struct file *file ) {
+ return proc_stats_rid_open(inode, file, RID_STATS);
+}
+
+static int proc_stats_rid_open( struct inode *inode,
+ struct file *file,
+ u16 rid ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *apriv = dev->priv;
+ StatsRid stats;
+ int i, j;
+ u32 *vals = stats.vals;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+
+ readStatsRid(apriv, &stats, rid, 1);
+
+ j = 0;
+ for(i=0; statsLabels[i]!=(char *)-1 &&
+ i*4<stats.len; i++){
+ if (!statsLabels[i]) continue;
+ if (j+strlen(statsLabels[i])+16>4096) {
+ printk(KERN_WARNING
+ "airo: Potentially disasterous buffer overflow averted!\n");
+ break;
+ }
+ j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i], vals[i]);
+ }
+ if (i*4>=stats.len){
+ printk(KERN_WARNING
+ "airo: Got a short rid\n");
+ }
+ data->readlen = j;
+ return 0;
+}
+
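+/* Parse an unsigned decimal number from buffer, advancing *start
+ * while digits are found, up to index limit; returns -1 if no digit
+ * was found, otherwise the parsed value. */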
+static int get_dec_u16( char *buffer, int *start, int limit ) {
+ u16 value;
+ int valid = 0;
+ for( value = 0; buffer[*start] >= '0' &&
+ buffer[*start] <= '9' &&
+ *start < limit; (*start)++ ) {
+ valid = 1;
+ value *= 10;
+ value += buffer[*start] - '0';
+ }
+ if ( !valid ) return -1;
+ return value;
+}
+
+static int airo_config_commit(struct net_device *dev,
+ struct iw_request_info *info, void *zwrq,
+ char *extra);
+
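+/* Called when the Config proc file is closed: parse the lines the
+ * user wrote into ai->config and commit the result to the card. */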
+static void proc_config_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = file->private_data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ char *line;
+
+ if ( !data->writelen ) return;
+
+ readConfigRid(ai, 1);
+ set_bit (FLAG_COMMIT, &ai->flags);
+
+ line = data->wbuffer;
+ while( line[0] ) {
+/*** Mode processing */
+ if ( !strncmp( line, "Mode: ", 6 ) ) {
+ line += 6;
+ if ((ai->config.rmode & 0xff) >= RXMODE_RFMON)
+ set_bit (FLAG_RESET, &ai->flags);
+ ai->config.rmode &= 0xfe00;
+ clear_bit (FLAG_802_11, &ai->flags);
+ ai->config.opmode &= 0xFF00;
+ ai->config.scanMode = SCANMODE_ACTIVE;
+ if ( line[0] == 'a' ) {
+ ai->config.opmode |= 0;
+ } else {
+ ai->config.opmode |= 1;
+ if ( line[0] == 'r' ) {
+ ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
+ ai->config.scanMode = SCANMODE_PASSIVE;
+ set_bit (FLAG_802_11, &ai->flags);
+ } else if ( line[0] == 'y' ) {
+ ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
+ ai->config.scanMode = SCANMODE_PASSIVE;
+ set_bit (FLAG_802_11, &ai->flags);
+ } else if ( line[0] == 'l' )
+ ai->config.rmode |= RXMODE_LANMON;
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+
+/*** Radio status */
+ else if (!strncmp(line,"Radio: ", 7)) {
+ line += 7;
+ if (!strncmp(line,"off",3)) {
+ set_bit (FLAG_RADIO_OFF, &ai->flags);
+ } else {
+ clear_bit (FLAG_RADIO_OFF, &ai->flags);
+ }
+ }
+/*** NodeName processing */
+ else if ( !strncmp( line, "NodeName: ", 10 ) ) {
+ int j;
+
+ line += 10;
+ memset( ai->config.nodeName, 0, 16 );
+/* Do the name, assume a space between the mode and node name */
+ for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
+ ai->config.nodeName[j] = line[j];
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+
+/*** PowerMode processing */
+ else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
+ line += 11;
+ if ( !strncmp( line, "PSPCAM", 6 ) ) {
+ ai->config.powerSaveMode = POWERSAVE_PSPCAM;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "PSP", 3 ) ) {
+ ai->config.powerSaveMode = POWERSAVE_PSP;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else {
+ ai->config.powerSaveMode = POWERSAVE_CAM;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+ } else if ( !strncmp( line, "DataRates: ", 11 ) ) {
+ int v, i = 0, k = 0; /* i is index into line,
+ k is index to rates */
+
+ line += 11;
+ while((v = get_dec_u16(line, &i, 3))!=-1) {
+ ai->config.rates[k++] = (u8)v;
+ line += i + 1;
+ i = 0;
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "Channel: ", 9 ) ) {
+ int v, i = 0;
+ line += 9;
+ v = get_dec_u16(line, &i, i+3);
+ if ( v != -1 ) {
+ ai->config.channelSet = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+ } else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
+ int v, i = 0;
+ line += 11;
+ v = get_dec_u16(line, &i, i+3);
+ if ( v != -1 ) {
+ ai->config.txPower = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ }
+ } else if ( !strncmp( line, "WEP: ", 5 ) ) {
+ line += 5;
+ switch( line[0] ) {
+ case 's':
+ ai->config.authType = (u16)AUTH_SHAREDKEY;
+ break;
+ case 'e':
+ ai->config.authType = (u16)AUTH_ENCRYPT;
+ break;
+ default:
+ ai->config.authType = (u16)AUTH_OPEN;
+ break;
+ }
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 3);
+ v = (v<0) ? 0 : ((v>255) ? 255 : v);
+ ai->config.longRetryLimit = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
+ int v, i = 0;
+
+ line += 17;
+ v = get_dec_u16(line, &i, 3);
+ v = (v<0) ? 0 : ((v>255) ? 255 : v);
+ ai->config.shortRetryLimit = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
+ int v, i = 0;
+
+ line += 14;
+ v = get_dec_u16(line, &i, 4);
+ v = (v<0) ? 0 : ((v>2312) ? 2312 : v);
+ ai->config.rtsThres = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 5);
+ v = (v<0) ? 0 : v;
+ ai->config.txLifetime = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 5);
+ v = (v<0) ? 0 : v;
+ ai->config.rxLifetime = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
+ ai->config.txDiversity =
+ (line[13]=='l') ? 1 :
+ ((line[13]=='r')? 2: 3);
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
+ ai->config.rxDiversity =
+ (line[13]=='l') ? 1 :
+ ((line[13]=='r')? 2: 3);
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
+ int v, i = 0;
+
+ line += 15;
+ v = get_dec_u16(line, &i, 4);
+ v = (v<256) ? 256 : ((v>2312) ? 2312 : v);
+ v = v & 0xfffe; /* Make sure its even */
+ ai->config.fragThresh = (u16)v;
+ set_bit (FLAG_COMMIT, &ai->flags);
+ } else if (!strncmp(line, "Modulation: ", 12)) {
+ line += 12;
+ switch(*line) {
+ case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ default:
+ printk( KERN_WARNING "airo: Unknown modulation\n" );
+ }
+ } else if (!strncmp(line, "Preamble: ", 10)) {
+ line += 10;
+ switch(*line) {
+ case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ default: printk(KERN_WARNING "airo: Unknown preamble\n");
+ }
+ } else {
+ printk( KERN_WARNING "Couldn't figure out %s\n", line );
+ }
+ while( line[0] && line[0] != '\n' ) line++;
+ if ( line[0] ) line++;
+ }
+ airo_config_commit(dev, NULL, NULL, NULL);
+}
+
+static char *get_rmode(u16 mode) {
+ switch(mode&0xff) {
+ case RXMODE_RFMON: return "rfmon";
+ case RXMODE_RFMON_ANYBSS: return "yna (any) bss rfmon";
+ case RXMODE_LANMON: return "lanmon";
+ }
+ return "ESS";
+}
+
+static int proc_config_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ int i;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ if ((data->wbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 2048 );
+ data->maxwritelen = 2048;
+ data->on_close = proc_config_on_close;
+
+ readConfigRid(ai, 1);
+
+ i = sprintf( data->rbuffer,
+ "Mode: %s\n"
+ "Radio: %s\n"
+ "NodeName: %-16s\n"
+ "PowerMode: %s\n"
+ "DataRates: %d %d %d %d %d %d %d %d\n"
+ "Channel: %d\n"
+ "XmitPower: %d\n",
+ (ai->config.opmode & 0xFF) == 0 ? "adhoc" :
+ (ai->config.opmode & 0xFF) == 1 ? get_rmode(ai->config.rmode):
+ (ai->config.opmode & 0xFF) == 2 ? "AP" :
+ (ai->config.opmode & 0xFF) == 3 ? "AP RPTR" : "Error",
+ test_bit(FLAG_RADIO_OFF, &ai->flags) ? "off" : "on",
+ ai->config.nodeName,
+ ai->config.powerSaveMode == 0 ? "CAM" :
+ ai->config.powerSaveMode == 1 ? "PSP" :
+ ai->config.powerSaveMode == 2 ? "PSPCAM" : "Error",
+ (int)ai->config.rates[0],
+ (int)ai->config.rates[1],
+ (int)ai->config.rates[2],
+ (int)ai->config.rates[3],
+ (int)ai->config.rates[4],
+ (int)ai->config.rates[5],
+ (int)ai->config.rates[6],
+ (int)ai->config.rates[7],
+ (int)ai->config.channelSet,
+ (int)ai->config.txPower
+ );
+ sprintf( data->rbuffer + i,
+ "LongRetryLimit: %d\n"
+ "ShortRetryLimit: %d\n"
+ "RTSThreshold: %d\n"
+ "TXMSDULifetime: %d\n"
+ "RXMSDULifetime: %d\n"
+ "TXDiversity: %s\n"
+ "RXDiversity: %s\n"
+ "FragThreshold: %d\n"
+ "WEP: %s\n"
+ "Modulation: %s\n"
+ "Preamble: %s\n",
+ (int)ai->config.longRetryLimit,
+ (int)ai->config.shortRetryLimit,
+ (int)ai->config.rtsThres,
+ (int)ai->config.txLifetime,
+ (int)ai->config.rxLifetime,
+ ai->config.txDiversity == 1 ? "left" :
+ ai->config.txDiversity == 2 ? "right" : "both",
+ ai->config.rxDiversity == 1 ? "left" :
+ ai->config.rxDiversity == 2 ? "right" : "both",
+ (int)ai->config.fragThresh,
+ ai->config.authType == AUTH_ENCRYPT ? "encrypt" :
+ ai->config.authType == AUTH_SHAREDKEY ? "shared" : "open",
+ ai->config.modulation == 0 ? "default" :
+ ai->config.modulation == MOD_CCK ? "cck" :
+ ai->config.modulation == MOD_MOK ? "mok" : "error",
+ ai->config.preamble == PREAMBLE_AUTO ? "auto" :
+ ai->config.preamble == PREAMBLE_LONG ? "long" :
+ ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
+ );
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
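+/* Called when the SSID proc file is closed: parse up to three
+ * newline-separated SSIDs from the write buffer and program them
+ * into the card. */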
+static void proc_SSID_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ SsidRid SSID_rid;
+ Resp rsp;
+ int i;
+ int offset = 0;
+
+ if ( !data->writelen ) return;
+
+ memset( &SSID_rid, 0, sizeof( SSID_rid ) );
+
+ for( i = 0; i < 3; i++ ) {
+ int j;
+ for( j = 0; j+offset < data->writelen && j < 32 &&
+ data->wbuffer[offset+j] != '\n'; j++ ) {
+ SSID_rid.ssids[i].ssid[j] = data->wbuffer[offset+j];
+ }
+ if ( j == 0 ) break;
+ SSID_rid.ssids[i].len = j;
+ offset += j;
+ while( data->wbuffer[offset] != '\n' &&
+ offset < data->writelen ) offset++;
+ offset++;
+ }
+ if (i)
+ SSID_rid.len = sizeof(SSID_rid);
+ disable_MAC(ai, 1);
+ writeSsidRid(ai, &SSID_rid, 1);
+ enable_MAC(ai, &rsp, 1);
+}
+
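+/* Convert a single hex digit to its numeric value; anything else
+ * maps to 0. */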
+inline static u8 hexVal(char c) {
+ if (c>='0' && c<='9') return c -= '0';
+ if (c>='a' && c<='f') return c -= 'a'-10;
+ if (c>='A' && c<='F') return c -= 'A'-10;
+ return 0;
+}
+
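+/* Called when the APList proc file is closed: parse up to four MAC
+ * addresses (hex pairs) from the write buffer and write them to the
+ * card as the preferred AP list. */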
+static void proc_APList_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ APListRid APList_rid;
+ Resp rsp;
+ int i;
+
+ if ( !data->writelen ) return;
+
+ memset( &APList_rid, 0, sizeof(APList_rid) );
+ APList_rid.len = sizeof(APList_rid);
+
+ for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
+ int j;
+ for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
+ switch(j%3) {
+ case 0:
+ APList_rid.ap[i][j/3]=
+ hexVal(data->wbuffer[j+i*6*3])<<4;
+ break;
+ case 1:
+ APList_rid.ap[i][j/3]|=
+ hexVal(data->wbuffer[j+i*6*3]);
+ break;
+ }
+ }
+ }
+ disable_MAC(ai, 1);
+ writeAPListRid(ai, &APList_rid, 1);
+ enable_MAC(ai, &rsp, 1);
+}
+
+/* This function wraps PC4500_writerid with a MAC disable */
+static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
+ int len, int dummy ) {
+ int rc;
+ Resp rsp;
+
+ disable_MAC(ai, 1);
+ rc = PC4500_writerid(ai, rid, rid_data, len, 1);
+ enable_MAC(ai, &rsp, 1);
+ return rc;
+}
+
+/* Returns the length of the key at the index. If index == 0xffff
+ * the index of the transmit key is returned. If the key doesn't exist,
+ * -1 will be returned.
+ */
+static int get_wep_key(struct airo_info *ai, u16 index) {
+ WepKeyRid wkr;
+ int rc;
+ u16 lastindex;
+
+ rc = readWepKeyRid(ai, &wkr, 1, 1);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == index) {
+ if (index == 0xffff) {
+ return wkr.mac[0];
+ }
+ return wkr.klen;
+ }
+ readWepKeyRid(ai, &wkr, 0, 1);
+ } while(lastindex != wkr.kindex);
+ return -1;
+}
+
+static int set_wep_key(struct airo_info *ai, u16 index,
+ const char *key, u16 keylen, int perm, int lock ) {
+ static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+ WepKeyRid wkr;
+ Resp rsp;
+
+ memset(&wkr, 0, sizeof(wkr));
+ if (keylen == 0) {
+// We are selecting which key to use
+ wkr.len = sizeof(wkr);
+ wkr.kindex = 0xffff;
+ wkr.mac[0] = (char)index;
+ if (perm) printk(KERN_INFO "Setting transmit key to %d\n", index);
+ if (perm) ai->defindex = (char)index;
+ } else {
+// We are actually setting the key
+ wkr.len = sizeof(wkr);
+ wkr.kindex = index;
+ wkr.klen = keylen;
+ memcpy( wkr.key, key, keylen );
+ memcpy( wkr.mac, macaddr, ETH_ALEN );
+ printk(KERN_INFO "Setting key %d\n", index);
+ }
+
+ disable_MAC(ai, lock);
+ writeWepKeyRid(ai, &wkr, perm, lock);
+ enable_MAC(ai, &rsp, lock);
+ return 0;
+}
+
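+/* Called when the WepKey proc file is closed: the first character
+ * selects the key index (0-3); if a hex key string follows, that key
+ * is set, otherwise the index becomes the transmit key. */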
+static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ int i;
+ char key[16];
+ u16 index = 0;
+ int j = 0;
+
+ memset(key, 0, sizeof(key));
+
+ data = (struct proc_data *)file->private_data;
+ if ( !data->writelen ) return;
+
+ if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
+ (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
+ index = data->wbuffer[0] - '0';
+ if (data->wbuffer[1] == '\n') {
+ set_wep_key(ai, index, NULL, 0, 1, 1);
+ return;
+ }
+ j = 2;
+ } else {
+ printk(KERN_ERR "airo: WepKey passed invalid key index\n");
+ return;
+ }
+
+ for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
+ switch(i%3) {
+ case 0:
+ key[i/3] = hexVal(data->wbuffer[i+j])<<4;
+ break;
+ case 1:
+ key[i/3] |= hexVal(data->wbuffer[i+j]);
+ break;
+ }
+ }
+ set_wep_key(ai, index, key, i/3, 1, 1);
+}
+
+static int proc_wepkey_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ char *ptr;
+ WepKeyRid wkr;
+ u16 lastindex;
+ int j=0;
+ int rc;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ memset(&wkr, 0, sizeof(wkr));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 180, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset(data->rbuffer, 0, 180);
+ data->writelen = 0;
+ data->maxwritelen = 80;
+ if ((data->wbuffer = kmalloc( 80, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 80 );
+ data->on_close = proc_wepkey_on_close;
+
+ ptr = data->rbuffer;
+ strcpy(ptr, "No wep keys\n");
+ rc = readWepKeyRid(ai, &wkr, 1, 1);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == 0xffff) {
+ j += sprintf(ptr+j, "Tx key = %d\n",
+ (int)wkr.mac[0]);
+ } else {
+ j += sprintf(ptr+j, "Key %d set with length = %d\n",
+ (int)wkr.kindex, (int)wkr.klen);
+ }
+ readWepKeyRid(ai, &wkr, 0, 1);
+ } while((lastindex != wkr.kindex) && (j < 180-30));
+
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_SSID_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ int i;
+ char *ptr;
+ SsidRid SSID_rid;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ data->writelen = 0;
+ data->maxwritelen = 33*3;
+ if ((data->wbuffer = kmalloc( 33*3, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 33*3 );
+ data->on_close = proc_SSID_on_close;
+
+ readSsidRid(ai, &SSID_rid);
+ ptr = data->rbuffer;
+ for( i = 0; i < 3; i++ ) {
+ int j;
+ if ( !SSID_rid.ssids[i].len ) break;
+ for( j = 0; j < 32 &&
+ j < SSID_rid.ssids[i].len &&
+ SSID_rid.ssids[i].ssid[j]; j++ ) {
+ *ptr++ = SSID_rid.ssids[i].ssid[j];
+ }
+ *ptr++ = '\n';
+ }
+ *ptr = '\0';
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_APList_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ int i;
+ char *ptr;
+ APListRid APList_rid;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ data->writelen = 0;
+ data->maxwritelen = 4*6*3;
+ if ((data->wbuffer = kmalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, data->maxwritelen );
+ data->on_close = proc_APList_on_close;
+
+ readAPListRid(ai, &APList_rid);
+ ptr = data->rbuffer;
+ for( i = 0; i < 4; i++ ) {
+// We end when we find a zero MAC
+ if ( !*(int*)APList_rid.ap[i] &&
+ !*(int*)&APList_rid.ap[i][2]) break;
+ ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (int)APList_rid.ap[i][0],
+ (int)APList_rid.ap[i][1],
+ (int)APList_rid.ap[i][2],
+ (int)APList_rid.ap[i][3],
+ (int)APList_rid.ap[i][4],
+ (int)APList_rid.ap[i][5]);
+ }
+ if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
+
+ *ptr = '\0';
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_BSSList_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = PDE(inode);
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = dev->priv;
+ char *ptr;
+ BSSListRid BSSList_rid;
+ int rc;
+ /* If doLoseSync is not 1, we won't do a Lose Sync */
+ int doLoseSync = -1;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ data->writelen = 0;
+ data->maxwritelen = 0;
+ data->wbuffer = NULL;
+ data->on_close = NULL;
+
+ if (file->f_mode & FMODE_WRITE) {
+ if (!(file->f_mode & FMODE_READ)) {
+ Cmd cmd;
+ Resp rsp;
+
+ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LISTBSS;
+ if (down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+ issuecommand(ai, &cmd, &rsp);
+ up(&ai->sem);
+ data->readlen = 0;
+ return 0;
+ }
+ doLoseSync = 1;
+ }
+ ptr = data->rbuffer;
+ /* There is a race condition here if there are concurrent opens.
+ Since it is a rare condition, we'll just live with it, otherwise
+ we have to add a spin lock... */
+ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
+ while(rc == 0 && BSSList_rid.index != 0xffff) {
+ ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x %*s rssi = %d",
+ (int)BSSList_rid.bssid[0],
+ (int)BSSList_rid.bssid[1],
+ (int)BSSList_rid.bssid[2],
+ (int)BSSList_rid.bssid[3],
+ (int)BSSList_rid.bssid[4],
+ (int)BSSList_rid.bssid[5],
+ (int)BSSList_rid.ssidLen,
+ BSSList_rid.ssid,
+ (int)BSSList_rid.rssi);
+ ptr += sprintf(ptr, " channel = %d %s %s %s %s\n",
+ (int)BSSList_rid.dsChannel,
+ BSSList_rid.cap & CAP_ESS ? "ESS" : "",
+ BSSList_rid.cap & CAP_IBSS ? "adhoc" : "",
+ BSSList_rid.cap & CAP_PRIVACY ? "wep" : "",
+ BSSList_rid.cap & CAP_SHORTHDR ? "shorthdr" : "");
+ rc = readBSSListRid(ai, 0, &BSSList_rid);
+ }
+ *ptr = '\0';
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
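+
+/* Usage note for the BSSList proc entry above (comment only): an open for
+ * write-only just fires CMD_LISTBSS to start a scan and returns no data; an
+ * open that includes read access walks the list via readBSSListRid()
+ * (forcing a lose-sync first when the file was opened read/write) and emits
+ * one line per BSS:
+ *	xx:xx:xx:xx:xx:xx <ssid> rssi = N channel = C ESS adhoc wep shorthdr
+ * with only the capability words that actually apply.
+ */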
+
+static int proc_close( struct inode *inode, struct file *file )
+{
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ if ( data->on_close != NULL ) data->on_close( inode, file );
+ if ( data->rbuffer ) kfree( data->rbuffer );
+ if ( data->wbuffer ) kfree( data->wbuffer );
+ kfree( data );
+ return 0;
+}
+
+static struct net_device_list {
+ struct net_device *dev;
+ struct net_device_list *next;
+} *airo_devices;
+
+/* Since the card doesn't automatically switch to the right WEP mode,
+   we make it do so ourselves. If the card isn't associated, we switch
+   WEP modes every few seconds to see if that helps. If the card is
+   associated we check every minute to see if anything has changed. */
+static void timer_func( struct net_device *dev ) {
+ struct airo_info *apriv = dev->priv;
+ Resp rsp;
+
+/* We don't have a link so try changing the authtype */
+ readConfigRid(apriv, 0);
+ disable_MAC(apriv, 0);
+ switch(apriv->config.authType) {
+ case AUTH_ENCRYPT:
+/* So drop to OPEN */
+ apriv->config.authType = AUTH_OPEN;
+ break;
+ case AUTH_SHAREDKEY:
+ if (apriv->keyindex < auto_wep) {
+ set_wep_key(apriv, apriv->keyindex, NULL, 0, 0, 0);
+ apriv->config.authType = AUTH_SHAREDKEY;
+ apriv->keyindex++;
+ } else {
+ /* Drop to ENCRYPT */
+ apriv->keyindex = 0;
+ set_wep_key(apriv, apriv->defindex, NULL, 0, 0, 0);
+ apriv->config.authType = AUTH_ENCRYPT;
+ }
+ break;
+ default: /* We'll escalate to SHAREDKEY */
+ apriv->config.authType = AUTH_SHAREDKEY;
+ }
+ set_bit (FLAG_COMMIT, &apriv->flags);
+ writeConfigRid(apriv, 0);
+ enable_MAC(apriv, &rsp, 0);
+ up(&apriv->sem);
+
+/* Schedule check to see if the change worked */
+ clear_bit(JOB_AUTOWEP, &apriv->flags);
+ apriv->expires = RUN_AT(HZ*3);
+}
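+
+/* The cycling above walks, roughly (comment-only sketch):
+ *
+ *	OPEN -> SHAREDKEY(key 0) ... SHAREDKEY(key auto_wep - 1)
+ *	     -> ENCRYPT -> OPEN -> ...
+ *
+ * with the next check scheduled about 3 seconds later (RUN_AT(HZ*3)) to
+ * see whether the change helped.
+ */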
+
+static int add_airo_dev( struct net_device *dev ) {
+ struct net_device_list *node = kmalloc( sizeof( *node ), GFP_KERNEL );
+ if ( !node )
+ return -ENOMEM;
+
+ node->dev = dev;
+ node->next = airo_devices;
+ airo_devices = node;
+
+ return 0;
+}
+
+static void del_airo_dev( struct net_device *dev ) {
+ struct net_device_list **p = &airo_devices;
+ while( *p && ( (*p)->dev != dev ) )
+ p = &(*p)->next;
+ if ( *p && (*p)->dev == dev )
+ *p = (*p)->next;
+}
+
+#ifdef CONFIG_PCI
+static int __devinit airo_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
+{
+ struct net_device *dev;
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+ pci_set_master(pdev);
+
+ if (pdev->device == 0x5000 || pdev->device == 0xa504)
+ dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
+ else
+ dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static void __devexit airo_pci_remove(struct pci_dev *pdev)
+{
+}
+
+static int airo_pci_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct airo_info *ai = dev->priv;
+ Cmd cmd;
+ Resp rsp;
+
+ if ((ai->APList == NULL) &&
+ (ai->APList = kmalloc(sizeof(APListRid), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ if ((ai->SSID == NULL) &&
+ (ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ readAPListRid(ai, ai->APList);
+ readSsidRid(ai, ai->SSID);
+ memset(&cmd, 0, sizeof(cmd));
+ /* the lock will be released at the end of the resume callback */
+ if (down_interruptible(&ai->sem))
+ return -EAGAIN;
+ disable_MAC(ai, 0);
+ netif_device_detach(dev);
+ ai->power = state;
+ cmd.cmd=HOSTSLEEP;
+ issuecommand(ai, &cmd, &rsp);
+
+ pci_enable_wake(pdev, state, 1);
+ pci_save_state(pdev);
+ return pci_set_power_state(pdev, state);
+}
+
+static int airo_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct airo_info *ai = dev->priv;
+ Resp rsp;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, ai->power, 0);
+
+ if (ai->power > 1) {
+ reset_card(dev, 0);
+ mpi_init_descriptors(ai);
+ setup_card(ai, dev->dev_addr, 0);
+ clear_bit(FLAG_RADIO_OFF, &ai->flags);
+ clear_bit(FLAG_PENDING_XMIT, &ai->flags);
+ } else {
+ OUT4500(ai, EVACK, EV_AWAKEN);
+ OUT4500(ai, EVACK, EV_AWAKEN);
+ msleep(100);
+ }
+
+ set_bit (FLAG_COMMIT, &ai->flags);
+ disable_MAC(ai, 0);
+ msleep(200);
+ if (ai->SSID) {
+ writeSsidRid(ai, ai->SSID, 0);
+ kfree(ai->SSID);
+ ai->SSID = NULL;
+ }
+ if (ai->APList) {
+ writeAPListRid(ai, ai->APList, 0);
+ kfree(ai->APList);
+ ai->APList = NULL;
+ }
+ writeConfigRid(ai, 0);
+ enable_MAC(ai, &rsp, 0);
+ ai->power = 0;
+ netif_device_attach(dev);
+ netif_wake_queue(dev);
+ enable_interrupts(ai);
+ up(&ai->sem);
+ return 0;
+}
+#endif
+
+static int __init airo_init_module( void )
+{
+ int i, have_isa_dev = 0;
+
+ airo_entry = create_proc_entry("aironet",
+ S_IFDIR | airo_perm,
+ proc_root_driver);
+ airo_entry->uid = proc_uid;
+ airo_entry->gid = proc_gid;
+
+ for( i = 0; i < 4 && io[i] && irq[i]; i++ ) {
+ printk( KERN_INFO
+ "airo: Trying to configure ISA adapter at irq=%d io=0x%x\n",
+ irq[i], io[i] );
+ if (init_airo_card( irq[i], io[i], 0, NULL ))
+ have_isa_dev = 1;
+ }
+
+#ifdef CONFIG_PCI
+ printk( KERN_INFO "airo: Probing for PCI adapters\n" );
+ pci_register_driver(&airo_driver);
+ printk( KERN_INFO "airo: Finished probing for PCI adapters\n" );
+#endif
+
+ /* Always exit with success, as we are a library module
+ * as well as a driver module
+ */
+ return 0;
+}
+
+static void __exit airo_cleanup_module( void )
+{
+ while( airo_devices ) {
+ printk( KERN_INFO "airo: Unregistering %s\n", airo_devices->dev->name );
+ stop_airo_card( airo_devices->dev, 1 );
+ }
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&airo_driver);
+#endif
+ remove_proc_entry("aironet", proc_root_driver);
+}
+
+#ifdef WIRELESS_EXT
+/*
+ * Initial Wireless Extension code for Aironet driver by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
+ * Conversion to new driver API by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
+ * Javier also did a good amount of work here, adding some new extensions
+ * and fixing my code. Let's just say that without him this code just
+ * would not work at all... - Jean II
+ */
+
+static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
+{
+ int quality = 0;
+
+	if ((status_rid->mode & 0x3f) == 0x3f && (cap_rid->hardCap & 8)) {
+		if (memcmp(cap_rid->prodName, "350", 3)) {
+			if (status_rid->signalQuality > 0x20)
+				quality = 0;
+			else
+				quality = 0x20 - status_rid->signalQuality;
+		} else {
+			if (status_rid->signalQuality > 0xb0)
+				quality = 0;
+			else if (status_rid->signalQuality < 0x10)
+				quality = 0xa0;
+			else
+				quality = 0xb0 - status_rid->signalQuality;
+		}
+	}
+ return quality;
+}
+
+#define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0)
+#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50)
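+
+/* Worked example of the quality mapping above (comment only): on a non-350
+ * card a signalQuality of 0x08 yields 0x20 - 0x08 = 0x18 out of a maximum
+ * of 0x20; on a 350 a signalQuality of 0x30 yields 0xb0 - 0x30 = 0x80 out
+ * of 0xa0.  Lower signalQuality readings thus mean better link quality.
+ */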
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get protocol name
+ */
+static int airo_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ char *cwrq,
+ char *extra)
+{
+ strcpy(cwrq, "IEEE 802.11-DS");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set frequency
+ */
+static int airo_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int rc = -EINPROGRESS; /* Call commit handler */
+
+ /* If setting by frequency, convert to a channel */
+ if((fwrq->e == 1) &&
+ (fwrq->m >= (int) 2.412e8) &&
+ (fwrq->m <= (int) 2.487e8)) {
+ int f = fwrq->m / 100000;
+ int c = 0;
+ while((c < 14) && (f != frequency_list[c]))
+ c++;
+ /* Hack to fall through... */
+ fwrq->e = 0;
+ fwrq->m = c + 1;
+ }
+ /* Setting by channel number */
+ if((fwrq->m > 1000) || (fwrq->e > 0))
+ rc = -EOPNOTSUPP;
+ else {
+ int channel = fwrq->m;
+ /* We should do a better check than that,
+ * based on the card capability !!! */
+ if((channel < 1) || (channel > 16)) {
+ printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
+ rc = -EINVAL;
+ } else {
+ readConfigRid(local, 1);
+ /* Yes ! We can set it !!! */
+ local->config.channelSet = (u16)(channel - 1);
+ set_bit (FLAG_COMMIT, &local->flags);
+ }
+ }
+ return rc;
+}
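+
+/* Example of the conversion above (illustration only; frequency_list[] is
+ * the driver's 2.4 GHz channel table in MHz): a request with e = 1 and
+ * m = 243700000 (2.437 GHz) gives f = 2437, which matches
+ * frequency_list[5], so the request is rewritten as channel m = 6 and
+ * channelSet ends up as 5.
+ */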
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get frequency
+ */
+static int airo_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ StatusRid status_rid; /* Card status info */
+
+ readConfigRid(local, 1);
+ if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
+ status_rid.channel = local->config.channelSet;
+ else
+ readStatusRid(local, &status_rid, 1);
+
+#ifdef WEXT_USECHANNELS
+ fwrq->m = ((int)status_rid.channel) + 1;
+ fwrq->e = 0;
+#else
+ {
+ int f = (int)status_rid.channel;
+ fwrq->m = frequency_list[f] * 100000;
+ fwrq->e = 1;
+ }
+#endif
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set ESSID
+ */
+static int airo_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ Resp rsp;
+ SsidRid SSID_rid; /* SSIDs */
+
+ /* Reload the list of current SSID */
+ readSsidRid(local, &SSID_rid);
+
+ /* Check if we asked for `any' */
+ if(dwrq->flags == 0) {
+ /* Just send an empty SSID list */
+ memset(&SSID_rid, 0, sizeof(SSID_rid));
+ } else {
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ /* Check the size of the string */
+ if(dwrq->length > IW_ESSID_MAX_SIZE+1) {
+ return -E2BIG ;
+ }
+ /* Check if index is valid */
+ if((index < 0) || (index >= 4)) {
+ return -EINVAL;
+ }
+
+ /* Set the SSID */
+ memset(SSID_rid.ssids[index].ssid, 0,
+ sizeof(SSID_rid.ssids[index].ssid));
+ memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length);
+ SSID_rid.ssids[index].len = dwrq->length - 1;
+ }
+ SSID_rid.len = sizeof(SSID_rid);
+ /* Write it to the card */
+ disable_MAC(local, 1);
+ writeSsidRid(local, &SSID_rid, 1);
+ enable_MAC(local, &rsp, 1);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get ESSID
+ */
+static int airo_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ StatusRid status_rid; /* Card status info */
+
+ readStatusRid(local, &status_rid, 1);
+
+ /* Note : if dwrq->flags != 0, we should
+ * get the relevant SSID from the SSID list... */
+
+ /* Get the current SSID */
+ memcpy(extra, status_rid.SSID, status_rid.SSIDlen);
+ extra[status_rid.SSIDlen] = '\0';
+ /* If none, we may want to get the one that was set */
+
+ /* Push it out ! */
+ dwrq->length = status_rid.SSIDlen + 1;
+ dwrq->flags = 1; /* active */
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set AP address
+ */
+static int airo_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ Cmd cmd;
+ Resp rsp;
+ APListRid APList_rid;
+ static const unsigned char bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
+
+ if (awrq->sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+ else if (!memcmp(bcast, awrq->sa_data, ETH_ALEN)) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LOSE_SYNC;
+ if (down_interruptible(&local->sem))
+ return -ERESTARTSYS;
+ issuecommand(local, &cmd, &rsp);
+ up(&local->sem);
+ } else {
+ memset(&APList_rid, 0, sizeof(APList_rid));
+ APList_rid.len = sizeof(APList_rid);
+ memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN);
+ disable_MAC(local, 1);
+ writeAPListRid(local, &APList_rid, 1);
+ enable_MAC(local, &rsp, 1);
+ }
+ return 0;
+}
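+
+/* Behaviour note (comment only): passing the all-ones broadcast address
+ * makes the handler issue CMD_LOSE_SYNC, i.e. drop the current association,
+ * while any other address is written as the single entry of an APListRid so
+ * the card will only associate with that AP.
+ */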
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get AP address
+ */
+static int airo_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ StatusRid status_rid; /* Card status info */
+
+ readStatusRid(local, &status_rid, 1);
+
+ /* Tentative. This seems to work, wow, I'm lucky !!! */
+ memcpy(awrq->sa_data, status_rid.bssid[0], ETH_ALEN);
+ awrq->sa_family = ARPHRD_ETHER;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Nickname
+ */
+static int airo_set_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ /* Check the size of the string */
+ if(dwrq->length > 16 + 1) {
+ return -E2BIG;
+ }
+ readConfigRid(local, 1);
+ memset(local->config.nodeName, 0, sizeof(local->config.nodeName));
+ memcpy(local->config.nodeName, extra, dwrq->length);
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Nickname
+ */
+static int airo_get_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ strncpy(extra, local->config.nodeName, 16);
+ extra[16] = '\0';
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Bit-Rate
+ */
+static int airo_set_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ CapabilityRid cap_rid; /* Card capability info */
+ u8 brate = 0;
+ int i;
+
+ /* First : get a valid bit rate value */
+ readCapabilityRid(local, &cap_rid, 1);
+
+ /* Which type of value ? */
+ if((vwrq->value < 8) && (vwrq->value >= 0)) {
+ /* Setting by rate index */
+ /* Find value in the magic rate table */
+ brate = cap_rid.supportedRates[vwrq->value];
+ } else {
+ /* Setting by frequency value */
+ u8 normvalue = (u8) (vwrq->value/500000);
+
+ /* Check if rate is valid */
+ for(i = 0 ; i < 8 ; i++) {
+ if(normvalue == cap_rid.supportedRates[i]) {
+ brate = normvalue;
+ break;
+ }
+ }
+ }
+	/* -1 designates the max rate (mostly auto mode) */
+ if(vwrq->value == -1) {
+ /* Get the highest available rate */
+ for(i = 0 ; i < 8 ; i++) {
+ if(cap_rid.supportedRates[i] == 0)
+ break;
+ }
+ if(i != 0)
+ brate = cap_rid.supportedRates[i - 1];
+ }
+ /* Check that it is valid */
+ if(brate == 0) {
+ return -EINVAL;
+ }
+
+ readConfigRid(local, 1);
+ /* Now, check if we want a fixed or auto value */
+ if(vwrq->fixed == 0) {
+ /* Fill all the rates up to this max rate */
+ memset(local->config.rates, 0, 8);
+ for(i = 0 ; i < 8 ; i++) {
+ local->config.rates[i] = cap_rid.supportedRates[i];
+ if(local->config.rates[i] == brate)
+ break;
+ }
+ } else {
+ /* Fixed mode */
+ /* One rate, fixed */
+ memset(local->config.rates, 0, 8);
+ local->config.rates[0] = brate;
+ }
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
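+
+/* Rate encoding used above (comment only): supportedRates[] holds rates in
+ * 500 kb/s units, so a request of vwrq->value = 11000000 (11 Mb/s) becomes
+ * normvalue = 22, while a small value such as 2 is treated as an index into
+ * supportedRates[].  With vwrq->fixed == 0 config.rates[] is filled with
+ * every rate up to the chosen one; with fixed set, only that single rate.
+ */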
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Bit-Rate
+ */
+static int airo_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ StatusRid status_rid; /* Card status info */
+
+ readStatusRid(local, &status_rid, 1);
+
+ vwrq->value = status_rid.currentXmitRate * 500000;
+ /* If more than one rate, set auto */
+ readConfigRid(local, 1);
+ vwrq->fixed = (local->config.rates[1] == 0);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set RTS threshold
+ */
+static int airo_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int rthr = vwrq->value;
+
+ if(vwrq->disabled)
+ rthr = 2312;
+ if((rthr < 0) || (rthr > 2312)) {
+ return -EINVAL;
+ }
+ readConfigRid(local, 1);
+ local->config.rtsThres = rthr;
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get RTS threshold
+ */
+static int airo_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ vwrq->value = local->config.rtsThres;
+ vwrq->disabled = (vwrq->value >= 2312);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Fragmentation threshold
+ */
+static int airo_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int fthr = vwrq->value;
+
+ if(vwrq->disabled)
+ fthr = 2312;
+ if((fthr < 256) || (fthr > 2312)) {
+ return -EINVAL;
+ }
+ fthr &= ~0x1; /* Get an even value - is it really needed ??? */
+ readConfigRid(local, 1);
+ local->config.fragThresh = (u16)fthr;
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Fragmentation threshold
+ */
+static int airo_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ vwrq->value = local->config.fragThresh;
+ vwrq->disabled = (vwrq->value >= 2312);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Mode of Operation
+ */
+static int airo_set_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int reset = 0;
+
+ readConfigRid(local, 1);
+ if ((local->config.rmode & 0xff) >= RXMODE_RFMON)
+ reset = 1;
+
+ switch(*uwrq) {
+ case IW_MODE_ADHOC:
+ local->config.opmode &= 0xFF00;
+ local->config.opmode |= MODE_STA_IBSS;
+ local->config.rmode &= 0xfe00;
+ local->config.scanMode = SCANMODE_ACTIVE;
+ clear_bit (FLAG_802_11, &local->flags);
+ break;
+ case IW_MODE_INFRA:
+ local->config.opmode &= 0xFF00;
+ local->config.opmode |= MODE_STA_ESS;
+ local->config.rmode &= 0xfe00;
+ local->config.scanMode = SCANMODE_ACTIVE;
+ clear_bit (FLAG_802_11, &local->flags);
+ break;
+ case IW_MODE_MASTER:
+ local->config.opmode &= 0xFF00;
+ local->config.opmode |= MODE_AP;
+ local->config.rmode &= 0xfe00;
+ local->config.scanMode = SCANMODE_ACTIVE;
+ clear_bit (FLAG_802_11, &local->flags);
+ break;
+ case IW_MODE_REPEAT:
+ local->config.opmode &= 0xFF00;
+ local->config.opmode |= MODE_AP_RPTR;
+ local->config.rmode &= 0xfe00;
+ local->config.scanMode = SCANMODE_ACTIVE;
+ clear_bit (FLAG_802_11, &local->flags);
+ break;
+ case IW_MODE_MONITOR:
+ local->config.opmode &= 0xFF00;
+ local->config.opmode |= MODE_STA_ESS;
+ local->config.rmode &= 0xfe00;
+ local->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
+ local->config.scanMode = SCANMODE_PASSIVE;
+ set_bit (FLAG_802_11, &local->flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (reset)
+ set_bit (FLAG_RESET, &local->flags);
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Mode of Operation
+ */
+static int airo_get_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ /* If not managed, assume it's ad-hoc */
+ switch (local->config.opmode & 0xFF) {
+ case MODE_STA_ESS:
+ *uwrq = IW_MODE_INFRA;
+ break;
+ case MODE_AP:
+ *uwrq = IW_MODE_MASTER;
+ break;
+ case MODE_AP_RPTR:
+ *uwrq = IW_MODE_REPEAT;
+ break;
+ default:
+ *uwrq = IW_MODE_ADHOC;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Encryption Key
+ */
+static int airo_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ CapabilityRid cap_rid; /* Card capability info */
+
+ /* Is WEP supported ? */
+ readCapabilityRid(local, &cap_rid, 1);
+ /* Older firmware doesn't support this...
+ if(!(cap_rid.softCap & 2)) {
+ return -EOPNOTSUPP;
+ } */
+ readConfigRid(local, 1);
+
+ /* Basic checking: do we have a key to set ?
+ * Note : with the new API, it's impossible to get a NULL pointer.
+ * Therefore, we need to check a key size == 0 instead.
+	 * New versions of iwconfig properly set the IW_ENCODE_NOKEY flag
+	 * when no key is present (only changing flags), but older versions
+	 * don't do it. - Jean II */
+ if (dwrq->length > 0) {
+ wep_key_t key;
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ int current_index = get_wep_key(local, 0xffff);
+ /* Check the size of the key */
+ if (dwrq->length > MAX_KEY_SIZE) {
+ return -EINVAL;
+ }
+ /* Check the index (none -> use current) */
+ if ((index < 0) || (index >= ((cap_rid.softCap & 0x80) ? 4:1)))
+ index = current_index;
+ /* Set the length */
+ if (dwrq->length > MIN_KEY_SIZE)
+ key.len = MAX_KEY_SIZE;
+ else
+ if (dwrq->length > 0)
+ key.len = MIN_KEY_SIZE;
+ else
+ /* Disable the key */
+ key.len = 0;
+ /* Check if the key is not marked as invalid */
+ if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ /* Cleanup */
+ memset(key.key, 0, MAX_KEY_SIZE);
+ /* Copy the key in the driver */
+ memcpy(key.key, extra, dwrq->length);
+ /* Send the key to the card */
+ set_wep_key(local, index, key.key, key.len, 1, 1);
+ }
+ /* WE specify that if a valid key is set, encryption
+ * should be enabled (user may turn it off later)
+ * This is also how "iwconfig ethX key on" works */
+ if((index == current_index) && (key.len > 0) &&
+ (local->config.authType == AUTH_OPEN)) {
+ local->config.authType = AUTH_ENCRYPT;
+ set_bit (FLAG_COMMIT, &local->flags);
+ }
+ } else {
+ /* Do we want to just set the transmit key index ? */
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) {
+ set_wep_key(local, index, NULL, 0, 1, 1);
+ } else
+			/* Don't complain if we only change the mode */
+			if(!(dwrq->flags & IW_ENCODE_MODE)) {
+ return -EINVAL;
+ }
+ }
+ /* Read the flags */
+ if(dwrq->flags & IW_ENCODE_DISABLED)
+ local->config.authType = AUTH_OPEN; // disable encryption
+ if(dwrq->flags & IW_ENCODE_RESTRICTED)
+ local->config.authType = AUTH_SHAREDKEY; // Only Both
+ if(dwrq->flags & IW_ENCODE_OPEN)
+ local->config.authType = AUTH_ENCRYPT; // Only Wep
+ /* Commit the changes to flags if needed */
+ if(dwrq->flags & IW_ENCODE_MODE)
+ set_bit (FLAG_COMMIT, &local->flags);
+ return -EINPROGRESS; /* Call commit handler */
+}
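+
+/* Illustration of the key handling above (not driver code; assumes the
+ * usual WEP sizes of 5 and 13 bytes, matching the encoding sizes advertised
+ * in airo_get_range()): a 5 byte key is stored with key.len = MIN_KEY_SIZE,
+ * a longer one with key.len = MAX_KEY_SIZE.  A request carrying only
+ * IW_ENCODE_INDEX and no key data merely switches the transmit key, and
+ * IW_ENCODE_DISABLED/RESTRICTED/OPEN map to AUTH_OPEN, AUTH_SHAREDKEY and
+ * AUTH_ENCRYPT respectively.
+ */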
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Encryption Key
+ */
+static int airo_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ CapabilityRid cap_rid; /* Card capability info */
+
+ /* Is it supported ? */
+ readCapabilityRid(local, &cap_rid, 1);
+ if(!(cap_rid.softCap & 2)) {
+ return -EOPNOTSUPP;
+ }
+ readConfigRid(local, 1);
+ /* Check encryption mode */
+ switch(local->config.authType) {
+ case AUTH_ENCRYPT:
+ dwrq->flags = IW_ENCODE_OPEN;
+ break;
+ case AUTH_SHAREDKEY:
+ dwrq->flags = IW_ENCODE_RESTRICTED;
+ break;
+ default:
+ case AUTH_OPEN:
+ dwrq->flags = IW_ENCODE_DISABLED;
+ break;
+ }
+ /* We can't return the key, so set the proper flag and return zero */
+ dwrq->flags |= IW_ENCODE_NOKEY;
+ memset(extra, 0, 16);
+
+ /* Which key do we want ? -1 -> tx index */
+ if ((index < 0) || (index >= ((cap_rid.softCap & 0x80) ? 4 : 1)))
+ index = get_wep_key(local, 0xffff);
+ dwrq->flags |= index + 1;
+ /* Copy the key to the user buffer */
+ dwrq->length = get_wep_key(local, index);
+ if (dwrq->length > 16) {
+ dwrq->length=0;
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Tx-Power
+ */
+static int airo_set_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ CapabilityRid cap_rid; /* Card capability info */
+ int i;
+ int rc = -EINVAL;
+
+ readCapabilityRid(local, &cap_rid, 1);
+
+ if (vwrq->disabled) {
+ set_bit (FLAG_RADIO_OFF, &local->flags);
+ set_bit (FLAG_COMMIT, &local->flags);
+ return -EINPROGRESS; /* Call commit handler */
+ }
+ if (vwrq->flags != IW_TXPOW_MWATT) {
+ return -EINVAL;
+ }
+ clear_bit (FLAG_RADIO_OFF, &local->flags);
+	for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++)
+ if ((vwrq->value==cap_rid.txPowerLevels[i])) {
+ readConfigRid(local, 1);
+ local->config.txPower = vwrq->value;
+ set_bit (FLAG_COMMIT, &local->flags);
+ rc = -EINPROGRESS; /* Call commit handler */
+ break;
+ }
+ return rc;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Tx-Power
+ */
+static int airo_get_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ vwrq->value = local->config.txPower;
+ vwrq->fixed = 1; /* No power control */
+ vwrq->disabled = test_bit(FLAG_RADIO_OFF, &local->flags);
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Retry limits
+ */
+static int airo_set_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int rc = -EINVAL;
+
+ if(vwrq->disabled) {
+ return -EINVAL;
+ }
+ readConfigRid(local, 1);
+ if(vwrq->flags & IW_RETRY_LIMIT) {
+ if(vwrq->flags & IW_RETRY_MAX)
+ local->config.longRetryLimit = vwrq->value;
+ else if (vwrq->flags & IW_RETRY_MIN)
+ local->config.shortRetryLimit = vwrq->value;
+ else {
+ /* No modifier : set both */
+ local->config.longRetryLimit = vwrq->value;
+ local->config.shortRetryLimit = vwrq->value;
+ }
+ set_bit (FLAG_COMMIT, &local->flags);
+ rc = -EINPROGRESS; /* Call commit handler */
+ }
+ if(vwrq->flags & IW_RETRY_LIFETIME) {
+ local->config.txLifetime = vwrq->value / 1024;
+ set_bit (FLAG_COMMIT, &local->flags);
+ rc = -EINPROGRESS; /* Call commit handler */
+ }
+ return rc;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Retry limits
+ */
+static int airo_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ vwrq->disabled = 0; /* Can't be disabled */
+
+ readConfigRid(local, 1);
+ /* Note : by default, display the min retry number */
+ if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ vwrq->flags = IW_RETRY_LIFETIME;
+ vwrq->value = (int)local->config.txLifetime * 1024;
+ } else if((vwrq->flags & IW_RETRY_MAX)) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = (int)local->config.longRetryLimit;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = (int)local->config.shortRetryLimit;
+ if((int)local->config.shortRetryLimit != (int)local->config.longRetryLimit)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get range info
+ */
+static int airo_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ struct iw_range *range = (struct iw_range *) extra;
+ CapabilityRid cap_rid; /* Card capability info */
+ int i;
+ int k;
+
+ readCapabilityRid(local, &cap_rid, 1);
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x0000;
+ range->num_channels = 14;
+	/* Should be based on cap_rid.country to give only
+	 * what the current card supports */
+ k = 0;
+ for(i = 0; i < 14; i++) {
+ range->freq[k].i = i + 1; /* List index */
+ range->freq[k].m = frequency_list[i] * 100000;
+ range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ }
+ range->num_frequency = k;
+
+ /* Hum... Should put the right values there */
+ range->max_qual.qual = airo_get_max_quality(&cap_rid);
+ range->max_qual.level = 0x100 - 120; /* -120 dBm */
+ range->max_qual.noise = 0;
+ range->sensitivity = 65535;
+
+ for(i = 0 ; i < 8 ; i++) {
+ range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
+ if(range->bitrate[i] == 0)
+ break;
+ }
+ range->num_bitrates = i;
+
+ /* Set an indication of the max TCP throughput
+ * in bit/s that we can expect using this interface.
+	 * May be used for QoS stuff... Jean II */
+ if(i > 2)
+ range->throughput = 5000 * 1000;
+ else
+ range->throughput = 1500 * 1000;
+
+ range->min_rts = 0;
+ range->max_rts = 2312;
+ range->min_frag = 256;
+ range->max_frag = 2312;
+
+ if(cap_rid.softCap & 2) {
+ // WEP: RC4 40 bits
+ range->encoding_size[0] = 5;
+ // RC4 ~128 bits
+ if (cap_rid.softCap & 0x100) {
+ range->encoding_size[1] = 13;
+ range->num_encoding_sizes = 2;
+ } else
+ range->num_encoding_sizes = 1;
+ range->max_encoding_tokens = (cap_rid.softCap & 0x80) ? 4 : 1;
+ } else {
+ range->num_encoding_sizes = 0;
+ range->max_encoding_tokens = 0;
+ }
+ range->min_pmp = 0;
+ range->max_pmp = 5000000; /* 5 secs */
+ range->min_pmt = 0;
+ range->max_pmt = 65535 * 1024; /* ??? */
+ range->pmp_flags = IW_POWER_PERIOD;
+ range->pmt_flags = IW_POWER_TIMEOUT;
+ range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
+
+ /* Transmit Power - values are in mW */
+ for(i = 0 ; i < 8 ; i++) {
+ range->txpower[i] = cap_rid.txPowerLevels[i];
+ if(range->txpower[i] == 0)
+ break;
+ }
+ range->num_txpower = i;
+ range->txpower_capa = IW_TXPOW_MWATT;
+ range->we_version_source = 12;
+ range->we_version_compiled = WIRELESS_EXT;
+ range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = IW_RETRY_LIFETIME;
+ range->min_retry = 1;
+ range->max_retry = 65535;
+ range->min_r_time = 1024;
+ range->max_r_time = 65535 * 1024;
+ /* Experimental measurements - boundary 11/5.5 Mb/s */
+ /* Note : with or without the (local->rssi), results
+ * are somewhat different. - Jean II */
+ range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
+ if (local->rssi)
+ range->avg_qual.level = 186; /* -70 dBm */
+ else
+ range->avg_qual.level = 176; /* -80 dBm */
+ range->avg_qual.noise = 0;
+
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+ IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
+ IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+ IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
+ range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVTXDROP);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Power Management
+ */
+static int airo_set_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ if (vwrq->disabled) {
+ if ((local->config.rmode & 0xFF) >= RXMODE_RFMON) {
+ return -EINVAL;
+ }
+ local->config.powerSaveMode = POWERSAVE_CAM;
+ local->config.rmode &= 0xFF00;
+ local->config.rmode |= RXMODE_BC_MC_ADDR;
+ set_bit (FLAG_COMMIT, &local->flags);
+ return -EINPROGRESS; /* Call commit handler */
+ }
+ if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ local->config.fastListenDelay = (vwrq->value + 500) / 1024;
+ local->config.powerSaveMode = POWERSAVE_PSPCAM;
+ set_bit (FLAG_COMMIT, &local->flags);
+ } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
+ local->config.fastListenInterval = local->config.listenInterval = (vwrq->value + 500) / 1024;
+ local->config.powerSaveMode = POWERSAVE_PSPCAM;
+ set_bit (FLAG_COMMIT, &local->flags);
+ }
+ switch (vwrq->flags & IW_POWER_MODE) {
+ case IW_POWER_UNICAST_R:
+ if ((local->config.rmode & 0xFF) >= RXMODE_RFMON) {
+ return -EINVAL;
+ }
+ local->config.rmode &= 0xFF00;
+ local->config.rmode |= RXMODE_ADDR;
+ set_bit (FLAG_COMMIT, &local->flags);
+ break;
+ case IW_POWER_ALL_R:
+ if ((local->config.rmode & 0xFF) >= RXMODE_RFMON) {
+ return -EINVAL;
+ }
+ local->config.rmode &= 0xFF00;
+ local->config.rmode |= RXMODE_BC_MC_ADDR;
+ set_bit (FLAG_COMMIT, &local->flags);
+ case IW_POWER_ON:
+ break;
+ default:
+ return -EINVAL;
+ }
+ // Note : we may want to factor local->need_commit here
+ // Note2 : may also want to factor RXMODE_RFMON test
+ return -EINPROGRESS; /* Call commit handler */
+}
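+
+/* Units in the power management code above (comment only): Wireless
+ * Extensions pass periods/timeouts in microseconds while the card counts
+ * 1024 us ticks, hence the (value + 500) / 1024 rounding here and the
+ * * 1024 in airo_get_power() below.  E.g. a requested period of 102400 us
+ * is stored as 100 ticks and read back as 102400 us.
+ */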
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Power Management
+ */
+static int airo_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ int mode;
+
+ readConfigRid(local, 1);
+ mode = local->config.powerSaveMode;
+ if ((vwrq->disabled = (mode == POWERSAVE_CAM)))
+ return 0;
+ if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ vwrq->value = (int)local->config.fastListenDelay * 1024;
+ vwrq->flags = IW_POWER_TIMEOUT;
+ } else {
+ vwrq->value = (int)local->config.fastListenInterval * 1024;
+ vwrq->flags = IW_POWER_PERIOD;
+ }
+ if ((local->config.rmode & 0xFF) == RXMODE_ADDR)
+ vwrq->flags |= IW_POWER_UNICAST_R;
+ else
+ vwrq->flags |= IW_POWER_ALL_R;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set Sensitivity
+ */
+static int airo_set_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ local->config.rssiThreshold = vwrq->disabled ? RSSI_DEFAULT : vwrq->value;
+ set_bit (FLAG_COMMIT, &local->flags);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get Sensitivity
+ */
+static int airo_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+
+ readConfigRid(local, 1);
+ vwrq->value = local->config.rssiThreshold;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get AP List
+ * Note : this is deprecated in favor of IWSCAN
+ */
+static int airo_get_aplist(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *local = dev->priv;
+ struct sockaddr *address = (struct sockaddr *) extra;
+ struct iw_quality qual[IW_MAX_AP];
+ BSSListRid BSSList;
+ int i;
+ int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
+
+ for (i = 0; i < IW_MAX_AP; i++) {
+ if (readBSSListRid(local, loseSync, &BSSList))
+ break;
+ loseSync = 0;
+ memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN);
+ address[i].sa_family = ARPHRD_ETHER;
+ if (local->rssi)
+ qual[i].level = 0x100 - local->rssi[BSSList.rssi].rssidBm;
+ else
+ qual[i].level = (BSSList.rssi + 321) / 2;
+ qual[i].qual = qual[i].noise = 0;
+ qual[i].updated = 2;
+ if (BSSList.index == 0xffff)
+ break;
+ }
+ if (!i) {
+ StatusRid status_rid; /* Card status info */
+ readStatusRid(local, &status_rid, 1);
+ for (i = 0;
+ i < min(IW_MAX_AP, 4) &&
+ (status_rid.bssid[i][0]
+ & status_rid.bssid[i][1]
+ & status_rid.bssid[i][2]
+ & status_rid.bssid[i][3]
+ & status_rid.bssid[i][4]
+ & status_rid.bssid[i][5])!=0xff &&
+ (status_rid.bssid[i][0]
+ | status_rid.bssid[i][1]
+ | status_rid.bssid[i][2]
+ | status_rid.bssid[i][3]
+ | status_rid.bssid[i][4]
+ | status_rid.bssid[i][5]);
+ i++) {
+ memcpy(address[i].sa_data,
+ status_rid.bssid[i], ETH_ALEN);
+ address[i].sa_family = ARPHRD_ETHER;
+ }
+ } else {
+ dwrq->flags = 1; /* Should be define'd */
+ memcpy(extra + sizeof(struct sockaddr)*i,
+ &qual, sizeof(struct iw_quality)*i);
+ }
+ dwrq->length = i;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : Initiate Scan
+ */
+static int airo_set_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct airo_info *ai = dev->priv;
+ Cmd cmd;
+ Resp rsp;
+
+ /* Note : you may have realised that, as this is a SET operation,
+ * this is privileged and therefore a normal user can't
+ * perform scanning.
+	 * This is not an error: while the device performs scanning,
+ * traffic doesn't flow, so it's a perfect DoS...
+ * Jean II */
+ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
+
+ /* Initiate a scan command */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LISTBSS;
+ if (down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+ issuecommand(ai, &cmd, &rsp);
+ ai->scan_timestamp = jiffies;
+ up(&ai->sem);
+
+ /* At this point, just return to the user. */
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate scan data returned from the card to a card independent
+ * format that the Wireless Tools will understand - Jean II
+ */
+static inline char *airo_translate_scan(struct net_device *dev,
+ char *current_ev,
+ char *end_buf,
+ BSSListRid *list)
+{
+ struct airo_info *ai = dev->priv;
+ struct iw_event iwe; /* Temporary buffer */
+ u16 capabilities;
+ char * current_val; /* For rates */
+ int i;
+
+ /* First entry *MUST* be the AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, list->bssid, ETH_ALEN);
+ current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
+
+ /* Other entries will be displayed in the order we give them */
+
+ /* Add the ESSID */
+ iwe.u.data.length = list->ssidLen;
+ if(iwe.u.data.length > 32)
+ iwe.u.data.length = 32;
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, list->ssid);
+
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ capabilities = le16_to_cpu(list->cap);
+ if(capabilities & (CAP_ESS | CAP_IBSS)) {
+ if(capabilities & CAP_ESS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Add frequency */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = le16_to_cpu(list->dsChannel);
+ iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000;
+ iwe.u.freq.e = 1;
+ current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ if (ai->rssi)
+ iwe.u.qual.level = 0x100 - ai->rssi[list->rssi].rssidBm;
+ else
+ iwe.u.qual.level = (list->rssi + 321) / 2;
+ iwe.u.qual.noise = 0;
+ iwe.u.qual.qual = 0;
+ current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if(capabilities & CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, list->ssid);
+
+	/* Rate : stuffing multiple values in a single event requires a bit
+	 * more magic - Jean II */
+ current_val = current_ev + IW_EV_LCP_LEN;
+
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ /* Max 8 values */
+ for(i = 0 ; i < 8 ; i++) {
+ /* NULL terminated */
+ if(list->rates[i] == 0)
+ break;
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe.u.bitrate.value = ((list->rates[i] & 0x7f) * 500000);
+ /* Add new value to event */
+ current_val = iwe_stream_add_value(current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
+ }
+ /* Check if we added any event */
+ if((current_val - current_ev) > IW_EV_LCP_LEN)
+ current_ev = current_val;
+
+ /* The other data in the scan result are not really
+ * interesting, so for now drop it - Jean II */
+ return current_ev;
+}
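+
+/* Example of the rate translation above (illustration only): a raw entry of
+ * 0x82 in list->rates[] becomes 0x02 after masking the 0x80 "basic rate"
+ * bit, i.e. 2 * 500000 = 1 Mb/s in the reported event.
+ */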
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : Read Scan Results
+ */
+static int airo_get_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct airo_info *ai = dev->priv;
+ BSSListRid BSSList;
+ int rc;
+ char *current_ev = extra;
+
+ /* When we are associated again, the scan has surely finished.
+ * Just in case, let's make sure enough time has elapsed since
+ * we started the scan. - Javier */
+ if(ai->scan_timestamp && time_before(jiffies,ai->scan_timestamp+3*HZ)) {
+ /* Important note : we don't want to block the caller
+ * until results are ready for various reasons.
+ * First, managing wait queues is complex and racy
+ * (there may be multiple simultaneous callers).
+	 * Second, we grab some rtnetlink lock before coming
+ * here (in dev_ioctl()).
+ * Third, the caller can wait on the Wireless Event
+ * - Jean II */
+ return -EAGAIN;
+ }
+ ai->scan_timestamp = 0;
+
+ /* There's only a race with proc_BSSList_open(), but its
+	 * consequences are benign. So I don't bother fixing it - Javier */
+
+ /* Try to read the first entry of the scan result */
+ rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 1);
+ if((rc) || (BSSList.index == 0xffff)) {
+ /* Client error, no scan results...
+		 * The caller needs to restart the scan. */
+ return -ENODATA;
+ }
+
+ /* Read and parse all entries */
+ while((!rc) && (BSSList.index != 0xffff)) {
+ /* Translate to WE format this entry */
+ current_ev = airo_translate_scan(dev, current_ev,
+ extra + dwrq->length,
+ &BSSList);
+
+ /* Check if there is space for one more entry */
+ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
+ /* Ask user space to try again with a bigger buffer */
+ return -E2BIG;
+ }
+
+ /* Read next entry */
+ rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
+ &BSSList, sizeof(BSSList), 1);
+ }
+ /* Length of data */
+ dwrq->length = (current_ev - extra);
+ dwrq->flags = 0; /* todo */
+
+ return 0;
+}
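+
+/* Expected user space sequence for the two handlers above (sketch only):
+ * issue SIOCSIWSCAN, then poll SIOCGIWSCAN.  -EAGAIN means the scan started
+ * less than 3*HZ ago and is presumed still running, -E2BIG means the
+ * results no longer fit and the caller should retry with a bigger buffer,
+ * and -ENODATA means there are no results and a new scan is needed.
+ */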
+
+/*------------------------------------------------------------------*/
+/*
+ * Commit handler : called after a bunch of SET operations
+ */
+static int airo_config_commit(struct net_device *dev,
+ struct iw_request_info *info, /* NULL */
+ void *zwrq, /* NULL */
+ char *extra) /* NULL */
+{
+ struct airo_info *local = dev->priv;
+ Resp rsp;
+
+ if (!test_bit (FLAG_COMMIT, &local->flags))
+ return 0;
+
+ /* Some of the "SET" function may have modified some of the
+ * parameters. It's now time to commit them in the card */
+ disable_MAC(local, 1);
+ if (test_bit (FLAG_RESET, &local->flags)) {
+ APListRid APList_rid;
+ SsidRid SSID_rid;
+
+ readAPListRid(local, &APList_rid);
+ readSsidRid(local, &SSID_rid);
+ if (test_bit(FLAG_MPI,&local->flags))
+ setup_card(local, dev->dev_addr, 1 );
+ else
+ reset_airo_card(dev);
+ disable_MAC(local, 1);
+ writeSsidRid(local, &SSID_rid, 1);
+ writeAPListRid(local, &APList_rid, 1);
+ }
+ if (down_interruptible(&local->sem))
+ return -ERESTARTSYS;
+ writeConfigRid(local, 0);
+ enable_MAC(local, &rsp, 0);
+ if (test_bit (FLAG_RESET, &local->flags))
+ airo_set_promisc(local);
+ else
+ up(&local->sem);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Structures to export the Wireless Handlers
+ */
+
+static const struct iw_priv_args airo_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
+ IW_PRIV_TYPE_BYTE | 2047, "airoioctl" },
+ { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" },
+};
+
+static const iw_handler airo_handler[] =
+{
+ (iw_handler) airo_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) airo_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) airo_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) airo_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) airo_set_mode, /* SIOCSIWMODE */
+ (iw_handler) airo_get_mode, /* SIOCGIWMODE */
+ (iw_handler) airo_set_sens, /* SIOCSIWSENS */
+ (iw_handler) airo_get_sens, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) airo_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ iw_handler_set_spy, /* SIOCSIWSPY */
+ iw_handler_get_spy, /* SIOCGIWSPY */
+ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
+ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
+ (iw_handler) airo_set_wap, /* SIOCSIWAP */
+ (iw_handler) airo_get_wap, /* SIOCGIWAP */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) airo_get_aplist, /* SIOCGIWAPLIST */
+ (iw_handler) airo_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) airo_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) airo_set_essid, /* SIOCSIWESSID */
+ (iw_handler) airo_get_essid, /* SIOCGIWESSID */
+ (iw_handler) airo_set_nick, /* SIOCSIWNICKN */
+ (iw_handler) airo_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) airo_set_rate, /* SIOCSIWRATE */
+ (iw_handler) airo_get_rate, /* SIOCGIWRATE */
+ (iw_handler) airo_set_rts, /* SIOCSIWRTS */
+ (iw_handler) airo_get_rts, /* SIOCGIWRTS */
+ (iw_handler) airo_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) airo_get_frag, /* SIOCGIWFRAG */
+ (iw_handler) airo_set_txpow, /* SIOCSIWTXPOW */
+ (iw_handler) airo_get_txpow, /* SIOCGIWTXPOW */
+ (iw_handler) airo_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) airo_get_retry, /* SIOCGIWRETRY */
+ (iw_handler) airo_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) airo_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) airo_set_power, /* SIOCSIWPOWER */
+ (iw_handler) airo_get_power, /* SIOCGIWPOWER */
+};
+
+/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
+ * We want to force the use of the ioctl code, because those won't
+ * work with the iw_handler code (they simultaneously read and write
+ * data, which iw_handler can't do).
+ * Note that it's perfectly legal to read/write on a single ioctl command,
+ * you just can't use iwpriv and need to force it via the ioctl handler.
+ * Jean II */
+static const iw_handler airo_private_handler[] =
+{
+ NULL, /* SIOCIWFIRSTPRIV */
+};
+
+static const struct iw_handler_def airo_handler_def =
+{
+ .num_standard = sizeof(airo_handler)/sizeof(iw_handler),
+ .num_private = sizeof(airo_private_handler)/sizeof(iw_handler),
+ .num_private_args = sizeof(airo_private_args)/sizeof(struct iw_priv_args),
+ .standard = airo_handler,
+ .private = airo_private_handler,
+ .private_args = airo_private_args,
+ .get_wireless_stats = airo_get_wireless_stats,
+};
+
+#endif /* WIRELESS_EXT */
+
+/*
+ * This defines the configuration part of the Wireless Extensions
+ * Note : irq and spinlock protection will occur in the subroutines
+ *
+ * TODO :
+ * o Check input value more carefully and fill correct values in range
+ * o Test and shakeout the bugs (if any)
+ *
+ * Jean II
+ *
+ * Javier Achirica did a great job of merging code from the unnamed CISCO
+ * developer that added support for flashing the card.
+ */
+static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int rc = 0;
+ struct airo_info *ai = (struct airo_info *)dev->priv;
+
+ if (ai->power)
+ return 0;
+
+ switch (cmd) {
+#ifdef CISCO_EXT
+ case AIROIDIFC:
+#ifdef AIROOLDIDIFC
+ case AIROOLDIDIFC:
+#endif
+ {
+ int val = AIROMAGIC;
+ aironet_ioctl com;
+ if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
+ rc = -EFAULT;
+ else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case AIROIOCTL:
+#ifdef AIROOLDIOCTL
+ case AIROOLDIOCTL:
+#endif
+ /* Get the command struct and hand it off for evaluation by
+ * the proper subfunction
+ */
+ {
+ aironet_ioctl com;
+ if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /* Separate R/W functions bracket legality here
+ */
+ if ( com.command == AIRORSWVERSION ) {
+ if (copy_to_user(com.data, swversion, sizeof(swversion)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+ }
+ else if ( com.command <= AIRORRID)
+ rc = readrids(dev,&com);
+ else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) )
+ rc = writerids(dev,&com);
+ else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART )
+ rc = flashcard(dev,&com);
+ else
+ rc = -EINVAL; /* Bad command in ioctl */
+ }
+ break;
+#endif /* CISCO_EXT */
+
+ // All other calls are currently unsupported
+ default:
+ rc = -EOPNOTSUPP;
+ }
+ return rc;
+}
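+
+/* Dispatch summary for the CISCO_EXT ioctls above (comment only): AIROIDIFC
+ * simply returns AIROMAGIC so user space can probe for the driver, while
+ * AIROIOCTL routes by command number: AIRORSWVERSION returns the swversion
+ * buffer, commands up to AIRORRID read RIDs via readrids(), AIROPCAP
+ * through AIROPLEAPUSR+2 write RIDs via writerids(), and AIROFLSHRST
+ * through AIRORESTART go to flashcard().
+ */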
+
+#ifdef WIRELESS_EXT
+/*
+ * Get the Wireless stats out of the driver
+ * Note : irq and spinlock protection will occur in the subroutines
+ *
+ * TODO :
+ * o Check if work in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs)
+ *	o Check if it works in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs)
+ * Jean
+ */
+static void airo_read_wireless_stats(struct airo_info *local)
+{
+ StatusRid status_rid;
+ StatsRid stats_rid;
+ CapabilityRid cap_rid;
+ u32 *vals = stats_rid.vals;
+
+ /* Get stats out of the card */
+ clear_bit(JOB_WSTATS, &local->flags);
+ if (local->power) {
+ up(&local->sem);
+ return;
+ }
+ readCapabilityRid(local, &cap_rid, 0);
+ readStatusRid(local, &status_rid, 0);
+ readStatsRid(local, &stats_rid, RID_STATS, 0);
+ up(&local->sem);
+
+ /* The status */
+ local->wstats.status = status_rid.mode;
+
+ /* Signal quality and co. But where is the noise level ??? */
+ local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
+ if (local->rssi)
+ local->wstats.qual.level = 0x100 - local->rssi[status_rid.sigQuality].rssidBm;
+ else
+ local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2;
+ if (status_rid.len >= 124) {
+ local->wstats.qual.noise = 256 - status_rid.noisedBm;
+ local->wstats.qual.updated = 7;
+ } else {
+ local->wstats.qual.noise = 0;
+ local->wstats.qual.updated = 3;
+ }
+
+ /* Packets discarded in the wireless adapter due to wireless
+ * specific problems */
+ local->wstats.discard.nwid = vals[56] + vals[57] + vals[58];/* SSID Mismatch */
+ local->wstats.discard.code = vals[6];/* RxWepErr */
+ local->wstats.discard.fragment = vals[30];
+ local->wstats.discard.retries = vals[10];
+ local->wstats.discard.misc = vals[1] + vals[32];
+ local->wstats.miss.beacon = vals[34];
+}
+
+struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
+{
+ struct airo_info *local = dev->priv;
+
+ if (!test_bit(JOB_WSTATS, &local->flags)) {
+ /* Get stats out of the card if available */
+ if (down_trylock(&local->sem) != 0) {
+ set_bit(JOB_WSTATS, &local->flags);
+ wake_up_interruptible(&local->thr_wait);
+ } else
+ airo_read_wireless_stats(local);
+ }
+
+ return &local->wstats;
+}
+#endif /* WIRELESS_EXT */
+
+#ifdef CISCO_EXT
+/*
+ * This just translates from driver IOCTL codes to the command codes to
+ * feed to the radio's host interface. Things can be added/deleted
+ * as needed. This represents the READ side of control I/O to
+ * the card
+ */
+static int readrids(struct net_device *dev, aironet_ioctl *comp) {
+ unsigned short ridcode;
+ unsigned char *iobuf;
+ int len;
+ struct airo_info *ai = dev->priv;
+ Resp rsp;
+
+ if (test_bit(FLAG_FLASHING, &ai->flags))
+ return -EIO;
+
+ switch(comp->command)
+ {
+ case AIROGCAP: ridcode = RID_CAPABILITIES; break;
+ case AIROGCFG: ridcode = RID_CONFIG;
+ if (test_bit(FLAG_COMMIT, &ai->flags)) {
+ disable_MAC (ai, 1);
+ writeConfigRid (ai, 1);
+ enable_MAC (ai, &rsp, 1);
+ }
+ break;
+ case AIROGSLIST: ridcode = RID_SSID; break;
+ case AIROGVLIST: ridcode = RID_APLIST; break;
+ case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
+ case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM;
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ case AIROGSTAT: ridcode = RID_STATUS; break;
+ case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
+ case AIROGSTATSC32: ridcode = RID_STATS; break;
+#ifdef MICSUPPORT
+ case AIROGMICSTATS:
+ if (copy_to_user(comp->data, &ai->micstats,
+ min((int)comp->len,(int)sizeof(ai->micstats))))
+ return -EFAULT;
+ return 0;
+#endif
+ case AIRORRID: ridcode = comp->ridnum; break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
+	/* The RID docs say the first two bytes of the RID hold its byte
+	 * count; return the data to the user.
+	 * 9/22/2000: honor the user-given length.
+	 */
+ len = comp->len;
+
+ if (copy_to_user(comp->data, iobuf, min(len, (int)RIDSIZE))) {
+ kfree (iobuf);
+ return -EFAULT;
+ }
+ kfree (iobuf);
+ return 0;
+}
+
+/*
+ * Danger, Will Robinson: this is where the RIDs get written.
+ */
+
+static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+ struct airo_info *ai = dev->priv;
+ int ridcode;
+#ifdef MICSUPPORT
+ int enabled;
+#endif
+ Resp rsp;
+ static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
+ unsigned char *iobuf;
+
+ /* Only super-user can write RIDs */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (test_bit(FLAG_FLASHING, &ai->flags))
+ return -EIO;
+
+ ridcode = 0;
+ writer = do_writerid;
+
+ switch(comp->command)
+ {
+ case AIROPSIDS: ridcode = RID_SSID; break;
+ case AIROPCAP: ridcode = RID_CAPABILITIES; break;
+ case AIROPAPLIST: ridcode = RID_APLIST; break;
+ case AIROPCFG: ai->config.len = 0;
+ clear_bit(FLAG_COMMIT, &ai->flags);
+ ridcode = RID_CONFIG; break;
+ case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break;
+ case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break;
+ case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break;
+ case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid;
+ break;
+ case AIROPLEAPUSR+1: ridcode = 0xFF2A; break;
+ case AIROPLEAPUSR+2: ridcode = 0xFF2B; break;
+
+	/* This is not really a RID but a command given to the card;
+	 * the same goes for MAC off below.
+	 */
+ case AIROPMACON:
+ if (enable_MAC(ai, &rsp, 1) != 0)
+ return -EIO;
+ return 0;
+
+	/*
+	 * Evidently this code in the airo driver does not get a symbol
+	 * for disable_MAC; it is probably so short that the compiler does
+	 * not generate one.
+	 */
+ case AIROPMACOFF:
+ disable_MAC(ai, 1);
+ return 0;
+
+	/* This command merely clears the counters; it does not actually store
+	 * any data, it only reads a RID. But as it changes the card's state,
+	 * I put it in the writerid routines.
+	 */
+ case AIROPSTCLR:
+ if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);
+
+#ifdef MICSUPPORT
+ enabled = ai->micstats.enabled;
+ memset(&ai->micstats,0,sizeof(ai->micstats));
+ ai->micstats.enabled = enabled;
+#endif
+
+ if (copy_to_user(comp->data, iobuf,
+ min((int)comp->len, (int)RIDSIZE))) {
+ kfree (iobuf);
+ return -EFAULT;
+ }
+ kfree (iobuf);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP; /* Blarg! */
+ }
+ if(comp->len > RIDSIZE)
+ return -EINVAL;
+
+ if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(iobuf,comp->data,comp->len)) {
+ kfree (iobuf);
+ return -EFAULT;
+ }
+
+ if (comp->command == AIROPCFG) {
+ ConfigRid *cfg = (ConfigRid *)iobuf;
+
+ if (test_bit(FLAG_MIC_CAPABLE, &ai->flags))
+ cfg->opmode |= MODE_MIC;
+
+ if ((cfg->opmode & 0xFF) == MODE_STA_IBSS)
+ set_bit (FLAG_ADHOC, &ai->flags);
+ else
+ clear_bit (FLAG_ADHOC, &ai->flags);
+ }
+
+ if((*writer)(ai, ridcode, iobuf,comp->len,1)) {
+ kfree (iobuf);
+ return -EIO;
+ }
+ kfree (iobuf);
+ return 0;
+}
+
+/*****************************************************************************
+ * Ancillary flash / mod functions: much black magic lurks here             *
+ *****************************************************************************
+ */
+
+/*
+ * Flash command switch table
+ */
+
+int flashcard(struct net_device *dev, aironet_ioctl *comp) {
+ int z;
+ int cmdreset(struct airo_info *);
+ int setflashmode(struct airo_info *);
+ int flashgchar(struct airo_info *,int,int);
+ int flashpchar(struct airo_info *,int,int);
+ int flashputbuf(struct airo_info *);
+ int flashrestart(struct airo_info *,struct net_device *);
+
+ /* Only super-user can modify flash */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(comp->command)
+ {
+ case AIROFLSHRST:
+ return cmdreset((struct airo_info *)dev->priv);
+
+ case AIROFLSHSTFL:
+ if (!((struct airo_info *)dev->priv)->flash &&
+ (((struct airo_info *)dev->priv)->flash = kmalloc (FLASHSIZE, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ return setflashmode((struct airo_info *)dev->priv);
+
+ case AIROFLSHGCHR: /* Get char from aux */
+ if(comp->len != sizeof(int))
+ return -EINVAL;
+ if (copy_from_user(&z,comp->data,comp->len))
+ return -EFAULT;
+ return flashgchar((struct airo_info *)dev->priv,z,8000);
+
+ case AIROFLSHPCHR: /* Send char to card. */
+ if(comp->len != sizeof(int))
+ return -EINVAL;
+ if (copy_from_user(&z,comp->data,comp->len))
+ return -EFAULT;
+ return flashpchar((struct airo_info *)dev->priv,z,8000);
+
+ case AIROFLPUTBUF: /* Send 32k to card */
+ if (!((struct airo_info *)dev->priv)->flash)
+ return -ENOMEM;
+ if(comp->len > FLASHSIZE)
+ return -EINVAL;
+ if(copy_from_user(((struct airo_info *)dev->priv)->flash,comp->data,comp->len))
+ return -EFAULT;
+
+ flashputbuf((struct airo_info *)dev->priv);
+ return 0;
+
+ case AIRORESTART:
+ if(flashrestart((struct airo_info *)dev->priv,dev))
+ return -EIO;
+ return 0;
+ }
+ return -EINVAL;
+}
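+
+/*
+ * The flashing sequence suggested by the command switch above (a sketch
+ * of the expected order, not a statement about any particular userspace
+ * tool):
+ *
+ *	AIROFLSHRST                 - soft-reset the card       (cmdreset)
+ *	AIROFLSHSTFL                - allocate the buffer and
+ *	                              enter flash mode           (setflashmode)
+ *	AIROFLSHPCHR / AIROFLSHGCHR - handshake bytes with the
+ *	                              card                       (flashpchar/flashgchar)
+ *	AIROFLPUTBUF                - push the 32k image         (flashputbuf)
+ *	AIRORESTART                 - bring the card back up     (flashrestart)
+ */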
+
+#define FLASH_COMMAND 0x7e7e
+
+/*
+ * STEP 1)
+ * Disable MAC and do soft reset on
+ * card.
+ */
+
+int cmdreset(struct airo_info *ai) {
+ disable_MAC(ai, 1);
+
+ if(!waitbusy (ai)){
+ printk(KERN_INFO "Waitbusy hang before RESET\n");
+ return -EBUSY;
+ }
+
+ OUT4500(ai,COMMAND,CMD_SOFTRESET);
+
+ ssleep(1); /* WAS 600 12/7/00 */
+
+ if(!waitbusy (ai)){
+ printk(KERN_INFO "Waitbusy hang AFTER RESET\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/* STEP 2)
+ * Put the card in legendary flash
+ * mode
+ */
+
+int setflashmode (struct airo_info *ai) {
+ set_bit (FLAG_FLASHING, &ai->flags);
+
+ OUT4500(ai, SWS0, FLASH_COMMAND);
+ OUT4500(ai, SWS1, FLASH_COMMAND);
+ if (probe) {
+ OUT4500(ai, SWS0, FLASH_COMMAND);
+ OUT4500(ai, COMMAND,0x10);
+ } else {
+ OUT4500(ai, SWS2, FLASH_COMMAND);
+ OUT4500(ai, SWS3, FLASH_COMMAND);
+ OUT4500(ai, COMMAND,0);
+ }
+ msleep(500); /* 500ms delay */
+
+ if(!waitbusy(ai)) {
+ clear_bit (FLAG_FLASHING, &ai->flags);
+ printk(KERN_INFO "Waitbusy hang after setflash mode\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Put a character to SWS0 and wait up to dwelltime microseconds,
+ * polling in 50 us steps, for it to echo back.
+ */
+
+int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
+ int echo;
+ int waittime;
+
+ byte |= 0x8000;
+
+ if(dwelltime == 0 )
+ dwelltime = 200;
+
+ waittime=dwelltime;
+
+ /* Wait for busy bit d15 to go false indicating buffer empty */
+ while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
+ udelay (50);
+ waittime -= 50;
+ }
+
+ /* timeout for busy clear wait */
+ if(waittime <= 0 ){
+ printk(KERN_INFO "flash putchar busywait timeout! \n");
+ return -EBUSY;
+ }
+
+	/* Port is clear; now write the byte and wait for it to echo back */
+ do {
+ OUT4500(ai,SWS0,byte);
+ udelay(50);
+ dwelltime -= 50;
+ echo = IN4500(ai,SWS1);
+ } while (dwelltime >= 0 && echo != byte);
+
+ OUT4500(ai,SWS1,0);
+
+ return (echo == byte) ? 0 : -EIO;
+}
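+
+/*
+ * With the dwelltime of 8000 passed in from flashcard() above, the
+ * busy-wait and the echo loop each poll in 50 us steps, i.e. at most
+ * 8000 / 50 = 160 iterations, or roughly 8 ms apiece.
+ */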
+
+/*
+ * STEP 3)
+ * Get a character from the card matching matchbyte.
+ */
+int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
+ int rchar;
+ unsigned char rbyte=0;
+
+ do {
+ rchar = IN4500(ai,SWS1);
+
+ if(dwelltime && !(0x8000 & rchar)){
+ dwelltime -= 10;
+ mdelay(10);
+ continue;
+ }
+ rbyte = 0xff & rchar;
+
+ if( (rbyte == matchbyte) && (0x8000 & rchar) ){
+ OUT4500(ai,SWS1,0);
+ return 0;
+ }
+ if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
+ break;
+ OUT4500(ai,SWS1,0);
+
+ }while(dwelltime > 0);
+ return -EIO;
+}
+
+/*
+ * Transfer 32k of firmware data from user buffer to our buffer and
+ * send to the card
+ */
+
+int flashputbuf(struct airo_info *ai){
+ int nwords;
+
+	/* Write the firmware image out to the card */
+ if (test_bit(FLAG_MPI,&ai->flags))
+ memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
+ else {
+ OUT4500(ai,AUXPAGE,0x100);
+ OUT4500(ai,AUXOFF,0);
+
+ for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
+ OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff);
+ }
+ }
+ OUT4500(ai,SWS0,0x8000);
+
+ return 0;
+}
+
+/*
+ * Restart the card after the new firmware has been loaded
+ */
+int flashrestart(struct airo_info *ai,struct net_device *dev){
+ int i,status;
+
+ ssleep(1); /* Added 12/7/00 */
+ clear_bit (FLAG_FLASHING, &ai->flags);
+ if (test_bit(FLAG_MPI, &ai->flags)) {
+ status = mpi_init_descriptors(ai);
+ if (status != SUCCESS)
+ return status;
+ }
+ status = setup_card(ai, dev->dev_addr, 1);
+
+ if (!test_bit(FLAG_MPI,&ai->flags))
+ for( i = 0; i < MAX_FIDS; i++ ) {
+ ai->fids[i] = transmit_allocate
+ ( ai, 2312, i >= MAX_FIDS / 2 );
+ }
+
+ ssleep(1); /* Added 12/7/00 */
+ return status;
+}
+#endif /* CISCO_EXT */
+
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ In addition:
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+module_init(airo_init_module);
+module_exit(airo_cleanup_module);
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
new file mode 100644
index 000000000000..fbf53af6cda4
--- /dev/null
+++ b/drivers/net/wireless/airo_cs.c
@@ -0,0 +1,622 @@
+/*======================================================================
+
+ Aironet driver for 4500 and 4800 series cards
+
+ This code is released under both the GPL version 2 and BSD licenses.
+ Either license may be used. The respective licenses are found at
+ the end of this file.
+
+ This code was developed by Benjamin Reed <breed@users.sourceforge.net>
+ including portions of which come from the Aironet PC4500
+ Developer's Reference Manual and used with permission. Copyright
+ (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
+ code in the Developer's manual was granted for this driver by
+ Aironet.
+
+ In addition this module was derived from dummy_cs.
+ The initial developer of dummy_cs is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+======================================================================*/
+
+#include <linux/config.h>
+#ifdef __IN_PCMCIA_PACKAGE__
+#include <pcmcia/k_compat.h>
+#endif
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*
+ All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ you do not define PCMCIA_DEBUG at all, all the debug code will be
+ left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ be present but disabled -- but it can then be enabled for specific
+ modules at load time with a 'pc_debug=#' option to insmod.
+*/
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+static char *version = "$Revision: 1.2 $";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
+#else
+#define DEBUG(n, args...)
+#endif
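+
+/*
+ * For example, assuming the module was built with PCMCIA_DEBUG defined,
+ * verbose output could be requested at load time with something like:
+ *
+ *	insmod airo_cs.ko pc_debug=1
+ */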
+
+/*====================================================================*/
+
+MODULE_AUTHOR("Benjamin Reed");
+MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \
+ cards. This is the module that links the PCMCIA card \
+ with the airo module.");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
+
+/*====================================================================*/
+
+/*
+ The event() function is this driver's Card Services event handler.
+ It will be called by Card Services when an appropriate card status
+ event is received. The config() and release() entry points are
+ used to configure or release a socket, in response to card
+ insertion and ejection events. They are invoked from the airo_cs
+ event handler.
+*/
+
+struct net_device *init_airo_card( int, int, int, struct device * );
+void stop_airo_card( struct net_device *, int );
+int reset_airo_card( struct net_device * );
+
+static void airo_config(dev_link_t *link);
+static void airo_release(dev_link_t *link);
+static int airo_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+/*
+ The attach() and detach() entry points are used to create and destroy
+ "instances" of the driver, where each instance represents everything
+ needed to manage one actual PCMCIA card.
+*/
+
+static dev_link_t *airo_attach(void);
+static void airo_detach(dev_link_t *);
+
+/*
+ You'll also need to prototype all the functions that will actually
+ be used to talk to your device. See 'pcmem_cs' for a good example
+ of a fully self-sufficient driver; the other drivers rely more or
+ less on other parts of the kernel.
+*/
+
+/*
+ The dev_info variable is the "key" that is used to match up this
+ device driver with appropriate cards, through the card configuration
+ database.
+*/
+
+static dev_info_t dev_info = "airo_cs";
+
+/*
+ A linked list of "instances" of the aironet device. Each actual
+ PCMCIA card corresponds to one device instance, and is described
+ by one dev_link_t structure (defined in ds.h).
+
+ You may not want to use a linked list for this -- for example, the
+ memory card driver uses an array of dev_link_t pointers, where minor
+ device numbers are used to derive the corresponding array index.
+*/
+
+static dev_link_t *dev_list = NULL;
+
+/*
+ A dev_link_t structure has fields for most things that are needed
+ to keep track of a socket, but there will usually be some device
+ specific information that also needs to be kept track of. The
+ 'priv' pointer in a dev_link_t structure can be used to point to
+ a device-specific private data structure, like this.
+
+ A driver needs to provide a dev_node_t structure for each device
+ on a card. In some cases, there is only one device per card (for
+ example, ethernet cards, modems). In other cases, there may be
+ many actual or logical devices (SCSI adapters, memory cards with
+ multiple partitions). The dev_node_t structures need to be kept
+ in a linked list starting at the 'dev' field of a dev_link_t
+ structure. We allocate them in the card's private data structure,
+ because they generally shouldn't be allocated dynamically.
+
+ In this case, we also provide a flag to indicate if a device is
+ "stopped" due to a power management event, or card ejection. The
+ device IO routines can use a flag like this to throttle IO to a
+ card that is not ready to accept it.
+*/
+
+typedef struct local_info_t {
+ dev_node_t node;
+ struct net_device *eth_dev;
+} local_info_t;
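+
+/*
+ * In this driver the mapping is simple: 'node' is the single dev_node_t
+ * that airo_config() links at link->dev, and 'eth_dev' is the net_device
+ * created by init_airo_card() in the airo module.
+ */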
+
+/*======================================================================
+
+ airo_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+ The dev_link structure is initialized, but we don't actually
+ configure the card at this point -- we wait until we receive a
+ card insertion event.
+
+ ======================================================================*/
+
+static dev_link_t *airo_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ local_info_t *local;
+ int ret;
+
+ DEBUG(0, "airo_attach()\n");
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+ if (!link) {
+ printk(KERN_ERR "airo_cs: no memory for new device\n");
+ return NULL;
+ }
+ memset(link, 0, sizeof(struct dev_link_t));
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = NULL;
+
+ /*
+ General socket configuration defaults can go here. In this
+ client, we assume very little, and rely on the CIS for almost
+ everything. In most clients, many details (i.e., number, sizes,
+ and attributes of IO windows) are fixed by the nature of the
+ device, and can be hard-wired here.
+ */
+ link->conf.Attributes = 0;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Allocate space for private device-specific data */
+ local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
+ if (!local) {
+ printk(KERN_ERR "airo_cs: no memory for new device\n");
+ kfree (link);
+ return NULL;
+ }
+ memset(local, 0, sizeof(local_info_t));
+ link->priv = local;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &airo_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ airo_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* airo_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+ ======================================================================*/
+
+static void airo_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+
+ DEBUG(0, "airo_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->state & DEV_CONFIG)
+ airo_release(link);
+
+ if ( ((local_info_t*)link->priv)->eth_dev ) {
+ stop_airo_card( ((local_info_t*)link->priv)->eth_dev, 0 );
+ }
+ ((local_info_t*)link->priv)->eth_dev = NULL;
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->priv) {
+ kfree(link->priv);
+ }
+ kfree(link);
+
+} /* airo_detach */
+
+/*======================================================================
+
+ airo_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
+
+ ======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
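+
+/*
+ * For instance,
+ *
+ *	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ *
+ * records the function id in last_fn, and if the call returns non-zero
+ * stores the error in last_ret and jumps to the cs_failed label.
+ */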
+
+static void airo_config(dev_link_t *link)
+{
+ client_handle_t handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ local_info_t *dev;
+ int last_fn, last_ret;
+ u_char buf[64];
+ win_req_t req;
+ memreq_t map;
+
+ handle = link->handle;
+ dev = link->priv;
+
+ DEBUG(0, "airo_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /*
+ In this loop, we scan the CIS for configuration table entries,
+ each of which describes a valid card configuration, including
+ voltage, IO window, memory window, and interrupt settings.
+
+ We make no assumptions about the card to be configured: we use
+ just the information available in the CIS. In an ideal world,
+ this would work for any PCMCIA card, but it requires a complete
+ and accurate CIS. In practice, a driver usually "knows" most of
+ these things without consulting the CIS, and most client drivers
+ will only use the CIS to fill in implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ while (1) {
+ cistpl_cftable_entry_t dflt = { 0 };
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
+ pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ goto next_entry;
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
+ if (cfg->index == 0) goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
+
+ if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
+
+ /* Do we need to allocate an interrupt? */
+ if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 = link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ if (pcmcia_request_io(link->handle, &link->io) != 0)
+ goto next_entry;
+
+ /*
+ Now set up a common memory window, if needed. There is room
+ in the dev_link_t structure for one memory window handle,
+ but if the base addresses need to be saved, or if multiple
+ windows are needed, the info should go in the private data
+ structure for this device.
+
+ Note that the memory window base is a physical address, and
+ needs to be mapped to virtual space with ioremap() before it
+ is used.
+ */
+ if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) {
+ cistpl_mem_t *mem =
+ (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
+ req.Base = mem->win[0].host_addr;
+ req.Size = mem->win[0].len;
+ req.AccessSpeed = 0;
+ if (pcmcia_request_window(&link->handle, &req, &link->win) != 0)
+ goto next_entry;
+ map.Page = 0; map.CardOffset = mem->win[0].card_addr;
+ if (pcmcia_map_mem_page(link->win, &map) != 0)
+ goto next_entry;
+ }
+ /* If we got this far, we're cool! */
+ break;
+
+ next_entry:
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ }
+
+ /*
+ Allocate an interrupt line. Note that this does not assign a
+ handler to the interrupt, unless the 'Handler' member of the
+ irq structure is initialized.
+ */
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+
+ /*
+ This actually configures the PCMCIA socket -- setting up
+ the I/O windows and the interrupt mapping, and putting the
+ card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ ((local_info_t*)link->priv)->eth_dev =
+ init_airo_card( link->irq.AssignedIRQ,
+ link->io.BasePort1, 1, &handle_to_dev(handle) );
+ if (!((local_info_t*)link->priv)->eth_dev) goto cs_failed;
+
+ /*
+ At this point, the dev_node_t structure(s) need to be
+ initialized and arranged in a linked list at link->dev.
+ */
+ strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
+ dev->node.major = dev->node.minor = 0;
+ link->dev = &dev->node;
+
+ /* Finally, report what we've done */
+ printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
+ dev->node.dev_name, link->conf.ConfigIndex,
+ link->conf.Vcc/10, link->conf.Vcc%10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1+link->io.NumPorts1-1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->win)
+ printk(", mem 0x%06lx-0x%06lx", req.Base,
+ req.Base+req.Size-1);
+ printk("\n");
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+ cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+ airo_release(link);
+
+} /* airo_config */
+
+/*======================================================================
+
+ After a card is removed, airo_release() will unregister the
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+ ======================================================================*/
+
+static void airo_release(dev_link_t *link)
+{
+ DEBUG(0, "airo_release(0x%p)\n", link);
+
+ /* Unlink the device chain */
+ link->dev = NULL;
+
+ /*
+ In a normal driver, additional code may be needed to release
+ other kernel data structures associated with this device.
+ */
+
+ /* Don't bother checking to see if these succeed or not */
+ if (link->win)
+ pcmcia_release_window(link->win);
+ pcmcia_release_configuration(link->handle);
+ if (link->io.NumPorts1)
+ pcmcia_release_io(link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ pcmcia_release_irq(link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received.
+
+ When a CARD_REMOVAL event is received, we immediately set a
+ private flag to block future accesses to this device. All the
+ functions that actually access the device should check this flag
+ to make sure the card is still present.
+
+ ======================================================================*/
+
+static int airo_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *local = link->priv;
+
+ DEBUG(1, "airo_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ airo_release(link);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ airo_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ reset_airo_card(local->eth_dev);
+ netif_device_attach(local->eth_dev);
+ }
+ break;
+ }
+ return 0;
+} /* airo_event */
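+
+/*
+ * Summary of the handling above: card removal detaches the network
+ * interface and releases the socket; insertion triggers airo_config();
+ * suspend and physical reset detach the interface and drop the socket
+ * configuration; resume and card reset re-request the configuration,
+ * reset the airo card and re-attach the interface.
+ */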
+
+static struct pcmcia_driver airo_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "airo_cs",
+ },
+ .attach = airo_attach,
+ .detach = airo_detach,
+};
+
+static int airo_cs_init(void)
+{
+ return pcmcia_register_driver(&airo_driver);
+}
+
+static void airo_cs_cleanup(void)
+{
+ pcmcia_unregister_driver(&airo_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ In addition:
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+module_init(airo_cs_init);
+module_exit(airo_cs_cleanup);
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
new file mode 100644
index 000000000000..a1dc2a196087
--- /dev/null
+++ b/drivers/net/wireless/airport.c
@@ -0,0 +1,304 @@
+/* airport.c
+ *
+ * A driver for "Hermes" chipset based Apple Airport wireless
+ * card.
+ *
+ * Copyright notice & release notes in file orinoco.c
+ *
+ * Note specific to airport stub:
+ *
+ * 0.05 : first version of the new split driver
+ * 0.06 : fix possible hang on powerup, add sleep support
+ */
+
+#define DRIVER_NAME "airport"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/current.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "orinoco.h"
+
+#define AIRPORT_IO_LEN (0x1000) /* one page */
+
+struct airport {
+ struct macio_dev *mdev;
+ void __iomem *vaddr;
+ int irq_requested;
+ int ndev_registered;
+};
+
+static int
+airport_suspend(struct macio_dev *mdev, u32 state)
+{
+ struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ printk(KERN_DEBUG "%s: Airport entering sleep mode\n", dev->name);
+
+ err = orinoco_lock(priv, &flags);
+ if (err) {
+ printk(KERN_ERR "%s: hw_unavailable on PBOOK_SLEEP_NOW\n",
+ dev->name);
+ return 0;
+ }
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: PBOOK_SLEEP_NOW: Error %d downing interface\n",
+ dev->name, err);
+
+ netif_device_detach(dev);
+
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0);
+
+ return 0;
+}
+
+static int
+airport_resume(struct macio_dev *mdev)
+{
+ struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ printk(KERN_DEBUG "%s: Airport waking up\n", dev->name);
+
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1);
+ msleep(200);
+
+ enable_irq(dev->irq);
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware on PBOOK_WAKE\n",
+ dev->name, err);
+ return 0;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+
+ priv->hw_unavailable--;
+
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card on PBOOK_WAKE\n",
+ dev->name, err);
+ }
+
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int
+airport_detach(struct macio_dev *mdev)
+{
+ struct net_device *dev = dev_get_drvdata(&mdev->ofdev.dev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct airport *card = priv->card;
+
+ if (card->ndev_registered)
+ unregister_netdev(dev);
+ card->ndev_registered = 0;
+
+ if (card->irq_requested)
+ free_irq(dev->irq, dev);
+ card->irq_requested = 0;
+
+ if (card->vaddr)
+ iounmap(card->vaddr);
+ card->vaddr = NULL;
+
+ macio_release_resource(mdev, 0);
+
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0);
+ ssleep(1);
+
+ macio_set_drvdata(mdev, NULL);
+ free_orinocodev(dev);
+
+ return 0;
+}
+
+static int airport_hard_reset(struct orinoco_private *priv)
+{
+ /* It would be nice to power cycle the Airport for a real hard
+ * reset, but for some reason although it appears to
+ * re-initialize properly, it falls in a screaming heap
+ * shortly afterwards. */
+#if 0
+ struct net_device *dev = priv->ndev;
+ struct airport *card = priv->card;
+
+	/* Vitally important.  If we don't do this it seems we get an
+	 * interrupt somewhere during the power cycle; since
+	 * hw_unavailable is already set it doesn't get ACKed, we get
+	 * into an interrupt loop and the PMU decides to turn us
+	 * off. */
+ disable_irq(dev->irq);
+
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 0);
+ ssleep(1);
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 1);
+ ssleep(1);
+
+ enable_irq(dev->irq);
+ ssleep(1);
+#endif
+
+ return 0;
+}
+
+static int
+airport_attach(struct macio_dev *mdev, const struct of_match *match)
+{
+ struct orinoco_private *priv;
+ struct net_device *dev;
+ struct airport *card;
+ unsigned long phys_addr;
+ hermes_t *hw;
+
+ if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
+ printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n");
+ return -ENODEV;
+ }
+
+ /* Allocate space for private device-specific data */
+ dev = alloc_orinocodev(sizeof(*card), airport_hard_reset);
+ if (! dev) {
+ printk(KERN_ERR PFX "Cannot allocate network device\n");
+ return -ENODEV;
+ }
+ priv = netdev_priv(dev);
+ card = priv->card;
+
+ hw = &priv->hw;
+ card->mdev = mdev;
+
+ if (macio_request_resource(mdev, 0, "airport")) {
+ printk(KERN_ERR PFX "can't request IO resource !\n");
+ free_orinocodev(dev);
+ return -EBUSY;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+
+ macio_set_drvdata(mdev, dev);
+
+ /* Setup interrupts & base address */
+ dev->irq = macio_irq(mdev, 0);
+ phys_addr = macio_resource_start(mdev, 0); /* Physical address */
+ printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr);
+ dev->base_addr = phys_addr;
+ card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN);
+ if (!card->vaddr) {
+ printk(KERN_ERR PFX "ioremap() failed\n");
+ goto failed;
+ }
+
+ hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING);
+
+ /* Power up card */
+ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1);
+ ssleep(1);
+
+ /* Reset it before we get the interrupt */
+ hermes_init(hw);
+
+ if (request_irq(dev->irq, orinoco_interrupt, 0, dev->name, dev)) {
+ printk(KERN_ERR PFX "Couldn't get IRQ %d\n", dev->irq);
+ goto failed;
+ }
+ card->irq_requested = 1;
+
+ /* Tell the stack we exist */
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR PFX "register_netdev() failed\n");
+ goto failed;
+ }
+ printk(KERN_DEBUG PFX "Card registered for interface %s\n", dev->name);
+ card->ndev_registered = 1;
+ return 0;
+ failed:
+ airport_detach(mdev);
+ return -ENODEV;
+} /* airport_attach */
+
+
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static struct of_match airport_match[] =
+{
+ {
+ .name = "radio",
+ .type = OF_ANY_MATCH,
+ .compatible = OF_ANY_MATCH
+ },
+ {},
+};
+
+static struct macio_driver airport_driver =
+{
+ .name = DRIVER_NAME,
+ .match_table = airport_match,
+ .probe = airport_attach,
+ .remove = airport_detach,
+ .suspend = airport_suspend,
+ .resume = airport_resume,
+};
+
+static int __init
+init_airport(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+
+ return macio_register_driver(&airport_driver);
+}
+
+static void __exit
+exit_airport(void)
+{
+ return macio_unregister_driver(&airport_driver);
+}
+
+module_init(init_airport);
+module_exit(exit_airport);
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
new file mode 100644
index 000000000000..4f304c6e693a
--- /dev/null
+++ b/drivers/net/wireless/arlan-main.c
@@ -0,0 +1,1896 @@
+/*
+ * Copyright (C) 1997 Cullen Jennings
+ * Copyright (C) 1998 Elmer Joandiu, elmer@ylenurme.ee
+ * GNU General Public License applies
+ * This module provides support for the Arlan 655 card made by Aironet
+ */
+
+#include <linux/config.h>
+#include "arlan.h"
+
+#if BITS_PER_LONG != 32
+# error FIXME: this driver requires a 32-bit platform
+#endif
+
+static const char *arlan_version = "C.Jennigs 97 & Elmer.Joandi@ut.ee Oct'98, http://www.ylenurme.ee/~elmer/655/";
+
+struct net_device *arlan_device[MAX_ARLANS];
+
+static int SID = SIDUNKNOWN;
+static int radioNodeId = radioNodeIdUNKNOWN;
+static char encryptionKey[12] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};
+int arlan_debug = debugUNKNOWN;
+static int spreadingCode = spreadingCodeUNKNOWN;
+static int channelNumber = channelNumberUNKNOWN;
+static int channelSet = channelSetUNKNOWN;
+static int systemId = systemIdUNKNOWN;
+static int registrationMode = registrationModeUNKNOWN;
+static int keyStart;
+static int tx_delay_ms;
+static int retries = 5;
+static int tx_queue_len = 1;
+static int arlan_EEPROM_bad;
+
+#ifdef ARLAN_DEBUGGING
+
+static int arlan_entry_debug;
+static int arlan_exit_debug;
+static int testMemory = testMemoryUNKNOWN;
+static int irq = irqUNKNOWN;
+static int txScrambled = 1;
+static int mdebug;
+
+module_param(irq, int, 0);
+module_param(mdebug, int, 0);
+module_param(testMemory, int, 0);
+module_param(arlan_entry_debug, int, 0);
+module_param(arlan_exit_debug, int, 0);
+module_param(txScrambled, int, 0);
+MODULE_PARM_DESC(irq, "(unused)");
+MODULE_PARM_DESC(testMemory, "(unused)");
+MODULE_PARM_DESC(mdebug, "Arlan multicast debugging (0-1)");
+#endif
+
+module_param(arlan_debug, int, 0);
+module_param(spreadingCode, int, 0);
+module_param(channelNumber, int, 0);
+module_param(channelSet, int, 0);
+module_param(systemId, int, 0);
+module_param(registrationMode, int, 0);
+module_param(radioNodeId, int, 0);
+module_param(SID, int, 0);
+module_param(keyStart, int, 0);
+module_param(tx_delay_ms, int, 0);
+module_param(retries, int, 0);
+module_param(tx_queue_len, int, 0);
+module_param(arlan_EEPROM_bad, int, 0);
+MODULE_PARM_DESC(arlan_debug, "Arlan debug enable (0-1)");
+MODULE_PARM_DESC(retries, "Arlan maximum packet retransmissions");
+#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
+MODULE_PARM_DESC(arlan_entry_debug, "Arlan driver function entry debugging");
+MODULE_PARM_DESC(arlan_exit_debug, "Arlan driver function exit debugging");
+MODULE_PARM_DESC(arlan_entry_and_exit_debug, "Arlan driver function entry and exit debugging");
+#else
+MODULE_PARM_DESC(arlan_entry_debug, "(ignored)");
+MODULE_PARM_DESC(arlan_exit_debug, "(ignored)");
+MODULE_PARM_DESC(arlan_entry_and_exit_debug, "(ignored)");
+#endif
+
+struct arlan_conf_stru arlan_conf[MAX_ARLANS];
+static int arlans_found;
+
+static int arlan_open(struct net_device *dev);
+static int arlan_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t arlan_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int arlan_close(struct net_device *dev);
+static struct net_device_stats *
+ arlan_statistics (struct net_device *dev);
+static void arlan_set_multicast (struct net_device *dev);
+static int arlan_hw_tx (struct net_device* dev, char *buf, int length );
+static int arlan_hw_config (struct net_device * dev);
+static void arlan_tx_done_interrupt (struct net_device * dev, int status);
+static void arlan_rx_interrupt (struct net_device * dev, u_char rxStatus, u_short, u_short);
+static void arlan_process_interrupt (struct net_device * dev);
+static void arlan_tx_timeout (struct net_device *dev);
+
+static inline long us2ticks(int us)
+{
+ return us * (1000000 / HZ);
+}
+
+
+#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
+#define ARLAN_DEBUG_ENTRY(name) \
+ {\
+ struct timeval timev;\
+ do_gettimeofday(&timev);\
+ if (arlan_entry_debug || arlan_entry_and_exit_debug)\
+ printk("--->>>" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec));\
+ }
+#define ARLAN_DEBUG_EXIT(name) \
+ {\
+ struct timeval timev;\
+ do_gettimeofday(&timev);\
+ if (arlan_exit_debug || arlan_entry_and_exit_debug)\
+ printk("<<<---" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec) );\
+ }
+#else
+#define ARLAN_DEBUG_ENTRY(name)
+#define ARLAN_DEBUG_EXIT(name)
+#endif
+
+
+#define arlan_interrupt_ack(dev)\
+ clearClearInterrupt(dev);\
+ setClearInterrupt(dev);
+
+static inline int arlan_drop_tx(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ priv->stats.tx_errors++;
+ if (priv->Conf->tx_delay_ms)
+ {
+ priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1;
+ }
+ else
+ {
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
+ TXHEAD(dev).offset = 0;
+ TXTAIL(dev).offset = 0;
+ priv->txLast = 0;
+ priv->bad = 0;
+ if (!priv->under_reset && !priv->under_config)
+ netif_wake_queue (dev);
+ }
+ return 1;
+}
+
+
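+/*
+ * arlan_command() below is the single funnel for talking to the card:
+ * callers OR their request into priv->waiting_command_mask, and each
+ * invocation issues at most one command, walking the pending bits in a
+ * fixed priority order (roughly: power-up, activate, rx/tx abort, reset,
+ * interrupt ack/enable, configure, configure-wait, rx enable, tx-busy
+ * clear, tx, no-ops, slow poll, power-down) under the card access lock.
+ */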
+int arlan_command(struct net_device *dev, int command_p)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+ int udelayed = 0;
+ int i = 0;
+ unsigned long flags;
+
+ ARLAN_DEBUG_ENTRY("arlan_command");
+
+ if (priv->card_polling_interval)
+ priv->card_polling_interval = 1;
+
+ if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
+ printk(KERN_DEBUG "arlan_command, %lx commandByte %x waiting %lx incoming %x \n",
+ jiffies, READSHMB(arlan->commandByte),
+ priv->waiting_command_mask, command_p);
+
+ priv->waiting_command_mask |= command_p;
+
+ if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
+ if (time_after(jiffies, priv->lastReset + 5 * HZ))
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
+
+ if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ACK)
+ {
+ arlan_interrupt_ack(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ACK;
+ }
+ if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ENABLE)
+ {
+ setInterruptEnable(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ENABLE;
+ }
+
+ /* Card access serializing lock */
+ spin_lock_irqsave(&priv->lock, flags);
+
+	/* Check the card's status and wait if necessary */
+
+ if (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
+ {
+ while (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
+ {
+ if (READSHMB(arlan->resetFlag) ||
+ READSHMB(arlan->commandByte)) /* ||
+ (readControlRegister(dev) & ARLAN_ACCESS))
+ */
+ udelay(40);
+ else
+ priv->waiting_command_mask &= ~(ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW);
+
+ udelayed++;
+
+ if (priv->waiting_command_mask & ARLAN_COMMAND_LONG_WAIT_NOW)
+ {
+ if (udelayed * 40 > 1000000)
+ {
+ printk(KERN_ERR "%s long wait too long \n", dev->name);
+ priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
+ break;
+ }
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_WAIT_NOW)
+ {
+ if (udelayed * 40 > 1000)
+ {
+ printk(KERN_ERR "%s short wait too long \n", dev->name);
+ goto bad_end;
+ }
+ }
+ }
+ }
+ else
+ {
+ i = 0;
+ while ((READSHMB(arlan->resetFlag) ||
+ READSHMB(arlan->commandByte)) &&
+ conf->pre_Command_Wait > (i++) * 10)
+ udelay(10);
+
+
+ if ((READSHMB(arlan->resetFlag) ||
+ READSHMB(arlan->commandByte)) &&
+ !(priv->waiting_command_mask & ARLAN_COMMAND_RESET))
+ {
+ goto card_busy_end;
+ }
+ }
+ if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
+ priv->under_reset = 1;
+ if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
+ priv->under_config = 1;
+
+ /* Issuing command */
+ arlan_lock_card_access(dev);
+ if (priv->waiting_command_mask & ARLAN_COMMAND_POWERUP)
+ {
+ // if (readControlRegister(dev) & (ARLAN_ACCESS && ARLAN_POWER))
+ setPowerOn(dev);
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERUP;
+ priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
+ priv->card_polling_interval = HZ / 10;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_ACTIVATE)
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_ACTIVATE);
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_ACTIVATE;
+ priv->card_polling_interval = HZ / 10;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_RX_ABORT)
+ {
+ if (priv->rx_command_given)
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_RX_ABORT);
+ arlan_interrupt_lancpu(dev);
+ priv->rx_command_given = 0;
+ }
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_RX_ABORT;
+ priv->card_polling_interval = 1;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_TX_ABORT)
+ {
+ if (priv->tx_command_given)
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ABORT);
+ arlan_interrupt_lancpu(dev);
+ priv->tx_command_given = 0;
+ }
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_TX_ABORT;
+ priv->card_polling_interval = 1;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
+ {
+ priv->under_reset=1;
+ netif_stop_queue (dev);
+
+ arlan_drop_tx(dev);
+ if (priv->tx_command_given || priv->rx_command_given)
+ {
+ printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
+ }
+ netif_stop_queue (dev);
+ if (arlan_debug & ARLAN_DEBUG_RESET)
+ printk(KERN_ERR "%s: Doing chip reset\n", dev->name);
+ priv->lastReset = jiffies;
+ WRITESHM(arlan->commandByte, 0, u_char);
+ /* hold card in reset state */
+ setHardwareReset(dev);
+ /* set reset flag and then release reset */
+ WRITESHM(arlan->resetFlag, 0xff, u_char);
+ clearChannelAttention(dev);
+ clearHardwareReset(dev);
+ priv->card_polling_interval = HZ / 4;
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
+ priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
+// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RENABLE;
+// priv->waiting_command_mask |= ARLAN_COMMAND_RX;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RACK)
+ {
+ clearHardwareReset(dev);
+ clearClearInterrupt(dev);
+ setClearInterrupt(dev);
+ setInterruptEnable(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RACK;
+ priv->waiting_command_mask |= ARLAN_COMMAND_CONF;
+ priv->under_config = 1;
+ priv->under_reset = 0;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RENABLE)
+ {
+ setInterruptEnable(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RENABLE;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
+ {
+ if (priv->tx_command_given || priv->rx_command_given)
+ {
+ printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
+ }
+ arlan_drop_tx(dev);
+ setInterruptEnable(dev);
+ arlan_hw_config(dev);
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF;
+ priv->card_polling_interval = HZ / 10;
+// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
+// priv->waiting_command_mask |= ARLAN_COMMAND_INT_ENABLE;
+ priv->waiting_command_mask |= ARLAN_COMMAND_CONF_WAIT;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF_WAIT)
+ {
+ if (READSHMB(arlan->configuredStatusFlag) != 0 &&
+ READSHMB(arlan->diagnosticInfo) == 0xff)
+ {
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF_WAIT;
+ priv->waiting_command_mask |= ARLAN_COMMAND_RX;
+ priv->waiting_command_mask |= ARLAN_COMMAND_TBUSY_CLEAR;
+ priv->card_polling_interval = HZ / 10;
+ priv->tx_command_given = 0;
+ priv->under_config = 0;
+ }
+ else
+ {
+ priv->card_polling_interval = 1;
+ if (arlan_debug & ARLAN_DEBUG_TIMING)
+ printk(KERN_ERR "configure delayed \n");
+ }
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_RX)
+ {
+ if (!registrationBad(dev))
+ {
+ setInterruptEnable(dev);
+ memset_io(arlan->commandParameter, 0, 0xf);
+ WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_RX_ENABLE);
+ WRITESHMB(arlan->commandParameter[0], conf->rxParameter);
+ arlan_interrupt_lancpu(dev);
+			priv->rx_command_given = 0;	// hmm, this is bad
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_RX;
+ priv->card_polling_interval = 1;
+ }
+ else
+ priv->card_polling_interval = 2;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_TBUSY_CLEAR)
+ {
+ if ( !registrationBad(dev) &&
+ (netif_queue_stopped(dev) || !netif_running(dev)) )
+ {
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_TBUSY_CLEAR;
+ netif_wake_queue (dev);
+ }
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_TX)
+ {
+ if (!test_and_set_bit(0, (void *) &priv->tx_command_given))
+ {
+ if (time_after(jiffies,
+ priv->tx_last_sent + us2ticks(conf->rx_tweak1))
+ || time_before(jiffies,
+ priv->last_rx_int_ack_time + us2ticks(conf->rx_tweak2)))
+ {
+ setInterruptEnable(dev);
+ memset_io(arlan->commandParameter, 0, 0xf);
+ WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ENABLE | ARLAN_COM_INT);
+ memcpy_toio(arlan->commandParameter, &TXLAST(dev), 14);
+// for ( i=1 ; i < 15 ; i++) printk("%02x:",READSHMB(arlan->commandParameter[i]));
+ priv->tx_last_sent = jiffies;
+ arlan_interrupt_lancpu(dev);
+ priv->tx_command_given = 1;
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
+ priv->card_polling_interval = 1;
+ }
+ else
+ {
+ priv->tx_command_given = 0;
+ priv->card_polling_interval = 1;
+ }
+ }
+ else if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
+ printk(KERN_ERR "tx command when tx chain locked \n");
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOPINT)
+ {
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_NOP | ARLAN_COM_INT);
+ }
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOPINT;
+ priv->card_polling_interval = HZ / 3;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOP)
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_NOP);
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOP;
+ priv->card_polling_interval = HZ / 3;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_SLOW_POLL)
+ {
+ WRITESHMB(arlan->commandByte, ARLAN_COM_GOTO_SLOW_POLL);
+ arlan_interrupt_lancpu(dev);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_SLOW_POLL;
+ priv->card_polling_interval = HZ / 3;
+ }
+ else if (priv->waiting_command_mask & ARLAN_COMMAND_POWERDOWN)
+ {
+ setPowerOff(dev);
+ if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
+ printk(KERN_WARNING "%s: Arlan Going Standby\n", dev->name);
+ priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERDOWN;
+ priv->card_polling_interval = 3 * HZ;
+ }
+ arlan_unlock_card_access(dev);
+ for (i = 0; READSHMB(arlan->commandByte) && i < 20; i++)
+ udelay(10);
+ if (READSHMB(arlan->commandByte))
+ if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
+ printk(KERN_ERR "card busy leaving command %lx\n", priv->waiting_command_mask);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ARLAN_DEBUG_EXIT("arlan_command");
+ priv->last_command_buff_free_time = jiffies;
+ return 0;
+
+card_busy_end:
+ if (time_after(jiffies, priv->last_command_buff_free_time + HZ))
+ priv->waiting_command_mask |= ARLAN_COMMAND_CLEAN_AND_RESET;
+
+ if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
+ printk(KERN_ERR "%s arlan_command card busy end \n", dev->name);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ARLAN_DEBUG_EXIT("arlan_command");
+ return 1;
+
+bad_end:
+ printk(KERN_ERR "%s arlan_command bad end \n", dev->name);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ARLAN_DEBUG_EXIT("arlan_command");
+
+ return -1;
+}
+
+static inline void arlan_command_process(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ int times = 0;
+ while (priv->waiting_command_mask && times < 8)
+ {
+ if (priv->waiting_command_mask)
+ {
+ if (arlan_command(dev, 0))
+ break;
+ times++;
+ }
+ /* if long command, we won't repeat trying */ ;
+ if (priv->card_polling_interval > 1)
+ break;
+ times++;
+ }
+}
+
+
+static inline void arlan_retransmit_now(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+
+ ARLAN_DEBUG_ENTRY("arlan_retransmit_now");
+ if (TXLAST(dev).offset == 0)
+ {
+ if (TXHEAD(dev).offset)
+ {
+ priv->txLast = 0;
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to head \n");
+
+ }
+ else if (TXTAIL(dev).offset)
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to tail \n");
+ priv->txLast = 1;
+ }
+ else
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "ReTransmit buff empty");
+ netif_wake_queue (dev);
+ return;
+
+ }
+ arlan_command(dev, ARLAN_COMMAND_TX);
+
+ priv->Conf->driverRetransmissions++;
+ priv->retransmissions++;
+
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("Retransmit %d bytes \n", TXLAST(dev).length);
+
+ ARLAN_DEBUG_EXIT("arlan_retransmit_now");
+}
+
+
+
+static void arlan_registration_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct arlan_private *priv = netdev_priv(dev);
+ int bh_mark_needed = 0;
+ int next_tick = 1;
+ long lostTime = ((long)jiffies - (long)priv->registrationLastSeen)
+ * (1000/HZ);
+
+ if (registrationBad(dev))
+ {
+ priv->registrationLostCount++;
+ if (lostTime > 7000 && lostTime < 7200)
+ {
+ printk(KERN_NOTICE "%s registration Lost \n", dev->name);
+ }
+ if (lostTime / priv->reRegisterExp > 2000)
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
+ if (lostTime / (priv->reRegisterExp) > 3500)
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
+ if (priv->reRegisterExp < 400)
+ priv->reRegisterExp += 2;
+ if (lostTime > 7200)
+ {
+ next_tick = HZ;
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
+ }
+ }
+ else
+ {
+ if (priv->Conf->registrationMode && lostTime > 10000 &&
+ priv->registrationLostCount)
+ {
+ printk(KERN_NOTICE "%s registration is back after %ld milliseconds\n",
+ dev->name, lostTime);
+ }
+ priv->registrationLastSeen = jiffies;
+ priv->registrationLostCount = 0;
+ priv->reRegisterExp = 1;
+ if (!netif_running(dev) )
+ netif_wake_queue(dev);
+ if (time_after(priv->tx_last_sent, priv->tx_last_cleared) &&
+     time_after(jiffies, priv->tx_last_sent + 5*HZ)) {
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
+ priv->tx_last_cleared = jiffies;
+ }
+ }
+
+
+ if (!registrationBad(dev) && priv->ReTransmitRequested)
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk(KERN_ERR "Retransmit from timer \n");
+ priv->ReTransmitRequested = 0;
+ arlan_retransmit_now(dev);
+ }
+ if (!registrationBad(dev) &&
+ time_after(jiffies, priv->tx_done_delayed) &&
+ priv->tx_done_delayed != 0)
+ {
+ TXLAST(dev).offset = 0;
+ if (priv->txLast)
+ priv->txLast = 0;
+ else if (TXTAIL(dev).offset)
+ priv->txLast = 1;
+ if (TXLAST(dev).offset)
+ {
+ arlan_retransmit_now(dev);
+ dev->trans_start = jiffies;
+ }
+ if (!(TXHEAD(dev).offset && TXTAIL(dev).offset))
+ {
+ netif_wake_queue (dev);
+ }
+ priv->tx_done_delayed = 0;
+ bh_mark_needed = 1;
+ }
+ if (bh_mark_needed)
+ {
+ netif_wake_queue (dev);
+ }
+ arlan_process_interrupt(dev);
+
+ if (next_tick < priv->card_polling_interval)
+ next_tick = priv->card_polling_interval;
+
+ priv->timer.expires = jiffies + next_tick;
+
+ add_timer(&priv->timer);
+}
+
+
+#ifdef ARLAN_DEBUGGING
+
+static void arlan_print_registers(struct net_device *dev, int line)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem *arlan = priv->card;
+
+ u_char hostcpuLock, lancpuLock, controlRegister, cntrlRegImage,
+ txStatus, rxStatus, interruptInProgress, commandByte;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_print_registers");
+ READSHM(interruptInProgress, arlan->interruptInProgress, u_char);
+ READSHM(hostcpuLock, arlan->hostcpuLock, u_char);
+ READSHM(lancpuLock, arlan->lancpuLock, u_char);
+ READSHM(controlRegister, arlan->controlRegister, u_char);
+ READSHM(cntrlRegImage, arlan->cntrlRegImage, u_char);
+ READSHM(txStatus, arlan->txStatus, u_char);
+ READSHM(rxStatus, arlan->rxStatus, u_char);
+ READSHM(commandByte, arlan->commandByte, u_char);
+
+ printk(KERN_WARNING "line %04d IP %02x HL %02x LL %02x CB %02x CR %02x CRI %02x TX %02x RX %02x\n",
+ line, interruptInProgress, hostcpuLock, lancpuLock, commandByte,
+ controlRegister, cntrlRegImage, txStatus, rxStatus);
+
+ ARLAN_DEBUG_EXIT("arlan_print_registers");
+}
+#endif
+
+
+static int arlan_hw_tx(struct net_device *dev, char *buf, int length)
+{
+ int i;
+
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+
+ int tailStarts = 0x800;
+ int headEnds = 0x0;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_hw_tx");
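+ /* The 0x800-byte txBuffer in shared memory holds up to two queued
+  * frames: the "head" frame at the start and the "tail" frame at the
+  * end, each rounded to 64-byte chunks. */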
+ if (TXHEAD(dev).offset)
+ headEnds = (((TXHEAD(dev).offset + TXHEAD(dev).length - offsetof(struct arlan_shmem, txBuffer)) / 64) + 1) * 64;
+ if (TXTAIL(dev).offset)
+ tailStarts = 0x800 - (((TXTAIL(dev).offset - offsetof(struct arlan_shmem, txBuffer)) / 64) + 2) * 64;
+
+
+ if (!TXHEAD(dev).offset && length < tailStarts)
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk(KERN_ERR "TXHEAD insert, tailStart %d\n", tailStarts);
+
+ TXHEAD(dev).offset =
+ offsetof(struct arlan_shmem, txBuffer);
+ TXHEAD(dev).length = length - ARLAN_FAKE_HDR_LEN;
+ for (i = 0; i < 6; i++)
+ TXHEAD(dev).dest[i] = buf[i];
+ TXHEAD(dev).clear = conf->txClear;
+ TXHEAD(dev).retries = conf->txRetries; /* 0 is use default */
+ TXHEAD(dev).routing = conf->txRouting;
+ TXHEAD(dev).scrambled = conf->txScrambled;
+ memcpy_toio((char __iomem *)arlan + TXHEAD(dev).offset, buf + ARLAN_FAKE_HDR_LEN, TXHEAD(dev).length);
+ }
+ else if (!TXTAIL(dev).offset && length < (0x800 - headEnds))
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk(KERN_ERR "TXTAIL insert, headEnd %d\n", headEnds);
+
+ TXTAIL(dev).offset =
+ offsetof(struct arlan_shmem, txBuffer) + 0x800 - (length / 64 + 2) * 64;
+ TXTAIL(dev).length = length - ARLAN_FAKE_HDR_LEN;
+ for (i = 0; i < 6; i++)
+ TXTAIL(dev).dest[i] = buf[i];
+ TXTAIL(dev).clear = conf->txClear;
+ TXTAIL(dev).retries = conf->txRetries;
+ TXTAIL(dev).routing = conf->txRouting;
+ TXTAIL(dev).scrambled = conf->txScrambled;
+ memcpy_toio(((char __iomem *)arlan + TXTAIL(dev).offset), buf + ARLAN_FAKE_HDR_LEN, TXTAIL(dev).length);
+ }
+ else
+ {
+ netif_stop_queue (dev);
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk(KERN_ERR "TX TAIL & HEAD full, return, tailStart %d headEnd %d\n", tailStarts, headEnds);
+ return -1;
+ }
+ priv->out_bytes += length;
+ priv->out_bytes10 += length;
+ if (conf->measure_rate < 1)
+ conf->measure_rate = 1;
+ if (time_after(jiffies, priv->out_time + conf->measure_rate * HZ))
+ {
+ conf->out_speed = priv->out_bytes / conf->measure_rate;
+ priv->out_bytes = 0;
+ priv->out_time = jiffies;
+ }
+ if (time_after(jiffies, priv->out_time10 + conf->measure_rate * 10*HZ))
+ {
+ conf->out_speed10 = priv->out_bytes10 / (10 * conf->measure_rate);
+ priv->out_bytes10 = 0;
+ priv->out_time10 = jiffies;
+ }
+ if (TXHEAD(dev).offset && TXTAIL(dev).offset)
+ {
+ netif_stop_queue (dev);
+ return 0;
+ }
+ else
+ netif_start_queue (dev);
+
+
+ IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
+ printk(KERN_WARNING "%s Transmit t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name,
+ (unsigned char) buf[0], (unsigned char) buf[1], (unsigned char) buf[2], (unsigned char) buf[3],
+ (unsigned char) buf[4], (unsigned char) buf[5], (unsigned char) buf[6], (unsigned char) buf[7],
+ (unsigned char) buf[8], (unsigned char) buf[9], (unsigned char) buf[10], (unsigned char) buf[11]);
+
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "TX command prepare for buffer %d\n", priv->txLast);
+
+ arlan_command(dev, ARLAN_COMMAND_TX);
+
+ priv->tx_last_sent = jiffies;
+
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("%s TX Queued %d bytes\n", dev->name, length);
+
+ ARLAN_DEBUG_EXIT("arlan_hw_tx");
+
+ return 0;
+}
+
+
+static int arlan_hw_config(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+
+ ARLAN_DEBUG_ENTRY("arlan_hw_config");
+
+ printk(KERN_NOTICE "%s arlan configure called \n", dev->name);
+ if (arlan_EEPROM_bad)
+ printk(KERN_NOTICE "arlan configure with eeprom bad option \n");
+
+
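+ /* Copy the current driver configuration into the card's shared memory,
+  * then issue the configure command (ARLAN_COM_CONF) with interrupt. */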
+ WRITESHM(arlan->spreadingCode, conf->spreadingCode, u_char);
+ WRITESHM(arlan->channelSet, conf->channelSet, u_char);
+
+ if (arlan_EEPROM_bad)
+ WRITESHM(arlan->defaultChannelSet, conf->channelSet, u_char);
+
+ WRITESHM(arlan->channelNumber, conf->channelNumber, u_char);
+
+ WRITESHM(arlan->scramblingDisable, conf->scramblingDisable, u_char);
+ WRITESHM(arlan->txAttenuation, conf->txAttenuation, u_char);
+
+ WRITESHM(arlan->systemId, conf->systemId, u_int);
+
+ WRITESHM(arlan->maxRetries, conf->maxRetries, u_char);
+ WRITESHM(arlan->receiveMode, conf->receiveMode, u_char);
+ WRITESHM(arlan->priority, conf->priority, u_char);
+ WRITESHM(arlan->rootOrRepeater, conf->rootOrRepeater, u_char);
+ WRITESHM(arlan->SID, conf->SID, u_int);
+
+ WRITESHM(arlan->registrationMode, conf->registrationMode, u_char);
+
+ WRITESHM(arlan->registrationFill, conf->registrationFill, u_char);
+ WRITESHM(arlan->localTalkAddress, conf->localTalkAddress, u_char);
+ WRITESHM(arlan->codeFormat, conf->codeFormat, u_char);
+ WRITESHM(arlan->numChannels, conf->numChannels, u_char);
+ WRITESHM(arlan->channel1, conf->channel1, u_char);
+ WRITESHM(arlan->channel2, conf->channel2, u_char);
+ WRITESHM(arlan->channel3, conf->channel3, u_char);
+ WRITESHM(arlan->channel4, conf->channel4, u_char);
+ WRITESHM(arlan->radioNodeId, conf->radioNodeId, u_short);
+ WRITESHM(arlan->SID, conf->SID, u_int);
+ WRITESHM(arlan->waitTime, conf->waitTime, u_short);
+ WRITESHM(arlan->lParameter, conf->lParameter, u_short);
+ memcpy_toio(&(arlan->_15), &(conf->_15), 3);
+ WRITESHM(arlan->_15, conf->_15, u_short);
+ WRITESHM(arlan->headerSize, conf->headerSize, u_short);
+ if (arlan_EEPROM_bad)
+ WRITESHM(arlan->hardwareType, conf->hardwareType, u_char);
+ WRITESHM(arlan->radioType, conf->radioType, u_char);
+ if (arlan_EEPROM_bad)
+ WRITESHM(arlan->radioModule, conf->radioType, u_char);
+
+ memcpy_toio(arlan->encryptionKey + keyStart, encryptionKey, 8);
+ memcpy_toio(arlan->name, conf->siteName, 16);
+
+ WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_CONF); /* do configure */
+ memset_io(arlan->commandParameter, 0, 0xf); /* 0xf */
+ memset_io(arlan->commandParameter + 1, 0, 2);
+ if (conf->writeEEPROM)
+ {
+ memset_io(arlan->commandParameter, conf->writeEEPROM, 1);
+// conf->writeEEPROM=0;
+ }
+ if (conf->registrationMode && conf->registrationInterrupts)
+ memset_io(arlan->commandParameter + 3, 1, 1);
+ else
+ memset_io(arlan->commandParameter + 3, 0, 1);
+
+ priv->irq_test_done = 0;
+
+ if (conf->tx_queue_len)
+ dev->tx_queue_len = conf->tx_queue_len;
+ udelay(100);
+
+ ARLAN_DEBUG_EXIT("arlan_hw_config");
+ return 0;
+}
+
+
+static int arlan_read_card_configuration(struct net_device *dev)
+{
+ u_char tlx415;
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+
+ ARLAN_DEBUG_ENTRY("arlan_read_card_configuration");
+
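+ /* For each tunable: use the module-parameter value when one was given,
+  * otherwise read the card's current setting from shared memory. */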
+ if (radioNodeId == radioNodeIdUNKNOWN)
+ {
+ READSHM(conf->radioNodeId, arlan->radioNodeId, u_short);
+ }
+ else
+ conf->radioNodeId = radioNodeId;
+
+ if (SID == SIDUNKNOWN)
+ {
+ READSHM(conf->SID, arlan->SID, u_int);
+ }
+ else conf->SID = SID;
+
+ if (spreadingCode == spreadingCodeUNKNOWN)
+ {
+ READSHM(conf->spreadingCode, arlan->spreadingCode, u_char);
+ }
+ else
+ conf->spreadingCode = spreadingCode;
+
+ if (channelSet == channelSetUNKNOWN)
+ {
+ READSHM(conf->channelSet, arlan->channelSet, u_char);
+ }
+ else conf->channelSet = channelSet;
+
+ if (channelNumber == channelNumberUNKNOWN)
+ {
+ READSHM(conf->channelNumber, arlan->channelNumber, u_char);
+ }
+ else conf->channelNumber = channelNumber;
+
+ READSHM(conf->scramblingDisable, arlan->scramblingDisable, u_char);
+ READSHM(conf->txAttenuation, arlan->txAttenuation, u_char);
+
+ if (systemId == systemIdUNKNOWN)
+ {
+ READSHM(conf->systemId, arlan->systemId, u_int);
+ }
+ else conf->systemId = systemId;
+
+ READSHM(conf->maxDatagramSize, arlan->maxDatagramSize, u_short);
+ READSHM(conf->maxFrameSize, arlan->maxFrameSize, u_short);
+ READSHM(conf->maxRetries, arlan->maxRetries, u_char);
+ READSHM(conf->receiveMode, arlan->receiveMode, u_char);
+ READSHM(conf->priority, arlan->priority, u_char);
+ READSHM(conf->rootOrRepeater, arlan->rootOrRepeater, u_char);
+
+ if (SID == SIDUNKNOWN)
+ {
+ READSHM(conf->SID, arlan->SID, u_int);
+ }
+ else conf->SID = SID;
+
+ if (registrationMode == registrationModeUNKNOWN)
+ {
+ READSHM(conf->registrationMode, arlan->registrationMode, u_char);
+ }
+ else conf->registrationMode = registrationMode;
+
+ READSHM(conf->registrationFill, arlan->registrationFill, u_char);
+ READSHM(conf->localTalkAddress, arlan->localTalkAddress, u_char);
+ READSHM(conf->codeFormat, arlan->codeFormat, u_char);
+ READSHM(conf->numChannels, arlan->numChannels, u_char);
+ READSHM(conf->channel1, arlan->channel1, u_char);
+ READSHM(conf->channel2, arlan->channel2, u_char);
+ READSHM(conf->channel3, arlan->channel3, u_char);
+ READSHM(conf->channel4, arlan->channel4, u_char);
+ READSHM(conf->waitTime, arlan->waitTime, u_short);
+ READSHM(conf->lParameter, arlan->lParameter, u_short);
+ READSHM(conf->_15, arlan->_15, u_short);
+ READSHM(conf->headerSize, arlan->headerSize, u_short);
+ READSHM(conf->hardwareType, arlan->hardwareType, u_char);
+ READSHM(conf->radioType, arlan->radioModule, u_char);
+
+ if (conf->radioType == 0)
+ conf->radioType = 0xc;
+
+ WRITESHM(arlan->configStatus, 0xA5, u_char);
+ READSHM(tlx415, arlan->configStatus, u_char);
+
+ if (tlx415 != 0xA5)
+ printk(KERN_INFO "%s tlx415 chip \n", dev->name);
+
+ conf->txClear = 0;
+ conf->txRetries = 1;
+ conf->txRouting = 1;
+ conf->txScrambled = 0;
+ conf->rxParameter = 1;
+ conf->txTimeoutMs = 4000;
+ conf->waitCardTimeout = 100000;
+ conf->receiveMode = ARLAN_RCV_CLEAN;
+ memcpy_fromio(conf->siteName, arlan->name, 16);
+ conf->siteName[16] = '\0';
+ conf->retries = retries;
+ conf->tx_delay_ms = tx_delay_ms;
+ conf->ReTransmitPacketMaxSize = 200;
+ conf->waitReTransmitPacketMaxSize = 200;
+ conf->txAckTimeoutMs = 900;
+ conf->fastReTransCount = 3;
+
+ ARLAN_DEBUG_EXIT("arlan_read_card_configuration");
+
+ return 0;
+}
+
+
+static int lastFoundAt = 0xbe000;
+
+
+/*
+ * This is the real probe routine. Linux has a history of friendly device
+ * probes on the ISA bus. A good device probe avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ */
+#define ARLAN_SHMEM_SIZE 0x2000
+static int __init arlan_check_fingerprint(unsigned long memaddr)
+{
+ static const char probeText[] = "TELESYSTEM SLW INC. ARLAN \0";
+ volatile struct arlan_shmem __iomem *arlan = (struct arlan_shmem *) memaddr;
+ unsigned long paddr = virt_to_phys((void *) memaddr);
+ char tempBuf[49];
+
+ ARLAN_DEBUG_ENTRY("arlan_check_fingerprint");
+
+ if (!request_mem_region(paddr, ARLAN_SHMEM_SIZE, "arlan")) {
+ // printk(KERN_WARNING "arlan: memory region %lx excluded from probing \n",paddr);
+ return -ENODEV;
+ }
+
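+ /* Read the ASCII signature from the start of the card's shared memory. */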
+ memcpy_fromio(tempBuf, arlan->textRegion, 29);
+ tempBuf[29] = 0;
+
+ /* check for card at this address */
+ if (0 != strncmp(tempBuf, probeText, 29)){
+ release_mem_region(paddr, ARLAN_SHMEM_SIZE);
+ return -ENODEV;
+ }
+
+// printk(KERN_INFO "arlan found at 0x%x \n",memaddr);
+ ARLAN_DEBUG_EXIT("arlan_check_fingerprint");
+
+ return 0;
+}
+
+static int arlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ struct arlan_conf_stru *conf = priv->Conf;
+
+ ARLAN_DEBUG_ENTRY("arlan_change_mtu");
+ if (new_mtu > 2032)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ if (new_mtu < 256)
+ new_mtu = 256; /* cards book suggests 1600 */
+ conf->maxDatagramSize = new_mtu;
+ conf->maxFrameSize = new_mtu + 48;
+
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
+ printk(KERN_NOTICE "%s mtu changed to %d \n", dev->name, new_mtu);
+
+ ARLAN_DEBUG_EXIT("arlan_change_mtu");
+
+ return 0;
+}
+
+static int arlan_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_mac_addr");
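+ /* Setting the MAC address is not supported; the code below the early return is never reached. */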
+ return -EINVAL;
+
+ if (!netif_running(dev))
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ ARLAN_DEBUG_EXIT("arlan_mac_addr");
+ return 0;
+}
+
+
+
+static int __init arlan_setup_device(struct net_device *dev, int num)
+{
+ struct arlan_private *ap = netdev_priv(dev);
+ int err;
+
+ ARLAN_DEBUG_ENTRY("arlan_setup_device");
+
+ ap->conf = (struct arlan_shmem *)(ap+1);
+
+ dev->tx_queue_len = tx_queue_len;
+ dev->open = arlan_open;
+ dev->stop = arlan_close;
+ dev->hard_start_xmit = arlan_tx;
+ dev->get_stats = arlan_statistics;
+ dev->set_multicast_list = arlan_set_multicast;
+ dev->change_mtu = arlan_change_mtu;
+ dev->set_mac_address = arlan_mac_addr;
+ dev->tx_timeout = arlan_tx_timeout;
+ dev->watchdog_timeo = 3*HZ;
+
+ ap->irq_test_done = 0;
+ ap->Conf = &arlan_conf[num];
+
+ ap->Conf->pre_Command_Wait = 40;
+ ap->Conf->rx_tweak1 = 30;
+ ap->Conf->rx_tweak2 = 0;
+
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(virt_to_phys((void *) dev->mem_start),
+ ARLAN_SHMEM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ arlan_device[num] = dev;
+ ARLAN_DEBUG_EXIT("arlan_setup_device");
+ return 0;
+}
+
+static int __init arlan_probe_here(struct net_device *dev,
+ unsigned long memaddr)
+{
+ struct arlan_private *ap = netdev_priv(dev);
+
+ ARLAN_DEBUG_ENTRY("arlan_probe_here");
+
+ if (arlan_check_fingerprint(memaddr))
+ return -ENODEV;
+
+ printk(KERN_NOTICE "%s: Arlan found at %x\n", dev->name,
+ (int) virt_to_phys((void*)memaddr));
+
+ ap->card = (void *) memaddr;
+ dev->mem_start = memaddr;
+ dev->mem_end = memaddr + ARLAN_SHMEM_SIZE-1;
+
+ if (dev->irq < 2)
+ {
+ READSHM(dev->irq, ap->card->irqLevel, u_char);
+ } else if (dev->irq == 2)
+ dev->irq = 9;
+
+ arlan_read_card_configuration(dev);
+
+ ARLAN_DEBUG_EXIT("arlan_probe_here");
+ return 0;
+}
+
+
+static int arlan_open(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ int ret = 0;
+
+ ARLAN_DEBUG_ENTRY("arlan_open");
+
+ ret = request_irq(dev->irq, &arlan_interrupt, 0, dev->name, dev);
+ if (ret)
+ {
+ printk(KERN_ERR "%s: unable to get IRQ %d .\n",
+ dev->name, dev->irq);
+ return ret;
+ }
+
+
+ priv->bad = 0;
+ priv->lastReset = 0;
+ priv->reset = 0;
+ memcpy_fromio(dev->dev_addr, arlan->lanCardNodeId, 6);
+ memset(dev->broadcast, 0xff, 6);
+ dev->tx_queue_len = tx_queue_len;
+ priv->interrupt_processing_active = 0;
+ spin_lock_init(&priv->lock);
+
+ netif_start_queue (dev);
+
+ priv->registrationLostCount = 0;
+ priv->registrationLastSeen = jiffies;
+ priv->txLast = 0;
+ priv->tx_command_given = 0;
+ priv->rx_command_given = 0;
+
+ priv->reRegisterExp = 1;
+ priv->tx_last_sent = jiffies - 1;
+ priv->tx_last_cleared = jiffies;
+ priv->Conf->writeEEPROM = 0;
+ priv->Conf->registrationInterrupts = 1;
+
+ init_timer(&priv->timer);
+ priv->timer.expires = jiffies + HZ / 10;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.function = &arlan_registration_timer; /* timer handler */
+
+ arlan_command(dev, ARLAN_COMMAND_POWERUP | ARLAN_COMMAND_LONG_WAIT_NOW);
+ mdelay(200);
+ add_timer(&priv->timer);
+
+ ARLAN_DEBUG_EXIT("arlan_open");
+ return 0;
+}
+
+
+static void arlan_tx_timeout (struct net_device *dev)
+{
+ printk(KERN_ERR "%s: arlan transmit timed out, kernel decided\n", dev->name);
+ /* Try to restart the adaptor. */
+ arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
+ // dev->trans_start = jiffies;
+ // netif_start_queue (dev);
+}
+
+
+static int arlan_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ short length;
+ unsigned char *buf;
+
+ ARLAN_DEBUG_ENTRY("arlan_tx");
+
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ if (length + 0x12 > 0x800) {
+ printk(KERN_ERR "TX RING overflow \n");
+ netif_stop_queue (dev);
+ }
+
+ if (arlan_hw_tx(dev, buf, length) == -1)
+ goto bad_end;
+
+ dev->trans_start = jiffies;
+
+ dev_kfree_skb(skb);
+
+ arlan_process_interrupt(dev);
+ ARLAN_DEBUG_EXIT("arlan_tx");
+ return 0;
+
+bad_end:
+ arlan_process_interrupt(dev);
+ netif_stop_queue (dev);
+ ARLAN_DEBUG_EXIT("arlan_tx");
+ return 1;
+}
+
+
+static inline int DoNotReTransmitCrap(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ if (TXLAST(dev).length < priv->Conf->ReTransmitPacketMaxSize)
+ return 1;
+ return 0;
+
+}
+
+static inline int DoNotWaitReTransmitCrap(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ if (TXLAST(dev).length < priv->Conf->waitReTransmitPacketMaxSize)
+ return 1;
+ return 0;
+}
+
+static inline void arlan_queue_retransmit(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ ARLAN_DEBUG_ENTRY("arlan_queue_retransmit");
+
+ if (DoNotWaitReTransmitCrap(dev))
+ {
+ arlan_drop_tx(dev);
+ } else
+ priv->ReTransmitRequested++;
+
+ ARLAN_DEBUG_EXIT("arlan_queue_retransmit");
+}
+
+static inline void RetryOrFail(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ ARLAN_DEBUG_ENTRY("RetryOrFail");
+
+ if (priv->retransmissions > priv->Conf->retries ||
+ DoNotReTransmitCrap(dev))
+ {
+ arlan_drop_tx(dev);
+ }
+ else if (priv->bad <= priv->Conf->fastReTransCount)
+ {
+ arlan_retransmit_now(dev);
+ }
+ else arlan_queue_retransmit(dev);
+
+ ARLAN_DEBUG_EXIT("RetryOrFail");
+}
+
+
+static void arlan_tx_done_interrupt(struct net_device *dev, int status)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ ARLAN_DEBUG_ENTRY("arlan_tx_done_interrupt");
+
+ priv->tx_last_cleared = jiffies;
+ priv->tx_command_given = 0;
+ switch (status)
+ {
+ case 1:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit OK\n");
+ priv->stats.tx_packets++;
+ priv->bad = 0;
+ priv->reset = 0;
+ priv->retransmissions = 0;
+ if (priv->Conf->tx_delay_ms)
+ {
+ priv->tx_done_delayed = jiffies + (priv->Conf->tx_delay_ms * HZ) / 1000 + 1;
+ }
+ else
+ {
+ TXLAST(dev).offset = 0;
+ if (priv->txLast)
+ priv->txLast = 0;
+ else if (TXTAIL(dev).offset)
+ priv->txLast = 1;
+ if (TXLAST(dev).offset)
+ {
+ arlan_retransmit_now(dev);
+ dev->trans_start = jiffies;
+ }
+ if (!TXHEAD(dev).offset || !TXTAIL(dev).offset)
+ {
+ netif_wake_queue (dev);
+ }
+ }
+ }
+ break;
+
+ case 2:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit timed out\n");
+ priv->bad += 1;
+ //arlan_queue_retransmit(dev);
+ RetryOrFail(dev);
+ }
+ break;
+
+ case 3:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit max retries\n");
+ priv->bad += 1;
+ priv->reset = 0;
+ //arlan_queue_retransmit(dev);
+ RetryOrFail(dev);
+ }
+ break;
+
+ case 4:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit aborted\n");
+ priv->bad += 1;
+ arlan_queue_retransmit(dev);
+ //RetryOrFail(dev);
+ }
+ break;
+
+ case 5:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit not registered\n");
+ priv->bad += 1;
+ //debug=101;
+ arlan_queue_retransmit(dev);
+ }
+ break;
+
+ case 6:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit destination full\n");
+ priv->bad += 1;
+ priv->reset = 0;
+ //arlan_drop_tx(dev);
+ arlan_queue_retransmit(dev);
+ }
+ break;
+
+ case 7:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit unknown ack\n");
+ priv->bad += 1;
+ priv->reset = 0;
+ arlan_queue_retransmit(dev);
+ }
+ break;
+
+ case 8:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit dest mail box full\n");
+ priv->bad += 1;
+ priv->reset = 0;
+ //arlan_drop_tx(dev);
+ arlan_queue_retransmit(dev);
+ }
+ break;
+
+ case 9:
+ {
+ IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
+ printk("arlan intr: transmit root dest not reg.\n");
+ priv->bad += 1;
+ priv->reset = 1;
+ //arlan_drop_tx(dev);
+ arlan_queue_retransmit(dev);
+ }
+ break;
+
+ default:
+ {
+ printk(KERN_ERR "arlan intr: transmit status unknown\n");
+ priv->bad += 1;
+ priv->reset = 1;
+ arlan_drop_tx(dev);
+ }
+ }
+
+ ARLAN_DEBUG_EXIT("arlan_tx_done_interrupt");
+}
+
+
+static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short rxOffset, u_short pkt_len)
+{
+ char *skbtmp;
+ int i = 0;
+
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_rx_interrupt");
+ /* By spec, prohibited here:
+  *   WRITESHMB(arlan->rxStatus, 0x00);
+  *   arlan_command(dev, ARLAN_COMMAND_RX);
+  */
+
+ if (pkt_len < 10 || pkt_len > 2048)
+ {
+ printk(KERN_WARNING "%s: got too short or long packet, len %d \n", dev->name, pkt_len);
+ return;
+ }
+ if (rxOffset + pkt_len > 0x2000)
+ {
+ printk(KERN_WARNING "%s: got too long packet, len %d offset %x\n", dev->name, pkt_len, rxOffset);
+ return;
+ }
+ priv->in_bytes += pkt_len;
+ priv->in_bytes10 += pkt_len;
+ if (conf->measure_rate < 1)
+ conf->measure_rate = 1;
+ if (time_after(jiffies, priv->in_time + conf->measure_rate * HZ))
+ {
+ conf->in_speed = priv->in_bytes / conf->measure_rate;
+ priv->in_bytes = 0;
+ priv->in_time = jiffies;
+ }
+ if (time_after(jiffies, priv->in_time10 + conf->measure_rate * 10*HZ))
+ {
+ conf->in_speed10 = priv->in_bytes10 / (10 * conf->measure_rate);
+ priv->in_bytes10 = 0;
+ priv->in_time10 = jiffies;
+ }
+ DEBUGSHM(1, "arlan rcv pkt rxStatus= %d ", arlan->rxStatus, u_char);
+ switch (rxStatus)
+ {
+ case 1:
+ case 2:
+ case 3:
+ {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ DEBUGSHM(50, "arlan recv pkt offs=%d\n", arlan->rxOffset, u_short);
+ DEBUGSHM(1, "arlan rxFrmType = %d \n", arlan->rxFrmType, u_char);
+ DEBUGSHM(1, KERN_INFO "arlan rx scrambled = %d \n", arlan->scrambled, u_char);
+
+ /* here we do multicast filtering to avoid slow 8-bit memcopy */
+#ifdef ARLAN_MULTICAST
+ if (!(dev->flags & IFF_ALLMULTI) &&
+ !(dev->flags & IFF_PROMISC) &&
+ dev->mc_list)
+ {
+ char hw_dst_addr[6];
+ struct dev_mc_list *dmi = dev->mc_list;
+ int i;
+
+ memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
+ if (hw_dst_addr[0] == 0x01)
+ {
+ if (mdebug)
+ if (hw_dst_addr[1] == 0x00)
+ printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
+ else if (hw_dst_addr[1] == 0x40)
+ printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
+ while (dmi)
+ { if (dmi->dmi_addrlen == 6)
+ {
+ if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
+ printk(KERN_ERR "%s mcl %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name,
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ for (i = 0; i < 6; i++)
+ if (dmi->dmi_addr[i] != hw_dst_addr[i])
+ break;
+ if (i == 6)
+ break;
+ }
+ else
+ printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name);
+ dmi = dmi->next;
+ }
+ /* we reach here if multicast filtering is on and packet
+ * is multicast and not for receive */
+ goto end_of_interrupt;
+ }
+ }
+#endif // ARLAN_MULTICAST
+ /* multicast filtering ends here */
+ pkt_len += ARLAN_FAKE_HDR_LEN;
+
+ skb = dev_alloc_skb(pkt_len + 4);
+ if (skb == NULL)
+ {
+ printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
+ priv->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, 2);
+ skb->dev = dev;
+ skbtmp = skb_put(skb, pkt_len);
+
+ memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN);
+ memcpy_fromio(skbtmp, arlan->ultimateDestAddress, 6);
+ memcpy_fromio(skbtmp + 6, arlan->rxSrc, 6);
+ WRITESHMB(arlan->rxStatus, 0x00);
+ arlan_command(dev, ARLAN_COMMAND_RX);
+
+ IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
+ {
+ char immedDestAddress[6];
+ char immedSrcAddress[6];
+ memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6);
+ memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6);
+
+ printk(KERN_WARNING "%s t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x imd %2x:%2x:%2x:%2x:%2x:%2x ims %2x:%2x:%2x:%2x:%2x:%2x\n", dev->name,
+ (unsigned char) skbtmp[0], (unsigned char) skbtmp[1], (unsigned char) skbtmp[2], (unsigned char) skbtmp[3],
+ (unsigned char) skbtmp[4], (unsigned char) skbtmp[5], (unsigned char) skbtmp[6], (unsigned char) skbtmp[7],
+ (unsigned char) skbtmp[8], (unsigned char) skbtmp[9], (unsigned char) skbtmp[10], (unsigned char) skbtmp[11],
+ immedDestAddress[0], immedDestAddress[1], immedDestAddress[2],
+ immedDestAddress[3], immedDestAddress[4], immedDestAddress[5],
+ immedSrcAddress[0], immedSrcAddress[1], immedSrcAddress[2],
+ immedSrcAddress[3], immedSrcAddress[4], immedSrcAddress[5]);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
+ if (skb->protocol != 0x608 && skb->protocol != 0x8)
+ {
+ for (i = 0; i <= 22; i++)
+ printk("%02x:", (u_char) skbtmp[i + 12]);
+ printk(KERN_ERR "\n");
+ printk(KERN_WARNING "arlan kernel pkt type trans %x \n", skb->protocol);
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "arlan intr: received unknown status\n");
+ priv->stats.rx_crc_errors++;
+ break;
+ }
+ ARLAN_DEBUG_EXIT("arlan_rx_interrupt");
+}
+
+static void arlan_process_interrupt(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ u_char rxStatus = READSHMB(arlan->rxStatus);
+ u_char txStatus = READSHMB(arlan->txStatus);
+ u_short rxOffset = READSHMS(arlan->rxOffset);
+ u_short pkt_len = READSHMS(arlan->rxLength);
+ int interrupt_count = 0;
+
+ ARLAN_DEBUG_ENTRY("arlan_process_interrupt");
+
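+ /* Only one context may run this loop at a time; up to five rx/tx events
+  * are handled per call, re-reading the status bytes after each pass. */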
+ if (test_and_set_bit(0, (void *) &priv->interrupt_processing_active))
+ {
+ if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
+ printk(KERN_ERR "interrupt chain reentering \n");
+ goto end_int_process;
+ }
+ while ((rxStatus || txStatus || priv->interrupt_ack_requested)
+ && (interrupt_count < 5))
+ {
+ if (rxStatus)
+ priv->last_rx_int_ack_time = jiffies;
+
+ arlan_command(dev, ARLAN_COMMAND_INT_ACK);
+ arlan_command(dev, ARLAN_COMMAND_INT_ENABLE);
+
+ IFDEBUG(ARLAN_DEBUG_INTERRUPT)
+ printk(KERN_ERR "%s: got IRQ rx %x tx %x comm %x rxOff %x rxLen %x \n",
+ dev->name, rxStatus, txStatus, READSHMB(arlan->commandByte),
+ rxOffset, pkt_len);
+
+ if (rxStatus == 0 && txStatus == 0)
+ {
+ if (priv->irq_test_done)
+ {
+ if (!registrationBad(dev))
+ IFDEBUG(ARLAN_DEBUG_INTERRUPT) printk(KERN_ERR "%s unknown interrupt(nop? regLost ?) reason tx %d rx %d ",
+ dev->name, txStatus, rxStatus);
+ } else {
+ IFDEBUG(ARLAN_DEBUG_INTERRUPT)
+ printk(KERN_INFO "%s irq $%d test OK \n", dev->name, dev->irq);
+
+ }
+ priv->interrupt_ack_requested = 0;
+ goto ends;
+ }
+ if (txStatus != 0)
+ {
+ WRITESHMB(arlan->txStatus, 0x00);
+ arlan_tx_done_interrupt(dev, txStatus);
+ goto ends;
+ }
+ if (rxStatus == 1 || rxStatus == 2)
+ { /* a packet waiting */
+ arlan_rx_interrupt(dev, rxStatus, rxOffset, pkt_len);
+ goto ends;
+ }
+ if (rxStatus > 2 && rxStatus < 0xff)
+ {
+ WRITESHMB(arlan->rxStatus, 0x00);
+ printk(KERN_ERR "%s unknown rxStatus reason tx %d rx %d ",
+ dev->name, txStatus, rxStatus);
+ goto ends;
+ }
+ if (rxStatus == 0xff)
+ {
+ WRITESHMB(arlan->rxStatus, 0x00);
+ arlan_command(dev, ARLAN_COMMAND_RX);
+ if (registrationBad(dev))
+ netif_device_detach(dev);
+ if (!registrationBad(dev))
+ {
+ priv->registrationLastSeen = jiffies;
+ if (!netif_queue_stopped(dev) && !priv->under_reset && !priv->under_config)
+ netif_wake_queue (dev);
+ }
+ goto ends;
+ }
+ends:
+
+ arlan_command_process(dev);
+
+ rxStatus = READSHMB(arlan->rxStatus);
+ txStatus = READSHMB(arlan->txStatus);
+ rxOffset = READSHMS(arlan->rxOffset);
+ pkt_len = READSHMS(arlan->rxLength);
+
+
+ priv->irq_test_done = 1;
+
+ interrupt_count++;
+ }
+ priv->interrupt_processing_active = 0;
+
+end_int_process:
+ arlan_command_process(dev);
+
+ ARLAN_DEBUG_EXIT("arlan_process_interrupt");
+ return;
+}
+
+static irqreturn_t arlan_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ u_char rxStatus = READSHMB(arlan->rxStatus);
+ u_char txStatus = READSHMB(arlan->txStatus);
+
+ ARLAN_DEBUG_ENTRY("arlan_interrupt");
+
+
+ if (!rxStatus && !txStatus)
+ priv->interrupt_ack_requested++;
+
+ arlan_process_interrupt(dev);
+
+ priv->irq_test_done = 1;
+
+ ARLAN_DEBUG_EXIT("arlan_interrupt");
+ return IRQ_HANDLED;
+
+}
+
+
+static int arlan_close(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+
+ ARLAN_DEBUG_ENTRY("arlan_close");
+
+ del_timer_sync(&priv->timer);
+
+ arlan_command(dev, ARLAN_COMMAND_POWERDOWN);
+
+ IFDEBUG(ARLAN_DEBUG_STARTUP)
+ printk(KERN_NOTICE "%s: Closing device\n", dev->name);
+
+ netif_stop_queue(dev);
+ free_irq(dev->irq, dev);
+
+ ARLAN_DEBUG_EXIT("arlan_close");
+ return 0;
+}
+
+#ifdef ARLAN_DEBUGGING
+static long alignLong(volatile u_char * ptr)
+{
+ long ret;
+ memcpy_fromio(&ret, (void *) ptr, 4);
+ return ret;
+}
+#endif
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+
+static struct net_device_stats *arlan_statistics(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_statistics");
+
+ /* Update the statistics from the device registers. */
+
+ READSHM(priv->stats.collisions, arlan->numReTransmissions, u_int);
+ READSHM(priv->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
+ READSHM(priv->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
+ READSHM(priv->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
+ READSHM(priv->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
+ READSHM(priv->stats.rx_over_errors, arlan->numRXOverruns, u_int);
+ READSHM(priv->stats.rx_packets, arlan->numDatagramsReceived, u_int);
+ READSHM(priv->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
+ READSHM(priv->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
+ READSHM(priv->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
+ READSHM(priv->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
+ READSHM(priv->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
+ READSHM(priv->stats.tx_window_errors, arlan->numHoldOffs, u_int);
+
+ ARLAN_DEBUG_EXIT("arlan_statistics");
+
+ return &priv->stats;
+}
+
+
+static void arlan_set_multicast(struct net_device *dev)
+{
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ struct arlan_conf_stru *conf = priv->Conf;
+ int board_conf_needed = 0;
+
+
+ ARLAN_DEBUG_ENTRY("arlan_set_multicast");
+
+ if (dev->flags & IFF_PROMISC)
+ {
+ unsigned char recMode;
+ READSHM(recMode, arlan->receiveMode, u_char);
+ conf->receiveMode = (ARLAN_RCV_PROMISC | ARLAN_RCV_CONTROL);
+ if (conf->receiveMode != recMode)
+ board_conf_needed = 1;
+ }
+ else
+ {
+ /* turn off promiscuous mode */
+ unsigned char recMode;
+ READSHM(recMode, arlan->receiveMode, u_char);
+ conf->receiveMode = ARLAN_RCV_CLEAN | ARLAN_RCV_CONTROL;
+ if (conf->receiveMode != recMode)
+ board_conf_needed = 1;
+ }
+ if (board_conf_needed)
+ arlan_command(dev, ARLAN_COMMAND_CONF);
+
+ ARLAN_DEBUG_EXIT("arlan_set_multicast");
+}
+
+
+struct net_device * __init arlan_probe(int unit)
+{
+ struct net_device *dev;
+ int err;
+ int m;
+
+ ARLAN_DEBUG_ENTRY("arlan_probe");
+
+ if (arlans_found == MAX_ARLANS)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Reserve space for local data and a copy of the shared memory
+ * that is used by the /proc interface.
+ */
+ dev = alloc_etherdev(sizeof(struct arlan_private)
+ + sizeof(struct arlan_shmem));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_MODULE_OWNER(dev);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ if (dev->mem_start) {
+ if (arlan_probe_here(dev, dev->mem_start) == 0)
+ goto found;
+ goto not_found;
+ }
+
+ }
+
+
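+ /* No address given: scan the ISA shared-memory window upwards from the
+  * last hit, in ARLAN_SHMEM_SIZE steps, up to physical 0xDE000. */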
+ for (m = (int)phys_to_virt(lastFoundAt) + ARLAN_SHMEM_SIZE;
+ m <= (int)phys_to_virt(0xDE000);
+ m += ARLAN_SHMEM_SIZE)
+ {
+ if (arlan_probe_here(dev, m) == 0)
+ {
+ lastFoundAt = (int)virt_to_phys((void*)m);
+ goto found;
+ }
+ }
+
+ if (lastFoundAt == 0xbe000)
+ printk(KERN_ERR "arlan: No Arlan devices found \n");
+
+ not_found:
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+
+ found:
+ err = arlan_setup_device(dev, arlans_found);
+ if (err)
+ dev = ERR_PTR(err);
+ else if (!arlans_found++)
+ printk(KERN_INFO "Arlan driver %s\n", arlan_version);
+
+ return dev;
+}
+
+#ifdef MODULE
+int init_module(void)
+{
+ int i = 0;
+
+ ARLAN_DEBUG_ENTRY("init_module");
+
+ if (channelSet != channelSetUNKNOWN || channelNumber != channelNumberUNKNOWN || systemId != systemIdUNKNOWN)
+ return -EINVAL;
+
+ for (i = 0; i < MAX_ARLANS; i++) {
+ struct net_device *dev = arlan_probe(i);
+
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ }
+ init_arlan_proc();
+ printk(KERN_INFO "Arlan driver %s\n", arlan_version);
+ ARLAN_DEBUG_EXIT("init_module");
+ return 0;
+}
+
+
+void cleanup_module(void)
+{
+ int i = 0;
+ struct net_device *dev;
+
+ ARLAN_DEBUG_ENTRY("cleanup_module");
+
+ IFDEBUG(ARLAN_DEBUG_SHUTDOWN)
+ printk(KERN_INFO "arlan: unloading module\n");
+
+ cleanup_arlan_proc();
+
+ for (i = 0; i < MAX_ARLANS; i++)
+ {
+ dev = arlan_device[i];
+ if (dev) {
+ arlan_command(dev, ARLAN_COMMAND_POWERDOWN );
+
+ unregister_netdev(dev);
+ release_mem_region(virt_to_phys((void *) dev->mem_start),
+ ARLAN_SHMEM_SIZE);
+ free_netdev(dev);
+ arlan_device[i] = NULL;
+ }
+ }
+
+ ARLAN_DEBUG_EXIT("cleanup_module");
+}
+
+
+#endif
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
new file mode 100644
index 000000000000..a2cca521f444
--- /dev/null
+++ b/drivers/net/wireless/arlan-proc.c
@@ -0,0 +1,1262 @@
+#include <linux/config.h>
+#include "arlan.h"
+
+#include <linux/sysctl.h>
+
+#ifdef CONFIG_PROC_FS
+
+/* void enableReceive(struct net_device* dev);
+*/
+
+
+
+#define ARLAN_STR_SIZE 0x2ff0
+#define DEV_ARLAN_INFO 1
+#define DEV_ARLAN 1
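+/* Helper macros for the /proc dump below: each formats one field of the
+ * card's shared-memory image as a "name = value" line into arlan_drive_info. */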
+#define SARLG(type,var) {\
+ pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n", #var, READSHMB(priva->card->var)); \
+ }
+
+#define SARLBN(type,var,nn) {\
+ pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x",#var);\
+ for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
+ pos += sprintf(arlan_drive_info+pos, "\n"); \
+ }
+
+#define SARLBNpln(type,var,nn) {\
+ for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
+ }
+
+#define SARLSTR(var,nn) {\
+ char tmpStr[400];\
+ int tmpLn = nn;\
+ if (nn > 399 ) tmpLn = 399; \
+ memcpy(tmpStr,(char *) priva->conf->var,tmpLn);\
+ tmpStr[tmpLn] = 0; \
+ pos += sprintf(arlan_drive_info+pos, "%s\t=\t%s \n",#var,tmpStr);\
+ }
+
+#define SARLUC(var) SARLG(u_char, var)
+#define SARLUCN(var,nn) SARLBN(u_char,var, nn)
+#define SARLUS(var) SARLG(u_short, var)
+#define SARLUSN(var,nn) SARLBN(u_short,var, nn)
+#define SARLUI(var) SARLG(u_int, var)
+
+#define SARLUSA(var) {\
+ u_short tmpVar;\
+ memcpy(&tmpVar, (short *) priva->conf->var,2); \
+ pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
+}
+
+#define SARLUIA(var) {\
+ u_int tmpVar;\
+ memcpy(&tmpVar, (int* )priva->conf->var,4); \
+ pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
+}
+
+
+static const char *arlan_diagnostic_info_string(struct net_device *dev)
+{
+
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ u_char diagnosticInfo;
+
+ READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
+
+ switch (diagnosticInfo)
+ {
+ case 0xFF:
+ return "Diagnostic info is OK";
+ case 0xFE:
+ return "ERROR EPROM Checksum error ";
+ case 0xFD:
+ return "ERROR Local Ram Test Failed ";
+ case 0xFC:
+ return "ERROR SCC failure ";
+ case 0xFB:
+ return "ERROR BackBone failure ";
+ case 0xFA:
+ return "ERROR transceiver not found ";
+ case 0xF9:
+ return "ERROR no more address space ";
+ case 0xF8:
+ return "ERROR Checksum error ";
+ case 0xF7:
+ return "ERROR Missing SS Code";
+ case 0xF6:
+ return "ERROR Invalid config format";
+ case 0xF5:
+ return "ERROR Reserved errorcode F5";
+ case 0xF4:
+ return "ERROR Invalid spreading code/channel number";
+ case 0xF3:
+ return "ERROR Load Code Error";
+ case 0xF2:
+ return "ERROR Reserved errorcode F2 ";
+ case 0xF1:
+ return "ERROR Invalid command received by LAN card ";
+ case 0xF0:
+ return "ERROR Invalid parameter found in command ";
+ case 0xEF:
+ return "ERROR On-chip timer failure ";
+ case 0xEE:
+ return "ERROR T410 timer failure ";
+ case 0xED:
+ return "ERROR Too Many TxEnable commands ";
+ case 0xEC:
+ return "ERROR EEPROM error on radio module ";
+ default:
+ return "ERROR unknown Diagnostic info reply code ";
+ }
+}
+
+static const char *arlan_hardware_type_string(struct net_device *dev)
+{
+ u_char hardwareType;
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+
+ READSHM(hardwareType, arlan->hardwareType, u_char);
+ switch (hardwareType)
+ {
+ case 0x00:
+ return "type A450";
+ case 0x01:
+ return "type A650 ";
+ case 0x04:
+ return "type TMA coproc";
+ case 0x0D:
+ return "type A650E ";
+ case 0x18:
+ return "type TMA coproc Australian";
+ case 0x19:
+ return "type A650A ";
+ case 0x26:
+ return "type TMA coproc European";
+ case 0x2E:
+ return "type A655 ";
+ case 0x2F:
+ return "type A655A ";
+ case 0x30:
+ return "type A655E ";
+ case 0x0B:
+ return "type A670 ";
+ case 0x0C:
+ return "type A670E ";
+ case 0x2D:
+ return "type A670A ";
+ case 0x0F:
+ return "type A411T";
+ case 0x16:
+ return "type A411TA";
+ case 0x1B:
+ return "type A440T";
+ case 0x1C:
+ return "type A412T";
+ case 0x1E:
+ return "type A412TA";
+ case 0x22:
+ return "type A411TE";
+ case 0x24:
+ return "type A412TE";
+ case 0x27:
+ return "type A671T ";
+ case 0x29:
+ return "type A671TA ";
+ case 0x2B:
+ return "type A671TE ";
+ case 0x31:
+ return "type A415T ";
+ case 0x33:
+ return "type A415TA ";
+ case 0x35:
+ return "type A415TE ";
+ case 0x37:
+ return "type A672";
+ case 0x39:
+ return "type A672A ";
+ case 0x3B:
+ return "type A672T";
+ case 0x6B:
+ return "type IC2200";
+ default:
+ return "type unknown";
+ }
+}
+#ifdef ARLAN_DEBUGGING
+static void arlan_print_diagnostic_info(struct net_device *dev)
+{
+ int i;
+ u_char diagnosticInfo;
+ u_short diagnosticOffset;
+ u_char hardwareType;
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+
+ // ARLAN_DEBUG_ENTRY("arlan_print_diagnostic_info");
+
+ if (READSHMB(arlan->configuredStatusFlag) == 0)
+ printk("Arlan: Card NOT configured\n");
+ else
+ printk("Arlan: Card is configured\n");
+
+ READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
+ READSHM(diagnosticOffset, arlan->diagnosticOffset, u_short);
+
+ printk(KERN_INFO "%s\n", arlan_diagnostic_info_string(dev));
+
+ if (diagnosticInfo != 0xff)
+ printk("%s arlan: Diagnostic Offset %d \n", dev->name, diagnosticOffset);
+
+ printk("arlan: LAN CODE ID = ");
+ for (i = 0; i < 6; i++)
+ DEBUGSHM(1, "%03d:", arlan->lanCardNodeId[i], u_char);
+ printk("\n");
+
+ printk("arlan: Arlan BroadCast address = ");
+ for (i = 0; i < 6; i++)
+ DEBUGSHM(1, "%03d:", arlan->broadcastAddress[i], u_char);
+ printk("\n");
+
+ READSHM(hardwareType, arlan->hardwareType, u_char);
+ printk(KERN_INFO "%s\n", arlan_hardware_type_string(dev));
+
+
+ DEBUGSHM(1, "arlan: channelNumber=%d\n", arlan->channelNumber, u_char);
+ DEBUGSHM(1, "arlan: channelSet=%d\n", arlan->channelSet, u_char);
+ DEBUGSHM(1, "arlan: spreadingCode=%d\n", arlan->spreadingCode, u_char);
+ DEBUGSHM(1, "arlan: radioNodeId=%d\n", arlan->radioNodeId, u_short);
+ DEBUGSHM(1, "arlan: SID =%d\n", arlan->SID, u_short);
+ DEBUGSHM(1, "arlan: rxOffset=%d\n", arlan->rxOffset, u_short);
+
+ DEBUGSHM(1, "arlan: registration mode is %d\n", arlan->registrationMode, u_char);
+
+ printk("arlan: name= ");
+ IFDEBUG(1)
+
+ for (i = 0; i < 16; i++)
+ {
+ char c;
+ READSHM(c, arlan->name[i], char);
+ if (c)
+ printk("%c", c);
+ }
+ printk("\n");
+
+// ARLAN_DEBUG_EXIT("arlan_print_diagnostic_info");
+
+}
+
+
+/****************************** TEST MEMORY **************/
+
+static int arlan_hw_test_memory(struct net_device *dev)
+{
+ u_char *ptr;
+ int i;
+ int memlen = sizeof(struct arlan_shmem) - 0xF; /* avoid control register */
+ volatile char *arlan_mem = (char *) (dev->mem_start);
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+ char pattern;
+
+ ptr = NULL;
+
+ /* hold card in reset state */
+ setHardwareReset(dev);
+
+ /* test memory */
+ pattern = 0;
+ for (i = 0; i < memlen; i++)
+ WRITESHM(arlan_mem[i], ((u_char) pattern++), u_char);
+
+ pattern = 0;
+ for (i = 0; i < memlen; i++)
+ {
+ char res;
+ READSHM(res, arlan_mem[i], char);
+ if (res != pattern++)
+ {
+ printk(KERN_ERR "Arlan driver memory test 1 failed \n");
+ return -1;
+ }
+ }
+
+ pattern = 0;
+ for (i = 0; i < memlen; i++)
+ WRITESHM(arlan_mem[i], ~(pattern++), char);
+
+ pattern = 0;
+ for (i = 0; i < memlen; i++)
+ {
+ char res;
+ READSHM(res, arlan_mem[i], char);
+ if (res != ~(pattern++))
+ {
+ printk(KERN_ERR "Arlan driver memory test 2 failed \n");
+ return -1;
+ }
+ }
+
+ /* zero memory */
+ for (i = 0; i < memlen; i++)
+ WRITESHM(arlan_mem[i], 0x00, char);
+
+ IFDEBUG(1) printk(KERN_INFO "Arlan: memory tests ok\n");
+
+ /* set reset flag and then release reset */
+ WRITESHM(arlan->resetFlag, 0xff, u_char);
+
+ clearChannelAttention(dev);
+ clearHardwareReset(dev);
+
+ /* wait for reset flag to become zero, we'll wait for two seconds */
+ if (arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW))
+ {
+ printk(KERN_ERR "%s arlan: failed to come back from memory test\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
+
+static int arlan_setup_card_by_book(struct net_device *dev)
+{
+ u_char irqLevel, configuredStatusFlag;
+ struct arlan_private *priv = netdev_priv(dev);
+ volatile struct arlan_shmem __iomem *arlan = priv->card;
+
+// ARLAN_DEBUG_ENTRY("arlan_setup_card");
+
+ READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
+
+ IFDEBUG(10)
+ if (configuredStatusFlag != 0)
+ IFDEBUG(10) printk("arlan: CARD IS CONFIGURED\n");
+ else
+ IFDEBUG(10) printk("arlan: card is NOT configured\n");
+
+ if (testMemory || (READSHMB(arlan->diagnosticInfo) != 0xff))
+ if (arlan_hw_test_memory(dev))
+ return -1;
+
+ DEBUGSHM(4, "arlan configuredStatus = %d \n", arlan->configuredStatusFlag, u_char);
+ DEBUGSHM(4, "arlan driver diagnostic: 0x%2x\n", arlan->diagnosticInfo, u_char);
+
+ /* issue nop command - no interrupt */
+ arlan_command(dev, ARLAN_COMMAND_NOOP);
+ if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
+ return -1;
+
+ IFDEBUG(50) printk("1st Noop successfully executed !!\n");
+
+ /* try to turn on the arlan interrupts */
+ clearClearInterrupt(dev);
+ setClearInterrupt(dev);
+ setInterruptEnable(dev);
+
+ /* issue nop command - with interrupt */
+
+ arlan_command(dev, ARLAN_COMMAND_NOOPINT);
+ if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
+ return -1;
+
+
+ IFDEBUG(50) printk("2nd Noop successfully executed !!\n");
+
+ READSHM(irqLevel, arlan->irqLevel, u_char)
+
+ if (irqLevel != dev->irq)
+ {
+ IFDEBUG(1) printk(KERN_WARNING "arlan dip switches set irq to %d\n", irqLevel);
+ printk(KERN_WARNING "device driver irq set to %d - does not match\n", dev->irq);
+ dev->irq = irqLevel;
+ }
+ else
+ IFDEBUG(2) printk("irq level is OK\n");
+
+
+ IFDEBUG(3) arlan_print_diagnostic_info(dev);
+
+ arlan_command(dev, ARLAN_COMMAND_CONF);
+
+ READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
+ if (configuredStatusFlag == 0)
+ {
+ printk(KERN_WARNING "arlan configure failed\n");
+ return -1;
+ }
+ arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
+ arlan_command(dev, ARLAN_COMMAND_RX);
+ arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
+ printk(KERN_NOTICE "%s: arlan driver version %s loaded\n",
+ dev->name, arlan_version);
+
+// ARLAN_DEBUG_EXIT("arlan_setup_card");
+
+ return 0; /* no errors */
+}
+#endif
+
+#ifdef ARLAN_PROC_INTERFACE
+#ifdef ARLAN_PROC_SHM_DUMP
+
+static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
+
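+/* /proc read handler: snapshots the card's shared memory into priv->conf and
+ * formats every field into arlan_drive_info, which proc_dostring() then
+ * returns to the reader. */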
+static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int i;
+ int retv, pos, devnum;
+ struct arlan_private *priva = NULL;
+ struct net_device *dev;
+ pos = 0;
+ if (write)
+ {
+ printk("write: ");
+ for (i = 0; i < 100; i++)
+ printk("adi %x \n", arlan_drive_info[i]);
+ }
+ if (ctl->procname == NULL || arlan_drive_info == NULL)
+ {
+ printk(KERN_WARNING "procname is NULL in sysctl_table or arlan_drive_info is NULL in arlan module\n");
+ return -1;
+ }
+ devnum = ctl->procname[5] - '0';
+ if (devnum < 0 || devnum > MAX_ARLANS - 1)
+ {
+ printk(KERN_WARNING "invalid device number in procfs parse\n");
+ return -1;
+ }
+ else if (arlan_device[devnum] == NULL)
+ {
+ if (ctl->procname)
+ pos += sprintf(arlan_drive_info + pos, "\t%s\n\n", ctl->procname);
+ pos += sprintf(arlan_drive_info + pos, "No device found here \n");
+ goto final;
+ }
+ else
+ priva = arlan_device[devnum]->priv;
+
+ if (priva == NULL)
+ {
+ printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
+ return -1;
+ }
+ dev = arlan_device[devnum];
+
+ memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
+
+ pos = sprintf(arlan_drive_info, "Arlan info \n");
+ /* Header Signature */
+ SARLSTR(textRegion, 48);
+ SARLUC(resetFlag);
+ pos += sprintf(arlan_drive_info + pos, "diagnosticInfo\t=\t%s \n", arlan_diagnostic_info_string(dev));
+ SARLUC(diagnosticInfo);
+ SARLUS(diagnosticOffset);
+ SARLUCN(_1, 12);
+ SARLUCN(lanCardNodeId, 6);
+ SARLUCN(broadcastAddress, 6);
+ pos += sprintf(arlan_drive_info + pos, "hardwareType =\t %s \n", arlan_hardware_type_string(dev));
+ SARLUC(hardwareType);
+ SARLUC(majorHardwareVersion);
+ SARLUC(minorHardwareVersion);
+ SARLUC(radioModule);
+ SARLUC(defaultChannelSet);
+ SARLUCN(_2, 47);
+
+ /* Control/Status Block - 0x0080 */
+ SARLUC(interruptInProgress);
+ SARLUC(cntrlRegImage);
+
+ SARLUCN(_3, 14);
+ SARLUC(commandByte);
+ SARLUCN(commandParameter, 15);
+
+ /* Receive Status - 0x00a0 */
+ SARLUC(rxStatus);
+ SARLUC(rxFrmType);
+ SARLUS(rxOffset);
+ SARLUS(rxLength);
+ SARLUCN(rxSrc, 6);
+ SARLUC(rxBroadcastFlag);
+ SARLUC(rxQuality);
+ SARLUC(scrambled);
+ SARLUCN(_4, 1);
+
+ /* Transmit Status - 0x00b0 */
+ SARLUC(txStatus);
+ SARLUC(txAckQuality);
+ SARLUC(numRetries);
+ SARLUCN(_5, 14);
+ SARLUCN(registeredRouter, 6);
+ SARLUCN(backboneRouter, 6);
+ SARLUC(registrationStatus);
+ SARLUC(configuredStatusFlag);
+ SARLUCN(_6, 1);
+ SARLUCN(ultimateDestAddress, 6);
+ SARLUCN(immedDestAddress, 6);
+ SARLUCN(immedSrcAddress, 6);
+ SARLUS(rxSequenceNumber);
+ SARLUC(assignedLocaltalkAddress);
+ SARLUCN(_7, 27);
+
+ /* System Parameter Block */
+
+ /* - Driver Parameters (Novell Specific) */
+
+ SARLUS(txTimeout);
+ SARLUS(transportTime);
+ SARLUCN(_8, 4);
+
+ /* - Configuration Parameters */
+ SARLUC(irqLevel);
+ SARLUC(spreadingCode);
+ SARLUC(channelSet);
+ SARLUC(channelNumber);
+ SARLUS(radioNodeId);
+ SARLUCN(_9, 2);
+ SARLUC(scramblingDisable);
+ SARLUC(radioType);
+ SARLUS(routerId);
+ SARLUCN(_10, 9);
+ SARLUC(txAttenuation);
+ SARLUIA(systemId);
+ SARLUS(globalChecksum);
+ SARLUCN(_11, 4);
+ SARLUS(maxDatagramSize);
+ SARLUS(maxFrameSize);
+ SARLUC(maxRetries);
+ SARLUC(receiveMode);
+ SARLUC(priority);
+ SARLUC(rootOrRepeater);
+ SARLUCN(specifiedRouter, 6);
+ SARLUS(fastPollPeriod);
+ SARLUC(pollDecay);
+ SARLUSA(fastPollDelay);
+ SARLUC(arlThreshold);
+ SARLUC(arlDecay);
+ SARLUCN(_12, 1);
+ SARLUS(specRouterTimeout);
+ SARLUCN(_13, 5);
+
+ /* Scrambled Area */
+ SARLUIA(SID);
+ SARLUCN(encryptionKey, 12);
+ SARLUIA(_14);
+ SARLUSA(waitTime);
+ SARLUSA(lParameter);
+ SARLUCN(_15, 3);
+ SARLUS(headerSize);
+ SARLUS(sectionChecksum);
+
+ SARLUC(registrationMode);
+ SARLUC(registrationFill);
+ SARLUS(pollPeriod);
+ SARLUS(refreshPeriod);
+ SARLSTR(name, 16);
+ SARLUCN(NID, 6);
+ SARLUC(localTalkAddress);
+ SARLUC(codeFormat);
+ SARLUC(numChannels);
+ SARLUC(channel1);
+ SARLUC(channel2);
+ SARLUC(channel3);
+ SARLUC(channel4);
+ SARLUCN(SSCode, 59);
+
+/* SARLUCN( _16, 0x140);
+ */
+ /* Statistics Block - 0x0300 */
+ SARLUC(hostcpuLock);
+ SARLUC(lancpuLock);
+ SARLUCN(resetTime, 18);
+ SARLUIA(numDatagramsTransmitted);
+ SARLUIA(numReTransmissions);
+ SARLUIA(numFramesDiscarded);
+ SARLUIA(numDatagramsReceived);
+ SARLUIA(numDuplicateReceivedFrames);
+ SARLUIA(numDatagramsDiscarded);
+ SARLUS(maxNumReTransmitDatagram);
+ SARLUS(maxNumReTransmitFrames);
+ SARLUS(maxNumConsecutiveDuplicateFrames);
+ /* misaligned here so we have to go to characters */
+ SARLUIA(numBytesTransmitted);
+ SARLUIA(numBytesReceived);
+ SARLUIA(numCRCErrors);
+ SARLUIA(numLengthErrors);
+ SARLUIA(numAbortErrors);
+ SARLUIA(numTXUnderruns);
+ SARLUIA(numRXOverruns);
+ SARLUIA(numHoldOffs);
+ SARLUIA(numFramesTransmitted);
+ SARLUIA(numFramesReceived);
+ SARLUIA(numReceiveFramesLost);
+ SARLUIA(numRXBufferOverflows);
+ SARLUIA(numFramesDiscardedAddrMismatch);
+ SARLUIA(numFramesDiscardedSIDMismatch);
+ SARLUIA(numPollsTransmistted);
+ SARLUIA(numPollAcknowledges);
+ SARLUIA(numStatusTimeouts);
+ SARLUIA(numNACKReceived);
+ SARLUS(auxCmd);
+ SARLUCN(dumpPtr, 4);
+ SARLUC(dumpVal);
+ SARLUC(wireTest);
+
+ /* next 4 seems too long for procfs, over single page ?
+ SARLUCN( _17, 0x86);
+ SARLUCN( txBuffer, 0x800);
+ SARLUCN( rxBuffer, 0x800);
+ SARLUCN( _18, 0x0bff);
+ */
+
+ pos += sprintf(arlan_drive_info + pos, "rxRing\t=\t0x");
+ for (i = 0; i < 0x50; i++)
+ pos += sprintf(arlan_drive_info + pos, "%02x", ((char *) priva->conf)[priva->conf->rxOffset + i]);
+ pos += sprintf(arlan_drive_info + pos, "\n");
+
+ SARLUC(configStatus);
+ SARLUC(_22);
+ SARLUC(progIOCtrl);
+ SARLUC(shareMBase);
+ SARLUC(controlRegister);
+
+ pos += sprintf(arlan_drive_info + pos, " total %d chars\n", pos);
+ if (ctl)
+ if (ctl->procname)
+ pos += sprintf(arlan_drive_info + pos, " driver name : %s\n", ctl->procname);
+final:
+ *lenp = pos;
+
+ if (!write)
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ else
+ {
+ *lenp = 0;
+ return -1;
+ }
+ return retv;
+}
+
+
+static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int i;
+ int retv, pos, devnum;
+ struct arlan_private *priva = NULL;
+
+ pos = 0;
+ devnum = ctl->procname[5] - '0';
+ if (arlan_device[devnum] == NULL)
+ {
+ pos += sprintf(arlan_drive_info + pos, "No device found here \n");
+ goto final;
+ }
+ else
+ priva = arlan_device[devnum]->priv;
+ if (priva == NULL)
+ {
+ printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
+ return -1;
+ }
+ memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
+ SARLUCN(_16, 0xC0);
+ SARLUCN(_17, 0x6A);
+ SARLUCN(_18, 14);
+ SARLUCN(_19, 0x86);
+ SARLUCN(_21, 0x3fd);
+
+final:
+ *lenp = pos;
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return retv;
+}
+
+static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int i;
+ int retv, pos, devnum;
+ struct arlan_private *priva = NULL;
+
+ pos = 0;
+ devnum = ctl->procname[5] - '0';
+ if (arlan_device[devnum] == NULL)
+ {
+ pos += sprintf(arlan_drive_info + pos, "No device found here \n");
+ goto final;
+ }
+ else
+ priva = arlan_device[devnum]->priv;
+ if (priva == NULL)
+ {
+ printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
+ return -1;
+ }
+ memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
+ SARLBNpln(u_char, txBuffer, 0x800);
+final:
+ *lenp = pos;
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return retv;
+}
+
+static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int i;
+ int retv, pos, devnum;
+ struct arlan_private *priva = NULL;
+
+ pos = 0;
+ devnum = ctl->procname[5] - '0';
+ if (arlan_device[devnum] == NULL)
+ {
+ pos += sprintf(arlan_drive_info + pos, "No device found here \n");
+ goto final;
+ } else
+ priva = arlan_device[devnum]->priv;
+ if (priva == NULL)
+ {
+ printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
+ return -1;
+ }
+ memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
+ SARLBNpln(u_char, rxBuffer, 0x800);
+final:
+ *lenp = pos;
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return retv;
+}
+
+static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int i;
+ int retv, pos, devnum;
+ struct arlan_private *priva = NULL;
+
+ pos = 0;
+ devnum = ctl->procname[5] - '0';
+ if (arlan_device[devnum] == NULL)
+ {
+ pos += sprintf(arlan_drive_info + pos, "No device found here \n");
+ goto final;
+ }
+ else
+ priva = arlan_device[devnum]->priv;
+ if (priva == NULL)
+ {
+ printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
+ return -1;
+ }
+ memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
+ SARLBNpln(u_char, _18, 0x800);
+
+final:
+ *lenp = pos;
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return retv;
+}
+
+
+#endif /* #ifdef ARLAN_PROC_SHM_DUMP */
+
+
+static char conf_reset_result[200];
+
+static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int pos = 0;
+ int devnum = ctl->procname[6] - '0';
+ struct arlan_private *priv;
+
+ if (devnum < 0 || devnum > MAX_ARLANS - 1)
+ {
+		printk(KERN_WARNING "arlan: invalid device number in procfs handler\n");
+ return -1;
+ }
+ else if (arlan_device[devnum] != NULL)
+ {
+ priv = arlan_device[devnum]->priv;
+
+ arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_CONF);
+ }
+ else
+ return -1;
+
+ *lenp = pos;
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+}
+
+static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
+ void __user *buffer, size_t * lenp, loff_t *ppos)
+{
+ int pos = 0;
+ int devnum = ctl->procname[5] - '0';
+ struct arlan_private *priv;
+
+ if (devnum < 0 || devnum > MAX_ARLANS - 1)
+ {
+		printk(KERN_WARNING "arlan: invalid device number in procfs handler\n");
+ return -1;
+ }
+ else if (arlan_device[devnum] != NULL)
+ {
+ priv = arlan_device[devnum]->priv;
+ arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_RESET);
+
+ } else
+ return -1;
+ *lenp = pos + 3;
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+}
+
+
+/* Place files in /proc/sys/dev/arlan */
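+/* CTBLN(num, card, nam) builds one sysctl entry that exposes the integer
+   field arlan_conf[card].nam as a root-writable (mode 0600) proc file named
+   "nam" in that card's directory, handled by proc_dointvec. */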
+#define CTBLN(num,card,nam) \
+ { .ctl_name = num,\
+ .procname = #nam,\
+ .data = &(arlan_conf[card].nam),\
+ .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec}
+#ifdef ARLAN_DEBUGGING
+
+#define ARLAN_PROC_DEBUG_ENTRIES \
+ { .ctl_name = 48, .procname = "entry_exit_debug",\
+ .data = &arlan_entry_and_exit_debug,\
+ .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec},\
+ { .ctl_name = 49, .procname = "debug", .data = &arlan_debug,\
+ .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec},
+#else
+#define ARLAN_PROC_DEBUG_ENTRIES
+#endif
+
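+/* ARLAN_SYSCTL_TABLE_TOTAL(cardNo) expands to the complete list of per-card
+   tunables; it is instantiated once in each of the four per-card ctl_table
+   arrays below. */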
+#define ARLAN_SYSCTL_TABLE_TOTAL(cardNo)\
+ CTBLN(1,cardNo,spreadingCode),\
+ CTBLN(2,cardNo, channelNumber),\
+ CTBLN(3,cardNo, scramblingDisable),\
+ CTBLN(4,cardNo, txAttenuation),\
+ CTBLN(5,cardNo, systemId), \
+ CTBLN(6,cardNo, maxDatagramSize),\
+ CTBLN(7,cardNo, maxFrameSize),\
+ CTBLN(8,cardNo, maxRetries),\
+ CTBLN(9,cardNo, receiveMode),\
+ CTBLN(10,cardNo, priority),\
+ CTBLN(11,cardNo, rootOrRepeater),\
+ CTBLN(12,cardNo, SID),\
+ CTBLN(13,cardNo, registrationMode),\
+ CTBLN(14,cardNo, registrationFill),\
+ CTBLN(15,cardNo, localTalkAddress),\
+ CTBLN(16,cardNo, codeFormat),\
+ CTBLN(17,cardNo, numChannels),\
+ CTBLN(18,cardNo, channel1),\
+ CTBLN(19,cardNo, channel2),\
+ CTBLN(20,cardNo, channel3),\
+ CTBLN(21,cardNo, channel4),\
+ CTBLN(22,cardNo, txClear),\
+ CTBLN(23,cardNo, txRetries),\
+ CTBLN(24,cardNo, txRouting),\
+ CTBLN(25,cardNo, txScrambled),\
+ CTBLN(26,cardNo, rxParameter),\
+ CTBLN(27,cardNo, txTimeoutMs),\
+ CTBLN(28,cardNo, waitCardTimeout),\
+ CTBLN(29,cardNo, channelSet), \
+ {.ctl_name = 30, .procname = "name",\
+ .data = arlan_conf[cardNo].siteName,\
+ .maxlen = 16, .mode = 0600, .proc_handler = &proc_dostring},\
+ CTBLN(31,cardNo,waitTime),\
+ CTBLN(32,cardNo,lParameter),\
+ CTBLN(33,cardNo,_15),\
+ CTBLN(34,cardNo,headerSize),\
+ CTBLN(36,cardNo,tx_delay_ms),\
+ CTBLN(37,cardNo,retries),\
+ CTBLN(38,cardNo,ReTransmitPacketMaxSize),\
+ CTBLN(39,cardNo,waitReTransmitPacketMaxSize),\
+ CTBLN(40,cardNo,fastReTransCount),\
+ CTBLN(41,cardNo,driverRetransmissions),\
+ CTBLN(42,cardNo,txAckTimeoutMs),\
+ CTBLN(43,cardNo,registrationInterrupts),\
+ CTBLN(44,cardNo,hardwareType),\
+ CTBLN(45,cardNo,radioType),\
+ CTBLN(46,cardNo,writeEEPROM),\
+ CTBLN(47,cardNo,writeRadioType),\
+ ARLAN_PROC_DEBUG_ENTRIES\
+ CTBLN(50,cardNo,in_speed),\
+ CTBLN(51,cardNo,out_speed),\
+ CTBLN(52,cardNo,in_speed10),\
+ CTBLN(53,cardNo,out_speed10),\
+ CTBLN(54,cardNo,in_speed_max),\
+ CTBLN(55,cardNo,out_speed_max),\
+ CTBLN(56,cardNo,measure_rate),\
+ CTBLN(57,cardNo,pre_Command_Wait),\
+ CTBLN(58,cardNo,rx_tweak1),\
+ CTBLN(59,cardNo,rx_tweak2),\
+ CTBLN(60,cardNo,tx_queue_len),\
+
+
+
+static ctl_table arlan_conf_table0[] =
+{
+ ARLAN_SYSCTL_TABLE_TOTAL(0)
+
+#ifdef ARLAN_PROC_SHM_DUMP
+ {
+ .ctl_name = 150,
+ .procname = "arlan0-txRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_infotxRing,
+ },
+ {
+ .ctl_name = 151,
+ .procname = "arlan0-rxRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_inforxRing,
+ },
+ {
+ .ctl_name = 152,
+ .procname = "arlan0-18",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info18,
+ },
+ {
+ .ctl_name = 153,
+ .procname = "arlan0-ring",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info161719,
+ },
+ {
+ .ctl_name = 154,
+ .procname = "arlan0-shm-cpy",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info,
+ },
+#endif
+ {
+ .ctl_name = 155,
+ .procname = "config0",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_configure
+ },
+ {
+ .ctl_name = 156,
+ .procname = "reset0",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_reset,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table arlan_conf_table1[] =
+{
+
+ ARLAN_SYSCTL_TABLE_TOTAL(1)
+
+#ifdef ARLAN_PROC_SHM_DUMP
+ {
+ .ctl_name = 150,
+ .procname = "arlan1-txRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_infotxRing,
+ },
+ {
+ .ctl_name = 151,
+ .procname = "arlan1-rxRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_inforxRing,
+ },
+ {
+ .ctl_name = 152,
+ .procname = "arlan1-18",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info18,
+ },
+ {
+ .ctl_name = 153,
+ .procname = "arlan1-ring",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info161719,
+ },
+ {
+ .ctl_name = 154,
+ .procname = "arlan1-shm-cpy",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info,
+ },
+#endif
+ {
+ .ctl_name = 155,
+ .procname = "config1",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_configure,
+ },
+ {
+ .ctl_name = 156,
+ .procname = "reset1",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_reset,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table arlan_conf_table2[] =
+{
+
+ ARLAN_SYSCTL_TABLE_TOTAL(2)
+
+#ifdef ARLAN_PROC_SHM_DUMP
+ {
+ .ctl_name = 150,
+ .procname = "arlan2-txRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_infotxRing,
+ },
+ {
+ .ctl_name = 151,
+ .procname = "arlan2-rxRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_inforxRing,
+ },
+ {
+ .ctl_name = 152,
+ .procname = "arlan2-18",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info18,
+ },
+ {
+ .ctl_name = 153,
+ .procname = "arlan2-ring",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info161719,
+ },
+ {
+ .ctl_name = 154,
+ .procname = "arlan2-shm-cpy",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info,
+ },
+#endif
+ {
+ .ctl_name = 155,
+ .procname = "config2",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_configure,
+ },
+ {
+ .ctl_name = 156,
+ .procname = "reset2",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_reset,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table arlan_conf_table3[] =
+{
+
+ ARLAN_SYSCTL_TABLE_TOTAL(3)
+
+#ifdef ARLAN_PROC_SHM_DUMP
+ {
+ .ctl_name = 150,
+ .procname = "arlan3-txRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_infotxRing,
+ },
+ {
+ .ctl_name = 151,
+ .procname = "arlan3-rxRing",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_inforxRing,
+ },
+ {
+ .ctl_name = 152,
+ .procname = "arlan3-18",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info18,
+ },
+ {
+ .ctl_name = 153,
+ .procname = "arlan3-ring",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info161719,
+ },
+ {
+ .ctl_name = 154,
+ .procname = "arlan3-shm-cpy",
+ .data = &arlan_drive_info,
+ .maxlen = ARLAN_STR_SIZE,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_info,
+ },
+#endif
+ {
+ .ctl_name = 155,
+ .procname = "config3",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_configure,
+ },
+ {
+ .ctl_name = 156,
+ .procname = "reset3",
+ .data = &conf_reset_result,
+ .maxlen = 100,
+ .mode = 0400,
+ .proc_handler = &arlan_sysctl_reset,
+ },
+ { .ctl_name = 0 }
+};
+
+
+
+static ctl_table arlan_table[] =
+{
+ {
+ .ctl_name = 0,
+ .procname = "arlan0",
+ .maxlen = 0,
+ .mode = 0600,
+ .child = arlan_conf_table0,
+ },
+ {
+ .ctl_name = 0,
+ .procname = "arlan1",
+ .maxlen = 0,
+ .mode = 0600,
+ .child = arlan_conf_table1,
+ },
+ {
+ .ctl_name = 0,
+ .procname = "arlan2",
+ .maxlen = 0,
+ .mode = 0600,
+ .child = arlan_conf_table2,
+ },
+ {
+ .ctl_name = 0,
+ .procname = "arlan3",
+ .maxlen = 0,
+ .mode = 0600,
+ .child = arlan_conf_table3,
+ },
+ { .ctl_name = 0 }
+};
+
+#else
+
+static ctl_table arlan_table[MAX_ARLANS + 1] =
+{
+ { .ctl_name = 0 }
+};
+#endif
+#else
+
+static ctl_table arlan_table[MAX_ARLANS + 1] =
+{
+ { .ctl_name = 0 }
+};
+#endif
+
+
+// static int mmtu = 1234;
+
+static ctl_table arlan_root_table[] =
+{
+ {
+ .ctl_name = 254,
+ .procname = "arlan",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = arlan_table,
+ },
+ { .ctl_name = 0 }
+};
+
+/* Make sure that /proc/sys/dev is there */
+//static ctl_table arlan_device_root_table[] =
+//{
+// {CTL_DEV, "dev", NULL, 0, 0555, arlan_root_table},
+// {0}
+//};
+
+
+#ifdef CONFIG_PROC_FS
+static struct ctl_table_header *arlan_device_sysctl_header;
+
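+/* Register the arlan sysctl tree and give each detected card's directory a
+   non-zero ctl_name; entries left at ctl_name == 0 terminate a ctl_table, so
+   presumably only detected cards end up visible. */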
+int __init init_arlan_proc(void)
+{
+
+ int i = 0;
+ if (arlan_device_sysctl_header)
+ return 0;
+ for (i = 0; i < MAX_ARLANS && arlan_device[i]; i++)
+ arlan_table[i].ctl_name = i + 1;
+ arlan_device_sysctl_header = register_sysctl_table(arlan_root_table, 0);
+ if (!arlan_device_sysctl_header)
+ return -1;
+
+ return 0;
+
+}
+
+void __exit cleanup_arlan_proc(void)
+{
+ unregister_sysctl_table(arlan_device_sysctl_header);
+ arlan_device_sysctl_header = NULL;
+
+}
+#endif
diff --git a/drivers/net/wireless/arlan.h b/drivers/net/wireless/arlan.h
new file mode 100644
index 000000000000..70a6d7b83c4a
--- /dev/null
+++ b/drivers/net/wireless/arlan.h
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 1997 Cullen Jennings
+ * Copyright (C) 1998 Elmer.Joandi@ut.ee, +37-255-13500
+ * GNU General Public License applies
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/if_arp.h> /* For ARPHRD_ETHER */
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+
+//#define ARLAN_DEBUGGING 1
+
+#define ARLAN_PROC_INTERFACE
+#define MAX_ARLANS 4 /* not more than 4 ! */
+#define ARLAN_PROC_SHM_DUMP /* shows all card registers, makes driver way larger */
+
+#define ARLAN_MAX_MULTICAST_ADDRS 16
+#define ARLAN_RCV_CLEAN 0
+#define ARLAN_RCV_PROMISC 1
+#define ARLAN_RCV_CONTROL 2
+
+#ifdef CONFIG_PROC_FS
+extern int init_arlan_proc(void);
+extern void cleanup_arlan_proc(void);
+#else
+#define init_arlan_proc() ({ 0; })
+#define cleanup_arlan_proc() do { } while (0)
+#endif
+
+extern struct net_device *arlan_device[MAX_ARLANS];
+extern int arlan_debug;
+extern int arlan_entry_debug;
+extern int arlan_exit_debug;
+extern int testMemory;
+extern int arlan_command(struct net_device * dev, int command);
+
+#define SIDUNKNOWN -1
+#define radioNodeIdUNKNOWN -1
+#define irqUNKNOWN 0
+#define debugUNKNOWN 0
+#define testMemoryUNKNOWN 1
+#define spreadingCodeUNKNOWN 0
+#define channelNumberUNKNOWN 0
+#define channelSetUNKNOWN 0
+#define systemIdUNKNOWN -1
+#define registrationModeUNKNOWN -1
+
+
+#define IFDEBUG( L ) if ( (L) & arlan_debug )
+#define ARLAN_FAKE_HDR_LEN 12
+
+#ifdef ARLAN_DEBUGGING
+ #define DEBUG 1
+ #define ARLAN_ENTRY_EXIT_DEBUGGING 1
+ #define ARLAN_DEBUG(a,b) printk(KERN_DEBUG a, b)
+#else
+ #define ARLAN_DEBUG(a,b)
+#endif
+
+#define ARLAN_SHMEM_SIZE 0x2000
+
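+/* Host-side view of the card's shared-memory window (ARLAN_SHMEM_SIZE bytes).
+   The offsets in the comments below are positions inside that window; the
+   fields are volatile because the card's own CPU updates them asynchronously. */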
+struct arlan_shmem
+{
+ /* Header Signature */
+ volatile char textRegion[48];
+ volatile u_char resetFlag;
+ volatile u_char diagnosticInfo;
+ volatile u_short diagnosticOffset;
+ volatile u_char _1[12];
+ volatile u_char lanCardNodeId[6];
+ volatile u_char broadcastAddress[6];
+ volatile u_char hardwareType;
+ volatile u_char majorHardwareVersion;
+ volatile u_char minorHardwareVersion;
+	volatile u_char radioModule;	// shows EEPROM, can be overridden at 0x111
+	volatile u_char defaultChannelSet;	// shows EEPROM, can be overridden at 0x10A
+ volatile u_char _2[47];
+
+ /* Control/Status Block - 0x0080 */
+ volatile u_char interruptInProgress; /* not used by lancpu */
+ volatile u_char cntrlRegImage; /* not used by lancpu */
+ volatile u_char _3[13];
+ volatile u_char dumpByte;
+ volatile u_char commandByte; /* non-zero = active */
+ volatile u_char commandParameter[15];
+
+ /* Receive Status - 0x00a0 */
+ volatile u_char rxStatus; /* 1- data, 2-control, 0xff - registr change */
+ volatile u_char rxFrmType;
+ volatile u_short rxOffset;
+ volatile u_short rxLength;
+ volatile u_char rxSrc[6];
+ volatile u_char rxBroadcastFlag;
+ volatile u_char rxQuality;
+ volatile u_char scrambled;
+ volatile u_char _4[1];
+
+ /* Transmit Status - 0x00b0 */
+ volatile u_char txStatus;
+ volatile u_char txAckQuality;
+ volatile u_char numRetries;
+ volatile u_char _5[14];
+ volatile u_char registeredRouter[6];
+ volatile u_char backboneRouter[6];
+ volatile u_char registrationStatus;
+ volatile u_char configuredStatusFlag;
+ volatile u_char _6[1];
+ volatile u_char ultimateDestAddress[6];
+ volatile u_char immedDestAddress[6];
+ volatile u_char immedSrcAddress[6];
+ volatile u_short rxSequenceNumber;
+ volatile u_char assignedLocaltalkAddress;
+ volatile u_char _7[27];
+
+ /* System Parameter Block */
+
+ /* - Driver Parameters (Novell Specific) */
+
+ volatile u_short txTimeout;
+ volatile u_short transportTime;
+ volatile u_char _8[4];
+
+ /* - Configuration Parameters */
+ volatile u_char irqLevel;
+ volatile u_char spreadingCode;
+ volatile u_char channelSet;
+ volatile u_char channelNumber;
+ volatile u_short radioNodeId;
+ volatile u_char _9[2];
+ volatile u_char scramblingDisable;
+ volatile u_char radioType;
+ volatile u_short routerId;
+ volatile u_char _10[9];
+ volatile u_char txAttenuation;
+ volatile u_char systemId[4];
+ volatile u_short globalChecksum;
+ volatile u_char _11[4];
+ volatile u_short maxDatagramSize;
+ volatile u_short maxFrameSize;
+ volatile u_char maxRetries;
+ volatile u_char receiveMode;
+ volatile u_char priority;
+ volatile u_char rootOrRepeater;
+ volatile u_char specifiedRouter[6];
+ volatile u_short fastPollPeriod;
+ volatile u_char pollDecay;
+ volatile u_char fastPollDelay[2];
+ volatile u_char arlThreshold;
+ volatile u_char arlDecay;
+ volatile u_char _12[1];
+ volatile u_short specRouterTimeout;
+ volatile u_char _13[5];
+
+ /* Scrambled Area */
+ volatile u_char SID[4];
+ volatile u_char encryptionKey[12];
+ volatile u_char _14[2];
+ volatile u_char waitTime[2];
+ volatile u_char lParameter[2];
+ volatile u_char _15[3];
+ volatile u_short headerSize;
+ volatile u_short sectionChecksum;
+
+ volatile u_char registrationMode;
+ volatile u_char registrationFill;
+ volatile u_short pollPeriod;
+ volatile u_short refreshPeriod;
+ volatile u_char name[16];
+ volatile u_char NID[6];
+ volatile u_char localTalkAddress;
+ volatile u_char codeFormat;
+ volatile u_char numChannels;
+ volatile u_char channel1;
+ volatile u_char channel2;
+ volatile u_char channel3;
+ volatile u_char channel4;
+ volatile u_char SSCode[59];
+
+ volatile u_char _16[0xC0];
+ volatile u_short auxCmd;
+ volatile u_char dumpPtr[4];
+ volatile u_char dumpVal;
+ volatile u_char _17[0x6A];
+ volatile u_char wireTest;
+ volatile u_char _18[14];
+
+ /* Statistics Block - 0x0300 */
+ volatile u_char hostcpuLock;
+ volatile u_char lancpuLock;
+ volatile u_char resetTime[18];
+
+ volatile u_char numDatagramsTransmitted[4];
+ volatile u_char numReTransmissions[4];
+ volatile u_char numFramesDiscarded[4];
+ volatile u_char numDatagramsReceived[4];
+ volatile u_char numDuplicateReceivedFrames[4];
+ volatile u_char numDatagramsDiscarded[4];
+
+ volatile u_short maxNumReTransmitDatagram;
+ volatile u_short maxNumReTransmitFrames;
+ volatile u_short maxNumConsecutiveDuplicateFrames;
+ /* misaligned here so we have to go to characters */
+
+ volatile u_char numBytesTransmitted[4];
+ volatile u_char numBytesReceived[4];
+ volatile u_char numCRCErrors[4];
+ volatile u_char numLengthErrors[4];
+ volatile u_char numAbortErrors[4];
+ volatile u_char numTXUnderruns[4];
+ volatile u_char numRXOverruns[4];
+ volatile u_char numHoldOffs[4];
+ volatile u_char numFramesTransmitted[4];
+ volatile u_char numFramesReceived[4];
+ volatile u_char numReceiveFramesLost[4];
+ volatile u_char numRXBufferOverflows[4];
+ volatile u_char numFramesDiscardedAddrMismatch[4];
+ volatile u_char numFramesDiscardedSIDMismatch[4];
+ volatile u_char numPollsTransmistted[4];
+ volatile u_char numPollAcknowledges[4];
+ volatile u_char numStatusTimeouts[4];
+ volatile u_char numNACKReceived[4];
+
+ volatile u_char _19[0x86];
+
+ volatile u_char txBuffer[0x800];
+ volatile u_char rxBuffer[0x800];
+
+ volatile u_char _20[0x800];
+ volatile u_char _21[0x3fb];
+ volatile u_char configStatus;
+ volatile u_char _22;
+ volatile u_char progIOCtrl;
+ volatile u_char shareMBase;
+ volatile u_char controlRegister;
+};
+
+struct arlan_conf_stru {
+ int spreadingCode;
+ int channelSet;
+ int channelNumber;
+ int scramblingDisable;
+ int txAttenuation;
+ int systemId;
+ int maxDatagramSize;
+ int maxFrameSize;
+ int maxRetries;
+ int receiveMode;
+ int priority;
+ int rootOrRepeater;
+ int SID;
+ int radioNodeId;
+ int registrationMode;
+ int registrationFill;
+ int localTalkAddress;
+ int codeFormat;
+ int numChannels;
+ int channel1;
+ int channel2;
+ int channel3;
+ int channel4;
+ int txClear;
+ int txRetries;
+ int txRouting;
+ int txScrambled;
+ int rxParameter;
+ int txTimeoutMs;
+ int txAckTimeoutMs;
+ int waitCardTimeout;
+ int waitTime;
+ int lParameter;
+ int _15;
+ int headerSize;
+ int retries;
+ int tx_delay_ms;
+ int waitReTransmitPacketMaxSize;
+ int ReTransmitPacketMaxSize;
+ int fastReTransCount;
+ int driverRetransmissions;
+ int registrationInterrupts;
+ int hardwareType;
+ int radioType;
+ int writeRadioType;
+ int writeEEPROM;
+ char siteName[17];
+ int measure_rate;
+ int in_speed;
+ int out_speed;
+ int in_speed10;
+ int out_speed10;
+ int in_speed_max;
+ int out_speed_max;
+ int pre_Command_Wait;
+ int rx_tweak1;
+ int rx_tweak2;
+ int tx_queue_len;
+};
+
+extern struct arlan_conf_stru arlan_conf[MAX_ARLANS];
+
+struct TxParam
+{
+ volatile short offset;
+ volatile short length;
+ volatile u_char dest[6];
+ volatile unsigned char clear;
+ volatile unsigned char retries;
+ volatile unsigned char routing;
+ volatile unsigned char scrambled;
+};
+
+#define TX_RING_SIZE 2
+/* Information that needs to be kept for each board. */
+struct arlan_private {
+ struct net_device_stats stats;
+ struct arlan_shmem __iomem * card;
+ struct arlan_shmem * conf;
+
+ struct arlan_conf_stru * Conf;
+ int bad;
+ int reset;
+ unsigned long lastReset;
+ struct timer_list timer;
+ struct timer_list tx_delay_timer;
+ struct timer_list tx_retry_timer;
+ struct timer_list rx_check_timer;
+
+ int registrationLostCount;
+ int reRegisterExp;
+ int irq_test_done;
+
+ struct TxParam txRing[TX_RING_SIZE];
+ char reTransmitBuff[0x800];
+ int txLast;
+ unsigned ReTransmitRequested;
+ unsigned long tx_done_delayed;
+ unsigned long registrationLastSeen;
+
+ unsigned long tx_last_sent;
+ unsigned long tx_last_cleared;
+ unsigned long retransmissions;
+ unsigned long interrupt_ack_requested;
+ spinlock_t lock;
+ unsigned long waiting_command_mask;
+ unsigned long card_polling_interval;
+ unsigned long last_command_buff_free_time;
+
+ int under_reset;
+ int under_config;
+ int rx_command_given;
+ int tx_command_given;
+ unsigned long interrupt_processing_active;
+ unsigned long last_rx_int_ack_time;
+ unsigned long in_bytes;
+ unsigned long out_bytes;
+ unsigned long in_time;
+ unsigned long out_time;
+ unsigned long in_time10;
+ unsigned long out_time10;
+ unsigned long in_bytes10;
+ unsigned long out_bytes10;
+ int init_etherdev_alloc;
+};
+
+
+
+#define ARLAN_CLEAR 0x00
+#define ARLAN_RESET 0x01
+#define ARLAN_CHANNEL_ATTENTION 0x02
+#define ARLAN_INTERRUPT_ENABLE 0x04
+#define ARLAN_CLEAR_INTERRUPT 0x08
+#define ARLAN_POWER 0x40
+#define ARLAN_ACCESS 0x80
+
+#define ARLAN_COM_CONF 0x01
+#define ARLAN_COM_RX_ENABLE 0x03
+#define ARLAN_COM_RX_ABORT 0x04
+#define ARLAN_COM_TX_ENABLE 0x05
+#define ARLAN_COM_TX_ABORT 0x06
+#define ARLAN_COM_NOP 0x07
+#define ARLAN_COM_STANDBY 0x08
+#define ARLAN_COM_ACTIVATE 0x09
+#define ARLAN_COM_GOTO_SLOW_POLL 0x0a
+#define ARLAN_COM_INT 0x80
+
+
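+/* Accessors into the two-entry TX parameter ring (TX_RING_SIZE == 2):
+   TXHEAD/TXTAIL pick the fixed slots, TXLAST the slot currently indexed by
+   txLast. */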
+#define TXLAST(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[((struct arlan_private *)netdev_priv(dev))->txLast])
+#define TXHEAD(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[0])
+#define TXTAIL(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[1])
+
+#define TXBuffStart(dev) offsetof(struct arlan_shmem, txBuffer)
+#define TXBuffEnd(dev) offsetof(struct arlan_shmem, rxBuffer)
+
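+/* Shared memory is only touched through the accessors below: the multi-byte
+   READSHM/WRITESHM variants copy through memcpy_fromio/memcpy_toio
+   temporaries, the single-value ones use readb/writeb and friends, so the
+   __iomem struct fields are never dereferenced directly. */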
+#define READSHM(to,from,atype) {\
+ atype tmp;\
+ memcpy_fromio(&(tmp),&(from),sizeof(atype));\
+ to = tmp;\
+ }
+
+#define READSHMEM(from,atype)\
+ atype from; \
+ READSHM(from, arlan->from, atype);
+
+#define WRITESHM(to,from,atype) \
+ { atype tmpSHM = from;\
+ memcpy_toio(&(to),&tmpSHM,sizeof(atype));\
+ }
+
+#define DEBUGSHM(levelSHM,stringSHM,stuff,atype) \
+ { atype tmpSHM; \
+ memcpy_fromio(&tmpSHM,&(stuff),sizeof(atype));\
+ IFDEBUG(levelSHM) printk(stringSHM,tmpSHM);\
+ }
+
+#define WRITESHMB(to, val) \
+ writeb(val,&(to))
+#define READSHMB(to) \
+ readb(&(to))
+#define WRITESHMS(to, val) \
+ writew(val,&(to))
+#define READSHMS(to) \
+ readw(&(to))
+#define WRITESHMI(to, val) \
+ writel(val,&(to))
+#define READSHMI(to) \
+ readl(&(to))
+
+
+
+
+
+#define registrationBad(dev)\
+ ( ( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationMode) > 0) && \
+ ( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationStatus) == 0) )
+
+
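+/* writeControlRegister keeps a shadow of the last value written in
+   cntrlRegImage and readControlRegister reads that shadow back, which
+   suggests the real control register is write-only. */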
+#define readControlRegister(dev)\
+ READSHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage)
+
+#define writeControlRegister(dev, v){\
+ WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage ,((v) &0xF) );\
+ WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->controlRegister ,(v) );}
+
+
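+/* Toggling ARLAN_CHANNEL_ATTENTION in the control register (whatever its
+   current state) appears to be what raises the "channel attention" signal
+   that interrupts the card's lancpu. */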
+#define arlan_interrupt_lancpu(dev) {\
+ int cr; \
+ \
+ cr = readControlRegister(dev);\
+ if (cr & ARLAN_CHANNEL_ATTENTION){ \
+ writeControlRegister(dev, (cr & ~ARLAN_CHANNEL_ATTENTION));\
+ }else \
+ writeControlRegister(dev, (cr | ARLAN_CHANNEL_ATTENTION));\
+}
+
+#define clearChannelAttention(dev){ \
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_CHANNEL_ATTENTION);}
+#define setHardwareReset(dev) {\
+ writeControlRegister(dev,readControlRegister(dev) | ARLAN_RESET);}
+#define clearHardwareReset(dev) {\
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_RESET);}
+#define setInterruptEnable(dev){\
+ writeControlRegister(dev,readControlRegister(dev) | ARLAN_INTERRUPT_ENABLE) ;}
+#define clearInterruptEnable(dev){\
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_INTERRUPT_ENABLE) ;}
+#define setClearInterrupt(dev){\
+ writeControlRegister(dev,readControlRegister(dev) | ARLAN_CLEAR_INTERRUPT) ;}
+#define clearClearInterrupt(dev){\
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_CLEAR_INTERRUPT);}
+#define setPowerOff(dev){\
+	writeControlRegister(dev,readControlRegister(dev) | (ARLAN_POWER | ARLAN_ACCESS));\
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_ACCESS);}
+#define setPowerOn(dev){\
+ writeControlRegister(dev,readControlRegister(dev) & ~(ARLAN_POWER)); }
+#define arlan_lock_card_access(dev){\
+ writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_ACCESS);}
+#define arlan_unlock_card_access(dev){\
+ writeControlRegister(dev,readControlRegister(dev) | ARLAN_ACCESS ); }
+
+
+
+
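+/* Driver command bits, OR-ed together and passed to arlan_command(); they
+   presumably accumulate in arlan_private.waiting_command_mask until the card
+   can accept them. */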
+#define ARLAN_COMMAND_RX 0x000001
+#define ARLAN_COMMAND_NOOP 0x000002
+#define ARLAN_COMMAND_NOOPINT 0x000004
+#define ARLAN_COMMAND_TX 0x000008
+#define ARLAN_COMMAND_CONF 0x000010
+#define ARLAN_COMMAND_RESET 0x000020
+#define ARLAN_COMMAND_TX_ABORT 0x000040
+#define ARLAN_COMMAND_RX_ABORT 0x000080
+#define ARLAN_COMMAND_POWERDOWN 0x000100
+#define ARLAN_COMMAND_POWERUP 0x000200
+#define ARLAN_COMMAND_SLOW_POLL 0x000400
+#define ARLAN_COMMAND_ACTIVATE 0x000800
+#define ARLAN_COMMAND_INT_ACK 0x001000
+#define ARLAN_COMMAND_INT_ENABLE 0x002000
+#define ARLAN_COMMAND_WAIT_NOW 0x004000
+#define ARLAN_COMMAND_LONG_WAIT_NOW 0x008000
+#define ARLAN_COMMAND_STANDBY 0x010000
+#define ARLAN_COMMAND_INT_RACK 0x020000
+#define ARLAN_COMMAND_INT_RENABLE 0x040000
+#define ARLAN_COMMAND_CONF_WAIT 0x080000
+#define ARLAN_COMMAND_TBUSY_CLEAR 0x100000
+#define ARLAN_COMMAND_CLEAN_AND_CONF (ARLAN_COMMAND_TX_ABORT\
+ | ARLAN_COMMAND_RX_ABORT\
+ | ARLAN_COMMAND_CONF)
+#define ARLAN_COMMAND_CLEAN_AND_RESET (ARLAN_COMMAND_TX_ABORT\
+ | ARLAN_COMMAND_RX_ABORT\
+ | ARLAN_COMMAND_RESET)
+
+
+
+#define ARLAN_DEBUG_CHAIN_LOCKS 0x00001
+#define ARLAN_DEBUG_RESET 0x00002
+#define ARLAN_DEBUG_TIMING 0x00004
+#define ARLAN_DEBUG_CARD_STATE 0x00008
+#define ARLAN_DEBUG_TX_CHAIN 0x00010
+#define ARLAN_DEBUG_MULTICAST 0x00020
+#define ARLAN_DEBUG_HEADER_DUMP 0x00040
+#define ARLAN_DEBUG_INTERRUPT 0x00080
+#define ARLAN_DEBUG_STARTUP 0x00100
+#define ARLAN_DEBUG_SHUTDOWN 0x00200
+
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
new file mode 100644
index 000000000000..18a7d38d2a13
--- /dev/null
+++ b/drivers/net/wireless/atmel.c
@@ -0,0 +1,4272 @@
+/*** -*- linux-c -*- **********************************************************
+
+ Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
+
+ Copyright 2000-2001 ATMEL Corporation.
+ Copyright 2003-2004 Simon Kelley.
+
+ This code was developed from version 2.1.1 of the Atmel drivers,
+ released by Atmel corp. under the GPL in December 2002. It also
+ includes code from the Linux aironet drivers (C) Benjamin Reed,
+ and the Linux PCMCIA package, (C) David Hinds and the Linux wireless
+ extensions, (C) Jean Tourrilhes.
+
+ The firmware module for reading the MAC address of the card comes from
+ net.russotto.AtmelMACFW, written by Matthew T. Russotto and copyright
+ by him. net.russotto.AtmelMACFW is used under the GPL license version 2.
+ This file contains the module in binary form and, under the terms
+ of the GPL, in source form. The source is located at the end of the file.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This software is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Atmel wireless lan drivers; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ For all queries about this code, please contact the current author,
+ Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
+
+ Credit is due to HP UK and Cambridge Online Systems Ltd for supplying
+ hardware used during development of this driver.
+
+******************************************************************************/
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/byteorder/generic.h>
+#include <linux/crc32.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/firmware.h>
+#include "ieee802_11.h"
+#include "atmel.h"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 96
+
+MODULE_AUTHOR("Simon Kelley");
+MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards");
+
+/* The name of the firmware file to be loaded;
+   overrides any automatic selection */
+static char *firmware = NULL;
+module_param(firmware, charp, 0);
+
+/* table of firmware file names */
+static struct {
+ AtmelFWType fw_type;
+ const char *fw_file;
+ const char *fw_file_ext;
+} fw_table[] = {
+ { ATMEL_FW_TYPE_502, "atmel_at76c502", "bin" },
+ { ATMEL_FW_TYPE_502D, "atmel_at76c502d", "bin" },
+ { ATMEL_FW_TYPE_502E, "atmel_at76c502e", "bin" },
+ { ATMEL_FW_TYPE_502_3COM, "atmel_at76c502_3com", "bin" },
+ { ATMEL_FW_TYPE_504, "atmel_at76c504", "bin" },
+ { ATMEL_FW_TYPE_504_2958, "atmel_at76c504_2958", "bin" },
+ { ATMEL_FW_TYPE_504A_2958,"atmel_at76c504a_2958","bin" },
+ { ATMEL_FW_TYPE_506, "atmel_at76c506", "bin" },
+ { ATMEL_FW_TYPE_NONE, NULL, NULL }
+};
+
+#define MAX_SSID_LENGTH 32
+#define MGMT_JIFFIES (256 * HZ / 100)
+
+#define MAX_BSS_ENTRIES 64
+
+/* registers */
+#define GCR 0x00 // (SIR0) General Configuration Register
+#define BSR 0x02 // (SIR1) Bank Switching Select Register
+#define AR 0x04
+#define DR 0x08
+#define MR1 0x12 // Mirror Register 1
+#define MR2 0x14 // Mirror Register 2
+#define MR3 0x16 // Mirror Register 3
+#define MR4 0x18 // Mirror Register 4
+
+#define GPR1 0x0c
+#define GPR2 0x0e
+#define GPR3 0x10
+//
+// Constants for the GCR register.
+//
+#define GCR_REMAP 0x0400 // Remap internal SRAM to 0
+#define GCR_SWRES 0x0080 // BIU reset (ARM and PAI are NOT reset)
+#define GCR_CORES 0x0060 // Core Reset (ARM and PAI are reset)
+#define GCR_ENINT 0x0002 // Enable Interrupts
+#define GCR_ACKINT 0x0008 // Acknowledge Interrupts
+
+#define BSS_SRAM 0x0200 // AMBA module selection --> SRAM
+#define BSS_IRAM 0x0100 // AMBA module selection --> IRAM
+//
+// Constants for the MR registers.
+//
+#define MAC_INIT_COMPLETE 0x0001 // MAC init has been completed
+#define MAC_BOOT_COMPLETE 0x0010 // MAC boot has been completed
+#define MAC_INIT_OK               0x0002 // MAC init completed successfully
+
+#define C80211_SUBTYPE_MGMT_ASS_REQUEST 0x00
+#define C80211_SUBTYPE_MGMT_ASS_RESPONSE 0x10
+#define C80211_SUBTYPE_MGMT_REASS_REQUEST 0x20
+#define C80211_SUBTYPE_MGMT_REASS_RESPONSE 0x30
+#define C80211_SUBTYPE_MGMT_ProbeRequest 0x40
+#define C80211_SUBTYPE_MGMT_ProbeResponse 0x50
+#define C80211_SUBTYPE_MGMT_BEACON 0x80
+#define C80211_SUBTYPE_MGMT_ATIM 0x90
+#define C80211_SUBTYPE_MGMT_DISASSOSIATION 0xA0
+#define C80211_SUBTYPE_MGMT_Authentication 0xB0
+#define C80211_SUBTYPE_MGMT_Deauthentication 0xC0
+
+#define C80211_MGMT_AAN_OPENSYSTEM 0x0000
+#define C80211_MGMT_AAN_SHAREDKEY 0x0001
+
+#define C80211_MGMT_CAPABILITY_ESS 0x0001 // see 802.11 p.58
+#define C80211_MGMT_CAPABILITY_IBSS 0x0002 // - " -
+#define C80211_MGMT_CAPABILITY_CFPollable 0x0004 // - " -
+#define C80211_MGMT_CAPABILITY_CFPollRequest 0x0008 // - " -
+#define C80211_MGMT_CAPABILITY_Privacy 0x0010 // - " -
+
+#define C80211_MGMT_SC_Success 0
+#define C80211_MGMT_SC_Unspecified 1
+#define C80211_MGMT_SC_SupportCapabilities 10
+#define C80211_MGMT_SC_ReassDenied 11
+#define C80211_MGMT_SC_AssDenied 12
+#define C80211_MGMT_SC_AuthAlgNotSupported 13
+#define C80211_MGMT_SC_AuthTransSeqNumError 14
+#define C80211_MGMT_SC_AuthRejectChallenge 15
+#define C80211_MGMT_SC_AuthRejectTimeout 16
+#define C80211_MGMT_SC_AssDeniedHandleAP 17
+#define C80211_MGMT_SC_AssDeniedBSSRate 18
+
+#define C80211_MGMT_ElementID_SSID 0
+#define C80211_MGMT_ElementID_SupportedRates 1
+#define C80211_MGMT_ElementID_ChallengeText 16
+#define C80211_MGMT_CAPABILITY_ShortPreamble 0x0020
+
+#define MIB_MAX_DATA_BYTES 212
+#define MIB_HEADER_SIZE 4 /* first four fields */
+
+struct get_set_mib {
+ u8 type;
+ u8 size;
+ u8 index;
+ u8 reserved;
+ u8 data[MIB_MAX_DATA_BYTES];
+};
+
+struct rx_desc {
+ u32 Next;
+ u16 MsduPos;
+ u16 MsduSize;
+
+ u8 State;
+ u8 Status;
+ u8 Rate;
+ u8 Rssi;
+ u8 LinkQuality;
+ u8 PreambleType;
+ u16 Duration;
+ u32 RxTime;
+
+};
+
+#define RX_DESC_FLAG_VALID 0x80
+#define RX_DESC_FLAG_CONSUMED 0x40
+#define RX_DESC_FLAG_IDLE 0x00
+
+#define RX_STATUS_SUCCESS 0x00
+
+#define RX_DESC_MSDU_POS_OFFSET 4
+#define RX_DESC_MSDU_SIZE_OFFSET 6
+#define RX_DESC_FLAGS_OFFSET 8
+#define RX_DESC_STATUS_OFFSET 9
+#define RX_DESC_RSSI_OFFSET 11
+#define RX_DESC_LINK_QUALITY_OFFSET 12
+#define RX_DESC_PREAMBLE_TYPE_OFFSET 13
+#define RX_DESC_DURATION_OFFSET 14
+#define RX_DESC_RX_TIME_OFFSET 16
+
+
+struct tx_desc {
+ u32 NextDescriptor;
+ u16 TxStartOfFrame;
+ u16 TxLength;
+
+ u8 TxState;
+ u8 TxStatus;
+ u8 RetryCount;
+
+ u8 TxRate;
+
+ u8 KeyIndex;
+ u8 ChiperType;
+ u8 ChipreLength;
+ u8 Reserved1;
+
+ u8 Reserved;
+ u8 PacketType;
+ u16 HostTxLength;
+
+};
+
+
+#define TX_DESC_NEXT_OFFSET 0
+#define TX_DESC_POS_OFFSET 4
+#define TX_DESC_SIZE_OFFSET 6
+#define TX_DESC_FLAGS_OFFSET 8
+#define TX_DESC_STATUS_OFFSET 9
+#define TX_DESC_RETRY_OFFSET 10
+#define TX_DESC_RATE_OFFSET 11
+#define TX_DESC_KEY_INDEX_OFFSET 12
+#define TX_DESC_CIPHER_TYPE_OFFSET 13
+#define TX_DESC_CIPHER_LENGTH_OFFSET 14
+#define TX_DESC_PACKET_TYPE_OFFSET 17
+#define TX_DESC_HOST_LENGTH_OFFSET 18
+
+
+
+///////////////////////////////////////////////////////
+// Host-MAC interface
+///////////////////////////////////////////////////////
+
+#define TX_STATUS_SUCCESS 0x00
+
+#define TX_FIRM_OWN 0x80
+#define TX_DONE 0x40
+
+
+#define TX_ERROR 0x01
+
+#define TX_PACKET_TYPE_DATA 0x01
+#define TX_PACKET_TYPE_MGMT 0x02
+
+#define ISR_EMPTY 0x00 // no bits set in ISR
+#define ISR_TxCOMPLETE 0x01 // packet transmitted
+#define ISR_RxCOMPLETE 0x02 // packet received
+#define ISR_RxFRAMELOST 0x04 // Rx Frame lost
+#define ISR_FATAL_ERROR 0x08 // Fatal error
+#define ISR_COMMAND_COMPLETE 0x10 // command completed
+#define ISR_OUT_OF_RANGE        0x20   // out of range
+#define ISR_IBSS_MERGE 0x40 // (4.1.2.30): IBSS merge
+#define ISR_GENERIC_IRQ 0x80
+
+
+#define Local_Mib_Type 0x01
+#define Mac_Address_Mib_Type 0x02
+#define Mac_Mib_Type 0x03
+#define Statistics_Mib_Type 0x04
+#define Mac_Mgmt_Mib_Type 0x05
+#define Mac_Wep_Mib_Type 0x06
+#define Phy_Mib_Type 0x07
+#define Multi_Domain_MIB 0x08
+
+#define MAC_MGMT_MIB_CUR_BSSID_POS 14
+#define MAC_MIB_FRAG_THRESHOLD_POS 8
+#define MAC_MIB_RTS_THRESHOLD_POS 10
+#define MAC_MIB_SHORT_RETRY_POS 16
+#define MAC_MIB_LONG_RETRY_POS 17
+#define MAC_MIB_SHORT_RETRY_LIMIT_POS 16
+#define MAC_MGMT_MIB_BEACON_PER_POS 0
+#define MAC_MGMT_MIB_STATION_ID_POS 6
+#define MAC_MGMT_MIB_CUR_PRIVACY_POS 11
+#define MAC_MGMT_MIB_CUR_BSSID_POS 14
+#define MAC_MGMT_MIB_PS_MODE_POS 53
+#define MAC_MGMT_MIB_LISTEN_INTERVAL_POS 54
+#define MAC_MGMT_MIB_MULTI_DOMAIN_IMPLEMENTED 56
+#define MAC_MGMT_MIB_MULTI_DOMAIN_ENABLED 57
+#define PHY_MIB_CHANNEL_POS 14
+#define PHY_MIB_RATE_SET_POS 20
+#define PHY_MIB_REG_DOMAIN_POS 26
+#define LOCAL_MIB_AUTO_TX_RATE_POS 3
+#define LOCAL_MIB_SSID_SIZE 5
+#define LOCAL_MIB_TX_PROMISCUOUS_POS 6
+#define LOCAL_MIB_TX_MGMT_RATE_POS 7
+#define LOCAL_MIB_TX_CONTROL_RATE_POS 8
+#define LOCAL_MIB_PREAMBLE_TYPE 9
+#define MAC_ADDR_MIB_MAC_ADDR_POS 0
+
+
+#define CMD_Set_MIB_Vars 0x01
+#define CMD_Get_MIB_Vars 0x02
+#define CMD_Scan 0x03
+#define CMD_Join 0x04
+#define CMD_Start 0x05
+#define CMD_EnableRadio 0x06
+#define CMD_DisableRadio 0x07
+#define CMD_SiteSurvey 0x0B
+
+#define CMD_STATUS_IDLE 0x00
+#define CMD_STATUS_COMPLETE 0x01
+#define CMD_STATUS_UNKNOWN 0x02
+#define CMD_STATUS_INVALID_PARAMETER 0x03
+#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04
+#define CMD_STATUS_TIME_OUT 0x07
+#define CMD_STATUS_IN_PROGRESS 0x08
+#define CMD_STATUS_REJECTED_RADIO_OFF 0x09
+#define CMD_STATUS_HOST_ERROR 0xFF
+#define CMD_STATUS_BUSY 0xFE
+
+
+#define CMD_BLOCK_COMMAND_OFFSET 0
+#define CMD_BLOCK_STATUS_OFFSET 1
+#define CMD_BLOCK_PARAMETERS_OFFSET 4
+
+#define SCAN_OPTIONS_SITE_SURVEY 0x80
+
+#define MGMT_FRAME_BODY_OFFSET 24
+#define MAX_AUTHENTICATION_RETRIES 3
+#define MAX_ASSOCIATION_RETRIES 3
+
+#define AUTHENTICATION_RESPONSE_TIME_OUT 1000
+
+#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */
+#define LOOP_RETRY_LIMIT 500000
+
+#define ACTIVE_MODE 1
+#define PS_MODE 2
+
+#define MAX_ENCRYPTION_KEYS 4
+#define MAX_ENCRYPTION_KEY_SIZE 40
+
+///////////////////////////////////////////////////////////////////////////
+// 802.11 related definitions
+///////////////////////////////////////////////////////////////////////////
+
+//
+// Regulatory Domains
+//
+
+#define REG_DOMAIN_FCC 0x10 //Channels 1-11 USA
+#define REG_DOMAIN_DOC 0x20 //Channel 1-11 Canada
+#define REG_DOMAIN_ETSI 0x30 //Channel 1-13 Europe (ex Spain/France)
+#define REG_DOMAIN_SPAIN 0x31 //Channel 10-11 Spain
+#define REG_DOMAIN_FRANCE 0x32 //Channel 10-13 France
+#define REG_DOMAIN_MKK 0x40 //Channel 14 Japan
+#define REG_DOMAIN_MKK1 0x41 //Channel 1-14 Japan(MKK1)
+#define REG_DOMAIN_ISRAEL 0x50 //Channel 3-9 ISRAEL
+
+#define BSS_TYPE_AD_HOC 1
+#define BSS_TYPE_INFRASTRUCTURE 2
+
+#define SCAN_TYPE_ACTIVE 0
+#define SCAN_TYPE_PASSIVE 1
+
+#define LONG_PREAMBLE 0
+#define SHORT_PREAMBLE 1
+#define AUTO_PREAMBLE 2
+
+#define DATA_FRAME_WS_HEADER_SIZE 30
+
+/* promiscuous mode control */
+#define PROM_MODE_OFF 0x0
+#define PROM_MODE_UNKNOWN 0x1
+#define PROM_MODE_CRC_FAILED 0x2
+#define PROM_MODE_DUPLICATED 0x4
+#define PROM_MODE_MGMT 0x8
+#define PROM_MODE_CTRL 0x10
+#define PROM_MODE_BAD_PROTOCOL 0x20
+
+
+#define IFACE_INT_STATUS_OFFSET 0
+#define IFACE_INT_MASK_OFFSET 1
+#define IFACE_LOCKOUT_HOST_OFFSET 2
+#define IFACE_LOCKOUT_MAC_OFFSET 3
+#define IFACE_FUNC_CTRL_OFFSET 28
+#define IFACE_MAC_STAT_OFFSET 30
+#define IFACE_GENERIC_INT_TYPE_OFFSET 32
+
+#define CIPHER_SUITE_NONE 0
+#define CIPHER_SUITE_WEP_64 1
+#define CIPHER_SUITE_TKIP 2
+#define CIPHER_SUITE_AES 3
+#define CIPHER_SUITE_CCX 4
+#define CIPHER_SUITE_WEP_128 5
+
+//
+// IFACE MACROS & definitions
+//
+//
+
+// FuncCtrl field:
+//
+#define FUNC_CTRL_TxENABLE 0x10
+#define FUNC_CTRL_RxENABLE 0x20
+#define FUNC_CTRL_INIT_COMPLETE 0x01
+
+/* A stub firmware image which reads the MAC address from NVRAM on the card.
+ For copyright information and source see the end of this file. */
+static u8 mac_reader[] = {
+ 0x06,0x00,0x00,0xea,0x04,0x00,0x00,0xea,0x03,0x00,0x00,0xea,0x02,0x00,0x00,0xea,
+ 0x01,0x00,0x00,0xea,0x00,0x00,0x00,0xea,0xff,0xff,0xff,0xea,0xfe,0xff,0xff,0xea,
+ 0xd3,0x00,0xa0,0xe3,0x00,0xf0,0x21,0xe1,0x0e,0x04,0xa0,0xe3,0x00,0x10,0xa0,0xe3,
+ 0x81,0x11,0xa0,0xe1,0x00,0x10,0x81,0xe3,0x00,0x10,0x80,0xe5,0x1c,0x10,0x90,0xe5,
+ 0x10,0x10,0xc1,0xe3,0x1c,0x10,0x80,0xe5,0x01,0x10,0xa0,0xe3,0x08,0x10,0x80,0xe5,
+ 0x02,0x03,0xa0,0xe3,0x00,0x10,0xa0,0xe3,0xb0,0x10,0xc0,0xe1,0xb4,0x10,0xc0,0xe1,
+ 0xb8,0x10,0xc0,0xe1,0xbc,0x10,0xc0,0xe1,0x56,0xdc,0xa0,0xe3,0x21,0x00,0x00,0xeb,
+ 0x0a,0x00,0xa0,0xe3,0x1a,0x00,0x00,0xeb,0x10,0x00,0x00,0xeb,0x07,0x00,0x00,0xeb,
+ 0x02,0x03,0xa0,0xe3,0x02,0x14,0xa0,0xe3,0xb4,0x10,0xc0,0xe1,0x4c,0x10,0x9f,0xe5,
+ 0xbc,0x10,0xc0,0xe1,0x10,0x10,0xa0,0xe3,0xb8,0x10,0xc0,0xe1,0xfe,0xff,0xff,0xea,
+ 0x00,0x40,0x2d,0xe9,0x00,0x20,0xa0,0xe3,0x02,0x3c,0xa0,0xe3,0x00,0x10,0xa0,0xe3,
+ 0x28,0x00,0x9f,0xe5,0x37,0x00,0x00,0xeb,0x00,0x40,0xbd,0xe8,0x1e,0xff,0x2f,0xe1,
+ 0x00,0x40,0x2d,0xe9,0x12,0x2e,0xa0,0xe3,0x06,0x30,0xa0,0xe3,0x00,0x10,0xa0,0xe3,
+ 0x02,0x04,0xa0,0xe3,0x2f,0x00,0x00,0xeb,0x00,0x40,0xbd,0xe8,0x1e,0xff,0x2f,0xe1,
+ 0x00,0x02,0x00,0x02,0x80,0x01,0x90,0xe0,0x01,0x00,0x00,0x0a,0x01,0x00,0x50,0xe2,
+ 0xfc,0xff,0xff,0xea,0x1e,0xff,0x2f,0xe1,0x80,0x10,0xa0,0xe3,0xf3,0x06,0xa0,0xe3,
+ 0x00,0x10,0x80,0xe5,0x00,0x10,0xa0,0xe3,0x00,0x10,0x80,0xe5,0x01,0x10,0xa0,0xe3,
+ 0x04,0x10,0x80,0xe5,0x00,0x10,0x80,0xe5,0x0e,0x34,0xa0,0xe3,0x1c,0x10,0x93,0xe5,
+ 0x02,0x1a,0x81,0xe3,0x1c,0x10,0x83,0xe5,0x58,0x11,0x9f,0xe5,0x30,0x10,0x80,0xe5,
+ 0x54,0x11,0x9f,0xe5,0x34,0x10,0x80,0xe5,0x38,0x10,0x80,0xe5,0x3c,0x10,0x80,0xe5,
+ 0x10,0x10,0x90,0xe5,0x08,0x00,0x90,0xe5,0x1e,0xff,0x2f,0xe1,0xf3,0x16,0xa0,0xe3,
+ 0x08,0x00,0x91,0xe5,0x05,0x00,0xa0,0xe3,0x0c,0x00,0x81,0xe5,0x10,0x00,0x91,0xe5,
+ 0x02,0x00,0x10,0xe3,0xfc,0xff,0xff,0x0a,0xff,0x00,0xa0,0xe3,0x0c,0x00,0x81,0xe5,
+ 0x10,0x00,0x91,0xe5,0x02,0x00,0x10,0xe3,0xfc,0xff,0xff,0x0a,0x08,0x00,0x91,0xe5,
+ 0x10,0x00,0x91,0xe5,0x01,0x00,0x10,0xe3,0xfc,0xff,0xff,0x0a,0x08,0x00,0x91,0xe5,
+ 0xff,0x00,0x00,0xe2,0x1e,0xff,0x2f,0xe1,0x30,0x40,0x2d,0xe9,0x00,0x50,0xa0,0xe1,
+ 0x03,0x40,0xa0,0xe1,0xa2,0x02,0xa0,0xe1,0x08,0x00,0x00,0xe2,0x03,0x00,0x80,0xe2,
+ 0xd8,0x10,0x9f,0xe5,0x00,0x00,0xc1,0xe5,0x01,0x20,0xc1,0xe5,0xe2,0xff,0xff,0xeb,
+ 0x01,0x00,0x10,0xe3,0xfc,0xff,0xff,0x1a,0x14,0x00,0xa0,0xe3,0xc4,0xff,0xff,0xeb,
+ 0x04,0x20,0xa0,0xe1,0x05,0x10,0xa0,0xe1,0x02,0x00,0xa0,0xe3,0x01,0x00,0x00,0xeb,
+ 0x30,0x40,0xbd,0xe8,0x1e,0xff,0x2f,0xe1,0x70,0x40,0x2d,0xe9,0xf3,0x46,0xa0,0xe3,
+ 0x00,0x30,0xa0,0xe3,0x00,0x00,0x50,0xe3,0x08,0x00,0x00,0x9a,0x8c,0x50,0x9f,0xe5,
+ 0x03,0x60,0xd5,0xe7,0x0c,0x60,0x84,0xe5,0x10,0x60,0x94,0xe5,0x02,0x00,0x16,0xe3,
+ 0xfc,0xff,0xff,0x0a,0x01,0x30,0x83,0xe2,0x00,0x00,0x53,0xe1,0xf7,0xff,0xff,0x3a,
+ 0xff,0x30,0xa0,0xe3,0x0c,0x30,0x84,0xe5,0x08,0x00,0x94,0xe5,0x10,0x00,0x94,0xe5,
+ 0x01,0x00,0x10,0xe3,0xfc,0xff,0xff,0x0a,0x08,0x00,0x94,0xe5,0x00,0x00,0xa0,0xe3,
+ 0x00,0x00,0x52,0xe3,0x0b,0x00,0x00,0x9a,0x10,0x50,0x94,0xe5,0x02,0x00,0x15,0xe3,
+ 0xfc,0xff,0xff,0x0a,0x0c,0x30,0x84,0xe5,0x10,0x50,0x94,0xe5,0x01,0x00,0x15,0xe3,
+ 0xfc,0xff,0xff,0x0a,0x08,0x50,0x94,0xe5,0x01,0x50,0xc1,0xe4,0x01,0x00,0x80,0xe2,
+ 0x02,0x00,0x50,0xe1,0xf3,0xff,0xff,0x3a,0xc8,0x00,0xa0,0xe3,0x98,0xff,0xff,0xeb,
+ 0x70,0x40,0xbd,0xe8,0x1e,0xff,0x2f,0xe1,0x01,0x0c,0x00,0x02,0x01,0x02,0x00,0x02,
+ 0x00,0x01,0x00,0x02
+};
+
+struct atmel_private {
+	void *card;		/* Bus-dependent structure; varies for PCcard */
+ int (*present_callback)(void *); /* And callback which uses it */
+ char firmware_id[32];
+ AtmelFWType firmware_type;
+ u8 *firmware;
+ int firmware_length;
+ struct timer_list management_timer;
+ struct net_device *dev;
+ struct device *sys_dev;
+ struct iw_statistics wstats;
+ struct net_device_stats stats; // device stats
+ spinlock_t irqlock, timerlock; // spinlocks
+ enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
+ enum {
+ CARD_TYPE_PARALLEL_FLASH,
+ CARD_TYPE_SPI_FLASH,
+ CARD_TYPE_EEPROM
+ } card_type;
+ int do_rx_crc; /* If we need to CRC incoming packets */
+ int probe_crc; /* set if we don't yet know */
+ int crc_ok_cnt, crc_ko_cnt; /* counters for probing */
+ u16 rx_desc_head;
+ u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous;
+ u16 tx_free_mem, tx_buff_head, tx_buff_tail;
+
+ u16 frag_seq, frag_len, frag_no;
+ u8 frag_source[6];
+
+ u8 wep_is_on, default_key, exclude_unencrypted, encryption_level;
+ u8 group_cipher_suite, pairwise_cipher_suite;
+ u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
+ int wep_key_len[MAX_ENCRYPTION_KEYS];
+ int use_wpa, radio_on_broken; /* firmware dependent stuff. */
+
+ u16 host_info_base;
+ struct host_info_struct {
+ /* NB this is matched to the hardware, don't change. */
+ u8 volatile int_status;
+ u8 volatile int_mask;
+ u8 volatile lockout_host;
+ u8 volatile lockout_mac;
+
+ u16 tx_buff_pos;
+ u16 tx_buff_size;
+ u16 tx_desc_pos;
+ u16 tx_desc_count;
+
+ u16 rx_buff_pos;
+ u16 rx_buff_size;
+ u16 rx_desc_pos;
+ u16 rx_desc_count;
+
+ u16 build_version;
+ u16 command_pos;
+
+ u16 major_version;
+ u16 minor_version;
+
+ u16 func_ctrl;
+ u16 mac_status;
+ u16 generic_IRQ_type;
+ u8 reserved[2];
+ } host_info;
+
+ enum {
+ STATION_STATE_SCANNING,
+ STATION_STATE_JOINNING,
+ STATION_STATE_AUTHENTICATING,
+ STATION_STATE_ASSOCIATING,
+ STATION_STATE_READY,
+ STATION_STATE_REASSOCIATING,
+ STATION_STATE_DOWN,
+ STATION_STATE_MGMT_ERROR
+ } station_state;
+
+ int operating_mode, power_mode;
+ time_t last_qual;
+ int beacons_this_sec;
+ int channel;
+ int reg_domain, config_reg_domain;
+ int tx_rate;
+ int auto_tx_rate;
+ int rts_threshold;
+ int frag_threshold;
+ int long_retry, short_retry;
+ int preamble;
+ int default_beacon_period, beacon_period, listen_interval;
+ int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum;
+ int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt;
+ enum {
+ SITE_SURVEY_IDLE,
+ SITE_SURVEY_IN_PROGRESS,
+ SITE_SURVEY_COMPLETED
+ } site_survey_state;
+ time_t last_survey;
+
+ int station_was_associated, station_is_associated;
+ int fast_scan;
+
+ struct bss_info {
+ int channel;
+ int SSIDsize;
+ int RSSI;
+ int UsingWEP;
+ int preamble;
+ int beacon_period;
+ int BSStype;
+ u8 BSSID[6];
+ u8 SSID[MAX_SSID_LENGTH];
+ } BSSinfo[MAX_BSS_ENTRIES];
+ int BSS_list_entries, current_BSS;
+ int connect_to_any_BSS;
+ int SSID_size, new_SSID_size;
+ u8 CurrentBSSID[6], BSSID[6];
+ u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH];
+ u64 last_beacon_timestamp;
+ u8 rx_buf[MAX_WIRELESS_BODY];
+
+};
+
+static u8 atmel_basic_rates[4] = {0x82,0x84,0x0b,0x16};
+
+static const struct {
+ int reg_domain;
+ int min, max;
+ char *name;
+} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" },
+ { REG_DOMAIN_DOC, 1, 11, "Canada" },
+ { REG_DOMAIN_ETSI, 1, 13, "Europe" },
+ { REG_DOMAIN_SPAIN, 10, 11, "Spain" },
+ { REG_DOMAIN_FRANCE, 10, 13, "France" },
+ { REG_DOMAIN_MKK, 14, 14, "MKK" },
+ { REG_DOMAIN_MKK1, 1, 14, "MKK1" },
+ { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} };
+
+static void build_wpa_mib(struct atmel_private *priv);
+static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len);
+static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len);
+static void atmel_set_gcr(struct net_device *dev, u16 mask);
+static void atmel_clear_gcr(struct net_device *dev, u16 mask);
+static int atmel_lock_mac(struct atmel_private *priv);
+static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
+static void atmel_command_irq(struct atmel_private *priv);
+static int atmel_validate_channel(struct atmel_private *priv, int channel);
+static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u16 frame_len, u8 rssi);
+static void atmel_management_timer(u_long a);
+static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
+static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
+static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u8 *body, int body_len);
+
+static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
+static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data);
+static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data);
+static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len);
+static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len);
+static void atmel_scan(struct atmel_private *priv, int specific_ssid);
+static void atmel_join_bss(struct atmel_private *priv, int bss_index);
+static void atmel_smooth_qual(struct atmel_private *priv);
+static void atmel_writeAR(struct net_device *dev, u16 data);
+static int probe_atmel_card(struct net_device *dev);
+static int reset_atmel_card(struct net_device *dev );
+static void atmel_enter_state(struct atmel_private *priv, int new_state);
+int atmel_open (struct net_device *dev);
+
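+/* The helpers below translate an offset within the host-info block, the
+   command block or an RX/TX descriptor into an absolute position in card
+   memory, ready for the indirect accessors further down. */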
+static inline u16 atmel_hi(struct atmel_private *priv, u16 offset)
+{
+ return priv->host_info_base + offset;
+}
+
+static inline u16 atmel_co(struct atmel_private *priv, u16 offset)
+{
+ return priv->host_info.command_pos + offset;
+}
+
+static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc)
+{
+ return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset;
+}
+
+static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc)
+{
+ return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset;
+}
+
+static inline u8 atmel_read8(struct net_device *dev, u16 offset)
+{
+ return inb(dev->base_addr + offset);
+}
+
+static inline void atmel_write8(struct net_device *dev, u16 offset, u8 data)
+{
+ outb(data, dev->base_addr + offset);
+}
+
+static inline u16 atmel_read16(struct net_device *dev, u16 offset)
+{
+ return inw(dev->base_addr + offset);
+}
+
+static inline void atmel_write16(struct net_device *dev, u16 offset, u16 data)
+{
+ outw(data, dev->base_addr + offset);
+}
+
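+/* Card memory is reached indirectly: atmel_writeAR() latches the target
+   address into the AR register, after which data moves one byte or word at a
+   time through the DR data register. */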
+static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos)
+{
+ atmel_writeAR(priv->dev, pos);
+ return atmel_read8(priv->dev, DR);
+}
+
+static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data)
+{
+ atmel_writeAR(priv->dev, pos);
+ atmel_write8(priv->dev, DR, data);
+}
+
+static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos)
+{
+ atmel_writeAR(priv->dev, pos);
+ return atmel_read16(priv->dev, DR);
+}
+
+static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data)
+{
+ atmel_writeAR(priv->dev, pos);
+ atmel_write16(priv->dev, DR, data);
+}
+
+static const struct iw_handler_def atmel_handler_def;
+
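+/* Reclaim TX descriptors that the firmware has marked TX_DONE: release their
+   buffer space, advance tx_desc_head/tx_buff_head around the rings, account
+   the result for data frames and wake the transmit queue. */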
+static void tx_done_irq(struct atmel_private *priv)
+{
+ int i;
+
+ for (i = 0;
+ atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE &&
+ i < priv->host_info.tx_desc_count;
+ i++) {
+
+ u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head));
+ u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head));
+ u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head));
+
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head), 0);
+
+ priv->tx_free_mem += msdu_size;
+ priv->tx_desc_free++;
+
+ if (priv->tx_buff_head + msdu_size > (priv->host_info.tx_buff_pos + priv->host_info.tx_buff_size))
+ priv->tx_buff_head = 0;
+ else
+ priv->tx_buff_head += msdu_size;
+
+ if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1))
+ priv->tx_desc_head++ ;
+ else
+ priv->tx_desc_head = 0;
+
+ if (type == TX_PACKET_TYPE_DATA) {
+ if (status == TX_STATUS_SUCCESS)
+ priv->stats.tx_packets++;
+ else
+ priv->stats.tx_errors++;
+ netif_wake_queue(priv->dev);
+ }
+ }
+}
+
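+/* Find room for 'len' bytes in the circular TX buffer in card memory: returns
+   the position to copy the frame to, wrapping tx_buff_tail back to the start
+   when only the bottom of the buffer is exhausted, or 0 when the descriptor
+   and free-space checks fail. */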
+static u16 find_tx_buff(struct atmel_private *priv, u16 len)
+{
+ u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail;
+
+ if (priv->tx_desc_free == 3 || priv->tx_free_mem < len)
+ return 0;
+
+ if (bottom_free >= len)
+ return priv->host_info.tx_buff_pos + priv->tx_buff_tail;
+
+ if (priv->tx_free_mem - bottom_free >= len) {
+ priv->tx_buff_tail = 0;
+ return priv->host_info.tx_buff_pos;
+ }
+
+ return 0;
+}
+
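+/* Fill in the card-resident TX descriptor for the frame just copied to
+   'buff' (position, length, packet type, rate and, for WPA, cipher info),
+   then hand it to the firmware by setting TX_FIRM_OWN and linking it after
+   the previously queued descriptor. */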
+static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 len, u16 buff, u8 type)
+{
+ atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff);
+ atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len);
+ if (!priv->use_wpa)
+ atmel_wmem16(priv, atmel_tx(priv, TX_DESC_HOST_LENGTH_OFFSET, priv->tx_desc_tail), len);
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_tail), type);
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RATE_OFFSET, priv->tx_desc_tail), priv->tx_rate);
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RETRY_OFFSET, priv->tx_desc_tail), 0);
+ if (priv->use_wpa) {
+ int cipher_type, cipher_length;
+ if (is_bcast) {
+ cipher_type = priv->group_cipher_suite;
+ if (cipher_type == CIPHER_SUITE_WEP_64 ||
+ cipher_type == CIPHER_SUITE_WEP_128 )
+ cipher_length = 8;
+ else if (cipher_type == CIPHER_SUITE_TKIP)
+ cipher_length = 12;
+ else if (priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_64 ||
+ priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_128) {
+ cipher_type = priv->pairwise_cipher_suite;
+ cipher_length = 8;
+ } else {
+ cipher_type = CIPHER_SUITE_NONE;
+ cipher_length = 0;
+ }
+ } else {
+ cipher_type = priv->pairwise_cipher_suite;
+ if (cipher_type == CIPHER_SUITE_WEP_64 ||
+ cipher_type == CIPHER_SUITE_WEP_128 )
+ cipher_length = 8;
+ else if (cipher_type == CIPHER_SUITE_TKIP)
+ cipher_length = 12;
+ else if (priv->group_cipher_suite == CIPHER_SUITE_WEP_64 ||
+ priv->group_cipher_suite == CIPHER_SUITE_WEP_128) {
+ cipher_type = priv->group_cipher_suite;
+ cipher_length = 8;
+ } else {
+ cipher_type = CIPHER_SUITE_NONE;
+ cipher_length = 0;
+ }
+ }
+
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail),
+ cipher_type);
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail),
+ cipher_length);
+ }
+ atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_tail), 0x80000000L);
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_tail), TX_FIRM_OWN);
+ if (priv->tx_desc_previous != priv->tx_desc_tail)
+ atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0);
+ priv->tx_desc_previous = priv->tx_desc_tail;
+ if (priv->tx_desc_tail < (priv->host_info.tx_desc_count -1 ))
+ priv->tx_desc_tail++;
+ else
+ priv->tx_desc_tail = 0;
+ priv->tx_desc_free--;
+ priv->tx_free_mem -= len;
+
+}
+
+static int start_tx (struct sk_buff *skb, struct net_device *dev)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct ieee802_11_hdr header;
+ unsigned long flags;
+ u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+	u8 SNAP_RFC1042[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+ if (priv->card && priv->present_callback &&
+ !(*priv->present_callback)(priv->card)) {
+ priv->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (priv->station_state != STATION_STATE_READY) {
+ priv->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ /* first ensure the timer func cannot run */
+ spin_lock_bh(&priv->timerlock);
+ /* then stop the hardware ISR */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ /* nb doing the above in the opposite order will deadlock */
+
+	/* The wireless header is 30 bytes. From the Ethernet packet we "cut" the
+	   first 12 bytes (containing DA/SA) and put them in the appropriate fields
+	   of the wireless header. Thus the packet length becomes the original
+	   length + 18 (+30 - 12). */
+
+ if (!(buff = find_tx_buff(priv, len + 18))) {
+ priv->stats.tx_dropped++;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+ spin_unlock_bh(&priv->timerlock);
+ netif_stop_queue(dev);
+ return 1;
+ }
+
+ frame_ctl = IEEE802_11_FTYPE_DATA;
+ header.duration_id = 0;
+ header.seq_ctl = 0;
+ if (priv->wep_is_on)
+ frame_ctl |= IEEE802_11_FCTL_WEP;
+ if (priv->operating_mode == IW_MODE_ADHOC) {
+ memcpy(&header.addr1, skb->data, 6);
+ memcpy(&header.addr2, dev->dev_addr, 6);
+ memcpy(&header.addr3, priv->BSSID, 6);
+ } else {
+ frame_ctl |= IEEE802_11_FCTL_TODS;
+ memcpy(&header.addr1, priv->CurrentBSSID, 6);
+ memcpy(&header.addr2, dev->dev_addr, 6);
+ memcpy(&header.addr3, skb->data, 6);
+ }
+
+ if (priv->use_wpa)
+ memcpy(&header.addr4, SNAP_RFC1024, 6);
+
+ header.frame_ctl = cpu_to_le16(frame_ctl);
+ /* Copy the wireless header into the card */
+ atmel_copy_to_card(dev, buff, (unsigned char *)&header, DATA_FRAME_WS_HEADER_SIZE);
+ /* Copy the packet sans its 802.3 header addresses which have been replaced */
+ atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
+ priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE;
+
+ /* low bit of first byte of destination tells us if broadcast */
+ tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
+ dev->trans_start = jiffies;
+ priv->stats.tx_bytes += len;
+
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+ spin_unlock_bh(&priv->timerlock);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void atmel_transmit_management_frame(struct atmel_private *priv,
+ struct ieee802_11_hdr *header,
+ u8 *body, int body_len)
+{
+ u16 buff;
+ int len = MGMT_FRAME_BODY_OFFSET + body_len;
+
+ if (!(buff = find_tx_buff(priv, len)))
+ return;
+
+ atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET);
+ atmel_copy_to_card(priv->dev, buff + MGMT_FRAME_BODY_OFFSET, body, body_len);
+ priv->tx_buff_tail += len;
+ tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
+}
+
+static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u16 msdu_size, u16 rx_packet_loc, u32 crc)
+{
+ /* fast path: unfragmented packet copy directly into skbuf */
+ u8 mac4[6];
+ struct sk_buff *skb;
+ unsigned char *skbp;
+
+ /* get the final, mac 4 header field, this tells us encapsulation */
+ atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6);
+ msdu_size -= 6;
+
+ if (priv->do_rx_crc) {
+ crc = crc32_le(crc, mac4, 6);
+ msdu_size -= 4;
+ }
+
+ if (!(skb = dev_alloc_skb(msdu_size + 14))) {
+ priv->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, 2);
+ skbp = skb_put(skb, msdu_size + 12);
+ atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size);
+
+ if (priv->do_rx_crc) {
+ u32 netcrc;
+ crc = crc32_le(crc, skbp + 12, msdu_size);
+ atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4);
+ if ((crc ^ 0xffffffff) != netcrc) {
+ priv->stats.rx_crc_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+
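+ /* Rebuild an Ethernet-style destination/source address pair in the 12
+    bytes reserved at the front of the skb; the rest of the payload was
+    copied straight from card memory above. */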
+ memcpy(skbp, header->addr1, 6); /* destination address */
+ if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+ memcpy(&skbp[6], header->addr3, 6);
+ else
+ memcpy(&skbp[6], header->addr2, 6); /* source address */
+
+ priv->dev->last_rx = jiffies;
+ skb->dev = priv->dev;
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx(skb);
+ priv->stats.rx_bytes += 12 + msdu_size;
+ priv->stats.rx_packets++;
+}
+
+/* Test to see if the packet in card memory at packet_loc has a valid CRC.
+ It doesn't matter that this is slow: it is only used to probe the first few packets. */
+static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
+{
+ int i = msdu_size - 4;
+ u32 netcrc, crc = 0xffffffff;
+
+ if (msdu_size < 4)
+ return 0;
+
+ atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4);
+
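+ /* Read the packet back a byte at a time through the data register
+    (which steps through card memory after each read) and accumulate
+    the CRC over everything except the trailing CRC field itself. */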
+ atmel_writeAR(priv->dev, packet_loc);
+ while (i--) {
+ u8 octet = atmel_read8(priv->dev, DR);
+ crc = crc32_le(crc, &octet, 1);
+ }
+
+ return (crc ^ 0xffffffff) == netcrc;
+}
+
+static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
+{
+ u8 mac4[6];
+ u8 source[6];
+ struct sk_buff *skb;
+
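+ /* Fragments are reassembled in priv->rx_buf; only one reassembly is in
+    flight at a time, keyed on the source address, sequence number and
+    expected fragment number. */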
+ if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+ memcpy(source, header->addr3, 6);
+ else
+ memcpy(source, header->addr2, 6);
+
+ rx_packet_loc += 24; /* skip header */
+
+ if (priv->do_rx_crc)
+ msdu_size -= 4;
+
+ if (frag_no == 0) { /* first fragment */
+ atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
+ msdu_size -= 6;
+ rx_packet_loc += 6;
+
+ if (priv->do_rx_crc)
+ crc = crc32_le(crc, mac4, 6);
+
+ priv->frag_seq = seq_no;
+ priv->frag_no = 1;
+ priv->frag_len = msdu_size;
+ memcpy(priv->frag_source, source, 6);
+ memcpy(&priv->rx_buf[6], source, 6);
+ memcpy(priv->rx_buf, header->addr1, 6);
+
+ atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
+
+ if (priv->do_rx_crc) {
+ u32 netcrc;
+ crc = crc32_le(crc, &priv->rx_buf[12], msdu_size);
+ atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
+ if ((crc ^ 0xffffffff) != netcrc) {
+ priv->stats.rx_crc_errors++;
+ memset(priv->frag_source, 0xff, 6);
+ }
+ }
+
+ } else if (priv->frag_no == frag_no &&
+ priv->frag_seq == seq_no &&
+ memcmp(priv->frag_source, source, 6) == 0) {
+
+ atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
+ rx_packet_loc, msdu_size);
+ if (priv->do_rx_crc) {
+ u32 netcrc;
+ crc = crc32_le(crc,
+ &priv->rx_buf[12 + priv->frag_len],
+ msdu_size);
+ atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
+ if ((crc ^ 0xffffffff) != netcrc) {
+ priv->stats.rx_crc_errors++;
+ memset(priv->frag_source, 0xff, 6);
+ more_frags = 1; /* don't send broken assembly */
+ }
+ }
+
+ priv->frag_len += msdu_size;
+ priv->frag_no++;
+
+ if (!more_frags) { /* last one */
+ memset(priv->frag_source, 0xff, 6);
+ if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
+ priv->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, priv->frag_len + 12),
+ priv->rx_buf,
+ priv->frag_len + 12);
+ priv->dev->last_rx = jiffies;
+ skb->dev = priv->dev;
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx(skb);
+ priv->stats.rx_bytes += priv->frag_len + 12;
+ priv->stats.rx_packets++;
+ }
+ }
+
+ } else
+ priv->wstats.discard.fragment++;
+}
+
+static void rx_done_irq(struct atmel_private *priv)
+{
+ int i;
+ struct ieee802_11_hdr header;
+
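+ /* Walk the receive descriptor ring for as long as the firmware marks
+    entries valid: data frames go to the fast or fragmented rx path,
+    management frames to atmel_management_frame(), and each descriptor
+    is returned to the firmware afterwards. */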
+ for (i = 0;
+ atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
+ i < priv->host_info.rx_desc_count;
+ i++) {
+
+ u16 msdu_size, rx_packet_loc, frame_ctl, seq_control;
+ u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head));
+ u32 crc = 0xffffffff;
+
+ if (status != RX_STATUS_SUCCESS) {
+ if (status == 0xc1) /* determined by experiment */
+ priv->wstats.discard.nwid++;
+ else
+ priv->stats.rx_errors++;
+ goto next;
+ }
+
+ msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head));
+ rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
+
+ if (msdu_size < 30) {
+ priv->stats.rx_errors++;
+ goto next;
+ }
+
+ /* Get header as far as end of seq_ctl */
+ atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
+ frame_ctl = le16_to_cpu(header.frame_ctl);
+ seq_control = le16_to_cpu(header.seq_ctl);
+
+ /* Probe for CRC use here if needed; once five packets have arrived with
+ the same CRC status, we assume we know what's happening and stop probing. */
+ if (priv->probe_crc) {
+ if (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP)) {
+ priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
+ } else {
+ priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
+ }
+ if (priv->do_rx_crc) {
+ if (priv->crc_ok_cnt++ > 5)
+ priv->probe_crc = 0;
+ } else {
+ if (priv->crc_ko_cnt++ > 5)
+ priv->probe_crc = 0;
+ }
+ }
+
+ /* don't CRC header when WEP in use */
+ if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP))) {
+ crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
+ }
+ msdu_size -= 24; /* header */
+
+ if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_DATA) {
+
+ int more_fragments = frame_ctl & IEEE802_11_FCTL_MOREFRAGS;
+ u8 packet_fragment_no = seq_control & IEEE802_11_SCTL_FRAG;
+ u16 packet_sequence_no = (seq_control & IEEE802_11_SCTL_SEQ) >> 4;
+
+ if (!more_fragments && packet_fragment_no == 0 ) {
+ fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
+ } else {
+ frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc,
+ packet_sequence_no, packet_fragment_no, more_fragments);
+ }
+ }
+
+ if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_MGMT) {
+ /* copy rest of packet into buffer */
+ atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
+
+ /* we use the same buffer for frag reassembly and control packets */
+ memset(priv->frag_source, 0xff, 6);
+
+ if (priv->do_rx_crc) {
+ /* the last 4 octets are the CRC */
+ msdu_size -= 4;
+ crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size);
+ if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) {
+ priv->stats.rx_crc_errors++;
+ goto next;
+ }
+ }
+
+ atmel_management_frame(priv, &header, msdu_size,
+ atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head)));
+ }
+
+ next:
+ /* release descriptor */
+ atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED);
+
+ if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1))
+ priv->rx_desc_head++;
+ else
+ priv->rx_desc_head = 0;
+ }
+}
+
+static irqreturn_t service_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct atmel_private *priv = netdev_priv(dev);
+ u8 isr;
+ int i = -1;
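+ /* Interrupt sources in the order in which they are serviced; each pass
+    of the loop below handles only the highest-priority pending source. */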
+ static u8 irq_order[] = {
+ ISR_OUT_OF_RANGE,
+ ISR_RxCOMPLETE,
+ ISR_TxCOMPLETE,
+ ISR_RxFRAMELOST,
+ ISR_FATAL_ERROR,
+ ISR_COMMAND_COMPLETE,
+ ISR_IBSS_MERGE,
+ ISR_GENERIC_IRQ
+ };
+
+
+ if (priv->card && priv->present_callback &&
+ !(*priv->present_callback)(priv->card))
+ return IRQ_HANDLED;
+
+ /* In this state upper-level code assumes it can mess with
+ the card unhampered by interrupts which may change register state.
+ Note that even though the card shouldn't generate interrupts,
+ the interrupt line may be shared. This allows card setup
+ to go on without disabling interrupts for a long time. */
+ if (priv->station_state == STATION_STATE_DOWN)
+ return IRQ_NONE;
+
+ atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */
+
+ while (1) {
+ if (!atmel_lock_mac(priv)) {
+ /* failed to contact card */
+ printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
+ return IRQ_HANDLED;
+ }
+
+ isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
+
+ if (!isr) {
+ atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */
+ return i == -1 ? IRQ_NONE : IRQ_HANDLED;
+ }
+
+ atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
+
+ for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++)
+ if (isr & irq_order[i])
+ break;
+
+ if (!atmel_lock_mac(priv)) {
+ /* failed to contact card */
+ printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
+ return IRQ_HANDLED;
+ }
+
+ isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
+ isr ^= irq_order[i];
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr);
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
+
+ switch (irq_order[i]) {
+
+ case ISR_OUT_OF_RANGE:
+ if (priv->operating_mode == IW_MODE_INFRA &&
+ priv->station_state == STATION_STATE_READY) {
+ priv->station_is_associated = 0;
+ atmel_scan(priv, 1);
+ }
+ break;
+
+ case ISR_RxFRAMELOST:
+ priv->wstats.discard.misc++;
+ /* fall through */
+ case ISR_RxCOMPLETE:
+ rx_done_irq(priv);
+ break;
+
+ case ISR_TxCOMPLETE:
+ tx_done_irq(priv);
+ break;
+
+ case ISR_FATAL_ERROR:
+ printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name);
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ break;
+
+ case ISR_COMMAND_COMPLETE:
+ atmel_command_irq(priv);
+ break;
+
+ case ISR_IBSS_MERGE:
+ atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
+ priv->CurrentBSSID, 6);
+ /* The WPA stuff cares about the current AP address */
+ if (priv->use_wpa)
+ build_wpa_mib(priv);
+ break;
+ case ISR_GENERIC_IRQ:
+ printk(KERN_INFO "%s: Generic_irq received.\n", dev->name);
+ break;
+ }
+ }
+}
+
+
+static struct net_device_stats *atmel_get_stats (struct net_device *dev)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+static struct iw_statistics *atmel_get_wireless_stats (struct net_device *dev)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* update the link quality here in case we are seeing no beacons
+ at all to drive the process */
+ atmel_smooth_qual(priv);
+
+ priv->wstats.status = priv->station_state;
+
+ if (priv->operating_mode == IW_MODE_INFRA) {
+ if (priv->station_state != STATION_STATE_READY) {
+ priv->wstats.qual.qual = 0;
+ priv->wstats.qual.level = 0;
+ priv->wstats.qual.updated = (IW_QUAL_QUAL_INVALID
+ | IW_QUAL_LEVEL_INVALID);
+ }
+ priv->wstats.qual.noise = 0;
+ priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
+ } else {
+ /* Quality levels cannot be determined in ad-hoc mode,
+ because we can 'hear' more than one remote station. */
+ priv->wstats.qual.qual = 0;
+ priv->wstats.qual.level = 0;
+ priv->wstats.qual.noise = 0;
+ priv->wstats.qual.updated = IW_QUAL_QUAL_INVALID
+ | IW_QUAL_LEVEL_INVALID
+ | IW_QUAL_NOISE_INVALID;
+ priv->wstats.miss.beacon = 0;
+ }
+
+ return (&priv->wstats);
+}
+
+static int atmel_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 2312))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int atmel_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ memcpy (dev->dev_addr, addr->sa_data, dev->addr_len);
+ return atmel_open(dev);
+}
+
+EXPORT_SYMBOL(atmel_open);
+
+int atmel_open (struct net_device *dev)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int i, channel;
+
+ /* any scheduled timer is no longer needed and might screw things up.. */
+ del_timer_sync(&priv->management_timer);
+
+ /* Interrupts will not touch the card once in this state... */
+ priv->station_state = STATION_STATE_DOWN;
+
+ if (priv->new_SSID_size) {
+ memcpy(priv->SSID, priv->new_SSID, priv->new_SSID_size);
+ priv->SSID_size = priv->new_SSID_size;
+ priv->new_SSID_size = 0;
+ }
+ priv->BSS_list_entries = 0;
+
+ priv->AuthenticationRequestRetryCnt = 0;
+ priv->AssociationRequestRetryCnt = 0;
+ priv->ReAssociationRequestRetryCnt = 0;
+ priv->CurrentAuthentTransactionSeqNum = 0x0001;
+ priv->ExpectedAuthentTransactionSeqNum = 0x0002;
+
+ priv->site_survey_state = SITE_SURVEY_IDLE;
+ priv->station_is_associated = 0;
+
+ if (!reset_atmel_card(dev))
+ return -EAGAIN;
+
+ if (priv->config_reg_domain) {
+ priv->reg_domain = priv->config_reg_domain;
+ atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain);
+ } else {
+ priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS);
+ for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ if (priv->reg_domain == channel_table[i].reg_domain)
+ break;
+ if (i == sizeof(channel_table)/sizeof(channel_table[0])) {
+ priv->reg_domain = REG_DOMAIN_MKK1;
+ printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
+ }
+ }
+
+ if ((channel = atmel_validate_channel(priv, priv->channel)))
+ priv->channel = channel;
+
+ /* this moves station_state on.... */
+ atmel_scan(priv, 1);
+
+ atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */
+ return 0;
+}
+
+static int atmel_close (struct net_device *dev)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ atmel_enter_state(priv, STATION_STATE_DOWN);
+
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(dev, GCR, 0x0060);
+ atmel_write16(dev, GCR, 0x0040);
+ return 0;
+}
+
+static int atmel_validate_channel(struct atmel_private *priv, int channel)
+{
+ /* check that channel is OK, if so return zero,
+ else return suitable default channel */
+ int i;
+
+ for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ if (priv->reg_domain == channel_table[i].reg_domain) {
+ if (channel >= channel_table[i].min &&
+ channel <= channel_table[i].max)
+ return 0;
+ else
+ return channel_table[i].min;
+ }
+ return 0;
+}
+
+static int atmel_proc_output (char *buf, struct atmel_private *priv)
+{
+ int i;
+ char *p = buf;
+ char *s, *r, *c;
+
+ p += sprintf(p, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR);
+
+ if (priv->station_state != STATION_STATE_DOWN) {
+ p += sprintf(p, "Firmware version:\t%d.%d build %d\nFirmware location:\t",
+ priv->host_info.major_version,
+ priv->host_info.minor_version,
+ priv->host_info.build_version);
+
+ if (priv->card_type != CARD_TYPE_EEPROM)
+ p += sprintf(p, "on card\n");
+ else if (priv->firmware)
+ p += sprintf(p, "%s loaded by host\n", priv->firmware_id);
+ else
+ p += sprintf(p, "%s loaded by hotplug\n", priv->firmware_id);
+
+ switch(priv->card_type) {
+ case CARD_TYPE_PARALLEL_FLASH: c = "Parallel flash"; break;
+ case CARD_TYPE_SPI_FLASH: c = "SPI flash"; break;
+ case CARD_TYPE_EEPROM: c = "EEPROM"; break;
+ default: c = "<unknown>";
+ }
+
+
+ r = "<unknown>";
+ for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+ if (priv->reg_domain == channel_table[i].reg_domain)
+ r = channel_table[i].name;
+
+ p += sprintf(p, "MAC memory type:\t%s\n", c);
+ p += sprintf(p, "Regulatory domain:\t%s\n", r);
+ p += sprintf(p, "Host CRC checking:\t%s\n",
+ priv->do_rx_crc ? "On" : "Off");
+ p += sprintf(p, "WPA-capable firmware:\t%s\n",
+ priv->use_wpa ? "Yes" : "No");
+ }
+
+ switch(priv->station_state) {
+ case STATION_STATE_SCANNING: s = "Scanning"; break;
+ case STATION_STATE_JOINNING: s = "Joining"; break;
+ case STATION_STATE_AUTHENTICATING: s = "Authenticating"; break;
+ case STATION_STATE_ASSOCIATING: s = "Associating"; break;
+ case STATION_STATE_READY: s = "Ready"; break;
+ case STATION_STATE_REASSOCIATING: s = "Reassociating"; break;
+ case STATION_STATE_MGMT_ERROR: s = "Management error"; break;
+ case STATION_STATE_DOWN: s = "Down"; break;
+ default: s = "<unknown>";
+ }
+
+ p += sprintf(p, "Current state:\t\t%s\n", s);
+ return p - buf;
+}
+
+static int atmel_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct atmel_private *priv = data;
+ int len = atmel_proc_output (page, priv);
+ if (len <= off+count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len>count) len = count;
+ if (len<0) len = 0;
+ return len;
+}
+
+struct net_device *init_atmel_card( unsigned short irq, int port, const AtmelFWType fw_type,
+ struct device *sys_dev, int (*card_present)(void *), void *card)
+{
+ struct net_device *dev;
+ struct atmel_private *priv;
+ int rc;
+
+ /* Create the network device object. */
+ dev = alloc_etherdev(sizeof(*priv));
+ if (!dev) {
+ printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n");
+ return NULL;
+ }
+ if (dev_alloc_name(dev, dev->name) < 0) {
+ printk(KERN_ERR "atmel: Couldn't get name!\n");
+ goto err_out_free;
+ }
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->sys_dev = sys_dev;
+ priv->present_callback = card_present;
+ priv->card = card;
+ priv->firmware = NULL;
+ priv->firmware_id[0] = '\0';
+ priv->firmware_type = fw_type;
+ if (firmware) /* module parameter */
+ strcpy(priv->firmware_id, firmware);
+ priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
+ priv->station_state = STATION_STATE_DOWN;
+ priv->do_rx_crc = 0;
+ /* For PCMCIA cards, some chips need CRC, some don't,
+ so we have to probe. */
+ if (priv->bus_type == BUS_TYPE_PCCARD) {
+ priv->probe_crc = 1;
+ priv->crc_ok_cnt = priv->crc_ko_cnt = 0;
+ } else
+ priv->probe_crc = 0;
+ memset(&priv->stats, 0, sizeof(priv->stats));
+ memset(&priv->wstats, 0, sizeof(priv->wstats));
+ priv->last_qual = jiffies;
+ priv->last_beacon_timestamp = 0;
+ memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
+ memset(priv->BSSID, 0, 6);
+ priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
+ priv->station_was_associated = 0;
+
+ priv->last_survey = jiffies;
+ priv->preamble = LONG_PREAMBLE;
+ priv->operating_mode = IW_MODE_INFRA;
+ priv->connect_to_any_BSS = 0;
+ priv->config_reg_domain = 0;
+ priv->reg_domain = 0;
+ priv->tx_rate = 3;
+ priv->auto_tx_rate = 1;
+ priv->channel = 4;
+ priv->power_mode = 0;
+ priv->SSID[0] = '\0';
+ priv->SSID_size = 0;
+ priv->new_SSID_size = 0;
+ priv->frag_threshold = 2346;
+ priv->rts_threshold = 2347;
+ priv->short_retry = 7;
+ priv->long_retry = 4;
+
+ priv->wep_is_on = 0;
+ priv->default_key = 0;
+ priv->encryption_level = 0;
+ priv->exclude_unencrypted = 0;
+ priv->group_cipher_suite = priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
+ priv->use_wpa = 0;
+ memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+ memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
+
+ priv->default_beacon_period = priv->beacon_period = 100;
+ priv->listen_interval = 1;
+
+ init_timer(&priv->management_timer);
+ spin_lock_init(&priv->irqlock);
+ spin_lock_init(&priv->timerlock);
+ priv->management_timer.function = atmel_management_timer;
+ priv->management_timer.data = (unsigned long) dev;
+
+ dev->open = atmel_open;
+ dev->stop = atmel_close;
+ dev->change_mtu = atmel_change_mtu;
+ dev->set_mac_address = atmel_set_mac_address;
+ dev->hard_start_xmit = start_tx;
+ dev->get_stats = atmel_get_stats;
+ dev->get_wireless_stats = atmel_get_wireless_stats;
+ dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def;
+ dev->do_ioctl = atmel_ioctl;
+ dev->irq = irq;
+ dev->base_addr = port;
+
+ SET_NETDEV_DEV(dev, sys_dev);
+
+ if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) {
+ printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc );
+ goto err_out_free;
+ }
+
+ if (priv->bus_type == BUS_TYPE_PCI &&
+ !request_region( dev->base_addr, 64, dev->name )) {
+ goto err_out_irq;
+ }
+
+ if (register_netdev(dev))
+ goto err_out_res;
+
+ if (!probe_atmel_card(dev)){
+ unregister_netdev(dev);
+ goto err_out_res;
+ }
+
+ netif_carrier_off(dev);
+
+ create_proc_read_entry ("driver/atmel", 0, NULL, atmel_read_proc, priv);
+
+ printk(KERN_INFO "%s: Atmel at76c50x wireless. Version %d.%d simon@thekelleys.org.uk\n",
+ dev->name, DRIVER_MAJOR, DRIVER_MINOR);
+
+ SET_MODULE_OWNER(dev);
+ return dev;
+
+ err_out_res:
+ if (priv->bus_type == BUS_TYPE_PCI)
+ release_region( dev->base_addr, 64 );
+ err_out_irq:
+ free_irq(dev->irq, dev);
+ err_out_free:
+ free_netdev(dev);
+ return NULL;
+}
+
+EXPORT_SYMBOL(init_atmel_card);
+
+void stop_atmel_card(struct net_device *dev, int freeres)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* put a brick on it... */
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(dev, GCR, 0x0060);
+ atmel_write16(dev, GCR, 0x0040);
+
+ del_timer_sync(&priv->management_timer);
+ unregister_netdev(dev);
+ remove_proc_entry("driver/atmel", NULL);
+ free_irq(dev->irq, dev);
+ if (priv->firmware)
+ kfree(priv->firmware);
+ if (freeres) {
+ /* PCMCIA frees this stuff, so only for PCI */
+ release_region(dev->base_addr, 64);
+ }
+ free_netdev(dev);
+}
+
+EXPORT_SYMBOL(stop_atmel_card);
+
+static int atmel_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* Check if we asked for `any' */
+ if(dwrq->flags == 0) {
+ priv->connect_to_any_BSS = 1;
+ } else {
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ priv->connect_to_any_BSS = 0;
+
+ /* Check the size of the string */
+ if (dwrq->length > MAX_SSID_LENGTH + 1)
+ return -E2BIG ;
+ if (index != 0)
+ return -EINVAL;
+
+ memcpy(priv->new_SSID, extra, dwrq->length - 1);
+ priv->new_SSID_size = dwrq->length - 1;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int atmel_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* Get the current SSID */
+ if (priv->new_SSID_size != 0) {
+ memcpy(extra, priv->new_SSID, priv->new_SSID_size);
+ extra[priv->new_SSID_size] = '\0';
+ dwrq->length = priv->new_SSID_size + 1;
+ } else {
+ memcpy(extra, priv->SSID, priv->SSID_size);
+ extra[priv->SSID_size] = '\0';
+ dwrq->length = priv->SSID_size + 1;
+ }
+
+ dwrq->flags = !priv->connect_to_any_BSS; /* active */
+
+ return 0;
+}
+
+static int atmel_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+ awrq->sa_family = ARPHRD_ETHER;
+
+ return 0;
+}
+
+static int atmel_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* Basic checking: do we have a key to set?
+  * Note: with the new API, it's impossible to get a NULL pointer.
+  * Therefore, we need to check for a key size == 0 instead.
+  * Newer versions of iwconfig properly set the IW_ENCODE_NOKEY flag
+  * when no key is present (only changing flags), but older versions
+  * don't do it. - Jean II */
+ if (dwrq->length > 0) {
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ int current_index = priv->default_key;
+ /* Check the size of the key */
+ if (dwrq->length > 13) {
+ return -EINVAL;
+ }
+ /* Check the index (none -> use current) */
+ if (index < 0 || index >= 4)
+ index = current_index;
+ else
+ priv->default_key = index;
+ /* Set the length */
+ if (dwrq->length > 5)
+ priv->wep_key_len[index] = 13;
+ else
+ if (dwrq->length > 0)
+ priv->wep_key_len[index] = 5;
+ else
+ /* Disable the key */
+ priv->wep_key_len[index] = 0;
+ /* Check if the key is not marked as invalid */
+ if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ /* Cleanup */
+ memset(priv->wep_keys[index], 0, 13);
+ /* Copy the key in the driver */
+ memcpy(priv->wep_keys[index], extra, dwrq->length);
+ }
+ /* WE specify that if a valid key is set, encryption
+ * should be enabled (user may turn it off later)
+ * This is also how "iwconfig ethX key on" works */
+ if (index == current_index &&
+ priv->wep_key_len[index] > 0) {
+ priv->wep_is_on = 1;
+ priv->exclude_unencrypted = 1;
+ if (priv->wep_key_len[index] > 5) {
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+ priv->encryption_level = 2;
+ } else {
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+ priv->encryption_level = 1;
+ }
+ }
+ } else {
+ /* Do we want to just set the transmit key index ? */
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if ( index>=0 && index < 4 ) {
+ priv->default_key = index;
+ } else
+ /* Don't complain if we only change the mode */
+ if (!(dwrq->flags & IW_ENCODE_MODE)) {
+ return -EINVAL;
+ }
+ }
+ /* Read the flags */
+ if(dwrq->flags & IW_ENCODE_DISABLED) {
+ priv->wep_is_on = 0;
+ priv->encryption_level = 0;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
+ } else {
+ priv->wep_is_on = 1;
+ if (priv->wep_key_len[priv->default_key] > 5) {
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+ priv->encryption_level = 2;
+ } else {
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+ priv->encryption_level = 1;
+ }
+ }
+ if(dwrq->flags & IW_ENCODE_RESTRICTED)
+ priv->exclude_unencrypted = 1;
+ if(dwrq->flags & IW_ENCODE_OPEN)
+ priv->exclude_unencrypted = 0;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+
+static int atmel_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (!priv->wep_is_on)
+ dwrq->flags = IW_ENCODE_DISABLED;
+ else if (priv->exclude_unencrypted)
+ dwrq->flags = IW_ENCODE_RESTRICTED;
+ else
+ dwrq->flags = IW_ENCODE_OPEN;
+
+ /* Which key do we want ? -1 -> tx index */
+ if (index < 0 || index >= 4)
+ index = priv->default_key;
+ dwrq->flags |= index + 1;
+ /* Copy the key to the user buffer */
+ dwrq->length = priv->wep_key_len[index];
+ if (dwrq->length > 16) {
+ dwrq->length = 0;
+ } else {
+ memset(extra, 0, 16);
+ memcpy(extra, priv->wep_keys[index], dwrq->length);
+ }
+
+ return 0;
+}
+
+static int atmel_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ char *cwrq,
+ char *extra)
+{
+ strcpy(cwrq, "IEEE 802.11-DS");
+ return 0;
+}
+
+static int atmel_set_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ if (vwrq->fixed == 0) {
+ priv->tx_rate = 3;
+ priv->auto_tx_rate = 1;
+ } else {
+ priv->auto_tx_rate = 0;
+
+ /* Which type of value ? */
+ if((vwrq->value < 4) && (vwrq->value >= 0)) {
+ /* Setting by rate index */
+ priv->tx_rate = vwrq->value;
+ } else {
+ /* Setting by frequency value */
+ switch (vwrq->value) {
+ case 1000000: priv->tx_rate = 0; break;
+ case 2000000: priv->tx_rate = 1; break;
+ case 5500000: priv->tx_rate = 2; break;
+ case 11000000: priv->tx_rate = 3; break;
+ default: return -EINVAL;
+ }
+ }
+ }
+
+ return -EINPROGRESS;
+}
+
+static int atmel_set_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA)
+ return -EINVAL;
+
+ priv->operating_mode = *uwrq;
+ return -EINPROGRESS;
+}
+
+static int atmel_get_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 *uwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ *uwrq = priv->operating_mode;
+ return 0;
+}
+
+static int atmel_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ if (priv->auto_tx_rate) {
+ vwrq->fixed = 0;
+ vwrq->value = 11000000;
+ } else {
+ vwrq->fixed = 1;
+ switch(priv->tx_rate) {
+ case 0: vwrq->value = 1000000; break;
+ case 1: vwrq->value = 2000000; break;
+ case 2: vwrq->value = 5500000; break;
+ case 3: vwrq->value = 11000000; break;
+ }
+ }
+ return 0;
+}
+
+static int atmel_set_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ priv->power_mode = vwrq->disabled ? 0 : 1;
+ return -EINPROGRESS;
+}
+
+static int atmel_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ vwrq->disabled = priv->power_mode ? 0 : 1;
+ vwrq->flags = IW_POWER_ON;
+ return 0;
+}
+
+static int atmel_set_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ if(!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
+ if(vwrq->flags & IW_RETRY_MAX)
+ priv->long_retry = vwrq->value;
+ else if (vwrq->flags & IW_RETRY_MIN)
+ priv->short_retry = vwrq->value;
+ else {
+ /* No modifier : set both */
+ priv->long_retry = vwrq->value;
+ priv->short_retry = vwrq->value;
+ }
+ return -EINPROGRESS;
+ }
+
+ return -EINVAL;
+}
+
+static int atmel_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ vwrq->disabled = 0; /* Can't be disabled */
+
+ /* Note : by default, display the min retry number */
+ if((vwrq->flags & IW_RETRY_MAX)) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = priv->long_retry;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = priv->short_retry;
+ if(priv->long_retry != priv->short_retry)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+
+static int atmel_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int rthr = vwrq->value;
+
+ if(vwrq->disabled)
+ rthr = 2347;
+ if((rthr < 0) || (rthr > 2347)) {
+ return -EINVAL;
+ }
+ priv->rts_threshold = rthr;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+static int atmel_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ vwrq->value = priv->rts_threshold;
+ vwrq->disabled = (vwrq->value >= 2347);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static int atmel_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int fthr = vwrq->value;
+
+ if(vwrq->disabled)
+ fthr = 2346;
+ if((fthr < 256) || (fthr > 2346)) {
+ return -EINVAL;
+ }
+ fthr &= ~0x1; /* Get an even value - is it really needed ??? */
+ priv->frag_threshold = fthr;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+static int atmel_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ vwrq->value = priv->frag_threshold;
+ vwrq->disabled = (vwrq->value >= 2346);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+
+static int atmel_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int rc = -EINPROGRESS; /* Call commit handler */
+
+ /* If setting by frequency, convert to a channel */
+ if((fwrq->e == 1) &&
+ (fwrq->m >= (int) 241200000) &&
+ (fwrq->m <= (int) 248700000)) {
+ int f = fwrq->m / 100000;
+ int c = 0;
+ while((c < 14) && (f != frequency_list[c]))
+ c++;
+ /* Hack to fall through... */
+ fwrq->e = 0;
+ fwrq->m = c + 1;
+ }
+ /* Setting by channel number */
+ if((fwrq->m > 1000) || (fwrq->e > 0))
+ rc = -EOPNOTSUPP;
+ else {
+ int channel = fwrq->m;
+ if (atmel_validate_channel(priv, channel) == 0) {
+ priv->channel = channel;
+ } else {
+ rc = -EINVAL;
+ }
+ }
+ return rc;
+}
+
+static int atmel_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *fwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+
+ fwrq->m = priv->channel;
+ fwrq->e = 0;
+ return 0;
+}
+
+static int atmel_set_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Note : you may have realised that, as this is a SET operation,
+ * this is privileged and therefore a normal user can't
+ * perform scanning.
+ * This is not an error: while the device performs scanning,
+ * traffic doesn't flow, so it would be a perfect DoS...
+ * Jean II */
+
+ if (priv->station_state == STATION_STATE_DOWN)
+ return -EAGAIN;
+
+ /* Timeout old surveys. */
+ if ((jiffies - priv->last_survey) > (20 * HZ))
+ priv->site_survey_state = SITE_SURVEY_IDLE;
+ priv->last_survey = jiffies;
+
+ /* Initiate a scan command */
+ if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS)
+ return -EBUSY;
+
+ del_timer_sync(&priv->management_timer);
+ spin_lock_irqsave(&priv->irqlock, flags);
+
+ priv->site_survey_state = SITE_SURVEY_IN_PROGRESS;
+ priv->fast_scan = 0;
+ atmel_scan(priv, 0);
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ return 0;
+}
+
+static int atmel_get_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int i;
+ char *current_ev = extra;
+ struct iw_event iwe;
+
+ if (priv->site_survey_state != SITE_SURVEY_COMPLETED)
+ return -EAGAIN;
+
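+ /* Translate the firmware's BSS list into a stream of wireless-extensions
+    events: AP address, ESSID, mode, channel and encryption flag for each
+    entry. */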
+ for(i=0; i<priv->BSS_list_entries; i++) {
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+ current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_ADDR_LEN);
+
+ iwe.u.data.length = priv->BSSinfo[i].SSIDsize;
+ if (iwe.u.data.length > 32)
+ iwe.u.data.length = 32;
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, priv->BSSinfo[i].SSID);
+
+ iwe.cmd = SIOCGIWMODE;
+ iwe.u.mode = priv->BSSinfo[i].BSStype;
+ current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN);
+
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = priv->BSSinfo[i].channel;
+ iwe.u.freq.e = 0;
+ current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN);
+
+ iwe.cmd = SIOCGIWENCODE;
+ if (priv->BSSinfo[i].UsingWEP)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ current_ev = iwe_stream_add_point(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL);
+
+ }
+
+ /* Length of data */
+ dwrq->length = (current_ev - extra);
+ dwrq->flags = 0;
+
+ return 0;
+}
+
+static int atmel_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct iw_range *range = (struct iw_range *) extra;
+ int k,i,j;
+
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(struct iw_range));
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x0000;
+ range->num_channels = 0;
+ for (j = 0; j < sizeof(channel_table)/sizeof(channel_table[0]); j++)
+ if (priv->reg_domain == channel_table[j].reg_domain) {
+ range->num_channels = channel_table[j].max - channel_table[j].min + 1;
+ break;
+ }
+ if (range->num_channels != 0) {
+ for(k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
+ range->freq[k].i = i; /* List index */
+ range->freq[k].m = frequency_list[i-1] * 100000;
+ range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ }
+ range->num_frequency = k;
+ }
+
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+ range->max_qual.noise = 0;
+ range->max_qual.updated = IW_QUAL_NOISE_INVALID;
+
+ range->avg_qual.qual = 50;
+ range->avg_qual.level = 50;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = IW_QUAL_NOISE_INVALID;
+
+ range->sensitivity = 0;
+
+ range->bitrate[0] = 1000000;
+ range->bitrate[1] = 2000000;
+ range->bitrate[2] = 5500000;
+ range->bitrate[3] = 11000000;
+ range->num_bitrates = 4;
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->encoding_size[0] = 5;
+ range->encoding_size[1] = 13;
+ range->num_encoding_sizes = 2;
+ range->max_encoding_tokens = 4;
+
+ range->pmp_flags = IW_POWER_ON;
+ range->pmt_flags = IW_POWER_ON;
+ range->pm_capa = 0;
+
+ range->we_version_source = WIRELESS_EXT;
+ range->we_version_compiled = WIRELESS_EXT;
+ range->retry_capa = IW_RETRY_LIMIT ;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+ range->min_retry = 1;
+ range->max_retry = 65535;
+
+ return 0;
+}
+
+static int atmel_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *awrq,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ int i;
+ static const u8 bcast[] = { 255, 255, 255, 255, 255, 255 };
+ unsigned long flags;
+
+ if (awrq->sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ if (memcmp(bcast, awrq->sa_data, 6) == 0) {
+ del_timer_sync(&priv->management_timer);
+ spin_lock_irqsave(&priv->irqlock, flags);
+ atmel_scan(priv, 1);
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+ return 0;
+ }
+
+ for(i=0; i<priv->BSS_list_entries; i++) {
+ if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) {
+ if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) {
+ return -EINVAL;
+ } else if (priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) {
+ return -EINVAL;
+ } else {
+ del_timer_sync(&priv->management_timer);
+ spin_lock_irqsave(&priv->irqlock, flags);
+ atmel_join_bss(priv, i);
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int atmel_config_commit(struct net_device *dev,
+ struct iw_request_info *info, /* NULL */
+ void *zwrq, /* NULL */
+ char *extra) /* NULL */
+{
+ return atmel_open(dev);
+}
+
+static const iw_handler atmel_handler[] =
+{
+ (iw_handler) atmel_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) atmel_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) atmel_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) atmel_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) atmel_set_mode, /* SIOCSIWMODE */
+ (iw_handler) atmel_get_mode, /* SIOCGIWMODE */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) atmel_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) atmel_set_wap, /* SIOCSIWAP */
+ (iw_handler) atmel_get_wap, /* SIOCGIWAP */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCGIWAPLIST */
+ (iw_handler) atmel_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) atmel_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) atmel_set_essid, /* SIOCSIWESSID */
+ (iw_handler) atmel_get_essid, /* SIOCGIWESSID */
+ (iw_handler) NULL, /* SIOCSIWNICKN */
+ (iw_handler) NULL, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) atmel_set_rate, /* SIOCSIWRATE */
+ (iw_handler) atmel_get_rate, /* SIOCGIWRATE */
+ (iw_handler) atmel_set_rts, /* SIOCSIWRTS */
+ (iw_handler) atmel_get_rts, /* SIOCGIWRTS */
+ (iw_handler) atmel_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) atmel_get_frag, /* SIOCGIWFRAG */
+ (iw_handler) NULL, /* SIOCSIWTXPOW */
+ (iw_handler) NULL, /* SIOCGIWTXPOW */
+ (iw_handler) atmel_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) atmel_get_retry, /* SIOCGIWRETRY */
+ (iw_handler) atmel_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) atmel_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) atmel_set_power, /* SIOCSIWPOWER */
+ (iw_handler) atmel_get_power, /* SIOCGIWPOWER */
+};
+
+
+static const iw_handler atmel_private_handler[] =
+{
+ NULL, /* SIOCIWFIRSTPRIV */
+};
+
+typedef struct atmel_priv_ioctl {
+ char id[32];
+ unsigned char __user *data;
+ unsigned short len;
+} atmel_priv_ioctl;
+
+
+#define ATMELFWL SIOCIWFIRSTPRIV
+#define ATMELIDIFC (ATMELFWL + 1)
+#define ATMELRD (ATMELFWL + 2)
+#define ATMELMAGIC 0x51807
+#define REGDOMAINSZ 20
+
+static const struct iw_priv_args atmel_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ { ATMELFWL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (atmel_priv_ioctl), IW_PRIV_TYPE_NONE, "atmelfwl" },
+ { ATMELIDIFC, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "atmelidifc" },
+ { ATMELRD, IW_PRIV_TYPE_CHAR | REGDOMAINSZ, IW_PRIV_TYPE_NONE, "regdomain" },
+};
+
+static const struct iw_handler_def atmel_handler_def =
+{
+ .num_standard = sizeof(atmel_handler)/sizeof(iw_handler),
+ .num_private = sizeof(atmel_private_handler)/sizeof(iw_handler),
+ .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
+ .standard = (iw_handler *) atmel_handler,
+ .private = (iw_handler *) atmel_private_handler,
+ .private_args = (struct iw_priv_args *) atmel_private_args
+};
+
+static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int i, rc = 0;
+ struct atmel_private *priv = netdev_priv(dev);
+ atmel_priv_ioctl com;
+ struct iwreq *wrq = (struct iwreq *) rq;
+ unsigned char *new_firmware;
+ char domain[REGDOMAINSZ+1];
+
+ switch (cmd) {
+ case SIOCGIWPRIV:
+ if(wrq->u.data.pointer) {
+ /* Set the number of ioctl available */
+ wrq->u.data.length = sizeof(atmel_private_args) / sizeof(atmel_private_args[0]);
+
+ /* Copy structure to the user buffer */
+ if (copy_to_user(wrq->u.data.pointer,
+ (u_char *) atmel_private_args,
+ sizeof(atmel_private_args)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case ATMELIDIFC:
+ wrq->u.param.value = ATMELMAGIC;
+ break;
+
+ case ATMELFWL:
+ if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ rc = -EPERM;
+ break;
+ }
+
+ if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(new_firmware, com.data, com.len)) {
+ kfree(new_firmware);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (priv->firmware)
+ kfree(priv->firmware);
+
+ priv->firmware = new_firmware;
+ priv->firmware_length = com.len;
+ strncpy(priv->firmware_id, com.id, 31);
+ priv->firmware_id[31] = '\0';
+ break;
+
+ case ATMELRD:
+ if (copy_from_user(domain, rq->ifr_data, REGDOMAINSZ)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ rc = -EPERM;
+ break;
+ }
+
+ domain[REGDOMAINSZ] = 0;
+ rc = -EINVAL;
+ for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) {
+ /* strcasecmp doesn't exist in the library */
+ char *a = channel_table[i].name;
+ char *b = domain;
+ while (*a) {
+ char c1 = *a++;
+ char c2 = *b++;
+ if (tolower(c1) != tolower(c2))
+ break;
+ }
+ if (!*a && !*b) {
+ priv->config_reg_domain = channel_table[i].reg_domain;
+ rc = 0;
+ }
+ }
+
+ if (rc == 0 && priv->station_state != STATION_STATE_DOWN)
+ rc = atmel_open(dev);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+struct auth_body {
+ u16 alg;
+ u16 trans_seq;
+ u16 status;
+ u8 el_id;
+ u8 chall_text_len;
+ u8 chall_text[253];
+};
+
+static void atmel_enter_state(struct atmel_private *priv, int new_state)
+{
+ int old_state = priv->station_state;
+
+ if (new_state == old_state)
+ return;
+
+ priv->station_state = new_state;
+
+ if (new_state == STATION_STATE_READY) {
+ netif_start_queue(priv->dev);
+ netif_carrier_on(priv->dev);
+ }
+
+ if (old_state == STATION_STATE_READY) {
+ netif_carrier_off(priv->dev);
+ if (netif_running(priv->dev))
+ netif_stop_queue(priv->dev);
+ priv->last_beacon_timestamp = 0;
+ }
+}
+
+static void atmel_scan(struct atmel_private *priv, int specific_ssid)
+{
+ struct {
+ u8 BSSID[6];
+ u8 SSID[MAX_SSID_LENGTH];
+ u8 scan_type;
+ u8 channel;
+ u16 BSS_type;
+ u16 min_channel_time;
+ u16 max_channel_time;
+ u8 options;
+ u8 SSID_size;
+ } cmd;
+
+ memset(cmd.BSSID, 0xff, 6);
+
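+ /* A fast scan looks only for the currently configured SSID and uses
+    short dwell times; otherwise the BSS list is cleared and a broadcast
+    scan with a longer maximum channel time is issued. */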
+ if (priv->fast_scan) {
+ cmd.SSID_size = priv->SSID_size;
+ memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
+ cmd.min_channel_time = cpu_to_le16(10);
+ cmd.max_channel_time = cpu_to_le16(50);
+ } else {
+ priv->BSS_list_entries = 0;
+ cmd.SSID_size = 0;
+ cmd.min_channel_time = cpu_to_le16(10);
+ cmd.max_channel_time = cpu_to_le16(120);
+ }
+
+ cmd.options = 0;
+
+ if (!specific_ssid)
+ cmd.options |= SCAN_OPTIONS_SITE_SURVEY;
+
+ cmd.channel = (priv->channel & 0x7f);
+ cmd.scan_type = SCAN_TYPE_ACTIVE;
+ cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ?
+ BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE);
+
+ atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd));
+
+ /* This must come after all hardware access to avoid being messed up
+ by stuff happening in interrupt context after we leave STATE_DOWN */
+ atmel_enter_state(priv, STATION_STATE_SCANNING);
+}
+
+static void join(struct atmel_private *priv, int type)
+{
+ struct {
+ u8 BSSID[6];
+ u8 SSID[MAX_SSID_LENGTH];
+ u8 BSS_type; /* this is a short in a scan command - weird */
+ u8 channel;
+ u16 timeout;
+ u8 SSID_size;
+ u8 reserved;
+ } cmd;
+
+ cmd.SSID_size = priv->SSID_size;
+ memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
+ memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+ cmd.channel = (priv->channel & 0x7f);
+ cmd.BSS_type = type;
+ cmd.timeout = cpu_to_le16(2000);
+
+ atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd));
+}
+
+
+static void start(struct atmel_private *priv, int type)
+{
+ struct {
+ u8 BSSID[6];
+ u8 SSID[MAX_SSID_LENGTH];
+ u8 BSS_type;
+ u8 channel;
+ u8 SSID_size;
+ u8 reserved[3];
+ } cmd;
+
+ cmd.SSID_size = priv->SSID_size;
+ memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
+ memcpy(cmd.BSSID, priv->BSSID, 6);
+ cmd.BSS_type = type;
+ cmd.channel = (priv->channel & 0x7f);
+
+ atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd));
+}
+
+static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 channel)
+{
+ int rejoin = 0;
+ int new = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
+ SHORT_PREAMBLE : LONG_PREAMBLE;
+
+ if (priv->preamble != new) {
+ priv->preamble = new;
+ rejoin = 1;
+ atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new);
+ }
+
+ if (priv->channel != channel) {
+ priv->channel = channel;
+ rejoin = 1;
+ atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel);
+ }
+
+ if (rejoin) {
+ priv->station_is_associated = 0;
+ atmel_enter_state(priv, STATION_STATE_JOINNING);
+
+ if (priv->operating_mode == IW_MODE_INFRA)
+ join(priv, BSS_TYPE_INFRASTRUCTURE);
+ else
+ join(priv, BSS_TYPE_AD_HOC);
+ }
+}
+
+
+static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
+{
+ struct ieee802_11_hdr header;
+ struct auth_body auth;
+
+ header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | IEEE802_11_STYPE_AUTH);
+ header.duration_id = cpu_to_le16(0x8000);
+ header.seq_ctl = 0;
+ memcpy(header.addr1, priv->CurrentBSSID, 6);
+ memcpy(header.addr2, priv->dev->dev_addr, 6);
+ memcpy(header.addr3, priv->CurrentBSSID, 6);
+
+ if (priv->wep_is_on) {
+ auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY);
+ /* no WEP for authentication frames with TrSeqNo 1 */
+ if (priv->CurrentAuthentTransactionSeqNum != 1)
+ header.frame_ctl |= cpu_to_le16(IEEE802_11_FCTL_WEP);
+ } else {
+ auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
+ }
+
+ auth.status = 0;
+ auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum);
+ priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1;
+ priv->CurrentAuthentTransactionSeqNum += 2;
+
+ if (challenge_len != 0) {
+ auth.el_id = 16; /* challenge_text */
+ auth.chall_text_len = challenge_len;
+ memcpy(auth.chall_text, challenge, challenge_len);
+ atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 8 + challenge_len);
+ } else {
+ atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 6);
+ }
+}
+
+static void send_association_request(struct atmel_private *priv, int is_reassoc)
+{
+ u8 *ssid_el_p;
+ int bodysize;
+ struct ieee802_11_hdr header;
+ struct ass_req_format {
+ u16 capability;
+ u16 listen_interval;
+ u8 ap[6]; /* nothing after here directly accessible */
+ u8 ssid_el_id;
+ u8 ssid_len;
+ u8 ssid[MAX_SSID_LENGTH];
+ u8 sup_rates_el_id;
+ u8 sup_rates_len;
+ u8 rates[4];
+ } body;
+
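+ /* The SSID and supported-rates elements are appended by hand: in a
+    reassociation request they follow the current-AP field, in a plain
+    association request they start where that field would have been,
+    hence the two body sizes computed below. */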
+ header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT |
+ (is_reassoc ? IEEE802_11_STYPE_REASSOC_REQ : IEEE802_11_STYPE_ASSOC_REQ));
+ header.duration_id = cpu_to_le16(0x8000);
+ header.seq_ctl = 0;
+
+ memcpy(header.addr1, priv->CurrentBSSID, 6);
+ memcpy(header.addr2, priv->dev->dev_addr, 6);
+ memcpy(header.addr3, priv->CurrentBSSID, 6);
+
+ body.capability = cpu_to_le16(C80211_MGMT_CAPABILITY_ESS);
+ if (priv->wep_is_on)
+ body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_Privacy);
+ if (priv->preamble == SHORT_PREAMBLE)
+ body.capability |= cpu_to_le16(C80211_MGMT_CAPABILITY_ShortPreamble);
+
+ body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
+
+ /* current AP address - only in reassoc frame */
+ if (is_reassoc) {
+ memcpy(body.ap, priv->CurrentBSSID, 6);
+ ssid_el_p = (u8 *)&body.ssid_el_id;
+ bodysize = 18 + priv->SSID_size;
+ } else {
+ ssid_el_p = (u8 *)&body.ap[0];
+ bodysize = 12 + priv->SSID_size;
+ }
+
+ ssid_el_p[0]= C80211_MGMT_ElementID_SSID;
+ ssid_el_p[1] = priv->SSID_size;
+ memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
+ ssid_el_p[2 + priv->SSID_size] = C80211_MGMT_ElementID_SupportedRates;
+ ssid_el_p[3 + priv->SSID_size] = 4; /* length of supported rates */
+ memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
+
+ atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
+}
+
+static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee802_11_hdr *header)
+{
+ if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+ return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
+ else
+ return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
+}
+
+static int retrieve_bss(struct atmel_private *priv)
+{
+ int i;
+ int max_rssi = -128;
+ int max_index = -1;
+
+ if (priv->BSS_list_entries == 0)
+ return -1;
+
+ if (priv->connect_to_any_BSS) {
+ /* Select the BSS with the maximum RSSI that is of the same type and the
+ same WEP mode, and that is not marked as 'bad' (i.e. we previously
+ failed to connect to it with the settings we currently use). */
+ priv->current_BSS = 0;
+ for (i = 0; i < priv->BSS_list_entries; i++) {
+ if (priv->operating_mode == priv->BSSinfo[i].BSStype &&
+ ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) ||
+ (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) &&
+ !(priv->BSSinfo[i].channel & 0x80) &&
+ priv->BSSinfo[i].RSSI >= max_rssi) {
+ max_rssi = priv->BSSinfo[i].RSSI;
+ priv->current_BSS = max_index = i;
+ }
+ }
+ return max_index;
+ }
+
+ for(i=0; i<priv->BSS_list_entries; i++) {
+ if (priv->SSID_size == priv->BSSinfo[i].SSIDsize &&
+ memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 &&
+ priv->operating_mode == priv->BSSinfo[i].BSStype &&
+ atmel_validate_channel(priv, priv->BSSinfo[i].channel) == 0) {
+ if (priv->BSSinfo[i].RSSI >= max_rssi) {
+ max_rssi = priv->BSSinfo[i].RSSI;
+ max_index = i;
+ }
+ }
+ }
+ return max_index;
+}
+
+
+static void store_bss_info(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u16 capability, u16 beacon_period, u8 channel, u8 rssi,
+ u8 ssid_len, u8 *ssid, int is_beacon)
+{
+ u8 *bss = capability & C80211_MGMT_CAPABILITY_ESS ? header->addr2 : header->addr3;
+ int i, index;
+
+ for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
+ if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+ index = i;
+
+ /* If we process a probe response and an entry for this BSS already
+ exists, we update that entry with the info from this frame.
+ If we process a beacon, we only update the RSSI. */
+
+ if (index == -1) {
+ if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
+ return;
+ index = priv->BSS_list_entries++;
+ memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+ priv->BSSinfo[index].RSSI = rssi;
+ } else {
+ if (rssi > priv->BSSinfo[index].RSSI)
+ priv->BSSinfo[index].RSSI = rssi;
+ if (is_beacon)
+ return;
+ }
+
+ priv->BSSinfo[index].channel = channel;
+ priv->BSSinfo[index].beacon_period = beacon_period;
+ priv->BSSinfo[index].UsingWEP = capability & C80211_MGMT_CAPABILITY_Privacy;
+ memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len);
+ priv->BSSinfo[index].SSIDsize = ssid_len;
+
+ if (capability & C80211_MGMT_CAPABILITY_IBSS)
+ priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
+ else if (capability & C80211_MGMT_CAPABILITY_ESS)
+ priv->BSSinfo[index].BSStype = IW_MODE_INFRA;
+
+ priv->BSSinfo[index].preamble = capability & C80211_MGMT_CAPABILITY_ShortPreamble ?
+ SHORT_PREAMBLE : LONG_PREAMBLE;
+}
+
+static void authenticate(struct atmel_private *priv, u16 frame_len)
+{
+ struct auth_body *auth = (struct auth_body *)priv->rx_buf;
+ u16 status = le16_to_cpu(auth->status);
+ u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
+
+ if (status == C80211_MGMT_SC_Success && !priv->wep_is_on) {
+ /* no WEP */
+ if (priv->station_was_associated) {
+ atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
+ send_association_request(priv, 1);
+ return;
+ } else {
+ atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
+ send_association_request(priv, 0);
+ return;
+ }
+ }
+
+ if (status == C80211_MGMT_SC_Success && priv->wep_is_on) {
+ /* WEP */
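+ /* Shared-key exchange: frame 2 carries the challenge text, which we
+    send back (encrypted, see send_authentication_request); frame 4
+    carries the final verdict. */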
+ if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
+ return;
+
+ if (trans_seq_no == 0x0002 &&
+ auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
+ send_authentication_request(priv, auth->chall_text, auth->chall_text_len);
+ return;
+ }
+
+ if (trans_seq_no == 0x0004) {
+ if(priv->station_was_associated) {
+ atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
+ send_association_request(priv, 1);
+ return;
+ } else {
+ atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
+ send_association_request(priv, 0);
+ return;
+ }
+ }
+ }
+
+ if (status == C80211_MGMT_SC_AuthAlgNotSupported && priv->connect_to_any_BSS) {
+ int bss_index;
+
+ priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
+
+ if ((bss_index = retrieve_bss(priv)) != -1) {
+ atmel_join_bss(priv, bss_index);
+ return;
+ }
+ }
+
+
+ priv->AuthenticationRequestRetryCnt = 0;
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+}
+
+static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
+{
+ struct ass_resp_format {
+ u16 capability;
+ u16 status;
+ u16 ass_id;
+ u8 el_id;
+ u8 length;
+ u8 rates[4];
+ } *ass_resp = (struct ass_resp_format *)priv->rx_buf;
+
+ u16 status = le16_to_cpu(ass_resp->status);
+ u16 ass_id = le16_to_cpu(ass_resp->ass_id);
+ u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
+
+ if (frame_len < 8 + rates_len)
+ return;
+
+ if (status == C80211_MGMT_SC_Success) {
+ if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE)
+ priv->AssociationRequestRetryCnt = 0;
+ else
+ priv->ReAssociationRequestRetryCnt = 0;
+
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
+ atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
+ if (priv->power_mode == 0) {
+ priv->listen_interval = 1;
+ atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
+ } else {
+ priv->listen_interval = 2;
+ atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
+ }
+
+ priv->station_is_associated = 1;
+ priv->station_was_associated = 1;
+ atmel_enter_state(priv, STATION_STATE_READY);
+ return;
+ }
+
+ if (subtype == C80211_SUBTYPE_MGMT_ASS_RESPONSE &&
+ status != C80211_MGMT_SC_AssDeniedBSSRate &&
+ status != C80211_MGMT_SC_SupportCapabilities &&
+ priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ priv->AssociationRequestRetryCnt++;
+ send_association_request(priv, 0);
+ return;
+ }
+
+ if (subtype == C80211_SUBTYPE_MGMT_REASS_RESPONSE &&
+ status != C80211_MGMT_SC_AssDeniedBSSRate &&
+ status != C80211_MGMT_SC_SupportCapabilities &&
+ priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ priv->ReAssociationRequestRetryCnt++;
+ send_association_request(priv, 1);
+ return;
+ }
+
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+
+ if (priv->connect_to_any_BSS) {
+ int bss_index;
+ priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
+
+ if ((bss_index = retrieve_bss(priv)) != -1)
+ atmel_join_bss(priv, bss_index);
+
+ }
+}
+
+void atmel_join_bss(struct atmel_private *priv, int bss_index)
+{
+ struct bss_info *bss = &priv->BSSinfo[bss_index];
+
+ memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+ memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
+
+ /* The WPA stuff cares about the current AP address */
+ if (priv->use_wpa)
+ build_wpa_mib(priv);
+
+ /* When switching to AdHoc turn OFF Power Save if needed */
+
+ if (bss->BSStype == IW_MODE_ADHOC &&
+ priv->operating_mode != IW_MODE_ADHOC &&
+ priv->power_mode) {
+ priv->power_mode = 0;
+ priv->listen_interval = 1;
+ atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
+ }
+
+ priv->operating_mode = bss->BSStype;
+ priv->channel = bss->channel & 0x7f;
+ priv->beacon_period = bss->beacon_period;
+
+ if (priv->preamble != bss->preamble) {
+ priv->preamble = bss->preamble;
+ atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
+ }
+
+ if (!priv->wep_is_on && bss->UsingWEP) {
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+ return;
+ }
+
+ if (priv->wep_is_on && !bss->UsingWEP) {
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+ return;
+ }
+
+ atmel_enter_state(priv, STATION_STATE_JOINNING);
+
+ if (priv->operating_mode == IW_MODE_INFRA)
+ join(priv, BSS_TYPE_INFRASTRUCTURE);
+ else
+ join(priv, BSS_TYPE_AD_HOC);
+}
+
+
+static void restart_search(struct atmel_private *priv)
+{
+ int bss_index;
+
+ if (!priv->connect_to_any_BSS) {
+ atmel_scan(priv, 1);
+ } else {
+ priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
+
+ if ((bss_index = retrieve_bss(priv)) != -1)
+ atmel_join_bss(priv, bss_index);
+ else
+ atmel_scan(priv, 0);
+
+ }
+}
+
+static void smooth_rssi(struct atmel_private *priv, u8 rssi)
+{
+ u8 old = priv->wstats.qual.level;
+ u8 max_rssi = 42; /* 502-rmfd-revd max by experiment, default for now */
+
+ switch (priv->firmware_type) {
+ case ATMEL_FW_TYPE_502E:
+ max_rssi = 63; /* 502-rmfd-reve max by experiment */
+ break;
+ default:
+ break;
+ }
+
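+ /* Scale the raw reading to a percentage of the hardware maximum, then
+    average it with the previous level, rounding up: e.g. a raw value of
+    21 on rev-D hardware (max 42) scales to 50; averaged with a previous
+    level of 61 this gives (50 + 61)/2 rounded up = 56. */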
+ rssi = rssi * 100 / max_rssi;
+ if ((rssi + old) % 2)
+ priv->wstats.qual.level = ((rssi + old)/2) + 1;
+ else
+ priv->wstats.qual.level = ((rssi + old)/2);
+ priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
+ priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID;
+}
+
+static void atmel_smooth_qual(struct atmel_private *priv)
+{
+ unsigned long time_diff = (jiffies - priv->last_qual)/HZ;
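+ /* Each elapsed second halves the quality and then tops it up in
+    proportion to the beacons seen and the signal level: e.g. 10 beacons
+    in a second with beacon_period 100 and level 50 add
+    10 * 100 * (50 + 100) / 4000 = 37. */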
+ while (time_diff--) {
+ priv->last_qual += HZ;
+ priv->wstats.qual.qual = priv->wstats.qual.qual/2;
+ priv->wstats.qual.qual +=
+ priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000;
+ priv->beacons_this_sec = 0;
+ }
+ priv->wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
+ priv->wstats.qual.updated &= ~IW_QUAL_QUAL_INVALID;
+}
+
+/* deals with incoming management frames. */
+static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header,
+ u16 frame_len, u8 rssi)
+{
+ u16 subtype;
+
+ switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_STYPE) {
+ case C80211_SUBTYPE_MGMT_BEACON :
+ case C80211_SUBTYPE_MGMT_ProbeResponse:
+
+ /* beacon frame has multiple variable-length fields -
+ never let an engineer loose with a data structure design. */
+ {
+ struct beacon_format {
+ u64 timestamp;
+ u16 interval;
+ u16 capability;
+ u8 ssid_el_id;
+ u8 ssid_length;
+ /* ssid here */
+ u8 rates_el_id;
+ u8 rates_length;
+ /* rates here */
+ u8 ds_el_id;
+ u8 ds_length;
+ /* ds here */
+ } *beacon = (struct beacon_format *)priv->rx_buf;
+
+ u8 channel, rates_length, ssid_length;
+ u64 timestamp = le64_to_cpu(beacon->timestamp);
+ u16 beacon_interval = le16_to_cpu(beacon->interval);
+ u16 capability = le16_to_cpu(beacon->capability);
+ u8 *beaconp = priv->rx_buf;
+ ssid_length = beacon->ssid_length;
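+ /* The fixed part of the body is 12 bytes (timestamp, interval,
+    capability), so the SSID element starts at offset 12 and its data at
+    offset 14; the rates length byte is then at ssid_length + 15 and the
+    DS channel byte at ssid_length + rates_length + 18. */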
+ /* this blows chunks. */
+ if (frame_len < 14 || frame_len < ssid_length + 15)
+ return;
+ rates_length = beaconp[beacon->ssid_length + 15];
+ if (frame_len < ssid_length + rates_length + 18)
+ return;
+ if (ssid_length > MAX_SSID_LENGTH)
+ return;
+ channel = beaconp[ssid_length + rates_length + 18];
+
+ if (priv->station_state == STATION_STATE_READY) {
+ smooth_rssi(priv, rssi);
+ if (is_frame_from_current_bss(priv, header)) {
+ priv->beacons_this_sec++;
+ atmel_smooth_qual(priv);
+ if (priv->last_beacon_timestamp) {
+ /* Note truncate this to 32 bits - kernel can't divide a long long */
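+ /* The TSF timestamp is in microseconds; the beacon interval is
+    nominally in TU (1024us), approximated here as 1000us per unit. */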
+ u32 beacon_delay = timestamp - priv->last_beacon_timestamp;
+ int beacons = beacon_delay / (beacon_interval * 1000);
+ if (beacons > 1)
+ priv->wstats.miss.beacon += beacons - 1;
+ }
+ priv->last_beacon_timestamp = timestamp;
+ handle_beacon_probe(priv, capability, channel);
+ }
+ }
+
+ if (priv->station_state == STATION_STATE_SCANNING)
+ store_bss_info(priv, header, capability, beacon_interval, channel,
+ rssi, ssid_length, &beacon->rates_el_id,
+ subtype == C80211_SUBTYPE_MGMT_BEACON);
+ }
+ break;
+
+ case C80211_SUBTYPE_MGMT_Authentication:
+
+ if (priv->station_state == STATION_STATE_AUTHENTICATING)
+ authenticate(priv, frame_len);
+
+ break;
+
+ case C80211_SUBTYPE_MGMT_ASS_RESPONSE:
+ case C80211_SUBTYPE_MGMT_REASS_RESPONSE:
+
+ if (priv->station_state == STATION_STATE_ASSOCIATING ||
+ priv->station_state == STATION_STATE_REASSOCIATING)
+ associate(priv, frame_len, subtype);
+
+ break;
+
+ case C80211_SUBTYPE_MGMT_DISASSOSIATION:
+ if (priv->station_is_associated &&
+ priv->operating_mode == IW_MODE_INFRA &&
+ is_frame_from_current_bss(priv, header)) {
+ priv->station_was_associated = 0;
+ priv->station_is_associated = 0;
+
+ atmel_enter_state(priv, STATION_STATE_JOINNING);
+ join(priv, BSS_TYPE_INFRASTRUCTURE);
+ }
+
+ break;
+
+ case C80211_SUBTYPE_MGMT_Deauthentication:
+ if (priv->operating_mode == IW_MODE_INFRA &&
+ is_frame_from_current_bss(priv, header)) {
+ priv->station_was_associated = 0;
+
+ atmel_enter_state(priv, STATION_STATE_JOINNING);
+ join(priv, BSS_TYPE_INFRASTRUCTURE);
+ }
+
+ break;
+ }
+}
+
+/* run when timer expires */
+static void atmel_management_timer(u_long a)
+{
+ struct net_device *dev = (struct net_device *) a;
+ struct atmel_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Check if the card has been yanked. */
+ if (priv->card && priv->present_callback &&
+ !(*priv->present_callback)(priv->card))
+ return;
+
+ spin_lock_irqsave(&priv->irqlock, flags);
+
+ switch (priv->station_state) {
+
+ case STATION_STATE_AUTHENTICATING:
+ if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+ priv->AuthenticationRequestRetryCnt = 0;
+ restart_search(priv);
+ } else {
+ priv->AuthenticationRequestRetryCnt++;
+ priv->CurrentAuthentTransactionSeqNum = 0x0001;
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ send_authentication_request(priv, NULL, 0);
+ }
+
+ break;
+
+ case STATION_STATE_ASSOCIATING:
+ if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+ priv->AssociationRequestRetryCnt = 0;
+ restart_search(priv);
+ } else {
+ priv->AssociationRequestRetryCnt++;
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ send_association_request(priv, 0);
+ }
+
+ break;
+
+ case STATION_STATE_REASSOCIATING:
+ if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
+ atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
+ priv->station_is_associated = 0;
+ priv->ReAssociationRequestRetryCnt = 0;
+ restart_search(priv);
+ } else {
+ priv->ReAssociationRequestRetryCnt++;
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ send_association_request(priv, 1);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+}
+
+static void atmel_command_irq(struct atmel_private *priv)
+{
+ u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
+ u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
+ int fast_scan;
+
+ if (status == CMD_STATUS_IDLE ||
+ status == CMD_STATUS_IN_PROGRESS)
+ return;
+
+ switch (command) {
+
+ case CMD_Start:
+ if (status == CMD_STATUS_COMPLETE) {
+ priv->station_was_associated = priv->station_is_associated;
+ atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
+ (u8 *)priv->CurrentBSSID, 6);
+ atmel_enter_state(priv, STATION_STATE_READY);
+ }
+ break;
+
+ case CMD_Scan:
+ fast_scan = priv->fast_scan;
+ priv->fast_scan = 0;
+
+ if (status != CMD_STATUS_COMPLETE) {
+ atmel_scan(priv, 1);
+ } else {
+ int bss_index = retrieve_bss(priv);
+ if (bss_index != -1) {
+ atmel_join_bss(priv, bss_index);
+ } else if (priv->operating_mode == IW_MODE_ADHOC &&
+ priv->SSID_size != 0) {
+ start(priv, BSS_TYPE_AD_HOC);
+ } else {
+ priv->fast_scan = !fast_scan;
+ atmel_scan(priv, 1);
+ }
+ priv->site_survey_state = SITE_SURVEY_COMPLETED;
+ }
+ break;
+
+ case CMD_SiteSurvey:
+ priv->fast_scan = 0;
+
+ if (status != CMD_STATUS_COMPLETE)
+ return;
+
+ priv->site_survey_state = SITE_SURVEY_COMPLETED;
+ if (priv->station_is_associated) {
+ atmel_enter_state(priv, STATION_STATE_READY);
+ } else {
+ atmel_scan(priv, 1);
+ }
+ break;
+
+ case CMD_Join:
+ if (status == CMD_STATUS_COMPLETE) {
+ if (priv->operating_mode == IW_MODE_ADHOC) {
+ priv->station_was_associated = priv->station_is_associated;
+ atmel_enter_state(priv, STATION_STATE_READY);
+ } else {
+ priv->AuthenticationRequestRetryCnt = 0;
+ atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
+
+ mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
+ priv->CurrentAuthentTransactionSeqNum = 0x0001;
+ send_authentication_request(priv, NULL, 0);
+ }
+ return;
+ }
+
+ atmel_scan(priv, 1);
+
+ }
+}
+
+static int atmel_wakeup_firmware(struct atmel_private *priv)
+{
+ struct host_info_struct *iface = &priv->host_info;
+ u16 mr1, mr3;
+ int i;
+
+ if (priv->card_type == CARD_TYPE_SPI_FLASH)
+ atmel_set_gcr(priv->dev, GCR_REMAP);
+
+ /* wake up on-board processor */
+ atmel_clear_gcr(priv->dev, 0x0040);
+ atmel_write16(priv->dev, BSR, BSS_SRAM);
+
+ if (priv->card_type == CARD_TYPE_SPI_FLASH)
+ mdelay(100);
+
+ /* and wait for it */
+ for (i = LOOP_RETRY_LIMIT; i; i--) {
+ mr1 = atmel_read16(priv->dev, MR1);
+ mr3 = atmel_read16(priv->dev, MR3);
+
+ if (mr3 & MAC_BOOT_COMPLETE)
+ break;
+ if (mr1 & MAC_BOOT_COMPLETE &&
+ priv->bus_type == BUS_TYPE_PCCARD)
+ break;
+ }
+
+ if (i == 0) {
+ printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
+ return 0;
+ }
+
+ if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
+ printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
+ return 0;
+ }
+
+ /* Now set FUNC_CTRL_INIT_COMPLETE in the FunCtrl field of the IFACE,
+    then poll MR1/MR3 to detect completion of MAC initialization and
+    check its status; the caller then sets the interrupt mask, enables
+    interrupts and initializes Tx and Rx. */
+
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE);
+
+ for (i = LOOP_RETRY_LIMIT; i; i--) {
+ mr1 = atmel_read16(priv->dev, MR1);
+ mr3 = atmel_read16(priv->dev, MR3);
+
+ if (mr3 & MAC_INIT_COMPLETE)
+ break;
+ if (mr1 & MAC_INIT_COMPLETE &&
+ priv->bus_type == BUS_TYPE_PCCARD)
+ break;
+ }
+
+ if (i == 0) {
+ printk(KERN_ALERT "%s: MAC failed to initialise.\n", priv->dev->name);
+ return 0;
+ }
+
+ /* Check for MAC_INIT_OK only in the register in which MAC_INIT_COMPLETE was seen */
+ if ((mr3 & MAC_INIT_COMPLETE) &&
+ !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
+ printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
+ return 0;
+ }
+ if ((mr1 & MAC_INIT_COMPLETE) &&
+ !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
+ printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
+ return 0;
+ }
+
+ atmel_copy_to_host(priv->dev, (unsigned char *)iface,
+ priv->host_info_base, sizeof(*iface));
+
+ iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos);
+ iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size);
+ iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos);
+ iface->tx_desc_count = le16_to_cpu(iface->tx_desc_count);
+ iface->rx_buff_pos = le16_to_cpu(iface->rx_buff_pos);
+ iface->rx_buff_size = le16_to_cpu(iface->rx_buff_size);
+ iface->rx_desc_pos = le16_to_cpu(iface->rx_desc_pos);
+ iface->rx_desc_count = le16_to_cpu(iface->rx_desc_count);
+ iface->build_version = le16_to_cpu(iface->build_version);
+ iface->command_pos = le16_to_cpu(iface->command_pos);
+ iface->major_version = le16_to_cpu(iface->major_version);
+ iface->minor_version = le16_to_cpu(iface->minor_version);
+ iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
+ iface->mac_status = le16_to_cpu(iface->mac_status);
+
+ return 1;
+}
+
+/* determine type of memory and MAC address */
+static int probe_atmel_card(struct net_device *dev)
+{
+ int rc = 0;
+ struct atmel_private *priv = netdev_priv(dev);
+
+ /* reset pccard */
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(dev, GCR, 0x0060);
+
+ atmel_write16(dev, GCR, 0x0040);
+ mdelay(500);
+
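+ /* The boot registers tell us what kind of card this is: MR2 == 0 means
+    no on-board firmware (an EEPROM card, so load a small stub), MR4 == 0
+    means parallel flash with the MAC address at 0xc000, otherwise it is
+    an SPI flash card with complete firmware on board. */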
+ if (atmel_read16(dev, MR2) == 0) {
+ /* No stored firmware so load a small stub which just
+ tells us the MAC address */
+ int i;
+ priv->card_type = CARD_TYPE_EEPROM;
+ atmel_write16(dev, BSR, BSS_IRAM);
+ atmel_copy_to_card(dev, 0, mac_reader, sizeof(mac_reader));
+ atmel_set_gcr(dev, GCR_REMAP);
+ atmel_clear_gcr(priv->dev, 0x0040);
+ atmel_write16(dev, BSR, BSS_SRAM);
+ for (i = LOOP_RETRY_LIMIT; i; i--)
+ if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE)
+ break;
+ if (i == 0) {
+ printk(KERN_ALERT "%s: MAC failed to boot MAC address reader.\n", dev->name);
+ } else {
+ atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6);
+ /* got address, now squash it again until the network
+ interface is opened */
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(dev, GCR, 0x0060);
+ atmel_write16(dev, GCR, 0x0040);
+ rc = 1;
+ }
+ } else if (atmel_read16(dev, MR4) == 0) {
+ /* The MAC address is easy to get in this case. */
+ priv->card_type = CARD_TYPE_PARALLEL_FLASH;
+ atmel_write16(dev, BSR, 1);
+ atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6);
+ atmel_write16(dev, BSR, 0x200);
+ rc = 1;
+ } else {
+ /* Standard firmware in flash, boot it up and ask
+    for the MAC address */
+ priv->card_type = CARD_TYPE_SPI_FLASH;
+ if (atmel_wakeup_firmware(priv)) {
+ atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
+
+ /* got address, now squash it again until the network
+ interface is opened */
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(dev, GCR, 0x0060);
+ atmel_write16(dev, GCR, 0x0040);
+ rc = 1;
+ }
+ }
+
+ if (rc) {
+ if (dev->dev_addr[0] == 0xFF) {
+ u8 default_mac[] = {0x00, 0x04, 0x25, 0x00, 0x00, 0x00};
+ printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ***\n", dev->name);
+ memcpy(dev->dev_addr, default_mac, 6);
+ }
+ printk(KERN_INFO "%s: MAC address %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ }
+
+ return rc;
+}
+
+static void build_wep_mib(struct atmel_private *priv)
+/* Move the encryption information into the MIB structure.
+ This routine is for the pre-WPA firmware: later firmware has
+ a different format MIB and a different routine. */
+{
+ struct { /* NB this is matched to the hardware, don't change. */
+ u8 wep_is_on;
+ u8 default_key; /* 0..3 */
+ u8 reserved;
+ u8 exclude_unencrypted;
+
+ u32 WEPICV_error_count;
+ u32 WEP_excluded_count;
+
+ u8 wep_keys[MAX_ENCRYPTION_KEYS][13];
+ u8 encryption_level; /* 0, 1, 2 */
+ u8 reserved2[3];
+ } mib;
+ int i;
+
+ mib.wep_is_on = priv->wep_is_on;
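+ /* encryption_level 1 corresponds to 40-bit (5 byte) WEP keys,
+    level 2 to 104-bit (13 byte) keys. */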
+ if (priv->wep_is_on) {
+ if (priv->wep_key_len[priv->default_key] > 5)
+ mib.encryption_level = 2;
+ else
+ mib.encryption_level = 1;
+ } else {
+ mib.encryption_level = 0;
+ }
+
+ mib.default_key = priv->default_key;
+ mib.exclude_unencrypted = priv->exclude_unencrypted;
+
+ for (i = 0; i < MAX_ENCRYPTION_KEYS; i++)
+ memcpy(mib.wep_keys[i], priv->wep_keys[i], 13);
+
+ atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
+}
+
+static void build_wpa_mib(struct atmel_private *priv)
+{
+ /* This is for the later (WPA enabled) firmware. */
+
+ struct { /* NB this is matched to the hardware, don't change. */
+ u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
+ u8 receiver_address[6];
+ u8 wep_is_on;
+ u8 default_key; /* 0..3 */
+ u8 group_key;
+ u8 exclude_unencrypted;
+ u8 encryption_type;
+ u8 reserved;
+
+ u32 WEPICV_error_count;
+ u32 WEP_excluded_count;
+
+ u8 key_RSC[4][8];
+ } mib;
+
+ int i;
+
+ mib.wep_is_on = priv->wep_is_on;
+ mib.exclude_unencrypted = priv->exclude_unencrypted;
+ memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+
+ /* zero all the keys before adding in valid ones. */
+ memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
+
+ if (priv->wep_is_on) {
+ /* There's a comment in the Atmel code to the effect that this is only
+    valid when still using WEP; it may need to be set to something else
+    to use WPA. */
+ memset(mib.key_RSC, 0, sizeof(mib.key_RSC));
+
+ mib.default_key = mib.group_key = 255;
+ for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) {
+ if (priv->wep_key_len[i] > 0) {
+ memcpy(mib.cipher_default_key_value[i], priv->wep_keys[i], MAX_ENCRYPTION_KEY_SIZE);
+ if (i == priv->default_key) {
+ mib.default_key = i;
+ mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7;
+ mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite;
+ } else {
+ mib.group_key = i;
+ priv->group_cipher_suite = priv->pairwise_cipher_suite;
+ mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1;
+ mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite;
+ }
+ }
+ }
+ if (mib.default_key == 255)
+ mib.default_key = mib.group_key != 255 ? mib.group_key : 0;
+ if (mib.group_key == 255)
+ mib.group_key = mib.default_key;
+
+ }
+
+ atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
+}
+
+static int reset_atmel_card(struct net_device *dev)
+{
+ /* do everything necessary to wake up the hardware, including
+ waiting for the lightning strike and throwing the knife switch....
+
+ set all the Mib values which matter in the card to match
+ their settings in the atmel_private structure. Some of these
+ can be altered on the fly, but many (WEP, infrastructure or ad-hoc)
+ can only be changed by tearing down the world and coming back through
+ here.
+
+ This routine is also responsible for initialising some
+ hardware-specific fields in the atmel_private structure,
+ including a copy of the firmware's hostinfo structure
+ which is the route into the rest of the firmware data structures. */
+
+ struct atmel_private *priv = netdev_priv(dev);
+ u8 configuration;
+
+ /* data to add to the firmware names, in priority order;
+    this implements firmware versioning */
+
+ static char *firmware_modifier[] = {
+ "-wpa",
+ "",
+ NULL
+ };
+
+ /* reset pccard */
+ if (priv->bus_type == BUS_TYPE_PCCARD)
+ atmel_write16(priv->dev, GCR, 0x0060);
+
+ /* stop card, disable interrupts */
+ atmel_write16(priv->dev, GCR, 0x0040);
+
+ if (priv->card_type == CARD_TYPE_EEPROM) {
+ /* copy in firmware if needed */
+ const struct firmware *fw_entry = NULL;
+ unsigned char *fw;
+ int len = priv->firmware_length;
+ if (!(fw = priv->firmware)) {
+ if (priv->firmware_type == ATMEL_FW_TYPE_NONE) {
+ if (strlen(priv->firmware_id) == 0) {
+ printk(KERN_INFO
+ "%s: card type is unknown: assuming at76c502 firmware is OK.\n",
+ dev->name);
+ printk(KERN_INFO
+ "%s: if not, use the firmware= module parameter.\n",
+ dev->name);
+ strcpy(priv->firmware_id, "atmel_at76c502.bin");
+ }
+ if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) {
+ printk(KERN_ALERT
+ "%s: firmware %s is missing, cannot continue.\n",
+ dev->name, priv->firmware_id);
+ return 0;
+ }
+ } else {
+ int fw_index = 0;
+ int success = 0;
+
+ /* get firmware filename entry based on firmware type ID */
+ while (fw_table[fw_index].fw_type != priv->firmware_type
+ && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE)
+ fw_index++;
+
+ /* construct the actual firmware file name */
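+ /* e.g. with the "-wpa" and "" modifiers below this tries something
+    like atmel_at76c502-wpa.bin before falling back to
+    atmel_at76c502.bin (the exact stem and extension come from
+    fw_table). */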
+ if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) {
+ int i;
+ for (i = 0; firmware_modifier[i]; i++) {
+ snprintf(priv->firmware_id, 32, "%s%s.%s", fw_table[fw_index].fw_file,
+ firmware_modifier[i], fw_table[fw_index].fw_file_ext);
+ priv->firmware_id[31] = '\0';
+ if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) == 0) {
+ success = 1;
+ break;
+ }
+ }
+ }
+ if (!success) {
+ printk(KERN_ALERT
+ "%s: firmware %s is missing, cannot start.\n",
+ dev->name, priv->firmware_id);
+ priv->firmware_id[0] = '\0';
+ return 0;
+ }
+ }
+
+ fw = fw_entry->data;
+ len = fw_entry->size;
+ }
+
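+ /* Images that fit in the first 0x6000 bytes go straight into IRAM;
+    larger images are split, with the remainder copied to a second
+    bank (BSR 0x2ff) at offset 0x8000. */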
+ if (len <= 0x6000) {
+ atmel_write16(priv->dev, BSR, BSS_IRAM);
+ atmel_copy_to_card(priv->dev, 0, fw, len);
+ atmel_set_gcr(priv->dev, GCR_REMAP);
+ } else {
+ /* Remap */
+ atmel_set_gcr(priv->dev, GCR_REMAP);
+ atmel_write16(priv->dev, BSR, BSS_IRAM);
+ atmel_copy_to_card(priv->dev, 0, fw, 0x6000);
+ atmel_write16(priv->dev, BSR, 0x2ff);
+ atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
+ }
+
+ if (fw_entry)
+ release_firmware(fw_entry);
+ }
+
+ if (!atmel_wakeup_firmware(priv))
+ return 0;
+
+ /* Check the version and set the correct flag for wpa stuff,
+ old and new firmware are incompatible.
+ The pre-wpa 3com firmware reports major version 5,
+ the wpa 3com firmware is major version 4 and doesn't need
+ the 3com broken-ness filter. */
+ priv->use_wpa = (priv->host_info.major_version == 4);
+ priv->radio_on_broken = (priv->host_info.major_version == 5);
+
+ /* unmask all irq sources */
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff);
+
+ /* init Tx system and enable Tx */
+ atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0);
+ atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L);
+ atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0);
+ atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0);
+
+ priv->tx_desc_free = priv->host_info.tx_desc_count;
+ priv->tx_desc_head = 0;
+ priv->tx_desc_tail = 0;
+ priv->tx_desc_previous = 0;
+ priv->tx_free_mem = priv->host_info.tx_buff_size;
+ priv->tx_buff_head = 0;
+ priv->tx_buff_tail = 0;
+
+ configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
+ configuration | FUNC_CTRL_TxENABLE);
+
+ /* init Rx system and enable */
+ priv->rx_desc_head = 0;
+
+ configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
+ configuration | FUNC_CTRL_RxENABLE);
+
+ if (!priv->radio_on_broken) {
+ if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
+ CMD_STATUS_REJECTED_RADIO_OFF) {
+ printk(KERN_INFO
+ "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
+ dev->name);
+ return 0;
+ }
+ }
+
+ /* set up enough MIB values to run. */
+ atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate);
+ atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF);
+ atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_RTS_THRESHOLD_POS, priv->rts_threshold);
+ atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_FRAG_THRESHOLD_POS, priv->frag_threshold);
+ atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry);
+ atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry);
+ atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble);
+ atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS,
+ priv->dev->dev_addr, 6);
+ atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
+ atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_BEACON_PER_POS, priv->default_beacon_period);
+ atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, atmel_basic_rates, 4);
+ atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_PRIVACY_POS, priv->wep_is_on);
+ if (priv->use_wpa)
+ build_wpa_mib(priv);
+ else
+ build_wep_mib(priv);
+
+ return 1;
+}
+
+static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size)
+{
+ if (cmd)
+ atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET),
+ cmd, cmd_size);
+
+ atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command);
+ atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0);
+}
+
+static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size)
+{
+ int i, status;
+
+ atmel_send_command(priv, command, cmd, cmd_size);
+
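+ /* poll for up to ~100ms (5000 x 20us) for the command to leave the
+    idle/in-progress state */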
+ for (i = 5000; i; i--) {
+ status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
+ if (status != CMD_STATUS_IDLE &&
+ status != CMD_STATUS_IN_PROGRESS)
+ break;
+ udelay(20);
+ }
+
+ if (i == 0) {
+ printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
+ status = CMD_STATUS_HOST_ERROR;
+ } else {
+ if (command != CMD_EnableRadio)
+ status = CMD_STATUS_COMPLETE;
+ }
+
+ return status;
+}
+
+static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index)
+{
+ struct get_set_mib m;
+ m.type = type;
+ m.size = 1;
+ m.index = index;
+
+ atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
+ return atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE));
+}
+
+static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data)
+{
+ struct get_set_mib m;
+ m.type = type;
+ m.size = 1;
+ m.index = index;
+ m.data[0] = data;
+
+ atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
+}
+
+static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data)
+{
+ struct get_set_mib m;
+ m.type = type;
+ m.size = 2;
+ m.index = index;
+ m.data[0] = data;
+ m.data[1] = data >> 8;
+
+ atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2);
+}
+
+static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len)
+{
+ struct get_set_mib m;
+ m.type = type;
+ m.size = data_len;
+ m.index = index;
+
+ if (data_len > MIB_MAX_DATA_BYTES)
+ printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
+
+ memcpy(m.data, data, data_len);
+ atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
+}
+
+static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len)
+{
+ struct get_set_mib m;
+ m.type = type;
+ m.size = data_len;
+ m.index = index;
+
+ if (data_len > MIB_MAX_DATA_BYTES)
+ printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
+
+ atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
+ atmel_copy_to_host(priv->dev, data,
+ atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len);
+}
+
+static void atmel_writeAR(struct net_device *dev, u16 data)
+{
+ int i;
+ outw(data, dev->base_addr + AR);
+ /* Address register appears to need some convincing..... */
+ for (i = 0; data != inw(dev->base_addr + AR) && i<10; i++)
+ outw(data, dev->base_addr + AR);
+}
+
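+/* The copy routines below stream data through the data register in
+   16-bit words after setting the address register once; a byte at an
+   odd starting address, or one left over at the end, uses an 8-bit
+   access. */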
+static void atmel_copy_to_card(struct net_device *dev, u16 dest, unsigned char *src, u16 len)
+{
+ int i;
+ atmel_writeAR(dev, dest);
+ if (dest % 2) {
+ atmel_write8(dev, DR, *src);
+ src++; len--;
+ }
+ for (i = len; i > 1 ; i -= 2) {
+ u8 lb = *src++;
+ u8 hb = *src++;
+ atmel_write16(dev, DR, lb | (hb << 8));
+ }
+ if (i)
+ atmel_write8(dev, DR, *src);
+}
+
+static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, u16 src, u16 len)
+{
+ int i;
+ atmel_writeAR(dev, src);
+ if (src % 2) {
+ *dest = atmel_read8(dev, DR);
+ dest++; len--;
+ }
+ for (i = len; i > 1 ; i -= 2) {
+ u16 hw = atmel_read16(dev, DR);
+ *dest++ = hw;
+ *dest++ = hw >> 8;
+ }
+ if (i)
+ *dest = atmel_read8(dev, DR);
+}
+
+static void atmel_set_gcr(struct net_device *dev, u16 mask)
+{
+ outw(inw(dev->base_addr + GCR) | mask, dev->base_addr + GCR);
+}
+
+static void atmel_clear_gcr(struct net_device *dev, u16 mask)
+{
+ outw(inw(dev->base_addr + GCR) & ~mask, dev->base_addr + GCR);
+}
+
+static int atmel_lock_mac(struct atmel_private *priv)
+{
+ int i, j = 20;
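+ /* Wait for the MAC's lockout-of-host flag to clear, then assert our
+    lockout-of-MAC flag; if the MAC grabbed the lock again in the
+    meantime, back off and retry (up to 20 times). */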
+ retry:
+ for (i = 5000; i; i--) {
+ if (!atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET)))
+ break;
+ udelay(20);
+ }
+
+ if (!i) return 0; /* timed out */
+
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1);
+ if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) {
+ atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
+ if (!j--) return 0; /* timed out */
+ goto retry;
+ }
+
+ return 1;
+}
+
+static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
+{
+ atmel_writeAR(priv->dev, pos);
+ atmel_write16(priv->dev, DR, data); /* card is little-endian */
+ atmel_write16(priv->dev, DR, data >> 16);
+}
+
+/***************************************************************************/
+/* There follows the source form of the MAC address reading firmware */
+/***************************************************************************/
+#if 0
+
+/* Copyright 2003 Matthew T. Russotto */
+/* But derived from the Atmel 76C502 firmware written by Atmel and */
+/* included in "atmel wireless lan drivers" package */
+/**
+ This file is part of net.russotto.AtmelMACFW, hereto referred to
+ as AtmelMACFW
+
+ AtmelMACFW is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2
+ as published by the Free Software Foundation.
+
+ AtmelMACFW is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with AtmelMACFW; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+****************************************************************************/
+/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
+/* It will probably work on the 76C504 and 76C502 RFMD_3COM */
+/* It only works on SPI EEPROM versions of the card. */
+
+/* This firmware initializes the SPI controller and clock, reads the MAC */
+/* address from the EEPROM into SRAM, and puts the SRAM offset of the MAC */
+/* address in MR2, and sets MR3 to 0x10 to indicate it is done */
+/* It also puts a complete copy of the EEPROM in SRAM with the offset in */
+/* MR4, for investigational purposes (maybe we can determine chip type */
+/* from that?) */
+
+ .org 0
+ .set MRBASE, 0x8000000
+ .set CPSR_INITIAL, 0xD3 /* IRQ/FIQ disabled, ARM mode, Supervisor state */
+ .set CPSR_USER, 0xD1 /* IRQ/FIQ disabled, ARM mode, USER state */
+ .set SRAM_BASE, 0x02000000
+ .set SP_BASE, 0x0F300000
+ .set UNK_BASE, 0x0F000000 /* Some internal device, but which one? */
+ .set SPI_CGEN_BASE, 0x0E000000 /* Some internal device, but which one? */
+ .set UNK3_BASE, 0x02014000 /* Some internal device, but which one? */
+ .set STACK_BASE, 0x5600
+ .set SP_SR, 0x10
+ .set SP_TDRE, 2 /* status register bit -- TDR empty */
+ .set SP_RDRF, 1 /* status register bit -- RDR full */
+ .set SP_SWRST, 0x80
+ .set SP_SPIEN, 0x1
+ .set SP_CR, 0 /* control register */
+ .set SP_MR, 4 /* mode register */
+ .set SP_RDR, 0x08 /* Read Data Register */
+ .set SP_TDR, 0x0C /* Transmit Data Register */
+ .set SP_CSR0, 0x30 /* chip select registers */
+ .set SP_CSR1, 0x34
+ .set SP_CSR2, 0x38
+ .set SP_CSR3, 0x3C
+ .set NVRAM_CMD_RDSR, 5 /* read status register */
+ .set NVRAM_CMD_READ, 3 /* read data */
+ .set NVRAM_SR_RDY, 1 /* RDY bit. This bit is inverted */
+ .set SPI_8CLOCKS, 0xFF /* Writing this to the TDR doesn't do anything to the
+ serial output, since SO is normally high. But it
+ does cause 8 clock cycles and thus 8 bits to be
+ clocked in to the chip. See Atmel's SPI
+ controller (e.g. AT91M55800) timing and 4K
+ SPI EEPROM manuals */
+
+ .set NVRAM_SCRATCH, 0x02000100 /* arbitrary area for scratchpad memory */
+ .set NVRAM_IMAGE, 0x02000200
+ .set NVRAM_LENGTH, 0x0200
+ .set MAC_ADDRESS_MIB, SRAM_BASE
+ .set MAC_ADDRESS_LENGTH, 6
+ .set MAC_BOOT_FLAG, 0x10
+ .set MR1, 0
+ .set MR2, 4
+ .set MR3, 8
+ .set MR4, 0xC
+RESET_VECTOR:
+ b RESET_HANDLER
+UNDEF_VECTOR:
+ b HALT1
+SWI_VECTOR:
+ b HALT1
+IABORT_VECTOR:
+ b HALT1
+DABORT_VECTOR:
+RESERVED_VECTOR:
+ b HALT1
+IRQ_VECTOR:
+ b HALT1
+FIQ_VECTOR:
+ b HALT1
+HALT1: b HALT1
+RESET_HANDLER:
+ mov r0, #CPSR_INITIAL
+ msr CPSR_c, r0 /* This is probably unnecessary */
+
+/* I'm guessing this is initializing clock generator electronics for SPI */
+ ldr r0, =SPI_CGEN_BASE
+ mov r1, #0
+ mov r1, r1, lsl #3
+ orr r1,r1, #0
+ str r1, [r0]
+ ldr r1, [r0, #28]
+ bic r1, r1, #16
+ str r1, [r0, #28]
+ mov r1, #1
+ str r1, [r0, #8]
+
+ ldr r0, =MRBASE
+ mov r1, #0
+ strh r1, [r0, #MR1]
+ strh r1, [r0, #MR2]
+ strh r1, [r0, #MR3]
+ strh r1, [r0, #MR4]
+
+ mov sp, #STACK_BASE
+ bl SP_INIT
+ mov r0, #10
+ bl DELAY9
+ bl GET_MAC_ADDR
+ bl GET_WHOLE_NVRAM
+ ldr r0, =MRBASE
+ ldr r1, =MAC_ADDRESS_MIB
+ strh r1, [r0, #MR2]
+ ldr r1, =NVRAM_IMAGE
+ strh r1, [r0, #MR4]
+ mov r1, #MAC_BOOT_FLAG
+ strh r1, [r0, #MR3]
+HALT2: b HALT2
+.func Get_Whole_NVRAM, GET_WHOLE_NVRAM
+GET_WHOLE_NVRAM:
+ stmdb sp!, {lr}
+ mov r2, #0 /* 0th bytes of NVRAM */
+ mov r3, #NVRAM_LENGTH
+ mov r1, #0 /* not used in routine */
+ ldr r0, =NVRAM_IMAGE
+ bl NVRAM_XFER
+ ldmia sp!, {lr}
+ bx lr
+.endfunc
+
+.func Get_MAC_Addr, GET_MAC_ADDR
+GET_MAC_ADDR:
+ stmdb sp!, {lr}
+ mov r2, #0x120 /* address of MAC Address within NVRAM */
+ mov r3, #MAC_ADDRESS_LENGTH
+ mov r1, #0 /* not used in routine */
+ ldr r0, =MAC_ADDRESS_MIB
+ bl NVRAM_XFER
+ ldmia sp!, {lr}
+ bx lr
+.endfunc
+.ltorg
+.func Delay9, DELAY9
+DELAY9:
+ adds r0, r0, r0, LSL #3 /* r0 = r0 * 9 */
+DELAYLOOP:
+ beq DELAY9_done
+ subs r0, r0, #1
+ b DELAYLOOP
+DELAY9_done:
+ bx lr
+.endfunc
+
+.func SP_Init, SP_INIT
+SP_INIT:
+ mov r1, #SP_SWRST
+ ldr r0, =SP_BASE
+ str r1, [r0, #SP_CR] /* reset the SPI */
+ mov r1, #0
+ str r1, [r0, #SP_CR] /* release SPI from reset state */
+ mov r1, #SP_SPIEN
+ str r1, [r0, #SP_MR] /* set the SPI to MASTER mode*/
+ str r1, [r0, #SP_CR] /* enable the SPI */
+
+/* My guess would be this turns on the SPI clock */
+ ldr r3, =SPI_CGEN_BASE
+ ldr r1, [r3, #28]
+ orr r1, r1, #0x2000
+ str r1, [r3, #28]
+
+ ldr r1, =0x2000c01
+ str r1, [r0, #SP_CSR0]
+ ldr r1, =0x2000201
+ str r1, [r0, #SP_CSR1]
+ str r1, [r0, #SP_CSR2]
+ str r1, [r0, #SP_CSR3]
+ ldr r1, [r0, #SP_SR]
+ ldr r0, [r0, #SP_RDR]
+ bx lr
+.endfunc
+.func NVRAM_Init, NVRAM_INIT
+NVRAM_INIT:
+ ldr r1, =SP_BASE
+ ldr r0, [r1, #SP_RDR]
+ mov r0, #NVRAM_CMD_RDSR
+ str r0, [r1, #SP_TDR]
+SP_loop1:
+ ldr r0, [r1, #SP_SR]
+ tst r0, #SP_TDRE
+ beq SP_loop1
+
+ mov r0, #SPI_8CLOCKS
+ str r0, [r1, #SP_TDR]
+SP_loop2:
+ ldr r0, [r1, #SP_SR]
+ tst r0, #SP_TDRE
+ beq SP_loop2
+
+ ldr r0, [r1, #SP_RDR]
+SP_loop3:
+ ldr r0, [r1, #SP_SR]
+ tst r0, #SP_RDRF
+ beq SP_loop3
+
+ ldr r0, [r1, #SP_RDR]
+ and r0, r0, #255
+ bx lr
+.endfunc
+
+.func NVRAM_Xfer, NVRAM_XFER
+ /* r0 = dest address */
+ /* r1 = not used */
+ /* r2 = src address within NVRAM */
+ /* r3 = length */
+NVRAM_XFER:
+ stmdb sp!, {r4, r5, lr}
+ mov r5, r0 /* save r0 (dest address) */
+ mov r4, r3 /* save r3 (length) */
+ mov r0, r2, LSR #5 /* SPI memories put A8 in the command field */
+ and r0, r0, #8
+ add r0, r0, #NVRAM_CMD_READ
+ ldr r1, =NVRAM_SCRATCH
+ strb r0, [r1, #0] /* save command in NVRAM_SCRATCH[0] */
+ strb r2, [r1, #1] /* save low byte of source address in NVRAM_SCRATCH[1] */
+_local1:
+ bl NVRAM_INIT
+ tst r0, #NVRAM_SR_RDY
+ bne _local1
+ mov r0, #20
+ bl DELAY9
+ mov r2, r4 /* length */
+ mov r1, r5 /* dest address */
+ mov r0, #2 /* bytes to transfer in command */
+ bl NVRAM_XFER2
+ ldmia sp!, {r4, r5, lr}
+ bx lr
+.endfunc
+
+.func NVRAM_Xfer2, NVRAM_XFER2
+NVRAM_XFER2:
+ stmdb sp!, {r4, r5, r6, lr}
+ ldr r4, =SP_BASE
+ mov r3, #0
+ cmp r0, #0
+ bls _local2
+ ldr r5, =NVRAM_SCRATCH
+_local4:
+ ldrb r6, [r5, r3]
+ str r6, [r4, #SP_TDR]
+_local3:
+ ldr r6, [r4, #SP_SR]
+ tst r6, #SP_TDRE
+ beq _local3
+ add r3, r3, #1
+ cmp r3, r0 /* r0 is # of bytes to send out (command+addr) */
+ blo _local4
+_local2:
+ mov r3, #SPI_8CLOCKS
+ str r3, [r4, #SP_TDR]
+ ldr r0, [r4, #SP_RDR]
+_local5:
+ ldr r0, [r4, #SP_SR]
+ tst r0, #SP_RDRF
+ beq _local5
+ ldr r0, [r4, #SP_RDR] /* what's this byte? It's the byte read while writing the TDR -- nonsense, because the NVRAM doesn't read and write at the same time */
+ mov r0, #0
+ cmp r2, #0 /* r2 is # of bytes to copy in */
+ bls _local6
+_local7:
+ ldr r5, [r4, #SP_SR]
+ tst r5, #SP_TDRE
+ beq _local7
+ str r3, [r4, #SP_TDR] /* r3 has SPI_8CLOCKS */
+_local8:
+ ldr r5, [r4, #SP_SR]
+ tst r5, #SP_RDRF
+ beq _local8
+ ldr r5, [r4, #SP_RDR] /* but didn't we read this byte above? */
+ strb r5, [r1], #1 /* postindexed */
+ add r0, r0, #1
+ cmp r0, r2
+ blo _local7 /* since we don't send another address, the NVRAM must be capable of sequential reads */
+_local6:
+ mov r0, #200
+ bl DELAY9
+ ldmia sp!, {r4, r5, r6, lr}
+ bx lr
+#endif
diff --git a/drivers/net/wireless/atmel.h b/drivers/net/wireless/atmel.h
new file mode 100644
index 000000000000..825000edfc2c
--- /dev/null
+++ b/drivers/net/wireless/atmel.h
@@ -0,0 +1,43 @@
+/*** -*- linux-c -*- **********************************************************
+
+ Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
+
+ Copyright 2005 Dan Williams and Red Hat, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This software is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Atmel wireless lan drivers; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+******************************************************************************/
+
+#ifndef _ATMEL_H
+#define _ATMEL_H
+
+typedef enum {
+ ATMEL_FW_TYPE_NONE = 0,
+ ATMEL_FW_TYPE_502,
+ ATMEL_FW_TYPE_502D,
+ ATMEL_FW_TYPE_502E,
+ ATMEL_FW_TYPE_502_3COM,
+ ATMEL_FW_TYPE_504,
+ ATMEL_FW_TYPE_504_2958,
+ ATMEL_FW_TYPE_504A_2958,
+ ATMEL_FW_TYPE_506
+} AtmelFWType;
+
+struct net_device *init_atmel_card(unsigned short, int, const AtmelFWType, struct device *,
+ int (*present_func)(void *), void *);
+void stop_atmel_card(struct net_device *, int);
+int atmel_open(struct net_device *);
+
+#endif
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
new file mode 100644
index 000000000000..a91b507e0a7a
--- /dev/null
+++ b/drivers/net/wireless/atmel_cs.c
@@ -0,0 +1,708 @@
+/*** -*- linux-c -*- **********************************************************
+
+ Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
+
+ Copyright 2000-2001 ATMEL Corporation.
+ Copyright 2003 Simon Kelley.
+
+ This code was developed from version 2.1.1 of the Atmel drivers,
+ released by Atmel corp. under the GPL in December 2002. It also
+ includes code from the Linux aironet drivers (C) Benjamin Reed,
+ and the Linux PCMCIA package, (C) David Hinds.
+
+ For all queries about this code, please contact the current author,
+ Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This software is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Atmel wireless lan drivers; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+******************************************************************************/
+
+#include <linux/config.h>
+#ifdef __IN_PCMCIA_PACKAGE__
+#include <pcmcia/k_compat.h>
+#endif
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ciscode.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/wireless.h>
+
+#include "atmel.h"
+
+/*
+ All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ you do not define PCMCIA_DEBUG at all, all the debug code will be
+ left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ be present but disabled -- but it can then be enabled for specific
+ modules at load time with a 'pc_debug=#' option to insmod.
+*/
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+static char *version = "$Revision: 1.2 $";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+MODULE_AUTHOR("Simon Kelley");
+MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
+
+/*====================================================================*/
+
+/*
+ The event() function is this driver's Card Services event handler.
+ It will be called by Card Services when an appropriate card status
+ event is received. The config() and release() entry points are
+ used to configure or release a socket, in response to card
+ insertion and ejection events. They are invoked from the atmel_cs
+ event handler.
+*/
+
+static void atmel_config(dev_link_t *link);
+static void atmel_release(dev_link_t *link);
+static int atmel_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+/*
+ The attach() and detach() entry points are used to create and destroy
+ "instances" of the driver, where each instance represents everything
+ needed to manage one actual PCMCIA card.
+*/
+
+static dev_link_t *atmel_attach(void);
+static void atmel_detach(dev_link_t *);
+
+/*
+ You'll also need to prototype all the functions that will actually
+ be used to talk to your device. See 'pcmem_cs' for a good example
+ of a fully self-sufficient driver; the other drivers rely more or
+ less on other parts of the kernel.
+*/
+
+/*
+ The dev_info variable is the "key" that is used to match up this
+ device driver with appropriate cards, through the card configuration
+ database.
+*/
+
+static dev_info_t dev_info = "atmel_cs";
+
+/*
+ A linked list of "instances" of the atmelnet device. Each actual
+ PCMCIA card corresponds to one device instance, and is described
+ by one dev_link_t structure (defined in ds.h).
+
+ You may not want to use a linked list for this -- for example, the
+ memory card driver uses an array of dev_link_t pointers, where minor
+ device numbers are used to derive the corresponding array index.
+*/
+
+static dev_link_t *dev_list = NULL;
+
+/*
+ A dev_link_t structure has fields for most things that are needed
+ to keep track of a socket, but there will usually be some device
+ specific information that also needs to be kept track of. The
+ 'priv' pointer in a dev_link_t structure can be used to point to
+ a device-specific private data structure, like this.
+
+ A driver needs to provide a dev_node_t structure for each device
+ on a card. In some cases, there is only one device per card (for
+ example, ethernet cards, modems). In other cases, there may be
+ many actual or logical devices (SCSI adapters, memory cards with
+ multiple partitions). The dev_node_t structures need to be kept
+ in a linked list starting at the 'dev' field of a dev_link_t
+ structure. We allocate them in the card's private data structure,
+ because they generally shouldn't be allocated dynamically.
+
+ In this case, we also provide a flag to indicate if a device is
+ "stopped" due to a power management event, or card ejection. The
+ device IO routines can use a flag like this to throttle IO to a
+ card that is not ready to accept it.
+*/
+
+typedef struct local_info_t {
+ dev_node_t node;
+ struct net_device *eth_dev;
+} local_info_t;
+
+/*======================================================================
+
+ atmel_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+ The dev_link structure is initialized, but we don't actually
+ configure the card at this point -- we wait until we receive a
+ card insertion event.
+
+ ======================================================================*/
+
+static dev_link_t *atmel_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ local_info_t *local;
+ int ret;
+
+ DEBUG(0, "atmel_attach()\n");
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+ if (!link) {
+ printk(KERN_ERR "atmel_cs: no memory for new device\n");
+ return NULL;
+ }
+ memset(link, 0, sizeof(struct dev_link_t));
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = NULL;
+
+ /*
+ General socket configuration defaults can go here. In this
+ client, we assume very little, and rely on the CIS for almost
+ everything. In most clients, many details (i.e., number, sizes,
+ and attributes of IO windows) are fixed by the nature of the
+ device, and can be hard-wired here.
+ */
+ link->conf.Attributes = 0;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Allocate space for private device-specific data */
+ local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
+ if (!local) {
+ printk(KERN_ERR "atmel_cs: no memory for new device\n");
+ kfree (link);
+ return NULL;
+ }
+ memset(local, 0, sizeof(local_info_t));
+ link->priv = local;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &atmel_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ atmel_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* atmel_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+ ======================================================================*/
+
+static void atmel_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+
+ DEBUG(0, "atmel_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ if (link->state & DEV_CONFIG)
+ atmel_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->priv)
+ kfree(link->priv);
+ kfree(link);
+}
+
+/*======================================================================
+
+ atmel_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
+
+ ======================================================================*/
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+/* Call-back function to interrogate PCMCIA-specific information
+   about the current existence of the card */
+static int card_present(void *arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+ if (link->state & DEV_SUSPEND)
+ return 0;
+ else if (link->state & DEV_PRESENT)
+ return 1;
+
+ return 0;
+}
+
+/* list of cards we know about and their firmware requirements.
+ Go either by Manfid or version strings.
+ Cards not in this list will need a firmware parameter to the module
+ in all probability. Note that the SMC 2632 V2 and V3 have the same
+ manfids, so we ignore those and use the version1 strings. */
+
+static struct {
+ int manf, card;
+ char *ver1;
+ AtmelFWType firmware;
+ char *name;
+} card_table[] = {
+ { 0, 0, "WLAN/802.11b PC CARD", ATMEL_FW_TYPE_502D, "Actiontec 802CAT1" },
+ { 0, 0, "ATMEL/AT76C502AR", ATMEL_FW_TYPE_502, "NoName-RFMD" },
+ { 0, 0, "ATMEL/AT76C502AR_D", ATMEL_FW_TYPE_502D, "NoName-revD" },
+ { 0, 0, "ATMEL/AT76C502AR_E", ATMEL_FW_TYPE_502E, "NoName-revE" },
+ { 0, 0, "ATMEL/AT76C504", ATMEL_FW_TYPE_504, "NoName-504" },
+ { 0, 0, "ATMEL/AT76C504A", ATMEL_FW_TYPE_504A_2958, "NoName-504a-2958" },
+ { 0, 0, "ATMEL/AT76C504_R", ATMEL_FW_TYPE_504_2958, "NoName-504-2958" },
+ { MANFID_3COM, 0x0620, NULL, ATMEL_FW_TYPE_502_3COM, "3com 3CRWE62092B" },
+ { MANFID_3COM, 0x0696, NULL, ATMEL_FW_TYPE_502_3COM, "3com 3CRSHPW196" },
+ { 0, 0, "SMC/2632W-V2", ATMEL_FW_TYPE_502, "SMC 2632W-V2" },
+ { 0, 0, "SMC/2632W", ATMEL_FW_TYPE_502D, "SMC 2632W-V3" },
+ { 0xd601, 0x0007, NULL, ATMEL_FW_TYPE_502, "Sitecom WLAN-011" },
+ { 0x01bf, 0x3302, NULL, ATMEL_FW_TYPE_502E, "Belkin F5D6020-V2" },
+ { 0, 0, "BT/Voyager 1020 Laptop Adapter", ATMEL_FW_TYPE_502, "BT Voyager 1020" },
+ { 0, 0, "IEEE 802.11b/Wireless LAN PC Card", ATMEL_FW_TYPE_502, "Siemens Gigaset PC Card II" },
+ { 0, 0, "CNet/CNWLC 11Mbps Wireless PC Card V-5", ATMEL_FW_TYPE_502E, "CNet CNWLC-811ARL" },
+ { 0, 0, "Wireless/PC_CARD", ATMEL_FW_TYPE_502D, "Planet WL-3552" },
+ { 0, 0, "OEM/11Mbps Wireless LAN PC Card V-3", ATMEL_FW_TYPE_502, "OEM 11Mbps WLAN PCMCIA Card" },
+ { 0, 0, "11WAVE/11WP611AL-E", ATMEL_FW_TYPE_502E, "11WAVE WaveBuddy" },
+ { 0, 0, "LG/LW2100N", ATMEL_FW_TYPE_502E, "LG LW2100N 11Mbps WLAN PCMCIA Card" },
+};
+
+static void atmel_config(dev_link_t *link)
+{
+ client_handle_t handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ local_info_t *dev;
+ int last_fn, last_ret;
+ u_char buf[64];
+ int card_index = -1, done = 0;
+
+ handle = link->handle;
+ dev = link->priv;
+
+ DEBUG(0, "atmel_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (pcmcia_get_first_tuple(handle, &tuple) == 0) {
+ int i;
+ cistpl_manfid_t *manfid;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ manfid = &(parse.manfid);
+ for (i = 0; i < sizeof(card_table)/sizeof(card_table[0]); i++) {
+ if (!card_table[i].ver1 &&
+ manfid->manf == card_table[i].manf &&
+ manfid->card == card_table[i].card) {
+ card_index = i;
+ done = 1;
+ }
+ }
+ }
+
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (!done && (pcmcia_get_first_tuple(handle, &tuple) == 0)) {
+ int i, j, k;
+ cistpl_vers_1_t *ver1;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ ver1 = &(parse.version_1);
+
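+ /* The ver1 strings in card_table pack the CIS version-1 product
+    strings separated by '/'; match each CIS string against the
+    corresponding '/'-separated component. */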
+ for (i = 0; i < sizeof(card_table)/sizeof(card_table[0]); i++) {
+ for (j = 0; j < ver1->ns; j++) {
+ char *p = card_table[i].ver1;
+ char *q = &ver1->str[ver1->ofs[j]];
+ if (!p)
+ goto mismatch;
+ for (k = 0; k < j; k++) {
+ while ((*p != '\0') && (*p != '/')) p++;
+ if (*p == '\0') {
+ if (*q != '\0')
+ goto mismatch;
+ } else {
+ p++;
+ }
+ }
+ while ((*q != '\0') && (*p != '\0') &&
+ (*p != '/') && (*p == *q)) p++, q++;
+ if (((*p != '\0') && *p != '/') || *q != '\0')
+ goto mismatch;
+ }
+ card_index = i;
+ break; /* done */
+
+ mismatch:
+ j = 0; /* dummy stmt to shut up compiler */
+ }
+ }
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /*
+ In this loop, we scan the CIS for configuration table entries,
+ each of which describes a valid card configuration, including
+ voltage, IO window, memory window, and interrupt settings.
+
+ We make no assumptions about the card to be configured: we use
+ just the information available in the CIS. In an ideal world,
+ this would work for any PCMCIA card, but it requires a complete
+ and accurate CIS. In practice, a driver usually "knows" most of
+ these things without consulting the CIS, and most client drivers
+ will only use the CIS to fill in implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ while (1) {
+ cistpl_cftable_entry_t dflt = { 0 };
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
+ pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ goto next_entry;
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
+ if (cfg->index == 0) goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
+
+ if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
+
+ /* Do we need to allocate an interrupt? */
+ if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 = link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ if (pcmcia_request_io(link->handle, &link->io) != 0)
+ goto next_entry;
+
+ /* If we got this far, we're cool! */
+ break;
+
+ next_entry:
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ }
+
+ /*
+ Allocate an interrupt line. Note that this does not assign a
+ handler to the interrupt, unless the 'Handler' member of the
+ irq structure is initialized.
+ */
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+
+ /*
+ This actually configures the PCMCIA socket -- setting up
+ the I/O windows and the interrupt mapping, and putting the
+ card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ if (link->irq.AssignedIRQ == 0) {
+ printk(KERN_ALERT
+		       "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.\n");
+ goto cs_failed;
+ }
+
+ ((local_info_t*)link->priv)->eth_dev =
+ init_atmel_card(link->irq.AssignedIRQ,
+ link->io.BasePort1,
+ card_index == -1 ? ATMEL_FW_TYPE_NONE : card_table[card_index].firmware,
+ &handle_to_dev(handle),
+ card_present,
+ link);
+ if (!((local_info_t*)link->priv)->eth_dev)
+ goto cs_failed;
+
+ /*
+ At this point, the dev_node_t structure(s) need to be
+ initialized and arranged in a linked list at link->dev.
+ */
+ strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
+ dev->node.major = dev->node.minor = 0;
+ link->dev = &dev->node;
+
+ /* Finally, report what we've done */
+ printk(KERN_INFO "%s: %s%sindex 0x%02x: Vcc %d.%d",
+ dev->node.dev_name,
+ card_index == -1 ? "" : card_table[card_index].name,
+ card_index == -1 ? "" : " ",
+ link->conf.ConfigIndex,
+ link->conf.Vcc/10, link->conf.Vcc%10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1+link->io.NumPorts1-1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2+link->io.NumPorts2-1);
+ printk("\n");
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+ cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+ atmel_release(link);
+}
+
+/*======================================================================
+
+ After a card is removed, atmel_release() will unregister the
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+ ======================================================================*/
+
+static void atmel_release(dev_link_t *link)
+{
+ struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
+
+ DEBUG(0, "atmel_release(0x%p)\n", link);
+
+ /* Unlink the device chain */
+ link->dev = NULL;
+
+ if (dev)
+ stop_atmel_card(dev, 0);
+ ((local_info_t*)link->priv)->eth_dev = NULL;
+
+ /* Don't bother checking to see if these succeed or not */
+ pcmcia_release_configuration(link->handle);
+ if (link->io.NumPorts1)
+ pcmcia_release_io(link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ pcmcia_release_irq(link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+}
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received.
+
+ When a CARD_REMOVAL event is received, we immediately set a
+ private flag to block future accesses to this device. All the
+ functions that actually access the device should check this flag
+ to make sure the card is still present.
+
+ ======================================================================*/
+
+static int atmel_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *local = link->priv;
+
+ DEBUG(1, "atmel_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ atmel_release(link);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ atmel_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ atmel_open(local->eth_dev);
+ netif_device_attach(local->eth_dev);
+ }
+ break;
+ }
+ return 0;
+} /* atmel_event */
+
+/*====================================================================*/
+static struct pcmcia_driver atmel_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "atmel_cs",
+ },
+ .attach = atmel_attach,
+ .detach = atmel_detach,
+};
+
+static int atmel_cs_init(void)
+{
+ return pcmcia_register_driver(&atmel_driver);
+}
+
+static void atmel_cs_cleanup(void)
+{
+ pcmcia_unregister_driver(&atmel_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ In addition:
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+module_init(atmel_cs_init);
+module_exit(atmel_cs_cleanup);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
new file mode 100644
index 000000000000..2eb00a957bbe
--- /dev/null
+++ b/drivers/net/wireless/atmel_pci.c
@@ -0,0 +1,89 @@
+/*** -*- linux-c -*- **********************************************************
+
+     Driver for Atmel at76c502, at76c504 and at76c506 wireless cards.
+
+ Copyright 2004 Simon Kelley.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This software is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Atmel wireless lan drivers; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+******************************************************************************/
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include "atmel.h"
+
+MODULE_AUTHOR("Simon Kelley");
+MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
+
+static struct pci_device_id card_ids[] = {
+ { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, card_ids);
+
+static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *);
+static void atmel_pci_remove(struct pci_dev *);
+
+static struct pci_driver atmel_driver = {
+ .name = "atmel",
+ .id_table = card_ids,
+ .probe = atmel_pci_probe,
+ .remove = __devexit_p(atmel_pci_remove),
+};
+
+
+static int __devinit atmel_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
+{
+ struct net_device *dev;
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ pci_set_master(pdev);
+
+ dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
+ ATMEL_FW_TYPE_506,
+ &pdev->dev, NULL, NULL);
+ if (!dev)
+ return -ENODEV;
+
+ pci_set_drvdata(pdev, dev);
+ return 0;
+}
+
+static void __devexit atmel_pci_remove(struct pci_dev *pdev)
+{
+ stop_atmel_card(pci_get_drvdata(pdev), 1);
+}
+
+static int __init atmel_init_module(void)
+{
+ return pci_module_init(&atmel_driver);
+}
+
+static void __exit atmel_cleanup_module(void)
+{
+ pci_unregister_driver(&atmel_driver);
+}
+
+module_init(atmel_init_module);
+module_exit(atmel_cleanup_module);
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
new file mode 100644
index 000000000000..21c3d0d227e6
--- /dev/null
+++ b/drivers/net/wireless/hermes.c
@@ -0,0 +1,554 @@
+/* hermes.c
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
+ * particular order).
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia.
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <asm/errno.h>
+
+#include "hermes.h"
+
+MODULE_DESCRIPTION("Low-level driver helper for Lucent Hermes chipset and Prism II HFA384x wireless MAC controller");
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"
+ " & David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_LICENSE("Dual MPL/GPL");
+
+/* These are maximum timeouts. Most often, the card will react much faster */
+#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
+#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
+#define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */
+#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
+
+/*
+ * Debugging helpers
+ */
+
+#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \
+ printk(stuff);} while (0)
+
+#undef HERMES_DEBUG
+#ifdef HERMES_DEBUG
+#include <stdarg.h>
+
+#define DEBUG(lvl, stuff...) if ( (lvl) <= HERMES_DEBUG) DMSG(stuff)
+
+#else /* ! HERMES_DEBUG */
+
+#define DEBUG(lvl, stuff...) do { } while (0)
+
+#endif /* ! HERMES_DEBUG */
+
+
+/*
+ * Internal functions
+ */
+
+/* Issue a command to the chip. Waiting for it to complete is the caller's
+ problem.
+
+ Returns -EBUSY if the command register is busy, 0 on success.
+
+ Callable from any context.
+*/
+static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
+{
+ int k = CMD_BUSY_TIMEOUT;
+ u16 reg;
+
+ /* First wait for the command register to unbusy */
+ reg = hermes_read_regn(hw, CMD);
+ while ( (reg & HERMES_CMD_BUSY) && k ) {
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+ if (reg & HERMES_CMD_BUSY) {
+ return -EBUSY;
+ }
+
+ hermes_write_regn(hw, PARAM2, 0);
+ hermes_write_regn(hw, PARAM1, 0);
+ hermes_write_regn(hw, PARAM0, param0);
+ hermes_write_regn(hw, CMD, cmd);
+
+ return 0;
+}
+
+/*
+ * Function definitions
+ */
+
+void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
+{
+ hw->iobase = address;
+ hw->reg_spacing = reg_spacing;
+ hw->inten = 0x0;
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->dbufp = 0;
+ memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
+ memset(&hw->profile, 0, sizeof(hw->profile));
+#endif
+}
+
+int hermes_init(hermes_t *hw)
+{
+ u16 status, reg;
+ int err = 0;
+ int k;
+
+ /* We don't want to be interrupted while resetting the chipset */
+ hw->inten = 0x0;
+ hermes_write_regn(hw, INTEN, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+
+ /* Normally it's a "can't happen" for the command register to
+ be busy when we go to issue a command because we are
+ serializing all commands. However we want to have some
+ chance of resetting the card even if it gets into a stupid
+ state, so we actually wait to see if the command register
+ will unbusy itself here. */
+ k = CMD_BUSY_TIMEOUT;
+ reg = hermes_read_regn(hw, CMD);
+ while (k && (reg & HERMES_CMD_BUSY)) {
+ if (reg == 0xffff) /* Special case - the card has probably been removed,
+ so don't wait for the timeout */
+ return -ENODEV;
+
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* No need to explicitly handle the timeout - if we've timed
+ out hermes_issue_cmd() will probably return -EBUSY below */
+
+ /* According to the documentation, EVSTAT may contain
+ obsolete event occurrence information. We have to acknowledge
+ it by writing EVACK. */
+ reg = hermes_read_regn(hw, EVSTAT);
+ hermes_write_regn(hw, EVACK, reg);
+
+ /* We don't use hermes_docmd_wait here, because the reset wipes
+ the magic constant in SWSUPPORT0 away, and it gets confused */
+ err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0);
+ if (err)
+ return err;
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_INIT_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+
+ if (! hermes_present(hw)) {
+		DEBUG(0, "hermes @ %p: Card removed during reset.\n",
+ hw->iobase);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %p: "
+ "Timeout waiting for card to reset (reg=0x%04x)!\n",
+ hw->iobase, reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+/* Issue a command to the chip, and (busy!) wait for it to
+ * complete.
+ *
+ * Returns: < 0 on internal error, 0 on success, > 0 on error returned by the firmware
+ *
+ * Callable from any context, but locking is your problem. */
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
+{
+ int err;
+ int k;
+ u16 reg;
+ u16 status;
+
+ err = hermes_issue_cmd(hw, cmd, parm0);
+ if (err) {
+ if (! hermes_present(hw)) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "hermes @ %p: "
+ "Card removed while issuing command "
+ "0x%04x.\n", hw->iobase, cmd);
+ err = -ENODEV;
+ } else
+ if (net_ratelimit())
+ printk(KERN_ERR "hermes @ %p: "
+ "Error %d issuing command 0x%04x.\n",
+ hw->iobase, err, cmd);
+ goto out;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %p: Card removed "
+ "while waiting for command 0x%04x completion.\n",
+ hw->iobase, cmd);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %p: Timeout waiting for "
+ "command 0x%04x completion.\n", hw->iobase, cmd);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+ if (resp) {
+ resp->status = status;
+ resp->resp0 = hermes_read_regn(hw, RESP0);
+ resp->resp1 = hermes_read_regn(hw, RESP1);
+ resp->resp2 = hermes_read_regn(hw, RESP2);
+ }
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+{
+ int err = 0;
+ int k;
+ u16 reg;
+
+ if ( (size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
+ if (err) {
+ return err;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = ALLOC_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_ALLOC)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %p: "
+ "Card removed waiting for frame allocation.\n",
+ hw->iobase);
+ return -ENODEV;
+ }
+
+ if (! (reg & HERMES_EV_ALLOC)) {
+ printk(KERN_ERR "hermes @ %p: "
+ "Timeout waiting for frame allocation\n",
+ hw->iobase);
+ return -ETIMEDOUT;
+ }
+
+ *fid = hermes_read_regn(hw, ALLOCFID);
+ hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
+
+ return 0;
+}
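+
+/* For example (illustrative only; 'txfid' is a hypothetical variable), a
+ * driver would typically allocate its transmit buffer FID once at
+ * initialisation time and reuse it for every frame:
+ *
+ *	u16 txfid;
+ *	int err = hermes_allocate(hw, 2048, &txfid);
+ *	if (err)
+ *		return err;
+ */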
+
+
+/* Set up a BAP to read a particular chunk of data from card's internal buffer.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, >0 on error
+ * from firmware
+ *
+ * Callable from any context */
+static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
+{
+ int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
+ int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
+ int k;
+ u16 reg;
+
+ /* Paranoia.. */
+ if ( (offset > HERMES_BAP_OFFSET_MAX) || (offset % 2) )
+ return -EINVAL;
+
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ((reg & HERMES_OFFSET_BUSY) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
+
+ if (k < HERMES_BAP_BUSY_TIMEOUT) {
+ struct hermes_debug_entry *e =
+ &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
+ e->bap = bap;
+ e->id = id;
+ e->offset = offset;
+ e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
+ }
+#endif
+
+ if (reg & HERMES_OFFSET_BUSY)
+ return -ETIMEDOUT;
+
+ /* Now we actually set up the transfer */
+ hermes_write_reg(hw, sreg, id);
+ hermes_write_reg(hw, oreg, offset);
+
+ /* Wait for the BAP to be ready */
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ( (reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+ if (reg != offset) {
+ printk(KERN_ERR "hermes @ %p: BAP%d offset %s: "
+ "reg=0x%x id=0x%x offset=0x%x\n", hw->iobase, bap,
+ (reg & HERMES_OFFSET_BUSY) ? "timeout" : "error",
+ reg, id, offset);
+
+ if (reg & HERMES_OFFSET_BUSY) {
+ return -ETIMEDOUT;
+ }
+
+ return -EIO; /* error or wrong offset */
+ }
+
+ return 0;
+}
+
+/* Read a block of data from the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_read_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Write a block of data to the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_write_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Read a Length-Type-Value record from the card.
+ *
+ * If length is NULL, we ignore the length read from the card, and
+ * read the entire buffer regardless. This is useful because some of
+ * the configuration records appear to have incorrect lengths in
+ * practice.
+ *
+ * Callable from user or bh context. */
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
+{
+ int err = 0;
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ u16 rlength, rtype;
+ unsigned nwords;
+
+ if ( (bufsize < 0) || (bufsize % 2) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
+ if (err)
+ return err;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ rlength = hermes_read_reg(hw, dreg);
+
+ if (! rlength)
+ return -ENODATA;
+
+ rtype = hermes_read_reg(hw, dreg);
+
+ if (length)
+ *length = rlength;
+
+ if (rtype != rid)
+ printk(KERN_WARNING "hermes @ %p: %s(): "
+ "rid (0x%04x) does not match type (0x%04x)\n",
+ hw->iobase, __FUNCTION__, rid, rtype);
+ if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
+ printk(KERN_WARNING "hermes @ %p: "
+ "Truncating LTV record from %d to %d bytes. "
+ "(rid=0x%04x, len=0x%04x)\n", hw->iobase,
+ HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);
+
+ nwords = min((unsigned)rlength - 1, bufsize / 2);
+ hermes_read_words(hw, dreg, buf, nwords);
+
+ return 0;
+}
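+
+/* For example (illustrative only; BAP 0 and the RID constants from
+ * hermes_rid.h), a caller might read the current BSSID record like this:
+ *
+ *	u8 bssid[ETH_ALEN];
+ *	u16 len;
+ *	int err = hermes_read_ltv(hw, 0, HERMES_RID_CURRENTBSSID,
+ *				  sizeof(bssid), &len, bssid);
+ */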
+
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+ unsigned count;
+
+ if (length == 0)
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ hermes_write_reg(hw, dreg, length);
+ hermes_write_reg(hw, dreg, rid);
+
+ count = length - 1;
+
+ hermes_write_words(hw, dreg, value, count);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
+ rid, NULL);
+
+ return err;
+}
+
+EXPORT_SYMBOL(hermes_struct_init);
+EXPORT_SYMBOL(hermes_init);
+EXPORT_SYMBOL(hermes_docmd_wait);
+EXPORT_SYMBOL(hermes_allocate);
+
+EXPORT_SYMBOL(hermes_bap_pread);
+EXPORT_SYMBOL(hermes_bap_pwrite);
+EXPORT_SYMBOL(hermes_read_ltv);
+EXPORT_SYMBOL(hermes_write_ltv);
+
+static int __init init_hermes(void)
+{
+ return 0;
+}
+
+static void __exit exit_hermes(void)
+{
+}
+
+module_init(init_hermes);
+module_exit(exit_hermes);
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
new file mode 100644
index 000000000000..8c9e874c9118
--- /dev/null
+++ b/drivers/net/wireless/hermes.h
@@ -0,0 +1,481 @@
+/* hermes.h
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism I & II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver.
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia.
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
+ *
+ * Portions taken from hfa384x.h, Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * This file distributed under the GPL, version 2.
+ */
+
+#ifndef _HERMES_H
+#define _HERMES_H
+
+/* Notes on locking:
+ *
+ * As a module of low level hardware access routines, there is no
+ * locking. Users of this module should ensure that they serialize
+ * access to the hermes_t structure, and to the hardware.
+ */
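+
+/* For example (illustrative only; 'priv' and 'hw_lock' are hypothetical
+ * driver-private members), a user of this module might wrap each command
+ * in its own lock:
+ *
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&priv->hw_lock, flags);
+ *	err = hermes_docmd_wait(hw, HERMES_CMD_ENABLE, 0, NULL);
+ *	spin_unlock_irqrestore(&priv->hw_lock, flags);
+ */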
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/*
+ * Limits and constants
+ */
+#define HERMES_ALLOC_LEN_MIN (4)
+#define HERMES_ALLOC_LEN_MAX (2400)
+#define HERMES_LTV_LEN_MAX (34)
+#define HERMES_BAP_DATALEN_MAX (4096)
+#define HERMES_BAP_OFFSET_MAX (4096)
+#define HERMES_PORTID_MAX (7)
+#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX+1)
+#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
+#define HERMES_PDA_RECS_MAX (200) /* a guess */
+#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
+#define HERMES_SCANRESULT_MAX (35)
+#define HERMES_CHINFORESULT_MAX (8)
+#define HERMES_MAX_MULTICAST (16)
+#define HERMES_MAGIC (0x7d1f)
+
+/*
+ * Hermes register offsets
+ */
+#define HERMES_CMD (0x00)
+#define HERMES_PARAM0 (0x02)
+#define HERMES_PARAM1 (0x04)
+#define HERMES_PARAM2 (0x06)
+#define HERMES_STATUS (0x08)
+#define HERMES_RESP0 (0x0A)
+#define HERMES_RESP1 (0x0C)
+#define HERMES_RESP2 (0x0E)
+#define HERMES_INFOFID (0x10)
+#define HERMES_RXFID (0x20)
+#define HERMES_ALLOCFID (0x22)
+#define HERMES_TXCOMPLFID (0x24)
+#define HERMES_SELECT0 (0x18)
+#define HERMES_OFFSET0 (0x1C)
+#define HERMES_DATA0 (0x36)
+#define HERMES_SELECT1 (0x1A)
+#define HERMES_OFFSET1 (0x1E)
+#define HERMES_DATA1 (0x38)
+#define HERMES_EVSTAT (0x30)
+#define HERMES_INTEN (0x32)
+#define HERMES_EVACK (0x34)
+#define HERMES_CONTROL (0x14)
+#define HERMES_SWSUPPORT0 (0x28)
+#define HERMES_SWSUPPORT1 (0x2A)
+#define HERMES_SWSUPPORT2 (0x2C)
+#define HERMES_AUXPAGE (0x3A)
+#define HERMES_AUXOFFSET (0x3C)
+#define HERMES_AUXDATA (0x3E)
+
+/*
+ * CMD register bitmasks
+ */
+#define HERMES_CMD_BUSY (0x8000)
+#define HERMES_CMD_AINFO (0x7f00)
+#define HERMES_CMD_MACPORT (0x0700)
+#define HERMES_CMD_RECL (0x0100)
+#define HERMES_CMD_WRITE (0x0100)
+#define HERMES_CMD_PROGMODE (0x0300)
+#define HERMES_CMD_CMDCODE (0x003f)
+
+/*
+ * STATUS register bitmasks
+ */
+#define HERMES_STATUS_RESULT (0x7f00)
+#define HERMES_STATUS_CMDCODE (0x003f)
+
+/*
+ * OFFSET register bitmasks
+ */
+#define HERMES_OFFSET_BUSY (0x8000)
+#define HERMES_OFFSET_ERR (0x4000)
+#define HERMES_OFFSET_DATAOFF (0x0ffe)
+
+/*
+ * Event register bitmasks (INTEN, EVSTAT, EVACK)
+ */
+#define HERMES_EV_TICK (0x8000)
+#define HERMES_EV_WTERR (0x4000)
+#define HERMES_EV_INFDROP (0x2000)
+#define HERMES_EV_INFO (0x0080)
+#define HERMES_EV_DTIM (0x0020)
+#define HERMES_EV_CMD (0x0010)
+#define HERMES_EV_ALLOC (0x0008)
+#define HERMES_EV_TXEXC (0x0004)
+#define HERMES_EV_TX (0x0002)
+#define HERMES_EV_RX (0x0001)
+
+/*
+ * Command codes
+ */
+/*--- Controller Commands ----------------------------*/
+#define HERMES_CMD_INIT (0x0000)
+#define HERMES_CMD_ENABLE (0x0001)
+#define HERMES_CMD_DISABLE (0x0002)
+#define HERMES_CMD_DIAG (0x0003)
+
+/*--- Buffer Mgmt Commands ---------------------------*/
+#define HERMES_CMD_ALLOC (0x000A)
+#define HERMES_CMD_TX (0x000B)
+
+/*--- Regulate Commands ------------------------------*/
+#define HERMES_CMD_NOTIFY (0x0010)
+#define HERMES_CMD_INQUIRE (0x0011)
+
+/*--- Configure Commands -----------------------------*/
+#define HERMES_CMD_ACCESS (0x0021)
+#define HERMES_CMD_DOWNLD (0x0022)
+
+/*--- Serial I/O Commands ----------------------------*/
+#define HERMES_CMD_READMIF (0x0030)
+#define HERMES_CMD_WRITEMIF (0x0031)
+
+/*--- Debugging Commands -----------------------------*/
+#define HERMES_CMD_TEST (0x0038)
+
+
+/* Test command arguments */
+#define HERMES_TEST_SET_CHANNEL 0x0800
+#define HERMES_TEST_MONITOR 0x0b00
+#define HERMES_TEST_STOP 0x0f00
+
+/* Authentication algorithms */
+#define HERMES_AUTH_OPEN 1
+#define HERMES_AUTH_SHARED_KEY 2
+
+/* WEP settings */
+#define HERMES_WEP_PRIVACY_INVOKED 0x0001
+#define HERMES_WEP_EXCL_UNENCRYPTED 0x0002
+#define HERMES_WEP_HOST_ENCRYPT 0x0010
+#define HERMES_WEP_HOST_DECRYPT 0x0080
+
+/* Symbol hostscan options */
+#define HERMES_HOSTSCAN_SYMBOL_5SEC 0x0001
+#define HERMES_HOSTSCAN_SYMBOL_ONCE 0x0002
+#define HERMES_HOSTSCAN_SYMBOL_PASSIVE 0x0040
+#define HERMES_HOSTSCAN_SYMBOL_BCAST 0x0080
+
+/*
+ * Frame structures and constants
+ */
+
+#define HERMES_DESCRIPTOR_OFFSET 0
+#define HERMES_802_11_OFFSET (14)
+#define HERMES_802_3_OFFSET (14+32)
+#define HERMES_802_2_OFFSET (14+32+14)
+
+#define HERMES_RXSTAT_ERR (0x0003)
+#define HERMES_RXSTAT_BADCRC (0x0001)
+#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
+#define HERMES_RXSTAT_MACPORT (0x0700)
+#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
+#define HERMES_RXSTAT_MSGTYPE (0xE000)
+#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
+#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
+#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
+
+struct hermes_tx_descriptor {
+ u16 status;
+ u16 reserved1;
+ u16 reserved2;
+ u32 sw_support;
+ u8 retry_count;
+ u8 tx_rate;
+ u16 tx_control;
+} __attribute__ ((packed));
+
+#define HERMES_TXSTAT_RETRYERR (0x0001)
+#define HERMES_TXSTAT_AGEDERR (0x0002)
+#define HERMES_TXSTAT_DISCON (0x0004)
+#define HERMES_TXSTAT_FORMERR (0x0008)
+
+#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
+#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
+#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
+#define HERMES_TXCTRL_ALT_RTRY (0x0020)
+
+/* Inquiry constants and data types */
+
+#define HERMES_INQ_TALLIES (0xF100)
+#define HERMES_INQ_SCAN (0xF101)
+#define HERMES_INQ_CHANNELINFO (0xF102)
+#define HERMES_INQ_HOSTSCAN (0xF103)
+#define HERMES_INQ_HOSTSCAN_SYMBOL (0xF104)
+#define HERMES_INQ_LINKSTATUS (0xF200)
+#define HERMES_INQ_SEC_STAT_AGERE (0xF202)
+
+struct hermes_tallies_frame {
+ u16 TxUnicastFrames;
+ u16 TxMulticastFrames;
+ u16 TxFragments;
+ u16 TxUnicastOctets;
+ u16 TxMulticastOctets;
+ u16 TxDeferredTransmissions;
+ u16 TxSingleRetryFrames;
+ u16 TxMultipleRetryFrames;
+ u16 TxRetryLimitExceeded;
+ u16 TxDiscards;
+ u16 RxUnicastFrames;
+ u16 RxMulticastFrames;
+ u16 RxFragments;
+ u16 RxUnicastOctets;
+ u16 RxMulticastOctets;
+ u16 RxFCSErrors;
+ u16 RxDiscards_NoBuffer;
+ u16 TxDiscardsWrongSA;
+ u16 RxWEPUndecryptable;
+ u16 RxMsgInMsgFragments;
+ u16 RxMsgInBadMsgFragments;
+	/* These last fields are probably not available in very old firmware */
+ u16 RxDiscards_WEPICVError;
+ u16 RxDiscards_WEPExcluded;
+} __attribute__ ((packed));
+
+/* Grabbed from wlan-ng - Thanks Mark... - Jean II
+ * This is the result of a scan inquiry command */
+/* Structure describing info about an Access Point */
+struct prism2_scan_apinfo {
+ u16 channel; /* Channel where the AP sits */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ u16 essid_len; /* ESSID length */
+ u8 essid[32]; /* ESSID of the network */
+ u8 rates[10]; /* Bit rate supported */
+ u16 proberesp_rate; /* Data rate of the response frame */
+ u16 atim; /* ATIM window time, Kus (hostscan only) */
+} __attribute__ ((packed));
+
+/* Same stuff for the Lucent/Agere card.
+ * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
+struct agere_scan_apinfo {
+ u16 channel; /* Channel where the AP sits */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
+ u16 essid_len; /* ESSID length */
+ u8 essid[32]; /* ESSID of the network */
+} __attribute__ ((packed));
+
+/* Moustafa: Scan structure for Symbol cards */
+struct symbol_scan_apinfo {
+ u8 channel; /* Channel where the AP sits */
+ u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval */
+ u16 capabilities; /* Capabilities */
+ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
+ u16 essid_len; /* ESSID length */
+ u8 essid[32]; /* ESSID of the network */
+ u16 rates[5]; /* Bit rate supported */
+ u16 basic_rates; /* Basic rates bitmask */
+ u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
+ u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
+} __attribute__ ((packed));
+
+union hermes_scan_info {
+ struct agere_scan_apinfo a;
+ struct prism2_scan_apinfo p;
+ struct symbol_scan_apinfo s;
+};
+
+#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
+#define HERMES_LINKSTATUS_CONNECTED (0x0001)
+#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
+#define HERMES_LINKSTATUS_AP_CHANGE (0x0003)
+#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004)
+#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005)
+#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
+
+struct hermes_linkstatus {
+ u16 linkstatus; /* Link status */
+} __attribute__ ((packed));
+
+struct hermes_response {
+ u16 status, resp0, resp1, resp2;
+};
+
+/* "ID" structure - used for ESSID and station nickname */
+struct hermes_idstring {
+ u16 len;
+ u16 val[16];
+} __attribute__ ((packed));
+
+struct hermes_multicast {
+ u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
+} __attribute__ ((packed));
+
+// #define HERMES_DEBUG_BUFFER 1
+#define HERMES_DEBUG_BUFSIZE 4096
+struct hermes_debug_entry {
+ int bap;
+ u16 id, offset;
+ int cycles;
+};
+
+#ifdef __KERNEL__
+
+/* Timeouts */
+#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
+
+/* Basic control structure */
+typedef struct hermes {
+ void __iomem *iobase;
+ int reg_spacing;
+#define HERMES_16BIT_REGSPACING 0
+#define HERMES_32BIT_REGSPACING 1
+
+ u16 inten; /* Which interrupts should be enabled? */
+
+#ifdef HERMES_DEBUG_BUFFER
+ struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
+ unsigned long dbufp;
+ unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
+#endif
+} hermes_t;
+
+/* Register access convenience macros */
+#define hermes_read_reg(hw, off) \
+ (ioread16((hw)->iobase + ( (off) << (hw)->reg_spacing )))
+#define hermes_write_reg(hw, off, val) \
+ (iowrite16((val), (hw)->iobase + ((off) << (hw)->reg_spacing)))
+#define hermes_read_regn(hw, name) hermes_read_reg((hw), HERMES_##name)
+#define hermes_write_regn(hw, name, val) hermes_write_reg((hw), HERMES_##name, (val))
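+
+/* For example (illustrative only; 'mem' is a hypothetical ioremap()ed
+ * register window), a bus attach routine sets up the structure and can
+ * then use the accessors above:
+ *
+ *	hermes_t hw;
+ *	u16 ev;
+ *
+ *	hermes_struct_init(&hw, mem, HERMES_16BIT_REGSPACING);
+ *	ev = hermes_read_regn(&hw, EVSTAT);
+ *	hermes_write_regn(&hw, EVACK, ev);
+ */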
+
+/* Function prototypes */
+void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
+int hermes_init(hermes_t *hw);
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp);
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
+
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value);
+
+/* Inline functions */
+
+static inline int hermes_present(hermes_t *hw)
+{
+ return hermes_read_regn(hw, SWSUPPORT0) == HERMES_MAGIC;
+}
+
+static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
+{
+ hw->inten = events;
+ hermes_write_regn(hw, INTEN, events);
+}
+
+static inline int hermes_enable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ 0, NULL);
+}
+
+static inline int hermes_disable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ 0, NULL);
+}
+
+/* Initiate an INQUIRE command (tallies or scan). The result will come as an
+ * information frame in __orinoco_ev_info() */
+static inline int hermes_inquire(hermes_t *hw, u16 rid)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+}
+
+#define HERMES_BYTES_TO_RECLEN(n) ( (((n)+1)/2) + 1 )
+#define HERMES_RECLEN_TO_BYTES(n) ( ((n)-1) * 2 )
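+/* For example, a 6-byte MAC address occupies three 16-bit data words, so
+ * HERMES_BYTES_TO_RECLEN(6) == 4 (the data words plus the type word), and
+ * HERMES_RECLEN_TO_BYTES(4) == 6 again. */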
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;
+ ioread16_rep(hw->iobase + off, buf, count);
+}
+
+static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;
+ iowrite16_rep(hw->iobase + off, buf, count);
+}
+
+static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
+{
+ unsigned i;
+
+ off = off << hw->reg_spacing;
+
+ for (i = 0; i < count; i++)
+ iowrite16(0, hw->iobase + off);
+}
+
+#define HERMES_READ_RECORD(hw, bap, rid, buf) \
+ (hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf)))
+#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
+ (hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(sizeof(*buf)),(buf)))
+
+static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
+{
+ u16 rec;
+ int err;
+
+ err = HERMES_READ_RECORD(hw, bap, rid, &rec);
+ *word = le16_to_cpu(rec);
+ return err;
+}
+
+static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
+{
+ u16 rec = cpu_to_le16(word);
+ return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
+}
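+
+/* For example (illustrative only; the port type value shown is just an
+ * example), a driver might set a single-word configuration record with:
+ *
+ *	err = hermes_write_wordrec(hw, 0, HERMES_RID_CNFPORTTYPE, 1);
+ */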
+
+#else /* ! __KERNEL__ */
+
+/* These are provided for the benefit of userspace drivers and testing programs
+ which use ioperm() or iopl() */
+
+#define hermes_read_reg(base, off) (inw((base) + (off)))
+#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
+
+#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
+#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
+#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
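+
+/* For example (illustrative only; 'base' is the card's I/O base), a
+ * userspace test that has been granted port access can poll the event
+ * status register directly:
+ *
+ *	if (ioperm(base, 0x40, 1) == 0) {
+ *		u16 ev = hermes_read_regn(base, EVSTAT);
+ *		hermes_write_regn(base, EVACK, ev);
+ *	}
+ */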
+
+#endif /* ! __KERNEL__ */
+
+#endif /* _HERMES_H */
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/hermes_rid.h
new file mode 100644
index 000000000000..4f46b4809e55
--- /dev/null
+++ b/drivers/net/wireless/hermes_rid.h
@@ -0,0 +1,148 @@
+#ifndef _HERMES_RID_H
+#define _HERMES_RID_H
+
+/*
+ * Configuration RIDs
+ */
+#define HERMES_RID_CNFPORTTYPE 0xFC00
+#define HERMES_RID_CNFOWNMACADDR 0xFC01
+#define HERMES_RID_CNFDESIREDSSID 0xFC02
+#define HERMES_RID_CNFOWNCHANNEL 0xFC03
+#define HERMES_RID_CNFOWNSSID 0xFC04
+#define HERMES_RID_CNFOWNATIMWINDOW 0xFC05
+#define HERMES_RID_CNFSYSTEMSCALE 0xFC06
+#define HERMES_RID_CNFMAXDATALEN 0xFC07
+#define HERMES_RID_CNFWDSADDRESS 0xFC08
+#define HERMES_RID_CNFPMENABLED 0xFC09
+#define HERMES_RID_CNFPMEPS 0xFC0A
+#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B
+#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C
+#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D
+#define HERMES_RID_CNFOWNNAME 0xFC0E
+#define HERMES_RID_CNFOWNDTIMPERIOD 0xFC10
+#define HERMES_RID_CNFWDSADDRESS1 0xFC11
+#define HERMES_RID_CNFWDSADDRESS2 0xFC12
+#define HERMES_RID_CNFWDSADDRESS3 0xFC13
+#define HERMES_RID_CNFWDSADDRESS4 0xFC14
+#define HERMES_RID_CNFWDSADDRESS5 0xFC15
+#define HERMES_RID_CNFWDSADDRESS6 0xFC16
+#define HERMES_RID_CNFMULTICASTPMBUFFERING 0xFC17
+#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20
+#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21
+#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
+#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23
+#define HERMES_RID_CNFDEFAULTKEY0 0xFC24
+#define HERMES_RID_CNFDEFAULTKEY1 0xFC25
+#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25
+#define HERMES_RID_CNFDEFAULTKEY2 0xFC26
+#define HERMES_RID_CNFDEFAULTKEY3 0xFC27
+#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28
+#define HERMES_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
+#define HERMES_RID_CNFAUTHENTICATION 0xFC2A
+#define HERMES_RID_CNFMAXASSOCSTA 0xFC2B
+#define HERMES_RID_CNFKEYLENGTH_SYMBOL 0xFC2B
+#define HERMES_RID_CNFTXCONTROL 0xFC2C
+#define HERMES_RID_CNFROAMINGMODE 0xFC2D
+#define HERMES_RID_CNFHOSTAUTHENTICATION 0xFC2E
+#define HERMES_RID_CNFRCVCRCERROR 0xFC30
+#define HERMES_RID_CNFMMLIFE 0xFC31
+#define HERMES_RID_CNFALTRETRYCOUNT 0xFC32
+#define HERMES_RID_CNFBEACONINT 0xFC33
+#define HERMES_RID_CNFAPPCFINFO 0xFC34
+#define HERMES_RID_CNFSTAPCFINFO 0xFC35
+#define HERMES_RID_CNFPRIORITYQUSAGE 0xFC37
+#define HERMES_RID_CNFTIMCTRL 0xFC40
+#define HERMES_RID_CNFTHIRTY2TALLY 0xFC42
+#define HERMES_RID_CNFENHSECURITY 0xFC43
+#define HERMES_RID_CNFGROUPADDRESSES 0xFC80
+#define HERMES_RID_CNFCREATEIBSS 0xFC81
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82
+#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83
+#define HERMES_RID_CNFTXRATECONTROL 0xFC84
+#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85
+#define HERMES_RID_CNFBASICRATES_SYMBOL 0xFC8A
+#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD0 0xFC90
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD1 0xFC91
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD2 0xFC92
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD3 0xFC93
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD4 0xFC94
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD5 0xFC95
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD6 0xFC96
+#define HERMES_RID_CNFRTSTHRESHOLD0 0xFC97
+#define HERMES_RID_CNFRTSTHRESHOLD1 0xFC98
+#define HERMES_RID_CNFRTSTHRESHOLD2 0xFC99
+#define HERMES_RID_CNFRTSTHRESHOLD3 0xFC9A
+#define HERMES_RID_CNFRTSTHRESHOLD4 0xFC9B
+#define HERMES_RID_CNFRTSTHRESHOLD5 0xFC9C
+#define HERMES_RID_CNFRTSTHRESHOLD6 0xFC9D
+#define HERMES_RID_CNFHOSTSCAN_SYMBOL 0xFCAB
+#define HERMES_RID_CNFSHORTPREAMBLE 0xFCB0
+#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0
+#define HERMES_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
+#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1
+#define HERMES_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
+#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2
+#define HERMES_RID_CNFBASICRATES 0xFCB3
+#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
+#define HERMES_RID_CNFTICKTIME 0xFCE0
+#define HERMES_RID_CNFSCANREQUEST 0xFCE1
+#define HERMES_RID_CNFJOINREQUEST 0xFCE2
+#define HERMES_RID_CNFAUTHENTICATESTATION 0xFCE3
+#define HERMES_RID_CNFCHANNELINFOREQUEST 0xFCE4
+#define HERMES_RID_CNFHOSTSCAN 0xFCE5
+
+/*
+ * Information RIDs
+ */
+#define HERMES_RID_MAXLOADTIME 0xFD00
+#define HERMES_RID_DOWNLOADBUFFER 0xFD01
+#define HERMES_RID_PRIID 0xFD02
+#define HERMES_RID_PRISUPRANGE 0xFD03
+#define HERMES_RID_CFIACTRANGES 0xFD04
+#define HERMES_RID_NICSERNUM 0xFD0A
+#define HERMES_RID_NICID 0xFD0B
+#define HERMES_RID_MFISUPRANGE 0xFD0C
+#define HERMES_RID_CFISUPRANGE 0xFD0D
+#define HERMES_RID_CHANNELLIST 0xFD10
+#define HERMES_RID_REGULATORYDOMAINS 0xFD11
+#define HERMES_RID_TEMPTYPE 0xFD12
+#define HERMES_RID_CIS 0xFD13
+#define HERMES_RID_STAID 0xFD20
+#define HERMES_RID_STASUPRANGE 0xFD21
+#define HERMES_RID_MFIACTRANGES 0xFD22
+#define HERMES_RID_CFIACTRANGES2 0xFD23
+#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24
+#define HERMES_RID_PORTSTATUS 0xFD40
+#define HERMES_RID_CURRENTSSID 0xFD41
+#define HERMES_RID_CURRENTBSSID 0xFD42
+#define HERMES_RID_COMMSQUALITY 0xFD43
+#define HERMES_RID_CURRENTTXRATE 0xFD44
+#define HERMES_RID_CURRENTBEACONINTERVAL 0xFD45
+#define HERMES_RID_CURRENTSCALETHRESHOLDS 0xFD46
+#define HERMES_RID_PROTOCOLRSPTIME 0xFD47
+#define HERMES_RID_SHORTRETRYLIMIT 0xFD48
+#define HERMES_RID_LONGRETRYLIMIT 0xFD49
+#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A
+#define HERMES_RID_MAXRECEIVELIFETIME 0xFD4B
+#define HERMES_RID_CFPOLLABLE 0xFD4C
+#define HERMES_RID_AUTHENTICATIONALGORITHMS 0xFD4D
+#define HERMES_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
+#define HERMES_RID_DBMCOMMSQUALITY_INTERSIL 0xFD51
+#define HERMES_RID_CURRENTTXRATE1 0xFD80
+#define HERMES_RID_CURRENTTXRATE2 0xFD81
+#define HERMES_RID_CURRENTTXRATE3 0xFD82
+#define HERMES_RID_CURRENTTXRATE4 0xFD83
+#define HERMES_RID_CURRENTTXRATE5 0xFD84
+#define HERMES_RID_CURRENTTXRATE6 0xFD85
+#define HERMES_RID_OWNMACADDR 0xFD86
+#define HERMES_RID_SCANRESULTSTABLE 0xFD88
+#define HERMES_RID_PHYTYPE 0xFDC0
+#define HERMES_RID_CURRENTCHANNEL 0xFDC1
+#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
+#define HERMES_RID_CCAMODE 0xFDC3
+#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6
+#define HERMES_RID_BUILDSEQ 0xFFFE
+#define HERMES_RID_FWID 0xFFFF
+
+#endif
diff --git a/drivers/net/wireless/i82586.h b/drivers/net/wireless/i82586.h
new file mode 100644
index 000000000000..5f65b250646f
--- /dev/null
+++ b/drivers/net/wireless/i82586.h
@@ -0,0 +1,413 @@
+/*
+ * Intel 82586 IEEE 802.3 Ethernet LAN Coprocessor.
+ *
+ * See:
+ * Intel Microcommunications 1991
+ * p1-1 to p1-37
+ * Intel order No. 231658
+ * ISBN 1-55512-119-5
+ *
+ * Unfortunately, the above chapter mentions neither
+ * the System Configuration Pointer (SCP) nor the
+ * Intermediate System Configuration Pointer (ISCP),
+ * so we probably need to look elsewhere for the
+ * whole story -- some recommend the "Intel LAN
+ * Components manual" but I have neither a copy
+ * nor a full reference. But "elsewhere" may be
+ * in the same publication...
+ * The description of a later device, the
+ * "82596CA High-Performance 32-Bit Local Area Network
+ * Coprocessor", (ibid. p1-38 to p1-109) does mention
+ * the SCP and ISCP and also has an i82586 compatibility
+ * mode. Even more useful is "AP-235 An 82586 Data Link
+ * Driver" (ibid. p1-337 to p1-417).
+ */
+
+#define I82586_MEMZ (64 * 1024)
+
+#define I82586_SCP_ADDR (I82586_MEMZ - sizeof(scp_t))
+
+#define ADDR_LEN 6
+#define I82586NULL 0xFFFF
+
+#define toff(t,p,f) (unsigned short)((void *)(&((t *)((void *)0 + (p)))->f) - (void *)0)
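+
+/* toff(t, p, f) evaluates to p plus the offset of field f within a structure
+ * of type t, i.e. the 16-bit offset of that field when the structure is
+ * placed at offset p in the 82586 shared memory window. For example,
+ * toff(scb_t, p, scb_command) == p + 2. */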
+
+/*
+ * System Configuration Pointer (SCP).
+ */
+typedef struct scp_t scp_t;
+struct scp_t
+{
+ unsigned short scp_sysbus; /* 82586 bus width: */
+#define SCP_SY_16BBUS (0x0 << 0) /* 16 bits */
+#define SCP_SY_8BBUS (0x1 << 0) /* 8 bits. */
+ unsigned short scp_junk[2]; /* Unused */
+ unsigned short scp_iscpl; /* lower 16 bits of ISCP_ADDR */
+ unsigned short scp_iscph; /* upper 16 bits of ISCP_ADDR */
+};
+
+/*
+ * Intermediate System Configuration Pointer (ISCP).
+ */
+typedef struct iscp_t iscp_t;
+struct iscp_t
+{
+ unsigned short iscp_busy; /* set by CPU before first CA, */
+ /* cleared by 82586 after read. */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh; /* " */
+};
+
+/*
+ * System Control Block (SCB).
+ * The 82586 writes its status to scb_status and then
+ * raises an interrupt to alert the CPU.
+ * The CPU writes a command to scb_command and
+ * then issues a Channel Attention (CA) to alert the 82586.
+ */
+typedef struct scb_t scb_t;
+struct scb_t
+{
+ unsigned short scb_status; /* Status of 82586 */
+#define SCB_ST_INT (0xF << 12) /* Some of: */
+#define SCB_ST_CX (0x1 << 15) /* Cmd completed */
+#define SCB_ST_FR (0x1 << 14) /* Frame received */
+#define SCB_ST_CNA (0x1 << 13) /* Cmd unit not active */
+#define SCB_ST_RNR (0x1 << 12) /* Rcv unit not ready */
+#define SCB_ST_JUNK0 (0x1 << 11) /* 0 */
+#define SCB_ST_CUS (0x7 << 8) /* Cmd unit status */
+#define SCB_ST_CUS_IDLE (0 << 8) /* Idle */
+#define SCB_ST_CUS_SUSP (1 << 8) /* Suspended */
+#define SCB_ST_CUS_ACTV (2 << 8) /* Active */
+#define SCB_ST_JUNK1 (0x1 << 7) /* 0 */
+#define SCB_ST_RUS (0x7 << 4) /* Rcv unit status */
+#define SCB_ST_RUS_IDLE (0 << 4) /* Idle */
+#define SCB_ST_RUS_SUSP (1 << 4) /* Suspended */
+#define SCB_ST_RUS_NRES (2 << 4) /* No resources */
+#define SCB_ST_RUS_RDY (4 << 4) /* Ready */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMD_ACK_CX (0x1 << 15) /* Ack cmd completion */
+#define SCB_CMD_ACK_FR (0x1 << 14) /* Ack frame received */
+#define SCB_CMD_ACK_CNA (0x1 << 13) /* Ack CU not active */
+#define SCB_CMD_ACK_RNR (0x1 << 12) /* Ack RU not ready */
+#define SCB_CMD_JUNKX (0x1 << 11) /* Unused */
+#define SCB_CMD_CUC (0x7 << 8) /* Command Unit command */
+#define SCB_CMD_CUC_NOP (0 << 8) /* Nop */
+#define SCB_CMD_CUC_GO (1 << 8) /* Start cbl_offset */
+#define SCB_CMD_CUC_RES (2 << 8) /* Resume execution */
+#define SCB_CMD_CUC_SUS (3 << 8) /* Suspend " */
+#define SCB_CMD_CUC_ABT (4 << 8) /* Abort " */
+#define SCB_CMD_RESET (0x1 << 7) /* Reset chip (hardware) */
+#define SCB_CMD_RUC (0x7 << 4) /* Receive Unit command */
+#define SCB_CMD_RUC_NOP (0 << 4) /* Nop */
+#define SCB_CMD_RUC_GO (1 << 4) /* Start rfa_offset */
+#define SCB_CMD_RUC_RES (2 << 4) /* Resume reception */
+#define SCB_CMD_RUC_SUS (3 << 4) /* Suspend " */
+#define SCB_CMD_RUC_ABT (4 << 4) /* Abort " */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ /* Action Command */
+ unsigned short scb_rfa_offset; /* Offset of first Receive */
+ /* Frame Descriptor in the */
+ /* Receive Frame Area */
+ unsigned short scb_crcerrs; /* Properly aligned frames */
+ /* received with a CRC error */
+ unsigned short scb_alnerrs; /* Misaligned frames received */
+ /* with a CRC error */
+ unsigned short scb_rscerrs; /* Frames lost due to no space */
+ unsigned short scb_ovrnerrs; /* Frames lost due to slow bus */
+};
+
+#define scboff(p,f) toff(scb_t, p, f)
+
+/*
+ * The eight Action Commands.
+ */
+typedef enum acmd_e acmd_e;
+enum acmd_e
+{
+ acmd_nop = 0, /* Do nothing */
+ acmd_ia_setup = 1, /* Load an (ethernet) address into the */
+ /* 82586 */
+ acmd_configure = 2, /* Update the 82586 operating parameters */
+ acmd_mc_setup = 3, /* Load a list of (ethernet) multicast */
+ /* addresses into the 82586 */
+ acmd_transmit = 4, /* Transmit a frame */
+ acmd_tdr = 5, /* Perform a Time Domain Reflectometer */
+ /* test on the serial link */
+ acmd_dump = 6, /* Copy 82586 registers to memory */
+ acmd_diagnose = 7, /* Run an internal self test */
+};
+
+/*
+ * Generic Action Command header.
+ */
+typedef struct ach_t ach_t;
+struct ach_t
+{
+ unsigned short ac_status; /* Command status: */
+#define AC_SFLD_C (0x1 << 15) /* Command completed */
+#define AC_SFLD_B (0x1 << 14) /* Busy executing */
+#define AC_SFLD_OK (0x1 << 13) /* Completed error free */
+#define AC_SFLD_A (0x1 << 12) /* Command aborted */
+#define AC_SFLD_FAIL (0x1 << 11) /* Selftest failed */
+#define AC_SFLD_S10 (0x1 << 10) /* No carrier sense */
+ /* during transmission */
+#define AC_SFLD_S9 (0x1 << 9) /* Tx unsuccessful: */
+ /* (stopped) lost CTS */
+#define AC_SFLD_S8 (0x1 << 8) /* Tx unsuccessful: */
+ /* (stopped) slow DMA */
+#define AC_SFLD_S7 (0x1 << 7) /* Tx deferred: */
+ /* other link traffic */
+#define AC_SFLD_S6 (0x1 << 6) /* Heart Beat: collision */
+ /* detect after last tx */
+#define AC_SFLD_S5 (0x1 << 5) /* Tx stopped: */
+ /* excessive collisions */
+#define AC_SFLD_MAXCOL (0xF << 0) /* Collision count */
+ unsigned short ac_command; /* Command specifier: */
+#define AC_CFLD_EL (0x1 << 15) /* End of command list */
+#define AC_CFLD_S (0x1 << 14) /* Suspend on completion */
+#define AC_CFLD_I (0x1 << 13) /* Interrupt on completion */
+#define AC_CFLD_CMD (0x7 << 0) /* acmd_e */
+ unsigned short ac_link; /* Next Action Command */
+};
+
+#define acoff(p,f) toff(ach_t, p, f)
+
+/*
+ * The Nop Action Command.
+ */
+typedef struct ac_nop_t ac_nop_t;
+struct ac_nop_t
+{
+ ach_t nop_h;
+};
+
+/*
+ * The IA-Setup Action Command.
+ */
+typedef struct ac_ias_t ac_ias_t;
+struct ac_ias_t
+{
+ ach_t ias_h;
+ unsigned char ias_addr[ADDR_LEN]; /* The (ethernet) address */
+};
+
+/*
+ * The Configure Action Command.
+ */
+typedef struct ac_cfg_t ac_cfg_t;
+struct ac_cfg_t
+{
+ ach_t cfg_h;
+ unsigned char cfg_byte_cnt; /* Size foll data: 4-12 */
+#define AC_CFG_BYTE_CNT(v) (((v) & 0xF) << 0)
+ unsigned char cfg_fifolim; /* FIFO threshold */
+#define AC_CFG_FIFOLIM(v) (((v) & 0xF) << 0)
+ unsigned char cfg_byte8;
+#define AC_CFG_SAV_BF(v) (((v) & 0x1) << 7) /* Save rxd bad frames */
+#define AC_CFG_SRDY(v) (((v) & 0x1) << 6) /* SRDY/ARDY pin means */
+ /* external sync. */
+ unsigned char cfg_byte9;
+#define AC_CFG_ELPBCK(v) (((v) & 0x1) << 7) /* External loopback */
+#define AC_CFG_ILPBCK(v) (((v) & 0x1) << 6) /* Internal loopback */
+#define AC_CFG_PRELEN(v) (((v) & 0x3) << 4) /* Preamble length */
+#define AC_CFG_PLEN_2 0 /* 2 bytes */
+#define AC_CFG_PLEN_4 1 /* 4 bytes */
+#define AC_CFG_PLEN_8 2 /* 8 bytes */
+#define AC_CFG_PLEN_16 3 /* 16 bytes */
+#define AC_CFG_ALOC(v) (((v) & 0x1) << 3) /* Addr/len data is */
+ /* explicit in buffers */
+#define AC_CFG_ADDRLEN(v) (((v) & 0x7) << 0) /* Bytes per address */
+ unsigned char cfg_byte10;
+#define AC_CFG_BOFMET(v) (((v) & 0x1) << 7) /* Use alternate expo. */
+ /* backoff method */
+#define AC_CFG_ACR(v) (((v) & 0x7) << 4) /* Accelerated cont. res. */
+#define AC_CFG_LINPRIO(v) (((v) & 0x7) << 0) /* Linear priority */
+ unsigned char cfg_ifs; /* Interframe spacing */
+ unsigned char cfg_slotl; /* Slot time (low byte) */
+ unsigned char cfg_byte13;
+#define AC_CFG_RETRYNUM(v) (((v) & 0xF) << 4) /* Max. collision retry */
+#define AC_CFG_SLTTMHI(v) (((v) & 0x7) << 0) /* Slot time (high bits) */
+ unsigned char cfg_byte14;
+#define AC_CFG_FLGPAD(v) (((v) & 0x1) << 7) /* Pad with HDLC flags */
+#define AC_CFG_BTSTF(v) (((v) & 0x1) << 6) /* Do HDLC bitstuffing */
+#define AC_CFG_CRC16(v) (((v) & 0x1) << 5) /* 16 bit CCITT CRC */
+#define AC_CFG_NCRC(v) (((v) & 0x1) << 4) /* Insert no CRC */
+#define AC_CFG_TNCRS(v) (((v) & 0x1) << 3) /* Tx even if no carrier */
+#define AC_CFG_MANCH(v) (((v) & 0x1) << 2) /* Manchester coding */
+#define AC_CFG_BCDIS(v) (((v) & 0x1) << 1) /* Disable broadcast */
+#define AC_CFG_PRM(v) (((v) & 0x1) << 0) /* Promiscuous mode */
+ unsigned char cfg_byte15;
+#define AC_CFG_ICDS(v) (((v) & 0x1) << 7) /* Internal collision */
+ /* detect source */
+#define AC_CFG_CDTF(v) (((v) & 0x7) << 4) /* Collision detect */
+ /* filter in bit times */
+#define AC_CFG_ICSS(v) (((v) & 0x1) << 3) /* Internal carrier */
+ /* sense source */
+#define AC_CFG_CSTF(v) (((v) & 0x7) << 0) /* Carrier sense */
+ /* filter in bit times */
+ unsigned short cfg_min_frm_len;
+#define AC_CFG_MNFRM(v) (((v) & 0xFF) << 0) /* Min. bytes/frame (<= 255) */
+};
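+
+/*
+ * A minimal usage sketch: the AC_CFG_*() macros above are value-shifting
+ * helpers, so a Configure command is filled in by OR-ing them into the
+ * cfg_byte* fields.  The function name and the field values chosen below
+ * are illustrative only.
+ */
+#if 0
+static void example_fill_cfg(ac_cfg_t *cfg)
+{
+	cfg->cfg_byte_cnt = AC_CFG_BYTE_CNT(12);	  /* 12 config bytes follow */
+	cfg->cfg_byte9	  = AC_CFG_PRELEN(AC_CFG_PLEN_8)  /* 8 byte preamble */
+			  | AC_CFG_ALOC(1)		  /* addr/len kept in buffers */
+			  | AC_CFG_ADDRLEN(ADDR_LEN);	  /* ADDR_LEN byte addresses */
+	cfg->cfg_byte14	  = AC_CFG_MANCH(0) | AC_CFG_PRM(0); /* no Manchester, not promiscuous */
+}
+#endif /* 0 */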
+
+/*
+ * The MC-Setup Action Command.
+ */
+typedef struct ac_mcs_t ac_mcs_t;
+struct ac_mcs_t
+{
+ ach_t mcs_h;
+ unsigned short mcs_cnt; /* No. of bytes of MC addresses */
+#if 0
+ unsigned char mcs_data[ADDR_LEN]; /* The first MC address .. */
+ ...
+#endif
+};
+
+#define I82586_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
+
+/*
+ * The Transmit Action Command.
+ */
+typedef struct ac_tx_t ac_tx_t;
+struct ac_tx_t
+{
+ ach_t tx_h;
+ unsigned short tx_tbd_offset; /* Address of list of buffers. */
+#if 0
+Linux packets are passed down with the destination MAC address
+and length/type field already prepended to the data,
+so we do not need to insert it. Consistent with this
+we must also set the AC_CFG_ALOC(..) flag during the
+ac_cfg_t action command.
+ unsigned char tx_addr[ADDR_LEN]; /* The frame dest. address */
+ unsigned short tx_length; /* The frame length */
+#endif /* 0 */
+};
+
+/*
+ * The Time Domain Reflectometer Action Command.
+ */
+typedef struct ac_tdr_t ac_tdr_t;
+struct ac_tdr_t
+{
+ ach_t tdr_h;
+ unsigned short tdr_result; /* Result. */
+#define AC_TDR_LNK_OK (0x1 << 15) /* No link problem */
+#define AC_TDR_XCVR_PRB (0x1 << 14) /* Txcvr cable problem */
+#define AC_TDR_ET_OPN (0x1 << 13) /* Open on the link */
+#define AC_TDR_ET_SRT (0x1 << 12) /* Short on the link */
+#define AC_TDR_TIME (0x7FF << 0) /* Distance to problem */
+ /* site in transmit */
+ /* clock cycles */
+};
+
+/*
+ * The Dump Action Command.
+ */
+typedef struct ac_dmp_t ac_dmp_t;
+struct ac_dmp_t
+{
+ ach_t dmp_h;
+ unsigned short dmp_offset; /* Result. */
+};
+
+/*
+ * Size of the result of the dump command.
+ */
+#define DUMPBYTES 170
+
+/*
+ * The Diagnose Action Command.
+ */
+typedef struct ac_dgn_t ac_dgn_t;
+struct ac_dgn_t
+{
+ ach_t dgn_h;
+};
+
+/*
+ * Transmit Buffer Descriptor (TBD).
+ */
+typedef struct tbd_t tbd_t;
+struct tbd_t
+{
+ unsigned short tbd_status; /* Written by the CPU */
+#define TBD_STATUS_EOF (0x1 << 15) /* This TBD is the */
+ /* last for this frame */
+#define TBD_STATUS_ACNT (0x3FFF << 0) /* Actual count of data */
+ /* bytes in this buffer */
+ unsigned short tbd_next_bd_offset; /* Next in list */
+ unsigned short tbd_bufl; /* Buffer address (low) */
+ unsigned short tbd_bufh; /* " " (high) */
+};
+
+/*
+ * Receive Buffer Descriptor (RBD).
+ */
+typedef struct rbd_t rbd_t;
+struct rbd_t
+{
+ unsigned short rbd_status; /* Written by the 82586 */
+#define RBD_STATUS_EOF (0x1 << 15) /* This RBD is the */
+ /* last for this frame */
+#define RBD_STATUS_F (0x1 << 14) /* ACNT field is valid */
+#define RBD_STATUS_ACNT (0x3FFF << 0) /* Actual no. of data */
+ /* bytes in this buffer */
+ unsigned short rbd_next_rbd_offset; /* Next rbd in list */
+ unsigned short rbd_bufl; /* Data pointer (low) */
+ unsigned short rbd_bufh; /* " " (high) */
+ unsigned short rbd_el_size; /* EL+Data buf. size */
+#define RBD_EL (0x1 << 15) /* This BD is the */
+ /* last in the list */
+#define RBD_SIZE (0x3FFF << 0) /* No. of bytes the */
+ /* buffer can hold */
+};
+
+#define rbdoff(p,f) toff(rbd_t, p, f)
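+
+/*
+ * A minimal sketch of how a receive buffer descriptor is read back: the
+ * byte count in rbd_status is only meaningful when the F bit says it is
+ * valid.  The helper name is illustrative only.
+ */
+#if 0
+static int example_rbd_byte_count(const rbd_t *rbd)
+{
+	if (rbd->rbd_status & RBD_STATUS_F)		  /* ACNT field valid */
+		return rbd->rbd_status & RBD_STATUS_ACNT; /* bytes received */
+	return -1;					  /* count not yet valid */
+}
+#endif /* 0 */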
+
+/*
+ * Frame Descriptor (FD).
+ */
+typedef struct fd_t fd_t;
+struct fd_t
+{
+ unsigned short fd_status; /* Written by the 82586 */
+#define FD_STATUS_C (0x1 << 15) /* Completed storing frame */
+#define FD_STATUS_B (0x1 << 14) /* FD was consumed by RU */
+#define FD_STATUS_OK (0x1 << 13) /* Frame rxd successfully */
+#define FD_STATUS_S11 (0x1 << 11) /* CRC error */
+#define FD_STATUS_S10 (0x1 << 10) /* Alignment error */
+#define FD_STATUS_S9 (0x1 << 9) /* Ran out of resources */
+#define FD_STATUS_S8 (0x1 << 8) /* Rx DMA overrun */
+#define FD_STATUS_S7 (0x1 << 7) /* Frame too short */
+#define FD_STATUS_S6 (0x1 << 6) /* No EOF flag */
+ unsigned short fd_command; /* Command */
+#define FD_COMMAND_EL (0x1 << 15) /* Last FD in list */
+#define FD_COMMAND_S (0x1 << 14) /* Suspend RU after rx */
+ unsigned short fd_link_offset; /* Next FD */
+ unsigned short fd_rbd_offset; /* First RBD (data) */
+ /* Prepared by CPU, */
+ /* updated by 82586 */
+#if 0
+I think the rest is unused since we
+have set AC_CFG_ALOC(..). However, just
+in case, we leave the space.
+#endif /* 0 */
+ unsigned char fd_dest[ADDR_LEN]; /* Destination address */
+ /* Written by 82586 */
+ unsigned char fd_src[ADDR_LEN]; /* Source address */
+ /* Written by 82586 */
+ unsigned short fd_length; /* Frame length or type */
+ /* Written by 82586 */
+};
+
+#define fdoff(p,f) toff(fd_t, p, f)
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU General Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/drivers/net/wireless/i82593.h b/drivers/net/wireless/i82593.h
new file mode 100644
index 000000000000..33acb8add4d6
--- /dev/null
+++ b/drivers/net/wireless/i82593.h
@@ -0,0 +1,224 @@
+/*
+ * Definitions for Intel 82593 CSMA/CD Core LAN Controller
+ * The definitions are taken from the 1992 users manual with Intel
+ * order number 297125-001.
+ *
+ * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
+ *
+ * Copyright 1994, Anders Klemets <klemets@it.kth.se>
+ *
+ * This software may be freely distributed for noncommercial purposes
+ * as long as this notice is retained.
+ *
+ * HISTORY
+ * i82593.h,v
+ * Revision 1.1 1996/07/17 15:23:12 root
+ * Initial revision
+ *
+ * Revision 1.3 1995/04/05 15:13:58 adj
+ * Initial alpha release
+ *
+ * Revision 1.2 1994/06/16 23:57:31 klemets
+ * Mirrored all the fields in the configuration block.
+ *
+ * Revision 1.1 1994/06/02 20:25:34 klemets
+ * Initial revision
+ *
+ *
+ */
+#ifndef _I82593_H
+#define _I82593_H
+
+/* Intel 82593 CSMA/CD Core LAN Controller */
+
+/* Port 0 Command Register definitions */
+
+/* Execution operations */
+#define OP0_NOP 0 /* CHNL = 0 */
+#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
+#define OP0_IA_SETUP 1
+#define OP0_CONFIGURE 2
+#define OP0_MC_SETUP 3
+#define OP0_TRANSMIT 4
+#define OP0_TDR 5
+#define OP0_DUMP 6
+#define OP0_DIAGNOSE 7
+#define OP0_TRANSMIT_NO_CRC 9
+#define OP0_RETRANSMIT 12
+#define OP0_ABORT 13
+/* Reception operations */
+#define OP0_RCV_ENABLE 8
+#define OP0_RCV_DISABLE 10
+#define OP0_STOP_RCV 11
+/* Status pointer control operations */
+#define OP0_FIX_PTR 15 /* CHNL = 1 */
+#define OP0_RLS_PTR 15 /* CHNL = 0 */
+#define OP0_RESET 14
+
+#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
+#define CR0_STATUS_0 0x00
+#define CR0_STATUS_1 0x20
+#define CR0_STATUS_2 0x40
+#define CR0_STATUS_3 0x60
+#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
+
+/* Port 0 Status Register definitions */
+
+#define SR0_NO_RESULT 0 /* dummy */
+#define SR0_EVENT_MASK 0x0f
+#define SR0_IA_SETUP_DONE 1
+#define SR0_CONFIGURE_DONE 2
+#define SR0_MC_SETUP_DONE 3
+#define SR0_TRANSMIT_DONE 4
+#define SR0_TDR_DONE 5
+#define SR0_DUMP_DONE 6
+#define SR0_DIAGNOSE_PASSED 7
+#define SR0_TRANSMIT_NO_CRC_DONE 9
+#define SR0_RETRANSMIT_DONE 12
+#define SR0_EXECUTION_ABORTED 13
+#define SR0_END_OF_FRAME 8
+#define SR0_RECEPTION_ABORTED 10
+#define SR0_DIAGNOSE_FAILED 15
+#define SR0_STOP_REG_HIT 11
+
+#define SR0_CHNL (1 << 4)
+#define SR0_EXECUTION (1 << 5)
+#define SR0_RECEPTION (1 << 6)
+#define SR0_INTERRUPT (1 << 7)
+#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
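+
+/*
+ * A minimal decoding sketch: a Port 0 status byte carries an event code
+ * in its low nibble plus source/interrupt flags in the upper bits, so a
+ * handler typically masks before comparing.  The helper name is
+ * illustrative only.
+ */
+#if 0
+static int example_sr0_is_tx_done(unsigned char sr0)
+{
+	if (!(sr0 & SR0_INTERRUPT))	/* nothing pending */
+		return 0;
+	return (sr0 & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE;
+}
+#endif /* 0 */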
+
+#define SR3_EXEC_STATE_MASK 0x03
+#define SR3_EXEC_IDLE 0
+#define SR3_TX_ABORT_IN_PROGRESS 1
+#define SR3_EXEC_ACTIVE 2
+#define SR3_ABORT_IN_PROGRESS 3
+#define SR3_EXEC_CHNL (1 << 2)
+#define SR3_STP_ON_NO_RSRC (1 << 3)
+#define SR3_RCVING_NO_RSRC (1 << 4)
+#define SR3_RCV_STATE_MASK 0x60
+#define SR3_RCV_IDLE 0x00
+#define SR3_RCV_READY 0x20
+#define SR3_RCV_ACTIVE 0x40
+#define SR3_RCV_STOP_IN_PROG 0x60
+#define SR3_RCV_CHNL (1 << 7)
+
+/* Port 1 Command Register definitions */
+
+#define OP1_NOP 0
+#define OP1_SWIT_TO_PORT_0 1
+#define OP1_INT_DISABLE 2
+#define OP1_INT_ENABLE 3
+#define OP1_SET_TS 5
+#define OP1_RST_TS 7
+#define OP1_POWER_DOWN 8
+#define OP1_RESET_RING_MNGMT 11
+#define OP1_RESET 14
+#define OP1_SEL_RST 15
+
+#define CR1_STATUS_4 0x00
+#define CR1_STATUS_5 0x20
+#define CR1_STATUS_6 0x40
+#define CR1_STOP_REG_UPDATE (1 << 7)
+
+/* Receive frame status bits */
+
+#define RX_RCLD (1 << 0)
+#define RX_IA_MATCH (1 << 1)
+#define RX_NO_AD_MATCH (1 << 2)
+#define RX_NO_SFD (1 << 3)
+#define RX_SRT_FRM (1 << 7)
+#define RX_OVRRUN (1 << 8)
+#define RX_ALG_ERR (1 << 10)
+#define RX_CRC_ERR (1 << 11)
+#define RX_LEN_ERR (1 << 12)
+#define RX_RCV_OK (1 << 13)
+#define RX_TYP_LEN (1 << 15)
+
+/* Transmit status bits */
+
+#define TX_NCOL_MASK 0x0f
+#define TX_FRTL (1 << 4)
+#define TX_MAX_COL (1 << 5)
+#define TX_HRT_BEAT (1 << 6)
+#define TX_DEFER (1 << 7)
+#define TX_UND_RUN (1 << 8)
+#define TX_LOST_CTS (1 << 9)
+#define TX_LOST_CRS (1 << 10)
+#define TX_LTCOL (1 << 11)
+#define TX_OK (1 << 13)
+#define TX_COLL (1 << 15)
+
+struct i82593_conf_block {
+ u_char fifo_limit : 4,
+ forgnesi : 1,
+ fifo_32 : 1,
+ d6mod : 1,
+ throttle_enb : 1;
+ u_char throttle : 6,
+ cntrxint : 1,
+ contin : 1;
+ u_char addr_len : 3,
+ acloc : 1,
+ preamb_len : 2,
+ loopback : 2;
+ u_char lin_prio : 3,
+ tbofstop : 1,
+ exp_prio : 3,
+ bof_met : 1;
+ u_char : 4,
+ ifrm_spc : 4;
+ u_char : 5,
+ slottim_low : 3;
+ u_char slottim_hi : 3,
+ : 1,
+ max_retr : 4;
+ u_char prmisc : 1,
+ bc_dis : 1,
+ : 1,
+ crs_1 : 1,
+ nocrc_ins : 1,
+ crc_1632 : 1,
+ : 1,
+ crs_cdt : 1;
+ u_char cs_filter : 3,
+ crs_src : 1,
+ cd_filter : 3,
+ : 1;
+ u_char : 2,
+ min_fr_len : 6;
+ u_char lng_typ : 1,
+ lng_fld : 1,
+ rxcrc_xf : 1,
+ artx : 1,
+ sarec : 1,
+ tx_jabber : 1, /* why is this called max_len in the manual? */
+ hash_1 : 1,
+ lbpkpol : 1;
+ u_char : 6,
+ fdx : 1,
+ : 1;
+ u_char dummy_6 : 6, /* supposed to be ones */
+ mult_ia : 1,
+ dis_bof : 1;
+ u_char dummy_1 : 1, /* supposed to be one */
+ tx_ifs_retrig : 2,
+ mc_all : 1,
+ rcv_mon : 2,
+ frag_acpt : 1,
+ tstrttrs : 1;
+ u_char fretx : 1,
+ runt_eop : 1,
+ hw_sw_pin : 1,
+ big_endn : 1,
+ syncrqs : 1,
+ sttlen : 1,
+ tx_eop : 1,
+ rx_eop : 1;
+ u_char rbuf_size : 5,
+ rcvstop : 1,
+ : 2;
+};
+
+#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
+
+#endif /* _I82593_H */
diff --git a/drivers/net/wireless/ieee802_11.h b/drivers/net/wireless/ieee802_11.h
new file mode 100644
index 000000000000..53dd5248f9f1
--- /dev/null
+++ b/drivers/net/wireless/ieee802_11.h
@@ -0,0 +1,78 @@
+#ifndef _IEEE802_11_H
+#define _IEEE802_11_H
+
+#define IEEE802_11_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+   The figure in section 7.1.2 suggests a body size of up to 2312
+   bytes is allowed, which is a bit confusing; I suspect this
+   represents the 2304 bytes of real data plus a possible 8 bytes of
+   WEP IV and ICV.  (This interpretation was suggested by Ramiro Barreiro.) */
+
+
+#define IEEE802_11_HLEN 30
+#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
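+
+/* With the interpretation above, the 2312-byte body in the 802.11 figure
+ * works out as 2304 (MSDU) + presumably 4 (WEP IV) + 4 (WEP ICV) = 2312
+ * bytes, while IEEE802_11_FRAME_LEN counts only the unencrypted payload
+ * plus the 30-byte header: 2304 + 30 = 2334 bytes. */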
+
+struct ieee802_11_hdr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+} __attribute__ ((packed));
+
+/* Frame control field constants */
+#define IEEE802_11_FCTL_VERS 0x0002
+#define IEEE802_11_FCTL_FTYPE 0x000c
+#define IEEE802_11_FCTL_STYPE 0x00f0
+#define IEEE802_11_FCTL_TODS 0x0100
+#define IEEE802_11_FCTL_FROMDS 0x0200
+#define IEEE802_11_FCTL_MOREFRAGS 0x0400
+#define IEEE802_11_FCTL_RETRY 0x0800
+#define IEEE802_11_FCTL_PM 0x1000
+#define IEEE802_11_FCTL_MOREDATA 0x2000
+#define IEEE802_11_FCTL_WEP 0x4000
+#define IEEE802_11_FCTL_ORDER 0x8000
+
+#define IEEE802_11_FTYPE_MGMT 0x0000
+#define IEEE802_11_FTYPE_CTL 0x0004
+#define IEEE802_11_FTYPE_DATA 0x0008
+
+/* management */
+#define IEEE802_11_STYPE_ASSOC_REQ 0x0000
+#define IEEE802_11_STYPE_ASSOC_RESP 0x0010
+#define IEEE802_11_STYPE_REASSOC_REQ 0x0020
+#define IEEE802_11_STYPE_REASSOC_RESP 0x0030
+#define IEEE802_11_STYPE_PROBE_REQ 0x0040
+#define IEEE802_11_STYPE_PROBE_RESP 0x0050
+#define IEEE802_11_STYPE_BEACON 0x0080
+#define IEEE802_11_STYPE_ATIM 0x0090
+#define IEEE802_11_STYPE_DISASSOC 0x00A0
+#define IEEE802_11_STYPE_AUTH 0x00B0
+#define IEEE802_11_STYPE_DEAUTH 0x00C0
+
+/* control */
+#define IEEE802_11_STYPE_PSPOLL 0x00A0
+#define IEEE802_11_STYPE_RTS 0x00B0
+#define IEEE802_11_STYPE_CTS 0x00C0
+#define IEEE802_11_STYPE_ACK 0x00D0
+#define IEEE802_11_STYPE_CFEND 0x00E0
+#define IEEE802_11_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE802_11_STYPE_DATA 0x0000
+#define IEEE802_11_STYPE_DATA_CFACK 0x0010
+#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020
+#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE802_11_STYPE_NULLFUNC 0x0040
+#define IEEE802_11_STYPE_CFACK 0x0050
+#define IEEE802_11_STYPE_CFPOLL 0x0060
+#define IEEE802_11_STYPE_CFACKPOLL 0x0070
+
+#define IEEE802_11_SCTL_FRAG 0x000F
+#define IEEE802_11_SCTL_SEQ 0xFFF0
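+
+/*
+ * A minimal usage sketch: frame type and subtype are extracted from
+ * frame_ctl with the FCTL masks above.  The helper name is illustrative
+ * only, and frame_ctl is assumed to already be in CPU byte order.
+ */
+#if 0
+static int example_is_beacon(const struct ieee802_11_hdr *hdr)
+{
+	u16 fc = hdr->frame_ctl;
+
+	return (fc & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_MGMT &&
+	       (fc & IEEE802_11_FCTL_STYPE) == IEEE802_11_STYPE_BEACON;
+}
+#endif /* 0 */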
+
+#endif /* _IEEE802_11_H */
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
new file mode 100644
index 000000000000..382241e7edbb
--- /dev/null
+++ b/drivers/net/wireless/netwave_cs.c
@@ -0,0 +1,1736 @@
+/*********************************************************************
+ *
+ * Filename: netwave_cs.c
+ * Version: 0.4.1
+ * Description: Netwave AirSurfer Wireless LAN PC Card driver
+ * Status: Experimental.
+ * Authors: John Markus Bjørndalen <johnm@cs.uit.no>
+ * Dag Brattli <dagb@cs.uit.no>
+ * David Hinds <dahinds@users.sourceforge.net>
+ * Created at: A long time ago!
+ * Modified at: Mon Nov 10 11:54:37 1997
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1997 University of Tromsø, Norway
+ *
+ * Revision History:
+ *
+ * 08-Nov-97 15:14:47 John Markus Bjørndalen <johnm@cs.uit.no>
+ * - Fixed some bugs in netwave_rx and cleaned it up a bit.
+ * (One of the bugs would have destroyed packets when receiving
+ * multiple packets per interrupt).
+ * - Cleaned up parts of netwave_hw_xmit.
+ * - A few general cleanups.
+ * 24-Oct-97 13:17:36 Dag Brattli <dagb@cs.uit.no>
+ * - Fixed netwave_rx receive function (got updated docs)
+ * Others:
+ * - Changed name from xircnw to netwave, take a look at
+ * http://www.netwave-wireless.com
+ * - Some reorganizing of the code
+ * - Removed possible race condition between interrupt handler and transmit
+ * function
+ * - Started to add wireless extensions, but still needs some coding
+ * - Added watchdog for better handling of transmission timeouts
+ * (hopefully this works better)
+ ********************************************************************/
+
+/* To have statistics (just packets sent) define this */
+#undef NETWAVE_STATS
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+#endif /* WIRELESS_EXT > 12 */
+#endif
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/mem_op.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define NETWAVE_REGOFF 0x8000
+/* The Netwave IO registers, offsets to iobase */
+#define NETWAVE_REG_COR 0x0
+#define NETWAVE_REG_CCSR 0x2
+#define NETWAVE_REG_ASR 0x4
+#define NETWAVE_REG_IMR 0xa
+#define NETWAVE_REG_PMR 0xc
+#define NETWAVE_REG_IOLOW 0x6
+#define NETWAVE_REG_IOHI 0x7
+#define NETWAVE_REG_IOCONTROL 0x8
+#define NETWAVE_REG_DATA 0xf
+/* The Netwave Extended IO registers, offsets to RamBase */
+#define NETWAVE_EREG_ASCC 0x114
+#define NETWAVE_EREG_RSER 0x120
+#define NETWAVE_EREG_RSERW 0x124
+#define NETWAVE_EREG_TSER 0x130
+#define NETWAVE_EREG_TSERW 0x134
+#define NETWAVE_EREG_CB 0x100
+#define NETWAVE_EREG_SPCQ 0x154
+#define NETWAVE_EREG_SPU 0x155
+#define NETWAVE_EREG_LIF 0x14e
+#define NETWAVE_EREG_ISPLQ 0x156
+#define NETWAVE_EREG_HHC 0x158
+#define NETWAVE_EREG_NI 0x16e
+#define NETWAVE_EREG_MHS 0x16b
+#define NETWAVE_EREG_TDP 0x140
+#define NETWAVE_EREG_RDP 0x150
+#define NETWAVE_EREG_PA 0x160
+#define NETWAVE_EREG_EC 0x180
+#define NETWAVE_EREG_CRBP 0x17a
+#define NETWAVE_EREG_ARW 0x166
+
+/*
+ * Commands used in the extended command buffer
+ * NETWAVE_EREG_CB (0x100-0x10F)
+ */
+#define NETWAVE_CMD_NOP 0x00
+#define NETWAVE_CMD_SRC 0x01
+#define NETWAVE_CMD_STC 0x02
+#define NETWAVE_CMD_AMA 0x03
+#define NETWAVE_CMD_DMA 0x04
+#define NETWAVE_CMD_SAMA 0x05
+#define NETWAVE_CMD_ER 0x06
+#define NETWAVE_CMD_DR 0x07
+#define NETWAVE_CMD_TL 0x08
+#define NETWAVE_CMD_SRP 0x09
+#define NETWAVE_CMD_SSK 0x0a
+#define NETWAVE_CMD_SMD 0x0b
+#define NETWAVE_CMD_SAPD 0x0c
+#define NETWAVE_CMD_SSS 0x11
+/* End of Command marker */
+#define NETWAVE_CMD_EOC 0x00
+
+/* ASR register bits */
+#define NETWAVE_ASR_RXRDY 0x80
+#define NETWAVE_ASR_TXBA 0x01
+
+#define TX_TIMEOUT ((32*HZ)/100)
+
+static const unsigned int imrConfRFU1 = 0x10; /* RFU interrupt mask, keep high */
+static const unsigned int imrConfIENA = 0x02; /* Interrupt enable */
+
+static const unsigned int corConfIENA = 0x01; /* Interrupt enable */
+static const unsigned int corConfLVLREQ = 0x40; /* Keep high */
+
+static const unsigned int rxConfRxEna = 0x80; /* Receive Enable */
+static const unsigned int rxConfMAC = 0x20; /* MAC host receive mode*/
+static const unsigned int rxConfPro = 0x10; /* Promiscuous */
+static const unsigned int rxConfAMP = 0x08; /* Accept Multicast Packets */
+static const unsigned int rxConfBcast = 0x04; /* Accept Broadcast Packets */
+
+static const unsigned int txConfTxEna = 0x80; /* Transmit Enable */
+static const unsigned int txConfMAC = 0x20; /* Host sends MAC mode */
+static const unsigned int txConfEUD = 0x10; /* Enable Uni-Data packets */
+static const unsigned int txConfKey = 0x02; /* Scramble data packets */
+static const unsigned int txConfLoop = 0x01; /* Loopback mode */
+
+/*
+ All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ you do not define PCMCIA_DEBUG at all, all the debug code will be
+ left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ be present but disabled -- but it can then be enabled for specific
+ modules at load time with a 'pc_debug=#' option to insmod.
+*/
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"netwave_cs.c 0.3.0 Thu Jul 17 14:36:02 1997 (John Markus Bjørndalen)\n";
+#else
+#define DEBUG(n, args...)
+#endif
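+
+/* Usage sketch for the macro above: a call such as
+ *
+ *	DEBUG(1, "netwave_cs: some condition, status 0x%x\n", status);
+ *
+ * expands to a printk() only when the pc_debug module parameter is
+ * greater than the level given as the first argument, and compiles away
+ * entirely when PCMCIA_DEBUG is not defined.  The message text here is
+ * only an example.
+ */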
+
+static dev_info_t dev_info = "netwave_cs";
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+/* Choose the domain, default is 0x100 */
+static u_int domain = 0x100;
+
+/* Scramble key, range from 0x0 to 0xffff.
+ * 0x0 is no scrambling.
+ */
+static u_int scramble_key = 0x0;
+
+/* Shared memory speed, in ns. The documentation states that
+ * the card should not be read faster than every 400ns.
+ * This timing should be provided by the HBA. If it becomes a
+ * problem, try setting mem_speed to 400.
+ */
+static int mem_speed;
+
+module_param(domain, int, 0);
+module_param(scramble_key, int, 0);
+module_param(mem_speed, int, 0);
+
+/*====================================================================*/
+
+/* PCMCIA (Card Services) related functions */
+static void netwave_release(dev_link_t *link); /* Card removal */
+static int netwave_event(event_t event, int priority,
+ event_callback_args_t *args);
+static void netwave_pcmcia_config(dev_link_t *arg); /* Runs after card
+ insertion */
+static dev_link_t *netwave_attach(void); /* Create instance */
+static void netwave_detach(dev_link_t *); /* Destroy instance */
+
+/* Hardware configuration */
+static void netwave_doreset(kio_addr_t iobase, u_char __iomem *ramBase);
+static void netwave_reset(struct net_device *dev);
+
+/* Misc device stuff */
+static int netwave_open(struct net_device *dev); /* Open the device */
+static int netwave_close(struct net_device *dev); /* Close the device */
+
+/* Packet transmission and Packet reception */
+static int netwave_start_xmit( struct sk_buff *skb, struct net_device *dev);
+static int netwave_rx( struct net_device *dev);
+
+/* Interrupt routines */
+static irqreturn_t netwave_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void netwave_watchdog(struct net_device *);
+
+/* Statistics */
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *netwave_get_stats(struct net_device *dev);
+
+/* Wireless extensions */
+#ifdef WIRELESS_EXT
+static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
+#endif
+static int netwave_ioctl(struct net_device *, struct ifreq *, int);
+
+static void set_multicast_list(struct net_device *dev);
+
+/*
+ A linked list of "instances" of the skeleton device. Each actual
+ PCMCIA card corresponds to one device instance, and is described
+ by one dev_link_t structure (defined in ds.h).
+
+ You may not want to use a linked list for this -- for example, the
+ memory card driver uses an array of dev_link_t pointers, where minor
+ device numbers are used to derive the corresponding array index.
+*/
+static dev_link_t *dev_list;
+
+/*
+ A dev_link_t structure has fields for most things that are needed
+ to keep track of a socket, but there will usually be some device
+ specific information that also needs to be kept track of. The
+ 'priv' pointer in a dev_link_t structure can be used to point to
+ a device-specific private data structure, like this.
+
+ A driver needs to provide a dev_node_t structure for each device
+ on a card. In some cases, there is only one device per card (for
+ example, ethernet cards, modems). In other cases, there may be
+ many actual or logical devices (SCSI adapters, memory cards with
+ multiple partitions). The dev_node_t structures need to be kept
+ in a linked list starting at the 'dev' field of a dev_link_t
+ structure. We allocate them in the card's private data structure,
+ because they generally can't be allocated dynamically.
+*/
+
+#if WIRELESS_EXT <= 12
+/* Wireless extensions backward compatibility */
+
+/* Part of iw_handler prototype we need */
+struct iw_request_info
+{
+ __u16 cmd; /* Wireless Extension command */
+ __u16 flags; /* More to come ;-) */
+};
+
+/* Wireless Extension Backward compatibility - Jean II
+ * If the new wireless device private ioctl range is not defined,
+ * default to standard device private ioctl range */
+#ifndef SIOCIWFIRSTPRIV
+#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
+#endif /* SIOCIWFIRSTPRIV */
+
+#else /* WIRELESS_EXT <= 12 */
+static const struct iw_handler_def netwave_handler_def;
+#endif /* WIRELESS_EXT <= 12 */
+
+#define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */
+
+#define MAX_ESA 10
+
+typedef struct net_addr {
+ u_char addr48[6];
+} net_addr;
+
+struct site_survey {
+ u_short length;
+ u_char struct_revision;
+ u_char roaming_state;
+
+ u_char sp_existsFlag;
+ u_char sp_link_quality;
+ u_char sp_max_link_quality;
+ u_char linkQualityGoodFairBoundary;
+ u_char linkQualityFairPoorBoundary;
+ u_char sp_utilization;
+ u_char sp_goodness;
+ u_char sp_hotheadcount;
+ u_char roaming_condition;
+
+ net_addr sp;
+ u_char numAPs;
+ net_addr nearByAccessPoints[MAX_ESA];
+};
+
+typedef struct netwave_private {
+ dev_link_t link;
+ spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
+ dev_node_t node;
+ u_char __iomem *ramBase;
+ int timeoutCounter;
+ int lastExec;
+ struct timer_list watchdog; /* To avoid blocking state */
+ struct site_survey nss;
+ struct net_device_stats stats;
+#ifdef WIRELESS_EXT
+ struct iw_statistics iw_stats; /* Wireless stats */
+#endif
+} netwave_private;
+
+#ifdef NETWAVE_STATS
+static struct net_device_stats *netwave_get_stats(struct net_device *dev);
+#endif
+
+/*
+ * The Netwave card is little-endian, so this won't work on big-endian
+ * systems.
+ */
+static inline unsigned short get_uint16(u_char __iomem *staddr)
+{
+ return readw(staddr); /* Return only 16 bits */
+}
+
+static inline short get_int16(u_char __iomem * staddr)
+{
+ return readw(staddr);
+}
+
+/*
+ * Wait until the WOC (Write Operation Complete) bit in the
+ * ASR (Adapter Status Register) is asserted.
+ * Ideally this should abort if it takes too long.
+ */
+static inline void wait_WOC(unsigned int iobase)
+{
+    /* Busy-wait until the WOC bit is asserted */
+ while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ;
+}
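+
+/*
+ * A minimal sketch (not used by the driver) of the bounded wait hinted
+ * at above: same WOC test, but giving up after an arbitrary number of
+ * polls instead of spinning forever.  The name and the bound are
+ * illustrative only.
+ */
+#if 0
+static inline int wait_WOC_bounded(unsigned int iobase)
+{
+    int i;
+
+    for (i = 0; i < 100000; i++)			/* arbitrary bound */
+	if (inb(iobase + NETWAVE_REG_ASR) & 0x8)	/* WOC asserted */
+	    return 0;
+    return -1;						/* gave up */
+}
+#endif /* 0 */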
+
+#ifdef WIRELESS_EXT
+static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
+ kio_addr_t iobase) {
+ u_short resultBuffer;
+
+    /* If more than 100 jiffies (about one second at HZ=100) have passed
+     * since the last snapshot, take a new one; otherwise return the
+     * cached data. This is the recommended rate.
+     */
+ if ( jiffies - priv->lastExec > 100) {
+ /* Take site survey snapshot */
+ /*printk( KERN_DEBUG "Taking new snapshot. %ld\n", jiffies -
+ priv->lastExec); */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SSS, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
+ wait_WOC(iobase);
+
+        /* Get result and copy to cache */
+ resultBuffer = readw(ramBase + NETWAVE_EREG_CRBP);
+ copy_from_pc( &priv->nss, ramBase+resultBuffer,
+ sizeof(struct site_survey));
+ }
+}
+#endif
+
+#ifdef WIRELESS_EXT
+/*
+ * Function netwave_get_wireless_stats (dev)
+ *
+ * Wireless extensions statistics
+ *
+ */
+static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
+{
+ unsigned long flags;
+ kio_addr_t iobase = dev->base_addr;
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+ struct iw_statistics* wstats;
+
+ wstats = &priv->iw_stats;
+
+ spin_lock_irqsave(&priv->spinlock, flags);
+
+ netwave_snapshot( priv, ramBase, iobase);
+
+ wstats->status = priv->nss.roaming_state;
+ wstats->qual.qual = readb( ramBase + NETWAVE_EREG_SPCQ);
+ wstats->qual.level = readb( ramBase + NETWAVE_EREG_ISPLQ);
+ wstats->qual.noise = readb( ramBase + NETWAVE_EREG_SPU) & 0x3f;
+ wstats->discard.nwid = 0L;
+ wstats->discard.code = 0L;
+ wstats->discard.misc = 0L;
+
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+
+ return &priv->iw_stats;
+}
+#endif
+
+/*
+ * Function netwave_attach (void)
+ *
+ * Creates an "instance" of the driver, allocating local data
+ * structures for one device. The device is registered with Card
+ * Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+static dev_link_t *netwave_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ netwave_private *priv;
+ int ret;
+
+ DEBUG(0, "netwave_attach()\n");
+
+ /* Initialize the dev_link_t structure */
+ dev = alloc_etherdev(sizeof(netwave_private));
+ if (!dev)
+ return NULL;
+ priv = netdev_priv(dev);
+ link = &priv->link;
+ link->priv = dev;
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ /* link->io.NumPorts2 = 16;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; */
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &netwave_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* Netwave private struct init. link/dev/node already taken care of,
+ * other stuff zero'd - Jean II */
+ spin_lock_init(&priv->spinlock);
+
+ /* Netwave specific entries in the device structure */
+ SET_MODULE_OWNER(dev);
+ dev->hard_start_xmit = &netwave_start_xmit;
+ dev->get_stats = &netwave_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ /* wireless extensions */
+#ifdef WIRELESS_EXT
+ dev->get_wireless_stats = &netwave_get_wireless_stats;
+#if WIRELESS_EXT > 12
+ dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* WIRELESS_EXT */
+ dev->do_ioctl = &netwave_ioctl;
+
+ dev->tx_timeout = &netwave_watchdog;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->open = &netwave_open;
+ dev->stop = &netwave_close;
+ link->irq.Instance = dev;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &netwave_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ netwave_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* netwave_attach */
+
+/*
+ * Function netwave_detach (link)
+ *
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+static void netwave_detach(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "netwave_detach(0x%p)\n", link);
+
+ /*
+ If the device is currently configured and active, we won't
+ actually delete it yet. Instead, it is marked so that when
+ the release() function is called, that will trigger a proper
+ detach().
+ */
+ if (link->state & DEV_CONFIG)
+ netwave_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ {
+ DEBUG(1, "netwave_cs: detach fail, '%s' not in list\n",
+ link->dev->dev_name);
+ return;
+ }
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(dev);
+ free_netdev(dev);
+
+} /* netwave_detach */
+
+/*
+ * Wireless Handler : get protocol name
+ */
+static int netwave_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ strcpy(wrqu->name, "Netwave");
+ return 0;
+}
+
+/*
+ * Wireless Handler : set Network ID
+ */
+static int netwave_set_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long flags;
+ kio_addr_t iobase = dev->base_addr;
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&priv->spinlock, flags);
+
+#if WIRELESS_EXT > 8
+ if(!wrqu->nwid.disabled) {
+ domain = wrqu->nwid.value;
+#else /* WIRELESS_EXT > 8 */
+ if(wrqu->nwid.on) {
+ domain = wrqu->nwid.nwid;
+#endif /* WIRELESS_EXT > 8 */
+ printk( KERN_DEBUG "Setting domain to 0x%x%02x\n",
+ (domain >> 8) & 0x01, domain & 0xff);
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SMD, ramBase + NETWAVE_EREG_CB + 0);
+ writeb( domain & 0xff, ramBase + NETWAVE_EREG_CB + 1);
+ writeb((domain >>8 ) & 0x01,ramBase + NETWAVE_EREG_CB+2);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
+ }
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+
+ return 0;
+}
+
+/*
+ * Wireless Handler : get Network ID
+ */
+static int netwave_get_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+#if WIRELESS_EXT > 8
+ wrqu->nwid.value = domain;
+ wrqu->nwid.disabled = 0;
+ wrqu->nwid.fixed = 1;
+#else /* WIRELESS_EXT > 8 */
+ wrqu->nwid.nwid = domain;
+ wrqu->nwid.on = 1;
+#endif /* WIRELESS_EXT > 8 */
+
+ return 0;
+}
+
+/*
+ * Wireless Handler : set scramble key
+ */
+static int netwave_set_scramble(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *key)
+{
+ unsigned long flags;
+ kio_addr_t iobase = dev->base_addr;
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&priv->spinlock, flags);
+
+ scramble_key = (key[0] << 8) | key[1];
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SSK, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(scramble_key & 0xff, ramBase + NETWAVE_EREG_CB + 1);
+ writeb((scramble_key>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+
+ return 0;
+}
+
+/*
+ * Wireless Handler : get scramble key
+ */
+static int netwave_get_scramble(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *key)
+{
+ key[1] = scramble_key & 0xff;
+ key[0] = (scramble_key>>8) & 0xff;
+#if WIRELESS_EXT > 8
+ wrqu->encoding.flags = IW_ENCODE_ENABLED;
+ wrqu->encoding.length = 2;
+#else /* WIRELESS_EXT > 8 */
+ wrqu->encoding.method = 1;
+#endif /* WIRELESS_EXT > 8 */
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 8
+/*
+ * Wireless Handler : get mode
+ */
+static int netwave_get_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ if(domain & 0x100)
+ wrqu->mode = IW_MODE_INFRA;
+ else
+ wrqu->mode = IW_MODE_ADHOC;
+
+ return 0;
+}
+#endif /* WIRELESS_EXT > 8 */
+
+/*
+ * Wireless Handler : get range info
+ */
+static int netwave_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ int ret = 0;
+
+ /* Set the length (very important for backward compatibility) */
+ wrqu->data.length = sizeof(struct iw_range);
+
+ /* Set all the info we don't care or don't know about to zero */
+ memset(range, 0, sizeof(struct iw_range));
+
+#if WIRELESS_EXT > 10
+ /* Set the Wireless Extension versions */
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 9; /* Nothing for us in v10 and v11 */
+#endif /* WIRELESS_EXT > 10 */
+
+ /* Set information in the range struct */
+ range->throughput = 450 * 1000; /* don't argue on this ! */
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x01FF;
+
+ range->num_channels = range->num_frequency = 0;
+
+ range->sensitivity = 0x3F;
+ range->max_qual.qual = 255;
+ range->max_qual.level = 255;
+ range->max_qual.noise = 0;
+
+#if WIRELESS_EXT > 7
+ range->num_bitrates = 1;
+ range->bitrate[0] = 1000000; /* 1 Mb/s */
+#endif /* WIRELESS_EXT > 7 */
+
+#if WIRELESS_EXT > 8
+ range->encoding_size[0] = 2; /* 16 bits scrambling */
+ range->num_encoding_sizes = 1;
+ range->max_encoding_tokens = 1; /* Only one key possible */
+#endif /* WIRELESS_EXT > 8 */
+
+ return ret;
+}
+
+/*
+ * Wireless Private Handler : get snapshot
+ */
+static int netwave_get_snap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long flags;
+ kio_addr_t iobase = dev->base_addr;
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&priv->spinlock, flags);
+
+ /* Take snapshot of environment */
+ netwave_snapshot( priv, ramBase, iobase);
+ wrqu->data.length = priv->nss.length;
+ memcpy(extra, (u_char *) &priv->nss, sizeof( struct site_survey));
+
+ priv->lastExec = jiffies;
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+
+ return(0);
+}
+
+/*
+ * Structures to export the Wireless Handlers
+ * This is the stuff that are treated the wireless extensions (iwconfig)
+ */
+
+static const struct iw_priv_args netwave_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ { SIOCGIPSNAP, 0,
+ IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof(struct site_survey),
+ "getsitesurvey" },
+};
+
+#if WIRELESS_EXT > 12
+
+static const iw_handler netwave_handler[] =
+{
+ NULL, /* SIOCSIWNAME */
+ netwave_get_name, /* SIOCGIWNAME */
+ netwave_set_nwid, /* SIOCSIWNWID */
+ netwave_get_nwid, /* SIOCGIWNWID */
+ NULL, /* SIOCSIWFREQ */
+ NULL, /* SIOCGIWFREQ */
+ NULL, /* SIOCSIWMODE */
+ netwave_get_mode, /* SIOCGIWMODE */
+ NULL, /* SIOCSIWSENS */
+ NULL, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ netwave_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ NULL, /* SIOCSIWSPY */
+ NULL, /* SIOCGIWSPY */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWAP */
+ NULL, /* SIOCGIWAP */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCGIWAPLIST */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWESSID */
+ NULL, /* SIOCGIWESSID */
+ NULL, /* SIOCSIWNICKN */
+ NULL, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWRATE */
+ NULL, /* SIOCGIWRATE */
+ NULL, /* SIOCSIWRTS */
+ NULL, /* SIOCGIWRTS */
+ NULL, /* SIOCSIWFRAG */
+ NULL, /* SIOCGIWFRAG */
+ NULL, /* SIOCSIWTXPOW */
+ NULL, /* SIOCGIWTXPOW */
+ NULL, /* SIOCSIWRETRY */
+ NULL, /* SIOCGIWRETRY */
+ netwave_set_scramble, /* SIOCSIWENCODE */
+ netwave_get_scramble, /* SIOCGIWENCODE */
+};
+
+static const iw_handler netwave_private_handler[] =
+{
+ NULL, /* SIOCIWFIRSTPRIV */
+ netwave_get_snap, /* SIOCIWFIRSTPRIV + 1 */
+};
+
+static const struct iw_handler_def netwave_handler_def =
+{
+ .num_standard = sizeof(netwave_handler)/sizeof(iw_handler),
+ .num_private = sizeof(netwave_private_handler)/sizeof(iw_handler),
+ .num_private_args = sizeof(netwave_private_args)/sizeof(struct iw_priv_args),
+ .standard = (iw_handler *) netwave_handler,
+ .private = (iw_handler *) netwave_private_handler,
+ .private_args = (struct iw_priv_args *) netwave_private_args,
+};
+#endif /* WIRELESS_EXT > 12 */
+
+/*
+ * Function netwave_ioctl (dev, rq, cmd)
+ *
+ * Perform ioctl : config & info stuff
+ *     These are the entry points used by the wireless extensions (iwconfig)
+ *
+ */
+static int netwave_ioctl(struct net_device *dev, /* ioctl device */
+ struct ifreq *rq, /* Data passed */
+ int cmd) /* Ioctl number */
+{
+ int ret = 0;
+#ifdef WIRELESS_EXT
+#if WIRELESS_EXT <= 12
+ struct iwreq *wrq = (struct iwreq *) rq;
+#endif
+#endif
+
+ DEBUG(0, "%s: ->netwave_ioctl(cmd=0x%X)\n", dev->name, cmd);
+
+ /* Look what is the request */
+ switch(cmd) {
+ /* --------------- WIRELESS EXTENSIONS --------------- */
+#ifdef WIRELESS_EXT
+#if WIRELESS_EXT <= 12
+ case SIOCGIWNAME:
+ netwave_get_name(dev, NULL, &(wrq->u), NULL);
+ break;
+ case SIOCSIWNWID:
+ ret = netwave_set_nwid(dev, NULL, &(wrq->u), NULL);
+ break;
+ case SIOCGIWNWID:
+ ret = netwave_get_nwid(dev, NULL, &(wrq->u), NULL);
+ break;
+#if WIRELESS_EXT > 8 /* Note : The API did change... */
+ case SIOCGIWENCODE:
+ /* Get scramble key */
+ if(wrq->u.encoding.pointer != (caddr_t) 0)
+ {
+ char key[2];
+ ret = netwave_get_scramble(dev, NULL, &(wrq->u), key);
+ if(copy_to_user(wrq->u.encoding.pointer, key, 2))
+ ret = -EFAULT;
+ }
+ break;
+ case SIOCSIWENCODE:
+ /* Set scramble key */
+ if(wrq->u.encoding.pointer != (caddr_t) 0)
+ {
+ char key[2];
+ if(copy_from_user(key, wrq->u.encoding.pointer, 2))
+ {
+ ret = -EFAULT;
+ break;
+ }
+ ret = netwave_set_scramble(dev, NULL, &(wrq->u), key);
+ }
+ break;
+ case SIOCGIWMODE:
+ /* Mode of operation */
+ ret = netwave_get_mode(dev, NULL, &(wrq->u), NULL);
+ break;
+#else /* WIRELESS_EXT > 8 */
+ case SIOCGIWENCODE:
+ /* Get scramble key */
+ ret = netwave_get_scramble(dev, NULL, &(wrq->u),
+ (char *) &wrq->u.encoding.code);
+ break;
+ case SIOCSIWENCODE:
+ /* Set scramble key */
+ ret = netwave_set_scramble(dev, NULL, &(wrq->u),
+ (char *) &wrq->u.encoding.code);
+ break;
+#endif /* WIRELESS_EXT > 8 */
+ case SIOCGIWRANGE:
+ /* Basic checking... */
+ if(wrq->u.data.pointer != (caddr_t) 0) {
+ struct iw_range range;
+ ret = netwave_get_range(dev, NULL, &(wrq->u), (char *) &range);
+ if (copy_to_user(wrq->u.data.pointer, &range,
+ sizeof(struct iw_range)))
+ ret = -EFAULT;
+ }
+ break;
+ case SIOCGIWPRIV:
+ /* Basic checking... */
+ if(wrq->u.data.pointer != (caddr_t) 0) {
+ /* Set the number of ioctl available */
+ wrq->u.data.length = sizeof(netwave_private_args) / sizeof(netwave_private_args[0]);
+
+ /* Copy structure to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer,
+ (u_char *) netwave_private_args,
+ sizeof(netwave_private_args)))
+ ret = -EFAULT;
+ }
+ break;
+ case SIOCGIPSNAP:
+ if(wrq->u.data.pointer != (caddr_t) 0) {
+ char buffer[sizeof( struct site_survey)];
+ ret = netwave_get_snap(dev, NULL, &(wrq->u), buffer);
+ /* Copy structure to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer,
+ buffer,
+ sizeof( struct site_survey)))
+ {
+ printk(KERN_DEBUG "Bad buffer!\n");
+ break;
+ }
+ }
+ break;
+#endif /* WIRELESS_EXT <= 12 */
+#endif /* WIRELESS_EXT */
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/*
+ * Function netwave_pcmcia_config (link)
+ *
+ * netwave_pcmcia_config() is scheduled to run after a CARD_INSERTION
+ * event is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
+ *
+ */
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static void netwave_pcmcia_config(dev_link_t *link) {
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ netwave_private *priv = netdev_priv(dev);
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, j, last_ret, last_fn;
+ u_char buf[64];
+ win_req_t req;
+ memreq_t mem;
+ u_char __iomem *ramBase = NULL;
+
+ DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *) buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /*
+ * Try allocating IO ports. This tries a few fixed addresses.
+ * If you want, you can also read the card's config table to
+ * pick addresses -- see the serial driver for an example.
+ */
+ for (i = j = 0x0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+
+ /*
+ * Now allocate an interrupt line. Note that this does not
+ * actually assign a handler to the interrupt.
+ */
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+
+ /*
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping.
+ */
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+
+ /*
+ * Allocate a 32K memory window. Note that the dev_link_t
+ * structure provides space for one window handle -- if your
+ * device needs several windows, you'll need to keep track of
+ * the handles in your private data structure, dev->priv.
+ */
+ DEBUG(1, "Setting mem speed of %d\n", mem_speed);
+
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0x8000;
+ req.AccessSpeed = mem_speed;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ mem.CardOffset = 0x20000; mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
+
+ /* Store base address of the common window frame */
+ ramBase = ioremap(req.Base, 0x8000);
+ priv->ramBase = ramBase;
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ strcpy(priv->node.dev_name, dev->name);
+ link->dev = &priv->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ /* Reset card before reading physical address */
+ netwave_doreset(dev->base_addr, ramBase);
+
+ /* Read the ethernet address and fill in the Netwave registers. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i);
+
+ printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx id "
+ "%c%c, hw_addr ", dev->name, dev->base_addr, dev->irq,
+ (u_long) ramBase, (int) readb(ramBase+NETWAVE_EREG_NI),
+ (int) readb(ramBase+NETWAVE_EREG_NI+1));
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ /* get revision words */
+ printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n",
+ get_uint16(ramBase + NETWAVE_EREG_ARW),
+ get_uint16(ramBase + NETWAVE_EREG_ARW+2));
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ netwave_release(link);
+} /* netwave_pcmcia_config */
+
+/*
+ * Function netwave_release (arg)
+ *
+ * After a card is removed, netwave_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void netwave_release(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ netwave_private *priv = netdev_priv(dev);
+
+ DEBUG(0, "netwave_release(0x%p)\n", link);
+
+ /* Don't bother checking to see if these succeed or not */
+ if (link->win) {
+ iounmap(priv->ramBase);
+ pcmcia_release_window(link->win);
+ }
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+}
+
+/*
+ * Function netwave_event (event, priority, args)
+ *
+ * The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the net drivers from trying
+ * to talk to the card any more.
+ *
+ * When a CARD_REMOVAL event is received, we immediately set a flag
+ * to block future accesses to this device. All the functions that
+ * actually access the device should check this flag to make sure
+ * the card is still present.
+ *
+ */
+static int netwave_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ DEBUG(1, "netwave_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_REGISTRATION_COMPLETE:
+ DEBUG(0, "netwave_cs: registration complete\n");
+ break;
+
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ netwave_release(link);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ netwave_pcmcia_config( link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ netwave_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* netwave_event */
+
+/*
+ * Function netwave_doreset (ioBase, ramBase)
+ *
+ * Proper hardware reset of the card.
+ */
+static void netwave_doreset(kio_addr_t ioBase, u_char __iomem *ramBase)
+{
+ /* Reset card */
+ wait_WOC(ioBase);
+ outb(0x80, ioBase + NETWAVE_REG_PMR);
+ writeb(0x08, ramBase + NETWAVE_EREG_ASCC); /* Bit 3 is WOC */
+ outb(0x0, ioBase + NETWAVE_REG_PMR); /* release reset */
+}
+
+/*
+ * Function netwave_reset (dev)
+ *
+ * Reset and restore all of the netwave registers
+ */
+static void netwave_reset(struct net_device *dev) {
+ /* u_char state; */
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+ kio_addr_t iobase = dev->base_addr;
+
+    DEBUG(0, "netwave_reset: starting hardware reset\n");
+
+ priv->timeoutCounter = 0;
+
+ /* Reset card */
+ netwave_doreset(iobase, ramBase);
+ printk(KERN_DEBUG "netwave_reset: Done with hardware reset\n");
+
+ /* Write a NOP to check the card */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_NOP, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
+
+ /* Set receive conf */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SRC, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(rxConfRxEna + rxConfBcast, ramBase + NETWAVE_EREG_CB + 1);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
+
+ /* Set transmit conf */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_STC, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(txConfTxEna, ramBase + NETWAVE_EREG_CB + 1);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
+
+ /* Now set the MU Domain */
+ printk(KERN_DEBUG "Setting domain to 0x%x%02x\n", (domain >> 8) & 0x01, domain & 0xff);
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SMD, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(domain & 0xff, ramBase + NETWAVE_EREG_CB + 1);
+ writeb((domain>>8) & 0x01, ramBase + NETWAVE_EREG_CB + 2);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
+
+ /* Set scramble key */
+ printk(KERN_DEBUG "Setting scramble key to 0x%x\n", scramble_key);
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SSK, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(scramble_key & 0xff, ramBase + NETWAVE_EREG_CB + 1);
+ writeb((scramble_key>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
+
+    /* Enable interrupts: bit 4 high to keep the unused
+     * source from interrupting us, bit 2 high to
+     * set interrupt enable, bits 5-7 to enable TxDN,
+     * RxErr and RxRdy
+ */
+ wait_WOC(iobase);
+ outb(imrConfIENA+imrConfRFU1, iobase + NETWAVE_REG_IMR);
+
+    /* Fetch 4 bytes from 0x170. Should be 0a,29,88,36
+     * waitWOC
+     * write 80 to d000:3688
+     * check whether it became 80
+ */
+
+ /* Enable Receiver */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_ER, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
+
+ /* Set the IENA bit in COR */
+ wait_WOC(iobase);
+ outb(corConfIENA + corConfLVLREQ, iobase + NETWAVE_REG_COR);
+}
+
+/*
+ * Function netwave_hw_xmit (data, len, dev)
+ */
+static int netwave_hw_xmit(unsigned char* data, int len,
+ struct net_device* dev) {
+ unsigned long flags;
+ unsigned int TxFreeList,
+ curBuff,
+ MaxData,
+ DataOffset;
+ int tmpcount;
+
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem * ramBase = priv->ramBase;
+ kio_addr_t iobase = dev->base_addr;
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&priv->spinlock, flags);
+
+ /* Check if there are transmit buffers available */
+ wait_WOC(iobase);
+ if ((inb(iobase+NETWAVE_REG_ASR) & NETWAVE_ASR_TXBA) == 0) {
+ /* No buffers available */
+ printk(KERN_DEBUG "netwave_hw_xmit: %s - no xmit buffers available.\n",
+ dev->name);
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+ return 1;
+ }
+
+ priv->stats.tx_bytes += len;
+
+ DEBUG(3, "Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n",
+ readb(ramBase + NETWAVE_EREG_SPCQ),
+ readb(ramBase + NETWAVE_EREG_SPU),
+ readb(ramBase + NETWAVE_EREG_LIF),
+ readb(ramBase + NETWAVE_EREG_ISPLQ));
+
+ /* Now try to insert it into the adapters free memory */
+ wait_WOC(iobase);
+ TxFreeList = get_uint16(ramBase + NETWAVE_EREG_TDP);
+ MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2);
+ DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4);
+
+ DEBUG(3, "TxFreeList %x, MaxData %x, DataOffset %x\n",
+ TxFreeList, MaxData, DataOffset);
+
+ /* Copy packet to the adapter fragment buffers */
+ curBuff = TxFreeList;
+ tmpcount = 0;
+ while (tmpcount < len) {
+ int tmplen = len - tmpcount;
+ copy_to_pc(ramBase + curBuff + DataOffset, data + tmpcount,
+ (tmplen < MaxData) ? tmplen : MaxData);
+ tmpcount += MaxData;
+
+ /* Advance to next buffer */
+ curBuff = get_uint16(ramBase + curBuff);
+ }
+
+ /* Now issue transmit list */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_TL, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(len & 0xff, ramBase + NETWAVE_EREG_CB + 1);
+ writeb((len>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
+
+ spin_unlock_irqrestore(&priv->spinlock, flags);
+ return 0;
+}
+
+static int netwave_start_xmit(struct sk_buff *skb, struct net_device *dev) {
+    /* This flag indicates that the hardware can't perform a transmission.
+     * Theoretically, NET3 checks it before sending a packet to the driver,
+     * but in fact it never does that and polls continuously.
+     * As the watchdog will abort transmissions that take too long, we are quite safe...
+ */
+
+ netif_stop_queue(dev);
+
+ {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char* buf = skb->data;
+
+ if (netwave_hw_xmit( buf, length, dev) == 1) {
+ /* Some error, let's make them call us another time? */
+ netif_start_queue(dev);
+ }
+ dev->trans_start = jiffies;
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
+} /* netwave_start_xmit */
+
+/*
+ * Function netwave_interrupt (irq, dev_id, regs)
+ *
+ * This function is the interrupt handler for the Netwave card. This
+ * routine will be called whenever:
+ * 1. A packet is received.
+ * 2. A packet has successfully been transferred and the unit is
+ * ready to transmit another packet.
+ * 3. A command has completed execution.
+ */
+static irqreturn_t netwave_interrupt(int irq, void* dev_id, struct pt_regs *regs)
+{
+ kio_addr_t iobase;
+ u_char __iomem *ramBase;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct netwave_private *priv = netdev_priv(dev);
+ dev_link_t *link = &priv->link;
+ int i;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ iobase = dev->base_addr;
+ ramBase = priv->ramBase;
+
+ /* Now find what caused the interrupt, check while interrupts ready */
+ for (i = 0; i < 10; i++) {
+ u_char status;
+
+ wait_WOC(iobase);
+ if (!(inb(iobase+NETWAVE_REG_CCSR) & 0x02))
+ break; /* None of the interrupt sources asserted (normal exit) */
+
+ status = inb(iobase + NETWAVE_REG_ASR);
+
+ if (!DEV_OK(link)) {
+ DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x "
+ "from removed or suspended card!\n", status);
+ break;
+ }
+
+ /* RxRdy */
+ if (status & 0x80) {
+ netwave_rx(dev);
+ /* wait_WOC(iobase); */
+ /* RxRdy cannot be reset directly by the host */
+ }
+ /* RxErr */
+ if (status & 0x40) {
+ u_char rser;
+
+ rser = readb(ramBase + NETWAVE_EREG_RSER);
+
+ if (rser & 0x04) {
+ ++priv->stats.rx_dropped;
+ ++priv->stats.rx_crc_errors;
+ }
+ if (rser & 0x02)
+ ++priv->stats.rx_frame_errors;
+
+ /* Clear the RxErr bit in RSER. RSER+4 is the
+ * write part. Also clear the RxCRC (0x04) and
+ * RxBig (0x02) bits if present */
+ wait_WOC(iobase);
+ writeb(0x40 | (rser & 0x06), ramBase + NETWAVE_EREG_RSER + 4);
+
+ /* Write bit 6 high to ASCC to clear RxErr in ASR,
+ * WOC must be set first!
+ */
+ wait_WOC(iobase);
+ writeb(0x40, ramBase + NETWAVE_EREG_ASCC);
+
+ /* Remember to count up priv->stats on error packets */
+ ++priv->stats.rx_errors;
+ }
+ /* TxDN */
+ if (status & 0x20) {
+ int txStatus;
+
+ txStatus = readb(ramBase + NETWAVE_EREG_TSER);
+ DEBUG(3, "Transmit done. TSER = %x id %x\n",
+ txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1));
+
+ if (txStatus & 0x20) {
+ /* Transmitting was okay, clear bits */
+ wait_WOC(iobase);
+ writeb(0x2f, ramBase + NETWAVE_EREG_TSER + 4);
+ ++priv->stats.tx_packets;
+ }
+
+ if (txStatus & 0xd0) {
+ if (txStatus & 0x80) {
+ ++priv->stats.collisions; /* Because of /proc/net/dev*/
+ /* ++priv->stats.tx_aborted_errors; */
+ /* printk("Collision. %ld\n", jiffies - dev->trans_start); */
+ }
+ if (txStatus & 0x40)
+ ++priv->stats.tx_carrier_errors;
+ /* 0x80 TxGU Transmit giveup - nine times and no luck
+ * 0x40 TxNOAP No access point. Discarded packet.
+                * 0x10 TxErr Transmit error. Always set when
+                * TxGU or TxNOAP is set. (Those are the only ones
+                * that set TxErr.)
+ */
+ DEBUG(3, "netwave_interrupt: TxDN with error status %x\n",
+ txStatus);
+
+ /* Clear out TxGU, TxNOAP, TxErr and TxTrys */
+ wait_WOC(iobase);
+ writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4);
+ ++priv->stats.tx_errors;
+ }
+ DEBUG(3, "New status is TSER %x ASR %x\n",
+ readb(ramBase + NETWAVE_EREG_TSER),
+ inb(iobase + NETWAVE_REG_ASR));
+
+ netif_wake_queue(dev);
+ }
+ /* TxBA, this would trigger on all error packets received */
+ /* if (status & 0x01) {
+ DEBUG(4, "Transmit buffers available, %x\n", status);
+ }
+ */
+ }
+ /* Handled if we looped at least one time - Jean II */
+ return IRQ_RETVAL(i);
+} /* netwave_interrupt */
+
+/*
+ * Function netwave_watchdog (dev)
+ *
+ * Watchdog : when we start a transmission, we set a timer in the
+ * kernel. If the transmission completes, this timer is disabled. If
+ * it expires, we reset the card.
+ *
+ */
+static void netwave_watchdog(struct net_device *dev) {
+
+ DEBUG(1, "%s: netwave_watchdog: watchdog timer expired\n", dev->name);
+ netwave_reset(dev);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+} /* netwave_watchdog */
+
+static struct net_device_stats *netwave_get_stats(struct net_device *dev) {
+ netwave_private *priv = netdev_priv(dev);
+
+ update_stats(dev);
+
+ DEBUG(2, "netwave: SPCQ %x SPU %x LIF %x ISPLQ %x MHS %x rxtx %x"
+ " %x tx %x %x %x %x\n",
+ readb(priv->ramBase + NETWAVE_EREG_SPCQ),
+ readb(priv->ramBase + NETWAVE_EREG_SPU),
+ readb(priv->ramBase + NETWAVE_EREG_LIF),
+ readb(priv->ramBase + NETWAVE_EREG_ISPLQ),
+ readb(priv->ramBase + NETWAVE_EREG_MHS),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0xe),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0xf),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0x18),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0x19),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0x1a),
+ readb(priv->ramBase + NETWAVE_EREG_EC + 0x1b));
+
+ return &priv->stats;
+}
+
+static void update_stats(struct net_device *dev) {
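+    /* Currently a stub: the counters in priv->stats are updated directly
+     * in netwave_interrupt() and netwave_rx(), so there is nothing to
+     * refresh here.  The commented-out lines below are kept as a sketch
+     * of reading packet counters out of the card's shared memory. */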
+ //unsigned long flags;
+/* netwave_private *priv = netdev_priv(dev); */
+
+ //spin_lock_irqsave(&priv->spinlock, flags);
+
+/* priv->stats.rx_packets = readb(priv->ramBase + 0x18e);
+ priv->stats.tx_packets = readb(priv->ramBase + 0x18f); */
+
+ //spin_unlock_irqrestore(&priv->spinlock, flags);
+}
+
+static int netwave_rx(struct net_device *dev)
+{
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem *ramBase = priv->ramBase;
+ kio_addr_t iobase = dev->base_addr;
+ u_char rxStatus;
+ struct sk_buff *skb = NULL;
+ unsigned int curBuffer,
+ rcvList;
+ int rcvLen;
+ int tmpcount = 0;
+ int dataCount, dataOffset;
+ int i;
+ u_char *ptr;
+
+    DEBUG(3, "netwave_rx: Receiving ... \n");
+
+ /* Receive max 10 packets for now. */
+ for (i = 0; i < 10; i++) {
+ /* Any packets? */
+ wait_WOC(iobase);
+ rxStatus = readb(ramBase + NETWAVE_EREG_RSER);
+ if ( !( rxStatus & 0x80)) /* No more packets */
+ break;
+
+ /* Check if multicast/broadcast or other */
+ /* multicast = (rxStatus & 0x20); */
+
+ /* The receive list pointer and length of the packet */
+ wait_WOC(iobase);
+ rcvLen = get_int16( ramBase + NETWAVE_EREG_RDP);
+ rcvList = get_uint16( ramBase + NETWAVE_EREG_RDP + 2);
+
+ if (rcvLen < 0) {
+ printk(KERN_DEBUG "netwave_rx: Receive packet with len %d\n",
+ rcvLen);
+ return 0;
+ }
+
+ skb = dev_alloc_skb(rcvLen+5);
+ if (skb == NULL) {
+ DEBUG(1, "netwave_rx: Could not allocate an sk_buff of "
+ "length %d\n", rcvLen);
+ ++priv->stats.rx_dropped;
+ /* Tell the adapter to skip the packet */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
+ return 0;
+ }
+
+ skb_reserve( skb, 2); /* Align IP on 16 byte */
+ skb_put( skb, rcvLen);
+ skb->dev = dev;
+
+ /* Copy packet fragments to the skb data area */
+ ptr = (u_char*) skb->data;
+ curBuffer = rcvList;
+ tmpcount = 0;
+ while ( tmpcount < rcvLen) {
+ /* Get length and offset of current buffer */
+ dataCount = get_uint16( ramBase+curBuffer+2);
+ dataOffset = get_uint16( ramBase+curBuffer+4);
+
+ copy_from_pc( ptr + tmpcount,
+ ramBase+curBuffer+dataOffset, dataCount);
+
+ tmpcount += dataCount;
+
+ /* Point to next buffer */
+ curBuffer = get_uint16(ramBase + curBuffer);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ /* Queue packet for network layer */
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += rcvLen;
+
+ /* Got the packet, tell the adapter to skip it */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
+ DEBUG(3, "Packet reception ok\n");
+ }
+ return 0;
+}
+
+static int netwave_open(struct net_device *dev) {
+ netwave_private *priv = netdev_priv(dev);
+ dev_link_t *link = &priv->link;
+
+ DEBUG(1, "netwave_open: starting.\n");
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ netif_start_queue(dev);
+ netwave_reset(dev);
+
+ return 0;
+}
+
+static int netwave_close(struct net_device *dev) {
+ netwave_private *priv = netdev_priv(dev);
+ dev_link_t *link = &priv->link;
+
+ DEBUG(1, "netwave_close: finishing.\n");
+
+ link->open--;
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static struct pcmcia_driver netwave_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "netwave_cs",
+ },
+ .attach = netwave_attach,
+ .detach = netwave_detach,
+};
+
+static int __init init_netwave_cs(void)
+{
+ return pcmcia_register_driver(&netwave_driver);
+}
+
+static void __exit exit_netwave_cs(void)
+{
+ pcmcia_unregister_driver(&netwave_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_netwave_cs);
+module_exit(exit_netwave_cs);
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ kio_addr_t iobase = dev->base_addr;
+ netwave_private *priv = netdev_priv(dev);
+ u_char __iomem * ramBase = priv->ramBase;
+ u_char rcvMode = 0;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ static int old;
+ if (old != dev->mc_count) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, dev->mc_count);
+ }
+ }
+#endif
+
+ if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ /* Multicast Mode */
+ rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
+ } else if (dev->flags & IFF_PROMISC) {
+        /* Promiscuous mode */
+ rcvMode = rxConfRxEna + rxConfPro + rxConfAMP + rxConfBcast;
+ } else {
+ /* Normal mode */
+ rcvMode = rxConfRxEna + rxConfBcast;
+ }
+
+ /* printk("netwave set_multicast_list: rcvMode to %x\n", rcvMode);*/
+ /* Now set receive mode */
+ wait_WOC(iobase);
+ writeb(NETWAVE_CMD_SRC, ramBase + NETWAVE_EREG_CB + 0);
+ writeb(rcvMode, ramBase + NETWAVE_EREG_CB + 1);
+ writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
+}
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
new file mode 100644
index 000000000000..a3a32430ae9d
--- /dev/null
+++ b/drivers/net/wireless/orinoco.c
@@ -0,0 +1,4243 @@
+/* orinoco.c - (formerly known as dldwd_cs.c and orinoco_cs.c)
+ *
+ * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
+ * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
+ *
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
+ *
+ * (C) Copyright David Gibson, IBM Corporation 2001-2003.
+ * Copyright (C) 2000 David Gibson, Linuxcare Australia.
+ * With some help from :
+ * Copyright (C) 2001 Jean Tourrilhes, HP Labs
+ * Copyright (C) 2001 Benjamin Herrenschmidt
+ *
+ * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
+ *
+ * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
+ * AT fasta.fh-dortmund.de>
+ * http://www.stud.fh-dortmund.de/~andy/wvlan/
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds AT users.sourceforge.net>. Portions created by David
+ * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
+ * Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL. */
+
+/*
+ * v0.01 -> v0.02 - 21/3/2001 - Jean II
+ * o Allow to use regular ethX device name instead of dldwdX
+ * o Warning on IBSS with ESSID=any for firmware 6.06
+ * o Put proper range.throughput values (optimistic)
+ * o IWSPY support (IOCTL and stat gather in Rx path)
+ * o Allow setting frequency in Ad-Hoc mode
+ * o Disable WEP setting if !has_wep to work on old firmware
+ * o Fix txpower range
+ * o Start adding support for Samsung/Compaq firmware
+ *
+ * v0.02 -> v0.03 - 23/3/2001 - Jean II
+ * o Start adding Symbol support - need to check all that
+ * o Fix Prism2/Symbol WEP to accept 128 bits keys
+ * o Add Symbol WEP (add authentication type)
+ * o Add Prism2/Symbol rate
+ * o Add PM timeout (holdover duration)
+ * o Enable "iwconfig eth0 key off" and friends (toggle flags)
+ * o Enable "iwconfig eth0 power unicast/all" (toggle flags)
+ * o Try with an Intel card. It reports firmware 1.01, behaves like
+ * an antiquated firmware, however on windows it says 2.00. Yuck !
+ * o Workaround firmware bug in allocate buffer (Intel 1.01)
+ * o Finish external renaming to orinoco...
+ * o Testing with various Wavelan firmwares
+ *
+ * v0.03 -> v0.04 - 30/3/2001 - Jean II
+ * o Update to Wireless 11 -> add retry limit/lifetime support
+ * o Tested with a D-Link DWL 650 card, fill in firmware support
+ * o Warning on Vcc mismatch (D-Link 3.3v card in Lucent 5v only slot)
+ * o Fixed the Prism2 WEP bugs that I introduced in v0.03 :-(
+ * It works on D-Link *only* after a tcpdump. Weird...
+ * And still doesn't work on Intel card. Grrrr...
+ * o Update the mode after a setport3
+ * o Add preamble setting for Symbol cards (not yet enabled)
+ * o Don't complain as much about Symbol cards...
+ *
+ * v0.04 -> v0.04b - 22/4/2001 - David Gibson
+ * o Removed the 'eth' parameter - always use ethXX as the
+ * interface name instead of dldwdXX. The other was racy
+ * anyway.
+ * o Clean up RID definitions in hermes.h, other cleanups
+ *
+ * v0.04b -> v0.04c - 24/4/2001 - Jean II
+ * o Tim Hurley <timster AT seiki.bliztech.com> reported a D-Link card
+ * with vendor 02 and firmware 0.08. Added in the capabilities...
+ * o Tested Lucent firmware 7.28, everything works...
+ *
+ * v0.04c -> v0.05 - 3/5/2001 - Benjamin Herrenschmidt
+ * o Spin-off Pcmcia code. This file is renamed orinoco.c,
+ * and orinoco_cs.c now contains only the Pcmcia specific stuff
+ * o Add Airport driver support on top of orinoco.c (see airport.c)
+ *
+ * v0.05 -> v0.05a - 4/5/2001 - Jean II
+ * o Revert to old Pcmcia code to fix breakage of Ben's changes...
+ *
+ * v0.05a -> v0.05b - 4/5/2001 - Jean II
+ * o add module parameter 'ignore_cis_vcc' for D-Link @ 5V
+ * o D-Link firmware doesn't support multicast. We just print a few
+ * error messages, but otherwise everything works...
+ * o For David : set/getport3 works fine, just upgrade iwpriv...
+ *
+ * v0.05b -> v0.05c - 5/5/2001 - Benjamin Herrenschmidt
+ * o Adapt airport.c to latest changes in orinoco.c
+ * o Remove deferred power enabling code
+ *
+ * v0.05c -> v0.05d - 5/5/2001 - Jean II
+ * o Workaround to SNAP decapsulate frame from Linksys AP
+ * original patch from : Dong Liu <dliu AT research.bell-labs.com>
+ * (note : the memcmp bug was mine - fixed)
+ * o Remove set_retry stuff, no firmware supports it (bloat--).
+ *
+ * v0.05d -> v0.06 - 25/5/2001 - Jean II
+ * Original patch from "Hong Lin" <alin AT redhat.com>,
+ * "Ian Kinner" <ikinner AT redhat.com>
+ * and "David Smith" <dsmith AT redhat.com>
+ * o Init of priv->tx_rate_ctrl in firmware specific section.
+ * o Prism2/Symbol rate, upto should be 0xF and not 0x15. Doh !
+ * o Spectrum card always need cor_reset (for every reset)
+ * o Fix cor_reset to not lose bit 7 in the register
+ * o flush_stale_links to remove zombie Pcmcia instances
+ * o Ack previous hermes event before reset
+ * Me (with my little hands)
+ * o Allow orinoco.c to call cor_reset via priv->card_reset_handler
+ * o Add priv->need_card_reset to toggle this feature
+ * o Fix various buglets when setting WEP in Symbol firmware
+ * Now, encryption is fully functional on Symbol cards. Youpi !
+ *
+ * v0.06 -> v0.06b - 25/5/2001 - Jean II
+ * o IBSS on Symbol uses port_mode = 4. Please don't ask...
+ *
+ * v0.06b -> v0.06c - 29/5/2001 - Jean II
+ * o Show first spy address in /proc/net/wireless for IBSS mode as well
+ *
+ * v0.06c -> v0.06d - 6/7/2001 - David Gibson
+ * o Change a bunch of KERN_INFO messages to KERN_DEBUG, as per Linus'
+ * wishes to reduce the number of unnecessary messages.
+ * o Removed bogus message on CRC error.
+ * o Merged fixes for v0.08 Prism 2 firmware from William Waghorn
+ * <willwaghorn AT yahoo.co.uk>
+ * o Slight cleanup/re-arrangement of firmware detection code.
+ *
+ * v0.06d -> v0.06e - 1/8/2001 - David Gibson
+ * o Removed some redundant global initializers (orinoco_cs.c).
+ * o Added some module metadata
+ *
+ * v0.06e -> v0.06f - 14/8/2001 - David Gibson
+ * o Wording fix to license
+ * o Added a 'use_alternate_encaps' module parameter for APs which need an
+ * oui of 00:00:00. We really need a better way of handling this, but
+ * the module flag is better than nothing for now.
+ *
+ * v0.06f -> v0.07 - 20/8/2001 - David Gibson
+ * o Removed BAP error retries from hermes_bap_seek(). For Tx we now
+ * let the upper layers handle the retry, we retry explicitly in the
+ * Rx path, but don't make as much noise about it.
+ * o Firmware detection cleanups.
+ *
+ * v0.07 -> v0.07a - 1/10/2001 - Jean II
+ * o Add code to read Symbol firmware revision, inspired by latest code
+ * in Spectrum24 by Lee John Keyser-Allen - Thanks Lee !
+ * o Thanks to Jared Valentine <hidden AT xmission.com> for "providing" me
+ * a 3Com card with a recent firmware, fill out Symbol firmware
+ * capabilities of latest rev (2.20), as well as older Symbol cards.
+ * o Disable Power Management in newer Symbol firmware, the API
+ * has changed (documentation needed).
+ *
+ * v0.07a -> v0.08 - 3/10/2001 - David Gibson
+ * o Fixed a possible buffer overrun found by the Stanford checker (in
+ * dldwd_ioctl_setiwencode()). Can only be called by root anyway, so not
+ * a big problem.
+ * o Turned has_big_wep on for Intersil cards. That's not true for all of
+ * them but we should at least let the capable ones try.
+ * o Wait for BUSY to clear at the beginning of hermes_bap_seek(). I
+ * realized that my assumption that the driver's serialization
+ * would prevent the BAP being busy on entry was possibly false, because
+ * things other than seeks may make the BAP busy.
+ * o Use "alternate" (oui 00:00:00) encapsulation by default.
+ * Setting use_old_encaps will mimic the old behaviour, but I think we
+ * will be able to eliminate this.
+ * o Don't try to make __initdata const (the version string). This can't
+ * work because of the way the __initdata sectioning works.
+ * o Added MODULE_LICENSE tags.
+ * o Support for PLX (transparent PCMCIA->PCI bridge) cards.
+ * o Changed to using the new type-fascist min/max.
+ *
+ * v0.08 -> v0.08a - 9/10/2001 - David Gibson
+ * o Inserted some missing acknowledgements/info into the Changelog.
+ * o Fixed some bugs in the normalization of signal level reporting.
+ * o Fixed bad bug in WEP key handling on Intersil and Symbol firmware,
+ * which led to an instant crash on big-endian machines.
+ *
+ * v0.08a -> v0.08b - 20/11/2001 - David Gibson
+ * o Lots of cleanup and bugfixes in orinoco_plx.c
+ * o Cleanup to handling of Tx rate setting.
+ * o Removed support for old encapsulation method.
+ * o Removed old "dldwd" names.
+ * o Split RID constants into a new file hermes_rid.h
+ * o Renamed RID constants to match linux-wlan-ng and prism2.o
+ * o Bugfixes in hermes.c
+ * o Poke the PLX's INTCSR register, so it actually starts
+ * generating interrupts. These cards might actually work now.
+ * o Update to wireless extensions v12 (Jean II)
+ * o Support for tallies and inquire command (Jean II)
+ * o Airport updates for newer PPC kernels (BenH)
+ *
+ * v0.08b -> v0.09 - 21/12/2001 - David Gibson
+ * o Some new PCI IDs for PLX cards.
+ * o Removed broken attempt to do ALLMULTI reception. Just use
+ * promiscuous mode instead
+ * o Preliminary work for list-AP (Jean II)
+ * o Airport updates from (BenH)
+ * o Eliminated racy hw_ready stuff
+ * o Fixed generation of fake events in irq handler. This should
+ * finally kill the EIO problems (Jean II & dgibson)
+ * o Fixed breakage of bitrate set/get on Agere firmware (Jean II)
+ *
+ * v0.09 -> v0.09a - 2/1/2002 - David Gibson
+ * o Fixed stupid mistake in multicast list handling, triggering
+ * a BUG()
+ *
+ * v0.09a -> v0.09b - 16/1/2002 - David Gibson
+ * o Fixed even stupider mistake in new interrupt handling, which
+ * seriously broke things on big-endian machines.
+ * o Removed a bunch of redundant includes and exports.
+ * o Removed a redundant MOD_{INC,DEC}_USE_COUNT pair in airport.c
+ * o Don't attempt to do hardware level multicast reception on
+ * Intersil firmware, just go promisc instead.
+ * o Typo fixed in hermes_issue_cmd()
+ * o Eliminated WIRELESS_SPY #ifdefs
+ * o Status code reported on Tx exceptions
+ * o Moved netif_wake_queue() from ALLOC interrupts to TX and TXEXC
+ * interrupts, which should fix the timeouts we're seeing.
+ *
+ * v0.09b -> v0.10 - 25 Feb 2002 - David Gibson
+ * o Removed nested structures used for header parsing, so the
+ * driver should now work without hackery on ARM
+ * o Fix for WEP handling on Intersil (Hawk Newton)
+ * o Eliminated the /proc/hermes/ethXX/regs debugging file. It
+ * was never very useful.
+ * o Make Rx errors less noisy.
+ *
+ * v0.10 -> v0.11 - 5 Apr 2002 - David Gibson
+ * o Laid the groundwork in hermes.[ch] for devices which map
+ * into PCI memory space rather than IO space.
+ * o Fixed bug in multicast handling (cleared multicast list when
+ * leaving promiscuous mode).
+ * o Relegated Tx error messages to debug.
+ * o Cleaned up / corrected handling of allocation lengths.
+ * o Set OWNSSID in IBSS mode for WinXP interoperability (jimc).
+ * o Change to using alloc_etherdev() for structure allocations.
+ * o Check for and drop undersized packets.
+ * o Fixed a race in stopping/waking the queue. This should fix
+ * the timeout problems (Pavel Roskin)
+ * o Reverted to netif_wake_queue() on the ALLOC event.
+ * o Fixes for recent Symbol firmwares which lack AP density
+ * (Pavel Roskin).
+ *
+ * v0.11 -> v0.11a - 29 Apr 2002 - David Gibson
+ * o Handle different register spacing, necessary for Prism 2.5
+ * PCI adaptors (Steve Hill).
+ * o Cleaned up initialization of card structures in orinoco_cs
+ * and airport. Removed card->priv field.
+ * o Make response structure optional for hermes_docmd_wait()
+ * (Pavel Roskin)
+ * o Added PCI id for Nortel emobility to orinoco_plx.c.
+ * o Cleanup to handling of Symbol's allocation bug. (Pavel Roskin)
+ * o Cleanups to firmware capability detection.
+ * o Arrange for orinoco_pci.c to override firmware detection.
+ * We should be able to support the PCI Intersil cards now.
+ * o Cleanup handling of reset_cor and hard_reset (Pavel Roskin).
+ * o Remove erroneous use of USER_BAP in the TxExc handler (Jouni
+ * Malinen).
+ * o Makefile changes for better integration into David Hinds
+ * pcmcia-cs package.
+ *
+ * v0.11a -> v0.11b - 1 May 2002 - David Gibson
+ * o Better error reporting in orinoco_plx_init_one()
+ * o Fixed multiple bad kfree() bugs introduced by the
+ * alloc_orinocodev() changes.
+ *
+ * v0.11b -> v0.12 - 19 Jun 2002 - David Gibson
+ * o Support changing the MAC address.
+ * o Correct display of Intersil firmware revision numbers.
+ * o Entirely revised locking scheme. Should be both simpler and
+ * better.
+ * o Merged some common code in orinoco_plx, orinoco_pci and
+ * airport by creating orinoco_default_{open,stop,reset}()
+ * which are used as the dev->open, dev->stop, priv->reset
+ * callbacks if none are specified when alloc_orinocodev() is
+ * called.
+ * o Removed orinoco_plx_interrupt() and orinoco_pci_interrupt().
+ * They didn't do anything.
+ *
+ * v0.12 -> v0.12a - 4 Jul 2002 - David Gibson
+ * o Some rearrangement of code.
+ * o Numerous fixups to locking and rest handling, particularly
+ * for PCMCIA.
+ * o This allows open and stop net_device methods to be in
+ * orinoco.c now, rather than in the init modules.
+ * o In orinoco_cs.c link->priv now points to the struct
+ * net_device not to the struct orinoco_private.
+ * o Added a check for undersized SNAP frames, which could cause
+ * crashes.
+ *
+ * v0.12a -> v0.12b - 11 Jul 2002 - David Gibson
+ * o Fix hw->num_init testing code, so num_init is actually
+ * incremented.
+ * o Fix very stupid bug in orinoco_cs which broke compile with
+ * CONFIG_SMP.
+ * o Squashed a warning.
+ *
+ * v0.12b -> v0.12c - 26 Jul 2002 - David Gibson
+ * o Change to C9X style designated initializers.
+ * o Add support for 3Com AirConnect PCI.
+ * o No longer ignore the hard_reset argument to
+ * alloc_orinocodev(). Oops.
+ *
+ * v0.12c -> v0.13beta1 - 13 Sep 2002 - David Gibson
+ * o Revert the broken 0.12* locking scheme and go to a new yet
+ * simpler scheme.
+ * o Do firmware resets only in orinoco_init() and when waking
+ * the card from hard sleep.
+ *
+ * v0.13beta1 -> v0.13 - 27 Sep 2002 - David Gibson
+ * o Re-introduced full resets (via schedule_task()) on Tx
+ * timeout.
+ *
+ * v0.13 -> v0.13a - 30 Sep 2002 - David Gibson
+ * o Minor cleanups to info frame handling. Add basic support
+ * for linkstatus info frames.
+ * o Include required kernel headers in orinoco.h, to avoid
+ * compile problems.
+ *
+ * v0.13a -> v0.13b - 10 Feb 2003 - David Gibson
+ * o Implemented hard reset for Airport cards
+ * o Experimental suspend/resume implementation for orinoco_pci
+ * o Abolished /proc debugging support, replaced with a debugging
+ * iwpriv. Now it's ugly and simple instead of ugly and complex.
+ * o Bugfix in hermes.c: if the firmware returned a record length
+ * of 0, we could go clobbering memory.
+ * o Bugfix in orinoco_stop() - it used to fail if hw_unavailable
+ * was set, which was usually true on PCMCIA hot removes.
+ * o Track LINKSTATUS messages, silently drop Tx packets before
+ * we are connected (avoids confusing the firmware), and only
+ * give LINKSTATUS printk()s if the status has changed.
+ *
+ * v0.13b -> v0.13c - 11 Mar 2003 - David Gibson
+ * o Cleanup: use dev instead of priv in various places.
+ * o Bug fix: Don't ReleaseConfiguration on RESET_PHYSICAL event
+ * if we're in the middle of a (driver initiated) hard reset.
+ * o Bug fix: ETH_ZLEN is supposed to include the header
+ * (Dionysus Blazakis & Manish Karir)
+ * o Convert to using workqueues instead of taskqueues (and
+ * backwards compatibility macros for pre 2.5.41 kernels).
+ * o Drop redundant (I think...) MOD_{INC,DEC}_USE_COUNT in
+ * airport.c
+ * o New orinoco_tmd.c init module from Joerg Dorchain for
+ * TMD7160 based PCI to PCMCIA bridges (similar to
+ * orinoco_plx.c).
+ *
+ * v0.13c -> v0.13d - 22 Apr 2003 - David Gibson
+ * o Make hw_unavailable a counter, rather than just a flag, this
+ * is necessary to avoid some races (such as a card being
+ * removed in the middle of orinoco_reset()).
+ * o Restore Release/RequestConfiguration in the PCMCIA event handler
+ * when dealing with a driver initiated hard reset. This is
+ * necessary to prevent hangs due to a spurious interrupt while
+ * the reset is in progress.
+ * o Clear the 802.11 header when transmitting, even though we
+ * don't use it. This fixes a long standing bug on some
+ * firmwares, which seem to get confused if that isn't done.
+ * o Be less eager to de-encapsulate SNAP frames, only do so if
+ * the OUI is 00:00:00 or 00:00:f8, leave others alone. The old
+ * behaviour broke CDP (Cisco Discovery Protocol).
+ * o Use dev instead of priv for free_irq() as well as
+ * request_irq() (oops).
+ * o Attempt to reset rather than giving up if we get too many
+ * IRQs.
+ * o Changed semantics of __orinoco_down() so it can be called
+ * safely with hw_unavailable set. It also now clears the
+ * linkstatus (since we're going to have to reassociate).
+ *
+ * v0.13d -> v0.13e - 12 May 2003 - David Gibson
+ * o Support for post-2.5.68 return values from irq handler.
+ * o Fixed bug where underlength packets would be double counted
+ * in the rx_dropped statistics.
+ * o Provided a module parameter to suppress linkstatus messages.
+ *
+ * v0.13e -> v0.14alpha1 - 30 Sep 2003 - David Gibson
+ * o Replaced priv->connected logic with netif_carrier_on/off()
+ * calls.
+ * o Remove has_ibss_any and never set the CREATEIBSS RID when
+ * the ESSID is empty. Too many firmwares break if we do.
+ * o 2.6 merges: Replace pdev->slot_name with pci_name(), remove
+ * __devinitdata from PCI ID tables, use free_netdev().
+ * o Enabled shared-key authentication for Agere firmware (from
+ * Robert J. Moore <Robert.J.Moore AT allanbank.com>)
+ * o Move netif_wake_queue() (back) to the Tx completion from the
+ * ALLOC event. This seems to prevent/mitigate the rolling
+ * error -110 problems at least on some Intersil firmwares.
+ * Theoretically reduces performance, but I can't measure it.
+ * Patch from Andrew Tridgell <tridge AT samba.org>
+ *
+ * v0.14alpha1 -> v0.14alpha2 - 20 Oct 2003 - David Gibson
+ * o Correctly turn off shared-key authentication when requested
+ * (bugfix from Robert J. Moore).
+ * o Correct airport sleep interfaces for current 2.6 kernels.
+ * o Add code for key change without disabling/enabling the MAC
+ * port. This is supposed to allow 802.1x to work sanely, but
+ * doesn't seem to yet.
+ *
+ * TODO
+ * o New wireless extensions API (patch from Moustafa
+ * Youssef, updated by Jim Carter and Pavel Roskin).
+ * o Handle de-encapsulation within network layer, provide 802.11
+ * headers (patch from Thomas 'Dent' Mirlacher)
+ * o RF monitor mode support
+ * o Fix possible races in SPY handling.
+ * o Disconnect wireless extensions from fundamental configuration.
+ * o (maybe) Software WEP support (patch from Stano Meduna).
+ * o (maybe) Use multiple Tx buffers - driver handling queue
+ * rather than firmware.
+ */
+
+/* Locking and synchronization:
+ *
+ * The basic principle is that everything is serialized through a
+ * single spinlock, priv->lock. The lock is used in user, bh and irq
+ * context, so when taken outside hardirq context it should always be
+ * taken with interrupts disabled. The lock protects both the
+ * hardware and the struct orinoco_private.
+ *
+ * Another flag, priv->hw_unavailable indicates that the hardware is
+ * unavailable for an extended period of time (e.g. suspended, or in
+ * the middle of a hard reset). This flag is protected by the
+ * spinlock. All code which touches the hardware should check the
+ * flag after taking the lock, and if it is set, give up on whatever
+ * they are doing and drop the lock again. The orinoco_lock()
+ * function handles this (it unlocks and returns -EBUSY if
+ * hw_unavailable is non-zero).
+ */
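+
+/*
+ * A minimal usage sketch (illustration only, not driver code), assuming
+ * the orinoco_lock()/orinoco_unlock() helpers (presumably provided by
+ * orinoco.h) behave as described above:
+ *
+ *	unsigned long flags;
+ *
+ *	if (orinoco_lock(priv, &flags) != 0)
+ *		return -EBUSY;	(hardware unavailable, give up)
+ *	... touch the hardware and struct orinoco_private here ...
+ *	orinoco_unlock(priv, &flags);
+ */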
+
+#define DRIVER_NAME "orinoco"
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "hermes.h"
+#include "hermes_rid.h"
+#include "orinoco.h"
+#include "ieee802_11.h"
+
+/********************************************************************/
+/* Module information */
+/********************************************************************/
+
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based and similar wireless cards");
+MODULE_LICENSE("Dual MPL/GPL");
+
+/* Level of debugging. Used in the macros in orinoco.h */
+#ifdef ORINOCO_DEBUG
+int orinoco_debug = ORINOCO_DEBUG;
+module_param(orinoco_debug, int, 0644);
+MODULE_PARM_DESC(orinoco_debug, "Debug level");
+EXPORT_SYMBOL(orinoco_debug);
+#endif
+
+static int suppress_linkstatus; /* = 0 */
+module_param(suppress_linkstatus, bool, 0644);
+MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
+
+/********************************************************************/
+/* Compile time configuration and compatibility stuff */
+/********************************************************************/
+
+/* We do it this way to avoid ifdefs in the actual code */
+#ifdef WIRELESS_SPY
+#define SPY_NUMBER(priv) (priv->spy_number)
+#else
+#define SPY_NUMBER(priv) 0
+#endif /* WIRELESS_SPY */
+
+/********************************************************************/
+/* Internal constants */
+/********************************************************************/
+
+#define ORINOCO_MIN_MTU 256
+#define ORINOCO_MAX_MTU (IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD)
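+/* Assuming the usual IEEE802_11_DATA_LEN of 2304 bytes, ORINOCO_MAX_MTU
+ * works out to 2304 - 8 = 2296 bytes. */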
+
+#define SYMBOL_MAX_VER_LEN (14)
+#define USER_BAP 0
+#define IRQ_BAP 1
+#define MAX_IRQLOOPS_PER_IRQ 10
+#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guestimate of
+ * how many events the
+ * device could
+ * legitimately generate */
+#define SMALL_KEY_SIZE 5
+#define LARGE_KEY_SIZE 13
+#define TX_NICBUF_SIZE_BUG 1585 /* Bug in Symbol firmware */
+
+#define DUMMY_FID 0xFFFF
+
+/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
+ HERMES_MAX_MULTICAST : 0)*/
+#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
+
+#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
+ | HERMES_EV_TX | HERMES_EV_TXEXC \
+ | HERMES_EV_WTERR | HERMES_EV_INFO \
+ | HERMES_EV_INFDROP )
+
+/********************************************************************/
+/* Data tables */
+/********************************************************************/
+
+/* The frequency of each channel in MHz */
+static const long channel_frequency[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+#define NUM_CHANNELS ARRAY_SIZE(channel_frequency)
+
+/* This table gives the actual meanings of the bitrate IDs returned
+ * by the firmware. */
+static struct {
+ int bitrate; /* in 100s of kilobits */
+ int automatic;
+ u16 agere_txratectrl;
+ u16 intersil_txratectrl;
+} bitrate_table[] = {
+ {110, 1, 3, 15}, /* Entry 0 is the default */
+ {10, 0, 1, 1},
+ {10, 1, 1, 1},
+ {20, 0, 2, 2},
+ {20, 1, 6, 3},
+ {55, 0, 4, 4},
+ {55, 1, 7, 7},
+ {110, 0, 5, 8},
+};
+#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
+
+/********************************************************************/
+/* Data types */
+/********************************************************************/
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ u16 ethertype;
+} __attribute__ ((packed));
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
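+/* Total encapsulation overhead: the 6-byte LLC/SNAP header above plus the
+ * 2-byte ethertype that follows it (cf. struct header_struct). */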
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+
+struct hermes_rx_descriptor {
+ u16 status;
+ u32 time;
+ u8 silence;
+ u8 signal;
+ u8 rate;
+ u8 rxflow;
+ u32 reserved;
+} __attribute__ ((packed));
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+static int orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int __orinoco_program_rids(struct net_device *dev);
+static void __orinoco_set_multicast_list(struct net_device *dev);
+static int orinoco_debug_dump_recs(struct net_device *dev);
+
+/********************************************************************/
+/* Internal helper functions */
+/********************************************************************/
+
+static inline void set_port_type(struct orinoco_private *priv)
+{
+ switch (priv->iw_mode) {
+ case IW_MODE_INFRA:
+ priv->port_type = 1;
+ priv->createibss = 0;
+ break;
+ case IW_MODE_ADHOC:
+ if (priv->prefer_port3) {
+ priv->port_type = 3;
+ priv->createibss = 0;
+ } else {
+ priv->port_type = priv->ibss_port;
+ priv->createibss = 1;
+ }
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
+ priv->ndev->name);
+ }
+}
+
+/********************************************************************/
+/* Device methods */
+/********************************************************************/
+
+static int orinoco_open(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = __orinoco_up(dev);
+
+ if (! err)
+ priv->open = 1;
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+int orinoco_stop(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+
+ /* We mustn't use orinoco_lock() here, because we need to be
+ able to close the interface even if hw_unavailable is set
+ (e.g. as we're released after a PC Card removal) */
+ spin_lock_irq(&priv->lock);
+
+ priv->open = 0;
+
+ err = __orinoco_down(dev);
+
+ spin_unlock_irq(&priv->lock);
+
+ return err;
+}
+
+static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+
+ return &priv->stats;
+}
+
+static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ struct iw_statistics *wstats = &priv->wstats;
+ int err = 0;
+ unsigned long flags;
+
+ if (! netif_device_present(dev)) {
+ printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
+ dev->name);
+ return NULL; /* FIXME: Can we do better than this? */
+ }
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return NULL; /* FIXME: Erg, we've been signalled, how
+ * do we propagate this back up? */
+
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ memset(&wstats->qual, 0, sizeof(wstats->qual));
+ /* If a spy address is defined, we report stats of the
+ * first spy address - Jean II */
+ if (SPY_NUMBER(priv)) {
+ wstats->qual.qual = priv->spy_stat[0].qual;
+ wstats->qual.level = priv->spy_stat[0].level;
+ wstats->qual.noise = priv->spy_stat[0].noise;
+ wstats->qual.updated = priv->spy_stat[0].updated;
+ }
+ } else {
+ struct {
+ u16 qual, signal, noise;
+ } __attribute__ ((packed)) cq;
+
+ err = HERMES_READ_RECORD(hw, USER_BAP,
+ HERMES_RID_COMMSQUALITY, &cq);
+
+ wstats->qual.qual = (int)le16_to_cpu(cq.qual);
+ wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
+ wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
+ wstats->qual.updated = 7;
+ }
+
+ /* We can't really wait for the tallies inquiry command to
+ * complete, so we just use the previous results and trigger
+ * a new tallies inquiry command for next time - Jean II */
+ /* FIXME: We're in user context (I think?), so we should just
+ wait for the tallies to come through */
+ err = hermes_inquire(hw, HERMES_INQ_TALLIES);
+
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return NULL;
+
+ return wstats;
+}
+
+static void orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
+ "called when hw_unavailable\n", dev->name);
+ return;
+ }
+
+ __orinoco_set_multicast_list(dev);
+ orinoco_unlock(priv, &flags);
+}
+
+static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+
+ if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
+ return -EINVAL;
+
+ if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
+ (priv->nicbuf_size - ETH_HLEN) )
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/********************************************************************/
+/* Tx path */
+/********************************************************************/
+
+static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 txfid = priv->txfid;
+ char *p;
+ struct ethhdr *eh;
+ int len, data_len, data_off;
+ struct hermes_tx_descriptor desc;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (! netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (! netif_carrier_ok(dev)) {
+ /* Oops, the firmware hasn't established a connection,
+ silently drop the packet (this seems to be the
+ safest approach). */
+ stats->tx_errors++;
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb);
+ TRACE_EXIT(dev->name);
+ return 0;
+ }
+
+ /* Length of the packet body */
+ /* FIXME: what if the skb is smaller than this? */
+ len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+
+ eh = (struct ethhdr *)skb->data;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
+ err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
+ if (err) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error %d writing Tx descriptor "
+ "to BAP\n", dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Clear the 802.11 header and data length fields - some
+ * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
+ * if this isn't done. */
+ hermes_clear_words(hw, HERMES_DATA0,
+ HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
+
+ /* Encapsulate Ethernet-II frames */
+ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
+ struct header_struct hdr;
+ data_len = len;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+ p = skb->data + ETH_HLEN;
+
+ /* 802.3 header */
+ memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
+ memcpy(hdr.src, eh->h_source, ETH_ALEN);
+ hdr.len = htons(data_len + ENCAPS_OVERHEAD);
+
+ /* 802.2 header */
+ memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
+
+ hdr.ethertype = eh->h_proto;
+ err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
+ txfid, HERMES_802_3_OFFSET);
+ if (err) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error %d writing packet "
+ "header to BAP\n", dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+ } else { /* IEEE 802.3 frame */
+ data_len = len + ETH_HLEN;
+ data_off = HERMES_802_3_OFFSET;
+ p = skb->data;
+ }
+
+ /* Round up for odd length packets */
+ err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
+ txfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
+ txfid, NULL);
+ if (err) {
+ netif_start_queue(dev);
+ printk(KERN_ERR "%s: Error %d transmitting packet\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ dev->trans_start = jiffies;
+ stats->tx_bytes += data_off + data_len;
+
+ orinoco_unlock(priv, &flags);
+
+ dev_kfree_skb(skb);
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+ fail:
+ TRACE_EXIT(dev->name);
+
+ orinoco_unlock(priv, &flags);
+ return err;
+}
+
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ u16 fid = hermes_read_regn(hw, ALLOCFID);
+
+ if (fid != priv->txfid) {
+ if (fid != DUMMY_FID)
+ printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
+ dev->name, fid);
+ return;
+ }
+
+ hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ stats->tx_packets++;
+
+ netif_wake_queue(dev);
+
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+ struct hermes_tx_descriptor desc;
+ int err = 0;
+
+ if (fid == DUMMY_FID)
+ return; /* Nothing's really happened */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), fid, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
+ "(FID=%04X error %d)\n",
+ dev->name, fid, err);
+ } else {
+ DEBUG(1, "%s: Tx error, status %d\n",
+ dev->name, le16_to_cpu(desc.status));
+ }
+
+ stats->tx_errors++;
+
+ netif_wake_queue(dev);
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void orinoco_tx_timeout(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct hermes *hw = &priv->hw;
+
+ printk(KERN_WARNING "%s: Tx timeout! "
+ "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
+ dev->name, hermes_read_regn(hw, ALLOCFID),
+ hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
+
+ stats->tx_errors++;
+
+ schedule_work(&priv->reset_work);
+}
+
+/********************************************************************/
+/* Rx path (data frames) */
+/********************************************************************/
+
+/* Does the frame have a SNAP header indicating it should be
+ * de-encapsulated to Ethernet-II? */
+static inline int is_ethersnap(void *_hdr)
+{
+ u8 *hdr = _hdr;
+
+ /* We de-encapsulate all packets which, a) have SNAP headers
+	 * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
+ * and where b) the OUI of the SNAP header is 00:00:00 or
+ * 00:00:f8 - we need both because different APs appear to use
+ * different OUIs for some reason */
+ return (memcmp(hdr, &encaps_hdr, 5) == 0)
+ && ( (hdr[5] == 0x00) || (hdr[5] == 0xf8) );
+}
+
+static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
+ int level, int noise)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int i;
+
+ /* Gather wireless spy statistics: for each packet, compare the
+	 * source address with our list, and if it matches, gather the stats... */
+ for (i = 0; i < priv->spy_number; i++)
+ if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
+ priv->spy_stat[i].level = level - 0x95;
+ priv->spy_stat[i].noise = noise - 0x95;
+ priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
+ priv->spy_stat[i].updated = 7;
+ }
+}
+
+static void orinoco_stat_gather(struct net_device *dev,
+ struct sk_buff *skb,
+ struct hermes_rx_descriptor *desc)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+
+ /* Using spy support with lots of Rx packets, like in an
+ * infrastructure (AP), will really slow down everything, because
+ * the MAC address must be compared to each entry of the spy list.
+ * If the user really asks for it (set some address in the
+ * spy list), we do it, but he will pay the price.
+ * Note that to get here, you need both WIRELESS_SPY
+ * compiled in AND some addresses in the list !!!
+ */
+ /* Note : gcc will optimise the whole section away if
+ * WIRELESS_SPY is not defined... - Jean II */
+ if (SPY_NUMBER(priv)) {
+ orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
+ desc->signal, desc->silence);
+ }
+}
+
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct iw_statistics *wstats = &priv->wstats;
+ struct sk_buff *skb = NULL;
+ u16 rxfid, status;
+ int length, data_len, data_off;
+ char *p;
+ struct hermes_rx_descriptor desc;
+ struct header_struct hdr;
+ struct ethhdr *eh;
+ int err;
+
+ rxfid = hermes_read_regn(hw, RXFID);
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
+ rxfid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading Rx descriptor. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ status = le16_to_cpu(desc.status);
+
+ if (status & HERMES_RXSTAT_ERR) {
+ if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
+ wstats->discard.code++;
+ DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
+ dev->name);
+ } else {
+ stats->rx_crc_errors++;
+ DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name);
+ }
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* For now we ignore the 802.11 header completely, assuming
+ that the card's firmware has handled anything vital */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &hdr, sizeof(hdr),
+ rxfid, HERMES_802_3_OFFSET);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame header. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ length = ntohs(hdr.len);
+
+ /* Sanity checks */
+	if (length < 3) { /* No room for even an 802.2 LLC header */
+ /* At least on Symbol firmware with PCF we get quite a
+ lot of these legitimately - Poll frames with no
+ data. */
+ stats->rx_dropped++;
+ goto drop;
+ }
+ if (length > IEEE802_11_DATA_LEN) {
+ printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
+ dev->name, length);
+ stats->rx_length_errors++;
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* We need space for the packet data itself, plus an ethernet
+ header, plus 2 bytes so we can align the IP header on a
+ 32bit boundary, plus 1 byte so we can read in odd length
+ packets from the card, which has an IO granularity of 16
+ bits */
+ skb = dev_alloc_skb(length+ETH_HLEN+2+1);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
+ dev->name);
+ goto drop;
+ }
+
+ skb_reserve(skb, 2); /* This way the IP header is aligned */
+
+ /* Handle decapsulation
+	 * In most cases, the firmware tells us about SNAP frames.
+ * For some reason, the SNAP frames sent by LinkSys APs
+ * are not properly recognised by most firmwares.
+ * So, check ourselves */
+ if (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
+ ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
+ is_ethersnap(&hdr)) {
+ /* These indicate a SNAP within 802.2 LLC within
+ 802.11 frame which we'll need to de-encapsulate to
+ the original EthernetII frame. */
+
+ if (length < ENCAPS_OVERHEAD) { /* No room for full LLC+SNAP */
+ stats->rx_length_errors++;
+ goto drop;
+ }
+
+ /* Remove SNAP header, reconstruct EthernetII frame */
+ data_len = length - ENCAPS_OVERHEAD;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+
+ eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+
+ memcpy(eh, &hdr, 2 * ETH_ALEN);
+ eh->h_proto = hdr.ethertype;
+ } else {
+ /* All other cases indicate a genuine 802.3 frame. No
+ decapsulation needed. We just throw the whole
+ thing in, and hope the protocol layer can deal with
+ it as 802.3 */
+ data_len = length;
+ data_off = HERMES_802_3_OFFSET;
+ /* FIXME: we re-read from the card data we already read here */
+ }
+
+ p = skb_put(skb, data_len);
+ err = hermes_bap_pread(hw, IRQ_BAP, p, ALIGN(data_len, 2),
+ rxfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Process the wireless stats if needed */
+ orinoco_stat_gather(dev, skb, &desc);
+
+ /* Pass the packet to the networking stack */
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += length;
+
+ return;
+
+ drop:
+ stats->rx_dropped++;
+
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ return;
+}
+
+/********************************************************************/
+/* Rx path (info frames) */
+/********************************************************************/
+
+static void print_linkstatus(struct net_device *dev, u16 status)
+{
+ char * s;
+
+ if (suppress_linkstatus)
+ return;
+
+ switch (status) {
+ case HERMES_LINKSTATUS_NOT_CONNECTED:
+ s = "Not Connected";
+ break;
+ case HERMES_LINKSTATUS_CONNECTED:
+ s = "Connected";
+ break;
+ case HERMES_LINKSTATUS_DISCONNECTED:
+ s = "Disconnected";
+ break;
+ case HERMES_LINKSTATUS_AP_CHANGE:
+ s = "AP Changed";
+ break;
+ case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
+ s = "AP Out of Range";
+ break;
+ case HERMES_LINKSTATUS_AP_IN_RANGE:
+ s = "AP In Range";
+ break;
+ case HERMES_LINKSTATUS_ASSOC_FAILED:
+ s = "Association Failed";
+ break;
+ default:
+ s = "UNKNOWN";
+ }
+
+ printk(KERN_INFO "%s: New link status: %s (%04x)\n",
+ dev->name, s, status);
+}
+
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ u16 infofid;
+ struct {
+ u16 len;
+ u16 type;
+ } __attribute__ ((packed)) info;
+ int len, type;
+ int err;
+
+ /* This is an answer to an INQUIRE command that we did earlier,
+ * or an information "event" generated by the card
+	 * The controller returns to us a pseudo frame containing
+ * the information in question - Jean II */
+ infofid = hermes_read_regn(hw, INFOFID);
+
+ /* Read the info frame header - don't try too hard */
+ err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading info frame. "
+ "Frame dropped.\n", dev->name, err);
+ return;
+ }
+
+ len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
+ type = le16_to_cpu(info.type);
+
+ switch (type) {
+ case HERMES_INQ_TALLIES: {
+ struct hermes_tallies_frame tallies;
+ struct iw_statistics *wstats = &priv->wstats;
+
+ if (len > sizeof(tallies)) {
+ printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
+ dev->name, len);
+ len = sizeof(tallies);
+ }
+
+ /* Read directly the data (no seek) */
+ hermes_read_words(hw, HERMES_DATA1, (void *) &tallies,
+ len / 2); /* FIXME: blech! */
+
+ /* Increment our various counters */
+ /* wstats->discard.nwid - no wrong BSSID stuff */
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxWEPUndecryptable);
+ if (len == sizeof(tallies))
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxDiscards_WEPICVError) +
+ le16_to_cpu(tallies.RxDiscards_WEPExcluded);
+ wstats->discard.misc +=
+ le16_to_cpu(tallies.TxDiscardsWrongSA);
+ wstats->discard.fragment +=
+ le16_to_cpu(tallies.RxMsgInBadMsgFragments);
+ wstats->discard.retries +=
+ le16_to_cpu(tallies.TxRetryLimitExceeded);
+ /* wstats->miss.beacon - no match */
+ }
+ break;
+ case HERMES_INQ_LINKSTATUS: {
+ struct hermes_linkstatus linkstatus;
+ u16 newstatus;
+ int connected;
+
+ if (len != sizeof(linkstatus)) {
+ printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
+ dev->name, len);
+ break;
+ }
+
+ hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus,
+ len / 2);
+ newstatus = le16_to_cpu(linkstatus.linkstatus);
+
+ connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
+ || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE);
+
+ if (connected)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ if (newstatus != priv->last_linkstatus)
+ print_linkstatus(dev, newstatus);
+
+ priv->last_linkstatus = newstatus;
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "%s: Unknown information frame received: "
+ "type 0x%04x, length %d\n", dev->name, type, len);
+ /* We don't actually do anything about it */
+ break;
+ }
+}
+
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
+{
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
+}
+
+/********************************************************************/
+/* Internal hardware control routines */
+/********************************************************************/
+
+int __orinoco_up(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = __orinoco_program_rids(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d configuring card\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Fire things up again */
+ hermes_set_irqmask(hw, ORINOCO_INTEN);
+ err = hermes_enable_port(hw, 0);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d enabling MAC port\n",
+ dev->name, err);
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+int __orinoco_down(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ netif_stop_queue(dev);
+
+ if (! priv->hw_unavailable) {
+ if (! priv->broken_disableport) {
+ err = hermes_disable_port(hw, 0);
+ if (err) {
+ /* Some firmwares (e.g. Intersil 1.3.x) seem
+ * to have problems disabling the port, oh
+ * well, too bad. */
+ printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
+ dev->name, err);
+ priv->broken_disableport = 1;
+ }
+ }
+ hermes_set_irqmask(hw, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+ }
+
+ /* firmware will have to reassociate */
+ netif_carrier_off(dev);
+ priv->last_linkstatus = 0xffff;
+
+ return 0;
+}
+
+int orinoco_reinit_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = hermes_init(hw);
+ if (err)
+ return err;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+
+ return err;
+}
+
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+
+ if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
+ printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
+ priv->ndev->name, priv->bitratemode);
+ return -EINVAL;
+ }
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].agere_txratectrl);
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+ case FIRMWARE_TYPE_SYMBOL:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].intersil_txratectrl);
+ break;
+ default:
+ BUG();
+ }
+
+ return err;
+}
+
+/* Change the WEP keys and/or the current keys. Can be called
+ * either from __orinoco_hw_setup_wep() or directly from
+ * orinoco_ioctl_setiwencode(). In the latter case the association
+ * with the AP is not broken (if the firmware can handle it),
+ * which is needed for 802.1x implementations. */
+static int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ err = HERMES_WRITE_RECORD(hw, USER_BAP,
+ HERMES_RID_CNFWEPKEYS_AGERE,
+ &priv->keys);
+ if (err)
+ return err;
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXKEY_AGERE,
+ priv->tx_key);
+ if (err)
+ return err;
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+ case FIRMWARE_TYPE_SYMBOL:
+ {
+ int keylen;
+ int i;
+
+ /* Force uniform key length to work around firmware bugs */
+ keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
+
+ if (keylen > LARGE_KEY_SIZE) {
+ printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
+ priv->ndev->name, priv->tx_key, keylen);
+ return -E2BIG;
+ }
+
+ /* Write all 4 keys */
+ for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
+ err = hermes_write_ltv(hw, USER_BAP,
+ HERMES_RID_CNFDEFAULTKEY0 + i,
+ HERMES_BYTES_TO_RECLEN(keylen),
+ priv->keys[i].data);
+ if (err)
+ return err;
+ }
+
+ /* Write the index of the key used in transmission */
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPDEFAULTKEYID,
+ priv->tx_key);
+ if (err)
+ return err;
+ }
+ break;
+ }
+
+ return 0;
+}
+
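+/* Program the full WEP configuration: the keys themselves (via
+ * __orinoco_hw_setup_wepkeys()), the authentication mode (open vs.
+ * shared key) and the master WEP enable flags, using the RIDs
+ * appropriate to the firmware variant. */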
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int master_wep_flag;
+ int auth_flag;
+
+ if (priv->wep_on)
+ __orinoco_hw_setup_wepkeys(priv);
+
+ if (priv->wep_restrict)
+ auth_flag = HERMES_AUTH_SHARED_KEY;
+ else
+ auth_flag = HERMES_AUTH_OPEN;
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
+		if (priv->wep_on) {
+			/* Enable the shared-key authentication. */
+			err = hermes_write_wordrec(hw, USER_BAP,
+						   HERMES_RID_CNFAUTHENTICATION_AGERE,
+						   auth_flag);
+			if (err)
+				return err;
+		}
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPENABLED_AGERE,
+ priv->wep_on);
+ if (err)
+ return err;
+ break;
+
+ case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
+ case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
+ if (priv->wep_on) {
+ if (priv->wep_restrict ||
+ (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
+ master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
+ HERMES_WEP_EXCL_UNENCRYPTED;
+ else
+ master_wep_flag = HERMES_WEP_PRIVACY_INVOKED;
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFAUTHENTICATION,
+ auth_flag);
+ if (err)
+ return err;
+ } else
+ master_wep_flag = 0;
+
+ if (priv->iw_mode == IW_MODE_MONITOR)
+ master_wep_flag |= HERMES_WEP_HOST_DECRYPT;
+
+ /* Master WEP setting : on/off */
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPFLAGS_INTERSIL,
+ master_wep_flag);
+ if (err)
+ return err;
+
+ break;
+ }
+
+ return 0;
+}
+
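+/* Write the complete runtime configuration to the firmware: MAC
+ * address, port type, channel, SSIDs, station name, AP density,
+ * RTS/fragmentation thresholds, bitrate, power management, preamble
+ * and WEP, and finally the promiscuous/multicast state. */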
+static int __orinoco_program_rids(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err;
+ struct hermes_idstring idbuf;
+
+ /* Set the MAC address */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting MAC address\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set up the link mode */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
+ priv->port_type);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting port type\n",
+ dev->name, err);
+ return err;
+ }
+ /* Set the channel/frequency */
+ if (priv->channel == 0) {
+ printk(KERN_DEBUG "%s: Channel is 0 in __orinoco_program_rids()\n", dev->name);
+ if (priv->createibss)
+ priv->channel = 10;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL,
+ priv->channel);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting channel\n",
+ dev->name, err);
+ return err;
+ }
+
+ if (priv->has_ibss) {
+ u16 createibss;
+
+ if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) {
+ printk(KERN_WARNING "%s: This firmware requires an "
+ "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
+ /* With wvlan_cs, in this case, we would crash.
+			 * Hopefully, this driver will behave better...
+ * Jean II */
+ createibss = 0;
+ } else {
+ createibss = priv->createibss;
+ }
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFCREATEIBSS,
+ createibss);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set the desired ESSID */
+ idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
+ memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
+ /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set the station name */
+ idbuf.len = cpu_to_le16(strlen(priv->nick));
+ memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting nickname\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set AP density */
+ if (priv->has_sensitivity) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFSYSTEMSCALE,
+ priv->ap_density);
+ if (err) {
+ printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
+ "Disabling sensitivity control\n",
+ dev->name, err);
+
+ priv->has_sensitivity = 0;
+ }
+ }
+
+ /* Set RTS threshold */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
+ priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set fragmentation threshold or MWO robustness */
+ if (priv->has_mwo)
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ priv->mwo_robust);
+ else
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ priv->frag_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting fragmentation\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set bitrate */
+ err = __orinoco_hw_set_bitrate(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting bitrate\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Set power management */
+ if (priv->has_pm) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMENABLED,
+ priv->pm_on);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMULTICASTRECEIVE,
+ priv->pm_mcast);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set preamble - only for Symbol so far... */
+ if (priv->has_preamble) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPREAMBLE_SYMBOL,
+ priv->preamble);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting preamble\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set up encryption */
+ if (priv->has_wep) {
+ err = __orinoco_hw_setup_wep(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d activating WEP\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+	/* Set promiscuity / multicast */
+ priv->promiscuous = 0;
+ priv->mc_count = 0;
+ __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+ return 0;
+}
+
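+/* Program the firmware's promiscuous flag and multicast address list
+ * to match the net_device, falling back to promiscuous mode when
+ * allmulti is requested or the list is too long, and reflecting any
+ * forced promiscuous mode back into dev->flags. */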
+/* FIXME: return int? */
+static void
+__orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int promisc, mc_count;
+
+ /* The Hermes doesn't seem to have an allmulti mode, so we go
+ * into promiscuous mode and let the upper levels deal. */
+ if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > MAX_MULTICAST(priv)) ) {
+ promisc = 1;
+ mc_count = 0;
+ } else {
+ promisc = 0;
+ mc_count = dev->mc_count;
+ }
+
+ if (promisc != priv->promiscuous) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPROMISCUOUSMODE,
+ promisc);
+ if (err) {
+			printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to %d.\n",
+			       dev->name, err, promisc);
+ } else
+ priv->promiscuous = promisc;
+ }
+
+ if (! promisc && (mc_count || priv->mc_count) ) {
+ struct dev_mc_list *p = dev->mc_list;
+ struct hermes_multicast mclist;
+ int i;
+
+ for (i = 0; i < mc_count; i++) {
+ /* paranoia: is list shorter than mc_count? */
+ BUG_ON(! p);
+ /* paranoia: bad address size in list? */
+ BUG_ON(p->dmi_addrlen != ETH_ALEN);
+
+ memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
+ p = p->next;
+ }
+
+ if (p)
+ printk(KERN_WARNING "%s: Multicast list is "
+ "longer than mc_count\n", dev->name);
+
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES,
+				       HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
+ &mclist);
+ if (err)
+ printk(KERN_ERR "%s: Error %d setting multicast list.\n",
+ dev->name, err);
+ else
+ priv->mc_count = mc_count;
+ }
+
+ /* Since we can set the promiscuous flag when it wasn't asked
+ for, make sure the net_device knows about it. */
+ if (priv->promiscuous)
+ dev->flags |= IFF_PROMISC;
+ else
+ dev->flags &= ~IFF_PROMISC;
+}
+
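+/* Push a changed configuration to a running card: disable the MAC
+ * port, reprogram all RIDs and re-enable the port.  If any step fails
+ * (or the firmware cannot disable the port) fall back to a full reset
+ * via reset_work. */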
+static int orinoco_reconfigure(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ unsigned long flags;
+ int err = 0;
+
+ if (priv->broken_disableport) {
+ schedule_work(&priv->reset_work);
+ return 0;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_disable_port(hw, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to disable port while reconfiguring card\n",
+ dev->name);
+ priv->broken_disableport = 1;
+ goto out;
+ }
+
+ err = __orinoco_program_rids(dev);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to reconfigure card\n",
+ dev->name);
+ goto out;
+ }
+
+ err = hermes_enable_port(hw, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
+ dev->name);
+ goto out;
+ }
+
+ out:
+ if (err) {
+ printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
+ schedule_work(&priv->reset_work);
+ err = 0;
+ }
+
+ orinoco_unlock(priv, &flags);
+ return err;
+}
+
+/* This must be called from user context, without locks held - use
+ * schedule_work() */
+static void orinoco_reset(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ /* When the hardware becomes available again, whatever
+ * detects that is responsible for re-initializing
+ * it. So no need for anything further */
+ return;
+
+ netif_stop_queue(dev);
+
+ /* Shut off interrupts. Depending on what state the hardware
+ * is in, this might not work, but we'll try anyway */
+ hermes_set_irqmask(hw, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+
+ priv->hw_unavailable++;
+ priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
+ netif_carrier_off(dev);
+
+ orinoco_unlock(priv, &flags);
+
+ if (priv->hard_reset)
+ err = (*priv->hard_reset)(priv);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d "
+ "performing hard reset\n", dev->name, err);
+ /* FIXME: shutdown of some sort */
+ return;
+ }
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
+ dev->name, err);
+ return;
+ }
+
+ spin_lock_irq(&priv->lock); /* This has to be called from user context */
+
+ priv->hw_unavailable--;
+
+ /* priv->open or priv->hw_unavailable might have changed while
+ * we dropped the lock */
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
+ dev->name, err);
+ } else
+ dev->trans_start = jiffies;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return;
+}
+
+/********************************************************************/
+/* Interrupt handler */
+/********************************************************************/
+
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
+{
+ printk(KERN_DEBUG "%s: TICK\n", dev->name);
+}
+
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
+{
+ /* This seems to happen a fair bit under load, but ignoring it
+ seems to work fine...*/
+ printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
+ dev->name);
+}
+
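+/* Top-level interrupt handler: dispatch each pending Hermes event to
+ * its __orinoco_ev_*() handler and acknowledge it.  Gives up and
+ * schedules a reset if the handler loops too often within a single
+ * jiffy; also bails out if the card has been removed. */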
+irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int count = MAX_IRQLOOPS_PER_IRQ;
+ u16 evstat, events;
+ /* These are used to detect a runaway interrupt situation */
+ /* If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
+ * we panic and shut down the hardware */
+ static int last_irq_jiffy = 0; /* jiffies value the last time
+ * we were called */
+ static int loops_this_jiffy = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ /* If hw is unavailable - we don't know if the irq was
+ * for us or not */
+ return IRQ_HANDLED;
+ }
+
+ evstat = hermes_read_regn(hw, EVSTAT);
+ events = evstat & hw->inten;
+ if (! events) {
+ orinoco_unlock(priv, &flags);
+ return IRQ_NONE;
+ }
+
+ if (jiffies != last_irq_jiffy)
+ loops_this_jiffy = 0;
+ last_irq_jiffy = jiffies;
+
+ while (events && count--) {
+ if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
+ printk(KERN_WARNING "%s: IRQ handler is looping too "
+ "much! Resetting.\n", dev->name);
+ /* Disable interrupts for now */
+ hermes_set_irqmask(hw, 0);
+ schedule_work(&priv->reset_work);
+ break;
+ }
+
+ /* Check the card hasn't been removed */
+ if (! hermes_present(hw)) {
+ DEBUG(0, "orinoco_interrupt(): card removed\n");
+ break;
+ }
+
+ if (events & HERMES_EV_TICK)
+ __orinoco_ev_tick(dev, hw);
+ if (events & HERMES_EV_WTERR)
+ __orinoco_ev_wterr(dev, hw);
+ if (events & HERMES_EV_INFDROP)
+ __orinoco_ev_infdrop(dev, hw);
+ if (events & HERMES_EV_INFO)
+ __orinoco_ev_info(dev, hw);
+ if (events & HERMES_EV_RX)
+ __orinoco_ev_rx(dev, hw);
+ if (events & HERMES_EV_TXEXC)
+ __orinoco_ev_txexc(dev, hw);
+ if (events & HERMES_EV_TX)
+ __orinoco_ev_tx(dev, hw);
+ if (events & HERMES_EV_ALLOC)
+ __orinoco_ev_alloc(dev, hw);
+
+ hermes_write_regn(hw, EVACK, events);
+
+ evstat = hermes_read_regn(hw, EVSTAT);
+ events = evstat & hw->inten;
+	}
+
+ orinoco_unlock(priv, &flags);
+ return IRQ_HANDLED;
+}
+
+/********************************************************************/
+/* Initialization */
+/********************************************************************/
+
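+/* Identity record as returned by the NICID and STAID RIDs (values are
+ * little-endian as read from the card). */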
+struct comp_id {
+ u16 id, variant, major, minor;
+} __attribute__ ((packed));
+
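+/* Classify the firmware vendor from the NIC identity: IDs below
+ * 0x8000 are Lucent/Agere, 0x8000 with major version 0 is Symbol,
+ * and anything else is treated as Intersil. */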
+static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
+{
+ if (nic_id->id < 0x8000)
+ return FIRMWARE_TYPE_AGERE;
+ else if (nic_id->id == 0x8000 && nic_id->major == 0)
+ return FIRMWARE_TYPE_SYMBOL;
+ else
+ return FIRMWARE_TYPE_INTERSIL;
+}
+
+/* Set priv->firmware type, determine firmware properties */
+static int determine_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err;
+ struct comp_id nic_id, sta_id;
+ unsigned int firmver;
+ char tmp[SYMBOL_MAX_VER_LEN+1];
+
+ /* Get the hardware version */
+ err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
+ if (err) {
+ printk(KERN_ERR "%s: Cannot read hardware identity: error %d\n",
+ dev->name, err);
+ return err;
+ }
+
+ le16_to_cpus(&nic_id.id);
+ le16_to_cpus(&nic_id.variant);
+ le16_to_cpus(&nic_id.major);
+ le16_to_cpus(&nic_id.minor);
+ printk(KERN_DEBUG "%s: Hardware identity %04x:%04x:%04x:%04x\n",
+ dev->name, nic_id.id, nic_id.variant,
+ nic_id.major, nic_id.minor);
+
+ priv->firmware_type = determine_firmware_type(&nic_id);
+
+ /* Get the firmware version */
+ err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
+ if (err) {
+ printk(KERN_ERR "%s: Cannot read station identity: error %d\n",
+ dev->name, err);
+ return err;
+ }
+
+ le16_to_cpus(&sta_id.id);
+ le16_to_cpus(&sta_id.variant);
+ le16_to_cpus(&sta_id.major);
+ le16_to_cpus(&sta_id.minor);
+ printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
+ dev->name, sta_id.id, sta_id.variant,
+ sta_id.major, sta_id.minor);
+
+ switch (sta_id.id) {
+ case 0x15:
+ printk(KERN_ERR "%s: Primary firmware is active\n",
+ dev->name);
+ return -ENODEV;
+ case 0x14b:
+ printk(KERN_ERR "%s: Tertiary firmware is active\n",
+ dev->name);
+ return -ENODEV;
+ case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */
+ case 0x21: /* Symbol Spectrum24 Trilogy */
+ break;
+ default:
+ printk(KERN_NOTICE "%s: Unknown station ID, please report\n",
+ dev->name);
+ break;
+ }
+
+ /* Default capabilities */
+ priv->has_sensitivity = 1;
+ priv->has_mwo = 0;
+ priv->has_preamble = 0;
+ priv->has_port3 = 1;
+ priv->has_ibss = 1;
+ priv->has_wep = 0;
+ priv->has_big_wep = 0;
+
+ /* Determine capabilities from the firmware version */
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
+ ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
+ snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
+ "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor);
+
+ firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
+
+ priv->has_ibss = (firmver >= 0x60006);
+ priv->has_wep = (firmver >= 0x40020);
+ priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
+ Gold cards from the others? */
+ priv->has_mwo = (firmver >= 0x60000);
+ priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
+ priv->ibss_port = 1;
+
+ /* Tested with Agere firmware :
+ * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
+ * Tested CableTron firmware : 4.32 => Anton */
+ break;
+ case FIRMWARE_TYPE_SYMBOL:
+		/* Symbol, 3Com AirConnect, Intel, Ericsson WLAN */
+ /* Intel MAC : 00:02:B3:* */
+ /* 3Com MAC : 00:50:DA:* */
+ memset(tmp, 0, sizeof(tmp));
+ /* Get the Symbol firmware version */
+ err = hermes_read_ltv(hw, USER_BAP,
+ HERMES_RID_SECONDARYVERSION_SYMBOL,
+ SYMBOL_MAX_VER_LEN, NULL, &tmp);
+ if (err) {
+ printk(KERN_WARNING
+ "%s: Error %d reading Symbol firmware info. Wildly guessing capabilities...\n",
+ dev->name, err);
+ firmver = 0;
+ tmp[0] = '\0';
+ } else {
+ /* The firmware revision is a string, the format is
+ * something like : "V2.20-01".
+ * Quick and dirty parsing... - Jean II
+ */
+ firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12)
+ | ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4)
+ | (tmp[7] - '0');
+
+ tmp[SYMBOL_MAX_VER_LEN] = '\0';
+ }
+
+ snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
+ "Symbol %s", tmp);
+
+ priv->has_ibss = (firmver >= 0x20000);
+ priv->has_wep = (firmver >= 0x15012);
+ priv->has_big_wep = (firmver >= 0x20000);
+ priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) ||
+ (firmver >= 0x29000 && firmver < 0x30000) ||
+ firmver >= 0x31000;
+ priv->has_preamble = (firmver >= 0x20000);
+ priv->ibss_port = 4;
+ /* Tested with Intel firmware : 0x20015 => Jean II */
+ /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+		/* D-Link, Linksys, Addtron, ZoomAir, and many others...
+ * Samsung, Compaq 100/200 and Proxim are slightly
+ * different and less well tested */
+ /* D-Link MAC : 00:40:05:* */
+ /* Addtron MAC : 00:90:D1:* */
+ snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
+ "Intersil %d.%d.%d", sta_id.major, sta_id.minor,
+ sta_id.variant);
+
+ firmver = ((unsigned long)sta_id.major << 16) |
+ ((unsigned long)sta_id.minor << 8) | sta_id.variant;
+
+ priv->has_ibss = (firmver >= 0x000700); /* FIXME */
+ priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
+ priv->has_pm = (firmver >= 0x000700);
+
+ if (firmver >= 0x000800)
+ priv->ibss_port = 0;
+ else {
+ printk(KERN_NOTICE "%s: Intersil firmware earlier "
+ "than v0.8.x - several features not supported\n",
+ dev->name);
+ priv->ibss_port = 1;
+ }
+ break;
+ }
+ printk(KERN_DEBUG "%s: Firmware determined as %s\n", dev->name,
+ priv->fw_name);
+
+ return 0;
+}
+
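+/* dev->init callback: initialize the Hermes, identify the firmware
+ * and its capabilities, read the MAC address, station name, channel
+ * list and default thresholds, allocate the TX buffer (with the
+ * Symbol ALLOC workaround) and finally clear hw_unavailable. */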
+static int orinoco_init(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring nickbuf;
+ u16 reclen;
+ int len;
+
+ TRACE_ENTER(dev->name);
+
+ /* No need to lock, the hw_unavailable flag is already set in
+ * alloc_orinocodev() */
+ priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN;
+
+ /* Initialize the firmware */
+ err = hermes_init(hw);
+ if (err != 0) {
+ printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
+ dev->name, err);
+ goto out;
+ }
+
+ err = determine_firmware(dev);
+ if (err != 0) {
+ printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
+ dev->name);
+ goto out;
+ }
+
+ if (priv->has_port3)
+ printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
+ if (priv->has_ibss)
+ printk(KERN_DEBUG "%s: IEEE standard IBSS ad-hoc mode supported\n",
+ dev->name);
+ if (priv->has_wep) {
+ printk(KERN_DEBUG "%s: WEP supported, ", dev->name);
+ if (priv->has_big_wep)
+ printk("104-bit key\n");
+ else
+ printk("40-bit key\n");
+ }
+
+ /* Get the MAC address */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev->dev_addr);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to read MAC address!\n",
+ dev->name);
+ goto out;
+ }
+
+ printk(KERN_DEBUG "%s: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name, dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Get the station name */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read station name\n",
+ dev->name);
+ goto out;
+ }
+ if (nickbuf.len)
+ len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
+ else
+ len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
+ memcpy(priv->nick, &nickbuf.val, len);
+ priv->nick[len] = '\0';
+
+ printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
+
+ /* Get allowed channels */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
+ &priv->channel_mask);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read channel list!\n",
+ dev->name);
+ goto out;
+ }
+
+ /* Get initial AP density */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ &priv->ap_density);
+ if (err || priv->ap_density < 1 || priv->ap_density > 3) {
+ priv->has_sensitivity = 0;
+ }
+
+ /* Get initial RTS threshold */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
+ &priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read RTS threshold!\n",
+ dev->name);
+ goto out;
+ }
+
+ /* Get initial fragmentation settings */
+ if (priv->has_mwo)
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &priv->mwo_robust);
+ else
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &priv->frag_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read fragmentation settings!\n",
+ dev->name);
+ goto out;
+ }
+
+ /* Power management setup */
+ if (priv->has_pm) {
+ priv->pm_on = 0;
+ priv->pm_mcast = 1;
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ &priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management period!\n",
+ dev->name);
+ goto out;
+ }
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ &priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management timeout!\n",
+ dev->name);
+ goto out;
+ }
+ }
+
+ /* Preamble setup */
+ if (priv->has_preamble) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPREAMBLE_SYMBOL,
+ &priv->preamble);
+ if (err)
+ goto out;
+ }
+
+ /* Set up the default configuration */
+ priv->iw_mode = IW_MODE_INFRA;
+ /* By default use IEEE/IBSS ad-hoc mode if we have it */
+ priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss);
+ set_port_type(priv);
+ priv->channel = 10; /* default channel, more-or-less arbitrary */
+
+ priv->promiscuous = 0;
+ priv->wep_on = 0;
+ priv->tx_key = 0;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+ if (err) {
+		printk(KERN_ERR "%s: Error %d allocating Tx buffer\n",
+		       dev->name, err);
+ goto out;
+ }
+
+ /* Make the hardware available, as long as it hasn't been
+ * removed elsewhere (e.g. by PCMCIA hot unplug) */
+ spin_lock_irq(&priv->lock);
+ priv->hw_unavailable--;
+ spin_unlock_irq(&priv->lock);
+
+ printk(KERN_DEBUG "%s: ready\n", dev->name);
+
+ out:
+ TRACE_EXIT(dev->name);
+ return err;
+}
+
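+/* Allocate the net_device together with the orinoco private data and
+ * an optional card-specific area, set up the netdev callbacks and the
+ * reset work, and leave hw_unavailable set until orinoco_init() has
+ * run. */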
+struct net_device *alloc_orinocodev(int sizeof_card,
+ int (*hard_reset)(struct orinoco_private *))
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+
+ dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
+ if (! dev)
+ return NULL;
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ if (sizeof_card)
+ priv->card = (void *)((unsigned long)netdev_priv(dev)
+ + sizeof(struct orinoco_private));
+ else
+ priv->card = NULL;
+
+ /* Setup / override net_device fields */
+ dev->init = orinoco_init;
+ dev->hard_start_xmit = orinoco_xmit;
+ dev->tx_timeout = orinoco_tx_timeout;
+ dev->watchdog_timeo = HZ; /* 1 second timeout */
+ dev->get_stats = orinoco_get_stats;
+ dev->get_wireless_stats = orinoco_get_wireless_stats;
+ dev->do_ioctl = orinoco_ioctl;
+ dev->change_mtu = orinoco_change_mtu;
+ dev->set_multicast_list = orinoco_set_multicast_list;
+ /* we use the default eth_mac_addr for setting the MAC addr */
+
+ /* Set up default callbacks */
+ dev->open = orinoco_open;
+ dev->stop = orinoco_stop;
+ priv->hard_reset = hard_reset;
+
+ spin_lock_init(&priv->lock);
+ priv->open = 0;
+ priv->hw_unavailable = 1; /* orinoco_init() must clear this
+ * before anything else touches the
+ * hardware */
+ INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
+
+ netif_carrier_off(dev);
+ priv->last_linkstatus = 0xffff;
+
+ return dev;
+}
+
+void free_orinocodev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+
+/********************************************************************/
+/* Wireless extensions */
+/********************************************************************/
+
+static int orinoco_hw_get_bssid(struct orinoco_private *priv,
+ char buf[ETH_ALEN])
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, buf);
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
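+/* Return the ESSID in use: the configured (OWN/DESIRED) SSID if the
+ * user set one, otherwise the SSID of the network the firmware is
+ * currently associated with. */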
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+ char buf[IW_ESSID_MAX_SIZE+1])
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring essidbuf;
+ char *p = (char *)(&essidbuf.val);
+ int len;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (strlen(priv->desired_essid) > 0) {
+ /* We read the desired SSID from the hardware rather
+ than from priv->desired_essid, just in case the
+ firmware is allowed to change it on us. I'm not
+ sure about this */
+ /* My guess is that the OWNSSID should always be whatever
+ * we set to the card, whereas CURRENT_SSID is the one that
+ * may change... - Jean II */
+ u16 rid;
+
+ *active = 1;
+
+ rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
+ HERMES_RID_CNFDESIREDSSID;
+
+ err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
+ } else {
+ *active = 0;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
+ }
+
+ len = le16_to_cpu(essidbuf.len);
+
+ memset(buf, 0, IW_ESSID_MAX_SIZE+1);
+ memcpy(buf, p, len);
+ buf[len] = '\0';
+
+ fail_unlock:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
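+/* Return the current operating frequency (derived from the
+ * CURRENTCHANNEL RID via channel_frequency[]), or a negative error if
+ * the channel cannot be read or is out of range. */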
+static long orinoco_hw_get_freq(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 channel;
+ long freq = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
+ if (err)
+ goto out;
+
+ /* Intersil firmware 1.3.5 returns 0 when the interface is down */
+ if (channel == 0) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
+ printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
+ priv->ndev->name, channel);
+ err = -EBUSY;
+ goto out;
+	}
+ freq = channel_frequency[channel-1] * 100000;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ if (err > 0)
+ err = -EBUSY;
+ return err ? err : freq;
+}
+
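+/* Read the SUPPORTEDDATARATES RID and convert the 802.11 rate
+ * encoding (units of 500kb/s, top bit ignored) into bit/s. */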
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
+ int *numrates, s32 *rates, int max)
+{
+ hermes_t *hw = &priv->hw;
+ struct hermes_idstring list;
+ unsigned char *p = (unsigned char *)&list.val;
+ int err = 0;
+ int num;
+ int i;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return err;
+
+ num = le16_to_cpu(list.len);
+ *numrates = num;
+ num = min(num, max);
+
+ for (i = 0; i < num; i++) {
+ rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
+ }
+
+ return 0;
+}
+
+static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+ int mode;
+ struct iw_range range;
+ int numrates;
+ int i, k;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (!access_ok(VERIFY_WRITE, rrq->pointer, sizeof(range)))
+ return -EFAULT;
+
+ rrq->length = sizeof(range);
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+
+ memset(&range, 0, sizeof(range));
+
+	/* Much of this was shamelessly taken from wvlan_cs.c. No idea
+ * what it all means -dgibson */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 11;
+
+ range.min_nwid = range.max_nwid = 0; /* We don't use nwids */
+
+ /* Set available channels/frequencies */
+ range.num_channels = NUM_CHANNELS;
+ k = 0;
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (priv->channel_mask & (1 << i)) {
+ range.freq[k].i = i + 1;
+ range.freq[k].m = channel_frequency[i] * 100000;
+ range.freq[k].e = 1;
+ k++;
+ }
+
+ if (k >= IW_MAX_FREQUENCIES)
+ break;
+ }
+ range.num_frequency = k;
+
+ range.sensitivity = 3;
+
+ if ((mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){
+ /* Quality stats meaningless in ad-hoc mode */
+ range.max_qual.qual = 0;
+ range.max_qual.level = 0;
+ range.max_qual.noise = 0;
+ range.avg_qual.qual = 0;
+ range.avg_qual.level = 0;
+ range.avg_qual.noise = 0;
+ } else {
+ range.max_qual.qual = 0x8b - 0x2f;
+ range.max_qual.level = 0x2f - 0x95 - 1;
+ range.max_qual.noise = 0x2f - 0x95 - 1;
+ /* Need to get better values */
+ range.avg_qual.qual = 0x24;
+ range.avg_qual.level = 0xC2;
+ range.avg_qual.noise = 0x9E;
+ }
+
+ err = orinoco_hw_get_bitratelist(priv, &numrates,
+ range.bitrate, IW_MAX_BITRATES);
+ if (err)
+ return err;
+ range.num_bitrates = numrates;
+
+ /* Set an indication of the max TCP throughput in bit/s that we can
+	 * expect using this interface. May be used for QoS stuff...
+ * Jean II */
+ if(numrates > 2)
+ range.throughput = 5 * 1000 * 1000; /* ~5 Mb/s */
+ else
+ range.throughput = 1.5 * 1000 * 1000; /* ~1.5 Mb/s */
+
+ range.min_rts = 0;
+ range.max_rts = 2347;
+ range.min_frag = 256;
+ range.max_frag = 2346;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ if (priv->has_wep) {
+ range.max_encoding_tokens = ORINOCO_MAX_KEYS;
+
+ range.encoding_size[0] = SMALL_KEY_SIZE;
+ range.num_encoding_sizes = 1;
+
+ if (priv->has_big_wep) {
+ range.encoding_size[1] = LARGE_KEY_SIZE;
+ range.num_encoding_sizes = 2;
+ }
+ } else {
+ range.num_encoding_sizes = 0;
+ range.max_encoding_tokens = 0;
+ }
+ orinoco_unlock(priv, &flags);
+
+ range.min_pmp = 0;
+ range.max_pmp = 65535000;
+ range.min_pmt = 0;
+ range.max_pmt = 65535 * 1000; /* ??? */
+ range.pmp_flags = IW_POWER_PERIOD;
+ range.pmt_flags = IW_POWER_TIMEOUT;
+ range.pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_UNICAST_R;
+
+ range.num_txpower = 1;
+ range.txpower[0] = 15; /* 15dBm */
+ range.txpower_capa = IW_TXPOW_DBM;
+
+ range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range.retry_flags = IW_RETRY_LIMIT;
+ range.r_time_flags = IW_RETRY_LIFETIME;
+ range.min_retry = 0;
+ range.max_retry = 65535; /* ??? */
+ range.min_r_time = 0;
+ range.max_r_time = 65535 * 1000; /* ??? */
+
+ if (copy_to_user(rrq->pointer, &range, sizeof(range)))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+ int setindex = priv->tx_key;
+ int enable = priv->wep_on;
+ int restricted = priv->wep_restrict;
+ u16 xlen = 0;
+ int err = 0;
+ char keybuf[ORINOCO_MAX_KEY_SIZE];
+ unsigned long flags;
+
+ if (! priv->has_wep)
+ return -EOPNOTSUPP;
+
+ if (erq->pointer) {
+ /* We actually have a key to set - check its length */
+ if (erq->length > LARGE_KEY_SIZE)
+ return -E2BIG;
+
+ if ( (erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep )
+ return -E2BIG;
+
+ if (copy_from_user(keybuf, erq->pointer, erq->length))
+ return -EFAULT;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (erq->pointer) {
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
+ index = priv->tx_key;
+
+ /* Adjust key length to a supported value */
+ if (erq->length > SMALL_KEY_SIZE) {
+ xlen = LARGE_KEY_SIZE;
+ } else if (erq->length > 0) {
+ xlen = SMALL_KEY_SIZE;
+ } else
+ xlen = 0;
+
+ /* Switch on WEP if off */
+ if ((!enable) && (xlen > 0)) {
+ setindex = index;
+ enable = 1;
+ }
+ } else {
+		/* Important note : if the user does "iwconfig eth0 enc off",
+		 * we will arrive here with an index of -1. This is valid
+		 * but needs to be taken care of... Jean II */
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
+ if((index != -1) || (erq->flags == 0)) {
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* Set the index : Check that the key is valid */
+ if(priv->keys[index].len == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ setindex = index;
+ }
+ }
+
+ if (erq->flags & IW_ENCODE_DISABLED)
+ enable = 0;
+ if (erq->flags & IW_ENCODE_OPEN)
+ restricted = 0;
+ if (erq->flags & IW_ENCODE_RESTRICTED)
+ restricted = 1;
+
+ if (erq->pointer) {
+ priv->keys[index].len = cpu_to_le16(xlen);
+ memset(priv->keys[index].data, 0,
+ sizeof(priv->keys[index].data));
+ memcpy(priv->keys[index].data, keybuf, erq->length);
+ }
+ priv->tx_key = setindex;
+
+ /* Try fast key change if connected and only keys are changed */
+ if (priv->wep_on && enable && (priv->wep_restrict == restricted) &&
+ netif_carrier_ok(dev)) {
+ err = __orinoco_hw_setup_wepkeys(priv);
+ /* No need to commit if successful */
+ goto out;
+ }
+
+ priv->wep_on = enable;
+ priv->wep_restrict = restricted;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+ u16 xlen = 0;
+ char keybuf[ORINOCO_MAX_KEY_SIZE];
+ unsigned long flags;
+
+ if (! priv->has_wep)
+ return -EOPNOTSUPP;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
+ index = priv->tx_key;
+
+ erq->flags = 0;
+ if (! priv->wep_on)
+ erq->flags |= IW_ENCODE_DISABLED;
+ erq->flags |= index + 1;
+
+ if (priv->wep_restrict)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ else
+ erq->flags |= IW_ENCODE_OPEN;
+
+ xlen = le16_to_cpu(priv->keys[index].len);
+
+ erq->length = xlen;
+
+ memcpy(keybuf, priv->keys[index].data, ORINOCO_MAX_KEY_SIZE);
+
+ orinoco_unlock(priv, &flags);
+
+ if (erq->pointer) {
+ if (copy_to_user(erq->pointer, keybuf, xlen))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ unsigned long flags;
+
+ /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
+ * anyway... - Jean II */
+
+ memset(&essidbuf, 0, sizeof(essidbuf));
+
+ if (erq->flags) {
+ if (erq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ if (copy_from_user(&essidbuf, erq->pointer, erq->length))
+ return -EFAULT;
+
+ essidbuf[erq->length] = '\0';
+ }
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ memcpy(priv->desired_essid, essidbuf, sizeof(priv->desired_essid));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ int active;
+ int err = 0;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (netif_running(dev)) {
+ err = orinoco_hw_get_essid(priv, &active, essidbuf);
+ if (err)
+ return err;
+ } else {
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ memcpy(essidbuf, priv->desired_essid, sizeof(essidbuf));
+ orinoco_unlock(priv, &flags);
+ }
+
+ erq->flags = 1;
+ erq->length = strlen(essidbuf) + 1;
+ if (erq->pointer)
+ if (copy_to_user(erq->pointer, essidbuf, erq->length))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ unsigned long flags;
+
+ if (nrq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ memset(nickbuf, 0, sizeof(nickbuf));
+
+ if (copy_from_user(nickbuf, nrq->pointer, nrq->length))
+ return -EFAULT;
+
+ nickbuf[nrq->length] = '\0';
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ memcpy(priv->nick, nickbuf, sizeof(priv->nick));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE+1);
+ orinoco_unlock(priv, &flags);
+
+ nrq->length = strlen(nickbuf)+1;
+
+ if (copy_to_user(nrq->pointer, nickbuf, sizeof(nickbuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_freq *frq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int chan = -1;
+ unsigned long flags;
+
+ /* We can only use this in Ad-Hoc demo mode to set the operating
+ * frequency, or in IBSS mode to set the frequency where the IBSS
+ * will be created - Jean II */
+ if (priv->iw_mode != IW_MODE_ADHOC)
+ return -EOPNOTSUPP;
+
+ if ( (frq->e == 0) && (frq->m <= 1000) ) {
+ /* Setting by channel number */
+ chan = frq->m;
+ } else {
+ /* Setting by frequency - search the table */
+ int mult = 1;
+ int i;
+
+ for (i = 0; i < (6 - frq->e); i++)
+ mult *= 10;
+
+ for (i = 0; i < NUM_CHANNELS; i++)
+ if (frq->m == (channel_frequency[i] * mult))
+ chan = i+1;
+ }
+
+ if ( (chan < 1) || (chan > NUM_CHANNELS) ||
+ ! (priv->channel_mask & (1 << (chan-1)) ) )
+ return -EINVAL;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ priv->channel = chan;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ u16 val;
+ int err;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFSYSTEMSCALE, &val);
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return err;
+
+ srq->value = val;
+ srq->fixed = 0; /* auto */
+
+ return 0;
+}
+
+static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int val = srq->value;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ if ((val < 1) || (val > 3))
+ return -EINVAL;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ priv->ap_density = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setrts(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int val = rrq->value;
+ unsigned long flags;
+
+ if (rrq->disabled)
+ val = 2347;
+
+ if ( (val < 0) || (val > 2347) )
+ return -EINVAL;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ priv->rts_thresh = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (priv->has_mwo) {
+ if (frq->disabled)
+ priv->mwo_robust = 0;
+ else {
+ if (frq->fixed)
+ printk(KERN_WARNING "%s: Fixed fragmentation is "
+ "not supported on this firmware. "
+ "Using MWO robust instead.\n", dev->name);
+ priv->mwo_robust = 1;
+ }
+ } else {
+ if (frq->disabled)
+ priv->frag_thresh = 2346;
+ else {
+ if ( (frq->value < 256) || (frq->value > 2346) )
+ err = -EINVAL;
+ else
+ priv->frag_thresh = frq->value & ~0x1; /* must be even */
+ }
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 val;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (priv->has_mwo) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val ? 2347 : 0;
+ frq->disabled = ! val;
+ frq->fixed = 0;
+ } else {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val;
+ frq->disabled = (val >= 2346);
+ frq->fixed = 1;
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+ int ratemode = -1;
+ int bitrate; /* 100s of kilobits */
+ int i;
+ unsigned long flags;
+
+	/* As user space doesn't know our highest rate, it uses -1
+ * to ask us to set the highest rate. Test it using "iwconfig
+ * ethX rate auto" - Jean II */
+ if (rrq->value == -1)
+ bitrate = 110;
+ else {
+ if (rrq->value % 100000)
+ return -EINVAL;
+ bitrate = rrq->value / 100000;
+ }
+
+ if ( (bitrate != 10) && (bitrate != 20) &&
+ (bitrate != 55) && (bitrate != 110) )
+ return -EINVAL;
+
+ for (i = 0; i < BITRATE_TABLE_SIZE; i++)
+ if ( (bitrate_table[i].bitrate == bitrate) &&
+ (bitrate_table[i].automatic == ! rrq->fixed) ) {
+ ratemode = i;
+ break;
+ }
+
+ if (ratemode == -1)
+ return -EINVAL;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ priv->bitratemode = ratemode;
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int ratemode;
+ int i;
+ u16 val;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ ratemode = priv->bitratemode;
+
+ BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));
+
+ rrq->value = bitrate_table[ratemode].bitrate * 100000;
+ rrq->fixed = ! bitrate_table[ratemode].automatic;
+ rrq->disabled = 0;
+
+ /* If the interface is running we try to find more about the
+ current mode */
+ if (netif_running(dev)) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CURRENTTXRATE, &val);
+ if (err)
+ goto out;
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
+ /* Note : in Lucent firmware, the return value of
+ * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
+ * and therefore is totally different from the
+ * encoding of HERMES_RID_CNFTXRATECONTROL.
+ * Don't forget that 6Mb/s is really 5.5Mb/s */
+ if (val == 6)
+ rrq->value = 5500000;
+ else
+ rrq->value = val * 1000000;
+ break;
+ case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
+ case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
+ for (i = 0; i < BITRATE_TABLE_SIZE; i++)
+ if (bitrate_table[i].intersil_txratectrl == val) {
+ ratemode = i;
+ break;
+ }
+ if (i >= BITRATE_TABLE_SIZE)
+ printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
+ dev->name, val);
+
+ rrq->value = bitrate_table[ratemode].bitrate * 100000;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_param *prq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int err = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (prq->disabled) {
+ priv->pm_on = 0;
+ } else {
+ switch (prq->flags & IW_POWER_MODE) {
+ case IW_POWER_UNICAST_R:
+ priv->pm_mcast = 0;
+ priv->pm_on = 1;
+ break;
+ case IW_POWER_ALL_R:
+ priv->pm_mcast = 1;
+ priv->pm_on = 1;
+ break;
+ case IW_POWER_ON:
+ /* No flags : but we may have a value - Jean II */
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto out;
+
+ if (prq->flags & IW_POWER_TIMEOUT) {
+ priv->pm_on = 1;
+ priv->pm_timeout = prq->value / 1000;
+ }
+ if (prq->flags & IW_POWER_PERIOD) {
+ priv->pm_on = 1;
+ priv->pm_period = prq->value / 1000;
+ }
+ /* It's valid to not have a value if we are just toggling
+ * the flags... Jean II */
+ if(!priv->pm_on) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_param *prq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 enable, period, timeout, mcast;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION, &period);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
+ if (err)
+ goto out;
+
+ prq->disabled = !enable;
+ /* Note : by default, display the period */
+ if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ prq->flags = IW_POWER_TIMEOUT;
+ prq->value = timeout * 1000;
+ } else {
+ prq->flags = IW_POWER_PERIOD;
+ prq->value = period * 1000;
+ }
+ if (mcast)
+ prq->flags |= IW_POWER_ALL_R;
+ else
+ prq->flags |= IW_POWER_UNICAST_R;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getretry(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 short_limit, long_limit, lifetime;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &short_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &long_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &lifetime);
+ if (err)
+ goto out;
+
+ rrq->disabled = 0; /* Can't be disabled */
+
+ /* Note : by default, display the retry number */
+ if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ rrq->flags = IW_RETRY_LIFETIME;
+ rrq->value = lifetime * 1000; /* ??? */
+ } else {
+ /* By default, display the min number */
+ if ((rrq->flags & IW_RETRY_MAX)) {
+ rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ rrq->value = long_limit;
+ } else {
+ rrq->flags = IW_RETRY_LIMIT;
+ rrq->value = short_limit;
+ if(short_limit != long_limit)
+ rrq->flags |= IW_RETRY_MIN;
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setibssport(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int val = *( (int *) wrq->u.name );
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ priv->ibss_port = val ;
+
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+ return 0;
+}
+
+static int orinoco_ioctl_getibssport(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int *val = (int *)wrq->u.name;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ *val = priv->ibss_port;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setport3(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int val = *( (int *) wrq->u.name );
+ int err = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ switch (val) {
+ case 0: /* Try to do IEEE ad-hoc mode */
+ if (! priv->has_ibss) {
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 0;
+
+ break;
+
+ case 1: /* Try to do Lucent proprietary ad-hoc mode */
+ if (! priv->has_port3) {
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (! err)
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getport3(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ int *val = (int *)wrq->u.name;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ *val = priv->prefer_port3;
+ orinoco_unlock(priv, &flags);
+ return 0;
+}
+
+/* Spy is used for link quality/strength measurements in Ad-Hoc mode
+ * Jean II */
+static int orinoco_ioctl_setspy(struct net_device *dev, struct iw_point *srq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct sockaddr address[IW_MAX_SPY];
+ int number = srq->length;
+ int i;
+ int err = 0;
+ unsigned long flags;
+
+ /* Check the number of addresses */
+ if (number > IW_MAX_SPY)
+ return -E2BIG;
+
+ /* Get the data in the driver */
+ if (srq->pointer) {
+ if (copy_from_user(address, srq->pointer,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ }
+
+	/* Make sure nobody messes with the structure while we do */
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ /* orinoco_lock() doesn't disable interrupts, so make sure the
+	 * interrupt rx path doesn't get confused while we copy */
+ priv->spy_number = 0;
+
+ if (number > 0) {
+ /* Extract the addresses */
+ for (i = 0; i < number; i++)
+ memcpy(priv->spy_address[i], address[i].sa_data,
+ ETH_ALEN);
+ /* Reset stats */
+ memset(priv->spy_stat, 0,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ /* Set number of addresses */
+ priv->spy_number = number;
+ }
+
+ /* Now, let the others play */
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getspy(struct net_device *dev, struct iw_point *srq)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct sockaddr address[IW_MAX_SPY];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+ int number;
+ int i;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ number = priv->spy_number;
+ if ((number > 0) && (srq->pointer)) {
+ /* Create address struct */
+ for (i = 0; i < number; i++) {
+ memcpy(address[i].sa_data, priv->spy_address[i],
+ ETH_ALEN);
+ address[i].sa_family = AF_UNIX;
+ }
+ /* Copy stats */
+ /* In theory, we should disable irqs while copying the stats
+		 * because the rx path might update them in the middle...
+		 * Bah, who cares? - Jean II */
+ memcpy(&spy_stat, priv->spy_stat,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ for (i=0; i < number; i++)
+ priv->spy_stat[i].updated = 0;
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ /* Push stuff to user space */
+ srq->length = number;
+ if(copy_to_user(srq->pointer, address,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ if(copy_to_user(srq->pointer + (sizeof(struct sockaddr)*number),
+ &spy_stat, sizeof(struct iw_quality) * number))
+ return -EFAULT;
+
+ return 0;
+}
+
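+/* Main wireless extensions ioctl dispatcher.  Handlers that modify a
+ * setting set `changed', and the new configuration is pushed to a
+ * running card via orinoco_reconfigure() at the end. */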
+static int
+orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int err = 0;
+ int tmp;
+ int changed = 0;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+	/* In theory, we could allow most of the SET stuff to be
+ * done. In practice, the lapse of time at startup when the
+ * card is not ready is very short, so why bother... Note
+ * that netif_device_present is different from up/down
+ * (ifconfig), when the device is not yet up, it is usually
+ * already ready... Jean II */
+ if (! netif_device_present(dev))
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "IEEE 802.11-DS");
+ break;
+
+ case SIOCGIWAP:
+ wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
+ err = orinoco_hw_get_bssid(priv, wrq->u.ap_addr.sa_data);
+ break;
+
+ case SIOCGIWRANGE:
+ err = orinoco_ioctl_getiwrange(dev, &wrq->u.data);
+ break;
+
+ case SIOCSIWMODE:
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ switch (wrq->u.mode) {
+ case IW_MODE_ADHOC:
+ if (! (priv->has_ibss || priv->has_port3) )
+ err = -EINVAL;
+ else {
+ priv->iw_mode = IW_MODE_ADHOC;
+ changed = 1;
+ }
+ break;
+
+ case IW_MODE_INFRA:
+ priv->iw_mode = IW_MODE_INFRA;
+ changed = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ set_port_type(priv);
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCGIWMODE:
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ wrq->u.mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCSIWENCODE:
+ err = orinoco_ioctl_setiwencode(dev, &wrq->u.encoding);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWENCODE:
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_getiwencode(dev, &wrq->u.encoding);
+ break;
+
+ case SIOCSIWESSID:
+ err = orinoco_ioctl_setessid(dev, &wrq->u.essid);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWESSID:
+ err = orinoco_ioctl_getessid(dev, &wrq->u.essid);
+ break;
+
+ case SIOCSIWNICKN:
+ err = orinoco_ioctl_setnick(dev, &wrq->u.data);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWNICKN:
+ err = orinoco_ioctl_getnick(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWFREQ:
+ tmp = orinoco_hw_get_freq(priv);
+ if (tmp < 0) {
+ err = tmp;
+ } else {
+ wrq->u.freq.m = tmp;
+ wrq->u.freq.e = 1;
+ }
+ break;
+
+ case SIOCSIWFREQ:
+ err = orinoco_ioctl_setfreq(dev, &wrq->u.freq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWSENS:
+ err = orinoco_ioctl_getsens(dev, &wrq->u.sens);
+ break;
+
+ case SIOCSIWSENS:
+ err = orinoco_ioctl_setsens(dev, &wrq->u.sens);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRTS:
+ wrq->u.rts.value = priv->rts_thresh;
+ wrq->u.rts.disabled = (wrq->u.rts.value == 2347);
+ wrq->u.rts.fixed = 1;
+ break;
+
+ case SIOCSIWRTS:
+ err = orinoco_ioctl_setrts(dev, &wrq->u.rts);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCSIWFRAG:
+ err = orinoco_ioctl_setfrag(dev, &wrq->u.frag);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWFRAG:
+ err = orinoco_ioctl_getfrag(dev, &wrq->u.frag);
+ break;
+
+ case SIOCSIWRATE:
+ err = orinoco_ioctl_setrate(dev, &wrq->u.bitrate);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRATE:
+ err = orinoco_ioctl_getrate(dev, &wrq->u.bitrate);
+ break;
+
+ case SIOCSIWPOWER:
+ err = orinoco_ioctl_setpower(dev, &wrq->u.power);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWPOWER:
+ err = orinoco_ioctl_getpower(dev, &wrq->u.power);
+ break;
+
+ case SIOCGIWTXPOW:
+ /* The card only supports one tx power, so this is easy */
+ wrq->u.txpower.value = 15; /* dBm */
+ wrq->u.txpower.fixed = 1;
+ wrq->u.txpower.disabled = 0;
+ wrq->u.txpower.flags = IW_TXPOW_DBM;
+ break;
+
+ case SIOCSIWRETRY:
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCGIWRETRY:
+ err = orinoco_ioctl_getretry(dev, &wrq->u.retry);
+ break;
+
+ case SIOCSIWSPY:
+ err = orinoco_ioctl_setspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWSPY:
+ err = orinoco_ioctl_getspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWPRIV:
+ if (wrq->u.data.pointer) {
+ struct iw_priv_args privtab[] = {
+ { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
+ { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
+ { SIOCIWFIRSTPRIV + 0x2,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_port3" },
+ { SIOCIWFIRSTPRIV + 0x3, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_port3" },
+ { SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_preamble" },
+ { SIOCIWFIRSTPRIV + 0x5, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_preamble" },
+ { SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_ibssport" },
+ { SIOCIWFIRSTPRIV + 0x7, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_ibssport" },
+ { SIOCIWLASTPRIV, 0, 0, "dump_recs" },
+ };
+
+			wrq->u.data.length = ARRAY_SIZE(privtab);
+ if (copy_to_user(wrq->u.data.pointer, privtab, sizeof(privtab)))
+ err = -EFAULT;
+ }
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x0: /* force_reset */
+ case SIOCIWFIRSTPRIV + 0x1: /* card_reset */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
+
+ schedule_work(&priv->reset_work);
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x2: /* set_port3 */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setport3(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x3: /* get_port3 */
+ err = orinoco_ioctl_getport3(dev, wrq);
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x4: /* set_preamble */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+		/* 802.11b has recently defined a short preamble option.
+		 * Basically, the PHY header has been reduced in size.
+		 * This increases performance, especially at high rates
+		 * (the preamble is transmitted at 1Mb/s); unfortunately
+		 * it causes compatibility trouble... - Jean II */
+		if (priv->has_preamble) {
+ int val = *( (int *) wrq->u.name );
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ if (val)
+ priv->preamble = 1;
+ else
+ priv->preamble = 0;
+ orinoco_unlock(priv, &flags);
+ changed = 1;
+ } else
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x5: /* get_preamble */
+		if (priv->has_preamble) {
+ int *val = (int *)wrq->u.name;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+ *val = priv->preamble;
+ orinoco_unlock(priv, &flags);
+ } else
+ err = -EOPNOTSUPP;
+ break;
+ case SIOCIWFIRSTPRIV + 0x6: /* set_ibssport */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setibssport(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x7: /* get_ibssport */
+ err = orinoco_ioctl_getibssport(dev, wrq);
+ break;
+
+ case SIOCIWLASTPRIV:
+ err = orinoco_debug_dump_recs(dev);
+ if (err)
+ printk(KERN_ERR "%s: Unable to dump records (%d)\n",
+ dev->name, err);
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (! err && changed && netif_running(dev)) {
+ err = orinoco_reconfigure(dev);
+ }
+
+ TRACE_EXIT(dev->name);
+
+ return err;
+}
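+
+/*
+ * Usage note (illustrative sketch only, not part of the driver): the
+ * private ioctls advertised via SIOCGIWPRIV above are reached from
+ * user space through an ordinary socket, much as iwpriv(8) does.
+ * The helper name below is made up for the example; it assumes only
+ * the standard <linux/wireless.h> definitions:
+ *
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <sys/socket.h>
+ *	#include <linux/wireless.h>
+ *
+ *	static int orinoco_force_reset(const char *ifname)
+ *	{
+ *		struct iwreq wrq;
+ *		int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		memset(&wrq, 0, sizeof(wrq));
+ *		strncpy(wrq.ifr_name, ifname, IFNAMSIZ);
+ *		ret = ioctl(fd, SIOCIWFIRSTPRIV + 0x0, &wrq);
+ *		close(fd);
+ *		return ret;
+ *	}
+ *
+ * SIOCIWFIRSTPRIV + 0x0 is "force_reset" in the table above and, like
+ * the other "set" private ioctls, requires CAP_NET_ADMIN.
+ */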
+
+struct {
+ u16 rid;
+ char *name;
+ int displaytype;
+#define DISPLAY_WORDS 0
+#define DISPLAY_BYTES 1
+#define DISPLAY_STRING 2
+#define DISPLAY_XSTRING 3
+} record_table[] = {
+#define DEBUG_REC(name,type) { HERMES_RID_##name, #name, DISPLAY_##type }
+ DEBUG_REC(CNFPORTTYPE,WORDS),
+ DEBUG_REC(CNFOWNMACADDR,BYTES),
+ DEBUG_REC(CNFDESIREDSSID,STRING),
+ DEBUG_REC(CNFOWNCHANNEL,WORDS),
+ DEBUG_REC(CNFOWNSSID,STRING),
+ DEBUG_REC(CNFOWNATIMWINDOW,WORDS),
+ DEBUG_REC(CNFSYSTEMSCALE,WORDS),
+ DEBUG_REC(CNFMAXDATALEN,WORDS),
+ DEBUG_REC(CNFPMENABLED,WORDS),
+ DEBUG_REC(CNFPMEPS,WORDS),
+ DEBUG_REC(CNFMULTICASTRECEIVE,WORDS),
+ DEBUG_REC(CNFMAXSLEEPDURATION,WORDS),
+ DEBUG_REC(CNFPMHOLDOVERDURATION,WORDS),
+ DEBUG_REC(CNFOWNNAME,STRING),
+ DEBUG_REC(CNFOWNDTIMPERIOD,WORDS),
+ DEBUG_REC(CNFMULTICASTPMBUFFERING,WORDS),
+ DEBUG_REC(CNFWEPENABLED_AGERE,WORDS),
+ DEBUG_REC(CNFMANDATORYBSSID_SYMBOL,WORDS),
+ DEBUG_REC(CNFWEPDEFAULTKEYID,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY0,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY1,BYTES),
+ DEBUG_REC(CNFMWOROBUST_AGERE,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY2,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY3,BYTES),
+ DEBUG_REC(CNFWEPFLAGS_INTERSIL,WORDS),
+ DEBUG_REC(CNFWEPKEYMAPPINGTABLE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFMAXASSOCSTA,WORDS),
+ DEBUG_REC(CNFKEYLENGTH_SYMBOL,WORDS),
+ DEBUG_REC(CNFTXCONTROL,WORDS),
+ DEBUG_REC(CNFROAMINGMODE,WORDS),
+ DEBUG_REC(CNFHOSTAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFRCVCRCERROR,WORDS),
+ DEBUG_REC(CNFMMLIFE,WORDS),
+ DEBUG_REC(CNFALTRETRYCOUNT,WORDS),
+ DEBUG_REC(CNFBEACONINT,WORDS),
+ DEBUG_REC(CNFAPPCFINFO,WORDS),
+ DEBUG_REC(CNFSTAPCFINFO,WORDS),
+ DEBUG_REC(CNFPRIORITYQUSAGE,WORDS),
+ DEBUG_REC(CNFTIMCTRL,WORDS),
+ DEBUG_REC(CNFTHIRTY2TALLY,WORDS),
+ DEBUG_REC(CNFENHSECURITY,WORDS),
+ DEBUG_REC(CNFGROUPADDRESSES,BYTES),
+ DEBUG_REC(CNFCREATEIBSS,WORDS),
+ DEBUG_REC(CNFFRAGMENTATIONTHRESHOLD,WORDS),
+ DEBUG_REC(CNFRTSTHRESHOLD,WORDS),
+ DEBUG_REC(CNFTXRATECONTROL,WORDS),
+ DEBUG_REC(CNFPROMISCUOUSMODE,WORDS),
+ DEBUG_REC(CNFBASICRATES_SYMBOL,WORDS),
+ DEBUG_REC(CNFPREAMBLE_SYMBOL,WORDS),
+ DEBUG_REC(CNFSHORTPREAMBLE,WORDS),
+ DEBUG_REC(CNFWEPKEYS_AGERE,BYTES),
+ DEBUG_REC(CNFEXCLUDELONGPREAMBLE,WORDS),
+ DEBUG_REC(CNFTXKEY_AGERE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATIONRSPTO,WORDS),
+ DEBUG_REC(CNFBASICRATES,WORDS),
+ DEBUG_REC(CNFSUPPORTEDRATES,WORDS),
+ DEBUG_REC(CNFTICKTIME,WORDS),
+ DEBUG_REC(CNFSCANREQUEST,WORDS),
+ DEBUG_REC(CNFJOINREQUEST,WORDS),
+ DEBUG_REC(CNFAUTHENTICATESTATION,WORDS),
+ DEBUG_REC(CNFCHANNELINFOREQUEST,WORDS),
+ DEBUG_REC(MAXLOADTIME,WORDS),
+ DEBUG_REC(DOWNLOADBUFFER,WORDS),
+ DEBUG_REC(PRIID,WORDS),
+ DEBUG_REC(PRISUPRANGE,WORDS),
+ DEBUG_REC(CFIACTRANGES,WORDS),
+ DEBUG_REC(NICSERNUM,XSTRING),
+ DEBUG_REC(NICID,WORDS),
+ DEBUG_REC(MFISUPRANGE,WORDS),
+ DEBUG_REC(CFISUPRANGE,WORDS),
+ DEBUG_REC(CHANNELLIST,WORDS),
+ DEBUG_REC(REGULATORYDOMAINS,WORDS),
+ DEBUG_REC(TEMPTYPE,WORDS),
+/* DEBUG_REC(CIS,BYTES), */
+ DEBUG_REC(STAID,WORDS),
+ DEBUG_REC(CURRENTSSID,STRING),
+ DEBUG_REC(CURRENTBSSID,BYTES),
+ DEBUG_REC(COMMSQUALITY,WORDS),
+ DEBUG_REC(CURRENTTXRATE,WORDS),
+ DEBUG_REC(CURRENTBEACONINTERVAL,WORDS),
+ DEBUG_REC(CURRENTSCALETHRESHOLDS,WORDS),
+ DEBUG_REC(PROTOCOLRSPTIME,WORDS),
+ DEBUG_REC(SHORTRETRYLIMIT,WORDS),
+ DEBUG_REC(LONGRETRYLIMIT,WORDS),
+ DEBUG_REC(MAXTRANSMITLIFETIME,WORDS),
+ DEBUG_REC(MAXRECEIVELIFETIME,WORDS),
+ DEBUG_REC(CFPOLLABLE,WORDS),
+ DEBUG_REC(AUTHENTICATIONALGORITHMS,WORDS),
+ DEBUG_REC(PRIVACYOPTIONIMPLEMENTED,WORDS),
+ DEBUG_REC(OWNMACADDR,BYTES),
+ DEBUG_REC(SCANRESULTSTABLE,WORDS),
+ DEBUG_REC(PHYTYPE,WORDS),
+ DEBUG_REC(CURRENTCHANNEL,WORDS),
+ DEBUG_REC(CURRENTPOWERSTATE,WORDS),
+ DEBUG_REC(CCAMODE,WORDS),
+ DEBUG_REC(SUPPORTEDDATARATES,WORDS),
+ DEBUG_REC(BUILDSEQ,BYTES),
+ DEBUG_REC(FWID,XSTRING)
+#undef DEBUG_REC
+};
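+
+/* For reference, each DEBUG_REC(name,type) entry above expands to an
+ * initializer of the form
+ *
+ *	{ HERMES_RID_CNFPORTTYPE, "CNFPORTTYPE", DISPLAY_WORDS }
+ *
+ * i.e. record_table pairs every Hermes RID with a printable name and
+ * a display format for orinoco_debug_dump_recs() below. */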
+
+#define DEBUG_LTV_SIZE 128
+
+static int orinoco_debug_dump_recs(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ hermes_t *hw = &priv->hw;
+ u8 *val8;
+ u16 *val16;
+ int i,j;
+ u16 length;
+ int err;
+
+ /* I'm not sure: we might have a lock here, so we'd better go
+ atomic, just in case. */
+ val8 = kmalloc(DEBUG_LTV_SIZE + 2, GFP_ATOMIC);
+ if (! val8)
+ return -ENOMEM;
+ val16 = (u16 *)val8;
+
+ for (i = 0; i < ARRAY_SIZE(record_table); i++) {
+ u16 rid = record_table[i].rid;
+ int len;
+
+ memset(val8, 0, DEBUG_LTV_SIZE + 2);
+
+ err = hermes_read_ltv(hw, USER_BAP, rid, DEBUG_LTV_SIZE,
+ &length, val8);
+ if (err) {
+ DEBUG(0, "Error %d reading RID 0x%04x\n", err, rid);
+ continue;
+ }
+ val16 = (u16 *)val8;
+ if (length == 0)
+ continue;
+
+ printk(KERN_DEBUG "%-15s (0x%04x): length=%d (%d bytes)\tvalue=",
+ record_table[i].name,
+ rid, length, (length-1)*2);
+ len = min(((int)length-1)*2, DEBUG_LTV_SIZE);
+
+ switch (record_table[i].displaytype) {
+ case DISPLAY_WORDS:
+ for (j = 0; j < len / 2; j++)
+ printk("%04X-", le16_to_cpu(val16[j]));
+ break;
+
+ case DISPLAY_BYTES:
+ default:
+ for (j = 0; j < len; j++)
+ printk("%02X:", val8[j]);
+ break;
+
+ case DISPLAY_STRING:
+ len = min(len, le16_to_cpu(val16[0])+2);
+ val8[len] = '\0';
+ printk("\"%s\"", (char *)&val16[1]);
+ break;
+
+ case DISPLAY_XSTRING:
+ printk("'%s'", (char *)val8);
+ }
+
+ printk("\n");
+ }
+
+ kfree(val8);
+
+ return 0;
+}
+
+/********************************************************************/
+/* Debugging */
+/********************************************************************/
+
+#if 0
+static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
+{
+ printk(KERN_DEBUG "RX descriptor:\n");
+ printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
+ printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
+ printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
+ printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
+ printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
+ printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
+ printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
+
+ printk(KERN_DEBUG "IEEE 802.11 header:\n");
+ printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
+ frame->p80211.frame_ctl);
+ printk(KERN_DEBUG " duration_id = 0x%04x\n",
+ frame->p80211.duration_id);
+ printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr1[0], frame->p80211.addr1[1],
+ frame->p80211.addr1[2], frame->p80211.addr1[3],
+ frame->p80211.addr1[4], frame->p80211.addr1[5]);
+ printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr2[0], frame->p80211.addr2[1],
+ frame->p80211.addr2[2], frame->p80211.addr2[3],
+ frame->p80211.addr2[4], frame->p80211.addr2[5]);
+ printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr3[0], frame->p80211.addr3[1],
+ frame->p80211.addr3[2], frame->p80211.addr3[3],
+ frame->p80211.addr3[4], frame->p80211.addr3[5]);
+ printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
+ frame->p80211.seq_ctl);
+ printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr4[0], frame->p80211.addr4[1],
+ frame->p80211.addr4[2], frame->p80211.addr4[3],
+ frame->p80211.addr4[4], frame->p80211.addr4[5]);
+ printk(KERN_DEBUG " data_len = 0x%04x\n",
+ frame->p80211.data_len);
+
+ printk(KERN_DEBUG "IEEE 802.3 header:\n");
+ printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_dest[0], frame->p8023.h_dest[1],
+ frame->p8023.h_dest[2], frame->p8023.h_dest[3],
+ frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
+ printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_source[0], frame->p8023.h_source[1],
+ frame->p8023.h_source[2], frame->p8023.h_source[3],
+ frame->p8023.h_source[4], frame->p8023.h_source[5]);
+ printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
+
+ printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
+ printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
+ printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
+ printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
+ printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
+ frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
+ printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
+}
+#endif /* 0 */
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+EXPORT_SYMBOL(alloc_orinocodev);
+EXPORT_SYMBOL(free_orinocodev);
+
+EXPORT_SYMBOL(__orinoco_up);
+EXPORT_SYMBOL(__orinoco_down);
+EXPORT_SYMBOL(orinoco_stop);
+EXPORT_SYMBOL(orinoco_reinit_firmware);
+
+EXPORT_SYMBOL(orinoco_interrupt);
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (David Gibson <hermes@gibson.dropbear.id.au>, "
+ "Pavel Roskin <proski@gnu.org>, et al)";
+
+static int __init init_orinoco(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return 0;
+}
+
+static void __exit exit_orinoco(void)
+{
+}
+
+module_init(init_orinoco);
+module_exit(exit_orinoco);
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
new file mode 100644
index 000000000000..13e42c2afb27
--- /dev/null
+++ b/drivers/net/wireless/orinoco.h
@@ -0,0 +1,153 @@
+/* orinoco.h
+ *
+ * Common definitions to all pieces of the various orinoco
+ * drivers
+ */
+
+#ifndef _ORINOCO_H
+#define _ORINOCO_H
+
+#define DRIVER_VERSION "0.14alpha2"
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/version.h>
+
+#include "hermes.h"
+
+/* To enable debug messages */
+//#define ORINOCO_DEBUG 3
+
+#define WIRELESS_SPY // enable iwspy support
+
+#define ORINOCO_MAX_KEY_SIZE 14
+#define ORINOCO_MAX_KEYS 4
+
+struct orinoco_key {
+ u16 len; /* always stored as little-endian */
+ char data[ORINOCO_MAX_KEY_SIZE];
+} __attribute__ ((packed));
+
+typedef enum {
+ FIRMWARE_TYPE_AGERE,
+ FIRMWARE_TYPE_INTERSIL,
+ FIRMWARE_TYPE_SYMBOL
+} fwtype_t;
+
+struct orinoco_private {
+ void *card; /* Pointer to card dependent structure */
+ int (*hard_reset)(struct orinoco_private *);
+
+ /* Synchronisation stuff */
+ spinlock_t lock;
+ int hw_unavailable;
+ struct work_struct reset_work;
+
+ /* driver state */
+ int open;
+ u16 last_linkstatus;
+
+ /* Net device stuff */
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ struct iw_statistics wstats;
+
+ /* Hardware control variables */
+ hermes_t hw;
+ u16 txfid;
+
+ /* Capabilities of the hardware/firmware */
+ fwtype_t firmware_type;
+ char fw_name[32];
+ int ibss_port;
+ int nicbuf_size;
+ u16 channel_mask;
+
+ /* Boolean capabilities */
+ unsigned int has_ibss:1;
+ unsigned int has_port3:1;
+ unsigned int has_wep:1;
+ unsigned int has_big_wep:1;
+ unsigned int has_mwo:1;
+ unsigned int has_pm:1;
+ unsigned int has_preamble:1;
+ unsigned int has_sensitivity:1;
+ unsigned int broken_disableport:1;
+
+	/* Configuration parameters */
+ u32 iw_mode;
+ int prefer_port3;
+ u16 wep_on, wep_restrict, tx_key;
+ struct orinoco_key keys[ORINOCO_MAX_KEYS];
+ int bitratemode;
+ char nick[IW_ESSID_MAX_SIZE+1];
+ char desired_essid[IW_ESSID_MAX_SIZE+1];
+ u16 frag_thresh, mwo_robust;
+ u16 channel;
+ u16 ap_density, rts_thresh;
+ u16 pm_on, pm_mcast, pm_period, pm_timeout;
+ u16 preamble;
+#ifdef WIRELESS_SPY
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+#endif
+
+ /* Configuration dependent variables */
+ int port_type, createibss;
+ int promiscuous, mc_count;
+};
+
+#ifdef ORINOCO_DEBUG
+extern int orinoco_debug;
+#define DEBUG(n, args...) do { if (orinoco_debug>(n)) printk(KERN_DEBUG args); } while(0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif /* ORINOCO_DEBUG */
+
+#define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__);
+#define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__);
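+
+/* Example (sketch): a call such as
+ *	DEBUG(0, "Error %d reading RID 0x%04x\n", err, rid);
+ * (as used in orinoco.c) prints via printk(KERN_DEBUG ...) only when
+ * orinoco_debug is greater than the level given as the first argument;
+ * with ORINOCO_DEBUG undefined, DEBUG() calls compile away entirely. */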
+
+/********************************************************************/
+/* Exported prototypes */
+/********************************************************************/
+
+extern struct net_device *alloc_orinocodev(int sizeof_card,
+ int (*hard_reset)(struct orinoco_private *));
+extern void free_orinocodev(struct net_device *dev);
+extern int __orinoco_up(struct net_device *dev);
+extern int __orinoco_down(struct net_device *dev);
+extern int orinoco_stop(struct net_device *dev);
+extern int orinoco_reinit_firmware(struct net_device *dev);
+extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+
+/********************************************************************/
+/* Locking and synchronization functions */
+/********************************************************************/
+
+/* These functions *must* be inline or they will break horribly on
+ * SPARC, due to its weird semantics for save/restore flags. "extern
+ * inline" should prevent the kernel from linking, or the module from
+ * loading, if they are not inlined. */
+extern inline int orinoco_lock(struct orinoco_private *priv,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&priv->lock, *flags);
+ if (priv->hw_unavailable) {
+ DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
+ priv->ndev);
+ spin_unlock_irqrestore(&priv->lock, *flags);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+extern inline void orinoco_unlock(struct orinoco_private *priv,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&priv->lock, *flags);
+}
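+
+/* Typical calling pattern, as used throughout orinoco.c (sketch only;
+ * callers must cope with the lock being refused while the hardware is
+ * unavailable):
+ *
+ *	unsigned long flags;
+ *
+ *	if (orinoco_lock(priv, &flags) != 0)
+ *		return -EBUSY;
+ *	... touch priv / the hardware ...
+ *	orinoco_unlock(priv, &flags);
+ */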
+
+#endif /* _ORINOCO_H */
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
new file mode 100644
index 000000000000..74a8227256aa
--- /dev/null
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -0,0 +1,636 @@
+/* orinoco_cs.c (formerly known as dldwd_cs.c)
+ *
+ * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
+ * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
+ * EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others).
+ * It should also be usable on various Prism II based cards such as the
+ * Linksys, D-Link and Farallon Skyline. It should also work on Symbol
+ * cards such as the 3Com AirConnect and Ericsson WLAN.
+ *
+ * Copyright notice & release notes in file orinoco.c
+ */
+
+#define DRIVER_NAME "orinoco_cs"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/config.h>
+#ifdef __IN_PCMCIA_PACKAGE__
+#include <pcmcia/k_compat.h>
+#endif /* __IN_PCMCIA_PACKAGE__ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "orinoco.h"
+
+/********************************************************************/
+/* Module stuff */
+/********************************************************************/
+
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco, Prism II based and similar wireless cards");
+MODULE_LICENSE("Dual MPL/GPL");
+
+/* Module parameters */
+
+/* Some D-Link cards have a buggy CIS. They do work properly at 5V, but
+ * don't have any CIS entry for it. This works around that... */
+static int ignore_cis_vcc; /* = 0 */
+module_param(ignore_cis_vcc, int, 0);
+MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket");
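+/* For such cards the check can be skipped at load time, e.g.
+ *	modprobe orinoco_cs ignore_cis_vcc=1
+ * (the CIS-matching loop in orinoco_cs_config() below prints a hint to
+ * this effect when no configuration entry matches). */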
+
+/********************************************************************/
+/* Magic constants */
+/********************************************************************/
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card
+ * configuration database.
+ */
+static dev_info_t dev_info = DRIVER_NAME;
+
+/********************************************************************/
+/* Data structures */
+/********************************************************************/
+
+/* PCMCIA specific device information (goes in the card field of
+ * struct orinoco_private */
+struct orinoco_pccard {
+ dev_link_t link;
+ dev_node_t node;
+
+ /* Used to handle hard reset */
+ /* yuck, we need this hack to work around the insanity of the
+ * PCMCIA layer */
+ unsigned long hard_reset_in_progress;
+};
+
+/*
+ * A linked list of "instances" of the device. Each actual PCMCIA
+ * card corresponds to one device instance, and is described by one
+ * dev_link_t structure (defined in ds.h).
+ */
+static dev_link_t *dev_list; /* = NULL */
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+/* device methods */
+static int orinoco_cs_hard_reset(struct orinoco_private *priv);
+
+/* PCMCIA gumpf */
+static void orinoco_cs_config(dev_link_t * link);
+static void orinoco_cs_release(dev_link_t * link);
+static int orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+static dev_link_t *orinoco_cs_attach(void);
+static void orinoco_cs_detach(dev_link_t *);
+
+/********************************************************************/
+/* Device methods */
+/********************************************************************/
+
+static int
+orinoco_cs_hard_reset(struct orinoco_private *priv)
+{
+ struct orinoco_pccard *card = priv->card;
+ dev_link_t *link = &card->link;
+ int err;
+
+ /* We need atomic ops here, because we're not holding the lock */
+ set_bit(0, &card->hard_reset_in_progress);
+
+ err = pcmcia_reset_card(link->handle, NULL);
+ if (err)
+ return err;
+
+ msleep(100);
+ clear_bit(0, &card->hard_reset_in_progress);
+
+ return 0;
+}
+
+/********************************************************************/
+/* PCMCIA stuff */
+/********************************************************************/
+
+/*
+ * This creates an "instance" of the driver, allocating local data
+ * structures for one device. The device is registered with Card
+ * Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a card
+ * insertion event. */
+static dev_link_t *
+orinoco_cs_attach(void)
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+ struct orinoco_pccard *card;
+ dev_link_t *link;
+ client_reg_t client_reg;
+ int ret;
+
+ dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
+ if (! dev)
+ return NULL;
+ priv = netdev_priv(dev);
+ card = priv->card;
+
+ /* Link both structures together */
+ link = &card->link;
+ link->priv = dev;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = orinoco_interrupt;
+ link->irq.Instance = dev;
+
+ /* General socket configuration defaults can go here. In this
+ * client, we assume very little, and rely on the CIS for
+ * almost everything. In most clients, many details (i.e.,
+ * number, sizes, and attributes of IO windows) are fixed by
+ * the nature of the device, and can be hard-wired here. */
+ link->conf.Attributes = 0;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Register with Card Services */
+ /* FIXME: need a lock? */
+ link->next = dev_list;
+ dev_list = link;
+
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &orinoco_cs_event;
+ client_reg.Version = 0x0210; /* FIXME: what does this mean? */
+ client_reg.event_callback_args.client_data = link;
+
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ orinoco_cs_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* orinoco_cs_attach */
+
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void orinoco_cs_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+ struct net_device *dev = link->priv;
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+
+ BUG_ON(*linkp == NULL);
+
+ if (link->state & DEV_CONFIG)
+ orinoco_cs_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, and free it */
+ *linkp = link->next;
+ DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev);
+ if (link->dev) {
+ DEBUG(0, PFX "About to unregister net device %p\n",
+ dev);
+ unregister_netdev(dev);
+ }
+ free_orinocodev(dev);
+} /* orinoco_cs_detach */
+
+/*
+ * orinoco_cs_config() is scheduled to run after a CARD_INSERTION
+ * event is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
+ */
+
+#define CS_CHECK(fn, ret) do { \
+ last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \
+ } while (0)
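+
+/* For clarity: a call such as
+ *
+ *	CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ *
+ * expands (roughly) to
+ *
+ *	last_fn = RequestIRQ;
+ *	if ((last_ret = pcmcia_request_irq(link->handle, &link->irq)) != 0)
+ *		goto cs_failed;
+ *
+ * so the cs_failed path can report exactly which Card Services call
+ * failed via cs_error(). */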
+
+static void
+orinoco_cs_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ client_handle_t handle = link->handle;
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_pccard *card = priv->card;
+ hermes_t *hw = &priv->hw;
+ int last_fn, last_ret;
+ u_char buf[64];
+ config_info_t conf;
+ cisinfo_t info;
+ tuple_t tuple;
+ cisparse_t parse;
+ void __iomem *mem;
+
+ CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info));
+
+ /*
+ * This reads the card's CONFIG tuple to find its
+ * configuration registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up the current Vcc */
+ CS_CHECK(GetConfigurationInfo,
+ pcmcia_get_configuration_info(handle, &conf));
+ link->conf.Vcc = conf.Vcc;
+
+ /*
+ * In this loop, we scan the CIS for configuration table
+ * entries, each of which describes a valid card
+ * configuration, including voltage, IO window, memory window,
+ * and interrupt settings.
+ *
+ * We make no assumptions about the card to be configured: we
+ * use just the information available in the CIS. In an ideal
+ * world, this would work for any PCMCIA card, but it requires
+ * a complete and accurate CIS. In practice, a driver usually
+ * "knows" most of these things without consulting the CIS,
+ * and most client drivers will only use the CIS to fill in
+ * implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ while (1) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_cftable_entry_t dflt = { .index = 0 };
+
+ if ( (pcmcia_get_tuple_data(handle, &tuple) != 0)
+ || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0))
+ goto next_entry;
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+ dflt = *cfg;
+ if (cfg->index == 0)
+ goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
+				DEBUG(2, "orinoco_cs_config: Vcc mismatch "
+				      "(conf.Vcc = %d, CIS = %d)\n", conf.Vcc,
+				      cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+ if (!ignore_cis_vcc)
+ goto next_entry;
+ }
+ } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
+				DEBUG(2, "orinoco_cs_config: Vcc mismatch "
+				      "(conf.Vcc = %d, CIS = %d)\n", conf.Vcc,
+				      dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+				if (!ignore_cis_vcc)
+ goto next_entry;
+ }
+ }
+
+ if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+ /* Do we need to allocate an interrupt? */
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io =
+ (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines =
+ io->flags & CISTPL_IO_LINES_MASK;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 =
+ link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ if (pcmcia_request_io(link->handle, &link->io) != 0)
+ goto next_entry;
+ }
+
+
+ /* If we got this far, we're cool! */
+
+ break;
+
+ next_entry:
+ if (link->io.NumPorts1)
+ pcmcia_release_io(link->handle, &link->io);
+ last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ if (last_ret == CS_NO_MORE_ITEMS) {
+ printk(KERN_ERR PFX "GetNextTuple(): No matching "
+ "CIS configuration. Maybe you need the "
+ "ignore_cis_vcc=1 parameter.\n");
+ goto cs_failed;
+ }
+ }
+
+ /*
+ * Allocate an interrupt line. Note that this does not assign
+ * a handler to the interrupt, unless the 'Handler' member of
+ * the irq structure is initialized.
+ */
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
+ mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
+ if (!mem)
+ goto cs_failed;
+
+ hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+
+ /*
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping, and putting the
+ * card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration,
+ pcmcia_request_configuration(link->handle, &link->conf));
+
+ /* Ok, we have the configuration, prepare to register the netdev */
+ dev->base_addr = link->io.BasePort1;
+ dev->irq = link->irq.AssignedIRQ;
+ SET_MODULE_OWNER(dev);
+ card->node.major = card->node.minor = 0;
+
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ /* Tell the stack we exist */
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR PFX "register_netdev() failed\n");
+ goto failed;
+ }
+
+ /* At this point, the dev_node_t structure(s) needs to be
+ * initialized and arranged in a linked list at link->dev. */
+ strcpy(card->node.dev_name, dev->name);
+ link->dev = &card->node; /* link->dev being non-NULL is also
+ used to indicate that the
+ net_device has been registered */
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ /* Finally, report what we've done */
+ printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
+ dev->name, link->conf.ConfigIndex,
+ link->conf.Vcc / 10, link->conf.Vcc % 10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
+ link->conf.Vpp1 % 10);
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2 + link->io.NumPorts2 - 1);
+ printk("\n");
+
+ return;
+
+ cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+
+ failed:
+ orinoco_cs_release(link);
+} /* orinoco_cs_config */
+
+/*
+ * After a card is removed, orinoco_cs_release() will unregister the
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+orinoco_cs_release(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* We're committed to taking the device away now, so mark the
+ * hardware as unavailable */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->hw_unavailable++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Don't bother checking to see if these succeed or not */
+ pcmcia_release_configuration(link->handle);
+ if (link->io.NumPorts1)
+ pcmcia_release_io(link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ pcmcia_release_irq(link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+ if (priv->hw.iobase)
+ ioport_unmap(priv->hw.iobase);
+} /* orinoco_cs_release */
+
+/*
+ * The card status event handler. Mostly, this schedules other stuff
+ * to run after an event is received.
+ */
+static int
+orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_pccard *card = priv->card;
+ int err = 0;
+ unsigned long flags;
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ orinoco_cs_config(link);
+ break;
+
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ /* Mark the device as stopped, to block IO until later */
+ if (link->state & DEV_CONFIG) {
+ /* This is probably racy, but I can't think of
+ a better way, short of rewriting the PCMCIA
+ layer to not suck :-( */
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: %s: Error %d downing interface\n",
+ dev->name,
+ event == CS_EVENT_PM_SUSPEND ? "SUSPEND" : "RESET_PHYSICAL",
+ err);
+
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ /* FIXME: should we double check that this is
+ * the same card as we had before */
+ pcmcia_request_configuration(link->handle, &link->conf);
+
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
+ dev->name, err);
+ break;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+ priv->hw_unavailable--;
+
+ if (priv->open && ! priv->hw_unavailable) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card\n",
+ dev->name, err);
+
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+ break;
+ }
+
+ return err;
+} /* orinoco_cs_event */
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (David Gibson <hermes@gibson.dropbear.id.au>, "
+ "Pavel Roskin <proski@gnu.org>, et al)";
+
+static struct pcmcia_driver orinoco_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = DRIVER_NAME,
+ },
+ .attach = orinoco_cs_attach,
+ .detach = orinoco_cs_detach,
+};
+
+static int __init
+init_orinoco_cs(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+
+ return pcmcia_register_driver(&orinoco_driver);
+}
+
+static void __exit
+exit_orinoco_cs(void)
+{
+ pcmcia_unregister_driver(&orinoco_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_orinoco_cs);
+module_exit(exit_orinoco_cs);
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
new file mode 100644
index 000000000000..ff30d37e12e2
--- /dev/null
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -0,0 +1,417 @@
+/* orinoco_pci.c
+ *
+ * Driver for Prism II devices that have a direct PCI interface
+ * (i.e., not in a Pcmcia or PLX bridge)
+ *
+ * Specifically here we're talking about the Linksys WMP11
+ *
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
+ *
+ * Some of this code is borrowed from orinoco_plx.c
+ * Copyright (C) 2001 Daniel Barlow <dan AT telent.net>
+ * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
+ * has been copied from it. linux-wlan-ng-0.1.10 is originally :
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * This file originally written by:
+ * Copyright (C) 2001 Jean Tourrilhes <jt AT hpl.hp.com>
+ * And is now maintained by:
+ * (C) Copyright David Gibson, IBM Corp. 2002-2003.
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+/*
+ * Theory of operation...
+ * -------------------
+ * Maybe you had a look in orinoco_plx. Well, this is totally different...
+ *
+ * The card contains only one PCI region, which contains all the usual
+ * hermes registers.
+ *
+ * The driver will memory-map this region in normal memory. Because
+ * the hermes registers are mapped in normal memory and not in ISA I/O
+ * port space, we can't use the usual inw/outw macros and we need to
+ * use readw/writew instead.
+ * This slight difference forces us to compile our own version of
+ * hermes.c with the register access macros changed. That's a bit
+ * hackish but works fine.
+ *
+ * Note that the PCI region is pretty big (4K). That's much more than
+ * the usual set of hermes registers (0x0 -> 0x3E). I've got a strong
+ * suspicion that the whole memory space of the adapter is in fact in
+ * this region. Accessing the adapter memory directly instead of going
+ * through the usual registers would speed up operations
+ * significantly...
+ *
+ * Finally, the card looks like this :
+-----------------------
+ Bus 0, device 14, function 0:
+ Network controller: PCI device 1260:3873 (Harris Semiconductor) (rev 1).
+ IRQ 11.
+ Master Capable. Latency=248.
+ Prefetchable 32 bit memory at 0xffbcc000 [0xffbccfff].
+-----------------------
+00:0e.0 Network controller: Harris Semiconductor: Unknown device 3873 (rev 01)
+ Subsystem: Unknown device 1737:3874
+ Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B-
+ Status: Cap+ 66Mhz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR-
+ Latency: 248 set, cache line size 08
+ Interrupt: pin A routed to IRQ 11
+ Region 0: Memory at ffbcc000 (32-bit, prefetchable) [size=4K]
+ Capabilities: [dc] Power Management version 2
+ Flags: PMEClk- AuxPwr- DSI- D1+ D2+ PME+
+ Status: D0 PME-Enable- DSel=0 DScale=0 PME-
+-----------------------
+ *
+ * That's all..
+ *
+ * Jean II
+ */
+
+#define DRIVER_NAME "orinoco_pci"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/fcntl.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "hermes.h"
+#include "orinoco.h"
+
+/* All the magic there is from wlan-ng */
+/* Magic offset of the reset register of the PCI card */
+#define HERMES_PCI_COR (0x26)
+/* Magic bitmask to reset the card */
+#define HERMES_PCI_COR_MASK (0x0080)
+/* Magic timeouts for doing the reset.
+ * Those times are straight from wlan-ng, and it is claimed that they
+ * are necessary. Alan will kill me. Take your time and grab a coffee. */
+#define HERMES_PCI_COR_ONT (250) /* ms */
+#define HERMES_PCI_COR_OFFT (500) /* ms */
+#define HERMES_PCI_COR_BUSYT (500) /* ms */
+
+/* Orinoco PCI specific data */
+struct orinoco_pci_card {
+ void __iomem *pci_ioaddr;
+};
+
+/*
+ * Do a soft reset of the PCI card using the Configuration Option Register
+ * We need this to get going...
+ * This is the part of the code that is strongly inspired by wlan-ng.
+ *
+ * Note: This code runs with interrupts enabled. This means that many
+ * interrupts will occur while we are here. This is why we use
+ * jiffies to regulate time instead of a straight mdelay(). Usually we
+ * need only around 245 iterations of the loop to do a 250 ms delay.
+ *
+ * Note bis : Don't try to access HERMES_CMD during the reset phase.
+ * It just won't work !
+ */
+static int
+orinoco_pci_cor_reset(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ unsigned long timeout;
+ u16 reg;
+
+	/* Assert the reset until the card notices */
+ hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK);
+ mdelay(HERMES_PCI_COR_ONT);
+
+ /* Give time for the card to recover from this hard effort */
+ hermes_write_regn(hw, PCI_COR, 0x0000);
+ mdelay(HERMES_PCI_COR_OFFT);
+
+ /* The card is ready when it's no longer busy */
+ timeout = jiffies + (HERMES_PCI_COR_BUSYT * HZ / 1000);
+ reg = hermes_read_regn(hw, CMD);
+ while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
+ mdelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* Still busy? */
+ if (reg & HERMES_CMD_BUSY) {
+ printk(KERN_ERR PFX "Busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialise a card. Mostly similar to PLX code.
+ */
+static int orinoco_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err = 0;
+ unsigned long pci_iorange;
+ u16 __iomem *pci_ioaddr = NULL;
+ unsigned long pci_iolen;
+ struct orinoco_private *priv = NULL;
+ struct orinoco_pci_card *card;
+ struct net_device *dev = NULL;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRIVER_NAME);
+ if (err != 0) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
+ goto fail_resources;
+ }
+
+ /* Resource 0 is mapped to the hermes registers */
+ pci_iorange = pci_resource_start(pdev, 0);
+ pci_iolen = pci_resource_len(pdev, 0);
+ pci_ioaddr = ioremap(pci_iorange, pci_iolen);
+	if (!pci_ioaddr) {
+		printk(KERN_ERR PFX "Cannot remap hardware registers\n");
+		err = -EIO;
+		goto fail_map;
+	}
+
+ /* Allocate network device */
+ dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset);
+ if (! dev) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+ card->pci_ioaddr = pci_ioaddr;
+ dev->mem_start = pci_iorange;
+ dev->mem_end = pci_iorange + pci_iolen - 1;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ hermes_struct_init(&priv->hw, pci_ioaddr, HERMES_32BIT_REGSPACING);
+
+ printk(KERN_DEBUG PFX "Detected device %s, mem:0x%lx-0x%lx, irq %d\n",
+ pci_name(pdev), dev->mem_start, dev->mem_end, pdev->irq);
+
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
+ err = -EBUSY;
+ goto fail_irq;
+ }
+ dev->irq = pdev->irq;
+
+ /* Perform a COR reset to start the card */
+ err = orinoco_pci_cor_reset(priv);
+ if (err) {
+ printk(KERN_ERR PFX "Initial reset failed\n");
+ goto fail;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Failed to register net device\n");
+ goto fail;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ fail:
+ free_irq(pdev->irq, dev);
+
+ fail_irq:
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+
+ fail_alloc:
+ iounmap(pci_ioaddr);
+
+ fail_map:
+ pci_release_regions(pdev);
+
+ fail_resources:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct orinoco_pci_card *card = priv->card;
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+ iounmap(card->pci_ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int orinoco_pci_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ printk(KERN_DEBUG "%s: Orinoco-PCI entering sleep mode (state=%d)\n",
+ dev->name, state);
+
+ err = orinoco_lock(priv, &flags);
+ if (err) {
+ printk(KERN_ERR "%s: hw_unavailable on orinoco_pci_suspend\n",
+ dev->name);
+ return err;
+ }
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: orinoco_pci_suspend(): Error %d downing interface\n",
+ dev->name, err);
+
+ netif_device_detach(dev);
+
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, 3);
+
+ return 0;
+}
+
+static int orinoco_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ printk(KERN_DEBUG "%s: Orinoco-PCI waking up\n", dev->name);
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware on orinoco_pci_resume()\n",
+ dev->name, err);
+ return err;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+
+ priv->hw_unavailable--;
+
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card on orinoco_pci_resume()\n",
+ dev->name, err);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static struct pci_device_id orinoco_pci_pci_id_table[] = {
+ /* Intersil Prism 3 */
+ {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
+ /* Intersil Prism 2.5 */
+ {0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID,},
+ /* Samsung MagicLAN SWL-2210P */
+ {0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, orinoco_pci_pci_id_table);
+
+static struct pci_driver orinoco_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = orinoco_pci_pci_id_table,
+ .probe = orinoco_pci_init_one,
+ .remove = __devexit_p(orinoco_pci_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
+};
+
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Pavel Roskin <proski@gnu.org>,"
+ " David Gibson <hermes@gibson.dropbear.id.au> &"
+ " Jean Tourrilhes <jt@hpl.hp.com>)";
+MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static int __init orinoco_pci_init(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return pci_module_init(&orinoco_pci_driver);
+}
+
+static void __exit orinoco_pci_exit(void)
+{
+ pci_unregister_driver(&orinoco_pci_driver);
+}
+
+module_init(orinoco_pci_init);
+module_exit(orinoco_pci_exit);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
new file mode 100644
index 000000000000..7ab05b89fb3f
--- /dev/null
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -0,0 +1,419 @@
+/* orinoco_plx.c
+ *
+ * Driver for Prism II devices which would usually be driven by orinoco_cs,
+ * but are connected to the PCI bus by a PLX9052.
+ *
+ * Current maintainers (as of 29 September 2003) are:
+ * Pavel Roskin <proski AT gnu.org>
+ * and David Gibson <hermes AT gibson.dropbear.id.au>
+ *
+ * (C) Copyright David Gibson, IBM Corp. 2001-2003.
+ * Copyright (C) 2001 Daniel Barlow
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+
+ * Caution: this is experimental and probably buggy. For success and
+ * failure reports for different cards and adaptors, see
+ * orinoco_plx_pci_id_table near the end of the file. If you have a
+ * card we don't have the PCI id for, and it looks like it should work,
+ * drop me a mail with the id and "it works"/"it doesn't work".
+ *
+ * Note: if everything gets detected fine but it doesn't actually send
+ * or receive packets, your first port of call should probably be to
+ * try newer firmware in the card. Especially if you're doing Ad-Hoc
+ * modes.
+ *
+ * The actual driving is done by orinoco.c, this is just resource
+ * allocation stuff. The explanation below is courtesy of Ryan Niemi
+ * on the linux-wlan-ng list at
+ * http://archives.neohapsis.com/archives/dev/linux-wlan/2001-q1/0026.html
+ *
+ * The PLX9052-based cards (WL11000 and several others) are a
+ * different beast than the usual PCMCIA-based PRISM2 configuration
+ * expected by wlan-ng. Here's the general details on how the WL11000
+ * PCI adapter works:
+ *
+ * - Two PCI I/O address spaces, one 0x80 long which contains the
+ * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
+ * slot I/O address space.
+ *
+ * - One PCI memory address space, mapped to the PCMCIA memory space
+ * (containing the CIS).
+ *
+ * After identifying the I/O and memory space, you can read through
+ * the memory space to confirm the CIS's device ID or manufacturer ID
+ * to make sure it's the expected card. Keep in mind that the PCMCIA
+ * spec specifies the CIS as the lower 8 bits of each word read from
+ * the CIS, so to read the bytes of the CIS, read every other byte
+ * (0,2,4,...). Passing that test, you need to enable the I/O address
+ * space on the PCMCIA card via the PCMCIA COR register. This is the
+ * first byte following the CIS. In my case (which may not have any
+ * relation to what's on the PRISM2 cards), COR was at offset 0x800
+ * within the PCI memory space. Write 0x41 to the COR register to
+ * enable I/O mode and to select level triggered interrupts. To
+ * confirm you actually succeeded, read the COR register back and make
+ * sure it actually got set to 0x41, in case you have an unexpected
+ * card inserted.
+ *
+ * Following that, you can treat the second PCI I/O address space (the
+ * one that's not 0x80 in length) as the PCMCIA I/O space.
+ *
+ * Note that in Eumitcom's source for their drivers, they register
+ * the interrupt as edge triggered when registering it with the
+ * Windows kernel. I don't recall how to register edge triggered on
+ * Linux (if it can be done at all). But in some experimentation, I
+ * don't see much operational difference between using either
+ * interrupt mode. Don't mess with the interrupt mode in the COR
+ * register though, as the PLX9052 wants level triggers with the way
+ * the serial EEPROM configures it on the WL11000.
+ *
+ * There's some other little quirks related to timing that I bumped
+ * into, but I don't recall right now. Also, there's two variants of
+ * the WL11000 I've seen, revision A1 and T2. These seem to differ
+ * slightly in the timings configured in the wait-state generator in
+ * the PLX9052. There have also been some comments from Eumitcom that
+ * cards shouldn't be hot swapped, apparently due to risk of cooking
+ * the PLX9052. I'm unsure why they believe this, as I can't see
+ * anything in the design that would really cause a problem, except
+ * for crashing drivers not written to expect it. And having developed
+ * drivers for the WL11000, I'd say it's quite tricky to write code
+ * that will successfully deal with a hot unplug. Very odd things
+ * happen on the I/O side of things. But anyway, be warned. Despite
+ * that, I've hot-swapped a number of times during debugging and
+ * driver development for various reasons (stuck WAIT# line after the
+ * radio card's firmware locks up).
+ *
+ * Hope this is enough info for someone to add PLX9052 support to the
+ * wlan-ng card. In the case of the WL11000, the PCI ID's are
+ * 0x1639/0x0200, with matching subsystem ID's. Other PLX9052-based
+ * manufacturers other than Eumitcom (or on cards other than the
+ * WL11000) may have different PCI ID's.
+ *
+ * If anyone needs any more specific info, let me know. I haven't had
+ * time to implement support myself yet, and with the way things are
+ * going, might not have time for a while..
+ */
+
+#define DRIVER_NAME "orinoco_plx"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/fcntl.h>
+
+#include <pcmcia/cisreg.h>
+
+#include "hermes.h"
+#include "orinoco.h"
+
+#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
+#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
+#define COR_RESET (0x80) /* reset bit in the COR register */
+#define PLX_RESET_TIME (500) /* milliseconds */
+
+#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
+#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
+
+static const u8 cis_magic[] = {
+ 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
+};
+
+/* Orinoco PLX specific data */
+struct orinoco_plx_card {
+ void __iomem *attr_mem;
+};
+
+/*
+ * Do a soft reset of the card using the Configuration Option Register
+ */
+static int orinoco_plx_cor_reset(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ struct orinoco_plx_card *card = priv->card;
+ u8 __iomem *attr_mem = card->attr_mem;
+ unsigned long timeout;
+ u16 reg;
+
+ writeb(COR_VALUE | COR_RESET, attr_mem + COR_OFFSET);
+ mdelay(1);
+
+ writeb(COR_VALUE, attr_mem + COR_OFFSET);
+ mdelay(1);
+
+ /* Just in case, wait more until the card is no longer busy */
+ timeout = jiffies + (PLX_RESET_TIME * HZ / 1000);
+ reg = hermes_read_regn(hw, CMD);
+ while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
+ mdelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* Did we timeout ? */
+ if (reg & HERMES_CMD_BUSY) {
+ printk(KERN_ERR PFX "Busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+
+static int orinoco_plx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err = 0;
+ u8 __iomem *attr_mem = NULL;
+ u32 csr_reg, plx_addr;
+ struct orinoco_private *priv = NULL;
+ struct orinoco_plx_card *card;
+ unsigned long pccard_ioaddr = 0;
+ unsigned long pccard_iolen = 0;
+ struct net_device *dev = NULL;
+ void __iomem *mem;
+ int i;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRIVER_NAME);
+ if (err != 0) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
+ goto fail_resources;
+ }
+
+ /* Resource 1 is mapped to PLX-specific registers */
+ plx_addr = pci_resource_start(pdev, 1);
+
+ /* Resource 2 is mapped to the PCMCIA attribute memory */
+ attr_mem = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!attr_mem) {
+ printk(KERN_ERR PFX "Cannot remap PCMCIA space\n");
+ err = -ENOMEM;
+ goto fail_map_attr;
+ }
+
+ /* Resource 3 is mapped to the PCMCIA I/O address space */
+ pccard_ioaddr = pci_resource_start(pdev, 3);
+ pccard_iolen = pci_resource_len(pdev, 3);
+
+ mem = pci_iomap(pdev, 3, 0);
+ if (!mem) {
+ err = -ENOMEM;
+ goto fail_map_io;
+ }
+
+ /* Allocate network device */
+ dev = alloc_orinocodev(sizeof(*card), orinoco_plx_cor_reset);
+ if (!dev) {
+ printk(KERN_ERR PFX "Cannot allocate network device\n");
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+ card->attr_mem = attr_mem;
+ dev->base_addr = pccard_ioaddr;
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
+
+ printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 PLX device "
+ "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
+ pccard_ioaddr);
+
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
+ err = -EBUSY;
+ goto fail_irq;
+ }
+ dev->irq = pdev->irq;
+
+ /* bjoern: We need to tell the card to enable interrupts, in
+ case the serial eprom didn't do this already. See the
+ PLX9052 data book, p8-1 and 8-24 for reference. */
+ csr_reg = inl(plx_addr + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ csr_reg |= PLX_INTCSR_INTEN;
+ outl(csr_reg, plx_addr + PLX_INTCSR);
+ csr_reg = inl(plx_addr + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ printk(KERN_ERR PFX "Cannot enable interrupts\n");
+ err = -EIO;
+ goto fail;
+ }
+ }
+
+ err = orinoco_plx_cor_reset(priv);
+ if (err) {
+ printk(KERN_ERR PFX "Initial reset failed\n");
+ goto fail;
+ }
+
+ printk(KERN_DEBUG PFX "CIS: ");
+ for (i = 0; i < 16; i++) {
+ printk("%02X:", readb(attr_mem + 2*i));
+ }
+ printk("\n");
+
+ /* Verify whether a supported PC card is present */
+ /* FIXME: we probably need to be smarter about this */
+ for (i = 0; i < sizeof(cis_magic); i++) {
+ if (cis_magic[i] != readb(attr_mem + 2*i)) {
+ printk(KERN_ERR PFX "The CIS value of Prism2 PC "
+ "card is unexpected\n");
+ err = -EIO;
+ goto fail;
+ }
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register network device\n");
+ goto fail;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ fail:
+ free_irq(pdev->irq, dev);
+
+ fail_irq:
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+
+ fail_alloc:
+ pci_iounmap(pdev, mem);
+
+ fail_map_io:
+ iounmap(attr_mem);
+
+ fail_map_attr:
+ pci_release_regions(pdev);
+
+ fail_resources:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv;
+ struct orinoco_plx_card *card;
+ u8 __iomem *attr_mem;
+
+ BUG_ON(!dev);
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+ attr_mem = card->attr_mem;
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+ pci_iounmap(pdev, priv->hw.iobase);
+ iounmap(attr_mem);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_device_id orinoco_plx_pci_id_table[] = {
+ {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
+ {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
+ {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
+ {0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W,
+ Eumitcom PCI WL11000,
+ Addtron AWA-100 */
+ {0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */
+ {0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */
+ {0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */
+ {0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */
+ {0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by
+ Brendan W. McAdams <rit AT jacked-in.org> */
+ {0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by
+ Damien Persohn <damien AT persohn.net> */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, orinoco_plx_pci_id_table);
+
+static struct pci_driver orinoco_plx_driver = {
+ .name = DRIVER_NAME,
+ .id_table = orinoco_plx_pci_id_table,
+ .probe = orinoco_plx_init_one,
+ .remove = __devexit_p(orinoco_plx_remove_one),
+};
+
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Pavel Roskin <proski@gnu.org>,"
+ " David Gibson <hermes@gibson.dropbear.id.au>,"
+ " Daniel Barlow <dan@telent.net>)";
+MODULE_AUTHOR("Daniel Barlow <dan@telent.net>");
+MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static int __init orinoco_plx_init(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return pci_module_init(&orinoco_plx_driver);
+}
+
+static void __exit orinoco_plx_exit(void)
+{
+ pci_unregister_driver(&orinoco_plx_driver);
+ ssleep(1);
+}
+
+module_init(orinoco_plx_init);
+module_exit(orinoco_plx_exit);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
new file mode 100644
index 000000000000..85893f42445b
--- /dev/null
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -0,0 +1,276 @@
+/* orinoco_tmd.c
+ *
+ * Driver for Prism II devices which would usually be driven by orinoco_cs,
+ * but are connected to the PCI bus by a TMD7160.
+ *
+ * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net>
+ * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+
+ * Caution: this is experimental and probably buggy. For success and
+ * failure reports for different cards and adaptors, see
+ * orinoco_tmd_pci_id_table near the end of the file. If you have a
+ * card we don't have the PCI id for and it looks like it should work,
+ * drop me a mail with the id and "it works"/"it doesn't work".
+ *
+ * Note: if everything gets detected fine but it doesn't actually send
+ * or receive packets, your first port of call should probably be to
+ * try newer firmware in the card, especially if you're using Ad-Hoc
+ * mode.
+ *
+ * The actual driving is done by orinoco.c, this is just resource
+ * allocation stuff.
+ *
+ * This driver is modeled after the orinoco_plx driver. The main
+ * difference is that the TMD chip has only I/O port ranges and no
+ * memory space, i.e. no access to the CIS. Compared to the PLX chip,
+ * the roles of the I/O ranges are swapped.
+ *
+ * Pheecom sells cards with the TMD chip as "ASIC version"
+ */
+
+#define DRIVER_NAME "orinoco_tmd"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/fcntl.h>
+
+#include <pcmcia/cisreg.h>
+
+#include "hermes.h"
+#include "orinoco.h"
+
+#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
+#define COR_RESET (0x80) /* reset bit in the COR register */
+#define TMD_RESET_TIME (500) /* milliseconds */
+
+/* Orinoco TMD specific data */
+struct orinoco_tmd_card {
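+	/* I/O base of the bridge (PCI resource 1); the card's COR is
+	 * reached through this address in orinoco_tmd_cor_reset() */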
+ u32 tmd_io;
+};
+
+
+/*
+ * Do a soft reset of the card using the Configuration Option Register
+ */
+static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ struct orinoco_tmd_card *card = priv->card;
+ u32 addr = card->tmd_io;
+ unsigned long timeout;
+ u16 reg;
+
+ outb(COR_VALUE | COR_RESET, addr);
+ mdelay(1);
+
+ outb(COR_VALUE, addr);
+ mdelay(1);
+
+ /* Just in case, wait more until the card is no longer busy */
+ timeout = jiffies + (TMD_RESET_TIME * HZ / 1000);
+ reg = hermes_read_regn(hw, CMD);
+ while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
+ mdelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* Did we timeout ? */
+ if (reg & HERMES_CMD_BUSY) {
+ printk(KERN_ERR PFX "Busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+
+static int orinoco_tmd_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct orinoco_private *priv = NULL;
+ struct orinoco_tmd_card *card;
+ struct net_device *dev = NULL;
+ void __iomem *mem;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRIVER_NAME);
+ if (err != 0) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
+ goto fail_resources;
+ }
+
+ mem = pci_iomap(pdev, 2, 0);
+ if (!mem) {
+ err = -ENOMEM;
+ goto fail_iomap;
+ }
+
+ /* Allocate network device */
+ dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset);
+ if (!dev) {
+ printk(KERN_ERR PFX "Cannot allocate network device\n");
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+ card->tmd_io = pci_resource_start(pdev, 1);
+ dev->base_addr = pci_resource_start(pdev, 2);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
+
+ printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 TMD device "
+ "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
+ dev->base_addr);
+
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
+ err = -EBUSY;
+ goto fail_irq;
+ }
+ dev->irq = pdev->irq;
+
+ err = orinoco_tmd_cor_reset(priv);
+ if (err) {
+ printk(KERN_ERR PFX "Initial reset failed\n");
+ goto fail;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register network device\n");
+ goto fail;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ fail:
+ free_irq(pdev->irq, dev);
+
+ fail_irq:
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+
+ fail_alloc:
+ pci_iounmap(pdev, mem);
+
+ fail_iomap:
+ pci_release_regions(pdev);
+
+ fail_resources:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv;
+
+ BUG_ON(!dev);
+
+ priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ pci_set_drvdata(pdev, NULL);
+ free_orinocodev(dev);
+ pci_iounmap(pdev, priv->hw.iobase);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_device_id orinoco_tmd_pci_id_table[] = {
+ {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table);
+
+static struct pci_driver orinoco_tmd_driver = {
+ .name = DRIVER_NAME,
+ .id_table = orinoco_tmd_pci_id_table,
+ .probe = orinoco_tmd_init_one,
+ .remove = __devexit_p(orinoco_tmd_remove_one),
+};
+
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Joerg Dorchain <joerg@dorchain.net>)";
+MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
+MODULE_DESCRIPTION("Driver for wireless LAN cards using the TMD7160 PCI bridge");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static int __init orinoco_tmd_init(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return pci_module_init(&orinoco_tmd_driver);
+}
+
+static void __exit orinoco_tmd_exit(void)
+{
+ pci_unregister_driver(&orinoco_tmd_driver);
+ ssleep(1);
+}
+
+module_init(orinoco_tmd_init);
+module_exit(orinoco_tmd_exit);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/wireless/prism54/Makefile b/drivers/net/wireless/prism54/Makefile
new file mode 100644
index 000000000000..fad305c76737
--- /dev/null
+++ b/drivers/net/wireless/prism54/Makefile
@@ -0,0 +1,8 @@
+# $Id: Makefile.k26,v 1.7 2004/01/30 16:24:00 ajfa Exp $
+
+prism54-objs := islpci_eth.o islpci_mgt.o \
+ isl_38xx.o isl_ioctl.o islpci_dev.o \
+ islpci_hotplug.o oid_mgt.o
+
+obj-$(CONFIG_PRISM54) += prism54.o
+
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
new file mode 100644
index 000000000000..4481ec18c5a0
--- /dev/null
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -0,0 +1,260 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "prismcompat.h"
+#include "isl_38xx.h"
+#include "islpci_dev.h"
+#include "islpci_mgt.h"
+
+/******************************************************************************
+ Device Interface & Control functions
+******************************************************************************/
+
+/**
+ * isl38xx_disable_interrupts - disable all interrupts
+ * @device: pci memory base address
+ *
+ * Masks all interrupt sources by clearing the interrupt enable
+ * register, so the device stops asserting the IRQ line. New events
+ * may still show up in the interrupt identification
+ * register located at offset %ISL38XX_INT_IDENT_REG.
+ */
+void
+isl38xx_disable_interrupts(void __iomem *device)
+{
+ isl38xx_w32_flush(device, 0x00000000, ISL38XX_INT_EN_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+}
+
+void
+isl38xx_handle_sleep_request(isl38xx_control_block *control_block,
+ int *powerstate, void __iomem *device_base)
+{
+ /* device requests to go into sleep mode
+ * check whether the transmit queues for data and management are empty */
+ if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ))
+ /* data tx queue not empty */
+ return;
+
+ if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ))
+ /* management tx queue not empty */
+ return;
+
+ /* check also whether received frames are pending */
+ if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_DATA_LQ))
+ /* data rx queue not empty */
+ return;
+
+ if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_MGMTQ))
+ /* management rx queue not empty */
+ return;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Device going to sleep mode\n");
+#endif
+
+ /* all queues are empty, allow the device to go into sleep mode */
+ *powerstate = ISL38XX_PSM_POWERSAVE_STATE;
+
+ /* assert the Sleep interrupt in the Device Interrupt Register */
+ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_SLEEP,
+ ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+}
+
+void
+isl38xx_handle_wakeup(isl38xx_control_block *control_block,
+ int *powerstate, void __iomem *device_base)
+{
+ /* device is in active state, update the powerstate flag */
+ *powerstate = ISL38XX_PSM_ACTIVE_STATE;
+
+ /* now check whether there are frames pending for the card */
+ if (!isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ)
+ && !isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ))
+ return;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_ANYTHING, "Wake up handler triggers the device\n");
+#endif
+
+ /* either data or management transmit queue has a frame pending
+ * trigger the device by setting the Update bit in the Device Int reg */
+ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
+ ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+}
+
+void
+isl38xx_trigger_device(int asleep, void __iomem *device_base)
+{
+ struct timeval current_time;
+ u32 reg, counter = 0;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
+#endif
+
+ /* check whether the device is in power save mode */
+ if (asleep) {
+ /* device is in powersave, trigger the device for wakeup */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ do_gettimeofday(&current_time);
+ DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
+ current_time.tv_sec, (long)current_time.tv_usec);
+#endif
+
+ DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
+ current_time.tv_sec, (long)current_time.tv_usec,
+ readl(device_base + ISL38XX_CTRL_STAT_REG));
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ reg = readl(device_base + ISL38XX_INT_IDENT_REG);
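+ /* a read of 0xabadface apparently means the register file is
+ * not accessible yet; in that case wait for the sleep mode bit
+ * in the status register before asserting the wakeup interrupt
+ * below */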
+ if (reg == 0xabadface) {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ do_gettimeofday(&current_time);
+ DEBUG(SHOW_TRACING,
+ "%08li.%08li Device register abadface\n",
+ current_time.tv_sec, (long)current_time.tv_usec);
+#endif
+ /* read the Device Status Register until Sleepmode bit is set */
+ while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
+ (reg & ISL38XX_CTRL_STAT_SLEEPMODE) == 0) {
+ udelay(ISL38XX_WRITEIO_DELAY);
+ counter++;
+ }
+
+ DEBUG(SHOW_TRACING,
+ "%08li.%08li Device register read %08x\n",
+ current_time.tv_sec, (long)current_time.tv_usec,
+ readl(device_base + ISL38XX_CTRL_STAT_REG));
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ do_gettimeofday(&current_time);
+ DEBUG(SHOW_TRACING,
+ "%08li.%08li Device asleep counter %i\n",
+ current_time.tv_sec, (long)current_time.tv_usec,
+ counter);
+#endif
+ }
+ /* assert the Wakeup interrupt in the Device Interrupt Register */
+ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_WAKEUP,
+ ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* perform another read on the Device Status Register */
+ reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ do_gettimeofday(&current_time);
+ DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
+ current_time.tv_sec, (long)current_time.tv_usec, reg);
+#endif
+ } else {
+ /* device is (still) awake */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Device is in active state\n");
+#endif
+ /* trigger the device by setting the Update bit in the Device Int reg */
+
+ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE,
+ ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+ }
+}
+
+void
+isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address)
+{
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "isl38xx_interface_reset\n");
+#endif
+
+ /* load the address of the control block in the device */
+ isl38xx_w32_flush(device_base, host_address, ISL38XX_CTRL_BLK_BASE_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* set the reset bit in the Device Interrupt Register */
+ isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_RESET, ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* enable the interrupt for detecting initialization */
+
+ /* Note: Do not enable other interrupts here. We want the
+ * device to be fully up before allowing any other
+ * interrupts. */
+ isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG);
+ udelay(ISL38XX_WRITEIO_DELAY); /* allow the reset to complete */
+}
+
+void
+isl38xx_enable_common_interrupts(void __iomem *device_base)
+{
+ u32 reg;
+
+ reg = (ISL38XX_INT_IDENT_UPDATE |
+ ISL38XX_INT_IDENT_SLEEP | ISL38XX_INT_IDENT_WAKEUP);
+ isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+}
+
+int
+isl38xx_in_queue(isl38xx_control_block *cb, int queue)
+{
+ const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) -
+ le32_to_cpu(cb->device_curr_frag[queue]));
+
+ /* determine the number of fragments in the queue, depending on
+ * whether it is a transmit or a receive queue */
+
+ BUG_ON(delta < 0); /* driver ptr must be ahead of device ptr */
+
+ switch (queue) {
+ /* send queues */
+ case ISL38XX_CB_TX_MGMTQ:
+ BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
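+ /* fall through: TX management entries are counted just like
+ * TX data entries */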
+ case ISL38XX_CB_TX_DATA_LQ:
+ case ISL38XX_CB_TX_DATA_HQ:
+ BUG_ON(delta > ISL38XX_CB_TX_QSIZE);
+ return delta;
+
+ /* receive queues */
+ case ISL38XX_CB_RX_MGMTQ:
+ BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
+ return ISL38XX_CB_MGMT_QSIZE - delta;
+
+ case ISL38XX_CB_RX_DATA_LQ:
+ case ISL38XX_CB_RX_DATA_HQ:
+ BUG_ON(delta > ISL38XX_CB_RX_QSIZE);
+ return ISL38XX_CB_RX_QSIZE - delta;
+ }
+ BUG();
+ return 0;
+}
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
new file mode 100644
index 000000000000..e83e4912ab66
--- /dev/null
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -0,0 +1,173 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ISL_38XX_H
+#define _ISL_38XX_H
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#define ISL38XX_CB_RX_QSIZE 8
+#define ISL38XX_CB_TX_QSIZE 32
+
+/* ISL38XX Access Point Specific definitions */
+#define ISL38XX_MAX_WDS_LINKS 8
+
+/* ISL38xx Client Specific definitions */
+#define ISL38XX_PSM_ACTIVE_STATE 0
+#define ISL38XX_PSM_POWERSAVE_STATE 1
+
+/* ISL38XX Host Interface Definitions */
+#define ISL38XX_PCI_MEM_SIZE 0x02000
+#define ISL38XX_MEMORY_WINDOW_SIZE 0x01000
+#define ISL38XX_DEV_FIRMWARE_ADDRES 0x20000
+#define ISL38XX_WRITEIO_DELAY 10 /* in us */
+#define ISL38XX_RESET_DELAY 50 /* in ms */
+#define ISL38XX_WAIT_CYCLE 10 /* in 10ms */
+#define ISL38XX_MAX_WAIT_CYCLES 10
+
+/* PCI Memory Area */
+#define ISL38XX_HARDWARE_REG 0x0000
+#define ISL38XX_CARDBUS_CIS 0x0800
+#define ISL38XX_DIRECT_MEM_WIN 0x1000
+
+/* Hardware registers */
+#define ISL38XX_DEV_INT_REG 0x0000
+#define ISL38XX_INT_IDENT_REG 0x0010
+#define ISL38XX_INT_ACK_REG 0x0014
+#define ISL38XX_INT_EN_REG 0x0018
+#define ISL38XX_GEN_PURP_COM_REG_1 0x0020
+#define ISL38XX_GEN_PURP_COM_REG_2 0x0024
+#define ISL38XX_CTRL_BLK_BASE_REG ISL38XX_GEN_PURP_COM_REG_1
+#define ISL38XX_DIR_MEM_BASE_REG 0x0030
+#define ISL38XX_CTRL_STAT_REG 0x0078
+
+/* High-end motherboards can post (queue up) PCI writes; the following
+ * register is read after a write to force a flush */
+#define ISL38XX_PCI_POSTING_FLUSH ISL38XX_INT_EN_REG
+
+/**
+ * isl38xx_w32_flush - PCI iomem write helper
+ * @base: (host) memory base address of the device
+ * @val: 32bit value (host order) to write
+ * @offset: byte offset into @base to write value to
+ *
+ * This helper takes care of writing a 32bit datum to the
+ * specified offset into the device's pci memory space, and making sure
+ * the pci memory buffers get flushed by performing one harmless read
+ * from the %ISL38XX_PCI_POSTING_FLUSH offset.
+ */
+static inline void
+isl38xx_w32_flush(void __iomem *base, u32 val, unsigned long offset)
+{
+ writel(val, base + offset);
+ (void) readl(base + ISL38XX_PCI_POSTING_FLUSH);
+}
+
+/* Device Interrupt register bits */
+#define ISL38XX_DEV_INT_RESET 0x0001
+#define ISL38XX_DEV_INT_UPDATE 0x0002
+#define ISL38XX_DEV_INT_WAKEUP 0x0008
+#define ISL38XX_DEV_INT_SLEEP 0x0010
+
+/* Interrupt Identification/Acknowledge/Enable register bits */
+#define ISL38XX_INT_IDENT_UPDATE 0x0002
+#define ISL38XX_INT_IDENT_INIT 0x0004
+#define ISL38XX_INT_IDENT_WAKEUP 0x0008
+#define ISL38XX_INT_IDENT_SLEEP 0x0010
+#define ISL38XX_INT_SOURCES 0x001E
+
+/* Control/Status register bits */
+/* Looks like there are other meaningful bits
+ 0x20004400 seen in normal operation,
+ 0x200044db at 'timeout waiting for mgmt response'
+*/
+#define ISL38XX_CTRL_STAT_SLEEPMODE 0x00000200
+#define ISL38XX_CTRL_STAT_CLKRUN 0x00800000
+#define ISL38XX_CTRL_STAT_RESET 0x10000000
+#define ISL38XX_CTRL_STAT_RAMBOOT 0x20000000
+#define ISL38XX_CTRL_STAT_STARTHALTED 0x40000000
+#define ISL38XX_CTRL_STAT_HOST_OVERRIDE 0x80000000
+
+/* Control Block definitions */
+#define ISL38XX_CB_RX_DATA_LQ 0
+#define ISL38XX_CB_TX_DATA_LQ 1
+#define ISL38XX_CB_RX_DATA_HQ 2
+#define ISL38XX_CB_TX_DATA_HQ 3
+#define ISL38XX_CB_RX_MGMTQ 4
+#define ISL38XX_CB_TX_MGMTQ 5
+#define ISL38XX_CB_QCOUNT 6
+#define ISL38XX_CB_MGMT_QSIZE 4
+#define ISL38XX_MIN_QTHRESHOLD 4 /* fragments */
+
+/* Memory Manager definitions */
+#define MGMT_FRAME_SIZE 1500 /* >= size struct obj_bsslist */
+#define MGMT_TX_FRAME_COUNT 24 /* max 4 + spare 4 + 8 init */
+#define MGMT_RX_FRAME_COUNT 24 /* 4*4 + spare 8 */
+#define MGMT_FRAME_COUNT (MGMT_TX_FRAME_COUNT + MGMT_RX_FRAME_COUNT)
+#define CONTROL_BLOCK_SIZE 1024 /* should be enough */
+#define PSM_FRAME_SIZE 1536
+#define PSM_MINIMAL_STATION_COUNT 64
+#define PSM_FRAME_COUNT PSM_MINIMAL_STATION_COUNT
+#define PSM_BUFFER_SIZE (PSM_FRAME_SIZE * PSM_FRAME_COUNT)
+#define MAX_TRAP_RX_QUEUE 4
+#define HOST_MEM_BLOCK (CONTROL_BLOCK_SIZE + PSM_BUFFER_SIZE)
+
+/* Fragment package definitions */
+#define FRAGMENT_FLAG_MF 0x0001
+#define MAX_FRAGMENT_SIZE 1536
+
+/* In monitor mode frames have a header. I don't know exactly how big those
+ * frames can be, but I've never seen any frame bigger than 1584...
+ */
+#define MAX_FRAGMENT_SIZE_RX 1600
+
+typedef struct {
+ u32 address; /* physical address on host */
+ u16 size; /* packet size */
+ u16 flags; /* set of bit-wise flags */
+} isl38xx_fragment;
+
+struct isl38xx_cb {
+ u32 driver_curr_frag[ISL38XX_CB_QCOUNT];
+ u32 device_curr_frag[ISL38XX_CB_QCOUNT];
+ isl38xx_fragment rx_data_low[ISL38XX_CB_RX_QSIZE];
+ isl38xx_fragment tx_data_low[ISL38XX_CB_TX_QSIZE];
+ isl38xx_fragment rx_data_high[ISL38XX_CB_RX_QSIZE];
+ isl38xx_fragment tx_data_high[ISL38XX_CB_TX_QSIZE];
+ isl38xx_fragment rx_data_mgmt[ISL38XX_CB_MGMT_QSIZE];
+ isl38xx_fragment tx_data_mgmt[ISL38XX_CB_MGMT_QSIZE];
+};
+
+typedef struct isl38xx_cb isl38xx_control_block;
+
+/* determine number of entries currently in queue */
+int isl38xx_in_queue(isl38xx_control_block *cb, int queue);
+
+void isl38xx_disable_interrupts(void __iomem *);
+void isl38xx_enable_common_interrupts(void __iomem *);
+
+void isl38xx_handle_sleep_request(isl38xx_control_block *, int *,
+ void __iomem *);
+void isl38xx_handle_wakeup(isl38xx_control_block *, int *, void __iomem *);
+void isl38xx_trigger_device(int, void __iomem *);
+void isl38xx_interface_reset(void __iomem *, dma_addr_t);
+
+#endif /* _ISL_38XX_H */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
new file mode 100644
index 000000000000..0f29a9c7bc2c
--- /dev/null
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -0,0 +1,2750 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
+ * (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
+ * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/pci.h>
+
+#include <asm/uaccess.h>
+
+#include "prismcompat.h"
+#include "isl_ioctl.h"
+#include "islpci_mgt.h"
+#include "isl_oid.h" /* additional types and defs for isl38xx fw */
+#include "oid_mgt.h"
+
+#include <net/iw_handler.h> /* New driver API */
+
+
+static void prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
+ u8 *wpa_ie, size_t wpa_ie_len);
+static size_t prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
+static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
+ __u32 *, char *);
+
+
+/**
+ * prism54_mib_mode_helper - MIB change mode helper function
+ * @priv: the &struct islpci_private device instance to modify
+ * @iw_mode: new mode (%IW_MODE_*)
+ *
+ * This is a helper function, hence it does not lock. Make sure
+ * caller deals with locking *if* necessary. This function sets the
+ * mode-dependent mib values and does the mapping of the Linux
+ * Wireless API modes to Device firmware modes. It also checks for
+ * valid Linux wireless modes.
+ */
+static int
+prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
+{
+ u32 config = INL_CONFIG_MANUALRUN;
+ u32 mode, bsstype;
+
+ /* For now, just catch early the Repeater and Secondary modes here */
+ if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) {
+ printk(KERN_DEBUG
+ "%s(): Sorry, Repeater mode and Secondary mode "
+ "are not yet supported by this driver.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ priv->iw_mode = iw_mode;
+
+ switch (iw_mode) {
+ case IW_MODE_AUTO:
+ mode = INL_MODE_CLIENT;
+ bsstype = DOT11_BSSTYPE_ANY;
+ break;
+ case IW_MODE_ADHOC:
+ mode = INL_MODE_CLIENT;
+ bsstype = DOT11_BSSTYPE_IBSS;
+ break;
+ case IW_MODE_INFRA:
+ mode = INL_MODE_CLIENT;
+ bsstype = DOT11_BSSTYPE_INFRA;
+ break;
+ case IW_MODE_MASTER:
+ mode = INL_MODE_AP;
+ bsstype = DOT11_BSSTYPE_INFRA;
+ break;
+ case IW_MODE_MONITOR:
+ mode = INL_MODE_PROMISCUOUS;
+ bsstype = DOT11_BSSTYPE_ANY;
+ config |= INL_CONFIG_RXANNEX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (init_wds)
+ config |= INL_CONFIG_WDS;
+ mgt_set(priv, DOT11_OID_BSSTYPE, &bsstype);
+ mgt_set(priv, OID_INL_CONFIG, &config);
+ mgt_set(priv, OID_INL_MODE, &mode);
+
+ return 0;
+}
+
+/**
+ * prism54_mib_init - fill MIB cache with defaults
+ *
+ * This function fills the MIB cache of the given device with defaults,
+ * of which many are retrieved from the global module parameter
+ * variables.
+ */
+
+void
+prism54_mib_init(islpci_private *priv)
+{
+ u32 channel, authen, wep, filter, dot1x, mlme, conformance, power, mode;
+ struct obj_buffer psm_buffer = {
+ .size = PSM_BUFFER_SIZE,
+ .addr = priv->device_psm_buffer
+ };
+
+ channel = CARD_DEFAULT_CHANNEL;
+ authen = CARD_DEFAULT_AUTHEN;
+ wep = CARD_DEFAULT_WEP;
+ filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
+ dot1x = CARD_DEFAULT_DOT1X;
+ mlme = CARD_DEFAULT_MLME_MODE;
+ conformance = CARD_DEFAULT_CONFORMANCE;
+ power = 127;
+ mode = CARD_DEFAULT_IW_MODE;
+
+ mgt_set(priv, DOT11_OID_CHANNEL, &channel);
+ mgt_set(priv, DOT11_OID_AUTHENABLE, &authen);
+ mgt_set(priv, DOT11_OID_PRIVACYINVOKED, &wep);
+ mgt_set(priv, DOT11_OID_PSMBUFFER, &psm_buffer);
+ mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &filter);
+ mgt_set(priv, DOT11_OID_DOT1XENABLE, &dot1x);
+ mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlme);
+ mgt_set(priv, OID_INL_DOT11D_CONFORMANCE, &conformance);
+ mgt_set(priv, OID_INL_OUTPUTPOWER, &power);
+
+ /* This sets all of the mode-dependent values */
+ prism54_mib_mode_helper(priv, mode);
+}
+
+/* this will be executed outside of atomic context thanks to
+ * schedule_work(), thus we can just as well use sleeping semaphore
+ * locking */
+void
+prism54_update_stats(islpci_private *priv)
+{
+ char *data;
+ int j;
+ struct obj_bss bss, *bss2;
+ union oid_res_t r;
+
+ if (down_interruptible(&priv->stats_sem))
+ return;
+
+/* Noise floor.
+ * I'm not sure if the unit is dBm.
+ * Note : If we are not connected, this value seems to be irrelevant. */
+
+ mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
+ priv->local_iwstatistics.qual.noise = r.u;
+
+/* Get the rssi of the link. To do this we need to retrieve a bss. */
+
+ /* First get the MAC address of the AP we are associated with. */
+ mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
+ data = r.ptr;
+
+ /* copy this MAC to the bss */
+ memcpy(bss.address, data, 6);
+ kfree(data);
+
+ /* now ask for the corresponding bss */
+ j = mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
+ bss2 = r.ptr;
+ /* report the rssi and use it to calculate
+ * link quality through a signal-noise
+ * ratio */
+ priv->local_iwstatistics.qual.level = bss2->rssi;
+ priv->local_iwstatistics.qual.qual =
+ bss2->rssi - priv->iwstatistics.qual.noise;
+
+ kfree(bss2);
+
+ /* report that the stats are new */
+ priv->local_iwstatistics.qual.updated = 0x7;
+
+/* Rx : unable to decrypt the MPDU */
+ mgt_get_request(priv, DOT11_OID_PRIVRXFAILED, 0, NULL, &r);
+ priv->local_iwstatistics.discard.code = r.u;
+
+/* Tx : Max MAC retries num reached */
+ mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r);
+ priv->local_iwstatistics.discard.retries = r.u;
+
+ up(&priv->stats_sem);
+
+ return;
+}
+
+struct iw_statistics *
+prism54_get_wireless_stats(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ /* If the stats are being updated return old data */
+ if (down_trylock(&priv->stats_sem) == 0) {
+ memcpy(&priv->iwstatistics, &priv->local_iwstatistics,
+ sizeof (struct iw_statistics));
+ /* They won't be marked updated for the next time */
+ priv->local_iwstatistics.qual.updated = 0;
+ up(&priv->stats_sem);
+ } else
+ priv->iwstatistics.qual.updated = 0;
+
+ /* Update our wireless stats, but do not schedule it too often
+ * (at most once per second) */
+ if ((priv->stats_timestamp == 0) ||
+ time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
+ schedule_work(&priv->stats_work);
+ priv->stats_timestamp = jiffies;
+ }
+
+ return &priv->iwstatistics;
+}
+
+static int
+prism54_commit(struct net_device *ndev, struct iw_request_info *info,
+ char *cwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ /* simply re-set the last set SSID, this should commit most stuff */
+
+ /* Commit in Monitor mode is not necessary; also, setting the essid
+ * in Monitor mode does not make sense and isn't allowed by this
+ * device's firmware */
+ if (priv->iw_mode != IW_MODE_MONITOR)
+ return mgt_set_request(priv, DOT11_OID_SSID, 0, NULL);
+ return 0;
+}
+
+static int
+prism54_get_name(struct net_device *ndev, struct iw_request_info *info,
+ char *cwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ char *capabilities;
+ union oid_res_t r;
+ int rvalue;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT) {
+ strncpy(cwrq, "NOT READY!", IFNAMSIZ);
+ return 0;
+ }
+ rvalue = mgt_get_request(priv, OID_INL_PHYCAPABILITIES, 0, NULL, &r);
+
+ switch (r.u) {
+ case INL_PHYCAP_5000MHZ:
+ capabilities = "IEEE 802.11a/b/g";
+ break;
+ case INL_PHYCAP_FAA:
+ capabilities = "IEEE 802.11b/g - FAA Support";
+ break;
+ case INL_PHYCAP_2400MHZ:
+ default:
+ capabilities = "IEEE 802.11b/g"; /* Default */
+ break;
+ }
+ strncpy(cwrq, capabilities, IFNAMSIZ);
+ return rvalue;
+}
+
+static int
+prism54_set_freq(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_freq *fwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int rvalue;
+ u32 c;
+
+ if (fwrq->m < 1000)
+ /* we have a channel number */
+ c = fwrq->m;
+ else
+ c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0;
+
+ rvalue = c ? mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &c) : -EINVAL;
+
+ /* Call commit handler */
+ return (rvalue ? rvalue : -EINPROGRESS);
+}
+
+static int
+prism54_get_freq(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_freq *fwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, DOT11_OID_CHANNEL, 0, NULL, &r);
+ fwrq->i = r.u;
+ rvalue |= mgt_get_request(priv, DOT11_OID_FREQUENCY, 0, NULL, &r);
+ fwrq->m = r.u;
+ fwrq->e = 3;
+
+ return rvalue;
+}
+
+static int
+prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE;
+
+ /* Let's see if the user passed a valid Linux Wireless mode */
+ if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) {
+ printk(KERN_DEBUG
+ "%s: %s() You passed a non-valid init_mode.\n",
+ priv->ndev->name, __FUNCTION__);
+ return -EINVAL;
+ }
+
+ down_write(&priv->mib_sem);
+
+ if (prism54_mib_mode_helper(priv, *uwrq)) {
+ up_write(&priv->mib_sem);
+ return -EOPNOTSUPP;
+ }
+
+ /* the ACL code needs an intermediate mlmeautolevel. The wpa stuff
+ * needs an extended one.
+ */
+ if ((*uwrq == IW_MODE_MASTER) && (priv->acl.policy != MAC_POLICY_OPEN))
+ mlmeautolevel = DOT11_MLME_INTERMEDIATE;
+ if (priv->wpa)
+ mlmeautolevel = DOT11_MLME_EXTENDED;
+
+ mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel);
+
+ if (mgt_commit(priv)) {
+ up_write(&priv->mib_sem);
+ return -EIO;
+ }
+ priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR)
+ ? priv->monitor_type : ARPHRD_ETHER;
+ up_write(&priv->mib_sem);
+
+ return 0;
+}
+
+/* Use mib cache */
+static int
+prism54_get_mode(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode >
+ IW_MODE_MONITOR));
+ *uwrq = priv->iw_mode;
+
+ return 0;
+}
+
+/* we use DOT11_OID_EDTHRESHOLD. From what I can guess, the card will not try to
+ * emit data if (sensitivity > rssi - noise) (in dBm).
+ * prism54_set_sens does not seem to work.
+ */
+
+static int
+prism54_set_sens(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ u32 sens;
+
+ /* by default the card sets this to 20. */
+ sens = vwrq->disabled ? 20 : vwrq->value;
+
+ return mgt_set_request(priv, DOT11_OID_EDTHRESHOLD, 0, &sens);
+}
+
+static int
+prism54_get_sens(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, DOT11_OID_EDTHRESHOLD, 0, NULL, &r);
+
+ vwrq->value = r.u;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+
+ return rvalue;
+}
+
+static int
+prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct iw_range *range = (struct iw_range *) extra;
+ islpci_private *priv = netdev_priv(ndev);
+ u8 *data;
+ int i, m, rvalue;
+ struct obj_frequencies *freq;
+ union oid_res_t r;
+
+ memset(range, 0, sizeof (struct iw_range));
+ dwrq->length = sizeof (struct iw_range);
+
+ /* set the wireless extension version number */
+ range->we_version_source = SUPPORTED_WIRELESS_EXT;
+ range->we_version_compiled = WIRELESS_EXT;
+
+ /* Now the encoding capabilities */
+ range->num_encoding_sizes = 3;
+ /* 64(40) bits WEP */
+ range->encoding_size[0] = 5;
+ /* 128(104) bits WEP */
+ range->encoding_size[1] = 13;
+ /* 256 bits for WPA-PSK */
+ range->encoding_size[2] = 32;
+ /* 4 keys are allowed */
+ range->max_encoding_tokens = 4;
+
+ /* we don't know the quality range... */
+ range->max_qual.level = 0;
+ range->max_qual.noise = 0;
+ range->max_qual.qual = 0;
+ /* these values describe an average quality. Needs more tweaking... */
+ range->avg_qual.level = -80; /* -80 dBm */
+ range->avg_qual.noise = 0; /* don't know what to put here */
+ range->avg_qual.qual = 0;
+
+ range->sensitivity = 200;
+
+ /* retry limit capabilities */
+ range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = IW_RETRY_LIFETIME;
+
+ /* I don't know the range. Put stupid things here */
+ range->min_retry = 1;
+ range->max_retry = 65535;
+ range->min_r_time = 1024;
+ range->max_r_time = 65535 * 1024;
+
+ /* txpower is supported in dBm's */
+ range->txpower_capa = IW_TXPOW_DBM;
+
+#if WIRELESS_EXT > 16
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+ IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
+ IW_EVENT_CAPA_MASK(SIOCGIWAP));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
+ range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
+#endif /* WIRELESS_EXT > 16 */
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT)
+ return 0;
+
+ /* Ask the device for the supported frequencies. This is not really
+ * relevant since some devices will report the 5 GHz band
+ * frequencies even if they don't support them.
+ */
+ rvalue =
+ mgt_get_request(priv, DOT11_OID_SUPPORTEDFREQUENCIES, 0, NULL, &r);
+ freq = r.ptr;
+
+ range->num_channels = freq->nr;
+ range->num_frequency = freq->nr;
+
+ m = min(IW_MAX_FREQUENCIES, (int) freq->nr);
+ for (i = 0; i < m; i++) {
+ range->freq[i].m = freq->mhz[i];
+ range->freq[i].e = 6;
+ range->freq[i].i = channel_of_freq(freq->mhz[i]);
+ }
+ kfree(freq);
+
+ rvalue |= mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
+ data = r.ptr;
+
+ /* We got an array of char. It is NULL terminated. */
+ i = 0;
+ while ((i < IW_MAX_BITRATES) && (*data != 0)) {
+ /* the result must be in bps. The card gives us units of 500 kbps */
+ range->bitrate[i] = *data * 500000;
+ i++;
+ data++;
+ }
+ range->num_bitrates = i;
+ kfree(r.ptr);
+
+ return rvalue;
+}
+
+/* Set AP address*/
+
+static int
+prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ char bssid[6];
+ int rvalue;
+
+ if (awrq->sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ /* prepare the structure for the set object */
+ memcpy(&bssid[0], awrq->sa_data, 6);
+
+ /* set the bssid -- does this make sense when in AP mode? */
+ rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
+
+ return (rvalue ? rvalue : -EINPROGRESS); /* Call commit handler */
+}
+
+/* get AP address*/
+
+static int
+prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
+ memcpy(awrq->sa_data, r.ptr, 6);
+ awrq->sa_family = ARPHRD_ETHER;
+ kfree(r.ptr);
+
+ return rvalue;
+}
+
+static int
+prism54_set_scan(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ /* hehe the device does this automagically */
+ return 0;
+}
+
+/* a little helper that will translate our data into a card independent
+ * format that the Wireless Tools will understand. This was inspired by
+ * the "Aironet driver for 4500 and 4800 series cards" (GPL)
+ */
+
+static char *
+prism54_translate_bss(struct net_device *ndev, char *current_ev,
+ char *end_buf, struct obj_bss *bss, char noise)
+{
+ struct iw_event iwe; /* Temporary buffer */
+ short cap;
+ islpci_private *priv = netdev_priv(ndev);
+
+ /* The first entry must be the MAC address */
+ memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ iwe.cmd = SIOCGIWAP;
+ current_ev =
+ iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
+
+ /* The following entries will be displayed in the same order we give them */
+
+ /* The ESSID. */
+ iwe.u.data.length = bss->ssid.length;
+ iwe.u.data.flags = 1;
+ iwe.cmd = SIOCGIWESSID;
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, bss->ssid.octets);
+
+ /* Capabilities */
+#define CAP_ESS 0x01
+#define CAP_IBSS 0x02
+#define CAP_CRYPT 0x10
+
+ /* Mode */
+ cap = bss->capinfo;
+ iwe.u.mode = 0;
+ if (cap & CAP_ESS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else if (cap & CAP_IBSS)
+ iwe.u.mode = IW_MODE_ADHOC;
+ iwe.cmd = SIOCGIWMODE;
+ if (iwe.u.mode)
+ current_ev =
+ iwe_stream_add_event(current_ev, end_buf, &iwe,
+ IW_EV_UINT_LEN);
+
+ /* Encryption capability */
+ if (cap & CAP_CRYPT)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ iwe.cmd = SIOCGIWENCODE;
+ current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, NULL);
+
+ /* Add frequency. (short) bss->channel is the frequency in MHz */
+ iwe.u.freq.m = bss->channel;
+ iwe.u.freq.e = 6;
+ iwe.cmd = SIOCGIWFREQ;
+ current_ev =
+ iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
+
+ /* Add quality statistics */
+ iwe.u.qual.level = bss->rssi;
+ iwe.u.qual.noise = noise;
+ /* do a simple SNR for quality */
+ iwe.u.qual.qual = bss->rssi - noise;
+ iwe.cmd = IWEVQUAL;
+ current_ev =
+ iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
+
+ if (priv->wpa) {
+ u8 wpa_ie[MAX_WPA_IE_LEN];
+ char *buf, *p;
+ size_t wpa_ie_len;
+ int i;
+
+ wpa_ie_len = prism54_wpa_ie_get(priv, bss->address, wpa_ie);
+ if (wpa_ie_len > 0 &&
+ (buf = kmalloc(wpa_ie_len * 2 + 10, GFP_ATOMIC))) {
+ p = buf;
+ p += sprintf(p, "wpa_ie=");
+ for (i = 0; i < wpa_ie_len; i++) {
+ p += sprintf(p, "%02x", wpa_ie[i]);
+ }
+ memset(&iwe, 0, sizeof (iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, buf);
+ kfree(buf);
+ }
+ }
+ return current_ev;
+}
+
+static int
+prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int i, rvalue;
+ struct obj_bsslist *bsslist;
+ u32 noise = 0;
+ char *current_ev = extra;
+ union oid_res_t r;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT) {
+ /* device is not ready, fail gently */
+ dwrq->length = 0;
+ return 0;
+ }
+
+ /* first get the noise value. We will use it to report the link quality */
+ rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
+ noise = r.u;
+
+ /* Ask the device for a list of known bss.
+ * The old API, using SIOCGIWAPLIST, had a hard limit of IW_MAX_AP=64.
+ * The new API, using SIOCGIWSCAN, is only limited by the buffer size.
+ * WE-14->WE-16, the buffer is limited to IW_SCAN_MAX_DATA bytes.
+ * Starting with WE-17, the buffer can be as big as needed.
+ * But the device won't report anything if you change the value
+ * of IWMAX_BSS=24. */
+
+ rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
+ bsslist = r.ptr;
+
+ /* ok now, scan the list and translate its info */
+ for (i = 0; i < (int) bsslist->nr; i++) {
+ current_ev = prism54_translate_bss(ndev, current_ev,
+ extra + dwrq->length,
+ &(bsslist->bsslist[i]),
+ noise);
+#if WIRELESS_EXT > 16
+ /* Check if there is space for one more entry */
+ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
+ /* Ask user space to try again with a bigger buffer */
+ rvalue = -E2BIG;
+ break;
+ }
+#endif /* WIRELESS_EXT > 16 */
+ }
+
+ kfree(bsslist);
+ dwrq->length = (current_ev - extra);
+ dwrq->flags = 0; /* todo */
+
+ return rvalue;
+}
+
+static int
+prism54_set_essid(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct obj_ssid essid;
+
+ memset(essid.octets, 0, 33);
+
+ /* Check if we were asked for `any' */
+ if (dwrq->flags && dwrq->length) {
+ if (dwrq->length > min(33, IW_ESSID_MAX_SIZE + 1))
+ return -E2BIG;
+ essid.length = dwrq->length - 1;
+ memcpy(essid.octets, extra, dwrq->length);
+ } else
+ essid.length = 0;
+
+ if (priv->iw_mode != IW_MODE_MONITOR)
+ return mgt_set_request(priv, DOT11_OID_SSID, 0, &essid);
+
+ /* If in monitor mode, just save to mib */
+ mgt_set(priv, DOT11_OID_SSID, &essid);
+ return 0;
+
+}
+
+static int
+prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct obj_ssid *essid;
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, DOT11_OID_SSID, 0, NULL, &r);
+ essid = r.ptr;
+
+ if (essid->length) {
+ dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
+ /* if it is too big, truncate it */
+ dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
+ } else {
+ dwrq->flags = 0;
+ dwrq->length = 0;
+ }
+ essid->octets[essid->length] = '\0';
+ memcpy(extra, essid->octets, dwrq->length);
+ kfree(essid);
+
+ return rvalue;
+}
+
+/* Provides no functionality, just completes the ioctl. In essence this is
+ * just a cosmetic ioctl.
+ */
+static int
+prism54_set_nick(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ if (dwrq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ down_write(&priv->mib_sem);
+ memset(priv->nickname, 0, sizeof (priv->nickname));
+ memcpy(priv->nickname, extra, dwrq->length);
+ up_write(&priv->mib_sem);
+
+ return 0;
+}
+
+static int
+prism54_get_nick(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ dwrq->length = 0;
+
+ down_read(&priv->mib_sem);
+ dwrq->length = strlen(priv->nickname) + 1;
+ memcpy(extra, priv->nickname, dwrq->length);
+ up_read(&priv->mib_sem);
+
+ return 0;
+}
+
+/* Set the allowed Bitrates */
+
+static int
+prism54_set_rate(struct net_device *ndev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+
+ islpci_private *priv = netdev_priv(ndev);
+ u32 rate, profile;
+ char *data;
+ int ret, i;
+ union oid_res_t r;
+
+ if (vwrq->value == -1) {
+ /* auto mode. No limit. */
+ profile = 1;
+ return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
+ }
+
+ ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
+ if (ret) {
+ kfree(r.ptr);
+ return ret;
+ }
+
+ rate = (u32) (vwrq->value / 500000);
+ data = r.ptr;
+ i = 0;
+
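+ /* Walk the supported-rates list (units of 500 kb/s): every rate up
+ * to the requested one is flagged with bit 0x80 and the list is
+ * terminated right after it. The requested rate may be given either
+ * as a bit rate or, for small values, as an index into the list. */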
+ while (data[i]) {
+ if (rate && (data[i] == rate)) {
+ break;
+ }
+ if (vwrq->value == i) {
+ break;
+ }
+ data[i] |= 0x80;
+ i++;
+ }
+
+ if (!data[i]) {
+ kfree(r.ptr);
+ return -EINVAL;
+ }
+
+ data[i] |= 0x80;
+ data[i + 1] = 0;
+
+ /* Now, check if we want a fixed or auto value */
+ if (vwrq->fixed) {
+ data[0] = data[i];
+ data[1] = 0;
+ }
+
+/*
+ i = 0;
+ printk("prism54 rate: ");
+ while(data[i]) {
+ printk("%u ", data[i]);
+ i++;
+ }
+ printk("0\n");
+*/
+ profile = -1;
+ ret = mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
+ ret |= mgt_set_request(priv, DOT11_OID_EXTENDEDRATES, 0, data);
+ ret |= mgt_set_request(priv, DOT11_OID_RATES, 0, data);
+
+ kfree(r.ptr);
+
+ return ret;
+}
+
+/* Get the current bit rate */
+static int
+prism54_get_rate(struct net_device *ndev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int rvalue;
+ char *data;
+ union oid_res_t r;
+
+ /* Get the current bit rate */
+ if ((rvalue = mgt_get_request(priv, GEN_OID_LINKSTATE, 0, NULL, &r)))
+ return rvalue;
+ vwrq->value = r.u * 500000;
+
+ /* request the device for the enabled rates */
+ rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r);
+ if (rvalue) {
+ kfree(r.ptr);
+ return rvalue;
+ }
+ data = r.ptr;
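+ /* the rate is fixed if exactly one rate is currently enabled */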
+ vwrq->fixed = (data[0] != 0) && (data[1] == 0);
+ kfree(r.ptr);
+
+ return 0;
+}
+
+static int
+prism54_set_rts(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value);
+}
+
+static int
+prism54_get_rts(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ /* get the rts threshold */
+ rvalue = mgt_get_request(priv, DOT11_OID_RTSTHRESH, 0, NULL, &r);
+ vwrq->value = r.u;
+
+ return rvalue;
+}
+
+static int
+prism54_set_frag(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value);
+}
+
+static int
+prism54_get_frag(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, DOT11_OID_FRAGTHRESH, 0, NULL, &r);
+ vwrq->value = r.u;
+
+ return rvalue;
+}
+
+/* Here we have (min,max) = max retries for (small frames, big frames). Where
+ * big frame <=> bigger than the rts threshold
+ * small frame <=> smaller than the rts threshold
+ * This is not really the behavior expected by the wireless tool but it seems
+ * to be a common behavior in other drivers.
+ */
+
+static int
+prism54_set_retry(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ u32 slimit = 0, llimit = 0; /* short and long limit */
+ u32 lifetime = 0;
+ int rvalue = 0;
+
+ if (vwrq->disabled)
+ /* we cannot disable this feature */
+ return -EINVAL;
+
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+ if (vwrq->flags & IW_RETRY_MIN)
+ slimit = vwrq->value;
+ else if (vwrq->flags & IW_RETRY_MAX)
+ llimit = vwrq->value;
+ else {
+ /* we are asked to set both */
+ slimit = vwrq->value;
+ llimit = vwrq->value;
+ }
+ }
+ if (vwrq->flags & IW_RETRY_LIFETIME)
+ /* Wireless tools use microseconds while the device uses units of 1024 us */
+ lifetime = vwrq->value / 1024;
+
+ /* now set what is requested */
+ if (slimit)
+ rvalue =
+ mgt_set_request(priv, DOT11_OID_SHORTRETRIES, 0, &slimit);
+ if (llimit)
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_LONGRETRIES, 0, &llimit);
+ if (lifetime)
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_MAXTXLIFETIME, 0,
+ &lifetime);
+ return rvalue;
+}
+
+static int
+prism54_get_retry(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue = 0;
+ vwrq->disabled = 0; /* It cannot be disabled */
+
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ /* we are asked for the life time */
+ rvalue =
+ mgt_get_request(priv, DOT11_OID_MAXTXLIFETIME, 0, NULL, &r);
+ vwrq->value = r.u * 1024;
+ vwrq->flags = IW_RETRY_LIFETIME;
+ } else if ((vwrq->flags & IW_RETRY_MAX)) {
+ /* we are asked for the long retry limit */
+ rvalue |=
+ mgt_get_request(priv, DOT11_OID_LONGRETRIES, 0, NULL, &r);
+ vwrq->value = r.u;
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ } else {
+ /* default. get the short retry limit */
+ rvalue |=
+ mgt_get_request(priv, DOT11_OID_SHORTRETRIES, 0, NULL, &r);
+ vwrq->value = r.u;
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
+ }
+
+ return rvalue;
+}
+
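+/* SIOCSIWENCODE handler: read the current default key index, program the new
+ * key (a 32-byte key is treated as TKIP/WPA-PSK, shorter keys as WEP), then
+ * derive the authentication mode, privacy-invoked and exclude-unencrypted
+ * settings from the IW_ENCODE_* flags.
+ */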
+static int
+prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int rvalue = 0, force = 0;
+ int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
+ union oid_res_t r;
+
+ /* With the new API it's impossible to get a NULL pointer here.
+ * Newer versions of iwconfig set the IW_ENCODE_NOKEY flag
+ * when no key is given, but older versions don't. */
+
+ if (dwrq->length > 0) {
+ /* we have a key to set */
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ int current_index;
+ struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
+
+ /* get the current key index */
+ rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
+ current_index = r.u;
+ /* Verify that the key is not marked as invalid */
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ key.length = dwrq->length > sizeof (key.key) ?
+ sizeof (key.key) : dwrq->length;
+ memcpy(key.key, extra, key.length);
+ if (key.length == 32)
+ /* we want WPA-PSK */
+ key.type = DOT11_PRIV_TKIP;
+ if ((index < 0) || (index > 3))
+ /* no index provided, use the current one */
+ index = current_index;
+
+ /* now send the key to the card */
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
+ &key);
+ }
+ /*
+ * If a valid key is set, encryption should be enabled
+ * (user may turn it off later).
+ * This is also how "iwconfig ethX key on" works
+ */
+ if ((index == current_index) && (key.length > 0))
+ force = 1;
+ } else {
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if ((index >= 0) && (index <= 3)) {
+ /* we want to set the key index */
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
+ &index);
+ } else {
+ if (!(dwrq->flags & IW_ENCODE_MODE)) {
+ /* we cannot do anything. Complain. */
+ return -EINVAL;
+ }
+ }
+ }
+ /* now read the flags */
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ /* Encoding disabled,
+ * authen = DOT11_AUTH_OS;
+ * invoke = 0;
+ * exunencrypt = 0; */
+ }
+ if (dwrq->flags & IW_ENCODE_OPEN)
+ /* Encode but accept non-encoded packets. No auth */
+ invoke = 1;
+ if ((dwrq->flags & IW_ENCODE_RESTRICTED) || force) {
+ /* Refuse non-encoded packets. Auth */
+ authen = DOT11_AUTH_BOTH;
+ invoke = 1;
+ exunencrypt = 1;
+ }
+ /* do the change if requested */
+ if ((dwrq->flags & IW_ENCODE_MODE) || force) {
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
+ &exunencrypt);
+ }
+ return rvalue;
+}
+
+static int
+prism54_get_encode(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct obj_key *key;
+ u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ u32 authen = 0, invoke = 0, exunencrypt = 0;
+ int rvalue;
+ union oid_res_t r;
+
+ /* first get the flags */
+ rvalue = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
+ authen = r.u;
+ rvalue |= mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
+ invoke = r.u;
+ rvalue |= mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
+ exunencrypt = r.u;
+
+ if (invoke && (authen == DOT11_AUTH_BOTH) && exunencrypt)
+ dwrq->flags = IW_ENCODE_RESTRICTED;
+ else if ((authen == DOT11_AUTH_OS) && !exunencrypt) {
+ if (invoke)
+ dwrq->flags = IW_ENCODE_OPEN;
+ else
+ dwrq->flags = IW_ENCODE_DISABLED;
+ } else
+ /* The card should not work in this state */
+ dwrq->flags = 0;
+
+ /* get the current device key index */
+ rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
+ devindex = r.u;
+ /* Now get the key, return it */
+ if ((index < 0) || (index > 3))
+ /* no index provided, use the current one */
+ index = devindex;
+ rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYX, index, NULL, &r);
+ key = r.ptr;
+ dwrq->length = key->length;
+ memcpy(extra, key->key, dwrq->length);
+ kfree(key);
+ /* return the used key index */
+ dwrq->flags |= devindex + 1;
+
+ return rvalue;
+}
+
+static int
+prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ union oid_res_t r;
+ int rvalue;
+
+ rvalue = mgt_get_request(priv, OID_INL_OUTPUTPOWER, 0, NULL, &r);
+ /* intersil firmware operates in 0.25 dBm (1/4 dBm) */
+ vwrq->value = (s32) r.u / 4;
+ vwrq->fixed = 1;
+ /* The radio is not turned off.
+ * BTW: how is it possible to turn off only the radio?
+ */
+ vwrq->disabled = 0;
+
+ return rvalue;
+}
+
+static int
+prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ s32 u = vwrq->value;
+
+ /* intersil firmware operates in 0.25 dBm (1/4) */
+ u *= 4;
+ if (vwrq->disabled) {
+ /* don't know how to disable radio */
+ printk(KERN_DEBUG
+ "%s: %s() disabling radio is not yet supported.\n",
+ priv->ndev->name, __FUNCTION__);
+ return -ENOTSUPP;
+ } else if (vwrq->fixed)
+ /* currently only fixed value is supported */
+ return mgt_set_request(priv, OID_INL_OUTPUTPOWER, 0, &u);
+ else {
+ printk(KERN_DEBUG
+ "%s: %s() auto power will be implemented later.\n",
+ priv->ndev->name, __FUNCTION__);
+ return -ENOTSUPP;
+ }
+}
+
+static int
+prism54_reset(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_reset(netdev_priv(ndev), 0);
+
+ return 0;
+}
+
+static int
+prism54_get_oid(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ union oid_res_t r;
+ int rvalue;
+ enum oid_num_t n = dwrq->flags;
+
+ rvalue = mgt_get_request((islpci_private *) ndev->priv, n, 0, NULL, &r);
+ dwrq->length = mgt_response_to_str(n, &r, extra);
+ if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32)
+ kfree(r.ptr);
+ return rvalue;
+}
+
+static int
+prism54_set_u32(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ u32 oid = uwrq[0], u = uwrq[1];
+
+ return mgt_set_request((islpci_private *) ndev->priv, oid, 0, &u);
+}
+
+static int
+prism54_set_raw(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ u32 oid = dwrq->flags;
+
+ return mgt_set_request((islpci_private *) ndev->priv, oid, 0, extra);
+}
+
+void
+prism54_acl_init(struct islpci_acl *acl)
+{
+ sema_init(&acl->sem, 1);
+ INIT_LIST_HEAD(&acl->mac_list);
+ acl->size = 0;
+ acl->policy = MAC_POLICY_OPEN;
+}
+
+static void
+prism54_clear_mac(struct islpci_acl *acl)
+{
+ struct list_head *ptr, *next;
+ struct mac_entry *entry;
+
+ if (down_interruptible(&acl->sem))
+ return;
+
+ if (acl->size == 0) {
+ up(&acl->sem);
+ return;
+ }
+
+ for (ptr = acl->mac_list.next, next = ptr->next;
+ ptr != &acl->mac_list; ptr = next, next = ptr->next) {
+ entry = list_entry(ptr, struct mac_entry, _list);
+ list_del(ptr);
+ kfree(entry);
+ }
+ acl->size = 0;
+ up(&acl->sem);
+}
+
+void
+prism54_acl_clean(struct islpci_acl *acl)
+{
+ prism54_clear_mac(acl);
+}
+
+static int
+prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_acl *acl = &priv->acl;
+ struct mac_entry *entry;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+
+ if (addr->sa_family != ARPHRD_ETHER)
+ return -EOPNOTSUPP;
+
+ entry = kmalloc(sizeof (struct mac_entry), GFP_KERNEL);
+ if (entry == NULL)
+ return -ENOMEM;
+
+ memcpy(entry->addr, addr->sa_data, ETH_ALEN);
+
+ if (down_interruptible(&acl->sem)) {
+ kfree(entry);
+ return -ERESTARTSYS;
+ }
+ list_add_tail(&entry->_list, &acl->mac_list);
+ acl->size++;
+ up(&acl->sem);
+
+ return 0;
+}
+
+static int
+prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_acl *acl = &priv->acl;
+ struct mac_entry *entry;
+ struct list_head *ptr;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+
+ if (addr->sa_family != ARPHRD_ETHER)
+ return -EOPNOTSUPP;
+
+ if (down_interruptible(&acl->sem))
+ return -ERESTARTSYS;
+ for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
+ entry = list_entry(ptr, struct mac_entry, _list);
+
+ if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
+ list_del(ptr);
+ acl->size--;
+ kfree(entry);
+ up(&acl->sem);
+ return 0;
+ }
+ }
+ up(&acl->sem);
+ return -EINVAL;
+}
+
+static int
+prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_acl *acl = &priv->acl;
+ struct mac_entry *entry;
+ struct list_head *ptr;
+ struct sockaddr *dst = (struct sockaddr *) extra;
+
+ dwrq->length = 0;
+
+ if (down_interruptible(&acl->sem))
+ return -ERESTARTSYS;
+
+ for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
+ entry = list_entry(ptr, struct mac_entry, _list);
+
+ memcpy(dst->sa_data, entry->addr, ETH_ALEN);
+ dst->sa_family = ARPHRD_ETHER;
+ dwrq->length++;
+ dst++;
+ }
+ up(&acl->sem);
+ return 0;
+}
+
+/* Setting the policy also clears the MAC ACL, even if we don't change the
+ * default policy
+ */
+
+static int
+prism54_set_policy(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_acl *acl = &priv->acl;
+ u32 mlmeautolevel;
+
+ prism54_clear_mac(acl);
+
+ if ((*uwrq < MAC_POLICY_OPEN) || (*uwrq > MAC_POLICY_REJECT))
+ return -EINVAL;
+
+ down_write(&priv->mib_sem);
+
+ acl->policy = *uwrq;
+
+ /* the ACL code needs an intermediate mlmeautolevel */
+ if ((priv->iw_mode == IW_MODE_MASTER) &&
+ (acl->policy != MAC_POLICY_OPEN))
+ mlmeautolevel = DOT11_MLME_INTERMEDIATE;
+ else
+ mlmeautolevel = CARD_DEFAULT_MLME_MODE;
+ if (priv->wpa)
+ mlmeautolevel = DOT11_MLME_EXTENDED;
+ mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel);
+ /* restart the card with our new policy */
+ if (mgt_commit(priv)) {
+ up_write(&priv->mib_sem);
+ return -EIO;
+ }
+ up_write(&priv->mib_sem);
+
+ return 0;
+}
+
+static int
+prism54_get_policy(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_acl *acl = &priv->acl;
+
+ *uwrq = acl->policy;
+
+ return 0;
+}
+
+/* Return 1 only if client should be accepted. */
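+/* Note that with MAC_POLICY_ACCEPT the list acts as a deny list (a MAC found
+ * in it is refused), while with MAC_POLICY_REJECT it acts as an allow list;
+ * MAC_POLICY_OPEN accepts everyone. */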
+
+static int
+prism54_mac_accept(struct islpci_acl *acl, char *mac)
+{
+ struct list_head *ptr;
+ struct mac_entry *entry;
+ int res = 0;
+
+ if (down_interruptible(&acl->sem))
+ return -ERESTARTSYS;
+
+ if (acl->policy == MAC_POLICY_OPEN) {
+ up(&acl->sem);
+ return 1;
+ }
+
+ for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
+ entry = list_entry(ptr, struct mac_entry, _list);
+ if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ res = 1;
+ break;
+ }
+ }
+ res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res;
+ up(&acl->sem);
+
+ return res;
+}
+
+static int
+prism54_kick_all(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct obj_mlme *mlme;
+ int rvalue;
+
+ mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
+ if (mlme == NULL)
+ return -ENOMEM;
+
+ /* Tell the card to kick every client */
+ mlme->id = 0;
+ rvalue =
+ mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme);
+ kfree(mlme);
+
+ return rvalue;
+}
+
+static int
+prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ struct obj_mlme *mlme;
+ struct sockaddr *addr = (struct sockaddr *) extra;
+ int rvalue;
+
+ if (addr->sa_family != ARPHRD_ETHER)
+ return -EOPNOTSUPP;
+
+ mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
+ if (mlme == NULL)
+ return -ENOMEM;
+
+ /* Tell the card to kick only this particular client */
+ memcpy(mlme->address, addr->sa_data, ETH_ALEN);
+ mlme->id = -1;
+ rvalue =
+ mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme);
+
+ kfree(mlme);
+
+ return rvalue;
+}
+
+/* Translate a TRAP oid into a wireless event. Called in islpci_mgt_receive. */
+
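+/* Format "<event> from|to xx:xx:xx:xx:xx:xx [ : ACCEPTED | : REJECTED ] (code)"
+ * into a buffer of at most IW_CUSTOM_MAX bytes for delivery as an IWEVCUSTOM
+ * wireless event. */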
+static void
+format_event(islpci_private *priv, char *dest, const char *str,
+ const struct obj_mlme *mlme, u16 *length, int error)
+{
+ const u8 *a = mlme->address;
+ int n = snprintf(dest, IW_CUSTOM_MAX,
+ "%s %s %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X %s (%2.2X)",
+ str,
+ ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"),
+ a[0], a[1], a[2], a[3], a[4], a[5],
+ (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ")
+ : ""), mlme->code);
+ BUG_ON(n > IW_CUSTOM_MAX);
+ *length = n;
+}
+
+static void
+send_formatted_event(islpci_private *priv, const char *str,
+ const struct obj_mlme *mlme, int error)
+{
+ union iwreq_data wrqu;
+ char *memptr;
+
+ memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+ if (!memptr)
+ return;
+ wrqu.data.pointer = memptr;
+ wrqu.data.length = 0;
+ format_event(priv, memptr, str, mlme, &wrqu.data.length,
+ error);
+ wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr);
+ kfree(memptr);
+}
+
+static void
+send_simple_event(islpci_private *priv, const char *str)
+{
+ union iwreq_data wrqu;
+ char *memptr;
+ int n = strlen(str);
+
+ memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+ if (!memptr)
+ return;
+ BUG_ON(n + 1 > IW_CUSTOM_MAX);
+ wrqu.data.pointer = memptr;
+ wrqu.data.length = n;
+ strcpy(memptr, str);
+ wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr);
+ kfree(memptr);
+}
+
+static void
+link_changed(struct net_device *ndev, u32 bitrate)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ if (bitrate) {
+ if (priv->iw_mode == IW_MODE_INFRA) {
+ union iwreq_data uwrq;
+ prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq,
+ NULL);
+ wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL);
+ } else
+ send_simple_event(netdev_priv(ndev),
+ "Link established");
+ } else
+ send_simple_event(netdev_priv(ndev), "Link lost");
+}
+
+/* Beacon/ProbeResp payload header */
+struct ieee80211_beacon_phdr {
+ u8 timestamp[8];
+ u16 beacon_int;
+ u16 capab_info;
+} __attribute__ ((packed));
+
+#define WLAN_EID_GENERIC 0xdd
+static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
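+/* Cache the WPA IE last seen in a beacon/probe response from each BSS. The
+ * list is kept in most-recently-updated order, capped at MAX_BSS_WPA_IE_COUNT
+ * entries, and entries older than 60 seconds are expired on every update. */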
+static void
+prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
+ u8 *wpa_ie, size_t wpa_ie_len)
+{
+ struct list_head *ptr;
+ struct islpci_bss_wpa_ie *bss = NULL;
+
+ if (wpa_ie_len > MAX_WPA_IE_LEN)
+ wpa_ie_len = MAX_WPA_IE_LEN;
+
+ if (down_interruptible(&priv->wpa_sem))
+ return;
+
+ /* try to use existing entry */
+ list_for_each(ptr, &priv->bss_wpa_list) {
+ bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
+ if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) {
+ list_move(&bss->list, &priv->bss_wpa_list);
+ break;
+ }
+ bss = NULL;
+ }
+
+ if (bss == NULL) {
+ /* add a new BSS entry; if max number of entries is already
+ * reached, replace the least recently updated */
+ if (priv->num_bss_wpa >= MAX_BSS_WPA_IE_COUNT) {
+ bss = list_entry(priv->bss_wpa_list.prev,
+ struct islpci_bss_wpa_ie, list);
+ list_del(&bss->list);
+ } else {
+ bss = kmalloc(sizeof (*bss), GFP_ATOMIC);
+ if (bss != NULL) {
+ priv->num_bss_wpa++;
+ memset(bss, 0, sizeof (*bss));
+ }
+ }
+ if (bss != NULL) {
+ memcpy(bss->bssid, bssid, ETH_ALEN);
+ list_add(&bss->list, &priv->bss_wpa_list);
+ }
+ }
+
+ if (bss != NULL) {
+ memcpy(bss->wpa_ie, wpa_ie, wpa_ie_len);
+ bss->wpa_ie_len = wpa_ie_len;
+ bss->last_update = jiffies;
+ } else {
+ printk(KERN_DEBUG "Failed to add BSS WPA entry for " MACSTR
+ "\n", MAC2STR(bssid));
+ }
+
+ /* expire old entries from WPA list */
+ while (priv->num_bss_wpa > 0) {
+ bss = list_entry(priv->bss_wpa_list.prev,
+ struct islpci_bss_wpa_ie, list);
+ if (!time_after(jiffies, bss->last_update + 60 * HZ))
+ break;
+
+ list_del(&bss->list);
+ priv->num_bss_wpa--;
+ kfree(bss);
+ }
+
+ up(&priv->wpa_sem);
+}
+
+static size_t
+prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
+{
+ struct list_head *ptr;
+ struct islpci_bss_wpa_ie *bss = NULL;
+ size_t len = 0;
+
+ if (down_interruptible(&priv->wpa_sem))
+ return 0;
+
+ list_for_each(ptr, &priv->bss_wpa_list) {
+ bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
+ if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0)
+ break;
+ bss = NULL;
+ }
+ if (bss) {
+ len = bss->wpa_ie_len;
+ memcpy(wpa_ie, bss->wpa_ie, len);
+ }
+ up(&priv->wpa_sem);
+
+ return len;
+}
+
+void
+prism54_wpa_ie_init(islpci_private *priv)
+{
+ INIT_LIST_HEAD(&priv->bss_wpa_list);
+ sema_init(&priv->wpa_sem, 1);
+}
+
+void
+prism54_wpa_ie_clean(islpci_private *priv)
+{
+ struct list_head *ptr, *n;
+
+ list_for_each_safe(ptr, n, &priv->bss_wpa_list) {
+ struct islpci_bss_wpa_ie *bss;
+ bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
+ kfree(bss);
+ }
+}
+
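+/* Walk the information elements that follow the fixed beacon/probe response
+ * header and cache the first vendor-specific (0xdd) element carrying the WPA
+ * OUI 00:50:f2 type 1 for the sending BSS. */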
+static void
+prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
+ u8 *payload, size_t len)
+{
+ struct ieee80211_beacon_phdr *hdr;
+ u8 *pos, *end;
+
+ if (!priv->wpa)
+ return;
+
+ hdr = (struct ieee80211_beacon_phdr *) payload;
+ pos = (u8 *) (hdr + 1);
+ end = payload + len;
+ while (pos < end) {
+ if (pos + 2 + pos[1] > end) {
+ printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed "
+ "for " MACSTR "\n", MAC2STR(addr));
+ return;
+ }
+ if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
+ memcmp(pos + 2, wpa_oid, 4) == 0) {
+ prism54_wpa_ie_add(priv, addr, pos, pos[1] + 2);
+ return;
+ }
+ pos += 2 + pos[1];
+ }
+}
+
+static void
+handle_request(islpci_private *priv, struct obj_mlme *mlme, enum oid_num_t oid)
+{
+ if (((mlme->state == DOT11_STATE_AUTHING) ||
+ (mlme->state == DOT11_STATE_ASSOCING))
+ && mgt_mlme_answer(priv)) {
+ /* Someone is requesting auth and we must respond. Just send back
+ * the trap with error code set accordingly.
+ */
+ mlme->code = prism54_mac_accept(&priv->acl,
+ mlme->address) ? 0 : 1;
+ mgt_set_request(priv, oid, 0, mlme);
+ }
+}
+
+static int
+prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
+ char *data)
+{
+ struct obj_mlme *mlme = (struct obj_mlme *) data;
+ struct obj_mlmeex *mlmeex = (struct obj_mlmeex *) data;
+ struct obj_mlmeex *confirm;
+ u8 wpa_ie[MAX_WPA_IE_LEN];
+ int wpa_ie_len;
+ size_t len = 0; /* u16, better? */
+ u8 *payload = NULL, *pos = NULL;
+ int ret;
+
+ /* I think all trappable objects are listed here.
+ * Some OIDs have an EX version. The difference is that they are emitted
+ * in DOT11_MLME_EXTENDED mode (set with DOT11_OID_MLMEAUTOLEVEL)
+ * with more info.
+ * The few events already defined by the wireless tools are not really
+ * suited, so we use the more flexible custom event facility.
+ */
+
+ if (oid >= DOT11_OID_BEACON) {
+ len = mlmeex->size;
+ payload = pos = mlmeex->data;
+ }
+
+ /* I fear prism54_process_bss_data won't work with big endian data */
+ if ((oid == DOT11_OID_BEACON) || (oid == DOT11_OID_PROBE))
+ prism54_process_bss_data(priv, oid, mlmeex->address,
+ payload, len);
+
+ mgt_le_to_cpu(isl_oid[oid].flags & OID_FLAG_TYPE, (void *) mlme);
+
+ switch (oid) {
+
+ case GEN_OID_LINKSTATE:
+ link_changed(priv->ndev, (u32) *data);
+ break;
+
+ case DOT11_OID_MICFAILURE:
+ send_simple_event(priv, "Mic failure");
+ break;
+
+ case DOT11_OID_DEAUTHENTICATE:
+ send_formatted_event(priv, "DeAuthenticate request", mlme, 0);
+ break;
+
+ case DOT11_OID_AUTHENTICATE:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "Authenticate request", mlme, 1);
+ break;
+
+ case DOT11_OID_DISASSOCIATE:
+ send_formatted_event(priv, "Disassociate request", mlme, 0);
+ break;
+
+ case DOT11_OID_ASSOCIATE:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "Associate request", mlme, 1);
+ break;
+
+ case DOT11_OID_REASSOCIATE:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "ReAssociate request", mlme, 1);
+ break;
+
+ case DOT11_OID_BEACON:
+ send_formatted_event(priv,
+ "Received a beacon from an unkown AP",
+ mlme, 0);
+ break;
+
+ case DOT11_OID_PROBE:
+ /* we received a probe from a client. */
+ send_formatted_event(priv, "Received a probe from client", mlme,
+ 0);
+ break;
+
+ /* Note : "mlme" is actually a "struct obj_mlmeex *" here, but this
+ * is backward compatible layout-wise with "struct obj_mlme".
+ */
+
+ case DOT11_OID_DEAUTHENTICATEEX:
+ send_formatted_event(priv, "DeAuthenticate request", mlme, 0);
+ break;
+
+ case DOT11_OID_AUTHENTICATEEX:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
+
+ if (priv->iw_mode != IW_MODE_MASTER
+ && mlmeex->state != DOT11_STATE_AUTHING)
+ break;
+
+ confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
+
+ if (!confirm)
+ break;
+
+ memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
+ printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mlmeex->address[0],
+ mlmeex->address[1],
+ mlmeex->address[2],
+ mlmeex->address[3],
+ mlmeex->address[4],
+ mlmeex->address[5]
+ );
+ confirm->id = -1; /* or mlmeex->id ? */
+ confirm->state = 0; /* not used */
+ confirm->code = 0;
+ confirm->size = 6;
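+ /* Fixed 6-byte authentication frame body: presumably algorithm 0
+ * (Open System), transaction sequence 2 and status 0, little endian. */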
+ confirm->data[0] = 0x00;
+ confirm->data[1] = 0x00;
+ confirm->data[2] = 0x02;
+ confirm->data[3] = 0x00;
+ confirm->data[4] = 0x00;
+ confirm->data[5] = 0x00;
+
+ ret = mgt_set_varlen(priv, DOT11_OID_ASSOCIATEEX, confirm, 6);
+
+ kfree(confirm);
+ if (ret)
+ return ret;
+ break;
+
+ case DOT11_OID_DISASSOCIATEEX:
+ send_formatted_event(priv, "Disassociate request (ex)", mlme, 0);
+ break;
+
+ case DOT11_OID_ASSOCIATEEX:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "Associate request (ex)", mlme, 1);
+
+ if (priv->iw_mode != IW_MODE_MASTER
+ && mlmeex->state != DOT11_STATE_AUTHING)
+ break;
+
+ confirm = kmalloc(sizeof(struct obj_mlmeex) + MAX_WPA_IE_LEN, GFP_ATOMIC);
+
+ if (!confirm)
+ break;
+
+ memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
+
+ confirm->id = ((struct obj_mlmeex *)mlme)->id;
+ confirm->state = 0; /* not used */
+ confirm->code = 0;
+
+ wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie);
+
+ if (!wpa_ie_len) {
+ printk(KERN_DEBUG "No WPA IE found from "
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mlmeex->address[0],
+ mlmeex->address[1],
+ mlmeex->address[2],
+ mlmeex->address[3],
+ mlmeex->address[4],
+ mlmeex->address[5]
+ );
+ kfree(confirm);
+ break;
+ }
+
+ confirm->size = wpa_ie_len;
+ memcpy(&confirm->data, wpa_ie, wpa_ie_len);
+
+ mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
+
+ kfree(confirm);
+
+ break;
+
+ case DOT11_OID_REASSOCIATEEX:
+ handle_request(priv, mlme, oid);
+ send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
+
+ if (priv->iw_mode != IW_MODE_MASTER
+ && mlmeex->state != DOT11_STATE_ASSOCING)
+ break;
+
+ confirm = kmalloc(sizeof(struct obj_mlmeex) + MAX_WPA_IE_LEN, GFP_ATOMIC);
+
+ if (!confirm)
+ break;
+
+ memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
+
+ confirm->id = mlmeex->id;
+ confirm->state = 0; /* not used */
+ confirm->code = 0;
+
+ wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie);
+
+ if (!wpa_ie_len) {
+ printk(KERN_DEBUG "No WPA IE found from "
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mlmeex->address[0],
+ mlmeex->address[1],
+ mlmeex->address[2],
+ mlmeex->address[3],
+ mlmeex->address[4],
+ mlmeex->address[5]
+ );
+ kfree(confirm);
+ break;
+ }
+
+ confirm->size = wpa_ie_len;
+ memcpy(&confirm->data, wpa_ie, wpa_ie_len);
+
+ mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
+
+ kfree(confirm);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Process a device trap. This is called via schedule_work(), outside of
+ * interrupt context, no locks held.
+ */
+void
+prism54_process_trap(void *data)
+{
+ struct islpci_mgmtframe *frame = data;
+ struct net_device *ndev = frame->ndev;
+ enum oid_num_t n = mgt_oidtonum(frame->header->oid);
+
+ if (n != OID_NUM_LAST)
+ prism54_process_trap_helper(netdev_priv(ndev), n, frame->data);
+ islpci_mgt_release(frame);
+}
+
+int
+prism54_set_mac_address(struct net_device *ndev, void *addr)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int ret;
+
+ if (ndev->addr_len != 6)
+ return -EINVAL;
+ ret = mgt_set_request(priv, GEN_OID_MACADDRESS, 0,
+ &((struct sockaddr *) addr)->sa_data);
+ if (!ret)
+ memcpy(priv->ndev->dev_addr,
+ &((struct sockaddr *) addr)->sa_data, 6);
+
+ return ret;
+}
+
+/* Note: for now we use the hostapd ioctl interface from the Host AP driver for
+ * WPA support. This is to be replaced with Linux wireless extensions once they
+ * get WPA support. */
+
+/* Note II: please leave all this together as it will be easier to remove later,
+ * once wireless extensions add WPA support -mcgrof */
+
+/* PRISM54_HOSTAPD ioctl() cmd: */
+enum {
+ PRISM2_SET_ENCRYPTION = 6,
+ PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
+ PRISM2_HOSTAPD_MLME = 13,
+ PRISM2_HOSTAPD_SCAN_REQ = 14,
+};
+
+#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
+#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
+#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
+
+#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
+#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
+((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
+
+/* Maximum length for algorithm names (-1 for nul termination)
+ * used in ioctl() */
+#define HOSTAP_CRYPT_ALG_NAME_LEN 16
+
+struct prism2_hostapd_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
+ u32 flags;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+ struct {
+ u8 len;
+ u8 data[0];
+ } generic_elem;
+ struct {
+#define MLME_STA_DEAUTH 0
+#define MLME_STA_DISASSOC 1
+ u16 cmd;
+ u16 reason_code;
+ } mlme;
+ struct {
+ u8 ssid_len;
+ u8 ssid[32];
+ } scan_req;
+ } u;
+};
+
+
+static int
+prism2_ioctl_set_encryption(struct net_device *dev,
+ struct prism2_hostapd_param *param,
+ int param_len)
+{
+ islpci_private *priv = netdev_priv(dev);
+ int rvalue = 0, force = 0;
+ int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
+ union oid_res_t r;
+
+ /* With the new API it's impossible to get a NULL pointer here.
+ * Newer versions of iwconfig set the IW_ENCODE_NOKEY flag
+ * when no key is given, but older versions don't. */
+
+ if (param->u.crypt.key_len > 0) {
+ /* we have a key to set */
+ int index = param->u.crypt.idx;
+ int current_index;
+ struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
+
+ /* get the current key index */
+ rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
+ current_index = r.u;
+ /* Verify that the key is not marked as invalid */
+ if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
+ key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
+ sizeof (param->u.crypt.key) : param->u.crypt.key_len;
+ memcpy(key.key, param->u.crypt.key, key.length);
+ if (key.length == 32)
+ /* we want WPA-PSK */
+ key.type = DOT11_PRIV_TKIP;
+ if ((index < 0) || (index > 3))
+ /* no index provided, use the current one */
+ index = current_index;
+
+ /* now send the key to the card */
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
+ &key);
+ }
+ /*
+ * If a valid key is set, encryption should be enabled
+ * (user may turn it off later).
+ * This is also how "iwconfig ethX key on" works
+ */
+ if ((index == current_index) && (key.length > 0))
+ force = 1;
+ } else {
+ int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
+ if ((index >= 0) && (index <= 3)) {
+ /* we want to set the key index */
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
+ &index);
+ } else {
+ if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
+ /* we cannot do anything. Complain. */
+ return -EINVAL;
+ }
+ }
+ }
+ /* now read the flags */
+ if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
+ /* Encoding disabled,
+ * authen = DOT11_AUTH_OS;
+ * invoke = 0;
+ * exunencrypt = 0; */
+ }
+ if (param->u.crypt.flags & IW_ENCODE_OPEN)
+ /* Encode but accept non-encoded packets. No auth */
+ invoke = 1;
+ if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
+ /* Refuse non-encoded packets. Auth */
+ authen = DOT11_AUTH_BOTH;
+ invoke = 1;
+ exunencrypt = 1;
+ }
+ /* do the change if requested */
+ if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
+ rvalue |=
+ mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
+ &exunencrypt);
+ }
+ return rvalue;
+}
+
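+/* Hand the WPA IE supplied by the supplicant to the firmware as a
+ * DOT11_OID_ATTACHMENT for both the association request and the
+ * reassociation request frame types. */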
+static int
+prism2_ioctl_set_generic_element(struct net_device *ndev,
+ struct prism2_hostapd_param *param,
+ int param_len)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int max_len, len, alen, ret=0;
+ struct obj_attachment *attach;
+
+ len = param->u.generic_elem.len;
+ max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
+ if (max_len < 0 || max_len < len)
+ return -EINVAL;
+
+ alen = sizeof(*attach) + len;
+ attach = kmalloc(alen, GFP_KERNEL);
+ if (attach == NULL)
+ return -ENOMEM;
+
+ memset(attach, 0, alen);
+#define WLAN_FC_TYPE_MGMT 0
+#define WLAN_FC_STYPE_ASSOC_REQ 0
+#define WLAN_FC_STYPE_REASSOC_REQ 2
+
+ /* Note: endianness is covered by mgt_set_varlen */
+
+ attach->type = (WLAN_FC_TYPE_MGMT << 2) |
+ (WLAN_FC_STYPE_ASSOC_REQ << 4);
+ attach->id = -1;
+ attach->size = len;
+ memcpy(attach->data, param->u.generic_elem.data, len);
+
+ ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
+
+ if (ret == 0) {
+ attach->type = (WLAN_FC_TYPE_MGMT << 2) |
+ (WLAN_FC_STYPE_REASSOC_REQ << 4);
+
+ ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
+
+ if (ret == 0)
+ printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
+ ndev->name);
+ }
+
+ kfree(attach);
+ return ret;
+
+}
+
+static int
+prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+prism2_ioctl_scan_req(struct net_device *ndev,
+ struct prism2_hostapd_param *param)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ int i, rvalue;
+ struct obj_bsslist *bsslist;
+ u32 noise = 0;
+ char *extra = "";
+ char *current_ev = "foo";
+ union oid_res_t r;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT) {
+ /* device is not ready, fail gently */
+ return 0;
+ }
+
+ /* first get the noise value. We will use it to report the link quality */
+ rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
+ noise = r.u;
+
+ /* Ask the device for a list of known BSSs. We can report at most
+ * IW_MAX_AP=64 to the range struct, but the device won't report anything
+ * if you change the value of IWMAX_BSS=24.
+ */
+ rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
+ bsslist = r.ptr;
+
+ /* ok now, scan the list and translate its info */
+ for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
+ current_ev = prism54_translate_bss(ndev, current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &(bsslist->bsslist[i]),
+ noise);
+ kfree(bsslist);
+
+ return rvalue;
+}
+
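+/* Dispatcher for the PRISM54_HOSTAPD private ioctl used by wpa_supplicant:
+ * validate and copy the prism2_hostapd_param block from user space, handle
+ * the sub-command, then copy the (possibly updated) block back. */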
+static int
+prism54_hostapd(struct net_device *ndev, struct iw_point *p)
+{
+ struct prism2_hostapd_param *param;
+ int ret = 0;
+ u32 uwrq;
+
+ printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
+ if (p->length < sizeof(struct prism2_hostapd_param) ||
+ p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
+ return -EINVAL;
+
+ param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL);
+ if (param == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ return -EFAULT;
+ }
+
+ switch (param->cmd) {
+ case PRISM2_SET_ENCRYPTION:
+ printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
+ ndev->name);
+ ret = prism2_ioctl_set_encryption(ndev, param, p->length);
+ break;
+ case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
+ printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
+ ndev->name);
+ ret = prism2_ioctl_set_generic_element(ndev, param,
+ p->length);
+ break;
+ case PRISM2_HOSTAPD_MLME:
+ printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
+ ndev->name);
+ ret = prism2_ioctl_mlme(ndev, param);
+ break;
+ case PRISM2_HOSTAPD_SCAN_REQ:
+ printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
+ ndev->name);
+ ret = prism2_ioctl_scan_req(ndev, param);
+ break;
+ case PRISM54_SET_WPA:
+ printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
+ ndev->name);
+ uwrq = 1;
+ ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
+ break;
+ case PRISM54_DROP_UNENCRYPTED:
+ printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
+ ndev->name);
+#if 0
+ uwrq = 0x01;
+ mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
+ down_write(&priv->mib_sem);
+ mgt_commit(priv);
+ up_write(&priv->mib_sem);
+#endif
+ /* Not necessary, as set_wpa does it, should we just do it here though? */
+ ret = 0;
+ break;
+ default:
+ printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
+ ndev->name);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+ kfree(param);
+
+ return ret;
+}
+
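+/* Enable or disable WPA mode: privacy invoked, unencrypted-frame filtering,
+ * 802.1X (EAP) filtering and the extended MLME level are switched on together
+ * when *uwrq is non-zero, and reverted to the non-WPA defaults otherwise. */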
+static int
+prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ u32 mlme, authen, dot1x, filter, wep;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT)
+ return 0;
+
+ wep = 1; /* For privacy invoked */
+ filter = 1; /* Filter out all unencrypted frames */
+ dot1x = 0x01; /* To enable eap filter */
+ mlme = DOT11_MLME_EXTENDED;
+ authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
+
+ down_write(&priv->mib_sem);
+ priv->wpa = *uwrq;
+
+ switch (priv->wpa) {
+ default:
+ case 0: /* Clears/disables WPA and friends */
+ wep = 0;
+ filter = 0; /* Do not filter un-encrypted data */
+ dot1x = 0;
+ mlme = DOT11_MLME_AUTO;
+ printk("%s: Disabling WPA\n", ndev->name);
+ break;
+ case 2:
+ case 1: /* WPA */
+ printk("%s: Enabling WPA\n", ndev->name);
+ break;
+ }
+ up_write(&priv->mib_sem);
+
+ mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
+ mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &wep);
+ mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &filter);
+ mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
+ mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlme);
+
+ return 0;
+}
+
+static int
+prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ *uwrq = priv->wpa;
+ return 0;
+}
+
+static int
+prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ priv->monitor_type =
+ (*uwrq ? ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211);
+ if (priv->iw_mode == IW_MODE_MONITOR)
+ priv->ndev->type = priv->monitor_type;
+
+ return 0;
+}
+
+static int
+prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ *uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM);
+ return 0;
+}
+
+static int
+prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ priv->priv_oid = *uwrq;
+ printk("%s: oid 0x%08X\n", ndev->name, *uwrq);
+
+ return 0;
+}
+
+static int
+prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_mgmtframe *response;
+ int ret = -EIO;
+
+ printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid);
+ data->length = 0;
+
+ if (islpci_get_state(priv) >= PRV_STATE_INIT) {
+ ret =
+ islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
+ priv->priv_oid, extra, 256,
+ &response);
+ printk("%s: ret: %i\n", ndev->name, ret);
+ if (ret || !response
+ || response->header->operation == PIMFOR_OP_ERROR) {
+ if (response) {
+ islpci_mgt_release(response);
+ }
+ printk("%s: EIO\n", ndev->name);
+ ret = -EIO;
+ }
+ if (!ret) {
+ data->length = response->header->length;
+ memcpy(extra, response->data, data->length);
+ islpci_mgt_release(response);
+ printk("%s: len: %i\n", ndev->name, data->length);
+ }
+ }
+
+ return ret;
+}
+
+static int
+prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct islpci_mgmtframe *response;
+ int ret = 0, response_op = PIMFOR_OP_ERROR;
+
+ printk("%s: set_oid 0x%08X\tlen: %d\n", ndev->name, priv->priv_oid,
+ data->length);
+
+ if (islpci_get_state(priv) >= PRV_STATE_INIT) {
+ ret =
+ islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET,
+ priv->priv_oid, extra, data->length,
+ &response);
+ printk("%s: ret: %i\n", ndev->name, ret);
+ if (ret || !response
+ || response->header->operation == PIMFOR_OP_ERROR) {
+ if (response) {
+ islpci_mgt_release(response);
+ }
+ printk("%s: EIO\n", ndev->name);
+ ret = -EIO;
+ }
+ if (!ret) {
+ response_op = response->header->operation;
+ printk("%s: response_op: %i\n", ndev->name,
+ response_op);
+ islpci_mgt_release(response);
+ }
+ }
+
+ return (ret ? ret : -EINPROGRESS);
+}
+
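+/* Wrap the generic iw_handler_set_spy: the INL_CONFIG_RXANNEX bit (extra
+ * per-frame receive info, apparently needed for the spy statistics) is enabled
+ * in the firmware only while at least one spy address is configured. */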
+static int
+prism54_set_spy(struct net_device *ndev,
+ struct iw_request_info *info,
+ union iwreq_data *uwrq, char *extra)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ u32 u, oid = OID_INL_CONFIG;
+
+ down_write(&priv->mib_sem);
+ mgt_get(priv, OID_INL_CONFIG, &u);
+
+ if ((uwrq->data.length == 0) && (priv->spy_data.spy_number > 0))
+ /* disable spy */
+ u &= ~INL_CONFIG_RXANNEX;
+ else if ((uwrq->data.length > 0) && (priv->spy_data.spy_number == 0))
+ /* enable spy */
+ u |= INL_CONFIG_RXANNEX;
+
+ mgt_set(priv, OID_INL_CONFIG, &u);
+ mgt_commit_list(priv, &oid, 1);
+ up_write(&priv->mib_sem);
+
+ return iw_handler_set_spy(ndev, info, uwrq, extra);
+}
+
+static const iw_handler prism54_handler[] = {
+ (iw_handler) prism54_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) prism54_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) prism54_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) prism54_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) prism54_set_mode, /* SIOCSIWMODE */
+ (iw_handler) prism54_get_mode, /* SIOCGIWMODE */
+ (iw_handler) prism54_set_sens, /* SIOCSIWSENS */
+ (iw_handler) prism54_get_sens, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) prism54_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ prism54_set_spy, /* SIOCSIWSPY */
+ iw_handler_get_spy, /* SIOCGIWSPY */
+ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
+ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
+ (iw_handler) prism54_set_wap, /* SIOCSIWAP */
+ (iw_handler) prism54_get_wap, /* SIOCGIWAP */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
+ (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
+ (iw_handler) prism54_get_essid, /* SIOCGIWESSID */
+ (iw_handler) prism54_set_nick, /* SIOCSIWNICKN */
+ (iw_handler) prism54_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) prism54_set_rate, /* SIOCSIWRATE */
+ (iw_handler) prism54_get_rate, /* SIOCGIWRATE */
+ (iw_handler) prism54_set_rts, /* SIOCSIWRTS */
+ (iw_handler) prism54_get_rts, /* SIOCGIWRTS */
+ (iw_handler) prism54_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) prism54_get_frag, /* SIOCGIWFRAG */
+ (iw_handler) prism54_set_txpower, /* SIOCSIWTXPOW */
+ (iw_handler) prism54_get_txpower, /* SIOCGIWTXPOW */
+ (iw_handler) prism54_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) prism54_get_retry, /* SIOCGIWRETRY */
+ (iw_handler) prism54_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) NULL, /* SIOCSIWPOWER */
+ (iw_handler) NULL, /* SIOCGIWPOWER */
+};
+
+/* The low order bit identify a SET (0) or a GET (1) ioctl. */
+
+#define PRISM54_RESET SIOCIWFIRSTPRIV
+#define PRISM54_GET_POLICY SIOCIWFIRSTPRIV+1
+#define PRISM54_SET_POLICY SIOCIWFIRSTPRIV+2
+#define PRISM54_GET_MAC SIOCIWFIRSTPRIV+3
+#define PRISM54_ADD_MAC SIOCIWFIRSTPRIV+4
+
+#define PRISM54_DEL_MAC SIOCIWFIRSTPRIV+6
+
+#define PRISM54_KICK_MAC SIOCIWFIRSTPRIV+8
+
+#define PRISM54_KICK_ALL SIOCIWFIRSTPRIV+10
+
+#define PRISM54_GET_WPA SIOCIWFIRSTPRIV+11
+#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
+
+#define PRISM54_DBG_OID SIOCIWFIRSTPRIV+14
+#define PRISM54_DBG_GET_OID SIOCIWFIRSTPRIV+15
+#define PRISM54_DBG_SET_OID SIOCIWFIRSTPRIV+16
+
+#define PRISM54_GET_OID SIOCIWFIRSTPRIV+17
+#define PRISM54_SET_OID_U32 SIOCIWFIRSTPRIV+18
+#define PRISM54_SET_OID_STR SIOCIWFIRSTPRIV+20
+#define PRISM54_SET_OID_ADDR SIOCIWFIRSTPRIV+22
+
+#define PRISM54_GET_PRISMHDR SIOCIWFIRSTPRIV+23
+#define PRISM54_SET_PRISMHDR SIOCIWFIRSTPRIV+24
+
+#define IWPRIV_SET_U32(n,x) { n, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
+#define IWPRIV_SET_SSID(n,x) { n, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
+#define IWPRIV_SET_ADDR(n,x) { n, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
+#define IWPRIV_GET(n,x) { n, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_"x }
+
+#define IWPRIV_U32(n,x) IWPRIV_SET_U32(n,x), IWPRIV_GET(n,x)
+#define IWPRIV_SSID(n,x) IWPRIV_SET_SSID(n,x), IWPRIV_GET(n,x)
+#define IWPRIV_ADDR(n,x) IWPRIV_SET_ADDR(n,x), IWPRIV_GET(n,x)
+
+/* Note : limited to 128 private ioctls (wireless tools 26) */
+
+static const struct iw_priv_args prism54_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ {PRISM54_RESET, 0, 0, "reset"},
+ {PRISM54_GET_PRISMHDR, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_prismhdr"},
+ {PRISM54_SET_PRISMHDR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
+ "set_prismhdr"},
+ {PRISM54_GET_POLICY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "getPolicy"},
+ {PRISM54_SET_POLICY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
+ "setPolicy"},
+ {PRISM54_GET_MAC, 0, IW_PRIV_TYPE_ADDR | 64, "getMac"},
+ {PRISM54_ADD_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
+ "addMac"},
+ {PRISM54_DEL_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
+ "delMac"},
+ {PRISM54_KICK_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
+ "kickMac"},
+ {PRISM54_KICK_ALL, 0, 0, "kickAll"},
+ {PRISM54_GET_WPA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_wpa"},
+ {PRISM54_SET_WPA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
+ "set_wpa"},
+ {PRISM54_DBG_OID, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
+ "dbg_oid"},
+ {PRISM54_DBG_GET_OID, 0, IW_PRIV_TYPE_BYTE | 256, "dbg_get_oid"},
+ {PRISM54_DBG_SET_OID, IW_PRIV_TYPE_BYTE | 256, 0, "dbg_set_oid"},
+ /* --- sub-ioctls handlers --- */
+ {PRISM54_GET_OID,
+ 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, ""},
+ {PRISM54_SET_OID_U32,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, ""},
+ {PRISM54_SET_OID_STR,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, ""},
+ {PRISM54_SET_OID_ADDR,
+ IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, ""},
+ /* --- sub-ioctls definitions --- */
+ IWPRIV_ADDR(GEN_OID_MACADDRESS, "addr"),
+ IWPRIV_GET(GEN_OID_LINKSTATE, "linkstate"),
+ IWPRIV_U32(DOT11_OID_BSSTYPE, "bsstype"),
+ IWPRIV_ADDR(DOT11_OID_BSSID, "bssid"),
+ IWPRIV_U32(DOT11_OID_STATE, "state"),
+ IWPRIV_U32(DOT11_OID_AID, "aid"),
+
+ IWPRIV_SSID(DOT11_OID_SSIDOVERRIDE, "ssidoverride"),
+
+ IWPRIV_U32(DOT11_OID_MEDIUMLIMIT, "medlimit"),
+ IWPRIV_U32(DOT11_OID_BEACONPERIOD, "beacon"),
+ IWPRIV_U32(DOT11_OID_DTIMPERIOD, "dtimperiod"),
+
+ IWPRIV_U32(DOT11_OID_AUTHENABLE, "authenable"),
+ IWPRIV_U32(DOT11_OID_PRIVACYINVOKED, "privinvok"),
+ IWPRIV_U32(DOT11_OID_EXUNENCRYPTED, "exunencrypt"),
+
+ IWPRIV_U32(DOT11_OID_REKEYTHRESHOLD, "rekeythresh"),
+
+ IWPRIV_U32(DOT11_OID_MAXTXLIFETIME, "maxtxlife"),
+ IWPRIV_U32(DOT11_OID_MAXRXLIFETIME, "maxrxlife"),
+ IWPRIV_U32(DOT11_OID_ALOFT_FIXEDRATE, "fixedrate"),
+ IWPRIV_U32(DOT11_OID_MAXFRAMEBURST, "frameburst"),
+ IWPRIV_U32(DOT11_OID_PSM, "psm"),
+
+ IWPRIV_U32(DOT11_OID_BRIDGELOCAL, "bridge"),
+ IWPRIV_U32(DOT11_OID_CLIENTS, "clients"),
+ IWPRIV_U32(DOT11_OID_CLIENTSASSOCIATED, "clientassoc"),
+ IWPRIV_U32(DOT11_OID_DOT1XENABLE, "dot1xenable"),
+ IWPRIV_U32(DOT11_OID_ANTENNARX, "rxant"),
+ IWPRIV_U32(DOT11_OID_ANTENNATX, "txant"),
+ IWPRIV_U32(DOT11_OID_ANTENNADIVERSITY, "antdivers"),
+ IWPRIV_U32(DOT11_OID_EDTHRESHOLD, "edthresh"),
+ IWPRIV_U32(DOT11_OID_PREAMBLESETTINGS, "preamble"),
+ IWPRIV_GET(DOT11_OID_RATES, "rates"),
+ IWPRIV_U32(DOT11_OID_OUTPUTPOWER, ".11outpower"),
+ IWPRIV_GET(DOT11_OID_SUPPORTEDRATES, "supprates"),
+ IWPRIV_GET(DOT11_OID_SUPPORTEDFREQUENCIES, "suppfreq"),
+
+ IWPRIV_U32(DOT11_OID_NOISEFLOOR, "noisefloor"),
+ IWPRIV_GET(DOT11_OID_FREQUENCYACTIVITY, "freqactivity"),
+ IWPRIV_U32(DOT11_OID_NONERPPROTECTION, "nonerpprotec"),
+ IWPRIV_U32(DOT11_OID_PROFILES, "profile"),
+ IWPRIV_GET(DOT11_OID_EXTENDEDRATES, "extrates"),
+ IWPRIV_U32(DOT11_OID_MLMEAUTOLEVEL, "mlmelevel"),
+
+ IWPRIV_GET(DOT11_OID_BSSS, "bsss"),
+ IWPRIV_GET(DOT11_OID_BSSLIST, "bsslist"),
+ IWPRIV_U32(OID_INL_MODE, "mode"),
+ IWPRIV_U32(OID_INL_CONFIG, "config"),
+ IWPRIV_U32(OID_INL_DOT11D_CONFORMANCE, ".11dconform"),
+ IWPRIV_GET(OID_INL_PHYCAPABILITIES, "phycapa"),
+ IWPRIV_U32(OID_INL_OUTPUTPOWER, "outpower"),
+};
+
+static const iw_handler prism54_private_handler[] = {
+ (iw_handler) prism54_reset,
+ (iw_handler) prism54_get_policy,
+ (iw_handler) prism54_set_policy,
+ (iw_handler) prism54_get_mac,
+ (iw_handler) prism54_add_mac,
+ (iw_handler) NULL,
+ (iw_handler) prism54_del_mac,
+ (iw_handler) NULL,
+ (iw_handler) prism54_kick_mac,
+ (iw_handler) NULL,
+ (iw_handler) prism54_kick_all,
+ (iw_handler) prism54_get_wpa,
+ (iw_handler) prism54_set_wpa,
+ (iw_handler) NULL,
+ (iw_handler) prism54_debug_oid,
+ (iw_handler) prism54_debug_get_oid,
+ (iw_handler) prism54_debug_set_oid,
+ (iw_handler) prism54_get_oid,
+ (iw_handler) prism54_set_u32,
+ (iw_handler) NULL,
+ (iw_handler) prism54_set_raw,
+ (iw_handler) NULL,
+ (iw_handler) prism54_set_raw,
+ (iw_handler) prism54_get_prismhdr,
+ (iw_handler) prism54_set_prismhdr,
+};
+
+const struct iw_handler_def prism54_handler_def = {
+ .num_standard = sizeof (prism54_handler) / sizeof (iw_handler),
+ .num_private = sizeof (prism54_private_handler) / sizeof (iw_handler),
+ .num_private_args =
+ sizeof (prism54_private_args) / sizeof (struct iw_priv_args),
+ .standard = (iw_handler *) prism54_handler,
+ .private = (iw_handler *) prism54_private_handler,
+ .private_args = (struct iw_priv_args *) prism54_private_args,
+#if WIRELESS_EXT == 16
+ .spy_offset = offsetof(islpci_private, spy_data),
+#endif /* WIRELESS_EXT == 16 */
+};
+
+/* For wpa_supplicant */
+
+int
+prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *) rq;
+ int ret = -1;
+ switch (cmd) {
+ case PRISM54_HOSTAPD:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ ret = prism54_hostapd(ndev, &wrq->u.data);
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
new file mode 100644
index 000000000000..46d5cde80c85
--- /dev/null
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * (C) 2003 Aurelien Alleaume <slts@free.fr>
+ * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ISL_IOCTL_H
+#define _ISL_IOCTL_H
+
+#include "islpci_mgt.h"
+#include "islpci_dev.h"
+
+#include <net/iw_handler.h> /* New driver API */
+
+#define SUPPORTED_WIRELESS_EXT 16
+
+void prism54_mib_init(islpci_private *);
+
+struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
+void prism54_update_stats(islpci_private *);
+
+void prism54_acl_init(struct islpci_acl *);
+void prism54_acl_clean(struct islpci_acl *);
+
+void prism54_process_trap(void *);
+
+void prism54_wpa_ie_init(islpci_private *priv);
+void prism54_wpa_ie_clean(islpci_private *priv);
+
+int prism54_set_mac_address(struct net_device *, void *);
+
+int prism54_ioctl(struct net_device *, struct ifreq *, int);
+
+extern const struct iw_handler_def prism54_handler_def;
+
+#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
new file mode 100644
index 000000000000..419edf7ccf1a
--- /dev/null
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -0,0 +1,507 @@
+/*
+ *
+ *
+ * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
+ * Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#if !defined(_ISL_OID_H)
+#define _ISL_OID_H
+
+/*
+ * MIB related constant and structure definitions for communicating
+ * with the device firmware
+ */
+
+struct obj_ssid {
+ u8 length;
+ char octets[33];
+} __attribute__ ((packed));
+
+struct obj_key {
+ u8 type; /* dot11_priv_t */
+ u8 length;
+ char key[32];
+} __attribute__ ((packed));
+
+struct obj_mlme {
+ u8 address[6];
+ u16 id;
+ u16 state;
+ u16 code;
+} __attribute__ ((packed));
+
+struct obj_mlmeex {
+ u8 address[6];
+ u16 id;
+ u16 state;
+ u16 code;
+ u16 size;
+ u8 data[0];
+} __attribute__ ((packed));
+
+struct obj_buffer {
+ u32 size;
+ u32 addr; /* 32bit bus address */
+} __attribute__ ((packed));
+
+struct obj_bss {
+ u8 address[6];
+ int:16; /* padding */
+
+ char state;
+ char reserved;
+ short age;
+
+ char quality;
+ char rssi;
+
+ struct obj_ssid ssid;
+ short channel;
+ char beacon_period;
+ char dtim_period;
+ short capinfo;
+ short rates;
+ short basic_rates;
+ int:16; /* padding */
+} __attribute__ ((packed));
+
+struct obj_bsslist {
+ u32 nr;
+ struct obj_bss bsslist[0];
+} __attribute__ ((packed));
+
+struct obj_frequencies {
+ u16 nr;
+ u16 mhz[0];
+} __attribute__ ((packed));
+
+struct obj_attachment {
+ char type;
+ char reserved;
+ short id;
+ short size;
+ char data[0];
+} __attribute__((packed));
+
+/*
+ * in case everything's ok, the inlined function below will be
+ * optimized away by the compiler...
+ */
+static inline void
+__bug_on_wrong_struct_sizes(void)
+{
+ BUG_ON(sizeof (struct obj_ssid) != 34);
+ BUG_ON(sizeof (struct obj_key) != 34);
+ BUG_ON(sizeof (struct obj_mlme) != 12);
+ BUG_ON(sizeof (struct obj_mlmeex) != 14);
+ BUG_ON(sizeof (struct obj_buffer) != 8);
+ BUG_ON(sizeof (struct obj_bss) != 60);
+ BUG_ON(sizeof (struct obj_bsslist) != 4);
+ BUG_ON(sizeof (struct obj_frequencies) != 2);
+}
+
+enum dot11_state_t {
+ DOT11_STATE_NONE = 0,
+ DOT11_STATE_AUTHING = 1,
+ DOT11_STATE_AUTH = 2,
+ DOT11_STATE_ASSOCING = 3,
+
+ DOT11_STATE_ASSOC = 5,
+ DOT11_STATE_IBSS = 6,
+ DOT11_STATE_WDS = 7
+};
+
+enum dot11_bsstype_t {
+ DOT11_BSSTYPE_NONE = 0,
+ DOT11_BSSTYPE_INFRA = 1,
+ DOT11_BSSTYPE_IBSS = 2,
+ DOT11_BSSTYPE_ANY = 3
+};
+
+enum dot11_auth_t {
+ DOT11_AUTH_NONE = 0,
+ DOT11_AUTH_OS = 1,
+ DOT11_AUTH_SK = 2,
+ DOT11_AUTH_BOTH = 3
+};
+
+enum dot11_mlme_t {
+ DOT11_MLME_AUTO = 0,
+ DOT11_MLME_INTERMEDIATE = 1,
+ DOT11_MLME_EXTENDED = 2
+};
+
+enum dot11_priv_t {
+ DOT11_PRIV_WEP = 0,
+ DOT11_PRIV_TKIP = 1
+};
+
+/* Prism "Nitro" / Frameburst / "Packet Frame Grouping"
+ * Value is in microseconds. It is the number of microseconds the
+ * firmware will spend grouping frames before sending them out
+ * together with a CSMA contention. Without this, every frame is
+ * sent with its own CSMA contention.
+ * Bibliography:
+ * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
+ */
+enum dot11_maxframeburst_t {
+ /* Values for DOT11_OID_MAXFRAMEBURST */
+ DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */
+ DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */
+ DOT11_MAXFRAMEBURST_IDEAL = 1300, /* Theoretical ideal level */
+ DOT11_MAXFRAMEBURST_MAX = 5000, /* Use this as max,
+ * Note: firmware allows for greater values. This is a
+ * recommended max. I'll update this as I find
+ * out what the real MAX is. Also note that you don't necessarily
+ * get better results with a greater value here.
+ */
+};
+
+/* Support for 802.11 long and short frame preambles.
+ * Long preamble uses 128-bit sync field, 8-bit CRC
+ * Short preamble uses 56-bit sync field, 16-bit CRC
+ *
+ * 802.11a -- not sure, both optionally ?
+ * 802.11b supports long and optionally short
+ * 802.11g supports both */
+enum dot11_preamblesettings_t {
+ DOT11_PREAMBLESETTING_LONG = 0,
+ /* Allows *only* long 802.11 preambles */
+ DOT11_PREAMBLESETTING_SHORT = 1,
+ /* Allows *only* short 802.11 preambles */
+ DOT11_PREAMBLESETTING_DYNAMIC = 2
+ /* Automatically set */
+};
+
+/* Support for 802.11 slot timing (time between packets).
+ *
+ * Long uses the 802.11b slot timing (20 usec)
+ * Short uses the 802.11a slot timing (9 usec) */
+enum dot11_slotsettings_t {
+ DOT11_SLOTSETTINGS_LONG = 0,
+ /* Allows *only* long 802.11b slot timing */
+ DOT11_SLOTSETTINGS_SHORT = 1,
+ /* Allows *only* short 802.11a slot timing */
+ DOT11_SLOTSETTINGS_DYNAMIC = 2
+ /* Automatically set */
+};
+
+/* All you need to know, ERP is "Extended Rate PHY".
+ * An Extended Rate PHY (ERP) STA or AP shall support three different
+ * preamble and header formats:
+ * Long preamble (refer to above)
+ * Short preamble (refer to above)
+ * OFDM preamble ( ? )
+ *
+ * I'm assuming here that Protection tells the AP to be careful
+ * because a STA that cannot handle the long preamble has
+ * joined.
+ */
+enum do11_nonerpstatus_t {
+ DOT11_ERPSTAT_NONEPRESENT = 0,
+ DOT11_ERPSTAT_USEPROTECTION = 1
+};
+
+/* (ERP is "Extended Rate PHY".) The way to read NONERP is NON-ERP-*.
+ * The key here is that DOT11_NONERP_NEVER never protects against
+ * non-ERP STAs. You *don't* want this unless
+ * you know what you are doing. It means you will only
+ * get Extended Rate capabilities. */
+enum dot11_nonerpprotection_t {
+ DOT11_NONERP_NEVER = 0,
+ DOT11_NONERP_ALWAYS = 1,
+ DOT11_NONERP_DYNAMIC = 2
+};
+
+/* Preset OID configuration for 802.11 modes
+ * Note: DOT11_OID_CW[MIN|MAX] hold the values of the
+ * DCS MIN|MAX backoff used */
+enum dot11_profile_t { /* And set/allowed values */
+ /* Allowed values for DOT11_OID_PROFILES */
+ DOT11_PROFILE_B_ONLY = 0,
+ /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps
+ * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC
+ * DOT11_OID_CWMIN: 31
+ * DOT11_OID_NONERPPROTECTION: DOT11_NONERP_DYNAMIC
+ * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_LONG
+ */
+ DOT11_PROFILE_MIXED_G_WIFI = 1,
+ /* DOT11_OID_RATES: 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54Mbs
+ * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC
+ * DOT11_OID_CWMIN: 15
+ * DOT11_OID_NONERPPROTECTION: DOT11_NONERP_DYNAMIC
+ * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_DYNAMIC
+ */
+ DOT11_PROFILE_MIXED_LONG = 2, /* "Long range" */
+ /* Same as Profile MIXED_G_WIFI */
+ DOT11_PROFILE_G_ONLY = 3,
+ /* Same as Profile MIXED_G_WIFI */
+ DOT11_PROFILE_TEST = 4,
+ /* Same as Profile MIXED_G_WIFI except:
+ * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_SHORT
+ * DOT11_OID_NONERPPROTECTION: DOT11_NONERP_NEVER
+ * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_SHORT
+ */
+ DOT11_PROFILE_B_WIFI = 5,
+ /* Same as Profile B_ONLY */
+ DOT11_PROFILE_A_ONLY = 6,
+ /* Same as Profile MIXED_G_WIFI except:
+ * DOT11_OID_RATES: 6, 9, 12, 18, 24, 36, 48, 54Mbs
+ */
+ DOT11_PROFILE_MIXED_SHORT = 7
+ /* Same as MIXED_G_WIFI */
+};
+
+
+/* The dot11d conformance level configures how strictly the card adheres to
+ * 802.11d. The following conformance levels exist: */
+enum oid_inl_conformance_t {
+ OID_INL_CONFORMANCE_NONE = 0, /* Perform active scanning */
+ OID_INL_CONFORMANCE_STRICT = 1, /* Strictly adhere to 802.11d */
+ OID_INL_CONFORMANCE_FLEXIBLE = 2, /* Use passed 802.11d info to
+ * determine channels AND/OR just assume that actively used
+ * channels are valid channels */
+};
+
+enum oid_inl_mode_t {
+ INL_MODE_NONE = -1,
+ INL_MODE_PROMISCUOUS = 0,
+ INL_MODE_CLIENT = 1,
+ INL_MODE_AP = 2,
+ INL_MODE_SNIFFER = 3
+};
+
+enum oid_inl_config_t {
+ INL_CONFIG_NOTHING = 0x00,
+ INL_CONFIG_MANUALRUN = 0x01,
+ INL_CONFIG_FRAMETRAP = 0x02,
+ INL_CONFIG_RXANNEX = 0x04,
+ INL_CONFIG_TXANNEX = 0x08,
+ INL_CONFIG_WDS = 0x10
+};
+
+enum oid_inl_phycap_t {
+ INL_PHYCAP_2400MHZ = 1,
+ INL_PHYCAP_5000MHZ = 2,
+ INL_PHYCAP_FAA = 0x80000000, /* Means card supports the FAA switch */
+};
+
+
+enum oid_num_t {
+ GEN_OID_MACADDRESS = 0,
+ GEN_OID_LINKSTATE,
+ GEN_OID_WATCHDOG,
+ GEN_OID_MIBOP,
+ GEN_OID_OPTIONS,
+ GEN_OID_LEDCONFIG,
+
+ /* 802.11 */
+ DOT11_OID_BSSTYPE,
+ DOT11_OID_BSSID,
+ DOT11_OID_SSID,
+ DOT11_OID_STATE,
+ DOT11_OID_AID,
+ DOT11_OID_COUNTRYSTRING,
+ DOT11_OID_SSIDOVERRIDE,
+
+ DOT11_OID_MEDIUMLIMIT,
+ DOT11_OID_BEACONPERIOD,
+ DOT11_OID_DTIMPERIOD,
+ DOT11_OID_ATIMWINDOW,
+ DOT11_OID_LISTENINTERVAL,
+ DOT11_OID_CFPPERIOD,
+ DOT11_OID_CFPDURATION,
+
+ DOT11_OID_AUTHENABLE,
+ DOT11_OID_PRIVACYINVOKED,
+ DOT11_OID_EXUNENCRYPTED,
+ DOT11_OID_DEFKEYID,
+ DOT11_OID_DEFKEYX, /* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */
+ DOT11_OID_STAKEY,
+ DOT11_OID_REKEYTHRESHOLD,
+ DOT11_OID_STASC,
+
+ DOT11_OID_PRIVTXREJECTED,
+ DOT11_OID_PRIVRXPLAIN,
+ DOT11_OID_PRIVRXFAILED,
+ DOT11_OID_PRIVRXNOKEY,
+
+ DOT11_OID_RTSTHRESH,
+ DOT11_OID_FRAGTHRESH,
+ DOT11_OID_SHORTRETRIES,
+ DOT11_OID_LONGRETRIES,
+ DOT11_OID_MAXTXLIFETIME,
+ DOT11_OID_MAXRXLIFETIME,
+ DOT11_OID_AUTHRESPTIMEOUT,
+ DOT11_OID_ASSOCRESPTIMEOUT,
+
+ DOT11_OID_ALOFT_TABLE,
+ DOT11_OID_ALOFT_CTRL_TABLE,
+ DOT11_OID_ALOFT_RETREAT,
+ DOT11_OID_ALOFT_PROGRESS,
+ DOT11_OID_ALOFT_FIXEDRATE,
+ DOT11_OID_ALOFT_RSSIGRAPH,
+ DOT11_OID_ALOFT_CONFIG,
+
+ DOT11_OID_VDCFX,
+ DOT11_OID_MAXFRAMEBURST,
+
+ DOT11_OID_PSM,
+ DOT11_OID_CAMTIMEOUT,
+ DOT11_OID_RECEIVEDTIMS,
+ DOT11_OID_ROAMPREFERENCE,
+
+ DOT11_OID_BRIDGELOCAL,
+ DOT11_OID_CLIENTS,
+ DOT11_OID_CLIENTSASSOCIATED,
+ DOT11_OID_CLIENTX, /* DOT11_OID_CLIENT1,...DOT11_OID_CLIENT2007 */
+
+ DOT11_OID_CLIENTFIND,
+ DOT11_OID_WDSLINKADD,
+ DOT11_OID_WDSLINKREMOVE,
+ DOT11_OID_EAPAUTHSTA,
+ DOT11_OID_EAPUNAUTHSTA,
+ DOT11_OID_DOT1XENABLE,
+ DOT11_OID_MICFAILURE,
+ DOT11_OID_REKEYINDICATE,
+
+ DOT11_OID_MPDUTXSUCCESSFUL,
+ DOT11_OID_MPDUTXONERETRY,
+ DOT11_OID_MPDUTXMULTIPLERETRIES,
+ DOT11_OID_MPDUTXFAILED,
+ DOT11_OID_MPDURXSUCCESSFUL,
+ DOT11_OID_MPDURXDUPS,
+ DOT11_OID_RTSSUCCESSFUL,
+ DOT11_OID_RTSFAILED,
+ DOT11_OID_ACKFAILED,
+ DOT11_OID_FRAMERECEIVES,
+ DOT11_OID_FRAMEERRORS,
+ DOT11_OID_FRAMEABORTS,
+ DOT11_OID_FRAMEABORTSPHY,
+
+ DOT11_OID_SLOTTIME,
+ DOT11_OID_CWMIN, /* MIN DCS backoff */
+ DOT11_OID_CWMAX, /* MAX DCS backoff */
+ DOT11_OID_ACKWINDOW,
+ DOT11_OID_ANTENNARX,
+ DOT11_OID_ANTENNATX,
+ DOT11_OID_ANTENNADIVERSITY,
+ DOT11_OID_CHANNEL,
+ DOT11_OID_EDTHRESHOLD,
+ DOT11_OID_PREAMBLESETTINGS,
+ DOT11_OID_RATES,
+ DOT11_OID_CCAMODESUPPORTED,
+ DOT11_OID_CCAMODE,
+ DOT11_OID_RSSIVECTOR,
+ DOT11_OID_OUTPUTPOWERTABLE,
+ DOT11_OID_OUTPUTPOWER,
+ DOT11_OID_SUPPORTEDRATES,
+ DOT11_OID_FREQUENCY,
+ DOT11_OID_SUPPORTEDFREQUENCIES,
+ DOT11_OID_NOISEFLOOR,
+ DOT11_OID_FREQUENCYACTIVITY,
+ DOT11_OID_IQCALIBRATIONTABLE,
+ DOT11_OID_NONERPPROTECTION,
+ DOT11_OID_SLOTSETTINGS,
+ DOT11_OID_NONERPTIMEOUT,
+ DOT11_OID_PROFILES,
+ DOT11_OID_EXTENDEDRATES,
+
+ DOT11_OID_DEAUTHENTICATE,
+ DOT11_OID_AUTHENTICATE,
+ DOT11_OID_DISASSOCIATE,
+ DOT11_OID_ASSOCIATE,
+ DOT11_OID_SCAN,
+ DOT11_OID_BEACON,
+ DOT11_OID_PROBE,
+ DOT11_OID_DEAUTHENTICATEEX,
+ DOT11_OID_AUTHENTICATEEX,
+ DOT11_OID_DISASSOCIATEEX,
+ DOT11_OID_ASSOCIATEEX,
+ DOT11_OID_REASSOCIATE,
+ DOT11_OID_REASSOCIATEEX,
+
+ DOT11_OID_NONERPSTATUS,
+
+ DOT11_OID_STATIMEOUT,
+ DOT11_OID_MLMEAUTOLEVEL,
+ DOT11_OID_BSSTIMEOUT,
+ DOT11_OID_ATTACHMENT,
+ DOT11_OID_PSMBUFFER,
+
+ DOT11_OID_BSSS,
+ DOT11_OID_BSSX, /*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */
+ DOT11_OID_BSSFIND,
+ DOT11_OID_BSSLIST,
+
+ OID_INL_TUNNEL,
+ OID_INL_MEMADDR,
+ OID_INL_MEMORY,
+ OID_INL_MODE,
+ OID_INL_COMPONENT_NR,
+ OID_INL_VERSION,
+ OID_INL_INTERFACE_ID,
+ OID_INL_COMPONENT_ID,
+ OID_INL_CONFIG,
+ OID_INL_DOT11D_CONFORMANCE,
+ OID_INL_PHYCAPABILITIES,
+ OID_INL_OUTPUTPOWER,
+
+ OID_NUM_LAST
+};
+
+#define OID_FLAG_CACHED 0x80
+#define OID_FLAG_TYPE 0x7f
+
+#define OID_TYPE_U32 0x01
+#define OID_TYPE_SSID 0x02
+#define OID_TYPE_KEY 0x03
+#define OID_TYPE_BUFFER 0x04
+#define OID_TYPE_BSS 0x05
+#define OID_TYPE_BSSLIST 0x06
+#define OID_TYPE_FREQUENCIES 0x07
+#define OID_TYPE_MLME 0x08
+#define OID_TYPE_MLMEEX 0x09
+#define OID_TYPE_ADDR 0x0A
+#define OID_TYPE_RAW 0x0B
+#define OID_TYPE_ATTACH 0x0C
+
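+/* The low bits of oid_t.flags (masked by OID_FLAG_TYPE) hold one of the
+ * OID_TYPE_* codes above, while OID_FLAG_CACHED marks an OID as cached. */
+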
+/* OID_TYPE_MLMEEX is special because of a variable size field when sending.
+ * Not yet implemented (not used in driver anyway).
+ */
+
+struct oid_t {
+ enum oid_num_t oid;
+ short range; /* to define a range of OIDs */
+ short size; /* max size of the associated data */
+ char flags;
+};
+
+union oid_res_t {
+ void *ptr;
+ u32 u;
+};
+
+#define IWMAX_BITRATES 20
+#define IWMAX_BSS 24
+#define IWMAX_FREQ 30
+#define PRIV_STR_SIZE 1024
+
+#endif /* !defined(_ISL_OID_H) */
+/* EOF */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
new file mode 100644
index 000000000000..efab07e9e24e
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -0,0 +1,956 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
+ * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/if_arp.h>
+
+#include <asm/io.h>
+
+#include "prismcompat.h"
+#include "isl_38xx.h"
+#include "isl_ioctl.h"
+#include "islpci_dev.h"
+#include "islpci_mgt.h"
+#include "islpci_eth.h"
+#include "oid_mgt.h"
+
+#define ISL3877_IMAGE_FILE "isl3877"
+#define ISL3886_IMAGE_FILE "isl3886"
+#define ISL3890_IMAGE_FILE "isl3890"
+
+static int prism54_bring_down(islpci_private *);
+static int islpci_alloc_memory(islpci_private *);
+static struct net_device_stats *islpci_statistics(struct net_device *);
+
+/* Temporary dummy MAC address to use until firmware is loaded.
+ * The idea here is that some tools (such as nameif) may query
+ * the MAC address before the netdev is 'open'. By using a valid
+ * OUI prefix, they can process the netdev properly.
+ * Of course, this is not the final/real MAC address. It doesn't
+ * matter, as you are supposed to be able to change it anytime via
+ * ndev->set_mac_address. Jean II */
+static const unsigned char dummy_mac[6] = { 0x00, 0x30, 0xB4, 0x00, 0x00, 0x00 };
+
+static int
+isl_upload_firmware(islpci_private *priv)
+{
+ u32 reg, rc;
+ void __iomem *device_base = priv->device_base;
+
+ /* clear the RAMBoot and the Reset bit */
+ reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
+ reg &= ~ISL38XX_CTRL_STAT_RESET;
+ reg &= ~ISL38XX_CTRL_STAT_RAMBOOT;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* set the Reset bit without reading the register ! */
+ reg |= ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* clear the Reset bit */
+ reg &= ~ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+
+ /* wait a while for the device to reboot */
+ mdelay(50);
+
+ {
+ const struct firmware *fw_entry = NULL;
+ long fw_len;
+ const u32 *fw_ptr;
+
+ rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: request_firmware() failed for '%s'\n",
+ "prism54", priv->firmware);
+ return rc;
+ }
+ /* prepare the Direct Memory Base register */
+ reg = ISL38XX_DEV_FIRMWARE_ADDRES;
+
+ fw_ptr = (u32 *) fw_entry->data;
+ fw_len = fw_entry->size;
+
+ if (fw_len % 4) {
+ printk(KERN_ERR
+ "%s: firmware '%s' size is not multiple of 32bit, aborting!\n",
+ "prism54", priv->firmware);
+ release_firmware(fw_entry);
+ return -EILSEQ; /* Illegal byte sequence */
+ }
+
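+ /* copy the image through the Direct Memory Window in chunks of at most
+ * ISL38XX_MEMORY_WINDOW_SIZE bytes, moving the window base (reg) forward
+ * for each chunk */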
+ while (fw_len > 0) {
+ long _fw_len =
+ (fw_len >
+ ISL38XX_MEMORY_WINDOW_SIZE) ?
+ ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
+ u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
+
+ /* set the card's base address for writing the data */
+ isl38xx_w32_flush(device_base, reg,
+ ISL38XX_DIR_MEM_BASE_REG);
+ wmb(); /* be paranoid */
+
+ /* increment the write address for next iteration */
+ reg += _fw_len;
+ fw_len -= _fw_len;
+
+ /* write the data to the Direct Memory Window 32bit-wise */
+ /* memcpy_toio() doesn't guarantee 32bit writes :-| */
+ while (_fw_len > 0) {
+ /* use non-swapping writel() */
+ __raw_writel(*fw_ptr, dev_fw_ptr);
+ fw_ptr++, dev_fw_ptr++;
+ _fw_len -= 4;
+ }
+
+ /* flush PCI posting */
+ (void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH);
+ wmb(); /* be paranoid again */
+
+ BUG_ON(_fw_len != 0);
+ }
+
+ BUG_ON(fw_len != 0);
+
+ /* Firmware version is at offset 40 (also for "newmac") */
+ printk(KERN_DEBUG "%s: firmware version: %.8s\n",
+ priv->ndev->name, fw_entry->data + 40);
+
+ release_firmware(fw_entry);
+ }
+
+ /* now reset the device
+ * clear the Reset & ClkRun bit, set the RAMBoot bit */
+ reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
+ reg &= ~ISL38XX_CTRL_STAT_CLKRUN;
+ reg &= ~ISL38XX_CTRL_STAT_RESET;
+ reg |= ISL38XX_CTRL_STAT_RAMBOOT;
+ isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG);
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* setting the reset bit latches the host override and RAMBoot bits
+ * into the device; they take effect when the reset bit is cleared */
+ reg |= ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ /* don't do flush PCI posting here! */
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* clearing the reset bit should start the whole circus */
+ reg &= ~ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ /* don't do flush PCI posting here! */
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ return 0;
+}
+
+/******************************************************************************
+ Device Interrupt Handler
+******************************************************************************/
+
+irqreturn_t
+islpci_interrupt(int irq, void *config, struct pt_regs *regs)
+{
+ u32 reg;
+ islpci_private *priv = config;
+ struct net_device *ndev = priv->ndev;
+ void __iomem *device = priv->device_base;
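+ /* start from the powersave assumption; the interrupt sources handled
+ * below switch this to the active state when appropriate */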
+ int powerstate = ISL38XX_PSM_POWERSAVE_STATE;
+
+ /* lock the interrupt handler */
+ spin_lock(&priv->slock);
+
+ /* received an interrupt request on a shared IRQ line
+ * first check whether the device is in sleep mode */
+ reg = readl(device + ISL38XX_CTRL_STAT_REG);
+ if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
+ /* device is in sleep mode, IRQ was generated by someone else */
+ {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
+#endif
+ spin_unlock(&priv->slock);
+ return IRQ_NONE;
+ }
+
+
+ /* check whether there is any source of interrupt on the device */
+ reg = readl(device + ISL38XX_INT_IDENT_REG);
+
+ /* also check the contents of the Interrupt Enable Register, because this
+ * will filter out interrupt sources from other devices on the same irq ! */
+ reg &= readl(device + ISL38XX_INT_EN_REG);
+ reg &= ISL38XX_INT_SOURCES;
+
+ if (reg != 0) {
+ if (islpci_get_state(priv) != PRV_STATE_SLEEP)
+ powerstate = ISL38XX_PSM_ACTIVE_STATE;
+
+ /* reset the request bits in the Identification register */
+ isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS,
+ "IRQ: Identification register 0x%p 0x%x \n", device, reg);
+#endif
+
+ /* check for each bit in the register separately */
+ if (reg & ISL38XX_INT_IDENT_UPDATE) {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ /* Queue has been updated */
+ DEBUG(SHOW_TRACING, "IRQ: Update flag \n");
+
+ DEBUG(SHOW_QUEUE_INDEXES,
+ "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[0]),
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[1]),
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[2]),
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[3]),
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[4]),
+ le32_to_cpu(priv->control_block->
+ driver_curr_frag[5])
+ );
+
+ DEBUG(SHOW_QUEUE_INDEXES,
+ "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[0]),
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[1]),
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[2]),
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[3]),
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[4]),
+ le32_to_cpu(priv->control_block->
+ device_curr_frag[5])
+ );
+#endif
+
+ /* cleanup the data low transmit queue */
+ islpci_eth_cleanup_transmit(priv, priv->control_block);
+
+ /* device is in active state, update the
+ * powerstate flag if necessary */
+ powerstate = ISL38XX_PSM_ACTIVE_STATE;
+
+ /* check all three queues in priority order
+ * call the PIMFOR receive function until the
+ * queue is empty */
+ if (isl38xx_in_queue(priv->control_block,
+ ISL38XX_CB_RX_MGMTQ) != 0) {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "Received frame in Management Queue\n");
+#endif
+ islpci_mgt_receive(ndev);
+
+ islpci_mgt_cleanup_transmit(ndev);
+
+ /* Refill slots in receive queue */
+ islpci_mgmt_rx_fill(ndev);
+
+ /* no need to trigger the device, next
+ islpci_mgt_transaction does it */
+ }
+
+ while (isl38xx_in_queue(priv->control_block,
+ ISL38XX_CB_RX_DATA_LQ) != 0) {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "Received frame in Data Low Queue \n");
+#endif
+ islpci_eth_receive(priv);
+ }
+
+ /* check whether the data transmit queues were full */
+ if (priv->data_low_tx_full) {
+ /* check whether the transmit queue is no longer full */
+ if (ISL38XX_CB_TX_QSIZE -
+ isl38xx_in_queue(priv->control_block,
+ ISL38XX_CB_TX_DATA_LQ) >=
+ ISL38XX_MIN_QTHRESHOLD) {
+ /* it has room again, the driver is ready for more network frames */
+ netif_wake_queue(priv->ndev);
+
+ /* reset the full flag */
+ priv->data_low_tx_full = 0;
+ }
+ }
+ }
+
+ if (reg & ISL38XX_INT_IDENT_INIT) {
+ /* Device has been initialized */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "IRQ: Init flag, device initialized \n");
+#endif
+ wake_up(&priv->reset_done);
+ }
+
+ if (reg & ISL38XX_INT_IDENT_SLEEP) {
+ /* Device intends to move to powersave state */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
+#endif
+ isl38xx_handle_sleep_request(priv->control_block,
+ &powerstate,
+ priv->device_base);
+ }
+
+ if (reg & ISL38XX_INT_IDENT_WAKEUP) {
+ /* Device has been woken up to active state */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
+#endif
+
+ isl38xx_handle_wakeup(priv->control_block,
+ &powerstate, priv->device_base);
+ }
+ } else {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
+#endif
+ spin_unlock(&priv->slock);
+ return IRQ_NONE;
+ }
+
+ /* sleep -> ready */
+ if (islpci_get_state(priv) == PRV_STATE_SLEEP
+ && powerstate == ISL38XX_PSM_ACTIVE_STATE)
+ islpci_set_state(priv, PRV_STATE_READY);
+
+ /* !sleep -> sleep */
+ if (islpci_get_state(priv) != PRV_STATE_SLEEP
+ && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
+ islpci_set_state(priv, PRV_STATE_SLEEP);
+
+ /* unlock the interrupt handler */
+ spin_unlock(&priv->slock);
+
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ Network Interface Control & Statistical functions
+******************************************************************************/
+static int
+islpci_open(struct net_device *ndev)
+{
+ u32 rc;
+ islpci_private *priv = netdev_priv(ndev);
+
+ /* reset data structures, upload firmware and reset device */
+ rc = islpci_reset(priv,1);
+ if (rc) {
+ prism54_bring_down(priv);
+ return rc; /* Returns informative message */
+ }
+
+ netif_start_queue(ndev);
+/* netif_mark_up( ndev ); */
+
+ return 0;
+}
+
+static int
+islpci_close(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+ printk(KERN_DEBUG "%s: islpci_close ()\n", ndev->name);
+
+ netif_stop_queue(ndev);
+
+ return prism54_bring_down(priv);
+}
+
+static int
+prism54_bring_down(islpci_private *priv)
+{
+ void __iomem *device_base = priv->device_base;
+ u32 reg;
+ /* we are going to shutdown the device */
+ islpci_set_state(priv, PRV_STATE_PREBOOT);
+
+ /* disable all device interrupts in case they weren't already */
+ isl38xx_disable_interrupts(priv->device_base);
+
+ /* For safety reasons, we may want to ensure that no DMA transfer is
+ * currently in progress by emptying the TX and RX queues. */
+
+ /* wait until interrupts have finished executing on other CPUs */
+ synchronize_irq(priv->pdev->irq);
+
+ reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
+ reg &= ~(ISL38XX_CTRL_STAT_RESET | ISL38XX_CTRL_STAT_RAMBOOT);
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ reg |= ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ /* clear the Reset bit */
+ reg &= ~ISL38XX_CTRL_STAT_RESET;
+ writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
+ wmb();
+
+ /* wait a while for the device to reset */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(50*HZ/1000);
+
+ return 0;
+}
+
+static int
+islpci_upload_fw(islpci_private *priv)
+{
+ islpci_state_t old_state;
+ u32 rc;
+
+ old_state = islpci_set_state(priv, PRV_STATE_BOOT);
+
+ printk(KERN_DEBUG "%s: uploading firmware...\n", priv->ndev->name);
+
+ rc = isl_upload_firmware(priv);
+ if (rc) {
+ /* error uploading the firmware */
+ printk(KERN_ERR "%s: could not upload firmware ('%s')\n",
+ priv->ndev->name, priv->firmware);
+
+ islpci_set_state(priv, old_state);
+ return rc;
+ }
+
+ printk(KERN_DEBUG "%s: firmware upload complete\n",
+ priv->ndev->name);
+
+ islpci_set_state(priv, PRV_STATE_POSTBOOT);
+
+ return 0;
+}
+
+static int
+islpci_reset_if(islpci_private *priv)
+{
+ long remaining;
+ int result = -ETIME;
+ int count;
+
+ DEFINE_WAIT(wait);
+ prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
+
+ /* now the last step is to reset the interface */
+ isl38xx_interface_reset(priv->device_base, priv->device_host_address);
+ islpci_set_state(priv, PRV_STATE_PREINIT);
+
+ for(count = 0; count < 2 && result; count++) {
+ /* The software reset acknowledge needs about 220 msec here.
+ * Be conservative and wait for up to one second. */
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ remaining = schedule_timeout(HZ);
+
+ if(remaining > 0) {
+ result = 0;
+ break;
+ }
+
+ /* If we're here it's because our IRQ hasn't yet gone through.
+ * Retry a bit more...
+ */
+ printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
+ priv->ndev->name);
+ }
+
+ finish_wait(&priv->reset_done, &wait);
+
+ if (result) {
+ printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
+ return result;
+ }
+
+ islpci_set_state(priv, PRV_STATE_INIT);
+
+ /* Now that the device is 100% up, let's allow
+ * for the other interrupts --
+ * NOTE: this is not *yet* true since we've only allowed the
+ * INIT interrupt on the IRQ line. We can perhaps poll
+ * the IRQ line until we know for sure the reset went through */
+ isl38xx_enable_common_interrupts(priv->device_base);
+
+ down_write(&priv->mib_sem);
+ result = mgt_commit(priv);
+ if (result) {
+ printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name);
+ up_write(&priv->mib_sem);
+ return result;
+ }
+ up_write(&priv->mib_sem);
+
+ islpci_set_state(priv, PRV_STATE_READY);
+
+ printk(KERN_DEBUG "%s: interface reset complete\n", priv->ndev->name);
+ return 0;
+}
+
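+/* Reset the driver's view of the device: flush the management and data
+ * queues, optionally re-upload the firmware, and reset the interface. */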
+int
+islpci_reset(islpci_private *priv, int reload_firmware)
+{
+ isl38xx_control_block *cb = /* volatile not needed */
+ (isl38xx_control_block *) priv->control_block;
+ unsigned counter;
+ int rc;
+
+ if (reload_firmware)
+ islpci_set_state(priv, PRV_STATE_PREBOOT);
+ else
+ islpci_set_state(priv, PRV_STATE_POSTBOOT);
+
+ printk(KERN_DEBUG "%s: resetting device...\n", priv->ndev->name);
+
+ /* disable all device interrupts in case they weren't already */
+ isl38xx_disable_interrupts(priv->device_base);
+
+ /* flush all management queues */
+ priv->index_mgmt_tx = 0;
+ priv->index_mgmt_rx = 0;
+
+ /* clear the indexes in the frame pointer */
+ for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
+ cb->driver_curr_frag[counter] = cpu_to_le32(0);
+ cb->device_curr_frag[counter] = cpu_to_le32(0);
+ }
+
+ /* reset the mgmt receive queue */
+ for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
+ isl38xx_fragment *frag = &cb->rx_data_mgmt[counter];
+ frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
+ frag->flags = 0;
+ frag->address = cpu_to_le32(priv->mgmt_rx[counter].pci_addr);
+ }
+
+ for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
+ cb->rx_data_low[counter].address =
+ cpu_to_le32((u32) priv->pci_map_rx_address[counter]);
+ }
+
+ /* since the receive queues are filled with empty fragments, now we can
+ * set the corresponding indexes in the Control Block */
+ priv->control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] =
+ cpu_to_le32(ISL38XX_CB_RX_QSIZE);
+ priv->control_block->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] =
+ cpu_to_le32(ISL38XX_CB_MGMT_QSIZE);
+
+ /* reset the remaining real index registers and full flags */
+ priv->free_data_rx = 0;
+ priv->free_data_tx = 0;
+ priv->data_low_tx_full = 0;
+
+ if (reload_firmware) { /* Should we load the firmware ? */
+ /* now that the data structures are cleaned up, upload
+ * firmware and reset interface */
+ rc = islpci_upload_fw(priv);
+ if (rc) {
+ printk(KERN_ERR "%s: islpci_reset: failure\n",
+ priv->ndev->name);
+ return rc;
+ }
+ }
+
+ /* finally reset interface */
+ rc = islpci_reset_if(priv);
+ if (rc)
+ printk(KERN_ERR "prism54: Your card/socket may be faulty, or IRQ line too busy :(\n");
+ return rc;
+}
+
+static struct net_device_stats *
+islpci_statistics(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_statistics\n");
+#endif
+
+ return &priv->statistics;
+}
+
+/******************************************************************************
+ Network device configuration functions
+******************************************************************************/
+static int
+islpci_alloc_memory(islpci_private *priv)
+{
+ int counter;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ printk(KERN_DEBUG "islpci_alloc_memory\n");
+#endif
+
+ /* remap the PCI device base address so it is accessible */
+ if (!(priv->device_base =
+ ioremap(pci_resource_start(priv->pdev, 0),
+ ISL38XX_PCI_MEM_SIZE))) {
+ /* error in remapping the PCI device memory address range */
+ printk(KERN_ERR "PCI memory remapping failed \n");
+ return -1;
+ }
+
+ /* memory layout for consistent DMA region:
+ *
+ * Area 1: Control Block for the device interface
+ * Area 2: Power Save Mode Buffer for temporary frame storage. Be aware that
+ * the number of supported stations in the AP determines the minimal
+ * size of the buffer !
+ */
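+ /* Area 1 starts at driver_mem_address (device_host_address on the bus);
+ * Area 2 (device_psm_buffer) follows at offset CONTROL_BLOCK_SIZE. */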
+
+ /* perform the allocation */
+ priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
+ HOST_MEM_BLOCK,
+ &priv->
+ device_host_address);
+
+ if (!priv->driver_mem_address) {
+ /* error allocating the block of PCI memory */
+ printk(KERN_ERR "%s: could not allocate DMA memory, aborting!",
+ "prism54");
+ return -1;
+ }
+
+ /* assign the Control Block to the first address of the allocated area */
+ priv->control_block =
+ (isl38xx_control_block *) priv->driver_mem_address;
+
+ /* set the Power Save Buffer pointer directly behind the CB */
+ priv->device_psm_buffer =
+ priv->device_host_address + CONTROL_BLOCK_SIZE;
+
+ /* make sure all buffer pointers are initialized */
+ for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) {
+ priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0);
+ priv->control_block->device_curr_frag[counter] = cpu_to_le32(0);
+ }
+
+ priv->index_mgmt_rx = 0;
+ memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx));
+ memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx));
+
+ /* allocate rx queue for management frames */
+ if (islpci_mgmt_rx_fill(priv->ndev) < 0)
+ goto out_free;
+
+ /* now get the data rx skb's */
+ memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx));
+ memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address));
+
+ for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
+ struct sk_buff *skb;
+
+ /* allocate an sk_buff for storing received data frames;
+ * each received frame consists of one fragment;
+ * allow room for any required alignment operations */
+ if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) {
+ /* error allocating an sk_buff structure */
+ printk(KERN_ERR "Error allocating skb.\n");
+ skb = NULL;
+ goto out_free;
+ }
+ skb_reserve(skb, (4 - (long) skb->data) & 0x03);
+ /* add the new allocated sk_buff to the buffer array */
+ priv->data_low_rx[counter] = skb;
+
+ /* map the allocated skb data area to pci */
+ priv->pci_map_rx_address[counter] =
+ pci_map_single(priv->pdev, (void *) skb->data,
+ MAX_FRAGMENT_SIZE_RX + 2,
+ PCI_DMA_FROMDEVICE);
+ if (!priv->pci_map_rx_address[counter]) {
+ /* error mapping the buffer to device
+ accessible memory address */
+ printk(KERN_ERR "failed to map skb DMA'able\n");
+ goto out_free;
+ }
+ }
+
+ prism54_acl_init(&priv->acl);
+ prism54_wpa_ie_init(priv);
+ if (mgt_init(priv))
+ goto out_free;
+
+ return 0;
+ out_free:
+ islpci_free_memory(priv);
+ return -1;
+}
+
+int
+islpci_free_memory(islpci_private *priv)
+{
+ int counter;
+
+ if (priv->device_base)
+ iounmap(priv->device_base);
+ priv->device_base = NULL;
+
+ /* free consistent DMA area... */
+ if (priv->driver_mem_address)
+ pci_free_consistent(priv->pdev, HOST_MEM_BLOCK,
+ priv->driver_mem_address,
+ priv->device_host_address);
+
+ /* clear some dangling pointers */
+ priv->driver_mem_address = NULL;
+ priv->device_host_address = 0;
+ priv->device_psm_buffer = 0;
+ priv->control_block = NULL;
+
+ /* clean up mgmt rx buffers */
+ for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
+ struct islpci_membuf *buf = &priv->mgmt_rx[counter];
+ if (buf->pci_addr)
+ pci_unmap_single(priv->pdev, buf->pci_addr,
+ buf->size, PCI_DMA_FROMDEVICE);
+ buf->pci_addr = 0;
+ if (buf->mem)
+ kfree(buf->mem);
+ buf->size = 0;
+ buf->mem = NULL;
+ }
+
+ /* clean up data rx buffers */
+ for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
+ if (priv->pci_map_rx_address[counter])
+ pci_unmap_single(priv->pdev,
+ priv->pci_map_rx_address[counter],
+ MAX_FRAGMENT_SIZE_RX + 2,
+ PCI_DMA_FROMDEVICE);
+ priv->pci_map_rx_address[counter] = 0;
+
+ if (priv->data_low_rx[counter])
+ dev_kfree_skb(priv->data_low_rx[counter]);
+ priv->data_low_rx[counter] = NULL;
+ }
+
+ /* Free the access control list and the WPA list */
+ prism54_acl_clean(&priv->acl);
+ prism54_wpa_ie_clean(priv);
+ mgt_clean(priv);
+
+ return 0;
+}
+
+#if 0
+static void
+islpci_set_multicast_list(struct net_device *dev)
+{
+ /* put device into promisc mode and let network layer handle it */
+}
+#endif
+
+struct net_device *
+islpci_setup(struct pci_dev *pdev)
+{
+ islpci_private *priv;
+ struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));
+
+ if (!ndev)
+ return ndev;
+
+ SET_MODULE_OWNER(ndev);
+ pci_set_drvdata(pdev, ndev);
+#if defined(SET_NETDEV_DEV)
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+#endif
+
+ /* setup the structure members */
+ ndev->base_addr = pci_resource_start(pdev, 0);
+ ndev->irq = pdev->irq;
+
+ /* initialize the function pointers */
+ ndev->open = &islpci_open;
+ ndev->stop = &islpci_close;
+ ndev->get_stats = &islpci_statistics;
+ ndev->get_wireless_stats = &prism54_get_wireless_stats;
+ ndev->do_ioctl = &prism54_ioctl;
+ ndev->wireless_handlers =
+ (struct iw_handler_def *) &prism54_handler_def;
+
+ ndev->hard_start_xmit = &islpci_eth_transmit;
+ /* ndev->set_multicast_list = &islpci_set_multicast_list; */
+ ndev->addr_len = ETH_ALEN;
+ ndev->set_mac_address = &prism54_set_mac_address;
+ /* Get a non-zero dummy MAC address for nameif. Jean II */
+ memcpy(ndev->dev_addr, dummy_mac, 6);
+
+#ifdef HAVE_TX_TIMEOUT
+ ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
+ ndev->tx_timeout = &islpci_eth_tx_timeout;
+#endif
+
+ /* allocate a private device structure to the network device */
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ priv->monitor_type = ARPHRD_IEEE80211;
+ priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
+ priv->monitor_type : ARPHRD_ETHER;
+
+#if WIRELESS_EXT > 16
+ /* Add pointers to enable iwspy support. */
+ priv->wireless_data.spy_data = &priv->spy_data;
+ ndev->wireless_data = &priv->wireless_data;
+#endif /* WIRELESS_EXT > 16 */
+
+ /* save the start and end address of the PCI memory area */
+ ndev->mem_start = (unsigned long) priv->device_base;
+ ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
+#endif
+
+ init_waitqueue_head(&priv->reset_done);
+
+ /* init the queue read locks, process wait counter */
+ sema_init(&priv->mgmt_sem, 1);
+ priv->mgmt_received = NULL;
+ init_waitqueue_head(&priv->mgmt_wqueue);
+ sema_init(&priv->stats_sem, 1);
+ spin_lock_init(&priv->slock);
+
+ /* init state machine with off#1 state */
+ priv->state = PRV_STATE_OFF;
+ priv->state_off = 1;
+
+ /* initialize workqueues */
+ INIT_WORK(&priv->stats_work,
+ (void (*)(void *)) prism54_update_stats, priv);
+ priv->stats_timestamp = 0;
+
+ INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+ priv->reset_task_pending = 0;
+
+ /* allocate various memory areas */
+ if (islpci_alloc_memory(priv))
+ goto do_free_netdev;
+
+ /* select the firmware file depending on the device id */
+ switch (pdev->device) {
+ case 0x3877:
+ strcpy(priv->firmware, ISL3877_IMAGE_FILE);
+ break;
+
+ case 0x3886:
+ strcpy(priv->firmware, ISL3886_IMAGE_FILE);
+ break;
+
+ default:
+ strcpy(priv->firmware, ISL3890_IMAGE_FILE);
+ break;
+ }
+
+ if (register_netdev(ndev)) {
+ DEBUG(SHOW_ERROR_MESSAGES,
+ "ERROR: register_netdev() failed \n");
+ goto do_islpci_free_memory;
+ }
+
+ return ndev;
+
+ do_islpci_free_memory:
+ islpci_free_memory(priv);
+ do_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+ priv = NULL;
+ return NULL;
+}
+
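+/* Record a state transition. PRV_STATE_OFF is reference counted through
+ * state_off; see the invariants checked at the end of the function. */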
+islpci_state_t
+islpci_set_state(islpci_private *priv, islpci_state_t new_state)
+{
+ islpci_state_t old_state;
+
+ /* lock */
+ old_state = priv->state;
+
+ /* this means either a race condition or some serious error in
+ * the driver code */
+ switch (new_state) {
+ case PRV_STATE_OFF:
+ priv->state_off++;
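+ /* fall through: OFF is also stored as the current state below */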
+ default:
+ priv->state = new_state;
+ break;
+
+ case PRV_STATE_PREBOOT:
+ /* there are actually many off-states, enumerated by
+ * state_off */
+ if (old_state == PRV_STATE_OFF)
+ priv->state_off--;
+
+ /* only if hw_unavailable is zero now it means we either
+ * were in off#1 state, or came here from
+ * somewhere else */
+ if (!priv->state_off)
+ priv->state = new_state;
+ break;
+ };
+#if 0
+ printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
+ priv->ndev->name, old_state, new_state, priv->state_off);
+#endif
+
+ /* invariants */
+ BUG_ON(priv->state_off < 0);
+ BUG_ON(priv->state_off && (priv->state != PRV_STATE_OFF));
+ BUG_ON(!priv->state_off && (priv->state == PRV_STATE_OFF));
+
+ /* unlock */
+ return old_state;
+}
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
new file mode 100644
index 000000000000..32a1019f1b36
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -0,0 +1,216 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
+ * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ISLPCI_DEV_H
+#define _ISLPCI_DEV_H
+
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/list.h>
+
+#include "isl_38xx.h"
+#include "isl_oid.h"
+#include "islpci_mgt.h"
+
+/* some states might be superfluous and may be removed when
+ the design is finalized (hvr) */
+typedef enum {
+ PRV_STATE_OFF = 0, /* this means hw_unavailable is != 0 */
+ PRV_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */
+ PRV_STATE_BOOT, /* boot state (fw upload, run fw) */
+ PRV_STATE_POSTBOOT, /* after boot state, need reset now */
+ PRV_STATE_PREINIT, /* pre-init state */
+ PRV_STATE_INIT, /* init state (restore MIB backup to device) */
+ PRV_STATE_READY, /* driver&device are in operational state */
+ PRV_STATE_SLEEP /* device in sleep mode */
+} islpci_state_t;
+
+/* ACL using MAC address */
+struct mac_entry {
+ struct list_head _list;
+ char addr[ETH_ALEN];
+};
+
+struct islpci_acl {
+ enum { MAC_POLICY_OPEN=0, MAC_POLICY_ACCEPT=1, MAC_POLICY_REJECT=2 } policy;
+ struct list_head mac_list; /* a list of mac_entry */
+ int size; /* size of queue */
+ struct semaphore sem; /* accessed in ioctls and trap_work */
+};
+
+struct islpci_membuf {
+ int size; /* size of memory */
+ void *mem; /* address of memory as seen by CPU */
+ dma_addr_t pci_addr; /* address of memory as seen by device */
+};
+
+#define MAX_BSS_WPA_IE_COUNT 64
+#define MAX_WPA_IE_LEN 64
+struct islpci_bss_wpa_ie {
+ struct list_head list;
+ unsigned long last_update;
+ u8 bssid[ETH_ALEN];
+ u8 wpa_ie[MAX_WPA_IE_LEN];
+ size_t wpa_ie_len;
+
+};
+
+typedef struct {
+ spinlock_t slock; /* generic spinlock */
+
+ u32 priv_oid;
+
+ /* our mib cache */
+ u32 iw_mode;
+ struct rw_semaphore mib_sem;
+ void **mib;
+ char nickname[IW_ESSID_MAX_SIZE+1];
+
+ /* Take care of the wireless stats */
+ struct work_struct stats_work;
+ struct semaphore stats_sem;
+ /* remember when we last updated the stats */
+ unsigned long stats_timestamp;
+ /* The first is accessed under semaphore locking.
+ * The second is the clean one we return to iwconfig.
+ */
+ struct iw_statistics local_iwstatistics;
+ struct iw_statistics iwstatistics;
+
+ struct iw_spy_data spy_data; /* iwspy support */
+
+#if WIRELESS_EXT > 16
+ struct iw_public_data wireless_data;
+#endif /* WIRELESS_EXT > 16 */
+
+ int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */
+
+ struct islpci_acl acl;
+
+ /* PCI bus allocation & configuration members */
+ struct pci_dev *pdev; /* PCI structure information */
+ char firmware[33];
+
+ void __iomem *device_base; /* ioremapped device base address */
+
+ /* consistent DMA region */
+ void *driver_mem_address; /* base DMA address */
+ dma_addr_t device_host_address; /* base DMA address (bus address) */
+ dma_addr_t device_psm_buffer; /* host memory for PSM buffering (bus address) */
+
+ /* our network_device structure */
+ struct net_device *ndev;
+
+ /* device queue interface members */
+ struct isl38xx_cb *control_block; /* device control block
+ (== driver_mem_address!) */
+
+ /* Each queue has three indexes:
+ * free/index_mgmt/data_rx/tx (called index, see below),
+ * driver_curr_frag, and device_curr_frag (in the control block)
+ * All indexes are ever-increasing, but interpreted modulo the
+ * device queue size when used.
+ * index <= device_curr_frag <= driver_curr_frag at all times
+ * For rx queues, [index, device_curr_frag) contains fragments
+ * that the interrupt processing needs to handle (owned by driver).
+ * [device_curr_frag, driver_curr_frag) is the free space in the
+ * rx queue, waiting for data (owned by device). The driver
+ * increments driver_curr_frag to indicate to the device that more
+ * buffers are available.
+ * If device_curr_frag == driver_curr_frag, no more rx buffers are
+ * available, and the rx DMA engine of the device is halted.
+ * For tx queues, [index, device_curr_frag) contains fragments
+ * where tx is done; they need to be freed (owned by driver).
+ * [device_curr_frag, driver_curr_frag) contains the frames
+ * that are being transferred (owned by device). The driver
+ * increments driver_curr_frag to indicate that more tx work
+ * needs to be done.
+ */
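+ /* For example, islpci_eth_transmit() treats the tx data low queue as
+ * full once driver_curr_frag - free_data_tx reaches ISL38XX_CB_TX_QSIZE. */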
+ u32 index_mgmt_rx; /* real index mgmt rx queue */
+ u32 index_mgmt_tx; /* real index mgmt tx queue */
+ u32 free_data_rx; /* free pointer data rx queue */
+ u32 free_data_tx; /* free pointer data tx queue */
+ u32 data_low_tx_full; /* full detected flag */
+
+ /* frame memory buffers for the device queues */
+ struct islpci_membuf mgmt_tx[ISL38XX_CB_MGMT_QSIZE];
+ struct islpci_membuf mgmt_rx[ISL38XX_CB_MGMT_QSIZE];
+ struct sk_buff *data_low_tx[ISL38XX_CB_TX_QSIZE];
+ struct sk_buff *data_low_rx[ISL38XX_CB_RX_QSIZE];
+ dma_addr_t pci_map_tx_address[ISL38XX_CB_TX_QSIZE];
+ dma_addr_t pci_map_rx_address[ISL38XX_CB_RX_QSIZE];
+
+ /* driver network interface members */
+ struct net_device_stats statistics;
+
+ /* wait for a reset interrupt */
+ wait_queue_head_t reset_done;
+
+ /* used by islpci_mgt_transaction */
+ struct semaphore mgmt_sem; /* serialize access to mailbox and wqueue */
+ struct islpci_mgmtframe *mgmt_received; /* mbox for incoming frame */
+ wait_queue_head_t mgmt_wqueue; /* waitqueue for mbox */
+
+ /* state machine */
+ islpci_state_t state;
+ int state_off; /* enumeration of off-state, if 0 then
+ * we're not in any off-state */
+
+ /* WPA stuff */
+ int wpa; /* WPA mode enabled */
+ struct list_head bss_wpa_list;
+ int num_bss_wpa;
+ struct semaphore wpa_sem;
+
+ struct work_struct reset_task;
+ int reset_task_pending;
+} islpci_private;
+
+static inline islpci_state_t
+islpci_get_state(islpci_private *priv)
+{
+ /* lock */
+ return priv->state;
+ /* unlock */
+}
+
+islpci_state_t islpci_set_state(islpci_private *priv, islpci_state_t new_state);
+
+#define ISLPCI_TX_TIMEOUT (2*HZ)
+
+irqreturn_t islpci_interrupt(int, void *, struct pt_regs *);
+
+int prism54_post_setup(islpci_private *, int);
+int islpci_reset(islpci_private *, int);
+
+static inline void
+islpci_trigger(islpci_private *priv)
+{
+ isl38xx_trigger_device(islpci_get_state(priv) == PRV_STATE_SLEEP,
+ priv->device_base);
+}
+
+int islpci_free_memory(islpci_private *);
+struct net_device *islpci_setup(struct pci_dev *);
+#endif /* _ISLPCI_DEV_H */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
new file mode 100644
index 000000000000..5952e9960499
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -0,0 +1,519 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include "prismcompat.h"
+#include "isl_38xx.h"
+#include "islpci_eth.h"
+#include "islpci_mgt.h"
+#include "oid_mgt.h"
+
+/******************************************************************************
+ Network Interface functions
+******************************************************************************/
+void
+islpci_eth_cleanup_transmit(islpci_private *priv,
+ isl38xx_control_block *control_block)
+{
+ struct sk_buff *skb;
+ u32 index;
+
+ /* compare the control block read pointer with the free pointer */
+ while (priv->free_data_tx !=
+ le32_to_cpu(control_block->
+ device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
+ /* read the index of the first fragment to be freed */
+ index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
+
+ /* check for holes in the arrays caused by multi-fragment frames,
+ * searching for the last fragment of a frame */
+ if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) {
+ /* entry is the last fragment of a frame
+ * free the skb structure and unmap pci memory */
+ skb = priv->data_low_tx[index];
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "cleanup skb %p skb->data %p skb->len %u truesize %u\n ",
+ skb, skb->data, skb->len, skb->truesize);
+#endif
+
+ pci_unmap_single(priv->pdev,
+ priv->pci_map_tx_address[index],
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ skb = NULL;
+ }
+ /* increment the free data low queue pointer */
+ priv->free_data_tx++;
+ }
+}
+
+int
+islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ isl38xx_control_block *cb = priv->control_block;
+ u32 index;
+ dma_addr_t pci_map_address;
+ int frame_size;
+ isl38xx_fragment *fragment;
+ int offset;
+ struct sk_buff *newskb;
+ int newskb_offset;
+ unsigned long flags;
+ unsigned char wds_mac[6];
+ u32 curr_frag;
+ int err = 0;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
+#endif
+
+ /* lock the driver code */
+ spin_lock_irqsave(&priv->slock, flags);
+
+ /* determine the amount of fragments needed to store the frame */
+
+ frame_size = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ if (init_wds)
+ frame_size += 6;
+
+ /* check whether the destination queue has enough fragments for the frame */
+ curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
+ if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
+ printk(KERN_ERR "%s: transmit device queue full when awake\n",
+ ndev->name);
+ netif_stop_queue(ndev);
+
+ /* trigger the device */
+ isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
+ ISL38XX_DEV_INT_REG);
+ udelay(ISL38XX_WRITEIO_DELAY);
+
+ err = -EBUSY;
+ goto drop_free;
+ }
+ /* Check alignment and WDS frame formatting. The start of the packet should
+ * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
+ * and add WDS address information */
+ if (likely(((long) skb->data & 0x03) | init_wds)) {
+ /* get the number of bytes to add and re-align */
+ offset = (4 - (long) skb->data) & 0x03;
+ offset += init_wds ? 6 : 0;
+
+ /* check whether the current skb can be used */
+ if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
+ unsigned char *src = skb->data;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
+ init_wds);
+#endif
+
+ /* align the buffer on 4-byte boundary */
+ skb_reserve(skb, (4 - (long) skb->data) & 0x03);
+ if (init_wds) {
+ /* wds requires an additional address field of 6 bytes */
+ skb_put(skb, 6);
+#ifdef ISLPCI_ETH_DEBUG
+ printk("islpci_eth_transmit:wds_mac\n");
+#endif
+ memmove(skb->data + 6, src, skb->len);
+ memcpy(skb->data, wds_mac, 6);
+ } else {
+ memmove(skb->data, src, skb->len);
+ }
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
+ src, skb->len);
+#endif
+ } else {
+ newskb =
+ dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
+ if (unlikely(newskb == NULL)) {
+ printk(KERN_ERR "%s: Cannot allocate skb\n",
+ ndev->name);
+ err = -ENOMEM;
+ goto drop_free;
+ }
+ newskb_offset = (4 - (long) newskb->data) & 0x03;
+
+ /* Check if newskb->data is aligned */
+ if (newskb_offset)
+ skb_reserve(newskb, newskb_offset);
+
+ skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
+ if (init_wds) {
+ memcpy(newskb->data + 6, skb->data, skb->len);
+ memcpy(newskb->data, wds_mac, 6);
+#ifdef ISLPCI_ETH_DEBUG
+ printk("islpci_eth_transmit:wds_mac\n");
+#endif
+ } else
+ memcpy(newskb->data, skb->data, skb->len);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
+ newskb->data, skb->data, skb->len, init_wds);
+#endif
+
+ newskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = newskb;
+ }
+ }
+ /* display the buffer contents for debugging */
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
+ display_buffer((char *) skb->data, skb->len);
+#endif
+
+ /* map the skb buffer to pci memory for DMA operation */
+ pci_map_address = pci_map_single(priv->pdev,
+ (void *) skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(pci_map_address == 0)) {
+ printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
+ ndev->name);
+
+ err = -EIO;
+ goto drop_free;
+ }
+ /* Place the fragment in the control block structure. */
+ index = curr_frag % ISL38XX_CB_TX_QSIZE;
+ fragment = &cb->tx_data_low[index];
+
+ priv->pci_map_tx_address[index] = pci_map_address;
+ /* store the skb address for future freeing */
+ priv->data_low_tx[index] = skb;
+ /* set the proper fragment start address and size information */
+ fragment->size = cpu_to_le16(frame_size);
+ fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */
+ fragment->address = cpu_to_le32(pci_map_address);
+ curr_frag++;
+
+ /* The fragment address in the control block must have been
+ * written before announcing the frame buffer to device. */
+ wmb();
+ cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);
+
+ if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
+ > ISL38XX_CB_TX_QSIZE) {
+ /* stop sends from upper layers */
+ netif_stop_queue(ndev);
+
+ /* set the full flag for the transmission queue */
+ priv->data_low_tx_full = 1;
+ }
+
+ /* trigger the device */
+ islpci_trigger(priv);
+
+ /* unlock the driver code */
+ spin_unlock_irqrestore(&priv->slock, flags);
+
+ /* set the transmission time */
+ ndev->trans_start = jiffies;
+ priv->statistics.tx_packets++;
+ priv->statistics.tx_bytes += skb->len;
+
+ return 0;
+
+ drop_free:
+ /* free the skbuf structure before aborting */
+ dev_kfree_skb(skb);
+ skb = NULL;
+
+ priv->statistics.tx_dropped++;
+ spin_unlock_irqrestore(&priv->slock, flags);
+ return err;
+}
+
+static inline int
+islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
+{
+ /* The card reports full 802.11 packets but with a 20-byte
+ * header and without the FCS. But there is a bit that
+ * indicates if the packet is corrupted :-) */
+ struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
+ if (hdr->flags & 0x01)
+ /* This one is bad. Drop it ! */
+ return -1;
+ if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
+ struct avs_80211_1_header *avs;
+ /* extract the relevant data from the header */
+ u32 clock = le32_to_cpu(hdr->clock);
+ u8 rate = hdr->rate;
+ u16 freq = le16_to_cpu(hdr->freq);
+ u8 rssi = hdr->rssi;
+
+ skb_pull(*skb, sizeof (struct rfmon_header));
+
+ if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
+ struct sk_buff *newskb = skb_copy_expand(*skb,
+ sizeof (struct
+ avs_80211_1_header),
+ 0, GFP_ATOMIC);
+ if (newskb) {
+ dev_kfree_skb_irq(*skb);
+ *skb = newskb;
+ } else
+ return -1;
+ /* This behavior is not very subtle... */
+ }
+
+ /* make room for the new header and fill it. */
+ avs =
+ (struct avs_80211_1_header *) skb_push(*skb,
+ sizeof (struct
+ avs_80211_1_header));
+
+ avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
+ avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
+ avs->mactime = cpu_to_be64(le64_to_cpu(clock));
+ avs->hosttime = cpu_to_be64(jiffies);
+ avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */
+ avs->channel = cpu_to_be32(channel_of_freq(freq));
+ avs->datarate = cpu_to_be32(rate * 5);
+ avs->antenna = cpu_to_be32(0); /*unknown */
+ avs->priority = cpu_to_be32(0); /*unknown */
+ avs->ssi_type = cpu_to_be32(3); /*2: dBm, 3: raw RSSI */
+ avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
+ avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise); /*better than 'undefined', I assume */
+ avs->preamble = cpu_to_be32(0); /*unknown */
+ avs->encoding = cpu_to_be32(0); /*unknown */
+ } else
+ skb_pull(*skb, sizeof (struct rfmon_header));
+
+ (*skb)->protocol = htons(ETH_P_802_2);
+ (*skb)->mac.raw = (*skb)->data;
+ (*skb)->pkt_type = PACKET_OTHERHOST;
+
+ return 0;
+}
+
+int
+islpci_eth_receive(islpci_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ isl38xx_control_block *control_block = priv->control_block;
+ struct sk_buff *skb;
+ u16 size;
+ u32 index, offset;
+ unsigned char *src;
+ int discard = 0;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
+#endif
+
+ /* the device has written an Ethernet frame in the data area
+ * of the sk_buff without updating the structure, do it now */
+ index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
+ size = le16_to_cpu(control_block->rx_data_low[index].size);
+ skb = priv->data_low_rx[index];
+ offset = ((unsigned long)
+ le32_to_cpu(control_block->rx_data_low[index].address) -
+ (unsigned long) skb->data) & 3;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
+ control_block->rx_data_low[priv->free_data_rx].address, skb->data,
+ skb->len, offset, skb->truesize);
+#endif
+
+ /* delete the streaming DMA mapping before processing the skb */
+ pci_unmap_single(priv->pdev,
+ priv->pci_map_rx_address[index],
+ MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
+
+ /* update the skb structure and align the buffer */
+ skb_put(skb, size);
+ if (offset) {
+ /* shift the buffer by the allocation offset bytes to get the right frame */
+ skb_pull(skb, 2);
+ skb_put(skb, 2);
+ }
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ /* display the buffer contents for debugging */
+ DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
+ display_buffer((char *) skb->data, skb->len);
+#endif
+
+ /* check whether WDS is enabled and whether the data frame is a WDS frame */
+
+ if (init_wds) {
+ /* WDS enabled, strip the WDS address from the first 6 bytes of the buffer */
+ src = skb->data + 6;
+ memmove(skb->data, src, skb->len - 6);
+ skb_trim(skb, skb->len - 6);
+ }
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
+ DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);
+
+ /* display the buffer contents for debugging */
+ DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
+ display_buffer((char *) skb->data, skb->len);
+#endif
+
+ /* set some additional sk_buff and network layer parameters */
+ skb->dev = ndev;
+
+ /* take care of monitor mode and spy monitoring. */
+ if (unlikely(priv->iw_mode == IW_MODE_MONITOR))
+ discard = islpci_monitor_rx(priv, &skb);
+ else {
+ if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
+ /* The packet has a rx_annex. Read it for spy monitoring, then
+ * remove it, while keeping the 2 leading MAC addresses.
+ */
+ struct iw_quality wstats;
+ struct rx_annex_header *annex =
+ (struct rx_annex_header *) skb->data;
+ wstats.level = annex->rfmon.rssi;
+ /* The noise value can be a bit outdated if nobody's
+ * reading wireless stats... */
+ wstats.noise = priv->local_iwstatistics.qual.noise;
+ wstats.qual = wstats.level - wstats.noise;
+ wstats.updated = 0x07;
+ /* Update spy records */
+ wireless_spy_update(ndev, annex->addr2, &wstats);
+
+ memcpy(skb->data + sizeof (struct rfmon_header),
+ skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, sizeof (struct rfmon_header));
+ }
+ skb->protocol = eth_type_trans(skb, ndev);
+ }
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->statistics.rx_packets++;
+ priv->statistics.rx_bytes += size;
+
+ /* deliver the skb to the network layer */
+#ifdef ISLPCI_ETH_DEBUG
+ printk
+ ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5]);
+#endif
+ if (unlikely(discard)) {
+ dev_kfree_skb_irq(skb);
+ skb = NULL;
+ } else
+ netif_rx(skb);
+
+ /* increment the read index for the rx data low queue */
+ priv->free_data_rx++;
+
+ /* add one or more sk_buff structures */
+ while (index =
+ le32_to_cpu(control_block->
+ driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
+ index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
+ /* allocate an sk_buff for storing received data frames,
+ * allowing room for any required alignment operations */
+ skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
+ if (unlikely(skb == NULL)) {
+ /* error allocating an sk_buff structure */
+ DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
+ break;
+ }
+ skb_reserve(skb, (4 - (long) skb->data) & 0x03);
+ /* store the new skb structure pointer */
+ index = index % ISL38XX_CB_RX_QSIZE;
+ priv->data_low_rx[index] = skb;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING,
+ "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
+ skb, skb->data, skb->len, index, skb->truesize);
+#endif
+
+ /* set the streaming DMA mapping for proper PCI bus operation */
+ priv->pci_map_rx_address[index] =
+ pci_map_single(priv->pdev, (void *) skb->data,
+ MAX_FRAGMENT_SIZE_RX + 2,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(priv->pci_map_rx_address[index] == (dma_addr_t) NULL)) {
+ /* error mapping the buffer to device accessible memory address */
+ DEBUG(SHOW_ERROR_MESSAGES,
+ "Error mapping DMA address\n");
+
+ /* free the skbuf structure before aborting */
+ dev_kfree_skb_irq((struct sk_buff *) skb);
+ skb = NULL;
+ break;
+ }
+ /* update the fragment address */
+ control_block->rx_data_low[index].address = cpu_to_le32((u32)
+ priv->
+ pci_map_rx_address
+ [index]);
+ wmb();
+
+ /* increment the driver read pointer */
+ add_le32p((u32 *) &control_block->
+ driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
+ }
+
+ /* trigger the device */
+ islpci_trigger(priv);
+
+ return 0;
+}
+
+void
+islpci_do_reset_and_wake(void *data)
+{
+ islpci_private *priv = (islpci_private *) data;
+ islpci_reset(priv, 1);
+ netif_wake_queue(priv->ndev);
+ priv->reset_task_pending = 0;
+}
+
+void
+islpci_eth_tx_timeout(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ struct net_device_stats *statistics = &priv->statistics;
+
+ /* increment the transmit error counter */
+ statistics->tx_errors++;
+
+ printk(KERN_WARNING "%s: tx_timeout", ndev->name);
+ if (!priv->reset_task_pending) {
+ priv->reset_task_pending = 1;
+ printk(", scheduling a reset");
+ netif_stop_queue(ndev);
+ schedule_work(&priv->reset_task);
+ }
+ printk("\n");
+}
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
new file mode 100644
index 000000000000..bc9d7a60b8d6
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ISLPCI_ETH_H
+#define _ISLPCI_ETH_H
+
+#include "isl_38xx.h"
+#include "islpci_dev.h"
+
+struct rfmon_header {
+ u16 unk0; /* = 0x0000 */
+ u16 length; /* = 0x1400 */
+ u32 clock; /* 1MHz clock */
+ u8 flags;
+ u8 unk1;
+ u8 rate;
+ u8 unk2;
+ u16 freq;
+ u16 unk3;
+ u8 rssi;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+struct rx_annex_header {
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ struct rfmon_header rfmon;
+} __attribute__ ((packed));
+
+/* wlan-ng (and hopefully others) AVS header, version one. Fields in
+ * network byte order. */
+#define P80211CAPTURE_VERSION 0x80211001
+
+struct avs_80211_1_header {
+ uint32_t version;
+ uint32_t length;
+ uint64_t mactime;
+ uint64_t hosttime;
+ uint32_t phytype;
+ uint32_t channel;
+ uint32_t datarate;
+ uint32_t antenna;
+ uint32_t priority;
+ uint32_t ssi_type;
+ int32_t ssi_signal;
+ int32_t ssi_noise;
+ uint32_t preamble;
+ uint32_t encoding;
+};
+
+void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
+int islpci_eth_transmit(struct sk_buff *, struct net_device *);
+int islpci_eth_receive(islpci_private *);
+void islpci_eth_tx_timeout(struct net_device *);
+void islpci_do_reset_and_wake(void *data);
+
+#endif /* _ISLPCI_ETH_H */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
new file mode 100644
index 000000000000..efd4d213ac3d
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -0,0 +1,339 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h> /* For __init, __exit */
+
+#include "prismcompat.h"
+#include "islpci_dev.h"
+#include "islpci_mgt.h" /* for pc_debug */
+#include "isl_oid.h"
+
+#define DRV_NAME "prism54"
+#define DRV_VERSION "1.2"
+
+MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>");
+MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
+MODULE_LICENSE("GPL");
+
+static int init_pcitm = 0;
+module_param(init_pcitm, int, 0);
+
+/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
+ * driver_data
+ * If you have an update for this please contact prism54-devel@prism54.org
+ * The latest list can be found at http://prism54.org/supported_cards.php */
+static const struct pci_device_id prism54_id_tbl[] = {
+ /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
+ {
+ 0x1260, 0x3890,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0
+ },
+
+ /* 3COM 3CRWE154G72 Wireless LAN adapter */
+ {
+ 0x10b7, 0x6001,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0
+ },
+
+ /* Intersil PRISM Indigo Wireless LAN adapter */
+ {
+ 0x1260, 0x3877,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0
+ },
+
+ /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
+ {
+ 0x1260, 0x3886,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, 0
+ },
+
+ /* End of list */
+ {0,0,0,0,0,0,0}
+};
+
+/* register the device with the Hotplug facilities of the kernel */
+MODULE_DEVICE_TABLE(pci, prism54_id_tbl);
+
+static int prism54_probe(struct pci_dev *, const struct pci_device_id *);
+static void prism54_remove(struct pci_dev *);
+static int prism54_suspend(struct pci_dev *, u32 state);
+static int prism54_resume(struct pci_dev *);
+
+static struct pci_driver prism54_driver = {
+ .name = DRV_NAME,
+ .id_table = prism54_id_tbl,
+ .probe = prism54_probe,
+ .remove = prism54_remove,
+ .suspend = prism54_suspend,
+ .resume = prism54_resume,
+ /* .enable_wake ; we don't support this yet */
+};
+
+/******************************************************************************
+ Module initialization functions
+******************************************************************************/
+
+int
+prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ u8 latency_tmr;
+ u32 mem_addr;
+ islpci_private *priv;
+ int rvalue;
+
+ /* Enable the pci device */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME);
+ return -ENODEV;
+ }
+
+ /* check whether the latency timer is set correctly */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr);
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr);
+#endif
+ if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) {
+ /* set the latency timer */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
+ PCIDEVICE_LATENCY_TIMER_VAL);
+ }
+
+ /* enable PCI DMA */
+ if (pci_set_dma_mask(pdev, 0xffffffff)) {
+ printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
+ goto do_pci_disable_device;
+ }
+
+ /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT)
+ * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT)
+ * The RETRY_TIMEOUT is used to set the number of retries that the core, as a
+ * Master, will perform before abandoning a cycle. The default value for
+ * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
+ * devices. A write of zero to the RETRY_TIMEOUT register disables this
+ * function to allow use with any non-compliant legacy devices that may
+ * execute more retries.
+ *
+ * Writing zero to both these two registers will disable both timeouts and
+ * *can* solve problems caused by devices that are slow to respond.
+ * Make this configurable - MSW
+ */
+	if (init_pcitm >= 0) {
+ pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
+ pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
+ } else {
+ printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
+ }
+
+ /* request the pci device I/O regions */
+ rvalue = pci_request_regions(pdev, DRV_NAME);
+ if (rvalue) {
+ printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n",
+ DRV_NAME, rvalue);
+ goto do_pci_disable_device;
+ }
+
+ /* check if the memory window is indeed set */
+ rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr);
+ if (rvalue || !mem_addr) {
+ printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n",
+ DRV_NAME);
+ goto do_pci_release_regions;
+ }
+
+ /* enable PCI bus-mastering */
+ DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME);
+ pci_set_master(pdev);
+
+ /* enable MWI */
+ pci_set_mwi(pdev);
+
+ /* setup the network device interface and its structure */
+ if (!(ndev = islpci_setup(pdev))) {
+ /* error configuring the driver as a network device */
+ printk(KERN_ERR "%s: could not configure network device\n",
+ DRV_NAME);
+ goto do_pci_release_regions;
+ }
+
+ priv = netdev_priv(ndev);
+ islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */
+
+ /* card is in unknown state yet, might have some interrupts pending */
+ isl38xx_disable_interrupts(priv->device_base);
+
+ /* request for the interrupt before uploading the firmware */
+ rvalue = request_irq(pdev->irq, &islpci_interrupt,
+ SA_SHIRQ, ndev->name, priv);
+
+ if (rvalue) {
+ /* error, could not hook the handler to the irq */
+ printk(KERN_ERR "%s: could not install IRQ handler\n",
+ ndev->name);
+ goto do_unregister_netdev;
+ }
+
+ /* firmware upload is triggered in islpci_open */
+
+ return 0;
+
+ do_unregister_netdev:
+ unregister_netdev(ndev);
+ islpci_free_memory(priv);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+ priv = NULL;
+ do_pci_release_regions:
+ pci_release_regions(pdev);
+ do_pci_disable_device:
+ pci_disable_device(pdev);
+ return -EIO;
+}
+
+/* set by cleanup_module */
+static volatile int __in_cleanup_module = 0;
+
+/* this one removes one(!!) instance only */
+void
+prism54_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ BUG_ON(!priv);
+
+ if (!__in_cleanup_module) {
+ printk(KERN_DEBUG "%s: hot unplug detected\n", ndev->name);
+ islpci_set_state(priv, PRV_STATE_OFF);
+ }
+
+ printk(KERN_DEBUG "%s: removing device\n", ndev->name);
+
+ unregister_netdev(ndev);
+
+ /* free the interrupt request */
+
+ if (islpci_get_state(priv) != PRV_STATE_OFF) {
+ isl38xx_disable_interrupts(priv->device_base);
+ islpci_set_state(priv, PRV_STATE_OFF);
+		/* The call below causes a lockup at rmmod time. It might be
+		 * because some interrupts still linger after rmmod,
+		 * see bug #17 */
+ /* pci_set_power_state(pdev, 3);*/ /* try to power-off */
+ }
+
+ free_irq(pdev->irq, priv);
+
+ /* free the PCI memory and unmap the remapped page */
+ islpci_free_memory(priv);
+
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+ priv = NULL;
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+int
+prism54_suspend(struct pci_dev *pdev, u32 state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ BUG_ON(!priv);
+
+ printk(KERN_NOTICE "%s: got suspend request (state %d)\n",
+ ndev->name, state);
+
+ pci_save_state(pdev);
+
+ /* tell the device not to trigger interrupts for now... */
+ isl38xx_disable_interrupts(priv->device_base);
+
+ /* from now on assume the hardware was already powered down
+ and don't touch it anymore */
+ islpci_set_state(priv, PRV_STATE_OFF);
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
+ return 0;
+}
+
+int
+prism54_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ BUG_ON(!priv);
+
+ pci_enable_device(pdev);
+
+ printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
+
+ pci_restore_state(pdev);
+
+ /* alright let's go into the PREBOOT state */
+ islpci_reset(priv, 1);
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int __init
+prism54_module_init(void)
+{
+ printk(KERN_INFO "Loaded %s driver, version %s\n",
+ DRV_NAME, DRV_VERSION);
+
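+	/* sanity check that catches wrongly sized packed structures */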
+	__bug_on_wrong_struct_sizes();
+
+ return pci_module_init(&prism54_driver);
+}
+
+/* by the time prism54_module_exit() terminates, as a postcondition
+ * all instances will have been destroyed by calls to
+ * prism54_remove() */
+static void __exit
+prism54_module_exit(void)
+{
+ __in_cleanup_module = 1;
+
+ pci_unregister_driver(&prism54_driver);
+
+ printk(KERN_INFO "Unloaded %s driver\n", DRV_NAME);
+
+ __in_cleanup_module = 0;
+}
+
+/* register entry points */
+module_init(prism54_module_init);
+module_exit(prism54_module_exit);
+/* EOF */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
new file mode 100644
index 000000000000..b6f2e5a223be
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -0,0 +1,513 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/if_arp.h>
+
+#include "prismcompat.h"
+#include "isl_38xx.h"
+#include "islpci_mgt.h"
+#include "isl_oid.h" /* additional types and defs for isl38xx fw */
+#include "isl_ioctl.h"
+
+#include <net/iw_handler.h>
+
+/******************************************************************************
+ Global variable definition section
+******************************************************************************/
+int pc_debug = VERBOSE;
+module_param(pc_debug, int, 0);
+
+/******************************************************************************
+ Driver general functions
+******************************************************************************/
+#if VERBOSE > SHOW_ERROR_MESSAGES
+void
+display_buffer(char *buffer, int length)
+{
+ if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
+ return;
+
+ while (length > 0) {
+ printk("[%02x]", *buffer & 255);
+ length--;
+ buffer++;
+ }
+
+ printk("\n");
+}
+#endif
+
+/*****************************************************************************
+ Queue handling for management frames
+******************************************************************************/
+
+/*
+ * Helper function to create a PIMFOR management frame header.
+ */
+static void
+pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
+{
+ h->version = PIMFOR_VERSION;
+ h->operation = operation;
+ h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
+ h->flags = 0;
+ h->oid = cpu_to_be32(oid);
+ h->length = cpu_to_be32(length);
+}
+
+/*
+ * Helper function to analyze a PIMFOR management frame header.
+ */
+static pimfor_header_t *
+pimfor_decode_header(void *data, int len)
+{
+ pimfor_header_t *h = data;
+
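+	/* Walk past any OID_INL_TUNNEL encapsulation headers, byte-swapping
+	 * each header according to its endianness flag, until the actual
+	 * payload header is found. */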
+ while ((void *) h < data + len) {
+ if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
+ le32_to_cpus(&h->oid);
+ le32_to_cpus(&h->length);
+ } else {
+ be32_to_cpus(&h->oid);
+ be32_to_cpus(&h->length);
+ }
+ if (h->oid != OID_INL_TUNNEL)
+ return h;
+ h++;
+ }
+ return NULL;
+}
+
+/*
+ * Fill the receive queue for management frames with fresh buffers.
+ */
+int
+islpci_mgmt_rx_fill(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ isl38xx_control_block *cb = /* volatile not needed */
+ (isl38xx_control_block *) priv->control_block;
+ u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
+#endif
+
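+	/* Refill every free slot between the driver index and the queue size
+	 * with an allocated and DMA-mapped MGMT_FRAME_SIZE buffer. */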
+ while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
+ u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
+ struct islpci_membuf *buf = &priv->mgmt_rx[index];
+ isl38xx_fragment *frag = &cb->rx_data_mgmt[index];
+
+ if (buf->mem == NULL) {
+ buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
+ if (!buf->mem) {
+ printk(KERN_WARNING
+ "Error allocating management frame.\n");
+ return -ENOMEM;
+ }
+ buf->size = MGMT_FRAME_SIZE;
+ }
+ if (buf->pci_addr == 0) {
+ buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
+ MGMT_FRAME_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (!buf->pci_addr) {
+ printk(KERN_WARNING
+ "Failed to make memory DMA'able\n.");
+ return -ENOMEM;
+ }
+ }
+
+ /* be safe: always reset control block information */
+ frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
+ frag->flags = 0;
+ frag->address = cpu_to_le32(buf->pci_addr);
+ curr++;
+
+ /* The fragment address in the control block must have
+ * been written before announcing the frame buffer to
+ * device */
+ wmb();
+ cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
+ }
+ return 0;
+}
+
+/*
+ * Create and transmit a management frame using "operation" and "oid",
+ * with arguments data/length.
+ * We either return an error and free the frame, or we return 0 and
+ * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
+ * interrupt.
+ */
+static int
+islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
+ void *data, int length)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ isl38xx_control_block *cb =
+ (isl38xx_control_block *) priv->control_block;
+ void *p;
+ int err = -EINVAL;
+ unsigned long flags;
+ isl38xx_fragment *frag;
+ struct islpci_membuf buf;
+ u32 curr_frag;
+ int index;
+ int frag_len = length + PIMFOR_HEADER_SIZE;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
+#endif
+
+ if (frag_len > MGMT_FRAME_SIZE) {
+ printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
+ ndev->name, frag_len);
+ goto error;
+ }
+
+ err = -ENOMEM;
+ p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
+ if (!buf.mem) {
+ printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
+ ndev->name);
+ goto error;
+ }
+ buf.size = frag_len;
+
+ /* create the header directly in the fragment data area */
+ pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
+ p += PIMFOR_HEADER_SIZE;
+
+ if (data)
+ memcpy(p, data, length);
+ else
+ memset(p, 0, length);
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ {
+ pimfor_header_t *h = buf.mem;
+ DEBUG(SHOW_PIMFOR_FRAMES,
+ "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
+ h->operation, oid, h->device_id, h->flags, length);
+
+ /* display the buffer contents for debugging */
+ display_buffer((char *) h, sizeof (pimfor_header_t));
+ display_buffer(p, length);
+ }
+#endif
+
+ err = -ENOMEM;
+ buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
+ PCI_DMA_TODEVICE);
+ if (!buf.pci_addr) {
+ printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
+ ndev->name);
+ goto error_free;
+ }
+
+ /* Protect the control block modifications against interrupts. */
+ spin_lock_irqsave(&priv->slock, flags);
+ curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
+ if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
+ printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
+ ndev->name);
+ goto error_unlock;
+ }
+
+ /* commit the frame to the tx device queue */
+ index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
+ priv->mgmt_tx[index] = buf;
+ frag = &cb->tx_data_mgmt[index];
+ frag->size = cpu_to_le16(frag_len);
+	frag->flags = 0;	/* would be set to 1 for any fragment other than the last */
+ frag->address = cpu_to_le32(buf.pci_addr);
+
+ /* The fragment address in the control block must have
+ * been written before announcing the frame buffer to
+ * device */
+ wmb();
+ cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
+ spin_unlock_irqrestore(&priv->slock, flags);
+
+ /* trigger the device */
+ islpci_trigger(priv);
+ return 0;
+
+ error_unlock:
+ spin_unlock_irqrestore(&priv->slock, flags);
+ error_free:
+ kfree(buf.mem);
+ error:
+ return err;
+}
+
+/*
+ * Receive a management frame from the device.
+ * This can be an arbitrary number of traps, and at most one response
+ * frame for a previous request sent via islpci_mgt_transmit().
+ */
+int
+islpci_mgt_receive(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ isl38xx_control_block *cb =
+ (isl38xx_control_block *) priv->control_block;
+ u32 curr_frag;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
+#endif
+
+ /* Only once per interrupt, determine fragment range to
+ * process. This avoids an endless loop (i.e. lockup) if
+ * frames come in faster than we can process them. */
+ curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
+ barrier();
+
+ for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
+ pimfor_header_t *header;
+ u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
+ struct islpci_membuf *buf = &priv->mgmt_rx[index];
+ u16 frag_len;
+ int size;
+ struct islpci_mgmtframe *frame;
+
+ /* I have no idea (and no documentation) if flags != 0
+ * is possible. Drop the frame, reuse the buffer. */
+ if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
+ printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
+ ndev->name,
+ le16_to_cpu(cb->rx_data_mgmt[index].flags));
+ continue;
+ }
+
+ /* The device only returns the size of the header(s) here. */
+ frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);
+
+ /*
+ * We appear to have no way to tell the device the
+ * size of a receive buffer. Thus, if this check
+ * triggers, we likely have kernel heap corruption. */
+ if (frag_len > MGMT_FRAME_SIZE) {
+ printk(KERN_WARNING
+ "%s: Bogus packet size of %d (%#x).\n",
+ ndev->name, frag_len, frag_len);
+ frag_len = MGMT_FRAME_SIZE;
+ }
+
+ /* Ensure the results of device DMA are visible to the CPU. */
+ pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
+ buf->size, PCI_DMA_FROMDEVICE);
+
+		/* Perform endianness conversion for the PIMFOR header in place. */
+ header = pimfor_decode_header(buf->mem, frag_len);
+ if (!header) {
+ printk(KERN_WARNING "%s: no PIMFOR header found\n",
+ ndev->name);
+ continue;
+ }
+
+ /* The device ID from the PIMFOR packet received from
+ * the MVC is always 0. We forward a sensible device_id.
+ * Not that anyone upstream would care... */
+ header->device_id = priv->ndev->ifindex;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_PIMFOR_FRAMES,
+ "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
+ header->operation, header->oid, header->device_id,
+ header->flags, header->length);
+
+ /* display the buffer contents for debugging */
+ display_buffer((char *) header, PIMFOR_HEADER_SIZE);
+ display_buffer((char *) header + PIMFOR_HEADER_SIZE,
+ header->length);
+#endif
+
+ /* nobody sends these */
+ if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
+ printk(KERN_DEBUG
+ "%s: errant PIMFOR application frame\n",
+ ndev->name);
+ continue;
+ }
+
+ /* Determine frame size, skipping OID_INL_TUNNEL headers. */
+ size = PIMFOR_HEADER_SIZE + header->length;
+ frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
+ GFP_ATOMIC);
+ if (!frame) {
+ printk(KERN_WARNING
+ "%s: Out of memory, cannot handle oid 0x%08x\n",
+ ndev->name, header->oid);
+ continue;
+ }
+ frame->ndev = ndev;
+ memcpy(&frame->buf, header, size);
+ frame->header = (pimfor_header_t *) frame->buf;
+ frame->data = frame->buf + PIMFOR_HEADER_SIZE;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_PIMFOR_FRAMES,
+ "frame: header: %p, data: %p, size: %d\n",
+ frame->header, frame->data, size);
+#endif
+
+ if (header->operation == PIMFOR_OP_TRAP) {
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ printk(KERN_DEBUG
+ "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
+ header->oid, header->device_id, header->flags,
+ header->length);
+#endif
+
+ /* Create work to handle trap out of interrupt
+ * context. */
+ INIT_WORK(&frame->ws, prism54_process_trap, frame);
+ schedule_work(&frame->ws);
+
+ } else {
+ /* Signal the one waiting process that a response
+ * has been received. */
+ if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
+ printk(KERN_WARNING
+ "%s: mgmt response not collected\n",
+ ndev->name);
+ kfree(frame);
+ }
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
+#endif
+ wake_up(&priv->mgmt_wqueue);
+ }
+
+ }
+
+ return 0;
+}
+
+/*
+ * Cleanup the transmit queue by freeing all frames handled by the device.
+ */
+void
+islpci_mgt_cleanup_transmit(struct net_device *ndev)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ isl38xx_control_block *cb = /* volatile not needed */
+ (isl38xx_control_block *) priv->control_block;
+ u32 curr_frag;
+
+#if VERBOSE > SHOW_ERROR_MESSAGES
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
+#endif
+
+ /* Only once per cleanup, determine fragment range to
+ * process. This avoids an endless loop (i.e. lockup) if
+ * the device became confused, incrementing device_curr_frag
+ * rapidly. */
+ curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
+ barrier();
+
+ for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
+ int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
+ struct islpci_membuf *buf = &priv->mgmt_tx[index];
+ pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
+ PCI_DMA_TODEVICE);
+ buf->pci_addr = 0;
+ kfree(buf->mem);
+ buf->mem = NULL;
+ buf->size = 0;
+ }
+}
+
+/*
+ * Perform one request-response transaction to the device.
+ */
+int
+islpci_mgt_transaction(struct net_device *ndev,
+ int operation, unsigned long oid,
+ void *senddata, int sendlen,
+ struct islpci_mgmtframe **recvframe)
+{
+ islpci_private *priv = netdev_priv(ndev);
+ const long wait_cycle_jiffies = (ISL38XX_WAIT_CYCLE * 10 * HZ) / 1000;
+ long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
+ int err;
+ DEFINE_WAIT(wait);
+
+ *recvframe = NULL;
+
+ if (down_interruptible(&priv->mgmt_sem))
+ return -ERESTARTSYS;
+
+ prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
+ err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
+ if (err)
+ goto out;
+
+ err = -ETIMEDOUT;
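+	/* Poll for the response in wait_cycle_jiffies slices, re-triggering
+	 * the device on each slice that times out, until the matching OID
+	 * arrives or the overall timeout budget is used up. */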
+ while (timeout_left > 0) {
+ int timeleft;
+ struct islpci_mgmtframe *frame;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ timeleft = schedule_timeout(wait_cycle_jiffies);
+ frame = xchg(&priv->mgmt_received, NULL);
+ if (frame) {
+ if (frame->header->oid == oid) {
+ *recvframe = frame;
+ err = 0;
+ goto out;
+ } else {
+ printk(KERN_DEBUG
+ "%s: expecting oid 0x%x, received 0x%x.\n",
+ ndev->name, (unsigned int) oid,
+ frame->header->oid);
+ kfree(frame);
+ frame = NULL;
+ }
+ }
+ if (timeleft == 0) {
+ printk(KERN_DEBUG
+ "%s: timeout waiting for mgmt response %lu, "
+ "triggering device\n",
+ ndev->name, timeout_left);
+ islpci_trigger(priv);
+ }
+ timeout_left += timeleft - wait_cycle_jiffies;
+ }
+ printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
+ ndev->name);
+
+ /* TODO: we should reset the device here */
+ out:
+ finish_wait(&priv->mgmt_wqueue, &wait);
+ up(&priv->mgmt_sem);
+ return err;
+}
+
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
new file mode 100644
index 000000000000..2982be3363ef
--- /dev/null
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -0,0 +1,145 @@
+/*
+ *
+ * Copyright (C) 2002 Intersil Americas Inc.
+ * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ISLPCI_MGT_H
+#define _ISLPCI_MGT_H
+
+#include <linux/wireless.h>
+#include <linux/skbuff.h>
+
+/*
+ * Function definitions
+ */
+
+#define K_DEBUG(f, m, args...) do { if(f & m) printk(KERN_DEBUG args); } while(0)
+#define DEBUG(f, args...) K_DEBUG(f, pc_debug, args)
+
+extern int pc_debug;
+#define init_wds 0 /* help compiler optimize away dead code */
+
+
+/* General driver definitions */
+#define PCIDEVICE_LATENCY_TIMER_MIN 0x40
+#define PCIDEVICE_LATENCY_TIMER_VAL 0x50
+
+/* Debugging verbose definitions */
+#define SHOW_NOTHING 0x00 /* overrules everything */
+#define SHOW_ANYTHING 0xFF
+#define SHOW_ERROR_MESSAGES 0x01
+#define SHOW_TRAPS 0x02
+#define SHOW_FUNCTION_CALLS 0x04
+#define SHOW_TRACING 0x08
+#define SHOW_QUEUE_INDEXES 0x10
+#define SHOW_PIMFOR_FRAMES 0x20
+#define SHOW_BUFFER_CONTENTS 0x40
+#define VERBOSE 0x01
+
+/* Default card definitions */
+#define CARD_DEFAULT_CHANNEL 6
+#define CARD_DEFAULT_MODE INL_MODE_CLIENT
+#define CARD_DEFAULT_IW_MODE IW_MODE_INFRA
+#define CARD_DEFAULT_BSSTYPE DOT11_BSSTYPE_INFRA
+#define CARD_DEFAULT_CLIENT_SSID ""
+#define CARD_DEFAULT_AP_SSID "default"
+#define CARD_DEFAULT_KEY1 "default_key_1"
+#define CARD_DEFAULT_KEY2 "default_key_2"
+#define CARD_DEFAULT_KEY3 "default_key_3"
+#define CARD_DEFAULT_KEY4 "default_key_4"
+#define CARD_DEFAULT_WEP 0
+#define CARD_DEFAULT_FILTER 0
+#define CARD_DEFAULT_WDS 0
+#define CARD_DEFAULT_AUTHEN DOT11_AUTH_OS
+#define CARD_DEFAULT_DOT1X 0
+#define CARD_DEFAULT_MLME_MODE DOT11_MLME_AUTO
+#define CARD_DEFAULT_CONFORMANCE OID_INL_CONFORMANCE_NONE
+#define CARD_DEFAULT_PROFILE DOT11_PROFILE_MIXED_G_WIFI
+#define CARD_DEFAULT_MAXFRAMEBURST DOT11_MAXFRAMEBURST_MIXED_SAFE
+
+/* PIMFOR package definitions */
+#define PIMFOR_ETHERTYPE 0x8828
+#define PIMFOR_HEADER_SIZE 12
+#define PIMFOR_VERSION 1
+#define PIMFOR_OP_GET 0
+#define PIMFOR_OP_SET 1
+#define PIMFOR_OP_RESPONSE 2
+#define PIMFOR_OP_ERROR 3
+#define PIMFOR_OP_TRAP 4
+#define PIMFOR_OP_RESERVED 5 /* till 255 */
+#define PIMFOR_DEV_ID_MHLI_MIB 0
+#define PIMFOR_FLAG_APPLIC_ORIGIN 0x01
+#define PIMFOR_FLAG_LITTLE_ENDIAN 0x02
+
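+/* Add a value to a little-endian 32-bit counter, keeping it little-endian. */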
+static inline void
+add_le32p(u32 * le_number, u32 add)
+{
+ *le_number = cpu_to_le32(le32_to_cpup(le_number) + add);
+}
+
+void display_buffer(char *, int);
+
+/*
+ * Type definition section
+ *
+ * The structure defines only the header, allowing copy-free
+ * frame handling.
+ */
+typedef struct {
+ u8 version;
+ u8 operation;
+ u32 oid;
+ u8 device_id;
+ u8 flags;
+ u32 length;
+} __attribute__ ((packed))
+pimfor_header_t;
+
+/* A received and interrupt-processed management frame, either for
+ * schedule_work(prism54_process_trap) or for priv->mgmt_received,
+ * processed by islpci_mgt_transaction(). */
+struct islpci_mgmtframe {
+ struct net_device *ndev; /* pointer to network device */
+ pimfor_header_t *header; /* payload header, points into buf */
+ void *data; /* payload ex header, points into buf */
+ struct work_struct ws; /* argument for schedule_work() */
+ char buf[0]; /* fragment buffer */
+};
+
+int
+islpci_mgt_receive(struct net_device *ndev);
+
+int
+islpci_mgmt_rx_fill(struct net_device *ndev);
+
+void
+islpci_mgt_cleanup_transmit(struct net_device *ndev);
+
+int
+islpci_mgt_transaction(struct net_device *ndev,
+ int operation, unsigned long oid,
+ void *senddata, int sendlen,
+ struct islpci_mgmtframe **recvframe);
+
+static inline void
+islpci_mgt_release(struct islpci_mgmtframe *frame)
+{
+ kfree(frame);
+}
+
+#endif /* _ISLPCI_MGT_H */
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
new file mode 100644
index 000000000000..12123e24b113
--- /dev/null
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "prismcompat.h"
+#include "islpci_dev.h"
+#include "islpci_mgt.h"
+#include "isl_oid.h"
+#include "oid_mgt.h"
+#include "isl_ioctl.h"
+
+/* to convert between channel and freq */
+static const int frequency_list_bg[] = { 2412, 2417, 2422, 2427, 2432,
+ 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+
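+/* Map a frequency in MHz to a channel number: the 2.4 GHz band uses the
+ * lookup table above (channels 1-14), the 5 GHz band is computed as
+ * (freq - 5000) / 5; unknown frequencies return 0. */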
+int
+channel_of_freq(int f)
+{
+ int c = 0;
+
+ if ((f >= 2412) && (f <= 2484)) {
+ while ((c < 14) && (f != frequency_list_bg[c]))
+ c++;
+ return (c >= 14) ? 0 : ++c;
+	} else if ((f >= 5000) && (f <= 6000)) {
+		return (f - 5000) / 5;
+ } else
+ return 0;
+}
+
+#define OID_STRUCT(name,oid,s,t) [name] = {oid, 0, sizeof(s), t}
+#define OID_STRUCT_C(name,oid,s,t) OID_STRUCT(name,oid,s,t | OID_FLAG_CACHED)
+#define OID_U32(name,oid) OID_STRUCT(name,oid,u32,OID_TYPE_U32)
+#define OID_U32_C(name,oid) OID_STRUCT_C(name,oid,u32,OID_TYPE_U32)
+#define OID_STRUCT_MLME(name,oid) OID_STRUCT(name,oid,struct obj_mlme,OID_TYPE_MLME)
+#define OID_STRUCT_MLMEEX(name,oid) OID_STRUCT(name,oid,struct obj_mlmeex,OID_TYPE_MLMEEX)
+
+#define OID_UNKNOWN(name,oid) OID_STRUCT(name,oid,0,0)
+
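+/* Table of all OIDs known to the driver, indexed by enum oid_num_t.
+ * Each entry holds the firmware OID number, the index range for
+ * multi-instance OIDs, the payload size and the type/caching flags. */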
+struct oid_t isl_oid[] = {
+ OID_STRUCT(GEN_OID_MACADDRESS, 0x00000000, u8[6], OID_TYPE_ADDR),
+ OID_U32(GEN_OID_LINKSTATE, 0x00000001),
+ OID_UNKNOWN(GEN_OID_WATCHDOG, 0x00000002),
+ OID_UNKNOWN(GEN_OID_MIBOP, 0x00000003),
+ OID_UNKNOWN(GEN_OID_OPTIONS, 0x00000004),
+ OID_UNKNOWN(GEN_OID_LEDCONFIG, 0x00000005),
+
+ /* 802.11 */
+ OID_U32_C(DOT11_OID_BSSTYPE, 0x10000000),
+ OID_STRUCT_C(DOT11_OID_BSSID, 0x10000001, u8[6], OID_TYPE_RAW),
+ OID_STRUCT_C(DOT11_OID_SSID, 0x10000002, struct obj_ssid,
+ OID_TYPE_SSID),
+ OID_U32(DOT11_OID_STATE, 0x10000003),
+ OID_U32(DOT11_OID_AID, 0x10000004),
+ OID_STRUCT(DOT11_OID_COUNTRYSTRING, 0x10000005, u8[4], OID_TYPE_RAW),
+ OID_STRUCT_C(DOT11_OID_SSIDOVERRIDE, 0x10000006, struct obj_ssid,
+ OID_TYPE_SSID),
+
+ OID_U32(DOT11_OID_MEDIUMLIMIT, 0x11000000),
+ OID_U32_C(DOT11_OID_BEACONPERIOD, 0x11000001),
+ OID_U32(DOT11_OID_DTIMPERIOD, 0x11000002),
+ OID_U32(DOT11_OID_ATIMWINDOW, 0x11000003),
+ OID_U32(DOT11_OID_LISTENINTERVAL, 0x11000004),
+ OID_U32(DOT11_OID_CFPPERIOD, 0x11000005),
+ OID_U32(DOT11_OID_CFPDURATION, 0x11000006),
+
+ OID_U32_C(DOT11_OID_AUTHENABLE, 0x12000000),
+ OID_U32_C(DOT11_OID_PRIVACYINVOKED, 0x12000001),
+ OID_U32_C(DOT11_OID_EXUNENCRYPTED, 0x12000002),
+ OID_U32_C(DOT11_OID_DEFKEYID, 0x12000003),
+ [DOT11_OID_DEFKEYX] = {0x12000004, 3, sizeof (struct obj_key),
+ OID_FLAG_CACHED | OID_TYPE_KEY}, /* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */
+ OID_UNKNOWN(DOT11_OID_STAKEY, 0x12000008),
+ OID_U32(DOT11_OID_REKEYTHRESHOLD, 0x12000009),
+ OID_UNKNOWN(DOT11_OID_STASC, 0x1200000a),
+
+ OID_U32(DOT11_OID_PRIVTXREJECTED, 0x1a000000),
+ OID_U32(DOT11_OID_PRIVRXPLAIN, 0x1a000001),
+ OID_U32(DOT11_OID_PRIVRXFAILED, 0x1a000002),
+ OID_U32(DOT11_OID_PRIVRXNOKEY, 0x1a000003),
+
+ OID_U32_C(DOT11_OID_RTSTHRESH, 0x13000000),
+ OID_U32_C(DOT11_OID_FRAGTHRESH, 0x13000001),
+ OID_U32_C(DOT11_OID_SHORTRETRIES, 0x13000002),
+ OID_U32_C(DOT11_OID_LONGRETRIES, 0x13000003),
+ OID_U32_C(DOT11_OID_MAXTXLIFETIME, 0x13000004),
+ OID_U32(DOT11_OID_MAXRXLIFETIME, 0x13000005),
+ OID_U32(DOT11_OID_AUTHRESPTIMEOUT, 0x13000006),
+ OID_U32(DOT11_OID_ASSOCRESPTIMEOUT, 0x13000007),
+
+ OID_UNKNOWN(DOT11_OID_ALOFT_TABLE, 0x1d000000),
+ OID_UNKNOWN(DOT11_OID_ALOFT_CTRL_TABLE, 0x1d000001),
+ OID_UNKNOWN(DOT11_OID_ALOFT_RETREAT, 0x1d000002),
+ OID_UNKNOWN(DOT11_OID_ALOFT_PROGRESS, 0x1d000003),
+ OID_U32(DOT11_OID_ALOFT_FIXEDRATE, 0x1d000004),
+ OID_UNKNOWN(DOT11_OID_ALOFT_RSSIGRAPH, 0x1d000005),
+ OID_UNKNOWN(DOT11_OID_ALOFT_CONFIG, 0x1d000006),
+
+ [DOT11_OID_VDCFX] = {0x1b000000, 7, 0, 0},
+ OID_U32(DOT11_OID_MAXFRAMEBURST, 0x1b000008),
+
+ OID_U32(DOT11_OID_PSM, 0x14000000),
+ OID_U32(DOT11_OID_CAMTIMEOUT, 0x14000001),
+ OID_U32(DOT11_OID_RECEIVEDTIMS, 0x14000002),
+ OID_U32(DOT11_OID_ROAMPREFERENCE, 0x14000003),
+
+ OID_U32(DOT11_OID_BRIDGELOCAL, 0x15000000),
+ OID_U32(DOT11_OID_CLIENTS, 0x15000001),
+ OID_U32(DOT11_OID_CLIENTSASSOCIATED, 0x15000002),
+ [DOT11_OID_CLIENTX] = {0x15000003, 2006, 0, 0}, /* DOT11_OID_CLIENTX,...DOT11_OID_CLIENT2007 */
+
+ OID_STRUCT(DOT11_OID_CLIENTFIND, 0x150007DB, u8[6], OID_TYPE_ADDR),
+ OID_STRUCT(DOT11_OID_WDSLINKADD, 0x150007DC, u8[6], OID_TYPE_ADDR),
+ OID_STRUCT(DOT11_OID_WDSLINKREMOVE, 0x150007DD, u8[6], OID_TYPE_ADDR),
+ OID_STRUCT(DOT11_OID_EAPAUTHSTA, 0x150007DE, u8[6], OID_TYPE_ADDR),
+ OID_STRUCT(DOT11_OID_EAPUNAUTHSTA, 0x150007DF, u8[6], OID_TYPE_ADDR),
+ OID_U32_C(DOT11_OID_DOT1XENABLE, 0x150007E0),
+ OID_UNKNOWN(DOT11_OID_MICFAILURE, 0x150007E1),
+ OID_UNKNOWN(DOT11_OID_REKEYINDICATE, 0x150007E2),
+
+ OID_U32(DOT11_OID_MPDUTXSUCCESSFUL, 0x16000000),
+ OID_U32(DOT11_OID_MPDUTXONERETRY, 0x16000001),
+ OID_U32(DOT11_OID_MPDUTXMULTIPLERETRIES, 0x16000002),
+ OID_U32(DOT11_OID_MPDUTXFAILED, 0x16000003),
+ OID_U32(DOT11_OID_MPDURXSUCCESSFUL, 0x16000004),
+ OID_U32(DOT11_OID_MPDURXDUPS, 0x16000005),
+ OID_U32(DOT11_OID_RTSSUCCESSFUL, 0x16000006),
+ OID_U32(DOT11_OID_RTSFAILED, 0x16000007),
+ OID_U32(DOT11_OID_ACKFAILED, 0x16000008),
+ OID_U32(DOT11_OID_FRAMERECEIVES, 0x16000009),
+ OID_U32(DOT11_OID_FRAMEERRORS, 0x1600000A),
+ OID_U32(DOT11_OID_FRAMEABORTS, 0x1600000B),
+ OID_U32(DOT11_OID_FRAMEABORTSPHY, 0x1600000C),
+
+ OID_U32(DOT11_OID_SLOTTIME, 0x17000000),
+ OID_U32(DOT11_OID_CWMIN, 0x17000001),
+ OID_U32(DOT11_OID_CWMAX, 0x17000002),
+ OID_U32(DOT11_OID_ACKWINDOW, 0x17000003),
+ OID_U32(DOT11_OID_ANTENNARX, 0x17000004),
+ OID_U32(DOT11_OID_ANTENNATX, 0x17000005),
+ OID_U32(DOT11_OID_ANTENNADIVERSITY, 0x17000006),
+ OID_U32_C(DOT11_OID_CHANNEL, 0x17000007),
+ OID_U32_C(DOT11_OID_EDTHRESHOLD, 0x17000008),
+ OID_U32(DOT11_OID_PREAMBLESETTINGS, 0x17000009),
+ OID_STRUCT(DOT11_OID_RATES, 0x1700000A, u8[IWMAX_BITRATES + 1],
+ OID_TYPE_RAW),
+ OID_U32(DOT11_OID_CCAMODESUPPORTED, 0x1700000B),
+ OID_U32(DOT11_OID_CCAMODE, 0x1700000C),
+ OID_UNKNOWN(DOT11_OID_RSSIVECTOR, 0x1700000D),
+ OID_UNKNOWN(DOT11_OID_OUTPUTPOWERTABLE, 0x1700000E),
+ OID_U32(DOT11_OID_OUTPUTPOWER, 0x1700000F),
+ OID_STRUCT(DOT11_OID_SUPPORTEDRATES, 0x17000010,
+ u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
+ OID_U32_C(DOT11_OID_FREQUENCY, 0x17000011),
+ [DOT11_OID_SUPPORTEDFREQUENCIES] =
+ {0x17000012, 0, sizeof (struct obj_frequencies)
+ + sizeof (u16) * IWMAX_FREQ, OID_TYPE_FREQUENCIES},
+
+ OID_U32(DOT11_OID_NOISEFLOOR, 0x17000013),
+ OID_STRUCT(DOT11_OID_FREQUENCYACTIVITY, 0x17000014, u8[IWMAX_FREQ + 1],
+ OID_TYPE_RAW),
+ OID_UNKNOWN(DOT11_OID_IQCALIBRATIONTABLE, 0x17000015),
+ OID_U32(DOT11_OID_NONERPPROTECTION, 0x17000016),
+ OID_U32(DOT11_OID_SLOTSETTINGS, 0x17000017),
+ OID_U32(DOT11_OID_NONERPTIMEOUT, 0x17000018),
+ OID_U32(DOT11_OID_PROFILES, 0x17000019),
+ OID_STRUCT(DOT11_OID_EXTENDEDRATES, 0x17000020,
+ u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
+
+ OID_STRUCT_MLME(DOT11_OID_DEAUTHENTICATE, 0x18000000),
+ OID_STRUCT_MLME(DOT11_OID_AUTHENTICATE, 0x18000001),
+ OID_STRUCT_MLME(DOT11_OID_DISASSOCIATE, 0x18000002),
+ OID_STRUCT_MLME(DOT11_OID_ASSOCIATE, 0x18000003),
+ OID_UNKNOWN(DOT11_OID_SCAN, 0x18000004),
+ OID_STRUCT_MLMEEX(DOT11_OID_BEACON, 0x18000005),
+ OID_STRUCT_MLMEEX(DOT11_OID_PROBE, 0x18000006),
+ OID_STRUCT_MLMEEX(DOT11_OID_DEAUTHENTICATEEX, 0x18000007),
+ OID_STRUCT_MLMEEX(DOT11_OID_AUTHENTICATEEX, 0x18000008),
+ OID_STRUCT_MLMEEX(DOT11_OID_DISASSOCIATEEX, 0x18000009),
+ OID_STRUCT_MLMEEX(DOT11_OID_ASSOCIATEEX, 0x1800000A),
+ OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATE, 0x1800000B),
+ OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATEEX, 0x1800000C),
+
+ OID_U32(DOT11_OID_NONERPSTATUS, 0x1E000000),
+
+ OID_U32(DOT11_OID_STATIMEOUT, 0x19000000),
+ OID_U32_C(DOT11_OID_MLMEAUTOLEVEL, 0x19000001),
+ OID_U32(DOT11_OID_BSSTIMEOUT, 0x19000002),
+ [DOT11_OID_ATTACHMENT] = {0x19000003, 0,
+ sizeof(struct obj_attachment), OID_TYPE_ATTACH},
+ OID_STRUCT_C(DOT11_OID_PSMBUFFER, 0x19000004, struct obj_buffer,
+ OID_TYPE_BUFFER),
+
+ OID_U32(DOT11_OID_BSSS, 0x1C000000),
+ [DOT11_OID_BSSX] = {0x1C000001, 63, sizeof (struct obj_bss),
+ OID_TYPE_BSS}, /*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */
+ OID_STRUCT(DOT11_OID_BSSFIND, 0x1C000042, struct obj_bss, OID_TYPE_BSS),
+ [DOT11_OID_BSSLIST] = {0x1C000043, 0, sizeof (struct
+ obj_bsslist) +
+ sizeof (struct obj_bss[IWMAX_BSS]),
+ OID_TYPE_BSSLIST},
+
+ OID_UNKNOWN(OID_INL_TUNNEL, 0xFF020000),
+ OID_UNKNOWN(OID_INL_MEMADDR, 0xFF020001),
+ OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002),
+ OID_U32_C(OID_INL_MODE, 0xFF020003),
+ OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004),
+ OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW),
+ OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006),
+ OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007),
+ OID_U32_C(OID_INL_CONFIG, 0xFF020008),
+ OID_U32_C(OID_INL_DOT11D_CONFORMANCE, 0xFF02000C),
+ OID_U32(OID_INL_PHYCAPABILITIES, 0xFF02000D),
+ OID_U32_C(OID_INL_OUTPUTPOWER, 0xFF02000F),
+
+};
+
+int
+mgt_init(islpci_private *priv)
+{
+ int i;
+
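+	/* priv->mib is an array of per-OID cache pointers; only OIDs
+	 * flagged OID_FLAG_CACHED get a buffer below. */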
+ priv->mib = kmalloc(OID_NUM_LAST * sizeof (void *), GFP_KERNEL);
+ if (!priv->mib)
+ return -ENOMEM;
+
+ memset(priv->mib, 0, OID_NUM_LAST * sizeof (void *));
+
+ /* Alloc the cache */
+ for (i = 0; i < OID_NUM_LAST; i++) {
+ if (isl_oid[i].flags & OID_FLAG_CACHED) {
+ priv->mib[i] = kmalloc(isl_oid[i].size *
+ (isl_oid[i].range + 1),
+ GFP_KERNEL);
+ if (!priv->mib[i])
+ return -ENOMEM;
+ memset(priv->mib[i], 0,
+ isl_oid[i].size * (isl_oid[i].range + 1));
+ } else
+ priv->mib[i] = NULL;
+ }
+
+ init_rwsem(&priv->mib_sem);
+ prism54_mib_init(priv);
+
+ return 0;
+}
+
+void
+mgt_clean(islpci_private *priv)
+{
+ int i;
+
+ if (!priv->mib)
+ return;
+ for (i = 0; i < OID_NUM_LAST; i++)
+ if (priv->mib[i]) {
+ kfree(priv->mib[i]);
+ priv->mib[i] = NULL;
+ }
+ kfree(priv->mib);
+ priv->mib = NULL;
+}
+
+void
+mgt_le_to_cpu(int type, void *data)
+{
+ switch (type) {
+ case OID_TYPE_U32:
+ *(u32 *) data = le32_to_cpu(*(u32 *) data);
+ break;
+ case OID_TYPE_BUFFER:{
+ struct obj_buffer *buff = data;
+ buff->size = le32_to_cpu(buff->size);
+ buff->addr = le32_to_cpu(buff->addr);
+ break;
+ }
+ case OID_TYPE_BSS:{
+ struct obj_bss *bss = data;
+ bss->age = le16_to_cpu(bss->age);
+ bss->channel = le16_to_cpu(bss->channel);
+ bss->capinfo = le16_to_cpu(bss->capinfo);
+ bss->rates = le16_to_cpu(bss->rates);
+ bss->basic_rates = le16_to_cpu(bss->basic_rates);
+ break;
+ }
+ case OID_TYPE_BSSLIST:{
+ struct obj_bsslist *list = data;
+ int i;
+ list->nr = le32_to_cpu(list->nr);
+ for (i = 0; i < list->nr; i++)
+ mgt_le_to_cpu(OID_TYPE_BSS, &list->bsslist[i]);
+ break;
+ }
+ case OID_TYPE_FREQUENCIES:{
+ struct obj_frequencies *freq = data;
+ int i;
+ freq->nr = le16_to_cpu(freq->nr);
+ for (i = 0; i < freq->nr; i++)
+ freq->mhz[i] = le16_to_cpu(freq->mhz[i]);
+ break;
+ }
+ case OID_TYPE_MLME:{
+ struct obj_mlme *mlme = data;
+ mlme->id = le16_to_cpu(mlme->id);
+ mlme->state = le16_to_cpu(mlme->state);
+ mlme->code = le16_to_cpu(mlme->code);
+ break;
+ }
+ case OID_TYPE_MLMEEX:{
+ struct obj_mlmeex *mlme = data;
+ mlme->id = le16_to_cpu(mlme->id);
+ mlme->state = le16_to_cpu(mlme->state);
+ mlme->code = le16_to_cpu(mlme->code);
+ mlme->size = le16_to_cpu(mlme->size);
+ break;
+ }
+ case OID_TYPE_ATTACH:{
+ struct obj_attachment *attach = data;
+ attach->id = le16_to_cpu(attach->id);
+			attach->size = le16_to_cpu(attach->size);
+ break;
+ }
+ case OID_TYPE_SSID:
+ case OID_TYPE_KEY:
+ case OID_TYPE_ADDR:
+ case OID_TYPE_RAW:
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void
+mgt_cpu_to_le(int type, void *data)
+{
+ switch (type) {
+ case OID_TYPE_U32:
+ *(u32 *) data = cpu_to_le32(*(u32 *) data);
+ break;
+ case OID_TYPE_BUFFER:{
+ struct obj_buffer *buff = data;
+ buff->size = cpu_to_le32(buff->size);
+ buff->addr = cpu_to_le32(buff->addr);
+ break;
+ }
+ case OID_TYPE_BSS:{
+ struct obj_bss *bss = data;
+ bss->age = cpu_to_le16(bss->age);
+ bss->channel = cpu_to_le16(bss->channel);
+ bss->capinfo = cpu_to_le16(bss->capinfo);
+ bss->rates = cpu_to_le16(bss->rates);
+ bss->basic_rates = cpu_to_le16(bss->basic_rates);
+ break;
+ }
+ case OID_TYPE_BSSLIST:{
+ struct obj_bsslist *list = data;
+ int i;
+ list->nr = cpu_to_le32(list->nr);
+ for (i = 0; i < list->nr; i++)
+ mgt_cpu_to_le(OID_TYPE_BSS, &list->bsslist[i]);
+ break;
+ }
+ case OID_TYPE_FREQUENCIES:{
+ struct obj_frequencies *freq = data;
+ int i;
+ freq->nr = cpu_to_le16(freq->nr);
+ for (i = 0; i < freq->nr; i++)
+ freq->mhz[i] = cpu_to_le16(freq->mhz[i]);
+ break;
+ }
+ case OID_TYPE_MLME:{
+ struct obj_mlme *mlme = data;
+ mlme->id = cpu_to_le16(mlme->id);
+ mlme->state = cpu_to_le16(mlme->state);
+ mlme->code = cpu_to_le16(mlme->code);
+ break;
+ }
+ case OID_TYPE_MLMEEX:{
+ struct obj_mlmeex *mlme = data;
+ mlme->id = cpu_to_le16(mlme->id);
+ mlme->state = cpu_to_le16(mlme->state);
+ mlme->code = cpu_to_le16(mlme->code);
+ mlme->size = cpu_to_le16(mlme->size);
+ break;
+ }
+ case OID_TYPE_ATTACH:{
+ struct obj_attachment *attach = data;
+ attach->id = cpu_to_le16(attach->id);
+			attach->size = cpu_to_le16(attach->size);
+ break;
+ }
+ case OID_TYPE_SSID:
+ case OID_TYPE_KEY:
+ case OID_TYPE_ADDR:
+ case OID_TYPE_RAW:
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* Note: the data buffer is modified by this function. */
+
+int
+mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
+{
+ int ret = 0;
+ struct islpci_mgmtframe *response = NULL;
+ int response_op = PIMFOR_OP_ERROR;
+ int dlen;
+ void *cache, *_data = data;
+ u32 oid;
+
+ BUG_ON(OID_NUM_LAST <= n);
+ BUG_ON(extra > isl_oid[n].range);
+
+ if (!priv->mib)
+ /* memory has been freed */
+ return -1;
+
+ dlen = isl_oid[n].size;
+ cache = priv->mib[n];
+ cache += (cache ? extra * dlen : 0);
+ oid = isl_oid[n].oid + extra;
+
+ if (_data == NULL)
+ /* we are requested to re-set a cached value */
+ _data = cache;
+ else
+ mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data);
+ /* If we are going to write to the cache, we don't want anyone to read
+ * it -> acquire write lock.
+ * Else we could acquire a read lock to be sure we don't bother the
+ * commit process (which takes a write lock). But I'm not sure if it's
+ * needed.
+ */
+ if (cache)
+ down_write(&priv->mib_sem);
+
+ if (islpci_get_state(priv) >= PRV_STATE_READY) {
+ ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
+ _data, dlen, &response);
+ if (!ret) {
+ response_op = response->header->operation;
+ islpci_mgt_release(response);
+ }
+ if (ret || response_op == PIMFOR_OP_ERROR)
+ ret = -EIO;
+ } else if (!cache)
+ ret = -EIO;
+
+ if (cache) {
+ if (!ret && data)
+ memcpy(cache, _data, dlen);
+ up_write(&priv->mib_sem);
+ }
+
+ /* re-set given data to what it was */
+ if (data)
+ mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);
+
+ return ret;
+}
+
+/* None of these are cached */
+int
+mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len)
+{
+ int ret = 0;
+ struct islpci_mgmtframe *response;
+ int response_op = PIMFOR_OP_ERROR;
+ int dlen;
+ u32 oid;
+
+ BUG_ON(OID_NUM_LAST <= n);
+
+ dlen = isl_oid[n].size;
+ oid = isl_oid[n].oid;
+
+ mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data);
+
+ if (islpci_get_state(priv) >= PRV_STATE_READY) {
+ ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
+ data, dlen + extra_len, &response);
+ if (!ret) {
+ response_op = response->header->operation;
+ islpci_mgt_release(response);
+ }
+ if (ret || response_op == PIMFOR_OP_ERROR)
+ ret = -EIO;
+ } else
+ ret = -EIO;
+
+ /* re-set given data to what it was */
+ if (data)
+ mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);
+
+ return ret;
+}
+
+int
+mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data,
+ union oid_res_t *res)
+{
+
+ int ret = -EIO;
+ int reslen = 0;
+ struct islpci_mgmtframe *response = NULL;
+
+ int dlen;
+ void *cache, *_res = NULL;
+ u32 oid;
+
+ BUG_ON(OID_NUM_LAST <= n);
+ BUG_ON(extra > isl_oid[n].range);
+
+ res->ptr = NULL;
+
+ if (!priv->mib)
+ /* memory has been freed */
+ return -1;
+
+ dlen = isl_oid[n].size;
+ cache = priv->mib[n];
+ cache += cache ? extra * dlen : 0;
+ oid = isl_oid[n].oid + extra;
+ reslen = dlen;
+
+ if (cache)
+ down_read(&priv->mib_sem);
+
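+	/* Query the device when it is ready; otherwise fall back to the
+	 * cached value, if there is one. */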
+ if (islpci_get_state(priv) >= PRV_STATE_READY) {
+ ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
+ oid, data, dlen, &response);
+ if (ret || !response ||
+ response->header->operation == PIMFOR_OP_ERROR) {
+ if (response)
+ islpci_mgt_release(response);
+ ret = -EIO;
+ }
+ if (!ret) {
+ _res = response->data;
+ reslen = response->header->length;
+ }
+ } else if (cache) {
+ _res = cache;
+ ret = 0;
+ }
+ if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32)
+ res->u = ret ? 0 : le32_to_cpu(*(u32 *) _res);
+ else {
+ res->ptr = kmalloc(reslen, GFP_KERNEL);
+ BUG_ON(res->ptr == NULL);
+ if (ret)
+ memset(res->ptr, 0, reslen);
+ else {
+ memcpy(res->ptr, _res, reslen);
+ mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE,
+ res->ptr);
+ }
+ }
+ if (cache)
+ up_read(&priv->mib_sem);
+
+ if (response && !ret)
+ islpci_mgt_release(response);
+
+ if (reslen > isl_oid[n].size)
+		printk(KERN_DEBUG
+		       "mgt_get_request(0x%x): received data length was bigger "
+		       "than expected (%d > %d). Memory is probably corrupted...\n",
+		       oid, reslen, isl_oid[n].size);
+
+ return ret;
+}
+
+/* lock outside */
+int
+mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n)
+{
+ int i, ret = 0;
+ struct islpci_mgmtframe *response;
+
+ for (i = 0; i < n; i++) {
+ struct oid_t *t = &(isl_oid[l[i]]);
+ void *data = priv->mib[l[i]];
+ int j = 0;
+ u32 oid = t->oid;
+ BUG_ON(data == NULL);
+ while (j <= t->range) {
+ int r = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET,
+ oid, data, t->size,
+ &response);
+ if (response) {
+ r |= (response->header->operation == PIMFOR_OP_ERROR);
+ islpci_mgt_release(response);
+ }
+ if (r)
+ printk(KERN_ERR "%s: mgt_commit_list: failure. "
+ "oid=%08x err=%d\n",
+ priv->ndev->name, oid, r);
+ ret |= r;
+ j++;
+ oid++;
+ data += t->size;
+ }
+ }
+ return ret;
+}
+
+/* Lock outside */
+
+void
+mgt_set(islpci_private *priv, enum oid_num_t n, void *data)
+{
+ BUG_ON(OID_NUM_LAST <= n);
+ BUG_ON(priv->mib[n] == NULL);
+
+ memcpy(priv->mib[n], data, isl_oid[n].size);
+ mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, priv->mib[n]);
+}
+
+void
+mgt_get(islpci_private *priv, enum oid_num_t n, void *res)
+{
+ BUG_ON(OID_NUM_LAST <= n);
+ BUG_ON(priv->mib[n] == NULL);
+ BUG_ON(res == NULL);
+
+ memcpy(res, priv->mib[n], isl_oid[n].size);
+ mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, res);
+}
+
+/* Commits the cache. Lock outside. */
+
+static enum oid_num_t commit_part1[] = {
+ OID_INL_CONFIG,
+ OID_INL_MODE,
+ DOT11_OID_BSSTYPE,
+ DOT11_OID_CHANNEL,
+ DOT11_OID_MLMEAUTOLEVEL
+};
+
+static enum oid_num_t commit_part2[] = {
+ DOT11_OID_SSID,
+ DOT11_OID_PSMBUFFER,
+ DOT11_OID_AUTHENABLE,
+ DOT11_OID_PRIVACYINVOKED,
+ DOT11_OID_EXUNENCRYPTED,
+ DOT11_OID_DEFKEYX, /* MULTIPLE */
+ DOT11_OID_DEFKEYID,
+ DOT11_OID_DOT1XENABLE,
+ OID_INL_DOT11D_CONFORMANCE,
+ /* Do not initialize this - fw < 1.0.4.3 rejects it
+ OID_INL_OUTPUTPOWER,
+ */
+};
+
+/* update the MAC addr. */
+static int
+mgt_update_addr(islpci_private *priv)
+{
+ struct islpci_mgmtframe *res;
+ int ret;
+
+ ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
+ isl_oid[GEN_OID_MACADDRESS].oid, NULL,
+ isl_oid[GEN_OID_MACADDRESS].size, &res);
+
+ if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
+ memcpy(priv->ndev->dev_addr, res->data, 6);
+ else
+ ret = -EIO;
+ if (res)
+ islpci_mgt_release(res);
+
+ if (ret)
+ printk(KERN_ERR "%s: mgt_update_addr: failure\n", priv->ndev->name);
+ return ret;
+}
+
+#define VEC_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
+int
+mgt_commit(islpci_private *priv)
+{
+ int rvalue;
+ u32 u;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT)
+ return 0;
+
+ rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1));
+
+ if (priv->iw_mode != IW_MODE_MONITOR)
+ rvalue |= mgt_commit_list(priv, commit_part2, VEC_SIZE(commit_part2));
+
+ u = OID_INL_MODE;
+ rvalue |= mgt_commit_list(priv, &u, 1);
+ rvalue |= mgt_update_addr(priv);
+
+ if (rvalue) {
+		/* some requests have failed. The device might be in an
+		   incoherent state. We should reset it! */
+ printk(KERN_DEBUG "%s: mgt_commit: failure\n", priv->ndev->name);
+ }
+ return rvalue;
+}
+
+/* The following OIDs need to be "unlatched":
+ *
+ * MEDIUMLIMIT, BEACONPERIOD, DTIMPERIOD, ATIMWINDOW, LISTENINTERVAL,
+ * FREQUENCY, EXTENDEDRATES.
+ *
+ * The way to do this is to set the ESSID. Note, though, that they may
+ * already have been unlatched earlier by setting another OID. */
+#if 0
+void
+mgt_unlatch_all(islpci_private *priv)
+{
+ u32 u;
+ int rvalue = 0;
+
+ if (islpci_get_state(priv) < PRV_STATE_INIT)
+ return;
+
+ u = DOT11_OID_SSID;
+ rvalue = mgt_commit_list(priv, &u, 1);
+ /* Necessary if in MANUAL RUN mode? */
+#if 0
+ u = OID_INL_MODE;
+ rvalue |= mgt_commit_list(priv, &u, 1);
+
+ u = DOT11_OID_MLMEAUTOLEVEL;
+ rvalue |= mgt_commit_list(priv, &u, 1);
+
+ u = OID_INL_MODE;
+ rvalue |= mgt_commit_list(priv, &u, 1);
+#endif
+
+ if (rvalue)
+ printk(KERN_DEBUG "%s: Unlatching OIDs failed\n", priv->ndev->name);
+}
+#endif
+
+/* This tells you whether you are allowed to answer an mlme(ex) request. */
+
+int
+mgt_mlme_answer(islpci_private *priv)
+{
+ u32 mlmeautolevel;
+	/* Acquire a read lock because, during a mode change, it is
+	 * possible to answer true while the card is switching from master
+	 * to managed mode. Answering an mlme request in that situation
+	 * could hang the card.
+	 */
+ down_read(&priv->mib_sem);
+ mlmeautolevel =
+ le32_to_cpu(*(u32 *) priv->mib[DOT11_OID_MLMEAUTOLEVEL]);
+ up_read(&priv->mib_sem);
+
+ return ((priv->iw_mode == IW_MODE_MASTER) &&
+ (mlmeautolevel >= DOT11_MLME_INTERMEDIATE));
+}
+
+enum oid_num_t
+mgt_oidtonum(u32 oid)
+{
+ int i;
+
+ for (i = 0; i < OID_NUM_LAST; i++)
+ if (isl_oid[i].oid == oid)
+ return i;
+
+ printk(KERN_DEBUG "looking for an unknown oid 0x%x", oid);
+
+ return OID_NUM_LAST;
+}
+
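+/* Render an OID response as human-readable text into 'str' (at most
+ * PRIV_STR_SIZE bytes); returns the number of characters written. */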
+int
+mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
+{
+ switch (isl_oid[n].flags & OID_FLAG_TYPE) {
+ case OID_TYPE_U32:
+ return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
+ break;
+ case OID_TYPE_BUFFER:{
+ struct obj_buffer *buff = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "size=%u\naddr=0x%X\n", buff->size,
+ buff->addr);
+ }
+ break;
+ case OID_TYPE_BSS:{
+ struct obj_bss *bss = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "age=%u\nchannel=%u\n"
+ "capinfo=0x%X\nrates=0x%X\n"
+ "basic_rates=0x%X\n", bss->age,
+ bss->channel, bss->capinfo,
+ bss->rates, bss->basic_rates);
+ }
+ break;
+ case OID_TYPE_BSSLIST:{
+ struct obj_bsslist *list = r->ptr;
+ int i, k;
+ k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
+ for (i = 0; i < list->nr; i++)
+ k += snprintf(str + k, PRIV_STR_SIZE - k,
+ "bss[%u] : \nage=%u\nchannel=%u\n"
+ "capinfo=0x%X\nrates=0x%X\n"
+ "basic_rates=0x%X\n",
+ i, list->bsslist[i].age,
+ list->bsslist[i].channel,
+ list->bsslist[i].capinfo,
+ list->bsslist[i].rates,
+ list->bsslist[i].basic_rates);
+ return k;
+ }
+ break;
+ case OID_TYPE_FREQUENCIES:{
+ struct obj_frequencies *freq = r->ptr;
+ int i, t;
+ printk("nr : %u\n", freq->nr);
+ t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
+ for (i = 0; i < freq->nr; i++)
+ t += snprintf(str + t, PRIV_STR_SIZE - t,
+ "mhz[%u]=%u\n", i, freq->mhz[i]);
+ return t;
+ }
+ break;
+ case OID_TYPE_MLME:{
+ struct obj_mlme *mlme = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "id=0x%X\nstate=0x%X\ncode=0x%X\n",
+ mlme->id, mlme->state, mlme->code);
+ }
+ break;
+ case OID_TYPE_MLMEEX:{
+ struct obj_mlmeex *mlme = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "id=0x%X\nstate=0x%X\n"
+ "code=0x%X\nsize=0x%X\n", mlme->id,
+ mlme->state, mlme->code, mlme->size);
+ }
+ break;
+ case OID_TYPE_ATTACH:{
+ struct obj_attachment *attach = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "id=%d\nsize=%d\n",
+ attach->id,
+ attach->size);
+ }
+ break;
+ case OID_TYPE_SSID:{
+ struct obj_ssid *ssid = r->ptr;
+ return snprintf(str, PRIV_STR_SIZE,
+ "length=%u\noctets=%.*s\n",
+ ssid->length, ssid->length,
+ ssid->octets);
+ }
+ break;
+ case OID_TYPE_KEY:{
+ struct obj_key *key = r->ptr;
+ int t, i;
+ t = snprintf(str, PRIV_STR_SIZE,
+ "type=0x%X\nlength=0x%X\nkey=0x",
+ key->type, key->length);
+ for (i = 0; i < key->length; i++)
+ t += snprintf(str + t, PRIV_STR_SIZE - t,
+ "%02X:", key->key[i]);
+ t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ return t;
+ }
+ break;
+ case OID_TYPE_RAW:
+ case OID_TYPE_ADDR:{
+ unsigned char *buff = r->ptr;
+ int t, i;
+ t = snprintf(str, PRIV_STR_SIZE, "hex data=");
+ for (i = 0; i < isl_oid[n].size; i++)
+ t += snprintf(str + t, PRIV_STR_SIZE - t,
+ "%02X:", buff[i]);
+ t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ return t;
+ }
+ break;
+ default:
+ BUG();
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/prism54/oid_mgt.h b/drivers/net/wireless/prism54/oid_mgt.h
new file mode 100644
index 000000000000..92c8a2d4acd8
--- /dev/null
+++ b/drivers/net/wireless/prism54/oid_mgt.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#if !defined(_OID_MGT_H)
+#define _OID_MGT_H
+
+#include "isl_oid.h"
+#include "islpci_dev.h"
+
+extern struct oid_t isl_oid[];
+
+int mgt_init(islpci_private *);
+
+void mgt_clean(islpci_private *);
+
+/* I don't know where to put these 2 */
+extern const int frequency_list_a[];
+int channel_of_freq(int);
+
+void mgt_le_to_cpu(int, void *);
+
+int mgt_set_request(islpci_private *, enum oid_num_t, int, void *);
+int mgt_set_varlen(islpci_private *, enum oid_num_t, void *, int);
+
+
+int mgt_get_request(islpci_private *, enum oid_num_t, int, void *,
+ union oid_res_t *);
+
+int mgt_commit_list(islpci_private *, enum oid_num_t *, int);
+
+void mgt_set(islpci_private *, enum oid_num_t, void *);
+
+void mgt_get(islpci_private *, enum oid_num_t, void *);
+
+int mgt_commit(islpci_private *);
+
+int mgt_mlme_answer(islpci_private *);
+
+enum oid_num_t mgt_oidtonum(u32 oid);
+
+int mgt_response_to_str(enum oid_num_t, union oid_res_t *, char *);
+
+#endif /* !defined(_OID_MGT_H) */
+/* EOF */
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
new file mode 100644
index 000000000000..55541c01752e
--- /dev/null
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -0,0 +1,44 @@
+/*
+ * (C) 2004 Margit Schubert-While <margitsw@t-online.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * Compatibility header file to aid support of different kernel versions
+ */
+
+#ifdef PRISM54_COMPAT24
+#include "prismcompat24.h"
+#else /* PRISM54_COMPAT24 */
+
+#ifndef _PRISM_COMPAT_H
+#define _PRISM_COMPAT_H
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/config.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/compiler.h>
+
+#ifndef __iomem
+#define __iomem
+#endif
+
+#define PRISM_FW_PDEV &priv->pdev->dev
+
+#endif /* _PRISM_COMPAT_H */
+#endif /* PRISM54_COMPAT24 */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
new file mode 100644
index 000000000000..6e5bda56b8f8
--- /dev/null
+++ b/drivers/net/wireless/ray_cs.c
@@ -0,0 +1,2957 @@
+/*=============================================================================
+ *
+ * A PCMCIA client driver for the Raylink wireless LAN card.
+ * The starting point for this module was the skeleton.c in the
+ * PCMCIA 2.9.12 package written by David Hinds, dahinds@users.sourceforge.net
+ *
+ *
+ * Copyright (c) 1998 Corey Thomas (corey@world.std.com)
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 only of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * It is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Changes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
+ * - reorganize kmallocs in ray_attach, checking all for failure
+ * and releasing the previous allocations if one fails
+ *
+ * Daniele Bellucci <bellucda@tiscali.it> - 07/10/2003
+ * - Audit copy_to_user in ioctl(SIOCGIWESSID)
+ *
+=============================================================================*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/mem_op.h>
+
+#include <linux/wireless.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+/* Warning : this stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+/* Definitions we need for spy */
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
+
+#include "rayctl.h"
+#include "ray_cs.h"
+
+/* All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ you do not define PCMCIA_DEBUG at all, all the debug code will be
+ left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ be present but disabled -- but it can then be enabled for specific
+ modules at load time with a 'pc_debug=#' option to insmod.
+*/
+
+#ifdef RAYLINK_DEBUG
+#define PCMCIA_DEBUG RAYLINK_DEBUG
+#endif
+#ifdef PCMCIA_DEBUG
+static int ray_debug;
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+/* #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); */
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(args);
+#else
+#define DEBUG(n, args...)
+#endif
+/** Prototypes based on PCMCIA skeleton driver *******************************/
+static void ray_config(dev_link_t *link);
+static void ray_release(dev_link_t *link);
+static int ray_event(event_t event, int priority, event_callback_args_t *args);
+static dev_link_t *ray_attach(void);
+static void ray_detach(dev_link_t *);
+
+/***** Prototypes indicated by device structure ******************************/
+static int ray_dev_close(struct net_device *dev);
+static int ray_dev_config(struct net_device *dev, struct ifmap *map);
+static struct net_device_stats *ray_get_stats(struct net_device *dev);
+static int ray_dev_init(struct net_device *dev);
+static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static struct ethtool_ops netdev_ethtool_ops;
+
+static int ray_open(struct net_device *dev);
+static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void ray_update_multi_list(struct net_device *dev, int all);
+static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
+ unsigned char *data, int len);
+static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type,
+ unsigned char *data);
+static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
+#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */
+static iw_stats * ray_get_wireless_stats(struct net_device * dev);
+#endif /* WIRELESS_EXT > 7 */
+
+/***** Prototypes for raylink functions **************************************/
+static int asc_to_int(char a);
+static void authenticate(ray_dev_t *local);
+static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
+static void authenticate_timeout(u_long);
+static int get_free_ccs(ray_dev_t *local);
+static int get_free_tx_ccs(ray_dev_t *local);
+static void init_startup_params(ray_dev_t *local);
+static int parse_addr(char *in_str, UCHAR *out);
+static int ray_hw_xmit(unsigned char* data, int len, struct net_device* dev, UCHAR type);
+static int ray_init(struct net_device *dev);
+static int interrupt_ecf(ray_dev_t *local, int ccs);
+static void ray_reset(struct net_device *dev);
+static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len);
+static void verify_dl_startup(u_long);
+
+/* Prototypes for interrupt time functions **********************************/
+static irqreturn_t ray_interrupt (int reg, void *dev_id, struct pt_regs *regs);
+static void clear_interrupt(ray_dev_t *local);
+static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
+ unsigned int pkt_addr, int rx_len);
+static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr, int len);
+static void ray_rx(struct net_device *dev, ray_dev_t *local, struct rcs __iomem *prcs);
+static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs);
+static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
+ unsigned int pkt_addr, int rx_len);
+static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned int pkt_addr,
+ int rx_len);
+static void associate(ray_dev_t *local);
+
+/* Card command functions */
+static int dl_startup_params(struct net_device *dev);
+static void join_net(u_long local);
+static void start_net(u_long local);
+/* void start_net(ray_dev_t *local); */
+
+/*===========================================================================*/
+/* Parameters that can be set with 'insmod' */
+
+/* ADHOC=0, Infrastructure=1 */
+static int net_type = ADHOC;
+
+/* Hop dwell time in Kus (1024 us units defined by 802.11) */
+static int hop_dwell = 128;
+
+/* Beacon period in Kus */
+static int beacon_period = 256;
+
+/* power save mode (0 = off, 1 = save power) */
+static int psm;
+
+/* String for network's Extended Service Set ID. 32 Characters max */
+static char *essid;
+
+/* Default to encapsulation unless translation requested */
+static int translate = 1;
+
+static int country = USA;
+
+static int sniffer;
+
+static int bc;
+
+/* 48 bit physical card address if overriding card's real physical
+ * address is required. Since IEEE 802.11 addresses are 48 bits
+ * like ethernet, an int can't be used, so a string is used. To
+ * allow use of addresses starting with a decimal digit, the first
+ * character must be a letter and will be ignored. This letter is
+ * followed by up to 12 hex digits which are the address. If less
+ * than 12 digits are used, the address will be left filled with 0's.
+ * Note that bit 0 of the first byte is the broadcast bit, and evil
+ * things will happen if it is not 0 in a card address.
+ */
+static char *phy_addr = NULL;
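+/* For example, a hypothetical load with phy_addr="x00603f112233" would
+ * have the leading letter 'x' ignored and the remaining 12 hex digits
+ * taken as the MAC address 00:60:3F:11:22:33; the conversion itself is
+ * done by parse_addr().
+ */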
+
+
+/* The dev_info variable is the "key" that is used to match up this
+ device driver with appropriate cards, through the card configuration
+ database.
+*/
+static dev_info_t dev_info = "ray_cs";
+
+/* A linked list of "instances" of the ray device. Each actual
+ PCMCIA card corresponds to one device instance, and is described
+ by one dev_link_t structure (defined in ds.h).
+*/
+static dev_link_t *dev_list = NULL;
+
+/* A dev_link_t structure has fields for most things that are needed
+ to keep track of a socket, but there will usually be some device
+ specific information that also needs to be kept track of. The
+ 'priv' pointer in a dev_link_t structure can be used to point to
+ a device-specific private data structure, like this.
+*/
+static unsigned int ray_mem_speed = 500;
+
+MODULE_AUTHOR("Corey Thomas <corey@world.std.com>");
+MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver");
+MODULE_LICENSE("GPL");
+
+module_param(net_type, int, 0);
+module_param(hop_dwell, int, 0);
+module_param(beacon_period, int, 0);
+module_param(psm, int, 0);
+module_param(essid, charp, 0);
+module_param(translate, int, 0);
+module_param(country, int, 0);
+module_param(sniffer, int, 0);
+module_param(bc, int, 0);
+module_param(phy_addr, charp, 0);
+module_param(ray_mem_speed, int, 0);
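+/* A hypothetical module load using some of the parameters above might
+ * look like:
+ *   insmod ray_cs net_type=1 essid="MYNET"
+ * (net_type=1 selects infrastructure mode; the ESSID value is only an
+ * example). pc_debug=# is additionally available when the driver is
+ * built with PCMCIA_DEBUG defined.
+ */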
+
+static UCHAR b5_default_startup_parms[] = {
+ 0, 0, /* Adhoc station */
+ 'L','I','N','U','X', 0, 0, 0, /* 32 char ESSID */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, /* Active scan, CA Mode */
+ 0, 0, 0, 0, 0, 0, /* No default MAC addr */
+ 0x7f, 0xff, /* Frag threshold */
+ 0x00, 0x80, /* Hop time 128 Kus*/
+ 0x01, 0x00, /* Beacon period 256 Kus */
+ 0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout*/
+ 0x1d, 0x82, 0x4e, /* SIFS, DIFS, PIFS */
+ 0x7f, 0xff, /* RTS threshold */
+ 0x04, 0xe2, 0x38, 0xA4, /* scan_dwell, max_scan_dwell */
+ 0x05, /* assoc resp timeout thresh */
+ 0x08, 0x02, 0x08, /* adhoc, infra, super cycle max*/
+ 0, /* Promiscuous mode */
+ 0x0c, 0x0bd, /* Unique word */
+ 0x32, /* Slot time */
+ 0xff, 0xff, /* roam-low snr, low snr count */
+ 0x05, 0xff, /* Infra, adhoc missed bcn thresh */
+ 0x01, 0x0b, 0x4f, /* USA, hop pattern, hop pat length */
+/* b4 - b5 differences start here */
+ 0x00, 0x3f, /* CW max */
+ 0x00, 0x0f, /* CW min */
+ 0x04, 0x08, /* Noise gain, limit offset */
+ 0x28, 0x28, /* det rssi, med busy offsets */
+ 7, /* det sync thresh */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* allow broadcast SSID probe resp */
+ 0, 0, /* privacy must start, can join */
+ 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */
+};
+
+static UCHAR b4_default_startup_parms[] = {
+ 0, 0, /* Adhoc station */
+ 'L','I','N','U','X', 0, 0, 0, /* 32 char ESSID */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, /* Active scan, CA Mode */
+ 0, 0, 0, 0, 0, 0, /* No default MAC addr */
+ 0x7f, 0xff, /* Frag threshold */
+ 0x02, 0x00, /* Hop time */
+ 0x00, 0x01, /* Beacon period */
+ 0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout*/
+ 0x1d, 0x82, 0xce, /* SIFS, DIFS, PIFS */
+ 0x7f, 0xff, /* RTS threshold */
+ 0xfb, 0x1e, 0xc7, 0x5c, /* scan_dwell, max_scan_dwell */
+ 0x05, /* assoc resp timeout thresh */
+ 0x04, 0x02, 0x4, /* adhoc, infra, super cycle max*/
+ 0, /* Promiscuous mode */
+ 0x0c, 0x0bd, /* Unique word */
+ 0x4e, /* Slot time (TBD seems wrong)*/
+ 0xff, 0xff, /* roam-low snr, low snr count */
+ 0x05, 0xff, /* Infra, adhoc missed bcn thresh */
+ 0x01, 0x0b, 0x4e, /* USA, hop pattern, hop pat length */
+/* b4 - b5 differences start here */
+ 0x3f, 0x0f, /* CW max, min */
+ 0x04, 0x08, /* Noise gain, limit offset */
+ 0x28, 0x28, /* det rssi, med busy offsets */
+ 7, /* det sync thresh */
+ 0, 2, 2 /* test mode, min, max*/
+};
+/*===========================================================================*/
+static unsigned char eth2_llc[] = {0xaa, 0xaa, 3, 0, 0, 0};
+
+static char hop_pattern_length[] = { 1,
+ USA_HOP_MOD, EUROPE_HOP_MOD,
+ JAPAN_HOP_MOD, KOREA_HOP_MOD,
+ SPAIN_HOP_MOD, FRANCE_HOP_MOD,
+ ISRAEL_HOP_MOD, AUSTRALIA_HOP_MOD,
+ JAPAN_TEST_HOP_MOD
+};
+
+static char rcsid[] = "Raylink/WebGear wireless LAN - Corey Thomas <corey@world.std.com>";
+
+/*=============================================================================
+ ray_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+ The dev_link structure is initialized, but we don't actually
+ configure the card at this point -- we wait until we receive a
+ card insertion event.
+=============================================================================*/
+static dev_link_t *ray_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ ray_dev_t *local;
+ int ret;
+ struct net_device *dev;
+
+ DEBUG(1, "ray_attach()\n");
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+
+ if (!link)
+ return NULL;
+
+ /* Allocate space for private device-specific data */
+ dev = alloc_etherdev(sizeof(ray_dev_t));
+
+ if (!dev)
+ goto fail_alloc_dev;
+
+ local = dev->priv;
+
+ memset(link, 0, sizeof(struct dev_link_t));
+
+ /* The io structure describes IO port mapping. None used here */
+ link->io.NumPorts1 = 0;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup. For PCMCIA, driver takes what's given */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = &ray_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ link->priv = dev;
+ link->irq.Instance = dev;
+
+ local->finder = link;
+ local->card_status = CARD_INSERTED;
+ local->authentication_state = UNAUTHENTICATED;
+ local->num_multi = 0;
+ DEBUG(2,"ray_attach link = %p, dev = %p, local = %p, intr = %p\n",
+ link,dev,local,&ray_interrupt);
+
+ /* Raylink entries in the device structure */
+ dev->hard_start_xmit = &ray_dev_start_xmit;
+ dev->set_config = &ray_dev_config;
+ dev->get_stats = &ray_get_stats;
+ dev->do_ioctl = &ray_dev_ioctl;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */
+ dev->get_wireless_stats = ray_get_wireless_stats;
+#endif
+
+ dev->set_multicast_list = &set_multicast_list;
+
+ DEBUG(2,"ray_cs ray_attach calling ether_setup.)\n");
+ SET_MODULE_OWNER(dev);
+ dev->init = &ray_dev_init;
+ dev->open = &ray_open;
+ dev->stop = &ray_dev_close;
+ netif_stop_queue(dev);
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &ray_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+
+ DEBUG(2,"ray_cs ray_attach calling pcmcia_register_client(...)\n");
+
+ init_timer(&local->timer);
+
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ printk("ray_cs ray_attach RegisterClient unhappy - detaching\n");
+ cs_error(link->handle, RegisterClient, ret);
+ ray_detach(link);
+ return NULL;
+ }
+ DEBUG(2,"ray_cs ray_attach ending\n");
+ return link;
+
+fail_alloc_dev:
+ kfree(link);
+ return NULL;
+} /* ray_attach */
+/*=============================================================================
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+=============================================================================*/
+static void ray_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+
+ DEBUG(1, "ray_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ /* If the device is currently configured and active, we won't
+ actually delete it yet. Instead, it is marked so that when
+ the release() function is called, that will trigger a proper
+ detach().
+ */
+ if (link->state & DEV_CONFIG)
+ ray_release(link);
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->priv) {
+ struct net_device *dev = link->priv;
+ if (link->dev) unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ kfree(link);
+ DEBUG(2,"ray_cs ray_detach ending\n");
+} /* ray_detach */
+/*=============================================================================
+ ray_config() is run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+=============================================================================*/
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
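+/*
+ * For reference, a call such as
+ *   CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ * records RequestIRQ in last_fn and the call's return value in last_ret,
+ * then jumps to the cs_failed label below on any nonzero result.
+ */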
+#define MAX_TUPLE_SIZE 128
+static void ray_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ int last_fn = 0, last_ret = 0;
+ int i;
+ u_char buf[MAX_TUPLE_SIZE];
+ win_req_t req;
+ memreq_t mem;
+ struct net_device *dev = (struct net_device *)link->priv;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+
+ DEBUG(1, "ray_config(0x%p)\n", link);
+
+ /* This reads the card's CONFIG tuple to find its configuration regs */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = MAX_TUPLE_SIZE;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Determine card type and firmware version */
+ buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0;
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = MAX_TUPLE_SIZE;
+ tuple.TupleOffset = 2;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+
+ for (i=0; i<tuple.TupleDataLen - 4; i++)
+ if (buf[i] == 0) buf[i] = ' ';
+ printk(KERN_INFO "ray_cs Detected: %s\n",buf);
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Now allocate an interrupt line. Note that this does not
+ actually assign a handler to the interrupt.
+ */
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ dev->irq = link->irq.AssignedIRQ;
+
+ /* This actually configures the PCMCIA socket -- setting up
+ the I/O windows and the interrupt mapping.
+ */
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+/*** Set up 32k window for shared memory (transmit and control) ************/
+ req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
+ req.Base = 0;
+ req.Size = 0x8000;
+ req.AccessSpeed = ray_mem_speed;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ mem.CardOffset = 0x0000; mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
+ local->sram = ioremap(req.Base,req.Size);
+
+/*** Set up 16k window for shared memory (receive buffer) ***************/
+ req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
+ req.Base = 0;
+ req.Size = 0x4000;
+ req.AccessSpeed = ray_mem_speed;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->rmem_handle));
+ mem.CardOffset = 0x8000; mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem));
+ local->rmem = ioremap(req.Base,req.Size);
+
+/*** Set up window for attribute memory ***********************************/
+ req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE | WIN_USE_WAIT;
+ req.Base = 0;
+ req.Size = 0x1000;
+ req.AccessSpeed = ray_mem_speed;
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->amem_handle));
+ mem.CardOffset = 0x0000; mem.Page = 0;
+ CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem));
+ local->amem = ioremap(req.Base,req.Size);
+
+ DEBUG(3,"ray_config sram=%p\n",local->sram);
+ DEBUG(3,"ray_config rmem=%p\n",local->rmem);
+ DEBUG(3,"ray_config amem=%p\n",local->amem);
+ if (ray_init(dev) < 0) {
+ ray_release(link);
+ return;
+ }
+
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ i = register_netdev(dev);
+ if (i != 0) {
+ printk("ray_config register_netdev() failed\n");
+ ray_release(link);
+ return;
+ }
+
+ strcpy(local->node.dev_name, dev->name);
+ link->dev = &local->node;
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ",
+ dev->name, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+
+ ray_release(link);
+} /* ray_config */
+
+static inline struct ccs __iomem *ccs_base(ray_dev_t *dev)
+{
+ return dev->sram + CCS_BASE;
+}
+
+static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
+{
+ /*
+ * This looks nonsensical, since there is a separate
+ * RCS_BASE. But the difference between a "struct rcs"
+ * and a "struct ccs" ends up being in the _index_ off
+ * the base, so the base pointer is the same for both
+ * ccs/rcs.
+ */
+ return dev->sram + CCS_BASE;
+}
+
+/*===========================================================================*/
+static int ray_init(struct net_device *dev)
+{
+ int i;
+ UCHAR *p;
+ struct ccs __iomem *pccs;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ dev_link_t *link = local->finder;
+ DEBUG(1, "ray_init(0x%p)\n", dev);
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(0,"ray_init - device not present\n");
+ return -1;
+ }
+
+ local->net_type = net_type;
+ local->sta_type = TYPE_STA;
+
+ /* Copy the startup results to local memory */
+ memcpy_fromio(&local->startup_res, local->sram + ECF_TO_HOST_BASE,\
+ sizeof(struct startup_res_6));
+
+ /* Check Power up test status and get mac address from card */
+ if (local->startup_res.startup_word != 0x80) {
+ printk(KERN_INFO "ray_init ERROR card status = %2x\n",
+ local->startup_res.startup_word);
+ local->card_status = CARD_INIT_ERROR;
+ return -1;
+ }
+
+ local->fw_ver = local->startup_res.firmware_version[0];
+ local->fw_bld = local->startup_res.firmware_version[1];
+ local->fw_var = local->startup_res.firmware_version[2];
+ DEBUG(1,"ray_init firmware version %d.%d \n",local->fw_ver, local->fw_bld);
+
+ local->tib_length = 0x20;
+ if ((local->fw_ver == 5) && (local->fw_bld >= 30))
+ local->tib_length = local->startup_res.tib_length;
+ DEBUG(2,"ray_init tib_length = 0x%02x\n", local->tib_length);
+ /* Initialize CCS's to buffer free state */
+ pccs = ccs_base(local);
+ for (i=0; i<NUMBER_OF_CCS; i++) {
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ }
+ init_startup_params(local);
+
+ /* copy mac address to startup parameters */
+ if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr))
+ {
+ p = local->sparm.b4.a_mac_addr;
+ }
+ else
+ {
+ memcpy(&local->sparm.b4.a_mac_addr,
+ &local->startup_res.station_addr, ADDRLEN);
+ p = local->sparm.b4.a_mac_addr;
+ }
+
+ clear_interrupt(local); /* Clear any interrupt from the card */
+ local->card_status = CARD_AWAITING_PARAM;
+ DEBUG(2,"ray_init ending\n");
+ return 0;
+} /* ray_init */
+/*===========================================================================*/
+/* Download startup parameters to the card and command it to read them */
+static int dl_startup_params(struct net_device *dev)
+{
+ int ccsindex;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ struct ccs __iomem *pccs;
+ dev_link_t *link = local->finder;
+
+ DEBUG(1,"dl_startup_params entered\n");
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs dl_startup_params - device not present\n");
+ return -1;
+ }
+
+ /* Copy parameters to host to ECF area */
+ if (local->fw_ver == 0x55)
+ memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b4,
+ sizeof(struct b4_startup_params));
+ else
+ memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b5,
+ sizeof(struct b5_startup_params));
+
+
+ /* Fill in the CCS fields for the ECF */
+ if ((ccsindex = get_free_ccs(local)) < 0) return -1;
+ local->dl_param_ccs = ccsindex;
+ pccs = ccs_base(local) + ccsindex;
+ writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd);
+ DEBUG(2,"dl_startup_params start ccsindex = %d\n", local->dl_param_ccs);
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ printk(KERN_INFO "ray dl_startup_params failed - "
+ "ECF not ready for intr\n");
+ local->card_status = CARD_DL_PARAM_ERROR;
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ return -2;
+ }
+ local->card_status = CARD_DL_PARAM;
+ /* Start kernel timer to wait for dl startup to complete. */
+ local->timer.expires = jiffies + HZ/2;
+ local->timer.data = (long)local;
+ local->timer.function = &verify_dl_startup;
+ add_timer(&local->timer);
+ DEBUG(2,"ray_cs dl_startup_params started timer for verify_dl_startup\n");
+ return 0;
+} /* dl_startup_params */
+/*===========================================================================*/
+static void init_startup_params(ray_dev_t *local)
+{
+ int i;
+
+ if (country > JAPAN_TEST) country = USA;
+ else
+ if (country < USA) country = USA;
+ /* structure for hop time and beacon period is defined here using
+ * New 802.11D6.1 format. Card firmware is still using old format
+ * until version 6.
+ * Before After
+ * a_hop_time ms byte a_hop_time ms byte
+ * a_hop_time 2s byte a_hop_time ls byte
+ * a_hop_time ls byte a_beacon_period ms byte
+ * a_beacon_period a_beacon_period ls byte
+ *
+ * a_hop_time = uS a_hop_time = KuS
+ * a_beacon_period = hops a_beacon_period = KuS
+ */ /* 64ms = 010000 */
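+ /* Worked example with the module defaults (hop_dwell = 128 Kus,
+ * beacon_period = 256 Kus): for build 4 firmware the hop time is
+ * 128 * 1024 = 131072 us = 0x020000, giving a_hop_time bytes
+ * 0x02, 0x00, and the beacon period becomes (256/128) - 1 = 1 hop,
+ * i.e. bytes 0x00, 0x01 -- matching b4_default_startup_parms above.
+ * Build 5 firmware takes the Kus values directly: 128 -> 0x00, 0x80
+ * and 256 -> 0x01, 0x00 as in b5_default_startup_parms.
+ */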
+ if (local->fw_ver == 0x55) {
+ memcpy((UCHAR *)&local->sparm.b4, b4_default_startup_parms,
+ sizeof(struct b4_startup_params));
+ /* Translate sane kus input values to old build 4/5 format */
+ /* i = hop time in uS truncated to 3 bytes */
+ i = (hop_dwell * 1024) & 0xffffff;
+ local->sparm.b4.a_hop_time[0] = (i >> 16) & 0xff;
+ local->sparm.b4.a_hop_time[1] = (i >> 8) & 0xff;
+ local->sparm.b4.a_beacon_period[0] = 0;
+ local->sparm.b4.a_beacon_period[1] =
+ ((beacon_period/hop_dwell) - 1) & 0xff;
+ local->sparm.b4.a_curr_country_code = country;
+ local->sparm.b4.a_hop_pattern_length =
+ hop_pattern_length[(int)country] - 1;
+ if (bc)
+ {
+ local->sparm.b4.a_ack_timeout = 0x50;
+ local->sparm.b4.a_sifs = 0x3f;
+ }
+ }
+ else { /* Version 5 uses real kus values */
+ memcpy((UCHAR *)&local->sparm.b5, b5_default_startup_parms,
+ sizeof(struct b5_startup_params));
+
+ local->sparm.b5.a_hop_time[0] = (hop_dwell >> 8) & 0xff;
+ local->sparm.b5.a_hop_time[1] = hop_dwell & 0xff;
+ local->sparm.b5.a_beacon_period[0] = (beacon_period >> 8) & 0xff;
+ local->sparm.b5.a_beacon_period[1] = beacon_period & 0xff;
+ if (psm)
+ local->sparm.b5.a_power_mgt_state = 1;
+ local->sparm.b5.a_curr_country_code = country;
+ local->sparm.b5.a_hop_pattern_length =
+ hop_pattern_length[(int)country];
+ }
+
+ local->sparm.b4.a_network_type = net_type & 0x01;
+ local->sparm.b4.a_acting_as_ap_status = TYPE_STA;
+
+ if (essid != NULL)
+ strncpy(local->sparm.b4.a_current_ess_id, essid, ESSID_SIZE);
+} /* init_startup_params */
+/*===========================================================================*/
+static void verify_dl_startup(u_long data)
+{
+ ray_dev_t *local = (ray_dev_t *)data;
+ struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
+ UCHAR status;
+ dev_link_t *link = local->finder;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs verify_dl_startup - device not present\n");
+ return;
+ }
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ int i;
+ printk(KERN_DEBUG "verify_dl_startup parameters sent via ccs %d:\n",
+ local->dl_param_ccs);
+ for (i=0; i<sizeof(struct b5_startup_params); i++) {
+ printk(" %2x", (unsigned int) readb(local->sram + HOST_TO_ECF_BASE + i));
+ }
+ printk("\n");
+ }
+#endif
+
+ status = readb(&pccs->buffer_status);
+ if (status!= CCS_BUFFER_FREE)
+ {
+ printk(KERN_INFO "Download startup params failed. Status = %d\n",
+ status);
+ local->card_status = CARD_DL_PARAM_ERROR;
+ return;
+ }
+ if (local->sparm.b4.a_network_type == ADHOC)
+ start_net((u_long)local);
+ else
+ join_net((u_long)local);
+
+ return;
+} /* end verify_dl_startup */
+/*===========================================================================*/
+/* Command card to start a network */
+static void start_net(u_long data)
+{
+ ray_dev_t *local = (ray_dev_t *)data;
+ struct ccs __iomem *pccs;
+ int ccsindex;
+ dev_link_t *link = local->finder;
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs start_net - device not present\n");
+ return;
+ }
+ /* Fill in the CCS fields for the ECF */
+ if ((ccsindex = get_free_ccs(local)) < 0) return;
+ pccs = ccs_base(local) + ccsindex;
+ writeb(CCS_START_NETWORK, &pccs->cmd);
+ writeb(0, &pccs->var.start_network.update_param);
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(1,"ray start net failed - card not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ return;
+ }
+ local->card_status = CARD_DOING_ACQ;
+ return;
+} /* end start_net */
+/*===========================================================================*/
+/* Command card to join a network */
+static void join_net(u_long data)
+{
+ ray_dev_t *local = (ray_dev_t *)data;
+
+ struct ccs __iomem *pccs;
+ int ccsindex;
+ dev_link_t *link = local->finder;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs join_net - device not present\n");
+ return;
+ }
+ /* Fill in the CCS fields for the ECF */
+ if ((ccsindex = get_free_ccs(local)) < 0) return;
+ pccs = ccs_base(local) + ccsindex;
+ writeb(CCS_JOIN_NETWORK, &pccs->cmd);
+ writeb(0, &pccs->var.join_network.update_param);
+ writeb(0, &pccs->var.join_network.net_initiated);
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(1,"ray join net failed - card not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ return;
+ }
+ local->card_status = CARD_DOING_ACQ;
+ return;
+}
+/*============================================================================
+ After a card is removed, ray_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+=============================================================================*/
+static void ray_release(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ray_dev_t *local = dev->priv;
+ int i;
+
+ DEBUG(1, "ray_release(0x%p)\n", link);
+
+ del_timer(&local->timer);
+ link->state &= ~DEV_CONFIG;
+
+ iounmap(local->sram);
+ iounmap(local->rmem);
+ iounmap(local->amem);
+ /* Check the return codes, but only report failures via DEBUG */
+ i = pcmcia_release_window(link->win);
+ if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(link->win) ret = %x\n",i);
+ i = pcmcia_release_window(local->amem_handle);
+ if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->amem) ret = %x\n",i);
+ i = pcmcia_release_window(local->rmem_handle);
+ if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->rmem) ret = %x\n",i);
+ i = pcmcia_release_configuration(link->handle);
+ if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseConfiguration ret = %x\n",i);
+ i = pcmcia_release_irq(link->handle, &link->irq);
+ if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseIRQ ret = %x\n",i);
+
+ DEBUG(2,"ray_release ending\n");
+}
+
+/*=============================================================================
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+ When a CARD_REMOVAL event is received, we immediately set a flag
+ to block future accesses to this device. All the functions that
+ actually access the device should check this flag to make sure
+ the card is still present.
+=============================================================================*/
+static int ray_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ DEBUG(1, "ray_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ netif_device_detach(dev);
+ if (link->state & DEV_CONFIG) {
+ ray_release(link);
+ del_timer(&local->timer);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ ray_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ ray_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ DEBUG(2,"ray_event ending\n");
+ return 0;
+} /* ray_event */
+/*===========================================================================*/
+int ray_dev_init(struct net_device *dev)
+{
+#ifdef RAY_IMMEDIATE_INIT
+ int i;
+#endif /* RAY_IMMEDIATE_INIT */
+ ray_dev_t *local = dev->priv;
+ dev_link_t *link = local->finder;
+
+ DEBUG(1,"ray_dev_init(dev=%p)\n",dev);
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_dev_init - device not present\n");
+ return -1;
+ }
+#ifdef RAY_IMMEDIATE_INIT
+ /* Download startup parameters */
+ if ( (i = dl_startup_params(dev)) < 0)
+ {
+ printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
+ "returns 0x%x\n",i);
+ return -1;
+ }
+#else /* RAY_IMMEDIATE_INIT */
+ /* Postpone the card init so that we can still configure the card,
+ * for example using the Wireless Extensions. The init will happen
+ * in ray_open() - Jean II */
+ DEBUG(1,"ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
+ local->card_status);
+#endif /* RAY_IMMEDIATE_INIT */
+
+ /* copy mac and broadcast addresses to linux device */
+ memcpy(&dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
+ memset(dev->broadcast, 0xff, ETH_ALEN);
+
+ DEBUG(2,"ray_dev_init ending\n");
+ return 0;
+}
+/*===========================================================================*/
+static int ray_dev_config(struct net_device *dev, struct ifmap *map)
+{
+ ray_dev_t *local = dev->priv;
+ dev_link_t *link = local->finder;
+ /* Dummy routine to satisfy device structure */
+ DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map);
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_dev_config - device not present\n");
+ return -1;
+ }
+
+ return 0;
+}
+/*===========================================================================*/
+static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ray_dev_t *local = dev->priv;
+ dev_link_t *link = local->finder;
+ short length = skb->len;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_dev_start_xmit - device not present\n");
+ return -1;
+ }
+ DEBUG(3,"ray_dev_start_xmit(skb=%p, dev=%p)\n",skb,dev);
+ if (local->authentication_state == NEED_TO_AUTH) {
+ DEBUG(0,"ray_cs Sending authentication request.\n");
+ if (!build_auth_frame (local, local->auth_id, OPEN_AUTH_REQUEST)) {
+ local->authentication_state = AUTHENTICATED;
+ netif_stop_queue(dev);
+ return 1;
+ }
+ }
+
+ if (length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+ switch (ray_hw_xmit( skb->data, length, dev, DATA_TYPE)) {
+ case XMIT_NO_CCS:
+ case XMIT_NEED_AUTH:
+ netif_stop_queue(dev);
+ return 1;
+ case XMIT_NO_INTR:
+ case XMIT_MSG_BAD:
+ case XMIT_OK:
+ default:
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ return 0;
+} /* ray_dev_start_xmit */
+/*===========================================================================*/
+static int ray_hw_xmit(unsigned char* data, int len, struct net_device* dev,
+ UCHAR msg_type)
+{
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ struct ccs __iomem *pccs;
+ int ccsindex;
+ int offset;
+ struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */
+ short int addr; /* Address of xmit buffer in card space */
+
+ DEBUG(3,"ray_hw_xmit(data=%p, len=%d, dev=%p)\n",data,len,dev);
+ if (len + TX_HEADER_LENGTH > TX_BUF_SIZE)
+ {
+ printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n",len);
+ return XMIT_MSG_BAD;
+ }
+ switch (ccsindex = get_free_tx_ccs(local)) {
+ case ECCSBUSY:
+ DEBUG(2,"ray_hw_xmit tx_ccs table busy\n");
+ case ECCSFULL:
+ DEBUG(2,"ray_hw_xmit No free tx ccs\n");
+ case ECARDGONE:
+ netif_stop_queue(dev);
+ return XMIT_NO_CCS;
+ default:
+ break;
+ }
+ addr = TX_BUF_BASE + (ccsindex << 11);
+
+ if (msg_type == DATA_TYPE) {
+ local->stats.tx_bytes += len;
+ local->stats.tx_packets++;
+ }
+
+ ptx = local->sram + addr;
+
+ ray_build_header(local, ptx, msg_type, data);
+ if (translate) {
+ offset = translate_frame(local, ptx, data, len);
+ }
+ else { /* Encapsulate frame */
+ /* TBD TIB length will move address of ptx->var */
+ memcpy_toio(&ptx->var, data, len);
+ offset = 0;
+ }
+
+ /* fill in the CCS */
+ pccs = ccs_base(local) + ccsindex;
+ len += TX_HEADER_LENGTH + offset;
+ writeb(CCS_TX_REQUEST, &pccs->cmd);
+ writeb(addr >> 8, &pccs->var.tx_request.tx_data_ptr[0]);
+ writeb(local->tib_length, &pccs->var.tx_request.tx_data_ptr[1]);
+ writeb(len >> 8, &pccs->var.tx_request.tx_data_length[0]);
+ writeb(len & 0xff, &pccs->var.tx_request.tx_data_length[1]);
+/* TBD still need psm_cam? */
+ writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode);
+ writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate);
+ writeb(0, &pccs->var.tx_request.antenna);
+ DEBUG(3,"ray_hw_xmit default_tx_rate = 0x%x\n",\
+ local->net_default_tx_rate);
+
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(2,"ray_hw_xmit failed - ECF not ready for intr\n");
+/* TBD very inefficient to copy packet to buffer, and then not
+ send it, but the alternative is to queue the messages and that
+ won't be done for a while. Maybe set tbusy until a CCS is free?
+*/
+ writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
+ return XMIT_NO_INTR;
+ }
+ return XMIT_OK;
+} /* end ray_hw_xmit */
+/*===========================================================================*/
+static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, unsigned char *data,
+ int len)
+{
+ unsigned short int proto = ((struct ethhdr *)data)->h_proto;
+ if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
+ DEBUG(3,"ray_cs translate_frame DIX II\n");
+ /* Copy LLC header to card buffer */
+ memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
+ memcpy_toio( ((void __iomem *)&ptx->var) + sizeof(eth2_llc), (UCHAR *)&proto, 2);
+ if ((proto == 0xf380) || (proto == 0x3781)) {
+ /* This is the selective translation table, only 2 entries */
+ writeb(0xf8, &((struct snaphdr_t __iomem *)ptx->var)->org[3]);
+ }
+ /* Copy body of ethernet packet without ethernet header */
+ memcpy_toio((void __iomem *)&ptx->var + sizeof(struct snaphdr_t), \
+ data + ETH_HLEN, len - ETH_HLEN);
+ return (int) sizeof(struct snaphdr_t) - ETH_HLEN;
+ }
+ else { /* already 802 type, and proto is length */
+ DEBUG(3,"ray_cs translate_frame 802\n");
+ if (proto == 0xffff) { /* evil netware IPX 802.3 without LLC */
+ DEBUG(3,"ray_cs translate_frame evil IPX\n");
+ memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
+ return 0 - ETH_HLEN;
+ }
+ memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
+ return 0 - ETH_HLEN;
+ }
+ /* TBD do other frame types */
+} /* end translate_frame */
+/*===========================================================================*/
+static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type,
+ unsigned char *data)
+{
+ writeb(PROTOCOL_VER | msg_type, &ptx->mac.frame_ctl_1);
+/*** IEEE 802.11 Address field assignments *************
+ TODS FROMDS addr_1 addr_2 addr_3 addr_4
+Adhoc 0 0 dest src (terminal) BSSID N/A
+AP to Terminal 0 1 dest AP(BSSID) source N/A
+Terminal to AP 1 0 AP(BSSID) src (terminal) dest N/A
+AP to AP 1 1 dest AP src AP dest source
+*******************************************************/
+ if (local->net_type == ADHOC) {
+ writeb(0, &ptx->mac.frame_ctl_2);
+ memcpy_toio(ptx->mac.addr_1, ((struct ethhdr *)data)->h_dest, 2 * ADDRLEN);
+ memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);
+ }
+ else /* infrastructure */
+ {
+ if (local->sparm.b4.a_acting_as_ap_status)
+ {
+ writeb(FC2_FROM_DS, &ptx->mac.frame_ctl_2);
+ memcpy_toio(ptx->mac.addr_1, ((struct ethhdr *)data)->h_dest, ADDRLEN);
+ memcpy_toio(ptx->mac.addr_2, local->bss_id, 6);
+ memcpy_toio(ptx->mac.addr_3, ((struct ethhdr *)data)->h_source, ADDRLEN);
+ }
+ else /* Terminal */
+ {
+ writeb(FC2_TO_DS, &ptx->mac.frame_ctl_2);
+ memcpy_toio(ptx->mac.addr_1, local->bss_id, ADDRLEN);
+ memcpy_toio(ptx->mac.addr_2, ((struct ethhdr *)data)->h_source, ADDRLEN);
+ memcpy_toio(ptx->mac.addr_3, ((struct ethhdr *)data)->h_dest, ADDRLEN);
+ }
+ }
+} /* end ray_build_header */
+
+
+/*===========================================================================*/
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "ray_cs");
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/*====================================================================*/
+
+static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ dev_link_t *link = local->finder;
+ int err = 0;
+#if WIRELESS_EXT > 7
+ struct iwreq *wrq = (struct iwreq *) ifr;
+#endif /* WIRELESS_EXT > 7 */
+#ifdef WIRELESS_SPY
+ struct sockaddr address[IW_MAX_SPY];
+#endif /* WIRELESS_SPY */
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_dev_ioctl - device not present\n");
+ return -1;
+ }
+ DEBUG(2,"ray_cs IOCTL dev=%p, ifr=%p, cmd = 0x%x\n",dev,ifr,cmd);
+ /* Validate the command */
+ switch (cmd)
+ {
+#if WIRELESS_EXT > 7
+ /* --------------- WIRELESS EXTENSIONS --------------- */
+ /* Get name */
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "IEEE 802.11-FH");
+ break;
+
+ /* Get frequency/channel */
+ case SIOCGIWFREQ:
+ wrq->u.freq.m = local->sparm.b5.a_hop_pattern;
+ wrq->u.freq.e = 0;
+ break;
+
+ /* Set frequency/channel */
+ case SIOCSIWFREQ:
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* Setting by channel number */
+ if ((wrq->u.freq.m > USA_HOP_MOD) || (wrq->u.freq.e > 0))
+ err = -EOPNOTSUPP;
+ else
+ local->sparm.b5.a_hop_pattern = wrq->u.freq.m;
+ break;
+
+ /* Get current network name (ESSID) */
+ case SIOCGIWESSID:
+ if (wrq->u.data.pointer)
+ {
+ char essid[IW_ESSID_MAX_SIZE + 1];
+ /* Get the essid that was set */
+ memcpy(essid, local->sparm.b5.a_current_ess_id,
+ IW_ESSID_MAX_SIZE);
+ essid[IW_ESSID_MAX_SIZE] = '\0';
+
+ /* Push it out ! */
+ wrq->u.data.length = strlen(essid) + 1;
+ wrq->u.data.flags = 1; /* active */
+ if (copy_to_user(wrq->u.data.pointer, essid, sizeof(essid)))
+ err = -EFAULT;
+ }
+ break;
+
+ /* Set desired network name (ESSID) */
+ case SIOCSIWESSID:
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ if (wrq->u.data.pointer)
+ {
+ char card_essid[IW_ESSID_MAX_SIZE + 1];
+
+ /* Check if we asked for `any' */
+ if(wrq->u.data.flags == 0)
+ {
+ /* Corey : can you do that ? */
+ err = -EOPNOTSUPP;
+ }
+ else
+ {
+ /* Check the size of the string */
+ if(wrq->u.data.length >
+ IW_ESSID_MAX_SIZE + 1)
+ {
+ err = -E2BIG;
+ break;
+ }
+ if (copy_from_user(card_essid,
+ wrq->u.data.pointer,
+ wrq->u.data.length)) {
+ err = -EFAULT;
+ break;
+ }
+ card_essid[IW_ESSID_MAX_SIZE] = '\0';
+
+ /* Set the ESSID in the card */
+ memcpy(local->sparm.b5.a_current_ess_id, card_essid,
+ IW_ESSID_MAX_SIZE);
+ }
+ }
+ break;
+
+ /* Get current Access Point (BSSID in our case) */
+ case SIOCGIWAP:
+ memcpy(wrq->u.ap_addr.sa_data, local->bss_id, ETH_ALEN);
+ wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
+ break;
+
+ /* Get the current bit-rate */
+ case SIOCGIWRATE:
+ if(local->net_default_tx_rate == 3)
+ wrq->u.bitrate.value = 2000000; /* Hum... */
+ else
+ wrq->u.bitrate.value = local->net_default_tx_rate * 500000;
+ wrq->u.bitrate.fixed = 0; /* We are in auto mode */
+ break;
+
+ /* Set the desired bit-rate */
+ case SIOCSIWRATE:
+ /* Check if rate is in range */
+ if((wrq->u.bitrate.value != 1000000) &&
+ (wrq->u.bitrate.value != 2000000))
+ {
+ err = -EINVAL;
+ break;
+ }
+ /* Hack for 1.5 Mb/s instead of 2 Mb/s */
+ if((local->fw_ver == 0x55) && /* Please check */
+ (wrq->u.bitrate.value == 2000000))
+ local->net_default_tx_rate = 3;
+ else
+ local->net_default_tx_rate = wrq->u.bitrate.value/500000;
+ break;
+
+ /* Get the current RTS threshold */
+ case SIOCGIWRTS:
+ wrq->u.rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ + local->sparm.b5.a_rts_threshold[1];
+#if WIRELESS_EXT > 8
+ wrq->u.rts.disabled = (wrq->u.rts.value == 32767);
+#endif /* WIRELESS_EXT > 8 */
+ wrq->u.rts.fixed = 1;
+ break;
+
+ /* Set the desired RTS threshold */
+ case SIOCSIWRTS:
+ {
+ int rthr = wrq->u.rts.value;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* if(wrq->u.rts.fixed == 0) we should complain */
+#if WIRELESS_EXT > 8
+ if(wrq->u.rts.disabled)
+ rthr = 32767;
+ else
+#endif /* WIRELESS_EXT > 8 */
+ if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
+ {
+ err = -EINVAL;
+ break;
+ }
+ local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
+ local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
+ }
+ break;
+
+ /* Get the current fragmentation threshold */
+ case SIOCGIWFRAG:
+ wrq->u.frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ + local->sparm.b5.a_frag_threshold[1];
+#if WIRELESS_EXT > 8
+ wrq->u.frag.disabled = (wrq->u.frag.value == 32767);
+#endif /* WIRELESS_EXT > 8 */
+ wrq->u.frag.fixed = 1;
+ break;
+
+ /* Set the desired fragmentation threshold */
+ case SIOCSIWFRAG:
+ {
+ int fthr = wrq->u.frag.value;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ /* if(wrq->u.frag.fixed == 0) should complain */
+#if WIRELESS_EXT > 8
+ if(wrq->u.frag.disabled)
+ fthr = 32767;
+ else
+#endif /* WIRELESS_EXT > 8 */
+ if((fthr < 256) || (fthr > 2347)) /* To check out ! */
+ {
+ err = -EINVAL;
+ break;
+ }
+ local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
+ local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
+ }
+ break;
+
+#endif /* WIRELESS_EXT > 7 */
+#if WIRELESS_EXT > 8
+
+ /* Get the current mode of operation */
+ case SIOCGIWMODE:
+ if(local->sparm.b5.a_network_type)
+ wrq->u.mode = IW_MODE_INFRA;
+ else
+ wrq->u.mode = IW_MODE_ADHOC;
+ break;
+
+ /* Set the current mode of operation */
+ case SIOCSIWMODE:
+ {
+ char card_mode = 1;
+
+ /* Reject if card is already initialised */
+ if(local->card_status != CARD_AWAITING_PARAM)
+ {
+ err = -EBUSY;
+ break;
+ }
+
+ switch (wrq->u.mode)
+ {
+ case IW_MODE_ADHOC:
+ card_mode = 0;
+ // Fall through
+ case IW_MODE_INFRA:
+ local->sparm.b5.a_network_type = card_mode;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ break;
+
+#endif /* WIRELESS_EXT > 8 */
+#if WIRELESS_EXT > 7
+ /* ------------------ IWSPY SUPPORT ------------------ */
+ /* Define the range (variations) of above parameters */
+ case SIOCGIWRANGE:
+ /* Basic checking... */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_range range;
+ memset((char *) &range, 0, sizeof(struct iw_range));
+
+ /* Set the length (very important for backward compatibility) */
+ wrq->u.data.length = sizeof(struct iw_range);
+
+#if WIRELESS_EXT > 10
+ /* Set the Wireless Extension versions */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 9;
+#endif /* WIRELESS_EXT > 10 */
+
+ /* Set information in the range struct */
+ range.throughput = 1.1 * 1000 * 1000; /* Put the right number here */
+ range.num_channels = hop_pattern_length[(int)country];
+ range.num_frequency = 0;
+ range.max_qual.qual = 0;
+ range.max_qual.level = 255; /* What's the correct value ? */
+ range.max_qual.noise = 255; /* Idem */
+ range.num_bitrates = 2;
+ range.bitrate[0] = 1000000; /* 1 Mb/s */
+ range.bitrate[1] = 2000000; /* 2 Mb/s */
+
+ /* Copy structure to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer, &range,
+ sizeof(struct iw_range)))
+ err = -EFAULT;
+ }
+ break;
+
+#ifdef WIRELESS_SPY
+ /* Set addresses to spy */
+ case SIOCSIWSPY:
+ /* Check the number of addresses */
+ if(wrq->u.data.length > IW_MAX_SPY)
+ {
+ err = -E2BIG;
+ break;
+ }
+ local->spy_number = wrq->u.data.length;
+
+ /* If there is some addresses to copy */
+ if(local->spy_number > 0)
+ {
+ int i;
+
+ /* Copy addresses to the driver */
+ if(copy_from_user(address, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * local->spy_number))
+ {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Copy addresses to the lp structure */
+ for(i = 0; i < local->spy_number; i++)
+ memcpy(local->spy_address[i], address[i].sa_data, ETH_ALEN);
+
+ /* Reset structure... */
+ memset(local->spy_stat, 0x00, sizeof(iw_qual) * IW_MAX_SPY);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "SetSpy - Set of new addresses is :\n");
+ for(i = 0; i < local->spy_number; i++)
+ printk(KERN_DEBUG "%02X:%02X:%02X:%02X:%02X:%02X\n",
+ local->spy_address[i][0],
+ local->spy_address[i][1],
+ local->spy_address[i][2],
+ local->spy_address[i][3],
+ local->spy_address[i][4],
+ local->spy_address[i][5]);
+#endif /* DEBUG_IOCTL_INFO */
+ }
+ break;
+
+ /* Get the spy list and spy stats */
+ case SIOCGIWSPY:
+ /* Set the number of addresses */
+ wrq->u.data.length = local->spy_number;
+
+ /* If the user want to have the addresses back... */
+ if((local->spy_number > 0) && (wrq->u.data.pointer != (caddr_t) 0))
+ {
+ int i;
+
+ /* Copy addresses from the lp structure */
+ for(i = 0; i < local->spy_number; i++)
+ {
+ memcpy(address[i].sa_data, local->spy_address[i], ETH_ALEN);
+ address[i].sa_family = ARPHRD_ETHER;
+ }
+
+ /* Copy addresses to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer, address,
+ sizeof(struct sockaddr) * local->spy_number))
+ {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Copy stats to the user buffer (just after) */
+ if(copy_to_user(wrq->u.data.pointer +
+ (sizeof(struct sockaddr) * local->spy_number),
+ local->spy_stat, sizeof(iw_qual) * local->spy_number))
+ {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Reset updated flags */
+ for(i = 0; i < local->spy_number; i++)
+ local->spy_stat[i].updated = 0x0;
+ } /* if(pointer != NULL) */
+
+ break;
+#endif /* WIRELESS_SPY */
+
+ /* ------------------ PRIVATE IOCTL ------------------ */
+#ifndef SIOCIWFIRSTPRIV
+#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
+#endif /* SIOCIWFIRSTPRIV */
+#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
+#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */
+#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
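+/* User-space sketch (hypothetical, not part of the driver) of how these
+ * private ioctls are invoked: the one-byte argument travels in the
+ * u.name field of a struct iwreq, roughly:
+ *
+ *   struct iwreq wrq;
+ *   strncpy(wrq.ifr_name, "eth0", IFNAMSIZ);
+ *   wrq.u.name[0] = 1;                     (desired framing mode)
+ *   ioctl(sock, SIOCSIPFRAMING, &wrq);     (sock: any INET socket fd)
+ */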
+ case SIOCSIPFRAMING:
+ if(!capable(CAP_NET_ADMIN)) /* For private IOCTLs, we need to check permissions */
+ {
+ err = -EPERM;
+ break;
+ }
+ translate = *(wrq->u.name); /* Set framing mode */
+ break;
+ case SIOCGIPFRAMING:
+ *(wrq->u.name) = translate;
+ break;
+ case SIOCGIPCOUNTRY:
+ *(wrq->u.name) = country;
+ break;
+ case SIOCGIWPRIV:
+ /* Export our "private" interface */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_priv_args priv[] =
+ { /* cmd, set_args, get_args, name */
+ { SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "set_framing" },
+ { SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_framing" },
+ { SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_country" },
+ };
+ /* Set the number of ioctl available */
+ wrq->u.data.length = 3;
+ /* Copy structure to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer, (u_char *) priv,
+ sizeof(priv)))
+ err = -EFAULT;
+ }
+ break;
+#endif /* WIRELESS_EXT > 7 */
+
+
+ default:
+ DEBUG(0,"ray_dev_ioctl cmd = 0x%x\n", cmd);
+ err = -EOPNOTSUPP;
+ }
+ return err;
+} /* end ray_dev_ioctl */
+/*===========================================================================*/
+#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */
+static iw_stats * ray_get_wireless_stats(struct net_device * dev)
+{
+ ray_dev_t * local = (ray_dev_t *) dev->priv;
+ dev_link_t *link;
+ struct status __iomem *p;
+
+ /* Check the private area before dereferencing it */
+ if(local == (ray_dev_t *) NULL)
+ return (iw_stats *) NULL;
+
+ link = local->finder;
+ p = local->sram + STATUS_BASE;
+
+ local->wstats.status = local->card_status;
+#ifdef WIRELESS_SPY
+ if((local->spy_number > 0) && (local->sparm.b5.a_network_type == 0))
+ {
+ /* Get it from the first node in spy list */
+ local->wstats.qual.qual = local->spy_stat[0].qual;
+ local->wstats.qual.level = local->spy_stat[0].level;
+ local->wstats.qual.noise = local->spy_stat[0].noise;
+ local->wstats.qual.updated = local->spy_stat[0].updated;
+ }
+#endif /* WIRELESS_SPY */
+
+ if((link->state & DEV_PRESENT)) {
+ local->wstats.qual.noise = readb(&p->rxnoise);
+ local->wstats.qual.updated |= 4;
+ }
+
+ return &local->wstats;
+} /* end ray_get_wireless_stats */
+#endif /* WIRELESS_EXT > 7 */
+/*===========================================================================*/
+static int ray_open(struct net_device *dev)
+{
+ dev_link_t *link;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+
+ DEBUG(1, "ray_open('%s')\n", dev->name);
+
+ for (link = dev_list; link; link = link->next)
+ if (link->priv == dev) break;
+ if (!DEV_OK(link)) {
+ return -ENODEV;
+ }
+
+ if (link->open == 0) local->num_multi = 0;
+ link->open++;
+
+ /* If the card is not started, time to start it ! - Jean II */
+ if(local->card_status == CARD_AWAITING_PARAM) {
+ int i;
+
+ DEBUG(1,"ray_open: doing init now !\n");
+
+ /* Download startup parameters */
+ if ( (i = dl_startup_params(dev)) < 0)
+ {
+ printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
+ "returns 0x%x\n",i);
+ return -1;
+ }
+ }
+
+ if (sniffer) netif_stop_queue(dev);
+ else netif_start_queue(dev);
+
+ DEBUG(2,"ray_open ending\n");
+ return 0;
+} /* end ray_open */
+/*===========================================================================*/
+static int ray_dev_close(struct net_device *dev)
+{
+ dev_link_t *link;
+
+ DEBUG(1, "ray_dev_close('%s')\n", dev->name);
+
+ for (link = dev_list; link; link = link->next)
+ if (link->priv == dev) break;
+ if (link == NULL)
+ return -ENODEV;
+
+ link->open--;
+ netif_stop_queue(dev);
+
+    /* In here, we should stop the hardware (stop card from being active)
+     * and set local->card_status to CARD_AWAITING_PARAM, so that while the
+     * card is closed we can change its configuration.
+     * Probably also need a COR reset to get sane state - Jean II */
+
+ return 0;
+} /* end ray_dev_close */
+/*===========================================================================*/
+static void ray_reset(struct net_device *dev) {
+ DEBUG(1,"ray_reset entered\n");
+ return;
+}
+/*===========================================================================*/
+/* Cause a firmware interrupt if it is ready for one */
+/* Return nonzero if not ready */
+static int interrupt_ecf(ray_dev_t *local, int ccs)
+{
+ int i = 50;
+ dev_link_t *link = local->finder;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs interrupt_ecf - device not present\n");
+ return -1;
+ }
+    DEBUG(2,"interrupt_ecf(local=%p, ccs = 0x%x)\n",local,ccs);
+
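+    /* Wait (up to 50 polls) for the firmware to acknowledge any previous interrupt request */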
+ while ( i &&
+ (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) & ECF_INTR_SET))
+ i--;
+ if (i == 0) {
+ DEBUG(2,"ray_cs interrupt_ecf card not ready for interrupt\n");
+ return -1;
+ }
+ /* Fill the mailbox, then kick the card */
+ writeb(ccs, local->sram + SCB_BASE);
+ writeb(ECF_INTR_SET, local->amem + CIS_OFFSET + ECF_INTR_OFFSET);
+ return 0;
+} /* interrupt_ecf */
+/*===========================================================================*/
+/* Get next free transmit CCS */
+/* Return - index of current tx ccs */
+static int get_free_tx_ccs(ray_dev_t *local)
+{
+ int i;
+ struct ccs __iomem *pccs = ccs_base(local);
+ dev_link_t *link = local->finder;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs get_free_tx_ccs - device not present\n");
+ return ECARDGONE;
+ }
+
+ if (test_and_set_bit(0,&local->tx_ccs_lock)) {
+ DEBUG(1,"ray_cs tx_ccs_lock busy\n");
+ return ECCSBUSY;
+ }
+
+ for (i=0; i < NUMBER_OF_TX_CCS; i++) {
+ if (readb(&(pccs+i)->buffer_status) == CCS_BUFFER_FREE) {
+ writeb(CCS_BUFFER_BUSY, &(pccs+i)->buffer_status);
+ writeb(CCS_END_LIST, &(pccs+i)->link);
+ local->tx_ccs_lock = 0;
+ return i;
+ }
+ }
+ local->tx_ccs_lock = 0;
+ DEBUG(2,"ray_cs ERROR no free tx CCS for raylink card\n");
+ return ECCSFULL;
+} /* get_free_tx_ccs */
+/*===========================================================================*/
+/* Get next free CCS */
+/* Return - index of current ccs */
+static int get_free_ccs(ray_dev_t *local)
+{
+ int i;
+ struct ccs __iomem *pccs = ccs_base(local);
+ dev_link_t *link = local->finder;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs get_free_ccs - device not present\n");
+ return ECARDGONE;
+ }
+ if (test_and_set_bit(0,&local->ccs_lock)) {
+ DEBUG(1,"ray_cs ccs_lock busy\n");
+ return ECCSBUSY;
+ }
+
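+    /* The first NUMBER_OF_TX_CCS entries are reserved for transmit, so search the remainder */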
+ for (i = NUMBER_OF_TX_CCS; i < NUMBER_OF_CCS; i++) {
+ if (readb(&(pccs+i)->buffer_status) == CCS_BUFFER_FREE) {
+ writeb(CCS_BUFFER_BUSY, &(pccs+i)->buffer_status);
+ writeb(CCS_END_LIST, &(pccs+i)->link);
+ local->ccs_lock = 0;
+ return i;
+ }
+ }
+ local->ccs_lock = 0;
+ DEBUG(1,"ray_cs ERROR no free CCS for raylink card\n");
+ return ECCSFULL;
+} /* get_free_ccs */
+/*===========================================================================*/
+static void authenticate_timeout(u_long data)
+{
+ ray_dev_t *local = (ray_dev_t *)data;
+ del_timer(&local->timer);
+ printk(KERN_INFO "ray_cs Authentication with access point failed"
+ " - timeout\n");
+ join_net((u_long)local);
+}
+/*===========================================================================*/
+static int asc_to_int(char a)
+{
+ if (a < '0') return -1;
+ if (a <= '9') return (a - '0');
+ if (a < 'A') return -1;
+ if (a <= 'F') return (10 + a - 'A');
+ if (a < 'a') return -1;
+ if (a <= 'f') return (10 + a - 'a');
+ return -1;
+}
+/*===========================================================================*/
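+/* Parse an ASCII hex MAC address string into its 6 byte binary form */
+/* The string is scanned from its last character toward the first, so the */
+/* rightmost digits fill out[5] downward and missing leading digits stay zero */
+/* Returns 1 on success, 0 for a NULL, too short, or non-hex string */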
+static int parse_addr(char *in_str, UCHAR *out)
+{
+ int len;
+ int i,j,k;
+ int status;
+
+ if (in_str == NULL) return 0;
+ if ((len = strlen(in_str)) < 2) return 0;
+ memset(out, 0, ADDRLEN);
+
+ status = 1;
+ j = len - 1;
+ if (j > 12) j = 12;
+ i = 5;
+
+ while (j > 0)
+ {
+ if ((k = asc_to_int(in_str[j--])) != -1) out[i] = k;
+ else return 0;
+
+ if (j == 0) break;
+ if ((k = asc_to_int(in_str[j--])) != -1) out[i] += k << 4;
+ else return 0;
+ if (!i--) break;
+ }
+ return status;
+}
+/*===========================================================================*/
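+/* Fold the firmware's receive error counters into the kernel statistics */
+/* Each counter is read only when the firmware has flagged it for the host, */
+/* then both the counter and the flag are cleared */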
+static struct net_device_stats *ray_get_stats(struct net_device *dev)
+{
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ dev_link_t *link = local->finder;
+ struct status __iomem *p = local->sram + STATUS_BASE;
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs net_device_stats - device not present\n");
+ return &local->stats;
+ }
+ if (readb(&p->mrx_overflow_for_host))
+ {
+ local->stats.rx_over_errors += ntohs(readb(&p->mrx_overflow));
+ writeb(0,&p->mrx_overflow);
+ writeb(0,&p->mrx_overflow_for_host);
+ }
+ if (readb(&p->mrx_checksum_error_for_host))
+ {
+ local->stats.rx_crc_errors += ntohs(readb(&p->mrx_checksum_error));
+ writeb(0,&p->mrx_checksum_error);
+ writeb(0,&p->mrx_checksum_error_for_host);
+ }
+ if (readb(&p->rx_hec_error_for_host))
+ {
+ local->stats.rx_frame_errors += ntohs(readb(&p->rx_hec_error));
+ writeb(0,&p->rx_hec_error);
+ writeb(0,&p->rx_hec_error_for_host);
+ }
+ return &local->stats;
+}
+/*===========================================================================*/
+static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len)
+{
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ dev_link_t *link = local->finder;
+ int ccsindex;
+ int i;
+ struct ccs __iomem *pccs;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_update_parm - device not present\n");
+ return;
+ }
+
+ if ((ccsindex = get_free_ccs(local)) < 0)
+ {
+ DEBUG(0,"ray_update_parm - No free ccs\n");
+ return;
+ }
+ pccs = ccs_base(local) + ccsindex;
+ writeb(CCS_UPDATE_PARAMS, &pccs->cmd);
+ writeb(objid, &pccs->var.update_param.object_id);
+ writeb(1, &pccs->var.update_param.number_objects);
+ writeb(0, &pccs->var.update_param.failure_cause);
+ for (i=0; i<len; i++) {
+ writeb(value[i], local->sram + HOST_TO_ECF_BASE);
+ }
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(0,"ray_cs associate failed - ECF not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ }
+}
+/*===========================================================================*/
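+/* Program the card's multicast filter via an UPDATE_MULTICAST_LIST CCS */
+/* all != 0 tells the firmware to accept every multicast address; otherwise */
+/* the kernel's current multicast list is copied into shared RAM */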
+static void ray_update_multi_list(struct net_device *dev, int all)
+{
+ struct dev_mc_list *dmi, **dmip;
+ int ccsindex;
+ struct ccs __iomem *pccs;
+ int i = 0;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ dev_link_t *link = local->finder;
+ void __iomem *p = local->sram + HOST_TO_ECF_BASE;
+
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_update_multi_list - device not present\n");
+ return;
+ }
+ else
+ DEBUG(2,"ray_update_multi_list(%p)\n",dev);
+ if ((ccsindex = get_free_ccs(local)) < 0)
+ {
+ DEBUG(1,"ray_update_multi - No free ccs\n");
+ return;
+ }
+ pccs = ccs_base(local) + ccsindex;
+ writeb(CCS_UPDATE_MULTICAST_LIST, &pccs->cmd);
+
+ if (all) {
+ writeb(0xff, &pccs->var);
+ local->num_multi = 0xff;
+ }
+ else {
+ /* Copy the kernel's list of MC addresses to card */
+ for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
+ memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
+ DEBUG(1,"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",dmi->dmi_addr[0],dmi->dmi_addr[1],dmi->dmi_addr[2],dmi->dmi_addr[3],dmi->dmi_addr[4],dmi->dmi_addr[5]);
+ p += ETH_ALEN;
+ i++;
+ }
+ if (i > 256/ADDRLEN) i = 256/ADDRLEN;
+ writeb((UCHAR)i, &pccs->var);
+ DEBUG(1,"ray_cs update_multi %d addresses in list\n", i);
+ /* Interrupt the firmware to process the command */
+ local->num_multi = i;
+ }
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(1,"ray_cs update_multi failed - ECF not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ }
+} /* end ray_update_multi_list */
+/*===========================================================================*/
+static void set_multicast_list(struct net_device *dev)
+{
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ UCHAR promisc;
+
+ DEBUG(2,"ray_cs set_multicast_list(%p)\n",dev);
+
+ if (dev->flags & IFF_PROMISC)
+ {
+ if (local->sparm.b5.a_promiscuous_mode == 0) {
+ DEBUG(1,"ray_cs set_multicast_list promisc on\n");
+ local->sparm.b5.a_promiscuous_mode = 1;
+ promisc = 1;
+ ray_update_parm(dev, OBJID_promiscuous_mode, \
+ &promisc, sizeof(promisc));
+ }
+ }
+ else {
+ if (local->sparm.b5.a_promiscuous_mode == 1) {
+ DEBUG(1,"ray_cs set_multicast_list promisc off\n");
+ local->sparm.b5.a_promiscuous_mode = 0;
+ promisc = 0;
+ ray_update_parm(dev, OBJID_promiscuous_mode, \
+ &promisc, sizeof(promisc));
+ }
+ }
+
+ if (dev->flags & IFF_ALLMULTI) ray_update_multi_list(dev, 1);
+ else
+ {
+ if (local->num_multi != dev->mc_count) ray_update_multi_list(dev, 0);
+ }
+} /* end set_multicast_list */
+/*=============================================================================
+ * All routines below here are run at interrupt time.
+=============================================================================*/
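+/* Main interrupt handler - reads the returned buffer index from the SCB and */
+/* dispatches either a completed host command (CCS) or an unsolicited */
+/* firmware event (RCS), then clears the interrupt */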
+static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ dev_link_t *link;
+ ray_dev_t *local;
+ struct ccs __iomem *pccs;
+ struct rcs __iomem *prcs;
+ UCHAR rcsindex;
+ UCHAR tmp;
+ UCHAR cmd;
+ UCHAR status;
+
+ if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */
+ return IRQ_NONE;
+
+ DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev);
+
+ local = (ray_dev_t *)dev->priv;
+ link = (dev_link_t *)local->finder;
+ if ( ! (link->state & DEV_PRESENT) || link->state & DEV_SUSPEND ) {
+ DEBUG(2,"ray_cs interrupt from device not present or suspended.\n");
+ return IRQ_NONE;
+ }
+ rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index);
+
+ if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS))
+ {
+ DEBUG(1,"ray_cs interrupt bad rcsindex = 0x%x\n",rcsindex);
+ clear_interrupt(local);
+ return IRQ_HANDLED;
+ }
+ if (rcsindex < NUMBER_OF_CCS) /* If it's a returned CCS */
+ {
+ pccs = ccs_base(local) + rcsindex;
+ cmd = readb(&pccs->cmd);
+ status = readb(&pccs->buffer_status);
+ switch (cmd)
+ {
+ case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */
+ del_timer(&local->timer);
+ if (status == CCS_COMMAND_COMPLETE) {
+ DEBUG(1,"ray_cs interrupt download_startup_parameters OK\n");
+ }
+ else {
+ DEBUG(1,"ray_cs interrupt download_startup_parameters fail\n");
+ }
+ break;
+ case CCS_UPDATE_PARAMS:
+ DEBUG(1,"ray_cs interrupt update params done\n");
+ if (status != CCS_COMMAND_COMPLETE) {
+ tmp = readb(&pccs->var.update_param.failure_cause);
+ DEBUG(0,"ray_cs interrupt update params failed - reason %d\n",tmp);
+ }
+ break;
+ case CCS_REPORT_PARAMS:
+ DEBUG(1,"ray_cs interrupt report params done\n");
+ break;
+ case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */
+ DEBUG(1,"ray_cs interrupt CCS Update Multicast List done\n");
+ break;
+ case CCS_UPDATE_POWER_SAVINGS_MODE:
+ DEBUG(1,"ray_cs interrupt update power save mode done\n");
+ break;
+ case CCS_START_NETWORK:
+ case CCS_JOIN_NETWORK:
+ if (status == CCS_COMMAND_COMPLETE) {
+ if (readb(&pccs->var.start_network.net_initiated) == 1) {
+ DEBUG(0,"ray_cs interrupt network \"%s\" started\n",\
+ local->sparm.b4.a_current_ess_id);
+ }
+ else {
+ DEBUG(0,"ray_cs interrupt network \"%s\" joined\n",\
+ local->sparm.b4.a_current_ess_id);
+ }
+ memcpy_fromio(&local->bss_id,pccs->var.start_network.bssid,ADDRLEN);
+
+ if (local->fw_ver == 0x55) local->net_default_tx_rate = 3;
+ else local->net_default_tx_rate =
+ readb(&pccs->var.start_network.net_default_tx_rate);
+ local->encryption = readb(&pccs->var.start_network.encryption);
+ if (!sniffer && (local->net_type == INFRA)
+ && !(local->sparm.b4.a_acting_as_ap_status)) {
+ authenticate(local);
+ }
+ local->card_status = CARD_ACQ_COMPLETE;
+ }
+ else {
+ local->card_status = CARD_ACQ_FAILED;
+
+ del_timer(&local->timer);
+ local->timer.expires = jiffies + HZ*5;
+ local->timer.data = (long)local;
+ if (status == CCS_START_NETWORK) {
+ DEBUG(0,"ray_cs interrupt network \"%s\" start failed\n",\
+ local->sparm.b4.a_current_ess_id);
+ local->timer.function = &start_net;
+ }
+ else {
+ DEBUG(0,"ray_cs interrupt network \"%s\" join failed\n",\
+ local->sparm.b4.a_current_ess_id);
+ local->timer.function = &join_net;
+ }
+ add_timer(&local->timer);
+ }
+ break;
+ case CCS_START_ASSOCIATION:
+ if (status == CCS_COMMAND_COMPLETE) {
+ local->card_status = CARD_ASSOC_COMPLETE;
+ DEBUG(0,"ray_cs association successful\n");
+ }
+ else
+ {
+                DEBUG(0,"ray_cs association failed\n");
+ local->card_status = CARD_ASSOC_FAILED;
+ join_net((u_long)local);
+ }
+ break;
+ case CCS_TX_REQUEST:
+ if (status == CCS_COMMAND_COMPLETE) {
+ DEBUG(3,"ray_cs interrupt tx request complete\n");
+ }
+ else {
+ DEBUG(1,"ray_cs interrupt tx request failed\n");
+ }
+ if (!sniffer) netif_start_queue(dev);
+ netif_wake_queue(dev);
+ break;
+ case CCS_TEST_MEMORY:
+ DEBUG(1,"ray_cs interrupt mem test done\n");
+ break;
+ case CCS_SHUTDOWN:
+ DEBUG(1,"ray_cs interrupt Unexpected CCS returned - Shutdown\n");
+ break;
+ case CCS_DUMP_MEMORY:
+ DEBUG(1,"ray_cs interrupt dump memory done\n");
+ break;
+ case CCS_START_TIMER:
+ DEBUG(2,"ray_cs interrupt DING - raylink timer expired\n");
+ break;
+ default:
+ DEBUG(1,"ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n",\
+ rcsindex, cmd);
+ }
+ writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
+ }
+ else /* It's an RCS */
+ {
+ prcs = rcs_base(local) + rcsindex;
+
+ switch (readb(&prcs->interrupt_id))
+ {
+ case PROCESS_RX_PACKET:
+ ray_rx(dev, local, prcs);
+ break;
+ case REJOIN_NET_COMPLETE:
+ DEBUG(1,"ray_cs interrupt rejoin net complete\n");
+ local->card_status = CARD_ACQ_COMPLETE;
+ /* do we need to clear tx buffers CCS's? */
+ if (local->sparm.b4.a_network_type == ADHOC) {
+ if (!sniffer) netif_start_queue(dev);
+ }
+ else {
+ memcpy_fromio(&local->bss_id, prcs->var.rejoin_net_complete.bssid, ADDRLEN);
+ DEBUG(1,"ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n",\
+ local->bss_id[0], local->bss_id[1], local->bss_id[2],\
+ local->bss_id[3], local->bss_id[4], local->bss_id[5]);
+ if (!sniffer) authenticate(local);
+ }
+ break;
+ case ROAMING_INITIATED:
+ DEBUG(1,"ray_cs interrupt roaming initiated\n");
+ netif_stop_queue(dev);
+ local->card_status = CARD_DOING_ACQ;
+ break;
+ case JAPAN_CALL_SIGN_RXD:
+ DEBUG(1,"ray_cs interrupt japan call sign rx\n");
+ break;
+ default:
+ DEBUG(1,"ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n",\
+ rcsindex, (unsigned int) readb(&prcs->interrupt_id));
+ break;
+ }
+ writeb(CCS_BUFFER_FREE, &prcs->buffer_status);
+ }
+ clear_interrupt(local);
+ return IRQ_HANDLED;
+} /* ray_interrupt */
+/*===========================================================================*/
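+/* Locate the received frame in the shared RAM rx buffer and dispatch it by */
+/* its frame control byte: data, authentication, deauthentication or beacon */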
+static void ray_rx(struct net_device *dev, ray_dev_t *local, struct rcs __iomem *prcs)
+{
+ int rx_len;
+ unsigned int pkt_addr;
+ void __iomem *pmsg;
+ DEBUG(4,"ray_rx process rx packet\n");
+
+ /* Calculate address of packet within Rx buffer */
+ pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8)
+ + readb(&prcs->var.rx_packet.rx_data_ptr[1])) & RX_BUFF_END;
+ /* Length of first packet fragment */
+ rx_len = (readb(&prcs->var.rx_packet.rx_data_length[0]) << 8)
+ + readb(&prcs->var.rx_packet.rx_data_length[1]);
+
+ local->last_rsl = readb(&prcs->var.rx_packet.rx_sig_lev);
+ pmsg = local->rmem + pkt_addr;
+ switch(readb(pmsg))
+ {
+ case DATA_TYPE:
+ DEBUG(4,"ray_rx data type\n");
+ rx_data(dev, prcs, pkt_addr, rx_len);
+ break;
+ case AUTHENTIC_TYPE:
+ DEBUG(4,"ray_rx authentic type\n");
+ if (sniffer) rx_data(dev, prcs, pkt_addr, rx_len);
+ else rx_authenticate(local, prcs, pkt_addr, rx_len);
+ break;
+ case DEAUTHENTIC_TYPE:
+ DEBUG(4,"ray_rx deauth type\n");
+ if (sniffer) rx_data(dev, prcs, pkt_addr, rx_len);
+ else rx_deauthenticate(local, prcs, pkt_addr, rx_len);
+ break;
+ case NULL_MSG_TYPE:
+ DEBUG(3,"ray_cs rx NULL msg\n");
+ break;
+ case BEACON_TYPE:
+ DEBUG(4,"ray_rx beacon type\n");
+ if (sniffer) rx_data(dev, prcs, pkt_addr, rx_len);
+
+ copy_from_rx_buff(local, (UCHAR *)&local->last_bcn, pkt_addr,
+ rx_len < sizeof(struct beacon_rx) ?
+ rx_len : sizeof(struct beacon_rx));
+
+ local->beacon_rxed = 1;
+ /* Get the statistics so the card counters never overflow */
+ ray_get_stats(dev);
+ break;
+ default:
+ DEBUG(0,"ray_cs unknown pkt type %2x\n", (unsigned int) readb(pmsg));
+ break;
+ }
+
+} /* end ray_rx */
+/*===========================================================================*/
+static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned int pkt_addr,
+ int rx_len)
+{
+ struct sk_buff *skb = NULL;
+ struct rcs __iomem *prcslink = prcs;
+ ray_dev_t *local = dev->priv;
+ UCHAR *rx_ptr;
+ int total_len;
+ int tmp;
+#ifdef WIRELESS_SPY
+ int siglev = local->last_rsl;
+ u_char linksrcaddr[ETH_ALEN]; /* Other end of the wireless link */
+#endif
+
+ if (!sniffer) {
+ if (translate) {
+/* TBD length needs fixing for translated header */
+ if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
+ rx_len > (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + FCS_LEN))
+ {
+ DEBUG(0,"ray_cs invalid packet length %d received \n",rx_len);
+ return;
+ }
+ }
+ else /* encapsulated ethernet */ {
+ if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
+ rx_len > (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + FCS_LEN))
+ {
+ DEBUG(0,"ray_cs invalid packet length %d received \n",rx_len);
+ return;
+ }
+ }
+ }
+ DEBUG(4,"ray_cs rx_data packet\n");
+ /* If fragmented packet, verify sizes of fragments add up */
+ if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
+ DEBUG(1,"ray_cs rx'ed fragment\n");
+ tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8)
+ + readb(&prcs->var.rx_packet.totalpacketlength[1]);
+ total_len = tmp;
+ prcslink = prcs;
+ do {
+ tmp -= (readb(&prcslink->var.rx_packet.rx_data_length[0]) << 8)
+ + readb(&prcslink->var.rx_packet.rx_data_length[1]);
+ if (readb(&prcslink->var.rx_packet.next_frag_rcs_index) == 0xFF
+ || tmp < 0) break;
+ prcslink = rcs_base(local)
+ + readb(&prcslink->link_field);
+ } while (1);
+
+ if (tmp < 0)
+ {
+ DEBUG(0,"ray_cs rx_data fragment lengths don't add up\n");
+ local->stats.rx_dropped++;
+ release_frag_chain(local, prcs);
+ return;
+ }
+ }
+ else { /* Single unfragmented packet */
+ total_len = rx_len;
+ }
+
+ skb = dev_alloc_skb( total_len+5 );
+ if (skb == NULL)
+ {
+ DEBUG(0,"ray_cs rx_data could not allocate skb\n");
+ local->stats.rx_dropped++;
+ if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF)
+ release_frag_chain(local, prcs);
+ return;
+ }
+    skb_reserve( skb, 2);   /* Align IP on 16 byte boundary (TBD check this) */
+ skb->dev = dev;
+
+ DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len);
+
+/************************/
+ /* Reserve enough room for the whole damn packet. */
+ rx_ptr = skb_put( skb, total_len);
+ /* Copy the whole packet to sk_buff */
+ rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len);
+ /* Get source address */
+#ifdef WIRELESS_SPY
+ memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN);
+#endif
+ /* Now, deal with encapsulation/translation/sniffer */
+ if (!sniffer) {
+ if (!translate) {
+ /* Encapsulated ethernet, so just lop off 802.11 MAC header */
+/* TBD reserve skb_reserve( skb, RX_MAC_HEADER_LENGTH); */
+ skb_pull( skb, RX_MAC_HEADER_LENGTH);
+ }
+ else {
+ /* Do translation */
+ untranslate(local, skb, total_len);
+ }
+ }
+ else
+    {  /* sniffer mode, so just pass whole packet */ }
+
+/************************/
+ /* Now pick up the rest of the fragments if any */
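+    /* Walk at most 17 linked fragment RCSs so a corrupt chain cannot loop forever */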
+ tmp = 17;
+ if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
+ prcslink = prcs;
+ DEBUG(1,"ray_cs rx_data in fragment loop\n");
+ do {
+ prcslink = rcs_base(local)
+ + readb(&prcslink->var.rx_packet.next_frag_rcs_index);
+ rx_len = (( readb(&prcslink->var.rx_packet.rx_data_length[0]) << 8)
+ + readb(&prcslink->var.rx_packet.rx_data_length[1]))
+ & RX_BUFF_END;
+ pkt_addr = (( readb(&prcslink->var.rx_packet.rx_data_ptr[0]) << 8)
+ + readb(&prcslink->var.rx_packet.rx_data_ptr[1]))
+ & RX_BUFF_END;
+
+ rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr, rx_len);
+
+ } while (tmp-- &&
+ readb(&prcslink->var.rx_packet.next_frag_rcs_index) != 0xFF);
+ release_frag_chain(local, prcs);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ local->stats.rx_packets++;
+ local->stats.rx_bytes += total_len;
+
+ /* Gather signal strength per address */
+#ifdef WIRELESS_SPY
+ /* For the Access Point or the node having started the ad-hoc net
+     * Note : ad-hoc works only in some specific configurations, but we
+ * kludge in ray_get_wireless_stats... */
+ if(!memcmp(linksrcaddr, local->bss_id, ETH_ALEN))
+ {
+ /* Update statistics */
+ /*local->wstats.qual.qual = none ? */
+ local->wstats.qual.level = siglev;
+ /*local->wstats.qual.noise = none ? */
+ local->wstats.qual.updated = 0x2;
+ }
+ /* Now, for the addresses in the spy list */
+ {
+ int i;
+ /* Look all addresses */
+ for(i = 0; i < local->spy_number; i++)
+ /* If match */
+ if(!memcmp(linksrcaddr, local->spy_address[i], ETH_ALEN))
+ {
+ /* Update statistics */
+ /*local->spy_stat[i].qual = none ? */
+ local->spy_stat[i].level = siglev;
+ /*local->spy_stat[i].noise = none ? */
+ local->spy_stat[i].updated = 0x2;
+ }
+ }
+#endif /* WIRELESS_SPY */
+} /* end rx_data */
+/*===========================================================================*/
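+/* Convert a received 802.11 frame in the skb into an ethernet frame */
+/* Source and destination are chosen from the ToDS/FromDS bits, then the */
+/* LLC/SNAP header is stripped or rewritten according to its OUI and type */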
+static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
+{
+ snaphdr_t *psnap = (snaphdr_t *)(skb->data + RX_MAC_HEADER_LENGTH);
+ struct mac_header *pmac = (struct mac_header *)skb->data;
+ unsigned short type = *(unsigned short *)psnap->ethertype;
+ unsigned int xsap = *(unsigned int *)psnap & 0x00ffffff;
+ unsigned int org = (*(unsigned int *)psnap->org) & 0x00ffffff;
+ int delta;
+ struct ethhdr *peth;
+ UCHAR srcaddr[ADDRLEN];
+ UCHAR destaddr[ADDRLEN];
+
+ if (pmac->frame_ctl_2 & FC2_FROM_DS) {
+ if (pmac->frame_ctl_2 & FC2_TO_DS) { /* AP to AP */
+ memcpy(destaddr, pmac->addr_3, ADDRLEN);
+ memcpy(srcaddr, ((unsigned char *)pmac->addr_3) + ADDRLEN, ADDRLEN);
+ } else { /* AP to terminal */
+ memcpy(destaddr, pmac->addr_1, ADDRLEN);
+ memcpy(srcaddr, pmac->addr_3, ADDRLEN);
+ }
+ } else { /* Terminal to AP */
+ if (pmac->frame_ctl_2 & FC2_TO_DS) {
+ memcpy(destaddr, pmac->addr_3, ADDRLEN);
+ memcpy(srcaddr, pmac->addr_2, ADDRLEN);
+ } else { /* Adhoc */
+ memcpy(destaddr, pmac->addr_1, ADDRLEN);
+ memcpy(srcaddr, pmac->addr_2, ADDRLEN);
+ }
+ }
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 3) {
+ int i;
+ printk(KERN_DEBUG "skb->data before untranslate");
+ for (i=0;i<64;i++)
+ printk("%02x ",skb->data[i]);
+ printk("\n" KERN_DEBUG "type = %08x, xsap = %08x, org = %08x\n",
+ type,xsap,org);
+ printk(KERN_DEBUG "untranslate skb->data = %p\n",skb->data);
+ }
+#endif
+
+ if ( xsap != SNAP_ID) {
+ /* not a snap type so leave it alone */
+ DEBUG(3,"ray_cs untranslate NOT SNAP %x\n", *(unsigned int *)psnap & 0x00ffffff);
+
+ delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
+ peth = (struct ethhdr *)(skb->data + delta);
+ peth->h_proto = htons(len - RX_MAC_HEADER_LENGTH);
+ }
+ else { /* Its a SNAP */
+ if (org == BRIDGE_ENCAP) { /* EtherII and nuke the LLC */
+ DEBUG(3,"ray_cs untranslate Bridge encap\n");
+ delta = RX_MAC_HEADER_LENGTH
+ + sizeof(struct snaphdr_t) - ETH_HLEN;
+ peth = (struct ethhdr *)(skb->data + delta);
+ peth->h_proto = type;
+ }
+ else {
+ if (org == RFC1042_ENCAP) {
+ switch (type) {
+ case RAY_IPX_TYPE:
+ case APPLEARP_TYPE:
+ DEBUG(3,"ray_cs untranslate RFC IPX/AARP\n");
+ delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
+ peth = (struct ethhdr *)(skb->data + delta);
+ peth->h_proto = htons(len - RX_MAC_HEADER_LENGTH);
+ break;
+ default:
+ DEBUG(3,"ray_cs untranslate RFC default\n");
+ delta = RX_MAC_HEADER_LENGTH +
+ sizeof(struct snaphdr_t) - ETH_HLEN;
+ peth = (struct ethhdr *)(skb->data + delta);
+ peth->h_proto = type;
+ break;
+ }
+ }
+ else {
+ printk("ray_cs untranslate very confused by packet\n");
+ delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
+ peth = (struct ethhdr *)(skb->data + delta);
+ peth->h_proto = type;
+ }
+ }
+ }
+/* TBD reserve skb_reserve(skb, delta); */
+ skb_pull(skb, delta);
+ DEBUG(3,"untranslate after skb_pull(%d), skb->data = %p\n",delta,skb->data);
+ memcpy(peth->h_dest, destaddr, ADDRLEN);
+ memcpy(peth->h_source, srcaddr, ADDRLEN);
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 3) {
+ int i;
+ printk(KERN_DEBUG "skb->data after untranslate:");
+ for (i=0;i<64;i++)
+ printk("%02x ",skb->data[i]);
+ printk("\n");
+ }
+#endif
+} /* end untranslate */
+/*===========================================================================*/
+/* Copy data from circular receive buffer to PC memory.
+ * dest = destination address in PC memory
+ * pkt_addr = source address in receive buffer
+ * length   = length of packet to copy
+ */
+static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr, int length)
+{
+ int wrap_bytes = (pkt_addr + length) - (RX_BUFF_END + 1);
+ if (wrap_bytes <= 0)
+ {
+ memcpy_fromio(dest,local->rmem + pkt_addr,length);
+ }
+ else /* Packet wrapped in circular buffer */
+ {
+ memcpy_fromio(dest,local->rmem+pkt_addr,length - wrap_bytes);
+ memcpy_fromio(dest + length - wrap_bytes, local->rmem, wrap_bytes);
+ }
+ return length;
+}
+/*===========================================================================*/
+static void release_frag_chain(ray_dev_t *local, struct rcs __iomem * prcs)
+{
+ struct rcs __iomem *prcslink = prcs;
+ int tmp = 17;
+ unsigned rcsindex = readb(&prcs->var.rx_packet.next_frag_rcs_index);
+
+ while (tmp--) {
+ writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
+ if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
+ DEBUG(1,"ray_cs interrupt bad rcsindex = 0x%x\n",rcsindex);
+ break;
+ }
+ prcslink = rcs_base(local) + rcsindex;
+ rcsindex = readb(&prcslink->var.rx_packet.next_frag_rcs_index);
+ }
+ writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
+}
+/*===========================================================================*/
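+/* Start open system authentication with the current BSS; a 2 second timer */
+/* then either retries the join (if the send failed) or reports an auth timeout */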
+static void authenticate(ray_dev_t *local)
+{
+ dev_link_t *link = local->finder;
+ DEBUG(0,"ray_cs Starting authentication.\n");
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs authenticate - device not present\n");
+ return;
+ }
+
+ del_timer(&local->timer);
+ if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
+ local->timer.function = &join_net;
+ }
+ else {
+ local->timer.function = &authenticate_timeout;
+ }
+ local->timer.expires = jiffies + HZ*2;
+ local->timer.data = (long)local;
+ add_timer(&local->timer);
+ local->authentication_state = AWAITING_RESPONSE;
+} /* end authenticate */
+/*===========================================================================*/
+static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
+ unsigned int pkt_addr, int rx_len)
+{
+ UCHAR buff[256];
+ struct rx_msg *msg = (struct rx_msg *)buff;
+
+ del_timer(&local->timer);
+
+ copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
+ /* if we are trying to get authenticated */
+ if (local->sparm.b4.a_network_type == ADHOC) {
+ DEBUG(1,"ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", msg->var[0],msg->var[1],msg->var[2],msg->var[3],msg->var[4],msg->var[5]);
+ if (msg->var[2] == 1) {
+ DEBUG(0,"ray_cs Sending authentication response.\n");
+ if (!build_auth_frame (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) {
+ local->authentication_state = NEED_TO_AUTH;
+ memcpy(local->auth_id, msg->mac.addr_2, ADDRLEN);
+ }
+ }
+ }
+ else /* Infrastructure network */
+ {
+ if (local->authentication_state == AWAITING_RESPONSE) {
+ /* Verify authentication sequence #2 and success */
+ if (msg->var[2] == 2) {
+ if ((msg->var[3] | msg->var[4]) == 0) {
+ DEBUG(1,"Authentication successful\n");
+ local->card_status = CARD_AUTH_COMPLETE;
+ associate(local);
+ local->authentication_state = AUTHENTICATED;
+ }
+ else {
+ DEBUG(0,"Authentication refused\n");
+ local->card_status = CARD_AUTH_REFUSED;
+ join_net((u_long)local);
+ local->authentication_state = UNAUTHENTICATED;
+ }
+ }
+ }
+ }
+
+} /* end rx_authenticate */
+/*===========================================================================*/
+static void associate(ray_dev_t *local)
+{
+ struct ccs __iomem *pccs;
+ dev_link_t *link = local->finder;
+ struct net_device *dev = link->priv;
+ int ccsindex;
+ if (!(link->state & DEV_PRESENT)) {
+ DEBUG(2,"ray_cs associate - device not present\n");
+ return;
+ }
+ /* If no tx buffers available, return*/
+ if ((ccsindex = get_free_ccs(local)) < 0)
+ {
+/* TBD should never be here but... what if we are? */
+ DEBUG(1,"ray_cs associate - No free ccs\n");
+ return;
+ }
+ DEBUG(1,"ray_cs Starting association with access point\n");
+ pccs = ccs_base(local) + ccsindex;
+ /* fill in the CCS */
+ writeb(CCS_START_ASSOCIATION, &pccs->cmd);
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(1,"ray_cs associate failed - ECF not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+
+ del_timer(&local->timer);
+ local->timer.expires = jiffies + HZ*2;
+ local->timer.data = (long)local;
+ local->timer.function = &join_net;
+ add_timer(&local->timer);
+ local->card_status = CARD_ASSOC_FAILED;
+ return;
+ }
+ if (!sniffer) netif_start_queue(dev);
+
+} /* end associate */
+/*===========================================================================*/
+static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
+ unsigned int pkt_addr, int rx_len)
+{
+/* UCHAR buff[256];
+ struct rx_msg *msg = (struct rx_msg *)buff;
+*/
+ DEBUG(0,"Deauthentication frame received\n");
+ local->authentication_state = UNAUTHENTICATED;
+ /* Need to reauthenticate or rejoin depending on reason code */
+/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
+ */
+}
+/*===========================================================================*/
+static void clear_interrupt(ray_dev_t *local)
+{
+ writeb(0, local->amem + CIS_OFFSET + HCS_INTR_OFFSET);
+}
+/*===========================================================================*/
+#ifdef CONFIG_PROC_FS
+#define MAXDATA (PAGE_SIZE - 80)
+
+static char *card_status[] = {
+ "Card inserted - uninitialized", /* 0 */
+ "Card not downloaded", /* 1 */
+ "Waiting for download parameters", /* 2 */
+ "Card doing acquisition", /* 3 */
+ "Acquisition complete", /* 4 */
+ "Authentication complete", /* 5 */
+ "Association complete", /* 6 */
+ "???", "???", "???", "???", /* 7 8 9 10 undefined */
+ "Card init error", /* 11 */
+ "Download parameters error", /* 12 */
+ "???", /* 13 */
+ "Acquisition failed", /* 14 */
+ "Authentication refused", /* 15 */
+ "Association failed" /* 16 */
+};
+
+static char *nettype[] = {"Adhoc", "Infra "};
+static char *framing[] = {"Encapsulation", "Translation"};
+/*===========================================================================*/
+static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len)
+{
+/* Print current values which are not available via other means
+ * eg ifconfig
+ */
+ int i;
+ dev_link_t *link;
+ struct net_device *dev;
+ ray_dev_t *local;
+ UCHAR *p;
+ struct freq_hop_element *pfh;
+ UCHAR c[33];
+
+ link = dev_list;
+ if (!link)
+ return 0;
+ dev = (struct net_device *)link->priv;
+ if (!dev)
+ return 0;
+ local = (ray_dev_t *)dev->priv;
+ if (!local)
+ return 0;
+
+ len = 0;
+
+ len += sprintf(buf + len, "Raylink Wireless LAN driver status\n");
+ len += sprintf(buf + len, "%s\n", rcsid);
+ /* build 4 does not report version, and field is 0x55 after memtest */
+ len += sprintf(buf + len, "Firmware version = ");
+ if (local->fw_ver == 0x55)
+ len += sprintf(buf + len, "4 - Use dump_cis for more details\n");
+ else
+ len += sprintf(buf + len, "%2d.%02d.%02d\n",
+ local->fw_ver, local->fw_bld, local->fw_var);
+
+ for (i=0; i<32; i++) c[i] = local->sparm.b5.a_current_ess_id[i];
+ c[32] = 0;
+ len += sprintf(buf + len, "%s network ESSID = \"%s\"\n",
+ nettype[local->sparm.b5.a_network_type], c);
+
+ p = local->bss_id;
+ len += sprintf(buf + len,
+ "BSSID = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p[0],p[1],p[2],p[3],p[4],p[5]);
+
+ len += sprintf(buf + len, "Country code = %d\n",
+ local->sparm.b5.a_curr_country_code);
+
+ i = local->card_status;
+ if (i < 0) i = 10;
+ if (i > 16) i = 10;
+ len += sprintf(buf + len, "Card status = %s\n", card_status[i]);
+
+ len += sprintf(buf + len, "Framing mode = %s\n",framing[translate]);
+
+ len += sprintf(buf + len, "Last pkt signal lvl = %d\n", local->last_rsl);
+
+ if (local->beacon_rxed) {
+ /* Pull some fields out of last beacon received */
+ len += sprintf(buf + len, "Beacon Interval = %d Kus\n",
+ local->last_bcn.beacon_intvl[0]
+ + 256 * local->last_bcn.beacon_intvl[1]);
+
+ p = local->last_bcn.elements;
+ if (p[0] == C_ESSID_ELEMENT_ID) p += p[1] + 2;
+ else {
+ len += sprintf(buf + len, "Parse beacon failed at essid element id = %d\n",p[0]);
+ return len;
+ }
+
+ if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) {
+ len += sprintf(buf + len, "Supported rate codes = ");
+ for (i=2; i<p[1] + 2; i++)
+ len += sprintf(buf + len, "0x%02x ", p[i]);
+ len += sprintf(buf + len, "\n");
+ p += p[1] + 2;
+ }
+ else {
+ len += sprintf(buf + len, "Parse beacon failed at rates element\n");
+ return len;
+ }
+
+ if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) {
+ pfh = (struct freq_hop_element *)p;
+ len += sprintf(buf + len, "Hop dwell = %d Kus\n",
+ pfh->dwell_time[0] + 256 * pfh->dwell_time[1]);
+ len += sprintf(buf + len, "Hop set = %d \n", pfh->hop_set);
+ len += sprintf(buf + len, "Hop pattern = %d \n", pfh->hop_pattern);
+ len += sprintf(buf + len, "Hop index = %d \n", pfh->hop_index);
+ p += p[1] + 2;
+ }
+ else {
+ len += sprintf(buf + len, "Parse beacon failed at FH param element\n");
+ return len;
+ }
+ } else {
+ len += sprintf(buf + len, "No beacons received\n");
+ }
+ return len;
+}
+
+#endif
+/*===========================================================================*/
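+/* Build and transmit an open system authentication management frame */
+/* auth_type selects the sequence number: 1 for a request, 2 for a response */
+/* Returns 0 on success, -1 if no tx CCS is free or the ECF is not ready */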
+static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
+{
+ int addr;
+ struct ccs __iomem *pccs;
+ struct tx_msg __iomem *ptx;
+ int ccsindex;
+
+ /* If no tx buffers available, return */
+ if ((ccsindex = get_free_tx_ccs(local)) < 0)
+ {
+ DEBUG(1,"ray_cs send authenticate - No free tx ccs\n");
+ return -1;
+ }
+
+ pccs = ccs_base(local) + ccsindex;
+
+ /* Address in card space */
+ addr = TX_BUF_BASE + (ccsindex << 11);
+ /* fill in the CCS */
+ writeb(CCS_TX_REQUEST, &pccs->cmd);
+ writeb(addr >> 8, pccs->var.tx_request.tx_data_ptr);
+ writeb(0x20, pccs->var.tx_request.tx_data_ptr + 1);
+ writeb(TX_AUTHENTICATE_LENGTH_MSB, pccs->var.tx_request.tx_data_length);
+ writeb(TX_AUTHENTICATE_LENGTH_LSB,pccs->var.tx_request.tx_data_length + 1);
+ writeb(0, &pccs->var.tx_request.pow_sav_mode);
+
+ ptx = local->sram + addr;
+ /* fill in the mac header */
+ writeb(PROTOCOL_VER | AUTHENTIC_TYPE, &ptx->mac.frame_ctl_1);
+ writeb(0, &ptx->mac.frame_ctl_2);
+
+ memcpy_toio(ptx->mac.addr_1, dest, ADDRLEN);
+ memcpy_toio(ptx->mac.addr_2, local->sparm.b4.a_mac_addr, ADDRLEN);
+ memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);
+
+ /* Fill in msg body with protocol 00 00, sequence 01 00 ,status 00 00 */
+ memset_io(ptx->var, 0, 6);
+ writeb(auth_type & 0xff, ptx->var + 2);
+
+ /* Interrupt the firmware to process the command */
+ if (interrupt_ecf(local, ccsindex)) {
+ DEBUG(1,"ray_cs send authentication request failed - ECF not ready for intr\n");
+ writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
+ return -1;
+ }
+ return 0;
+} /* End build_auth_frame */
+
+/*===========================================================================*/
+#ifdef CONFIG_PROC_FS
+static void raycs_write(const char *name, write_proc_t *w, void *data)
+{
+ struct proc_dir_entry * entry = create_proc_entry(name, S_IFREG | S_IWUSR, NULL);
+ if (entry) {
+ entry->write_proc = w;
+ entry->data = data;
+ }
+}
+
+static int write_essid(struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+ static char proc_essid[33];
+ int len = count;
+
+ if (len > 32)
+ len = 32;
+ memset(proc_essid, 0, 33);
+ if (copy_from_user(proc_essid, buffer, len))
+ return -EFAULT;
+ essid = proc_essid;
+ return count;
+}
+
+static int write_int(struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+ static char proc_number[10];
+ char *p;
+ int nr, len;
+
+ if (!count)
+ return 0;
+
+ if (count > 9)
+ return -EINVAL;
+ if (copy_from_user(proc_number, buffer, count))
+ return -EFAULT;
+ p = proc_number;
+ nr = 0;
+ len = count;
+ do {
+ unsigned int c = *p - '0';
+ if (c > 9)
+ return -EINVAL;
+ nr = nr*10 + c;
+ p++;
+ } while (--len);
+ *(int *)data = nr;
+ return count;
+}
+#endif
+
+static struct pcmcia_driver ray_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "ray_cs",
+ },
+ .attach = ray_attach,
+ .detach = ray_detach,
+};
+
+static int __init init_ray_cs(void)
+{
+ int rc;
+
+ DEBUG(1, "%s\n", rcsid);
+ rc = pcmcia_register_driver(&ray_driver);
+ DEBUG(1, "raylink init_module register_pcmcia_driver returns 0x%x\n",rc);
+
+#ifdef CONFIG_PROC_FS
+ proc_mkdir("driver/ray_cs", NULL);
+
+ create_proc_info_entry("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_read);
+ raycs_write("driver/ray_cs/essid", write_essid, NULL);
+ raycs_write("driver/ray_cs/net_type", write_int, &net_type);
+ raycs_write("driver/ray_cs/translate", write_int, &translate);
+#endif
+ if (translate != 0) translate = 1;
+ return 0;
+} /* init_ray_cs */
+
+/*===========================================================================*/
+
+static void __exit exit_ray_cs(void)
+{
+ DEBUG(0, "ray_cs: cleanup_module\n");
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("driver/ray_cs/ray_cs", NULL);
+ remove_proc_entry("driver/ray_cs/essid", NULL);
+ remove_proc_entry("driver/ray_cs/net_type", NULL);
+ remove_proc_entry("driver/ray_cs/translate", NULL);
+ remove_proc_entry("driver/ray_cs", NULL);
+#endif
+
+ pcmcia_unregister_driver(&ray_driver);
+ BUG_ON(dev_list != NULL);
+} /* exit_ray_cs */
+
+module_init(init_ray_cs);
+module_exit(exit_ray_cs);
+
+/*===========================================================================*/
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
new file mode 100644
index 000000000000..c77afa14fa86
--- /dev/null
+++ b/drivers/net/wireless/ray_cs.h
@@ -0,0 +1,78 @@
+/* Raytheon wireless LAN PCMCIA card driver for Linux
+ A PCMCIA client driver for the Raylink wireless network card
+ Written by Corey Thomas
+*/
+
+#ifndef RAYLINK_H
+
+struct beacon_rx {
+ struct mac_header mac;
+ UCHAR timestamp[8];
+ UCHAR beacon_intvl[2];
+ UCHAR capability[2];
+ UCHAR elements[sizeof(struct essid_element)
+ + sizeof(struct rates_element)
+ + sizeof(struct freq_hop_element)
+ + sizeof(struct japan_call_sign_element)
+ + sizeof(struct tim_element)];
+};
+
+/* Return values for get_free{,_tx}_ccs */
+#define ECCSFULL (-1)
+#define ECCSBUSY (-2)
+#define ECARDGONE (-3)
+
+typedef struct ray_dev_t {
+ int card_status;
+ int authentication_state;
+ dev_node_t node;
+ window_handle_t amem_handle; /* handle to window for attribute memory */
+ window_handle_t rmem_handle; /* handle to window for rx buffer on card */
+ void __iomem *sram; /* pointer to beginning of shared RAM */
+ void __iomem *amem; /* pointer to attribute mem window */
+ void __iomem *rmem; /* pointer to receive buffer window */
+ dev_link_t *finder; /* pointer back to dev_link_t for card */
+ struct timer_list timer;
+ long tx_ccs_lock;
+ long ccs_lock;
+ int dl_param_ccs;
+ union {
+ struct b4_startup_params b4;
+ struct b5_startup_params b5;
+ } sparm;
+ int timeout_flag;
+ UCHAR supported_rates[8];
+ UCHAR japan_call_sign[12];
+ struct startup_res_6 startup_res;
+ int num_multi;
+ /* Network parameters from start/join */
+ UCHAR bss_id[6];
+ UCHAR auth_id[6];
+ UCHAR net_default_tx_rate;
+ UCHAR encryption;
+ struct net_device_stats stats;
+
+ UCHAR net_type;
+ UCHAR sta_type;
+ UCHAR fw_ver;
+ UCHAR fw_bld;
+ UCHAR fw_var;
+ UCHAR ASIC_version;
+ UCHAR assoc_id[2];
+ UCHAR tib_length;
+ UCHAR last_rsl;
+ int beacon_rxed;
+ struct beacon_rx last_bcn;
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless specific stats */
+#endif
+#ifdef WIRELESS_SPY
+ int spy_number; /* Number of addresses to spy */
+ mac_addr spy_address[IW_MAX_SPY + 1]; /* The addresses to spy */
+ iw_qual spy_stat[IW_MAX_SPY + 1]; /* Statistics gathered */
+#endif /* WIRELESS_SPY */
+
+} ray_dev_t;
+/*****************************************************************************/
+
+#endif /* RAYLINK_H */
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
new file mode 100644
index 000000000000..49d9b267bc0f
--- /dev/null
+++ b/drivers/net/wireless/rayctl.h
@@ -0,0 +1,732 @@
+#ifndef RAYLINK_H
+
+typedef unsigned char UCHAR;
+
+/****** IEEE 802.11 constants ************************************************/
+#define ADDRLEN 6
+/* Frame control 1 bit fields */
+#define PROTOCOL_VER 0x00
+#define DATA_TYPE 0x08
+#define ASSOC_REQ_TYPE 0x00
+#define ASSOC_RESP_TYPE 0x10
+#define REASSOC_REQ_TYPE 0x20
+#define REASSOC_RESP_TYPE 0x30
+#define NULL_MSG_TYPE 0x48
+#define BEACON_TYPE 0x80
+#define DISASSOC_TYPE 0xA0
+#define PSPOLL_TYPE 0xA4
+#define AUTHENTIC_TYPE 0xB0
+#define DEAUTHENTIC_TYPE 0xC0
+/* Frame control 2 bit fields */
+#define FC2_TO_DS 0x01
+#define FC2_FROM_DS 0x02
+#define FC2_MORE_FRAG 0x04
+#define FC2_RETRY 0x08
+#define FC2_PSM 0x10
+#define FC2_MORE_DATA 0x20
+#define FC2_WEP 0x40
+#define FC2_ORDER 0x80
+/*****************************************************************************/
+/* 802.11 element ID's and lengths */
+#define C_BP_CAPABILITY_ESS 0x01
+#define C_BP_CAPABILITY_IBSS 0x02
+#define C_BP_CAPABILITY_CF_POLLABLE 0x04
+#define C_BP_CAPABILITY_CF_POLL_REQUEST 0x08
+#define C_BP_CAPABILITY_PRIVACY 0x10
+
+#define C_ESSID_ELEMENT_ID 0
+#define C_ESSID_ELEMENT_MAX_LENGTH 32
+
+#define C_SUPPORTED_RATES_ELEMENT_ID 1
+#define C_SUPPORTED_RATES_ELEMENT_LENGTH 2
+
+#define C_FH_PARAM_SET_ELEMENT_ID 2
+#define C_FH_PARAM_SET_ELEMENT_LNGTH 5
+
+#define C_CF_PARAM_SET_ELEMENT_ID 4
+#define C_CF_PARAM_SET_ELEMENT_LNGTH 6
+
+#define C_TIM_ELEMENT_ID 5
+#define C_TIM_BITMAP_LENGTH 251
+#define C_TIM_BMCAST_BIT 0x01
+
+#define C_IBSS_ELEMENT_ID 6
+#define C_IBSS_ELEMENT_LENGTH 2
+
+#define C_JAPAN_CALL_SIGN_ELEMENT_ID 51
+#define C_JAPAN_CALL_SIGN_ELEMENT_LNGTH 12
+
+#define C_DISASSOC_REASON_CODE_LEN 2
+#define C_DISASSOC_REASON_CODE_DEFAULT 8
+
+#define C_CRC_LEN 4
+#define C_NUM_SUPPORTED_RATES 8
+/****** IEEE 802.11 mac header for type data packets *************************/
+struct mac_header {
+ UCHAR frame_ctl_1;
+ UCHAR frame_ctl_2;
+ UCHAR duration_lsb;
+ UCHAR duration_msb;
+ UCHAR addr_1[ADDRLEN];
+ UCHAR addr_2[ADDRLEN];
+ UCHAR addr_3[ADDRLEN];
+ UCHAR seq_frag_num[2];
+/* UCHAR addr_4[ADDRLEN]; *//* only present for AP to AP (TO DS and FROM DS) */
+};
+/****** IEEE 802.11 frame element structures *********************************/
+struct essid_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR text[C_ESSID_ELEMENT_MAX_LENGTH];
+};
+struct rates_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR value[8];
+};
+struct freq_hop_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR dwell_time[2];
+ UCHAR hop_set;
+ UCHAR hop_pattern;
+ UCHAR hop_index;
+};
+struct tim_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR dtim_count;
+ UCHAR dtim_period;
+ UCHAR bitmap_control;
+ UCHAR tim[C_TIM_BITMAP_LENGTH];
+};
+struct ibss_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR atim_window[2];
+};
+struct japan_call_sign_element
+{
+ UCHAR id;
+ UCHAR length;
+ UCHAR call_sign[12];
+};
+/****** Beacon message structures ********************************************/
+/* .elements is a large lump of max size because elements are variable size */
+struct infra_beacon
+{
+ UCHAR timestamp[8];
+ UCHAR beacon_intvl[2];
+ UCHAR capability[2];
+ UCHAR elements[sizeof(struct essid_element)
+ + sizeof(struct rates_element)
+ + sizeof(struct freq_hop_element)
+ + sizeof(struct japan_call_sign_element)
+ + sizeof(struct tim_element)];
+};
+struct adhoc_beacon
+{
+ UCHAR timestamp[8];
+ UCHAR beacon_intvl[2];
+ UCHAR capability[2];
+ UCHAR elements[sizeof(struct essid_element)
+ + sizeof(struct rates_element)
+ + sizeof(struct freq_hop_element)
+ + sizeof(struct japan_call_sign_element)
+ + sizeof(struct ibss_element)];
+};
+/*****************************************************************************/
+/*****************************************************************************/
+/* #define C_MAC_HDR_2_WEP 0x40 */
+/* TX/RX CCS constants */
+#define TX_HEADER_LENGTH 0x1C
+#define RX_MAC_HEADER_LENGTH 0x18
+#define TX_AUTHENTICATE_LENGTH (TX_HEADER_LENGTH + 6)
+#define TX_AUTHENTICATE_LENGTH_MSB (TX_AUTHENTICATE_LENGTH >> 8)
+#define TX_AUTHENTICATE_LENGTH_LSB (TX_AUTHENTICATE_LENGTH & 0xff)
+#define TX_DEAUTHENTICATE_LENGTH (TX_HEADER_LENGTH + 2)
+#define TX_DEAUTHENTICATE_LENGTH_MSB (TX_AUTHENTICATE_LENGTH >> 8)
+#define TX_DEAUTHENTICATE_LENGTH_LSB (TX_AUTHENTICATE_LENGTH & 0xff)
+#define FCS_LEN 4
+
+#define ADHOC 0
+#define INFRA 1
+
+#define TYPE_STA 0
+#define TYPE_AP 1
+
+#define PASSIVE_SCAN 1
+#define ACTIVE_SCAN 1
+
+#define PSM_CAM 0
+
+/* Country codes */
+#define USA 1
+#define EUROPE 2
+#define JAPAN 3
+#define KOREA 4
+#define SPAIN 5
+#define FRANCE 6
+#define ISRAEL 7
+#define AUSTRALIA 8
+#define JAPAN_TEST 9
+
+/* Hop pattern lengths */
+#define USA_HOP_MOD 79
+#define EUROPE_HOP_MOD 79
+#define JAPAN_HOP_MOD 23
+#define KOREA_HOP_MOD 23
+#define SPAIN_HOP_MOD 27
+#define FRANCE_HOP_MOD 35
+#define ISRAEL_HOP_MOD 35
+#define AUSTRALIA_HOP_MOD 47
+#define JAPAN_TEST_HOP_MOD 23
+
+#define ESSID_SIZE 32
+/**********************************************************************/
+/* CIS Register Constants */
+#define CIS_OFFSET 0x0f00
+/* Configuration Option Register (0x0F00) */
+#define COR_OFFSET 0x00
+#define COR_SOFT_RESET 0x80
+#define COR_LEVEL_IRQ 0x40
+#define COR_CONFIG_NUM 0x01
+#define COR_DEFAULT (COR_LEVEL_IRQ | COR_CONFIG_NUM)
+
+/* Card Configuration and Status Register (0x0F01) */
+#define CCSR_OFFSET 0x01
+#define CCSR_HOST_INTR_PENDING 0x01
+#define CCSR_POWER_DOWN 0x04
+
+/* HCS Interrupt Register (0x0F05) */
+#define HCS_INTR_OFFSET 0x05
+/* #define HCS_INTR_OFFSET 0x0A */
+#define HCS_INTR_CLEAR 0x00
+
+/* ECF Interrupt Register (0x0F06) */
+#define ECF_INTR_OFFSET 0x06
+/* #define ECF_INTR_OFFSET 0x0C */
+#define ECF_INTR_SET 0x01
+
+/* Authorization Register 0 (0x0F08) */
+#define AUTH_0_ON 0x57
+
+/* Authorization Register 1 (0x0F09) */
+#define AUTH_1_ON 0x82
+
+/* Program Mode Register (0x0F0A) */
+#define PC2PM 0x02
+#define PC2CAL 0x10
+#define PC2MLSE 0x20
+
+/* PC Test Mode Register (0x0F0B) */
+#define PC_TEST_MODE 0x08
+
+/* Frequency Control Word (0x0F10) */
+/* Range 0x02 - 0xA6 */
+
+/* Test Mode Control 1-4 (0x0F14 - 0x0F17) */
+
+/**********************************************************************/
+
+/* Shared RAM Area */
+#define SCB_BASE 0x0000
+#define STATUS_BASE 0x0100
+#define HOST_TO_ECF_BASE 0x0200
+#define ECF_TO_HOST_BASE 0x0300
+#define CCS_BASE 0x0400
+#define RCS_BASE 0x0800
+#define INFRA_TIM_BASE 0x0C00
+#define SSID_LIST_BASE 0x0D00
+#define TX_BUF_BASE 0x1000
+#define RX_BUF_BASE 0x8000
+
+#define NUMBER_OF_CCS 64
+#define NUMBER_OF_RCS 64
+/*#define NUMBER_OF_TX_CCS 14 */
+#define NUMBER_OF_TX_CCS 14
+
+#define TX_BUF_SIZE (2048 - sizeof(struct tx_msg))
+#define RX_BUFF_END 0x3FFF
+/* Values for buffer_status */
+#define CCS_BUFFER_FREE 0
+#define CCS_BUFFER_BUSY 1
+#define CCS_COMMAND_COMPLETE 2
+#define CCS_COMMAND_FAILED 3
+
+/* Values for cmd */
+#define CCS_DOWNLOAD_STARTUP_PARAMS 1
+#define CCS_UPDATE_PARAMS 2
+#define CCS_REPORT_PARAMS 3
+#define CCS_UPDATE_MULTICAST_LIST 4
+#define CCS_UPDATE_POWER_SAVINGS_MODE 5
+#define CCS_START_NETWORK 6
+#define CCS_JOIN_NETWORK 7
+#define CCS_START_ASSOCIATION 8
+#define CCS_TX_REQUEST 9
+#define CCS_TEST_MEMORY 0xa
+#define CCS_SHUTDOWN 0xb
+#define CCS_DUMP_MEMORY 0xc
+#define CCS_START_TIMER 0xe
+#define CCS_LAST_CMD CCS_START_TIMER
+
+/* Values for link field */
+#define CCS_END_LIST 0xff
+
+/* values for buffer_status field */
+#define RCS_BUFFER_FREE 0
+#define RCS_BUFFER_BUSY 1
+#define RCS_COMPLETE 2
+#define RCS_FAILED 3
+#define RCS_BUFFER_RELEASE 0xFF
+
+/* values for interrupt_id field */
+#define PROCESS_RX_PACKET            0x80    /* RCS ID: Process Rx Packet */
+#define REJOIN_NET_COMPLETE 0x81 /* RCS ID: Rejoin Net Complete */
+#define ROAMING_INITIATED 0x82 /* RCS ID: Roaming Initiated */
+#define JAPAN_CALL_SIGN_RXD 0x83 /* RCS ID: New Japan Call Sign */
+
+/*****************************************************************************/
+/* Memory types for dump memory command */
+#define C_MEM_PROG 0
+#define C_MEM_XDATA 1
+#define C_MEM_SFR 2
+#define C_MEM_IDATA 3
+
+/*** Return values for hw_xmit **********/
+#define XMIT_OK (0)
+#define XMIT_MSG_BAD (-1)
+#define XMIT_NO_CCS (-2)
+#define XMIT_NO_INTR (-3)
+#define XMIT_NEED_AUTH (-4)
+
+/*** Values for card status */
+#define CARD_INSERTED (0)
+
+#define CARD_AWAITING_PARAM (1)
+#define CARD_INIT_ERROR (11)
+
+#define CARD_DL_PARAM (2)
+#define CARD_DL_PARAM_ERROR (12)
+
+#define CARD_DOING_ACQ (3)
+
+#define CARD_ACQ_COMPLETE (4)
+#define CARD_ACQ_FAILED (14)
+
+#define CARD_AUTH_COMPLETE (5)
+#define CARD_AUTH_REFUSED (15)
+
+#define CARD_ASSOC_COMPLETE (6)
+#define CARD_ASSOC_FAILED (16)
+
+/*** Values for authentication_state ***********************************/
+#define UNAUTHENTICATED (0)
+#define AWAITING_RESPONSE (1)
+#define AUTHENTICATED (2)
+#define NEED_TO_AUTH (3)
+
+/*** Values for authentication type ************************************/
+#define OPEN_AUTH_REQUEST (1)
+#define OPEN_AUTH_RESPONSE (2)
+#define BROADCAST_DEAUTH (0xc0)
+/*** Values for timer functions ****************************************/
+#define TODO_NOTHING (0)
+#define TODO_VERIFY_DL_START (-1)
+#define TODO_START_NET (-2)
+#define TODO_JOIN_NET (-3)
+#define TODO_AUTHENTICATE_TIMEOUT (-4)
+#define TODO_SEND_CCS (-5)
+/***********************************************************************/
+/* Parameter passing structure for update/report parameter CCS's */
+struct object_id {
+ void *object_addr;
+ unsigned char object_length;
+};
+
+#define OBJID_network_type 0
+#define OBJID_acting_as_ap_status 1
+#define OBJID_current_ess_id 2
+#define OBJID_scanning_mode 3
+#define OBJID_power_mgt_state 4
+#define OBJID_mac_address 5
+#define OBJID_frag_threshold 6
+#define OBJID_hop_time 7
+#define OBJID_beacon_period 8
+#define OBJID_dtim_period 9
+#define OBJID_retry_max 10
+#define OBJID_ack_timeout 11
+#define OBJID_sifs 12
+#define OBJID_difs 13
+#define OBJID_pifs 14
+#define OBJID_rts_threshold 15
+#define OBJID_scan_dwell_time 16
+#define OBJID_max_scan_dwell_time 17
+#define OBJID_assoc_resp_timeout 18
+#define OBJID_adhoc_scan_cycle_max 19
+#define OBJID_infra_scan_cycle_max 20
+#define OBJID_infra_super_cycle_max 21
+#define OBJID_promiscuous_mode 22
+#define OBJID_unique_word 23
+#define OBJID_slot_time 24
+#define OBJID_roaming_low_snr 25
+#define OBJID_low_snr_count_thresh 26
+#define OBJID_infra_missed_bcn 27
+#define OBJID_adhoc_missed_bcn 28
+#define OBJID_curr_country_code 29
+#define OBJID_hop_pattern 30
+#define OBJID_reserved 31
+#define OBJID_cw_max_msb 32
+#define OBJID_cw_min_msb 33
+#define OBJID_noise_filter_gain 34
+#define OBJID_noise_limit_offset 35
+#define OBJID_det_rssi_thresh_offset 36
+#define OBJID_med_busy_thresh_offset 37
+#define OBJID_det_sync_thresh 38
+#define OBJID_test_mode 39
+#define OBJID_test_min_chan_num 40
+#define OBJID_test_max_chan_num 41
+#define OBJID_allow_bcast_ID_prbrsp 42
+#define OBJID_privacy_must_start 43
+#define OBJID_privacy_can_join 44
+#define OBJID_basic_rate_set 45
+
+/**** Configuration/Status/Control Area ***************************/
+/* System Control Block (SCB) Area
+ * Located at Shared RAM offset 0
+ */
+struct scb {
+ UCHAR ccs_index;
+ UCHAR rcs_index;
+};
+
+/****** Status area at Shared RAM offset 0x0100 ******************************/
+struct status {
+ UCHAR mrx_overflow_for_host; /* 0=ECF may write, 1=host may write*/
+ UCHAR mrx_checksum_error_for_host; /* 0=ECF may write, 1=host may write*/
+ UCHAR rx_hec_error_for_host; /* 0=ECF may write, 1=host may write*/
+ UCHAR reserved1;
+ short mrx_overflow; /* ECF increments on rx overflow */
+ short mrx_checksum_error; /* ECF increments on rx CRC error */
+ short rx_hec_error; /* ECF incs on mac header CRC error */
+ UCHAR rxnoise; /* Average RSL measurement */
+};
+
+/****** Host-to-ECF Data Area at Shared RAM offset 0x200 *********************/
+struct host_to_ecf_area {
+
+};
+
+/****** ECF-to-Host Data Area at Shared RAM offset 0x0300 ********************/
+struct startup_res_518 {
+ UCHAR startup_word;
+ UCHAR station_addr[ADDRLEN];
+ UCHAR calc_prog_chksum;
+ UCHAR calc_cis_chksum;
+ UCHAR ecf_spare[7];
+ UCHAR japan_call_sign[12];
+};
+
+struct startup_res_6 {
+ UCHAR startup_word;
+ UCHAR station_addr[ADDRLEN];
+ UCHAR reserved;
+ UCHAR supp_rates[8];
+ UCHAR japan_call_sign[12];
+ UCHAR calc_prog_chksum;
+ UCHAR calc_cis_chksum;
+ UCHAR firmware_version[3];
+ UCHAR asic_version;
+ UCHAR tib_length;
+};
+
+struct start_join_net_params {
+ UCHAR net_type;
+ UCHAR ssid[ESSID_SIZE];
+ UCHAR reserved;
+ UCHAR privacy_can_join;
+};
+
+/****** Command Control Structure area at Shared RAM offset 0x0400 ***********/
+/* Structures for command specific parameters (ccs.var) */
+struct update_param_cmd {
+ UCHAR object_id;
+ UCHAR number_objects;
+ UCHAR failure_cause;
+};
+struct report_param_cmd {
+ UCHAR object_id;
+ UCHAR number_objects;
+ UCHAR failure_cause;
+ UCHAR length;
+};
+struct start_network_cmd {
+ UCHAR update_param;
+ UCHAR bssid[ADDRLEN];
+ UCHAR net_initiated;
+ UCHAR net_default_tx_rate;
+ UCHAR encryption;
+};
+struct join_network_cmd {
+ UCHAR update_param;
+ UCHAR bssid[ADDRLEN];
+ UCHAR net_initiated;
+ UCHAR net_default_tx_rate;
+ UCHAR encryption;
+};
+struct tx_requested_cmd {
+
+ UCHAR tx_data_ptr[2];
+ UCHAR tx_data_length[2];
+ UCHAR host_reserved[2];
+ UCHAR reserved[3];
+ UCHAR tx_rate;
+ UCHAR pow_sav_mode;
+ UCHAR retries;
+ UCHAR antenna;
+};
+struct tx_requested_cmd_4 {
+
+ UCHAR tx_data_ptr[2];
+ UCHAR tx_data_length[2];
+ UCHAR dest_addr[ADDRLEN];
+ UCHAR pow_sav_mode;
+ UCHAR retries;
+ UCHAR station_id;
+};
+struct memory_dump_cmd {
+ UCHAR memory_type;
+ UCHAR memory_ptr[2];
+ UCHAR length;
+};
+struct update_association_cmd {
+ UCHAR status;
+ UCHAR aid[2];
+};
+struct start_timer_cmd {
+ UCHAR duration[2];
+};
+
+struct ccs {
+ UCHAR buffer_status; /* 0 = buffer free, 1 = buffer busy */
+ /* 2 = command complete, 3 = failed */
+ UCHAR cmd; /* command to ECF */
+ UCHAR link; /* link to next CCS, FF=end of list */
+ /* command specific parameters */
+ union {
+ char reserved[13];
+ struct update_param_cmd update_param;
+ struct report_param_cmd report_param;
+ UCHAR nummulticast;
+ UCHAR mode;
+ struct start_network_cmd start_network;
+ struct join_network_cmd join_network;
+ struct tx_requested_cmd tx_request;
+ struct memory_dump_cmd memory_dump;
+ struct update_association_cmd update_assoc;
+ struct start_timer_cmd start_timer;
+ } var;
+};
+
+/*****************************************************************************/
+/* Transmit buffer structures */
+struct tib_structure {
+ UCHAR ccs_index;
+ UCHAR psm;
+ UCHAR pass_fail;
+ UCHAR retry_count;
+ UCHAR max_retries;
+ UCHAR frags_remaining;
+ UCHAR no_rb;
+ UCHAR rts_reqd;
+ UCHAR csma_tx_cntrl_2;
+ UCHAR sifs_tx_cntrl_2;
+ UCHAR tx_dma_addr_1[2];
+ UCHAR tx_dma_addr_2[2];
+ UCHAR var_dur_2mhz[2];
+ UCHAR var_dur_1mhz[2];
+ UCHAR max_dur_2mhz[2];
+ UCHAR max_dur_1mhz[2];
+ UCHAR hdr_len;
+ UCHAR max_frag_len[2];
+ UCHAR var_len[2];
+ UCHAR phy_hdr_4;
+ UCHAR mac_hdr_1;
+ UCHAR mac_hdr_2;
+ UCHAR sid[2];
+};
+
+struct phy_header {
+ UCHAR sfd[2];
+ UCHAR hdr_3;
+ UCHAR hdr_4;
+};
+struct rx_msg {
+ struct mac_header mac;
+ UCHAR var[1];
+};
+
+struct tx_msg {
+ struct tib_structure tib;
+ struct phy_header phy;
+ struct mac_header mac;
+ UCHAR var[1];
+};
+
+/****** ECF Receive Control Structure (RCS) Area at Shared RAM offset 0x0800 */
+/* Structures for command specific parameters (rcs.var) */
+struct rx_packet_cmd {
+ UCHAR rx_data_ptr[2];
+ UCHAR rx_data_length[2];
+ UCHAR rx_sig_lev;
+ UCHAR next_frag_rcs_index;
+ UCHAR totalpacketlength[2];
+};
+struct rejoin_net_cmplt_cmd {
+ UCHAR reserved;
+ UCHAR bssid[ADDRLEN];
+};
+struct japan_call_sign_rxd {
+ UCHAR rxd_call_sign[8];
+ UCHAR reserved[5];
+};
+
+struct rcs {
+ UCHAR buffer_status;
+ UCHAR interrupt_id;
+ UCHAR link_field;
+ /* command specific parameters */
+ union {
+ UCHAR reserved[13];
+ struct rx_packet_cmd rx_packet;
+ struct rejoin_net_cmplt_cmd rejoin_net_complete;
+ struct japan_call_sign_rxd japan_call_sign;
+ } var;
+};
+
+/****** Startup parameter structures for both versions of firmware ***********/
+struct b4_startup_params {
+ UCHAR a_network_type; /* C_ADHOC, C_INFRA */
+ UCHAR a_acting_as_ap_status; /* C_TYPE_STA, C_TYPE_AP */
+ UCHAR a_current_ess_id[ESSID_SIZE]; /* Null terminated unless 32 long */
+ UCHAR a_scanning_mode; /* passive 0, active 1 */
+ UCHAR a_power_mgt_state; /* CAM 0, */
+ UCHAR a_mac_addr[ADDRLEN]; /* */
+ UCHAR a_frag_threshold[2]; /* 512 */
+ UCHAR a_hop_time[2]; /* 16k * 2**n, n=0-4 in Kus */
+ UCHAR a_beacon_period[2]; /* n * a_hop_time in Kus */
+ UCHAR a_dtim_period; /* in beacons */
+ UCHAR a_retry_max; /* */
+ UCHAR a_ack_timeout; /* */
+ UCHAR a_sifs; /* */
+ UCHAR a_difs; /* */
+ UCHAR a_pifs; /* */
+ UCHAR a_rts_threshold[2]; /* */
+ UCHAR a_scan_dwell_time[2]; /* */
+ UCHAR a_max_scan_dwell_time[2]; /* */
+ UCHAR a_assoc_resp_timeout_thresh; /* */
+ UCHAR a_adhoc_scan_cycle_max; /* */
+ UCHAR a_infra_scan_cycle_max; /* */
+ UCHAR a_infra_super_scan_cycle_max; /* */
+ UCHAR a_promiscuous_mode; /* */
+ UCHAR a_unique_word[2]; /* */
+ UCHAR a_slot_time; /* */
+ UCHAR a_roaming_low_snr_thresh; /* */
+ UCHAR a_low_snr_count_thresh; /* */
+ UCHAR a_infra_missed_bcn_thresh; /* */
+ UCHAR a_adhoc_missed_bcn_thresh; /* */
+ UCHAR a_curr_country_code; /* C_USA */
+ UCHAR a_hop_pattern; /* */
+ UCHAR a_hop_pattern_length; /* */
+/* b4 - b5 differences start here */
+ UCHAR a_cw_max; /* */
+ UCHAR a_cw_min; /* */
+ UCHAR a_noise_filter_gain; /* */
+ UCHAR a_noise_limit_offset; /* */
+ UCHAR a_det_rssi_thresh_offset; /* */
+ UCHAR a_med_busy_thresh_offset; /* */
+ UCHAR a_det_sync_thresh; /* */
+ UCHAR a_test_mode; /* */
+ UCHAR a_test_min_chan_num; /* */
+ UCHAR a_test_max_chan_num; /* */
+ UCHAR a_rx_tx_delay; /* */
+ UCHAR a_current_bss_id[ADDRLEN]; /* */
+ UCHAR a_hop_set; /* */
+};
+struct b5_startup_params {
+ UCHAR a_network_type; /* C_ADHOC, C_INFRA */
+ UCHAR a_acting_as_ap_status; /* C_TYPE_STA, C_TYPE_AP */
+ UCHAR a_current_ess_id[ESSID_SIZE]; /* Null terminated unless 32 long */
+ UCHAR a_scanning_mode; /* passive 0, active 1 */
+ UCHAR a_power_mgt_state; /* CAM 0, */
+ UCHAR a_mac_addr[ADDRLEN]; /* */
+ UCHAR a_frag_threshold[2]; /* 512 */
+ UCHAR a_hop_time[2]; /* 16k * 2**n, n=0-4 in Kus */
+ UCHAR a_beacon_period[2]; /* n * a_hop_time in Kus */
+ UCHAR a_dtim_period; /* in beacons */
+ UCHAR a_retry_max; /* 4 */
+ UCHAR a_ack_timeout; /* */
+ UCHAR a_sifs; /* */
+ UCHAR a_difs; /* */
+ UCHAR a_pifs; /* */
+ UCHAR a_rts_threshold[2]; /* */
+ UCHAR a_scan_dwell_time[2]; /* */
+ UCHAR a_max_scan_dwell_time[2]; /* */
+ UCHAR a_assoc_resp_timeout_thresh; /* */
+ UCHAR a_adhoc_scan_cycle_max; /* */
+ UCHAR a_infra_scan_cycle_max; /* */
+ UCHAR a_infra_super_scan_cycle_max; /* */
+ UCHAR a_promiscuous_mode; /* */
+ UCHAR a_unique_word[2]; /* */
+ UCHAR a_slot_time; /* */
+ UCHAR a_roaming_low_snr_thresh; /* */
+ UCHAR a_low_snr_count_thresh; /* */
+ UCHAR a_infra_missed_bcn_thresh; /* */
+ UCHAR a_adhoc_missed_bcn_thresh; /* */
+ UCHAR a_curr_country_code; /* C_USA */
+ UCHAR a_hop_pattern; /* */
+ UCHAR a_hop_pattern_length; /* */
+/* b4 - b5 differences start here */
+ UCHAR a_cw_max[2]; /* */
+ UCHAR a_cw_min[2]; /* */
+ UCHAR a_noise_filter_gain; /* */
+ UCHAR a_noise_limit_offset; /* */
+ UCHAR a_det_rssi_thresh_offset; /* */
+ UCHAR a_med_busy_thresh_offset; /* */
+ UCHAR a_det_sync_thresh; /* */
+ UCHAR a_test_mode; /* */
+ UCHAR a_test_min_chan_num; /* */
+ UCHAR a_test_max_chan_num; /* */
+ UCHAR a_allow_bcast_SSID_probe_rsp;
+ UCHAR a_privacy_must_start;
+ UCHAR a_privacy_can_join;
+ UCHAR a_basic_rate_set[8];
+};
+
+/*****************************************************************************/
+#define RAY_IOCG_PARMS (SIOCDEVPRIVATE)
+#define RAY_IOCS_PARMS (SIOCDEVPRIVATE + 1)
+#define RAY_DO_CMD (SIOCDEVPRIVATE + 2)
+
+/****** ethernet <-> 802.11 translation **************************************/
+typedef struct snaphdr_t
+{
+ UCHAR dsap;
+ UCHAR ssap;
+ UCHAR ctrl;
+ UCHAR org[3];
+ UCHAR ethertype[2];
+} snaphdr_t;
+
+#define BRIDGE_ENCAP 0xf80000
+#define RFC1042_ENCAP 0
+#define SNAP_ID 0x0003aaaa
+#define RAY_IPX_TYPE 0x8137
+#define APPLEARP_TYPE 0x80f3
+/*****************************************************************************/
+#endif /* #ifndef RAYLINK_H */
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
new file mode 100644
index 000000000000..ec8cf29ffced
--- /dev/null
+++ b/drivers/net/wireless/strip.c
@@ -0,0 +1,2843 @@
+/*
+ * Copyright 1996 The Board of Trustees of The Leland Stanford
+ * Junior University. All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this
+ * software and its documentation for any purpose and without
+ * fee is hereby granted, provided that the above copyright
+ * notice appear in all copies. Stanford University
+ * makes no representations about the suitability of this
+ * software for any purpose. It is provided "as is" without
+ * express or implied warranty.
+ *
+ * strip.c This module implements Starmode Radio IP (STRIP)
+ * for kernel-based devices like TTY. It interfaces between a
+ * raw TTY, and the kernel's INET protocol layers (via DDI).
+ *
+ * Version: @(#)strip.c 1.3 July 1997
+ *
+ * Author: Stuart Cheshire <cheshire@cs.stanford.edu>
+ *
+ * Fixes: v0.9 12th Feb 1996 (SC)
+ * New byte stuffing (2+6 run-length encoding)
+ * New watchdog timer task
+ * New Protocol key (SIP0)
+ *
+ * v0.9.1 3rd March 1996 (SC)
+ * Changed to dynamic device allocation -- no more compile
+ * time (or boot time) limit on the number of STRIP devices.
+ *
+ * v0.9.2 13th March 1996 (SC)
+ * Uses arp cache lookups (but doesn't send arp packets yet)
+ *
+ * v0.9.3 17th April 1996 (SC)
+ * Fixed bug where STR_ERROR flag was getting set unnecessarily
+ * (causing otherwise good packets to be unnecessarily dropped)
+ *
+ * v0.9.4 27th April 1996 (SC)
+ * First attempt at using "&COMMAND" Starmode AT commands
+ *
+ * v0.9.5 29th May 1996 (SC)
+ * First attempt at sending (unicast) ARP packets
+ *
+ * v0.9.6 5th June 1996 (Elliot)
+ * Put "message level" tags in every "printk" statement
+ *
+ * v0.9.7 13th June 1996 (laik)
+ * Added support for the /proc fs
+ *
+ * v0.9.8 July 1996 (Mema)
+ * Added packet logging
+ *
+ * v1.0 November 1996 (SC)
+ * Fixed (severe) memory leaks in the /proc fs code
+ * Fixed race conditions in the logging code
+ *
+ * v1.1 January 1997 (SC)
+ * Deleted packet logging (use tcpdump instead)
+ * Added support for Metricom Firmware v204 features
+ * (like message checksums)
+ *
+ * v1.2 January 1997 (SC)
+ * Put portables list back in
+ *
+ * v1.3 July 1997 (SC)
+ * Made STRIP driver set the radio's baud rate automatically.
+ * It is no longer necessary to manually set the radio's
+ * rate permanently to 115200 -- the driver handles setting
+ * the rate automatically.
+ */
+
+#ifdef MODULE
+static const char StripVersion[] = "1.3A-STUART.CHESHIRE-MODULAR";
+#else
+static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
+#endif
+
+#define TICKLE_TIMERS 0
+#define EXT_COUNTERS 1
+
+
+/************************************************************************/
+/* Header files */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_strip.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/rcupdate.h>
+#include <net/arp.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/time.h>
+
+
+/************************************************************************/
+/* Useful structures and definitions */
+
+/*
+ * A MetricomKey identifies the protocol being carried inside a Metricom
+ * Starmode packet.
+ */
+
+typedef union {
+ __u8 c[4];
+ __u32 l;
+} MetricomKey;
+
+/*
+ * An IP address can be viewed as four bytes in memory (which is what it is) or as
+ * a single 32-bit long (which is convenient for assignment, equality testing etc.)
+ */
+
+typedef union {
+ __u8 b[4];
+ __u32 l;
+} IPaddr;
+
+/*
+ * A MetricomAddressString is used to hold a printable representation of
+ * a Metricom address.
+ */
+
+typedef struct {
+ __u8 c[24];
+} MetricomAddressString;
+
+/* Encapsulation can expand packet of size x to 65/64x + 1
+ * Sent packet looks like "<CR>*<address>*<key><encaps payload><CR>"
+ * 1 1 1-18 1 4 ? 1
+ * eg. <CR>*0000-1234*SIP0<encaps payload><CR>
+ * We allow 31 bytes for the stars, the key, the address and the <CR>s
+ */
+#define STRIP_ENCAP_SIZE(X) (32 + (X)*65L/64L)
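+/* Worked example (illustrative only, not part of the protocol definition):
+ * for the default 1152-byte MTU, STRIP_ENCAP_SIZE(1152) = 32 + 1152*65/64
+ * = 32 + 1170 = 1202 bytes of transmit buffer per packet.
+ */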
+
+/*
+ * A STRIP_Header is never really sent over the radio, but making a dummy
+ * header for internal use within the kernel that looks like an Ethernet
+ * header makes certain other software happier. For example, tcpdump
+ * already understands Ethernet headers.
+ */
+
+typedef struct {
+ MetricomAddress dst_addr; /* Destination address, e.g. "0000-1234" */
+ MetricomAddress src_addr; /* Source address, e.g. "0000-5678" */
+ unsigned short protocol; /* The protocol type, using Ethernet codes */
+} STRIP_Header;
+
+typedef struct {
+ char c[60];
+} MetricomNode;
+
+#define NODE_TABLE_SIZE 32
+typedef struct {
+ struct timeval timestamp;
+ int num_nodes;
+ MetricomNode node[NODE_TABLE_SIZE];
+} MetricomNodeTable;
+
+enum { FALSE = 0, TRUE = 1 };
+
+/*
+ * Holds the radio's firmware version.
+ */
+typedef struct {
+ char c[50];
+} FirmwareVersion;
+
+/*
+ * Holds the radio's serial number.
+ */
+typedef struct {
+ char c[18];
+} SerialNumber;
+
+/*
+ * Holds the radio's battery voltage.
+ */
+typedef struct {
+ char c[11];
+} BatteryVoltage;
+
+typedef struct {
+ char c[8];
+} char8;
+
+enum {
+ NoStructure = 0, /* Really old firmware */
+ StructuredMessages = 1, /* Parsable AT response msgs */
+ ChecksummedMessages = 2 /* Parsable AT response msgs with checksums */
+} FirmwareLevel;
+
+struct strip {
+ int magic;
+ /*
+ * These are pointers to the malloc()ed frame buffers.
+ */
+
+ unsigned char *rx_buff; /* buffer for received IP packet */
+ unsigned char *sx_buff; /* buffer for received serial data */
+ int sx_count; /* received serial data counter */
+ int sx_size; /* Serial buffer size */
+ unsigned char *tx_buff; /* transmitter buffer */
+ unsigned char *tx_head; /* pointer to next byte to XMIT */
+ int tx_left; /* bytes left in XMIT queue */
+ int tx_size; /* Serial buffer size */
+
+ /*
+ * STRIP interface statistics.
+ */
+
+ unsigned long rx_packets; /* inbound frames counter */
+ unsigned long tx_packets; /* outbound frames counter */
+ unsigned long rx_errors; /* Parity, etc. errors */
+ unsigned long tx_errors; /* Planned stuff */
+ unsigned long rx_dropped; /* No memory for skb */
+ unsigned long tx_dropped; /* When MTU change */
+ unsigned long rx_over_errors; /* Frame bigger than STRIP buf. */
+
+ unsigned long pps_timer; /* Timer to determine pps */
+ unsigned long rx_pps_count; /* Counter to determine pps */
+ unsigned long tx_pps_count; /* Counter to determine pps */
+ unsigned long sx_pps_count; /* Counter to determine pps */
+ unsigned long rx_average_pps; /* rx packets per second * 8 */
+ unsigned long tx_average_pps; /* tx packets per second * 8 */
+ unsigned long sx_average_pps; /* sent packets per second * 8 */
+
+#ifdef EXT_COUNTERS
+ unsigned long rx_bytes; /* total received bytes */
+ unsigned long tx_bytes; /* total transmitted bytes */
+ unsigned long rx_rbytes; /* bytes thru radio i/f */
+ unsigned long tx_rbytes; /* bytes thru radio i/f */
+ unsigned long rx_sbytes; /* tot bytes thru serial i/f */
+ unsigned long tx_sbytes; /* tot bytes thru serial i/f */
+ unsigned long rx_ebytes; /* tot stat/err bytes */
+ unsigned long tx_ebytes; /* tot stat/err bytes */
+#endif
+
+ /*
+ * Internal variables.
+ */
+
+ struct list_head list; /* Linked list of devices */
+
+ int discard; /* Set if serial error */
+ int working; /* Is radio working correctly? */
+ int firmware_level; /* Message structuring level */
+ int next_command; /* Next periodic command */
+ unsigned int user_baud; /* The user-selected baud rate */
+ int mtu; /* Our mtu (to spot changes!) */
+ long watchdog_doprobe; /* Next time to test the radio */
+ long watchdog_doreset; /* Time to do next reset */
+ long gratuitous_arp; /* Time to send next ARP refresh */
+ long arp_interval; /* Next ARP interval */
+ struct timer_list idle_timer; /* For periodic wakeup calls */
+ MetricomAddress true_dev_addr; /* True address of radio */
+ int manual_dev_addr; /* Hack: See note below */
+
+ FirmwareVersion firmware_version; /* The radio's firmware version */
+ SerialNumber serial_number; /* The radio's serial number */
+ BatteryVoltage battery_voltage; /* The radio's battery voltage */
+
+ /*
+ * Other useful structures.
+ */
+
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* Our device structure */
+
+ /*
+ * Neighbour radio records
+ */
+
+ MetricomNodeTable portables;
+ MetricomNodeTable poletops;
+};
+
+/*
+ * Note: manual_dev_addr hack
+ *
+ * It is not possible to change the hardware address of a Metricom radio,
+ * or to send packets with a user-specified hardware source address, thus
+ * trying to manually set a hardware source address is a questionable
+ * thing to do. However, if the user *does* manually set the hardware
+ * source address of a STRIP interface, then the kernel will believe it,
+ * and use it in certain places. For example, the hardware address listed
+ * by ifconfig will be the manual address, not the true one.
+ * (Both addresses are listed in /proc/net/strip.)
+ * Also, ARP packets will be sent out giving the user-specified address as
+ * the source address, not the real address. This is dangerous, because
+ * it means you won't receive any replies -- the ARP replies will go to
+ * the specified address, which will be some other radio. The case where
+ * this is useful is when that other radio is also connected to the same
+ * machine. This allows you to connect a pair of radios to one machine,
+ * and to use one exclusively for inbound traffic, and the other
+ * exclusively for outbound traffic. Pretty neat, huh?
+ *
+ * Here's the full procedure to set this up:
+ *
+ * 1. "slattach" two interfaces, e.g. st0 for outgoing packets,
+ * and st1 for incoming packets
+ *
+ * 2. "ifconfig" st0 (outbound radio) to have the hardware address
+ * which is the real hardware address of st1 (inbound radio).
+ * Now when it sends out packets, it will masquerade as st1, and
+ * replies will be sent to that radio, which is exactly what we want.
+ *
+ * 3. Set the route table entry ("route add default ..." or
+ * "route add -net ...", as appropriate) to send packets via the st0
+ * interface (outbound radio). Do not add any route which sends packets
+ * out via the st1 interface -- that radio is for inbound traffic only.
+ *
+ * 4. "ifconfig" st1 (inbound radio) to have hardware address zero.
+ * This tells the STRIP driver to "shut down" that interface and not
+ * send any packets through it. In particular, it stops sending the
+ * periodic gratuitous ARP packets that a STRIP interface normally sends.
+ * Also, when packets arrive on that interface, it will search the
+ * interface list to see if there is another interface whose manual
+ * hardware address matches its own real address (i.e. st0 in this
+ * example) and if so it will transfer ownership of the skbuff to
+ * that interface, so that it looks to the kernel as if the packet
+ * arrived on that interface. This is necessary because when the
+ * kernel sends an ARP packet on st0, it expects to get a reply on
+ * st0, and if it sees the reply come from st1 then it will ignore
+ * it (to be accurate, it puts the entry in the ARP table, but
+ * labelled in such a way that st0 can't use it).
+ *
+ * Thanks to Petros Maniatis for coming up with the idea of splitting
+ * inbound and outbound traffic between two interfaces, which turned
+ * out to be really easy to implement, even if it is a bit of a hack.
+ *
+ * Having set a manual address on an interface, you can restore it
+ * to automatic operation (where the address is automatically kept
+ * consistent with the real address of the radio) by setting a manual
+ * address of all ones, e.g. "ifconfig st0 hw strip FFFFFFFFFFFF"
+ * This 'turns off' manual override mode for the device address.
+ *
+ * Note: The IEEE 802 headers reported in tcpdump will show the *real*
+ * radio addresses the packets were sent and received from, so that you
+ * can see what is really going on with packets, and which interfaces
+ * they are really going through.
+ */
+
+
+/************************************************************************/
+/* Constants */
+
+/*
+ * CommandString1 works on all radios
+ * Other CommandStrings are only used with firmware that provides structured responses.
+ *
+ * ats319=1 Enables Info message for node additions and deletions
+ * ats319=2 Enables Info message for a new best node
+ * ats319=4 Enables checksums
+ * ats319=8 Enables ACK messages
+ */
+
+static const int MaxCommandStringLength = 32;
+static const int CompatibilityCommand = 1;
+
+static const char CommandString0[] = "*&COMMAND*ATS319=7"; /* Turn on checksums & info messages */
+static const char CommandString1[] = "*&COMMAND*ATS305?"; /* Query radio name */
+static const char CommandString2[] = "*&COMMAND*ATS325?"; /* Query battery voltage */
+static const char CommandString3[] = "*&COMMAND*ATS300?"; /* Query version information */
+static const char CommandString4[] = "*&COMMAND*ATS311?"; /* Query poletop list */
+static const char CommandString5[] = "*&COMMAND*AT~LA"; /* Query portables list */
+typedef struct {
+ const char *string;
+ long length;
+} StringDescriptor;
+
+static const StringDescriptor CommandString[] = {
+ {CommandString0, sizeof(CommandString0) - 1},
+ {CommandString1, sizeof(CommandString1) - 1},
+ {CommandString2, sizeof(CommandString2) - 1},
+ {CommandString3, sizeof(CommandString3) - 1},
+ {CommandString4, sizeof(CommandString4) - 1},
+ {CommandString5, sizeof(CommandString5) - 1}
+};
+
+#define GOT_ALL_RADIO_INFO(S) \
+ ((S)->firmware_version.c[0] && \
+ (S)->battery_voltage.c[0] && \
+ memcmp(&(S)->true_dev_addr, zero_address.c, sizeof(zero_address)))
+
+static const char hextable[16] = "0123456789ABCDEF";
+
+static const MetricomAddress zero_address;
+static const MetricomAddress broadcast_address =
+ { {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} };
+
+static const MetricomKey SIP0Key = { "SIP0" };
+static const MetricomKey ARP0Key = { "ARP0" };
+static const MetricomKey ATR_Key = { "ATR " };
+static const MetricomKey ACK_Key = { "ACK_" };
+static const MetricomKey INF_Key = { "INF_" };
+static const MetricomKey ERR_Key = { "ERR_" };
+
+static const long MaxARPInterval = 60 * HZ; /* One minute */
+
+/*
+ * Maximum Starmode packet length is 1183 bytes. Allowing 4 bytes for
+ * protocol key, 4 bytes for checksum, one byte for CR, and 65/64 expansion
+ * for STRIP encoding, that translates to a maximum payload MTU of 1155.
+ * Note: A standard NFS 1K data packet is a total of 0x480 (1152) bytes
+ * long, including IP header, UDP header, and NFS header. Setting the STRIP
+ * MTU to 1152 allows us to send default sized NFS packets without fragmentation.
+ */
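+/* One way to reproduce the 1155 figure above (illustrative arithmetic only):
+ * 1183 - 4 (key) - 4 (checksum) - 1 (CR) = 1174 encoded bytes available,
+ * and 1174 * 64/65 = 1155 (rounded down) bytes of unencoded payload.
+ */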
+static const unsigned short MAX_SEND_MTU = 1152;
+static const unsigned short MAX_RECV_MTU = 1500; /* Hoping for Ethernet sized packets in the future! */
+static const unsigned short DEFAULT_STRIP_MTU = 1152;
+static const int STRIP_MAGIC = 0x5303;
+static const long LongTime = 0x7FFFFFFF;
+
+/************************************************************************/
+/* Global variables */
+
+static LIST_HEAD(strip_list);
+static DEFINE_SPINLOCK(strip_lock);
+
+/************************************************************************/
+/* Macros */
+
+/* Returns TRUE if text T begins with prefix P */
+#define has_prefix(T,L,P) (((L) >= sizeof(P)-1) && !strncmp((T), (P), sizeof(P)-1))
+
+/* Returns TRUE if text T of length L is equal to string S */
+#define text_equal(T,L,S) (((L) == sizeof(S)-1) && !strncmp((T), (S), sizeof(S)-1))
+
+#define READHEX(X) ((X)>='0' && (X)<='9' ? (X)-'0' : \
+ (X)>='a' && (X)<='f' ? (X)-'a'+10 : \
+ (X)>='A' && (X)<='F' ? (X)-'A'+10 : 0 )
+
+#define READHEX16(X) ((__u16)(READHEX(X)))
+
+#define READDEC(X) ((X)>='0' && (X)<='9' ? (X)-'0' : 0)
+
+#define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)]))
+
+#define JIFFIE_TO_SEC(X) ((X) / HZ)
+
+
+/************************************************************************/
+/* Utility routines */
+
+static int arp_query(unsigned char *haddr, u32 paddr,
+ struct net_device *dev)
+{
+ struct neighbour *neighbor_entry;
+
+ neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
+
+ if (neighbor_entry != NULL) {
+ neighbor_entry->used = jiffies;
+ if (neighbor_entry->nud_state & NUD_VALID) {
+ memcpy(haddr, neighbor_entry->ha, dev->addr_len);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
+ __u8 * end)
+{
+ static const int MAX_DumpData = 80;
+ __u8 pkt_text[MAX_DumpData], *p = pkt_text;
+
+ *p++ = '\"';
+
+ while (ptr < end && p < &pkt_text[MAX_DumpData - 4]) {
+ if (*ptr == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else {
+ if (*ptr >= 32 && *ptr <= 126) {
+ *p++ = *ptr;
+ } else {
+ sprintf(p, "\\%02X", *ptr);
+ p += 3;
+ }
+ }
+ ptr++;
+ }
+
+ if (ptr == end)
+ *p++ = '\"';
+ *p++ = 0;
+
+ printk(KERN_INFO "%s: %-13s%s\n", strip_info->dev->name, msg, pkt_text);
+}
+
+
+/************************************************************************/
+/* Byte stuffing/unstuffing routines */
+
+/* Stuffing scheme:
+ * 00 Unused (reserved character)
+ * 01-3F Run of 2-64 different characters
+ * 40-7F Run of 1-64 different characters plus a single zero at the end
+ * 80-BF Run of 1-64 of the same character
+ * C0-FF Run of 1-64 zeroes (ASCII 0)
+ */
+
+typedef enum {
+ Stuff_Diff = 0x00,
+ Stuff_DiffZero = 0x40,
+ Stuff_Same = 0x80,
+ Stuff_Zero = 0xC0,
+ Stuff_NoCode = 0xFF, /* Special code, meaning no code selected */
+
+ Stuff_CodeMask = 0xC0,
+ Stuff_CountMask = 0x3F,
+ Stuff_MaxCount = 0x3F,
+ Stuff_Magic = 0x0D /* The value we are eliminating */
+} StuffingCode;
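+/* Illustrative example of the scheme above (values hand-computed, not taken
+ * from a real packet): every output byte is XORed with Stuff_Magic (0x0D) so
+ * that a raw CR never appears in the encoded stream.
+ *   input  41 41 41 (three identical bytes)
+ *     -> code (0x80 + 2) ^ 0x0D = 0x8F, data 0x41 ^ 0x0D = 0x4C
+ *     -> output 8F 4C
+ *   input  00 00 (two zeroes)
+ *     -> code (0xC0 + 1) ^ 0x0D = 0xCC, no data bytes
+ *     -> output CC
+ */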
+
+/* StuffData encodes the data starting at "src" for "length" bytes.
+ * It writes it to the buffer pointed to by "dst" (which must be at least
+ * as long as 1 + 65/64 of the input length). The output may be up to 1.6%
+ * larger than the input for pathological input, but will usually be smaller.
+ * StuffData returns the new value of the dst pointer as its result.
+ * "code_ptr_ptr" points to a "__u8 *" which is used to hold encoding state
+ * between calls, allowing an encoded packet to be incrementally built up
+ * from small parts. On the first call, the "__u8 *" pointed to should be
+ * initialized to NULL; between subsequent calls the calling routine should
+ * leave the value alone and simply pass it back unchanged so that the
+ * encoder can recover its current state.
+ */
+
+#define StuffData_FinishBlock(X) \
+(*code_ptr = (X) ^ Stuff_Magic, code = Stuff_NoCode)
+
+static __u8 *StuffData(__u8 * src, __u32 length, __u8 * dst,
+ __u8 ** code_ptr_ptr)
+{
+ __u8 *end = src + length;
+ __u8 *code_ptr = *code_ptr_ptr;
+ __u8 code = Stuff_NoCode, count = 0;
+
+ if (!length)
+ return (dst);
+
+ if (code_ptr) {
+ /*
+ * Recover state from last call, if applicable
+ */
+ code = (*code_ptr ^ Stuff_Magic) & Stuff_CodeMask;
+ count = (*code_ptr ^ Stuff_Magic) & Stuff_CountMask;
+ }
+
+ while (src < end) {
+ switch (code) {
+ /* Stuff_NoCode: If no current code, select one */
+ case Stuff_NoCode:
+ /* Record where we're going to put this code */
+ code_ptr = dst++;
+ count = 0; /* Reset the count (zero means one instance) */
+ /* Tentatively start a new block */
+ if (*src == 0) {
+ code = Stuff_Zero;
+ src++;
+ } else {
+ code = Stuff_Same;
+ *dst++ = *src++ ^ Stuff_Magic;
+ }
+ /* Note: We optimistically assume run of same -- */
+ /* which will be fixed later in Stuff_Same */
+ /* if it turns out not to be true. */
+ break;
+
+ /* Stuff_Zero: We already have at least one zero encoded */
+ case Stuff_Zero:
+ /* If another zero, count it, else finish this code block */
+ if (*src == 0) {
+ count++;
+ src++;
+ } else {
+ StuffData_FinishBlock(Stuff_Zero + count);
+ }
+ break;
+
+ /* Stuff_Same: We already have at least one byte encoded */
+ case Stuff_Same:
+ /* If another one the same, count it */
+ if ((*src ^ Stuff_Magic) == code_ptr[1]) {
+ count++;
+ src++;
+ break;
+ }
+ /* else, this byte does not match this block. */
+ /* If we already have two or more bytes encoded, finish this code block */
+ if (count) {
+ StuffData_FinishBlock(Stuff_Same + count);
+ break;
+ }
+ /* else, we only have one so far, so switch to Stuff_Diff code */
+ code = Stuff_Diff;
+ /* and fall through to Stuff_Diff case below
+ * Note cunning cleverness here: case Stuff_Diff compares
+ * the current character with the previous two to see if it
+ * has a run of three the same. Won't this be an error if
+ * there aren't two previous characters stored to compare with?
+ * No. Because we know the current character is *not* the same
+ * as the previous one, the first test below will necessarily
+ * fail and the second half of the "if" won't be executed.
+ */
+
+ /* Stuff_Diff: We have at least two *different* bytes encoded */
+ case Stuff_Diff:
+ /* If this is a zero, must encode a Stuff_DiffZero, and begin a new block */
+ if (*src == 0) {
+ StuffData_FinishBlock(Stuff_DiffZero +
+ count);
+ }
+ /* else, if we have three in a row, it is worth starting a Stuff_Same block */
+ else if ((*src ^ Stuff_Magic) == dst[-1]
+ && dst[-1] == dst[-2]) {
+ /* Back off the last two characters we encoded */
+ code += count - 2;
+ /* Note: "Stuff_Diff + 0" is an illegal code */
+ if (code == Stuff_Diff + 0) {
+ code = Stuff_Same + 0;
+ }
+ StuffData_FinishBlock(code);
+ code_ptr = dst - 2;
+ /* dst[-1] already holds the correct value */
+ count = 2; /* 2 means three bytes encoded */
+ code = Stuff_Same;
+ }
+ /* else, another different byte, so add it to the block */
+ else {
+ *dst++ = *src ^ Stuff_Magic;
+ count++;
+ }
+ src++; /* Consume the byte */
+ break;
+ }
+ if (count == Stuff_MaxCount) {
+ StuffData_FinishBlock(code + count);
+ }
+ }
+ if (code == Stuff_NoCode) {
+ *code_ptr_ptr = NULL;
+ } else {
+ *code_ptr_ptr = code_ptr;
+ StuffData_FinishBlock(code + count);
+ }
+ return (dst);
+}
+
+/*
+ * UnStuffData decodes the data at "src", up to (but not including) "end".
+ * It writes the decoded data into the buffer pointed to by "dst", up to a
+ * maximum of "dst_length", and returns the new value of "src" so that a
+ * follow-on call can read more data, continuing from where the first left off.
+ *
+ * There are three types of results:
+ * 1. The source data runs out before extracting "dst_length" bytes:
+ * UnStuffData returns NULL to indicate failure.
+ * 2. The source data produces exactly "dst_length" bytes:
+ * UnStuffData returns new_src = end to indicate that all bytes were consumed.
+ * 3. "dst_length" bytes are extracted, with more remaining.
+ * UnStuffData returns new_src < end to indicate that there are more bytes
+ * to be read.
+ *
+ * Note: The decoding may be destructive, in that it may alter the source
+ * data in the process of decoding it (this is necessary to allow a follow-on
+ * call to resume correctly).
+ */
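+/* Illustrative round trip of the StuffData example above (hand-computed):
+ * feeding UnStuffData the two encoded bytes 8F 4C with dst_length = 3
+ * decodes a Stuff_Same block with count 2, yielding 41 41 41 and returning
+ * new_src == end (result type 2: all source bytes consumed).
+ */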
+
+static __u8 *UnStuffData(__u8 * src, __u8 * end, __u8 * dst,
+ __u32 dst_length)
+{
+ __u8 *dst_end = dst + dst_length;
+ /* Sanity check */
+ if (!src || !end || !dst || !dst_length)
+ return (NULL);
+ while (src < end && dst < dst_end) {
+ int count = (*src ^ Stuff_Magic) & Stuff_CountMask;
+ switch ((*src ^ Stuff_Magic) & Stuff_CodeMask) {
+ case Stuff_Diff:
+ if (src + 1 + count >= end)
+ return (NULL);
+ do {
+ *dst++ = *++src ^ Stuff_Magic;
+ }
+ while (--count >= 0 && dst < dst_end);
+ if (count < 0)
+ src += 1;
+ else {
+ if (count == 0)
+ *src = Stuff_Same ^ Stuff_Magic;
+ else
+ *src =
+ (Stuff_Diff +
+ count) ^ Stuff_Magic;
+ }
+ break;
+ case Stuff_DiffZero:
+ if (src + 1 + count >= end)
+ return (NULL);
+ do {
+ *dst++ = *++src ^ Stuff_Magic;
+ }
+ while (--count >= 0 && dst < dst_end);
+ if (count < 0)
+ *src = Stuff_Zero ^ Stuff_Magic;
+ else
+ *src =
+ (Stuff_DiffZero + count) ^ Stuff_Magic;
+ break;
+ case Stuff_Same:
+ if (src + 1 >= end)
+ return (NULL);
+ do {
+ *dst++ = src[1] ^ Stuff_Magic;
+ }
+ while (--count >= 0 && dst < dst_end);
+ if (count < 0)
+ src += 2;
+ else
+ *src = (Stuff_Same + count) ^ Stuff_Magic;
+ break;
+ case Stuff_Zero:
+ do {
+ *dst++ = 0;
+ }
+ while (--count >= 0 && dst < dst_end);
+ if (count < 0)
+ src += 1;
+ else
+ *src = (Stuff_Zero + count) ^ Stuff_Magic;
+ break;
+ }
+ }
+ if (dst < dst_end)
+ return (NULL);
+ else
+ return (src);
+}
+
+
+/************************************************************************/
+/* General routines for STRIP */
+
+/*
+ * get_baud returns the current baud rate, as one of the constants defined in
+ * termbits.h
+ * If the user has issued a baud rate override using the 'setserial' command
+ * and the logical current rate is set to 38.4, then the true baud rate
+ * currently in effect (57.6 or 115.2) is returned.
+ */
+static unsigned int get_baud(struct tty_struct *tty)
+{
+ if (!tty || !tty->termios)
+ return (0);
+ if ((tty->termios->c_cflag & CBAUD) == B38400 && tty->driver_data) {
+ struct async_struct *info =
+ (struct async_struct *) tty->driver_data;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ return (B57600);
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ return (B115200);
+ }
+ return (tty->termios->c_cflag & CBAUD);
+}
+
+/*
+ * set_baud sets the baud rate to the rate defined by baudcode
+ * Note: The rate B38400 should be avoided, because the user may have
+ * issued a 'setserial' speed override to map that to a different speed.
+ * We could achieve a true rate of 38400 if we needed to by cancelling
+ * any user speed override that is in place, but that might annoy the
+ * user, so it is simplest to just avoid using 38400.
+ */
+static void set_baud(struct tty_struct *tty, unsigned int baudcode)
+{
+ struct termios old_termios = *(tty->termios);
+ tty->termios->c_cflag &= ~CBAUD; /* Clear the old baud setting */
+ tty->termios->c_cflag |= baudcode; /* Set the new baud setting */
+ tty->driver->set_termios(tty, &old_termios);
+}
+
+/*
+ * Convert a string to a Metricom Address.
+ */
+
+#define IS_RADIO_ADDRESS(p) ( \
+ isdigit((p)[0]) && isdigit((p)[1]) && isdigit((p)[2]) && isdigit((p)[3]) && \
+ (p)[4] == '-' && \
+ isdigit((p)[5]) && isdigit((p)[6]) && isdigit((p)[7]) && isdigit((p)[8]) )
+
+static int string_to_radio_address(MetricomAddress * addr, __u8 * p)
+{
+ if (!IS_RADIO_ADDRESS(p))
+ return (1);
+ addr->c[0] = 0;
+ addr->c[1] = 0;
+ addr->c[2] = READHEX(p[0]) << 4 | READHEX(p[1]);
+ addr->c[3] = READHEX(p[2]) << 4 | READHEX(p[3]);
+ addr->c[4] = READHEX(p[5]) << 4 | READHEX(p[6]);
+ addr->c[5] = READHEX(p[7]) << 4 | READHEX(p[8]);
+ return (0);
+}
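+/* Illustrative example (hypothetical address): string_to_radio_address()
+ * applied to "0000-1234" fills addr->c with 00 00 00 00 12 34 and returns 0;
+ * radio_address_to_string() below reverses this, producing "0000-1234" again.
+ */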
+
+/*
+ * Convert a Metricom Address to a string.
+ */
+
+static __u8 *radio_address_to_string(const MetricomAddress * addr,
+ MetricomAddressString * p)
+{
+ sprintf(p->c, "%02X%02X-%02X%02X", addr->c[2], addr->c[3],
+ addr->c[4], addr->c[5]);
+ return (p->c);
+}
+
+/*
+ * Note: Must make sure sx_size is big enough to receive a stuffed
+ * MAX_RECV_MTU packet. Additionally, we also want to ensure that it's
+ * big enough to receive a large radio neighbour list (currently 4K).
+ */
+
+static int allocate_buffers(struct strip *strip_info, int mtu)
+{
+ struct net_device *dev = strip_info->dev;
+ int sx_size = max_t(int, STRIP_ENCAP_SIZE(MAX_RECV_MTU), 4096);
+ int tx_size = STRIP_ENCAP_SIZE(mtu) + MaxCommandStringLength;
+ __u8 *r = kmalloc(MAX_RECV_MTU, GFP_ATOMIC);
+ __u8 *s = kmalloc(sx_size, GFP_ATOMIC);
+ __u8 *t = kmalloc(tx_size, GFP_ATOMIC);
+ if (r && s && t) {
+ strip_info->rx_buff = r;
+ strip_info->sx_buff = s;
+ strip_info->tx_buff = t;
+ strip_info->sx_size = sx_size;
+ strip_info->tx_size = tx_size;
+ strip_info->mtu = dev->mtu = mtu;
+ return (1);
+ }
+ if (r)
+ kfree(r);
+ if (s)
+ kfree(s);
+ if (t)
+ kfree(t);
+ return (0);
+}
+
+/*
+ * MTU has been changed by the IP layer.
+ * We could be in
+ * an upcall from the tty driver, or in an ip packet queue.
+ */
+static int strip_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct strip *strip_info = netdev_priv(dev);
+ int old_mtu = strip_info->mtu;
+ unsigned char *orbuff = strip_info->rx_buff;
+ unsigned char *osbuff = strip_info->sx_buff;
+ unsigned char *otbuff = strip_info->tx_buff;
+
+ if (new_mtu > MAX_SEND_MTU) {
+ printk(KERN_ERR
+ "%s: MTU exceeds maximum allowable (%d), MTU change cancelled.\n",
+ strip_info->dev->name, MAX_SEND_MTU);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&strip_lock);
+ if (!allocate_buffers(strip_info, new_mtu)) {
+ printk(KERN_ERR "%s: unable to grow strip buffers, MTU change cancelled.\n",
+ strip_info->dev->name);
+ spin_unlock_bh(&strip_lock);
+ return -ENOMEM;
+ }
+
+ if (strip_info->sx_count) {
+ if (strip_info->sx_count <= strip_info->sx_size)
+ memcpy(strip_info->sx_buff, osbuff,
+ strip_info->sx_count);
+ else {
+ strip_info->discard = strip_info->sx_count;
+ strip_info->rx_over_errors++;
+ }
+ }
+
+ if (strip_info->tx_left) {
+ if (strip_info->tx_left <= strip_info->tx_size)
+ memcpy(strip_info->tx_buff, strip_info->tx_head,
+ strip_info->tx_left);
+ else {
+ strip_info->tx_left = 0;
+ strip_info->tx_dropped++;
+ }
+ }
+ strip_info->tx_head = strip_info->tx_buff;
+ spin_unlock_bh(&strip_lock);
+
+ printk(KERN_NOTICE "%s: strip MTU changed from %d to %d.\n",
+ strip_info->dev->name, old_mtu, strip_info->mtu);
+
+ if (orbuff)
+ kfree(orbuff);
+ if (osbuff)
+ kfree(osbuff);
+ if (otbuff)
+ kfree(otbuff);
+
+ return 0;
+}
+
+static void strip_unlock(struct strip *strip_info)
+{
+ /*
+ * Set the timer to go off in one second.
+ */
+ strip_info->idle_timer.expires = jiffies + 1 * HZ;
+ add_timer(&strip_info->idle_timer);
+ netif_wake_queue(strip_info->dev);
+}
+
+
+
+/*
+ * If the time is in the near future, time_delta prints the number of
+ * seconds to go into the buffer and returns the address of the buffer.
+ * If the time is not in the near future, it returns the address of the
+ * string "Not scheduled". The buffer must be long enough to contain the
+ * ASCII representation of the number plus 9 characters for the " seconds"
+ * and the null character.
+ */
+#ifdef CONFIG_PROC_FS
+static char *time_delta(char buffer[], long time)
+{
+ time -= jiffies;
+ if (time > LongTime / 2)
+ return ("Not scheduled");
+ if (time < 0)
+ time = 0; /* Don't print negative times */
+ sprintf(buffer, "%ld seconds", time / HZ);
+ return (buffer);
+}
+
+/* get Nth element of the linked list */
+static struct strip *strip_get_idx(loff_t pos)
+{
+ struct list_head *l;
+ int i = 0;
+
+ list_for_each_rcu(l, &strip_list) {
+ if (pos == i)
+ return list_entry(l, struct strip, list);
+ ++i;
+ }
+ return NULL;
+}
+
+static void *strip_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ rcu_read_lock();
+ return *pos ? strip_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *strip_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct list_head *l;
+ struct strip *s;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN)
+ return strip_get_idx(1);
+
+ s = v;
+ l = &s->list;
+ list_for_each_continue_rcu(l, &strip_list) {
+ return list_entry(l, struct strip, list);
+ }
+ return NULL;
+}
+
+static void strip_seq_stop(struct seq_file *seq, void *v)
+{
+ rcu_read_unlock();
+}
+
+static void strip_seq_neighbours(struct seq_file *seq,
+ const MetricomNodeTable * table,
+ const char *title)
+{
+ /* We wrap this in a do/while loop, so if the table changes */
+ /* while we're reading it, we just go around and try again. */
+ struct timeval t;
+
+ do {
+ int i;
+ t = table->timestamp;
+ if (table->num_nodes)
+ seq_printf(seq, "\n %s\n", title);
+ for (i = 0; i < table->num_nodes; i++) {
+ MetricomNode node;
+
+ spin_lock_bh(&strip_lock);
+ node = table->node[i];
+ spin_unlock_bh(&strip_lock);
+ seq_printf(seq, " %s\n", node.c);
+ }
+ } while (table->timestamp.tv_sec != t.tv_sec
+ || table->timestamp.tv_usec != t.tv_usec);
+}
+
+/*
+ * This function prints radio status information via the seq_file
+ * interface. The interface takes care of buffer size and
+ * overrun issues.
+ *
+ * The buffer in seq_file is PAGESIZE (4K)
+ * so this routine should never print more or it will get truncated.
+ * With the maximum of 32 portables and 32 poletops
+ * reported, the routine outputs 3107 bytes into the buffer.
+ */
+static void strip_seq_status_info(struct seq_file *seq,
+ const struct strip *strip_info)
+{
+ char temp[32];
+ MetricomAddressString addr_string;
+
+ /* First, we must copy all of our data to a safe place, */
+ /* in case a serial interrupt comes in and changes it. */
+ int tx_left = strip_info->tx_left;
+ unsigned long rx_average_pps = strip_info->rx_average_pps;
+ unsigned long tx_average_pps = strip_info->tx_average_pps;
+ unsigned long sx_average_pps = strip_info->sx_average_pps;
+ int working = strip_info->working;
+ int firmware_level = strip_info->firmware_level;
+ long watchdog_doprobe = strip_info->watchdog_doprobe;
+ long watchdog_doreset = strip_info->watchdog_doreset;
+ long gratuitous_arp = strip_info->gratuitous_arp;
+ long arp_interval = strip_info->arp_interval;
+ FirmwareVersion firmware_version = strip_info->firmware_version;
+ SerialNumber serial_number = strip_info->serial_number;
+ BatteryVoltage battery_voltage = strip_info->battery_voltage;
+ char *if_name = strip_info->dev->name;
+ MetricomAddress true_dev_addr = strip_info->true_dev_addr;
+ MetricomAddress dev_dev_addr =
+ *(MetricomAddress *) strip_info->dev->dev_addr;
+ int manual_dev_addr = strip_info->manual_dev_addr;
+#ifdef EXT_COUNTERS
+ unsigned long rx_bytes = strip_info->rx_bytes;
+ unsigned long tx_bytes = strip_info->tx_bytes;
+ unsigned long rx_rbytes = strip_info->rx_rbytes;
+ unsigned long tx_rbytes = strip_info->tx_rbytes;
+ unsigned long rx_sbytes = strip_info->rx_sbytes;
+ unsigned long tx_sbytes = strip_info->tx_sbytes;
+ unsigned long rx_ebytes = strip_info->rx_ebytes;
+ unsigned long tx_ebytes = strip_info->tx_ebytes;
+#endif
+
+ seq_printf(seq, "\nInterface name\t\t%s\n", if_name);
+ seq_printf(seq, " Radio working:\t\t%s\n", working ? "Yes" : "No");
+ radio_address_to_string(&true_dev_addr, &addr_string);
+ seq_printf(seq, " Radio address:\t\t%s\n", addr_string.c);
+ if (manual_dev_addr) {
+ radio_address_to_string(&dev_dev_addr, &addr_string);
+ seq_printf(seq, " Device address:\t%s\n", addr_string.c);
+ }
+ seq_printf(seq, " Firmware version:\t%s", !working ? "Unknown" :
+ !firmware_level ? "Should be upgraded" :
+ firmware_version.c);
+ if (firmware_level >= ChecksummedMessages)
+ seq_printf(seq, " (Checksums Enabled)");
+ seq_printf(seq, "\n");
+ seq_printf(seq, " Serial number:\t\t%s\n", serial_number.c);
+ seq_printf(seq, " Battery voltage:\t%s\n", battery_voltage.c);
+ seq_printf(seq, " Transmit queue (bytes):%d\n", tx_left);
+ seq_printf(seq, " Receive packet rate: %ld packets per second\n",
+ rx_average_pps / 8);
+ seq_printf(seq, " Transmit packet rate: %ld packets per second\n",
+ tx_average_pps / 8);
+ seq_printf(seq, " Sent packet rate: %ld packets per second\n",
+ sx_average_pps / 8);
+ seq_printf(seq, " Next watchdog probe:\t%s\n",
+ time_delta(temp, watchdog_doprobe));
+ seq_printf(seq, " Next watchdog reset:\t%s\n",
+ time_delta(temp, watchdog_doreset));
+ seq_printf(seq, " Next gratuitous ARP:\t");
+
+ if (!memcmp
+ (strip_info->dev->dev_addr, zero_address.c,
+ sizeof(zero_address)))
+ seq_printf(seq, "Disabled\n");
+ else {
+ seq_printf(seq, "%s\n", time_delta(temp, gratuitous_arp));
+ seq_printf(seq, " Next ARP interval:\t%ld seconds\n",
+ JIFFIE_TO_SEC(arp_interval));
+ }
+
+ if (working) {
+#ifdef EXT_COUNTERS
+ seq_printf(seq, "\n");
+ seq_printf(seq,
+ " Total bytes: \trx:\t%lu\ttx:\t%lu\n",
+ rx_bytes, tx_bytes);
+ seq_printf(seq,
+ " thru radio: \trx:\t%lu\ttx:\t%lu\n",
+ rx_rbytes, tx_rbytes);
+ seq_printf(seq,
+ " thru serial port: \trx:\t%lu\ttx:\t%lu\n",
+ rx_sbytes, tx_sbytes);
+ seq_printf(seq,
+ " Total stat/err bytes:\trx:\t%lu\ttx:\t%lu\n",
+ rx_ebytes, tx_ebytes);
+#endif
+ strip_seq_neighbours(seq, &strip_info->poletops,
+ "Poletops:");
+ strip_seq_neighbours(seq, &strip_info->portables,
+ "Portables:");
+ }
+}
+
+/*
+ * This function exports status information from the STRIP driver through
+ * the /proc file system.
+ */
+static int strip_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "strip_version: %s\n", StripVersion);
+ else
+ strip_seq_status_info(seq, (const struct strip *)v);
+ return 0;
+}
+
+
+static struct seq_operations strip_seq_ops = {
+ .start = strip_seq_start,
+ .next = strip_seq_next,
+ .stop = strip_seq_stop,
+ .show = strip_seq_show,
+};
+
+static int strip_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &strip_seq_ops);
+}
+
+static struct file_operations strip_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = strip_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
+
+
+/************************************************************************/
+/* Sending routines */
+
+static void ResetRadio(struct strip *strip_info)
+{
+ struct tty_struct *tty = strip_info->tty;
+ static const char init[] = "ate0q1dt**starmode\r**";
+ StringDescriptor s = { init, sizeof(init) - 1 };
+
+ /*
+ * If the radio isn't working anymore,
+ * we should clear the old status information.
+ */
+ if (strip_info->working) {
+ printk(KERN_INFO "%s: No response: Resetting radio.\n",
+ strip_info->dev->name);
+ strip_info->firmware_version.c[0] = '\0';
+ strip_info->serial_number.c[0] = '\0';
+ strip_info->battery_voltage.c[0] = '\0';
+ strip_info->portables.num_nodes = 0;
+ do_gettimeofday(&strip_info->portables.timestamp);
+ strip_info->poletops.num_nodes = 0;
+ do_gettimeofday(&strip_info->poletops.timestamp);
+ }
+
+ strip_info->pps_timer = jiffies;
+ strip_info->rx_pps_count = 0;
+ strip_info->tx_pps_count = 0;
+ strip_info->sx_pps_count = 0;
+ strip_info->rx_average_pps = 0;
+ strip_info->tx_average_pps = 0;
+ strip_info->sx_average_pps = 0;
+
+ /* Mark radio address as unknown */
+ *(MetricomAddress *) & strip_info->true_dev_addr = zero_address;
+ if (!strip_info->manual_dev_addr)
+ *(MetricomAddress *) strip_info->dev->dev_addr =
+ zero_address;
+ strip_info->working = FALSE;
+ strip_info->firmware_level = NoStructure;
+ strip_info->next_command = CompatibilityCommand;
+ strip_info->watchdog_doprobe = jiffies + 10 * HZ;
+ strip_info->watchdog_doreset = jiffies + 1 * HZ;
+
+ /* If the user has selected a baud rate above 38.4 see what magic we have to do */
+ if (strip_info->user_baud > B38400) {
+ /*
+ * Subtle stuff: Pay attention :-)
+ * If the serial port is currently at the user's selected (>38.4) rate,
+ * then we temporarily switch to 19.2 and issue the ATS304 command
+ * to tell the radio to switch to the user's selected rate.
+ * If the serial port is not currently at that rate, that means we just
+ * issued the ATS304 command last time through, so this time we restore
+ * the user's selected rate and issue the normal starmode reset string.
+ */
+ if (strip_info->user_baud == get_baud(tty)) {
+ static const char b0[] = "ate0q1s304=57600\r";
+ static const char b1[] = "ate0q1s304=115200\r";
+ static const StringDescriptor baudstring[2] =
+ { {b0, sizeof(b0) - 1}
+ , {b1, sizeof(b1) - 1}
+ };
+ set_baud(tty, B19200);
+ if (strip_info->user_baud == B57600)
+ s = baudstring[0];
+ else if (strip_info->user_baud == B115200)
+ s = baudstring[1];
+ else
+ s = baudstring[1]; /* For now */
+ } else
+ set_baud(tty, strip_info->user_baud);
+ }
+
+ tty->driver->write(tty, s.string, s.length);
+#ifdef EXT_COUNTERS
+ strip_info->tx_ebytes += s.length;
+#endif
+}
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+
+static void strip_write_some_more(struct tty_struct *tty)
+{
+ struct strip *strip_info = (struct strip *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!strip_info || strip_info->magic != STRIP_MAGIC ||
+ !netif_running(strip_info->dev))
+ return;
+
+ if (strip_info->tx_left > 0) {
+ int num_written =
+ tty->driver->write(tty, strip_info->tx_head,
+ strip_info->tx_left);
+ strip_info->tx_left -= num_written;
+ strip_info->tx_head += num_written;
+#ifdef EXT_COUNTERS
+ strip_info->tx_sbytes += num_written;
+#endif
+ } else { /* Else start transmission of another packet */
+
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ strip_unlock(strip_info);
+ }
+}
+
+static __u8 *add_checksum(__u8 * buffer, __u8 * end)
+{
+ __u16 sum = 0;
+ __u8 *p = buffer;
+ while (p < end)
+ sum += *p++;
+ end[3] = hextable[sum & 0xF];
+ sum >>= 4;
+ end[2] = hextable[sum & 0xF];
+ sum >>= 4;
+ end[1] = hextable[sum & 0xF];
+ sum >>= 4;
+ end[0] = hextable[sum & 0xF];
+ return (end + 4);
+}
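+/* Illustrative example (hand-computed): for a buffer holding the two bytes
+ * 'A' 'B', the 16-bit sum is 0x41 + 0x42 = 0x0083, so add_checksum() appends
+ * the four ASCII hex digits '0' '0' '8' '3' and returns end + 4.
+ */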
+
+static unsigned char *strip_make_packet(unsigned char *buffer,
+ struct strip *strip_info,
+ struct sk_buff *skb)
+{
+ __u8 *ptr = buffer;
+ __u8 *stuffstate = NULL;
+ STRIP_Header *header = (STRIP_Header *) skb->data;
+ MetricomAddress haddr = header->dst_addr;
+ int len = skb->len - sizeof(STRIP_Header);
+ MetricomKey key;
+
+ /*HexDump("strip_make_packet", strip_info, skb->data, skb->data + skb->len); */
+
+ if (header->protocol == htons(ETH_P_IP))
+ key = SIP0Key;
+ else if (header->protocol == htons(ETH_P_ARP))
+ key = ARP0Key;
+ else {
+ printk(KERN_ERR
+ "%s: strip_make_packet: Unknown packet type 0x%04X\n",
+ strip_info->dev->name, ntohs(header->protocol));
+ return (NULL);
+ }
+
+ if (len > strip_info->mtu) {
+ printk(KERN_ERR
+ "%s: Dropping oversized transmit packet: %d bytes\n",
+ strip_info->dev->name, len);
+ return (NULL);
+ }
+
+ /*
+ * If we're sending to ourselves, discard the packet.
+ * (Metricom radios choke if they try to send a packet to their own address.)
+ */
+ if (!memcmp(haddr.c, strip_info->true_dev_addr.c, sizeof(haddr))) {
+ printk(KERN_ERR "%s: Dropping packet addressed to self\n",
+ strip_info->dev->name);
+ return (NULL);
+ }
+
+ /*
+ * If this is a broadcast packet, send it to our designated Metricom
+ * 'broadcast hub' radio (First byte of address being 0xFF means broadcast)
+ */
+ if (haddr.c[0] == 0xFF) {
+ u32 brd = 0;
+ struct in_device *in_dev;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get(strip_info->dev);
+ if (in_dev == NULL) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ if (in_dev->ifa_list)
+ brd = in_dev->ifa_list->ifa_broadcast;
+ rcu_read_unlock();
+
+ /* arp_query returns 1 if it succeeds in looking up the address, 0 if it fails */
+ if (!arp_query(haddr.c, brd, strip_info->dev)) {
+ printk(KERN_ERR
+ "%s: Unable to send packet (no broadcast hub configured)\n",
+ strip_info->dev->name);
+ return (NULL);
+ }
+ /*
+ * If we are the broadcast hub, don't bother sending to ourselves.
+ * (Metricom radios choke if they try to send a packet to their own address.)
+ */
+ if (!memcmp
+ (haddr.c, strip_info->true_dev_addr.c, sizeof(haddr)))
+ return (NULL);
+ }
+
+ *ptr++ = 0x0D;
+ *ptr++ = '*';
+ *ptr++ = hextable[haddr.c[2] >> 4];
+ *ptr++ = hextable[haddr.c[2] & 0xF];
+ *ptr++ = hextable[haddr.c[3] >> 4];
+ *ptr++ = hextable[haddr.c[3] & 0xF];
+ *ptr++ = '-';
+ *ptr++ = hextable[haddr.c[4] >> 4];
+ *ptr++ = hextable[haddr.c[4] & 0xF];
+ *ptr++ = hextable[haddr.c[5] >> 4];
+ *ptr++ = hextable[haddr.c[5] & 0xF];
+ *ptr++ = '*';
+ *ptr++ = key.c[0];
+ *ptr++ = key.c[1];
+ *ptr++ = key.c[2];
+ *ptr++ = key.c[3];
+
+ ptr =
+ StuffData(skb->data + sizeof(STRIP_Header), len, ptr,
+ &stuffstate);
+
+ if (strip_info->firmware_level >= ChecksummedMessages)
+ ptr = add_checksum(buffer + 1, ptr);
+
+ *ptr++ = 0x0D;
+ return (ptr);
+}
+
+static void strip_send(struct strip *strip_info, struct sk_buff *skb)
+{
+ MetricomAddress haddr;
+ unsigned char *ptr = strip_info->tx_buff;
+ int doreset = (long) jiffies - strip_info->watchdog_doreset >= 0;
+ int doprobe = (long) jiffies - strip_info->watchdog_doprobe >= 0
+ && !doreset;
+ u32 addr, brd;
+
+ /*
+ * 1. If we have a packet, encapsulate it and put it in the buffer
+ */
+ if (skb) {
+ char *newptr = strip_make_packet(ptr, strip_info, skb);
+ strip_info->tx_pps_count++;
+ if (!newptr)
+ strip_info->tx_dropped++;
+ else {
+ ptr = newptr;
+ strip_info->sx_pps_count++;
+ strip_info->tx_packets++; /* Count another successful packet */
+#ifdef EXT_COUNTERS
+ strip_info->tx_bytes += skb->len;
+ strip_info->tx_rbytes += ptr - strip_info->tx_buff;
+#endif
+ /*DumpData("Sending:", strip_info, strip_info->tx_buff, ptr); */
+ /*HexDump("Sending", strip_info, strip_info->tx_buff, ptr); */
+ }
+ }
+
+ /*
+ * 2. If it is time for another tickle, tack it on, after the packet
+ */
+ if (doprobe) {
+ StringDescriptor ts = CommandString[strip_info->next_command];
+#if TICKLE_TIMERS
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ printk(KERN_INFO "**** Sending tickle string %d at %02d.%06d\n",
+ strip_info->next_command, tv.tv_sec % 100,
+ tv.tv_usec);
+ }
+#endif
+ if (ptr == strip_info->tx_buff)
+ *ptr++ = 0x0D;
+
+ *ptr++ = '*'; /* First send "**" to provoke an error message */
+ *ptr++ = '*';
+
+ /* Then add the command */
+ memcpy(ptr, ts.string, ts.length);
+
+ /* Add a checksum ? */
+ if (strip_info->firmware_level < ChecksummedMessages)
+ ptr += ts.length;
+ else
+ ptr = add_checksum(ptr, ptr + ts.length);
+
+ *ptr++ = 0x0D; /* Terminate the command with a <CR> */
+
+ /* Cycle to next periodic command? */
+ if (strip_info->firmware_level >= StructuredMessages)
+ if (++strip_info->next_command >=
+ ARRAY_SIZE(CommandString))
+ strip_info->next_command = 0;
+#ifdef EXT_COUNTERS
+ strip_info->tx_ebytes += ts.length;
+#endif
+ strip_info->watchdog_doprobe = jiffies + 10 * HZ;
+ strip_info->watchdog_doreset = jiffies + 1 * HZ;
+ /*printk(KERN_INFO "%s: Routine radio test.\n", strip_info->dev->name); */
+ }
+
+ /*
+ * 3. Set up the strip_info ready to send the data (if any).
+ */
+ strip_info->tx_head = strip_info->tx_buff;
+ strip_info->tx_left = ptr - strip_info->tx_buff;
+ strip_info->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+
+ /*
+ * 4. Debugging check to make sure we're not overflowing the buffer.
+ */
+ if (strip_info->tx_size - strip_info->tx_left < 20)
+ printk(KERN_ERR "%s: Sending%5d bytes;%5d bytes free.\n",
+ strip_info->dev->name, strip_info->tx_left,
+ strip_info->tx_size - strip_info->tx_left);
+
+ /*
+ * 5. If watchdog has expired, reset the radio. Note: if there's data waiting in
+ * the buffer, strip_write_some_more will send it after the reset has finished
+ */
+ if (doreset) {
+ ResetRadio(strip_info);
+ return;
+ }
+
+ if (1) {
+ struct in_device *in_dev;
+
+ brd = addr = 0;
+ rcu_read_lock();
+ in_dev = __in_dev_get(strip_info->dev);
+ if (in_dev) {
+ if (in_dev->ifa_list) {
+ brd = in_dev->ifa_list->ifa_broadcast;
+ addr = in_dev->ifa_list->ifa_local;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+
+ /*
+ * 6. If it is time for a periodic ARP, queue one up to be sent.
+ * We only do this if:
+ * 1. The radio is working
+ * 2. It's time to send another periodic ARP
+ * 3. We really know what our address is (and it is not manually set to zero)
+ * 4. We have a designated broadcast address configured
+ * If we queue up an ARP packet when we don't have a designated broadcast
+ * address configured, then the packet will just have to be discarded in
+ * strip_make_packet. This is not fatal, but it causes misleading information
+ * to be displayed in tcpdump. tcpdump will report that periodic ARPs are
+ * being sent, when in fact they are not, because they are all being dropped
+ * in the strip_make_packet routine.
+ */
+ if (strip_info->working
+ && (long) jiffies - strip_info->gratuitous_arp >= 0
+ && memcmp(strip_info->dev->dev_addr, zero_address.c,
+ sizeof(zero_address))
+ && arp_query(haddr.c, brd, strip_info->dev)) {
+ /*printk(KERN_INFO "%s: Sending gratuitous ARP with interval %ld\n",
+ strip_info->dev->name, strip_info->arp_interval / HZ); */
+ strip_info->gratuitous_arp =
+ jiffies + strip_info->arp_interval;
+ strip_info->arp_interval *= 2;
+ if (strip_info->arp_interval > MaxARPInterval)
+ strip_info->arp_interval = MaxARPInterval;
+ if (addr)
+ arp_send(ARPOP_REPLY, ETH_P_ARP, addr, /* Target address of ARP packet is our address */
+ strip_info->dev, /* Device to send packet on */
+ addr, /* Source IP address this ARP packet comes from */
+ NULL, /* Destination HW address is NULL (broadcast it) */
+ strip_info->dev->dev_addr, /* Source HW address is our HW address */
+ strip_info->dev->dev_addr); /* Target HW address is our HW address (redundant) */
+ }
+
+ /*
+ * 7. All ready. Start the transmission
+ */
+ strip_write_some_more(strip_info->tty);
+}
+
+/* Encapsulate a datagram and kick it into a TTY queue. */
+static int strip_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct strip *strip_info = netdev_priv(dev);
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "%s: xmit call when iface is down\n",
+ dev->name);
+ return (1);
+ }
+
+ netif_stop_queue(dev);
+
+ del_timer(&strip_info->idle_timer);
+
+
+ if (jiffies - strip_info->pps_timer > HZ) {
+ unsigned long t = jiffies - strip_info->pps_timer;
+ unsigned long rx_pps_count = (strip_info->rx_pps_count * HZ * 8 + t / 2) / t;
+ unsigned long tx_pps_count = (strip_info->tx_pps_count * HZ * 8 + t / 2) / t;
+ unsigned long sx_pps_count = (strip_info->sx_pps_count * HZ * 8 + t / 2) / t;
+
+ strip_info->pps_timer = jiffies;
+ strip_info->rx_pps_count = 0;
+ strip_info->tx_pps_count = 0;
+ strip_info->sx_pps_count = 0;
+
+ strip_info->rx_average_pps = (strip_info->rx_average_pps + rx_pps_count + 1) / 2;
+ strip_info->tx_average_pps = (strip_info->tx_average_pps + tx_pps_count + 1) / 2;
+ strip_info->sx_average_pps = (strip_info->sx_average_pps + sx_pps_count + 1) / 2;
+
+ if (rx_pps_count / 8 >= 10)
+ printk(KERN_INFO "%s: WARNING: Receiving %ld packets per second.\n",
+ strip_info->dev->name, rx_pps_count / 8);
+ if (tx_pps_count / 8 >= 10)
+ printk(KERN_INFO "%s: WARNING: Tx %ld packets per second.\n",
+ strip_info->dev->name, tx_pps_count / 8);
+ if (sx_pps_count / 8 >= 10)
+ printk(KERN_INFO "%s: WARNING: Sending %ld packets per second.\n",
+ strip_info->dev->name, sx_pps_count / 8);
+ }
+
+ spin_lock_bh(&strip_lock);
+
+ strip_send(strip_info, skb);
+
+ spin_unlock_bh(&strip_lock);
+
+ if (skb)
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * IdleTask periodically calls strip_xmit, so even when we have no IP packets
+ * to send for an extended period of time, the watchdog processing still gets
+ * done to ensure that the radio stays in Starmode.
+ */
+
+static void strip_IdleTask(unsigned long parameter)
+{
+ strip_xmit(NULL, (struct net_device *) parameter);
+}
+
+/*
+ * Create the MAC header for an arbitrary protocol layer
+ *
+ * saddr!=NULL means use this specific address (n/a for Metricom)
+ * saddr==NULL means use default device source address
+ * daddr!=NULL means use this destination address
+ * daddr==NULL means leave destination address alone
+ * (e.g. unresolved arp -- kernel will call
+ * rebuild_header later to fill in the address)
+ */
+
+static int strip_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ struct strip *strip_info = netdev_priv(dev);
+ STRIP_Header *header = (STRIP_Header *) skb_push(skb, sizeof(STRIP_Header));
+
+ /*printk(KERN_INFO "%s: strip_header 0x%04X %s\n", dev->name, type,
+ type == ETH_P_IP ? "IP" : type == ETH_P_ARP ? "ARP" : ""); */
+
+ header->src_addr = strip_info->true_dev_addr;
+ header->protocol = htons(type);
+
+ /*HexDump("strip_header", netdev_priv(dev), skb->data, skb->data + skb->len); */
+
+ if (!daddr)
+ return (-dev->hard_header_len);
+
+ header->dst_addr = *(MetricomAddress *) daddr;
+ return (dev->hard_header_len);
+}
+
+/*
+ * Rebuild the MAC header. This is called after an ARP
+ * (or in future other address resolution) has completed on this
+ * sk_buff. We now let ARP fill in the other fields.
+ * I think this should return zero if packet is ready to send,
+ * or non-zero if it needs more time to do an address lookup
+ */
+
+static int strip_rebuild_header(struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ STRIP_Header *header = (STRIP_Header *) skb->data;
+
+	/* arp_find returns zero if it knows the address, */
+ /* or if it doesn't know the address it sends an ARP packet and returns non-zero */
+ return arp_find(header->dst_addr.c, skb) ? 1 : 0;
+#else
+ return 0;
+#endif
+}
+
+
+/************************************************************************/
+/* Receiving routines */
+
+static int strip_receive_room(struct tty_struct *tty)
+{
+ return 0x10000; /* We can handle an infinite amount of data. :-) */
+}
+
+/*
+ * This function parses the response to the ATS300? command,
+ * extracting the radio version and serial number.
+ */
+static void get_radio_version(struct strip *strip_info, __u8 * ptr, __u8 * end)
+{
+ __u8 *p, *value_begin, *value_end;
+ int len;
+
+ /* Determine the beginning of the second line of the payload */
+ p = ptr;
+ while (p < end && *p != 10)
+ p++;
+ if (p >= end)
+ return;
+ p++;
+ value_begin = p;
+
+ /* Determine the end of line */
+ while (p < end && *p != 10)
+ p++;
+ if (p >= end)
+ return;
+ value_end = p;
+ p++;
+
+ len = value_end - value_begin;
+ len = min_t(int, len, sizeof(FirmwareVersion) - 1);
+ if (strip_info->firmware_version.c[0] == 0)
+ printk(KERN_INFO "%s: Radio Firmware: %.*s\n",
+ strip_info->dev->name, len, value_begin);
+ sprintf(strip_info->firmware_version.c, "%.*s", len, value_begin);
+
+ /* Look for the first colon */
+ while (p < end && *p != ':')
+ p++;
+ if (p >= end)
+ return;
+ /* Skip over the space */
+ p += 2;
+ len = sizeof(SerialNumber) - 1;
+ if (p + len <= end) {
+ sprintf(strip_info->serial_number.c, "%.*s", len, p);
+ } else {
+ printk(KERN_DEBUG
+ "STRIP: radio serial number shorter (%zd) than expected (%d)\n",
+ end - p, len);
+ }
+}
+
+/*
+ * This function parses the response to the ATS325? command,
+ * extracting the radio battery voltage.
+ */
+static void get_radio_voltage(struct strip *strip_info, __u8 * ptr, __u8 * end)
+{
+ int len;
+
+ len = sizeof(BatteryVoltage) - 1;
+ if (ptr + len <= end) {
+ sprintf(strip_info->battery_voltage.c, "%.*s", len, ptr);
+ } else {
+ printk(KERN_DEBUG
+ "STRIP: radio voltage string shorter (%zd) than expected (%d)\n",
+ end - ptr, len);
+ }
+}
+
+/*
+ * This function parses the responses to the AT~LA and ATS311 commands,
+ * which list the radio's neighbours.
+ */
+static void get_radio_neighbours(MetricomNodeTable * table, __u8 * ptr, __u8 * end)
+{
+ table->num_nodes = 0;
+ while (ptr < end && table->num_nodes < NODE_TABLE_SIZE) {
+ MetricomNode *node = &table->node[table->num_nodes++];
+ char *dst = node->c, *limit = dst + sizeof(*node) - 1;
+ while (ptr < end && *ptr <= 32)
+ ptr++;
+ while (ptr < end && dst < limit && *ptr != 10)
+ *dst++ = *ptr++;
+ *dst++ = 0;
+ while (ptr < end && ptr[-1] != 10)
+ ptr++;
+ }
+ do_gettimeofday(&table->timestamp);
+}
+
+static int get_radio_address(struct strip *strip_info, __u8 * p)
+{
+ MetricomAddress addr;
+
+ if (string_to_radio_address(&addr, p))
+ return (1);
+
+ /* See if our radio address has changed */
+ if (memcmp(strip_info->true_dev_addr.c, addr.c, sizeof(addr))) {
+ MetricomAddressString addr_string;
+ radio_address_to_string(&addr, &addr_string);
+ printk(KERN_INFO "%s: Radio address = %s\n",
+ strip_info->dev->name, addr_string.c);
+ strip_info->true_dev_addr = addr;
+ if (!strip_info->manual_dev_addr)
+ *(MetricomAddress *) strip_info->dev->dev_addr =
+ addr;
+ /* Give the radio a few seconds to get its head straight, then send an arp */
+ strip_info->gratuitous_arp = jiffies + 15 * HZ;
+ strip_info->arp_interval = 1 * HZ;
+ }
+ return (0);
+}
+
+static int verify_checksum(struct strip *strip_info)
+{
+ __u8 *p = strip_info->sx_buff;
+ __u8 *end = strip_info->sx_buff + strip_info->sx_count - 4;
+ u_short sum =
+ (READHEX16(end[0]) << 12) | (READHEX16(end[1]) << 8) |
+ (READHEX16(end[2]) << 4) | (READHEX16(end[3]));
+ while (p < end)
+ sum -= *p++;
+ if (sum == 0 && strip_info->firmware_level == StructuredMessages) {
+ strip_info->firmware_level = ChecksummedMessages;
+ printk(KERN_INFO "%s: Radio provides message checksums\n",
+ strip_info->dev->name);
+ }
+ return (sum == 0);
+}
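+
+/*
+ * Editor's illustrative sketch, not part of the driver: the inverse of
+ * verify_checksum() above.  Assuming the radio's checksum is simply the
+ * 16-bit sum of the payload bytes encoded as four ASCII hex digits (which
+ * is what verify_checksum() checks for), a sender-side helper might look
+ * like this.  The name append_checksum is hypothetical.
+ */
+#if 0
+static void append_checksum(__u8 *buf, int len)
+{
+	u_short sum = 0;
+	int i;
+
+	for (i = 0; i < len; i++)
+		sum += buf[i];		/* 16-bit sum of the payload bytes */
+	/* Append four ASCII hex digits; caller must leave 5 spare bytes
+	   (sprintf also writes a trailing NUL). */
+	sprintf(buf + len, "%04X", sum);
+}
+#endif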
+
+static void RecvErr(char *msg, struct strip *strip_info)
+{
+ __u8 *ptr = strip_info->sx_buff;
+ __u8 *end = strip_info->sx_buff + strip_info->sx_count;
+ DumpData(msg, strip_info, ptr, end);
+ strip_info->rx_errors++;
+}
+
+static void RecvErr_Message(struct strip *strip_info, __u8 * sendername,
+ const __u8 * msg, u_long len)
+{
+ if (has_prefix(msg, len, "001")) { /* Not in StarMode! */
+ RecvErr("Error Msg:", strip_info);
+ printk(KERN_INFO "%s: Radio %s is not in StarMode\n",
+ strip_info->dev->name, sendername);
+ }
+
+ else if (has_prefix(msg, len, "002")) { /* Remap handle */
+ /* We ignore "Remap handle" messages for now */
+ }
+
+ else if (has_prefix(msg, len, "003")) { /* Can't resolve name */
+ RecvErr("Error Msg:", strip_info);
+ printk(KERN_INFO "%s: Destination radio name is unknown\n",
+ strip_info->dev->name);
+ }
+
+ else if (has_prefix(msg, len, "004")) { /* Name too small or missing */
+ strip_info->watchdog_doreset = jiffies + LongTime;
+#if TICKLE_TIMERS
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ printk(KERN_INFO
+ "**** Got ERR_004 response at %02d.%06d\n",
+ tv.tv_sec % 100, tv.tv_usec);
+ }
+#endif
+ if (!strip_info->working) {
+ strip_info->working = TRUE;
+ printk(KERN_INFO "%s: Radio now in starmode\n",
+ strip_info->dev->name);
+ /*
+ * If the radio has just entered a working state, we should do our first
+ * probe ASAP, so that we find out our radio address etc. without delay.
+ */
+ strip_info->watchdog_doprobe = jiffies;
+ }
+ if (strip_info->firmware_level == NoStructure && sendername) {
+ strip_info->firmware_level = StructuredMessages;
+ strip_info->next_command = 0; /* Try to enable checksums ASAP */
+ printk(KERN_INFO
+ "%s: Radio provides structured messages\n",
+ strip_info->dev->name);
+ }
+ if (strip_info->firmware_level >= StructuredMessages) {
+ /*
+ * If this message has a valid checksum on the end, then the call to verify_checksum
+ * will elevate the firmware_level to ChecksummedMessages for us. (The actual return
+ * code from verify_checksum is ignored here.)
+ */
+ verify_checksum(strip_info);
+ /*
+ * If the radio has structured messages but we don't yet have all our information about it,
+ * we should do probes without delay, until we have gathered all the information
+ */
+ if (!GOT_ALL_RADIO_INFO(strip_info))
+ strip_info->watchdog_doprobe = jiffies;
+ }
+ }
+
+ else if (has_prefix(msg, len, "005")) /* Bad count specification */
+ RecvErr("Error Msg:", strip_info);
+
+ else if (has_prefix(msg, len, "006")) /* Header too big */
+ RecvErr("Error Msg:", strip_info);
+
+ else if (has_prefix(msg, len, "007")) { /* Body too big */
+ RecvErr("Error Msg:", strip_info);
+ printk(KERN_ERR
+ "%s: Error! Packet size too big for radio.\n",
+ strip_info->dev->name);
+ }
+
+ else if (has_prefix(msg, len, "008")) { /* Bad character in name */
+ RecvErr("Error Msg:", strip_info);
+ printk(KERN_ERR
+ "%s: Radio name contains illegal character\n",
+ strip_info->dev->name);
+ }
+
+ else if (has_prefix(msg, len, "009")) /* No count or line terminator */
+ RecvErr("Error Msg:", strip_info);
+
+ else if (has_prefix(msg, len, "010")) /* Invalid checksum */
+ RecvErr("Error Msg:", strip_info);
+
+ else if (has_prefix(msg, len, "011")) /* Checksum didn't match */
+ RecvErr("Error Msg:", strip_info);
+
+ else if (has_prefix(msg, len, "012")) /* Failed to transmit packet */
+ RecvErr("Error Msg:", strip_info);
+
+ else
+ RecvErr("Error Msg:", strip_info);
+}
+
+static void process_AT_response(struct strip *strip_info, __u8 * ptr,
+ __u8 * end)
+{
+ u_long len;
+ __u8 *p = ptr;
+ while (p < end && p[-1] != 10)
+ p++; /* Skip past first newline character */
+ /* Now ptr points to the AT command, and p points to the text of the response. */
+ len = p - ptr;
+
+#if TICKLE_TIMERS
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ printk(KERN_INFO "**** Got AT response %.7s at %02d.%06d\n",
+ ptr, tv.tv_sec % 100, tv.tv_usec);
+ }
+#endif
+
+ if (has_prefix(ptr, len, "ATS300?"))
+ get_radio_version(strip_info, p, end);
+ else if (has_prefix(ptr, len, "ATS305?"))
+ get_radio_address(strip_info, p);
+ else if (has_prefix(ptr, len, "ATS311?"))
+ get_radio_neighbours(&strip_info->poletops, p, end);
+ else if (has_prefix(ptr, len, "ATS319=7"))
+ verify_checksum(strip_info);
+ else if (has_prefix(ptr, len, "ATS325?"))
+ get_radio_voltage(strip_info, p, end);
+ else if (has_prefix(ptr, len, "AT~LA"))
+ get_radio_neighbours(&strip_info->portables, p, end);
+ else
+ RecvErr("Unknown AT Response:", strip_info);
+}
+
+static void process_ACK(struct strip *strip_info, __u8 * ptr, __u8 * end)
+{
+ /* Currently we don't do anything with ACKs from the radio */
+}
+
+static void process_Info(struct strip *strip_info, __u8 * ptr, __u8 * end)
+{
+ if (ptr + 16 > end)
+ RecvErr("Bad Info Msg:", strip_info);
+}
+
+static struct net_device *get_strip_dev(struct strip *strip_info)
+{
+ /* If our hardware address is *manually set* to zero, and we know our */
+ /* real radio hardware address, try to find another strip device that has been */
+ /* manually set to that address that we can 'transfer ownership' of this packet to */
+ if (strip_info->manual_dev_addr &&
+ !memcmp(strip_info->dev->dev_addr, zero_address.c,
+ sizeof(zero_address))
+ && memcmp(&strip_info->true_dev_addr, zero_address.c,
+ sizeof(zero_address))) {
+ struct net_device *dev;
+ read_lock_bh(&dev_base_lock);
+ dev = dev_base;
+ while (dev) {
+ if (dev->type == strip_info->dev->type &&
+ !memcmp(dev->dev_addr,
+ &strip_info->true_dev_addr,
+ sizeof(MetricomAddress))) {
+ printk(KERN_INFO
+ "%s: Transferred packet ownership to %s.\n",
+ strip_info->dev->name, dev->name);
+ read_unlock_bh(&dev_base_lock);
+ return (dev);
+ }
+ dev = dev->next;
+ }
+ read_unlock_bh(&dev_base_lock);
+ }
+ return (strip_info->dev);
+}
+
+/*
+ * Send one completely decapsulated datagram to the next layer.
+ */
+
+static void deliver_packet(struct strip *strip_info, STRIP_Header * header,
+ __u16 packetlen)
+{
+ struct sk_buff *skb = dev_alloc_skb(sizeof(STRIP_Header) + packetlen);
+ if (!skb) {
+ printk(KERN_ERR "%s: memory squeeze, dropping packet.\n",
+ strip_info->dev->name);
+ strip_info->rx_dropped++;
+ } else {
+ memcpy(skb_put(skb, sizeof(STRIP_Header)), header,
+ sizeof(STRIP_Header));
+ memcpy(skb_put(skb, packetlen), strip_info->rx_buff,
+ packetlen);
+ skb->dev = get_strip_dev(strip_info);
+ skb->protocol = header->protocol;
+ skb->mac.raw = skb->data;
+
+ /* Having put a fake header on the front of the sk_buff for the */
+ /* benefit of tools like tcpdump, skb_pull now 'consumes' that */
+ /* fake header before we hand the packet up to the next layer. */
+ skb_pull(skb, sizeof(STRIP_Header));
+
+ /* Finally, hand the packet up to the next layer (e.g. IP or ARP, etc.) */
+ strip_info->rx_packets++;
+ strip_info->rx_pps_count++;
+#ifdef EXT_COUNTERS
+ strip_info->rx_bytes += packetlen;
+#endif
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ }
+}
+
+static void process_IP_packet(struct strip *strip_info,
+ STRIP_Header * header, __u8 * ptr,
+ __u8 * end)
+{
+ __u16 packetlen;
+
+ /* Decode start of the IP packet header */
+ ptr = UnStuffData(ptr, end, strip_info->rx_buff, 4);
+ if (!ptr) {
+ RecvErr("IP Packet too short", strip_info);
+ return;
+ }
+
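+	/* rx_buff[2..3] is the IP header's Total Length field (big-endian), i.e. the size of the whole datagram */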
+ packetlen = ((__u16) strip_info->rx_buff[2] << 8) | strip_info->rx_buff[3];
+
+ if (packetlen > MAX_RECV_MTU) {
+ printk(KERN_INFO "%s: Dropping oversized received IP packet: %d bytes\n",
+ strip_info->dev->name, packetlen);
+ strip_info->rx_dropped++;
+ return;
+ }
+
+ /*printk(KERN_INFO "%s: Got %d byte IP packet\n", strip_info->dev->name, packetlen); */
+
+ /* Decode remainder of the IP packet */
+	ptr = UnStuffData(ptr, end, strip_info->rx_buff + 4, packetlen - 4);
+ if (!ptr) {
+ RecvErr("IP Packet too short", strip_info);
+ return;
+ }
+
+ if (ptr < end) {
+ RecvErr("IP Packet too long", strip_info);
+ return;
+ }
+
+ header->protocol = htons(ETH_P_IP);
+
+ deliver_packet(strip_info, header, packetlen);
+}
+
+static void process_ARP_packet(struct strip *strip_info,
+ STRIP_Header * header, __u8 * ptr,
+ __u8 * end)
+{
+ __u16 packetlen;
+ struct arphdr *arphdr = (struct arphdr *) strip_info->rx_buff;
+
+ /* Decode start of the ARP packet */
+ ptr = UnStuffData(ptr, end, strip_info->rx_buff, 8);
+ if (!ptr) {
+ RecvErr("ARP Packet too short", strip_info);
+ return;
+ }
+
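+	/* ARP payload: the 8-byte fixed arphdr followed by two hardware and two protocol addresses */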
+ packetlen = 8 + (arphdr->ar_hln + arphdr->ar_pln) * 2;
+
+ if (packetlen > MAX_RECV_MTU) {
+ printk(KERN_INFO
+ "%s: Dropping oversized received ARP packet: %d bytes\n",
+ strip_info->dev->name, packetlen);
+ strip_info->rx_dropped++;
+ return;
+ }
+
+ /*printk(KERN_INFO "%s: Got %d byte ARP %s\n",
+ strip_info->dev->name, packetlen,
+ ntohs(arphdr->ar_op) == ARPOP_REQUEST ? "request" : "reply"); */
+
+ /* Decode remainder of the ARP packet */
+	ptr = UnStuffData(ptr, end, strip_info->rx_buff + 8, packetlen - 8);
+ if (!ptr) {
+ RecvErr("ARP Packet too short", strip_info);
+ return;
+ }
+
+ if (ptr < end) {
+ RecvErr("ARP Packet too long", strip_info);
+ return;
+ }
+
+ header->protocol = htons(ETH_P_ARP);
+
+ deliver_packet(strip_info, header, packetlen);
+}
+
+/*
+ * process_text_message processes a <CR>-terminated block of data received
+ * from the radio that doesn't begin with a '*' character. All normal
+ * Starmode communication messages with the radio begin with a '*',
+ * so any text that does not indicates a serial port error, a radio that
+ * is in Hayes command mode instead of Starmode, or a radio with really
+ * old firmware that doesn't frame its Starmode responses properly.
+ */
+static void process_text_message(struct strip *strip_info)
+{
+ __u8 *msg = strip_info->sx_buff;
+ int len = strip_info->sx_count;
+
+ /* Check for anything that looks like it might be our radio name */
+ /* (This is here for backwards compatibility with old firmware) */
+ if (len == 9 && get_radio_address(strip_info, msg) == 0)
+ return;
+
+ if (text_equal(msg, len, "OK"))
+ return; /* Ignore 'OK' responses from prior commands */
+ if (text_equal(msg, len, "ERROR"))
+ return; /* Ignore 'ERROR' messages */
+ if (has_prefix(msg, len, "ate0q1"))
+ return; /* Ignore character echo back from the radio */
+
+ /* Catch other error messages */
+ /* (This is here for backwards compatibility with old firmware) */
+ if (has_prefix(msg, len, "ERR_")) {
+ RecvErr_Message(strip_info, NULL, &msg[4], len - 4);
+ return;
+ }
+
+ RecvErr("No initial *", strip_info);
+}
+
+/*
+ * process_message processes a <CR>-terminated block of data received
+ * from the radio. If the radio is not in Starmode or has old firmware,
+ * it may be a line of text in response to an AT command. Ideally, with
+ * a current radio that's properly in Starmode, all data received should
+ * be properly framed and checksummed radio message blocks, containing
+ * either a Starmode packet, or some other communication from the radio
+ * firmware, like "INF_" Info messages and &COMMAND responses.
+ */
+static void process_message(struct strip *strip_info)
+{
+ STRIP_Header header = { zero_address, zero_address, 0 };
+ __u8 *ptr = strip_info->sx_buff;
+ __u8 *end = strip_info->sx_buff + strip_info->sx_count;
+ __u8 sendername[32], *sptr = sendername;
+ MetricomKey key;
+
+ /*HexDump("Receiving", strip_info, ptr, end); */
+
+ /* Check for start of address marker, and then skip over it */
+ if (*ptr == '*')
+ ptr++;
+ else {
+ process_text_message(strip_info);
+ return;
+ }
+
+ /* Copy out the return address */
+ while (ptr < end && *ptr != '*'
+ && sptr < ARRAY_END(sendername) - 1)
+ *sptr++ = *ptr++;
+ *sptr = 0; /* Null terminate the sender name */
+
+ /* Check for end of address marker, and skip over it */
+ if (ptr >= end || *ptr != '*') {
+ RecvErr("No second *", strip_info);
+ return;
+ }
+ ptr++; /* Skip the second '*' */
+
+ /* If the sender name is "&COMMAND", ignore this 'packet' */
+ /* (This is here for backwards compatibility with old firmware) */
+ if (!strcmp(sendername, "&COMMAND")) {
+ strip_info->firmware_level = NoStructure;
+ strip_info->next_command = CompatibilityCommand;
+ return;
+ }
+
+ if (ptr + 4 > end) {
+ RecvErr("No proto key", strip_info);
+ return;
+ }
+
+ /* Get the protocol key out of the buffer */
+ key.c[0] = *ptr++;
+ key.c[1] = *ptr++;
+ key.c[2] = *ptr++;
+ key.c[3] = *ptr++;
+
+ /* If we're using checksums, verify the checksum at the end of the packet */
+ if (strip_info->firmware_level >= ChecksummedMessages) {
+ end -= 4; /* Chop the last four bytes off the packet (they're the checksum) */
+ if (ptr > end) {
+ RecvErr("Missing Checksum", strip_info);
+ return;
+ }
+ if (!verify_checksum(strip_info)) {
+ RecvErr("Bad Checksum", strip_info);
+ return;
+ }
+ }
+
+ /*printk(KERN_INFO "%s: Got packet from \"%s\".\n", strip_info->dev->name, sendername); */
+
+ /*
+ * Fill in (pseudo) source and destination addresses in the packet.
+ * We assume that the destination address was our address (the radio does not
+ * tell us this). If the radio supplies a source address, then we use it.
+ */
+ header.dst_addr = strip_info->true_dev_addr;
+ string_to_radio_address(&header.src_addr, sendername);
+
+#ifdef EXT_COUNTERS
+ if (key.l == SIP0Key.l) {
+ strip_info->rx_rbytes += (end - ptr);
+ process_IP_packet(strip_info, &header, ptr, end);
+ } else if (key.l == ARP0Key.l) {
+ strip_info->rx_rbytes += (end - ptr);
+ process_ARP_packet(strip_info, &header, ptr, end);
+ } else if (key.l == ATR_Key.l) {
+ strip_info->rx_ebytes += (end - ptr);
+ process_AT_response(strip_info, ptr, end);
+ } else if (key.l == ACK_Key.l) {
+ strip_info->rx_ebytes += (end - ptr);
+ process_ACK(strip_info, ptr, end);
+ } else if (key.l == INF_Key.l) {
+ strip_info->rx_ebytes += (end - ptr);
+ process_Info(strip_info, ptr, end);
+ } else if (key.l == ERR_Key.l) {
+ strip_info->rx_ebytes += (end - ptr);
+ RecvErr_Message(strip_info, sendername, ptr, end - ptr);
+ } else
+ RecvErr("Unrecognized protocol key", strip_info);
+#else
+ if (key.l == SIP0Key.l)
+ process_IP_packet(strip_info, &header, ptr, end);
+ else if (key.l == ARP0Key.l)
+ process_ARP_packet(strip_info, &header, ptr, end);
+ else if (key.l == ATR_Key.l)
+ process_AT_response(strip_info, ptr, end);
+ else if (key.l == ACK_Key.l)
+ process_ACK(strip_info, ptr, end);
+ else if (key.l == INF_Key.l)
+ process_Info(strip_info, ptr, end);
+ else if (key.l == ERR_Key.l)
+ RecvErr_Message(strip_info, sendername, ptr, end - ptr);
+ else
+ RecvErr("Unrecognized protocol key", strip_info);
+#endif
+}
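+
+/*
+ * Editor's note (illustrative, inferred from process_message() above and
+ * strip_receive_buf() below): a received Starmode frame appears to be laid
+ * out as
+ *
+ *   '*' <sender name> '*' <4-byte protocol key> <payload> [<4 hex digit checksum>] <CR>
+ *
+ * where the protocol key selects SIP0/ARP0/ATR_/ACK_/INF_/ERR_ handling and
+ * the trailing checksum is only present once firmware_level has reached
+ * ChecksummedMessages.
+ */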
+
+#define TTYERROR(X) ((X) == TTY_BREAK ? "Break" : \
+ (X) == TTY_FRAME ? "Framing Error" : \
+ (X) == TTY_PARITY ? "Parity Error" : \
+ (X) == TTY_OVERRUN ? "Hardware Overrun" : "Unknown Error")
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of STRIP data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing.
+ */
+
+static void strip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct strip *strip_info = (struct strip *) tty->disc_data;
+ const unsigned char *end = cp + count;
+
+ if (!strip_info || strip_info->magic != STRIP_MAGIC
+ || !netif_running(strip_info->dev))
+ return;
+
+ spin_lock_bh(&strip_lock);
+#if 0
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ printk(KERN_INFO
+ "**** strip_receive_buf: %3d bytes at %02d.%06d\n",
+ count, tv.tv_sec % 100, tv.tv_usec);
+ }
+#endif
+
+#ifdef EXT_COUNTERS
+ strip_info->rx_sbytes += count;
+#endif
+
+ /* Read the characters out of the buffer */
+ while (cp < end) {
+ if (fp && *fp)
+ printk(KERN_INFO "%s: %s on serial port\n",
+ strip_info->dev->name, TTYERROR(*fp));
+ if (fp && *fp++ && !strip_info->discard) { /* If there's a serial error, record it */
+ /* If we have some characters in the buffer, discard them */
+ strip_info->discard = strip_info->sx_count;
+ strip_info->rx_errors++;
+ }
+
+ /* Leading control characters (CR, NL, Tab, etc.) are ignored */
+ if (strip_info->sx_count > 0 || *cp >= ' ') {
+ if (*cp == 0x0D) { /* If end of packet, decide what to do with it */
+ if (strip_info->sx_count > 3000)
+ printk(KERN_INFO
+ "%s: Cut a %d byte packet (%zd bytes remaining)%s\n",
+ strip_info->dev->name,
+ strip_info->sx_count,
+ end - cp - 1,
+					       strip_info->discard ? " (discarded)" : "");
+ if (strip_info->sx_count >
+ strip_info->sx_size) {
+ strip_info->rx_over_errors++;
+ printk(KERN_INFO
+ "%s: sx_buff overflow (%d bytes total)\n",
+ strip_info->dev->name,
+ strip_info->sx_count);
+ } else if (strip_info->discard)
+ printk(KERN_INFO
+ "%s: Discarding bad packet (%d/%d)\n",
+ strip_info->dev->name,
+ strip_info->discard,
+ strip_info->sx_count);
+ else
+ process_message(strip_info);
+ strip_info->discard = 0;
+ strip_info->sx_count = 0;
+ } else {
+ /* Make sure we have space in the buffer */
+				if (strip_info->sx_count < strip_info->sx_size)
+					strip_info->sx_buff[strip_info->sx_count] = *cp;
+ strip_info->sx_count++;
+ }
+ }
+ cp++;
+ }
+ spin_unlock_bh(&strip_lock);
+}
+
+
+/************************************************************************/
+/* General control routines */
+
+static int set_mac_address(struct strip *strip_info,
+ MetricomAddress * addr)
+{
+ /*
+ * We're using a manually specified address if the address is set
+ * to anything other than all ones. Setting the address to all ones
+ * disables manual mode and goes back to automatic address determination
+ * (tracking the true address that the radio has).
+ */
+ strip_info->manual_dev_addr =
+ memcmp(addr->c, broadcast_address.c,
+ sizeof(broadcast_address));
+ if (strip_info->manual_dev_addr)
+ *(MetricomAddress *) strip_info->dev->dev_addr = *addr;
+ else
+ *(MetricomAddress *) strip_info->dev->dev_addr =
+ strip_info->true_dev_addr;
+ return 0;
+}
+
+static int strip_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct strip *strip_info = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+ printk(KERN_INFO "%s: strip_set_dev_mac_address called\n", dev->name);
+ set_mac_address(strip_info, (MetricomAddress *) sa->sa_data);
+ return 0;
+}
+
+static struct net_device_stats *strip_get_stats(struct net_device *dev)
+{
+ struct strip *strip_info = netdev_priv(dev);
+ static struct net_device_stats stats;
+
+ memset(&stats, 0, sizeof(struct net_device_stats));
+
+ stats.rx_packets = strip_info->rx_packets;
+ stats.tx_packets = strip_info->tx_packets;
+ stats.rx_dropped = strip_info->rx_dropped;
+ stats.tx_dropped = strip_info->tx_dropped;
+ stats.tx_errors = strip_info->tx_errors;
+ stats.rx_errors = strip_info->rx_errors;
+ stats.rx_over_errors = strip_info->rx_over_errors;
+ return (&stats);
+}
+
+
+/************************************************************************/
+/* Opening and closing */
+
+/*
+ * Here's the order things happen:
+ * When the user runs "slattach -p strip ..."
+ * 1. The TTY module calls strip_open
+ * 2. strip_open calls strip_alloc
+ * 3. strip_alloc calls register_netdev
+ * 4. register_netdev calls strip_dev_init
+ * 5. then strip_open finishes setting up the strip_info
+ *
+ * When the user runs "ifconfig st<x> up address netmask ..."
+ * 6. strip_open_low gets called
+ *
+ * When the user runs "ifconfig st<x> down"
+ * 7. strip_close_low gets called
+ *
+ * When the user kills the slattach process
+ * 8. strip_close gets called
+ * 9. strip_close calls dev_close
+ * 10. if the device is still up, then dev_close calls strip_close_low
+ * 11. strip_close calls strip_free
+ */
+
+/* Open the low-level part of the STRIP channel. Easy! */
+
+static int strip_open_low(struct net_device *dev)
+{
+ struct strip *strip_info = netdev_priv(dev);
+
+ if (strip_info->tty == NULL)
+ return (-ENODEV);
+
+ if (!allocate_buffers(strip_info, dev->mtu))
+ return (-ENOMEM);
+
+ strip_info->sx_count = 0;
+ strip_info->tx_left = 0;
+
+ strip_info->discard = 0;
+ strip_info->working = FALSE;
+ strip_info->firmware_level = NoStructure;
+ strip_info->next_command = CompatibilityCommand;
+ strip_info->user_baud = get_baud(strip_info->tty);
+
+ printk(KERN_INFO "%s: Initializing Radio.\n",
+ strip_info->dev->name);
+ ResetRadio(strip_info);
+ strip_info->idle_timer.expires = jiffies + 1 * HZ;
+ add_timer(&strip_info->idle_timer);
+ netif_wake_queue(dev);
+ return (0);
+}
+
+
+/*
+ * Close the low-level part of the STRIP channel. Easy!
+ */
+
+static int strip_close_low(struct net_device *dev)
+{
+ struct strip *strip_info = netdev_priv(dev);
+
+ if (strip_info->tty == NULL)
+ return -EBUSY;
+ strip_info->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ netif_stop_queue(dev);
+
+ /*
+ * Free all STRIP frame buffers.
+ */
+ if (strip_info->rx_buff) {
+ kfree(strip_info->rx_buff);
+ strip_info->rx_buff = NULL;
+ }
+ if (strip_info->sx_buff) {
+ kfree(strip_info->sx_buff);
+ strip_info->sx_buff = NULL;
+ }
+ if (strip_info->tx_buff) {
+ kfree(strip_info->tx_buff);
+ strip_info->tx_buff = NULL;
+ }
+ del_timer(&strip_info->idle_timer);
+ return 0;
+}
+
+/*
+ * This routine is called by DDI when the
+ * (dynamically assigned) device is registered
+ */
+
+static void strip_dev_setup(struct net_device *dev)
+{
+ /*
+ * Finish setting up the DEVICE info.
+ */
+
+ SET_MODULE_OWNER(dev);
+
+ dev->trans_start = 0;
+ dev->last_rx = 0;
+ dev->tx_queue_len = 30; /* Drop after 30 frames queued */
+
+ dev->flags = 0;
+ dev->mtu = DEFAULT_STRIP_MTU;
+ dev->type = ARPHRD_METRICOM; /* dtang */
+ dev->hard_header_len = sizeof(STRIP_Header);
+ /*
+ * dev->priv Already holds a pointer to our struct strip
+ */
+
+ *(MetricomAddress *) & dev->broadcast = broadcast_address;
+ dev->dev_addr[0] = 0;
+ dev->addr_len = sizeof(MetricomAddress);
+
+ /*
+ * Pointers to interface service routines.
+ */
+
+ dev->open = strip_open_low;
+ dev->stop = strip_close_low;
+ dev->hard_start_xmit = strip_xmit;
+ dev->hard_header = strip_header;
+ dev->rebuild_header = strip_rebuild_header;
+ dev->set_mac_address = strip_set_mac_address;
+ dev->get_stats = strip_get_stats;
+ dev->change_mtu = strip_change_mtu;
+}
+
+/*
+ * Free a STRIP channel.
+ */
+
+static void strip_free(struct strip *strip_info)
+{
+ spin_lock_bh(&strip_lock);
+ list_del_rcu(&strip_info->list);
+ spin_unlock_bh(&strip_lock);
+
+ strip_info->magic = 0;
+
+ free_netdev(strip_info->dev);
+}
+
+
+/*
+ * Allocate a new free STRIP channel
+ */
+static struct strip *strip_alloc(void)
+{
+ struct list_head *n;
+ struct net_device *dev;
+ struct strip *strip_info;
+
+ dev = alloc_netdev(sizeof(struct strip), "st%d",
+ strip_dev_setup);
+
+ if (!dev)
+ return NULL; /* If no more memory, return */
+
+
+ strip_info = dev->priv;
+ strip_info->dev = dev;
+
+ strip_info->magic = STRIP_MAGIC;
+ strip_info->tty = NULL;
+
+ strip_info->gratuitous_arp = jiffies + LongTime;
+ strip_info->arp_interval = 0;
+ init_timer(&strip_info->idle_timer);
+ strip_info->idle_timer.data = (long) dev;
+ strip_info->idle_timer.function = strip_IdleTask;
+
+
+ spin_lock_bh(&strip_lock);
+ rescan:
+ /*
+ * Search the list to find where to put our new entry
+ * (and in the process decide what channel number it is
+ * going to be)
+ */
+ list_for_each(n, &strip_list) {
+ struct strip *s = hlist_entry(n, struct strip, list);
+
+ if (s->dev->base_addr == dev->base_addr) {
+ ++dev->base_addr;
+ goto rescan;
+ }
+ }
+
+ sprintf(dev->name, "st%ld", dev->base_addr);
+
+ list_add_tail_rcu(&strip_info->list, &strip_list);
+ spin_unlock_bh(&strip_lock);
+
+ return strip_info;
+}
+
+/*
+ * Open the high-level part of the STRIP channel.
+ * This function is called by the TTY module when the
+ * STRIP line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free STRIP channel...
+ */
+
+static int strip_open(struct tty_struct *tty)
+{
+ struct strip *strip_info = (struct strip *) tty->disc_data;
+
+ /*
+ * First make sure we're not already connected.
+ */
+
+ if (strip_info && strip_info->magic == STRIP_MAGIC)
+ return -EEXIST;
+
+ /*
+ * OK. Find a free STRIP channel to use.
+ */
+ if ((strip_info = strip_alloc()) == NULL)
+ return -ENFILE;
+
+ /*
+ * Register our newly created device so it can be ifconfig'd
+ * strip_dev_init() will be called as a side-effect
+ */
+
+ if (register_netdev(strip_info->dev) != 0) {
+ printk(KERN_ERR "strip: register_netdev() failed.\n");
+ strip_free(strip_info);
+ return -ENFILE;
+ }
+
+ strip_info->tty = tty;
+ tty->disc_data = strip_info;
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ /*
+ * Restore default settings
+ */
+
+ strip_info->dev->type = ARPHRD_METRICOM; /* dtang */
+
+ /*
+ * Set tty options
+ */
+
+ tty->termios->c_iflag |= IGNBRK | IGNPAR; /* Ignore breaks and parity errors. */
+ tty->termios->c_cflag |= CLOCAL; /* Ignore modem control signals. */
+ tty->termios->c_cflag &= ~HUPCL; /* Don't close on hup */
+
+ printk(KERN_INFO "STRIP: device \"%s\" activated\n",
+ strip_info->dev->name);
+
+ /*
+ * Done. We have linked the TTY line to a channel.
+ */
+ return (strip_info->dev->base_addr);
+}
+
+/*
+ * Close down a STRIP channel.
+ * This means flushing out any pending queues, and then restoring the
+ * TTY line discipline to what it was before it got hooked to STRIP
+ * (which usually is TTY again).
+ */
+
+static void strip_close(struct tty_struct *tty)
+{
+ struct strip *strip_info = (struct strip *) tty->disc_data;
+
+ /*
+ * First make sure we're connected.
+ */
+
+ if (!strip_info || strip_info->magic != STRIP_MAGIC)
+ return;
+
+ unregister_netdev(strip_info->dev);
+
+ tty->disc_data = NULL;
+ strip_info->tty = NULL;
+ printk(KERN_INFO "STRIP: device \"%s\" closed down\n",
+ strip_info->dev->name);
+ strip_free(strip_info);
+ tty->disc_data = NULL;
+}
+
+
+/************************************************************************/
+/* Perform I/O control calls on an active STRIP channel. */
+
+static int strip_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct strip *strip_info = (struct strip *) tty->disc_data;
+
+ /*
+ * First make sure we're connected.
+ */
+
+ if (!strip_info || strip_info->magic != STRIP_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ if(copy_to_user((void __user *) arg, strip_info->dev->name, strlen(strip_info->dev->name) + 1))
+ return -EFAULT;
+ break;
+ case SIOCSIFHWADDR:
+ {
+ MetricomAddress addr;
+ //printk(KERN_INFO "%s: SIOCSIFHWADDR\n", strip_info->dev->name);
+ if(copy_from_user(&addr, (void __user *) arg, sizeof(MetricomAddress)))
+ return -EFAULT;
+ return set_mac_address(strip_info, &addr);
+ }
+ /*
+ * Allow stty to read, but not set, the serial port
+ */
+
+ case TCGETS:
+ case TCGETA:
+ return n_tty_ioctl(tty, file, cmd, arg);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ break;
+ }
+ return 0;
+}
+
+
+/************************************************************************/
+/* Initialization */
+
+static struct tty_ldisc strip_ldisc = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "strip",
+ .owner = THIS_MODULE,
+ .open = strip_open,
+ .close = strip_close,
+ .ioctl = strip_ioctl,
+ .receive_buf = strip_receive_buf,
+ .receive_room = strip_receive_room,
+ .write_wakeup = strip_write_some_more,
+};
+
+/*
+ * Initialize the STRIP driver.
+ * This routine is called at boot time, to bootstrap the multi-channel
+ * STRIP driver
+ */
+
+static char signon[] __initdata =
+ KERN_INFO "STRIP: Version %s (unlimited channels)\n";
+
+static int __init strip_init_driver(void)
+{
+ int status;
+
+ printk(signon, StripVersion);
+
+
+ /*
+ * Fill in our line protocol discipline, and register it
+ */
+ if ((status = tty_register_ldisc(N_STRIP, &strip_ldisc)))
+ printk(KERN_ERR "STRIP: can't register line discipline (err = %d)\n",
+ status);
+
+ /*
+ * Register the status file with /proc
+ */
+ proc_net_fops_create("strip", S_IFREG | S_IRUGO, &strip_seq_fops);
+
+ return status;
+}
+
+module_init(strip_init_driver);
+
+static const char signoff[] __exitdata =
+ KERN_INFO "STRIP: Module Unloaded\n";
+
+static void __exit strip_exit_driver(void)
+{
+ int i;
+ struct list_head *p,*n;
+
+ /* module ref count rules assure that all entries are unregistered */
+ list_for_each_safe(p, n, &strip_list) {
+ struct strip *s = list_entry(p, struct strip, list);
+ strip_free(s);
+ }
+
+ /* Unregister with the /proc/net file here. */
+ proc_net_remove("strip");
+
+ if ((i = tty_register_ldisc(N_STRIP, NULL)))
+ printk(KERN_ERR "STRIP: can't unregister line discipline (err = %d)\n", i);
+
+ printk(signoff);
+}
+
+module_exit(strip_exit_driver);
+
+MODULE_AUTHOR("Stuart Cheshire <cheshire@cs.stanford.edu>");
+MODULE_DESCRIPTION("Starmode Radio IP (STRIP) Device Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_SUPPORTED_DEVICE("Starmode Radio IP (STRIP) modem");
diff --git a/drivers/net/wireless/todo.txt b/drivers/net/wireless/todo.txt
new file mode 100644
index 000000000000..32234018de72
--- /dev/null
+++ b/drivers/net/wireless/todo.txt
@@ -0,0 +1,15 @@
+ Wireless Todo
+ -------------
+
+1) Bring other kernel Wireless LAN drivers here
+ Completed
+
+2) Bring new Wireless LAN drivers not yet in the kernel here
+ See my web page for details
+ In particular : HostAP
+
+3) Misc
+ o Mark wavelan, wavelan_cs, netwave_cs drivers as obsolete
+ o Maybe arlan.c, ray_cs.c and strip.c also deserve to be obsolete
+
+ Jean II
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
new file mode 100644
index 000000000000..7a5e20a17890
--- /dev/null
+++ b/drivers/net/wireless/wavelan.c
@@ -0,0 +1,4452 @@
+/*
+ * WaveLAN ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows (also see the end of this file).
+ * See wavelan.p.h for details.
+ *
+ *
+ *
+ * AT&T GIS (nee NCR) WaveLAN card:
+ * An Ethernet-like radio transceiver
+ * controlled by an Intel 82586 coprocessor.
+ */
+
+#include "wavelan.p.h" /* Private header */
+
+/************************* MISC SUBROUTINES **************************/
+/*
+ * Subroutines which won't fit in one of the following category
+ * (WaveLAN modem or i82586)
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate irq number to PSA irq parameter
+ */
+static u8 wv_irq_to_psa(int irq)
+{
+ if (irq < 0 || irq >= NELS(irqvals))
+ return 0;
+
+ return irqvals[irq];
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate PSA irq parameter to irq number
+ */
+static int __init wv_psa_to_irq(u8 irqval)
+{
+ int irq;
+
+ for (irq = 0; irq < NELS(irqvals); irq++)
+ if (irqvals[irq] == irqval)
+ return irq;
+
+ return -1;
+}
+
+#ifdef STRUCT_CHECK
+/*------------------------------------------------------------------*/
+/*
+ * Sanity routine to verify the sizes of the various WaveLAN interface
+ * structures.
+ */
+static char *wv_struct_check(void)
+{
+#define SC(t,s,n) if (sizeof(t) != s) return(n);
+
+ SC(psa_t, PSA_SIZE, "psa_t");
+ SC(mmw_t, MMW_SIZE, "mmw_t");
+ SC(mmr_t, MMR_SIZE, "mmr_t");
+ SC(ha_t, HA_SIZE, "ha_t");
+
+#undef SC
+
+ return ((char *) NULL);
+} /* wv_struct_check */
+#endif /* STRUCT_CHECK */
+
+/********************* HOST ADAPTER SUBROUTINES *********************/
+/*
+ * Useful subroutines to manage the WaveLAN ISA interface
+ *
+ * One major difference with the PCMCIA hardware (except the port mapping)
+ * is that we have to keep the state of the Host Control Register
+ * because of the interrupt enable & bus size flags.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read from card's Host Adaptor Status Register.
+ */
+static inline u16 hasr_read(unsigned long ioaddr)
+{
+ return (inw(HASR(ioaddr)));
+} /* hasr_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register.
+ */
+static inline void hacr_write(unsigned long ioaddr, u16 hacr)
+{
+ outw(hacr, HACR(ioaddr));
+} /* hacr_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register. Include a delay for
+ * those times when it is needed.
+ */
+static inline void hacr_write_slow(unsigned long ioaddr, u16 hacr)
+{
+ hacr_write(ioaddr, hacr);
+ /* delay might only be needed sometimes */
+ mdelay(1);
+} /* hacr_write_slow */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the channel attention bit.
+ */
+static inline void set_chan_attn(unsigned long ioaddr, u16 hacr)
+{
+ hacr_write(ioaddr, hacr | HACR_CA);
+} /* set_chan_attn */
+
+/*------------------------------------------------------------------*/
+/*
+ * Reset, and then set host adaptor into default mode.
+ */
+static inline void wv_hacr_reset(unsigned long ioaddr)
+{
+ hacr_write_slow(ioaddr, HACR_RESET);
+ hacr_write(ioaddr, HACR_DEFAULT);
+} /* wv_hacr_reset */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the I/O transfer over the ISA bus to 8-bit mode
+ */
+static inline void wv_16_off(unsigned long ioaddr, u16 hacr)
+{
+ hacr &= ~HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the I/O transfer over the ISA bus to 16-bit mode
+ */
+static inline void wv_16_on(unsigned long ioaddr, u16 hacr)
+{
+ hacr |= HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_on */
+
+/*------------------------------------------------------------------*/
+/*
+ * Disable interrupts on the WaveLAN hardware.
+ * (called by wv_82586_stop())
+ */
+static inline void wv_ints_off(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+ lp->hacr &= ~HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+} /* wv_ints_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Enable interrupts on the WaveLAN hardware.
+ * (called by wv_hw_reset())
+ */
+static inline void wv_ints_on(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+ lp->hacr |= HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+} /* wv_ints_on */
+
+/******************* MODEM MANAGEMENT SUBROUTINES *******************/
+/*
+ * Useful subroutines to manage the modem of the WaveLAN
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the Parameter Storage Area in the WaveLAN card's memory.
+ */
+static void psa_read(unsigned long ioaddr, u16 hacr, int o, /* offset in PSA */
+ u8 * b, /* buffer to fill */
+ int n)
+{ /* size to read */
+ wv_16_off(ioaddr, hacr);
+
+ while (n-- > 0) {
+ outw(o, PIOR2(ioaddr));
+ o++;
+ *b++ = inb(PIOP2(ioaddr));
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write the Parameter Storage Area to the WaveLAN card's memory.
+ */
+static void psa_write(unsigned long ioaddr, u16 hacr, int o, /* Offset in PSA */
+ u8 * b, /* Buffer in memory */
+ int n)
+{ /* Length of buffer */
+ int count = 0;
+
+ wv_16_off(ioaddr, hacr);
+
+ while (n-- > 0) {
+ outw(o, PIOR2(ioaddr));
+ o++;
+
+ outb(*b, PIOP2(ioaddr));
+ b++;
+
+ /* Wait for the memory to finish its write cycle */
+ count = 0;
+		while ((count++ < 100) &&
+		       (hasr_read(ioaddr) & HASR_PSA_BUSY))
+			mdelay(1);
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_write */
+
+#ifdef SET_PSA_CRC
+/*------------------------------------------------------------------*/
+/*
+ * Calculate the PSA CRC
+ * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
+ * NOTE: By specifying a length including the CRC position the
+ * returned value should be zero. (i.e. a correct checksum in the PSA)
+ *
+ * The Windows drivers don't use the CRC, but the AP and the PtP tool
+ * depend on it.
+ */
+static inline u16 psa_crc(u8 * psa, /* The PSA */
+ int size)
+{ /* Number of short for CRC */
+ int byte_cnt; /* Loop on the PSA */
+ u16 crc_bytes = 0; /* Data in the PSA */
+ int bit_cnt; /* Loop on the bits of the short */
+
+ for (byte_cnt = 0; byte_cnt < size; byte_cnt++) {
+		crc_bytes ^= psa[byte_cnt];	/* It's an XOR */
+
+ for (bit_cnt = 1; bit_cnt < 9; bit_cnt++) {
+ if (crc_bytes & 0x0001)
+ crc_bytes = (crc_bytes >> 1) ^ 0xA001;
+ else
+ crc_bytes >>= 1;
+ }
+ }
+
+ return crc_bytes;
+} /* psa_crc */
+#endif /* SET_PSA_CRC */
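+
+/*
+ * Editor's illustrative sketch, not part of the driver: psa_crc() above
+ * looks like the standard reflected CRC-16 with polynomial 0xA001 (often
+ * called CRC-16/ARC).  Under that assumption the usual check value applies;
+ * kept under #if 0 since psa_crc() itself is only built with SET_PSA_CRC.
+ */
+#if 0
+static void psa_crc_selftest(void)
+{
+	/* The CRC-16/ARC check value for the ASCII string "123456789" is 0xBB3D. */
+	if (psa_crc((u8 *) "123456789", 9) != 0xBB3D)
+		printk(KERN_DEBUG "wavelan: psa_crc self-test failed\n");
+}
+#endif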
+
+/*------------------------------------------------------------------*/
+/*
+ * update the checksum field in the Wavelan's PSA
+ */
+static void update_psa_checksum(struct net_device * dev, unsigned long ioaddr, u16 hacr)
+{
+#ifdef SET_PSA_CRC
+ psa_t psa;
+ u16 crc;
+
+ /* read the parameter storage area */
+ psa_read(ioaddr, hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+ /* update the checksum */
+ crc = psa_crc((unsigned char *) &psa,
+ sizeof(psa) - sizeof(psa.psa_crc[0]) -
+ sizeof(psa.psa_crc[1])
+ - sizeof(psa.psa_crc_status));
+
+ psa.psa_crc[0] = crc & 0xFF;
+ psa.psa_crc[1] = (crc & 0xFF00) >> 8;
+
+ /* Write it ! */
+ psa_write(ioaddr, hacr, (char *) &psa.psa_crc - (char *) &psa,
+ (unsigned char *) &psa.psa_crc, 2);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "%s: update_psa_checksum(): crc = 0x%02x%02x\n",
+ dev->name, psa.psa_crc[0], psa.psa_crc[1]);
+
+ /* Check again (luxury !) */
+ crc = psa_crc((unsigned char *) &psa,
+ sizeof(psa) - sizeof(psa.psa_crc_status));
+
+ if (crc != 0)
+ printk(KERN_WARNING
+ "%s: update_psa_checksum(): CRC does not agree with PSA data (even after recalculating)\n",
+ dev->name);
+#endif /* DEBUG_IOCTL_INFO */
+#endif /* SET_PSA_CRC */
+} /* update_psa_checksum */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write 1 byte to the MMC.
+ */
+static inline void mmc_out(unsigned long ioaddr, u16 o, u8 d)
+{
+ int count = 0;
+
+ /* Wait for MMC to go idle */
+ while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
+ udelay(10);
+
+ outw((u16) (((u16) d << 8) | (o << 1) | 1), MMCR(ioaddr));
+}
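+
+/*
+ * Editor's note (inferred from the outw() above and from mmc_in() below,
+ * so treat it as an assumption): the MMCR register appears to pack one MMC
+ * transaction as bits 8-15 = data byte, bits 1-7 = MMC register address,
+ * bit 0 = write flag, which is why mmc_in() issues "o << 1" with bit 0 clear.
+ */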
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to write bytes to the Modem Management Controller.
+ * We start at the end because it is the way it should be!
+ */
+static inline void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n)
+{
+ o += n;
+ b += n;
+
+ while (n-- > 0)
+ mmc_out(ioaddr, --o, *(--b));
+} /* mmc_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read a byte from the MMC.
+ * Optimised version for 1 byte, avoid using memory.
+ */
+static inline u8 mmc_in(unsigned long ioaddr, u16 o)
+{
+ int count = 0;
+
+ while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
+ udelay(10);
+ outw(o << 1, MMCR(ioaddr));
+
+ while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
+ udelay(10);
+ return (u8) (inw(MMCR(ioaddr)) >> 8);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to read bytes from the Modem Management Controller.
+ * The implementation is complicated by a lack of address lines,
+ * which prevents decoding of the low-order bit.
+ * (that handling has been moved into mmc_in() above)
+ * We start at the end because it is the way it should be!
+ */
+static inline void mmc_read(unsigned long ioaddr, u8 o, u8 * b, int n)
+{
+ o += n;
+ b += n;
+
+ while (n-- > 0)
+ *(--b) = mmc_in(ioaddr, --o);
+} /* mmc_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the type of encryption available.
+ */
+static inline int mmc_encr(unsigned long ioaddr)
+{ /* I/O port of the card */
+ int temp;
+
+ temp = mmc_in(ioaddr, mmroff(0, mmr_des_avail));
+ if ((temp != MMR_DES_AVAIL_DES) && (temp != MMR_DES_AVAIL_AES))
+ return 0;
+ else
+ return temp;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wait for the frequency EEPROM to complete a command.
+ * I hope this one will be optimally inlined.
+ */
+static inline void fee_wait(unsigned long ioaddr, /* I/O port of the card */
+ int delay, /* Base delay to wait for */
+ int number)
+{ /* Number of time to wait */
+ int count = 0; /* Wait only a limited time */
+
+	while ((count++ < number) &&
+	       (mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+		MMR_FEE_STATUS_BUSY))
+		udelay(delay);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the Frequency EEPROM (frequency select cards).
+ */
+static void fee_read(unsigned long ioaddr, /* I/O port of the card */
+ u16 o, /* destination offset */
+ u16 * b, /* data buffer */
+ int n)
+{ /* number of registers */
+ b += n; /* Position at the end of the area */
+
+ /* Write the address */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop on all buffer */
+ while (n-- > 0) {
+ /* Write the read command */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ);
+
+ /* Wait until EEPROM is ready (should be quick). */
+ fee_wait(ioaddr, 10, 100);
+
+ /* Read the value. */
+ *--b = ((mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)) << 8) |
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+ }
+}
+
+#ifdef WIRELESS_EXT /* if the wireless extension exists in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes from the Frequency EEPROM (frequency select cards).
+ * This is a bit complicated, because the frequency EEPROM has to
+ * be unprotected and the write enabled.
+ * Jean II
+ */
+static void fee_write(unsigned long ioaddr, /* I/O port of the card */
+ u16 o, /* destination offset */
+ u16 * b, /* data buffer */
+ int n)
+{ /* number of registers */
+ b += n; /* Position at the end of the area. */
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* Ask to read the protected register */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Read the protected register. */
+ printk("Protected 2: %02X-%02X\n",
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)),
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ /* Enable protected register. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Unprotect area. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* or use: */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ fee_wait(ioaddr, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+
+ /* Write enable. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Write the EEPROM address. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop on all buffer */
+ while (n-- > 0) {
+ /* Write the value. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
+
+ /* Write the write command. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_WRITE);
+
+ /* WaveLAN documentation says to wait at least 10 ms for EEBUSY = 0 */
+ mdelay(10);
+ fee_wait(ioaddr, 10, 100);
+ }
+
+ /* Write disable. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
+
+ fee_wait(ioaddr, 10, 100);
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+ /* Reprotect EEPROM. */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x00);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+
+ fee_wait(ioaddr, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+}
+#endif /* WIRELESS_EXT */
+
+/************************ I82586 SUBROUTINES *************************/
+/*
+ * Useful subroutines to manage the Ethernet controller
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the on-board RAM.
+ * Why does inlining this function make it fail?
+ */
+static /*inline */ void obram_read(unsigned long ioaddr,
+ u16 o, u8 * b, int n)
+{
+ outw(o, PIOR1(ioaddr));
+ insw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
+}
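+
+/*
+ * Editor's note: the "(n + 1) >> 1" above rounds the byte count up to whole
+ * 16-bit words, since the on-board RAM is moved a word at a time through the
+ * PIO port; e.g. n = 5 bytes transfers 3 words.  obram_write() below uses
+ * the same rounding.
+ */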
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes to the on-board RAM.
+ */
+static inline void obram_write(unsigned long ioaddr, u16 o, u8 * b, int n)
+{
+ outw(o, PIOR1(ioaddr));
+ outsw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Acknowledge the reading of the status issued by the i82586.
+ */
+static void wv_ack(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 scb_cs;
+ int i;
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ scb_cs &= SCB_ST_INT;
+
+ if (scb_cs == 0)
+ return;
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--) {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+ udelay(100);
+
+#ifdef DEBUG_CONFIG_ERROR
+ if (i <= 0)
+ printk(KERN_INFO
+ "%s: wv_ack(): board not accepting command.\n",
+ dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set channel attention bit and busy wait until command has
+ * completed, then acknowledge completion of the command.
+ */
+static inline int wv_synchronous_cmd(struct net_device * dev, const char *str)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 scb_cmd;
+ ach_t cb;
+ int i;
+
+ scb_cmd = SCB_CMD_CUC & SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cmd, sizeof(scb_cmd));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--) {
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *) &cb,
+ sizeof(cb));
+ if (cb.ac_status & AC_SFLD_C)
+ break;
+
+ udelay(10);
+ }
+ udelay(100);
+
+ if (i <= 0 || !(cb.ac_status & AC_SFLD_OK)) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO "%s: %s failed; status = 0x%x\n",
+ dev->name, str, cb.ac_status);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_scb_show(ioaddr);
+#endif
+ return -1;
+ }
+
+ /* Ack the status */
+ wv_ack(dev);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Configuration commands completion interrupt.
+ * Check if done, and if OK.
+ */
+static inline int
+wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp)
+{
+ unsigned short mcs_addr;
+ unsigned short status;
+ int ret;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wv_config_complete()\n", dev->name);
+#endif
+
+ mcs_addr = lp->tx_first_in_use + sizeof(ac_tx_t) + sizeof(ac_nop_t)
+ + sizeof(tbd_t) + sizeof(ac_cfg_t) + sizeof(ac_ias_t);
+
+ /* Read the status of the last command (set mc list). */
+ obram_read(ioaddr, acoff(mcs_addr, ac_status),
+ (unsigned char *) &status, sizeof(status));
+
+ /* If not completed -> exit */
+ if ((status & AC_SFLD_C) == 0)
+ ret = 0; /* Not ready to be scrapped */
+ else {
+#ifdef DEBUG_CONFIG_ERROR
+ unsigned short cfg_addr;
+ unsigned short ias_addr;
+
+ /* Check mc_config command */
+ if ((status & AC_SFLD_OK) != AC_SFLD_OK)
+ printk(KERN_INFO
+ "%s: wv_config_complete(): set_multicast_address failed; status = 0x%x\n",
+ dev->name, status);
+
+ /* check ia-config command */
+ ias_addr = mcs_addr - sizeof(ac_ias_t);
+ obram_read(ioaddr, acoff(ias_addr, ac_status),
+ (unsigned char *) &status, sizeof(status));
+ if ((status & AC_SFLD_OK) != AC_SFLD_OK)
+ printk(KERN_INFO
+ "%s: wv_config_complete(): set_MAC_address failed; status = 0x%x\n",
+ dev->name, status);
+
+ /* Check config command. */
+ cfg_addr = ias_addr - sizeof(ac_cfg_t);
+ obram_read(ioaddr, acoff(cfg_addr, ac_status),
+ (unsigned char *) &status, sizeof(status));
+ if ((status & AC_SFLD_OK) != AC_SFLD_OK)
+ printk(KERN_INFO
+ "%s: wv_config_complete(): configure failed; status = 0x%x\n",
+ dev->name, status);
+#endif /* DEBUG_CONFIG_ERROR */
+
+ ret = 1; /* Ready to be scrapped */
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wv_config_complete() - %d\n", dev->name,
+ ret);
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Command completion interrupt.
+ * Reclaim as many freed tx buffers as we can.
+ * (called in wavelan_interrupt()).
+ * Note : the spinlock is already grabbed for us.
+ */
+static int wv_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp)
+{
+ int nreaped = 0;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wv_complete()\n", dev->name);
+#endif
+
+ /* Loop on all the transmit buffers */
+ while (lp->tx_first_in_use != I82586NULL) {
+ unsigned short tx_status;
+
+ /* Read the first transmit buffer */
+ obram_read(ioaddr, acoff(lp->tx_first_in_use, ac_status),
+ (unsigned char *) &tx_status,
+ sizeof(tx_status));
+
+ /* If not completed -> exit */
+ if ((tx_status & AC_SFLD_C) == 0)
+ break;
+
+ /* Hack for reconfiguration */
+ if (tx_status == 0xFFFF)
+ if (!wv_config_complete(dev, ioaddr, lp))
+ break; /* Not completed */
+
+ /* We now remove this buffer */
+ nreaped++;
+ --lp->tx_n_in_use;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ /* Was it the last one? */
+ if (lp->tx_n_in_use <= 0)
+ lp->tx_first_in_use = I82586NULL;
+ else {
+ /* Next one in the chain */
+ lp->tx_first_in_use += TXBLOCKZ;
+			if (lp->tx_first_in_use >=
+			    OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+				lp->tx_first_in_use -= NTXBLOCKS * TXBLOCKZ;
+ }
+
+ /* Hack for reconfiguration */
+ if (tx_status == 0xFFFF)
+ continue;
+
+ /* Now, check status of the finished command */
+ if (tx_status & AC_SFLD_OK) {
+ int ncollisions;
+
+ lp->stats.tx_packets++;
+ ncollisions = tx_status & AC_SFLD_MAXCOL;
+ lp->stats.collisions += ncollisions;
+#ifdef DEBUG_TX_INFO
+ if (ncollisions > 0)
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx completed after %d collisions.\n",
+ dev->name, ncollisions);
+#endif
+ } else {
+ lp->stats.tx_errors++;
+ if (tx_status & AC_SFLD_S10) {
+ lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx error: no CS.\n",
+ dev->name);
+#endif
+ }
+ if (tx_status & AC_SFLD_S9) {
+ lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx error: lost CTS.\n",
+ dev->name);
+#endif
+ }
+ if (tx_status & AC_SFLD_S8) {
+ lp->stats.tx_fifo_errors++;
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx error: slow DMA.\n",
+ dev->name);
+#endif
+ }
+ if (tx_status & AC_SFLD_S6) {
+ lp->stats.tx_heartbeat_errors++;
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx error: heart beat.\n",
+ dev->name);
+#endif
+ }
+ if (tx_status & AC_SFLD_S5) {
+ lp->stats.tx_aborted_errors++;
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx error: too many collisions.\n",
+ dev->name);
+#endif
+ }
+ }
+
+#ifdef DEBUG_TX_INFO
+ printk(KERN_DEBUG
+ "%s: wv_complete(): tx completed, tx_status 0x%04x\n",
+ dev->name, tx_status);
+#endif
+ }
+
+#ifdef DEBUG_INTERRUPT_INFO
+ if (nreaped > 1)
+ printk(KERN_DEBUG "%s: wv_complete(): reaped %d\n",
+ dev->name, nreaped);
+#endif
+
+ /*
+ * Inform upper layers.
+ */
+ if (lp->tx_n_in_use < NTXBLOCKS - 1) {
+ netif_wake_queue(dev);
+ }
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wv_complete()\n", dev->name);
+#endif
+ return nreaped;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Reconfigure the i82586, or at least ask for it.
+ * Because wv_82586_config() uses a transmit buffer, we must call it
+ * only when we are sure one is free: either right now, or later from
+ * wavelan_packet_xmit() (there is no better place; wavelan_interrupt()
+ * is not an option), so the reconfiguration may sometimes be delayed.
+ */
+static inline void wv_82586_reconfig(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long flags;
+
+ /* Arm the flag, will be cleared in wv_82586_config() */
+ lp->reconfig_82586 = 1;
+
+ /* Check if we can do it now ! */
+ if((netif_running(dev)) && !(netif_queue_stopped(dev))) {
+ spin_lock_irqsave(&lp->spinlock, flags);
+ /* May fail */
+ wv_82586_config(dev);
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+ }
+ else {
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG
+ "%s: wv_82586_reconfig(): delayed (state = %lX)\n",
+ dev->name, dev->state);
+#endif
+ }
+}
+
+/********************* DEBUG & INFO SUBROUTINES *********************/
+/*
+ * These routines are used in the code to show debugging information.
+ * Most of the time, they dump the contents of hardware structures.
+ */
+
+#ifdef DEBUG_PSA_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted contents of the Parameter Storage Area.
+ */
+static void wv_psa_show(psa_t * p)
+{
+ printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n");
+ printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
+ p->psa_io_base_addr_1,
+ p->psa_io_base_addr_2,
+ p->psa_io_base_addr_3, p->psa_io_base_addr_4);
+ printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
+ p->psa_rem_boot_addr_1,
+ p->psa_rem_boot_addr_2, p->psa_rem_boot_addr_3);
+ printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
+ printk("psa_int_req_no: %d\n", p->psa_int_req_no);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG
+ "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_unused0[0], p->psa_unused0[1], p->psa_unused0[2],
+ p->psa_unused0[3], p->psa_unused0[4], p->psa_unused0[5],
+ p->psa_unused0[6]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG
+ "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_univ_mac_addr[0], p->psa_univ_mac_addr[1],
+ p->psa_univ_mac_addr[2], p->psa_univ_mac_addr[3],
+ p->psa_univ_mac_addr[4], p->psa_univ_mac_addr[5]);
+ printk(KERN_DEBUG
+ "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_local_mac_addr[0], p->psa_local_mac_addr[1],
+ p->psa_local_mac_addr[2], p->psa_local_mac_addr[3],
+ p->psa_local_mac_addr[4], p->psa_local_mac_addr[5]);
+ printk(KERN_DEBUG "psa_univ_local_sel: %d, ",
+ p->psa_univ_local_sel);
+ printk("psa_comp_number: %d, ", p->psa_comp_number);
+ printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
+ printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
+ p->psa_feature_select);
+ printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
+ printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
+ printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
+ printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0],
+ p->psa_nwid[1]);
+ printk("psa_nwid_select: %d\n", p->psa_nwid_select);
+ printk(KERN_DEBUG "psa_encryption_select: %d, ",
+ p->psa_encryption_select);
+ printk("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_encryption_key[0], p->psa_encryption_key[1],
+ p->psa_encryption_key[2], p->psa_encryption_key[3],
+ p->psa_encryption_key[4], p->psa_encryption_key[5],
+ p->psa_encryption_key[6], p->psa_encryption_key[7]);
+ printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
+ printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
+ p->psa_call_code[0]);
+ printk("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_call_code[0], p->psa_call_code[1], p->psa_call_code[2],
+ p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
+ p->psa_call_code[6], p->psa_call_code[7]);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ p->psa_reserved[0],
+ p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
+ printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+ printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
+} /* wv_psa_show */
+#endif /* DEBUG_PSA_SHOW */
+
+#ifdef DEBUG_MMC_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the Modem Management Controller.
+ * This function needs to be completed.
+ */
+static void wv_mmc_show(struct net_device * dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv;
+ mmr_t m;
+
+ /* Basic check */
+ if (hasr_read(ioaddr) & HASR_NO_CLK) {
+ printk(KERN_WARNING
+ "%s: wv_mmc_show: modem not connected\n",
+ dev->name);
+ return;
+ }
+
+ /* Read the mmc */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+ mmc_read(ioaddr, 0, (u8 *) &m, sizeof(m));
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
+ /* Don't forget to update statistics */
+ lp->wstats.discard.nwid +=
+ (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+#endif /* WIRELESS_EXT */
+
+ printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG
+ "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused0[0], m.mmr_unused0[1], m.mmr_unused0[2],
+ m.mmr_unused0[3], m.mmr_unused0[4], m.mmr_unused0[5],
+ m.mmr_unused0[6], m.mmr_unused0[7]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "Encryption algorithm: %02X - Status: %02X\n",
+ m.mmr_des_avail, m.mmr_des_status);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused1[0],
+ m.mmr_unused1[1],
+ m.mmr_unused1[2], m.mmr_unused1[3], m.mmr_unused1[4]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
+ m.mmr_dce_status,
+ (m.
+ mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ?
+ "energy detected," : "",
+ (m.
+ mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
+ "loop test indicated," : "",
+ (m.
+ mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ?
+ "transmitter on," : "",
+ (m.
+ mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
+ "jabber timer expired," : "");
+ printk(KERN_DEBUG "Dsp ID: %02X\n", m.mmr_dsp_id);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
+ m.mmr_unused2[0], m.mmr_unused2[1]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
+ (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
+ (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
+ printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
+ m.mmr_thr_pre_set & MMR_THR_PRE_SET,
+ (m.
+ mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" :
+ "below");
+ printk(KERN_DEBUG "signal_lvl: %d [%s], ",
+ m.mmr_signal_lvl & MMR_SIGNAL_LVL,
+ (m.
+ mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" :
+ "no new msg");
+ printk("silence_lvl: %d [%s], ",
+ m.mmr_silence_lvl & MMR_SILENCE_LVL,
+ (m.
+ mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" :
+ "no new update");
+ printk("sgnl_qual: 0x%x [%s]\n", m.mmr_sgnl_qual & MMR_SGNL_QUAL,
+ (m.
+ mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" :
+ "Antenna 0");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
+#endif /* DEBUG_SHOW_UNUSED */
+} /* wv_mmc_show */
+#endif /* DEBUG_MMC_SHOW */
+
+#ifdef DEBUG_I82586_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the i82586 System Control Block (the last block of the on-board memory).
+ */
+static void wv_scb_show(unsigned long ioaddr)
+{
+ scb_t scb;
+
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
+ sizeof(scb));
+
+ printk(KERN_DEBUG "##### WaveLAN system control block: #####\n");
+
+ printk(KERN_DEBUG "status: ");
+ printk("stat 0x%x[%s%s%s%s] ",
+ (scb.
+ scb_status & (SCB_ST_CX | SCB_ST_FR | SCB_ST_CNA |
+ SCB_ST_RNR)) >> 12,
+ (scb.
+ scb_status & SCB_ST_CX) ? "command completion interrupt," :
+ "", (scb.scb_status & SCB_ST_FR) ? "frame received," : "",
+ (scb.
+ scb_status & SCB_ST_CNA) ? "command unit not active," : "",
+ (scb.
+ scb_status & SCB_ST_RNR) ? "receiving unit not ready," :
+ "");
+ printk("cus 0x%x[%s%s%s] ", (scb.scb_status & SCB_ST_CUS) >> 8,
+ ((scb.scb_status & SCB_ST_CUS) ==
+ SCB_ST_CUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_CUS) ==
+ SCB_ST_CUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_CUS) ==
+ SCB_ST_CUS_ACTV) ? "active" : "");
+ printk("rus 0x%x[%s%s%s%s]\n", (scb.scb_status & SCB_ST_RUS) >> 4,
+ ((scb.scb_status & SCB_ST_RUS) ==
+ SCB_ST_RUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_RUS) ==
+ SCB_ST_RUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_RUS) ==
+ SCB_ST_RUS_NRES) ? "no resources" : "",
+ ((scb.scb_status & SCB_ST_RUS) ==
+ SCB_ST_RUS_RDY) ? "ready" : "");
+
+ printk(KERN_DEBUG "command: ");
+ printk("ack 0x%x[%s%s%s%s] ",
+ (scb.
+ scb_command & (SCB_CMD_ACK_CX | SCB_CMD_ACK_FR |
+ SCB_CMD_ACK_CNA | SCB_CMD_ACK_RNR)) >> 12,
+ (scb.
+ scb_command & SCB_CMD_ACK_CX) ? "ack cmd completion," : "",
+ (scb.
+ scb_command & SCB_CMD_ACK_FR) ? "ack frame received," : "",
+ (scb.
+ scb_command & SCB_CMD_ACK_CNA) ? "ack CU not active," : "",
+ (scb.
+ scb_command & SCB_CMD_ACK_RNR) ? "ack RU not ready," : "");
+ printk("cuc 0x%x[%s%s%s%s%s] ",
+ (scb.scb_command & SCB_CMD_CUC) >> 8,
+ ((scb.scb_command & SCB_CMD_CUC) ==
+ SCB_CMD_CUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_CUC) ==
+ SCB_CMD_CUC_GO) ? "start cbl_offset" : "",
+ ((scb.scb_command & SCB_CMD_CUC) ==
+ SCB_CMD_CUC_RES) ? "resume execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) ==
+ SCB_CMD_CUC_SUS) ? "suspend execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) ==
+ SCB_CMD_CUC_ABT) ? "abort execution" : "");
+ printk("ruc 0x%x[%s%s%s%s%s]\n",
+ (scb.scb_command & SCB_CMD_RUC) >> 4,
+ ((scb.scb_command & SCB_CMD_RUC) ==
+ SCB_CMD_RUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_RUC) ==
+ SCB_CMD_RUC_GO) ? "start rfa_offset" : "",
+ ((scb.scb_command & SCB_CMD_RUC) ==
+ SCB_CMD_RUC_RES) ? "resume reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) ==
+ SCB_CMD_RUC_SUS) ? "suspend reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) ==
+ SCB_CMD_RUC_ABT) ? "abort reception" : "");
+
+ printk(KERN_DEBUG "cbl_offset 0x%x ", scb.scb_cbl_offset);
+ printk("rfa_offset 0x%x\n", scb.scb_rfa_offset);
+
+ printk(KERN_DEBUG "crcerrs %d ", scb.scb_crcerrs);
+ printk("alnerrs %d ", scb.scb_alnerrs);
+ printk("rscerrs %d ", scb.scb_rscerrs);
+ printk("ovrnerrs %d\n", scb.scb_ovrnerrs);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the i82586's receive unit.
+ */
+static void wv_ru_show(struct net_device * dev)
+{
+ /* net_local *lp = (net_local *) dev->priv; */
+
+ printk(KERN_DEBUG
+ "##### WaveLAN i82586 receiver unit status: #####\n");
+ printk(KERN_DEBUG "ru:");
+ /*
+ * Not implemented yet
+ */
+ printk("\n");
+} /* wv_ru_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Display info about one control block of the i82586 memory.
+ */
+static void wv_cu_show_one(struct net_device * dev, net_local * lp, int i, u16 p)
+{
+ unsigned long ioaddr;
+ ac_tx_t actx;
+
+ ioaddr = dev->base_addr;
+
+ printk("%d: 0x%x:", i, p);
+
+ obram_read(ioaddr, p, (unsigned char *) &actx, sizeof(actx));
+ printk(" status=0x%x,", actx.tx_h.ac_status);
+ printk(" command=0x%x,", actx.tx_h.ac_command);
+
+ /*
+ {
+ tbd_t tbd;
+
+ obram_read(ioaddr, actx.tx_tbd_offset, (unsigned char *)&tbd, sizeof(tbd));
+ printk(" tbd_status=0x%x,", tbd.tbd_status);
+ }
+ */
+
+ printk("|");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print status of the command unit of the i82586.
+ */
+static void wv_cu_show(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned int i;
+ u16 p;
+
+ printk(KERN_DEBUG
+ "##### WaveLAN i82586 command unit status: #####\n");
+
+ printk(KERN_DEBUG);
+ for (i = 0, p = lp->tx_first_in_use; i < NTXBLOCKS; i++) {
+ wv_cu_show_one(dev, lp, i, p);
+
+ p += TXBLOCKZ;
+ if (p >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ p -= NTXBLOCKS * TXBLOCKZ;
+ }
+ printk("\n");
+}
+#endif /* DEBUG_I82586_SHOW */
+
+#ifdef DEBUG_DEVICE_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN ISA device driver.
+ */
+static void wv_dev_show(struct net_device * dev)
+{
+ printk(KERN_DEBUG "dev:");
+ printk(" state=%lX,", dev->state);
+ printk(" trans_start=%ld,", dev->trans_start);
+ printk(" flags=0x%x,", dev->flags);
+ printk("\n");
+} /* wv_dev_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN ISA device driver's
+ * private information.
+ */
+static void wv_local_show(struct net_device * dev)
+{
+ net_local *lp;
+
+ lp = (net_local *) dev->priv;
+
+ printk(KERN_DEBUG "local:");
+ printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
+ printk(" hacr=0x%x,", lp->hacr);
+ printk(" rx_head=0x%x,", lp->rx_head);
+ printk(" rx_last=0x%x,", lp->rx_last);
+ printk(" tx_first_free=0x%x,", lp->tx_first_free);
+ printk(" tx_first_in_use=0x%x,", lp->tx_first_in_use);
+ printk("\n");
+} /* wv_local_show */
+#endif /* DEBUG_DEVICE_SHOW */
+
+#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
+/*------------------------------------------------------------------*/
+/*
+ * Dump packet header (and content if necessary) on the screen
+ */
+static inline void wv_packet_info(u8 * p, /* Packet to dump */
+ int length, /* Length of the packet */
+ char *msg1, /* Name of the device */
+ char *msg2) /* Name of the function */
+{
+ int i;
+ int maxi;
+
+ printk(KERN_DEBUG
+ "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n",
+ msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length);
+ printk(KERN_DEBUG
+ "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n",
+ msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12],
+ p[13]);
+
+#ifdef DEBUG_PACKET_DUMP
+
+ printk(KERN_DEBUG "data=\"");
+
+ if ((maxi = length) > DEBUG_PACKET_DUMP)
+ maxi = DEBUG_PACKET_DUMP;
+ for (i = 14; i < maxi; i++)
+ if (p[i] >= ' ' && p[i] <= '~')
+ printk(" %c", p[i]);
+ else
+ printk("%02X", p[i]);
+ if (maxi < length)
+ printk("..");
+ printk("\"\n");
+ printk(KERN_DEBUG "\n");
+#endif /* DEBUG_PACKET_DUMP */
+}
+#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
+
+/*------------------------------------------------------------------*/
+/*
+ * This is the information which is displayed by the driver at startup.
+ * There are lots of flags for configuring it to your liking.
+ */
+static inline void wv_init_info(struct net_device * dev)
+{
+ short ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv;
+ psa_t psa;
+ int i;
+
+ /* Read the parameter storage area */
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef DEBUG_PSA_SHOW
+ wv_psa_show(&psa);
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_cu_show(dev);
+#endif
+
+#ifdef DEBUG_BASIC_SHOW
+ /* Now, let's go for the basic stuff. */
+ printk(KERN_NOTICE "%s: WaveLAN at %#x,", dev->name, ioaddr);
+ for (i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]);
+ printk(", IRQ %d", dev->irq);
+
+ /* Print current network ID. */
+ if (psa.psa_nwid_select)
+ printk(", nwid 0x%02X-%02X", psa.psa_nwid[0],
+ psa.psa_nwid[1]);
+ else
+ printk(", nwid off");
+
+ /* If 2.00 card */
+ if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area. */
+ fee_read(ioaddr, 0x00, &freq, 1);
+
+ /* Print frequency */
+ printk(", 2.00, %ld", (freq >> 6) + 2400L);
+
+ /* Hack! */
+ if (freq & 0x20)
+ printk(".5");
+ } else {
+ printk(", PC");
+ switch (psa.psa_comp_number) {
+ case PSA_COMP_PC_AT_915:
+ case PSA_COMP_PC_AT_2400:
+ printk("-AT");
+ break;
+ case PSA_COMP_PC_MC_915:
+ case PSA_COMP_PC_MC_2400:
+ printk("-MC");
+ break;
+ case PSA_COMP_PCMCIA_915:
+ printk("MCIA");
+ break;
+ default:
+ printk("?");
+ }
+ printk(", ");
+ switch (psa.psa_subband) {
+ case PSA_SUBBAND_915:
+ printk("915");
+ break;
+ case PSA_SUBBAND_2425:
+ printk("2425");
+ break;
+ case PSA_SUBBAND_2460:
+ printk("2460");
+ break;
+ case PSA_SUBBAND_2484:
+ printk("2484");
+ break;
+ case PSA_SUBBAND_2430_5:
+ printk("2430.5");
+ break;
+ default:
+ printk("?");
+ }
+ }
+
+ printk(" MHz\n");
+#endif /* DEBUG_BASIC_SHOW */
+
+#ifdef DEBUG_VERSION_SHOW
+ /* Print version information */
+ printk(KERN_NOTICE "%s", version);
+#endif
+} /* wv_init_info */
+
+/********************* IOCTL, STATS & RECONFIG *********************/
+/*
+ * Here we find the routines that Linux calls on various occasions
+ * after configuration, other than for transmitting data.
+ * They may be invoked when the user uses ifconfig, reads /proc/net/dev,
+ * or uses the wireless extensions.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the current Ethernet statistics. This may be called with the
+ * card open or closed.
+ * Used when the user reads /proc/net/dev.
+ */
+static en_stats *wavelan_get_stats(struct net_device * dev)
+{
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
+#endif
+
+ return (&((net_local *) dev->priv)->stats);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void wavelan_set_multicast_list(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n",
+ dev->name);
+#endif
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG
+ "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
+ dev->name, dev->flags, dev->mc_count);
+#endif
+
+ /* Are we asking for promiscuous mode,
+ * or all multicast addresses (we don't have that!)
+ * or too many multicast addresses for the hardware filter? */
+ if ((dev->flags & IFF_PROMISC) ||
+ (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES)) {
+ /*
+ * Enable promiscuous mode: receive all packets.
+ */
+ if (!lp->promiscuous) {
+ lp->promiscuous = 1;
+ lp->mc_count = 0;
+
+ wv_82586_reconfig(dev);
+
+ /* Tell the kernel that we are doing a really bad job. */
+ dev->flags |= IFF_PROMISC;
+ }
+ } else
+ /* Are there multicast addresses to send? */
+ if (dev->mc_list != (struct dev_mc_list *) NULL) {
+ /*
+ * Disable promiscuous mode, but receive all packets
+ * in multicast list
+ */
+#ifdef MULTICAST_AVOID
+ if (lp->promiscuous || (dev->mc_count != lp->mc_count))
+#endif
+ {
+ lp->promiscuous = 0;
+ lp->mc_count = dev->mc_count;
+
+ wv_82586_reconfig(dev);
+ }
+ } else {
+ /*
+ * Switch to normal mode: disable promiscuous mode and
+ * clear the multicast list.
+ */
+ if (lp->promiscuous || lp->mc_count == 0) {
+ lp->promiscuous = 0;
+ lp->mc_count = 0;
+
+ wv_82586_reconfig(dev);
+ }
+ }
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n",
+ dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This function is normally compiled out (define SET_MAC_ADDRESS to enable it).
+ * (Note : it was a nice way to test the reconfigure stuff...)
+ */
+#ifdef SET_MAC_ADDRESS
+static int wavelan_set_mac_address(struct net_device * dev, void *addr)
+{
+ struct sockaddr *mac = addr;
+
+ /* Copy the address. */
+ memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
+
+ /* Reconfigure the beast. */
+ wv_82586_reconfig(dev);
+
+ return 0;
+}
+#endif /* SET_MAC_ADDRESS */
+
+#ifdef WIRELESS_EXT /* if wireless extensions exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Frequency setting (for hardware capable of it)
+ * It's a bit complicated and you don't really want to look into it.
+ * (called in wavelan_set_freq())
+ */
+static inline int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */
+ iw_freq * frequency)
+{
+ const int BAND_NUM = 10; /* Number of bands */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
+#ifdef DEBUG_IOCTL_INFO
+ int i;
+#endif
+
+ /* Setting by frequency */
+ /* Theoretically, you may set any frequency between
+ * the two limits with a 0.5 MHz precision. In practice,
+ * I don't want you to have trouble with local regulations.
+ */
+ if ((frequency->e == 1) &&
+ (frequency->m >= (int) 2.412e8)
+ && (frequency->m <= (int) 2.487e8)) {
+ freq = ((frequency->m / 10000) - 24000L) / 5;
+ }
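+ /*
+ * Worked example (added comment): with e == 1, m is the frequency
+ * in tens of Hz, so m == 242200000 means 2.422 GHz and the line
+ * above yields freq = (24220 - 24000) / 5 = 44, i.e. 44 half-MHz
+ * steps above 2.4 GHz.
+ */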
+
+ /* Setting by channel (same as wfreqsel) */
+ /* Warning: each channel is 22 MHz wide, so some of the channels
+ * will interfere. */
+ if ((frequency->e == 0) && (frequency->m < BAND_NUM)) {
+ /* Get frequency offset. */
+ freq = channel_bands[frequency->m] >> 1;
+ }
+
+ /* Verify that the frequency is allowed. */
+ if (freq != 0L) {
+ u16 table[10]; /* Authorized frequency table */
+
+ /* Read the frequency table. */
+ fee_read(ioaddr, 0x71, table, 10);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Frequency table: ");
+ for (i = 0; i < 10; i++) {
+ printk(" %04X", table[i]);
+ }
+ printk("\n");
+#endif
+
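+ /*
+ * Added note: the table is a 160-bit bitmap of allowed 0.5 MHz
+ * steps starting at 2.412 GHz; bit n (with n = freq - 24) lives
+ * in word 9 - n/16, bit n%16. E.g. freq == 44 (2.422 GHz) maps
+ * to bit 4 of table[8].
+ */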
+ /* Look in the table to see whether the frequency is allowed. */
+ if (!(table[9 - ((freq - 24) / 16)] & (1 << ((freq - 24) % 16))))
+ return -EINVAL; /* not allowed */
+ } else
+ return -EINVAL;
+
+ /* if we get a usable frequency */
+ if (freq != 0L) {
+ unsigned short area[16];
+ unsigned short dac[2];
+ unsigned short area_verify[16];
+ unsigned short dac_verify[2];
+ /* Corresponding gain (in the power adjust value table)
+ * See AT&T WaveLAN Data Manual, REF 407-024689/E, page 3-8
+ * and WCIN062D.DOC, page 6.2.9. */
+ unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
+ int power_band = 0; /* Selected band */
+ unsigned short power_adjust; /* Correct value */
+
+ /* Search for the gain. */
+ power_band = 0;
+ while ((freq > power_limit[power_band]) &&
+ (power_limit[++power_band] != 0));
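+ /*
+ * Added illustration: for freq == 44 (2.422 GHz) the loop above
+ * stops with power_band == 1, since 44 > power_limit[0] (40) but
+ * not > power_limit[1] (80).
+ */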
+
+ /* Read the first area. */
+ fee_read(ioaddr, 0x00, area, 16);
+
+ /* Read the DAC. */
+ fee_read(ioaddr, 0x60, dac, 2);
+
+ /* Read the new power adjust value. */
+ fee_read(ioaddr, 0x6B - (power_band >> 1), &power_adjust,
+ 1);
+ if (power_band & 0x1)
+ power_adjust >>= 8;
+ else
+ power_adjust &= 0xFF;
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1: ");
+ for (i = 0; i < 16; i++) {
+ printk(" %04X", area[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac[0], dac[1]);
+#endif
+
+ /* Frequency offset (for info only) */
+ area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
+
+ /* Receiver Principle main divider coefficient */
+ area[3] = (freq >> 1) + 2400L - 352L;
+ area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Transmitter Main divider coefficient */
+ area[13] = (freq >> 1) + 2400L;
+ area[12] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Other parts of the area are flags, bit streams or unused. */
+
+ /* Set the value in the DAC. */
+ dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
+ dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
+
+ /* Write the first area. */
+ fee_write(ioaddr, 0x00, area, 16);
+
+ /* Write the DAC. */
+ fee_write(ioaddr, 0x60, dac, 2);
+
+ /* We should now verify that the EEPROM write went OK. */
+
+ /* Reread the first area. */
+ fee_read(ioaddr, 0x00, area_verify, 16);
+
+ /* Reread the DAC. */
+ fee_read(ioaddr, 0x60, dac_verify, 2);
+
+ /* Compare. */
+ if (memcmp(area, area_verify, 16 * 2) ||
+ memcmp(dac, dac_verify, 2 * 2)) {
+#ifdef DEBUG_IOCTL_ERROR
+ printk(KERN_INFO
+ "WaveLAN: wv_set_frequency: unable to write new frequency to EEPROM(?).\n");
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEPROM - area 1)
+ * Note: as the EEPROM address is automatically decremented, we
+ * start at the end of the area... */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x0F);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished. */
+ fee_wait(ioaddr, 100, 100);
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC). */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x61);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait for the download to finish. */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_IOCTL_INFO
+ /* Verification of what we have done */
+
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1: ");
+ for (i = 0; i < 16; i++) {
+ printk(" %04X", area_verify[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac_verify[0], dac_verify[1]);
+#endif
+
+ return 0;
+ } else
+ return -EINVAL; /* Bah, never get there... */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Give the list of available frequencies.
+ */
+static inline int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */
+ iw_freq * list, /* List of frequencies to fill */
+ int max) /* Maximum number of frequencies */
+{
+ u16 table[10]; /* Authorized frequency table */
+ long freq = 0L; /* offset from 2.412 GHz, in 0.5 MHz steps */
+ int i; /* index in the table */
+ int c = 0; /* Channel number */
+
+ /* Read the frequency table. */
+ fee_read(ioaddr, 0x71 /* frequency table */ , table, 10);
+
+ /* Check all frequencies. */
+ i = 0;
+ for (freq = 0; freq < 150; freq++)
+ /* Look in the table to see whether the frequency is allowed. */
+ if (table[9 - (freq / 16)] & (1 << (freq % 16))) {
+ /* Compute approximate channel number */
+ while ((((channel_bands[c] >> 1) - 24) < freq) &&
+ (c < NELS(channel_bands)))
+ c++;
+ list[i].i = c; /* Set the list index */
+
+ /* put in the list */
+ list[i].m = (((freq + 24) * 5) + 24000L) * 10000;
+ list[i++].e = 1;
+
+ /* Check number. */
+ if (i >= max)
+ return (i);
+ }
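+ /*
+ * Added illustration: for freq == 20 the entry built above is
+ * ((20 + 24) * 5 + 24000) * 10000 = 242200000 with e == 1,
+ * i.e. 2.422 GHz.
+ */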
+
+ return (i);
+}
+
+#ifdef IW_WIRELESS_SPY
+/*------------------------------------------------------------------*/
+/*
+ * Gather wireless spy statistics: for each packet, compare the source
+ * address with our list, and if they match, get the statistics.
+ * Sorry, but this function really needs the wireless extensions.
+ */
+static inline void wl_spy_gather(struct net_device * dev,
+ u8 * mac, /* MAC address */
+ u8 * stats) /* Statistics to gather */
+{
+ struct iw_quality wstats;
+
+ wstats.qual = stats[2] & MMR_SGNL_QUAL;
+ wstats.level = stats[0] & MMR_SIGNAL_LVL;
+ wstats.noise = stats[1] & MMR_SILENCE_LVL;
+ wstats.updated = 0x7;
+
+ /* Update spy records */
+ wireless_spy_update(dev, mac, &wstats);
+}
+#endif /* IW_WIRELESS_SPY */
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * This function calculates a histogram of the signal level.
+ * As the noise is quite constant, it's like doing it on the SNR.
+ * We have defined a set of intervals (lp->his_range), and each time
+ * the level falls in one of them, we increment the matching count
+ * (lp->his_sum).
+ * With this histogram you may detect whether one WaveLAN is really
+ * weak, or you may also calculate the mean and standard deviation
+ * of the level.
+ */
+static inline void wl_his_gather(struct net_device * dev,
+ u8 * stats) /* Statistics to gather */
+{
+ net_local *lp = (net_local *) dev->priv;
+ u8 level = stats[0] & MMR_SIGNAL_LVL;
+ int i;
+
+ /* Find the correct interval. */
+ i = 0;
+ while ((i < (lp->his_number - 1))
+ && (level >= lp->his_range[i++]));
+
+ /* Increment interval counter. */
+ (lp->his_sum[i])++;
+}
+#endif /* HISTOGRAM */
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get protocol name
+ */
+static int wavelan_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ strcpy(wrqu->name, "WaveLAN");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set NWID
+ */
+static int wavelan_set_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ mm_t m;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Set NWID in WaveLAN. */
+ if (!wrqu->nwid.disabled) {
+ /* Set NWID in psa */
+ psa.psa_nwid[0] = (wrqu->nwid.value & 0xFF00) >> 8;
+ psa.psa_nwid[1] = wrqu->nwid.value & 0xFF;
+ psa.psa_nwid_select = 0x01;
+ psa_write(ioaddr, lp->hacr,
+ (char *) psa.psa_nwid - (char *) &psa,
+ (unsigned char *) psa.psa_nwid, 3);
+
+ /* Set NWID in mmc. */
+ m.w.mmw_netw_id_l = psa.psa_nwid[1];
+ m.w.mmw_netw_id_h = psa.psa_nwid[0];
+ mmc_write(ioaddr,
+ (char *) &m.w.mmw_netw_id_l -
+ (char *) &m,
+ (unsigned char *) &m.w.mmw_netw_id_l, 2);
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), 0x00);
+ } else {
+ /* Disable NWID in the psa. */
+ psa.psa_nwid_select = 0x00;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_nwid_select -
+ (char *) &psa,
+ (unsigned char *) &psa.psa_nwid_select,
+ 1);
+
+ /* Disable NWID in the mmc (no filtering). */
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel),
+ MMW_LOOPT_SEL_DIS_NWID);
+ }
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, lp->hacr);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get NWID
+ */
+static int wavelan_get_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Read the NWID. */
+ psa_read(ioaddr, lp->hacr,
+ (char *) psa.psa_nwid - (char *) &psa,
+ (unsigned char *) psa.psa_nwid, 3);
+ wrqu->nwid.value = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
+ wrqu->nwid.disabled = !(psa.psa_nwid_select);
+ wrqu->nwid.fixed = 1; /* Superfluous */
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set frequency
+ */
+static int wavelan_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ unsigned long flags;
+ int ret;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ ret = wv_set_frequency(ioaddr, &(wrqu->freq));
+ else
+ ret = -EOPNOTSUPP;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get frequency
+ */
+static int wavelan_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable).
+ * Does it work for everybody, especially old cards? */
+ if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area. */
+ fee_read(ioaddr, 0x00, &freq, 1);
+ wrqu->freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
+ wrqu->freq.e = 1;
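+ /*
+ * Added note: area 0 of the EEPROM keeps the offset from 2.4 GHz
+ * (in 0.5 MHz steps) in bits 15:5 (see wv_set_frequency()), so a
+ * stored value of (44 << 5) reads back here as 2.422 GHz.
+ */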
+ } else {
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_subband - (char *) &psa,
+ (unsigned char *) &psa.psa_subband, 1);
+
+ if (psa.psa_subband <= 4) {
+ wrqu->freq.m = fixed_bands[psa.psa_subband];
+ wrqu->freq.e = (psa.psa_subband != 0);
+ } else
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set level threshold
+ */
+static int wavelan_set_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Set the level threshold. */
+ /* We should complain loudly if wrqu->sens.fixed = 0, because we
+ * can't set auto mode... */
+ psa.psa_thr_pre_set = wrqu->sens.value & 0x3F;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_thr_pre_set - (char *) &psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, lp->hacr);
+ mmc_out(ioaddr, mmwoff(0, mmw_thr_pre_set),
+ psa.psa_thr_pre_set);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get level threshold
+ */
+static int wavelan_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Read the level threshold. */
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_thr_pre_set - (char *) &psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ wrqu->sens.value = psa.psa_thr_pre_set & 0x3F;
+ wrqu->sens.fixed = 1;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set encryption key
+ */
+static int wavelan_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ unsigned long flags;
+ psa_t psa;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check if capable of encryption */
+ if (!mmc_encr(ioaddr)) {
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Check the size of the key */
+ if((wrqu->encoding.length != 8) && (wrqu->encoding.length != 0)) {
+ ret = -EINVAL;
+ }
+
+ if(!ret) {
+ /* Basic checking... */
+ if (wrqu->encoding.length == 8) {
+ /* Copy the key in the driver */
+ memcpy(psa.psa_encryption_key, extra,
+ wrqu->encoding.length);
+ psa.psa_encryption_select = 1;
+
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 8 + 1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable),
+ MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
+ mmc_write(ioaddr, mmwoff(0, mmw_encr_key),
+ (unsigned char *) &psa.psa_encryption_key, 8);
+ }
+
+ /* disable encryption */
+ if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
+ psa.psa_encryption_select = 0;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable), 0);
+ }
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, lp->hacr);
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get encryption key
+ */
+static int wavelan_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check if encryption is available */
+ if (!mmc_encr(ioaddr)) {
+ ret = -EOPNOTSUPP;
+ } else {
+ /* Read the encryption key */
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1 + 8);
+
+ /* encryption is enabled ? */
+ if (psa.psa_encryption_select)
+ wrqu->encoding.flags = IW_ENCODE_ENABLED;
+ else
+ wrqu->encoding.flags = IW_ENCODE_DISABLED;
+ wrqu->encoding.flags |= mmc_encr(ioaddr);
+
+ /* Copy the key to the user buffer */
+ wrqu->encoding.length = 8;
+ memcpy(extra, psa.psa_encryption_key, wrqu->encoding.length);
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get range info
+ */
+static int wavelan_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ struct iw_range *range = (struct iw_range *) extra;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Set the length (very important for backward compatibility) */
+ wrqu->data.length = sizeof(struct iw_range);
+
+ /* Set all the info we don't care or don't know about to zero */
+ memset(range, 0, sizeof(struct iw_range));
+
+ /* Set the Wireless Extension versions */
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 9;
+
+ /* Set information in the range struct. */
+ range->throughput = 1.6 * 1000 * 1000; /* don't argue on this ! */
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0xFFFF;
+
+ range->sensitivity = 0x3F;
+ range->max_qual.qual = MMR_SGNL_QUAL;
+ range->max_qual.level = MMR_SIGNAL_LVL;
+ range->max_qual.noise = MMR_SILENCE_LVL;
+ range->avg_qual.qual = MMR_SGNL_QUAL; /* Always max */
+ /* Need to get better values for those two */
+ range->avg_qual.level = 30;
+ range->avg_qual.noise = 8;
+
+ range->num_bitrates = 1;
+ range->bitrate[0] = 2000000; /* 2 Mb/s */
+
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_MASK(0x8B02) |
+ IW_EVENT_CAPA_MASK(0x8B04));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
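+ /* Added note: 0x8B02 and 0x8B04 are SIOCSIWNWID and SIOCSIWFREQ. */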
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+ range->num_channels = 10;
+ range->num_frequency = wv_frequency_list(ioaddr, range->freq,
+ IW_MAX_FREQUENCIES);
+ } else
+ range->num_channels = range->num_frequency = 0;
+
+ /* Encryption supported ? */
+ if (mmc_encr(ioaddr)) {
+ range->encoding_size[0] = 8; /* DES = 64 bits key */
+ range->num_encoding_sizes = 1;
+ range->max_encoding_tokens = 1; /* Only one key possible */
+ } else {
+ range->num_encoding_sizes = 0;
+ range->max_encoding_tokens = 0;
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : set quality threshold
+ */
+static int wavelan_set_qthr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ psa.psa_quality_thr = *(extra) & 0x0F;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_quality_thr - (char *) &psa,
+ (unsigned char *) &psa.psa_quality_thr, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, lp->hacr);
+ mmc_out(ioaddr, mmwoff(0, mmw_quality_thr),
+ psa.psa_quality_thr);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : get quality threshold
+ */
+static int wavelan_get_qthr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+ psa_t psa;
+ unsigned long flags;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_quality_thr - (char *) &psa,
+ (unsigned char *) &psa.psa_quality_thr, 1);
+ *(extra) = psa.psa_quality_thr & 0x0F;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return 0;
+}
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : set histogram
+ */
+static int wavelan_set_histo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+
+ /* Check the number of intervals. */
+ if (wrqu->data.length > 16) {
+ return(-E2BIG);
+ }
+
+ /* Disable the histogram while we copy the new interval ranges.
+ * As we don't disable interrupts, we need to do this. */
+ lp->his_number = 0;
+
+ /* Are there ranges to copy? */
+ if (wrqu->data.length > 0) {
+ /* Copy interval ranges to the driver */
+ memcpy(lp->his_range, extra, wrqu->data.length);
+
+ {
+ int i;
+ printk(KERN_DEBUG "Histo :");
+ for(i = 0; i < wrqu->data.length; i++)
+ printk(" %d", lp->his_range[i]);
+ printk("\n");
+ }
+
+ /* Reset result structure. */
+ memset(lp->his_sum, 0x00, sizeof(long) * 16);
+ }
+
+ /* Now we can set the number of ranges */
+ lp->his_number = wrqu->data.length;
+
+ return(0);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : get histogram
+ */
+static int wavelan_get_histo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = (net_local *) dev->priv; /* lp is not unused */
+
+ /* Set the number of intervals. */
+ wrqu->data.length = lp->his_number;
+
+ /* Give back the distribution statistics */
+ if(lp->his_number > 0)
+ memcpy(extra, lp->his_sum, sizeof(long) * lp->his_number);
+
+ return(0);
+}
+#endif /* HISTOGRAM */
+
+/*------------------------------------------------------------------*/
+/*
+ * Structures to export the Wireless Handlers
+ */
+
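+/*
+ * Added note: the wireless-extensions core indexes wavelan_handler[]
+ * by ioctl number relative to SIOCIWFIRST (and wavelan_private_handler[]
+ * relative to SIOCIWFIRSTPRIV), hence the NULL placeholders for the
+ * requests this driver does not implement.
+ */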
+static const iw_handler wavelan_handler[] =
+{
+ NULL, /* SIOCSIWCOMMIT */
+ wavelan_get_name, /* SIOCGIWNAME */
+ wavelan_set_nwid, /* SIOCSIWNWID */
+ wavelan_get_nwid, /* SIOCGIWNWID */
+ wavelan_set_freq, /* SIOCSIWFREQ */
+ wavelan_get_freq, /* SIOCGIWFREQ */
+ NULL, /* SIOCSIWMODE */
+ NULL, /* SIOCGIWMODE */
+ wavelan_set_sens, /* SIOCSIWSENS */
+ wavelan_get_sens, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ wavelan_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ iw_handler_set_spy, /* SIOCSIWSPY */
+ iw_handler_get_spy, /* SIOCGIWSPY */
+ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
+ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
+ NULL, /* SIOCSIWAP */
+ NULL, /* SIOCGIWAP */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCGIWAPLIST */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWESSID */
+ NULL, /* SIOCGIWESSID */
+ NULL, /* SIOCSIWNICKN */
+ NULL, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWRATE */
+ NULL, /* SIOCGIWRATE */
+ NULL, /* SIOCSIWRTS */
+ NULL, /* SIOCGIWRTS */
+ NULL, /* SIOCSIWFRAG */
+ NULL, /* SIOCGIWFRAG */
+ NULL, /* SIOCSIWTXPOW */
+ NULL, /* SIOCGIWTXPOW */
+ NULL, /* SIOCSIWRETRY */
+ NULL, /* SIOCGIWRETRY */
+ /* Bummer! Why are these only at the end??? */
+ wavelan_set_encode, /* SIOCSIWENCODE */
+ wavelan_get_encode, /* SIOCGIWENCODE */
+};
+
+static const iw_handler wavelan_private_handler[] =
+{
+ wavelan_set_qthr, /* SIOCIWFIRSTPRIV */
+ wavelan_get_qthr, /* SIOCIWFIRSTPRIV + 1 */
+#ifdef HISTOGRAM
+ wavelan_set_histo, /* SIOCIWFIRSTPRIV + 2 */
+ wavelan_get_histo, /* SIOCIWFIRSTPRIV + 3 */
+#endif /* HISTOGRAM */
+};
+
+static const struct iw_priv_args wavelan_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
+ { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
+ { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
+ { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
+};
+
+static const struct iw_handler_def wavelan_handler_def =
+{
+ .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler),
+ .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler),
+ .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args),
+ .standard = wavelan_handler,
+ .private = wavelan_private_handler,
+ .private_args = wavelan_private_args,
+ .get_wireless_stats = wavelan_get_wireless_stats,
+};
+
+/*------------------------------------------------------------------*/
+/*
+ * Get wireless statistics.
+ * Called by /proc/net/wireless
+ */
+static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv;
+ mmr_t m;
+ iw_stats *wstats;
+ unsigned long flags;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n",
+ dev->name);
+#endif
+
+ /* Check */
+ if (lp == (net_local *) NULL)
+ return (iw_stats *) NULL;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ wstats = &lp->wstats;
+
+ /* Get data from the mmc. */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
+ mmc_read(ioaddr, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l,
+ 2);
+ mmc_read(ioaddr, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set,
+ 4);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+ /* Copy data to wireless stuff. */
+ wstats->status = m.mmr_dce_status & MMR_DCE_STATUS;
+ wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
+ wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
+ wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
+ wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7)
+ | ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6)
+ | ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
+ wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+ wstats->discard.code = 0L;
+ wstats->discard.misc = 0L;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n",
+ dev->name);
+#endif
+ return &lp->wstats;
+}
+#endif /* WIRELESS_EXT */
+
+/************************* PACKET RECEPTION *************************/
+/*
+ * This part deals with receiving the packets.
+ * The interrupt handler gets an interrupt when a packet has been
+ * successfully received and calls this part.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does the actual copying of data (including the Ethernet
+ * header structure) from the WaveLAN card to an sk_buff chain that
+ * will be passed up to the network interface layer. NOTE: we
+ * currently don't handle trailer protocols (neither does the rest of
+ * the network interface), so if that is needed, it will (at least in
+ * part) be added here. The contents of the receive ring buffer are
+ * copied to a message chain that is then passed to the kernel.
+ *
+ * Note: if any errors occur, the packet is "dropped on the floor".
+ * (called by wv_packet_rcv())
+ */
+static inline void
+wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ struct sk_buff *skb;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
+ dev->name, buf_off, sksize);
+#endif
+
+ /* Allocate buffer for the data */
+ if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *) NULL) {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO
+ "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
+ dev->name, sksize);
+#endif
+ lp->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = dev;
+
+ /* Copy the packet to the buffer. */
+ obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef DEBUG_RX_INFO
+ wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+#endif /* DEBUG_RX_INFO */
+
+ /* Statistics-gathering and associated stuff.
+ * It seems a bit messy with all the #ifdefs, but it's really
+ * simple... */
+ if (
+#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
+ (lp->spy_data.spy_number > 0) ||
+#endif /* IW_WIRELESS_SPY */
+#ifdef HISTOGRAM
+ (lp->his_number > 0) ||
+#endif /* HISTOGRAM */
+ 0) {
+ u8 stats[3]; /* signal level, noise level, signal quality */
+
+ /* Read signal level, silence level and signal quality bytes */
+ /* Note: in the PCMCIA hardware, these are part of the frame.
+ * It seems that for the ISA hardware, it's nowhere to be
+ * found in the frame, so I'm obliged to do this (it has a
+ * side effect on /proc/net/wireless).
+ * Any ideas?
+ */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+ mmc_read(ioaddr, mmroff(0, mmr_signal_lvl), stats, 3);
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG
+ "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
+ dev->name, stats[0] & 0x3F, stats[1] & 0x3F,
+ stats[2] & 0x0F);
+#endif
+
+ /* Spying stuff */
+#ifdef IW_WIRELESS_SPY
+ wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE,
+ stats);
+#endif /* IW_WIRELESS_SPY */
+#ifdef HISTOGRAM
+ wl_his_gather(dev, stats);
+#endif /* HISTOGRAM */
+ }
+
+ /*
+ * Hand the packet to the network module.
+ */
+ netif_rx(skb);
+
+ /* Keep statistics up to date */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += sksize;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Transfer as many packets as we can
+ * from the device RAM.
+ * (called in wavelan_interrupt()).
+ * Note : the spinlock is already grabbed for us.
+ */
+static inline void wv_receive(struct net_device * dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv;
+ fd_t fd;
+ rbd_t rbd;
+ int nreaped = 0;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_receive()\n", dev->name);
+#endif
+
+ /* Loop on each received packet. */
+ for (;;) {
+ obram_read(ioaddr, lp->rx_head, (unsigned char *) &fd,
+ sizeof(fd));
+
+ /* Note about the status:
+ * It starts out as 0 (the value we set). Then, when the RU
+ * grabs the buffer to prepare for reception, it sets the
+ * FD_STATUS_B flag. When the RU has finished receiving the
+ * frame, it clears FD_STATUS_B, sets FD_STATUS_C to indicate
+ * completion and sets the other flags to report any errors.
+ * FD_STATUS_OK indicates that the reception was OK.
+ */
+
+ /* If the current frame is not complete, we have reached the end. */
+ if ((fd.fd_status & FD_STATUS_C) != FD_STATUS_C)
+ break; /* This is how we exit the loop. */
+
+ nreaped++;
+
+ /* Check whether frame was correctly received. */
+ if ((fd.fd_status & FD_STATUS_OK) == FD_STATUS_OK) {
+ /* Does the frame contain a pointer to the data? Let's check. */
+ if (fd.fd_rbd_offset != I82586NULL) {
+ /* Read the receive buffer descriptor */
+ obram_read(ioaddr, fd.fd_rbd_offset,
+ (unsigned char *) &rbd,
+ sizeof(rbd));
+
+#ifdef DEBUG_RX_ERROR
+ if ((rbd.rbd_status & RBD_STATUS_EOF) != RBD_STATUS_EOF)
+ printk(KERN_INFO
+ "%s: wv_receive(): missing EOF flag.\n",
+ dev->name);
+
+ if ((rbd.rbd_status & RBD_STATUS_F) != RBD_STATUS_F)
+ printk(KERN_INFO
+ "%s: wv_receive(): missing F flag.\n",
+ dev->name);
+#endif /* DEBUG_RX_ERROR */
+
+ /* Read the packet and transmit to Linux */
+ wv_packet_read(dev, rbd.rbd_bufl,
+ rbd.rbd_status & RBD_STATUS_ACNT);
+ }
+#ifdef DEBUG_RX_ERROR
+ else /* if frame has no data */
+ printk(KERN_INFO
+ "%s: wv_receive(): frame has no data.\n",
+ dev->name);
+#endif
+ } else { /* If reception was not successful */
+
+ lp->stats.rx_errors++;
+
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG
+ "%s: wv_receive(): frame not received successfully (%X).\n",
+ dev->name, fd.fd_status);
+#endif
+
+#ifdef DEBUG_RX_ERROR
+ if ((fd.fd_status & FD_STATUS_S6) != 0)
+ printk(KERN_INFO
+ "%s: wv_receive(): no EOF flag.\n",
+ dev->name);
+#endif
+
+ if ((fd.fd_status & FD_STATUS_S7) != 0) {
+ lp->stats.rx_length_errors++;
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_receive(): frame too short.\n",
+ dev->name);
+#endif
+ }
+
+ if ((fd.fd_status & FD_STATUS_S8) != 0) {
+ lp->stats.rx_over_errors++;
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_receive(): rx DMA overrun.\n",
+ dev->name);
+#endif
+ }
+
+ if ((fd.fd_status & FD_STATUS_S9) != 0) {
+ lp->stats.rx_fifo_errors++;
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_receive(): ran out of resources.\n",
+ dev->name);
+#endif
+ }
+
+ if ((fd.fd_status & FD_STATUS_S10) != 0) {
+ lp->stats.rx_frame_errors++;
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_receive(): alignment error.\n",
+ dev->name);
+#endif
+ }
+
+ if ((fd.fd_status & FD_STATUS_S11) != 0) {
+ lp->stats.rx_crc_errors++;
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG
+ "%s: wv_receive(): CRC error.\n",
+ dev->name);
+#endif
+ }
+ }
+
+ fd.fd_status = 0;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_status),
+ (unsigned char *) &fd.fd_status,
+ sizeof(fd.fd_status));
+
+ fd.fd_command = FD_COMMAND_EL;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_command),
+ (unsigned char *) &fd.fd_command,
+ sizeof(fd.fd_command));
+
+ fd.fd_command = 0;
+ obram_write(ioaddr, fdoff(lp->rx_last, fd_command),
+ (unsigned char *) &fd.fd_command,
+ sizeof(fd.fd_command));
+
+ lp->rx_last = lp->rx_head;
+ lp->rx_head = fd.fd_link_offset;
+ } /* for(;;) -> loop on all frames */
+
+#ifdef DEBUG_RX_INFO
+ if (nreaped > 1)
+ printk(KERN_DEBUG "%s: wv_receive(): reaped %d\n",
+ dev->name, nreaped);
+#endif
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_receive()\n", dev->name);
+#endif
+}
+
+/*********************** PACKET TRANSMISSION ***********************/
+/*
+ * This part deals with sending packets through the WaveLAN.
+ *
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine fills in the appropriate registers and memory
+ * locations on the WaveLAN card and starts the card off on
+ * the transmit.
+ *
+ * The principle:
+ * Each block contains a transmit command, a NOP command,
+ * a transmit block descriptor and a buffer.
+ * The CU reads the transmit block, which points to the tbd,
+ * then reads the tbd and the contents of the buffer.
+ * When it has finished with it, it goes on to the next command,
+ * which in our case is the NOP. The NOP points to itself,
+ * so the CU stops there.
+ * When we add the next block, we modify the previous NOP
+ * to make it point to the new tx command.
+ * Simple, isn't it?
+ *
+ * (called in wavelan_packet_xmit())
+ */
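+/*
+ * Roughly, after this call the chain in on-board RAM looks like this
+ * (one transmit block per column, as laid out by wv_cu_start()):
+ *
+ *	[tx N-1] --> [nop N-1] --> [tx N] --> [nop N] --+
+ *	   |                          |          ^      |
+ *	   v                          v          +------+
+ *	[tbd N-1] --> [buf N-1]    [tbd N] --> [buf N]
+ *
+ * Before the call, [nop N-1] looped on itself; below we rewrite its
+ * ac_link so that it points to the new tx command, and the new NOP
+ * becomes the self-looping tail of the chain.
+ */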
+static inline int wv_packet_write(struct net_device * dev, void *buf, short length)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ int clen = length;
+ unsigned long flags;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name,
+ length);
+#endif
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check nothing bad has happened */
+ if (lp->tx_n_in_use == (NTXBLOCKS - 1)) {
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: wv_packet_write(): Tx queue full.\n",
+ dev->name);
+#endif
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+ return 1;
+ }
+
+ /* Calculate addresses of next block and previous block. */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if (txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if (lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the different parts of the block. */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ /*
+ * Transmit command
+ */
+ tx.tx_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /*
+ * Transmit buffer descriptor
+ */
+ tbd.tbd_status = TBD_STATUS_EOF | (TBD_STATUS_ACNT & clen);
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd, sizeof(tbd));
+
+ /*
+ * Data
+ */
+ obram_write(ioaddr, buf_addr, buf, length);
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to this txblock.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = txblock;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* Make sure the watchdog will keep quiet for a while */
+ dev->trans_start = jiffies;
+
+ /* Keep stats up to date. */
+ lp->stats.tx_bytes += length;
+
+ if (lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if (lp->tx_n_in_use < NTXBLOCKS - 1)
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_TX_INFO
+ wv_packet_info((u8 *) buf, length, dev->name,
+ "wv_packet_write");
+#endif /* DEBUG_TX_INFO */
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
+#endif
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called when we want to send a packet (NET3 callback).
+ * In this routine, we check whether the hardware is ready to accept
+ * the packet. We also prevent reentrance. Then we call the function
+ * to send the packet.
+ */
+static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long flags;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
+ (unsigned) skb);
+#endif
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * In other words, prevent reentering this routine.
+ */
+ netif_stop_queue(dev);
+
+ /* If somebody has asked to reconfigure the controller,
+ * we can do it now.
+ */
+ if (lp->reconfig_82586) {
+ spin_lock_irqsave(&lp->spinlock, flags);
+ wv_82586_config(dev);
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+ /* Check that we can continue */
+ if (lp->tx_n_in_use == (NTXBLOCKS - 1))
+ return 1;
+ }
+#ifdef DEBUG_TX_ERROR
+ if (skb->next)
+ printk(KERN_INFO "skb has next\n");
+#endif
+
+ /* Do we need some padding? */
+	/* Note: on wireless the propagation time is on the order of 1 us,
+	 * and we don't have the Ethernet-specific requirement of being
+	 * able to detect collisions, so in theory we don't really
+	 * need to pad. Jean II */
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ /* Write packet on the card */
+ if(wv_packet_write(dev, skb->data, skb->len))
+ return 1; /* We failed */
+
+ dev_kfree_skb(skb);
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*********************** HARDWARE CONFIGURATION ***********************/
+/*
+ * This part does the real job of starting and configuring the hardware.
+ */
+
+/*--------------------------------------------------------------------*/
+/*
+ * Routine to initialize the Modem Management Controller.
+ * (called by wv_hw_reset())
+ */
+static inline int wv_mmc_init(struct net_device * dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ net_local *lp = (net_local *) dev->priv;
+ psa_t psa;
+ mmw_t m;
+ int configured;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
+#endif
+
+ /* Read the parameter storage area. */
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef USE_PSA_CONFIG
+ configured = psa.psa_conf_status & 1;
+#else
+ configured = 0;
+#endif
+
+	/* If the PSA is not configured */
+ if (!configured) {
+ /* User will be able to configure NWID later (with iwconfig). */
+ psa.psa_nwid[0] = 0;
+ psa.psa_nwid[1] = 0;
+
+ /* no NWID checking since NWID is not set */
+ psa.psa_nwid_select = 0;
+
+ /* Disable encryption */
+ psa.psa_encryption_select = 0;
+
+ /* Set to standard values:
+ * 0x04 for AT,
+ * 0x01 for MCA,
+ * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
+ */
+ if (psa.psa_comp_number & 1)
+ psa.psa_thr_pre_set = 0x01;
+ else
+ psa.psa_thr_pre_set = 0x04;
+ psa.psa_quality_thr = 0x03;
+
+ /* It is configured */
+ psa.psa_conf_status |= 1;
+
+#ifdef USE_PSA_CONFIG
+ /* Write the psa. */
+ psa_write(ioaddr, lp->hacr,
+ (char *) psa.psa_nwid - (char *) &psa,
+ (unsigned char *) psa.psa_nwid, 4);
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_thr_pre_set - (char *) &psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_quality_thr - (char *) &psa,
+ (unsigned char *) &psa.psa_quality_thr, 1);
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_conf_status - (char *) &psa,
+ (unsigned char *) &psa.psa_conf_status, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, lp->hacr);
+#endif
+ }
+
+ /* Zero the mmc structure. */
+ memset(&m, 0x00, sizeof(m));
+
+ /* Copy PSA info to the mmc. */
+ m.mmw_netw_id_l = psa.psa_nwid[1];
+ m.mmw_netw_id_h = psa.psa_nwid[0];
+
+ if (psa.psa_nwid_select & 1)
+ m.mmw_loopt_sel = 0x00;
+ else
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
+
+ memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
+ sizeof(m.mmw_encr_key));
+
+ if (psa.psa_encryption_select)
+ m.mmw_encr_enable =
+ MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
+ else
+ m.mmw_encr_enable = 0;
+
+ m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
+ m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
+
+ /*
+ * Set default modem control parameters.
+ * See NCR document 407-0024326 Rev. A.
+ */
+ m.mmw_jabber_enable = 0x01;
+ m.mmw_freeze = 0;
+ m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
+ m.mmw_ifs = 0x20;
+ m.mmw_mod_delay = 0x04;
+ m.mmw_jam_time = 0x38;
+
+ m.mmw_des_io_invert = 0;
+ m.mmw_decay_prm = 0;
+ m.mmw_decay_updat_prm = 0;
+
+ /* Write all info to MMC. */
+ mmc_write(ioaddr, 0, (u8 *) & m, sizeof(m));
+
+	/* The following code starts the modem of the 2.00
+	 * frequency-selectable cards at power on. It's not strictly
+	 * needed for subsequent boots.
+ * The original patch was by Joe Finney for the PCMCIA driver, but
+ * I've cleaned it up a bit and added documentation.
+ * Thanks to Loeke Brederveld from Lucent for the info.
+ */
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * Does it work for everybody, especially old cards? */
+ /* Note: WFREQSEL verifies that it is able to read a sensible
+ * frequency from EEPROM (address 0x00) and that MMR_FEE_STATUS_ID
+ * is 0xA (Xilinx version) or 0xB (Ariadne version).
+ * My test is more crude but does work. */
+ if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+		/* We must download the frequency parameters to the
+		 * synthesizers (from the EEPROM - area 1).
+		 * Note: as the EEPROM address is automatically decremented,
+		 * we set it to the end of the area... */
+ m.mmw_fee_addr = 0x0F;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *) &m.mmw_fee_ctrl - (char *) &m,
+ (unsigned char *) &m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished. */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_CONFIG_INFO
+ /* The frequency was in the last word downloaded. */
+ mmc_read(ioaddr, (char *) &m.mmw_fee_data_l - (char *) &m,
+ (unsigned char *) &m.mmw_fee_data_l, 2);
+
+ /* Print some info for the user. */
+ printk(KERN_DEBUG
+ "%s: WaveLAN 2.00 recognised (frequency select). Current frequency = %ld\n",
+ dev->name,
+		       ((m.mmw_fee_data_h << 4) |
+			(m.mmw_fee_data_l >> 4)) * 5 / 2 + 24000L);
+#endif
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC). */
+ m.mmw_fee_addr = 0x61;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *) &m.mmw_fee_ctrl - (char *) &m,
+ (unsigned char *) &m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished. */
+ }
+ /* if 2.00 card */
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Construct the fd and rbd structures.
+ * Start the receive unit.
+ * (called by wv_hw_reset())
+ */
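+/*
+ * Roughly, each of the NRXBLOCKS receive blocks laid out below is
+ *
+ *	[fd] -> [rbd] -> [data buffer]
+ *
+ * with the frame descriptors chained circularly through fd_link_offset
+ * and the last one flagged FD_COMMAND_EL.
+ */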
+static inline int wv_ru_start(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 scb_cs;
+ fd_t fd;
+ rbd_t rbd;
+ u16 rx;
+ u16 rx_next;
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
+#endif
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if ((scb_cs & SCB_ST_RUS) == SCB_ST_RUS_RDY)
+ return 0;
+
+ lp->rx_head = OFFSET_RU;
+
+ for (i = 0, rx = lp->rx_head; i < NRXBLOCKS; i++, rx = rx_next) {
+ rx_next =
+ (i == NRXBLOCKS - 1) ? lp->rx_head : rx + RXBLOCKZ;
+
+ fd.fd_status = 0;
+ fd.fd_command = (i == NRXBLOCKS - 1) ? FD_COMMAND_EL : 0;
+ fd.fd_link_offset = rx_next;
+ fd.fd_rbd_offset = rx + sizeof(fd);
+ obram_write(ioaddr, rx, (unsigned char *) &fd, sizeof(fd));
+
+ rbd.rbd_status = 0;
+ rbd.rbd_next_rbd_offset = I82586NULL;
+ rbd.rbd_bufl = rx + sizeof(fd) + sizeof(rbd);
+ rbd.rbd_bufh = 0;
+ rbd.rbd_el_size = RBD_EL | (RBD_SIZE & MAXDATAZ);
+ obram_write(ioaddr, rx + sizeof(fd),
+ (unsigned char *) &rbd, sizeof(rbd));
+
+ lp->rx_last = rx;
+ }
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_rfa_offset),
+ (unsigned char *) &lp->rx_head, sizeof(lp->rx_head));
+
+ scb_cs = SCB_CMD_RUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--) {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_ru_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Initialise the transmit blocks.
+ * Start the command unit executing the NOP
+ * self-loop of the first transmit block.
+ *
+ * Here we create the list of send buffers used to transmit packets
+ * between the PC and the command unit. For each buffer, we create a
+ * buffer descriptor (pointing on the buffer), a transmit command
+ * (pointing to the buffer descriptor) and a NOP command.
+ * The transmit command is linked to the NOP, and the NOP to itself.
+ * When the transmit command has finished executing, the CU will
+ * then loop on the NOP. By redirecting the NOP link to a new command,
+ * we may send another buffer.
+ *
+ * (called by wv_hw_reset())
+ */
+static inline int wv_cu_start(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+ u16 txblock;
+ u16 first_nop;
+ u16 scb_cs;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_cu_start()\n", dev->name);
+#endif
+
+ lp->tx_first_free = OFFSET_CU;
+ lp->tx_first_in_use = I82586NULL;
+
+ for (i = 0, txblock = OFFSET_CU;
+ i < NTXBLOCKS; i++, txblock += TXBLOCKZ) {
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ tx.tx_h.ac_status = 0;
+ tx.tx_h.ac_command = acmd_transmit | AC_CFLD_I;
+ tx.tx_h.ac_link = nop_addr;
+ tx.tx_tbd_offset = tbd_addr;
+ obram_write(ioaddr, tx_addr, (unsigned char *) &tx,
+ sizeof(tx));
+
+ nop.nop_h.ac_status = 0;
+ nop.nop_h.ac_command = acmd_nop;
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, nop_addr, (unsigned char *) &nop,
+ sizeof(nop));
+
+ tbd.tbd_status = TBD_STATUS_EOF;
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd,
+ sizeof(tbd));
+ }
+
+ first_nop =
+ OFFSET_CU + (NTXBLOCKS - 1) * TXBLOCKZ + sizeof(ac_tx_t);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_cbl_offset),
+ (unsigned char *) &first_nop, sizeof(first_nop));
+
+ scb_cs = SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for (i = 1000; i > 0; i--) {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_cu_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ lp->tx_n_in_use = 0;
+ netif_start_queue(dev);
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_cu_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard configuration of the WaveLAN
+ * controller (i82586).
+ *
+ * It initialises the scp, iscp and scb structures.
+ * The first two are just pointers to the next one.
+ * The last one is used for basic configuration and for basic
+ * communication (interrupt status).
+ *
+ * (called by wv_hw_reset())
+ */
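+/*
+ * Roughly, the initialisation chain set up below is:
+ *
+ *	SCP (at I82586_SCP_ADDR) -> ISCP -> SCB
+ *
+ * with the SCB in turn holding the offsets of the command list
+ * (OFFSET_CU) and of the receive frame area (OFFSET_RU).
+ */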
+static inline int wv_82586_start(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ scp_t scp; /* system configuration pointer */
+ iscp_t iscp; /* intermediate scp */
+ scb_t scb; /* system control block */
+ ach_t cb; /* Action command header */
+ u8 zeroes[512];
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_start()\n", dev->name);
+#endif
+
+ /*
+ * Clear the onboard RAM.
+ */
+ memset(&zeroes[0], 0x00, sizeof(zeroes));
+ for (i = 0; i < I82586_MEMZ; i += sizeof(zeroes))
+ obram_write(ioaddr, i, &zeroes[0], sizeof(zeroes));
+
+ /*
+ * Construct the command unit structures:
+ * scp, iscp, scb, cb.
+ */
+ memset(&scp, 0x00, sizeof(scp));
+ scp.scp_sysbus = SCP_SY_16BBUS;
+ scp.scp_iscpl = OFFSET_ISCP;
+ obram_write(ioaddr, OFFSET_SCP, (unsigned char *) &scp,
+ sizeof(scp));
+
+ memset(&iscp, 0x00, sizeof(iscp));
+ iscp.iscp_busy = 1;
+ iscp.iscp_offset = OFFSET_SCB;
+ obram_write(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp,
+ sizeof(iscp));
+
+ /* Our first command is to reset the i82586. */
+ memset(&scb, 0x00, sizeof(scb));
+ scb.scb_command = SCB_CMD_RESET;
+ scb.scb_cbl_offset = OFFSET_CU;
+ scb.scb_rfa_offset = OFFSET_RU;
+ obram_write(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
+ sizeof(scb));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* Wait for command to finish. */
+ for (i = 1000; i > 0; i--) {
+ obram_read(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp,
+ sizeof(iscp));
+
+ if (iscp.iscp_busy == (unsigned short) 0)
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wv_82586_start(): iscp_busy timeout.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ /* Check command completion. */
+ for (i = 15; i > 0; i--) {
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
+ sizeof(scb));
+
+ if (scb.scb_status == (SCB_ST_CX | SCB_ST_CNA))
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wv_82586_start(): status: expected 0x%02x, got 0x%02x.\n",
+ dev->name, SCB_ST_CX | SCB_ST_CNA, scb.scb_status);
+#endif
+ return -1;
+ }
+
+ wv_ack(dev);
+
+ /* Set the action command header. */
+ memset(&cb, 0x00, sizeof(cb));
+ cb.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_diagnose);
+ cb.ac_link = OFFSET_CU;
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *) &cb, sizeof(cb));
+
+ if (wv_synchronous_cmd(dev, "diag()") == -1)
+ return -1;
+
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *) &cb, sizeof(cb));
+ if (cb.ac_status & AC_SFLD_FAIL) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wv_82586_start(): i82586 Self Test failed.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+#ifdef DEBUG_I82586_SHOW
+ wv_scb_show(ioaddr);
+#endif
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard configuration of the WaveLAN
+ * controller (i82586).
+ *
+ * This routine is a violent hack. We use the first free transmit block
+ * to make our configuration. In the buffer area, we create the three
+ * configuration commands (linked). We make the previous NOP point to
+ * the beginning of the buffer instead of the tx command. Afterwards,
+ * execution continues as usual to the NOP command.
+ * Note that only the last command (mc_set) will generate an interrupt.
+ *
+ * (called by wv_hw_reset(), wv_82586_reconfig(), wavelan_packet_xmit())
+ */
+static void wv_82586_config(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short cfg_addr;
+ unsigned short ias_addr;
+ unsigned short mcs_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ ac_cfg_t cfg; /* Configure action */
+ ac_ias_t ias; /* IA-setup action */
+ ac_mcs_t mcs; /* Multicast setup */
+ struct dev_mc_list *dmi;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
+#endif
+
+ /* Check nothing bad has happened */
+ if (lp->tx_n_in_use == (NTXBLOCKS - 1)) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO "%s: wv_82586_config(): Tx queue full.\n",
+ dev->name);
+#endif
+ return;
+ }
+
+ /* Calculate addresses of next block and previous block. */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if (txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if (lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the different parts of the block. */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ cfg_addr = tbd_addr + sizeof(tbd_t); /* beginning of the buffer */
+ ias_addr = cfg_addr + sizeof(cfg);
+ mcs_addr = ias_addr + sizeof(ias);
+
+ /*
+ * Transmit command
+ */
+ tx.tx_h.ac_status = 0xFFFF; /* Fake completion value */
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* Create a configure action. */
+ memset(&cfg, 0x00, sizeof(cfg));
+
+ /*
+ * For Linux we invert AC_CFG_ALOC() so as to conform
+ * to the way that net packets reach us from above.
+ * (See also ac_tx_t.)
+ *
+ * Updated from Wavelan Manual WCIN085B
+ */
+ cfg.cfg_byte_cnt =
+ AC_CFG_BYTE_CNT(sizeof(ac_cfg_t) - sizeof(ach_t));
+ cfg.cfg_fifolim = AC_CFG_FIFOLIM(4);
+ cfg.cfg_byte8 = AC_CFG_SAV_BF(1) | AC_CFG_SRDY(0);
+ cfg.cfg_byte9 = AC_CFG_ELPBCK(0) |
+ AC_CFG_ILPBCK(0) |
+ AC_CFG_PRELEN(AC_CFG_PLEN_2) |
+ AC_CFG_ALOC(1) | AC_CFG_ADDRLEN(WAVELAN_ADDR_SIZE);
+ cfg.cfg_byte10 = AC_CFG_BOFMET(1) |
+ AC_CFG_ACR(6) | AC_CFG_LINPRIO(0);
+ cfg.cfg_ifs = 0x20;
+ cfg.cfg_slotl = 0x0C;
+ cfg.cfg_byte13 = AC_CFG_RETRYNUM(15) | AC_CFG_SLTTMHI(0);
+ cfg.cfg_byte14 = AC_CFG_FLGPAD(0) |
+ AC_CFG_BTSTF(0) |
+ AC_CFG_CRC16(0) |
+ AC_CFG_NCRC(0) |
+ AC_CFG_TNCRS(1) |
+ AC_CFG_MANCH(0) |
+ AC_CFG_BCDIS(0) | AC_CFG_PRM(lp->promiscuous);
+ cfg.cfg_byte15 = AC_CFG_ICDS(0) |
+ AC_CFG_CDTF(0) | AC_CFG_ICSS(0) | AC_CFG_CSTF(0);
+/*
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(64);
+*/
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(8);
+
+ cfg.cfg_h.ac_command = (AC_CFLD_CMD & acmd_configure);
+ cfg.cfg_h.ac_link = ias_addr;
+ obram_write(ioaddr, cfg_addr, (unsigned char *) &cfg, sizeof(cfg));
+
+ /* Set up the MAC address */
+ memset(&ias, 0x00, sizeof(ias));
+ ias.ias_h.ac_command = (AC_CFLD_CMD & acmd_ia_setup);
+ ias.ias_h.ac_link = mcs_addr;
+ memcpy(&ias.ias_addr[0], (unsigned char *) &dev->dev_addr[0],
+ sizeof(ias.ias_addr));
+ obram_write(ioaddr, ias_addr, (unsigned char *) &ias, sizeof(ias));
+
+ /* Initialize adapter's Ethernet multicast addresses */
+ memset(&mcs, 0x00, sizeof(mcs));
+ mcs.mcs_h.ac_command = AC_CFLD_I | (AC_CFLD_CMD & acmd_mc_setup);
+ mcs.mcs_h.ac_link = nop_addr;
+ mcs.mcs_cnt = WAVELAN_ADDR_SIZE * lp->mc_count;
+ obram_write(ioaddr, mcs_addr, (unsigned char *) &mcs, sizeof(mcs));
+
+ /* Any address to set? */
+ if (lp->mc_count) {
+ for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
+ WAVELAN_ADDR_SIZE >> 1);
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG
+ "%s: wv_82586_config(): set %d multicast addresses:\n",
+ dev->name, lp->mc_count);
+ for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ printk(KERN_DEBUG
+ " %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dmi->dmi_addr[0], dmi->dmi_addr[1],
+ dmi->dmi_addr[2], dmi->dmi_addr[3],
+ dmi->dmi_addr[4], dmi->dmi_addr[5]);
+#endif
+ }
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to the configure action.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = cfg_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* Job done, clear the flag */
+ lp->reconfig_82586 = 0;
+
+ if (lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if (lp->tx_n_in_use == (NTXBLOCKS - 1))
+ netif_stop_queue(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_config()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine gracefully stops the WaveLAN controller (i82586).
+ * (called by wavelan_close())
+ */
+static inline void wv_82586_stop(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 scb_cmd;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_stop()\n", dev->name);
+#endif
+
+ /* Suspend both command unit and receive unit. */
+ scb_cmd =
+ (SCB_CMD_CUC & SCB_CMD_CUC_SUS) | (SCB_CMD_RUC &
+ SCB_CMD_RUC_SUS);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cmd, sizeof(scb_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* No more interrupts */
+ wv_ints_off(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_stop()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Totally reset the WaveLAN and restart it.
+ * Performs the following actions:
+ * 1. A power reset (reset DMA)
+ * 2. Initialize the radio modem (using wv_mmc_init)
+ * 3. Reset & Configure LAN controller (using wv_82586_start)
+ * 4. Start the LAN controller's command unit
+ * 5. Start the LAN controller's receive unit
+ * (called by wavelan_interrupt(), wavelan_watchdog() & wavelan_open())
+ */
+static int wv_hw_reset(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_hw_reset(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* Increase the number of resets done. */
+ lp->nresets++;
+
+ wv_hacr_reset(ioaddr);
+ lp->hacr = HACR_DEFAULT;
+
+ if ((wv_mmc_init(dev) < 0) || (wv_82586_start(dev) < 0))
+ return -1;
+
+ /* Enable the card to send interrupts. */
+ wv_ints_on(dev);
+
+ /* Start card functions */
+ if (wv_cu_start(dev) < 0)
+ return -1;
+
+ /* Setup the controller and parameters */
+ wv_82586_config(dev);
+
+ /* Finish configuration with the receive unit */
+ if (wv_ru_start(dev) < 0)
+ return -1;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check if there is a WaveLAN at the specific base address.
+ * As a side effect, this reads the MAC address.
+ * (called in wavelan_probe() and init_module())
+ */
+static int wv_check_ioaddr(unsigned long ioaddr, u8 * mac)
+{
+ int i; /* Loop counter */
+
+	/* Check whether the base address is available. */
+ if (!request_region(ioaddr, sizeof(ha_t), "wavelan probe"))
+ return -EBUSY; /* ioaddr already used */
+
+ /* Reset host interface */
+ wv_hacr_reset(ioaddr);
+
+ /* Read the MAC address from the parameter storage area. */
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_univ_mac_addr),
+ mac, 6);
+
+ release_region(ioaddr, sizeof(ha_t));
+
+ /*
+ * Check the first three octets of the address for the manufacturer's code.
+ * Note: if this can't find your WaveLAN card, you've got a
+	 * non-NCR/AT&T/Lucent ISA card. See wavelan.p.h for details on
+ * how to configure your card.
+ */
+ for (i = 0; i < (sizeof(MAC_ADDRESSES) / sizeof(char) / 3); i++)
+ if ((mac[0] == MAC_ADDRESSES[i][0]) &&
+ (mac[1] == MAC_ADDRESSES[i][1]) &&
+ (mac[2] == MAC_ADDRESSES[i][2]))
+ return 0;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_WARNING
+ "WaveLAN (0x%3X): your MAC address might be %02X:%02X:%02X.\n",
+ ioaddr, mac[0], mac[1], mac[2]);
+#endif
+ return -ENODEV;
+}
+
+/************************ INTERRUPT HANDLING ************************/
+
+/*
+ * This function is the interrupt handler for the WaveLAN card. This
+ * routine is called whenever the card raises an interrupt, either
+ * from the i82586 LAN controller or from the modem management
+ * controller (MMC).
+ */
+static irqreturn_t wavelan_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev;
+ unsigned long ioaddr;
+ net_local *lp;
+ u16 hasr;
+ u16 status;
+ u16 ack_cmd;
+
+ dev = dev_id;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
+#endif
+
+ lp = (net_local *) dev->priv;
+ ioaddr = dev->base_addr;
+
+#ifdef DEBUG_INTERRUPT_INFO
+ /* Check state of our spinlock */
+ if(spin_is_locked(&lp->spinlock))
+ printk(KERN_DEBUG
+ "%s: wavelan_interrupt(): spinlock is already locked !!!\n",
+ dev->name);
+#endif
+
+ /* Prevent reentrancy. We need to do that because we may have
+	 * multiple interrupt handlers running concurrently.
+ * It is safe because interrupts are disabled before acquiring
+ * the spinlock. */
+ spin_lock(&lp->spinlock);
+
+ /* We always had spurious interrupts at startup, but lately I
+	 * saw them coming *between* the request_irq() and the
+	 * spin_lock_irqsave() in wavelan_open(), so the spinlock
+	 * protection is not enough.
+	 * So, we also check lp->hacr, which tells us whether we enabled
+ * irqs or not (see wv_ints_on()).
+ * We can't use netif_running(dev) because we depend on the
+ * proper processing of the irq generated during the config. */
+
+	/* Which interrupt is it? */
+ hasr = hasr_read(ioaddr);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_INFO
+ "%s: wavelan_interrupt(): hasr 0x%04x; hacr 0x%04x.\n",
+ dev->name, hasr, lp->hacr);
+#endif
+
+ /* Check modem interrupt */
+ if ((hasr & HASR_MMC_INTR) && (lp->hacr & HACR_MMC_INT_ENABLE)) {
+ u8 dce_status;
+
+ /*
+ * Interrupt from the modem management controller.
+ * This will clear it -- ignored for now.
+ */
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &dce_status,
+ sizeof(dce_status));
+
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_interrupt(): unexpected mmc interrupt: status 0x%04x.\n",
+ dev->name, dce_status);
+#endif
+ }
+
+ /* Check if not controller interrupt */
+ if (((hasr & HASR_82586_INTR) == 0) ||
+ ((lp->hacr & HACR_82586_INT_ENABLE) == 0)) {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_interrupt(): interrupt not coming from i82586 - hasr 0x%04x.\n",
+ dev->name, hasr);
+#endif
+ spin_unlock (&lp->spinlock);
+ return IRQ_NONE;
+ }
+
+ /* Read interrupt data. */
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &status, sizeof(status));
+
+ /*
+ * Acknowledge the interrupt(s).
+ */
+ ack_cmd = status & SCB_ST_INT;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &ack_cmd, sizeof(ack_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): status 0x%04x.\n",
+ dev->name, status);
+#endif
+
+ /* Command completed. */
+ if ((status & SCB_ST_CX) == SCB_ST_CX) {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG
+ "%s: wavelan_interrupt(): command completed.\n",
+ dev->name);
+#endif
+ wv_complete(dev, ioaddr, lp);
+ }
+
+ /* Frame received. */
+ if ((status & SCB_ST_FR) == SCB_ST_FR) {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG
+ "%s: wavelan_interrupt(): received packet.\n",
+ dev->name);
+#endif
+ wv_receive(dev);
+ }
+
+ /* Check the state of the command unit. */
+ if (((status & SCB_ST_CNA) == SCB_ST_CNA) ||
+ (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) &&
+ (netif_running(dev)))) {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_interrupt(): CU inactive -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+	/* Check the state of the receive unit. */
+ if (((status & SCB_ST_RNR) == SCB_ST_RNR) ||
+ (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) &&
+ (netif_running(dev)))) {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_interrupt(): RU not ready -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+ /* Release spinlock */
+ spin_unlock (&lp->spinlock);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
+#endif
+ return IRQ_HANDLED;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog: when we start a transmission, a timer is set for us in the
+ * kernel. If the transmission completes, this timer is disabled. If
+ * the timer expires, we are called and we try to unlock the hardware.
+ */
+static void wavelan_watchdog(struct net_device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ unsigned long flags;
+ unsigned int nreaped;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
+#endif
+
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
+ dev->name);
+#endif
+
+ /* Check that we came here for something */
+ if (lp->tx_n_in_use <= 0) {
+ return;
+ }
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+	/* Try to free up Tx buffers that have completed (in case we
+	 * missed an interrupt). */
+ nreaped = wv_complete(dev, ioaddr, lp);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG
+ "%s: wavelan_watchdog(): %d reaped, %d remain.\n",
+ dev->name, nreaped, lp->tx_n_in_use);
+#endif
+
+#ifdef DEBUG_PSA_SHOW
+ {
+ psa_t psa;
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+ wv_psa_show(&psa);
+ }
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_cu_show(dev);
+#endif
+
+ /* If no buffer has been freed */
+ if (nreaped == 0) {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_watchdog(): cleanup failed, trying reset\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+ /* At this point, we should have some free Tx buffer ;-) */
+ if (lp->tx_n_in_use < NTXBLOCKS - 1)
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
+#endif
+}
+
+/********************* CONFIGURATION CALLBACKS *********************/
+/*
+ * Here are the functions called by the Linux networking code (NET3)
+ * for initialization, configuration and deinstallations of the
+ * WaveLAN ISA hardware.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Configure and start up the WaveLAN ISA card.
+ * Called by NET3 when it "opens" the device.
+ */
+static int wavelan_open(struct net_device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ unsigned long flags;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* Check irq */
+ if (dev->irq == 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_open(): no IRQ\n",
+ dev->name);
+#endif
+ return -ENXIO;
+ }
+
+ if (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", dev) != 0)
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n",
+ dev->name);
+#endif
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ if (wv_hw_reset(dev) != -1) {
+ netif_start_queue(dev);
+ } else {
+ free_irq(dev->irq, dev);
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_open(): impossible to start the card\n",
+ dev->name);
+#endif
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Shut down the WaveLAN ISA card.
+ * Called by NET3 when it "closes" the device.
+ */
+static int wavelan_close(struct net_device * dev)
+{
+ net_local *lp = (net_local *) dev->priv;
+ unsigned long flags;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ netif_stop_queue(dev);
+
+ /*
+ * Flush the Tx and disable Rx.
+ */
+ spin_lock_irqsave(&lp->spinlock, flags);
+ wv_82586_stop(dev);
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ free_irq(dev->irq, dev);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Probe an I/O address and, if a WaveLAN is there, configure the
+ * device structure.
+ * (called by wavelan_probe() and via init_module()).
+ */
+static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
+{
+ u8 irq_mask;
+ int irq;
+ net_local *lp;
+ mac_addr mac;
+ int err;
+
+ if (!request_region(ioaddr, sizeof(ha_t), "wavelan"))
+ return -EADDRINUSE;
+
+ err = wv_check_ioaddr(ioaddr, mac);
+ if (err)
+ goto out;
+
+ memcpy(dev->dev_addr, mac, 6);
+
+ dev->base_addr = ioaddr;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_config(dev=0x%x, ioaddr=0x%lx)\n",
+ dev->name, (unsigned int) dev, ioaddr);
+#endif
+
+ /* Check IRQ argument on command line. */
+ if (dev->irq != 0) {
+ irq_mask = wv_irq_to_psa(dev->irq);
+
+ if (irq_mask == 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING
+ "%s: wavelan_config(): invalid IRQ %d ignored.\n",
+ dev->name, dev->irq);
+#endif
+ dev->irq = 0;
+ } else {
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG
+ "%s: wavelan_config(): changing IRQ to %d\n",
+ dev->name, dev->irq);
+#endif
+ psa_write(ioaddr, HACR_DEFAULT,
+ psaoff(0, psa_int_req_no), &irq_mask, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev, ioaddr, HACR_DEFAULT);
+ wv_hacr_reset(ioaddr);
+ }
+ }
+
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_int_req_no),
+ &irq_mask, 1);
+ if ((irq = wv_psa_to_irq(irq_mask)) == -1) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO
+ "%s: wavelan_config(): could not wavelan_map_irq(%d).\n",
+ dev->name, irq_mask);
+#endif
+ err = -EAGAIN;
+ goto out;
+ }
+
+ dev->irq = irq;
+
+ dev->mem_start = 0x0000;
+ dev->mem_end = 0x0000;
+ dev->if_port = 0;
+
+ /* Initialize device structures */
+ memset(dev->priv, 0, sizeof(net_local));
+ lp = (net_local *) dev->priv;
+
+ /* Back link to the device structure. */
+ lp->dev = dev;
+ /* Add the device at the beginning of the linked list. */
+ lp->next = wavelan_list;
+ wavelan_list = lp;
+
+ lp->hacr = HACR_DEFAULT;
+
+ /* Multicast stuff */
+ lp->promiscuous = 0;
+ lp->mc_count = 0;
+
+ /* Init spinlock */
+ spin_lock_init(&lp->spinlock);
+
+ SET_MODULE_OWNER(dev);
+ dev->open = wavelan_open;
+ dev->stop = wavelan_close;
+ dev->hard_start_xmit = wavelan_packet_xmit;
+ dev->get_stats = wavelan_get_stats;
+ dev->set_multicast_list = &wavelan_set_multicast_list;
+ dev->tx_timeout = &wavelan_watchdog;
+ dev->watchdog_timeo = WATCHDOG_JIFFIES;
+#ifdef SET_MAC_ADDRESS
+ dev->set_mac_address = &wavelan_set_mac_address;
+#endif /* SET_MAC_ADDRESS */
+
+#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
+ dev->wireless_handlers = &wavelan_handler_def;
+ lp->wireless_data.spy_data = &lp->spy_data;
+ dev->wireless_data = &lp->wireless_data;
+#endif
+
+ dev->mtu = WAVELAN_MTU;
+
+ /* Display nice information. */
+ wv_init_info(dev);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_config()\n", dev->name);
+#endif
+ return 0;
+out:
+ release_region(ioaddr, sizeof(ha_t));
+ return err;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check for a network adaptor of this type. Return the device if
+ * one exists, or an error pointer otherwise. There seem to be
+ * different interpretations of the initial value of dev->base_addr.
+ * We follow the example in drivers/net/ne.c.
+ * (called in "Space.c")
+ */
+struct net_device * __init wavelan_probe(int unit)
+{
+ struct net_device *dev;
+ short base_addr;
+ int def_irq;
+ int i;
+ int r = 0;
+
+#ifdef STRUCT_CHECK
+ if (wv_struct_check() != (char *) NULL) {
+		printk(KERN_WARNING
+		       "wavelan_probe(): structure/compiler botch: \"%s\"\n",
+		       wv_struct_check());
+		return ERR_PTR(-ENODEV);
+ }
+#endif /* STRUCT_CHECK */
+
+ dev = alloc_etherdev(sizeof(net_local));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ base_addr = dev->base_addr;
+ def_irq = dev->irq;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG
+ "%s: ->wavelan_probe(dev=%p (base_addr=0x%x))\n",
+ dev->name, dev, (unsigned int) dev->base_addr);
+#endif
+
+ /* Don't probe at all. */
+ if (base_addr < 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING
+ "%s: wavelan_probe(): invalid base address\n",
+ dev->name);
+#endif
+ r = -ENXIO;
+ } else if (base_addr > 0x100) { /* Check a single specified location. */
+ r = wavelan_config(dev, base_addr);
+#ifdef DEBUG_CONFIG_INFO
+ if (r != 0)
+ printk(KERN_DEBUG
+ "%s: wavelan_probe(): no device at specified base address (0x%X) or address already in use\n",
+ dev->name, base_addr);
+#endif
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
+#endif
+ } else { /* Scan all possible addresses of the WaveLAN hardware. */
+ for (i = 0; i < NELS(iobase); i++) {
+ dev->irq = def_irq;
+ if (wavelan_config(dev, iobase[i]) == 0) {
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG
+ "%s: <-wavelan_probe()\n",
+ dev->name);
+#endif
+ break;
+ }
+ }
+ if (i == NELS(iobase))
+ r = -ENODEV;
+ }
+ if (r)
+ goto out;
+ r = register_netdev(dev);
+ if (r)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, sizeof(ha_t));
+ wavelan_list = wavelan_list->next;
+out:
+ free_netdev(dev);
+ return ERR_PTR(r);
+}
+
+/****************************** MODULE ******************************/
+/*
+ * Module entry point: insertion and removal
+ */
+
+#ifdef MODULE
+/*------------------------------------------------------------------*/
+/*
+ * Insertion of the module
+ * I'm now quite proud of the multi-device support.
+ */
+int init_module(void)
+{
+ int ret = -EIO; /* Return error if no cards found */
+ int i;
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> init_module()\n");
+#endif
+
+	/* If probing is requested */
+ if (io[0] == 0) {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING
+ "WaveLAN init_module(): doing device probing (bad !)\n");
+ printk(KERN_WARNING
+ "Specify base addresses while loading module to correct the problem\n");
+#endif
+
+		/* Copy the basic set of addresses to be probed. */
+ for (i = 0; i < NELS(iobase); i++)
+ io[i] = iobase[i];
+ }
+
+
+ /* Loop on all possible base addresses. */
+ i = -1;
+ while ((io[++i] != 0) && (i < NELS(io))) {
+ struct net_device *dev = alloc_etherdev(sizeof(net_local));
+ if (!dev)
+ break;
+ if (name[i])
+ strcpy(dev->name, name[i]); /* Copy name */
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+
+ /* Check if there is something at this base address. */
+ if (wavelan_config(dev, io[i]) == 0) {
+ if (register_netdev(dev) != 0) {
+ release_region(dev->base_addr, sizeof(ha_t));
+ wavelan_list = wavelan_list->next;
+ } else {
+ ret = 0;
+ continue;
+ }
+ }
+ free_netdev(dev);
+ }
+
+#ifdef DEBUG_CONFIG_ERROR
+ if (!wavelan_list)
+ printk(KERN_WARNING
+ "WaveLAN init_module(): no device found\n");
+#endif
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- init_module()\n");
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Removal of the module
+ */
+void cleanup_module(void)
+{
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> cleanup_module()\n");
+#endif
+
+ /* Loop on all devices and release them. */
+ while (wavelan_list) {
+ struct net_device *dev = wavelan_list->dev;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG
+ "%s: cleanup_module(): removing device at 0x%x\n",
+ dev->name, (unsigned int) dev);
+#endif
+ unregister_netdev(dev);
+
+ release_region(dev->base_addr, sizeof(ha_t));
+ wavelan_list = wavelan_list->next;
+
+ free_netdev(dev);
+ }
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- cleanup_module()\n");
+#endif
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU General Public License.
+ *
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@scyld.com),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (alan@redhat.com),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Please send bug reports, updates, comments to:
+ *
+ * Bruce Janson Email: bruce@cs.usyd.edu.au
+ * Basser Department of Computer Science Phone: +61-2-9351-3423
+ * University of Sydney, N.S.W., 2006, AUSTRALIA Fax: +61-2-9351-3838
+ */
diff --git a/drivers/net/wireless/wavelan.h b/drivers/net/wireless/wavelan.h
new file mode 100644
index 000000000000..27172cde5a39
--- /dev/null
+++ b/drivers/net/wireless/wavelan.h
@@ -0,0 +1,370 @@
+/*
+ * WaveLAN ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows. See wavelan.p.h for details.
+ *
+ * This file contains the declarations for the WaveLAN hardware. Note that
+ * the WaveLAN ISA includes a i82586 controller (see definitions in
+ * file i82586.h).
+ *
+ * The main difference between the ISA hardware and the PCMCIA one is
+ * the Ethernet controller (i82586 instead of i82593).
+ * The i82586 allows multiple transmit buffers. The PSA needs to be accessed
+ * through the host interface.
+ */
+
+#ifndef _WAVELAN_H
+#define _WAVELAN_H
+
+/************************** MAGIC NUMBERS ***************************/
+
+/* Detection of the WaveLAN card is done by reading the MAC
+ * address from the card and checking it. If you have a non-AT&T
+ * product (OEM, like DEC RoamAbout, Digital Ocean, or Epson),
+ * you might need to modify this part to accommodate your hardware.
+ */
+static const char MAC_ADDRESSES[][3] =
+{
+ { 0x08, 0x00, 0x0E }, /* AT&T WaveLAN (standard) & DEC RoamAbout */
+ { 0x08, 0x00, 0x6A }, /* AT&T WaveLAN (alternate) */
+ { 0x00, 0x00, 0xE1 }, /* Hitachi Wavelan */
+ { 0x00, 0x60, 0x1D } /* Lucent Wavelan (another one) */
+ /* Add your card here and send me the patch! */
+};
+
+#define WAVELAN_ADDR_SIZE 6 /* Size of a MAC address */
+
+#define WAVELAN_MTU 1500 /* Maximum size of WaveLAN packet */
+
+#define MAXDATAZ (WAVELAN_ADDR_SIZE + WAVELAN_ADDR_SIZE + 2 + WAVELAN_MTU)
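+/* With the values above this works out to 6 + 6 + 2 + 1500 = 1514 bytes:
+ * destination address, source address, type/length field and payload,
+ * i.e. a maximum-size Ethernet frame without the FCS. */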
+
+/*
+ * Constants used to convert channels to frequencies
+ */
+
+/* Frequencies available in the 2.0 modem, in units of 250 kHz
+ * (as read in the offset register of the dac area).
+ * Used to map the channel numbers used by `wfreqsel' to frequencies.
+ */
+static const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
+ 0xD0, 0xF0, 0xF8, 0x150 };
+
+/* Frequencies of the 1.0 modem (fixed frequencies).
+ * Used to map the PSA `subband' to a frequency.
+ * Note: all frequencies apart from the first one need to be multiplied by 10.
+ */
+static const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
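+/* For instance, fixed_bands[PSA_SUBBAND_2425] is 2.425e8; with the
+ * factor of 10 noted above that is 2425 MHz. The first entry, 915e6,
+ * is already the 915 MHz band in Hz. */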
+
+
+
+/*************************** PC INTERFACE ****************************/
+
+/*
+ * Host Adaptor structure.
+ * (base is board port address).
+ */
+typedef union hacs_u hacs_u;
+union hacs_u
+{
+ unsigned short hu_command; /* Command register */
+#define HACR_RESET 0x0001 /* Reset board */
+#define HACR_CA 0x0002 /* Set Channel Attention for 82586 */
+#define HACR_16BITS 0x0004 /* 16-bit operation (0 => 8bits) */
+#define HACR_OUT0 0x0008 /* General purpose output pin 0 */
+ /* not used - must be 1 */
+#define HACR_OUT1 0x0010 /* General purpose output pin 1 */
+ /* not used - must be 1 */
+#define HACR_82586_INT_ENABLE 0x0020 /* Enable 82586 interrupts */
+#define HACR_MMC_INT_ENABLE 0x0040 /* Enable MMC interrupts */
+#define HACR_INTR_CLR_ENABLE 0x0080 /* Enable interrupt status read/clear */
+ unsigned short hu_status; /* Status Register */
+#define HASR_82586_INTR 0x0001 /* Interrupt request from 82586 */
+#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
+#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
+#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
+};
+
+typedef struct ha_t ha_t;
+struct ha_t
+{
+ hacs_u ha_cs; /* Command and status registers */
+#define ha_command ha_cs.hu_command
+#define ha_status ha_cs.hu_status
+ unsigned short ha_mmcr; /* Modem Management Ctrl Register */
+ unsigned short ha_pior0; /* Program I/O Address Register Port 0 */
+ unsigned short ha_piop0; /* Program I/O Port 0 */
+ unsigned short ha_pior1; /* Program I/O Address Register Port 1 */
+ unsigned short ha_piop1; /* Program I/O Port 1 */
+ unsigned short ha_pior2; /* Program I/O Address Register Port 2 */
+ unsigned short ha_piop2; /* Program I/O Port 2 */
+};
+
+#define HA_SIZE 16
+
+#define hoff(p,f) (unsigned short)((void *)(&((ha_t *)((void *)0 + (p)))->f) - (void *)0)
+#define HACR(p) hoff(p, ha_command)
+#define HASR(p) hoff(p, ha_status)
+#define MMCR(p) hoff(p, ha_mmcr)
+#define PIOR0(p) hoff(p, ha_pior0)
+#define PIOP0(p) hoff(p, ha_piop0)
+#define PIOR1(p) hoff(p, ha_pior1)
+#define PIOP1(p) hoff(p, ha_piop1)
+#define PIOR2(p) hoff(p, ha_pior2)
+#define PIOP2(p) hoff(p, ha_piop2)
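+
+/* hoff() is just offsetof() in disguise: e.g. with the layout above,
+ * PIOP1(base) == base + 10, the I/O address of program I/O port 1. */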
+
+/*
+ * Program I/O Mode Register values.
+ */
+#define STATIC_PIO 0 /* Mode 1: static mode */
+ /* RAM access ??? */
+#define AUTOINCR_PIO 1 /* Mode 2: auto increment mode */
+ /* RAM access ??? */
+#define AUTODECR_PIO 2 /* Mode 3: auto decrement mode */
+ /* RAM access ??? */
+#define PARAM_ACCESS_PIO 3 /* Mode 4: LAN parameter access mode */
+ /* Parameter access. */
+#define PIO_MASK 3 /* register mask */
+#define PIOM(cmd,piono) ((u_short)cmd << 10 << (piono * 2))
+
+#define HACR_DEFAULT (HACR_OUT0 | HACR_OUT1 | HACR_16BITS | PIOM(STATIC_PIO, 0) | PIOM(AUTOINCR_PIO, 1) | PIOM(PARAM_ACCESS_PIO, 2))
+#define HACR_INTRON (HACR_82586_INT_ENABLE | HACR_MMC_INT_ENABLE | HACR_INTR_CLR_ENABLE)
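+
+/* PIOM() places a 2-bit PIO mode in the command register: port 0 in
+ * bits 10-11, port 1 in bits 12-13, port 2 in bits 14-15. For example
+ * PIOM(AUTOINCR_PIO, 1) == 1 << 10 << 2 == 0x1000, so HACR_DEFAULT
+ * selects static mode on port 0, auto-increment on port 1 and
+ * parameter access on port 2. */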
+
+/************************** MEMORY LAYOUT **************************/
+
+/*
+ * Onboard 64 k RAM layout.
+ * (Offsets from 0x0000.)
+ */
+#define OFFSET_RU 0x0000 /* 75% memory */
+#define OFFSET_CU 0xC000 /* 25% memory */
+#define OFFSET_SCB (OFFSET_ISCP - sizeof(scb_t))
+#define OFFSET_ISCP (OFFSET_SCP - sizeof(iscp_t))
+#define OFFSET_SCP I82586_SCP_ADDR
+
+#define RXBLOCKZ (sizeof(fd_t) + sizeof(rbd_t) + MAXDATAZ)
+#define TXBLOCKZ (sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t) + MAXDATAZ)
+
+#define NRXBLOCKS ((OFFSET_CU - OFFSET_RU) / RXBLOCKZ)
+#define NTXBLOCKS ((OFFSET_SCB - OFFSET_CU) / TXBLOCKZ)
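+
+/* In other words, of the 64 KB of onboard RAM, the receive unit gets
+ * the 48 KB below 0xC000 and the command (transmit) unit gets the rest,
+ * minus the scp/iscp/scb control structures at the top; the block
+ * counts simply divide each area by the block size. */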
+
+/********************** PARAMETER STORAGE AREA **********************/
+
+/*
+ * Parameter Storage Area (PSA).
+ */
+typedef struct psa_t psa_t;
+struct psa_t
+{
+ unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
+ unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
+ unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
+ unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
+ unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
+ unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
+ unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
+ unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
+ unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
+ unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
+
+ unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
+	unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE];	/* [0x16-0x1B] Local MAC Address */
+ unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
+#define PSA_UNIVERSAL 0 /* Universal (factory) */
+#define PSA_LOCAL 1 /* Local */
+ unsigned char psa_comp_number; /* [0x1D] Compatibility Number: */
+#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
+#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
+#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
+#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
+#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
+ unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
+ unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
+#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
+ unsigned char psa_subband; /* [0x20] Subband */
+#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
+#define PSA_SUBBAND_2425 1 /* 2425 MHz */
+#define PSA_SUBBAND_2460 2 /* 2460 MHz */
+#define PSA_SUBBAND_2484 3 /* 2484 MHz */
+#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
+ unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
+ unsigned char psa_mod_delay; /* [0x22] Modem Delay (?) (reserved) */
+ unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
+ unsigned char psa_nwid_select; /* [0x25] Network ID Select On/Off */
+ unsigned char psa_encryption_select; /* [0x26] Encryption On/Off */
+ unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
+ unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
+ unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
+ unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
+ unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
+ unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
+	unsigned char psa_crc[2];	/* [0x3D-0x3E] CRC-16 over PSA */
+ unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
+};
+
+#define PSA_SIZE 64
+
+/* Calculate offset of a field in the above structure.
+ * Warning: only even addresses are used. */
+#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
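+
+/* psaoff(p, f) is p plus the offset of field f in psa_t; for example
+ * psaoff(0, psa_int_req_no) == 0x08, matching the offsets noted in the
+ * comments above. */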
+
+/******************** MODEM MANAGEMENT INTERFACE ********************/
+
+/*
+ * Modem Management Controller (MMC) write structure.
+ */
+typedef struct mmw_t mmw_t;
+struct mmw_t
+{
+ unsigned char mmw_encr_key[8]; /* encryption key */
+ unsigned char mmw_encr_enable; /* Enable or disable encryption. */
+#define MMW_ENCR_ENABLE_MODE 0x02 /* mode of security option */
+#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option. */
+ unsigned char mmw_unused0[1]; /* unused */
+ unsigned char mmw_des_io_invert; /* encryption option */
+#define MMW_DES_IO_INVERT_RES 0x0F /* reserved */
+#define MMW_DES_IO_INVERT_CTRL 0xF0 /* control (?) (set to 0) */
+ unsigned char mmw_unused1[5]; /* unused */
+ unsigned char mmw_loopt_sel; /* looptest selection */
+#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* Disable NWID filtering. */
+#define MMW_LOOPT_SEL_INT 0x20 /* Activate Attention Request. */
+#define MMW_LOOPT_SEL_LS 0x10 /* looptest, no collision avoidance */
+#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
+#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
+#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
+#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
+ unsigned char mmw_jabber_enable; /* jabber timer enable */
+ /* Abort transmissions > 200 ms */
+ unsigned char mmw_freeze; /* freeze or unfreeze signal level */
+ /* 0 : signal level & qual updated for every new message, 1 : frozen */
+ unsigned char mmw_anten_sel; /* antenna selection */
+#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
+#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
+ unsigned char mmw_ifs; /* inter frame spacing */
+ /* min time between transmission in bit periods (.5 us) - bit 0 ignored */
+ unsigned char mmw_mod_delay; /* modem delay (synchro) */
+ unsigned char mmw_jam_time; /* jamming time (after collision) */
+ unsigned char mmw_unused2[1]; /* unused */
+ unsigned char mmw_thr_pre_set; /* level threshold preset */
+ /* Discard all packet with signal < this value (4) */
+ unsigned char mmw_decay_prm; /* decay parameters */
+ unsigned char mmw_decay_updat_prm; /* decay update parameters */
+ unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
+ /* Discard all packet with quality < this value (3) */
+ unsigned char mmw_netw_id_l; /* NWID low order byte */
+ unsigned char mmw_netw_id_h; /* NWID high order byte */
+ /* Network ID or Domain : create virtual net on the air */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmw_mode_select; /* for analog tests (set to 0) */
+ unsigned char mmw_unused3[1]; /* unused */
+ unsigned char mmw_fee_ctrl; /* frequency EEPROM control */
+#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions. */
+#define MMW_FEE_CTRL_DWLD 0x08 /* Download EEPROM to mmc. */
+#define MMW_FEE_CTRL_CMD 0x07 /* EEPROM commands: */
+#define MMW_FEE_CTRL_READ 0x06 /* Read */
+#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
+#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address. */
+#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses. */
+#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
+#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
+#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
+#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers. */
+#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write address in protect register */
+#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
+ /* Never issue the PRDS command: it's irreversible! */
+
+ unsigned char mmw_fee_addr; /* EEPROM address */
+#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel. */
+#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
+#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
+#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
+#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
+#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
+
+ unsigned char mmw_fee_data_l; /* Write data to EEPROM. */
+ unsigned char mmw_fee_data_h; /* high octet */
+ unsigned char mmw_ext_ant; /* Setting for external antenna */
+#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
+#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
+#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
+#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
+#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
+};
+
+#define MMW_SIZE 37
+
+#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * Modem Management Controller (MMC) read structure.
+ */
+typedef struct mmr_t mmr_t;
+struct mmr_t
+{
+ unsigned char mmr_unused0[8]; /* unused */
+ unsigned char mmr_des_status; /* encryption status */
+ unsigned char mmr_des_avail; /* encryption available (0x55 read) */
+#define MMR_DES_AVAIL_DES 0x55 /* DES available */
+#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
+ unsigned char mmr_des_io_invert; /* des I/O invert register */
+ unsigned char mmr_unused1[5]; /* unused */
+ unsigned char mmr_dce_status; /* DCE status */
+#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
+#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
+#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
+#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
+#define MMR_DCE_STATUS 0x0F /* mask to get the bits */
+ unsigned char mmr_dsp_id; /* DSP ID (AA = Daedalus rev A) */
+ unsigned char mmr_unused2[2]; /* unused */
+ unsigned char mmr_correct_nwid_l; /* # of correct NWIDs rxd (low) */
+ unsigned char mmr_correct_nwid_h; /* # of correct NWIDs rxd (high) */
+ /* Warning: read high-order octet first! */
+ unsigned char mmr_wrong_nwid_l; /* # of wrong NWIDs rxd (low) */
+ unsigned char mmr_wrong_nwid_h; /* # of wrong NWIDs rxd (high) */
+ unsigned char mmr_thr_pre_set; /* level threshold preset */
+#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
+#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
+ unsigned char mmr_signal_lvl; /* signal level */
+#define MMR_SIGNAL_LVL 0x3F /* signal level */
+#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_silence_lvl; /* silence level (noise) */
+#define MMR_SILENCE_LVL 0x3F /* silence level */
+#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_sgnl_qual; /* signal quality */
+#define MMR_SGNL_QUAL 0x0F /* signal quality */
+#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
+ unsigned char mmr_netw_id_l; /* NWID low order byte (?) */
+ unsigned char mmr_unused3[3]; /* unused */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmr_fee_status; /* Status of frequency EEPROM */
+#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision ID */
+#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
+#define MMR_FEE_STATUS_BUSY 0x04 /* EEPROM busy */
+ unsigned char mmr_unused4[1]; /* unused */
+ unsigned char mmr_fee_data_l; /* Read data from EEPROM (low) */
+ unsigned char mmr_fee_data_h; /* Read data from EEPROM (high) */
+};
+
+#define MMR_SIZE 36
+
+#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/* Make the two above structures one */
+typedef union mm_t
+{
+ struct mmw_t w; /* Write to the mmc */
+ struct mmr_t r; /* Read from the mmc */
+} mm_t;
+
+#endif /* _WAVELAN_H */
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU General Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
new file mode 100644
index 000000000000..509ff22a6caa
--- /dev/null
+++ b/drivers/net/wireless/wavelan.p.h
@@ -0,0 +1,716 @@
+/*
+ * WaveLAN ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contains all definitions and declarations necessary for the
+ * WaveLAN ISA driver. This file is a private header, so it should
+ * be included only in wavelan.c!
+ */
+
+#ifndef WAVELAN_P_H
+#define WAVELAN_P_H
+
+/************************** DOCUMENTATION ***************************/
+/*
+ * This driver provides a Linux interface to the WaveLAN ISA hardware.
+ * The WaveLAN is a product of Lucent (http://www.wavelan.com/).
+ * This division was formerly part of NCR and then AT&T.
+ * WaveLANs are also distributed by DEC (RoamAbout DS) and Digital Ocean.
+ *
+ * To learn how to use this driver, read the NET3 HOWTO.
+ * If you want to exploit the many other functionalities, read the comments
+ * in the code.
+ *
+ * This driver is the result of the effort of many people (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * Web page
+ * --------
+ * I try to maintain a web page with the Wireless LAN Howto at :
+ * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Wavelan.html
+ *
+ * SMP
+ * ---
+ * We now are SMP compliant (I eventually fixed the remaining bugs).
+ * The driver has been tested on a dual P6-150 and survived my usual
+ * set of torture tests.
+ * Anyway, I spent enough time chasing interrupt re-entrancy during
+ * errors and reconfiguration, and I designed the locked/unlocked
+ * sections of the driver with great care; with the recent addition
+ * of the spinlock (thanks to the new API), we should be quite close
+ * to the truth.
+ * The SMP/IRQ locking is quite coarse and conservative (i.e. not fast),
+ * but better safe than sorry (especially at 2 Mb/s ;-).
+ *
+ * I have also looked into disabling only our interrupt on the card
+ * (via HACR) instead of all interrupts in the processor (via cli),
+ * so that other drivers are not impacted, and it looks like it's
+ * possible, but it's very tricky to do right (full of races). As
+ * the gain would be mostly for SMP systems, it can wait...
+ *
+ * Debugging and options
+ * ---------------------
+ * You will find below a set of '#define' options allowing very fine control
+ * on the driver behaviour and the debug messages printed.
+ * The main options are :
+ * o SET_PSA_CRC, to have your card correctly recognised by
+ * an access point and the Point-to-Point diagnostic tool.
+ * o USE_PSA_CONFIG, to read configuration from the PSA (EEprom)
+ * (otherwise we always start afresh with some defaults)
+ *
+ * wavelan.o is too darned big
+ * ---------------------------
+ * That's true! There is a very simple way to reduce the driver
+ * object by 33%! Comment out the following line:
+ * #include <linux/wireless.h>
+ * Other compile options can also reduce the size of it...
+ *
+ * MAC address and hardware detection:
+ * -----------------------------------
+ * The detection code for the WaveLAN checks that the first three
+ * octets of the MAC address fit the company code. This type of
+ * detection works well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan.h), but of course will fail for other
+ * manufacturers.
+ *
+ * If you are sure that your card is derived from the WaveLAN,
+ * here is the way to configure it:
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, etc.)
+ * b) With the driver:
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ * o Boot and look at the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h)
+ * 3) Compile and verify
+ * 4) Send me the MAC code. I will include it in the next version.
+ *
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first to support "wireless extensions".
+ * This set of extensions provides a standard way to control the wireless
+ * characteristics of the hardware. Applications such as mobile IP may
+ * take advantage of it.
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan.c: actual code for the driver: C functions
+ *
+ * wavelan.p.h: private header: local types and variables for driver
+ *
+ * wavelan.h: description of the hardware interface and structs
+ *
+ * i82586.h: description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * This is based on information in the drivers' headers. It may not be
+ * accurate, and I guarantee only my best effort.
+ *
+ * The history of the WaveLAN drivers is as complicated as the history of
+ * the WaveLAN itself (NCR -> AT&T -> Lucent).
+ *
+ * It all started with Anders Klemets <klemets@paul.rutgers.edu>
+ * writing a WaveLAN ISA driver for the Mach microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modified this for the PCMCIA hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
+ * and added specific PCMCIA support (there is currently no equivalent
+ * of the PCMCIA package under BSD).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started to modify Bruce's driver
+ * (with help of the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished this work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to start
+ * 2.00 cards correctly (2.4 GHz with frequency selection).
+ * David Hinds <dahinds@users.sourceforge.net> integrated the whole in his
+ * PCMCIA package (and bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patches to the PCMCIA driver. Later, I added code in the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Then, I did the same to the PCMCIA driver, and did some
+ * reorganisation. Finally, I came back to the ISA driver to
+ * upgrade it at the same level as the PCMCIA one and reorganise
+ * the code.
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much needed information on the WaveLAN hardware.
+ */
+
+/* The original copyrights and literature mention others' names and
+ * credits. I don't know what their part in this development was.
+ */
+
+/* By the way, for the copyright and legal stuff:
+ * almost everybody wrote code under the GNU or BSD license (or similar),
+ * and want their original copyright to remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody wants to take responsibility for anything, except the fame.
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre <bakre@paul.rutgers.edu>,
+ * Donald Becker <becker@cesdis.gsfc.nasa.gov>,
+ * Loeke Brederveld <Loeke.Brederveld@Utrecht.NCR.com>,
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Anders Klemets <klemets@it.kth.se>,
+ * Vladimir V. Kolpakov <w@stier.koenig.ru>,
+ * Marc Meertens <Marc.Meertens@Utrecht.NCR.com>,
+ * Pauline Middelink <middelin@polyware.iaf.nl>,
+ * Robert Morris <rtm@das.harvard.edu>,
+ * Jean Tourrilhes <jt@hpl.hp.com>,
+ * Girish Welling <welling@paul.rutgers.edu>,
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>
+ *
+ * Thanks go also to:
+ * James Ashton <jaa101@syseng.anu.edu.au>,
+ * Alan Cox <alan@redhat.com>,
+ * Allan Creighton <allanc@cs.usyd.edu.au>,
+ * Matthew Geier <matthew@cs.usyd.edu.au>,
+ * Remo di Giovanni <remo@cs.usyd.edu.au>,
+ * Eckhard Grah <grah@wrcs1.urz.uni-wuppertal.de>,
+ * Vipul Gupta <vgupta@cs.binghamton.edu>,
+ * Mark Hagan <mhagan@wtcpost.daytonoh.NCR.COM>,
+ * Tim Nicholson <tim@cs.usyd.edu.au>,
+ * Ian Parkin <ian@cs.usyd.edu.au>,
+ * John Rosenberg <johnr@cs.usyd.edu.au>,
+ * George Rossi <george@phm.gov.au>,
+ * Arthur Scott <arthur@cs.usyd.edu.au>,
+ * Stanislav Sinyagin <stas@isf.ru>
+ * and Peter Storey for their assistance and advice.
+ *
+ * Additional Credits:
+ *
+ * My development has been done initially under Debian 1.1 (Linux 2.0.x)
+ * and now under Debian 2.2, initially with an HP Vectra XP/60, and now
+ * an HP Vectra XP/90.
+ *
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present:
+ *
+ * Changes made in first pre-release:
+ * ----------------------------------
+ * - reorganisation of the code, function name change
+ * - creation of private header (wavelan.p.h)
+ * - reorganised debug messages
+ * - more comments, history, etc.
+ * - mmc_init: configure the PSA if not done
+ * - mmc_init: correct default value of level threshold for PCMCIA
+ * - mmc_init: 2.00 detection, better code for 2.00 initialization
+ * - better info at startup
+ * - IRQ setting (note: this setting is permanent)
+ * - watchdog: change strategy (and solve module removal problems)
+ * - add wireless extensions (ioctl and get_wireless_stats)
+ * get/set nwid/frequency on fly, info for /proc/net/wireless
+ * - more wireless extensions: SETSPY and GETSPY
+ * - make wireless extensions optional
+ * - private ioctl to set/get quality and level threshold, histogram
+ * - remove /proc/net/wavelan
+ * - suppress useless stuff from lp (net_local)
+ * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - add message level (debug stuff in /var/adm/debug and errors not
+ * displayed at console and still in /var/adm/messages)
+ * - multi device support
+ * - start fixing the probe (init code)
+ * - more inlines
+ * - man page
+ * - many other minor details and cleanups
+ *
+ * Changes made in second pre-release:
+ * -----------------------------------
+ * - clean up init code (probe and module init)
+ * - better multiple device support (module)
+ * - name assignment (module)
+ *
+ * Changes made in third pre-release:
+ * ----------------------------------
+ * - be more conservative on timers
+ * - preliminary support for multicast (I still lack some details)
+ *
+ * Changes made in fourth pre-release:
+ * -----------------------------------
+ * - multicast (revisited and finished)
+ * - avoid reset in set_multicast_list (a really big hack)
+ * if somebody could apply this code for other i82586 based drivers
+ * - share onboard memory 75% RU and 25% CU (instead of 50/50)
+ *
+ * Changes made for release in 2.1.15:
+ * -----------------------------------
+ * - change the detection code for multi manufacturer code support
+ *
+ * Changes made for release in 2.1.17:
+ * -----------------------------------
+ * - update to wireless extensions changes
+ * - silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made for release in 2.1.27 & 2.0.30:
+ * --------------------------------------------
+ * - small bug in debug code (probably not the last one...)
+ * - remove extern keyword for wavelan_probe()
+ * - level threshold is now a standard wireless extension (version 4 !)
+ * - modules parameters types (new module interface)
+ *
+ * Changes made for release in 2.1.36:
+ * -----------------------------------
+ * - byte count stats (courtesy of David Hinds)
+ * - remove dev_tint stuff (courtesy of David Hinds)
+ * - encryption setting from Brent Elphick (thanks a lot!)
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Other changes (not by me) :
+ * -------------------------
+ * - Spelling and grammar "rectification".
+ *
+ * Changes made for release in 2.0.37 & 2.2.2 :
+ * ------------------------------------------
+ * - Correct status in /proc/net/wireless
+ * - Set PSA CRC to make PtP diagnostic tool happy (Bob Gray)
+ * - Module init code doesn't fail if we find at least one card in
+ * the address list (Karlis Peisenieks)
+ * - Missing parenthesis (Christopher Peterson)
+ * - Correct i82586 configuration parameters
+ * - Encryption initialisation bug (Robert McCormack)
+ * - New mac addresses detected in the probe
+ * - Increase watchdog for busy environments
+ *
+ * Changes made for release in 2.0.38 & 2.2.7 :
+ * ------------------------------------------
+ * - Correct the reception logic to better report errors and avoid
+ * sending bogus packet up the stack
+ * - Delay RU config to avoid corrupting first received packet
+ * - Change config completion code (to actually check something)
+ * - Avoid reading out of bound in skbuf to transmit
+ * - Rectify a lot of (useless) debugging code
+ * - Change the way to `#ifdef SET_PSA_CRC'
+ *
+ * Changes made for release in 2.2.11 & 2.3.13 :
+ * -------------------------------------------
+ * - Change e-mail and web page addresses
+ * - Watchdog timer is now correctly expressed in HZ, not in jiffies
+ * - Add channel number to the list of frequencies in range
+ * - Add the (short) list of bit-rates in range
+ * - Develop a new sensitivity... (sens.value & sens.fixed)
+ *
+ * Changes made for release in 2.2.14 & 2.3.23 :
+ * -------------------------------------------
+ * - Fix check for root permission (break instead of exit)
+ * - New nwid & encoding setting (Wireless Extension 9)
+ *
+ * Changes made for release in 2.3.49 :
+ * ----------------------------------
+ * - Indentation reformatting (Alan)
+ * - Update to new network API (softnet - 2.3.43) :
+ * o replace dev->tbusy (Alan)
+ * o replace dev->tstart (Alan)
+ * o remove dev->interrupt (Alan)
+ * o add SMP locking via spinlock in splxx (me)
+ * o add spinlock in interrupt handler (me)
+ * o use kernel watchdog instead of ours (me)
+ * o increase watchdog timeout (kernel is more sensitive) (me)
+ * o verify that all the changes make sense and work (me)
+ * - Fix a potential gotcha when reconfiguring and tighten a bit
+ * the interactions with the Tx queue.
+ *
+ * Changes made for release in 2.4.0 :
+ * ---------------------------------
+ * - Fix spinlock stupid bugs that I left in. The driver is now SMP
+ * compliant and doesn't lock up at startup.
+ *
+ * Changes made for release in 2.5.2 :
+ * ---------------------------------
+ * - Use new driver API for Wireless Extensions :
+ * o got rid of wavelan_ioctl()
+ * o use a bunch of iw_handler instead
+ *
+ * Changes made for release in 2.5.35 :
+ * ----------------------------------
+ * - Set dev->trans_start to avoid filling the logs
+ * - Better handling of spurious/bogus interrupts
+ * - Avoid deadlocks in mmc_out()/mmc_in()
+ *
+ * Wishes & dreams:
+ * ----------------
+ * - roaming (see Pcmcia driver)
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#include <linux/wireless.h> /* Wireless extensions */
+#include <net/iw_handler.h> /* Wireless handlers */
+
+/* WaveLAN declarations */
+#include "i82586.h"
+#include "wavelan.h"
+
+/************************** DRIVER OPTIONS **************************/
+/*
+ * `#define' or `#undef' the following constants to change the behaviour
+ * of the driver...
+ */
+#undef SET_PSA_CRC /* Calculate and set the CRC on PSA (slower) */
+#define USE_PSA_CONFIG /* Use info from the PSA. */
+#undef STRUCT_CHECK /* Verify padding of structures. */
+#undef EEPROM_IS_PROTECTED /* doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */
+#undef SET_MAC_ADDRESS /* Experimental */
+
+#ifdef WIRELESS_EXT /* If wireless extensions exist in the kernel */
+/* Warning: this stuff will slow down the driver. */
+#define WIRELESS_SPY /* Enable spying addresses. */
+#undef HISTOGRAM /* Enable histogram of signal level. */
+#endif
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt and so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions. */
+#undef DEBUG_CONFIG_INFO /* what's going on */
+#define DEBUG_CONFIG_ERROR /* errors on configuration */
+#undef DEBUG_TX_TRACE /* transmission calls */
+#undef DEBUG_TX_INFO /* header of the transmitted packet */
+#undef DEBUG_TX_FAIL /* Normal failure conditions */
+#define DEBUG_TX_ERROR /* Unexpected conditions */
+#undef DEBUG_RX_TRACE /* transmission calls */
+#undef DEBUG_RX_INFO /* header of the received packet */
+#undef DEBUG_RX_FAIL /* Normal failure conditions */
+#define DEBUG_RX_ERROR /* Unexpected conditions */
+
+#undef DEBUG_PACKET_DUMP /* Dump packet on the screen if defined to 32. */
+#undef DEBUG_IOCTL_TRACE /* misc. call by Linux */
+#undef DEBUG_IOCTL_INFO /* various debugging info */
+#define DEBUG_IOCTL_ERROR /* what's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info. */
+#undef DEBUG_VERSION_SHOW /* Print version info. */
+#undef DEBUG_PSA_SHOW /* Dump PSA to screen. */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen. */
+#undef DEBUG_SHOW_UNUSED /* Show unused fields too. */
+#undef DEBUG_I82586_SHOW /* Show i82586 status. */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters. */
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/01\n";
+#endif
+
+/* Watchdog temporisation */
+#define WATCHDOG_JIFFIES (512*HZ/100)
+
+/* Macro to get the number of elements in an array */
+#define NELS(a) (sizeof(a) / sizeof(a[0]))
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCIWFIRSTPRIV /* Set quality threshold */
+#define SIOCGIPQTHR SIOCIWFIRSTPRIV + 1 /* Get quality threshold */
+
+#define SIOCSIPHISTO SIOCIWFIRSTPRIV + 2 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCIWFIRSTPRIV + 3 /* Get histogram values */
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct net_device_stats en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keeps data in two structures: "device"
+ * keeps the generic data (same format for everybody) and "net_local" keeps
+ * additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+ net_local * next; /* linked list of the devices */
+ struct net_device * dev; /* reverse link */
+ spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
+ en_stats stats; /* Ethernet interface statistics */
+ int nresets; /* number of hardware resets */
+ u_char reconfig_82586; /* We need to reconfigure the controller. */
+ u_char promiscuous; /* promiscuous mode */
+ int mc_count; /* number of multicast addresses */
+ u_short hacr; /* current host interface state */
+
+ int tx_n_in_use;
+ u_short rx_head;
+ u_short rx_last;
+ u_short tx_first_free;
+ u_short tx_first_in_use;
+
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless-specific statistics */
+
+ struct iw_spy_data spy_data;
+ struct iw_public_data wireless_data;
+#endif
+
+#ifdef HISTOGRAM
+ int his_number; /* number of intervals */
+ u_char his_range[16]; /* boundaries of interval ]n-1; n] */
+ u_long his_sum[16]; /* sum in interval */
+#endif /* HISTOGRAM */
+};
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- MISC. SUBROUTINES ------------------------ */
+static u_char
+ wv_irq_to_psa(int);
+static int
+ wv_psa_to_irq(u_char);
+/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
+static inline u_short /* data */
+ hasr_read(u_long); /* Read the host interface: base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface: base address */
+ u_short), /* data */
+ hacr_write_slow(u_long,
+ u_short),
+ set_chan_attn(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_hacr_reset(u_long), /* ioaddr */
+ wv_16_off(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_16_on(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_ints_off(struct net_device *),
+ wv_ints_on(struct net_device *);
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static void
+ psa_read(u_long, /* Read the Parameter Storage Area. */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(u_long, /* Write to the PSA. */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer in memory */
+ int); /* length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Management Controller. */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC. */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC. */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC. */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEPROM: base address */
+ int, /* base delay to wait for */
+ int); /* time to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEPROM: base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82586 SUBROUTINES ----------------------- */
+static /*inline*/ void
+ obram_read(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static inline void
+ obram_write(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static void
+ wv_ack(struct net_device *);
+static inline int
+ wv_synchronous_cmd(struct net_device *,
+ const char *),
+ wv_config_complete(struct net_device *,
+ u_long,
+ net_local *);
+static int
+ wv_complete(struct net_device *,
+ u_long,
+ net_local *);
+static inline void
+ wv_82586_reconfig(struct net_device *);
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+#ifdef DEBUG_I82586_SHOW
+static void
+ wv_scb_show(unsigned short);
+#endif
+static inline void
+ wv_init_info(struct net_device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */
+static iw_stats *
+ wavelan_get_wireless_stats(struct net_device *);
+static void
+ wavelan_set_multicast_list(struct net_device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline void
+ wv_packet_read(struct net_device *, /* Read a packet from a frame. */
+ u_short,
+ int),
+ wv_receive(struct net_device *); /* Read all packets waiting. */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline int
+ wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer. */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet. */
+ struct net_device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(struct net_device *), /* Initialize the modem. */
+ wv_ru_start(struct net_device *), /* Start the i82586 receiver unit. */
+ wv_cu_start(struct net_device *), /* Start the i82586 command unit. */
+ wv_82586_start(struct net_device *); /* Start the i82586. */
+static void
+ wv_82586_config(struct net_device *); /* Configure the i82586. */
+static inline void
+ wv_82586_stop(struct net_device *);
+static int
+ wv_hw_reset(struct net_device *), /* Reset the WaveLAN hardware. */
+ wv_check_ioaddr(u_long, /* ioaddr */
+ u_char *); /* mac address (read) */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static irqreturn_t
+ wavelan_interrupt(int, /* interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(struct net_device *); /* transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(struct net_device *), /* Open the device. */
+ wavelan_close(struct net_device *), /* Close the device. */
+ wavelan_config(struct net_device *, unsigned short);/* Configure one device. */
+extern struct net_device *wavelan_probe(int unit); /* See Space.c. */
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * This is the root of the linked list of WaveLAN drivers
+ * It is used to verify that we don't reuse the same base address
+ * for two different drivers and to clean up when removing the module.
+ */
+static net_local * wavelan_list = (net_local *) NULL;
+
+/*
+ * This table is used to translate the PSA value to IRQ number
+ * and vice versa.
+ */
+static u_char irqvals[] =
+{
+ 0, 0, 0, 0x01,
+ 0x02, 0x04, 0, 0x08,
+ 0, 0, 0x10, 0x20,
+ 0x40, 0, 0, 0x80,
+};
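For illustration (editor's sketch, not part of the patch), a rough sketch of how such a table can be used in both directions; the real wv_irq_to_psa()/wv_psa_to_irq() implementations live in wavelan.c and may differ in detail.

    /* Sketch only: map an IRQ number to its PSA encoding and back. */
    static u_char example_irq_to_psa(int irq)
    {
        if (irq < 0 || irq >= (int) NELS(irqvals))
            return 0;          /* out of range */
        return irqvals[irq];   /* 0 means "IRQ not usable by the card" */
    }

    static int example_psa_to_irq(u_char irqval)
    {
        int irq;

        if (irqval == 0)
            return -1;         /* 0 is not a valid encoding */
        for (irq = 0; irq < (int) NELS(irqvals); irq++)
            if (irqvals[irq] == irqval)
                return irq;
        return -1;             /* no matching IRQ */
    }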
+
+/*
+ * Table of the available I/O addresses (base addresses) for WaveLAN
+ */
+static unsigned short iobase[] =
+{
+#if 0
+ /* Leave out 0x3C0 for now -- seems to clash with some video
+ * controllers.
+ * Leave out the others too -- we will always use 0x390 and leave
+ * 0x300 for the Ethernet device.
+ * Jean II: 0x3E0 is fine as well.
+ */
+ 0x300, 0x390, 0x3E0, 0x3C0
+#endif /* 0 */
+ 0x390, 0x3E0
+};
+
+#ifdef MODULE
+/* Parameters set by insmod */
+static int io[4];
+static int irq[4];
+static char *name[4];
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(name, charp, NULL, 0);
+
+MODULE_PARM_DESC(io, "WaveLAN I/O base address(es), required");
+MODULE_PARM_DESC(irq, "WaveLAN IRQ number(s)");
+MODULE_PARM_DESC(name, "WaveLAN interface name(s)");
+#endif /* MODULE */
+
+#endif /* WAVELAN_P_H */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
new file mode 100644
index 000000000000..ec8329788e49
--- /dev/null
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -0,0 +1,4914 @@
+/*
+ * Wavelan Pcmcia driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follow. See wavelan_cs.p.h for details.
+ *
+ * This code is derived from Anthony D. Joseph's code and all the changes here
+ * are also under the original copyright below.
+ *
+ * This code supports version 2.00 of WaveLAN/PCMCIA cards (2.4GHz), and
+ * can work on Linux 2.0.36 with support of David Hinds' PCMCIA Card Services
+ *
+ * Joe Finney (joe@comp.lancs.ac.uk) at Lancaster University in UK added
+ * critical code in the routine to initialize the Modem Management Controller.
+ *
+ * Thanks to Alan Cox and Bruce Janson for their advice.
+ *
+ * -- Yunzhou Li (scip4166@nus.sg)
+ *
+#ifdef WAVELAN_ROAMING
+ * Roaming support added 07/22/98 by Justin Seger (jseger@media.mit.edu)
+ * based on patch by Joe Finney from Lancaster University.
+#endif
+ *
+ * Lucent (formerly AT&T GIS, formerly NCR) WaveLAN PCMCIA card: An
+ * Ethernet-like radio transceiver controlled by an Intel 82593 coprocessor.
+ *
+ * A non-shared memory PCMCIA ethernet driver for linux
+ *
+ * ISA version modified to support PCMCIA by Anthony Joseph (adj@lcs.mit.edu)
+ *
+ *
+ * Joseph O'Sullivan & John Langford (josullvn@cs.cmu.edu & jcl@cs.cmu.edu)
+ *
+ * Apr 2 '98 made changes to bring the i82593 control/int handling in line
+ * with official specs...
+ *
+ ****************************************************************************
+ * Copyright 1995
+ * Anthony D. Joseph
+ * Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this program
+ * for any purpose and without fee is hereby granted, provided
+ * that this copyright and permission notice appear on all copies
+ * and supporting documentation, the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * program without specific prior permission, and notice be given
+ * in supporting documentation that copying and distribution is
+ * by permission of M.I.T. M.I.T. makes no representations about
+ * the suitability of this software for any purpose. It is pro-
+ * vided "as is" without express or implied warranty.
+ ****************************************************************************
+ *
+ */
+
+/* Do *NOT* add other headers here, you are guaranteed to be wrong - Jean II */
+#include "wavelan_cs.p.h" /* Private header */
+
+/************************* MISC SUBROUTINES **************************/
+/*
+ * Subroutines which won't fit in one of the following category
+ * (wavelan modem or i82593)
+ */
+
+#ifdef STRUCT_CHECK
+/*------------------------------------------------------------------*/
+/*
+ * Sanity routine to verify the sizes of the various WaveLAN interface
+ * structures.
+ */
+static char *
+wv_structuct_check(void)
+{
+#define SC(t,s,n) if (sizeof(t) != s) return(n);
+
+ SC(psa_t, PSA_SIZE, "psa_t");
+ SC(mmw_t, MMW_SIZE, "mmw_t");
+ SC(mmr_t, MMR_SIZE, "mmr_t");
+
+#undef SC
+
+ return((char *) NULL);
+} /* wv_structuct_check */
+#endif /* STRUCT_CHECK */
+
+/******************* MODEM MANAGEMENT SUBROUTINES *******************/
+/*
+ * Useful subroutines to manage the modem of the wavelan
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read from card's Host Adaptor Status Register.
+ */
+static inline u_char
+hasr_read(u_long base)
+{
+ return(inb(HASR(base)));
+} /* hasr_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register.
+ */
+static inline void
+hacr_write(u_long base,
+ u_char hacr)
+{
+ outb(hacr, HACR(base));
+} /* hacr_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register. Include a delay for
+ * those times when it is needed.
+ */
+static inline void
+hacr_write_slow(u_long base,
+ u_char hacr)
+{
+ hacr_write(base, hacr);
+ /* delay might only be needed sometimes */
+ mdelay(1);
+} /* hacr_write_slow */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read the Parameter Storage Area from the WaveLAN card's memory
+ */
+static void
+psa_read(struct net_device * dev,
+ int o, /* offset in PSA */
+ u_char * b, /* buffer to fill */
+ int n) /* size to read */
+{
+ net_local *lp = netdev_priv(dev);
+ u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
+
+ while(n-- > 0)
+ {
+ *b++ = readb(ptr);
+ /* Due to a lack of address decode pins, the WaveLAN PCMCIA card
+ * only supports reading even memory addresses. That means the
+ * increment here MUST be two.
+ * Because of that, we can't use memcpy_fromio()...
+ */
+ ptr += 2;
+ }
+} /* psa_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write the Parameter Storage Area to the WaveLAN card's memory
+ */
+static void
+psa_write(struct net_device * dev,
+ int o, /* Offset in psa */
+ u_char * b, /* Buffer in memory */
+ int n) /* Length of buffer */
+{
+ net_local *lp = netdev_priv(dev);
+ u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
+ int count = 0;
+ kio_addr_t base = dev->base_addr;
+ /* As there seems to be no PSA_BUSY flag as in the ISA model, we are
+ * obliged to verify this address to know when the PSA is ready... */
+ volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
+ (psaoff(0, psa_comp_number) << 1);
+
+ /* Authorize writing to the PSA */
+ hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
+
+ while(n-- > 0)
+ {
+ /* write to PSA */
+ writeb(*b++, ptr);
+ ptr += 2;
+
+ /* I don't have the spec, so I don't know what the correct
+ * sequence to write is. This hack seems to work for me... */
+ count = 0;
+ while((readb(verify) != PSA_COMP_PCMCIA_915) && (count++ < 100))
+ mdelay(1);
+ }
+
+ /* Put the host interface back in standard state */
+ hacr_write(base, HACR_DEFAULT);
+} /* psa_write */
+
+#ifdef SET_PSA_CRC
+/*------------------------------------------------------------------*/
+/*
+ * Calculate the PSA CRC
+ * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
+ * NOTE: By specifying a length including the CRC position the
+ * returned value should be zero. (i.e. a correct checksum in the PSA)
+ *
+ * The Windows drivers don't use the CRC, but the AP and the PtP tool
+ * depend on it.
+ */
+static u_short
+psa_crc(unsigned char * psa, /* The PSA */
+ int size) /* Number of short for CRC */
+{
+ int byte_cnt; /* Loop on the PSA */
+ u_short crc_bytes = 0; /* Data in the PSA */
+ int bit_cnt; /* Loop on the bits of the short */
+
+ for(byte_cnt = 0; byte_cnt < size; byte_cnt++ )
+ {
+ crc_bytes ^= psa[byte_cnt]; /* It's an XOR */
+
+ for(bit_cnt = 1; bit_cnt < 9; bit_cnt++ )
+ {
+ if(crc_bytes & 0x0001)
+ crc_bytes = (crc_bytes >> 1) ^ 0xA001;
+ else
+ crc_bytes >>= 1 ;
+ }
+ }
+
+ return crc_bytes;
+} /* psa_crc */
+#endif /* SET_PSA_CRC */
+
+/*------------------------------------------------------------------*/
+/*
+ * update the checksum field in the Wavelan's PSA
+ */
+static void
+update_psa_checksum(struct net_device * dev)
+{
+#ifdef SET_PSA_CRC
+ psa_t psa;
+ u_short crc;
+
+ /* read the parameter storage area */
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+
+ /* update the checksum */
+ crc = psa_crc((unsigned char *) &psa,
+ sizeof(psa) - sizeof(psa.psa_crc[0]) - sizeof(psa.psa_crc[1])
+ - sizeof(psa.psa_crc_status));
+
+ psa.psa_crc[0] = crc & 0xFF;
+ psa.psa_crc[1] = (crc & 0xFF00) >> 8;
+
+ /* Write it ! */
+ psa_write(dev, (char *)&psa.psa_crc - (char *)&psa,
+ (unsigned char *)&psa.psa_crc, 2);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk (KERN_DEBUG "%s: update_psa_checksum(): crc = 0x%02x%02x\n",
+ dev->name, psa.psa_crc[0], psa.psa_crc[1]);
+
+ /* Check again (luxury !) */
+ crc = psa_crc((unsigned char *) &psa,
+ sizeof(psa) - sizeof(psa.psa_crc_status));
+
+ if(crc != 0)
+ printk(KERN_WARNING "%s: update_psa_checksum(): CRC does not agree with PSA data (even after recalculating)\n", dev->name);
+#endif /* DEBUG_IOCTL_INFO */
+#endif /* SET_PSA_CRC */
+} /* update_psa_checksum */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write 1 byte to the MMC.
+ */
+static inline void
+mmc_out(u_long base,
+ u_short o,
+ u_char d)
+{
+ int count = 0;
+
+ /* Wait for MMC to go idle */
+ while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
+ udelay(10);
+
+ outb((u_char)((o << 1) | MMR_MMI_WR), MMR(base));
+ outb(d, MMD(base));
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to write bytes to the Modem Management Controller.
+ * We start from the end, because that is the way it should be!
+ */
+static inline void
+mmc_write(u_long base,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0 )
+ mmc_out(base, --o, *(--b));
+} /* mmc_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read 1 byte from the MMC.
+ * Optimised version for 1 byte; avoids using memory...
+ */
+static inline u_char
+mmc_in(u_long base,
+ u_short o)
+{
+ int count = 0;
+
+ while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
+ udelay(10);
+ outb(o << 1, MMR(base)); /* Set the read address */
+
+ outb(0, MMD(base)); /* Required dummy write */
+
+ while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
+ udelay(10);
+ return (u_char) (inb(MMD(base))); /* Now do the actual read */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to read bytes from the Modem Management Controller.
+ * The implementation is complicated by a lack of address lines,
+ * which prevents decoding of the low-order bit.
+ * (the code has simply been moved into the function above)
+ * We start from the end, because that is the way it should be!
+ */
+static inline void
+mmc_read(u_long base,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0)
+ *(--b) = mmc_in(base, --o);
+} /* mmc_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the type of encryption available...
+ */
+static inline int
+mmc_encr(u_long base) /* i/o port of the card */
+{
+ int temp;
+
+ temp = mmc_in(base, mmroff(0, mmr_des_avail));
+ if((temp != MMR_DES_AVAIL_DES) && (temp != MMR_DES_AVAIL_AES))
+ return 0;
+ else
+ return temp;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wait for the frequency EEprom to complete a command...
+ * I hope this one will be optimally inlined...
+ */
+static inline void
+fee_wait(u_long base, /* i/o port of the card */
+ int delay, /* Base delay to wait for */
+ int number) /* Number of times to wait */
+{
+ int count = 0; /* Wait only a limited time */
+
+ while((count++ < number) &&
+ (mmc_in(base, mmroff(0, mmr_fee_status)) & MMR_FEE_STATUS_BUSY))
+ udelay(delay);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the Frequency EEprom (frequency select cards).
+ */
+static void
+fee_read(u_long base, /* i/o port of the card */
+ u_short o, /* destination offset */
+ u_short * b, /* data buffer */
+ int n) /* number of registers */
+{
+ b += n; /* Position at the end of the area */
+
+ /* Write the address */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop over the whole buffer */
+ while(n-- > 0)
+ {
+ /* Write the read command */
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_READ);
+
+ /* Wait until EEprom is ready (should be quick !) */
+ fee_wait(base, 10, 100);
+
+ /* Read the value */
+ *--b = ((mmc_in(base, mmroff(0, mmr_fee_data_h)) << 8) |
+ mmc_in(base, mmroff(0, mmr_fee_data_l)));
+ }
+}
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes to the Frequency EEprom (frequency select cards).
+ * This is a bit complicated, because the frequency eeprom has to
+ * be unprotected and the write enabled.
+ * Jean II
+ */
+static void
+fee_write(u_long base, /* i/o port of the card */
+ u_short o, /* destination offset */
+ u_short * b, /* data buffer */
+ int n) /* number of registers */
+{
+ b += n; /* Position at the end of the area */
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* Ask to read the protected register */
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
+
+ fee_wait(base, 10, 100);
+
+ /* Read the protected register */
+ printk("Protected 2 : %02X-%02X\n",
+ mmc_in(base, mmroff(0, mmr_fee_data_h)),
+ mmc_in(base, mmroff(0, mmr_fee_data_l)));
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ /* Enable protected register */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
+
+ fee_wait(base, 10, 100);
+
+ /* Unprotect area */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), o + n);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* Or use : */
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ fee_wait(base, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+
+ /* Write enable */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
+
+ fee_wait(base, 10, 100);
+
+ /* Write the EEprom address */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop over the whole buffer */
+ while(n-- > 0)
+ {
+ /* Write the value */
+ mmc_out(base, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
+ mmc_out(base, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
+
+ /* Write the write command */
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WRITE);
+
+ /* Wavelan doc says : wait at least 10 ms for EEBUSY = 0 */
+ mdelay(10);
+ fee_wait(base, 10, 100);
+ }
+
+ /* Write disable */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
+
+ fee_wait(base, 10, 100);
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+ /* Reprotect EEprom */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), 0x00);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+
+ fee_wait(base, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+}
+#endif /* WIRELESS_EXT */
+
+/******************* WaveLAN Roaming routines... ********************/
+
+#ifdef WAVELAN_ROAMING /* Conditional compile, see wavelan_cs.h */
+
+unsigned char WAVELAN_BEACON_ADDRESS[]= {0x09,0x00,0x0e,0x20,0x03,0x00};
+
+void wv_roam_init(struct net_device *dev)
+{
+ net_local *lp= netdev_priv(dev);
+
+ /* Do not remove this unless you have a good reason */
+ printk(KERN_NOTICE "%s: Warning, you have enabled roaming on"
+ " device %s !\n", dev->name, dev->name);
+ printk(KERN_NOTICE "Roaming is currently an experimental unsupported feature"
+ " of the Wavelan driver.\n");
+ printk(KERN_NOTICE "It may work, but may also make the driver behave in"
+ " erratic ways or crash.\n");
+
+ lp->wavepoint_table.head=NULL; /* Initialise WavePoint table */
+ lp->wavepoint_table.num_wavepoints=0;
+ lp->wavepoint_table.locked=0;
+ lp->curr_point=NULL; /* No default WavePoint */
+ lp->cell_search=0;
+
+ lp->cell_timer.data=(long)lp; /* Start cell expiry timer */
+ lp->cell_timer.function=wl_cell_expiry;
+ lp->cell_timer.expires=jiffies+CELL_TIMEOUT;
+ add_timer(&lp->cell_timer);
+
+ wv_nwid_filter(NWID_PROMISC,lp) ; /* Enter NWID promiscuous mode */
+ /* to build up a good WavePoint */
+ /* table... */
+ printk(KERN_DEBUG "WaveLAN: Roaming enabled on device %s\n",dev->name);
+}
+
+void wv_roam_cleanup(struct net_device *dev)
+{
+ wavepoint_history *ptr,*old_ptr;
+ net_local *lp= netdev_priv(dev);
+
+ printk(KERN_DEBUG "WaveLAN: Roaming Disabled on device %s\n",dev->name);
+
+ /* Fixme : maybe we should check that the timer exists before deleting it */
+ del_timer(&lp->cell_timer); /* Remove cell expiry timer */
+ ptr=lp->wavepoint_table.head; /* Clear device's WavePoint table */
+ while(ptr!=NULL)
+ {
+ old_ptr=ptr;
+ ptr=ptr->next;
+ wl_del_wavepoint(old_ptr,lp);
+ }
+}
+
+/* Enable/Disable NWID promiscuous mode on a given device */
+void wv_nwid_filter(unsigned char mode, net_local *lp)
+{
+ mm_t m;
+ unsigned long flags;
+
+#ifdef WAVELAN_ROAMING_DEBUG
+ printk(KERN_DEBUG "WaveLAN: NWID promisc %s, device %s\n",(mode==NWID_PROMISC) ? "on" : "off", lp->dev->name);
+#endif
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ m.w.mmw_loopt_sel = (mode==NWID_PROMISC) ? MMW_LOOPT_SEL_DIS_NWID : 0x00;
+ mmc_write(lp->dev->base_addr, (char *)&m.w.mmw_loopt_sel - (char *)&m, (unsigned char *)&m.w.mmw_loopt_sel, 1);
+
+ if(mode==NWID_PROMISC)
+ lp->cell_search=1;
+ else
+ lp->cell_search=0;
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+}
+
+/* Find a record in the WavePoint table matching a given NWID */
+wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
+{
+ wavepoint_history *ptr=lp->wavepoint_table.head;
+
+ while(ptr!=NULL){
+ if(ptr->nwid==nwid)
+ return ptr;
+ ptr=ptr->next;
+ }
+ return NULL;
+}
+
+/* Create a new wavepoint table entry */
+wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp)
+{
+ wavepoint_history *new_wavepoint;
+
+#ifdef WAVELAN_ROAMING_DEBUG
+ printk(KERN_DEBUG "WaveLAN: New Wavepoint, NWID:%.4X\n",nwid);
+#endif
+
+ if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS)
+ return NULL;
+
+ new_wavepoint=(wavepoint_history *) kmalloc(sizeof(wavepoint_history),GFP_ATOMIC);
+ if(new_wavepoint==NULL)
+ return NULL;
+
+ new_wavepoint->nwid=nwid; /* New WavePoints NWID */
+ new_wavepoint->average_fast=0; /* Running Averages..*/
+ new_wavepoint->average_slow=0;
+ new_wavepoint->qualptr=0; /* Start of ringbuffer */
+ new_wavepoint->last_seq=seq-1; /* Last sequence no. seen */
+ memset(new_wavepoint->sigqual,0,WAVEPOINT_HISTORY);/* Empty ringbuffer */
+
+ new_wavepoint->next=lp->wavepoint_table.head;/* Add to wavepoint table */
+ new_wavepoint->prev=NULL;
+
+ if(lp->wavepoint_table.head!=NULL)
+ lp->wavepoint_table.head->prev=new_wavepoint;
+
+ lp->wavepoint_table.head=new_wavepoint;
+
+ lp->wavepoint_table.num_wavepoints++; /* no. of visible wavepoints */
+
+ return new_wavepoint;
+}
+
+/* Remove a wavepoint entry from WavePoint table */
+void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
+{
+ if(wavepoint==NULL)
+ return;
+
+ if(lp->curr_point==wavepoint)
+ lp->curr_point=NULL;
+
+ if(wavepoint->prev!=NULL)
+ wavepoint->prev->next=wavepoint->next;
+
+ if(wavepoint->next!=NULL)
+ wavepoint->next->prev=wavepoint->prev;
+
+ if(lp->wavepoint_table.head==wavepoint)
+ lp->wavepoint_table.head=wavepoint->next;
+
+ lp->wavepoint_table.num_wavepoints--;
+ kfree(wavepoint);
+}
+
+/* Timer callback function - checks WavePoint table for stale entries */
+void wl_cell_expiry(unsigned long data)
+{
+ net_local *lp=(net_local *)data;
+ wavepoint_history *wavepoint=lp->wavepoint_table.head,*old_point;
+
+#if WAVELAN_ROAMING_DEBUG > 1
+ printk(KERN_DEBUG "WaveLAN: Wavepoint timeout, dev %s\n",lp->dev->name);
+#endif
+
+ if(lp->wavepoint_table.locked)
+ {
+#if WAVELAN_ROAMING_DEBUG > 1
+ printk(KERN_DEBUG "WaveLAN: Wavepoint table locked...\n");
+#endif
+
+ lp->cell_timer.expires=jiffies+1; /* If table in use, come back later */
+ add_timer(&lp->cell_timer);
+ return;
+ }
+
+ while(wavepoint!=NULL)
+ {
+ if(time_after(jiffies, wavepoint->last_seen + CELL_TIMEOUT))
+ {
+#ifdef WAVELAN_ROAMING_DEBUG
+ printk(KERN_DEBUG "WaveLAN: Bye bye %.4X\n",wavepoint->nwid);
+#endif
+
+ old_point=wavepoint;
+ wavepoint=wavepoint->next;
+ wl_del_wavepoint(old_point,lp);
+ }
+ else
+ wavepoint=wavepoint->next;
+ }
+ lp->cell_timer.expires=jiffies+CELL_TIMEOUT;
+ add_timer(&lp->cell_timer);
+}
+
+/* Update SNR history of a wavepoint */
+void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq)
+{
+ int i=0,num_missed=0,ptr=0;
+ int average_fast=0,average_slow=0;
+
+ num_missed=(seq-wavepoint->last_seq)%WAVEPOINT_HISTORY;/* Have we missed
+ any beacons? */
+ if(num_missed)
+ for(i=0;i<num_missed;i++)
+ {
+ wavepoint->sigqual[wavepoint->qualptr++]=0; /* If so, enter them as 0's */
+ wavepoint->qualptr %=WAVEPOINT_HISTORY; /* in the ringbuffer. */
+ }
+ wavepoint->last_seen=jiffies; /* Add beacon to history */
+ wavepoint->last_seq=seq;
+ wavepoint->sigqual[wavepoint->qualptr++]=sigqual;
+ wavepoint->qualptr %=WAVEPOINT_HISTORY;
+ ptr=(wavepoint->qualptr-WAVEPOINT_FAST_HISTORY+WAVEPOINT_HISTORY)%WAVEPOINT_HISTORY;
+
+ for(i=0;i<WAVEPOINT_FAST_HISTORY;i++) /* Update running averages */
+ {
+ average_fast+=wavepoint->sigqual[ptr++];
+ ptr %=WAVEPOINT_HISTORY;
+ }
+
+ average_slow=average_fast;
+ for(i=WAVEPOINT_FAST_HISTORY;i<WAVEPOINT_HISTORY;i++)
+ {
+ average_slow+=wavepoint->sigqual[ptr++];
+ ptr %=WAVEPOINT_HISTORY;
+ }
+
+ wavepoint->average_fast=average_fast/WAVEPOINT_FAST_HISTORY;
+ wavepoint->average_slow=average_slow/WAVEPOINT_HISTORY;
+}
+
+/* Perform a handover to a new WavePoint */
+void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
+{
+ kio_addr_t base = lp->dev->base_addr;
+ mm_t m;
+ unsigned long flags;
+
+ if(wavepoint==lp->curr_point) /* Sanity check... */
+ {
+ wv_nwid_filter(!NWID_PROMISC,lp);
+ return;
+ }
+
+#ifdef WAVELAN_ROAMING_DEBUG
+ printk(KERN_DEBUG "WaveLAN: Doing handover to %.4X, dev %s\n",wavepoint->nwid,lp->dev->name);
+#endif
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ m.w.mmw_netw_id_l = wavepoint->nwid & 0xFF;
+ m.w.mmw_netw_id_h = (wavepoint->nwid & 0xFF00) >> 8;
+
+ mmc_write(base, (char *)&m.w.mmw_netw_id_l - (char *)&m, (unsigned char *)&m.w.mmw_netw_id_l, 2);
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ wv_nwid_filter(!NWID_PROMISC,lp);
+ lp->curr_point=wavepoint;
+}
+
+/* Called when a WavePoint beacon is received */
+static inline void wl_roam_gather(struct net_device * dev,
+ u_char * hdr, /* Beacon header */
+ u_char * stats) /* SNR, Signal quality
+ of packet */
+{
+ wavepoint_beacon *beacon= (wavepoint_beacon *)hdr; /* Rcvd. Beacon */
+ unsigned short nwid=ntohs(beacon->nwid);
+ unsigned short sigqual=stats[2] & MMR_SGNL_QUAL; /* SNR of beacon */
+ wavepoint_history *wavepoint=NULL; /* WavePoint table entry */
+ net_local *lp = netdev_priv(dev); /* Device info */
+
+#ifdef I_NEED_THIS_FEATURE
+ /* Some people don't need this, some others may need it */
+ nwid=nwid^ntohs(beacon->domain_id);
+#endif
+
+#if WAVELAN_ROAMING_DEBUG > 1
+ printk(KERN_DEBUG "WaveLAN: beacon, dev %s:\n",dev->name);
+ printk(KERN_DEBUG "Domain: %.4X NWID: %.4X SigQual=%d\n",ntohs(beacon->domain_id),nwid,sigqual);
+#endif
+
+ lp->wavepoint_table.locked=1; /* <Mutex> */
+
+ wavepoint=wl_roam_check(nwid,lp); /* Find WavePoint table entry */
+ if(wavepoint==NULL) /* If no entry, Create a new one... */
+ {
+ wavepoint=wl_new_wavepoint(nwid,beacon->seq,lp);
+ if(wavepoint==NULL)
+ goto out;
+ }
+ if(lp->curr_point==NULL) /* If this is the only WavePoint, */
+ wv_roam_handover(wavepoint, lp); /* Jump on it! */
+
+ wl_update_history(wavepoint, sigqual, beacon->seq); /* Update SNR history
+ stats. */
+
+ if(lp->curr_point->average_slow < SEARCH_THRESH_LOW) /* If our current */
+ if(!lp->cell_search) /* WavePoint is getting faint, */
+ wv_nwid_filter(NWID_PROMISC,lp); /* start looking for a new one */
+
+ if(wavepoint->average_slow >
+ lp->curr_point->average_slow + WAVELAN_ROAMING_DELTA)
+ wv_roam_handover(wavepoint, lp); /* Handover to a better WavePoint */
+
+ if(lp->curr_point->average_slow > SEARCH_THRESH_HIGH) /* If our SNR is */
+ if(lp->cell_search) /* getting better, drop out of cell search mode */
+ wv_nwid_filter(!NWID_PROMISC,lp);
+
+out:
+ lp->wavepoint_table.locked=0; /* </MUTEX> :-) */
+}
+
+/* Test if this MAC frame is a WavePoint beacon */
+static inline int WAVELAN_BEACON(unsigned char *data)
+{
+ wavepoint_beacon *beacon= (wavepoint_beacon *)data;
+ static wavepoint_beacon beacon_template={0xaa,0xaa,0x03,0x08,0x00,0x0e,0x20,0x03,0x00};
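+ /* Note : the template appears to be an 802.2 LLC/SNAP header (AA AA 03)
+ * followed by the NCR/AT&T OUI 08:00:0E and protocol id 20:03. */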
+
+ if(memcmp(beacon,&beacon_template,9)==0)
+ return 1;
+ else
+ return 0;
+}
+#endif /* WAVELAN_ROAMING */
+
+/************************ I82593 SUBROUTINES *************************/
+/*
+ * Useful subroutines to manage the Ethernet controller
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to synchronously send a command to the i82593 chip.
+ * Should be called with interrupts disabled.
+ * (called by wv_packet_write(), wv_ru_stop(), wv_ru_start(),
+ * wv_82593_config() & wv_diag())
+ */
+static int
+wv_82593_cmd(struct net_device * dev,
+ char * str,
+ int cmd,
+ int result)
+{
+ kio_addr_t base = dev->base_addr;
+ int status;
+ int wait_completed;
+ long spin;
+
+ /* Spin until the chip finishes executing its current command (if any) */
+ spin = 1000;
+ do
+ {
+ /* Time calibration of the loop */
+ udelay(10);
+
+ /* Read the interrupt register */
+ outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
+ status = inb(LCSR(base));
+ }
+ while(((status & SR3_EXEC_STATE_MASK) != SR3_EXEC_IDLE) && (spin-- > 0));
+
+ /* If the interrupt hasn't been posted */
+ if(spin <= 0)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "wv_82593_cmd: %s timeout (previous command), status 0x%02x\n",
+ str, status);
+#endif
+ return(FALSE);
+ }
+
+ /* Issue the command to the controller */
+ outb(cmd, LCCR(base));
+
+ /* If we don't have to check the result of the command
+ * Note : this means that the irq handler will deal with that */
+ if(result == SR0_NO_RESULT)
+ return(TRUE);
+
+ /* We are waiting for command completion */
+ wait_completed = TRUE;
+
+ /* Busy wait while the LAN controller executes the command. */
+ spin = 1000;
+ do
+ {
+ /* Time calibration of the loop */
+ udelay(10);
+
+ /* Read the interrupt register */
+ outb(CR0_STATUS_0 | OP0_NOP, LCCR(base));
+ status = inb(LCSR(base));
+
+ /* Check if there was an interrupt posted */
+ if((status & SR0_INTERRUPT))
+ {
+ /* Acknowledge the interrupt */
+ outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
+
+ /* Check if interrupt is a command completion */
+ if(((status & SR0_BOTH_RX_TX) != SR0_BOTH_RX_TX) &&
+ ((status & SR0_BOTH_RX_TX) != 0x0) &&
+ !(status & SR0_RECEPTION))
+ {
+ /* Signal command completion */
+ wait_completed = FALSE;
+ }
+ else
+ {
+ /* Note : Rx interrupts will be handled later, because we can
+ * handle multiple Rx packets at once */
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_INFO "wv_82593_cmd: not our interrupt\n");
+#endif
+ }
+ }
+ }
+ while(wait_completed && (spin-- > 0));
+
+ /* If the interrupt hasn't been posted */
+ if(wait_completed)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "wv_82593_cmd: %s timeout, status 0x%02x\n",
+ str, status);
+#endif
+ return(FALSE);
+ }
+
+ /* Check the return code returned by the card (see above) against
+ * the expected return code provided by the caller */
+ if((status & SR0_EVENT_MASK) != result)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "wv_82593_cmd: %s failed, status = 0x%x\n",
+ str, status);
+#endif
+ return(FALSE);
+ }
+
+ return(TRUE);
+} /* wv_82593_cmd */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a 593 op-code number 7, and obtains the diagnosis
+ * status for the WaveLAN.
+ */
+static inline int
+wv_diag(struct net_device * dev)
+{
+ int ret = FALSE;
+
+ if(wv_82593_cmd(dev, "wv_diag(): diagnose",
+ OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED))
+ ret = TRUE;
+#ifdef DEBUG_CONFIG_ERRORS
+ else
+ printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n");
+#endif
+ return(ret);
+} /* wv_diag */
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to read len bytes from the i82593's ring buffer, starting at
+ * chip address addr. The results read from the chip are stored in buf.
+ * The return value is the address to use for the next call.
+ */
+static int
+read_ringbuf(struct net_device * dev,
+ int addr,
+ char * buf,
+ int len)
+{
+ kio_addr_t base = dev->base_addr;
+ int ring_ptr = addr;
+ int chunk_len;
+ char * buf_ptr = buf;
+
+ /* Get all the buffer */
+ while(len > 0)
+ {
+ /* Position the Program I/O Register at the ring buffer pointer */
+ outb(ring_ptr & 0xff, PIORL(base));
+ outb(((ring_ptr >> 8) & PIORH_MASK), PIORH(base));
+
+ /* First, determine how much we can read without wrapping around the
+ ring buffer */
+ if((addr + len) < (RX_BASE + RX_SIZE))
+ chunk_len = len;
+ else
+ chunk_len = RX_BASE + RX_SIZE - addr;
+ insb(PIOP(base), buf_ptr, chunk_len);
+ buf_ptr += chunk_len;
+ len -= chunk_len;
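+ /* Advance the ring pointer, wrapping within [RX_BASE, RX_BASE + RX_SIZE) */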
+ ring_ptr = (ring_ptr - RX_BASE + chunk_len) % RX_SIZE + RX_BASE;
+ }
+ return(ring_ptr);
+} /* read_ringbuf */
+
+/*------------------------------------------------------------------*/
+/*
+ * Reconfigure the i82593, or at least ask for it...
+ * Because wv_82593_config() uses the transmission buffer, we must do it
+ * when we are sure that there is no transmission in progress, so we do
+ * it now or in wavelan_packet_xmit() (I can't find any better place,
+ * wavelan_interrupt is not an option...), so you may experience
+ * some delay from time to time...
+ */
+static inline void
+wv_82593_reconfig(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+ dev_link_t * link = lp->link;
+ unsigned long flags;
+
+ /* Arm the flag, will be cleared in wv_82593_config() */
+ lp->reconfig_82593 = TRUE;
+
+ /* Check if we can do it now ! */
+ if((link->open) && (netif_running(dev)) && !(netif_queue_stopped(dev)))
+ {
+ spin_lock_irqsave(&lp->spinlock, flags); /* Disable interrupts */
+ wv_82593_config(dev);
+ spin_unlock_irqrestore(&lp->spinlock, flags); /* Re-enable interrupts */
+ }
+ else
+ {
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG
+ "%s: wv_82593_reconfig(): delayed (state = %lX, link = %d)\n",
+ dev->name, dev->state, link->open);
+#endif
+ }
+}
+
+/********************* DEBUG & INFO SUBROUTINES *********************/
+/*
+ * These routines are used in the code to show debug information.
+ * Most of the time, they dump the contents of hardware structures...
+ */
+
+#ifdef DEBUG_PSA_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted contents of the Parameter Storage Area.
+ */
+static void
+wv_psa_show(psa_t * p)
+{
+ printk(KERN_DEBUG "##### wavelan psa contents: #####\n");
+ printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
+ p->psa_io_base_addr_1,
+ p->psa_io_base_addr_2,
+ p->psa_io_base_addr_3,
+ p->psa_io_base_addr_4);
+ printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
+ p->psa_rem_boot_addr_1,
+ p->psa_rem_boot_addr_2,
+ p->psa_rem_boot_addr_3);
+ printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
+ printk("psa_int_req_no: %d\n", p->psa_int_req_no);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_unused0[0],
+ p->psa_unused0[1],
+ p->psa_unused0[2],
+ p->psa_unused0[3],
+ p->psa_unused0[4],
+ p->psa_unused0[5],
+ p->psa_unused0[6]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_univ_mac_addr[0],
+ p->psa_univ_mac_addr[1],
+ p->psa_univ_mac_addr[2],
+ p->psa_univ_mac_addr[3],
+ p->psa_univ_mac_addr[4],
+ p->psa_univ_mac_addr[5]);
+ printk(KERN_DEBUG "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_local_mac_addr[0],
+ p->psa_local_mac_addr[1],
+ p->psa_local_mac_addr[2],
+ p->psa_local_mac_addr[3],
+ p->psa_local_mac_addr[4],
+ p->psa_local_mac_addr[5]);
+ printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel);
+ printk("psa_comp_number: %d, ", p->psa_comp_number);
+ printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
+ printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
+ p->psa_feature_select);
+ printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
+ printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
+ printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
+ printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0], p->psa_nwid[1]);
+ printk("psa_nwid_select: %d\n", p->psa_nwid_select);
+ printk(KERN_DEBUG "psa_encryption_select: %d, ", p->psa_encryption_select);
+ printk("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_encryption_key[0],
+ p->psa_encryption_key[1],
+ p->psa_encryption_key[2],
+ p->psa_encryption_key[3],
+ p->psa_encryption_key[4],
+ p->psa_encryption_key[5],
+ p->psa_encryption_key[6],
+ p->psa_encryption_key[7]);
+ printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
+ printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
+ p->psa_call_code[0]);
+ printk("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_call_code[0],
+ p->psa_call_code[1],
+ p->psa_call_code[2],
+ p->psa_call_code[3],
+ p->psa_call_code[4],
+ p->psa_call_code[5],
+ p->psa_call_code[6],
+ p->psa_call_code[7]);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ p->psa_reserved[0],
+ p->psa_reserved[1],
+ p->psa_reserved[2],
+ p->psa_reserved[3]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
+ printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+ printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
+} /* wv_psa_show */
+#endif /* DEBUG_PSA_SHOW */
+
+#ifdef DEBUG_MMC_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the Modem Management Controller.
+ * This function needs to be completed...
+ */
+static void
+wv_mmc_show(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ mmr_t m;
+ unsigned long flags;
+
+ /* Basic check */
+ if(hasr_read(base) & HASR_NO_CLK)
+ {
+ printk(KERN_WARNING "%s: wv_mmc_show: modem not connected\n",
+ dev->name);
+ return;
+ }
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Read the mmc */
+ mmc_out(base, mmwoff(0, mmw_freeze), 1);
+ mmc_read(base, 0, (u_char *)&m, sizeof(m));
+ mmc_out(base, mmwoff(0, mmw_freeze), 0);
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+ /* Don't forget to update statistics */
+ lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+#endif /* WIRELESS_EXT */
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ printk(KERN_DEBUG "##### wavelan modem status registers: #####\n");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused0[0],
+ m.mmr_unused0[1],
+ m.mmr_unused0[2],
+ m.mmr_unused0[3],
+ m.mmr_unused0[4],
+ m.mmr_unused0[5],
+ m.mmr_unused0[6],
+ m.mmr_unused0[7]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "Encryption algorythm: %02X - Status: %02X\n",
+ m.mmr_des_avail, m.mmr_des_status);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused1[0],
+ m.mmr_unused1[1],
+ m.mmr_unused1[2],
+ m.mmr_unused1[3],
+ m.mmr_unused1[4]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
+ m.mmr_dce_status,
+ (m.mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ? "energy detected,":"",
+ (m.mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
+ "loop test indicated," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ? "transmitter on," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
+ "jabber timer expired," : "");
+ printk(KERN_DEBUG "Dsp ID: %02X\n",
+ m.mmr_dsp_id);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
+ m.mmr_unused2[0],
+ m.mmr_unused2[1]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
+ (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
+ (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
+ printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
+ m.mmr_thr_pre_set & MMR_THR_PRE_SET,
+ (m.mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" : "below");
+ printk(KERN_DEBUG "signal_lvl: %d [%s], ",
+ m.mmr_signal_lvl & MMR_SIGNAL_LVL,
+ (m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" : "no new msg");
+ printk("silence_lvl: %d [%s], ", m.mmr_silence_lvl & MMR_SILENCE_LVL,
+ (m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" : "no new update");
+ printk("sgnl_qual: 0x%x [%s]\n", m.mmr_sgnl_qual & MMR_SGNL_QUAL,
+ (m.mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" : "Antenna 0");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
+#endif /* DEBUG_SHOW_UNUSED */
+} /* wv_mmc_show */
+#endif /* DEBUG_MMC_SHOW */
+
+#ifdef DEBUG_I82593_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the i82593's receive unit.
+ */
+static void
+wv_ru_show(struct net_device * dev)
+{
+ net_local *lp = netdev_priv(dev);
+
+ printk(KERN_DEBUG "##### wavelan i82593 receiver status: #####\n");
+ printk(KERN_DEBUG "ru: rfp %d stop %d", lp->rfp, lp->stop);
+ /*
+ * Not implemented yet...
+ */
+ printk("\n");
+} /* wv_ru_show */
+#endif /* DEBUG_I82593_SHOW */
+
+#ifdef DEBUG_DEVICE_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN PCMCIA device driver.
+ */
+static void
+wv_dev_show(struct net_device * dev)
+{
+ printk(KERN_DEBUG "dev:");
+ printk(" state=%lX,", dev->state);
+ printk(" trans_start=%ld,", dev->trans_start);
+ printk(" flags=0x%x,", dev->flags);
+ printk("\n");
+} /* wv_dev_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN PCMCIA device driver's
+ * private information.
+ */
+static void
+wv_local_show(struct net_device * dev)
+{
+ net_local *lp = netdev_priv(dev);
+
+ printk(KERN_DEBUG "local:");
+ /*
+ * Not implemented yet...
+ */
+ printk("\n");
+} /* wv_local_show */
+#endif /* DEBUG_DEVICE_SHOW */
+
+#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
+/*------------------------------------------------------------------*/
+/*
+ * Dump packet header (and content if necessary) on the screen
+ */
+static inline void
+wv_packet_info(u_char * p, /* Packet to dump */
+ int length, /* Length of the packet */
+ char * msg1, /* Name of the device */
+ char * msg2) /* Name of the function */
+{
+ int i;
+ int maxi;
+
+ printk(KERN_DEBUG "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n",
+ msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length);
+ printk(KERN_DEBUG "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n",
+ msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]);
+
+#ifdef DEBUG_PACKET_DUMP
+
+ printk(KERN_DEBUG "data=\"");
+
+ if((maxi = length) > DEBUG_PACKET_DUMP)
+ maxi = DEBUG_PACKET_DUMP;
+ for(i = 14; i < maxi; i++)
+ if(p[i] >= ' ' && p[i] <= '~')
+ printk(" %c", p[i]);
+ else
+ printk("%02X", p[i]);
+ if(maxi < length)
+ printk("..");
+ printk("\"\n");
+ printk(KERN_DEBUG "\n");
+#endif /* DEBUG_PACKET_DUMP */
+}
+#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
+
+/*------------------------------------------------------------------*/
+/*
+ * This is the information which is displayed by the driver at startup.
+ * There are a lot of flags to configure it as you wish...
+ */
+static inline void
+wv_init_info(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ psa_t psa;
+ int i;
+
+ /* Read the parameter storage area */
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef DEBUG_PSA_SHOW
+ wv_psa_show(&psa);
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82593_SHOW
+ wv_ru_show(dev);
+#endif
+
+#ifdef DEBUG_BASIC_SHOW
+ /* Now, let's go for the basic stuff */
+ printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, hw_addr",
+ dev->name, base, dev->irq);
+ for(i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]);
+
+ /* Print current network id */
+ if(psa.psa_nwid_select)
+ printk(", nwid 0x%02X-%02X", psa.psa_nwid[0], psa.psa_nwid[1]);
+ else
+ printk(", nwid off");
+
+ /* If 2.00 card */
+ if(!(mmc_in(base, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ unsigned short freq;
+
+ /* Ask the EEprom to read the frequency from the first area */
+ fee_read(base, 0x00 /* 1st area - frequency... */,
+ &freq, 1);
+
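+ /* The upper bits of the EEprom word give the offset from 2400 MHz in
+ * 1 MHz steps (e.g. a raw value of 0x0C20 prints 2448.5 MHz, since
+ * bit 5, tested below, adds the extra half megahertz). */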
+ /* Print frequency */
+ printk(", 2.00, %ld", (freq >> 6) + 2400L);
+
+ /* Hack !!! */
+ if(freq & 0x20)
+ printk(".5");
+ }
+ else
+ {
+ printk(", PCMCIA, ");
+ switch (psa.psa_subband)
+ {
+ case PSA_SUBBAND_915:
+ printk("915");
+ break;
+ case PSA_SUBBAND_2425:
+ printk("2425");
+ break;
+ case PSA_SUBBAND_2460:
+ printk("2460");
+ break;
+ case PSA_SUBBAND_2484:
+ printk("2484");
+ break;
+ case PSA_SUBBAND_2430_5:
+ printk("2430.5");
+ break;
+ default:
+ printk("unknown");
+ }
+ }
+
+ printk(" MHz\n");
+#endif /* DEBUG_BASIC_SHOW */
+
+#ifdef DEBUG_VERSION_SHOW
+ /* Print version information */
+ printk(KERN_NOTICE "%s", version);
+#endif
+} /* wv_init_info */
+
+/********************* IOCTL, STATS & RECONFIG *********************/
+/*
+ * Here we find routines that are called by Linux on different
+ * occasions after the configuration and not for transmitting data.
+ * These may be called when the user uses ifconfig, /proc/net/dev
+ * or wireless extensions.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the current ethernet statistics. This may be called with the
+ * card open or closed.
+ * Used when the user reads /proc/net/dev
+ */
+static en_stats *
+wavelan_get_stats(struct net_device * dev)
+{
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
+#endif
+
+ return(&((net_local *)netdev_priv(dev))->stats);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+
+static void
+wavelan_set_multicast_list(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n", dev->name);
+#endif
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
+ dev->name, dev->flags, dev->mc_count);
+#endif
+
+ if(dev->flags & IFF_PROMISC)
+ {
+ /*
+ * Enable promiscuous mode: receive all packets.
+ */
+ if(!lp->promiscuous)
+ {
+ lp->promiscuous = 1;
+ lp->allmulticast = 0;
+ lp->mc_count = 0;
+
+ wv_82593_reconfig(dev);
+
+ /* Tell the kernel that we are doing a really bad job... */
+ dev->flags |= IFF_PROMISC;
+ }
+ }
+ else
+ /* If all multicast addresses
+ * or too many multicast addresses for the hardware filter */
+ if((dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > I82593_MAX_MULTICAST_ADDRESSES))
+ {
+ /*
+ * Disable promiscuous mode, but activate the all multicast mode
+ */
+ if(!lp->allmulticast)
+ {
+ lp->promiscuous = 0;
+ lp->allmulticast = 1;
+ lp->mc_count = 0;
+
+ wv_82593_reconfig(dev);
+
+ /* Tell the kernel that we are doing a really bad job... */
+ dev->flags |= IFF_ALLMULTI;
+ }
+ }
+ else
+ /* If there are some multicast addresses to send */
+ if(dev->mc_list != (struct dev_mc_list *) NULL)
+ {
+ /*
+ * Disable promiscuous mode, but receive all packets
+ * in multicast list
+ */
+#ifdef MULTICAST_AVOID
+ if(lp->promiscuous || lp->allmulticast ||
+ (dev->mc_count != lp->mc_count))
+#endif
+ {
+ lp->promiscuous = 0;
+ lp->allmulticast = 0;
+ lp->mc_count = dev->mc_count;
+
+ wv_82593_reconfig(dev);
+ }
+ }
+ else
+ {
+ /*
+ * Switch to normal mode: disable promiscuous mode and
+ * clear the multicast list.
+ */
+ if(lp->promiscuous || lp->mc_count == 0)
+ {
+ lp->promiscuous = 0;
+ lp->allmulticast = 0;
+ lp->mc_count = 0;
+
+ wv_82593_reconfig(dev);
+ }
+ }
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This function doesn't exist...
+ * (Note : it was a nice way to test the reconfigure stuff...)
+ */
+#ifdef SET_MAC_ADDRESS
+static int
+wavelan_set_mac_address(struct net_device * dev,
+ void * addr)
+{
+ struct sockaddr * mac = addr;
+
+ /* Copy the address */
+ memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
+
+ /* Reconfig the beast */
+ wv_82593_reconfig(dev);
+
+ return 0;
+}
+#endif /* SET_MAC_ADDRESS */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Frequency setting (for hardware capable of it)
+ * It's a bit complicated and you don't really want to look into it...
+ */
+static inline int
+wv_set_frequency(u_long base, /* i/o port of the card */
+ iw_freq * frequency)
+{
+ const int BAND_NUM = 10; /* Number of bands */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
+#ifdef DEBUG_IOCTL_INFO
+ int i;
+#endif
+
+ /* Setting by frequency */
+ /* Theoretically, you may set any frequency between
+ * the two limits with a 0.5 MHz precision. In practice,
+ * I don't want you to have trouble with local
+ * regulations... */
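+ /* The resulting freq is an offset from 2.4 GHz in 0.5 MHz steps :
+ * e.g. 2.432 GHz gives m = 24320 * 10^4, so (24320 - 24000) / 5 = 64,
+ * i.e. 64 half-megahertz (32 MHz) above 2400 MHz. */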
+ if((frequency->e == 1) &&
+ (frequency->m >= (int) 2.412e8) && (frequency->m <= (int) 2.487e8))
+ {
+ freq = ((frequency->m / 10000) - 24000L) / 5;
+ }
+
+ /* Setting by channel (same as wfreqsel) */
+ /* Warning : each channel is 22MHz wide, so some of the channels
+ * will interfere... */
+ if((frequency->e == 0) &&
+ (frequency->m >= 0) && (frequency->m < BAND_NUM))
+ {
+ /* Get frequency offset. */
+ freq = channel_bands[frequency->m] >> 1;
+ }
+
+ /* Verify if the frequency is allowed */
+ if(freq != 0L)
+ {
+ u_short table[10]; /* Authorized frequency table */
+
+ /* Read the frequency table */
+ fee_read(base, 0x71 /* frequency table */,
+ table, 10);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Frequency table :");
+ for(i = 0; i < 10; i++)
+ {
+ printk(" %04X",
+ table[i]);
+ }
+ printk("\n");
+#endif
+
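+ /* The table read above looks like a 160-bit allocation map, stored
+ * highest word first : bit (freq - 24) set means that this 0.5 MHz
+ * step is allowed by the card. */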
+ /* Look in the table if the frequency is allowed */
+ if(!(table[9 - ((freq - 24) / 16)] &
+ (1 << ((freq - 24) % 16))))
+ return -EINVAL; /* not allowed */
+ }
+ else
+ return -EINVAL;
+
+ /* If we get a usable frequency */
+ if(freq != 0L)
+ {
+ unsigned short area[16];
+ unsigned short dac[2];
+ unsigned short area_verify[16];
+ unsigned short dac_verify[2];
+ /* Corresponding gain (in the power adjust value table)
+ * see AT&T Wavelan Data Manual, REF 407-024689/E, page 3-8
+ * & WCIN062D.DOC, page 6.2.9 */
+ unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
+ int power_band = 0; /* Selected band */
+ unsigned short power_adjust; /* Correct value */
+
+ /* Search for the gain */
+ power_band = 0;
+ while((freq > power_limit[power_band]) &&
+ (power_limit[++power_band] != 0))
+ ;
+
+ /* Read the first area */
+ fee_read(base, 0x00,
+ area, 16);
+
+ /* Read the DAC */
+ fee_read(base, 0x60,
+ dac, 2);
+
+ /* Read the new power adjust value */
+ fee_read(base, 0x6B - (power_band >> 1),
+ &power_adjust, 1);
+ if(power_band & 0x1)
+ power_adjust >>= 8;
+ else
+ power_adjust &= 0xFF;
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Wavelan EEprom Area 1 :");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "Wavelan EEprom DAC : %04X %04X\n",
+ dac[0], dac[1]);
+#endif
+
+ /* Frequency offset (for info only...) */
+ area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
+
+ /* Receiver Principle main divider coefficient */
+ area[3] = (freq >> 1) + 2400L - 352L;
+ area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Transmitter Main divider coefficient */
+ area[13] = (freq >> 1) + 2400L;
+ area[12] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Others part of the area are flags, bit streams or unused... */
+
+ /* Set the value in the DAC */
+ dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
+ dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
+
+ /* Write the first area */
+ fee_write(base, 0x00,
+ area, 16);
+
+ /* Write the DAC */
+ fee_write(base, 0x60,
+ dac, 2);
+
+ /* We should now verify that the EEprom writing was OK */
+
+ /* ReRead the first area */
+ fee_read(base, 0x00,
+ area_verify, 16);
+
+ /* ReRead the DAC */
+ fee_read(base, 0x60,
+ dac_verify, 2);
+
+ /* Compare */
+ if(memcmp(area, area_verify, 16 * 2) ||
+ memcmp(dac, dac_verify, 2 * 2))
+ {
+#ifdef DEBUG_IOCTL_ERROR
+ printk(KERN_INFO "Wavelan: wv_set_frequency : unable to write new frequency to EEprom (?)\n");
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEprom - area 1)
+ * Note : as the EEprom is auto decremented, we set the end
+ * of the area... */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), 0x0F);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(base, 100, 100);
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEprom - area 7 - DAC) */
+ mmc_out(base, mmwoff(0, mmw_fee_addr), 0x61);
+ mmc_out(base, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(base, 100, 100);
+
+#ifdef DEBUG_IOCTL_INFO
+ /* Verification of what we have done... */
+
+ printk(KERN_DEBUG "Wavelan EEprom Area 1 :");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area_verify[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "Wavelan EEprom DAC : %04X %04X\n",
+ dac_verify[0], dac_verify[1]);
+#endif
+
+ return 0;
+ }
+ else
+ return -EINVAL; /* Bah, never get there... */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Give the list of available frequencies
+ */
+static inline int
+wv_frequency_list(u_long base, /* i/o port of the card */
+ iw_freq * list, /* List of frequency to fill */
+ int max) /* Maximum number of frequencies */
+{
+ u_short table[10]; /* Authorized frequency table */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz + 12 MHz */
+ int i; /* index in the table */
+ const int BAND_NUM = 10; /* Number of bands */
+ int c = 0; /* Channel number */
+
+ /* Read the frequency table */
+ fee_read(base, 0x71 /* frequency table */,
+ table, 10);
+
+ /* Look all frequencies */
+ i = 0;
+ for(freq = 0; freq < 150; freq++)
+ /* Look in the table if the frequency is allowed */
+ if(table[9 - (freq / 16)] & (1 << (freq % 16)))
+ {
+ /* Compute approximate channel number */
+ while((((channel_bands[c] >> 1) - 24) < freq) &&
+ (c < BAND_NUM))
+ c++;
+ list[i].i = c; /* Set the list index */
+
+ /* put in the list */
+ list[i].m = (((freq + 24) * 5) + 24000L) * 10000;
+ list[i++].e = 1;
+
+ /* Check number */
+ if(i >= max)
+ return(i);
+ }
+
+ return(i);
+}
+
+#ifdef IW_WIRELESS_SPY
+/*------------------------------------------------------------------*/
+/*
+ * Gather wireless spy statistics : for each packet, compare the source
+ * address with our list, and if it matches, get the stats...
+ * Sorry, but this function really needs wireless extensions...
+ */
+static inline void
+wl_spy_gather(struct net_device * dev,
+ u_char * mac, /* MAC address */
+ u_char * stats) /* Statistics to gather */
+{
+ struct iw_quality wstats;
+
+ wstats.qual = stats[2] & MMR_SGNL_QUAL;
+ wstats.level = stats[0] & MMR_SIGNAL_LVL;
+ wstats.noise = stats[1] & MMR_SILENCE_LVL;
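+ /* Mark qual, level and noise as updated (the low three bits) */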
+ wstats.updated = 0x7;
+
+ /* Update spy records */
+ wireless_spy_update(dev, mac, &wstats);
+}
+#endif /* IW_WIRELESS_SPY */
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * This function calculates a histogram of the signal level.
+ * As the noise is quite constant, it's like doing it on the SNR.
+ * We have defined a set of intervals (lp->his_range), and each time
+ * the level falls in an interval, we increment its count (lp->his_sum).
+ * With this histogram you may detect if one WaveLAN is really weak,
+ * or you may also calculate the mean and standard deviation of the level...
+ */
+static inline void
+wl_his_gather(struct net_device * dev,
+ u_char * stats) /* Statistics to gather */
+{
+ net_local * lp = netdev_priv(dev);
+ u_char level = stats[0] & MMR_SIGNAL_LVL;
+ int i;
+
+ /* Find the correct interval */
+ i = 0;
+ while((i < (lp->his_number - 1)) && (level >= lp->his_range[i++]))
+ ;
+
+ /* Increment interval counter */
+ (lp->his_sum[i])++;
+}
+#endif /* HISTOGRAM */
+
+static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, "wavelan_cs", sizeof(info->driver)-1);
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = wl_get_drvinfo
+};
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get protocol name
+ */
+static int wavelan_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ strcpy(wrqu->name, "WaveLAN");
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set NWID
+ */
+static int wavelan_set_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ mm_t m;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Set NWID in WaveLAN. */
+ if (!wrqu->nwid.disabled) {
+ /* Set NWID in psa */
+ psa.psa_nwid[0] = (wrqu->nwid.value & 0xFF00) >> 8;
+ psa.psa_nwid[1] = wrqu->nwid.value & 0xFF;
+ psa.psa_nwid_select = 0x01;
+ psa_write(dev,
+ (char *) psa.psa_nwid - (char *) &psa,
+ (unsigned char *) psa.psa_nwid, 3);
+
+ /* Set NWID in mmc. */
+ m.w.mmw_netw_id_l = psa.psa_nwid[1];
+ m.w.mmw_netw_id_h = psa.psa_nwid[0];
+ mmc_write(base,
+ (char *) &m.w.mmw_netw_id_l -
+ (char *) &m,
+ (unsigned char *) &m.w.mmw_netw_id_l, 2);
+ mmc_out(base, mmwoff(0, mmw_loopt_sel), 0x00);
+ } else {
+ /* Disable NWID in the psa. */
+ psa.psa_nwid_select = 0x00;
+ psa_write(dev,
+ (char *) &psa.psa_nwid_select -
+ (char *) &psa,
+ (unsigned char *) &psa.psa_nwid_select,
+ 1);
+
+ /* Disable NWID in the mmc (no filtering). */
+ mmc_out(base, mmwoff(0, mmw_loopt_sel),
+ MMW_LOOPT_SEL_DIS_NWID);
+ }
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get NWID
+ */
+static int wavelan_get_nwid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Read the NWID. */
+ psa_read(dev,
+ (char *) psa.psa_nwid - (char *) &psa,
+ (unsigned char *) psa.psa_nwid, 3);
+ wrqu->nwid.value = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
+ wrqu->nwid.disabled = !(psa.psa_nwid_select);
+ wrqu->nwid.fixed = 1; /* Superfluous */
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set frequency
+ */
+static int wavelan_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ ret = wv_set_frequency(base, &(wrqu->freq));
+ else
+ ret = -EOPNOTSUPP;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get frequency
+ */
+static int wavelan_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable).
+ * Does it work for everybody, especially old cards? */
+ if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area. */
+ fee_read(base, 0x00, &freq, 1);
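+ /* freq >> 5 is the offset from 2400 MHz in 0.5 MHz steps, so for
+ * example a value of 64 yields m = 243200000 with e = 1, i.e. 2432 MHz. */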
+ wrqu->freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
+ wrqu->freq.e = 1;
+ } else {
+ psa_read(dev,
+ (char *) &psa.psa_subband - (char *) &psa,
+ (unsigned char *) &psa.psa_subband, 1);
+
+ if (psa.psa_subband <= 4) {
+ wrqu->freq.m = fixed_bands[psa.psa_subband];
+ wrqu->freq.e = (psa.psa_subband != 0);
+ } else
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set level threshold
+ */
+static int wavelan_set_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Set the level threshold. */
+ /* We should complain loudly if wrqu->sens.fixed = 0, because we
+ * can't set auto mode... */
+ psa.psa_thr_pre_set = wrqu->sens.value & 0x3F;
+ psa_write(dev,
+ (char *) &psa.psa_thr_pre_set - (char *) &psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev);
+ mmc_out(base, mmwoff(0, mmw_thr_pre_set),
+ psa.psa_thr_pre_set);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get level threshold
+ */
+static int wavelan_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Read the level threshold. */
+ psa_read(dev,
+ (char *) &psa.psa_thr_pre_set - (char *) &psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ wrqu->sens.value = psa.psa_thr_pre_set & 0x3F;
+ wrqu->sens.fixed = 1;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set encryption key
+ */
+static int wavelan_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ psa_t psa;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check if capable of encryption */
+ if (!mmc_encr(base)) {
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Check the size of the key */
+ if((wrqu->encoding.length != 8) && (wrqu->encoding.length != 0)) {
+ ret = -EINVAL;
+ }
+
+ if(!ret) {
+ /* Basic checking... */
+ if (wrqu->encoding.length == 8) {
+ /* Copy the key in the driver */
+ memcpy(psa.psa_encryption_key, extra,
+ wrqu->encoding.length);
+ psa.psa_encryption_select = 1;
+
+ psa_write(dev,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.
+ psa_encryption_select, 8 + 1);
+
+ mmc_out(base, mmwoff(0, mmw_encr_enable),
+ MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
+ mmc_write(base, mmwoff(0, mmw_encr_key),
+ (unsigned char *) &psa.
+ psa_encryption_key, 8);
+ }
+
+ /* disable encryption */
+ if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
+ psa.psa_encryption_select = 0;
+ psa_write(dev,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.
+ psa_encryption_select, 1);
+
+ mmc_out(base, mmwoff(0, mmw_encr_enable), 0);
+ }
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev);
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get encryption key
+ */
+static int wavelan_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check if encryption is available */
+ if (!mmc_encr(base)) {
+ ret = -EOPNOTSUPP;
+ } else {
+ /* Read the encryption key */
+ psa_read(dev,
+ (char *) &psa.psa_encryption_select -
+ (char *) &psa,
+ (unsigned char *) &psa.
+ psa_encryption_select, 1 + 8);
+
+ /* encryption is enabled ? */
+ if (psa.psa_encryption_select)
+ wrqu->encoding.flags = IW_ENCODE_ENABLED;
+ else
+ wrqu->encoding.flags = IW_ENCODE_DISABLED;
+ wrqu->encoding.flags |= mmc_encr(base);
+
+ /* Copy the key to the user buffer */
+ wrqu->encoding.length = 8;
+ memcpy(extra, psa.psa_encryption_key, wrqu->encoding.length);
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+#ifdef WAVELAN_ROAMING_EXT
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set ESSID (domain)
+ */
+static int wavelan_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check if disable */
+ if(wrqu->data.flags == 0)
+ lp->filter_domains = 0;
+ else {
+ char essid[IW_ESSID_MAX_SIZE + 1];
+ char * endp;
+
+ /* Terminate the string */
+ memcpy(essid, extra, wrqu->data.length);
+ essid[IW_ESSID_MAX_SIZE] = '\0';
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "SetEssid : ``%s''\n", essid);
+#endif /* DEBUG_IOCTL_INFO */
+
+ /* Convert to a number (note : Wavelan specific) */
+ lp->domain_id = simple_strtoul(essid, &endp, 16);
+ /* Has it worked ? */
+ if(endp > essid)
+ lp->filter_domains = 1;
+ else {
+ lp->filter_domains = 0;
+ ret = -EINVAL;
+ }
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get ESSID (domain)
+ */
+static int wavelan_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+
+ /* Is the domain ID active ? */
+ wrqu->data.flags = lp->filter_domains;
+
+ /* Copy Domain ID into a string (Wavelan specific) */
+ /* Sounds crazy, but we can't have a snprintf in the kernel !!! */
+ sprintf(extra, "%lX", lp->domain_id);
+ extra[IW_ESSID_MAX_SIZE] = '\0';
+
+ /* Set the length */
+ wrqu->data.length = strlen(extra) + 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set AP address
+ */
+static int wavelan_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Set AP to : %02X:%02X:%02X:%02X:%02X:%02X\n",
+ wrqu->ap_addr.sa_data[0],
+ wrqu->ap_addr.sa_data[1],
+ wrqu->ap_addr.sa_data[2],
+ wrqu->ap_addr.sa_data[3],
+ wrqu->ap_addr.sa_data[4],
+ wrqu->ap_addr.sa_data[5]);
+#endif /* DEBUG_IOCTL_INFO */
+
+ return -EOPNOTSUPP;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get AP address
+ */
+static int wavelan_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ /* Should get the real McCoy instead of our own Ethernet address */
+ memcpy(wrqu->ap_addr.sa_data, dev->dev_addr, WAVELAN_ADDR_SIZE);
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+
+ return -EOPNOTSUPP;
+}
+#endif /* WAVELAN_ROAMING_EXT */
+
+#ifdef WAVELAN_ROAMING
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : set mode
+ */
+static int wavelan_set_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Check mode */
+ switch(wrqu->mode) {
+ case IW_MODE_ADHOC:
+ if(do_roaming) {
+ wv_roam_cleanup(dev);
+ do_roaming = 0;
+ }
+ break;
+ case IW_MODE_INFRA:
+ if(!do_roaming) {
+ wv_roam_init(dev);
+ do_roaming = 1;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get mode
+ */
+static int wavelan_get_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ if(do_roaming)
+ wrqu->mode = IW_MODE_INFRA;
+ else
+ wrqu->mode = IW_MODE_ADHOC;
+
+ return 0;
+}
+#endif /* WAVELAN_ROAMING */
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Handler : get range info
+ */
+static int wavelan_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ struct iw_range *range = (struct iw_range *) extra;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Set the length (very important for backward compatibility) */
+ wrqu->data.length = sizeof(struct iw_range);
+
+ /* Set all the info we don't care or don't know about to zero */
+ memset(range, 0, sizeof(struct iw_range));
+
+ /* Set the Wireless Extension versions */
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 9;
+
+ /* Set information in the range struct. */
+ range->throughput = 1.4 * 1000 * 1000; /* don't argue on this ! */
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0xFFFF;
+
+ range->sensitivity = 0x3F;
+ range->max_qual.qual = MMR_SGNL_QUAL;
+ range->max_qual.level = MMR_SIGNAL_LVL;
+ range->max_qual.noise = MMR_SILENCE_LVL;
+ range->avg_qual.qual = MMR_SGNL_QUAL; /* Always max */
+ /* Need to get better values for those two */
+ range->avg_qual.level = 30;
+ range->avg_qual.noise = 8;
+
+ range->num_bitrates = 1;
+ range->bitrate[0] = 2000000; /* 2 Mb/s */
+
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_MASK(0x8B02) |
+ IW_EVENT_CAPA_MASK(0x8B04) |
+ IW_EVENT_CAPA_MASK(0x8B06));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
+ range->num_channels = 10;
+ range->num_frequency = wv_frequency_list(base, range->freq,
+ IW_MAX_FREQUENCIES);
+ } else
+ range->num_channels = range->num_frequency = 0;
+
+ /* Encryption supported ? */
+ if (mmc_encr(base)) {
+ range->encoding_size[0] = 8; /* DES = 64 bits key */
+ range->num_encoding_sizes = 1;
+ range->max_encoding_tokens = 1; /* Only one key possible */
+ } else {
+ range->num_encoding_sizes = 0;
+ range->max_encoding_tokens = 0;
+ }
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : set quality threshold
+ */
+static int wavelan_set_qthr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ psa.psa_quality_thr = *(extra) & 0x0F;
+ psa_write(dev,
+ (char *) &psa.psa_quality_thr - (char *) &psa,
+ (unsigned char *) &psa.psa_quality_thr, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev);
+ mmc_out(base, mmwoff(0, mmw_quality_thr),
+ psa.psa_quality_thr);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : get quality threshold
+ */
+static int wavelan_get_qthr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ psa_t psa;
+ unsigned long flags;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ psa_read(dev,
+ (char *) &psa.psa_quality_thr - (char *) &psa,
+ (unsigned char *) &psa.psa_quality_thr, 1);
+ *(extra) = psa.psa_quality_thr & 0x0F;
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return 0;
+}
+
+#ifdef WAVELAN_ROAMING
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : set roaming
+ */
+static int wavelan_set_roam(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Disable interrupts and save flags. */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Note : should check if user == root */
+ if(do_roaming && (*extra)==0)
+ wv_roam_cleanup(dev);
+ else if(do_roaming==0 && (*extra)!=0)
+ wv_roam_init(dev);
+
+ do_roaming = (*extra);
+
+ /* Enable interrupts and restore flags. */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : get roaming
+ */
+static int wavelan_get_roam(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ *(extra) = do_roaming;
+
+ return 0;
+}
+#endif /* WAVELAN_ROAMING */
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : set histogram
+ */
+static int wavelan_set_histo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+
+ /* Check the number of intervals. */
+ if (wrqu->data.length > 16) {
+ return(-E2BIG);
+ }
+
+ /* Disable histo while we copy the addresses.
+ * As we don't disable interrupts, we need to do this */
+ lp->his_number = 0;
+
+ /* Are there ranges to copy? */
+ if (wrqu->data.length > 0) {
+ /* Copy interval ranges to the driver */
+ memcpy(lp->his_range, extra, wrqu->data.length);
+
+ {
+ int i;
+ printk(KERN_DEBUG "Histo :");
+ for(i = 0; i < wrqu->data.length; i++)
+ printk(" %d", lp->his_range[i]);
+ printk("\n");
+ }
+
+ /* Reset result structure. */
+ memset(lp->his_sum, 0x00, sizeof(long) * 16);
+ }
+
+ /* Now we can set the number of ranges */
+ lp->his_number = wrqu->data.length;
+
+ return(0);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wireless Private Handler : get histogram
+ */
+static int wavelan_get_histo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ net_local *lp = netdev_priv(dev);
+
+ /* Set the number of intervals. */
+ wrqu->data.length = lp->his_number;
+
+ /* Give back the distribution statistics */
+ if(lp->his_number > 0)
+ memcpy(extra, lp->his_sum, sizeof(long) * lp->his_number);
+
+ return(0);
+}
+#endif /* HISTOGRAM */
+
+/*------------------------------------------------------------------*/
+/*
+ * Structures to export the Wireless Handlers
+ */
+
+static const struct iw_priv_args wavelan_private_args[] = {
+/*{ cmd, set_args, get_args, name } */
+ { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
+ { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
+ { SIOCSIPROAM, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setroam" },
+ { SIOCGIPROAM, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getroam" },
+ { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
+ { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
+};
+
+static const iw_handler wavelan_handler[] =
+{
+ NULL, /* SIOCSIWNAME */
+ wavelan_get_name, /* SIOCGIWNAME */
+ wavelan_set_nwid, /* SIOCSIWNWID */
+ wavelan_get_nwid, /* SIOCGIWNWID */
+ wavelan_set_freq, /* SIOCSIWFREQ */
+ wavelan_get_freq, /* SIOCGIWFREQ */
+#ifdef WAVELAN_ROAMING
+ wavelan_set_mode, /* SIOCSIWMODE */
+ wavelan_get_mode, /* SIOCGIWMODE */
+#else /* WAVELAN_ROAMING */
+ NULL, /* SIOCSIWMODE */
+ NULL, /* SIOCGIWMODE */
+#endif /* WAVELAN_ROAMING */
+ wavelan_set_sens, /* SIOCSIWSENS */
+ wavelan_get_sens, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ wavelan_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ iw_handler_set_spy, /* SIOCSIWSPY */
+ iw_handler_get_spy, /* SIOCGIWSPY */
+ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
+ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
+#ifdef WAVELAN_ROAMING_EXT
+ wavelan_set_wap, /* SIOCSIWAP */
+ wavelan_get_wap, /* SIOCGIWAP */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCGIWAPLIST */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ wavelan_set_essid, /* SIOCSIWESSID */
+ wavelan_get_essid, /* SIOCGIWESSID */
+#else /* WAVELAN_ROAMING_EXT */
+ NULL, /* SIOCSIWAP */
+ NULL, /* SIOCGIWAP */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCGIWAPLIST */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWESSID */
+ NULL, /* SIOCGIWESSID */
+#endif /* WAVELAN_ROAMING_EXT */
+ NULL, /* SIOCSIWNICKN */
+ NULL, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ NULL, /* SIOCSIWRATE */
+ NULL, /* SIOCGIWRATE */
+ NULL, /* SIOCSIWRTS */
+ NULL, /* SIOCGIWRTS */
+ NULL, /* SIOCSIWFRAG */
+ NULL, /* SIOCGIWFRAG */
+ NULL, /* SIOCSIWTXPOW */
+ NULL, /* SIOCGIWTXPOW */
+ NULL, /* SIOCSIWRETRY */
+ NULL, /* SIOCGIWRETRY */
+ wavelan_set_encode, /* SIOCSIWENCODE */
+ wavelan_get_encode, /* SIOCGIWENCODE */
+};
+
+static const iw_handler wavelan_private_handler[] =
+{
+ wavelan_set_qthr, /* SIOCIWFIRSTPRIV */
+ wavelan_get_qthr, /* SIOCIWFIRSTPRIV + 1 */
+#ifdef WAVELAN_ROAMING
+ wavelan_set_roam, /* SIOCIWFIRSTPRIV + 2 */
+ wavelan_get_roam, /* SIOCIWFIRSTPRIV + 3 */
+#else /* WAVELAN_ROAMING */
+ NULL, /* SIOCIWFIRSTPRIV + 2 */
+ NULL, /* SIOCIWFIRSTPRIV + 3 */
+#endif /* WAVELAN_ROAMING */
+#ifdef HISTOGRAM
+ wavelan_set_histo, /* SIOCIWFIRSTPRIV + 4 */
+ wavelan_get_histo, /* SIOCIWFIRSTPRIV + 5 */
+#endif /* HISTOGRAM */
+};
+
+static const struct iw_handler_def wavelan_handler_def =
+{
+ .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler),
+ .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler),
+ .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args),
+ .standard = wavelan_handler,
+ .private = wavelan_private_handler,
+ .private_args = wavelan_private_args,
+ .get_wireless_stats = wavelan_get_wireless_stats,
+};
+
+/*------------------------------------------------------------------*/
+/*
+ * Get wireless statistics
+ * Called by /proc/net/wireless...
+ */
+static iw_stats *
+wavelan_get_wireless_stats(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ mmr_t m;
+ iw_stats * wstats;
+ unsigned long flags;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n", dev->name);
+#endif
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ wstats = &lp->wstats;
+
+ /* Get data from the mmc */
+ mmc_out(base, mmwoff(0, mmw_freeze), 1);
+
+ mmc_read(base, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
+ mmc_read(base, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l, 2);
+ mmc_read(base, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set, 4);
+
+ mmc_out(base, mmwoff(0, mmw_freeze), 0);
+
+ /* Copy data to wireless stuff */
+ wstats->status = m.mmr_dce_status & MMR_DCE_STATUS;
+ wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
+ wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
+ wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
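+ /* Build the 3-bit 'updated' mask (qual/level/noise) from the hardware
+ * valid bits */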
+ wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7) |
+ ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6) |
+ ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
+ wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+ wstats->discard.code = 0L;
+ wstats->discard.misc = 0L;
+
+ /* ReEnable interrupts & restore flags */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n", dev->name);
+#endif
+ return &lp->wstats;
+}
+#endif /* WIRELESS_EXT */
+
+/************************* PACKET RECEPTION *************************/
+/*
+ * This part deals with receiving the packets.
+ * The interrupt handler gets an interrupt when a packet has been
+ * successfully received and calls this part...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Calculate the starting address of the frame pointed to by the receive
+ * frame pointer and verify that the frame seems correct
+ * (called by wv_packet_rcv())
+ */
+static inline int
+wv_start_of_frame(struct net_device * dev,
+ int rfp, /* end of frame */
+ int wrap) /* start of buffer */
+{
+ kio_addr_t base = dev->base_addr;
+ int rp;
+ int len;
+
+ rp = (rfp - 5 + RX_SIZE) % RX_SIZE;
+ outb(rp & 0xff, PIORL(base));
+ outb(((rp >> 8) & PIORH_MASK), PIORH(base));
+ len = inb(PIOP(base));
+ len |= inb(PIOP(base)) << 8;
+
+ /* Sanity checks on size */
+ /* Frame too big */
+ if(len > MAXDATAZ + 100)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_start_of_frame: Received frame too large, rfp %d len 0x%x\n",
+ dev->name, rfp, len);
+#endif
+ return(-1);
+ }
+
+ /* Frame too short */
+ if(len < 7)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_start_of_frame: Received null frame, rfp %d len 0x%x\n",
+ dev->name, rfp, len);
+#endif
+ return(-1);
+ }
+
+ /* Wrap around buffer */
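+ /* Sanity check : the frame must fit between the buffer start ('wrap')
+ * and the receive frame pointer ; otherwise the length field we just
+ * read is probably garbage. */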
+ if(len > ((wrap - (rfp - len) + RX_SIZE) % RX_SIZE)) /* magic formula ! */
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_start_of_frame: wrap around buffer, wrap %d rfp %d len 0x%x\n",
+ dev->name, wrap, rfp, len);
+#endif
+ return(-1);
+ }
+
+ return((rp - len + RX_SIZE) % RX_SIZE);
+} /* wv_start_of_frame */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does the actual copy of data (including the ethernet
+ * header structure) from the WaveLAN card to an sk_buff chain that
+ * will be passed up to the network interface layer. NOTE: We
+ * currently don't handle trailer protocols (neither does the rest of
+ * the network interface), so if that is needed, it will (at least in
+ * part) be added here. The contents of the receive ring buffer are
+ * copied to a message chain that is then passed to the kernel.
+ *
+ * Note: if any errors occur, the packet is "dropped on the floor"
+ * (called by wv_packet_rcv())
+ */
+static inline void
+wv_packet_read(struct net_device * dev,
+ int fd_p,
+ int sksize)
+{
+ net_local * lp = netdev_priv(dev);
+ struct sk_buff * skb;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
+ dev->name, fd_p, sksize);
+#endif
+
+ /* Allocate some buffer for the new packet */
+ if((skb = dev_alloc_skb(sksize+2)) == (struct sk_buff *) NULL)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC)\n",
+ dev->name, sksize);
+#endif
+ lp->stats.rx_dropped++;
+ /*
+ * Not only do we want to return here, but we also need to drop the
+ * packet on the floor to clear the interrupt.
+ */
+ return;
+ }
+
+ skb->dev = dev;
+
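+  /* Reserve 2 bytes so that the IP header ends up aligned on a 16-byte
+   * boundary after the 14-byte Ethernet header */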
+ skb_reserve(skb, 2);
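+  /* Copy the whole frame out of the ring buffer; read_ringbuf() returns
+   * the updated ring offset, reused below to fetch the signal info */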
+ fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize);
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef DEBUG_RX_INFO
+ wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+#endif /* DEBUG_RX_INFO */
+
+  /* Statistics gathering & associated processing.
+   * It seems a bit messy with all the #ifdefs, but it's really simple... */
+ if(
+#ifdef IW_WIRELESS_SPY
+ (lp->spy_data.spy_number > 0) ||
+#endif /* IW_WIRELESS_SPY */
+#ifdef HISTOGRAM
+ (lp->his_number > 0) ||
+#endif /* HISTOGRAM */
+#ifdef WAVELAN_ROAMING
+ (do_roaming) ||
+#endif /* WAVELAN_ROAMING */
+ 0)
+ {
+ u_char stats[3]; /* Signal level, Noise level, Signal quality */
+
+ /* read signal level, silence level and signal quality bytes */
+ fd_p = read_ringbuf(dev, (fd_p + 4) % RX_SIZE + RX_BASE,
+ stats, 3);
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
+ dev->name, stats[0] & 0x3F, stats[1] & 0x3F, stats[2] & 0x0F);
+#endif
+
+#ifdef WAVELAN_ROAMING
+ if(do_roaming)
+ if(WAVELAN_BEACON(skb->data))
+ wl_roam_gather(dev, skb->data, stats);
+#endif /* WAVELAN_ROAMING */
+
+#ifdef WIRELESS_SPY
+ wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats);
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ wl_his_gather(dev, stats);
+#endif /* HISTOGRAM */
+ }
+
+ /*
+ * Hand the packet to the Network Module
+ */
+ netif_rx(skb);
+
+ /* Keep stats up to date */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += sksize;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
+#endif
+ return;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called by the interrupt handler to initiate a
+ * packet transfer from the card to the network interface layer above
+ * this driver. This routine checks if a buffer has been successfully
+ * received by the WaveLAN card. If so, the routine wv_packet_read is
+ * called to do the actual transfer of the card's data including the
+ * ethernet header into a packet consisting of an sk_buff chain.
+ * (called by wavelan_interrupt())
+ * Note : the spinlock is already grabbed for us and irqs are disabled.
+ */
+static inline void
+wv_packet_rcv(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ int newrfp;
+ int rp;
+ int len;
+ int f_start;
+ int status;
+ int i593_rfp;
+ int stat_ptr;
+ u_char c[4];
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_rcv()\n", dev->name);
+#endif
+
+ /* Get the new receive frame pointer from the i82593 chip */
+ outb(CR0_STATUS_2 | OP0_NOP, LCCR(base));
+ i593_rfp = inb(LCSR(base));
+ i593_rfp |= inb(LCSR(base)) << 8;
+ i593_rfp %= RX_SIZE;
+
+ /* Get the new receive frame pointer from the WaveLAN card.
+ * It is 3 bytes more than the increment of the i82593 receive
+ * frame pointer, for each packet. This is because it includes the
+ * 3 roaming bytes added by the mmc.
+ */
+ newrfp = inb(RPLL(base));
+ newrfp |= inb(RPLH(base)) << 8;
+ newrfp %= RX_SIZE;
+
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG "%s: wv_packet_rcv(): i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
+ dev->name, i593_rfp, lp->stop, newrfp, lp->rfp);
+#endif
+
+#ifdef DEBUG_RX_ERROR
+ /* If no new frame pointer... */
+ if(lp->overrunning || newrfp == lp->rfp)
+ printk(KERN_INFO "%s: wv_packet_rcv(): no new frame: i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
+ dev->name, i593_rfp, lp->stop, newrfp, lp->rfp);
+#endif
+
+ /* Read all frames (packets) received */
+ while(newrfp != lp->rfp)
+ {
+ /* A frame is composed of the packet, followed by a status word,
+ * the length of the frame (word) and the mmc info (SNR & qual).
+ * It's because the length is at the end that we can only scan
+ * frames backward. */
+
+ /* Find the first frame by skipping backwards over the frames */
+ rp = newrfp; /* End of last frame */
+ while(((f_start = wv_start_of_frame(dev, rp, newrfp)) != lp->rfp) &&
+ (f_start != -1))
+ rp = f_start;
+
+ /* If we had a problem */
+ if(f_start == -1)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "wavelan_cs: cannot find start of frame ");
+ printk(" i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
+ i593_rfp, lp->stop, newrfp, lp->rfp);
+#endif
+ lp->rfp = rp; /* Get to the last usable frame */
+ continue;
+ }
+
+      /* f_start points to the beginning of the first frame received
+       * and rp to the beginning of the next one */
+
+ /* Read status & length of the frame */
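+      /* They sit 7 bytes back from rp : 2 status bytes and 2 length bytes,
+       * followed by the 3 mmc info bytes at the very end of the frame */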
+ stat_ptr = (rp - 7 + RX_SIZE) % RX_SIZE;
+ stat_ptr = read_ringbuf(dev, stat_ptr, c, 4);
+ status = c[0] | (c[1] << 8);
+ len = c[2] | (c[3] << 8);
+
+ /* Check status */
+ if((status & RX_RCV_OK) != RX_RCV_OK)
+ {
+ lp->stats.rx_errors++;
+ if(status & RX_NO_SFD)
+ lp->stats.rx_frame_errors++;
+ if(status & RX_CRC_ERR)
+ lp->stats.rx_crc_errors++;
+ if(status & RX_OVRRUN)
+ lp->stats.rx_over_errors++;
+
+#ifdef DEBUG_RX_FAIL
+ printk(KERN_DEBUG "%s: wv_packet_rcv(): packet not received ok, status = 0x%x\n",
+ dev->name, status);
+#endif
+ }
+ else
+ /* Read the packet and transmit to Linux */
+ wv_packet_read(dev, f_start, len - 2);
+
+ /* One frame has been processed, skip it */
+ lp->rfp = rp;
+ }
+
+ /*
+ * Update the frame stop register, but set it to less than
+ * the full 8K to allow space for 3 bytes of signal strength
+ * per packet.
+ */
+ lp->stop = (i593_rfp + RX_SIZE - ((RX_SIZE / 64) * 3)) % RX_SIZE;
+ outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, LCCR(base));
+ outb(CR1_STOP_REG_UPDATE | (lp->stop >> RX_SIZE_SHIFT), LCCR(base));
+ outb(OP1_SWIT_TO_PORT_0, LCCR(base));
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_rcv()\n", dev->name);
+#endif
+}
+
+/*********************** PACKET TRANSMISSION ***********************/
+/*
+ * This part deals with sending packets through the wavelan.
+ * We copy the packet to the send buffer and then issue the send
+ * command to the i82593. The result of this operation will be
+ * checked in wavelan_interrupt()
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine fills in the appropriate registers and memory
+ * locations on the WaveLAN card and starts the card off on
+ * the transmit.
+ * (called in wavelan_packet_xmit())
+ */
+static inline void
+wv_packet_write(struct net_device * dev,
+ void * buf,
+ short length)
+{
+ net_local * lp = netdev_priv(dev);
+ kio_addr_t base = dev->base_addr;
+ unsigned long flags;
+ int clen = length;
+ register u_short xmtdata_base = TX_BASE;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name, length);
+#endif
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Write the length of data buffer followed by the buffer */
+ outb(xmtdata_base & 0xff, PIORL(base));
+ outb(((xmtdata_base >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
+ outb(clen & 0xff, PIOP(base)); /* lsb */
+ outb(clen >> 8, PIOP(base)); /* msb */
+
+ /* Send the data */
+ outsb(PIOP(base), buf, clen);
+
+ /* Indicate end of transmit chain */
+ outb(OP0_NOP, PIOP(base));
+ /* josullvn@cs.cmu.edu: need to send a second NOP for alignment... */
+ outb(OP0_NOP, PIOP(base));
+
+ /* Reset the transmit DMA pointer */
+ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
+ hacr_write(base, HACR_DEFAULT);
+ /* Send the transmit command */
+ wv_82593_cmd(dev, "wv_packet_write(): transmit",
+ OP0_TRANSMIT, SR0_NO_RESULT);
+
+ /* Make sure the watchdog will keep quiet for a while */
+ dev->trans_start = jiffies;
+
+ /* Keep stats up to date */
+ lp->stats.tx_bytes += length;
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_TX_INFO
+ wv_packet_info((u_char *) buf, length, dev->name, "wv_packet_write");
+#endif /* DEBUG_TX_INFO */
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called when we want to send a packet (NET3 callback)
+ * In this routine, we check if the hardware is ready to accept
+ * the packet. We also prevent reentrance. Then, we call the function
+ * to send the packet...
+ */
+static int
+wavelan_packet_xmit(struct sk_buff * skb,
+ struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+ unsigned long flags;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
+ (unsigned) skb);
+#endif
+
+ /*
+ * Block a timer-based transmit from overlapping a previous transmit.
+ * In other words, prevent reentering this routine.
+ */
+ netif_stop_queue(dev);
+
+ /* If somebody has asked to reconfigure the controller,
+ * we can do it now */
+ if(lp->reconfig_82593)
+ {
+ spin_lock_irqsave(&lp->spinlock, flags); /* Disable interrupts */
+ wv_82593_config(dev);
+ spin_unlock_irqrestore(&lp->spinlock, flags); /* Re-enable interrupts */
+ /* Note : the configure procedure was totally synchronous,
+ * so the Tx buffer is now free */
+ }
+
+#ifdef DEBUG_TX_ERROR
+ if (skb->next)
+ printk(KERN_INFO "skb has next\n");
+#endif
+
+ /* Check if we need some padding */
+  /* Note : on wireless the propagation time is on the order of 1us,
+   * and we don't have the Ethernet-specific requirement of being
+ * able to detect collisions, therefore in theory we don't really
+ * need to pad. Jean II */
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ }
+
+ wv_packet_write(dev, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
+#endif
+ return(0);
+}
+
+/********************** HARDWARE CONFIGURATION **********************/
+/*
+ * This part does the real job of starting and configuring the hardware.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to initialize the Modem Management Controller.
+ * (called by wv_hw_config())
+ */
+static inline int
+wv_mmc_init(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ psa_t psa;
+ mmw_t m;
+ int configured;
+ int i; /* Loop counter */
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
+#endif
+
+ /* Read the parameter storage area */
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+
+ /*
+ * Check the first three octets of the MAC addr for the manufacturer's code.
+ * Note: If you get the error message below, you've got a
+   * non-NCR/AT&T/Lucent PCMCIA card; see wavelan_cs.h for details on
+ * how to configure your card...
+ */
+ for(i = 0; i < (sizeof(MAC_ADDRESSES) / sizeof(char) / 3); i++)
+ if((psa.psa_univ_mac_addr[0] == MAC_ADDRESSES[i][0]) &&
+ (psa.psa_univ_mac_addr[1] == MAC_ADDRESSES[i][1]) &&
+ (psa.psa_univ_mac_addr[2] == MAC_ADDRESSES[i][2]))
+ break;
+
+ /* If we have not found it... */
+ if(i == (sizeof(MAC_ADDRESSES) / sizeof(char) / 3))
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wv_mmc_init(): Invalid MAC address: %02X:%02X:%02X:...\n",
+ dev->name, psa.psa_univ_mac_addr[0],
+ psa.psa_univ_mac_addr[1], psa.psa_univ_mac_addr[2]);
+#endif
+ return FALSE;
+ }
+
+ /* Get the MAC address */
+ memcpy(&dev->dev_addr[0], &psa.psa_univ_mac_addr[0], WAVELAN_ADDR_SIZE);
+
+#ifdef USE_PSA_CONFIG
+ configured = psa.psa_conf_status & 1;
+#else
+ configured = 0;
+#endif
+
+  /* If the PSA is not configured */
+ if(!configured)
+ {
+ /* User will be able to configure NWID after (with iwconfig) */
+ psa.psa_nwid[0] = 0;
+ psa.psa_nwid[1] = 0;
+
+ /* As NWID is not set : no NWID checking */
+ psa.psa_nwid_select = 0;
+
+ /* Disable encryption */
+ psa.psa_encryption_select = 0;
+
+ /* Set to standard values
+ * 0x04 for AT,
+ * 0x01 for MCA,
+ * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
+ */
+ if (psa.psa_comp_number & 1)
+ psa.psa_thr_pre_set = 0x01;
+ else
+ psa.psa_thr_pre_set = 0x04;
+ psa.psa_quality_thr = 0x03;
+
+ /* It is configured */
+ psa.psa_conf_status |= 1;
+
+#ifdef USE_PSA_CONFIG
+ /* Write the psa */
+ psa_write(dev, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 4);
+ psa_write(dev, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *)&psa.psa_thr_pre_set, 1);
+ psa_write(dev, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ psa_write(dev, (char *)&psa.psa_conf_status - (char *)&psa,
+ (unsigned char *)&psa.psa_conf_status, 1);
+ /* update the Wavelan checksum */
+ update_psa_checksum(dev);
+#endif /* USE_PSA_CONFIG */
+ }
+
+ /* Zero the mmc structure */
+ memset(&m, 0x00, sizeof(m));
+
+ /* Copy PSA info to the mmc */
+ m.mmw_netw_id_l = psa.psa_nwid[1];
+ m.mmw_netw_id_h = psa.psa_nwid[0];
+
+ if(psa.psa_nwid_select & 1)
+ m.mmw_loopt_sel = 0x00;
+ else
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
+
+ memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
+ sizeof(m.mmw_encr_key));
+
+ if(psa.psa_encryption_select)
+ m.mmw_encr_enable = MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
+ else
+ m.mmw_encr_enable = 0;
+
+ m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
+ m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
+
+ /*
+ * Set default modem control parameters.
+ * See NCR document 407-0024326 Rev. A.
+ */
+ m.mmw_jabber_enable = 0x01;
+ m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
+ m.mmw_ifs = 0x20;
+ m.mmw_mod_delay = 0x04;
+ m.mmw_jam_time = 0x38;
+
+ m.mmw_des_io_invert = 0;
+ m.mmw_freeze = 0;
+ m.mmw_decay_prm = 0;
+ m.mmw_decay_updat_prm = 0;
+
+ /* Write all info to mmc */
+ mmc_write(base, 0, (u_char *)&m, sizeof(m));
+
+  /* The following code starts the modem of the 2.00 frequency
+   * selectable cards at power on. It's not strictly needed for
+   * subsequent boots...
+   * The original patch was by Joe Finney for the PCMCIA driver, but
+   * I've cleaned it a bit and added documentation.
+ * Thanks to Loeke Brederveld from Lucent for the info.
+ */
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * (does it work for everybody ? - especially old cards...) */
+  /* Note : WFREQSEL verifies that it is able to read a sensible
+   * frequency from the EEprom (address 0x00) and that MMR_FEE_STATUS_ID
+   * is 0xA (Xilinx version) or 0xB (Ariadne version).
+   * My test is cruder but does work... */
+ if(!(mmc_in(base, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ /* We must download the frequency parameters to the
+       * synthesizers (from the EEprom - area 1)
+       * Note : as the EEprom is auto decremented, we set the end
+       * of the area... */
+ m.mmw_fee_addr = 0x0F;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(base, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ fee_wait(base, 100, 100);
+
+#ifdef DEBUG_CONFIG_INFO
+ /* The frequency was in the last word downloaded... */
+ mmc_read(base, (char *)&m.mmw_fee_data_l - (char *)&m,
+ (unsigned char *)&m.mmw_fee_data_l, 2);
+
+ /* Print some info for the user */
+ printk(KERN_DEBUG "%s: Wavelan 2.00 recognised (frequency select) : Current frequency = %ld\n",
+ dev->name,
+ ((m.mmw_fee_data_h << 4) |
+ (m.mmw_fee_data_l >> 4)) * 5 / 2 + 24000L);
+#endif
+
+ /* We must now download the power adjust value (gain) to
+       * the synthesizers (from the EEprom - area 7 - DAC) */
+ m.mmw_fee_addr = 0x61;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(base, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ } /* if 2.00 card */
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
+#endif
+ return TRUE;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to gracefully turn off reception, and wait for any commands
+ * to complete.
+ * (called in wv_ru_start() and wavelan_close() and wavelan_event())
+ */
+static int
+wv_ru_stop(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ unsigned long flags;
+ int status;
+ int spin;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_ru_stop()\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* First, send the LAN controller a stop receive command */
+ wv_82593_cmd(dev, "wv_graceful_shutdown(): stop-rcv",
+ OP0_STOP_RCV, SR0_NO_RESULT);
+
+ /* Then, spin until the receive unit goes idle */
+ spin = 300;
+ do
+ {
+ udelay(10);
+ outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
+ status = inb(LCSR(base));
+ }
+ while(((status & SR3_RCV_STATE_MASK) != SR3_RCV_IDLE) && (spin-- > 0));
+
+ /* Now, spin until the chip finishes executing its current command */
+ do
+ {
+ udelay(10);
+ outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
+ status = inb(LCSR(base));
+ }
+ while(((status & SR3_EXEC_STATE_MASK) != SR3_EXEC_IDLE) && (spin-- > 0));
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ /* If there was a problem */
+ if(spin <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_ru_stop(): The chip doesn't want to stop...\n",
+ dev->name);
+#endif
+ return FALSE;
+ }
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_ru_stop()\n", dev->name);
+#endif
+ return TRUE;
+} /* wv_ru_stop */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine starts the receive unit running. First, it checks if
+ * the card is actually ready. Then the card is instructed to receive
+ * packets again.
+ * (called in wv_hw_reset() & wavelan_open())
+ */
+static int
+wv_ru_start(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ unsigned long flags;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
+#endif
+
+ /*
+ * We need to start from a quiescent state. To do so, we could check
+ * if the card is already running, but instead we just try to shut
+ * it down. First, we disable reception (in case it was already enabled).
+ */
+ if(!wv_ru_stop(dev))
+ return FALSE;
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Now we know that no command is being executed. */
+
+ /* Set the receive frame pointer and stop pointer */
+ lp->rfp = 0;
+ outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, LCCR(base));
+
+ /* Reset ring management. This sets the receive frame pointer to 1 */
+ outb(OP1_RESET_RING_MNGMT, LCCR(base));
+
+#if 0
+ /* XXX the i82593 manual page 6-4 seems to indicate that the stop register
+ should be set as below */
+ /* outb(CR1_STOP_REG_UPDATE|((RX_SIZE - 0x40)>> RX_SIZE_SHIFT),LCCR(base));*/
+#elif 0
+ /* but I set it 0 instead */
+ lp->stop = 0;
+#else
+ /* but I set it to 3 bytes per packet less than 8K */
+ lp->stop = (0 + RX_SIZE - ((RX_SIZE / 64) * 3)) % RX_SIZE;
+#endif
+ outb(CR1_STOP_REG_UPDATE | (lp->stop >> RX_SIZE_SHIFT), LCCR(base));
+ outb(OP1_INT_ENABLE, LCCR(base));
+ outb(OP1_SWIT_TO_PORT_0, LCCR(base));
+
+ /* Reset receive DMA pointer */
+ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
+ hacr_write_slow(base, HACR_DEFAULT);
+
+ /* Receive DMA on channel 1 */
+ wv_82593_cmd(dev, "wv_ru_start(): rcv-enable",
+ CR0_CHNL | OP0_RCV_ENABLE, SR0_NO_RESULT);
+
+#ifdef DEBUG_I82593_SHOW
+ {
+ int status;
+ int opri;
+ int spin = 10000;
+
+ /* spin until the chip starts receiving */
+ do
+ {
+ outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
+ status = inb(LCSR(base));
+ if(spin-- <= 0)
+ break;
+ }
+ while(((status & SR3_RCV_STATE_MASK) != SR3_RCV_ACTIVE) &&
+ ((status & SR3_RCV_STATE_MASK) != SR3_RCV_READY));
+    printk(KERN_DEBUG "rcv status is 0x%x [spin:%d]\n",
+	   (status & SR3_RCV_STATE_MASK), spin);
+ }
+#endif
+
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
+#endif
+ return TRUE;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard config of the WaveLAN controller (i82593).
+ * In the ISA driver, this is integrated in wavelan_hardware_reset()
+ * (called by wv_hw_config(), wv_82593_reconfig() & wavelan_packet_xmit())
+ */
+static int
+wv_82593_config(struct net_device * dev)
+{
+ kio_addr_t base = dev->base_addr;
+ net_local * lp = netdev_priv(dev);
+ struct i82593_conf_block cfblk;
+ int ret = TRUE;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82593_config()\n", dev->name);
+#endif
+
+ /* Create & fill i82593 config block
+ *
+ * Now conform to Wavelan document WCIN085B
+ */
+ memset(&cfblk, 0x00, sizeof(struct i82593_conf_block));
+ cfblk.d6mod = FALSE; /* Run in i82593 advanced mode */
+ cfblk.fifo_limit = 5; /* = 56 B rx and 40 B tx fifo thresholds */
+ cfblk.forgnesi = FALSE; /* 0=82C501, 1=AMD7992B compatibility */
+ cfblk.fifo_32 = 1;
+ cfblk.throttle_enb = FALSE;
+ cfblk.contin = TRUE; /* enable continuous mode */
+ cfblk.cntrxint = FALSE; /* enable continuous mode receive interrupts */
+ cfblk.addr_len = WAVELAN_ADDR_SIZE;
+ cfblk.acloc = TRUE; /* Disable source addr insertion by i82593 */
+ cfblk.preamb_len = 0; /* 2 bytes preamble (SFD) */
+ cfblk.loopback = FALSE;
+  cfblk.lin_prio = 0;		/* conform to 802.3 backoff algorithm */
+  cfblk.exp_prio = 5;		/* conform to 802.3 backoff algorithm */
+  cfblk.bof_met = 1;		/* conform to 802.3 backoff algorithm */
+ cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */
+ cfblk.slottim_low = 0x20; /* 32 bit times slot time */
+ cfblk.slottim_hi = 0x0;
+ cfblk.max_retr = 15;
+ cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */
+ cfblk.bc_dis = FALSE; /* Enable broadcast reception */
+ cfblk.crs_1 = TRUE; /* Transmit without carrier sense */
+ cfblk.nocrc_ins = FALSE; /* i82593 generates CRC */
+ cfblk.crc_1632 = FALSE; /* 32-bit Autodin-II CRC */
+ cfblk.crs_cdt = FALSE; /* CD not to be interpreted as CS */
+ cfblk.cs_filter = 0; /* CS is recognized immediately */
+ cfblk.crs_src = FALSE; /* External carrier sense */
+ cfblk.cd_filter = 0; /* CD is recognized immediately */
+ cfblk.min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length 64 bytes */
+ cfblk.lng_typ = FALSE; /* Length field > 1500 = type field */
+ cfblk.lng_fld = TRUE; /* Disable 802.3 length field check */
+ cfblk.rxcrc_xf = TRUE; /* Don't transfer CRC to memory */
+ cfblk.artx = TRUE; /* Disable automatic retransmission */
+ cfblk.sarec = TRUE; /* Disable source addr trig of CD */
+ cfblk.tx_jabber = TRUE; /* Disable jabber jam sequence */
+ cfblk.hash_1 = FALSE; /* Use bits 0-5 in mc address hash */
+ cfblk.lbpkpol = TRUE; /* Loopback pin active high */
+ cfblk.fdx = FALSE; /* Disable full duplex operation */
+ cfblk.dummy_6 = 0x3f; /* all ones */
+ cfblk.mult_ia = FALSE; /* No multiple individual addresses */
+ cfblk.dis_bof = FALSE; /* Disable the backoff algorithm ?! */
+ cfblk.dummy_1 = TRUE; /* set to 1 */
+ cfblk.tx_ifs_retrig = 3; /* Hmm... Disabled */
+#ifdef MULTICAST_ALL
+ cfblk.mc_all = (lp->allmulticast ? TRUE: FALSE); /* Allow all multicasts */
+#else
+ cfblk.mc_all = FALSE; /* No multicast all mode */
+#endif
+ cfblk.rcv_mon = 0; /* Monitor mode disabled */
+ cfblk.frag_acpt = TRUE; /* Do not accept fragments */
+ cfblk.tstrttrs = FALSE; /* No start transmission threshold */
+ cfblk.fretx = TRUE; /* FIFO automatic retransmission */
+ cfblk.syncrqs = FALSE; /* Synchronous DRQ deassertion... */
+ cfblk.sttlen = TRUE; /* 6 byte status registers */
+ cfblk.rx_eop = TRUE; /* Signal EOP on packet reception */
+ cfblk.tx_eop = TRUE; /* Signal EOP on packet transmission */
+ cfblk.rbuf_size = RX_SIZE>>11; /* Set receive buffer size */
+ cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */
+
+#ifdef DEBUG_I82593_SHOW
+ {
+ u_char *c = (u_char *) &cfblk;
+ int i;
+ printk(KERN_DEBUG "wavelan_cs: config block:");
+ for(i = 0; i < sizeof(struct i82593_conf_block); i++,c++)
+ {
+ if((i % 16) == 0) printk("\n" KERN_DEBUG);
+ printk("%02x ", *c);
+ }
+ printk("\n");
+ }
+#endif
+
+ /* Copy the config block to the i82593 */
+ outb(TX_BASE & 0xff, PIORL(base));
+ outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
+ outb(sizeof(struct i82593_conf_block) & 0xff, PIOP(base)); /* lsb */
+ outb(sizeof(struct i82593_conf_block) >> 8, PIOP(base)); /* msb */
+ outsb(PIOP(base), (char *) &cfblk, sizeof(struct i82593_conf_block));
+
+ /* reset transmit DMA pointer */
+ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
+ hacr_write(base, HACR_DEFAULT);
+ if(!wv_82593_cmd(dev, "wv_82593_config(): configure",
+ OP0_CONFIGURE, SR0_CONFIGURE_DONE))
+ ret = FALSE;
+
+ /* Initialize adapter's ethernet MAC address */
+ outb(TX_BASE & 0xff, PIORL(base));
+ outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
+ outb(WAVELAN_ADDR_SIZE, PIOP(base)); /* byte count lsb */
+ outb(0, PIOP(base)); /* byte count msb */
+ outsb(PIOP(base), &dev->dev_addr[0], WAVELAN_ADDR_SIZE);
+
+ /* reset transmit DMA pointer */
+ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
+ hacr_write(base, HACR_DEFAULT);
+ if(!wv_82593_cmd(dev, "wv_82593_config(): ia-setup",
+ OP0_IA_SETUP, SR0_IA_SETUP_DONE))
+ ret = FALSE;
+
+#ifdef WAVELAN_ROAMING
+ /* If roaming is enabled, join the "Beacon Request" multicast group... */
+ /* But only if it's not in there already! */
+ if(do_roaming)
+ dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1);
+#endif /* WAVELAN_ROAMING */
+
+ /* If any multicast address to set */
+ if(lp->mc_count)
+ {
+ struct dev_mc_list * dmi;
+ int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
+ dev->name, lp->mc_count);
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ printk(KERN_DEBUG " %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5] );
+#endif
+
+ /* Initialize adapter's ethernet multicast addresses */
+ outb(TX_BASE & 0xff, PIORL(base));
+ outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
+ outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
+ outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
+
+ /* reset transmit DMA pointer */
+ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
+ hacr_write(base, HACR_DEFAULT);
+ if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
+ OP0_MC_SETUP, SR0_MC_SETUP_DONE))
+ ret = FALSE;
+ lp->mc_count = dev->mc_count; /* remember to avoid repeated reset */
+ }
+
+ /* Job done, clear the flag */
+ lp->reconfig_82593 = FALSE;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82593_config()\n", dev->name);
+#endif
+ return(ret);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read the Access Configuration Register, perform a software reset,
+ * and then re-enable the card's software.
+ *
+ * If I understand correctly : reset the pcmcia interface of the
+ * wavelan.
+ * (called by wv_config())
+ */
+static inline int
+wv_pcmcia_reset(struct net_device * dev)
+{
+ int i;
+ conf_reg_t reg = { 0, CS_READ, CISREG_COR, 0 };
+ dev_link_t * link = ((net_local *)netdev_priv(dev))->link;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_pcmcia_reset()\n", dev->name);
+#endif
+
+ i = pcmcia_access_configuration_register(link->handle, &reg);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, AccessConfigurationRegister, i);
+ return FALSE;
+ }
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n",
+ dev->name, (u_int) reg.Value);
+#endif
+
+ reg.Action = CS_WRITE;
+ reg.Value = reg.Value | COR_SW_RESET;
+ i = pcmcia_access_configuration_register(link->handle, &reg);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, AccessConfigurationRegister, i);
+ return FALSE;
+ }
+
+ reg.Action = CS_WRITE;
+ reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
+ i = pcmcia_access_configuration_register(link->handle, &reg);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, AccessConfigurationRegister, i);
+ return FALSE;
+ }
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name);
+#endif
+ return TRUE;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * wv_hw_config() is called after a CARD_INSERTION event is
+ * received, to configure the wavelan hardware.
+ * Note that the reception will be enabled in wavelan->open(), so the
+ * device is configured but idle...
+ * Performs the following actions:
+ * 1. A pcmcia software reset (using wv_pcmcia_reset())
+ * 2. A power reset (reset DMA)
+ * 3. Reset the LAN controller
+ * 4. Initialize the radio modem (using wv_mmc_init)
+ * 5. Configure LAN controller (using wv_82593_config)
+ * 6. Perform a diagnostic on the LAN controller
+ * (called by wavelan_event() & wv_hw_reset())
+ */
+static int
+wv_hw_config(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+ kio_addr_t base = dev->base_addr;
+ unsigned long flags;
+ int ret = FALSE;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_hw_config()\n", dev->name);
+#endif
+
+#ifdef STRUCT_CHECK
+ if(wv_structuct_check() != (char *) NULL)
+ {
+ printk(KERN_WARNING "%s: wv_hw_config: structure/compiler botch: \"%s\"\n",
+ dev->name, wv_structuct_check());
+ return FALSE;
+ }
+#endif /* STRUCT_CHECK == 1 */
+
+ /* Reset the pcmcia interface */
+ if(wv_pcmcia_reset(dev) == FALSE)
+ return FALSE;
+
+ /* Disable interrupts */
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Disguised goto ;-) */
+ do
+ {
+ /* Power UP the module + reset the modem + reset host adapter
+ * (in fact, reset DMA channels) */
+ hacr_write_slow(base, HACR_RESET);
+ hacr_write(base, HACR_DEFAULT);
+
+ /* Check if the module has been powered up... */
+ if(hasr_read(base) & HASR_NO_CLK)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wv_hw_config(): modem not connected or not a wavelan card\n",
+ dev->name);
+#endif
+ break;
+ }
+
+ /* initialize the modem */
+ if(wv_mmc_init(dev) == FALSE)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wv_hw_config(): Can't configure the modem\n",
+ dev->name);
+#endif
+ break;
+ }
+
+ /* reset the LAN controller (i82593) */
+ outb(OP0_RESET, LCCR(base));
+ mdelay(1); /* A bit crude ! */
+
+ /* Initialize the LAN controller */
+ if(wv_82593_config(dev) == FALSE)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_hw_config(): i82593 init failed\n",
+ dev->name);
+#endif
+ break;
+ }
+
+ /* Diagnostic */
+ if(wv_diag(dev) == FALSE)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_hw_config(): i82593 diagnostic failed\n",
+ dev->name);
+#endif
+ break;
+ }
+
+ /*
+ * insert code for loopback test here
+ */
+
+ /* The device is now configured */
+ lp->configured = 1;
+ ret = TRUE;
+ }
+ while(0);
+
+ /* Re-enable interrupts */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_hw_config()\n", dev->name);
+#endif
+ return(ret);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Totally reset the wavelan and restart it.
+ * Performs the following actions:
+ * 1. Call wv_hw_config()
+ * 2. Start the LAN controller's receive unit
+ * (called by wavelan_event(), wavelan_watchdog() and wavelan_open())
+ */
+static inline void
+wv_hw_reset(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_hw_reset()\n", dev->name);
+#endif
+
+ lp->nresets++;
+ lp->configured = 0;
+
+ /* Call wv_hw_config() for most of the reset & init stuff */
+ if(wv_hw_config(dev) == FALSE)
+ return;
+
+ /* start receive unit */
+ wv_ru_start(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * wv_pcmcia_config() is called after a CARD_INSERTION event is
+ * received, to configure the PCMCIA socket, and to make the ethernet
+ * device available to the system.
+ * (called by wavelan_event())
+ */
+static inline int
+wv_pcmcia_config(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ struct net_device * dev = (struct net_device *) link->priv;
+ int i;
+ u_char buf[64];
+ win_req_t req;
+ memreq_t mem;
+ net_local * lp = netdev_priv(dev);
+
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link);
+#endif
+
+ /*
+ * This reads the card's CONFIG tuple to find its configuration
+ * registers.
+ */
+ do
+ {
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ i = pcmcia_get_first_tuple(handle, &tuple);
+ if(i != CS_SUCCESS)
+ break;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ i = pcmcia_get_tuple_data(handle, &tuple);
+ if(i != CS_SUCCESS)
+ break;
+ i = pcmcia_parse_tuple(handle, &tuple, &parse);
+ if(i != CS_SUCCESS)
+ break;
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+ }
+ while(0);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, ParseTuple, i);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return FALSE;
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+ do
+ {
+ i = pcmcia_request_io(link->handle, &link->io);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, RequestIO, i);
+ break;
+ }
+
+ /*
+ * Now allocate an interrupt line. Note that this does not
+ * actually assign a handler to the interrupt.
+ */
+ i = pcmcia_request_irq(link->handle, &link->irq);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, RequestIRQ, i);
+ break;
+ }
+
+ /*
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping.
+ */
+ link->conf.ConfigIndex = 1;
+ i = pcmcia_request_configuration(link->handle, &link->conf);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, RequestConfiguration, i);
+ break;
+ }
+
+ /*
+ * Allocate a small memory window. Note that the dev_link_t
+ * structure provides space for one window handle -- if your
+ * device needs several windows, you'll need to keep track of
+ * the handles in your private data structure, link->priv.
+ */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = mem_speed;
+ i = pcmcia_request_window(&link->handle, &req, &link->win);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, RequestWindow, i);
+ break;
+ }
+
+ lp->mem = ioremap(req.Base, req.Size);
+ dev->mem_start = (u_long)lp->mem;
+ dev->mem_end = dev->mem_start + req.Size;
+
+ mem.CardOffset = 0; mem.Page = 0;
+ i = pcmcia_map_mem_page(link->win, &mem);
+ if(i != CS_SUCCESS)
+ {
+ cs_error(link->handle, MapMemPage, i);
+ break;
+ }
+
+ /* Feed device with this info... */
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ netif_start_queue(dev);
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "wv_pcmcia_config: MEMSTART %p IRQ %d IOPORT 0x%x\n",
+ lp->mem, dev->irq, (u_int) dev->base_addr);
+#endif
+
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ i = register_netdev(dev);
+ if(i != 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "wv_pcmcia_config(): register_netdev() failed\n");
+#endif
+ break;
+ }
+ }
+ while(0); /* Humm... Disguised goto !!! */
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ /* If any step failed, release any partially configured state */
+ if(i != 0)
+ {
+ wv_pcmcia_release(link);
+ return FALSE;
+ }
+
+ strcpy(((net_local *) netdev_priv(dev))->node.dev_name, dev->name);
+ link->dev = &((net_local *) netdev_priv(dev))->node;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "<-wv_pcmcia_config()\n");
+#endif
+ return TRUE;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * After a card is removed, wv_pcmcia_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+wv_pcmcia_release(dev_link_t *link)
+{
+ struct net_device * dev = (struct net_device *) link->priv;
+ net_local * lp = netdev_priv(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link);
+#endif
+
+ /* Don't bother checking to see if these succeed or not */
+ iounmap(lp->mem);
+ pcmcia_release_window(link->win);
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name);
+#endif
+}
+
+/************************ INTERRUPT HANDLING ************************/
+
+/*
+ * This function is the interrupt handler for the WaveLAN card. This
+ * routine will be called whenever:
+ * 1. A packet is received.
+ * 2. A packet has successfully been transferred and the unit is
+ * ready to transmit another packet.
+ * 3. A command has completed execution.
+ */
+static irqreturn_t
+wavelan_interrupt(int irq,
+ void * dev_id,
+ struct pt_regs * regs)
+{
+ struct net_device * dev;
+ net_local * lp;
+ kio_addr_t base;
+ int status0;
+ u_int tx_status;
+
+ if ((dev = dev_id) == NULL)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_WARNING "wavelan_interrupt(): irq %d for unknown device.\n",
+ irq);
+#endif
+ return IRQ_NONE;
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
+#endif
+
+ lp = netdev_priv(dev);
+ base = dev->base_addr;
+
+#ifdef DEBUG_INTERRUPT_INFO
+ /* Check state of our spinlock (it should be cleared) */
+ if(spin_is_locked(&lp->spinlock))
+ printk(KERN_DEBUG
+ "%s: wavelan_interrupt(): spinlock is already locked !!!\n",
+ dev->name);
+#endif
+
+ /* Prevent reentrancy. We need to do that because we may have
+   * multiple interrupt handlers running concurrently.
+   * It is safe because interrupts are disabled before acquiring
+ * the spinlock. */
+ spin_lock(&lp->spinlock);
+
+ /* Treat all pending interrupts */
+ while(1)
+ {
+ /* ---------------- INTERRUPT CHECKING ---------------- */
+ /*
+ * Look for the interrupt and verify the validity
+ */
+ outb(CR0_STATUS_0 | OP0_NOP, LCCR(base));
+ status0 = inb(LCSR(base));
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "status0 0x%x [%s => 0x%x]", status0,
+ (status0&SR0_INTERRUPT)?"int":"no int",status0&~SR0_INTERRUPT);
+ if(status0&SR0_INTERRUPT)
+ {
+ printk(" [%s => %d]\n", (status0 & SR0_CHNL) ? "chnl" :
+ ((status0 & SR0_EXECUTION) ? "cmd" :
+ ((status0 & SR0_RECEPTION) ? "recv" : "unknown")),
+ (status0 & SR0_EVENT_MASK));
+ }
+ else
+ printk("\n");
+#endif
+
+ /* Return if no actual interrupt from i82593 (normal exit) */
+ if(!(status0 & SR0_INTERRUPT))
+ break;
+
+ /* If interrupt is both Rx and Tx or none...
+ * This code in fact is there to catch the spurious interrupt
+ * when you remove the wavelan pcmcia card from the socket */
+ if(((status0 & SR0_BOTH_RX_TX) == SR0_BOTH_RX_TX) ||
+ ((status0 & SR0_BOTH_RX_TX) == 0x0))
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_INFO "%s: wv_interrupt(): bogus interrupt (or from dead card) : %X\n",
+ dev->name, status0);
+#endif
+ /* Acknowledge the interrupt */
+ outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
+ break;
+ }
+
+ /* ----------------- RECEIVING PACKET ----------------- */
+ /*
+       * When the wavelan signals the reception of a new packet,
+       * we call wv_packet_rcv() to copy it from the buffer and
+ * send it to NET3
+ */
+ if(status0 & SR0_RECEPTION)
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wv_interrupt(): receive\n", dev->name);
+#endif
+
+ if((status0 & SR0_EVENT_MASK) == SR0_STOP_REG_HIT)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_interrupt(): receive buffer overflow\n",
+ dev->name);
+#endif
+ lp->stats.rx_over_errors++;
+ lp->overrunning = 1;
+ }
+
+ /* Get the packet */
+ wv_packet_rcv(dev);
+ lp->overrunning = 0;
+
+ /* Acknowledge the interrupt */
+ outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
+ continue;
+ }
+
+ /* ---------------- COMMAND COMPLETION ---------------- */
+ /*
+ * Interrupts issued when the i82593 has completed a command.
+ * Most likely : transmission done
+ */
+
+ /* If a transmission has been done */
+ if((status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
+ (status0 & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
+ (status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE)
+ {
+#ifdef DEBUG_TX_ERROR
+ if((status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE)
+ printk(KERN_INFO "%s: wv_interrupt(): packet transmitted without CRC.\n",
+ dev->name);
+#endif
+
+ /* Get transmission status */
+ tx_status = inb(LCSR(base));
+ tx_status |= (inb(LCSR(base)) << 8);
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wv_interrupt(): transmission done\n",
+ dev->name);
+ {
+ u_int rcv_bytes;
+ u_char status3;
+ rcv_bytes = inb(LCSR(base));
+ rcv_bytes |= (inb(LCSR(base)) << 8);
+ status3 = inb(LCSR(base));
+ printk(KERN_DEBUG "tx_status 0x%02x rcv_bytes 0x%02x status3 0x%x\n",
+ tx_status, rcv_bytes, (u_int) status3);
+ }
+#endif
+ /* Check for possible errors */
+ if((tx_status & TX_OK) != TX_OK)
+ {
+ lp->stats.tx_errors++;
+
+ if(tx_status & TX_FRTL)
+ {
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: wv_interrupt(): frame too long\n",
+ dev->name);
+#endif
+ }
+ if(tx_status & TX_UND_RUN)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): DMA underrun\n",
+ dev->name);
+#endif
+ lp->stats.tx_aborted_errors++;
+ }
+ if(tx_status & TX_LOST_CTS)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): no CTS\n", dev->name);
+#endif
+ lp->stats.tx_carrier_errors++;
+ }
+ if(tx_status & TX_LOST_CRS)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): no carrier\n",
+ dev->name);
+#endif
+ lp->stats.tx_carrier_errors++;
+ }
+ if(tx_status & TX_HRT_BEAT)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): heart beat\n", dev->name);
+#endif
+ lp->stats.tx_heartbeat_errors++;
+ }
+ if(tx_status & TX_DEFER)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): channel jammed\n",
+ dev->name);
+#endif
+ }
+ /* Ignore late collisions since they're more likely to happen
+ * here (the WaveLAN design prevents the LAN controller from
+ * receiving while it is transmitting). We take action only when
+	   * the maximum number of retransmit attempts is exceeded.
+ */
+ if(tx_status & TX_COLL)
+ {
+ if(tx_status & TX_MAX_COL)
+ {
+#ifdef DEBUG_TX_FAIL
+ printk(KERN_DEBUG "%s: wv_interrupt(): channel congestion\n",
+ dev->name);
+#endif
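+		  /* A zero count with TX_MAX_COL set means the collision
+		   * counter wrapped, i.e. 16 collisions occurred on this frame */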
+ if(!(tx_status & TX_NCOL_MASK))
+ {
+ lp->stats.collisions += 0x10;
+ }
+ }
+ }
+ } /* if(!(tx_status & TX_OK)) */
+
+ lp->stats.collisions += (tx_status & TX_NCOL_MASK);
+ lp->stats.tx_packets++;
+
+ netif_wake_queue(dev);
+ outb(CR0_INT_ACK | OP0_NOP, LCCR(base)); /* Acknowledge the interrupt */
+ }
+ else /* if interrupt = transmit done or retransmit done */
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "wavelan_cs: unknown interrupt, status0 = %02x\n",
+ status0);
+#endif
+ outb(CR0_INT_ACK | OP0_NOP, LCCR(base)); /* Acknowledge the interrupt */
+ }
+ } /* while(1) */
+
+ spin_unlock(&lp->spinlock);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
+#endif
+
+ /* We always return IRQ_HANDLED, because we will receive empty
+ * interrupts under normal operations. Anyway, it doesn't matter
+ * as we are dealing with an ISA interrupt that can't be shared.
+ *
+ * Explanation : under heavy receive, the following happens :
+ * ->wavelan_interrupt()
+ * (status0 & SR0_INTERRUPT) != 0
+ * ->wv_packet_rcv()
+ * (status0 & SR0_INTERRUPT) != 0
+ * ->wv_packet_rcv()
+ * (status0 & SR0_INTERRUPT) == 0 // i.e. no more event
+ * <-wavelan_interrupt()
+ * ->wavelan_interrupt()
+ * (status0 & SR0_INTERRUPT) == 0 // i.e. empty interrupt
+ * <-wavelan_interrupt()
+ * Jean II */
+ return IRQ_HANDLED;
+} /* wv_interrupt */
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog: when we start a transmission, a timer is set for us in the
+ * kernel. If the transmission completes, this timer is disabled. If
+ * the timer expires, we are called and we try to unlock the hardware.
+ *
+ * Note : This watchdog is more clever than the one in the ISA driver,
+ * because it tries to abort the current command before resetting
+ * everything...
+ * On the other hand, it's a bit simpler, because we don't have to
+ * deal with the multiple Tx buffers...
+ */
+static void
+wavelan_watchdog(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+ kio_addr_t base = dev->base_addr;
+ unsigned long flags;
+ int aborted = FALSE;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
+#endif
+
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
+ dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->spinlock, flags);
+
+ /* Ask to abort the current command */
+ outb(OP0_ABORT, LCCR(base));
+
+ /* Wait for the end of the command (a bit hackish) */
+ if(wv_82593_cmd(dev, "wavelan_watchdog(): abort",
+ OP0_NOP | CR0_STATUS_3, SR0_EXECUTION_ABORTED))
+ aborted = TRUE;
+
+ /* Release spinlock here so that wv_hw_reset() can grab it */
+ spin_unlock_irqrestore(&lp->spinlock, flags);
+
+ /* Check if we were successful in aborting it */
+ if(!aborted)
+ {
+      /* It seems that it wasn't enough */
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog: abort failed, trying reset\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+#ifdef DEBUG_PSA_SHOW
+ {
+ psa_t psa;
+ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
+ wv_psa_show(&psa);
+ }
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82593_SHOW
+ wv_ru_show(dev);
+#endif
+
+  /* We are no longer waiting for anything... */
+ netif_wake_queue(dev);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
+#endif
+}
+
+/********************* CONFIGURATION CALLBACKS *********************/
+/*
+ * Here are the functions called by the pcmcia package (cardmgr) and
+ * linux networking (NET3) for initialization, configuration and
+ * deinstallation of the Wavelan Pcmcia Hardware.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Configure and start up the WaveLAN PCMCIA adaptor.
+ * Called by NET3 when it "open" the device.
+ */
+static int
+wavelan_open(struct net_device * dev)
+{
+ net_local * lp = netdev_priv(dev);
+ dev_link_t * link = lp->link;
+ kio_addr_t base = dev->base_addr;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+  /* Check if the modem is powered up (wavelan_close() powers it down) */
+ if(hasr_read(base) & HASR_NO_CLK)
+ {
+ /* Power up (power up time is 250us) */
+ hacr_write(base, HACR_DEFAULT);
+
+ /* Check if the module has been powered up... */
+ if(hasr_read(base) & HASR_NO_CLK)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_open(): modem not connected\n",
+ dev->name);
+#endif
+ return FALSE;
+ }
+ }
+
+ /* Start reception and declare the driver ready */
+ if(!lp->configured)
+ return FALSE;
+ if(!wv_ru_start(dev))
+ wv_hw_reset(dev); /* If problem : reset */
+ netif_start_queue(dev);
+
+ /* Mark the device as used */
+ link->open++;
+
+#ifdef WAVELAN_ROAMING
+ if(do_roaming)
+ wv_roam_init(dev);
+#endif /* WAVELAN_ROAMING */
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Shutdown the WaveLAN PCMCIA adaptor.
+ * Called by NET3 when it "close" the device.
+ */
+static int
+wavelan_close(struct net_device * dev)
+{
+ dev_link_t * link = ((net_local *)netdev_priv(dev))->link;
+ kio_addr_t base = dev->base_addr;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* If the device isn't open, then nothing to do */
+ if(!link->open)
+ {
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_close(): device not open\n", dev->name);
+#endif
+ return 0;
+ }
+
+#ifdef WAVELAN_ROAMING
+ /* Cleanup of roaming stuff... */
+ if(do_roaming)
+ wv_roam_cleanup(dev);
+#endif /* WAVELAN_ROAMING */
+
+ link->open--;
+
+ /* If the card is still present */
+ if(netif_running(dev))
+ {
+ netif_stop_queue(dev);
+
+ /* Stop receiving new messages and wait end of transmission */
+ wv_ru_stop(dev);
+
+ /* Power down the module */
+ hacr_write(base, HACR_DEFAULT & (~HACR_PWR_STAT));
+ }
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * wavelan_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device (one interface). The device
+ * is registered with Card Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+static dev_link_t *
+wavelan_attach(void)
+{
+ client_reg_t client_reg; /* Register with cardmgr */
+ dev_link_t * link; /* Info for cardmgr */
+ struct net_device * dev; /* Interface generic data */
+ net_local * lp; /* Interface specific data */
+ int ret;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "-> wavelan_attach()\n");
+#endif
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+ if (!link) return NULL;
+ memset(link, 0, sizeof(struct dev_link_t));
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 8;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines = 3;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = wavelan_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
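+  /* Vcc is expressed in tenths of a volt : 50 -> 5.0 V */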
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Chain drivers */
+ link->next = dev_list;
+ dev_list = link;
+
+ /* Allocate the generic data structure */
+ dev = alloc_etherdev(sizeof(net_local));
+ if (!dev) {
+ kfree(link);
+ return NULL;
+ }
+ link->priv = link->irq.Instance = dev;
+
+ lp = netdev_priv(dev);
+
+ /* Init specific data */
+ lp->configured = 0;
+ lp->reconfig_82593 = FALSE;
+ lp->nresets = 0;
+ /* Multicast stuff */
+ lp->promiscuous = 0;
+ lp->allmulticast = 0;
+ lp->mc_count = 0;
+
+ /* Init spinlock */
+ spin_lock_init(&lp->spinlock);
+
+ /* back links */
+ lp->link = link;
+ lp->dev = dev;
+
+ /* wavelan NET3 callbacks */
+ SET_MODULE_OWNER(dev);
+ dev->open = &wavelan_open;
+ dev->stop = &wavelan_close;
+ dev->hard_start_xmit = &wavelan_packet_xmit;
+ dev->get_stats = &wavelan_get_stats;
+ dev->set_multicast_list = &wavelan_set_multicast_list;
+#ifdef SET_MAC_ADDRESS
+ dev->set_mac_address = &wavelan_set_mac_address;
+#endif /* SET_MAC_ADDRESS */
+
+ /* Set the watchdog timer */
+ dev->tx_timeout = &wavelan_watchdog;
+ dev->watchdog_timeo = WATCHDOG_JIFFIES;
+ SET_ETHTOOL_OPS(dev, &ops);
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+ dev->wireless_handlers = &wavelan_handler_def;
+ lp->wireless_data.spy_data = &lp->spy_data;
+ dev->wireless_data = &lp->wireless_data;
+#endif
+
+ /* Other specific data */
+ dev->mtu = WAVELAN_MTU;
+
+ /* Register with Card Services */
+ client_reg.dev_info = &dev_info;
+ client_reg.EventMask =
+ CS_EVENT_REGISTRATION_COMPLETE |
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &wavelan_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "wavelan_attach(): almost done, calling pcmcia_register_client\n");
+#endif
+
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if(ret != 0)
+ {
+ cs_error(link->handle, RegisterClient, ret);
+ wavelan_detach(link);
+ return NULL;
+ }
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "<- wavelan_attach()\n");
+#endif
+
+ return link;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void
+wavelan_detach(dev_link_t * link)
+{
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "-> wavelan_detach(0x%p)\n", link);
+#endif
+
+ /*
+ * If the device is currently configured and active, we won't
+ * actually delete it yet. Instead, it is marked so that when the
+ * release() function is called, that will trigger a proper
+ * detach().
+ */
+ if(link->state & DEV_CONFIG)
+ {
+ /* Some others haven't done their job : give them another chance */
+ wv_pcmcia_release(link);
+ }
+
+ /* Break the link with Card Services */
+ if(link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Remove the interface data from the linked list */
+ if(dev_list == link)
+ dev_list = link->next;
+ else
+ {
+ dev_link_t * prev = dev_list;
+
+ while((prev != (dev_link_t *) NULL) && (prev->next != link))
+ prev = prev->next;
+
+ if(prev == (dev_link_t *) NULL)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "wavelan_detach : Attempting to remove a nonexistent device.\n");
+#endif
+ return;
+ }
+
+ prev->next = link->next;
+ }
+
+ /* Free pieces */
+ if(link->priv)
+ {
+ struct net_device * dev = (struct net_device *) link->priv;
+
+ /* Remove ourselves from the kernel list of ethernet devices */
+ /* Warning : can't be called from interrupt, timer or wavelan_close() */
+ if (link->dev)
+ unregister_netdev(dev);
+ link->dev = NULL;
+ ((net_local *)netdev_priv(dev))->link = NULL;
+ ((net_local *)netdev_priv(dev))->dev = NULL;
+ free_netdev(dev);
+ }
+ kfree(link);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "<- wavelan_detach()\n");
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * The card status event handler. Mostly, this schedules other stuff
+ * to run after an event is received. A CARD_REMOVAL event also sets
+ * some flags to discourage the net drivers from trying to talk to the
+ * card any more.
+ */
+static int
+wavelan_event(event_t event, /* The event received */
+ int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t * link = (dev_link_t *) args->client_data;
+ struct net_device * dev = (struct net_device *) link->priv;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "->wavelan_event(): %s\n",
+ ((event == CS_EVENT_REGISTRATION_COMPLETE)?"registration complete" :
+ ((event == CS_EVENT_CARD_REMOVAL) ? "card removal" :
+ ((event == CS_EVENT_CARD_INSERTION) ? "card insertion" :
+ ((event == CS_EVENT_PM_SUSPEND) ? "pm suspend" :
+ ((event == CS_EVENT_RESET_PHYSICAL) ? "physical reset" :
+ ((event == CS_EVENT_PM_RESUME) ? "pm resume" :
+ ((event == CS_EVENT_CARD_RESET) ? "card reset" :
+ "unknown"))))))));
+#endif
+
+ switch(event)
+ {
+ case CS_EVENT_REGISTRATION_COMPLETE:
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "wavelan_cs: registration complete\n");
+#endif
+ break;
+
+ case CS_EVENT_CARD_REMOVAL:
+      /* Oops ! The card is no longer there */
+ link->state &= ~DEV_PRESENT;
+ if(link->state & DEV_CONFIG)
+ {
+ /* Accept no more transmissions */
+ netif_device_detach(dev);
+
+ /* Release the card */
+ wv_pcmcia_release(link);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ /* Reset and configure the card */
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ if(wv_pcmcia_config(link) &&
+ wv_hw_config(dev))
+ wv_init_info(dev);
+ else
+ dev->irq = 0;
+ break;
+
+ case CS_EVENT_PM_SUSPEND:
+      /* NB: wavelan_close will be called, but too late, so we are
+       * obliged to close the wavelan nicely here. David, could you
+       * close the device before suspending it ? And, by the way,
+ * could you, on resume, add a "route add -net ..." after the
+ * ifconfig up ? Thanks... */
+
+ /* Stop receiving new messages and wait for the end of transmission */
+ wv_ru_stop(dev);
+
+ /* Power down the module */
+ hacr_write(dev->base_addr, HACR_DEFAULT & (~HACR_PWR_STAT));
+
+ /* The card is now suspended */
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if(link->state & DEV_CONFIG)
+ {
+ if(link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if(link->state & DEV_CONFIG)
+ {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if(link->open) /* If RESET -> True, If RESUME -> False ? */
+ {
+ wv_hw_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "<-wavelan_event()\n");
+#endif
+ return 0;
+}
+
+static struct pcmcia_driver wavelan_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "wavelan_cs",
+ },
+ .attach = wavelan_attach,
+ .detach = wavelan_detach,
+};
+
+static int __init
+init_wavelan_cs(void)
+{
+ return pcmcia_register_driver(&wavelan_driver);
+}
+
+static void __exit
+exit_wavelan_cs(void)
+{
+ pcmcia_unregister_driver(&wavelan_driver);
+}
+
+module_init(init_wavelan_cs);
+module_exit(exit_wavelan_cs);
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/net/wireless/wavelan_cs.h
new file mode 100644
index 000000000000..29cff6daf860
--- /dev/null
+++ b/drivers/net/wireless/wavelan_cs.h
@@ -0,0 +1,386 @@
+/*
+ * Wavelan Pcmcia driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganization and extension of the driver.
+ * Original copyright follows. See wavelan_cs.h for details.
+ *
+ * This file contains the declarations for the Wavelan hardware. Note that
+ * the Pcmcia Wavelan includes an i82593 controller (see definitions in
+ * file i82593.h).
+ *
+ * The main difference between the pcmcia hardware and the ISA one is
+ * the Ethernet Controller (i82593 instead of i82586). The i82593 allows
+ * only one send buffer. The PSA (Parameter Storage Area : EEprom for
+ * permanent storage of various info) is memory mapped, but not the
+ * MMI (Modem Management Interface).
+ */
+
+/*
+ * Definitions for the AT&T GIS (formerly NCR) WaveLAN PCMCIA card:
+ * An Ethernet-like radio transceiver controlled by an Intel 82593
+ * coprocessor.
+ *
+ *
+ ****************************************************************************
+ * Copyright 1995
+ * Anthony D. Joseph
+ * Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this program
+ * for any purpose and without fee is hereby granted, provided
+ * that this copyright and permission notice appear on all copies
+ * and supporting documentation, the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * program without specific prior permission, and notice be given
+ * in supporting documentation that copying and distribution is
+ * by permission of M.I.T. M.I.T. makes no representations about
+ * the suitability of this software for any purpose. It is pro-
+ * vided "as is" without express or implied warranty.
+ ****************************************************************************
+ *
+ *
+ * Credits:
+ * Special thanks to Jan Hoogendoorn of AT&T GIS Utrecht for
+ * providing extremely useful information about WaveLAN PCMCIA hardware
+ *
+ * This driver is based upon several other drivers, in particular:
+ * David Hinds' Linux driver for the PCMCIA 3c589 ethernet adapter
+ * Bruce Janson's Linux driver for the AT-bus WaveLAN adapter
+ * Anders Klemets' PCMCIA WaveLAN adapter driver
+ * Robert Morris' BSDI driver for the PCMCIA WaveLAN adapter
+ */
+
+#ifndef _WAVELAN_CS_H
+#define _WAVELAN_CS_H
+
+/************************** MAGIC NUMBERS ***************************/
+
+/* The detection of the wavelan card is made by reading the MAC address
+ * from the card and checking it. If you have a non-AT&T product (OEM,
+ * like DEC RoamAbout, or Digital Ocean, Epson, ...), you must modify this
+ * part to accommodate your hardware...
+ */
+const unsigned char MAC_ADDRESSES[][3] =
+{
+ { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */
+ { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */
+ { 0x00, 0x00, 0xE1 }, /* Hitachi Wavelan */
+ { 0x00, 0x60, 0x1D } /* Lucent Wavelan (another one) */
+ /* Add your card here and send me the patch ! */
+};
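+
+/*
+ * Illustrative sketch (not part of the original driver) of how a probe
+ * routine can match the first 3 octets of the card's MAC address against
+ * the table above; the real detection code lives in wavelan_cs.c.
+ */
+#if 0
+static int wv_mac_prefix_known(const unsigned char *mac)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(MAC_ADDRESSES); i++)
+		if (mac[0] == MAC_ADDRESSES[i][0] &&
+		    mac[1] == MAC_ADDRESSES[i][1] &&
+		    mac[2] == MAC_ADDRESSES[i][2])
+			return 1;	/* known manufacturer prefix */
+	return 0;			/* unknown card */
+}
+#endif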
+
+/*
+ * Constants used to convert channels to frequencies
+ */
+
+/* Frequencies available in the 2.0 modem, in units of 250 kHz
+ * (as read in the offset register of the dac area).
+ * Used to map channel numbers used by `wfreqsel' to frequencies
+ */
+const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
+ 0xD0, 0xF0, 0xF8, 0x150 };
+
+/* Frequencies of the 1.0 modem (fixed frequencies).
+ * Used to map the PSA `subband' to a frequency
+ * Note : all frequencies apart from the first one need to be multiplied by 10
+ */
+const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
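+
+/*
+ * Illustrative sketch (not part of the original driver): applying the
+ * "multiply by 10" note above to turn a PSA subband index into Hz.
+ */
+#if 0
+static unsigned long wv_subband_to_hz(int subband)
+{
+	/* fixed_bands[0] is 915 MHz in Hz, the others are in units of 10 Hz */
+	return (subband == 0) ? (unsigned long) fixed_bands[0]
+			      : (unsigned long) fixed_bands[subband] * 10;
+}
+#endif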
+
+
+/*************************** PC INTERFACE ****************************/
+
+/* WaveLAN host interface definitions */
+
+#define LCCR(base) (base) /* LAN Controller Command Register */
+#define LCSR(base) (base) /* LAN Controller Status Register */
+#define HACR(base) (base+0x1) /* Host Adapter Command Register */
+#define HASR(base) (base+0x1) /* Host Adapter Status Register */
+#define PIORL(base) (base+0x2) /* Program I/O Register Low */
+#define RPLL(base) (base+0x2) /* Receive Pointer Latched Low */
+#define PIORH(base) (base+0x3) /* Program I/O Register High */
+#define RPLH(base) (base+0x3) /* Receive Pointer Latched High */
+#define PIOP(base) (base+0x4) /* Program I/O Port */
+#define MMR(base) (base+0x6) /* MMI Address Register */
+#define MMD(base) (base+0x7) /* MMI Data Register */
+
+/* Host Adaptor Command Register bit definitions */
+
+#define HACR_LOF (1 << 3) /* Lock Out Flag, toggle every 250ms */
+#define HACR_PWR_STAT (1 << 4) /* Power State, 1=active, 0=sleep */
+#define HACR_TX_DMA_RESET (1 << 5) /* Reset transmit DMA ptr on high */
+#define HACR_RX_DMA_RESET (1 << 6) /* Reset receive DMA ptr on high */
+#define HACR_ROM_WEN (1 << 7) /* EEPROM write enabled when true */
+
+#define HACR_RESET (HACR_TX_DMA_RESET | HACR_RX_DMA_RESET)
+#define HACR_DEFAULT (HACR_PWR_STAT)
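+
+/*
+ * Usage sketch (hypothetical 'base' variable, mirrors the suspend handler
+ * in wavelan_cs.c): HACR_PWR_STAT switches the modem between active and
+ * sleep mode through hacr_write(), declared in wavelan_cs.p.h.
+ */
+#if 0
+	hacr_write(base, HACR_DEFAULT & (~HACR_PWR_STAT));	/* power down */
+	hacr_write(base, HACR_DEFAULT);				/* power up */
+#endif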
+
+/* Host Adapter Status Register bit definitions */
+
+#define HASR_MMI_BUSY (1 << 2) /* MMI is busy when true */
+#define HASR_LOF (1 << 3) /* Lock out flag status */
+#define HASR_NO_CLK (1 << 4) /* active when modem not connected */
+
+/* Miscellaneous bit definitions */
+
+#define PIORH_SEL_TX (1 << 5) /* PIOR points to 0=rx/1=tx buffer */
+#define MMR_MMI_WR (1 << 0) /* Next MMI cycle is 0=read, 1=write */
+#define PIORH_MASK 0x1f /* only low 5 bits are significant */
+#define RPLH_MASK 0x1f /* only low 5 bits are significant */
+#define MMI_ADDR_MASK 0x7e /* Bits 1-6 of MMR are significant */
+
+/* Attribute Memory map */
+
+#define CIS_ADDR 0x0000 /* Card Information Status Register */
+#define PSA_ADDR 0x0e00 /* Parameter Storage Area address */
+#define EEPROM_ADDR 0x1000 /* EEPROM address (unused ?) */
+#define COR_ADDR 0x4000 /* Configuration Option Register */
+
+/* Configuration Option Register bit definitions */
+
+#define COR_CONFIG (1 << 0) /* Config Index, 0 when unconfigured */
+#define COR_SW_RESET (1 << 7) /* Software Reset on true */
+#define COR_LEVEL_IRQ (1 << 6) /* Level IRQ */
+
+/* Local Memory map */
+
+#define RX_BASE 0x0000 /* Receive memory, 8 kB */
+#define TX_BASE 0x2000 /* Transmit memory, 2 kB */
+#define UNUSED_BASE 0x2800 /* Unused, 22 kB */
+#define RX_SIZE (TX_BASE-RX_BASE) /* Size of receive area */
+#define RX_SIZE_SHIFT 6 /* Bits to shift in stop register */
+
+#define TRUE 1
+#define FALSE 0
+
+#define MOD_ENAL 1
+#define MOD_PROM 2
+
+/* Size of a MAC address */
+#define WAVELAN_ADDR_SIZE 6
+
+/* Maximum size of Wavelan packet */
+#define WAVELAN_MTU 1500
+
+#define MAXDATAZ (6 + 6 + 2 + WAVELAN_MTU)
+
+/********************** PARAMETER STORAGE AREA **********************/
+
+/*
+ * Parameter Storage Area (PSA).
+ */
+typedef struct psa_t psa_t;
+struct psa_t
+{
+ /* For the PCMCIA Adapter, locations 0x00-0x0F are unused and fixed at 00 */
+ unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
+ unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
+ unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
+ unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
+ unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
+ unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
+ unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
+ unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
+ unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
+ unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
+
+ unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
+ unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x16-1B] Local MAC Address */
+ unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
+#define PSA_UNIVERSAL 0 /* Universal (factory) */
+#define PSA_LOCAL 1 /* Local */
+ unsigned char psa_comp_number; /* [0x1D] Compatibility Number: */
+#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
+#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
+#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
+#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
+#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
+ unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
+ unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
+#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
+ unsigned char psa_subband; /* [0x20] Subband */
+#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
+#define PSA_SUBBAND_2425 1 /* 2425 MHz */
+#define PSA_SUBBAND_2460 2 /* 2460 MHz */
+#define PSA_SUBBAND_2484 3 /* 2484 MHz */
+#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
+ unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
+ unsigned char psa_mod_delay; /* [0x22] Modem Delay ??? (reserved) */
+ unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
+ unsigned char psa_nwid_select; /* [0x25] Network ID Select On Off */
+ unsigned char psa_encryption_select; /* [0x26] Encryption On Off */
+ unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
+ unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
+ unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
+ unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
+ unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
+ unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
+ unsigned char psa_crc[2]; /* [0x3D-0x3E] CRC-16 over PSA */
+ unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
+};
+
+/* Size for structure checking (if padding is correct) */
+#define PSA_SIZE 64
+
+/* Calculate offset of a field in the above structure
+ * Warning : only even addresses are used */
+#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
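+
+/*
+ * Usage sketch (hypothetical 'dev' variable): psaoff() turns a field name
+ * into a byte offset inside the PSA, which the psa_read()/psa_write()
+ * helpers declared in wavelan_cs.p.h then use.
+ */
+#if 0
+	u_char	mac[WAVELAN_ADDR_SIZE];
+
+	/* Read the factory MAC address : 6 bytes starting at offset 0x10 */
+	psa_read(dev, psaoff(0, psa_univ_mac_addr), mac, WAVELAN_ADDR_SIZE);
+#endif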
+
+/******************** MODEM MANAGEMENT INTERFACE ********************/
+
+/*
+ * Modem Management Controller (MMC) write structure.
+ */
+typedef struct mmw_t mmw_t;
+struct mmw_t
+{
+ unsigned char mmw_encr_key[8]; /* encryption key */
+ unsigned char mmw_encr_enable; /* enable/disable encryption */
+#define MMW_ENCR_ENABLE_MODE 0x02 /* Mode of security option */
+#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option */
+ unsigned char mmw_unused0[1]; /* unused */
+ unsigned char mmw_des_io_invert; /* Encryption option */
+#define MMW_DES_IO_INVERT_RES 0x0F /* Reserved */
+#define MMW_DES_IO_INVERT_CTRL 0xF0 /* Control ??? (set to 0) */
+ unsigned char mmw_unused1[5]; /* unused */
+ unsigned char mmw_loopt_sel; /* looptest selection */
+#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* disable NWID filtering */
+#define MMW_LOOPT_SEL_INT 0x20 /* activate Attention Request */
+#define MMW_LOOPT_SEL_LS 0x10 /* looptest w/o collision avoidance */
+#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
+#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
+#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
+#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
+ unsigned char mmw_jabber_enable; /* jabber timer enable */
+ /* Abort transmissions > 200 ms */
+ unsigned char mmw_freeze; /* freeze / unfreeze signal level */
+ /* 0 : signal level & qual updated for every new message, 1 : frozen */
+ unsigned char mmw_anten_sel; /* antenna selection */
+#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
+#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
+ unsigned char mmw_ifs; /* inter frame spacing */
+ /* min time between transmissions in bit periods (.5 us) - bit 0 ignored */
+ unsigned char mmw_mod_delay; /* modem delay (synchro) */
+ unsigned char mmw_jam_time; /* jamming time (after collision) */
+ unsigned char mmw_unused2[1]; /* unused */
+ unsigned char mmw_thr_pre_set; /* level threshold preset */
+ /* Discard all packet with signal < this value (4) */
+ unsigned char mmw_decay_prm; /* decay parameters */
+ unsigned char mmw_decay_updat_prm; /* decay update parameters */
+ unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
+ /* Discard all packet with quality < this value (3) */
+ unsigned char mmw_netw_id_l; /* NWID low order byte */
+ unsigned char mmw_netw_id_h; /* NWID high order byte */
+ /* Network ID or Domain : create virtual net on the air */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmw_mode_select; /* for analog tests (set to 0) */
+ unsigned char mmw_unused3[1]; /* unused */
+ unsigned char mmw_fee_ctrl; /* frequency eeprom control */
+#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions */
+#define MMW_FEE_CTRL_DWLD 0x08 /* Download eeprom to mmc */
+#define MMW_FEE_CTRL_CMD 0x07 /* EEprom commands : */
+#define MMW_FEE_CTRL_READ 0x06 /* Read */
+#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
+#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address */
+#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses */
+#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
+#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
+#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
+#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers */
+#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write addr in protect register */
+#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
+ /* Never issue this command (PRDS) : it's irreversible !!! */
+
+ unsigned char mmw_fee_addr; /* EEprom address */
+#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel */
+#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
+#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
+#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
+#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
+#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
+
+ unsigned char mmw_fee_data_l; /* Write data to EEprom */
+ unsigned char mmw_fee_data_h; /* high octet */
+ unsigned char mmw_ext_ant; /* Setting for external antenna */
+#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
+#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
+#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
+#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
+#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
+};
+
+/* Size for structure checking (if padding is correct) */
+#define MMW_SIZE 37
+
+/* Calculate offset of a field in the above structure */
+#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
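+
+/*
+ * Usage sketch (hypothetical 'base' and 'nwid' variables): mmwoff() gives
+ * the offset of a write-side MMC register, which is then passed to
+ * mmc_out() (declared in wavelan_cs.p.h) with the I/O base address.
+ */
+#if 0
+	/* Program the low octet of the Network ID */
+	mmc_out(base, mmwoff(0, mmw_netw_id_l), nwid & 0xFF);
+#endif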
+
+
+/*
+ * Modem Management Controller (MMC) read structure.
+ */
+typedef struct mmr_t mmr_t;
+struct mmr_t
+{
+ unsigned char mmr_unused0[8]; /* unused */
+ unsigned char mmr_des_status; /* encryption status */
+ unsigned char mmr_des_avail; /* encryption available (0x55 read) */
+#define MMR_DES_AVAIL_DES 0x55 /* DES available */
+#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
+ unsigned char mmr_des_io_invert; /* des I/O invert register */
+ unsigned char mmr_unused1[5]; /* unused */
+ unsigned char mmr_dce_status; /* DCE status */
+#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
+#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
+#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
+#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
+#define MMR_DCE_STATUS 0x0F /* mask to get the bits */
+ unsigned char mmr_dsp_id; /* DSP id (AA = Daedalus rev A) */
+ unsigned char mmr_unused2[2]; /* unused */
+ unsigned char mmr_correct_nwid_l; /* # of correct NWID's rxd (low) */
+ unsigned char mmr_correct_nwid_h; /* # of correct NWID's rxd (high) */
+ /* Warning : Read high order octet first !!! */
+ unsigned char mmr_wrong_nwid_l; /* # of wrong NWID's rxd (low) */
+ unsigned char mmr_wrong_nwid_h; /* # of wrong NWID's rxd (high) */
+ unsigned char mmr_thr_pre_set; /* level threshold preset */
+#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
+#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
+ unsigned char mmr_signal_lvl; /* signal level */
+#define MMR_SIGNAL_LVL 0x3F /* signal level */
+#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_silence_lvl; /* silence level (noise) */
+#define MMR_SILENCE_LVL 0x3F /* silence level */
+#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_sgnl_qual; /* signal quality */
+#define MMR_SGNL_QUAL 0x0F /* signal quality */
+#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
+ unsigned char mmr_netw_id_l; /* NWID low order byte ??? */
+ unsigned char mmr_unused3[3]; /* unused */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmr_fee_status; /* Status of frequency eeprom */
+#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision id */
+#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
+#define MMR_FEE_STATUS_BUSY 0x04 /* EEprom busy */
+ unsigned char mmr_unused4[1]; /* unused */
+ unsigned char mmr_fee_data_l; /* Read data from eeprom (low) */
+ unsigned char mmr_fee_data_h; /* Read data from eeprom (high) */
+};
+
+/* Size for structure checking (if padding is correct) */
+#define MMR_SIZE 36
+
+/* Calculate offset of a field in the above structure */
+#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
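+
+/*
+ * Usage sketch (hypothetical variables, not the driver's actual code):
+ * reading the signal and silence levels through mmc_read() and masking
+ * off the valid bits with the constants above.
+ */
+#if 0
+	u_char	lvl[2];
+
+	/* mmr_signal_lvl and mmr_silence_lvl are consecutive in mmr_t */
+	mmc_read(base, mmroff(0, mmr_signal_lvl), lvl, 2);
+	signal_lvl  = lvl[0] & MMR_SIGNAL_LVL;
+	silence_lvl = lvl[1] & MMR_SILENCE_LVL;
+#endif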
+
+
+/* Make the two above structures one */
+typedef union mm_t
+{
+ struct mmw_t w; /* Write to the mmc */
+ struct mmr_t r; /* Read from the mmc */
+} mm_t;
+
+#endif /* _WAVELAN_CS_H */
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
new file mode 100644
index 000000000000..ea2ef8dddb92
--- /dev/null
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -0,0 +1,813 @@
+/*
+ * Wavelan Pcmcia driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contains all definitions and declarations necessary for the
+ * wavelan pcmcia driver. This file is a private header, so it should
+ * be included only by wavelan_cs.c !!!
+ */
+
+#ifndef WAVELAN_CS_P_H
+#define WAVELAN_CS_P_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * This driver provides a Linux interface to the Wavelan Pcmcia hardware.
+ * The Wavelan is a product of Lucent (http://www.wavelan.com/).
+ * This division was formerly part of NCR and then AT&T.
+ * Wavelan cards are also distributed by DEC (RoamAbout DS)...
+ *
+ * To know how to use this driver, read the PCMCIA HOWTO.
+ * If you want to exploit the many other functionalities, look at the
+ * comments in the code...
+ *
+ * This driver is the result of the effort of many people (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * Web page
+ * --------
+ * I try to maintain a web page with the Wireless LAN Howto at :
+ * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Wavelan.html
+ *
+ * SMP
+ * ---
+ * We now are SMP compliant (I eventually fixed the remaining bugs).
+ * The driver has been tested on a dual P6-150 and survived my usual
+ * set of torture tests.
+ * Anyway, I spent enough time chasing interrupt re-entrancy during
+ * errors or reconfigure, and I designed the locked/unlocked sections
+ * of the driver with great care, and with the recent addition of
+ * the spinlock (thanks to the new API), we should be quite close to
+ * the truth.
+ * The SMP/IRQ locking is quite coarse and conservative (i.e. not fast),
+ * but better safe than sorry (especially at 2 Mb/s ;-).
+ *
+ * I have also looked into disabling only our interrupt on the card
+ * (via HACR) instead of all interrupts in the processor (via cli),
+ * so that other drivers are not impacted, and it looks like it's
+ * possible, but it's very tricky to do right (full of races). As
+ * the gain would be mostly for SMP systems, it can wait...
+ *
+ * Debugging and options
+ * ---------------------
+ * You will find below a set of '#define' allowing very fine control
+ * over the driver behaviour and the debug messages printed.
+ * The main options are :
+ * o WAVELAN_ROAMING, for the experimental roaming support.
+ * o SET_PSA_CRC, to have your card correctly recognised by
+ * an access point and the Point-to-Point diagnostic tool.
+ * o USE_PSA_CONFIG, to read configuration from the PSA (EEprom)
+ * (otherwise we always start afresh with some defaults)
+ *
+ * wavelan_cs.o is darn too big
+ * -------------------------
+ * That's true ! There is a very simple way to reduce the driver
+ * object by 33% (yes !). Comment out the following line :
+ * #include <linux/wireless.h>
+ * Other compile options can also reduce the size of it...
+ *
+ * MAC address and hardware detection :
+ * ----------------------------------
+ * The detection code of the wavelan checks that the first 3
+ * octets of the MAC address fit the company code. This type of
+ * detection works well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan_cs.h), but of course will fail for other
+ * manufacturers.
+ *
+ * If you are sure that your card is derived from the wavelan,
+ * here is the way to configure it :
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, ...)
+ * b) With the driver :
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ * o Boot and look at the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan_cs.h)
+ * 3) Compile & verify
+ * 4) Send me the MAC code - I will include it in the next version...
+ *
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first one to support "wireless extensions".
+ * This set of extensions provides you with a way to control the wireless
+ * characteristics of the hardware in a standard way and support for
+ * applications for taking advantage of it (like Mobile IP).
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan_cs.c : The actual code for the driver - C functions
+ *
+ * wavelan_cs.p.h : Private header : local types / vars for the driver
+ *
+ * wavelan_cs.h : Description of the hardware interface & structs
+ *
+ * i82593.h : Description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * The history of the Wavelan drivers is as complicated as the history of
+ * the Wavelan itself (NCR -> AT&T -> Lucent).
+ *
+ * All started with Anders Klemets <klemets@paul.rutgers.edu>,
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modified this for the Pcmcia hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
+ * and added specific Pcmcia support (there is currently no equivalent
+ * of the PCMCIA package under BSD...).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started modifying Bruce's driver
+ * (with help of the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished his work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to correctly
+ * start 2.00 cards (2.4 GHz with frequency selection).
+ * David Hinds <dahinds@users.sourceforge.net> integrated the whole in his
+ * Pcmcia package (+ bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patches to the Pcmcia driver. Afterwards, I added code in the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Now, I'm doing the same to the Pcmcia driver + some
+ * reorganisation.
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much needed information on the Wavelan hardware.
+ */
+
+/* By the way : for the copyright & legal stuff :
+ * Almost everybody wrote code under the GNU or BSD license (or alike),
+ * and wants their original copyright to remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody wants to take responsibility for anything, except the fame...
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * Credits:
+ * Special thanks to Jan Hoogendoorn of AT&T GIS Utrecht and
+ * Loeke Brederveld of Lucent for providing extremely useful
+ * information about WaveLAN PCMCIA hardware
+ *
+ * This driver is based upon several other drivers, in particular:
+ * David Hinds' Linux driver for the PCMCIA 3c589 ethernet adapter
+ * Bruce Janson's Linux driver for the AT-bus WaveLAN adapter
+ * Anders Klemets' PCMCIA WaveLAN adapter driver
+ * Robert Morris' BSDI driver for the PCMCIA WaveLAN adapter
+ *
+ * Additional Credits:
+ *
+ * This software was originally developed under Linux 1.2.3
+ * (Slackware 2.0 distribution).
+ * And then under Linux 2.0.x (Debian 1.1 -> 2.2 - pcmcia 2.8.18+)
+ * with an HP OmniBook 4000 and then a 5500.
+ *
+ * It is based on other device drivers and information either written
+ * or supplied by:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@super.org),
+ * Jim Binkley <jrb@cs.pdx.edu>,
+ * Loeke Brederveld <lbrederv@wavelan.com>,
+ * Allan Creighton (allanc@cs.su.oz.au),
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Joe Finney <joe@comp.lancs.ac.uk>,
+ * Matthew Geier (matthew@cs.su.oz.au),
+ * Remo di Giovanni (remo@cs.su.oz.au),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * David Hinds <dahinds@users.sourceforge.net>,
+ * Jan Hoogendoorn (c/o marteijn@lucent.com),
+ * Bruce Janson <bruce@cs.usyd.edu.au>,
+ * Anthony D. Joseph <adj@lcs.mit.edu>,
+ * Anders Klemets (klemets@paul.rutgers.edu),
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu>,
+ * Marc Meertens (mmeertens@lucent.com),
+ * Keith Moore,
+ * Robert Morris (rtm@das.harvard.edu),
+ * Ian Parkin (ian@cs.su.oz.au),
+ * John Rosenberg (johnr@cs.su.oz.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.su.oz.au),
+ * Stanislav Sinyagin <stas@isf.ru>
+ * Peter Storey,
+ * Jean Tourrilhes <jt@hpl.hp.com>,
+ * Girish Welling (welling@paul.rutgers.edu)
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>...
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present :
+ *
+ * Changes made in 2.8.22 :
+ * ----------------------
+ * - improved wv_set_multicast_list
+ * - catch spurious interrupt
+ * - correct release of the device
+ *
+ * Changes made in release :
+ * ------------------------
+ * - Reorganisation of the code, function name change
+ * - Creation of private header (wavelan_cs.h)
+ * - Reorganised debug messages
+ * - More comments, history, ...
+ * - Configure earlier (in "insert" instead of "open")
+ * and do things only once
+ * - mmc_init : configure the PSA if not done
+ * - mmc_init : 2.00 detection better code for 2.00 init
+ * - better info at startup
+ * - Correct a HUGE bug (volatile & uncalibrated busy loop)
+ * in wv_82593_cmd => config speedup
+ * - Stop receiving & power down on close (and power up on open)
+ * use "ifconfig down" & "ifconfig up ; route add -net ..."
+ * - Send packets : add watchdog instead of polling
+ * - Receive : check frame wrap around & try to recover some frames
+ * - wavelan_set_multicast_list : avoid reset
+ * - add wireless extensions (ioctl & get_wireless_stats)
+ * get/set nwid/frequency on fly, info for /proc/net/wireless
+ * - Suppress useless stuff from lp (net_local), but add link
+ * - More inlines
+ * - Lot of others minor details & cleanups
+ *
+ * Changes made in second release :
+ * ------------------------------
+ * - Optimise wv_82593_reconfig stuff, fix potential problems
+ * - Change error values for ioctl
+ * - Non blocking wv_ru_stop() + call wv_reset() in case of problems
+ * - Remove development printk from wavelan_watchdog()
+ * - Remove of the watchdog to wavelan_close instead of wavelan_release
+ * fix potential problems...
+ * - Start debugging suspend stuff (but it's still a bit weird)
+ * - Debug & optimize dump header/packet in Rx & Tx (debug)
+ * - Use "readb" and "writeb" to be kernel 2.1 compliant
+ * - Better handling of bogus interrupts
+ * - Wireless extension : SETSPY and GETSPY
+ * - Remove old stuff (stats - for those needing it, just ask me...)
+ * - Make wireless extensions optional
+ *
+ * Changes made in third release :
+ * -----------------------------
+ * - cleanups & typos
+ * - modif wireless ext (spy -> only one pointer)
+ * - new private ioctl to set/get quality & level threshold
+ * - Init : correct default value of level threshold for pcmcia
+ * - kill watchdog in hw_reset
+ * - more 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - Add message level (debug stuff in /var/adm/debug & errors not
+ * displayed at console and still in /var/adm/messages)
+ *
+ * Changes made in fourth release :
+ * ------------------------------
+ * - multicast support (yes !) thanks to Yongguang Zhang.
+ *
+ * Changes made in fifth release (2.9.0) :
+ * -------------------------------------
+ * - Revisited multicast code (it was mostly wrong).
+ * - protect code in wv_82593_reconfig with dev->tbusy (oups !)
+ *
+ * Changes made in sixth release (2.9.1a) :
+ * --------------------------------------
+ * - Change the detection code for multi manufacturer code support
+ * - Correct bug (hang kernel) in init when we were "rejecting" a card
+ *
+ * Changes made in seventh release (2.9.1b) :
+ * ----------------------------------------
+ * - Update to wireless extensions changes
+ * - Silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made in eighth release :
+ * -----------------------------
+ * - Small bug in debug code (probably not the last one...)
+ * - 1.2.13 support (thanks to Clark Woodworth)
+ *
+ * Changes made for release in 2.9.2b :
+ * ----------------------------------
+ * - Level threshold is now a standard wireless extension (version 4 !)
+ * - modules parameters types for kernel > 2.1.17
+ * - updated man page
+ * - Others cleanup from David Hinds
+ *
+ * Changes made for release in 2.9.5 :
+ * ---------------------------------
+ * - byte count stats (courtesy of David Hinds)
+ * - Remove dev_tint stuff (courtesy of David Hinds)
+ * - Others cleanup from David Hinds
+ * - Encryption setting from Brent Elphick (thanks a lot !)
+ * - 'base' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Changes made for release in 2.9.6 :
+ * ---------------------------------
+ * - fix bug : no longer disable watchdog in case of bogus interrupt
+ * - increase timeout in config code for picky hardware
+ * - mask unused bits in status (Wireless Extensions)
+ *
+ * Changes integrated by Justin Seger <jseger@MIT.EDU> & David Hinds :
+ * -----------------------------------------------------------------
+ * - Roaming "hack" from Joe Finney <joe@comp.lancs.ac.uk>
+ * - PSA CRC code from Bob Gray <rgray@bald.cs.dartmouth.edu>
+ * - Better initialisation of the i82593 controller
+ * from Joseph K. O'Sullivan <josullvn+@cs.cmu.edu>
+ *
+ * Changes made for release in 3.0.10 :
+ * ----------------------------------
+ * - Fix eject "hang" of the driver under 2.2.X :
+ * o create wv_flush_stale_links()
+ * o Rename wavelan_release to wv_pcmcia_release & move up
+ * o move unregister_netdev to wavelan_detach()
+ * o wavelan_release() no longer call wavelan_detach()
+ * o Suppress "release" timer
+ * o Other cleanups & fixes
+ * - New MAC address in the probe
+ * - Reorg PSA_CRC code (endian neutral & cleaner)
+ * - Correct initialisation of the i82593 from Lucent manual
+ * - Put back the watchdog, with larger timeout
+ * - TRANSMIT_NO_CRC is a "normal" error, so recover from it
+ * from Derrick J Brashear <shadow@dementia.org>
+ * - Better handling of TX and RX normal failure conditions
+ * - #ifdef out all the roaming code
+ * - Add ESSID & "AP current address" ioctl stubs
+ * - General cleanup of the code
+ *
+ * Changes made for release in 3.0.13 :
+ * ----------------------------------
+ * - Re-enable compilation of roaming code by default, but with
+ * do_roaming = 0
+ * - Nuke `nwid=nwid^ntohs(beacon->domain_id)' in wl_roam_gather
+ * at the demand of John Carol Langford <jcl@gs176.sp.cs.cmu.edu>
+ * - Introduced WAVELAN_ROAMING_EXT for incomplete ESSID stuff.
+ *
+ * Changes made for release in 3.0.15 :
+ * ----------------------------------
+ * - Change e-mail and web page addresses
+ * - Watchdog timer is now correctly expressed in HZ, not in jiffies
+ * - Add channel number to the list of frequencies in range
+ * - Add the (short) list of bit-rates in range
+ * - Develop a new sensitivity... (sens.value & sens.fixed)
+ *
+ * Changes made for release in 3.1.2 :
+ * ---------------------------------
+ * - Fix check for root permission (break instead of exit)
+ * - New nwid & encoding setting (Wireless Extension 9)
+ *
+ * Changes made for release in 3.1.12 :
+ * ----------------------------------
+ * - reworked wv_82593_cmd to avoid using the IRQ handler and doing
+ * ugly things with interrupts.
+ * - Add IRQ protection in 82593_config/ru_start/ru_stop/watchdog
+ * - Update to new network API (softnet - 2.3.43) :
+ * o replace dev->tbusy (David + me)
+ * o replace dev->tstart (David + me)
+ * o remove dev->interrupt (David)
+ * o add SMP locking via spinlock in splxx (me)
+ * o add spinlock in interrupt handler (me)
+ * o use kernel watchdog instead of ours (me)
+ * o verify that all the changes make sense and work (me)
+ * - Re-sync kernel/pcmcia versions (not much actually)
+ * - A few other cleanups (David & me)...
+ *
+ * Changes made for release in 3.1.22 :
+ * ----------------------------------
+ * - Check that SMP works, remove annoying log message
+ *
+ * Changes made for release in 3.1.24 :
+ * ----------------------------------
+ * - Fix infrequent card lockup when the watchdog was resetting the hardware :
+ * o control first busy loop in wv_82593_cmd()
+ * o Extend spinlock protection in wv_hw_config()
+ *
+ * Changes made for release in 3.1.33 :
+ * ----------------------------------
+ * - Optional use new driver API for Wireless Extensions :
+ * o got rid of wavelan_ioctl()
+ * o use a bunch of iw_handler instead
+ *
+ * Changes made for release in 3.2.1 :
+ * ---------------------------------
+ * - Set dev->trans_start to avoid filling the logs
+ * (and generating useless abort commands)
+ * - Avoid deadlocks in mmc_out()/mmc_in()
+ *
+ * Wishes & dreams:
+ * ----------------
+ * - Cleanup and integrate the roaming code
+ * (std debug, set DomainID, decay avg and co...)
+ */
+
+/***************************** INCLUDES *****************************/
+
+/* Linux headers that we need */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/ethtool.h>
+
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h> /* Wireless extensions */
+#include <net/iw_handler.h> /* New driver API */
+#endif
+
+/* Pcmcia headers that we need */
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/version.h>
+
+/* Wavelan declarations */
+#include "i82593.h" /* Definitions for the Intel chip */
+
+#include "wavelan_cs.h" /* Others bits of the hardware */
+
+/************************** DRIVER OPTIONS **************************/
+/*
+ * `#define' or `#undef' the following constant to change the behaviour
+ * of the driver...
+ */
+#define WAVELAN_ROAMING /* Include experimental roaming code */
+#undef WAVELAN_ROAMING_EXT /* Enable roaming wireless extensions */
+#undef SET_PSA_CRC /* Set the CRC in PSA (slower) */
+#define USE_PSA_CONFIG /* Use info from the PSA */
+#undef STRUCT_CHECK /* Verify padding of structures */
+#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
+#undef SET_MAC_ADDRESS /* Experimental */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+/* Warning : this stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+#undef HISTOGRAM /* Enable histogram of sig level... */
+#endif
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
+#undef DEBUG_CONFIG_INFO /* What's going on... */
+#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
+#undef DEBUG_TX_TRACE /* Transmission calls */
+#undef DEBUG_TX_INFO /* Header of the transmitted packet */
+#undef DEBUG_TX_FAIL /* Normal failure conditions */
+#define DEBUG_TX_ERROR /* Unexpected conditions */
+#undef DEBUG_RX_TRACE /* Reception calls */
+#undef DEBUG_RX_INFO /* Header of the received packet */
+#undef DEBUG_RX_FAIL /* Normal failure conditions */
+#define DEBUG_RX_ERROR /* Unexpected conditions */
+#undef DEBUG_PACKET_DUMP /* Dump packet on the screen */
+#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
+#undef DEBUG_IOCTL_INFO /* Various debug info */
+#define DEBUG_IOCTL_ERROR /* What's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info */
+#undef DEBUG_VERSION_SHOW /* Print version info */
+#undef DEBUG_PSA_SHOW /* Dump psa to screen */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
+#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
+#undef DEBUG_I82593_SHOW /* Show i82593 status */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters */
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan_cs.c : v24 (SMP + wireless extensions) 11/1/02\n";
+#endif
+
+/* Watchdog timeout, in jiffies */
+#define WATCHDOG_JIFFIES (256*HZ/100)
+
+/* Fix a bug in some old wireless extension definitions */
+#ifndef IW_ESSID_MAX_SIZE
+#define IW_ESSID_MAX_SIZE 32
+#endif
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCIWFIRSTPRIV /* Set quality threshold */
+#define SIOCGIPQTHR SIOCIWFIRSTPRIV + 1 /* Get quality threshold */
+#define SIOCSIPROAM SIOCIWFIRSTPRIV + 2 /* Set roaming state */
+#define SIOCGIPROAM SIOCIWFIRSTPRIV + 3 /* Get roaming state */
+
+#define SIOCSIPHISTO SIOCIWFIRSTPRIV + 4 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCIWFIRSTPRIV + 5 /* Get histogram values */
+
+/*************************** WaveLAN Roaming **************************/
+#ifdef WAVELAN_ROAMING /* Conditional compile, see above in options */
+
+#define WAVELAN_ROAMING_DEBUG 0 /* 1 = Trace of handover decisions */
+ /* 2 = Info on each beacon rcvd... */
+#define MAX_WAVEPOINTS 7 /* Max visible at one time */
+#define WAVEPOINT_HISTORY 5 /* SNR sample history slow search */
+#define WAVEPOINT_FAST_HISTORY 2 /* SNR sample history fast search */
+#define SEARCH_THRESH_LOW 10 /* SNR to enter cell search */
+#define SEARCH_THRESH_HIGH 13 /* SNR to leave cell search */
+#define WAVELAN_ROAMING_DELTA 1 /* Hysteresis value (+/- SNR) */
+#define CELL_TIMEOUT (2*HZ) /* in jiffies */
+
+#define FAST_CELL_SEARCH 1 /* Boolean values... */
+#define NWID_PROMISC 1 /* for code clarity. */
+
+typedef struct wavepoint_beacon
+{
+ unsigned char dsap, /* Unused */
+ ssap, /* Unused */
+ ctrl, /* Unused */
+ O,U,I, /* Unused */
+ spec_id1, /* Unused */
+ spec_id2, /* Unused */
+ pdu_type, /* Unused */
+ seq; /* WavePoint beacon sequence number */
+ unsigned short domain_id, /* WavePoint Domain ID */
+ nwid; /* WavePoint NWID */
+} wavepoint_beacon;
+
+typedef struct wavepoint_history
+{
+ unsigned short nwid; /* WavePoint's NWID */
+ int average_slow; /* SNR running average */
+ int average_fast; /* SNR running average */
+ unsigned char sigqual[WAVEPOINT_HISTORY]; /* Ringbuffer of recent SNR's */
+ unsigned char qualptr; /* Index into ringbuffer */
+ unsigned char last_seq; /* Last seq. no seen for WavePoint */
+ struct wavepoint_history *next; /* Next WavePoint in table */
+ struct wavepoint_history *prev; /* Previous WavePoint in table */
+ unsigned long last_seen; /* Time of last beacon recvd, jiffies */
+} wavepoint_history;
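+
+/*
+ * Illustrative sketch (not the driver's actual wl_update_history()): how
+ * the sigqual ring buffer and the slow SNR average above could be updated
+ * when a new beacon sample arrives.
+ */
+#if 0
+static void wavepoint_sample(wavepoint_history *wp, unsigned char sigqual)
+{
+	int i, sum = 0;
+
+	wp->sigqual[wp->qualptr] = sigqual;
+	wp->qualptr = (wp->qualptr + 1) % WAVEPOINT_HISTORY;
+
+	for (i = 0; i < WAVEPOINT_HISTORY; i++)
+		sum += wp->sigqual[i];
+	wp->average_slow = sum / WAVEPOINT_HISTORY;
+}
+#endif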
+
+struct wavepoint_table
+{
+ wavepoint_history *head; /* Start of ringbuffer */
+ int num_wavepoints; /* No. of WavePoints visible */
+ unsigned char locked; /* Table lock */
+};
+
+#endif /* WAVELAN_ROAMING */
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct net_device_stats en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keeps data in two structures. "device"
+ * keeps the generic data (same format for everybody) and "net_local" keeps
+ * the additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+ dev_node_t node; /* ???? What is this stuff ???? */
+ struct net_device * dev; /* Reverse link... */
+ spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
+ dev_link_t * link; /* pcmcia structure */
+ en_stats stats; /* Ethernet interface statistics */
+ int nresets; /* Number of hw resets */
+ u_char configured; /* If it is configured */
+ u_char reconfig_82593; /* Need to reconfigure the controller */
+ u_char promiscuous; /* Promiscuous mode */
+ u_char allmulticast; /* All Multicast mode */
+ int mc_count; /* Number of multicast addresses */
+
+ int stop; /* Current i82593 Stop Hit Register */
+ int rfp; /* Last DMA machine receive pointer */
+ int overrunning; /* Receiver overrun flag */
+
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless specific stats */
+
+ struct iw_spy_data spy_data;
+ struct iw_public_data wireless_data;
+#endif
+
+#ifdef HISTOGRAM
+ int his_number; /* Number of intervals */
+ u_char his_range[16]; /* Boundaries of interval ]n-1; n] */
+ u_long his_sum[16]; /* Sum in interval */
+#endif /* HISTOGRAM */
+#ifdef WAVELAN_ROAMING
+ u_long domain_id; /* Domain ID we lock on for roaming */
+ int filter_domains; /* Check Domain ID of beacon found */
+ struct wavepoint_table wavepoint_table; /* Table of visible WavePoints*/
+ wavepoint_history * curr_point; /* Current wavepoint */
+ int cell_search; /* Searching for new cell? */
+ struct timer_list cell_timer; /* Garbage collection */
+#endif /* WAVELAN_ROAMING */
+ void __iomem *mem;
+};
+
+/**************************** PROTOTYPES ****************************/
+
+#ifdef WAVELAN_ROAMING
+/* ---------------------- ROAMING SUBROUTINES -----------------------*/
+
+wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp);
+wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local *lp);
+void wl_del_wavepoint(wavepoint_history *wavepoint, net_local *lp);
+void wl_cell_expiry(unsigned long data);
+wavepoint_history *wl_best_sigqual(int fast_search, net_local *lp);
+void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq);
+void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp);
+void wv_nwid_filter(unsigned char mode, net_local *lp);
+void wv_roam_init(struct net_device *dev);
+void wv_roam_cleanup(struct net_device *dev);
+#endif /* WAVELAN_ROAMING */
+
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static inline u_char /* data */
+ hasr_read(u_long); /* Read the host interface : base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface : base address */
+ u_char), /* data */
+ hacr_write_slow(u_long,
+ u_char);
+static void
+ psa_read(struct net_device *, /* Read the Parameter Storage Area */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(struct net_device *, /* Write to the PSA */
+ int, /* Offset in psa */
+ u_char *, /* Buffer in memory */
+ int); /* Length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEprom : base address */
+ int, /* Base delay to wait for */
+ int); /* Number of time to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEprom : base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82593 SUBROUTINES ----------------------- */
+static int
+ wv_82593_cmd(struct net_device *, /* synchronously send a command to i82593 */
+ char *,
+ int,
+ int);
+static inline int
+ wv_diag(struct net_device *); /* Diagnose the i82593 */
+static int
+ read_ringbuf(struct net_device *, /* Read a receive buffer */
+ int,
+ char *,
+ int);
+static inline void
+ wv_82593_reconfig(struct net_device *); /* Reconfigure the controller */
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+static inline void
+ wv_init_info(struct net_device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */
+static iw_stats *
+ wavelan_get_wireless_stats(struct net_device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline int
+ wv_start_of_frame(struct net_device *, /* Seek beginning of current frame */
+ int, /* end of frame */
+ int); /* start of buffer */
+static inline void
+ wv_packet_read(struct net_device *, /* Read a packet from a frame */
+ int,
+ int),
+ wv_packet_rcv(struct net_device *); /* Read all packets waiting */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline void
+ wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
+ struct net_device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(struct net_device *); /* Initialize the modem */
+static int
+ wv_ru_stop(struct net_device *), /* Stop the i82593 receiver unit */
+ wv_ru_start(struct net_device *); /* Start the i82593 receiver unit */
+static int
+ wv_82593_config(struct net_device *); /* Configure the i82593 */
+static inline int
+ wv_pcmcia_reset(struct net_device *); /* Reset the pcmcia interface */
+static int
+ wv_hw_config(struct net_device *); /* Reset & configure the whole hardware */
+static inline void
+ wv_hw_reset(struct net_device *); /* Same, + start receiver unit */
+static inline int
+ wv_pcmcia_config(dev_link_t *); /* Configure the pcmcia interface */
+static void
+ wv_pcmcia_release(dev_link_t *);/* Remove a device */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static irqreturn_t
+ wavelan_interrupt(int, /* Interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(struct net_device *); /* Transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(struct net_device *), /* Open the device */
+ wavelan_close(struct net_device *); /* Close the device */
+static dev_link_t *
+ wavelan_attach(void); /* Create a new device */
+static void
+ wavelan_detach(dev_link_t *); /* Destroy a removed device */
+static int
+ wavelan_event(event_t, /* Manage pcmcia events */
+ int,
+ event_callback_args_t *);
+
+/**************************** VARIABLES ****************************/
+
+static dev_info_t dev_info = "wavelan_cs";
+static dev_link_t *dev_list = NULL; /* Linked list of devices */
+
+/*
+ * Parameters that can be set with 'insmod'
+ * The exact syntax is 'insmod wavelan_cs.o <var>=<value>'
+ */
+
+/* Shared memory speed, in ns */
+static int mem_speed = 0;
+
+/* New module interface */
+module_param(mem_speed, int, 0);
+
+#ifdef WAVELAN_ROAMING /* Conditional compile, see above in options */
+/* Enable roaming mode ? No ! Please keep this to 0 */
+static int do_roaming = 0;
+module_param(do_roaming, bool, 0);
+#endif /* WAVELAN_ROAMING */
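+
+/*
+ * Example invocation of the parameters above (hypothetical values) :
+ *	insmod wavelan_cs.o mem_speed=100 do_roaming=0
+ */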
+
+MODULE_LICENSE("GPL");
+
+#endif /* WAVELAN_CS_P_H */
+
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
new file mode 100644
index 000000000000..8636d9306785
--- /dev/null
+++ b/drivers/net/wireless/wl3501.h
@@ -0,0 +1,614 @@
+#ifndef __WL3501_H__
+#define __WL3501_H__
+
+#include <linux/spinlock.h>
+#include "ieee802_11.h"
+
+/* define for WLA 2.0 */
+#define WL3501_BLKSZ 256
+/*
+ * ID for input Signals of DRIVER block
+ * bit[7-5] is block ID: 000
+ * bit[4-0] is signal ID
+*/
+enum wl3501_signals {
+ WL3501_SIG_ALARM,
+ WL3501_SIG_MD_CONFIRM,
+ WL3501_SIG_MD_IND,
+ WL3501_SIG_ASSOC_CONFIRM,
+ WL3501_SIG_ASSOC_IND,
+ WL3501_SIG_AUTH_CONFIRM,
+ WL3501_SIG_AUTH_IND,
+ WL3501_SIG_DEAUTH_CONFIRM,
+ WL3501_SIG_DEAUTH_IND,
+ WL3501_SIG_DISASSOC_CONFIRM,
+ WL3501_SIG_DISASSOC_IND,
+ WL3501_SIG_GET_CONFIRM,
+ WL3501_SIG_JOIN_CONFIRM,
+ WL3501_SIG_PWR_MGMT_CONFIRM,
+ WL3501_SIG_REASSOC_CONFIRM,
+ WL3501_SIG_REASSOC_IND,
+ WL3501_SIG_SCAN_CONFIRM,
+ WL3501_SIG_SET_CONFIRM,
+ WL3501_SIG_START_CONFIRM,
+ WL3501_SIG_RESYNC_CONFIRM,
+ WL3501_SIG_SITE_CONFIRM,
+ WL3501_SIG_SAVE_CONFIRM,
+ WL3501_SIG_RFTEST_CONFIRM,
+/*
+ * ID for input Signals of MLME block
+ * bit[7-5] is block ID: 010
+ * bit[4-0] is signal ID
+ */
+ WL3501_SIG_ASSOC_REQ = 0x20,
+ WL3501_SIG_AUTH_REQ,
+ WL3501_SIG_DEAUTH_REQ,
+ WL3501_SIG_DISASSOC_REQ,
+ WL3501_SIG_GET_REQ,
+ WL3501_SIG_JOIN_REQ,
+ WL3501_SIG_PWR_MGMT_REQ,
+ WL3501_SIG_REASSOC_REQ,
+ WL3501_SIG_SCAN_REQ,
+ WL3501_SIG_SET_REQ,
+ WL3501_SIG_START_REQ,
+ WL3501_SIG_MD_REQ,
+ WL3501_SIG_RESYNC_REQ,
+ WL3501_SIG_SITE_REQ,
+ WL3501_SIG_SAVE_REQ,
+ WL3501_SIG_RF_TEST_REQ,
+ WL3501_SIG_MM_CONFIRM = 0x60,
+ WL3501_SIG_MM_IND,
+};
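+
+/*
+ * Illustrative sketch (not part of the original driver): decoding the
+ * block ID (bits 7-5) and the signal ID (bits 4-0) documented above.
+ */
+#if 0
+static inline u8 wl3501_sig_block(u8 sig_id)
+{
+	return (sig_id >> 5) & 0x07;	/* 000 = DRIVER, 010 = MLME, ... */
+}
+
+static inline u8 wl3501_sig_index(u8 sig_id)
+{
+	return sig_id & 0x1f;
+}
+#endif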
+
+enum wl3501_mib_attribs {
+ WL3501_MIB_ATTR_STATION_ID,
+ WL3501_MIB_ATTR_AUTH_ALGORITHMS,
+ WL3501_MIB_ATTR_AUTH_TYPE,
+ WL3501_MIB_ATTR_MEDIUM_OCCUPANCY_LIMIT,
+ WL3501_MIB_ATTR_CF_POLLABLE,
+ WL3501_MIB_ATTR_CFP_PERIOD,
+ WL3501_MIB_ATTR_CFPMAX_DURATION,
+ WL3501_MIB_ATTR_AUTH_RESP_TMOUT,
+ WL3501_MIB_ATTR_RX_DTIMS,
+ WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED,
+ WL3501_MIB_ATTR_PRIV_INVOKED,
+ WL3501_MIB_ATTR_WEP_DEFAULT_KEYS,
+ WL3501_MIB_ATTR_WEP_DEFAULT_KEY_ID,
+ WL3501_MIB_ATTR_WEP_KEY_MAPPINGS,
+ WL3501_MIB_ATTR_WEP_KEY_MAPPINGS_LEN,
+ WL3501_MIB_ATTR_EXCLUDE_UNENCRYPTED,
+ WL3501_MIB_ATTR_WEP_ICV_ERROR_COUNT,
+ WL3501_MIB_ATTR_WEP_UNDECRYPTABLE_COUNT,
+ WL3501_MIB_ATTR_WEP_EXCLUDED_COUNT,
+ WL3501_MIB_ATTR_MAC_ADDR,
+ WL3501_MIB_ATTR_GROUP_ADDRS,
+ WL3501_MIB_ATTR_RTS_THRESHOLD,
+ WL3501_MIB_ATTR_SHORT_RETRY_LIMIT,
+ WL3501_MIB_ATTR_LONG_RETRY_LIMIT,
+ WL3501_MIB_ATTR_FRAG_THRESHOLD,
+ WL3501_MIB_ATTR_MAX_TX_MSDU_LIFETIME,
+ WL3501_MIB_ATTR_MAX_RX_LIFETIME,
+ WL3501_MIB_ATTR_MANUFACTURER_ID,
+ WL3501_MIB_ATTR_PRODUCT_ID,
+ WL3501_MIB_ATTR_TX_FRAG_COUNT,
+ WL3501_MIB_ATTR_MULTICAST_TX_FRAME_COUNT,
+ WL3501_MIB_ATTR_FAILED_COUNT,
+ WL3501_MIB_ATTR_RX_FRAG_COUNT,
+ WL3501_MIB_ATTR_MULTICAST_RX_COUNT,
+ WL3501_MIB_ATTR_FCS_ERROR_COUNT,
+ WL3501_MIB_ATTR_RETRY_COUNT,
+ WL3501_MIB_ATTR_MULTIPLE_RETRY_COUNT,
+ WL3501_MIB_ATTR_RTS_SUCCESS_COUNT,
+ WL3501_MIB_ATTR_RTS_FAILURE_COUNT,
+ WL3501_MIB_ATTR_ACK_FAILURE_COUNT,
+ WL3501_MIB_ATTR_FRAME_DUPLICATE_COUNT,
+ WL3501_MIB_ATTR_PHY_TYPE,
+ WL3501_MIB_ATTR_REG_DOMAINS_SUPPORT,
+ WL3501_MIB_ATTR_CURRENT_REG_DOMAIN,
+ WL3501_MIB_ATTR_SLOT_TIME,
+ WL3501_MIB_ATTR_CCA_TIME,
+ WL3501_MIB_ATTR_RX_TX_TURNAROUND_TIME,
+ WL3501_MIB_ATTR_TX_PLCP_DELAY,
+ WL3501_MIB_ATTR_RX_TX_SWITCH_TIME,
+ WL3501_MIB_ATTR_TX_RAMP_ON_TIME,
+ WL3501_MIB_ATTR_TX_RF_DELAY,
+ WL3501_MIB_ATTR_SIFS_TIME,
+ WL3501_MIB_ATTR_RX_RF_DELAY,
+ WL3501_MIB_ATTR_RX_PLCP_DELAY,
+ WL3501_MIB_ATTR_MAC_PROCESSING_DELAY,
+ WL3501_MIB_ATTR_TX_RAMP_OFF_TIME,
+ WL3501_MIB_ATTR_PREAMBLE_LEN,
+ WL3501_MIB_ATTR_PLCP_HEADER_LEN,
+ WL3501_MIB_ATTR_MPDU_DURATION_FACTOR,
+ WL3501_MIB_ATTR_AIR_PROPAGATION_TIME,
+ WL3501_MIB_ATTR_TEMP_TYPE,
+ WL3501_MIB_ATTR_CW_MIN,
+ WL3501_MIB_ATTR_CW_MAX,
+ WL3501_MIB_ATTR_SUPPORT_DATA_RATES_TX,
+ WL3501_MIB_ATTR_SUPPORT_DATA_RATES_RX,
+ WL3501_MIB_ATTR_MPDU_MAX_LEN,
+ WL3501_MIB_ATTR_SUPPORT_TX_ANTENNAS,
+ WL3501_MIB_ATTR_CURRENT_TX_ANTENNA,
+ WL3501_MIB_ATTR_SUPPORT_RX_ANTENNAS,
+ WL3501_MIB_ATTR_DIVERSITY_SUPPORT,
+ WL3501_MIB_ATTR_DIVERSITY_SELECTION_RS,
+ WL3501_MIB_ATTR_NR_SUPPORTED_PWR_LEVELS,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL1,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL2,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL3,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL4,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL5,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL6,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL7,
+ WL3501_MIB_ATTR_TX_PWR_LEVEL8,
+ WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL,
+ WL3501_MIB_ATTR_CURRENT_CHAN,
+ WL3501_MIB_ATTR_CCA_MODE_SUPPORTED,
+ WL3501_MIB_ATTR_CURRENT_CCA_MODE,
+ WL3501_MIB_ATTR_ED_THRESHOLD,
+ WL3501_MIB_ATTR_SINTHESIZER_LOCKED,
+ WL3501_MIB_ATTR_CURRENT_PWR_STATE,
+ WL3501_MIB_ATTR_DOZE_TURNON_TIME,
+ WL3501_MIB_ATTR_RCR33,
+ WL3501_MIB_ATTR_DEFAULT_CHAN,
+ WL3501_MIB_ATTR_SSID,
+ WL3501_MIB_ATTR_PWR_MGMT_ENABLE,
+ WL3501_MIB_ATTR_NET_CAPABILITY,
+ WL3501_MIB_ATTR_ROUTING,
+};
+
+enum wl3501_net_type {
+ WL3501_NET_TYPE_INFRA,
+ WL3501_NET_TYPE_ADHOC,
+ WL3501_NET_TYPE_ANY_BSS,
+};
+
+enum wl3501_scan_type {
+ WL3501_SCAN_TYPE_ACTIVE,
+ WL3501_SCAN_TYPE_PASSIVE,
+};
+
+enum wl3501_tx_result {
+ WL3501_TX_RESULT_SUCCESS,
+ WL3501_TX_RESULT_NO_BSS,
+ WL3501_TX_RESULT_RETRY_LIMIT,
+};
+
+enum wl3501_sys_type {
+ WL3501_SYS_TYPE_OPEN,
+ WL3501_SYS_TYPE_SHARE_KEY,
+};
+
+enum wl3501_status {
+ WL3501_STATUS_SUCCESS,
+ WL3501_STATUS_INVALID,
+ WL3501_STATUS_TIMEOUT,
+ WL3501_STATUS_REFUSED,
+ WL3501_STATUS_MANY_REQ,
+ WL3501_STATUS_ALREADY_BSS,
+};
+
+#define WL3501_MGMT_CAPABILITY_ESS 0x0001 /* see 802.11 p.58 */
+#define WL3501_MGMT_CAPABILITY_IBSS 0x0002 /* - " - */
+#define WL3501_MGMT_CAPABILITY_CF_POLLABLE 0x0004 /* - " - */
+#define WL3501_MGMT_CAPABILITY_CF_POLL_REQUEST 0x0008 /* - " - */
+#define WL3501_MGMT_CAPABILITY_PRIVACY 0x0010 /* - " - */
+
+#define IW_REG_DOMAIN_FCC 0x10 /* Channel 1 to 11 USA */
+#define IW_REG_DOMAIN_DOC 0x20 /* Channel 1 to 11 Canada */
+#define IW_REG_DOMAIN_ETSI 0x30 /* Channel 1 to 13 Europe */
+#define IW_REG_DOMAIN_SPAIN 0x31 /* Channel 10 to 11 Spain */
+#define IW_REG_DOMAIN_FRANCE 0x32 /* Channel 10 to 13 France */
+#define IW_REG_DOMAIN_MKK 0x40 /* Channel 14 Japan */
+#define IW_REG_DOMAIN_MKK1 0x41 /* Channel 1-14 Japan */
+#define IW_REG_DOMAIN_ISRAEL 0x50 /* Channel 3 - 9 Israel */
+
+#define IW_MGMT_RATE_LABEL_MANDATORY 128 /* MSB */
+
+enum iw_mgmt_rate_labels {
+ IW_MGMT_RATE_LABEL_1MBIT = 2,
+ IW_MGMT_RATE_LABEL_2MBIT = 4,
+ IW_MGMT_RATE_LABEL_5_5MBIT = 11,
+ IW_MGMT_RATE_LABEL_11MBIT = 22,
+};
+
+enum iw_mgmt_info_element_ids {
+ IW_MGMT_INFO_ELEMENT_SSID, /* Service Set Identity */
+ IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
+ IW_MGMT_INFO_ELEMENT_FH_PARAMETER_SET,
+ IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
+ IW_MGMT_INFO_ELEMENT_CS_PARAMETER_SET,
+ IW_MGMT_INFO_ELEMENT_CS_TIM, /* Traffic Information Map */
+ IW_MGMT_INFO_ELEMENT_IBSS_PARAMETER_SET,
+ /* 7-15: Reserved, unused */
+ IW_MGMT_INFO_ELEMENT_CHALLENGE_TEXT = 16,
+ /* 17-31 Reserved for challenge text extension */
+ /* 32-255 Reserved, unused */
+};
+
+struct iw_mgmt_info_element {
+ u8 id; /* one of enum iw_mgmt_info_element_ids,
+ but sizeof(enum) > sizeof(u8) :-( */
+ u8 len;
+ u8 data[0];
+} __attribute__ ((packed));
+
+struct iw_mgmt_essid_pset {
+ struct iw_mgmt_info_element el;
+ u8 essid[IW_ESSID_MAX_SIZE];
+} __attribute__ ((packed));
+
+/*
+ * According to 802.11 Wireless Networks: The Definitive Guide - O'Reilly
+ * Pg 75
+ */
+#define IW_DATA_RATE_MAX_LABELS 8
+
+struct iw_mgmt_data_rset {
+ struct iw_mgmt_info_element el;
+ u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS];
+} __attribute__ ((packed));
+
+struct iw_mgmt_ds_pset {
+ struct iw_mgmt_info_element el;
+ u8 chan;
+} __attribute__ ((packed));
+
+struct iw_mgmt_cf_pset {
+ struct iw_mgmt_info_element el;
+ u8 cfp_count;
+ u8 cfp_period;
+ u16 cfp_max_duration;
+ u16 cfp_dur_remaining;
+} __attribute__ ((packed));
+
+struct iw_mgmt_ibss_pset {
+ struct iw_mgmt_info_element el;
+ u16 atim_window;
+} __attribute__ ((packed));
+
+struct wl3501_tx_hdr {
+ u16 tx_cnt;
+ u8 sync[16];
+ u16 sfd;
+ u8 signal;
+ u8 service;
+ u16 len;
+ u16 crc16;
+ u16 frame_ctrl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+};
+
+struct wl3501_rx_hdr {
+ u16 rx_next_blk;
+ u16 rc_next_frame_blk;
+ u8 rx_blk_ctrl;
+ u8 rx_next_frame;
+ u8 rx_next_frame1;
+ u8 rssi;
+ char time[8];
+ u8 signal;
+ u8 service;
+ u16 len;
+ u16 crc16;
+ u16 frame_ctrl;
+ u16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq;
+ u8 addr4[ETH_ALEN];
+};
+
+struct wl3501_start_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 bss_type;
+ u16 beacon_period;
+ u16 dtim_period;
+ u16 probe_delay;
+ u16 cap_info;
+ struct iw_mgmt_essid_pset ssid;
+ struct iw_mgmt_data_rset bss_basic_rset;
+ struct iw_mgmt_data_rset operational_rset;
+ struct iw_mgmt_cf_pset cf_pset;
+ struct iw_mgmt_ds_pset ds_pset;
+ struct iw_mgmt_ibss_pset ibss_pset;
+};
+
+struct wl3501_assoc_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 timeout;
+ u16 cap_info;
+ u16 listen_interval;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct wl3501_assoc_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 status;
+};
+
+struct wl3501_assoc_ind {
+ u16 next_blk;
+ u8 sig_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct wl3501_auth_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 type;
+ u16 timeout;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct wl3501_auth_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 type;
+ u16 status;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct wl3501_get_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 mib_attrib;
+};
+
+struct wl3501_get_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 mib_status;
+ u16 mib_attrib;
+ u8 mib_value[100];
+};
+
+struct wl3501_join_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ struct iw_mgmt_data_rset operational_rset;
+ u16 reserved2;
+ u16 timeout;
+ u16 probe_delay;
+ u8 timestamp[8];
+ u8 local_time[8];
+ u16 beacon_period;
+ u16 dtim_period;
+ u16 cap_info;
+ u8 bss_type;
+ u8 bssid[ETH_ALEN];
+ struct iw_mgmt_essid_pset ssid;
+ struct iw_mgmt_ds_pset ds_pset;
+ struct iw_mgmt_cf_pset cf_pset;
+ struct iw_mgmt_ibss_pset ibss_pset;
+ struct iw_mgmt_data_rset bss_basic_rset;
+};
+
+struct wl3501_join_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 status;
+};
+
+struct wl3501_pwr_mgmt_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 pwr_save;
+ u8 wake_up;
+ u8 receive_dtims;
+};
+
+struct wl3501_pwr_mgmt_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 status;
+};
+
+struct wl3501_scan_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 bss_type;
+ u16 probe_delay;
+ u16 min_chan_time;
+ u16 max_chan_time;
+ u8 chan_list[14];
+ u8 bssid[ETH_ALEN];
+ struct iw_mgmt_essid_pset ssid;
+ enum wl3501_scan_type scan_type;
+};
+
+struct wl3501_scan_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 status;
+ char timestamp[8];
+ char localtime[8];
+ u16 beacon_period;
+ u16 dtim_period;
+ u16 cap_info;
+ u8 bss_type;
+ u8 bssid[ETH_ALEN];
+ struct iw_mgmt_essid_pset ssid;
+ struct iw_mgmt_ds_pset ds_pset;
+ struct iw_mgmt_cf_pset cf_pset;
+ struct iw_mgmt_ibss_pset ibss_pset;
+ struct iw_mgmt_data_rset bss_basic_rset;
+ u8 rssi;
+};
+
+struct wl3501_start_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 status;
+};
+
+struct wl3501_md_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 routing;
+ u16 data;
+ u16 size;
+ u8 pri;
+ u8 service_class;
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+};
+
+struct wl3501_md_ind {
+ u16 next_blk;
+ u8 sig_id;
+ u8 routing;
+ u16 data;
+ u16 size;
+ u8 reception;
+ u8 pri;
+ u8 service_class;
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+};
+
+struct wl3501_md_confirm {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ u16 data;
+ u8 status;
+ u8 pri;
+ u8 service_class;
+};
+
+struct wl3501_resync_req {
+ u16 next_blk;
+ u8 sig_id;
+};
+
+/* Definitions for supporting clone adapters. */
+/* System Interface Registers (SIR space) */
+#define WL3501_NIC_GCR ((u8)0x00) /* SIR0 - General Conf Register */
+#define WL3501_NIC_BSS ((u8)0x01) /* SIR1 - Bank Switching Select Reg */
+#define WL3501_NIC_LMAL ((u8)0x02) /* SIR2 - Local Mem addr Reg [7:0] */
+#define WL3501_NIC_LMAH ((u8)0x03) /* SIR3 - Local Mem addr Reg [14:8] */
+#define WL3501_NIC_IODPA ((u8)0x04) /* SIR4 - I/O Data Port A */
+#define WL3501_NIC_IODPB ((u8)0x05) /* SIR5 - I/O Data Port B */
+#define WL3501_NIC_IODPC ((u8)0x06) /* SIR6 - I/O Data Port C */
+#define WL3501_NIC_IODPD ((u8)0x07) /* SIR7 - I/O Data Port D */
+
+/* Bits in GCR */
+#define WL3501_GCR_SWRESET ((u8)0x80)
+#define WL3501_GCR_CORESET ((u8)0x40)
+#define WL3501_GCR_DISPWDN ((u8)0x20)
+#define WL3501_GCR_ECWAIT ((u8)0x10)
+#define WL3501_GCR_ECINT ((u8)0x08)
+#define WL3501_GCR_INT2EC ((u8)0x04)
+#define WL3501_GCR_ENECINT ((u8)0x02)
+#define WL3501_GCR_DAM ((u8)0x01)
+
+/* Bits in BSS (Bank Switching Select Register) */
+#define WL3501_BSS_FPAGE0 ((u8)0x20) /* Flash memory page0 */
+#define WL3501_BSS_FPAGE1 ((u8)0x28)
+#define WL3501_BSS_FPAGE2 ((u8)0x30)
+#define WL3501_BSS_FPAGE3 ((u8)0x38)
+#define WL3501_BSS_SPAGE0 ((u8)0x00) /* SRAM page0 */
+#define WL3501_BSS_SPAGE1 ((u8)0x08)
+#define WL3501_BSS_SPAGE2 ((u8)0x10)
+#define WL3501_BSS_SPAGE3 ((u8)0x18)
+
+/* Define Driver Interface */
+/* Refer IEEE 802.11 */
+/* Tx packet header, include PLCP and MPDU */
+/* Tx PLCP Header */
+struct wl3501_80211_tx_plcp_hdr {
+ u8 sync[16];
+ u16 sfd;
+ u8 signal;
+ u8 service;
+ u16 len;
+ u16 crc16;
+} __attribute__ ((packed));
+
+struct wl3501_80211_tx_hdr {
+ struct wl3501_80211_tx_plcp_hdr pclp_hdr;
+ struct ieee802_11_hdr mac_hdr;
+} __attribute__ ((packed));
+
+/*
+ Reserve the beginning Tx space for descriptor use.
+
+ TxBlockOffset --> *----*----*----*----* \
+ (TxFreeDesc) | 0 | 1 | 2 | 3 | \
+ | 4 | 5 | 6 | 7 | |
+ | 8 | 9 | 10 | 11 | TX_DESC * 20
+ | 12 | 13 | 14 | 15 | |
+ | 16 | 17 | 18 | 19 | /
+ TxBufferBegin --> *----*----*----*----* /
+ (TxBufferHead) | |
+ (TxBufferTail) | |
+ | Send Buffer |
+ | |
+ | |
+ *-------------------*
+ TxBufferEnd -------------------------/
+
+*/
+
+struct wl3501_card {
+ int base_addr;
+ u8 mac_addr[ETH_ALEN];
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ struct wl3501_get_confirm sig_get_confirm;
+ struct wl3501_pwr_mgmt_confirm sig_pwr_mgmt_confirm;
+ u16 tx_buffer_size;
+ u16 tx_buffer_head;
+ u16 tx_buffer_tail;
+ u16 tx_buffer_cnt;
+ u16 esbq_req_start;
+ u16 esbq_req_end;
+ u16 esbq_req_head;
+ u16 esbq_req_tail;
+ u16 esbq_confirm_start;
+ u16 esbq_confirm_end;
+ u16 esbq_confirm;
+ struct iw_mgmt_essid_pset essid;
+ struct iw_mgmt_essid_pset keep_essid;
+ u8 bssid[ETH_ALEN];
+ int net_type;
+ char nick[32];
+ char card_name[32];
+ char firmware_date[32];
+ u8 chan;
+ u8 cap_info;
+ u16 start_seg;
+ u16 bss_cnt;
+ u16 join_sta_bss;
+ u8 rssi;
+ u8 adhoc_times;
+ u8 reg_domain;
+ u8 version[2];
+ struct wl3501_scan_confirm bss_set[20];
+ struct net_device_stats stats;
+ struct iw_statistics wstats;
+ struct iw_spy_data spy_data;
+ struct dev_node_t node;
+};
+#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
new file mode 100644
index 000000000000..1433e5aaf1b4
--- /dev/null
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -0,0 +1,2270 @@
+/*
+ * WL3501 Wireless LAN PCMCIA Card Driver for Linux
+ * Written originally for Linux 2.0.30 by Fox Chen, mhchen@golf.ccl.itri.org.tw
+ * Ported to 2.2, 2.4 & 2.5 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * Wireless extensions in 2.4 by Gustavo Niemeyer <niemeyer@conectiva.com>
+ *
+ * References used by Fox Chen while writing the original driver for 2.0.30:
+ *
+ * 1. WL24xx packet drivers (tooasm.asm)
+ * 2. Access Point Firmware Interface Specification for IEEE 802.11 SUTRO
+ * 3. IEEE 802.11
+ * 4. Linux network driver (/usr/src/linux/drivers/net)
+ * 5. ISA card driver - wl24.c
+ * 6. Linux PCMCIA skeleton driver - skeleton.c
+ * 7. Linux PCMCIA 3c589 network driver - 3c589_cs.c
+ *
+ * Tested with WL2400 firmware 1.2, Linux 2.0.30, and pcmcia-cs-2.9.12
+ * 1. Performance: about 165 Kbytes/sec in TCP/IP with Ad-Hoc mode.
+ * rsh 192.168.1.3 "dd if=/dev/zero bs=1k count=1000" > /dev/null
+ * (Specification 2M bits/sec. is about 250 Kbytes/sec., but we must deduct
+ * ETHER/IP/UDP/TCP header, and acknowledgement overhead)
+ *
+ * Tested with Planet AP in 2.4.17, 184 Kbytes/s in UDP in Infrastructure mode,
+ * 173 Kbytes/s in TCP.
+ *
+ * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
+ * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
+ */
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fcntl.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wireless.h>
+
+#include <net/iw_handler.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include "wl3501.h"
+
+#ifndef __i386__
+#define slow_down_io()
+#endif
+
+/* For rough constant delay */
+#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); }
+
+/*
+ * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If you do not
+ * define PCMCIA_DEBUG at all, all the debug code will be left out. If you
+ * compile with PCMCIA_DEBUG=0, the debug code will be present but disabled --
+ * but it can then be enabled for specific modules at load time with a
+ * 'pc_debug=#' option to insmod.
+ */
+#define PCMCIA_DEBUG 0
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define dprintk(n, format, args...) \
+ { if (pc_debug > (n)) \
+ printk(KERN_INFO "%s: " format "\n", __FUNCTION__ , ##args); }
+#else
+#define dprintk(n, format, args...)
+#endif
+
+#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); }
+#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); }
+#define wl3501_outsb(a, b, c) { outsb(a, b, c); slow_down_io(); }
+
+#define WL3501_RELEASE_TIMEOUT (25 * HZ)
+#define WL3501_MAX_ADHOC_TRIES 16
+
+#define WL3501_RESUME 0
+#define WL3501_SUSPEND 1
+
+/*
+ * The event() function is this driver's Card Services event handler. It will
+ * be called by Card Services when an appropriate card status event is
+ * received. The config() and release() entry points are used to configure or
+ * release a socket, in response to card insertion and ejection events. They
+ * are invoked from the wl24 event handler.
+ */
+static void wl3501_config(dev_link_t *link);
+static void wl3501_release(dev_link_t *link);
+static int wl3501_event(event_t event, int pri, event_callback_args_t *args);
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card configuration
+ * database.
+ */
+static dev_info_t wl3501_dev_info = "wl3501_cs";
+
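+/*
+ * Channel-to-center-frequency map for the 2.4 GHz band (index = channel - 1,
+ * values in MHz); wl3501_get_freq() scales these to Hz via freq.m/freq.e.
+ */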
+static int wl3501_chan2freq[] = {
+ [0] = 2412, [1] = 2417, [2] = 2422, [3] = 2427, [4] = 2432,
+ [5] = 2437, [6] = 2442, [7] = 2447, [8] = 2452, [9] = 2457,
+ [10] = 2462, [11] = 2467, [12] = 2472, [13] = 2477,
+};
+
+static const struct {
+ int reg_domain;
+ int min, max, deflt;
+} iw_channel_table[] = {
+ {
+ .reg_domain = IW_REG_DOMAIN_FCC,
+ .min = 1,
+ .max = 11,
+ .deflt = 1,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_DOC,
+ .min = 1,
+ .max = 11,
+ .deflt = 1,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_ETSI,
+ .min = 1,
+ .max = 13,
+ .deflt = 1,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_SPAIN,
+ .min = 10,
+ .max = 11,
+ .deflt = 10,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_FRANCE,
+ .min = 10,
+ .max = 13,
+ .deflt = 10,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_MKK,
+ .min = 14,
+ .max = 14,
+ .deflt = 14,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_MKK1,
+ .min = 1,
+ .max = 14,
+ .deflt = 1,
+ },
+ {
+ .reg_domain = IW_REG_DOMAIN_ISRAEL,
+ .min = 3,
+ .max = 9,
+ .deflt = 9,
+ },
+};
+
+/**
+ * iw_valid_channel - validate channel in regulatory domain
+ * @reg_domain - regulatory domain
+ * @channel - channel to validate
+ *
+ * Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
+ */
+static int iw_valid_channel(int reg_domain, int channel)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
+ if (reg_domain == iw_channel_table[i].reg_domain) {
+ rc = channel >= iw_channel_table[i].min &&
+ channel <= iw_channel_table[i].max;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * iw_default_channel - get default channel for a regulatory domain
+ * @reg_domain - regulatory domain
+ *
+ * Returns the default channel for a regulatory domain
+ */
+static int iw_default_channel(int reg_domain)
+{
+ int i, rc = 1;
+
+ for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
+ if (reg_domain == iw_channel_table[i].reg_domain) {
+ rc = iw_channel_table[i].deflt;
+ break;
+ }
+ return rc;
+}
+
+static void iw_set_mgmt_info_element(enum iw_mgmt_info_element_ids id,
+ struct iw_mgmt_info_element *el,
+ void *value, int len)
+{
+ el->id = id;
+ el->len = len;
+ memcpy(el->data, value, len);
+}
+
+static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
+ struct iw_mgmt_info_element *from)
+{
+ iw_set_mgmt_info_element(from->id, to, from->data, from->len);
+}
+
+/*
+ * A linked list of "instances" of the wl24 device. Each actual PCMCIA card
+ * corresponds to one device instance, and is described by one dev_link_t
+ * structure (defined in ds.h).
+ *
+ * You may not want to use a linked list for this -- for example, the memory
+ * card driver uses an array of dev_link_t pointers, where minor device numbers
+ * are used to derive the corresponding array index.
+ */
+static dev_link_t *wl3501_dev_list;
+
+static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
+{
+ wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
+}
+
+/*
+ * Get Ethernet MAC address.
+ *
+ * WARNING: We switch to FPAGE0 and switch back again.
+ *          Make sure no other WL function is being called by the ISR.
+ */
+static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
+{
+ int base_addr = this->base_addr;
+
+ /* get MAC addr */
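+	/*
+	 * The MAC address and regulatory domain apparently live at local
+	 * address 0x4000 of flash page 3 (LMAH = 0x40, LMAL = 0x00); the
+	 * firmware version bytes come from 0x4004 of flash page 0.
+	 */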
+ wl3501_outb(WL3501_BSS_FPAGE3, base_addr + WL3501_NIC_BSS); /* BSS */
+ wl3501_outb(0x00, base_addr + WL3501_NIC_LMAL); /* LMAL */
+ wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH); /* LMAH */
+
+ /* wait for reading EEPROM */
+ WL3501_NOPLOOP(100);
+ this->mac_addr[0] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->mac_addr[1] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->mac_addr[2] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->mac_addr[3] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->mac_addr[4] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->mac_addr[5] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->reg_domain = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ wl3501_outb(WL3501_BSS_FPAGE0, base_addr + WL3501_NIC_BSS);
+ wl3501_outb(0x04, base_addr + WL3501_NIC_LMAL);
+ wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH);
+ WL3501_NOPLOOP(100);
+ this->version[0] = inb(base_addr + WL3501_NIC_IODPA);
+ WL3501_NOPLOOP(100);
+ this->version[1] = inb(base_addr + WL3501_NIC_IODPA);
+ /* switch to SRAM Page 0 (for safety) */
+ wl3501_switch_page(this, WL3501_BSS_SPAGE0);
+
+ /* The MAC addr should be 00:60:... */
+ return this->mac_addr[0] == 0x00 && this->mac_addr[1] == 0x60;
+}
+
+/**
+ * wl3501_set_to_wla - Move 'size' bytes from PC to card
+ * @dest: Card addressing space
+ * @src: PC addressing space
+ * @size: Bytes to move
+ *
+ * Move 'size' bytes from PC to card. (Shouldn't be interrupted)
+ */
+void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src, int size)
+{
+ /* switch to SRAM Page 0 */
+ wl3501_switch_page(this, (dest & 0x8000) ? WL3501_BSS_SPAGE1 :
+ WL3501_BSS_SPAGE0);
+ /* set LMAL and LMAH */
+ wl3501_outb(dest & 0xff, this->base_addr + WL3501_NIC_LMAL);
+ wl3501_outb(((dest >> 8) & 0x7f), this->base_addr + WL3501_NIC_LMAH);
+
+ /* rep out to Port A */
+ wl3501_outsb(this->base_addr + WL3501_NIC_IODPA, src, size);
+}
+
+/**
+ * wl3501_get_from_wla - Move 'size' bytes from card to PC
+ * @src: Card addressing space
+ * @dest: PC addressing space
+ * @size: Bytes to move
+ *
+ * Move 'size' bytes from card to PC. (Shouldn't be interrupted)
+ */
+void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
+ int size)
+{
+ /* switch to SRAM Page 0 */
+ wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 :
+ WL3501_BSS_SPAGE0);
+ /* set LMAL and LMAH */
+ wl3501_outb(src & 0xff, this->base_addr + WL3501_NIC_LMAL);
+ wl3501_outb((src >> 8) & 0x7f, this->base_addr + WL3501_NIC_LMAH);
+
+ /* rep get from Port A */
+ insb(this->base_addr + WL3501_NIC_IODPA, dest, size);
+}
+
+/*
+ * Get/Allocate a free Tx Data Buffer
+ *
+ * *--------------*-----------------*----------------------------------*
+ * | PLCP | MAC Header | DST SRC Data ... |
+ * |  (24 bytes)  |   (30 bytes)    |  (6)  (6)   (Ethernet Raw Data) |
+ * *--------------*-----------------*----------------------------------*
+ * \ \- IEEE 802.11 -/ \-------------- len --------------/
+ * \-struct wl3501_80211_tx_hdr--/ \-------- Ethernet Frame -------/
+ *
+ * Return = Position in Card
+ */
+static u16 wl3501_get_tx_buffer(struct wl3501_card *this, u16 len)
+{
+ u16 next, blk_cnt = 0, zero = 0;
+ u16 full_len = sizeof(struct wl3501_80211_tx_hdr) + len;
+ u16 ret = 0;
+
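+	/*
+	 * Each Tx block presumably carries WL3501_BLKSZ bytes, the first two
+	 * of which link to the next block, leaving 254 payload bytes per
+	 * block (see wl3501_init_firmware() and the bf + 2 writes in
+	 * wl3501_send_pkt()).
+	 */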
+ if (full_len > this->tx_buffer_cnt * 254)
+ goto out;
+ ret = this->tx_buffer_head;
+ while (full_len) {
+ if (full_len < 254)
+ full_len = 0;
+ else
+ full_len -= 254;
+ wl3501_get_from_wla(this, this->tx_buffer_head, &next,
+ sizeof(next));
+ if (!full_len)
+ wl3501_set_to_wla(this, this->tx_buffer_head, &zero,
+ sizeof(zero));
+ this->tx_buffer_head = next;
+ blk_cnt++;
+		/* bail out if the free list runs out before the request is satisfied */
+ if (!next && full_len) {
+ this->tx_buffer_head = ret;
+ ret = 0;
+ goto out;
+ }
+ }
+ this->tx_buffer_cnt -= blk_cnt;
+out:
+ return ret;
+}
+
+/*
+ * Free an allocated Tx Buffer. ptr must be a block position previously
+ * returned by wl3501_get_tx_buffer().
+ */
+static void wl3501_free_tx_buffer(struct wl3501_card *this, u16 ptr)
+{
+	/* if the free list is empty, the freed chain becomes the new head */
+ if (!this->tx_buffer_head)
+ this->tx_buffer_head = ptr;
+ else
+ wl3501_set_to_wla(this, this->tx_buffer_tail,
+ &ptr, sizeof(ptr));
+ while (ptr) {
+ u16 next;
+
+ this->tx_buffer_cnt++;
+ wl3501_get_from_wla(this, ptr, &next, sizeof(next));
+ this->tx_buffer_tail = ptr;
+ ptr = next;
+ }
+}
+
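+/*
+ * ESBQ request entries are 4 bytes: a 16 bit Tx buffer pointer followed by
+ * two status bytes. Bit 0x80 of the byte at offset 3 apparently means the
+ * slot is free (the firmware is done with it); wl3501_esbq_req() clears it
+ * when handing a new request to the firmware.
+ */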
+static int wl3501_esbq_req_test(struct wl3501_card *this)
+{
+ u8 tmp;
+
+ wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp));
+ return tmp & 0x80;
+}
+
+static void wl3501_esbq_req(struct wl3501_card *this, u16 *ptr)
+{
+ u16 tmp = 0;
+
+ wl3501_set_to_wla(this, this->esbq_req_head, ptr, 2);
+ wl3501_set_to_wla(this, this->esbq_req_head + 2, &tmp, sizeof(tmp));
+ this->esbq_req_head += 4;
+ if (this->esbq_req_head >= this->esbq_req_end)
+ this->esbq_req_head = this->esbq_req_start;
+}
+
+static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size)
+{
+ int rc = -EIO;
+
+ if (wl3501_esbq_req_test(this)) {
+ u16 ptr = wl3501_get_tx_buffer(this, sig_size);
+ if (ptr) {
+ wl3501_set_to_wla(this, ptr, sig, sig_size);
+ wl3501_esbq_req(this, &ptr);
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
+ void *bf, int size)
+{
+ struct wl3501_get_req sig = {
+ .sig_id = WL3501_SIG_GET_REQ,
+ .mib_attrib = index,
+ };
+ unsigned long flags;
+ int rc = -EIO;
+
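+	/*
+	 * mib_status doubles as a completion flag: set the sentinel 255 and
+	 * wait until the GET_CONFIRM interrupt handler fills in the real
+	 * status from the card.
+	 */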
+ spin_lock_irqsave(&this->lock, flags);
+ if (wl3501_esbq_req_test(this)) {
+ u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig));
+ if (ptr) {
+ wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
+ wl3501_esbq_req(this, &ptr);
+ this->sig_get_confirm.mib_status = 255;
+ spin_unlock_irqrestore(&this->lock, flags);
+ rc = wait_event_interruptible(this->wait,
+ this->sig_get_confirm.mib_status != 255);
+ if (!rc)
+ memcpy(bf, this->sig_get_confirm.mib_value,
+ size);
+ goto out;
+ }
+ }
+ spin_unlock_irqrestore(&this->lock, flags);
+out:
+ return rc;
+}
+
+static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
+{
+ struct wl3501_pwr_mgmt_req sig = {
+ .sig_id = WL3501_SIG_PWR_MGMT_REQ,
+ .pwr_save = suspend,
+ .wake_up = !suspend,
+ .receive_dtims = 10,
+ };
+ unsigned long flags;
+ int rc = -EIO;
+
+ spin_lock_irqsave(&this->lock, flags);
+ if (wl3501_esbq_req_test(this)) {
+ u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig));
+ if (ptr) {
+ wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
+ wl3501_esbq_req(this, &ptr);
+ this->sig_pwr_mgmt_confirm.status = 255;
+ spin_unlock_irqrestore(&this->lock, flags);
+ rc = wait_event_interruptible(this->wait,
+ this->sig_pwr_mgmt_confirm.status != 255);
+ printk(KERN_INFO "%s: %s status=%d\n", __FUNCTION__,
+ suspend ? "suspend" : "resume",
+ this->sig_pwr_mgmt_confirm.status);
+ goto out;
+ }
+ }
+ spin_unlock_irqrestore(&this->lock, flags);
+out:
+ return rc;
+}
+
+/**
+ * wl3501_send_pkt - Send a packet.
+ * @this - card
+ * @data - Ethernet raw frame (data[0] - data[5] is the destination MAC
+ *	   address, data[6] - data[11] is the source MAC address)
+ * @len - frame length in bytes
+ *
+ * Ref: IEEE 802.11
+ */
+static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
+{
+ u16 bf, sig_bf, next, tmplen, pktlen;
+ struct wl3501_md_req sig = {
+ .sig_id = WL3501_SIG_MD_REQ,
+ };
+ u8 *pdata = (char *)data;
+ int rc = -EIO;
+
+ if (wl3501_esbq_req_test(this)) {
+ sig_bf = wl3501_get_tx_buffer(this, sizeof(sig));
+ rc = -ENOMEM;
+ if (!sig_bf) /* No free buffer available */
+ goto out;
+ bf = wl3501_get_tx_buffer(this, len + 26 + 24);
+ if (!bf) {
+ /* No free buffer available */
+ wl3501_free_tx_buffer(this, sig_bf);
+ goto out;
+ }
+ rc = 0;
+ memcpy(&sig.daddr[0], pdata, 12);
+ pktlen = len - 12;
+ pdata += 12;
+ sig.data = bf;
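+		/*
+		 * A type/length field above 1500 means an Ethernet II frame;
+		 * this looks like RFC 1042 encapsulation: the SNAP header
+		 * (AA AA 03 00 00 00) goes into the addr4 slot of the Tx
+		 * header and the EtherType stays in the payload.
+		 */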
+ if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
+ u8 addr4[ETH_ALEN] = {
+ [0] = 0xAA, [1] = 0xAA, [2] = 0x03, [4] = 0x00,
+ };
+
+ wl3501_set_to_wla(this, bf + 2 +
+ offsetof(struct wl3501_tx_hdr, addr4),
+ addr4, sizeof(addr4));
+ sig.size = pktlen + 24 + 4 + 6;
+ if (pktlen > (254 - sizeof(struct wl3501_tx_hdr))) {
+ tmplen = 254 - sizeof(struct wl3501_tx_hdr);
+ pktlen -= tmplen;
+ } else {
+ tmplen = pktlen;
+ pktlen = 0;
+ }
+ wl3501_set_to_wla(this,
+ bf + 2 + sizeof(struct wl3501_tx_hdr),
+ pdata, tmplen);
+ pdata += tmplen;
+ wl3501_get_from_wla(this, bf, &next, sizeof(next));
+ bf = next;
+ } else {
+ sig.size = pktlen + 24 + 4 - 2;
+ pdata += 2;
+ pktlen -= 2;
+ if (pktlen > (254 - sizeof(struct wl3501_tx_hdr) + 6)) {
+ tmplen = 254 - sizeof(struct wl3501_tx_hdr) + 6;
+ pktlen -= tmplen;
+ } else {
+ tmplen = pktlen;
+ pktlen = 0;
+ }
+ wl3501_set_to_wla(this, bf + 2 +
+ offsetof(struct wl3501_tx_hdr, addr4),
+ pdata, tmplen);
+ pdata += tmplen;
+ wl3501_get_from_wla(this, bf, &next, sizeof(next));
+ bf = next;
+ }
+ while (pktlen > 0) {
+ if (pktlen > 254) {
+ tmplen = 254;
+ pktlen -= 254;
+ } else {
+ tmplen = pktlen;
+ pktlen = 0;
+ }
+ wl3501_set_to_wla(this, bf + 2, pdata, tmplen);
+ pdata += tmplen;
+ wl3501_get_from_wla(this, bf, &next, sizeof(next));
+ bf = next;
+ }
+ wl3501_set_to_wla(this, sig_bf, &sig, sizeof(sig));
+ wl3501_esbq_req(this, &sig_bf);
+ }
+out:
+ return rc;
+}
+
+static int wl3501_mgmt_resync(struct wl3501_card *this)
+{
+ struct wl3501_resync_req sig = {
+ .sig_id = WL3501_SIG_RESYNC_REQ,
+ };
+
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static inline int wl3501_fw_bss_type(struct wl3501_card *this)
+{
+ return this->net_type == IW_MODE_INFRA ? WL3501_NET_TYPE_INFRA :
+ WL3501_NET_TYPE_ADHOC;
+}
+
+static inline int wl3501_fw_cap_info(struct wl3501_card *this)
+{
+ return this->net_type == IW_MODE_INFRA ? WL3501_MGMT_CAPABILITY_ESS :
+ WL3501_MGMT_CAPABILITY_IBSS;
+}
+
+static int wl3501_mgmt_scan(struct wl3501_card *this, u16 chan_time)
+{
+ struct wl3501_scan_req sig = {
+ .sig_id = WL3501_SIG_SCAN_REQ,
+ .scan_type = WL3501_SCAN_TYPE_ACTIVE,
+ .probe_delay = 0x10,
+ .min_chan_time = chan_time,
+ .max_chan_time = chan_time,
+ .bss_type = wl3501_fw_bss_type(this),
+ };
+
+ this->bss_cnt = this->join_sta_bss = 0;
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
+{
+ struct wl3501_join_req sig = {
+ .sig_id = WL3501_SIG_JOIN_REQ,
+ .timeout = 10,
+ .ds_pset = {
+ .el = {
+ .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
+ .len = 1,
+ },
+ .chan = this->chan,
+ },
+ };
+
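+	/*
+	 * 72 bytes covers the fields from beacon_period through
+	 * bss_basic_rset, which appear to be laid out identically in
+	 * struct wl3501_scan_confirm and struct wl3501_join_req.
+	 */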
+ memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static int wl3501_mgmt_start(struct wl3501_card *this)
+{
+ struct wl3501_start_req sig = {
+ .sig_id = WL3501_SIG_START_REQ,
+ .beacon_period = 400,
+ .dtim_period = 1,
+ .ds_pset = {
+ .el = {
+ .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
+ .len = 1,
+ },
+ .chan = this->chan,
+ },
+ .bss_basic_rset = {
+ .el = {
+ .id = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
+ .len = 2,
+ },
+ .data_rate_labels = {
+ [0] = IW_MGMT_RATE_LABEL_MANDATORY |
+ IW_MGMT_RATE_LABEL_1MBIT,
+ [1] = IW_MGMT_RATE_LABEL_MANDATORY |
+ IW_MGMT_RATE_LABEL_2MBIT,
+ },
+ },
+ .operational_rset = {
+ .el = {
+ .id = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
+ .len = 2,
+ },
+ .data_rate_labels = {
+ [0] = IW_MGMT_RATE_LABEL_MANDATORY |
+ IW_MGMT_RATE_LABEL_1MBIT,
+ [1] = IW_MGMT_RATE_LABEL_MANDATORY |
+ IW_MGMT_RATE_LABEL_2MBIT,
+ },
+ },
+ .ibss_pset = {
+ .el = {
+ .id = IW_MGMT_INFO_ELEMENT_IBSS_PARAMETER_SET,
+ .len = 2,
+ },
+ .atim_window = 10,
+ },
+ .bss_type = wl3501_fw_bss_type(this),
+ .cap_info = wl3501_fw_cap_info(this),
+ };
+
+ iw_copy_mgmt_info_element(&sig.ssid.el, &this->essid.el);
+ iw_copy_mgmt_info_element(&this->keep_essid.el, &this->essid.el);
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
+{
+ u16 i = 0;
+ int matchflag = 0;
+ struct wl3501_scan_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+ if (sig.status == WL3501_STATUS_SUCCESS) {
+ dprintk(3, "success");
+ if ((this->net_type == IW_MODE_INFRA &&
+ (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+ (this->net_type == IW_MODE_ADHOC &&
+ (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+ this->net_type == IW_MODE_AUTO) {
+ if (!this->essid.el.len)
+ matchflag = 1;
+ else if (this->essid.el.len == 3 &&
+ !memcmp(this->essid.essid, "ANY", 3))
+ matchflag = 1;
+ else if (this->essid.el.len != sig.ssid.el.len)
+ matchflag = 0;
+ else if (memcmp(this->essid.essid, sig.ssid.essid,
+ this->essid.el.len))
+ matchflag = 0;
+ else
+ matchflag = 1;
+ if (matchflag) {
+ for (i = 0; i < this->bss_cnt; i++) {
+ if (!memcmp(this->bss_set[i].bssid,
+ sig.bssid, ETH_ALEN)) {
+ matchflag = 0;
+ break;
+ }
+ }
+ }
+ if (matchflag && (i < 20)) {
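+				/*
+				 * 73 bytes: the 72 byte parameter block (as
+				 * in wl3501_mgmt_join()) plus the rssi byte.
+				 */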
+ memcpy(&this->bss_set[i].beacon_period,
+ &sig.beacon_period, 73);
+ this->bss_cnt++;
+ this->rssi = sig.rssi;
+ }
+ }
+ } else if (sig.status == WL3501_STATUS_TIMEOUT) {
+ dprintk(3, "timeout");
+ this->join_sta_bss = 0;
+ for (i = this->join_sta_bss; i < this->bss_cnt; i++)
+ if (!wl3501_mgmt_join(this, i))
+ break;
+ this->join_sta_bss = i;
+ if (this->join_sta_bss == this->bss_cnt) {
+ if (this->net_type == IW_MODE_INFRA)
+ wl3501_mgmt_scan(this, 100);
+ else {
+ this->adhoc_times++;
+ if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
+ wl3501_mgmt_start(this);
+ else
+ wl3501_mgmt_scan(this, 100);
+ }
+ }
+ }
+}
+
+/**
+ * wl3501_block_interrupt - Mask interrupt from SUTRO
+ * @this - card
+ *
+ * Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST)
+ * Return: 1 if interrupt is originally enabled
+ */
+static int wl3501_block_interrupt(struct wl3501_card *this)
+{
+ u8 old = inb(this->base_addr + WL3501_NIC_GCR);
+ u8 new = old & (~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC |
+ WL3501_GCR_ENECINT));
+
+ wl3501_outb(new, this->base_addr + WL3501_NIC_GCR);
+ return old & WL3501_GCR_ENECINT;
+}
+
+/**
+ * wl3501_unblock_interrupt - Enable interrupt from SUTRO
+ * @this - card
+ *
+ * Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST)
+ * Return: 1 if interrupt is originally enabled
+ */
+static int wl3501_unblock_interrupt(struct wl3501_card *this)
+{
+ u8 old = inb(this->base_addr + WL3501_NIC_GCR);
+ u8 new = (old & ~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC)) |
+ WL3501_GCR_ENECINT;
+
+ wl3501_outb(new, this->base_addr + WL3501_NIC_GCR);
+ return old & WL3501_GCR_ENECINT;
+}
+
+/**
+ * wl3501_receive - Receive data from Receive Queue.
+ *
+ * Receive data from Receive Queue.
+ *
+ * @this: card
+ * @bf: destination buffer address in host memory
+ * @size: size of buffer.
+ */
+static u16 wl3501_receive(struct wl3501_card *this, u8 *bf, u16 size)
+{
+ u16 next_addr, next_addr1;
+ u8 *data = bf + 12;
+
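+	/*
+	 * The caller already copied the first 12 bytes (DA + SA) from
+	 * sig.daddr/saddr, so the payload is written from bf + 12 on.
+	 */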
+ size -= 12;
+ wl3501_get_from_wla(this, this->start_seg + 2,
+ &next_addr, sizeof(next_addr));
+ if (size > WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr)) {
+ wl3501_get_from_wla(this,
+ this->start_seg +
+ sizeof(struct wl3501_rx_hdr), data,
+ WL3501_BLKSZ -
+ sizeof(struct wl3501_rx_hdr));
+ size -= WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr);
+ data += WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr);
+ } else {
+ wl3501_get_from_wla(this,
+ this->start_seg +
+ sizeof(struct wl3501_rx_hdr),
+ data, size);
+ size = 0;
+ }
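+	/*
+	 * Continuation blocks appear to carry a short header of their own:
+	 * the next block pointer at offset 2 and payload from offset 5 on.
+	 */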
+ while (size > 0) {
+ if (size > WL3501_BLKSZ - 5) {
+ wl3501_get_from_wla(this, next_addr + 5, data,
+ WL3501_BLKSZ - 5);
+ size -= WL3501_BLKSZ - 5;
+ data += WL3501_BLKSZ - 5;
+ wl3501_get_from_wla(this, next_addr + 2, &next_addr1,
+ sizeof(next_addr1));
+ next_addr = next_addr1;
+ } else {
+ wl3501_get_from_wla(this, next_addr + 5, data, size);
+ size = 0;
+ }
+ }
+ return 0;
+}
+
+static void wl3501_esbq_req_free(struct wl3501_card *this)
+{
+ u8 tmp;
+ u16 addr;
+
+ if (this->esbq_req_head == this->esbq_req_tail)
+ goto out;
+ wl3501_get_from_wla(this, this->esbq_req_tail + 3, &tmp, sizeof(tmp));
+ if (!(tmp & 0x80))
+ goto out;
+ wl3501_get_from_wla(this, this->esbq_req_tail, &addr, sizeof(addr));
+ wl3501_free_tx_buffer(this, addr);
+ this->esbq_req_tail += 4;
+ if (this->esbq_req_tail >= this->esbq_req_end)
+ this->esbq_req_tail = this->esbq_req_start;
+out:
+ return;
+}
+
+static int wl3501_esbq_confirm(struct wl3501_card *this)
+{
+ u8 tmp;
+
+ wl3501_get_from_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp));
+ return tmp & 0x80;
+}
+
+static void wl3501_online(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+
+ printk(KERN_INFO "%s: Wireless LAN online. BSSID: "
+ "%02X %02X %02X %02X %02X %02X\n", dev->name,
+ this->bssid[0], this->bssid[1], this->bssid[2],
+ this->bssid[3], this->bssid[4], this->bssid[5]);
+ netif_wake_queue(dev);
+}
+
+static void wl3501_esbq_confirm_done(struct wl3501_card *this)
+{
+ u8 tmp = 0;
+
+ wl3501_set_to_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp));
+ this->esbq_confirm += 4;
+ if (this->esbq_confirm >= this->esbq_confirm_end)
+ this->esbq_confirm = this->esbq_confirm_start;
+}
+
+static int wl3501_mgmt_auth(struct wl3501_card *this)
+{
+ struct wl3501_auth_req sig = {
+ .sig_id = WL3501_SIG_AUTH_REQ,
+ .type = WL3501_SYS_TYPE_OPEN,
+ .timeout = 1000,
+ };
+
+ dprintk(3, "entry");
+ memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static int wl3501_mgmt_association(struct wl3501_card *this)
+{
+ struct wl3501_assoc_req sig = {
+ .sig_id = WL3501_SIG_ASSOC_REQ,
+ .timeout = 1000,
+ .listen_interval = 5,
+ .cap_info = this->cap_info,
+ };
+
+ dprintk(3, "entry");
+ memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
+ return wl3501_esbq_exec(this, &sig, sizeof(sig));
+}
+
+static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
+{
+ struct wl3501_card *this = dev->priv;
+ struct wl3501_join_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+ if (sig.status == WL3501_STATUS_SUCCESS) {
+ if (this->net_type == IW_MODE_INFRA) {
+ if (this->join_sta_bss < this->bss_cnt) {
+ const int i = this->join_sta_bss;
+ memcpy(this->bssid,
+ this->bss_set[i].bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].ds_pset.chan;
+ iw_copy_mgmt_info_element(&this->keep_essid.el,
+ &this->bss_set[i].ssid.el);
+ wl3501_mgmt_auth(this);
+ }
+ } else {
+ const int i = this->join_sta_bss;
+
+ memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].ds_pset.chan;
+ iw_copy_mgmt_info_element(&this->keep_essid.el,
+ &this->bss_set[i].ssid.el);
+ wl3501_online(dev);
+ }
+ } else {
+ int i;
+ this->join_sta_bss++;
+ for (i = this->join_sta_bss; i < this->bss_cnt; i++)
+ if (!wl3501_mgmt_join(this, i))
+ break;
+ this->join_sta_bss = i;
+ if (this->join_sta_bss == this->bss_cnt) {
+ if (this->net_type == IW_MODE_INFRA)
+ wl3501_mgmt_scan(this, 100);
+ else {
+ this->adhoc_times++;
+ if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
+ wl3501_mgmt_start(this);
+ else
+ wl3501_mgmt_scan(this, 100);
+ }
+ }
+ }
+}
+
+static inline void wl3501_alarm_interrupt(struct net_device *dev,
+ struct wl3501_card *this)
+{
+ if (this->net_type == IW_MODE_INFRA) {
+ printk(KERN_INFO "Wireless LAN offline\n");
+ netif_stop_queue(dev);
+ wl3501_mgmt_resync(this);
+ }
+}
+
+static inline void wl3501_md_confirm_interrupt(struct net_device *dev,
+ struct wl3501_card *this,
+ u16 addr)
+{
+ struct wl3501_md_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+ wl3501_free_tx_buffer(this, sig.data);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+}
+
+static inline void wl3501_md_ind_interrupt(struct net_device *dev,
+ struct wl3501_card *this, u16 addr)
+{
+ struct wl3501_md_ind sig;
+ struct sk_buff *skb;
+ u8 rssi, addr4[ETH_ALEN];
+ u16 pkt_len;
+
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+ this->start_seg = sig.data;
+ wl3501_get_from_wla(this,
+ sig.data + offsetof(struct wl3501_rx_hdr, rssi),
+ &rssi, sizeof(rssi));
+ this->rssi = rssi <= 63 ? (rssi * 100) / 64 : 255;
+
+ wl3501_get_from_wla(this,
+ sig.data +
+ offsetof(struct wl3501_rx_hdr, addr4),
+ &addr4, sizeof(addr4));
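+	/* Only RFC 1042 SNAP encapsulated frames (LLC AA AA 03) are handled */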
+ if (!(addr4[0] == 0xAA && addr4[1] == 0xAA &&
+ addr4[2] == 0x03 && addr4[4] == 0x00)) {
+		printk(KERN_INFO "Unsupported packet type!\n");
+ return;
+ }
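+	/*
+	 * sig.size seems to include the 802.11 header (24), FCS (4) and SNAP
+	 * header (6); adding back the 12 DA/SA bytes gives the Ethernet frame
+	 * length.
+	 */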
+ pkt_len = sig.size + 12 - 24 - 4 - 6;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+
+ if (!skb) {
+ printk(KERN_WARNING "%s: Can't alloc a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ this->stats.rx_dropped++;
+ } else {
+ skb->dev = dev;
+		skb_reserve(skb, 2);	/* align the IP header to a 16-byte boundary */
+ eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0);
+ wl3501_receive(this, skb->data, pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->last_rx = jiffies;
+ this->stats.rx_packets++;
+ this->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ }
+}
+
+static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this,
+ u16 addr, void *sig, int size)
+{
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
+ sizeof(this->sig_get_confirm));
+ wake_up(&this->wait);
+}
+
+static inline void wl3501_start_confirm_interrupt(struct net_device *dev,
+ struct wl3501_card *this,
+ u16 addr)
+{
+ struct wl3501_start_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+ if (sig.status == WL3501_STATUS_SUCCESS)
+ netif_wake_queue(dev);
+}
+
+static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev,
+ u16 addr)
+{
+ struct wl3501_card *this = dev->priv;
+ struct wl3501_assoc_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+
+ if (sig.status == WL3501_STATUS_SUCCESS)
+ wl3501_online(dev);
+}
+
+static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this,
+ u16 addr)
+{
+ struct wl3501_auth_confirm sig;
+
+ dprintk(3, "entry");
+ wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
+
+ if (sig.status == WL3501_STATUS_SUCCESS)
+ wl3501_mgmt_association(this);
+ else
+ wl3501_mgmt_resync(this);
+}
+
+static inline void wl3501_rx_interrupt(struct net_device *dev)
+{
+ int morepkts;
+ u16 addr;
+ u8 sig_id;
+ struct wl3501_card *this = dev->priv;
+
+ dprintk(3, "entry");
+loop:
+ morepkts = 0;
+ if (!wl3501_esbq_confirm(this))
+ goto free;
+ wl3501_get_from_wla(this, this->esbq_confirm, &addr, sizeof(addr));
+ wl3501_get_from_wla(this, addr + 2, &sig_id, sizeof(sig_id));
+
+ switch (sig_id) {
+ case WL3501_SIG_DEAUTH_IND:
+ case WL3501_SIG_DISASSOC_IND:
+ case WL3501_SIG_ALARM:
+ wl3501_alarm_interrupt(dev, this);
+ break;
+ case WL3501_SIG_MD_CONFIRM:
+ wl3501_md_confirm_interrupt(dev, this, addr);
+ break;
+ case WL3501_SIG_MD_IND:
+ wl3501_md_ind_interrupt(dev, this, addr);
+ break;
+ case WL3501_SIG_GET_CONFIRM:
+ wl3501_get_confirm_interrupt(this, addr,
+ &this->sig_get_confirm,
+ sizeof(this->sig_get_confirm));
+ break;
+ case WL3501_SIG_PWR_MGMT_CONFIRM:
+ wl3501_get_confirm_interrupt(this, addr,
+ &this->sig_pwr_mgmt_confirm,
+ sizeof(this->sig_pwr_mgmt_confirm));
+ break;
+ case WL3501_SIG_START_CONFIRM:
+ wl3501_start_confirm_interrupt(dev, this, addr);
+ break;
+ case WL3501_SIG_SCAN_CONFIRM:
+ wl3501_mgmt_scan_confirm(this, addr);
+ break;
+ case WL3501_SIG_JOIN_CONFIRM:
+ wl3501_mgmt_join_confirm(dev, addr);
+ break;
+ case WL3501_SIG_ASSOC_CONFIRM:
+ wl3501_assoc_confirm_interrupt(dev, addr);
+ break;
+ case WL3501_SIG_AUTH_CONFIRM:
+ wl3501_auth_confirm_interrupt(this, addr);
+ break;
+ case WL3501_SIG_RESYNC_CONFIRM:
+ wl3501_mgmt_resync(this); /* FIXME: should be resync_confirm */
+ break;
+ }
+ wl3501_esbq_confirm_done(this);
+ morepkts = 1;
+ /* free request if necessary */
+free:
+ wl3501_esbq_req_free(this);
+ if (morepkts)
+ goto loop;
+}
+
+static inline void wl3501_ack_interrupt(struct wl3501_card *this)
+{
+ wl3501_outb(WL3501_GCR_ECINT, this->base_addr + WL3501_NIC_GCR);
+}
+
+/**
+ * wl3501_interrupt - Hardware interrupt from card.
+ * @irq - Interrupt number
+ * @dev_id - net_device
+ * @regs - registers
+ *
+ * We must acknowledge the interrupt as soon as possible, and block the
+ * interrupt from the same card immediately to prevent re-entry.
+ *
+ * Before accessing the Control_Status_Block, we must lock SUTRO first.
+ * On the other hand, to prevent SUTRO from malfunctioning, we must
+ * unlock the SUTRO as soon as possible.
+ */
+static irqreturn_t wl3501_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct wl3501_card *this;
+ int handled = 1;
+
+ if (!dev)
+ goto unknown;
+ this = dev->priv;
+ spin_lock(&this->lock);
+ wl3501_ack_interrupt(this);
+ wl3501_block_interrupt(this);
+ wl3501_rx_interrupt(dev);
+ wl3501_unblock_interrupt(this);
+ spin_unlock(&this->lock);
+out:
+ return IRQ_RETVAL(handled);
+unknown:
+ handled = 0;
+ printk(KERN_ERR "%s: irq %d for unknown device.\n", __FUNCTION__, irq);
+ goto out;
+}
+
+static int wl3501_reset_board(struct wl3501_card *this)
+{
+ u8 tmp = 0;
+ int i, rc = 0;
+
+ /* Coreset */
+ wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);
+ wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);
+ wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);
+
+ /* Reset SRAM 0x480 to zero */
+ wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));
+
+ /* Start up */
+ wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);
+
+ WL3501_NOPLOOP(1024 * 50);
+
+ wl3501_unblock_interrupt(this); /* acme: was commented */
+
+ /* Polling Self_Test_Status */
+ for (i = 0; i < 10000; i++) {
+ wl3501_get_from_wla(this, 0x480, &tmp, sizeof(tmp));
+
+ if (tmp == 'W') {
+			/* firmware completed all tests successfully */
+ tmp = 'A';
+ wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));
+ goto out;
+ }
+ WL3501_NOPLOOP(10);
+ }
+ printk(KERN_WARNING "%s: failed to reset the board!\n", __FUNCTION__);
+ rc = -ENODEV;
+out:
+ return rc;
+}
+
+static int wl3501_init_firmware(struct wl3501_card *this)
+{
+ u16 ptr, next;
+ int rc = wl3501_reset_board(this);
+
+ if (rc)
+ goto fail;
+ this->card_name[0] = '\0';
+ wl3501_get_from_wla(this, 0x1a00,
+ this->card_name, sizeof(this->card_name));
+ this->card_name[sizeof(this->card_name) - 1] = '\0';
+ this->firmware_date[0] = '\0';
+ wl3501_get_from_wla(this, 0x1a40,
+ this->firmware_date, sizeof(this->firmware_date));
+ this->firmware_date[sizeof(this->firmware_date) - 1] = '\0';
+ /* Switch to SRAM Page 0 */
+ wl3501_switch_page(this, WL3501_BSS_SPAGE0);
+ /* Read parameter from card */
+ wl3501_get_from_wla(this, 0x482, &this->esbq_req_start, 2);
+ wl3501_get_from_wla(this, 0x486, &this->esbq_req_end, 2);
+ wl3501_get_from_wla(this, 0x488, &this->esbq_confirm_start, 2);
+ wl3501_get_from_wla(this, 0x48c, &this->esbq_confirm_end, 2);
+ wl3501_get_from_wla(this, 0x48e, &this->tx_buffer_head, 2);
+ wl3501_get_from_wla(this, 0x492, &this->tx_buffer_size, 2);
+ this->esbq_req_tail = this->esbq_req_head = this->esbq_req_start;
+ this->esbq_req_end += this->esbq_req_start;
+ this->esbq_confirm = this->esbq_confirm_start;
+ this->esbq_confirm_end += this->esbq_confirm_start;
+ /* Initial Tx Buffer */
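+	/*
+	 * Chain the Tx buffer area into a free list of WL3501_BLKSZ byte
+	 * blocks: the first two bytes of each block point to the next one,
+	 * and the list is terminated with 0.
+	 */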
+ this->tx_buffer_cnt = 1;
+ ptr = this->tx_buffer_head;
+ next = ptr + WL3501_BLKSZ;
+ while ((next - this->tx_buffer_head) < this->tx_buffer_size) {
+ this->tx_buffer_cnt++;
+ wl3501_set_to_wla(this, ptr, &next, sizeof(next));
+ ptr = next;
+ next = ptr + WL3501_BLKSZ;
+ }
+ rc = 0;
+ next = 0;
+ wl3501_set_to_wla(this, ptr, &next, sizeof(next));
+ this->tx_buffer_tail = ptr;
+out:
+ return rc;
+fail:
+ printk(KERN_WARNING "%s: failed!\n", __FUNCTION__);
+ goto out;
+}
+
+static int wl3501_close(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+ int rc = -ENODEV;
+ unsigned long flags;
+ dev_link_t *link;
+
+ spin_lock_irqsave(&this->lock, flags);
+ /* Check if the device is in wl3501_dev_list */
+ for (link = wl3501_dev_list; link; link = link->next)
+ if (link->priv == dev)
+ break;
+ if (!link)
+ goto out;
+ link->open--;
+
+ /* Stop wl3501_hard_start_xmit() from now on */
+ netif_stop_queue(dev);
+ wl3501_ack_interrupt(this);
+
+ /* Mask interrupts from the SUTRO */
+ wl3501_block_interrupt(this);
+
+ rc = 0;
+ printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
+out:
+ spin_unlock_irqrestore(&this->lock, flags);
+ return rc;
+}
+
+/**
+ * wl3501_reset - Reset the SUTRO.
+ * @dev - network device
+ *
+ * It is almost the same as wl3501_open(). In fact, we could just call
+ * wl3501_close() and wl3501_open() again, but I would rather not free_irq()
+ * while the driver is running; it seems dangerous.
+ */
+static int wl3501_reset(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+ int rc = -ENODEV;
+
+ wl3501_block_interrupt(this);
+
+ if (wl3501_init_firmware(this)) {
+ printk(KERN_WARNING "%s: Can't initialize Firmware!\n",
+ dev->name);
+ /* Free IRQ, and mark IRQ as unused */
+ free_irq(dev->irq, dev);
+ goto out;
+ }
+
+ /*
+ * Queue has to be started only when the Card is Started
+ */
+ netif_stop_queue(dev);
+ this->adhoc_times = 0;
+ wl3501_ack_interrupt(this);
+ wl3501_unblock_interrupt(this);
+ wl3501_mgmt_scan(this, 100);
+ dprintk(1, "%s: device reset", dev->name);
+ rc = 0;
+out:
+ return rc;
+}
+
+static void wl3501_tx_timeout(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+ struct net_device_stats *stats = &this->stats;
+ unsigned long flags;
+ int rc;
+
+ stats->tx_errors++;
+ spin_lock_irqsave(&this->lock, flags);
+ rc = wl3501_reset(dev);
+ spin_unlock_irqrestore(&this->lock, flags);
+ if (rc)
+ printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
+ dev->name, rc);
+ else {
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ }
+}
+
+/*
+ * Return : 0 - OK
+ *	    1 - Could not transmit (dev_queue_xmit will queue it
+ *		and try to send it later)
+ */
+static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int enabled, rc;
+ struct wl3501_card *this = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&this->lock, flags);
+ enabled = wl3501_block_interrupt(this);
+ dev->trans_start = jiffies;
+ rc = wl3501_send_pkt(this, skb->data, skb->len);
+ if (enabled)
+ wl3501_unblock_interrupt(this);
+ if (rc) {
+ ++this->stats.tx_dropped;
+ netif_stop_queue(dev);
+ } else {
+ ++this->stats.tx_packets;
+ this->stats.tx_bytes += skb->len;
+ kfree_skb(skb);
+
+ if (this->tx_buffer_cnt < 2)
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&this->lock, flags);
+ return rc;
+}
+
+static int wl3501_open(struct net_device *dev)
+{
+ int rc = -ENODEV;
+ struct wl3501_card *this = dev->priv;
+ unsigned long flags;
+ dev_link_t *link;
+
+ spin_lock_irqsave(&this->lock, flags);
+ /* Check if the device is in wl3501_dev_list */
+ for (link = wl3501_dev_list; link; link = link->next)
+ if (link->priv == dev)
+ break;
+ if (!DEV_OK(link))
+ goto out;
+ netif_device_attach(dev);
+ link->open++;
+
+ /* Initial WL3501 firmware */
+ dprintk(1, "%s: Initialize WL3501 firmware...", dev->name);
+ if (wl3501_init_firmware(this))
+ goto fail;
+ /* Initial device variables */
+ this->adhoc_times = 0;
+ /* Acknowledge Interrupt, for cleaning last state */
+ wl3501_ack_interrupt(this);
+
+ /* Enable interrupt from card after all */
+ wl3501_unblock_interrupt(this);
+ wl3501_mgmt_scan(this, 100);
+ rc = 0;
+ dprintk(1, "%s: WL3501 opened", dev->name);
+ printk(KERN_INFO "%s: Card Name: %s\n"
+ "%s: Firmware Date: %s\n",
+ dev->name, this->card_name,
+ dev->name, this->firmware_date);
+out:
+ spin_unlock_irqrestore(&this->lock, flags);
+ return rc;
+fail:
+ printk(KERN_WARNING "%s: Can't initialize firmware!\n", dev->name);
+ goto out;
+}
+
+struct net_device_stats *wl3501_get_stats(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+
+ return &this->stats;
+}
+
+struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
+{
+ struct wl3501_card *this = dev->priv;
+ struct iw_statistics *wstats = &this->wstats;
+ u32 value; /* size checked: it is u32 */
+
+ memset(wstats, 0, sizeof(*wstats));
+ wstats->status = netif_running(dev);
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_ICV_ERROR_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.code += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_UNDECRYPTABLE_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.code += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_EXCLUDED_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.code += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RETRY_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.retries = value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FAILED_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.misc += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_FAILURE_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.misc += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_ACK_FAILURE_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.misc += value;
+ if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAME_DUPLICATE_COUNT,
+ &value, sizeof(value)))
+ wstats->discard.misc += value;
+ return wstats;
+}
+
+static void wl3501_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, wl3501_dev_info, sizeof(info->driver));
+}
+
+static struct ethtool_ops ops = {
+ .get_drvinfo = wl3501_get_drvinfo
+};
+
+/**
+ * wl3501_detach - deletes a driver "instance"
+ * @link - FILL_IN
+ *
+ * This deletes a driver "instance". The device is de-registered with Card
+ * Services. If it has been released, all local data structures are freed.
+ * Otherwise, the structures will be freed when the device is released.
+ */
+static void wl3501_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+
+ /* Locate device structure */
+ for (linkp = &wl3501_dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (!*linkp)
+ goto out;
+
+ /* If the device is currently configured and active, we won't actually
+ * delete it yet. Instead, it is marked so that when the release()
+ * function is called, that will trigger a proper detach(). */
+
+ if (link->state & DEV_CONFIG) {
+#ifdef PCMCIA_DEBUG
+ printk(KERN_DEBUG "wl3501_cs: detach postponed, '%s' "
+ "still locked\n", link->dev->dev_name);
+#endif
+ goto out;
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ pcmcia_deregister_client(link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+
+ if (link->priv)
+ free_netdev(link->priv);
+ kfree(link);
+out:
+ return;
+}
+
+static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ strlcpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
+ return 0;
+}
+
+static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+ int channel = wrqu->freq.m;
+ int rc = -EINVAL;
+
+ if (iw_valid_channel(this->reg_domain, channel)) {
+ this->chan = channel;
+ rc = wl3501_reset(dev);
+ }
+ return rc;
+}
+
+static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
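+	/* freq is m * 10^e Hz: the table value is in MHz, scaled by 100000 */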
+ wrqu->freq.m = wl3501_chan2freq[this->chan - 1] * 100000;
+ wrqu->freq.e = 1;
+ return 0;
+}
+
+static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int rc = -EINVAL;
+
+ if (wrqu->mode == IW_MODE_INFRA ||
+ wrqu->mode == IW_MODE_ADHOC ||
+ wrqu->mode == IW_MODE_AUTO) {
+ struct wl3501_card *this = dev->priv;
+
+ this->net_type = wrqu->mode;
+ rc = wl3501_reset(dev);
+ }
+ return rc;
+}
+
+static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ wrqu->mode = this->net_type;
+ return 0;
+}
+
+static int wl3501_get_sens(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ wrqu->sens.value = this->rssi;
+ wrqu->sens.disabled = !wrqu->sens.value;
+ wrqu->sens.fixed = 1;
+ return 0;
+}
+
+static int wl3501_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+
+ /* Set the length (very important for backward compatibility) */
+ wrqu->data.length = sizeof(*range);
+
+ /* Set all the info we don't care or don't know about to zero */
+ memset(range, 0, sizeof(*range));
+
+ /* Set the Wireless Extension versions */
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 1;
+ range->throughput = 2 * 1000 * 1000; /* ~2 Mb/s */
+ /* FIXME: study the code to fill in more fields... */
+ return 0;
+}
+
+static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+ static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
+ int rc = -EINVAL;
+
+ /* FIXME: we support other ARPHRDs...*/
+ if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
+ goto out;
+ if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ /* FIXME: rescan? */
+ } else
+ memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
+ /* FIXME: rescan? deassoc & scan? */
+ rc = 0;
+out:
+ return rc;
+}
+
+static int wl3501_get_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(wrqu->ap_addr.sa_data, this->bssid, ETH_ALEN);
+ return 0;
+}
+
+static int wl3501_set_scan(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ /*
+ * FIXME: trigger scanning with a reset, yes, I'm lazy
+ */
+ return wl3501_reset(dev);
+}
+
+static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+ int i;
+ char *current_ev = extra;
+ struct iw_event iwe;
+
+ for (i = 0; i < this->bss_cnt; ++i) {
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
+ current_ev = iwe_stream_add_event(current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &iwe, IW_EV_ADDR_LEN);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = this->bss_set[i].ssid.el.len;
+ current_ev = iwe_stream_add_point(current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &iwe,
+ this->bss_set[i].ssid.essid);
+ iwe.cmd = SIOCGIWMODE;
+ iwe.u.mode = this->bss_set[i].bss_type;
+ current_ev = iwe_stream_add_event(current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &iwe, IW_EV_UINT_LEN);
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
+ iwe.u.freq.e = 0;
+ current_ev = iwe_stream_add_event(current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &iwe, IW_EV_FREQ_LEN);
+ iwe.cmd = SIOCGIWENCODE;
+ if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ current_ev = iwe_stream_add_point(current_ev,
+ extra + IW_SCAN_MAX_DATA,
+ &iwe, NULL);
+ }
+ /* Length of data */
+ wrqu->data.length = (current_ev - extra);
+ wrqu->data.flags = 0; /* FIXME: set properly these flags */
+ return 0;
+}
+
+static int wl3501_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ if (wrqu->data.flags) {
+ iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID,
+ &this->essid.el,
+ extra, wrqu->data.length);
+ } else { /* We accept any ESSID */
+ iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID,
+ &this->essid.el, "ANY", 3);
+ }
+ return wl3501_reset(dev);
+}
+
+static int wl3501_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&this->lock, flags);
+ wrqu->essid.flags = 1;
+ wrqu->essid.length = this->essid.el.len;
+ memcpy(extra, this->essid.essid, this->essid.el.len);
+ spin_unlock_irqrestore(&this->lock, flags);
+ return 0;
+}
+
+static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ if (wrqu->data.length > sizeof(this->nick))
+ return -E2BIG;
+ strlcpy(this->nick, extra, wrqu->data.length);
+ return 0;
+}
+
+static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct wl3501_card *this = dev->priv;
+
+ strlcpy(extra, this->nick, 32);
+ wrqu->data.length = strlen(extra);
+ return 0;
+}
+
+static int wl3501_get_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ /*
+	 * FIXME: have to see where to get this info from, perhaps this card
+	 * works at 1 Mbit/s too... for now leave it at 2 Mbit/s, which is the
+	 * most common with the Planet Access Points. -acme
+ */
+ wrqu->bitrate.value = 2000000;
+ wrqu->bitrate.fixed = 1;
+ return 0;
+}
+
+static int wl3501_get_rts_threshold(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 threshold; /* size checked: it is u16 */
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_THRESHOLD,
+ &threshold, sizeof(threshold));
+ if (!rc) {
+ wrqu->rts.value = threshold;
+ wrqu->rts.disabled = threshold >= 2347;
+ wrqu->rts.fixed = 1;
+ }
+ return rc;
+}
+
+static int wl3501_get_frag_threshold(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 threshold; /* size checked: it is u16 */
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAG_THRESHOLD,
+ &threshold, sizeof(threshold));
+ if (!rc) {
+ wrqu->frag.value = threshold;
+ wrqu->frag.disabled = threshold >= 2346;
+ wrqu->frag.fixed = 1;
+ }
+ return rc;
+}
+
+static int wl3501_get_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 txpow;
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this,
+ WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL,
+ &txpow, sizeof(txpow));
+ if (!rc) {
+ wrqu->txpower.value = txpow;
+ wrqu->txpower.disabled = 0;
+ /*
+ * From the MIB values I think this can be configurable,
+ * as it lists several tx power levels -acme
+ */
+ wrqu->txpower.fixed = 0;
+ wrqu->txpower.flags = IW_TXPOW_MWATT;
+ }
+ return rc;
+}
+
+static int wl3501_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 retry; /* size checked: it is u8 */
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this,
+ WL3501_MIB_ATTR_LONG_RETRY_LIMIT,
+ &retry, sizeof(retry));
+ if (rc)
+ goto out;
+ if (wrqu->retry.flags & IW_RETRY_MAX) {
+ wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ goto set_value;
+ }
+ rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_SHORT_RETRY_LIMIT,
+ &retry, sizeof(retry));
+ if (rc)
+ goto out;
+ wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
+set_value:
+ wrqu->retry.value = retry;
+ wrqu->retry.disabled = 0;
+out:
+ return rc;
+}
+
+static int wl3501_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 implemented, restricted, keys[100], len_keys, tocopy;
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this,
+ WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED,
+ &implemented, sizeof(implemented));
+ if (rc)
+ goto out;
+ if (!implemented) {
+ wrqu->encoding.flags = IW_ENCODE_DISABLED;
+ goto out;
+ }
+ rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_EXCLUDE_UNENCRYPTED,
+ &restricted, sizeof(restricted));
+ if (rc)
+ goto out;
+ wrqu->encoding.flags = restricted ? IW_ENCODE_RESTRICTED :
+ IW_ENCODE_OPEN;
+ rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS_LEN,
+ &len_keys, sizeof(len_keys));
+ if (rc)
+ goto out;
+ rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS,
+ keys, len_keys);
+ if (rc)
+ goto out;
+ tocopy = min_t(u8, len_keys, wrqu->encoding.length);
+ tocopy = min_t(u8, tocopy, 100);
+ wrqu->encoding.length = tocopy;
+ memset(extra, 0, tocopy);
+ memcpy(extra, keys, tocopy);
+out:
+ return rc;
+}
+
+static int wl3501_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 pwr_state;
+ struct wl3501_card *this = dev->priv;
+ int rc = wl3501_get_mib_value(this,
+ WL3501_MIB_ATTR_CURRENT_PWR_STATE,
+ &pwr_state, sizeof(pwr_state));
+ if (rc)
+ goto out;
+ wrqu->power.disabled = !pwr_state;
+ wrqu->power.flags = IW_POWER_ON;
+out:
+ return rc;
+}
+
+static const iw_handler wl3501_handler[] = {
+ [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name,
+ [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq,
+ [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq,
+ [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode,
+ [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode,
+ [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens,
+ [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range,
+ [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy,
+ [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy,
+ [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy,
+ [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy,
+ [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap,
+ [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap,
+ [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan,
+ [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan,
+ [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid,
+ [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid,
+ [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick,
+ [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick,
+ [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate,
+ [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold,
+ [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold,
+ [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow,
+ [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry,
+ [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode,
+ [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power,
+};
+
+static const struct iw_handler_def wl3501_handler_def = {
+ .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler),
+ .standard = (iw_handler *)wl3501_handler,
+ .spy_offset = offsetof(struct wl3501_card, spy_data),
+};
+
+/**
+ * wl3501_attach - creates an "instance" of the driver
+ *
+ * Creates an "instance" of the driver, allocating local data structures for
+ * one device. The device is registered with Card Services.
+ *
+ * The dev_link structure is initialized, but we don't actually configure the
+ * card at this point -- we wait until we receive a card insertion event.
+ */
+static dev_link_t *wl3501_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int ret;
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+ memset(link, 0, sizeof(struct dev_link_t));
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->irq.Handler = wl3501_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ dev = alloc_etherdev(sizeof(struct wl3501_card));
+ if (!dev)
+ goto out_link;
+ dev->open = wl3501_open;
+ dev->stop = wl3501_close;
+ dev->hard_start_xmit = wl3501_hard_start_xmit;
+ dev->tx_timeout = wl3501_tx_timeout;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->get_stats = wl3501_get_stats;
+ dev->get_wireless_stats = wl3501_get_wireless_stats;
+ dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def;
+ SET_ETHTOOL_OPS(dev, &ops);
+ netif_stop_queue(dev);
+ link->priv = link->irq.Instance = dev;
+
+ /* Register with Card Services */
+ link->next = wl3501_dev_list;
+ wl3501_dev_list = link;
+ client_reg.dev_info = &wl3501_dev_info;
+ client_reg.EventMask = CS_EVENT_CARD_INSERTION |
+ CS_EVENT_RESET_PHYSICAL |
+ CS_EVENT_CARD_RESET |
+ CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_PM_SUSPEND |
+ CS_EVENT_PM_RESUME;
+ client_reg.event_handler = wl3501_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret) {
+ cs_error(link->handle, RegisterClient, ret);
+ wl3501_detach(link);
+ link = NULL;
+ }
+out:
+ return link;
+out_link:
+ kfree(link);
+ link = NULL;
+ goto out;
+}
+
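+/* Record which Card Services call failed and jump to the cs_failed label,
+ * where cs_error() reports it. */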
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+/**
+ * wl3501_config - configure the PCMCIA socket and make eth device available
+ * @link - dev_link_t structure of the card being configured
+ *
+ * wl3501_config() is scheduled to run after a CARD_INSERTION event is
+ * received, to configure the PCMCIA socket, and to make the ethernet device
+ * available to the system.
+ */
+static void wl3501_config(dev_link_t *link)
+{
+ tuple_t tuple;
+ cisparse_t parse;
+ client_handle_t handle = link->handle;
+ struct net_device *dev = link->priv;
+ int i = 0, j, last_fn, last_ret;
+ unsigned char bf[64];
+ struct wl3501_card *this;
+
+ /* This reads the card's CONFIG tuple to find its config registers. */
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ tuple.TupleData = bf;
+ tuple.TupleDataMax = sizeof(bf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Try allocating IO ports. This tries a few fixed addresses. If you
+ * want, you can also read the card's config table to pick addresses --
+ * see the serial driver for an example. */
+
+ for (j = 0x280; j < 0x400; j += 0x20) {
+		/* Walk the 0x280-0x3ff range in steps of 0x20 until a free
+		 * I/O window is found */
+ link->io.BasePort1 = j;
+ link->io.BasePort2 = link->io.BasePort1 + 0x10;
+ i = pcmcia_request_io(link->handle, &link->io);
+ if (i == CS_SUCCESS)
+ break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+
+ /* Now allocate an interrupt line. Note that this does not actually
+ * assign a handler to the interrupt. */
+
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+
+ /* This actually configures the PCMCIA socket -- setting up the I/O
+ * windows and the interrupt mapping. */
+
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ if (register_netdev(dev)) {
+ printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ SET_MODULE_OWNER(dev);
+
+ this = dev->priv;
+ /*
+ * At this point, the dev_node_t structure(s) should be initialized and
+ * arranged in a linked list at link->dev.
+ */
+ link->dev = &this->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ this->base_addr = dev->base_addr;
+
+ if (!wl3501_get_flash_mac_addr(this)) {
+ printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
+ dev->name);
+ goto failed;
+ }
+ strcpy(this->node.dev_name, dev->name);
+
+ /* print probe information */
+ printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, MAC addr in flash ROM:",
+ dev->name, this->base_addr, (int)dev->irq);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+ printk("\n");
+ /*
+ * Initialize card parameters - added by jss
+ */
+ this->net_type = IW_MODE_INFRA;
+ this->bss_cnt = 0;
+ this->join_sta_bss = 0;
+ this->adhoc_times = 0;
+ iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, &this->essid.el,
+ "ANY", 3);
+ this->card_name[0] = '\0';
+ this->firmware_date[0] = '\0';
+ this->rssi = 255;
+ this->chan = iw_default_channel(this->reg_domain);
+ strlcpy(this->nick, "Planet WL3501", sizeof(this->nick));
+ spin_lock_init(&this->lock);
+ init_waitqueue_head(&this->wait);
+ netif_start_queue(dev);
+ goto out;
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ wl3501_release(link);
+out:
+ return;
+}
+
+/**
+ * wl3501_release - unregister the net, release PCMCIA configuration
+ * @link - dev_link_t structure of the card being released
+ *
+ * After a card is removed, wl3501_release() will unregister the net device,
+ * and release the PCMCIA configuration. If the device is still open, this
+ * will be postponed until it is closed.
+ */
+static void wl3501_release(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+
+ /* Unlink the device chain */
+ if (link->dev) {
+ unregister_netdev(dev);
+ link->dev = NULL;
+ }
+
+ /* Don't bother checking to see if these succeed or not */
+ pcmcia_release_configuration(link->handle);
+ pcmcia_release_io(link->handle, &link->io);
+ pcmcia_release_irq(link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+}
+
+/**
+ * wl3501_event - The card status event handler
+ * @event - event
+ * @pri - priority
+ * @args - arguments for this event
+ *
+ * The card status event handler. Mostly, this schedules other stuff to run
+ * after an event is received. A CARD_REMOVAL event also sets some flags to
+ * discourage the net drivers from trying to talk to the card any more.
+ *
+ * When a CARD_REMOVAL event is received, we immediately set a flag to block
+ * future accesses to this device. All the functions that actually access the
+ * device should check this flag to make sure the card is still present.
+ */
+static int wl3501_event(event_t event, int pri, event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ while (link->open > 0)
+ wl3501_close(dev);
+ netif_device_detach(dev);
+ wl3501_release(link);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ wl3501_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND);
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ pcmcia_release_configuration(link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ wl3501_pwr_mgmt(dev->priv, WL3501_RESUME);
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
+ wl3501_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+static struct pcmcia_driver wl3501_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = "wl3501_cs",
+ },
+ .attach = wl3501_attach,
+ .detach = wl3501_detach,
+};
+
+static int __init wl3501_init_module(void)
+{
+ return pcmcia_register_driver(&wl3501_driver);
+}
+
+static void __exit wl3501_exit_module(void)
+{
+ dprintk(0, ": unloading");
+ pcmcia_unregister_driver(&wl3501_driver);
+ BUG_ON(wl3501_dev_list != NULL);
+}
+
+module_init(wl3501_init_module);
+module_exit(wl3501_exit_module);
+
+MODULE_AUTHOR("Fox Chen <mhchen@golf.ccl.itri.org.tw>, "
+ "Arnaldo Carvalho de Melo <acme@conectiva.com.br>,"
+ "Gustavo Niemeyer <niemeyer@conectiva.com>");
+MODULE_DESCRIPTION("Planet wl3501 wireless driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
new file mode 100644
index 000000000000..9da925430109
--- /dev/null
+++ b/drivers/net/yellowfin.c
@@ -0,0 +1,1499 @@
+/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+/*
+ Written 1997-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/yellowfin.html
+
+
+ Linux kernel changelog:
+ -----------------------
+
+ LK1.1.1 (jgarzik): Port to 2.4 kernel
+
+ LK1.1.2 (jgarzik):
+ * Merge in becker version 1.05
+
+ LK1.1.3 (jgarzik):
+ * Various cleanups
+ * Update yellowfin_timer to correctly calculate duplex.
+ (suggested by Manfred Spraul)
+
+ LK1.1.4 (val@nmt.edu):
+ * Fix three endian-ness bugs
+ * Support dual function SYM53C885E ethernet chip
+
+ LK1.1.5 (val@nmt.edu):
+ * Fix forced full-duplex bug I introduced
+
+ LK1.1.6 (val@nmt.edu):
+ * Only print warning on truly "oversized" packets
+ * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior
+
+*/
+
+#define DRV_NAME "yellowfin"
+#define DRV_VERSION "1.05+LK1.1.6"
+#define DRV_RELDATE "Feb 11, 2002"
+
+#define PFX DRV_NAME ": "
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+static int mtu;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+/* System-wide count of bogus-rx frames. */
+static int bogus_rx;
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#elif defined(YF_NEW) /* A future perfect board :->. */
+static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
+static int fifo_cfg = 0x0028;
+#else
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1514 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.
+ No media types are currently defined. These exist for driver
+ interoperability.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Do ugly workaround for GX server chipset errata. */
+static int gx_fix;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too long decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
+#define RX_RING_SIZE 64
+#define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
+#define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#define yellowfin_debug debug
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/unaligned.h>
+#include <asm/io.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
+KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
+KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(gx_fix, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
+MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
+MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Yellowfin" Gigabit
+Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
+Symbios 53C885E dual function chip.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
+This is a descriptor list scheme similar to that used by the EEPro100 and
+Tulip. This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Yellowfin as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.
+
+IIIb. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'yp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
+Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
+and an AlphaStation to verify the Alpha port!
+
+IVb. References
+
+Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
+Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
+ Data Manual v3.0
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+IVc. Errata
+
+See Packet Engines confidential appendix (prototype chips only).
+*/
+
+
+
+enum pci_id_flags_bits {
+ /* Set PCI command register bits before calling probe1(). */
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ /* Read and map the single following PCI BAR. */
+ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+ PCI_UNUSED_IRQ=0x800,
+};
+enum capability_flags {
+ HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
+ HasMACAddrBug=32, /* Only on early revs. */
+	DontUseEeprom=64,	/* Don't read the MAC from the EEPROM. */
+};
+/* The PCI I/O space extent. */
+#define YELLOWFIN_SIZE 0x100
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+struct pci_id_info {
+ const char *name;
+ struct match_info {
+ int pci, pci_mask, subsystem, subsystem_mask;
+ int revision, revision_mask; /* Only 8 bits. */
+ } id;
+ enum pci_id_flags_bits pci_flags;
+ int io_size; /* Needed for I/O region check or ioremap(). */
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE,
+ FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
+ {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
+ {NULL,},
+};
+
+static struct pci_device_id yellowfin_pci_tbl[] = {
+ { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
+
+
+/* Offsets to the Yellowfin registers. Various sizes and alignments. */
+enum yellowfin_offsets {
+ TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
+ TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
+ RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
+ RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
+ EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
+ ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
+ Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+ RxDepth=0xB8, FlowCtrl=0xBC,
+ AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
+ EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
+ EEFeature=0xF5,
+};
+
+/* The Yellowfin Rx and Tx buffer descriptors.
+ Elements are written as 32 bit for endian portability. */
+struct yellowfin_desc {
+ u32 dbdma_cmd;
+ u32 addr;
+ u32 branch_addr;
+ u32 result_status;
+};
+
+struct tx_status_words {
+#ifdef __BIG_ENDIAN
+ u16 tx_errs;
+ u16 tx_cnt;
+ u16 paused;
+ u16 total_tx_cnt;
+#else /* Little endian chips. */
+ u16 tx_cnt;
+ u16 tx_errs;
+ u16 total_tx_cnt;
+ u16 paused;
+#endif /* __BIG_ENDIAN */
+};
+
+/* Bits in yellowfin_desc.cmd */
+enum desc_cmd_bits {
+ CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
+ CMD_NOP=0x60000000, CMD_STOP=0x70000000,
+ BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
+ BRANCH_IFTRUE=0x040000,
+};
+
+/* Bits in yellowfin_desc.status */
+enum desc_status_bits { RX_EOP=0x0040, };
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
+ IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
+ IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
+#define PRIV_ALIGN 31 /* Required alignment mask */
+#define MII_CNT 4
+struct yellowfin_private {
+ /* Descriptor rings first for alignment.
+ Tx requires a second descriptor for status. */
+ struct yellowfin_desc *rx_ring;
+ struct yellowfin_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ struct tx_status_words *tx_status;
+ dma_addr_t tx_status_dma;
+
+ struct timer_list timer; /* Media selection timer. */
+ struct net_device_stats stats;
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct tx_status_words *tx_tail_desc;
+ unsigned int cur_tx, dirty_tx;
+ int tx_threshold;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+static int read_eeprom(void __iomem *ioaddr, int location);
+static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
+static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int yellowfin_open(struct net_device *dev);
+static void yellowfin_timer(unsigned long data);
+static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int yellowfin_rx(struct net_device *dev);
+static void yellowfin_error(struct net_device *dev, int intr_status);
+static int yellowfin_close(struct net_device *dev);
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
+
+
+static int __devinit yellowfin_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct yellowfin_private *np;
+ int irq;
+ int chip_idx = ent->driver_data;
+ static int find_cnt;
+ void __iomem *ioaddr;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ int drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev) {
+ printk (KERN_ERR PFX "cannot allocate ethernet device\n");
+ return -ENOMEM;
+ }
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ np = netdev_priv(dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_free_netdev;
+
+ pci_set_master (pdev);
+
+ ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ irq = pdev->irq;
+
+ if (drv_flags & DontUseEeprom)
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ else {
+ int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ }
+
+ /* Reset the chip. */
+ iowrite32(0x80000000, ioaddr + DMACtrl);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ pci_set_drvdata(pdev, dev);
+ spin_lock_init(&np->lock);
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = drv_flags;
+ np->base = ioaddr;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = (struct yellowfin_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct yellowfin_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_rx;
+ np->tx_status = (struct tx_status_words *)ring_space;
+ np->tx_status_dma = ring_dma;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+ /* The Yellowfin-specific entries in the device structure. */
+ dev->open = &yellowfin_open;
+ dev->hard_start_xmit = &yellowfin_start_xmit;
+ dev->stop = &yellowfin_close;
+ dev->get_stats = &yellowfin_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &netdev_ioctl;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->tx_timeout = yellowfin_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_status;
+
+ printk(KERN_INFO "%s: %s type %8x at %p, ",
+ dev->name, pci_id_tbl[chip_idx].name,
+ ioread32(ioaddr + ChipRev), ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ if (np->drv_flags & HasMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ find_cnt++;
+
+ return 0;
+
+err_out_unmap_status:
+ pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
+ np->tx_status_dma);
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+{
+	int bogus_cnt = 10000;		/* Typical 33MHz: 1050 ticks */
+
+ iowrite8(location, ioaddr + EEAddr);
+ iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
+ while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
+ ;
+ return ioread8(ioaddr + EERead);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
+{
+ int i;
+
+ iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
+ iowrite16(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((ioread16(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return ioread16(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
+ iowrite16(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((ioread16(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
+
+
+static int yellowfin_open(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int i;
+
+ /* Reset the chip. */
+ iowrite32(0x80000000, ioaddr + DMACtrl);
+
+ i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
+ if (i) return i;
+
+ if (yellowfin_debug > 1)
+ printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ yellowfin_init_ring(dev);
+
+ iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
+ iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
+
+ /* Set up various condition 'select' registers.
+ There are no options here. */
+ iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
+ iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
+ iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
+ iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
+ iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
+ iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
+
+	/* Initialize other registers: with so many, this will eventually be
+	   converted to an offset/value list. */
+ iowrite32(dma_ctrl, ioaddr + DMACtrl);
+ iowrite16(fifo_cfg, ioaddr + FIFOcfg);
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
+
+ yp->tx_threshold = 32;
+ iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
+
+ if (dev->if_port == 0)
+ dev->if_port = yp->default_port;
+
+ netif_start_queue(dev);
+
+ /* Setting the Rx mode will start the Rx process. */
+ if (yp->drv_flags & IsGigabit) {
+ /* We are always in full-duplex mode with gigabit! */
+ yp->full_duplex = 1;
+ iowrite16(0x01CF, ioaddr + Cnfg);
+ } else {
+ iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ iowrite16(0x1018, ioaddr + FrameGap1);
+ iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+ }
+ set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
+ iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+ iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
+ iowrite32(0x80008000, ioaddr + TxCtrl);
+
+ if (yellowfin_debug > 2) {
+ printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
+ dev->name);
+ }
+
+ /* Set the timer to check for link beat. */
+ init_timer(&yp->timer);
+ yp->timer.expires = jiffies + 3*HZ;
+ yp->timer.data = (unsigned long)dev;
+ yp->timer.function = &yellowfin_timer; /* timer handler */
+ add_timer(&yp->timer);
+
+ return 0;
+}
+
+static void yellowfin_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int next_tick = 60*HZ;
+
+ if (yellowfin_debug > 3) {
+ printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ }
+
+ if (yp->mii_cnt) {
+ int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
+ int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
+ int negotiated = lpa & yp->advertising;
+ if (yellowfin_debug > 1)
+ printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
+ "link partner capability %4.4x.\n",
+ dev->name, yp->phys[0], bmsr, lpa);
+
+ yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
+
+ iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+
+ if (bmsr & BMSR_LSTATUS)
+ next_tick = 60*HZ;
+ else
+ next_tick = 3*HZ;
+ }
+
+ yp->timer.expires = jiffies + next_tick;
+ add_timer(&yp->timer);
+}
+
+static void yellowfin_tx_timeout(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+
+ printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
+ "status %4.4x, Rx status %4.4x, resetting...\n",
+ dev->name, yp->cur_tx, yp->dirty_tx,
+ ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+
+ /* Note: these should be KERN_DEBUG. */
+ if (yellowfin_debug) {
+ int i;
+ printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", yp->rx_ring[i].result_status);
+ printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
+ yp->tx_ring[i].result_status);
+ printk("\n");
+ }
+
+ /* If the hardware is found to hang regularly, we will update the code
+ to reinitialize the chip here. */
+ dev->if_port = 0;
+
+ /* Wake the potentially-idle transmit channel. */
+ iowrite32(0x10001000, yp->base + TxCtrl);
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_wake_queue (dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+ yp->stats.tx_errors++;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void yellowfin_init_ring(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ int i;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+ yp->dirty_tx = 0;
+
+ yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
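+	/* Post each Rx descriptor with the buffer size and chain it to the
+	 * next descriptor via branch_addr so the DBDMA engine walks the ring. */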
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
+ }
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+#define NO_TXSTATS
+#ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = NULL;
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+ ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
+#else
+{
+ int j;
+
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ j = 2*i;
+ yp->tx_skbuff[i] = 0;
+ /* Branch on Tx error. */
+ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+			(j+1)*sizeof(struct yellowfin_desc));
+ j++;
+		if (yp->drv_flags & FullTxStatus) {
+ yp->tx_ring[j].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
+ yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
+ yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
+				i*sizeof(struct tx_status_words));
+ } else {
+ /* Symbios chips write only tx_errs word. */
+ yp->tx_ring[j].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
+ yp->tx_ring[j].request_cnt = 2;
+ /* Om pade ummmmm... */
+			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
+				i*sizeof(struct tx_status_words) +
+				offsetof(struct tx_status_words, tx_errs));
+ }
+ yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+ ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
+ }
+ /* Wrap ring */
+ yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
+}
+#endif
+ yp->tx_tail_desc = &yp->tx_status[0];
+ return;
+}
+
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ unsigned entry;
+ int len = skb->len;
+
+ netif_stop_queue (dev);
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = yp->cur_tx % TX_RING_SIZE;
+
+ if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
+ int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
+ /* Fix GX chipset errata. */
+ if (cacheline_end > 24 || cacheline_end == 0) {
+ len = skb->len + 32 - cacheline_end + 1;
+ if (len != skb->len)
+ skb = skb_padto(skb, len);
+ }
+ if (skb == NULL) {
+ yp->tx_skbuff[entry] = NULL;
+ netif_wake_queue(dev);
+ return 0;
+ }
+ }
+ yp->tx_skbuff[entry] = skb;
+
+#ifdef NO_TXSTATS
+ yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, len, PCI_DMA_TODEVICE));
+ yp->tx_ring[entry].result_status = 0;
+ if (entry >= TX_RING_SIZE-1) {
+ /* New stop command. */
+ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
+ } else {
+ yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[entry].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
+ }
+ yp->cur_tx++;
+#else
+ yp->tx_ring[entry<<1].request_cnt = len;
+ yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, len, PCI_DMA_TODEVICE));
+ /* The input_last (status-write) command is constant, but we must
+ rewrite the subsequent 'stop' command. */
+
+ yp->cur_tx++;
+ {
+ unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
+ yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ }
+ /* Final step -- overwrite the old 'stop' command. */
+
+ yp->tx_ring[entry<<1].dbdma_cmd =
+ cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
+ CMD_TX_PKT | BRANCH_IFTRUE) | len);
+#endif
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ iowrite32(0x10001000, yp->base + TxCtrl);
+
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_start_queue (dev); /* Typical path */
+ else
+ yp->tx_full = 1;
+ dev->trans_start = jiffies;
+
+ if (yellowfin_debug > 4) {
+ printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
+ dev->name, yp->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_instance;
+ struct yellowfin_private *yp;
+ void __iomem *ioaddr;
+ int boguscnt = max_interrupt_work;
+ unsigned int handled = 0;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+#endif
+
+ yp = netdev_priv(dev);
+ ioaddr = yp->base;
+
+ spin_lock (&yp->lock);
+
+ do {
+ u16 intr_status = ioread16(ioaddr + IntrClear);
+
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+ handled = 1;
+
+ if (intr_status & (IntrRxDone | IntrEarlyRx)) {
+ yellowfin_rx(dev);
+ iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
+ }
+
+#ifdef NO_TXSTATS
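+		/* Reap finished Tx descriptors: count the packet, unmap its
+		 * DMA buffer and free the skb. */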
+ for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
+ int entry = yp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (yp->tx_ring[entry].result_status == 0)
+ break;
+ skb = yp->tx_skbuff[entry];
+ yp->stats.tx_packets++;
+ yp->stats.tx_bytes += skb->len;
+ /* Free the original skb. */
+ pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ yp->tx_skbuff[entry] = NULL;
+ }
+ if (yp->tx_full
+ && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+#else
+ if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
+ unsigned dirty_tx = yp->dirty_tx;
+
+ for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+ u16 tx_errs = yp->tx_status[entry].tx_errs;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (yellowfin_debug > 5)
+ printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
+ "%4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
+#endif
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+ skb = yp->tx_skbuff[entry];
+ if (tx_errs & 0xF810) {
+				/* There was a major error, log it. */
+#ifndef final_version
+ if (yellowfin_debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+ yp->stats.tx_errors++;
+ if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
+ if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
+ if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
+ if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
+ } else {
+#ifndef final_version
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+ yp->stats.tx_bytes += skb->len;
+ yp->stats.collisions += tx_errs & 15;
+ yp->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ pci_unmap_single(yp->pci_dev,
+ yp->tx_ring[entry<<1].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ yp->tx_skbuff[entry] = 0;
+ /* Mark status as empty. */
+ yp->tx_status[entry].tx_errs = 0;
+ }
+
+#ifndef final_version
+ if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (yp->tx_full
+ && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ yp->dirty_tx = dirty_tx;
+ yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
+ }
+#endif
+
+ /* Log errors and other uncommon events. */
+ if (intr_status & 0x2ee) /* Abnormal error summary. */
+ yellowfin_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (yellowfin_debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+
+ spin_unlock (&yp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int yellowfin_rx(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ int entry = yp->cur_rx % RX_RING_SIZE;
+ int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
+
+ if (yellowfin_debug > 4) {
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
+ entry, yp->rx_ring[entry].result_status);
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
+ entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
+ yp->rx_ring[entry].result_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct yellowfin_desc *desc = &yp->rx_ring[entry];
+ struct sk_buff *rx_skb = yp->rx_skbuff[entry];
+ s16 frame_status;
+ u16 desc_status;
+ int data_size;
+ u8 *buf_addr;
+
+ if(!desc->result_status)
+ break;
+ pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
+ yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ desc_status = le32_to_cpu(desc->result_status) >> 16;
+ buf_addr = rx_skb->tail;
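+		/* Received length = posted buffer size (low bits of dbdma_cmd)
+		 * minus the residual count left in result_status; the frame
+		 * status word is read from the last two received bytes. */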
+ data_size = (le32_to_cpu(desc->dbdma_cmd) -
+ le32_to_cpu(desc->result_status)) & 0xffff;
+ frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RX_EOP)) {
+ if (data_size != 0)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
+ " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
+ yp->stats.rx_length_errors++;
+ } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
+			/* There was an error. */
+ if (yellowfin_debug > 3)
+ printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
+ frame_status);
+ yp->stats.rx_errors++;
+ if (frame_status & 0x0060) yp->stats.rx_length_errors++;
+ if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
+ if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
+ if (frame_status < 0) yp->stats.rx_dropped++;
+ } else if ( !(yp->drv_flags & IsGigabit) &&
+ ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
+ u8 status1 = buf_addr[data_size-2];
+ u8 status2 = buf_addr[data_size-1];
+ yp->stats.rx_errors++;
+ if (status1 & 0xC0) yp->stats.rx_length_errors++;
+ if (status2 & 0x03) yp->stats.rx_frame_errors++;
+ if (status2 & 0x04) yp->stats.rx_crc_errors++;
+ if (status2 & 0x80) yp->stats.rx_dropped++;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+		} else if ((yp->drv_flags & HasMACAddrBug) &&
+			memcmp(buf_addr, dev->dev_addr, 6) != 0 &&
+			memcmp(buf_addr, "\377\377\377\377\377\377", 6) != 0) {
+ if (bogus_rx++ == 0)
+ printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x.\n",
+ dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
+ buf_addr[3], buf_addr[4], buf_addr[5]);
+#endif
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = data_size -
+ (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
+ /* To verify: Yellowfin Length should omit the CRC! */
+
+#ifndef final_version
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to just pass up the skbuff
+ without copying to a properly sized skbuff. */
+ if (pkt_len > rx_copybreak) {
+ skb_put(skb = rx_skb, pkt_len);
+ pci_unmap_single(yp->pci_dev,
+ yp->rx_ring[entry].addr,
+ yp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ yp->rx_skbuff[entry] = NULL;
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ break;
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
+ yp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ yp->stats.rx_packets++;
+ yp->stats.rx_bytes += pkt_len;
+ }
+ entry = (++yp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
+ entry = yp->dirty_rx % RX_RING_SIZE;
+ if (yp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ yp->rx_skbuff[entry] = skb;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
+ if (entry != 0)
+ yp->rx_ring[entry - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ else
+ yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
+ | yp->rx_buf_sz);
+ }
+
+ return 0;
+}
+
+static void yellowfin_error(struct net_device *dev, int intr_status)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear what to do here. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ yp->stats.tx_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ yp->stats.rx_errors++;
+}
+
+static int yellowfin_close(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int i;
+
+ netif_stop_queue (dev);
+
+ if (yellowfin_debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, ioread16(ioaddr + TxStatus),
+ ioread16(ioaddr + RxStatus),
+ ioread16(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnb);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite32(0x80000000, ioaddr + RxCtrl);
+ iowrite32(0x80000000, ioaddr + TxCtrl);
+
+ del_timer(&yp->timer);
+
+#if defined(__i386__)
+ if (yellowfin_debug > 2) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n",
+ (unsigned long long)yp->tx_ring_dma);
+ for (i = 0; i < TX_RING_SIZE*2; i++)
+ printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
+ i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
+ yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
+ printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
+ yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
+
+ printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n",
+ (unsigned long long)yp->rx_ring_dma);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
+ i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
+ yp->rx_ring[i].result_status);
+ if (yellowfin_debug > 6) {
+ if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (yp->rx_skbuff[i]) {
+ dev_kfree_skb(yp->rx_skbuff[i]);
+ }
+ yp->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (yp->tx_skbuff[i])
+ dev_kfree_skb(yp->tx_skbuff[i]);
+ yp->tx_skbuff[i] = NULL;
+ }
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ if (yellowfin_debug > 0) {
+ printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
+ dev->name, bogus_rx);
+ }
+#endif
+
+ return 0;
+}
+
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ return &yp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor. */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ u16 cfg_value = ioread16(ioaddr + Cnfg);
+
+ /* Stop the Rx process to change any value. */
+ iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ iowrite16(0x000F, ioaddr + AddrMode);
+ } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well, or accept all multicasts. */
+ iowrite16(0x000B, ioaddr + AddrMode);
+ } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ struct dev_mc_list *mclist;
+ u16 hash_table[4];
+ int i;
+ memset(hash_table, 0, sizeof(hash_table));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ unsigned int bit;
+
+ /* Due to a bug in the early chip versions, multiple filter
+ slots must be set for each address. */
+ if (yp->drv_flags & HasMulticastBug) {
+ bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << bit);
+ bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << bit);
+ bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << bit);
+ }
+ bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << bit);
+ }
+ /* Copy the hash table to the chip. */
+ for (i = 0; i < 4; i++)
+ iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
+ iowrite16(0x0003, ioaddr + AddrMode);
+ } else { /* Normal, unicast/broadcast-only mode. */
+ iowrite16(0x0001, ioaddr + AddrMode);
+ }
+ /* Restart the Rx process. */
+ iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
+}
+
+static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct yellowfin_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = yellowfin_get_drvinfo
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct yellowfin_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = np->phys[0] & 0x1f;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data->phy_id == np->phys[0]) {
+ u16 value = data->val_in;
+ switch (data->reg_num) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
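+ /* 0x9000 = BMCR_RESET | BMCR_ANENABLE, 0x0100 = BMCR_FULLDPLX. */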
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct yellowfin_private *np;
+
+ if (!dev)
+ BUG();
+ np = netdev_priv(dev);
+
+ pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
+ np->tx_status_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ unregister_netdev (dev);
+
+ pci_iounmap(pdev, np->base);
+
+ pci_release_regions (pdev);
+
+ free_netdev (dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
+static struct pci_driver yellowfin_driver = {
+ .name = DRV_NAME,
+ .id_table = yellowfin_pci_tbl,
+ .probe = yellowfin_init_one,
+ .remove = __devexit_p(yellowfin_remove_one),
+};
+
+
+static int __init yellowfin_init (void)
+{
+/* When built as a module, this is printed whether or not any devices are found during probe. */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_module_init (&yellowfin_driver);
+}
+
+
+static void __exit yellowfin_cleanup (void)
+{
+ pci_unregister_driver (&yellowfin_driver);
+}
+
+
+module_init(yellowfin_init);
+module_exit(yellowfin_cleanup);
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
+ * compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
+ * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
new file mode 100644
index 000000000000..3ac047bc727d
--- /dev/null
+++ b/drivers/net/znet.c
@@ -0,0 +1,948 @@
+/* znet.c: A Zenith Z-Note ethernet driver for linux. */
+
+/*
+ Written by Donald Becker.
+
+ The author may be reached as becker@scyld.com.
+ This driver is based on the Linux skeleton driver. The copyright of the
+ skeleton driver is held by the United States Government, as represented
+ by DIRNSA, and it is released under the GPL.
+
+ Thanks to Mike Hollick for alpha testing and suggestions.
+
+ References:
+ The Crynwr packet driver.
+
+ "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
+ Intel Microcommunications Databook, Vol. 1, 1990.
+ As usual with Intel, the documentation is incomplete and inaccurate.
+ I had to read the Crynwr packet driver to figure out how to actually
+ use the i82593, and guess at what register bits matched the loosely
+ related i82586.
+
+ Theory of Operation
+
+ The i82593 used in the Zenith Z-Note series operates using two(!) slave
+ DMA channels, one interrupt, and one 8-bit I/O port.
+
+ While there are several ways to configure the '593 DMA system, I chose the one
+ that seemed commensurate with the highest system performance in the face
+ of moderate interrupt latency: Both DMA channels are configured as
+ recirculating ring buffers, with one channel (#0) dedicated to Rx and
+ the other channel (#1) to Tx and configuration. (Note that this is
+ different than the Crynwr driver, where the Tx DMA channel is initialized
+ before each operation. That approach simplifies operation and Tx error
+ recovery, but requires additional I/O in normal operation and precludes
+ transmit buffer chaining.)
+
+ Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
+ a reasonable ring size for Rx, while simplifying DMA buffer allocation --
+ DMA buffers must not cross a 128K boundary. (In truth the size selection
+ was influenced by my lack of '593 documentation. I thus was constrained
+ to use the Crynwr '593 initialization table, which sets the Rx ring size
+ to 8K.)
+
+ Despite my usual low opinion about Intel-designed parts, I must admit
+ that the bulk data handling of the i82593 is a good design for
+ an integrated system, like a laptop, where using two slave DMA channels
+ doesn't pose a problem. I still take issue with using only a single I/O
+ port. In the same controlled environment there are essentially no
+ limitations on I/O space, and using multiple locations would eliminate
+ the need for multiple operations when looking at status registers,
+ setting the Rx ring boundary, or switching to promiscuous mode.
+
+ I also question Zenith's selection of the '593: one of the advertised
+ advantages of earlier Intel parts was that if you figured out the magic
+ initialization incantation you could use the same part on many different
+ network types. Zenith's use of the "FriendlyNet" (sic) connector rather
+ than an on-board transceiver leads me to believe that they were planning
+ to take advantage of this. But, uhmmm, the '593 omits all but ethernet
+ functionality from the serial subsystem.
+ */
+
+/* 10/2002
+
+ o Resurrected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> :
+
+ - Removed strange DMA snooping in znet_send_packet, which led to
+ TX buffer corruption on my laptop.
+ - Use init_etherdev stuff.
+ - Use kmalloc-ed DMA buffers.
+ - Use as few global variables as possible.
+ - Use proper resources management.
+ - Use wireless/i82593.h as much as possible (structure, constants)
+ - Compiles as a module or built-in.
+ - Now survives unplugging/replugging cable.
+
+ Some code was taken from wavelan_cs.
+
+ Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on
+ anything else. Testers (and detailed bug reports) are welcome :-).
+
+ o TODO :
+
+ - Properly handle multicast
+ - Understand why some traffic patterns add a 1s latency...
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+/* This include could be elsewhere, since it is not wireless specific */
+#include "wireless/i82593.h"
+
+static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
+
+#ifndef ZNET_DEBUG
+#define ZNET_DEBUG 1
+#endif
+static unsigned int znet_debug = ZNET_DEBUG;
+module_param (znet_debug, int, 0);
+MODULE_PARM_DESC (znet_debug, "ZNet debug level");
+MODULE_LICENSE("GPL");
+
+/* The DMA modes we need aren't in <dma.h>. */
+#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
+#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
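+/* dma_page_eq: true when both pointers lie in the same 128K region (the bits
+ above bit 17 match); the DMA buffers must not cross such a boundary. */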
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+#define RX_BUF_SIZE 8192
+#define TX_BUF_SIZE 8192
+#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
+
+#define TX_TIMEOUT 10
+
+struct znet_private {
+ int rx_dma, tx_dma;
+ struct net_device_stats stats;
+ spinlock_t lock;
+ short sia_base, sia_size, io_size;
+ struct i82593_conf_block i593_init;
+ /* The starting, current, and end pointers for the packet buffers. */
+ ushort *rx_start, *rx_cur, *rx_end;
+ ushort *tx_start, *tx_cur, *tx_end;
+ ushort tx_buf_len; /* Tx buffer length, in words. */
+};
+
+/* Only one can be built-in;-> */
+static struct net_device *znet_dev;
+
+struct netidblk {
+ char magic[8]; /* The magic number (string) "NETIDBLK" */
+ unsigned char netid[8]; /* The physical station address */
+ char nettype, globalopt;
+ char vendor[8]; /* The machine vendor and product name. */
+ char product[8];
+ char irq1, irq2; /* Interrupts, only one is currently used. */
+ char dma1, dma2;
+ short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
+ short iobase1, iosize1;
+ short iobase2, iosize2; /* Second iobase unused. */
+ char driver_options; /* Misc. bits */
+ char pad;
+};
+
+static int znet_open(struct net_device *dev);
+static int znet_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t znet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void znet_rx(struct net_device *dev);
+static int znet_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void hardware_init(struct net_device *dev);
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
+static void znet_tx_timeout (struct net_device *dev);
+
+/* Request needed resources */
+static int znet_request_resources (struct net_device *dev)
+{
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
+
+ if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
+ goto failed;
+ if (request_dma (znet->rx_dma, "ZNet rx"))
+ goto free_irq;
+ if (request_dma (znet->tx_dma, "ZNet tx"))
+ goto free_rx_dma;
+ if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
+ goto free_tx_dma;
+ if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
+ goto free_sia;
+
+ return 0; /* Happy ! */
+
+ free_sia:
+ release_region (znet->sia_base, znet->sia_size);
+ free_tx_dma:
+ flags = claim_dma_lock();
+ free_dma (znet->tx_dma);
+ release_dma_lock (flags);
+ free_rx_dma:
+ flags = claim_dma_lock();
+ free_dma (znet->rx_dma);
+ release_dma_lock (flags);
+ free_irq:
+ free_irq (dev->irq, dev);
+ failed:
+ return -1;
+}
+
+static void znet_release_resources (struct net_device *dev)
+{
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
+
+ release_region (znet->sia_base, znet->sia_size);
+ release_region (dev->base_addr, znet->io_size);
+ flags = claim_dma_lock();
+ free_dma (znet->tx_dma);
+ free_dma (znet->rx_dma);
+ release_dma_lock (flags);
+ free_irq (dev->irq, dev);
+}
+
+/* Keep the magical SIA stuff in a single function... */
+static void znet_transceiver_power (struct net_device *dev, int on)
+{
+ struct znet_private *znet = dev->priv;
+ unsigned char v;
+
+ /* Turn on/off the 82501 SIA, using zenith-specific magic. */
+ /* Select LAN control register */
+ outb(0x10, znet->sia_base);
+
+ if (on)
+ v = inb(znet->sia_base + 1) | 0x84;
+ else
+ v = inb(znet->sia_base + 1) & ~0x84;
+
+ outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */
+}
+
+/* Init the i82593, with current promisc/mcast configuration.
+ Also used from hardware_init. */
+static void znet_set_multicast_list (struct net_device *dev)
+{
+ struct znet_private *znet = dev->priv;
+ short ioaddr = dev->base_addr;
+ struct i82593_conf_block *cfblk = &znet->i593_init;
+
+ memset(cfblk, 0x00, sizeof(struct i82593_conf_block));
+
+ /* The configuration block. What an undocumented nightmare.
+ The first set of values are those suggested (without explanation)
+ for ethernet in the Intel 82586 databook. The rest appear to be
+ completely undocumented, except for cryptic notes in the Crynwr
+ packet driver. This driver uses the Crynwr values verbatim. */
+
+ /* maz : Rewritten to take advantage of the wavelan includes.
+ At least we have names, not just blind values */
+
+ /* Byte 0 */
+ cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */
+ cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */
+ cfblk->fifo_32 = 1;
+ cfblk->d6mod = 0; /* Run in i82593 advanced mode */
+ cfblk->throttle_enb = 1;
+
+ /* Byte 1 */
+ cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */
+ cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */
+ cfblk->contin = 1; /* enable continuous mode */
+
+ /* Byte 2 */
+ cfblk->addr_len = ETH_ALEN;
+ cfblk->acloc = 1; /* Disable source addr insertion by i82593 */
+ cfblk->preamb_len = 2; /* 8 bytes preamble */
+ cfblk->loopback = 0; /* Loopback off */
+
+ /* Byte 3 */
+ cfblk->lin_prio = 0; /* Default priorities & backoff methods. */
+ cfblk->tbofstop = 0;
+ cfblk->exp_prio = 0;
+ cfblk->bof_met = 0;
+
+ /* Byte 4 */
+ cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */
+
+ /* Byte 5 */
+ cfblk->slottim_low = 0; /* 512 bit times slot time (low) */
+
+ /* Byte 6 */
+ cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */
+ cfblk->max_retr = 15; /* 15 collisions retries */
+
+ /* Byte 7 */
+ cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 1 : 0); /* Promiscuous mode */
+ cfblk->bc_dis = 0; /* Enable broadcast reception */
+ cfblk->crs_1 = 0; /* Don't transmit without carrier sense */
+ cfblk->nocrc_ins = 0; /* i82593 generates CRC */
+ cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */
+ cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */
+
+ /* Byte 8 */
+ cfblk->cs_filter = 0; /* CS is recognized immediately */
+ cfblk->crs_src = 0; /* External carrier sense */
+ cfblk->cd_filter = 0; /* CD is recognized immediately */
+
+ /* Byte 9 */
+ cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */
+
+ /* Byte A */
+ cfblk->lng_typ = 1; /* Type/length checks OFF */
+ cfblk->lng_fld = 1; /* Disable 802.3 length field check */
+ cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */
+ cfblk->artx = 1; /* Disable automatic retransmission */
+ cfblk->sarec = 1; /* Disable source addr trig of CD */
+ cfblk->tx_jabber = 0; /* Disable jabber jam sequence */
+ cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */
+ cfblk->lbpkpol = 0; /* Loopback pin active high */
+
+ /* Byte B */
+ cfblk->fdx = 0; /* Disable full duplex operation */
+
+ /* Byte C */
+ cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */
+ cfblk->mult_ia = 0; /* No multiple individual addresses */
+ cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */
+
+ /* Byte D */
+ cfblk->dummy_1 = 1; /* set to 1 */
+ cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
+ cfblk->mc_all = (dev->mc_list || (dev->flags&IFF_ALLMULTI));/* multicast all mode */
+ cfblk->rcv_mon = 0; /* Monitor mode disabled */
+ cfblk->frag_acpt = 0; /* Do not accept fragments */
+ cfblk->tstrttrs = 0; /* No start transmission threshold */
+
+ /* Byte E */
+ cfblk->fretx = 1; /* FIFO automatic retransmission */
+ cfblk->runt_eop = 0; /* drop "runt" packets */
+ cfblk->hw_sw_pin = 0; /* ?? */
+ cfblk->big_endn = 0; /* Big Endian ? no... */
+ cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */
+ cfblk->sttlen = 1; /* 6 byte status registers */
+ cfblk->rx_eop = 0; /* Signal EOP on packet reception */
+ cfblk->tx_eop = 0; /* Signal EOP on packet transmission */
+
+ /* Byte F */
+ cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */
+ cfblk->rcvstop = 1; /* Enable Receive Stop Register */
+
+ if (znet_debug > 2) {
+ int i;
+ unsigned char *c;
+
+ for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++)
+ printk ("%02X ", c[i]);
+ printk ("\n");
+ }
+
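+ /* Queue the configuration in the Tx DMA buffer: a length word, the
+ config block itself, then kick off a CONFIGURE command. */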
+ *znet->tx_cur++ = sizeof(struct i82593_conf_block);
+ memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block));
+ znet->tx_cur += sizeof(struct i82593_conf_block)/2;
+ outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
+
+ /* XXX FIXME maz : Add multicast addresses here, so that having a
+ * multicast address configured isn't equivalent to IFF_ALLMULTI */
+}
+
+/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
+ BIOS area. We just scan for the signature, and pull the vital parameters
+ out of the structure. */
+
+static int __init znet_probe (void)
+{
+ int i;
+ struct netidblk *netinfo;
+ struct znet_private *znet;
+ struct net_device *dev;
+ char *p;
+ int err = -ENOMEM;
+
+ /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+ for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
+ if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+ break;
+
+ if (p >= (char *)phys_to_virt(0x100000)) {
+ if (znet_debug > 1)
+ printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct znet_private));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER (dev);
+
+ znet = dev->priv;
+
+ netinfo = (struct netidblk *)p;
+ dev->base_addr = netinfo->iobase1;
+ dev->irq = netinfo->irq1;
+
+ printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr);
+
+ /* The station address is in the "netidblk" at 0x0f0000. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]);
+
+ printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1,
+ netinfo->dma2);
+
+ if (znet_debug > 1) {
+ printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+ dev->name, netinfo->vendor,
+ netinfo->irq1, netinfo->irq2,
+ netinfo->dma1, netinfo->dma2);
+ printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+ dev->name, netinfo->iobase1, netinfo->iosize1,
+ netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+ }
+
+ if (znet_debug > 0)
+ printk(KERN_INFO "%s", version);
+
+ znet->rx_dma = netinfo->dma1;
+ znet->tx_dma = netinfo->dma2;
+ spin_lock_init(&znet->lock);
+ znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */
+ znet->sia_size = 2;
+ /* maz: Despite the '593 being advertised above as using a
+ * single 8-bit I/O port, this driver performs many 16-bit
+ * accesses, so set io_size accordingly. */
+ znet->io_size = 2;
+
+ if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+ goto free_dev;
+ if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+ goto free_rx;
+
+ if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
+ !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
+ printk (KERN_WARNING "tx/rx buffers cross a 128K DMA boundary, giving up\n");
+ goto free_tx;
+ }
+
+ znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
+ znet->tx_buf_len = TX_BUF_SIZE/2;
+ znet->tx_end = znet->tx_start + znet->tx_buf_len;
+
+ /* The ZNET-specific entries in the device structure. */
+ dev->open = &znet_open;
+ dev->hard_start_xmit = &znet_send_packet;
+ dev->stop = &znet_close;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &znet_set_multicast_list;
+ dev->tx_timeout = znet_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ err = register_netdev(dev);
+ if (err)
+ goto free_tx;
+ znet_dev = dev;
+ return 0;
+
+ free_tx:
+ kfree(znet->tx_start);
+ free_rx:
+ kfree(znet->rx_start);
+ free_dev:
+ free_netdev(dev);
+ return err;
+}
+
+
+static int znet_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 2)
+ printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+ /* These should never fail. You can't add devices to a sealed box! */
+ if (znet_request_resources (dev)) {
+ printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+ return -EBUSY;
+ }
+
+ znet_transceiver_power (dev, 1);
+
+ /* According to the Crynwr driver we should wait 50 msec. for the
+ LAN clock to stabilize. My experiments indicate that the '593 can
+ be initialized immediately. The delay is probably needed for the
+ DC-to-DC converter to come up to full voltage, and for the oscillator
+ to be spot-on at 20 MHz before transmitting.
+ Until this proves to be a problem we rely on the higher layers for the
+ delay and save allocating a timer entry. */
+
+ /* maz : Well, without the delay I get the following message
+ * every time on a 486@33. This machine is much too
+ * fast... :-) So maybe the Crynwr driver wasn't wrong after
+ * all, even if the message is completely harmless on my
+ * setup. */
+ mdelay (50);
+
+ /* This follows the packet driver's lead, and checks for success. */
+ if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+ printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+ dev->name);
+
+ hardware_init(dev);
+ netif_start_queue (dev);
+
+ return 0;
+}
+
+
+static void znet_tx_timeout (struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ ushort event, tx_status, rx_offset, state;
+
+ outb (CR0_STATUS_0, ioaddr);
+ event = inb (ioaddr);
+ outb (CR0_STATUS_1, ioaddr);
+ tx_status = inw (ioaddr);
+ outb (CR0_STATUS_2, ioaddr);
+ rx_offset = inw (ioaddr);
+ outb (CR0_STATUS_3, ioaddr);
+ state = inb (ioaddr);
+ printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+ " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+ if (tx_status == TX_LOST_CRS)
+ printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+ dev->name);
+ outb (OP0_RESET, ioaddr);
+ hardware_init (dev);
+ netif_wake_queue (dev);
+}
+
+static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
+ short length = skb->len;
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
+
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Check that the part hasn't reset itself, probably from suspend. */
+ outb(CR0_STATUS_0, ioaddr);
+ if (inw(ioaddr) == 0x0010 &&
+ inw(ioaddr) == 0x0000 &&
+ inw(ioaddr) == 0x0010) {
+ if (znet_debug > 1)
+ printk (KERN_WARNING "%s : waking up\n", dev->name);
+ hardware_init(dev);
+ znet_transceiver_power (dev, 1);
+ }
+
+ if (1) {
+ unsigned char *buf = (void *)skb->data;
+ ushort *tx_link = znet->tx_cur - 1;
+ ushort rnd_len = (length + 1)>>1;
+
+ znet->stats.tx_bytes+=length;
+
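+ /* Ring entry layout: a length word, the data rounded up to a whole
+ word, then a zero terminator; tx_link (computed above as the word
+ just before the new entry) is patched with the transmit command
+ once the data is in place. */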
+ if (znet->tx_cur >= znet->tx_end)
+ znet->tx_cur = znet->tx_start;
+ *znet->tx_cur++ = length;
+ if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
+ int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
+ memcpy(znet->tx_cur, buf, semi_cnt);
+ rnd_len -= semi_cnt>>1;
+ memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
+ znet->tx_cur = znet->tx_start + rnd_len;
+ } else {
+ memcpy(znet->tx_cur, buf, skb->len);
+ znet->tx_cur += rnd_len;
+ }
+ *znet->tx_cur++ = 0;
+
+ spin_lock_irqsave(&znet->lock, flags);
+ {
+ *tx_link = OP0_TRANSMIT | CR0_CHNL;
+ /* Is this always safe to do? */
+ outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
+ }
+ spin_unlock_irqrestore (&znet->lock, flags);
+
+ dev->trans_start = jiffies;
+ netif_start_queue (dev);
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+ }
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/* The ZNET interrupt handler. */
+static irqreturn_t znet_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ struct znet_private *znet = dev->priv;
+ int ioaddr;
+ int boguscnt = 20;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "znet_interrupt(): IRQ %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ spin_lock (&znet->lock);
+
+ ioaddr = dev->base_addr;
+
+ outb(CR0_STATUS_0, ioaddr);
+ do {
+ ushort status = inb(ioaddr);
+ if (znet_debug > 5) {
+ ushort result, rx_ptr, running;
+ outb(CR0_STATUS_1, ioaddr);
+ result = inw(ioaddr);
+ outb(CR0_STATUS_2, ioaddr);
+ rx_ptr = inw(ioaddr);
+ outb(CR0_STATUS_3, ioaddr);
+ running = inb(ioaddr);
+ printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
+ dev->name, status, result, rx_ptr, running, boguscnt);
+ }
+ if ((status & SR0_INTERRUPT) == 0)
+ break;
+
+ handled = 1;
+
+ if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
+ (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
+ (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) {
+ int tx_status;
+ outb(CR0_STATUS_1, ioaddr);
+ tx_status = inw(ioaddr);
+ /* It's undocumented, but tx_status seems to match the i82586. */
+ if (tx_status & TX_OK) {
+ znet->stats.tx_packets++;
+ znet->stats.collisions += tx_status & TX_NCOL_MASK;
+ } else {
+ if (tx_status & (TX_LOST_CTS | TX_LOST_CRS))
+ znet->stats.tx_carrier_errors++;
+ if (tx_status & TX_UND_RUN)
+ znet->stats.tx_fifo_errors++;
+ if (!(tx_status & TX_HRT_BEAT))
+ znet->stats.tx_heartbeat_errors++;
+ if (tx_status & TX_MAX_COL)
+ znet->stats.tx_aborted_errors++;
+ /* ...and the catch-all. */
+ if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL))
+ znet->stats.tx_errors++;
+
+ /* Transceiver may be stuck if cable
+ * was removed while emitting a
+ * packet. Flip it off, then on to
+ * reset it. This is very empirical,
+ * but it seems to work. */
+
+ znet_transceiver_power (dev, 0);
+ znet_transceiver_power (dev, 1);
+ }
+ netif_wake_queue (dev);
+ }
+
+ if ((status & SR0_RECEPTION) ||
+ (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) {
+ znet_rx(dev);
+ }
+ /* Clear the interrupts we've handled. */
+ outb(CR0_INT_ACK, ioaddr);
+ } while (boguscnt--);
+
+ spin_unlock (&znet->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void znet_rx(struct net_device *dev)
+{
+ struct znet_private *znet = dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 1;
+ short next_frame_end_offset = 0; /* Offset of next frame start. */
+ short *cur_frame_end;
+ short cur_frame_end_offset;
+
+ outb(CR0_STATUS_2, ioaddr);
+ cur_frame_end_offset = inw(ioaddr);
+
+ if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) {
+ printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
+ dev->name, cur_frame_end_offset);
+ return;
+ }
+
+ /* Use same method as the Crynwr driver: construct a forward list in
+ the same area of the backwards links we now have. This allows us to
+ pass packets to the upper layers in the order they were received --
+ important for fast-path sequential operations. */
+ while (znet->rx_start + cur_frame_end_offset != znet->rx_cur
+ && ++boguscount < 5) {
+ unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
+ int count, status;
+
+ if (cur_frame_end_offset < 4) {
+ /* Oh no, we have a special case: the frame trailer wraps around
+ the end of the ring buffer. We've saved space at the end of
+ the ring buffer for just this problem. */
+ memcpy(znet->rx_end, znet->rx_start, 8);
+ cur_frame_end_offset += (RX_BUF_SIZE/2);
+ }
+ cur_frame_end = znet->rx_start + cur_frame_end_offset - 4;
+
+ lo_status = *cur_frame_end++;
+ hi_status = *cur_frame_end++;
+ status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
+ lo_cnt = *cur_frame_end++;
+ hi_cnt = *cur_frame_end++;
+ count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
+ " count %#x status %04x.\n",
+ cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
+ count, status);
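+ /* Rewrite the trailer in place: status word, forward link to the next,
+ more recently received frame's trailer (0 for the newest), then the
+ byte count. */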
+ cur_frame_end[-4] = status;
+ cur_frame_end[-3] = next_frame_end_offset;
+ cur_frame_end[-2] = count;
+ next_frame_end_offset = cur_frame_end_offset;
+ cur_frame_end_offset -= ((count + 1)>>1) + 3;
+ if (cur_frame_end_offset < 0)
+ cur_frame_end_offset += RX_BUF_SIZE/2;
+ };
+
+ /* Now step forward through the list. */
+ do {
+ ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset;
+ int status = this_rfp_ptr[-4];
+ int pkt_len = this_rfp_ptr[-2];
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
+ " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
+ this_rfp_ptr[-3]<<1);
+ /* Once again we must assume that the i82586 docs apply. */
+ if ( ! (status & RX_RCV_OK)) { /* There was an error. */
+ znet->stats.rx_errors++;
+ if (status & RX_CRC_ERR) znet->stats.rx_crc_errors++;
+ if (status & RX_ALG_ERR) znet->stats.rx_frame_errors++;
+#if 0
+ if (status & 0x0200) znet->stats.rx_over_errors++; /* Wrong. */
+ if (status & 0x0100) znet->stats.rx_fifo_errors++;
+#else
+ /* maz : Wild guess... */
+ if (status & RX_OVRRUN) znet->stats.rx_over_errors++;
+#endif
+ if (status & RX_SRT_FRM) znet->stats.rx_length_errors++;
+ } else if (pkt_len > 1536) {
+ znet->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ if (znet_debug)
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ znet->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
+ int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
+ memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt);
+ memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start,
+ pkt_len - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len);
+ if (znet_debug > 6) {
+ unsigned int *packet = (unsigned int *) skb->data;
+ printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
+ packet[1], packet[2], packet[3]);
+ }
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ znet->stats.rx_packets++;
+ znet->stats.rx_bytes += pkt_len;
+ }
+ znet->rx_cur = this_rfp_ptr;
+ if (znet->rx_cur >= znet->rx_end)
+ znet->rx_cur -= RX_BUF_SIZE/2;
+ update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1);
+ next_frame_end_offset = this_rfp_ptr[-3];
+ if (next_frame_end_offset == 0) /* Read all the frames? */
+ break; /* Done for now */
+ this_rfp_ptr = znet->rx_start + next_frame_end_offset;
+ } while (--boguscount);
+
+ /* The received packets have already been handed to netif_rx() above;
+ the network layer will process them from its receive softirq. */
+ return;
+}
+
+/* The inverse routine to znet_open(). */
+static int znet_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue (dev);
+
+ outb(OP0_RESET, ioaddr); /* CMD0_RESET */
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ /* Turn off transceiver power. */
+ znet_transceiver_power (dev, 0);
+
+ znet_release_resources (dev);
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *net_get_stats(struct net_device *dev)
+{
+ struct znet_private *znet = dev->priv;
+
+ return &znet->stats;
+}
+
+static void show_dma(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ unsigned char stat = inb (ioaddr);
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
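+ /* The 8237 DMA-2 address register for this channel lives at
+ IO_DMA2_BASE + 4*(channel & 3); two byte reads give the 16-bit address. */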
+ short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port);
+ short residue;
+
+ addr |= inb(dma_port) << 8;
+ residue = get_dma_residue(znet->tx_dma);
+
+ if (znet_debug > 1) {
+ flags=claim_dma_lock();
+ printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
+ stat, addr<<1, residue);
+ release_dma_lock(flags);
+ }
+}
+
+/* Initialize the hardware. We have to do this when the board is open()ed
+ or when we come out of suspend mode. */
+static void hardware_init(struct net_device *dev)
+{
+ unsigned long flags;
+ short ioaddr = dev->base_addr;
+ struct znet_private *znet = dev->priv;
+
+ znet->rx_cur = znet->rx_start;
+ znet->tx_cur = znet->tx_start;
+
+ /* Reset the chip, and start it up. */
+ outb(OP0_RESET, ioaddr);
+
+ flags=claim_dma_lock();
+ disable_dma(znet->rx_dma); /* reset by an interrupting task. */
+ clear_dma_ff(znet->rx_dma);
+ set_dma_mode(znet->rx_dma, DMA_RX_MODE);
+ set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+ set_dma_count(znet->rx_dma, RX_BUF_SIZE);
+ enable_dma(znet->rx_dma);
+ /* Now set up the Tx channel. */
+ disable_dma(znet->tx_dma);
+ clear_dma_ff(znet->tx_dma);
+ set_dma_mode(znet->tx_dma, DMA_TX_MODE);
+ set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+ set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
+ enable_dma(znet->tx_dma);
+ release_dma_lock(flags);
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
+ dev->name, znet->rx_start,znet->tx_start);
+ /* Do an empty configure command, just like the Crynwr driver. This
+ resets the chip to its default values. */
+ *znet->tx_cur++ = 0;
+ *znet->tx_cur++ = 0;
+ show_dma(dev);
+ outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
+
+ znet_set_multicast_list (dev);
+
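+ /* Queue the individual-address setup command: a length word of 6
+ followed by the station address (three words), then kick IA_SETUP. */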
+ *znet->tx_cur++ = 6;
+ memcpy(znet->tx_cur, dev->dev_addr, 6);
+ znet->tx_cur += 3;
+ show_dma(dev);
+ outb(OP0_IA_SETUP | CR0_CHNL, ioaddr);
+ show_dma(dev);
+
+ update_stop_hit(ioaddr, 8192);
+ if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n");
+ outb(OP0_RCV_ENABLE, ioaddr);
+ netif_start_queue (dev);
+}
+
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
+{
+ outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr);
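+ /* The stop register apparently works in 64-byte units, hence the >> 6. */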
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
+ (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE);
+ outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr);
+ outb(OP1_SWIT_TO_PORT_0, ioaddr);
+}
+
+static __exit void znet_cleanup (void)
+{
+ if (znet_dev) {
+ struct znet_private *znet = znet_dev->priv;
+
+ unregister_netdev (znet_dev);
+ kfree (znet->rx_start);
+ kfree (znet->tx_start);
+ free_netdev (znet_dev);
+ }
+}
+
+module_init (znet_probe);
+module_exit (znet_cleanup);
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
new file mode 100644
index 000000000000..8ab6e12153ba
--- /dev/null
+++ b/drivers/net/zorro8390.c
@@ -0,0 +1,439 @@
+/*
+ * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver
+ *
+ * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on all the other NE2000 drivers for Linux
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS
+ * Ethernet Controllers.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/zorro.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "8390.h"
+
+
+#define DRV_NAME "zorro8390"
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD (0x00*2)
+#define NE_DATAPORT (0x10*2) /* NatSemi-defined port window offset. */
+#define NE_RESET (0x1f*2) /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT (0x20*2)
+
+#define NE_EN0_ISR (0x07*2)
+#define NE_EN0_DCFG (0x0e*2)
+
+#define NE_EN0_RSARLO (0x08*2)
+#define NE_EN0_RSARHI (0x09*2)
+#define NE_EN0_RCNTLO (0x0a*2)
+#define NE_EN0_RXCR (0x0c*2)
+#define NE_EN0_TXCR (0x0d*2)
+#define NE_EN0_RCNTHI (0x0b*2)
+#define NE_EN0_IMR (0x0f*2)
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+
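+/* Swap the bytes of a 16-bit value; used below on the 8390 packet header
+ count, presumably because the 680x0 host is big-endian. */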
+#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
+
+
+static struct card_info {
+ zorro_id id;
+ const char *name;
+ unsigned int offset;
+} cards[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
+};
+
+static int __devinit zorro8390_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static int __devinit zorro8390_init(struct net_device *dev,
+ unsigned long board, const char *name,
+ unsigned long ioaddr);
+static int zorro8390_open(struct net_device *dev);
+static int zorro8390_close(struct net_device *dev);
+static void zorro8390_reset_8390(struct net_device *dev);
+static void zorro8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void zorro8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void zorro8390_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf,
+ const int start_page);
+static void __devexit zorro8390_remove_one(struct zorro_dev *z);
+
+static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
+ { 0 }
+};
+
+static struct zorro_driver zorro8390_driver = {
+ .name = "zorro8390",
+ .id_table = zorro8390_zorro_tbl,
+ .probe = zorro8390_init_one,
+ .remove = __devexit_p(zorro8390_remove_one),
+};
+
+static int __devinit zorro8390_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ unsigned long board, ioaddr;
+ int err, i;
+
+ for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
+ if (z->id == cards[i].id)
+ break;
+ board = z->resource.start;
+ ioaddr = board+cards[i].offset;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
+ if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
+ free_netdev(dev);
+ return -EBUSY;
+ }
+ if ((err = zorro8390_init(dev, board, cards[i].name,
+ ZTWO_VADDR(ioaddr)))) {
+ release_mem_region(ioaddr, NE_IO_EXTENT*2);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+ return 0;
+}
+
+static int __devinit zorro8390_init(struct net_device *dev,
+ unsigned long board, const char *name,
+ unsigned long ioaddr)
+{
+ int i;
+ int err;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ static u32 zorro8390_offsets[16] = {
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+ };
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(KERN_WARNING " not found (no reset ack).\n");
+ return -ENODEV;
+ }
+
+ z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/
+ {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq. */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD+E8390_START, NE_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) {
+ z_writeb(program_seq[i].value, ioaddr + program_seq[i].offset);
+ }
+ }
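+ /* The PROM appears to return each byte doubled over the byte-wide data
+ port, so every second read is discarded. */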
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = z_readb(ioaddr + NE_DATAPORT);
+ (void)z_readb(ioaddr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ z_writeb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+ i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, DRV_NAME, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+#ifdef DEBUG
+ printk(" %2.2x", SA_prom[i]);
+#endif
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = &zorro8390_reset_8390;
+ ei_status.block_input = &zorro8390_block_input;
+ ei_status.block_output = &zorro8390_block_output;
+ ei_status.get_8390_hdr = &zorro8390_get_8390_hdr;
+ ei_status.reg_offset = zorro8390_offsets;
+ dev->open = &zorro8390_open;
+ dev->stop = &zorro8390_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ei_poll;
+#endif
+
+ NS8390_init(dev, 0);
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return err;
+ }
+
+ printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, name, board,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ return 0;
+}
+
+static int zorro8390_open(struct net_device *dev)
+{
+ ei_open(dev);
+ return 0;
+}
+
+static int zorro8390_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that an
+ 8390 reset command required, but that shouldn't be necessary. */
+static void zorro8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "resetting the 8390 t=%ld...\n", jiffies);
+
+ z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n",
+ dev->name);
+ break;
+ }
+ z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void zorro8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int cnt;
+ short *ptrs;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing,
+ ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
+ z_writeb(0, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
+ z_writeb(ring_page, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ ptrs = (short*)hdr;
+ for (cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+
+ hdr->count = WORDSWAP(hdr->count);
+
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using z_writeb. */
+
+static void zorro8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+ short *ptrs;
+ int cnt;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
+ z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < (count>>1); cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01)
+ buf[count-1] = z_readb(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void zorro8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+ short *ptrs;
+ int cnt;
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n", dev->name, ei_status.dmaing,
+ ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0x00, nic_base + NE_EN0_RSARLO);
+ z_writeb(start_page, nic_base + NE_EN0_RSARHI);
+
+ z_writeb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < count>>1; cnt++)
+ z_writew(*ptrs++, NE_BASE+NE_DATAPORT);
+
+ dma_start = jiffies;
+
+ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n",
+ dev->name);
+ zorro8390_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+static void __devexit zorro8390_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT*2);
+ free_netdev(dev);
+}
+
+static int __init zorro8390_init_module(void)
+{
+ return zorro_module_init(&zorro8390_driver);
+}
+
+static void __exit zorro8390_cleanup_module(void)
+{
+ zorro_unregister_driver(&zorro8390_driver);
+}
+
+module_init(zorro8390_init_module);
+module_exit(zorro8390_cleanup_module);
+
+MODULE_LICENSE("GPL");